From 76cb841cb886eef6b3bee341a2266c76578724ad Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Mon, 6 May 2024 03:02:30 +0200 Subject: Adding upstream version 4.19.249. Signed-off-by: Daniel Baumann --- include/acpi/acbuffer.h | 210 + include/acpi/acconfig.h | 217 + include/acpi/acexcep.h | 371 + include/acpi/acnames.h | 62 + include/acpi/acoutput.h | 459 + include/acpi/acpi.h | 33 + include/acpi/acpi_bus.h | 688 ++ include/acpi/acpi_drivers.h | 124 + include/acpi/acpi_io.h | 27 + include/acpi/acpi_lpat.h | 65 + include/acpi/acpi_numa.h | 26 + include/acpi/acpiosxf.h | 418 + include/acpi/acpixf.h | 961 ++ include/acpi/acrestyp.h | 678 ++ include/acpi/actbl.h | 400 + include/acpi/actbl1.h | 1626 ++++ include/acpi/actbl2.h | 1727 ++++ include/acpi/actbl3.h | 676 ++ include/acpi/actypes.h | 1288 +++ include/acpi/acuuid.h | 60 + include/acpi/apei.h | 56 + include/acpi/battery.h | 21 + include/acpi/button.h | 26 + include/acpi/cppc_acpi.h | 145 + include/acpi/ghes.h | 127 + include/acpi/hed.h | 18 + include/acpi/nfit.h | 18 + include/acpi/pcc.h | 30 + include/acpi/pdc_intel.h | 36 + include/acpi/platform/acenv.h | 356 + include/acpi/platform/acenvex.h | 48 + include/acpi/platform/acgcc.h | 57 + include/acpi/platform/acgccex.h | 24 + include/acpi/platform/acintel.h | 55 + include/acpi/platform/aclinux.h | 202 + include/acpi/platform/aclinuxex.h | 144 + include/acpi/processor.h | 445 + include/acpi/reboot.h | 12 + include/acpi/video.h | 95 + include/asm-generic/4level-fixup.h | 40 + include/asm-generic/5level-fixup.h | 43 + include/asm-generic/asm-offsets.h | 1 + include/asm-generic/asm-prototypes.h | 14 + include/asm-generic/atomic-instrumented.h | 467 + include/asm-generic/atomic-long.h | 269 + include/asm-generic/atomic.h | 202 + include/asm-generic/atomic64.h | 60 + include/asm-generic/audit_change_attr.h | 33 + include/asm-generic/audit_dir_write.h | 38 + include/asm-generic/audit_read.h | 14 + include/asm-generic/audit_signal.h | 3 + include/asm-generic/audit_write.h | 25 + include/asm-generic/barrier.h | 265 + include/asm-generic/bitops.h | 38 + include/asm-generic/bitops/__ffs.h | 44 + include/asm-generic/bitops/__fls.h | 44 + include/asm-generic/bitops/arch_hweight.h | 26 + include/asm-generic/bitops/atomic.h | 68 + include/asm-generic/bitops/builtin-__ffs.h | 16 + include/asm-generic/bitops/builtin-__fls.h | 16 + include/asm-generic/bitops/builtin-ffs.h | 18 + include/asm-generic/bitops/builtin-fls.h | 17 + include/asm-generic/bitops/const_hweight.h | 44 + include/asm-generic/bitops/ext2-atomic-setbit.h | 12 + include/asm-generic/bitops/ext2-atomic.h | 27 + include/asm-generic/bitops/ffs.h | 42 + include/asm-generic/bitops/ffz.h | 13 + include/asm-generic/bitops/find.h | 83 + include/asm-generic/bitops/fls.h | 42 + include/asm-generic/bitops/fls64.h | 37 + include/asm-generic/bitops/hweight.h | 8 + include/asm-generic/bitops/le.h | 98 + include/asm-generic/bitops/lock.h | 91 + include/asm-generic/bitops/non-atomic.h | 109 + include/asm-generic/bitops/sched.h | 32 + include/asm-generic/bitsperlong.h | 26 + include/asm-generic/bug.h | 254 + include/asm-generic/bugs.h | 11 + include/asm-generic/cache.h | 13 + include/asm-generic/cacheflush.h | 35 + include/asm-generic/checksum.h | 88 + include/asm-generic/cmpxchg-local.h | 68 + include/asm-generic/cmpxchg.h | 109 + include/asm-generic/compat.h | 3 + include/asm-generic/current.h | 10 + include/asm-generic/delay.h | 45 + include/asm-generic/device.h | 15 + include/asm-generic/div64.h | 249 + include/asm-generic/dma-contiguous.h | 10 + 
include/asm-generic/dma-mapping.h | 19 + include/asm-generic/dma.h | 16 + include/asm-generic/early_ioremap.h | 53 + include/asm-generic/emergency-restart.h | 10 + include/asm-generic/error-injection.h | 35 + include/asm-generic/exec.h | 19 + include/asm-generic/export.h | 93 + include/asm-generic/extable.h | 27 + include/asm-generic/fb.h | 13 + include/asm-generic/fixmap.h | 103 + include/asm-generic/ftrace.h | 16 + include/asm-generic/futex.h | 151 + include/asm-generic/getorder.h | 52 + include/asm-generic/gpio.h | 172 + include/asm-generic/hardirq.h | 22 + include/asm-generic/hugetlb.h | 43 + include/asm-generic/hw_irq.h | 9 + include/asm-generic/ide_iops.h | 39 + include/asm-generic/int-ll64.h | 47 + include/asm-generic/io.h | 1140 +++ include/asm-generic/ioctl.h | 18 + include/asm-generic/iomap.h | 94 + include/asm-generic/irq.h | 19 + include/asm-generic/irq_regs.h | 37 + include/asm-generic/irq_work.h | 11 + include/asm-generic/irqflags.h | 67 + include/asm-generic/kdebug.h | 10 + include/asm-generic/kmap_types.h | 11 + include/asm-generic/kprobes.h | 26 + include/asm-generic/kvm_para.h | 32 + include/asm-generic/linkage.h | 8 + include/asm-generic/local.h | 56 + include/asm-generic/local64.h | 97 + include/asm-generic/mcs_spinlock.h | 13 + include/asm-generic/memory_model.h | 86 + include/asm-generic/mm-arch-hooks.h | 16 + include/asm-generic/mm_hooks.h | 37 + include/asm-generic/mmu.h | 20 + include/asm-generic/mmu_context.h | 46 + include/asm-generic/module.h | 49 + include/asm-generic/msi.h | 33 + include/asm-generic/page.h | 101 + include/asm-generic/param.h | 11 + include/asm-generic/parport.h | 24 + include/asm-generic/pci.h | 17 + include/asm-generic/pci_iomap.h | 55 + include/asm-generic/percpu.h | 448 + include/asm-generic/pgalloc.h | 13 + include/asm-generic/pgtable-nop4d-hack.h | 63 + include/asm-generic/pgtable-nop4d.h | 58 + include/asm-generic/pgtable-nopmd.h | 70 + include/asm-generic/pgtable-nopud.h | 68 + include/asm-generic/pgtable.h | 1155 +++ include/asm-generic/preempt.h | 88 + include/asm-generic/ptrace.h | 74 + include/asm-generic/qrwlock.h | 138 + include/asm-generic/qrwlock_types.h | 34 + include/asm-generic/qspinlock.h | 123 + include/asm-generic/qspinlock_types.h | 112 + include/asm-generic/resource.h | 31 + include/asm-generic/rwsem.h | 140 + include/asm-generic/seccomp.h | 46 + include/asm-generic/sections.h | 147 + include/asm-generic/segment.h | 9 + include/asm-generic/serial.h | 14 + include/asm-generic/set_memory.h | 13 + include/asm-generic/signal.h | 15 + include/asm-generic/simd.h | 15 + include/asm-generic/sizes.h | 2 + include/asm-generic/spinlock.h | 12 + include/asm-generic/statfs.h | 8 + include/asm-generic/string.h | 10 + include/asm-generic/switch_to.h | 30 + include/asm-generic/syscall.h | 157 + include/asm-generic/syscalls.h | 29 + include/asm-generic/termios-base.h | 78 + include/asm-generic/termios.h | 108 + include/asm-generic/timex.h | 23 + include/asm-generic/tlb.h | 317 + include/asm-generic/tlbflush.h | 21 + include/asm-generic/topology.h | 77 + include/asm-generic/trace_clock.h | 17 + include/asm-generic/uaccess.h | 228 + include/asm-generic/unaligned.h | 36 + include/asm-generic/unistd.h | 13 + include/asm-generic/user.h | 8 + include/asm-generic/vga.h | 25 + include/asm-generic/vmlinux.lds.h | 1001 +++ include/asm-generic/vtime.h | 1 + include/asm-generic/word-at-a-time.h | 121 + include/asm-generic/xor.h | 718 ++ include/clocksource/arm_arch_timer.h | 119 + include/clocksource/pxa.h | 17 + include/clocksource/samsung_pwm.h | 
43 + include/clocksource/timer-sp804.h | 29 + include/clocksource/timer-ti-dm.h | 394 + include/crypto/acompress.h | 271 + include/crypto/aead.h | 534 ++ include/crypto/aes.h | 40 + include/crypto/akcipher.h | 389 + include/crypto/algapi.h | 428 + include/crypto/authenc.h | 37 + include/crypto/b128ops.h | 80 + include/crypto/blake2s.h | 102 + include/crypto/blowfish.h | 24 + include/crypto/cast5.h | 24 + include/crypto/cast6.h | 25 + include/crypto/cast_common.h | 10 + include/crypto/cbc.h | 146 + include/crypto/chacha20.h | 42 + include/crypto/cryptd.h | 87 + include/crypto/crypto_wq.h | 8 + include/crypto/ctr.h | 20 + include/crypto/des.h | 23 + include/crypto/dh.h | 91 + include/crypto/drbg.h | 285 + include/crypto/ecdh.h | 88 + include/crypto/engine.h | 116 + include/crypto/gcm.h | 8 + include/crypto/gf128mul.h | 252 + include/crypto/ghash.h | 24 + include/crypto/hash.h | 946 ++ include/crypto/hash_info.h | 43 + include/crypto/hmac.h | 8 + include/crypto/if_alg.h | 256 + include/crypto/internal/acompress.h | 84 + include/crypto/internal/aead.h | 191 + include/crypto/internal/akcipher.h | 144 + include/crypto/internal/blake2s.h | 19 + include/crypto/internal/geniv.h | 33 + include/crypto/internal/hash.h | 251 + include/crypto/internal/kpp.h | 64 + include/crypto/internal/rng.h | 45 + include/crypto/internal/rsa.h | 62 + include/crypto/internal/scompress.h | 128 + include/crypto/internal/simd.h | 25 + include/crypto/internal/skcipher.h | 211 + include/crypto/kpp.h | 352 + include/crypto/mcryptd.h | 114 + include/crypto/md5.h | 25 + include/crypto/morus1280_glue.h | 137 + include/crypto/morus640_glue.h | 137 + include/crypto/morus_common.h | 23 + include/crypto/null.h | 15 + include/crypto/padlock.h | 29 + include/crypto/pcrypt.h | 51 + include/crypto/pkcs7.h | 47 + include/crypto/poly1305.h | 40 + include/crypto/public_key.h | 74 + include/crypto/rng.h | 202 + include/crypto/scatterwalk.h | 120 + include/crypto/serpent.h | 28 + include/crypto/sha.h | 115 + include/crypto/sha1_base.h | 106 + include/crypto/sha256_base.h | 128 + include/crypto/sha3.h | 34 + include/crypto/sha512_base.h | 131 + include/crypto/skcipher.h | 615 ++ include/crypto/sm3.h | 40 + include/crypto/sm3_base.h | 117 + include/crypto/sm4.h | 31 + include/crypto/twofish.h | 25 + include/crypto/xts.h | 60 + include/drm/amd_asic_type.h | 55 + include/drm/ati_pcigart.h | 31 + include/drm/bridge/analogix_dp.h | 60 + include/drm/bridge/dw_hdmi.h | 175 + include/drm/bridge/dw_mipi_dsi.h | 44 + include/drm/bridge/mhl.h | 380 + include/drm/drmP.h | 116 + include/drm/drm_agpsupport.h | 149 + include/drm/drm_atomic.h | 916 ++ include/drm/drm_atomic_helper.h | 265 + include/drm/drm_audio_component.h | 118 + include/drm/drm_auth.h | 106 + include/drm/drm_blend.h | 55 + include/drm/drm_bridge.h | 327 + include/drm/drm_cache.h | 73 + include/drm/drm_client.h | 140 + include/drm/drm_color_mgmt.h | 71 + include/drm/drm_connector.h | 1344 +++ include/drm/drm_crtc.h | 1141 +++ include/drm/drm_crtc_helper.h | 82 + include/drm/drm_debugfs.h | 101 + include/drm/drm_debugfs_crc.h | 74 + include/drm/drm_device.h | 232 + include/drm/drm_displayid.h | 103 + include/drm/drm_dp_dual_mode_helper.h | 119 + include/drm/drm_dp_helper.h | 1317 +++ include/drm/drm_dp_mst_helper.h | 637 ++ include/drm/drm_drv.h | 685 ++ include/drm/drm_edid.h | 492 ++ include/drm/drm_encoder.h | 268 + include/drm/drm_encoder_slave.h | 183 + include/drm/drm_fb_cma_helper.h | 41 + include/drm/drm_fb_helper.h | 618 ++ include/drm/drm_file.h | 387 + include/drm/drm_fixed.h | 
211 + include/drm/drm_flip_work.h | 92 + include/drm/drm_fourcc.h | 77 + include/drm/drm_framebuffer.h | 316 + include/drm/drm_gem.h | 296 + include/drm/drm_gem_cma_helper.h | 106 + include/drm/drm_gem_framebuffer_helper.h | 40 + include/drm/drm_global.h | 53 + include/drm/drm_hashtab.h | 79 + include/drm/drm_hdcp.h | 41 + include/drm/drm_ioctl.h | 183 + include/drm/drm_irq.h | 32 + include/drm/drm_lease.h | 46 + include/drm/drm_legacy.h | 207 + include/drm/drm_mipi_dsi.h | 319 + include/drm/drm_mm.h | 549 ++ include/drm/drm_mode_config.h | 859 ++ include/drm/drm_mode_object.h | 135 + include/drm/drm_modes.h | 544 ++ include/drm/drm_modeset_helper.h | 40 + include/drm/drm_modeset_helper_vtables.h | 1252 +++ include/drm/drm_modeset_lock.h | 133 + include/drm/drm_of.h | 130 + include/drm/drm_os_linux.h | 55 + include/drm/drm_panel.h | 207 + include/drm/drm_pci.h | 61 + include/drm/drm_pciids.h | 814 ++ include/drm/drm_plane.h | 783 ++ include/drm/drm_plane_helper.h | 79 + include/drm/drm_prime.h | 107 + include/drm/drm_print.h | 426 + include/drm/drm_property.h | 300 + include/drm/drm_rect.h | 200 + include/drm/drm_scdc_helper.h | 136 + include/drm/drm_simple_kms_helper.h | 185 + include/drm/drm_syncobj.h | 151 + include/drm/drm_sysfs.h | 13 + include/drm/drm_utils.h | 15 + include/drm/drm_vblank.h | 231 + include/drm/drm_vma_manager.h | 234 + include/drm/drm_writeback.h | 136 + include/drm/gma_drm.h | 25 + include/drm/gpu_scheduler.h | 311 + include/drm/i2c/ch7006.h | 86 + include/drm/i2c/sil164.h | 63 + include/drm/i2c/tda998x.h | 40 + include/drm/i915_component.h | 49 + include/drm/i915_drm.h | 103 + include/drm/i915_pciids.h | 461 + include/drm/intel-gtt.h | 37 + include/drm/intel_lpe_audio.h | 51 + include/drm/spsc_queue.h | 122 + include/drm/tinydrm/mipi-dbi.h | 109 + include/drm/tinydrm/tinydrm-helpers.h | 84 + include/drm/tinydrm/tinydrm.h | 122 + include/drm/ttm/ttm_bo_api.h | 767 ++ include/drm/ttm/ttm_bo_driver.h | 872 ++ include/drm/ttm/ttm_debug.h | 31 + include/drm/ttm/ttm_execbuf_util.h | 120 + include/drm/ttm/ttm_lock.h | 248 + include/drm/ttm/ttm_memory.h | 98 + include/drm/ttm/ttm_module.h | 40 + include/drm/ttm/ttm_object.h | 354 + include/drm/ttm/ttm_page_alloc.h | 122 + include/drm/ttm/ttm_placement.h | 108 + include/drm/ttm/ttm_set_memory.h | 150 + include/drm/ttm/ttm_tt.h | 272 + include/dt-bindings/arm/ux500_pm_domains.h | 15 + include/dt-bindings/bus/ti-sysc.h | 24 + include/dt-bindings/clk/ti-dra7-atl.h | 40 + include/dt-bindings/clock/actions,s700-cmu.h | 118 + include/dt-bindings/clock/actions,s900-cmu.h | 129 + include/dt-bindings/clock/alphascale,asm9260.h | 97 + include/dt-bindings/clock/am3.h | 108 + include/dt-bindings/clock/am4.h | 113 + include/dt-bindings/clock/aspeed-clock.h | 54 + include/dt-bindings/clock/at91.h | 23 + include/dt-bindings/clock/ath79-clk.h | 19 + include/dt-bindings/clock/axg-aoclkc.h | 26 + include/dt-bindings/clock/axg-audio-clkc.h | 94 + include/dt-bindings/clock/axg-clkc.h | 76 + include/dt-bindings/clock/axis,artpec6-clkctrl.h | 38 + include/dt-bindings/clock/bcm-cygnus.h | 74 + include/dt-bindings/clock/bcm-ns2.h | 72 + include/dt-bindings/clock/bcm-nsp.h | 51 + include/dt-bindings/clock/bcm-sr.h | 111 + include/dt-bindings/clock/bcm21664.h | 62 + include/dt-bindings/clock/bcm281xx.h | 77 + include/dt-bindings/clock/bcm2835-aux.h | 17 + include/dt-bindings/clock/bcm2835.h | 68 + include/dt-bindings/clock/berlin2.h | 46 + include/dt-bindings/clock/berlin2q.h | 33 + include/dt-bindings/clock/boston-clock.h | 14 + 
include/dt-bindings/clock/clps711x-clock.h | 27 + include/dt-bindings/clock/cortina,gemini-clock.h | 30 + include/dt-bindings/clock/dm814.h | 45 + include/dt-bindings/clock/dm816.h | 53 + include/dt-bindings/clock/dra7.h | 173 + include/dt-bindings/clock/efm32-cmu.h | 43 + include/dt-bindings/clock/exynos-audss-clk.h | 27 + include/dt-bindings/clock/exynos3250.h | 356 + include/dt-bindings/clock/exynos4.h | 310 + include/dt-bindings/clock/exynos5250.h | 182 + include/dt-bindings/clock/exynos5260-clk.h | 469 + include/dt-bindings/clock/exynos5410.h | 68 + include/dt-bindings/clock/exynos5420.h | 258 + include/dt-bindings/clock/exynos5433.h | 1412 +++ include/dt-bindings/clock/exynos7-clk.h | 207 + include/dt-bindings/clock/gxbb-aoclkc.h | 67 + include/dt-bindings/clock/gxbb-clkc.h | 132 + include/dt-bindings/clock/hi3516cv300-clock.h | 48 + include/dt-bindings/clock/hi3519-clock.h | 40 + include/dt-bindings/clock/hi3620-clock.h | 157 + include/dt-bindings/clock/hi3660-clock.h | 218 + include/dt-bindings/clock/hi6220-clock.h | 181 + include/dt-bindings/clock/hip04-clock.h | 35 + include/dt-bindings/clock/histb-clock.h | 82 + include/dt-bindings/clock/hix5hd2-clock.h | 85 + include/dt-bindings/clock/imx1-clock.h | 40 + include/dt-bindings/clock/imx21-clock.h | 80 + include/dt-bindings/clock/imx27-clock.h | 108 + include/dt-bindings/clock/imx5-clock.h | 219 + include/dt-bindings/clock/imx6qdl-clock.h | 278 + include/dt-bindings/clock/imx6sl-clock.h | 180 + include/dt-bindings/clock/imx6sll-clock.h | 209 + include/dt-bindings/clock/imx6sx-clock.h | 284 + include/dt-bindings/clock/imx6ul-clock.h | 265 + include/dt-bindings/clock/imx7d-clock.h | 459 + include/dt-bindings/clock/jz4740-cgu.h | 38 + include/dt-bindings/clock/jz4770-cgu.h | 58 + include/dt-bindings/clock/jz4780-cgu.h | 89 + include/dt-bindings/clock/lpc18xx-ccu.h | 74 + include/dt-bindings/clock/lpc18xx-cgu.h | 41 + include/dt-bindings/clock/lpc32xx-clock.h | 58 + include/dt-bindings/clock/lsi,axm5516-clks.h | 36 + include/dt-bindings/clock/marvell,mmp2.h | 76 + include/dt-bindings/clock/marvell,pxa168.h | 61 + include/dt-bindings/clock/marvell,pxa1928.h | 58 + include/dt-bindings/clock/marvell,pxa910.h | 59 + include/dt-bindings/clock/maxim,max77620.h | 21 + include/dt-bindings/clock/maxim,max77686.h | 23 + include/dt-bindings/clock/maxim,max77802.h | 22 + include/dt-bindings/clock/maxim,max9485.h | 18 + include/dt-bindings/clock/meson8b-clkc.h | 107 + include/dt-bindings/clock/microchip,pic32-clock.h | 42 + include/dt-bindings/clock/mpc512x-clock.h | 77 + include/dt-bindings/clock/mt2701-clk.h | 492 ++ include/dt-bindings/clock/mt2712-clk.h | 435 + include/dt-bindings/clock/mt6797-clk.h | 281 + include/dt-bindings/clock/mt7622-clk.h | 290 + include/dt-bindings/clock/mt8135-clk.h | 194 + include/dt-bindings/clock/mt8173-clk.h | 330 + include/dt-bindings/clock/nuvoton,npcm7xx-clock.h | 44 + include/dt-bindings/clock/omap4.h | 146 + include/dt-bindings/clock/omap5.h | 118 + include/dt-bindings/clock/oxsemi,ox810se.h | 30 + include/dt-bindings/clock/oxsemi,ox820.h | 40 + include/dt-bindings/clock/pistachio-clk.h | 183 + include/dt-bindings/clock/px30-cru.h | 389 + include/dt-bindings/clock/pxa-clock.h | 78 + include/dt-bindings/clock/qcom,dispcc-sdm845.h | 45 + include/dt-bindings/clock/qcom,gcc-apq8084.h | 357 + include/dt-bindings/clock/qcom,gcc-ipq4019.h | 169 + include/dt-bindings/clock/qcom,gcc-ipq806x.h | 295 + include/dt-bindings/clock/qcom,gcc-ipq8074.h | 374 + include/dt-bindings/clock/qcom,gcc-mdm9615.h | 329 + 
include/dt-bindings/clock/qcom,gcc-msm8660.h | 276 + include/dt-bindings/clock/qcom,gcc-msm8916.h | 187 + include/dt-bindings/clock/qcom,gcc-msm8960.h | 323 + include/dt-bindings/clock/qcom,gcc-msm8974.h | 327 + include/dt-bindings/clock/qcom,gcc-msm8994.h | 138 + include/dt-bindings/clock/qcom,gcc-msm8996.h | 358 + include/dt-bindings/clock/qcom,gcc-msm8998.h | 208 + include/dt-bindings/clock/qcom,gcc-sdm845.h | 241 + include/dt-bindings/clock/qcom,lcc-ipq806x.h | 30 + include/dt-bindings/clock/qcom,lcc-mdm9615.h | 52 + include/dt-bindings/clock/qcom,lcc-msm8960.h | 50 + include/dt-bindings/clock/qcom,mmcc-apq8084.h | 193 + include/dt-bindings/clock/qcom,mmcc-msm8960.h | 145 + include/dt-bindings/clock/qcom,mmcc-msm8974.h | 169 + include/dt-bindings/clock/qcom,mmcc-msm8996.h | 303 + include/dt-bindings/clock/qcom,rpmcc.h | 127 + include/dt-bindings/clock/qcom,rpmh.h | 22 + include/dt-bindings/clock/qcom,videocc-sdm845.h | 35 + include/dt-bindings/clock/r7s72100-clock.h | 115 + include/dt-bindings/clock/r8a73a4-clock.h | 64 + include/dt-bindings/clock/r8a7740-clock.h | 78 + include/dt-bindings/clock/r8a7743-cpg-mssr.h | 43 + include/dt-bindings/clock/r8a7745-cpg-mssr.h | 44 + include/dt-bindings/clock/r8a77470-cpg-mssr.h | 36 + include/dt-bindings/clock/r8a7778-clock.h | 71 + include/dt-bindings/clock/r8a7779-clock.h | 64 + include/dt-bindings/clock/r8a7790-clock.h | 162 + include/dt-bindings/clock/r8a7790-cpg-mssr.h | 52 + include/dt-bindings/clock/r8a7791-clock.h | 165 + include/dt-bindings/clock/r8a7791-cpg-mssr.h | 48 + include/dt-bindings/clock/r8a7792-clock.h | 102 + include/dt-bindings/clock/r8a7792-cpg-mssr.h | 43 + include/dt-bindings/clock/r8a7793-clock.h | 167 + include/dt-bindings/clock/r8a7793-cpg-mssr.h | 48 + include/dt-bindings/clock/r8a7794-clock.h | 141 + include/dt-bindings/clock/r8a7794-cpg-mssr.h | 47 + include/dt-bindings/clock/r8a7795-cpg-mssr.h | 70 + include/dt-bindings/clock/r8a7796-cpg-mssr.h | 69 + include/dt-bindings/clock/r8a77965-cpg-mssr.h | 62 + include/dt-bindings/clock/r8a77970-cpg-mssr.h | 48 + include/dt-bindings/clock/r8a77980-cpg-mssr.h | 51 + include/dt-bindings/clock/r8a77990-cpg-mssr.h | 62 + include/dt-bindings/clock/r8a77995-cpg-mssr.h | 57 + include/dt-bindings/clock/r9a06g032-sysctrl.h | 148 + include/dt-bindings/clock/renesas-cpg-mssr.h | 15 + include/dt-bindings/clock/rk3036-cru.h | 195 + include/dt-bindings/clock/rk3066a-cru.h | 40 + include/dt-bindings/clock/rk3128-cru.h | 282 + include/dt-bindings/clock/rk3188-cru-common.h | 269 + include/dt-bindings/clock/rk3188-cru.h | 56 + include/dt-bindings/clock/rk3228-cru.h | 295 + include/dt-bindings/clock/rk3288-cru.h | 389 + include/dt-bindings/clock/rk3328-cru.h | 400 + include/dt-bindings/clock/rk3368-cru.h | 390 + include/dt-bindings/clock/rk3399-cru.h | 760 ++ include/dt-bindings/clock/rk3399-ddr.h | 56 + include/dt-bindings/clock/rockchip,rk808.h | 12 + include/dt-bindings/clock/rv1108-cru.h | 362 + include/dt-bindings/clock/s3c2410.h | 62 + include/dt-bindings/clock/s3c2412.h | 73 + include/dt-bindings/clock/s3c2443.h | 94 + include/dt-bindings/clock/s5pv210-audss.h | 34 + include/dt-bindings/clock/s5pv210.h | 239 + include/dt-bindings/clock/samsung,s2mps11.h | 23 + include/dt-bindings/clock/samsung,s3c64xx-clock.h | 178 + include/dt-bindings/clock/sh73a0-clock.h | 86 + include/dt-bindings/clock/sprd,sc9860-clk.h | 423 + include/dt-bindings/clock/ste-ab8500.h | 12 + include/dt-bindings/clock/stih407-clks.h | 91 + include/dt-bindings/clock/stih410-clks.h | 26 + 
include/dt-bindings/clock/stih416-clks.h | 17 + include/dt-bindings/clock/stih418-clks.h | 35 + include/dt-bindings/clock/stm32fx-clock.h | 60 + include/dt-bindings/clock/stm32h7-clks.h | 165 + include/dt-bindings/clock/stm32mp1-clks.h | 254 + include/dt-bindings/clock/stratix10-clock.h | 84 + include/dt-bindings/clock/sun4i-a10-ccu.h | 202 + include/dt-bindings/clock/sun4i-a10-pll2.h | 53 + include/dt-bindings/clock/sun50i-a64-ccu.h | 136 + include/dt-bindings/clock/sun50i-h6-ccu.h | 125 + include/dt-bindings/clock/sun50i-h6-r-ccu.h | 24 + include/dt-bindings/clock/sun5i-ccu.h | 106 + include/dt-bindings/clock/sun6i-a31-ccu.h | 191 + include/dt-bindings/clock/sun7i-a20-ccu.h | 53 + include/dt-bindings/clock/sun8i-a23-a33-ccu.h | 127 + include/dt-bindings/clock/sun8i-a83t-ccu.h | 140 + include/dt-bindings/clock/sun8i-de2.h | 18 + include/dt-bindings/clock/sun8i-h3-ccu.h | 152 + include/dt-bindings/clock/sun8i-r-ccu.h | 59 + include/dt-bindings/clock/sun8i-r40-ccu.h | 191 + include/dt-bindings/clock/sun8i-tcon-top.h | 11 + include/dt-bindings/clock/sun8i-v3s-ccu.h | 107 + include/dt-bindings/clock/sun9i-a80-ccu.h | 162 + include/dt-bindings/clock/sun9i-a80-de.h | 80 + include/dt-bindings/clock/sun9i-a80-usb.h | 59 + include/dt-bindings/clock/tegra114-car.h | 344 + include/dt-bindings/clock/tegra124-car-common.h | 346 + include/dt-bindings/clock/tegra124-car.h | 20 + include/dt-bindings/clock/tegra186-clock.h | 941 ++ include/dt-bindings/clock/tegra194-clock.h | 321 + include/dt-bindings/clock/tegra20-car.h | 159 + include/dt-bindings/clock/tegra210-car.h | 412 + include/dt-bindings/clock/tegra30-car.h | 274 + include/dt-bindings/clock/vf610-clock.h | 204 + include/dt-bindings/clock/zx296702-clock.h | 183 + include/dt-bindings/clock/zx296718-clock.h | 167 + include/dt-bindings/display/tda998x.h | 8 + include/dt-bindings/dma/at91.h | 52 + include/dt-bindings/dma/axi-dmac.h | 48 + include/dt-bindings/dma/jz4780-dma.h | 49 + include/dt-bindings/dma/nbpfaxi.h | 20 + include/dt-bindings/dma/sun4i-a10.h | 56 + include/dt-bindings/gce/mt8173-gce.h | 44 + include/dt-bindings/gpio/aspeed-gpio.h | 49 + include/dt-bindings/gpio/gpio.h | 36 + include/dt-bindings/gpio/meson-axg-gpio.h | 116 + include/dt-bindings/gpio/meson-gxbb-gpio.h | 154 + include/dt-bindings/gpio/meson-gxl-gpio.h | 131 + include/dt-bindings/gpio/meson8-gpio.h | 157 + include/dt-bindings/gpio/meson8b-gpio.h | 127 + include/dt-bindings/gpio/tegra-gpio.h | 52 + include/dt-bindings/gpio/tegra186-gpio.h | 57 + include/dt-bindings/gpio/tegra194-gpio.h | 61 + include/dt-bindings/gpio/uniphier-gpio.h | 18 + include/dt-bindings/i2c/i2c.h | 18 + include/dt-bindings/iio/adc/at91-sama5d2_adc.h | 16 + include/dt-bindings/iio/adc/fsl-imx25-gcq.h | 19 + include/dt-bindings/iio/adi,ad5592r.h | 17 + include/dt-bindings/iio/qcom,spmi-vadc.h | 119 + include/dt-bindings/input/gpio-keys.h | 13 + include/dt-bindings/input/input.h | 18 + include/dt-bindings/input/linux-event-codes.h | 1 + include/dt-bindings/input/ti-drv260x.h | 36 + include/dt-bindings/interrupt-controller/arm-gic.h | 23 + include/dt-bindings/interrupt-controller/irq-st.h | 30 + include/dt-bindings/interrupt-controller/irq.h | 20 + .../dt-bindings/interrupt-controller/mips-gic.h | 10 + .../dt-bindings/interrupt-controller/mvebu-icu.h | 16 + include/dt-bindings/leds/common.h | 22 + include/dt-bindings/leds/leds-netxbig.h | 18 + include/dt-bindings/leds/leds-ns2.h | 9 + include/dt-bindings/leds/leds-pca9532.h | 18 + include/dt-bindings/leds/leds-pca955x.h | 16 + 
include/dt-bindings/mailbox/tegra186-hsp.h | 25 + include/dt-bindings/media/c8sectpfe.h | 13 + include/dt-bindings/media/omap3-isp.h | 22 + include/dt-bindings/media/tda1997x.h | 74 + include/dt-bindings/media/tvp5150.h | 35 + include/dt-bindings/media/xilinx-vip.h | 39 + include/dt-bindings/memory/mt2701-larb-port.h | 85 + include/dt-bindings/memory/mt2712-larb-port.h | 95 + include/dt-bindings/memory/mt8173-larb-port.h | 107 + include/dt-bindings/memory/tegra114-mc.h | 43 + include/dt-bindings/memory/tegra124-mc.h | 57 + include/dt-bindings/memory/tegra186-mc.h | 111 + include/dt-bindings/memory/tegra20-mc.h | 21 + include/dt-bindings/memory/tegra210-mc.h | 68 + include/dt-bindings/memory/tegra30-mc.h | 44 + include/dt-bindings/mfd/arizona.h | 118 + include/dt-bindings/mfd/as3722.h | 53 + include/dt-bindings/mfd/atmel-flexcom.h | 26 + include/dt-bindings/mfd/dbx500-prcmu.h | 84 + include/dt-bindings/mfd/max77620.h | 40 + include/dt-bindings/mfd/palmas.h | 19 + include/dt-bindings/mfd/qcom-rpm.h | 183 + include/dt-bindings/mfd/st-lpc.h | 17 + include/dt-bindings/mfd/stm32f4-rcc.h | 109 + include/dt-bindings/mfd/stm32f7-rcc.h | 114 + include/dt-bindings/mfd/stm32h7-rcc.h | 136 + include/dt-bindings/mips/lantiq_rcu_gphy.h | 15 + include/dt-bindings/mux/mux.h | 17 + include/dt-bindings/net/microchip-lan78xx.h | 21 + include/dt-bindings/net/mscc-phy-vsc8531.h | 29 + include/dt-bindings/net/ti-dp83867.h | 59 + include/dt-bindings/phy/phy-pistachio-usb.h | 16 + include/dt-bindings/phy/phy-qcom-qusb2.h | 37 + include/dt-bindings/phy/phy.h | 20 + include/dt-bindings/pinctrl/am33xx.h | 44 + include/dt-bindings/pinctrl/am43xx.h | 55 + include/dt-bindings/pinctrl/at91.h | 46 + include/dt-bindings/pinctrl/bcm2835.h | 32 + .../dt-bindings/pinctrl/brcm,pinctrl-stingray.h | 68 + include/dt-bindings/pinctrl/dm814x.h | 49 + include/dt-bindings/pinctrl/dra.h | 80 + include/dt-bindings/pinctrl/hisi.h | 74 + include/dt-bindings/pinctrl/keystone.h | 39 + include/dt-bindings/pinctrl/mt6397-pinfunc.h | 257 + include/dt-bindings/pinctrl/mt65xx.h | 40 + include/dt-bindings/pinctrl/mt7623-pinfunc.h | 651 ++ include/dt-bindings/pinctrl/nomadik.h | 36 + include/dt-bindings/pinctrl/omap.h | 91 + include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h | 8 + include/dt-bindings/pinctrl/pinctrl-tegra.h | 45 + include/dt-bindings/pinctrl/qcom,pmic-gpio.h | 164 + include/dt-bindings/pinctrl/qcom,pmic-mpp.h | 106 + include/dt-bindings/pinctrl/r7s72100-pinctrl.h | 17 + include/dt-bindings/pinctrl/rockchip.h | 67 + include/dt-bindings/pinctrl/samsung.h | 77 + include/dt-bindings/pinctrl/stm32-pinfunc.h | 36 + include/dt-bindings/pinctrl/sun4i-a10.h | 62 + include/dt-bindings/power/imx7-power.h | 16 + include/dt-bindings/power/mt2701-power.h | 27 + include/dt-bindings/power/mt2712-power.h | 29 + include/dt-bindings/power/mt6797-power.h | 30 + include/dt-bindings/power/mt7622-power.h | 22 + include/dt-bindings/power/mt7623a-power.h | 10 + include/dt-bindings/power/mt8173-power.h | 16 + include/dt-bindings/power/owl-s500-powergate.h | 19 + include/dt-bindings/power/owl-s700-powergate.h | 19 + include/dt-bindings/power/px30-power.h | 27 + include/dt-bindings/power/r8a7743-sysc.h | 25 + include/dt-bindings/power/r8a7745-sysc.h | 25 + include/dt-bindings/power/r8a77470-sysc.h | 22 + include/dt-bindings/power/r8a7779-sysc.h | 27 + include/dt-bindings/power/r8a7790-sysc.h | 34 + include/dt-bindings/power/r8a7791-sysc.h | 26 + include/dt-bindings/power/r8a7792-sysc.h | 26 + include/dt-bindings/power/r8a7793-sysc.h | 28 + 
include/dt-bindings/power/r8a7794-sysc.h | 26 + include/dt-bindings/power/r8a7795-sysc.h | 42 + include/dt-bindings/power/r8a7796-sysc.h | 36 + include/dt-bindings/power/r8a77965-sysc.h | 30 + include/dt-bindings/power/r8a77970-sysc.h | 32 + include/dt-bindings/power/r8a77980-sysc.h | 43 + include/dt-bindings/power/r8a77990-sysc.h | 26 + include/dt-bindings/power/r8a77995-sysc.h | 23 + include/dt-bindings/power/raspberrypi-power.h | 41 + include/dt-bindings/power/rk3036-power.h | 13 + include/dt-bindings/power/rk3128-power.h | 14 + include/dt-bindings/power/rk3228-power.h | 21 + include/dt-bindings/power/rk3288-power.h | 32 + include/dt-bindings/power/rk3328-power.h | 19 + include/dt-bindings/power/rk3366-power.h | 24 + include/dt-bindings/power/rk3368-power.h | 29 + include/dt-bindings/power/rk3399-power.h | 54 + include/dt-bindings/power/tegra186-powergate.h | 39 + include/dt-bindings/power/tegra194-powergate.h | 35 + include/dt-bindings/pwm/pwm.h | 15 + include/dt-bindings/regulator/maxim,max77802.h | 15 + .../dt-bindings/regulator/qcom,rpmh-regulator.h | 36 + include/dt-bindings/reset/altr,rst-mgr-a10.h | 110 + include/dt-bindings/reset/altr,rst-mgr-a10sr.h | 33 + include/dt-bindings/reset/altr,rst-mgr-s10.h | 108 + include/dt-bindings/reset/altr,rst-mgr.h | 90 + .../reset/amlogic,meson-axg-audio-arb.h | 17 + .../dt-bindings/reset/amlogic,meson-axg-reset.h | 124 + .../dt-bindings/reset/amlogic,meson-gxbb-reset.h | 210 + .../dt-bindings/reset/amlogic,meson8b-clkc-reset.h | 27 + include/dt-bindings/reset/amlogic,meson8b-reset.h | 175 + include/dt-bindings/reset/axg-aoclkc.h | 20 + include/dt-bindings/reset/cortina,gemini-reset.h | 37 + include/dt-bindings/reset/gxbb-aoclkc.h | 66 + include/dt-bindings/reset/hisi,hi6220-resets.h | 76 + include/dt-bindings/reset/imx7-reset.h | 62 + include/dt-bindings/reset/mt2701-resets.h | 93 + include/dt-bindings/reset/mt7622-reset.h | 94 + include/dt-bindings/reset/mt8135-resets.h | 64 + include/dt-bindings/reset/mt8173-resets.h | 63 + include/dt-bindings/reset/oxsemi,ox810se.h | 53 + include/dt-bindings/reset/oxsemi,ox820.h | 53 + include/dt-bindings/reset/pistachio-resets.h | 37 + include/dt-bindings/reset/qcom,gcc-apq8084.h | 109 + include/dt-bindings/reset/qcom,gcc-ipq806x.h | 175 + include/dt-bindings/reset/qcom,gcc-mdm9615.h | 136 + include/dt-bindings/reset/qcom,gcc-msm8660.h | 134 + include/dt-bindings/reset/qcom,gcc-msm8916.h | 108 + include/dt-bindings/reset/qcom,gcc-msm8960.h | 134 + include/dt-bindings/reset/qcom,gcc-msm8974.h | 96 + include/dt-bindings/reset/qcom,mmcc-apq8084.h | 64 + include/dt-bindings/reset/qcom,mmcc-msm8960.h | 101 + include/dt-bindings/reset/qcom,mmcc-msm8974.h | 62 + include/dt-bindings/reset/qcom,sdm845-aoss.h | 17 + include/dt-bindings/reset/snps,hsdk-reset.h | 17 + include/dt-bindings/reset/stih407-resets.h | 66 + include/dt-bindings/reset/stih415-resets.h | 28 + include/dt-bindings/reset/stih416-resets.h | 52 + include/dt-bindings/reset/stm32mp1-resets.h | 108 + include/dt-bindings/reset/sun4i-a10-ccu.h | 69 + include/dt-bindings/reset/sun50i-a64-ccu.h | 98 + include/dt-bindings/reset/sun50i-h6-ccu.h | 73 + include/dt-bindings/reset/sun50i-h6-r-ccu.h | 17 + include/dt-bindings/reset/sun5i-ccu.h | 32 + include/dt-bindings/reset/sun6i-a31-ccu.h | 106 + include/dt-bindings/reset/sun8i-a23-a33-ccu.h | 87 + include/dt-bindings/reset/sun8i-a83t-ccu.h | 98 + include/dt-bindings/reset/sun8i-de2.h | 14 + include/dt-bindings/reset/sun8i-h3-ccu.h | 106 + include/dt-bindings/reset/sun8i-r-ccu.h | 53 + 
include/dt-bindings/reset/sun8i-r40-ccu.h | 130 + include/dt-bindings/reset/sun8i-v3s-ccu.h | 78 + include/dt-bindings/reset/sun9i-a80-ccu.h | 102 + include/dt-bindings/reset/sun9i-a80-de.h | 58 + include/dt-bindings/reset/sun9i-a80-usb.h | 56 + include/dt-bindings/reset/tegra124-car.h | 13 + include/dt-bindings/reset/tegra186-reset.h | 217 + include/dt-bindings/reset/tegra194-reset.h | 152 + include/dt-bindings/reset/tegra210-car.h | 14 + include/dt-bindings/reset/ti-syscon.h | 38 + include/dt-bindings/soc/qcom,apr.h | 28 + include/dt-bindings/soc/qcom,gsbi.h | 26 + include/dt-bindings/soc/qcom,rpmh-rsc.h | 14 + include/dt-bindings/soc/rockchip,boot-mode.h | 16 + include/dt-bindings/soc/zte,pm_domains.h | 24 + include/dt-bindings/sound/apq8016-lpass.h | 10 + include/dt-bindings/sound/audio-jack-events.h | 10 + include/dt-bindings/sound/cs35l32.h | 27 + include/dt-bindings/sound/cs42l42.h | 73 + include/dt-bindings/sound/fsl-imx-audmux.h | 64 + include/dt-bindings/sound/qcom,q6afe.h | 111 + include/dt-bindings/sound/qcom,q6asm.h | 22 + include/dt-bindings/sound/rt5640.h | 25 + include/dt-bindings/sound/rt5651.h | 15 + include/dt-bindings/sound/samsung-i2s.h | 9 + include/dt-bindings/sound/tas2552.h | 19 + include/dt-bindings/sound/tlv320aic31xx-micbias.h | 9 + include/dt-bindings/spmi/spmi.h | 18 + include/dt-bindings/thermal/lm90.h | 13 + include/dt-bindings/thermal/tegra124-soctherm.h | 20 + .../dt-bindings/thermal/tegra186-bpmp-thermal.h | 14 + include/dt-bindings/thermal/thermal.h | 17 + include/dt-bindings/thermal/thermal_exynos.h | 28 + include/dt-bindings/usb/pd.h | 62 + include/keys/asymmetric-parser.h | 37 + include/keys/asymmetric-subtype.h | 55 + include/keys/asymmetric-type.h | 88 + include/keys/big_key-type.h | 26 + include/keys/ceph-type.h | 9 + include/keys/dns_resolver-type.h | 23 + include/keys/encrypted-type.h | 38 + include/keys/keyring-type.h | 18 + include/keys/request_key_auth-type.h | 36 + include/keys/rxrpc-type.h | 153 + include/keys/system_keyring.h | 65 + include/keys/trusted-type.h | 48 + include/keys/user-type.h | 62 + include/kvm/arm_arch_timer.h | 98 + include/kvm/arm_pmu.h | 121 + include/kvm/arm_psci.h | 63 + include/kvm/arm_vgic.h | 411 + include/kvm/iodev.h | 76 + include/linux/8250_pci.h | 38 + include/linux/a.out.h | 18 + include/linux/acct.h | 103 + include/linux/acpi.h | 1323 +++ include/linux/acpi_dma.h | 121 + include/linux/acpi_iort.h | 62 + include/linux/acpi_pmtmr.h | 39 + include/linux/adb.h | 67 + include/linux/adfs_fs.h | 24 + include/linux/aer.h | 74 + include/linux/agp_backend.h | 109 + include/linux/agpgart.h | 130 + include/linux/ahci-remap.h | 29 + include/linux/ahci_platform.h | 50 + include/linux/aio.h | 27 + include/linux/alarmtimer.h | 65 + include/linux/altera_jtaguart.h | 17 + include/linux/altera_uart.h | 16 + include/linux/amba/bus.h | 181 + include/linux/amba/clcd-regs.h | 86 + include/linux/amba/clcd.h | 321 + include/linux/amba/kmi.h | 92 + include/linux/amba/mmci.h | 37 + include/linux/amba/pl022.h | 295 + include/linux/amba/pl080.h | 220 + include/linux/amba/pl08x.h | 133 + include/linux/amba/pl093.h | 80 + include/linux/amba/serial.h | 241 + include/linux/amba/sp810.h | 62 + include/linux/amd-iommu.h | 215 + include/linux/amifd.h | 63 + include/linux/amifdreg.h | 82 + include/linux/anon_inodes.h | 21 + include/linux/apm-emulation.h | 62 + include/linux/apm_bios.h | 101 + include/linux/apple-gmux.h | 50 + include/linux/apple_bl.h | 27 + include/linux/arch_topology.h | 35 + include/linux/arm-cci.h | 68 + 
include/linux/arm-smccc.h | 396 + include/linux/arm_sdei.h | 79 + include/linux/ascii85.h | 38 + include/linux/asn1.h | 69 + include/linux/asn1_ber_bytecode.h | 93 + include/linux/asn1_decoder.h | 24 + include/linux/assoc_array.h | 92 + include/linux/assoc_array_priv.h | 182 + include/linux/async.h | 50 + include/linux/async_tx.h | 208 + include/linux/ata.h | 1156 +++ include/linux/ata_platform.h | 31 + include/linux/atalk.h | 186 + include/linux/ath9k_platform.h | 51 + include/linux/atm.h | 16 + include/linux/atm_suni.h | 12 + include/linux/atm_tcp.h | 22 + include/linux/atmdev.h | 334 + include/linux/atmel-mci.h | 46 + include/linux/atmel-ssc.h | 335 + include/linux/atmel_pdc.h | 38 + include/linux/atmel_tc.h | 270 + include/linux/atomic.h | 1317 +++ include/linux/attribute_container.h | 72 + include/linux/audit.h | 603 ++ include/linux/auto_dev-ioctl.h | 14 + include/linux/auto_fs.h | 15 + include/linux/auxvec.h | 9 + include/linux/average.h | 71 + include/linux/avf/virtchnl.h | 827 ++ include/linux/b1pcmcia.h | 21 + include/linux/backing-dev-defs.h | 308 + include/linux/backing-dev.h | 504 ++ include/linux/backlight.h | 229 + include/linux/badblocks.h | 66 + include/linux/balloon_compaction.h | 229 + include/linux/bcd.h | 23 + include/linux/bch.h | 79 + include/linux/bcm47xx_nvram.h | 50 + include/linux/bcm47xx_sprom.h | 24 + include/linux/bcm47xx_wdt.h | 27 + include/linux/bcm963xx_nvram.h | 113 + include/linux/bcm963xx_tag.h | 103 + include/linux/bcma/bcma.h | 494 ++ include/linux/bcma/bcma_driver_arm_c9.h | 16 + include/linux/bcma/bcma_driver_chipcommon.h | 716 ++ include/linux/bcma/bcma_driver_gmac_cmn.h | 95 + include/linux/bcma/bcma_driver_mips.h | 45 + include/linux/bcma/bcma_driver_pci.h | 264 + include/linux/bcma/bcma_driver_pcie2.h | 159 + include/linux/bcma/bcma_regs.h | 104 + include/linux/bcma/bcma_soc.h | 17 + include/linux/binfmts.h | 155 + include/linux/bio.h | 866 ++ include/linux/bit_spinlock.h | 101 + include/linux/bitfield.h | 153 + include/linux/bitmap.h | 482 + include/linux/bitops.h | 282 + include/linux/bitrev.h | 105 + include/linux/bits.h | 26 + include/linux/blk-cgroup.h | 973 ++ include/linux/blk-mq-pci.h | 11 + include/linux/blk-mq-rdma.h | 10 + include/linux/blk-mq-virtio.h | 11 + include/linux/blk-mq.h | 340 + include/linux/blk_types.h | 461 + include/linux/blkdev.h | 2114 +++++ include/linux/blkpg.h | 22 + include/linux/blktrace_api.h | 141 + include/linux/blockgroup_lock.h | 41 + include/linux/bma150.h | 58 + include/linux/bootmem.h | 404 + include/linux/bottom_half.h | 35 + include/linux/bpf-cgroup.h | 306 + include/linux/bpf.h | 845 ++ include/linux/bpf_lirc.h | 30 + include/linux/bpf_trace.h | 7 + include/linux/bpf_types.h | 69 + include/linux/bpf_verifier.h | 244 + include/linux/bpfilter.h | 15 + include/linux/brcmphy.h | 267 + include/linux/bsearch.h | 10 + include/linux/bsg-lib.h | 79 + include/linux/bsg.h | 39 + include/linux/btf.h | 50 + include/linux/btree-128.h | 110 + include/linux/btree-type.h | 148 + include/linux/btree.h | 244 + include/linux/btrfs.h | 7 + include/linux/buffer_head.h | 419 + include/linux/bug.h | 84 + include/linux/build-salt.h | 20 + include/linux/build_bug.h | 88 + include/linux/bvec.h | 144 + include/linux/byteorder/big_endian.h | 12 + include/linux/byteorder/generic.h | 207 + include/linux/byteorder/little_endian.h | 12 + include/linux/c2port.h | 62 + include/linux/cache.h | 82 + include/linux/cacheinfo.h | 102 + include/linux/can/core.h | 62 + include/linux/can/dev.h | 222 + include/linux/can/dev/peak_canfd.h | 308 + 
include/linux/can/led.h | 54 + include/linux/can/platform/cc770.h | 34 + include/linux/can/platform/mcp251x.h | 22 + include/linux/can/platform/rcar_can.h | 18 + include/linux/can/platform/sja1000.h | 36 + include/linux/can/rx-offload.h | 64 + include/linux/can/skb.h | 81 + include/linux/capability.h | 254 + include/linux/cb710.h | 208 + include/linux/cciss_ioctl.h | 32 + include/linux/ccp.h | 669 ++ include/linux/cdev.h | 39 + include/linux/cdrom.h | 317 + include/linux/ceph/auth.h | 150 + include/linux/ceph/buffer.h | 39 + include/linux/ceph/ceph_debug.h | 39 + include/linux/ceph/ceph_features.h | 224 + include/linux/ceph/ceph_frag.h | 75 + include/linux/ceph/ceph_fs.h | 828 ++ include/linux/ceph/ceph_hash.h | 14 + include/linux/ceph/cls_lock_client.h | 55 + include/linux/ceph/debugfs.h | 28 + include/linux/ceph/decode.h | 381 + include/linux/ceph/libceph.h | 319 + include/linux/ceph/mdsmap.h | 70 + include/linux/ceph/messenger.h | 392 + include/linux/ceph/mon_client.h | 152 + include/linux/ceph/msgpool.h | 26 + include/linux/ceph/msgr.h | 188 + include/linux/ceph/osd_client.h | 548 ++ include/linux/ceph/osdmap.h | 313 + include/linux/ceph/pagelist.h | 81 + include/linux/ceph/rados.h | 507 ++ include/linux/ceph/string_table.h | 63 + include/linux/ceph/striper.h | 69 + include/linux/ceph/types.h | 30 + include/linux/cfag12864b.h | 68 + include/linux/cgroup-defs.h | 841 ++ include/linux/cgroup.h | 889 ++ include/linux/cgroup_rdma.h | 53 + include/linux/cgroup_subsys.h | 73 + include/linux/circ_buf.h | 37 + include/linux/cleancache.h | 124 + include/linux/clk-provider.h | 1018 +++ include/linux/clk.h | 797 ++ include/linux/clk/at91_pmc.h | 228 + include/linux/clk/clk-conf.h | 22 + include/linux/clk/davinci.h | 40 + include/linux/clk/mmp.h | 18 + include/linux/clk/mxs.h | 14 + include/linux/clk/renesas.h | 39 + include/linux/clk/sunxi-ng.h | 35 + include/linux/clk/tegra.h | 133 + include/linux/clk/ti.h | 312 + include/linux/clk/zynq.h | 30 + include/linux/clkdev.h | 55 + include/linux/clock_cooling.h | 65 + include/linux/clockchips.h | 227 + include/linux/clocksource.h | 272 + include/linux/cm4000_cs.h | 11 + include/linux/cma.h | 40 + include/linux/cmdline-parser.h | 46 + include/linux/cn_proc.h | 58 + include/linux/cnt32_to_63.h | 107 + include/linux/coda.h | 64 + include/linux/coda_psdev.h | 83 + include/linux/compaction.h | 247 + include/linux/compat.h | 1043 +++ include/linux/compat_time.h | 32 + include/linux/compiler-clang.h | 44 + include/linux/compiler-gcc.h | 223 + include/linux/compiler-intel.h | 43 + include/linux/compiler.h | 430 + include/linux/compiler_types.h | 299 + include/linux/completion.h | 121 + include/linux/component.h | 49 + include/linux/concap.h | 112 + include/linux/configfs.h | 289 + include/linux/connector.h | 88 + include/linux/console.h | 234 + include/linux/console_struct.h | 178 + include/linux/consolemap.h | 35 + include/linux/const.h | 9 + include/linux/container.h | 25 + include/linux/context_tracking.h | 175 + include/linux/context_tracking_state.h | 50 + include/linux/cordic.h | 48 + include/linux/coredump.h | 25 + include/linux/coresight-pmu.h | 34 + include/linux/coresight-stm.h | 7 + include/linux/coresight.h | 296 + include/linux/count_zeros.h | 57 + include/linux/cper.h | 557 ++ include/linux/cpu.h | 208 + include/linux/cpu_cooling.h | 76 + include/linux/cpu_pm.h | 109 + include/linux/cpu_rmap.h | 69 + include/linux/cpufeature.h | 61 + include/linux/cpufreq.h | 950 ++ include/linux/cpuhotplug.h | 399 + include/linux/cpuidle.h | 294 + 
include/linux/cpumask.h | 921 ++ include/linux/cpuset.h | 280 + include/linux/crash_core.h | 78 + include/linux/crash_dump.h | 114 + include/linux/crc-ccitt.h | 23 + include/linux/crc-itu-t.h | 28 + include/linux/crc-t10dif.h | 15 + include/linux/crc16.h | 30 + include/linux/crc32.h | 79 + include/linux/crc32c.h | 13 + include/linux/crc32poly.h | 20 + include/linux/crc4.h | 9 + include/linux/crc64.h | 11 + include/linux/crc7.h | 15 + include/linux/crc8.h | 101 + include/linux/cred.h | 418 + include/linux/crush/crush.h | 345 + include/linux/crush/hash.h | 24 + include/linux/crush/mapper.h | 34 + include/linux/crypto.h | 1669 ++++ include/linux/cryptohash.h | 14 + include/linux/cs5535.h | 239 + include/linux/ctype.h | 74 + include/linux/cuda.h | 19 + include/linux/cyclades.h | 364 + include/linux/davinci_emac.h | 50 + include/linux/dax.h | 177 + include/linux/dca.h | 82 + include/linux/dcache.h | 602 ++ include/linux/dccp.h | 326 + include/linux/dcookies.h | 69 + include/linux/debug_locks.h | 76 + include/linux/debugfs.h | 389 + include/linux/debugobjects.h | 114 + include/linux/decompress/bunzip2.h | 11 + include/linux/decompress/generic.h | 40 + include/linux/decompress/inflate.h | 11 + include/linux/decompress/mm.h | 94 + include/linux/decompress/unlz4.h | 11 + include/linux/decompress/unlzma.h | 13 + include/linux/decompress/unlzo.h | 11 + include/linux/decompress/unxz.h | 19 + include/linux/delay.h | 67 + include/linux/delayacct.h | 188 + include/linux/delayed_call.h | 35 + include/linux/dell-led.h | 7 + include/linux/devcoredump.h | 105 + include/linux/devfreq-event.h | 191 + include/linux/devfreq.h | 389 + include/linux/devfreq_cooling.h | 103 + include/linux/device-mapper.h | 621 ++ include/linux/device.h | 1642 ++++ include/linux/device_cgroup.h | 79 + include/linux/devpts_fs.h | 48 + include/linux/digsig.h | 64 + include/linux/dio.h | 281 + include/linux/dirent.h | 13 + include/linux/dlm.h | 172 + include/linux/dlm_plock.h | 19 + include/linux/dm-bufio.h | 149 + include/linux/dm-dirty-log.h | 146 + include/linux/dm-io.h | 85 + include/linux/dm-kcopyd.h | 89 + include/linux/dm-region-hash.h | 103 + include/linux/dm9000.h | 42 + include/linux/dma-buf.h | 402 + include/linux/dma-contiguous.h | 164 + include/linux/dma-debug.h | 207 + include/linux/dma-direct.h | 69 + include/linux/dma-direction.h | 12 + include/linux/dma-fence-array.h | 91 + include/linux/dma-fence.h | 568 ++ include/linux/dma-iommu.h | 113 + include/linux/dma-mapping.h | 853 ++ include/linux/dma-noncoherent.h | 55 + include/linux/dma/dw.h | 53 + include/linux/dma/hsu.h | 64 + include/linux/dma/ipu-dma.h | 177 + include/linux/dma/mmp-pdma.h | 16 + include/linux/dma/pxa-dma.h | 37 + include/linux/dma/qcom_bam_dma.h | 79 + include/linux/dma/sprd-dma.h | 61 + include/linux/dma/xilinx_dma.h | 49 + include/linux/dma_remapping.h | 58 + include/linux/dmaengine.h | 1439 +++ include/linux/dmapool.h | 59 + include/linux/dmar.h | 281 + include/linux/dmi.h | 154 + include/linux/dnotify.h | 51 + include/linux/dns_resolver.h | 34 + include/linux/dqblk_qtree.h | 59 + include/linux/dqblk_v1.h | 15 + include/linux/dqblk_v2.h | 17 + include/linux/drbd.h | 411 + include/linux/drbd_genl.h | 536 ++ include/linux/drbd_genl_api.h | 56 + include/linux/drbd_limits.h | 251 + include/linux/ds2782_battery.h | 9 + include/linux/dsa/lan9303.h | 39 + include/linux/dtlk.h | 86 + include/linux/dw_apb_timer.h | 55 + include/linux/dynamic_debug.h | 186 + include/linux/dynamic_queue_limits.h | 106 + include/linux/earlycpio.h | 18 + 
include/linux/ecryptfs.h | 106 + include/linux/edac.h | 676 ++ include/linux/edd.h | 38 + include/linux/edma.h | 29 + include/linux/eeprom_93cx6.h | 86 + include/linux/eeprom_93xx46.h | 29 + include/linux/efi-bgrt.h | 26 + include/linux/efi.h | 1712 ++++ include/linux/efs_vh.h | 54 + include/linux/eisa.h | 112 + include/linux/elevator.h | 269 + include/linux/elf-fdpic.h | 51 + include/linux/elf-randomize.h | 23 + include/linux/elf.h | 59 + include/linux/elfcore-compat.h | 56 + include/linux/elfcore.h | 98 + include/linux/elfnote.h | 99 + include/linux/enclosure.h | 146 + include/linux/err.h | 70 + include/linux/errno.h | 34 + include/linux/error-injection.h | 27 + include/linux/errqueue.h | 28 + include/linux/errseq.h | 14 + include/linux/etherdevice.h | 525 ++ include/linux/ethtool.h | 416 + include/linux/eventfd.h | 88 + include/linux/eventpoll.h | 76 + include/linux/evm.h | 108 + include/linux/export.h | 153 + include/linux/exportfs.h | 231 + include/linux/ext2_fs.h | 43 + include/linux/extable.h | 34 + include/linux/extcon-provider.h | 142 + include/linux/extcon.h | 344 + include/linux/extcon/extcon-adc-jack.h | 72 + include/linux/f2fs_fs.h | 545 ++ include/linux/f75375s.h | 21 + include/linux/falloc.h | 32 + include/linux/fanotify.h | 9 + include/linux/fault-inject.h | 77 + include/linux/fb.h | 838 ++ include/linux/fbcon.h | 12 + include/linux/fcdevice.h | 33 + include/linux/fcntl.h | 38 + include/linux/fd.h | 25 + include/linux/fddidevice.h | 32 + include/linux/fdtable.h | 127 + include/linux/fec.h | 25 + include/linux/file.h | 94 + include/linux/filter.h | 1142 +++ include/linux/fips.h | 11 + include/linux/firewire.h | 473 + include/linux/firmware-map.h | 49 + include/linux/firmware.h | 101 + include/linux/firmware/meson/meson_sm.h | 31 + include/linux/fixp-arith.h | 156 + include/linux/flat.h | 53 + include/linux/flex_array.h | 149 + include/linux/flex_proportions.h | 103 + include/linux/fmc-sdb.h | 39 + include/linux/fmc.h | 270 + include/linux/font.h | 73 + include/linux/fpga/altera-pr-ip-core.h | 18 + include/linux/fpga/fpga-bridge.h | 72 + include/linux/fpga/fpga-mgr.h | 201 + include/linux/fpga/fpga-region.h | 47 + include/linux/frame.h | 24 + include/linux/freezer.h | 302 + include/linux/frontswap.h | 115 + include/linux/fs.h | 3477 ++++++++ include/linux/fs_enet_pd.h | 165 + include/linux/fs_pin.h | 25 + include/linux/fs_stack.h | 30 + include/linux/fs_struct.h | 45 + include/linux/fs_uart_pd.h | 71 + include/linux/fscache-cache.h | 569 ++ include/linux/fscache.h | 847 ++ include/linux/fscrypt.h | 260 + include/linux/fscrypt_notsupp.h | 243 + include/linux/fscrypt_supp.h | 236 + include/linux/fsi-sbefifo.h | 33 + include/linux/fsi.h | 93 + include/linux/fsl-diu-fb.h | 173 + include/linux/fsl/bestcomm/ata.h | 30 + include/linux/fsl/bestcomm/bestcomm.h | 213 + include/linux/fsl/bestcomm/bestcomm_priv.h | 350 + include/linux/fsl/bestcomm/fec.h | 61 + include/linux/fsl/bestcomm/gen_bd.h | 53 + include/linux/fsl/bestcomm/sram.h | 54 + include/linux/fsl/edac.h | 9 + include/linux/fsl/guts.h | 327 + include/linux/fsl/mc.h | 562 ++ include/linux/fsl/ptp_qoriq.h | 169 + include/linux/fsl_devices.h | 157 + include/linux/fsl_hypervisor.h | 63 + include/linux/fsl_ifc.h | 916 ++ include/linux/fsldma.h | 13 + include/linux/fsnotify.h | 293 + include/linux/fsnotify_backend.h | 507 ++ include/linux/ftrace.h | 921 ++ include/linux/ftrace_irq.h | 37 + include/linux/futex.h | 93 + include/linux/fwnode.h | 129 + include/linux/gameport.h | 219 + include/linux/gcd.h | 9 + include/linux/genalloc.h 
| 172 + include/linux/genetlink.h | 42 + include/linux/genhd.h | 766 ++ include/linux/genl_magic_func.h | 407 + include/linux/genl_magic_struct.h | 286 + include/linux/getcpu.h | 19 + include/linux/gfp.h | 623 ++ include/linux/glob.h | 10 + include/linux/gnss.h | 75 + include/linux/goldfish.h | 33 + include/linux/gpio-pxa.h | 22 + include/linux/gpio.h | 264 + include/linux/gpio/aspeed.h | 15 + include/linux/gpio/consumer.h | 561 ++ include/linux/gpio/driver.h | 594 ++ include/linux/gpio/gpio-reg.h | 14 + include/linux/gpio/machine.h | 106 + include/linux/gpio_keys.h | 60 + include/linux/hardirq.h | 92 + include/linux/hash.h | 104 + include/linux/hashtable.h | 209 + include/linux/hdlc.h | 118 + include/linux/hdlcdrv.h | 276 + include/linux/hdmi.h | 339 + include/linux/hid-debug.h | 66 + include/linux/hid-roccat.h | 29 + include/linux/hid-sensor-hub.h | 288 + include/linux/hid-sensor-ids.h | 174 + include/linux/hid.h | 1203 +++ include/linux/hiddev.h | 68 + include/linux/hidraw.h | 59 + include/linux/highmem.h | 255 + include/linux/highuid.h | 98 + include/linux/hil.h | 483 + include/linux/hil_mlc.h | 168 + include/linux/hippidevice.h | 40 + include/linux/hmm.h | 562 ++ include/linux/host1x.h | 336 + include/linux/hp_sdc.h | 301 + include/linux/hpet.h | 111 + include/linux/hrtimer.h | 516 ++ include/linux/hsi/hsi.h | 441 + include/linux/hsi/ssi_protocol.h | 42 + include/linux/htcpld.h | 25 + include/linux/huge_mm.h | 379 + include/linux/hugetlb.h | 629 ++ include/linux/hugetlb_cgroup.h | 119 + include/linux/hugetlb_inline.h | 23 + include/linux/hw_breakpoint.h | 135 + include/linux/hw_random.h | 63 + include/linux/hwmon-sysfs.h | 57 + include/linux/hwmon-vid.h | 45 + include/linux/hwmon.h | 424 + include/linux/hwspinlock.h | 405 + include/linux/hyperv.h | 1552 ++++ include/linux/hypervisor.h | 35 + include/linux/i2c-algo-bit.h | 55 + include/linux/i2c-algo-pca.h | 87 + include/linux/i2c-algo-pcf.h | 49 + include/linux/i2c-dev.h | 28 + include/linux/i2c-mux.h | 80 + include/linux/i2c-pxa.h | 18 + include/linux/i2c-smbus.h | 61 + include/linux/i2c.h | 955 ++ include/linux/i8042.h | 99 + include/linux/i8253.h | 30 + include/linux/icmp.h | 27 + include/linux/icmpv6.h | 93 + include/linux/ide.h | 1610 ++++ include/linux/idle_inject.h | 29 + include/linux/idr.h | 309 + include/linux/ieee80211.h | 3250 +++++++ include/linux/ieee802154.h | 363 + include/linux/if_arp.h | 65 + include/linux/if_bridge.h | 132 + include/linux/if_eql.h | 49 + include/linux/if_ether.h | 48 + include/linux/if_fddi.h | 121 + include/linux/if_frad.h | 96 + include/linux/if_link.h | 33 + include/linux/if_ltalk.h | 8 + include/linux/if_macvlan.h | 109 + include/linux/if_phonet.h | 15 + include/linux/if_pppol2tp.h | 21 + include/linux/if_pppox.h | 100 + include/linux/if_tap.h | 85 + include/linux/if_team.h | 321 + include/linux/if_tun.h | 58 + include/linux/if_tunnel.h | 17 + include/linux/if_vlan.h | 748 ++ include/linux/igmp.h | 135 + include/linux/ihex.h | 75 + include/linux/iio/accel/kxcjk_1013.h | 22 + include/linux/iio/adc/ad_sigma_delta.h | 188 + include/linux/iio/adc/stm32-dfsdm-adc.h | 20 + include/linux/iio/buffer-dma.h | 152 + include/linux/iio/buffer-dmaengine.h | 18 + include/linux/iio/buffer.h | 53 + include/linux/iio/buffer_impl.h | 163 + include/linux/iio/common/cros_ec_sensors_core.h | 180 + include/linux/iio/common/ssp_sensors.h | 82 + include/linux/iio/common/st_sensors.h | 358 + include/linux/iio/common/st_sensors_i2c.h | 30 + include/linux/iio/common/st_sensors_spi.h | 20 + include/linux/iio/configfs.h | 15 
+ include/linux/iio/consumer.h | 389 + include/linux/iio/dac/ad5421.h | 29 + include/linux/iio/dac/ad5504.h | 16 + include/linux/iio/dac/ad5791.h | 25 + include/linux/iio/dac/max517.h | 15 + include/linux/iio/dac/mcp4725.h | 26 + include/linux/iio/driver.h | 31 + include/linux/iio/events.h | 59 + include/linux/iio/frequency/ad9523.h | 195 + include/linux/iio/frequency/adf4350.h | 128 + include/linux/iio/gyro/itg3200.h | 154 + include/linux/iio/hw-consumer.h | 21 + include/linux/iio/iio.h | 766 ++ include/linux/iio/imu/adis.h | 284 + include/linux/iio/kfifo_buf.h | 14 + include/linux/iio/machine.h | 38 + include/linux/iio/magnetometer/ak8975.h | 17 + include/linux/iio/sw_device.h | 70 + include/linux/iio/sw_trigger.h | 70 + include/linux/iio/sysfs.h | 152 + include/linux/iio/timer/stm32-lptim-trigger.h | 30 + include/linux/iio/timer/stm32-timer-trigger.h | 78 + include/linux/iio/trigger.h | 186 + include/linux/iio/trigger_consumer.h | 63 + include/linux/iio/triggered_buffer.h | 24 + include/linux/iio/triggered_event.h | 12 + include/linux/iio/types.h | 65 + include/linux/ima.h | 115 + include/linux/imx-media.h | 29 + include/linux/in.h | 104 + include/linux/in6.h | 48 + include/linux/inet.h | 64 + include/linux/inet_diag.h | 77 + include/linux/inetdevice.h | 276 + include/linux/init.h | 309 + include/linux/init_ohci1394_dma.h | 5 + include/linux/init_task.h | 61 + include/linux/initrd.h | 24 + include/linux/inotify.h | 23 + include/linux/input-polldev.h | 61 + include/linux/input.h | 544 ++ include/linux/input/ad714x.h | 64 + include/linux/input/adp5589.h | 188 + include/linux/input/adxl34x.h | 358 + include/linux/input/as5011.h | 20 + include/linux/input/auo-pixcir-ts.h | 54 + include/linux/input/bu21013.h | 34 + include/linux/input/cma3000.h | 59 + include/linux/input/cy8ctmg110_pdata.h | 11 + include/linux/input/cyttsp.h | 43 + include/linux/input/gp2ap002a00f.h | 23 + include/linux/input/ili210x.h | 11 + include/linux/input/kxtj9.h | 61 + include/linux/input/lm8333.h | 24 + include/linux/input/matrix_keypad.h | 92 + include/linux/input/mt.h | 127 + include/linux/input/navpoint.h | 12 + include/linux/input/samsung-keypad.h | 43 + include/linux/input/sh_keysc.h | 16 + include/linux/input/sparse-keymap.h | 61 + include/linux/input/touchscreen.h | 35 + include/linux/input/tps6507x-ts.h | 23 + include/linux/integrity.h | 60 + include/linux/intel-iommu.h | 567 ++ include/linux/intel-pti.h | 43 + include/linux/intel-svm.h | 141 + include/linux/interrupt.h | 728 ++ include/linux/interval_tree.h | 30 + include/linux/interval_tree_generic.h | 217 + include/linux/io-64-nonatomic-hi-lo.h | 58 + include/linux/io-64-nonatomic-lo-hi.h | 58 + include/linux/io-mapping.h | 205 + include/linux/io.h | 191 + include/linux/ioc3.h | 93 + include/linux/ioc4.h | 184 + include/linux/iocontext.h | 159 + include/linux/iomap.h | 176 + include/linux/iommu-helper.h | 42 + include/linux/iommu.h | 696 ++ include/linux/iopoll.h | 150 + include/linux/ioport.h | 291 + include/linux/ioprio.h | 89 + include/linux/iova.h | 276 + include/linux/ip.h | 37 + include/linux/ipack.h | 289 + include/linux/ipc.h | 31 + include/linux/ipc_namespace.h | 181 + include/linux/ipmi-fru.h | 134 + include/linux/ipmi.h | 336 + include/linux/ipmi_smi.h | 247 + include/linux/ipv6.h | 396 + include/linux/ipv6_route.h | 19 + include/linux/irq.h | 1227 +++ include/linux/irq_cpustat.h | 28 + include/linux/irq_poll.h | 26 + include/linux/irq_sim.h | 41 + include/linux/irq_work.h | 55 + include/linux/irqbypass.h | 90 + include/linux/irqchip.h | 51 + 
include/linux/irqchip/arm-gic-common.h | 36 + include/linux/irqchip/arm-gic-v3.h | 620 ++ include/linux/irqchip/arm-gic-v4.h | 115 + include/linux/irqchip/arm-gic.h | 178 + include/linux/irqchip/arm-vic.h | 38 + include/linux/irqchip/chained_irq.h | 52 + include/linux/irqchip/ingenic.h | 23 + include/linux/irqchip/irq-bcm2836.h | 70 + include/linux/irqchip/irq-omap-intc.h | 28 + include/linux/irqchip/irq-partition-percpu.h | 59 + include/linux/irqchip/irq-sa11x0.h | 17 + include/linux/irqchip/mmp.h | 7 + include/linux/irqchip/mxs.h | 14 + include/linux/irqchip/versatile-fpga.h | 14 + include/linux/irqchip/xtensa-mx.h | 17 + include/linux/irqchip/xtensa-pic.h | 18 + include/linux/irqdesc.h | 276 + include/linux/irqdomain.h | 589 ++ include/linux/irqflags.h | 171 + include/linux/irqhandler.h | 15 + include/linux/irqnr.h | 34 + include/linux/irqreturn.h | 20 + include/linux/isa.h | 73 + include/linux/isapnp.h | 121 + include/linux/iscsi_boot_sysfs.h | 147 + include/linux/iscsi_ibft.h | 46 + include/linux/isdn.h | 473 + include/linux/isdn/capilli.h | 113 + include/linux/isdn/capiutil.h | 516 ++ include/linux/isdn/hdlc.h | 82 + include/linux/isdn_divertif.h | 35 + include/linux/isdn_ppp.h | 194 + include/linux/isdnif.h | 505 ++ include/linux/isicom.h | 85 + include/linux/iversion.h | 337 + include/linux/jbd2.h | 1668 ++++ include/linux/jhash.h | 174 + include/linux/jiffies.h | 463 + include/linux/journal-head.h | 107 + include/linux/joystick.h | 33 + include/linux/jump_label.h | 443 + include/linux/jump_label_ratelimit.h | 40 + include/linux/jz4740-adc.h | 33 + include/linux/jz4780-nemc.h | 43 + include/linux/kallsyms.h | 174 + include/linux/kasan-checks.h | 15 + include/linux/kasan.h | 143 + include/linux/kbd_diacr.h | 9 + include/linux/kbd_kern.h | 147 + include/linux/kbuild.h | 16 + include/linux/kconfig.h | 73 + include/linux/kcore.h | 61 + include/linux/kcov.h | 48 + include/linux/kd.h | 8 + include/linux/kdb.h | 221 + include/linux/kdebug.h | 23 + include/linux/kdev_t.h | 83 + include/linux/kern_levels.h | 39 + include/linux/kernel-page-flags.h | 21 + include/linux/kernel.h | 1036 +++ include/linux/kernel_stat.h | 100 + include/linux/kernelcapi.h | 120 + include/linux/kernfs.h | 558 ++ include/linux/kexec.h | 377 + include/linux/key-type.h | 182 + include/linux/key.h | 425 + include/linux/keyboard.h | 21 + include/linux/kfifo.h | 834 ++ include/linux/kgdb.h | 327 + include/linux/khugepaged.h | 83 + include/linux/klist.h | 69 + include/linux/kmemleak.h | 137 + include/linux/kmod.h | 48 + include/linux/kmsg_dump.h | 117 + include/linux/kobj_map.h | 20 + include/linux/kobject.h | 247 + include/linux/kobject_ns.h | 59 + include/linux/kprobes.h | 472 + include/linux/kref.h | 118 + include/linux/ks0108.h | 35 + include/linux/ks8842.h | 38 + include/linux/ks8851_mll.h | 33 + include/linux/ksm.h | 92 + include/linux/kthread.h | 214 + include/linux/ktime.h | 278 + include/linux/kvm_host.h | 1351 +++ include/linux/kvm_irqfd.h | 71 + include/linux/kvm_para.h | 17 + include/linux/kvm_types.h | 73 + include/linux/l2tp.h | 14 + include/linux/lapb.h | 58 + include/linux/latencytop.h | 57 + include/linux/lcd.h | 131 + include/linux/lcm.h | 10 + include/linux/led-class-flash.h | 196 + include/linux/led-lm3530.h | 121 + include/linux/leds-bd2802.h | 26 + include/linux/leds-lp3944.h | 50 + include/linux/leds-lp3952.h | 125 + include/linux/leds-pca9532.h | 47 + include/linux/leds-regulator.h | 46 + include/linux/leds-tca6507.h | 34 + include/linux/leds.h | 475 + include/linux/leds_pwm.h | 22 + 
include/linux/libata.h | 2001 +++++ include/linux/libfdt.h | 8 + include/linux/libfdt_env.h | 22 + include/linux/libgcc.h | 43 + include/linux/libnvdimm.h | 222 + include/linux/libps2.h | 64 + include/linux/license.h | 14 + include/linux/lightnvm.h | 553 ++ include/linux/linkage.h | 113 + include/linux/linux_logo.h | 59 + include/linux/lis3lv02d.h | 128 + include/linux/list.h | 829 ++ include/linux/list_bl.h | 163 + include/linux/list_lru.h | 222 + include/linux/list_nulls.h | 123 + include/linux/list_sort.h | 12 + include/linux/livepatch.h | 214 + include/linux/llc.h | 23 + include/linux/llist.h | 241 + include/linux/lockd/bind.h | 81 + include/linux/lockd/debug.h | 44 + include/linux/lockd/lockd.h | 374 + include/linux/lockd/nlm.h | 58 + include/linux/lockd/share.h | 32 + include/linux/lockd/xdr.h | 119 + include/linux/lockd/xdr4.h | 48 + include/linux/lockdep.h | 627 ++ include/linux/lockref.h | 53 + include/linux/log2.h | 227 + include/linux/logic_pio.h | 124 + include/linux/lp.h | 101 + include/linux/lru_cache.h | 314 + include/linux/lsm_audit.h | 124 + include/linux/lsm_hooks.h | 2087 +++++ include/linux/lz4.h | 648 ++ include/linux/lzo.h | 46 + include/linux/mISDNdsp.h | 40 + include/linux/mISDNhw.h | 201 + include/linux/mISDNif.h | 604 ++ include/linux/mailbox/brcm-message.h | 62 + include/linux/mailbox/mtk-cmdq-mailbox.h | 77 + include/linux/mailbox_client.h | 51 + include/linux/mailbox_controller.h | 135 + include/linux/maple.h | 106 + include/linux/marvell_phy.h | 37 + include/linux/math64.h | 287 + include/linux/max17040_battery.h | 19 + include/linux/mbcache.h | 52 + include/linux/mbus.h | 109 + include/linux/mc146818rtc.h | 129 + include/linux/mc6821.h | 52 + include/linux/mcb.h | 142 + include/linux/mdev.h | 138 + include/linux/mdio-bitbang.h | 44 + include/linux/mdio-gpio.h | 9 + include/linux/mdio-mux.h | 32 + include/linux/mdio.h | 299 + include/linux/mei_cl_bus.h | 108 + include/linux/mem_encrypt.h | 56 + include/linux/memblock.h | 452 + include/linux/memcontrol.h | 1338 +++ include/linux/memfd.h | 16 + include/linux/memory.h | 148 + include/linux/memory_hotplug.h | 349 + include/linux/mempolicy.h | 312 + include/linux/mempool.h | 111 + include/linux/memremap.h | 169 + include/linux/memstick.h | 347 + include/linux/mfd/88pm80x.h | 373 + include/linux/mfd/88pm860x.h | 487 + include/linux/mfd/aat2870.h | 181 + include/linux/mfd/ab3100.h | 129 + include/linux/mfd/abx500.h | 347 + include/linux/mfd/abx500/ab8500-bm.h | 476 + include/linux/mfd/abx500/ab8500-codec.h | 54 + include/linux/mfd/abx500/ab8500-gpadc.h | 75 + include/linux/mfd/abx500/ab8500-sysctrl.h | 301 + include/linux/mfd/abx500/ab8500.h | 518 ++ include/linux/mfd/abx500/ux500_chargalg.h | 51 + include/linux/mfd/ac100.h | 178 + include/linux/mfd/adp5520.h | 299 + include/linux/mfd/altera-a10sr.h | 85 + include/linux/mfd/arizona/core.h | 194 + include/linux/mfd/arizona/pdata.h | 197 + include/linux/mfd/arizona/registers.h | 8170 +++++++++++++++++ include/linux/mfd/as3711.h | 127 + include/linux/mfd/as3722.h | 432 + include/linux/mfd/asic3.h | 316 + include/linux/mfd/atmel-hlcdc.h | 85 + include/linux/mfd/axp20x.h | 703 ++ include/linux/mfd/bcm590xx.h | 34 + include/linux/mfd/bd9571mwv.h | 120 + include/linux/mfd/core.h | 138 + include/linux/mfd/cros_ec.h | 335 + include/linux/mfd/cros_ec_commands.h | 3270 +++++++ include/linux/mfd/cros_ec_lpc_mec.h | 90 + include/linux/mfd/cros_ec_lpc_reg.h | 61 + include/linux/mfd/da8xx-cfgchip.h | 153 + include/linux/mfd/da903x.h | 248 + include/linux/mfd/da9052/da9052.h | 232 + 
include/linux/mfd/da9052/pdata.h | 40 + include/linux/mfd/da9052/reg.h | 764 ++ include/linux/mfd/da9055/core.h | 94 + include/linux/mfd/da9055/pdata.h | 54 + include/linux/mfd/da9055/reg.h | 699 ++ include/linux/mfd/da9062/core.h | 75 + include/linux/mfd/da9062/registers.h | 1109 +++ include/linux/mfd/da9063/core.h | 97 + include/linux/mfd/da9063/pdata.h | 114 + include/linux/mfd/da9063/registers.h | 1073 +++ include/linux/mfd/da9150/core.h | 85 + include/linux/mfd/da9150/registers.h | 1155 +++ include/linux/mfd/davinci_voicecodec.h | 117 + include/linux/mfd/db8500-prcmu.h | 766 ++ include/linux/mfd/dbx500-prcmu.h | 647 ++ include/linux/mfd/dln2.h | 104 + include/linux/mfd/dm355evm_msp.h | 79 + include/linux/mfd/ds1wm.h | 29 + include/linux/mfd/ezx-pcap.h | 254 + include/linux/mfd/hi6421-pmic.h | 46 + include/linux/mfd/hi655x-pmic.h | 64 + include/linux/mfd/htc-pasic3.h | 54 + include/linux/mfd/imx25-tsadc.h | 141 + include/linux/mfd/intel_msic.h | 456 + include/linux/mfd/intel_soc_pmic.h | 37 + include/linux/mfd/intel_soc_pmic_bxtwc.h | 67 + include/linux/mfd/ipaq-micro.h | 149 + include/linux/mfd/janz.h | 54 + include/linux/mfd/kempld.h | 129 + include/linux/mfd/lm3533.h | 104 + include/linux/mfd/lp3943.h | 114 + include/linux/mfd/lp873x.h | 268 + include/linux/mfd/lp87565.h | 270 + include/linux/mfd/lp8788-isink.h | 52 + include/linux/mfd/lp8788.h | 334 + include/linux/mfd/lpc_ich.h | 49 + include/linux/mfd/madera/core.h | 187 + include/linux/mfd/madera/pdata.h | 59 + include/linux/mfd/madera/registers.h | 3968 +++++++++ include/linux/mfd/max14577-private.h | 485 + include/linux/mfd/max14577.h | 107 + include/linux/mfd/max77620.h | 348 + include/linux/mfd/max77686-private.h | 461 + include/linux/mfd/max77686.h | 128 + include/linux/mfd/max77693-common.h | 49 + include/linux/mfd/max77693-private.h | 532 ++ include/linux/mfd/max77693.h | 91 + include/linux/mfd/max77843-private.h | 439 + include/linux/mfd/max8907.h | 252 + include/linux/mfd/max8925.h | 277 + include/linux/mfd/max8997-private.h | 430 + include/linux/mfd/max8997.h | 223 + include/linux/mfd/max8998-private.h | 182 + include/linux/mfd/max8998.h | 118 + include/linux/mfd/mc13783.h | 90 + include/linux/mfd/mc13892.h | 39 + include/linux/mfd/mc13xxx.h | 265 + include/linux/mfd/mcp.h | 66 + include/linux/mfd/menelaus.h | 41 + include/linux/mfd/motorola-cpcap.h | 297 + include/linux/mfd/mt6323/core.h | 36 + include/linux/mfd/mt6323/registers.h | 408 + include/linux/mfd/mt6397/core.h | 67 + include/linux/mfd/mt6397/registers.h | 362 + include/linux/mfd/mxs-lradc.h | 187 + include/linux/mfd/palmas.h | 3814 ++++++++ include/linux/mfd/pcf50633/adc.h | 73 + include/linux/mfd/pcf50633/backlight.h | 51 + include/linux/mfd/pcf50633/core.h | 238 + include/linux/mfd/pcf50633/gpio.h | 52 + include/linux/mfd/pcf50633/mbc.h | 134 + include/linux/mfd/pcf50633/pmic.h | 68 + include/linux/mfd/qcom_rpm.h | 14 + include/linux/mfd/rave-sp.h | 62 + include/linux/mfd/rc5t583.h | 381 + include/linux/mfd/rdc321x.h | 27 + include/linux/mfd/retu.h | 28 + include/linux/mfd/rk808.h | 457 + include/linux/mfd/rn5t618.h | 256 + include/linux/mfd/rohm-bd718x7.h | 359 + include/linux/mfd/rt5033-private.h | 260 + include/linux/mfd/rt5033.h | 62 + include/linux/mfd/samsung/core.h | 186 + include/linux/mfd/samsung/irq.h | 253 + include/linux/mfd/samsung/rtc.h | 144 + include/linux/mfd/samsung/s2mpa01.h | 180 + include/linux/mfd/samsung/s2mps11.h | 198 + include/linux/mfd/samsung/s2mps13.h | 189 + include/linux/mfd/samsung/s2mps14.h | 146 + 
include/linux/mfd/samsung/s2mps15.h | 158 + include/linux/mfd/samsung/s2mpu02.h | 201 + include/linux/mfd/samsung/s5m8763.h | 96 + include/linux/mfd/samsung/s5m8767.h | 211 + include/linux/mfd/si476x-core.h | 533 ++ include/linux/mfd/si476x-platform.h | 267 + include/linux/mfd/si476x-reports.h | 163 + include/linux/mfd/sky81452.h | 31 + include/linux/mfd/smsc.h | 109 + include/linux/mfd/sta2x11-mfd.h | 518 ++ include/linux/mfd/stm32-lptimer.h | 58 + include/linux/mfd/stm32-timers.h | 143 + include/linux/mfd/stmpe.h | 142 + include/linux/mfd/stw481x.h | 52 + include/linux/mfd/sun4i-gpadc.h | 100 + include/linux/mfd/syscon.h | 54 + include/linux/mfd/syscon/atmel-matrix.h | 117 + include/linux/mfd/syscon/atmel-mc.h | 144 + include/linux/mfd/syscon/atmel-smc.h | 122 + include/linux/mfd/syscon/atmel-st.h | 49 + include/linux/mfd/syscon/clps711x.h | 94 + include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | 463 + include/linux/mfd/syscon/imx7-iomuxc-gpr.h | 51 + include/linux/mfd/t7l66xb.h | 34 + include/linux/mfd/tc3589x.h | 152 + include/linux/mfd/tc6387xb.h | 20 + include/linux/mfd/tc6393xb.h | 59 + include/linux/mfd/ti-lmu-register.h | 280 + include/linux/mfd/ti-lmu.h | 87 + include/linux/mfd/ti_am335x_tscadc.h | 198 + include/linux/mfd/tmio.h | 149 + include/linux/mfd/tps6105x.h | 97 + include/linux/mfd/tps65010.h | 205 + include/linux/mfd/tps6507x.h | 168 + include/linux/mfd/tps65086.h | 117 + include/linux/mfd/tps65090.h | 163 + include/linux/mfd/tps65217.h | 289 + include/linux/mfd/tps65218.h | 276 + include/linux/mfd/tps6586x.h | 112 + include/linux/mfd/tps65910.h | 956 ++ include/linux/mfd/tps65912.h | 327 + include/linux/mfd/tps68470.h | 86 + include/linux/mfd/tps80031.h | 637 ++ include/linux/mfd/twl.h | 876 ++ include/linux/mfd/twl4030-audio.h | 272 + include/linux/mfd/twl6040.h | 271 + include/linux/mfd/ucb1x00.h | 260 + include/linux/mfd/viperboard.h | 110 + include/linux/mfd/wl1273-core.h | 290 + include/linux/mfd/wm831x/auxadc.h | 218 + include/linux/mfd/wm831x/core.h | 437 + include/linux/mfd/wm831x/gpio.h | 59 + include/linux/mfd/wm831x/irq.h | 764 ++ include/linux/mfd/wm831x/otp.h | 162 + include/linux/mfd/wm831x/pdata.h | 150 + include/linux/mfd/wm831x/pmu.h | 189 + include/linux/mfd/wm831x/regulator.h | 1218 +++ include/linux/mfd/wm831x/status.h | 34 + include/linux/mfd/wm831x/watchdog.h | 52 + include/linux/mfd/wm8350/audio.h | 625 ++ include/linux/mfd/wm8350/comparator.h | 175 + include/linux/mfd/wm8350/core.h | 694 ++ include/linux/mfd/wm8350/gpio.h | 361 + include/linux/mfd/wm8350/pmic.h | 780 ++ include/linux/mfd/wm8350/rtc.h | 269 + include/linux/mfd/wm8350/supply.h | 134 + include/linux/mfd/wm8350/wdt.h | 28 + include/linux/mfd/wm8400-audio.h | 1187 +++ include/linux/mfd/wm8400-private.h | 934 ++ include/linux/mfd/wm8400.h | 40 + include/linux/mfd/wm8994/core.h | 145 + include/linux/mfd/wm8994/gpio.h | 76 + include/linux/mfd/wm8994/pdata.h | 244 + include/linux/mfd/wm8994/registers.h | 4822 ++++++++++ include/linux/mfd/wm97xx.h | 25 + include/linux/mic_bus.h | 111 + include/linux/micrel_phy.h | 51 + include/linux/microchipphy.h | 84 + include/linux/migrate.h | 291 + include/linux/migrate_mode.h | 22 + include/linux/mii.h | 344 + include/linux/miscdevice.h | 98 + include/linux/mlx4/cmd.h | 334 + include/linux/mlx4/cq.h | 182 + include/linux/mlx4/device.h | 1605 ++++ include/linux/mlx4/doorbell.h | 86 + include/linux/mlx4/driver.h | 117 + include/linux/mlx4/qp.h | 506 ++ include/linux/mlx4/srq.h | 44 + include/linux/mlx5/accel.h | 144 + include/linux/mlx5/cmd.h | 51 + 
include/linux/mlx5/cq.h | 205 + include/linux/mlx5/device.h | 1262 +++ include/linux/mlx5/doorbell.h | 81 + include/linux/mlx5/driver.h | 1327 +++ include/linux/mlx5/eswitch.h | 60 + include/linux/mlx5/fs.h | 200 + include/linux/mlx5/fs_helpers.h | 142 + include/linux/mlx5/mlx5_ifc.h | 9295 ++++++++++++++++++++ include/linux/mlx5/mlx5_ifc_fpga.h | 616 ++ include/linux/mlx5/port.h | 193 + include/linux/mlx5/qp.h | 643 ++ include/linux/mlx5/srq.h | 71 + include/linux/mlx5/transobj.h | 103 + include/linux/mlx5/vport.h | 124 + include/linux/mm-arch-hooks.h | 25 + include/linux/mm.h | 2842 ++++++ include/linux/mm_inline.h | 130 + include/linux/mm_types.h | 657 ++ include/linux/mm_types_task.h | 95 + include/linux/mman.h | 138 + include/linux/mmc/card.h | 325 + include/linux/mmc/core.h | 183 + include/linux/mmc/host.h | 591 ++ include/linux/mmc/mmc.h | 441 + include/linux/mmc/pm.h | 30 + include/linux/mmc/sd.h | 94 + include/linux/mmc/sdhci-pci-data.h | 18 + include/linux/mmc/sdio.h | 193 + include/linux/mmc/sdio_func.h | 168 + include/linux/mmc/sdio_ids.h | 77 + include/linux/mmc/sh_mmcif.h | 213 + include/linux/mmc/slot-gpio.h | 39 + include/linux/mmdebug.h | 80 + include/linux/mmiotrace.h | 112 + include/linux/mmu_context.h | 17 + include/linux/mmu_notifier.h | 520 ++ include/linux/mmzone.h | 1343 +++ include/linux/mnt_namespace.h | 19 + include/linux/mod_devicetable.h | 771 ++ include/linux/module.h | 822 ++ include/linux/moduleloader.h | 96 + include/linux/moduleparam.h | 538 ++ include/linux/mount.h | 109 + include/linux/mpage.h | 25 + include/linux/mpi.h | 96 + include/linux/mpls.h | 12 + include/linux/mpls_iptunnel.h | 7 + include/linux/mroute.h | 87 + include/linux/mroute6.h | 113 + include/linux/mroute_base.h | 465 + include/linux/msdos_fs.h | 12 + include/linux/msg.h | 18 + include/linux/msi.h | 358 + include/linux/mtd/bbm.h | 169 + include/linux/mtd/blktrans.h | 96 + include/linux/mtd/cfi.h | 393 + include/linux/mtd/cfi_endian.h | 53 + include/linux/mtd/concat.h | 34 + include/linux/mtd/doc2000.h | 220 + include/linux/mtd/flashchip.h | 113 + include/linux/mtd/ftl.h | 74 + include/linux/mtd/gen_probe.h | 37 + include/linux/mtd/inftl.h | 63 + include/linux/mtd/latch-addr-flash.h | 29 + include/linux/mtd/lpc32xx_mlc.h | 20 + include/linux/mtd/lpc32xx_slc.h | 20 + include/linux/mtd/map.h | 478 + include/linux/mtd/mtd.h | 601 ++ include/linux/mtd/mtdram.h | 9 + include/linux/mtd/nand-gpio.h | 15 + include/linux/mtd/nand.h | 733 ++ include/linux/mtd/nand_bch.h | 68 + include/linux/mtd/nand_ecc.h | 40 + include/linux/mtd/ndfc.h | 65 + include/linux/mtd/nftl.h | 71 + include/linux/mtd/onenand.h | 240 + include/linux/mtd/onenand_regs.h | 223 + include/linux/mtd/partitions.h | 114 + include/linux/mtd/pfow.h | 157 + include/linux/mtd/physmap.h | 36 + include/linux/mtd/pismo.h | 17 + include/linux/mtd/plat-ram.h | 34 + include/linux/mtd/qinfo.h | 92 + include/linux/mtd/rawnand.h | 1755 ++++ include/linux/mtd/sh_flctl.h | 192 + include/linux/mtd/sharpsl.h | 21 + include/linux/mtd/spear_smi.h | 65 + include/linux/mtd/spi-nor.h | 415 + include/linux/mtd/spinand.h | 421 + include/linux/mtd/super.h | 29 + include/linux/mtd/ubi.h | 284 + include/linux/mtd/xip.h | 101 + include/linux/mutex.h | 232 + include/linux/mux/consumer.h | 31 + include/linux/mux/driver.h | 105 + include/linux/mv643xx.h | 979 +++ include/linux/mv643xx_eth.h | 87 + include/linux/mv643xx_i2c.h | 22 + include/linux/mvebu-pmsu.h | 20 + include/linux/mxm-wmi.h | 33 + include/linux/n_r3964.h | 175 + include/linux/namei.h | 118 + 
include/linux/nd.h | 191 + include/linux/net.h | 333 + include/linux/net_dim.h | 418 + include/linux/netdev_features.h | 248 + include/linux/netdevice.h | 4826 ++++++++++ include/linux/netfilter.h | 471 + include/linux/netfilter/ipset/ip_set.h | 470 + include/linux/netfilter/ipset/ip_set_bitmap.h | 29 + include/linux/netfilter/ipset/ip_set_comment.h | 76 + include/linux/netfilter/ipset/ip_set_counter.h | 88 + include/linux/netfilter/ipset/ip_set_getport.h | 34 + include/linux/netfilter/ipset/ip_set_hash.h | 14 + include/linux/netfilter/ipset/ip_set_list.h | 12 + include/linux/netfilter/ipset/ip_set_skbinfo.h | 46 + include/linux/netfilter/ipset/ip_set_timeout.h | 81 + include/linux/netfilter/ipset/pfxlen.h | 54 + include/linux/netfilter/nf_conntrack_amanda.h | 12 + include/linux/netfilter/nf_conntrack_common.h | 25 + include/linux/netfilter/nf_conntrack_dccp.h | 41 + include/linux/netfilter/nf_conntrack_ftp.h | 34 + include/linux/netfilter/nf_conntrack_h323.h | 98 + include/linux/netfilter/nf_conntrack_h323_asn1.h | 98 + include/linux/netfilter/nf_conntrack_h323_types.h | 934 ++ include/linux/netfilter/nf_conntrack_irc.h | 17 + include/linux/netfilter/nf_conntrack_pptp.h | 327 + include/linux/netfilter/nf_conntrack_proto_gre.h | 47 + include/linux/netfilter/nf_conntrack_sane.h | 22 + include/linux/netfilter/nf_conntrack_sctp.h | 16 + include/linux/netfilter/nf_conntrack_sip.h | 200 + include/linux/netfilter/nf_conntrack_snmp.h | 10 + include/linux/netfilter/nf_conntrack_tcp.h | 33 + include/linux/netfilter/nf_conntrack_tftp.h | 21 + .../linux/netfilter/nf_conntrack_zones_common.h | 24 + include/linux/netfilter/nfnetlink.h | 67 + include/linux/netfilter/nfnetlink_acct.h | 20 + include/linux/netfilter/nfnetlink_osf.h | 32 + include/linux/netfilter/x_tables.h | 534 ++ include/linux/netfilter/xt_hashlimit.h | 11 + include/linux/netfilter/xt_physdev.h | 8 + include/linux/netfilter_arp/arp_tables.h | 80 + include/linux/netfilter_bridge.h | 66 + include/linux/netfilter_bridge/ebt_802_3.h | 12 + include/linux/netfilter_bridge/ebtables.h | 129 + include/linux/netfilter_defs.h | 20 + include/linux/netfilter_ingress.h | 58 + include/linux/netfilter_ipv4.h | 47 + include/linux/netfilter_ipv4/ip_tables.h | 93 + include/linux/netfilter_ipv6.h | 58 + include/linux/netfilter_ipv6/ip6_tables.h | 70 + include/linux/netlink.h | 232 + include/linux/netpoll.h | 124 + include/linux/nfs.h | 55 + include/linux/nfs3.h | 14 + include/linux/nfs4.h | 675 ++ include/linux/nfs_fs.h | 589 ++ include/linux/nfs_fs_i.h | 21 + include/linux/nfs_fs_sb.h | 262 + include/linux/nfs_iostat.h | 134 + include/linux/nfs_page.h | 204 + include/linux/nfs_xdr.h | 1689 ++++ include/linux/nfsacl.h | 42 + include/linux/nl802154.h | 180 + include/linux/nls.h | 109 + include/linux/nmi.h | 220 + include/linux/node.h | 122 + include/linux/nodemask.h | 541 ++ include/linux/nospec.h | 68 + include/linux/notifier.h | 241 + include/linux/ns_common.h | 13 + include/linux/nsc_gpio.h | 41 + include/linux/nsproxy.h | 88 + include/linux/ntb.h | 1505 ++++ include/linux/ntb_transport.h | 86 + include/linux/nubus.h | 188 + include/linux/numa.h | 16 + include/linux/nvme-fc-driver.h | 896 ++ include/linux/nvme-fc.h | 357 + include/linux/nvme-rdma.h | 95 + include/linux/nvme.h | 1271 +++ include/linux/nvmem-consumer.h | 167 + include/linux/nvmem-provider.h | 116 + include/linux/nvram.h | 14 + include/linux/of.h | 1457 +++ include/linux/of_address.h | 165 + include/linux/of_clk.h | 33 + include/linux/of_device.h | 120 + include/linux/of_dma.h | 93 + 
include/linux/of_fdt.h | 112 + include/linux/of_gpio.h | 157 + include/linux/of_graph.h | 121 + include/linux/of_iommu.h | 35 + include/linux/of_irq.h | 118 + include/linux/of_mdio.h | 119 + include/linux/of_net.h | 40 + include/linux/of_pci.h | 52 + include/linux/of_pdt.h | 40 + include/linux/of_platform.h | 116 + include/linux/of_reserved_mem.h | 81 + include/linux/oid_registry.h | 103 + include/linux/olpc-ec.h | 43 + include/linux/omap-dma.h | 378 + include/linux/omap-dmaengine.h | 21 + include/linux/omap-gpmc.h | 103 + include/linux/omap-iommu.h | 24 + include/linux/omap-mailbox.h | 24 + include/linux/omapfb.h | 42 + include/linux/once.h | 60 + include/linux/oom.h | 121 + include/linux/openvswitch.h | 29 + include/linux/oprofile.h | 209 + include/linux/osq_lock.h | 41 + include/linux/overflow.h | 316 + include/linux/oxu210hp.h | 8 + include/linux/padata.h | 182 + include/linux/page-flags-layout.h | 97 + include/linux/page-flags.h | 804 ++ include/linux/page-isolation.h | 68 + include/linux/page_counter.h | 67 + include/linux/page_ext.h | 75 + include/linux/page_idle.h | 140 + include/linux/page_owner.h | 76 + include/linux/page_ref.h | 183 + include/linux/pageblock-flags.h | 112 + include/linux/pagemap.h | 649 ++ include/linux/pagevec.h | 89 + include/linux/parman.h | 76 + include/linux/parport.h | 521 ++ include/linux/parport_pc.h | 239 + include/linux/parser.h | 36 + include/linux/pata_arasan_cf_data.h | 47 + include/linux/patchkey.h | 26 + include/linux/path.h | 27 + include/linux/pch_dma.h | 37 + include/linux/pci-acpi.h | 125 + include/linux/pci-aspm.h | 35 + include/linux/pci-ats.h | 72 + include/linux/pci-dma-compat.h | 147 + include/linux/pci-dma.h | 12 + include/linux/pci-ecam.h | 67 + include/linux/pci-ep-cfs.h | 38 + include/linux/pci-epc.h | 174 + include/linux/pci-epf.h | 157 + include/linux/pci.h | 2347 +++++ include/linux/pci_hotplug.h | 196 + include/linux/pci_ids.h | 3121 +++++++ include/linux/pda_power.h | 42 + include/linux/pe.h | 455 + include/linux/percpu-defs.h | 528 ++ include/linux/percpu-refcount.h | 334 + include/linux/percpu-rwsem.h | 150 + include/linux/percpu.h | 154 + include/linux/percpu_counter.h | 192 + include/linux/perf/arm_pmu.h | 176 + include/linux/perf_event.h | 1434 +++ include/linux/perf_regs.h | 44 + include/linux/personality.h | 17 + include/linux/pfn.h | 24 + include/linux/pfn_t.h | 136 + include/linux/phonet.h | 40 + include/linux/phy.h | 1174 +++ include/linux/phy/omap_control_phy.h | 99 + include/linux/phy/omap_usb.h | 100 + include/linux/phy/phy-qcom-ufs.h | 38 + include/linux/phy/phy-sun4i-usb.h | 26 + include/linux/phy/phy.h | 425 + include/linux/phy/tegra/xusb.h | 30 + include/linux/phy/ulpi_phy.h | 32 + include/linux/phy_fixed.h | 52 + include/linux/phy_led_triggers.h | 51 + include/linux/phylink.h | 239 + include/linux/pid.h | 194 + include/linux/pid_namespace.h | 103 + include/linux/pim.h | 96 + include/linux/pinctrl/consumer.h | 201 + include/linux/pinctrl/devinfo.h | 61 + include/linux/pinctrl/machine.h | 170 + include/linux/pinctrl/pinconf-generic.h | 228 + include/linux/pinctrl/pinconf.h | 76 + include/linux/pinctrl/pinctrl-state.h | 33 + include/linux/pinctrl/pinctrl.h | 213 + include/linux/pinctrl/pinmux.h | 90 + include/linux/pipe_fs_i.h | 199 + include/linux/pkeys.h | 53 + include/linux/pktcdvd.h | 205 + include/linux/pl320-ipc.h | 17 + include/linux/platform_data/ad5449.h | 40 + include/linux/platform_data/ad5755.h | 103 + include/linux/platform_data/ad5761.h | 44 + include/linux/platform_data/ad7266.h | 54 + 
include/linux/platform_data/ad7291.h | 13 + include/linux/platform_data/ad7298.h | 20 + include/linux/platform_data/ad7303.h | 21 + include/linux/platform_data/ad7791.h | 18 + include/linux/platform_data/ad7793.h | 112 + include/linux/platform_data/ad7879.h | 42 + include/linux/platform_data/ad7887.h | 26 + include/linux/platform_data/adau17x1.h | 109 + include/linux/platform_data/adau1977.h | 45 + include/linux/platform_data/adp5588.h | 172 + include/linux/platform_data/adp8860.h | 154 + include/linux/platform_data/adp8870.h | 153 + include/linux/platform_data/ads1015.h | 36 + include/linux/platform_data/ads7828.h | 29 + include/linux/platform_data/ams-delta-fiq.h | 58 + include/linux/platform_data/apds990x.h | 79 + include/linux/platform_data/arm-ux500-pm.h | 21 + include/linux/platform_data/asoc-imx-ssi.h | 24 + include/linux/platform_data/asoc-kirkwood.h | 8 + include/linux/platform_data/asoc-mx27vis.h | 12 + include/linux/platform_data/asoc-palm27x.h | 9 + include/linux/platform_data/asoc-s3c.h | 51 + include/linux/platform_data/asoc-s3c24xx_simtec.h | 33 + include/linux/platform_data/asoc-ti-mcbsp.h | 48 + include/linux/platform_data/asoc-ux500-msp.h | 20 + include/linux/platform_data/at24.h | 60 + include/linux/platform_data/at91_adc.h | 50 + include/linux/platform_data/ata-pxa.h | 33 + include/linux/platform_data/ata-samsung_cf.h | 34 + include/linux/platform_data/atmel.h | 32 + include/linux/platform_data/b53.h | 37 + include/linux/platform_data/bcmgenet.h | 19 + include/linux/platform_data/bd6107.h | 19 + include/linux/platform_data/bh1770glc.h | 53 + include/linux/platform_data/brcmfmac.h | 185 + include/linux/platform_data/clk-da8xx-cfgchip.h | 21 + include/linux/platform_data/clk-davinci-pll.h | 21 + include/linux/platform_data/clk-integrator.h | 2 + include/linux/platform_data/clk-lpss.h | 23 + include/linux/platform_data/clk-st.h | 17 + include/linux/platform_data/clk-u300.h | 1 + include/linux/platform_data/cpuidle-exynos.h | 20 + include/linux/platform_data/crypto-atmel.h | 23 + include/linux/platform_data/crypto-ux500.h | 22 + include/linux/platform_data/cyttsp4.h | 76 + include/linux/platform_data/davinci_asp.h | 112 + include/linux/platform_data/db8500_thermal.h | 38 + include/linux/platform_data/dma-atmel.h | 65 + include/linux/platform_data/dma-coh901318.h | 72 + include/linux/platform_data/dma-dw.h | 76 + include/linux/platform_data/dma-ep93xx.h | 94 + include/linux/platform_data/dma-hsu.h | 21 + include/linux/platform_data/dma-imx-sdma.h | 71 + include/linux/platform_data/dma-imx.h | 71 + include/linux/platform_data/dma-mmp_tdma.h | 40 + include/linux/platform_data/dma-mv_xor.h | 22 + include/linux/platform_data/dma-s3c24xx.h | 52 + include/linux/platform_data/dma-ste-dma40.h | 209 + include/linux/platform_data/dmtimer-omap.h | 69 + include/linux/platform_data/ds620.h | 22 + include/linux/platform_data/dwc3-omap.h | 43 + include/linux/platform_data/edma.h | 88 + include/linux/platform_data/efm32-spi.h | 15 + include/linux/platform_data/efm32-uart.h | 19 + include/linux/platform_data/ehci-sh.h | 28 + include/linux/platform_data/elm.h | 65 + include/linux/platform_data/emif_plat.h | 129 + include/linux/platform_data/eth-netx.h | 25 + include/linux/platform_data/fsa9480.h | 27 + include/linux/platform_data/g762.h | 37 + include/linux/platform_data/gpio-ath79.h | 19 + include/linux/platform_data/gpio-davinci.h | 61 + include/linux/platform_data/gpio-dwapb.h | 32 + include/linux/platform_data/gpio-htc-egpio.h | 56 + include/linux/platform_data/gpio-omap.h | 219 + 
include/linux/platform_data/gpio-ts5500.h | 27 + include/linux/platform_data/gpio_backlight.h | 20 + include/linux/platform_data/gpmc-omap.h | 172 + include/linux/platform_data/hsmmc-omap.h | 81 + include/linux/platform_data/hwmon-s3c.h | 49 + include/linux/platform_data/i2c-cbus-gpio.h | 27 + include/linux/platform_data/i2c-davinci.h | 26 + include/linux/platform_data/i2c-designware.h | 21 + include/linux/platform_data/i2c-gpio.h | 34 + include/linux/platform_data/i2c-hid.h | 41 + include/linux/platform_data/i2c-imx.h | 21 + include/linux/platform_data/i2c-mux-gpio.h | 43 + include/linux/platform_data/i2c-mux-reg.h | 44 + include/linux/platform_data/i2c-ocores.h | 23 + include/linux/platform_data/i2c-omap.h | 39 + include/linux/platform_data/i2c-pca-platform.h | 10 + include/linux/platform_data/i2c-pxa.h | 74 + include/linux/platform_data/i2c-s3c2410.h | 78 + include/linux/platform_data/i2c-xiic.h | 43 + include/linux/platform_data/ina2xx.h | 19 + include/linux/platform_data/intel-mid_wdt.h | 22 + include/linux/platform_data/intel-spi.h | 31 + include/linux/platform_data/invensense_mpu6050.h | 34 + include/linux/platform_data/iommu-omap.h | 19 + include/linux/platform_data/irda-pxaficp.h | 26 + include/linux/platform_data/irda-sa11x0.h | 20 + include/linux/platform_data/isl9305.h | 30 + include/linux/platform_data/itco_wdt.h | 24 + include/linux/platform_data/jz4740/jz4740_nand.h | 34 + .../linux/platform_data/keyboard-pxa930_rotary.h | 21 + include/linux/platform_data/keyboard-spear.h | 164 + include/linux/platform_data/keypad-ep93xx.h | 32 + include/linux/platform_data/keypad-nomadik-ske.h | 50 + include/linux/platform_data/keypad-omap.h | 50 + include/linux/platform_data/keypad-pxa27x.h | 73 + include/linux/platform_data/keypad-w90p910.h | 16 + include/linux/platform_data/keyscan-davinci.h | 42 + include/linux/platform_data/lcd-mipid.h | 30 + .../linux/platform_data/leds-kirkwood-netxbig.h | 54 + include/linux/platform_data/leds-kirkwood-ns2.h | 38 + include/linux/platform_data/leds-lm355x.h | 66 + include/linux/platform_data/leds-lm3642.h | 38 + include/linux/platform_data/leds-lp55xx.h | 81 + include/linux/platform_data/leds-omap.h | 22 + include/linux/platform_data/leds-pca963x.h | 48 + include/linux/platform_data/leds-s3c24xx.h | 27 + include/linux/platform_data/lm3630a_bl.h | 65 + include/linux/platform_data/lm3639_bl.h | 69 + include/linux/platform_data/lm8323.h | 46 + include/linux/platform_data/lp855x.h | 149 + include/linux/platform_data/lp8727.h | 68 + include/linux/platform_data/lp8755.h | 71 + include/linux/platform_data/ltc4245.h | 21 + include/linux/platform_data/lv5207lp.h | 19 + include/linux/platform_data/macb.h | 32 + include/linux/platform_data/max197.h | 26 + include/linux/platform_data/max3421-hcd.h | 25 + include/linux/platform_data/max6639.h | 15 + include/linux/platform_data/max6697.h | 36 + include/linux/platform_data/max732x.h | 23 + include/linux/platform_data/mcs.h | 35 + include/linux/platform_data/mdio-bcm-unimac.h | 13 + include/linux/platform_data/media/camera-mx2.h | 44 + include/linux/platform_data/media/camera-mx3.h | 52 + include/linux/platform_data/media/camera-pxa.h | 46 + include/linux/platform_data/media/coda.h | 18 + include/linux/platform_data/media/mmp-camera.h | 29 + include/linux/platform_data/media/omap1_camera.h | 35 + include/linux/platform_data/media/omap4iss.h | 66 + include/linux/platform_data/media/s5p_hdmi.h | 36 + include/linux/platform_data/media/si4713.h | 48 + .../platform_data/media/soc_camera_platform.h | 83 + 
include/linux/platform_data/media/timb_radio.h | 30 + include/linux/platform_data/media/timb_video.h | 33 + include/linux/platform_data/mfd-mcp-sa11x0.h | 20 + include/linux/platform_data/microchip-ksz.h | 29 + include/linux/platform_data/mlxreg.h | 147 + include/linux/platform_data/mmc-davinci.h | 37 + include/linux/platform_data/mmc-esdhc-imx.h | 49 + include/linux/platform_data/mmc-mxcmmc.h | 41 + include/linux/platform_data/mmc-omap.h | 121 + include/linux/platform_data/mmc-pxamci.h | 29 + include/linux/platform_data/mmc-s3cmci.h | 53 + include/linux/platform_data/mmc-sdhci-s3c.h | 57 + include/linux/platform_data/mmp_audio.h | 22 + include/linux/platform_data/mmp_dma.h | 24 + include/linux/platform_data/mouse-pxa930_trkball.h | 11 + include/linux/platform_data/mtd-davinci-aemif.h | 36 + include/linux/platform_data/mtd-davinci.h | 100 + include/linux/platform_data/mtd-mxc_nand.h | 32 + include/linux/platform_data/mtd-nand-omap2.h | 67 + include/linux/platform_data/mtd-nand-pxa3xx.h | 27 + include/linux/platform_data/mtd-nand-s3c2410.h | 73 + include/linux/platform_data/mtd-orion_nand.h | 23 + include/linux/platform_data/mv88e6xxx.h | 18 + include/linux/platform_data/mv_usb.h | 53 + include/linux/platform_data/net-cw1200.h | 81 + include/linux/platform_data/nfcmrvl.h | 48 + include/linux/platform_data/ntc_thermistor.h | 62 + include/linux/platform_data/nxp-nci.h | 27 + include/linux/platform_data/omap-twl4030.h | 58 + include/linux/platform_data/omap-wd-timer.h | 38 + include/linux/platform_data/omap1_bl.h | 12 + include/linux/platform_data/omapdss.h | 36 + include/linux/platform_data/pca953x.h | 31 + include/linux/platform_data/pca954x.h | 48 + include/linux/platform_data/pcf857x.h | 45 + include/linux/platform_data/pcmcia-pxa2xx_viper.h | 12 + include/linux/platform_data/phy-da8xx-usb.h | 21 + include/linux/platform_data/pinctrl-single.h | 13 + include/linux/platform_data/pixcir_i2c_ts.h | 64 + include/linux/platform_data/pm33xx.h | 69 + include/linux/platform_data/pwm_omap_dmtimer.h | 90 + include/linux/platform_data/pxa2xx_udc.h | 28 + include/linux/platform_data/pxa_sdhci.h | 58 + include/linux/platform_data/regulator-haptic.h | 29 + include/linux/platform_data/remoteproc-omap.h | 59 + include/linux/platform_data/rtc-ds2404.h | 20 + include/linux/platform_data/rtc-v3020.h | 41 + include/linux/platform_data/s3c-hsotg.h | 42 + include/linux/platform_data/s3c-hsudc.h | 34 + include/linux/platform_data/sa11x0-serial.h | 38 + include/linux/platform_data/sc18is602.h | 19 + include/linux/platform_data/sdhci-pic32.h | 22 + include/linux/platform_data/serial-imx.h | 28 + include/linux/platform_data/serial-omap.h | 46 + include/linux/platform_data/serial-sccnxp.h | 88 + include/linux/platform_data/shmob_drm.h | 95 + include/linux/platform_data/sht3x.h | 25 + include/linux/platform_data/shtc1.h | 23 + include/linux/platform_data/si5351.h | 115 + include/linux/platform_data/simplefb.h | 64 + include/linux/platform_data/sky81452-backlight.h | 46 + include/linux/platform_data/spi-clps711x.h | 21 + include/linux/platform_data/spi-davinci.h | 90 + include/linux/platform_data/spi-ep93xx.h | 19 + include/linux/platform_data/spi-imx.h | 33 + include/linux/platform_data/spi-mt65xx.h | 22 + include/linux/platform_data/spi-nuc900.h | 33 + include/linux/platform_data/spi-omap2-mcspi.h | 23 + include/linux/platform_data/spi-s3c64xx.h | 68 + include/linux/platform_data/ssm2518.h | 22 + include/linux/platform_data/st33zp24.h | 28 + include/linux/platform_data/st_sensors_pdata.h | 28 + 
include/linux/platform_data/syscon.h | 9 + include/linux/platform_data/tc35876x.h | 11 + include/linux/platform_data/tda9950.h | 16 + include/linux/platform_data/ti-aemif.h | 48 + include/linux/platform_data/ti-sysc.h | 138 + include/linux/platform_data/touchscreen-s3c2410.h | 25 + include/linux/platform_data/tsc2007.h | 23 + include/linux/platform_data/tsl2563.h | 9 + include/linux/platform_data/tsl2772.h | 101 + include/linux/platform_data/txx9/ndfmc.h | 30 + include/linux/platform_data/uio_dmem_genirq.h | 26 + include/linux/platform_data/uio_pruss.h | 26 + include/linux/platform_data/usb-davinci.h | 36 + include/linux/platform_data/usb-ehci-mxc.h | 14 + include/linux/platform_data/usb-ehci-orion.h | 24 + include/linux/platform_data/usb-musb-ux500.h | 22 + include/linux/platform_data/usb-mx2.h | 38 + include/linux/platform_data/usb-ohci-pxa27x.h | 37 + include/linux/platform_data/usb-ohci-s3c2410.h | 43 + include/linux/platform_data/usb-omap.h | 88 + include/linux/platform_data/usb-omap1.h | 53 + include/linux/platform_data/usb-pxa3xx-ulpi.h | 35 + include/linux/platform_data/usb-s3c2410_udc.h | 44 + include/linux/platform_data/usb3503.h | 25 + include/linux/platform_data/ux500_wdt.h | 19 + include/linux/platform_data/video-clcd-versatile.h | 28 + include/linux/platform_data/video-ep93xx.h | 45 + include/linux/platform_data/video-imxfb.h | 70 + include/linux/platform_data/video-mx3fb.h | 53 + include/linux/platform_data/video-nuc900fb.h | 83 + include/linux/platform_data/video-pxafb.h | 173 + include/linux/platform_data/video_s3c.h | 55 + include/linux/platform_data/voltage-omap.h | 39 + include/linux/platform_data/wiznet.h | 24 + include/linux/platform_data/wkup_m3.h | 30 + include/linux/platform_data/x86/apple.h | 13 + include/linux/platform_data/x86/clk-pmc-atom.h | 47 + include/linux/platform_data/x86/mlxcpld.h | 52 + include/linux/platform_data/x86/pmc_atom.h | 158 + include/linux/platform_data/zforce_ts.h | 23 + include/linux/platform_device.h | 373 + include/linux/plist.h | 300 + include/linux/pm-trace.h | 43 + include/linux/pm.h | 830 ++ include/linux/pm2301_charger.h | 61 + include/linux/pm_clock.h | 99 + include/linux/pm_domain.h | 336 + include/linux/pm_opp.h | 348 + include/linux/pm_qos.h | 250 + include/linux/pm_runtime.h | 279 + include/linux/pm_wakeirq.h | 51 + include/linux/pm_wakeup.h | 228 + include/linux/pmbus.h | 49 + include/linux/pmu.h | 86 + include/linux/pnfs_osd_xdr.h | 317 + include/linux/pnp.h | 517 ++ include/linux/poison.h | 92 + include/linux/poll.h | 149 + include/linux/posix-clock.h | 132 + include/linux/posix-timers.h | 130 + include/linux/posix_acl.h | 124 + include/linux/posix_acl_xattr.h | 55 + include/linux/power/ab8500.h | 16 + include/linux/power/bq2415x_charger.h | 58 + include/linux/power/bq24190_charger.h | 18 + include/linux/power/bq24735-charger.h | 37 + include/linux/power/bq27xxx_battery.h | 78 + include/linux/power/charger-manager.h | 259 + include/linux/power/generic-adc-battery.h | 29 + include/linux/power/gpio-charger.h | 41 + include/linux/power/isp1704_charger.h | 30 + include/linux/power/jz4740-battery.h | 24 + include/linux/power/max17042_battery.h | 231 + include/linux/power/max8903_charger.h | 57 + include/linux/power/sbs-battery.h | 38 + include/linux/power/smartreflex.h | 329 + include/linux/power/smb347-charger.h | 117 + include/linux/power/twl4030_madc_battery.h | 39 + include/linux/power_supply.h | 453 + include/linux/powercap.h | 325 + include/linux/ppp-comp.h | 106 + include/linux/ppp_channel.h | 88 + include/linux/ppp_defs.h 
| 17 + include/linux/pps-gpio.h | 32 + include/linux/pps_kernel.h | 133 + include/linux/pr.h | 19 + include/linux/prandom.h | 101 + include/linux/preempt.h | 328 + include/linux/prefetch.h | 65 + include/linux/prime_numbers.h | 38 + include/linux/printk.h | 528 ++ include/linux/proc_fs.h | 143 + include/linux/proc_ns.h | 89 + include/linux/processor.h | 71 + include/linux/profile.h | 139 + include/linux/projid.h | 90 + include/linux/property.h | 314 + include/linux/psci.h | 70 + include/linux/psp-sev.h | 629 ++ include/linux/pstore.h | 279 + include/linux/pstore_ram.h | 105 + include/linux/pti.h | 12 + include/linux/ptp_classify.h | 83 + include/linux/ptp_clock_kernel.h | 258 + include/linux/ptr_ring.h | 678 ++ include/linux/ptrace.h | 403 + include/linux/purgatory.h | 24 + include/linux/pvclock_gtod.h | 17 + include/linux/pwm.h | 652 ++ include/linux/pwm_backlight.h | 28 + include/linux/pxa168_eth.h | 34 + include/linux/pxa2xx_ssp.h | 268 + include/linux/qcom-geni-se.h | 425 + include/linux/qcom_scm.h | 107 + include/linux/qed/common_hsi.h | 1391 +++ include/linux/qed/eth_common.h | 481 + include/linux/qed/fcoe_common.h | 744 ++ include/linux/qed/iscsi_common.h | 1572 ++++ include/linux/qed/iwarp_common.h | 56 + include/linux/qed/qed_chain.h | 735 ++ include/linux/qed/qed_eth_if.h | 371 + include/linux/qed/qed_fcoe_if.h | 151 + include/linux/qed/qed_if.h | 1329 +++ include/linux/qed/qed_iov_if.h | 60 + include/linux/qed/qed_iscsi_if.h | 260 + include/linux/qed/qed_ll2_if.h | 307 + include/linux/qed/qed_rdma_if.h | 704 ++ include/linux/qed/qede_rdma.h | 94 + include/linux/qed/rdma_common.h | 73 + include/linux/qed/roce_common.h | 69 + include/linux/qed/storage_common.h | 182 + include/linux/qed/tcp_common.h | 281 + include/linux/qnx6_fs.h | 135 + include/linux/quicklist.h | 94 + include/linux/quota.h | 540 ++ include/linux/quotaops.h | 404 + include/linux/radix-tree.h | 621 ++ include/linux/raid/md_u.h | 20 + include/linux/raid/pq.h | 191 + include/linux/raid/xor.h | 23 + include/linux/raid_class.h | 84 + include/linux/ramfs.h | 26 + include/linux/random.h | 141 + include/linux/range.h | 31 + include/linux/ras.h | 43 + include/linux/ratelimit.h | 113 + include/linux/rational.h | 20 + include/linux/rbtree.h | 151 + include/linux/rbtree_augmented.h | 292 + include/linux/rbtree_latch.h | 214 + include/linux/rcu_node_tree.h | 103 + include/linux/rcu_segcblist.h | 94 + include/linux/rcu_sync.h | 87 + include/linux/rculist.h | 705 ++ include/linux/rculist_bl.h | 129 + include/linux/rculist_nulls.h | 174 + include/linux/rcupdate.h | 886 ++ include/linux/rcupdate_wait.h | 51 + include/linux/rcutiny.h | 137 + include/linux/rcutree.h | 106 + include/linux/rcuwait.h | 64 + include/linux/reboot-mode.h | 19 + include/linux/reboot.h | 88 + include/linux/reciprocal_div.h | 104 + include/linux/refcount.h | 118 + include/linux/regmap.h | 1372 +++ include/linux/regset.h | 428 + include/linux/regulator/ab8500.h | 167 + include/linux/regulator/act8865.h | 90 + include/linux/regulator/arizona-ldo1.h | 21 + include/linux/regulator/arizona-micsupp.h | 21 + include/linux/regulator/consumer.h | 612 ++ include/linux/regulator/da9211.h | 48 + include/linux/regulator/db8500-prcmu.h | 45 + include/linux/regulator/driver.h | 530 ++ include/linux/regulator/fan53555.h | 61 + include/linux/regulator/fixed.h | 76 + include/linux/regulator/gpio-regulator.h | 87 + include/linux/regulator/lp3971.h | 51 + include/linux/regulator/lp3972.h | 48 + include/linux/regulator/lp872x.h | 95 + include/linux/regulator/machine.h | 265 + 
include/linux/regulator/max1586.h | 63 + include/linux/regulator/max8649.h | 44 + include/linux/regulator/max8660.h | 57 + include/linux/regulator/max8952.h | 134 + include/linux/regulator/max8973-regulator.h | 81 + include/linux/regulator/mt6311.h | 29 + include/linux/regulator/mt6323-regulator.h | 52 + include/linux/regulator/mt6380-regulator.h | 32 + include/linux/regulator/mt6397-regulator.h | 49 + include/linux/regulator/of_regulator.h | 46 + include/linux/regulator/pfuze100.h | 84 + include/linux/regulator/tps51632-regulator.h | 47 + include/linux/regulator/tps62360.h | 53 + include/linux/regulator/tps6507x.h | 32 + include/linux/regulator/userspace-consumer.h | 26 + include/linux/relay.h | 300 + include/linux/remoteproc.h | 577 ++ include/linux/remoteproc/qcom_rproc.h | 22 + include/linux/remoteproc/st_slim_rproc.h | 58 + include/linux/reservation.h | 290 + include/linux/reset-controller.h | 91 + include/linux/reset.h | 428 + include/linux/reset/bcm63xx_pmb.h | 88 + include/linux/resource.h | 14 + include/linux/resource_ext.h | 77 + include/linux/restart_block.h | 60 + include/linux/rfkill.h | 320 + include/linux/rhashtable-types.h | 137 + include/linux/rhashtable.h | 1154 +++ include/linux/ring_buffer.h | 213 + include/linux/rio.h | 562 ++ include/linux/rio_drv.h | 456 + include/linux/rio_ids.h | 44 + include/linux/rio_regs.h | 395 + include/linux/rmap.h | 304 + include/linux/rmi.h | 379 + include/linux/rndis.h | 392 + include/linux/rodata_test.h | 22 + include/linux/root_dev.h | 24 + include/linux/rpmsg.h | 263 + include/linux/rpmsg/qcom_glink.h | 29 + include/linux/rpmsg/qcom_smd.h | 32 + include/linux/rslib.h | 131 + include/linux/rtc.h | 280 + include/linux/rtc/ds1286.h | 52 + include/linux/rtc/ds1307.h | 22 + include/linux/rtc/ds1685.h | 375 + include/linux/rtc/m48t59.h | 64 + include/linux/rtc/sirfsoc_rtciobrg.h | 22 + include/linux/rtmutex.h | 125 + include/linux/rtnetlink.h | 137 + include/linux/rtsx_common.h | 50 + include/linux/rtsx_pci.h | 1367 +++ include/linux/rtsx_usb.h | 628 ++ include/linux/rwlock.h | 131 + include/linux/rwlock_api_smp.h | 278 + include/linux/rwlock_types.h | 45 + include/linux/rwsem-spinlock.h | 47 + include/linux/rwsem.h | 196 + include/linux/s3c_adc_battery.h | 42 + include/linux/sa11x0-dma.h | 24 + include/linux/sbitmap.h | 534 ++ include/linux/scatterlist.h | 433 + include/linux/scc.h | 86 + include/linux/sched.h | 1911 ++++ include/linux/sched/autogroup.h | 32 + include/linux/sched/clock.h | 101 + include/linux/sched/coredump.h | 81 + include/linux/sched/cpufreq.h | 28 + include/linux/sched/cputime.h | 189 + include/linux/sched/deadline.h | 26 + include/linux/sched/debug.h | 53 + include/linux/sched/hotplug.h | 25 + include/linux/sched/idle.h | 87 + include/linux/sched/init.h | 12 + include/linux/sched/isolation.h | 52 + include/linux/sched/jobctl.h | 37 + include/linux/sched/loadavg.h | 32 + include/linux/sched/mm.h | 366 + include/linux/sched/nohz.h | 38 + include/linux/sched/numa_balancing.h | 47 + include/linux/sched/prio.h | 61 + include/linux/sched/rt.h | 67 + include/linux/sched/signal.h | 702 ++ include/linux/sched/smt.h | 20 + include/linux/sched/stat.h | 41 + include/linux/sched/sysctl.h | 86 + include/linux/sched/task.h | 155 + include/linux/sched/task_stack.h | 126 + include/linux/sched/topology.h | 227 + include/linux/sched/user.h | 68 + include/linux/sched/wake_q.h | 54 + include/linux/sched/xacct.h | 49 + include/linux/sched_clock.h | 25 + include/linux/scif.h | 1339 +++ include/linux/scmi_protocol.h | 285 + 
include/linux/scpi_protocol.h | 84 + include/linux/screen_info.h | 9 + include/linux/sctp.h | 811 ++ include/linux/scx200.h | 52 + include/linux/scx200_gpio.h | 89 + include/linux/sdb.h | 160 + include/linux/sdla.h | 244 + include/linux/seccomp.h | 114 + include/linux/securebits.h | 8 + include/linux/security.h | 1847 ++++ include/linux/sed-opal.h | 75 + include/linux/seg6.h | 7 + include/linux/seg6_genl.h | 7 + include/linux/seg6_hmac.h | 7 + include/linux/seg6_iptunnel.h | 7 + include/linux/seg6_local.h | 6 + include/linux/selection.h | 53 + include/linux/selinux.h | 35 + include/linux/sem.h | 36 + include/linux/semaphore.h | 46 + include/linux/seq_buf.h | 134 + include/linux/seq_file.h | 244 + include/linux/seq_file_net.h | 38 + include/linux/seqlock.h | 608 ++ include/linux/seqno-fence.h | 117 + include/linux/serdev.h | 338 + include/linux/serial.h | 33 + include/linux/serial_8250.h | 186 + include/linux/serial_bcm63xx.h | 120 + include/linux/serial_core.h | 556 ++ include/linux/serial_max3100.h | 52 + include/linux/serial_pnx8xxx.h | 80 + include/linux/serial_s3c.h | 277 + include/linux/serial_sci.h | 66 + include/linux/serio.h | 167 + include/linux/set_memory.h | 46 + include/linux/sfi.h | 210 + include/linux/sfi_acpi.h | 93 + include/linux/sfp.h | 564 ++ include/linux/sh_clk.h | 213 + include/linux/sh_dma.h | 115 + include/linux/sh_eth.h | 19 + include/linux/sh_intc.h | 150 + include/linux/sh_timer.h | 9 + include/linux/sha256.h | 30 + include/linux/shdma-base.h | 137 + include/linux/shm.h | 45 + include/linux/shmem_fs.h | 139 + include/linux/shrinker.h | 90 + include/linux/signal.h | 452 + include/linux/signal_types.h | 67 + include/linux/signalfd.h | 35 + include/linux/siox.h | 77 + include/linux/siphash.h | 167 + include/linux/sirfsoc_dma.h | 7 + include/linux/sizes.h | 51 + include/linux/skb_array.h | 220 + include/linux/skbuff.h | 4251 +++++++++ include/linux/slab.h | 734 ++ include/linux/slab_def.h | 107 + include/linux/slimbus.h | 212 + include/linux/slub_def.h | 185 + include/linux/sm501-regs.h | 388 + include/linux/sm501.h | 182 + include/linux/smc911x.h | 14 + include/linux/smc91x.h | 46 + include/linux/smp.h | 224 + include/linux/smpboot.h | 49 + include/linux/smsc911x.h | 63 + include/linux/smscphy.h | 31 + include/linux/soc/actions/owl-sps.h | 11 + include/linux/soc/brcmstb/brcmstb.h | 22 + include/linux/soc/dove/pmu.h | 26 + include/linux/soc/mediatek/infracfg.h | 39 + include/linux/soc/qcom/apr.h | 128 + include/linux/soc/qcom/llcc-qcom.h | 180 + include/linux/soc/qcom/mdt_loader.h | 24 + include/linux/soc/qcom/qmi.h | 271 + include/linux/soc/qcom/smd-rpm.h | 41 + include/linux/soc/qcom/smem.h | 14 + include/linux/soc/qcom/smem_state.h | 54 + include/linux/soc/qcom/wcnss_ctrl.h | 25 + include/linux/soc/renesas/rcar-rst.h | 11 + include/linux/soc/renesas/rcar-sysc.h | 8 + include/linux/soc/samsung/exynos-pmu.h | 31 + include/linux/soc/samsung/exynos-regs-pmu.h | 666 ++ include/linux/soc/sunxi/sunxi_sram.h | 19 + include/linux/soc/ti/knav_dma.h | 193 + include/linux/soc/ti/knav_qmss.h | 91 + include/linux/soc/ti/ti-msgmgr.h | 35 + include/linux/soc/ti/ti_sci_protocol.h | 241 + include/linux/sock_diag.h | 81 + include/linux/socket.h | 387 + include/linux/sonet.h | 20 + include/linux/sony-laptop.h | 39 + include/linux/sonypi.h | 63 + include/linux/sort.h | 11 + include/linux/sound.h | 20 + include/linux/soundcard.h | 37 + include/linux/soundwire/sdw.h | 809 ++ include/linux/soundwire/sdw_intel.h | 38 + include/linux/soundwire/sdw_registers.h | 194 + 
include/linux/soundwire/sdw_type.h | 19 + include/linux/spi/ad7877.h | 25 + include/linux/spi/ads7846.h | 62 + include/linux/spi/at73c213.h | 26 + include/linux/spi/at86rf230.h | 28 + include/linux/spi/cc2520.h | 26 + include/linux/spi/corgi_lcd.h | 21 + include/linux/spi/ds1305.h | 36 + include/linux/spi/eeprom.h | 37 + include/linux/spi/flash.h | 32 + include/linux/spi/ifx_modem.h | 20 + include/linux/spi/l4f00242t03.h | 25 + include/linux/spi/libertas_spi.h | 29 + include/linux/spi/lms283gf05.h | 24 + include/linux/spi/max7301.h | 36 + include/linux/spi/mc33880.h | 11 + include/linux/spi/mcp23s08.h | 18 + include/linux/spi/mmc_spi.h | 65 + include/linux/spi/mxs-spi.h | 144 + include/linux/spi/pxa2xx_spi.h | 59 + include/linux/spi/rspi.h | 26 + include/linux/spi/s3c24xx.h | 28 + include/linux/spi/sh_hspi.h | 19 + include/linux/spi/sh_msiof.h | 21 + include/linux/spi/spi-fsl-dspi.h | 31 + include/linux/spi/spi-mem.h | 264 + include/linux/spi/spi.h | 1357 +++ include/linux/spi/spi_bitbang.h | 49 + include/linux/spi/spi_gpio.h | 25 + include/linux/spi/spi_oc_tiny.h | 21 + include/linux/spi/tdo24m.h | 14 + include/linux/spi/tle62x0.h | 20 + include/linux/spi/xilinx_spi.h | 20 + include/linux/spinlock.h | 471 + include/linux/spinlock_api_smp.h | 192 + include/linux/spinlock_api_up.h | 91 + include/linux/spinlock_types.h | 85 + include/linux/spinlock_types_up.h | 37 + include/linux/spinlock_up.h | 72 + include/linux/splice.h | 90 + include/linux/spmi.h | 190 + include/linux/sram.h | 27 + include/linux/srcu.h | 248 + include/linux/srcutiny.h | 103 + include/linux/srcutree.h | 144 + include/linux/ssb/ssb.h | 682 ++ include/linux/ssb/ssb_driver_chipcommon.h | 673 ++ include/linux/ssb/ssb_driver_extif.h | 259 + include/linux/ssb/ssb_driver_gige.h | 194 + include/linux/ssb/ssb_driver_mips.h | 71 + include/linux/ssb/ssb_driver_pci.h | 131 + include/linux/ssb/ssb_embedded.h | 19 + include/linux/ssb/ssb_regs.h | 687 ++ include/linux/ssbi.h | 43 + include/linux/stackdepot.h | 32 + include/linux/stackprotector.h | 17 + include/linux/stacktrace.h | 44 + include/linux/start_kernel.h | 13 + include/linux/stat.h | 51 + include/linux/statfs.h | 44 + include/linux/static_key.h | 1 + include/linux/stddef.h | 39 + include/linux/stm.h | 131 + include/linux/stmmac.h | 197 + include/linux/stmp3xxx_rtc_wdt.h | 15 + include/linux/stmp_device.h | 20 + include/linux/stop_machine.h | 166 + include/linux/string.h | 495 ++ include/linux/string_helpers.h | 79 + include/linux/stringhash.h | 79 + include/linux/stringify.h | 12 + include/linux/sudmac.h | 52 + include/linux/sungem_phy.h | 133 + include/linux/sunrpc/addr.h | 184 + include/linux/sunrpc/auth.h | 233 + include/linux/sunrpc/auth_gss.h | 94 + include/linux/sunrpc/bc_xprt.h | 72 + include/linux/sunrpc/cache.h | 302 + include/linux/sunrpc/clnt.h | 229 + include/linux/sunrpc/debug.h | 130 + include/linux/sunrpc/gss_api.h | 166 + include/linux/sunrpc/gss_asn1.h | 81 + include/linux/sunrpc/gss_err.h | 167 + include/linux/sunrpc/gss_krb5.h | 331 + include/linux/sunrpc/gss_krb5_enctypes.h | 4 + include/linux/sunrpc/metrics.h | 103 + include/linux/sunrpc/msg_prot.h | 221 + include/linux/sunrpc/rpc_pipe_fs.h | 136 + include/linux/sunrpc/rpc_rdma.h | 126 + include/linux/sunrpc/sched.h | 300 + include/linux/sunrpc/stats.h | 85 + include/linux/sunrpc/svc.h | 524 ++ include/linux/sunrpc/svc_rdma.h | 207 + include/linux/sunrpc/svc_xprt.h | 221 + include/linux/sunrpc/svcauth.h | 189 + include/linux/sunrpc/svcauth_gss.h | 29 + include/linux/sunrpc/svcsock.h | 75 + 
include/linux/sunrpc/timer.h | 50 + include/linux/sunrpc/types.h | 24 + include/linux/sunrpc/xdr.h | 532 ++ include/linux/sunrpc/xprt.h | 497 ++ include/linux/sunrpc/xprtmultipath.h | 72 + include/linux/sunrpc/xprtrdma.h | 73 + include/linux/sunrpc/xprtsock.h | 91 + include/linux/sunserialcore.h | 38 + include/linux/sunxi-rsb.h | 105 + include/linux/superhyway.h | 107 + include/linux/suspend.h | 542 ++ include/linux/svga.h | 125 + include/linux/sw842.h | 13 + include/linux/swab.h | 23 + include/linux/swait.h | 300 + include/linux/swap.h | 686 ++ include/linux/swap_cgroup.h | 45 + include/linux/swap_slots.h | 31 + include/linux/swapfile.h | 16 + include/linux/swapops.h | 384 + include/linux/swiotlb.h | 129 + include/linux/switchtec.h | 393 + include/linux/sxgbe_platform.h | 54 + include/linux/sync_core.h | 21 + include/linux/sync_file.h | 62 + include/linux/synclink.h | 37 + include/linux/sys.h | 30 + include/linux/sys_soc.h | 46 + include/linux/syscalls.h | 1296 +++ include/linux/syscore_ops.h | 29 + include/linux/sysctl.h | 236 + include/linux/sysfs.h | 566 ++ include/linux/syslog.h | 52 + include/linux/sysrq.h | 76 + include/linux/sysv_fs.h | 214 + include/linux/t10-pi.h | 70 + include/linux/task_io_accounting.h | 46 + include/linux/task_io_accounting_ops.h | 114 + include/linux/task_work.h | 25 + include/linux/taskstats_kern.h | 37 + include/linux/tboot.h | 162 + include/linux/tc.h | 142 + include/linux/tca6416_keypad.h | 34 + include/linux/tcp.h | 494 ++ include/linux/tee_drv.h | 468 + include/linux/textsearch.h | 179 + include/linux/textsearch_fsm.h | 49 + include/linux/tfrc.h | 55 + include/linux/thermal.h | 545 ++ include/linux/thinkpad_acpi.h | 16 + include/linux/thread_info.h | 173 + include/linux/threads.h | 46 + include/linux/thunderbolt.h | 617 ++ include/linux/ti-emif-sram.h | 144 + include/linux/ti_wilink_st.h | 452 + include/linux/tick.h | 284 + include/linux/tifm.h | 164 + include/linux/timb_dma.h | 55 + include/linux/timb_gpio.h | 37 + include/linux/time.h | 112 + include/linux/time32.h | 225 + include/linux/time64.h | 177 + include/linux/timecounter.h | 139 + include/linux/timekeeper_internal.h | 151 + include/linux/timekeeping.h | 291 + include/linux/timekeeping32.h | 111 + include/linux/timer.h | 215 + include/linux/timerfd.h | 20 + include/linux/timeriomem-rng.h | 19 + include/linux/timerqueue.h | 50 + include/linux/timex.h | 171 + include/linux/tnum.h | 83 + include/linux/topology.h | 211 + include/linux/torture.h | 107 + include/linux/toshiba.h | 25 + include/linux/tpm.h | 107 + include/linux/tpm_command.h | 29 + include/linux/tpm_eventlog.h | 124 + include/linux/trace.h | 29 + include/linux/trace_clock.h | 24 + include/linux/trace_events.h | 634 ++ include/linux/trace_seq.h | 139 + include/linux/tracefs.h | 45 + include/linux/tracehook.h | 199 + include/linux/tracepoint-defs.h | 50 + include/linux/tracepoint.h | 561 ++ include/linux/transport_class.h | 102 + include/linux/ts-nbus.h | 18 + include/linux/tsacct_kern.h | 42 + include/linux/tty.h | 803 ++ include/linux/tty_driver.h | 443 + include/linux/tty_flip.h | 43 + include/linux/tty_ldisc.h | 221 + include/linux/typecheck.h | 25 + include/linux/types.h | 234 + include/linux/u64_stats_sync.h | 175 + include/linux/uaccess.h | 310 + include/linux/ucb1400.h | 165 + include/linux/ucs2_string.h | 19 + include/linux/udp.h | 126 + include/linux/uidgid.h | 191 + include/linux/uio.h | 266 + include/linux/uio_driver.h | 143 + include/linux/ulpi/driver.h | 65 + include/linux/ulpi/interface.h | 23 + include/linux/ulpi/regs.h | 
131 + include/linux/umh.h | 81 + include/linux/unaligned/access_ok.h | 68 + include/linux/unaligned/be_byteshift.h | 71 + include/linux/unaligned/be_memmove.h | 37 + include/linux/unaligned/be_struct.h | 37 + include/linux/unaligned/generic.h | 69 + include/linux/unaligned/le_byteshift.h | 71 + include/linux/unaligned/le_memmove.h | 37 + include/linux/unaligned/le_struct.h | 37 + include/linux/unaligned/memmove.h | 46 + include/linux/unaligned/packed_struct.h | 46 + include/linux/uprobes.h | 207 + include/linux/usb.h | 2013 +++++ include/linux/usb/association.h | 151 + include/linux/usb/audio-v2.h | 469 + include/linux/usb/audio-v3.h | 454 + include/linux/usb/audio.h | 45 + include/linux/usb/c67x00.h | 49 + include/linux/usb/cdc-wdm.h | 22 + include/linux/usb/cdc.h | 52 + include/linux/usb/cdc_ncm.h | 157 + include/linux/usb/ch9.h | 65 + include/linux/usb/chipidea.h | 92 + include/linux/usb/composite.h | 645 ++ include/linux/usb/ehci-dbgp.h | 84 + include/linux/usb/ehci_def.h | 196 + include/linux/usb/ehci_pdriver.h | 64 + include/linux/usb/ezusb.h | 9 + include/linux/usb/functionfs.h | 7 + include/linux/usb/g_hid.h | 33 + include/linux/usb/gadget.h | 887 ++ include/linux/usb/gadget_configfs.h | 102 + include/linux/usb/gpio_vbus.h | 33 + include/linux/usb/hcd.h | 734 ++ include/linux/usb/input.h | 26 + include/linux/usb/iowarrior.h | 43 + include/linux/usb/irda.h | 163 + include/linux/usb/isp116x.h | 34 + include/linux/usb/isp1301.h | 81 + include/linux/usb/isp1362.h | 47 + include/linux/usb/isp1760.h | 19 + include/linux/usb/m66592.h | 47 + include/linux/usb/musb-ux500.h | 32 + include/linux/usb/musb.h | 150 + include/linux/usb/net2280.h | 447 + include/linux/usb/of.h | 72 + include/linux/usb/ohci_pdriver.h | 49 + include/linux/usb/otg-fsm.h | 325 + include/linux/usb/otg.h | 132 + include/linux/usb/pd.h | 465 + include/linux/usb/pd_ado.h | 42 + include/linux/usb/pd_bdo.h | 31 + include/linux/usb/pd_ext_sdb.h | 31 + include/linux/usb/pd_vdo.h | 251 + include/linux/usb/phy.h | 361 + include/linux/usb/phy_companion.h | 35 + include/linux/usb/quirks.h | 75 + include/linux/usb/r8a66597.h | 482 + include/linux/usb/renesas_usbhs.h | 241 + include/linux/usb/rndis_host.h | 211 + include/linux/usb/role.h | 53 + include/linux/usb/samsung_usb_phy.h | 17 + include/linux/usb/serial.h | 437 + include/linux/usb/sl811.h | 30 + include/linux/usb/storage.h | 87 + include/linux/usb/tcpm.h | 178 + include/linux/usb/tegra_usb_phy.h | 92 + include/linux/usb/typec.h | 247 + include/linux/usb/typec_altmode.h | 160 + include/linux/usb/typec_dp.h | 95 + include/linux/usb/typec_mux.h | 55 + include/linux/usb/uas.h | 110 + include/linux/usb/ulpi.h | 71 + include/linux/usb/usb338x.h | 204 + include/linux/usb/usb_phy_generic.h | 34 + include/linux/usb/usbnet.h | 291 + include/linux/usb/wusb-wa.h | 304 + include/linux/usb/wusb.h | 378 + include/linux/usb/xhci-dbgp.h | 30 + include/linux/usb_usual.h | 101 + include/linux/usbdevice_fs.h | 81 + include/linux/user-return-notifier.h | 50 + include/linux/user.h | 1 + include/linux/user_namespace.h | 175 + include/linux/userfaultfd_k.h | 145 + include/linux/util_macros.h | 41 + include/linux/uts.h | 20 + include/linux/utsname.h | 92 + include/linux/uuid.h | 96 + include/linux/uwb.h | 831 ++ include/linux/uwb/debug-cmd.h | 68 + include/linux/uwb/spec.h | 781 ++ include/linux/uwb/umc.h | 193 + include/linux/uwb/whci.h | 117 + include/linux/vbox_utils.h | 56 + include/linux/verification.h | 55 + include/linux/vermagic.h | 41 + include/linux/vexpress.h | 54 + include/linux/vfio.h 
| 201 + include/linux/vfs.h | 7 + include/linux/vga_switcheroo.h | 199 + include/linux/vgaarb.h | 158 + include/linux/via-core.h | 236 + include/linux/via-gpio.h | 14 + include/linux/via.h | 23 + include/linux/via_i2c.h | 42 + include/linux/videodev2.h | 62 + include/linux/virtio.h | 213 + include/linux/virtio_byteorder.h | 64 + include/linux/virtio_caif.h | 24 + include/linux/virtio_config.h | 445 + include/linux/virtio_console.h | 38 + include/linux/virtio_net.h | 206 + include/linux/virtio_ring.h | 115 + include/linux/virtio_vsock.h | 160 + include/linux/visorbus.h | 344 + include/linux/vlynq.h | 162 + include/linux/vm_event_item.h | 121 + include/linux/vm_sockets.h | 23 + include/linux/vmacache.h | 28 + include/linux/vmalloc.h | 210 + include/linux/vme.h | 190 + include/linux/vmpressure.h | 52 + include/linux/vmstat.h | 389 + include/linux/vmw_vmci_api.h | 83 + include/linux/vmw_vmci_defs.h | 917 ++ include/linux/vringh.h | 264 + include/linux/vt.h | 28 + include/linux/vt_buffer.h | 73 + include/linux/vt_kern.h | 198 + include/linux/vtime.h | 117 + include/linux/w1-gpio.h | 25 + include/linux/w1.h | 327 + include/linux/wait.h | 1161 +++ include/linux/wait_bit.h | 325 + include/linux/wanrouter.h | 11 + include/linux/watchdog.h | 219 + include/linux/wimax/debug.h | 526 ++ include/linux/win_minmax.h | 38 + include/linux/wireless.h | 45 + include/linux/wkup_m3_ipc.h | 64 + include/linux/wl12xx.h | 58 + include/linux/wm97xx.h | 338 + include/linux/wmi.h | 66 + include/linux/workqueue.h | 673 ++ include/linux/writeback.h | 381 + include/linux/ww_mutex.h | 372 + include/linux/xarray.h | 24 + include/linux/xattr.h | 113 + include/linux/xxhash.h | 236 + include/linux/xz.h | 264 + include/linux/yam.h | 82 + include/linux/z2_battery.h | 18 + include/linux/zbud.h | 23 + include/linux/zconf.h | 57 + include/linux/zlib.h | 593 ++ include/linux/zorro.h | 151 + include/linux/zpool.h | 113 + include/linux/zsmalloc.h | 60 + include/linux/zstd.h | 1157 +++ include/linux/zutil.h | 106 + include/math-emu/double.h | 205 + include/math-emu/op-1.h | 303 + include/math-emu/op-2.h | 613 ++ include/math-emu/op-4.h | 692 ++ include/math-emu/op-8.h | 107 + include/math-emu/op-common.h | 876 ++ include/math-emu/quad.h | 208 + include/math-emu/single.h | 116 + include/math-emu/soft-fp.h | 207 + include/media/cec-notifier.h | 157 + include/media/cec-pin.h | 75 + include/media/cec.h | 455 + include/media/davinci/ccdc_types.h | 39 + include/media/davinci/dm355_ccdc.h | 317 + include/media/davinci/dm644x_ccdc.h | 180 + include/media/davinci/isif.h | 527 ++ include/media/davinci/vpbe.h | 196 + include/media/davinci/vpbe_display.h | 130 + include/media/davinci/vpbe_osd.h | 391 + include/media/davinci/vpbe_types.h | 82 + include/media/davinci/vpbe_venc.h | 45 + include/media/davinci/vpfe_capture.h | 186 + include/media/davinci/vpfe_types.h | 47 + include/media/davinci/vpif_types.h | 88 + include/media/davinci/vpss.h | 120 + include/media/demux.h | 600 ++ include/media/dmxdev.h | 214 + include/media/drv-intf/cx2341x.h | 295 + include/media/drv-intf/cx25840.h | 188 + include/media/drv-intf/exynos-fimc.h | 162 + include/media/drv-intf/msp3400.h | 225 + include/media/drv-intf/renesas-ceu.h | 26 + include/media/drv-intf/s3c_camif.h | 45 + include/media/drv-intf/saa7146.h | 472 + include/media/drv-intf/saa7146_vv.h | 267 + include/media/drv-intf/sh_mobile_ceu.h | 29 + include/media/drv-intf/sh_vou.h | 33 + include/media/drv-intf/si476x.h | 37 + include/media/drv-intf/soc_mediabus.h | 112 + include/media/drv-intf/tea575x.h | 80 + 
include/media/dvb-usb-ids.h | 425 + include/media/dvb_ca_en50221.h | 142 + include/media/dvb_demux.h | 354 + include/media/dvb_frontend.h | 824 ++ include/media/dvb_math.h | 66 + include/media/dvb_net.h | 93 + include/media/dvb_ringbuffer.h | 280 + include/media/dvb_vb2.h | 280 + include/media/dvbdev.h | 470 + include/media/i2c/ad9389b.h | 37 + include/media/i2c/adp1653.h | 123 + include/media/i2c/adv7183.h | 43 + include/media/i2c/adv7343.h | 63 + include/media/i2c/adv7393.h | 28 + include/media/i2c/adv7511.h | 33 + include/media/i2c/adv7604.h | 157 + include/media/i2c/adv7842.h | 227 + include/media/i2c/ak881x.h | 25 + include/media/i2c/bt819.h | 36 + include/media/i2c/cs5345.h | 39 + include/media/i2c/cs53l32a.h | 34 + include/media/i2c/ir-kbd-i2c.h | 61 + include/media/i2c/lm3560.h | 93 + include/media/i2c/lm3646.h | 87 + include/media/i2c/m52790.h | 93 + include/media/i2c/m5mols.h | 33 + include/media/i2c/mt9m032.h | 31 + include/media/i2c/mt9p031.h | 17 + include/media/i2c/mt9t001.h | 10 + include/media/i2c/mt9t112.h | 27 + include/media/i2c/mt9v011.h | 17 + include/media/i2c/mt9v022.h | 16 + include/media/i2c/mt9v032.h | 12 + include/media/i2c/noon010pc30.h | 28 + include/media/i2c/ov2659.h | 34 + include/media/i2c/ov7670.h | 22 + include/media/i2c/ov772x.h | 61 + include/media/i2c/ov9650.h | 27 + include/media/i2c/rj54n1cb0c.h | 19 + include/media/i2c/s5c73m3.h | 55 + include/media/i2c/s5k4ecgx.h | 37 + include/media/i2c/s5k6aa.h | 51 + include/media/i2c/saa6588.h | 43 + include/media/i2c/saa7115.h | 140 + include/media/i2c/saa7127.h | 40 + include/media/i2c/smiapp.h | 73 + include/media/i2c/sr030pc30.h | 21 + include/media/i2c/tc358743.h | 117 + include/media/i2c/tda1997x.h | 42 + include/media/i2c/ths7303.h | 28 + include/media/i2c/tvaudio.h | 64 + include/media/i2c/tvp514x.h | 107 + include/media/i2c/tvp7002.h | 50 + include/media/i2c/tw9910.h | 47 + include/media/i2c/uda1342.h | 16 + include/media/i2c/upd64031a.h | 36 + include/media/i2c/upd64083.h | 54 + include/media/i2c/wm8775.h | 44 + include/media/imx.h | 15 + include/media/media-device.h | 478 + include/media/media-devnode.h | 176 + include/media/media-entity.h | 1088 +++ include/media/rc-core.h | 402 + include/media/rc-map.h | 285 + include/media/rcar-fcp.h | 42 + include/media/soc_camera.h | 400 + include/media/tpg/v4l2-tpg.h | 650 ++ include/media/tuner-types.h | 205 + include/media/tuner.h | 236 + include/media/tveeprom.h | 116 + include/media/v4l2-async.h | 226 + include/media/v4l2-clk.h | 76 + include/media/v4l2-common.h | 387 + include/media/v4l2-ctrls.h | 1167 +++ include/media/v4l2-dev.h | 527 ++ include/media/v4l2-device.h | 527 ++ include/media/v4l2-dv-timings.h | 243 + include/media/v4l2-event.h | 205 + include/media/v4l2-fh.h | 171 + include/media/v4l2-flash-led-class.h | 189 + include/media/v4l2-fwnode.h | 361 + include/media/v4l2-image-sizes.h | 43 + include/media/v4l2-ioctl.h | 726 ++ include/media/v4l2-mc.h | 246 + include/media/v4l2-mediabus.h | 180 + include/media/v4l2-mem2mem.h | 649 ++ include/media/v4l2-rect.h | 161 + include/media/v4l2-subdev.h | 1130 +++ include/media/videobuf-core.h | 236 + include/media/videobuf-dma-contig.h | 33 + include/media/videobuf-dma-sg.h | 105 + include/media/videobuf-vmalloc.h | 46 + include/media/videobuf2-core.h | 1147 +++ include/media/videobuf2-dma-contig.h | 32 + include/media/videobuf2-dma-sg.h | 26 + include/media/videobuf2-dvb.h | 69 + include/media/videobuf2-memops.h | 41 + include/media/videobuf2-v4l2.h | 294 + include/media/videobuf2-vmalloc.h | 20 + 
include/media/vsp1.h | 105 + include/memory/jedec_ddr.h | 175 + include/misc/altera.h | 49 + include/misc/charlcd.h | 42 + include/misc/cxl-base.h | 52 + include/misc/cxl.h | 269 + include/misc/cxllib.h | 133 + include/misc/ocxl-config.h | 45 + include/misc/ocxl.h | 223 + include/net/6lowpan.h | 330 + include/net/9p/9p.h | 581 ++ include/net/9p/client.h | 265 + include/net/9p/transport.h | 73 + include/net/Space.h | 30 + include/net/act_api.h | 230 + include/net/addrconf.h | 493 ++ include/net/af_ieee802154.h | 67 + include/net/af_rxrpc.h | 81 + include/net/af_unix.h | 87 + include/net/af_vsock.h | 218 + include/net/ah.h | 23 + include/net/arp.h | 83 + include/net/atmclip.h | 53 + include/net/ax25.h | 495 ++ include/net/ax88796.h | 45 + include/net/bluetooth/bluetooth.h | 416 + include/net/bluetooth/hci.h | 2257 +++++ include/net/bluetooth/hci_core.h | 1607 ++++ include/net/bluetooth/hci_mon.h | 67 + include/net/bluetooth/hci_sock.h | 176 + include/net/bluetooth/l2cap.h | 953 ++ include/net/bluetooth/mgmt.h | 881 ++ include/net/bluetooth/rfcomm.h | 376 + include/net/bluetooth/sco.h | 49 + include/net/bond_3ad.h | 311 + include/net/bond_alb.h | 181 + include/net/bond_options.h | 136 + include/net/bonding.h | 750 ++ include/net/busy_poll.h | 152 + include/net/caif/caif_dev.h | 128 + include/net/caif/caif_device.h | 55 + include/net/caif/caif_hsi.h | 200 + include/net/caif/caif_layer.h | 279 + include/net/caif/caif_spi.h | 155 + include/net/caif/cfcnfg.h | 90 + include/net/caif/cfctrl.h | 130 + include/net/caif/cffrml.h | 21 + include/net/caif/cfmuxl.h | 20 + include/net/caif/cfpkt.h | 232 + include/net/caif/cfserl.h | 13 + include/net/caif/cfsrvl.h | 65 + include/net/calipso.h | 91 + include/net/cfg80211-wext.h | 55 + include/net/cfg80211.h | 6642 ++++++++++++++ include/net/cfg802154.h | 416 + include/net/checksum.h | 190 + include/net/cipso_ipv4.h | 332 + include/net/cls_cgroup.h | 88 + include/net/codel.h | 164 + include/net/codel_impl.h | 255 + include/net/codel_qdisc.h | 74 + include/net/compat.h | 59 + include/net/datalink.h | 21 + include/net/dcbevent.h | 48 + include/net/dcbnl.h | 123 + include/net/devlink.h | 830 ++ include/net/dn.h | 231 + include/net/dn_dev.h | 199 + include/net/dn_fib.h | 167 + include/net/dn_neigh.h | 30 + include/net/dn_nsp.h | 203 + include/net/dn_route.h | 123 + include/net/dsa.h | 628 ++ include/net/dsfield.h | 53 + include/net/dst.h | 560 ++ include/net/dst_cache.h | 98 + include/net/dst_metadata.h | 231 + include/net/dst_ops.h | 71 + include/net/erspan.h | 303 + include/net/esp.h | 35 + include/net/ethoc.h | 23 + include/net/failover.h | 36 + include/net/fib_notifier.h | 49 + include/net/fib_rules.h | 204 + include/net/firewire.h | 26 + include/net/flow.h | 225 + include/net/flow_dissector.h | 320 + include/net/fou.h | 20 + include/net/fq.h | 105 + include/net/fq_impl.h | 337 + include/net/garp.h | 130 + include/net/gen_stats.h | 73 + include/net/genetlink.h | 379 + include/net/geneve.h | 68 + include/net/gre.h | 145 + include/net/gro_cells.h | 19 + include/net/gtp.h | 35 + include/net/gue.h | 117 + include/net/hwbm.h | 29 + include/net/icmp.h | 65 + include/net/ieee80211_radiotap.h | 338 + include/net/ieee802154_netdev.h | 349 + include/net/if_inet6.h | 273 + include/net/ife.h | 53 + include/net/ila.h | 18 + include/net/inet6_connection_sock.h | 31 + include/net/inet6_hashtables.h | 123 + include/net/inet_common.h | 57 + include/net/inet_connection_sock.h | 321 + include/net/inet_ecn.h | 245 + include/net/inet_frag.h | 168 + include/net/inet_hashtables.h | 
417 + include/net/inet_sock.h | 364 + include/net/inet_timewait_sock.h | 128 + include/net/inetpeer.h | 152 + include/net/ip.h | 709 ++ include/net/ip6_checksum.h | 105 + include/net/ip6_fib.h | 524 ++ include/net/ip6_route.h | 318 + include/net/ip6_tunnel.h | 167 + include/net/ip_fib.h | 455 + include/net/ip_tunnels.h | 546 ++ include/net/ip_vs.h | 1674 ++++ include/net/ipcomp.h | 30 + include/net/ipconfig.h | 26 + include/net/ipv6.h | 1106 +++ include/net/ipv6_frag.h | 110 + include/net/ipx.h | 171 + include/net/iucv/af_iucv.h | 164 + include/net/iucv/iucv.h | 496 ++ include/net/iw_handler.h | 552 ++ include/net/kcm.h | 201 + include/net/l3mdev.h | 277 + include/net/lag.h | 17 + include/net/lapb.h | 160 + include/net/lib80211.h | 122 + include/net/llc.h | 165 + include/net/llc_c_ac.h | 180 + include/net/llc_c_ev.h | 224 + include/net/llc_c_st.h | 48 + include/net/llc_conn.h | 119 + include/net/llc_if.h | 68 + include/net/llc_pdu.h | 444 + include/net/llc_s_ac.h | 37 + include/net/llc_s_ev.h | 66 + include/net/llc_s_st.h | 32 + include/net/llc_sap.h | 32 + include/net/lwtunnel.h | 260 + include/net/mac80211.h | 6027 +++++++++++++ include/net/mac802154.h | 498 ++ include/net/mip6.h | 53 + include/net/mld.h | 114 + include/net/mpls.h | 36 + include/net/mpls_iptunnel.h | 30 + include/net/mrp.h | 143 + include/net/ncsi.h | 70 + include/net/ndisc.h | 463 + include/net/neighbour.h | 565 ++ include/net/net_failover.h | 40 + include/net/net_namespace.h | 450 + include/net/net_ratelimit.h | 9 + include/net/netevent.h | 38 + include/net/netfilter/br_netfilter.h | 72 + include/net/netfilter/ipv4/nf_conntrack_ipv4.h | 29 + include/net/netfilter/ipv4/nf_defrag_ipv4.h | 8 + include/net/netfilter/ipv4/nf_dup_ipv4.h | 8 + include/net/netfilter/ipv4/nf_nat_masquerade.h | 15 + include/net/netfilter/ipv4/nf_reject.h | 20 + include/net/netfilter/ipv6/nf_conntrack_icmpv6.h | 21 + include/net/netfilter/ipv6/nf_conntrack_ipv6.h | 23 + include/net/netfilter/ipv6/nf_defrag_ipv6.h | 14 + include/net/netfilter/ipv6/nf_dup_ipv6.h | 8 + include/net/netfilter/ipv6/nf_nat_masquerade.h | 11 + include/net/netfilter/ipv6/nf_reject.h | 22 + include/net/netfilter/nf_conntrack.h | 331 + include/net/netfilter/nf_conntrack_acct.h | 69 + include/net/netfilter/nf_conntrack_core.h | 84 + include/net/netfilter/nf_conntrack_count.h | 36 + include/net/netfilter/nf_conntrack_ecache.h | 213 + include/net/netfilter/nf_conntrack_expect.h | 132 + include/net/netfilter/nf_conntrack_extend.h | 100 + include/net/netfilter/nf_conntrack_helper.h | 157 + include/net/netfilter/nf_conntrack_l4proto.h | 160 + include/net/netfilter/nf_conntrack_labels.h | 53 + include/net/netfilter/nf_conntrack_seqadj.h | 48 + include/net/netfilter/nf_conntrack_synproxy.h | 92 + include/net/netfilter/nf_conntrack_timeout.h | 108 + include/net/netfilter/nf_conntrack_timestamp.h | 79 + include/net/netfilter/nf_conntrack_tuple.h | 187 + include/net/netfilter/nf_conntrack_zones.h | 91 + include/net/netfilter/nf_dup_netdev.h | 8 + include/net/netfilter/nf_flow_table.h | 131 + include/net/netfilter/nf_log.h | 126 + include/net/netfilter/nf_nat.h | 82 + include/net/netfilter/nf_nat_core.h | 29 + include/net/netfilter/nf_nat_helper.h | 41 + include/net/netfilter/nf_nat_l3proto.h | 57 + include/net/netfilter/nf_nat_l4proto.h | 82 + include/net/netfilter/nf_nat_redirect.h | 13 + include/net/netfilter/nf_queue.h | 122 + include/net/netfilter/nf_socket.h | 13 + include/net/netfilter/nf_tables.h | 1408 +++ include/net/netfilter/nf_tables_core.h | 81 + 
include/net/netfilter/nf_tables_ipv4.h | 56 + include/net/netfilter/nf_tables_ipv6.h | 72 + include/net/netfilter/nf_tproxy.h | 121 + include/net/netfilter/nfnetlink_log.h | 1 + include/net/netfilter/nft_fib.h | 38 + include/net/netfilter/nft_masq.h | 22 + include/net/netfilter/nft_redir.h | 22 + include/net/netfilter/nft_reject.h | 25 + include/net/netfilter/xt_rateest.h | 27 + include/net/netlabel.h | 696 ++ include/net/netlink.h | 1423 +++ include/net/netns/can.h | 41 + include/net/netns/conntrack.h | 124 + include/net/netns/core.h | 20 + include/net/netns/dccp.h | 12 + include/net/netns/generic.h | 51 + include/net/netns/hash.h | 11 + include/net/netns/ieee802154_6lowpan.h | 22 + include/net/netns/ipv4.h | 221 + include/net/netns/ipv6.h | 117 + include/net/netns/mib.h | 29 + include/net/netns/mpls.h | 21 + include/net/netns/netfilter.h | 38 + include/net/netns/nftables.h | 16 + include/net/netns/packet.h | 16 + include/net/netns/sctp.h | 149 + include/net/netns/unix.h | 14 + include/net/netns/x_tables.h | 21 + include/net/netns/xfrm.h | 80 + include/net/netprio_cgroup.h | 60 + include/net/netrom.h | 272 + include/net/nexthop.h | 34 + include/net/nfc/digital.h | 274 + include/net/nfc/hci.h | 287 + include/net/nfc/llc.h | 48 + include/net/nfc/nci.h | 539 ++ include/net/nfc/nci_core.h | 477 + include/net/nfc/nfc.h | 364 + include/net/nl802154.h | 453 + include/net/nsh.h | 310 + include/net/p8022.h | 14 + include/net/page_pool.h | 144 + include/net/phonet/gprs.h | 38 + include/net/phonet/pep.h | 169 + include/net/phonet/phonet.h | 121 + include/net/phonet/pn_dev.h | 62 + include/net/ping.h | 98 + include/net/pkt_cls.h | 872 ++ include/net/pkt_sched.h | 154 + include/net/pptp.h | 24 + include/net/protocol.h | 120 + include/net/psample.h | 40 + include/net/psnap.h | 12 + include/net/raw.h | 77 + include/net/rawv6.h | 27 + include/net/red.h | 428 + include/net/regulatory.h | 255 + include/net/request_sock.h | 230 + include/net/rose.h | 248 + include/net/route.h | 351 + include/net/rsi_91x.h | 56 + include/net/rtnetlink.h | 173 + include/net/sch_generic.h | 1164 +++ include/net/scm.h | 145 + include/net/sctp/auth.h | 126 + include/net/sctp/checksum.h | 79 + include/net/sctp/command.h | 251 + include/net/sctp/constants.h | 431 + include/net/sctp/sctp.h | 633 ++ include/net/sctp/sm.h | 439 + include/net/sctp/stream_interleave.h | 61 + include/net/sctp/stream_sched.h | 77 + include/net/sctp/structs.h | 2180 +++++ include/net/sctp/tsnmap.h | 172 + include/net/sctp/ulpevent.h | 193 + include/net/sctp/ulpqueue.h | 84 + include/net/secure_seq.h | 22 + include/net/seg6.h | 72 + include/net/seg6_hmac.h | 62 + include/net/seg6_local.h | 34 + include/net/slhc_vj.h | 184 + include/net/smc.h | 86 + include/net/snmp.h | 201 + include/net/sock.h | 2589 ++++++ include/net/sock_reuseport.h | 59 + include/net/stp.h | 15 + include/net/strparser.h | 148 + include/net/switchdev.h | 247 + include/net/tc_act/tc_bpf.h | 28 + include/net/tc_act/tc_connmark.h | 15 + include/net/tc_act/tc_csum.h | 41 + include/net/tc_act/tc_defact.h | 14 + include/net/tc_act/tc_gact.h | 62 + include/net/tc_act/tc_ife.h | 66 + include/net/tc_act/tc_ipt.h | 17 + include/net/tc_act/tc_mirred.h | 40 + include/net/tc_act/tc_nat.h | 19 + include/net/tc_act/tc_pedit.h | 68 + include/net/tc_act/tc_sample.h | 50 + include/net/tc_act/tc_skbedit.h | 68 + include/net/tc_act/tc_skbmod.h | 30 + include/net/tc_act/tc_tunnel_key.h | 66 + include/net/tc_act/tc_vlan.h | 82 + include/net/tcp.h | 2217 +++++ include/net/tcp_states.h | 52 + 
include/net/timewait_sock.h | 40 + include/net/tipc.h | 62 + include/net/tls.h | 466 + include/net/transp_v6.h | 71 + include/net/tso.h | 24 + include/net/tun_proto.h | 49 + include/net/udp.h | 451 + include/net/udp_tunnel.h | 176 + include/net/udplite.h | 136 + include/net/vsock_addr.h | 30 + include/net/vxlan.h | 374 + include/net/wext.h | 61 + include/net/wimax.h | 518 ++ include/net/x25.h | 326 + include/net/x25device.h | 18 + include/net/xdp.h | 167 + include/net/xdp_sock.h | 102 + include/net/xfrm.h | 2100 +++++ include/pcmcia/ciscode.h | 131 + include/pcmcia/cisreg.h | 120 + include/pcmcia/cistpl.h | 580 ++ include/pcmcia/device_id.h | 284 + include/pcmcia/ds.h | 277 + include/pcmcia/ss.h | 266 + include/ras/ras_event.h | 428 + include/rdma/ib.h | 106 + include/rdma/ib_addr.h | 299 + include/rdma/ib_cache.h | 136 + include/rdma/ib_cm.h | 628 ++ include/rdma/ib_fmr_pool.h | 93 + include/rdma/ib_hdrs.h | 341 + include/rdma/ib_mad.h | 924 ++ include/rdma/ib_marshall.h | 55 + include/rdma/ib_pack.h | 311 + include/rdma/ib_pma.h | 157 + include/rdma/ib_sa.h | 668 ++ include/rdma/ib_smi.h | 175 + include/rdma/ib_umem.h | 108 + include/rdma/ib_umem_odp.h | 175 + include/rdma/ib_verbs.h | 4194 +++++++++ include/rdma/iw_cm.h | 262 + include/rdma/iw_portmap.h | 224 + include/rdma/mr_pool.h | 25 + include/rdma/opa_addr.h | 133 + include/rdma/opa_port_info.h | 418 + include/rdma/opa_smi.h | 151 + include/rdma/opa_vnic.h | 138 + include/rdma/rdma_cm.h | 426 + include/rdma/rdma_cm_ib.h | 54 + include/rdma/rdma_netlink.h | 102 + include/rdma/rdma_vt.h | 558 ++ include/rdma/rdmavt_cq.h | 100 + include/rdma/rdmavt_mr.h | 197 + include/rdma/rdmavt_qp.h | 711 ++ include/rdma/restrack.h | 197 + include/rdma/rw.h | 90 + include/rdma/uverbs_ioctl.h | 636 ++ include/rdma/uverbs_named_ioctl.h | 131 + include/rdma/uverbs_std_types.h | 144 + include/rdma/uverbs_types.h | 188 + include/scsi/fc/fc_encaps.h | 138 + include/scsi/fc/fc_fc2.h | 123 + include/scsi/fc/fc_fcoe.h | 108 + include/scsi/fc/fc_fcp.h | 216 + include/scsi/fc/fc_fip.h | 293 + include/scsi/fc/fc_ms.h | 213 + include/scsi/fc_encode.h | 739 ++ include/scsi/fc_frame.h | 261 + include/scsi/fcoe_sysfs.h | 133 + include/scsi/iscsi_if.h | 964 ++ include/scsi/iscsi_proto.h | 668 ++ include/scsi/iser.h | 78 + include/scsi/libfc.h | 1021 +++ include/scsi/libfcoe.h | 417 + include/scsi/libiscsi.h | 500 ++ include/scsi/libiscsi_tcp.h | 137 + include/scsi/libsas.h | 721 ++ include/scsi/osd_attributes.h | 398 + include/scsi/osd_initiator.h | 511 ++ include/scsi/osd_ore.h | 201 + include/scsi/osd_protocol.h | 676 ++ include/scsi/osd_sec.h | 45 + include/scsi/osd_sense.h | 263 + include/scsi/osd_types.h | 45 + include/scsi/sas.h | 727 ++ include/scsi/sas_ata.h | 105 + include/scsi/scsi.h | 283 + include/scsi/scsi_bsg_iscsi.h | 110 + include/scsi/scsi_cmnd.h | 359 + include/scsi/scsi_common.h | 78 + include/scsi/scsi_dbg.h | 87 + include/scsi/scsi_device.h | 587 ++ include/scsi/scsi_devinfo.h | 85 + include/scsi/scsi_dh.h | 102 + include/scsi/scsi_driver.h | 33 + include/scsi/scsi_eh.h | 56 + include/scsi/scsi_host.h | 896 ++ include/scsi/scsi_ioctl.h | 49 + include/scsi/scsi_proto.h | 344 + include/scsi/scsi_request.h | 33 + include/scsi/scsi_tcq.h | 47 + include/scsi/scsi_transport.h | 101 + include/scsi/scsi_transport_fc.h | 827 ++ include/scsi/scsi_transport_iscsi.h | 488 + include/scsi/scsi_transport_sas.h | 244 + include/scsi/scsi_transport_spi.h | 162 + include/scsi/scsi_transport_srp.h | 145 + include/scsi/scsicam.h | 20 + include/scsi/sg.h | 274 + 
include/scsi/srp.h | 298 + include/scsi/viosrp.h | 220 + include/soc/arc/aux.h | 63 + include/soc/arc/mcip.h | 125 + include/soc/arc/timers.h | 38 + include/soc/at91/at91sam9_ddrsdr.h | 127 + include/soc/at91/at91sam9_sdramc.h | 85 + include/soc/at91/atmel-secumod.h | 19 + include/soc/at91/atmel-sfr.h | 34 + include/soc/bcm2835/raspberrypi-firmware.h | 162 + include/soc/brcmstb/common.h | 15 + include/soc/fsl/bman.h | 137 + include/soc/fsl/dpaa2-fd.h | 438 + include/soc/fsl/dpaa2-global.h | 177 + include/soc/fsl/dpaa2-io.h | 115 + include/soc/fsl/qe/immap_qe.h | 469 + include/soc/fsl/qe/qe.h | 817 ++ include/soc/fsl/qe/qe_ic.h | 139 + include/soc/fsl/qe/qe_tdm.h | 94 + include/soc/fsl/qe/ucc.h | 68 + include/soc/fsl/qe/ucc_fast.h | 265 + include/soc/fsl/qe/ucc_slow.h | 277 + include/soc/fsl/qman.h | 1197 +++ include/soc/imx/cpuidle.h | 25 + include/soc/imx/revision.h | 37 + include/soc/imx/timer.h | 26 + include/soc/mediatek/smi.h | 58 + include/soc/nps/common.h | 172 + include/soc/nps/mtm.h | 59 + include/soc/qcom/cmd-db.h | 45 + include/soc/qcom/rpmh.h | 51 + include/soc/qcom/tcs.h | 56 + include/soc/rockchip/rockchip_sip.h | 27 + include/soc/sa1100/pwer.h | 15 + include/soc/tegra/ahb.h | 19 + include/soc/tegra/bpmp-abi.h | 1999 +++++ include/soc/tegra/bpmp.h | 206 + include/soc/tegra/common.h | 14 + include/soc/tegra/cpuidle.h | 25 + include/soc/tegra/emc.h | 19 + include/soc/tegra/flowctrl.h | 82 + include/soc/tegra/fuse.h | 72 + include/soc/tegra/ivc.h | 109 + include/soc/tegra/mc.h | 165 + include/soc/tegra/pm.h | 38 + include/soc/tegra/pmc.h | 252 + include/sound/ac97/codec.h | 116 + include/sound/ac97/compat.h | 17 + include/sound/ac97/controller.h | 83 + include/sound/ac97/regs.h | 246 + include/sound/ac97_codec.h | 417 + include/sound/aci.h | 91 + include/sound/ad1816a.h | 181 + include/sound/ad1843.h | 46 + include/sound/adau1373.h | 34 + include/sound/aess.h | 53 + include/sound/ak4113.h | 335 + include/sound/ak4114.h | 217 + include/sound/ak4117.h | 194 + include/sound/ak4531_codec.h | 85 + include/sound/ak4641.h | 26 + include/sound/ak4xxx-adda.h | 99 + include/sound/alc5623.h | 16 + include/sound/asequencer.h | 86 + include/sound/asound.h | 40 + include/sound/asoundef.h | 325 + include/sound/compress_driver.h | 190 + include/sound/control.h | 268 + include/sound/core.h | 447 + include/sound/cs35l33.h | 48 + include/sound/cs35l34.h | 35 + include/sound/cs35l35.h | 110 + include/sound/cs4231-regs.h | 187 + include/sound/cs4271.h | 40 + include/sound/cs42l52.h | 32 + include/sound/cs42l56.h | 48 + include/sound/cs42l73.h | 22 + include/sound/cs8403.h | 257 + include/sound/cs8427.h | 202 + include/sound/da7213.h | 49 + include/sound/da7218.h | 109 + include/sound/da7219-aad.h | 99 + include/sound/da7219.h | 49 + include/sound/da9055.h | 33 + include/sound/designware_i2s.h | 78 + include/sound/dmaengine_pcm.h | 163 + include/sound/emu10k1.h | 1909 ++++ include/sound/emu10k1_synth.h | 39 + include/sound/emu8000.h | 121 + include/sound/emu8000_reg.h | 207 + include/sound/emux_legacy.h | 146 + include/sound/emux_synth.h | 242 + include/sound/es1688.h | 122 + include/sound/gus.h | 631 ++ include/sound/hda_chmap.h | 79 + include/sound/hda_component.h | 61 + include/sound/hda_hwdep.h | 44 + include/sound/hda_i915.h | 27 + include/sound/hda_register.h | 317 + include/sound/hda_regmap.h | 222 + include/sound/hda_verbs.h | 556 ++ include/sound/hdaudio.h | 637 ++ include/sound/hdaudio_ext.h | 168 + include/sound/hdmi-codec.h | 112 + include/sound/hwdep.h | 82 + include/sound/i2c.h | 104 + 
include/sound/info.h | 215 + include/sound/initval.h | 104 + include/sound/jack.h | 134 + include/sound/l3.h | 28 + include/sound/max9768.h | 24 + include/sound/max98088.h | 50 + include/sound/max98090.h | 29 + include/sound/max98095.h | 66 + include/sound/memalloc.h | 157 + include/sound/minors.h | 112 + include/sound/mixer_oss.h | 81 + include/sound/mpu401.h | 138 + include/sound/omap-hdmi-audio.h | 48 + include/sound/opl3.h | 391 + include/sound/opl4.h | 32 + include/sound/pcm-indirect.h | 183 + include/sound/pcm.h | 1452 +++ include/sound/pcm_drm_eld.h | 7 + include/sound/pcm_iec958.h | 12 + include/sound/pcm_oss.h | 90 + include/sound/pcm_params.h | 384 + include/sound/pt2258.h | 37 + include/sound/pxa2xx-lib.h | 44 + include/sound/rawmidi.h | 194 + include/sound/rt286.h | 19 + include/sound/rt298.h | 20 + include/sound/rt5514.h | 22 + include/sound/rt5645.h | 33 + include/sound/rt5659.h | 50 + include/sound/rt5660.h | 31 + include/sound/rt5663.h | 25 + include/sound/rt5665.h | 47 + include/sound/rt5668.h | 40 + include/sound/rt5670.h | 29 + include/sound/rt5682.h | 40 + include/sound/s3c24xx_uda134x.h | 14 + include/sound/sb.h | 375 + include/sound/sb16_csp.h | 90 + include/sound/seq_device.h | 96 + include/sound/seq_kernel.h | 110 + include/sound/seq_midi_emul.h | 197 + include/sound/seq_midi_event.h | 52 + include/sound/seq_oss.h | 96 + include/sound/seq_oss_legacy.h | 31 + include/sound/seq_virmidi.h | 83 + include/sound/sh_dac_audio.h | 21 + include/sound/sh_fsi.h | 32 + include/sound/simple_card.h | 26 + include/sound/simple_card_utils.h | 123 + include/sound/snd_wavefront.h | 144 + include/sound/soc-acpi-intel-match.h | 28 + include/sound/soc-acpi.h | 96 + include/sound/soc-dai.h | 388 + include/sound/soc-dapm.h | 787 ++ include/sound/soc-dpcm.h | 159 + include/sound/soc-topology.h | 206 + include/sound/soc.h | 1526 ++++ include/sound/soundfont.h | 129 + include/sound/spear_dma.h | 34 + include/sound/spear_spdif.h | 29 + include/sound/sta32x.h | 43 + include/sound/sta350.h | 57 + include/sound/tas2552-plat.h | 25 + include/sound/tas5086.h | 8 + include/sound/tea6330t.h | 31 + include/sound/timer.h | 146 + include/sound/tlv.h | 60 + include/sound/tlv320aic32x4.h | 55 + include/sound/tlv320aic3x.h | 68 + include/sound/tlv320dac33-plat.h | 24 + include/sound/tpa6130a2-plat.h | 30 + include/sound/uda134x.h | 27 + include/sound/uda1380.h | 22 + include/sound/util_mem.h | 64 + include/sound/vx_core.h | 548 ++ include/sound/wavefront.h | 695 ++ include/sound/wm0010.h | 27 + include/sound/wm1250-ev1.h | 27 + include/sound/wm2000.h | 23 + include/sound/wm2200.h | 61 + include/sound/wm5100.h | 59 + include/sound/wm8903.h | 266 + include/sound/wm8904.h | 163 + include/sound/wm8955.h | 26 + include/sound/wm8960.h | 24 + include/sound/wm8962.h | 61 + include/sound/wm8993.h | 48 + include/sound/wm8996.h | 55 + include/sound/wm9081.h | 28 + include/sound/wm9090.h | 28 + include/sound/wss.h | 235 + include/target/iscsi/iscsi_target_core.h | 934 ++ include/target/iscsi/iscsi_target_stat.h | 69 + include/target/iscsi/iscsi_transport.h | 153 + include/target/target_core_backend.h | 126 + include/target/target_core_base.h | 944 ++ include/target/target_core_fabric.h | 217 + include/trace/bpf_probe.h | 92 + include/trace/define_trace.h | 127 + include/trace/events/9p.h | 174 + include/trace/events/afs.h | 623 ++ include/trace/events/alarmtimer.h | 97 + include/trace/events/asoc.h | 271 + include/trace/events/bcache.h | 483 + include/trace/events/block.h | 642 ++ include/trace/events/bridge.h | 129 
+ include/trace/events/btrfs.h | 1861 ++++ include/trace/events/cachefiles.h | 325 + include/trace/events/cgroup.h | 155 + include/trace/events/clk.h | 234 + include/trace/events/cma.h | 67 + include/trace/events/compaction.h | 359 + include/trace/events/context_tracking.h | 59 + include/trace/events/cpuhp.h | 95 + include/trace/events/devlink.h | 69 + include/trace/events/dma_fence.h | 89 + include/trace/events/ext4.h | 2654 ++++++ include/trace/events/f2fs.h | 1623 ++++ include/trace/events/fib.h | 93 + include/trace/events/fib6.h | 92 + include/trace/events/filelock.h | 209 + include/trace/events/filemap.h | 116 + include/trace/events/fs_dax.h | 286 + include/trace/events/fscache.h | 537 ++ include/trace/events/fsi.h | 128 + include/trace/events/fsi_master_ast_cf.h | 150 + include/trace/events/fsi_master_gpio.h | 171 + include/trace/events/gpio.h | 61 + include/trace/events/host1x.h | 254 + include/trace/events/hswadsp.h | 385 + include/trace/events/huge_memory.h | 168 + include/trace/events/i2c.h | 150 + include/trace/events/initcall.h | 74 + include/trace/events/intel-sst.h | 156 + include/trace/events/intel_ish.h | 31 + include/trace/events/iommu.h | 167 + include/trace/events/ipi.h | 90 + include/trace/events/irq.h | 166 + include/trace/events/irq_matrix.h | 201 + include/trace/events/jbd2.h | 386 + include/trace/events/kmem.h | 321 + include/trace/events/kvm.h | 405 + include/trace/events/libata.h | 335 + include/trace/events/lock.h | 87 + include/trace/events/mce.h | 75 + include/trace/events/mdio.h | 43 + include/trace/events/migrate.h | 76 + include/trace/events/mmc.h | 193 + include/trace/events/mmflags.h | 263 + include/trace/events/module.h | 134 + include/trace/events/napi.h | 44 + include/trace/events/net.h | 250 + include/trace/events/net_probe_common.h | 44 + include/trace/events/nilfs2.h | 225 + include/trace/events/nmi.h | 38 + include/trace/events/oom.h | 195 + include/trace/events/page_isolation.h | 39 + include/trace/events/page_ref.h | 135 + include/trace/events/pagemap.h | 88 + include/trace/events/percpu.h | 126 + include/trace/events/power.h | 535 ++ include/trace/events/power_cpu_migrate.h | 68 + include/trace/events/preemptirq.h | 78 + include/trace/events/printk.h | 37 + include/trace/events/qdisc.h | 50 + include/trace/events/rcu.h | 790 ++ include/trace/events/rdma.h | 129 + include/trace/events/regulator.h | 142 + include/trace/events/rpcrdma.h | 1552 ++++ include/trace/events/rpm.h | 101 + include/trace/events/rseq.h | 57 + include/trace/events/rtc.h | 206 + include/trace/events/rxrpc.h | 1590 ++++ include/trace/events/sched.h | 593 ++ include/trace/events/scsi.h | 364 + include/trace/events/sctp.h | 90 + include/trace/events/signal.h | 126 + include/trace/events/siox.h | 66 + include/trace/events/skb.h | 76 + include/trace/events/smbus.h | 249 + include/trace/events/sock.h | 206 + include/trace/events/spi.h | 157 + include/trace/events/spmi.h | 136 + include/trace/events/sunrpc.h | 845 ++ include/trace/events/sunvnet.h | 140 + include/trace/events/swiotlb.h | 50 + include/trace/events/syscalls.h | 74 + include/trace/events/target.h | 215 + include/trace/events/task.h | 62 + include/trace/events/tcp.h | 286 + include/trace/events/thermal.h | 212 + include/trace/events/thermal_power_allocator.h | 88 + include/trace/events/thp.h | 89 + include/trace/events/timer.h | 416 + include/trace/events/tlb.h | 62 + include/trace/events/udp.h | 33 + include/trace/events/ufs.h | 290 + include/trace/events/v4l2.h | 268 + include/trace/events/vb2.h | 69 + 
include/trace/events/vmscan.h | 471 + .../trace/events/vsock_virtio_transport_common.h | 145 + include/trace/events/wbt.h | 158 + include/trace/events/workqueue.h | 124 + include/trace/events/writeback.h | 759 ++ include/trace/events/xdp.h | 274 + include/trace/events/xen.h | 479 + include/trace/perf.h | 91 + include/trace/syscall.h | 51 + include/trace/trace_events.h | 804 ++ include/uapi/asm-generic/Kbuild.asm | 33 + include/uapi/asm-generic/auxvec.h | 8 + include/uapi/asm-generic/bitsperlong.h | 16 + include/uapi/asm-generic/bpf_perf_event.h | 9 + include/uapi/asm-generic/errno-base.h | 40 + include/uapi/asm-generic/errno.h | 123 + include/uapi/asm-generic/fcntl.h | 221 + include/uapi/asm-generic/hugetlb_encode.h | 36 + include/uapi/asm-generic/int-l64.h | 35 + include/uapi/asm-generic/int-ll64.h | 40 + include/uapi/asm-generic/ioctl.h | 107 + include/uapi/asm-generic/ioctls.h | 119 + include/uapi/asm-generic/ipcbuf.h | 35 + include/uapi/asm-generic/kvm_para.h | 4 + include/uapi/asm-generic/mman-common.h | 77 + include/uapi/asm-generic/mman.h | 24 + include/uapi/asm-generic/msgbuf.h | 47 + include/uapi/asm-generic/param.h | 20 + include/uapi/asm-generic/poll.h | 42 + include/uapi/asm-generic/posix_types.h | 98 + include/uapi/asm-generic/resource.h | 62 + include/uapi/asm-generic/sembuf.h | 45 + include/uapi/asm-generic/setup.h | 7 + include/uapi/asm-generic/shmbuf.h | 59 + include/uapi/asm-generic/shmparam.h | 7 + include/uapi/asm-generic/siginfo.h | 333 + include/uapi/asm-generic/signal-defs.h | 29 + include/uapi/asm-generic/signal.h | 122 + include/uapi/asm-generic/socket.h | 113 + include/uapi/asm-generic/sockios.h | 14 + include/uapi/asm-generic/stat.h | 73 + include/uapi/asm-generic/statfs.h | 84 + include/uapi/asm-generic/swab.h | 19 + include/uapi/asm-generic/termbits.h | 200 + include/uapi/asm-generic/termios.h | 51 + include/uapi/asm-generic/types.h | 9 + include/uapi/asm-generic/ucontext.h | 13 + include/uapi/asm-generic/unistd.h | 785 ++ include/uapi/drm/amdgpu_drm.h | 994 +++ include/uapi/drm/armada_drm.h | 56 + include/uapi/drm/drm.h | 1012 +++ include/uapi/drm/drm_fourcc.h | 588 ++ include/uapi/drm/drm_mode.h | 894 ++ include/uapi/drm/drm_sarea.h | 94 + include/uapi/drm/etnaviv_drm.h | 289 + include/uapi/drm/exynos_drm.h | 424 + include/uapi/drm/i810_drm.h | 292 + include/uapi/drm/i915_drm.h | 1724 ++++ include/uapi/drm/mga_drm.h | 427 + include/uapi/drm/msm_drm.h | 308 + include/uapi/drm/nouveau_drm.h | 152 + include/uapi/drm/omap_drm.h | 126 + include/uapi/drm/qxl_drm.h | 158 + include/uapi/drm/r128_drm.h | 336 + include/uapi/drm/radeon_drm.h | 1078 +++ include/uapi/drm/savage_drm.h | 220 + include/uapi/drm/sis_drm.h | 77 + include/uapi/drm/tegra_drm.h | 681 ++ include/uapi/drm/v3d_drm.h | 194 + include/uapi/drm/vc4_drm.h | 442 + include/uapi/drm/vgem_drm.h | 62 + include/uapi/drm/via_drm.h | 282 + include/uapi/drm/virtgpu_drm.h | 175 + include/uapi/drm/vmwgfx_drm.h | 1218 +++ include/uapi/linux/Kbuild | 13 + include/uapi/linux/a.out.h | 251 + include/uapi/linux/acct.h | 125 + include/uapi/linux/adb.h | 45 + include/uapi/linux/adfs_fs.h | 45 + include/uapi/linux/affs_hardblocks.h | 69 + include/uapi/linux/agpgart.h | 114 + include/uapi/linux/aio_abi.h | 111 + include/uapi/linux/am437x-vpfe.h | 125 + include/uapi/linux/android/binder.h | 467 + include/uapi/linux/apm_bios.h | 138 + include/uapi/linux/arcfb.h | 9 + include/uapi/linux/arm_sdei.h | 73 + include/uapi/linux/aspeed-lpc-ctrl.h | 62 + include/uapi/linux/atalk.h | 45 + include/uapi/linux/atm.h | 242 + 
include/uapi/linux/atm_eni.h | 24 + include/uapi/linux/atm_he.h | 21 + include/uapi/linux/atm_idt77105.h | 29 + include/uapi/linux/atm_nicstar.h | 54 + include/uapi/linux/atm_tcp.h | 62 + include/uapi/linux/atm_zatm.h | 47 + include/uapi/linux/atmapi.h | 30 + include/uapi/linux/atmarp.h | 42 + include/uapi/linux/atmbr2684.h | 118 + include/uapi/linux/atmclip.h | 22 + include/uapi/linux/atmdev.h | 216 + include/uapi/linux/atmioc.h | 42 + include/uapi/linux/atmlec.h | 92 + include/uapi/linux/atmmpc.h | 127 + include/uapi/linux/atmppp.h | 25 + include/uapi/linux/atmsap.h | 163 + include/uapi/linux/atmsvc.h | 56 + include/uapi/linux/audit.h | 489 + include/uapi/linux/auto_dev-ioctl.h | 216 + include/uapi/linux/auto_fs.h | 231 + include/uapi/linux/auto_fs4.h | 15 + include/uapi/linux/auxvec.h | 37 + include/uapi/linux/ax25.h | 117 + include/uapi/linux/b1lli.h | 74 + include/uapi/linux/batadv_packet.h | 635 ++ include/uapi/linux/batman_adv.h | 508 ++ include/uapi/linux/baycom.h | 40 + include/uapi/linux/bcache.h | 377 + include/uapi/linux/bcm933xx_hcs.h | 25 + include/uapi/linux/bfs_fs.h | 82 + include/uapi/linux/binfmts.h | 21 + include/uapi/linux/blkpg.h | 60 + include/uapi/linux/blktrace_api.h | 146 + include/uapi/linux/blkzoned.h | 144 + include/uapi/linux/bpf.h | 2798 ++++++ include/uapi/linux/bpf_common.h | 57 + include/uapi/linux/bpf_perf_event.h | 19 + include/uapi/linux/bpfilter.h | 21 + include/uapi/linux/bpqether.h | 40 + include/uapi/linux/bsg.h | 67 + include/uapi/linux/bt-bmc.h | 19 + include/uapi/linux/btf.h | 113 + include/uapi/linux/btrfs.h | 944 ++ include/uapi/linux/btrfs_tree.h | 973 ++ include/uapi/linux/byteorder/big_endian.h | 106 + include/uapi/linux/byteorder/little_endian.h | 106 + include/uapi/linux/caif/caif_socket.h | 195 + include/uapi/linux/caif/if_caif.h | 35 + include/uapi/linux/can.h | 202 + include/uapi/linux/can/bcm.h | 105 + include/uapi/linux/can/error.h | 125 + include/uapi/linux/can/gw.h | 209 + include/uapi/linux/can/netlink.h | 144 + include/uapi/linux/can/raw.h | 64 + include/uapi/linux/can/vxcan.h | 13 + include/uapi/linux/capability.h | 382 + include/uapi/linux/capi.h | 134 + include/uapi/linux/cciss_defs.h | 131 + include/uapi/linux/cciss_ioctl.h | 89 + include/uapi/linux/cdrom.h | 947 ++ include/uapi/linux/cec-funcs.h | 1944 ++++ include/uapi/linux/cec.h | 1053 +++ include/uapi/linux/cgroupstats.h | 72 + include/uapi/linux/chio.h | 169 + include/uapi/linux/cifs/cifs_mount.h | 28 + include/uapi/linux/cm4000_cs.h | 64 + include/uapi/linux/cn_proc.h | 134 + include/uapi/linux/coda.h | 741 ++ include/uapi/linux/coda_psdev.h | 15 + include/uapi/linux/coff.h | 352 + include/uapi/linux/connector.h | 81 + include/uapi/linux/const.h | 36 + include/uapi/linux/coresight-stm.h | 24 + include/uapi/linux/cramfs_fs.h | 113 + include/uapi/linux/cryptouser.h | 124 + include/uapi/linux/cuda.h | 34 + include/uapi/linux/cyclades.h | 494 ++ include/uapi/linux/cycx_cfm.h | 102 + include/uapi/linux/dcbnl.h | 769 ++ include/uapi/linux/dccp.h | 238 + include/uapi/linux/devlink.h | 329 + include/uapi/linux/dlm.h | 76 + include/uapi/linux/dlm_device.h | 109 + include/uapi/linux/dlm_netlink.h | 60 + include/uapi/linux/dlm_plock.h | 46 + include/uapi/linux/dlmconstants.h | 164 + include/uapi/linux/dm-ioctl.h | 363 + include/uapi/linux/dm-log-userspace.h | 432 + include/uapi/linux/dma-buf.h | 41 + include/uapi/linux/dn.h | 149 + include/uapi/linux/dqblk_xfs.h | 215 + include/uapi/linux/dvb/audio.h | 99 + include/uapi/linux/dvb/ca.h | 155 + include/uapi/linux/dvb/dmx.h | 330 + 
include/uapi/linux/dvb/frontend.h | 1011 +++ include/uapi/linux/dvb/net.h | 68 + include/uapi/linux/dvb/osd.h | 145 + include/uapi/linux/dvb/version.h | 30 + include/uapi/linux/dvb/video.h | 218 + include/uapi/linux/edd.h | 192 + include/uapi/linux/efs_fs_sb.h | 63 + include/uapi/linux/elf-em.h | 61 + include/uapi/linux/elf-fdpic.h | 35 + include/uapi/linux/elf.h | 442 + include/uapi/linux/elfcore.h | 101 + include/uapi/linux/errno.h | 1 + include/uapi/linux/errqueue.h | 54 + include/uapi/linux/erspan.h | 52 + include/uapi/linux/ethtool.h | 1848 ++++ include/uapi/linux/eventpoll.h | 94 + include/uapi/linux/fadvise.h | 22 + include/uapi/linux/falloc.h | 80 + include/uapi/linux/fanotify.h | 120 + include/uapi/linux/fb.h | 403 + include/uapi/linux/fcntl.h | 94 + include/uapi/linux/fd.h | 384 + include/uapi/linux/fdreg.h | 138 + include/uapi/linux/fib_rules.h | 90 + include/uapi/linux/fiemap.h | 70 + include/uapi/linux/filter.h | 90 + include/uapi/linux/firewire-cdev.h | 1039 +++ include/uapi/linux/firewire-constants.h | 92 + include/uapi/linux/flat.h | 59 + include/uapi/linux/fou.h | 42 + include/uapi/linux/fpga-dfl.h | 179 + include/uapi/linux/fs.h | 393 + include/uapi/linux/fsi.h | 58 + include/uapi/linux/fsl_hypervisor.h | 221 + include/uapi/linux/fsmap.h | 113 + include/uapi/linux/fuse.h | 797 ++ include/uapi/linux/futex.h | 153 + include/uapi/linux/gameport.h | 29 + include/uapi/linux/gen_stats.h | 80 + include/uapi/linux/genetlink.h | 89 + include/uapi/linux/genwqe/genwqe_card.h | 502 ++ include/uapi/linux/gfs2_ondisk.h | 535 ++ include/uapi/linux/gigaset_dev.h | 39 + include/uapi/linux/gpio.h | 158 + include/uapi/linux/gsmmux.h | 41 + include/uapi/linux/gtp.h | 35 + include/uapi/linux/hash_info.h | 39 + include/uapi/linux/hdlc.h | 24 + include/uapi/linux/hdlc/ioctl.h | 85 + include/uapi/linux/hdlcdrv.h | 111 + include/uapi/linux/hdreg.h | 659 ++ include/uapi/linux/hid.h | 67 + include/uapi/linux/hiddev.h | 213 + include/uapi/linux/hidraw.h | 51 + include/uapi/linux/hpet.h | 26 + include/uapi/linux/hsi/cs-protocol.h | 120 + include/uapi/linux/hsi/hsi_char.h | 65 + include/uapi/linux/hsr_netlink.h | 51 + include/uapi/linux/hw_breakpoint.h | 35 + include/uapi/linux/hyperv.h | 400 + include/uapi/linux/hysdn_if.h | 34 + include/uapi/linux/i2c-dev.h | 75 + include/uapi/linux/i2c.h | 158 + include/uapi/linux/i2o-dev.h | 422 + include/uapi/linux/i8k.h | 48 + include/uapi/linux/icmp.h | 99 + include/uapi/linux/icmpv6.h | 167 + include/uapi/linux/if.h | 295 + include/uapi/linux/if_addr.h | 71 + include/uapi/linux/if_addrlabel.h | 33 + include/uapi/linux/if_alg.h | 59 + include/uapi/linux/if_arcnet.h | 130 + include/uapi/linux/if_arp.h | 164 + include/uapi/linux/if_bonding.h | 131 + include/uapi/linux/if_bridge.h | 295 + include/uapi/linux/if_cablemodem.h | 23 + include/uapi/linux/if_eql.h | 55 + include/uapi/linux/if_ether.h | 169 + include/uapi/linux/if_fc.h | 52 + include/uapi/linux/if_fddi.h | 107 + include/uapi/linux/if_frad.h | 123 + include/uapi/linux/if_hippi.h | 154 + include/uapi/linux/if_infiniband.h | 30 + include/uapi/linux/if_link.h | 1002 +++ include/uapi/linux/if_ltalk.h | 10 + include/uapi/linux/if_macsec.h | 177 + include/uapi/linux/if_packet.h | 303 + include/uapi/linux/if_phonet.h | 17 + include/uapi/linux/if_plip.h | 28 + include/uapi/linux/if_ppp.h | 1 + include/uapi/linux/if_pppol2tp.h | 105 + include/uapi/linux/if_pppox.h | 160 + include/uapi/linux/if_slip.h | 31 + include/uapi/linux/if_team.h | 108 + include/uapi/linux/if_tun.h | 112 + include/uapi/linux/if_tunnel.h | 163 
+ include/uapi/linux/if_vlan.h | 65 + include/uapi/linux/if_x25.h | 27 + include/uapi/linux/if_xdp.h | 78 + include/uapi/linux/ife.h | 19 + include/uapi/linux/igmp.h | 129 + include/uapi/linux/iio/events.h | 43 + include/uapi/linux/iio/types.h | 109 + include/uapi/linux/ila.h | 68 + include/uapi/linux/in.h | 304 + include/uapi/linux/in6.h | 298 + include/uapi/linux/in_route.h | 33 + include/uapi/linux/inet_diag.h | 205 + include/uapi/linux/inotify.h | 84 + include/uapi/linux/input-event-codes.h | 853 ++ include/uapi/linux/input.h | 514 ++ include/uapi/linux/ioctl.h | 8 + include/uapi/linux/ip.h | 177 + include/uapi/linux/ip6_tunnel.h | 56 + include/uapi/linux/ip_vs.h | 455 + include/uapi/linux/ipc.h | 82 + include/uapi/linux/ipmi.h | 429 + include/uapi/linux/ipmi_bmc.h | 16 + include/uapi/linux/ipmi_msgdefs.h | 102 + include/uapi/linux/ipsec.h | 48 + include/uapi/linux/ipv6.h | 194 + include/uapi/linux/ipv6_route.h | 64 + include/uapi/linux/ipx.h | 87 + include/uapi/linux/irqnr.h | 4 + include/uapi/linux/isdn.h | 144 + include/uapi/linux/isdn/capicmd.h | 117 + include/uapi/linux/isdn_divertif.h | 31 + include/uapi/linux/isdn_ppp.h | 68 + include/uapi/linux/isdnif.h | 57 + include/uapi/linux/iso_fs.h | 166 + include/uapi/linux/ivtv.h | 74 + include/uapi/linux/ivtvfb.h | 38 + include/uapi/linux/jffs2.h | 223 + include/uapi/linux/joystick.h | 133 + include/uapi/linux/kcm.h | 41 + include/uapi/linux/kcmp.h | 28 + include/uapi/linux/kcov.h | 35 + include/uapi/linux/kd.h | 184 + include/uapi/linux/kdev_t.h | 14 + include/uapi/linux/kernel-page-flags.h | 40 + include/uapi/linux/kernel.h | 8 + include/uapi/linux/kernelcapi.h | 48 + include/uapi/linux/kexec.h | 62 + include/uapi/linux/keyboard.h | 465 + include/uapi/linux/keyctl.h | 85 + include/uapi/linux/kfd_ioctl.h | 481 + include/uapi/linux/kvm.h | 1555 ++++ include/uapi/linux/kvm_para.h | 37 + include/uapi/linux/l2tp.h | 201 + include/uapi/linux/libc-compat.h | 267 + include/uapi/linux/lightnvm.h | 225 + include/uapi/linux/limits.h | 21 + include/uapi/linux/lirc.h | 223 + include/uapi/linux/llc.h | 86 + include/uapi/linux/loop.h | 98 + include/uapi/linux/lp.h | 111 + include/uapi/linux/lwtunnel.h | 71 + include/uapi/linux/magic.h | 93 + include/uapi/linux/major.h | 180 + include/uapi/linux/map_to_7segment.h | 188 + include/uapi/linux/matroxfb.h | 43 + include/uapi/linux/max2175.h | 29 + include/uapi/linux/mdio.h | 298 + include/uapi/linux/media-bus-format.h | 156 + include/uapi/linux/media.h | 427 + include/uapi/linux/mei.h | 130 + include/uapi/linux/membarrier.h | 139 + include/uapi/linux/memfd.h | 35 + include/uapi/linux/mempolicy.h | 66 + include/uapi/linux/meye.h | 65 + include/uapi/linux/mic_common.h | 235 + include/uapi/linux/mic_ioctl.h | 77 + include/uapi/linux/mii.h | 164 + include/uapi/linux/minix_fs.h | 107 + include/uapi/linux/mman.h | 38 + include/uapi/linux/mmc/ioctl.h | 76 + include/uapi/linux/mmtimer.h | 57 + include/uapi/linux/module.h | 9 + include/uapi/linux/mpls.h | 77 + include/uapi/linux/mpls_iptunnel.h | 31 + include/uapi/linux/mqueue.h | 56 + include/uapi/linux/mroute.h | 179 + include/uapi/linux/mroute6.h | 149 + include/uapi/linux/msdos_fs.h | 202 + include/uapi/linux/msg.h | 90 + include/uapi/linux/mtio.h | 209 + include/uapi/linux/n_r3964.h | 99 + include/uapi/linux/nbd-netlink.h | 99 + include/uapi/linux/nbd.h | 89 + include/uapi/linux/ncsi.h | 115 + include/uapi/linux/ndctl.h | 255 + include/uapi/linux/neighbour.h | 172 + include/uapi/linux/net.h | 58 + include/uapi/linux/net_dropmon.h | 65 + 
include/uapi/linux/net_namespace.h | 24 + include/uapi/linux/net_tstamp.h | 162 + include/uapi/linux/netconf.h | 30 + include/uapi/linux/netdevice.h | 66 + include/uapi/linux/netfilter.h | 80 + include/uapi/linux/netfilter/ipset/ip_set.h | 305 + include/uapi/linux/netfilter/ipset/ip_set_bitmap.h | 16 + include/uapi/linux/netfilter/ipset/ip_set_hash.h | 24 + include/uapi/linux/netfilter/ipset/ip_set_list.h | 24 + include/uapi/linux/netfilter/nf_conntrack_common.h | 149 + include/uapi/linux/netfilter/nf_conntrack_ftp.h | 19 + include/uapi/linux/netfilter/nf_conntrack_sctp.h | 22 + include/uapi/linux/netfilter/nf_conntrack_tcp.h | 58 + .../linux/netfilter/nf_conntrack_tuple_common.h | 46 + include/uapi/linux/netfilter/nf_log.h | 15 + include/uapi/linux/netfilter/nf_nat.h | 53 + include/uapi/linux/netfilter/nf_tables.h | 1678 ++++ include/uapi/linux/netfilter/nf_tables_compat.h | 39 + include/uapi/linux/netfilter/nfnetlink.h | 81 + include/uapi/linux/netfilter/nfnetlink_acct.h | 46 + include/uapi/linux/netfilter/nfnetlink_compat.h | 64 + include/uapi/linux/netfilter/nfnetlink_conntrack.h | 279 + include/uapi/linux/netfilter/nfnetlink_cthelper.h | 56 + include/uapi/linux/netfilter/nfnetlink_cttimeout.h | 119 + include/uapi/linux/netfilter/nfnetlink_log.h | 101 + include/uapi/linux/netfilter/nfnetlink_osf.h | 120 + include/uapi/linux/netfilter/nfnetlink_queue.h | 128 + include/uapi/linux/netfilter/x_tables.h | 188 + include/uapi/linux/netfilter/xt_AUDIT.h | 31 + include/uapi/linux/netfilter/xt_CHECKSUM.h | 21 + include/uapi/linux/netfilter/xt_CLASSIFY.h | 11 + include/uapi/linux/netfilter/xt_CONNMARK.h | 7 + include/uapi/linux/netfilter/xt_CONNSECMARK.h | 16 + include/uapi/linux/netfilter/xt_CT.h | 42 + include/uapi/linux/netfilter/xt_DSCP.h | 27 + include/uapi/linux/netfilter/xt_HMARK.h | 52 + include/uapi/linux/netfilter/xt_IDLETIMER.h | 46 + include/uapi/linux/netfilter/xt_LED.h | 16 + include/uapi/linux/netfilter/xt_LOG.h | 20 + include/uapi/linux/netfilter/xt_MARK.h | 7 + include/uapi/linux/netfilter/xt_NFLOG.h | 25 + include/uapi/linux/netfilter/xt_NFQUEUE.h | 39 + include/uapi/linux/netfilter/xt_RATEEST.h | 17 + include/uapi/linux/netfilter/xt_SECMARK.h | 29 + include/uapi/linux/netfilter/xt_SYNPROXY.h | 19 + include/uapi/linux/netfilter/xt_TCPMSS.h | 13 + include/uapi/linux/netfilter/xt_TCPOPTSTRIP.h | 16 + include/uapi/linux/netfilter/xt_TEE.h | 15 + include/uapi/linux/netfilter/xt_TPROXY.h | 25 + include/uapi/linux/netfilter/xt_addrtype.h | 45 + include/uapi/linux/netfilter/xt_bpf.h | 42 + include/uapi/linux/netfilter/xt_cgroup.h | 41 + include/uapi/linux/netfilter/xt_cluster.h | 20 + include/uapi/linux/netfilter/xt_comment.h | 11 + include/uapi/linux/netfilter/xt_connbytes.h | 27 + include/uapi/linux/netfilter/xt_connlabel.h | 13 + include/uapi/linux/netfilter/xt_connlimit.h | 33 + include/uapi/linux/netfilter/xt_connmark.h | 42 + include/uapi/linux/netfilter/xt_conntrack.h | 79 + include/uapi/linux/netfilter/xt_cpu.h | 12 + include/uapi/linux/netfilter/xt_dccp.h | 26 + include/uapi/linux/netfilter/xt_devgroup.h | 22 + include/uapi/linux/netfilter/xt_dscp.h | 32 + include/uapi/linux/netfilter/xt_ecn.h | 36 + include/uapi/linux/netfilter/xt_esp.h | 16 + include/uapi/linux/netfilter/xt_hashlimit.h | 123 + include/uapi/linux/netfilter/xt_helper.h | 9 + include/uapi/linux/netfilter/xt_ipcomp.h | 17 + include/uapi/linux/netfilter/xt_iprange.h | 21 + include/uapi/linux/netfilter/xt_ipvs.h | 31 + include/uapi/linux/netfilter/xt_l2tp.h | 28 + include/uapi/linux/netfilter/xt_length.h | 12 + 
include/uapi/linux/netfilter/xt_limit.h | 25 + include/uapi/linux/netfilter/xt_mac.h | 11 + include/uapi/linux/netfilter/xt_mark.h | 16 + include/uapi/linux/netfilter/xt_multiport.h | 30 + include/uapi/linux/netfilter/xt_nfacct.h | 19 + include/uapi/linux/netfilter/xt_osf.h | 51 + include/uapi/linux/netfilter/xt_owner.h | 19 + include/uapi/linux/netfilter/xt_physdev.h | 24 + include/uapi/linux/netfilter/xt_pkttype.h | 9 + include/uapi/linux/netfilter/xt_policy.h | 72 + include/uapi/linux/netfilter/xt_quota.h | 23 + include/uapi/linux/netfilter/xt_rateest.h | 39 + include/uapi/linux/netfilter/xt_realm.h | 13 + include/uapi/linux/netfilter/xt_recent.h | 47 + include/uapi/linux/netfilter/xt_rpfilter.h | 24 + include/uapi/linux/netfilter/xt_sctp.h | 93 + include/uapi/linux/netfilter/xt_set.h | 94 + include/uapi/linux/netfilter/xt_socket.h | 30 + include/uapi/linux/netfilter/xt_state.h | 13 + include/uapi/linux/netfilter/xt_statistic.h | 37 + include/uapi/linux/netfilter/xt_string.h | 35 + include/uapi/linux/netfilter/xt_tcpmss.h | 12 + include/uapi/linux/netfilter/xt_tcpudp.h | 37 + include/uapi/linux/netfilter/xt_time.h | 33 + include/uapi/linux/netfilter/xt_u32.h | 43 + include/uapi/linux/netfilter_arp.h | 23 + include/uapi/linux/netfilter_arp/arp_tables.h | 208 + include/uapi/linux/netfilter_arp/arpt_mangle.h | 27 + include/uapi/linux/netfilter_bridge.h | 44 + include/uapi/linux/netfilter_bridge/ebt_802_3.h | 64 + include/uapi/linux/netfilter_bridge/ebt_among.h | 65 + include/uapi/linux/netfilter_bridge/ebt_arp.h | 38 + include/uapi/linux/netfilter_bridge/ebt_arpreply.h | 13 + include/uapi/linux/netfilter_bridge/ebt_ip.h | 54 + include/uapi/linux/netfilter_bridge/ebt_ip6.h | 52 + include/uapi/linux/netfilter_bridge/ebt_limit.h | 25 + include/uapi/linux/netfilter_bridge/ebt_log.h | 21 + include/uapi/linux/netfilter_bridge/ebt_mark_m.h | 17 + include/uapi/linux/netfilter_bridge/ebt_mark_t.h | 24 + include/uapi/linux/netfilter_bridge/ebt_nat.h | 16 + include/uapi/linux/netfilter_bridge/ebt_nflog.h | 24 + include/uapi/linux/netfilter_bridge/ebt_pkttype.h | 13 + include/uapi/linux/netfilter_bridge/ebt_redirect.h | 11 + include/uapi/linux/netfilter_bridge/ebt_stp.h | 47 + include/uapi/linux/netfilter_bridge/ebt_vlan.h | 23 + include/uapi/linux/netfilter_bridge/ebtables.h | 285 + include/uapi/linux/netfilter_decnet.h | 82 + include/uapi/linux/netfilter_ipv4.h | 83 + include/uapi/linux/netfilter_ipv4/ip_tables.h | 231 + include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h | 38 + include/uapi/linux/netfilter_ipv4/ipt_ECN.h | 34 + include/uapi/linux/netfilter_ipv4/ipt_LOG.h | 22 + include/uapi/linux/netfilter_ipv4/ipt_REJECT.h | 21 + include/uapi/linux/netfilter_ipv4/ipt_TTL.h | 24 + include/uapi/linux/netfilter_ipv4/ipt_ah.h | 18 + include/uapi/linux/netfilter_ipv4/ipt_ecn.h | 16 + include/uapi/linux/netfilter_ipv4/ipt_ttl.h | 24 + include/uapi/linux/netfilter_ipv6.h | 81 + include/uapi/linux/netfilter_ipv6/ip6_tables.h | 272 + include/uapi/linux/netfilter_ipv6/ip6t_HL.h | 25 + include/uapi/linux/netfilter_ipv6/ip6t_LOG.h | 22 + include/uapi/linux/netfilter_ipv6/ip6t_NPT.h | 17 + include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h | 23 + include/uapi/linux/netfilter_ipv6/ip6t_ah.h | 23 + include/uapi/linux/netfilter_ipv6/ip6t_frag.h | 26 + include/uapi/linux/netfilter_ipv6/ip6t_hl.h | 25 + .../uapi/linux/netfilter_ipv6/ip6t_ipv6header.h | 29 + include/uapi/linux/netfilter_ipv6/ip6t_mh.h | 17 + include/uapi/linux/netfilter_ipv6/ip6t_opts.h | 25 + include/uapi/linux/netfilter_ipv6/ip6t_rt.h | 34 + 
include/uapi/linux/netfilter_ipv6/ip6t_srh.h | 96 + include/uapi/linux/netlink.h | 251 + include/uapi/linux/netlink_diag.h | 67 + include/uapi/linux/netrom.h | 37 + include/uapi/linux/nfc.h | 317 + include/uapi/linux/nfs.h | 135 + include/uapi/linux/nfs2.h | 68 + include/uapi/linux/nfs3.h | 98 + include/uapi/linux/nfs4.h | 183 + include/uapi/linux/nfs4_mount.h | 72 + include/uapi/linux/nfs_fs.h | 62 + include/uapi/linux/nfs_idmap.h | 65 + include/uapi/linux/nfs_mount.h | 78 + include/uapi/linux/nfsacl.h | 31 + include/uapi/linux/nfsd/cld.h | 59 + include/uapi/linux/nfsd/debug.h | 34 + include/uapi/linux/nfsd/export.h | 66 + include/uapi/linux/nfsd/nfsfh.h | 105 + include/uapi/linux/nfsd/stats.h | 18 + include/uapi/linux/nilfs2_api.h | 293 + include/uapi/linux/nilfs2_ondisk.h | 651 ++ include/uapi/linux/nl80211.h | 5801 ++++++++++++ include/uapi/linux/nsfs.h | 19 + include/uapi/linux/nubus.h | 224 + include/uapi/linux/nvme_ioctl.h | 67 + include/uapi/linux/nvram.h | 17 + include/uapi/linux/omap3isp.h | 669 ++ include/uapi/linux/omapfb.h | 223 + include/uapi/linux/oom.h | 21 + include/uapi/linux/openvswitch.h | 971 ++ include/uapi/linux/packet_diag.h | 81 + include/uapi/linux/param.h | 7 + include/uapi/linux/parport.h | 95 + include/uapi/linux/patchkey.h | 38 + include/uapi/linux/pci.h | 42 + include/uapi/linux/pci_regs.h | 1051 +++ include/uapi/linux/pcitest.h | 23 + include/uapi/linux/perf_event.h | 1130 +++ include/uapi/linux/personality.h | 70 + include/uapi/linux/pfkeyv2.h | 384 + include/uapi/linux/pg.h | 64 + include/uapi/linux/phantom.h | 50 + include/uapi/linux/phonet.h | 186 + include/uapi/linux/pkt_cls.h | 610 ++ include/uapi/linux/pkt_sched.h | 1089 +++ include/uapi/linux/pktcdvd.h | 112 + include/uapi/linux/pmu.h | 140 + include/uapi/linux/poll.h | 1 + include/uapi/linux/posix_acl.h | 40 + include/uapi/linux/posix_acl_xattr.h | 39 + include/uapi/linux/posix_types.h | 38 + include/uapi/linux/ppdev.h | 100 + include/uapi/linux/ppp-comp.h | 94 + include/uapi/linux/ppp-ioctl.h | 121 + include/uapi/linux/ppp_defs.h | 151 + include/uapi/linux/pps.h | 151 + include/uapi/linux/pr.h | 51 + include/uapi/linux/prctl.h | 223 + include/uapi/linux/psample.h | 36 + include/uapi/linux/psci.h | 112 + include/uapi/linux/psp-sev.h | 154 + include/uapi/linux/ptp_clock.h | 147 + include/uapi/linux/ptrace.h | 110 + include/uapi/linux/qemu_fw_cfg.h | 97 + include/uapi/linux/qnx4_fs.h | 89 + include/uapi/linux/qnxtypes.h | 29 + include/uapi/linux/qrtr.h | 49 + include/uapi/linux/quota.h | 199 + include/uapi/linux/radeonfb.h | 16 + include/uapi/linux/raid/md_p.h | 433 + include/uapi/linux/raid/md_u.h | 156 + include/uapi/linux/random.h | 58 + include/uapi/linux/raw.h | 19 + include/uapi/linux/rds.h | 404 + include/uapi/linux/reboot.h | 40 + include/uapi/linux/reiserfs_fs.h | 27 + include/uapi/linux/reiserfs_xattr.h | 25 + include/uapi/linux/resource.h | 81 + include/uapi/linux/rfkill.h | 111 + include/uapi/linux/rio_cm_cdev.h | 79 + include/uapi/linux/rio_mport_cdev.h | 278 + include/uapi/linux/romfs_fs.h | 60 + include/uapi/linux/rose.h | 91 + include/uapi/linux/route.h | 70 + include/uapi/linux/rpmsg.h | 27 + include/uapi/linux/rseq.h | 147 + include/uapi/linux/rtc.h | 108 + include/uapi/linux/rtnetlink.h | 751 ++ include/uapi/linux/rxrpc.h | 125 + include/uapi/linux/scc.h | 173 + include/uapi/linux/sched.h | 58 + include/uapi/linux/sched/types.h | 75 + include/uapi/linux/scif_ioctl.h | 216 + include/uapi/linux/screen_info.h | 76 + include/uapi/linux/sctp.h | 1162 +++ include/uapi/linux/sdla.h | 117 + 
include/uapi/linux/seccomp.h | 63 + include/uapi/linux/securebits.h | 61 + include/uapi/linux/sed-opal.h | 120 + include/uapi/linux/seg6.h | 55 + include/uapi/linux/seg6_genl.h | 33 + include/uapi/linux/seg6_hmac.h | 23 + include/uapi/linux/seg6_iptunnel.h | 61 + include/uapi/linux/seg6_local.h | 80 + include/uapi/linux/selinux_netlink.h | 51 + include/uapi/linux/sem.h | 94 + include/uapi/linux/serial.h | 135 + include/uapi/linux/serial_core.h | 284 + include/uapi/linux/serial_reg.h | 381 + include/uapi/linux/serio.h | 86 + include/uapi/linux/shm.h | 111 + include/uapi/linux/signal.h | 16 + include/uapi/linux/signalfd.h | 57 + include/uapi/linux/smc.h | 36 + include/uapi/linux/smc_diag.h | 112 + include/uapi/linux/smiapp.h | 30 + include/uapi/linux/snmp.h | 324 + include/uapi/linux/sock_diag.h | 39 + include/uapi/linux/socket.h | 22 + include/uapi/linux/sockios.h | 153 + include/uapi/linux/sonet.h | 61 + include/uapi/linux/sonypi.h | 147 + include/uapi/linux/sound.h | 32 + include/uapi/linux/soundcard.h | 1282 +++ include/uapi/linux/spi/spidev.h | 143 + include/uapi/linux/stat.h | 174 + include/uapi/linux/stddef.h | 6 + include/uapi/linux/stm.h | 46 + include/uapi/linux/string.h | 10 + include/uapi/linux/sunrpc/debug.h | 49 + include/uapi/linux/suspend_ioctls.h | 34 + include/uapi/linux/swab.h | 305 + include/uapi/linux/switchtec_ioctl.h | 137 + include/uapi/linux/sync_file.h | 98 + include/uapi/linux/synclink.h | 301 + include/uapi/linux/sysctl.h | 917 ++ include/uapi/linux/sysinfo.h | 25 + include/uapi/linux/target_core_user.h | 161 + include/uapi/linux/taskstats.h | 214 + include/uapi/linux/tc_act/tc_bpf.h | 37 + include/uapi/linux/tc_act/tc_connmark.h | 24 + include/uapi/linux/tc_act/tc_csum.h | 35 + include/uapi/linux/tc_act/tc_defact.h | 21 + include/uapi/linux/tc_act/tc_gact.h | 34 + include/uapi/linux/tc_act/tc_ife.h | 33 + include/uapi/linux/tc_act/tc_ipt.h | 23 + include/uapi/linux/tc_act/tc_mirred.h | 29 + include/uapi/linux/tc_act/tc_nat.h | 29 + include/uapi/linux/tc_act/tc_pedit.h | 72 + include/uapi/linux/tc_act/tc_sample.h | 27 + include/uapi/linux/tc_act/tc_skbedit.h | 54 + include/uapi/linux/tc_act/tc_skbmod.h | 40 + include/uapi/linux/tc_act/tc_tunnel_key.h | 72 + include/uapi/linux/tc_act/tc_vlan.h | 39 + include/uapi/linux/tc_ematch/tc_em_cmp.h | 26 + include/uapi/linux/tc_ematch/tc_em_ipt.h | 20 + include/uapi/linux/tc_ematch/tc_em_meta.h | 93 + include/uapi/linux/tc_ematch/tc_em_nbyte.h | 14 + include/uapi/linux/tc_ematch/tc_em_text.h | 20 + include/uapi/linux/tcp.h | 302 + include/uapi/linux/tcp_metrics.h | 61 + include/uapi/linux/tee.h | 384 + include/uapi/linux/termios.h | 23 + include/uapi/linux/thermal.h | 36 + include/uapi/linux/time.h | 100 + include/uapi/linux/timerfd.h | 37 + include/uapi/linux/times.h | 14 + include/uapi/linux/timex.h | 166 + include/uapi/linux/tiocl.h | 40 + include/uapi/linux/tipc.h | 290 + include/uapi/linux/tipc_config.h | 417 + include/uapi/linux/tipc_netlink.h | 331 + include/uapi/linux/tipc_sockets_diag.h | 17 + include/uapi/linux/tls.h | 78 + include/uapi/linux/toshiba.h | 64 + include/uapi/linux/tty.h | 42 + include/uapi/linux/tty_flags.h | 97 + include/uapi/linux/types.h | 55 + include/uapi/linux/udf_fs_i.h | 22 + include/uapi/linux/udp.h | 45 + include/uapi/linux/uhid.h | 200 + include/uapi/linux/uinput.h | 232 + include/uapi/linux/uio.h | 31 + include/uapi/linux/uleds.h | 25 + include/uapi/linux/ultrasound.h | 104 + include/uapi/linux/un.h | 16 + include/uapi/linux/unistd.h | 10 + include/uapi/linux/unix_diag.h | 59 + 
include/uapi/linux/usb/audio.h | 638 ++ include/uapi/linux/usb/cdc-wdm.h | 24 + include/uapi/linux/usb/cdc.h | 448 + include/uapi/linux/usb/ch11.h | 307 + include/uapi/linux/usb/ch9.h | 1230 +++ include/uapi/linux/usb/charger.h | 31 + include/uapi/linux/usb/functionfs.h | 293 + include/uapi/linux/usb/g_printer.h | 36 + include/uapi/linux/usb/g_uvc.h | 39 + include/uapi/linux/usb/gadgetfs.h | 89 + include/uapi/linux/usb/midi.h | 113 + include/uapi/linux/usb/tmc.h | 80 + include/uapi/linux/usb/video.h | 570 ++ include/uapi/linux/usbdevice_fs.h | 201 + include/uapi/linux/usbip.h | 27 + include/uapi/linux/userfaultfd.h | 234 + include/uapi/linux/userio.h | 45 + include/uapi/linux/utime.h | 12 + include/uapi/linux/utsname.h | 35 + include/uapi/linux/uuid.h | 42 + include/uapi/linux/uvcvideo.h | 99 + include/uapi/linux/v4l2-common.h | 108 + include/uapi/linux/v4l2-controls.h | 1095 +++ include/uapi/linux/v4l2-dv-timings.h | 979 +++ include/uapi/linux/v4l2-mediabus.h | 141 + include/uapi/linux/v4l2-subdev.h | 185 + include/uapi/linux/vbox_err.h | 151 + include/uapi/linux/vbox_vmmdev_types.h | 226 + include/uapi/linux/vboxguest.h | 330 + include/uapi/linux/veth.h | 13 + include/uapi/linux/vfio.h | 819 ++ include/uapi/linux/vfio_ccw.h | 25 + include/uapi/linux/vhost.h | 228 + include/uapi/linux/videodev2.h | 2430 +++++ include/uapi/linux/virtio_9p.h | 44 + include/uapi/linux/virtio_balloon.h | 103 + include/uapi/linux/virtio_blk.h | 149 + include/uapi/linux/virtio_config.h | 82 + include/uapi/linux/virtio_console.h | 78 + include/uapi/linux/virtio_crypto.h | 450 + include/uapi/linux/virtio_gpu.h | 317 + include/uapi/linux/virtio_ids.h | 47 + include/uapi/linux/virtio_input.h | 76 + include/uapi/linux/virtio_mmio.h | 141 + include/uapi/linux/virtio_net.h | 264 + include/uapi/linux/virtio_pci.h | 199 + include/uapi/linux/virtio_ring.h | 174 + include/uapi/linux/virtio_rng.h | 8 + include/uapi/linux/virtio_scsi.h | 172 + include/uapi/linux/virtio_types.h | 46 + include/uapi/linux/virtio_vsock.h | 94 + include/uapi/linux/vm_sockets.h | 157 + include/uapi/linux/vm_sockets_diag.h | 34 + include/uapi/linux/vmcore.h | 18 + include/uapi/linux/vsockmon.h | 61 + include/uapi/linux/vt.h | 87 + include/uapi/linux/vtpm_proxy.h | 54 + include/uapi/linux/wait.h | 22 + include/uapi/linux/wanrouter.h | 18 + include/uapi/linux/watchdog.h | 58 + include/uapi/linux/wimax.h | 239 + include/uapi/linux/wimax/i2400m.h | 572 ++ include/uapi/linux/wireless.h | 1115 +++ include/uapi/linux/wmi.h | 73 + include/uapi/linux/x25.h | 153 + include/uapi/linux/xattr.h | 81 + include/uapi/linux/xfrm.h | 548 ++ include/uapi/linux/xilinx-v4l2-controls.h | 74 + include/uapi/linux/zorro.h | 114 + include/uapi/linux/zorro_ids.h | 553 ++ include/uapi/misc/cxl.h | 156 + include/uapi/misc/ocxl.h | 80 + include/uapi/mtd/inftl-user.h | 92 + include/uapi/mtd/mtd-abi.h | 285 + include/uapi/mtd/mtd-user.h | 33 + include/uapi/mtd/nftl-user.h | 91 + include/uapi/mtd/ubi-user.h | 460 + include/uapi/rdma/bnxt_re-abi.h | 106 + include/uapi/rdma/cxgb3-abi.h | 82 + include/uapi/rdma/cxgb4-abi.h | 115 + include/uapi/rdma/hfi/hfi1_ioctl.h | 174 + include/uapi/rdma/hfi/hfi1_user.h | 267 + include/uapi/rdma/hns-abi.h | 72 + include/uapi/rdma/i40iw-abi.h | 107 + include/uapi/rdma/ib_user_cm.h | 326 + include/uapi/rdma/ib_user_ioctl_cmds.h | 160 + include/uapi/rdma/ib_user_ioctl_verbs.h | 160 + include/uapi/rdma/ib_user_mad.h | 234 + include/uapi/rdma/ib_user_sa.h | 77 + include/uapi/rdma/ib_user_verbs.h | 1292 +++ include/uapi/rdma/mlx4-abi.h | 191 + 
include/uapi/rdma/mlx5-abi.h | 474 + include/uapi/rdma/mlx5_user_ioctl_cmds.h | 169 + include/uapi/rdma/mlx5_user_ioctl_verbs.h | 43 + include/uapi/rdma/mthca-abi.h | 112 + include/uapi/rdma/nes-abi.h | 115 + include/uapi/rdma/ocrdma-abi.h | 152 + include/uapi/rdma/qedr-abi.h | 131 + include/uapi/rdma/rdma_netlink.h | 434 + include/uapi/rdma/rdma_user_cm.h | 324 + include/uapi/rdma/rdma_user_ioctl.h | 85 + include/uapi/rdma/rdma_user_ioctl_cmds.h | 104 + include/uapi/rdma/rdma_user_rxe.h | 179 + include/uapi/rdma/vmw_pvrdma-abi.h | 298 + include/uapi/scsi/cxlflash_ioctl.h | 276 + include/uapi/scsi/fc/fc_els.h | 832 ++ include/uapi/scsi/fc/fc_fs.h | 351 + include/uapi/scsi/fc/fc_gs.h | 97 + include/uapi/scsi/fc/fc_ns.h | 209 + include/uapi/scsi/scsi_bsg_fc.h | 321 + include/uapi/scsi/scsi_netlink.h | 124 + include/uapi/scsi/scsi_netlink_fc.h | 72 + include/uapi/sound/asequencer.h | 612 ++ include/uapi/sound/asoc.h | 633 ++ include/uapi/sound/asound.h | 1037 +++ include/uapi/sound/asound_fm.h | 135 + include/uapi/sound/compress_offload.h | 192 + include/uapi/sound/compress_params.h | 406 + include/uapi/sound/emu10k1.h | 381 + include/uapi/sound/firewire.h | 91 + include/uapi/sound/hdsp.h | 111 + include/uapi/sound/hdspm.h | 232 + include/uapi/sound/sb16_csp.h | 123 + include/uapi/sound/sfnt_info.h | 213 + include/uapi/sound/skl-tplg-interface.h | 239 + include/uapi/sound/snd_sst_tokens.h | 330 + include/uapi/sound/tlv.h | 117 + include/uapi/sound/usb_stream.h | 77 + include/uapi/video/edid.h | 10 + include/uapi/video/sisfb.h | 210 + include/uapi/video/uvesafb.h | 61 + include/uapi/xen/evtchn.h | 104 + include/uapi/xen/gntalloc.h | 84 + include/uapi/xen/gntdev.h | 309 + include/uapi/xen/privcmd.h | 129 + include/video/atmel_lcdc.h | 202 + include/video/aty128.h | 423 + include/video/broadsheetfb.h | 74 + include/video/cirrus.h | 122 + include/video/cvisionppc.h | 51 + include/video/da8xx-fb.h | 95 + include/video/display_timing.h | 106 + include/video/edid.h | 10 + include/video/gbe.h | 317 + include/video/hecubafb.h | 51 + include/video/ili9320.h | 201 + include/video/imx-ipu-image-convert.h | 207 + include/video/imx-ipu-v3.h | 448 + include/video/kyro.h | 91 + include/video/mach64.h | 1378 +++ include/video/maxinefb.h | 38 + include/video/mbxfb.h | 99 + include/video/metronomefb.h | 57 + include/video/mipi_display.h | 141 + include/video/mmp_disp.h | 358 + include/video/neomagic.h | 189 + include/video/newport.h | 584 ++ include/video/of_display_timing.h | 35 + include/video/of_videomode.h | 18 + include/video/omap-panel-data.h | 82 + include/video/omapfb_dss.h | 910 ++ include/video/omapvrfb.h | 68 + include/video/permedia2.h | 254 + include/video/platform_lcd.h | 22 + include/video/pm3fb.h | 1061 +++ include/video/pmag-ba-fb.h | 27 + include/video/pmagb-b-fb.h | 58 + include/video/pxa168fb.h | 123 + include/video/radeon.h | 1994 +++++ include/video/s1d13xxxfb.h | 174 + include/video/sa1100fb.h | 63 + include/video/samsung_fimd.h | 477 + include/video/sh_mobile_lcdc.h | 196 + include/video/sisfb.h | 37 + include/video/sstfb.h | 356 + include/video/tdfx.h | 209 + include/video/tgafb.h | 280 + include/video/trident.h | 147 + include/video/udlfb.h | 103 + include/video/uvesafb.h | 141 + include/video/vga.h | 459 + include/video/videomode.h | 58 + include/video/w100fb.h | 150 + include/xen/acpi.h | 111 + include/xen/arm/hypercall.h | 91 + include/xen/arm/hypervisor.h | 40 + include/xen/arm/interface.h | 86 + include/xen/arm/page-coherent.h | 107 + include/xen/arm/page.h | 113 + 
include/xen/balloon.h | 46 + include/xen/events.h | 152 + include/xen/features.h | 24 + include/xen/grant_table.h | 324 + include/xen/hvc-console.h | 19 + include/xen/hvm.h | 61 + include/xen/interface/callback.h | 102 + include/xen/interface/elfnote.h | 220 + include/xen/interface/event_channel.h | 279 + include/xen/interface/features.h | 88 + include/xen/interface/grant_table.h | 568 ++ include/xen/interface/hvm/dm_op.h | 32 + include/xen/interface/hvm/hvm_op.h | 65 + include/xen/interface/hvm/hvm_vcpu.h | 143 + include/xen/interface/hvm/params.h | 127 + include/xen/interface/hvm/start_info.h | 98 + include/xen/interface/io/9pfs.h | 36 + include/xen/interface/io/blkif.h | 302 + include/xen/interface/io/console.h | 24 + include/xen/interface/io/displif.h | 862 ++ include/xen/interface/io/fbif.h | 143 + include/xen/interface/io/kbdif.h | 560 ++ include/xen/interface/io/netif.h | 939 ++ include/xen/interface/io/pciif.h | 112 + include/xen/interface/io/protocols.h | 22 + include/xen/interface/io/pvcalls.h | 121 + include/xen/interface/io/ring.h | 455 + include/xen/interface/io/sndif.h | 1081 +++ include/xen/interface/io/tpmif.h | 52 + include/xen/interface/io/vscsiif.h | 229 + include/xen/interface/io/xenbus.h | 51 + include/xen/interface/io/xs_wire.h | 95 + include/xen/interface/memory.h | 334 + include/xen/interface/nmi.h | 52 + include/xen/interface/physdev.h | 321 + include/xen/interface/platform.h | 531 ++ include/xen/interface/sched.h | 183 + include/xen/interface/vcpu.h | 223 + include/xen/interface/version.h | 82 + include/xen/interface/xen-mca.h | 385 + include/xen/interface/xen.h | 782 ++ include/xen/interface/xenpmu.h | 95 + include/xen/mem-reservation.h | 60 + include/xen/page.h | 49 + include/xen/platform_pci.h | 73 + include/xen/swiotlb-xen.h | 10 + include/xen/tmem.h | 18 + include/xen/xen-ops.h | 219 + include/xen/xen.h | 42 + include/xen/xenbus.h | 247 + include/xen/xenbus_dev.h | 44 + 4895 files changed, 939184 insertions(+) create mode 100644 include/acpi/acbuffer.h create mode 100644 include/acpi/acconfig.h create mode 100644 include/acpi/acexcep.h create mode 100644 include/acpi/acnames.h create mode 100644 include/acpi/acoutput.h create mode 100644 include/acpi/acpi.h create mode 100644 include/acpi/acpi_bus.h create mode 100644 include/acpi/acpi_drivers.h create mode 100644 include/acpi/acpi_io.h create mode 100644 include/acpi/acpi_lpat.h create mode 100644 include/acpi/acpi_numa.h create mode 100644 include/acpi/acpiosxf.h create mode 100644 include/acpi/acpixf.h create mode 100644 include/acpi/acrestyp.h create mode 100644 include/acpi/actbl.h create mode 100644 include/acpi/actbl1.h create mode 100644 include/acpi/actbl2.h create mode 100644 include/acpi/actbl3.h create mode 100644 include/acpi/actypes.h create mode 100644 include/acpi/acuuid.h create mode 100644 include/acpi/apei.h create mode 100644 include/acpi/battery.h create mode 100644 include/acpi/button.h create mode 100644 include/acpi/cppc_acpi.h create mode 100644 include/acpi/ghes.h create mode 100644 include/acpi/hed.h create mode 100644 include/acpi/nfit.h create mode 100644 include/acpi/pcc.h create mode 100644 include/acpi/pdc_intel.h create mode 100644 include/acpi/platform/acenv.h create mode 100644 include/acpi/platform/acenvex.h create mode 100644 include/acpi/platform/acgcc.h create mode 100644 include/acpi/platform/acgccex.h create mode 100644 include/acpi/platform/acintel.h create mode 100644 include/acpi/platform/aclinux.h create mode 100644 include/acpi/platform/aclinuxex.h create mode 
100644 include/acpi/processor.h create mode 100644 include/acpi/reboot.h create mode 100644 include/acpi/video.h create mode 100644 include/asm-generic/4level-fixup.h create mode 100644 include/asm-generic/5level-fixup.h create mode 100644 include/asm-generic/asm-offsets.h create mode 100644 include/asm-generic/asm-prototypes.h create mode 100644 include/asm-generic/atomic-instrumented.h create mode 100644 include/asm-generic/atomic-long.h create mode 100644 include/asm-generic/atomic.h create mode 100644 include/asm-generic/atomic64.h create mode 100644 include/asm-generic/audit_change_attr.h create mode 100644 include/asm-generic/audit_dir_write.h create mode 100644 include/asm-generic/audit_read.h create mode 100644 include/asm-generic/audit_signal.h create mode 100644 include/asm-generic/audit_write.h create mode 100644 include/asm-generic/barrier.h create mode 100644 include/asm-generic/bitops.h create mode 100644 include/asm-generic/bitops/__ffs.h create mode 100644 include/asm-generic/bitops/__fls.h create mode 100644 include/asm-generic/bitops/arch_hweight.h create mode 100644 include/asm-generic/bitops/atomic.h create mode 100644 include/asm-generic/bitops/builtin-__ffs.h create mode 100644 include/asm-generic/bitops/builtin-__fls.h create mode 100644 include/asm-generic/bitops/builtin-ffs.h create mode 100644 include/asm-generic/bitops/builtin-fls.h create mode 100644 include/asm-generic/bitops/const_hweight.h create mode 100644 include/asm-generic/bitops/ext2-atomic-setbit.h create mode 100644 include/asm-generic/bitops/ext2-atomic.h create mode 100644 include/asm-generic/bitops/ffs.h create mode 100644 include/asm-generic/bitops/ffz.h create mode 100644 include/asm-generic/bitops/find.h create mode 100644 include/asm-generic/bitops/fls.h create mode 100644 include/asm-generic/bitops/fls64.h create mode 100644 include/asm-generic/bitops/hweight.h create mode 100644 include/asm-generic/bitops/le.h create mode 100644 include/asm-generic/bitops/lock.h create mode 100644 include/asm-generic/bitops/non-atomic.h create mode 100644 include/asm-generic/bitops/sched.h create mode 100644 include/asm-generic/bitsperlong.h create mode 100644 include/asm-generic/bug.h create mode 100644 include/asm-generic/bugs.h create mode 100644 include/asm-generic/cache.h create mode 100644 include/asm-generic/cacheflush.h create mode 100644 include/asm-generic/checksum.h create mode 100644 include/asm-generic/cmpxchg-local.h create mode 100644 include/asm-generic/cmpxchg.h create mode 100644 include/asm-generic/compat.h create mode 100644 include/asm-generic/current.h create mode 100644 include/asm-generic/delay.h create mode 100644 include/asm-generic/device.h create mode 100644 include/asm-generic/div64.h create mode 100644 include/asm-generic/dma-contiguous.h create mode 100644 include/asm-generic/dma-mapping.h create mode 100644 include/asm-generic/dma.h create mode 100644 include/asm-generic/early_ioremap.h create mode 100644 include/asm-generic/emergency-restart.h create mode 100644 include/asm-generic/error-injection.h create mode 100644 include/asm-generic/exec.h create mode 100644 include/asm-generic/export.h create mode 100644 include/asm-generic/extable.h create mode 100644 include/asm-generic/fb.h create mode 100644 include/asm-generic/fixmap.h create mode 100644 include/asm-generic/ftrace.h create mode 100644 include/asm-generic/futex.h create mode 100644 include/asm-generic/getorder.h create mode 100644 include/asm-generic/gpio.h create mode 100644 include/asm-generic/hardirq.h create mode 
100644 include/asm-generic/hugetlb.h create mode 100644 include/asm-generic/hw_irq.h create mode 100644 include/asm-generic/ide_iops.h create mode 100644 include/asm-generic/int-ll64.h create mode 100644 include/asm-generic/io.h create mode 100644 include/asm-generic/ioctl.h create mode 100644 include/asm-generic/iomap.h create mode 100644 include/asm-generic/irq.h create mode 100644 include/asm-generic/irq_regs.h create mode 100644 include/asm-generic/irq_work.h create mode 100644 include/asm-generic/irqflags.h create mode 100644 include/asm-generic/kdebug.h create mode 100644 include/asm-generic/kmap_types.h create mode 100644 include/asm-generic/kprobes.h create mode 100644 include/asm-generic/kvm_para.h create mode 100644 include/asm-generic/linkage.h create mode 100644 include/asm-generic/local.h create mode 100644 include/asm-generic/local64.h create mode 100644 include/asm-generic/mcs_spinlock.h create mode 100644 include/asm-generic/memory_model.h create mode 100644 include/asm-generic/mm-arch-hooks.h create mode 100644 include/asm-generic/mm_hooks.h create mode 100644 include/asm-generic/mmu.h create mode 100644 include/asm-generic/mmu_context.h create mode 100644 include/asm-generic/module.h create mode 100644 include/asm-generic/msi.h create mode 100644 include/asm-generic/page.h create mode 100644 include/asm-generic/param.h create mode 100644 include/asm-generic/parport.h create mode 100644 include/asm-generic/pci.h create mode 100644 include/asm-generic/pci_iomap.h create mode 100644 include/asm-generic/percpu.h create mode 100644 include/asm-generic/pgalloc.h create mode 100644 include/asm-generic/pgtable-nop4d-hack.h create mode 100644 include/asm-generic/pgtable-nop4d.h create mode 100644 include/asm-generic/pgtable-nopmd.h create mode 100644 include/asm-generic/pgtable-nopud.h create mode 100644 include/asm-generic/pgtable.h create mode 100644 include/asm-generic/preempt.h create mode 100644 include/asm-generic/ptrace.h create mode 100644 include/asm-generic/qrwlock.h create mode 100644 include/asm-generic/qrwlock_types.h create mode 100644 include/asm-generic/qspinlock.h create mode 100644 include/asm-generic/qspinlock_types.h create mode 100644 include/asm-generic/resource.h create mode 100644 include/asm-generic/rwsem.h create mode 100644 include/asm-generic/seccomp.h create mode 100644 include/asm-generic/sections.h create mode 100644 include/asm-generic/segment.h create mode 100644 include/asm-generic/serial.h create mode 100644 include/asm-generic/set_memory.h create mode 100644 include/asm-generic/signal.h create mode 100644 include/asm-generic/simd.h create mode 100644 include/asm-generic/sizes.h create mode 100644 include/asm-generic/spinlock.h create mode 100644 include/asm-generic/statfs.h create mode 100644 include/asm-generic/string.h create mode 100644 include/asm-generic/switch_to.h create mode 100644 include/asm-generic/syscall.h create mode 100644 include/asm-generic/syscalls.h create mode 100644 include/asm-generic/termios-base.h create mode 100644 include/asm-generic/termios.h create mode 100644 include/asm-generic/timex.h create mode 100644 include/asm-generic/tlb.h create mode 100644 include/asm-generic/tlbflush.h create mode 100644 include/asm-generic/topology.h create mode 100644 include/asm-generic/trace_clock.h create mode 100644 include/asm-generic/uaccess.h create mode 100644 include/asm-generic/unaligned.h create mode 100644 include/asm-generic/unistd.h create mode 100644 include/asm-generic/user.h create mode 100644 include/asm-generic/vga.h 
create mode 100644 include/asm-generic/vmlinux.lds.h create mode 100644 include/asm-generic/vtime.h create mode 100644 include/asm-generic/word-at-a-time.h create mode 100644 include/asm-generic/xor.h create mode 100644 include/clocksource/arm_arch_timer.h create mode 100644 include/clocksource/pxa.h create mode 100644 include/clocksource/samsung_pwm.h create mode 100644 include/clocksource/timer-sp804.h create mode 100644 include/clocksource/timer-ti-dm.h create mode 100644 include/crypto/acompress.h create mode 100644 include/crypto/aead.h create mode 100644 include/crypto/aes.h create mode 100644 include/crypto/akcipher.h create mode 100644 include/crypto/algapi.h create mode 100644 include/crypto/authenc.h create mode 100644 include/crypto/b128ops.h create mode 100644 include/crypto/blake2s.h create mode 100644 include/crypto/blowfish.h create mode 100644 include/crypto/cast5.h create mode 100644 include/crypto/cast6.h create mode 100644 include/crypto/cast_common.h create mode 100644 include/crypto/cbc.h create mode 100644 include/crypto/chacha20.h create mode 100644 include/crypto/cryptd.h create mode 100644 include/crypto/crypto_wq.h create mode 100644 include/crypto/ctr.h create mode 100644 include/crypto/des.h create mode 100644 include/crypto/dh.h create mode 100644 include/crypto/drbg.h create mode 100644 include/crypto/ecdh.h create mode 100644 include/crypto/engine.h create mode 100644 include/crypto/gcm.h create mode 100644 include/crypto/gf128mul.h create mode 100644 include/crypto/ghash.h create mode 100644 include/crypto/hash.h create mode 100644 include/crypto/hash_info.h create mode 100644 include/crypto/hmac.h create mode 100644 include/crypto/if_alg.h create mode 100644 include/crypto/internal/acompress.h create mode 100644 include/crypto/internal/aead.h create mode 100644 include/crypto/internal/akcipher.h create mode 100644 include/crypto/internal/blake2s.h create mode 100644 include/crypto/internal/geniv.h create mode 100644 include/crypto/internal/hash.h create mode 100644 include/crypto/internal/kpp.h create mode 100644 include/crypto/internal/rng.h create mode 100644 include/crypto/internal/rsa.h create mode 100644 include/crypto/internal/scompress.h create mode 100644 include/crypto/internal/simd.h create mode 100644 include/crypto/internal/skcipher.h create mode 100644 include/crypto/kpp.h create mode 100644 include/crypto/mcryptd.h create mode 100644 include/crypto/md5.h create mode 100644 include/crypto/morus1280_glue.h create mode 100644 include/crypto/morus640_glue.h create mode 100644 include/crypto/morus_common.h create mode 100644 include/crypto/null.h create mode 100644 include/crypto/padlock.h create mode 100644 include/crypto/pcrypt.h create mode 100644 include/crypto/pkcs7.h create mode 100644 include/crypto/poly1305.h create mode 100644 include/crypto/public_key.h create mode 100644 include/crypto/rng.h create mode 100644 include/crypto/scatterwalk.h create mode 100644 include/crypto/serpent.h create mode 100644 include/crypto/sha.h create mode 100644 include/crypto/sha1_base.h create mode 100644 include/crypto/sha256_base.h create mode 100644 include/crypto/sha3.h create mode 100644 include/crypto/sha512_base.h create mode 100644 include/crypto/skcipher.h create mode 100644 include/crypto/sm3.h create mode 100644 include/crypto/sm3_base.h create mode 100644 include/crypto/sm4.h create mode 100644 include/crypto/twofish.h create mode 100644 include/crypto/xts.h create mode 100644 include/drm/amd_asic_type.h create mode 100644 
include/drm/ati_pcigart.h create mode 100644 include/drm/bridge/analogix_dp.h create mode 100644 include/drm/bridge/dw_hdmi.h create mode 100644 include/drm/bridge/dw_mipi_dsi.h create mode 100644 include/drm/bridge/mhl.h create mode 100644 include/drm/drmP.h create mode 100644 include/drm/drm_agpsupport.h create mode 100644 include/drm/drm_atomic.h create mode 100644 include/drm/drm_atomic_helper.h create mode 100644 include/drm/drm_audio_component.h create mode 100644 include/drm/drm_auth.h create mode 100644 include/drm/drm_blend.h create mode 100644 include/drm/drm_bridge.h create mode 100644 include/drm/drm_cache.h create mode 100644 include/drm/drm_client.h create mode 100644 include/drm/drm_color_mgmt.h create mode 100644 include/drm/drm_connector.h create mode 100644 include/drm/drm_crtc.h create mode 100644 include/drm/drm_crtc_helper.h create mode 100644 include/drm/drm_debugfs.h create mode 100644 include/drm/drm_debugfs_crc.h create mode 100644 include/drm/drm_device.h create mode 100644 include/drm/drm_displayid.h create mode 100644 include/drm/drm_dp_dual_mode_helper.h create mode 100644 include/drm/drm_dp_helper.h create mode 100644 include/drm/drm_dp_mst_helper.h create mode 100644 include/drm/drm_drv.h create mode 100644 include/drm/drm_edid.h create mode 100644 include/drm/drm_encoder.h create mode 100644 include/drm/drm_encoder_slave.h create mode 100644 include/drm/drm_fb_cma_helper.h create mode 100644 include/drm/drm_fb_helper.h create mode 100644 include/drm/drm_file.h create mode 100644 include/drm/drm_fixed.h create mode 100644 include/drm/drm_flip_work.h create mode 100644 include/drm/drm_fourcc.h create mode 100644 include/drm/drm_framebuffer.h create mode 100644 include/drm/drm_gem.h create mode 100644 include/drm/drm_gem_cma_helper.h create mode 100644 include/drm/drm_gem_framebuffer_helper.h create mode 100644 include/drm/drm_global.h create mode 100644 include/drm/drm_hashtab.h create mode 100644 include/drm/drm_hdcp.h create mode 100644 include/drm/drm_ioctl.h create mode 100644 include/drm/drm_irq.h create mode 100644 include/drm/drm_lease.h create mode 100644 include/drm/drm_legacy.h create mode 100644 include/drm/drm_mipi_dsi.h create mode 100644 include/drm/drm_mm.h create mode 100644 include/drm/drm_mode_config.h create mode 100644 include/drm/drm_mode_object.h create mode 100644 include/drm/drm_modes.h create mode 100644 include/drm/drm_modeset_helper.h create mode 100644 include/drm/drm_modeset_helper_vtables.h create mode 100644 include/drm/drm_modeset_lock.h create mode 100644 include/drm/drm_of.h create mode 100644 include/drm/drm_os_linux.h create mode 100644 include/drm/drm_panel.h create mode 100644 include/drm/drm_pci.h create mode 100644 include/drm/drm_pciids.h create mode 100644 include/drm/drm_plane.h create mode 100644 include/drm/drm_plane_helper.h create mode 100644 include/drm/drm_prime.h create mode 100644 include/drm/drm_print.h create mode 100644 include/drm/drm_property.h create mode 100644 include/drm/drm_rect.h create mode 100644 include/drm/drm_scdc_helper.h create mode 100644 include/drm/drm_simple_kms_helper.h create mode 100644 include/drm/drm_syncobj.h create mode 100644 include/drm/drm_sysfs.h create mode 100644 include/drm/drm_utils.h create mode 100644 include/drm/drm_vblank.h create mode 100644 include/drm/drm_vma_manager.h create mode 100644 include/drm/drm_writeback.h create mode 100644 include/drm/gma_drm.h create mode 100644 include/drm/gpu_scheduler.h create mode 100644 include/drm/i2c/ch7006.h create mode 100644 
include/drm/i2c/sil164.h create mode 100644 include/drm/i2c/tda998x.h create mode 100644 include/drm/i915_component.h create mode 100644 include/drm/i915_drm.h create mode 100644 include/drm/i915_pciids.h create mode 100644 include/drm/intel-gtt.h create mode 100644 include/drm/intel_lpe_audio.h create mode 100644 include/drm/spsc_queue.h create mode 100644 include/drm/tinydrm/mipi-dbi.h create mode 100644 include/drm/tinydrm/tinydrm-helpers.h create mode 100644 include/drm/tinydrm/tinydrm.h create mode 100644 include/drm/ttm/ttm_bo_api.h create mode 100644 include/drm/ttm/ttm_bo_driver.h create mode 100644 include/drm/ttm/ttm_debug.h create mode 100644 include/drm/ttm/ttm_execbuf_util.h create mode 100644 include/drm/ttm/ttm_lock.h create mode 100644 include/drm/ttm/ttm_memory.h create mode 100644 include/drm/ttm/ttm_module.h create mode 100644 include/drm/ttm/ttm_object.h create mode 100644 include/drm/ttm/ttm_page_alloc.h create mode 100644 include/drm/ttm/ttm_placement.h create mode 100644 include/drm/ttm/ttm_set_memory.h create mode 100644 include/drm/ttm/ttm_tt.h create mode 100644 include/dt-bindings/arm/ux500_pm_domains.h create mode 100644 include/dt-bindings/bus/ti-sysc.h create mode 100644 include/dt-bindings/clk/ti-dra7-atl.h create mode 100644 include/dt-bindings/clock/actions,s700-cmu.h create mode 100644 include/dt-bindings/clock/actions,s900-cmu.h create mode 100644 include/dt-bindings/clock/alphascale,asm9260.h create mode 100644 include/dt-bindings/clock/am3.h create mode 100644 include/dt-bindings/clock/am4.h create mode 100644 include/dt-bindings/clock/aspeed-clock.h create mode 100644 include/dt-bindings/clock/at91.h create mode 100644 include/dt-bindings/clock/ath79-clk.h create mode 100644 include/dt-bindings/clock/axg-aoclkc.h create mode 100644 include/dt-bindings/clock/axg-audio-clkc.h create mode 100644 include/dt-bindings/clock/axg-clkc.h create mode 100644 include/dt-bindings/clock/axis,artpec6-clkctrl.h create mode 100644 include/dt-bindings/clock/bcm-cygnus.h create mode 100644 include/dt-bindings/clock/bcm-ns2.h create mode 100644 include/dt-bindings/clock/bcm-nsp.h create mode 100644 include/dt-bindings/clock/bcm-sr.h create mode 100644 include/dt-bindings/clock/bcm21664.h create mode 100644 include/dt-bindings/clock/bcm281xx.h create mode 100644 include/dt-bindings/clock/bcm2835-aux.h create mode 100644 include/dt-bindings/clock/bcm2835.h create mode 100644 include/dt-bindings/clock/berlin2.h create mode 100644 include/dt-bindings/clock/berlin2q.h create mode 100644 include/dt-bindings/clock/boston-clock.h create mode 100644 include/dt-bindings/clock/clps711x-clock.h create mode 100644 include/dt-bindings/clock/cortina,gemini-clock.h create mode 100644 include/dt-bindings/clock/dm814.h create mode 100644 include/dt-bindings/clock/dm816.h create mode 100644 include/dt-bindings/clock/dra7.h create mode 100644 include/dt-bindings/clock/efm32-cmu.h create mode 100644 include/dt-bindings/clock/exynos-audss-clk.h create mode 100644 include/dt-bindings/clock/exynos3250.h create mode 100644 include/dt-bindings/clock/exynos4.h create mode 100644 include/dt-bindings/clock/exynos5250.h create mode 100644 include/dt-bindings/clock/exynos5260-clk.h create mode 100644 include/dt-bindings/clock/exynos5410.h create mode 100644 include/dt-bindings/clock/exynos5420.h create mode 100644 include/dt-bindings/clock/exynos5433.h create mode 100644 include/dt-bindings/clock/exynos7-clk.h create mode 100644 include/dt-bindings/clock/gxbb-aoclkc.h create mode 100644 
include/dt-bindings/clock/gxbb-clkc.h create mode 100644 include/dt-bindings/clock/hi3516cv300-clock.h create mode 100644 include/dt-bindings/clock/hi3519-clock.h create mode 100644 include/dt-bindings/clock/hi3620-clock.h create mode 100644 include/dt-bindings/clock/hi3660-clock.h create mode 100644 include/dt-bindings/clock/hi6220-clock.h create mode 100644 include/dt-bindings/clock/hip04-clock.h create mode 100644 include/dt-bindings/clock/histb-clock.h create mode 100644 include/dt-bindings/clock/hix5hd2-clock.h create mode 100644 include/dt-bindings/clock/imx1-clock.h create mode 100644 include/dt-bindings/clock/imx21-clock.h create mode 100644 include/dt-bindings/clock/imx27-clock.h create mode 100644 include/dt-bindings/clock/imx5-clock.h create mode 100644 include/dt-bindings/clock/imx6qdl-clock.h create mode 100644 include/dt-bindings/clock/imx6sl-clock.h create mode 100644 include/dt-bindings/clock/imx6sll-clock.h create mode 100644 include/dt-bindings/clock/imx6sx-clock.h create mode 100644 include/dt-bindings/clock/imx6ul-clock.h create mode 100644 include/dt-bindings/clock/imx7d-clock.h create mode 100644 include/dt-bindings/clock/jz4740-cgu.h create mode 100644 include/dt-bindings/clock/jz4770-cgu.h create mode 100644 include/dt-bindings/clock/jz4780-cgu.h create mode 100644 include/dt-bindings/clock/lpc18xx-ccu.h create mode 100644 include/dt-bindings/clock/lpc18xx-cgu.h create mode 100644 include/dt-bindings/clock/lpc32xx-clock.h create mode 100644 include/dt-bindings/clock/lsi,axm5516-clks.h create mode 100644 include/dt-bindings/clock/marvell,mmp2.h create mode 100644 include/dt-bindings/clock/marvell,pxa168.h create mode 100644 include/dt-bindings/clock/marvell,pxa1928.h create mode 100644 include/dt-bindings/clock/marvell,pxa910.h create mode 100644 include/dt-bindings/clock/maxim,max77620.h create mode 100644 include/dt-bindings/clock/maxim,max77686.h create mode 100644 include/dt-bindings/clock/maxim,max77802.h create mode 100644 include/dt-bindings/clock/maxim,max9485.h create mode 100644 include/dt-bindings/clock/meson8b-clkc.h create mode 100644 include/dt-bindings/clock/microchip,pic32-clock.h create mode 100644 include/dt-bindings/clock/mpc512x-clock.h create mode 100644 include/dt-bindings/clock/mt2701-clk.h create mode 100644 include/dt-bindings/clock/mt2712-clk.h create mode 100644 include/dt-bindings/clock/mt6797-clk.h create mode 100644 include/dt-bindings/clock/mt7622-clk.h create mode 100644 include/dt-bindings/clock/mt8135-clk.h create mode 100644 include/dt-bindings/clock/mt8173-clk.h create mode 100644 include/dt-bindings/clock/nuvoton,npcm7xx-clock.h create mode 100644 include/dt-bindings/clock/omap4.h create mode 100644 include/dt-bindings/clock/omap5.h create mode 100644 include/dt-bindings/clock/oxsemi,ox810se.h create mode 100644 include/dt-bindings/clock/oxsemi,ox820.h create mode 100644 include/dt-bindings/clock/pistachio-clk.h create mode 100644 include/dt-bindings/clock/px30-cru.h create mode 100644 include/dt-bindings/clock/pxa-clock.h create mode 100644 include/dt-bindings/clock/qcom,dispcc-sdm845.h create mode 100644 include/dt-bindings/clock/qcom,gcc-apq8084.h create mode 100644 include/dt-bindings/clock/qcom,gcc-ipq4019.h create mode 100644 include/dt-bindings/clock/qcom,gcc-ipq806x.h create mode 100644 include/dt-bindings/clock/qcom,gcc-ipq8074.h create mode 100644 include/dt-bindings/clock/qcom,gcc-mdm9615.h create mode 100644 include/dt-bindings/clock/qcom,gcc-msm8660.h create mode 100644 include/dt-bindings/clock/qcom,gcc-msm8916.h 
create mode 100644 include/dt-bindings/clock/qcom,gcc-msm8960.h create mode 100644 include/dt-bindings/clock/qcom,gcc-msm8974.h create mode 100644 include/dt-bindings/clock/qcom,gcc-msm8994.h create mode 100644 include/dt-bindings/clock/qcom,gcc-msm8996.h create mode 100644 include/dt-bindings/clock/qcom,gcc-msm8998.h create mode 100644 include/dt-bindings/clock/qcom,gcc-sdm845.h create mode 100644 include/dt-bindings/clock/qcom,lcc-ipq806x.h create mode 100644 include/dt-bindings/clock/qcom,lcc-mdm9615.h create mode 100644 include/dt-bindings/clock/qcom,lcc-msm8960.h create mode 100644 include/dt-bindings/clock/qcom,mmcc-apq8084.h create mode 100644 include/dt-bindings/clock/qcom,mmcc-msm8960.h create mode 100644 include/dt-bindings/clock/qcom,mmcc-msm8974.h create mode 100644 include/dt-bindings/clock/qcom,mmcc-msm8996.h create mode 100644 include/dt-bindings/clock/qcom,rpmcc.h create mode 100644 include/dt-bindings/clock/qcom,rpmh.h create mode 100644 include/dt-bindings/clock/qcom,videocc-sdm845.h create mode 100644 include/dt-bindings/clock/r7s72100-clock.h create mode 100644 include/dt-bindings/clock/r8a73a4-clock.h create mode 100644 include/dt-bindings/clock/r8a7740-clock.h create mode 100644 include/dt-bindings/clock/r8a7743-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a7745-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a77470-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a7778-clock.h create mode 100644 include/dt-bindings/clock/r8a7779-clock.h create mode 100644 include/dt-bindings/clock/r8a7790-clock.h create mode 100644 include/dt-bindings/clock/r8a7790-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a7791-clock.h create mode 100644 include/dt-bindings/clock/r8a7791-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a7792-clock.h create mode 100644 include/dt-bindings/clock/r8a7792-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a7793-clock.h create mode 100644 include/dt-bindings/clock/r8a7793-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a7794-clock.h create mode 100644 include/dt-bindings/clock/r8a7794-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a7795-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a7796-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a77965-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a77970-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a77980-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a77990-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r8a77995-cpg-mssr.h create mode 100644 include/dt-bindings/clock/r9a06g032-sysctrl.h create mode 100644 include/dt-bindings/clock/renesas-cpg-mssr.h create mode 100644 include/dt-bindings/clock/rk3036-cru.h create mode 100644 include/dt-bindings/clock/rk3066a-cru.h create mode 100644 include/dt-bindings/clock/rk3128-cru.h create mode 100644 include/dt-bindings/clock/rk3188-cru-common.h create mode 100644 include/dt-bindings/clock/rk3188-cru.h create mode 100644 include/dt-bindings/clock/rk3228-cru.h create mode 100644 include/dt-bindings/clock/rk3288-cru.h create mode 100644 include/dt-bindings/clock/rk3328-cru.h create mode 100644 include/dt-bindings/clock/rk3368-cru.h create mode 100644 include/dt-bindings/clock/rk3399-cru.h create mode 100644 include/dt-bindings/clock/rk3399-ddr.h create mode 100644 include/dt-bindings/clock/rockchip,rk808.h create mode 100644 include/dt-bindings/clock/rv1108-cru.h create mode 100644 include/dt-bindings/clock/s3c2410.h create 
mode 100644 include/dt-bindings/clock/s3c2412.h create mode 100644 include/dt-bindings/clock/s3c2443.h create mode 100644 include/dt-bindings/clock/s5pv210-audss.h create mode 100644 include/dt-bindings/clock/s5pv210.h create mode 100644 include/dt-bindings/clock/samsung,s2mps11.h create mode 100644 include/dt-bindings/clock/samsung,s3c64xx-clock.h create mode 100644 include/dt-bindings/clock/sh73a0-clock.h create mode 100644 include/dt-bindings/clock/sprd,sc9860-clk.h create mode 100644 include/dt-bindings/clock/ste-ab8500.h create mode 100644 include/dt-bindings/clock/stih407-clks.h create mode 100644 include/dt-bindings/clock/stih410-clks.h create mode 100644 include/dt-bindings/clock/stih416-clks.h create mode 100644 include/dt-bindings/clock/stih418-clks.h create mode 100644 include/dt-bindings/clock/stm32fx-clock.h create mode 100644 include/dt-bindings/clock/stm32h7-clks.h create mode 100644 include/dt-bindings/clock/stm32mp1-clks.h create mode 100644 include/dt-bindings/clock/stratix10-clock.h create mode 100644 include/dt-bindings/clock/sun4i-a10-ccu.h create mode 100644 include/dt-bindings/clock/sun4i-a10-pll2.h create mode 100644 include/dt-bindings/clock/sun50i-a64-ccu.h create mode 100644 include/dt-bindings/clock/sun50i-h6-ccu.h create mode 100644 include/dt-bindings/clock/sun50i-h6-r-ccu.h create mode 100644 include/dt-bindings/clock/sun5i-ccu.h create mode 100644 include/dt-bindings/clock/sun6i-a31-ccu.h create mode 100644 include/dt-bindings/clock/sun7i-a20-ccu.h create mode 100644 include/dt-bindings/clock/sun8i-a23-a33-ccu.h create mode 100644 include/dt-bindings/clock/sun8i-a83t-ccu.h create mode 100644 include/dt-bindings/clock/sun8i-de2.h create mode 100644 include/dt-bindings/clock/sun8i-h3-ccu.h create mode 100644 include/dt-bindings/clock/sun8i-r-ccu.h create mode 100644 include/dt-bindings/clock/sun8i-r40-ccu.h create mode 100644 include/dt-bindings/clock/sun8i-tcon-top.h create mode 100644 include/dt-bindings/clock/sun8i-v3s-ccu.h create mode 100644 include/dt-bindings/clock/sun9i-a80-ccu.h create mode 100644 include/dt-bindings/clock/sun9i-a80-de.h create mode 100644 include/dt-bindings/clock/sun9i-a80-usb.h create mode 100644 include/dt-bindings/clock/tegra114-car.h create mode 100644 include/dt-bindings/clock/tegra124-car-common.h create mode 100644 include/dt-bindings/clock/tegra124-car.h create mode 100644 include/dt-bindings/clock/tegra186-clock.h create mode 100644 include/dt-bindings/clock/tegra194-clock.h create mode 100644 include/dt-bindings/clock/tegra20-car.h create mode 100644 include/dt-bindings/clock/tegra210-car.h create mode 100644 include/dt-bindings/clock/tegra30-car.h create mode 100644 include/dt-bindings/clock/vf610-clock.h create mode 100644 include/dt-bindings/clock/zx296702-clock.h create mode 100644 include/dt-bindings/clock/zx296718-clock.h create mode 100644 include/dt-bindings/display/tda998x.h create mode 100644 include/dt-bindings/dma/at91.h create mode 100644 include/dt-bindings/dma/axi-dmac.h create mode 100644 include/dt-bindings/dma/jz4780-dma.h create mode 100644 include/dt-bindings/dma/nbpfaxi.h create mode 100644 include/dt-bindings/dma/sun4i-a10.h create mode 100644 include/dt-bindings/gce/mt8173-gce.h create mode 100644 include/dt-bindings/gpio/aspeed-gpio.h create mode 100644 include/dt-bindings/gpio/gpio.h create mode 100644 include/dt-bindings/gpio/meson-axg-gpio.h create mode 100644 include/dt-bindings/gpio/meson-gxbb-gpio.h create mode 100644 include/dt-bindings/gpio/meson-gxl-gpio.h create mode 100644 
include/dt-bindings/gpio/meson8-gpio.h create mode 100644 include/dt-bindings/gpio/meson8b-gpio.h create mode 100644 include/dt-bindings/gpio/tegra-gpio.h create mode 100644 include/dt-bindings/gpio/tegra186-gpio.h create mode 100644 include/dt-bindings/gpio/tegra194-gpio.h create mode 100644 include/dt-bindings/gpio/uniphier-gpio.h create mode 100644 include/dt-bindings/i2c/i2c.h create mode 100644 include/dt-bindings/iio/adc/at91-sama5d2_adc.h create mode 100644 include/dt-bindings/iio/adc/fsl-imx25-gcq.h create mode 100644 include/dt-bindings/iio/adi,ad5592r.h create mode 100644 include/dt-bindings/iio/qcom,spmi-vadc.h create mode 100644 include/dt-bindings/input/gpio-keys.h create mode 100644 include/dt-bindings/input/input.h create mode 120000 include/dt-bindings/input/linux-event-codes.h create mode 100644 include/dt-bindings/input/ti-drv260x.h create mode 100644 include/dt-bindings/interrupt-controller/arm-gic.h create mode 100644 include/dt-bindings/interrupt-controller/irq-st.h create mode 100644 include/dt-bindings/interrupt-controller/irq.h create mode 100644 include/dt-bindings/interrupt-controller/mips-gic.h create mode 100644 include/dt-bindings/interrupt-controller/mvebu-icu.h create mode 100644 include/dt-bindings/leds/common.h create mode 100644 include/dt-bindings/leds/leds-netxbig.h create mode 100644 include/dt-bindings/leds/leds-ns2.h create mode 100644 include/dt-bindings/leds/leds-pca9532.h create mode 100644 include/dt-bindings/leds/leds-pca955x.h create mode 100644 include/dt-bindings/mailbox/tegra186-hsp.h create mode 100644 include/dt-bindings/media/c8sectpfe.h create mode 100644 include/dt-bindings/media/omap3-isp.h create mode 100644 include/dt-bindings/media/tda1997x.h create mode 100644 include/dt-bindings/media/tvp5150.h create mode 100644 include/dt-bindings/media/xilinx-vip.h create mode 100644 include/dt-bindings/memory/mt2701-larb-port.h create mode 100644 include/dt-bindings/memory/mt2712-larb-port.h create mode 100644 include/dt-bindings/memory/mt8173-larb-port.h create mode 100644 include/dt-bindings/memory/tegra114-mc.h create mode 100644 include/dt-bindings/memory/tegra124-mc.h create mode 100644 include/dt-bindings/memory/tegra186-mc.h create mode 100644 include/dt-bindings/memory/tegra20-mc.h create mode 100644 include/dt-bindings/memory/tegra210-mc.h create mode 100644 include/dt-bindings/memory/tegra30-mc.h create mode 100644 include/dt-bindings/mfd/arizona.h create mode 100644 include/dt-bindings/mfd/as3722.h create mode 100644 include/dt-bindings/mfd/atmel-flexcom.h create mode 100644 include/dt-bindings/mfd/dbx500-prcmu.h create mode 100644 include/dt-bindings/mfd/max77620.h create mode 100644 include/dt-bindings/mfd/palmas.h create mode 100644 include/dt-bindings/mfd/qcom-rpm.h create mode 100644 include/dt-bindings/mfd/st-lpc.h create mode 100644 include/dt-bindings/mfd/stm32f4-rcc.h create mode 100644 include/dt-bindings/mfd/stm32f7-rcc.h create mode 100644 include/dt-bindings/mfd/stm32h7-rcc.h create mode 100644 include/dt-bindings/mips/lantiq_rcu_gphy.h create mode 100644 include/dt-bindings/mux/mux.h create mode 100644 include/dt-bindings/net/microchip-lan78xx.h create mode 100644 include/dt-bindings/net/mscc-phy-vsc8531.h create mode 100644 include/dt-bindings/net/ti-dp83867.h create mode 100644 include/dt-bindings/phy/phy-pistachio-usb.h create mode 100644 include/dt-bindings/phy/phy-qcom-qusb2.h create mode 100644 include/dt-bindings/phy/phy.h create mode 100644 include/dt-bindings/pinctrl/am33xx.h create mode 100644 
include/dt-bindings/pinctrl/am43xx.h create mode 100644 include/dt-bindings/pinctrl/at91.h create mode 100644 include/dt-bindings/pinctrl/bcm2835.h create mode 100644 include/dt-bindings/pinctrl/brcm,pinctrl-stingray.h create mode 100644 include/dt-bindings/pinctrl/dm814x.h create mode 100644 include/dt-bindings/pinctrl/dra.h create mode 100644 include/dt-bindings/pinctrl/hisi.h create mode 100644 include/dt-bindings/pinctrl/keystone.h create mode 100644 include/dt-bindings/pinctrl/mt6397-pinfunc.h create mode 100644 include/dt-bindings/pinctrl/mt65xx.h create mode 100644 include/dt-bindings/pinctrl/mt7623-pinfunc.h create mode 100644 include/dt-bindings/pinctrl/nomadik.h create mode 100644 include/dt-bindings/pinctrl/omap.h create mode 100644 include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h create mode 100644 include/dt-bindings/pinctrl/pinctrl-tegra.h create mode 100644 include/dt-bindings/pinctrl/qcom,pmic-gpio.h create mode 100644 include/dt-bindings/pinctrl/qcom,pmic-mpp.h create mode 100644 include/dt-bindings/pinctrl/r7s72100-pinctrl.h create mode 100644 include/dt-bindings/pinctrl/rockchip.h create mode 100644 include/dt-bindings/pinctrl/samsung.h create mode 100644 include/dt-bindings/pinctrl/stm32-pinfunc.h create mode 100644 include/dt-bindings/pinctrl/sun4i-a10.h create mode 100644 include/dt-bindings/power/imx7-power.h create mode 100644 include/dt-bindings/power/mt2701-power.h create mode 100644 include/dt-bindings/power/mt2712-power.h create mode 100644 include/dt-bindings/power/mt6797-power.h create mode 100644 include/dt-bindings/power/mt7622-power.h create mode 100644 include/dt-bindings/power/mt7623a-power.h create mode 100644 include/dt-bindings/power/mt8173-power.h create mode 100644 include/dt-bindings/power/owl-s500-powergate.h create mode 100644 include/dt-bindings/power/owl-s700-powergate.h create mode 100644 include/dt-bindings/power/px30-power.h create mode 100644 include/dt-bindings/power/r8a7743-sysc.h create mode 100644 include/dt-bindings/power/r8a7745-sysc.h create mode 100644 include/dt-bindings/power/r8a77470-sysc.h create mode 100644 include/dt-bindings/power/r8a7779-sysc.h create mode 100644 include/dt-bindings/power/r8a7790-sysc.h create mode 100644 include/dt-bindings/power/r8a7791-sysc.h create mode 100644 include/dt-bindings/power/r8a7792-sysc.h create mode 100644 include/dt-bindings/power/r8a7793-sysc.h create mode 100644 include/dt-bindings/power/r8a7794-sysc.h create mode 100644 include/dt-bindings/power/r8a7795-sysc.h create mode 100644 include/dt-bindings/power/r8a7796-sysc.h create mode 100644 include/dt-bindings/power/r8a77965-sysc.h create mode 100644 include/dt-bindings/power/r8a77970-sysc.h create mode 100644 include/dt-bindings/power/r8a77980-sysc.h create mode 100644 include/dt-bindings/power/r8a77990-sysc.h create mode 100644 include/dt-bindings/power/r8a77995-sysc.h create mode 100644 include/dt-bindings/power/raspberrypi-power.h create mode 100644 include/dt-bindings/power/rk3036-power.h create mode 100644 include/dt-bindings/power/rk3128-power.h create mode 100644 include/dt-bindings/power/rk3228-power.h create mode 100644 include/dt-bindings/power/rk3288-power.h create mode 100644 include/dt-bindings/power/rk3328-power.h create mode 100644 include/dt-bindings/power/rk3366-power.h create mode 100644 include/dt-bindings/power/rk3368-power.h create mode 100644 include/dt-bindings/power/rk3399-power.h create mode 100644 include/dt-bindings/power/tegra186-powergate.h create mode 100644 include/dt-bindings/power/tegra194-powergate.h create 
mode 100644 include/dt-bindings/pwm/pwm.h create mode 100644 include/dt-bindings/regulator/maxim,max77802.h create mode 100644 include/dt-bindings/regulator/qcom,rpmh-regulator.h create mode 100644 include/dt-bindings/reset/altr,rst-mgr-a10.h create mode 100644 include/dt-bindings/reset/altr,rst-mgr-a10sr.h create mode 100644 include/dt-bindings/reset/altr,rst-mgr-s10.h create mode 100644 include/dt-bindings/reset/altr,rst-mgr.h create mode 100644 include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h create mode 100644 include/dt-bindings/reset/amlogic,meson-axg-reset.h create mode 100644 include/dt-bindings/reset/amlogic,meson-gxbb-reset.h create mode 100644 include/dt-bindings/reset/amlogic,meson8b-clkc-reset.h create mode 100644 include/dt-bindings/reset/amlogic,meson8b-reset.h create mode 100644 include/dt-bindings/reset/axg-aoclkc.h create mode 100644 include/dt-bindings/reset/cortina,gemini-reset.h create mode 100644 include/dt-bindings/reset/gxbb-aoclkc.h create mode 100644 include/dt-bindings/reset/hisi,hi6220-resets.h create mode 100644 include/dt-bindings/reset/imx7-reset.h create mode 100644 include/dt-bindings/reset/mt2701-resets.h create mode 100644 include/dt-bindings/reset/mt7622-reset.h create mode 100644 include/dt-bindings/reset/mt8135-resets.h create mode 100644 include/dt-bindings/reset/mt8173-resets.h create mode 100644 include/dt-bindings/reset/oxsemi,ox810se.h create mode 100644 include/dt-bindings/reset/oxsemi,ox820.h create mode 100644 include/dt-bindings/reset/pistachio-resets.h create mode 100644 include/dt-bindings/reset/qcom,gcc-apq8084.h create mode 100644 include/dt-bindings/reset/qcom,gcc-ipq806x.h create mode 100644 include/dt-bindings/reset/qcom,gcc-mdm9615.h create mode 100644 include/dt-bindings/reset/qcom,gcc-msm8660.h create mode 100644 include/dt-bindings/reset/qcom,gcc-msm8916.h create mode 100644 include/dt-bindings/reset/qcom,gcc-msm8960.h create mode 100644 include/dt-bindings/reset/qcom,gcc-msm8974.h create mode 100644 include/dt-bindings/reset/qcom,mmcc-apq8084.h create mode 100644 include/dt-bindings/reset/qcom,mmcc-msm8960.h create mode 100644 include/dt-bindings/reset/qcom,mmcc-msm8974.h create mode 100644 include/dt-bindings/reset/qcom,sdm845-aoss.h create mode 100644 include/dt-bindings/reset/snps,hsdk-reset.h create mode 100644 include/dt-bindings/reset/stih407-resets.h create mode 100644 include/dt-bindings/reset/stih415-resets.h create mode 100644 include/dt-bindings/reset/stih416-resets.h create mode 100644 include/dt-bindings/reset/stm32mp1-resets.h create mode 100644 include/dt-bindings/reset/sun4i-a10-ccu.h create mode 100644 include/dt-bindings/reset/sun50i-a64-ccu.h create mode 100644 include/dt-bindings/reset/sun50i-h6-ccu.h create mode 100644 include/dt-bindings/reset/sun50i-h6-r-ccu.h create mode 100644 include/dt-bindings/reset/sun5i-ccu.h create mode 100644 include/dt-bindings/reset/sun6i-a31-ccu.h create mode 100644 include/dt-bindings/reset/sun8i-a23-a33-ccu.h create mode 100644 include/dt-bindings/reset/sun8i-a83t-ccu.h create mode 100644 include/dt-bindings/reset/sun8i-de2.h create mode 100644 include/dt-bindings/reset/sun8i-h3-ccu.h create mode 100644 include/dt-bindings/reset/sun8i-r-ccu.h create mode 100644 include/dt-bindings/reset/sun8i-r40-ccu.h create mode 100644 include/dt-bindings/reset/sun8i-v3s-ccu.h create mode 100644 include/dt-bindings/reset/sun9i-a80-ccu.h create mode 100644 include/dt-bindings/reset/sun9i-a80-de.h create mode 100644 include/dt-bindings/reset/sun9i-a80-usb.h create mode 100644 
include/dt-bindings/reset/tegra124-car.h create mode 100644 include/dt-bindings/reset/tegra186-reset.h create mode 100644 include/dt-bindings/reset/tegra194-reset.h create mode 100644 include/dt-bindings/reset/tegra210-car.h create mode 100644 include/dt-bindings/reset/ti-syscon.h create mode 100644 include/dt-bindings/soc/qcom,apr.h create mode 100644 include/dt-bindings/soc/qcom,gsbi.h create mode 100644 include/dt-bindings/soc/qcom,rpmh-rsc.h create mode 100644 include/dt-bindings/soc/rockchip,boot-mode.h create mode 100644 include/dt-bindings/soc/zte,pm_domains.h create mode 100644 include/dt-bindings/sound/apq8016-lpass.h create mode 100644 include/dt-bindings/sound/audio-jack-events.h create mode 100644 include/dt-bindings/sound/cs35l32.h create mode 100644 include/dt-bindings/sound/cs42l42.h create mode 100644 include/dt-bindings/sound/fsl-imx-audmux.h create mode 100644 include/dt-bindings/sound/qcom,q6afe.h create mode 100644 include/dt-bindings/sound/qcom,q6asm.h create mode 100644 include/dt-bindings/sound/rt5640.h create mode 100644 include/dt-bindings/sound/rt5651.h create mode 100644 include/dt-bindings/sound/samsung-i2s.h create mode 100644 include/dt-bindings/sound/tas2552.h create mode 100644 include/dt-bindings/sound/tlv320aic31xx-micbias.h create mode 100644 include/dt-bindings/spmi/spmi.h create mode 100644 include/dt-bindings/thermal/lm90.h create mode 100644 include/dt-bindings/thermal/tegra124-soctherm.h create mode 100644 include/dt-bindings/thermal/tegra186-bpmp-thermal.h create mode 100644 include/dt-bindings/thermal/thermal.h create mode 100644 include/dt-bindings/thermal/thermal_exynos.h create mode 100644 include/dt-bindings/usb/pd.h create mode 100644 include/keys/asymmetric-parser.h create mode 100644 include/keys/asymmetric-subtype.h create mode 100644 include/keys/asymmetric-type.h create mode 100644 include/keys/big_key-type.h create mode 100644 include/keys/ceph-type.h create mode 100644 include/keys/dns_resolver-type.h create mode 100644 include/keys/encrypted-type.h create mode 100644 include/keys/keyring-type.h create mode 100644 include/keys/request_key_auth-type.h create mode 100644 include/keys/rxrpc-type.h create mode 100644 include/keys/system_keyring.h create mode 100644 include/keys/trusted-type.h create mode 100644 include/keys/user-type.h create mode 100644 include/kvm/arm_arch_timer.h create mode 100644 include/kvm/arm_pmu.h create mode 100644 include/kvm/arm_psci.h create mode 100644 include/kvm/arm_vgic.h create mode 100644 include/kvm/iodev.h create mode 100644 include/linux/8250_pci.h create mode 100644 include/linux/a.out.h create mode 100644 include/linux/acct.h create mode 100644 include/linux/acpi.h create mode 100644 include/linux/acpi_dma.h create mode 100644 include/linux/acpi_iort.h create mode 100644 include/linux/acpi_pmtmr.h create mode 100644 include/linux/adb.h create mode 100644 include/linux/adfs_fs.h create mode 100644 include/linux/aer.h create mode 100644 include/linux/agp_backend.h create mode 100644 include/linux/agpgart.h create mode 100644 include/linux/ahci-remap.h create mode 100644 include/linux/ahci_platform.h create mode 100644 include/linux/aio.h create mode 100644 include/linux/alarmtimer.h create mode 100644 include/linux/altera_jtaguart.h create mode 100644 include/linux/altera_uart.h create mode 100644 include/linux/amba/bus.h create mode 100644 include/linux/amba/clcd-regs.h create mode 100644 include/linux/amba/clcd.h create mode 100644 include/linux/amba/kmi.h create mode 100644 include/linux/amba/mmci.h 
create mode 100644 include/linux/amba/pl022.h create mode 100644 include/linux/amba/pl080.h create mode 100644 include/linux/amba/pl08x.h create mode 100644 include/linux/amba/pl093.h create mode 100644 include/linux/amba/serial.h create mode 100644 include/linux/amba/sp810.h create mode 100644 include/linux/amd-iommu.h create mode 100644 include/linux/amifd.h create mode 100644 include/linux/amifdreg.h create mode 100644 include/linux/anon_inodes.h create mode 100644 include/linux/apm-emulation.h create mode 100644 include/linux/apm_bios.h create mode 100644 include/linux/apple-gmux.h create mode 100644 include/linux/apple_bl.h create mode 100644 include/linux/arch_topology.h create mode 100644 include/linux/arm-cci.h create mode 100644 include/linux/arm-smccc.h create mode 100644 include/linux/arm_sdei.h create mode 100644 include/linux/ascii85.h create mode 100644 include/linux/asn1.h create mode 100644 include/linux/asn1_ber_bytecode.h create mode 100644 include/linux/asn1_decoder.h create mode 100644 include/linux/assoc_array.h create mode 100644 include/linux/assoc_array_priv.h create mode 100644 include/linux/async.h create mode 100644 include/linux/async_tx.h create mode 100644 include/linux/ata.h create mode 100644 include/linux/ata_platform.h create mode 100644 include/linux/atalk.h create mode 100644 include/linux/ath9k_platform.h create mode 100644 include/linux/atm.h create mode 100644 include/linux/atm_suni.h create mode 100644 include/linux/atm_tcp.h create mode 100644 include/linux/atmdev.h create mode 100644 include/linux/atmel-mci.h create mode 100644 include/linux/atmel-ssc.h create mode 100644 include/linux/atmel_pdc.h create mode 100644 include/linux/atmel_tc.h create mode 100644 include/linux/atomic.h create mode 100644 include/linux/attribute_container.h create mode 100644 include/linux/audit.h create mode 100644 include/linux/auto_dev-ioctl.h create mode 100644 include/linux/auto_fs.h create mode 100644 include/linux/auxvec.h create mode 100644 include/linux/average.h create mode 100644 include/linux/avf/virtchnl.h create mode 100644 include/linux/b1pcmcia.h create mode 100644 include/linux/backing-dev-defs.h create mode 100644 include/linux/backing-dev.h create mode 100644 include/linux/backlight.h create mode 100644 include/linux/badblocks.h create mode 100644 include/linux/balloon_compaction.h create mode 100644 include/linux/bcd.h create mode 100644 include/linux/bch.h create mode 100644 include/linux/bcm47xx_nvram.h create mode 100644 include/linux/bcm47xx_sprom.h create mode 100644 include/linux/bcm47xx_wdt.h create mode 100644 include/linux/bcm963xx_nvram.h create mode 100644 include/linux/bcm963xx_tag.h create mode 100644 include/linux/bcma/bcma.h create mode 100644 include/linux/bcma/bcma_driver_arm_c9.h create mode 100644 include/linux/bcma/bcma_driver_chipcommon.h create mode 100644 include/linux/bcma/bcma_driver_gmac_cmn.h create mode 100644 include/linux/bcma/bcma_driver_mips.h create mode 100644 include/linux/bcma/bcma_driver_pci.h create mode 100644 include/linux/bcma/bcma_driver_pcie2.h create mode 100644 include/linux/bcma/bcma_regs.h create mode 100644 include/linux/bcma/bcma_soc.h create mode 100644 include/linux/binfmts.h create mode 100644 include/linux/bio.h create mode 100644 include/linux/bit_spinlock.h create mode 100644 include/linux/bitfield.h create mode 100644 include/linux/bitmap.h create mode 100644 include/linux/bitops.h create mode 100644 include/linux/bitrev.h create mode 100644 include/linux/bits.h create mode 100644 
include/linux/blk-cgroup.h create mode 100644 include/linux/blk-mq-pci.h create mode 100644 include/linux/blk-mq-rdma.h create mode 100644 include/linux/blk-mq-virtio.h create mode 100644 include/linux/blk-mq.h create mode 100644 include/linux/blk_types.h create mode 100644 include/linux/blkdev.h create mode 100644 include/linux/blkpg.h create mode 100644 include/linux/blktrace_api.h create mode 100644 include/linux/blockgroup_lock.h create mode 100644 include/linux/bma150.h create mode 100644 include/linux/bootmem.h create mode 100644 include/linux/bottom_half.h create mode 100644 include/linux/bpf-cgroup.h create mode 100644 include/linux/bpf.h create mode 100644 include/linux/bpf_lirc.h create mode 100644 include/linux/bpf_trace.h create mode 100644 include/linux/bpf_types.h create mode 100644 include/linux/bpf_verifier.h create mode 100644 include/linux/bpfilter.h create mode 100644 include/linux/brcmphy.h create mode 100644 include/linux/bsearch.h create mode 100644 include/linux/bsg-lib.h create mode 100644 include/linux/bsg.h create mode 100644 include/linux/btf.h create mode 100644 include/linux/btree-128.h create mode 100644 include/linux/btree-type.h create mode 100644 include/linux/btree.h create mode 100644 include/linux/btrfs.h create mode 100644 include/linux/buffer_head.h create mode 100644 include/linux/bug.h create mode 100644 include/linux/build-salt.h create mode 100644 include/linux/build_bug.h create mode 100644 include/linux/bvec.h create mode 100644 include/linux/byteorder/big_endian.h create mode 100644 include/linux/byteorder/generic.h create mode 100644 include/linux/byteorder/little_endian.h create mode 100644 include/linux/c2port.h create mode 100644 include/linux/cache.h create mode 100644 include/linux/cacheinfo.h create mode 100644 include/linux/can/core.h create mode 100644 include/linux/can/dev.h create mode 100644 include/linux/can/dev/peak_canfd.h create mode 100644 include/linux/can/led.h create mode 100644 include/linux/can/platform/cc770.h create mode 100644 include/linux/can/platform/mcp251x.h create mode 100644 include/linux/can/platform/rcar_can.h create mode 100644 include/linux/can/platform/sja1000.h create mode 100644 include/linux/can/rx-offload.h create mode 100644 include/linux/can/skb.h create mode 100644 include/linux/capability.h create mode 100644 include/linux/cb710.h create mode 100644 include/linux/cciss_ioctl.h create mode 100644 include/linux/ccp.h create mode 100644 include/linux/cdev.h create mode 100644 include/linux/cdrom.h create mode 100644 include/linux/ceph/auth.h create mode 100644 include/linux/ceph/buffer.h create mode 100644 include/linux/ceph/ceph_debug.h create mode 100644 include/linux/ceph/ceph_features.h create mode 100644 include/linux/ceph/ceph_frag.h create mode 100644 include/linux/ceph/ceph_fs.h create mode 100644 include/linux/ceph/ceph_hash.h create mode 100644 include/linux/ceph/cls_lock_client.h create mode 100644 include/linux/ceph/debugfs.h create mode 100644 include/linux/ceph/decode.h create mode 100644 include/linux/ceph/libceph.h create mode 100644 include/linux/ceph/mdsmap.h create mode 100644 include/linux/ceph/messenger.h create mode 100644 include/linux/ceph/mon_client.h create mode 100644 include/linux/ceph/msgpool.h create mode 100644 include/linux/ceph/msgr.h create mode 100644 include/linux/ceph/osd_client.h create mode 100644 include/linux/ceph/osdmap.h create mode 100644 include/linux/ceph/pagelist.h create mode 100644 include/linux/ceph/rados.h create mode 100644 
include/linux/ceph/string_table.h create mode 100644 include/linux/ceph/striper.h create mode 100644 include/linux/ceph/types.h create mode 100644 include/linux/cfag12864b.h create mode 100644 include/linux/cgroup-defs.h create mode 100644 include/linux/cgroup.h create mode 100644 include/linux/cgroup_rdma.h create mode 100644 include/linux/cgroup_subsys.h create mode 100644 include/linux/circ_buf.h create mode 100644 include/linux/cleancache.h create mode 100644 include/linux/clk-provider.h create mode 100644 include/linux/clk.h create mode 100644 include/linux/clk/at91_pmc.h create mode 100644 include/linux/clk/clk-conf.h create mode 100644 include/linux/clk/davinci.h create mode 100644 include/linux/clk/mmp.h create mode 100644 include/linux/clk/mxs.h create mode 100644 include/linux/clk/renesas.h create mode 100644 include/linux/clk/sunxi-ng.h create mode 100644 include/linux/clk/tegra.h create mode 100644 include/linux/clk/ti.h create mode 100644 include/linux/clk/zynq.h create mode 100644 include/linux/clkdev.h create mode 100644 include/linux/clock_cooling.h create mode 100644 include/linux/clockchips.h create mode 100644 include/linux/clocksource.h create mode 100644 include/linux/cm4000_cs.h create mode 100644 include/linux/cma.h create mode 100644 include/linux/cmdline-parser.h create mode 100644 include/linux/cn_proc.h create mode 100644 include/linux/cnt32_to_63.h create mode 100644 include/linux/coda.h create mode 100644 include/linux/coda_psdev.h create mode 100644 include/linux/compaction.h create mode 100644 include/linux/compat.h create mode 100644 include/linux/compat_time.h create mode 100644 include/linux/compiler-clang.h create mode 100644 include/linux/compiler-gcc.h create mode 100644 include/linux/compiler-intel.h create mode 100644 include/linux/compiler.h create mode 100644 include/linux/compiler_types.h create mode 100644 include/linux/completion.h create mode 100644 include/linux/component.h create mode 100644 include/linux/concap.h create mode 100644 include/linux/configfs.h create mode 100644 include/linux/connector.h create mode 100644 include/linux/console.h create mode 100644 include/linux/console_struct.h create mode 100644 include/linux/consolemap.h create mode 100644 include/linux/const.h create mode 100644 include/linux/container.h create mode 100644 include/linux/context_tracking.h create mode 100644 include/linux/context_tracking_state.h create mode 100644 include/linux/cordic.h create mode 100644 include/linux/coredump.h create mode 100644 include/linux/coresight-pmu.h create mode 100644 include/linux/coresight-stm.h create mode 100644 include/linux/coresight.h create mode 100644 include/linux/count_zeros.h create mode 100644 include/linux/cper.h create mode 100644 include/linux/cpu.h create mode 100644 include/linux/cpu_cooling.h create mode 100644 include/linux/cpu_pm.h create mode 100644 include/linux/cpu_rmap.h create mode 100644 include/linux/cpufeature.h create mode 100644 include/linux/cpufreq.h create mode 100644 include/linux/cpuhotplug.h create mode 100644 include/linux/cpuidle.h create mode 100644 include/linux/cpumask.h create mode 100644 include/linux/cpuset.h create mode 100644 include/linux/crash_core.h create mode 100644 include/linux/crash_dump.h create mode 100644 include/linux/crc-ccitt.h create mode 100644 include/linux/crc-itu-t.h create mode 100644 include/linux/crc-t10dif.h create mode 100644 include/linux/crc16.h create mode 100644 include/linux/crc32.h create mode 100644 include/linux/crc32c.h create mode 100644 
include/linux/crc32poly.h create mode 100644 include/linux/crc4.h create mode 100644 include/linux/crc64.h create mode 100644 include/linux/crc7.h create mode 100644 include/linux/crc8.h create mode 100644 include/linux/cred.h create mode 100644 include/linux/crush/crush.h create mode 100644 include/linux/crush/hash.h create mode 100644 include/linux/crush/mapper.h create mode 100644 include/linux/crypto.h create mode 100644 include/linux/cryptohash.h create mode 100644 include/linux/cs5535.h create mode 100644 include/linux/ctype.h create mode 100644 include/linux/cuda.h create mode 100644 include/linux/cyclades.h create mode 100644 include/linux/davinci_emac.h create mode 100644 include/linux/dax.h create mode 100644 include/linux/dca.h create mode 100644 include/linux/dcache.h create mode 100644 include/linux/dccp.h create mode 100644 include/linux/dcookies.h create mode 100644 include/linux/debug_locks.h create mode 100644 include/linux/debugfs.h create mode 100644 include/linux/debugobjects.h create mode 100644 include/linux/decompress/bunzip2.h create mode 100644 include/linux/decompress/generic.h create mode 100644 include/linux/decompress/inflate.h create mode 100644 include/linux/decompress/mm.h create mode 100644 include/linux/decompress/unlz4.h create mode 100644 include/linux/decompress/unlzma.h create mode 100644 include/linux/decompress/unlzo.h create mode 100644 include/linux/decompress/unxz.h create mode 100644 include/linux/delay.h create mode 100644 include/linux/delayacct.h create mode 100644 include/linux/delayed_call.h create mode 100644 include/linux/dell-led.h create mode 100644 include/linux/devcoredump.h create mode 100644 include/linux/devfreq-event.h create mode 100644 include/linux/devfreq.h create mode 100644 include/linux/devfreq_cooling.h create mode 100644 include/linux/device-mapper.h create mode 100644 include/linux/device.h create mode 100644 include/linux/device_cgroup.h create mode 100644 include/linux/devpts_fs.h create mode 100644 include/linux/digsig.h create mode 100644 include/linux/dio.h create mode 100644 include/linux/dirent.h create mode 100644 include/linux/dlm.h create mode 100644 include/linux/dlm_plock.h create mode 100644 include/linux/dm-bufio.h create mode 100644 include/linux/dm-dirty-log.h create mode 100644 include/linux/dm-io.h create mode 100644 include/linux/dm-kcopyd.h create mode 100644 include/linux/dm-region-hash.h create mode 100644 include/linux/dm9000.h create mode 100644 include/linux/dma-buf.h create mode 100644 include/linux/dma-contiguous.h create mode 100644 include/linux/dma-debug.h create mode 100644 include/linux/dma-direct.h create mode 100644 include/linux/dma-direction.h create mode 100644 include/linux/dma-fence-array.h create mode 100644 include/linux/dma-fence.h create mode 100644 include/linux/dma-iommu.h create mode 100644 include/linux/dma-mapping.h create mode 100644 include/linux/dma-noncoherent.h create mode 100644 include/linux/dma/dw.h create mode 100644 include/linux/dma/hsu.h create mode 100644 include/linux/dma/ipu-dma.h create mode 100644 include/linux/dma/mmp-pdma.h create mode 100644 include/linux/dma/pxa-dma.h create mode 100644 include/linux/dma/qcom_bam_dma.h create mode 100644 include/linux/dma/sprd-dma.h create mode 100644 include/linux/dma/xilinx_dma.h create mode 100644 include/linux/dma_remapping.h create mode 100644 include/linux/dmaengine.h create mode 100644 include/linux/dmapool.h create mode 100644 include/linux/dmar.h create mode 100644 include/linux/dmi.h create mode 100644 
include/linux/dnotify.h create mode 100644 include/linux/dns_resolver.h create mode 100644 include/linux/dqblk_qtree.h create mode 100644 include/linux/dqblk_v1.h create mode 100644 include/linux/dqblk_v2.h create mode 100644 include/linux/drbd.h create mode 100644 include/linux/drbd_genl.h create mode 100644 include/linux/drbd_genl_api.h create mode 100644 include/linux/drbd_limits.h create mode 100644 include/linux/ds2782_battery.h create mode 100644 include/linux/dsa/lan9303.h create mode 100644 include/linux/dtlk.h create mode 100644 include/linux/dw_apb_timer.h create mode 100644 include/linux/dynamic_debug.h create mode 100644 include/linux/dynamic_queue_limits.h create mode 100644 include/linux/earlycpio.h create mode 100644 include/linux/ecryptfs.h create mode 100644 include/linux/edac.h create mode 100644 include/linux/edd.h create mode 100644 include/linux/edma.h create mode 100644 include/linux/eeprom_93cx6.h create mode 100644 include/linux/eeprom_93xx46.h create mode 100644 include/linux/efi-bgrt.h create mode 100644 include/linux/efi.h create mode 100644 include/linux/efs_vh.h create mode 100644 include/linux/eisa.h create mode 100644 include/linux/elevator.h create mode 100644 include/linux/elf-fdpic.h create mode 100644 include/linux/elf-randomize.h create mode 100644 include/linux/elf.h create mode 100644 include/linux/elfcore-compat.h create mode 100644 include/linux/elfcore.h create mode 100644 include/linux/elfnote.h create mode 100644 include/linux/enclosure.h create mode 100644 include/linux/err.h create mode 100644 include/linux/errno.h create mode 100644 include/linux/error-injection.h create mode 100644 include/linux/errqueue.h create mode 100644 include/linux/errseq.h create mode 100644 include/linux/etherdevice.h create mode 100644 include/linux/ethtool.h create mode 100644 include/linux/eventfd.h create mode 100644 include/linux/eventpoll.h create mode 100644 include/linux/evm.h create mode 100644 include/linux/export.h create mode 100644 include/linux/exportfs.h create mode 100644 include/linux/ext2_fs.h create mode 100644 include/linux/extable.h create mode 100644 include/linux/extcon-provider.h create mode 100644 include/linux/extcon.h create mode 100644 include/linux/extcon/extcon-adc-jack.h create mode 100644 include/linux/f2fs_fs.h create mode 100644 include/linux/f75375s.h create mode 100644 include/linux/falloc.h create mode 100644 include/linux/fanotify.h create mode 100644 include/linux/fault-inject.h create mode 100644 include/linux/fb.h create mode 100644 include/linux/fbcon.h create mode 100644 include/linux/fcdevice.h create mode 100644 include/linux/fcntl.h create mode 100644 include/linux/fd.h create mode 100644 include/linux/fddidevice.h create mode 100644 include/linux/fdtable.h create mode 100644 include/linux/fec.h create mode 100644 include/linux/file.h create mode 100644 include/linux/filter.h create mode 100644 include/linux/fips.h create mode 100644 include/linux/firewire.h create mode 100644 include/linux/firmware-map.h create mode 100644 include/linux/firmware.h create mode 100644 include/linux/firmware/meson/meson_sm.h create mode 100644 include/linux/fixp-arith.h create mode 100644 include/linux/flat.h create mode 100644 include/linux/flex_array.h create mode 100644 include/linux/flex_proportions.h create mode 100644 include/linux/fmc-sdb.h create mode 100644 include/linux/fmc.h create mode 100644 include/linux/font.h create mode 100644 include/linux/fpga/altera-pr-ip-core.h create mode 100644 include/linux/fpga/fpga-bridge.h create 
mode 100644 include/linux/fpga/fpga-mgr.h create mode 100644 include/linux/fpga/fpga-region.h create mode 100644 include/linux/frame.h create mode 100644 include/linux/freezer.h create mode 100644 include/linux/frontswap.h create mode 100644 include/linux/fs.h create mode 100644 include/linux/fs_enet_pd.h create mode 100644 include/linux/fs_pin.h create mode 100644 include/linux/fs_stack.h create mode 100644 include/linux/fs_struct.h create mode 100644 include/linux/fs_uart_pd.h create mode 100644 include/linux/fscache-cache.h create mode 100644 include/linux/fscache.h create mode 100644 include/linux/fscrypt.h create mode 100644 include/linux/fscrypt_notsupp.h create mode 100644 include/linux/fscrypt_supp.h create mode 100644 include/linux/fsi-sbefifo.h create mode 100644 include/linux/fsi.h create mode 100644 include/linux/fsl-diu-fb.h create mode 100644 include/linux/fsl/bestcomm/ata.h create mode 100644 include/linux/fsl/bestcomm/bestcomm.h create mode 100644 include/linux/fsl/bestcomm/bestcomm_priv.h create mode 100644 include/linux/fsl/bestcomm/fec.h create mode 100644 include/linux/fsl/bestcomm/gen_bd.h create mode 100644 include/linux/fsl/bestcomm/sram.h create mode 100644 include/linux/fsl/edac.h create mode 100644 include/linux/fsl/guts.h create mode 100644 include/linux/fsl/mc.h create mode 100644 include/linux/fsl/ptp_qoriq.h create mode 100644 include/linux/fsl_devices.h create mode 100644 include/linux/fsl_hypervisor.h create mode 100644 include/linux/fsl_ifc.h create mode 100644 include/linux/fsldma.h create mode 100644 include/linux/fsnotify.h create mode 100644 include/linux/fsnotify_backend.h create mode 100644 include/linux/ftrace.h create mode 100644 include/linux/ftrace_irq.h create mode 100644 include/linux/futex.h create mode 100644 include/linux/fwnode.h create mode 100644 include/linux/gameport.h create mode 100644 include/linux/gcd.h create mode 100644 include/linux/genalloc.h create mode 100644 include/linux/genetlink.h create mode 100644 include/linux/genhd.h create mode 100644 include/linux/genl_magic_func.h create mode 100644 include/linux/genl_magic_struct.h create mode 100644 include/linux/getcpu.h create mode 100644 include/linux/gfp.h create mode 100644 include/linux/glob.h create mode 100644 include/linux/gnss.h create mode 100644 include/linux/goldfish.h create mode 100644 include/linux/gpio-pxa.h create mode 100644 include/linux/gpio.h create mode 100644 include/linux/gpio/aspeed.h create mode 100644 include/linux/gpio/consumer.h create mode 100644 include/linux/gpio/driver.h create mode 100644 include/linux/gpio/gpio-reg.h create mode 100644 include/linux/gpio/machine.h create mode 100644 include/linux/gpio_keys.h create mode 100644 include/linux/hardirq.h create mode 100644 include/linux/hash.h create mode 100644 include/linux/hashtable.h create mode 100644 include/linux/hdlc.h create mode 100644 include/linux/hdlcdrv.h create mode 100644 include/linux/hdmi.h create mode 100644 include/linux/hid-debug.h create mode 100644 include/linux/hid-roccat.h create mode 100644 include/linux/hid-sensor-hub.h create mode 100644 include/linux/hid-sensor-ids.h create mode 100644 include/linux/hid.h create mode 100644 include/linux/hiddev.h create mode 100644 include/linux/hidraw.h create mode 100644 include/linux/highmem.h create mode 100644 include/linux/highuid.h create mode 100644 include/linux/hil.h create mode 100644 include/linux/hil_mlc.h create mode 100644 include/linux/hippidevice.h create mode 100644 include/linux/hmm.h create mode 100644 
include/linux/host1x.h create mode 100644 include/linux/hp_sdc.h create mode 100644 include/linux/hpet.h create mode 100644 include/linux/hrtimer.h create mode 100644 include/linux/hsi/hsi.h create mode 100644 include/linux/hsi/ssi_protocol.h create mode 100644 include/linux/htcpld.h create mode 100644 include/linux/huge_mm.h create mode 100644 include/linux/hugetlb.h create mode 100644 include/linux/hugetlb_cgroup.h create mode 100644 include/linux/hugetlb_inline.h create mode 100644 include/linux/hw_breakpoint.h create mode 100644 include/linux/hw_random.h create mode 100644 include/linux/hwmon-sysfs.h create mode 100644 include/linux/hwmon-vid.h create mode 100644 include/linux/hwmon.h create mode 100644 include/linux/hwspinlock.h create mode 100644 include/linux/hyperv.h create mode 100644 include/linux/hypervisor.h create mode 100644 include/linux/i2c-algo-bit.h create mode 100644 include/linux/i2c-algo-pca.h create mode 100644 include/linux/i2c-algo-pcf.h create mode 100644 include/linux/i2c-dev.h create mode 100644 include/linux/i2c-mux.h create mode 100644 include/linux/i2c-pxa.h create mode 100644 include/linux/i2c-smbus.h create mode 100644 include/linux/i2c.h create mode 100644 include/linux/i8042.h create mode 100644 include/linux/i8253.h create mode 100644 include/linux/icmp.h create mode 100644 include/linux/icmpv6.h create mode 100644 include/linux/ide.h create mode 100644 include/linux/idle_inject.h create mode 100644 include/linux/idr.h create mode 100644 include/linux/ieee80211.h create mode 100644 include/linux/ieee802154.h create mode 100644 include/linux/if_arp.h create mode 100644 include/linux/if_bridge.h create mode 100644 include/linux/if_eql.h create mode 100644 include/linux/if_ether.h create mode 100644 include/linux/if_fddi.h create mode 100644 include/linux/if_frad.h create mode 100644 include/linux/if_link.h create mode 100644 include/linux/if_ltalk.h create mode 100644 include/linux/if_macvlan.h create mode 100644 include/linux/if_phonet.h create mode 100644 include/linux/if_pppol2tp.h create mode 100644 include/linux/if_pppox.h create mode 100644 include/linux/if_tap.h create mode 100644 include/linux/if_team.h create mode 100644 include/linux/if_tun.h create mode 100644 include/linux/if_tunnel.h create mode 100644 include/linux/if_vlan.h create mode 100644 include/linux/igmp.h create mode 100644 include/linux/ihex.h create mode 100644 include/linux/iio/accel/kxcjk_1013.h create mode 100644 include/linux/iio/adc/ad_sigma_delta.h create mode 100644 include/linux/iio/adc/stm32-dfsdm-adc.h create mode 100644 include/linux/iio/buffer-dma.h create mode 100644 include/linux/iio/buffer-dmaengine.h create mode 100644 include/linux/iio/buffer.h create mode 100644 include/linux/iio/buffer_impl.h create mode 100644 include/linux/iio/common/cros_ec_sensors_core.h create mode 100644 include/linux/iio/common/ssp_sensors.h create mode 100644 include/linux/iio/common/st_sensors.h create mode 100644 include/linux/iio/common/st_sensors_i2c.h create mode 100644 include/linux/iio/common/st_sensors_spi.h create mode 100644 include/linux/iio/configfs.h create mode 100644 include/linux/iio/consumer.h create mode 100644 include/linux/iio/dac/ad5421.h create mode 100644 include/linux/iio/dac/ad5504.h create mode 100644 include/linux/iio/dac/ad5791.h create mode 100644 include/linux/iio/dac/max517.h create mode 100644 include/linux/iio/dac/mcp4725.h create mode 100644 include/linux/iio/driver.h create mode 100644 include/linux/iio/events.h create mode 100644 
include/linux/iio/frequency/ad9523.h create mode 100644 include/linux/iio/frequency/adf4350.h create mode 100644 include/linux/iio/gyro/itg3200.h create mode 100644 include/linux/iio/hw-consumer.h create mode 100644 include/linux/iio/iio.h create mode 100644 include/linux/iio/imu/adis.h create mode 100644 include/linux/iio/kfifo_buf.h create mode 100644 include/linux/iio/machine.h create mode 100644 include/linux/iio/magnetometer/ak8975.h create mode 100644 include/linux/iio/sw_device.h create mode 100644 include/linux/iio/sw_trigger.h create mode 100644 include/linux/iio/sysfs.h create mode 100644 include/linux/iio/timer/stm32-lptim-trigger.h create mode 100644 include/linux/iio/timer/stm32-timer-trigger.h create mode 100644 include/linux/iio/trigger.h create mode 100644 include/linux/iio/trigger_consumer.h create mode 100644 include/linux/iio/triggered_buffer.h create mode 100644 include/linux/iio/triggered_event.h create mode 100644 include/linux/iio/types.h create mode 100644 include/linux/ima.h create mode 100644 include/linux/imx-media.h create mode 100644 include/linux/in.h create mode 100644 include/linux/in6.h create mode 100644 include/linux/inet.h create mode 100644 include/linux/inet_diag.h create mode 100644 include/linux/inetdevice.h create mode 100644 include/linux/init.h create mode 100644 include/linux/init_ohci1394_dma.h create mode 100644 include/linux/init_task.h create mode 100644 include/linux/initrd.h create mode 100644 include/linux/inotify.h create mode 100644 include/linux/input-polldev.h create mode 100644 include/linux/input.h create mode 100644 include/linux/input/ad714x.h create mode 100644 include/linux/input/adp5589.h create mode 100644 include/linux/input/adxl34x.h create mode 100644 include/linux/input/as5011.h create mode 100644 include/linux/input/auo-pixcir-ts.h create mode 100644 include/linux/input/bu21013.h create mode 100644 include/linux/input/cma3000.h create mode 100644 include/linux/input/cy8ctmg110_pdata.h create mode 100644 include/linux/input/cyttsp.h create mode 100644 include/linux/input/gp2ap002a00f.h create mode 100644 include/linux/input/ili210x.h create mode 100644 include/linux/input/kxtj9.h create mode 100644 include/linux/input/lm8333.h create mode 100644 include/linux/input/matrix_keypad.h create mode 100644 include/linux/input/mt.h create mode 100644 include/linux/input/navpoint.h create mode 100644 include/linux/input/samsung-keypad.h create mode 100644 include/linux/input/sh_keysc.h create mode 100644 include/linux/input/sparse-keymap.h create mode 100644 include/linux/input/touchscreen.h create mode 100644 include/linux/input/tps6507x-ts.h create mode 100644 include/linux/integrity.h create mode 100644 include/linux/intel-iommu.h create mode 100644 include/linux/intel-pti.h create mode 100644 include/linux/intel-svm.h create mode 100644 include/linux/interrupt.h create mode 100644 include/linux/interval_tree.h create mode 100644 include/linux/interval_tree_generic.h create mode 100644 include/linux/io-64-nonatomic-hi-lo.h create mode 100644 include/linux/io-64-nonatomic-lo-hi.h create mode 100644 include/linux/io-mapping.h create mode 100644 include/linux/io.h create mode 100644 include/linux/ioc3.h create mode 100644 include/linux/ioc4.h create mode 100644 include/linux/iocontext.h create mode 100644 include/linux/iomap.h create mode 100644 include/linux/iommu-helper.h create mode 100644 include/linux/iommu.h create mode 100644 include/linux/iopoll.h create mode 100644 include/linux/ioport.h create mode 100644 
include/linux/ioprio.h create mode 100644 include/linux/iova.h create mode 100644 include/linux/ip.h create mode 100644 include/linux/ipack.h create mode 100644 include/linux/ipc.h create mode 100644 include/linux/ipc_namespace.h create mode 100644 include/linux/ipmi-fru.h create mode 100644 include/linux/ipmi.h create mode 100644 include/linux/ipmi_smi.h create mode 100644 include/linux/ipv6.h create mode 100644 include/linux/ipv6_route.h create mode 100644 include/linux/irq.h create mode 100644 include/linux/irq_cpustat.h create mode 100644 include/linux/irq_poll.h create mode 100644 include/linux/irq_sim.h create mode 100644 include/linux/irq_work.h create mode 100644 include/linux/irqbypass.h create mode 100644 include/linux/irqchip.h create mode 100644 include/linux/irqchip/arm-gic-common.h create mode 100644 include/linux/irqchip/arm-gic-v3.h create mode 100644 include/linux/irqchip/arm-gic-v4.h create mode 100644 include/linux/irqchip/arm-gic.h create mode 100644 include/linux/irqchip/arm-vic.h create mode 100644 include/linux/irqchip/chained_irq.h create mode 100644 include/linux/irqchip/ingenic.h create mode 100644 include/linux/irqchip/irq-bcm2836.h create mode 100644 include/linux/irqchip/irq-omap-intc.h create mode 100644 include/linux/irqchip/irq-partition-percpu.h create mode 100644 include/linux/irqchip/irq-sa11x0.h create mode 100644 include/linux/irqchip/mmp.h create mode 100644 include/linux/irqchip/mxs.h create mode 100644 include/linux/irqchip/versatile-fpga.h create mode 100644 include/linux/irqchip/xtensa-mx.h create mode 100644 include/linux/irqchip/xtensa-pic.h create mode 100644 include/linux/irqdesc.h create mode 100644 include/linux/irqdomain.h create mode 100644 include/linux/irqflags.h create mode 100644 include/linux/irqhandler.h create mode 100644 include/linux/irqnr.h create mode 100644 include/linux/irqreturn.h create mode 100644 include/linux/isa.h create mode 100644 include/linux/isapnp.h create mode 100644 include/linux/iscsi_boot_sysfs.h create mode 100644 include/linux/iscsi_ibft.h create mode 100644 include/linux/isdn.h create mode 100644 include/linux/isdn/capilli.h create mode 100644 include/linux/isdn/capiutil.h create mode 100644 include/linux/isdn/hdlc.h create mode 100644 include/linux/isdn_divertif.h create mode 100644 include/linux/isdn_ppp.h create mode 100644 include/linux/isdnif.h create mode 100644 include/linux/isicom.h create mode 100644 include/linux/iversion.h create mode 100644 include/linux/jbd2.h create mode 100644 include/linux/jhash.h create mode 100644 include/linux/jiffies.h create mode 100644 include/linux/journal-head.h create mode 100644 include/linux/joystick.h create mode 100644 include/linux/jump_label.h create mode 100644 include/linux/jump_label_ratelimit.h create mode 100644 include/linux/jz4740-adc.h create mode 100644 include/linux/jz4780-nemc.h create mode 100644 include/linux/kallsyms.h create mode 100644 include/linux/kasan-checks.h create mode 100644 include/linux/kasan.h create mode 100644 include/linux/kbd_diacr.h create mode 100644 include/linux/kbd_kern.h create mode 100644 include/linux/kbuild.h create mode 100644 include/linux/kconfig.h create mode 100644 include/linux/kcore.h create mode 100644 include/linux/kcov.h create mode 100644 include/linux/kd.h create mode 100644 include/linux/kdb.h create mode 100644 include/linux/kdebug.h create mode 100644 include/linux/kdev_t.h create mode 100644 include/linux/kern_levels.h create mode 100644 include/linux/kernel-page-flags.h create mode 100644 
include/linux/kernel.h create mode 100644 include/linux/kernel_stat.h create mode 100644 include/linux/kernelcapi.h create mode 100644 include/linux/kernfs.h create mode 100644 include/linux/kexec.h create mode 100644 include/linux/key-type.h create mode 100644 include/linux/key.h create mode 100644 include/linux/keyboard.h create mode 100644 include/linux/kfifo.h create mode 100644 include/linux/kgdb.h create mode 100644 include/linux/khugepaged.h create mode 100644 include/linux/klist.h create mode 100644 include/linux/kmemleak.h create mode 100644 include/linux/kmod.h create mode 100644 include/linux/kmsg_dump.h create mode 100644 include/linux/kobj_map.h create mode 100644 include/linux/kobject.h create mode 100644 include/linux/kobject_ns.h create mode 100644 include/linux/kprobes.h create mode 100644 include/linux/kref.h create mode 100644 include/linux/ks0108.h create mode 100644 include/linux/ks8842.h create mode 100644 include/linux/ks8851_mll.h create mode 100644 include/linux/ksm.h create mode 100644 include/linux/kthread.h create mode 100644 include/linux/ktime.h create mode 100644 include/linux/kvm_host.h create mode 100644 include/linux/kvm_irqfd.h create mode 100644 include/linux/kvm_para.h create mode 100644 include/linux/kvm_types.h create mode 100644 include/linux/l2tp.h create mode 100644 include/linux/lapb.h create mode 100644 include/linux/latencytop.h create mode 100644 include/linux/lcd.h create mode 100644 include/linux/lcm.h create mode 100644 include/linux/led-class-flash.h create mode 100644 include/linux/led-lm3530.h create mode 100644 include/linux/leds-bd2802.h create mode 100644 include/linux/leds-lp3944.h create mode 100644 include/linux/leds-lp3952.h create mode 100644 include/linux/leds-pca9532.h create mode 100644 include/linux/leds-regulator.h create mode 100644 include/linux/leds-tca6507.h create mode 100644 include/linux/leds.h create mode 100644 include/linux/leds_pwm.h create mode 100644 include/linux/libata.h create mode 100644 include/linux/libfdt.h create mode 100644 include/linux/libfdt_env.h create mode 100644 include/linux/libgcc.h create mode 100644 include/linux/libnvdimm.h create mode 100644 include/linux/libps2.h create mode 100644 include/linux/license.h create mode 100644 include/linux/lightnvm.h create mode 100644 include/linux/linkage.h create mode 100644 include/linux/linux_logo.h create mode 100644 include/linux/lis3lv02d.h create mode 100644 include/linux/list.h create mode 100644 include/linux/list_bl.h create mode 100644 include/linux/list_lru.h create mode 100644 include/linux/list_nulls.h create mode 100644 include/linux/list_sort.h create mode 100644 include/linux/livepatch.h create mode 100644 include/linux/llc.h create mode 100644 include/linux/llist.h create mode 100644 include/linux/lockd/bind.h create mode 100644 include/linux/lockd/debug.h create mode 100644 include/linux/lockd/lockd.h create mode 100644 include/linux/lockd/nlm.h create mode 100644 include/linux/lockd/share.h create mode 100644 include/linux/lockd/xdr.h create mode 100644 include/linux/lockd/xdr4.h create mode 100644 include/linux/lockdep.h create mode 100644 include/linux/lockref.h create mode 100644 include/linux/log2.h create mode 100644 include/linux/logic_pio.h create mode 100644 include/linux/lp.h create mode 100644 include/linux/lru_cache.h create mode 100644 include/linux/lsm_audit.h create mode 100644 include/linux/lsm_hooks.h create mode 100644 include/linux/lz4.h create mode 100644 include/linux/lzo.h create mode 100644 include/linux/mISDNdsp.h 
create mode 100644 include/linux/mISDNhw.h create mode 100644 include/linux/mISDNif.h create mode 100644 include/linux/mailbox/brcm-message.h create mode 100644 include/linux/mailbox/mtk-cmdq-mailbox.h create mode 100644 include/linux/mailbox_client.h create mode 100644 include/linux/mailbox_controller.h create mode 100644 include/linux/maple.h create mode 100644 include/linux/marvell_phy.h create mode 100644 include/linux/math64.h create mode 100644 include/linux/max17040_battery.h create mode 100644 include/linux/mbcache.h create mode 100644 include/linux/mbus.h create mode 100644 include/linux/mc146818rtc.h create mode 100644 include/linux/mc6821.h create mode 100644 include/linux/mcb.h create mode 100644 include/linux/mdev.h create mode 100644 include/linux/mdio-bitbang.h create mode 100644 include/linux/mdio-gpio.h create mode 100644 include/linux/mdio-mux.h create mode 100644 include/linux/mdio.h create mode 100644 include/linux/mei_cl_bus.h create mode 100644 include/linux/mem_encrypt.h create mode 100644 include/linux/memblock.h create mode 100644 include/linux/memcontrol.h create mode 100644 include/linux/memfd.h create mode 100644 include/linux/memory.h create mode 100644 include/linux/memory_hotplug.h create mode 100644 include/linux/mempolicy.h create mode 100644 include/linux/mempool.h create mode 100644 include/linux/memremap.h create mode 100644 include/linux/memstick.h create mode 100644 include/linux/mfd/88pm80x.h create mode 100644 include/linux/mfd/88pm860x.h create mode 100644 include/linux/mfd/aat2870.h create mode 100644 include/linux/mfd/ab3100.h create mode 100644 include/linux/mfd/abx500.h create mode 100644 include/linux/mfd/abx500/ab8500-bm.h create mode 100644 include/linux/mfd/abx500/ab8500-codec.h create mode 100644 include/linux/mfd/abx500/ab8500-gpadc.h create mode 100644 include/linux/mfd/abx500/ab8500-sysctrl.h create mode 100644 include/linux/mfd/abx500/ab8500.h create mode 100644 include/linux/mfd/abx500/ux500_chargalg.h create mode 100644 include/linux/mfd/ac100.h create mode 100644 include/linux/mfd/adp5520.h create mode 100644 include/linux/mfd/altera-a10sr.h create mode 100644 include/linux/mfd/arizona/core.h create mode 100644 include/linux/mfd/arizona/pdata.h create mode 100644 include/linux/mfd/arizona/registers.h create mode 100644 include/linux/mfd/as3711.h create mode 100644 include/linux/mfd/as3722.h create mode 100644 include/linux/mfd/asic3.h create mode 100644 include/linux/mfd/atmel-hlcdc.h create mode 100644 include/linux/mfd/axp20x.h create mode 100644 include/linux/mfd/bcm590xx.h create mode 100644 include/linux/mfd/bd9571mwv.h create mode 100644 include/linux/mfd/core.h create mode 100644 include/linux/mfd/cros_ec.h create mode 100644 include/linux/mfd/cros_ec_commands.h create mode 100644 include/linux/mfd/cros_ec_lpc_mec.h create mode 100644 include/linux/mfd/cros_ec_lpc_reg.h create mode 100644 include/linux/mfd/da8xx-cfgchip.h create mode 100644 include/linux/mfd/da903x.h create mode 100644 include/linux/mfd/da9052/da9052.h create mode 100644 include/linux/mfd/da9052/pdata.h create mode 100644 include/linux/mfd/da9052/reg.h create mode 100644 include/linux/mfd/da9055/core.h create mode 100644 include/linux/mfd/da9055/pdata.h create mode 100644 include/linux/mfd/da9055/reg.h create mode 100644 include/linux/mfd/da9062/core.h create mode 100644 include/linux/mfd/da9062/registers.h create mode 100644 include/linux/mfd/da9063/core.h create mode 100644 include/linux/mfd/da9063/pdata.h create mode 100644 
include/linux/mfd/da9063/registers.h create mode 100644 include/linux/mfd/da9150/core.h create mode 100644 include/linux/mfd/da9150/registers.h create mode 100644 include/linux/mfd/davinci_voicecodec.h create mode 100644 include/linux/mfd/db8500-prcmu.h create mode 100644 include/linux/mfd/dbx500-prcmu.h create mode 100644 include/linux/mfd/dln2.h create mode 100644 include/linux/mfd/dm355evm_msp.h create mode 100644 include/linux/mfd/ds1wm.h create mode 100644 include/linux/mfd/ezx-pcap.h create mode 100644 include/linux/mfd/hi6421-pmic.h create mode 100644 include/linux/mfd/hi655x-pmic.h create mode 100644 include/linux/mfd/htc-pasic3.h create mode 100644 include/linux/mfd/imx25-tsadc.h create mode 100644 include/linux/mfd/intel_msic.h create mode 100644 include/linux/mfd/intel_soc_pmic.h create mode 100644 include/linux/mfd/intel_soc_pmic_bxtwc.h create mode 100644 include/linux/mfd/ipaq-micro.h create mode 100644 include/linux/mfd/janz.h create mode 100644 include/linux/mfd/kempld.h create mode 100644 include/linux/mfd/lm3533.h create mode 100644 include/linux/mfd/lp3943.h create mode 100644 include/linux/mfd/lp873x.h create mode 100644 include/linux/mfd/lp87565.h create mode 100644 include/linux/mfd/lp8788-isink.h create mode 100644 include/linux/mfd/lp8788.h create mode 100644 include/linux/mfd/lpc_ich.h create mode 100644 include/linux/mfd/madera/core.h create mode 100644 include/linux/mfd/madera/pdata.h create mode 100644 include/linux/mfd/madera/registers.h create mode 100644 include/linux/mfd/max14577-private.h create mode 100644 include/linux/mfd/max14577.h create mode 100644 include/linux/mfd/max77620.h create mode 100644 include/linux/mfd/max77686-private.h create mode 100644 include/linux/mfd/max77686.h create mode 100644 include/linux/mfd/max77693-common.h create mode 100644 include/linux/mfd/max77693-private.h create mode 100644 include/linux/mfd/max77693.h create mode 100644 include/linux/mfd/max77843-private.h create mode 100644 include/linux/mfd/max8907.h create mode 100644 include/linux/mfd/max8925.h create mode 100644 include/linux/mfd/max8997-private.h create mode 100644 include/linux/mfd/max8997.h create mode 100644 include/linux/mfd/max8998-private.h create mode 100644 include/linux/mfd/max8998.h create mode 100644 include/linux/mfd/mc13783.h create mode 100644 include/linux/mfd/mc13892.h create mode 100644 include/linux/mfd/mc13xxx.h create mode 100644 include/linux/mfd/mcp.h create mode 100644 include/linux/mfd/menelaus.h create mode 100644 include/linux/mfd/motorola-cpcap.h create mode 100644 include/linux/mfd/mt6323/core.h create mode 100644 include/linux/mfd/mt6323/registers.h create mode 100644 include/linux/mfd/mt6397/core.h create mode 100644 include/linux/mfd/mt6397/registers.h create mode 100644 include/linux/mfd/mxs-lradc.h create mode 100644 include/linux/mfd/palmas.h create mode 100644 include/linux/mfd/pcf50633/adc.h create mode 100644 include/linux/mfd/pcf50633/backlight.h create mode 100644 include/linux/mfd/pcf50633/core.h create mode 100644 include/linux/mfd/pcf50633/gpio.h create mode 100644 include/linux/mfd/pcf50633/mbc.h create mode 100644 include/linux/mfd/pcf50633/pmic.h create mode 100644 include/linux/mfd/qcom_rpm.h create mode 100644 include/linux/mfd/rave-sp.h create mode 100644 include/linux/mfd/rc5t583.h create mode 100644 include/linux/mfd/rdc321x.h create mode 100644 include/linux/mfd/retu.h create mode 100644 include/linux/mfd/rk808.h create mode 100644 include/linux/mfd/rn5t618.h create mode 100644 include/linux/mfd/rohm-bd718x7.h 
create mode 100644 include/linux/mfd/rt5033-private.h create mode 100644 include/linux/mfd/rt5033.h create mode 100644 include/linux/mfd/samsung/core.h create mode 100644 include/linux/mfd/samsung/irq.h create mode 100644 include/linux/mfd/samsung/rtc.h create mode 100644 include/linux/mfd/samsung/s2mpa01.h create mode 100644 include/linux/mfd/samsung/s2mps11.h create mode 100644 include/linux/mfd/samsung/s2mps13.h create mode 100644 include/linux/mfd/samsung/s2mps14.h create mode 100644 include/linux/mfd/samsung/s2mps15.h create mode 100644 include/linux/mfd/samsung/s2mpu02.h create mode 100644 include/linux/mfd/samsung/s5m8763.h create mode 100644 include/linux/mfd/samsung/s5m8767.h create mode 100644 include/linux/mfd/si476x-core.h create mode 100644 include/linux/mfd/si476x-platform.h create mode 100644 include/linux/mfd/si476x-reports.h create mode 100644 include/linux/mfd/sky81452.h create mode 100644 include/linux/mfd/smsc.h create mode 100644 include/linux/mfd/sta2x11-mfd.h create mode 100644 include/linux/mfd/stm32-lptimer.h create mode 100644 include/linux/mfd/stm32-timers.h create mode 100644 include/linux/mfd/stmpe.h create mode 100644 include/linux/mfd/stw481x.h create mode 100644 include/linux/mfd/sun4i-gpadc.h create mode 100644 include/linux/mfd/syscon.h create mode 100644 include/linux/mfd/syscon/atmel-matrix.h create mode 100644 include/linux/mfd/syscon/atmel-mc.h create mode 100644 include/linux/mfd/syscon/atmel-smc.h create mode 100644 include/linux/mfd/syscon/atmel-st.h create mode 100644 include/linux/mfd/syscon/clps711x.h create mode 100644 include/linux/mfd/syscon/imx6q-iomuxc-gpr.h create mode 100644 include/linux/mfd/syscon/imx7-iomuxc-gpr.h create mode 100644 include/linux/mfd/t7l66xb.h create mode 100644 include/linux/mfd/tc3589x.h create mode 100644 include/linux/mfd/tc6387xb.h create mode 100644 include/linux/mfd/tc6393xb.h create mode 100644 include/linux/mfd/ti-lmu-register.h create mode 100644 include/linux/mfd/ti-lmu.h create mode 100644 include/linux/mfd/ti_am335x_tscadc.h create mode 100644 include/linux/mfd/tmio.h create mode 100644 include/linux/mfd/tps6105x.h create mode 100644 include/linux/mfd/tps65010.h create mode 100644 include/linux/mfd/tps6507x.h create mode 100644 include/linux/mfd/tps65086.h create mode 100644 include/linux/mfd/tps65090.h create mode 100644 include/linux/mfd/tps65217.h create mode 100644 include/linux/mfd/tps65218.h create mode 100644 include/linux/mfd/tps6586x.h create mode 100644 include/linux/mfd/tps65910.h create mode 100644 include/linux/mfd/tps65912.h create mode 100644 include/linux/mfd/tps68470.h create mode 100644 include/linux/mfd/tps80031.h create mode 100644 include/linux/mfd/twl.h create mode 100644 include/linux/mfd/twl4030-audio.h create mode 100644 include/linux/mfd/twl6040.h create mode 100644 include/linux/mfd/ucb1x00.h create mode 100644 include/linux/mfd/viperboard.h create mode 100644 include/linux/mfd/wl1273-core.h create mode 100644 include/linux/mfd/wm831x/auxadc.h create mode 100644 include/linux/mfd/wm831x/core.h create mode 100644 include/linux/mfd/wm831x/gpio.h create mode 100644 include/linux/mfd/wm831x/irq.h create mode 100644 include/linux/mfd/wm831x/otp.h create mode 100644 include/linux/mfd/wm831x/pdata.h create mode 100644 include/linux/mfd/wm831x/pmu.h create mode 100644 include/linux/mfd/wm831x/regulator.h create mode 100644 include/linux/mfd/wm831x/status.h create mode 100644 include/linux/mfd/wm831x/watchdog.h create mode 100644 include/linux/mfd/wm8350/audio.h create mode 100644 
include/linux/mfd/wm8350/comparator.h create mode 100644 include/linux/mfd/wm8350/core.h create mode 100644 include/linux/mfd/wm8350/gpio.h create mode 100644 include/linux/mfd/wm8350/pmic.h create mode 100644 include/linux/mfd/wm8350/rtc.h create mode 100644 include/linux/mfd/wm8350/supply.h create mode 100644 include/linux/mfd/wm8350/wdt.h create mode 100644 include/linux/mfd/wm8400-audio.h create mode 100644 include/linux/mfd/wm8400-private.h create mode 100644 include/linux/mfd/wm8400.h create mode 100644 include/linux/mfd/wm8994/core.h create mode 100644 include/linux/mfd/wm8994/gpio.h create mode 100644 include/linux/mfd/wm8994/pdata.h create mode 100644 include/linux/mfd/wm8994/registers.h create mode 100644 include/linux/mfd/wm97xx.h create mode 100644 include/linux/mic_bus.h create mode 100644 include/linux/micrel_phy.h create mode 100644 include/linux/microchipphy.h create mode 100644 include/linux/migrate.h create mode 100644 include/linux/migrate_mode.h create mode 100644 include/linux/mii.h create mode 100644 include/linux/miscdevice.h create mode 100644 include/linux/mlx4/cmd.h create mode 100644 include/linux/mlx4/cq.h create mode 100644 include/linux/mlx4/device.h create mode 100644 include/linux/mlx4/doorbell.h create mode 100644 include/linux/mlx4/driver.h create mode 100644 include/linux/mlx4/qp.h create mode 100644 include/linux/mlx4/srq.h create mode 100644 include/linux/mlx5/accel.h create mode 100644 include/linux/mlx5/cmd.h create mode 100644 include/linux/mlx5/cq.h create mode 100644 include/linux/mlx5/device.h create mode 100644 include/linux/mlx5/doorbell.h create mode 100644 include/linux/mlx5/driver.h create mode 100644 include/linux/mlx5/eswitch.h create mode 100644 include/linux/mlx5/fs.h create mode 100644 include/linux/mlx5/fs_helpers.h create mode 100644 include/linux/mlx5/mlx5_ifc.h create mode 100644 include/linux/mlx5/mlx5_ifc_fpga.h create mode 100644 include/linux/mlx5/port.h create mode 100644 include/linux/mlx5/qp.h create mode 100644 include/linux/mlx5/srq.h create mode 100644 include/linux/mlx5/transobj.h create mode 100644 include/linux/mlx5/vport.h create mode 100644 include/linux/mm-arch-hooks.h create mode 100644 include/linux/mm.h create mode 100644 include/linux/mm_inline.h create mode 100644 include/linux/mm_types.h create mode 100644 include/linux/mm_types_task.h create mode 100644 include/linux/mman.h create mode 100644 include/linux/mmc/card.h create mode 100644 include/linux/mmc/core.h create mode 100644 include/linux/mmc/host.h create mode 100644 include/linux/mmc/mmc.h create mode 100644 include/linux/mmc/pm.h create mode 100644 include/linux/mmc/sd.h create mode 100644 include/linux/mmc/sdhci-pci-data.h create mode 100644 include/linux/mmc/sdio.h create mode 100644 include/linux/mmc/sdio_func.h create mode 100644 include/linux/mmc/sdio_ids.h create mode 100644 include/linux/mmc/sh_mmcif.h create mode 100644 include/linux/mmc/slot-gpio.h create mode 100644 include/linux/mmdebug.h create mode 100644 include/linux/mmiotrace.h create mode 100644 include/linux/mmu_context.h create mode 100644 include/linux/mmu_notifier.h create mode 100644 include/linux/mmzone.h create mode 100644 include/linux/mnt_namespace.h create mode 100644 include/linux/mod_devicetable.h create mode 100644 include/linux/module.h create mode 100644 include/linux/moduleloader.h create mode 100644 include/linux/moduleparam.h create mode 100644 include/linux/mount.h create mode 100644 include/linux/mpage.h create mode 100644 include/linux/mpi.h create mode 100644 
include/linux/mpls.h create mode 100644 include/linux/mpls_iptunnel.h create mode 100644 include/linux/mroute.h create mode 100644 include/linux/mroute6.h create mode 100644 include/linux/mroute_base.h create mode 100644 include/linux/msdos_fs.h create mode 100644 include/linux/msg.h create mode 100644 include/linux/msi.h create mode 100644 include/linux/mtd/bbm.h create mode 100644 include/linux/mtd/blktrans.h create mode 100644 include/linux/mtd/cfi.h create mode 100644 include/linux/mtd/cfi_endian.h create mode 100644 include/linux/mtd/concat.h create mode 100644 include/linux/mtd/doc2000.h create mode 100644 include/linux/mtd/flashchip.h create mode 100644 include/linux/mtd/ftl.h create mode 100644 include/linux/mtd/gen_probe.h create mode 100644 include/linux/mtd/inftl.h create mode 100644 include/linux/mtd/latch-addr-flash.h create mode 100644 include/linux/mtd/lpc32xx_mlc.h create mode 100644 include/linux/mtd/lpc32xx_slc.h create mode 100644 include/linux/mtd/map.h create mode 100644 include/linux/mtd/mtd.h create mode 100644 include/linux/mtd/mtdram.h create mode 100644 include/linux/mtd/nand-gpio.h create mode 100644 include/linux/mtd/nand.h create mode 100644 include/linux/mtd/nand_bch.h create mode 100644 include/linux/mtd/nand_ecc.h create mode 100644 include/linux/mtd/ndfc.h create mode 100644 include/linux/mtd/nftl.h create mode 100644 include/linux/mtd/onenand.h create mode 100644 include/linux/mtd/onenand_regs.h create mode 100644 include/linux/mtd/partitions.h create mode 100644 include/linux/mtd/pfow.h create mode 100644 include/linux/mtd/physmap.h create mode 100644 include/linux/mtd/pismo.h create mode 100644 include/linux/mtd/plat-ram.h create mode 100644 include/linux/mtd/qinfo.h create mode 100644 include/linux/mtd/rawnand.h create mode 100644 include/linux/mtd/sh_flctl.h create mode 100644 include/linux/mtd/sharpsl.h create mode 100644 include/linux/mtd/spear_smi.h create mode 100644 include/linux/mtd/spi-nor.h create mode 100644 include/linux/mtd/spinand.h create mode 100644 include/linux/mtd/super.h create mode 100644 include/linux/mtd/ubi.h create mode 100644 include/linux/mtd/xip.h create mode 100644 include/linux/mutex.h create mode 100644 include/linux/mux/consumer.h create mode 100644 include/linux/mux/driver.h create mode 100644 include/linux/mv643xx.h create mode 100644 include/linux/mv643xx_eth.h create mode 100644 include/linux/mv643xx_i2c.h create mode 100644 include/linux/mvebu-pmsu.h create mode 100644 include/linux/mxm-wmi.h create mode 100644 include/linux/n_r3964.h create mode 100644 include/linux/namei.h create mode 100644 include/linux/nd.h create mode 100644 include/linux/net.h create mode 100644 include/linux/net_dim.h create mode 100644 include/linux/netdev_features.h create mode 100644 include/linux/netdevice.h create mode 100644 include/linux/netfilter.h create mode 100644 include/linux/netfilter/ipset/ip_set.h create mode 100644 include/linux/netfilter/ipset/ip_set_bitmap.h create mode 100644 include/linux/netfilter/ipset/ip_set_comment.h create mode 100644 include/linux/netfilter/ipset/ip_set_counter.h create mode 100644 include/linux/netfilter/ipset/ip_set_getport.h create mode 100644 include/linux/netfilter/ipset/ip_set_hash.h create mode 100644 include/linux/netfilter/ipset/ip_set_list.h create mode 100644 include/linux/netfilter/ipset/ip_set_skbinfo.h create mode 100644 include/linux/netfilter/ipset/ip_set_timeout.h create mode 100644 include/linux/netfilter/ipset/pfxlen.h create mode 100644 
include/linux/netfilter/nf_conntrack_amanda.h create mode 100644 include/linux/netfilter/nf_conntrack_common.h create mode 100644 include/linux/netfilter/nf_conntrack_dccp.h create mode 100644 include/linux/netfilter/nf_conntrack_ftp.h create mode 100644 include/linux/netfilter/nf_conntrack_h323.h create mode 100644 include/linux/netfilter/nf_conntrack_h323_asn1.h create mode 100644 include/linux/netfilter/nf_conntrack_h323_types.h create mode 100644 include/linux/netfilter/nf_conntrack_irc.h create mode 100644 include/linux/netfilter/nf_conntrack_pptp.h create mode 100644 include/linux/netfilter/nf_conntrack_proto_gre.h create mode 100644 include/linux/netfilter/nf_conntrack_sane.h create mode 100644 include/linux/netfilter/nf_conntrack_sctp.h create mode 100644 include/linux/netfilter/nf_conntrack_sip.h create mode 100644 include/linux/netfilter/nf_conntrack_snmp.h create mode 100644 include/linux/netfilter/nf_conntrack_tcp.h create mode 100644 include/linux/netfilter/nf_conntrack_tftp.h create mode 100644 include/linux/netfilter/nf_conntrack_zones_common.h create mode 100644 include/linux/netfilter/nfnetlink.h create mode 100644 include/linux/netfilter/nfnetlink_acct.h create mode 100644 include/linux/netfilter/nfnetlink_osf.h create mode 100644 include/linux/netfilter/x_tables.h create mode 100644 include/linux/netfilter/xt_hashlimit.h create mode 100644 include/linux/netfilter/xt_physdev.h create mode 100644 include/linux/netfilter_arp/arp_tables.h create mode 100644 include/linux/netfilter_bridge.h create mode 100644 include/linux/netfilter_bridge/ebt_802_3.h create mode 100644 include/linux/netfilter_bridge/ebtables.h create mode 100644 include/linux/netfilter_defs.h create mode 100644 include/linux/netfilter_ingress.h create mode 100644 include/linux/netfilter_ipv4.h create mode 100644 include/linux/netfilter_ipv4/ip_tables.h create mode 100644 include/linux/netfilter_ipv6.h create mode 100644 include/linux/netfilter_ipv6/ip6_tables.h create mode 100644 include/linux/netlink.h create mode 100644 include/linux/netpoll.h create mode 100644 include/linux/nfs.h create mode 100644 include/linux/nfs3.h create mode 100644 include/linux/nfs4.h create mode 100644 include/linux/nfs_fs.h create mode 100644 include/linux/nfs_fs_i.h create mode 100644 include/linux/nfs_fs_sb.h create mode 100644 include/linux/nfs_iostat.h create mode 100644 include/linux/nfs_page.h create mode 100644 include/linux/nfs_xdr.h create mode 100644 include/linux/nfsacl.h create mode 100644 include/linux/nl802154.h create mode 100644 include/linux/nls.h create mode 100644 include/linux/nmi.h create mode 100644 include/linux/node.h create mode 100644 include/linux/nodemask.h create mode 100644 include/linux/nospec.h create mode 100644 include/linux/notifier.h create mode 100644 include/linux/ns_common.h create mode 100644 include/linux/nsc_gpio.h create mode 100644 include/linux/nsproxy.h create mode 100644 include/linux/ntb.h create mode 100644 include/linux/ntb_transport.h create mode 100644 include/linux/nubus.h create mode 100644 include/linux/numa.h create mode 100644 include/linux/nvme-fc-driver.h create mode 100644 include/linux/nvme-fc.h create mode 100644 include/linux/nvme-rdma.h create mode 100644 include/linux/nvme.h create mode 100644 include/linux/nvmem-consumer.h create mode 100644 include/linux/nvmem-provider.h create mode 100644 include/linux/nvram.h create mode 100644 include/linux/of.h create mode 100644 include/linux/of_address.h create mode 100644 include/linux/of_clk.h create mode 100644 
include/linux/of_device.h create mode 100644 include/linux/of_dma.h create mode 100644 include/linux/of_fdt.h create mode 100644 include/linux/of_gpio.h create mode 100644 include/linux/of_graph.h create mode 100644 include/linux/of_iommu.h create mode 100644 include/linux/of_irq.h create mode 100644 include/linux/of_mdio.h create mode 100644 include/linux/of_net.h create mode 100644 include/linux/of_pci.h create mode 100644 include/linux/of_pdt.h create mode 100644 include/linux/of_platform.h create mode 100644 include/linux/of_reserved_mem.h create mode 100644 include/linux/oid_registry.h create mode 100644 include/linux/olpc-ec.h create mode 100644 include/linux/omap-dma.h create mode 100644 include/linux/omap-dmaengine.h create mode 100644 include/linux/omap-gpmc.h create mode 100644 include/linux/omap-iommu.h create mode 100644 include/linux/omap-mailbox.h create mode 100644 include/linux/omapfb.h create mode 100644 include/linux/once.h create mode 100644 include/linux/oom.h create mode 100644 include/linux/openvswitch.h create mode 100644 include/linux/oprofile.h create mode 100644 include/linux/osq_lock.h create mode 100644 include/linux/overflow.h create mode 100644 include/linux/oxu210hp.h create mode 100644 include/linux/padata.h create mode 100644 include/linux/page-flags-layout.h create mode 100644 include/linux/page-flags.h create mode 100644 include/linux/page-isolation.h create mode 100644 include/linux/page_counter.h create mode 100644 include/linux/page_ext.h create mode 100644 include/linux/page_idle.h create mode 100644 include/linux/page_owner.h create mode 100644 include/linux/page_ref.h create mode 100644 include/linux/pageblock-flags.h create mode 100644 include/linux/pagemap.h create mode 100644 include/linux/pagevec.h create mode 100644 include/linux/parman.h create mode 100644 include/linux/parport.h create mode 100644 include/linux/parport_pc.h create mode 100644 include/linux/parser.h create mode 100644 include/linux/pata_arasan_cf_data.h create mode 100644 include/linux/patchkey.h create mode 100644 include/linux/path.h create mode 100644 include/linux/pch_dma.h create mode 100644 include/linux/pci-acpi.h create mode 100644 include/linux/pci-aspm.h create mode 100644 include/linux/pci-ats.h create mode 100644 include/linux/pci-dma-compat.h create mode 100644 include/linux/pci-dma.h create mode 100644 include/linux/pci-ecam.h create mode 100644 include/linux/pci-ep-cfs.h create mode 100644 include/linux/pci-epc.h create mode 100644 include/linux/pci-epf.h create mode 100644 include/linux/pci.h create mode 100644 include/linux/pci_hotplug.h create mode 100644 include/linux/pci_ids.h create mode 100644 include/linux/pda_power.h create mode 100644 include/linux/pe.h create mode 100644 include/linux/percpu-defs.h create mode 100644 include/linux/percpu-refcount.h create mode 100644 include/linux/percpu-rwsem.h create mode 100644 include/linux/percpu.h create mode 100644 include/linux/percpu_counter.h create mode 100644 include/linux/perf/arm_pmu.h create mode 100644 include/linux/perf_event.h create mode 100644 include/linux/perf_regs.h create mode 100644 include/linux/personality.h create mode 100644 include/linux/pfn.h create mode 100644 include/linux/pfn_t.h create mode 100644 include/linux/phonet.h create mode 100644 include/linux/phy.h create mode 100644 include/linux/phy/omap_control_phy.h create mode 100644 include/linux/phy/omap_usb.h create mode 100644 include/linux/phy/phy-qcom-ufs.h create mode 100644 include/linux/phy/phy-sun4i-usb.h create mode 100644 
include/linux/phy/phy.h create mode 100644 include/linux/phy/tegra/xusb.h create mode 100644 include/linux/phy/ulpi_phy.h create mode 100644 include/linux/phy_fixed.h create mode 100644 include/linux/phy_led_triggers.h create mode 100644 include/linux/phylink.h create mode 100644 include/linux/pid.h create mode 100644 include/linux/pid_namespace.h create mode 100644 include/linux/pim.h create mode 100644 include/linux/pinctrl/consumer.h create mode 100644 include/linux/pinctrl/devinfo.h create mode 100644 include/linux/pinctrl/machine.h create mode 100644 include/linux/pinctrl/pinconf-generic.h create mode 100644 include/linux/pinctrl/pinconf.h create mode 100644 include/linux/pinctrl/pinctrl-state.h create mode 100644 include/linux/pinctrl/pinctrl.h create mode 100644 include/linux/pinctrl/pinmux.h create mode 100644 include/linux/pipe_fs_i.h create mode 100644 include/linux/pkeys.h create mode 100644 include/linux/pktcdvd.h create mode 100644 include/linux/pl320-ipc.h create mode 100644 include/linux/platform_data/ad5449.h create mode 100644 include/linux/platform_data/ad5755.h create mode 100644 include/linux/platform_data/ad5761.h create mode 100644 include/linux/platform_data/ad7266.h create mode 100644 include/linux/platform_data/ad7291.h create mode 100644 include/linux/platform_data/ad7298.h create mode 100644 include/linux/platform_data/ad7303.h create mode 100644 include/linux/platform_data/ad7791.h create mode 100644 include/linux/platform_data/ad7793.h create mode 100644 include/linux/platform_data/ad7879.h create mode 100644 include/linux/platform_data/ad7887.h create mode 100644 include/linux/platform_data/adau17x1.h create mode 100644 include/linux/platform_data/adau1977.h create mode 100644 include/linux/platform_data/adp5588.h create mode 100644 include/linux/platform_data/adp8860.h create mode 100644 include/linux/platform_data/adp8870.h create mode 100644 include/linux/platform_data/ads1015.h create mode 100644 include/linux/platform_data/ads7828.h create mode 100644 include/linux/platform_data/ams-delta-fiq.h create mode 100644 include/linux/platform_data/apds990x.h create mode 100644 include/linux/platform_data/arm-ux500-pm.h create mode 100644 include/linux/platform_data/asoc-imx-ssi.h create mode 100644 include/linux/platform_data/asoc-kirkwood.h create mode 100644 include/linux/platform_data/asoc-mx27vis.h create mode 100644 include/linux/platform_data/asoc-palm27x.h create mode 100644 include/linux/platform_data/asoc-s3c.h create mode 100644 include/linux/platform_data/asoc-s3c24xx_simtec.h create mode 100644 include/linux/platform_data/asoc-ti-mcbsp.h create mode 100644 include/linux/platform_data/asoc-ux500-msp.h create mode 100644 include/linux/platform_data/at24.h create mode 100644 include/linux/platform_data/at91_adc.h create mode 100644 include/linux/platform_data/ata-pxa.h create mode 100644 include/linux/platform_data/ata-samsung_cf.h create mode 100644 include/linux/platform_data/atmel.h create mode 100644 include/linux/platform_data/b53.h create mode 100644 include/linux/platform_data/bcmgenet.h create mode 100644 include/linux/platform_data/bd6107.h create mode 100644 include/linux/platform_data/bh1770glc.h create mode 100644 include/linux/platform_data/brcmfmac.h create mode 100644 include/linux/platform_data/clk-da8xx-cfgchip.h create mode 100644 include/linux/platform_data/clk-davinci-pll.h create mode 100644 include/linux/platform_data/clk-integrator.h create mode 100644 include/linux/platform_data/clk-lpss.h create mode 100644 
include/linux/platform_data/clk-st.h create mode 100644 include/linux/platform_data/clk-u300.h create mode 100644 include/linux/platform_data/cpuidle-exynos.h create mode 100644 include/linux/platform_data/crypto-atmel.h create mode 100644 include/linux/platform_data/crypto-ux500.h create mode 100644 include/linux/platform_data/cyttsp4.h create mode 100644 include/linux/platform_data/davinci_asp.h create mode 100644 include/linux/platform_data/db8500_thermal.h create mode 100644 include/linux/platform_data/dma-atmel.h create mode 100644 include/linux/platform_data/dma-coh901318.h create mode 100644 include/linux/platform_data/dma-dw.h create mode 100644 include/linux/platform_data/dma-ep93xx.h create mode 100644 include/linux/platform_data/dma-hsu.h create mode 100644 include/linux/platform_data/dma-imx-sdma.h create mode 100644 include/linux/platform_data/dma-imx.h create mode 100644 include/linux/platform_data/dma-mmp_tdma.h create mode 100644 include/linux/platform_data/dma-mv_xor.h create mode 100644 include/linux/platform_data/dma-s3c24xx.h create mode 100644 include/linux/platform_data/dma-ste-dma40.h create mode 100644 include/linux/platform_data/dmtimer-omap.h create mode 100644 include/linux/platform_data/ds620.h create mode 100644 include/linux/platform_data/dwc3-omap.h create mode 100644 include/linux/platform_data/edma.h create mode 100644 include/linux/platform_data/efm32-spi.h create mode 100644 include/linux/platform_data/efm32-uart.h create mode 100644 include/linux/platform_data/ehci-sh.h create mode 100644 include/linux/platform_data/elm.h create mode 100644 include/linux/platform_data/emif_plat.h create mode 100644 include/linux/platform_data/eth-netx.h create mode 100644 include/linux/platform_data/fsa9480.h create mode 100644 include/linux/platform_data/g762.h create mode 100644 include/linux/platform_data/gpio-ath79.h create mode 100644 include/linux/platform_data/gpio-davinci.h create mode 100644 include/linux/platform_data/gpio-dwapb.h create mode 100644 include/linux/platform_data/gpio-htc-egpio.h create mode 100644 include/linux/platform_data/gpio-omap.h create mode 100644 include/linux/platform_data/gpio-ts5500.h create mode 100644 include/linux/platform_data/gpio_backlight.h create mode 100644 include/linux/platform_data/gpmc-omap.h create mode 100644 include/linux/platform_data/hsmmc-omap.h create mode 100644 include/linux/platform_data/hwmon-s3c.h create mode 100644 include/linux/platform_data/i2c-cbus-gpio.h create mode 100644 include/linux/platform_data/i2c-davinci.h create mode 100644 include/linux/platform_data/i2c-designware.h create mode 100644 include/linux/platform_data/i2c-gpio.h create mode 100644 include/linux/platform_data/i2c-hid.h create mode 100644 include/linux/platform_data/i2c-imx.h create mode 100644 include/linux/platform_data/i2c-mux-gpio.h create mode 100644 include/linux/platform_data/i2c-mux-reg.h create mode 100644 include/linux/platform_data/i2c-ocores.h create mode 100644 include/linux/platform_data/i2c-omap.h create mode 100644 include/linux/platform_data/i2c-pca-platform.h create mode 100644 include/linux/platform_data/i2c-pxa.h create mode 100644 include/linux/platform_data/i2c-s3c2410.h create mode 100644 include/linux/platform_data/i2c-xiic.h create mode 100644 include/linux/platform_data/ina2xx.h create mode 100644 include/linux/platform_data/intel-mid_wdt.h create mode 100644 include/linux/platform_data/intel-spi.h create mode 100644 include/linux/platform_data/invensense_mpu6050.h create mode 100644 
include/linux/platform_data/iommu-omap.h create mode 100644 include/linux/platform_data/irda-pxaficp.h create mode 100644 include/linux/platform_data/irda-sa11x0.h create mode 100644 include/linux/platform_data/isl9305.h create mode 100644 include/linux/platform_data/itco_wdt.h create mode 100644 include/linux/platform_data/jz4740/jz4740_nand.h create mode 100644 include/linux/platform_data/keyboard-pxa930_rotary.h create mode 100644 include/linux/platform_data/keyboard-spear.h create mode 100644 include/linux/platform_data/keypad-ep93xx.h create mode 100644 include/linux/platform_data/keypad-nomadik-ske.h create mode 100644 include/linux/platform_data/keypad-omap.h create mode 100644 include/linux/platform_data/keypad-pxa27x.h create mode 100644 include/linux/platform_data/keypad-w90p910.h create mode 100644 include/linux/platform_data/keyscan-davinci.h create mode 100644 include/linux/platform_data/lcd-mipid.h create mode 100644 include/linux/platform_data/leds-kirkwood-netxbig.h create mode 100644 include/linux/platform_data/leds-kirkwood-ns2.h create mode 100644 include/linux/platform_data/leds-lm355x.h create mode 100644 include/linux/platform_data/leds-lm3642.h create mode 100644 include/linux/platform_data/leds-lp55xx.h create mode 100644 include/linux/platform_data/leds-omap.h create mode 100644 include/linux/platform_data/leds-pca963x.h create mode 100644 include/linux/platform_data/leds-s3c24xx.h create mode 100644 include/linux/platform_data/lm3630a_bl.h create mode 100644 include/linux/platform_data/lm3639_bl.h create mode 100644 include/linux/platform_data/lm8323.h create mode 100644 include/linux/platform_data/lp855x.h create mode 100644 include/linux/platform_data/lp8727.h create mode 100644 include/linux/platform_data/lp8755.h create mode 100644 include/linux/platform_data/ltc4245.h create mode 100644 include/linux/platform_data/lv5207lp.h create mode 100644 include/linux/platform_data/macb.h create mode 100644 include/linux/platform_data/max197.h create mode 100644 include/linux/platform_data/max3421-hcd.h create mode 100644 include/linux/platform_data/max6639.h create mode 100644 include/linux/platform_data/max6697.h create mode 100644 include/linux/platform_data/max732x.h create mode 100644 include/linux/platform_data/mcs.h create mode 100644 include/linux/platform_data/mdio-bcm-unimac.h create mode 100644 include/linux/platform_data/media/camera-mx2.h create mode 100644 include/linux/platform_data/media/camera-mx3.h create mode 100644 include/linux/platform_data/media/camera-pxa.h create mode 100644 include/linux/platform_data/media/coda.h create mode 100644 include/linux/platform_data/media/mmp-camera.h create mode 100644 include/linux/platform_data/media/omap1_camera.h create mode 100644 include/linux/platform_data/media/omap4iss.h create mode 100644 include/linux/platform_data/media/s5p_hdmi.h create mode 100644 include/linux/platform_data/media/si4713.h create mode 100644 include/linux/platform_data/media/soc_camera_platform.h create mode 100644 include/linux/platform_data/media/timb_radio.h create mode 100644 include/linux/platform_data/media/timb_video.h create mode 100644 include/linux/platform_data/mfd-mcp-sa11x0.h create mode 100644 include/linux/platform_data/microchip-ksz.h create mode 100644 include/linux/platform_data/mlxreg.h create mode 100644 include/linux/platform_data/mmc-davinci.h create mode 100644 include/linux/platform_data/mmc-esdhc-imx.h create mode 100644 include/linux/platform_data/mmc-mxcmmc.h create mode 100644 
include/linux/platform_data/mmc-omap.h create mode 100644 include/linux/platform_data/mmc-pxamci.h create mode 100644 include/linux/platform_data/mmc-s3cmci.h create mode 100644 include/linux/platform_data/mmc-sdhci-s3c.h create mode 100644 include/linux/platform_data/mmp_audio.h create mode 100644 include/linux/platform_data/mmp_dma.h create mode 100644 include/linux/platform_data/mouse-pxa930_trkball.h create mode 100644 include/linux/platform_data/mtd-davinci-aemif.h create mode 100644 include/linux/platform_data/mtd-davinci.h create mode 100644 include/linux/platform_data/mtd-mxc_nand.h create mode 100644 include/linux/platform_data/mtd-nand-omap2.h create mode 100644 include/linux/platform_data/mtd-nand-pxa3xx.h create mode 100644 include/linux/platform_data/mtd-nand-s3c2410.h create mode 100644 include/linux/platform_data/mtd-orion_nand.h create mode 100644 include/linux/platform_data/mv88e6xxx.h create mode 100644 include/linux/platform_data/mv_usb.h create mode 100644 include/linux/platform_data/net-cw1200.h create mode 100644 include/linux/platform_data/nfcmrvl.h create mode 100644 include/linux/platform_data/ntc_thermistor.h create mode 100644 include/linux/platform_data/nxp-nci.h create mode 100644 include/linux/platform_data/omap-twl4030.h create mode 100644 include/linux/platform_data/omap-wd-timer.h create mode 100644 include/linux/platform_data/omap1_bl.h create mode 100644 include/linux/platform_data/omapdss.h create mode 100644 include/linux/platform_data/pca953x.h create mode 100644 include/linux/platform_data/pca954x.h create mode 100644 include/linux/platform_data/pcf857x.h create mode 100644 include/linux/platform_data/pcmcia-pxa2xx_viper.h create mode 100644 include/linux/platform_data/phy-da8xx-usb.h create mode 100644 include/linux/platform_data/pinctrl-single.h create mode 100644 include/linux/platform_data/pixcir_i2c_ts.h create mode 100644 include/linux/platform_data/pm33xx.h create mode 100644 include/linux/platform_data/pwm_omap_dmtimer.h create mode 100644 include/linux/platform_data/pxa2xx_udc.h create mode 100644 include/linux/platform_data/pxa_sdhci.h create mode 100644 include/linux/platform_data/regulator-haptic.h create mode 100644 include/linux/platform_data/remoteproc-omap.h create mode 100644 include/linux/platform_data/rtc-ds2404.h create mode 100644 include/linux/platform_data/rtc-v3020.h create mode 100644 include/linux/platform_data/s3c-hsotg.h create mode 100644 include/linux/platform_data/s3c-hsudc.h create mode 100644 include/linux/platform_data/sa11x0-serial.h create mode 100644 include/linux/platform_data/sc18is602.h create mode 100644 include/linux/platform_data/sdhci-pic32.h create mode 100644 include/linux/platform_data/serial-imx.h create mode 100644 include/linux/platform_data/serial-omap.h create mode 100644 include/linux/platform_data/serial-sccnxp.h create mode 100644 include/linux/platform_data/shmob_drm.h create mode 100644 include/linux/platform_data/sht3x.h create mode 100644 include/linux/platform_data/shtc1.h create mode 100644 include/linux/platform_data/si5351.h create mode 100644 include/linux/platform_data/simplefb.h create mode 100644 include/linux/platform_data/sky81452-backlight.h create mode 100644 include/linux/platform_data/spi-clps711x.h create mode 100644 include/linux/platform_data/spi-davinci.h create mode 100644 include/linux/platform_data/spi-ep93xx.h create mode 100644 include/linux/platform_data/spi-imx.h create mode 100644 include/linux/platform_data/spi-mt65xx.h create mode 100644 
include/linux/platform_data/spi-nuc900.h create mode 100644 include/linux/platform_data/spi-omap2-mcspi.h create mode 100644 include/linux/platform_data/spi-s3c64xx.h create mode 100644 include/linux/platform_data/ssm2518.h create mode 100644 include/linux/platform_data/st33zp24.h create mode 100644 include/linux/platform_data/st_sensors_pdata.h create mode 100644 include/linux/platform_data/syscon.h create mode 100644 include/linux/platform_data/tc35876x.h create mode 100644 include/linux/platform_data/tda9950.h create mode 100644 include/linux/platform_data/ti-aemif.h create mode 100644 include/linux/platform_data/ti-sysc.h create mode 100644 include/linux/platform_data/touchscreen-s3c2410.h create mode 100644 include/linux/platform_data/tsc2007.h create mode 100644 include/linux/platform_data/tsl2563.h create mode 100644 include/linux/platform_data/tsl2772.h create mode 100644 include/linux/platform_data/txx9/ndfmc.h create mode 100644 include/linux/platform_data/uio_dmem_genirq.h create mode 100644 include/linux/platform_data/uio_pruss.h create mode 100644 include/linux/platform_data/usb-davinci.h create mode 100644 include/linux/platform_data/usb-ehci-mxc.h create mode 100644 include/linux/platform_data/usb-ehci-orion.h create mode 100644 include/linux/platform_data/usb-musb-ux500.h create mode 100644 include/linux/platform_data/usb-mx2.h create mode 100644 include/linux/platform_data/usb-ohci-pxa27x.h create mode 100644 include/linux/platform_data/usb-ohci-s3c2410.h create mode 100644 include/linux/platform_data/usb-omap.h create mode 100644 include/linux/platform_data/usb-omap1.h create mode 100644 include/linux/platform_data/usb-pxa3xx-ulpi.h create mode 100644 include/linux/platform_data/usb-s3c2410_udc.h create mode 100644 include/linux/platform_data/usb3503.h create mode 100644 include/linux/platform_data/ux500_wdt.h create mode 100644 include/linux/platform_data/video-clcd-versatile.h create mode 100644 include/linux/platform_data/video-ep93xx.h create mode 100644 include/linux/platform_data/video-imxfb.h create mode 100644 include/linux/platform_data/video-mx3fb.h create mode 100644 include/linux/platform_data/video-nuc900fb.h create mode 100644 include/linux/platform_data/video-pxafb.h create mode 100644 include/linux/platform_data/video_s3c.h create mode 100644 include/linux/platform_data/voltage-omap.h create mode 100644 include/linux/platform_data/wiznet.h create mode 100644 include/linux/platform_data/wkup_m3.h create mode 100644 include/linux/platform_data/x86/apple.h create mode 100644 include/linux/platform_data/x86/clk-pmc-atom.h create mode 100644 include/linux/platform_data/x86/mlxcpld.h create mode 100644 include/linux/platform_data/x86/pmc_atom.h create mode 100644 include/linux/platform_data/zforce_ts.h create mode 100644 include/linux/platform_device.h create mode 100644 include/linux/plist.h create mode 100644 include/linux/pm-trace.h create mode 100644 include/linux/pm.h create mode 100644 include/linux/pm2301_charger.h create mode 100644 include/linux/pm_clock.h create mode 100644 include/linux/pm_domain.h create mode 100644 include/linux/pm_opp.h create mode 100644 include/linux/pm_qos.h create mode 100644 include/linux/pm_runtime.h create mode 100644 include/linux/pm_wakeirq.h create mode 100644 include/linux/pm_wakeup.h create mode 100644 include/linux/pmbus.h create mode 100644 include/linux/pmu.h create mode 100644 include/linux/pnfs_osd_xdr.h create mode 100644 include/linux/pnp.h create mode 100644 include/linux/poison.h create mode 100644 
include/linux/poll.h create mode 100644 include/linux/posix-clock.h create mode 100644 include/linux/posix-timers.h create mode 100644 include/linux/posix_acl.h create mode 100644 include/linux/posix_acl_xattr.h create mode 100644 include/linux/power/ab8500.h create mode 100644 include/linux/power/bq2415x_charger.h create mode 100644 include/linux/power/bq24190_charger.h create mode 100644 include/linux/power/bq24735-charger.h create mode 100644 include/linux/power/bq27xxx_battery.h create mode 100644 include/linux/power/charger-manager.h create mode 100644 include/linux/power/generic-adc-battery.h create mode 100644 include/linux/power/gpio-charger.h create mode 100644 include/linux/power/isp1704_charger.h create mode 100644 include/linux/power/jz4740-battery.h create mode 100644 include/linux/power/max17042_battery.h create mode 100644 include/linux/power/max8903_charger.h create mode 100644 include/linux/power/sbs-battery.h create mode 100644 include/linux/power/smartreflex.h create mode 100644 include/linux/power/smb347-charger.h create mode 100644 include/linux/power/twl4030_madc_battery.h create mode 100644 include/linux/power_supply.h create mode 100644 include/linux/powercap.h create mode 100644 include/linux/ppp-comp.h create mode 100644 include/linux/ppp_channel.h create mode 100644 include/linux/ppp_defs.h create mode 100644 include/linux/pps-gpio.h create mode 100644 include/linux/pps_kernel.h create mode 100644 include/linux/pr.h create mode 100644 include/linux/prandom.h create mode 100644 include/linux/preempt.h create mode 100644 include/linux/prefetch.h create mode 100644 include/linux/prime_numbers.h create mode 100644 include/linux/printk.h create mode 100644 include/linux/proc_fs.h create mode 100644 include/linux/proc_ns.h create mode 100644 include/linux/processor.h create mode 100644 include/linux/profile.h create mode 100644 include/linux/projid.h create mode 100644 include/linux/property.h create mode 100644 include/linux/psci.h create mode 100644 include/linux/psp-sev.h create mode 100644 include/linux/pstore.h create mode 100644 include/linux/pstore_ram.h create mode 100644 include/linux/pti.h create mode 100644 include/linux/ptp_classify.h create mode 100644 include/linux/ptp_clock_kernel.h create mode 100644 include/linux/ptr_ring.h create mode 100644 include/linux/ptrace.h create mode 100644 include/linux/purgatory.h create mode 100644 include/linux/pvclock_gtod.h create mode 100644 include/linux/pwm.h create mode 100644 include/linux/pwm_backlight.h create mode 100644 include/linux/pxa168_eth.h create mode 100644 include/linux/pxa2xx_ssp.h create mode 100644 include/linux/qcom-geni-se.h create mode 100644 include/linux/qcom_scm.h create mode 100644 include/linux/qed/common_hsi.h create mode 100644 include/linux/qed/eth_common.h create mode 100644 include/linux/qed/fcoe_common.h create mode 100644 include/linux/qed/iscsi_common.h create mode 100644 include/linux/qed/iwarp_common.h create mode 100644 include/linux/qed/qed_chain.h create mode 100644 include/linux/qed/qed_eth_if.h create mode 100644 include/linux/qed/qed_fcoe_if.h create mode 100644 include/linux/qed/qed_if.h create mode 100644 include/linux/qed/qed_iov_if.h create mode 100644 include/linux/qed/qed_iscsi_if.h create mode 100644 include/linux/qed/qed_ll2_if.h create mode 100644 include/linux/qed/qed_rdma_if.h create mode 100644 include/linux/qed/qede_rdma.h create mode 100644 include/linux/qed/rdma_common.h create mode 100644 include/linux/qed/roce_common.h create mode 100644 
include/linux/qed/storage_common.h create mode 100644 include/linux/qed/tcp_common.h create mode 100644 include/linux/qnx6_fs.h create mode 100644 include/linux/quicklist.h create mode 100644 include/linux/quota.h create mode 100644 include/linux/quotaops.h create mode 100644 include/linux/radix-tree.h create mode 100644 include/linux/raid/md_u.h create mode 100644 include/linux/raid/pq.h create mode 100644 include/linux/raid/xor.h create mode 100644 include/linux/raid_class.h create mode 100644 include/linux/ramfs.h create mode 100644 include/linux/random.h create mode 100644 include/linux/range.h create mode 100644 include/linux/ras.h create mode 100644 include/linux/ratelimit.h create mode 100644 include/linux/rational.h create mode 100644 include/linux/rbtree.h create mode 100644 include/linux/rbtree_augmented.h create mode 100644 include/linux/rbtree_latch.h create mode 100644 include/linux/rcu_node_tree.h create mode 100644 include/linux/rcu_segcblist.h create mode 100644 include/linux/rcu_sync.h create mode 100644 include/linux/rculist.h create mode 100644 include/linux/rculist_bl.h create mode 100644 include/linux/rculist_nulls.h create mode 100644 include/linux/rcupdate.h create mode 100644 include/linux/rcupdate_wait.h create mode 100644 include/linux/rcutiny.h create mode 100644 include/linux/rcutree.h create mode 100644 include/linux/rcuwait.h create mode 100644 include/linux/reboot-mode.h create mode 100644 include/linux/reboot.h create mode 100644 include/linux/reciprocal_div.h create mode 100644 include/linux/refcount.h create mode 100644 include/linux/regmap.h create mode 100644 include/linux/regset.h create mode 100644 include/linux/regulator/ab8500.h create mode 100644 include/linux/regulator/act8865.h create mode 100644 include/linux/regulator/arizona-ldo1.h create mode 100644 include/linux/regulator/arizona-micsupp.h create mode 100644 include/linux/regulator/consumer.h create mode 100644 include/linux/regulator/da9211.h create mode 100644 include/linux/regulator/db8500-prcmu.h create mode 100644 include/linux/regulator/driver.h create mode 100644 include/linux/regulator/fan53555.h create mode 100644 include/linux/regulator/fixed.h create mode 100644 include/linux/regulator/gpio-regulator.h create mode 100644 include/linux/regulator/lp3971.h create mode 100644 include/linux/regulator/lp3972.h create mode 100644 include/linux/regulator/lp872x.h create mode 100644 include/linux/regulator/machine.h create mode 100644 include/linux/regulator/max1586.h create mode 100644 include/linux/regulator/max8649.h create mode 100644 include/linux/regulator/max8660.h create mode 100644 include/linux/regulator/max8952.h create mode 100644 include/linux/regulator/max8973-regulator.h create mode 100644 include/linux/regulator/mt6311.h create mode 100644 include/linux/regulator/mt6323-regulator.h create mode 100644 include/linux/regulator/mt6380-regulator.h create mode 100644 include/linux/regulator/mt6397-regulator.h create mode 100644 include/linux/regulator/of_regulator.h create mode 100644 include/linux/regulator/pfuze100.h create mode 100644 include/linux/regulator/tps51632-regulator.h create mode 100644 include/linux/regulator/tps62360.h create mode 100644 include/linux/regulator/tps6507x.h create mode 100644 include/linux/regulator/userspace-consumer.h create mode 100644 include/linux/relay.h create mode 100644 include/linux/remoteproc.h create mode 100644 include/linux/remoteproc/qcom_rproc.h create mode 100644 include/linux/remoteproc/st_slim_rproc.h create mode 100644 
include/linux/reservation.h create mode 100644 include/linux/reset-controller.h create mode 100644 include/linux/reset.h create mode 100644 include/linux/reset/bcm63xx_pmb.h create mode 100644 include/linux/resource.h create mode 100644 include/linux/resource_ext.h create mode 100644 include/linux/restart_block.h create mode 100644 include/linux/rfkill.h create mode 100644 include/linux/rhashtable-types.h create mode 100644 include/linux/rhashtable.h create mode 100644 include/linux/ring_buffer.h create mode 100644 include/linux/rio.h create mode 100644 include/linux/rio_drv.h create mode 100644 include/linux/rio_ids.h create mode 100644 include/linux/rio_regs.h create mode 100644 include/linux/rmap.h create mode 100644 include/linux/rmi.h create mode 100644 include/linux/rndis.h create mode 100644 include/linux/rodata_test.h create mode 100644 include/linux/root_dev.h create mode 100644 include/linux/rpmsg.h create mode 100644 include/linux/rpmsg/qcom_glink.h create mode 100644 include/linux/rpmsg/qcom_smd.h create mode 100644 include/linux/rslib.h create mode 100644 include/linux/rtc.h create mode 100644 include/linux/rtc/ds1286.h create mode 100644 include/linux/rtc/ds1307.h create mode 100644 include/linux/rtc/ds1685.h create mode 100644 include/linux/rtc/m48t59.h create mode 100644 include/linux/rtc/sirfsoc_rtciobrg.h create mode 100644 include/linux/rtmutex.h create mode 100644 include/linux/rtnetlink.h create mode 100644 include/linux/rtsx_common.h create mode 100644 include/linux/rtsx_pci.h create mode 100644 include/linux/rtsx_usb.h create mode 100644 include/linux/rwlock.h create mode 100644 include/linux/rwlock_api_smp.h create mode 100644 include/linux/rwlock_types.h create mode 100644 include/linux/rwsem-spinlock.h create mode 100644 include/linux/rwsem.h create mode 100644 include/linux/s3c_adc_battery.h create mode 100644 include/linux/sa11x0-dma.h create mode 100644 include/linux/sbitmap.h create mode 100644 include/linux/scatterlist.h create mode 100644 include/linux/scc.h create mode 100644 include/linux/sched.h create mode 100644 include/linux/sched/autogroup.h create mode 100644 include/linux/sched/clock.h create mode 100644 include/linux/sched/coredump.h create mode 100644 include/linux/sched/cpufreq.h create mode 100644 include/linux/sched/cputime.h create mode 100644 include/linux/sched/deadline.h create mode 100644 include/linux/sched/debug.h create mode 100644 include/linux/sched/hotplug.h create mode 100644 include/linux/sched/idle.h create mode 100644 include/linux/sched/init.h create mode 100644 include/linux/sched/isolation.h create mode 100644 include/linux/sched/jobctl.h create mode 100644 include/linux/sched/loadavg.h create mode 100644 include/linux/sched/mm.h create mode 100644 include/linux/sched/nohz.h create mode 100644 include/linux/sched/numa_balancing.h create mode 100644 include/linux/sched/prio.h create mode 100644 include/linux/sched/rt.h create mode 100644 include/linux/sched/signal.h create mode 100644 include/linux/sched/smt.h create mode 100644 include/linux/sched/stat.h create mode 100644 include/linux/sched/sysctl.h create mode 100644 include/linux/sched/task.h create mode 100644 include/linux/sched/task_stack.h create mode 100644 include/linux/sched/topology.h create mode 100644 include/linux/sched/user.h create mode 100644 include/linux/sched/wake_q.h create mode 100644 include/linux/sched/xacct.h create mode 100644 include/linux/sched_clock.h create mode 100644 include/linux/scif.h create mode 100644 include/linux/scmi_protocol.h create 
mode 100644 include/linux/scpi_protocol.h create mode 100644 include/linux/screen_info.h create mode 100644 include/linux/sctp.h create mode 100644 include/linux/scx200.h create mode 100644 include/linux/scx200_gpio.h create mode 100644 include/linux/sdb.h create mode 100644 include/linux/sdla.h create mode 100644 include/linux/seccomp.h create mode 100644 include/linux/securebits.h create mode 100644 include/linux/security.h create mode 100644 include/linux/sed-opal.h create mode 100644 include/linux/seg6.h create mode 100644 include/linux/seg6_genl.h create mode 100644 include/linux/seg6_hmac.h create mode 100644 include/linux/seg6_iptunnel.h create mode 100644 include/linux/seg6_local.h create mode 100644 include/linux/selection.h create mode 100644 include/linux/selinux.h create mode 100644 include/linux/sem.h create mode 100644 include/linux/semaphore.h create mode 100644 include/linux/seq_buf.h create mode 100644 include/linux/seq_file.h create mode 100644 include/linux/seq_file_net.h create mode 100644 include/linux/seqlock.h create mode 100644 include/linux/seqno-fence.h create mode 100644 include/linux/serdev.h create mode 100644 include/linux/serial.h create mode 100644 include/linux/serial_8250.h create mode 100644 include/linux/serial_bcm63xx.h create mode 100644 include/linux/serial_core.h create mode 100644 include/linux/serial_max3100.h create mode 100644 include/linux/serial_pnx8xxx.h create mode 100644 include/linux/serial_s3c.h create mode 100644 include/linux/serial_sci.h create mode 100644 include/linux/serio.h create mode 100644 include/linux/set_memory.h create mode 100644 include/linux/sfi.h create mode 100644 include/linux/sfi_acpi.h create mode 100644 include/linux/sfp.h create mode 100644 include/linux/sh_clk.h create mode 100644 include/linux/sh_dma.h create mode 100644 include/linux/sh_eth.h create mode 100644 include/linux/sh_intc.h create mode 100644 include/linux/sh_timer.h create mode 100644 include/linux/sha256.h create mode 100644 include/linux/shdma-base.h create mode 100644 include/linux/shm.h create mode 100644 include/linux/shmem_fs.h create mode 100644 include/linux/shrinker.h create mode 100644 include/linux/signal.h create mode 100644 include/linux/signal_types.h create mode 100644 include/linux/signalfd.h create mode 100644 include/linux/siox.h create mode 100644 include/linux/siphash.h create mode 100644 include/linux/sirfsoc_dma.h create mode 100644 include/linux/sizes.h create mode 100644 include/linux/skb_array.h create mode 100644 include/linux/skbuff.h create mode 100644 include/linux/slab.h create mode 100644 include/linux/slab_def.h create mode 100644 include/linux/slimbus.h create mode 100644 include/linux/slub_def.h create mode 100644 include/linux/sm501-regs.h create mode 100644 include/linux/sm501.h create mode 100644 include/linux/smc911x.h create mode 100644 include/linux/smc91x.h create mode 100644 include/linux/smp.h create mode 100644 include/linux/smpboot.h create mode 100644 include/linux/smsc911x.h create mode 100644 include/linux/smscphy.h create mode 100644 include/linux/soc/actions/owl-sps.h create mode 100644 include/linux/soc/brcmstb/brcmstb.h create mode 100644 include/linux/soc/dove/pmu.h create mode 100644 include/linux/soc/mediatek/infracfg.h create mode 100644 include/linux/soc/qcom/apr.h create mode 100644 include/linux/soc/qcom/llcc-qcom.h create mode 100644 include/linux/soc/qcom/mdt_loader.h create mode 100644 include/linux/soc/qcom/qmi.h create mode 100644 include/linux/soc/qcom/smd-rpm.h create mode 100644 
include/linux/soc/qcom/smem.h create mode 100644 include/linux/soc/qcom/smem_state.h create mode 100644 include/linux/soc/qcom/wcnss_ctrl.h create mode 100644 include/linux/soc/renesas/rcar-rst.h create mode 100644 include/linux/soc/renesas/rcar-sysc.h create mode 100644 include/linux/soc/samsung/exynos-pmu.h create mode 100644 include/linux/soc/samsung/exynos-regs-pmu.h create mode 100644 include/linux/soc/sunxi/sunxi_sram.h create mode 100644 include/linux/soc/ti/knav_dma.h create mode 100644 include/linux/soc/ti/knav_qmss.h create mode 100644 include/linux/soc/ti/ti-msgmgr.h create mode 100644 include/linux/soc/ti/ti_sci_protocol.h create mode 100644 include/linux/sock_diag.h create mode 100644 include/linux/socket.h create mode 100644 include/linux/sonet.h create mode 100644 include/linux/sony-laptop.h create mode 100644 include/linux/sonypi.h create mode 100644 include/linux/sort.h create mode 100644 include/linux/sound.h create mode 100644 include/linux/soundcard.h create mode 100644 include/linux/soundwire/sdw.h create mode 100644 include/linux/soundwire/sdw_intel.h create mode 100644 include/linux/soundwire/sdw_registers.h create mode 100644 include/linux/soundwire/sdw_type.h create mode 100644 include/linux/spi/ad7877.h create mode 100644 include/linux/spi/ads7846.h create mode 100644 include/linux/spi/at73c213.h create mode 100644 include/linux/spi/at86rf230.h create mode 100644 include/linux/spi/cc2520.h create mode 100644 include/linux/spi/corgi_lcd.h create mode 100644 include/linux/spi/ds1305.h create mode 100644 include/linux/spi/eeprom.h create mode 100644 include/linux/spi/flash.h create mode 100644 include/linux/spi/ifx_modem.h create mode 100644 include/linux/spi/l4f00242t03.h create mode 100644 include/linux/spi/libertas_spi.h create mode 100644 include/linux/spi/lms283gf05.h create mode 100644 include/linux/spi/max7301.h create mode 100644 include/linux/spi/mc33880.h create mode 100644 include/linux/spi/mcp23s08.h create mode 100644 include/linux/spi/mmc_spi.h create mode 100644 include/linux/spi/mxs-spi.h create mode 100644 include/linux/spi/pxa2xx_spi.h create mode 100644 include/linux/spi/rspi.h create mode 100644 include/linux/spi/s3c24xx.h create mode 100644 include/linux/spi/sh_hspi.h create mode 100644 include/linux/spi/sh_msiof.h create mode 100644 include/linux/spi/spi-fsl-dspi.h create mode 100644 include/linux/spi/spi-mem.h create mode 100644 include/linux/spi/spi.h create mode 100644 include/linux/spi/spi_bitbang.h create mode 100644 include/linux/spi/spi_gpio.h create mode 100644 include/linux/spi/spi_oc_tiny.h create mode 100644 include/linux/spi/tdo24m.h create mode 100644 include/linux/spi/tle62x0.h create mode 100644 include/linux/spi/xilinx_spi.h create mode 100644 include/linux/spinlock.h create mode 100644 include/linux/spinlock_api_smp.h create mode 100644 include/linux/spinlock_api_up.h create mode 100644 include/linux/spinlock_types.h create mode 100644 include/linux/spinlock_types_up.h create mode 100644 include/linux/spinlock_up.h create mode 100644 include/linux/splice.h create mode 100644 include/linux/spmi.h create mode 100644 include/linux/sram.h create mode 100644 include/linux/srcu.h create mode 100644 include/linux/srcutiny.h create mode 100644 include/linux/srcutree.h create mode 100644 include/linux/ssb/ssb.h create mode 100644 include/linux/ssb/ssb_driver_chipcommon.h create mode 100644 include/linux/ssb/ssb_driver_extif.h create mode 100644 include/linux/ssb/ssb_driver_gige.h create mode 100644 include/linux/ssb/ssb_driver_mips.h 
create mode 100644 include/linux/ssb/ssb_driver_pci.h create mode 100644 include/linux/ssb/ssb_embedded.h create mode 100644 include/linux/ssb/ssb_regs.h create mode 100644 include/linux/ssbi.h create mode 100644 include/linux/stackdepot.h create mode 100644 include/linux/stackprotector.h create mode 100644 include/linux/stacktrace.h create mode 100644 include/linux/start_kernel.h create mode 100644 include/linux/stat.h create mode 100644 include/linux/statfs.h create mode 100644 include/linux/static_key.h create mode 100644 include/linux/stddef.h create mode 100644 include/linux/stm.h create mode 100644 include/linux/stmmac.h create mode 100644 include/linux/stmp3xxx_rtc_wdt.h create mode 100644 include/linux/stmp_device.h create mode 100644 include/linux/stop_machine.h create mode 100644 include/linux/string.h create mode 100644 include/linux/string_helpers.h create mode 100644 include/linux/stringhash.h create mode 100644 include/linux/stringify.h create mode 100644 include/linux/sudmac.h create mode 100644 include/linux/sungem_phy.h create mode 100644 include/linux/sunrpc/addr.h create mode 100644 include/linux/sunrpc/auth.h create mode 100644 include/linux/sunrpc/auth_gss.h create mode 100644 include/linux/sunrpc/bc_xprt.h create mode 100644 include/linux/sunrpc/cache.h create mode 100644 include/linux/sunrpc/clnt.h create mode 100644 include/linux/sunrpc/debug.h create mode 100644 include/linux/sunrpc/gss_api.h create mode 100644 include/linux/sunrpc/gss_asn1.h create mode 100644 include/linux/sunrpc/gss_err.h create mode 100644 include/linux/sunrpc/gss_krb5.h create mode 100644 include/linux/sunrpc/gss_krb5_enctypes.h create mode 100644 include/linux/sunrpc/metrics.h create mode 100644 include/linux/sunrpc/msg_prot.h create mode 100644 include/linux/sunrpc/rpc_pipe_fs.h create mode 100644 include/linux/sunrpc/rpc_rdma.h create mode 100644 include/linux/sunrpc/sched.h create mode 100644 include/linux/sunrpc/stats.h create mode 100644 include/linux/sunrpc/svc.h create mode 100644 include/linux/sunrpc/svc_rdma.h create mode 100644 include/linux/sunrpc/svc_xprt.h create mode 100644 include/linux/sunrpc/svcauth.h create mode 100644 include/linux/sunrpc/svcauth_gss.h create mode 100644 include/linux/sunrpc/svcsock.h create mode 100644 include/linux/sunrpc/timer.h create mode 100644 include/linux/sunrpc/types.h create mode 100644 include/linux/sunrpc/xdr.h create mode 100644 include/linux/sunrpc/xprt.h create mode 100644 include/linux/sunrpc/xprtmultipath.h create mode 100644 include/linux/sunrpc/xprtrdma.h create mode 100644 include/linux/sunrpc/xprtsock.h create mode 100644 include/linux/sunserialcore.h create mode 100644 include/linux/sunxi-rsb.h create mode 100644 include/linux/superhyway.h create mode 100644 include/linux/suspend.h create mode 100644 include/linux/svga.h create mode 100644 include/linux/sw842.h create mode 100644 include/linux/swab.h create mode 100644 include/linux/swait.h create mode 100644 include/linux/swap.h create mode 100644 include/linux/swap_cgroup.h create mode 100644 include/linux/swap_slots.h create mode 100644 include/linux/swapfile.h create mode 100644 include/linux/swapops.h create mode 100644 include/linux/swiotlb.h create mode 100644 include/linux/switchtec.h create mode 100644 include/linux/sxgbe_platform.h create mode 100644 include/linux/sync_core.h create mode 100644 include/linux/sync_file.h create mode 100644 include/linux/synclink.h create mode 100644 include/linux/sys.h create mode 100644 include/linux/sys_soc.h create mode 100644 
include/linux/syscalls.h create mode 100644 include/linux/syscore_ops.h create mode 100644 include/linux/sysctl.h create mode 100644 include/linux/sysfs.h create mode 100644 include/linux/syslog.h create mode 100644 include/linux/sysrq.h create mode 100644 include/linux/sysv_fs.h create mode 100644 include/linux/t10-pi.h create mode 100644 include/linux/task_io_accounting.h create mode 100644 include/linux/task_io_accounting_ops.h create mode 100644 include/linux/task_work.h create mode 100644 include/linux/taskstats_kern.h create mode 100644 include/linux/tboot.h create mode 100644 include/linux/tc.h create mode 100644 include/linux/tca6416_keypad.h create mode 100644 include/linux/tcp.h create mode 100644 include/linux/tee_drv.h create mode 100644 include/linux/textsearch.h create mode 100644 include/linux/textsearch_fsm.h create mode 100644 include/linux/tfrc.h create mode 100644 include/linux/thermal.h create mode 100644 include/linux/thinkpad_acpi.h create mode 100644 include/linux/thread_info.h create mode 100644 include/linux/threads.h create mode 100644 include/linux/thunderbolt.h create mode 100644 include/linux/ti-emif-sram.h create mode 100644 include/linux/ti_wilink_st.h create mode 100644 include/linux/tick.h create mode 100644 include/linux/tifm.h create mode 100644 include/linux/timb_dma.h create mode 100644 include/linux/timb_gpio.h create mode 100644 include/linux/time.h create mode 100644 include/linux/time32.h create mode 100644 include/linux/time64.h create mode 100644 include/linux/timecounter.h create mode 100644 include/linux/timekeeper_internal.h create mode 100644 include/linux/timekeeping.h create mode 100644 include/linux/timekeeping32.h create mode 100644 include/linux/timer.h create mode 100644 include/linux/timerfd.h create mode 100644 include/linux/timeriomem-rng.h create mode 100644 include/linux/timerqueue.h create mode 100644 include/linux/timex.h create mode 100644 include/linux/tnum.h create mode 100644 include/linux/topology.h create mode 100644 include/linux/torture.h create mode 100644 include/linux/toshiba.h create mode 100644 include/linux/tpm.h create mode 100644 include/linux/tpm_command.h create mode 100644 include/linux/tpm_eventlog.h create mode 100644 include/linux/trace.h create mode 100644 include/linux/trace_clock.h create mode 100644 include/linux/trace_events.h create mode 100644 include/linux/trace_seq.h create mode 100644 include/linux/tracefs.h create mode 100644 include/linux/tracehook.h create mode 100644 include/linux/tracepoint-defs.h create mode 100644 include/linux/tracepoint.h create mode 100644 include/linux/transport_class.h create mode 100644 include/linux/ts-nbus.h create mode 100644 include/linux/tsacct_kern.h create mode 100644 include/linux/tty.h create mode 100644 include/linux/tty_driver.h create mode 100644 include/linux/tty_flip.h create mode 100644 include/linux/tty_ldisc.h create mode 100644 include/linux/typecheck.h create mode 100644 include/linux/types.h create mode 100644 include/linux/u64_stats_sync.h create mode 100644 include/linux/uaccess.h create mode 100644 include/linux/ucb1400.h create mode 100644 include/linux/ucs2_string.h create mode 100644 include/linux/udp.h create mode 100644 include/linux/uidgid.h create mode 100644 include/linux/uio.h create mode 100644 include/linux/uio_driver.h create mode 100644 include/linux/ulpi/driver.h create mode 100644 include/linux/ulpi/interface.h create mode 100644 include/linux/ulpi/regs.h create mode 100644 include/linux/umh.h create mode 100644 
include/linux/unaligned/access_ok.h create mode 100644 include/linux/unaligned/be_byteshift.h create mode 100644 include/linux/unaligned/be_memmove.h create mode 100644 include/linux/unaligned/be_struct.h create mode 100644 include/linux/unaligned/generic.h create mode 100644 include/linux/unaligned/le_byteshift.h create mode 100644 include/linux/unaligned/le_memmove.h create mode 100644 include/linux/unaligned/le_struct.h create mode 100644 include/linux/unaligned/memmove.h create mode 100644 include/linux/unaligned/packed_struct.h create mode 100644 include/linux/uprobes.h create mode 100644 include/linux/usb.h create mode 100644 include/linux/usb/association.h create mode 100644 include/linux/usb/audio-v2.h create mode 100644 include/linux/usb/audio-v3.h create mode 100644 include/linux/usb/audio.h create mode 100644 include/linux/usb/c67x00.h create mode 100644 include/linux/usb/cdc-wdm.h create mode 100644 include/linux/usb/cdc.h create mode 100644 include/linux/usb/cdc_ncm.h create mode 100644 include/linux/usb/ch9.h create mode 100644 include/linux/usb/chipidea.h create mode 100644 include/linux/usb/composite.h create mode 100644 include/linux/usb/ehci-dbgp.h create mode 100644 include/linux/usb/ehci_def.h create mode 100644 include/linux/usb/ehci_pdriver.h create mode 100644 include/linux/usb/ezusb.h create mode 100644 include/linux/usb/functionfs.h create mode 100644 include/linux/usb/g_hid.h create mode 100644 include/linux/usb/gadget.h create mode 100644 include/linux/usb/gadget_configfs.h create mode 100644 include/linux/usb/gpio_vbus.h create mode 100644 include/linux/usb/hcd.h create mode 100644 include/linux/usb/input.h create mode 100644 include/linux/usb/iowarrior.h create mode 100644 include/linux/usb/irda.h create mode 100644 include/linux/usb/isp116x.h create mode 100644 include/linux/usb/isp1301.h create mode 100644 include/linux/usb/isp1362.h create mode 100644 include/linux/usb/isp1760.h create mode 100644 include/linux/usb/m66592.h create mode 100644 include/linux/usb/musb-ux500.h create mode 100644 include/linux/usb/musb.h create mode 100644 include/linux/usb/net2280.h create mode 100644 include/linux/usb/of.h create mode 100644 include/linux/usb/ohci_pdriver.h create mode 100644 include/linux/usb/otg-fsm.h create mode 100644 include/linux/usb/otg.h create mode 100644 include/linux/usb/pd.h create mode 100644 include/linux/usb/pd_ado.h create mode 100644 include/linux/usb/pd_bdo.h create mode 100644 include/linux/usb/pd_ext_sdb.h create mode 100644 include/linux/usb/pd_vdo.h create mode 100644 include/linux/usb/phy.h create mode 100644 include/linux/usb/phy_companion.h create mode 100644 include/linux/usb/quirks.h create mode 100644 include/linux/usb/r8a66597.h create mode 100644 include/linux/usb/renesas_usbhs.h create mode 100644 include/linux/usb/rndis_host.h create mode 100644 include/linux/usb/role.h create mode 100644 include/linux/usb/samsung_usb_phy.h create mode 100644 include/linux/usb/serial.h create mode 100644 include/linux/usb/sl811.h create mode 100644 include/linux/usb/storage.h create mode 100644 include/linux/usb/tcpm.h create mode 100644 include/linux/usb/tegra_usb_phy.h create mode 100644 include/linux/usb/typec.h create mode 100644 include/linux/usb/typec_altmode.h create mode 100644 include/linux/usb/typec_dp.h create mode 100644 include/linux/usb/typec_mux.h create mode 100644 include/linux/usb/uas.h create mode 100644 include/linux/usb/ulpi.h create mode 100644 include/linux/usb/usb338x.h create mode 100644 
include/linux/usb/usb_phy_generic.h create mode 100644 include/linux/usb/usbnet.h create mode 100644 include/linux/usb/wusb-wa.h create mode 100644 include/linux/usb/wusb.h create mode 100644 include/linux/usb/xhci-dbgp.h create mode 100644 include/linux/usb_usual.h create mode 100644 include/linux/usbdevice_fs.h create mode 100644 include/linux/user-return-notifier.h create mode 100644 include/linux/user.h create mode 100644 include/linux/user_namespace.h create mode 100644 include/linux/userfaultfd_k.h create mode 100644 include/linux/util_macros.h create mode 100644 include/linux/uts.h create mode 100644 include/linux/utsname.h create mode 100644 include/linux/uuid.h create mode 100644 include/linux/uwb.h create mode 100644 include/linux/uwb/debug-cmd.h create mode 100644 include/linux/uwb/spec.h create mode 100644 include/linux/uwb/umc.h create mode 100644 include/linux/uwb/whci.h create mode 100644 include/linux/vbox_utils.h create mode 100644 include/linux/verification.h create mode 100644 include/linux/vermagic.h create mode 100644 include/linux/vexpress.h create mode 100644 include/linux/vfio.h create mode 100644 include/linux/vfs.h create mode 100644 include/linux/vga_switcheroo.h create mode 100644 include/linux/vgaarb.h create mode 100644 include/linux/via-core.h create mode 100644 include/linux/via-gpio.h create mode 100644 include/linux/via.h create mode 100644 include/linux/via_i2c.h create mode 100644 include/linux/videodev2.h create mode 100644 include/linux/virtio.h create mode 100644 include/linux/virtio_byteorder.h create mode 100644 include/linux/virtio_caif.h create mode 100644 include/linux/virtio_config.h create mode 100644 include/linux/virtio_console.h create mode 100644 include/linux/virtio_net.h create mode 100644 include/linux/virtio_ring.h create mode 100644 include/linux/virtio_vsock.h create mode 100644 include/linux/visorbus.h create mode 100644 include/linux/vlynq.h create mode 100644 include/linux/vm_event_item.h create mode 100644 include/linux/vm_sockets.h create mode 100644 include/linux/vmacache.h create mode 100644 include/linux/vmalloc.h create mode 100644 include/linux/vme.h create mode 100644 include/linux/vmpressure.h create mode 100644 include/linux/vmstat.h create mode 100644 include/linux/vmw_vmci_api.h create mode 100644 include/linux/vmw_vmci_defs.h create mode 100644 include/linux/vringh.h create mode 100644 include/linux/vt.h create mode 100644 include/linux/vt_buffer.h create mode 100644 include/linux/vt_kern.h create mode 100644 include/linux/vtime.h create mode 100644 include/linux/w1-gpio.h create mode 100644 include/linux/w1.h create mode 100644 include/linux/wait.h create mode 100644 include/linux/wait_bit.h create mode 100644 include/linux/wanrouter.h create mode 100644 include/linux/watchdog.h create mode 100644 include/linux/wimax/debug.h create mode 100644 include/linux/win_minmax.h create mode 100644 include/linux/wireless.h create mode 100644 include/linux/wkup_m3_ipc.h create mode 100644 include/linux/wl12xx.h create mode 100644 include/linux/wm97xx.h create mode 100644 include/linux/wmi.h create mode 100644 include/linux/workqueue.h create mode 100644 include/linux/writeback.h create mode 100644 include/linux/ww_mutex.h create mode 100644 include/linux/xarray.h create mode 100644 include/linux/xattr.h create mode 100644 include/linux/xxhash.h create mode 100644 include/linux/xz.h create mode 100644 include/linux/yam.h create mode 100644 include/linux/z2_battery.h create mode 100644 include/linux/zbud.h create mode 100644 
include/linux/zconf.h create mode 100644 include/linux/zlib.h create mode 100644 include/linux/zorro.h create mode 100644 include/linux/zpool.h create mode 100644 include/linux/zsmalloc.h create mode 100644 include/linux/zstd.h create mode 100644 include/linux/zutil.h create mode 100644 include/math-emu/double.h create mode 100644 include/math-emu/op-1.h create mode 100644 include/math-emu/op-2.h create mode 100644 include/math-emu/op-4.h create mode 100644 include/math-emu/op-8.h create mode 100644 include/math-emu/op-common.h create mode 100644 include/math-emu/quad.h create mode 100644 include/math-emu/single.h create mode 100644 include/math-emu/soft-fp.h create mode 100644 include/media/cec-notifier.h create mode 100644 include/media/cec-pin.h create mode 100644 include/media/cec.h create mode 100644 include/media/davinci/ccdc_types.h create mode 100644 include/media/davinci/dm355_ccdc.h create mode 100644 include/media/davinci/dm644x_ccdc.h create mode 100644 include/media/davinci/isif.h create mode 100644 include/media/davinci/vpbe.h create mode 100644 include/media/davinci/vpbe_display.h create mode 100644 include/media/davinci/vpbe_osd.h create mode 100644 include/media/davinci/vpbe_types.h create mode 100644 include/media/davinci/vpbe_venc.h create mode 100644 include/media/davinci/vpfe_capture.h create mode 100644 include/media/davinci/vpfe_types.h create mode 100644 include/media/davinci/vpif_types.h create mode 100644 include/media/davinci/vpss.h create mode 100644 include/media/demux.h create mode 100644 include/media/dmxdev.h create mode 100644 include/media/drv-intf/cx2341x.h create mode 100644 include/media/drv-intf/cx25840.h create mode 100644 include/media/drv-intf/exynos-fimc.h create mode 100644 include/media/drv-intf/msp3400.h create mode 100644 include/media/drv-intf/renesas-ceu.h create mode 100644 include/media/drv-intf/s3c_camif.h create mode 100644 include/media/drv-intf/saa7146.h create mode 100644 include/media/drv-intf/saa7146_vv.h create mode 100644 include/media/drv-intf/sh_mobile_ceu.h create mode 100644 include/media/drv-intf/sh_vou.h create mode 100644 include/media/drv-intf/si476x.h create mode 100644 include/media/drv-intf/soc_mediabus.h create mode 100644 include/media/drv-intf/tea575x.h create mode 100644 include/media/dvb-usb-ids.h create mode 100644 include/media/dvb_ca_en50221.h create mode 100644 include/media/dvb_demux.h create mode 100644 include/media/dvb_frontend.h create mode 100644 include/media/dvb_math.h create mode 100644 include/media/dvb_net.h create mode 100644 include/media/dvb_ringbuffer.h create mode 100644 include/media/dvb_vb2.h create mode 100644 include/media/dvbdev.h create mode 100644 include/media/i2c/ad9389b.h create mode 100644 include/media/i2c/adp1653.h create mode 100644 include/media/i2c/adv7183.h create mode 100644 include/media/i2c/adv7343.h create mode 100644 include/media/i2c/adv7393.h create mode 100644 include/media/i2c/adv7511.h create mode 100644 include/media/i2c/adv7604.h create mode 100644 include/media/i2c/adv7842.h create mode 100644 include/media/i2c/ak881x.h create mode 100644 include/media/i2c/bt819.h create mode 100644 include/media/i2c/cs5345.h create mode 100644 include/media/i2c/cs53l32a.h create mode 100644 include/media/i2c/ir-kbd-i2c.h create mode 100644 include/media/i2c/lm3560.h create mode 100644 include/media/i2c/lm3646.h create mode 100644 include/media/i2c/m52790.h create mode 100644 include/media/i2c/m5mols.h create mode 100644 include/media/i2c/mt9m032.h create mode 100644 
include/media/i2c/mt9p031.h create mode 100644 include/media/i2c/mt9t001.h create mode 100644 include/media/i2c/mt9t112.h create mode 100644 include/media/i2c/mt9v011.h create mode 100644 include/media/i2c/mt9v022.h create mode 100644 include/media/i2c/mt9v032.h create mode 100644 include/media/i2c/noon010pc30.h create mode 100644 include/media/i2c/ov2659.h create mode 100644 include/media/i2c/ov7670.h create mode 100644 include/media/i2c/ov772x.h create mode 100644 include/media/i2c/ov9650.h create mode 100644 include/media/i2c/rj54n1cb0c.h create mode 100644 include/media/i2c/s5c73m3.h create mode 100644 include/media/i2c/s5k4ecgx.h create mode 100644 include/media/i2c/s5k6aa.h create mode 100644 include/media/i2c/saa6588.h create mode 100644 include/media/i2c/saa7115.h create mode 100644 include/media/i2c/saa7127.h create mode 100644 include/media/i2c/smiapp.h create mode 100644 include/media/i2c/sr030pc30.h create mode 100644 include/media/i2c/tc358743.h create mode 100644 include/media/i2c/tda1997x.h create mode 100644 include/media/i2c/ths7303.h create mode 100644 include/media/i2c/tvaudio.h create mode 100644 include/media/i2c/tvp514x.h create mode 100644 include/media/i2c/tvp7002.h create mode 100644 include/media/i2c/tw9910.h create mode 100644 include/media/i2c/uda1342.h create mode 100644 include/media/i2c/upd64031a.h create mode 100644 include/media/i2c/upd64083.h create mode 100644 include/media/i2c/wm8775.h create mode 100644 include/media/imx.h create mode 100644 include/media/media-device.h create mode 100644 include/media/media-devnode.h create mode 100644 include/media/media-entity.h create mode 100644 include/media/rc-core.h create mode 100644 include/media/rc-map.h create mode 100644 include/media/rcar-fcp.h create mode 100644 include/media/soc_camera.h create mode 100644 include/media/tpg/v4l2-tpg.h create mode 100644 include/media/tuner-types.h create mode 100644 include/media/tuner.h create mode 100644 include/media/tveeprom.h create mode 100644 include/media/v4l2-async.h create mode 100644 include/media/v4l2-clk.h create mode 100644 include/media/v4l2-common.h create mode 100644 include/media/v4l2-ctrls.h create mode 100644 include/media/v4l2-dev.h create mode 100644 include/media/v4l2-device.h create mode 100644 include/media/v4l2-dv-timings.h create mode 100644 include/media/v4l2-event.h create mode 100644 include/media/v4l2-fh.h create mode 100644 include/media/v4l2-flash-led-class.h create mode 100644 include/media/v4l2-fwnode.h create mode 100644 include/media/v4l2-image-sizes.h create mode 100644 include/media/v4l2-ioctl.h create mode 100644 include/media/v4l2-mc.h create mode 100644 include/media/v4l2-mediabus.h create mode 100644 include/media/v4l2-mem2mem.h create mode 100644 include/media/v4l2-rect.h create mode 100644 include/media/v4l2-subdev.h create mode 100644 include/media/videobuf-core.h create mode 100644 include/media/videobuf-dma-contig.h create mode 100644 include/media/videobuf-dma-sg.h create mode 100644 include/media/videobuf-vmalloc.h create mode 100644 include/media/videobuf2-core.h create mode 100644 include/media/videobuf2-dma-contig.h create mode 100644 include/media/videobuf2-dma-sg.h create mode 100644 include/media/videobuf2-dvb.h create mode 100644 include/media/videobuf2-memops.h create mode 100644 include/media/videobuf2-v4l2.h create mode 100644 include/media/videobuf2-vmalloc.h create mode 100644 include/media/vsp1.h create mode 100644 include/memory/jedec_ddr.h create mode 100644 include/misc/altera.h create mode 100644 
include/misc/charlcd.h create mode 100644 include/misc/cxl-base.h create mode 100644 include/misc/cxl.h create mode 100644 include/misc/cxllib.h create mode 100644 include/misc/ocxl-config.h create mode 100644 include/misc/ocxl.h create mode 100644 include/net/6lowpan.h create mode 100644 include/net/9p/9p.h create mode 100644 include/net/9p/client.h create mode 100644 include/net/9p/transport.h create mode 100644 include/net/Space.h create mode 100644 include/net/act_api.h create mode 100644 include/net/addrconf.h create mode 100644 include/net/af_ieee802154.h create mode 100644 include/net/af_rxrpc.h create mode 100644 include/net/af_unix.h create mode 100644 include/net/af_vsock.h create mode 100644 include/net/ah.h create mode 100644 include/net/arp.h create mode 100644 include/net/atmclip.h create mode 100644 include/net/ax25.h create mode 100644 include/net/ax88796.h create mode 100644 include/net/bluetooth/bluetooth.h create mode 100644 include/net/bluetooth/hci.h create mode 100644 include/net/bluetooth/hci_core.h create mode 100644 include/net/bluetooth/hci_mon.h create mode 100644 include/net/bluetooth/hci_sock.h create mode 100644 include/net/bluetooth/l2cap.h create mode 100644 include/net/bluetooth/mgmt.h create mode 100644 include/net/bluetooth/rfcomm.h create mode 100644 include/net/bluetooth/sco.h create mode 100644 include/net/bond_3ad.h create mode 100644 include/net/bond_alb.h create mode 100644 include/net/bond_options.h create mode 100644 include/net/bonding.h create mode 100644 include/net/busy_poll.h create mode 100644 include/net/caif/caif_dev.h create mode 100644 include/net/caif/caif_device.h create mode 100644 include/net/caif/caif_hsi.h create mode 100644 include/net/caif/caif_layer.h create mode 100644 include/net/caif/caif_spi.h create mode 100644 include/net/caif/cfcnfg.h create mode 100644 include/net/caif/cfctrl.h create mode 100644 include/net/caif/cffrml.h create mode 100644 include/net/caif/cfmuxl.h create mode 100644 include/net/caif/cfpkt.h create mode 100644 include/net/caif/cfserl.h create mode 100644 include/net/caif/cfsrvl.h create mode 100644 include/net/calipso.h create mode 100644 include/net/cfg80211-wext.h create mode 100644 include/net/cfg80211.h create mode 100644 include/net/cfg802154.h create mode 100644 include/net/checksum.h create mode 100644 include/net/cipso_ipv4.h create mode 100644 include/net/cls_cgroup.h create mode 100644 include/net/codel.h create mode 100644 include/net/codel_impl.h create mode 100644 include/net/codel_qdisc.h create mode 100644 include/net/compat.h create mode 100644 include/net/datalink.h create mode 100644 include/net/dcbevent.h create mode 100644 include/net/dcbnl.h create mode 100644 include/net/devlink.h create mode 100644 include/net/dn.h create mode 100644 include/net/dn_dev.h create mode 100644 include/net/dn_fib.h create mode 100644 include/net/dn_neigh.h create mode 100644 include/net/dn_nsp.h create mode 100644 include/net/dn_route.h create mode 100644 include/net/dsa.h create mode 100644 include/net/dsfield.h create mode 100644 include/net/dst.h create mode 100644 include/net/dst_cache.h create mode 100644 include/net/dst_metadata.h create mode 100644 include/net/dst_ops.h create mode 100644 include/net/erspan.h create mode 100644 include/net/esp.h create mode 100644 include/net/ethoc.h create mode 100644 include/net/failover.h create mode 100644 include/net/fib_notifier.h create mode 100644 include/net/fib_rules.h create mode 100644 include/net/firewire.h create mode 100644 include/net/flow.h 
create mode 100644 include/net/flow_dissector.h create mode 100644 include/net/fou.h create mode 100644 include/net/fq.h create mode 100644 include/net/fq_impl.h create mode 100644 include/net/garp.h create mode 100644 include/net/gen_stats.h create mode 100644 include/net/genetlink.h create mode 100644 include/net/geneve.h create mode 100644 include/net/gre.h create mode 100644 include/net/gro_cells.h create mode 100644 include/net/gtp.h create mode 100644 include/net/gue.h create mode 100644 include/net/hwbm.h create mode 100644 include/net/icmp.h create mode 100644 include/net/ieee80211_radiotap.h create mode 100644 include/net/ieee802154_netdev.h create mode 100644 include/net/if_inet6.h create mode 100644 include/net/ife.h create mode 100644 include/net/ila.h create mode 100644 include/net/inet6_connection_sock.h create mode 100644 include/net/inet6_hashtables.h create mode 100644 include/net/inet_common.h create mode 100644 include/net/inet_connection_sock.h create mode 100644 include/net/inet_ecn.h create mode 100644 include/net/inet_frag.h create mode 100644 include/net/inet_hashtables.h create mode 100644 include/net/inet_sock.h create mode 100644 include/net/inet_timewait_sock.h create mode 100644 include/net/inetpeer.h create mode 100644 include/net/ip.h create mode 100644 include/net/ip6_checksum.h create mode 100644 include/net/ip6_fib.h create mode 100644 include/net/ip6_route.h create mode 100644 include/net/ip6_tunnel.h create mode 100644 include/net/ip_fib.h create mode 100644 include/net/ip_tunnels.h create mode 100644 include/net/ip_vs.h create mode 100644 include/net/ipcomp.h create mode 100644 include/net/ipconfig.h create mode 100644 include/net/ipv6.h create mode 100644 include/net/ipv6_frag.h create mode 100644 include/net/ipx.h create mode 100644 include/net/iucv/af_iucv.h create mode 100644 include/net/iucv/iucv.h create mode 100644 include/net/iw_handler.h create mode 100644 include/net/kcm.h create mode 100644 include/net/l3mdev.h create mode 100644 include/net/lag.h create mode 100644 include/net/lapb.h create mode 100644 include/net/lib80211.h create mode 100644 include/net/llc.h create mode 100644 include/net/llc_c_ac.h create mode 100644 include/net/llc_c_ev.h create mode 100644 include/net/llc_c_st.h create mode 100644 include/net/llc_conn.h create mode 100644 include/net/llc_if.h create mode 100644 include/net/llc_pdu.h create mode 100644 include/net/llc_s_ac.h create mode 100644 include/net/llc_s_ev.h create mode 100644 include/net/llc_s_st.h create mode 100644 include/net/llc_sap.h create mode 100644 include/net/lwtunnel.h create mode 100644 include/net/mac80211.h create mode 100644 include/net/mac802154.h create mode 100644 include/net/mip6.h create mode 100644 include/net/mld.h create mode 100644 include/net/mpls.h create mode 100644 include/net/mpls_iptunnel.h create mode 100644 include/net/mrp.h create mode 100644 include/net/ncsi.h create mode 100644 include/net/ndisc.h create mode 100644 include/net/neighbour.h create mode 100644 include/net/net_failover.h create mode 100644 include/net/net_namespace.h create mode 100644 include/net/net_ratelimit.h create mode 100644 include/net/netevent.h create mode 100644 include/net/netfilter/br_netfilter.h create mode 100644 include/net/netfilter/ipv4/nf_conntrack_ipv4.h create mode 100644 include/net/netfilter/ipv4/nf_defrag_ipv4.h create mode 100644 include/net/netfilter/ipv4/nf_dup_ipv4.h create mode 100644 include/net/netfilter/ipv4/nf_nat_masquerade.h create mode 100644 
include/net/netfilter/ipv4/nf_reject.h create mode 100644 include/net/netfilter/ipv6/nf_conntrack_icmpv6.h create mode 100644 include/net/netfilter/ipv6/nf_conntrack_ipv6.h create mode 100644 include/net/netfilter/ipv6/nf_defrag_ipv6.h create mode 100644 include/net/netfilter/ipv6/nf_dup_ipv6.h create mode 100644 include/net/netfilter/ipv6/nf_nat_masquerade.h create mode 100644 include/net/netfilter/ipv6/nf_reject.h create mode 100644 include/net/netfilter/nf_conntrack.h create mode 100644 include/net/netfilter/nf_conntrack_acct.h create mode 100644 include/net/netfilter/nf_conntrack_core.h create mode 100644 include/net/netfilter/nf_conntrack_count.h create mode 100644 include/net/netfilter/nf_conntrack_ecache.h create mode 100644 include/net/netfilter/nf_conntrack_expect.h create mode 100644 include/net/netfilter/nf_conntrack_extend.h create mode 100644 include/net/netfilter/nf_conntrack_helper.h create mode 100644 include/net/netfilter/nf_conntrack_l4proto.h create mode 100644 include/net/netfilter/nf_conntrack_labels.h create mode 100644 include/net/netfilter/nf_conntrack_seqadj.h create mode 100644 include/net/netfilter/nf_conntrack_synproxy.h create mode 100644 include/net/netfilter/nf_conntrack_timeout.h create mode 100644 include/net/netfilter/nf_conntrack_timestamp.h create mode 100644 include/net/netfilter/nf_conntrack_tuple.h create mode 100644 include/net/netfilter/nf_conntrack_zones.h create mode 100644 include/net/netfilter/nf_dup_netdev.h create mode 100644 include/net/netfilter/nf_flow_table.h create mode 100644 include/net/netfilter/nf_log.h create mode 100644 include/net/netfilter/nf_nat.h create mode 100644 include/net/netfilter/nf_nat_core.h create mode 100644 include/net/netfilter/nf_nat_helper.h create mode 100644 include/net/netfilter/nf_nat_l3proto.h create mode 100644 include/net/netfilter/nf_nat_l4proto.h create mode 100644 include/net/netfilter/nf_nat_redirect.h create mode 100644 include/net/netfilter/nf_queue.h create mode 100644 include/net/netfilter/nf_socket.h create mode 100644 include/net/netfilter/nf_tables.h create mode 100644 include/net/netfilter/nf_tables_core.h create mode 100644 include/net/netfilter/nf_tables_ipv4.h create mode 100644 include/net/netfilter/nf_tables_ipv6.h create mode 100644 include/net/netfilter/nf_tproxy.h create mode 100644 include/net/netfilter/nfnetlink_log.h create mode 100644 include/net/netfilter/nft_fib.h create mode 100644 include/net/netfilter/nft_masq.h create mode 100644 include/net/netfilter/nft_redir.h create mode 100644 include/net/netfilter/nft_reject.h create mode 100644 include/net/netfilter/xt_rateest.h create mode 100644 include/net/netlabel.h create mode 100644 include/net/netlink.h create mode 100644 include/net/netns/can.h create mode 100644 include/net/netns/conntrack.h create mode 100644 include/net/netns/core.h create mode 100644 include/net/netns/dccp.h create mode 100644 include/net/netns/generic.h create mode 100644 include/net/netns/hash.h create mode 100644 include/net/netns/ieee802154_6lowpan.h create mode 100644 include/net/netns/ipv4.h create mode 100644 include/net/netns/ipv6.h create mode 100644 include/net/netns/mib.h create mode 100644 include/net/netns/mpls.h create mode 100644 include/net/netns/netfilter.h create mode 100644 include/net/netns/nftables.h create mode 100644 include/net/netns/packet.h create mode 100644 include/net/netns/sctp.h create mode 100644 include/net/netns/unix.h create mode 100644 include/net/netns/x_tables.h create mode 100644 include/net/netns/xfrm.h create mode 
100644 include/net/netprio_cgroup.h create mode 100644 include/net/netrom.h create mode 100644 include/net/nexthop.h create mode 100644 include/net/nfc/digital.h create mode 100644 include/net/nfc/hci.h create mode 100644 include/net/nfc/llc.h create mode 100644 include/net/nfc/nci.h create mode 100644 include/net/nfc/nci_core.h create mode 100644 include/net/nfc/nfc.h create mode 100644 include/net/nl802154.h create mode 100644 include/net/nsh.h create mode 100644 include/net/p8022.h create mode 100644 include/net/page_pool.h create mode 100644 include/net/phonet/gprs.h create mode 100644 include/net/phonet/pep.h create mode 100644 include/net/phonet/phonet.h create mode 100644 include/net/phonet/pn_dev.h create mode 100644 include/net/ping.h create mode 100644 include/net/pkt_cls.h create mode 100644 include/net/pkt_sched.h create mode 100644 include/net/pptp.h create mode 100644 include/net/protocol.h create mode 100644 include/net/psample.h create mode 100644 include/net/psnap.h create mode 100644 include/net/raw.h create mode 100644 include/net/rawv6.h create mode 100644 include/net/red.h create mode 100644 include/net/regulatory.h create mode 100644 include/net/request_sock.h create mode 100644 include/net/rose.h create mode 100644 include/net/route.h create mode 100644 include/net/rsi_91x.h create mode 100644 include/net/rtnetlink.h create mode 100644 include/net/sch_generic.h create mode 100644 include/net/scm.h create mode 100644 include/net/sctp/auth.h create mode 100644 include/net/sctp/checksum.h create mode 100644 include/net/sctp/command.h create mode 100644 include/net/sctp/constants.h create mode 100644 include/net/sctp/sctp.h create mode 100644 include/net/sctp/sm.h create mode 100644 include/net/sctp/stream_interleave.h create mode 100644 include/net/sctp/stream_sched.h create mode 100644 include/net/sctp/structs.h create mode 100644 include/net/sctp/tsnmap.h create mode 100644 include/net/sctp/ulpevent.h create mode 100644 include/net/sctp/ulpqueue.h create mode 100644 include/net/secure_seq.h create mode 100644 include/net/seg6.h create mode 100644 include/net/seg6_hmac.h create mode 100644 include/net/seg6_local.h create mode 100644 include/net/slhc_vj.h create mode 100644 include/net/smc.h create mode 100644 include/net/snmp.h create mode 100644 include/net/sock.h create mode 100644 include/net/sock_reuseport.h create mode 100644 include/net/stp.h create mode 100644 include/net/strparser.h create mode 100644 include/net/switchdev.h create mode 100644 include/net/tc_act/tc_bpf.h create mode 100644 include/net/tc_act/tc_connmark.h create mode 100644 include/net/tc_act/tc_csum.h create mode 100644 include/net/tc_act/tc_defact.h create mode 100644 include/net/tc_act/tc_gact.h create mode 100644 include/net/tc_act/tc_ife.h create mode 100644 include/net/tc_act/tc_ipt.h create mode 100644 include/net/tc_act/tc_mirred.h create mode 100644 include/net/tc_act/tc_nat.h create mode 100644 include/net/tc_act/tc_pedit.h create mode 100644 include/net/tc_act/tc_sample.h create mode 100644 include/net/tc_act/tc_skbedit.h create mode 100644 include/net/tc_act/tc_skbmod.h create mode 100644 include/net/tc_act/tc_tunnel_key.h create mode 100644 include/net/tc_act/tc_vlan.h create mode 100644 include/net/tcp.h create mode 100644 include/net/tcp_states.h create mode 100644 include/net/timewait_sock.h create mode 100644 include/net/tipc.h create mode 100644 include/net/tls.h create mode 100644 include/net/transp_v6.h create mode 100644 include/net/tso.h create mode 100644 
include/net/tun_proto.h create mode 100644 include/net/udp.h create mode 100644 include/net/udp_tunnel.h create mode 100644 include/net/udplite.h create mode 100644 include/net/vsock_addr.h create mode 100644 include/net/vxlan.h create mode 100644 include/net/wext.h create mode 100644 include/net/wimax.h create mode 100644 include/net/x25.h create mode 100644 include/net/x25device.h create mode 100644 include/net/xdp.h create mode 100644 include/net/xdp_sock.h create mode 100644 include/net/xfrm.h create mode 100644 include/pcmcia/ciscode.h create mode 100644 include/pcmcia/cisreg.h create mode 100644 include/pcmcia/cistpl.h create mode 100644 include/pcmcia/device_id.h create mode 100644 include/pcmcia/ds.h create mode 100644 include/pcmcia/ss.h create mode 100644 include/ras/ras_event.h create mode 100644 include/rdma/ib.h create mode 100644 include/rdma/ib_addr.h create mode 100644 include/rdma/ib_cache.h create mode 100644 include/rdma/ib_cm.h create mode 100644 include/rdma/ib_fmr_pool.h create mode 100644 include/rdma/ib_hdrs.h create mode 100644 include/rdma/ib_mad.h create mode 100644 include/rdma/ib_marshall.h create mode 100644 include/rdma/ib_pack.h create mode 100644 include/rdma/ib_pma.h create mode 100644 include/rdma/ib_sa.h create mode 100644 include/rdma/ib_smi.h create mode 100644 include/rdma/ib_umem.h create mode 100644 include/rdma/ib_umem_odp.h create mode 100644 include/rdma/ib_verbs.h create mode 100644 include/rdma/iw_cm.h create mode 100644 include/rdma/iw_portmap.h create mode 100644 include/rdma/mr_pool.h create mode 100644 include/rdma/opa_addr.h create mode 100644 include/rdma/opa_port_info.h create mode 100644 include/rdma/opa_smi.h create mode 100644 include/rdma/opa_vnic.h create mode 100644 include/rdma/rdma_cm.h create mode 100644 include/rdma/rdma_cm_ib.h create mode 100644 include/rdma/rdma_netlink.h create mode 100644 include/rdma/rdma_vt.h create mode 100644 include/rdma/rdmavt_cq.h create mode 100644 include/rdma/rdmavt_mr.h create mode 100644 include/rdma/rdmavt_qp.h create mode 100644 include/rdma/restrack.h create mode 100644 include/rdma/rw.h create mode 100644 include/rdma/uverbs_ioctl.h create mode 100644 include/rdma/uverbs_named_ioctl.h create mode 100644 include/rdma/uverbs_std_types.h create mode 100644 include/rdma/uverbs_types.h create mode 100644 include/scsi/fc/fc_encaps.h create mode 100644 include/scsi/fc/fc_fc2.h create mode 100644 include/scsi/fc/fc_fcoe.h create mode 100644 include/scsi/fc/fc_fcp.h create mode 100644 include/scsi/fc/fc_fip.h create mode 100644 include/scsi/fc/fc_ms.h create mode 100644 include/scsi/fc_encode.h create mode 100644 include/scsi/fc_frame.h create mode 100644 include/scsi/fcoe_sysfs.h create mode 100644 include/scsi/iscsi_if.h create mode 100644 include/scsi/iscsi_proto.h create mode 100644 include/scsi/iser.h create mode 100644 include/scsi/libfc.h create mode 100644 include/scsi/libfcoe.h create mode 100644 include/scsi/libiscsi.h create mode 100644 include/scsi/libiscsi_tcp.h create mode 100644 include/scsi/libsas.h create mode 100644 include/scsi/osd_attributes.h create mode 100644 include/scsi/osd_initiator.h create mode 100644 include/scsi/osd_ore.h create mode 100644 include/scsi/osd_protocol.h create mode 100644 include/scsi/osd_sec.h create mode 100644 include/scsi/osd_sense.h create mode 100644 include/scsi/osd_types.h create mode 100644 include/scsi/sas.h create mode 100644 include/scsi/sas_ata.h create mode 100644 include/scsi/scsi.h create mode 100644 include/scsi/scsi_bsg_iscsi.h create 
mode 100644 include/scsi/scsi_cmnd.h create mode 100644 include/scsi/scsi_common.h create mode 100644 include/scsi/scsi_dbg.h create mode 100644 include/scsi/scsi_device.h create mode 100644 include/scsi/scsi_devinfo.h create mode 100644 include/scsi/scsi_dh.h create mode 100644 include/scsi/scsi_driver.h create mode 100644 include/scsi/scsi_eh.h create mode 100644 include/scsi/scsi_host.h create mode 100644 include/scsi/scsi_ioctl.h create mode 100644 include/scsi/scsi_proto.h create mode 100644 include/scsi/scsi_request.h create mode 100644 include/scsi/scsi_tcq.h create mode 100644 include/scsi/scsi_transport.h create mode 100644 include/scsi/scsi_transport_fc.h create mode 100644 include/scsi/scsi_transport_iscsi.h create mode 100644 include/scsi/scsi_transport_sas.h create mode 100644 include/scsi/scsi_transport_spi.h create mode 100644 include/scsi/scsi_transport_srp.h create mode 100644 include/scsi/scsicam.h create mode 100644 include/scsi/sg.h create mode 100644 include/scsi/srp.h create mode 100644 include/scsi/viosrp.h create mode 100644 include/soc/arc/aux.h create mode 100644 include/soc/arc/mcip.h create mode 100644 include/soc/arc/timers.h create mode 100644 include/soc/at91/at91sam9_ddrsdr.h create mode 100644 include/soc/at91/at91sam9_sdramc.h create mode 100644 include/soc/at91/atmel-secumod.h create mode 100644 include/soc/at91/atmel-sfr.h create mode 100644 include/soc/bcm2835/raspberrypi-firmware.h create mode 100644 include/soc/brcmstb/common.h create mode 100644 include/soc/fsl/bman.h create mode 100644 include/soc/fsl/dpaa2-fd.h create mode 100644 include/soc/fsl/dpaa2-global.h create mode 100644 include/soc/fsl/dpaa2-io.h create mode 100644 include/soc/fsl/qe/immap_qe.h create mode 100644 include/soc/fsl/qe/qe.h create mode 100644 include/soc/fsl/qe/qe_ic.h create mode 100644 include/soc/fsl/qe/qe_tdm.h create mode 100644 include/soc/fsl/qe/ucc.h create mode 100644 include/soc/fsl/qe/ucc_fast.h create mode 100644 include/soc/fsl/qe/ucc_slow.h create mode 100644 include/soc/fsl/qman.h create mode 100644 include/soc/imx/cpuidle.h create mode 100644 include/soc/imx/revision.h create mode 100644 include/soc/imx/timer.h create mode 100644 include/soc/mediatek/smi.h create mode 100644 include/soc/nps/common.h create mode 100644 include/soc/nps/mtm.h create mode 100644 include/soc/qcom/cmd-db.h create mode 100644 include/soc/qcom/rpmh.h create mode 100644 include/soc/qcom/tcs.h create mode 100644 include/soc/rockchip/rockchip_sip.h create mode 100644 include/soc/sa1100/pwer.h create mode 100644 include/soc/tegra/ahb.h create mode 100644 include/soc/tegra/bpmp-abi.h create mode 100644 include/soc/tegra/bpmp.h create mode 100644 include/soc/tegra/common.h create mode 100644 include/soc/tegra/cpuidle.h create mode 100644 include/soc/tegra/emc.h create mode 100644 include/soc/tegra/flowctrl.h create mode 100644 include/soc/tegra/fuse.h create mode 100644 include/soc/tegra/ivc.h create mode 100644 include/soc/tegra/mc.h create mode 100644 include/soc/tegra/pm.h create mode 100644 include/soc/tegra/pmc.h create mode 100644 include/sound/ac97/codec.h create mode 100644 include/sound/ac97/compat.h create mode 100644 include/sound/ac97/controller.h create mode 100644 include/sound/ac97/regs.h create mode 100644 include/sound/ac97_codec.h create mode 100644 include/sound/aci.h create mode 100644 include/sound/ad1816a.h create mode 100644 include/sound/ad1843.h create mode 100644 include/sound/adau1373.h create mode 100644 include/sound/aess.h create mode 100644 
include/sound/ak4113.h create mode 100644 include/sound/ak4114.h create mode 100644 include/sound/ak4117.h create mode 100644 include/sound/ak4531_codec.h create mode 100644 include/sound/ak4641.h create mode 100644 include/sound/ak4xxx-adda.h create mode 100644 include/sound/alc5623.h create mode 100644 include/sound/asequencer.h create mode 100644 include/sound/asound.h create mode 100644 include/sound/asoundef.h create mode 100644 include/sound/compress_driver.h create mode 100644 include/sound/control.h create mode 100644 include/sound/core.h create mode 100644 include/sound/cs35l33.h create mode 100644 include/sound/cs35l34.h create mode 100644 include/sound/cs35l35.h create mode 100644 include/sound/cs4231-regs.h create mode 100644 include/sound/cs4271.h create mode 100644 include/sound/cs42l52.h create mode 100644 include/sound/cs42l56.h create mode 100644 include/sound/cs42l73.h create mode 100644 include/sound/cs8403.h create mode 100644 include/sound/cs8427.h create mode 100644 include/sound/da7213.h create mode 100644 include/sound/da7218.h create mode 100644 include/sound/da7219-aad.h create mode 100644 include/sound/da7219.h create mode 100644 include/sound/da9055.h create mode 100644 include/sound/designware_i2s.h create mode 100644 include/sound/dmaengine_pcm.h create mode 100644 include/sound/emu10k1.h create mode 100644 include/sound/emu10k1_synth.h create mode 100644 include/sound/emu8000.h create mode 100644 include/sound/emu8000_reg.h create mode 100644 include/sound/emux_legacy.h create mode 100644 include/sound/emux_synth.h create mode 100644 include/sound/es1688.h create mode 100644 include/sound/gus.h create mode 100644 include/sound/hda_chmap.h create mode 100644 include/sound/hda_component.h create mode 100644 include/sound/hda_hwdep.h create mode 100644 include/sound/hda_i915.h create mode 100644 include/sound/hda_register.h create mode 100644 include/sound/hda_regmap.h create mode 100644 include/sound/hda_verbs.h create mode 100644 include/sound/hdaudio.h create mode 100644 include/sound/hdaudio_ext.h create mode 100644 include/sound/hdmi-codec.h create mode 100644 include/sound/hwdep.h create mode 100644 include/sound/i2c.h create mode 100644 include/sound/info.h create mode 100644 include/sound/initval.h create mode 100644 include/sound/jack.h create mode 100644 include/sound/l3.h create mode 100644 include/sound/max9768.h create mode 100644 include/sound/max98088.h create mode 100644 include/sound/max98090.h create mode 100644 include/sound/max98095.h create mode 100644 include/sound/memalloc.h create mode 100644 include/sound/minors.h create mode 100644 include/sound/mixer_oss.h create mode 100644 include/sound/mpu401.h create mode 100644 include/sound/omap-hdmi-audio.h create mode 100644 include/sound/opl3.h create mode 100644 include/sound/opl4.h create mode 100644 include/sound/pcm-indirect.h create mode 100644 include/sound/pcm.h create mode 100644 include/sound/pcm_drm_eld.h create mode 100644 include/sound/pcm_iec958.h create mode 100644 include/sound/pcm_oss.h create mode 100644 include/sound/pcm_params.h create mode 100644 include/sound/pt2258.h create mode 100644 include/sound/pxa2xx-lib.h create mode 100644 include/sound/rawmidi.h create mode 100644 include/sound/rt286.h create mode 100644 include/sound/rt298.h create mode 100644 include/sound/rt5514.h create mode 100644 include/sound/rt5645.h create mode 100644 include/sound/rt5659.h create mode 100644 include/sound/rt5660.h create mode 100644 include/sound/rt5663.h create mode 100644 
include/sound/rt5665.h create mode 100644 include/sound/rt5668.h create mode 100644 include/sound/rt5670.h create mode 100644 include/sound/rt5682.h create mode 100644 include/sound/s3c24xx_uda134x.h create mode 100644 include/sound/sb.h create mode 100644 include/sound/sb16_csp.h create mode 100644 include/sound/seq_device.h create mode 100644 include/sound/seq_kernel.h create mode 100644 include/sound/seq_midi_emul.h create mode 100644 include/sound/seq_midi_event.h create mode 100644 include/sound/seq_oss.h create mode 100644 include/sound/seq_oss_legacy.h create mode 100644 include/sound/seq_virmidi.h create mode 100644 include/sound/sh_dac_audio.h create mode 100644 include/sound/sh_fsi.h create mode 100644 include/sound/simple_card.h create mode 100644 include/sound/simple_card_utils.h create mode 100644 include/sound/snd_wavefront.h create mode 100644 include/sound/soc-acpi-intel-match.h create mode 100644 include/sound/soc-acpi.h create mode 100644 include/sound/soc-dai.h create mode 100644 include/sound/soc-dapm.h create mode 100644 include/sound/soc-dpcm.h create mode 100644 include/sound/soc-topology.h create mode 100644 include/sound/soc.h create mode 100644 include/sound/soundfont.h create mode 100644 include/sound/spear_dma.h create mode 100644 include/sound/spear_spdif.h create mode 100644 include/sound/sta32x.h create mode 100644 include/sound/sta350.h create mode 100644 include/sound/tas2552-plat.h create mode 100644 include/sound/tas5086.h create mode 100644 include/sound/tea6330t.h create mode 100644 include/sound/timer.h create mode 100644 include/sound/tlv.h create mode 100644 include/sound/tlv320aic32x4.h create mode 100644 include/sound/tlv320aic3x.h create mode 100644 include/sound/tlv320dac33-plat.h create mode 100644 include/sound/tpa6130a2-plat.h create mode 100644 include/sound/uda134x.h create mode 100644 include/sound/uda1380.h create mode 100644 include/sound/util_mem.h create mode 100644 include/sound/vx_core.h create mode 100644 include/sound/wavefront.h create mode 100644 include/sound/wm0010.h create mode 100644 include/sound/wm1250-ev1.h create mode 100644 include/sound/wm2000.h create mode 100644 include/sound/wm2200.h create mode 100644 include/sound/wm5100.h create mode 100644 include/sound/wm8903.h create mode 100644 include/sound/wm8904.h create mode 100644 include/sound/wm8955.h create mode 100644 include/sound/wm8960.h create mode 100644 include/sound/wm8962.h create mode 100644 include/sound/wm8993.h create mode 100644 include/sound/wm8996.h create mode 100644 include/sound/wm9081.h create mode 100644 include/sound/wm9090.h create mode 100644 include/sound/wss.h create mode 100644 include/target/iscsi/iscsi_target_core.h create mode 100644 include/target/iscsi/iscsi_target_stat.h create mode 100644 include/target/iscsi/iscsi_transport.h create mode 100644 include/target/target_core_backend.h create mode 100644 include/target/target_core_base.h create mode 100644 include/target/target_core_fabric.h create mode 100644 include/trace/bpf_probe.h create mode 100644 include/trace/define_trace.h create mode 100644 include/trace/events/9p.h create mode 100644 include/trace/events/afs.h create mode 100644 include/trace/events/alarmtimer.h create mode 100644 include/trace/events/asoc.h create mode 100644 include/trace/events/bcache.h create mode 100644 include/trace/events/block.h create mode 100644 include/trace/events/bridge.h create mode 100644 include/trace/events/btrfs.h create mode 100644 include/trace/events/cachefiles.h create mode 100644 
include/trace/events/cgroup.h create mode 100644 include/trace/events/clk.h create mode 100644 include/trace/events/cma.h create mode 100644 include/trace/events/compaction.h create mode 100644 include/trace/events/context_tracking.h create mode 100644 include/trace/events/cpuhp.h create mode 100644 include/trace/events/devlink.h create mode 100644 include/trace/events/dma_fence.h create mode 100644 include/trace/events/ext4.h create mode 100644 include/trace/events/f2fs.h create mode 100644 include/trace/events/fib.h create mode 100644 include/trace/events/fib6.h create mode 100644 include/trace/events/filelock.h create mode 100644 include/trace/events/filemap.h create mode 100644 include/trace/events/fs_dax.h create mode 100644 include/trace/events/fscache.h create mode 100644 include/trace/events/fsi.h create mode 100644 include/trace/events/fsi_master_ast_cf.h create mode 100644 include/trace/events/fsi_master_gpio.h create mode 100644 include/trace/events/gpio.h create mode 100644 include/trace/events/host1x.h create mode 100644 include/trace/events/hswadsp.h create mode 100644 include/trace/events/huge_memory.h create mode 100644 include/trace/events/i2c.h create mode 100644 include/trace/events/initcall.h create mode 100644 include/trace/events/intel-sst.h create mode 100644 include/trace/events/intel_ish.h create mode 100644 include/trace/events/iommu.h create mode 100644 include/trace/events/ipi.h create mode 100644 include/trace/events/irq.h create mode 100644 include/trace/events/irq_matrix.h create mode 100644 include/trace/events/jbd2.h create mode 100644 include/trace/events/kmem.h create mode 100644 include/trace/events/kvm.h create mode 100644 include/trace/events/libata.h create mode 100644 include/trace/events/lock.h create mode 100644 include/trace/events/mce.h create mode 100644 include/trace/events/mdio.h create mode 100644 include/trace/events/migrate.h create mode 100644 include/trace/events/mmc.h create mode 100644 include/trace/events/mmflags.h create mode 100644 include/trace/events/module.h create mode 100644 include/trace/events/napi.h create mode 100644 include/trace/events/net.h create mode 100644 include/trace/events/net_probe_common.h create mode 100644 include/trace/events/nilfs2.h create mode 100644 include/trace/events/nmi.h create mode 100644 include/trace/events/oom.h create mode 100644 include/trace/events/page_isolation.h create mode 100644 include/trace/events/page_ref.h create mode 100644 include/trace/events/pagemap.h create mode 100644 include/trace/events/percpu.h create mode 100644 include/trace/events/power.h create mode 100644 include/trace/events/power_cpu_migrate.h create mode 100644 include/trace/events/preemptirq.h create mode 100644 include/trace/events/printk.h create mode 100644 include/trace/events/qdisc.h create mode 100644 include/trace/events/rcu.h create mode 100644 include/trace/events/rdma.h create mode 100644 include/trace/events/regulator.h create mode 100644 include/trace/events/rpcrdma.h create mode 100644 include/trace/events/rpm.h create mode 100644 include/trace/events/rseq.h create mode 100644 include/trace/events/rtc.h create mode 100644 include/trace/events/rxrpc.h create mode 100644 include/trace/events/sched.h create mode 100644 include/trace/events/scsi.h create mode 100644 include/trace/events/sctp.h create mode 100644 include/trace/events/signal.h create mode 100644 include/trace/events/siox.h create mode 100644 include/trace/events/skb.h create mode 100644 include/trace/events/smbus.h create mode 100644 
include/trace/events/sock.h create mode 100644 include/trace/events/spi.h create mode 100644 include/trace/events/spmi.h create mode 100644 include/trace/events/sunrpc.h create mode 100644 include/trace/events/sunvnet.h create mode 100644 include/trace/events/swiotlb.h create mode 100644 include/trace/events/syscalls.h create mode 100644 include/trace/events/target.h create mode 100644 include/trace/events/task.h create mode 100644 include/trace/events/tcp.h create mode 100644 include/trace/events/thermal.h create mode 100644 include/trace/events/thermal_power_allocator.h create mode 100644 include/trace/events/thp.h create mode 100644 include/trace/events/timer.h create mode 100644 include/trace/events/tlb.h create mode 100644 include/trace/events/udp.h create mode 100644 include/trace/events/ufs.h create mode 100644 include/trace/events/v4l2.h create mode 100644 include/trace/events/vb2.h create mode 100644 include/trace/events/vmscan.h create mode 100644 include/trace/events/vsock_virtio_transport_common.h create mode 100644 include/trace/events/wbt.h create mode 100644 include/trace/events/workqueue.h create mode 100644 include/trace/events/writeback.h create mode 100644 include/trace/events/xdp.h create mode 100644 include/trace/events/xen.h create mode 100644 include/trace/perf.h create mode 100644 include/trace/syscall.h create mode 100644 include/trace/trace_events.h create mode 100644 include/uapi/asm-generic/Kbuild.asm create mode 100644 include/uapi/asm-generic/auxvec.h create mode 100644 include/uapi/asm-generic/bitsperlong.h create mode 100644 include/uapi/asm-generic/bpf_perf_event.h create mode 100644 include/uapi/asm-generic/errno-base.h create mode 100644 include/uapi/asm-generic/errno.h create mode 100644 include/uapi/asm-generic/fcntl.h create mode 100644 include/uapi/asm-generic/hugetlb_encode.h create mode 100644 include/uapi/asm-generic/int-l64.h create mode 100644 include/uapi/asm-generic/int-ll64.h create mode 100644 include/uapi/asm-generic/ioctl.h create mode 100644 include/uapi/asm-generic/ioctls.h create mode 100644 include/uapi/asm-generic/ipcbuf.h create mode 100644 include/uapi/asm-generic/kvm_para.h create mode 100644 include/uapi/asm-generic/mman-common.h create mode 100644 include/uapi/asm-generic/mman.h create mode 100644 include/uapi/asm-generic/msgbuf.h create mode 100644 include/uapi/asm-generic/param.h create mode 100644 include/uapi/asm-generic/poll.h create mode 100644 include/uapi/asm-generic/posix_types.h create mode 100644 include/uapi/asm-generic/resource.h create mode 100644 include/uapi/asm-generic/sembuf.h create mode 100644 include/uapi/asm-generic/setup.h create mode 100644 include/uapi/asm-generic/shmbuf.h create mode 100644 include/uapi/asm-generic/shmparam.h create mode 100644 include/uapi/asm-generic/siginfo.h create mode 100644 include/uapi/asm-generic/signal-defs.h create mode 100644 include/uapi/asm-generic/signal.h create mode 100644 include/uapi/asm-generic/socket.h create mode 100644 include/uapi/asm-generic/sockios.h create mode 100644 include/uapi/asm-generic/stat.h create mode 100644 include/uapi/asm-generic/statfs.h create mode 100644 include/uapi/asm-generic/swab.h create mode 100644 include/uapi/asm-generic/termbits.h create mode 100644 include/uapi/asm-generic/termios.h create mode 100644 include/uapi/asm-generic/types.h create mode 100644 include/uapi/asm-generic/ucontext.h create mode 100644 include/uapi/asm-generic/unistd.h create mode 100644 include/uapi/drm/amdgpu_drm.h create mode 100644 include/uapi/drm/armada_drm.h 
create mode 100644 include/uapi/drm/drm.h create mode 100644 include/uapi/drm/drm_fourcc.h create mode 100644 include/uapi/drm/drm_mode.h create mode 100644 include/uapi/drm/drm_sarea.h create mode 100644 include/uapi/drm/etnaviv_drm.h create mode 100644 include/uapi/drm/exynos_drm.h create mode 100644 include/uapi/drm/i810_drm.h create mode 100644 include/uapi/drm/i915_drm.h create mode 100644 include/uapi/drm/mga_drm.h create mode 100644 include/uapi/drm/msm_drm.h create mode 100644 include/uapi/drm/nouveau_drm.h create mode 100644 include/uapi/drm/omap_drm.h create mode 100644 include/uapi/drm/qxl_drm.h create mode 100644 include/uapi/drm/r128_drm.h create mode 100644 include/uapi/drm/radeon_drm.h create mode 100644 include/uapi/drm/savage_drm.h create mode 100644 include/uapi/drm/sis_drm.h create mode 100644 include/uapi/drm/tegra_drm.h create mode 100644 include/uapi/drm/v3d_drm.h create mode 100644 include/uapi/drm/vc4_drm.h create mode 100644 include/uapi/drm/vgem_drm.h create mode 100644 include/uapi/drm/via_drm.h create mode 100644 include/uapi/drm/virtgpu_drm.h create mode 100644 include/uapi/drm/vmwgfx_drm.h create mode 100644 include/uapi/linux/Kbuild create mode 100644 include/uapi/linux/a.out.h create mode 100644 include/uapi/linux/acct.h create mode 100644 include/uapi/linux/adb.h create mode 100644 include/uapi/linux/adfs_fs.h create mode 100644 include/uapi/linux/affs_hardblocks.h create mode 100644 include/uapi/linux/agpgart.h create mode 100644 include/uapi/linux/aio_abi.h create mode 100644 include/uapi/linux/am437x-vpfe.h create mode 100644 include/uapi/linux/android/binder.h create mode 100644 include/uapi/linux/apm_bios.h create mode 100644 include/uapi/linux/arcfb.h create mode 100644 include/uapi/linux/arm_sdei.h create mode 100644 include/uapi/linux/aspeed-lpc-ctrl.h create mode 100644 include/uapi/linux/atalk.h create mode 100644 include/uapi/linux/atm.h create mode 100644 include/uapi/linux/atm_eni.h create mode 100644 include/uapi/linux/atm_he.h create mode 100644 include/uapi/linux/atm_idt77105.h create mode 100644 include/uapi/linux/atm_nicstar.h create mode 100644 include/uapi/linux/atm_tcp.h create mode 100644 include/uapi/linux/atm_zatm.h create mode 100644 include/uapi/linux/atmapi.h create mode 100644 include/uapi/linux/atmarp.h create mode 100644 include/uapi/linux/atmbr2684.h create mode 100644 include/uapi/linux/atmclip.h create mode 100644 include/uapi/linux/atmdev.h create mode 100644 include/uapi/linux/atmioc.h create mode 100644 include/uapi/linux/atmlec.h create mode 100644 include/uapi/linux/atmmpc.h create mode 100644 include/uapi/linux/atmppp.h create mode 100644 include/uapi/linux/atmsap.h create mode 100644 include/uapi/linux/atmsvc.h create mode 100644 include/uapi/linux/audit.h create mode 100644 include/uapi/linux/auto_dev-ioctl.h create mode 100644 include/uapi/linux/auto_fs.h create mode 100644 include/uapi/linux/auto_fs4.h create mode 100644 include/uapi/linux/auxvec.h create mode 100644 include/uapi/linux/ax25.h create mode 100644 include/uapi/linux/b1lli.h create mode 100644 include/uapi/linux/batadv_packet.h create mode 100644 include/uapi/linux/batman_adv.h create mode 100644 include/uapi/linux/baycom.h create mode 100644 include/uapi/linux/bcache.h create mode 100644 include/uapi/linux/bcm933xx_hcs.h create mode 100644 include/uapi/linux/bfs_fs.h create mode 100644 include/uapi/linux/binfmts.h create mode 100644 include/uapi/linux/blkpg.h create mode 100644 include/uapi/linux/blktrace_api.h create mode 100644 
include/uapi/linux/blkzoned.h create mode 100644 include/uapi/linux/bpf.h create mode 100644 include/uapi/linux/bpf_common.h create mode 100644 include/uapi/linux/bpf_perf_event.h create mode 100644 include/uapi/linux/bpfilter.h create mode 100644 include/uapi/linux/bpqether.h create mode 100644 include/uapi/linux/bsg.h create mode 100644 include/uapi/linux/bt-bmc.h create mode 100644 include/uapi/linux/btf.h create mode 100644 include/uapi/linux/btrfs.h create mode 100644 include/uapi/linux/btrfs_tree.h create mode 100644 include/uapi/linux/byteorder/big_endian.h create mode 100644 include/uapi/linux/byteorder/little_endian.h create mode 100644 include/uapi/linux/caif/caif_socket.h create mode 100644 include/uapi/linux/caif/if_caif.h create mode 100644 include/uapi/linux/can.h create mode 100644 include/uapi/linux/can/bcm.h create mode 100644 include/uapi/linux/can/error.h create mode 100644 include/uapi/linux/can/gw.h create mode 100644 include/uapi/linux/can/netlink.h create mode 100644 include/uapi/linux/can/raw.h create mode 100644 include/uapi/linux/can/vxcan.h create mode 100644 include/uapi/linux/capability.h create mode 100644 include/uapi/linux/capi.h create mode 100644 include/uapi/linux/cciss_defs.h create mode 100644 include/uapi/linux/cciss_ioctl.h create mode 100644 include/uapi/linux/cdrom.h create mode 100644 include/uapi/linux/cec-funcs.h create mode 100644 include/uapi/linux/cec.h create mode 100644 include/uapi/linux/cgroupstats.h create mode 100644 include/uapi/linux/chio.h create mode 100644 include/uapi/linux/cifs/cifs_mount.h create mode 100644 include/uapi/linux/cm4000_cs.h create mode 100644 include/uapi/linux/cn_proc.h create mode 100644 include/uapi/linux/coda.h create mode 100644 include/uapi/linux/coda_psdev.h create mode 100644 include/uapi/linux/coff.h create mode 100644 include/uapi/linux/connector.h create mode 100644 include/uapi/linux/const.h create mode 100644 include/uapi/linux/coresight-stm.h create mode 100644 include/uapi/linux/cramfs_fs.h create mode 100644 include/uapi/linux/cryptouser.h create mode 100644 include/uapi/linux/cuda.h create mode 100644 include/uapi/linux/cyclades.h create mode 100644 include/uapi/linux/cycx_cfm.h create mode 100644 include/uapi/linux/dcbnl.h create mode 100644 include/uapi/linux/dccp.h create mode 100644 include/uapi/linux/devlink.h create mode 100644 include/uapi/linux/dlm.h create mode 100644 include/uapi/linux/dlm_device.h create mode 100644 include/uapi/linux/dlm_netlink.h create mode 100644 include/uapi/linux/dlm_plock.h create mode 100644 include/uapi/linux/dlmconstants.h create mode 100644 include/uapi/linux/dm-ioctl.h create mode 100644 include/uapi/linux/dm-log-userspace.h create mode 100644 include/uapi/linux/dma-buf.h create mode 100644 include/uapi/linux/dn.h create mode 100644 include/uapi/linux/dqblk_xfs.h create mode 100644 include/uapi/linux/dvb/audio.h create mode 100644 include/uapi/linux/dvb/ca.h create mode 100644 include/uapi/linux/dvb/dmx.h create mode 100644 include/uapi/linux/dvb/frontend.h create mode 100644 include/uapi/linux/dvb/net.h create mode 100644 include/uapi/linux/dvb/osd.h create mode 100644 include/uapi/linux/dvb/version.h create mode 100644 include/uapi/linux/dvb/video.h create mode 100644 include/uapi/linux/edd.h create mode 100644 include/uapi/linux/efs_fs_sb.h create mode 100644 include/uapi/linux/elf-em.h create mode 100644 include/uapi/linux/elf-fdpic.h create mode 100644 include/uapi/linux/elf.h create mode 100644 include/uapi/linux/elfcore.h create mode 100644 
include/uapi/linux/errno.h create mode 100644 include/uapi/linux/errqueue.h create mode 100644 include/uapi/linux/erspan.h create mode 100644 include/uapi/linux/ethtool.h create mode 100644 include/uapi/linux/eventpoll.h create mode 100644 include/uapi/linux/fadvise.h create mode 100644 include/uapi/linux/falloc.h create mode 100644 include/uapi/linux/fanotify.h create mode 100644 include/uapi/linux/fb.h create mode 100644 include/uapi/linux/fcntl.h create mode 100644 include/uapi/linux/fd.h create mode 100644 include/uapi/linux/fdreg.h create mode 100644 include/uapi/linux/fib_rules.h create mode 100644 include/uapi/linux/fiemap.h create mode 100644 include/uapi/linux/filter.h create mode 100644 include/uapi/linux/firewire-cdev.h create mode 100644 include/uapi/linux/firewire-constants.h create mode 100644 include/uapi/linux/flat.h create mode 100644 include/uapi/linux/fou.h create mode 100644 include/uapi/linux/fpga-dfl.h create mode 100644 include/uapi/linux/fs.h create mode 100644 include/uapi/linux/fsi.h create mode 100644 include/uapi/linux/fsl_hypervisor.h create mode 100644 include/uapi/linux/fsmap.h create mode 100644 include/uapi/linux/fuse.h create mode 100644 include/uapi/linux/futex.h create mode 100644 include/uapi/linux/gameport.h create mode 100644 include/uapi/linux/gen_stats.h create mode 100644 include/uapi/linux/genetlink.h create mode 100644 include/uapi/linux/genwqe/genwqe_card.h create mode 100644 include/uapi/linux/gfs2_ondisk.h create mode 100644 include/uapi/linux/gigaset_dev.h create mode 100644 include/uapi/linux/gpio.h create mode 100644 include/uapi/linux/gsmmux.h create mode 100644 include/uapi/linux/gtp.h create mode 100644 include/uapi/linux/hash_info.h create mode 100644 include/uapi/linux/hdlc.h create mode 100644 include/uapi/linux/hdlc/ioctl.h create mode 100644 include/uapi/linux/hdlcdrv.h create mode 100644 include/uapi/linux/hdreg.h create mode 100644 include/uapi/linux/hid.h create mode 100644 include/uapi/linux/hiddev.h create mode 100644 include/uapi/linux/hidraw.h create mode 100644 include/uapi/linux/hpet.h create mode 100644 include/uapi/linux/hsi/cs-protocol.h create mode 100644 include/uapi/linux/hsi/hsi_char.h create mode 100644 include/uapi/linux/hsr_netlink.h create mode 100644 include/uapi/linux/hw_breakpoint.h create mode 100644 include/uapi/linux/hyperv.h create mode 100644 include/uapi/linux/hysdn_if.h create mode 100644 include/uapi/linux/i2c-dev.h create mode 100644 include/uapi/linux/i2c.h create mode 100644 include/uapi/linux/i2o-dev.h create mode 100644 include/uapi/linux/i8k.h create mode 100644 include/uapi/linux/icmp.h create mode 100644 include/uapi/linux/icmpv6.h create mode 100644 include/uapi/linux/if.h create mode 100644 include/uapi/linux/if_addr.h create mode 100644 include/uapi/linux/if_addrlabel.h create mode 100644 include/uapi/linux/if_alg.h create mode 100644 include/uapi/linux/if_arcnet.h create mode 100644 include/uapi/linux/if_arp.h create mode 100644 include/uapi/linux/if_bonding.h create mode 100644 include/uapi/linux/if_bridge.h create mode 100644 include/uapi/linux/if_cablemodem.h create mode 100644 include/uapi/linux/if_eql.h create mode 100644 include/uapi/linux/if_ether.h create mode 100644 include/uapi/linux/if_fc.h create mode 100644 include/uapi/linux/if_fddi.h create mode 100644 include/uapi/linux/if_frad.h create mode 100644 include/uapi/linux/if_hippi.h create mode 100644 include/uapi/linux/if_infiniband.h create mode 100644 include/uapi/linux/if_link.h create mode 100644 
include/uapi/linux/if_ltalk.h create mode 100644 include/uapi/linux/if_macsec.h create mode 100644 include/uapi/linux/if_packet.h create mode 100644 include/uapi/linux/if_phonet.h create mode 100644 include/uapi/linux/if_plip.h create mode 100644 include/uapi/linux/if_ppp.h create mode 100644 include/uapi/linux/if_pppol2tp.h create mode 100644 include/uapi/linux/if_pppox.h create mode 100644 include/uapi/linux/if_slip.h create mode 100644 include/uapi/linux/if_team.h create mode 100644 include/uapi/linux/if_tun.h create mode 100644 include/uapi/linux/if_tunnel.h create mode 100644 include/uapi/linux/if_vlan.h create mode 100644 include/uapi/linux/if_x25.h create mode 100644 include/uapi/linux/if_xdp.h create mode 100644 include/uapi/linux/ife.h create mode 100644 include/uapi/linux/igmp.h create mode 100644 include/uapi/linux/iio/events.h create mode 100644 include/uapi/linux/iio/types.h create mode 100644 include/uapi/linux/ila.h create mode 100644 include/uapi/linux/in.h create mode 100644 include/uapi/linux/in6.h create mode 100644 include/uapi/linux/in_route.h create mode 100644 include/uapi/linux/inet_diag.h create mode 100644 include/uapi/linux/inotify.h create mode 100644 include/uapi/linux/input-event-codes.h create mode 100644 include/uapi/linux/input.h create mode 100644 include/uapi/linux/ioctl.h create mode 100644 include/uapi/linux/ip.h create mode 100644 include/uapi/linux/ip6_tunnel.h create mode 100644 include/uapi/linux/ip_vs.h create mode 100644 include/uapi/linux/ipc.h create mode 100644 include/uapi/linux/ipmi.h create mode 100644 include/uapi/linux/ipmi_bmc.h create mode 100644 include/uapi/linux/ipmi_msgdefs.h create mode 100644 include/uapi/linux/ipsec.h create mode 100644 include/uapi/linux/ipv6.h create mode 100644 include/uapi/linux/ipv6_route.h create mode 100644 include/uapi/linux/ipx.h create mode 100644 include/uapi/linux/irqnr.h create mode 100644 include/uapi/linux/isdn.h create mode 100644 include/uapi/linux/isdn/capicmd.h create mode 100644 include/uapi/linux/isdn_divertif.h create mode 100644 include/uapi/linux/isdn_ppp.h create mode 100644 include/uapi/linux/isdnif.h create mode 100644 include/uapi/linux/iso_fs.h create mode 100644 include/uapi/linux/ivtv.h create mode 100644 include/uapi/linux/ivtvfb.h create mode 100644 include/uapi/linux/jffs2.h create mode 100644 include/uapi/linux/joystick.h create mode 100644 include/uapi/linux/kcm.h create mode 100644 include/uapi/linux/kcmp.h create mode 100644 include/uapi/linux/kcov.h create mode 100644 include/uapi/linux/kd.h create mode 100644 include/uapi/linux/kdev_t.h create mode 100644 include/uapi/linux/kernel-page-flags.h create mode 100644 include/uapi/linux/kernel.h create mode 100644 include/uapi/linux/kernelcapi.h create mode 100644 include/uapi/linux/kexec.h create mode 100644 include/uapi/linux/keyboard.h create mode 100644 include/uapi/linux/keyctl.h create mode 100644 include/uapi/linux/kfd_ioctl.h create mode 100644 include/uapi/linux/kvm.h create mode 100644 include/uapi/linux/kvm_para.h create mode 100644 include/uapi/linux/l2tp.h create mode 100644 include/uapi/linux/libc-compat.h create mode 100644 include/uapi/linux/lightnvm.h create mode 100644 include/uapi/linux/limits.h create mode 100644 include/uapi/linux/lirc.h create mode 100644 include/uapi/linux/llc.h create mode 100644 include/uapi/linux/loop.h create mode 100644 include/uapi/linux/lp.h create mode 100644 include/uapi/linux/lwtunnel.h create mode 100644 include/uapi/linux/magic.h create mode 100644 include/uapi/linux/major.h 
create mode 100644 include/uapi/linux/map_to_7segment.h create mode 100644 include/uapi/linux/matroxfb.h create mode 100644 include/uapi/linux/max2175.h create mode 100644 include/uapi/linux/mdio.h create mode 100644 include/uapi/linux/media-bus-format.h create mode 100644 include/uapi/linux/media.h create mode 100644 include/uapi/linux/mei.h create mode 100644 include/uapi/linux/membarrier.h create mode 100644 include/uapi/linux/memfd.h create mode 100644 include/uapi/linux/mempolicy.h create mode 100644 include/uapi/linux/meye.h create mode 100644 include/uapi/linux/mic_common.h create mode 100644 include/uapi/linux/mic_ioctl.h create mode 100644 include/uapi/linux/mii.h create mode 100644 include/uapi/linux/minix_fs.h create mode 100644 include/uapi/linux/mman.h create mode 100644 include/uapi/linux/mmc/ioctl.h create mode 100644 include/uapi/linux/mmtimer.h create mode 100644 include/uapi/linux/module.h create mode 100644 include/uapi/linux/mpls.h create mode 100644 include/uapi/linux/mpls_iptunnel.h create mode 100644 include/uapi/linux/mqueue.h create mode 100644 include/uapi/linux/mroute.h create mode 100644 include/uapi/linux/mroute6.h create mode 100644 include/uapi/linux/msdos_fs.h create mode 100644 include/uapi/linux/msg.h create mode 100644 include/uapi/linux/mtio.h create mode 100644 include/uapi/linux/n_r3964.h create mode 100644 include/uapi/linux/nbd-netlink.h create mode 100644 include/uapi/linux/nbd.h create mode 100644 include/uapi/linux/ncsi.h create mode 100644 include/uapi/linux/ndctl.h create mode 100644 include/uapi/linux/neighbour.h create mode 100644 include/uapi/linux/net.h create mode 100644 include/uapi/linux/net_dropmon.h create mode 100644 include/uapi/linux/net_namespace.h create mode 100644 include/uapi/linux/net_tstamp.h create mode 100644 include/uapi/linux/netconf.h create mode 100644 include/uapi/linux/netdevice.h create mode 100644 include/uapi/linux/netfilter.h create mode 100644 include/uapi/linux/netfilter/ipset/ip_set.h create mode 100644 include/uapi/linux/netfilter/ipset/ip_set_bitmap.h create mode 100644 include/uapi/linux/netfilter/ipset/ip_set_hash.h create mode 100644 include/uapi/linux/netfilter/ipset/ip_set_list.h create mode 100644 include/uapi/linux/netfilter/nf_conntrack_common.h create mode 100644 include/uapi/linux/netfilter/nf_conntrack_ftp.h create mode 100644 include/uapi/linux/netfilter/nf_conntrack_sctp.h create mode 100644 include/uapi/linux/netfilter/nf_conntrack_tcp.h create mode 100644 include/uapi/linux/netfilter/nf_conntrack_tuple_common.h create mode 100644 include/uapi/linux/netfilter/nf_log.h create mode 100644 include/uapi/linux/netfilter/nf_nat.h create mode 100644 include/uapi/linux/netfilter/nf_tables.h create mode 100644 include/uapi/linux/netfilter/nf_tables_compat.h create mode 100644 include/uapi/linux/netfilter/nfnetlink.h create mode 100644 include/uapi/linux/netfilter/nfnetlink_acct.h create mode 100644 include/uapi/linux/netfilter/nfnetlink_compat.h create mode 100644 include/uapi/linux/netfilter/nfnetlink_conntrack.h create mode 100644 include/uapi/linux/netfilter/nfnetlink_cthelper.h create mode 100644 include/uapi/linux/netfilter/nfnetlink_cttimeout.h create mode 100644 include/uapi/linux/netfilter/nfnetlink_log.h create mode 100644 include/uapi/linux/netfilter/nfnetlink_osf.h create mode 100644 include/uapi/linux/netfilter/nfnetlink_queue.h create mode 100644 include/uapi/linux/netfilter/x_tables.h create mode 100644 include/uapi/linux/netfilter/xt_AUDIT.h create mode 100644 
include/uapi/linux/netfilter/xt_CHECKSUM.h create mode 100644 include/uapi/linux/netfilter/xt_CLASSIFY.h create mode 100644 include/uapi/linux/netfilter/xt_CONNMARK.h create mode 100644 include/uapi/linux/netfilter/xt_CONNSECMARK.h create mode 100644 include/uapi/linux/netfilter/xt_CT.h create mode 100644 include/uapi/linux/netfilter/xt_DSCP.h create mode 100644 include/uapi/linux/netfilter/xt_HMARK.h create mode 100644 include/uapi/linux/netfilter/xt_IDLETIMER.h create mode 100644 include/uapi/linux/netfilter/xt_LED.h create mode 100644 include/uapi/linux/netfilter/xt_LOG.h create mode 100644 include/uapi/linux/netfilter/xt_MARK.h create mode 100644 include/uapi/linux/netfilter/xt_NFLOG.h create mode 100644 include/uapi/linux/netfilter/xt_NFQUEUE.h create mode 100644 include/uapi/linux/netfilter/xt_RATEEST.h create mode 100644 include/uapi/linux/netfilter/xt_SECMARK.h create mode 100644 include/uapi/linux/netfilter/xt_SYNPROXY.h create mode 100644 include/uapi/linux/netfilter/xt_TCPMSS.h create mode 100644 include/uapi/linux/netfilter/xt_TCPOPTSTRIP.h create mode 100644 include/uapi/linux/netfilter/xt_TEE.h create mode 100644 include/uapi/linux/netfilter/xt_TPROXY.h create mode 100644 include/uapi/linux/netfilter/xt_addrtype.h create mode 100644 include/uapi/linux/netfilter/xt_bpf.h create mode 100644 include/uapi/linux/netfilter/xt_cgroup.h create mode 100644 include/uapi/linux/netfilter/xt_cluster.h create mode 100644 include/uapi/linux/netfilter/xt_comment.h create mode 100644 include/uapi/linux/netfilter/xt_connbytes.h create mode 100644 include/uapi/linux/netfilter/xt_connlabel.h create mode 100644 include/uapi/linux/netfilter/xt_connlimit.h create mode 100644 include/uapi/linux/netfilter/xt_connmark.h create mode 100644 include/uapi/linux/netfilter/xt_conntrack.h create mode 100644 include/uapi/linux/netfilter/xt_cpu.h create mode 100644 include/uapi/linux/netfilter/xt_dccp.h create mode 100644 include/uapi/linux/netfilter/xt_devgroup.h create mode 100644 include/uapi/linux/netfilter/xt_dscp.h create mode 100644 include/uapi/linux/netfilter/xt_ecn.h create mode 100644 include/uapi/linux/netfilter/xt_esp.h create mode 100644 include/uapi/linux/netfilter/xt_hashlimit.h create mode 100644 include/uapi/linux/netfilter/xt_helper.h create mode 100644 include/uapi/linux/netfilter/xt_ipcomp.h create mode 100644 include/uapi/linux/netfilter/xt_iprange.h create mode 100644 include/uapi/linux/netfilter/xt_ipvs.h create mode 100644 include/uapi/linux/netfilter/xt_l2tp.h create mode 100644 include/uapi/linux/netfilter/xt_length.h create mode 100644 include/uapi/linux/netfilter/xt_limit.h create mode 100644 include/uapi/linux/netfilter/xt_mac.h create mode 100644 include/uapi/linux/netfilter/xt_mark.h create mode 100644 include/uapi/linux/netfilter/xt_multiport.h create mode 100644 include/uapi/linux/netfilter/xt_nfacct.h create mode 100644 include/uapi/linux/netfilter/xt_osf.h create mode 100644 include/uapi/linux/netfilter/xt_owner.h create mode 100644 include/uapi/linux/netfilter/xt_physdev.h create mode 100644 include/uapi/linux/netfilter/xt_pkttype.h create mode 100644 include/uapi/linux/netfilter/xt_policy.h create mode 100644 include/uapi/linux/netfilter/xt_quota.h create mode 100644 include/uapi/linux/netfilter/xt_rateest.h create mode 100644 include/uapi/linux/netfilter/xt_realm.h create mode 100644 include/uapi/linux/netfilter/xt_recent.h create mode 100644 include/uapi/linux/netfilter/xt_rpfilter.h create mode 100644 include/uapi/linux/netfilter/xt_sctp.h create mode 100644 
include/uapi/linux/netfilter/xt_set.h create mode 100644 include/uapi/linux/netfilter/xt_socket.h create mode 100644 include/uapi/linux/netfilter/xt_state.h create mode 100644 include/uapi/linux/netfilter/xt_statistic.h create mode 100644 include/uapi/linux/netfilter/xt_string.h create mode 100644 include/uapi/linux/netfilter/xt_tcpmss.h create mode 100644 include/uapi/linux/netfilter/xt_tcpudp.h create mode 100644 include/uapi/linux/netfilter/xt_time.h create mode 100644 include/uapi/linux/netfilter/xt_u32.h create mode 100644 include/uapi/linux/netfilter_arp.h create mode 100644 include/uapi/linux/netfilter_arp/arp_tables.h create mode 100644 include/uapi/linux/netfilter_arp/arpt_mangle.h create mode 100644 include/uapi/linux/netfilter_bridge.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_802_3.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_among.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_arp.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_arpreply.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_ip.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_ip6.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_limit.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_log.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_mark_m.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_mark_t.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_nat.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_nflog.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_pkttype.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_redirect.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_stp.h create mode 100644 include/uapi/linux/netfilter_bridge/ebt_vlan.h create mode 100644 include/uapi/linux/netfilter_bridge/ebtables.h create mode 100644 include/uapi/linux/netfilter_decnet.h create mode 100644 include/uapi/linux/netfilter_ipv4.h create mode 100644 include/uapi/linux/netfilter_ipv4/ip_tables.h create mode 100644 include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h create mode 100644 include/uapi/linux/netfilter_ipv4/ipt_ECN.h create mode 100644 include/uapi/linux/netfilter_ipv4/ipt_LOG.h create mode 100644 include/uapi/linux/netfilter_ipv4/ipt_REJECT.h create mode 100644 include/uapi/linux/netfilter_ipv4/ipt_TTL.h create mode 100644 include/uapi/linux/netfilter_ipv4/ipt_ah.h create mode 100644 include/uapi/linux/netfilter_ipv4/ipt_ecn.h create mode 100644 include/uapi/linux/netfilter_ipv4/ipt_ttl.h create mode 100644 include/uapi/linux/netfilter_ipv6.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6_tables.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_HL.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_LOG.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_NPT.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_ah.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_frag.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_hl.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_ipv6header.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_mh.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_opts.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_rt.h create mode 100644 include/uapi/linux/netfilter_ipv6/ip6t_srh.h create mode 100644 include/uapi/linux/netlink.h create mode 100644 
include/uapi/linux/netlink_diag.h create mode 100644 include/uapi/linux/netrom.h create mode 100644 include/uapi/linux/nfc.h create mode 100644 include/uapi/linux/nfs.h create mode 100644 include/uapi/linux/nfs2.h create mode 100644 include/uapi/linux/nfs3.h create mode 100644 include/uapi/linux/nfs4.h create mode 100644 include/uapi/linux/nfs4_mount.h create mode 100644 include/uapi/linux/nfs_fs.h create mode 100644 include/uapi/linux/nfs_idmap.h create mode 100644 include/uapi/linux/nfs_mount.h create mode 100644 include/uapi/linux/nfsacl.h create mode 100644 include/uapi/linux/nfsd/cld.h create mode 100644 include/uapi/linux/nfsd/debug.h create mode 100644 include/uapi/linux/nfsd/export.h create mode 100644 include/uapi/linux/nfsd/nfsfh.h create mode 100644 include/uapi/linux/nfsd/stats.h create mode 100644 include/uapi/linux/nilfs2_api.h create mode 100644 include/uapi/linux/nilfs2_ondisk.h create mode 100644 include/uapi/linux/nl80211.h create mode 100644 include/uapi/linux/nsfs.h create mode 100644 include/uapi/linux/nubus.h create mode 100644 include/uapi/linux/nvme_ioctl.h create mode 100644 include/uapi/linux/nvram.h create mode 100644 include/uapi/linux/omap3isp.h create mode 100644 include/uapi/linux/omapfb.h create mode 100644 include/uapi/linux/oom.h create mode 100644 include/uapi/linux/openvswitch.h create mode 100644 include/uapi/linux/packet_diag.h create mode 100644 include/uapi/linux/param.h create mode 100644 include/uapi/linux/parport.h create mode 100644 include/uapi/linux/patchkey.h create mode 100644 include/uapi/linux/pci.h create mode 100644 include/uapi/linux/pci_regs.h create mode 100644 include/uapi/linux/pcitest.h create mode 100644 include/uapi/linux/perf_event.h create mode 100644 include/uapi/linux/personality.h create mode 100644 include/uapi/linux/pfkeyv2.h create mode 100644 include/uapi/linux/pg.h create mode 100644 include/uapi/linux/phantom.h create mode 100644 include/uapi/linux/phonet.h create mode 100644 include/uapi/linux/pkt_cls.h create mode 100644 include/uapi/linux/pkt_sched.h create mode 100644 include/uapi/linux/pktcdvd.h create mode 100644 include/uapi/linux/pmu.h create mode 100644 include/uapi/linux/poll.h create mode 100644 include/uapi/linux/posix_acl.h create mode 100644 include/uapi/linux/posix_acl_xattr.h create mode 100644 include/uapi/linux/posix_types.h create mode 100644 include/uapi/linux/ppdev.h create mode 100644 include/uapi/linux/ppp-comp.h create mode 100644 include/uapi/linux/ppp-ioctl.h create mode 100644 include/uapi/linux/ppp_defs.h create mode 100644 include/uapi/linux/pps.h create mode 100644 include/uapi/linux/pr.h create mode 100644 include/uapi/linux/prctl.h create mode 100644 include/uapi/linux/psample.h create mode 100644 include/uapi/linux/psci.h create mode 100644 include/uapi/linux/psp-sev.h create mode 100644 include/uapi/linux/ptp_clock.h create mode 100644 include/uapi/linux/ptrace.h create mode 100644 include/uapi/linux/qemu_fw_cfg.h create mode 100644 include/uapi/linux/qnx4_fs.h create mode 100644 include/uapi/linux/qnxtypes.h create mode 100644 include/uapi/linux/qrtr.h create mode 100644 include/uapi/linux/quota.h create mode 100644 include/uapi/linux/radeonfb.h create mode 100644 include/uapi/linux/raid/md_p.h create mode 100644 include/uapi/linux/raid/md_u.h create mode 100644 include/uapi/linux/random.h create mode 100644 include/uapi/linux/raw.h create mode 100644 include/uapi/linux/rds.h create mode 100644 include/uapi/linux/reboot.h create mode 100644 include/uapi/linux/reiserfs_fs.h create mode 
100644 include/uapi/linux/reiserfs_xattr.h create mode 100644 include/uapi/linux/resource.h create mode 100644 include/uapi/linux/rfkill.h create mode 100644 include/uapi/linux/rio_cm_cdev.h create mode 100644 include/uapi/linux/rio_mport_cdev.h create mode 100644 include/uapi/linux/romfs_fs.h create mode 100644 include/uapi/linux/rose.h create mode 100644 include/uapi/linux/route.h create mode 100644 include/uapi/linux/rpmsg.h create mode 100644 include/uapi/linux/rseq.h create mode 100644 include/uapi/linux/rtc.h create mode 100644 include/uapi/linux/rtnetlink.h create mode 100644 include/uapi/linux/rxrpc.h create mode 100644 include/uapi/linux/scc.h create mode 100644 include/uapi/linux/sched.h create mode 100644 include/uapi/linux/sched/types.h create mode 100644 include/uapi/linux/scif_ioctl.h create mode 100644 include/uapi/linux/screen_info.h create mode 100644 include/uapi/linux/sctp.h create mode 100644 include/uapi/linux/sdla.h create mode 100644 include/uapi/linux/seccomp.h create mode 100644 include/uapi/linux/securebits.h create mode 100644 include/uapi/linux/sed-opal.h create mode 100644 include/uapi/linux/seg6.h create mode 100644 include/uapi/linux/seg6_genl.h create mode 100644 include/uapi/linux/seg6_hmac.h create mode 100644 include/uapi/linux/seg6_iptunnel.h create mode 100644 include/uapi/linux/seg6_local.h create mode 100644 include/uapi/linux/selinux_netlink.h create mode 100644 include/uapi/linux/sem.h create mode 100644 include/uapi/linux/serial.h create mode 100644 include/uapi/linux/serial_core.h create mode 100644 include/uapi/linux/serial_reg.h create mode 100644 include/uapi/linux/serio.h create mode 100644 include/uapi/linux/shm.h create mode 100644 include/uapi/linux/signal.h create mode 100644 include/uapi/linux/signalfd.h create mode 100644 include/uapi/linux/smc.h create mode 100644 include/uapi/linux/smc_diag.h create mode 100644 include/uapi/linux/smiapp.h create mode 100644 include/uapi/linux/snmp.h create mode 100644 include/uapi/linux/sock_diag.h create mode 100644 include/uapi/linux/socket.h create mode 100644 include/uapi/linux/sockios.h create mode 100644 include/uapi/linux/sonet.h create mode 100644 include/uapi/linux/sonypi.h create mode 100644 include/uapi/linux/sound.h create mode 100644 include/uapi/linux/soundcard.h create mode 100644 include/uapi/linux/spi/spidev.h create mode 100644 include/uapi/linux/stat.h create mode 100644 include/uapi/linux/stddef.h create mode 100644 include/uapi/linux/stm.h create mode 100644 include/uapi/linux/string.h create mode 100644 include/uapi/linux/sunrpc/debug.h create mode 100644 include/uapi/linux/suspend_ioctls.h create mode 100644 include/uapi/linux/swab.h create mode 100644 include/uapi/linux/switchtec_ioctl.h create mode 100644 include/uapi/linux/sync_file.h create mode 100644 include/uapi/linux/synclink.h create mode 100644 include/uapi/linux/sysctl.h create mode 100644 include/uapi/linux/sysinfo.h create mode 100644 include/uapi/linux/target_core_user.h create mode 100644 include/uapi/linux/taskstats.h create mode 100644 include/uapi/linux/tc_act/tc_bpf.h create mode 100644 include/uapi/linux/tc_act/tc_connmark.h create mode 100644 include/uapi/linux/tc_act/tc_csum.h create mode 100644 include/uapi/linux/tc_act/tc_defact.h create mode 100644 include/uapi/linux/tc_act/tc_gact.h create mode 100644 include/uapi/linux/tc_act/tc_ife.h create mode 100644 include/uapi/linux/tc_act/tc_ipt.h create mode 100644 include/uapi/linux/tc_act/tc_mirred.h create mode 100644 include/uapi/linux/tc_act/tc_nat.h create 
mode 100644 include/uapi/linux/tc_act/tc_pedit.h create mode 100644 include/uapi/linux/tc_act/tc_sample.h create mode 100644 include/uapi/linux/tc_act/tc_skbedit.h create mode 100644 include/uapi/linux/tc_act/tc_skbmod.h create mode 100644 include/uapi/linux/tc_act/tc_tunnel_key.h create mode 100644 include/uapi/linux/tc_act/tc_vlan.h create mode 100644 include/uapi/linux/tc_ematch/tc_em_cmp.h create mode 100644 include/uapi/linux/tc_ematch/tc_em_ipt.h create mode 100644 include/uapi/linux/tc_ematch/tc_em_meta.h create mode 100644 include/uapi/linux/tc_ematch/tc_em_nbyte.h create mode 100644 include/uapi/linux/tc_ematch/tc_em_text.h create mode 100644 include/uapi/linux/tcp.h create mode 100644 include/uapi/linux/tcp_metrics.h create mode 100644 include/uapi/linux/tee.h create mode 100644 include/uapi/linux/termios.h create mode 100644 include/uapi/linux/thermal.h create mode 100644 include/uapi/linux/time.h create mode 100644 include/uapi/linux/timerfd.h create mode 100644 include/uapi/linux/times.h create mode 100644 include/uapi/linux/timex.h create mode 100644 include/uapi/linux/tiocl.h create mode 100644 include/uapi/linux/tipc.h create mode 100644 include/uapi/linux/tipc_config.h create mode 100644 include/uapi/linux/tipc_netlink.h create mode 100644 include/uapi/linux/tipc_sockets_diag.h create mode 100644 include/uapi/linux/tls.h create mode 100644 include/uapi/linux/toshiba.h create mode 100644 include/uapi/linux/tty.h create mode 100644 include/uapi/linux/tty_flags.h create mode 100644 include/uapi/linux/types.h create mode 100644 include/uapi/linux/udf_fs_i.h create mode 100644 include/uapi/linux/udp.h create mode 100644 include/uapi/linux/uhid.h create mode 100644 include/uapi/linux/uinput.h create mode 100644 include/uapi/linux/uio.h create mode 100644 include/uapi/linux/uleds.h create mode 100644 include/uapi/linux/ultrasound.h create mode 100644 include/uapi/linux/un.h create mode 100644 include/uapi/linux/unistd.h create mode 100644 include/uapi/linux/unix_diag.h create mode 100644 include/uapi/linux/usb/audio.h create mode 100644 include/uapi/linux/usb/cdc-wdm.h create mode 100644 include/uapi/linux/usb/cdc.h create mode 100644 include/uapi/linux/usb/ch11.h create mode 100644 include/uapi/linux/usb/ch9.h create mode 100644 include/uapi/linux/usb/charger.h create mode 100644 include/uapi/linux/usb/functionfs.h create mode 100644 include/uapi/linux/usb/g_printer.h create mode 100644 include/uapi/linux/usb/g_uvc.h create mode 100644 include/uapi/linux/usb/gadgetfs.h create mode 100644 include/uapi/linux/usb/midi.h create mode 100644 include/uapi/linux/usb/tmc.h create mode 100644 include/uapi/linux/usb/video.h create mode 100644 include/uapi/linux/usbdevice_fs.h create mode 100644 include/uapi/linux/usbip.h create mode 100644 include/uapi/linux/userfaultfd.h create mode 100644 include/uapi/linux/userio.h create mode 100644 include/uapi/linux/utime.h create mode 100644 include/uapi/linux/utsname.h create mode 100644 include/uapi/linux/uuid.h create mode 100644 include/uapi/linux/uvcvideo.h create mode 100644 include/uapi/linux/v4l2-common.h create mode 100644 include/uapi/linux/v4l2-controls.h create mode 100644 include/uapi/linux/v4l2-dv-timings.h create mode 100644 include/uapi/linux/v4l2-mediabus.h create mode 100644 include/uapi/linux/v4l2-subdev.h create mode 100644 include/uapi/linux/vbox_err.h create mode 100644 include/uapi/linux/vbox_vmmdev_types.h create mode 100644 include/uapi/linux/vboxguest.h create mode 100644 include/uapi/linux/veth.h create mode 100644 
include/uapi/linux/vfio.h create mode 100644 include/uapi/linux/vfio_ccw.h create mode 100644 include/uapi/linux/vhost.h create mode 100644 include/uapi/linux/videodev2.h create mode 100644 include/uapi/linux/virtio_9p.h create mode 100644 include/uapi/linux/virtio_balloon.h create mode 100644 include/uapi/linux/virtio_blk.h create mode 100644 include/uapi/linux/virtio_config.h create mode 100644 include/uapi/linux/virtio_console.h create mode 100644 include/uapi/linux/virtio_crypto.h create mode 100644 include/uapi/linux/virtio_gpu.h create mode 100644 include/uapi/linux/virtio_ids.h create mode 100644 include/uapi/linux/virtio_input.h create mode 100644 include/uapi/linux/virtio_mmio.h create mode 100644 include/uapi/linux/virtio_net.h create mode 100644 include/uapi/linux/virtio_pci.h create mode 100644 include/uapi/linux/virtio_ring.h create mode 100644 include/uapi/linux/virtio_rng.h create mode 100644 include/uapi/linux/virtio_scsi.h create mode 100644 include/uapi/linux/virtio_types.h create mode 100644 include/uapi/linux/virtio_vsock.h create mode 100644 include/uapi/linux/vm_sockets.h create mode 100644 include/uapi/linux/vm_sockets_diag.h create mode 100644 include/uapi/linux/vmcore.h create mode 100644 include/uapi/linux/vsockmon.h create mode 100644 include/uapi/linux/vt.h create mode 100644 include/uapi/linux/vtpm_proxy.h create mode 100644 include/uapi/linux/wait.h create mode 100644 include/uapi/linux/wanrouter.h create mode 100644 include/uapi/linux/watchdog.h create mode 100644 include/uapi/linux/wimax.h create mode 100644 include/uapi/linux/wimax/i2400m.h create mode 100644 include/uapi/linux/wireless.h create mode 100644 include/uapi/linux/wmi.h create mode 100644 include/uapi/linux/x25.h create mode 100644 include/uapi/linux/xattr.h create mode 100644 include/uapi/linux/xfrm.h create mode 100644 include/uapi/linux/xilinx-v4l2-controls.h create mode 100644 include/uapi/linux/zorro.h create mode 100644 include/uapi/linux/zorro_ids.h create mode 100644 include/uapi/misc/cxl.h create mode 100644 include/uapi/misc/ocxl.h create mode 100644 include/uapi/mtd/inftl-user.h create mode 100644 include/uapi/mtd/mtd-abi.h create mode 100644 include/uapi/mtd/mtd-user.h create mode 100644 include/uapi/mtd/nftl-user.h create mode 100644 include/uapi/mtd/ubi-user.h create mode 100644 include/uapi/rdma/bnxt_re-abi.h create mode 100644 include/uapi/rdma/cxgb3-abi.h create mode 100644 include/uapi/rdma/cxgb4-abi.h create mode 100644 include/uapi/rdma/hfi/hfi1_ioctl.h create mode 100644 include/uapi/rdma/hfi/hfi1_user.h create mode 100644 include/uapi/rdma/hns-abi.h create mode 100644 include/uapi/rdma/i40iw-abi.h create mode 100644 include/uapi/rdma/ib_user_cm.h create mode 100644 include/uapi/rdma/ib_user_ioctl_cmds.h create mode 100644 include/uapi/rdma/ib_user_ioctl_verbs.h create mode 100644 include/uapi/rdma/ib_user_mad.h create mode 100644 include/uapi/rdma/ib_user_sa.h create mode 100644 include/uapi/rdma/ib_user_verbs.h create mode 100644 include/uapi/rdma/mlx4-abi.h create mode 100644 include/uapi/rdma/mlx5-abi.h create mode 100644 include/uapi/rdma/mlx5_user_ioctl_cmds.h create mode 100644 include/uapi/rdma/mlx5_user_ioctl_verbs.h create mode 100644 include/uapi/rdma/mthca-abi.h create mode 100644 include/uapi/rdma/nes-abi.h create mode 100644 include/uapi/rdma/ocrdma-abi.h create mode 100644 include/uapi/rdma/qedr-abi.h create mode 100644 include/uapi/rdma/rdma_netlink.h create mode 100644 include/uapi/rdma/rdma_user_cm.h create mode 100644 include/uapi/rdma/rdma_user_ioctl.h 
create mode 100644 include/uapi/rdma/rdma_user_ioctl_cmds.h create mode 100644 include/uapi/rdma/rdma_user_rxe.h create mode 100644 include/uapi/rdma/vmw_pvrdma-abi.h create mode 100644 include/uapi/scsi/cxlflash_ioctl.h create mode 100644 include/uapi/scsi/fc/fc_els.h create mode 100644 include/uapi/scsi/fc/fc_fs.h create mode 100644 include/uapi/scsi/fc/fc_gs.h create mode 100644 include/uapi/scsi/fc/fc_ns.h create mode 100644 include/uapi/scsi/scsi_bsg_fc.h create mode 100644 include/uapi/scsi/scsi_netlink.h create mode 100644 include/uapi/scsi/scsi_netlink_fc.h create mode 100644 include/uapi/sound/asequencer.h create mode 100644 include/uapi/sound/asoc.h create mode 100644 include/uapi/sound/asound.h create mode 100644 include/uapi/sound/asound_fm.h create mode 100644 include/uapi/sound/compress_offload.h create mode 100644 include/uapi/sound/compress_params.h create mode 100644 include/uapi/sound/emu10k1.h create mode 100644 include/uapi/sound/firewire.h create mode 100644 include/uapi/sound/hdsp.h create mode 100644 include/uapi/sound/hdspm.h create mode 100644 include/uapi/sound/sb16_csp.h create mode 100644 include/uapi/sound/sfnt_info.h create mode 100644 include/uapi/sound/skl-tplg-interface.h create mode 100644 include/uapi/sound/snd_sst_tokens.h create mode 100644 include/uapi/sound/tlv.h create mode 100644 include/uapi/sound/usb_stream.h create mode 100644 include/uapi/video/edid.h create mode 100644 include/uapi/video/sisfb.h create mode 100644 include/uapi/video/uvesafb.h create mode 100644 include/uapi/xen/evtchn.h create mode 100644 include/uapi/xen/gntalloc.h create mode 100644 include/uapi/xen/gntdev.h create mode 100644 include/uapi/xen/privcmd.h create mode 100644 include/video/atmel_lcdc.h create mode 100644 include/video/aty128.h create mode 100644 include/video/broadsheetfb.h create mode 100644 include/video/cirrus.h create mode 100644 include/video/cvisionppc.h create mode 100644 include/video/da8xx-fb.h create mode 100644 include/video/display_timing.h create mode 100644 include/video/edid.h create mode 100644 include/video/gbe.h create mode 100644 include/video/hecubafb.h create mode 100644 include/video/ili9320.h create mode 100644 include/video/imx-ipu-image-convert.h create mode 100644 include/video/imx-ipu-v3.h create mode 100644 include/video/kyro.h create mode 100644 include/video/mach64.h create mode 100644 include/video/maxinefb.h create mode 100644 include/video/mbxfb.h create mode 100644 include/video/metronomefb.h create mode 100644 include/video/mipi_display.h create mode 100644 include/video/mmp_disp.h create mode 100644 include/video/neomagic.h create mode 100644 include/video/newport.h create mode 100644 include/video/of_display_timing.h create mode 100644 include/video/of_videomode.h create mode 100644 include/video/omap-panel-data.h create mode 100644 include/video/omapfb_dss.h create mode 100644 include/video/omapvrfb.h create mode 100644 include/video/permedia2.h create mode 100644 include/video/platform_lcd.h create mode 100644 include/video/pm3fb.h create mode 100644 include/video/pmag-ba-fb.h create mode 100644 include/video/pmagb-b-fb.h create mode 100644 include/video/pxa168fb.h create mode 100644 include/video/radeon.h create mode 100644 include/video/s1d13xxxfb.h create mode 100644 include/video/sa1100fb.h create mode 100644 include/video/samsung_fimd.h create mode 100644 include/video/sh_mobile_lcdc.h create mode 100644 include/video/sisfb.h create mode 100644 include/video/sstfb.h create mode 100644 include/video/tdfx.h create mode 
100644 include/video/tgafb.h create mode 100644 include/video/trident.h create mode 100644 include/video/udlfb.h create mode 100644 include/video/uvesafb.h create mode 100644 include/video/vga.h create mode 100644 include/video/videomode.h create mode 100644 include/video/w100fb.h create mode 100644 include/xen/acpi.h create mode 100644 include/xen/arm/hypercall.h create mode 100644 include/xen/arm/hypervisor.h create mode 100644 include/xen/arm/interface.h create mode 100644 include/xen/arm/page-coherent.h create mode 100644 include/xen/arm/page.h create mode 100644 include/xen/balloon.h create mode 100644 include/xen/events.h create mode 100644 include/xen/features.h create mode 100644 include/xen/grant_table.h create mode 100644 include/xen/hvc-console.h create mode 100644 include/xen/hvm.h create mode 100644 include/xen/interface/callback.h create mode 100644 include/xen/interface/elfnote.h create mode 100644 include/xen/interface/event_channel.h create mode 100644 include/xen/interface/features.h create mode 100644 include/xen/interface/grant_table.h create mode 100644 include/xen/interface/hvm/dm_op.h create mode 100644 include/xen/interface/hvm/hvm_op.h create mode 100644 include/xen/interface/hvm/hvm_vcpu.h create mode 100644 include/xen/interface/hvm/params.h create mode 100644 include/xen/interface/hvm/start_info.h create mode 100644 include/xen/interface/io/9pfs.h create mode 100644 include/xen/interface/io/blkif.h create mode 100644 include/xen/interface/io/console.h create mode 100644 include/xen/interface/io/displif.h create mode 100644 include/xen/interface/io/fbif.h create mode 100644 include/xen/interface/io/kbdif.h create mode 100644 include/xen/interface/io/netif.h create mode 100644 include/xen/interface/io/pciif.h create mode 100644 include/xen/interface/io/protocols.h create mode 100644 include/xen/interface/io/pvcalls.h create mode 100644 include/xen/interface/io/ring.h create mode 100644 include/xen/interface/io/sndif.h create mode 100644 include/xen/interface/io/tpmif.h create mode 100644 include/xen/interface/io/vscsiif.h create mode 100644 include/xen/interface/io/xenbus.h create mode 100644 include/xen/interface/io/xs_wire.h create mode 100644 include/xen/interface/memory.h create mode 100644 include/xen/interface/nmi.h create mode 100644 include/xen/interface/physdev.h create mode 100644 include/xen/interface/platform.h create mode 100644 include/xen/interface/sched.h create mode 100644 include/xen/interface/vcpu.h create mode 100644 include/xen/interface/version.h create mode 100644 include/xen/interface/xen-mca.h create mode 100644 include/xen/interface/xen.h create mode 100644 include/xen/interface/xenpmu.h create mode 100644 include/xen/mem-reservation.h create mode 100644 include/xen/page.h create mode 100644 include/xen/platform_pci.h create mode 100644 include/xen/swiotlb-xen.h create mode 100644 include/xen/tmem.h create mode 100644 include/xen/xen-ops.h create mode 100644 include/xen/xen.h create mode 100644 include/xen/xenbus.h create mode 100644 include/xen/xenbus_dev.h (limited to 'include') diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h new file mode 100644 index 000000000..6488d5727 --- /dev/null +++ b/include/acpi/acbuffer.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acbuffer.h - Support for buffers returned by ACPI predefined names + * + * Copyright (C) 2000 - 2018, Intel Corp. 
+ * + *****************************************************************************/ + +#ifndef __ACBUFFER_H__ +#define __ACBUFFER_H__ + +/* + * Contains buffer structures for these predefined names: + * _FDE, _GRT, _GTM, _PLD, _SRT + */ + +/* + * Note: C bitfields are not used for this reason: + * + * "Bitfields are great and easy to read, but unfortunately the C language + * does not specify the layout of bitfields in memory, which means they are + * essentially useless for dealing with packed data in on-disk formats or + * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me, + * this decision was a design error in C. Ritchie could have picked an order + * and stuck with it." Norman Ramsey. + * See http://stackoverflow.com/a/1053662/41661 + */ + +/* _FDE return value */ + +struct acpi_fde_info { + u32 floppy0; + u32 floppy1; + u32 floppy2; + u32 floppy3; + u32 tape; +}; + +/* + * _GRT return value + * _SRT input value + */ +struct acpi_grt_info { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 valid; + u16 milliseconds; + u16 timezone; + u8 daylight; + u8 reserved[3]; +}; + +/* _GTM return value */ + +struct acpi_gtm_info { + u32 pio_speed0; + u32 dma_speed0; + u32 pio_speed1; + u32 dma_speed1; + u32 flags; +}; + +/* + * Formatted _PLD return value. The minimum size is a package containing + * one buffer. + * Revision 1: Buffer is 16 bytes (128 bits) + * Revision 2: Buffer is 20 bytes (160 bits) + * + * Note: This structure is returned from the acpi_decode_pld_buffer + * interface. + */ +struct acpi_pld_info { + u8 revision; + u8 ignore_color; + u8 red; + u8 green; + u8 blue; + u16 width; + u16 height; + u8 user_visible; + u8 dock; + u8 lid; + u8 panel; + u8 vertical_position; + u8 horizontal_position; + u8 shape; + u8 group_orientation; + u8 group_token; + u8 group_position; + u8 bay; + u8 ejectable; + u8 ospm_eject_required; + u8 cabinet_number; + u8 card_cage_number; + u8 reference; + u8 rotation; + u8 order; + u8 reserved; + u16 vertical_offset; + u16 horizontal_offset; +}; + +/* + * Macros to: + * 1) Convert a _PLD buffer to internal struct acpi_pld_info format - ACPI_PLD_GET* + * (Used by acpi_decode_pld_buffer) + * 2) Construct a _PLD buffer - ACPI_PLD_SET* + * (Intended for BIOS use only) + */ +#define ACPI_PLD_REV1_BUFFER_SIZE 16 /* For Revision 1 of the buffer (From ACPI spec) */ +#define ACPI_PLD_REV2_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */ +#define ACPI_PLD_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */ + +/* First 32-bit dword, bits 0:32 */ + +#define ACPI_PLD_GET_REVISION(dword) ACPI_GET_BITS (dword, 0, ACPI_7BIT_MASK) +#define ACPI_PLD_SET_REVISION(dword,value) ACPI_SET_BITS (dword, 0, ACPI_7BIT_MASK, value) /* Offset 0, Len 7 */ + +#define ACPI_PLD_GET_IGNORE_COLOR(dword) ACPI_GET_BITS (dword, 7, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_IGNORE_COLOR(dword,value) ACPI_SET_BITS (dword, 7, ACPI_1BIT_MASK, value) /* Offset 7, Len 1 */ + +#define ACPI_PLD_GET_RED(dword) ACPI_GET_BITS (dword, 8, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_RED(dword,value) ACPI_SET_BITS (dword, 8, ACPI_8BIT_MASK, value) /* Offset 8, Len 8 */ + +#define ACPI_PLD_GET_GREEN(dword) ACPI_GET_BITS (dword, 16, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_GREEN(dword,value) ACPI_SET_BITS (dword, 16, ACPI_8BIT_MASK, value) /* Offset 16, Len 8 */ + +#define ACPI_PLD_GET_BLUE(dword) ACPI_GET_BITS (dword, 24, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_BLUE(dword,value) ACPI_SET_BITS (dword, 24, ACPI_8BIT_MASK, value) /* Offset 24, Len 8 
*/ + +/* Second 32-bit dword, bits 33:63 */ + +#define ACPI_PLD_GET_WIDTH(dword) ACPI_GET_BITS (dword, 0, ACPI_16BIT_MASK) +#define ACPI_PLD_SET_WIDTH(dword,value) ACPI_SET_BITS (dword, 0, ACPI_16BIT_MASK, value) /* Offset 32+0=32, Len 16 */ + +#define ACPI_PLD_GET_HEIGHT(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK) +#define ACPI_PLD_SET_HEIGHT(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 32+16=48, Len 16 */ + +/* Third 32-bit dword, bits 64:95 */ + +#define ACPI_PLD_GET_USER_VISIBLE(dword) ACPI_GET_BITS (dword, 0, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_USER_VISIBLE(dword,value) ACPI_SET_BITS (dword, 0, ACPI_1BIT_MASK, value) /* Offset 64+0=64, Len 1 */ + +#define ACPI_PLD_GET_DOCK(dword) ACPI_GET_BITS (dword, 1, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_DOCK(dword,value) ACPI_SET_BITS (dword, 1, ACPI_1BIT_MASK, value) /* Offset 64+1=65, Len 1 */ + +#define ACPI_PLD_GET_LID(dword) ACPI_GET_BITS (dword, 2, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_LID(dword,value) ACPI_SET_BITS (dword, 2, ACPI_1BIT_MASK, value) /* Offset 64+2=66, Len 1 */ + +#define ACPI_PLD_GET_PANEL(dword) ACPI_GET_BITS (dword, 3, ACPI_3BIT_MASK) +#define ACPI_PLD_SET_PANEL(dword,value) ACPI_SET_BITS (dword, 3, ACPI_3BIT_MASK, value) /* Offset 64+3=67, Len 3 */ + +#define ACPI_PLD_GET_VERTICAL(dword) ACPI_GET_BITS (dword, 6, ACPI_2BIT_MASK) +#define ACPI_PLD_SET_VERTICAL(dword,value) ACPI_SET_BITS (dword, 6, ACPI_2BIT_MASK, value) /* Offset 64+6=70, Len 2 */ + +#define ACPI_PLD_GET_HORIZONTAL(dword) ACPI_GET_BITS (dword, 8, ACPI_2BIT_MASK) +#define ACPI_PLD_SET_HORIZONTAL(dword,value) ACPI_SET_BITS (dword, 8, ACPI_2BIT_MASK, value) /* Offset 64+8=72, Len 2 */ + +#define ACPI_PLD_GET_SHAPE(dword) ACPI_GET_BITS (dword, 10, ACPI_4BIT_MASK) +#define ACPI_PLD_SET_SHAPE(dword,value) ACPI_SET_BITS (dword, 10, ACPI_4BIT_MASK, value) /* Offset 64+10=74, Len 4 */ + +#define ACPI_PLD_GET_ORIENTATION(dword) ACPI_GET_BITS (dword, 14, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_ORIENTATION(dword,value) ACPI_SET_BITS (dword, 14, ACPI_1BIT_MASK, value) /* Offset 64+14=78, Len 1 */ + +#define ACPI_PLD_GET_TOKEN(dword) ACPI_GET_BITS (dword, 15, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_TOKEN(dword,value) ACPI_SET_BITS (dword, 15, ACPI_8BIT_MASK, value) /* Offset 64+15=79, Len 8 */ + +#define ACPI_PLD_GET_POSITION(dword) ACPI_GET_BITS (dword, 23, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_POSITION(dword,value) ACPI_SET_BITS (dword, 23, ACPI_8BIT_MASK, value) /* Offset 64+23=87, Len 8 */ + +#define ACPI_PLD_GET_BAY(dword) ACPI_GET_BITS (dword, 31, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_BAY(dword,value) ACPI_SET_BITS (dword, 31, ACPI_1BIT_MASK, value) /* Offset 64+31=95, Len 1 */ + +/* Fourth 32-bit dword, bits 96:127 */ + +#define ACPI_PLD_GET_EJECTABLE(dword) ACPI_GET_BITS (dword, 0, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_EJECTABLE(dword,value) ACPI_SET_BITS (dword, 0, ACPI_1BIT_MASK, value) /* Offset 96+0=96, Len 1 */ + +#define ACPI_PLD_GET_OSPM_EJECT(dword) ACPI_GET_BITS (dword, 1, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_OSPM_EJECT(dword,value) ACPI_SET_BITS (dword, 1, ACPI_1BIT_MASK, value) /* Offset 96+1=97, Len 1 */ + +#define ACPI_PLD_GET_CABINET(dword) ACPI_GET_BITS (dword, 2, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_CABINET(dword,value) ACPI_SET_BITS (dword, 2, ACPI_8BIT_MASK, value) /* Offset 96+2=98, Len 8 */ + +#define ACPI_PLD_GET_CARD_CAGE(dword) ACPI_GET_BITS (dword, 10, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_CARD_CAGE(dword,value) ACPI_SET_BITS (dword, 10, ACPI_8BIT_MASK, value) /* Offset 96+10=106, Len 8 */ + +#define 
ACPI_PLD_GET_REFERENCE(dword) ACPI_GET_BITS (dword, 18, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_REFERENCE(dword,value) ACPI_SET_BITS (dword, 18, ACPI_1BIT_MASK, value) /* Offset 96+18=114, Len 1 */ + +#define ACPI_PLD_GET_ROTATION(dword) ACPI_GET_BITS (dword, 19, ACPI_4BIT_MASK) +#define ACPI_PLD_SET_ROTATION(dword,value) ACPI_SET_BITS (dword, 19, ACPI_4BIT_MASK, value) /* Offset 96+19=115, Len 4 */ + +#define ACPI_PLD_GET_ORDER(dword) ACPI_GET_BITS (dword, 23, ACPI_5BIT_MASK) +#define ACPI_PLD_SET_ORDER(dword,value) ACPI_SET_BITS (dword, 23, ACPI_5BIT_MASK, value) /* Offset 96+23=119, Len 5 */ + +/* Fifth 32-bit dword, bits 128:159 (Revision 2 of _PLD only) */ + +#define ACPI_PLD_GET_VERT_OFFSET(dword) ACPI_GET_BITS (dword, 0, ACPI_16BIT_MASK) +#define ACPI_PLD_SET_VERT_OFFSET(dword,value) ACPI_SET_BITS (dword, 0, ACPI_16BIT_MASK, value) /* Offset 128+0=128, Len 16 */ + +#define ACPI_PLD_GET_HORIZ_OFFSET(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK) +#define ACPI_PLD_SET_HORIZ_OFFSET(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 128+16=144, Len 16 */ + +#endif /* ACBUFFER_H */ diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h new file mode 100644 index 000000000..e6964e97a --- /dev/null +++ b/include/acpi/acconfig.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acconfig.h - Global configuration constants + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef _ACCONFIG_H +#define _ACCONFIG_H + +/****************************************************************************** + * + * Configuration options + * + *****************************************************************************/ + +/* + * ACPI_DEBUG_OUTPUT - This switch enables all the debug facilities of the + * ACPI subsystem. This includes the DEBUG_PRINT output + * statements. When disabled, all DEBUG_PRINT + * statements are compiled out. + * + * ACPI_APPLICATION - Use this switch if the subsystem is going to be run + * at the application level. + * + */ + +/* + * OS name, used for the _OS object. The _OS object is essentially obsolete, + * but there is a large base of ASL/AML code in existing machines that check + * for the string below. The use of this string usually guarantees that + * the ASL will execute down the most tested code path. Also, there is some + * code that will not execute the _OSI method unless _OS matches the string + * below. Therefore, change this string at your own risk. + */ +#define ACPI_OS_NAME "Microsoft Windows NT" + +/* Maximum objects in the various object caches */ + +#define ACPI_MAX_STATE_CACHE_DEPTH 96 /* State objects */ +#define ACPI_MAX_PARSE_CACHE_DEPTH 96 /* Parse tree objects */ +#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 96 /* Parse tree objects */ +#define ACPI_MAX_OBJECT_CACHE_DEPTH 96 /* Interpreter operand objects */ +#define ACPI_MAX_NAMESPACE_CACHE_DEPTH 96 /* Namespace objects */ +#define ACPI_MAX_COMMENT_CACHE_DEPTH 96 /* Comments for the -ca option */ + +/* + * Should the subsystem abort the loading of an ACPI table if the + * table checksum is incorrect? + */ +#ifndef ACPI_CHECKSUM_ABORT +#define ACPI_CHECKSUM_ABORT FALSE +#endif + +/* + * Generate a version of ACPICA that only supports "reduced hardware" + * platforms (as defined in ACPI 5.0). 
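Both ACPI_CHECKSUM_ABORT above and the ACPI_REDUCED_HARDWARE switch described here (its #ifndef guard follows just below) receive their FALSE defaults only when nothing has defined them earlier, so a port can override them from its own configuration as long as that definition is seen before acconfig.h. A minimal sketch of such an override in a hypothetical host configuration header; the file name and the chosen values are illustrative and are not part of this patch:

/*
 * example_acpi_config.h (hypothetical) -- must be included ahead of the
 * ACPICA headers so that the #ifndef guards in acconfig.h see these
 * definitions and skip their defaults.  TRUE/FALSE come from the ACPICA
 * headers and are only expanded at the points of use, so defining the
 * switches in terms of them here is safe.
 */
#ifndef EXAMPLE_ACPI_CONFIG_H
#define EXAMPLE_ACPI_CONFIG_H

#define ACPI_REDUCED_HARDWARE	TRUE	/* build the hardware-reduced ACPICA variant */
#define ACPI_CHECKSUM_ABORT	TRUE	/* refuse to load tables with a bad checksum */

#endif /* EXAMPLE_ACPI_CONFIG_H */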
Set to TRUE to generate a specialized + * version of ACPICA that ONLY supports the ACPI 5.0 "reduced hardware" + * model. In other words, no ACPI hardware is supported. + * + * If TRUE, this means no support for the following: + * PM Event and Control registers + * SCI interrupt (and handler) + * Fixed Events + * General Purpose Events (GPEs) + * Global Lock + * ACPI PM timer + * FACS table (Waking vectors and Global Lock) + */ +#ifndef ACPI_REDUCED_HARDWARE +#define ACPI_REDUCED_HARDWARE FALSE +#endif + +/****************************************************************************** + * + * Subsystem Constants + * + *****************************************************************************/ + +/* Version of ACPI supported */ + +#define ACPI_CA_SUPPORT_LEVEL 5 + +/* Maximum count for a semaphore object */ + +#define ACPI_MAX_SEMAPHORE_COUNT 256 + +/* Maximum object reference count (detects object deletion issues) */ + +#define ACPI_MAX_REFERENCE_COUNT 0x4000 + +/* Default page size for use in mapping memory for operation regions */ + +#define ACPI_DEFAULT_PAGE_SIZE 4096 /* Must be power of 2 */ + +/* owner_id tracking. 8 entries allows for 255 owner_ids */ + +#define ACPI_NUM_OWNERID_MASKS 8 + +/* Size of the root table array is increased by this increment */ + +#define ACPI_ROOT_TABLE_SIZE_INCREMENT 4 + +/* Maximum sleep allowed via Sleep() operator */ + +#define ACPI_MAX_SLEEP 2000 /* 2000 millisec == two seconds */ + +/* Address Range lists are per-space_id (Memory and I/O only) */ + +#define ACPI_ADDRESS_RANGE_MAX 2 + +/* Maximum time (default 30s) of While() loops before abort */ + +#define ACPI_MAX_LOOP_TIMEOUT 30 + +/****************************************************************************** + * + * ACPI Specification constants (Do not change unless the specification changes) + * + *****************************************************************************/ + +/* Method info (in WALK_STATE), containing local variables and argumetns */ + +#define ACPI_METHOD_NUM_LOCALS 8 +#define ACPI_METHOD_MAX_LOCAL 7 + +#define ACPI_METHOD_NUM_ARGS 7 +#define ACPI_METHOD_MAX_ARG 6 + +/* + * Operand Stack (in WALK_STATE), Must be large enough to contain METHOD_MAX_ARG + */ +#define ACPI_OBJ_NUM_OPERANDS 8 +#define ACPI_OBJ_MAX_OPERAND 7 + +/* Number of elements in the Result Stack frame, can be an arbitrary value */ + +#define ACPI_RESULTS_FRAME_OBJ_NUM 8 + +/* + * Maximal number of elements the Result Stack can contain, + * it may be an arbitray value not exceeding the types of + * result_size and result_count (now u8). + */ +#define ACPI_RESULTS_OBJ_NUM_MAX 255 + +/* Constants used in searching for the RSDP in low memory */ + +#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */ +#define ACPI_EBDA_PTR_LENGTH 2 +#define ACPI_EBDA_WINDOW_SIZE 1024 +#define ACPI_HI_RSDP_WINDOW_BASE 0x000E0000 /* Physical Address */ +#define ACPI_HI_RSDP_WINDOW_SIZE 0x00020000 +#define ACPI_RSDP_SCAN_STEP 16 + +/* Operation regions */ + +#define ACPI_USER_REGION_BEGIN 0x80 + +/* Maximum space_ids for Operation Regions */ + +#define ACPI_MAX_ADDRESS_SPACE 255 +#define ACPI_NUM_DEFAULT_SPACES 4 + +/* Array sizes. 
Used for range checking also */ + +#define ACPI_MAX_MATCH_OPCODE 5 + +/* RSDP checksums */ + +#define ACPI_RSDP_CHECKSUM_LENGTH 20 +#define ACPI_RSDP_XCHECKSUM_LENGTH 36 + +/* SMBus, GSBus and IPMI bidirectional buffer size */ + +#define ACPI_SMBUS_BUFFER_SIZE 34 +#define ACPI_GSBUS_BUFFER_SIZE 34 +#define ACPI_IPMI_BUFFER_SIZE 66 + +/* _sx_d and _sx_w control methods */ + +#define ACPI_NUM_sx_d_METHODS 4 +#define ACPI_NUM_sx_w_METHODS 5 + +/****************************************************************************** + * + * Miscellaneous constants + * + *****************************************************************************/ + +/* UUID constants */ + +#define UUID_BUFFER_LENGTH 16 /* Length of UUID in memory */ +#define UUID_STRING_LENGTH 36 /* Total length of a UUID string */ + +/* Positions for required hyphens (dashes) in UUID strings */ + +#define UUID_HYPHEN1_OFFSET 8 +#define UUID_HYPHEN2_OFFSET 13 +#define UUID_HYPHEN3_OFFSET 18 +#define UUID_HYPHEN4_OFFSET 23 + +/****************************************************************************** + * + * ACPI AML Debugger + * + *****************************************************************************/ + +#define ACPI_DEBUGGER_MAX_ARGS ACPI_METHOD_NUM_ARGS + 4 /* Max command line arguments */ +#define ACPI_DB_LINE_BUFFER_SIZE 512 + +#define ACPI_DEBUGGER_COMMAND_PROMPT '-' +#define ACPI_DEBUGGER_EXECUTE_PROMPT '%' + +#endif /* _ACCONFIG_H */ diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h new file mode 100644 index 000000000..878b8e26c --- /dev/null +++ b/include/acpi/acexcep.h @@ -0,0 +1,371 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acexcep.h - Exception codes returned by the ACPI subsystem + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACEXCEP_H__ +#define __ACEXCEP_H__ + +/* This module contains all possible exception codes for acpi_status */ + +/* + * Exception code classes + */ +#define AE_CODE_ENVIRONMENTAL 0x0000 /* General ACPICA environment */ +#define AE_CODE_PROGRAMMER 0x1000 /* External ACPICA interface caller */ +#define AE_CODE_ACPI_TABLES 0x2000 /* ACPI tables */ +#define AE_CODE_AML 0x3000 /* From executing AML code */ +#define AE_CODE_CONTROL 0x4000 /* Internal control codes */ + +#define AE_CODE_MAX 0x4000 +#define AE_CODE_MASK 0xF000 + +/* + * Macros to insert the exception code classes + */ +#define EXCEP_ENV(code) ((acpi_status) (code | AE_CODE_ENVIRONMENTAL)) +#define EXCEP_PGM(code) ((acpi_status) (code | AE_CODE_PROGRAMMER)) +#define EXCEP_TBL(code) ((acpi_status) (code | AE_CODE_ACPI_TABLES)) +#define EXCEP_AML(code) ((acpi_status) (code | AE_CODE_AML)) +#define EXCEP_CTL(code) ((acpi_status) (code | AE_CODE_CONTROL)) + +/* + * Exception info table. The "Description" field is used only by the + * ACPICA help application (acpihelp). 
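Each acpi_status defined in this header is the OR of a class constant (AE_CODE_ENVIRONMENTAL through AE_CODE_CONTROL) and a small per-class code, applied by the EXCEP_* wrappers above, and AE_OK is zero, so the success test defined a little further on reduces to a zero test. A standalone sketch of that arithmetic, with the constant values copied from this header; the acpi_status typedef and the helper function are stand-ins for illustration only:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t acpi_status;			/* stand-in for the ACPICA typedef */

#define AE_CODE_ENVIRONMENTAL	0x0000		/* values copied from acexcep.h above */
#define AE_CODE_AML		0x3000
#define AE_CODE_MASK		0xF000

#define AE_OK			((acpi_status) 0x0000)
#define AE_NOT_FOUND		((acpi_status) (0x0005 | AE_CODE_ENVIRONMENTAL))
#define AE_AML_BAD_OPCODE	((acpi_status) (0x0001 | AE_CODE_AML))

/* Mirrors the ACPI_*_EXCEPTION() class tests defined a little further on. */
static const char *status_class(acpi_status status)
{
	switch (status & AE_CODE_MASK) {
	case AE_CODE_ENVIRONMENTAL:	return "environmental";
	case AE_CODE_AML:		return "AML";
	default:			return "other";
	}
}

int main(void)
{
	printf("AE_NOT_FOUND      = 0x%04X (%s)\n",
	       (unsigned int)AE_NOT_FOUND, status_class(AE_NOT_FOUND));
	printf("AE_AML_BAD_OPCODE = 0x%04X (%s)\n",
	       (unsigned int)AE_AML_BAD_OPCODE, status_class(AE_AML_BAD_OPCODE));
	printf("AE_OK is success:   %d\n", !AE_OK);	/* same test as ACPI_SUCCESS() */
	return 0;
}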
+ */ +struct acpi_exception_info { + char *name; + +#ifdef ACPI_HELP_APP + char *description; +#endif +}; + +#ifdef ACPI_HELP_APP +#define EXCEP_TXT(name,description) {name, description} +#else +#define EXCEP_TXT(name,description) {name} +#endif + +/* + * Success is always zero, failure is non-zero + */ +#define ACPI_SUCCESS(a) (!(a)) +#define ACPI_FAILURE(a) (a) + +#define AE_OK (acpi_status) 0x0000 + +#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL) +#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML) +#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER) +#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES) +#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL) + +/* + * Environmental exceptions + */ +#define AE_ERROR EXCEP_ENV (0x0001) +#define AE_NO_ACPI_TABLES EXCEP_ENV (0x0002) +#define AE_NO_NAMESPACE EXCEP_ENV (0x0003) +#define AE_NO_MEMORY EXCEP_ENV (0x0004) +#define AE_NOT_FOUND EXCEP_ENV (0x0005) +#define AE_NOT_EXIST EXCEP_ENV (0x0006) +#define AE_ALREADY_EXISTS EXCEP_ENV (0x0007) +#define AE_TYPE EXCEP_ENV (0x0008) +#define AE_NULL_OBJECT EXCEP_ENV (0x0009) +#define AE_NULL_ENTRY EXCEP_ENV (0x000A) +#define AE_BUFFER_OVERFLOW EXCEP_ENV (0x000B) +#define AE_STACK_OVERFLOW EXCEP_ENV (0x000C) +#define AE_STACK_UNDERFLOW EXCEP_ENV (0x000D) +#define AE_NOT_IMPLEMENTED EXCEP_ENV (0x000E) +#define AE_SUPPORT EXCEP_ENV (0x000F) +#define AE_LIMIT EXCEP_ENV (0x0010) +#define AE_TIME EXCEP_ENV (0x0011) +#define AE_ACQUIRE_DEADLOCK EXCEP_ENV (0x0012) +#define AE_RELEASE_DEADLOCK EXCEP_ENV (0x0013) +#define AE_NOT_ACQUIRED EXCEP_ENV (0x0014) +#define AE_ALREADY_ACQUIRED EXCEP_ENV (0x0015) +#define AE_NO_HARDWARE_RESPONSE EXCEP_ENV (0x0016) +#define AE_NO_GLOBAL_LOCK EXCEP_ENV (0x0017) +#define AE_ABORT_METHOD EXCEP_ENV (0x0018) +#define AE_SAME_HANDLER EXCEP_ENV (0x0019) +#define AE_NO_HANDLER EXCEP_ENV (0x001A) +#define AE_OWNER_ID_LIMIT EXCEP_ENV (0x001B) +#define AE_NOT_CONFIGURED EXCEP_ENV (0x001C) +#define AE_ACCESS EXCEP_ENV (0x001D) +#define AE_IO_ERROR EXCEP_ENV (0x001E) +#define AE_NUMERIC_OVERFLOW EXCEP_ENV (0x001F) +#define AE_HEX_OVERFLOW EXCEP_ENV (0x0020) +#define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021) +#define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022) +#define AE_END_OF_TABLE EXCEP_ENV (0x0023) + +#define AE_CODE_ENV_MAX 0x0023 + +/* + * Programmer exceptions + */ +#define AE_BAD_PARAMETER EXCEP_PGM (0x0001) +#define AE_BAD_CHARACTER EXCEP_PGM (0x0002) +#define AE_BAD_PATHNAME EXCEP_PGM (0x0003) +#define AE_BAD_DATA EXCEP_PGM (0x0004) +#define AE_BAD_HEX_CONSTANT EXCEP_PGM (0x0005) +#define AE_BAD_OCTAL_CONSTANT EXCEP_PGM (0x0006) +#define AE_BAD_DECIMAL_CONSTANT EXCEP_PGM (0x0007) +#define AE_MISSING_ARGUMENTS EXCEP_PGM (0x0008) +#define AE_BAD_ADDRESS EXCEP_PGM (0x0009) + +#define AE_CODE_PGM_MAX 0x0009 + +/* + * Acpi table exceptions + */ +#define AE_BAD_SIGNATURE EXCEP_TBL (0x0001) +#define AE_BAD_HEADER EXCEP_TBL (0x0002) +#define AE_BAD_CHECKSUM EXCEP_TBL (0x0003) +#define AE_BAD_VALUE EXCEP_TBL (0x0004) +#define AE_INVALID_TABLE_LENGTH EXCEP_TBL (0x0005) + +#define AE_CODE_TBL_MAX 0x0005 + +/* + * AML exceptions. 
These are caused by problems with + * the actual AML byte stream + */ +#define AE_AML_BAD_OPCODE EXCEP_AML (0x0001) +#define AE_AML_NO_OPERAND EXCEP_AML (0x0002) +#define AE_AML_OPERAND_TYPE EXCEP_AML (0x0003) +#define AE_AML_OPERAND_VALUE EXCEP_AML (0x0004) +#define AE_AML_UNINITIALIZED_LOCAL EXCEP_AML (0x0005) +#define AE_AML_UNINITIALIZED_ARG EXCEP_AML (0x0006) +#define AE_AML_UNINITIALIZED_ELEMENT EXCEP_AML (0x0007) +#define AE_AML_NUMERIC_OVERFLOW EXCEP_AML (0x0008) +#define AE_AML_REGION_LIMIT EXCEP_AML (0x0009) +#define AE_AML_BUFFER_LIMIT EXCEP_AML (0x000A) +#define AE_AML_PACKAGE_LIMIT EXCEP_AML (0x000B) +#define AE_AML_DIVIDE_BY_ZERO EXCEP_AML (0x000C) +#define AE_AML_BAD_NAME EXCEP_AML (0x000D) +#define AE_AML_NAME_NOT_FOUND EXCEP_AML (0x000E) +#define AE_AML_INTERNAL EXCEP_AML (0x000F) +#define AE_AML_INVALID_SPACE_ID EXCEP_AML (0x0010) +#define AE_AML_STRING_LIMIT EXCEP_AML (0x0011) +#define AE_AML_NO_RETURN_VALUE EXCEP_AML (0x0012) +#define AE_AML_METHOD_LIMIT EXCEP_AML (0x0013) +#define AE_AML_NOT_OWNER EXCEP_AML (0x0014) +#define AE_AML_MUTEX_ORDER EXCEP_AML (0x0015) +#define AE_AML_MUTEX_NOT_ACQUIRED EXCEP_AML (0x0016) +#define AE_AML_INVALID_RESOURCE_TYPE EXCEP_AML (0x0017) +#define AE_AML_INVALID_INDEX EXCEP_AML (0x0018) +#define AE_AML_REGISTER_LIMIT EXCEP_AML (0x0019) +#define AE_AML_NO_WHILE EXCEP_AML (0x001A) +#define AE_AML_ALIGNMENT EXCEP_AML (0x001B) +#define AE_AML_NO_RESOURCE_END_TAG EXCEP_AML (0x001C) +#define AE_AML_BAD_RESOURCE_VALUE EXCEP_AML (0x001D) +#define AE_AML_CIRCULAR_REFERENCE EXCEP_AML (0x001E) +#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F) +#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020) +#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021) +#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022) +#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023) + +#define AE_CODE_AML_MAX 0x0023 + +/* + * Internal exceptions used for control + */ +#define AE_CTRL_RETURN_VALUE EXCEP_CTL (0x0001) +#define AE_CTRL_PENDING EXCEP_CTL (0x0002) +#define AE_CTRL_TERMINATE EXCEP_CTL (0x0003) +#define AE_CTRL_TRUE EXCEP_CTL (0x0004) +#define AE_CTRL_FALSE EXCEP_CTL (0x0005) +#define AE_CTRL_DEPTH EXCEP_CTL (0x0006) +#define AE_CTRL_END EXCEP_CTL (0x0007) +#define AE_CTRL_TRANSFER EXCEP_CTL (0x0008) +#define AE_CTRL_BREAK EXCEP_CTL (0x0009) +#define AE_CTRL_CONTINUE EXCEP_CTL (0x000A) +#define AE_CTRL_PARSE_CONTINUE EXCEP_CTL (0x000B) +#define AE_CTRL_PARSE_PENDING EXCEP_CTL (0x000C) + +#define AE_CODE_CTRL_MAX 0x000C + +/* Exception strings for acpi_format_exception */ + +#ifdef ACPI_DEFINE_EXCEPTION_TABLE + +/* + * String versions of the exception codes above + * These strings must match the corresponding defines exactly + */ +static const struct acpi_exception_info acpi_gbl_exception_names_env[] = { + EXCEP_TXT("AE_OK", "No error"), + EXCEP_TXT("AE_ERROR", "Unspecified error"), + EXCEP_TXT("AE_NO_ACPI_TABLES", "ACPI tables could not be found"), + EXCEP_TXT("AE_NO_NAMESPACE", "A namespace has not been loaded"), + EXCEP_TXT("AE_NO_MEMORY", "Insufficient dynamic memory"), + EXCEP_TXT("AE_NOT_FOUND", "A requested entity is not found"), + EXCEP_TXT("AE_NOT_EXIST", "A required entity does not exist"), + EXCEP_TXT("AE_ALREADY_EXISTS", "An entity already exists"), + EXCEP_TXT("AE_TYPE", "The object type is incorrect"), + EXCEP_TXT("AE_NULL_OBJECT", "A required object was missing"), + EXCEP_TXT("AE_NULL_ENTRY", "The requested object does not exist"), + EXCEP_TXT("AE_BUFFER_OVERFLOW", "The buffer provided is too small"), + EXCEP_TXT("AE_STACK_OVERFLOW", "An internal stack 
overflowed"), + EXCEP_TXT("AE_STACK_UNDERFLOW", "An internal stack underflowed"), + EXCEP_TXT("AE_NOT_IMPLEMENTED", "The feature is not implemented"), + EXCEP_TXT("AE_SUPPORT", "The feature is not supported"), + EXCEP_TXT("AE_LIMIT", "A predefined limit was exceeded"), + EXCEP_TXT("AE_TIME", "A time limit or timeout expired"), + EXCEP_TXT("AE_ACQUIRE_DEADLOCK", + "Internal error, attempt was made to acquire a mutex in improper order"), + EXCEP_TXT("AE_RELEASE_DEADLOCK", + "Internal error, attempt was made to release a mutex in improper order"), + EXCEP_TXT("AE_NOT_ACQUIRED", + "An attempt to release a mutex or Global Lock without a previous acquire"), + EXCEP_TXT("AE_ALREADY_ACQUIRED", + "Internal error, attempt was made to acquire a mutex twice"), + EXCEP_TXT("AE_NO_HARDWARE_RESPONSE", + "Hardware did not respond after an I/O operation"), + EXCEP_TXT("AE_NO_GLOBAL_LOCK", "There is no FACS Global Lock"), + EXCEP_TXT("AE_ABORT_METHOD", "A control method was aborted"), + EXCEP_TXT("AE_SAME_HANDLER", + "Attempt was made to install the same handler that is already installed"), + EXCEP_TXT("AE_NO_HANDLER", + "A handler for the operation is not installed"), + EXCEP_TXT("AE_OWNER_ID_LIMIT", + "There are no more Owner IDs available for ACPI tables or control methods"), + EXCEP_TXT("AE_NOT_CONFIGURED", + "The interface is not part of the current subsystem configuration"), + EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation"), + EXCEP_TXT("AE_IO_ERROR", "An I/O error occurred"), + EXCEP_TXT("AE_NUMERIC_OVERFLOW", + "Overflow during string-to-integer conversion"), + EXCEP_TXT("AE_HEX_OVERFLOW", + "Overflow during ASCII hex-to-binary conversion"), + EXCEP_TXT("AE_DECIMAL_OVERFLOW", + "Overflow during ASCII decimal-to-binary conversion"), + EXCEP_TXT("AE_OCTAL_OVERFLOW", + "Overflow during ASCII octal-to-binary conversion"), + EXCEP_TXT("AE_END_OF_TABLE", "Reached the end of table") +}; + +static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = { + EXCEP_TXT(NULL, NULL), + EXCEP_TXT("AE_BAD_PARAMETER", "A parameter is out of range or invalid"), + EXCEP_TXT("AE_BAD_CHARACTER", + "An invalid character was found in a name"), + EXCEP_TXT("AE_BAD_PATHNAME", + "An invalid character was found in a pathname"), + EXCEP_TXT("AE_BAD_DATA", + "A package or buffer contained incorrect data"), + EXCEP_TXT("AE_BAD_HEX_CONSTANT", "Invalid character in a Hex constant"), + EXCEP_TXT("AE_BAD_OCTAL_CONSTANT", + "Invalid character in an Octal constant"), + EXCEP_TXT("AE_BAD_DECIMAL_CONSTANT", + "Invalid character in a Decimal constant"), + EXCEP_TXT("AE_MISSING_ARGUMENTS", + "Too few arguments were passed to a control method"), + EXCEP_TXT("AE_BAD_ADDRESS", "An illegal null I/O address") +}; + +static const struct acpi_exception_info acpi_gbl_exception_names_tbl[] = { + EXCEP_TXT(NULL, NULL), + EXCEP_TXT("AE_BAD_SIGNATURE", "An ACPI table has an invalid signature"), + EXCEP_TXT("AE_BAD_HEADER", "Invalid field in an ACPI table header"), + EXCEP_TXT("AE_BAD_CHECKSUM", "An ACPI table checksum is not correct"), + EXCEP_TXT("AE_BAD_VALUE", "An invalid value was found in a table"), + EXCEP_TXT("AE_INVALID_TABLE_LENGTH", + "The FADT or FACS has improper length") +}; + +static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = { + EXCEP_TXT(NULL, NULL), + EXCEP_TXT("AE_AML_BAD_OPCODE", "Invalid AML opcode encountered"), + EXCEP_TXT("AE_AML_NO_OPERAND", "A required operand is missing"), + EXCEP_TXT("AE_AML_OPERAND_TYPE", + "An operand of an incorrect type was encountered"), + 
EXCEP_TXT("AE_AML_OPERAND_VALUE", + "The operand had an inappropriate or invalid value"), + EXCEP_TXT("AE_AML_UNINITIALIZED_LOCAL", + "Method tried to use an uninitialized local variable"), + EXCEP_TXT("AE_AML_UNINITIALIZED_ARG", + "Method tried to use an uninitialized argument"), + EXCEP_TXT("AE_AML_UNINITIALIZED_ELEMENT", + "Method tried to use an empty package element"), + EXCEP_TXT("AE_AML_NUMERIC_OVERFLOW", + "Overflow during BCD conversion or other"), + EXCEP_TXT("AE_AML_REGION_LIMIT", + "Tried to access beyond the end of an Operation Region"), + EXCEP_TXT("AE_AML_BUFFER_LIMIT", + "Tried to access beyond the end of a buffer"), + EXCEP_TXT("AE_AML_PACKAGE_LIMIT", + "Tried to access beyond the end of a package"), + EXCEP_TXT("AE_AML_DIVIDE_BY_ZERO", + "During execution of AML Divide operator"), + EXCEP_TXT("AE_AML_BAD_NAME", + "An ACPI name contains invalid character(s)"), + EXCEP_TXT("AE_AML_NAME_NOT_FOUND", + "Could not resolve a named reference"), + EXCEP_TXT("AE_AML_INTERNAL", "An internal error within the interprete"), + EXCEP_TXT("AE_AML_INVALID_SPACE_ID", + "An Operation Region SpaceID is invalid"), + EXCEP_TXT("AE_AML_STRING_LIMIT", + "String is longer than 200 characters"), + EXCEP_TXT("AE_AML_NO_RETURN_VALUE", + "A method did not return a required value"), + EXCEP_TXT("AE_AML_METHOD_LIMIT", + "A control method reached the maximum reentrancy limit of 255"), + EXCEP_TXT("AE_AML_NOT_OWNER", + "A thread tried to release a mutex that it does not own"), + EXCEP_TXT("AE_AML_MUTEX_ORDER", "Mutex SyncLevel release mismatch"), + EXCEP_TXT("AE_AML_MUTEX_NOT_ACQUIRED", + "Attempt to release a mutex that was not previously acquired"), + EXCEP_TXT("AE_AML_INVALID_RESOURCE_TYPE", + "Invalid resource type in resource list"), + EXCEP_TXT("AE_AML_INVALID_INDEX", + "Invalid Argx or Localx (x too large)"), + EXCEP_TXT("AE_AML_REGISTER_LIMIT", + "Bank value or Index value beyond range of register"), + EXCEP_TXT("AE_AML_NO_WHILE", "Break or Continue without a While"), + EXCEP_TXT("AE_AML_ALIGNMENT", + "Non-aligned memory transfer on platform that does not support this"), + EXCEP_TXT("AE_AML_NO_RESOURCE_END_TAG", + "No End Tag in a resource list"), + EXCEP_TXT("AE_AML_BAD_RESOURCE_VALUE", + "Invalid value of a resource element"), + EXCEP_TXT("AE_AML_CIRCULAR_REFERENCE", + "Two references refer to each other"), + EXCEP_TXT("AE_AML_BAD_RESOURCE_LENGTH", + "The length of a Resource Descriptor in the AML is incorrect"), + EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS", + "A memory, I/O, or PCI configuration address is invalid"), + EXCEP_TXT("AE_AML_LOOP_TIMEOUT", + "An AML While loop exceeded the maximum execution time"), + EXCEP_TXT("AE_AML_UNINITIALIZED_NODE", + "A namespace node is uninitialized or unresolved"), + EXCEP_TXT("AE_AML_TARGET_TYPE", + "A target operand of an incorrect type was encountered") +}; + +static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = { + EXCEP_TXT(NULL, NULL), + EXCEP_TXT("AE_CTRL_RETURN_VALUE", "A Method returned a value"), + EXCEP_TXT("AE_CTRL_PENDING", "Method is calling another method"), + EXCEP_TXT("AE_CTRL_TERMINATE", "Terminate the executing method"), + EXCEP_TXT("AE_CTRL_TRUE", "An If or While predicate result"), + EXCEP_TXT("AE_CTRL_FALSE", "An If or While predicate result"), + EXCEP_TXT("AE_CTRL_DEPTH", "Maximum search depth has been reached"), + EXCEP_TXT("AE_CTRL_END", "An If or While predicate is false"), + EXCEP_TXT("AE_CTRL_TRANSFER", "Transfer control to called method"), + EXCEP_TXT("AE_CTRL_BREAK", "A Break has been executed"), + 
EXCEP_TXT("AE_CTRL_CONTINUE", "A Continue has been executed"), + EXCEP_TXT("AE_CTRL_PARSE_CONTINUE", "Used to skip over bad opcodes"), + EXCEP_TXT("AE_CTRL_PARSE_PENDING", "Used to implement AML While loops") +}; + +#endif /* EXCEPTION_TABLE */ + +#endif /* __ACEXCEP_H__ */ diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h new file mode 100644 index 000000000..6f69a4f63 --- /dev/null +++ b/include/acpi/acnames.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acnames.h - Global names and strings + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACNAMES_H__ +#define __ACNAMES_H__ + +/* Method names - these methods can appear anywhere in the namespace */ + +#define METHOD_NAME__ADR "_ADR" +#define METHOD_NAME__AEI "_AEI" +#define METHOD_NAME__BBN "_BBN" +#define METHOD_NAME__CBA "_CBA" +#define METHOD_NAME__CID "_CID" +#define METHOD_NAME__CLS "_CLS" +#define METHOD_NAME__CRS "_CRS" +#define METHOD_NAME__DDN "_DDN" +#define METHOD_NAME__DMA "_DMA" +#define METHOD_NAME__HID "_HID" +#define METHOD_NAME__INI "_INI" +#define METHOD_NAME__PLD "_PLD" +#define METHOD_NAME__DSD "_DSD" +#define METHOD_NAME__PRS "_PRS" +#define METHOD_NAME__PRT "_PRT" +#define METHOD_NAME__PRW "_PRW" +#define METHOD_NAME__PS0 "_PS0" +#define METHOD_NAME__PS1 "_PS1" +#define METHOD_NAME__PS2 "_PS2" +#define METHOD_NAME__PS3 "_PS3" +#define METHOD_NAME__REG "_REG" +#define METHOD_NAME__SB_ "_SB_" +#define METHOD_NAME__SEG "_SEG" +#define METHOD_NAME__SRS "_SRS" +#define METHOD_NAME__STA "_STA" +#define METHOD_NAME__SUB "_SUB" +#define METHOD_NAME__UID "_UID" + +/* Method names - these methods must appear at the namespace root */ + +#define METHOD_PATHNAME__PTS "\\_PTS" +#define METHOD_PATHNAME__SST "\\_SI._SST" +#define METHOD_PATHNAME__WAK "\\_WAK" + +/* Definitions of the predefined namespace names */ + +#define ACPI_UNKNOWN_NAME (u32) 0x3F3F3F3F /* Unknown name is "????" */ +#define ACPI_PREFIX_MIXED (u32) 0x69706341 /* "Acpi" */ +#define ACPI_PREFIX_LOWER (u32) 0x69706361 /* "acpi" */ + +/* Root name stuff */ + +#define ACPI_ROOT_NAME (u32) 0x5F5F5F5C /* Root name is "\___" */ +#define ACPI_ROOT_PATHNAME "\\___" +#define ACPI_NAMESPACE_ROOT "Namespace Root" +#define ACPI_NS_ROOT_PATH "\\" + +#endif /* __ACNAMES_H__ */ diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h new file mode 100644 index 000000000..3a26aa7ea --- /dev/null +++ b/include/acpi/acoutput.h @@ -0,0 +1,459 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acoutput.h -- debug output + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACOUTPUT_H__ +#define __ACOUTPUT_H__ + +/* + * Debug levels and component IDs. These are used to control the + * granularity of the output of the ACPI_DEBUG_PRINT macro -- on a + * per-component basis and a per-exception-type basis. 
+ */ + +/* Component IDs are used in the global "DebugLayer" */ + +#define ACPI_UTILITIES 0x00000001 +#define ACPI_HARDWARE 0x00000002 +#define ACPI_EVENTS 0x00000004 +#define ACPI_TABLES 0x00000008 +#define ACPI_NAMESPACE 0x00000010 +#define ACPI_PARSER 0x00000020 +#define ACPI_DISPATCHER 0x00000040 +#define ACPI_EXECUTER 0x00000080 +#define ACPI_RESOURCES 0x00000100 +#define ACPI_CA_DEBUGGER 0x00000200 +#define ACPI_OS_SERVICES 0x00000400 +#define ACPI_CA_DISASSEMBLER 0x00000800 + +/* Component IDs for ACPI tools and utilities */ + +#define ACPI_COMPILER 0x00001000 +#define ACPI_TOOLS 0x00002000 +#define ACPI_EXAMPLE 0x00004000 +#define ACPI_DRIVER 0x00008000 +#define DT_COMPILER 0x00010000 +#define ASL_PREPROCESSOR 0x00020000 + +#define ACPI_ALL_COMPONENTS 0x0001FFFF +#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS) + +/* Component IDs reserved for ACPI drivers */ + +#define ACPI_ALL_DRIVERS 0xFFFF0000 + +/* + * Raw debug output levels, do not use these in the ACPI_DEBUG_PRINT macros + */ +#define ACPI_LV_INIT 0x00000001 +#define ACPI_LV_DEBUG_OBJECT 0x00000002 +#define ACPI_LV_INFO 0x00000004 +#define ACPI_LV_REPAIR 0x00000008 +#define ACPI_LV_TRACE_POINT 0x00000010 +#define ACPI_LV_ALL_EXCEPTIONS 0x0000001F + +/* Trace verbosity level 1 [Standard Trace Level] */ + +#define ACPI_LV_INIT_NAMES 0x00000020 +#define ACPI_LV_PARSE 0x00000040 +#define ACPI_LV_LOAD 0x00000080 +#define ACPI_LV_DISPATCH 0x00000100 +#define ACPI_LV_EXEC 0x00000200 +#define ACPI_LV_NAMES 0x00000400 +#define ACPI_LV_OPREGION 0x00000800 +#define ACPI_LV_BFIELD 0x00001000 +#define ACPI_LV_TABLES 0x00002000 +#define ACPI_LV_VALUES 0x00004000 +#define ACPI_LV_OBJECTS 0x00008000 +#define ACPI_LV_RESOURCES 0x00010000 +#define ACPI_LV_USER_REQUESTS 0x00020000 +#define ACPI_LV_PACKAGE 0x00040000 +#define ACPI_LV_VERBOSITY1 0x0007FF40 | ACPI_LV_ALL_EXCEPTIONS + +/* Trace verbosity level 2 [Function tracing and memory allocation] */ + +#define ACPI_LV_ALLOCATIONS 0x00100000 +#define ACPI_LV_FUNCTIONS 0x00200000 +#define ACPI_LV_OPTIMIZATIONS 0x00400000 +#define ACPI_LV_PARSE_TREES 0x00800000 +#define ACPI_LV_VERBOSITY2 0x00F00000 | ACPI_LV_VERBOSITY1 +#define ACPI_LV_ALL ACPI_LV_VERBOSITY2 + +/* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ + +#define ACPI_LV_MUTEX 0x01000000 +#define ACPI_LV_THREADS 0x02000000 +#define ACPI_LV_IO 0x04000000 +#define ACPI_LV_INTERRUPTS 0x08000000 +#define ACPI_LV_VERBOSITY3 0x0F000000 | ACPI_LV_VERBOSITY2 + +/* Exceptionally verbose output -- also used in the global "DebugLevel" */ + +#define ACPI_LV_AML_DISASSEMBLE 0x10000000 +#define ACPI_LV_VERBOSE_INFO 0x20000000 +#define ACPI_LV_FULL_TABLES 0x40000000 +#define ACPI_LV_EVENTS 0x80000000 +#define ACPI_LV_VERBOSE 0xF0000000 + +/* + * Debug level macros that are used in the DEBUG_PRINT macros + */ +#define ACPI_DEBUG_LEVEL(dl) (u32) dl,ACPI_DEBUG_PARAMETERS + +/* + * Exception level -- used in the global "DebugLevel" + * + * Note: For errors, use the ACPI_ERROR or ACPI_EXCEPTION interfaces. + * For warnings, use ACPI_WARNING. 
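These layer and level masks are what the runtime gate keys on: a debug statement is printed only when its level bit is set in the global acpi_dbg_level and the emitting module's _COMPONENT bit is set in acpi_dbg_layer, exactly the test that ACPI_IS_DEBUG_ENABLED() performs further down. A standalone sketch of that gating with the mask values copied from above; the two static variables stand in for the ACPICA globals:

#include <stdio.h>
#include <stdint.h>

#define ACPI_LV_INIT	0x00000001u	/* values copied from acoutput.h above */
#define ACPI_LV_INFO	0x00000004u
#define ACPI_UTILITIES	0x00000001u
#define ACPI_EXECUTER	0x00000080u

/* Stand-ins for the ACPICA globals acpi_dbg_level and acpi_dbg_layer. */
static uint32_t dbg_level = ACPI_LV_INIT | ACPI_LV_INFO;
static uint32_t dbg_layer = ACPI_EXECUTER;

/* Mirrors ACPI_IS_DEBUG_ENABLED(level, component). */
#define IS_DEBUG_ENABLED(level, component) \
	(((level) & dbg_level) && ((component) & dbg_layer))

int main(void)
{
	printf("INFO from the executer:  %d\n",
	       IS_DEBUG_ENABLED(ACPI_LV_INFO, ACPI_EXECUTER));	/* prints 1 */
	printf("INFO from the utilities: %d\n",
	       IS_DEBUG_ENABLED(ACPI_LV_INFO, ACPI_UTILITIES));	/* prints 0 */
	return 0;
}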
+ */ +#define ACPI_DB_INIT ACPI_DEBUG_LEVEL (ACPI_LV_INIT) +#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT) +#define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO) +#define ACPI_DB_REPAIR ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR) +#define ACPI_DB_TRACE_POINT ACPI_DEBUG_LEVEL (ACPI_LV_TRACE_POINT) +#define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS) + +/* Trace level -- also used in the global "DebugLevel" */ + +#define ACPI_DB_INIT_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_INIT_NAMES) +#define ACPI_DB_THREADS ACPI_DEBUG_LEVEL (ACPI_LV_THREADS) +#define ACPI_DB_PARSE ACPI_DEBUG_LEVEL (ACPI_LV_PARSE) +#define ACPI_DB_DISPATCH ACPI_DEBUG_LEVEL (ACPI_LV_DISPATCH) +#define ACPI_DB_LOAD ACPI_DEBUG_LEVEL (ACPI_LV_LOAD) +#define ACPI_DB_EXEC ACPI_DEBUG_LEVEL (ACPI_LV_EXEC) +#define ACPI_DB_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_NAMES) +#define ACPI_DB_OPREGION ACPI_DEBUG_LEVEL (ACPI_LV_OPREGION) +#define ACPI_DB_BFIELD ACPI_DEBUG_LEVEL (ACPI_LV_BFIELD) +#define ACPI_DB_TABLES ACPI_DEBUG_LEVEL (ACPI_LV_TABLES) +#define ACPI_DB_FUNCTIONS ACPI_DEBUG_LEVEL (ACPI_LV_FUNCTIONS) +#define ACPI_DB_OPTIMIZATIONS ACPI_DEBUG_LEVEL (ACPI_LV_OPTIMIZATIONS) +#define ACPI_DB_PARSE_TREES ACPI_DEBUG_LEVEL (ACPI_LV_PARSE_TREES) +#define ACPI_DB_VALUES ACPI_DEBUG_LEVEL (ACPI_LV_VALUES) +#define ACPI_DB_OBJECTS ACPI_DEBUG_LEVEL (ACPI_LV_OBJECTS) +#define ACPI_DB_ALLOCATIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALLOCATIONS) +#define ACPI_DB_RESOURCES ACPI_DEBUG_LEVEL (ACPI_LV_RESOURCES) +#define ACPI_DB_IO ACPI_DEBUG_LEVEL (ACPI_LV_IO) +#define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS) +#define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS) +#define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE) +#define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX) +#define ACPI_DB_EVENTS ACPI_DEBUG_LEVEL (ACPI_LV_EVENTS) + +#define ACPI_DB_ALL ACPI_DEBUG_LEVEL (ACPI_LV_ALL) + +/* Defaults for debug_level, debug and normal */ + +#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR) +#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR) +#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) + +/* + * Global trace flags + */ +#define ACPI_TRACE_ENABLED ((u32) 4) +#define ACPI_TRACE_ONESHOT ((u32) 2) +#define ACPI_TRACE_OPCODE ((u32) 1) + +/* Defaults for trace debugging level/layer */ + +#define ACPI_TRACE_LEVEL_ALL ACPI_LV_ALL +#define ACPI_TRACE_LAYER_ALL 0x000001FF +#define ACPI_TRACE_LEVEL_DEFAULT ACPI_LV_TRACE_POINT +#define ACPI_TRACE_LAYER_DEFAULT ACPI_EXECUTER + +#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES) +/* + * The module name is used primarily for error and debug messages. + * The __FILE__ macro is not very useful for this, because it + * usually includes the entire pathname to the module making the + * debug output difficult to read. + */ +#define ACPI_MODULE_NAME(name) static const char ACPI_UNUSED_VAR _acpi_module_name[] = name; +#else +/* + * For the no-debug and no-error-msg cases, we must at least define + * a null module name. + */ +#define ACPI_MODULE_NAME(name) +#define _acpi_module_name "" +#endif + +/* + * Ascii error messages can be configured out + */ +#ifndef ACPI_NO_ERROR_MESSAGES +#define AE_INFO _acpi_module_name, __LINE__ + +/* + * Error reporting. Callers module and line number are inserted by AE_INFO, + * the plist contains a set of parens to allow variable-length lists. + * These macros are used for both the debug and non-debug versions of the code. 
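The plist convention described above is why calls to these macros are written with two sets of parentheses: the outer pair belongs to the macro, and the inner pair becomes the argument list of acpi_error()/acpi_exception()/acpi_info(), with AE_INFO contributing _acpi_module_name and __LINE__. A short sketch of the reporting idiom as it appears in ACPICA-style code; the function and the messages are illustrative, and the public headers added by this patch are assumed to be included:

#include <acpi/acpi.h>		/* master public header, added later in this patch */

ACPI_MODULE_NAME("example")

static void example_report(acpi_status status)
{
	if (ACPI_FAILURE(status)) {
		/* Expands to acpi_exception(_acpi_module_name, __LINE__, status, ...) */
		ACPI_EXCEPTION((AE_INFO, status,
				"While evaluating the example method"));
		return;
	}

	ACPI_INFO(("Example method completed, status 0x%04X", status));
}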
+ */ +#define ACPI_INFO(plist) acpi_info plist +#define ACPI_WARNING(plist) acpi_warning plist +#define ACPI_EXCEPTION(plist) acpi_exception plist +#define ACPI_ERROR(plist) acpi_error plist +#define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist +#define ACPI_BIOS_ERROR(plist) acpi_bios_error plist +#define ACPI_DEBUG_OBJECT(obj,l,i) acpi_ex_do_debug_object(obj,l,i) + +#else + +/* No error messages */ + +#define ACPI_INFO(plist) +#define ACPI_WARNING(plist) +#define ACPI_EXCEPTION(plist) +#define ACPI_ERROR(plist) +#define ACPI_BIOS_WARNING(plist) +#define ACPI_BIOS_ERROR(plist) +#define ACPI_DEBUG_OBJECT(obj,l,i) + +#endif /* ACPI_NO_ERROR_MESSAGES */ + +/* + * Debug macros that are conditionally compiled + */ +#ifdef ACPI_DEBUG_OUTPUT + +/* + * If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header, + * define it now. This is the case where there the compiler does not support + * a __func__ macro or equivalent. + */ +#ifndef ACPI_GET_FUNCTION_NAME +#define ACPI_GET_FUNCTION_NAME _acpi_function_name + +/* + * The Name parameter should be the procedure name as a non-quoted string. + * The function name is also used by the function exit macros below. + * Note: (const char) is used to be compatible with the debug interfaces + * and macros such as __func__. + */ +#define ACPI_FUNCTION_NAME(name) static const char _acpi_function_name[] = #name; + +#else +/* Compiler supports __func__ (or equivalent) -- Ignore this macro */ + +#define ACPI_FUNCTION_NAME(name) +#endif /* ACPI_GET_FUNCTION_NAME */ + +/* + * Common parameters used for debug output functions: + * line number, function name, module(file) name, component ID + */ +#define ACPI_DEBUG_PARAMETERS \ + __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT + +/* Check if debug output is currently dynamically enabled */ + +#define ACPI_IS_DEBUG_ENABLED(level, component) \ + ((level & acpi_dbg_level) && (component & acpi_dbg_layer)) + +/* + * Master debug print macros + * Print message if and only if: + * 1) Debug print for the current component is enabled + * 2) Debug error level or trace level for the print statement is enabled + * + * November 2012: Moved the runtime check for whether to actually emit the + * debug message outside of the print function itself. This improves overall + * performance at a relatively small code cost. Implementation involves the + * use of variadic macros supported by C99. + * + * Note: the ACPI_DO_WHILE0 macro is used to prevent some compilers from + * complaining about these constructs. On other compilers the do...while + * adds some extra code, so this feature is optional. + */ +#ifdef ACPI_USE_DO_WHILE_0 +#define ACPI_DO_WHILE0(a) do a while(0) +#else +#define ACPI_DO_WHILE0(a) a +#endif + +/* DEBUG_PRINT functions */ + +#ifndef COMPILER_VA_MACRO + +#define ACPI_DEBUG_PRINT(plist) acpi_debug_print plist +#define ACPI_DEBUG_PRINT_RAW(plist) acpi_debug_print_raw plist + +#else + +/* Helper macros for DEBUG_PRINT */ + +#define ACPI_DO_DEBUG_PRINT(function, level, line, filename, modulename, component, ...) \ + ACPI_DO_WHILE0 ({ \ + if (ACPI_IS_DEBUG_ENABLED (level, component)) \ + { \ + function (level, line, filename, modulename, component, __VA_ARGS__); \ + } \ + }) + +#define ACPI_ACTUAL_DEBUG(level, line, filename, modulename, component, ...) \ + ACPI_DO_DEBUG_PRINT (acpi_debug_print, level, line, \ + filename, modulename, component, __VA_ARGS__) + +#define ACPI_ACTUAL_DEBUG_RAW(level, line, filename, modulename, component, ...) 
\ + ACPI_DO_DEBUG_PRINT (acpi_debug_print_raw, level, line, \ + filename, modulename, component, __VA_ARGS__) + +#define ACPI_DEBUG_PRINT(plist) ACPI_ACTUAL_DEBUG plist +#define ACPI_DEBUG_PRINT_RAW(plist) ACPI_ACTUAL_DEBUG_RAW plist + +#endif + +/* + * Function entry tracing + * + * The name of the function is emitted as a local variable that is + * intended to be used by both the entry trace and the exit trace. + */ + +/* Helper macro */ + +#define ACPI_TRACE_ENTRY(name, function, type, param) \ + ACPI_FUNCTION_NAME (name) \ + function (ACPI_DEBUG_PARAMETERS, (type) (param)) + +/* The actual entry trace macros */ + +#define ACPI_FUNCTION_TRACE(name) \ + ACPI_FUNCTION_NAME(name) \ + acpi_ut_trace (ACPI_DEBUG_PARAMETERS) + +#define ACPI_FUNCTION_TRACE_PTR(name, pointer) \ + ACPI_TRACE_ENTRY (name, acpi_ut_trace_ptr, void *, pointer) + +#define ACPI_FUNCTION_TRACE_U32(name, value) \ + ACPI_TRACE_ENTRY (name, acpi_ut_trace_u32, u32, value) + +#define ACPI_FUNCTION_TRACE_STR(name, string) \ + ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, const char *, string) + +#define ACPI_FUNCTION_ENTRY() \ + acpi_ut_track_stack_ptr() + +/* + * Function exit tracing + * + * These macros include a return statement. This is usually considered + * bad form, but having a separate exit macro before the actual return + * is very ugly and difficult to maintain. + * + * One of the FUNCTION_TRACE macros above must be used in conjunction + * with these macros so that "_AcpiFunctionName" is defined. + * + * There are two versions of most of the return macros. The default version is + * safer, since it avoids side-effects by guaranteeing that the argument will + * not be evaluated twice. + * + * A less-safe version of the macros is provided for optional use if the + * compiler uses excessive CPU stack (for example, this may happen in the + * debug case if code optimzation is disabled.) 
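Taken together, the entry and exit macros give every traced function the same shape: ACPI_FUNCTION_TRACE() (or one of its typed variants above) records entry and supplies the function-name string, while the return_* macros defined just below record exit and perform the actual return, so early error returns stay visible in the trace. A minimal sketch of that canonical shape; the function, its argument, and the message are illustrative, and the public headers added by this patch are assumed:

#include <acpi/acpi.h>		/* master public header, added later in this patch */

#define _COMPONENT	ACPI_EXAMPLE	/* component bit tested against acpi_dbg_layer */
ACPI_MODULE_NAME("extrace")

static acpi_status example_do_work(u32 value)
{
	ACPI_FUNCTION_TRACE(example_do_work);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Working on value %u\n", value));

	if (!value) {
		/* The exit trace still fires on this early error path. */
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	return_ACPI_STATUS(AE_OK);
}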
+ */ + +/* Exit trace helper macro */ + +#ifndef ACPI_SIMPLE_RETURN_MACROS + +#define ACPI_TRACE_EXIT(function, type, param) \ + ACPI_DO_WHILE0 ({ \ + register type _param = (type) (param); \ + function (ACPI_DEBUG_PARAMETERS, _param); \ + return (_param); \ + }) + +#else /* Use original less-safe macros */ + +#define ACPI_TRACE_EXIT(function, type, param) \ + ACPI_DO_WHILE0 ({ \ + function (ACPI_DEBUG_PARAMETERS, (type) (param)); \ + return (param); \ + }) + +#endif /* ACPI_SIMPLE_RETURN_MACROS */ + +/* The actual exit macros */ + +#define return_VOID \ + ACPI_DO_WHILE0 ({ \ + acpi_ut_exit (ACPI_DEBUG_PARAMETERS); \ + return; \ + }) + +#define return_ACPI_STATUS(status) \ + ACPI_TRACE_EXIT (acpi_ut_status_exit, acpi_status, status) + +#define return_PTR(pointer) \ + ACPI_TRACE_EXIT (acpi_ut_ptr_exit, void *, pointer) + +#define return_STR(string) \ + ACPI_TRACE_EXIT (acpi_ut_str_exit, const char *, string) + +#define return_VALUE(value) \ + ACPI_TRACE_EXIT (acpi_ut_value_exit, u64, value) + +#define return_UINT32(value) \ + ACPI_TRACE_EXIT (acpi_ut_value_exit, u32, value) + +#define return_UINT8(value) \ + ACPI_TRACE_EXIT (acpi_ut_value_exit, u8, value) + +/* Conditional execution */ + +#define ACPI_DEBUG_EXEC(a) a +#define ACPI_DEBUG_ONLY_MEMBERS(a) a; +#define _VERBOSE_STRUCTURES + +/* Various object display routines for debug */ + +#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0) +#define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c) +#define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b) +#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d) +#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) + +#define ACPI_TRACE_POINT(a, b, c, d) acpi_trace_point (a, b, c, d) + +#else /* ACPI_DEBUG_OUTPUT */ +/* + * This is the non-debug case -- make everything go away, + * leaving no executable debug code! + */ +#define ACPI_DEBUG_PRINT(pl) +#define ACPI_DEBUG_PRINT_RAW(pl) +#define ACPI_DEBUG_EXEC(a) +#define ACPI_DEBUG_ONLY_MEMBERS(a) +#define ACPI_FUNCTION_NAME(a) +#define ACPI_FUNCTION_TRACE(a) +#define ACPI_FUNCTION_TRACE_PTR(a, b) +#define ACPI_FUNCTION_TRACE_U32(a, b) +#define ACPI_FUNCTION_TRACE_STR(a, b) +#define ACPI_FUNCTION_ENTRY() +#define ACPI_DUMP_STACK_ENTRY(a) +#define ACPI_DUMP_OPERANDS(a, b, c) +#define ACPI_DUMP_ENTRY(a, b) +#define ACPI_DUMP_PATHNAME(a, b, c, d) +#define ACPI_DUMP_BUFFER(a, b) +#define ACPI_IS_DEBUG_ENABLED(level, component) 0 +#define ACPI_TRACE_POINT(a, b, c, d) + +/* Return macros must have a return statement at the minimum */ + +#define return_VOID return +#define return_ACPI_STATUS(s) return(s) +#define return_PTR(s) return(s) +#define return_STR(s) return(s) +#define return_VALUE(s) return(s) +#define return_UINT8(s) return(s) +#define return_UINT32(s) return(s) + +#endif /* ACPI_DEBUG_OUTPUT */ + +#endif /* __ACOUTPUT_H__ */ diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h new file mode 100644 index 000000000..ccdc5981b --- /dev/null +++ b/include/acpi/acpi.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acpi.h - Master public include file used to interface to ACPICA + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACPI_H__ +#define __ACPI_H__ + +/* + * Public include files for use by code that will interface to ACPICA. 
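Because acpi.h is the single public entry point, code that talks to ACPICA normally includes just this header (in the kernel, usually indirectly through linux/acpi.h) and then calls the external interfaces declared in acpixf.h, which this patch also adds. A small sketch using acpi_format_exception(), the lookup backed by the exception string tables in acexcep.h above; the wrapper function is illustrative, and the acpi_format_exception() interface is quoted from ACPICA's external API rather than from this hunk:

#include <acpi/acpi.h>	/* include order of the individual headers is handled here */

/*
 * Sketch: turn an acpi_status into its symbolic name (e.g. "AE_NOT_FOUND")
 * for log messages.  acpi_format_exception() is declared in acpixf.h.
 */
static const char *example_status_name(acpi_status status)
{
	return acpi_format_exception(status);
}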
+ * + * Information includes the ACPICA data types, names, exceptions, and + * external interface prototypes. Also included are the definitions for + * all ACPI tables (FADT, MADT, etc.) + * + * Note: The order of these include files is important. + */ +#include /* Environment-specific items */ +#include /* Common ACPI names and strings */ +#include /* ACPICA data types and structures */ +#include /* ACPICA exceptions */ +#include /* ACPI table definitions */ +#include /* Resource Descriptor structs */ +#include /* Extra environment-specific items */ +#include /* Error output and Debug macros */ +#include /* OSL interfaces (ACPICA-to-OS) */ +#include /* ACPI core subsystem external interfaces */ + +#endif /* __ACPI_H__ */ diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h new file mode 100644 index 000000000..8b19618ba --- /dev/null +++ b/include/acpi/acpi_bus.h @@ -0,0 +1,688 @@ +/* + * acpi_bus.h - ACPI Bus Driver ($Revision: 22 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef __ACPI_BUS_H__ +#define __ACPI_BUS_H__ + +#include +#include + +/* TBD: Make dynamic */ +#define ACPI_MAX_HANDLES 10 +struct acpi_handle_list { + u32 count; + acpi_handle handles[ACPI_MAX_HANDLES]; +}; + +/* acpi_utils.h */ +acpi_status +acpi_extract_package(union acpi_object *package, + struct acpi_buffer *format, struct acpi_buffer *buffer); +acpi_status +acpi_evaluate_integer(acpi_handle handle, + acpi_string pathname, + struct acpi_object_list *arguments, unsigned long long *data); +acpi_status +acpi_evaluate_reference(acpi_handle handle, + acpi_string pathname, + struct acpi_object_list *arguments, + struct acpi_handle_list *list); +acpi_status +acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code, + struct acpi_buffer *status_buf); + +acpi_status +acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld); + +bool acpi_has_method(acpi_handle handle, char *name); +acpi_status acpi_execute_simple_method(acpi_handle handle, char *method, + u64 arg); +acpi_status acpi_evaluate_ej0(acpi_handle handle); +acpi_status acpi_evaluate_lck(acpi_handle handle, int lock); +bool acpi_ata_match(acpi_handle handle); +bool acpi_bay_match(acpi_handle handle); +bool acpi_dock_match(acpi_handle handle); + +bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs); +union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, + u64 rev, u64 func, union acpi_object *argv4); + +static inline union acpi_object * +acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, + u64 func, union acpi_object *argv4, + acpi_object_type type) +{ + union acpi_object *obj; + + obj = acpi_evaluate_dsm(handle, guid, rev, func, argv4); + if (obj && obj->type != type) { + ACPI_FREE(obj); + obj = NULL; + } + + return 
obj; +} + +#define ACPI_INIT_DSM_ARGV4(cnt, eles) \ + { \ + .package.type = ACPI_TYPE_PACKAGE, \ + .package.count = (cnt), \ + .package.elements = (eles) \ + } + +bool acpi_dev_found(const char *hid); +bool acpi_dev_present(const char *hid, const char *uid, s64 hrv); + +const char * +acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv); + +#ifdef CONFIG_ACPI + +#include + +#define ACPI_BUS_FILE_ROOT "acpi" +extern struct proc_dir_entry *acpi_root_dir; + +enum acpi_bus_device_type { + ACPI_BUS_TYPE_DEVICE = 0, + ACPI_BUS_TYPE_POWER, + ACPI_BUS_TYPE_PROCESSOR, + ACPI_BUS_TYPE_THERMAL, + ACPI_BUS_TYPE_POWER_BUTTON, + ACPI_BUS_TYPE_SLEEP_BUTTON, + ACPI_BUS_TYPE_ECDT_EC, + ACPI_BUS_DEVICE_TYPE_COUNT +}; + +struct acpi_driver; +struct acpi_device; + +/* + * ACPI Scan Handler + * ----------------- + */ + +struct acpi_hotplug_profile { + struct kobject kobj; + int (*scan_dependent)(struct acpi_device *adev); + void (*notify_online)(struct acpi_device *adev); + bool enabled:1; + bool demand_offline:1; +}; + +static inline struct acpi_hotplug_profile *to_acpi_hotplug_profile( + struct kobject *kobj) +{ + return container_of(kobj, struct acpi_hotplug_profile, kobj); +} + +struct acpi_scan_handler { + const struct acpi_device_id *ids; + struct list_head list_node; + bool (*match)(const char *idstr, const struct acpi_device_id **matchid); + int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id); + void (*detach)(struct acpi_device *dev); + void (*bind)(struct device *phys_dev); + void (*unbind)(struct device *phys_dev); + struct acpi_hotplug_profile hotplug; +}; + +/* + * ACPI Hotplug Context + * -------------------- + */ + +struct acpi_hotplug_context { + struct acpi_device *self; + int (*notify)(struct acpi_device *, u32); + void (*uevent)(struct acpi_device *, u32); + void (*fixup)(struct acpi_device *); +}; + +/* + * ACPI Driver + * ----------- + */ + +typedef int (*acpi_op_add) (struct acpi_device * device); +typedef int (*acpi_op_remove) (struct acpi_device * device); +typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event); + +struct acpi_device_ops { + acpi_op_add add; + acpi_op_remove remove; + acpi_op_notify notify; +}; + +#define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */ + +struct acpi_driver { + char name[80]; + char class[80]; + const struct acpi_device_id *ids; /* Supported Hardware IDs */ + unsigned int flags; + struct acpi_device_ops ops; + struct device_driver drv; + struct module *owner; +}; + +/* + * ACPI Device + * ----------- + */ + +/* Status (_STA) */ + +struct acpi_device_status { + u32 present:1; + u32 enabled:1; + u32 show_in_ui:1; + u32 functional:1; + u32 battery_present:1; + u32 reserved:27; +}; + +/* Flags */ + +struct acpi_device_flags { + u32 dynamic_status:1; + u32 removable:1; + u32 ejectable:1; + u32 power_manageable:1; + u32 match_driver:1; + u32 initialized:1; + u32 visited:1; + u32 hotplug_notify:1; + u32 is_dock_station:1; + u32 of_compatible_ok:1; + u32 coherent_dma:1; + u32 cca_seen:1; + u32 enumeration_by_parent:1; + u32 reserved:19; +}; + +/* File System */ + +struct acpi_device_dir { + struct proc_dir_entry *entry; +}; + +#define acpi_device_dir(d) ((d)->dir.entry) + +/* Plug and Play */ + +typedef char acpi_bus_id[8]; +typedef unsigned long acpi_bus_address; +typedef char acpi_device_name[40]; +typedef char acpi_device_class[20]; + +struct acpi_hardware_id { + struct list_head list; + const char *id; +}; + +struct acpi_pnp_type { + u32 hardware_id:1; + u32 bus_address:1; + u32 
platform_id:1; + u32 reserved:29; +}; + +struct acpi_device_pnp { + acpi_bus_id bus_id; /* Object name */ + int instance_no; /* Instance number of this object */ + struct acpi_pnp_type type; /* ID type */ + acpi_bus_address bus_address; /* _ADR */ + char *unique_id; /* _UID */ + struct list_head ids; /* _HID and _CIDs */ + acpi_device_name device_name; /* Driver-determined */ + acpi_device_class device_class; /* " */ + union acpi_object *str_obj; /* unicode string for _STR method */ +}; + +#define acpi_device_bid(d) ((d)->pnp.bus_id) +#define acpi_device_adr(d) ((d)->pnp.bus_address) +const char *acpi_device_hid(struct acpi_device *device); +#define acpi_device_uid(d) ((d)->pnp.unique_id) +#define acpi_device_name(d) ((d)->pnp.device_name) +#define acpi_device_class(d) ((d)->pnp.device_class) + +/* Power Management */ + +struct acpi_device_power_flags { + u32 explicit_get:1; /* _PSC present? */ + u32 power_resources:1; /* Power resources */ + u32 inrush_current:1; /* Serialize Dx->D0 */ + u32 power_removed:1; /* Optimize Dx->D0 */ + u32 ignore_parent:1; /* Power is independent of parent power state */ + u32 dsw_present:1; /* _DSW present? */ + u32 reserved:26; +}; + +struct acpi_device_power_state { + struct { + u8 valid:1; + u8 explicit_set:1; /* _PSx present? */ + u8 reserved:6; + } flags; + int power; /* % Power (compared to D0) */ + int latency; /* Dx->D0 time (microseconds) */ + struct list_head resources; /* Power resources referenced */ +}; + +struct acpi_device_power { + int state; /* Current state */ + struct acpi_device_power_flags flags; + struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */ +}; + +/* Performance Management */ + +struct acpi_device_perf_flags { + u8 reserved:8; +}; + +struct acpi_device_perf_state { + struct { + u8 valid:1; + u8 reserved:7; + } flags; + u8 power; /* % Power (compared to P0) */ + u8 performance; /* % Performance ( " ) */ + int latency; /* Px->P0 time (microseconds) */ +}; + +struct acpi_device_perf { + int state; + struct acpi_device_perf_flags flags; + int state_count; + struct acpi_device_perf_state *states; +}; + +/* Wakeup Management */ +struct acpi_device_wakeup_flags { + u8 valid:1; /* Can successfully enable wakeup? 
*/ + u8 notifier_present:1; /* Wake-up notify handler has been installed */ +}; + +struct acpi_device_wakeup_context { + void (*func)(struct acpi_device_wakeup_context *context); + struct device *dev; +}; + +struct acpi_device_wakeup { + acpi_handle gpe_device; + u64 gpe_number; + u64 sleep_state; + struct list_head resources; + struct acpi_device_wakeup_flags flags; + struct acpi_device_wakeup_context context; + struct wakeup_source *ws; + int prepare_count; + int enable_count; +}; + +struct acpi_device_physical_node { + unsigned int node_id; + struct list_head node; + struct device *dev; + bool put_online:1; +}; + +/* ACPI Device Specific Data (_DSD) */ +struct acpi_device_data { + const union acpi_object *pointer; + const union acpi_object *properties; + const union acpi_object *of_compatible; + struct list_head subnodes; +}; + +struct acpi_gpio_mapping; + +/* Device */ +struct acpi_device { + int device_type; + acpi_handle handle; /* no handle for fixed hardware */ + struct fwnode_handle fwnode; + struct acpi_device *parent; + struct list_head children; + struct list_head node; + struct list_head wakeup_list; + struct list_head del_list; + struct acpi_device_status status; + struct acpi_device_flags flags; + struct acpi_device_pnp pnp; + struct acpi_device_power power; + struct acpi_device_wakeup wakeup; + struct acpi_device_perf performance; + struct acpi_device_dir dir; + struct acpi_device_data data; + struct acpi_scan_handler *handler; + struct acpi_hotplug_context *hp; + struct acpi_driver *driver; + const struct acpi_gpio_mapping *driver_gpios; + void *driver_data; + struct device dev; + unsigned int physical_node_count; + unsigned int dep_unmet; + struct list_head physical_node_list; + struct mutex physical_node_lock; + void (*remove)(struct acpi_device *); +}; + +/* Non-device subnode */ +struct acpi_data_node { + const char *name; + acpi_handle handle; + struct fwnode_handle fwnode; + struct fwnode_handle *parent; + struct acpi_device_data data; + struct list_head sibling; + struct kobject kobj; + struct completion kobj_done; +}; + +extern const struct fwnode_operations acpi_device_fwnode_ops; +extern const struct fwnode_operations acpi_data_fwnode_ops; +extern const struct fwnode_operations acpi_static_fwnode_ops; + +bool is_acpi_device_node(const struct fwnode_handle *fwnode); +bool is_acpi_data_node(const struct fwnode_handle *fwnode); + +static inline bool is_acpi_node(const struct fwnode_handle *fwnode) +{ + return (is_acpi_device_node(fwnode) || is_acpi_data_node(fwnode)); +} + +#define to_acpi_device_node(__fwnode) \ + ({ \ + typeof(__fwnode) __to_acpi_device_node_fwnode = __fwnode; \ + \ + is_acpi_device_node(__to_acpi_device_node_fwnode) ? \ + container_of(__to_acpi_device_node_fwnode, \ + struct acpi_device, fwnode) : \ + NULL; \ + }) + +#define to_acpi_data_node(__fwnode) \ + ({ \ + typeof(__fwnode) __to_acpi_data_node_fwnode = __fwnode; \ + \ + is_acpi_data_node(__to_acpi_data_node_fwnode) ? \ + container_of(__to_acpi_data_node_fwnode, \ + struct acpi_data_node, fwnode) : \ + NULL; \ + }) + +static inline bool is_acpi_static_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && + fwnode->ops == &acpi_static_fwnode_ops; +} + +static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode, + const char *name) +{ + return is_acpi_data_node(fwnode) ? 
+ (!strcmp(to_acpi_data_node(fwnode)->name, name)) : false; +} + +static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) +{ + return &adev->fwnode; +} + +static inline void *acpi_driver_data(struct acpi_device *d) +{ + return d->driver_data; +} + +#define to_acpi_device(d) container_of(d, struct acpi_device, dev) +#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv) + +static inline void acpi_set_device_status(struct acpi_device *adev, u32 sta) +{ + *((u32 *)&adev->status) = sta; +} + +static inline void acpi_set_hp_context(struct acpi_device *adev, + struct acpi_hotplug_context *hp) +{ + hp->self = adev; + adev->hp = hp; +} + +void acpi_initialize_hp_context(struct acpi_device *adev, + struct acpi_hotplug_context *hp, + int (*notify)(struct acpi_device *, u32), + void (*uevent)(struct acpi_device *, u32)); + +/* acpi_device.dev.bus == &acpi_bus_type */ +extern struct bus_type acpi_bus_type; + +/* + * Events + * ------ + */ + +struct acpi_bus_event { + struct list_head node; + acpi_device_class device_class; + acpi_bus_id bus_id; + u32 type; + u32 data; +}; + +extern struct kobject *acpi_kobj; +extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); +void acpi_bus_private_data_handler(acpi_handle, void *); +int acpi_bus_get_private_data(acpi_handle, void **); +int acpi_bus_attach_private_data(acpi_handle, void *); +void acpi_bus_detach_private_data(acpi_handle); +extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); +extern int register_acpi_notifier(struct notifier_block *); +extern int unregister_acpi_notifier(struct notifier_block *); + +/* + * External Functions + */ + +int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); +struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle); +void acpi_bus_put_acpi_device(struct acpi_device *adev); +acpi_status acpi_bus_get_status_handle(acpi_handle handle, + unsigned long long *sta); +int acpi_bus_get_status(struct acpi_device *device); + +int acpi_bus_set_power(acpi_handle handle, int state); +const char *acpi_power_state_string(int state); +int acpi_device_get_power(struct acpi_device *device, int *state); +int acpi_device_set_power(struct acpi_device *device, int state); +int acpi_bus_init_power(struct acpi_device *device); +int acpi_device_fix_up_power(struct acpi_device *device); +int acpi_bus_update_power(acpi_handle handle, int *state_p); +int acpi_device_update_power(struct acpi_device *device, int *state_p); +bool acpi_bus_power_manageable(acpi_handle handle); + +#ifdef CONFIG_PM +bool acpi_bus_can_wakeup(acpi_handle handle); +#else +static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; } +#endif + +void acpi_scan_lock_acquire(void); +void acpi_scan_lock_release(void); +void acpi_lock_hp_context(void); +void acpi_unlock_hp_context(void); +int acpi_scan_add_handler(struct acpi_scan_handler *handler); +int acpi_bus_register_driver(struct acpi_driver *driver); +void acpi_bus_unregister_driver(struct acpi_driver *driver); +int acpi_bus_scan(acpi_handle handle); +void acpi_bus_trim(struct acpi_device *start); +acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd); +int acpi_match_device_ids(struct acpi_device *device, + const struct acpi_device_id *ids); +void acpi_set_modalias(struct acpi_device *adev, const char *default_id, + char *modalias, size_t len); +int acpi_create_dir(struct acpi_device *); +void acpi_remove_dir(struct acpi_device *); + +static inline bool acpi_device_enumerated(struct 
acpi_device *adev) +{ + return adev && adev->flags.initialized && adev->flags.visited; +} + +/** + * module_acpi_driver(acpi_driver) - Helper macro for registering an ACPI driver + * @__acpi_driver: acpi_driver struct + * + * Helper macro for ACPI drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_acpi_driver(__acpi_driver) \ + module_driver(__acpi_driver, acpi_bus_register_driver, \ + acpi_bus_unregister_driver) + +/* + * Bind physical devices with ACPI devices + */ +struct acpi_bus_type { + struct list_head list; + const char *name; + bool (*match)(struct device *dev); + struct acpi_device * (*find_companion)(struct device *); + void (*setup)(struct device *); + void (*cleanup)(struct device *); +}; +int register_acpi_bus_type(struct acpi_bus_type *); +int unregister_acpi_bus_type(struct acpi_bus_type *); +int acpi_bind_one(struct device *dev, struct acpi_device *adev); +int acpi_unbind_one(struct device *dev); + +struct acpi_pci_root { + struct acpi_device * device; + struct pci_bus *bus; + u16 segment; + struct resource secondary; /* downstream bus range */ + + u32 osc_support_set; /* _OSC state of support bits */ + u32 osc_control_set; /* _OSC state of control bits */ + phys_addr_t mcfg_addr; +}; + +/* helper */ + +bool acpi_dma_supported(struct acpi_device *adev); +enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); +int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, + u64 *size); +int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr); +void acpi_dma_deconfigure(struct device *dev); + +struct acpi_device *acpi_find_child_device(struct acpi_device *parent, + u64 address, bool check_children); +int acpi_is_root_bridge(acpi_handle); +struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); + +int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); +int acpi_disable_wakeup_device_power(struct acpi_device *dev); + +#ifdef CONFIG_X86 +bool acpi_device_always_present(struct acpi_device *adev); +#else +static inline bool acpi_device_always_present(struct acpi_device *adev) +{ + return false; +} +#endif + +#ifdef CONFIG_PM +void acpi_pm_wakeup_event(struct device *dev); +acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, + void (*func)(struct acpi_device_wakeup_context *context)); +acpi_status acpi_remove_pm_notifier(struct acpi_device *adev); +bool acpi_pm_device_can_wakeup(struct device *dev); +int acpi_pm_device_sleep_state(struct device *, int *, int); +int acpi_pm_set_device_wakeup(struct device *dev, bool enable); +#else +static inline void acpi_pm_wakeup_event(struct device *dev) +{ +} +static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev, + struct device *dev, + void (*func)(struct acpi_device_wakeup_context *context)) +{ + return AE_SUPPORT; +} +static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev) +{ + return AE_SUPPORT; +} +static inline bool acpi_pm_device_can_wakeup(struct device *dev) +{ + return false; +} +static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) +{ + if (p) + *p = ACPI_STATE_D0; + + return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ? 
+ m : ACPI_STATE_D0; +} +static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable) +{ + return -ENODEV; +} +#endif + +#ifdef CONFIG_ACPI_SLEEP +u32 acpi_target_system_state(void); +#else +static inline u32 acpi_target_system_state(void) { return ACPI_STATE_S0; } +#endif + +static inline bool acpi_device_power_manageable(struct acpi_device *adev) +{ + return adev->flags.power_manageable; +} + +static inline bool acpi_device_can_wakeup(struct acpi_device *adev) +{ + return adev->wakeup.flags.valid; +} + +static inline bool acpi_device_can_poweroff(struct acpi_device *adev) +{ + return adev->power.states[ACPI_STATE_D3_COLD].flags.valid || + ((acpi_gbl_FADT.header.revision < 6) && + adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set); +} + +#else /* CONFIG_ACPI */ + +static inline int register_acpi_bus_type(void *bus) { return 0; } +static inline int unregister_acpi_bus_type(void *bus) { return 0; } + +#endif /* CONFIG_ACPI */ + +#endif /*__ACPI_BUS_H__*/ diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h new file mode 100644 index 000000000..144997573 --- /dev/null +++ b/include/acpi/acpi_drivers.h @@ -0,0 +1,124 @@ +/* + * acpi_drivers.h ($Revision: 31 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef __ACPI_DRIVERS_H__ +#define __ACPI_DRIVERS_H__ + +#define ACPI_MAX_STRING 80 + +/* + * Please update drivers/acpi/debug.c and Documentation/acpi/debug.txt + * if you add to this list. + */ +#define ACPI_BUS_COMPONENT 0x00010000 +#define ACPI_AC_COMPONENT 0x00020000 +#define ACPI_BATTERY_COMPONENT 0x00040000 +#define ACPI_BUTTON_COMPONENT 0x00080000 +#define ACPI_SBS_COMPONENT 0x00100000 +#define ACPI_FAN_COMPONENT 0x00200000 +#define ACPI_PCI_COMPONENT 0x00400000 +#define ACPI_POWER_COMPONENT 0x00800000 +#define ACPI_CONTAINER_COMPONENT 0x01000000 +#define ACPI_SYSTEM_COMPONENT 0x02000000 +#define ACPI_THERMAL_COMPONENT 0x04000000 +#define ACPI_MEMORY_DEVICE_COMPONENT 0x08000000 +#define ACPI_VIDEO_COMPONENT 0x10000000 +#define ACPI_PROCESSOR_COMPONENT 0x20000000 + +/* + * _HID definitions + * HIDs must conform to ACPI spec(6.1.4) + * Linux specific HIDs do not apply to this and begin with LNX: + */ + +#define ACPI_POWER_HID "LNXPOWER" +#define ACPI_PROCESSOR_OBJECT_HID "LNXCPU" +#define ACPI_SYSTEM_HID "LNXSYSTM" +#define ACPI_THERMAL_HID "LNXTHERM" +#define ACPI_BUTTON_HID_POWERF "LNXPWRBN" +#define ACPI_BUTTON_HID_SLEEPF "LNXSLPBN" +#define ACPI_VIDEO_HID "LNXVIDEO" +#define ACPI_BAY_HID "LNXIOBAY" +#define ACPI_DOCK_HID "LNXDOCK" +#define ACPI_ECDT_HID "LNXEC" +/* Quirk for broken IBM BIOSes */ +#define ACPI_SMBUS_IBM_HID "SMBUSIBM" + +/* + * For fixed hardware buttons, we fabricate acpi_devices with HID + * ACPI_BUTTON_HID_POWERF or ACPI_BUTTON_HID_SLEEPF. 
Fixed hardware + * signals only an event; it doesn't supply a notification value. + * To allow drivers to treat notifications from fixed hardware the + * same as those from real devices, we turn the events into this + * notification value. + */ +#define ACPI_FIXED_HARDWARE_EVENT 0x100 + +/* -------------------------------------------------------------------------- + PCI + -------------------------------------------------------------------------- */ + + +/* ACPI PCI Interrupt Link (pci_link.c) */ + +int acpi_irq_penalty_init(void); +int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, + int *polarity, char **name); +int acpi_pci_link_free_irq(acpi_handle handle); + +/* ACPI PCI Device Binding (pci_bind.c) */ + +struct pci_bus; + +struct pci_dev *acpi_get_pci_dev(acpi_handle); + +/* Arch-defined function to add a bus to the system */ + +struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root); + +#ifdef CONFIG_X86 +void pci_acpi_crs_quirks(void); +#else +static inline void pci_acpi_crs_quirks(void) { } +#endif + +/* -------------------------------------------------------------------------- + Processor + -------------------------------------------------------------------------- */ + +#define ACPI_PROCESSOR_LIMIT_NONE 0x00 +#define ACPI_PROCESSOR_LIMIT_INCREMENT 0x01 +#define ACPI_PROCESSOR_LIMIT_DECREMENT 0x02 + +/*-------------------------------------------------------------------------- + Dock Station + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI_DOCK +extern int is_dock_device(struct acpi_device *adev); +#else +static inline int is_dock_device(struct acpi_device *adev) +{ + return 0; +} +#endif /* CONFIG_ACPI_DOCK */ + +#endif /*__ACPI_DRIVERS_H__*/ diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h new file mode 100644 index 000000000..d0633fc1f --- /dev/null +++ b/include/acpi/acpi_io.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ACPI_IO_H_ +#define _ACPI_IO_H_ + +#include + +#include + +#ifndef acpi_os_ioremap +static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, + acpi_size size) +{ + return ioremap_cache(phys, size); +} +#endif + +extern bool acpi_permanent_mmap; + +void __iomem *__ref +acpi_os_map_iomem(acpi_physical_address phys, acpi_size size); +void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size); +void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size); + +int acpi_os_map_generic_address(struct acpi_generic_address *addr); +void acpi_os_unmap_generic_address(struct acpi_generic_address *addr); + +#endif diff --git a/include/acpi/acpi_lpat.h b/include/acpi/acpi_lpat.h new file mode 100644 index 000000000..da37e12d2 --- /dev/null +++ b/include/acpi/acpi_lpat.h @@ -0,0 +1,65 @@ +/* + * acpi_lpat.h - LPAT table processing functions + * + * Copyright (C) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef ACPI_LPAT_H +#define ACPI_LPAT_H + +struct acpi_lpat { + int temp; + int raw; +}; + +struct acpi_lpat_conversion_table { + struct acpi_lpat *lpat; + int lpat_count; +}; + +#ifdef CONFIG_ACPI + +int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table, + int raw); +int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table, + int temp); +struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle + handle); +void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table + *lpat_table); + +#else +static int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table, + int raw) +{ + return 0; +} + +static int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table, + int temp) +{ + return 0; +} + +static struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table( + acpi_handle handle) +{ + return NULL; +} + +static void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table + *lpat_table) +{ +} + +#endif +#endif diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h new file mode 100644 index 000000000..fdebcfc6c --- /dev/null +++ b/include/acpi/acpi_numa.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ACPI_NUMA_H +#define __ACPI_NUMA_H + +#ifdef CONFIG_ACPI_NUMA +#include +#include + +/* Proximity bitmap length */ +#if MAX_NUMNODES > 256 +#define MAX_PXM_DOMAINS MAX_NUMNODES +#else +#define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */ +#endif + +extern int pxm_to_node(int); +extern int node_to_pxm(int); +extern int acpi_map_pxm_to_node(int); +extern unsigned char acpi_srat_revision; +extern int acpi_numa __initdata; + +extern void bad_srat(void); +extern int srat_disabled(void); + +#endif /* CONFIG_ACPI_NUMA */ +#endif /* __ACP_NUMA_H */ diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h new file mode 100644 index 000000000..eb1f21af7 --- /dev/null +++ b/include/acpi/acpiosxf.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These + * interfaces must be implemented by OSL to interface the + * ACPI components to the host operating system. + * + * Copyright (C) 2000 - 2018, Intel Corp. 
+ * + *****************************************************************************/ + +#ifndef __ACPIOSXF_H__ +#define __ACPIOSXF_H__ + +#include +#include + +/* Types for acpi_os_execute */ + +typedef enum { + OSL_GLOBAL_LOCK_HANDLER, + OSL_NOTIFY_HANDLER, + OSL_GPE_HANDLER, + OSL_DEBUGGER_MAIN_THREAD, + OSL_DEBUGGER_EXEC_THREAD, + OSL_EC_POLL_HANDLER, + OSL_EC_BURST_HANDLER +} acpi_execute_type; + +#define ACPI_NO_UNIT_LIMIT ((u32) -1) +#define ACPI_MUTEX_SEM 1 + +/* Functions for acpi_os_signal */ + +#define ACPI_SIGNAL_FATAL 0 +#define ACPI_SIGNAL_BREAKPOINT 1 + +struct acpi_signal_fatal_info { + u32 type; + u32 code; + u32 argument; +}; + +/* + * OSL Initialization and shutdown primitives + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize +acpi_status acpi_os_initialize(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate +acpi_status acpi_os_terminate(void); +#endif + +/* + * ACPI Table interfaces + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_root_pointer +acpi_physical_address acpi_os_get_root_pointer(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override +acpi_status +acpi_os_predefined_override(const struct acpi_predefined_names *init_val, + acpi_string *new_val); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override +acpi_status +acpi_os_table_override(struct acpi_table_header *existing_table, + struct acpi_table_header **new_table); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_physical_table_override +acpi_status +acpi_os_physical_table_override(struct acpi_table_header *existing_table, + acpi_physical_address *new_address, + u32 *new_table_length); +#endif + +/* + * Spinlock primitives + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock +acpi_status acpi_os_create_lock(acpi_spinlock * out_handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_lock +void acpi_os_delete_lock(acpi_spinlock handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_lock +acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_lock +void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags); +#endif + +/* + * RAW spinlock primitives. 
If the OS does not provide them, fallback to + * spinlock primitives + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_raw_lock +# define acpi_os_create_raw_lock(out_handle) acpi_os_create_lock(out_handle) +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_raw_lock +# define acpi_os_delete_raw_lock(handle) acpi_os_delete_lock(handle) +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_raw_lock +# define acpi_os_acquire_raw_lock(handle) acpi_os_acquire_lock(handle) +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_raw_lock +# define acpi_os_release_raw_lock(handle, flags) \ + acpi_os_release_lock(handle, flags) +#endif + +/* + * Semaphore primitives + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_semaphore +acpi_status +acpi_os_create_semaphore(u32 max_units, + u32 initial_units, acpi_semaphore * out_handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_semaphore +acpi_status acpi_os_delete_semaphore(acpi_semaphore handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_semaphore +acpi_status +acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_signal_semaphore +acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units); +#endif + +/* + * Mutex primitives. May be configured to use semaphores instead via + * ACPI_MUTEX_TYPE (see platform/acenv.h) + */ +#if (ACPI_MUTEX_TYPE != ACPI_BINARY_SEMAPHORE) + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_mutex +acpi_status acpi_os_create_mutex(acpi_mutex * out_handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_mutex +void acpi_os_delete_mutex(acpi_mutex handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_mutex +acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_mutex +void acpi_os_release_mutex(acpi_mutex handle); +#endif + +#endif + +/* + * Memory allocation and mapping + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate +void *acpi_os_allocate(acpi_size size); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate_zeroed +void *acpi_os_allocate_zeroed(acpi_size size); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_free +void acpi_os_free(void *memory); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_map_memory +void *acpi_os_map_memory(acpi_physical_address where, acpi_size length); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_unmap_memory +void acpi_os_unmap_memory(void *logical_address, acpi_size size); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_physical_address +acpi_status +acpi_os_get_physical_address(void *logical_address, + acpi_physical_address *physical_address); +#endif + +/* + * Memory/Object Cache + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_cache +acpi_status +acpi_os_create_cache(char *cache_name, + u16 object_size, + u16 max_depth, acpi_cache_t ** return_cache); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_cache +acpi_status acpi_os_delete_cache(acpi_cache_t * cache); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_purge_cache +acpi_status acpi_os_purge_cache(acpi_cache_t * cache); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_object +void *acpi_os_acquire_object(acpi_cache_t * cache); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_object +acpi_status acpi_os_release_object(acpi_cache_t * cache, void 
*object); +#endif + +/* + * Interrupt handlers + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_install_interrupt_handler +acpi_status +acpi_os_install_interrupt_handler(u32 interrupt_number, + acpi_osd_handler service_routine, + void *context); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_remove_interrupt_handler +acpi_status +acpi_os_remove_interrupt_handler(u32 interrupt_number, + acpi_osd_handler service_routine); +#endif + +/* + * Threads and Scheduling + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id +acpi_thread_id acpi_os_get_thread_id(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_execute +acpi_status +acpi_os_execute(acpi_execute_type type, + acpi_osd_exec_callback function, void *context); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_events_complete +void acpi_os_wait_events_complete(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_sleep +void acpi_os_sleep(u64 milliseconds); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_stall +void acpi_os_stall(u32 microseconds); +#endif + +/* + * Platform and hardware-independent I/O interfaces + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_port +acpi_status acpi_os_read_port(acpi_io_address address, u32 *value, u32 width); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_port +acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width); +#endif + +/* + * Platform and hardware-independent physical memory interfaces + */ +int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width); + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_memory +acpi_status +acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_memory +acpi_status +acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width); +#endif + +/* + * Platform and hardware-independent PCI configuration space access + * Note: Can't use "Register" as a parameter, changed to "Reg" -- + * certain compilers complain. 
+ */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_pci_configuration +acpi_status +acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, + u32 reg, u64 *value, u32 width); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_pci_configuration +acpi_status +acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, + u32 reg, u64 value, u32 width); +#endif + +/* + * Miscellaneous + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable +u8 acpi_os_readable(void *pointer, acpi_size length); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable +u8 acpi_os_writable(void *pointer, acpi_size length); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_timer +u64 acpi_os_get_timer(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_signal +acpi_status acpi_os_signal(u32 function, void *info); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_enter_sleep +acpi_status acpi_os_enter_sleep(u8 sleep_state, u32 rega_value, u32 regb_value); +#endif + +/* + * Debug print routines + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_printf +void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *format, ...); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_vprintf +void acpi_os_vprintf(const char *format, va_list args); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output +void acpi_os_redirect_output(void *destination); +#endif + +/* + * Debug IO + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_line +acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_debugger +acpi_status acpi_os_initialize_debugger(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_debugger +void acpi_os_terminate_debugger(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_command_ready +acpi_status acpi_os_wait_command_ready(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_notify_command_complete +acpi_status acpi_os_notify_command_complete(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point +void +acpi_os_trace_point(acpi_trace_event_type type, + u8 begin, u8 *aml, char *pathname); +#endif + +/* + * Obtain ACPI table(s) + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_name +acpi_status +acpi_os_get_table_by_name(char *signature, + u32 instance, + struct acpi_table_header **table, + acpi_physical_address *address); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index +acpi_status +acpi_os_get_table_by_index(u32 index, + struct acpi_table_header **table, + u32 *instance, acpi_physical_address *address); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address +acpi_status +acpi_os_get_table_by_address(acpi_physical_address address, + struct acpi_table_header **table); +#endif + +/* + * Directory manipulation + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_directory +void *acpi_os_open_directory(char *pathname, + char *wildcard_spec, char requested_file_type); +#endif + +/* requeste_file_type values */ + +#define REQUEST_FILE_ONLY 0 +#define REQUEST_DIR_ONLY 1 + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename +char *acpi_os_get_next_filename(void *dir_handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory +void acpi_os_close_directory(void *dir_handle); +#endif + +#endif /* __ACPIOSXF_H__ */ diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h new file mode 100644 
index 000000000..9566f99cc --- /dev/null +++ b/include/acpi/acpixf.h @@ -0,0 +1,961 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acpixf.h - External interfaces to the ACPI subsystem + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACXFACE_H__ +#define __ACXFACE_H__ + +/* Current ACPICA subsystem version in YYYYMMDD format */ + +#define ACPI_CA_VERSION 0x20180810 + +#include +#include +#include +#include + +/***************************************************************************** + * + * Macros used for ACPICA globals and configuration + * + ****************************************************************************/ + +/* + * Ensure that global variables are defined and initialized only once. + * + * The use of these macros allows for a single list of globals (here) + * in order to simplify maintenance of the code. + */ +#ifdef DEFINE_ACPI_GLOBALS +#define ACPI_GLOBAL(type,name) \ + extern type name; \ + type name + +#define ACPI_INIT_GLOBAL(type,name,value) \ + type name=value + +#else +#ifndef ACPI_GLOBAL +#define ACPI_GLOBAL(type,name) \ + extern type name +#endif + +#ifndef ACPI_INIT_GLOBAL +#define ACPI_INIT_GLOBAL(type,name,value) \ + extern type name +#endif +#endif + +/* + * These macros configure the various ACPICA interfaces. They are + * useful for generating stub inline functions for features that are + * configured out of the current kernel or ACPICA application. + */ +#ifndef ACPI_EXTERNAL_RETURN_STATUS +#define ACPI_EXTERNAL_RETURN_STATUS(prototype) \ + prototype; +#endif + +#ifndef ACPI_EXTERNAL_RETURN_OK +#define ACPI_EXTERNAL_RETURN_OK(prototype) \ + prototype; +#endif + +#ifndef ACPI_EXTERNAL_RETURN_VOID +#define ACPI_EXTERNAL_RETURN_VOID(prototype) \ + prototype; +#endif + +#ifndef ACPI_EXTERNAL_RETURN_UINT32 +#define ACPI_EXTERNAL_RETURN_UINT32(prototype) \ + prototype; +#endif + +#ifndef ACPI_EXTERNAL_RETURN_PTR +#define ACPI_EXTERNAL_RETURN_PTR(prototype) \ + prototype; +#endif + +/***************************************************************************** + * + * Public globals and runtime configuration options + * + ****************************************************************************/ + +/* + * Enable "slack mode" of the AML interpreter? Default is FALSE, and the + * interpreter strictly follows the ACPI specification. Setting to TRUE + * allows the interpreter to ignore certain errors and/or bad AML constructs. + * + * Currently, these features are enabled by this flag: + * + * 1) Allow "implicit return" of last value in a control method + * 2) Allow access beyond the end of an operation region + * 3) Allow access to uninitialized locals/args (auto-init to integer 0) + * 4) Allow ANY object type to be a source operand for the Store() operator + * 5) Allow unresolved references (invalid target name) in package objects + * 6) Enable warning messages for behavior that is not ACPI spec compliant + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_interpreter_slack, FALSE); + +/* + * Automatically serialize all methods that create named objects? Default + * is TRUE, meaning that all non_serialized methods are scanned once at + * table load time to determine those that create named objects. Methods + * that create named objects are marked Serialized in order to prevent + * possible run-time problems if they are entered by more than one thread. 
+ */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_auto_serialize_methods, TRUE); + +/* + * Create the predefined _OSI method in the namespace? Default is TRUE + * because ACPICA is fully compatible with other ACPI implementations. + * Changing this will revert ACPICA (and machine ASL) to pre-OSI behavior. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_create_osi_method, TRUE); + +/* + * Optionally use default values for the ACPI register widths. Set this to + * TRUE to use the defaults, if an FADT contains incorrect widths/lengths. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_use_default_register_widths, TRUE); + +/* + * Whether or not to validate (map) an entire table to verify + * checksum/duplication in early stage before install. Set this to TRUE to + * allow early table validation before install it to the table manager. + * Note that enabling this option causes errors to happen in some OSPMs + * during early initialization stages. Default behavior is to allow such + * validation. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_table_validation, TRUE); + +/* + * Optionally enable output from the AML Debug Object. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_aml_debug_object, FALSE); + +/* + * Optionally copy the entire DSDT to local memory (instead of simply + * mapping it.) There are some BIOSs that corrupt or replace the original + * DSDT, creating the need for this option. Default is FALSE, do not copy + * the DSDT. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE); + +/* + * Optionally ignore an XSDT if present and use the RSDT instead. + * Although the ACPI specification requires that an XSDT be used instead + * of the RSDT, the XSDT has been found to be corrupt or ill-formed on + * some machines. Default behavior is to use the XSDT if present. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE); + +/* + * Optionally support group module level code. + * NOTE, this is essentially obsolete and will be removed soon + * (01/2018). + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE); + +/* + * Optionally support module level code by parsing an entire table as + * a method as it is loaded. Default is TRUE. + * NOTE, this is essentially obsolete and will be removed soon + * (01/2018). + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_execute_tables_as_methods, TRUE); + +/* + * Optionally use 32-bit FADT addresses if and when there is a conflict + * (address mismatch) between the 32-bit and 64-bit versions of the + * address. Although ACPICA adheres to the ACPI specification which + * requires the use of the corresponding 64-bit address if it is non-zero, + * some machines have been found to have a corrupted non-zero 64-bit + * address. Default is FALSE, do not favor the 32-bit addresses. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE); + +/* + * Optionally use 32-bit FACS table addresses. + * It is reported that some platforms fail to resume from system suspending + * if 64-bit FACS table address is selected: + * https://bugzilla.kernel.org/show_bug.cgi?id=74021 + * Default is TRUE, favor the 32-bit addresses. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_facs_addresses, TRUE); + +/* + * Optionally truncate I/O addresses to 16 bits. Provides compatibility + * with other ACPI implementations. NOTE: During ACPICA initialization, + * this value is set to TRUE if any Windows OSI strings have been + * requested by the BIOS. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_truncate_io_addresses, FALSE); + +/* + * Disable runtime checking and repair of values returned by control methods. 
+ * Use only if the repair is causing a problem on a particular machine. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_auto_repair, FALSE); + +/* + * Optionally do not install any SSDTs from the RSDT/XSDT during initialization. + * This can be useful for debugging ACPI problems on some machines. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_ssdt_table_install, FALSE); + +/* + * Optionally enable runtime namespace override. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_runtime_namespace_override, TRUE); + +/* + * We keep track of the latest version of Windows that has been requested by + * the BIOS. ACPI 5.0. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0); + +/* + * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning + * that the ACPI hardware is no longer required. A flag in the FADT indicates + * a reduced HW machine, and that flag is duplicated here for convenience. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE); + +/* + * Maximum timeout for While() loop iterations before forced method abort. + * This mechanism is intended to prevent infinite loops during interpreter + * execution within a host kernel. + */ +ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_TIMEOUT); + +/* + * Optionally ignore AE_NOT_FOUND errors from named reference package elements + * during DSDT/SSDT table loading. This reduces error "noise" in platforms + * whose firmware is carrying around a bunch of unused package objects that + * refer to non-existent named objects. However, If the AML actually tries to + * use such a package, the unresolved element(s) will be replaced with NULL + * elements. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_package_resolution_errors, FALSE); + +/* + * This mechanism is used to trace a specified AML method. The method is + * traced each time it is executed. + */ +ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_flags, 0); +ACPI_INIT_GLOBAL(const char *, acpi_gbl_trace_method_name, NULL); +ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_level, ACPI_TRACE_LEVEL_DEFAULT); +ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_layer, ACPI_TRACE_LAYER_DEFAULT); + +/* + * Runtime configuration of debug output control masks. We want the debug + * switches statically initialized so they are already set when the debugger + * is entered. + */ +ACPI_INIT_GLOBAL(u32, acpi_dbg_level, ACPI_DEBUG_DEFAULT); +ACPI_INIT_GLOBAL(u32, acpi_dbg_layer, 0); + +/* Optionally enable timer output with Debug Object output */ + +ACPI_INIT_GLOBAL(u8, acpi_gbl_display_debug_timer, FALSE); + +/* + * Debugger command handshake globals. Host OSes need to access these + * variables to implement their own command handshake mechanism. + */ +#ifdef ACPI_DEBUGGER +ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE); +ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]); +#endif + +/* + * Other miscellaneous globals + */ +ACPI_GLOBAL(struct acpi_table_fadt, acpi_gbl_FADT); +ACPI_GLOBAL(u32, acpi_current_gpe_count); +ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running); + +/***************************************************************************** + * + * ACPICA public interface configuration. + * + * Interfaces that are configured out of the ACPICA build are replaced + * by inlined stubs by default. + * + ****************************************************************************/ + +/* + * Hardware-reduced prototypes (default: Not hardware reduced). 
+ * + * All ACPICA hardware-related interfaces that use these macros will be + * configured out of the ACPICA build if the ACPI_REDUCED_HARDWARE flag + * is set to TRUE. + * + * Note: This static build option for reduced hardware is intended to + * reduce ACPICA code size if desired or necessary. However, even if this + * option is not specified, the runtime behavior of ACPICA is dependent + * on the actual FADT reduced hardware flag (HW_REDUCED_ACPI). If set, + * the flag will enable similar behavior -- ACPICA will not attempt + * to access any ACPI-relate hardware (SCI, GPEs, Fixed Events, etc.) + */ +#if (!ACPI_REDUCED_HARDWARE) +#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \ + ACPI_EXTERNAL_RETURN_STATUS(prototype) + +#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \ + ACPI_EXTERNAL_RETURN_OK(prototype) + +#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \ + ACPI_EXTERNAL_RETURN_VOID(prototype) + +#else +#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \ + static ACPI_INLINE prototype {return(AE_NOT_CONFIGURED);} + +#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \ + static ACPI_INLINE prototype {return(AE_OK);} + +#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \ + static ACPI_INLINE prototype {return;} + +#endif /* !ACPI_REDUCED_HARDWARE */ + +/* + * Error message prototypes (default: error messages enabled). + * + * All interfaces related to error and warning messages + * will be configured out of the ACPICA build if the + * ACPI_NO_ERROR_MESSAGE flag is defined. + */ +#ifndef ACPI_NO_ERROR_MESSAGES +#define ACPI_MSG_DEPENDENT_RETURN_VOID(prototype) \ + prototype; + +#else +#define ACPI_MSG_DEPENDENT_RETURN_VOID(prototype) \ + static ACPI_INLINE prototype {return;} + +#endif /* ACPI_NO_ERROR_MESSAGES */ + +/* + * Debugging output prototypes (default: no debug output). + * + * All interfaces related to debug output messages + * will be configured out of the ACPICA build unless the + * ACPI_DEBUG_OUTPUT flag is defined. + */ +#ifdef ACPI_DEBUG_OUTPUT +#define ACPI_DBG_DEPENDENT_RETURN_VOID(prototype) \ + prototype; + +#else +#define ACPI_DBG_DEPENDENT_RETURN_VOID(prototype) \ + static ACPI_INLINE prototype {return;} + +#endif /* ACPI_DEBUG_OUTPUT */ + +/* + * Application prototypes + * + * All interfaces used by application will be configured + * out of the ACPICA build unless the ACPI_APPLICATION + * flag is defined. + */ +#ifdef ACPI_APPLICATION +#define ACPI_APP_DEPENDENT_RETURN_VOID(prototype) \ + prototype; + +#else +#define ACPI_APP_DEPENDENT_RETURN_VOID(prototype) \ + static ACPI_INLINE prototype {return;} + +#endif /* ACPI_APPLICATION */ + +/* + * Debugger prototypes + * + * All interfaces used by debugger will be configured + * out of the ACPICA build unless the ACPI_DEBUGGER + * flag is defined. 
+ */ +#ifdef ACPI_DEBUGGER +#define ACPI_DBR_DEPENDENT_RETURN_OK(prototype) \ + ACPI_EXTERNAL_RETURN_OK(prototype) + +#define ACPI_DBR_DEPENDENT_RETURN_VOID(prototype) \ + ACPI_EXTERNAL_RETURN_VOID(prototype) + +#else +#define ACPI_DBR_DEPENDENT_RETURN_OK(prototype) \ + static ACPI_INLINE prototype {return(AE_OK);} + +#define ACPI_DBR_DEPENDENT_RETURN_VOID(prototype) \ + static ACPI_INLINE prototype {return;} + +#endif /* ACPI_DEBUGGER */ + +/***************************************************************************** + * + * ACPICA public interface prototypes + * + ****************************************************************************/ + +/* + * Initialization + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_initialize_tables(struct acpi_table_desc + *initial_storage, + u32 initial_table_count, + u8 allow_resize)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_initialize_subsystem(void)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_enable_subsystem(u32 flags)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_initialize_objects(u32 flags)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_terminate(void)) + +/* + * Miscellaneous global interfaces + */ +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_subsystem_status(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_system_info(struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_statistics(struct acpi_statistics *stats)) +ACPI_EXTERNAL_RETURN_PTR(const char + *acpi_format_exception(acpi_status exception)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_purge_cached_objects(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_interface(acpi_string interface_name)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_remove_interface(acpi_string interface_name)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_update_interfaces(u8 action)) + +ACPI_EXTERNAL_RETURN_UINT32(u32 + acpi_check_address_range(acpi_adr_space_type + space_id, + acpi_physical_address + address, acpi_size length, + u8 warn)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_decode_pld_buffer(u8 *in_buffer, + acpi_size length, + struct acpi_pld_info + **return_buffer)) + +/* + * ACPI table load/unload interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_install_table(acpi_physical_address address, + u8 physical)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_load_table(struct acpi_table_header *table)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_unload_parent_table(acpi_handle object)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_load_tables(void)) + +/* + * ACPI table manipulation interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_reallocate_root_table(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_find_root_pointer(acpi_physical_address + *rsdp_address)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_table_header(acpi_string signature, + u32 instance, + struct acpi_table_header + *out_table_header)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_table(acpi_string signature, u32 instance, + struct acpi_table_header + **out_table)) +ACPI_EXTERNAL_RETURN_VOID(void acpi_put_table(struct acpi_table_header *table)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + 
acpi_get_table_by_index(u32 table_index, + struct acpi_table_header + **out_table)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_table_handler(acpi_table_handler + handler, void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_remove_table_handler(acpi_table_handler + handler)) + +/* + * Namespace and name interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_walk_namespace(acpi_object_type type, + acpi_handle start_object, + u32 max_depth, + acpi_walk_callback + descending_callback, + acpi_walk_callback + ascending_callback, + void *context, + void **return_value)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_devices(const char *HID, + acpi_walk_callback user_function, + void *context, + void **return_value)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_name(acpi_handle object, u32 name_type, + struct acpi_buffer *ret_path_ptr)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_handle(acpi_handle parent, + acpi_string pathname, + acpi_handle *ret_handle)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_attach_data(acpi_handle object, + acpi_object_handler handler, + void *data)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_detach_data(acpi_handle object, + acpi_object_handler handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_data(acpi_handle object, + acpi_object_handler handler, + void **data)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_debug_trace(const char *name, u32 debug_level, + u32 debug_layer, u32 flags)) + +/* + * Object manipulation and enumeration + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_evaluate_object(acpi_handle object, + acpi_string pathname, + struct acpi_object_list + *parameter_objects, + struct acpi_buffer + *return_object_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_evaluate_object_typed(acpi_handle object, + acpi_string pathname, + struct acpi_object_list + *external_params, + struct acpi_buffer + *return_buffer, + acpi_object_type + return_type)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_object_info(acpi_handle object, + struct acpi_device_info + **return_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_install_method(u8 *buffer)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_next_object(acpi_object_type type, + acpi_handle parent, + acpi_handle child, + acpi_handle *out_handle)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_type(acpi_handle object, + acpi_object_type *out_type)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_parent(acpi_handle object, + acpi_handle *out_handle)) + +/* + * Handler interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_initialization_handler + (acpi_init_handler handler, u32 function)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_sci_handler(acpi_sci_handler + address, + void *context)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_remove_sci_handler(acpi_sci_handler + address)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_global_event_handler + (acpi_gbl_event_handler handler, + void *context)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_fixed_event_handler(u32 + acpi_event, + acpi_event_handler + handler, + void + *context)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_remove_fixed_event_handler(u32 acpi_event, + acpi_event_handler + handler)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_gpe_handler(acpi_handle + gpe_device, + u32 gpe_number, + u32 type, + acpi_gpe_handler + address, + void *context)) 
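The GPE handler prototypes above are easiest to read with a concrete caller in mind. The sketch below is an editor's illustration, not part of the patch: the device handle, GPE number and function names are hypothetical placeholders, the trigger type is chosen arbitrarily for the example, and it pairs acpi_install_gpe_handler() with acpi_enable_gpe(), which is declared a little further down.

#include <linux/acpi.h>

/* Hypothetical callback; the signature follows the acpi_gpe_handler typedef */
static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
			       void *context)
{
	/* Handle the event, then let ACPICA clear and re-enable the GPE */
	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}

static acpi_status example_setup_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	status = acpi_install_gpe_handler(gpe_device, gpe_number,
					  ACPI_GPE_LEVEL_TRIGGERED,
					  example_gpe_handler, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* Runtime GPEs must be enabled explicitly once the handler is in place */
	return acpi_enable_gpe(gpe_device, gpe_number);
}
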
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_gpe_raw_handler(acpi_handle + gpe_device, + u32 gpe_number, + u32 type, + acpi_gpe_handler + address, + void *context)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_remove_gpe_handler(acpi_handle gpe_device, + u32 gpe_number, + acpi_gpe_handler + address)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_notify_handler(acpi_handle device, + u32 handler_type, + acpi_notify_handler + handler, + void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_remove_notify_handler(acpi_handle device, + u32 handler_type, + acpi_notify_handler + handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_address_space_handler(acpi_handle + device, + acpi_adr_space_type + space_id, + acpi_adr_space_handler + handler, + acpi_adr_space_setup + setup, + void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_remove_address_space_handler(acpi_handle + device, + acpi_adr_space_type + space_id, + acpi_adr_space_handler + handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_exception_handler + (acpi_exception_handler handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_interface_handler + (acpi_interface_handler handler)) + +/* + * Global Lock interfaces + */ +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_acquire_global_lock(u16 timeout, + u32 *handle)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_release_global_lock(u32 handle)) + +/* + * Interfaces to AML mutex objects + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_acquire_mutex(acpi_handle handle, + acpi_string pathname, + u16 timeout)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_release_mutex(acpi_handle handle, + acpi_string pathname)) + +/* + * Fixed Event interfaces + */ +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_enable_event(u32 event, u32 flags)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_disable_event(u32 event, u32 flags)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_clear_event(u32 event)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_get_event_status(u32 event, + acpi_event_status + *event_status)) + +/* + * General Purpose Event (GPE) Interfaces + */ +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_update_all_gpes(void)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_enable_gpe(acpi_handle gpe_device, + u32 gpe_number)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_disable_gpe(acpi_handle gpe_device, + u32 gpe_number)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_clear_gpe(acpi_handle gpe_device, + u32 gpe_number)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_set_gpe(acpi_handle gpe_device, + u32 gpe_number, u8 action)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_finish_gpe(acpi_handle gpe_device, + u32 gpe_number)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_mask_gpe(acpi_handle gpe_device, + u32 gpe_number, u8 is_masked)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_mark_gpe_for_wake(acpi_handle gpe_device, + u32 gpe_number)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_setup_gpe_for_wake(acpi_handle + parent_device, + acpi_handle gpe_device, + u32 gpe_number)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_set_gpe_wake_mask(acpi_handle gpe_device, + u32 gpe_number, + u8 action)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_get_gpe_status(acpi_handle gpe_device, + u32 gpe_number, + acpi_event_status + *event_status)) +ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_dispatch_gpe(acpi_handle 
gpe_device, u32 gpe_number)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_get_gpe_device(u32 gpe_index, + acpi_handle *gpe_device)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_gpe_block(acpi_handle gpe_device, + struct + acpi_generic_address + *gpe_block_address, + u32 register_count, + u32 interrupt_number)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_remove_gpe_block(acpi_handle gpe_device)) + +/* + * Resource interfaces + */ +typedef +acpi_status (*acpi_walk_resource_callback) (struct acpi_resource * resource, + void *context); + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_vendor_resource(acpi_handle device, + char *name, + struct acpi_vendor_uuid + *uuid, + struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_current_resources(acpi_handle device, + struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_possible_resources(acpi_handle device, + struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_event_resources(acpi_handle device_handle, + struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_walk_resource_buffer(struct acpi_buffer + *buffer, + acpi_walk_resource_callback + user_function, + void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_walk_resources(acpi_handle device, char *name, + acpi_walk_resource_callback + user_function, void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_set_current_resources(acpi_handle device, + struct acpi_buffer + *in_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_irq_routing_table(acpi_handle device, + struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_resource_to_address64(struct acpi_resource + *resource, + struct + acpi_resource_address64 + *out)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_buffer_to_resource(u8 *aml_buffer, + u16 aml_buffer_length, + struct acpi_resource + **resource_ptr)) + +/* + * Hardware (ACPI device) interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_reset(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_read(u64 *value, + struct acpi_generic_address *reg)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_write(u64 value, + struct acpi_generic_address *reg)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_read_bit_register(u32 register_id, + u32 *return_value)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_write_bit_register(u32 register_id, + u32 value)) + +/* + * Sleep/Wake interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_sleep_type_data(u8 sleep_state, + u8 *slp_typ_a, + u8 *slp_typ_b)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_enter_sleep_state_prep(u8 sleep_state)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_enter_sleep_state(u8 sleep_state)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enter_sleep_state_s4bios(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_leave_sleep_state_prep(u8 sleep_state)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_set_firmware_waking_vector + (acpi_physical_address physical_address, + acpi_physical_address physical_address64)) +/* + * ACPI Timer interfaces 
+ */ +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_get_timer_resolution(u32 *resolution)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_timer(u32 *ticks)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_get_timer_duration(u32 start_ticks, + u32 end_ticks, + u32 *time_elapsed)) + +/* + * Error/Warning output + */ +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) + void ACPI_INTERNAL_VAR_XFACE + acpi_error(const char *module_name, + u32 line_number, + const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(4) + void ACPI_INTERNAL_VAR_XFACE + acpi_exception(const char *module_name, + u32 line_number, + acpi_status status, + const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) + void ACPI_INTERNAL_VAR_XFACE + acpi_warning(const char *module_name, + u32 line_number, + const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1) + void ACPI_INTERNAL_VAR_XFACE + acpi_info(const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) + void ACPI_INTERNAL_VAR_XFACE + acpi_bios_error(const char *module_name, + u32 line_number, + const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) + void ACPI_INTERNAL_VAR_XFACE + acpi_bios_warning(const char *module_name, + u32 line_number, + const char *format, ...)) + +/* + * Debug output + */ +ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6) + void ACPI_INTERNAL_VAR_XFACE + acpi_debug_print(u32 requested_debug_level, + u32 line_number, + const char *function_name, + const char *module_name, + u32 component_id, + const char *format, ...)) +ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6) + void ACPI_INTERNAL_VAR_XFACE + acpi_debug_print_raw(u32 requested_debug_level, + u32 line_number, + const char *function_name, + const char *module_name, + u32 component_id, + const char *format, ...)) + +ACPI_DBG_DEPENDENT_RETURN_VOID(void + acpi_trace_point(acpi_trace_event_type type, + u8 begin, + u8 *aml, char *pathname)) + +acpi_status acpi_initialize_debugger(void); + +void acpi_terminate_debugger(void); + +/* + * Divergences + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_data_full(acpi_handle object, + acpi_object_handler handler, + void **data, + void (*callback)(void *))) + +void acpi_run_debugger(char *batch_buffer); + +void acpi_set_debugger_thread_id(acpi_thread_id thread_id); + +#endif /* __ACXFACE_H__ */ diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h new file mode 100644 index 000000000..724ad5f29 --- /dev/null +++ b/include/acpi/acrestyp.h @@ -0,0 +1,678 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acrestyp.h - Defines, types, and structures for resource descriptors + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACRESTYP_H__ +#define __ACRESTYP_H__ + +/* + * Definitions for Resource Attributes + */ +typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */ +typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64K-1)+3 */ + +/* + * Memory Attributes + */ +#define ACPI_READ_ONLY_MEMORY (u8) 0x00 +#define ACPI_READ_WRITE_MEMORY (u8) 0x01 + +#define ACPI_NON_CACHEABLE_MEMORY (u8) 0x00 +#define ACPI_CACHABLE_MEMORY (u8) 0x01 +#define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02 +#define ACPI_PREFETCHABLE_MEMORY (u8) 0x03 + +/*! 
[Begin] no source code translation */ +/* + * IO Attributes + * The ISA IO ranges are: n000-n0FFh, n400-n4FFh, n800-n8FFh, nC00-nCFFh. + * The non-ISA IO ranges are: n100-n3FFh, n500-n7FFh, n900-nBFFh, nCD0-nFFFh. + */ +/*! [End] no source code translation !*/ + +#define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01 +#define ACPI_ISA_ONLY_RANGES (u8) 0x02 +#define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES) + +/* Type of translation - 1=Sparse, 0=Dense */ + +#define ACPI_SPARSE_TRANSLATION (u8) 0x01 + +/* + * IO Port Descriptor Decode + */ +#define ACPI_DECODE_10 (u8) 0x00 /* 10-bit IO address decode */ +#define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */ + +/* + * Interrupt attributes - used in multiple descriptors + */ + +/* Triggering */ + +#define ACPI_LEVEL_SENSITIVE (u8) 0x00 +#define ACPI_EDGE_SENSITIVE (u8) 0x01 + +/* Polarity */ + +#define ACPI_ACTIVE_HIGH (u8) 0x00 +#define ACPI_ACTIVE_LOW (u8) 0x01 +#define ACPI_ACTIVE_BOTH (u8) 0x02 + +/* Sharing */ + +#define ACPI_EXCLUSIVE (u8) 0x00 +#define ACPI_SHARED (u8) 0x01 + +/* Wake */ + +#define ACPI_NOT_WAKE_CAPABLE (u8) 0x00 +#define ACPI_WAKE_CAPABLE (u8) 0x01 + +/* + * DMA Attributes + */ +#define ACPI_COMPATIBILITY (u8) 0x00 +#define ACPI_TYPE_A (u8) 0x01 +#define ACPI_TYPE_B (u8) 0x02 +#define ACPI_TYPE_F (u8) 0x03 + +#define ACPI_NOT_BUS_MASTER (u8) 0x00 +#define ACPI_BUS_MASTER (u8) 0x01 + +#define ACPI_TRANSFER_8 (u8) 0x00 +#define ACPI_TRANSFER_8_16 (u8) 0x01 +#define ACPI_TRANSFER_16 (u8) 0x02 + +/* + * Start Dependent Functions Priority definitions + */ +#define ACPI_GOOD_CONFIGURATION (u8) 0x00 +#define ACPI_ACCEPTABLE_CONFIGURATION (u8) 0x01 +#define ACPI_SUB_OPTIMAL_CONFIGURATION (u8) 0x02 + +/* + * 16, 32 and 64-bit Address Descriptor resource types + */ +#define ACPI_MEMORY_RANGE (u8) 0x00 +#define ACPI_IO_RANGE (u8) 0x01 +#define ACPI_BUS_NUMBER_RANGE (u8) 0x02 + +#define ACPI_ADDRESS_NOT_FIXED (u8) 0x00 +#define ACPI_ADDRESS_FIXED (u8) 0x01 + +#define ACPI_POS_DECODE (u8) 0x00 +#define ACPI_SUB_DECODE (u8) 0x01 + +/* Producer/Consumer */ + +#define ACPI_PRODUCER (u8) 0x00 +#define ACPI_CONSUMER (u8) 0x01 + +/* + * If possible, pack the following structures to byte alignment + */ +#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED +#pragma pack(1) +#endif + +/* UUID data structures for use in vendor-defined resource descriptors */ + +struct acpi_uuid { + u8 data[ACPI_UUID_LENGTH]; +}; + +struct acpi_vendor_uuid { + u8 subtype; + u8 data[ACPI_UUID_LENGTH]; +}; + +/* + * Structures used to describe device resources + */ +struct acpi_resource_irq { + u8 descriptor_length; + u8 triggering; + u8 polarity; + u8 sharable; + u8 wake_capable; + u8 interrupt_count; + u8 interrupts[1]; +}; + +struct acpi_resource_dma { + u8 type; + u8 bus_master; + u8 transfer; + u8 channel_count; + u8 channels[1]; +}; + +struct acpi_resource_start_dependent { + u8 descriptor_length; + u8 compatibility_priority; + u8 performance_robustness; +}; + +/* + * The END_DEPENDENT_FUNCTIONS_RESOURCE struct is not + * needed because it has no fields + */ + +struct acpi_resource_io { + u8 io_decode; + u8 alignment; + u8 address_length; + u16 minimum; + u16 maximum; +}; + +struct acpi_resource_fixed_io { + u16 address; + u8 address_length; +}; + +struct acpi_resource_fixed_dma { + u16 request_lines; + u16 channels; + u8 width; +}; + +/* Values for Width field above */ + +#define ACPI_DMA_WIDTH8 0 +#define ACPI_DMA_WIDTH16 1 +#define ACPI_DMA_WIDTH32 2 +#define ACPI_DMA_WIDTH64 3 +#define ACPI_DMA_WIDTH128 4 +#define ACPI_DMA_WIDTH256 5 + 
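The descriptor structures above are normally consumed through the resource-walk interfaces declared in acpixf.h earlier in this patch; struct acpi_resource and the ACPI_RESOURCE_TYPE_* codes it is dispatched on appear later in this header. The fragment below is an illustrative sketch only: the callback and variable names are invented, and METHOD_NAME__CRS ("_CRS") comes from acnames.h. It shows a walk callback decoding a legacy IRQ descriptor with the interrupt attribute values defined above.

#include <acpi/acpi.h>

/*
 * Illustrative acpi_walk_resource_callback: remember the most recent shared,
 * level-triggered, active-low legacy IRQ seen while walking a device's _CRS.
 */
static acpi_status example_crs_callback(struct acpi_resource *res, void *context)
{
	u32 *irq_out = context;

	if (res->type != ACPI_RESOURCE_TYPE_IRQ)
		return AE_OK;	/* ignore other descriptor types */

	if (res->data.irq.interrupt_count &&
	    res->data.irq.triggering == ACPI_LEVEL_SENSITIVE &&
	    res->data.irq.polarity == ACPI_ACTIVE_LOW &&
	    res->data.irq.sharable == ACPI_SHARED)
		*irq_out = res->data.irq.interrupts[0];

	return AE_OK;
}

/* Walk the device's current resource settings (_CRS); 'device' is an assumed handle */
static acpi_status example_find_irq(acpi_handle device, u32 *irq_out)
{
	return acpi_walk_resources(device, METHOD_NAME__CRS,
				   example_crs_callback, irq_out);
}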
+struct acpi_resource_vendor { + u16 byte_length; + u8 byte_data[1]; +}; + +/* Vendor resource with UUID info (introduced in ACPI 3.0) */ + +struct acpi_resource_vendor_typed { + u16 byte_length; + u8 uuid_subtype; + u8 uuid[ACPI_UUID_LENGTH]; + u8 byte_data[1]; +}; + +struct acpi_resource_end_tag { + u8 checksum; +}; + +struct acpi_resource_memory24 { + u8 write_protect; + u16 minimum; + u16 maximum; + u16 alignment; + u16 address_length; +}; + +struct acpi_resource_memory32 { + u8 write_protect; + u32 minimum; + u32 maximum; + u32 alignment; + u32 address_length; +}; + +struct acpi_resource_fixed_memory32 { + u8 write_protect; + u32 address; + u32 address_length; +}; + +struct acpi_memory_attribute { + u8 write_protect; + u8 caching; + u8 range_type; + u8 translation; +}; + +struct acpi_io_attribute { + u8 range_type; + u8 translation; + u8 translation_type; + u8 reserved1; +}; + +union acpi_resource_attribute { + struct acpi_memory_attribute mem; + struct acpi_io_attribute io; + + /* Used for the *word_space macros */ + + u8 type_specific; +}; + +struct acpi_resource_label { + u16 string_length; + char *string_ptr; +}; + +struct acpi_resource_source { + u8 index; + u16 string_length; + char *string_ptr; +}; + +/* Fields common to all address descriptors, 16/32/64 bit */ + +#define ACPI_RESOURCE_ADDRESS_COMMON \ + u8 resource_type; \ + u8 producer_consumer; \ + u8 decode; \ + u8 min_address_fixed; \ + u8 max_address_fixed; \ + union acpi_resource_attribute info; + +struct acpi_address16_attribute { + u16 granularity; + u16 minimum; + u16 maximum; + u16 translation_offset; + u16 address_length; +}; + +struct acpi_address32_attribute { + u32 granularity; + u32 minimum; + u32 maximum; + u32 translation_offset; + u32 address_length; +}; + +struct acpi_address64_attribute { + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; +}; + +struct acpi_resource_address { +ACPI_RESOURCE_ADDRESS_COMMON}; + +struct acpi_resource_address16 { + ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address16_attribute address; + struct acpi_resource_source resource_source; +}; + +struct acpi_resource_address32 { + ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address32_attribute address; + struct acpi_resource_source resource_source; +}; + +struct acpi_resource_address64 { + ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address64_attribute address; + struct acpi_resource_source resource_source; +}; + +struct acpi_resource_extended_address64 { + ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID; + struct acpi_address64_attribute address; + u64 type_specific; +}; + +struct acpi_resource_extended_irq { + u8 producer_consumer; + u8 triggering; + u8 polarity; + u8 sharable; + u8 wake_capable; + u8 interrupt_count; + struct acpi_resource_source resource_source; + u32 interrupts[1]; +}; + +struct acpi_resource_generic_register { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +}; + +struct acpi_resource_gpio { + u8 revision_id; + u8 connection_type; + u8 producer_consumer; /* For values, see Producer/Consumer above */ + u8 pin_config; + u8 sharable; /* For values, see Interrupt Attributes above */ + u8 wake_capable; /* For values, see Interrupt Attributes above */ + u8 io_restriction; + u8 triggering; /* For values, see Interrupt Attributes above */ + u8 polarity; /* For values, see Interrupt Attributes above */ + u16 drive_strength; + u16 debounce_timeout; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; 
+ u8 *vendor_data; +}; + +/* Values for GPIO connection_type field above */ + +#define ACPI_RESOURCE_GPIO_TYPE_INT 0 +#define ACPI_RESOURCE_GPIO_TYPE_IO 1 + +/* Values for pin_config field above */ + +#define ACPI_PIN_CONFIG_DEFAULT 0 +#define ACPI_PIN_CONFIG_PULLUP 1 +#define ACPI_PIN_CONFIG_PULLDOWN 2 +#define ACPI_PIN_CONFIG_NOPULL 3 + +/* Values for io_restriction field above */ + +#define ACPI_IO_RESTRICT_NONE 0 +#define ACPI_IO_RESTRICT_INPUT 1 +#define ACPI_IO_RESTRICT_OUTPUT 2 +#define ACPI_IO_RESTRICT_NONE_PRESERVE 3 + +/* Common structure for I2C, SPI, and UART serial descriptors */ + +#define ACPI_RESOURCE_SERIAL_COMMON \ + u8 revision_id; \ + u8 type; \ + u8 producer_consumer; /* For values, see Producer/Consumer above */\ + u8 slave_mode; \ + u8 connection_sharing; \ + u8 type_revision_id; \ + u16 type_data_length; \ + u16 vendor_length; \ + struct acpi_resource_source resource_source; \ + u8 *vendor_data; + +struct acpi_resource_common_serialbus { +ACPI_RESOURCE_SERIAL_COMMON}; + +/* Values for the Type field above */ + +#define ACPI_RESOURCE_SERIAL_TYPE_I2C 1 +#define ACPI_RESOURCE_SERIAL_TYPE_SPI 2 +#define ACPI_RESOURCE_SERIAL_TYPE_UART 3 + +/* Values for slave_mode field above */ + +#define ACPI_CONTROLLER_INITIATED 0 +#define ACPI_DEVICE_INITIATED 1 + +struct acpi_resource_i2c_serialbus { + ACPI_RESOURCE_SERIAL_COMMON u8 access_mode; + u16 slave_address; + u32 connection_speed; +}; + +/* Values for access_mode field above */ + +#define ACPI_I2C_7BIT_MODE 0 +#define ACPI_I2C_10BIT_MODE 1 + +struct acpi_resource_spi_serialbus { + ACPI_RESOURCE_SERIAL_COMMON u8 wire_mode; + u8 device_polarity; + u8 data_bit_length; + u8 clock_phase; + u8 clock_polarity; + u16 device_selection; + u32 connection_speed; +}; + +/* Values for wire_mode field above */ + +#define ACPI_SPI_4WIRE_MODE 0 +#define ACPI_SPI_3WIRE_MODE 1 + +/* Values for device_polarity field above */ + +#define ACPI_SPI_ACTIVE_LOW 0 +#define ACPI_SPI_ACTIVE_HIGH 1 + +/* Values for clock_phase field above */ + +#define ACPI_SPI_FIRST_PHASE 0 +#define ACPI_SPI_SECOND_PHASE 1 + +/* Values for clock_polarity field above */ + +#define ACPI_SPI_START_LOW 0 +#define ACPI_SPI_START_HIGH 1 + +struct acpi_resource_uart_serialbus { + ACPI_RESOURCE_SERIAL_COMMON u8 endian; + u8 data_bits; + u8 stop_bits; + u8 flow_control; + u8 parity; + u8 lines_enabled; + u16 rx_fifo_size; + u16 tx_fifo_size; + u32 default_baud_rate; +}; + +/* Values for Endian field above */ + +#define ACPI_UART_LITTLE_ENDIAN 0 +#define ACPI_UART_BIG_ENDIAN 1 + +/* Values for data_bits field above */ + +#define ACPI_UART_5_DATA_BITS 0 +#define ACPI_UART_6_DATA_BITS 1 +#define ACPI_UART_7_DATA_BITS 2 +#define ACPI_UART_8_DATA_BITS 3 +#define ACPI_UART_9_DATA_BITS 4 + +/* Values for stop_bits field above */ + +#define ACPI_UART_NO_STOP_BITS 0 +#define ACPI_UART_1_STOP_BIT 1 +#define ACPI_UART_1P5_STOP_BITS 2 +#define ACPI_UART_2_STOP_BITS 3 + +/* Values for flow_control field above */ + +#define ACPI_UART_FLOW_CONTROL_NONE 0 +#define ACPI_UART_FLOW_CONTROL_HW 1 +#define ACPI_UART_FLOW_CONTROL_XON_XOFF 2 + +/* Values for Parity field above */ + +#define ACPI_UART_PARITY_NONE 0 +#define ACPI_UART_PARITY_EVEN 1 +#define ACPI_UART_PARITY_ODD 2 +#define ACPI_UART_PARITY_MARK 3 +#define ACPI_UART_PARITY_SPACE 4 + +/* Values for lines_enabled bitfield above */ + +#define ACPI_UART_CARRIER_DETECT (1<<2) +#define ACPI_UART_RING_INDICATOR (1<<3) +#define ACPI_UART_DATA_SET_READY (1<<4) +#define ACPI_UART_DATA_TERMINAL_READY (1<<5) +#define ACPI_UART_CLEAR_TO_SEND (1<<6) 
+#define ACPI_UART_REQUEST_TO_SEND (1<<7) + +struct acpi_resource_pin_function { + u8 revision_id; + u8 pin_config; + u8 sharable; /* For values, see Interrupt Attributes above */ + u16 function_number; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +}; + +struct acpi_resource_pin_config { + u8 revision_id; + u8 producer_consumer; /* For values, see Producer/Consumer above */ + u8 sharable; /* For values, see Interrupt Attributes above */ + u8 pin_config_type; + u32 pin_config_value; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +}; + +/* Values for pin_config_type field above */ + +#define ACPI_PIN_CONFIG_DEFAULT 0 +#define ACPI_PIN_CONFIG_BIAS_PULL_UP 1 +#define ACPI_PIN_CONFIG_BIAS_PULL_DOWN 2 +#define ACPI_PIN_CONFIG_BIAS_DEFAULT 3 +#define ACPI_PIN_CONFIG_BIAS_DISABLE 4 +#define ACPI_PIN_CONFIG_BIAS_HIGH_IMPEDANCE 5 +#define ACPI_PIN_CONFIG_BIAS_BUS_HOLD 6 +#define ACPI_PIN_CONFIG_DRIVE_OPEN_DRAIN 7 +#define ACPI_PIN_CONFIG_DRIVE_OPEN_SOURCE 8 +#define ACPI_PIN_CONFIG_DRIVE_PUSH_PULL 9 +#define ACPI_PIN_CONFIG_DRIVE_STRENGTH 10 +#define ACPI_PIN_CONFIG_SLEW_RATE 11 +#define ACPI_PIN_CONFIG_INPUT_DEBOUNCE 12 +#define ACPI_PIN_CONFIG_INPUT_SCHMITT_TRIGGER 13 + +struct acpi_resource_pin_group { + u8 revision_id; + u8 producer_consumer; /* For values, see Producer/Consumer above */ + u16 pin_table_length; + u16 vendor_length; + u16 *pin_table; + struct acpi_resource_label resource_label; + u8 *vendor_data; +}; + +struct acpi_resource_pin_group_function { + u8 revision_id; + u8 producer_consumer; /* For values, see Producer/Consumer above */ + u8 sharable; /* For values, see Interrupt Attributes above */ + u16 function_number; + u16 vendor_length; + struct acpi_resource_source resource_source; + struct acpi_resource_label resource_source_label; + u8 *vendor_data; +}; + +struct acpi_resource_pin_group_config { + u8 revision_id; + u8 producer_consumer; /* For values, see Producer/Consumer above */ + u8 sharable; /* For values, see Interrupt Attributes above */ + u8 pin_config_type; /* For values, see pin_config_type above */ + u32 pin_config_value; + u16 vendor_length; + struct acpi_resource_source resource_source; + struct acpi_resource_label resource_source_label; + u8 *vendor_data; +}; + +/* ACPI_RESOURCE_TYPEs */ + +#define ACPI_RESOURCE_TYPE_IRQ 0 +#define ACPI_RESOURCE_TYPE_DMA 1 +#define ACPI_RESOURCE_TYPE_START_DEPENDENT 2 +#define ACPI_RESOURCE_TYPE_END_DEPENDENT 3 +#define ACPI_RESOURCE_TYPE_IO 4 +#define ACPI_RESOURCE_TYPE_FIXED_IO 5 +#define ACPI_RESOURCE_TYPE_VENDOR 6 +#define ACPI_RESOURCE_TYPE_END_TAG 7 +#define ACPI_RESOURCE_TYPE_MEMORY24 8 +#define ACPI_RESOURCE_TYPE_MEMORY32 9 +#define ACPI_RESOURCE_TYPE_FIXED_MEMORY32 10 +#define ACPI_RESOURCE_TYPE_ADDRESS16 11 +#define ACPI_RESOURCE_TYPE_ADDRESS32 12 +#define ACPI_RESOURCE_TYPE_ADDRESS64 13 +#define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 14 /* ACPI 3.0 */ +#define ACPI_RESOURCE_TYPE_EXTENDED_IRQ 15 +#define ACPI_RESOURCE_TYPE_GENERIC_REGISTER 16 +#define ACPI_RESOURCE_TYPE_GPIO 17 /* ACPI 5.0 */ +#define ACPI_RESOURCE_TYPE_FIXED_DMA 18 /* ACPI 5.0 */ +#define ACPI_RESOURCE_TYPE_SERIAL_BUS 19 /* ACPI 5.0 */ +#define ACPI_RESOURCE_TYPE_PIN_FUNCTION 20 /* ACPI 6.2 */ +#define ACPI_RESOURCE_TYPE_PIN_CONFIG 21 /* ACPI 6.2 */ +#define ACPI_RESOURCE_TYPE_PIN_GROUP 22 /* ACPI 6.2 */ +#define ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION 23 /* ACPI 6.2 */ +#define 
ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG 24 /* ACPI 6.2 */ +#define ACPI_RESOURCE_TYPE_MAX 24 + +/* Master union for resource descriptors */ + +union acpi_resource_data { + struct acpi_resource_irq irq; + struct acpi_resource_dma dma; + struct acpi_resource_start_dependent start_dpf; + struct acpi_resource_io io; + struct acpi_resource_fixed_io fixed_io; + struct acpi_resource_fixed_dma fixed_dma; + struct acpi_resource_vendor vendor; + struct acpi_resource_vendor_typed vendor_typed; + struct acpi_resource_end_tag end_tag; + struct acpi_resource_memory24 memory24; + struct acpi_resource_memory32 memory32; + struct acpi_resource_fixed_memory32 fixed_memory32; + struct acpi_resource_address16 address16; + struct acpi_resource_address32 address32; + struct acpi_resource_address64 address64; + struct acpi_resource_extended_address64 ext_address64; + struct acpi_resource_extended_irq extended_irq; + struct acpi_resource_generic_register generic_reg; + struct acpi_resource_gpio gpio; + struct acpi_resource_i2c_serialbus i2c_serial_bus; + struct acpi_resource_spi_serialbus spi_serial_bus; + struct acpi_resource_uart_serialbus uart_serial_bus; + struct acpi_resource_common_serialbus common_serial_bus; + struct acpi_resource_pin_function pin_function; + struct acpi_resource_pin_config pin_config; + struct acpi_resource_pin_group pin_group; + struct acpi_resource_pin_group_function pin_group_function; + struct acpi_resource_pin_group_config pin_group_config; + + /* Common fields */ + + struct acpi_resource_address address; /* Common 16/32/64 address fields */ +}; + +/* Common resource header */ + +struct acpi_resource { + u32 type; + u32 length; + union acpi_resource_data data; +}; + +/* restore default alignment */ + +#pragma pack() + +#define ACPI_RS_SIZE_NO_DATA 8 /* Id + Length fields */ +#define ACPI_RS_SIZE_MIN (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12) +#define ACPI_RS_SIZE(type) (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type)) + +/* Macro for walking resource templates with multiple descriptors */ + +#define ACPI_NEXT_RESOURCE(res) \ + ACPI_ADD_PTR (struct acpi_resource, (res), (res)->length) + +struct acpi_pci_routing_table { + u32 length; + u32 pin; + u64 address; /* here for 64-bit alignment */ + u32 source_index; + char source[4]; /* pad to 64 bits so sizeof() works in all cases */ +}; + +#endif /* __ACRESTYP_H__ */ diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h new file mode 100644 index 000000000..517addd6b --- /dev/null +++ b/include/acpi/actbl.h @@ -0,0 +1,400 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: actbl.h - Basic ACPI Table Definitions + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACTBL_H__ +#define __ACTBL_H__ + +/******************************************************************************* + * + * Fundamental ACPI tables + * + * This file contains definitions for the ACPI tables that are directly consumed + * by ACPICA. All other tables are consumed by the OS-dependent ACPI-related + * device drivers and other OS support code. + * + * The RSDP and FACS do not use the common ACPI table header. All other ACPI + * tables use the header. + * + ******************************************************************************/ + +/* + * Values for description table header signatures for tables defined in this + * file. 
Useful because they make it more difficult to inadvertently type in + * the wrong signature. + */ +#define ACPI_SIG_DSDT "DSDT" /* Differentiated System Description Table */ +#define ACPI_SIG_FADT "FACP" /* Fixed ACPI Description Table */ +#define ACPI_SIG_FACS "FACS" /* Firmware ACPI Control Structure */ +#define ACPI_SIG_OSDT "OSDT" /* Override System Description Table */ +#define ACPI_SIG_PSDT "PSDT" /* Persistent System Description Table */ +#define ACPI_SIG_RSDP "RSD PTR " /* Root System Description Pointer */ +#define ACPI_SIG_RSDT "RSDT" /* Root System Description Table */ +#define ACPI_SIG_XSDT "XSDT" /* Extended System Description Table */ +#define ACPI_SIG_SSDT "SSDT" /* Secondary System Description Table */ +#define ACPI_RSDP_NAME "RSDP" /* Short name for RSDP, not signature */ + +/* + * All tables and structures must be byte-packed to match the ACPI + * specification, since the tables are provided by the system BIOS + */ +#pragma pack(1) + +/* + * Note: C bitfields are not used for this reason: + * + * "Bitfields are great and easy to read, but unfortunately the C language + * does not specify the layout of bitfields in memory, which means they are + * essentially useless for dealing with packed data in on-disk formats or + * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me, + * this decision was a design error in C. Ritchie could have picked an order + * and stuck with it." Norman Ramsey. + * See http://stackoverflow.com/a/1053662/41661 + */ + +/******************************************************************************* + * + * Master ACPI Table Header. This common header is used by all ACPI tables + * except the RSDP and FACS. + * + ******************************************************************************/ + +struct acpi_table_header { + char signature[ACPI_NAME_SIZE]; /* ASCII table signature */ + u32 length; /* Length of table in bytes, including this header */ + u8 revision; /* ACPI Specification minor version number */ + u8 checksum; /* To make sum of entire table == 0 */ + char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */ + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */ + u32 oem_revision; /* OEM revision number */ + char asl_compiler_id[ACPI_NAME_SIZE]; /* ASCII ASL compiler vendor ID */ + u32 asl_compiler_revision; /* ASL compiler version */ +}; + +/******************************************************************************* + * + * GAS - Generic Address Structure (ACPI 2.0+) + * + * Note: Since this structure is used in the ACPI tables, it is byte aligned. + * If misaligned access is not supported by the hardware, accesses to the + * 64-bit Address field must be performed with care. 
+ * + ******************************************************************************/ + +struct acpi_generic_address { + u8 space_id; /* Address space where struct or register exists */ + u8 bit_width; /* Size in bits of given register */ + u8 bit_offset; /* Bit offset within the register */ + u8 access_width; /* Minimum Access size (ACPI 3.0) */ + u64 address; /* 64-bit address of struct or register */ +}; + +/******************************************************************************* + * + * RSDP - Root System Description Pointer (Signature is "RSD PTR ") + * Version 2 + * + ******************************************************************************/ + +struct acpi_table_rsdp { + char signature[8]; /* ACPI signature, contains "RSD PTR " */ + u8 checksum; /* ACPI 1.0 checksum */ + char oem_id[ACPI_OEM_ID_SIZE]; /* OEM identification */ + u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */ + u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */ + u32 length; /* Table length in bytes, including header (ACPI 2.0+) */ + u64 xsdt_physical_address; /* 64-bit physical address of the XSDT (ACPI 2.0+) */ + u8 extended_checksum; /* Checksum of entire table (ACPI 2.0+) */ + u8 reserved[3]; /* Reserved, must be zero */ +}; + +/* Standalone struct for the ACPI 1.0 RSDP */ + +struct acpi_rsdp_common { + char signature[8]; + u8 checksum; + char oem_id[ACPI_OEM_ID_SIZE]; + u8 revision; + u32 rsdt_physical_address; +}; + +/* Standalone struct for the extended part of the RSDP (ACPI 2.0+) */ + +struct acpi_rsdp_extension { + u32 length; + u64 xsdt_physical_address; + u8 extended_checksum; + u8 reserved[3]; +}; + +/******************************************************************************* + * + * RSDT/XSDT - Root System Description Tables + * Version 1 (both) + * + ******************************************************************************/ + +struct acpi_table_rsdt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */ +}; + +struct acpi_table_xsdt { + struct acpi_table_header header; /* Common ACPI table header */ + u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */ +}; + +#define ACPI_RSDT_ENTRY_SIZE (sizeof (u32)) +#define ACPI_XSDT_ENTRY_SIZE (sizeof (u64)) + +/******************************************************************************* + * + * FACS - Firmware ACPI Control Structure (FACS) + * + ******************************************************************************/ + +struct acpi_table_facs { + char signature[4]; /* ASCII table signature */ + u32 length; /* Length of structure, in bytes */ + u32 hardware_signature; /* Hardware configuration signature */ + u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector */ + u32 global_lock; /* Global Lock for shared hardware resources */ + u32 flags; + u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */ + u8 version; /* Version of this table (ACPI 2.0+) */ + u8 reserved[3]; /* Reserved, must be zero */ + u32 ospm_flags; /* Flags to be set by OSPM (ACPI 4.0) */ + u8 reserved1[24]; /* Reserved, must be zero */ +}; + +/* Masks for global_lock flag field above */ + +#define ACPI_GLOCK_PENDING (1) /* 00: Pending global lock ownership */ +#define ACPI_GLOCK_OWNED (1<<1) /* 01: Global lock is owned */ + +/* Masks for Flags field above */ + +#define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */ +#define ACPI_FACS_64BIT_WAKE (1<<1) /* 
01: 64-bit wake vector supported (ACPI 4.0) */ + +/* Masks for ospm_flags field above */ + +#define ACPI_FACS_64BIT_ENVIRONMENT (1) /* 00: 64-bit wake environment is required (ACPI 4.0) */ + +/******************************************************************************* + * + * FADT - Fixed ACPI Description Table (Signature "FACP") + * Version 6 + * + ******************************************************************************/ + +/* Fields common to all versions of the FADT */ + +struct acpi_table_fadt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 facs; /* 32-bit physical address of FACS */ + u32 dsdt; /* 32-bit physical address of DSDT */ + u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ + u8 preferred_profile; /* Conveys preferred power management profile to OSPM. */ + u16 sci_interrupt; /* System vector of SCI interrupt */ + u32 smi_command; /* 32-bit Port address of SMI command port */ + u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */ + u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */ + u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */ + u8 pstate_control; /* Processor performance state control */ + u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */ + u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */ + u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */ + u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */ + u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */ + u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ + u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */ + u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */ + u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */ + u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */ + u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */ + u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */ + u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */ + u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */ + u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */ + u8 cst_control; /* Support for the _CST object and C-States change notification */ + u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */ + u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */ + u16 flush_size; /* Processor memory cache line width, in bytes */ + u16 flush_stride; /* Number of flush strides that need to be read */ + u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */ + u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */ + u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ + u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ + u8 century; /* Index to century in RTC CMOS RAM */ + u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */ + u8 reserved; /* Reserved, must be zero */ + u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ + struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ + u8 reset_value; /* Value to write to the reset_register port to reset the system */ + u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ + u8 
minor_revision; /* FADT Minor Revision (ACPI 5.1) */ + u64 Xfacs; /* 64-bit physical address of FACS */ + u64 Xdsdt; /* 64-bit physical address of DSDT */ + struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */ + struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */ + struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */ + struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */ + struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */ + struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ + struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */ + struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ + struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */ + struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */ + u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */ +}; + +/* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */ + +#define ACPI_FADT_LEGACY_DEVICES (1) /* 00: [V2] System has LPC or ISA bus devices */ +#define ACPI_FADT_8042 (1<<1) /* 01: [V3] System has an 8042 controller on port 60/64 */ +#define ACPI_FADT_NO_VGA (1<<2) /* 02: [V4] It is not safe to probe for VGA hardware */ +#define ACPI_FADT_NO_MSI (1<<3) /* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */ +#define ACPI_FADT_NO_ASPM (1<<4) /* 04: [V4] PCIe ASPM control must not be enabled */ +#define ACPI_FADT_NO_CMOS_RTC (1<<5) /* 05: [V5] No CMOS real-time clock present */ + +#define FADT2_REVISION_ID 3 + +/* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */ + +#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */ +#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */ + +/* Masks for FADT flags */ + +#define ACPI_FADT_WBINVD (1) /* 00: [V1] The WBINVD instruction works properly */ +#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: [V1] WBINVD flushes but does not invalidate caches */ +#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: [V1] All processors support C1 state */ +#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: [V1] C2 state works on MP system */ +#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: [V1] Power button is handled as a control method device */ +#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: [V1] Sleep button is handled as a control method device */ +#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: [V1] RTC wakeup status is not in fixed register space */ +#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: [V1] RTC alarm can wake system from S4 */ +#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: [V1] ACPI timer width is 32-bit (0=24-bit) */ +#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: [V1] Docking supported */ +#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: [V2] System reset via the FADT RESET_REG supported */ +#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: [V3] No internal expansion capabilities and case is sealed */ +#define ACPI_FADT_HEADLESS (1<<12) /* 12: [V3] No local video capabilities or local input devices */ +#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: [V3] Must execute native instruction after 
writing SLP_TYPx register */ +#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: [V4] System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */ +#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: [V4] OSPM should use platform-provided timer (ACPI 3.0) */ +#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: [V4] Contents of RTC_STS valid after S4 wake (ACPI 3.0) */ +#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */ +#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */ +#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local xAPICs must use physical dest mode (ACPI 3.0) */ +#define ACPI_FADT_HW_REDUCED (1<<20) /* 20: [V5] ACPI hardware is not implemented (ACPI 5.0) */ +#define ACPI_FADT_LOW_POWER_S0 (1<<21) /* 21: [V5] S0 power savings are equal or better than S3 (ACPI 5.0) */ + +/* Values for preferred_profile (Preferred Power Management Profiles) */ + +enum acpi_preferred_pm_profiles { + PM_UNSPECIFIED = 0, + PM_DESKTOP = 1, + PM_MOBILE = 2, + PM_WORKSTATION = 3, + PM_ENTERPRISE_SERVER = 4, + PM_SOHO_SERVER = 5, + PM_APPLIANCE_PC = 6, + PM_PERFORMANCE_SERVER = 7, + PM_TABLET = 8 +}; + +/* Values for sleep_status and sleep_control registers (V5+ FADT) */ + +#define ACPI_X_WAKE_STATUS 0x80 +#define ACPI_X_SLEEP_TYPE_MASK 0x1C +#define ACPI_X_SLEEP_TYPE_POSITION 0x02 +#define ACPI_X_SLEEP_ENABLE 0x20 + +/* Reset to default packing */ + +#pragma pack() + +/* + * Internal table-related structures + */ +union acpi_name_union { + u32 integer; + char ascii[4]; +}; + +/* Internal ACPI Table Descriptor. One per ACPI table. */ + +struct acpi_table_desc { + acpi_physical_address address; + struct acpi_table_header *pointer; + u32 length; /* Length fixed at 32 bits (fixed in table header) */ + union acpi_name_union signature; + acpi_owner_id owner_id; + u8 flags; + u16 validation_count; +}; + +/* + * Maximum value of the validation_count field in struct acpi_table_desc. + * When reached, validation_count cannot be changed any more and the table will + * be permanently regarded as validated. + * + * This is to prevent situations in which unbalanced table get/put operations + * may cause premature table unmapping in the OS to happen. + * + * The maximum validation count can be defined to any value, but should be + * greater than the maximum number of OS early stage mapping slots to avoid + * leaking early stage table mappings to the late stage. + */ +#define ACPI_MAX_TABLE_VALIDATIONS ACPI_UINT16_MAX + +/* Masks for Flags field above */ + +#define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL (0) /* Virtual address, externally maintained */ +#define ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL (1) /* Physical address, internally mapped */ +#define ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL (2) /* Virtual address, internally allocated */ +#define ACPI_TABLE_ORIGIN_MASK (3) +#define ACPI_TABLE_IS_VERIFIED (4) +#define ACPI_TABLE_IS_LOADED (8) + +/* + * Get the remaining ACPI tables + */ +#include <acpi/actbl1.h> +#include <acpi/actbl2.h> +#include <acpi/actbl3.h> + +/* Macros used to generate offsets to specific table fields */ + +#define ACPI_FADT_OFFSET(f) (u16) ACPI_OFFSET (struct acpi_table_fadt, f) + +/* + * Sizes of the various flavors of FADT. We need to look closely + * at the FADT length because the version number essentially tells + * us nothing because of many BIOS bugs where the version does not + * match the expected length. In other words, the length of the + * FADT is the bottom line as to what the version really is.
+ * + * For reference, the values below are as follows: + * FADT V1 size: 0x074 + * FADT V2 size: 0x084 + * FADT V3 size: 0x0F4 + * FADT V4 size: 0x0F4 + * FADT V5 size: 0x10C + * FADT V6 size: 0x114 + */ +#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) +#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1) +#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) +#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) +#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) + +#define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)" + +#endif /* __ACTBL_H__ */ diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h new file mode 100644 index 000000000..ab424509c --- /dev/null +++ b/include/acpi/actbl1.h @@ -0,0 +1,1626 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: actbl1.h - Additional ACPI table definitions + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACTBL1_H__ +#define __ACTBL1_H__ + +/******************************************************************************* + * + * Additional ACPI Tables + * + * These tables are not consumed directly by the ACPICA subsystem, but are + * included here to support device drivers and the AML disassembler. + * + ******************************************************************************/ + +/* + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. + */ +#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */ +#define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */ +#define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */ +#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ +#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */ +#define ACPI_SIG_CSRT "CSRT" /* Core System Resource Table */ +#define ACPI_SIG_DBG2 "DBG2" /* Debug Port table type 2 */ +#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */ +#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */ +#define ACPI_SIG_DRTM "DRTM" /* Dynamic Root of Trust for Measurement table */ +#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */ +#define ACPI_SIG_EINJ "EINJ" /* Error Injection table */ +#define ACPI_SIG_ERST "ERST" /* Error Record Serialization Table */ +#define ACPI_SIG_FPDT "FPDT" /* Firmware Performance Data Table */ +#define ACPI_SIG_GTDT "GTDT" /* Generic Timer Description Table */ +#define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */ +#define ACPI_SIG_HMAT "HMAT" /* Heterogeneous Memory Attributes Table */ +#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ +#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */ + +#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */ +#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */ + +/* Reserved table signatures */ + +#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */ +#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */ + +/* + * These tables have been seen in the field, but no definition has been found + */ +#ifdef ACPI_UNDEFINED_TABLES +#define ACPI_SIG_ATKG "ATKG" +#define ACPI_SIG_GSCI "GSCI" /* GMCH SCI table */ +#define ACPI_SIG_IEIT "IEIT" +#endif + +/* + * All tables must be byte-packed to match the 
ACPI specification, since + * the tables are provided by the system BIOS. + */ +#pragma pack(1) + +/* + * Note: C bitfields are not used for this reason: + * + * "Bitfields are great and easy to read, but unfortunately the C language + * does not specify the layout of bitfields in memory, which means they are + * essentially useless for dealing with packed data in on-disk formats or + * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me, + * this decision was a design error in C. Ritchie could have picked an order + * and stuck with it." Norman Ramsey. + * See http://stackoverflow.com/a/1053662/41661 + */ + +/******************************************************************************* + * + * Common subtable headers + * + ******************************************************************************/ + +/* Generic subtable header (used in MADT, SRAT, etc.) */ + +struct acpi_subtable_header { + u8 type; + u8 length; +}; + +/* Subtable header for WHEA tables (EINJ, ERST, WDAT) */ + +struct acpi_whea_header { + u8 action; + u8 instruction; + u8 flags; + u8 reserved; + struct acpi_generic_address register_region; + u64 value; /* Value used with Read/Write register */ + u64 mask; /* Bitmask required for this register instruction */ +}; + +/******************************************************************************* + * + * ASF - Alert Standard Format table (Signature "ASF!") + * Revision 0x10 + * + * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003 + * + ******************************************************************************/ + +struct acpi_table_asf { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* ASF subtable header */ + +struct acpi_asf_header { + u8 type; + u8 reserved; + u16 length; +}; + +/* Values for Type field above */ + +enum acpi_asf_type { + ACPI_ASF_TYPE_INFO = 0, + ACPI_ASF_TYPE_ALERT = 1, + ACPI_ASF_TYPE_CONTROL = 2, + ACPI_ASF_TYPE_BOOT = 3, + ACPI_ASF_TYPE_ADDRESS = 4, + ACPI_ASF_TYPE_RESERVED = 5 +}; + +/* + * ASF subtables + */ + +/* 0: ASF Information */ + +struct acpi_asf_info { + struct acpi_asf_header header; + u8 min_reset_value; + u8 min_poll_interval; + u16 system_id; + u32 mfg_id; + u8 flags; + u8 reserved2[3]; +}; + +/* Masks for Flags field above */ + +#define ACPI_ASF_SMBUS_PROTOCOLS (1) + +/* 1: ASF Alerts */ + +struct acpi_asf_alert { + struct acpi_asf_header header; + u8 assert_mask; + u8 deassert_mask; + u8 alerts; + u8 data_length; +}; + +struct acpi_asf_alert_data { + u8 address; + u8 command; + u8 mask; + u8 value; + u8 sensor_type; + u8 type; + u8 offset; + u8 source_type; + u8 severity; + u8 sensor_number; + u8 entity; + u8 instance; +}; + +/* 2: ASF Remote Control */ + +struct acpi_asf_remote { + struct acpi_asf_header header; + u8 controls; + u8 data_length; + u16 reserved2; +}; + +struct acpi_asf_control_data { + u8 function; + u8 address; + u8 command; + u8 value; +}; + +/* 3: ASF RMCP Boot Options */ + +struct acpi_asf_rmcp { + struct acpi_asf_header header; + u8 capabilities[7]; + u8 completion_code; + u32 enterprise_id; + u8 command; + u16 parameter; + u16 boot_options; + u16 oem_parameters; +}; + +/* 4: ASF Address */ + +struct acpi_asf_address { + struct acpi_asf_header header; + u8 eprom_address; + u8 devices; +}; + +/******************************************************************************* + * + * BERT - Boot Error Record Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct 
acpi_table_bert { + struct acpi_table_header header; /* Common ACPI table header */ + u32 region_length; /* Length of the boot error region */ + u64 address; /* Physical address of the error region */ +}; + +/* Boot Error Region (not a subtable, pointed to by Address field above) */ + +struct acpi_bert_region { + u32 block_status; /* Type of error information */ + u32 raw_data_offset; /* Offset to raw error data */ + u32 raw_data_length; /* Length of raw error data */ + u32 data_length; /* Length of generic error data */ + u32 error_severity; /* Severity code */ +}; + +/* Values for block_status flags above */ + +#define ACPI_BERT_UNCORRECTABLE (1) +#define ACPI_BERT_CORRECTABLE (1<<1) +#define ACPI_BERT_MULTIPLE_UNCORRECTABLE (1<<2) +#define ACPI_BERT_MULTIPLE_CORRECTABLE (1<<3) +#define ACPI_BERT_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */ + +/* Values for error_severity above */ + +enum acpi_bert_error_severity { + ACPI_BERT_ERROR_CORRECTABLE = 0, + ACPI_BERT_ERROR_FATAL = 1, + ACPI_BERT_ERROR_CORRECTED = 2, + ACPI_BERT_ERROR_NONE = 3, + ACPI_BERT_ERROR_RESERVED = 4 /* 4 and greater are reserved */ +}; + +/* + * Note: The generic error data that follows the error_severity field above + * uses the struct acpi_hest_generic_data defined under the HEST table below + */ + +/******************************************************************************* + * + * BGRT - Boot Graphics Resource Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_bgrt { + struct acpi_table_header header; /* Common ACPI table header */ + u16 version; + u8 status; + u8 image_type; + u64 image_address; + u32 image_offset_x; + u32 image_offset_y; +}; + +/* Flags for Status field above */ + +#define ACPI_BGRT_DISPLAYED (1) +#define ACPI_BGRT_ORIENTATION_OFFSET (3 << 1) + +/******************************************************************************* + * + * BOOT - Simple Boot Flag Table + * Version 1 + * + * Conforms to the "Simple Boot Flag Specification", Version 2.1 + * + ******************************************************************************/ + +struct acpi_table_boot { + struct acpi_table_header header; /* Common ACPI table header */ + u8 cmos_index; /* Index in CMOS RAM for the boot register */ + u8 reserved[3]; +}; + +/******************************************************************************* + * + * CPEP - Corrected Platform Error Polling table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_cpep { + struct acpi_table_header header; /* Common ACPI table header */ + u64 reserved; +}; + +/* Subtable */ + +struct acpi_cpep_polling { + struct acpi_subtable_header header; + u8 id; /* Processor ID */ + u8 eid; /* Processor EID */ + u32 interval; /* Polling interval (msec) */ +}; + +/******************************************************************************* + * + * CSRT - Core System Resource Table + * Version 0 + * + * Conforms to the "Core System Resource Table (CSRT)", November 14, 2011 + * + ******************************************************************************/ + +struct acpi_table_csrt { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* Resource Group subtable */ + +struct acpi_csrt_group { + u32 length; + u32 vendor_id; + u32 subvendor_id; + u16 device_id; + u16 subdevice_id; + u16 revision; + u16 reserved; + u32 shared_info_length; + + /* Shared data immediately follows (Length = 
shared_info_length) */ +}; + +/* Shared Info subtable */ + +struct acpi_csrt_shared_info { + u16 major_version; + u16 minor_version; + u32 mmio_base_low; + u32 mmio_base_high; + u32 gsi_interrupt; + u8 interrupt_polarity; + u8 interrupt_mode; + u8 num_channels; + u8 dma_address_width; + u16 base_request_line; + u16 num_handshake_signals; + u32 max_block_size; + + /* Resource descriptors immediately follow (Length = Group length - shared_info_length) */ +}; + +/* Resource Descriptor subtable */ + +struct acpi_csrt_descriptor { + u32 length; + u16 type; + u16 subtype; + u32 uid; + + /* Resource-specific information immediately follows */ +}; + +/* Resource Types */ + +#define ACPI_CSRT_TYPE_INTERRUPT 0x0001 +#define ACPI_CSRT_TYPE_TIMER 0x0002 +#define ACPI_CSRT_TYPE_DMA 0x0003 + +/* Resource Subtypes */ + +#define ACPI_CSRT_XRUPT_LINE 0x0000 +#define ACPI_CSRT_XRUPT_CONTROLLER 0x0001 +#define ACPI_CSRT_TIMER 0x0000 +#define ACPI_CSRT_DMA_CHANNEL 0x0000 +#define ACPI_CSRT_DMA_CONTROLLER 0x0001 + +/******************************************************************************* + * + * DBG2 - Debug Port Table 2 + * Version 0 (Both main table and subtables) + * + * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015 + * + ******************************************************************************/ + +struct acpi_table_dbg2 { + struct acpi_table_header header; /* Common ACPI table header */ + u32 info_offset; + u32 info_count; +}; + +struct acpi_dbg2_header { + u32 info_offset; + u32 info_count; +}; + +/* Debug Device Information Subtable */ + +struct acpi_dbg2_device { + u8 revision; + u16 length; + u8 register_count; /* Number of base_address registers */ + u16 namepath_length; + u16 namepath_offset; + u16 oem_data_length; + u16 oem_data_offset; + u16 port_type; + u16 port_subtype; + u16 reserved; + u16 base_address_offset; + u16 address_size_offset; + /* + * Data that follows: + * base_address (required) - Each in 12-byte Generic Address Structure format. + * address_size (required) - Array of u32 sizes corresponding to each base_address register. + * Namepath (required) - Null terminated string. Single dot if not supported. + * oem_data (optional) - Length is oem_data_length. 
+ */ +}; + +/* Types for port_type field above */ + +#define ACPI_DBG2_SERIAL_PORT 0x8000 +#define ACPI_DBG2_1394_PORT 0x8001 +#define ACPI_DBG2_USB_PORT 0x8002 +#define ACPI_DBG2_NET_PORT 0x8003 + +/* Subtypes for port_subtype field above */ + +#define ACPI_DBG2_16550_COMPATIBLE 0x0000 +#define ACPI_DBG2_16550_SUBSET 0x0001 +#define ACPI_DBG2_ARM_PL011 0x0003 +#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D +#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E +#define ACPI_DBG2_ARM_DCC 0x000F +#define ACPI_DBG2_BCM2835 0x0010 + +#define ACPI_DBG2_1394_STANDARD 0x0000 + +#define ACPI_DBG2_USB_XHCI 0x0000 +#define ACPI_DBG2_USB_EHCI 0x0001 + +/******************************************************************************* + * + * DBGP - Debug Port table + * Version 1 + * + * Conforms to the "Debug Port Specification", Version 1.00, 2/9/2000 + * + ******************************************************************************/ + +struct acpi_table_dbgp { + struct acpi_table_header header; /* Common ACPI table header */ + u8 type; /* 0=full 16550, 1=subset of 16550 */ + u8 reserved[3]; + struct acpi_generic_address debug_port; +}; + +/******************************************************************************* + * + * DMAR - DMA Remapping table + * Version 1 + * + * Conforms to "Intel Virtualization Technology for Directed I/O", + * Version 2.3, October 2014 + * + ******************************************************************************/ + +struct acpi_table_dmar { + struct acpi_table_header header; /* Common ACPI table header */ + u8 width; /* Host Address Width */ + u8 flags; + u8 reserved[10]; +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_INTR_REMAP (1) +#define ACPI_DMAR_X2APIC_OPT_OUT (1<<1) +#define ACPI_DMAR_X2APIC_MODE (1<<2) + +/* DMAR subtable header */ + +struct acpi_dmar_header { + u16 type; + u16 length; +}; + +/* Values for subtable type in struct acpi_dmar_header */ + +enum acpi_dmar_type { + ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, + ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, + ACPI_DMAR_TYPE_ROOT_ATS = 2, + ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, + ACPI_DMAR_TYPE_NAMESPACE = 4, + ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */ +}; + +/* DMAR Device Scope structure */ + +struct acpi_dmar_device_scope { + u8 entry_type; + u8 length; + u16 reserved; + u8 enumeration_id; + u8 bus; +}; + +/* Values for entry_type in struct acpi_dmar_device_scope - device types */ + +enum acpi_dmar_scope_type { + ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, + ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, + ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, + ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, + ACPI_DMAR_SCOPE_TYPE_HPET = 4, + ACPI_DMAR_SCOPE_TYPE_NAMESPACE = 5, + ACPI_DMAR_SCOPE_TYPE_RESERVED = 6 /* 6 and greater are reserved */ +}; + +struct acpi_dmar_pci_path { + u8 device; + u8 function; +}; + +/* + * DMAR Subtables, correspond to Type in struct acpi_dmar_header + */ + +/* 0: Hardware Unit Definition */ + +struct acpi_dmar_hardware_unit { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; + u64 address; /* Register Base Address */ +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_INCLUDE_ALL (1) + +/* 1: Reserved Memory Defininition */ + +struct acpi_dmar_reserved_memory { + struct acpi_dmar_header header; + u16 reserved; + u16 segment; + u64 base_address; /* 4K aligned base address */ + u64 end_address; /* 4K aligned limit address */ +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_ALLOW_ALL (1) + +/* 2: Root Port ATS Capability Reporting Structure */ + +struct acpi_dmar_atsr { + 
struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_ALL_PORTS (1) + +/* 3: Remapping Hardware Static Affinity Structure */ + +struct acpi_dmar_rhsa { + struct acpi_dmar_header header; + u32 reserved; + u64 base_address; + u32 proximity_domain; +}; + +/* 4: ACPI Namespace Device Declaration Structure */ + +struct acpi_dmar_andd { + struct acpi_dmar_header header; + u8 reserved[3]; + u8 device_number; + char device_name[1]; +}; + +/******************************************************************************* + * + * DRTM - Dynamic Root of Trust for Measurement table + * Conforms to "TCG D-RTM Architecture" June 17 2013, Version 1.0.0 + * Table version 1 + * + ******************************************************************************/ + +struct acpi_table_drtm { + struct acpi_table_header header; /* Common ACPI table header */ + u64 entry_base_address; + u64 entry_length; + u32 entry_address32; + u64 entry_address64; + u64 exit_address; + u64 log_area_address; + u32 log_area_length; + u64 arch_dependent_address; + u32 flags; +}; + +/* Flag Definitions for above */ + +#define ACPI_DRTM_ACCESS_ALLOWED (1) +#define ACPI_DRTM_ENABLE_GAP_CODE (1<<1) +#define ACPI_DRTM_INCOMPLETE_MEASUREMENTS (1<<2) +#define ACPI_DRTM_AUTHORITY_ORDER (1<<3) + +/* 1) Validated Tables List (64-bit addresses) */ + +struct acpi_drtm_vtable_list { + u32 validated_table_count; + u64 validated_tables[1]; +}; + +/* 2) Resources List (of Resource Descriptors) */ + +/* Resource Descriptor */ + +struct acpi_drtm_resource { + u8 size[7]; + u8 type; + u64 address; +}; + +struct acpi_drtm_resource_list { + u32 resource_count; + struct acpi_drtm_resource resources[1]; +}; + +/* 3) Platform-specific Identifiers List */ + +struct acpi_drtm_dps_id { + u32 dps_id_length; + u8 dps_id[16]; +}; + +/******************************************************************************* + * + * ECDT - Embedded Controller Boot Resources Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_ecdt { + struct acpi_table_header header; /* Common ACPI table header */ + struct acpi_generic_address control; /* Address of EC command/status register */ + struct acpi_generic_address data; /* Address of EC data register */ + u32 uid; /* Unique ID - must be same as the EC _UID method */ + u8 gpe; /* The GPE for the EC */ + u8 id[1]; /* Full namepath of the EC in the ACPI namespace */ +}; + +/******************************************************************************* + * + * EINJ - Error Injection Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_einj { + struct acpi_table_header header; /* Common ACPI table header */ + u32 header_length; + u8 flags; + u8 reserved[3]; + u32 entries; +}; + +/* EINJ Injection Instruction Entries (actions) */ + +struct acpi_einj_entry { + struct acpi_whea_header whea_header; /* Common header for WHEA tables */ +}; + +/* Masks for Flags field above */ + +#define ACPI_EINJ_PRESERVE (1) + +/* Values for Action field above */ + +enum acpi_einj_actions { + ACPI_EINJ_BEGIN_OPERATION = 0, + ACPI_EINJ_GET_TRIGGER_TABLE = 1, + ACPI_EINJ_SET_ERROR_TYPE = 2, + ACPI_EINJ_GET_ERROR_TYPE = 3, + ACPI_EINJ_END_OPERATION = 4, + ACPI_EINJ_EXECUTE_OPERATION = 5, + ACPI_EINJ_CHECK_BUSY_STATUS = 6, + ACPI_EINJ_GET_COMMAND_STATUS = 7, + ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8, + 
ACPI_EINJ_GET_EXECUTE_TIMINGS = 9, + ACPI_EINJ_ACTION_RESERVED = 10, /* 10 and greater are reserved */ + ACPI_EINJ_TRIGGER_ERROR = 0xFF /* Except for this value */ +}; + +/* Values for Instruction field above */ + +enum acpi_einj_instructions { + ACPI_EINJ_READ_REGISTER = 0, + ACPI_EINJ_READ_REGISTER_VALUE = 1, + ACPI_EINJ_WRITE_REGISTER = 2, + ACPI_EINJ_WRITE_REGISTER_VALUE = 3, + ACPI_EINJ_NOOP = 4, + ACPI_EINJ_FLUSH_CACHELINE = 5, + ACPI_EINJ_INSTRUCTION_RESERVED = 6 /* 6 and greater are reserved */ +}; + +struct acpi_einj_error_type_with_addr { + u32 error_type; + u32 vendor_struct_offset; + u32 flags; + u32 apic_id; + u64 address; + u64 range; + u32 pcie_id; +}; + +struct acpi_einj_vendor { + u32 length; + u32 pcie_id; + u16 vendor_id; + u16 device_id; + u8 revision_id; + u8 reserved[3]; +}; + +/* EINJ Trigger Error Action Table */ + +struct acpi_einj_trigger { + u32 header_size; + u32 revision; + u32 table_size; + u32 entry_count; +}; + +/* Command status return values */ + +enum acpi_einj_command_status { + ACPI_EINJ_SUCCESS = 0, + ACPI_EINJ_FAILURE = 1, + ACPI_EINJ_INVALID_ACCESS = 2, + ACPI_EINJ_STATUS_RESERVED = 3 /* 3 and greater are reserved */ +}; + +/* Error types returned from ACPI_EINJ_GET_ERROR_TYPE (bitfield) */ + +#define ACPI_EINJ_PROCESSOR_CORRECTABLE (1) +#define ACPI_EINJ_PROCESSOR_UNCORRECTABLE (1<<1) +#define ACPI_EINJ_PROCESSOR_FATAL (1<<2) +#define ACPI_EINJ_MEMORY_CORRECTABLE (1<<3) +#define ACPI_EINJ_MEMORY_UNCORRECTABLE (1<<4) +#define ACPI_EINJ_MEMORY_FATAL (1<<5) +#define ACPI_EINJ_PCIX_CORRECTABLE (1<<6) +#define ACPI_EINJ_PCIX_UNCORRECTABLE (1<<7) +#define ACPI_EINJ_PCIX_FATAL (1<<8) +#define ACPI_EINJ_PLATFORM_CORRECTABLE (1<<9) +#define ACPI_EINJ_PLATFORM_UNCORRECTABLE (1<<10) +#define ACPI_EINJ_PLATFORM_FATAL (1<<11) +#define ACPI_EINJ_VENDOR_DEFINED (1<<31) + +/******************************************************************************* + * + * ERST - Error Record Serialization Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_erst { + struct acpi_table_header header; /* Common ACPI table header */ + u32 header_length; + u32 reserved; + u32 entries; +}; + +/* ERST Serialization Entries (actions) */ + +struct acpi_erst_entry { + struct acpi_whea_header whea_header; /* Common header for WHEA tables */ +}; + +/* Masks for Flags field above */ + +#define ACPI_ERST_PRESERVE (1) + +/* Values for Action field above */ + +enum acpi_erst_actions { + ACPI_ERST_BEGIN_WRITE = 0, + ACPI_ERST_BEGIN_READ = 1, + ACPI_ERST_BEGIN_CLEAR = 2, + ACPI_ERST_END = 3, + ACPI_ERST_SET_RECORD_OFFSET = 4, + ACPI_ERST_EXECUTE_OPERATION = 5, + ACPI_ERST_CHECK_BUSY_STATUS = 6, + ACPI_ERST_GET_COMMAND_STATUS = 7, + ACPI_ERST_GET_RECORD_ID = 8, + ACPI_ERST_SET_RECORD_ID = 9, + ACPI_ERST_GET_RECORD_COUNT = 10, + ACPI_ERST_BEGIN_DUMMY_WRIITE = 11, + ACPI_ERST_NOT_USED = 12, + ACPI_ERST_GET_ERROR_RANGE = 13, + ACPI_ERST_GET_ERROR_LENGTH = 14, + ACPI_ERST_GET_ERROR_ATTRIBUTES = 15, + ACPI_ERST_EXECUTE_TIMINGS = 16, + ACPI_ERST_ACTION_RESERVED = 17 /* 17 and greater are reserved */ +}; + +/* Values for Instruction field above */ + +enum acpi_erst_instructions { + ACPI_ERST_READ_REGISTER = 0, + ACPI_ERST_READ_REGISTER_VALUE = 1, + ACPI_ERST_WRITE_REGISTER = 2, + ACPI_ERST_WRITE_REGISTER_VALUE = 3, + ACPI_ERST_NOOP = 4, + ACPI_ERST_LOAD_VAR1 = 5, + ACPI_ERST_LOAD_VAR2 = 6, + ACPI_ERST_STORE_VAR1 = 7, + ACPI_ERST_ADD = 8, + ACPI_ERST_SUBTRACT = 9, + ACPI_ERST_ADD_VALUE = 10, + ACPI_ERST_SUBTRACT_VALUE = 
11, + ACPI_ERST_STALL = 12, + ACPI_ERST_STALL_WHILE_TRUE = 13, + ACPI_ERST_SKIP_NEXT_IF_TRUE = 14, + ACPI_ERST_GOTO = 15, + ACPI_ERST_SET_SRC_ADDRESS_BASE = 16, + ACPI_ERST_SET_DST_ADDRESS_BASE = 17, + ACPI_ERST_MOVE_DATA = 18, + ACPI_ERST_INSTRUCTION_RESERVED = 19 /* 19 and greater are reserved */ +}; + +/* Command status return values */ + +enum acpi_erst_command_status { + ACPI_ERST_SUCESS = 0, + ACPI_ERST_NO_SPACE = 1, + ACPI_ERST_NOT_AVAILABLE = 2, + ACPI_ERST_FAILURE = 3, + ACPI_ERST_RECORD_EMPTY = 4, + ACPI_ERST_NOT_FOUND = 5, + ACPI_ERST_STATUS_RESERVED = 6 /* 6 and greater are reserved */ +}; + +/* Error Record Serialization Information */ + +struct acpi_erst_info { + u16 signature; /* Should be "ER" */ + u8 data[48]; +}; + +/******************************************************************************* + * + * FPDT - Firmware Performance Data Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_fpdt { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* FPDT subtable header (Performance Record Structure) */ + +struct acpi_fpdt_header { + u16 type; + u8 length; + u8 revision; +}; + +/* Values for Type field above */ + +enum acpi_fpdt_type { + ACPI_FPDT_TYPE_BOOT = 0, + ACPI_FPDT_TYPE_S3PERF = 1 +}; + +/* + * FPDT subtables + */ + +/* 0: Firmware Basic Boot Performance Record */ + +struct acpi_fpdt_boot_pointer { + struct acpi_fpdt_header header; + u8 reserved[4]; + u64 address; +}; + +/* 1: S3 Performance Table Pointer Record */ + +struct acpi_fpdt_s3pt_pointer { + struct acpi_fpdt_header header; + u8 reserved[4]; + u64 address; +}; + +/* + * S3PT - S3 Performance Table. This table is pointed to by the + * S3 Pointer Record above. + */ +struct acpi_table_s3pt { + u8 signature[4]; /* "S3PT" */ + u32 length; +}; + +/* + * S3PT Subtables (Not part of the actual FPDT) + */ + +/* Values for Type field in S3PT header */ + +enum acpi_s3pt_type { + ACPI_S3PT_TYPE_RESUME = 0, + ACPI_S3PT_TYPE_SUSPEND = 1, + ACPI_FPDT_BOOT_PERFORMANCE = 2 +}; + +struct acpi_s3pt_resume { + struct acpi_fpdt_header header; + u32 resume_count; + u64 full_resume; + u64 average_resume; +}; + +struct acpi_s3pt_suspend { + struct acpi_fpdt_header header; + u64 suspend_start; + u64 suspend_end; +}; + +/* + * FPDT Boot Performance Record (Not part of the actual FPDT) + */ +struct acpi_fpdt_boot { + struct acpi_fpdt_header header; + u8 reserved[4]; + u64 reset_end; + u64 load_start; + u64 startup_start; + u64 exit_services_entry; + u64 exit_services_exit; +}; + +/******************************************************************************* + * + * GTDT - Generic Timer Description Table (ACPI 5.1) + * Version 2 + * + ******************************************************************************/ + +struct acpi_table_gtdt { + struct acpi_table_header header; /* Common ACPI table header */ + u64 counter_block_addresss; + u32 reserved; + u32 secure_el1_interrupt; + u32 secure_el1_flags; + u32 non_secure_el1_interrupt; + u32 non_secure_el1_flags; + u32 virtual_timer_interrupt; + u32 virtual_timer_flags; + u32 non_secure_el2_interrupt; + u32 non_secure_el2_flags; + u64 counter_read_block_address; + u32 platform_timer_count; + u32 platform_timer_offset; +}; + +/* Flag Definitions: Timer Block Physical Timers and Virtual timers */ + +#define ACPI_GTDT_INTERRUPT_MODE (1) +#define ACPI_GTDT_INTERRUPT_POLARITY (1<<1) +#define ACPI_GTDT_ALWAYS_ON (1<<2) + +/* Common GTDT subtable header */ + +struct acpi_gtdt_header { + 
u8 type; + u16 length; +}; + +/* Values for GTDT subtable type above */ + +enum acpi_gtdt_type { + ACPI_GTDT_TYPE_TIMER_BLOCK = 0, + ACPI_GTDT_TYPE_WATCHDOG = 1, + ACPI_GTDT_TYPE_RESERVED = 2 /* 2 and greater are reserved */ +}; + +/* GTDT Subtables, correspond to Type in struct acpi_gtdt_header */ + +/* 0: Generic Timer Block */ + +struct acpi_gtdt_timer_block { + struct acpi_gtdt_header header; + u8 reserved; + u64 block_address; + u32 timer_count; + u32 timer_offset; +}; + +/* Timer Sub-Structure, one per timer */ + +struct acpi_gtdt_timer_entry { + u8 frame_number; + u8 reserved[3]; + u64 base_address; + u64 el0_base_address; + u32 timer_interrupt; + u32 timer_flags; + u32 virtual_timer_interrupt; + u32 virtual_timer_flags; + u32 common_flags; +}; + +/* Flag Definitions: timer_flags and virtual_timer_flags above */ + +#define ACPI_GTDT_GT_IRQ_MODE (1) +#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1) + +/* Flag Definitions: common_flags above */ + +#define ACPI_GTDT_GT_IS_SECURE_TIMER (1) +#define ACPI_GTDT_GT_ALWAYS_ON (1<<1) + +/* 1: SBSA Generic Watchdog Structure */ + +struct acpi_gtdt_watchdog { + struct acpi_gtdt_header header; + u8 reserved; + u64 refresh_frame_address; + u64 control_frame_address; + u32 timer_interrupt; + u32 timer_flags; +}; + +/* Flag Definitions: timer_flags above */ + +#define ACPI_GTDT_WATCHDOG_IRQ_MODE (1) +#define ACPI_GTDT_WATCHDOG_IRQ_POLARITY (1<<1) +#define ACPI_GTDT_WATCHDOG_SECURE (1<<2) + +/******************************************************************************* + * + * HEST - Hardware Error Source Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_hest { + struct acpi_table_header header; /* Common ACPI table header */ + u32 error_source_count; +}; + +/* HEST subtable header */ + +struct acpi_hest_header { + u16 type; + u16 source_id; +}; + +/* Values for Type field above for subtables */ + +enum acpi_hest_types { + ACPI_HEST_TYPE_IA32_CHECK = 0, + ACPI_HEST_TYPE_IA32_CORRECTED_CHECK = 1, + ACPI_HEST_TYPE_IA32_NMI = 2, + ACPI_HEST_TYPE_NOT_USED3 = 3, + ACPI_HEST_TYPE_NOT_USED4 = 4, + ACPI_HEST_TYPE_NOT_USED5 = 5, + ACPI_HEST_TYPE_AER_ROOT_PORT = 6, + ACPI_HEST_TYPE_AER_ENDPOINT = 7, + ACPI_HEST_TYPE_AER_BRIDGE = 8, + ACPI_HEST_TYPE_GENERIC_ERROR = 9, + ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10, + ACPI_HEST_TYPE_IA32_DEFERRED_CHECK = 11, + ACPI_HEST_TYPE_RESERVED = 12 /* 12 and greater are reserved */ +}; + +/* + * HEST substructures contained in subtables + */ + +/* + * IA32 Error Bank(s) - Follows the struct acpi_hest_ia_machine_check and + * struct acpi_hest_ia_corrected structures. 
+ */ +struct acpi_hest_ia_error_bank { + u8 bank_number; + u8 clear_status_on_init; + u8 status_format; + u8 reserved; + u32 control_register; + u64 control_data; + u32 status_register; + u32 address_register; + u32 misc_register; +}; + +/* Common HEST sub-structure for PCI/AER structures below (6,7,8) */ + +struct acpi_hest_aer_common { + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 bus; /* Bus and Segment numbers */ + u16 device; + u16 function; + u16 device_control; + u16 reserved2; + u32 uncorrectable_mask; + u32 uncorrectable_severity; + u32 correctable_mask; + u32 advanced_capabilities; +}; + +/* Masks for HEST Flags fields */ + +#define ACPI_HEST_FIRMWARE_FIRST (1) +#define ACPI_HEST_GLOBAL (1<<1) +#define ACPI_HEST_GHES_ASSIST (1<<2) + +/* + * Macros to access the bus/segment numbers in Bus field above: + * Bus number is encoded in bits 7:0 + * Segment number is encoded in bits 23:8 + */ +#define ACPI_HEST_BUS(bus) ((bus) & 0xFF) +#define ACPI_HEST_SEGMENT(bus) (((bus) >> 8) & 0xFFFF) + +/* Hardware Error Notification */ + +struct acpi_hest_notify { + u8 type; + u8 length; + u16 config_write_enable; + u32 poll_interval; + u32 vector; + u32 polling_threshold_value; + u32 polling_threshold_window; + u32 error_threshold_value; + u32 error_threshold_window; +}; + +/* Values for Notify Type field above */ + +enum acpi_hest_notify_types { + ACPI_HEST_NOTIFY_POLLED = 0, + ACPI_HEST_NOTIFY_EXTERNAL = 1, + ACPI_HEST_NOTIFY_LOCAL = 2, + ACPI_HEST_NOTIFY_SCI = 3, + ACPI_HEST_NOTIFY_NMI = 4, + ACPI_HEST_NOTIFY_CMCI = 5, /* ACPI 5.0 */ + ACPI_HEST_NOTIFY_MCE = 6, /* ACPI 5.0 */ + ACPI_HEST_NOTIFY_GPIO = 7, /* ACPI 6.0 */ + ACPI_HEST_NOTIFY_SEA = 8, /* ACPI 6.1 */ + ACPI_HEST_NOTIFY_SEI = 9, /* ACPI 6.1 */ + ACPI_HEST_NOTIFY_GSIV = 10, /* ACPI 6.1 */ + ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED = 11, /* ACPI 6.2 */ + ACPI_HEST_NOTIFY_RESERVED = 12 /* 12 and greater are reserved */ +}; + +/* Values for config_write_enable bitfield above */ + +#define ACPI_HEST_TYPE (1) +#define ACPI_HEST_POLL_INTERVAL (1<<1) +#define ACPI_HEST_POLL_THRESHOLD_VALUE (1<<2) +#define ACPI_HEST_POLL_THRESHOLD_WINDOW (1<<3) +#define ACPI_HEST_ERR_THRESHOLD_VALUE (1<<4) +#define ACPI_HEST_ERR_THRESHOLD_WINDOW (1<<5) + +/* + * HEST subtables + */ + +/* 0: IA32 Machine Check Exception */ + +struct acpi_hest_ia_machine_check { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; /* See flags ACPI_HEST_GLOBAL, etc. above */ + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u64 global_capability_data; + u64 global_control_data; + u8 num_hardware_banks; + u8 reserved3[7]; +}; + +/* 1: IA32 Corrected Machine Check */ + +struct acpi_hest_ia_corrected { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; /* See flags ACPI_HEST_GLOBAL, etc. 
above */ + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + struct acpi_hest_notify notify; + u8 num_hardware_banks; + u8 reserved2[3]; +}; + +/* 2: IA32 Non-Maskable Interrupt */ + +struct acpi_hest_ia_nmi { + struct acpi_hest_header header; + u32 reserved; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 max_raw_data_length; +}; + +/* 3,4,5: Not used */ + +/* 6: PCI Express Root Port AER */ + +struct acpi_hest_aer_root { + struct acpi_hest_header header; + struct acpi_hest_aer_common aer; + u32 root_error_command; +}; + +/* 7: PCI Express AER (AER Endpoint) */ + +struct acpi_hest_aer { + struct acpi_hest_header header; + struct acpi_hest_aer_common aer; +}; + +/* 8: PCI Express/PCI-X Bridge AER */ + +struct acpi_hest_aer_bridge { + struct acpi_hest_header header; + struct acpi_hest_aer_common aer; + u32 uncorrectable_mask2; + u32 uncorrectable_severity2; + u32 advanced_capabilities2; +}; + +/* 9: Generic Hardware Error Source */ + +struct acpi_hest_generic { + struct acpi_hest_header header; + u16 related_source_id; + u8 reserved; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 max_raw_data_length; + struct acpi_generic_address error_status_address; + struct acpi_hest_notify notify; + u32 error_block_length; +}; + +/* 10: Generic Hardware Error Source, version 2 */ + +struct acpi_hest_generic_v2 { + struct acpi_hest_header header; + u16 related_source_id; + u8 reserved; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 max_raw_data_length; + struct acpi_generic_address error_status_address; + struct acpi_hest_notify notify; + u32 error_block_length; + struct acpi_generic_address read_ack_register; + u64 read_ack_preserve; + u64 read_ack_write; +}; + +/* Generic Error Status block */ + +struct acpi_hest_generic_status { + u32 block_status; + u32 raw_data_offset; + u32 raw_data_length; + u32 data_length; + u32 error_severity; +}; + +/* Values for block_status flags above */ + +#define ACPI_HEST_UNCORRECTABLE (1) +#define ACPI_HEST_CORRECTABLE (1<<1) +#define ACPI_HEST_MULTIPLE_UNCORRECTABLE (1<<2) +#define ACPI_HEST_MULTIPLE_CORRECTABLE (1<<3) +#define ACPI_HEST_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */ + +/* Generic Error Data entry */ + +struct acpi_hest_generic_data { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; +}; + +/* Extension for revision 0x0300 */ + +struct acpi_hest_generic_data_v300 { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; + u64 time_stamp; +}; + +/* Values for error_severity above */ + +#define ACPI_HEST_GEN_ERROR_RECOVERABLE 0 +#define ACPI_HEST_GEN_ERROR_FATAL 1 +#define ACPI_HEST_GEN_ERROR_CORRECTED 2 +#define ACPI_HEST_GEN_ERROR_NONE 3 + +/* Flags for validation_bits above */ + +#define ACPI_HEST_GEN_VALID_FRU_ID (1) +#define ACPI_HEST_GEN_VALID_FRU_STRING (1<<1) +#define ACPI_HEST_GEN_VALID_TIMESTAMP (1<<2) + +/* 11: IA32 Deferred Machine Check Exception (ACPI 6.2) */ + +struct acpi_hest_ia_deferred_check { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; /* See flags ACPI_HEST_GLOBAL, etc. 
above */ + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + struct acpi_hest_notify notify; + u8 num_hardware_banks; + u8 reserved2[3]; +}; + +/******************************************************************************* + * + * HMAT - Heterogeneous Memory Attributes Table (ACPI 6.2) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_hmat { + struct acpi_table_header header; /* Common ACPI table header */ + u32 reserved; +}; + +/* Values for HMAT structure types */ + +enum acpi_hmat_type { + ACPI_HMAT_TYPE_ADDRESS_RANGE = 0, /* Memory subystem address range */ + ACPI_HMAT_TYPE_LOCALITY = 1, /* System locality latency and bandwidth information */ + ACPI_HMAT_TYPE_CACHE = 2, /* Memory side cache information */ + ACPI_HMAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */ +}; + +struct acpi_hmat_structure { + u16 type; + u16 reserved; + u32 length; +}; + +/* + * HMAT Structures, correspond to Type in struct acpi_hmat_structure + */ + +/* 0: Memory subystem address range */ + +struct acpi_hmat_address_range { + struct acpi_hmat_structure header; + u16 flags; + u16 reserved1; + u32 processor_PD; /* Processor proximity domain */ + u32 memory_PD; /* Memory proximity domain */ + u32 reserved2; + u64 physical_address_base; /* Physical address range base */ + u64 physical_address_length; /* Physical address range length */ +}; + +/* Masks for Flags field above */ + +#define ACPI_HMAT_PROCESSOR_PD_VALID (1) /* 1: processor_PD field is valid */ +#define ACPI_HMAT_MEMORY_PD_VALID (1<<1) /* 1: memory_PD field is valid */ +#define ACPI_HMAT_RESERVATION_HINT (1<<2) /* 1: Reservation hint */ + +/* 1: System locality latency and bandwidth information */ + +struct acpi_hmat_locality { + struct acpi_hmat_structure header; + u8 flags; + u8 data_type; + u16 reserved1; + u32 number_of_initiator_Pds; + u32 number_of_target_Pds; + u32 reserved2; + u64 entry_base_unit; +}; + +/* Masks for Flags field above */ + +#define ACPI_HMAT_MEMORY_HIERARCHY (0x0F) + +/* Values for Memory Hierarchy flag */ + +#define ACPI_HMAT_MEMORY 0 +#define ACPI_HMAT_LAST_LEVEL_CACHE 1 +#define ACPI_HMAT_1ST_LEVEL_CACHE 2 +#define ACPI_HMAT_2ND_LEVEL_CACHE 3 +#define ACPI_HMAT_3RD_LEVEL_CACHE 4 + +/* Values for data_type field above */ + +#define ACPI_HMAT_ACCESS_LATENCY 0 +#define ACPI_HMAT_READ_LATENCY 1 +#define ACPI_HMAT_WRITE_LATENCY 2 +#define ACPI_HMAT_ACCESS_BANDWIDTH 3 +#define ACPI_HMAT_READ_BANDWIDTH 4 +#define ACPI_HMAT_WRITE_BANDWIDTH 5 + +/* 2: Memory side cache information */ + +struct acpi_hmat_cache { + struct acpi_hmat_structure header; + u32 memory_PD; + u32 reserved1; + u64 cache_size; + u32 cache_attributes; + u16 reserved2; + u16 number_of_SMBIOShandles; +}; + +/* Masks for cache_attributes field above */ + +#define ACPI_HMAT_TOTAL_CACHE_LEVEL (0x0000000F) +#define ACPI_HMAT_CACHE_LEVEL (0x000000F0) +#define ACPI_HMAT_CACHE_ASSOCIATIVITY (0x00000F00) +#define ACPI_HMAT_WRITE_POLICY (0x0000F000) +#define ACPI_HMAT_CACHE_LINE_SIZE (0xFFFF0000) + +/* Values for cache associativity flag */ + +#define ACPI_HMAT_CA_NONE (0) +#define ACPI_HMAT_CA_DIRECT_MAPPED (1) +#define ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING (2) + +/* Values for write policy flag */ + +#define ACPI_HMAT_CP_NONE (0) +#define ACPI_HMAT_CP_WB (1) +#define ACPI_HMAT_CP_WT (2) + +/******************************************************************************* + * + * HPET - High Precision Event Timer table + * Version 1 + * + * Conforms to "IA-PC HPET (High 
Precision Event Timers) Specification", + * Version 1.0a, October 2004 + * + ******************************************************************************/ + +struct acpi_table_hpet { + struct acpi_table_header header; /* Common ACPI table header */ + u32 id; /* Hardware ID of event timer block */ + struct acpi_generic_address address; /* Address of event timer block */ + u8 sequence; /* HPET sequence number */ + u16 minimum_tick; /* Main counter min tick, periodic mode */ + u8 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_HPET_PAGE_PROTECT_MASK (3) + +/* Values for Page Protect flags */ + +enum acpi_hpet_page_protect { + ACPI_HPET_NO_PAGE_PROTECT = 0, + ACPI_HPET_PAGE_PROTECT4 = 1, + ACPI_HPET_PAGE_PROTECT64 = 2 +}; + +/******************************************************************************* + * + * IBFT - Boot Firmware Table + * Version 1 + * + * Conforms to "iSCSI Boot Firmware Table (iBFT) as Defined in ACPI 3.0b + * Specification", Version 1.01, March 1, 2007 + * + * Note: It appears that this table is not intended to appear in the RSDT/XSDT. + * Therefore, it is not currently supported by the disassembler. + * + ******************************************************************************/ + +struct acpi_table_ibft { + struct acpi_table_header header; /* Common ACPI table header */ + u8 reserved[12]; +}; + +/* IBFT common subtable header */ + +struct acpi_ibft_header { + u8 type; + u8 version; + u16 length; + u8 index; + u8 flags; +}; + +/* Values for Type field above */ + +enum acpi_ibft_type { + ACPI_IBFT_TYPE_NOT_USED = 0, + ACPI_IBFT_TYPE_CONTROL = 1, + ACPI_IBFT_TYPE_INITIATOR = 2, + ACPI_IBFT_TYPE_NIC = 3, + ACPI_IBFT_TYPE_TARGET = 4, + ACPI_IBFT_TYPE_EXTENSIONS = 5, + ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ +}; + +/* IBFT subtables */ + +struct acpi_ibft_control { + struct acpi_ibft_header header; + u16 extensions; + u16 initiator_offset; + u16 nic0_offset; + u16 target0_offset; + u16 nic1_offset; + u16 target1_offset; +}; + +struct acpi_ibft_initiator { + struct acpi_ibft_header header; + u8 sns_server[16]; + u8 slp_server[16]; + u8 primary_server[16]; + u8 secondary_server[16]; + u16 name_length; + u16 name_offset; +}; + +struct acpi_ibft_nic { + struct acpi_ibft_header header; + u8 ip_address[16]; + u8 subnet_mask_prefix; + u8 origin; + u8 gateway[16]; + u8 primary_dns[16]; + u8 secondary_dns[16]; + u8 dhcp[16]; + u16 vlan; + u8 mac_address[6]; + u16 pci_address; + u16 name_length; + u16 name_offset; +}; + +struct acpi_ibft_target { + struct acpi_ibft_header header; + u8 target_ip_address[16]; + u16 target_ip_socket; + u8 target_boot_lun[8]; + u8 chap_type; + u8 nic_association; + u16 target_name_length; + u16 target_name_offset; + u16 chap_name_length; + u16 chap_name_offset; + u16 chap_secret_length; + u16 chap_secret_offset; + u16 reverse_chap_name_length; + u16 reverse_chap_name_offset; + u16 reverse_chap_secret_length; + u16 reverse_chap_secret_offset; +}; + +/* Reset to default packing */ + +#pragma pack() + +#endif /* __ACTBL1_H__ */ diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h new file mode 100644 index 000000000..1d4ef0621 --- /dev/null +++ b/include/acpi/actbl2.h @@ -0,0 +1,1727 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec) + * + * Copyright (C) 2000 - 2018, Intel Corp. 
+ * + *****************************************************************************/ + +#ifndef __ACTBL2_H__ +#define __ACTBL2_H__ + +/******************************************************************************* + * + * Additional ACPI Tables (2) + * + * These tables are not consumed directly by the ACPICA subsystem, but are + * included here to support device drivers and the AML disassembler. + * + ******************************************************************************/ + +/* + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. + */ +#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */ +#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */ +#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */ +#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ +#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */ +#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */ +#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */ +#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ +#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */ +#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */ +#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */ +#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */ +#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */ +#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */ +#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */ +#define ACPI_SIG_RASF "RASF" /* RAS Feature table */ +#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ +#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */ +#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */ + +/* + * All tables must be byte-packed to match the ACPI specification, since + * the tables are provided by the system BIOS. + */ +#pragma pack(1) + +/* + * Note: C bitfields are not used for this reason: + * + * "Bitfields are great and easy to read, but unfortunately the C language + * does not specify the layout of bitfields in memory, which means they are + * essentially useless for dealing with packed data in on-disk formats or + * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me, + * this decision was a design error in C. Ritchie could have picked an order + * and stuck with it." Norman Ramsey. 
+ * See http://stackoverflow.com/a/1053662/41661 + */ + +/******************************************************************************* + * + * IORT - IO Remapping Table + * + * Conforms to "IO Remapping Table System Software on ARM Platforms", + * Document number: ARM DEN 0049D, March 2018 + * + ******************************************************************************/ + +struct acpi_table_iort { + struct acpi_table_header header; + u32 node_count; + u32 node_offset; + u32 reserved; +}; + +/* + * IORT subtables + */ +struct acpi_iort_node { + u8 type; + u16 length; + u8 revision; + u32 reserved; + u32 mapping_count; + u32 mapping_offset; + char node_data[1]; +}; + +/* Values for subtable Type above */ + +enum acpi_iort_node_type { + ACPI_IORT_NODE_ITS_GROUP = 0x00, + ACPI_IORT_NODE_NAMED_COMPONENT = 0x01, + ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02, + ACPI_IORT_NODE_SMMU = 0x03, + ACPI_IORT_NODE_SMMU_V3 = 0x04, + ACPI_IORT_NODE_PMCG = 0x05 +}; + +struct acpi_iort_id_mapping { + u32 input_base; /* Lowest value in input range */ + u32 id_count; /* Number of IDs */ + u32 output_base; /* Lowest value in output range */ + u32 output_reference; /* A reference to the output node */ + u32 flags; +}; + +/* Masks for Flags field above for IORT subtable */ + +#define ACPI_IORT_ID_SINGLE_MAPPING (1) + +struct acpi_iort_memory_access { + u32 cache_coherency; + u8 hints; + u16 reserved; + u8 memory_flags; +}; + +/* Values for cache_coherency field above */ + +#define ACPI_IORT_NODE_COHERENT 0x00000001 /* The device node is fully coherent */ +#define ACPI_IORT_NODE_NOT_COHERENT 0x00000000 /* The device node is not coherent */ + +/* Masks for Hints field above */ + +#define ACPI_IORT_HT_TRANSIENT (1) +#define ACPI_IORT_HT_WRITE (1<<1) +#define ACPI_IORT_HT_READ (1<<2) +#define ACPI_IORT_HT_OVERRIDE (1<<3) + +/* Masks for memory_flags field above */ + +#define ACPI_IORT_MF_COHERENCY (1) +#define ACPI_IORT_MF_ATTRIBUTES (1<<1) + +/* + * IORT node specific subtables + */ +struct acpi_iort_its_group { + u32 its_count; + u32 identifiers[1]; /* GIC ITS identifier arrary */ +}; + +struct acpi_iort_named_component { + u32 node_flags; + u64 memory_properties; /* Memory access properties */ + u8 memory_address_limit; /* Memory address size limit */ + char device_name[1]; /* Path of namespace object */ +}; + +/* Masks for Flags field above */ + +#define ACPI_IORT_NC_STALL_SUPPORTED (1) +#define ACPI_IORT_NC_PASID_BITS (31<<1) + +struct acpi_iort_root_complex { + u64 memory_properties; /* Memory access properties */ + u32 ats_attribute; + u32 pci_segment_number; + u8 memory_address_limit; /* Memory address size limit */ + u8 reserved[3]; /* Reserved, must be zero */ +}; + +/* Values for ats_attribute field above */ + +#define ACPI_IORT_ATS_SUPPORTED 0x00000001 /* The root complex supports ATS */ +#define ACPI_IORT_ATS_UNSUPPORTED 0x00000000 /* The root complex doesn't support ATS */ + +struct acpi_iort_smmu { + u64 base_address; /* SMMU base address */ + u64 span; /* Length of memory range */ + u32 model; + u32 flags; + u32 global_interrupt_offset; + u32 context_interrupt_count; + u32 context_interrupt_offset; + u32 pmu_interrupt_count; + u32 pmu_interrupt_offset; + u64 interrupts[1]; /* Interrupt array */ +}; + +/* Values for Model field above */ + +#define ACPI_IORT_SMMU_V1 0x00000000 /* Generic SMMUv1 */ +#define ACPI_IORT_SMMU_V2 0x00000001 /* Generic SMMUv2 */ +#define ACPI_IORT_SMMU_CORELINK_MMU400 0x00000002 /* ARM Corelink MMU-400 */ +#define ACPI_IORT_SMMU_CORELINK_MMU500 0x00000003 /* ARM Corelink 
MMU-500 */ +#define ACPI_IORT_SMMU_CORELINK_MMU401 0x00000004 /* ARM Corelink MMU-401 */ +#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x00000005 /* Cavium thunder_x SMMUv2 */ + +/* Masks for Flags field above */ + +#define ACPI_IORT_SMMU_DVM_SUPPORTED (1) +#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1) + +/* Global interrupt format */ + +struct acpi_iort_smmu_gsi { + u32 nsg_irpt; + u32 nsg_irpt_flags; + u32 nsg_cfg_irpt; + u32 nsg_cfg_irpt_flags; +}; + +struct acpi_iort_smmu_v3 { + u64 base_address; /* SMMUv3 base address */ + u32 flags; + u32 reserved; + u64 vatos_address; + u32 model; + u32 event_gsiv; + u32 pri_gsiv; + u32 gerr_gsiv; + u32 sync_gsiv; + u32 pxm; + u32 id_mapping_index; +}; + +/* Values for Model field above */ + +#define ACPI_IORT_SMMU_V3_GENERIC 0x00000000 /* Generic SMMUv3 */ +#define ACPI_IORT_SMMU_V3_HISILICON_HI161X 0x00000001 /* hi_silicon Hi161x SMMUv3 */ +#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x00000002 /* Cavium CN99xx SMMUv3 */ + +/* Masks for Flags field above */ + +#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1) +#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (3<<1) +#define ACPI_IORT_SMMU_V3_PXM_VALID (1<<3) + +struct acpi_iort_pmcg { + u64 page0_base_address; + u32 overflow_gsiv; + u32 node_reference; + u64 page1_base_address; +}; + +/******************************************************************************* + * + * IVRS - I/O Virtualization Reporting Structure + * Version 1 + * + * Conforms to "AMD I/O Virtualization Technology (IOMMU) Specification", + * Revision 1.26, February 2009. + * + ******************************************************************************/ + +struct acpi_table_ivrs { + struct acpi_table_header header; /* Common ACPI table header */ + u32 info; /* Common virtualization info */ + u64 reserved; +}; + +/* Values for Info field above */ + +#define ACPI_IVRS_PHYSICAL_SIZE 0x00007F00 /* 7 bits, physical address size */ +#define ACPI_IVRS_VIRTUAL_SIZE 0x003F8000 /* 7 bits, virtual address size */ +#define ACPI_IVRS_ATS_RESERVED 0x00400000 /* ATS address translation range reserved */ + +/* IVRS subtable header */ + +struct acpi_ivrs_header { + u8 type; /* Subtable type */ + u8 flags; + u16 length; /* Subtable length */ + u16 device_id; /* ID of IOMMU */ +}; + +/* Values for subtable Type above */ + +enum acpi_ivrs_type { + ACPI_IVRS_TYPE_HARDWARE = 0x10, + ACPI_IVRS_TYPE_MEMORY1 = 0x20, + ACPI_IVRS_TYPE_MEMORY2 = 0x21, + ACPI_IVRS_TYPE_MEMORY3 = 0x22 +}; + +/* Masks for Flags field above for IVHD subtable */ + +#define ACPI_IVHD_TT_ENABLE (1) +#define ACPI_IVHD_PASS_PW (1<<1) +#define ACPI_IVHD_RES_PASS_PW (1<<2) +#define ACPI_IVHD_ISOC (1<<3) +#define ACPI_IVHD_IOTLB (1<<4) + +/* Masks for Flags field above for IVMD subtable */ + +#define ACPI_IVMD_UNITY (1) +#define ACPI_IVMD_READ (1<<1) +#define ACPI_IVMD_WRITE (1<<2) +#define ACPI_IVMD_EXCLUSION_RANGE (1<<3) + +/* + * IVRS subtables, correspond to Type in struct acpi_ivrs_header + */ + +/* 0x10: I/O Virtualization Hardware Definition Block (IVHD) */ + +struct acpi_ivrs_hardware { + struct acpi_ivrs_header header; + u16 capability_offset; /* Offset for IOMMU control fields */ + u64 base_address; /* IOMMU control registers */ + u16 pci_segment_group; + u16 info; /* MSI number and unit ID */ + u32 reserved; +}; + +/* Masks for Info field above */ + +#define ACPI_IVHD_MSI_NUMBER_MASK 0x001F /* 5 bits, MSI message number */ +#define ACPI_IVHD_UNIT_ID_MASK 0x1F00 /* 5 bits, unit_ID */ + +/* + * Device Entries for IVHD subtable, appear after struct acpi_ivrs_hardware structure. 
+ * Upper two bits of the Type field are the (encoded) length of the structure. + * Currently, only 4 and 8 byte entries are defined. 16 and 32 byte entries + * are reserved for future use but not defined. + */ +struct acpi_ivrs_de_header { + u8 type; + u16 id; + u8 data_setting; +}; + +/* Length of device entry is in the top two bits of Type field above */ + +#define ACPI_IVHD_ENTRY_LENGTH 0xC0 + +/* Values for device entry Type field above */ + +enum acpi_ivrs_device_entry_type { + /* 4-byte device entries, all use struct acpi_ivrs_device4 */ + + ACPI_IVRS_TYPE_PAD4 = 0, + ACPI_IVRS_TYPE_ALL = 1, + ACPI_IVRS_TYPE_SELECT = 2, + ACPI_IVRS_TYPE_START = 3, + ACPI_IVRS_TYPE_END = 4, + + /* 8-byte device entries */ + + ACPI_IVRS_TYPE_PAD8 = 64, + ACPI_IVRS_TYPE_NOT_USED = 65, + ACPI_IVRS_TYPE_ALIAS_SELECT = 66, /* Uses struct acpi_ivrs_device8a */ + ACPI_IVRS_TYPE_ALIAS_START = 67, /* Uses struct acpi_ivrs_device8a */ + ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses struct acpi_ivrs_device8b */ + ACPI_IVRS_TYPE_EXT_START = 71, /* Uses struct acpi_ivrs_device8b */ + ACPI_IVRS_TYPE_SPECIAL = 72 /* Uses struct acpi_ivrs_device8c */ +}; + +/* Values for Data field above */ + +#define ACPI_IVHD_INIT_PASS (1) +#define ACPI_IVHD_EINT_PASS (1<<1) +#define ACPI_IVHD_NMI_PASS (1<<2) +#define ACPI_IVHD_SYSTEM_MGMT (3<<4) +#define ACPI_IVHD_LINT0_PASS (1<<6) +#define ACPI_IVHD_LINT1_PASS (1<<7) + +/* Types 0-4: 4-byte device entry */ + +struct acpi_ivrs_device4 { + struct acpi_ivrs_de_header header; +}; + +/* Types 66-67: 8-byte device entry */ + +struct acpi_ivrs_device8a { + struct acpi_ivrs_de_header header; + u8 reserved1; + u16 used_id; + u8 reserved2; +}; + +/* Types 70-71: 8-byte device entry */ + +struct acpi_ivrs_device8b { + struct acpi_ivrs_de_header header; + u32 extended_data; +}; + +/* Values for extended_data above */ + +#define ACPI_IVHD_ATS_DISABLED (1<<31) + +/* Type 72: 8-byte device entry */ + +struct acpi_ivrs_device8c { + struct acpi_ivrs_de_header header; + u8 handle; + u16 used_id; + u8 variety; +}; + +/* Values for Variety field above */ + +#define ACPI_IVHD_IOAPIC 1 +#define ACPI_IVHD_HPET 2 + +/* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */ + +struct acpi_ivrs_memory { + struct acpi_ivrs_header header; + u16 aux_data; + u64 reserved; + u64 start_address; + u64 memory_length; +}; + +/******************************************************************************* + * + * LPIT - Low Power Idle Table + * + * Conforms to "ACPI Low Power Idle Table (LPIT)" July 2014. 
+ * + ******************************************************************************/ + +struct acpi_table_lpit { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* LPIT subtable header */ + +struct acpi_lpit_header { + u32 type; /* Subtable type */ + u32 length; /* Subtable length */ + u16 unique_id; + u16 reserved; + u32 flags; +}; + +/* Values for subtable Type above */ + +enum acpi_lpit_type { + ACPI_LPIT_TYPE_NATIVE_CSTATE = 0x00, + ACPI_LPIT_TYPE_RESERVED = 0x01 /* 1 and above are reserved */ +}; + +/* Masks for Flags field above */ + +#define ACPI_LPIT_STATE_DISABLED (1) +#define ACPI_LPIT_NO_COUNTER (1<<1) + +/* + * LPIT subtables, correspond to Type in struct acpi_lpit_header + */ + +/* 0x00: Native C-state instruction based LPI structure */ + +struct acpi_lpit_native { + struct acpi_lpit_header header; + struct acpi_generic_address entry_trigger; + u32 residency; + u32 latency; + struct acpi_generic_address residency_counter; + u64 counter_frequency; +}; + +/******************************************************************************* + * + * MADT - Multiple APIC Description Table + * Version 3 + * + ******************************************************************************/ + +struct acpi_table_madt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 address; /* Physical address of local APIC */ + u32 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */ + +/* Values for PCATCompat flag */ + +#define ACPI_MADT_DUAL_PIC 1 +#define ACPI_MADT_MULTIPLE_APIC 0 + +/* Values for MADT subtable type in struct acpi_subtable_header */ + +enum acpi_madt_type { + ACPI_MADT_TYPE_LOCAL_APIC = 0, + ACPI_MADT_TYPE_IO_APIC = 1, + ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2, + ACPI_MADT_TYPE_NMI_SOURCE = 3, + ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4, + ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5, + ACPI_MADT_TYPE_IO_SAPIC = 6, + ACPI_MADT_TYPE_LOCAL_SAPIC = 7, + ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, + ACPI_MADT_TYPE_LOCAL_X2APIC = 9, + ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, + ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11, + ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12, + ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, + ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, + ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, + ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */ +}; + +/* + * MADT Subtables, correspond to Type in struct acpi_subtable_header + */ + +/* 0: Processor Local APIC */ + +struct acpi_madt_local_apic { + struct acpi_subtable_header header; + u8 processor_id; /* ACPI processor id */ + u8 id; /* Processor's local APIC id */ + u32 lapic_flags; +}; + +/* 1: IO APIC */ + +struct acpi_madt_io_apic { + struct acpi_subtable_header header; + u8 id; /* I/O APIC ID */ + u8 reserved; /* reserved - must be zero */ + u32 address; /* APIC physical address */ + u32 global_irq_base; /* Global system interrupt where INTI lines start */ +}; + +/* 2: Interrupt Override */ + +struct acpi_madt_interrupt_override { + struct acpi_subtable_header header; + u8 bus; /* 0 - ISA */ + u8 source_irq; /* Interrupt source (IRQ) */ + u32 global_irq; /* Global system interrupt */ + u16 inti_flags; +}; + +/* 3: NMI Source */ + +struct acpi_madt_nmi_source { + struct acpi_subtable_header header; + u16 inti_flags; + u32 global_irq; /* Global system interrupt */ +}; + +/* 4: Local APIC NMI */ + +struct acpi_madt_local_apic_nmi { + struct acpi_subtable_header header; + u8 processor_id; /* ACPI processor id */ + u16 inti_flags; + u8 lint; /* LINTn 
to which NMI is connected */ +}; + +/* 5: Address Override */ + +struct acpi_madt_local_apic_override { + struct acpi_subtable_header header; + u16 reserved; /* Reserved, must be zero */ + u64 address; /* APIC physical address */ +}; + +/* 6: I/O Sapic */ + +struct acpi_madt_io_sapic { + struct acpi_subtable_header header; + u8 id; /* I/O SAPIC ID */ + u8 reserved; /* Reserved, must be zero */ + u32 global_irq_base; /* Global interrupt for SAPIC start */ + u64 address; /* SAPIC physical address */ +}; + +/* 7: Local Sapic */ + +struct acpi_madt_local_sapic { + struct acpi_subtable_header header; + u8 processor_id; /* ACPI processor id */ + u8 id; /* SAPIC ID */ + u8 eid; /* SAPIC EID */ + u8 reserved[3]; /* Reserved, must be zero */ + u32 lapic_flags; + u32 uid; /* Numeric UID - ACPI 3.0 */ + char uid_string[1]; /* String UID - ACPI 3.0 */ +}; + +/* 8: Platform Interrupt Source */ + +struct acpi_madt_interrupt_source { + struct acpi_subtable_header header; + u16 inti_flags; + u8 type; /* 1=PMI, 2=INIT, 3=corrected */ + u8 id; /* Processor ID */ + u8 eid; /* Processor EID */ + u8 io_sapic_vector; /* Vector value for PMI interrupts */ + u32 global_irq; /* Global system interrupt */ + u32 flags; /* Interrupt Source Flags */ +}; + +/* Masks for Flags field above */ + +#define ACPI_MADT_CPEI_OVERRIDE (1) + +/* 9: Processor Local X2APIC (ACPI 4.0) */ + +struct acpi_madt_local_x2apic { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 local_apic_id; /* Processor x2APIC ID */ + u32 lapic_flags; + u32 uid; /* ACPI processor UID */ +}; + +/* 10: Local X2APIC NMI (ACPI 4.0) */ + +struct acpi_madt_local_x2apic_nmi { + struct acpi_subtable_header header; + u16 inti_flags; + u32 uid; /* ACPI processor UID */ + u8 lint; /* LINTn to which NMI is connected */ + u8 reserved[3]; /* reserved - must be zero */ +}; + +/* 11: Generic Interrupt (ACPI 5.0 + ACPI 6.0 changes) */ + +struct acpi_madt_generic_interrupt { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 cpu_interface_number; + u32 uid; + u32 flags; + u32 parking_version; + u32 performance_interrupt; + u64 parked_address; + u64 base_address; + u64 gicv_base_address; + u64 gich_base_address; + u32 vgic_interrupt; + u64 gicr_base_address; + u64 arm_mpidr; + u8 efficiency_class; + u8 reserved2[3]; +}; + +/* Masks for Flags field above */ + +/* ACPI_MADT_ENABLED (1) Processor is usable if set */ +#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */ +#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */ + +/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */ + +struct acpi_madt_generic_distributor { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 gic_id; + u64 base_address; + u32 global_irq_base; + u8 version; + u8 reserved2[3]; /* reserved - must be zero */ +}; + +/* Values for Version field above */ + +enum acpi_madt_gic_version { + ACPI_MADT_GIC_VERSION_NONE = 0, + ACPI_MADT_GIC_VERSION_V1 = 1, + ACPI_MADT_GIC_VERSION_V2 = 2, + ACPI_MADT_GIC_VERSION_V3 = 3, + ACPI_MADT_GIC_VERSION_V4 = 4, + ACPI_MADT_GIC_VERSION_RESERVED = 5 /* 5 and greater are reserved */ +}; + +/* 13: Generic MSI Frame (ACPI 5.1) */ + +struct acpi_madt_generic_msi_frame { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 msi_frame_id; + u64 base_address; + u32 flags; + u16 spi_count; + u16 spi_base; +}; + +/* Masks for Flags field above */ + +#define 
ACPI_MADT_OVERRIDE_SPI_VALUES (1) + +/* 14: Generic Redistributor (ACPI 5.1) */ + +struct acpi_madt_generic_redistributor { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u64 base_address; + u32 length; +}; + +/* 15: Generic Translator (ACPI 6.0) */ + +struct acpi_madt_generic_translator { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 translation_id; + u64 base_address; + u32 reserved2; +}; + +/* + * Common flags fields for MADT subtables + */ + +/* MADT Local APIC flags */ + +#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */ + +/* MADT MPS INTI flags (inti_flags) */ + +#define ACPI_MADT_POLARITY_MASK (3) /* 00-01: Polarity of APIC I/O input signals */ +#define ACPI_MADT_TRIGGER_MASK (3<<2) /* 02-03: Trigger mode of APIC input signals */ + +/* Values for MPS INTI flags */ + +#define ACPI_MADT_POLARITY_CONFORMS 0 +#define ACPI_MADT_POLARITY_ACTIVE_HIGH 1 +#define ACPI_MADT_POLARITY_RESERVED 2 +#define ACPI_MADT_POLARITY_ACTIVE_LOW 3 + +#define ACPI_MADT_TRIGGER_CONFORMS (0) +#define ACPI_MADT_TRIGGER_EDGE (1<<2) +#define ACPI_MADT_TRIGGER_RESERVED (2<<2) +#define ACPI_MADT_TRIGGER_LEVEL (3<<2) + +/******************************************************************************* + * + * MCFG - PCI Memory Mapped Configuration table and subtable + * Version 1 + * + * Conforms to "PCI Firmware Specification", Revision 3.0, June 20, 2005 + * + ******************************************************************************/ + +struct acpi_table_mcfg { + struct acpi_table_header header; /* Common ACPI table header */ + u8 reserved[8]; +}; + +/* Subtable */ + +struct acpi_mcfg_allocation { + u64 address; /* Base address, processor-relative */ + u16 pci_segment; /* PCI segment group number */ + u8 start_bus_number; /* Starting PCI Bus number */ + u8 end_bus_number; /* Final PCI Bus number */ + u32 reserved; +}; + +/******************************************************************************* + * + * MCHI - Management Controller Host Interface Table + * Version 1 + * + * Conforms to "Management Component Transport Protocol (MCTP) Host + * Interface Specification", Revision 1.0.0a, October 13, 2009 + * + ******************************************************************************/ + +struct acpi_table_mchi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 interface_type; + u8 protocol; + u64 protocol_data; + u8 interrupt_type; + u8 gpe; + u8 pci_device_flag; + u32 global_interrupt; + struct acpi_generic_address control_register; + u8 pci_segment; + u8 pci_bus; + u8 pci_device; + u8 pci_function; +}; + +/******************************************************************************* + * + * MPST - Memory Power State Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +#define ACPI_MPST_CHANNEL_INFO \ + u8 channel_id; \ + u8 reserved1[3]; \ + u16 power_node_count; \ + u16 reserved2; + +/* Main table */ + +struct acpi_table_mpst { + struct acpi_table_header header; /* Common ACPI table header */ + ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */ +}; + +/* Memory Platform Communication Channel Info */ + +struct acpi_mpst_channel { + ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */ +}; + +/* Memory Power Node Structure */ + +struct acpi_mpst_power_node { + u8 flags; + u8 reserved1; + u16 node_id; + u32 length; + u64 range_address; + u64 range_length; + u32 num_power_states; + u32 
num_physical_components; +}; + +/* Values for Flags field above */ + +#define ACPI_MPST_ENABLED 1 +#define ACPI_MPST_POWER_MANAGED 2 +#define ACPI_MPST_HOT_PLUG_CAPABLE 4 + +/* Memory Power State Structure (follows POWER_NODE above) */ + +struct acpi_mpst_power_state { + u8 power_state; + u8 info_index; +}; + +/* Physical Component ID Structure (follows POWER_STATE above) */ + +struct acpi_mpst_component { + u16 component_id; +}; + +/* Memory Power State Characteristics Structure (follows all POWER_NODEs) */ + +struct acpi_mpst_data_hdr { + u16 characteristics_count; + u16 reserved; +}; + +struct acpi_mpst_power_data { + u8 structure_id; + u8 flags; + u16 reserved1; + u32 average_power; + u32 power_saving; + u64 exit_latency; + u64 reserved2; +}; + +/* Values for Flags field above */ + +#define ACPI_MPST_PRESERVE 1 +#define ACPI_MPST_AUTOENTRY 2 +#define ACPI_MPST_AUTOEXIT 4 + +/* Shared Memory Region (not part of an ACPI table) */ + +struct acpi_mpst_shared { + u32 signature; + u16 pcc_command; + u16 pcc_status; + u32 command_register; + u32 status_register; + u32 power_state_id; + u32 power_node_id; + u64 energy_consumed; + u64 average_power; +}; + +/******************************************************************************* + * + * MSCT - Maximum System Characteristics Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_msct { + struct acpi_table_header header; /* Common ACPI table header */ + u32 proximity_offset; /* Location of proximity info struct(s) */ + u32 max_proximity_domains; /* Max number of proximity domains */ + u32 max_clock_domains; /* Max number of clock domains */ + u64 max_address; /* Max physical address in system */ +}; + +/* subtable - Maximum Proximity Domain Information. Version 1 */ + +struct acpi_msct_proximity { + u8 revision; + u8 length; + u32 range_start; /* Start of domain range */ + u32 range_end; /* End of domain range */ + u32 processor_capacity; + u64 memory_capacity; /* In bytes */ +}; + +/******************************************************************************* + * + * MSDM - Microsoft Data Management table + * + * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)", + * November 29, 2011. Copyright 2011 Microsoft + * + ******************************************************************************/ + +/* Basic MSDM table is only the common ACPI header */ + +struct acpi_table_msdm { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/******************************************************************************* + * + * MTMR - MID Timer Table + * Version 1 + * + * Conforms to "Simple Firmware Interface Specification", + * Draft 0.8.2, Oct 19, 2010 + * NOTE: The ACPI MTMR is equivalent to the SFI MTMR table. 
+ * + ******************************************************************************/ + +struct acpi_table_mtmr { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* MTMR entry */ + +struct acpi_mtmr_entry { + struct acpi_generic_address physical_address; + u32 frequency; + u32 irq; +}; + +/******************************************************************************* + * + * NFIT - NVDIMM Interface Table (ACPI 6.0+) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_nfit { + struct acpi_table_header header; /* Common ACPI table header */ + u32 reserved; /* Reserved, must be zero */ +}; + +/* Subtable header for NFIT */ + +struct acpi_nfit_header { + u16 type; + u16 length; +}; + +/* Values for subtable type in struct acpi_nfit_header */ + +enum acpi_nfit_type { + ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0, + ACPI_NFIT_TYPE_MEMORY_MAP = 1, + ACPI_NFIT_TYPE_INTERLEAVE = 2, + ACPI_NFIT_TYPE_SMBIOS = 3, + ACPI_NFIT_TYPE_CONTROL_REGION = 4, + ACPI_NFIT_TYPE_DATA_REGION = 5, + ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6, + ACPI_NFIT_TYPE_CAPABILITIES = 7, + ACPI_NFIT_TYPE_RESERVED = 8 /* 8 and greater are reserved */ +}; + +/* + * NFIT Subtables + */ + +/* 0: System Physical Address Range Structure */ + +struct acpi_nfit_system_address { + struct acpi_nfit_header header; + u16 range_index; + u16 flags; + u32 reserved; /* Reserved, must be zero */ + u32 proximity_domain; + u8 range_guid[16]; + u64 address; + u64 length; + u64 memory_mapping; +}; + +/* Flags */ + +#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */ +#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */ + +/* Range Type GUIDs appear in the include/acuuid.h file */ + +/* 1: Memory Device to System Address Range Map Structure */ + +struct acpi_nfit_memory_map { + struct acpi_nfit_header header; + u32 device_handle; + u16 physical_id; + u16 region_id; + u16 range_index; + u16 region_index; + u64 region_size; + u64 region_offset; + u64 address; + u16 interleave_index; + u16 interleave_ways; + u16 flags; + u16 reserved; /* Reserved, must be zero */ +}; + +/* Flags */ + +#define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */ +#define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */ +#define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */ +#define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */ +#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */ +#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */ +#define ACPI_NFIT_MEM_MAP_FAILED (1<<6) /* 06: Mapping to SPA failed */ + +/* 2: Interleave Structure */ + +struct acpi_nfit_interleave { + struct acpi_nfit_header header; + u16 interleave_index; + u16 reserved; /* Reserved, must be zero */ + u32 line_count; + u32 line_size; + u32 line_offset[1]; /* Variable length */ +}; + +/* 3: SMBIOS Management Information Structure */ + +struct acpi_nfit_smbios { + struct acpi_nfit_header header; + u32 reserved; /* Reserved, must be zero */ + u8 data[1]; /* Variable length */ +}; + +/* 4: NVDIMM Control Region Structure */ + +struct acpi_nfit_control_region { + struct acpi_nfit_header header; + u16 region_index; + u16 vendor_id; + u16 device_id; + u16 revision_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 subsystem_revision_id; + u8 valid_fields; + u8 manufacturing_location; + u16 
manufacturing_date; + u8 reserved[2]; /* Reserved, must be zero */ + u32 serial_number; + u16 code; + u16 windows; + u64 window_size; + u64 command_offset; + u64 command_size; + u64 status_offset; + u64 status_size; + u16 flags; + u8 reserved1[6]; /* Reserved, must be zero */ +}; + +/* Flags */ + +#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */ + +/* valid_fields bits */ + +#define ACPI_NFIT_CONTROL_MFG_INFO_VALID (1) /* Manufacturing fields are valid */ + +/* 5: NVDIMM Block Data Window Region Structure */ + +struct acpi_nfit_data_region { + struct acpi_nfit_header header; + u16 region_index; + u16 windows; + u64 offset; + u64 size; + u64 capacity; + u64 start_address; +}; + +/* 6: Flush Hint Address Structure */ + +struct acpi_nfit_flush_address { + struct acpi_nfit_header header; + u32 device_handle; + u16 hint_count; + u8 reserved[6]; /* Reserved, must be zero */ + u64 hint_address[1]; /* Variable length */ +}; + +/* 7: Platform Capabilities Structure */ + +struct acpi_nfit_capabilities { + struct acpi_nfit_header header; + u8 highest_capability; + u8 reserved[3]; /* Reserved, must be zero */ + u32 capabilities; + u32 reserved2; +}; + +/* Capabilities Flags */ + +#define ACPI_NFIT_CAPABILITY_CACHE_FLUSH (1) /* 00: Cache Flush to NVDIMM capable */ +#define ACPI_NFIT_CAPABILITY_MEM_FLUSH (1<<1) /* 01: Memory Flush to NVDIMM capable */ +#define ACPI_NFIT_CAPABILITY_MEM_MIRRORING (1<<2) /* 02: Memory Mirroring capable */ + +/* + * NFIT/DVDIMM device handle support - used as the _ADR for each NVDIMM + */ +struct nfit_device_handle { + u32 handle; +}; + +/* Device handle construction and extraction macros */ + +#define ACPI_NFIT_DIMM_NUMBER_MASK 0x0000000F +#define ACPI_NFIT_CHANNEL_NUMBER_MASK 0x000000F0 +#define ACPI_NFIT_MEMORY_ID_MASK 0x00000F00 +#define ACPI_NFIT_SOCKET_ID_MASK 0x0000F000 +#define ACPI_NFIT_NODE_ID_MASK 0x0FFF0000 + +#define ACPI_NFIT_DIMM_NUMBER_OFFSET 0 +#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4 +#define ACPI_NFIT_MEMORY_ID_OFFSET 8 +#define ACPI_NFIT_SOCKET_ID_OFFSET 12 +#define ACPI_NFIT_NODE_ID_OFFSET 16 + +/* Macro to construct a NFIT/NVDIMM device handle */ + +#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \ + ((dimm) | \ + ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \ + ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \ + ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \ + ((node) << ACPI_NFIT_NODE_ID_OFFSET)) + +/* Macros to extract individual fields from a NFIT/NVDIMM device handle */ + +#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \ + ((handle) & ACPI_NFIT_DIMM_NUMBER_MASK) + +#define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \ + (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET) + +#define ACPI_NFIT_GET_MEMORY_ID(handle) \ + (((handle) & ACPI_NFIT_MEMORY_ID_MASK) >> ACPI_NFIT_MEMORY_ID_OFFSET) + +#define ACPI_NFIT_GET_SOCKET_ID(handle) \ + (((handle) & ACPI_NFIT_SOCKET_ID_MASK) >> ACPI_NFIT_SOCKET_ID_OFFSET) + +#define ACPI_NFIT_GET_NODE_ID(handle) \ + (((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET) + +/******************************************************************************* + * + * PCCT - Platform Communications Channel Table (ACPI 5.0) + * Version 2 (ACPI 6.2) + * + ******************************************************************************/ + +struct acpi_table_pcct { + struct acpi_table_header header; /* Common ACPI table header */ + u32 flags; + u64 reserved; +}; + +/* Values for Flags field above */ + +#define ACPI_PCCT_DOORBELL 1 + +/* 
Values for subtable type in struct acpi_subtable_header */ + +enum acpi_pcct_type { + ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */ + ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, /* ACPI 6.2 */ + ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, /* ACPI 6.2 */ + ACPI_PCCT_TYPE_RESERVED = 5 /* 5 and greater are reserved */ +}; + +/* + * PCCT Subtables, correspond to Type in struct acpi_subtable_header + */ + +/* 0: Generic Communications Subspace */ + +struct acpi_pcct_subspace { + struct acpi_subtable_header header; + u8 reserved[6]; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +}; + +/* 1: HW-reduced Communications Subspace (ACPI 5.1) */ + +struct acpi_pcct_hw_reduced { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +}; + +/* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */ + +struct acpi_pcct_hw_reduced_type2 { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; + struct acpi_generic_address platform_ack_register; + u64 ack_preserve_mask; + u64 ack_write_mask; +}; + +/* 3: Extended PCC Master Subspace Type 3 (ACPI 6.2) */ + +struct acpi_pcct_ext_pcc_master { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved1; + u64 base_address; + u32 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u32 min_turnaround_time; + struct acpi_generic_address platform_ack_register; + u64 ack_preserve_mask; + u64 ack_set_mask; + u64 reserved2; + struct acpi_generic_address cmd_complete_register; + u64 cmd_complete_mask; + struct acpi_generic_address cmd_update_register; + u64 cmd_update_preserve_mask; + u64 cmd_update_set_mask; + struct acpi_generic_address error_status_register; + u64 error_status_mask; +}; + +/* 4: Extended PCC Slave Subspace Type 4 (ACPI 6.2) */ + +struct acpi_pcct_ext_pcc_slave { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved1; + u64 base_address; + u32 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u32 min_turnaround_time; + struct acpi_generic_address platform_ack_register; + u64 ack_preserve_mask; + u64 ack_set_mask; + u64 reserved2; + struct acpi_generic_address cmd_complete_register; + u64 cmd_complete_mask; + struct acpi_generic_address cmd_update_register; + u64 cmd_update_preserve_mask; + u64 cmd_update_set_mask; + struct acpi_generic_address error_status_register; + u64 error_status_mask; +}; + +/* Values for doorbell flags above */ + +#define ACPI_PCCT_INTERRUPT_POLARITY (1) +#define ACPI_PCCT_INTERRUPT_MODE (1<<1) + +/* + * PCC memory structures (not part of the ACPI table) + */ + +/* Shared Memory Region */ + +struct acpi_pcct_shared_memory { + u32 signature; + u16 command; + u16 status; +}; + +/* Extended PCC Subspace Shared Memory Region 
(ACPI 6.2) */ + +struct acpi_pcct_ext_pcc_shared_memory { + u32 signature; + u32 flags; + u32 length; + u32 command; +}; + +/******************************************************************************* + * + * PDTT - Platform Debug Trigger Table (ACPI 6.2) + * Version 0 + * + ******************************************************************************/ + +struct acpi_table_pdtt { + struct acpi_table_header header; /* Common ACPI table header */ + u8 trigger_count; + u8 reserved[3]; + u32 array_offset; +}; + +/* + * PDTT Communication Channel Identifier Structure. + * The number of these structures is defined by trigger_count above, + * starting at array_offset. + */ +struct acpi_pdtt_channel { + u8 subchannel_id; + u8 flags; +}; + +/* Flags for above */ + +#define ACPI_PDTT_RUNTIME_TRIGGER (1) +#define ACPI_PDTT_WAIT_COMPLETION (1<<1) + +/******************************************************************************* + * + * PMTT - Platform Memory Topology Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_pmtt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 reserved; +}; + +/* Common header for PMTT subtables that follow main table */ + +struct acpi_pmtt_header { + u8 type; + u8 reserved1; + u16 length; + u16 flags; + u16 reserved2; +}; + +/* Values for Type field above */ + +#define ACPI_PMTT_TYPE_SOCKET 0 +#define ACPI_PMTT_TYPE_CONTROLLER 1 +#define ACPI_PMTT_TYPE_DIMM 2 +#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFF are reserved */ + +/* Values for Flags field above */ + +#define ACPI_PMTT_TOP_LEVEL 0x0001 +#define ACPI_PMTT_PHYSICAL 0x0002 +#define ACPI_PMTT_MEMORY_TYPE 0x000C + +/* + * PMTT subtables, correspond to Type in struct acpi_pmtt_header + */ + +/* 0: Socket Structure */ + +struct acpi_pmtt_socket { + struct acpi_pmtt_header header; + u16 socket_id; + u16 reserved; +}; + +/* 1: Memory Controller subtable */ + +struct acpi_pmtt_controller { + struct acpi_pmtt_header header; + u32 read_latency; + u32 write_latency; + u32 read_bandwidth; + u32 write_bandwidth; + u16 access_width; + u16 alignment; + u16 reserved; + u16 domain_count; +}; + +/* 1a: Proximity Domain substructure */ + +struct acpi_pmtt_domain { + u32 proximity_domain; +}; + +/* 2: Physical Component Identifier (DIMM) */ + +struct acpi_pmtt_physical_component { + struct acpi_pmtt_header header; + u16 component_id; + u16 reserved; + u32 memory_size; + u32 bios_handle; +}; + +/******************************************************************************* + * + * PPTT - Processor Properties Topology Table (ACPI 6.2) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_pptt { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* Values for Type field above */ + +enum acpi_pptt_type { + ACPI_PPTT_TYPE_PROCESSOR = 0, + ACPI_PPTT_TYPE_CACHE = 1, + ACPI_PPTT_TYPE_ID = 2, + ACPI_PPTT_TYPE_RESERVED = 3 +}; + +/* 0: Processor Hierarchy Node Structure */ + +struct acpi_pptt_processor { + struct acpi_subtable_header header; + u16 reserved; + u32 flags; + u32 parent; + u32 acpi_processor_id; + u32 number_of_priv_resources; +}; + +/* Flags */ + +#define ACPI_PPTT_PHYSICAL_PACKAGE (1) +#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID (1<<1) +#define ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD (1<<2) /* ACPI 6.3 */ +#define ACPI_PPTT_ACPI_LEAF_NODE (1<<3) /* ACPI 6.3 */ +#define ACPI_PPTT_ACPI_IDENTICAL (1<<4) /* ACPI 6.3 */ + +/* 1: Cache 
Type Structure */ + +struct acpi_pptt_cache { + struct acpi_subtable_header header; + u16 reserved; + u32 flags; + u32 next_level_of_cache; + u32 size; + u32 number_of_sets; + u8 associativity; + u8 attributes; + u16 line_size; +}; + +/* Flags */ + +#define ACPI_PPTT_SIZE_PROPERTY_VALID (1) /* Physical property valid */ +#define ACPI_PPTT_NUMBER_OF_SETS_VALID (1<<1) /* Number of sets valid */ +#define ACPI_PPTT_ASSOCIATIVITY_VALID (1<<2) /* Associativity valid */ +#define ACPI_PPTT_ALLOCATION_TYPE_VALID (1<<3) /* Allocation type valid */ +#define ACPI_PPTT_CACHE_TYPE_VALID (1<<4) /* Cache type valid */ +#define ACPI_PPTT_WRITE_POLICY_VALID (1<<5) /* Write policy valid */ +#define ACPI_PPTT_LINE_SIZE_VALID (1<<6) /* Line size valid */ + +/* Masks for Attributes */ + +#define ACPI_PPTT_MASK_ALLOCATION_TYPE (0x03) /* Allocation type */ +#define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */ +#define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */ + +/* Attributes describing cache */ +#define ACPI_PPTT_CACHE_READ_ALLOCATE (0x0) /* Cache line is allocated on read */ +#define ACPI_PPTT_CACHE_WRITE_ALLOCATE (0x01) /* Cache line is allocated on write */ +#define ACPI_PPTT_CACHE_RW_ALLOCATE (0x02) /* Cache line is allocated on read and write */ +#define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT (0x03) /* Alternate representation of above */ + +#define ACPI_PPTT_CACHE_TYPE_DATA (0x0) /* Data cache */ +#define ACPI_PPTT_CACHE_TYPE_INSTR (1<<2) /* Instruction cache */ +#define ACPI_PPTT_CACHE_TYPE_UNIFIED (2<<2) /* Unified I & D cache */ +#define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT (3<<2) /* Alternate representation of above */ + +#define ACPI_PPTT_CACHE_POLICY_WB (0x0) /* Cache is write back */ +#define ACPI_PPTT_CACHE_POLICY_WT (1<<4) /* Cache is write through */ + +/* 2: ID Structure */ + +struct acpi_pptt_id { + struct acpi_subtable_header header; + u16 reserved; + u32 vendor_id; + u64 level1_id; + u64 level2_id; + u16 major_rev; + u16 minor_rev; + u16 spin_rev; +}; + +/******************************************************************************* + * + * RASF - RAS Feature Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_rasf { + struct acpi_table_header header; /* Common ACPI table header */ + u8 channel_id[12]; +}; + +/* RASF Platform Communication Channel Shared Memory Region */ + +struct acpi_rasf_shared_memory { + u32 signature; + u16 command; + u16 status; + u16 version; + u8 capabilities[16]; + u8 set_capabilities[16]; + u16 num_parameter_blocks; + u32 set_capabilities_status; +}; + +/* RASF Parameter Block Structure Header */ + +struct acpi_rasf_parameter_block { + u16 type; + u16 version; + u16 length; +}; + +/* RASF Parameter Block Structure for PATROL_SCRUB */ + +struct acpi_rasf_patrol_scrub_parameter { + struct acpi_rasf_parameter_block header; + u16 patrol_scrub_command; + u64 requested_address_range[2]; + u64 actual_address_range[2]; + u16 flags; + u8 requested_speed; +}; + +/* Masks for Flags and Speed fields above */ + +#define ACPI_RASF_SCRUBBER_RUNNING 1 +#define ACPI_RASF_SPEED (7<<1) +#define ACPI_RASF_SPEED_SLOW (0<<1) +#define ACPI_RASF_SPEED_MEDIUM (4<<1) +#define ACPI_RASF_SPEED_FAST (7<<1) + +/* Channel Commands */ + +enum acpi_rasf_commands { + ACPI_RASF_EXECUTE_RASF_COMMAND = 1 +}; + +/* Platform RAS Capabilities */ + +enum acpi_rasf_capabiliities { + ACPI_HW_PATROL_SCRUB_SUPPORTED = 0, + ACPI_SW_PATROL_SCRUB_EXPOSED = 1 +}; + +/* Patrol Scrub Commands */ + +enum 
acpi_rasf_patrol_scrub_commands { + ACPI_RASF_GET_PATROL_PARAMETERS = 1, + ACPI_RASF_START_PATROL_SCRUBBER = 2, + ACPI_RASF_STOP_PATROL_SCRUBBER = 3 +}; + +/* Channel Command flags */ + +#define ACPI_RASF_GENERATE_SCI (1<<15) + +/* Status values */ + +enum acpi_rasf_status { + ACPI_RASF_SUCCESS = 0, + ACPI_RASF_NOT_VALID = 1, + ACPI_RASF_NOT_SUPPORTED = 2, + ACPI_RASF_BUSY = 3, + ACPI_RASF_FAILED = 4, + ACPI_RASF_ABORTED = 5, + ACPI_RASF_INVALID_DATA = 6 +}; + +/* Status flags */ + +#define ACPI_RASF_COMMAND_COMPLETE (1) +#define ACPI_RASF_SCI_DOORBELL (1<<1) +#define ACPI_RASF_ERROR (1<<2) +#define ACPI_RASF_STATUS (0x1F<<3) + +/******************************************************************************* + * + * SBST - Smart Battery Specification Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_sbst { + struct acpi_table_header header; /* Common ACPI table header */ + u32 warning_level; + u32 low_level; + u32 critical_level; +}; + +/******************************************************************************* + * + * SDEI - Software Delegated Exception Interface Descriptor Table + * + * Conforms to "Software Delegated Exception Interface (SDEI)" ARM DEN0054A, + * May 8th, 2017. Copyright 2017 ARM Ltd. + * + ******************************************************************************/ + +struct acpi_table_sdei { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/******************************************************************************* + * + * SDEV - Secure Devices Table (ACPI 6.2) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_sdev { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +struct acpi_sdev_header { + u8 type; + u8 flags; + u16 length; +}; + +/* Values for subtable type above */ + +enum acpi_sdev_type { + ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0, + ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1, + ACPI_SDEV_TYPE_RESERVED = 2 /* 2 and greater are reserved */ +}; + +/* Values for flags above */ + +#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1) + +/* + * SDEV subtables + */ + +/* 0: Namespace Device Based Secure Device Structure */ + +struct acpi_sdev_namespace { + struct acpi_sdev_header header; + u16 device_id_offset; + u16 device_id_length; + u16 vendor_data_offset; + u16 vendor_data_length; +}; + +/* 1: PCIe Endpoint Device Based Device Structure */ + +struct acpi_sdev_pcie { + struct acpi_sdev_header header; + u16 segment; + u16 start_bus; + u16 path_offset; + u16 path_length; + u16 vendor_data_offset; + u16 vendor_data_length; +}; + +/* 1a: PCIe Endpoint path entry */ + +struct acpi_sdev_pcie_path { + u8 device; + u8 function; +}; + +/* Reset to default packing */ + +#pragma pack() + +#endif /* __ACTBL2_H__ */ diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h new file mode 100644 index 000000000..501f341d1 --- /dev/null +++ b/include/acpi/actbl3.h @@ -0,0 +1,676 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: actbl3.h - ACPI Table Definitions + * + * Copyright (C) 2000 - 2018, Intel Corp. 
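The SDEV namespace-device subtable above locates its device identifier and vendor data through offset/length pairs rather than fixed fields. A hypothetical user-space style sketch, mirroring the structures above and assuming, as the field names suggest, that the offsets are relative to the start of the subtable:

#include <stdint.h>
#include <stdio.h>

#pragma pack(1)
struct sdev_header {               /* mirrors struct acpi_sdev_header */
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;
};

struct sdev_namespace {            /* mirrors struct acpi_sdev_namespace */
        struct sdev_header header;
        uint16_t device_id_offset; /* from the start of this subtable */
        uint16_t device_id_length;
        uint16_t vendor_data_offset;
        uint16_t vendor_data_length;
};
#pragma pack()

/* Print the ACPI device identifier string carried after the fixed fields. */
static void print_sdev_device_id(const struct sdev_namespace *ns)
{
        const char *id = (const char *)ns + ns->device_id_offset;

        printf("secure device id: %.*s\n", (int)ns->device_id_length, id);
}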
+ * + *****************************************************************************/ + +#ifndef __ACTBL3_H__ +#define __ACTBL3_H__ + +/******************************************************************************* + * + * Additional ACPI Tables + * + * These tables are not consumed directly by the ACPICA subsystem, but are + * included here to support device drivers and the AML disassembler. + * + ******************************************************************************/ + +/* + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. + */ +#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */ +#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */ +#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */ +#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */ +#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */ +#define ACPI_SIG_STAO "STAO" /* Status Override table */ +#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ +#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */ +#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ +#define ACPI_SIG_VRTC "VRTC" /* Virtual Real Time Clock Table */ +#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */ +#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ +#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */ +#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ +#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */ +#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Migrations Table */ +#define ACPI_SIG_XENV "XENV" /* Xen Environment table */ +#define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */ + +/* + * All tables must be byte-packed to match the ACPI specification, since + * the tables are provided by the system BIOS. + */ +#pragma pack(1) + +/* + * Note: C bitfields are not used for this reason: + * + * "Bitfields are great and easy to read, but unfortunately the C language + * does not specify the layout of bitfields in memory, which means they are + * essentially useless for dealing with packed data in on-disk formats or + * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me, + * this decision was a design error in C. Ritchie could have picked an order + * and stuck with it." Norman Ramsey. + * See http://stackoverflow.com/a/1053662/41661 + */ + +/******************************************************************************* + * + * SLIC - Software Licensing Description Table + * + * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)", + * November 29, 2011. 
Copyright 2011 Microsoft + * + ******************************************************************************/ + +/* Basic SLIC table is only the common ACPI header */ + +struct acpi_table_slic { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/******************************************************************************* + * + * SLIT - System Locality Distance Information Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_slit { + struct acpi_table_header header; /* Common ACPI table header */ + u64 locality_count; + u8 entry[1]; /* Real size = localities^2 */ +}; + +/******************************************************************************* + * + * SPCR - Serial Port Console Redirection table + * Version 2 + * + * Conforms to "Serial Port Console Redirection Table", + * Version 1.03, August 10, 2015 + * + ******************************************************************************/ + +struct acpi_table_spcr { + struct acpi_table_header header; /* Common ACPI table header */ + u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ + u8 reserved[3]; + struct acpi_generic_address serial_port; + u8 interrupt_type; + u8 pc_interrupt; + u32 interrupt; + u8 baud_rate; + u8 parity; + u8 stop_bits; + u8 flow_control; + u8 terminal_type; + u8 reserved1; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u32 pci_flags; + u8 pci_segment; + u32 reserved2; +}; + +/* Masks for pci_flags field above */ + +#define ACPI_SPCR_DO_NOT_DISABLE (1) + +/* Values for Interface Type: See the definition of the DBG2 table */ + +/******************************************************************************* + * + * SPMI - Server Platform Management Interface table + * Version 5 + * + * Conforms to "Intelligent Platform Management Interface Specification + * Second Generation v2.0", Document Revision 1.0, February 12, 2004 with + * June 12, 2009 markup. 
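The SLIT entry array above is a flattened locality_count x locality_count matrix of relative distances, one byte per (from, to) pair, so a lookup is plain row-major indexing. A minimal sketch, assuming a table already mapped into memory and the same byte packing the real table uses:

#include <stdint.h>

#pragma pack(1)
struct slit_like {                 /* stand-in for struct acpi_table_slit */
        uint8_t  acpi_header[36];  /* struct acpi_table_header is 36 bytes */
        uint64_t locality_count;
        uint8_t  entry[];          /* locality_count * locality_count bytes */
};
#pragma pack()

/* Relative distance from domain 'from' to domain 'to' (10 means local). */
static uint8_t slit_distance(const struct slit_like *slit,
                             uint64_t from, uint64_t to)
{
        return slit->entry[from * slit->locality_count + to];
}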
+ * + ******************************************************************************/ + +struct acpi_table_spmi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 interface_type; + u8 reserved; /* Must be 1 */ + u16 spec_revision; /* Version of IPMI */ + u8 interrupt_type; + u8 gpe_number; /* GPE assigned */ + u8 reserved1; + u8 pci_device_flag; + u32 interrupt; + struct acpi_generic_address ipmi_register; + u8 pci_segment; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u8 reserved2; +}; + +/* Values for interface_type above */ + +enum acpi_spmi_interface_types { + ACPI_SPMI_NOT_USED = 0, + ACPI_SPMI_KEYBOARD = 1, + ACPI_SPMI_SMI = 2, + ACPI_SPMI_BLOCK_TRANSFER = 3, + ACPI_SPMI_SMBUS = 4, + ACPI_SPMI_RESERVED = 5 /* 5 and above are reserved */ +}; + +/******************************************************************************* + * + * SRAT - System Resource Affinity Table + * Version 3 + * + ******************************************************************************/ + +struct acpi_table_srat { + struct acpi_table_header header; /* Common ACPI table header */ + u32 table_revision; /* Must be value '1' */ + u64 reserved; /* Reserved, must be zero */ +}; + +/* Values for subtable type in struct acpi_subtable_header */ + +enum acpi_srat_type { + ACPI_SRAT_TYPE_CPU_AFFINITY = 0, + ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, + ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, + ACPI_SRAT_TYPE_GICC_AFFINITY = 3, + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */ + ACPI_SRAT_TYPE_RESERVED = 5 /* 5 and greater are reserved */ +}; + +/* + * SRAT Subtables, correspond to Type in struct acpi_subtable_header + */ + +/* 0: Processor Local APIC/SAPIC Affinity */ + +struct acpi_srat_cpu_affinity { + struct acpi_subtable_header header; + u8 proximity_domain_lo; + u8 apic_id; + u32 flags; + u8 local_sapic_eid; + u8 proximity_domain_hi[3]; + u32 clock_domain; +}; + +/* Flags */ + +#define ACPI_SRAT_CPU_USE_AFFINITY (1) /* 00: Use affinity structure */ + +/* 1: Memory Affinity */ + +struct acpi_srat_mem_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u16 reserved; /* Reserved, must be zero */ + u64 base_address; + u64 length; + u32 reserved1; + u32 flags; + u64 reserved2; /* Reserved, must be zero */ +}; + +/* Flags */ + +#define ACPI_SRAT_MEM_ENABLED (1) /* 00: Use affinity structure */ +#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */ +#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */ + +/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */ + +struct acpi_srat_x2apic_cpu_affinity { + struct acpi_subtable_header header; + u16 reserved; /* Reserved, must be zero */ + u32 proximity_domain; + u32 apic_id; + u32 flags; + u32 clock_domain; + u32 reserved2; +}; + +/* Flags for struct acpi_srat_cpu_affinity and struct acpi_srat_x2apic_cpu_affinity */ + +#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ + +/* 3: GICC Affinity (ACPI 5.1) */ + +struct acpi_srat_gicc_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u32 acpi_processor_uid; + u32 flags; + u32 clock_domain; +}; + +/* Flags for struct acpi_srat_gicc_affinity */ + +#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */ + +/* 4: GCC ITS Affinity (ACPI 6.2) */ + +struct acpi_srat_gic_its_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u16 reserved; + u32 its_id; +}; + +/******************************************************************************* + * + * STAO - 
Status Override Table (_STA override) - ACPI 6.0 + * Version 1 + * + * Conforms to "ACPI Specification for Status Override Table" + * 6 January 2015 + * + ******************************************************************************/ + +struct acpi_table_stao { + struct acpi_table_header header; /* Common ACPI table header */ + u8 ignore_uart; +}; + +/******************************************************************************* + * + * TCPA - Trusted Computing Platform Alliance table + * Version 2 + * + * TCG Hardware Interface Table for TPM 1.2 Clients and Servers + * + * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", + * Version 1.2, Revision 8 + * February 27, 2017 + * + * NOTE: There are two versions of the table with the same signature -- + * the client version and the server version. The common platform_class + * field is used to differentiate the two types of tables. + * + ******************************************************************************/ + +struct acpi_table_tcpa_hdr { + struct acpi_table_header header; /* Common ACPI table header */ + u16 platform_class; +}; + +/* + * Values for platform_class above. + * This is how the client and server subtables are differentiated + */ +#define ACPI_TCPA_CLIENT_TABLE 0 +#define ACPI_TCPA_SERVER_TABLE 1 + +struct acpi_table_tcpa_client { + u32 minimum_log_length; /* Minimum length for the event log area */ + u64 log_address; /* Address of the event log area */ +}; + +struct acpi_table_tcpa_server { + u16 reserved; + u64 minimum_log_length; /* Minimum length for the event log area */ + u64 log_address; /* Address of the event log area */ + u16 spec_revision; + u8 device_flags; + u8 interrupt_flags; + u8 gpe_number; + u8 reserved2[3]; + u32 global_interrupt; + struct acpi_generic_address address; + u32 reserved3; + struct acpi_generic_address config_address; + u8 group; + u8 bus; /* PCI Bus/Segment/Function numbers */ + u8 device; + u8 function; +}; + +/* Values for device_flags above */ + +#define ACPI_TCPA_PCI_DEVICE (1) +#define ACPI_TCPA_BUS_PNP (1<<1) +#define ACPI_TCPA_ADDRESS_VALID (1<<2) + +/* Values for interrupt_flags above */ + +#define ACPI_TCPA_INTERRUPT_MODE (1) +#define ACPI_TCPA_INTERRUPT_POLARITY (1<<1) +#define ACPI_TCPA_SCI_VIA_GPE (1<<2) +#define ACPI_TCPA_GLOBAL_INTERRUPT (1<<3) + +/******************************************************************************* + * + * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table + * Version 4 + * + * TCG Hardware Interface Table for TPM 2.0 Clients and Servers + * + * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", + * Version 1.2, Revision 8 + * February 27, 2017 + * + ******************************************************************************/ + +struct acpi_table_tpm2 { + struct acpi_table_header header; /* Common ACPI table header */ + u16 platform_class; + u16 reserved; + u64 control_address; + u32 start_method; + + /* Platform-specific data follows */ +}; + +/* Values for start_method above */ + +#define ACPI_TPM2_NOT_ALLOWED 0 +#define ACPI_TPM2_RESERVED1 1 +#define ACPI_TPM2_START_METHOD 2 +#define ACPI_TPM2_RESERVED3 3 +#define ACPI_TPM2_RESERVED4 4 +#define ACPI_TPM2_RESERVED5 5 +#define ACPI_TPM2_MEMORY_MAPPED 6 +#define ACPI_TPM2_COMMAND_BUFFER 7 +#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8 +#define ACPI_TPM2_RESERVED9 9 +#define ACPI_TPM2_RESERVED10 10 +#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */ +#define ACPI_TPM2_RESERVED 12 + +/* Optional trailer appears after any start_method subtables 
*/ + +struct acpi_tpm2_trailer { + u8 method_parameters[12]; + u32 minimum_log_length; /* Minimum length for the event log area */ + u64 log_address; /* Address of the event log area */ +}; + +/* + * Subtables (start_method-specific) + */ + +/* 11: Start Method for ARM SMC (V1.2 Rev 8) */ + +struct acpi_tpm2_arm_smc { + u32 global_interrupt; + u8 interrupt_flags; + u8 operation_flags; + u16 reserved; + u32 function_id; +}; + +/* Values for interrupt_flags above */ + +#define ACPI_TPM2_INTERRUPT_SUPPORT (1) + +/* Values for operation_flags above */ + +#define ACPI_TPM2_IDLE_SUPPORT (1) + +/******************************************************************************* + * + * UEFI - UEFI Boot optimization Table + * Version 1 + * + * Conforms to "Unified Extensible Firmware Interface Specification", + * Version 2.3, May 8, 2009 + * + ******************************************************************************/ + +struct acpi_table_uefi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 identifier[16]; /* UUID identifier */ + u16 data_offset; /* Offset of remaining data in table */ +}; + +/******************************************************************************* + * + * VRTC - Virtual Real Time Clock Table + * Version 1 + * + * Conforms to "Simple Firmware Interface Specification", + * Draft 0.8.2, Oct 19, 2010 + * NOTE: The ACPI VRTC is equivalent to The SFI MRTC table. + * + ******************************************************************************/ + +struct acpi_table_vrtc { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* VRTC entry */ + +struct acpi_vrtc_entry { + struct acpi_generic_address physical_address; + u32 irq; +}; + +/******************************************************************************* + * + * WAET - Windows ACPI Emulated devices Table + * Version 1 + * + * Conforms to "Windows ACPI Emulated Devices Table", version 1.0, April 6, 2009 + * + ******************************************************************************/ + +struct acpi_table_waet { + struct acpi_table_header header; /* Common ACPI table header */ + u32 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_WAET_RTC_NO_ACK (1) /* RTC requires no int acknowledge */ +#define ACPI_WAET_TIMER_ONE_READ (1<<1) /* PM timer requires only one read */ + +/******************************************************************************* + * + * WDAT - Watchdog Action Table + * Version 1 + * + * Conforms to "Hardware Watchdog Timers Design Specification", + * Copyright 2006 Microsoft Corporation. 
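The TPM2 start_method values above select how the OS hands a command to the TPM. A small, purely illustrative helper that turns the common values into readable names (constants restated so the fragment stands alone):

#include <stdint.h>

#define ACPI_TPM2_START_METHOD                     2
#define ACPI_TPM2_MEMORY_MAPPED                    6
#define ACPI_TPM2_COMMAND_BUFFER                   7
#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8
#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC      11

static const char *tpm2_start_method_name(uint32_t start_method)
{
        switch (start_method) {
        case ACPI_TPM2_START_METHOD:
                return "ACPI start method";
        case ACPI_TPM2_MEMORY_MAPPED:
                return "memory-mapped I/O";
        case ACPI_TPM2_COMMAND_BUFFER:
                return "command buffer";
        case ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD:
                return "command buffer + ACPI start method";
        case ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC:
                return "command buffer + ARM SMC";
        default:
                return "reserved or unknown";
        }
}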
+ * + ******************************************************************************/ + +struct acpi_table_wdat { + struct acpi_table_header header; /* Common ACPI table header */ + u32 header_length; /* Watchdog Header Length */ + u16 pci_segment; /* PCI Segment number */ + u8 pci_bus; /* PCI Bus number */ + u8 pci_device; /* PCI Device number */ + u8 pci_function; /* PCI Function number */ + u8 reserved[3]; + u32 timer_period; /* Period of one timer count (msec) */ + u32 max_count; /* Maximum counter value supported */ + u32 min_count; /* Minimum counter value */ + u8 flags; + u8 reserved2[3]; + u32 entries; /* Number of watchdog entries that follow */ +}; + +/* Masks for Flags field above */ + +#define ACPI_WDAT_ENABLED (1) +#define ACPI_WDAT_STOPPED 0x80 + +/* WDAT Instruction Entries (actions) */ + +struct acpi_wdat_entry { + u8 action; + u8 instruction; + u16 reserved; + struct acpi_generic_address register_region; + u32 value; /* Value used with Read/Write register */ + u32 mask; /* Bitmask required for this register instruction */ +}; + +/* Values for Action field above */ + +enum acpi_wdat_actions { + ACPI_WDAT_RESET = 1, + ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4, + ACPI_WDAT_GET_COUNTDOWN = 5, + ACPI_WDAT_SET_COUNTDOWN = 6, + ACPI_WDAT_GET_RUNNING_STATE = 8, + ACPI_WDAT_SET_RUNNING_STATE = 9, + ACPI_WDAT_GET_STOPPED_STATE = 10, + ACPI_WDAT_SET_STOPPED_STATE = 11, + ACPI_WDAT_GET_REBOOT = 16, + ACPI_WDAT_SET_REBOOT = 17, + ACPI_WDAT_GET_SHUTDOWN = 18, + ACPI_WDAT_SET_SHUTDOWN = 19, + ACPI_WDAT_GET_STATUS = 32, + ACPI_WDAT_SET_STATUS = 33, + ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */ +}; + +/* Values for Instruction field above */ + +enum acpi_wdat_instructions { + ACPI_WDAT_READ_VALUE = 0, + ACPI_WDAT_READ_COUNTDOWN = 1, + ACPI_WDAT_WRITE_VALUE = 2, + ACPI_WDAT_WRITE_COUNTDOWN = 3, + ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */ + ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */ +}; + +/******************************************************************************* + * + * WDDT - Watchdog Descriptor Table + * Version 1 + * + * Conforms to "Using the Intel ICH Family Watchdog Timer (WDT)", + * Version 001, September 2002 + * + ******************************************************************************/ + +struct acpi_table_wddt { + struct acpi_table_header header; /* Common ACPI table header */ + u16 spec_version; + u16 table_version; + u16 pci_vendor_id; + struct acpi_generic_address address; + u16 max_count; /* Maximum counter value supported */ + u16 min_count; /* Minimum counter value supported */ + u16 period; + u16 status; + u16 capability; +}; + +/* Flags for Status field above */ + +#define ACPI_WDDT_AVAILABLE (1) +#define ACPI_WDDT_ACTIVE (1<<1) +#define ACPI_WDDT_TCO_OS_OWNED (1<<2) +#define ACPI_WDDT_USER_RESET (1<<11) +#define ACPI_WDDT_WDT_RESET (1<<12) +#define ACPI_WDDT_POWER_FAIL (1<<13) +#define ACPI_WDDT_UNKNOWN_RESET (1<<14) + +/* Flags for Capability field above */ + +#define ACPI_WDDT_AUTO_RESET (1) +#define ACPI_WDDT_ALERT_SUPPORT (1<<1) + +/******************************************************************************* + * + * WDRT - Watchdog Resource Table + * Version 1 + * + * Conforms to "Watchdog Timer Hardware Requirements for Windows Server 2003", + * Version 1.01, August 28, 2006 + * + ******************************************************************************/ + +struct acpi_table_wdrt { + struct acpi_table_header header; /* Common ACPI table header */ + struct acpi_generic_address 
control_register; + struct acpi_generic_address count_register; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; /* PCI Bus number */ + u8 pci_device; /* PCI Device number */ + u8 pci_function; /* PCI Function number */ + u8 pci_segment; /* PCI Segment number */ + u16 max_count; /* Maximum counter value supported */ + u8 units; +}; + +/******************************************************************************* + * + * WPBT - Windows Platform Environment Table (ACPI 6.0) + * Version 1 + * + * Conforms to "Windows Platform Binary Table (WPBT)" 29 November 2011 + * + ******************************************************************************/ + +struct acpi_table_wpbt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 handoff_size; + u64 handoff_address; + u8 layout; + u8 type; + u16 arguments_length; +}; + +/******************************************************************************* + * + * WSMT - Windows SMM Security Migrations Table + * Version 1 + * + * Conforms to "Windows SMM Security Migrations Table", + * Version 1.0, April 18, 2016 + * + ******************************************************************************/ + +struct acpi_table_wsmt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 protection_flags; +}; + +/* Flags for protection_flags field above */ + +#define ACPI_WSMT_FIXED_COMM_BUFFERS (1) +#define ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION (2) +#define ACPI_WSMT_SYSTEM_RESOURCE_PROTECTION (4) + +/******************************************************************************* + * + * XENV - Xen Environment Table (ACPI 6.0) + * Version 1 + * + * Conforms to "ACPI Specification for Xen Environment Table" 4 January 2015 + * + ******************************************************************************/ + +struct acpi_table_xenv { + struct acpi_table_header header; /* Common ACPI table header */ + u64 grant_table_address; + u64 grant_table_size; + u32 event_interrupt; + u8 event_flags; +}; + +/* Reset to default packing */ + +#pragma pack() + +#endif /* __ACTBL3_H__ */ diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h new file mode 100644 index 000000000..9fc1dfc7f --- /dev/null +++ b/include/acpi/actypes.h @@ -0,0 +1,1288 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: actypes.h - Common data types for the entire ACPI subsystem + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACTYPES_H__ +#define __ACTYPES_H__ + +/* acpisrc:struct_defs -- for acpisrc conversion */ + +/* + * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent + * header and must be either 32 or 64. 16-bit ACPICA is no longer + * supported, as of 12/2006. + */ +#ifndef ACPI_MACHINE_WIDTH +#error ACPI_MACHINE_WIDTH not defined +#endif + +/* + * Data type ranges + * Note: These macros are designed to be compiler independent as well as + * working around problems that some 32-bit compilers have with 64-bit + * constants. 
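ACPI_MACHINE_WIDTH above must be supplied by an OS- or compiler-dependent header, which is what the #error enforces. As a hypothetical illustration only (the real platform headers added in this series make this choice in their own way), a host could derive it from the compiler's reported pointer size:

/* Illustrative host-header fragment, not part of the ACPICA headers. */
#ifndef ACPI_MACHINE_WIDTH
#if defined(__SIZEOF_POINTER__) && (__SIZEOF_POINTER__ == 8)
#define ACPI_MACHINE_WIDTH 64
#else
#define ACPI_MACHINE_WIDTH 32
#endif
#endif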
+ */ +#define ACPI_UINT8_MAX (u8) (~((u8) 0)) /* 0xFF */ +#define ACPI_UINT16_MAX (u16)(~((u16) 0)) /* 0xFFFF */ +#define ACPI_UINT32_MAX (u32)(~((u32) 0)) /* 0xFFFFFFFF */ +#define ACPI_UINT64_MAX (u64)(~((u64) 0)) /* 0xFFFFFFFFFFFFFFFF */ +#define ACPI_ASCII_MAX 0x7F + +/* + * Architecture-specific ACPICA Subsystem Data Types + * + * The goal of these types is to provide source code portability across + * 16-bit, 32-bit, and 64-bit targets. + * + * 1) The following types are of fixed size for all targets (16/32/64): + * + * u8 Logical boolean + * + * u8 8-bit (1 byte) unsigned value + * u16 16-bit (2 byte) unsigned value + * u32 32-bit (4 byte) unsigned value + * u64 64-bit (8 byte) unsigned value + * + * s16 16-bit (2 byte) signed value + * s32 32-bit (4 byte) signed value + * s64 64-bit (8 byte) signed value + * + * COMPILER_DEPENDENT_UINT64/s64 - These types are defined in the + * compiler-dependent header(s) and were introduced because there is no + * common 64-bit integer type across the various compilation models, as + * shown in the table below. + * + * Datatype LP64 ILP64 LLP64 ILP32 LP32 16bit + * char 8 8 8 8 8 8 + * short 16 16 16 16 16 16 + * _int32 32 + * int 32 64 32 32 16 16 + * long 64 64 32 32 32 32 + * long long 64 64 + * pointer 64 64 64 32 32 32 + * + * Note: ILP64 and LP32 are currently not supported. + * + * + * 2) These types represent the native word size of the target mode of the + * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are + * usually used for memory allocation, efficient loop counters, and array + * indexes. The types are similar to the size_t type in the C library and + * are required because there is no C type that consistently represents the + * native data width. acpi_size is needed because there is no guarantee + * that a kernel-level C library is present. + * + * acpi_size 16/32/64-bit unsigned value + * acpi_native_int 16/32/64-bit signed value + */ + +/******************************************************************************* + * + * Common types for all compilers, all targets + * + ******************************************************************************/ + +#ifndef ACPI_USE_SYSTEM_INTTYPES + +typedef unsigned char u8; +typedef unsigned short u16; +typedef short s16; +typedef COMPILER_DEPENDENT_UINT64 u64; +typedef COMPILER_DEPENDENT_INT64 s64; + +#endif /* ACPI_USE_SYSTEM_INTTYPES */ + +/* + * Value returned by acpi_os_get_thread_id. There is no standard "thread_id" + * across operating systems or even the various UNIX systems. Since ACPICA + * only needs the thread ID as a unique thread identifier, we use a u64 + * as the only common data type - it will accommodate any type of pointer or + * any type of integer. It is up to the host-dependent OSL to cast the + * native thread ID type to a u64 (in acpi_os_get_thread_id). 
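The portability contract above is that u8/u16/u32/u64 are fixed-width on every supported data model. A sketch of the kind of compile-time check a host port could carry to verify its choices (C11 static_assert; the typedefs shown are just one plausible host selection, not mandated by the headers):

#include <assert.h>              /* static_assert (C11) */

typedef unsigned char      u8;   /* example host choices */
typedef unsigned short     u16;
typedef unsigned int       u32;
typedef unsigned long long u64;

static_assert(sizeof(u8)  == 1, "u8 must be 1 byte");
static_assert(sizeof(u16) == 2, "u16 must be 2 bytes");
static_assert(sizeof(u32) == 4, "u32 must be 4 bytes");
static_assert(sizeof(u64) == 8, "u64 must be 8 bytes");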
+ */ +#define acpi_thread_id u64 + +/******************************************************************************* + * + * Types specific to 64-bit targets + * + ******************************************************************************/ + +#if ACPI_MACHINE_WIDTH == 64 + +#ifndef ACPI_USE_SYSTEM_INTTYPES + +typedef unsigned int u32; +typedef int s32; + +#endif /* ACPI_USE_SYSTEM_INTTYPES */ + +typedef s64 acpi_native_int; + +typedef u64 acpi_size; +typedef u64 acpi_io_address; +typedef u64 acpi_physical_address; + +#define ACPI_MAX_PTR ACPI_UINT64_MAX +#define ACPI_SIZE_MAX ACPI_UINT64_MAX + +#define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */ +#define ACPI_USE_NATIVE_MATH64 /* Has native 64-bit integer support */ + +/* + * In the case of the Itanium Processor Family (IPF), the hardware does not + * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED + * flag to indicate that special precautions must be taken to avoid alignment + * faults. (IA64 or ia64 is currently used by existing compilers to indicate + * IPF.) + * + * Note: EM64T and other X86-64 processors support misaligned transfers, + * so there is no need to define this flag. + */ +#if defined (__IA64__) || defined (__ia64__) +#define ACPI_MISALIGNMENT_NOT_SUPPORTED +#endif + +/******************************************************************************* + * + * Types specific to 32-bit targets + * + ******************************************************************************/ + +#elif ACPI_MACHINE_WIDTH == 32 + +#ifndef ACPI_USE_SYSTEM_INTTYPES + +typedef unsigned int u32; +typedef int s32; + +#endif /* ACPI_USE_SYSTEM_INTTYPES */ + +typedef s32 acpi_native_int; + +typedef u32 acpi_size; + +#ifdef ACPI_32BIT_PHYSICAL_ADDRESS + +/* + * OSPMs can define this to shrink the size of the structures for 32-bit + * none PAE environment. ASL compiler may always define this to generate + * 32-bit OSPM compliant tables. + */ +typedef u32 acpi_io_address; +typedef u32 acpi_physical_address; + +#else /* ACPI_32BIT_PHYSICAL_ADDRESS */ + +/* + * It is reported that, after some calculations, the physical addresses can + * wrap over the 32-bit boundary on 32-bit PAE environment. + * https://bugzilla.kernel.org/show_bug.cgi?id=87971 + */ +typedef u64 acpi_io_address; +typedef u64 acpi_physical_address; + +#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */ + +#define ACPI_MAX_PTR ACPI_UINT32_MAX +#define ACPI_SIZE_MAX ACPI_UINT32_MAX + +#else + +/* ACPI_MACHINE_WIDTH must be either 64 or 32 */ + +#error unknown ACPI_MACHINE_WIDTH +#endif + +/******************************************************************************* + * + * OS-dependent types + * + * If the defaults below are not appropriate for the host system, they can + * be defined in the OS-specific header, and this will take precedence. + * + ******************************************************************************/ + +/* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ + +#ifndef acpi_cpu_flags +#define acpi_cpu_flags acpi_size +#endif + +/* Object returned from acpi_os_create_cache */ + +#ifndef acpi_cache_t +#ifdef ACPI_USE_LOCAL_CACHE +#define acpi_cache_t struct acpi_memory_list +#else +#define acpi_cache_t void * +#endif +#endif + +/* + * Synchronization objects - Mutexes, Semaphores, and spin_locks + */ +#if (ACPI_MUTEX_TYPE == ACPI_BINARY_SEMAPHORE) +/* + * These macros are used if the host OS does not support a mutex object. + * Map the OSL Mutex interfaces to binary semaphores. 
+ */ +#define acpi_mutex acpi_semaphore +#define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle) +#define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle) +#define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time) +#define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1) +#endif + +/* Configurable types for synchronization objects */ + +#ifndef acpi_spinlock +#define acpi_spinlock void * +#endif + +#ifndef acpi_raw_spinlock +#define acpi_raw_spinlock acpi_spinlock +#endif + +#ifndef acpi_semaphore +#define acpi_semaphore void * +#endif + +#ifndef acpi_mutex +#define acpi_mutex void * +#endif + +/******************************************************************************* + * + * Compiler-dependent types + * + * If the defaults below are not appropriate for the host compiler, they can + * be defined in the compiler-specific header, and this will take precedence. + * + ******************************************************************************/ + +/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ + +#ifndef acpi_uintptr_t +#define acpi_uintptr_t void * +#endif + +/* + * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because + * some compilers can catch printf format string problems + */ +#ifndef ACPI_PRINTF_LIKE +#define ACPI_PRINTF_LIKE(c) +#endif + +/* + * Some compilers complain about unused variables. Sometimes we don't want + * to use all the variables (for example, _acpi_module_name). This allows us + * to tell the compiler in a per-variable manner that a variable + * is unused + */ +#ifndef ACPI_UNUSED_VAR +#define ACPI_UNUSED_VAR +#endif + +/* + * All ACPICA external functions that are available to the rest of the + * kernel are tagged with these macros which can be defined as appropriate + * for the host. + * + * Notes: + * ACPI_EXPORT_SYMBOL_INIT is used for initialization and termination + * interfaces that may need special processing. + * ACPI_EXPORT_SYMBOL is used for all other public external functions. + */ +#ifndef ACPI_EXPORT_SYMBOL_INIT +#define ACPI_EXPORT_SYMBOL_INIT(symbol) +#endif + +#ifndef ACPI_EXPORT_SYMBOL +#define ACPI_EXPORT_SYMBOL(symbol) +#endif + +/* + * Compiler/Clibrary-dependent debug initialization. Used for ACPICA + * utilities only. 
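ACPI_PRINTF_LIKE above is a hook for compilers that can type-check printf-style calls. On GCC-compatible compilers it can be mapped to the format attribute, roughly as the GCC platform header elsewhere in this series does; argument c is the position of the format string and c + 1 the first variadic argument (sketch; the function shown is hypothetical):

#define ACPI_PRINTF_LIKE(c) __attribute__((__format__(__printf__, (c), (c) + 1)))

/* Hypothetical example: the format string is parameter 2, varargs start at 3. */
void acpi_example_log(int level, const char *format, ...) ACPI_PRINTF_LIKE(2);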
+ */ +#ifndef ACPI_DEBUG_INITIALIZE +#define ACPI_DEBUG_INITIALIZE() +#endif + +/******************************************************************************* + * + * Configuration + * + ******************************************************************************/ + +#ifdef ACPI_NO_MEM_ALLOCATIONS + +#define ACPI_ALLOCATE(a) NULL +#define ACPI_ALLOCATE_ZEROED(a) NULL +#define ACPI_FREE(a) +#define ACPI_MEM_TRACKING(a) + +#else /* ACPI_NO_MEM_ALLOCATIONS */ + +#ifdef ACPI_DBG_TRACK_ALLOCATIONS +/* + * Memory allocation tracking (used by acpi_exec to detect memory leaks) + */ +#define ACPI_MEM_PARAMETERS _COMPONENT, _acpi_module_name, __LINE__ +#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track ((acpi_size) (a), ACPI_MEM_PARAMETERS) +#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track ((acpi_size) (a), ACPI_MEM_PARAMETERS) +#define ACPI_FREE(a) acpi_ut_free_and_track (a, ACPI_MEM_PARAMETERS) +#define ACPI_MEM_TRACKING(a) a + +#else +/* + * Normal memory allocation directly via the OS services layer + */ +#define ACPI_ALLOCATE(a) acpi_os_allocate ((acpi_size) (a)) +#define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed ((acpi_size) (a)) +#define ACPI_FREE(a) acpi_os_free (a) +#define ACPI_MEM_TRACKING(a) + +#endif /* ACPI_DBG_TRACK_ALLOCATIONS */ + +#endif /* ACPI_NO_MEM_ALLOCATIONS */ + +/****************************************************************************** + * + * ACPI Specification constants (Do not change unless the specification + * changes) + * + *****************************************************************************/ + +/* Number of distinct FADT-based GPE register blocks (GPE0 and GPE1) */ + +#define ACPI_MAX_GPE_BLOCKS 2 + +/* Default ACPI register widths */ + +#define ACPI_GPE_REGISTER_WIDTH 8 +#define ACPI_PM1_REGISTER_WIDTH 16 +#define ACPI_PM2_REGISTER_WIDTH 8 +#define ACPI_PM_TIMER_WIDTH 32 +#define ACPI_RESET_REGISTER_WIDTH 8 + +/* Names within the namespace are 4 bytes long */ + +#define ACPI_NAME_SIZE 4 +#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ +#define ACPI_PATH_SEPARATOR '.' 
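To make the name and path constants above concrete: every namespace name is exactly ACPI_NAME_SIZE characters, and the segments of a relative pathname are joined by ACPI_PATH_SEPARATOR, so an N-segment relative path fits in N * ACPI_PATH_SEGMENT_LENGTH bytes including the terminating NUL. A minimal sketch (the path string is made up for illustration):

#include <stdio.h>

#define ACPI_NAME_SIZE           4
#define ACPI_PATH_SEGMENT_LENGTH 5    /* 4 name chars + '.' or NUL */
#define ACPI_PATH_SEPARATOR      '.'

int main(void)
{
        const char *path = "_SB_.PCI0.ISA_";   /* three 4-char segments */
        int segments = 3;

        printf("buffer needed: %d bytes\n", segments * ACPI_PATH_SEGMENT_LENGTH);
        printf("second segment: %.*s\n", ACPI_NAME_SIZE,
               path + ACPI_PATH_SEGMENT_LENGTH);
        return 0;
}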
+ +/* Sizes for ACPI table headers */ + +#define ACPI_OEM_ID_SIZE 6 +#define ACPI_OEM_TABLE_ID_SIZE 8 + +/* ACPI/PNP hardware IDs */ + +#define PCI_ROOT_HID_STRING "PNP0A03" +#define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08" + +/* PM Timer ticks per second (HZ) */ + +#define ACPI_PM_TIMER_FREQUENCY 3579545 + +/******************************************************************************* + * + * Independent types + * + ******************************************************************************/ + +/* Logical defines and NULL */ + +#ifdef FALSE +#undef FALSE +#endif +#define FALSE (1 == 0) + +#ifdef TRUE +#undef TRUE +#endif +#define TRUE (1 == 1) + +#ifndef NULL +#define NULL (void *) 0 +#endif + +/* + * Miscellaneous types + */ +typedef u32 acpi_status; /* All ACPI Exceptions */ +typedef u32 acpi_name; /* 4-byte ACPI name */ +typedef char *acpi_string; /* Null terminated ASCII string */ +typedef void *acpi_handle; /* Actually a ptr to a NS Node */ + +/* Time constants for timer calculations */ + +#define ACPI_MSEC_PER_SEC 1000L + +#define ACPI_USEC_PER_MSEC 1000L +#define ACPI_USEC_PER_SEC 1000000L + +#define ACPI_100NSEC_PER_USEC 10L +#define ACPI_100NSEC_PER_MSEC 10000L +#define ACPI_100NSEC_PER_SEC 10000000L + +#define ACPI_NSEC_PER_USEC 1000L +#define ACPI_NSEC_PER_MSEC 1000000L +#define ACPI_NSEC_PER_SEC 1000000000L + +#define ACPI_TIME_AFTER(a, b) ((s64)((b) - (a)) < 0) + +/* Owner IDs are used to track namespace nodes for selective deletion */ + +typedef u8 acpi_owner_id; +#define ACPI_OWNER_ID_MAX 0xFF + +#define ACPI_INTEGER_BIT_SIZE 64 +#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ +#define ACPI_MAX64_DECIMAL_DIGITS 20 +#define ACPI_MAX32_DECIMAL_DIGITS 10 +#define ACPI_MAX16_DECIMAL_DIGITS 5 +#define ACPI_MAX8_DECIMAL_DIGITS 3 + +/* + * Constants with special meanings + */ +#define ACPI_ROOT_OBJECT ((acpi_handle) ACPI_TO_POINTER (ACPI_MAX_PTR)) +#define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ +#define ACPI_DO_NOT_WAIT 0 + +/* + * Obsolete: Acpi integer width. In ACPI version 1 (1996), integers are + * 32 bits. In ACPI version 2 (2000) and later, integers are max 64 bits. + * Note that this pertains to the ACPI integer type only, not to other + * integers used in the implementation of the ACPICA subsystem. + * + * 01/2010: This type is obsolete and has been removed from the entire ACPICA + * code base. It remains here for compatibility with device drivers that use + * the type. However, it will be removed in the future. 
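ACPI_TIME_AFTER above is the usual wraparound-safe ordering test: the subtraction is performed in the unsigned type and the result reinterpreted as signed, so it remains correct when the timestamp counter wraps between the two samples. A small demonstration (two's-complement conversion assumed, as kernel code does):

#include <stdint.h>
#include <stdio.h>

typedef int64_t  s64;
typedef uint64_t u64;

#define ACPI_TIME_AFTER(a, b) ((s64)((b) - (a)) < 0)

int main(void)
{
        u64 before_wrap = UINT64_MAX - 5;   /* sampled just before the wrap */
        u64 after_wrap  = 10;               /* sampled just after the wrap */

        /* A plain '>' compare gets the wrapped case wrong; the macro does not. */
        printf("plain: %d  ACPI_TIME_AFTER: %d\n",
               after_wrap > before_wrap,
               ACPI_TIME_AFTER(after_wrap, before_wrap));
        return 0;
}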
+ */ +typedef u64 acpi_integer; +#define ACPI_INTEGER_MAX ACPI_UINT64_MAX + +/******************************************************************************* + * + * Commonly used macros + * + ******************************************************************************/ + +/* Data manipulation */ + +#define ACPI_LOBYTE(integer) ((u8) (u16)(integer)) +#define ACPI_HIBYTE(integer) ((u8) (((u16)(integer)) >> 8)) +#define ACPI_LOWORD(integer) ((u16) (u32)(integer)) +#define ACPI_HIWORD(integer) ((u16)(((u32)(integer)) >> 16)) +#define ACPI_LODWORD(integer64) ((u32) (u64)(integer64)) +#define ACPI_HIDWORD(integer64) ((u32)(((u64)(integer64)) >> 32)) + +#define ACPI_SET_BIT(target,bit) ((target) |= (bit)) +#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) +#define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) +#define ACPI_MAX(a,b) (((a)>(b))?(a):(b)) + +/* Size calculation */ + +#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) + +/* Pointer manipulation */ + +#define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p)) +#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p)) +#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b))) +#define ACPI_SUB_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) - (acpi_size)(b))) +#define ACPI_PTR_DIFF(a, b) ((acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))) + +/* Pointer/Integer type conversions */ + +#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) 0, (acpi_size) (i)) +#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0) +#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0) +#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) +#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) + +/* Optimizations for 4-character (32-bit) acpi_name manipulation */ + +#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED +#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) +#define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src))) +#else +#define ACPI_COMPARE_NAME(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) +#define ACPI_MOVE_NAME(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE)) +#endif + +/* Support for the special RSDP signature (8 characters) */ + +#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8)) +#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8)) + +/* + * Algorithm to obtain access bit or byte width. + * Can be used with access_width of struct acpi_generic_address and access_size of + * struct acpi_resource_generic_register. 
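The pointer macros above exist mainly to support byte-offset arithmetic over packed table data, for example stepping through the variable-length subtables that follow a table header. A self-contained sketch of that pattern under stated assumptions (types and the two macros are restated here; the header layout mirrors struct acpi_subtable_header):

#include <stddef.h>
#include <stdint.h>

typedef uint8_t  u8;
typedef uint32_t u32;
typedef size_t   acpi_size;

#define ACPI_CAST_PTR(t, p)   ((t *) (void *) (p))
#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR(t, (ACPI_CAST_PTR(u8, (a)) + (acpi_size)(b)))

struct subtable_header {           /* mirrors struct acpi_subtable_header */
        u8 type;
        u8 length;                 /* length of this whole subtable, in bytes */
};

/* Call visit() for every well-formed subtable in [start, start + total). */
static void for_each_subtable(void *start, u32 total,
                              void (*visit)(struct subtable_header *))
{
        struct subtable_header *sub = ACPI_CAST_PTR(struct subtable_header, start);
        u8 *end = ACPI_ADD_PTR(u8, start, total);

        while (ACPI_CAST_PTR(u8, sub) + sizeof(*sub) <= end &&
               sub->length >= sizeof(*sub) &&
               ACPI_ADD_PTR(u8, sub, sub->length) <= end) {
                visit(sub);
                sub = ACPI_ADD_PTR(struct subtable_header, sub, sub->length);
        }
}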
+ */ +#define ACPI_ACCESS_BIT_SHIFT 2 +#define ACPI_ACCESS_BYTE_SHIFT -1 +#define ACPI_ACCESS_BIT_MAX (31 - ACPI_ACCESS_BIT_SHIFT) +#define ACPI_ACCESS_BYTE_MAX (31 - ACPI_ACCESS_BYTE_SHIFT) +#define ACPI_ACCESS_BIT_DEFAULT (8 - ACPI_ACCESS_BIT_SHIFT) +#define ACPI_ACCESS_BYTE_DEFAULT (8 - ACPI_ACCESS_BYTE_SHIFT) +#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BIT_SHIFT)) +#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BYTE_SHIFT)) + +/******************************************************************************* + * + * Miscellaneous constants + * + ******************************************************************************/ + +/* + * Initialization sequence options + */ +#define ACPI_FULL_INITIALIZATION 0x0000 +#define ACPI_NO_FACS_INIT 0x0001 +#define ACPI_NO_ACPI_ENABLE 0x0002 +#define ACPI_NO_HARDWARE_INIT 0x0004 +#define ACPI_NO_EVENT_INIT 0x0008 +#define ACPI_NO_HANDLER_INIT 0x0010 +#define ACPI_NO_OBJECT_INIT 0x0020 +#define ACPI_NO_DEVICE_INIT 0x0040 +#define ACPI_NO_ADDRESS_SPACE_INIT 0x0080 + +/* + * Initialization state + */ +#define ACPI_SUBSYSTEM_INITIALIZE 0x01 +#define ACPI_INITIALIZED_OK 0x02 + +/* + * Power state values + */ +#define ACPI_STATE_UNKNOWN (u8) 0xFF + +#define ACPI_STATE_S0 (u8) 0 +#define ACPI_STATE_S1 (u8) 1 +#define ACPI_STATE_S2 (u8) 2 +#define ACPI_STATE_S3 (u8) 3 +#define ACPI_STATE_S4 (u8) 4 +#define ACPI_STATE_S5 (u8) 5 +#define ACPI_S_STATES_MAX ACPI_STATE_S5 +#define ACPI_S_STATE_COUNT 6 + +#define ACPI_STATE_D0 (u8) 0 +#define ACPI_STATE_D1 (u8) 1 +#define ACPI_STATE_D2 (u8) 2 +#define ACPI_STATE_D3_HOT (u8) 3 +#define ACPI_STATE_D3 (u8) 4 +#define ACPI_STATE_D3_COLD ACPI_STATE_D3 +#define ACPI_D_STATES_MAX ACPI_STATE_D3 +#define ACPI_D_STATE_COUNT 5 + +#define ACPI_STATE_C0 (u8) 0 +#define ACPI_STATE_C1 (u8) 1 +#define ACPI_STATE_C2 (u8) 2 +#define ACPI_STATE_C3 (u8) 3 +#define ACPI_C_STATES_MAX ACPI_STATE_C3 +#define ACPI_C_STATE_COUNT 4 + +/* + * Sleep type invalid value + */ +#define ACPI_SLEEP_TYPE_MAX 0x7 +#define ACPI_SLEEP_TYPE_INVALID 0xFF + +/* + * Standard notify values + */ +#define ACPI_NOTIFY_BUS_CHECK (u8) 0x00 +#define ACPI_NOTIFY_DEVICE_CHECK (u8) 0x01 +#define ACPI_NOTIFY_DEVICE_WAKE (u8) 0x02 +#define ACPI_NOTIFY_EJECT_REQUEST (u8) 0x03 +#define ACPI_NOTIFY_DEVICE_CHECK_LIGHT (u8) 0x04 +#define ACPI_NOTIFY_FREQUENCY_MISMATCH (u8) 0x05 +#define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 0x06 +#define ACPI_NOTIFY_POWER_FAULT (u8) 0x07 +#define ACPI_NOTIFY_CAPABILITIES_CHECK (u8) 0x08 +#define ACPI_NOTIFY_DEVICE_PLD_CHECK (u8) 0x09 +#define ACPI_NOTIFY_RESERVED (u8) 0x0A +#define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B +#define ACPI_NOTIFY_SHUTDOWN_REQUEST (u8) 0x0C +#define ACPI_NOTIFY_AFFINITY_UPDATE (u8) 0x0D +#define ACPI_NOTIFY_MEMORY_UPDATE (u8) 0x0E + +#define ACPI_GENERIC_NOTIFY_MAX 0x0E +#define ACPI_SPECIFIC_NOTIFY_MAX 0x84 + +/* + * Types associated with ACPI names and objects. The first group of + * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition + * of the ACPI object_type() operator (See the ACPI Spec). Therefore, + * only add to the first group if the spec changes. + * + * NOTE: Types must be kept in sync with the global acpi_ns_properties + * and acpi_ns_type_names arrays. 
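The access-size algorithm defined above is simple shift arithmetic: with the usual GAS access_size encoding (1 = byte, 2 = word, 3 = dword, 4 = qword), ACPI_ACCESS_BIT_WIDTH(n) evaluates to 1 << (n + 2) and ACPI_ACCESS_BYTE_WIDTH(n) to 1 << (n - 1). A tiny worked check (macros restated from the definitions above):

#include <assert.h>

#define ACPI_ACCESS_BIT_SHIFT        2
#define ACPI_ACCESS_BYTE_SHIFT      -1
#define ACPI_ACCESS_BIT_WIDTH(size)  (1 << ((size) + ACPI_ACCESS_BIT_SHIFT))
#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BYTE_SHIFT))

int main(void)
{
        assert(ACPI_ACCESS_BIT_WIDTH(1) == 8  && ACPI_ACCESS_BYTE_WIDTH(1) == 1);
        assert(ACPI_ACCESS_BIT_WIDTH(2) == 16 && ACPI_ACCESS_BYTE_WIDTH(2) == 2);
        assert(ACPI_ACCESS_BIT_WIDTH(3) == 32 && ACPI_ACCESS_BYTE_WIDTH(3) == 4);
        assert(ACPI_ACCESS_BIT_WIDTH(4) == 64 && ACPI_ACCESS_BYTE_WIDTH(4) == 8);
        return 0;
}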
+ */ +typedef u32 acpi_object_type; + +#define ACPI_TYPE_ANY 0x00 +#define ACPI_TYPE_INTEGER 0x01 /* Byte/Word/Dword/Zero/One/Ones */ +#define ACPI_TYPE_STRING 0x02 +#define ACPI_TYPE_BUFFER 0x03 +#define ACPI_TYPE_PACKAGE 0x04 /* byte_const, multiple data_term/Constant/super_name */ +#define ACPI_TYPE_FIELD_UNIT 0x05 +#define ACPI_TYPE_DEVICE 0x06 /* Name, multiple Node */ +#define ACPI_TYPE_EVENT 0x07 +#define ACPI_TYPE_METHOD 0x08 /* Name, byte_const, multiple Code */ +#define ACPI_TYPE_MUTEX 0x09 +#define ACPI_TYPE_REGION 0x0A +#define ACPI_TYPE_POWER 0x0B /* Name,byte_const,word_const,multi Node */ +#define ACPI_TYPE_PROCESSOR 0x0C /* Name,byte_const,Dword_const,byte_const,multi nm_o */ +#define ACPI_TYPE_THERMAL 0x0D /* Name, multiple Node */ +#define ACPI_TYPE_BUFFER_FIELD 0x0E +#define ACPI_TYPE_DDB_HANDLE 0x0F +#define ACPI_TYPE_DEBUG_OBJECT 0x10 + +#define ACPI_TYPE_EXTERNAL_MAX 0x10 +#define ACPI_NUM_TYPES (ACPI_TYPE_EXTERNAL_MAX + 1) + +/* + * These are object types that do not map directly to the ACPI + * object_type() operator. They are used for various internal purposes + * only. If new predefined ACPI_TYPEs are added (via the ACPI + * specification), these internal types must move upwards. (There + * is code that depends on these values being contiguous with the + * external types above.) + */ +#define ACPI_TYPE_LOCAL_REGION_FIELD 0x11 +#define ACPI_TYPE_LOCAL_BANK_FIELD 0x12 +#define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13 +#define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, ref_of, Index */ +#define ACPI_TYPE_LOCAL_ALIAS 0x15 +#define ACPI_TYPE_LOCAL_METHOD_ALIAS 0x16 +#define ACPI_TYPE_LOCAL_NOTIFY 0x17 +#define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x18 +#define ACPI_TYPE_LOCAL_RESOURCE 0x19 +#define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x1A +#define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */ + +#define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */ +#define ACPI_TOTAL_TYPES (ACPI_TYPE_NS_NODE_MAX + 1) + +/* + * These are special object types that never appear in + * a Namespace node, only in an object of union acpi_operand_object + */ +#define ACPI_TYPE_LOCAL_EXTRA 0x1C +#define ACPI_TYPE_LOCAL_DATA 0x1D + +#define ACPI_TYPE_LOCAL_MAX 0x1D + +/* All types above here are invalid */ + +#define ACPI_TYPE_INVALID 0x1E +#define ACPI_TYPE_NOT_FOUND 0xFF + +#define ACPI_NUM_NS_TYPES (ACPI_TYPE_INVALID + 1) + +/* + * All I/O + */ +#define ACPI_READ 0 +#define ACPI_WRITE 1 +#define ACPI_IO_MASK 1 + +/* + * Event Types: Fixed & General Purpose + */ +typedef u32 acpi_event_type; + +/* + * Fixed events + */ +#define ACPI_EVENT_PMTIMER 0 +#define ACPI_EVENT_GLOBAL 1 +#define ACPI_EVENT_POWER_BUTTON 2 +#define ACPI_EVENT_SLEEP_BUTTON 3 +#define ACPI_EVENT_RTC 4 +#define ACPI_EVENT_MAX 4 +#define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1 + +/* + * Event status - Per event + * ------------- + * The encoding of acpi_event_status is illustrated below. + * Note that a set bit (1) indicates the property is TRUE + * (e.g. if bit 0 is set then the event is enabled). + * +-------------+-+-+-+-+-+-+ + * | Bits 31:6 |5|4|3|2|1|0| + * +-------------+-+-+-+-+-+-+ + * | | | | | | | + * | | | | | | +- Enabled? + * | | | | | +--- Enabled for wake? + * | | | | +----- Status bit set? + * | | | +------- Enable bit set? + * | | +--------- Has a handler? + * | +----------- Masked? 
+ * +----------------- + */ +typedef u32 acpi_event_status; + +#define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00 +#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 +#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 +#define ACPI_EVENT_FLAG_STATUS_SET (acpi_event_status) 0x04 +#define ACPI_EVENT_FLAG_ENABLE_SET (acpi_event_status) 0x08 +#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x10 +#define ACPI_EVENT_FLAG_MASKED (acpi_event_status) 0x20 +#define ACPI_EVENT_FLAG_SET ACPI_EVENT_FLAG_STATUS_SET + +/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */ + +#define ACPI_GPE_ENABLE 0 +#define ACPI_GPE_DISABLE 1 +#define ACPI_GPE_CONDITIONAL_ENABLE 2 + +/* + * GPE info flags - Per GPE + * +---+-+-+-+---+ + * |7:6|5|4|3|2:0| + * +---+-+-+-+---+ + * | | | | | + * | | | | +-- Type of dispatch:to method, handler, notify, or none + * | | | +----- Interrupt type: edge or level triggered + * | | +------- Is a Wake GPE + * | +--------- Has been enabled automatically at init time + * +------------ + */ +#define ACPI_GPE_DISPATCH_NONE (u8) 0x00 +#define ACPI_GPE_DISPATCH_METHOD (u8) 0x01 +#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02 +#define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03 +#define ACPI_GPE_DISPATCH_RAW_HANDLER (u8) 0x04 +#define ACPI_GPE_DISPATCH_MASK (u8) 0x07 +#define ACPI_GPE_DISPATCH_TYPE(flags) ((u8) ((flags) & ACPI_GPE_DISPATCH_MASK)) + +#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x08 +#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 +#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08 + +#define ACPI_GPE_CAN_WAKE (u8) 0x10 +#define ACPI_GPE_AUTO_ENABLED (u8) 0x20 +#define ACPI_GPE_INITIALIZED (u8) 0x40 + +/* + * Flags for GPE and Lock interfaces + */ +#define ACPI_NOT_ISR 0x1 +#define ACPI_ISR 0x0 + +/* Notify types */ + +#define ACPI_SYSTEM_NOTIFY 0x1 +#define ACPI_DEVICE_NOTIFY 0x2 +#define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY) +#define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3 +#define ACPI_NUM_NOTIFY_TYPES 2 + +#define ACPI_MAX_SYS_NOTIFY 0x7F +#define ACPI_MAX_DEVICE_SPECIFIC_NOTIFY 0xBF + +#define ACPI_SYSTEM_HANDLER_LIST 0 /* Used as index, must be SYSTEM_NOTIFY -1 */ +#define ACPI_DEVICE_HANDLER_LIST 1 /* Used as index, must be DEVICE_NOTIFY -1 */ + +/* Address Space (Operation Region) Types */ + +typedef u8 acpi_adr_space_type; + +#define ACPI_ADR_SPACE_SYSTEM_MEMORY (acpi_adr_space_type) 0 +#define ACPI_ADR_SPACE_SYSTEM_IO (acpi_adr_space_type) 1 +#define ACPI_ADR_SPACE_PCI_CONFIG (acpi_adr_space_type) 2 +#define ACPI_ADR_SPACE_EC (acpi_adr_space_type) 3 +#define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4 +#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 +#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 +#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7 +#define ACPI_ADR_SPACE_GPIO (acpi_adr_space_type) 8 +#define ACPI_ADR_SPACE_GSBUS (acpi_adr_space_type) 9 +#define ACPI_ADR_SPACE_PLATFORM_COMM (acpi_adr_space_type) 10 + +#define ACPI_NUM_PREDEFINED_REGIONS 11 + +/* + * Special Address Spaces + * + * Note: A Data Table region is a special type of operation region + * that has its own AML opcode. However, internally, the AML + * interpreter simply creates an operation region with an an address + * space type of ACPI_ADR_SPACE_DATA_TABLE. 
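/*
 * Two illustrative predicates (hypothetical names) showing how the
 * acpi_event_status bits and the per-GPE flag field above are meant to be
 * tested.
 */
static u8 example_event_is_armed(acpi_event_status status)
{
	/* Enabled, but its status bit has not fired yet */
	return (status & ACPI_EVENT_FLAG_ENABLED) &&
	       !(status & ACPI_EVENT_FLAG_STATUS_SET);
}

static u8 example_gpe_is_level_method(u8 gpe_flags)
{
	/* Bits 2:0 select the dispatch type; bit 3 selects the trigger mode */
	return ACPI_GPE_DISPATCH_TYPE(gpe_flags) == ACPI_GPE_DISPATCH_METHOD &&
	       (gpe_flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED;
}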
+ */ +#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 0x7E /* Internal to ACPICA only */ +#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 0x7F + +/* Values for _REG connection code */ + +#define ACPI_REG_DISCONNECT 0 +#define ACPI_REG_CONNECT 1 + +/* + * bit_register IDs + * + * These values are intended to be used by the hardware interfaces + * and are mapped to individual bitfields defined within the ACPI + * registers. See the acpi_gbl_bit_register_info global table in utglobal.c + * for this mapping. + */ + +/* PM1 Status register */ + +#define ACPI_BITREG_TIMER_STATUS 0x00 +#define ACPI_BITREG_BUS_MASTER_STATUS 0x01 +#define ACPI_BITREG_GLOBAL_LOCK_STATUS 0x02 +#define ACPI_BITREG_POWER_BUTTON_STATUS 0x03 +#define ACPI_BITREG_SLEEP_BUTTON_STATUS 0x04 +#define ACPI_BITREG_RT_CLOCK_STATUS 0x05 +#define ACPI_BITREG_WAKE_STATUS 0x06 +#define ACPI_BITREG_PCIEXP_WAKE_STATUS 0x07 + +/* PM1 Enable register */ + +#define ACPI_BITREG_TIMER_ENABLE 0x08 +#define ACPI_BITREG_GLOBAL_LOCK_ENABLE 0x09 +#define ACPI_BITREG_POWER_BUTTON_ENABLE 0x0A +#define ACPI_BITREG_SLEEP_BUTTON_ENABLE 0x0B +#define ACPI_BITREG_RT_CLOCK_ENABLE 0x0C +#define ACPI_BITREG_PCIEXP_WAKE_DISABLE 0x0D + +/* PM1 Control register */ + +#define ACPI_BITREG_SCI_ENABLE 0x0E +#define ACPI_BITREG_BUS_MASTER_RLD 0x0F +#define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10 +#define ACPI_BITREG_SLEEP_TYPE 0x11 +#define ACPI_BITREG_SLEEP_ENABLE 0x12 + +/* PM2 Control register */ + +#define ACPI_BITREG_ARB_DISABLE 0x13 + +#define ACPI_BITREG_MAX 0x13 +#define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 + +/* Status register values. A 1 clears a status bit. 0 = no effect */ + +#define ACPI_CLEAR_STATUS 1 + +/* Enable and Control register values */ + +#define ACPI_ENABLE_EVENT 1 +#define ACPI_DISABLE_EVENT 0 + +/* Sleep function dispatch */ + +typedef acpi_status (*acpi_sleep_function) (u8 sleep_state); + +struct acpi_sleep_functions { + acpi_sleep_function legacy_function; + acpi_sleep_function extended_function; +}; + +/* + * External ACPI object definition + */ + +/* + * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package + * element or an unresolved named reference. 
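/*
 * Hedged sketch of how the bit_register IDs and ACPI_CLEAR_STATUS above are
 * consumed. The helper name is hypothetical; acpi_read_bit_register() and
 * acpi_write_bit_register() are the ACPICA hardware interfaces declared
 * elsewhere (acpixf.h), and ACPI_FAILURE() comes from acexcep.h.
 */
static acpi_status example_clear_power_button_status(void)
{
	u32 set = 0;
	acpi_status status;

	status = acpi_read_bit_register(ACPI_BITREG_POWER_BUTTON_STATUS, &set);
	if (ACPI_FAILURE(status) || !set)
		return status;

	/* Writing 1 (ACPI_CLEAR_STATUS) clears a PM1 status bit; 0 has no effect */
	return acpi_write_bit_register(ACPI_BITREG_POWER_BUTTON_STATUS,
				       ACPI_CLEAR_STATUS);
}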
+ */ +union acpi_object { + acpi_object_type type; /* See definition of acpi_ns_type for values */ + struct { + acpi_object_type type; /* ACPI_TYPE_INTEGER */ + u64 value; /* The actual number */ + } integer; + + struct { + acpi_object_type type; /* ACPI_TYPE_STRING */ + u32 length; /* # of bytes in string, excluding trailing null */ + char *pointer; /* points to the string value */ + } string; + + struct { + acpi_object_type type; /* ACPI_TYPE_BUFFER */ + u32 length; /* # of bytes in buffer */ + u8 *pointer; /* points to the buffer */ + } buffer; + + struct { + acpi_object_type type; /* ACPI_TYPE_PACKAGE */ + u32 count; /* # of elements in package */ + union acpi_object *elements; /* Pointer to an array of ACPI_OBJECTs */ + } package; + + struct { + acpi_object_type type; /* ACPI_TYPE_LOCAL_REFERENCE */ + acpi_object_type actual_type; /* Type associated with the Handle */ + acpi_handle handle; /* object reference */ + } reference; + + struct { + acpi_object_type type; /* ACPI_TYPE_PROCESSOR */ + u32 proc_id; + acpi_io_address pblk_address; + u32 pblk_length; + } processor; + + struct { + acpi_object_type type; /* ACPI_TYPE_POWER */ + u32 system_level; + u32 resource_order; + } power_resource; +}; + +/* + * List of objects, used as a parameter list for control method evaluation + */ +struct acpi_object_list { + u32 count; + union acpi_object *pointer; +}; + +/* + * Miscellaneous common Data Structures used by the interfaces + */ +#define ACPI_NO_BUFFER 0 + +#ifdef ACPI_NO_MEM_ALLOCATIONS + +#define ACPI_ALLOCATE_BUFFER (acpi_size) (0) +#define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (0) + +#else /* ACPI_NO_MEM_ALLOCATIONS */ + +#define ACPI_ALLOCATE_BUFFER (acpi_size) (-1) /* Let ACPICA allocate buffer */ +#define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (-2) /* For internal use only (enables tracking) */ + +#endif /* ACPI_NO_MEM_ALLOCATIONS */ + +struct acpi_buffer { + acpi_size length; /* Length in bytes of the buffer */ + void *pointer; /* pointer to buffer */ +}; + +/* + * name_type for acpi_get_name + */ +#define ACPI_FULL_PATHNAME 0 +#define ACPI_SINGLE_NAME 1 +#define ACPI_FULL_PATHNAME_NO_TRAILING 2 +#define ACPI_NAME_TYPE_MAX 2 + +/* + * Predefined Namespace items + */ +struct acpi_predefined_names { + const char *name; + u8 type; + char *val; +}; + +/* + * Structure and flags for acpi_get_system_info + */ +#define ACPI_SYS_MODE_UNKNOWN 0x0000 +#define ACPI_SYS_MODE_ACPI 0x0001 +#define ACPI_SYS_MODE_LEGACY 0x0002 +#define ACPI_SYS_MODES_MASK 0x0003 + +/* + * System info returned by acpi_get_system_info() + */ +struct acpi_system_info { + u32 acpi_ca_version; + u32 flags; + u32 timer_resolution; + u32 reserved1; + u32 reserved2; + u32 debug_level; + u32 debug_layer; +}; + +/* + * System statistics returned by acpi_get_statistics() + */ +struct acpi_statistics { + u32 sci_count; + u32 gpe_count; + u32 fixed_event_count[ACPI_NUM_FIXED_EVENTS]; + u32 method_count; +}; + +/* + * Types specific to the OS service interfaces + */ +typedef u32 + (ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); + +typedef void + (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); + +/* + * Various handlers and callback procedures + */ +typedef +u32 (*acpi_sci_handler) (void *context); + +typedef +void (*acpi_gbl_event_handler) (u32 event_type, + acpi_handle device, + u32 event_number, void *context); + +#define ACPI_EVENT_TYPE_GPE 0 +#define ACPI_EVENT_TYPE_FIXED 1 + +typedef +u32(*acpi_event_handler) (void *context); + +typedef +u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 
gpe_number, void *context); + +typedef +void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); + +typedef +void (*acpi_object_handler) (acpi_handle object, void *data); + +typedef +acpi_status (*acpi_init_handler) (acpi_handle object, u32 function); + +#define ACPI_INIT_DEVICE_INI 1 + +typedef +acpi_status (*acpi_exception_handler) (acpi_status aml_status, + acpi_name name, + u16 opcode, + u32 aml_offset, void *context); + +/* Table Event handler (Load, load_table, etc.) and types */ + +typedef +acpi_status (*acpi_table_handler) (u32 event, void *table, void *context); + +/* Table Event Types */ + +#define ACPI_TABLE_EVENT_LOAD 0x0 +#define ACPI_TABLE_EVENT_UNLOAD 0x1 +#define ACPI_TABLE_EVENT_INSTALL 0x2 +#define ACPI_TABLE_EVENT_UNINSTALL 0x3 +#define ACPI_NUM_TABLE_EVENTS 4 + +/* Address Spaces (For Operation Regions) */ + +typedef +acpi_status (*acpi_adr_space_handler) (u32 function, + acpi_physical_address address, + u32 bit_width, + u64 *value, + void *handler_context, + void *region_context); + +#define ACPI_DEFAULT_HANDLER NULL + +/* Special Context data for generic_serial_bus/general_purpose_io (ACPI 5.0) */ + +struct acpi_connection_info { + u8 *connection; + u16 length; + u8 access_length; +}; + +typedef +acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle, + u32 function, + void *handler_context, + void **region_context); + +#define ACPI_REGION_ACTIVATE 0 +#define ACPI_REGION_DEACTIVATE 1 + +typedef +acpi_status (*acpi_walk_callback) (acpi_handle object, + u32 nesting_level, + void *context, void **return_value); + +typedef +u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported); + +/* Interrupt handler return values */ + +#define ACPI_INTERRUPT_NOT_HANDLED 0x00 +#define ACPI_INTERRUPT_HANDLED 0x01 + +/* GPE handler return values */ + +#define ACPI_REENABLE_GPE 0x80 + +/* Length of 32-bit EISAID values when converted back to a string */ + +#define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ + +/* Length of UUID (string) values */ + +#define ACPI_UUID_LENGTH 16 + +/* Length of 3-byte PCI class code values when converted back to a string */ + +#define ACPI_PCICLS_STRING_SIZE 7 /* Includes null terminator */ + +/* Structures used for device/processor HID, UID, CID */ + +struct acpi_pnp_device_id { + u32 length; /* Length of string + null */ + char *string; +}; + +struct acpi_pnp_device_id_list { + u32 count; /* Number of IDs in Ids array */ + u32 list_size; /* Size of list, including ID strings */ + struct acpi_pnp_device_id ids[1]; /* ID array */ +}; + +/* + * Structure returned from acpi_get_object_info. + * Optimized for both 32-bit and 64-bit builds. 
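/*
 * Illustrative consumer (hypothetical name) of the external object layout
 * defined earlier: a returned union acpi_object may be a bare integer or a
 * package whose elements are scanned through package.count/package.elements.
 */
static u64 example_first_integer(const union acpi_object *obj)
{
	u32 i;

	if (obj->type == ACPI_TYPE_INTEGER)
		return obj->integer.value;

	if (obj->type == ACPI_TYPE_PACKAGE) {
		for (i = 0; i < obj->package.count; i++) {
			const union acpi_object *element = &obj->package.elements[i];

			if (element->type == ACPI_TYPE_INTEGER)
				return element->integer.value;
		}
	}

	return 0;	/* no integer found */
}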
+ */ +struct acpi_device_info { + u32 info_size; /* Size of info, including ID strings */ + u32 name; /* ACPI object Name */ + acpi_object_type type; /* ACPI object Type */ + u8 param_count; /* If a method, required parameter count */ + u16 valid; /* Indicates which optional fields are valid */ + u8 flags; /* Miscellaneous info */ + u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ + u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ + u64 address; /* _ADR value */ + struct acpi_pnp_device_id hardware_id; /* _HID value */ + struct acpi_pnp_device_id unique_id; /* _UID value */ + struct acpi_pnp_device_id class_code; /* _CLS value */ + struct acpi_pnp_device_id_list compatible_id_list; /* _CID list */ +}; + +/* Values for Flags field above (acpi_get_object_info) */ + +#define ACPI_PCI_ROOT_BRIDGE 0x01 + +/* Flags for Valid field above (acpi_get_object_info) */ + +#define ACPI_VALID_ADR 0x0002 +#define ACPI_VALID_HID 0x0004 +#define ACPI_VALID_UID 0x0008 +#define ACPI_VALID_CID 0x0020 +#define ACPI_VALID_CLS 0x0040 +#define ACPI_VALID_SXDS 0x0100 +#define ACPI_VALID_SXWS 0x0200 + +/* Flags for _STA method */ + +#define ACPI_STA_DEVICE_PRESENT 0x01 +#define ACPI_STA_DEVICE_ENABLED 0x02 +#define ACPI_STA_DEVICE_UI 0x04 +#define ACPI_STA_DEVICE_FUNCTIONING 0x08 +#define ACPI_STA_DEVICE_OK 0x08 /* Synonym */ +#define ACPI_STA_BATTERY_PRESENT 0x10 + +/* Context structs for address space handlers */ + +struct acpi_pci_id { + u16 segment; + u16 bus; + u16 device; + u16 function; +}; + +struct acpi_mem_space_context { + u32 length; + acpi_physical_address address; + acpi_physical_address mapped_physical_address; + u8 *mapped_logical_address; + acpi_size mapped_length; +}; + +/* + * struct acpi_memory_list is used only if the ACPICA local cache is enabled + */ +struct acpi_memory_list { + const char *list_name; + void *list_head; + u16 object_size; + u16 max_depth; + u16 current_depth; + +#ifdef ACPI_DBG_TRACK_ALLOCATIONS + + /* Statistics for debug memory tracking only */ + + u32 total_allocated; + u32 total_freed; + u32 max_occupied; + u32 total_size; + u32 current_total_size; + u32 requests; + u32 hits; +#endif +}; + +/* Definitions of trace event types */ + +typedef enum { + ACPI_TRACE_AML_METHOD, + ACPI_TRACE_AML_OPCODE, + ACPI_TRACE_AML_REGION +} acpi_trace_event_type; + +/* Definitions of _OSI support */ + +#define ACPI_VENDOR_STRINGS 0x01 +#define ACPI_FEATURE_STRINGS 0x02 +#define ACPI_ENABLE_INTERFACES 0x00 +#define ACPI_DISABLE_INTERFACES 0x04 + +#define ACPI_DISABLE_ALL_VENDOR_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS) +#define ACPI_DISABLE_ALL_FEATURE_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_FEATURE_STRINGS) +#define ACPI_DISABLE_ALL_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS) +#define ACPI_ENABLE_ALL_VENDOR_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS) +#define ACPI_ENABLE_ALL_FEATURE_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_FEATURE_STRINGS) +#define ACPI_ENABLE_ALL_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS) + +#define ACPI_OSI_WIN_2000 0x01 +#define ACPI_OSI_WIN_XP 0x02 +#define ACPI_OSI_WIN_XP_SP1 0x03 +#define ACPI_OSI_WINSRV_2003 0x04 +#define ACPI_OSI_WIN_XP_SP2 0x05 +#define ACPI_OSI_WINSRV_2003_SP1 0x06 +#define ACPI_OSI_WIN_VISTA 0x07 +#define ACPI_OSI_WINSRV_2008 0x08 +#define ACPI_OSI_WIN_VISTA_SP1 0x09 +#define ACPI_OSI_WIN_VISTA_SP2 0x0A +#define ACPI_OSI_WIN_7 0x0B +#define ACPI_OSI_WIN_8 0x0C +#define ACPI_OSI_WIN_10 0x0D +#define ACPI_OSI_WIN_10_RS1 
0x0E +#define ACPI_OSI_WIN_10_RS2 0x0F +#define ACPI_OSI_WIN_10_RS3 0x10 + +/* Definitions of getopt */ + +#define ACPI_OPT_END -1 + +#endif /* __ACTYPES_H__ */ diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h new file mode 100644 index 000000000..e63f21453 --- /dev/null +++ b/include/acpi/acuuid.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acuuid.h - ACPI-related UUID/GUID definitions + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACUUID_H__ +#define __ACUUID_H__ + +/* + * Note1: UUIDs and GUIDs are defined to be identical in ACPI. + * + * Note2: This file is standalone and should remain that way. + */ + +/* Controllers */ + +#define UUID_GPIO_CONTROLLER "4f248f40-d5e2-499f-834c-27758ea1cd3f" +#define UUID_USB_CONTROLLER "ce2ee385-00e6-48cb-9f05-2edb927c4899" +#define UUID_SATA_CONTROLLER "e4db149b-fcfe-425b-a6d8-92357d78fc7f" + +/* Devices */ + +#define UUID_PCI_HOST_BRIDGE "33db4d5b-1ff7-401c-9657-7441c03dd766" +#define UUID_I2C_DEVICE "3cdff6f7-4267-4555-ad05-b30a3d8938de" +#define UUID_POWER_BUTTON "dfbcf3c5-e7a5-44e6-9c1f-29c76f6e059c" + +/* Interfaces */ + +#define UUID_DEVICE_LABELING "e5c937d0-3553-4d7a-9117-ea4d19c3434d" +#define UUID_PHYSICAL_PRESENCE "3dddfaa6-361b-4eb4-a424-8d10089d1653" + +/* NVDIMM - NFIT table */ + +#define UUID_VOLATILE_MEMORY "7305944f-fdda-44e3-b16c-3f22d252e5d0" +#define UUID_PERSISTENT_MEMORY "66f0d379-b4f3-4074-ac43-0d3318b78cdb" +#define UUID_CONTROL_REGION "92f701f6-13b4-405d-910b-299367e8234c" +#define UUID_DATA_REGION "91af0530-5d86-470e-a6b0-0a2db9408249" +#define UUID_VOLATILE_VIRTUAL_DISK "77ab535a-45fc-624b-5560-f7b281d1f96e" +#define UUID_VOLATILE_VIRTUAL_CD "3d5abd30-4175-87ce-6d64-d2ade523c4bb" +#define UUID_PERSISTENT_VIRTUAL_DISK "5cea02c9-4d07-69d3-269f-4496fbe096f9" +#define UUID_PERSISTENT_VIRTUAL_CD "08018188-42cd-bb48-100f-5387d53ded3d" + +/* Processor Properties (ACPI 6.2) */ + +#define UUID_CACHE_PROPERTIES "6DC63E77-257E-4E78-A973-A21F2796898D" +#define UUID_PHYSICAL_PROPERTY "DDE4D59A-AA42-4349-B407-EA40F57D9FB7" + +/* Miscellaneous */ + +#define UUID_PLATFORM_CAPABILITIES "0811b06e-4a27-44f9-8d60-3cbbc22e7b48" +#define UUID_DYNAMIC_ENUMERATION "d8c1a3a6-be9b-4c9b-91bf-c3cb81fc5daf" +#define UUID_BATTERY_THERMAL_LIMIT "4c2067e3-887d-475c-9720-4af1d3ed602e" +#define UUID_THERMAL_EXTENSIONS "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500" +#define UUID_DEVICE_PROPERTIES "daffd814-6eba-4d8c-8a91-bc9bbf4aa301" + +#endif /* __AUUID_H__ */ diff --git a/include/acpi/apei.h b/include/acpi/apei.h new file mode 100644 index 000000000..680f80960 --- /dev/null +++ b/include/acpi/apei.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * apei.h - ACPI Platform Error Interface + */ + +#ifndef ACPI_APEI_H +#define ACPI_APEI_H + +#include +#include +#include + +#define APEI_ERST_INVALID_RECORD_ID 0xffffffffffffffffULL + +#define APEI_ERST_CLEAR_RECORD _IOW('E', 1, u64) +#define APEI_ERST_GET_RECORD_COUNT _IOR('E', 2, u32) + +#ifdef __KERNEL__ + +enum hest_status { + HEST_ENABLED, + HEST_DISABLED, + HEST_NOT_FOUND, +}; + +extern int hest_disable; +extern int erst_disable; +#ifdef CONFIG_ACPI_APEI_GHES +extern bool ghes_disable; +#else +#define ghes_disable 1 +#endif + +#ifdef CONFIG_ACPI_APEI +void __init acpi_hest_init(void); +#else +static inline void acpi_hest_init(void) { return; } +#endif + +typedef int 
(*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data); +int apei_hest_parse(apei_hest_func_t func, void *data); + +int erst_write(const struct cper_record_header *record); +ssize_t erst_get_record_count(void); +int erst_get_record_id_begin(int *pos); +int erst_get_record_id_next(int *pos, u64 *record_id); +void erst_get_record_id_end(void); +ssize_t erst_read(u64 record_id, struct cper_record_header *record, + size_t buflen); +int erst_clear(u64 record_id); + +int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data); +void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err); + +#endif +#endif diff --git a/include/acpi/battery.h b/include/acpi/battery.h new file mode 100644 index 000000000..5d8f5d910 --- /dev/null +++ b/include/acpi/battery.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ACPI_BATTERY_H +#define __ACPI_BATTERY_H + +#define ACPI_BATTERY_CLASS "battery" + +#define ACPI_BATTERY_NOTIFY_STATUS 0x80 +#define ACPI_BATTERY_NOTIFY_INFO 0x81 +#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82 + +struct acpi_battery_hook { + const char *name; + int (*add_battery)(struct power_supply *battery); + int (*remove_battery)(struct power_supply *battery); + struct list_head list; +}; + +void battery_hook_register(struct acpi_battery_hook *hook); +void battery_hook_unregister(struct acpi_battery_hook *hook); + +#endif diff --git a/include/acpi/button.h b/include/acpi/button.h new file mode 100644 index 000000000..3a2b8535d --- /dev/null +++ b/include/acpi/button.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ACPI_BUTTON_H +#define ACPI_BUTTON_H + +#include + +#if IS_ENABLED(CONFIG_ACPI_BUTTON) +extern int acpi_lid_notifier_register(struct notifier_block *nb); +extern int acpi_lid_notifier_unregister(struct notifier_block *nb); +extern int acpi_lid_open(void); +#else +static inline int acpi_lid_notifier_register(struct notifier_block *nb) +{ + return 0; +} +static inline int acpi_lid_notifier_unregister(struct notifier_block *nb) +{ + return 0; +} +static inline int acpi_lid_open(void) +{ + return 1; +} +#endif /* IS_ENABLED(CONFIG_ACPI_BUTTON) */ + +#endif /* ACPI_BUTTON_H */ diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h new file mode 100644 index 000000000..8e0b8250a --- /dev/null +++ b/include/acpi/cppc_acpi.h @@ -0,0 +1,145 @@ +/* + * CPPC (Collaborative Processor Performance Control) methods used + * by CPUfreq drivers. + * + * (C) Copyright 2014, 2015 Linaro Ltd. + * Author: Ashwin Chaugule + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#ifndef _CPPC_ACPI_H +#define _CPPC_ACPI_H + +#include +#include + +#include +#include + +/* Support CPPCv2 and CPPCv3 */ +#define CPPC_V2_REV 2 +#define CPPC_V3_REV 3 +#define CPPC_V2_NUM_ENT 21 +#define CPPC_V3_NUM_ENT 23 + +#define PCC_CMD_COMPLETE_MASK (1 << 0) +#define PCC_ERROR_MASK (1 << 2) + +#define MAX_CPC_REG_ENT 21 + +/* CPPC specific PCC commands. */ +#define CMD_READ 0 +#define CMD_WRITE 1 + +/* Each register has the folowing format. */ +struct cpc_reg { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 __iomem address; +} __packed; + +/* + * Each entry in the CPC table is either + * of type ACPI_TYPE_BUFFER or + * ACPI_TYPE_INTEGER. 
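/*
 * Skeleton battery hook (all names hypothetical) wired to the
 * struct acpi_battery_hook declared above; battery_hook_register() and
 * battery_hook_unregister() attach and detach it.
 */
static int example_add_battery(struct power_supply *battery)
{
	/* Called for every battery already present and for any added later */
	return 0;
}

static int example_remove_battery(struct power_supply *battery)
{
	return 0;
}

static struct acpi_battery_hook example_battery_hook = {
	.name = "example hook",
	.add_battery = example_add_battery,
	.remove_battery = example_remove_battery,
};

/* battery_hook_register(&example_battery_hook) at init,
 * battery_hook_unregister(&example_battery_hook) on teardown. */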
+ */ +struct cpc_register_resource { + acpi_object_type type; + u64 __iomem *sys_mem_vaddr; + union { + struct cpc_reg reg; + u64 int_value; + } cpc_entry; +}; + +/* Container to hold the CPC details for each CPU */ +struct cpc_desc { + int num_entries; + int version; + int cpu_id; + int write_cmd_status; + int write_cmd_id; + struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT]; + struct acpi_psd_package domain_info; + struct kobject kobj; +}; + +/* These are indexes into the per-cpu cpc_regs[]. Order is important. */ +enum cppc_regs { + HIGHEST_PERF, + NOMINAL_PERF, + LOW_NON_LINEAR_PERF, + LOWEST_PERF, + GUARANTEED_PERF, + DESIRED_PERF, + MIN_PERF, + MAX_PERF, + PERF_REDUC_TOLERANCE, + TIME_WINDOW, + CTR_WRAP_TIME, + REFERENCE_CTR, + DELIVERED_CTR, + PERF_LIMITED, + ENABLE, + AUTO_SEL_ENABLE, + AUTO_ACT_WINDOW, + ENERGY_PERF, + REFERENCE_PERF, + LOWEST_FREQ, + NOMINAL_FREQ, +}; + +/* + * Categorization of registers as described + * in the ACPI v.5.1 spec. + * XXX: Only filling up ones which are used by governors + * today. + */ +struct cppc_perf_caps { + u32 highest_perf; + u32 nominal_perf; + u32 lowest_perf; + u32 lowest_nonlinear_perf; + u32 lowest_freq; + u32 nominal_freq; +}; + +struct cppc_perf_ctrls { + u32 max_perf; + u32 min_perf; + u32 desired_perf; +}; + +struct cppc_perf_fb_ctrs { + u64 reference; + u64 delivered; + u64 reference_perf; + u64 wraparound_time; +}; + +/* Per CPU container for runtime CPPC management. */ +struct cppc_cpudata { + int cpu; + struct cppc_perf_caps perf_caps; + struct cppc_perf_ctrls perf_ctrls; + struct cppc_perf_fb_ctrs perf_fb_ctrs; + struct cpufreq_policy *cur_policy; + unsigned int shared_type; + cpumask_var_t shared_cpu_map; +}; + +extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs); +extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); +extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); +extern int acpi_get_psd_map(struct cppc_cpudata **); +extern unsigned int cppc_get_transition_latency(int cpu); + +#endif /* _CPPC_ACPI_H*/ diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h new file mode 100644 index 000000000..82cb4eb22 --- /dev/null +++ b/include/acpi/ghes.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef GHES_H +#define GHES_H + +#include +#include + +/* + * One struct ghes is created for each generic hardware error source. + * It provides the context for APEI hardware error timer/IRQ/SCI/NMI + * handler. + * + * estatus: memory buffer for error status block, allocated during + * HEST parsing. 
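/*
 * Minimal sketch (hypothetical helper name) using the CPPC calls declared
 * above to request a CPU's highest advertised performance level.
 */
static int example_request_highest_perf(int cpu)
{
	struct cppc_perf_caps caps;
	struct cppc_perf_ctrls ctrls = { 0 };
	int ret;

	ret = cppc_get_perf_caps(cpu, &caps);
	if (ret)
		return ret;

	ctrls.desired_perf = caps.highest_perf;	/* written to the DESIRED_PERF register */
	return cppc_set_perf(cpu, &ctrls);
}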
+ */ +#define GHES_TO_CLEAR 0x0001 +#define GHES_EXITING 0x0002 + +struct ghes { + union { + struct acpi_hest_generic *generic; + struct acpi_hest_generic_v2 *generic_v2; + }; + struct acpi_hest_generic_status *estatus; + u64 buffer_paddr; + unsigned long flags; + union { + struct list_head list; + struct timer_list timer; + unsigned int irq; + }; +}; + +struct ghes_estatus_node { + struct llist_node llnode; + struct acpi_hest_generic *generic; + struct ghes *ghes; +}; + +struct ghes_estatus_cache { + u32 estatus_len; + atomic_t count; + struct acpi_hest_generic *generic; + unsigned long long time_in; + struct rcu_head rcu; +}; + +enum { + GHES_SEV_NO = 0x0, + GHES_SEV_CORRECTED = 0x1, + GHES_SEV_RECOVERABLE = 0x2, + GHES_SEV_PANIC = 0x3, +}; + +/* From drivers/edac/ghes_edac.c */ + +#ifdef CONFIG_EDAC_GHES +void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err); + +int ghes_edac_register(struct ghes *ghes, struct device *dev); + +void ghes_edac_unregister(struct ghes *ghes); + +#else +static inline void ghes_edac_report_mem_error(int sev, + struct cper_sec_mem_err *mem_err) +{ +} + +static inline int ghes_edac_register(struct ghes *ghes, struct device *dev) +{ + return -ENODEV; +} + +static inline void ghes_edac_unregister(struct ghes *ghes) +{ +} +#endif + +static inline int acpi_hest_get_version(struct acpi_hest_generic_data *gdata) +{ + return gdata->revision >> 8; +} + +static inline void *acpi_hest_get_payload(struct acpi_hest_generic_data *gdata) +{ + if (acpi_hest_get_version(gdata) >= 3) + return (void *)(((struct acpi_hest_generic_data_v300 *)(gdata)) + 1); + + return gdata + 1; +} + +static inline int acpi_hest_get_error_length(struct acpi_hest_generic_data *gdata) +{ + return ((struct acpi_hest_generic_data *)(gdata))->error_data_length; +} + +static inline int acpi_hest_get_size(struct acpi_hest_generic_data *gdata) +{ + if (acpi_hest_get_version(gdata) >= 3) + return sizeof(struct acpi_hest_generic_data_v300); + + return sizeof(struct acpi_hest_generic_data); +} + +static inline int acpi_hest_get_record_size(struct acpi_hest_generic_data *gdata) +{ + return (acpi_hest_get_size(gdata) + acpi_hest_get_error_length(gdata)); +} + +static inline void *acpi_hest_get_next(struct acpi_hest_generic_data *gdata) +{ + return (void *)(gdata) + acpi_hest_get_record_size(gdata); +} + +#define apei_estatus_for_each_section(estatus, section) \ + for (section = (struct acpi_hest_generic_data *)(estatus + 1); \ + (void *)section - (void *)(estatus + 1) < estatus->data_length; \ + section = acpi_hest_get_next(section)) + +#ifdef CONFIG_ACPI_APEI_SEA +int ghes_notify_sea(void); +#else +static inline int ghes_notify_sea(void) { return -ENOENT; } +#endif + +#endif /* GHES_H */ diff --git a/include/acpi/hed.h b/include/acpi/hed.h new file mode 100644 index 000000000..46e1249b7 --- /dev/null +++ b/include/acpi/hed.h @@ -0,0 +1,18 @@ +/* + * hed.h - ACPI Hardware Error Device + * + * Copyright (C) 2009, Intel Corp. + * Author: Huang Ying + * + * This file is released under the GPLv2. 
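/*
 * Small illustrative walker (hypothetical name) over a generic error status
 * block, using the apei_estatus_for_each_section() iterator and the accessor
 * helpers defined above.
 */
static int example_total_error_bytes(struct acpi_hest_generic_status *estatus)
{
	struct acpi_hest_generic_data *gdata;
	int bytes = 0;

	/* Sum the error payload of each section (headers excluded) */
	apei_estatus_for_each_section(estatus, gdata)
		bytes += acpi_hest_get_error_length(gdata);

	return bytes;
}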
+ */ + +#ifndef ACPI_HED_H +#define ACPI_HED_H + +#include + +int register_acpi_hed_notifier(struct notifier_block *nb); +void unregister_acpi_hed_notifier(struct notifier_block *nb); + +#endif diff --git a/include/acpi/nfit.h b/include/acpi/nfit.h new file mode 100644 index 000000000..86ed07c12 --- /dev/null +++ b/include/acpi/nfit.h @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __ACPI_NFIT_H +#define __ACPI_NFIT_H + +#if IS_ENABLED(CONFIG_ACPI_NFIT) +int nfit_get_smbios_id(u32 device_handle, u16 *flags); +#else +static inline int nfit_get_smbios_id(u32 device_handle, u16 *flags) +{ + return -EOPNOTSUPP; +} +#endif + +#endif /* __ACPI_NFIT_H */ diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h new file mode 100644 index 000000000..cd6ef45e6 --- /dev/null +++ b/include/acpi/pcc.h @@ -0,0 +1,30 @@ +/* + * PCC (Platform Communications Channel) methods + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#ifndef _PCC_H +#define _PCC_H + +#include +#include + +#define MAX_PCC_SUBSPACES 256 +#ifdef CONFIG_PCC +extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, + int subspace_id); +extern void pcc_mbox_free_channel(struct mbox_chan *chan); +#else +static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, + int subspace_id) +{ + return ERR_PTR(-ENODEV); +} +static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { } +#endif + +#endif /* _PCC_H */ diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h new file mode 100644 index 000000000..967c552d1 --- /dev/null +++ b/include/acpi/pdc_intel.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* _PDC bit definition for Intel processors */ + +#ifndef __PDC_INTEL_H__ +#define __PDC_INTEL_H__ + +#define ACPI_PDC_P_FFH (0x0001) +#define ACPI_PDC_C_C1_HALT (0x0002) +#define ACPI_PDC_T_FFH (0x0004) +#define ACPI_PDC_SMP_C1PT (0x0008) +#define ACPI_PDC_SMP_C2C3 (0x0010) +#define ACPI_PDC_SMP_P_SWCOORD (0x0020) +#define ACPI_PDC_SMP_C_SWCOORD (0x0040) +#define ACPI_PDC_SMP_T_SWCOORD (0x0080) +#define ACPI_PDC_C_C1_FFH (0x0100) +#define ACPI_PDC_C_C2C3_FFH (0x0200) +#define ACPI_PDC_SMP_P_HWCOORD (0x0800) + +#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_SMP_P_SWCOORD | \ + ACPI_PDC_SMP_P_HWCOORD | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \ + ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_C_C1_FFH | \ + ACPI_PDC_C_C2C3_FFH) + +#endif /* __PDC_INTEL_H__ */ diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h new file mode 100644 index 000000000..f444e5b0f --- /dev/null +++ b/include/acpi/platform/acenv.h @@ -0,0 +1,356 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acenv.h - Host and compiler configuration + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACENV_H__ +#define __ACENV_H__ + +/* + * Environment configuration. The purpose of this file is to interface ACPICA + * to the local environment. 
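/*
 * Hedged usage sketch for the PCC channel helpers declared above. The helper
 * name is hypothetical; IS_ERR()/PTR_ERR() are the usual linux/err.h
 * conveniences assumed to be available.
 */
static int example_use_pcc_subspace(struct mbox_client *cl, int subspace_id)
{
	struct mbox_chan *chan = pcc_mbox_request_channel(cl, subspace_id);

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* -ENODEV when CONFIG_PCC is disabled */

	/* ... exchange commands through the subspace's shared memory here ... */

	pcc_mbox_free_channel(chan);
	return 0;
}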
This includes compiler-specific, OS-specific, + * and machine-specific configuration. + */ + +/* Types for ACPI_MUTEX_TYPE */ + +#define ACPI_BINARY_SEMAPHORE 0 +#define ACPI_OSL_MUTEX 1 + +/* Types for DEBUGGER_THREADING */ + +#define DEBUGGER_SINGLE_THREADED 0 +#define DEBUGGER_MULTI_THREADED 1 + +/****************************************************************************** + * + * Configuration for ACPI tools and utilities + * + *****************************************************************************/ + +/* Common application configuration. All single threaded except for acpi_exec. */ + +#if (defined ACPI_ASL_COMPILER) || \ + (defined ACPI_BIN_APP) || \ + (defined ACPI_DUMP_APP) || \ + (defined ACPI_HELP_APP) || \ + (defined ACPI_NAMES_APP) || \ + (defined ACPI_SRC_APP) || \ + (defined ACPI_XTRACT_APP) || \ + (defined ACPI_EXAMPLE_APP) || \ + (defined ACPI_EFI_HELLO) +#define ACPI_APPLICATION +#define ACPI_SINGLE_THREADED +#define USE_NATIVE_ALLOCATE_ZEROED +#endif + +/* iASL configuration */ + +#ifdef ACPI_ASL_COMPILER +#define ACPI_DEBUG_OUTPUT +#define ACPI_CONSTANT_EVAL_ONLY +#define ACPI_LARGE_NAMESPACE_NODE +#define ACPI_DATA_TABLE_DISASSEMBLY +#define ACPI_32BIT_PHYSICAL_ADDRESS +#define ACPI_DISASSEMBLER 1 +#endif + +/* acpi_exec configuration. Multithreaded with full AML debugger */ + +#ifdef ACPI_EXEC_APP +#define ACPI_APPLICATION +#define ACPI_FULL_DEBUG +#define ACPI_MUTEX_DEBUG +#define ACPI_DBG_TRACK_ALLOCATIONS +#endif + +/* acpi_help configuration. Error messages disabled. */ + +#ifdef ACPI_HELP_APP +#define ACPI_NO_ERROR_MESSAGES +#endif + +/* acpi_names configuration. Debug output enabled. */ + +#ifdef ACPI_NAMES_APP +#define ACPI_DEBUG_OUTPUT +#endif + +/* acpi_exec/acpi_names/Example configuration. Native RSDP used. */ + +#if (defined ACPI_EXEC_APP) || \ + (defined ACPI_EXAMPLE_APP) || \ + (defined ACPI_NAMES_APP) +#define ACPI_USE_NATIVE_RSDP_POINTER +#endif + +/* acpi_dump configuration. Native mapping used if provided by the host */ + +#ifdef ACPI_DUMP_APP +#define ACPI_USE_NATIVE_MEMORY_MAPPING +#endif + +/* acpi_names/Example configuration. Hardware disabled */ + +#if (defined ACPI_EXAMPLE_APP) || \ + (defined ACPI_NAMES_APP) +#define ACPI_REDUCED_HARDWARE 1 +#endif + +/* Linkable ACPICA library. Two versions, one with full debug. */ + +#ifdef ACPI_LIBRARY +#define ACPI_USE_LOCAL_CACHE +#define ACPI_DEBUGGER 1 +#define ACPI_DISASSEMBLER 1 + +#ifdef _DEBUG +#define ACPI_DEBUG_OUTPUT +#endif +#endif + +/* Common for all ACPICA applications */ + +#ifdef ACPI_APPLICATION +#define ACPI_USE_LOCAL_CACHE +#endif + +/* Common debug/disassembler support */ + +#ifdef ACPI_FULL_DEBUG +#define ACPI_DEBUG_OUTPUT +#define ACPI_DEBUGGER 1 +#define ACPI_DISASSEMBLER 1 +#endif + + +/*! [Begin] no source code translation */ + +/****************************************************************************** + * + * Host configuration files. The compiler configuration files are included + * first. 
+ * + *****************************************************************************/ + +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) +#include + +#elif defined(_MSC_VER) +#include "acmsvc.h" + +#elif defined(__INTEL_COMPILER) +#include + +#endif + +#if defined(_LINUX) || defined(__linux__) +#include + +#elif defined(_APPLE) || defined(__APPLE__) +#include "acmacosx.h" + +#elif defined(__DragonFly__) +#include "acdragonfly.h" + +#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include "acfreebsd.h" + +#elif defined(__NetBSD__) +#include "acnetbsd.h" + +#elif defined(__sun) +#include "acsolaris.h" + +#elif defined(MODESTO) +#include "acmodesto.h" + +#elif defined(NETWARE) +#include "acnetware.h" + +#elif defined(_CYGWIN) +#include "accygwin.h" + +#elif defined(WIN32) +#include "acwin.h" + +#elif defined(WIN64) +#include "acwin64.h" + +#elif defined(_WRS_LIB_BUILD) +#include "acvxworks.h" + +#elif defined(__OS2__) +#include "acos2.h" + +#elif defined(__HAIKU__) +#include "achaiku.h" + +#elif defined(__QNX__) +#include "acqnx.h" + +/* + * EFI applications can be built with -nostdlib, in this case, it must be + * included after including all other host environmental definitions, in + * order to override the definitions. + */ +#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI) +#include "acefi.h" + +#else + +/* Unknown environment */ + +#error Unknown target environment +#endif + +/*! [End] no source code translation !*/ + +/****************************************************************************** + * + * Setup defaults for the required symbols that were not defined in one of + * the host/compiler files above. + * + *****************************************************************************/ + +/* 64-bit data types */ + +#ifndef COMPILER_DEPENDENT_INT64 +#define COMPILER_DEPENDENT_INT64 long long +#endif + +#ifndef COMPILER_DEPENDENT_UINT64 +#define COMPILER_DEPENDENT_UINT64 unsigned long long +#endif + +/* Type of mutex supported by host. Default is binary semaphores. */ +#ifndef ACPI_MUTEX_TYPE +#define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE +#endif + +/* Global Lock acquire/release */ + +#ifndef ACPI_ACQUIRE_GLOBAL_LOCK +#define ACPI_ACQUIRE_GLOBAL_LOCK(Glptr, acquired) acquired = 1 +#endif + +#ifndef ACPI_RELEASE_GLOBAL_LOCK +#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, pending) pending = 0 +#endif + +/* Flush CPU cache - used when going to sleep. Wbinvd or similar. */ + +#ifndef ACPI_FLUSH_CPU_CACHE +#define ACPI_FLUSH_CPU_CACHE() +#endif + +/* "inline" keywords - configurable since inline is not standardized */ + +#ifndef ACPI_INLINE +#define ACPI_INLINE +#endif + +/* Use ordered initialization if compiler doesn't support designated. */ +#ifndef ACPI_STRUCT_INIT +#define ACPI_STRUCT_INIT(field, value) value +#endif + +/* + * Configurable calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#ifndef ACPI_SYSTEM_XFACE +#define ACPI_SYSTEM_XFACE +#endif + +#ifndef ACPI_EXTERNAL_XFACE +#define ACPI_EXTERNAL_XFACE +#endif + +#ifndef ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#endif + +#ifndef ACPI_INTERNAL_VAR_XFACE +#define ACPI_INTERNAL_VAR_XFACE +#endif + +/* + * Debugger threading model + * Use single threaded if the entire subsystem is contained in an application + * Use multiple threaded when the subsystem is running in the kernel. 
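/*
 * Tiny illustration (hypothetical struct and table) of why ACPI_STRUCT_INIT
 * exists: the same initializer source expands to plain positional values with
 * the ordered default above, and to designated ".field = value" initializers
 * where a host header overrides it.
 */
struct example_entry {
	const char *name;
	u32 flags;
};

static const struct example_entry example_table[] = {
	{ ACPI_STRUCT_INIT(name, "EXMP0001"),
	  ACPI_STRUCT_INIT(flags, 0) },
};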
+ * + * By default the model is single threaded if ACPI_APPLICATION is set, + * multi-threaded if ACPI_APPLICATION is not set. + */ +#ifndef DEBUGGER_THREADING +#if !defined (ACPI_APPLICATION) || defined (ACPI_EXEC_APP) +#define DEBUGGER_THREADING DEBUGGER_MULTI_THREADED + +#else +#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED +#endif +#endif /* !DEBUGGER_THREADING */ + +/****************************************************************************** + * + * C library configuration + * + *****************************************************************************/ + +/* + * ACPI_USE_SYSTEM_CLIBRARY - Define this if linking to an actual C library. + * Otherwise, local versions of string/memory functions will be used. + * ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and + * the standard header files may be used. Defining this implies that + * ACPI_USE_SYSTEM_CLIBRARY has been defined. + * + * The ACPICA subsystem only uses low level C library functions that do not + * call operating system services and may therefore be inlined in the code. + * + * It may be necessary to tailor these include files to the target + * generation environment. + */ + +/* Use the standard C library headers. We want to keep these to a minimum. */ + +#ifdef ACPI_USE_STANDARD_HEADERS + +/* Use the standard headers from the standard locations */ + +#include +#include +#include +#if defined (ACPI_APPLICATION) || defined(ACPI_LIBRARY) +#include +#include +#include +#include +#include +#endif + +#endif /* ACPI_USE_STANDARD_HEADERS */ + +#ifdef ACPI_APPLICATION +#define ACPI_FILE FILE * +#define ACPI_FILE_OUT stdout +#define ACPI_FILE_ERR stderr +#else +#define ACPI_FILE void * +#define ACPI_FILE_OUT NULL +#define ACPI_FILE_ERR NULL +#endif /* ACPI_APPLICATION */ + +#ifndef ACPI_INIT_FUNCTION +#define ACPI_INIT_FUNCTION +#endif + +#endif /* __ACENV_H__ */ diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h new file mode 100644 index 000000000..47d690eaf --- /dev/null +++ b/include/acpi/platform/acenvex.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acenvex.h - Extra host and compiler configuration + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACENVEX_H__ +#define __ACENVEX_H__ + +/*! [Begin] no source code translation */ + +/****************************************************************************** + * + * Extra host configuration files. All ACPICA headers are included before + * including these files. + * + *****************************************************************************/ + +#if defined(_LINUX) || defined(__linux__) +#include + +#elif defined(__DragonFly__) +#include "acdragonflyex.h" + +/* + * EFI applications can be built with -nostdlib, in this case, it must be + * included after including all other host environmental definitions, in + * order to override the definitions. + */ +#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI) +#include "acefiex.h" + +#endif + +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) +#include "acgccex.h" + +#elif defined(_MSC_VER) +#include "acmsvcex.h" + +#endif + +/*! 
[End] no source code translation !*/ + +#endif /* __ACENVEX_H__ */ diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h new file mode 100644 index 000000000..085db95a3 --- /dev/null +++ b/include/acpi/platform/acgcc.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acgcc.h - GCC specific defines, etc. + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACGCC_H__ +#define __ACGCC_H__ + +/* + * Use compiler specific is a good practice for even when + * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined. + */ +#ifndef va_arg +#ifdef ACPI_USE_BUILTIN_STDARG +typedef __builtin_va_list va_list; +#define va_start(v, l) __builtin_va_start(v, l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v, l) __builtin_va_arg(v, l) +#define va_copy(d, s) __builtin_va_copy(d, s) +#else +#include +#endif +#endif + +#define ACPI_INLINE __inline__ + +/* Function name is used for debug output. Non-ANSI, compiler-dependent */ + +#define ACPI_GET_FUNCTION_NAME __func__ + +/* + * This macro is used to tag functions as "printf-like" because + * some compilers (like GCC) can catch printf format string problems. + */ +#define ACPI_PRINTF_LIKE(c) __attribute__ ((__format__ (__printf__, c, c+1))) + +/* + * Some compilers complain about unused variables. Sometimes we don't want to + * use all the variables (for example, _acpi_module_name). This allows us + * to tell the compiler warning in a per-variable manner that a variable + * is unused. + */ +#define ACPI_UNUSED_VAR __attribute__ ((unused)) + +/* GCC supports __VA_ARGS__ in macros */ + +#define COMPILER_VA_MACRO 1 + +/* GCC supports native multiply/shift on 32-bit platforms */ + +#define ACPI_USE_NATIVE_MATH64 + +#endif /* __ACGCC_H__ */ diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h new file mode 100644 index 000000000..5d2b667af --- /dev/null +++ b/include/acpi/platform/acgccex.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acgccex.h - Extra GCC specific defines, etc. + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACGCCEX_H__ +#define __ACGCCEX_H__ + +/* + * Some versions of gcc implement strchr() with a buggy macro. So, + * undef it here. Prevents error messages of this form (usually from the + * file getopt.c): + * + * error: logical '&&' with non-zero constant will always evaluate as true + */ +#ifdef strchr +#undef strchr +#endif + +#endif /* __ACGCCEX_H__ */ diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h new file mode 100644 index 000000000..626265833 --- /dev/null +++ b/include/acpi/platform/acintel.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acintel.h - VC specific defines, etc. + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACINTEL_H__ +#define __ACINTEL_H__ + +/* + * Use compiler specific is a good practice for even when + * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined. 
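/*
 * Two illustrative declarations (hypothetical names) showing how the GCC
 * helper macros above are applied: ACPI_PRINTF_LIKE(1) lets the compiler
 * type-check the variable arguments against the format string in parameter 1,
 * and ACPI_UNUSED_VAR marks a deliberately unreferenced object, the same way
 * the per-file _acpi_module_name is handled.
 */
void example_log(const char *format, ...) ACPI_PRINTF_LIKE(1);

static const char ACPI_UNUSED_VAR example_module_name[] = "example";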
+ */ +#ifndef va_arg +#include +#endif + +/* Configuration specific to Intel 64-bit C compiler */ + +#define COMPILER_DEPENDENT_INT64 __int64 +#define COMPILER_DEPENDENT_UINT64 unsigned __int64 +#define ACPI_INLINE __inline + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* remark 981 - operands evaluated in no particular order */ +#pragma warning(disable:981) + +/* warn C4100: unreferenced formal parameter */ +#pragma warning(disable:4100) + +/* warn C4127: conditional expression is constant */ +#pragma warning(disable:4127) + +/* warn C4706: assignment within conditional expression */ +#pragma warning(disable:4706) + +/* warn C4214: bit field types other than int */ +#pragma warning(disable:4214) + +#endif /* __ACINTEL_H__ */ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h new file mode 100644 index 000000000..7451b3bca --- /dev/null +++ b/include/acpi/platform/aclinux.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: aclinux.h - OS specific defines, etc. for Linux + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACLINUX_H__ +#define __ACLINUX_H__ + +#ifdef __KERNEL__ + +/* ACPICA external files should not include ACPICA headers directly. */ + +#if !defined(BUILDING_ACPICA) && !defined(_LINUX_ACPI_H) +#error "Please don't include directly, include instead." 
+#endif + +#endif + +/* Common (in-kernel/user-space) ACPICA configuration */ + +#define ACPI_USE_SYSTEM_CLIBRARY +#define ACPI_USE_DO_WHILE_0 +#define ACPI_IGNORE_PACKAGE_RESOLUTION_ERRORS + +#ifdef __KERNEL__ + +#define ACPI_USE_SYSTEM_INTTYPES +#define ACPI_USE_GPE_POLLING + +/* Kernel specific ACPICA configuration */ + +#ifdef CONFIG_ACPI_REDUCED_HARDWARE_ONLY +#define ACPI_REDUCED_HARDWARE 1 +#endif + +#ifdef CONFIG_ACPI_DEBUGGER +#define ACPI_DEBUGGER +#endif + +#ifdef CONFIG_ACPI_DEBUG +#define ACPI_MUTEX_DEBUG +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef EXPORT_ACPI_INTERFACES +#include +#endif +#ifdef CONFIG_ACPI +#include +#endif + +#define ACPI_INIT_FUNCTION __init + +#ifndef CONFIG_ACPI + +/* External globals for __KERNEL__, stubs is needed */ + +#define ACPI_GLOBAL(t,a) +#define ACPI_INIT_GLOBAL(t,a,b) + +/* Generating stubs for configurable ACPICA macros */ + +#define ACPI_NO_MEM_ALLOCATIONS + +/* Generating stubs for configurable ACPICA functions */ + +#define ACPI_NO_ERROR_MESSAGES +#undef ACPI_DEBUG_OUTPUT + +/* External interface for __KERNEL__, stub is needed */ + +#define ACPI_EXTERNAL_RETURN_STATUS(prototype) \ + static ACPI_INLINE prototype {return(AE_NOT_CONFIGURED);} +#define ACPI_EXTERNAL_RETURN_OK(prototype) \ + static ACPI_INLINE prototype {return(AE_OK);} +#define ACPI_EXTERNAL_RETURN_VOID(prototype) \ + static ACPI_INLINE prototype {return;} +#define ACPI_EXTERNAL_RETURN_UINT32(prototype) \ + static ACPI_INLINE prototype {return(0);} +#define ACPI_EXTERNAL_RETURN_PTR(prototype) \ + static ACPI_INLINE prototype {return(NULL);} + +#endif /* CONFIG_ACPI */ + +/* Host-dependent types and defines for in-kernel ACPICA */ + +#define ACPI_MACHINE_WIDTH BITS_PER_LONG +#define ACPI_USE_NATIVE_MATH64 +#define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); +#define strtoul simple_strtoul + +#define acpi_cache_t struct kmem_cache +#define acpi_spinlock spinlock_t * +#define acpi_raw_spinlock raw_spinlock_t * +#define acpi_cpu_flags unsigned long + +/* Use native linux version of acpi_os_allocate_zeroed */ + +#define USE_NATIVE_ALLOCATE_ZEROED + +/* + * Overrides for in-kernel ACPICA + */ +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate_zeroed +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_free +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_object +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_raw_lock +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_raw_lock +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_raw_lock +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_raw_lock + +/* + * OSL interfaces used by debugger/disassembler + */ +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_debugger +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_debugger + +/* + * OSL interfaces used by utilities + */ +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_name +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address +#define 
ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_directory +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename +#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory + +#define ACPI_MSG_ERROR KERN_ERR "ACPI Error: " +#define ACPI_MSG_EXCEPTION KERN_ERR "ACPI Exception: " +#define ACPI_MSG_WARNING KERN_WARNING "ACPI Warning: " +#define ACPI_MSG_INFO KERN_INFO "ACPI: " + +#define ACPI_MSG_BIOS_ERROR KERN_ERR "ACPI BIOS Error (bug): " +#define ACPI_MSG_BIOS_WARNING KERN_WARNING "ACPI BIOS Warning (bug): " + +/* + * Linux wants to use designated initializers for function pointer structs. + */ +#define ACPI_STRUCT_INIT(field, value) .field = value + +#else /* !__KERNEL__ */ + +#define ACPI_USE_STANDARD_HEADERS + +#ifdef ACPI_USE_STANDARD_HEADERS +#include +#endif + +/* Define/disable kernel-specific declarators */ + +#ifndef __init +#define __init +#endif +#ifndef __iomem +#define __iomem +#endif + +/* Host-dependent types and defines for user-space ACPICA */ + +#define ACPI_FLUSH_CPU_CACHE() +#define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread)) + +#if defined(__ia64__) || (defined(__x86_64__) && !defined(__ILP32__)) ||\ + defined(__aarch64__) || defined(__PPC64__) ||\ + defined(__s390x__) +#define ACPI_MACHINE_WIDTH 64 +#define COMPILER_DEPENDENT_INT64 long +#define COMPILER_DEPENDENT_UINT64 unsigned long +#else +#define ACPI_MACHINE_WIDTH 32 +#define COMPILER_DEPENDENT_INT64 long long +#define COMPILER_DEPENDENT_UINT64 unsigned long long +#define ACPI_USE_NATIVE_DIVIDE +#define ACPI_USE_NATIVE_MATH64 +#endif + +#ifndef __cdecl +#define __cdecl +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ACLINUX_H__ */ diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h new file mode 100644 index 000000000..d754a1b12 --- /dev/null +++ b/include/acpi/platform/aclinuxex.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux + * + * Copyright (C) 2000 - 2018, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACLINUXEX_H__ +#define __ACLINUXEX_H__ + +#ifdef __KERNEL__ + +#ifndef ACPI_USE_NATIVE_DIVIDE + +#ifndef ACPI_DIV_64_BY_32 +#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ + do { \ + u64 (__n) = ((u64) n_hi) << 32 | (n_lo); \ + (r32) = do_div ((__n), (d32)); \ + (q32) = (u32) (__n); \ + } while (0) +#endif + +#ifndef ACPI_SHIFT_RIGHT_64 +#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ + do { \ + (n_lo) >>= 1; \ + (n_lo) |= (((n_hi) & 1) << 31); \ + (n_hi) >>= 1; \ + } while (0) +#endif + +#endif + +/* + * Overrides for in-kernel ACPICA + */ +acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void); + +acpi_status acpi_os_terminate(void); + +/* + * The irqs_disabled() check is for resume from RAM. + * Interrupts are off during resume, just like they are for boot. + * However, boot has (system_state != SYSTEM_RUNNING) + * to quiet __might_sleep() in kmalloc() and resume does not. + */ +static inline void *acpi_os_allocate(acpi_size size) +{ + return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); +} + +static inline void *acpi_os_allocate_zeroed(acpi_size size) +{ + return kzalloc(size, irqs_disabled()? 
GFP_ATOMIC : GFP_KERNEL); +} + +static inline void acpi_os_free(void *memory) +{ + kfree(memory); +} + +static inline void *acpi_os_acquire_object(acpi_cache_t * cache) +{ + return kmem_cache_zalloc(cache, + irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); +} + +static inline acpi_thread_id acpi_os_get_thread_id(void) +{ + return (acpi_thread_id) (unsigned long)current; +} + +/* + * When lockdep is enabled, the spin_lock_init() macro stringifies it's + * argument and uses that as a name for the lock in debugging. + * By executing spin_lock_init() in a macro the key changes from "lock" for + * all locks to the name of the argument of acpi_os_create_lock(), which + * prevents lockdep from reporting false positives for ACPICA locks. + */ +#define acpi_os_create_lock(__handle) \ + ({ \ + spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ + if (lock) { \ + *(__handle) = lock; \ + spin_lock_init(*(__handle)); \ + } \ + lock ? AE_OK : AE_NO_MEMORY; \ + }) + + +#define acpi_os_create_raw_lock(__handle) \ + ({ \ + raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ + if (lock) { \ + *(__handle) = lock; \ + raw_spin_lock_init(*(__handle)); \ + } \ + lock ? AE_OK : AE_NO_MEMORY; \ + }) + +static inline acpi_cpu_flags acpi_os_acquire_raw_lock(acpi_raw_spinlock lockp) +{ + acpi_cpu_flags flags; + + raw_spin_lock_irqsave(lockp, flags); + return flags; +} + +static inline void acpi_os_release_raw_lock(acpi_raw_spinlock lockp, + acpi_cpu_flags flags) +{ + raw_spin_unlock_irqrestore(lockp, flags); +} + +static inline void acpi_os_delete_raw_lock(acpi_raw_spinlock handle) +{ + ACPI_FREE(handle); +} + +static inline u8 acpi_os_readable(void *pointer, acpi_size length) +{ + return TRUE; +} + +static inline acpi_status acpi_os_initialize_debugger(void) +{ + return AE_OK; +} + +static inline void acpi_os_terminate_debugger(void) +{ + return; +} + +/* + * OSL interfaces added by Linux + */ + +#endif /* __KERNEL__ */ + +#endif /* __ACLINUXEX_H__ */ diff --git a/include/acpi/processor.h b/include/acpi/processor.h new file mode 100644 index 000000000..5b9eab15a --- /dev/null +++ b/include/acpi/processor.h @@ -0,0 +1,445 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ACPI_PROCESSOR_H +#define __ACPI_PROCESSOR_H + +#include +#include +#include +#include + +#define ACPI_PROCESSOR_CLASS "processor" +#define ACPI_PROCESSOR_DEVICE_NAME "Processor" +#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007" +#define ACPI_PROCESSOR_CONTAINER_HID "ACPI0010" + +#define ACPI_PROCESSOR_BUSY_METRIC 10 + +#define ACPI_PROCESSOR_MAX_POWER 8 +#define ACPI_PROCESSOR_MAX_C2_LATENCY 100 +#define ACPI_PROCESSOR_MAX_C3_LATENCY 1000 + +#define ACPI_PROCESSOR_MAX_THROTTLING 16 +#define ACPI_PROCESSOR_MAX_THROTTLE 250 /* 25% */ +#define ACPI_PROCESSOR_MAX_DUTY_WIDTH 4 + +#define ACPI_PDC_REVISION_ID 0x1 + +#define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */ +#define ACPI_PSD_REV0_ENTRIES 5 + +#define ACPI_TSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */ +#define ACPI_TSD_REV0_ENTRIES 5 +/* + * Types of coordination defined in ACPI 3.0. 
Same macros can be used across + * P, C and T states + */ +#define DOMAIN_COORD_TYPE_SW_ALL 0xfc +#define DOMAIN_COORD_TYPE_SW_ANY 0xfd +#define DOMAIN_COORD_TYPE_HW_ALL 0xfe + +#define ACPI_CSTATE_SYSTEMIO 0 +#define ACPI_CSTATE_FFH 1 +#define ACPI_CSTATE_HALT 2 +#define ACPI_CSTATE_INTEGER 3 + +#define ACPI_CX_DESC_LEN 32 + +/* Power Management */ + +struct acpi_processor_cx; + +struct acpi_power_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __packed; + +struct acpi_processor_cx { + u8 valid; + u8 type; + u32 address; + u8 entry_method; + u8 index; + u32 latency; + u8 bm_sts_skip; + char desc[ACPI_CX_DESC_LEN]; +}; + +struct acpi_lpi_state { + u32 min_residency; + u32 wake_latency; /* worst case */ + u32 flags; + u32 arch_flags; + u32 res_cnt_freq; + u32 enable_parent_state; + u64 address; + u8 index; + u8 entry_method; + char desc[ACPI_CX_DESC_LEN]; +}; + +struct acpi_processor_power { + int count; + union { + struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER]; + struct acpi_lpi_state lpi_states[ACPI_PROCESSOR_MAX_POWER]; + }; + int timer_broadcast_on_state; +}; + +/* Performance Management */ + +struct acpi_psd_package { + u64 num_entries; + u64 revision; + u64 domain; + u64 coord_type; + u64 num_processors; +} __packed; + +struct acpi_pct_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 reserved; + u64 address; +} __packed; + +struct acpi_processor_px { + u64 core_frequency; /* megahertz */ + u64 power; /* milliWatts */ + u64 transition_latency; /* microseconds */ + u64 bus_master_latency; /* microseconds */ + u64 control; /* control value */ + u64 status; /* success indicator */ +}; + +struct acpi_processor_performance { + unsigned int state; + unsigned int platform_limit; + struct acpi_pct_register control_register; + struct acpi_pct_register status_register; + unsigned int state_count; + struct acpi_processor_px *states; + struct acpi_psd_package domain_info; + cpumask_var_t shared_cpu_map; + unsigned int shared_type; +}; + +/* Throttling Control */ + +struct acpi_tsd_package { + u64 num_entries; + u64 revision; + u64 domain; + u64 coord_type; + u64 num_processors; +} __packed; + +struct acpi_ptc_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 reserved; + u64 address; +} __packed; + +struct acpi_processor_tx_tss { + u64 freqpercentage; /* */ + u64 power; /* milliWatts */ + u64 transition_latency; /* microseconds */ + u64 control; /* control value */ + u64 status; /* success indicator */ +}; +struct acpi_processor_tx { + u16 power; + u16 performance; +}; + +struct acpi_processor; +struct acpi_processor_throttling { + unsigned int state; + unsigned int platform_limit; + struct acpi_pct_register control_register; + struct acpi_pct_register status_register; + unsigned int state_count; + struct acpi_processor_tx_tss *states_tss; + struct acpi_tsd_package domain_info; + cpumask_var_t shared_cpu_map; + int (*acpi_processor_get_throttling) (struct acpi_processor * pr); + int (*acpi_processor_set_throttling) (struct acpi_processor * pr, + int state, bool force); + + u32 address; + u8 duty_offset; + u8 duty_width; + u8 tsd_valid_flag; + unsigned int shared_type; + struct acpi_processor_tx states[ACPI_PROCESSOR_MAX_THROTTLING]; +}; + +/* Limit Interface */ + +struct acpi_processor_lx { + int px; /* performance state */ + int tx; /* throttle level */ +}; + +struct acpi_processor_limit { + struct acpi_processor_lx 
state; /* current limit */ + struct acpi_processor_lx thermal; /* thermal limit */ + struct acpi_processor_lx user; /* user limit */ +}; + +struct acpi_processor_flags { + u8 power:1; + u8 performance:1; + u8 throttling:1; + u8 limit:1; + u8 bm_control:1; + u8 bm_check:1; + u8 has_cst:1; + u8 has_lpi:1; + u8 power_setup_done:1; + u8 bm_rld_set:1; + u8 need_hotplug_init:1; +}; + +struct acpi_processor { + acpi_handle handle; + u32 acpi_id; + phys_cpuid_t phys_id; /* CPU hardware ID such as APIC ID for x86 */ + u32 id; /* CPU logical ID allocated by OS */ + u32 pblk; + int performance_platform_limit; + int throttling_platform_limit; + /* 0 - states 0..n-th state available */ + + struct acpi_processor_flags flags; + struct acpi_processor_power power; + struct acpi_processor_performance *performance; + struct acpi_processor_throttling throttling; + struct acpi_processor_limit limit; + struct thermal_cooling_device *cdev; + struct device *dev; /* Processor device. */ +}; + +struct acpi_processor_errata { + u8 smp; + struct { + u8 throttle:1; + u8 fdma:1; + u8 reserved:6; + u32 bmisx; + } piix4; +}; + +extern int acpi_processor_preregister_performance(struct + acpi_processor_performance + __percpu *performance); + +extern int acpi_processor_register_performance(struct acpi_processor_performance + *performance, unsigned int cpu); +extern void acpi_processor_unregister_performance(unsigned int cpu); + +int acpi_processor_pstate_control(void); +/* note: this locks both the calling module and the processor module + if a _PPC object exists, rmmod is disallowed then */ +int acpi_processor_notify_smm(struct module *calling_module); +int acpi_processor_get_psd(acpi_handle handle, + struct acpi_psd_package *pdomain); + +/* parsing the _P* objects. */ +extern int acpi_processor_get_performance_info(struct acpi_processor *pr); + +/* for communication between multiple parts of the processor kernel module */ +DECLARE_PER_CPU(struct acpi_processor *, processors); +extern struct acpi_processor_errata errata; + +#if defined(ARCH_HAS_POWER_INIT) && defined(CONFIG_ACPI_PROCESSOR_CSTATE) +void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, + unsigned int cpu); +int acpi_processor_ffh_cstate_probe(unsigned int cpu, + struct acpi_processor_cx *cx, + struct acpi_power_register *reg); +void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cstate); +#else +static inline void acpi_processor_power_init_bm_check(struct + acpi_processor_flags + *flags, unsigned int cpu) +{ + flags->bm_check = 1; + return; +} +static inline int acpi_processor_ffh_cstate_probe(unsigned int cpu, + struct acpi_processor_cx *cx, + struct acpi_power_register + *reg) +{ + return -1; +} +static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx + *cstate) +{ + return; +} +#endif + +static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg, + bool direct) +{ + if (direct || (is_percpu_thread() && cpu == smp_processor_id())) + return fn(arg); + return work_on_cpu(cpu, fn, arg); +} + +/* in processor_perflib.c */ + +#ifdef CONFIG_CPU_FREQ +void acpi_processor_ppc_init(void); +void acpi_processor_ppc_exit(void); +void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag); +extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit); +#else +static inline void acpi_processor_ppc_init(void) +{ + return; +} +static inline void acpi_processor_ppc_exit(void) +{ + return; +} +static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr, + int 
event_flag) +{ + static unsigned int printout = 1; + if (printout) { + printk(KERN_WARNING + "Warning: Processor Platform Limit event detected, but not handled.\n"); + printk(KERN_WARNING + "Consider compiling CPUfreq support into your kernel.\n"); + printout = 0; + } +} +static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) +{ + return -ENODEV; +} + +#endif /* CONFIG_CPU_FREQ */ + +/* in processor_core.c */ +phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); +phys_cpuid_t acpi_map_madt_entry(u32 acpi_id); +int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id); +int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); + +#ifdef CONFIG_ACPI_CPPC_LIB +extern int acpi_cppc_processor_probe(struct acpi_processor *pr); +extern void acpi_cppc_processor_exit(struct acpi_processor *pr); +#else +static inline int acpi_cppc_processor_probe(struct acpi_processor *pr) +{ + return 0; +} +static inline void acpi_cppc_processor_exit(struct acpi_processor *pr) +{ + return; +} +#endif /* CONFIG_ACPI_CPPC_LIB */ + +/* in processor_pdc.c */ +void acpi_processor_set_pdc(acpi_handle handle); + +/* in processor_throttling.c */ +#ifdef CONFIG_ACPI_CPU_FREQ_PSS +int acpi_processor_tstate_has_changed(struct acpi_processor *pr); +int acpi_processor_get_throttling_info(struct acpi_processor *pr); +extern int acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force); +/* + * Reevaluate whether the T-state is invalid after one cpu is + * onlined/offlined. In such case the flags.throttling will be updated. + */ +extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, + bool is_dead); +extern const struct file_operations acpi_processor_throttling_fops; +extern void acpi_processor_throttling_init(void); +#else +static inline int acpi_processor_tstate_has_changed(struct acpi_processor *pr) +{ + return 0; +} + +static inline int acpi_processor_get_throttling_info(struct acpi_processor *pr) +{ + return -ENODEV; +} + +static inline int acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force) +{ + return -ENODEV; +} + +static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, + bool is_dead) {} + +static inline void acpi_processor_throttling_init(void) {} +#endif /* CONFIG_ACPI_CPU_FREQ_PSS */ + +/* in processor_idle.c */ +extern struct cpuidle_driver acpi_idle_driver; +#ifdef CONFIG_ACPI_PROCESSOR_IDLE +int acpi_processor_power_init(struct acpi_processor *pr); +int acpi_processor_power_exit(struct acpi_processor *pr); +int acpi_processor_power_state_has_changed(struct acpi_processor *pr); +int acpi_processor_hotplug(struct acpi_processor *pr); +#else +static inline int acpi_processor_power_init(struct acpi_processor *pr) +{ + return -ENODEV; +} + +static inline int acpi_processor_power_exit(struct acpi_processor *pr) +{ + return -ENODEV; +} + +static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr) +{ + return -ENODEV; +} + +static inline int acpi_processor_hotplug(struct acpi_processor *pr) +{ + return -ENODEV; +} +#endif /* CONFIG_ACPI_PROCESSOR_IDLE */ + +/* in processor_thermal.c */ +int acpi_processor_get_limit_info(struct acpi_processor *pr); +extern const struct thermal_cooling_device_ops processor_cooling_ops; +#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ) +void acpi_thermal_cpufreq_init(void); +void acpi_thermal_cpufreq_exit(void); +#else +static inline void acpi_thermal_cpufreq_init(void) +{ + return; +} +static inline void 
acpi_thermal_cpufreq_exit(void) +{ + return; +} +#endif /* CONFIG_ACPI_CPU_FREQ_PSS */ + +#endif diff --git a/include/acpi/reboot.h b/include/acpi/reboot.h new file mode 100644 index 000000000..14122fc55 --- /dev/null +++ b/include/acpi/reboot.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ACPI_REBOOT_H +#define __ACPI_REBOOT_H + +#ifdef CONFIG_ACPI +extern void acpi_reboot(void); +#else +static inline void acpi_reboot(void) { } +#endif + +#endif + diff --git a/include/acpi/video.h b/include/acpi/video.h new file mode 100644 index 000000000..db8548ff0 --- /dev/null +++ b/include/acpi/video.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ACPI_VIDEO_H +#define __ACPI_VIDEO_H + +#include /* for ENODEV */ +#include /* for bool */ + +struct acpi_video_brightness_flags { + u8 _BCL_no_ac_battery_levels:1; /* no AC/Battery levels in _BCL */ + u8 _BCL_reversed:1; /* _BCL package is in a reversed order */ + u8 _BQC_use_index:1; /* _BQC returns an index value */ +}; + +struct acpi_video_device_brightness { + int curr; + int count; + int *levels; + struct acpi_video_brightness_flags flags; +}; + +struct acpi_device; + +#define ACPI_VIDEO_CLASS "video" + +#define ACPI_VIDEO_DISPLAY_CRT 1 +#define ACPI_VIDEO_DISPLAY_TV 2 +#define ACPI_VIDEO_DISPLAY_DVI 3 +#define ACPI_VIDEO_DISPLAY_LCD 4 + +#define ACPI_VIDEO_DISPLAY_LEGACY_MONITOR 0x0100 +#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110 +#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200 + +#define ACPI_VIDEO_NOTIFY_SWITCH 0x80 +#define ACPI_VIDEO_NOTIFY_PROBE 0x81 +#define ACPI_VIDEO_NOTIFY_CYCLE 0x82 +#define ACPI_VIDEO_NOTIFY_NEXT_OUTPUT 0x83 +#define ACPI_VIDEO_NOTIFY_PREV_OUTPUT 0x84 +#define ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS 0x85 +#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86 +#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87 +#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS 0x88 +#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF 0x89 + +enum acpi_backlight_type { + acpi_backlight_undef = -1, + acpi_backlight_none = 0, + acpi_backlight_video, + acpi_backlight_vendor, + acpi_backlight_native, +}; + +#if IS_ENABLED(CONFIG_ACPI_VIDEO) +extern int acpi_video_register(void); +extern void acpi_video_unregister(void); +extern int acpi_video_get_edid(struct acpi_device *device, int type, + int device_id, void **edid); +extern enum acpi_backlight_type acpi_video_get_backlight_type(void); +extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type); +/* + * Note: The value returned by acpi_video_handles_brightness_key_presses() + * may change over time and should not be cached. 
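+ *
+ * Editor's sketch (not part of the original header): a hypothetical input
+ * handler, example_brightness_key(), re-checks this helper on every key
+ * event instead of caching the result at probe time, and drops the event
+ * when the ACPI video driver already reports it:
+ *
+ *	static void example_brightness_key(struct input_dev *idev, unsigned int key)
+ *	{
+ *		if (acpi_video_handles_brightness_key_presses())
+ *			return;
+ *		input_report_key(idev, key, 1);
+ *		input_sync(idev);
+ *	}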
+ */ +extern bool acpi_video_handles_brightness_key_presses(void); +extern int acpi_video_get_levels(struct acpi_device *device, + struct acpi_video_device_brightness **dev_br, + int *pmax_level); +#else +static inline int acpi_video_register(void) { return -ENODEV; } +static inline void acpi_video_unregister(void) { return; } +static inline int acpi_video_get_edid(struct acpi_device *device, int type, + int device_id, void **edid) +{ + return -ENODEV; +} +static inline enum acpi_backlight_type acpi_video_get_backlight_type(void) +{ + return acpi_backlight_vendor; +} +static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type) +{ +} +static inline bool acpi_video_handles_brightness_key_presses(void) +{ + return false; +} +static inline int acpi_video_get_levels(struct acpi_device *device, + struct acpi_video_device_brightness **dev_br, + int *pmax_level) +{ + return -ENODEV; +} +#endif + +#endif diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h new file mode 100644 index 000000000..e3667c9a3 --- /dev/null +++ b/include/asm-generic/4level-fixup.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _4LEVEL_FIXUP_H +#define _4LEVEL_FIXUP_H + +#define __ARCH_HAS_4LEVEL_HACK +#define __PAGETABLE_PUD_FOLDED 1 + +#define PUD_SHIFT PGDIR_SHIFT +#define PUD_SIZE PGDIR_SIZE +#define PUD_MASK PGDIR_MASK +#define PTRS_PER_PUD 1 + +#define pud_t pgd_t + +#define pmd_alloc(mm, pud, address) \ + ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \ + NULL: pmd_offset(pud, address)) + +#define pud_offset(pgd, start) (pgd) +#define pud_none(pud) 0 +#define pud_bad(pud) 0 +#define pud_present(pud) 1 +#define pud_ERROR(pud) do { } while (0) +#define pud_clear(pud) pgd_clear(pud) +#define pud_val(pud) pgd_val(pud) +#define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd) +#define pud_page(pud) pgd_page(pud) +#define pud_page_vaddr(pud) pgd_page_vaddr(pud) + +#undef pud_free_tlb +#define pud_free_tlb(tlb, x, addr) do { } while (0) +#define pud_free(mm, x) do { } while (0) +#define __pud_free_tlb(tlb, x, addr) do { } while (0) + +#undef pud_addr_end +#define pud_addr_end(addr, end) (end) + +#include + +#endif diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h new file mode 100644 index 000000000..73474bb52 --- /dev/null +++ b/include/asm-generic/5level-fixup.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _5LEVEL_FIXUP_H +#define _5LEVEL_FIXUP_H + +#define __ARCH_HAS_5LEVEL_HACK +#define __PAGETABLE_P4D_FOLDED 1 + +#define P4D_SHIFT PGDIR_SHIFT +#define P4D_SIZE PGDIR_SIZE +#define P4D_MASK PGDIR_MASK +#define MAX_PTRS_PER_P4D 1 +#define PTRS_PER_P4D 1 + +#define p4d_t pgd_t + +#define pud_alloc(mm, p4d, address) \ + ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? 
\ + NULL : pud_offset(p4d, address)) + +#define p4d_alloc(mm, pgd, address) (pgd) +#define p4d_offset(pgd, start) (pgd) +#define p4d_none(p4d) 0 +#define p4d_bad(p4d) 0 +#define p4d_present(p4d) 1 +#define p4d_ERROR(p4d) do { } while (0) +#define p4d_clear(p4d) pgd_clear(p4d) +#define p4d_val(p4d) pgd_val(p4d) +#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud) +#define p4d_page(p4d) pgd_page(p4d) +#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d) + +#define __p4d(x) __pgd(x) +#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d) + +#undef p4d_free_tlb +#define p4d_free_tlb(tlb, x, addr) do { } while (0) +#define p4d_free(mm, x) do { } while (0) +#define __p4d_free_tlb(tlb, x, addr) do { } while (0) + +#undef p4d_addr_end +#define p4d_addr_end(addr, end) (end) + +#endif diff --git a/include/asm-generic/asm-offsets.h b/include/asm-generic/asm-offsets.h new file mode 100644 index 000000000..d370ee36a --- /dev/null +++ b/include/asm-generic/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h new file mode 100644 index 000000000..2fa2bc208 --- /dev/null +++ b/include/asm-generic/asm-prototypes.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#undef __memset +extern void *__memset(void *, int, __kernel_size_t); +#undef __memcpy +extern void *__memcpy(void *, const void *, __kernel_size_t); +#undef __memmove +extern void *__memmove(void *, const void *, __kernel_size_t); +#undef memset +extern void *memset(void *, int, __kernel_size_t); +#undef memcpy +extern void *memcpy(void *, const void *, __kernel_size_t); +#undef memmove +extern void *memmove(void *, const void *, __kernel_size_t); diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h new file mode 100644 index 000000000..0d4b1d3db --- /dev/null +++ b/include/asm-generic/atomic-instrumented.h @@ -0,0 +1,467 @@ +/* + * This file provides wrappers with KASAN instrumentation for atomic operations. + * To use this functionality an arch's atomic.h file needs to define all + * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include + * this file at the end. This file provides atomic_read() that forwards to + * arch_atomic_read() for actual atomic operation. + * Note: if an arch atomic operation is implemented by means of other atomic + * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use + * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid + * double instrumentation. 
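+ *
+ * As a rough illustration (editor's note, not taken from any particular
+ * architecture), an arch header would provide something like:
+ *
+ *	static __always_inline int arch_atomic_read(const atomic_t *v)
+ *	{
+ *		return READ_ONCE(v->counter);
+ *	}
+ *
+ * and the atomic_read() wrapper below then adds the KASAN check before
+ * forwarding to it.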
+ */ + +#ifndef _LINUX_ATOMIC_INSTRUMENTED_H +#define _LINUX_ATOMIC_INSTRUMENTED_H + +#include +#include + +static __always_inline int atomic_read(const atomic_t *v) +{ + kasan_check_read(v, sizeof(*v)); + return arch_atomic_read(v); +} + +static __always_inline s64 atomic64_read(const atomic64_t *v) +{ + kasan_check_read(v, sizeof(*v)); + return arch_atomic64_read(v); +} + +static __always_inline void atomic_set(atomic_t *v, int i) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_set(v, i); +} + +static __always_inline void atomic64_set(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_set(v, i); +} + +static __always_inline int atomic_xchg(atomic_t *v, int i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_xchg(v, i); +} + +static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_xchg(v, i); +} + +static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_cmpxchg(v, old, new); +} + +static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg(v, old, new); +} + +#ifdef arch_atomic_try_cmpxchg +#define atomic_try_cmpxchg atomic_try_cmpxchg +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_read(old, sizeof(*old)); + return arch_atomic_try_cmpxchg(v, old, new); +} +#endif + +#ifdef arch_atomic64_try_cmpxchg +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_read(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg(v, old, new); +} +#endif + +#ifdef arch_atomic_fetch_add_unless +#define atomic_fetch_add_unless atomic_fetch_add_unless +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_add_unless(v, a, u); +} +#endif + +#ifdef arch_atomic64_fetch_add_unless +#define atomic64_fetch_add_unless atomic64_fetch_add_unless +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_unless(v, a, u); +} +#endif + +#ifdef arch_atomic_inc +#define atomic_inc atomic_inc +static __always_inline void atomic_inc(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_inc(v); +} +#endif + +#ifdef arch_atomic64_inc +#define atomic64_inc atomic64_inc +static __always_inline void atomic64_inc(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_inc(v); +} +#endif + +#ifdef arch_atomic_dec +#define atomic_dec atomic_dec +static __always_inline void atomic_dec(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_dec(v); +} +#endif + +#ifdef atch_atomic64_dec +#define atomic64_dec +static __always_inline void atomic64_dec(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_dec(v); +} +#endif + +static __always_inline void atomic_add(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_add(i, v); +} + +static __always_inline void atomic64_add(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_add(i, v); +} + +static __always_inline void atomic_sub(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_sub(i, v); +} + +static 
__always_inline void atomic64_sub(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_sub(i, v); +} + +static __always_inline void atomic_and(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_and(i, v); +} + +static __always_inline void atomic64_and(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_and(i, v); +} + +static __always_inline void atomic_or(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_or(i, v); +} + +static __always_inline void atomic64_or(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_or(i, v); +} + +static __always_inline void atomic_xor(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_xor(i, v); +} + +static __always_inline void atomic64_xor(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_xor(i, v); +} + +#ifdef arch_atomic_inc_return +#define atomic_inc_return atomic_inc_return +static __always_inline int atomic_inc_return(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_return(v); +} +#endif + +#ifdef arch_atomic64_in_return +#define atomic64_inc_return atomic64_inc_return +static __always_inline s64 atomic64_inc_return(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_return(v); +} +#endif + +#ifdef arch_atomic_dec_return +#define atomic_dec_return atomic_dec_return +static __always_inline int atomic_dec_return(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_return(v); +} +#endif + +#ifdef arch_atomic64_dec_return +#define atomic64_dec_return atomic64_dec_return +static __always_inline s64 atomic64_dec_return(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_return(v); +} +#endif + +#ifdef arch_atomic64_inc_not_zero +#define atomic64_inc_not_zero atomic64_inc_not_zero +static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_not_zero(v); +} +#endif + +#ifdef arch_atomic64_dec_if_positive +#define atomic64_dec_if_positive atomic64_dec_if_positive +static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_if_positive(v); +} +#endif + +#ifdef arch_atomic_dec_and_test +#define atomic_dec_and_test atomic_dec_and_test +static __always_inline bool atomic_dec_and_test(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_and_test(v); +} +#endif + +#ifdef arch_atomic64_dec_and_test +#define atomic64_dec_and_test atomic64_dec_and_test +static __always_inline bool atomic64_dec_and_test(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_and_test(v); +} +#endif + +#ifdef arch_atomic_inc_and_test +#define atomic_inc_and_test atomic_inc_and_test +static __always_inline bool atomic_inc_and_test(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_and_test(v); +} +#endif + +#ifdef arch_atomic64_inc_and_test +#define atomic64_inc_and_test atomic64_inc_and_test +static __always_inline bool atomic64_inc_and_test(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_and_test(v); +} +#endif + +static __always_inline int atomic_add_return(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_add_return(i, v); +} + +static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); 
+ return arch_atomic64_add_return(i, v); +} + +static __always_inline int atomic_sub_return(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_return(i, v); +} + +static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_return(i, v); +} + +static __always_inline int atomic_fetch_add(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_add(i, v); +} + +static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_add(i, v); +} + +static __always_inline int atomic_fetch_sub(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_sub(i, v); +} + +static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub(i, v); +} + +static __always_inline int atomic_fetch_and(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_and(i, v); +} + +static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_and(i, v); +} + +static __always_inline int atomic_fetch_or(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_or(i, v); +} + +static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_or(i, v); +} + +static __always_inline int atomic_fetch_xor(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_xor(i, v); +} + +static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor(i, v); +} + +#ifdef arch_atomic_sub_and_test +#define atomic_sub_and_test atomic_sub_and_test +static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_and_test(i, v); +} +#endif + +#ifdef arch_atomic64_sub_and_test +#define atomic64_sub_and_test atomic64_sub_and_test +static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_and_test(i, v); +} +#endif + +#ifdef arch_atomic_add_negative +#define atomic_add_negative atomic_add_negative +static __always_inline bool atomic_add_negative(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_add_negative(i, v); +} +#endif + +#ifdef arch_atomic64_add_negative +#define atomic64_add_negative atomic64_add_negative +static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_add_negative(i, v); +} +#endif + +#define xchg(ptr, new) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg(__ai_ptr, (new)); \ +}) + +#define cmpxchg(ptr, old, new) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg(__ai_ptr, (old), (new)); \ +}) + +#define sync_cmpxchg(ptr, old, new) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_sync_cmpxchg(__ai_ptr, (old), (new)); \ +}) + +#define cmpxchg_local(ptr, old, new) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_local(__ai_ptr, (old), (new)); \ +}) + +#define 
cmpxchg64(ptr, old, new) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64(__ai_ptr, (old), (new)); \ +}) + +#define cmpxchg64_local(ptr, old, new) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_local(__ai_ptr, (old), (new)); \ +}) + +#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ +({ \ + typeof(p1) __ai_p1 = (p1); \ + kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \ + arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \ +}) + +#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ +({ \ + typeof(p1) __ai_p1 = (p1); \ + kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \ + arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \ +}) + +#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h new file mode 100644 index 000000000..87d14476e --- /dev/null +++ b/include/asm-generic/atomic-long.h @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_ATOMIC_LONG_H +#define _ASM_GENERIC_ATOMIC_LONG_H +/* + * Copyright (C) 2005 Silicon Graphics, Inc. + * Christoph Lameter + * + * Allows to provide arch independent atomic definitions without the need to + * edit all arch specific atomic.h files. + */ + +#include + +/* + * Suppport for atomic_long_t + * + * Casts for parameters are avoided for existing atomic functions in order to + * avoid issues with cast-as-lval under gcc 4.x and other limitations that the + * macros of a platform may have. + */ + +#if BITS_PER_LONG == 64 + +typedef atomic64_t atomic_long_t; + +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define ATOMIC_LONG_PFX(x) atomic64 ## x +#define ATOMIC_LONG_TYPE s64 + +#else + +typedef atomic_t atomic_long_t; + +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +#define ATOMIC_LONG_PFX(x) atomic ## x +#define ATOMIC_LONG_TYPE int + +#endif + +#define ATOMIC_LONG_READ_OP(mo) \ +static inline long atomic_long_read##mo(const atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_read##mo)(v); \ +} +ATOMIC_LONG_READ_OP() +ATOMIC_LONG_READ_OP(_acquire) + +#undef ATOMIC_LONG_READ_OP + +#define ATOMIC_LONG_SET_OP(mo) \ +static inline void atomic_long_set##mo(atomic_long_t *l, long i) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + ATOMIC_LONG_PFX(_set##mo)(v, i); \ +} +ATOMIC_LONG_SET_OP() +ATOMIC_LONG_SET_OP(_release) + +#undef ATOMIC_LONG_SET_OP + +#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \ +static inline long \ +atomic_long_##op##_return##mo(long i, atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \ +} +ATOMIC_LONG_ADD_SUB_OP(add,) +ATOMIC_LONG_ADD_SUB_OP(add, _relaxed) +ATOMIC_LONG_ADD_SUB_OP(add, _acquire) +ATOMIC_LONG_ADD_SUB_OP(add, _release) +ATOMIC_LONG_ADD_SUB_OP(sub,) +ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed) +ATOMIC_LONG_ADD_SUB_OP(sub, _acquire) +ATOMIC_LONG_ADD_SUB_OP(sub, _release) + +#undef ATOMIC_LONG_ADD_SUB_OP + +#define atomic_long_cmpxchg_relaxed(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \ + (old), (new))) +#define atomic_long_cmpxchg_acquire(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \ + (old), (new))) +#define atomic_long_cmpxchg_release(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \ + (old), (new))) +#define 
atomic_long_cmpxchg(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new))) + + +#define atomic_long_try_cmpxchg_relaxed(l, old, new) \ + (ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \ + (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) +#define atomic_long_try_cmpxchg_acquire(l, old, new) \ + (ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \ + (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) +#define atomic_long_try_cmpxchg_release(l, old, new) \ + (ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \ + (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) +#define atomic_long_try_cmpxchg(l, old, new) \ + (ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \ + (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) + + +#define atomic_long_xchg_relaxed(v, new) \ + (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new))) +#define atomic_long_xchg_acquire(v, new) \ + (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new))) +#define atomic_long_xchg_release(v, new) \ + (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new))) +#define atomic_long_xchg(v, new) \ + (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new))) + +static __always_inline void atomic_long_inc(atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + ATOMIC_LONG_PFX(_inc)(v); +} + +static __always_inline void atomic_long_dec(atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + ATOMIC_LONG_PFX(_dec)(v); +} + +#define ATOMIC_LONG_FETCH_OP(op, mo) \ +static inline long \ +atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \ +} + +ATOMIC_LONG_FETCH_OP(add, ) +ATOMIC_LONG_FETCH_OP(add, _relaxed) +ATOMIC_LONG_FETCH_OP(add, _acquire) +ATOMIC_LONG_FETCH_OP(add, _release) +ATOMIC_LONG_FETCH_OP(sub, ) +ATOMIC_LONG_FETCH_OP(sub, _relaxed) +ATOMIC_LONG_FETCH_OP(sub, _acquire) +ATOMIC_LONG_FETCH_OP(sub, _release) +ATOMIC_LONG_FETCH_OP(and, ) +ATOMIC_LONG_FETCH_OP(and, _relaxed) +ATOMIC_LONG_FETCH_OP(and, _acquire) +ATOMIC_LONG_FETCH_OP(and, _release) +ATOMIC_LONG_FETCH_OP(andnot, ) +ATOMIC_LONG_FETCH_OP(andnot, _relaxed) +ATOMIC_LONG_FETCH_OP(andnot, _acquire) +ATOMIC_LONG_FETCH_OP(andnot, _release) +ATOMIC_LONG_FETCH_OP(or, ) +ATOMIC_LONG_FETCH_OP(or, _relaxed) +ATOMIC_LONG_FETCH_OP(or, _acquire) +ATOMIC_LONG_FETCH_OP(or, _release) +ATOMIC_LONG_FETCH_OP(xor, ) +ATOMIC_LONG_FETCH_OP(xor, _relaxed) +ATOMIC_LONG_FETCH_OP(xor, _acquire) +ATOMIC_LONG_FETCH_OP(xor, _release) + +#undef ATOMIC_LONG_FETCH_OP + +#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo) \ +static inline long \ +atomic_long_fetch_##op##mo(atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v); \ +} + +ATOMIC_LONG_FETCH_INC_DEC_OP(inc,) +ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed) +ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire) +ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec,) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release) + +#undef ATOMIC_LONG_FETCH_INC_DEC_OP + +#define ATOMIC_LONG_OP(op) \ +static __always_inline void \ +atomic_long_##op(long i, atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + ATOMIC_LONG_PFX(_##op)(i, v); \ +} + 
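+/*
+ * Editor's note (illustrative only, not part of the original header): on a
+ * 64-bit kernel, ATOMIC_LONG_OP(add) below expands to roughly:
+ *
+ *	static __always_inline void atomic_long_add(long i, atomic_long_t *l)
+ *	{
+ *		atomic64_t *v = (atomic64_t *)l;
+ *
+ *		atomic64_add(i, v);
+ *	}
+ *
+ * and to the equivalent atomic_t/atomic_add() form when BITS_PER_LONG == 32.
+ */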
+ATOMIC_LONG_OP(add) +ATOMIC_LONG_OP(sub) +ATOMIC_LONG_OP(and) +ATOMIC_LONG_OP(andnot) +ATOMIC_LONG_OP(or) +ATOMIC_LONG_OP(xor) + +#undef ATOMIC_LONG_OP + +static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + return ATOMIC_LONG_PFX(_sub_and_test)(i, v); +} + +static inline int atomic_long_dec_and_test(atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + return ATOMIC_LONG_PFX(_dec_and_test)(v); +} + +static inline int atomic_long_inc_and_test(atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + return ATOMIC_LONG_PFX(_inc_and_test)(v); +} + +static inline int atomic_long_add_negative(long i, atomic_long_t *l) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + return ATOMIC_LONG_PFX(_add_negative)(i, v); +} + +#define ATOMIC_LONG_INC_DEC_OP(op, mo) \ +static inline long \ +atomic_long_##op##_return##mo(atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \ +} +ATOMIC_LONG_INC_DEC_OP(inc,) +ATOMIC_LONG_INC_DEC_OP(inc, _relaxed) +ATOMIC_LONG_INC_DEC_OP(inc, _acquire) +ATOMIC_LONG_INC_DEC_OP(inc, _release) +ATOMIC_LONG_INC_DEC_OP(dec,) +ATOMIC_LONG_INC_DEC_OP(dec, _relaxed) +ATOMIC_LONG_INC_DEC_OP(dec, _acquire) +ATOMIC_LONG_INC_DEC_OP(dec, _release) + +#undef ATOMIC_LONG_INC_DEC_OP + +static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) +{ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + + return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u); +} + +#define atomic_long_inc_not_zero(l) \ + ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l)) + +#define atomic_long_cond_read_relaxed(v, c) \ + ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c)) +#define atomic_long_cond_read_acquire(v, c) \ + ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c)) + +#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h new file mode 100644 index 000000000..13324aa82 --- /dev/null +++ b/include/asm-generic/atomic.h @@ -0,0 +1,202 @@ +/* + * Generic C implementation of atomic counter operations. Usable on + * UP systems only. Do not include in machine independent code. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_ATOMIC_H +#define __ASM_GENERIC_ATOMIC_H + +#include +#include + +/* + * atomic_$op() - $op integer to atomic variable + * @i: integer value to $op + * @v: pointer to the atomic variable + * + * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use + * smp_mb__{before,after}_atomic(). + */ + +/* + * atomic_$op_return() - $op interer to atomic variable and returns the result + * @i: integer value to $op + * @v: pointer to the atomic variable + * + * Atomically $ops @i to @v. Does imply a full memory barrier. 
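+ *
+ * For example (editor's sketch), ATOMIC_OP_RETURN(add, +) further below
+ * generates, in the CONFIG_SMP case, a cmpxchg() retry loop equivalent to:
+ *
+ *	static inline int atomic_add_return(int i, atomic_t *v)
+ *	{
+ *		int c, old;
+ *
+ *		c = v->counter;
+ *		while ((old = cmpxchg(&v->counter, c, c + i)) != c)
+ *			c = old;
+ *		return c + i;
+ *	}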
+ */ + +#ifdef CONFIG_SMP + +/* we can build all atomic primitives from cmpxchg */ + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ + \ + return c c_op i; \ +} + +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ + \ + return c; \ +} + +#else + +#include + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + v->counter = v->counter c_op i; \ + raw_local_irq_restore(flags); \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + int ret; \ + \ + raw_local_irq_save(flags); \ + ret = (v->counter = v->counter c_op i); \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + int ret; \ + \ + raw_local_irq_save(flags); \ + ret = v->counter; \ + v->counter = v->counter c_op i; \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + +#endif /* CONFIG_SMP */ + +#ifndef atomic_add_return +ATOMIC_OP_RETURN(add, +) +#endif + +#ifndef atomic_sub_return +ATOMIC_OP_RETURN(sub, -) +#endif + +#ifndef atomic_fetch_add +ATOMIC_FETCH_OP(add, +) +#endif + +#ifndef atomic_fetch_sub +ATOMIC_FETCH_OP(sub, -) +#endif + +#ifndef atomic_fetch_and +ATOMIC_FETCH_OP(and, &) +#endif + +#ifndef atomic_fetch_or +ATOMIC_FETCH_OP(or, |) +#endif + +#ifndef atomic_fetch_xor +ATOMIC_FETCH_OP(xor, ^) +#endif + +#ifndef atomic_and +ATOMIC_OP(and, &) +#endif + +#ifndef atomic_or +ATOMIC_OP(or, |) +#endif + +#ifndef atomic_xor +ATOMIC_OP(xor, ^) +#endif + +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + */ + +#define ATOMIC_INIT(i) { (i) } + +/** + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. + */ +#ifndef atomic_read +#define atomic_read(v) READ_ONCE((v)->counter) +#endif + +/** + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) + +#include + +static inline void atomic_add(int i, atomic_t *v) +{ + atomic_add_return(i, v); +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + atomic_sub_return(i, v); +} + +#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) + +#endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h new file mode 100644 index 000000000..97b28b7f1 --- /dev/null +++ b/include/asm-generic/atomic64.h @@ -0,0 +1,60 @@ +/* + * Generic implementation of 64-bit atomics using spinlocks, + * useful on processors that don't have 64-bit atomic instructions. + * + * Copyright © 2009 Paul Mackerras, IBM Corp. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _ASM_GENERIC_ATOMIC64_H +#define _ASM_GENERIC_ATOMIC64_H +#include + +typedef struct { + long long counter; +} atomic64_t; + +#define ATOMIC64_INIT(i) { (i) } + +extern long long atomic64_read(const atomic64_t *v); +extern void atomic64_set(atomic64_t *v, long long i); + +#define atomic64_set_release(v, i) atomic64_set((v), (i)) + +#define ATOMIC64_OP(op) \ +extern void atomic64_##op(long long a, atomic64_t *v); + +#define ATOMIC64_OP_RETURN(op) \ +extern long long atomic64_##op##_return(long long a, atomic64_t *v); + +#define ATOMIC64_FETCH_OP(op) \ +extern long long atomic64_fetch_##op(long long a, atomic64_t *v); + +#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) + +ATOMIC64_OPS(add) +ATOMIC64_OPS(sub) + +#undef ATOMIC64_OPS +#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op) + +ATOMIC64_OPS(and) +ATOMIC64_OPS(or) +ATOMIC64_OPS(xor) + +#undef ATOMIC64_OPS +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP + +extern long long atomic64_dec_if_positive(atomic64_t *v); +#define atomic64_dec_if_positive atomic64_dec_if_positive +extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n); +extern long long atomic64_xchg(atomic64_t *v, long long new); +extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u); +#define atomic64_fetch_add_unless atomic64_fetch_add_unless + +#endif /* _ASM_GENERIC_ATOMIC64_H */ diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h new file mode 100644 index 000000000..331670807 --- /dev/null +++ b/include/asm-generic/audit_change_attr.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifdef __NR_chmod +__NR_chmod, +#endif +__NR_fchmod, +#ifdef __NR_chown +__NR_chown, +__NR_lchown, +#endif +#ifdef __NR_fchown +__NR_fchown, +#endif +__NR_setxattr, +__NR_lsetxattr, +__NR_fsetxattr, +__NR_removexattr, +__NR_lremovexattr, +__NR_fremovexattr, +#ifdef __NR_fchownat +__NR_fchownat, +__NR_fchmodat, +#endif +#ifdef __NR_chown32 +__NR_chown32, +__NR_fchown32, +__NR_lchown32, +#endif +#ifdef __NR_link +__NR_link, +#endif +#ifdef __NR_linkat +__NR_linkat, +#endif diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h new file mode 100644 index 000000000..dd5a9dd7a --- /dev/null +++ b/include/asm-generic/audit_dir_write.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifdef __NR_rename +__NR_rename, +#endif +#ifdef __NR_mkdir +__NR_mkdir, +#endif +#ifdef __NR_rmdir +__NR_rmdir, +#endif +#ifdef __NR_creat +__NR_creat, +#endif +#ifdef __NR_link +__NR_link, +#endif +#ifdef __NR_unlink +__NR_unlink, +#endif +#ifdef __NR_symlink +__NR_symlink, +#endif +#ifdef __NR_mknod +__NR_mknod, +#endif +#ifdef __NR_mkdirat +__NR_mkdirat, +__NR_mknodat, +__NR_unlinkat, +#ifdef __NR_renameat +__NR_renameat, +#endif +__NR_linkat, +__NR_symlinkat, +#endif +#ifdef __NR_renameat2 +__NR_renameat2, +#endif diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h new file mode 100644 index 000000000..7bb7b5a83 --- /dev/null +++ b/include/asm-generic/audit_read.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifdef __NR_readlink +__NR_readlink, +#endif +__NR_quotactl, +__NR_listxattr, 
+__NR_llistxattr, +__NR_flistxattr, +__NR_getxattr, +__NR_lgetxattr, +__NR_fgetxattr, +#ifdef __NR_readlinkat +__NR_readlinkat, +#endif diff --git a/include/asm-generic/audit_signal.h b/include/asm-generic/audit_signal.h new file mode 100644 index 000000000..6feab7f18 --- /dev/null +++ b/include/asm-generic/audit_signal.h @@ -0,0 +1,3 @@ +__NR_kill, +__NR_tgkill, +__NR_tkill, diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h new file mode 100644 index 000000000..f9f1d0ae1 --- /dev/null +++ b/include/asm-generic/audit_write.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +__NR_acct, +#ifdef __NR_swapon +__NR_swapon, +#endif +__NR_quotactl, +#ifdef __NR_truncate +__NR_truncate, +#endif +#ifdef __NR_truncate64 +__NR_truncate64, +#endif +#ifdef __NR_ftruncate +__NR_ftruncate, +#endif +#ifdef __NR_ftruncate64 +__NR_ftruncate64, +#endif +#ifdef __NR_bind +__NR_bind, /* bind can affect fs object only in one way... */ +#endif +#ifdef __NR_fallocate +__NR_fallocate, +#endif diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h new file mode 100644 index 000000000..2cafdbb9a --- /dev/null +++ b/include/asm-generic/barrier.h @@ -0,0 +1,265 @@ +/* + * Generic barrier definitions. + * + * It should be possible to use these on really simple architectures, + * but it serves more as a starting point for new ports. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_BARRIER_H +#define __ASM_GENERIC_BARRIER_H + +#ifndef __ASSEMBLY__ + +#include + +#ifndef nop +#define nop() asm volatile ("nop") +#endif + +/* + * Force strict CPU ordering. And yes, this is required on UP too when we're + * talking to devices. + * + * Fall back to compiler barriers if nothing better is provided. 
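+ *
+ * A port normally defines whatever primitives it can implement natively in
+ * its own asm/barrier.h before pulling in this file, e.g. (editor's sketch
+ * with a made-up instruction name):
+ *
+ *	#define mb()	asm volatile("full-barrier" ::: "memory")
+ *	#include <asm-generic/barrier.h>
+ *
+ * Anything left undefined falls back to the generic definitions below.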
+ */ + +#ifndef mb +#define mb() barrier() +#endif + +#ifndef rmb +#define rmb() mb() +#endif + +#ifndef wmb +#define wmb() mb() +#endif + +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif + +#ifndef dma_wmb +#define dma_wmb() wmb() +#endif + +#ifndef read_barrier_depends +#define read_barrier_depends() do { } while (0) +#endif + +#ifndef __smp_mb +#define __smp_mb() mb() +#endif + +#ifndef __smp_rmb +#define __smp_rmb() rmb() +#endif + +#ifndef __smp_wmb +#define __smp_wmb() wmb() +#endif + +#ifndef __smp_read_barrier_depends +#define __smp_read_barrier_depends() read_barrier_depends() +#endif + +#ifdef CONFIG_SMP + +#ifndef smp_mb +#define smp_mb() __smp_mb() +#endif + +#ifndef smp_rmb +#define smp_rmb() __smp_rmb() +#endif + +#ifndef smp_wmb +#define smp_wmb() __smp_wmb() +#endif + +#ifndef smp_read_barrier_depends +#define smp_read_barrier_depends() __smp_read_barrier_depends() +#endif + +#else /* !CONFIG_SMP */ + +#ifndef smp_mb +#define smp_mb() barrier() +#endif + +#ifndef smp_rmb +#define smp_rmb() barrier() +#endif + +#ifndef smp_wmb +#define smp_wmb() barrier() +#endif + +#ifndef smp_read_barrier_depends +#define smp_read_barrier_depends() do { } while (0) +#endif + +#endif /* CONFIG_SMP */ + +#ifndef __smp_store_mb +#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0) +#endif + +#ifndef __smp_mb__before_atomic +#define __smp_mb__before_atomic() __smp_mb() +#endif + +#ifndef __smp_mb__after_atomic +#define __smp_mb__after_atomic() __smp_mb() +#endif + +#ifndef __smp_store_release +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + __smp_mb(); \ + WRITE_ONCE(*p, v); \ +} while (0) +#endif + +#ifndef __smp_load_acquire +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + __smp_mb(); \ + ___p1; \ +}) +#endif + +#ifdef CONFIG_SMP + +#ifndef smp_store_mb +#define smp_store_mb(var, value) __smp_store_mb(var, value) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() __smp_mb__before_atomic() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() __smp_mb__after_atomic() +#endif + +#ifndef smp_store_release +#define smp_store_release(p, v) __smp_store_release(p, v) +#endif + +#ifndef smp_load_acquire +#define smp_load_acquire(p) __smp_load_acquire(p) +#endif + +#else /* !CONFIG_SMP */ + +#ifndef smp_store_mb +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() barrier() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() barrier() +#endif + +#ifndef smp_store_release +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + WRITE_ONCE(*p, v); \ +} while (0) +#endif + +#ifndef smp_load_acquire +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) +#endif + +#endif /* CONFIG_SMP */ + +/* Barriers for virtual machine guests when talking to an SMP host */ +#define virt_mb() __smp_mb() +#define virt_rmb() __smp_rmb() +#define virt_wmb() __smp_wmb() +#define virt_read_barrier_depends() __smp_read_barrier_depends() +#define virt_store_mb(var, value) __smp_store_mb(var, value) +#define virt_mb__before_atomic() __smp_mb__before_atomic() +#define virt_mb__after_atomic() __smp_mb__after_atomic() +#define virt_store_release(p, v) __smp_store_release(p, v) +#define 
virt_load_acquire(p) __smp_load_acquire(p) + +/** + * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency + * + * A control dependency provides a LOAD->STORE order, the additional RMB + * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, + * aka. (load)-ACQUIRE. + * + * Architectures that do not do load speculation can have this be barrier(). + */ +#ifndef smp_acquire__after_ctrl_dep +#define smp_acquire__after_ctrl_dep() smp_rmb() +#endif + +/** + * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees + * @ptr: pointer to the variable to wait on + * @cond: boolean expression to wait for + * + * Equivalent to using READ_ONCE() on the condition variable. + * + * Due to C lacking lambda expressions we load the value of *ptr into a + * pre-named variable @VAL to be used in @cond. + */ +#ifndef smp_cond_load_relaxed +#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ + typeof(ptr) __PTR = (ptr); \ + typeof(*ptr) VAL; \ + for (;;) { \ + VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ + cpu_relax(); \ + } \ + VAL; \ +}) +#endif + +/** + * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering + * @ptr: pointer to the variable to wait on + * @cond: boolean expression to wait for + * + * Equivalent to using smp_load_acquire() on the condition variable but employs + * the control dependency of the wait to reduce the barrier on many platforms. + */ +#ifndef smp_cond_load_acquire +#define smp_cond_load_acquire(ptr, cond_expr) ({ \ + typeof(*ptr) _val; \ + _val = smp_cond_load_relaxed(ptr, cond_expr); \ + smp_acquire__after_ctrl_dep(); \ + _val; \ +}) +#endif + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_GENERIC_BARRIER_H */ diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h new file mode 100644 index 000000000..bfc96bf66 --- /dev/null +++ b/include/asm-generic/bitops.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_BITOPS_H +#define __ASM_GENERIC_BITOPS_H + +/* + * For the benefit of those who are trying to port Linux to another + * architecture, here are some C-language equivalents. You should + * recode these in the native assembly language, if at all possible. + * + * C language equivalents written by Theodore Ts'o, 9/26/92 + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include +#include +#include + +#include +#include +#include +#include + +#endif /* __ASM_GENERIC_BITOPS_H */ diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h new file mode 100644 index 000000000..39e56e1c7 --- /dev/null +++ b/include/asm-generic/bitops/__ffs.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS___FFS_H_ +#define _ASM_GENERIC_BITOPS___FFS_H_ + +#include + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. 
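+ *
+ * Worked example (editor's note): __ffs(0x58) == 3, since 0x58 is binary
+ * 1011000 and its lowest set bit is bit 3. A common caller pattern, with
+ * process_bit() standing in for whatever work the caller does, is:
+ *
+ *	while (mask) {
+ *		unsigned long bit = __ffs(mask);
+ *
+ *		process_bit(bit);
+ *		mask &= ~(1UL << bit);
+ *	}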
+ */ +static __always_inline unsigned long __ffs(unsigned long word) +{ + int num = 0; + +#if BITS_PER_LONG == 64 + if ((word & 0xffffffff) == 0) { + num += 32; + word >>= 32; + } +#endif + if ((word & 0xffff) == 0) { + num += 16; + word >>= 16; + } + if ((word & 0xff) == 0) { + num += 8; + word >>= 8; + } + if ((word & 0xf) == 0) { + num += 4; + word >>= 4; + } + if ((word & 0x3) == 0) { + num += 2; + word >>= 2; + } + if ((word & 0x1) == 0) + num += 1; + return num; +} + +#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */ diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h new file mode 100644 index 000000000..03f721a8a --- /dev/null +++ b/include/asm-generic/bitops/__fls.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS___FLS_H_ +#define _ASM_GENERIC_BITOPS___FLS_H_ + +#include + +/** + * __fls - find last (most-significant) set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first. + */ +static __always_inline unsigned long __fls(unsigned long word) +{ + int num = BITS_PER_LONG - 1; + +#if BITS_PER_LONG == 64 + if (!(word & (~0ul << 32))) { + num -= 32; + word <<= 32; + } +#endif + if (!(word & (~0ul << (BITS_PER_LONG-16)))) { + num -= 16; + word <<= 16; + } + if (!(word & (~0ul << (BITS_PER_LONG-8)))) { + num -= 8; + word <<= 8; + } + if (!(word & (~0ul << (BITS_PER_LONG-4)))) { + num -= 4; + word <<= 4; + } + if (!(word & (~0ul << (BITS_PER_LONG-2)))) { + num -= 2; + word <<= 2; + } + if (!(word & (~0ul << (BITS_PER_LONG-1)))) + num -= 1; + return num; +} + +#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */ diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h new file mode 100644 index 000000000..c2705e1d2 --- /dev/null +++ b/include/asm-generic/bitops/arch_hweight.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ + +#include + +static inline unsigned int __arch_hweight32(unsigned int w) +{ + return __sw_hweight32(w); +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __sw_hweight16(w); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __sw_hweight8(w); +} + +static inline unsigned long __arch_hweight64(__u64 w) +{ + return __sw_hweight64(w); +} +#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h new file mode 100644 index 000000000..dd90c9792 --- /dev/null +++ b/include/asm-generic/bitops/atomic.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_ATOMIC_H_ + +#include +#include +#include + +/* + * Implementation of atomic bitops using atomic-fetch ops. + * See Documentation/atomic_bitops.txt for details. 
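A small usage sketch of the atomic ops implemented below may help; the bitmap and both helpers here are invented for illustration, and the pattern relies only on the documented "returns the old bit" behaviour of the test-and-* variants.

#include <linux/bitops.h>
#include <linux/types.h>

static DECLARE_BITMAP(pending, 64);              /* hypothetical 64-entry bitmap */

static void mark_pending(unsigned int idx)
{
        set_bit(idx, pending);                   /* atomic OR into the right word */
}

static bool claim_pending(unsigned int idx)
{
        /* Atomic; returns the old bit, so exactly one caller wins the claim */
        return test_and_clear_bit(idx, pending);
}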
+ */ + +static inline void set_bit(unsigned int nr, volatile unsigned long *p) +{ + p += BIT_WORD(nr); + atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); +} + +static inline void clear_bit(unsigned int nr, volatile unsigned long *p) +{ + p += BIT_WORD(nr); + atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); +} + +static inline void change_bit(unsigned int nr, volatile unsigned long *p) +{ + p += BIT_WORD(nr); + atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); +} + +static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + if (READ_ONCE(*p) & mask) + return 1; + + old = atomic_long_fetch_or(mask, (atomic_long_t *)p); + return !!(old & mask); +} + +static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + if (!(READ_ONCE(*p) & mask)) + return 0; + + old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p); + return !!(old & mask); +} + +static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + old = atomic_long_fetch_xor(mask, (atomic_long_t *)p); + return !!(old & mask); +} + +#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h new file mode 100644 index 000000000..87024da44 --- /dev/null +++ b/include/asm-generic/bitops/builtin-__ffs.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_ + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static __always_inline unsigned long __ffs(unsigned long word) +{ + return __builtin_ctzl(word); +} + +#endif diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h new file mode 100644 index 000000000..43a5aa9af --- /dev/null +++ b/include/asm-generic/bitops/builtin-__fls.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_ + +/** + * __fls - find last (most-significant) set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first. + */ +static __always_inline unsigned long __fls(unsigned long word) +{ + return (sizeof(word) * 8) - 1 - __builtin_clzl(word); +} + +#endif diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h new file mode 100644 index 000000000..458c85ebc --- /dev/null +++ b/include/asm-generic/bitops/builtin-ffs.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_ + +/** + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). 
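The 1-based ffs() convention versus the 0-based __ffs()/ffz() helpers is a recurring source of off-by-one bugs, so a few concrete values may be worth spelling out; the checking function below is an invented, illustrative helper.

#include <linux/bitops.h>
#include <linux/bug.h>

static void bit_search_demo(void)                /* hypothetical sanity helper */
{
        WARN_ON(ffs(0) != 0);                    /* ffs() is 1-based and defined for 0 */
        WARN_ON(ffs(8) != 4);                    /* bit 3 set -> position 4 */
        WARN_ON(__ffs(8) != 3);                  /* __ffs() is 0-based; __ffs(0) is undefined */
        WARN_ON(ffz(0x7) != 3);                  /* first clear bit, i.e. __ffs(~0x7) */
}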
+ */ +static __always_inline int ffs(int x) +{ + return __builtin_ffs(x); +} + +#endif diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h new file mode 100644 index 000000000..62daf9409 --- /dev/null +++ b/include/asm-generic/bitops/builtin-fls.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_ + +/** + * fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static __always_inline int fls(int x) +{ + return x ? sizeof(x) * 8 - __builtin_clz(x) : 0; +} + +#endif diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h new file mode 100644 index 000000000..149faeeee --- /dev/null +++ b/include/asm-generic/bitops/const_hweight.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ + +/* + * Compile time versions of __arch_hweightN() + */ +#define __const_hweight8(w) \ + ((unsigned int) \ + ((!!((w) & (1ULL << 0))) + \ + (!!((w) & (1ULL << 1))) + \ + (!!((w) & (1ULL << 2))) + \ + (!!((w) & (1ULL << 3))) + \ + (!!((w) & (1ULL << 4))) + \ + (!!((w) & (1ULL << 5))) + \ + (!!((w) & (1ULL << 6))) + \ + (!!((w) & (1ULL << 7))))) + +#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 )) +#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16)) +#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32)) + +/* + * Generic interface. + */ +#define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w)) +#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w)) +#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w)) +#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w)) + +/* + * Interface for known constant arguments + */ +#define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w)) +#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w)) +#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w)) +#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w)) + +/* + * Type invariant interface to the compile time constant hweight functions. 
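As a quick illustration of the split between the compile-time and runtime hweight paths (the mask and helper below are invented): HWEIGHT*() insists on a constant argument, while hweight*() picks whichever path applies.

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_MASK 0x00ff00ffUL                       /* hypothetical constant mask */

static unsigned int demo_weights(u32 w)
{
        unsigned int fixed = HWEIGHT32(DEMO_MASK);   /* compile-time only; evaluates to 16 */

        return fixed + hweight32(w);                 /* dispatches on __builtin_constant_p(w) */
}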
+ */ +#define HWEIGHT(w) HWEIGHT64((u64)w) + +#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/ext2-atomic-setbit.h b/include/asm-generic/bitops/ext2-atomic-setbit.h new file mode 100644 index 000000000..b041cbf0d --- /dev/null +++ b/include/asm-generic/bitops/ext2-atomic-setbit.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ +#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ + +/* + * Atomic bitops based version of ext2 atomic bitops + */ + +#define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr) +#define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr) + +#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */ diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h new file mode 100644 index 000000000..0cfc3180b --- /dev/null +++ b/include/asm-generic/bitops/ext2-atomic.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ + +/* + * Spinlock based version of ext2 atomic bitops + */ + +#define ext2_set_bit_atomic(lock, nr, addr) \ + ({ \ + int ret; \ + spin_lock(lock); \ + ret = __test_and_set_bit_le(nr, addr); \ + spin_unlock(lock); \ + ret; \ + }) + +#define ext2_clear_bit_atomic(lock, nr, addr) \ + ({ \ + int ret; \ + spin_lock(lock); \ + ret = __test_and_clear_bit_le(nr, addr); \ + spin_unlock(lock); \ + ret; \ + }) + +#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h new file mode 100644 index 000000000..e81868b2c --- /dev/null +++ b/include/asm-generic/bitops/ffs.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_FFS_H_ +#define _ASM_GENERIC_BITOPS_FFS_H_ + +/** + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ +static inline int ffs(int x) +{ + int r = 1; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; +} + +#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */ diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h new file mode 100644 index 000000000..0d010085f --- /dev/null +++ b/include/asm-generic/bitops/ffz.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_FFZ_H_ +#define _ASM_GENERIC_BITOPS_FFZ_H_ + +/* + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. 
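ffz() is most often used to hand out the first free slot in a single-word bitmap. The allocator below is a deliberately simplified, hypothetical sketch (single word, no locking) that follows the "check against ~0UL first" rule stated above.

#include <linux/bitops.h>

static unsigned long slots;                      /* bit i set => slot i is in use */

static int alloc_slot(void)                      /* invented helper */
{
        unsigned long free;

        if (slots == ~0UL)
                return -1;                       /* ffz() is undefined when every bit is set */

        free = ffz(slots);                       /* index of the first clear bit */
        __set_bit(free, &slots);                 /* non-atomic: caller must serialize */
        return free;
}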
+ */ +#define ffz(x) __ffs(~(x)) + +#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */ diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h new file mode 100644 index 000000000..8a1ee1001 --- /dev/null +++ b/include/asm-generic/bitops/find.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_FIND_H_ +#define _ASM_GENERIC_BITOPS_FIND_H_ + +#ifndef find_next_bit +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number for the next set bit + * If no bits are set, returns @size. + */ +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long + size, unsigned long offset); +#endif + +#ifndef find_next_and_bit +/** + * find_next_and_bit - find the next set bit in both memory regions + * @addr1: The first address to base the search on + * @addr2: The second address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number for the next set bit + * If no bits are set, returns @size. + */ +extern unsigned long find_next_and_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long size, + unsigned long offset); +#endif + +#ifndef find_next_zero_bit +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number of the next zero bit + * If no bits are zero, returns @size. + */ +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); +#endif + +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum number of bits to search + * + * Returns the bit number of the first set bit. + * If no bits are set, returns @size. + */ +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum number of bits to search + * + * Returns the bit number of the first cleared bit. + * If no bits are zero, returns @size. + */ +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); +#else /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#ifndef find_first_bit +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#endif +#ifndef find_first_zero_bit +#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) +#endif + +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h new file mode 100644 index 000000000..753aecaab --- /dev/null +++ b/include/asm-generic/bitops/fls.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_FLS_H_ +#define _ASM_GENERIC_BITOPS_FLS_H_ + +/** + * fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 
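The find_*_bit() interfaces declared above are almost always consumed as a simple scan loop. A hypothetical walk over a 128-bit map (names invented) shows the "returns @size when nothing is found" convention:

#include <linux/bitmap.h>
#include <linux/printk.h>

#define DEMO_BITS 128
static DECLARE_BITMAP(demo_map, DEMO_BITS);

static void walk_demo_map(void)
{
        unsigned long bit = find_first_bit(demo_map, DEMO_BITS);

        while (bit < DEMO_BITS) {                /* == DEMO_BITS means no further set bits */
                pr_info("bit %lu is set\n", bit);
                bit = find_next_bit(demo_map, DEMO_BITS, bit + 1);
        }
}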
+ */ + +static __always_inline int fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + +#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */ diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h new file mode 100644 index 000000000..866f2b230 --- /dev/null +++ b/include/asm-generic/bitops/fls64.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_FLS64_H_ +#define _ASM_GENERIC_BITOPS_FLS64_H_ + +#include + +/** + * fls64 - find last set bit in a 64-bit word + * @x: the word to search + * + * This is defined in a similar way as the libc and compiler builtin + * ffsll, but returns the position of the most significant set bit. + * + * fls64(value) returns 0 if value is 0 or the position of the last + * set bit if value is nonzero. The last (most significant) bit is + * at position 64. + */ +#if BITS_PER_LONG == 32 +static __always_inline int fls64(__u64 x) +{ + __u32 h = x >> 32; + if (h) + return fls(h) + 32; + return fls(x); +} +#elif BITS_PER_LONG == 64 +static __always_inline int fls64(__u64 x) +{ + if (x == 0) + return 0; + return __fls(x) + 1; +} +#else +#error BITS_PER_LONG not 32 or 64 +#endif + +#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h new file mode 100644 index 000000000..6bf1bba83 --- /dev/null +++ b/include/asm-generic/bitops/hweight.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ + +#include +#include + +#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h new file mode 100644 index 000000000..188d3eba3 --- /dev/null +++ b/include/asm-generic/bitops/le.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_LE_H_ +#define _ASM_GENERIC_BITOPS_LE_H_ + +#include +#include + +#if defined(__LITTLE_ENDIAN) + +#define BITOP_LE_SWIZZLE 0 + +static inline unsigned long find_next_zero_bit_le(const void *addr, + unsigned long size, unsigned long offset) +{ + return find_next_zero_bit(addr, size, offset); +} + +static inline unsigned long find_next_bit_le(const void *addr, + unsigned long size, unsigned long offset) +{ + return find_next_bit(addr, size, offset); +} + +static inline unsigned long find_first_zero_bit_le(const void *addr, + unsigned long size) +{ + return find_first_zero_bit(addr, size); +} + +#elif defined(__BIG_ENDIAN) + +#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) + +#ifndef find_next_zero_bit_le +extern unsigned long find_next_zero_bit_le(const void *addr, + unsigned long size, unsigned long offset); +#endif + +#ifndef find_next_bit_le +extern unsigned long find_next_bit_le(const void *addr, + unsigned long size, unsigned long offset); +#endif + +#ifndef find_first_zero_bit_le +#define find_first_zero_bit_le(addr, size) \ + find_next_zero_bit_le((addr), (size), 0) +#endif + +#else +#error "Please fix " +#endif + +static inline int test_bit_le(int nr, const void *addr) +{ + return test_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void set_bit_le(int nr, void *addr) +{ + set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void clear_bit_le(int 
nr, void *addr) +{ + clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void __set_bit_le(int nr, void *addr) +{ + __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void __clear_bit_le(int nr, void *addr) +{ + __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int test_and_set_bit_le(int nr, void *addr) +{ + return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int test_and_clear_bit_le(int nr, void *addr) +{ + return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int __test_and_set_bit_le(int nr, void *addr) +{ + return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int __test_and_clear_bit_le(int nr, void *addr) +{ + return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h new file mode 100644 index 000000000..3ae021368 --- /dev/null +++ b/include/asm-generic/bitops/lock.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_LOCK_H_ +#define _ASM_GENERIC_BITOPS_LOCK_H_ + +#include +#include +#include + +/** + * test_and_set_bit_lock - Set a bit and return its old value, for lock + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and provides acquire barrier semantics if + * the returned value is 0. + * It can be used to implement bit locks. + */ +static inline int test_and_set_bit_lock(unsigned int nr, + volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + if (READ_ONCE(*p) & mask) + return 1; + + old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); + return !!(old & mask); +} + + +/** + * clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to set + * @addr: the address to start counting from + * + * This operation is atomic and provides release barrier semantics. + */ +static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p) +{ + p += BIT_WORD(nr); + atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); +} + +/** + * __clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to set + * @addr: the address to start counting from + * + * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all + * the bits in the word are protected by this lock some archs can use weaker + * ops to safely unlock. + * + * See for example x86's implementation. + */ +static inline void __clear_bit_unlock(unsigned int nr, + volatile unsigned long *p) +{ + unsigned long old; + + p += BIT_WORD(nr); + old = READ_ONCE(*p); + old &= ~BIT_MASK(nr); + atomic_long_set_release((atomic_long_t *)p, old); +} + +/** + * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom + * byte is negative, for unlock. 
+ * @nr: the bit to clear + * @addr: the address to start counting from + * + * This is a bit of a one-trick-pony for the filemap code, which clears + * PG_locked and tests PG_waiters, + */ +#ifndef clear_bit_unlock_is_negative_byte +static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr, + volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); + return !!(old & BIT(7)); +} +#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte +#endif + +#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */ diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h new file mode 100644 index 000000000..7e10c4b50 --- /dev/null +++ b/include/asm-generic/bitops/non-atomic.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ + +#include + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; +} + +static inline void __clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p &= ~mask; +} + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p ^= mask; +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/* WARNING: non atomic and it can be reordered! 
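Two contracts spelled out above combine naturally in practice: the acquire/release *_lock bitops from bitops/lock.h, and the rule that the double-underscore (non-atomic) ops need outside serialization. A hypothetical sketch, with all names invented:

#include <linux/bitops.h>
#include <asm/processor.h>       /* cpu_relax() */

static unsigned long guard;      /* bit 0 is used as a lock for 'state' */
static unsigned long state;      /* only touched while the guard bit is held */

static void update_state(unsigned int nr)
{
        while (test_and_set_bit_lock(0, &guard))
                cpu_relax();                     /* spin; success implies ACQUIRE */

        __set_bit(nr, &state);                   /* non-atomic is fine under the bit lock */

        clear_bit_unlock(0, &guard);             /* RELEASE, pairs with the acquire above */
}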
*/ +static inline int __test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit(int nr, const volatile unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h new file mode 100644 index 000000000..86470cfce --- /dev/null +++ b/include/asm-generic/bitops/sched.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_SCHED_H_ +#define _ASM_GENERIC_BITOPS_SCHED_H_ + +#include /* unlikely() */ +#include + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 100-bit bitmap. It's guaranteed that at least + * one of the 100 bits is cleared. + */ +static inline int sched_find_first_bit(const unsigned long *b) +{ +#if BITS_PER_LONG == 64 + if (b[0]) + return __ffs(b[0]); + return __ffs(b[1]) + 64; +#elif BITS_PER_LONG == 32 + if (b[0]) + return __ffs(b[0]); + if (b[1]) + return __ffs(b[1]) + 32; + if (b[2]) + return __ffs(b[2]) + 64; + return __ffs(b[3]) + 96; +#else +#error BITS_PER_LONG not defined +#endif +} + +#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */ diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h new file mode 100644 index 000000000..3905c1c93 --- /dev/null +++ b/include/asm-generic/bitsperlong.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_BITS_PER_LONG +#define __ASM_GENERIC_BITS_PER_LONG + +#include + + +#ifdef CONFIG_64BIT +#define BITS_PER_LONG 64 +#else +#define BITS_PER_LONG 32 +#endif /* CONFIG_64BIT */ + +/* + * FIXME: The check currently breaks x86-64 build, so it's + * temporarily disabled. Please fix x86-64 and reenable + */ +#if 0 && BITS_PER_LONG != __BITS_PER_LONG +#error Inconsistent word size. Check asm/bitsperlong.h +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#endif /* __ASM_GENERIC_BITS_PER_LONG */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h new file mode 100644 index 000000000..d4fb510a4 --- /dev/null +++ b/include/asm-generic/bug.h @@ -0,0 +1,254 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BUG_H +#define _ASM_GENERIC_BUG_H + +#include + +#define CUT_HERE "------------[ cut here ]------------\n" + +#ifdef CONFIG_GENERIC_BUG +#define BUGFLAG_WARNING (1 << 0) +#define BUGFLAG_ONCE (1 << 1) +#define BUGFLAG_DONE (1 << 2) +#define BUGFLAG_TAINT(taint) ((taint) << 8) +#define BUG_GET_TAINT(bug) ((bug)->flags >> 8) +#endif + +#ifndef __ASSEMBLY__ +#include + +#ifdef CONFIG_BUG + +#ifdef CONFIG_GENERIC_BUG +struct bug_entry { +#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS + unsigned long bug_addr; +#else + signed int bug_addr_disp; +#endif +#ifdef CONFIG_DEBUG_BUGVERBOSE +#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS + const char *file; +#else + signed int file_disp; +#endif + unsigned short line; +#endif + unsigned short flags; +}; +#endif /* CONFIG_GENERIC_BUG */ + +/* + * Don't use BUG() or BUG_ON() unless there's really no way out; one + * example might be detecting data structure corruption in the middle + * of an operation that can't be backed out of. 
If the (sub)system + * can somehow continue operating, perhaps with reduced functionality, + * it's probably not BUG-worthy. + * + * If you're tempted to BUG(), think again: is completely giving up + * really the *only* solution? There are usually better options, where + * users don't need to reboot ASAP and can mostly shut down cleanly. + */ +#ifndef HAVE_ARCH_BUG +#define BUG() do { \ + printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + barrier_before_unreachable(); \ + panic("BUG!"); \ +} while (0) +#endif + +#ifndef HAVE_ARCH_BUG_ON +#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) +#endif + +#ifdef __WARN_FLAGS +#define __WARN_TAINT(taint) __WARN_FLAGS(BUGFLAG_TAINT(taint)) +#define __WARN_ONCE_TAINT(taint) __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint)) + +#define WARN_ON_ONCE(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_ONCE_TAINT(TAINT_WARN); \ + unlikely(__ret_warn_on); \ +}) +#endif + +/* + * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report + * significant kernel issues that need prompt attention if they should ever + * appear at runtime. + * + * Do not use these macros when checking for invalid external inputs + * (e.g. invalid system call arguments, or invalid data coming from + * network/devices), and on transient conditions like ENOMEM or EAGAIN. + * These macros should be used for recoverable kernel issues only. + * For invalid external inputs, transient conditions, etc use + * pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary. + * Do not include "BUG"/"WARNING" in format strings manually to make these + * conditions distinguishable from kernel issues. + * + * Use the versions with printk format strings to provide better diagnostics. + */ +#ifndef __WARN_TAINT +extern __printf(3, 4) +void warn_slowpath_fmt(const char *file, const int line, + const char *fmt, ...); +extern __printf(4, 5) +void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, + const char *fmt, ...); +extern void warn_slowpath_null(const char *file, const int line); +#define WANT_WARN_ON_SLOWPATH +#define __WARN() warn_slowpath_null(__FILE__, __LINE__) +#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) +#define __WARN_printf_taint(taint, arg...) \ + warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg) +#else +extern __printf(1, 2) void __warn_printk(const char *fmt, ...); +#define __WARN() do { \ + printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \ +} while (0) +#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg) +#define __WARN_printf_taint(taint, arg...) \ + do { __warn_printk(arg); __WARN_TAINT(taint); } while (0) +#endif + +/* used internally by panic.c */ +struct warn_args; +struct pt_regs; + +void __warn(const char *file, int line, void *caller, unsigned taint, + struct pt_regs *regs, struct warn_args *args); + +#ifndef WARN_ON +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN(); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#define WARN_TAINT(condition, taint, format...) 
({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf_taint(taint, format); \ + unlikely(__ret_warn_on); \ +}) + +#ifndef WARN_ON_ONCE +#define WARN_ON_ONCE(condition) ({ \ + static bool __section(.data.once) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) +#endif + +#define WARN_ONCE(condition, format...) ({ \ + static bool __section(.data.once) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN(1, format); \ + } \ + unlikely(__ret_warn_once); \ +}) + +#define WARN_TAINT_ONCE(condition, taint, format...) ({ \ + static bool __section(.data.once) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN_TAINT(1, taint, format); \ + } \ + unlikely(__ret_warn_once); \ +}) + +#else /* !CONFIG_BUG */ +#ifndef HAVE_ARCH_BUG +#define BUG() do {} while (1) +#endif + +#ifndef HAVE_ARCH_BUG_ON +#define BUG_ON(condition) do { if (condition) BUG(); } while (0) +#endif + +#ifndef HAVE_ARCH_WARN_ON +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + no_printk(format); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#define WARN_ON_ONCE(condition) WARN_ON(condition) +#define WARN_ONCE(condition, format...) WARN(condition, format) +#define WARN_TAINT(condition, taint, format...) WARN(condition, format) +#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format) + +#endif + +/* + * WARN_ON_SMP() is for cases that the warning is either + * meaningless for !SMP or may even cause failures. + * This is usually used for cases that we have + * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked() + * returns 0 for uniprocessor settings. + * It can also be used with values that are only defined + * on SMP: + * + * struct foo { + * [...] + * #ifdef CONFIG_SMP + * int bar; + * #endif + * }; + * + * void func(struct foo *zoot) + * { + * WARN_ON_SMP(!zoot->bar); + * + * For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(), + * and should be a nop and return false for uniprocessor. + * + * if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set + * and x is true. + */ +#ifdef CONFIG_SMP +# define WARN_ON_SMP(x) WARN_ON(x) +#else +/* + * Use of ({0;}) because WARN_ON_SMP(x) may be used either as + * a stand alone line statement or as a condition in an if () + * statement. + * A simple "0" would cause gcc to give a "statement has no effect" + * warning. + */ +# define WARN_ON_SMP(x) ({0;}) +#endif + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h new file mode 100644 index 000000000..69021830f --- /dev/null +++ b/include/asm-generic/bugs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_BUGS_H +#define __ASM_GENERIC_BUGS_H +/* + * This file is included by 'init/main.c' to check for + * architecture-dependent bugs. 
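Returning to the WARN() guidance above: the intended split is between bad external input (report it and return an error) and genuine kernel bugs (one backtrace is plenty). A hedged sketch with an invented device structure and function:

#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/errno.h>

struct demo_dev {                                /* invented for illustration */
        struct clk *clk;
};

static int demo_set_rate(struct demo_dev *dev, unsigned int hz)
{
        if (hz == 0)
                return -EINVAL;                  /* invalid caller input: no WARN, no BUG */

        if (WARN_ONCE(!dev->clk, "demo_dev registered without a clock\n"))
                return -ENODEV;                  /* internal inconsistency: warn once, recover */

        return 0;
}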
+ */ + +static inline void check_bugs(void) { } + +#endif /* __ASM_GENERIC_BUGS_H */ diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h new file mode 100644 index 000000000..60386e164 --- /dev/null +++ b/include/asm-generic/cache.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_CACHE_H +#define __ASM_GENERIC_CACHE_H +/* + * 32 bytes appears to be the most common cache line size, + * so make that the default here. Architectures with larger + * cache lines need to provide their own cache.h. + */ + +#define L1_CACHE_SHIFT 5 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#endif /* __ASM_GENERIC_CACHE_H */ diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h new file mode 100644 index 000000000..0dd47a6db --- /dev/null +++ b/include/asm-generic/cacheflush.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_CACHEFLUSH_H +#define __ASM_CACHEFLUSH_H + +/* Keep includes the same across arches. */ +#include + +/* + * The cache doesn't need to be flushed when TLB entries change when + * the cache is mapped to physical memory, not virtual memory + */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_icache_range(start, end) do { } while (0) +#define flush_icache_page(vma,pg) do { } while (0) +#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + memcpy(dst, src, len); \ + flush_icache_user_range(vma, page, vaddr, len); \ + } while (0) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#endif /* __ASM_CACHEFLUSH_H */ diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h new file mode 100644 index 000000000..34785c0f5 --- /dev/null +++ b/include/asm-generic/checksum.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_CHECKSUM_H +#define __ASM_GENERIC_CHECKSUM_H + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +extern __wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum); + +/* + * the same as csum_partial_copy, but copies from user space. 
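A short, hypothetical helper shows how the csum_partial()/csum_fold() pair from this checksum header is meant to be chained across fragments (the function and its arguments are invented):

#include <asm/checksum.h>
#include <linux/types.h>

static __sum16 sum_two_fragments(const void *frag1, int len1,
                                 const void *frag2, int len2)
{
        __wsum sum = csum_partial(frag1, len1, 0);   /* lengths should be even, except the last */

        sum = csum_partial(frag2, len2, sum);        /* feed the running 32-bit sum back in */
        return csum_fold(sum);                       /* fold down to the final 16-bit checksum */
}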
+ * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, + int len, __wsum sum, int *csum_err); + +#ifndef csum_partial_copy_nocheck +#define csum_partial_copy_nocheck(src, dst, len, sum) \ + csum_partial_copy((src), (dst), (len), (sum)) +#endif + +#ifndef ip_fast_csum +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); +#endif + +#ifndef csum_fold +/* + * Fold a partial checksum + */ +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + sum = (sum & 0xffff) + (sum >> 16); + sum = (sum & 0xffff) + (sum >> 16); + return (__force __sum16)~sum; +} +#endif + +#ifndef csum_tcpudp_nofold +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +extern __wsum +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum); +#endif + +#ifndef csum_tcpudp_magic +static inline __sum16 +csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} +#endif + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +extern __sum16 ip_compute_csum(const void *buff, int len); + +#endif /* __ASM_GENERIC_CHECKSUM_H */ diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h new file mode 100644 index 000000000..f17f14f84 --- /dev/null +++ b/include/asm-generic/cmpxchg-local.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H +#define __ASM_GENERIC_CMPXCHG_LOCAL_H + +#include +#include + +extern unsigned long wrong_size_cmpxchg(volatile void *ptr) + __noreturn; + +/* + * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned + * long parameter, supporting various types of architectures. + */ +static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, + unsigned long old, unsigned long new, int size) +{ + unsigned long flags, prev; + + /* + * Sanity checking, compile-time. + */ + if (size == 8 && sizeof(unsigned long) != 8) + wrong_size_cmpxchg(ptr); + + raw_local_irq_save(flags); + switch (size) { + case 1: prev = *(u8 *)ptr; + if (prev == old) + *(u8 *)ptr = (u8)new; + break; + case 2: prev = *(u16 *)ptr; + if (prev == old) + *(u16 *)ptr = (u16)new; + break; + case 4: prev = *(u32 *)ptr; + if (prev == old) + *(u32 *)ptr = (u32)new; + break; + case 8: prev = *(u64 *)ptr; + if (prev == old) + *(u64 *)ptr = (u64)new; + break; + default: + wrong_size_cmpxchg(ptr); + } + raw_local_irq_restore(flags); + return prev; +} + +/* + * Generic version of __cmpxchg64_local. Takes an u64 parameter. + */ +static inline u64 __cmpxchg64_local_generic(volatile void *ptr, + u64 old, u64 new) +{ + u64 prev; + unsigned long flags; + + raw_local_irq_save(flags); + prev = *(u64 *)ptr; + if (prev == old) + *(u64 *)ptr = new; + raw_local_irq_restore(flags); + return prev; +} + +#endif diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h new file mode 100644 index 000000000..9a24510cd --- /dev/null +++ b/include/asm-generic/cmpxchg.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Generic UP xchg and cmpxchg using interrupt disablement. Does not + * support SMP. 
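The classic way to consume cmpxchg() (defined below) is a read/compute/retry loop. This is a minimal, hypothetical saturating counter, shown only for the uniprocessor context this header targets:

#include <linux/kernel.h>        /* min() */

static unsigned long demo_count;

static void demo_add_saturating(unsigned long delta, unsigned long limit)
{
        unsigned long old, new;

        do {
                old = demo_count;
                new = min(old + delta, limit);
        } while (cmpxchg(&demo_count, old, new) != old);   /* retry if the value changed */
}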
+ */ + +#ifndef __ASM_GENERIC_CMPXCHG_H +#define __ASM_GENERIC_CMPXCHG_H + +#ifdef CONFIG_SMP +#error "Cannot use generic cmpxchg on SMP" +#endif + +#include +#include + +#ifndef xchg + +/* + * This function doesn't exist, so you'll get a linker error if + * something tries to do an invalidly-sized xchg(). + */ +extern void __xchg_called_with_bad_pointer(void); + +static inline +unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + unsigned long ret, flags; + + switch (size) { + case 1: +#ifdef __xchg_u8 + return __xchg_u8(x, ptr); +#else + local_irq_save(flags); + ret = *(volatile u8 *)ptr; + *(volatile u8 *)ptr = x; + local_irq_restore(flags); + return ret; +#endif /* __xchg_u8 */ + + case 2: +#ifdef __xchg_u16 + return __xchg_u16(x, ptr); +#else + local_irq_save(flags); + ret = *(volatile u16 *)ptr; + *(volatile u16 *)ptr = x; + local_irq_restore(flags); + return ret; +#endif /* __xchg_u16 */ + + case 4: +#ifdef __xchg_u32 + return __xchg_u32(x, ptr); +#else + local_irq_save(flags); + ret = *(volatile u32 *)ptr; + *(volatile u32 *)ptr = x; + local_irq_restore(flags); + return ret; +#endif /* __xchg_u32 */ + +#ifdef CONFIG_64BIT + case 8: +#ifdef __xchg_u64 + return __xchg_u64(x, ptr); +#else + local_irq_save(flags); + ret = *(volatile u64 *)ptr; + *(volatile u64 *)ptr = x; + local_irq_restore(flags); + return ret; +#endif /* __xchg_u64 */ +#endif /* CONFIG_64BIT */ + + default: + __xchg_called_with_bad_pointer(); + return x; + } +} + +#define xchg(ptr, x) ({ \ + ((__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ +}) + +#endif /* xchg */ + +/* + * Atomic compare and exchange. + */ +#include + +#ifndef cmpxchg_local +#define cmpxchg_local(ptr, o, n) ({ \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))); \ +}) +#endif + +#ifndef cmpxchg64_local +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#endif + +#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) +#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) + +#endif /* __ASM_GENERIC_CMPXCHG_H */ diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h new file mode 100644 index 000000000..28819451b --- /dev/null +++ b/include/asm-generic/compat.h @@ -0,0 +1,3 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* This is an empty stub for 32-bit-only architectures */ diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h new file mode 100644 index 000000000..3a2e224b9 --- /dev/null +++ b/include/asm-generic/current.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_CURRENT_H +#define __ASM_GENERIC_CURRENT_H + +#include + +#define get_current() (current_thread_info()->task) +#define current get_current() + +#endif /* __ASM_GENERIC_CURRENT_H */ diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h new file mode 100644 index 000000000..e448ac614 --- /dev/null +++ b/include/asm-generic/delay.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_DELAY_H +#define __ASM_GENERIC_DELAY_H + +/* Undefined functions to get compile-time errors */ +extern void __bad_udelay(void); +extern void __bad_ndelay(void); + +extern void __udelay(unsigned long usecs); +extern void __ndelay(unsigned long nsecs); +extern void __const_udelay(unsigned long xloops); +extern void __delay(unsigned long loops); + +/* + * The weird n/20000 thing suppresses a "comparison is always false due to + * limited range of 
data type" warning with non-const 8-bit arguments. + */ + +/* 0x10c7 is 2**32 / 1000000 (rounded up) */ +#define udelay(n) \ + ({ \ + if (__builtin_constant_p(n)) { \ + if ((n) / 20000 >= 1) \ + __bad_udelay(); \ + else \ + __const_udelay((n) * 0x10c7ul); \ + } else { \ + __udelay(n); \ + } \ + }) + +/* 0x5 is 2**32 / 1000000000 (rounded up) */ +#define ndelay(n) \ + ({ \ + if (__builtin_constant_p(n)) { \ + if ((n) / 20000 >= 1) \ + __bad_ndelay(); \ + else \ + __const_udelay((n) * 5ul); \ + } else { \ + __ndelay(n); \ + } \ + }) + +#endif /* __ASM_GENERIC_DELAY_H */ diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h new file mode 100644 index 000000000..d7c76bba6 --- /dev/null +++ b/include/asm-generic/device.h @@ -0,0 +1,15 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#ifndef _ASM_GENERIC_DEVICE_H +#define _ASM_GENERIC_DEVICE_H + +struct dev_archdata { +}; + +struct pdev_archdata { +}; + +#endif /* _ASM_GENERIC_DEVICE_H */ diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h new file mode 100644 index 000000000..dc9726fda --- /dev/null +++ b/include/asm-generic/div64.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_DIV64_H +#define _ASM_GENERIC_DIV64_H +/* + * Copyright (C) 2003 Bernardo Innocenti + * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h + * + * Optimization for constant divisors on 32-bit machines: + * Copyright (C) 2006-2015 Nicolas Pitre + * + * The semantics of do_div() are: + * + * uint32_t do_div(uint64_t *n, uint32_t base) + * { + * uint32_t remainder = *n % base; + * *n = *n / base; + * return remainder; + * } + * + * NOTE: macro parameter n is evaluated multiple times, + * beware of side effects! + */ + +#include +#include + +#if BITS_PER_LONG == 64 + +/** + * do_div - returns 2 values: calculate remainder and update new dividend + * @n: pointer to uint64_t dividend (will be updated) + * @base: uint32_t divisor + * + * Summary: + * ``uint32_t remainder = *n % base;`` + * ``*n = *n / base;`` + * + * Return: (uint32_t)remainder + * + * NOTE: macro parameter @n is evaluated multiple times, + * beware of side effects! + */ +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) + +#elif BITS_PER_LONG == 32 + +#include + +/* + * If the divisor happens to be constant, we determine the appropriate + * inverse at compile time to turn the division into a few inline + * multiplications which ought to be much faster. And yet only if compiling + * with a sufficiently recent gcc version to perform proper 64-bit constant + * propagation. + * + * (It is unfortunate that gcc doesn't perform all this internally.) + */ + +#ifndef __div64_const32_is_OK +#define __div64_const32_is_OK (__GNUC__ >= 4) +#endif + +#define __div64_const32(n, ___b) \ +({ \ + /* \ + * Multiplication by reciprocal of b: n / b = n * (p / b) / p \ + * \ + * We rely on the fact that most of this code gets optimized \ + * away at compile time due to constant propagation and only \ + * a few multiplication instructions should remain. \ + * Hence this monstrous macro (static inline doesn't always \ + * do the trick here). 
\ + */ \ + uint64_t ___res, ___x, ___t, ___m, ___n = (n); \ + uint32_t ___p, ___bias; \ + \ + /* determine MSB of b */ \ + ___p = 1 << ilog2(___b); \ + \ + /* compute m = ((p << 64) + b - 1) / b */ \ + ___m = (~0ULL / ___b) * ___p; \ + ___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b; \ + \ + /* one less than the dividend with highest result */ \ + ___x = ~0ULL / ___b * ___b - 1; \ + \ + /* test our ___m with res = m * x / (p << 64) */ \ + ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \ + ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \ + ___res += (___x & 0xffffffff) * (___m >> 32); \ + ___t = (___res < ___t) ? (1ULL << 32) : 0; \ + ___res = (___res >> 32) + ___t; \ + ___res += (___m >> 32) * (___x >> 32); \ + ___res /= ___p; \ + \ + /* Now sanitize and optimize what we've got. */ \ + if (~0ULL % (___b / (___b & -___b)) == 0) { \ + /* special case, can be simplified to ... */ \ + ___n /= (___b & -___b); \ + ___m = ~0ULL / (___b / (___b & -___b)); \ + ___p = 1; \ + ___bias = 1; \ + } else if (___res != ___x / ___b) { \ + /* \ + * We can't get away without a bias to compensate \ + * for bit truncation errors. To avoid it we'd need an \ + * additional bit to represent m which would overflow \ + * a 64-bit variable. \ + * \ + * Instead we do m = p / b and n / b = (n * m + m) / p. \ + */ \ + ___bias = 1; \ + /* Compute m = (p << 64) / b */ \ + ___m = (~0ULL / ___b) * ___p; \ + ___m += ((~0ULL % ___b + 1) * ___p) / ___b; \ + } else { \ + /* \ + * Reduce m / p, and try to clear bit 31 of m when \ + * possible, otherwise that'll need extra overflow \ + * handling later. \ + */ \ + uint32_t ___bits = -(___m & -___m); \ + ___bits |= ___m >> 32; \ + ___bits = (~___bits) << 1; \ + /* \ + * If ___bits == 0 then setting bit 31 is unavoidable. \ + * Simply apply the maximum possible reduction in that \ + * case. Otherwise the MSB of ___bits indicates the \ + * best reduction we should apply. \ + */ \ + if (!___bits) { \ + ___p /= (___m & -___m); \ + ___m /= (___m & -___m); \ + } else { \ + ___p >>= ilog2(___bits); \ + ___m >>= ilog2(___bits); \ + } \ + /* No bias needed. */ \ + ___bias = 0; \ + } \ + \ + /* \ + * Now we have a combination of 2 conditions: \ + * \ + * 1) whether or not we need to apply a bias, and \ + * \ + * 2) whether or not there might be an overflow in the cross \ + * product determined by (___m & ((1 << 63) | (1 << 31))). \ + * \ + * Select the best way to do (m_bias + m * n) / (1 << 64). \ + * From now on there will be actual runtime code generated. \ + */ \ + ___res = __arch_xprod_64(___m, ___n, ___bias); \ + \ + ___res /= ___p; \ +}) + +#ifndef __arch_xprod_64 +/* + * Default C implementation for __arch_xprod_64() + * + * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) + * Semantic: retval = ((bias ? m : 0) + m * n) >> 64 + * + * The product is a 128-bit value, scaled down to 64 bits. + * Assuming constant propagation to optimize away unused conditional code. + * Architectures may provide their own optimized assembly implementation. + */ +static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) +{ + uint32_t m_lo = m; + uint32_t m_hi = m >> 32; + uint32_t n_lo = n; + uint32_t n_hi = n >> 32; + uint64_t res, tmp; + + if (!bias) { + res = ((uint64_t)m_lo * n_lo) >> 32; + } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) { + /* there can't be any overflow here */ + res = (m + (uint64_t)m_lo * n_lo) >> 32; + } else { + res = m + (uint64_t)m_lo * n_lo; + tmp = (res < m) ? 
(1ULL << 32) : 0; + res = (res >> 32) + tmp; + } + + if (!(m & ((1ULL << 63) | (1ULL << 31)))) { + /* there can't be any overflow here */ + res += (uint64_t)m_lo * n_hi; + res += (uint64_t)m_hi * n_lo; + res >>= 32; + } else { + tmp = res += (uint64_t)m_lo * n_hi; + res += (uint64_t)m_hi * n_lo; + tmp = (res < tmp) ? (1ULL << 32) : 0; + res = (res >> 32) + tmp; + } + + res += (uint64_t)m_hi * n_hi; + + return res; +} +#endif + +#ifndef __div64_32 +extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); +#endif + +/* The unnecessary pointer compare is there + * to check for type safety (n must be 64bit) + */ +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \ + if (__builtin_constant_p(__base) && \ + is_power_of_2(__base)) { \ + __rem = (n) & (__base - 1); \ + (n) >>= ilog2(__base); \ + } else if (__div64_const32_is_OK && \ + __builtin_constant_p(__base) && \ + __base != 0) { \ + uint32_t __res_lo, __n_lo = (n); \ + (n) = __div64_const32(n, __base); \ + /* the remainder can be computed with 32-bit regs */ \ + __res_lo = (n); \ + __rem = __n_lo - __res_lo * __base; \ + } else if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = __div64_32(&(n), __base); \ + __rem; \ + }) + +#else /* BITS_PER_LONG == ?? */ + +# error do_div() does not yet support the C64 + +#endif /* BITS_PER_LONG */ + +#endif /* _ASM_GENERIC_DIV64_H */ diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h new file mode 100644 index 000000000..f24b0f9a4 --- /dev/null +++ b/include/asm-generic/dma-contiguous.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H +#define _ASM_GENERIC_DMA_CONTIGUOUS_H + +#include + +static inline void +dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { } + +#endif diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h new file mode 100644 index 000000000..ad2868263 --- /dev/null +++ b/include/asm-generic/dma-mapping.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_DMA_MAPPING_H +#define _ASM_GENERIC_DMA_MAPPING_H + +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) +{ + /* + * Use the non-coherent ops if available. If an architecture wants a + * more fine-grained selection of operations it will have to implement + * get_arch_dma_ops itself or use the per-device dma_ops. + */ +#ifdef CONFIG_DMA_NONCOHERENT_OPS + return &dma_noncoherent_ops; +#else + return &dma_direct_ops; +#endif +} + +#endif /* _ASM_GENERIC_DMA_MAPPING_H */ diff --git a/include/asm-generic/dma.h b/include/asm-generic/dma.h new file mode 100644 index 000000000..43d0c8af8 --- /dev/null +++ b/include/asm-generic/dma.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_DMA_H +#define __ASM_GENERIC_DMA_H +/* + * This file traditionally describes the i8237 PC style DMA controller. + * Most architectures don't have these any more and can get the minimal + * implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS. + * + * Some code relies on seeing MAX_DMA_ADDRESS though. 
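Stepping back to div64.h above: do_div() modifies its dividend in place and returns the remainder, which regularly trips people up, so here is a minimal, hypothetical sketch of the calling convention:

#include <asm/div64.h>
#include <linux/printk.h>

static void demo_split(uint64_t bytes)
{
        uint32_t rem = do_div(bytes, 1000000U);  /* bytes now holds the quotient */

        pr_info("%llu x 1e6 bytes + %u bytes\n",
                (unsigned long long)bytes, rem);
}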
+ */ +#define MAX_DMA_ADDRESS PAGE_OFFSET + +extern int request_dma(unsigned int dmanr, const char *device_id); +extern void free_dma(unsigned int dmanr); + +#endif /* __ASM_GENERIC_DMA_H */ diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h new file mode 100644 index 000000000..9def22e6e --- /dev/null +++ b/include/asm-generic/early_ioremap.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_EARLY_IOREMAP_H_ +#define _ASM_EARLY_IOREMAP_H_ + +#include + +/* + * early_ioremap() and early_iounmap() are for temporary early boot-time + * mappings, before the real ioremap() is functional. + */ +extern void __iomem *early_ioremap(resource_size_t phys_addr, + unsigned long size); +extern void *early_memremap(resource_size_t phys_addr, + unsigned long size); +extern void *early_memremap_ro(resource_size_t phys_addr, + unsigned long size); +extern void *early_memremap_prot(resource_size_t phys_addr, + unsigned long size, unsigned long prot_val); +extern void early_iounmap(void __iomem *addr, unsigned long size); +extern void early_memunmap(void *addr, unsigned long size); + +/* + * Weak function called by early_ioremap_reset(). It does nothing, but + * architectures may provide their own version to do any needed cleanups. + */ +extern void early_ioremap_shutdown(void); + +#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU) +/* Arch-specific initialization */ +extern void early_ioremap_init(void); + +/* Generic initialization called by architecture code */ +extern void early_ioremap_setup(void); + +/* + * Called as last step in paging_init() so library can act + * accordingly for subsequent map/unmap requests. + */ +extern void early_ioremap_reset(void); + +/* + * Early copy from unmapped memory to kernel mapped memory. + */ +extern void copy_from_early_mem(void *dest, phys_addr_t src, + unsigned long size); + +#else +static inline void early_ioremap_init(void) { } +static inline void early_ioremap_setup(void) { } +static inline void early_ioremap_reset(void) { } +#endif + +#endif /* _ASM_EARLY_IOREMAP_H_ */ diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h new file mode 100644 index 000000000..445de38b7 --- /dev/null +++ b/include/asm-generic/emergency-restart.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_EMERGENCY_RESTART_H +#define _ASM_GENERIC_EMERGENCY_RESTART_H + +static inline void machine_emergency_restart(void) +{ + machine_restart(NULL); +} + +#endif /* _ASM_GENERIC_EMERGENCY_RESTART_H */ diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h new file mode 100644 index 000000000..296c65442 --- /dev/null +++ b/include/asm-generic/error-injection.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_ERROR_INJECTION_H +#define _ASM_GENERIC_ERROR_INJECTION_H + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +enum { + EI_ETYPE_NONE, /* Dummy value for undefined case */ + EI_ETYPE_NULL, /* Return NULL if failure */ + EI_ETYPE_ERRNO, /* Return -ERRNO if failure */ + EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */ +}; + +struct error_injection_entry { + unsigned long addr; + int etype; +}; + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION +/* + * Whitelist ganerating macro. Specify functions which can be + * error-injectable using this macro. 
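A hypothetical use of the annotation documented above (the function name is invented): with CONFIG_FUNCTION_ERROR_INJECTION enabled, this lets the fault-injection framework force error returns from the listed function.

#include <linux/error-injection.h>

static int demo_do_init(void)                    /* invented function */
{
        return 0;
}
ALLOW_ERROR_INJECTION(demo_do_init, ERRNO);      /* failures may be injected as -ERRNO */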
+ */ +#define ALLOW_ERROR_INJECTION(fname, _etype) \ +static struct error_injection_entry __used \ + __attribute__((__section__("_error_injection_whitelist"))) \ + _eil_addr_##fname = { \ + .addr = (unsigned long)fname, \ + .etype = EI_ETYPE_##_etype, \ + }; +#else +#define ALLOW_ERROR_INJECTION(fname, _etype) +#endif +#endif + +#endif /* _ASM_GENERIC_ERROR_INJECTION_H */ diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h new file mode 100644 index 000000000..32c0a216f --- /dev/null +++ b/include/asm-generic/exec.h @@ -0,0 +1,19 @@ +/* Generic process execution definitions. + * + * It should be possible to use these on really simple architectures, + * but it serves more as a starting point for new ports. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_EXEC_H +#define __ASM_GENERIC_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __ASM_GENERIC_EXEC_H */ diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h new file mode 100644 index 000000000..4d73e6e3c --- /dev/null +++ b/include/asm-generic/export.h @@ -0,0 +1,93 @@ +#ifndef __ASM_GENERIC_EXPORT_H +#define __ASM_GENERIC_EXPORT_H + +#ifndef KSYM_FUNC +#define KSYM_FUNC(x) x +#endif +#ifdef CONFIG_64BIT +#ifndef KSYM_ALIGN +#define KSYM_ALIGN 8 +#endif +#else +#ifndef KSYM_ALIGN +#define KSYM_ALIGN 4 +#endif +#endif +#ifndef KCRC_ALIGN +#define KCRC_ALIGN 4 +#endif + +.macro __put, val, name +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + .long \val - ., \name - . +#elif defined(CONFIG_64BIT) + .quad \val, \name +#else + .long \val, \name +#endif +.endm + +/* + * note on .section use: @progbits vs %progbits nastiness doesn't matter, + * since we immediately emit into those sections anyway. + */ +.macro ___EXPORT_SYMBOL name,val,sec +#ifdef CONFIG_MODULES + .globl __ksymtab_\name + .section ___ksymtab\sec+\name,"a" + .balign KSYM_ALIGN +__ksymtab_\name: + __put \val, __kstrtab_\name + .previous + .section __ksymtab_strings,"a" +__kstrtab_\name: + .asciz "\name" + .previous +#ifdef CONFIG_MODVERSIONS + .section ___kcrctab\sec+\name,"a" + .balign KCRC_ALIGN +__kcrctab_\name: +#if defined(CONFIG_MODULE_REL_CRCS) + .long __crc_\name - . 
+#else + .long __crc_\name +#endif + .weak __crc_\name + .previous +#endif +#endif +.endm +#undef __put + +#if defined(__KSYM_DEPS__) + +#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym === + +#elif defined(CONFIG_TRIM_UNUSED_KSYMS) + +#include +#include + +#define __EXPORT_SYMBOL(sym, val, sec) \ + __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym)) +#define __cond_export_sym(sym, val, sec, conf) \ + ___cond_export_sym(sym, val, sec, conf) +#define ___cond_export_sym(sym, val, sec, enabled) \ + __cond_export_sym_##enabled(sym, val, sec) +#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec +#define __cond_export_sym_0(sym, val, sec) /* nothing */ + +#else +#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec +#endif + +#define EXPORT_SYMBOL(name) \ + __EXPORT_SYMBOL(name, KSYM_FUNC(name),) +#define EXPORT_SYMBOL_GPL(name) \ + __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl) +#define EXPORT_DATA_SYMBOL(name) \ + __EXPORT_SYMBOL(name, name,) +#define EXPORT_DATA_SYMBOL_GPL(name) \ + __EXPORT_SYMBOL(name, name,_gpl) + +#endif diff --git a/include/asm-generic/extable.h b/include/asm-generic/extable.h new file mode 100644 index 000000000..f9618bd07 --- /dev/null +++ b/include/asm-generic/extable.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_EXTABLE_H +#define __ASM_GENERIC_EXTABLE_H + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + + +struct pt_regs; +extern int fixup_exception(struct pt_regs *regs); + +#endif diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h new file mode 100644 index 000000000..f9f18101e --- /dev/null +++ b/include/asm-generic/fb.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_FB_H_ +#define __ASM_GENERIC_FB_H_ +#include + +#define fb_pgprotect(...) do {} while (0) + +static inline int fb_is_primary_device(struct fb_info *info) +{ + return 0; +} + +#endif /* __ASM_GENERIC_FB_H_ */ diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h new file mode 100644 index 000000000..827e4d3bb --- /dev/null +++ b/include/asm-generic/fixmap.h @@ -0,0 +1,103 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 + * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 + * Break out common bits to asm-generic by Mark Salter, November 2013 + */ + +#ifndef __ASM_GENERIC_FIXMAP_H +#define __ASM_GENERIC_FIXMAP_H + +#include + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) + +#ifndef __ASSEMBLY__ +/* + * 'index to address' translation. 
If anyone tries to use the idx + * directly without translation, we catch the bug with a NULL-deference + * kernel oops. Illegal ranges of incoming indices are caught too. + */ +static __always_inline unsigned long fix_to_virt(const unsigned int idx) +{ + BUILD_BUG_ON(idx >= __end_of_fixed_addresses); + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +/* + * Provide some reasonable defaults for page flags. + * Not all architectures use all of these different types and some + * architectures use different names. + */ +#ifndef FIXMAP_PAGE_NORMAL +#define FIXMAP_PAGE_NORMAL PAGE_KERNEL +#endif +#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO) +#define FIXMAP_PAGE_RO PAGE_KERNEL_RO +#endif +#ifndef FIXMAP_PAGE_NOCACHE +#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE +#endif +#ifndef FIXMAP_PAGE_IO +#define FIXMAP_PAGE_IO PAGE_KERNEL_IO +#endif +#ifndef FIXMAP_PAGE_CLEAR +#define FIXMAP_PAGE_CLEAR __pgprot(0) +#endif + +#ifndef set_fixmap +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL) +#endif + +#ifndef clear_fixmap +#define clear_fixmap(idx) \ + __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR) +#endif + +/* Return a pointer with offset calculated */ +#define __set_fixmap_offset(idx, phys, flags) \ +({ \ + unsigned long ________addr; \ + __set_fixmap(idx, phys, flags); \ + ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \ + ________addr; \ +}) + +#define set_fixmap_offset(idx, phys) \ + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL) + +/* + * Some hardware wants to get fixmapped without caching. + */ +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE) + +#define set_fixmap_offset_nocache(idx, phys) \ + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE) + +/* + * Some fixmaps are for IO + */ +#define set_fixmap_io(idx, phys) \ + __set_fixmap(idx, phys, FIXMAP_PAGE_IO) + +#define set_fixmap_offset_io(idx, phys) \ + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO) + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_GENERIC_FIXMAP_H */ diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h new file mode 100644 index 000000000..51abba9ea --- /dev/null +++ b/include/asm-generic/ftrace.h @@ -0,0 +1,16 @@ +/* + * linux/include/asm-generic/ftrace.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_GENERIC_FTRACE_H__ +#define __ASM_GENERIC_FTRACE_H__ + +/* + * Not all architectures need their own ftrace.h, the most + * common definitions are already in linux/ftrace.h. + */ + +#endif /* __ASM_GENERIC_FTRACE_H__ */ diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h new file mode 100644 index 000000000..8666fe7f3 --- /dev/null +++ b/include/asm-generic/futex.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_FUTEX_H +#define _ASM_GENERIC_FUTEX_H + +#include +#include +#include + +#ifndef CONFIG_SMP +/* + * The following implementation only for uniprocessor machines. + * It relies on preempt_disable() ensuring mutual exclusion. + * + */ + +/** + * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant + * argument and comparison of the previous + * futex value with another constant. 
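A hedged sketch of the fixmap helpers defined above; FIX_MY_UART stands in for an index the architecture would add to its fixed_addresses enum, and uart_phys is a hypothetical physical address.

static void example_poke_fixmapped_uart(phys_addr_t uart_phys)
{
        void __iomem *base;

        /* map the page IO-style and get a pointer including the page offset */
        base = (void __iomem *)set_fixmap_offset_io(FIX_MY_UART, uart_phys);
        writeb('A', base);                      /* hypothetical TX register at offset 0 */
        clear_fixmap(FIX_MY_UART);
}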
+ * + * @encoded_op: encoded operation to execute + * @uaddr: pointer to user space address + * + * Return: + * 0 - On success + * -EFAULT - User access resulted in a page fault + * -EAGAIN - Atomic operation was unable to complete due to contention + * -ENOSYS - Operation not supported + */ +static inline int +arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) +{ + int oldval, ret; + u32 tmp; + + preempt_disable(); + pagefault_disable(); + + ret = -EFAULT; + if (unlikely(get_user(oldval, uaddr) != 0)) + goto out_pagefault_enable; + + ret = 0; + tmp = oldval; + + switch (op) { + case FUTEX_OP_SET: + tmp = oparg; + break; + case FUTEX_OP_ADD: + tmp += oparg; + break; + case FUTEX_OP_OR: + tmp |= oparg; + break; + case FUTEX_OP_ANDN: + tmp &= ~oparg; + break; + case FUTEX_OP_XOR: + tmp ^= oparg; + break; + default: + ret = -ENOSYS; + } + + if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0)) + ret = -EFAULT; + +out_pagefault_enable: + pagefault_enable(); + preempt_enable(); + + if (ret == 0) + *oval = oldval; + + return ret; +} + +/** + * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the + * uaddr with newval if the current value is + * oldval. + * @uval: pointer to store content of @uaddr + * @uaddr: pointer to user space address + * @oldval: old value + * @newval: new value to store to @uaddr + * + * Return: + * 0 - On success + * -EFAULT - User access resulted in a page fault + * -EAGAIN - Atomic operation was unable to complete due to contention + * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG) + */ +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + u32 val; + + preempt_disable(); + if (unlikely(get_user(val, uaddr) != 0)) { + preempt_enable(); + return -EFAULT; + } + + if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) { + preempt_enable(); + return -EFAULT; + } + + *uval = val; + preempt_enable(); + + return 0; +} + +#else +static inline int +arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + case FUTEX_OP_ADD: + case FUTEX_OP_OR: + case FUTEX_OP_ANDN: + case FUTEX_OP_XOR: + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + return -ENOSYS; +} + +#endif /* CONFIG_SMP */ +#endif diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h new file mode 100644 index 000000000..e9f20b813 --- /dev/null +++ b/include/asm-generic/getorder.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_GETORDER_H +#define __ASM_GENERIC_GETORDER_H + +#ifndef __ASSEMBLY__ + +#include +#include + +/** + * get_order - Determine the allocation order of a memory size + * @size: The size for which to get the order + * + * Determine the allocation order of a particular sized block of memory. This + * is on a logarithmic scale, where: + * + * 0 -> 2^0 * PAGE_SIZE and below + * 1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1 + * 2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1 + * 3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1 + * 4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1 + * ... + * + * The order returned is used to find the smallest allocation granule required + * to hold an object of the specified size. + * + * The result is undefined if the size is 0. 
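A call-site sketch (illustration only, not new kernel API) of how the core futex code might invoke the arch_futex_atomic_op_inuser() implementation above; FUTEX_OP_ADD comes from <linux/futex.h> and uaddr is a user-space futex word.

static int example_futex_add_one(u32 __user *uaddr)
{
        int oldval;
        int ret;

        ret = arch_futex_atomic_op_inuser(FUTEX_OP_ADD, 1, &oldval, uaddr);
        if (!ret)
                pr_debug("previous futex value was %d\n", oldval);
        return ret;
}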
+ */ +static inline __attribute_const__ int get_order(unsigned long size) +{ + if (__builtin_constant_p(size)) { + if (!size) + return BITS_PER_LONG - PAGE_SHIFT; + + if (size < (1UL << PAGE_SHIFT)) + return 0; + + return ilog2((size) - 1) - PAGE_SHIFT + 1; + } + + size--; + size >>= PAGE_SHIFT; +#if BITS_PER_LONG == 32 + return fls(size); +#else + return fls64(size); +#endif +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_GENERIC_GETORDER_H */ diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h new file mode 100644 index 000000000..19eadac41 --- /dev/null +++ b/include/asm-generic/gpio.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_GPIO_H +#define _ASM_GENERIC_GPIO_H + +#include +#include +#include +#include + +#ifdef CONFIG_GPIOLIB + +#include +#include +#include + +/* Platforms may implement their GPIO interface with library code, + * at a small performance cost for non-inlined operations and some + * extra memory (for code and for per-GPIO table entries). + * + * While the GPIO programming interface defines valid GPIO numbers + * to be in the range 0..MAX_INT, this library restricts them to the + * smaller range 0..ARCH_NR_GPIOS-1. + * + * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of + * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is + * actually an estimate of a board-specific value. + */ + +#ifndef ARCH_NR_GPIOS +#if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0 +#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO +#else +#define ARCH_NR_GPIOS 512 +#endif +#endif + +/* + * "valid" GPIO numbers are nonnegative and may be passed to + * setup routines like gpio_request(). only some valid numbers + * can successfully be requested and used. + * + * Invalid GPIO numbers are useful for indicating no-such-GPIO in + * platform data and other tables. + */ + +static inline bool gpio_is_valid(int number) +{ + return number >= 0 && number < ARCH_NR_GPIOS; +} + +struct device; +struct gpio; +struct seq_file; +struct module; +struct device_node; +struct gpio_desc; + +/* caller holds gpio_lock *OR* gpio is marked as requested */ +static inline struct gpio_chip *gpio_to_chip(unsigned gpio) +{ + return gpiod_to_chip(gpio_to_desc(gpio)); +} + +/* Always use the library code for GPIO management calls, + * or when sleeping may be involved. + */ +extern int gpio_request(unsigned gpio, const char *label); +extern void gpio_free(unsigned gpio); + +static inline int gpio_direction_input(unsigned gpio) +{ + return gpiod_direction_input(gpio_to_desc(gpio)); +} +static inline int gpio_direction_output(unsigned gpio, int value) +{ + return gpiod_direction_output_raw(gpio_to_desc(gpio), value); +} + +static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) +{ + return gpiod_set_debounce(gpio_to_desc(gpio), debounce); +} + +static inline int gpio_get_value_cansleep(unsigned gpio) +{ + return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio)); +} +static inline void gpio_set_value_cansleep(unsigned gpio, int value) +{ + return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value); +} + + +/* A platform's code may want to inline the I/O calls when + * the GPIO is constant and refers to some always-present controller, + * giving direct access to chip registers and tight bitbanging loops. 
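A minimal sketch of get_order() feeding the page allocator (assuming <linux/gfp.h>); with 4 KiB pages, a 64 KiB request maps to order 4.

static struct page *example_alloc_64k(void)
{
        unsigned int order = get_order(64 * 1024);      /* 4 when PAGE_SIZE == 4096 */

        return alloc_pages(GFP_KERNEL, order);          /* free with __free_pages(pg, order) */
}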
+ */ +static inline int __gpio_get_value(unsigned gpio) +{ + return gpiod_get_raw_value(gpio_to_desc(gpio)); +} +static inline void __gpio_set_value(unsigned gpio, int value) +{ + return gpiod_set_raw_value(gpio_to_desc(gpio), value); +} + +static inline int __gpio_cansleep(unsigned gpio) +{ + return gpiod_cansleep(gpio_to_desc(gpio)); +} + +static inline int __gpio_to_irq(unsigned gpio) +{ + return gpiod_to_irq(gpio_to_desc(gpio)); +} + +extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); +extern int gpio_request_array(const struct gpio *array, size_t num); +extern void gpio_free_array(const struct gpio *array, size_t num); + +/* + * A sysfs interface can be exported by individual drivers if they want, + * but more typically is configured entirely from userspace. + */ +static inline int gpio_export(unsigned gpio, bool direction_may_change) +{ + return gpiod_export(gpio_to_desc(gpio), direction_may_change); +} + +static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) +{ + return gpiod_export_link(dev, name, gpio_to_desc(gpio)); +} + +static inline void gpio_unexport(unsigned gpio) +{ + gpiod_unexport(gpio_to_desc(gpio)); +} + +#else /* !CONFIG_GPIOLIB */ + +static inline bool gpio_is_valid(int number) +{ + /* only non-negative numbers are valid */ + return number >= 0; +} + +/* platforms that don't directly support access to GPIOs through I2C, SPI, + * or other blocking infrastructure can use these wrappers. + */ + +static inline int gpio_cansleep(unsigned gpio) +{ + return 0; +} + +static inline int gpio_get_value_cansleep(unsigned gpio) +{ + might_sleep(); + return __gpio_get_value(gpio); +} + +static inline void gpio_set_value_cansleep(unsigned gpio, int value) +{ + might_sleep(); + __gpio_set_value(gpio, value); +} + +#endif /* !CONFIG_GPIOLIB */ + +#endif /* _ASM_GENERIC_GPIO_H */ diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h new file mode 100644 index 000000000..d14214dfc --- /dev/null +++ b/include/asm-generic/hardirq.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_HARDIRQ_H +#define __ASM_GENERIC_HARDIRQ_H + +#include +#include + +typedef struct { + unsigned int __softirq_pending; +} ____cacheline_aligned irq_cpustat_t; + +#include /* Standard mappings for irq_cpustat_t above */ +#include + +#ifndef ack_bad_irq +static inline void ack_bad_irq(unsigned int irq) +{ + printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); +} +#endif + +#endif /* __ASM_GENERIC_HARDIRQ_H */ diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h new file mode 100644 index 000000000..9d0cde8ab --- /dev/null +++ b/include/asm-generic/hugetlb.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_HUGETLB_H +#define _ASM_GENERIC_HUGETLB_H + +static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) +{ + return mk_pte(page, pgprot); +} + +static inline unsigned long huge_pte_write(pte_t pte) +{ + return pte_write(pte); +} + +static inline unsigned long huge_pte_dirty(pte_t pte) +{ + return pte_dirty(pte); +} + +static inline pte_t huge_pte_mkwrite(pte_t pte) +{ + return pte_mkwrite(pte); +} + +static inline pte_t huge_pte_mkdirty(pte_t pte) +{ + return pte_mkdirty(pte); +} + +static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) +{ + return pte_modify(pte, newprot); +} + +#ifndef huge_pte_clear +static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + 
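A hedged sketch of the legacy integer-based GPIO calls wrapped above (new code would normally use the descriptor-based gpiod_* API instead); the GPIO number and label are hypothetical and GPIOF_OUT_INIT_LOW is assumed from <linux/gpio.h>.

static void example_blink_led(void)
{
        const unsigned int led = 42;    /* hypothetical global GPIO number */

        if (!gpio_is_valid(led))
                return;
        if (gpio_request_one(led, GPIOF_OUT_INIT_LOW, "example-led"))
                return;
        gpio_set_value(led, 1);
        gpio_free(led);
}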
pte_clear(mm, addr, ptep); +} +#endif + +#endif /* _ASM_GENERIC_HUGETLB_H */ diff --git a/include/asm-generic/hw_irq.h b/include/asm-generic/hw_irq.h new file mode 100644 index 000000000..89036d7b4 --- /dev/null +++ b/include/asm-generic/hw_irq.h @@ -0,0 +1,9 @@ +#ifndef __ASM_GENERIC_HW_IRQ_H +#define __ASM_GENERIC_HW_IRQ_H +/* + * hw_irq.h has internal declarations for the low-level interrupt + * controller, like the original i8259A. + * In general, this is not needed for new architectures. + */ + +#endif /* __ASM_GENERIC_HW_IRQ_H */ diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h new file mode 100644 index 000000000..81dfa3ee5 --- /dev/null +++ b/include/asm-generic/ide_iops.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Generic I/O and MEMIO string operations. */ + +#define __ide_insw insw +#define __ide_insl insl +#define __ide_outsw outsw +#define __ide_outsl outsl + +static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count) +{ + while (count--) { + *(u16 *)addr = readw(port); + addr += 2; + } +} + +static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count) +{ + while (count--) { + *(u32 *)addr = readl(port); + addr += 4; + } +} + +static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) +{ + while (count--) { + writew(*(u16 *)addr, port); + addr += 2; + } +} + +static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) +{ + while (count--) { + writel(*(u32 *)addr, port); + addr += 4; + } +} diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h new file mode 100644 index 000000000..a248545f1 --- /dev/null +++ b/include/asm-generic/int-ll64.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * asm-generic/int-ll64.h + * + * Integer declarations for architectures which use "long long" + * for 64-bit types. + */ +#ifndef _ASM_GENERIC_INT_LL64_H +#define _ASM_GENERIC_INT_LL64_H + +#include + + +#ifndef __ASSEMBLY__ + +typedef __s8 s8; +typedef __u8 u8; +typedef __s16 s16; +typedef __u16 u16; +typedef __s32 s32; +typedef __u32 u32; +typedef __s64 s64; +typedef __u64 u64; + +#define S8_C(x) x +#define U8_C(x) x ## U +#define S16_C(x) x +#define U16_C(x) x ## U +#define S32_C(x) x +#define U32_C(x) x ## U +#define S64_C(x) x ## LL +#define U64_C(x) x ## ULL + +#else /* __ASSEMBLY__ */ + +#define S8_C(x) x +#define U8_C(x) x +#define S16_C(x) x +#define U16_C(x) x +#define S32_C(x) x +#define U32_C(x) x +#define S64_C(x) x +#define U64_C(x) x + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_GENERIC_INT_LL64_H */ diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h new file mode 100644 index 000000000..d356f8029 --- /dev/null +++ b/include/asm-generic/io.h @@ -0,0 +1,1140 @@ +/* Generic I/O port emulation. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#ifndef __ASM_GENERIC_IO_H +#define __ASM_GENERIC_IO_H + +#include /* I/O is all done through memory accesses */ +#include /* for memset() and memcpy() */ +#include + +#ifdef CONFIG_GENERIC_IOMAP +#include +#endif + +#include + +#ifndef mmiowb +#define mmiowb() do {} while (0) +#endif + +#ifndef __io_br +#define __io_br() barrier() +#endif + +/* prevent prefetching of coherent DMA data ahead of a dma-complete */ +#ifndef __io_ar +#ifdef rmb +#define __io_ar() rmb() +#else +#define __io_ar() barrier() +#endif +#endif + +/* flush writes to coherent DMA data before possibly triggering a DMA read */ +#ifndef __io_bw +#ifdef wmb +#define __io_bw() wmb() +#else +#define __io_bw() barrier() +#endif +#endif + +/* serialize device access against a spin_unlock, usually handled there. */ +#ifndef __io_aw +#define __io_aw() barrier() +#endif + +#ifndef __io_pbw +#define __io_pbw() __io_bw() +#endif + +#ifndef __io_paw +#define __io_paw() __io_aw() +#endif + +#ifndef __io_pbr +#define __io_pbr() __io_br() +#endif + +#ifndef __io_par +#define __io_par() __io_ar() +#endif + + +/* + * __raw_{read,write}{b,w,l,q}() access memory in native endianness. + * + * On some architectures memory mapped IO needs to be accessed differently. + * On the simple architectures, we just read/write the memory location + * directly. + */ + +#ifndef __raw_readb +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + return *(const volatile u8 __force *)addr; +} +#endif + +#ifndef __raw_readw +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + return *(const volatile u16 __force *)addr; +} +#endif + +#ifndef __raw_readl +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + return *(const volatile u32 __force *)addr; +} +#endif + +#ifdef CONFIG_64BIT +#ifndef __raw_readq +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) +{ + return *(const volatile u64 __force *)addr; +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef __raw_writeb +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 value, volatile void __iomem *addr) +{ + *(volatile u8 __force *)addr = value; +} +#endif + +#ifndef __raw_writew +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 value, volatile void __iomem *addr) +{ + *(volatile u16 __force *)addr = value; +} +#endif + +#ifndef __raw_writel +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 value, volatile void __iomem *addr) +{ + *(volatile u32 __force *)addr = value; +} +#endif + +#ifdef CONFIG_64BIT +#ifndef __raw_writeq +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 value, volatile void __iomem *addr) +{ + *(volatile u64 __force *)addr = value; +} +#endif +#endif /* CONFIG_64BIT */ + +/* + * {read,write}{b,w,l,q}() access little endian memory and return result in + * native endianness. 
+ */ + +#ifndef readb +#define readb readb +static inline u8 readb(const volatile void __iomem *addr) +{ + u8 val; + + __io_br(); + val = __raw_readb(addr); + __io_ar(); + return val; +} +#endif + +#ifndef readw +#define readw readw +static inline u16 readw(const volatile void __iomem *addr) +{ + u16 val; + + __io_br(); + val = __le16_to_cpu(__raw_readw(addr)); + __io_ar(); + return val; +} +#endif + +#ifndef readl +#define readl readl +static inline u32 readl(const volatile void __iomem *addr) +{ + u32 val; + + __io_br(); + val = __le32_to_cpu(__raw_readl(addr)); + __io_ar(); + return val; +} +#endif + +#ifdef CONFIG_64BIT +#ifndef readq +#define readq readq +static inline u64 readq(const volatile void __iomem *addr) +{ + u64 val; + + __io_br(); + val = __le64_to_cpu(__raw_readq(addr)); + __io_ar(); + return val; +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef writeb +#define writeb writeb +static inline void writeb(u8 value, volatile void __iomem *addr) +{ + __io_bw(); + __raw_writeb(value, addr); + __io_aw(); +} +#endif + +#ifndef writew +#define writew writew +static inline void writew(u16 value, volatile void __iomem *addr) +{ + __io_bw(); + __raw_writew(cpu_to_le16(value), addr); + __io_aw(); +} +#endif + +#ifndef writel +#define writel writel +static inline void writel(u32 value, volatile void __iomem *addr) +{ + __io_bw(); + __raw_writel(__cpu_to_le32(value), addr); + __io_aw(); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef writeq +#define writeq writeq +static inline void writeq(u64 value, volatile void __iomem *addr) +{ + __io_bw(); + __raw_writeq(__cpu_to_le64(value), addr); + __io_aw(); +} +#endif +#endif /* CONFIG_64BIT */ + +/* + * {read,write}{b,w,l,q}_relaxed() are like the regular version, but + * are not guaranteed to provide ordering against spinlocks or memory + * accesses. + */ +#ifndef readb_relaxed +#define readb_relaxed readb_relaxed +static inline u8 readb_relaxed(const volatile void __iomem *addr) +{ + return __raw_readb(addr); +} +#endif + +#ifndef readw_relaxed +#define readw_relaxed readw_relaxed +static inline u16 readw_relaxed(const volatile void __iomem *addr) +{ + return __le16_to_cpu(__raw_readw(addr)); +} +#endif + +#ifndef readl_relaxed +#define readl_relaxed readl_relaxed +static inline u32 readl_relaxed(const volatile void __iomem *addr) +{ + return __le32_to_cpu(__raw_readl(addr)); +} +#endif + +#if defined(readq) && !defined(readq_relaxed) +#define readq_relaxed readq_relaxed +static inline u64 readq_relaxed(const volatile void __iomem *addr) +{ + return __le64_to_cpu(__raw_readq(addr)); +} +#endif + +#ifndef writeb_relaxed +#define writeb_relaxed writeb_relaxed +static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) +{ + __raw_writeb(value, addr); +} +#endif + +#ifndef writew_relaxed +#define writew_relaxed writew_relaxed +static inline void writew_relaxed(u16 value, volatile void __iomem *addr) +{ + __raw_writew(cpu_to_le16(value), addr); +} +#endif + +#ifndef writel_relaxed +#define writel_relaxed writel_relaxed +static inline void writel_relaxed(u32 value, volatile void __iomem *addr) +{ + __raw_writel(__cpu_to_le32(value), addr); +} +#endif + +#if defined(writeq) && !defined(writeq_relaxed) +#define writeq_relaxed writeq_relaxed +static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) +{ + __raw_writeq(__cpu_to_le64(value), addr); +} +#endif + +/* + * {read,write}s{b,w,l,q}() repeatedly access the same memory address in + * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). 
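A short sketch of the readl()/writel() accessors defined above operating on an ioremap()ed region; the register offsets are hypothetical and BIT() is assumed from <linux/bits.h>.

static void example_kick_device(void __iomem *base)
{
        u32 status;

        status = readl(base + 0x04);            /* hypothetical STATUS register */
        if (status & BIT(0))
                writel(BIT(0), base + 0x08);    /* hypothetical ACK register */
}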
+ */ +#ifndef readsb +#define readsb readsb +static inline void readsb(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u8 *buf = buffer; + + do { + u8 x = __raw_readb(addr); + *buf++ = x; + } while (--count); + } +} +#endif + +#ifndef readsw +#define readsw readsw +static inline void readsw(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u16 *buf = buffer; + + do { + u16 x = __raw_readw(addr); + *buf++ = x; + } while (--count); + } +} +#endif + +#ifndef readsl +#define readsl readsl +static inline void readsl(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u32 *buf = buffer; + + do { + u32 x = __raw_readl(addr); + *buf++ = x; + } while (--count); + } +} +#endif + +#ifdef CONFIG_64BIT +#ifndef readsq +#define readsq readsq +static inline void readsq(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u64 *buf = buffer; + + do { + u64 x = __raw_readq(addr); + *buf++ = x; + } while (--count); + } +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef writesb +#define writesb writesb +static inline void writesb(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u8 *buf = buffer; + + do { + __raw_writeb(*buf++, addr); + } while (--count); + } +} +#endif + +#ifndef writesw +#define writesw writesw +static inline void writesw(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u16 *buf = buffer; + + do { + __raw_writew(*buf++, addr); + } while (--count); + } +} +#endif + +#ifndef writesl +#define writesl writesl +static inline void writesl(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u32 *buf = buffer; + + do { + __raw_writel(*buf++, addr); + } while (--count); + } +} +#endif + +#ifdef CONFIG_64BIT +#ifndef writesq +#define writesq writesq +static inline void writesq(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u64 *buf = buffer; + + do { + __raw_writeq(*buf++, addr); + } while (--count); + } +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef PCI_IOBASE +#define PCI_IOBASE ((void __iomem *)0) +#endif + +#ifndef IO_SPACE_LIMIT +#define IO_SPACE_LIMIT 0xffff +#endif + +#include + +/* + * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be + * implemented on hardware that needs an additional delay for I/O accesses to + * take effect. 
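A sketch of the string accessors defined above, draining a 16-bit FIFO exposed through a single register (the offset and word count are hypothetical):

static void example_drain_fifo(void __iomem *base, u16 *buf, unsigned int words)
{
        /* read 'words' 16-bit values from the same MMIO address */
        readsw(base + 0x10, buf, words);
}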
+ */ + +#ifndef inb +#define inb inb +static inline u8 inb(unsigned long addr) +{ + u8 val; + + __io_pbr(); + val = __raw_readb(PCI_IOBASE + addr); + __io_par(); + return val; +} +#endif + +#ifndef inw +#define inw inw +static inline u16 inw(unsigned long addr) +{ + u16 val; + + __io_pbr(); + val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr)); + __io_par(); + return val; +} +#endif + +#ifndef inl +#define inl inl +static inline u32 inl(unsigned long addr) +{ + u32 val; + + __io_pbr(); + val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr)); + __io_par(); + return val; +} +#endif + +#ifndef outb +#define outb outb +static inline void outb(u8 value, unsigned long addr) +{ + __io_pbw(); + __raw_writeb(value, PCI_IOBASE + addr); + __io_paw(); +} +#endif + +#ifndef outw +#define outw outw +static inline void outw(u16 value, unsigned long addr) +{ + __io_pbw(); + __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr); + __io_paw(); +} +#endif + +#ifndef outl +#define outl outl +static inline void outl(u32 value, unsigned long addr) +{ + __io_pbw(); + __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr); + __io_paw(); +} +#endif + +#ifndef inb_p +#define inb_p inb_p +static inline u8 inb_p(unsigned long addr) +{ + return inb(addr); +} +#endif + +#ifndef inw_p +#define inw_p inw_p +static inline u16 inw_p(unsigned long addr) +{ + return inw(addr); +} +#endif + +#ifndef inl_p +#define inl_p inl_p +static inline u32 inl_p(unsigned long addr) +{ + return inl(addr); +} +#endif + +#ifndef outb_p +#define outb_p outb_p +static inline void outb_p(u8 value, unsigned long addr) +{ + outb(value, addr); +} +#endif + +#ifndef outw_p +#define outw_p outw_p +static inline void outw_p(u16 value, unsigned long addr) +{ + outw(value, addr); +} +#endif + +#ifndef outl_p +#define outl_p outl_p +static inline void outl_p(u32 value, unsigned long addr) +{ + outl(value, addr); +} +#endif + +/* + * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a + * single I/O port multiple times. 
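A sketch of the port-I/O helpers defined above, assuming a legacy PC-style UART at the conventional 0x3f8 base (purely illustrative):

static void example_poll_uart(void)
{
        u8 lsr;

        outb('A', 0x3f8);               /* transmit holding register */
        lsr = inb(0x3f8 + 5);           /* line status register */
        if (lsr & 0x20)
                pr_debug("transmitter empty\n");
}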
+ */ + +#ifndef insb +#define insb insb +static inline void insb(unsigned long addr, void *buffer, unsigned int count) +{ + readsb(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insw +#define insw insw +static inline void insw(unsigned long addr, void *buffer, unsigned int count) +{ + readsw(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insl +#define insl insl +static inline void insl(unsigned long addr, void *buffer, unsigned int count) +{ + readsl(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsb +#define outsb outsb +static inline void outsb(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesb(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsw +#define outsw outsw +static inline void outsw(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesw(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsl +#define outsl outsl +static inline void outsl(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesl(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insb_p +#define insb_p insb_p +static inline void insb_p(unsigned long addr, void *buffer, unsigned int count) +{ + insb(addr, buffer, count); +} +#endif + +#ifndef insw_p +#define insw_p insw_p +static inline void insw_p(unsigned long addr, void *buffer, unsigned int count) +{ + insw(addr, buffer, count); +} +#endif + +#ifndef insl_p +#define insl_p insl_p +static inline void insl_p(unsigned long addr, void *buffer, unsigned int count) +{ + insl(addr, buffer, count); +} +#endif + +#ifndef outsb_p +#define outsb_p outsb_p +static inline void outsb_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsb(addr, buffer, count); +} +#endif + +#ifndef outsw_p +#define outsw_p outsw_p +static inline void outsw_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsw(addr, buffer, count); +} +#endif + +#ifndef outsl_p +#define outsl_p outsl_p +static inline void outsl_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsl(addr, buffer, count); +} +#endif + +#ifndef CONFIG_GENERIC_IOMAP +#ifndef ioread8 +#define ioread8 ioread8 +static inline u8 ioread8(const volatile void __iomem *addr) +{ + return readb(addr); +} +#endif + +#ifndef ioread16 +#define ioread16 ioread16 +static inline u16 ioread16(const volatile void __iomem *addr) +{ + return readw(addr); +} +#endif + +#ifndef ioread32 +#define ioread32 ioread32 +static inline u32 ioread32(const volatile void __iomem *addr) +{ + return readl(addr); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef ioread64 +#define ioread64 ioread64 +static inline u64 ioread64(const volatile void __iomem *addr) +{ + return readq(addr); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef iowrite8 +#define iowrite8 iowrite8 +static inline void iowrite8(u8 value, volatile void __iomem *addr) +{ + writeb(value, addr); +} +#endif + +#ifndef iowrite16 +#define iowrite16 iowrite16 +static inline void iowrite16(u16 value, volatile void __iomem *addr) +{ + writew(value, addr); +} +#endif + +#ifndef iowrite32 +#define iowrite32 iowrite32 +static inline void iowrite32(u32 value, volatile void __iomem *addr) +{ + writel(value, addr); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef iowrite64 +#define iowrite64 iowrite64 +static inline void iowrite64(u64 value, volatile void __iomem *addr) +{ + writeq(value, addr); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef ioread16be +#define ioread16be ioread16be +static inline u16 ioread16be(const volatile void __iomem *addr) +{ 
+ return swab16(readw(addr)); +} +#endif + +#ifndef ioread32be +#define ioread32be ioread32be +static inline u32 ioread32be(const volatile void __iomem *addr) +{ + return swab32(readl(addr)); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef ioread64be +#define ioread64be ioread64be +static inline u64 ioread64be(const volatile void __iomem *addr) +{ + return swab64(readq(addr)); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef iowrite16be +#define iowrite16be iowrite16be +static inline void iowrite16be(u16 value, void volatile __iomem *addr) +{ + writew(swab16(value), addr); +} +#endif + +#ifndef iowrite32be +#define iowrite32be iowrite32be +static inline void iowrite32be(u32 value, volatile void __iomem *addr) +{ + writel(swab32(value), addr); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef iowrite64be +#define iowrite64be iowrite64be +static inline void iowrite64be(u64 value, volatile void __iomem *addr) +{ + writeq(swab64(value), addr); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef ioread8_rep +#define ioread8_rep ioread8_rep +static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + readsb(addr, buffer, count); +} +#endif + +#ifndef ioread16_rep +#define ioread16_rep ioread16_rep +static inline void ioread16_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsw(addr, buffer, count); +} +#endif + +#ifndef ioread32_rep +#define ioread32_rep ioread32_rep +static inline void ioread32_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsl(addr, buffer, count); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef ioread64_rep +#define ioread64_rep ioread64_rep +static inline void ioread64_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsq(addr, buffer, count); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef iowrite8_rep +#define iowrite8_rep iowrite8_rep +static inline void iowrite8_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesb(addr, buffer, count); +} +#endif + +#ifndef iowrite16_rep +#define iowrite16_rep iowrite16_rep +static inline void iowrite16_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesw(addr, buffer, count); +} +#endif + +#ifndef iowrite32_rep +#define iowrite32_rep iowrite32_rep +static inline void iowrite32_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesl(addr, buffer, count); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef iowrite64_rep +#define iowrite64_rep iowrite64_rep +static inline void iowrite64_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesq(addr, buffer, count); +} +#endif +#endif /* CONFIG_64BIT */ +#endif /* CONFIG_GENERIC_IOMAP */ + +#ifdef __KERNEL__ + +#include +#define __io_virt(x) ((void __force *)(x)) + +#ifndef CONFIG_GENERIC_IOMAP +struct pci_dev; +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); + +#ifndef pci_iounmap +#define pci_iounmap pci_iounmap +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) +{ +} +#endif +#endif /* CONFIG_GENERIC_IOMAP */ + +/* + * Change virtual addresses to physical addresses and vv. 
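A sketch of the big-endian ioread/iowrite helpers defined above, for a device whose registers are big-endian regardless of CPU byte order (the offset is hypothetical):

static void example_enable_be_device(void __iomem *regs)
{
        u32 ctrl = ioread32be(regs + 0x10);     /* hypothetical CONTROL register */

        iowrite32be(ctrl | 0x1, regs + 0x10);
}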
+ * These are pretty trivial + */ +#ifndef virt_to_phys +#define virt_to_phys virt_to_phys +static inline unsigned long virt_to_phys(volatile void *address) +{ + return __pa((unsigned long)address); +} +#endif + +#ifndef phys_to_virt +#define phys_to_virt phys_to_virt +static inline void *phys_to_virt(unsigned long address) +{ + return __va(address); +} +#endif + +/** + * DOC: ioremap() and ioremap_*() variants + * + * If you have an IOMMU your architecture is expected to have both ioremap() + * and iounmap() implemented otherwise the asm-generic helpers will provide a + * direct mapping. + * + * There are ioremap_*() call variants, if you have no IOMMU we naturally will + * default to direct mapping for all of them, you can override these defaults. + * If you have an IOMMU you are highly encouraged to provide your own + * ioremap variant implementation as there currently is no safe architecture + * agnostic default. To avoid possible improper behaviour default asm-generic + * ioremap_*() variants all return NULL when an IOMMU is available. If you've + * defined your own ioremap_*() variant you must then declare your own + * ioremap_*() variant as defined to itself to avoid the default NULL return. + */ + +#ifdef CONFIG_MMU + +#ifndef ioremap_uc +#define ioremap_uc ioremap_uc +static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) +{ + return NULL; +} +#endif + +#else /* !CONFIG_MMU */ + +/* + * Change "struct page" to physical address. + * + * This implementation is for the no-MMU case only... if you have an MMU + * you'll need to provide your own definitions. + */ + +#ifndef ioremap +#define ioremap ioremap +static inline void __iomem *ioremap(phys_addr_t offset, size_t size) +{ + return (void __iomem *)(unsigned long)offset; +} +#endif + +#ifndef __ioremap +#define __ioremap __ioremap +static inline void __iomem *__ioremap(phys_addr_t offset, size_t size, + unsigned long flags) +{ + return ioremap(offset, size); +} +#endif + +#ifndef iounmap +#define iounmap iounmap + +static inline void iounmap(void __iomem *addr) +{ +} +#endif +#endif /* CONFIG_MMU */ +#ifndef ioremap_nocache +void __iomem *ioremap(phys_addr_t phys_addr, size_t size); +#define ioremap_nocache ioremap_nocache +static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) +{ + return ioremap(offset, size); +} +#endif + +#ifndef ioremap_uc +#define ioremap_uc ioremap_uc +static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif + +#ifndef ioremap_wc +#define ioremap_wc ioremap_wc +static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif + +#ifndef ioremap_wt +#define ioremap_wt ioremap_wt +static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif + +#ifdef CONFIG_HAS_IOPORT_MAP +#ifndef CONFIG_GENERIC_IOMAP +#ifndef ioport_map +#define ioport_map ioport_map +static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + port &= IO_SPACE_LIMIT; + return (port > MMIO_UPPER_LIMIT) ? 
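A hedged sketch of the ioremap_wc() fallback defined above (here it simply degrades to ioremap_nocache()) together with memset_io(), which appears a little further down in this header; the framebuffer address and size are hypothetical.

static void __iomem *example_map_fb(phys_addr_t fb_start, size_t fb_size)
{
        void __iomem *fb = ioremap_wc(fb_start, fb_size);

        if (fb)
                memset_io(fb, 0, fb_size);      /* clear the (hypothetical) framebuffer */
        return fb;                              /* caller iounmap()s when done */
}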
NULL : PCI_IOBASE + port; +} +#endif + +#ifndef ioport_unmap +#define ioport_unmap ioport_unmap +static inline void ioport_unmap(void __iomem *p) +{ +} +#endif +#else /* CONFIG_GENERIC_IOMAP */ +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *p); +#endif /* CONFIG_GENERIC_IOMAP */ +#endif /* CONFIG_HAS_IOPORT_MAP */ + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#ifndef xlate_dev_kmem_ptr +#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr +static inline void *xlate_dev_kmem_ptr(void *addr) +{ + return addr; +} +#endif + +#ifndef xlate_dev_mem_ptr +#define xlate_dev_mem_ptr xlate_dev_mem_ptr +static inline void *xlate_dev_mem_ptr(phys_addr_t addr) +{ + return __va(addr); +} +#endif + +#ifndef unxlate_dev_mem_ptr +#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr +static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) +{ +} +#endif + +#ifdef CONFIG_VIRT_TO_BUS +#ifndef virt_to_bus +static inline unsigned long virt_to_bus(void *address) +{ + return (unsigned long)address; +} + +static inline void *bus_to_virt(unsigned long address) +{ + return (void *)address; +} +#endif +#endif + +#ifndef memset_io +#define memset_io memset_io +/** + * memset_io Set a range of I/O memory to a constant value + * @addr: The beginning of the I/O-memory range to set + * @val: The value to set the memory to + * @count: The number of bytes to set + * + * Set a range of I/O memory to a given value. + */ +static inline void memset_io(volatile void __iomem *addr, int value, + size_t size) +{ + memset(__io_virt(addr), value, size); +} +#endif + +#ifndef memcpy_fromio +#define memcpy_fromio memcpy_fromio +/** + * memcpy_fromio Copy a block of data from I/O memory + * @dst: The (RAM) destination for the copy + * @src: The (I/O memory) source for the data + * @count: The number of bytes to copy + * + * Copy a block of data from I/O memory. + */ +static inline void memcpy_fromio(void *buffer, + const volatile void __iomem *addr, + size_t size) +{ + memcpy(buffer, __io_virt(addr), size); +} +#endif + +#ifndef memcpy_toio +#define memcpy_toio memcpy_toio +/** + * memcpy_toio Copy a block of data into I/O memory + * @dst: The (I/O memory) destination for the copy + * @src: The (RAM) source for the data + * @count: The number of bytes to copy + * + * Copy a block of data to I/O memory. + */ +static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, + size_t size) +{ + memcpy(__io_virt(addr), buffer, size); +} +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ASM_GENERIC_IO_H */ diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h new file mode 100644 index 000000000..9fda9ed00 --- /dev/null +++ b/include/asm-generic/ioctl.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_IOCTL_H +#define _ASM_GENERIC_IOCTL_H + +#include + +#ifdef __CHECKER__ +#define _IOC_TYPECHECK(t) (sizeof(t)) +#else +/* provoke compile error for invalid uses of size argument */ +extern unsigned int __invalid_size_argument_for_IOC; +#define _IOC_TYPECHECK(t) \ + ((sizeof(t) == sizeof(t[1]) && \ + sizeof(t) < (1 << _IOC_SIZEBITS)) ? 
\ + sizeof(t) : __invalid_size_argument_for_IOC) +#endif + +#endif /* _ASM_GENERIC_IOCTL_H */ diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h new file mode 100644 index 000000000..5b63b94ef --- /dev/null +++ b/include/asm-generic/iomap.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __GENERIC_IO_H +#define __GENERIC_IO_H + +#include +#include + +/* + * These are the "generic" interfaces for doing new-style + * memory-mapped or PIO accesses. Architectures may do + * their own arch-optimized versions, these just act as + * wrappers around the old-style IO register access functions: + * read[bwl]/write[bwl]/in[bwl]/out[bwl] + * + * Don't include this directly, include it from . + */ + +/* + * Read/write from/to an (offsettable) iomem cookie. It might be a PIO + * access or a MMIO access, these functions don't care. The info is + * encoded in the hardware mapping set up by the mapping functions + * (or the cookie itself, depending on implementation and hw). + * + * The generic routines just encode the PIO/MMIO as part of the + * cookie, and coldly assume that the MMIO IO mappings are not + * in the low address range. Architectures for which this is not + * true can't use this generic implementation. + */ +extern unsigned int ioread8(void __iomem *); +extern unsigned int ioread16(void __iomem *); +extern unsigned int ioread16be(void __iomem *); +extern unsigned int ioread32(void __iomem *); +extern unsigned int ioread32be(void __iomem *); +#ifdef CONFIG_64BIT +extern u64 ioread64(void __iomem *); +extern u64 ioread64be(void __iomem *); +#endif + +extern void iowrite8(u8, void __iomem *); +extern void iowrite16(u16, void __iomem *); +extern void iowrite16be(u16, void __iomem *); +extern void iowrite32(u32, void __iomem *); +extern void iowrite32be(u32, void __iomem *); +#ifdef CONFIG_64BIT +extern void iowrite64(u64, void __iomem *); +extern void iowrite64be(u64, void __iomem *); +#endif + +/* + * "string" versions of the above. Note that they + * use native byte ordering for the accesses (on + * the assumption that IO and memory agree on a + * byte order, and CPU byteorder is irrelevant). + * + * They do _not_ update the port address. If you + * want MMIO that copies stuff laid out in MMIO + * memory across multiple ports, use "memcpy_toio()" + * and friends. 
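The _IOC_TYPECHECK() macro above backs the uapi _IOR()/_IOW() helpers; a sketch of how a driver might define its ioctl numbers with them (the magic character, command numbers and structure are hypothetical):

struct example_cfg {
        __u32 rate;
        __u32 flags;
};

#define EXAMPLE_IOC_MAGIC       'E'
#define EXAMPLE_IOC_RESET       _IO(EXAMPLE_IOC_MAGIC, 0)
#define EXAMPLE_IOC_GETCFG      _IOR(EXAMPLE_IOC_MAGIC, 1, struct example_cfg)
#define EXAMPLE_IOC_SETCFG      _IOW(EXAMPLE_IOC_MAGIC, 2, struct example_cfg)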
+ */ +extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count); + +extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count); + +#ifdef CONFIG_HAS_IOPORT_MAP +/* Create a virtual mapping cookie for an IO port range */ +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *); +#endif + +#ifndef ARCH_HAS_IOREMAP_WC +#define ioremap_wc ioremap_nocache +#endif + +#ifndef ARCH_HAS_IOREMAP_WT +#define ioremap_wt ioremap_nocache +#endif + +#ifdef CONFIG_PCI +/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */ +struct pci_dev; +extern void pci_iounmap(struct pci_dev *dev, void __iomem *); +#elif defined(CONFIG_GENERIC_IOMAP) +struct pci_dev; +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ } +#endif + +#include + +#endif diff --git a/include/asm-generic/irq.h b/include/asm-generic/irq.h new file mode 100644 index 000000000..da21de991 --- /dev/null +++ b/include/asm-generic/irq.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_IRQ_H +#define __ASM_GENERIC_IRQ_H + +/* + * NR_IRQS is the upper bound of how many interrupts can be handled + * in the platform. It is used to size the static irq_map array, + * so don't make it too big. + */ +#ifndef NR_IRQS +#define NR_IRQS 64 +#endif + +static inline int irq_canonicalize(int irq) +{ + return irq; +} + +#endif /* __ASM_GENERIC_IRQ_H */ diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h new file mode 100644 index 000000000..6bf9355fa --- /dev/null +++ b/include/asm-generic/irq_regs.h @@ -0,0 +1,37 @@ +/* Fallback per-CPU frame pointer holder + * + * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
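A sketch of the iomap cookie interface declared above as a PCI driver might use it; the BAR number and register offsets are hypothetical, and pci_iomap() maps the whole BAR when the length argument is 0.

static int example_ring_doorbell(struct pci_dev *pdev)
{
        void __iomem *regs = pci_iomap(pdev, 0, 0);

        if (!regs)
                return -ENOMEM;
        pr_debug("device version %#x\n", ioread32(regs));
        iowrite32(1, regs + 0x10);              /* hypothetical DOORBELL register */
        pci_iounmap(pdev, regs);
        return 0;
}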
+ */ + +#ifndef _ASM_GENERIC_IRQ_REGS_H +#define _ASM_GENERIC_IRQ_REGS_H + +#include + +/* + * Per-cpu current frame pointer - the location of the last exception frame on + * the stack + */ +DECLARE_PER_CPU(struct pt_regs *, __irq_regs); + +static inline struct pt_regs *get_irq_regs(void) +{ + return __this_cpu_read(__irq_regs); +} + +static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) +{ + struct pt_regs *old_regs; + + old_regs = __this_cpu_read(__irq_regs); + __this_cpu_write(__irq_regs, new_regs); + return old_regs; +} + +#endif /* _ASM_GENERIC_IRQ_REGS_H */ diff --git a/include/asm-generic/irq_work.h b/include/asm-generic/irq_work.h new file mode 100644 index 000000000..d5dce06f7 --- /dev/null +++ b/include/asm-generic/irq_work.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_IRQ_WORK_H +#define __ASM_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + return false; +} + +#endif /* __ASM_IRQ_WORK_H */ + diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h new file mode 100644 index 000000000..19ccbf483 --- /dev/null +++ b/include/asm-generic/irqflags.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_IRQFLAGS_H +#define __ASM_GENERIC_IRQFLAGS_H + +/* + * All architectures should implement at least the first two functions, + * usually inline assembly will be the best way. + */ +#ifndef ARCH_IRQ_DISABLED +#define ARCH_IRQ_DISABLED 0 +#define ARCH_IRQ_ENABLED 1 +#endif + +/* read interrupt enabled status */ +#ifndef arch_local_save_flags +unsigned long arch_local_save_flags(void); +#endif + +/* set interrupt enabled status */ +#ifndef arch_local_irq_restore +void arch_local_irq_restore(unsigned long flags); +#endif + +/* get status and disable interrupts */ +#ifndef arch_local_irq_save +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + flags = arch_local_save_flags(); + arch_local_irq_restore(ARCH_IRQ_DISABLED); + return flags; +} +#endif + +/* test flags */ +#ifndef arch_irqs_disabled_flags +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return flags == ARCH_IRQ_DISABLED; +} +#endif + +/* unconditionally enable interrupts */ +#ifndef arch_local_irq_enable +static inline void arch_local_irq_enable(void) +{ + arch_local_irq_restore(ARCH_IRQ_ENABLED); +} +#endif + +/* unconditionally disable interrupts */ +#ifndef arch_local_irq_disable +static inline void arch_local_irq_disable(void) +{ + arch_local_irq_restore(ARCH_IRQ_DISABLED); +} +#endif + +/* test hardware interrupt enable bit */ +#ifndef arch_irqs_disabled +static inline int arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} +#endif + +#endif /* __ASM_GENERIC_IRQFLAGS_H */ diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h new file mode 100644 index 000000000..2b10b31b0 --- /dev/null +++ b/include/asm-generic/kdebug.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_KDEBUG_H +#define _ASM_GENERIC_KDEBUG_H + +enum die_val { + DIE_UNUSED, + DIE_OOPS = 1, +}; + +#endif /* _ASM_GENERIC_KDEBUG_H */ diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h new file mode 100644 index 000000000..9f95b7b63 --- /dev/null +++ b/include/asm-generic/kmap_types.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_KMAP_TYPES_H +#define _ASM_GENERIC_KMAP_TYPES_H + +#ifdef __WITH_KM_FENCE +# define KM_TYPE_NR 41 +#else +# define KM_TYPE_NR 20 
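A minimal sketch of the fallback irq-flags helpers defined above; in practice code uses the local_irq_save()/local_irq_restore() wrappers from <linux/irqflags.h>, which expand to these arch_* calls.

static void example_short_critical_section(void)
{
        unsigned long flags;

        flags = arch_local_irq_save();          /* interrupts disabled, old state saved */
        /* ... touch state that an interrupt handler also touches ... */
        arch_local_irq_restore(flags);          /* restore the previous interrupt state */
}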
+#endif + +#endif diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h new file mode 100644 index 000000000..4a982089c --- /dev/null +++ b/include/asm-generic/kprobes.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_KPROBES_H +#define _ASM_GENERIC_KPROBES_H + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +#ifdef CONFIG_KPROBES +/* + * Blacklist ganerating macro. Specify functions which is not probed + * by using this macro. + */ +# define __NOKPROBE_SYMBOL(fname) \ +static unsigned long __used \ + __attribute__((__section__("_kprobe_blacklist"))) \ + _kbl_addr_##fname = (unsigned long)fname; +# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) +/* Use this to forbid a kprobes attach on very low level functions */ +# define __kprobes __attribute__((__section__(".kprobes.text"))) +# define nokprobe_inline __always_inline +#else +# define NOKPROBE_SYMBOL(fname) +# define __kprobes +# define nokprobe_inline inline +#endif +#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ + +#endif /* _ASM_GENERIC_KPROBES_H */ diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h new file mode 100644 index 000000000..728e5c570 --- /dev/null +++ b/include/asm-generic/kvm_para.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_KVM_PARA_H +#define _ASM_GENERIC_KVM_PARA_H + +#include + + +/* + * This function is used by architectures that support kvm to avoid issuing + * false soft lockup messages. + */ +static inline bool kvm_check_and_clear_guest_paused(void) +{ + return false; +} + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +static inline unsigned int kvm_arch_para_hints(void) +{ + return 0; +} + +static inline bool kvm_para_available(void) +{ + return false; +} + +#endif diff --git a/include/asm-generic/linkage.h b/include/asm-generic/linkage.h new file mode 100644 index 000000000..fef7a01e5 --- /dev/null +++ b/include/asm-generic/linkage.h @@ -0,0 +1,8 @@ +#ifndef __ASM_GENERIC_LINKAGE_H +#define __ASM_GENERIC_LINKAGE_H +/* + * linux/linkage.h provides reasonable defaults. + * an architecture can override them by providing its own version. + */ + +#endif /* __ASM_GENERIC_LINKAGE_H */ diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h new file mode 100644 index 000000000..fca7f1d84 --- /dev/null +++ b/include/asm-generic/local.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_LOCAL_H +#define _ASM_GENERIC_LOCAL_H + +#include +#include +#include + +/* + * A signed long type for operations which are atomic for a single CPU. + * Usually used in combination with per-cpu variables. + * + * This is the default implementation, which uses atomic_long_t. Which is + * rather pointless. The whole point behind local_t is that some processors + * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs + * running on this CPU. local_t allows exploitation of such capabilities. + */ + +/* Implement in terms of atomics. */ + +/* Don't use typedef: don't want them to be mixed with atomic_t's. 
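A sketch of the kprobes blacklist macro defined above, marking a hypothetical low-level helper so that kprobes will refuse to attach to it:

static int example_handle_fault(struct pt_regs *regs)
{
        /* runs in a context where a kprobe trap would recurse */
        return 0;
}
NOKPROBE_SYMBOL(example_handle_fault);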
*/ +typedef struct +{ + atomic_long_t a; +} local_t; + +#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local_read(l) atomic_long_read(&(l)->a) +#define local_set(l,i) atomic_long_set((&(l)->a),(i)) +#define local_inc(l) atomic_long_inc(&(l)->a) +#define local_dec(l) atomic_long_dec(&(l)->a) +#define local_add(i,l) atomic_long_add((i),(&(l)->a)) +#define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) + +#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a)) +#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a) +#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a) +#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a)) +#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a)) +#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a)) +#define local_inc_return(l) atomic_long_inc_return(&(l)->a) + +#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) +#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) +#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u)) +#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. */ +#define __local_inc(l) local_set((l), local_read(l) + 1) +#define __local_dec(l) local_set((l), local_read(l) - 1) +#define __local_add(i,l) local_set((l), local_read(l) + (i)) +#define __local_sub(i,l) local_set((l), local_read(l) - (i)) + +#endif /* _ASM_GENERIC_LOCAL_H */ diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h new file mode 100644 index 000000000..765be0b7d --- /dev/null +++ b/include/asm-generic/local64.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_LOCAL64_H +#define _ASM_GENERIC_LOCAL64_H + +#include +#include + +/* + * A signed long type for operations which are atomic for a single CPU. + * Usually used in combination with per-cpu variables. + * + * This is the default implementation, which uses atomic64_t. Which is + * rather pointless. The whole point behind local64_t is that some processors + * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs + * running on this CPU. local64_t allows exploitation of such capabilities. + */ + +/* Implement in terms of atomics. 
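An illustrative use of local_t (assumed example, not part of the header; the counter name is made up): pair it with a per-CPU variable so each CPU only ever touches its own instance, giving counting that is safe against interrupts on that CPU without explicitly disabling them on architectures that override these generics.

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, demo_events);

/* Called from a preemption-safe context (e.g. an interrupt handler). */
static void demo_count_event(void)
{
	local_inc(this_cpu_ptr(&demo_events));
}

/* Summing is done with plain reads of every CPU's counter. */
static unsigned long demo_total_events(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(demo_events, cpu));
	return sum;
}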
*/ + +#if BITS_PER_LONG == 64 + +#include + +typedef struct { + local_t a; +} local64_t; + +#define LOCAL64_INIT(i) { LOCAL_INIT(i) } + +#define local64_read(l) local_read(&(l)->a) +#define local64_set(l,i) local_set((&(l)->a),(i)) +#define local64_inc(l) local_inc(&(l)->a) +#define local64_dec(l) local_dec(&(l)->a) +#define local64_add(i,l) local_add((i),(&(l)->a)) +#define local64_sub(i,l) local_sub((i),(&(l)->a)) + +#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a)) +#define local64_dec_and_test(l) local_dec_and_test(&(l)->a) +#define local64_inc_and_test(l) local_inc_and_test(&(l)->a) +#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a)) +#define local64_add_return(i, l) local_add_return((i), (&(l)->a)) +#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a)) +#define local64_inc_return(l) local_inc_return(&(l)->a) + +#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n)) +#define local64_xchg(l, n) local_xchg((&(l)->a), (n)) +#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u)) +#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. */ +#define __local64_inc(l) local64_set((l), local64_read(l) + 1) +#define __local64_dec(l) local64_set((l), local64_read(l) - 1) +#define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) +#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) + +#else /* BITS_PER_LONG != 64 */ + +#include + +/* Don't use typedef: don't want them to be mixed with atomic_t's. */ +typedef struct { + atomic64_t a; +} local64_t; + +#define LOCAL64_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local64_read(l) atomic64_read(&(l)->a) +#define local64_set(l,i) atomic64_set((&(l)->a),(i)) +#define local64_inc(l) atomic64_inc(&(l)->a) +#define local64_dec(l) atomic64_dec(&(l)->a) +#define local64_add(i,l) atomic64_add((i),(&(l)->a)) +#define local64_sub(i,l) atomic64_sub((i),(&(l)->a)) + +#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a)) +#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a) +#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a) +#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a)) +#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a)) +#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a)) +#define local64_inc_return(l) atomic64_inc_return(&(l)->a) + +#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n)) +#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n)) +#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u)) +#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. 
*/ +#define __local64_inc(l) local64_set((l), local64_read(l) + 1) +#define __local64_dec(l) local64_set((l), local64_read(l) - 1) +#define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) +#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) + +#endif /* BITS_PER_LONG != 64 */ + +#endif /* _ASM_GENERIC_LOCAL64_H */ diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h new file mode 100644 index 000000000..10cd4ffc6 --- /dev/null +++ b/include/asm-generic/mcs_spinlock.h @@ -0,0 +1,13 @@ +#ifndef __ASM_MCS_SPINLOCK_H +#define __ASM_MCS_SPINLOCK_H + +/* + * Architectures can define their own: + * + * arch_mcs_spin_lock_contended(l) + * arch_mcs_spin_unlock_contended(l) + * + * See kernel/locking/mcs_spinlock.c. + */ + +#endif /* __ASM_MCS_SPINLOCK_H */ diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h new file mode 100644 index 000000000..7637fb46b --- /dev/null +++ b/include/asm-generic/memory_model.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MEMORY_MODEL_H +#define __ASM_MEMORY_MODEL_H + +#include + +#ifndef __ASSEMBLY__ + +#if defined(CONFIG_FLATMEM) + +#ifndef ARCH_PFN_OFFSET +#define ARCH_PFN_OFFSET (0UL) +#endif + +#elif defined(CONFIG_DISCONTIGMEM) + +#ifndef arch_pfn_to_nid +#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn) +#endif + +#ifndef arch_local_page_offset +#define arch_local_page_offset(pfn, nid) \ + ((pfn) - NODE_DATA(nid)->node_start_pfn) +#endif + +#endif /* CONFIG_DISCONTIGMEM */ + +/* + * supports 3 memory models. + */ +#if defined(CONFIG_FLATMEM) + +#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) +#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ + ARCH_PFN_OFFSET) +#elif defined(CONFIG_DISCONTIGMEM) + +#define __pfn_to_page(pfn) \ +({ unsigned long __pfn = (pfn); \ + unsigned long __nid = arch_pfn_to_nid(__pfn); \ + NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ +}) + +#define __page_to_pfn(pg) \ +({ const struct page *__pg = (pg); \ + struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ + (unsigned long)(__pg - __pgdat->node_mem_map) + \ + __pgdat->node_start_pfn; \ +}) + +#elif defined(CONFIG_SPARSEMEM_VMEMMAP) + +/* memmap is virtually contiguous. */ +#define __pfn_to_page(pfn) (vmemmap + (pfn)) +#define __page_to_pfn(page) (unsigned long)((page) - vmemmap) + +#elif defined(CONFIG_SPARSEMEM) +/* + * Note: section's mem_map is encoded to reflect its start_pfn. 
+ * section[i].section_mem_map == mem_map's address - start_pfn; + */ +#define __page_to_pfn(pg) \ +({ const struct page *__pg = (pg); \ + int __sec = page_to_section(__pg); \ + (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ +}) + +#define __pfn_to_page(pfn) \ +({ unsigned long __pfn = (pfn); \ + struct mem_section *__sec = __pfn_to_section(__pfn); \ + __section_mem_map_addr(__sec) + __pfn; \ +}) +#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ + +/* + * Convert a physical address to a Page Frame Number and back + */ +#define __phys_to_pfn(paddr) PHYS_PFN(paddr) +#define __pfn_to_phys(pfn) PFN_PHYS(pfn) + +#define page_to_pfn __page_to_pfn +#define pfn_to_page __pfn_to_page + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h new file mode 100644 index 000000000..5ff0e5193 --- /dev/null +++ b/include/asm-generic/mm-arch-hooks.h @@ -0,0 +1,16 @@ +/* + * Architecture specific mm hooks + */ + +#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H +#define _ASM_GENERIC_MM_ARCH_HOOKS_H + +/* + * This file should be included through arch/../include/asm/Kbuild for + * the architecture which doesn't need specific mm hooks. + * + * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h + * are used. + */ + +#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */ diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h new file mode 100644 index 000000000..8ac4e68a1 --- /dev/null +++ b/include/asm-generic/mm_hooks.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap + * and arch_unmap to be included in asm-FOO/mmu_context.h for any + * arch FOO which doesn't need to hook these. + */ +#ifndef _ASM_GENERIC_MM_HOOKS_H +#define _ASM_GENERIC_MM_HOOKS_H + +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ + return 0; +} + +static inline void arch_exit_mmap(struct mm_struct *mm) +{ +} + +static inline void arch_unmap(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool execute, bool foreign) +{ + /* by default, allow everything */ + return true; +} +#endif /* _ASM_GENERIC_MM_HOOKS_H */ diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h new file mode 100644 index 000000000..061838037 --- /dev/null +++ b/include/asm-generic/mmu.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MMU_H +#define __ASM_GENERIC_MMU_H + +/* + * This is the mmu.h header for nommu implementations. + * Architectures with an MMU need something more complex. + */ +#ifndef __ASSEMBLY__ +typedef struct { + unsigned long end_brk; + +#ifdef CONFIG_BINFMT_ELF_FDPIC + unsigned long exec_fdpic_loadmap; + unsigned long interp_fdpic_loadmap; +#endif +} mm_context_t; +#endif + +#endif /* __ASM_GENERIC_MMU_H */ diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h new file mode 100644 index 000000000..6be9106fb --- /dev/null +++ b/include/asm-generic/mmu_context.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MMU_CONTEXT_H +#define __ASM_GENERIC_MMU_CONTEXT_H + +/* + * Generic hooks for NOMMU architectures, which do not need to do + * anything special here. 
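A sketch of how an architecture opts out of one of these no-op hooks (illustrative; modelled loosely on architectures that clear their vDSO base on unmap, and the mm->context.vdso_base field is assumed here rather than taken from this patch): the arch simply provides its own definition instead of pulling in asm-generic/mm_hooks.h, and generic code keeps calling the hook unconditionally.

/* Hypothetical arch override, declared in the arch's mmu_context.h. */
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/* Forget the vDSO mapping if this munmap() range covers it. */
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}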
+ */ + +#include + +struct task_struct; +struct mm_struct; + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + return 0; +} + +static inline void destroy_context(struct mm_struct *mm) +{ +} + +static inline void deactivate_mm(struct task_struct *task, + struct mm_struct *mm) +{ +} + +static inline void switch_mm(struct mm_struct *prev, + struct mm_struct *next, + struct task_struct *tsk) +{ +} + +static inline void activate_mm(struct mm_struct *prev_mm, + struct mm_struct *next_mm) +{ +} + +#endif /* __ASM_GENERIC_MMU_CONTEXT_H */ diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h new file mode 100644 index 000000000..98e1541b7 --- /dev/null +++ b/include/asm-generic/module.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MODULE_H +#define __ASM_GENERIC_MODULE_H + +/* + * Many architectures just need a simple module + * loader without arch specific data. + */ +#ifndef CONFIG_HAVE_MOD_ARCH_SPECIFIC +struct mod_arch_specific +{ +}; +#endif + +#ifdef CONFIG_64BIT +#define Elf_Shdr Elf64_Shdr +#define Elf_Phdr Elf64_Phdr +#define Elf_Sym Elf64_Sym +#define Elf_Dyn Elf64_Dyn +#define Elf_Ehdr Elf64_Ehdr +#define Elf_Addr Elf64_Addr +#ifdef CONFIG_MODULES_USE_ELF_REL +#define Elf_Rel Elf64_Rel +#endif +#ifdef CONFIG_MODULES_USE_ELF_RELA +#define Elf_Rela Elf64_Rela +#endif +#define ELF_R_TYPE(X) ELF64_R_TYPE(X) +#define ELF_R_SYM(X) ELF64_R_SYM(X) + +#else /* CONFIG_64BIT */ + +#define Elf_Shdr Elf32_Shdr +#define Elf_Phdr Elf32_Phdr +#define Elf_Sym Elf32_Sym +#define Elf_Dyn Elf32_Dyn +#define Elf_Ehdr Elf32_Ehdr +#define Elf_Addr Elf32_Addr +#ifdef CONFIG_MODULES_USE_ELF_REL +#define Elf_Rel Elf32_Rel +#endif +#ifdef CONFIG_MODULES_USE_ELF_RELA +#define Elf_Rela Elf32_Rela +#endif +#define ELF_R_TYPE(X) ELF32_R_TYPE(X) +#define ELF_R_SYM(X) ELF32_R_SYM(X) +#endif + +#endif /* __ASM_GENERIC_MODULE_H */ diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h new file mode 100644 index 000000000..e6795f088 --- /dev/null +++ b/include/asm-generic/msi.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MSI_H +#define __ASM_GENERIC_MSI_H + +#include + +#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS +# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2 +#endif + +struct msi_desc; + +/** + * struct msi_alloc_info - Default structure for MSI interrupt allocation. + * @desc: Pointer to msi descriptor + * @hwirq: Associated hw interrupt number in the domain + * @scratchpad: Storage for implementation specific scratch data + * + * Architectures can provide their own implementation by not including + * asm-generic/msi.h into their arch specific header file. + */ +typedef struct msi_alloc_info { + struct msi_desc *desc; + irq_hw_number_t hwirq; + union { + unsigned long ul; + void *ptr; + } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS]; +} msi_alloc_info_t; + +#define GENERIC_MSI_DOMAIN_OPS 1 + +#endif diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h new file mode 100644 index 000000000..27bf3377b --- /dev/null +++ b/include/asm-generic/page.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_PAGE_H +#define __ASM_GENERIC_PAGE_H +/* + * Generic page.h implementation, for NOMMU architectures. + * This provides the dummy definitions for the memory management. 
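A sketch of what the Elf_* aliases above buy an architecture's module loader (illustrative only; assumes CONFIG_MODULES_USE_ELF_RELA so that Elf_Rela exists, and the actual relocation dispatch is elided): the same source handles 32-bit and 64-bit ELF because CONFIG_64BIT selects the widths.

#include <linux/elf.h>
#include <linux/moduleloader.h>

/* Width-independent walk over one SHT_RELA section. */
static int demo_walk_relas(Elf_Shdr *sechdrs, unsigned int relsec)
{
	Elf_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(*rela);

	for (i = 0; i < n; i++) {
		unsigned long type = ELF_R_TYPE(rela[i].r_info);
		unsigned long sym = ELF_R_SYM(rela[i].r_info);

		/* ...apply relocation 'type' against symbol 'sym' here... */
		(void)type;
		(void)sym;
	}
	return 0;
}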
+ */ + +#ifdef CONFIG_MMU +#error need to prove a real asm/page.h +#endif + + +/* PAGE_SHIFT determines the page size */ + +#define PAGE_SHIFT 12 +#ifdef __ASSEMBLY__ +#define PAGE_SIZE (1 << PAGE_SHIFT) +#else +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#endif +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#include + +#ifndef __ASSEMBLY__ + +#define clear_page(page) memset((page), 0, PAGE_SIZE) +#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +/* + * These are used to make use of C type-checking.. + */ +typedef struct { + unsigned long pte; +} pte_t; +typedef struct { + unsigned long pmd[16]; +} pmd_t; +typedef struct { + unsigned long pgd; +} pgd_t; +typedef struct { + unsigned long pgprot; +} pgprot_t; +typedef struct page *pgtable_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((&x)->pmd[0]) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +extern unsigned long memory_start; +extern unsigned long memory_end; + +#endif /* !__ASSEMBLY__ */ + +#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS +#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS) +#else +#define PAGE_OFFSET (0) +#endif + +#ifndef ARCH_PFN_OFFSET +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) +#endif + +#ifndef __ASSEMBLY__ + +#define __va(x) ((void *)((unsigned long) (x))) +#define __pa(x) ((unsigned long) (x)) + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) + +#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr)) +#define page_to_virt(page) pfn_to_virt(page_to_pfn(page)) + +#ifndef page_to_phys +#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) +#endif + +#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) + +#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ + ((void *)(kaddr) < (void *)memory_end)) + +#endif /* __ASSEMBLY__ */ + +#include +#include + +#endif /* __ASM_GENERIC_PAGE_H */ diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h new file mode 100644 index 000000000..8d3009dd2 --- /dev/null +++ b/include/asm-generic/param.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_PARAM_H +#define __ASM_GENERIC_PARAM_H + +#include + +# undef HZ +# define HZ CONFIG_HZ /* Internal kernel timer frequency */ +# define USER_HZ 100 /* some user interfaces are */ +# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ +#endif /* __ASM_GENERIC_PARAM_H */ diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h new file mode 100644 index 000000000..483991d61 --- /dev/null +++ b/include/asm-generic/parport.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_PARPORT_H +#define __ASM_GENERIC_PARPORT_H + +/* + * An ISA bus may have i8255 parallel ports at well-known + * locations in the I/O space, which are scanned by + * parport_pc_find_isa_ports. + * + * Without ISA support, the driver will only attach + * to devices on the PCI bus. 
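A worked sketch of the NOMMU conversions above (the address is arbitrary and assumes a PAGE_OFFSET of 0, so __pa() is an identity cast): virt_to_page() is just a pfn computation followed by the memory-model pfn_to_page() lookup.

#include <asm/page.h>

static struct page *demo_lookup(void)
{
	void *vaddr = (void *)0x00203000UL;	/* hypothetical RAM address */
	unsigned long pfn = virt_to_pfn(vaddr);	/* 0x00203000 >> 12 = 0x203 */

	/* Under FLATMEM this resolves to &mem_map[pfn - ARCH_PFN_OFFSET]. */
	return pfn_to_page(pfn);
}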
+ */ + +static int parport_pc_find_isa_ports(int autoirq, int autodma); +static int parport_pc_find_nonpci_ports(int autoirq, int autodma) +{ +#ifdef CONFIG_ISA + return parport_pc_find_isa_ports(autoirq, autodma); +#else + return 0; +#endif +} + +#endif /* __ASM_GENERIC_PARPORT_H */ diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h new file mode 100644 index 000000000..6bb3cd3d6 --- /dev/null +++ b/include/asm-generic/pci.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/asm-generic/pci.h + * + * Copyright (C) 2003 Russell King + */ +#ifndef _ASM_GENERIC_PCI_H +#define _ASM_GENERIC_PCI_H + +#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + return channel ? 15 : 14; +} +#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ + +#endif /* _ASM_GENERIC_PCI_H */ diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h new file mode 100644 index 000000000..d4f16dcc2 --- /dev/null +++ b/include/asm-generic/pci_iomap.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Generic I/O port emulation. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ +#ifndef __ASM_GENERIC_PCI_IOMAP_H +#define __ASM_GENERIC_PCI_IOMAP_H + +struct pci_dev; +#ifdef CONFIG_PCI +/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); +extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max); +extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen); +extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen); +/* Create a virtual mapping cookie for a port on a given PCI device. + * Do not call this directly, it exists to make it easier for architectures + * to override */ +#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP +extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port, + unsigned int nr); +#else +#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr)) +#endif + +#elif defined(CONFIG_GENERIC_PCI_IOMAP) +static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) +{ + return NULL; +} + +static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max) +{ + return NULL; +} +static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen) +{ + return NULL; +} +static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen) +{ + return NULL; +} +#endif + +#endif /* __ASM_GENERIC_IO_H */ diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h new file mode 100644 index 000000000..1817a8415 --- /dev/null +++ b/include/asm-generic/percpu.h @@ -0,0 +1,448 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_PERCPU_H_ +#define _ASM_GENERIC_PERCPU_H_ + +#include +#include +#include + +#ifdef CONFIG_SMP + +/* + * per_cpu_offset() is the offset that has to be added to a + * percpu variable to get to the instance for a certain processor. + * + * Most arches use the __per_cpu_offset array for those offsets but + * some arches have their own ways of determining the offset (x86_64, s390). 
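A typical driver-side use of the mapping helpers declared above (a sketch; the device, BAR number and register offsets are hypothetical): the cookie returned by pci_iomap() hides whether BAR 0 is memory- or I/O-mapped, and is always accessed through the ioread/iowrite accessors.

#include <linux/pci.h>
#include <linux/io.h>

static int demo_probe_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;
	u32 ver;

	regs = pci_iomap(pdev, 0, 0);		/* 0 = map the whole BAR */
	if (!regs)
		return -ENOMEM;

	ver = ioread32(regs + 0x00);		/* hypothetical ID register     */
	iowrite32(0x1, regs + 0x04);		/* hypothetical enable register */

	pci_iounmap(pdev, regs);
	return ver ? 0 : -ENODEV;
}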
+ */ +#ifndef __per_cpu_offset +extern unsigned long __per_cpu_offset[NR_CPUS]; + +#define per_cpu_offset(x) (__per_cpu_offset[x]) +#endif + +/* + * Determine the offset for the currently active processor. + * An arch may define __my_cpu_offset to provide a more effective + * means of obtaining the offset to the per cpu variables of the + * current processor. + */ +#ifndef __my_cpu_offset +#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) +#endif +#ifdef CONFIG_DEBUG_PREEMPT +#define my_cpu_offset per_cpu_offset(smp_processor_id()) +#else +#define my_cpu_offset __my_cpu_offset +#endif + +/* + * Arch may define arch_raw_cpu_ptr() to provide more efficient address + * translations for raw_cpu_ptr(). + */ +#ifndef arch_raw_cpu_ptr +#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) +#endif + +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +extern void setup_per_cpu_areas(void); +#endif + +#endif /* SMP */ + +#ifndef PER_CPU_BASE_SECTION +#ifdef CONFIG_SMP +#define PER_CPU_BASE_SECTION ".data..percpu" +#else +#define PER_CPU_BASE_SECTION ".data" +#endif +#endif + +#ifndef PER_CPU_ATTRIBUTES +#define PER_CPU_ATTRIBUTES +#endif + +#ifndef PER_CPU_DEF_ATTRIBUTES +#define PER_CPU_DEF_ATTRIBUTES +#endif + +#define raw_cpu_generic_read(pcp) \ +({ \ + *raw_cpu_ptr(&(pcp)); \ +}) + +#define raw_cpu_generic_to_op(pcp, val, op) \ +do { \ + *raw_cpu_ptr(&(pcp)) op val; \ +} while (0) + +#define raw_cpu_generic_add_return(pcp, val) \ +({ \ + typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ + \ + *__p += val; \ + *__p; \ +}) + +#define raw_cpu_generic_xchg(pcp, nval) \ +({ \ + typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ + typeof(pcp) __ret; \ + __ret = *__p; \ + *__p = nval; \ + __ret; \ +}) + +#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ +({ \ + typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ + typeof(pcp) __ret; \ + __ret = *__p; \ + if (__ret == (oval)) \ + *__p = nval; \ + __ret; \ +}) + +#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ \ + typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1)); \ + typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2)); \ + int __ret = 0; \ + if (*__p1 == (oval1) && *__p2 == (oval2)) { \ + *__p1 = nval1; \ + *__p2 = nval2; \ + __ret = 1; \ + } \ + (__ret); \ +}) + +#define __this_cpu_generic_read_nopreempt(pcp) \ +({ \ + typeof(pcp) __ret; \ + preempt_disable_notrace(); \ + __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \ + preempt_enable_notrace(); \ + __ret; \ +}) + +#define __this_cpu_generic_read_noirq(pcp) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_read(pcp); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_read(pcp) \ +({ \ + typeof(pcp) __ret; \ + if (__native_word(pcp)) \ + __ret = __this_cpu_generic_read_nopreempt(pcp); \ + else \ + __ret = __this_cpu_generic_read_noirq(pcp); \ + __ret; \ +}) + +#define this_cpu_generic_to_op(pcp, val, op) \ +do { \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + raw_cpu_generic_to_op(pcp, val, op); \ + raw_local_irq_restore(__flags); \ +} while (0) + + +#define this_cpu_generic_add_return(pcp, val) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_add_return(pcp, val); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_xchg(pcp, nval) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_xchg(pcp, nval); \ + 
raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_cmpxchg(pcp, oval, nval) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ \ + int __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ + oval1, oval2, nval1, nval2); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#ifndef raw_cpu_read_1 +#define raw_cpu_read_1(pcp) raw_cpu_generic_read(pcp) +#endif +#ifndef raw_cpu_read_2 +#define raw_cpu_read_2(pcp) raw_cpu_generic_read(pcp) +#endif +#ifndef raw_cpu_read_4 +#define raw_cpu_read_4(pcp) raw_cpu_generic_read(pcp) +#endif +#ifndef raw_cpu_read_8 +#define raw_cpu_read_8(pcp) raw_cpu_generic_read(pcp) +#endif + +#ifndef raw_cpu_write_1 +#define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_2 +#define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_4 +#define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_8 +#define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif + +#ifndef raw_cpu_add_1 +#define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_2 +#define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_4 +#define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_8 +#define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif + +#ifndef raw_cpu_and_1 +#define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_2 +#define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_4 +#define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_8 +#define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif + +#ifndef raw_cpu_or_1 +#define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_2 +#define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_4 +#define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_8 +#define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif + +#ifndef raw_cpu_add_return_1 +#define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_2 +#define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_4 +#define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_8 +#define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif + +#ifndef raw_cpu_xchg_1 +#define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_2 +#define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_4 +#define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_8 +#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif + +#ifndef raw_cpu_cmpxchg_1 +#define raw_cpu_cmpxchg_1(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif 
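A sketch of how callers are expected to pick between the two families above (illustrative; the statistic is hypothetical): this_cpu_*() is safe from any context because the generic fallbacks bracket the raw operation with a local irq save/restore, while raw_cpu_*() is for callers that have already disabled preemption and interrupts themselves.

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_stat);

static void demo_account(void)
{
	/* Any context: the generic fallback is irq-safe. */
	this_cpu_add(demo_stat, 1);
}

static void demo_account_irqs_off(void)
{
	/* Caller already runs with irqs off and preemption disabled. */
	raw_cpu_add(demo_stat, 1);
}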
+#ifndef raw_cpu_cmpxchg_2 +#define raw_cpu_cmpxchg_2(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_4 +#define raw_cpu_cmpxchg_4(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_8 +#define raw_cpu_cmpxchg_8(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif + +#ifndef raw_cpu_cmpxchg_double_1 +#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_2 +#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_4 +#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_8 +#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif + +#ifndef this_cpu_read_1 +#define this_cpu_read_1(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_2 +#define this_cpu_read_2(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_4 +#define this_cpu_read_4(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_8 +#define this_cpu_read_8(pcp) this_cpu_generic_read(pcp) +#endif + +#ifndef this_cpu_write_1 +#define this_cpu_write_1(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_2 +#define this_cpu_write_2(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_4 +#define this_cpu_write_4(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_8 +#define this_cpu_write_8(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif + +#ifndef this_cpu_add_1 +#define this_cpu_add_1(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_2 +#define this_cpu_add_2(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_4 +#define this_cpu_add_4(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_8 +#define this_cpu_add_8(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif + +#ifndef this_cpu_and_1 +#define this_cpu_and_1(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_2 +#define this_cpu_and_2(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_4 +#define this_cpu_and_4(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_8 +#define this_cpu_and_8(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif + +#ifndef this_cpu_or_1 +#define this_cpu_or_1(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_2 +#define this_cpu_or_2(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_4 +#define this_cpu_or_4(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_8 +#define this_cpu_or_8(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif + +#ifndef this_cpu_add_return_1 +#define this_cpu_add_return_1(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_2 +#define this_cpu_add_return_2(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_4 +#define this_cpu_add_return_4(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_8 
+#define this_cpu_add_return_8(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif + +#ifndef this_cpu_xchg_1 +#define this_cpu_xchg_1(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_2 +#define this_cpu_xchg_2(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_4 +#define this_cpu_xchg_4(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_8 +#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif + +#ifndef this_cpu_cmpxchg_1 +#define this_cpu_cmpxchg_1(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_2 +#define this_cpu_cmpxchg_2(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_4 +#define this_cpu_cmpxchg_4(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_8 +#define this_cpu_cmpxchg_8(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif + +#ifndef this_cpu_cmpxchg_double_1 +#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_2 +#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_4 +#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_8 +#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif + +#endif /* _ASM_GENERIC_PERCPU_H_ */ diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h new file mode 100644 index 000000000..948714c15 --- /dev/null +++ b/include/asm-generic/pgalloc.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_PGALLOC_H +#define __ASM_GENERIC_PGALLOC_H +/* + * an empty file is enough for a nommu architecture + */ +#ifdef CONFIG_MMU +#error need to implement an architecture specific asm/pgalloc.h +#endif + +#define check_pgt_cache() do { } while (0) + +#endif /* __ASM_GENERIC_PGALLOC_H */ diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h new file mode 100644 index 000000000..1d6dd38c0 --- /dev/null +++ b/include/asm-generic/pgtable-nop4d-hack.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PGTABLE_NOP4D_HACK_H +#define _PGTABLE_NOP4D_HACK_H + +#ifndef __ASSEMBLY__ +#include + +#define __PAGETABLE_PUD_FOLDED 1 + +/* + * Having the pud type consist of a pgd gets the size right, and allows + * us to conceptually access the pgd entry that this pud is folded into + * without casting. 
+ */ +typedef struct { pgd_t pgd; } pud_t; + +#define PUD_SHIFT PGDIR_SHIFT +#define PTRS_PER_PUD 1 +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded + * into the pgd entry) + */ +static inline int pgd_none(pgd_t pgd) { return 0; } +static inline int pgd_bad(pgd_t pgd) { return 0; } +static inline int pgd_present(pgd_t pgd) { return 1; } +static inline void pgd_clear(pgd_t *pgd) { } +#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) + +#define pgd_populate(mm, pgd, pud) do { } while (0) +/* + * (puds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) + +static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) +{ + return (pud_t *)pgd; +} + +#define pud_val(x) (pgd_val((x).pgd)) +#define __pud(x) ((pud_t) { __pgd(x) }) + +#define pgd_page(pgd) (pud_page((pud_t){ pgd })) +#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) + +/* + * allocating and freeing a pud is trivial: the 1-entry pud is + * inside the pgd, so has no extra memory associated with it. + */ +#define pud_alloc_one(mm, address) NULL +#define pud_free(mm, x) do { } while (0) +#define __pud_free_tlb(tlb, x, a) do { } while (0) + +#undef pud_addr_end +#define pud_addr_end(addr, end) (end) + +#endif /* __ASSEMBLY__ */ +#endif /* _PGTABLE_NOP4D_HACK_H */ diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h new file mode 100644 index 000000000..04cb91379 --- /dev/null +++ b/include/asm-generic/pgtable-nop4d.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PGTABLE_NOP4D_H +#define _PGTABLE_NOP4D_H + +#ifndef __ASSEMBLY__ + +#define __PAGETABLE_P4D_FOLDED 1 + +typedef struct { pgd_t pgd; } p4d_t; + +#define P4D_SHIFT PGDIR_SHIFT +#define MAX_PTRS_PER_P4D 1 +#define PTRS_PER_P4D 1 +#define P4D_SIZE (1UL << P4D_SHIFT) +#define P4D_MASK (~(P4D_SIZE-1)) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the p4d is never bad, and a p4d always exists (as it's folded + * into the pgd entry) + */ +static inline int pgd_none(pgd_t pgd) { return 0; } +static inline int pgd_bad(pgd_t pgd) { return 0; } +static inline int pgd_present(pgd_t pgd) { return 1; } +static inline void pgd_clear(pgd_t *pgd) { } +#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd)) + +#define pgd_populate(mm, pgd, p4d) do { } while (0) +/* + * (p4ds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pgd(pgdptr, pgdval) set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval }) + +static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +{ + return (p4d_t *)pgd; +} + +#define p4d_val(x) (pgd_val((x).pgd)) +#define __p4d(x) ((p4d_t) { __pgd(x) }) + +#define pgd_page(pgd) (p4d_page((p4d_t){ pgd })) +#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd })) + +/* + * allocating and freeing a p4d is trivial: the 1-entry p4d is + * inside the pgd, so has no extra memory associated with it. 
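An illustrative consequence of the folding above (a sketch, assuming an architecture on the 5-level fix-up path that includes this header): because the single-entry pud lives inside the pgd, pud_alloc_one() never allocates anything and pud_offset() is only a re-typing of the pgd entry pointer.

static pud_t *demo_pud_lookup(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);

	/* pgd_none()/pgd_bad() are compile-time 0 here, so there is nothing to check. */
	return pud_offset(pgd, addr);	/* effectively (pud_t *)pgd */
}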
+ */ +#define p4d_alloc_one(mm, address) NULL +#define p4d_free(mm, x) do { } while (0) +#define __p4d_free_tlb(tlb, x, a) do { } while (0) + +#undef p4d_addr_end +#define p4d_addr_end(addr, end) (end) + +#endif /* __ASSEMBLY__ */ +#endif /* _PGTABLE_NOP4D_H */ diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h new file mode 100644 index 000000000..b85b8271a --- /dev/null +++ b/include/asm-generic/pgtable-nopmd.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PGTABLE_NOPMD_H +#define _PGTABLE_NOPMD_H + +#ifndef __ASSEMBLY__ + +#include + +struct mm_struct; + +#define __PAGETABLE_PMD_FOLDED 1 + +/* + * Having the pmd type consist of a pud gets the size right, and allows + * us to conceptually access the pud entry that this pmd is folded into + * without casting. + */ +typedef struct { pud_t pud; } pmd_t; + +#define PMD_SHIFT PUD_SHIFT +#define PTRS_PER_PMD 1 +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* + * The "pud_xxx()" functions here are trivial for a folded two-level + * setup: the pmd is never bad, and a pmd always exists (as it's folded + * into the pud entry) + */ +static inline int pud_none(pud_t pud) { return 0; } +static inline int pud_bad(pud_t pud) { return 0; } +static inline int pud_present(pud_t pud) { return 1; } +static inline void pud_clear(pud_t *pud) { } +#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud)) + +#define pud_populate(mm, pmd, pte) do { } while (0) + +/* + * (pmds are folded into puds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval }) + +static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) +{ + return (pmd_t *)pud; +} + +#define pmd_val(x) (pud_val((x).pud)) +#define __pmd(x) ((pmd_t) { __pud(x) } ) + +#define pud_page(pud) (pmd_page((pmd_t){ pud })) +#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud })) + +/* + * allocating and freeing a pmd is trivial: the 1-entry pmd is + * inside the pud, so has no extra memory associated with it. + */ +#define pmd_alloc_one(mm, address) NULL +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ +} +#define __pmd_free_tlb(tlb, x, a) do { } while (0) + +#undef pmd_addr_end +#define pmd_addr_end(addr, end) (end) + +#endif /* __ASSEMBLY__ */ + +#endif /* _PGTABLE_NOPMD_H */ diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h new file mode 100644 index 000000000..9bef475db --- /dev/null +++ b/include/asm-generic/pgtable-nopud.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PGTABLE_NOPUD_H +#define _PGTABLE_NOPUD_H + +#ifndef __ASSEMBLY__ + +#ifdef __ARCH_USE_5LEVEL_HACK +#include +#else +#include + +#define __PAGETABLE_PUD_FOLDED 1 + +/* + * Having the pud type consist of a p4d gets the size right, and allows + * us to conceptually access the p4d entry that this pud is folded into + * without casting. 
+ */ +typedef struct { p4d_t p4d; } pud_t; + +#define PUD_SHIFT P4D_SHIFT +#define PTRS_PER_PUD 1 +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* + * The "p4d_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded + * into the p4d entry) + */ +static inline int p4d_none(p4d_t p4d) { return 0; } +static inline int p4d_bad(p4d_t p4d) { return 0; } +static inline int p4d_present(p4d_t p4d) { return 1; } +static inline void p4d_clear(p4d_t *p4d) { } +#define pud_ERROR(pud) (p4d_ERROR((pud).p4d)) + +#define p4d_populate(mm, p4d, pud) do { } while (0) +/* + * (puds are folded into p4ds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval }) + +static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) +{ + return (pud_t *)p4d; +} + +#define pud_val(x) (p4d_val((x).p4d)) +#define __pud(x) ((pud_t) { __p4d(x) }) + +#define p4d_page(p4d) (pud_page((pud_t){ p4d })) +#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d })) + +/* + * allocating and freeing a pud is trivial: the 1-entry pud is + * inside the p4d, so has no extra memory associated with it. + */ +#define pud_alloc_one(mm, address) NULL +#define pud_free(mm, x) do { } while (0) +#define __pud_free_tlb(tlb, x, a) do { } while (0) + +#undef pud_addr_end +#define pud_addr_end(addr, end) (end) + +#endif /* __ASSEMBLY__ */ +#endif /* !__ARCH_USE_5LEVEL_HACK */ +#endif /* _PGTABLE_NOPUD_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h new file mode 100644 index 000000000..1544331be --- /dev/null +++ b/include/asm-generic/pgtable.h @@ -0,0 +1,1155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_PGTABLE_H +#define _ASM_GENERIC_PGTABLE_H + +#include + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_MMU + +#include +#include +#include + +#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \ + defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS +#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED +#endif + +/* + * On almost all architectures and configurations, 0 can be used as the + * upper ceiling to free_pgtables(): on many architectures it has the same + * effect as using TASK_SIZE. However, there is one configuration which + * must impose a more careful limit, to avoid freeing kernel pgtables. 
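For a concrete reading of the consistency check above: a hypothetical configuration that pulls in pgtable-nop4d.h and pgtable-nopud.h but keeps a real pmd evaluates to 5 - 1 - 1 - 0 = 3, so it must set CONFIG_PGTABLE_LEVELS=3. A generic descent then looks like the sketch below (illustrative; mmap_sem and pte locking are omitted, and folded levels reduce the intermediate *_offset() calls to casts).

#include <linux/mm.h>

static pte_t *demo_find_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pte_offset_map(pmd, addr);	/* caller must pte_unmap() */
}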
+ */ +#ifndef USER_PGTABLES_CEILING +#define USER_PGTABLES_CEILING 0UL +#endif + +#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty); +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); +extern int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty); +#else +static inline int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} +static inline int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + int r = 1; + if (!pte_young(pte)) + r = 0; + else + set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); + return r; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + int r = 1; + if (!pmd_young(pmd)) + r = 0; + else + set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); + return r; +} +#else +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#else +/* + * Despite relevant to THP only, this API is called from generic rmap code + * under PageTransHuge(), hence needs a dummy implementation for !THP + */ +static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + pte_clear(mm, address, ptep); + return pte; +} +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + pmd_clear(pmdp); + return pmd; +} +#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR +static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, + pud_t *pudp) +{ + pud_t pud = *pudp; + + pud_clear(pudp); + return pud; +} +#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL +static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, + unsigned long address, 
pmd_t *pmdp, + int full) +{ + return pmdp_huge_get_and_clear(mm, address, pmdp); +} +#endif + +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL +static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pud_t *pudp, + int full) +{ + return pudp_huge_get_and_clear(mm, address, pudp); +} +#endif +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pte_t *ptep, + int full) +{ + pte_t pte; + pte = ptep_get_and_clear(mm, address, ptep); + return pte; +} +#endif + +/* + * Some architectures may be able to avoid expensive synchronization + * primitives when modifications are made to PTE's which are already + * not present, or in the process of an address space destruction. + */ +#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL +static inline void pte_clear_not_present_full(struct mm_struct *mm, + unsigned long address, + pte_t *ptep, + int full) +{ + pte_clear(mm, address, ptep); +} +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH +extern pte_t ptep_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH +extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp); +extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pud_t *pudp); +#endif + +#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT +struct mm_struct; +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) +{ + pte_t old_pte = *ptep; + set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); +} +#endif + +#ifndef pte_savedwrite +#define pte_savedwrite pte_write +#endif + +#ifndef pte_mk_savedwrite +#define pte_mk_savedwrite pte_mkwrite +#endif + +#ifndef pte_clear_savedwrite +#define pte_clear_savedwrite pte_wrprotect +#endif + +#ifndef pmd_savedwrite +#define pmd_savedwrite pmd_write +#endif + +#ifndef pmd_mk_savedwrite +#define pmd_mk_savedwrite pmd_mkwrite +#endif + +#ifndef pmd_clear_savedwrite +#define pmd_clear_savedwrite pmd_wrprotect +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t old_pmd = *pmdp; + set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); +} +#else +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + BUILD_BUG(); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif +#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + pud_t old_pud = *pudp; + + set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); +} +#else +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + BUILD_BUG(); +} +#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ +#endif + +#ifndef pmdp_collapse_flush +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#else +static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + BUILD_BUG(); + return *pmdp; +} +#define pmdp_collapse_flush pmdp_collapse_flush +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT 
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); +#endif + +#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * This is an implementation of pmdp_establish() that is only suitable for an + * architecture that doesn't have hardware dirty/accessed bits. In this case we + * can't race with CPU which sets these bits and non-atomic aproach is fine. + */ +static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) +{ + pmd_t old_pmd = *pmdp; + set_pmd_at(vma->vm_mm, address, pmdp, pmd); + return old_pmd; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_INVALIDATE +extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); +#endif + +#ifndef __HAVE_ARCH_PTE_SAME +static inline int pte_same(pte_t pte_a, pte_t pte_b) +{ + return pte_val(pte_a) == pte_val(pte_b); +} +#endif + +#ifndef __HAVE_ARCH_PTE_UNUSED +/* + * Some architectures provide facilities to virtualization guests + * so that they can flag allocated pages as unused. This allows the + * host to transparently reclaim unused pages. This function returns + * whether the pte's page is unused. + */ +static inline int pte_unused(pte_t pte) +{ + return 0; +} +#endif + +#ifndef pte_access_permitted +#define pte_access_permitted(pte, write) \ + (pte_present(pte) && (!(write) || pte_write(pte))) +#endif + +#ifndef pmd_access_permitted +#define pmd_access_permitted(pmd, write) \ + (pmd_present(pmd) && (!(write) || pmd_write(pmd))) +#endif + +#ifndef pud_access_permitted +#define pud_access_permitted(pud, write) \ + (pud_present(pud) && (!(write) || pud_write(pud))) +#endif + +#ifndef p4d_access_permitted +#define p4d_access_permitted(p4d, write) \ + (p4d_present(p4d) && (!(write) || p4d_write(p4d))) +#endif + +#ifndef pgd_access_permitted +#define pgd_access_permitted(pgd, write) \ + (pgd_present(pgd) && (!(write) || pgd_write(pgd))) +#endif + +#ifndef __HAVE_ARCH_PMD_SAME +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + return pmd_val(pmd_a) == pmd_val(pmd_b); +} + +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + return pud_val(pud_a) == pud_val(pud_b); +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + BUILD_BUG(); + return 0; +} + +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_DO_SWAP_PAGE +/* + * Some architectures support metadata associated with a page. When a + * page is being swapped out, this metadata must be saved so it can be + * restored when the page is swapped back in. SPARC M7 and newer + * processors support an ADI (Application Data Integrity) tag for the + * page as metadata for the page. arch_do_swap_page() can restore this + * metadata when a page is swapped back in. + */ +static inline void arch_do_swap_page(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t pte, pte_t oldpte) +{ + +} +#endif + +#ifndef __HAVE_ARCH_UNMAP_ONE +/* + * Some architectures support metadata associated with a page. When a + * page is being swapped out, this metadata must be saved so it can be + * restored when the page is swapped back in. 
SPARC M7 and newer + * processors support an ADI (Application Data Integrity) tag for the + * page as metadata for the page. arch_unmap_one() can save this + * metadata on a swap-out of a page. + */ +static inline int arch_unmap_one(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t orig_pte) +{ + return 0; +} +#endif + +#ifndef __HAVE_ARCH_PGD_OFFSET_GATE +#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) +#endif + +#ifndef __HAVE_ARCH_MOVE_PTE +#define move_pte(pte, prot, old_addr, new_addr) (pte) +#endif + +#ifndef pte_accessible +# define pte_accessible(mm, pte) ((void)(pte), 1) +#endif + +#ifndef flush_tlb_fix_spurious_fault +#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) +#endif + +#ifndef pgprot_noncached +#define pgprot_noncached(prot) (prot) +#endif + +#ifndef pgprot_writecombine +#define pgprot_writecombine pgprot_noncached +#endif + +#ifndef pgprot_writethrough +#define pgprot_writethrough pgprot_noncached +#endif + +#ifndef pgprot_device +#define pgprot_device pgprot_noncached +#endif + +#ifndef pgprot_modify +#define pgprot_modify pgprot_modify +static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +{ + if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) + newprot = pgprot_noncached(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) + newprot = pgprot_writecombine(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) + newprot = pgprot_device(newprot); + return newprot; +} +#endif + +/* + * When walking page tables, get the address of the next boundary, + * or the end address of the range if that comes earlier. Although no + * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. + */ + +#define pgd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) + +#ifndef p4d_addr_end +#define p4d_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + +#ifndef pud_addr_end +#define pud_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + +#ifndef pmd_addr_end +#define pmd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + +/* + * When walking page tables, we usually want to skip any p?d_none entries; + * and any p?d_bad entries - reporting the error before resetting to none. + * Do the tests inline, but report and clear the bad entry in mm/memory.c. 
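The *_addr_end() helpers above exist for the canonical range-walk loop sketched below (illustrative; the per-entry work is elided, and pgd_none_or_clear_bad() is the helper defined just after this comment): each level steps through its own block size, clamped so the final step stops exactly at 'end'.

static void demo_walk_range(struct mm_struct *mm, unsigned long addr,
			    unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);	/* min(next PGDIR boundary, end) */
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* descend with p4d_addr_end()/pud_addr_end()/pmd_addr_end() ... */
	} while (pgd++, addr = next, addr != end);
}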
+ */ +void pgd_clear_bad(pgd_t *); +void p4d_clear_bad(p4d_t *); +void pud_clear_bad(pud_t *); +void pmd_clear_bad(pmd_t *); + +static inline int pgd_none_or_clear_bad(pgd_t *pgd) +{ + if (pgd_none(*pgd)) + return 1; + if (unlikely(pgd_bad(*pgd))) { + pgd_clear_bad(pgd); + return 1; + } + return 0; +} + +static inline int p4d_none_or_clear_bad(p4d_t *p4d) +{ + if (p4d_none(*p4d)) + return 1; + if (unlikely(p4d_bad(*p4d))) { + p4d_clear_bad(p4d); + return 1; + } + return 0; +} + +static inline int pud_none_or_clear_bad(pud_t *pud) +{ + if (pud_none(*pud)) + return 1; + if (unlikely(pud_bad(*pud))) { + pud_clear_bad(pud); + return 1; + } + return 0; +} + +static inline int pmd_none_or_clear_bad(pmd_t *pmd) +{ + if (pmd_none(*pmd)) + return 1; + if (unlikely(pmd_bad(*pmd))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep) +{ + /* + * Get the current pte state, but zero it out to make it + * non-present, preventing the hardware from asynchronously + * updating it. + */ + return ptep_get_and_clear(mm, addr, ptep); +} + +static inline void __ptep_modify_prot_commit(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, pte_t pte) +{ + /* + * The pte is non-present, so there's no hardware state to + * preserve. + */ + set_pte_at(mm, addr, ptep, pte); +} + +#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION +/* + * Start a pte protection read-modify-write transaction, which + * protects against asynchronous hardware modifications to the pte. + * The intention is not to prevent the hardware from making pte + * updates, but to prevent any updates it may make from being lost. + * + * This does not protect against other software modifications of the + * pte; the appropriate pte lock must be held over the transation. + * + * Note that this interface is intended to be batchable, meaning that + * ptep_modify_prot_commit may not actually update the pte, but merely + * queue the update to be done at some later time. The update must be + * actually committed before the pte lock is released, however. + */ +static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep) +{ + return __ptep_modify_prot_start(mm, addr, ptep); +} + +/* + * Commit an update to a pte, leaving any hardware-controlled bits in + * the PTE unmodified. + */ +static inline void ptep_modify_prot_commit(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, pte_t pte) +{ + __ptep_modify_prot_commit(mm, addr, ptep, pte); +} +#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ +#endif /* CONFIG_MMU */ + +/* + * No-op macros that just return the current protection value. Defined here + * because these macros can be used used even if CONFIG_MMU is not defined. + */ +#ifndef pgprot_encrypted +#define pgprot_encrypted(prot) (prot) +#endif + +#ifndef pgprot_decrypted +#define pgprot_decrypted(prot) (prot) +#endif + +/* + * A facility to provide lazy MMU batching. This allows PTE updates and + * page invalidations to be delayed until a call to leave lazy MMU mode + * is issued. Some architectures may benefit from doing this, and it is + * beneficial for both shadow and direct mode hypervisors, which may batch + * the PTE updates which happen during this window. Note that using this + * interface requires that read hazards be removed from the code. 
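
/*
 * Illustrative sketch, not part of the upstream header: a single-threaded
 * userspace model of the ptep_modify_prot_start()/commit() transaction
 * defined above. The entry is atomically read-and-cleared, so a hardware
 * walker that would otherwise set accessed/dirty bits concurrently has
 * nothing left to update while the protection bits are being edited. The
 * bit layout is invented for the demo.
 */
#include <stdatomic.h>
#include <stdio.h>

#define PTE_PRESENT	0x1UL
#define PTE_WRITE	0x2UL

static _Atomic unsigned long demo_pte = PTE_PRESENT | PTE_WRITE;

static unsigned long modify_prot_start(void)
{
	/* make the entry non-present while it is being edited */
	return atomic_exchange(&demo_pte, 0UL);
}

static void modify_prot_commit(unsigned long val)
{
	/* the entry was non-present, so no hardware update can be lost */
	atomic_store(&demo_pte, val);
}

int main(void)
{
	unsigned long v = modify_prot_start();

	v &= ~PTE_WRITE;			/* write-protect the page */
	modify_prot_commit(v);

	printf("pte = %#lx\n", atomic_load(&demo_pte));
	return 0;
}
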
A read + * hazard could result in the direct mode hypervisor case, since the actual + * write to the page tables may not yet have taken place, so reads though + * a raw PTE pointer after it has been modified are not guaranteed to be + * up to date. This mode can only be entered and left under the protection of + * the page table locks for all page tables which may be modified. In the UP + * case, this is required so that preemption is disabled, and in the SMP case, + * it must synchronize the delayed page table writes properly on other CPUs. + */ +#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE +#define arch_enter_lazy_mmu_mode() do {} while (0) +#define arch_leave_lazy_mmu_mode() do {} while (0) +#define arch_flush_lazy_mmu_mode() do {} while (0) +#endif + +/* + * A facility to provide batching of the reload of page tables and + * other process state with the actual context switch code for + * paravirtualized guests. By convention, only one of the batched + * update (lazy) modes (CPU, MMU) should be active at any given time, + * entry should never be nested, and entry and exits should always be + * paired. This is for sanity of maintaining and reasoning about the + * kernel code. In this case, the exit (end of the context switch) is + * in architecture-specific code, and so doesn't need a generic + * definition. + */ +#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH +#define arch_start_context_switch(prev) do {} while (0) +#endif + +#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY +#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION +static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline int pmd_swp_soft_dirty(pmd_t pmd) +{ + return 0; +} + +static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) +{ + return pmd; +} +#endif +#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */ +static inline int pte_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline int pmd_soft_dirty(pmd_t pmd) +{ + return 0; +} + +static inline pte_t pte_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline pte_t pte_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline int pte_swp_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline int pmd_swp_soft_dirty(pmd_t pmd) +{ + return 0; +} + +static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) +{ + return pmd; +} +#endif + +#ifndef __HAVE_PFNMAP_TRACKING +/* + * Interfaces that can be used by architecture code to keep track of + * memory type of pfn mappings specified by the remap_pfn_range, + * vm_insert_pfn. + */ + +/* + * track_pfn_remap is called when a _new_ pfn mapping is being established + * by remap_pfn_range() for physical range indicated by pfn and size. + */ +static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, + unsigned long size) +{ + return 0; +} + +/* + * track_pfn_insert is called when a _new_ single pfn is established + * by vm_insert_pfn(). + */ +static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + pfn_t pfn) +{ +} + +/* + * track_pfn_copy is called when vma that is covering the pfnmap gets + * copied through copy_page_range(). 
+ */ +static inline int track_pfn_copy(struct vm_area_struct *vma) +{ + return 0; +} + +/* + * untrack_pfn is called while unmapping a pfnmap for a region. + * untrack can be called for a specific region indicated by pfn and size or + * can be for the entire vma (in which case pfn, size are zero). + */ +static inline void untrack_pfn(struct vm_area_struct *vma, + unsigned long pfn, unsigned long size) +{ +} + +/* + * untrack_pfn_moved is called while mremapping a pfnmap for a new region. + */ +static inline void untrack_pfn_moved(struct vm_area_struct *vma) +{ +} +#else +extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, + unsigned long size); +extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + pfn_t pfn); +extern int track_pfn_copy(struct vm_area_struct *vma); +extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, + unsigned long size); +extern void untrack_pfn_moved(struct vm_area_struct *vma); +#endif + +#ifdef __HAVE_COLOR_ZERO_PAGE +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + unsigned long offset_from_zero_pfn = pfn - zero_pfn; + return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); +} + +#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) + +#else +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + return pfn == zero_pfn; +} + +static inline unsigned long my_zero_pfn(unsigned long addr) +{ + extern unsigned long zero_pfn; + return zero_pfn; +} +#endif + +#ifdef CONFIG_MMU + +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_huge(pmd_t pmd) +{ + return 0; +} +#ifndef pmd_write +static inline int pmd_write(pmd_t pmd) +{ + BUG(); + return 0; +} +#endif /* pmd_write */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifndef pud_write +static inline int pud_write(pud_t pud) +{ + BUG(); + return 0; +} +#endif /* pud_write */ + +#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ + (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) +static inline int pud_trans_huge(pud_t pud) +{ + return 0; +} +#endif + +#ifndef pmd_read_atomic +static inline pmd_t pmd_read_atomic(pmd_t *pmdp) +{ + /* + * Depend on compiler for an atomic pmd read. NOTE: this is + * only going to work, if the pmdval_t isn't larger than + * an unsigned long. + */ + return *pmdp; +} +#endif + +#ifndef arch_needs_pgtable_deposit +#define arch_needs_pgtable_deposit() (false) +#endif +/* + * This function is meant to be used by sites walking pagetables with + * the mmap_sem hold in read mode to protect against MADV_DONTNEED and + * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd + * into a null pmd and the transhuge page fault can convert a null pmd + * into an hugepmd or into a regular pmd (if the hugepage allocation + * fails). While holding the mmap_sem in read mode the pmd becomes + * stable and stops changing under us only if it's not null and not a + * transhuge pmd. When those races occurs and this function makes a + * difference vs the standard pmd_none_or_clear_bad, the result is + * undefined so behaving like if the pmd was none is safe (because it + * can return none anyway). The compiler level barrier() is critically + * important to compute the two checks atomically on the same pmdval. 
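
/*
 * Illustrative sketch, not part of the upstream header: why the local
 * pmdval copy plus barrier() matters in
 * pmd_none_or_trans_huge_or_clear_bad() above. Both checks run on one
 * snapshot of the entry, so a concurrent update cannot make them
 * disagree. The entry encoding is invented for this userspace demo.
 */
#include <stdio.h>

#define barrier()	__asm__ __volatile__("" ::: "memory")

typedef struct { unsigned long val; } demo_pmd_t;

static int demo_pmd_none(demo_pmd_t p) { return p.val == 0; }
static int demo_pmd_bad(demo_pmd_t p)  { return (p.val & 0x3) == 0x2; }

static int demo_none_or_clear_bad(demo_pmd_t *pmdp)
{
	demo_pmd_t pmdval = *pmdp;	/* one snapshot ... */

	barrier();			/* ... which the compiler may not re-read below */

	if (demo_pmd_none(pmdval))
		return 1;
	if (demo_pmd_bad(pmdval)) {
		pmdp->val = 0;		/* clear it, as pmd_clear_bad() would */
		return 1;
	}
	return 0;
}

int main(void)
{
	demo_pmd_t good = { .val = 0x1003 };
	demo_pmd_t bad  = { .val = 0x2 };

	printf("good entry skipped: %d\n", demo_none_or_clear_bad(&good));
	printf("bad entry skipped:  %d\n", demo_none_or_clear_bad(&bad));
	printf("bad entry is now none: %d\n", demo_pmd_none(bad));
	return 0;
}
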
+ * + * For 32bit kernels with a 64bit large pmd_t this automatically takes + * care of reading the pmd atomically to avoid SMP race conditions + * against pmd_populate() when the mmap_sem is hold for reading by the + * caller (a special atomic read not done by "gcc" as in the generic + * version above, is also needed when THP is disabled because the page + * fault can populate the pmd from under us). + */ +static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) +{ + pmd_t pmdval = pmd_read_atomic(pmd); + /* + * The barrier will stabilize the pmdval in a register or on + * the stack so that it will stop changing under the code. + * + * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, + * pmd_read_atomic is allowed to return a not atomic pmdval + * (for example pointing to an hugepage that has never been + * mapped in the pmd). The below checks will only care about + * the low part of the pmd with 32bit PAE x86 anyway, with the + * exception of pmd_none(). So the important thing is that if + * the low part of the pmd is found null, the high part will + * be also null or the pmd_none() check below would be + * confused. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + barrier(); +#endif + /* + * !pmd_present() checks for pmd migration entries + * + * The complete check uses is_pmd_migration_entry() in linux/swapops.h + * But using that requires moving current function and pmd_trans_unstable() + * to linux/swapops.h to resovle dependency, which is too much code move. + * + * !pmd_present() is equivalent to is_pmd_migration_entry() currently, + * because !pmd_present() pages can only be under migration not swapped + * out. + * + * pmd_none() is preseved for future condition checks on pmd migration + * entries and not confusing with this function name, although it is + * redundant with !pmd_present(). + */ + if (pmd_none(pmdval) || pmd_trans_huge(pmdval) || + (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval))) + return 1; + if (unlikely(pmd_bad(pmdval))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +/* + * This is a noop if Transparent Hugepage Support is not built into + * the kernel. Otherwise it is equivalent to + * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in + * places that already verified the pmd is not none and they want to + * walk ptes while holding the mmap sem in read mode (write mode don't + * need this). If THP is not enabled, the pmd can't go away under the + * code even if MADV_DONTNEED runs, but if THP is enabled we need to + * run a pmd_trans_unstable before walking the ptes after + * split_huge_page_pmd returns (because it may have run when the pmd + * become null, but then a page fault can map in a THP and not a + * regular page). + */ +static inline int pmd_trans_unstable(pmd_t *pmd) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return pmd_none_or_trans_huge_or_clear_bad(pmd); +#else + return 0; +#endif +} + +#ifndef CONFIG_NUMA_BALANCING +/* + * Technically a PTE can be PROTNONE even when not doing NUMA balancing but + * the only case the kernel cares is for NUMA balancing and is only ever set + * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked + * _PAGE_PROTNONE so by by default, implement the helper as "always no". It + * is the responsibility of the caller to distinguish between PROT_NONE + * protections and NUMA hinting fault protections. 
+ */ +static inline int pte_protnone(pte_t pte) +{ + return 0; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return 0; +} +#endif /* CONFIG_NUMA_BALANCING */ + +#endif /* CONFIG_MMU */ + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#ifndef __PAGETABLE_P4D_FOLDED +int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot); +int p4d_clear_huge(p4d_t *p4d); +#else +static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int p4d_clear_huge(p4d_t *p4d) +{ + return 0; +} +#endif /* !__PAGETABLE_P4D_FOLDED */ + +int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); +int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); +int pud_clear_huge(pud_t *pud); +int pmd_clear_huge(pmd_t *pmd); +int pud_free_pmd_page(pud_t *pud, unsigned long addr); +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); +#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ +static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int p4d_clear_huge(p4d_t *p4d) +{ + return 0; +} +static inline int pud_clear_huge(pud_t *pud) +{ + return 0; +} +static inline int pmd_clear_huge(pmd_t *pmd) +{ + return 0; +} +static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) +{ + return 0; +} +static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) +{ + return 0; +} +#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ + +#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * ARCHes with special requirements for evicting THP backing TLB entries can + * implement this. Otherwise also, it can help optimize normal TLB flush in + * THP regime. stock flush_tlb_range() typically has optimization to nuke the + * entire TLB TLB if flush span is greater than a threshold, which will + * likely be true for a single huge page. Thus a single thp flush will + * invalidate the entire TLB which is not desitable. + * e.g. see arch/arc: flush_pmd_tlb_range + */ +#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#else +#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() +#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() +#endif +#endif + +struct file; +int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t *vma_prot); + +#ifndef CONFIG_X86_ESPFIX64 +static inline void init_espfix_bsp(void) { } +#endif + +#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED +static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) +{ + return true; +} + +static inline bool arch_has_pfn_modify_check(void) +{ + return false; +} +#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */ + +/* + * Architecture PAGE_KERNEL_* fallbacks + * + * Some architectures don't define certain PAGE_KERNEL_* flags. This is either + * because they really don't support them, or the port needs to be updated to + * reflect the required functionality. Below are a set of relatively safe + * fallbacks, as best effort, which we can count on in lieu of the architectures + * not defining them on their own yet. 
+ */ + +#ifndef PAGE_KERNEL_RO +# define PAGE_KERNEL_RO PAGE_KERNEL +#endif + +#ifndef PAGE_KERNEL_EXEC +# define PAGE_KERNEL_EXEC PAGE_KERNEL +#endif + +#endif /* !__ASSEMBLY__ */ + +#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT) +#ifdef CONFIG_PHYS_ADDR_T_64BIT +/* + * ZSMALLOC needs to know the highest PFN on 32-bit architectures + * with physical address space extension, but falls back to + * BITS_PER_LONG otherwise. + */ +#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition +#else +#define MAX_POSSIBLE_PHYSMEM_BITS 32 +#endif +#endif + +#ifndef has_transparent_hugepage +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define has_transparent_hugepage() 1 +#else +#define has_transparent_hugepage() 0 +#endif +#endif + +/* + * On some architectures it depends on the mm if the p4d/pud or pmd + * layer of the page table hierarchy is folded or not. + */ +#ifndef mm_p4d_folded +#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED) +#endif + +#ifndef mm_pud_folded +#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED) +#endif + +#ifndef mm_pmd_folded +#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED) +#endif + +#endif /* _ASM_GENERIC_PGTABLE_H */ diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h new file mode 100644 index 000000000..c3046c920 --- /dev/null +++ b/include/asm-generic/preempt.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_PREEMPT_H +#define __ASM_PREEMPT_H + +#include + +#define PREEMPT_ENABLED (0) + +static __always_inline int preempt_count(void) +{ + return READ_ONCE(current_thread_info()->preempt_count); +} + +static __always_inline volatile int *preempt_count_ptr(void) +{ + return ¤t_thread_info()->preempt_count; +} + +static __always_inline void preempt_count_set(int pc) +{ + *preempt_count_ptr() = pc; +} + +/* + * must be macros to avoid header recursion hell + */ +#define init_task_preempt_count(p) do { \ + task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \ +} while (0) + +#define init_idle_preempt_count(p, cpu) do { \ + task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ +} while (0) + +static __always_inline void set_preempt_need_resched(void) +{ +} + +static __always_inline void clear_preempt_need_resched(void) +{ +} + +static __always_inline bool test_preempt_need_resched(void) +{ + return false; +} + +/* + * The various preempt_count add/sub methods + */ + +static __always_inline void __preempt_count_add(int val) +{ + *preempt_count_ptr() += val; +} + +static __always_inline void __preempt_count_sub(int val) +{ + *preempt_count_ptr() -= val; +} + +static __always_inline bool __preempt_count_dec_and_test(void) +{ + /* + * Because of load-store architectures cannot do per-cpu atomic + * operations; we cannot use PREEMPT_NEED_RESCHED because it might get + * lost. + */ + return !--*preempt_count_ptr() && tif_need_resched(); +} + +/* + * Returns true when we need to resched and can (barring IRQ state). 
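
/*
 * Illustrative sketch, not part of the upstream header: a userspace model
 * of the preempt_count logic above. Disabling preemption nests, and
 * __preempt_count_dec_and_test() only reports "reschedule now" once the
 * count drops back to zero with a resched request pending.
 */
#include <stdbool.h>
#include <stdio.h>

static int demo_preempt_count;
static bool demo_need_resched = true;	/* pretend TIF_NEED_RESCHED is set */

static void demo_preempt_disable(void)
{
	demo_preempt_count++;
}

static bool demo_preempt_enable(void)
{
	/* mirrors __preempt_count_dec_and_test() */
	return !--demo_preempt_count && demo_need_resched;
}

int main(void)
{
	demo_preempt_disable();
	demo_preempt_disable();			/* nested critical section */

	printf("inner enable, resched? %d\n", demo_preempt_enable());
	printf("outer enable, resched? %d\n", demo_preempt_enable());
	return 0;
}
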
+ */ +static __always_inline bool should_resched(int preempt_offset) +{ + return unlikely(preempt_count() == preempt_offset && + tif_need_resched()); +} + +#ifdef CONFIG_PREEMPT +extern asmlinkage void preempt_schedule(void); +#define __preempt_schedule() preempt_schedule() +extern asmlinkage void preempt_schedule_notrace(void); +#define __preempt_schedule_notrace() preempt_schedule_notrace() +#endif /* CONFIG_PREEMPT */ + +#endif /* __ASM_PREEMPT_H */ diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h new file mode 100644 index 000000000..82e674f6b --- /dev/null +++ b/include/asm-generic/ptrace.h @@ -0,0 +1,74 @@ +/* + * Common low level (register) ptrace helpers + * + * Copyright 2004-2011 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __ASM_GENERIC_PTRACE_H__ +#define __ASM_GENERIC_PTRACE_H__ + +#ifndef __ASSEMBLY__ + +/* Helpers for working with the instruction pointer */ +#ifndef GET_IP +#define GET_IP(regs) ((regs)->pc) +#endif +#ifndef SET_IP +#define SET_IP(regs, val) (GET_IP(regs) = (val)) +#endif + +static inline unsigned long instruction_pointer(struct pt_regs *regs) +{ + return GET_IP(regs); +} +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + SET_IP(regs, val); +} + +#ifndef profile_pc +#define profile_pc(regs) instruction_pointer(regs) +#endif + +/* Helpers for working with the user stack pointer */ +#ifndef GET_USP +#define GET_USP(regs) ((regs)->usp) +#endif +#ifndef SET_USP +#define SET_USP(regs, val) (GET_USP(regs) = (val)) +#endif + +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ + return GET_USP(regs); +} +static inline void user_stack_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + SET_USP(regs, val); +} + +/* Helpers for working with the frame pointer */ +#ifndef GET_FP +#define GET_FP(regs) ((regs)->fp) +#endif +#ifndef SET_FP +#define SET_FP(regs, val) (GET_FP(regs) = (val)) +#endif + +static inline unsigned long frame_pointer(struct pt_regs *regs) +{ + return GET_FP(regs); +} +static inline void frame_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + SET_FP(regs, val); +} + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h new file mode 100644 index 000000000..0f7062bd5 --- /dev/null +++ b/include/asm-generic/qrwlock.h @@ -0,0 +1,138 @@ +/* + * Queue read/write lock + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P. + * + * Authors: Waiman Long + */ +#ifndef __ASM_GENERIC_QRWLOCK_H +#define __ASM_GENERIC_QRWLOCK_H + +#include +#include +#include + +#include + +/* + * Writer states & reader shift and bias. 
+ */ +#define _QW_WAITING 0x100 /* A writer is waiting */ +#define _QW_LOCKED 0x0ff /* A writer holds the lock */ +#define _QW_WMASK 0x1ff /* Writer mask */ +#define _QR_SHIFT 9 /* Reader count shift */ +#define _QR_BIAS (1U << _QR_SHIFT) + +/* + * External function declarations + */ +extern void queued_read_lock_slowpath(struct qrwlock *lock); +extern void queued_write_lock_slowpath(struct qrwlock *lock); + +/** + * queued_read_trylock - try to acquire read lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static inline int queued_read_trylock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_read(&lock->cnts); + if (likely(!(cnts & _QW_WMASK))) { + cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts); + if (likely(!(cnts & _QW_WMASK))) + return 1; + atomic_sub(_QR_BIAS, &lock->cnts); + } + return 0; +} + +/** + * queued_write_trylock - try to acquire write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static inline int queued_write_trylock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_read(&lock->cnts); + if (unlikely(cnts)) + return 0; + + return likely(atomic_cmpxchg_acquire(&lock->cnts, + cnts, cnts | _QW_LOCKED) == cnts); +} +/** + * queued_read_lock - acquire read lock of a queue rwlock + * @lock: Pointer to queue rwlock structure + */ +static inline void queued_read_lock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts); + if (likely(!(cnts & _QW_WMASK))) + return; + + /* The slowpath will decrement the reader count, if necessary. */ + queued_read_lock_slowpath(lock); +} + +/** + * queued_write_lock - acquire write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queued_write_lock(struct qrwlock *lock) +{ + /* Optimize for the unfair lock case where the fair flag is 0. */ + if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0) + return; + + queued_write_lock_slowpath(lock); +} + +/** + * queued_read_unlock - release read lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queued_read_unlock(struct qrwlock *lock) +{ + /* + * Atomically decrement the reader count + */ + (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts); +} + +/** + * queued_write_unlock - release write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queued_write_unlock(struct qrwlock *lock) +{ + smp_store_release(&lock->wlocked, 0); +} + +/* + * Remapping rwlock architecture specific functions to the corresponding + * queue rwlock functions. 
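
/*
 * Illustrative sketch, not part of the upstream header: how the qrwlock
 * count word above is split. The low 9 bits (_QW_WMASK) carry the writer
 * state, and each reader adds _QR_BIAS on top of them.
 */
#include <stdio.h>

#define _QW_LOCKED	0x0ffu		/* a writer holds the lock */
#define _QW_WMASK	0x1ffu		/* writer mask */
#define _QR_SHIFT	9		/* reader count shift */
#define _QR_BIAS	(1u << _QR_SHIFT)

int main(void)
{
	unsigned int cnts = 0;

	cnts += 3 * _QR_BIAS;		/* three readers acquire the lock */
	printf("readers=%u writer=%#x\n", cnts >> _QR_SHIFT, cnts & _QW_WMASK);

	cnts -= 3 * _QR_BIAS;		/* readers drop out ... */
	cnts |= _QW_LOCKED;		/* ... and a writer takes over */
	printf("readers=%u writer=%#x\n", cnts >> _QR_SHIFT, cnts & _QW_WMASK);
	return 0;
}
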
+ */ +#define arch_read_lock(l) queued_read_lock(l) +#define arch_write_lock(l) queued_write_lock(l) +#define arch_read_trylock(l) queued_read_trylock(l) +#define arch_write_trylock(l) queued_write_trylock(l) +#define arch_read_unlock(l) queued_read_unlock(l) +#define arch_write_unlock(l) queued_write_unlock(l) + +#endif /* __ASM_GENERIC_QRWLOCK_H */ diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h new file mode 100644 index 000000000..c36f1d5a2 --- /dev/null +++ b/include/asm-generic/qrwlock_types.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_QRWLOCK_TYPES_H +#define __ASM_GENERIC_QRWLOCK_TYPES_H + +#include +#include +#include + +/* + * The queue read/write lock data structure + */ + +typedef struct qrwlock { + union { + atomic_t cnts; + struct { +#ifdef __LITTLE_ENDIAN + u8 wlocked; /* Locked for write? */ + u8 __lstate[3]; +#else + u8 __lstate[3]; + u8 wlocked; /* Locked for write? */ +#endif + }; + }; + arch_spinlock_t wait_lock; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { \ + { .cnts = ATOMIC_INIT(0), }, \ + .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ +} + +#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */ diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h new file mode 100644 index 000000000..9cc457597 --- /dev/null +++ b/include/asm-generic/qspinlock.h @@ -0,0 +1,123 @@ +/* + * Queued spinlock + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. + * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP + * + * Authors: Waiman Long + */ +#ifndef __ASM_GENERIC_QSPINLOCK_H +#define __ASM_GENERIC_QSPINLOCK_H + +#include + +/** + * queued_spin_is_locked - is the spinlock locked? + * @lock: Pointer to queued spinlock structure + * Return: 1 if it is locked, 0 otherwise + */ +static __always_inline int queued_spin_is_locked(struct qspinlock *lock) +{ + /* + * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL + * isn't immediately observable. + */ + return atomic_read(&lock->val); +} + +/** + * queued_spin_value_unlocked - is the spinlock structure unlocked? + * @lock: queued spinlock structure + * Return: 1 if it is unlocked, 0 otherwise + * + * N.B. Whenever there are tasks waiting for the lock, it is considered + * locked wrt the lockref code to avoid lock stealing by the lockref + * code and change things underneath the lock. This also allows some + * optimizations to be applied without conflict with lockref. 
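
/*
 * Illustrative sketch, not part of the upstream header: the byte-store
 * unlock used by queued_write_unlock() above (queued_spin_unlock() later
 * in this patch relies on the same idea). On a little-endian machine the
 * wlocked byte aliases the low 8 bits of the 32-bit count, so clearing it
 * releases the writer while any reader bias added by waiters survives.
 */
#include <stdint.h>
#include <stdio.h>

#define _QW_LOCKED	0x0ffu
#define _QR_BIAS	(1u << 9)

union demo_qrwlock {
	uint32_t cnts;
	struct {
		uint8_t wlocked;	/* little-endian layout assumed */
		uint8_t __lstate[3];
	};
};

int main(void)
{
	union demo_qrwlock l = { .cnts = _QW_LOCKED + 2 * _QR_BIAS };

	printf("before: cnts=%#x wlocked=%#x\n",
	       (unsigned int)l.cnts, (unsigned int)l.wlocked);
	l.wlocked = 0;			/* the write-unlock store */
	printf("after:  cnts=%#x (waiting readers still counted)\n",
	       (unsigned int)l.cnts);
	return 0;
}
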
+ */ +static __always_inline int queued_spin_value_unlocked(struct qspinlock lock) +{ + return !atomic_read(&lock.val); +} + +/** + * queued_spin_is_contended - check if the lock is contended + * @lock : Pointer to queued spinlock structure + * Return: 1 if lock contended, 0 otherwise + */ +static __always_inline int queued_spin_is_contended(struct qspinlock *lock) +{ + return atomic_read(&lock->val) & ~_Q_LOCKED_MASK; +} +/** + * queued_spin_trylock - try to acquire the queued spinlock + * @lock : Pointer to queued spinlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static __always_inline int queued_spin_trylock(struct qspinlock *lock) +{ + if (!atomic_read(&lock->val) && + (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0)) + return 1; + return 0; +} + +extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); + +/** + * queued_spin_lock - acquire a queued spinlock + * @lock: Pointer to queued spinlock structure + */ +static __always_inline void queued_spin_lock(struct qspinlock *lock) +{ + u32 val; + + val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL); + if (likely(val == 0)) + return; + queued_spin_lock_slowpath(lock, val); +} + +#ifndef queued_spin_unlock +/** + * queued_spin_unlock - release a queued spinlock + * @lock : Pointer to queued spinlock structure + */ +static __always_inline void queued_spin_unlock(struct qspinlock *lock) +{ + /* + * unlock() needs release semantics: + */ + smp_store_release(&lock->locked, 0); +} +#endif + +#ifndef virt_spin_lock +static __always_inline bool virt_spin_lock(struct qspinlock *lock) +{ + return false; +} +#endif + +/* + * Remapping spinlock architecture specific functions to the corresponding + * queued spinlock functions. + */ +#define arch_spin_is_locked(l) queued_spin_is_locked(l) +#define arch_spin_is_contended(l) queued_spin_is_contended(l) +#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l) +#define arch_spin_lock(l) queued_spin_lock(l) +#define arch_spin_trylock(l) queued_spin_trylock(l) +#define arch_spin_unlock(l) queued_spin_unlock(l) + +#endif /* __ASM_GENERIC_QSPINLOCK_H */ diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h new file mode 100644 index 000000000..d10f1e7d6 --- /dev/null +++ b/include/asm-generic/qspinlock_types.h @@ -0,0 +1,112 @@ +/* + * Queued spinlock + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. + * + * Authors: Waiman Long + */ +#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H +#define __ASM_GENERIC_QSPINLOCK_TYPES_H + +/* + * Including atomic.h with PARAVIRT on will cause compilation errors because + * of recursive header file incluson via paravirt_types.h. So don't include + * it if PARAVIRT is on. + */ +#ifndef CONFIG_PARAVIRT +#include +#include +#endif + +typedef struct qspinlock { + union { + atomic_t val; + + /* + * By using the whole 2nd least significant byte for the + * pending bit, we can allow better optimization of the lock + * acquisition for the pending bit holder. 
+ */ +#ifdef __LITTLE_ENDIAN + struct { + u8 locked; + u8 pending; + }; + struct { + u16 locked_pending; + u16 tail; + }; +#else + struct { + u16 tail; + u16 locked_pending; + }; + struct { + u8 reserved[2]; + u8 pending; + u8 locked; + }; +#endif + }; +} arch_spinlock_t; + +/* + * Initializier + */ +#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } } + +/* + * Bitfields in the atomic value: + * + * When NR_CPUS < 16K + * 0- 7: locked byte + * 8: pending + * 9-15: not used + * 16-17: tail index + * 18-31: tail cpu (+1) + * + * When NR_CPUS >= 16K + * 0- 7: locked byte + * 8: pending + * 9-10: tail index + * 11-31: tail cpu (+1) + */ +#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\ + << _Q_ ## type ## _OFFSET) +#define _Q_LOCKED_OFFSET 0 +#define _Q_LOCKED_BITS 8 +#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED) + +#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS) +#if CONFIG_NR_CPUS < (1U << 14) +#define _Q_PENDING_BITS 8 +#else +#define _Q_PENDING_BITS 1 +#endif +#define _Q_PENDING_MASK _Q_SET_MASK(PENDING) + +#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS) +#define _Q_TAIL_IDX_BITS 2 +#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX) + +#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS) +#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) +#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) + +#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET +#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK) + +#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) +#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) + +#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */ diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h new file mode 100644 index 000000000..8874f681b --- /dev/null +++ b/include/asm-generic/resource.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_RESOURCE_H +#define _ASM_GENERIC_RESOURCE_H + +#include + + +/* + * boot-time rlimit defaults for the init task: + */ +#define INIT_RLIMITS \ +{ \ + [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \ + [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ + [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_NPROC] = { 0, 0 }, \ + [RLIMIT_NOFILE] = { INR_OPEN_CUR, INR_OPEN_MAX }, \ + [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \ + [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_SIGPENDING] = { 0, 0 }, \ + [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ + [RLIMIT_NICE] = { 0, 0 }, \ + [RLIMIT_RTPRIO] = { 0, 0 }, \ + [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ +} + +#endif diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h new file mode 100644 index 000000000..93e67a055 --- /dev/null +++ b/include/asm-generic/rwsem.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_RWSEM_H +#define _ASM_GENERIC_RWSEM_H + +#ifndef _LINUX_RWSEM_H +#error "Please don't include directly, use instead." +#endif + +#ifdef __KERNEL__ + +/* + * R/W semaphores originally for PPC using the stuff in lib/rwsem.c. + * Adapted largely from include/asm-i386/rwsem.h + * by Paul Mackerras . 
+ */ + +/* + * the semaphore definition + */ +#ifdef CONFIG_64BIT +# define RWSEM_ACTIVE_MASK 0xffffffffL +#else +# define RWSEM_ACTIVE_MASK 0x0000ffffL +#endif + +#define RWSEM_UNLOCKED_VALUE 0x00000000L +#define RWSEM_ACTIVE_BIAS 0x00000001L +#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + +/* + * lock for reading + */ +static inline void __down_read(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) + rwsem_down_read_failed(sem); +} + +static inline int __down_read_killable(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) { + if (IS_ERR(rwsem_down_read_failed_killable(sem))) + return -EINTR; + } + + return 0; +} + +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + long tmp; + + while ((tmp = atomic_long_read(&sem->count)) >= 0) { + if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, + tmp + RWSEM_ACTIVE_READ_BIAS)) { + return 1; + } + } + return 0; +} + +/* + * lock for writing + */ +static inline void __down_write(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, + &sem->count); + if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) + rwsem_down_write_failed(sem); +} + +static inline int __down_write_killable(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, + &sem->count); + if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) + if (IS_ERR(rwsem_down_write_failed_killable(sem))) + return -EINTR; + return 0; +} + +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + return tmp == RWSEM_UNLOCKED_VALUE; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_dec_return_release(&sem->count); + if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) + rwsem_wake(sem); +} + +/* + * unlock after writing + */ +static inline void __up_write(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS, + &sem->count) < 0)) + rwsem_wake(sem); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + long tmp; + + /* + * When downgrading from exclusive to shared ownership, + * anything inside the write-locked region cannot leak + * into the read side. In contrast, anything in the + * read-locked region is ok to be re-ordered into the + * write side. As such, rely on RELEASE semantics. + */ + tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count); + if (tmp < 0) + rwsem_downgrade_wake(sem); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_GENERIC_RWSEM_H */ diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h new file mode 100644 index 000000000..e74072d23 --- /dev/null +++ b/include/asm-generic/seccomp.h @@ -0,0 +1,46 @@ +/* + * include/asm-generic/seccomp.h + * + * Copyright (C) 2014 Linaro Limited + * Author: AKASHI Takahiro + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
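
/*
 * Illustrative sketch, not part of the upstream header: the rwsem count
 * arithmetic above, assuming the CONFIG_64BIT constants and a 64-bit
 * long. Each reader adds RWSEM_ACTIVE_BIAS; a writer adds
 * RWSEM_ACTIVE_WRITE_BIAS, which drives the count negative because it
 * includes the waiting bias.
 */
#include <stdio.h>

#define RWSEM_ACTIVE_MASK	0xffffffffL
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	long count = 0;				/* RWSEM_UNLOCKED_VALUE */

	count += 2 * RWSEM_ACTIVE_BIAS;		/* two readers acquired it */
	printf("two readers: count=%ld active=%ld\n",
	       count, count & RWSEM_ACTIVE_MASK);

	count = RWSEM_ACTIVE_WRITE_BIAS;	/* one writer, nobody waiting yet */
	printf("one writer:  count=%ld active=%ld\n",
	       count, count & RWSEM_ACTIVE_MASK);
	return 0;
}
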
+ */ +#ifndef _ASM_GENERIC_SECCOMP_H +#define _ASM_GENERIC_SECCOMP_H + +#include + +#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32) +#define __NR_seccomp_read_32 __NR_read +#define __NR_seccomp_write_32 __NR_write +#define __NR_seccomp_exit_32 __NR_exit +#ifndef __NR_seccomp_sigreturn_32 +#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn +#endif +#endif /* CONFIG_COMPAT && ! already defined */ + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#ifndef __NR_seccomp_sigreturn +#define __NR_seccomp_sigreturn __NR_rt_sigreturn +#endif + +#ifdef CONFIG_COMPAT +#ifndef get_compat_mode1_syscalls +static inline const int *get_compat_mode1_syscalls(void) +{ + static const int mode1_syscalls_32[] = { + __NR_seccomp_read_32, __NR_seccomp_write_32, + __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, + 0, /* null terminated */ + }; + return mode1_syscalls_32; +} +#endif +#endif /* CONFIG_COMPAT */ + +#endif /* _ASM_GENERIC_SECCOMP_H */ diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h new file mode 100644 index 000000000..ea5987bb0 --- /dev/null +++ b/include/asm-generic/sections.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_SECTIONS_H_ +#define _ASM_GENERIC_SECTIONS_H_ + +/* References to section boundaries */ + +#include +#include + +/* + * Usage guidelines: + * _text, _data: architecture specific, don't use them in arch-independent code + * [_stext, _etext]: contains .text.* sections, may also contain .rodata.* + * and/or .init.* sections + * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* + * and/or .init.* sections. + * [__start_rodata, __end_rodata]: contains .rodata.* sections + * [__start_ro_after_init, __end_ro_after_init]: + * contains .data..ro_after_init section + * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* + * may be out of this range on some architectures. + * [_sinittext, _einittext]: contains .init.text.* sections + * [__bss_start, __bss_stop]: contains BSS sections + * + * Following global variables are optional and may be unavailable on some + * architectures and/or kernel configurations. + * _text, _data + * __kprobes_text_start, __kprobes_text_end + * __entry_text_start, __entry_text_end + * __ctors_start, __ctors_end + * __irqentry_text_start, __irqentry_text_end + * __softirqentry_text_start, __softirqentry_text_end + * __start_opd, __end_opd + */ +extern char _text[], _stext[], _etext[]; +extern char _data[], _sdata[], _edata[]; +extern char __bss_start[], __bss_stop[]; +extern char __init_begin[], __init_end[]; +extern char _sinittext[], _einittext[]; +extern char __start_ro_after_init[], __end_ro_after_init[]; +extern char _end[]; +extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; +extern char __kprobes_text_start[], __kprobes_text_end[]; +extern char __entry_text_start[], __entry_text_end[]; +extern char __start_rodata[], __end_rodata[]; +extern char __irqentry_text_start[], __irqentry_text_end[]; +extern char __softirqentry_text_start[], __softirqentry_text_end[]; +extern char __start_once[], __end_once[]; + +/* Start and end of .ctors section - used for constructor calls. */ +extern char __ctors_start[], __ctors_end[]; + +/* Start and end of .opd section - used for function descriptors. 
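
/*
 * Illustrative sketch, not part of the upstream header: how the
 * null-terminated list returned by get_compat_mode1_syscalls() above is
 * meant to be scanned. The numbers are i386-style values used purely as
 * stand-ins for __NR_read/__NR_write/__NR_exit/__NR_sigreturn.
 */
#include <stdbool.h>
#include <stdio.h>

static const int demo_mode1_syscalls[] = {
	3, 4, 1, 119,	/* read, write, exit, sigreturn (assumed values) */
	0,		/* null terminated */
};

static bool demo_mode1_allowed(int nr)
{
	const int *s;

	for (s = demo_mode1_syscalls; *s; s++)
		if (*s == nr)
			return true;
	return false;
}

int main(void)
{
	printf("write (4) allowed: %d\n", demo_mode1_allowed(4));
	printf("open  (5) allowed: %d\n", demo_mode1_allowed(5));
	return 0;
}
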
*/ +extern char __start_opd[], __end_opd[]; + +/* Start and end of instrumentation protected text section */ +extern char __noinstr_text_start[], __noinstr_text_end[]; + +extern __visible const void __nosave_begin, __nosave_end; + +/* Function descriptor handling (if any). Override in asm/sections.h */ +#ifndef dereference_function_descriptor +#define dereference_function_descriptor(p) (p) +#define dereference_kernel_function_descriptor(p) (p) +#endif + +/* random extra sections (if any). Override + * in asm/sections.h */ +#ifndef arch_is_kernel_text +static inline int arch_is_kernel_text(unsigned long addr) +{ + return 0; +} +#endif + +#ifndef arch_is_kernel_data +static inline int arch_is_kernel_data(unsigned long addr) +{ + return 0; +} +#endif + +/** + * memory_contains - checks if an object is contained within a memory region + * @begin: virtual address of the beginning of the memory region + * @end: virtual address of the end of the memory region + * @virt: virtual address of the memory object + * @size: size of the memory object + * + * Returns: true if the object specified by @virt and @size is entirely + * contained within the memory region defined by @begin and @end, false + * otherwise. + */ +static inline bool memory_contains(void *begin, void *end, void *virt, + size_t size) +{ + return virt >= begin && virt + size <= end; +} + +/** + * memory_intersects - checks if the region occupied by an object intersects + * with another memory region + * @begin: virtual address of the beginning of the memory regien + * @end: virtual address of the end of the memory region + * @virt: virtual address of the memory object + * @size: size of the memory object + * + * Returns: true if an object's memory region, specified by @virt and @size, + * intersects with the region specified by @begin and @end, false otherwise. + */ +static inline bool memory_intersects(void *begin, void *end, void *virt, + size_t size) +{ + void *vend = virt + size; + + return (virt >= begin && virt < end) || (vend >= begin && vend < end); +} + +/** + * init_section_contains - checks if an object is contained within the init + * section + * @virt: virtual address of the memory object + * @size: size of the memory object + * + * Returns: true if the object specified by @virt and @size is entirely + * contained within the init section, false otherwise. + */ +static inline bool init_section_contains(void *virt, size_t size) +{ + return memory_contains(__init_begin, __init_end, virt, size); +} + +/** + * init_section_intersects - checks if the region occupied by an object + * intersects with the init section + * @virt: virtual address of the memory object + * @size: size of the memory object + * + * Returns: true if an object's memory region, specified by @virt and @size, + * intersects with the init section, false otherwise. + */ +static inline bool init_section_intersects(void *virt, size_t size) +{ + return memory_intersects(__init_begin, __init_end, virt, size); +} + +#endif /* _ASM_GENERIC_SECTIONS_H_ */ diff --git a/include/asm-generic/segment.h b/include/asm-generic/segment.h new file mode 100644 index 000000000..5580eace6 --- /dev/null +++ b/include/asm-generic/segment.h @@ -0,0 +1,9 @@ +#ifndef __ASM_GENERIC_SEGMENT_H +#define __ASM_GENERIC_SEGMENT_H +/* + * Only here because we have some old header files that expect it... + * + * New architectures probably don't want to have their own version. 
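
/*
 * Illustrative sketch, not part of the upstream header: the
 * memory_contains()/memory_intersects() checks above, exercised on an
 * ordinary buffer instead of kernel section boundaries. char * casts
 * replace the kernel's void-pointer arithmetic so this stays plain
 * standard C.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool demo_contains(void *begin, void *end, void *virt, size_t size)
{
	char *v = virt;

	return v >= (char *)begin && v + size <= (char *)end;
}

static bool demo_intersects(void *begin, void *end, void *virt, size_t size)
{
	char *v = virt, *vend = v + size;

	return (v >= (char *)begin && v < (char *)end) ||
	       (vend >= (char *)begin && vend < (char *)end);
}

int main(void)
{
	char region[128];
	void *begin = region, *end = region + 64;	/* pretend this is the init section */

	printf("inside:     %d\n", demo_contains(begin, end, region + 8, 16));
	printf("straddling: %d\n", demo_contains(begin, end, region + 60, 16));
	printf("overlaps:   %d\n", demo_intersects(begin, end, region + 60, 16));
	return 0;
}
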
+ */ + +#endif /* __ASM_GENERIC_SEGMENT_H */ diff --git a/include/asm-generic/serial.h b/include/asm-generic/serial.h new file mode 100644 index 000000000..ca9f7b6be --- /dev/null +++ b/include/asm-generic/serial.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_SERIAL_H +#define __ASM_GENERIC_SERIAL_H + +/* + * This should not be an architecture specific #define, oh well. + * + * Traditionally, it just describes i8250 and related serial ports + * that have this clock rate. + */ + +#define BASE_BAUD (1843200 / 16) + +#endif /* __ASM_GENERIC_SERIAL_H */ diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h new file mode 100644 index 000000000..c86abf6bc --- /dev/null +++ b/include/asm-generic/set_memory.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_SET_MEMORY_H +#define __ASM_SET_MEMORY_H + +/* + * Functions to change memory attributes. + */ +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); + +#endif diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h new file mode 100644 index 000000000..c53984fa9 --- /dev/null +++ b/include/asm-generic/signal.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_SIGNAL_H +#define __ASM_GENERIC_SIGNAL_H + +#include + +#ifndef __ASSEMBLY__ +#ifdef SA_RESTORER +#endif + +#include +#undef __HAVE_ARCH_SIG_BITOPS + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_GENERIC_SIGNAL_H */ diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h new file mode 100644 index 000000000..d0343d58a --- /dev/null +++ b/include/asm-generic/simd.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +/* + * may_use_simd - whether it is allowable at this time to issue SIMD + * instructions or access the SIMD register file + * + * As architectures typically don't preserve the SIMD register file when + * taking an interrupt, !in_interrupt() should be a reasonable default. + */ +static __must_check inline bool may_use_simd(void) +{ + return !in_interrupt(); +} diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h new file mode 100644 index 000000000..1dcfad962 --- /dev/null +++ b/include/asm-generic/sizes.h @@ -0,0 +1,2 @@ +/* This is a placeholder, to be removed over time */ +#include diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h new file mode 100644 index 000000000..adaf6acab --- /dev/null +++ b/include/asm-generic/spinlock.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_SPINLOCK_H +#define __ASM_GENERIC_SPINLOCK_H +/* + * You need to implement asm/spinlock.h for SMP support. The generic + * version does not handle SMP. 
+ */ +#ifdef CONFIG_SMP +#error need an architecture specific asm/spinlock.h +#endif + +#endif /* __ASM_GENERIC_SPINLOCK_H */ diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h new file mode 100644 index 000000000..f88dcd8ed --- /dev/null +++ b/include/asm-generic/statfs.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _GENERIC_STATFS_H +#define _GENERIC_STATFS_H + +#include + +typedef __kernel_fsid_t fsid_t; +#endif diff --git a/include/asm-generic/string.h b/include/asm-generic/string.h new file mode 100644 index 000000000..de5e02014 --- /dev/null +++ b/include/asm-generic/string.h @@ -0,0 +1,10 @@ +#ifndef __ASM_GENERIC_STRING_H +#define __ASM_GENERIC_STRING_H +/* + * The kernel provides all required functions in lib/string.c + * + * Architectures probably want to provide at least their own optimized + * memcpy and memset functions though. + */ + +#endif /* __ASM_GENERIC_STRING_H */ diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h new file mode 100644 index 000000000..986acc9d3 --- /dev/null +++ b/include/asm-generic/switch_to.h @@ -0,0 +1,30 @@ +/* Generic task switch macro wrapper. + * + * It should be possible to use these on really simple architectures, + * but it serves more as a starting point for new ports. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_SWITCH_TO_H +#define __ASM_GENERIC_SWITCH_TO_H + +#include + +/* + * Context switching is now performed out-of-line in switch_to.S + */ +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); + +#define switch_to(prev, next, last) \ + do { \ + ((last) = __switch_to((prev), (next))); \ + } while (0) + +#endif /* __ASM_GENERIC_SWITCH_TO_H */ diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h new file mode 100644 index 000000000..0c938a435 --- /dev/null +++ b/include/asm-generic/syscall.h @@ -0,0 +1,157 @@ +/* + * Access to user system call parameters and results + * + * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * This file is a stub providing documentation for what functions + * asm-ARCH/syscall.h files need to define. Most arch definitions + * will be simple inlines. + * + * All of these functions expect to be called with no locks, + * and only when the caller is sure that the task of interest + * cannot return to user mode while we are looking at it. + */ + +#ifndef _ASM_SYSCALL_H +#define _ASM_SYSCALL_H 1 + +struct task_struct; +struct pt_regs; + +/** + * syscall_get_nr - find what system call a task is executing + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * If @task is executing a system call or is at system call + * tracing about to attempt one, returns the system call number. + * If @task is not executing a system call, i.e. it's blocked + * inside the kernel for a fault or signal, returns -1. + * + * Note this returns int even on 64-bit machines. 
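
/*
 * Illustrative sketch, not part of the upstream header: the truncation
 * rule described above for syscall_get_nr(). The return type is int, so a
 * 64-bit register value of 0xffffffff still reads as the -1 "not in a
 * syscall" marker (two's-complement conversion assumed, as on Linux
 * targets).
 */
#include <stdio.h>

static int demo_get_nr(long raw)
{
	return (int)raw;	/* only the low 32 bits are meaningful */
}

int main(void)
{
	printf("raw 0xffffffff -> %d\n", demo_get_nr(0xffffffffL));
	printf("raw 0x0        -> %d\n", demo_get_nr(0L));
	return 0;
}
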
Only 32 bits of + * system call number can be meaningful. If the actual arch value + * is 64 bits, this truncates to 32 bits so 0xffffffff means -1. + * + * It's only valid to call this when @task is known to be blocked. + */ +int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_rollback - roll back registers after an aborted system call + * @task: task of interest, must be in system call exit tracing + * @regs: task_pt_regs() of @task + * + * It's only valid to call this when @task is stopped for system + * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT), + * after tracehook_report_syscall_entry() returned nonzero to prevent + * the system call from taking place. + * + * This rolls back the register state in @regs so it's as if the + * system call instruction was a no-op. The registers containing + * the system call number and arguments are as they were before the + * system call instruction. This may not be the same as what the + * register state looked like at system call entry tracing. + */ +void syscall_rollback(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_get_error - check result of traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * Returns 0 if the system call succeeded, or -ERRORCODE if it failed. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +long syscall_get_error(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_get_return_value - get the return value of a traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * Returns the return value of the successful system call. + * This value is meaningless if syscall_get_error() returned nonzero. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_set_return_value - change the return value of a traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * @error: negative error code, or zero to indicate success + * @val: user return value if @error is zero + * + * This changes the results of the system call that user mode will see. + * If @error is zero, the user sees a successful system call with a + * return value of @val. If @error is nonzero, it's a negated errno + * code; the user sees a failed system call with this errno code. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, + int error, long val); + +/** + * syscall_get_arguments - extract system call parameter values + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * @i: argument index [0,5] + * @n: number of arguments; n+i must be [1,6]. + * @args: array filled with argument values + * + * Fetches @n arguments to the system call starting with the @i'th argument + * (from 0 through 5). Argument @i is stored in @args[0], and so on. + * An arch inline version is probably optimal when @i and @n are constants. 
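
/*
 * Illustrative sketch, not part of the upstream header: the @i/@n
 * convention documented above for syscall_get_arguments(), using a
 * made-up register block in place of a real struct pt_regs.
 */
#include <stdio.h>

struct demo_regs {
	unsigned long arg[6];	/* a real arch maps these onto specific registers */
};

static void demo_get_arguments(const struct demo_regs *regs,
			       unsigned int i, unsigned int n,
			       unsigned long *args)
{
	unsigned int k;

	for (k = 0; k < n; k++)		/* argument @i lands in args[0], and so on */
		args[k] = regs->arg[i + k];
}

int main(void)
{
	struct demo_regs regs = { .arg = { 10, 11, 12, 13, 14, 15 } };
	unsigned long args[3];

	demo_get_arguments(&regs, 2, 3, args);	/* fetch arguments 2..4 */
	printf("args: %lu %lu %lu\n", args[0], args[1], args[2]);
	return 0;
}
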
+ * + * It's only valid to call this when @task is stopped for tracing on + * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + * It's invalid to call this with @i + @n > 6; we only support system calls + * taking up to 6 arguments. + */ +void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned int i, unsigned int n, unsigned long *args); + +/** + * syscall_set_arguments - change system call parameter value + * @task: task of interest, must be in system call entry tracing + * @regs: task_pt_regs() of @task + * @i: argument index [0,5] + * @n: number of arguments; n+i must be [1,6]. + * @args: array of argument values to store + * + * Changes @n arguments to the system call starting with the @i'th argument. + * Argument @i gets value @args[0], and so on. + * An arch inline version is probably optimal when @i and @n are constants. + * + * It's only valid to call this when @task is stopped for tracing on + * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + * It's invalid to call this with @i + @n > 6; we only support system calls + * taking up to 6 arguments. + */ +void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned int i, unsigned int n, + const unsigned long *args); + +/** + * syscall_get_arch - return the AUDIT_ARCH for the current system call + * + * Returns the AUDIT_ARCH_* based on the system call convention in use. + * + * It's only valid to call this when current is stopped on entry to a system + * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP. + * + * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must + * provide an implementation of this. + */ +int syscall_get_arch(void); +#endif /* _ASM_SYSCALL_H */ diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h new file mode 100644 index 000000000..933ca6581 --- /dev/null +++ b/include/asm-generic/syscalls.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_SYSCALLS_H +#define __ASM_GENERIC_SYSCALLS_H + +#include +#include + +/* + * Calling conventions for these system calls can differ, so + * it's possible to override them. + */ + +#ifndef sys_mmap2 +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +#endif + +#ifndef sys_mmap +asmlinkage long sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, off_t pgoff); +#endif + +#ifndef sys_rt_sigreturn +asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); +#endif + +#endif /* __ASM_GENERIC_SYSCALLS_H */ diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h new file mode 100644 index 000000000..59c5a3bd4 --- /dev/null +++ b/include/asm-generic/termios-base.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* termios.h: generic termios/termio user copying/translation + */ + +#ifndef _ASM_GENERIC_TERMIOS_BASE_H +#define _ASM_GENERIC_TERMIOS_BASE_H + +#include + +#ifndef __ARCH_TERMIO_GETPUT + +/* + * Translate a "termio" structure into a "termios". Ugh. 
+ */ +static inline int user_termio_to_kernel_termios(struct ktermios *termios, + struct termio __user *termio) +{ + unsigned short tmp; + + if (get_user(tmp, &termio->c_iflag) < 0) + goto fault; + termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp; + + if (get_user(tmp, &termio->c_oflag) < 0) + goto fault; + termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp; + + if (get_user(tmp, &termio->c_cflag) < 0) + goto fault; + termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp; + + if (get_user(tmp, &termio->c_lflag) < 0) + goto fault; + termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp; + + if (get_user(termios->c_line, &termio->c_line) < 0) + goto fault; + + if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0) + goto fault; + + return 0; + + fault: + return -EFAULT; +} + +/* + * Translate a "termios" structure into a "termio". Ugh. + */ +static inline int kernel_termios_to_user_termio(struct termio __user *termio, + struct ktermios *termios) +{ + if (put_user(termios->c_iflag, &termio->c_iflag) < 0 || + put_user(termios->c_oflag, &termio->c_oflag) < 0 || + put_user(termios->c_cflag, &termio->c_cflag) < 0 || + put_user(termios->c_lflag, &termio->c_lflag) < 0 || + put_user(termios->c_line, &termio->c_line) < 0 || + copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0) + return -EFAULT; + + return 0; +} + +#ifndef user_termios_to_kernel_termios +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) +#endif + +#ifndef kernel_termios_to_user_termios +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) +#endif + +#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) + +#endif /* __ARCH_TERMIO_GETPUT */ + +#endif /* _ASM_GENERIC_TERMIOS_BASE_H */ diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h new file mode 100644 index 000000000..b1398d0d4 --- /dev/null +++ b/include/asm-generic/termios.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_TERMIOS_H +#define _ASM_GENERIC_TERMIOS_H + + +#include +#include + +/* intr=^C quit=^\ erase=del kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +static inline int user_termio_to_kernel_termios(struct ktermios *termios, + const struct termio __user *termio) +{ + unsigned short tmp; + + if (get_user(tmp, &termio->c_iflag) < 0) + goto fault; + termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp; + + if (get_user(tmp, &termio->c_oflag) < 0) + goto fault; + termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp; + + if (get_user(tmp, &termio->c_cflag) < 0) + goto fault; + termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp; + + if (get_user(tmp, &termio->c_lflag) < 0) + goto fault; + termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp; + + if (get_user(termios->c_line, &termio->c_line) < 0) + goto fault; + + if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0) + goto fault; + + return 0; + + fault: + return -EFAULT; +} + +/* + * Translate a "termios" structure into a "termio". Ugh. 
+ */ +static inline int kernel_termios_to_user_termio(struct termio __user *termio, + struct ktermios *termios) +{ + if (put_user(termios->c_iflag, &termio->c_iflag) < 0 || + put_user(termios->c_oflag, &termio->c_oflag) < 0 || + put_user(termios->c_cflag, &termio->c_cflag) < 0 || + put_user(termios->c_lflag, &termio->c_lflag) < 0 || + put_user(termios->c_line, &termio->c_line) < 0 || + copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0) + return -EFAULT; + + return 0; +} + +#ifdef TCGETS2 +static inline int user_termios_to_kernel_termios(struct ktermios *k, + struct termios2 __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios2)); +} + +static inline int kernel_termios_to_user_termios(struct termios2 __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios2)); +} + +static inline int user_termios_to_kernel_termios_1(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} + +static inline int kernel_termios_to_user_termios_1(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} +#else /* TCGETS2 */ +static inline int user_termios_to_kernel_termios(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} + +static inline int kernel_termios_to_user_termios(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} +#endif /* TCGETS2 */ + +#endif /* _ASM_GENERIC_TERMIOS_H */ diff --git a/include/asm-generic/timex.h b/include/asm-generic/timex.h new file mode 100644 index 000000000..50ba9b5ce --- /dev/null +++ b/include/asm-generic/timex.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_TIMEX_H +#define __ASM_GENERIC_TIMEX_H + +/* + * If you have a cycle counter, return the value here. + */ +typedef unsigned long cycles_t; +#ifndef get_cycles +static inline cycles_t get_cycles(void) +{ + return 0; +} +#endif + +/* + * Architectures are encouraged to implement read_current_timer + * and define this in order to avoid the expensive delay loop + * calibration during boot. + */ +#undef ARCH_HAS_READ_CURRENT_TIMER + +#endif /* __ASM_GENERIC_TIMEX_H */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h new file mode 100644 index 000000000..db72ad398 --- /dev/null +++ b/include/asm-generic/tlb.h @@ -0,0 +1,317 @@ +/* include/asm-generic/tlb.h + * + * Generic TLB shootdown code + * + * Copyright 2001 Red Hat, Inc. + * Based on code from mm/memory.c Copyright Linus Torvalds and others. + * + * Copyright 2011 Red Hat, Inc., Peter Zijlstra + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _ASM_GENERIC__TLB_H +#define _ASM_GENERIC__TLB_H + +#include +#include +#include +#include + +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +/* + * Semi RCU freeing of the page directories. + * + * This is needed by some architectures to implement software pagetable walkers. + * + * gup_fast() and other software pagetable walkers do a lockless page-table + * walk and therefore needs some synchronization with the freeing of the page + * directories. The chosen means to accomplish that is by disabling IRQs over + * the walk. 
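+ *
+ * (Very roughly, and only as a sketch, such a lockless walker does
+ *
+ *	local_irq_save(flags);
+ *	... walk pgd/p4d/pud/pmd/pte without taking page-table locks ...
+ *	local_irq_restore(flags);
+ *
+ * so that a table page cannot be freed and reused while the walk is in
+ * flight.)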
+ * + * Architectures that use IPIs to flush TLBs will then automagically DTRT, + * since we unlink the page, flush TLBs, free the page. Since the disabling of + * IRQs delays the completion of the TLB flush we can never observe an already + * freed page. + * + * Architectures that do not have this (PPC) need to delay the freeing by some + * other means, this is that means. + * + * What we do is batch the freed directory pages (tables) and RCU free them. + * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling + * holds off grace periods. + * + * However, in order to batch these pages we need to allocate storage, this + * allocation is deep inside the MM code and can thus easily fail on memory + * pressure. To guarantee progress we fall back to single table freeing, see + * the implementation of tlb_remove_table_one(). + * + */ +struct mmu_table_batch { + struct rcu_head rcu; + unsigned int nr; + void *tables[0]; +}; + +#define MAX_TABLE_BATCH \ + ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) + +extern void tlb_table_flush(struct mmu_gather *tlb); +extern void tlb_remove_table(struct mmu_gather *tlb, void *table); + +#endif + +/* + * If we can't allocate a page to make a big batch of page pointers + * to work on, then just handle a few from the on-stack structure. + */ +#define MMU_GATHER_BUNDLE 8 + +struct mmu_gather_batch { + struct mmu_gather_batch *next; + unsigned int nr; + unsigned int max; + struct page *pages[0]; +}; + +#define MAX_GATHER_BATCH \ + ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *)) + +/* + * Limit the maximum number of mmu_gather batches to reduce a risk of soft + * lockups for non-preemptible kernels on huge machines when a lot of memory + * is zapped during unmapping. + * 10K pages freed at once should be safe even without a preemption point. + */ +#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) + +/* struct mmu_gather is an opaque type used by the mm code for passing around + * any data needed by arch specific code for tlb_remove_page. 
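+ *
+ * The usual calling pattern in the core mm code is roughly (sketch only,
+ * with start/end being the virtual address range being torn down):
+ *
+ *	struct mmu_gather tlb;
+ *
+ *	tlb_gather_mmu(&tlb, mm, start, end);
+ *	... clear ptes, calling tlb_remove_tlb_entry()/tlb_remove_page() ...
+ *	tlb_finish_mmu(&tlb, start, end);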
+ */ +struct mmu_gather { + struct mm_struct *mm; +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + struct mmu_table_batch *batch; +#endif + unsigned long start; + unsigned long end; + /* we are in the middle of an operation to clear + * a full mm and can make some optimizations */ + unsigned int fullmm : 1, + /* we have performed an operation which + * requires a complete flush of the tlb */ + need_flush_all : 1; + + struct mmu_gather_batch *active; + struct mmu_gather_batch local; + struct page *__pages[MMU_GATHER_BUNDLE]; + unsigned int batch_count; + int page_size; +}; + +#define HAVE_GENERIC_MMU_GATHER + +void arch_tlb_gather_mmu(struct mmu_gather *tlb, + struct mm_struct *mm, unsigned long start, unsigned long end); +void tlb_flush_mmu(struct mmu_gather *tlb); +void arch_tlb_finish_mmu(struct mmu_gather *tlb, + unsigned long start, unsigned long end, bool force); +void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address, + unsigned long size); +extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + int page_size); + +static inline void __tlb_adjust_range(struct mmu_gather *tlb, + unsigned long address, + unsigned int range_size) +{ + tlb->start = min(tlb->start, address); + tlb->end = max(tlb->end, address + range_size); +} + +static inline void __tlb_reset_range(struct mmu_gather *tlb) +{ + if (tlb->fullmm) { + tlb->start = tlb->end = ~0; + } else { + tlb->start = TASK_SIZE; + tlb->end = 0; + } +} + +static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) +{ + if (!tlb->end) + return; + + tlb_flush(tlb); + mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); + __tlb_reset_range(tlb); +} + +static inline void tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + if (__tlb_remove_page_size(tlb, page, page_size)) + tlb_flush_mmu(tlb); +} + +static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + return __tlb_remove_page_size(tlb, page, PAGE_SIZE); +} + +/* tlb_remove_page + * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when + * required. + */ +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + return tlb_remove_page_size(tlb, page, PAGE_SIZE); +} + +#ifndef tlb_remove_check_page_size_change +#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change +static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, + unsigned int page_size) +{ + /* + * We don't care about page size change, just update + * mmu_gather page size here so that debug checks + * doesn't throw false warning. + */ +#ifdef CONFIG_DEBUG_VM + tlb->page_size = page_size; +#endif +} +#endif + +/* + * In the case of tlb vma handling, we can optimise these away in the + * case where we're doing a full MM flush. When we're doing a munmap, + * the vmas are adjusted to only cover the region to be torn down. + */ +#ifndef tlb_start_vma +#define tlb_start_vma(tlb, vma) do { } while (0) +#endif + +#define __tlb_end_vma(tlb, vma) \ + do { \ + if (!tlb->fullmm) \ + tlb_flush_mmu_tlbonly(tlb); \ + } while (0) + +#ifndef tlb_end_vma +#define tlb_end_vma __tlb_end_vma +#endif + +#ifndef __tlb_remove_tlb_entry +#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) +#endif + +/** + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. + * + * Record the fact that pte's were really unmapped by updating the range, + * so we can later optimise away the tlb invalidate. 
This helps when + * userspace is unmapping already-unmapped pages, which happens quite a lot. + */ +#define tlb_remove_tlb_entry(tlb, ptep, address) \ + do { \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __tlb_remove_tlb_entry(tlb, ptep, address); \ + } while (0) + +#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ + do { \ + __tlb_adjust_range(tlb, address, huge_page_size(h)); \ + __tlb_remove_tlb_entry(tlb, ptep, address); \ + } while (0) + +/** + * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation + * This is a nop so far, because only x86 needs it. + */ +#ifndef __tlb_remove_pmd_tlb_entry +#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0) +#endif + +#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ + do { \ + __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \ + __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ + } while (0) + +/** + * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb + * invalidation. This is a nop so far, because only x86 needs it. + */ +#ifndef __tlb_remove_pud_tlb_entry +#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0) +#endif + +#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ + do { \ + __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \ + __tlb_remove_pud_tlb_entry(tlb, pudp, address); \ + } while (0) + +/* + * For things like page tables caches (ie caching addresses "inside" the + * page tables, like x86 does), for legacy reasons, flushing an + * individual page had better flush the page table caches behind it. This + * is definitely how x86 works, for example. And if you have an + * architected non-legacy page table cache (which I'm not aware of + * anybody actually doing), you're going to have some architecturally + * explicit flushing for that, likely *separate* from a regular TLB entry + * flush, and thus you'd need more than just some range expansion.. 
+ * + * So if we ever find an architecture + * that would want something that odd, I think it is up to that + * architecture to do its own odd thing, not cause pain for others + * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com + * + * For now w.r.t page table cache, mark the range_size as PAGE_SIZE + */ + +#ifndef pte_free_tlb +#define pte_free_tlb(tlb, ptep, address) \ + do { \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __pte_free_tlb(tlb, ptep, address); \ + } while (0) +#endif + +#ifndef pmd_free_tlb +#define pmd_free_tlb(tlb, pmdp, address) \ + do { \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __pmd_free_tlb(tlb, pmdp, address); \ + } while (0) +#endif + +#ifndef __ARCH_HAS_4LEVEL_HACK +#ifndef pud_free_tlb +#define pud_free_tlb(tlb, pudp, address) \ + do { \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __pud_free_tlb(tlb, pudp, address); \ + } while (0) +#endif +#endif + +#ifndef __ARCH_HAS_5LEVEL_HACK +#ifndef p4d_free_tlb +#define p4d_free_tlb(tlb, pudp, address) \ + do { \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __p4d_free_tlb(tlb, pudp, address); \ + } while (0) +#endif +#endif + +#define tlb_migrate_finish(mm) do {} while (0) + +#endif /* _ASM_GENERIC__TLB_H */ diff --git a/include/asm-generic/tlbflush.h b/include/asm-generic/tlbflush.h new file mode 100644 index 000000000..dc2669289 --- /dev/null +++ b/include/asm-generic/tlbflush.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_TLBFLUSH_H +#define __ASM_GENERIC_TLBFLUSH_H +/* + * This is a dummy tlbflush implementation that can be used on all + * nommu architectures. + * If you have an MMU, you need to write your own functions. + */ +#ifdef CONFIG_MMU +#error need to implement an architecture specific asm/tlbflush.h +#endif + +#include + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + BUG(); +} + + +#endif /* __ASM_GENERIC_TLBFLUSH_H */ diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h new file mode 100644 index 000000000..5aa8705df --- /dev/null +++ b/include/asm-generic/topology.h @@ -0,0 +1,77 @@ +/* + * linux/include/asm-generic/topology.h + * + * Written by: Matthew Dobson, IBM Corporation + * + * Copyright (C) 2002, IBM Corp. + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to + */ +#ifndef _ASM_GENERIC_TOPOLOGY_H +#define _ASM_GENERIC_TOPOLOGY_H + +#ifndef CONFIG_NUMA + +/* Other architectures wishing to use this simple topology API should fill + in the below functions as appropriate in their own file. 
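+
+   As a purely illustrative example, NUMA-unaware code can still write
+
+	int nid = cpu_to_node(raw_smp_processor_id());
+	const struct cpumask *mask = cpumask_of_node(nid);
+
+   and will simply get node 0 and the online cpumask back here.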
*/ +#ifndef cpu_to_node +#define cpu_to_node(cpu) ((void)(cpu),0) +#endif +#ifndef set_numa_node +#define set_numa_node(node) +#endif +#ifndef set_cpu_numa_node +#define set_cpu_numa_node(cpu, node) +#endif +#ifndef cpu_to_mem +#define cpu_to_mem(cpu) ((void)(cpu),0) +#endif + +#ifndef cpumask_of_node + #ifdef CONFIG_NEED_MULTIPLE_NODES + #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) + #else + #define cpumask_of_node(node) ((void)(node), cpu_online_mask) + #endif +#endif +#ifndef pcibus_to_node +#define pcibus_to_node(bus) ((void)(bus), -1) +#endif + +#ifndef cpumask_of_pcibus +#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ + cpu_all_mask : \ + cpumask_of_node(pcibus_to_node(bus))) +#endif + +#endif /* CONFIG_NUMA */ + +#if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES) + +#ifndef set_numa_mem +#define set_numa_mem(node) +#endif +#ifndef set_cpu_numa_mem +#define set_cpu_numa_mem(cpu, node) +#endif + +#endif /* !CONFIG_NUMA || !CONFIG_HAVE_MEMORYLESS_NODES */ + +#endif /* _ASM_GENERIC_TOPOLOGY_H */ diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h new file mode 100644 index 000000000..cbbca2959 --- /dev/null +++ b/include/asm-generic/trace_clock.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_TRACE_CLOCK_H +#define _ASM_GENERIC_TRACE_CLOCK_H +/* + * Arch-specific trace clocks. + */ + +/* + * Additional trace clocks added to the trace_clocks + * array in kernel/trace/trace.c + * None if the architecture has not defined it. + */ +#ifndef ARCH_TRACE_CLOCKS +# define ARCH_TRACE_CLOCKS +#endif + +#endif /* _ASM_GENERIC_TRACE_CLOCK_H */ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h new file mode 100644 index 000000000..6b2e63df2 --- /dev/null +++ b/include/asm-generic/uaccess.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_UACCESS_H +#define __ASM_GENERIC_UACCESS_H + +/* + * User space memory access functions, these should work + * on any machine that has kernel and user data in the same + * address space, e.g. all NOMMU machines. + */ +#include + +#include + +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) + +#ifndef KERNEL_DS +#define KERNEL_DS MAKE_MM_SEG(~0UL) +#endif + +#ifndef USER_DS +#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) +#endif + +#ifndef get_fs +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) + +static inline void set_fs(mm_segment_t fs) +{ + current_thread_info()->addr_limit = fs; +} +#endif + +#ifndef segment_eq +#define segment_eq(a, b) ((a).seg == (b).seg) +#endif + +#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size)) + +/* + * The architecture should really override this if possible, at least + * doing a check on the get_fs() + */ +#ifndef __access_ok +static inline int __access_ok(unsigned long addr, unsigned long size) +{ + return 1; +} +#endif + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * This version just falls back to copy_{from,to}_user, which should + * provide a fast-path for small values. 
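+ *
+ * Minimal usage sketch (uptr being a hypothetical "int __user *"):
+ *
+ *	int val;
+ *
+ *	if (get_user(val, uptr))
+ *		return -EFAULT;
+ *	val++;
+ *	if (put_user(val, uptr))
+ *		return -EFAULT;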
+ */ +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __x = (x); \ + int __pu_err = -EFAULT; \ + __chk_user_ptr(ptr); \ + switch (sizeof (*(ptr))) { \ + case 1: \ + case 2: \ + case 4: \ + case 8: \ + __pu_err = __put_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + break; \ + default: \ + __put_user_bad(); \ + break; \ + } \ + __pu_err; \ +}) + +#define put_user(x, ptr) \ +({ \ + void __user *__p = (ptr); \ + might_fault(); \ + access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ? \ + __put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \ + -EFAULT; \ +}) + +#ifndef __put_user_fn + +static inline int __put_user_fn(size_t size, void __user *ptr, void *x) +{ + return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0; +} + +#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k) + +#endif + +extern int __put_user_bad(void) __attribute__((noreturn)); + +#define __get_user(x, ptr) \ +({ \ + int __gu_err = -EFAULT; \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: { \ + unsigned char __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 2: { \ + unsigned short __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 4: { \ + unsigned int __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 8: { \ + unsigned long long __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + default: \ + __get_user_bad(); \ + break; \ + } \ + __gu_err; \ +}) + +#define get_user(x, ptr) \ +({ \ + const void __user *__p = (ptr); \ + might_fault(); \ + access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \ + __get_user((x), (__typeof__(*(ptr)) __user *)__p) :\ + ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ +}) + +#ifndef __get_user_fn +static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) +{ + return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0; +} + +#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) + +#endif + +extern int __get_user_bad(void) __attribute__((noreturn)); + +/* + * Copy a null terminated string from userspace. + */ +#ifndef __strncpy_from_user +static inline long +__strncpy_from_user(char *dst, const char __user *src, long count) +{ + char *tmp; + strncpy(dst, (const char __force *)src, count); + for (tmp = dst; *tmp && count > 0; tmp++, count--) + ; + return (tmp - dst); +} +#endif + +static inline long +strncpy_from_user(char *dst, const char __user *src, long count) +{ + if (!access_ok(VERIFY_READ, src, 1)) + return -EFAULT; + return __strncpy_from_user(dst, src, count); +} + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 on exception, a value greater than N if too long + */ +#ifndef __strnlen_user +#define __strnlen_user(s, n) (strnlen((s), (n)) + 1) +#endif + +/* + * Unlike strnlen, strnlen_user includes the nul terminator in + * its returned count. Callers should check for a returned value + * greater than N as an indication the string is too long. 
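+ *
+ * For example (sketch only, MY_MAX being some caller-chosen limit), a
+ * caller might do:
+ *
+ *	long len = strnlen_user(ustr, MY_MAX);
+ *
+ *	if (len == 0)
+ *		return -EFAULT;
+ *	if (len > MY_MAX)
+ *		return -ENAMETOOLONG;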
+ */ +static inline long strnlen_user(const char __user *src, long n) +{ + if (!access_ok(VERIFY_READ, src, 1)) + return 0; + return __strnlen_user(src, n); +} + +/* + * Zero Userspace + */ +#ifndef __clear_user +static inline __must_check unsigned long +__clear_user(void __user *to, unsigned long n) +{ + memset((void __force *)to, 0, n); + return 0; +} +#endif + +static inline __must_check unsigned long +clear_user(void __user *to, unsigned long n) +{ + might_fault(); + if (!access_ok(VERIFY_WRITE, to, n)) + return n; + + return __clear_user(to, n); +} + +#include + +#endif /* __ASM_GENERIC_UACCESS_H */ diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h new file mode 100644 index 000000000..374c940e9 --- /dev/null +++ b/include/asm-generic/unaligned.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_UNALIGNED_H +#define __ASM_GENERIC_UNALIGNED_H + +/* + * This is the most generic implementation of unaligned accesses + * and should work almost anywhere. + */ +#include + +/* Set by the arch if it can handle unaligned accesses in hardware. */ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include +#endif + +#if defined(__LITTLE_ENDIAN) +# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include +# include +# endif +# include +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#elif defined(__BIG_ENDIAN) +# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include +# include +# endif +# include +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#else +# error need to define endianess +#endif + +#endif /* __ASM_GENERIC_UNALIGNED_H */ diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h new file mode 100644 index 000000000..cdf904265 --- /dev/null +++ b/include/asm-generic/unistd.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include + +/* + * These are required system calls, we should + * invert the logic eventually and let them + * be selected by default. + */ +#if __BITS_PER_LONG == 32 +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_LLSEEK +#endif diff --git a/include/asm-generic/user.h b/include/asm-generic/user.h new file mode 100644 index 000000000..35638c347 --- /dev/null +++ b/include/asm-generic/user.h @@ -0,0 +1,8 @@ +#ifndef __ASM_GENERIC_USER_H +#define __ASM_GENERIC_USER_H +/* + * This file may define a 'struct user' structure. However, it is only + * used for a.out files, which are not supported on new architectures. + */ + +#endif /* __ASM_GENERIC_USER_H */ diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h new file mode 100644 index 000000000..adf91a783 --- /dev/null +++ b/include/asm-generic/vga.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Access to VGA videoram + * + * (c) 1998 Martin Mares + */ +#ifndef __ASM_GENERIC_VGA_H +#define __ASM_GENERIC_VGA_H + +/* + * On most architectures that support VGA, we can just + * recalculate addresses and then access the videoram + * directly without any black magic. + * + * Everyone else needs to ioremap the address and use + * proper I/O accesses. 
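+ *
+ * Illustrative sketch: the colour text-mode buffer at physical 0xb8000
+ * could be touched as
+ *
+ *	char *vram = (char *)VGA_MAP_MEM(0xb8000, 0x8000);
+ *
+ *	vga_writeb('A', vram);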
+ */ +#ifndef VGA_MAP_MEM +#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) +#endif + +#define vga_readb(x) (*(x)) +#define vga_writeb(x, y) (*(y) = (x)) + +#endif /* _ASM_GENERIC_VGA_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h new file mode 100644 index 000000000..a26e6f503 --- /dev/null +++ b/include/asm-generic/vmlinux.lds.h @@ -0,0 +1,1001 @@ +/* + * Helper macros to support writing architecture specific + * linker scripts. + * + * A minimal linker scripts has following content: + * [This is a sample, architectures may have special requiriements] + * + * OUTPUT_FORMAT(...) + * OUTPUT_ARCH(...) + * ENTRY(...) + * SECTIONS + * { + * . = START; + * __init_begin = .; + * HEAD_TEXT_SECTION + * INIT_TEXT_SECTION(PAGE_SIZE) + * INIT_DATA_SECTION(...) + * PERCPU_SECTION(CACHELINE_SIZE) + * __init_end = .; + * + * _stext = .; + * TEXT_SECTION = 0 + * _etext = .; + * + * _sdata = .; + * RO_DATA_SECTION(PAGE_SIZE) + * RW_DATA_SECTION(...) + * _edata = .; + * + * EXCEPTION_TABLE(...) + * NOTES + * + * BSS_SECTION(0, 0, 0) + * _end = .; + * + * STABS_DEBUG + * DWARF_DEBUG + * + * DISCARDS // must be the last + * } + * + * [__init_begin, __init_end] is the init section that may be freed after init + * // __init_begin and __init_end should be page aligned, so that we can + * // free the whole .init memory + * [_stext, _etext] is the text section + * [_sdata, _edata] is the data section + * + * Some of the included output section have their own set of constants. + * Examples are: [__initramfs_start, __initramfs_end] for initramfs and + * [__nosave_begin, __nosave_end] for the nosave data + */ + +#ifndef LOAD_OFFSET +#define LOAD_OFFSET 0 +#endif + +/* Align . to a 8 byte boundary equals to maximum function alignment. */ +#define ALIGN_FUNCTION() . = ALIGN(8) + +/* + * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which + * generates .data.identifier sections, which need to be pulled in with + * .data. We don't want to pull in .data..other sections, which Linux + * has defined. Same for text and bss. + * + * RODATA_MAIN is not used because existing code already defines .rodata.x + * sections to be brought in with rodata. + */ +#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION +#define TEXT_MAIN .text .text.[0-9a-zA-Z_]* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* +#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* +#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* +#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* +#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* +#else +#define TEXT_MAIN .text +#define DATA_MAIN .data +#define SDATA_MAIN .sdata +#define RODATA_MAIN .rodata +#define BSS_MAIN .bss +#define SBSS_MAIN .sbss +#endif + +/* + * Align to a 32 byte boundary equal to the + * alignment gcc 4.5 uses for a struct + */ +#define STRUCT_ALIGNMENT 32 +#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT) + +/* The actual configuration determine if the init/exit sections + * are handled as text/data or they can be discarded (which + * often happens at runtime) + */ +#ifdef CONFIG_HOTPLUG_CPU +#define CPU_KEEP(sec) *(.cpu##sec) +#define CPU_DISCARD(sec) +#else +#define CPU_KEEP(sec) +#define CPU_DISCARD(sec) *(.cpu##sec) +#endif + +#if defined(CONFIG_MEMORY_HOTPLUG) +#define MEM_KEEP(sec) *(.mem##sec) +#define MEM_DISCARD(sec) +#else +#define MEM_KEEP(sec) +#define MEM_DISCARD(sec) *(.mem##sec) +#endif + +#ifdef CONFIG_FTRACE_MCOUNT_RECORD +#define MCOUNT_REC() . 
= ALIGN(8); \ + __start_mcount_loc = .; \ + KEEP(*(__mcount_loc)) \ + __stop_mcount_loc = .; +#else +#define MCOUNT_REC() +#endif + +#ifdef CONFIG_TRACE_BRANCH_PROFILING +#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \ + KEEP(*(_ftrace_annotated_branch)) \ + __stop_annotated_branch_profile = .; +#else +#define LIKELY_PROFILE() +#endif + +#ifdef CONFIG_PROFILE_ALL_BRANCHES +#define BRANCH_PROFILE() __start_branch_profile = .; \ + KEEP(*(_ftrace_branch)) \ + __stop_branch_profile = .; +#else +#define BRANCH_PROFILE() +#endif + +#ifdef CONFIG_KPROBES +#define KPROBE_BLACKLIST() . = ALIGN(8); \ + __start_kprobe_blacklist = .; \ + KEEP(*(_kprobe_blacklist)) \ + __stop_kprobe_blacklist = .; +#else +#define KPROBE_BLACKLIST() +#endif + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION +#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \ + __start_error_injection_whitelist = .; \ + KEEP(*(_error_injection_whitelist)) \ + __stop_error_injection_whitelist = .; +#else +#define ERROR_INJECT_WHITELIST() +#endif + +#ifdef CONFIG_EVENT_TRACING +#define FTRACE_EVENTS() . = ALIGN(8); \ + __start_ftrace_events = .; \ + KEEP(*(_ftrace_events)) \ + __stop_ftrace_events = .; \ + __start_ftrace_eval_maps = .; \ + KEEP(*(_ftrace_eval_map)) \ + __stop_ftrace_eval_maps = .; +#else +#define FTRACE_EVENTS() +#endif + +#ifdef CONFIG_TRACING +#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \ + KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \ + __stop___trace_bprintk_fmt = .; +#define TRACEPOINT_STR() __start___tracepoint_str = .; \ + KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \ + __stop___tracepoint_str = .; +#else +#define TRACE_PRINTKS() +#define TRACEPOINT_STR() +#endif + +#ifdef CONFIG_FTRACE_SYSCALLS +#define TRACE_SYSCALLS() . = ALIGN(8); \ + __start_syscalls_metadata = .; \ + KEEP(*(__syscalls_metadata)) \ + __stop_syscalls_metadata = .; +#else +#define TRACE_SYSCALLS() +#endif + +#ifdef CONFIG_BPF_EVENTS +#define BPF_RAW_TP() STRUCT_ALIGN(); \ + __start__bpf_raw_tp = .; \ + KEEP(*(__bpf_raw_tp_map)) \ + __stop__bpf_raw_tp = .; +#else +#define BPF_RAW_TP() +#endif + +#ifdef CONFIG_SERIAL_EARLYCON +#define EARLYCON_TABLE() . = ALIGN(8); \ + __earlycon_table = .; \ + KEEP(*(__earlycon_table)) \ + __earlycon_table_end = .; +#else +#define EARLYCON_TABLE() +#endif + +#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name) +#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name) +#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name) +#define _OF_TABLE_0(name) +#define _OF_TABLE_1(name) \ + . = ALIGN(8); \ + __##name##_of_table = .; \ + KEEP(*(__##name##_of_table)) \ + KEEP(*(__##name##_of_table_end)) + +#define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer) +#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) +#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) +#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) +#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) +#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method) + +#ifdef CONFIG_ACPI +#define ACPI_PROBE_TABLE(name) \ + . 
= ALIGN(8); \ + __##name##_acpi_probe_table = .; \ + KEEP(*(__##name##_acpi_probe_table)) \ + __##name##_acpi_probe_table_end = .; +#else +#define ACPI_PROBE_TABLE(name) +#endif + +#define KERNEL_DTB() \ + STRUCT_ALIGN(); \ + __dtb_start = .; \ + KEEP(*(.dtb.init.rodata)) \ + __dtb_end = .; + +/* + * .data section + */ +#define DATA_DATA \ + *(.xiptext) \ + *(DATA_MAIN) \ + *(.ref.data) \ + *(.data..shared_aligned) /* percpu related */ \ + MEM_KEEP(init.data*) \ + MEM_KEEP(exit.data*) \ + *(.data.unlikely) \ + __start_once = .; \ + *(.data.once) \ + __end_once = .; \ + STRUCT_ALIGN(); \ + *(__tracepoints) \ + /* implement dynamic printk debug */ \ + . = ALIGN(8); \ + __start___jump_table = .; \ + KEEP(*(__jump_table)) \ + __stop___jump_table = .; \ + . = ALIGN(8); \ + __start___verbose = .; \ + KEEP(*(__verbose)) \ + __stop___verbose = .; \ + LIKELY_PROFILE() \ + BRANCH_PROFILE() \ + TRACE_PRINTKS() \ + BPF_RAW_TP() \ + TRACEPOINT_STR() + +/* + * Data section helpers + */ +#define NOSAVE_DATA \ + . = ALIGN(PAGE_SIZE); \ + __nosave_begin = .; \ + *(.data..nosave) \ + . = ALIGN(PAGE_SIZE); \ + __nosave_end = .; + +#define PAGE_ALIGNED_DATA(page_align) \ + . = ALIGN(page_align); \ + *(.data..page_aligned) \ + . = ALIGN(page_align); + +#define READ_MOSTLY_DATA(align) \ + . = ALIGN(align); \ + *(.data..read_mostly) \ + . = ALIGN(align); + +#define CACHELINE_ALIGNED_DATA(align) \ + . = ALIGN(align); \ + *(.data..cacheline_aligned) + +#define INIT_TASK_DATA(align) \ + . = ALIGN(align); \ + __start_init_task = .; \ + init_thread_union = .; \ + init_stack = .; \ + KEEP(*(.data..init_task)) \ + KEEP(*(.data..init_thread_info)) \ + . = __start_init_task + THREAD_SIZE; \ + __end_init_task = .; + +/* + * Allow architectures to handle ro_after_init data on their + * own by defining an empty RO_AFTER_INIT_DATA. + */ +#ifndef RO_AFTER_INIT_DATA +#define RO_AFTER_INIT_DATA \ + . = ALIGN(8); \ + __start_ro_after_init = .; \ + *(.data..ro_after_init) \ + __end_ro_after_init = .; +#endif + +/* + * Read only Data + */ +#define RO_DATA_SECTION(align) \ + . = ALIGN((align)); \ + .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ + __start_rodata = .; \ + *(.rodata) *(.rodata.*) \ + RO_AFTER_INIT_DATA /* Read only after init */ \ + KEEP(*(__vermagic)) /* Kernel version magic */ \ + . 
= ALIGN(8); \ + __start___tracepoints_ptrs = .; \ + KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \ + __stop___tracepoints_ptrs = .; \ + *(__tracepoints_strings)/* Tracepoints: strings */ \ + } \ + \ + .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \ + *(.rodata1) \ + } \ + \ + /* PCI quirks */ \ + .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ + __start_pci_fixups_early = .; \ + KEEP(*(.pci_fixup_early)) \ + __end_pci_fixups_early = .; \ + __start_pci_fixups_header = .; \ + KEEP(*(.pci_fixup_header)) \ + __end_pci_fixups_header = .; \ + __start_pci_fixups_final = .; \ + KEEP(*(.pci_fixup_final)) \ + __end_pci_fixups_final = .; \ + __start_pci_fixups_enable = .; \ + KEEP(*(.pci_fixup_enable)) \ + __end_pci_fixups_enable = .; \ + __start_pci_fixups_resume = .; \ + KEEP(*(.pci_fixup_resume)) \ + __end_pci_fixups_resume = .; \ + __start_pci_fixups_resume_early = .; \ + KEEP(*(.pci_fixup_resume_early)) \ + __end_pci_fixups_resume_early = .; \ + __start_pci_fixups_suspend = .; \ + KEEP(*(.pci_fixup_suspend)) \ + __end_pci_fixups_suspend = .; \ + __start_pci_fixups_suspend_late = .; \ + KEEP(*(.pci_fixup_suspend_late)) \ + __end_pci_fixups_suspend_late = .; \ + } \ + \ + /* Built-in firmware blobs */ \ + .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \ + __start_builtin_fw = .; \ + KEEP(*(.builtin_fw)) \ + __end_builtin_fw = .; \ + } \ + \ + TRACEDATA \ + \ + /* Kernel symbol table: Normal symbols */ \ + __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ + __start___ksymtab = .; \ + KEEP(*(SORT(___ksymtab+*))) \ + __stop___ksymtab = .; \ + } \ + \ + /* Kernel symbol table: GPL-only symbols */ \ + __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ + __start___ksymtab_gpl = .; \ + KEEP(*(SORT(___ksymtab_gpl+*))) \ + __stop___ksymtab_gpl = .; \ + } \ + \ + /* Kernel symbol table: Normal unused symbols */ \ + __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ + __start___ksymtab_unused = .; \ + KEEP(*(SORT(___ksymtab_unused+*))) \ + __stop___ksymtab_unused = .; \ + } \ + \ + /* Kernel symbol table: GPL-only unused symbols */ \ + __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ + __start___ksymtab_unused_gpl = .; \ + KEEP(*(SORT(___ksymtab_unused_gpl+*))) \ + __stop___ksymtab_unused_gpl = .; \ + } \ + \ + /* Kernel symbol table: GPL-future-only symbols */ \ + __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ + __start___ksymtab_gpl_future = .; \ + KEEP(*(SORT(___ksymtab_gpl_future+*))) \ + __stop___ksymtab_gpl_future = .; \ + } \ + \ + /* Kernel symbol table: Normal symbols */ \ + __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ + __start___kcrctab = .; \ + KEEP(*(SORT(___kcrctab+*))) \ + __stop___kcrctab = .; \ + } \ + \ + /* Kernel symbol table: GPL-only symbols */ \ + __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ + __start___kcrctab_gpl = .; \ + KEEP(*(SORT(___kcrctab_gpl+*))) \ + __stop___kcrctab_gpl = .; \ + } \ + \ + /* Kernel symbol table: Normal unused symbols */ \ + __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ + __start___kcrctab_unused = .; \ + KEEP(*(SORT(___kcrctab_unused+*))) \ + __stop___kcrctab_unused = .; \ + } \ + \ + /* Kernel symbol table: GPL-only unused symbols */ \ + __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ + __start___kcrctab_unused_gpl = .; \ + KEEP(*(SORT(___kcrctab_unused_gpl+*))) \ + __stop___kcrctab_unused_gpl = .; \ + } \ + \ + /* Kernel symbol table: GPL-future-only symbols */ \ + __kcrctab_gpl_future : 
AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ + __start___kcrctab_gpl_future = .; \ + KEEP(*(SORT(___kcrctab_gpl_future+*))) \ + __stop___kcrctab_gpl_future = .; \ + } \ + \ + /* Kernel symbol table: strings */ \ + __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ + *(__ksymtab_strings) \ + } \ + \ + /* __*init sections */ \ + __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ + *(.ref.rodata) \ + MEM_KEEP(init.rodata) \ + MEM_KEEP(exit.rodata) \ + } \ + \ + /* Built-in module parameters. */ \ + __param : AT(ADDR(__param) - LOAD_OFFSET) { \ + __start___param = .; \ + KEEP(*(__param)) \ + __stop___param = .; \ + } \ + \ + /* Built-in module versions. */ \ + __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \ + __start___modver = .; \ + KEEP(*(__modver)) \ + __stop___modver = .; \ + . = ALIGN((align)); \ + __end_rodata = .; \ + } \ + . = ALIGN((align)); + +/* RODATA & RO_DATA provided for backward compatibility. + * All archs are supposed to use RO_DATA() */ +#define RODATA RO_DATA_SECTION(4096) +#define RO_DATA(align) RO_DATA_SECTION(align) + +#define SECURITY_INIT \ + .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ + __security_initcall_start = .; \ + KEEP(*(.security_initcall.init)) \ + __security_initcall_end = .; \ + } + +/* + * Non-instrumentable text section + */ +#define NOINSTR_TEXT \ + ALIGN_FUNCTION(); \ + __noinstr_text_start = .; \ + *(.noinstr.text) \ + __noinstr_text_end = .; + +/* + * .text section. Map to function alignment to avoid address changes + * during second ld run in second ld pass when generating System.map + * + * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead + * code elimination is enabled, so these sections should be converted + * to use ".." first. + */ +#define TEXT_TEXT \ + ALIGN_FUNCTION(); \ + *(.text.hot .text.hot.*) \ + *(TEXT_MAIN .text.fixup) \ + *(.text.unlikely .text.unlikely.*) \ + *(.text.unknown .text.unknown.*) \ + NOINSTR_TEXT \ + *(.text..refcount) \ + *(.ref.text) \ + *(.text.asan.* .text.tsan.*) \ + MEM_KEEP(init.text*) \ + MEM_KEEP(exit.text*) \ + + +/* sched.text is aling to function alignment to secure we have same + * address even at second ld pass when generating System.map */ +#define SCHED_TEXT \ + ALIGN_FUNCTION(); \ + __sched_text_start = .; \ + *(.sched.text) \ + __sched_text_end = .; + +/* spinlock.text is aling to function alignment to secure we have same + * address even at second ld pass when generating System.map */ +#define LOCK_TEXT \ + ALIGN_FUNCTION(); \ + __lock_text_start = .; \ + *(.spinlock.text) \ + __lock_text_end = .; + +#define CPUIDLE_TEXT \ + ALIGN_FUNCTION(); \ + __cpuidle_text_start = .; \ + *(.cpuidle.text) \ + __cpuidle_text_end = .; + +#define KPROBES_TEXT \ + ALIGN_FUNCTION(); \ + __kprobes_text_start = .; \ + *(.kprobes.text) \ + __kprobes_text_end = .; + +#define ENTRY_TEXT \ + ALIGN_FUNCTION(); \ + __entry_text_start = .; \ + *(.entry.text) \ + __entry_text_end = .; + +#define IRQENTRY_TEXT \ + ALIGN_FUNCTION(); \ + __irqentry_text_start = .; \ + *(.irqentry.text) \ + __irqentry_text_end = .; + +#define SOFTIRQENTRY_TEXT \ + ALIGN_FUNCTION(); \ + __softirqentry_text_start = .; \ + *(.softirqentry.text) \ + __softirqentry_text_end = .; + +/* Section used for early init (in .S files) */ +#define HEAD_TEXT KEEP(*(.head.text)) + +#define HEAD_TEXT_SECTION \ + .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \ + HEAD_TEXT \ + } + +/* + * Exception table + */ +#define EXCEPTION_TABLE(align) \ + . 
= ALIGN(align); \ + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \ + __start___ex_table = .; \ + KEEP(*(__ex_table)) \ + __stop___ex_table = .; \ + } + +/* + * Init task + */ +#define INIT_TASK_DATA_SECTION(align) \ + . = ALIGN(align); \ + .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \ + INIT_TASK_DATA(align) \ + } + +#ifdef CONFIG_CONSTRUCTORS +#define KERNEL_CTORS() . = ALIGN(8); \ + __ctors_start = .; \ + KEEP(*(.ctors)) \ + KEEP(*(SORT(.init_array.*))) \ + KEEP(*(.init_array)) \ + __ctors_end = .; +#else +#define KERNEL_CTORS() +#endif + +/* init and exit section handling */ +#define INIT_DATA \ + KEEP(*(SORT(___kentry+*))) \ + *(.init.data init.data.*) \ + MEM_DISCARD(init.data*) \ + KERNEL_CTORS() \ + MCOUNT_REC() \ + *(.init.rodata .init.rodata.*) \ + FTRACE_EVENTS() \ + TRACE_SYSCALLS() \ + KPROBE_BLACKLIST() \ + ERROR_INJECT_WHITELIST() \ + MEM_DISCARD(init.rodata) \ + CLK_OF_TABLES() \ + RESERVEDMEM_OF_TABLES() \ + TIMER_OF_TABLES() \ + CPU_METHOD_OF_TABLES() \ + CPUIDLE_METHOD_OF_TABLES() \ + KERNEL_DTB() \ + IRQCHIP_OF_MATCH_TABLE() \ + ACPI_PROBE_TABLE(irqchip) \ + ACPI_PROBE_TABLE(timer) \ + EARLYCON_TABLE() + +#define INIT_TEXT \ + *(.init.text .init.text.*) \ + *(.text.startup) \ + MEM_DISCARD(init.text*) + +#define EXIT_DATA \ + *(.exit.data .exit.data.*) \ + *(.fini_array .fini_array.*) \ + *(.dtors .dtors.*) \ + MEM_DISCARD(exit.data*) \ + MEM_DISCARD(exit.rodata*) + +#define EXIT_TEXT \ + *(.exit.text) \ + *(.text.exit) \ + MEM_DISCARD(exit.text) + +#define EXIT_CALL \ + *(.exitcall.exit) + +/* + * bss (Block Started by Symbol) - uninitialized data + * zeroed during startup + */ +#define SBSS(sbss_align) \ + . = ALIGN(sbss_align); \ + .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ + *(.dynsbss) \ + *(SBSS_MAIN) \ + *(.scommon) \ + } + +/* + * Allow archectures to redefine BSS_FIRST_SECTIONS to add extra + * sections to the front of bss. + */ +#ifndef BSS_FIRST_SECTIONS +#define BSS_FIRST_SECTIONS +#endif + +#define BSS(bss_align) \ + . = ALIGN(bss_align); \ + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ + BSS_FIRST_SECTIONS \ + . = ALIGN(PAGE_SIZE); \ + *(.bss..page_aligned) \ + . = ALIGN(PAGE_SIZE); \ + *(.dynbss) \ + *(BSS_MAIN) \ + *(COMMON) \ + } + +/* + * DWARF debug sections. + * Symbols in the DWARF debugging sections are relative to + * the beginning of the section so we begin them at 0. 
+ */ +#define DWARF_DEBUG \ + /* DWARF 1 */ \ + .debug 0 : { *(.debug) } \ + .line 0 : { *(.line) } \ + /* GNU DWARF 1 extensions */ \ + .debug_srcinfo 0 : { *(.debug_srcinfo) } \ + .debug_sfnames 0 : { *(.debug_sfnames) } \ + /* DWARF 1.1 and DWARF 2 */ \ + .debug_aranges 0 : { *(.debug_aranges) } \ + .debug_pubnames 0 : { *(.debug_pubnames) } \ + /* DWARF 2 */ \ + .debug_info 0 : { *(.debug_info \ + .gnu.linkonce.wi.*) } \ + .debug_abbrev 0 : { *(.debug_abbrev) } \ + .debug_line 0 : { *(.debug_line) } \ + .debug_frame 0 : { *(.debug_frame) } \ + .debug_str 0 : { *(.debug_str) } \ + .debug_loc 0 : { *(.debug_loc) } \ + .debug_macinfo 0 : { *(.debug_macinfo) } \ + .debug_pubtypes 0 : { *(.debug_pubtypes) } \ + /* DWARF 3 */ \ + .debug_ranges 0 : { *(.debug_ranges) } \ + /* SGI/MIPS DWARF 2 extensions */ \ + .debug_weaknames 0 : { *(.debug_weaknames) } \ + .debug_funcnames 0 : { *(.debug_funcnames) } \ + .debug_typenames 0 : { *(.debug_typenames) } \ + .debug_varnames 0 : { *(.debug_varnames) } \ + /* GNU DWARF 2 extensions */ \ + .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \ + .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \ + /* DWARF 4 */ \ + .debug_types 0 : { *(.debug_types) } \ + /* DWARF 5 */ \ + .debug_addr 0 : { *(.debug_addr) } \ + .debug_line_str 0 : { *(.debug_line_str) } \ + .debug_loclists 0 : { *(.debug_loclists) } \ + .debug_macro 0 : { *(.debug_macro) } \ + .debug_names 0 : { *(.debug_names) } \ + .debug_rnglists 0 : { *(.debug_rnglists) } \ + .debug_str_offsets 0 : { *(.debug_str_offsets) } + + /* Stabs debugging sections. */ +#define STABS_DEBUG \ + .stab 0 : { *(.stab) } \ + .stabstr 0 : { *(.stabstr) } \ + .stab.excl 0 : { *(.stab.excl) } \ + .stab.exclstr 0 : { *(.stab.exclstr) } \ + .stab.index 0 : { *(.stab.index) } \ + .stab.indexstr 0 : { *(.stab.indexstr) } \ + .comment 0 : { *(.comment) } + +#ifdef CONFIG_GENERIC_BUG +#define BUG_TABLE \ + . = ALIGN(8); \ + __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ + __start___bug_table = .; \ + KEEP(*(__bug_table)) \ + __stop___bug_table = .; \ + } +#else +#define BUG_TABLE +#endif + +#ifdef CONFIG_UNWINDER_ORC +#define ORC_UNWIND_TABLE \ + . = ALIGN(4); \ + .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \ + __start_orc_unwind_ip = .; \ + KEEP(*(.orc_unwind_ip)) \ + __stop_orc_unwind_ip = .; \ + } \ + . = ALIGN(2); \ + .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \ + __start_orc_unwind = .; \ + KEEP(*(.orc_unwind)) \ + __stop_orc_unwind = .; \ + } \ + . = ALIGN(4); \ + .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \ + orc_lookup = .; \ + . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \ + LOOKUP_BLOCK_SIZE) + 1) * 4; \ + orc_lookup_end = .; \ + } +#else +#define ORC_UNWIND_TABLE +#endif + +#ifdef CONFIG_PM_TRACE +#define TRACEDATA \ + . = ALIGN(4); \ + .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \ + __tracedata_start = .; \ + KEEP(*(.tracedata)) \ + __tracedata_end = .; \ + } +#else +#define TRACEDATA +#endif + +#define NOTES \ + .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ + __start_notes = .; \ + KEEP(*(.note.*)) \ + __stop_notes = .; \ + } + +#define INIT_SETUP(initsetup_align) \ + . 
= ALIGN(initsetup_align); \ + __setup_start = .; \ + KEEP(*(.init.setup)) \ + __setup_end = .; + +#define INIT_CALLS_LEVEL(level) \ + __initcall##level##_start = .; \ + KEEP(*(.initcall##level##.init)) \ + KEEP(*(.initcall##level##s.init)) \ + +#define INIT_CALLS \ + __initcall_start = .; \ + KEEP(*(.initcallearly.init)) \ + INIT_CALLS_LEVEL(0) \ + INIT_CALLS_LEVEL(1) \ + INIT_CALLS_LEVEL(2) \ + INIT_CALLS_LEVEL(3) \ + INIT_CALLS_LEVEL(4) \ + INIT_CALLS_LEVEL(5) \ + INIT_CALLS_LEVEL(rootfs) \ + INIT_CALLS_LEVEL(6) \ + INIT_CALLS_LEVEL(7) \ + __initcall_end = .; + +#define CON_INITCALL \ + __con_initcall_start = .; \ + KEEP(*(.con_initcall.init)) \ + __con_initcall_end = .; + +#define SECURITY_INITCALL \ + __security_initcall_start = .; \ + KEEP(*(.security_initcall.init)) \ + __security_initcall_end = .; + +#ifdef CONFIG_BLK_DEV_INITRD +#define INIT_RAM_FS \ + . = ALIGN(4); \ + __initramfs_start = .; \ + KEEP(*(.init.ramfs)) \ + . = ALIGN(8); \ + KEEP(*(.init.ramfs.info)) +#else +#define INIT_RAM_FS +#endif + +/* + * Memory encryption operates on a page basis. Since we need to clear + * the memory encryption mask for this section, it needs to be aligned + * on a page boundary and be a page-size multiple in length. + * + * Note: We use a separate section so that only this section gets + * decrypted to avoid exposing more than we wish. + */ +#ifdef CONFIG_AMD_MEM_ENCRYPT +#define PERCPU_DECRYPTED_SECTION \ + . = ALIGN(PAGE_SIZE); \ + *(.data..decrypted) \ + *(.data..percpu..decrypted) \ + . = ALIGN(PAGE_SIZE); +#else +#define PERCPU_DECRYPTED_SECTION +#endif + + +/* + * Default discarded sections. + * + * Some archs want to discard exit text/data at runtime rather than + * link time due to cross-section references such as alt instructions, + * bug table, eh_frame, etc. DISCARDS must be the last of output + * section definitions so that such archs put those in earlier section + * definitions. + */ +#define DISCARDS \ + /DISCARD/ : { \ + EXIT_TEXT \ + EXIT_DATA \ + EXIT_CALL \ + *(.discard) \ + *(.discard.*) \ + } + +/** + * PERCPU_INPUT - the percpu input sections + * @cacheline: cacheline size + * + * The core percpu section names and core symbols which do not rely + * directly upon load addresses. + * + * @cacheline is used to align subsections to avoid false cacheline + * sharing between subsections for different purposes. + */ +#define PERCPU_INPUT(cacheline) \ + __per_cpu_start = .; \ + *(.data..percpu..first) \ + . = ALIGN(PAGE_SIZE); \ + *(.data..percpu..page_aligned) \ + . = ALIGN(cacheline); \ + *(.data..percpu..read_mostly) \ + . = ALIGN(cacheline); \ + *(.data..percpu) \ + *(.data..percpu..shared_aligned) \ + PERCPU_DECRYPTED_SECTION \ + __per_cpu_end = .; + +/** + * PERCPU_VADDR - define output section for percpu area + * @cacheline: cacheline size + * @vaddr: explicit base address (optional) + * @phdr: destination PHDR (optional) + * + * Macro which expands to output section for percpu area. + * + * @cacheline is used to align subsections to avoid false cacheline + * sharing between subsections for different purposes. + * + * If @vaddr is not blank, it specifies explicit base address and all + * percpu symbols will be offset from the given address. If blank, + * @vaddr always equals @laddr + LOAD_OFFSET. + * + * @phdr defines the output PHDR to use if not blank. Be warned that + * output PHDR is sticky. If @phdr is specified, the next output + * section in the linker script will go there too. @phdr should have + * a leading colon. 
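+ * (A hypothetical architecture linker script might, for instance, invoke
+ * PERCPU_VADDR(L1_CACHE_BYTES, 0, :percpu) to place the percpu area at
+ * virtual address 0 inside its own "percpu" program header.)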
+ * + * Note that this macros defines __per_cpu_load as an absolute symbol. + * If there is no need to put the percpu section at a predetermined + * address, use PERCPU_SECTION. + */ +#define PERCPU_VADDR(cacheline, vaddr, phdr) \ + __per_cpu_load = .; \ + .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \ + PERCPU_INPUT(cacheline) \ + } phdr \ + . = __per_cpu_load + SIZEOF(.data..percpu); + +/** + * PERCPU_SECTION - define output section for percpu area, simple version + * @cacheline: cacheline size + * + * Align to PAGE_SIZE and outputs output section for percpu area. This + * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and + * __per_cpu_start will be identical. + * + * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,) + * except that __per_cpu_load is defined as a relative symbol against + * .data..percpu which is required for relocatable x86_32 configuration. + */ +#define PERCPU_SECTION(cacheline) \ + . = ALIGN(PAGE_SIZE); \ + .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ + __per_cpu_load = .; \ + PERCPU_INPUT(cacheline) \ + } + + +/* + * Definition of the high level *_SECTION macros + * They will fit only a subset of the architectures + */ + + +/* + * Writeable data. + * All sections are combined in a single .data section. + * The sections following CONSTRUCTORS are arranged so their + * typical alignment matches. + * A cacheline is typical/always less than a PAGE_SIZE so + * the sections that has this restriction (or similar) + * is located before the ones requiring PAGE_SIZE alignment. + * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which + * matches the requirement of PAGE_ALIGNED_DATA. + * + * use 0 as page_align if page_aligned data is not used */ +#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \ + . = ALIGN(PAGE_SIZE); \ + .data : AT(ADDR(.data) - LOAD_OFFSET) { \ + INIT_TASK_DATA(inittask) \ + NOSAVE_DATA \ + PAGE_ALIGNED_DATA(pagealigned) \ + CACHELINE_ALIGNED_DATA(cacheline) \ + READ_MOSTLY_DATA(cacheline) \ + DATA_DATA \ + CONSTRUCTORS \ + } \ + BUG_TABLE \ + +#define INIT_TEXT_SECTION(inittext_align) \ + . = ALIGN(inittext_align); \ + .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \ + _sinittext = .; \ + INIT_TEXT \ + _einittext = .; \ + } + +#define INIT_DATA_SECTION(initsetup_align) \ + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \ + INIT_DATA \ + INIT_SETUP(initsetup_align) \ + INIT_CALLS \ + CON_INITCALL \ + SECURITY_INITCALL \ + INIT_RAM_FS \ + } + +#define BSS_SECTION(sbss_align, bss_align, stop_align) \ + . = ALIGN(sbss_align); \ + __bss_start = .; \ + SBSS(sbss_align) \ + BSS(bss_align) \ + . 
= ALIGN(stop_align); \ + __bss_stop = .; diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h new file mode 100644 index 000000000..b1a49677f --- /dev/null +++ b/include/asm-generic/vtime.h @@ -0,0 +1 @@ +/* no content, but patch(1) dislikes empty files */ diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h new file mode 100644 index 000000000..20c93f08c --- /dev/null +++ b/include/asm-generic/word-at-a-time.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_WORD_AT_A_TIME_H +#define _ASM_WORD_AT_A_TIME_H + +#include +#include + +#ifdef __BIG_ENDIAN + +struct word_at_a_time { + const unsigned long high_bits, low_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) } + +/* Bit set in the bytes that have a zero */ +static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c) +{ + unsigned long mask = (val & c->low_bits) + c->low_bits; + return ~(mask | rhs); +} + +#define create_zero_mask(mask) (mask) + +static inline long find_zero(unsigned long mask) +{ + long byte = 0; +#ifdef CONFIG_64BIT + if (mask >> 32) + mask >>= 32; + else + byte = 4; +#endif + if (mask >> 16) + mask >>= 16; + else + byte += 2; + return (mask >> 8) ? byte : byte + 1; +} + +static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) +{ + unsigned long rhs = val | c->low_bits; + *data = rhs; + return (val + c->high_bits) & ~rhs; +} + +#ifndef zero_bytemask +#define zero_bytemask(mask) (~1ul << __fls(mask)) +#endif + +#else + +/* + * The optimal byte mask counting is probably going to be something + * that is architecture-specific. If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. + */ +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +#ifdef CONFIG_64BIT + +/* + * Jan Achrenius on G+: microoptimized version of + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" + * that works for the bytemasks without having to + * mask them first. 
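+ *
+ * For instance, a string whose first zero byte is byte 3 produces the
+ * bytemask 0x0000000000ffffff, and
+ * 0xffffff * 0x0001020304050608 >> 56 == 3.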
+ */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608ul >> 56; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} + +#endif /* __BIG_ENDIAN */ + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h new file mode 100644 index 000000000..b4d843225 --- /dev/null +++ b/include/asm-generic/xor.h @@ -0,0 +1,718 @@ +/* + * include/asm-generic/xor.h + * + * Generic optimized RAID-5 checksumming functions. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * You should have received a copy of the GNU General Public License + * (for example /usr/src/linux/COPYING); if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
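[Editor's note on the little-endian variant just above: if the first zero byte is byte 2, has_zero() produces a mask whose lowest set bit is bit 23, create_zero_mask() turns that into 0xffff (all bytes below the zero), and find_zero()/count_masked_bytes() maps that to index 2. A small worked example with a hypothetical demo_find_nul() helper; the values in the comments assume little-endian.]

#include <linux/string.h>
#include <asm/word-at-a-time.h>

/* Worked little-endian example: locate the NUL in "hi" (byte index 2). */
static unsigned long demo_find_nul(void)
{
	const struct word_at_a_time c = WORD_AT_A_TIME_CONSTANTS;
	unsigned long val, bits, mask;
	char buf[sizeof(unsigned long)] = "hi";	/* bytes: 'h' 'i' '\0' 0 ... */

	memcpy(&val, buf, sizeof(val));

	if (!has_zero(val, &bits, &c))		/* nonzero iff some byte is zero */
		return sizeof(val);
	bits = prep_zero_mask(val, bits, &c);	/* no-op on little-endian */
	mask = create_zero_mask(bits);		/* 0xffff: bytes below the NUL */
	return find_zero(mask);			/* 2 */
}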
+ */ + +#include + +static void +xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0]; + p1[1] ^= p2[1]; + p1[2] ^= p2[2]; + p1[3] ^= p2[3]; + p1[4] ^= p2[4]; + p1[5] ^= p2[5]; + p1[6] ^= p2[6]; + p1[7] ^= p2[7]; + p1 += 8; + p2 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0] ^ p3[0]; + p1[1] ^= p2[1] ^ p3[1]; + p1[2] ^= p2[2] ^ p3[2]; + p1[3] ^= p2[3] ^ p3[3]; + p1[4] ^= p2[4] ^ p3[4]; + p1[5] ^= p2[5] ^ p3[5]; + p1[6] ^= p2[6] ^ p3[6]; + p1[7] ^= p2[7] ^ p3[7]; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. 
*/ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. 
*/ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + d0 ^= p5[0]; + d1 ^= p5[1]; + d2 ^= p5[2]; + d3 ^= p5[3]; + d4 ^= p5[4]; + d5 ^= p5[5]; + d6 ^= p5[6]; + d7 ^= p5[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + prefetchw(p1); + prefetch(p2); + + do { + prefetchw(p1+8); + prefetch(p2+8); + once_more: + p1[0] ^= p2[0]; + p1[1] ^= p2[1]; + p1[2] ^= p2[2]; + p1[3] ^= p2[3]; + p1[4] ^= p2[4]; + p1[5] ^= p2[5]; + p1[6] ^= p2[6]; + p1[7] ^= p2[7]; + p1 += 8; + p2 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + prefetchw(p1); + prefetch(p2); + prefetch(p3); + + do { + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + once_more: + p1[0] ^= p2[0] ^ p3[0]; + p1[1] ^= p2[1] ^ p3[1]; + p1[2] ^= p2[2] ^ p3[2]; + p1[3] ^= p2[3] ^ p3[3]; + p1[4] ^= p2[4] ^ p3[4]; + p1[5] ^= p2[5] ^ p3[5]; + p1[6] ^= p2[6] ^ p3[6]; + p1[7] ^= p2[7] ^ p3[7]; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + + do { + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + once_more: + p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + prefetch(p5); + + do { + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + prefetch(p5+8); + once_more: + p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) 
/ 8 - 1; + + prefetchw(p1); + prefetch(p2); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + prefetch(p5); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + prefetch(p5+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. 
*/ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + d0 ^= p5[0]; + d1 ^= p5[1]; + d2 ^= p5[2]; + d3 ^= p5[3]; + d4 ^= p5[4]; + d5 ^= p5[5]; + d6 ^= p5[6]; + d7 ^= p5[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static struct xor_block_template xor_block_8regs = { + .name = "8regs", + .do_2 = xor_8regs_2, + .do_3 = xor_8regs_3, + .do_4 = xor_8regs_4, + .do_5 = xor_8regs_5, +}; + +static struct xor_block_template xor_block_32regs = { + .name = "32regs", + .do_2 = xor_32regs_2, + .do_3 = xor_32regs_3, + .do_4 = xor_32regs_4, + .do_5 = xor_32regs_5, +}; + +static struct xor_block_template xor_block_8regs_p __maybe_unused = { + .name = "8regs_prefetch", + .do_2 = xor_8regs_p_2, + .do_3 = xor_8regs_p_3, + .do_4 = xor_8regs_p_4, + .do_5 = xor_8regs_p_5, +}; + +static struct xor_block_template xor_block_32regs_p __maybe_unused = { + .name = "32regs_prefetch", + .do_2 = xor_32regs_p_2, + .do_3 = xor_32regs_p_3, + .do_4 = xor_32regs_p_4, + .do_5 = xor_32regs_p_5, +}; + +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + } while (0) diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h new file mode 100644 index 000000000..349e5957c --- /dev/null +++ b/include/clocksource/arm_arch_timer.h @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
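[Editor's note: the four templates above are normally benchmarked at boot via XOR_TRY_TEMPLATES/xor_speed(), and the RAID code then calls the fastest one through the xor_block_template function pointers. A hedged sketch of calling one template directly; real users should go through the calibration machinery instead.]

#include <linux/raid/xor.h>	/* defines struct xor_block_template, pulls in asm/xor.h */

/* Sketch: compute p1 ^= p2 ^ p3 using the plain "8regs" template.
 * 'bytes' must be a non-zero multiple of 8 * sizeof(unsigned long),
 * since the loops above process 8 longs per iteration with no
 * remainder handling. */
static void demo_xor_into_p1(unsigned long bytes, unsigned long *p1,
			     unsigned long *p2, unsigned long *p3)
{
	xor_block_8regs.do_3(bytes, p1, p2, p3);
}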
+ */ +#ifndef __CLKSOURCE_ARM_ARCH_TIMER_H +#define __CLKSOURCE_ARM_ARCH_TIMER_H + +#include +#include +#include + +#define ARCH_TIMER_TYPE_CP15 BIT(0) +#define ARCH_TIMER_TYPE_MEM BIT(1) + +#define ARCH_TIMER_CTRL_ENABLE (1 << 0) +#define ARCH_TIMER_CTRL_IT_MASK (1 << 1) +#define ARCH_TIMER_CTRL_IT_STAT (1 << 2) + +#define CNTHCTL_EL1PCTEN (1 << 0) +#define CNTHCTL_EL1PCEN (1 << 1) +#define CNTHCTL_EVNTEN (1 << 2) +#define CNTHCTL_EVNTDIR (1 << 3) +#define CNTHCTL_EVNTI (0xF << 4) + +enum arch_timer_reg { + ARCH_TIMER_REG_CTRL, + ARCH_TIMER_REG_TVAL, +}; + +enum arch_timer_ppi_nr { + ARCH_TIMER_PHYS_SECURE_PPI, + ARCH_TIMER_PHYS_NONSECURE_PPI, + ARCH_TIMER_VIRT_PPI, + ARCH_TIMER_HYP_PPI, + ARCH_TIMER_MAX_TIMER_PPI +}; + +enum arch_timer_spi_nr { + ARCH_TIMER_PHYS_SPI, + ARCH_TIMER_VIRT_SPI, + ARCH_TIMER_MAX_TIMER_SPI +}; + +#define ARCH_TIMER_PHYS_ACCESS 0 +#define ARCH_TIMER_VIRT_ACCESS 1 +#define ARCH_TIMER_MEM_PHYS_ACCESS 2 +#define ARCH_TIMER_MEM_VIRT_ACCESS 3 + +#define ARCH_TIMER_MEM_MAX_FRAMES 8 + +#define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */ +#define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */ +#define ARCH_TIMER_VIRT_EVT_EN (1 << 2) +#define ARCH_TIMER_EVT_TRIGGER_SHIFT (4) +#define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT) +#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */ +#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */ + +#define ARCH_TIMER_EVT_STREAM_PERIOD_US 100 +#define ARCH_TIMER_EVT_STREAM_FREQ \ + (USEC_PER_SEC / ARCH_TIMER_EVT_STREAM_PERIOD_US) + +struct arch_timer_kvm_info { + struct timecounter timecounter; + int virtual_irq; +}; + +struct arch_timer_mem_frame { + bool valid; + phys_addr_t cntbase; + size_t size; + int phys_irq; + int virt_irq; +}; + +struct arch_timer_mem { + phys_addr_t cntctlbase; + size_t size; + struct arch_timer_mem_frame frame[ARCH_TIMER_MEM_MAX_FRAMES]; +}; + +#ifdef CONFIG_ARM_ARCH_TIMER + +extern u32 arch_timer_get_rate(void); +extern u64 (*arch_timer_read_counter)(void); +extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void); +extern bool arch_timer_evtstrm_available(void); + +#else + +static inline u32 arch_timer_get_rate(void) +{ + return 0; +} + +static inline u64 arch_timer_read_counter(void) +{ + return 0; +} + +static inline bool arch_timer_evtstrm_available(void) +{ + return false; +} + +#endif + +#endif diff --git a/include/clocksource/pxa.h b/include/clocksource/pxa.h new file mode 100644 index 000000000..a9a0f0302 --- /dev/null +++ b/include/clocksource/pxa.h @@ -0,0 +1,17 @@ +/* + * PXA clocksource, clockevents, and OST interrupt handlers. + * + * Copyright (C) 2014 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + */ + +#ifndef _CLOCKSOURCE_PXA_H +#define _CLOCKSOURCE_PXA_H + +extern void pxa_timer_nodt_init(int irq, void __iomem *base); + +#endif diff --git a/include/clocksource/samsung_pwm.h b/include/clocksource/samsung_pwm.h new file mode 100644 index 000000000..0c7d48b8b --- /dev/null +++ b/include/clocksource/samsung_pwm.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
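[Editor's note: with CONFIG_ARM_ARCH_TIMER enabled, arch_timer_read_counter() and arch_timer_get_rate() are enough for simple interval measurements; the stubs above let the same code degrade gracefully when the driver is not built in. A rough sketch with a hypothetical helper, valid for short intervals only so the multiplication cannot overflow.]

#include <linux/math64.h>
#include <linux/time64.h>
#include <clocksource/arm_arch_timer.h>

/* Sketch: time a callback in microseconds using the architected counter. */
static u64 demo_measure_us(void (*fn)(void))
{
	u64 rate = arch_timer_get_rate();	/* 0 if no arch timer present */
	u64 start, end;

	if (!rate)
		return 0;

	start = arch_timer_read_counter();
	fn();
	end = arch_timer_read_counter();

	return div64_u64((end - start) * USEC_PER_SEC, rate);
}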
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __CLOCKSOURCE_SAMSUNG_PWM_H +#define __CLOCKSOURCE_SAMSUNG_PWM_H + +#include + +#define SAMSUNG_PWM_NUM 5 + +/* + * Following declaration must be in an ifdef due to this symbol being static + * in pwm-samsung driver if the clocksource driver is not compiled in and the + * spinlock is not shared between both drivers. + */ +#ifdef CONFIG_CLKSRC_SAMSUNG_PWM +extern spinlock_t samsung_pwm_lock; +#endif + +struct samsung_pwm_variant { + u8 bits; + u8 div_base; + u8 tclk_mask; + u8 output_mask; + bool has_tint_cstat; +}; + +void samsung_pwm_clocksource_init(void __iomem *base, + unsigned int *irqs, struct samsung_pwm_variant *variant); + +#endif /* __CLOCKSOURCE_SAMSUNG_PWM_H */ diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h new file mode 100644 index 000000000..a5b41f31a --- /dev/null +++ b/include/clocksource/timer-sp804.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CLKSOURCE_TIMER_SP804_H +#define __CLKSOURCE_TIMER_SP804_H + +struct clk; + +int __sp804_clocksource_and_sched_clock_init(void __iomem *, + const char *, struct clk *, int); +int __sp804_clockevents_init(void __iomem *, unsigned int, + struct clk *, const char *); +void sp804_timer_disable(void __iomem *); + +static inline void sp804_clocksource_init(void __iomem *base, const char *name) +{ + __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0); +} + +static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base, + const char *name) +{ + __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1); +} + +static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name) +{ + __sp804_clockevents_init(base, irq, NULL, name); + +} +#endif diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h new file mode 100644 index 000000000..7d9598dc5 --- /dev/null +++ b/include/clocksource/timer-ti-dm.h @@ -0,0 +1,394 @@ +/* + * OMAP Dual-Mode Timers + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ + * Tarun Kanti DebBarma + * Thara Gopinath + * + * Platform device conversion and hwmod support. + * + * Copyright (C) 2005 Nokia Corporation + * Author: Lauri Leukkunen + * PWM and clock framwork support by Timo Teras. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
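[Editor's note: samsung_pwm_clocksource_init() expects the caller (a board file or the clocksource driver's DT probe) to describe the SoC's PWM block through samsung_pwm_variant. A hedged sketch with Exynos-style values; the exact numbers are illustrative only and must come from the SoC documentation or device tree.]

#include <linux/bits.h>
#include <linux/io.h>
#include <clocksource/samsung_pwm.h>

/* Sketch: register the PWM block as clocksource/clockevent provider. */
static void demo_register_pwm_timer(void __iomem *base, unsigned int *irqs)
{
	static struct samsung_pwm_variant variant = {
		.bits		= 32,		/* 32-bit down counters */
		.div_base	= 0,		/* smallest divider exponent */
		.tclk_mask	= 0,		/* no external TCLK inputs */
		.output_mask	= BIT(0),	/* channel 0 reserved for PWM output */
		.has_tint_cstat	= true,		/* irq status lives in TINT_CSTAT */
	};

	samsung_pwm_clocksource_init(base, irqs, &variant);
}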
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include + +#ifndef __CLOCKSOURCE_DMTIMER_H +#define __CLOCKSOURCE_DMTIMER_H + +/* clock sources */ +#define OMAP_TIMER_SRC_SYS_CLK 0x00 +#define OMAP_TIMER_SRC_32_KHZ 0x01 +#define OMAP_TIMER_SRC_EXT_CLK 0x02 + +/* timer interrupt enable bits */ +#define OMAP_TIMER_INT_CAPTURE (1 << 2) +#define OMAP_TIMER_INT_OVERFLOW (1 << 1) +#define OMAP_TIMER_INT_MATCH (1 << 0) + +/* trigger types */ +#define OMAP_TIMER_TRIGGER_NONE 0x00 +#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01 +#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02 + +/* posted mode types */ +#define OMAP_TIMER_NONPOSTED 0x00 +#define OMAP_TIMER_POSTED 0x01 + +/* timer capabilities used in hwmod database */ +#define OMAP_TIMER_SECURE 0x80000000 +#define OMAP_TIMER_ALWON 0x40000000 +#define OMAP_TIMER_HAS_PWM 0x20000000 +#define OMAP_TIMER_NEEDS_RESET 0x10000000 +#define OMAP_TIMER_HAS_DSP_IRQ 0x08000000 + +/* + * timer errata flags + * + * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This + * errata prevents us from using posted mode on these devices, unless the + * timer counter register is never read. For more details please refer to + * the OMAP3/4/5 errata documents. + */ +#define OMAP_TIMER_ERRATA_I103_I767 0x80000000 + +struct timer_regs { + u32 tidr; + u32 tier; + u32 twer; + u32 tclr; + u32 tcrr; + u32 tldr; + u32 ttrg; + u32 twps; + u32 tmar; + u32 tcar1; + u32 tsicr; + u32 tcar2; + u32 tpir; + u32 tnir; + u32 tcvr; + u32 tocr; + u32 towr; +}; + +struct omap_dm_timer { + int id; + int irq; + struct clk *fclk; + + void __iomem *io_base; + void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */ + void __iomem *irq_ena; /* irq enable */ + void __iomem *irq_dis; /* irq disable, only on v2 ip */ + void __iomem *pend; /* write pending */ + void __iomem *func_base; /* function register base */ + + unsigned long rate; + unsigned reserved:1; + unsigned posted:1; + struct timer_regs context; + int (*get_context_loss_count)(struct device *); + int ctx_loss_count; + int revision; + u32 capability; + u32 errata; + struct platform_device *pdev; + struct list_head node; +}; + +int omap_dm_timer_reserve_systimer(int id); +struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap); + +int omap_dm_timer_get_irq(struct omap_dm_timer *timer); + +u32 omap_dm_timer_modify_idlect_mask(u32 inputmask); + +int omap_dm_timer_trigger(struct omap_dm_timer *timer); + +int omap_dm_timers_active(void); + +/* + * Do not use the defines below, they are not needed. They should be only + * used by dmtimer.c and sys_timer related code. + */ + +/* + * The interrupt registers are different between v1 and v2 ip. + * These registers are offsets from timer->iobase. + */ +#define OMAP_TIMER_ID_OFFSET 0x00 +#define OMAP_TIMER_OCP_CFG_OFFSET 0x10 + +#define OMAP_TIMER_V1_SYS_STAT_OFFSET 0x14 +#define OMAP_TIMER_V1_STAT_OFFSET 0x18 +#define OMAP_TIMER_V1_INT_EN_OFFSET 0x1c + +#define OMAP_TIMER_V2_IRQSTATUS_RAW 0x24 +#define OMAP_TIMER_V2_IRQSTATUS 0x28 +#define OMAP_TIMER_V2_IRQENABLE_SET 0x2c +#define OMAP_TIMER_V2_IRQENABLE_CLR 0x30 + +/* + * The functional registers have a different base on v1 and v2 ip. + * These registers are offsets from timer->func_base. The func_base + * is samae as io_base for v1 and io_base + 0x14 for v2 ip. 
+ * + */ +#define OMAP_TIMER_V2_FUNC_OFFSET 0x14 + +#define _OMAP_TIMER_WAKEUP_EN_OFFSET 0x20 +#define _OMAP_TIMER_CTRL_OFFSET 0x24 +#define OMAP_TIMER_CTRL_GPOCFG (1 << 14) +#define OMAP_TIMER_CTRL_CAPTMODE (1 << 13) +#define OMAP_TIMER_CTRL_PT (1 << 12) +#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH (0x1 << 8) +#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW (0x2 << 8) +#define OMAP_TIMER_CTRL_TCM_BOTHEDGES (0x3 << 8) +#define OMAP_TIMER_CTRL_SCPWM (1 << 7) +#define OMAP_TIMER_CTRL_CE (1 << 6) /* compare enable */ +#define OMAP_TIMER_CTRL_PRE (1 << 5) /* prescaler enable */ +#define OMAP_TIMER_CTRL_PTV_SHIFT 2 /* prescaler value shift */ +#define OMAP_TIMER_CTRL_POSTED (1 << 2) +#define OMAP_TIMER_CTRL_AR (1 << 1) /* auto-reload enable */ +#define OMAP_TIMER_CTRL_ST (1 << 0) /* start timer */ +#define _OMAP_TIMER_COUNTER_OFFSET 0x28 +#define _OMAP_TIMER_LOAD_OFFSET 0x2c +#define _OMAP_TIMER_TRIGGER_OFFSET 0x30 +#define _OMAP_TIMER_WRITE_PEND_OFFSET 0x34 +#define WP_NONE 0 /* no write pending bit */ +#define WP_TCLR (1 << 0) +#define WP_TCRR (1 << 1) +#define WP_TLDR (1 << 2) +#define WP_TTGR (1 << 3) +#define WP_TMAR (1 << 4) +#define WP_TPIR (1 << 5) +#define WP_TNIR (1 << 6) +#define WP_TCVR (1 << 7) +#define WP_TOCR (1 << 8) +#define WP_TOWR (1 << 9) +#define _OMAP_TIMER_MATCH_OFFSET 0x38 +#define _OMAP_TIMER_CAPTURE_OFFSET 0x3c +#define _OMAP_TIMER_IF_CTRL_OFFSET 0x40 +#define _OMAP_TIMER_CAPTURE2_OFFSET 0x44 /* TCAR2, 34xx only */ +#define _OMAP_TIMER_TICK_POS_OFFSET 0x48 /* TPIR, 34xx only */ +#define _OMAP_TIMER_TICK_NEG_OFFSET 0x4c /* TNIR, 34xx only */ +#define _OMAP_TIMER_TICK_COUNT_OFFSET 0x50 /* TCVR, 34xx only */ +#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */ +#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */ + +/* register offsets with the write pending bit encoded */ +#define WPSHIFT 16 + +#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \ + | (WP_TCLR << WPSHIFT)) + +#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \ + | (WP_TCRR << WPSHIFT)) + +#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \ + | (WP_TLDR << WPSHIFT)) + +#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \ + | (WP_TTGR << WPSHIFT)) + +#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \ + | (WP_TMAR << WPSHIFT)) + +#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \ + | (WP_TPIR << WPSHIFT)) + +#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \ + | (WP_TNIR << WPSHIFT)) + +#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \ + | (WP_TCVR << WPSHIFT)) + +#define OMAP_TIMER_TICK_INT_MASK_SET_REG \ + (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT)) + +#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \ + (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT)) + +/* + * The below are inlined to optimize code size for system timers. 
Other code + * should not need these at all, see + * include/linux/platform_data/pwm_omap_dmtimer.h + */ +#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS) +static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg, + int posted) +{ + if (posted) + while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) + cpu_relax(); + + return readl_relaxed(timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_write(struct omap_dm_timer *timer, + u32 reg, u32 val, int posted) +{ + if (posted) + while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) + cpu_relax(); + + writel_relaxed(val, timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer) +{ + u32 tidr; + + /* Assume v1 ip if bits [31:16] are zero */ + tidr = readl_relaxed(timer->io_base); + if (!(tidr >> 16)) { + timer->revision = 1; + timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET; + timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET; + timer->func_base = timer->io_base; + } else { + timer->revision = 2; + timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS; + timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR; + timer->pend = timer->io_base + + _OMAP_TIMER_WRITE_PEND_OFFSET + + OMAP_TIMER_V2_FUNC_OFFSET; + timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET; + } +} + +/* + * __omap_dm_timer_enable_posted - enables write posted mode + * @timer: pointer to timer instance handle + * + * Enables the write posted mode for the timer. When posted mode is enabled + * writes to certain timer registers are immediately acknowledged by the + * internal bus and hence prevents stalling the CPU waiting for the write to + * complete. Enabling this feature can improve performance for writing to the + * timer registers. + */ +static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer) +{ + if (timer->posted) + return; + + if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) { + timer->posted = OMAP_TIMER_NONPOSTED; + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0); + return; + } + + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, + OMAP_TIMER_CTRL_POSTED, 0); + timer->context.tsicr = OMAP_TIMER_CTRL_POSTED; + timer->posted = OMAP_TIMER_POSTED; +} + +/** + * __omap_dm_timer_override_errata - override errata flags for a timer + * @timer: pointer to timer handle + * @errata: errata flags to be ignored + * + * For a given timer, override a timer errata by clearing the flags + * specified by the errata argument. A specific erratum should only be + * overridden for a timer if the timer is used in such a way the erratum + * has no impact. 
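[Editor's note: the usual order of operations for code using these inlines is to detect the IP revision with __omap_dm_timer_init_regs(), enable write posting with __omap_dm_timer_enable_posted(), and from then on pass timer->posted to the read/write helpers so they honour the TWPS write-pending bits. A short sketch with a hypothetical helper name.]

#include <clocksource/timer-ti-dm.h>

/* Sketch: probe-time setup before the timer is programmed. */
static void demo_dmtimer_setup(struct omap_dm_timer *timer)
{
	__omap_dm_timer_init_regs(timer);	/* fills irq_*, pend, func_base */
	__omap_dm_timer_enable_posted(timer);	/* honours the i103/i767 erratum */

	/* Subsequent accesses pass timer->posted so writes wait on TWPS. */
	__omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, 0, timer->posted);
}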
+ */ +static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer, + u32 errata) +{ + timer->errata &= ~errata; +} + +static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer, + int posted, unsigned long rate) +{ + u32 l; + + l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); + if (l & OMAP_TIMER_CTRL_ST) { + l &= ~0x1; + __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted); +#ifdef CONFIG_ARCH_OMAP2PLUS + /* Readback to make sure write has completed */ + __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); + /* + * Wait for functional clock period x 3.5 to make sure that + * timer is stopped + */ + udelay(3500000 / rate + 1); +#endif + } + + /* Ack possibly pending interrupt */ + writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat); +} + +static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer, + u32 ctrl, unsigned int load, + int posted) +{ + __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted); + __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted); +} + +static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer, + unsigned int value) +{ + writel_relaxed(value, timer->irq_ena); + __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0); +} + +static inline unsigned int +__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted) +{ + return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted); +} + +static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer, + unsigned int value) +{ + writel_relaxed(value, timer->irq_stat); +} +#endif /* CONFIG_ARCH_OMAP1 || CONFIG_ARCH_OMAP2PLUS */ +#endif /* __CLOCKSOURCE_DMTIMER_H */ diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h new file mode 100644 index 000000000..1ff783656 --- /dev/null +++ b/include/crypto/acompress.h @@ -0,0 +1,271 @@ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li + * Giovanni Cabiddu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
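[Editor's note: building on the stop/load/start helpers above, a periodic tick is typically set up by loading the reload value into TLDR, enabling the overflow interrupt, and starting the counter with auto-reload. A hedged sketch of that sequence; the wrapper name is made up, and the OMAP2 clockevent code is the authoritative reference.]

#include <clocksource/timer-ti-dm.h>

/* Sketch: start a periodic tick that overflows every 'period' ticks. */
static void demo_dmtimer_start_periodic(struct omap_dm_timer *timer,
					u32 period)
{
	u32 load = 0xffffffff - period + 1;	/* counter counts up to overflow */

	__omap_dm_timer_int_enable(timer, OMAP_TIMER_INT_OVERFLOW);
	__omap_dm_timer_write(timer, OMAP_TIMER_LOAD_REG, load, timer->posted);
	__omap_dm_timer_load_start(timer,
				   OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
				   load, timer->posted);
}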
+ * + */ +#ifndef _CRYPTO_ACOMP_H +#define _CRYPTO_ACOMP_H +#include + +#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 + +/** + * struct acomp_req - asynchronous (de)compression request + * + * @base: Common attributes for asynchronous crypto requests + * @src: Source Data + * @dst: Destination data + * @slen: Size of the input buffer + * @dlen: Size of the output buffer and number of bytes produced + * @flags: Internal flags + * @__ctx: Start of private context data + */ +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_acomp - user-instantiated objects which encapsulate + * algorithms and core processing logic + * + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @dst_free: Frees destination buffer if allocated inside the + * algorithm + * @reqsize: Context size for (de)compression requests + * @base: Common crypto API algorithm data structure + */ +struct crypto_acomp { + int (*compress)(struct acomp_req *req); + int (*decompress)(struct acomp_req *req); + void (*dst_free)(struct scatterlist *dst); + unsigned int reqsize; + struct crypto_tfm base; +}; + +/** + * struct acomp_alg - asynchronous compression algorithm + * + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @dst_free: Frees destination buffer if allocated inside the algorithm + * @init: Initialize the cryptographic transformation object. + * This function is used to initialize the cryptographic + * transformation object. This function is called only once at + * the instantiation time, right after the transformation context + * was allocated. In case the cryptographic hardware has some + * special requirements which need to be handled by software, this + * function shall check for the precise requirement of the + * transformation and put any software fallbacks in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * + * @reqsize: Context size for (de)compression requests + * @base: Common crypto API algorithm data structure + */ +struct acomp_alg { + int (*compress)(struct acomp_req *req); + int (*decompress)(struct acomp_req *req); + void (*dst_free)(struct scatterlist *dst); + int (*init)(struct crypto_acomp *tfm); + void (*exit)(struct crypto_acomp *tfm); + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Asynchronous Compression API + * + * The Asynchronous Compression API is used with the algorithms of type + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto) + */ + +/** + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * compression algorithm e.g. "deflate" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for a compression algorithm. The returned struct + * crypto_acomp is the handle that is required for any subsequent + * API invocation for the compression operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
+ */ +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) +{ + return &tfm->base; +} + +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct acomp_alg, base); +} + +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_acomp, base); +} + +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm) +{ + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm) +{ + return tfm->reqsize; +} + +static inline void acomp_request_set_tfm(struct acomp_req *req, + struct crypto_acomp *tfm) +{ + req->base.tfm = crypto_acomp_tfm(tfm); +} + +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req) +{ + return __crypto_acomp_tfm(req->base.tfm); +} + +/** + * crypto_free_acomp() -- free ACOMPRESS tfm handle + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_acomp(struct crypto_acomp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm)); +} + +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_ACOMPRESS; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +/** + * acomp_request_alloc() -- allocates asynchronous (de)compression request + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * Return: allocated handle in case of success or NULL in case of an error + */ +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); + +/** + * acomp_request_free() -- zeroize and free asynchronous (de)compression + * request as well as the output buffer if allocated + * inside the algorithm + * + * @req: request to free + */ +void acomp_request_free(struct acomp_req *req); + +/** + * acomp_request_set_callback() -- Sets an asynchronous callback + * + * Callback will be called when an asynchronous operation on a given + * request is finished. + * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmlp: callback which will be called + * @data: private data used by the caller + */ +static inline void acomp_request_set_callback(struct acomp_req *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * acomp_request_set_params() -- Sets request parameters + * + * Sets parameters required by an acomp operation + * + * @req: asynchronous compress request + * @src: pointer to input buffer scatterlist + * @dst: pointer to output buffer scatterlist. If this is NULL, the + * acomp layer will allocate the output memory + * @slen: size of the input buffer + * @dlen: size of the output buffer. 
If dst is NULL, this can be used by + * the user to specify the maximum amount of memory to allocate + */ +static inline void acomp_request_set_params(struct acomp_req *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int slen, + unsigned int dlen) +{ + req->src = src; + req->dst = dst; + req->slen = slen; + req->dlen = dlen; + + if (!req->dst) + req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; +} + +/** + * crypto_acomp_compress() -- Invoke asynchronous compress operation + * + * Function invokes the asynchronous compress operation + * + * @req: asynchronous compress request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_acomp_compress(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + + return tfm->compress(req); +} + +/** + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation + * + * Function invokes the asynchronous decompress operation + * + * @req: asynchronous compress request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_acomp_decompress(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + + return tfm->decompress(req); +} + +#endif diff --git a/include/crypto/aead.h b/include/crypto/aead.h new file mode 100644 index 000000000..c69c545ba --- /dev/null +++ b/include/crypto/aead.h @@ -0,0 +1,534 @@ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007-2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_AEAD_H +#define _CRYPTO_AEAD_H + +#include +#include +#include + +/** + * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API + * + * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD + * (listed as type "aead" in /proc/crypto) + * + * The most prominent examples for this type of encryption is GCM and CCM. + * However, the kernel supports other types of AEAD ciphers which are defined + * with the following cipher string: + * + * authenc(keyed message digest, block cipher) + * + * For example: authenc(hmac(sha256), cbc(aes)) + * + * The example code provided for the symmetric key cipher operation + * applies here as well. Naturally all *skcipher* symbols must be exchanged + * the *aead* pendants discussed in the following. In addition, for the AEAD + * operation, the aead_request_set_ad function must be used to set the + * pointer to the associated data memory location before performing the + * encryption or decryption operation. In case of an encryption, the associated + * data memory is filled during the encryption operation. For decryption, the + * associated data memory must contain data that is used to verify the integrity + * of the decrypted data. Another deviation from the asynchronous block cipher + * operation is that the caller should explicitly check for -EBADMSG of the + * crypto_aead_decrypt. That error indicates an authentication error, i.e. + * a breach in the integrity of the message. In essence, that -EBADMSG error + * code is the key bonus an AEAD cipher has over "standard" block chaining + * modes. 
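[Editor's note: putting the acomp pieces together, a typical synchronous caller allocates a transform, wraps its buffers in scatterlists, and waits for completion with the crypto_wait_req() helpers from linux/crypto.h. A hedged sketch with error handling trimmed; "deflate" is just one algorithm name this API accepts.]

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/acompress.h>

/* Sketch: synchronously compress 'in' into 'out' with the acomp API. */
static int demo_compress(const void *in, unsigned int in_len,
			 void *out, unsigned int out_len)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&src, in, in_len);
	sg_init_one(&dst, out, out_len);
	acomp_request_set_params(req, &src, &dst, in_len, out_len);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* On success, req->dlen holds the number of bytes produced. */
	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;
}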
+ * + * Memory Structure: + * + * To support the needs of the most prominent user of AEAD ciphers, namely + * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere + * to. + * + * The scatter list pointing to the input data must contain: + * + * * for RFC4106 ciphers, the concatenation of + * associated authentication data || IV || plaintext or ciphertext. Note, the + * same IV (buffer) is also set with the aead_request_set_crypt call. Note, + * the API call of aead_request_set_ad must provide the length of the AAD and + * the IV. The API call of aead_request_set_crypt only points to the size of + * the input plaintext or ciphertext. + * + * * for "normal" AEAD ciphers, the concatenation of + * associated authentication data || plaintext or ciphertext. + * + * It is important to note that if multiple scatter gather list entries form + * the input data mentioned above, the first entry must not point to a NULL + * buffer. If there is any potential where the AAD buffer can be NULL, the + * calling code must contain a precaution to ensure that this does not result + * in the first scatter gather list entry pointing to a NULL buffer. + */ + +struct crypto_aead; + +/** + * struct aead_request - AEAD request + * @base: Common attributes for async crypto requests + * @assoclen: Length in bytes of associated data for authentication + * @cryptlen: Length of data to be encrypted or decrypted + * @iv: Initialisation vector + * @src: Source data + * @dst: Destination data + * @__ctx: Start of private context data + */ +struct aead_request { + struct crypto_async_request base; + + unsigned int assoclen; + unsigned int cryptlen; + + u8 *iv; + + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct aead_alg - AEAD cipher definition + * @maxauthsize: Set the maximum authentication tag size supported by the + * transformation. A transformation may support smaller tag sizes. + * As the authentication tag is a message digest to ensure the + * integrity of the encrypted data, a consumer typically wants the + * largest authentication tag possible as defined by this + * variable. + * @setauthsize: Set authentication size for the AEAD transformation. This + * function is used to specify the consumer requested size of the + * authentication tag to be either generated by the transformation + * during encryption or the size of the authentication tag to be + * supplied during the decryption operation. This function is also + * responsible for checking the authentication tag size for + * validity. + * @setkey: see struct skcipher_alg + * @encrypt: see struct skcipher_alg + * @decrypt: see struct skcipher_alg + * @geniv: see struct skcipher_alg + * @ivsize: see struct skcipher_alg + * @chunksize: see struct skcipher_alg + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @base: Definition of a generic crypto cipher algorithm. 
+ * + * All fields except @ivsize is mandatory and must be filled. + */ +struct aead_alg { + int (*setkey)(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen); + int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); + int (*encrypt)(struct aead_request *req); + int (*decrypt)(struct aead_request *req); + int (*init)(struct crypto_aead *tfm); + void (*exit)(struct crypto_aead *tfm); + + const char *geniv; + + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + + struct crypto_tfm base; +}; + +static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_aead, base); +} + +/** + * crypto_alloc_aead() - allocate AEAD cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * AEAD cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an AEAD. The returned struct + * crypto_aead is the cipher handle that is required for any subsequent + * API invocation for that AEAD. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_aead() - zeroize and free aead handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_aead(struct crypto_aead *tfm) +{ + crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); +} + +static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) +{ + return container_of(crypto_aead_tfm(tfm)->__crt_alg, + struct aead_alg, base); +} + +static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg) +{ + return alg->ivsize; +} + +/** + * crypto_aead_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the aead referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) +{ + return crypto_aead_alg_ivsize(crypto_aead_alg(tfm)); +} + +/** + * crypto_aead_authsize() - obtain maximum authentication data size + * @tfm: cipher handle + * + * The maximum size of the authentication data for the AEAD cipher referenced + * by the AEAD cipher handle is returned. The authentication data size may be + * zero if the cipher implements a hard-coded maximum. + * + * The authentication data may also be known as "tag value". + * + * Return: authentication data size / tag size in bytes + */ +static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) +{ + return tfm->authsize; +} + +/** + * crypto_aead_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the AEAD referenced with the cipher handle is returned. 
+ * The caller may use that information to allocate appropriate memory for the + * data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); +} + +static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm)); +} + +static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm) +{ + return crypto_tfm_get_flags(crypto_aead_tfm(tfm)); +} + +static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags); +} + +static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); +} + +/** + * crypto_aead_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the AEAD referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_aead_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen); + +/** + * crypto_aead_setauthsize() - set authentication data size + * @tfm: cipher handle + * @authsize: size of the authentication data / tag in bytes + * + * Set the authentication data size / tag size. AEAD requires an authentication + * tag (or MAC) in addition to the associated data. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); + +static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) +{ + return __crypto_aead_cast(req->base.tfm); +} + +/** + * crypto_aead_encrypt() - encrypt plaintext + * @req: reference to the aead_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The encryption operation creates the authentication data / + * tag. That data is concatenated with the created ciphertext. + * The ciphertext memory size is therefore the given number of + * block cipher blocks + the size defined by the + * crypto_aead_setauthsize invocation. The caller must ensure + * that sufficient memory is available for the ciphertext and + * the authentication tag. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_aead_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + + if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_aead_alg(aead)->encrypt(req); +} + +/** + * crypto_aead_decrypt() - decrypt ciphertext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the aead_request handle. 
That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the + * authentication data / tag. That authentication data / tag + * must have the size defined by the crypto_aead_setauthsize + * invocation. + * + * + * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD + * cipher operation performs the authentication of the data during the + * decryption operation. Therefore, the function returns this error if + * the authentication of the ciphertext was unsuccessful (i.e. the + * integrity of the ciphertext or the associated data was violated); + * < 0 if an error occurred. + */ +static inline int crypto_aead_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + + if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + if (req->cryptlen < crypto_aead_authsize(aead)) + return -EINVAL; + + return crypto_aead_alg(aead)->decrypt(req); +} + +/** + * DOC: Asynchronous AEAD Request Handle + * + * The aead_request data structure contains all pointers to data required for + * the AEAD cipher operation. This includes the cipher handle (which can be + * used by multiple aead_request instances), pointer to plaintext and + * ciphertext, asynchronous callback function, etc. It acts as a handle to the + * aead_request_* API calls in a similar way as AEAD handle to the + * crypto_aead_* API calls. + */ + +/** + * crypto_aead_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ +static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) +{ + return tfm->reqsize; +} + +/** + * aead_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing aead handle in the request + * data structure with a different one. + */ +static inline void aead_request_set_tfm(struct aead_request *req, + struct crypto_aead *tfm) +{ + req->base.tfm = crypto_aead_tfm(tfm); +} + +/** + * aead_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the AEAD + * encrypt and decrypt API calls. During the allocation, the provided aead + * handle is registered in the request data structure. 
+ * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, + gfp_t gfp) +{ + struct aead_request *req; + + req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp); + + if (likely(req)) + aead_request_set_tfm(req, tfm); + + return req; +} + +/** + * aead_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void aead_request_free(struct aead_request *req) +{ + kzfree(req); +} + +/** + * aead_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * Setting the callback function that is triggered once the cipher operation + * completes. + * + * The callback function is registered with the aead_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void aead_request_set_callback(struct aead_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * aead_request_set_crypt - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @cryptlen: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_aead_ivsize() + * + * Setting the source data and destination data scatter / gather lists which + * hold the associated data concatenated with the plaintext or ciphertext. See + * below for the authentication tag. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed - the source is the ciphertext and the destination is the plaintext. + * + * The memory structure for cipher operation has the following structure: + * + * - AEAD encryption input: assoc data || plaintext + * - AEAD encryption output: assoc data || ciphertext || auth tag + * - AEAD decryption input: assoc data || ciphertext || auth tag + * - AEAD decryption output: assoc data || plaintext + * + * Although the kernel requires the presence of the AAD buffer, it does not + * fill the AAD buffer in the output case. If the caller wants to have that + * data buffer filled, the caller must use an in-place cipher operation + * (i.e. the same memory location for the input and the output).
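To make the assoc data || plaintext layout above concrete, here is a rough sketch of an in-place encryption: one buffer holds the AAD followed by the plaintext and leaves room for the tag that the operation appends. The buffer handling, the crypto_wait-based synchronous wait and the helper name are assumptions for illustration, not part of this header.

#include <linux/scatterlist.h>
#include <crypto/aead.h>

/*
 * Illustrative only: @buf holds @assoclen bytes of AAD followed by
 * @ptlen bytes of plaintext, plus room for the authentication tag.
 */
static int example_aead_encrypt(struct crypto_aead *tfm,
				struct aead_request *req,
				struct crypto_wait *wait,
				u8 *buf, unsigned int assoclen,
				unsigned int ptlen, u8 *iv)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, assoclen + ptlen + crypto_aead_authsize(tfm));

	/* In-place: the same scatterlist is used as source and destination. */
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	/* cryptlen covers only the plaintext; the AAD length is set here. */
	aead_request_set_ad(req, assoclen);

	return crypto_wait_req(crypto_aead_encrypt(req), wait);
}

This assumes @req and @wait were set up as in the earlier allocation sketch, i.e. crypto_req_done() registered as the callback with @wait as its data pointer.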
+ */ +static inline void aead_request_set_crypt(struct aead_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int cryptlen, u8 *iv) +{ + req->src = src; + req->dst = dst; + req->cryptlen = cryptlen; + req->iv = iv; +} + +/** + * aead_request_set_ad - set associated data information + * @req: request handle + * @assoclen: number of bytes in associated data + * + * Setting the AD information. This function sets the length of + * the associated data. + */ +static inline void aead_request_set_ad(struct aead_request *req, + unsigned int assoclen) +{ + req->assoclen = assoclen; +} + +#endif /* _CRYPTO_AEAD_H */ diff --git a/include/crypto/aes.h b/include/crypto/aes.h new file mode 100644 index 000000000..852eaa9cd --- /dev/null +++ b/include/crypto/aes.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for AES algorithms + */ + +#ifndef _CRYPTO_AES_H +#define _CRYPTO_AES_H + +#include +#include + +#define AES_MIN_KEY_SIZE 16 +#define AES_MAX_KEY_SIZE 32 +#define AES_KEYSIZE_128 16 +#define AES_KEYSIZE_192 24 +#define AES_KEYSIZE_256 32 +#define AES_BLOCK_SIZE 16 +#define AES_MAX_KEYLENGTH (15 * 16) +#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32)) + +/* + * Please ensure that the first two fields are 16-byte aligned + * relative to the start of the structure, i.e., don't move them! + */ +struct crypto_aes_ctx { + u32 key_enc[AES_MAX_KEYLENGTH_U32]; + u32 key_dec[AES_MAX_KEYLENGTH_U32]; + u32 key_length; +}; + +extern const u32 crypto_ft_tab[4][256]; +extern const u32 crypto_fl_tab[4][256]; +extern const u32 crypto_it_tab[4][256]; +extern const u32 crypto_il_tab[4][256]; + +int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len); +int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, + unsigned int key_len); +#endif diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h new file mode 100644 index 000000000..9817f2e5b --- /dev/null +++ b/include/crypto/akcipher.h @@ -0,0 +1,389 @@ +/* + * Public Key Encryption + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_AKCIPHER_H +#define _CRYPTO_AKCIPHER_H +#include + +/** + * struct akcipher_request - public key request + * + * @base: Common attributes for async crypto requests + * @src: Source data + * @dst: Destination data + * @src_len: Size of the input buffer + * @dst_len: Size of the output buffer. It needs to be at least + * as big as the expected result depending on the operation + * After operation it will be updated with the actual size of the + * result. + * In case of error where the dst sgl size was insufficient, + * it will be updated to the size required for the operation. 
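crypto/aes.h above only exposes constants, the crypto_aes_ctx layout and the generic key-expansion helpers, so a brief sketch of expanding a raw 128-bit key may help. The key bytes and the pr_info() are illustrative, and a real caller would usually embed crypto_aes_ctx in its own transform context rather than place it on the stack.

#include <linux/kernel.h>
#include <crypto/aes.h>

static int example_expand_aes128_key(void)
{
	static const u8 key[AES_KEYSIZE_128] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};
	struct crypto_aes_ctx ctx;
	int ret;

	/* Fills ctx.key_enc/ctx.key_dec and records the key length. */
	ret = crypto_aes_expand_key(&ctx, key, sizeof(key));
	if (ret)
		return ret;

	pr_info("expanded AES key, key_length=%u bytes\n", ctx.key_length);
	return 0;
}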
+ * @__ctx: Start of private context data + */ +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_akcipher - user-instantiated objects which encapsulate + * algorithms and core processing logic + * + * @base: Common crypto API algorithm data structure + */ +struct crypto_akcipher { + struct crypto_tfm base; +}; + +/** + * struct akcipher_alg - generic public key algorithm + * + * @sign: Function performs a sign operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @verify: Function performs a verify operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @encrypt: Function performs an encrypt operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @decrypt: Function performs a decrypt operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @set_pub_key: Function invokes the algorithm specific set public key + * function, which knows how to decode and interpret + * the BER encoded public key + * @set_priv_key: Function invokes the algorithm specific set private key + * function, which knows how to decode and interpret + * the BER encoded private key + * @max_size: Function returns the destination buffer size required for a given key. + * @init: Initialize the cryptographic transformation object. + * This function is called only once at + * the instantiation time, right after the transformation context + * was allocated. In case the cryptographic hardware has some + * special requirements which need to be handled by software, this + * function shall check for the precise requirement of the + * transformation and put any software fallbacks in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init.
+ * + * @reqsize: Request context size required by algorithm implementation + * @base: Common crypto API algorithm data structure + */ +struct akcipher_alg { + int (*sign)(struct akcipher_request *req); + int (*verify)(struct akcipher_request *req); + int (*encrypt)(struct akcipher_request *req); + int (*decrypt)(struct akcipher_request *req); + int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen); + int (*set_priv_key)(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen); + unsigned int (*max_size)(struct crypto_akcipher *tfm); + int (*init)(struct crypto_akcipher *tfm); + void (*exit)(struct crypto_akcipher *tfm); + + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Generic Public Key API + * + * The Public Key API is used with the algorithms of type + * CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto) + */ + +/** + * crypto_alloc_akcipher() - allocate AKCIPHER tfm handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * public key algorithm e.g. "rsa" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for public key algorithm. The returned struct + * crypto_akcipher is the handle that is required for any subsequent + * API invocation for the public key operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_akcipher_tfm( + struct crypto_akcipher *tfm) +{ + return &tfm->base; +} + +static inline struct akcipher_alg *__crypto_akcipher_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct akcipher_alg, base); +} + +static inline struct crypto_akcipher *__crypto_akcipher_tfm( + struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_akcipher, base); +} + +static inline struct akcipher_alg *crypto_akcipher_alg( + struct crypto_akcipher *tfm) +{ + return __crypto_akcipher_alg(crypto_akcipher_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm) +{ + return crypto_akcipher_alg(tfm)->reqsize; +} + +static inline void akcipher_request_set_tfm(struct akcipher_request *req, + struct crypto_akcipher *tfm) +{ + req->base.tfm = crypto_akcipher_tfm(tfm); +} + +static inline struct crypto_akcipher *crypto_akcipher_reqtfm( + struct akcipher_request *req) +{ + return __crypto_akcipher_tfm(req->base.tfm); +} + +/** + * crypto_free_akcipher() - free AKCIPHER tfm handle + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_akcipher(struct crypto_akcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_akcipher_tfm(tfm)); +} + +/** + * akcipher_request_alloc() - allocates public key request + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * @gfp: allocation flags + * + * Return: allocated handle in case of success or NULL in case of an error. 
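A small sketch of the allocation convention documented above may help: failures are reported through IS_ERR()/PTR_ERR() rather than NULL. The algorithm name "rsa" and the helper name are illustrative assumptions.

#include <linux/err.h>
#include <linux/kernel.h>
#include <crypto/akcipher.h>

static struct crypto_akcipher *example_get_rsa_tfm(void)
{
	struct crypto_akcipher *tfm;

	/* Failure is reported as an ERR_PTR() value, not as NULL. */
	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("could not allocate rsa tfm: %ld\n", PTR_ERR(tfm));
		return NULL;
	}

	return tfm;
}

The handle is released again with crypto_free_akcipher(), which, as noted above, tolerates NULL and error pointers.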
+ */ +static inline struct akcipher_request *akcipher_request_alloc( + struct crypto_akcipher *tfm, gfp_t gfp) +{ + struct akcipher_request *req; + + req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp); + if (likely(req)) + akcipher_request_set_tfm(req, tfm); + + return req; +} + +/** + * akcipher_request_free() - zeroize and free public key request + * + * @req: request to free + */ +static inline void akcipher_request_free(struct akcipher_request *req) +{ + kzfree(req); +} + +/** + * akcipher_request_set_callback() - Sets an asynchronous callback. + * + * Callback will be called when an asynchronous operation on a given + * request is finished. + * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmpl: callback which will be called + * @data: private data used by the caller + */ +static inline void akcipher_request_set_callback(struct akcipher_request *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * akcipher_request_set_crypt() - Sets request parameters + * + * Sets parameters required by crypto operation + * + * @req: public key request + * @src: ptr to input scatter list + * @dst: ptr to output scatter list + * @src_len: size of the src input scatter list to be processed + * @dst_len: size of the dst output scatter list + */ +static inline void akcipher_request_set_crypt(struct akcipher_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int src_len, + unsigned int dst_len) +{ + req->src = src; + req->dst = dst; + req->src_len = src_len; + req->dst_len = dst_len; +} + +/** + * crypto_akcipher_maxsize() - Get len for output buffer + * + * Function returns the dest buffer size required for a given key. + * Function assumes that the key is already set in the transformation. If this + * function is called without a setkey or with a failed setkey, you will end up + * in a NULL dereference. 
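Continuing that sketch, and to underline the note that crypto_akcipher_maxsize() is only meaningful once a key has been set, the following outlines a complete encrypt call. The BER-encoded key blob, the synchronous crypto_wait handling and all helper names are assumptions for illustration.

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/akcipher.h>

static int example_rsa_encrypt(struct crypto_akcipher *tfm,
			       const void *ber_pub_key, unsigned int ber_len,
			       const u8 *msg, unsigned int msg_len)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct akcipher_request *req;
	struct scatterlist src, dst;
	unsigned int out_len;
	u8 *out;
	int ret;

	/* The key must be set before crypto_akcipher_maxsize() is valid. */
	ret = crypto_akcipher_set_pub_key(tfm, ber_pub_key, ber_len);
	if (ret)
		return ret;

	out_len = crypto_akcipher_maxsize(tfm);
	out = kzalloc(out_len, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		kfree(out);
		return -ENOMEM;
	}

	sg_init_one(&src, msg, msg_len);
	sg_init_one(&dst, out, out_len);
	akcipher_request_set_crypt(req, &src, &dst, msg_len, out_len);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);
	/* On success, req->dst_len holds the actual ciphertext length. */

	akcipher_request_free(req);
	kfree(out);
	return ret;
}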
+ * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + */ +static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->max_size(tfm); +} + +/** + * crypto_akcipher_encrypt() - Invoke public key encrypt operation + * + * Function invokes the specific public key encrypt operation for a given + * public key algorithm + * + * @req: asymmetric key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->encrypt(req); +} + +/** + * crypto_akcipher_decrypt() - Invoke public key decrypt operation + * + * Function invokes the specific public key decrypt operation for a given + * public key algorithm + * + * @req: asymmetric key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->decrypt(req); +} + +/** + * crypto_akcipher_sign() - Invoke public key sign operation + * + * Function invokes the specific public key sign operation for a given + * public key algorithm + * + * @req: asymmetric key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->sign(req); +} + +/** + * crypto_akcipher_verify() - Invoke public key verify operation + * + * Function invokes the specific public key verify operation for a given + * public key algorithm + * + * @req: asymmetric key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->verify(req); +} + +/** + * crypto_akcipher_set_pub_key() - Invoke set public key operation + * + * Function invokes the algorithm specific set key function, which knows + * how to decode and interpret the encoded key + * + * @tfm: tfm handle + * @key: BER encoded public key + * @keylen: length of the key + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->set_pub_key(tfm, key, keylen); +} + +/** + * crypto_akcipher_set_priv_key() - Invoke set private key operation + * + * Function invokes the algorithm specific set key function, which knows + * how to decode and interpret the encoded key + * + * @tfm: tfm handle + * @key: BER encoded private key + * @keylen: length of the key + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_set_priv_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->set_priv_key(tfm, key, keylen); +} +#endif diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h new file mode 100644 index 000000000..bd5e8ccf1 --- 
/dev/null +++ b/include/crypto/algapi.h @@ -0,0 +1,428 @@ +/* + * Cryptographic API for algorithms (i.e., low-level API). + * + * Copyright (c) 2006 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_ALGAPI_H +#define _CRYPTO_ALGAPI_H + +#include +#include +#include +#include + +/* + * Maximum values for blocksize and alignmask, used to allocate + * static buffers that are big enough for any combination of + * ciphers and architectures. + */ +#define MAX_CIPHER_BLOCKSIZE 16 +#define MAX_CIPHER_ALIGNMASK 15 + +struct crypto_aead; +struct crypto_instance; +struct module; +struct rtattr; +struct seq_file; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); + unsigned int (*extsize)(struct crypto_alg *alg); + int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); + int (*init_tfm)(struct crypto_tfm *tfm); + void (*show)(struct seq_file *m, struct crypto_alg *alg); + int (*report)(struct sk_buff *skb, struct crypto_alg *alg); + void (*free)(struct crypto_instance *inst); + + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + +struct crypto_instance { + struct crypto_alg alg; + + struct crypto_template *tmpl; + struct hlist_node list; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + + struct crypto_instance *(*alloc)(struct rtattr **tb); + void (*free)(struct crypto_instance *inst); + int (*create)(struct crypto_template *tmpl, struct rtattr **tb); + + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + struct crypto_instance *inst; + const struct crypto_type *frontend; + u32 mask; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + + unsigned int qlen; + unsigned int max_qlen; +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +struct blkcipher_walk { + union { + struct { + struct page *page; + unsigned long offset; + } phys; + + struct { + u8 *page; + u8 *addr; + } virt; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + + struct scatter_walk out; + unsigned int total; + + void *page; + u8 *buffer; + u8 *iv; + unsigned int ivsize; + + int flags; + unsigned int walk_blocksize; + unsigned int cipher_blocksize; + unsigned int alignmask; +}; + +struct ablkcipher_walk { + struct { + struct page *page; + unsigned int offset; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + struct scatter_walk out; + unsigned int total; + struct list_head buffers; + u8 *iv_buffer; + u8 *iv; + int flags; + unsigned int blocksize; +}; + +extern const struct crypto_type crypto_ablkcipher_type; +extern const struct crypto_type crypto_blkcipher_type; + +void crypto_mod_put(struct crypto_alg *alg); + +int crypto_register_template(struct crypto_template *tmpl); +void crypto_unregister_template(struct crypto_template *tmpl); +struct crypto_template *crypto_lookup_template(const char *name); + +int crypto_register_instance(struct crypto_template *tmpl, + struct crypto_instance *inst); +int crypto_unregister_instance(struct crypto_instance *inst); + +int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, + struct 
crypto_instance *inst, u32 mask); +int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, + struct crypto_instance *inst, + const struct crypto_type *frontend); +int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, + u32 type, u32 mask); + +void crypto_drop_spawn(struct crypto_spawn *spawn); +struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, + u32 mask); +void *crypto_spawn_tfm2(struct crypto_spawn *spawn); + +static inline void crypto_set_spawn(struct crypto_spawn *spawn, + struct crypto_instance *inst) +{ + spawn->inst = inst; +} + +struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); +int crypto_check_attr_type(struct rtattr **tb, u32 type); +const char *crypto_attr_alg_name(struct rtattr *rta); +struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, + const struct crypto_type *frontend, + u32 type, u32 mask); + +static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, + u32 type, u32 mask) +{ + return crypto_attr_alg2(rta, NULL, type, mask); +} + +int crypto_attr_u32(struct rtattr *rta, u32 *num); +int crypto_inst_setname(struct crypto_instance *inst, const char *name, + struct crypto_alg *alg); +void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, + unsigned int head); +struct crypto_instance *crypto_alloc_instance(const char *name, + struct crypto_alg *alg); + +void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); +int crypto_enqueue_request(struct crypto_queue *queue, + struct crypto_async_request *request); +struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); +int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); +static inline unsigned int crypto_queue_len(struct crypto_queue *queue) +{ + return queue->qlen; +} + +void crypto_inc(u8 *a, unsigned int size); +void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size); + +static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s = (unsigned long *)src; + + while (size > 0) { + *d++ ^= *s++; + size -= sizeof(unsigned long); + } + } else { + __crypto_xor(dst, dst, src, size); + } +} + +static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, + unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s1 = (unsigned long *)src1; + unsigned long *s2 = (unsigned long *)src2; + + while (size > 0) { + *d++ = *s1++ ^ *s2++; + size -= sizeof(unsigned long); + } + } else { + __crypto_xor(dst, src1, src2, size); + } +} + +int blkcipher_walk_done(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, int err); +int blkcipher_walk_virt(struct blkcipher_desc *desc, + struct blkcipher_walk *walk); +int blkcipher_walk_phys(struct blkcipher_desc *desc, + struct blkcipher_walk *walk); +int blkcipher_walk_virt_block(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + unsigned int blocksize); +int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_aead *tfm, + unsigned int blocksize); + +int ablkcipher_walk_done(struct ablkcipher_request *req, + struct ablkcipher_walk *walk, int err); +int 
ablkcipher_walk_phys(struct ablkcipher_request *req, + struct ablkcipher_walk *walk); +void __ablkcipher_walk_complete(struct ablkcipher_walk *walk); + +static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) +{ + return PTR_ALIGN(crypto_tfm_ctx(tfm), + crypto_tfm_alg_alignmask(tfm) + 1); +} + +static inline struct crypto_instance *crypto_tfm_alg_instance( + struct crypto_tfm *tfm) +{ + return container_of(tfm->__crt_alg, struct crypto_instance, alg); +} + +static inline void *crypto_instance_ctx(struct crypto_instance *inst) +{ + return inst->__ctx; +} + +static inline struct ablkcipher_alg *crypto_ablkcipher_alg( + struct crypto_ablkcipher *tfm) +{ + return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher; +} + +static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct crypto_blkcipher *crypto_spawn_blkcipher( + struct crypto_spawn *spawn) +{ + u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); +} + +static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct crypto_cipher *crypto_spawn_cipher( + struct crypto_spawn *spawn) +{ + u32 type = CRYPTO_ALG_TYPE_CIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask)); +} + +static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; +} + +static inline void blkcipher_walk_init(struct blkcipher_walk *walk, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + walk->in.sg = src; + walk->out.sg = dst; + walk->total = nbytes; +} + +static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + walk->in.sg = src; + walk->out.sg = dst; + walk->total = nbytes; + INIT_LIST_HEAD(&walk->buffers); +} + +static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk) +{ + if (unlikely(!list_empty(&walk->buffers))) + __ablkcipher_walk_complete(walk); +} + +static inline struct crypto_async_request *crypto_get_backlog( + struct crypto_queue *queue) +{ + return queue->backlog == &queue->list ? 
NULL : + container_of(queue->backlog, struct crypto_async_request, list); +} + +static inline int ablkcipher_enqueue_request(struct crypto_queue *queue, + struct ablkcipher_request *request) +{ + return crypto_enqueue_request(queue, &request->base); +} + +static inline struct ablkcipher_request *ablkcipher_dequeue_request( + struct crypto_queue *queue) +{ + return ablkcipher_request_cast(crypto_dequeue_request(queue)); +} + +static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) +{ + return req->__ctx; +} + +static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue, + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); +} + +static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, + u32 type, u32 mask) +{ + return crypto_attr_alg(tb[1], type, mask); +} + +static inline int crypto_requires_off(u32 type, u32 mask, u32 off) +{ + return (type ^ off) & mask & off; +} + +/* + * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. + * Otherwise returns zero. + */ +static inline int crypto_requires_sync(u32 type, u32 mask) +{ + return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC); +} + +noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); + +/** + * crypto_memneq - Compare two areas of memory without leaking + * timing information. + * + * @a: One area of memory + * @b: Another area of memory + * @size: The size of the area. + * + * Returns 0 when data is equal, 1 otherwise. + */ +static inline int crypto_memneq(const void *a, const void *b, size_t size) +{ + return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; +} + +static inline void crypto_yield(u32 flags) +{ +#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY) + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) + cond_resched(); +#endif +} + +#endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h new file mode 100644 index 000000000..677505953 --- /dev/null +++ b/include/crypto/authenc.h @@ -0,0 +1,37 @@ +/* + * Authenc: Simple AEAD wrapper for IPsec + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_AUTHENC_H +#define _CRYPTO_AUTHENC_H + +#include + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC, + CRYPTO_AUTHENC_KEYA_PARAM, +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +struct crypto_authenc_keys { + const u8 *authkey; + const u8 *enckey; + + unsigned int authkeylen; + unsigned int enckeylen; +}; + +int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen); + +#endif /* _CRYPTO_AUTHENC_H */ diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h new file mode 100644 index 000000000..0b8e6bc55 --- /dev/null +++ b/include/crypto/b128ops.h @@ -0,0 +1,80 @@ +/* b128ops.h - common 128-bit block operations + * + * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. + * Copyright (c) 2006, Rik Snel + * + * Based on Dr Brian Gladman's (GPL'd) work published at + * http://fp.gladman.plus.com/cryptography_technology/index.htm + * See the original copyright notice below. 
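The point of crypto_memneq() above is that authentication tags and other secrets must not be compared with memcmp(), whose early exit leaks timing information. A minimal sketch of the intended use follows; the tag-length parameter and the -EBADMSG convention are illustrative.

#include <linux/errno.h>
#include <crypto/algapi.h>

/* Compare a computed authentication tag against the received one. */
static int example_check_tag(const u8 *calculated, const u8 *received,
			     unsigned int taglen)
{
	/* Constant-time comparison: never memcmp() secret material. */
	return crypto_memneq(calculated, received, taglen) ? -EBADMSG : 0;
}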
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ +/* + --------------------------------------------------------------------------- + Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved. + + LICENSE TERMS + + The free distribution and use of this software in both source and binary + form is allowed (with or without changes) provided that: + + 1. distributions of this source code include the above copyright + notice, this list of conditions and the following disclaimer; + + 2. distributions in binary form include the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other associated materials; + + 3. the copyright holder's name is not used to endorse products + built using this software without specific written permission. + + ALTERNATIVELY, provided that this notice is retained in full, this product + may be distributed under the terms of the GNU General Public License (GPL), + in which case the provisions of the GPL apply INSTEAD OF those given above. + + DISCLAIMER + + This software is provided 'as is' with no explicit or implied warranties + in respect of its properties, including, but not limited to, correctness + and/or fitness for purpose. + --------------------------------------------------------------------------- + Issue Date: 13/06/2006 +*/ + +#ifndef _CRYPTO_B128OPS_H +#define _CRYPTO_B128OPS_H + +#include + +typedef struct { + u64 a, b; +} u128; + +typedef struct { + __be64 a, b; +} be128; + +typedef struct { + __le64 b, a; +} le128; + +static inline void u128_xor(u128 *r, const u128 *p, const u128 *q) +{ + r->a = p->a ^ q->a; + r->b = p->b ^ q->b; +} + +static inline void be128_xor(be128 *r, const be128 *p, const be128 *q) +{ + u128_xor((u128 *)r, (u128 *)p, (u128 *)q); +} + +static inline void le128_xor(le128 *r, const le128 *p, const le128 *q) +{ + u128_xor((u128 *)r, (u128 *)p, (u128 *)q); +} + +#endif /* _CRYPTO_B128OPS_H */ diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h new file mode 100644 index 000000000..d439496fa --- /dev/null +++ b/include/crypto/blake2s.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#ifndef _CRYPTO_BLAKE2S_H +#define _CRYPTO_BLAKE2S_H + +#include +#include +#include +#include + +enum blake2s_lengths { + BLAKE2S_BLOCK_SIZE = 64, + BLAKE2S_HASH_SIZE = 32, + BLAKE2S_KEY_SIZE = 32, + + BLAKE2S_128_HASH_SIZE = 16, + BLAKE2S_160_HASH_SIZE = 20, + BLAKE2S_224_HASH_SIZE = 28, + BLAKE2S_256_HASH_SIZE = 32, +}; + +struct blake2s_state { + u32 h[8]; + u32 t[2]; + u32 f[2]; + u8 buf[BLAKE2S_BLOCK_SIZE]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2s_iv { + BLAKE2S_IV0 = 0x6A09E667UL, + BLAKE2S_IV1 = 0xBB67AE85UL, + BLAKE2S_IV2 = 0x3C6EF372UL, + BLAKE2S_IV3 = 0xA54FF53AUL, + BLAKE2S_IV4 = 0x510E527FUL, + BLAKE2S_IV5 = 0x9B05688CUL, + BLAKE2S_IV6 = 0x1F83D9ABUL, + BLAKE2S_IV7 = 0x5BE0CD19UL, +}; + +void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); +void blake2s_final(struct blake2s_state *state, u8 *out); + +static inline void blake2s_init_param(struct blake2s_state *state, + const u32 param) +{ + *state = (struct blake2s_state){{ + BLAKE2S_IV0 ^ param, + BLAKE2S_IV1, + BLAKE2S_IV2, + BLAKE2S_IV3, + BLAKE2S_IV4, + BLAKE2S_IV5, + BLAKE2S_IV6, + BLAKE2S_IV7, + }}; +} + +static inline void blake2s_init(struct blake2s_state *state, + const size_t outlen) +{ + blake2s_init_param(state, 0x01010000 | outlen); + state->outlen = outlen; +} + +static inline void blake2s_init_key(struct blake2s_state *state, + const size_t outlen, const void *key, + const size_t keylen) +{ + WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE || + !key || !keylen || keylen > BLAKE2S_KEY_SIZE)); + + blake2s_init_param(state, 0x01010000 | keylen << 8 | outlen); + memcpy(state->buf, key, keylen); + state->buflen = BLAKE2S_BLOCK_SIZE; + state->outlen = outlen; +} + +static inline void blake2s(u8 *out, const u8 *in, const u8 *key, + const size_t outlen, const size_t inlen, + const size_t keylen) +{ + struct blake2s_state state; + + WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen || + outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE || + (!key && keylen))); + + if (keylen) + blake2s_init_key(&state, outlen, key, keylen); + else + blake2s_init(&state, outlen); + + blake2s_update(&state, in, inlen); + blake2s_final(&state, out); +} + +#endif /* _CRYPTO_BLAKE2S_H */ diff --git a/include/crypto/blowfish.h b/include/crypto/blowfish.h new file mode 100644 index 000000000..9b384670b --- /dev/null +++ b/include/crypto/blowfish.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for blowfish algorithms + */ + +#ifndef _CRYPTO_BLOWFISH_H +#define _CRYPTO_BLOWFISH_H + +#include +#include + +#define BF_BLOCK_SIZE 8 +#define BF_MIN_KEY_SIZE 4 +#define BF_MAX_KEY_SIZE 56 + +struct bf_ctx { + u32 p[18]; + u32 s[1024]; +}; + +int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int key_len); + +#endif diff --git a/include/crypto/cast5.h b/include/crypto/cast5.h new file mode 100644 index 000000000..3d4ed4ea9 --- /dev/null +++ b/include/crypto/cast5.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST5_H +#define _CRYPTO_CAST5_H + +#include +#include +#include + +#define CAST5_BLOCK_SIZE 8 +#define CAST5_MIN_KEY_SIZE 5 +#define CAST5_MAX_KEY_SIZE 16 + +struct cast5_ctx { + u32 Km[16]; + u8 Kr[16]; + int rr; /* rr ? 
rounds = 12 : rounds = 16; (rfc 2144) */ +}; + +int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __cast5_encrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src); +void __cast5_decrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h new file mode 100644 index 000000000..c71f6ef47 --- /dev/null +++ b/include/crypto/cast6.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST6_H +#define _CRYPTO_CAST6_H + +#include +#include +#include + +#define CAST6_BLOCK_SIZE 16 +#define CAST6_MIN_KEY_SIZE 16 +#define CAST6_MAX_KEY_SIZE 32 + +struct cast6_ctx { + u32 Km[12][4]; + u8 Kr[12][4]; +}; + +int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, + unsigned int keylen, u32 *flags); +int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); +void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/cast_common.h b/include/crypto/cast_common.h new file mode 100644 index 000000000..b90090244 --- /dev/null +++ b/include/crypto/cast_common.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST_COMMON_H +#define _CRYPTO_CAST_COMMON_H + +extern const u32 cast_s1[256]; +extern const u32 cast_s2[256]; +extern const u32 cast_s3[256]; +extern const u32 cast_s4[256]; + +#endif diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h new file mode 100644 index 000000000..f5b8bfc22 --- /dev/null +++ b/include/crypto/cbc.h @@ -0,0 +1,146 @@ +/* + * CBC: Cipher Block Chaining mode + * + * Copyright (c) 2016 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#ifndef _CRYPTO_CBC_H +#define _CRYPTO_CBC_H + +#include +#include +#include + +static inline int crypto_cbc_encrypt_segment( + struct skcipher_walk *walk, struct crypto_skcipher *tfm, + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ + unsigned int bsize = crypto_skcipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + u8 *iv = walk->iv; + + do { + crypto_xor(iv, src, bsize); + fn(tfm, iv, dst); + memcpy(iv, dst, bsize); + + src += bsize; + dst += bsize; + } while ((nbytes -= bsize) >= bsize); + + return nbytes; +} + +static inline int crypto_cbc_encrypt_inplace( + struct skcipher_walk *walk, struct crypto_skcipher *tfm, + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ + unsigned int bsize = crypto_skcipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *iv = walk->iv; + + do { + crypto_xor(src, iv, bsize); + fn(tfm, src, src); + iv = src; + + src += bsize; + } while ((nbytes -= bsize) >= bsize); + + memcpy(walk->iv, iv, bsize); + + return nbytes; +} + +static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req, + void (*fn)(struct crypto_skcipher *, + const u8 *, u8 *)) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_walk walk; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes) { + if (walk.src.virt.addr == walk.dst.virt.addr) + err = crypto_cbc_encrypt_inplace(&walk, tfm, fn); + else + err = crypto_cbc_encrypt_segment(&walk, tfm, fn); + err = skcipher_walk_done(&walk, err); + } + + return err; +} + +static inline int crypto_cbc_decrypt_segment( + struct skcipher_walk *walk, struct crypto_skcipher *tfm, + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ + unsigned int bsize = crypto_skcipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + u8 *iv = walk->iv; + + do { + fn(tfm, src, dst); + crypto_xor(dst, iv, bsize); + iv = src; + + src += bsize; + dst += bsize; + } while ((nbytes -= bsize) >= bsize); + + memcpy(walk->iv, iv, bsize); + + return nbytes; +} + +static inline int crypto_cbc_decrypt_inplace( + struct skcipher_walk *walk, struct crypto_skcipher *tfm, + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ + unsigned int bsize = crypto_skcipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 last_iv[bsize]; + + /* Start of the last block. 
*/ + src += nbytes - (nbytes & (bsize - 1)) - bsize; + memcpy(last_iv, src, bsize); + + for (;;) { + fn(tfm, src, src); + if ((nbytes -= bsize) < bsize) + break; + crypto_xor(src, src - bsize, bsize); + src -= bsize; + } + + crypto_xor(src, walk->iv, bsize); + memcpy(walk->iv, last_iv, bsize); + + return nbytes; +} + +static inline int crypto_cbc_decrypt_blocks( + struct skcipher_walk *walk, struct crypto_skcipher *tfm, + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ + if (walk->src.virt.addr == walk->dst.virt.addr) + return crypto_cbc_decrypt_inplace(walk, tfm, fn); + else + return crypto_cbc_decrypt_segment(walk, tfm, fn); +} + +#endif /* _CRYPTO_CBC_H */ diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h new file mode 100644 index 000000000..3dd5ab189 --- /dev/null +++ b/include/crypto/chacha20.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the ChaCha20 algorithm + */ + +#ifndef _CRYPTO_CHACHA20_H +#define _CRYPTO_CHACHA20_H + +#include +#include +#include + +#define CHACHA20_IV_SIZE 16 +#define CHACHA20_KEY_SIZE 32 +#define CHACHA20_BLOCK_SIZE 64 + +struct chacha20_ctx { + u32 key[8]; +}; + +void chacha20_block(u32 *state, u8 *stream); +void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); +int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize); +int crypto_chacha20_crypt(struct skcipher_request *req); + +enum chacha_constants { /* expand 32-byte k */ + CHACHA_CONSTANT_EXPA = 0x61707865U, + CHACHA_CONSTANT_ND_3 = 0x3320646eU, + CHACHA_CONSTANT_2_BY = 0x79622d32U, + CHACHA_CONSTANT_TE_K = 0x6b206574U +}; + +static inline void chacha_init_consts(u32 *state) +{ + state[0] = CHACHA_CONSTANT_EXPA; + state[1] = CHACHA_CONSTANT_ND_3; + state[2] = CHACHA_CONSTANT_2_BY; + state[3] = CHACHA_CONSTANT_TE_K; +} + +#endif diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h new file mode 100644 index 000000000..1e64f354c --- /dev/null +++ b/include/crypto/cryptd.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Software async crypto daemon + * + * Added AEAD support to cryptd. + * Authors: Tadeusz Struk (tadeusz.struk@intel.com) + * Adrian Hoban + * Gabriele Paoloni + * Aidan O'Mahony (aidan.o.mahony@intel.com) + * Copyright (c) 2010, Intel Corporation. + */ + +#ifndef _CRYPTO_CRYPT_H +#define _CRYPTO_CRYPT_H + +#include +#include +#include +#include + +struct cryptd_ablkcipher { + struct crypto_ablkcipher base; +}; + +static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast( + struct crypto_ablkcipher *tfm) +{ + return (struct cryptd_ablkcipher *)tfm; +} + +/* alg_name should be algorithm to be cryptd-ed */ +struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, + u32 type, u32 mask); +struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); +bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm); +void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); + +struct cryptd_skcipher { + struct crypto_skcipher base; +}; + +struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, + u32 type, u32 mask); +struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm); +/* Must be called without moving CPUs. 
*/ +bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm); +void cryptd_free_skcipher(struct cryptd_skcipher *tfm); + +struct cryptd_ahash { + struct crypto_ahash base; +}; + +static inline struct cryptd_ahash *__cryptd_ahash_cast( + struct crypto_ahash *tfm) +{ + return (struct cryptd_ahash *)tfm; +} + +/* alg_name should be algorithm to be cryptd-ed */ +struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, + u32 type, u32 mask); +struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); +struct shash_desc *cryptd_shash_desc(struct ahash_request *req); +/* Must be called without moving CPUs. */ +bool cryptd_ahash_queued(struct cryptd_ahash *tfm); +void cryptd_free_ahash(struct cryptd_ahash *tfm); + +struct cryptd_aead { + struct crypto_aead base; +}; + +static inline struct cryptd_aead *__cryptd_aead_cast( + struct crypto_aead *tfm) +{ + return (struct cryptd_aead *)tfm; +} + +struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, + u32 type, u32 mask); + +struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm); +/* Must be called without moving CPUs. */ +bool cryptd_aead_queued(struct cryptd_aead *tfm); + +void cryptd_free_aead(struct cryptd_aead *tfm); + +#endif diff --git a/include/crypto/crypto_wq.h b/include/crypto/crypto_wq.h new file mode 100644 index 000000000..23114746a --- /dev/null +++ b/include/crypto/crypto_wq.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CRYPTO_WQ_H +#define CRYPTO_WQ_H + +#include + +extern struct workqueue_struct *kcrypto_wq; +#endif diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h new file mode 100644 index 000000000..4180fc080 --- /dev/null +++ b/include/crypto/ctr.h @@ -0,0 +1,20 @@ +/* + * CTR: Counter mode + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_CTR_H +#define _CRYPTO_CTR_H + +#define CTR_RFC3686_NONCE_SIZE 4 +#define CTR_RFC3686_IV_SIZE 8 +#define CTR_RFC3686_BLOCK_SIZE 16 + +#endif /* _CRYPTO_CTR_H */ diff --git a/include/crypto/des.h b/include/crypto/des.h new file mode 100644 index 000000000..d4094d58a --- /dev/null +++ b/include/crypto/des.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DES & Triple DES EDE Cipher Algorithms. + */ + +#ifndef __CRYPTO_DES_H +#define __CRYPTO_DES_H + +#define DES_KEY_SIZE 8 +#define DES_EXPKEY_WORDS 32 +#define DES_BLOCK_SIZE 8 + +#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE) +#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) +#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE + + +extern unsigned long des_ekey(u32 *pe, const u8 *k); + +extern int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, + unsigned int keylen); + +#endif /* __CRYPTO_DES_H */ diff --git a/include/crypto/dh.h b/include/crypto/dh.h new file mode 100644 index 000000000..7e0dad94c --- /dev/null +++ b/include/crypto/dh.h @@ -0,0 +1,91 @@ +/* + * Diffie-Hellman secret to be used with kpp API along with helper functions + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
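As a sketch of how the des_ekey() helper above is commonly used when implementing a setkey routine: it expands the 8-byte key into DES_EXPKEY_WORDS round-key words. The weak-key behaviour assumed here (a zero return value for weak keys) is based on the generic DES implementation and is marked as an assumption in the code; the helper name and error convention are illustrative.

#include <linux/errno.h>
#include <crypto/des.h>

/* Expand @key into @expkey (DES_EXPKEY_WORDS u32 entries). */
static int example_des_expand_key(u32 expkey[DES_EXPKEY_WORDS], const u8 *key)
{
	/* Assumption: des_ekey() returns 0 when @key is a weak DES key. */
	if (!des_ekey(expkey, key))
		return -EINVAL;

	return 0;
}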
+ * + */ +#ifndef _CRYPTO_DH_ +#define _CRYPTO_DH_ + +/** + * DOC: DH Helper Functions + * + * To use DH with the KPP cipher API, the following data structure and + * functions should be used. + * + * To use DH with KPP, the following functions should be used to operate on + * a DH private key. The packet private key that can be set with + * the KPP API function call of crypto_kpp_set_secret. + */ + +/** + * struct dh - define a DH private key + * + * @key: Private DH key + * @p: Diffie-Hellman parameter P + * @q: Diffie-Hellman parameter Q + * @g: Diffie-Hellman generator G + * @key_size: Size of the private DH key + * @p_size: Size of DH parameter P + * @q_size: Size of DH parameter Q + * @g_size: Size of DH generator G + */ +struct dh { + void *key; + void *p; + void *q; + void *g; + unsigned int key_size; + unsigned int p_size; + unsigned int q_size; + unsigned int g_size; +}; + +/** + * crypto_dh_key_len() - Obtain the size of the private DH key + * @params: private DH key + * + * This function returns the packet DH key size. A caller can use that + * with the provided DH private key reference to obtain the required + * memory size to hold a packet key. + * + * Return: size of the key in bytes + */ +unsigned int crypto_dh_key_len(const struct dh *params); + +/** + * crypto_dh_encode_key() - encode the private key + * @buf: Buffer allocated by the caller to hold the packet DH + * private key. The buffer should be at least crypto_dh_key_len + * bytes in size. + * @len: Length of the packet private key buffer + * @params: Buffer with the caller-specified private key + * + * The DH implementations operate on a packet representation of the private + * key. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params); + +/** + * crypto_dh_decode_key() - decode a private key + * @buf: Buffer holding a packet key that should be decoded + * @len: Length of the packet private key buffer + * @params: Buffer allocated by the caller that is filled with the + * unpacked DH private key. + * + * The unpacking obtains the private key by pointing @p to the correct location + * in @buf. Thus, both pointers refer to the same memory. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params); + +#endif diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h new file mode 100644 index 000000000..a6c3b8e7d --- /dev/null +++ b/include/crypto/drbg.h @@ -0,0 +1,285 @@ +/* + * DRBG based on NIST SP800-90A + * + * Copyright Stephan Mueller , 2014 + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, and the entire permission notice in its entirety, + * including the disclaimer of warranties. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. 
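To illustrate the packet-key workflow described above, the following sketch serializes caller-provided DH parameters and hands the packet to a KPP transform with crypto_kpp_set_secret(). The kpp handle is assumed to have been allocated beforehand (e.g. with crypto_alloc_kpp("dh", 0, 0)); the helper name and error handling are illustrative.

#include <linux/slab.h>
#include <crypto/dh.h>
#include <crypto/kpp.h>

static int example_dh_set_secret(struct crypto_kpp *kpp,
				 const struct dh *params)
{
	unsigned int len = crypto_dh_key_len(params);
	char *packet;
	int ret;

	packet = kmalloc(len, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	/* Serialize key, p, q and g into the packet representation. */
	ret = crypto_dh_encode_key(packet, len, params);
	if (!ret)
		ret = crypto_kpp_set_secret(kpp, packet, len);

	kzfree(packet);
	return ret;
}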
+ * + * ALTERNATIVELY, this product may be distributed under the terms of + * the GNU General Public License, in which case the provisions of the GPL are + * required INSTEAD OF the above restrictions. (This clause is + * necessary due to a potential bad interaction between the GPL and + * the restrictions contained in a BSD-style copyright.) + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF + * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + */ + +#ifndef _DRBG_H +#define _DRBG_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Concatenation Helper and string operation helper + * + * SP800-90A requires the concatenation of different data. To avoid copying + * buffers around or allocate additional memory, the following data structure + * is used to point to the original memory with its size. In addition, it + * is used to build a linked list. The linked list defines the concatenation + * of individual buffers. The order of memory block referenced in that + * linked list determines the order of concatenation. 
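A short sketch of the concatenation scheme described above, using the drbg_string_fill() helper defined just below: two buffers are chained on a list so that a DRBG operation consumes them back to back without any copying. The buffer contents and the helper name are illustrative.

#include <linux/list.h>
#include <crypto/drbg.h>

/* Chain two buffers so a DRBG operation consumes them back to back. */
static void example_drbg_concat(struct list_head *seedlist,
				struct drbg_string *s1,
				struct drbg_string *s2)
{
	static const unsigned char pers_a[] = "example-";
	static const unsigned char pers_b[] = "personalization";

	drbg_string_fill(s1, pers_a, sizeof(pers_a) - 1);
	drbg_string_fill(s2, pers_b, sizeof(pers_b) - 1);

	/* The list order is the concatenation order. */
	list_add_tail(&s1->list, seedlist);
	list_add_tail(&s2->list, seedlist);
}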
+ */ +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +static inline void drbg_string_fill(struct drbg_string *string, + const unsigned char *buf, size_t len) +{ + string->buf = buf; + string->len = len; + INIT_LIST_HEAD(&string->list); +} + +struct drbg_state; +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; /* flags for the cipher */ + __u8 statelen; /* maximum state length */ + __u8 blocklen_bytes; /* block size of output in bytes */ + char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */ + /* kernel crypto API backend cipher name */ + char backend_cra_name[CRYPTO_MAX_ALG_NAME]; +}; + +struct drbg_state_ops { + int (*update)(struct drbg_state *drbg, struct list_head *seed, + int reseed); + int (*generate)(struct drbg_state *drbg, + unsigned char *buf, unsigned int buflen, + struct list_head *addtl); + int (*crypto_init)(struct drbg_state *drbg); + int (*crypto_fini)(struct drbg_state *drbg); + +}; + +struct drbg_test_data { + struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */ +}; + +enum drbg_seed_state { + DRBG_SEED_STATE_UNSEEDED, + DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */ + DRBG_SEED_STATE_FULL, +}; + +struct drbg_state { + struct mutex drbg_mutex; /* lock around DRBG */ + unsigned char *V; /* internal state 10.1.1.1 1a) */ + unsigned char *Vbuf; + /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ + unsigned char *C; + unsigned char *Cbuf; + /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ + size_t reseed_ctr; + size_t reseed_threshold; + /* some memory the DRBG can use for its operation */ + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; /* Cipher handle */ + + struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ + struct skcipher_request *ctr_req; /* CTR mode request handle */ + __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ + __u8 *outscratchpad; /* CTR mode aligned outbuf */ + struct crypto_wait ctr_wait; /* CTR mode async wait obj */ + struct scatterlist sg_in, sg_out; /* CTR mode SGLs */ + + enum drbg_seed_state seeded; /* DRBG fully seeded? */ + bool pr; /* Prediction resistance enabled? */ + bool fips_primed; /* Continuous test primed? */ + unsigned char *prev; /* FIPS 140-2 continuous test value */ + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; +}; + +static inline __u8 drbg_statelen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return drbg->core->statelen; + return 0; +} + +static inline __u8 drbg_blocklen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return drbg->core->blocklen_bytes; + return 0; +} + +static inline __u8 drbg_keylen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return (drbg->core->statelen - drbg->core->blocklen_bytes); + return 0; +} + +static inline size_t drbg_max_request_bytes(struct drbg_state *drbg) +{ + /* SP800-90A requires the limit 2**19 bits, but we return bytes */ + return (1 << 16); +} + +static inline size_t drbg_max_addtl(struct drbg_state *drbg) +{ + /* SP800-90A requires 2**35 bytes additional info str / pers str */ +#if (__BITS_PER_LONG == 32) + /* + * SP800-90A allows smaller maximum numbers to be returned -- we + * return SIZE_MAX - 1 to allow the verification of the enforcement + * of this value in drbg_healthcheck_sanity. 
+ */ + return (SIZE_MAX - 1); +#else + return (1UL<<35); +#endif +} + +static inline size_t drbg_max_requests(struct drbg_state *drbg) +{ + /* SP800-90A requires 2**48 maximum requests before reseeding */ + return (1<<20); +} + +/* + * This is a wrapper to the kernel crypto API function of + * crypto_rng_generate() to allow the caller to provide additional data. + * + * @drng DRBG handle -- see crypto_rng_get_bytes + * @outbuf output buffer -- see crypto_rng_get_bytes + * @outlen length of output buffer -- see crypto_rng_get_bytes + * @addtl_input additional information string input buffer + * @addtllen length of additional information string buffer + * + * return + * see crypto_rng_get_bytes + */ +static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng, + unsigned char *outbuf, unsigned int outlen, + struct drbg_string *addtl) +{ + return crypto_rng_generate(drng, addtl->buf, addtl->len, + outbuf, outlen); +} + +/* + * TEST code + * + * This is a wrapper to the kernel crypto API function of + * crypto_rng_generate() to allow the caller to provide additional data and + * allow furnishing of test_data + * + * @drng DRBG handle -- see crypto_rng_get_bytes + * @outbuf output buffer -- see crypto_rng_get_bytes + * @outlen length of output buffer -- see crypto_rng_get_bytes + * @addtl_input additional information string input buffer + * @addtllen length of additional information string buffer + * @test_data filled test data + * + * return + * see crypto_rng_get_bytes + */ +static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng, + unsigned char *outbuf, unsigned int outlen, + struct drbg_string *addtl, + struct drbg_test_data *test_data) +{ + crypto_rng_set_entropy(drng, test_data->testentropy->buf, + test_data->testentropy->len); + return crypto_rng_generate(drng, addtl->buf, addtl->len, + outbuf, outlen); +} + +/* + * TEST code + * + * This is a wrapper to the kernel crypto API function of + * crypto_rng_reset() to allow the caller to provide test_data + * + * @drng DRBG handle -- see crypto_rng_reset + * @pers personalization string input buffer + * @perslen length of additional information string buffer + * @test_data filled test data + * + * return + * see crypto_rng_reset + */ +static inline int crypto_drbg_reset_test(struct crypto_rng *drng, + struct drbg_string *pers, + struct drbg_test_data *test_data) +{ + crypto_rng_set_entropy(drng, test_data->testentropy->buf, + test_data->testentropy->len); + return crypto_rng_reset(drng, pers->buf, pers->len); +} + +/* DRBG type flags */ +#define DRBG_CTR ((drbg_flag_t)1<<0) +#define DRBG_HMAC ((drbg_flag_t)1<<1) +#define DRBG_HASH ((drbg_flag_t)1<<2) +#define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH) +/* DRBG strength flags */ +#define DRBG_STRENGTH128 ((drbg_flag_t)1<<3) +#define DRBG_STRENGTH192 ((drbg_flag_t)1<<4) +#define DRBG_STRENGTH256 ((drbg_flag_t)1<<5) +#define DRBG_STRENGTH_MASK (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \ + DRBG_STRENGTH256) + +enum drbg_prefixes { + DRBG_PREFIX0 = 0x00, + DRBG_PREFIX1, + DRBG_PREFIX2, + DRBG_PREFIX3 +}; + +#endif /* _DRBG_H */ diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h new file mode 100644 index 000000000..d696317c4 --- /dev/null +++ b/include/crypto/ecdh.h @@ -0,0 +1,88 @@ +/* + * ECDH params to be used with kpp API + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the 
Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_ECDH_ +#define _CRYPTO_ECDH_ + +/** + * DOC: ECDH Helper Functions + * + * To use ECDH with the KPP cipher API, the following data structure and + * functions should be used. + * + * The ECC curves known to the ECDH implementation are specified in this + * header file. + * + * To use ECDH with KPP, the following functions should be used to operate on + * an ECDH private key. The packet private key that can be set with + * the KPP API function call of crypto_kpp_set_secret. + */ + +/* Curves IDs */ +#define ECC_CURVE_NIST_P192 0x0001 +#define ECC_CURVE_NIST_P256 0x0002 + +/** + * struct ecdh - define an ECDH private key + * + * @curve_id: ECC curve the key is based on. + * @key: Private ECDH key + * @key_size: Size of the private ECDH key + */ +struct ecdh { + unsigned short curve_id; + char *key; + unsigned short key_size; +}; + +/** + * crypto_ecdh_key_len() - Obtain the size of the private ECDH key + * @params: private ECDH key + * + * This function returns the packet ECDH key size. A caller can use that + * with the provided ECDH private key reference to obtain the required + * memory size to hold a packet key. + * + * Return: size of the key in bytes + */ +unsigned int crypto_ecdh_key_len(const struct ecdh *params); + +/** + * crypto_ecdh_encode_key() - encode the private key + * @buf: Buffer allocated by the caller to hold the packet ECDH + * private key. The buffer should be at least crypto_ecdh_key_len + * bytes in size. + * @len: Length of the packet private key buffer + * @p: Buffer with the caller-specified private key + * + * The ECDH implementations operate on a packet representation of the private + * key. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p); + +/** + * crypto_ecdh_decode_key() - decode a private key + * @buf: Buffer holding a packet key that should be decoded + * @len: Length of the packet private key buffer + * @p: Buffer allocated by the caller that is filled with the + * unpacked ECDH private key. + * + * The unpacking obtains the private key by pointing @p to the correct location + * in @buf. Thus, both pointers refer to the same memory. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p); + +#endif diff --git a/include/crypto/engine.h b/include/crypto/engine.h new file mode 100644 index 000000000..1cbec29af --- /dev/null +++ b/include/crypto/engine.h @@ -0,0 +1,116 @@ +/* + * Crypto engine API + * + * Copyright (c) 2016 Baolin Wang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
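/*
 * Illustrative sketch (not part of these headers): packing a caller-supplied
 * NIST P-256 private key with the ecdh helpers declared above and handing it
 * to a KPP transform via crypto_kpp_set_secret(), as the DOC section above
 * suggests.  The algorithm name "ecdh", the use of <crypto/kpp.h> and the
 * error handling are assumptions made for the example.
 */
static int example_ecdh_set_secret(char *privkey, unsigned short privlen)
{
        struct ecdh p = {
                .curve_id = ECC_CURVE_NIST_P256,
                .key      = privkey,
                .key_size = privlen,
        };
        struct crypto_kpp *tfm;
        unsigned int len;
        char *packed;
        int ret;

        tfm = crypto_alloc_kpp("ecdh", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        len = crypto_ecdh_key_len(&p);
        packed = kmalloc(len, GFP_KERNEL);
        if (!packed) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        ret = crypto_ecdh_encode_key(packed, len, &p);
        if (!ret)
                ret = crypto_kpp_set_secret(tfm, packed, len);

        kzfree(packed);
out_free_tfm:
        crypto_free_kpp(tfm);
        return ret;
}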
+ * + */ +#ifndef _CRYPTO_ENGINE_H +#define _CRYPTO_ENGINE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ENGINE_NAME_LEN 30 +/* + * struct crypto_engine - crypto hardware engine + * @name: the engine name + * @idling: the engine is entering idle state + * @busy: request pump is busy + * @running: the engine is on working + * @cur_req_prepared: current request is prepared + * @list: link with the global crypto engine list + * @queue_lock: spinlock to syncronise access to request queue + * @queue: the crypto queue of the engine + * @rt: whether this queue is set to run as a realtime task + * @prepare_crypt_hardware: a request will soon arrive from the queue + * so the subsystem requests the driver to prepare the hardware + * by issuing this call + * @unprepare_crypt_hardware: there are currently no more requests on the + * queue so the subsystem notifies the driver that it may relax the + * hardware by issuing this call + * @kworker: kthread worker struct for request pump + * @pump_requests: work struct for scheduling work to the request pump + * @priv_data: the engine private data + * @cur_req: the current request which is on processing + */ +struct crypto_engine { + char name[ENGINE_NAME_LEN]; + bool idling; + bool busy; + bool running; + bool cur_req_prepared; + + struct list_head list; + spinlock_t queue_lock; + struct crypto_queue queue; + struct device *dev; + + bool rt; + + int (*prepare_crypt_hardware)(struct crypto_engine *engine); + int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + + struct kthread_worker *kworker; + struct kthread_work pump_requests; + + void *priv_data; + struct crypto_async_request *cur_req; +}; + +/* + * struct crypto_engine_op - crypto hardware engine operations + * @prepare__request: do some prepare if need before handle the current request + * @unprepare_request: undo any work done by prepare_request() + * @do_one_request: do encryption for current request + */ +struct crypto_engine_op { + int (*prepare_request)(struct crypto_engine *engine, + void *areq); + int (*unprepare_request)(struct crypto_engine *engine, + void *areq); + int (*do_one_request)(struct crypto_engine *engine, + void *areq); +}; + +struct crypto_engine_ctx { + struct crypto_engine_op op; +}; + +int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine, + struct ablkcipher_request *req); +int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine, + struct aead_request *req); +int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine, + struct akcipher_request *req); +int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, + struct ahash_request *req); +int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine, + struct skcipher_request *req); +void crypto_finalize_ablkcipher_request(struct crypto_engine *engine, + struct ablkcipher_request *req, int err); +void crypto_finalize_aead_request(struct crypto_engine *engine, + struct aead_request *req, int err); +void crypto_finalize_akcipher_request(struct crypto_engine *engine, + struct akcipher_request *req, int err); +void crypto_finalize_hash_request(struct crypto_engine *engine, + struct ahash_request *req, int err); +void crypto_finalize_skcipher_request(struct crypto_engine *engine, + struct skcipher_request *req, int err); +int crypto_engine_start(struct crypto_engine *engine); +int crypto_engine_stop(struct crypto_engine *engine); +struct crypto_engine 
*crypto_engine_alloc_init(struct device *dev, bool rt); +int crypto_engine_exit(struct crypto_engine *engine); + +#endif /* _CRYPTO_ENGINE_H */ diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h new file mode 100644 index 000000000..c50e057ea --- /dev/null +++ b/include/crypto/gcm.h @@ -0,0 +1,8 @@ +#ifndef _CRYPTO_GCM_H +#define _CRYPTO_GCM_H + +#define GCM_AES_IV_SIZE 12 +#define GCM_RFC4106_IV_SIZE 8 +#define GCM_RFC4543_IV_SIZE 8 + +#endif diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h new file mode 100644 index 000000000..fa0a63d29 --- /dev/null +++ b/include/crypto/gf128mul.h @@ -0,0 +1,252 @@ +/* gf128mul.h - GF(2^128) multiplication functions + * + * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. + * Copyright (c) 2006 Rik Snel + * + * Based on Dr Brian Gladman's (GPL'd) work published at + * http://fp.gladman.plus.com/cryptography_technology/index.htm + * See the original copyright notice below. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ +/* + --------------------------------------------------------------------------- + Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved. + + LICENSE TERMS + + The free distribution and use of this software in both source and binary + form is allowed (with or without changes) provided that: + + 1. distributions of this source code include the above copyright + notice, this list of conditions and the following disclaimer; + + 2. distributions in binary form include the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other associated materials; + + 3. the copyright holder's name is not used to endorse products + built using this software without specific written permission. + + ALTERNATIVELY, provided that this notice is retained in full, this product + may be distributed under the terms of the GNU General Public License (GPL), + in which case the provisions of the GPL apply INSTEAD OF those given above. + + DISCLAIMER + + This software is provided 'as is' with no explicit or implied warranties + in respect of its properties, including, but not limited to, correctness + and/or fitness for purpose. + --------------------------------------------------------------------------- + Issue Date: 31/01/2006 + + An implementation of field multiplication in Galois Field GF(2^128) +*/ + +#ifndef _CRYPTO_GF128MUL_H +#define _CRYPTO_GF128MUL_H + +#include +#include +#include + +/* Comment by Rik: + * + * For some background on GF(2^128) see for example: + * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf + * + * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can + * be mapped to computer memory in a variety of ways. Let's examine + * three common cases. + * + * Take a look at the 16 binary octets below in memory order. The msb's + * are left and the lsb's are right. char b[16] is an array and b[0] is + * the first octet. + * + * 10000000 00000000 00000000 00000000 .... 00000000 00000000 00000000 + * b[0] b[1] b[2] b[3] b[13] b[14] b[15] + * + * Every bit is a coefficient of some power of X. We can store the bits + * in every byte in little-endian order and the bytes themselves also in + * little endian order. I will call this lle (little-little-endian). 
+ * The above buffer represents the polynomial 1, and X^7+X^2+X^1+1 looks + * like 11100001 00000000 .... 00000000 = { 0xE1, 0x00, }. + * This format was originally implemented in gf128mul and is used + * in GCM (Galois/Counter mode) and in ABL (Arbitrary Block Length). + * + * Another convention says: store the bits in bigendian order and the + * bytes also. This is bbe (big-big-endian). Now the buffer above + * represents X^127. X^7+X^2+X^1+1 looks like 00000000 .... 10000111, + * b[15] = 0x87 and the rest is 0. LRW uses this convention and bbe + * is partly implemented. + * + * Both of the above formats are easy to implement on big-endian + * machines. + * + * XTS and EME (the latter of which is patent encumbered) use the ble + * format (bits are stored in big endian order and the bytes in little + * endian). The above buffer represents X^7 in this case and the + * primitive polynomial is b[0] = 0x87. + * + * The common machine word-size is smaller than 128 bits, so to make + * an efficient implementation we must split into machine word sizes. + * This implementation uses 64-bit words for the moment. Machine + * endianness comes into play. The lle format in relation to machine + * endianness is discussed below by the original author of gf128mul Dr + * Brian Gladman. + * + * Let's look at the bbe and ble format on a little endian machine. + * + * bbe on a little endian machine u32 x[4]: + * + * MS x[0] LS MS x[1] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 103..96 111.104 119.112 127.120 71...64 79...72 87...80 95...88 + * + * MS x[2] LS MS x[3] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 39...32 47...40 55...48 63...56 07...00 15...08 23...16 31...24 + * + * ble on a little endian machine + * + * MS x[0] LS MS x[1] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 31...24 23...16 15...08 07...00 63...56 55...48 47...40 39...32 + * + * MS x[2] LS MS x[3] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 95...88 87...80 79...72 71...64 127.120 199.112 111.104 103..96 + * + * Multiplications in GF(2^128) are mostly bit-shifts, so you see why + * ble (and lbe also) are easier to implement on a little-endian + * machine than on a big-endian machine. The converse holds for bbe + * and lle. + * + * Note: to have good alignment, it seems to me that it is sufficient + * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize + * machines this will automatically aligned to wordsize and on a 64-bit + * machine also. + */ +/* Multiply a GF(2^128) field element by x. Field elements are + held in arrays of bytes in which field bits 8n..8n + 7 are held in + byte[n], with lower indexed bits placed in the more numerically + significant bit positions within bytes. 
+ + On little endian machines the bit indexes translate into the bit + positions within four 32-bit words in the following way + + MS x[0] LS MS x[1] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 24...31 16...23 08...15 00...07 56...63 48...55 40...47 32...39 + + MS x[2] LS MS x[3] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 88...95 80...87 72...79 64...71 120.127 112.119 104.111 96..103 + + On big endian machines the bit indexes translate into the bit + positions within four 32-bit words in the following way + + MS x[0] LS MS x[1] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 00...07 08...15 16...23 24...31 32...39 40...47 48...55 56...63 + + MS x[2] LS MS x[3] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127 +*/ + +/* A slow generic version of gf_mul, implemented for lle and bbe + * It multiplies a and b and puts the result in a */ +void gf128mul_lle(be128 *a, const be128 *b); + +void gf128mul_bbe(be128 *a, const be128 *b); + +/* + * The following functions multiply a field element by x in + * the polynomial field representation. They use 64-bit word operations + * to gain speed but compensate for machine endianness and hence work + * correctly on both styles of machine. + * + * They are defined here for performance. + */ + +static inline u64 gf128mul_mask_from_bit(u64 x, int which) +{ + /* a constant-time version of 'x & ((u64)1 << which) ? (u64)-1 : 0' */ + return ((s64)(x << (63 - which)) >> 63); +} + +static inline void gf128mul_x_lle(be128 *r, const be128 *x) +{ + u64 a = be64_to_cpu(x->a); + u64 b = be64_to_cpu(x->b); + + /* equivalent to gf128mul_table_le[(b << 7) & 0xff] << 48 + * (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(b, 0) & ((u64)0xe1 << 56); + + r->b = cpu_to_be64((b >> 1) | (a << 63)); + r->a = cpu_to_be64((a >> 1) ^ _tt); +} + +static inline void gf128mul_x_bbe(be128 *r, const be128 *x) +{ + u64 a = be64_to_cpu(x->a); + u64 b = be64_to_cpu(x->b); + + /* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; + + r->a = cpu_to_be64((a << 1) | (b >> 63)); + r->b = cpu_to_be64((b << 1) ^ _tt); +} + +/* needed by XTS */ +static inline void gf128mul_x_ble(le128 *r, const le128 *x) +{ + u64 a = le64_to_cpu(x->a); + u64 b = le64_to_cpu(x->b); + + /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; + + r->a = cpu_to_le64((a << 1) | (b >> 63)); + r->b = cpu_to_le64((b << 1) ^ _tt); +} + +/* 4k table optimization */ + +struct gf128mul_4k { + be128 t[256]; +}; + +struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g); +struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g); +void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t); +void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t); +void gf128mul_x8_ble(le128 *r, const le128 *x); +static inline void gf128mul_free_4k(struct gf128mul_4k *t) +{ + kzfree(t); +} + + +/* 64k table optimization, implemented for bbe */ + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +/* First initialize with the constant factor with which you + * want to multiply and then call gf128mul_64k_bbe with the other + * factor in the first argument, and the table in the second. + * Afterwards, the result is stored in *a. 
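/*
 * Illustrative sketch (not part of this header): the calling pattern the
 * comment above describes, assuming a be128 constant factor "h" and a data
 * block "x" that the caller already holds.
 */
static int example_gf128mul_64k(be128 *x, const be128 *h)
{
        struct gf128mul_64k *t;

        t = gf128mul_init_64k_bbe(h);   /* build the table for multiplication by h */
        if (!t)
                return -ENOMEM;

        gf128mul_64k_bbe(x, t);         /* *x = *x * h in GF(2^128), bbe convention */
        gf128mul_free_64k(t);
        return 0;
}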
+ */ +struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g); +void gf128mul_free_64k(struct gf128mul_64k *t); +void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t); + +#endif /* _CRYPTO_GF128MUL_H */ diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h new file mode 100644 index 000000000..913630106 --- /dev/null +++ b/include/crypto/ghash.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for GHASH algorithms + */ + +#ifndef __CRYPTO_GHASH_H__ +#define __CRYPTO_GHASH_H__ + +#include +#include + +#define GHASH_BLOCK_SIZE 16 +#define GHASH_DIGEST_SIZE 16 + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[GHASH_BLOCK_SIZE]; + u32 bytes; +}; + +#endif diff --git a/include/crypto/hash.h b/include/crypto/hash.h new file mode 100644 index 000000000..552517dcf --- /dev/null +++ b/include/crypto/hash.h @@ -0,0 +1,946 @@ +/* + * Hash: Hash algorithms under the crypto API + * + * Copyright (c) 2008 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_HASH_H +#define _CRYPTO_HASH_H + +#include +#include + +struct crypto_ahash; + +/** + * DOC: Message Digest Algorithm Definitions + * + * These data structures define modular message digest algorithm + * implementations, managed via crypto_register_ahash(), + * crypto_register_shash(), crypto_unregister_ahash() and + * crypto_unregister_shash(). + */ + +/** + * struct hash_alg_common - define properties of message digest + * @digestsize: Size of the result of the transformation. A buffer of this size + * must be available to the @final and @finup calls, so they can + * store the resulting hash into it. For various predefined sizes, + * search include/crypto/ using + * git grep _DIGEST_SIZE include/crypto. + * @statesize: Size of the block for partial state of the transformation. A + * buffer of this size must be passed to the @export function as it + * will save the partial state of the transformation into it. On the + * other side, the @import function will load the state from a + * buffer of this size as well. + * @base: Start of data structure of cipher algorithm. The common data + * structure of crypto_alg contains information common to all ciphers. + * The hash_alg_common data structure now adds the hash-specific + * information. + */ +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + + struct crypto_alg base; +}; + +struct ahash_request { + struct crypto_async_request base; + + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + + /* This field may only be used by the ahash API code. */ + void *priv; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +#define AHASH_REQUEST_ON_STACK(name, ahash) \ + char __##name##_desc[sizeof(struct ahash_request) + \ + crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \ + struct ahash_request *name = (void *)__##name##_desc + +/** + * struct ahash_alg - asynchronous message digest definition + * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the + * state of the HASH transformation at the beginning. This shall fill in + * the internal structures used during the entire duration of the whole + * transformation. No data processing happens at this point. 
Driver code + * implementation must not use req->result. + * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This + * function actually pushes blocks of data from upper layers into the + * driver, which then passes those to the hardware as seen fit. This + * function must not finalize the HASH transformation by calculating the + * final message digest as this only adds more data into the + * transformation. This function shall not modify the transformation + * context, as this function may be called in parallel with the same + * transformation object. Data processing can happen synchronously + * [SHASH] or asynchronously [AHASH] at this point. Driver must not use + * req->result. + * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the + * transformation and retrieves the resulting hash from the driver and + * pushes it back to upper layers. No data processing happens at this + * point unless hardware requires it to finish the transformation + * (then the data buffered by the device driver is processed). + * @finup: **[optional]** Combination of @update and @final. This function is effectively a + * combination of @update and @final calls issued in sequence. As some + * hardware cannot do @update and @final separately, this callback was + * added to allow such hardware to be used at least by IPsec. Data + * processing can happen synchronously [SHASH] or asynchronously [AHASH] + * at this point. + * @digest: Combination of @init and @update and @final. This function + * effectively behaves as the entire chain of operations, @init, + * @update and @final issued in sequence. Just like @finup, this was + * added for hardware which cannot do even the @finup, but can only do + * the whole transformation in one run. Data processing can happen + * synchronously [SHASH] or asynchronously [AHASH] at this point. + * @setkey: Set optional key used by the hashing algorithm. Intended to push + * optional key used by the hashing algorithm from upper layers into + * the driver. This function can store the key in the transformation + * context or can outright program it into the hardware. In the former + * case, one must be careful to program the key into the hardware at + * appropriate time and one must be careful that .setkey() can be + * called multiple times during the existence of the transformation + * object. Not all hashing algorithms do implement this function as it + * is only needed for keyed message digests. SHAx/MDx/CRCx do NOT + * implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement + * this function. This function must be called before any other of the + * @init, @update, @final, @finup, @digest is called. No data + * processing happens at this point. + * @export: Export partial state of the transformation. This function dumps the + * entire state of the ongoing transformation into a provided block of + * data so it can be @import 'ed back later on. This is useful in case + * you want to save partial result of the transformation after + * processing certain amount of data and reload this partial result + * multiple times later on for multiple re-use. No data processing + * happens at this point. Driver must not use req->result. + * @import: Import partial state of the transformation. This function loads the + * entire state of the ongoing transformation from a provided block of + * data so the transformation can continue from this point onward. No + * data processing happens at this point. 
Driver must not use + * req->result. + * @halg: see struct hash_alg_common + */ +struct ahash_alg { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + struct hash_alg_common halg; +}; + +struct shash_desc { + struct crypto_shash *tfm; + u32 flags; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +#define SHASH_DESC_ON_STACK(shash, ctx) \ + char __##shash##_desc[sizeof(struct shash_desc) + \ + crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \ + struct shash_desc *shash = (struct shash_desc *)__##shash##_desc + +/** + * struct shash_alg - synchronous message digest definition + * @init: see struct ahash_alg + * @update: see struct ahash_alg + * @final: see struct ahash_alg + * @finup: see struct ahash_alg + * @digest: see struct ahash_alg + * @export: see struct ahash_alg + * @import: see struct ahash_alg + * @setkey: see struct ahash_alg + * @digestsize: see struct ahash_alg + * @statesize: see struct ahash_alg + * @descsize: Size of the operational state for the message digest. This state + * size is the memory size that needs to be allocated for + * shash_desc.__ctx + * @base: internally used + */ +struct shash_alg { + int (*init)(struct shash_desc *desc); + int (*update)(struct shash_desc *desc, const u8 *data, + unsigned int len); + int (*final)(struct shash_desc *desc, u8 *out); + int (*finup)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + int (*digest)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + int (*export)(struct shash_desc *desc, void *out); + int (*import)(struct shash_desc *desc, const void *in); + int (*setkey)(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int descsize; + + /* These fields must match hash_alg_common. */ + unsigned int digestsize + __attribute__ ((aligned(__alignof__(struct hash_alg_common)))); + unsigned int statesize; + + struct crypto_alg base; +}; + +struct crypto_ahash { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct crypto_shash { + unsigned int descsize; + struct crypto_tfm base; +}; + +/** + * DOC: Asynchronous Message Digest API + * + * The asynchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto) + * + * The asynchronous cipher operation discussion provided for the + * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well. 
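/*
 * Illustrative sketch (not part of this header): a one-shot SHA-256 digest
 * over a linear buffer with the asynchronous API declared below, waiting for
 * completion via DECLARE_CRYPTO_WAIT()/crypto_wait_req()/crypto_req_done()
 * from <linux/crypto.h> and sg_init_one() from <linux/scatterlist.h>.  The
 * algorithm name "sha256" and the error handling are assumptions made for
 * the example.
 */
static int example_ahash_sha256(const u8 *data, unsigned int len, u8 *digest)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        /* data must live in linearly mapped memory (not on the stack);
         * digest must hold at least crypto_ahash_digestsize(tfm) bytes */
        sg_init_one(&sg, data, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                   CRYPTO_TFM_REQ_MAY_SLEEP,
                                   crypto_req_done, &wait);
        ahash_request_set_crypt(req, &sg, digest, len);

        ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return ret;
}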
+ */ + +static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_ahash, base); +} + +/** + * crypto_alloc_ahash() - allocate ahash cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an ahash. The returned struct + * crypto_ahash is the cipher handle that is required for any subsequent + * API invocation for that ahash. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_ahash() - zeroize and free the ahash handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_ahash(struct crypto_ahash *tfm) +{ + crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); +} + +/** + * crypto_has_ahash() - Search for the availability of an ahash. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash + * @type: specifies the type of the ahash + * @mask: specifies the mask for the ahash + * + * Return: true when the ahash is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_ahash(const char *alg_name, u32 type, u32 mask); + +static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); +} + +static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); +} + +static inline unsigned int crypto_ahash_alignmask( + struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); +} + +/** + * crypto_ahash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ +static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); +} + +static inline struct hash_alg_common *__crypto_hash_alg_common( + struct crypto_alg *alg) +{ + return container_of(alg, struct hash_alg_common, base); +} + +static inline struct hash_alg_common *crypto_hash_alg_common( + struct crypto_ahash *tfm) +{ + return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); +} + +/** + * crypto_ahash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * + * Return: message digest size of cipher + */ +static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->digestsize; +} + +/** + * crypto_ahash_statesize() - obtain size of the ahash state + * @tfm: cipher handle + * + * Return the size of the ahash state. With the crypto_ahash_export() + * function, the caller can export the state into a buffer whose size is + * defined with this function. 
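/*
 * Illustrative sketch (not part of this header): saving and restoring a
 * partial hash state with crypto_ahash_export()/crypto_ahash_import(),
 * sizing the state buffer with crypto_ahash_statesize() as described here.
 * "req" is assumed to be an ahash_request that was already initialized and
 * fed with some data.
 */
static int example_ahash_save_restore(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        void *state;
        int ret;

        state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        ret = crypto_ahash_export(req, state);          /* snapshot the state */
        if (!ret)
                ret = crypto_ahash_import(req, state);  /* ...and restore it  */

        kzfree(state);
        return ret;
}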
+ * + * Return: size of the ahash state + */ +static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->statesize; +} + +static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) +{ + return crypto_tfm_get_flags(crypto_ahash_tfm(tfm)); +} + +static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags); +} + +static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags); +} + +/** + * crypto_ahash_reqtfm() - obtain cipher handle from request + * @req: asynchronous request handle that contains the reference to the ahash + * cipher handle + * + * Return the ahash cipher handle that is registered with the asynchronous + * request handle ahash_request. + * + * Return: ahash cipher handle + */ +static inline struct crypto_ahash *crypto_ahash_reqtfm( + struct ahash_request *req) +{ + return __crypto_ahash_cast(req->base.tfm); +} + +/** + * crypto_ahash_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: size of the request data + */ +static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) +{ + return tfm->reqsize; +} + +static inline void *ahash_request_ctx(struct ahash_request *req) +{ + return req->__ctx; +} + +/** + * crypto_ahash_setkey - set key for cipher handle + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the ahash cipher. The cipher + * handle must point to a keyed hash in order for this function to succeed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + +/** + * crypto_ahash_finup() - update and finalize message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of + * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * meaning as discussed for those separate functions. + * + * Return: see crypto_ahash_final() + */ +int crypto_ahash_finup(struct ahash_request *req); + +/** + * crypto_ahash_final() - calculate message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer registered with the ahash_request handle. + * + * Return: + * 0 if the message digest was successfully calculated; + * -EINPROGRESS if data is feeded into hardware (DMA) or queued for later; + * -EBUSY if queue is full and request should be resubmitted later; + * other < 0 if an error occurred + */ +int crypto_ahash_final(struct ahash_request *req); + +/** + * crypto_ahash_digest() - calculate message digest for a buffer + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of crypto_ahash_init, + * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * meaning as discussed for those separate three functions. 
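/*
 * Illustrative sketch (not part of this header): keyed message digests such
 * as HMAC additionally require crypto_ahash_setkey() before the first hash
 * operation.  The algorithm name "hmac(sha256)" is an assumption for the
 * example; "req" is assumed to be an ahash_request already prepared for the
 * same transform (callback and data buffers set).
 */
static int example_keyed_ahash(struct crypto_ahash *tfm,
                               struct ahash_request *req,
                               const u8 *key, unsigned int keylen)
{
        int ret;

        /* tfm allocated e.g. with crypto_alloc_ahash("hmac(sha256)", 0, 0) */
        ret = crypto_ahash_setkey(tfm, key, keylen);
        if (ret)
                return ret;

        return crypto_ahash_digest(req);        /* init + update + final */
}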
+ * + * Return: see crypto_ahash_final() + */ +int crypto_ahash_digest(struct ahash_request *req); + +/** + * crypto_ahash_export() - extract current message digest state + * @req: reference to the ahash_request handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the ahash_request handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_ahash_statesize()). + * + * Return: 0 if the export was successful; < 0 if an error occurred + */ +static inline int crypto_ahash_export(struct ahash_request *req, void *out) +{ + return crypto_ahash_reqtfm(req)->export(req, out); +} + +/** + * crypto_ahash_import() - import message digest state + * @req: reference to ahash_request handle the state is imported into + * @in: buffer holding the state + * + * This function imports the hash state into the ahash_request handle from the + * input buffer. That buffer should have been generated with the + * crypto_ahash_export function. + * + * Return: 0 if the import was successful; < 0 if an error occurred + */ +static inline int crypto_ahash_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->import(req, in); +} + +/** + * crypto_ahash_init() - (re)initialize message digest handle + * @req: ahash_request handle that already is initialized with all necessary + * data using the ahash_request_* API functions + * + * The call (re-)initializes the message digest referenced by the ahash_request + * handle. Any potentially existing state created by previous operations is + * discarded. + * + * Return: see crypto_ahash_final() + */ +static inline int crypto_ahash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->init(req); +} + +/** + * crypto_ahash_update() - add data to message digest for processing + * @req: ahash_request handle that was previously initialized with the + * crypto_ahash_init call. + * + * Updates the message digest state of the &ahash_request handle. The input data + * is pointed to by the scatter/gather list registered in the &ahash_request + * handle + * + * Return: see crypto_ahash_final() + */ +static inline int crypto_ahash_update(struct ahash_request *req) +{ + return crypto_ahash_reqtfm(req)->update(req); +} + +/** + * DOC: Asynchronous Hash Request Handle + * + * The &ahash_request data structure contains all pointers to data + * required for the asynchronous cipher operation. This includes the cipher + * handle (which can be used by multiple &ahash_request instances), pointer + * to plaintext and the message digest output buffer, asynchronous callback + * function, etc. It acts as a handle to the ahash_request_* API calls in a + * similar way as ahash handle to the crypto_ahash_* API calls. + */ + +/** + * ahash_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing ahash handle in the request + * data structure with a different one. 
+ */ +static inline void ahash_request_set_tfm(struct ahash_request *req, + struct crypto_ahash *tfm) +{ + req->base.tfm = crypto_ahash_tfm(tfm); +} + +/** + * ahash_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the ahash + * message digest API calls. During + * the allocation, the provided ahash handle + * is registered in the request data structure. + * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct ahash_request *ahash_request_alloc( + struct crypto_ahash *tfm, gfp_t gfp) +{ + struct ahash_request *req; + + req = kmalloc(sizeof(struct ahash_request) + + crypto_ahash_reqsize(tfm), gfp); + + if (likely(req)) + ahash_request_set_tfm(req, tfm); + + return req; +} + +/** + * ahash_request_free() - zeroize and free the request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void ahash_request_free(struct ahash_request *req) +{ + kzfree(req); +} + +static inline void ahash_request_zero(struct ahash_request *req) +{ + memzero_explicit(req, sizeof(*req) + + crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); +} + +static inline struct ahash_request *ahash_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct ahash_request, base); +} + +/** + * ahash_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * &crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once + * the cipher operation completes. + * + * The callback function is registered with the &ahash_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void ahash_request_set_callback(struct ahash_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * ahash_request_set_crypt() - set data buffers + * @req: ahash_request handle to be updated + * @src: source scatter/gather list + * @result: buffer that is filled with the message digest -- the caller must + * ensure that the buffer has sufficient space by, for example, calling + * crypto_ahash_digestsize() + * @nbytes: number of bytes to process from the source scatter/gather list + * + * By using this call, the caller references the source scatter/gather list. 
+ * The source scatter/gather list points to the data the message digest is to + * be calculated for. + */ +static inline void ahash_request_set_crypt(struct ahash_request *req, + struct scatterlist *src, u8 *result, + unsigned int nbytes) +{ + req->src = src; + req->nbytes = nbytes; + req->result = result; +} + +/** + * DOC: Synchronous Message Digest API + * + * The synchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto) + * + * The message digest API is able to maintain state information for the + * caller. + * + * The synchronous message digest API can store user-related context in in its + * shash_desc request data structure. + */ + +/** + * crypto_alloc_shash() - allocate message digest handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a message digest. The returned &struct + * crypto_shash is the cipher handle that is required for any subsequent + * API invocation for that message digest. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_shash() - zeroize and free the message digest handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_shash(struct crypto_shash *tfm) +{ + crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm)); +} + +static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_name(crypto_shash_tfm(tfm)); +} + +static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); +} + +static inline unsigned int crypto_shash_alignmask( + struct crypto_shash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); +} + +/** + * crypto_shash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ +static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm)); +} + +static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct shash_alg, base); +} + +static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm) +{ + return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg); +} + +/** + * crypto_shash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. 
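/*
 * Illustrative sketch (not part of this header): a synchronous one-shot
 * SHA-256 digest using SHASH_DESC_ON_STACK() and crypto_shash_digest().
 * The algorithm name "sha256" is an assumption for the example; "digest"
 * must provide at least crypto_shash_digestsize(tfm) bytes.
 */
static int example_shash_sha256(const u8 *data, unsigned int len, u8 *digest)
{
        struct crypto_shash *tfm;
        int ret;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;        /* or CRYPTO_TFM_REQ_MAY_SLEEP */
                ret = crypto_shash_digest(desc, data, len, digest);
                shash_desc_zero(desc);
        }

        crypto_free_shash(tfm);
        return ret;
}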
+ * + * Return: digest size of cipher + */ +static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->digestsize; +} + +static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->statesize; +} + +static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm) +{ + return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); +} + +static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags); +} + +static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags); +} + +/** + * crypto_shash_descsize() - obtain the operational state size + * @tfm: cipher handle + * + * The size of the operational state the cipher needs during operation is + * returned for the hash referenced with the cipher handle. This size is + * required to calculate the memory requirements to allow the caller allocating + * sufficient memory for operational state. + * + * The operational state is defined with struct shash_desc where the size of + * that data structure is to be calculated as + * sizeof(struct shash_desc) + crypto_shash_descsize(alg) + * + * Return: size of the operational state + */ +static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) +{ + return tfm->descsize; +} + +static inline void *shash_desc_ctx(struct shash_desc *desc) +{ + return desc->__ctx; +} + +/** + * crypto_shash_setkey() - set key for message digest + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the keyed message digest cipher. The + * cipher handle must point to a keyed message digest cipher in order for this + * function to succeed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + +/** + * crypto_shash_digest() - calculate message digest for buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of crypto_shash_init, + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + +/** + * crypto_shash_export() - extract operational state for message digest + * @desc: reference to the operational state handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the operational state handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_shash_descsize). 
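/*
 * Illustrative sketch (not part of this header): exporting a partial shash
 * state and importing it into a second descriptor for the same transform.
 * The buffer is sized with crypto_shash_statesize(), matching the @statesize
 * partial-state size documented for hash_alg_common earlier in this file;
 * "src" and "dst" are assumed to be descriptors set up for the same tfm.
 */
static int example_shash_clone_state(struct shash_desc *src,
                                     struct shash_desc *dst)
{
        void *state;
        int ret;

        state = kmalloc(crypto_shash_statesize(src->tfm), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        ret = crypto_shash_export(src, state);
        if (!ret)
                ret = crypto_shash_import(dst, state);

        kzfree(state);
        return ret;
}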
+ * + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ +static inline int crypto_shash_export(struct shash_desc *desc, void *out) +{ + return crypto_shash_alg(desc->tfm)->export(desc, out); +} + +/** + * crypto_shash_import() - import operational state + * @desc: reference to the operational state handle the state imported into + * @in: buffer holding the state + * + * This function imports the hash state into the operational state handle from + * the input buffer. That buffer should have been generated with the + * crypto_ahash_export function. + * + * Return: 0 if the import was successful; < 0 if an error occurred + */ +static inline int crypto_shash_import(struct shash_desc *desc, const void *in) +{ + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->import(desc, in); +} + +/** + * crypto_shash_init() - (re)initialize message digest + * @desc: operational state handle that is already filled + * + * The call (re-)initializes the message digest referenced by the + * operational state handle. Any potentially existing state created by + * previous operations is discarded. + * + * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred + */ +static inline int crypto_shash_init(struct shash_desc *desc) +{ + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->init(desc); +} + +/** + * crypto_shash_update() - add data to message digest for processing + * @desc: operational state handle that is already initialized + * @data: input data to be added to the message digest + * @len: length of the input data + * + * Updates the message digest state of the operational state handle. + * + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred + */ +int crypto_shash_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +/** + * crypto_shash_final() - calculate message digest + * @desc: operational state handle that is already filled with data + * @out: output buffer filled with the message digest + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer. The caller must ensure that the output buffer is + * large enough by using crypto_shash_digestsize. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_final(struct shash_desc *desc, u8 *out); + +/** + * crypto_shash_finup() - calculate message digest of buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate functions. 
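/*
 * Illustrative sketch (not part of this header): hashing two separate
 * buffers incrementally; crypto_shash_finup() folds the final update and
 * the finalization into one call.  "desc" is assumed to be a prepared
 * shash_desc (tfm and flags set) and "out" large enough for the digest.
 */
static int example_shash_incremental(struct shash_desc *desc,
                                     const u8 *part1, unsigned int len1,
                                     const u8 *part2, unsigned int len2,
                                     u8 *out)
{
        int ret;

        ret = crypto_shash_init(desc);
        if (ret)
                return ret;

        ret = crypto_shash_update(desc, part1, len1);
        if (ret)
                return ret;

        return crypto_shash_finup(desc, part2, len2, out);
}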
+ * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + +static inline void shash_desc_zero(struct shash_desc *desc) +{ + memzero_explicit(desc, + sizeof(*desc) + crypto_shash_descsize(desc->tfm)); +} + +#endif /* _CRYPTO_HASH_H */ diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h new file mode 100644 index 000000000..56f217d41 --- /dev/null +++ b/include/crypto/hash_info.h @@ -0,0 +1,43 @@ +/* + * Hash Info: Hash algorithms information + * + * Copyright (c) 2013 Dmitry Kasatkin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_HASH_INFO_H +#define _CRYPTO_HASH_INFO_H + +#include +#include + +#include + +/* not defined in include/crypto/ */ +#define RMD128_DIGEST_SIZE 16 +#define RMD160_DIGEST_SIZE 20 +#define RMD256_DIGEST_SIZE 32 +#define RMD320_DIGEST_SIZE 40 + +/* not defined in include/crypto/ */ +#define WP512_DIGEST_SIZE 64 +#define WP384_DIGEST_SIZE 48 +#define WP256_DIGEST_SIZE 32 + +/* not defined in include/crypto/ */ +#define TGR128_DIGEST_SIZE 16 +#define TGR160_DIGEST_SIZE 20 +#define TGR192_DIGEST_SIZE 24 + +/* not defined in include/crypto/ */ +#define SM3256_DIGEST_SIZE 32 + +extern const char *const hash_algo_name[HASH_ALGO__LAST]; +extern const int hash_digest_size[HASH_ALGO__LAST]; + +#endif /* _CRYPTO_HASH_INFO_H */ diff --git a/include/crypto/hmac.h b/include/crypto/hmac.h new file mode 100644 index 000000000..66774132a --- /dev/null +++ b/include/crypto/hmac.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_HMAC_H +#define _CRYPTO_HMAC_H + +#define HMAC_IPAD_VALUE 0x36 +#define HMAC_OPAD_VALUE 0x5c + +#endif /* _CRYPTO_HMAC_H */ diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h new file mode 100644 index 000000000..11f107df7 --- /dev/null +++ b/include/crypto/if_alg.h @@ -0,0 +1,256 @@ +/* + * if_alg: User-space algorithm interface + * + * Copyright (c) 2010 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
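/*
 * Illustrative sketch (not part of these headers): how the HMAC_IPAD_VALUE
 * and HMAC_OPAD_VALUE bytes defined above enter the generic HMAC
 * construction (RFC 2104): a block-sized key is XORed with each pad byte
 * before the inner and outer hash passes.  This is a simplified rendering
 * for illustration, not a transcript of crypto/hmac.c.
 */
static void example_hmac_pads(const u8 *blocksized_key, unsigned int bs,
                              u8 *ipad, u8 *opad)
{
        unsigned int i;

        for (i = 0; i < bs; i++) {
                ipad[i] = blocksized_key[i] ^ HMAC_IPAD_VALUE;
                opad[i] = blocksized_key[i] ^ HMAC_OPAD_VALUE;
        }
        /* HMAC(K, m) = H(opad || H(ipad || m)) with these padded blocks */
}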
+ * + */ + +#ifndef _CRYPTO_IF_ALG_H +#define _CRYPTO_IF_ALG_H + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define ALG_MAX_PAGES 16 + +struct crypto_async_request; + +struct alg_sock { + /* struct sock must be the first member of struct alg_sock */ + struct sock sk; + + struct sock *parent; + + atomic_t refcnt; + atomic_t nokey_refcnt; + + const struct af_alg_type *type; + void *private; +}; + +struct af_alg_control { + struct af_alg_iv *iv; + int op; + unsigned int aead_assoclen; +}; + +struct af_alg_type { + void *(*bind)(const char *name, u32 type, u32 mask); + void (*release)(void *private); + int (*setkey)(void *private, const u8 *key, unsigned int keylen); + int (*accept)(void *private, struct sock *sk); + int (*accept_nokey)(void *private, struct sock *sk); + int (*setauthsize)(void *private, unsigned int authsize); + + struct proto_ops *ops; + struct proto_ops *ops_nokey; + struct module *owner; + char name[14]; +}; + +struct af_alg_sgl { + struct scatterlist sg[ALG_MAX_PAGES + 1]; + struct page *pages[ALG_MAX_PAGES]; + unsigned int npages; +}; + +/* TX SGL entry */ +struct af_alg_tsgl { + struct list_head list; + unsigned int cur; /* Last processed SG entry */ + struct scatterlist sg[0]; /* Array of SGs forming the SGL */ +}; + +#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \ + sizeof(struct scatterlist) - 1) + +/* RX SGL entry */ +struct af_alg_rsgl { + struct af_alg_sgl sgl; + struct list_head list; + size_t sg_num_bytes; /* Bytes of data in that SGL */ +}; + +/** + * struct af_alg_async_req - definition of crypto request + * @iocb: IOCB for AIO operations + * @sk: Socket the request is associated with + * @first_rsgl: First RX SG + * @last_rsgl: Pointer to last RX SG + * @rsgl_list: Track RX SGs + * @tsgl: Private, per request TX SGL of buffers to process + * @tsgl_entries: Number of entries in priv. TX SGL + * @outlen: Number of output bytes generated by crypto op + * @areqlen: Length of this data structure + * @cra_u: Cipher request + */ +struct af_alg_async_req { + struct kiocb *iocb; + struct sock *sk; + + struct af_alg_rsgl first_rsgl; + struct af_alg_rsgl *last_rsgl; + struct list_head rsgl_list; + + struct scatterlist *tsgl; + unsigned int tsgl_entries; + + unsigned int outlen; + unsigned int areqlen; + + union { + struct aead_request aead_req; + struct skcipher_request skcipher_req; + } cra_u; + + /* req ctx trails this struct */ +}; + +/** + * struct af_alg_ctx - definition of the crypto context + * + * The crypto context tracks the input data during the lifetime of an AF_ALG + * socket. + * + * @tsgl_list: Link to TX SGL + * @iv: IV for cipher operation + * @aead_assoclen: Length of AAD for AEAD cipher operations + * @completion: Work queue for synchronous operation + * @used: TX bytes sent to kernel. This variable is used to + * ensure that user space cannot cause the kernel + * to allocate too much memory in sendmsg operation. + * @rcvused: Total RX bytes to be filled by kernel. This variable + * is used to ensure user space cannot cause the kernel + * to allocate too much memory in a recvmsg operation. + * @more: More data to be expected from user space? + * @merge: Shall new data from user space be merged into existing + * SG? + * @enc: Cryptographic operation to be performed when + * recvmsg is invoked. + * @len: Length of memory allocated for this data structure. 
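/*
 * Illustrative sketch (not part of this header): the user-space side that
 * this kernel interface serves, hashing a buffer over an AF_ALG "hash"
 * socket.  It is plain user-space C relying on the uapi definitions from
 * <linux/if_alg.h>; the algorithm name "sha256" is an assumption and error
 * handling is omitted for brevity.
 */
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>

static int hash_sha256(const void *data, size_t len, unsigned char digest[32])
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                .salg_name   = "sha256",
        };
        int tfmfd, opfd;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        opfd = accept(tfmfd, NULL, 0);

        send(opfd, data, len, 0);       /* feed the data          */
        read(opfd, digest, 32);         /* read back the digest   */

        close(opfd);
        close(tfmfd);
        return 0;
}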
+ */ +struct af_alg_ctx { + struct list_head tsgl_list; + + void *iv; + size_t aead_assoclen; + + struct crypto_wait wait; + + size_t used; + atomic_t rcvused; + + bool more; + bool merge; + bool enc; + + unsigned int len; +}; + +int af_alg_register_type(const struct af_alg_type *type); +int af_alg_unregister_type(const struct af_alg_type *type); + +int af_alg_release(struct socket *sock); +void af_alg_release_parent(struct sock *sk); +int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern); + +int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len); +void af_alg_free_sg(struct af_alg_sgl *sgl); +void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new); + +int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); + +static inline struct alg_sock *alg_sk(struct sock *sk) +{ + return (struct alg_sock *)sk; +} + +/** + * Size of available buffer for sending data from user space to kernel. + * + * @sk socket of connection to user space + * @return number of bytes still available + */ +static inline int af_alg_sndbuf(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + + return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - + ctx->used, 0); +} + +/** + * Can the send buffer still be written to? + * + * @sk socket of connection to user space + * @return true => writable, false => not writable + */ +static inline bool af_alg_writable(struct sock *sk) +{ + return PAGE_SIZE <= af_alg_sndbuf(sk); +} + +/** + * Size of available buffer used by kernel for the RX user space operation. + * + * @sk socket of connection to user space + * @return number of bytes still available + */ +static inline int af_alg_rcvbuf(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + + return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - + atomic_read(&ctx->rcvused), 0); +} + +/** + * Can the RX buffer still be written to? 
+ * + * @sk socket of connection to user space + * @return true => writable, false => not writable + */ +static inline bool af_alg_readable(struct sock *sk) +{ + return PAGE_SIZE <= af_alg_rcvbuf(sk); +} + +int af_alg_alloc_tsgl(struct sock *sk); +unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); +void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, + size_t dst_offset); +void af_alg_free_areq_sgls(struct af_alg_async_req *areq); +int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags); +void af_alg_wmem_wakeup(struct sock *sk); +int af_alg_wait_for_data(struct sock *sk, unsigned flags); +void af_alg_data_wakeup(struct sock *sk); +int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, + unsigned int ivsize); +ssize_t af_alg_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int flags); +void af_alg_free_resources(struct af_alg_async_req *areq); +void af_alg_async_cb(struct crypto_async_request *_req, int err); +__poll_t af_alg_poll(struct file *file, struct socket *sock, + poll_table *wait); +struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, + unsigned int areqlen); +int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, + struct af_alg_async_req *areq, size_t maxsize, + size_t *outlen); + +#endif /* _CRYPTO_IF_ALG_H */ diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h new file mode 100644 index 000000000..51052f65c --- /dev/null +++ b/include/crypto/internal/acompress.h @@ -0,0 +1,84 @@ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li + * Giovanni Cabiddu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_ACOMP_INT_H +#define _CRYPTO_ACOMP_INT_H +#include + +/* + * Transform internal helpers. 
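[Editor's note: the struct af_alg_type hooks and af_alg_register_type()/af_alg_unregister_type() entry points declared in crypto/if_alg.h above are what the algif_* front ends plug into. The sketch below shows only that registration pattern with made-up example_* names; a real front end also supplies .accept and a struct proto_ops wired to af_alg_sendmsg(), af_alg_poll() and friends, which are omitted here.]

#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <linux/module.h>

/* Illustrative AF_ALG front end; every example_* name is hypothetical. */
static void *example_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_skcipher(name, type, mask);
}

static void example_release(void *private)
{
	crypto_free_skcipher(private);
}

static int example_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_skcipher_setkey(private, key, keylen);
}

static const struct af_alg_type algif_type_example = {
	.bind		= example_bind,
	.release	= example_release,
	.setkey		= example_setkey,
	/* .accept and .ops (proto_ops around af_alg_sendmsg() etc.) omitted */
	.name		= "example",	/* matched against sockaddr_alg->salg_type */
	.owner		= THIS_MODULE,
};

static int __init algif_example_init(void)
{
	return af_alg_register_type(&algif_type_example);
}

static void __exit algif_example_exit(void)
{
	af_alg_unregister_type(&algif_type_example);
}

module_init(algif_example_init);
module_exit(algif_example_exit);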
+ */ +static inline void *acomp_request_ctx(struct acomp_req *req) +{ + return req->__ctx; +} + +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void acomp_request_complete(struct acomp_req *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *acomp_alg_name(struct crypto_acomp *tfm) +{ + return crypto_acomp_tfm(tfm)->__crt_alg->cra_name; +} + +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) +{ + struct acomp_req *req; + + req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); + if (likely(req)) + acomp_request_set_tfm(req, tfm); + return req; +} + +static inline void __acomp_request_free(struct acomp_req *req) +{ + kzfree(req); +} + +/** + * crypto_register_acomp() -- Register asynchronous compression algorithm + * + * Function registers an implementation of an asynchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_acomp(struct acomp_alg *alg); + +/** + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm + * + * Function unregisters an implementation of an asynchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_unregister_acomp(struct acomp_alg *alg); + +int crypto_register_acomps(struct acomp_alg *algs, int count); +void crypto_unregister_acomps(struct acomp_alg *algs, int count); + +#endif diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h new file mode 100644 index 000000000..6ad8e31d3 --- /dev/null +++ b/include/crypto/internal/aead.h @@ -0,0 +1,191 @@ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007-2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
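[Editor's note: the crypto_register_acomp()/crypto_unregister_acomp() entry points above are used by acomp providers roughly as follows. struct acomp_alg and struct acomp_req live in crypto/acompress.h, outside this hunk, so the field names below are stated from memory and the example_* callbacks are placeholders rather than a working compressor.]

#include <crypto/acompress.h>
#include <crypto/internal/acompress.h>
#include <linux/module.h>

static int example_acomp_compress(struct acomp_req *req)
{
	/* a real driver consumes req->src and writes req->dst here */
	return -ENOSYS;
}

static int example_acomp_decompress(struct acomp_req *req)
{
	return -ENOSYS;
}

static struct acomp_alg example_acomp = {
	.compress	= example_acomp_compress,
	.decompress	= example_acomp_decompress,
	.base		= {
		.cra_name		= "example",
		.cra_driver_name	= "example-generic",
		.cra_priority		= 100,
		.cra_module		= THIS_MODULE,
	},
};

static int __init example_acomp_mod_init(void)
{
	return crypto_register_acomp(&example_acomp);
}

static void __exit example_acomp_mod_exit(void)
{
	crypto_unregister_acomp(&example_acomp);
}

module_init(example_acomp_mod_init);
module_exit(example_acomp_mod_exit);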
+ * + */ + +#ifndef _CRYPTO_INTERNAL_AEAD_H +#define _CRYPTO_INTERNAL_AEAD_H + +#include +#include +#include +#include + +struct rtattr; + +struct aead_instance { + void (*free)(struct aead_instance *inst); + union { + struct { + char head[offsetof(struct aead_alg, base)]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +struct aead_queue { + struct crypto_queue base; +}; + +static inline void *crypto_aead_ctx(struct crypto_aead *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline struct crypto_instance *aead_crypto_instance( + struct aead_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct aead_instance *aead_instance(struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct aead_instance, alg.base); +} + +static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead) +{ + return aead_instance(crypto_tfm_alg_instance(&aead->base)); +} + +static inline void *aead_instance_ctx(struct aead_instance *inst) +{ + return crypto_instance_ctx(aead_crypto_instance(inst)); +} + +static inline void *aead_request_ctx(struct aead_request *req) +{ + return req->__ctx; +} + +static inline void aead_request_complete(struct aead_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 aead_request_flags(struct aead_request *req) +{ + return req->base.flags; +} + +static inline struct aead_request *aead_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct aead_request, base); +} + +static inline void crypto_set_aead_spawn( + struct crypto_aead_spawn *spawn, struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct aead_alg *crypto_spawn_aead_alg( + struct crypto_aead_spawn *spawn) +{ + return container_of(spawn->base.alg, struct aead_alg, base); +} + +static inline struct crypto_aead *crypto_spawn_aead( + struct crypto_aead_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_aead_set_reqsize(struct crypto_aead *aead, + unsigned int reqsize) +{ + aead->reqsize = reqsize; +} + +static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) +{ + return alg->maxauthsize; +} + +static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) +{ + return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); +} + +static inline void aead_init_queue(struct aead_queue *queue, + unsigned int max_qlen) +{ + crypto_init_queue(&queue->base, max_qlen); +} + +static inline int aead_enqueue_request(struct aead_queue *queue, + struct aead_request *request) +{ + return crypto_enqueue_request(&queue->base, &request->base); +} + +static inline struct aead_request *aead_dequeue_request( + struct aead_queue *queue) +{ + struct crypto_async_request *req; + + req = crypto_dequeue_request(&queue->base); + + return req ? container_of(req, struct aead_request, base) : NULL; +} + +static inline struct aead_request *aead_get_backlog(struct aead_queue *queue) +{ + struct crypto_async_request *req; + + req = crypto_get_backlog(&queue->base); + + return req ? 
container_of(req, struct aead_request, base) : NULL; +} + +static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg) +{ + return alg->chunksize; +} + +/** + * crypto_aead_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CCM. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. + * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm) +{ + return crypto_aead_alg_chunksize(crypto_aead_alg(tfm)); +} + +int crypto_register_aead(struct aead_alg *alg); +void crypto_unregister_aead(struct aead_alg *alg); +int crypto_register_aeads(struct aead_alg *algs, int count); +void crypto_unregister_aeads(struct aead_alg *algs, int count); +int aead_register_instance(struct crypto_template *tmpl, + struct aead_instance *inst); + +#endif /* _CRYPTO_INTERNAL_AEAD_H */ + diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h new file mode 100644 index 000000000..805686ba2 --- /dev/null +++ b/include/crypto/internal/akcipher.h @@ -0,0 +1,144 @@ +/* + * Public Key Encryption + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_AKCIPHER_INT_H +#define _CRYPTO_AKCIPHER_INT_H +#include +#include + +struct akcipher_instance { + void (*free)(struct akcipher_instance *inst); + union { + struct { + char head[offsetof(struct akcipher_alg, base)]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; + +/* + * Transform internal helpers. 
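[Editor's note: the aead_queue wrappers above (aead_init_queue(), aead_enqueue_request(), aead_dequeue_request(), aead_get_backlog()) are thin shims around the generic crypto_queue. A driver that serializes requests might use them roughly as sketched below; the example_* types are hypothetical and the hardware submission path is elided.]

#include <crypto/internal/aead.h>
#include <linux/spinlock.h>

struct example_aead_engine {
	struct aead_queue	queue;
	spinlock_t		lock;
};

static void example_engine_init(struct example_aead_engine *engine)
{
	spin_lock_init(&engine->lock);
	aead_init_queue(&engine->queue, 32);	/* arbitrary queue depth */
}

static int example_engine_queue(struct example_aead_engine *engine,
				struct aead_request *req)
{
	int err;

	spin_lock_bh(&engine->lock);
	err = aead_enqueue_request(&engine->queue, req);
	spin_unlock_bh(&engine->lock);

	return err;	/* usually -EINPROGRESS; see crypto_enqueue_request() */
}

static void example_engine_pump(struct example_aead_engine *engine)
{
	struct aead_request *backlog, *req;

	spin_lock_bh(&engine->lock);
	backlog = aead_get_backlog(&engine->queue);
	req = aead_dequeue_request(&engine->queue);
	spin_unlock_bh(&engine->lock);

	if (backlog)
		aead_request_complete(backlog, -EINPROGRESS);

	if (req) {
		/* hand req to the hardware; its completion path calls
		 * aead_request_complete(req, err) when finished */
	}
}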
+ */ +static inline void *akcipher_request_ctx(struct akcipher_request *req) +{ + return req->__ctx; +} + +static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher, + unsigned int reqsize) +{ + crypto_akcipher_alg(akcipher)->reqsize = reqsize; +} + +static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void akcipher_request_complete(struct akcipher_request *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm) +{ + return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name; +} + +static inline struct crypto_instance *akcipher_crypto_instance( + struct akcipher_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct akcipher_instance *akcipher_instance( + struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct akcipher_instance, alg.base); +} + +static inline struct akcipher_instance *akcipher_alg_instance( + struct crypto_akcipher *akcipher) +{ + return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base)); +} + +static inline void *akcipher_instance_ctx(struct akcipher_instance *inst) +{ + return crypto_instance_ctx(akcipher_crypto_instance(inst)); +} + +static inline void crypto_set_akcipher_spawn( + struct crypto_akcipher_spawn *spawn, + struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline struct crypto_akcipher *crypto_spawn_akcipher( + struct crypto_akcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct akcipher_alg *crypto_spawn_akcipher_alg( + struct crypto_akcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct akcipher_alg, base); +} + +/** + * crypto_register_akcipher() -- Register public key algorithm + * + * Function registers an implementation of a public key verify algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_akcipher(struct akcipher_alg *alg); + +/** + * crypto_unregister_akcipher() -- Unregister public key algorithm + * + * Function unregisters an implementation of a public key verify algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_akcipher(struct akcipher_alg *alg); + +/** + * akcipher_register_instance() -- Unregister public key template instance + * + * Function registers an implementation of an asymmetric key algorithm + * created from a template + * + * @tmpl: the template from which the algorithm was created + * @inst: the template instance + */ +int akcipher_register_instance(struct crypto_template *tmpl, + struct akcipher_instance *inst); +#endif diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h new file mode 100644 index 000000000..3ba066845 --- /dev/null +++ b/include/crypto/internal/blake2s.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +#ifndef _CRYPTO_INTERNAL_BLAKE2S_H +#define _CRYPTO_INTERNAL_BLAKE2S_H + +#include + +void blake2s_compress_generic(struct blake2s_state *state,const u8 *block, + size_t nblocks, const u32 inc); + +void blake2s_compress_arch(struct blake2s_state *state,const u8 *block, + size_t nblocks, const u32 inc); + +static 
inline void blake2s_set_lastblock(struct blake2s_state *state) +{ + state->f[0] = -1; +} + +#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */ diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h new file mode 100644 index 000000000..2bcfb931b --- /dev/null +++ b/include/crypto/internal/geniv.h @@ -0,0 +1,33 @@ +/* + * geniv: IV generation + * + * Copyright (c) 2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_INTERNAL_GENIV_H +#define _CRYPTO_INTERNAL_GENIV_H + +#include +#include +#include + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_skcipher *sknull; + u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); +}; + +struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb, u32 type, u32 mask); +void aead_geniv_free(struct aead_instance *inst); +int aead_init_geniv(struct crypto_aead *tfm); +void aead_exit_geniv(struct crypto_aead *tfm); + +#endif /* _CRYPTO_INTERNAL_GENIV_H */ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h new file mode 100644 index 000000000..64283c22f --- /dev/null +++ b/include/crypto/internal/hash.h @@ -0,0 +1,251 @@ +/* + * Hash algorithms. + * + * Copyright (c) 2008 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_INTERNAL_HASH_H +#define _CRYPTO_INTERNAL_HASH_H + +#include +#include + +struct ahash_request; +struct scatterlist; + +struct crypto_hash_walk { + char *data; + + unsigned int offset; + unsigned int alignmask; + + struct page *pg; + unsigned int entrylen; + + unsigned int total; + struct scatterlist *sg; + + unsigned int flags; +}; + +struct ahash_instance { + struct ahash_alg alg; +}; + +struct shash_instance { + struct shash_alg alg; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +extern const struct crypto_type crypto_ahash_type; + +int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); +int crypto_hash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk); +int crypto_ahash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk); + +static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk, + int err) +{ + return crypto_hash_walk_done(walk, err); +} + +static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) +{ + return !(walk->entrylen | walk->total); +} + +static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk) +{ + return crypto_hash_walk_last(walk); +} + +int crypto_register_ahash(struct ahash_alg *alg); +int crypto_unregister_ahash(struct ahash_alg *alg); +int crypto_register_ahashes(struct ahash_alg *algs, int count); +void crypto_unregister_ahashes(struct ahash_alg *algs, int count); +int ahash_register_instance(struct crypto_template *tmpl, + struct ahash_instance *inst); +void ahash_free_instance(struct crypto_instance *inst); + +bool crypto_shash_alg_has_setkey(struct shash_alg *alg); + +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + +int 
crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, + struct hash_alg_common *alg, + struct crypto_instance *inst); + +static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask); + +int crypto_register_shash(struct shash_alg *alg); +int crypto_unregister_shash(struct shash_alg *alg); +int crypto_register_shashes(struct shash_alg *algs, int count); +int crypto_unregister_shashes(struct shash_alg *algs, int count); +int shash_register_instance(struct crypto_template *tmpl, + struct shash_instance *inst); +void shash_free_instance(struct crypto_instance *inst); + +int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, + struct shash_alg *alg, + struct crypto_instance *inst); + +static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask); + +int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); + +int crypto_init_shash_ops_async(struct crypto_tfm *tfm); + +static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) +{ + return crypto_tfm_ctx(crypto_ahash_tfm(tfm)); +} + +static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg) +{ + return container_of(__crypto_hash_alg_common(alg), struct ahash_alg, + halg); +} + +static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, + unsigned int reqsize) +{ + tfm->reqsize = reqsize; +} + +static inline struct crypto_instance *ahash_crypto_instance( + struct ahash_instance *inst) +{ + return container_of(&inst->alg.halg.base, struct crypto_instance, alg); +} + +static inline struct ahash_instance *ahash_instance( + struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct ahash_instance, alg.halg.base); +} + +static inline void *ahash_instance_ctx(struct ahash_instance *inst) +{ + return crypto_instance_ctx(ahash_crypto_instance(inst)); +} + +static inline unsigned int ahash_instance_headroom(void) +{ + return sizeof(struct ahash_alg) - sizeof(struct crypto_alg); +} + +static inline struct ahash_instance *ahash_alloc_instance( + const char *name, struct crypto_alg *alg) +{ + return crypto_alloc_instance2(name, alg, ahash_instance_headroom()); +} + +static inline void ahash_request_complete(struct ahash_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 ahash_request_flags(struct ahash_request *req) +{ + return req->base.flags; +} + +static inline struct crypto_ahash *crypto_spawn_ahash( + struct crypto_ahash_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline int ahash_enqueue_request(struct crypto_queue *queue, + struct ahash_request *request) +{ + return crypto_enqueue_request(queue, &request->base); +} + +static inline struct ahash_request *ahash_dequeue_request( + struct crypto_queue *queue) +{ + return ahash_request_cast(crypto_dequeue_request(queue)); +} + +static inline int ahash_tfm_in_queue(struct crypto_queue *queue, + struct crypto_ahash *tfm) +{ + return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm)); +} + +static inline void *crypto_shash_ctx(struct crypto_shash *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline struct crypto_instance *shash_crypto_instance( + struct 
shash_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct shash_instance *shash_instance( + struct crypto_instance *inst) +{ + return container_of(__crypto_shash_alg(&inst->alg), + struct shash_instance, alg); +} + +static inline void *shash_instance_ctx(struct shash_instance *inst) +{ + return crypto_instance_ctx(shash_crypto_instance(inst)); +} + +static inline struct shash_instance *shash_alloc_instance( + const char *name, struct crypto_alg *alg) +{ + return crypto_alloc_instance2(name, alg, + sizeof(struct shash_alg) - sizeof(*alg)); +} + +static inline struct crypto_shash *crypto_spawn_shash( + struct crypto_shash_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_shash, base); +} + +#endif /* _CRYPTO_INTERNAL_HASH_H */ + diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h new file mode 100644 index 000000000..ad3acf364 --- /dev/null +++ b/include/crypto/internal/kpp.h @@ -0,0 +1,64 @@ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_KPP_INT_H +#define _CRYPTO_KPP_INT_H +#include +#include + +/* + * Transform internal helpers. + */ +static inline void *kpp_request_ctx(struct kpp_request *req) +{ + return req->__ctx; +} + +static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void kpp_request_complete(struct kpp_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *kpp_alg_name(struct crypto_kpp *tfm) +{ + return crypto_kpp_tfm(tfm)->__crt_alg->cra_name; +} + +/** + * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm + * + * Function registers an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_kpp(struct kpp_alg *alg); + +/** + * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive + * algorithm + * + * Function unregisters an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_kpp(struct kpp_alg *alg); + +#endif diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h new file mode 100644 index 000000000..a52ef3483 --- /dev/null +++ b/include/crypto/internal/rng.h @@ -0,0 +1,45 @@ +/* + * RNG: Random Number Generator algorithms under the crypto API + * + * Copyright (c) 2008 Neil Horman + * Copyright (c) 2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
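[Editor's note: the crypto_hash_walk helpers declared in crypto/internal/hash.h above are consumed in a short loop. The sketch mirrors the shape of shash_ahash_update(), whose prototype also appears above, with crypto_shash_update() standing in for whatever per-chunk work an implementation does.]

#include <crypto/internal/hash.h>

static int example_ahash_update(struct ahash_request *req,
				struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	/* walk the request's scatterlist one mapped chunk at a time */
	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = crypto_shash_update(desc, walk.data, nbytes);

	return nbytes;
}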
+ * + */ + +#ifndef _CRYPTO_INTERNAL_RNG_H +#define _CRYPTO_INTERNAL_RNG_H + +#include +#include + +int crypto_register_rng(struct rng_alg *alg); +void crypto_unregister_rng(struct rng_alg *alg); +int crypto_register_rngs(struct rng_alg *algs, int count); +void crypto_unregister_rngs(struct rng_alg *algs, int count); + +#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE) +int crypto_del_default_rng(void); +#else +static inline int crypto_del_default_rng(void) +{ + return 0; +} +#endif + +static inline void *crypto_rng_ctx(struct crypto_rng *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void crypto_rng_set_entropy(struct crypto_rng *tfm, + const u8 *data, unsigned int len) +{ + crypto_rng_alg(tfm)->set_ent(tfm, data, len); +} + +#endif diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h new file mode 100644 index 000000000..9e8f1590d --- /dev/null +++ b/include/crypto/internal/rsa.h @@ -0,0 +1,62 @@ +/* + * RSA internal helpers + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _RSA_HELPER_ +#define _RSA_HELPER_ +#include + +/** + * rsa_key - RSA key structure + * @n : RSA modulus raw byte stream + * @e : RSA public exponent raw byte stream + * @d : RSA private exponent raw byte stream + * @p : RSA prime factor p of n raw byte stream + * @q : RSA prime factor q of n raw byte stream + * @dp : RSA exponent d mod (p - 1) raw byte stream + * @dq : RSA exponent d mod (q - 1) raw byte stream + * @qinv : RSA CRT coefficient q^(-1) mod p raw byte stream + * @n_sz : length in bytes of RSA modulus n + * @e_sz : length in bytes of RSA public exponent + * @d_sz : length in bytes of RSA private exponent + * @p_sz : length in bytes of p field + * @q_sz : length in bytes of q field + * @dp_sz : length in bytes of dp field + * @dq_sz : length in bytes of dq field + * @qinv_sz : length in bytes of qinv field + */ +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, + unsigned int key_len); + +int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, + unsigned int key_len); + +extern struct crypto_template rsa_pkcs1pad_tmpl; +#endif diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h new file mode 100644 index 000000000..0f6ddac1a --- /dev/null +++ b/include/crypto/internal/scompress.h @@ -0,0 +1,128 @@ +/* + * Synchronous Compression operations + * + * Copyright 2015 LG Electronics Inc. + * Copyright (c) 2016, Intel Corporation + * Author: Giovanni Cabiddu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
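[Editor's note: struct rsa_key above only carries pointers into the caller's BER-encoded key material plus lengths. An akcipher implementation's set_pub_key() handler typically unpacks and copies what it needs, along the lines below; the example_ context and size limit are made up, and the set_pub_key() signature itself comes from crypto/akcipher.h outside this hunk.]

#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/string.h>

#define EXAMPLE_RSA_MAX_SZ	512	/* illustrative 4096-bit limit */

struct example_rsa_ctx {
	u8 n[EXAMPLE_RSA_MAX_SZ];
	u8 e[EXAMPLE_RSA_MAX_SZ];
	size_t n_sz, e_sz;
};

static int example_rsa_set_pub_key(struct crypto_akcipher *tfm,
				   const void *key, unsigned int keylen)
{
	struct example_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {};
	int ret;

	/* rsa_parse_pub_key() only sets pointers into @key plus lengths */
	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	if (raw_key.n_sz > sizeof(ctx->n) || raw_key.e_sz > sizeof(ctx->e))
		return -EINVAL;

	memcpy(ctx->n, raw_key.n, raw_key.n_sz);
	memcpy(ctx->e, raw_key.e, raw_key.e_sz);
	ctx->n_sz = raw_key.n_sz;
	ctx->e_sz = raw_key.e_sz;

	return 0;
}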
+ * + */ +#ifndef _CRYPTO_SCOMP_INT_H +#define _CRYPTO_SCOMP_INT_H +#include + +#define SCOMP_SCRATCH_SIZE 131072 + +struct crypto_scomp { + struct crypto_tfm base; +}; + +/** + * struct scomp_alg - synchronous compression algorithm + * + * @alloc_ctx: Function allocates algorithm specific context + * @free_ctx: Function frees context allocated with alloc_ctx + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @base: Common crypto API algorithm data structure + */ +struct scomp_alg { + void *(*alloc_ctx)(struct crypto_scomp *tfm); + void (*free_ctx)(struct crypto_scomp *tfm, void *ctx); + int (*compress)(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx); + int (*decompress)(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx); + struct crypto_alg base; +}; + +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct scomp_alg, base); +} + +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_scomp, base); +} + +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_scomp(struct crypto_scomp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm)); +} + +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) +{ + return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); +} + +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) +{ + return crypto_scomp_alg(tfm)->alloc_ctx(tfm); +} + +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, + void *ctx) +{ + return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); +} + +static inline int crypto_scomp_compress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx); +} + +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, + void *ctx) +{ + return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen, + ctx); +} + +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); +void crypto_acomp_scomp_free_ctx(struct acomp_req *req); + +/** + * crypto_register_scomp() -- Register synchronous compression algorithm + * + * Function registers an implementation of a synchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_scomp(struct scomp_alg *alg); + +/** + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm + * + * Function unregisters an implementation of a synchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_unregister_scomp(struct scomp_alg *alg); + +int crypto_register_scomps(struct scomp_alg *algs, int count); +void crypto_unregister_scomps(struct scomp_alg *algs, int count); + +#endif diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h new file mode 100644 index 000000000..f18344518 --- /dev/null +++ b/include/crypto/internal/simd.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Shared 
crypto simd helpers + */ + +#ifndef _CRYPTO_INTERNAL_SIMD_H +#define _CRYPTO_INTERNAL_SIMD_H + +struct simd_skcipher_alg; +struct skcipher_alg; + +struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, + const char *drvname, + const char *basename); +struct simd_skcipher_alg *simd_skcipher_create(const char *algname, + const char *basename); +void simd_skcipher_free(struct simd_skcipher_alg *alg); + +int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, + struct simd_skcipher_alg **simd_algs); + +void simd_unregister_skciphers(struct skcipher_alg *algs, int count, + struct simd_skcipher_alg **simd_algs); + +#endif /* _CRYPTO_INTERNAL_SIMD_H */ diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h new file mode 100644 index 000000000..e42f7063f --- /dev/null +++ b/include/crypto/internal/skcipher.h @@ -0,0 +1,211 @@ +/* + * Symmetric key ciphers. + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_INTERNAL_SKCIPHER_H +#define _CRYPTO_INTERNAL_SKCIPHER_H + +#include +#include +#include +#include + +struct aead_request; +struct rtattr; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *inst); + union { + struct { + char head[offsetof(struct skcipher_alg, base)]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + unsigned long offset; + } phys; + + struct { + u8 *page; + void *addr; + } virt; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + + struct scatter_walk out; + unsigned int total; + + struct list_head buffers; + + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + + unsigned int ivsize; + + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +extern const struct crypto_type crypto_givcipher_type; + +static inline struct crypto_instance *skcipher_crypto_instance( + struct skcipher_instance *inst) +{ + return &inst->s.base; +} + +static inline struct skcipher_instance *skcipher_alg_instance( + struct crypto_skcipher *skcipher) +{ + return container_of(crypto_skcipher_alg(skcipher), + struct skcipher_instance, alg); +} + +static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) +{ + return crypto_instance_ctx(skcipher_crypto_instance(inst)); +} + +static inline void skcipher_request_complete(struct skcipher_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline void crypto_set_skcipher_spawn( + struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct skcipher_alg *crypto_skcipher_spawn_alg( + struct crypto_skcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct skcipher_alg, base); +} + +static inline struct skcipher_alg *crypto_spawn_skcipher_alg( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_skcipher_spawn_alg(spawn); +} + +static inline struct 
crypto_skcipher *crypto_spawn_skcipher( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_skcipher_set_reqsize( + struct crypto_skcipher *skcipher, unsigned int reqsize) +{ + skcipher->reqsize = reqsize; +} + +int crypto_register_skcipher(struct skcipher_alg *alg); +void crypto_unregister_skcipher(struct skcipher_alg *alg); +int crypto_register_skciphers(struct skcipher_alg *algs, int count); +void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); +int skcipher_register_instance(struct crypto_template *tmpl, + struct skcipher_instance *inst); + +int skcipher_walk_done(struct skcipher_walk *walk, int err); +int skcipher_walk_virt(struct skcipher_walk *walk, + struct skcipher_request *req, + bool atomic); +void skcipher_walk_atomise(struct skcipher_walk *walk); +int skcipher_walk_async(struct skcipher_walk *walk, + struct skcipher_request *req); +int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, + bool atomic); +int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); +int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); +void skcipher_walk_complete(struct skcipher_walk *walk, int err); + +static inline void ablkcipher_request_complete(struct ablkcipher_request *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) +{ + return req->base.flags; +} + +static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *skcipher_request_ctx(struct skcipher_request *req) +{ + return req->__ctx; +} + +static inline u32 skcipher_request_flags(struct skcipher_request *req) +{ + return req->base.flags; +} + +static inline unsigned int crypto_skcipher_alg_min_keysize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.min_keysize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.min_keysize; + + return alg->min_keysize; +} + +static inline unsigned int crypto_skcipher_alg_max_keysize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.max_keysize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.max_keysize; + + return alg->max_keysize; +} + +#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ + diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h new file mode 100644 index 000000000..1a34630fc --- /dev/null +++ b/include/crypto/kpp.h @@ -0,0 +1,352 @@ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_KPP_ +#define _CRYPTO_KPP_ +#include + +/** + * struct kpp_request + * + * @base: Common attributes for async crypto requests + * @src: Source data + * @dst: Destination data + * @src_len: Size of the input buffer + * @dst_len: Size of the output buffer. 
It needs to be at least + * as big as the expected result depending on the operation + * After operation it will be updated with the actual size of the + * result. In case of error where the dst sgl size was insufficient, + * it will be updated to the size required for the operation. + * @__ctx: Start of private context data + */ +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_kpp - user-instantiated object which encapsulate + * algorithms and core processing logic + * + * @base: Common crypto API algorithm data structure + */ +struct crypto_kpp { + struct crypto_tfm base; +}; + +/** + * struct kpp_alg - generic key-agreement protocol primitives + * + * @set_secret: Function invokes the protocol specific function to + * store the secret private key along with parameters. + * The implementation knows how to decode the buffer + * @generate_public_key: Function generate the public key to be sent to the + * counterpart. In case of error, where output is not big + * enough req->dst_len will be updated to the size + * required + * @compute_shared_secret: Function compute the shared secret as defined by + * the algorithm. The result is given back to the user. + * In case of error, where output is not big enough, + * req->dst_len will be updated to the size required + * @max_size: Function returns the size of the output buffer + * @init: Initialize the object. This is called only once at + * instantiation time. In case the cryptographic hardware + * needs to be initialized. Software fallback should be + * put in place here. + * @exit: Undo everything @init did. + * + * @reqsize: Request context size required by algorithm + * implementation + * @base: Common crypto API algorithm data structure + */ +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *tfm, const void *buffer, + unsigned int len); + int (*generate_public_key)(struct kpp_request *req); + int (*compute_shared_secret)(struct kpp_request *req); + + unsigned int (*max_size)(struct crypto_kpp *tfm); + + int (*init)(struct crypto_kpp *tfm); + void (*exit)(struct crypto_kpp *tfm); + + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Generic Key-agreement Protocol Primitives API + * + * The KPP API is used with the algorithm type + * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto) + */ + +/** + * crypto_alloc_kpp() - allocate KPP tfm handle + * @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh") + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for kpp algorithm. The returned struct crypto_kpp + * is required for any following API invocation + * + * Return: allocated handle in case of success; IS_ERR() is true in case of + * an error, PTR_ERR() returns the error code. 
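[Editor's note: the skcipher_walk declarations in crypto/internal/skcipher.h above are normally driven from an skcipher implementation's encrypt()/decrypt() handler in a loop like the one below; the XOR "cipher" is a deliberately trivial stand-in so that only the walk pattern is on display.]

#include <crypto/internal/skcipher.h>

static int example_xor_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	/* false: we are allowed to sleep; pass true from atomic context */
	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int i;

		/* stand-in transform: not a real cipher */
		for (i = 0; i < nbytes; i++)
			dst[i] = src[i] ^ 0x55;

		/* second argument is the number of bytes *not* processed */
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}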
+ */ +struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm) +{ + return &tfm->base; +} + +static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct kpp_alg, base); +} + +static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_kpp, base); +} + +static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm) +{ + return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm) +{ + return crypto_kpp_alg(tfm)->reqsize; +} + +static inline void kpp_request_set_tfm(struct kpp_request *req, + struct crypto_kpp *tfm) +{ + req->base.tfm = crypto_kpp_tfm(tfm); +} + +static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req) +{ + return __crypto_kpp_tfm(req->base.tfm); +} + +static inline u32 crypto_kpp_get_flags(struct crypto_kpp *tfm) +{ + return crypto_tfm_get_flags(crypto_kpp_tfm(tfm)); +} + +static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_kpp_tfm(tfm), flags); +} + +/** + * crypto_free_kpp() - free KPP tfm handle + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_kpp(struct crypto_kpp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm)); +} + +/** + * kpp_request_alloc() - allocates kpp request + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * @gfp: allocation flags + * + * Return: allocated handle in case of success or NULL in case of an error. + */ +static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm, + gfp_t gfp) +{ + struct kpp_request *req; + + req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp); + if (likely(req)) + kpp_request_set_tfm(req, tfm); + + return req; +} + +/** + * kpp_request_free() - zeroize and free kpp request + * + * @req: request to free + */ +static inline void kpp_request_free(struct kpp_request *req) +{ + kzfree(req); +} + +/** + * kpp_request_set_callback() - Sets an asynchronous callback. + * + * Callback will be called when an asynchronous operation on a given + * request is finished. 
+ * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmpl: callback which will be called + * @data: private data used by the caller + */ +static inline void kpp_request_set_callback(struct kpp_request *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * kpp_request_set_input() - Sets input buffer + * + * Sets parameters required by generate_public_key + * + * @req: kpp request + * @input: ptr to input scatter list + * @input_len: size of the input scatter list + */ +static inline void kpp_request_set_input(struct kpp_request *req, + struct scatterlist *input, + unsigned int input_len) +{ + req->src = input; + req->src_len = input_len; +} + +/** + * kpp_request_set_output() - Sets output buffer + * + * Sets parameters required by kpp operation + * + * @req: kpp request + * @output: ptr to output scatter list + * @output_len: size of the output scatter list + */ +static inline void kpp_request_set_output(struct kpp_request *req, + struct scatterlist *output, + unsigned int output_len) +{ + req->dst = output; + req->dst_len = output_len; +} + +enum { + CRYPTO_KPP_SECRET_TYPE_UNKNOWN, + CRYPTO_KPP_SECRET_TYPE_DH, + CRYPTO_KPP_SECRET_TYPE_ECDH, +}; + +/** + * struct kpp_secret - small header for packing secret buffer + * + * @type: define type of secret. Each kpp type will define its own + * @len: specify the len of the secret, include the header, that + * follows the struct + */ +struct kpp_secret { + unsigned short type; + unsigned short len; +}; + +/** + * crypto_kpp_set_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for a given alg. + * + * @tfm: tfm handle + * @buffer: Buffer holding the packet representation of the private + * key. The structure of the packet key depends on the particular + * KPP implementation. Packing and unpacking helpers are provided + * for ECDH and DH (see the respective header files for those + * implementations). + * @len: Length of the packet private key buffer. + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, + const void *buffer, unsigned int len) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->set_secret(tfm, buffer, len); +} + +/** + * crypto_kpp_generate_public_key() - Invoke kpp operation + * + * Function invokes the specific kpp operation for generating the public part + * for a given kpp algorithm. + * + * To generate a private key, the caller should use a random number generator. + * The output of the requested length serves as the private key. + * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->generate_public_key(req); +} + +/** + * crypto_kpp_compute_shared_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for computing the shared secret + * for a given kpp algorithm. 
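[Editor's note: tying the kpp API above together, a kernel-side user of, say, the "dh" implementation would follow roughly the sequence below. The packed secret is assumed to have been encoded with the DH packing helpers from crypto/dh.h, and DECLARE_CRYPTO_WAIT()/crypto_req_done()/crypto_wait_req() come from linux/crypto.h; both are outside this hunk, so treat this as a sketch.]

#include <crypto/kpp.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* @public_key: kmalloc'd buffer of at least crypto_kpp_maxsize(tfm) bytes */
static int example_dh_public_key(const void *packed_secret,
				 unsigned int packed_len,
				 void *public_key, unsigned int public_len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req;
	struct scatterlist dst;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_kpp_set_secret(tfm, packed_secret, packed_len);
	if (err)
		goto out_free_tfm;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* no input is needed when generating our own public value */
	kpp_request_set_input(req, NULL, 0);
	sg_init_one(&dst, public_key, public_len);
	kpp_request_set_output(req, &dst, public_len);
	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				 crypto_req_done, &wait);

	err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);

	kpp_request_free(req);
out_free_tfm:
	crypto_free_kpp(tfm);
	return err;
}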
+ * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->compute_shared_secret(req); +} + +/** + * crypto_kpp_maxsize() - Get len for output buffer + * + * Function returns the output buffer size required for a given key. + * Function assumes that the key is already set in the transformation. If this + * function is called without a setkey or with a failed setkey, you will end up + * in a NULL dereference. + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + */ +static inline unsigned int crypto_kpp_maxsize(struct crypto_kpp *tfm) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->max_size(tfm); +} + +#endif diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h new file mode 100644 index 000000000..b67404fc4 --- /dev/null +++ b/include/crypto/mcryptd.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Software async multibuffer crypto daemon headers + * + * Author: + * Tim Chen + * + * Copyright (c) 2014, Intel Corporation. + */ + +#ifndef _CRYPTO_MCRYPT_H +#define _CRYPTO_MCRYPT_H + +#include +#include +#include + +struct mcryptd_ahash { + struct crypto_ahash base; +}; + +static inline struct mcryptd_ahash *__mcryptd_ahash_cast( + struct crypto_ahash *tfm) +{ + return (struct mcryptd_ahash *)tfm; +} + +struct mcryptd_cpu_queue { + struct crypto_queue queue; + spinlock_t q_lock; + struct work_struct work; +}; + +struct mcryptd_queue { + struct mcryptd_cpu_queue __percpu *cpu_queue; +}; + +struct mcryptd_instance_ctx { + struct crypto_spawn spawn; + struct mcryptd_queue *queue; +}; + +struct mcryptd_hash_ctx { + struct crypto_ahash *child; + struct mcryptd_alg_state *alg_state; +}; + +struct mcryptd_tag { + /* seq number of request */ + unsigned seq_num; + /* arrival time of request */ + unsigned long arrival; + unsigned long expire; + int cpu; +}; + +struct mcryptd_hash_request_ctx { + struct list_head waiter; + crypto_completion_t complete; + struct mcryptd_tag tag; + struct crypto_hash_walk walk; + u8 *out; + int flag; + struct ahash_request areq; +}; + +struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, + u32 type, u32 mask); +struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); +struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req); +void mcryptd_free_ahash(struct mcryptd_ahash *tfm); +void mcryptd_flusher(struct work_struct *work); + +enum mcryptd_req_type { + MCRYPTD_NONE, + MCRYPTD_UPDATE, + MCRYPTD_FINUP, + MCRYPTD_DIGEST, + MCRYPTD_FINAL +}; + +struct mcryptd_alg_cstate { + unsigned long next_flush; + unsigned next_seq_num; + bool flusher_engaged; + struct delayed_work flush; + int cpu; + struct mcryptd_alg_state *alg_state; + void *mgr; + spinlock_t work_lock; + struct list_head work_list; + struct list_head flush_list; +}; + +struct mcryptd_alg_state { + struct mcryptd_alg_cstate __percpu *alg_cstate; + unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate); +}; + +/* return delay in jiffies from current time */ +static inline unsigned long get_delay(unsigned long t) +{ + long delay; + + delay = (long) t - (long) jiffies; + if (delay <= 0) + return 0; + else + return (unsigned long) delay; +} + +void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay); + +#endif diff --git a/include/crypto/md5.h b/include/crypto/md5.h new file 
mode 100644 index 000000000..cf9e9dec3 --- /dev/null +++ b/include/crypto/md5.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_MD5_H +#define _CRYPTO_MD5_H + +#include + +#define MD5_DIGEST_SIZE 16 +#define MD5_HMAC_BLOCK_SIZE 64 +#define MD5_BLOCK_WORDS 16 +#define MD5_HASH_WORDS 4 + +#define MD5_H0 0x67452301UL +#define MD5_H1 0xefcdab89UL +#define MD5_H2 0x98badcfeUL +#define MD5_H3 0x10325476UL + +extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE]; + +struct md5_state { + u32 hash[MD5_HASH_WORDS]; + u32 block[MD5_BLOCK_WORDS]; + u64 byte_count; +}; + +#endif diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h new file mode 100644 index 000000000..b26dd70ef --- /dev/null +++ b/include/crypto/morus1280_glue.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * The MORUS-1280 Authenticated-Encryption Algorithm + * Common glue skeleton -- header file + * + * Copyright (c) 2016-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#ifndef _CRYPTO_MORUS1280_GLUE_H +#define _CRYPTO_MORUS1280_GLUE_H + +#include +#include +#include +#include +#include + +#define MORUS1280_WORD_SIZE 8 +#define MORUS1280_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS1280_WORD_SIZE) + +struct morus1280_block { + u8 bytes[MORUS1280_BLOCK_SIZE]; +}; + +struct morus1280_glue_ops { + void (*init)(void *state, const void *key, const void *iv); + void (*ad)(void *state, const void *data, unsigned int length); + void (*enc)(void *state, const void *src, void *dst, unsigned int length); + void (*dec)(void *state, const void *src, void *dst, unsigned int length); + void (*enc_tail)(void *state, const void *src, void *dst, unsigned int length); + void (*dec_tail)(void *state, const void *src, void *dst, unsigned int length); + void (*final)(void *state, void *tag_xor, u64 assoclen, u64 cryptlen); +}; + +struct morus1280_ctx { + const struct morus1280_glue_ops *ops; + struct morus1280_block key; +}; + +void crypto_morus1280_glue_init_ops(struct crypto_aead *aead, + const struct morus1280_glue_ops *ops); +int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen); +int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm, + unsigned int authsize); +int crypto_morus1280_glue_encrypt(struct aead_request *req); +int crypto_morus1280_glue_decrypt(struct aead_request *req); + +int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen); +int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead, + unsigned int authsize); +int cryptd_morus1280_glue_encrypt(struct aead_request *req); +int cryptd_morus1280_glue_decrypt(struct aead_request *req); +int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead); +void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead); + +#define MORUS1280_DECLARE_ALGS(id, driver_name, priority) \ + static const struct morus1280_glue_ops crypto_morus1280_##id##_ops = {\ + .init = crypto_morus1280_##id##_init, \ + .ad = crypto_morus1280_##id##_ad, \ + .enc = crypto_morus1280_##id##_enc, \ + .enc_tail = crypto_morus1280_##id##_enc_tail, \ + .dec = crypto_morus1280_##id##_dec, \ + .dec_tail = crypto_morus1280_##id##_dec_tail, \ + .final = 
crypto_morus1280_##id##_final, \ + }; \ + \ + static int crypto_morus1280_##id##_init_tfm(struct crypto_aead *tfm) \ + { \ + crypto_morus1280_glue_init_ops(tfm, &crypto_morus1280_##id##_ops); \ + return 0; \ + } \ + \ + static void crypto_morus1280_##id##_exit_tfm(struct crypto_aead *tfm) \ + { \ + } \ + \ + struct aead_alg crypto_morus1280_##id##_algs[] = {\ + { \ + .setkey = crypto_morus1280_glue_setkey, \ + .setauthsize = crypto_morus1280_glue_setauthsize, \ + .encrypt = crypto_morus1280_glue_encrypt, \ + .decrypt = crypto_morus1280_glue_decrypt, \ + .init = crypto_morus1280_##id##_init_tfm, \ + .exit = crypto_morus1280_##id##_exit_tfm, \ + \ + .ivsize = MORUS_NONCE_SIZE, \ + .maxauthsize = MORUS_MAX_AUTH_SIZE, \ + .chunksize = MORUS1280_BLOCK_SIZE, \ + \ + .base = { \ + .cra_flags = CRYPTO_ALG_INTERNAL, \ + .cra_blocksize = 1, \ + .cra_ctxsize = sizeof(struct morus1280_ctx), \ + .cra_alignmask = 0, \ + \ + .cra_name = "__morus1280", \ + .cra_driver_name = "__"driver_name, \ + \ + .cra_module = THIS_MODULE, \ + } \ + }, { \ + .setkey = cryptd_morus1280_glue_setkey, \ + .setauthsize = cryptd_morus1280_glue_setauthsize, \ + .encrypt = cryptd_morus1280_glue_encrypt, \ + .decrypt = cryptd_morus1280_glue_decrypt, \ + .init = cryptd_morus1280_glue_init_tfm, \ + .exit = cryptd_morus1280_glue_exit_tfm, \ + \ + .ivsize = MORUS_NONCE_SIZE, \ + .maxauthsize = MORUS_MAX_AUTH_SIZE, \ + .chunksize = MORUS1280_BLOCK_SIZE, \ + \ + .base = { \ + .cra_flags = CRYPTO_ALG_ASYNC, \ + .cra_blocksize = 1, \ + .cra_ctxsize = sizeof(struct crypto_aead *), \ + .cra_alignmask = 0, \ + \ + .cra_priority = priority, \ + \ + .cra_name = "morus1280", \ + .cra_driver_name = driver_name, \ + \ + .cra_module = THIS_MODULE, \ + } \ + } \ + } + +#endif /* _CRYPTO_MORUS1280_GLUE_H */ diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h new file mode 100644 index 000000000..90c8db07e --- /dev/null +++ b/include/crypto/morus640_glue.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * The MORUS-640 Authenticated-Encryption Algorithm + * Common glue skeleton -- header file + * + * Copyright (c) 2016-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
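[Editor's note: MORUS1280_DECLARE_ALGS() above is meant to be instantiated by an arch-specific glue module that supplies the crypto_morus1280_<id>_* primitives, normally in assembly. A sketch of such a module with hypothetical names and an arbitrary priority follows; real glue modules also gate registration on a CPU-feature check.]

#include <crypto/internal/aead.h>
#include <crypto/morus1280_glue.h>
#include <linux/module.h>

/* low-level primitives assumed to be supplied elsewhere (typically asm) */
asmlinkage void crypto_morus1280_example_init(void *state, const void *key,
					      const void *iv);
asmlinkage void crypto_morus1280_example_ad(void *state, const void *data,
					    unsigned int length);
asmlinkage void crypto_morus1280_example_enc(void *state, const void *src,
					     void *dst, unsigned int length);
asmlinkage void crypto_morus1280_example_enc_tail(void *state, const void *src,
						  void *dst, unsigned int length);
asmlinkage void crypto_morus1280_example_dec(void *state, const void *src,
					     void *dst, unsigned int length);
asmlinkage void crypto_morus1280_example_dec_tail(void *state, const void *src,
						  void *dst, unsigned int length);
asmlinkage void crypto_morus1280_example_final(void *state, void *tag_xor,
					       u64 assoclen, u64 cryptlen);

MORUS1280_DECLARE_ALGS(example, "morus1280-example", 350);

static int __init morus1280_example_module_init(void)
{
	return crypto_register_aeads(crypto_morus1280_example_algs,
				     ARRAY_SIZE(crypto_morus1280_example_algs));
}

static void __exit morus1280_example_module_exit(void)
{
	crypto_unregister_aeads(crypto_morus1280_example_algs,
				ARRAY_SIZE(crypto_morus1280_example_algs));
}

module_init(morus1280_example_module_init);
module_exit(morus1280_example_module_exit);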
+ */ + +#ifndef _CRYPTO_MORUS640_GLUE_H +#define _CRYPTO_MORUS640_GLUE_H + +#include +#include +#include +#include +#include + +#define MORUS640_WORD_SIZE 4 +#define MORUS640_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS640_WORD_SIZE) + +struct morus640_block { + u8 bytes[MORUS640_BLOCK_SIZE]; +}; + +struct morus640_glue_ops { + void (*init)(void *state, const void *key, const void *iv); + void (*ad)(void *state, const void *data, unsigned int length); + void (*enc)(void *state, const void *src, void *dst, unsigned int length); + void (*dec)(void *state, const void *src, void *dst, unsigned int length); + void (*enc_tail)(void *state, const void *src, void *dst, unsigned int length); + void (*dec_tail)(void *state, const void *src, void *dst, unsigned int length); + void (*final)(void *state, void *tag_xor, u64 assoclen, u64 cryptlen); +}; + +struct morus640_ctx { + const struct morus640_glue_ops *ops; + struct morus640_block key; +}; + +void crypto_morus640_glue_init_ops(struct crypto_aead *aead, + const struct morus640_glue_ops *ops); +int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen); +int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm, + unsigned int authsize); +int crypto_morus640_glue_encrypt(struct aead_request *req); +int crypto_morus640_glue_decrypt(struct aead_request *req); + +int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen); +int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead, + unsigned int authsize); +int cryptd_morus640_glue_encrypt(struct aead_request *req); +int cryptd_morus640_glue_decrypt(struct aead_request *req); +int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead); +void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead); + +#define MORUS640_DECLARE_ALGS(id, driver_name, priority) \ + static const struct morus640_glue_ops crypto_morus640_##id##_ops = {\ + .init = crypto_morus640_##id##_init, \ + .ad = crypto_morus640_##id##_ad, \ + .enc = crypto_morus640_##id##_enc, \ + .enc_tail = crypto_morus640_##id##_enc_tail, \ + .dec = crypto_morus640_##id##_dec, \ + .dec_tail = crypto_morus640_##id##_dec_tail, \ + .final = crypto_morus640_##id##_final, \ + }; \ + \ + static int crypto_morus640_##id##_init_tfm(struct crypto_aead *tfm) \ + { \ + crypto_morus640_glue_init_ops(tfm, &crypto_morus640_##id##_ops); \ + return 0; \ + } \ + \ + static void crypto_morus640_##id##_exit_tfm(struct crypto_aead *tfm) \ + { \ + } \ + \ + struct aead_alg crypto_morus640_##id##_algs[] = {\ + { \ + .setkey = crypto_morus640_glue_setkey, \ + .setauthsize = crypto_morus640_glue_setauthsize, \ + .encrypt = crypto_morus640_glue_encrypt, \ + .decrypt = crypto_morus640_glue_decrypt, \ + .init = crypto_morus640_##id##_init_tfm, \ + .exit = crypto_morus640_##id##_exit_tfm, \ + \ + .ivsize = MORUS_NONCE_SIZE, \ + .maxauthsize = MORUS_MAX_AUTH_SIZE, \ + .chunksize = MORUS640_BLOCK_SIZE, \ + \ + .base = { \ + .cra_flags = CRYPTO_ALG_INTERNAL, \ + .cra_blocksize = 1, \ + .cra_ctxsize = sizeof(struct morus640_ctx), \ + .cra_alignmask = 0, \ + \ + .cra_name = "__morus640", \ + .cra_driver_name = "__"driver_name, \ + \ + .cra_module = THIS_MODULE, \ + } \ + }, { \ + .setkey = cryptd_morus640_glue_setkey, \ + .setauthsize = cryptd_morus640_glue_setauthsize, \ + .encrypt = cryptd_morus640_glue_encrypt, \ + .decrypt = cryptd_morus640_glue_decrypt, \ + .init = cryptd_morus640_glue_init_tfm, \ + .exit = cryptd_morus640_glue_exit_tfm, \ + \ + .ivsize = MORUS_NONCE_SIZE, \ + .maxauthsize = 
MORUS_MAX_AUTH_SIZE, \ + .chunksize = MORUS640_BLOCK_SIZE, \ + \ + .base = { \ + .cra_flags = CRYPTO_ALG_ASYNC, \ + .cra_blocksize = 1, \ + .cra_ctxsize = sizeof(struct crypto_aead *), \ + .cra_alignmask = 0, \ + \ + .cra_priority = priority, \ + \ + .cra_name = "morus640", \ + .cra_driver_name = driver_name, \ + \ + .cra_module = THIS_MODULE, \ + } \ + } \ + } + +#endif /* _CRYPTO_MORUS640_GLUE_H */ diff --git a/include/crypto/morus_common.h b/include/crypto/morus_common.h new file mode 100644 index 000000000..39f28c749 --- /dev/null +++ b/include/crypto/morus_common.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * The MORUS Authenticated-Encryption Algorithm + * Common definitions + * + * Copyright (c) 2016-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#ifndef _CRYPTO_MORUS_COMMON_H +#define _CRYPTO_MORUS_COMMON_H + +#define MORUS_BLOCK_WORDS 4 +#define MORUS_STATE_BLOCKS 5 +#define MORUS_NONCE_SIZE 16 +#define MORUS_MAX_AUTH_SIZE 16 + +#endif /* _CRYPTO_MORUS_COMMON_H */ diff --git a/include/crypto/null.h b/include/crypto/null.h new file mode 100644 index 000000000..15aeef6e3 --- /dev/null +++ b/include/crypto/null.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Values for NULL algorithms */ + +#ifndef _CRYPTO_NULL_H +#define _CRYPTO_NULL_H + +#define NULL_KEY_SIZE 0 +#define NULL_BLOCK_SIZE 1 +#define NULL_DIGEST_SIZE 0 +#define NULL_IV_SIZE 0 + +struct crypto_skcipher *crypto_get_default_null_skcipher(void); +void crypto_put_default_null_skcipher(void); + +#endif diff --git a/include/crypto/padlock.h b/include/crypto/padlock.h new file mode 100644 index 000000000..d2cfa2ef4 --- /dev/null +++ b/include/crypto/padlock.h @@ -0,0 +1,29 @@ +/* + * Driver for VIA PadLock + * + * Copyright (c) 2004 Michal Ludvig + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_PADLOCK_H +#define _CRYPTO_PADLOCK_H + +#define PADLOCK_ALIGNMENT 16 + +#define PFX KBUILD_MODNAME ": " + +#define PADLOCK_CRA_PRIORITY 300 +#define PADLOCK_COMPOSITE_PRIORITY 400 + +#ifdef CONFIG_64BIT +#define STACK_ALIGN 16 +#else +#define STACK_ALIGN 4 +#endif + +#endif /* _CRYPTO_PADLOCK_H */ diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h new file mode 100644 index 000000000..d7d8bd8c6 --- /dev/null +++ b/include/crypto/pcrypt.h @@ -0,0 +1,51 @@ +/* + * pcrypt - Parallel crypto engine. + * + * Copyright (C) 2009 secunet Security Networks AG + * Copyright (C) 2009 Steffen Klassert + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
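As an aside, a minimal sketch of how an architecture-specific glue module might consume the MORUS*_DECLARE_ALGS() macros declared above. The "sse2" id, driver name and priority value are placeholder assumptions, the arch code is assumed to have already provided the crypto_morus640_sse2_{init,ad,enc,enc_tail,dec,dec_tail,final} primitives referenced by the macro, and crypto_register_aeads()/crypto_unregister_aeads() are assumed to come from <crypto/internal/aead.h>:

/* Illustrative sketch, not part of this header. */
MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);

static int __init example_morus640_module_init(void)
{
	/* registers both the internal "__morus640" alg and the cryptd wrapper */
	return crypto_register_aeads(crypto_morus640_sse2_algs,
				     ARRAY_SIZE(crypto_morus640_sse2_algs));
}

static void __exit example_morus640_module_exit(void)
{
	crypto_unregister_aeads(crypto_morus640_sse2_algs,
				ARRAY_SIZE(crypto_morus640_sse2_algs));
}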
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef _CRYPTO_PCRYPT_H +#define _CRYPTO_PCRYPT_H + +#include +#include +#include + +struct pcrypt_request { + struct padata_priv padata; + void *data; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +static inline void *pcrypt_request_ctx(struct pcrypt_request *req) +{ + return req->__ctx; +} + +static inline +struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req) +{ + return &req->padata; +} + +static inline +struct pcrypt_request *pcrypt_padata_request(struct padata_priv *padata) +{ + return container_of(padata, struct pcrypt_request, padata); +} + +#endif diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h new file mode 100644 index 000000000..583f19940 --- /dev/null +++ b/include/crypto/pkcs7.h @@ -0,0 +1,47 @@ +/* PKCS#7 crypto data parser + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _CRYPTO_PKCS7_H +#define _CRYPTO_PKCS7_H + +#include +#include + +struct key; +struct pkcs7_message; + +/* + * pkcs7_parser.c + */ +extern struct pkcs7_message *pkcs7_parse_message(const void *data, + size_t datalen); +extern void pkcs7_free_message(struct pkcs7_message *pkcs7); + +extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7, + const void **_data, size_t *_datalen, + size_t *_headerlen); + +/* + * pkcs7_trust.c + */ +extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7, + struct key *trust_keyring); + +/* + * pkcs7_verify.c + */ +extern int pkcs7_verify(struct pkcs7_message *pkcs7, + enum key_being_used_for usage); + +extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, + const void *data, size_t datalen); + +#endif /* _CRYPTO_PKCS7_H */ diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h new file mode 100644 index 000000000..f718a19da --- /dev/null +++ b/include/crypto/poly1305.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Poly1305 algorithm + */ + +#ifndef _CRYPTO_POLY1305_H +#define _CRYPTO_POLY1305_H + +#include +#include + +#define POLY1305_BLOCK_SIZE 16 +#define POLY1305_KEY_SIZE 32 +#define POLY1305_DIGEST_SIZE 16 + +struct poly1305_desc_ctx { + /* key */ + u32 r[5]; + /* finalize key */ + u32 s[4]; + /* accumulator */ + u32 h[5]; + /* partial buffer */ + u8 buf[POLY1305_BLOCK_SIZE]; + /* bytes used in partial buffer */ + unsigned int buflen; + /* r key has been set */ + bool rset; + /* s key has been set */ + bool sset; +}; + +int crypto_poly1305_init(struct shash_desc *desc); +unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, + const u8 *src, unsigned int srclen); +int crypto_poly1305_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen); +int crypto_poly1305_final(struct shash_desc *desc, u8 *dst); + +#endif diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h new file mode 100644 index 000000000..052e26fda --- /dev/null +++ b/include/crypto/public_key.h @@ -0,0 +1,74 @@ +/* Asymmetric public-key algorithm definitions + * + * See 
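A minimal sketch of the typical flow through the PKCS#7 helpers declared in <crypto/pkcs7.h> above: parse the blob, verify the embedded signatures, then check trust against a keyring. The keyring argument and the VERIFYING_UNSPECIFIED_SIGNATURE usage value (from the key_being_used_for enum) are assumptions for the example:

/* Illustrative sketch, not part of these headers. */
static int example_pkcs7_check(const void *data, size_t datalen,
			       struct key *trust_keyring)
{
	struct pkcs7_message *pkcs7;
	int ret;

	pkcs7 = pkcs7_parse_message(data, datalen);
	if (IS_ERR(pkcs7))
		return PTR_ERR(pkcs7);

	/* check the signatures against the message content */
	ret = pkcs7_verify(pkcs7, VERIFYING_UNSPECIFIED_SIGNATURE);
	if (ret < 0)
		goto out;

	/* then see whether any signer chains up to a trusted key */
	ret = pkcs7_validate_trust(pkcs7, trust_keyring);
out:
	pkcs7_free_message(pkcs7);
	return ret;
}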
Documentation/crypto/asymmetric-keys.txt + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_PUBLIC_KEY_H +#define _LINUX_PUBLIC_KEY_H + +/* + * Cryptographic data for the public-key subtype of the asymmetric key type. + * + * Note that this may include private part of the key as well as the public + * part. + */ +struct public_key { + void *key; + u32 keylen; + const char *id_type; + const char *pkey_algo; +}; + +extern void public_key_free(struct public_key *key); + +/* + * Public key cryptography signature data + */ +struct public_key_signature { + struct asymmetric_key_id *auth_ids[2]; + u8 *s; /* Signature */ + u8 *digest; + u32 s_size; /* Number of bytes in signature */ + u32 digest_size; /* Number of bytes in digest */ + const char *pkey_algo; + const char *hash_algo; +}; + +extern void public_key_signature_free(struct public_key_signature *sig); + +extern struct asymmetric_key_subtype public_key_subtype; + +struct key; +struct key_type; +union key_payload; + +extern int restrict_link_by_signature(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trust_keyring); + +extern int restrict_link_by_key_or_keyring(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trusted); + +extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trusted); + +extern int verify_signature(const struct key *key, + const struct public_key_signature *sig); + +int public_key_verify_signature(const struct public_key *pkey, + const struct public_key_signature *sig); + +#endif /* _LINUX_PUBLIC_KEY_H */ diff --git a/include/crypto/rng.h b/include/crypto/rng.h new file mode 100644 index 000000000..a788c1e5a --- /dev/null +++ b/include/crypto/rng.h @@ -0,0 +1,202 @@ +/* + * RNG: Random Number Generator algorithms under the crypto API + * + * Copyright (c) 2008 Neil Horman + * Copyright (c) 2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_RNG_H +#define _CRYPTO_RNG_H + +#include + +struct crypto_rng; + +/** + * struct rng_alg - random number generator definition + * + * @generate: The function defined by this variable obtains a + * random number. The random number generator transform + * must generate the random number out of the context + * provided with this call, plus any additional data + * if provided to the call. + * @seed: Seed or reseed the random number generator. With the + * invocation of this function call, the random number + * generator shall become ready for generation. If the + * random number generator requires a seed for setting + * up a new state, the seed must be provided by the + * consumer while invoking this function. The required + * size of the seed is defined with @seedsize . + * @set_ent: Set entropy that would otherwise be obtained from + * entropy source. Internal use only. 
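A minimal sketch of verifying a detached signature with the public-key structures declared above: fill in a public_key_signature with the signature blob and the precomputed digest, then hand it to public_key_verify_signature(). The "rsa"/"sha256" algorithm names are placeholder assumptions:

/* Illustrative sketch, not part of this header. */
static int example_pubkey_verify(const struct public_key *pkey,
				 const u8 *sig_blob, u32 sig_len,
				 const u8 *digest, u32 digest_len)
{
	struct public_key_signature sig = {
		.s		= (u8 *)sig_blob,
		.s_size		= sig_len,
		.digest		= (u8 *)digest,
		.digest_size	= digest_len,
		.pkey_algo	= "rsa",	/* assumed */
		.hash_algo	= "sha256",	/* assumed */
	};

	return public_key_verify_signature(pkey, &sig);
}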
+ * @seedsize: The seed size required for a random number generator + * initialization defined with this variable. Some + * random number generators does not require a seed + * as the seeding is implemented internally without + * the need of support by the consumer. In this case, + * the seed size is set to zero. + * @base: Common crypto API algorithm data structure. + */ +struct rng_alg { + int (*generate)(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen); + int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); + void (*set_ent)(struct crypto_rng *tfm, const u8 *data, + unsigned int len); + + unsigned int seedsize; + + struct crypto_alg base; +}; + +struct crypto_rng { + struct crypto_tfm base; +}; + +extern struct crypto_rng *crypto_default_rng; + +int crypto_get_default_rng(void); +void crypto_put_default_rng(void); + +/** + * DOC: Random number generator API + * + * The random number generator API is used with the ciphers of type + * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto) + */ + +/** + * crypto_alloc_rng() -- allocate RNG handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a random number generator. The returned struct + * crypto_rng is the cipher handle that is required for any subsequent + * API invocation for that random number generator. + * + * For all random number generators, this call creates a new private copy of + * the random number generator that does not share a state with other + * instances. The only exception is the "krng" random number generator which + * is a kernel crypto API use case for the get_random_bytes() function of the + * /dev/random driver. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) +{ + return &tfm->base; +} + +/** + * crypto_rng_alg - obtain name of RNG + * @tfm: cipher handle + * + * Return the generic name (cra_name) of the initialized random number generator + * + * Return: generic name string + */ +static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) +{ + return container_of(crypto_rng_tfm(tfm)->__crt_alg, + struct rng_alg, base); +} + +/** + * crypto_free_rng() - zeroize and free RNG handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_rng(struct crypto_rng *tfm) +{ + crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); +} + +/** + * crypto_rng_generate() - get random number + * @tfm: cipher handle + * @src: Input buffer holding additional data, may be NULL + * @slen: Length of additional data + * @dst: output buffer holding the random numbers + * @dlen: length of the output buffer + * + * This function fills the caller-allocated buffer with random + * numbers using the random number generator referenced by the + * cipher handle. 
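A minimal sketch of the RNG API described above: allocate a handle, (re)seed it, pull random bytes, and free it. The "stdrng" algorithm name and the caller-supplied seed are assumptions for the example; error handling is abbreviated:

/* Illustrative sketch, not part of this header. */
static int example_get_random(u8 *buf, unsigned int len,
			      const u8 *seed, unsigned int slen)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* slen would normally match crypto_rng_seedsize(rng); it may be zero */
	ret = crypto_rng_reset(rng, seed, slen);
	if (!ret)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}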
+ * + * Return: 0 function was successful; < 0 if an error occurred + */ +static inline int crypto_rng_generate(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen) +{ + return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); +} + +/** + * crypto_rng_get_bytes() - get random number + * @tfm: cipher handle + * @rdata: output buffer holding the random numbers + * @dlen: length of the output buffer + * + * This function fills the caller-allocated buffer with random numbers using the + * random number generator referenced by the cipher handle. + * + * Return: 0 function was successful; < 0 if an error occurred + */ +static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, + u8 *rdata, unsigned int dlen) +{ + return crypto_rng_generate(tfm, NULL, 0, rdata, dlen); +} + +/** + * crypto_rng_reset() - re-initialize the RNG + * @tfm: cipher handle + * @seed: seed input data + * @slen: length of the seed input data + * + * The reset function completely re-initializes the random number generator + * referenced by the cipher handle by clearing the current state. The new state + * is initialized with the caller provided seed or automatically, depending + * on the random number generator type (the ANSI X9.31 RNG requires + * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding). + * The seed is provided as a parameter to this function call. The provided seed + * should have the length of the seed size defined for the random number + * generator as defined by crypto_rng_seedsize. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen); + +/** + * crypto_rng_seedsize() - obtain seed size of RNG + * @tfm: cipher handle + * + * The function returns the seed size for the random number generator + * referenced by the cipher handle. This value may be zero if the random + * number generator does not implement or require a reseeding. For example, + * the SP800-90A DRBGs implement an automated reseeding after reaching a + * pre-defined threshold. + * + * Return: seed size for the random number generator + */ +static inline int crypto_rng_seedsize(struct crypto_rng *tfm) +{ + return crypto_rng_alg(tfm)->seedsize; +} + +#endif diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h new file mode 100644 index 000000000..a66c127a2 --- /dev/null +++ b/include/crypto/scatterwalk.h @@ -0,0 +1,120 @@ +/* + * Cryptographic scatter and gather helpers. + * + * Copyright (c) 2002 James Morris + * Copyright (c) 2002 Adam J. Richter + * Copyright (c) 2004 Jean-Luc Cooke + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_SCATTERWALK_H +#define _CRYPTO_SCATTERWALK_H + +#include +#include +#include +#include + +static inline void scatterwalk_crypto_chain(struct scatterlist *head, + struct scatterlist *sg, int num) +{ + if (sg) + sg_chain(head, num, sg); + else + sg_mark_end(head); +} + +static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) +{ + unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; + unsigned int len_this_page = offset_in_page(~walk->offset) + 1; + return len_this_page > len ? 
len : len_this_page; +} + +static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, + unsigned int nbytes) +{ + unsigned int len_this_page = scatterwalk_pagelen(walk); + return nbytes > len_this_page ? len_this_page : nbytes; +} + +static inline void scatterwalk_advance(struct scatter_walk *walk, + unsigned int nbytes) +{ + walk->offset += nbytes; +} + +static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, + unsigned int alignmask) +{ + return !(walk->offset & alignmask); +} + +static inline struct page *scatterwalk_page(struct scatter_walk *walk) +{ + return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); +} + +static inline void scatterwalk_unmap(void *vaddr) +{ + kunmap_atomic(vaddr); +} + +static inline void scatterwalk_start(struct scatter_walk *walk, + struct scatterlist *sg) +{ + walk->sg = sg; + walk->offset = sg->offset; +} + +static inline void *scatterwalk_map(struct scatter_walk *walk) +{ + return kmap_atomic(scatterwalk_page(walk)) + + offset_in_page(walk->offset); +} + +static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, + unsigned int more) +{ + if (out) { + struct page *page; + + page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); + /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as + * PageSlab cannot be optimised away per se due to + * use of volatile pointer. + */ + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) + flush_dcache_page(page); + } + + if (more && walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); +} + +static inline void scatterwalk_done(struct scatter_walk *walk, int out, + int more) +{ + if (!more || walk->offset >= walk->sg->offset + walk->sg->length || + !(walk->offset & (PAGE_SIZE - 1))) + scatterwalk_pagedone(walk, out, more); +} + +void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, + size_t nbytes, int out); +void *scatterwalk_map(struct scatter_walk *walk); + +void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out); + +struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], + struct scatterlist *src, + unsigned int len); + +#endif /* _CRYPTO_SCATTERWALK_H */ diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h new file mode 100644 index 000000000..7dd780c5d --- /dev/null +++ b/include/crypto/serpent.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for serpent algorithms + */ + +#ifndef _CRYPTO_SERPENT_H +#define _CRYPTO_SERPENT_H + +#include +#include + +#define SERPENT_MIN_KEY_SIZE 0 +#define SERPENT_MAX_KEY_SIZE 32 +#define SERPENT_EXPKEY_WORDS 132 +#define SERPENT_BLOCK_SIZE 16 + +struct serpent_ctx { + u32 expkey[SERPENT_EXPKEY_WORDS]; +}; + +int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, + unsigned int keylen); +int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); +void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/sha.h b/include/crypto/sha.h new file mode 100644 index 000000000..8a46202b1 --- /dev/null +++ b/include/crypto/sha.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for SHA algorithms + */ + +#ifndef _CRYPTO_SHA_H +#define _CRYPTO_SHA_H + +#include + +#define SHA1_DIGEST_SIZE 20 +#define SHA1_BLOCK_SIZE 64 + +#define SHA224_DIGEST_SIZE 28 +#define SHA224_BLOCK_SIZE 64 + 
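A minimal sketch of the scatterwalk helpers declared in <crypto/scatterwalk.h> above, using scatterwalk_map_and_copy() to move an authentication tag between a linear buffer and a scatterlist, as AEAD implementations commonly do; the buffer names and the offset are assumptions for the example:

/* Illustrative sketch, not part of these headers. */
static void example_copy_tag(struct scatterlist *sg, unsigned int offset,
			     u8 *tag, unsigned int taglen, bool write)
{
	/* out == 1 copies from the linear buffer into the scatterlist,
	 * out == 0 copies from the scatterlist into the linear buffer */
	scatterwalk_map_and_copy(tag, sg, offset, taglen, write ? 1 : 0);
}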
+#define SHA256_DIGEST_SIZE 32 +#define SHA256_BLOCK_SIZE 64 + +#define SHA384_DIGEST_SIZE 48 +#define SHA384_BLOCK_SIZE 128 + +#define SHA512_DIGEST_SIZE 64 +#define SHA512_BLOCK_SIZE 128 + +#define SHA1_H0 0x67452301UL +#define SHA1_H1 0xefcdab89UL +#define SHA1_H2 0x98badcfeUL +#define SHA1_H3 0x10325476UL +#define SHA1_H4 0xc3d2e1f0UL + +#define SHA224_H0 0xc1059ed8UL +#define SHA224_H1 0x367cd507UL +#define SHA224_H2 0x3070dd17UL +#define SHA224_H3 0xf70e5939UL +#define SHA224_H4 0xffc00b31UL +#define SHA224_H5 0x68581511UL +#define SHA224_H6 0x64f98fa7UL +#define SHA224_H7 0xbefa4fa4UL + +#define SHA256_H0 0x6a09e667UL +#define SHA256_H1 0xbb67ae85UL +#define SHA256_H2 0x3c6ef372UL +#define SHA256_H3 0xa54ff53aUL +#define SHA256_H4 0x510e527fUL +#define SHA256_H5 0x9b05688cUL +#define SHA256_H6 0x1f83d9abUL +#define SHA256_H7 0x5be0cd19UL + +#define SHA384_H0 0xcbbb9d5dc1059ed8ULL +#define SHA384_H1 0x629a292a367cd507ULL +#define SHA384_H2 0x9159015a3070dd17ULL +#define SHA384_H3 0x152fecd8f70e5939ULL +#define SHA384_H4 0x67332667ffc00b31ULL +#define SHA384_H5 0x8eb44a8768581511ULL +#define SHA384_H6 0xdb0c2e0d64f98fa7ULL +#define SHA384_H7 0x47b5481dbefa4fa4ULL + +#define SHA512_H0 0x6a09e667f3bcc908ULL +#define SHA512_H1 0xbb67ae8584caa73bULL +#define SHA512_H2 0x3c6ef372fe94f82bULL +#define SHA512_H3 0xa54ff53a5f1d36f1ULL +#define SHA512_H4 0x510e527fade682d1ULL +#define SHA512_H5 0x9b05688c2b3e6c1fULL +#define SHA512_H6 0x1f83d9abfb41bd6bULL +#define SHA512_H7 0x5be0cd19137e2179ULL + +extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE]; + +extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE]; + +extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE]; + +extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE]; + +extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE]; + +struct sha1_state { + u32 state[SHA1_DIGEST_SIZE / 4]; + u64 count; + u8 buffer[SHA1_BLOCK_SIZE]; +}; + +struct sha256_state { + u32 state[SHA256_DIGEST_SIZE / 4]; + u64 count; + u8 buf[SHA256_BLOCK_SIZE]; +}; + +struct sha512_state { + u64 state[SHA512_DIGEST_SIZE / 8]; + u64 count[2]; + u8 buf[SHA512_BLOCK_SIZE]; +}; + +struct shash_desc; + +extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); +#endif diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h new file mode 100644 index 000000000..d0df431f9 --- /dev/null +++ b/include/crypto/sha1_base.h @@ -0,0 +1,106 @@ +/* + * sha1_base.h - core logic for SHA-1 implementations + * + * Copyright (C) 2015 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include + +typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks); + +static inline int sha1_base_init(struct shash_desc *desc) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA1_H0; + sctx->state[1] = SHA1_H1; + sctx->state[2] = SHA1_H2; + sctx->state[3] = SHA1_H3; + sctx->state[4] = SHA1_H4; + sctx->count = 0; + + return 0; +} + +static inline int sha1_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha1_block_fn *block_fn) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA1_BLOCK_SIZE - partial; + + memcpy(sctx->buffer + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buffer, 1); + } + + blocks = len / SHA1_BLOCK_SIZE; + len %= SHA1_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA1_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buffer + partial, data, len); + + return 0; +} + +static inline int sha1_base_do_finalize(struct shash_desc *desc, + sha1_block_fn *block_fn) +{ + const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64); + struct sha1_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); + unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; + + sctx->buffer[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buffer, 1); + } + + memset(sctx->buffer + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buffer, 1); + + return 0; +} + +static inline int sha1_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++) + put_unaligned_be32(sctx->state[i], digest++); + + *sctx = (struct sha1_state){}; + return 0; +} diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h new file mode 100644 index 000000000..d1f2195bb --- /dev/null +++ b/include/crypto/sha256_base.h @@ -0,0 +1,128 @@ +/* + * sha256_base.h - core logic for SHA-256 implementations + * + * Copyright (C) 2015 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
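A minimal sketch of how an arch-specific SHA-1 shash implementation typically wires its compression routine into the sha1_base_*() helpers above; my_sha1_block_fn() is a placeholder for the real (usually assembler) block function:

/* Illustrative sketch, not part of this header. */
static void my_sha1_block_fn(struct sha1_state *sst, u8 const *src,
			     int blocks)
{
	/* arch-optimised processing of 'blocks' 64-byte blocks goes here */
}

static int my_sha1_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	return sha1_base_do_update(desc, data, len, my_sha1_block_fn);
}

static int my_sha1_final(struct shash_desc *desc, u8 *out)
{
	sha1_base_do_finalize(desc, my_sha1_block_fn);
	return sha1_base_finish(desc, out);
}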
+ */ + +#include +#include +#include +#include + +#include + +typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src, + int blocks); + +static inline int sha224_base_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA224_H0; + sctx->state[1] = SHA224_H1; + sctx->state[2] = SHA224_H2; + sctx->state[3] = SHA224_H3; + sctx->state[4] = SHA224_H4; + sctx->state[5] = SHA224_H5; + sctx->state[6] = SHA224_H6; + sctx->state[7] = SHA224_H7; + sctx->count = 0; + + return 0; +} + +static inline int sha256_base_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA256_H0; + sctx->state[1] = SHA256_H1; + sctx->state[2] = SHA256_H2; + sctx->state[3] = SHA256_H3; + sctx->state[4] = SHA256_H4; + sctx->state[5] = SHA256_H5; + sctx->state[6] = SHA256_H6; + sctx->state[7] = SHA256_H7; + sctx->count = 0; + + return 0; +} + +static inline int sha256_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha256_block_fn *block_fn) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA256_BLOCK_SIZE - partial; + + memcpy(sctx->buf + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buf, 1); + } + + blocks = len / SHA256_BLOCK_SIZE; + len %= SHA256_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA256_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buf + partial, data, len); + + return 0; +} + +static inline int sha256_base_do_finalize(struct shash_desc *desc, + sha256_block_fn *block_fn) +{ + const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64); + struct sha256_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buf + bit_offset); + unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; + + sctx->buf[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buf, 1); + } + + memset(sctx->buf + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buf, 1); + + return 0; +} + +static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) +{ + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + struct sha256_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32)) + put_unaligned_be32(sctx->state[i], digest++); + + *sctx = (struct sha256_state){}; + return 0; +} diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h new file mode 100644 index 000000000..080f60c2e --- /dev/null +++ b/include/crypto/sha3.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for SHA-3 algorithms + */ +#ifndef __CRYPTO_SHA3_H__ +#define __CRYPTO_SHA3_H__ + +#define SHA3_224_DIGEST_SIZE (224 / 8) +#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE) + +#define SHA3_256_DIGEST_SIZE (256 / 8) +#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE) + +#define SHA3_384_DIGEST_SIZE (384 / 8) +#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE) + +#define SHA3_512_DIGEST_SIZE (512 / 8) +#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE) + +struct sha3_state { + u64 st[25]; + unsigned int rsiz; + unsigned int 
rsizw; + + unsigned int partial; + u8 buf[SHA3_224_BLOCK_SIZE]; +}; + +int crypto_sha3_init(struct shash_desc *desc); +int crypto_sha3_update(struct shash_desc *desc, const u8 *data, + unsigned int len); +int crypto_sha3_final(struct shash_desc *desc, u8 *out); + +#endif diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h new file mode 100644 index 000000000..6c5341e00 --- /dev/null +++ b/include/crypto/sha512_base.h @@ -0,0 +1,131 @@ +/* + * sha512_base.h - core logic for SHA-512 implementations + * + * Copyright (C) 2015 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +#include + +typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src, + int blocks); + +static inline int sha384_base_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA384_H0; + sctx->state[1] = SHA384_H1; + sctx->state[2] = SHA384_H2; + sctx->state[3] = SHA384_H3; + sctx->state[4] = SHA384_H4; + sctx->state[5] = SHA384_H5; + sctx->state[6] = SHA384_H6; + sctx->state[7] = SHA384_H7; + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static inline int sha512_base_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA512_H0; + sctx->state[1] = SHA512_H1; + sctx->state[2] = SHA512_H2; + sctx->state[3] = SHA512_H3; + sctx->state[4] = SHA512_H4; + sctx->state[5] = SHA512_H5; + sctx->state[6] = SHA512_H6; + sctx->state[7] = SHA512_H7; + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static inline int sha512_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha512_block_fn *block_fn) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; + + sctx->count[0] += len; + if (sctx->count[0] < len) + sctx->count[1]++; + + if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA512_BLOCK_SIZE - partial; + + memcpy(sctx->buf + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buf, 1); + } + + blocks = len / SHA512_BLOCK_SIZE; + len %= SHA512_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA512_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buf + partial, data, len); + + return 0; +} + +static inline int sha512_base_do_finalize(struct shash_desc *desc, + sha512_block_fn *block_fn) +{ + const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]); + struct sha512_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buf + bit_offset); + unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; + + sctx->buf[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buf, 1); + } + + memset(sctx->buf + partial, 0x0, bit_offset - partial); + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); + bits[1] = cpu_to_be64(sctx->count[0] << 3); + block_fn(sctx, sctx->buf, 1); + + return 0; +} + +static inline int sha512_base_finish(struct shash_desc *desc, u8 *out) +{ + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + struct sha512_state *sctx = shash_desc_ctx(desc); + __be64 *digest = (__be64 *)out; + int i; + + for (i = 0; digest_size > 0; 
i++, digest_size -= sizeof(__be64)) + put_unaligned_be64(sctx->state[i], digest++); + + *sctx = (struct sha512_state){}; + return 0; +} diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h new file mode 100644 index 000000000..c7553f8b1 --- /dev/null +++ b/include/crypto/skcipher.h @@ -0,0 +1,615 @@ +/* + * Symmetric key ciphers. + * + * Copyright (c) 2007-2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_SKCIPHER_H +#define _CRYPTO_SKCIPHER_H + +#include +#include +#include + +/** + * struct skcipher_request - Symmetric key cipher request + * @cryptlen: Number of bytes to encrypt or decrypt + * @iv: Initialisation Vector + * @src: Source SG list + * @dst: Destination SG list + * @base: Underlying async request request + * @__ctx: Start of private context data + */ +struct skcipher_request { + unsigned int cryptlen; + + u8 *iv; + + struct scatterlist *src; + struct scatterlist *dst; + + struct crypto_async_request base; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct skcipher_givcrypt_request - Crypto request with IV generation + * @seq: Sequence number for IV generation + * @giv: Space for generated IV + * @creq: The crypto request itself + */ +struct skcipher_givcrypt_request { + u64 seq; + u8 *giv; + + struct ablkcipher_request creq; +}; + +struct crypto_skcipher { + int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct skcipher_request *req); + int (*decrypt)(struct skcipher_request *req); + + unsigned int ivsize; + unsigned int reqsize; + unsigned int keysize; + + struct crypto_tfm base; +}; + +/** + * struct skcipher_alg - symmetric key cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. 
The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. In case the + * key was stored in transformation context, the key might need to be + * re-programmed into the hardware in this function. This function + * shall not modify the transformation context, as this function may + * be called in parallel with the same transformation object. + * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt + * and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * @walksize: Equal to the chunk size except in cases where the algorithm is + * considerably more efficient if it can operate on multiple chunks + * in parallel. Should be a multiple of chunksize. + * @base: Definition of a generic crypto algorithm. + * + * All fields except @ivsize are mandatory and must be filled. + */ +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct skcipher_request *req); + int (*decrypt)(struct skcipher_request *req); + int (*init)(struct crypto_skcipher *tfm); + void (*exit)(struct crypto_skcipher *tfm); + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int walksize; + + struct crypto_alg base; +}; + +#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ + char __##name##_desc[sizeof(struct skcipher_request) + \ + crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ + struct skcipher_request *name = (void *)__##name##_desc + +/** + * DOC: Symmetric Key Cipher API + * + * Symmetric key cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto). + * + * Asynchronous cipher operations imply that the function invocation for a + * cipher request returns immediately before the completion of the operation. + * The cipher request is scheduled as a separate kernel thread and therefore + * load-balanced on the different CPUs via the process scheduler. To allow + * the kernel crypto API to inform the caller about the completion of a cipher + * request, the caller must provide a callback function. That function is + * invoked with the cipher handle when the request completes. + * + * To support the asynchronous operation, additional information than just the + * cipher handle must be supplied to the kernel crypto API. 
That additional + * information is given by filling in the skcipher_request data structure. + * + * For the symmetric key cipher API, the state is maintained with the tfm + * cipher handle. A single tfm can be used across multiple calls and in + * parallel. For asynchronous block cipher calls, context data supplied and + * only used by the caller can be referenced the request data structure in + * addition to the IV used for the cipher request. The maintenance of such + * state information would be important for a crypto driver implementer to + * have, because when calling the callback function upon completion of the + * cipher operation, that callback function may need some information about + * which operation just finished if it invoked multiple in parallel. This + * state information is unused by the kernel crypto API. + */ + +static inline struct crypto_skcipher *__crypto_skcipher_cast( + struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_skcipher, base); +} + +/** + * crypto_alloc_skcipher() - allocate symmetric key cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * skcipher cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an skcipher. The returned struct + * crypto_skcipher is the cipher handle that is required for any subsequent + * API invocation for that skcipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, + u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_skcipher_tfm( + struct crypto_skcipher *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_skcipher() - zeroize and free cipher handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_skcipher(struct crypto_skcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm)); +} + +/** + * crypto_has_skcipher() - Search for the availability of an skcipher. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * skcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the skcipher is known to the kernel crypto API; false + * otherwise + */ +static inline int crypto_has_skcipher(const char *alg_name, u32 type, + u32 mask) +{ + return crypto_has_alg(alg_name, crypto_skcipher_type(type), + crypto_skcipher_mask(mask)); +} + +/** + * crypto_has_skcipher2() - Search for the availability of an skcipher. 
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * skcipher + * @type: specifies the type of the skcipher + * @mask: specifies the mask for the skcipher + * + * Return: true when the skcipher is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask); + +static inline const char *crypto_skcipher_driver_name( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); +} + +static inline struct skcipher_alg *crypto_skcipher_alg( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg, base); +} + +static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.ivsize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.ivsize; + + return alg->ivsize; +} + +/** + * crypto_skcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the skcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) +{ + return tfm->ivsize; +} + +static inline unsigned int crypto_skcipher_alg_chunksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->chunksize; +} + +static inline unsigned int crypto_skcipher_alg_walksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->walksize; +} + +/** + * crypto_skcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. + * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_skcipher_chunksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); +} + +/** + * crypto_skcipher_walksize() - obtain walk size + * @tfm: cipher handle + * + * In some cases, algorithms can only perform optimally when operating on + * multiple blocks in parallel. This is reflected by the walksize, which + * must be a multiple of the chunksize (or equal if the concern does not + * apply) + * + * Return: walk size in bytes + */ +static inline unsigned int crypto_skcipher_walksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); +} + +/** + * crypto_skcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the skcipher referenced with the cipher handle is + * returned. 
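A minimal sketch of allocating an skcipher handle and querying its geometry with the helpers declared above; "cbc(aes)" is just an example algorithm name:

/* Illustrative sketch, not part of this header. */
static int example_query_skcipher(void)
{
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	pr_info("%s: ivsize %u, chunksize %u\n",
		crypto_skcipher_driver_name(tfm),
		crypto_skcipher_ivsize(tfm),
		crypto_skcipher_chunksize(tfm));

	crypto_free_skcipher(tfm);
	return 0;
}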
The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_skcipher_blocksize( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); +} + +static inline unsigned int crypto_skcipher_alignmask( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)); +} + +static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm)); +} + +static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags); +} + +static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags); +} + +/** + * crypto_skcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the skcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return tfm->setkey(tfm, key, keylen); +} + +static inline unsigned int crypto_skcipher_default_keysize( + struct crypto_skcipher *tfm) +{ + return tfm->keysize; +} + +/** + * crypto_skcipher_reqtfm() - obtain cipher handle from request + * @req: skcipher_request out of which the cipher handle is to be obtained + * + * Return the crypto_skcipher handle when furnishing an skcipher_request + * data structure. + * + * Return: crypto_skcipher handle + */ +static inline struct crypto_skcipher *crypto_skcipher_reqtfm( + struct skcipher_request *req) +{ + return __crypto_skcipher_cast(req->base.tfm); +} + +/** + * crypto_skcipher_encrypt() - encrypt plaintext + * @req: reference to the skcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the skcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * skcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_skcipher_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->encrypt(req); +} + +/** + * crypto_skcipher_decrypt() - decrypt ciphertext + * @req: reference to the skcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the skcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * skcipher_request_* functions. 
+ * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_skcipher_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->decrypt(req); +} + +/** + * DOC: Symmetric Key Cipher Request Handle + * + * The skcipher_request data structure contains all pointers to data + * required for the symmetric key cipher operation. This includes the cipher + * handle (which can be used by multiple skcipher_request instances), pointer + * to plaintext and ciphertext, asynchronous callback function, etc. It acts + * as a handle to the skcipher_request_* API calls in a similar way as + * skcipher handle to the crypto_skcipher_* API calls. + */ + +/** + * crypto_skcipher_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ +static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm) +{ + return tfm->reqsize; +} + +/** + * skcipher_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing skcipher handle in the request + * data structure with a different one. + */ +static inline void skcipher_request_set_tfm(struct skcipher_request *req, + struct crypto_skcipher *tfm) +{ + req->base.tfm = crypto_skcipher_tfm(tfm); +} + +static inline struct skcipher_request *skcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct skcipher_request, base); +} + +/** + * skcipher_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the skcipher + * encrypt and decrypt API calls. During the allocation, the provided skcipher + * handle is registered in the request data structure. + * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct skcipher_request *skcipher_request_alloc( + struct crypto_skcipher *tfm, gfp_t gfp) +{ + struct skcipher_request *req; + + req = kmalloc(sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(tfm), gfp); + + if (likely(req)) + skcipher_request_set_tfm(req, tfm); + + return req; +} + +/** + * skcipher_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void skcipher_request_free(struct skcipher_request *req) +{ + kzfree(req); +} + +static inline void skcipher_request_zero(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm)); +} + +/** + * skcipher_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. 
Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once the + * cipher operation completes. + * + * The callback function is registered with the skcipher_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void skcipher_request_set_callback(struct skcipher_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * skcipher_request_set_crypt() - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @cryptlen: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_skcipher_ivsize + * + * This function allows setting of the source data and destination data + * scatter / gather lists. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed - the source is the ciphertext and the destination is the plaintext. + */ +static inline void skcipher_request_set_crypt( + struct skcipher_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int cryptlen, void *iv) +{ + req->src = src; + req->dst = dst; + req->cryptlen = cryptlen; + req->iv = iv; +} + +#endif /* _CRYPTO_SKCIPHER_H */ + diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h new file mode 100644 index 000000000..1438942dc --- /dev/null +++ b/include/crypto/sm3.h @@ -0,0 +1,40 @@ +/* + * Common values for SM3 algorithm + */ + +#ifndef _CRYPTO_SM3_H +#define _CRYPTO_SM3_H + +#include + +#define SM3_DIGEST_SIZE 32 +#define SM3_BLOCK_SIZE 64 + +#define SM3_T1 0x79CC4519 +#define SM3_T2 0x7A879D8A + +#define SM3_IVA 0x7380166f +#define SM3_IVB 0x4914b2b9 +#define SM3_IVC 0x172442d7 +#define SM3_IVD 0xda8a0600 +#define SM3_IVE 0xa96f30bc +#define SM3_IVF 0x163138aa +#define SM3_IVG 0xe38dee4d +#define SM3_IVH 0xb0fb0e4e + +extern const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE]; + +struct sm3_state { + u32 state[SM3_DIGEST_SIZE / 4]; + u64 count; + u8 buffer[SM3_BLOCK_SIZE]; +}; + +struct shash_desc; + +extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); +#endif diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h new file mode 100644 index 000000000..256948e39 --- /dev/null +++ b/include/crypto/sm3_base.h @@ -0,0 +1,117 @@ +/* + * sm3_base.h - core logic for SM3 implementations + * + * Copyright (C) 2017 ARM Limited or its affiliates. + * Written by Gilad Ben-Yossef + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
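Putting the skcipher API above together, a minimal sketch of one encryption pass: allocate the transform, set the key, build a request, and wait for completion. The "cbc(aes)" name, the key/IV handling, and the use of DECLARE_CRYPTO_WAIT()/crypto_req_done()/crypto_wait_req() from <linux/crypto.h> are assumptions for the example:

/* Illustrative sketch, not part of this header. */
static int example_encrypt(struct scatterlist *src, struct scatterlist *dst,
			   unsigned int len, const u8 *key,
			   unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* waits for completion if the driver handles the request asynchronously */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}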
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include +#include +#include +#include + +typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks); + +static inline int sm3_base_init(struct shash_desc *desc) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SM3_IVA; + sctx->state[1] = SM3_IVB; + sctx->state[2] = SM3_IVC; + sctx->state[3] = SM3_IVD; + sctx->state[4] = SM3_IVE; + sctx->state[5] = SM3_IVF; + sctx->state[6] = SM3_IVG; + sctx->state[7] = SM3_IVH; + sctx->count = 0; + + return 0; +} + +static inline int sm3_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sm3_block_fn *block_fn) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SM3_BLOCK_SIZE - partial; + + memcpy(sctx->buffer + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buffer, 1); + } + + blocks = len / SM3_BLOCK_SIZE; + len %= SM3_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SM3_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buffer + partial, data, len); + + return 0; +} + +static inline int sm3_base_do_finalize(struct shash_desc *desc, + sm3_block_fn *block_fn) +{ + const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64); + struct sm3_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + + sctx->buffer[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buffer, 1); + } + + memset(sctx->buffer + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buffer, 1); + + return 0; +} + +static inline int sm3_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++) + put_unaligned_be32(sctx->state[i], digest++); + + *sctx = (struct sm3_state){}; + return 0; +} diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h new file mode 100644 index 000000000..7afd730d1 --- /dev/null +++ b/include/crypto/sm4.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Common values for the SM4 algorithm + * Copyright (C) 2018 ARM Limited or its affiliates. 
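To show how the sm3_base_*() helpers above are meant to be combined, here is a schematic shash wiring in the style of the generic SM3 driver. my_sm3_block() stands in for the real compression function and is only declared, and the registration boilerplate is trimmed; this is a sketch, not the upstream implementation.

/* Placeholder for the actual SM3 compression function (C or accelerated). */
void my_sm3_block(struct sm3_state *sst, u8 const *src, int blocks);

static int my_sm3_update(struct shash_desc *desc, const u8 *data,
			 unsigned int len)
{
	return sm3_base_do_update(desc, data, len, my_sm3_block);
}

static int my_sm3_final(struct shash_desc *desc, u8 *out)
{
	sm3_base_do_finalize(desc, my_sm3_block);
	return sm3_base_finish(desc, out);
}

static struct shash_alg my_sm3_alg = {
	.digestsize	= SM3_DIGEST_SIZE,
	.init		= sm3_base_init,	/* loads the SM3_IV* words */
	.update		= my_sm3_update,
	.final		= my_sm3_final,
	.descsize	= sizeof(struct sm3_state),
	.base		= {
		.cra_name	 = "sm3",
		.cra_driver_name = "sm3-example",
		.cra_blocksize	 = SM3_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
	},
};
/* registered with crypto_register_shash(&my_sm3_alg) from module init */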
+ */ + +#ifndef _CRYPTO_SM4_H +#define _CRYPTO_SM4_H + +#include +#include + +#define SM4_KEY_SIZE 16 +#define SM4_BLOCK_SIZE 16 +#define SM4_RKEY_WORDS 32 + +struct crypto_sm4_ctx { + u32 rkey_enc[SM4_RKEY_WORDS]; + u32 rkey_dec[SM4_RKEY_WORDS]; +}; + +int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len); +int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key, + unsigned int key_len); + +void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); +void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); + +#endif diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h new file mode 100644 index 000000000..2e2c09673 --- /dev/null +++ b/include/crypto/twofish.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_TWOFISH_H +#define _CRYPTO_TWOFISH_H + +#include + +#define TF_MIN_KEY_SIZE 16 +#define TF_MAX_KEY_SIZE 32 +#define TF_BLOCK_SIZE 16 + +struct crypto_tfm; + +/* Structure for an expanded Twofish key. s contains the key-dependent + * S-boxes composed with the MDS matrix; w contains the eight "whitening" + * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note + * that k[i] corresponds to what the Twofish paper calls K[i+8]. */ +struct twofish_ctx { + u32 s[4][256], w[8], k[32]; +}; + +int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key, + unsigned int key_len, u32 *flags); +int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len); + +#endif diff --git a/include/crypto/xts.h b/include/crypto/xts.h new file mode 100644 index 000000000..34d94c954 --- /dev/null +++ b/include/crypto/xts.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_XTS_H +#define _CRYPTO_XTS_H + +#include +#include +#include + +#define XTS_BLOCK_SIZE 16 + +#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x)) + +static inline int xts_check_key(struct crypto_tfm *tfm, + const u8 *key, unsigned int keylen) +{ + u32 *flags = &tfm->crt_flags; + + /* + * key consists of keys of equal size concatenated, therefore + * the length must be even. + */ + if (keylen % 2) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* ensure that the AES and tweak key are not identical */ + if (fips_enabled && + !crypto_memneq(key, key + (keylen / 2), keylen / 2)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + return 0; +} + +static inline int xts_verify_key(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + /* + * key consists of keys of equal size concatenated, therefore + * the length must be even. + */ + if (keylen % 2) { + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + /* ensure that the AES and tweak key are not identical */ + if ((fips_enabled || crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_WEAK_KEY) && + !crypto_memneq(key, key + (keylen / 2), keylen / 2)) { + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + + return 0; +} + +#endif /* _CRYPTO_XTS_H */ diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h new file mode 100644 index 000000000..dd63d08cc --- /dev/null +++ b/include/drm/amd_asic_type.h @@ -0,0 +1,55 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. 
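The xts_verify_key() helper above is intended to be called from an XTS implementation's setkey handler before the two key halves are programmed. A schematic example follows; the context layout and names are placeholders, and crypto_skcipher_ctx() comes from <crypto/internal/skcipher.h>.

struct my_xts_ctx {
	u8 crypt_key[32];	/* first half: data-unit key */
	u8 tweak_key[32];	/* second half: tweak key */
};

static int my_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct my_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	/* rejects odd lengths and, under FIPS, identical key halves */
	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	if (keylen / 2 > sizeof(ctx->crypt_key))
		return -EINVAL;

	memcpy(ctx->crypt_key, key, keylen / 2);
	memcpy(ctx->tweak_key, key + keylen / 2, keylen / 2);
	return 0;
}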
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMD_ASIC_TYPE_H__ +#define __AMD_ASIC_TYPE_H__ +/* + * Supported ASIC types + */ +enum amd_asic_type { + CHIP_TAHITI = 0, + CHIP_PITCAIRN, + CHIP_VERDE, + CHIP_OLAND, + CHIP_HAINAN, + CHIP_BONAIRE, + CHIP_KAVERI, + CHIP_KABINI, + CHIP_HAWAII, + CHIP_MULLINS, + CHIP_TOPAZ, + CHIP_TONGA, + CHIP_FIJI, + CHIP_CARRIZO, + CHIP_STONEY, + CHIP_POLARIS10, + CHIP_POLARIS11, + CHIP_POLARIS12, + CHIP_VEGAM, + CHIP_VEGA10, + CHIP_VEGA12, + CHIP_VEGA20, + CHIP_RAVEN, + CHIP_LAST, +}; + +#endif /*__AMD_ASIC_TYPE_H__ */ diff --git a/include/drm/ati_pcigart.h b/include/drm/ati_pcigart.h new file mode 100644 index 000000000..a728a1364 --- /dev/null +++ b/include/drm/ati_pcigart.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DRM_ATI_PCIGART_H +#define DRM_ATI_PCIGART_H + +#include + +/* location of GART table */ +#define DRM_ATI_GART_MAIN 1 +#define DRM_ATI_GART_FB 2 + +#define DRM_ATI_GART_PCI 1 +#define DRM_ATI_GART_PCIE 2 +#define DRM_ATI_GART_IGP 3 + +struct drm_ati_pcigart_info { + int gart_table_location; + int gart_reg_if; + void *addr; + dma_addr_t bus_addr; + dma_addr_t table_mask; + struct drm_dma_handle *table_handle; + struct drm_local_map mapping; + int table_size; +}; + +extern int drm_ati_pcigart_init(struct drm_device *dev, + struct drm_ati_pcigart_info * gart_info); +extern int drm_ati_pcigart_cleanup(struct drm_device *dev, + struct drm_ati_pcigart_info * gart_info); + +#endif diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h new file mode 100644 index 000000000..475b706b4 --- /dev/null +++ b/include/drm/bridge/analogix_dp.h @@ -0,0 +1,60 @@ +/* + * Analogix DP (Display Port) Core interface driver. + * + * Copyright (C) 2015 Rockchip Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#ifndef _ANALOGIX_DP_H_ +#define _ANALOGIX_DP_H_ + +#include + +struct analogix_dp_device; + +enum analogix_dp_devtype { + EXYNOS_DP, + RK3288_DP, + RK3399_EDP, +}; + +static inline bool is_rockchip(enum analogix_dp_devtype type) +{ + return type == RK3288_DP || type == RK3399_EDP; +} + +struct analogix_dp_plat_data { + enum analogix_dp_devtype dev_type; + struct drm_panel *panel; + struct drm_encoder *encoder; + struct drm_connector *connector; + bool skip_connector; + + int (*power_on_start)(struct analogix_dp_plat_data *); + int (*power_on_end)(struct analogix_dp_plat_data *); + int (*power_off)(struct analogix_dp_plat_data *); + int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *, + struct drm_connector *); + int (*get_modes)(struct analogix_dp_plat_data *, + struct drm_connector *); +}; + +int analogix_dp_psr_enabled(struct analogix_dp_device *dp); +int analogix_dp_enable_psr(struct analogix_dp_device *dp); +int analogix_dp_disable_psr(struct analogix_dp_device *dp); + +int analogix_dp_resume(struct analogix_dp_device *dp); +int analogix_dp_suspend(struct analogix_dp_device *dp); + +struct analogix_dp_device * +analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, + struct analogix_dp_plat_data *plat_data); +void analogix_dp_unbind(struct analogix_dp_device *dp); + +int analogix_dp_start_crc(struct drm_connector *connector); +int analogix_dp_stop_crc(struct drm_connector *connector); + +#endif /* _ANALOGIX_DP_H_ */ diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h new file mode 100644 index 000000000..ccb5aa846 --- /dev/null +++ b/include/drm/bridge/dw_hdmi.h @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2011 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __DW_HDMI__ +#define __DW_HDMI__ + +#include + +struct dw_hdmi; + +/** + * DOC: Supported input formats and encodings + * + * Depending on the Hardware configuration of the Controller IP, it supports + * a subset of the following input formats and encodings on its internal + * 48bit bus. 
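Returning to the Analogix DP glue interface above: a platform driver typically fills struct analogix_dp_plat_data with its encoder and power hooks and then hands control to analogix_dp_bind() from its component bind callback. A reduced sketch, with all my_* names standing in for driver specifics:

int my_dp_poweron(struct analogix_dp_plat_data *pd);	/* placeholder hooks */
int my_dp_poweroff(struct analogix_dp_plat_data *pd);

struct my_dp {
	struct drm_encoder encoder;
	struct analogix_dp_plat_data plat_data;
	struct analogix_dp_device *adp;
};

static int my_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm_dev = data;
	struct my_dp *dp = dev_get_drvdata(dev);

	/* the encoder is assumed to have been initialized beforehand */
	dp->plat_data.dev_type = RK3288_DP;
	dp->plat_data.encoder = &dp->encoder;
	dp->plat_data.power_on_start = my_dp_poweron;
	dp->plat_data.power_off = my_dp_poweroff;

	dp->adp = analogix_dp_bind(dev, drm_dev, &dp->plat_data);
	return PTR_ERR_OR_ZERO(dp->adp);
}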
+ * + * +----------------------+----------------------------------+------------------------------+ + * | Format Name | Format Code | Encodings | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 8bit | ``MEDIA_BUS_FMT_RGB888_1X24`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 10bits | ``MEDIA_BUS_FMT_RGB101010_1X30`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 12bits | ``MEDIA_BUS_FMT_RGB121212_1X36`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 16bits | ``MEDIA_BUS_FMT_RGB161616_1X48`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 8bit | ``MEDIA_BUS_FMT_YUV8_1X24`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 10bits | ``MEDIA_BUS_FMT_YUV10_1X30`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 12bits | ``MEDIA_BUS_FMT_YUV12_1X36`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 16bits | ``MEDIA_BUS_FMT_YUV16_1X48`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:2 8bit | ``MEDIA_BUS_FMT_UYVY8_1X16`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:2 10bits | ``MEDIA_BUS_FMT_UYVY10_1X20`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:2 12bits | ``MEDIA_BUS_FMT_UYVY12_1X24`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 8bit | ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 10bits | ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``| ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 12bits | ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``| ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 16bits | ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``| ``V4L2_YCBCR_ENC_601`` | + * | | | 
or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + */ + +enum { + DW_HDMI_RES_8, + DW_HDMI_RES_10, + DW_HDMI_RES_12, + DW_HDMI_RES_MAX, +}; + +enum dw_hdmi_phy_type { + DW_HDMI_PHY_DWC_HDMI_TX_PHY = 0x00, + DW_HDMI_PHY_DWC_MHL_PHY_HEAC = 0xb2, + DW_HDMI_PHY_DWC_MHL_PHY = 0xc2, + DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY_HEAC = 0xe2, + DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY = 0xf2, + DW_HDMI_PHY_DWC_HDMI20_TX_PHY = 0xf3, + DW_HDMI_PHY_VENDOR_PHY = 0xfe, +}; + +struct dw_hdmi_mpll_config { + unsigned long mpixelclock; + struct { + u16 cpce; + u16 gmp; + } res[DW_HDMI_RES_MAX]; +}; + +struct dw_hdmi_curr_ctrl { + unsigned long mpixelclock; + u16 curr[DW_HDMI_RES_MAX]; +}; + +struct dw_hdmi_phy_config { + unsigned long mpixelclock; + u16 sym_ctr; /*clock symbol and transmitter control*/ + u16 term; /*transmission termination value*/ + u16 vlev_ctr; /* voltage level control */ +}; + +struct dw_hdmi_phy_ops { + int (*init)(struct dw_hdmi *hdmi, void *data, + struct drm_display_mode *mode); + void (*disable)(struct dw_hdmi *hdmi, void *data); + enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data); + void (*update_hpd)(struct dw_hdmi *hdmi, void *data, + bool force, bool disabled, bool rxsense); + void (*setup_hpd)(struct dw_hdmi *hdmi, void *data); +}; + +struct dw_hdmi_plat_data { + struct regmap *regm; + enum drm_mode_status (*mode_valid)(struct drm_connector *connector, + const struct drm_display_mode *mode); + unsigned long input_bus_format; + unsigned long input_bus_encoding; + + /* Vendor PHY support */ + const struct dw_hdmi_phy_ops *phy_ops; + const char *phy_name; + void *phy_data; + + /* Synopsys PHY support */ + const struct dw_hdmi_mpll_config *mpll_cfg; + const struct dw_hdmi_curr_ctrl *cur_ctr; + const struct dw_hdmi_phy_config *phy_config; + int (*configure_phy)(struct dw_hdmi *hdmi, + const struct dw_hdmi_plat_data *pdata, + unsigned long mpixelclock); +}; + +struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, + const struct dw_hdmi_plat_data *plat_data); +void dw_hdmi_remove(struct dw_hdmi *hdmi); +void dw_hdmi_unbind(struct dw_hdmi *hdmi); +struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev, + struct drm_encoder *encoder, + const struct dw_hdmi_plat_data *plat_data); + +void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); + +void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); +void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); +void dw_hdmi_audio_disable(struct dw_hdmi *hdmi); + +/* PHY configuration */ +void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address); +void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, + unsigned char addr); + +void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable); +void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable); +void dw_hdmi_phy_reset(struct dw_hdmi *hdmi); + +enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi, + void *data); +void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data, + bool force, bool disabled, bool rxsense); +void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data); + +#endif /* __IMX_HDMI_H__ */ diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h new file mode 100644 index 000000000..d9c6d549f --- /dev/null +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) STMicroelectronics SA 2017 + * + * Authors: Philippe Cornu + * Yannick Fertre + * + * License terms: GNU General Public 
License (GPL), version 2 + */ + +#ifndef __DW_MIPI_DSI__ +#define __DW_MIPI_DSI__ + +struct dw_mipi_dsi; + +struct dw_mipi_dsi_phy_ops { + int (*init)(void *priv_data); + int (*get_lane_mbps)(void *priv_data, struct drm_display_mode *mode, + unsigned long mode_flags, u32 lanes, u32 format, + unsigned int *lane_mbps); +}; + +struct dw_mipi_dsi_plat_data { + void __iomem *base; + unsigned int max_data_lanes; + + enum drm_mode_status (*mode_valid)(void *priv_data, + const struct drm_display_mode *mode); + + const struct dw_mipi_dsi_phy_ops *phy_ops; + + void *priv_data; +}; + +struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev, + const struct dw_mipi_dsi_plat_data + *plat_data); +void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi); +struct dw_mipi_dsi *dw_mipi_dsi_bind(struct platform_device *pdev, + struct drm_encoder *encoder, + const struct dw_mipi_dsi_plat_data + *plat_data); +void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi); + +#endif /* __DW_MIPI_DSI__ */ diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h new file mode 100644 index 000000000..96a5e0f6f --- /dev/null +++ b/include/drm/bridge/mhl.h @@ -0,0 +1,380 @@ +/* + * Defines for Mobile High-Definition Link (MHL) interface + * + * Copyright (C) 2015, Samsung Electronics, Co., Ltd. + * Andrzej Hajda + * + * Based on MHL driver for Android devices. + * Copyright (C) 2013-2014 Silicon Image, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MHL_H__ +#define __MHL_H__ + +#include + +/* Device Capabilities Registers */ +enum { + MHL_DCAP_DEV_STATE, + MHL_DCAP_MHL_VERSION, + MHL_DCAP_CAT, + MHL_DCAP_ADOPTER_ID_H, + MHL_DCAP_ADOPTER_ID_L, + MHL_DCAP_VID_LINK_MODE, + MHL_DCAP_AUD_LINK_MODE, + MHL_DCAP_VIDEO_TYPE, + MHL_DCAP_LOG_DEV_MAP, + MHL_DCAP_BANDWIDTH, + MHL_DCAP_FEATURE_FLAG, + MHL_DCAP_DEVICE_ID_H, + MHL_DCAP_DEVICE_ID_L, + MHL_DCAP_SCRATCHPAD_SIZE, + MHL_DCAP_INT_STAT_SIZE, + MHL_DCAP_RESERVED, + MHL_DCAP_SIZE +}; + +#define MHL_DCAP_CAT_SINK 0x01 +#define MHL_DCAP_CAT_SOURCE 0x02 +#define MHL_DCAP_CAT_POWER 0x10 +#define MHL_DCAP_CAT_PLIM(x) ((x) << 5) + +#define MHL_DCAP_VID_LINK_RGB444 0x01 +#define MHL_DCAP_VID_LINK_YCBCR444 0x02 +#define MHL_DCAP_VID_LINK_YCBCR422 0x04 +#define MHL_DCAP_VID_LINK_PPIXEL 0x08 +#define MHL_DCAP_VID_LINK_ISLANDS 0x10 +#define MHL_DCAP_VID_LINK_VGA 0x20 +#define MHL_DCAP_VID_LINK_16BPP 0x40 + +#define MHL_DCAP_AUD_LINK_2CH 0x01 +#define MHL_DCAP_AUD_LINK_8CH 0x02 + +#define MHL_DCAP_VT_GRAPHICS 0x00 +#define MHL_DCAP_VT_PHOTO 0x02 +#define MHL_DCAP_VT_CINEMA 0x04 +#define MHL_DCAP_VT_GAMES 0x08 +#define MHL_DCAP_SUPP_VT 0x80 + +#define MHL_DCAP_LD_DISPLAY 0x01 +#define MHL_DCAP_LD_VIDEO 0x02 +#define MHL_DCAP_LD_AUDIO 0x04 +#define MHL_DCAP_LD_MEDIA 0x08 +#define MHL_DCAP_LD_TUNER 0x10 +#define MHL_DCAP_LD_RECORD 0x20 +#define MHL_DCAP_LD_SPEAKER 0x40 +#define MHL_DCAP_LD_GUI 0x80 +#define MHL_DCAP_LD_ALL 0xFF + +#define MHL_DCAP_FEATURE_RCP_SUPPORT 0x01 +#define MHL_DCAP_FEATURE_RAP_SUPPORT 0x02 +#define MHL_DCAP_FEATURE_SP_SUPPORT 0x04 +#define MHL_DCAP_FEATURE_UCP_SEND_SUPPOR 0x08 +#define MHL_DCAP_FEATURE_UCP_RECV_SUPPORT 0x10 +#define MHL_DCAP_FEATURE_RBP_SUPPORT 0x40 + +/* Extended Device Capabilities Registers */ +enum { + MHL_XDC_ECBUS_SPEEDS, + MHL_XDC_TMDS_SPEEDS, + MHL_XDC_ECBUS_ROLES, + MHL_XDC_LOG_DEV_MAPX, + MHL_XDC_SIZE +}; + +#define MHL_XDC_ECBUS_S_075 0x01 +#define MHL_XDC_ECBUS_S_8BIT 0x02 
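For the DesignWare MIPI DSI host interface just above, a SoC glue driver supplies its PHY callbacks through struct dw_mipi_dsi_plat_data and lets the generic bridge driver do the rest. A schematic probe, with my_* names as placeholders and register/clock setup elided:

/* Placeholder PHY hooks with the prototypes required by dw_mipi_dsi_phy_ops */
int my_dsi_phy_init(void *priv_data);
int my_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
			 unsigned long mode_flags, u32 lanes, u32 format,
			 unsigned int *lane_mbps);

struct my_dsi {
	void __iomem *regs;
	struct dw_mipi_dsi *dmd;
	struct dw_mipi_dsi_plat_data plat_data;
};

static const struct dw_mipi_dsi_phy_ops my_dsi_phy_ops = {
	.init		= my_dsi_phy_init,
	.get_lane_mbps	= my_dsi_get_lane_mbps,
};

static int my_dsi_probe(struct platform_device *pdev)
{
	struct my_dsi *dsi;

	dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	/* ... ioremap the controller registers into dsi->regs, get clocks ... */

	dsi->plat_data.base = dsi->regs;
	dsi->plat_data.max_data_lanes = 4;
	dsi->plat_data.phy_ops = &my_dsi_phy_ops;
	dsi->plat_data.priv_data = dsi;

	platform_set_drvdata(pdev, dsi);

	dsi->dmd = dw_mipi_dsi_probe(pdev, &dsi->plat_data);
	return PTR_ERR_OR_ZERO(dsi->dmd);
}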
+#define MHL_XDC_ECBUS_S_12BIT 0x04 +#define MHL_XDC_ECBUS_D_150 0x10 +#define MHL_XDC_ECBUS_D_8BIT 0x20 + +#define MHL_XDC_TMDS_000 0x00 +#define MHL_XDC_TMDS_150 0x01 +#define MHL_XDC_TMDS_300 0x02 +#define MHL_XDC_TMDS_600 0x04 + +/* MHL_XDC_ECBUS_ROLES flags */ +#define MHL_XDC_DEV_HOST 0x01 +#define MHL_XDC_DEV_DEVICE 0x02 +#define MHL_XDC_DEV_CHARGER 0x04 +#define MHL_XDC_HID_HOST 0x08 +#define MHL_XDC_HID_DEVICE 0x10 + +/* MHL_XDC_LOG_DEV_MAPX flags */ +#define MHL_XDC_LD_PHONE 0x01 + +/* Device Status Registers */ +enum { + MHL_DST_CONNECTED_RDY, + MHL_DST_LINK_MODE, + MHL_DST_VERSION, + MHL_DST_SIZE +}; + +/* Offset of DEVSTAT registers */ +#define MHL_DST_OFFSET 0x30 +#define MHL_DST_REG(name) (MHL_DST_OFFSET + MHL_DST_##name) + +#define MHL_DST_CONN_DCAP_RDY 0x01 +#define MHL_DST_CONN_XDEVCAPP_SUPP 0x02 +#define MHL_DST_CONN_POW_STAT 0x04 +#define MHL_DST_CONN_PLIM_STAT_MASK 0x38 + +#define MHL_DST_LM_CLK_MODE_MASK 0x07 +#define MHL_DST_LM_CLK_MODE_PACKED_PIXEL 0x02 +#define MHL_DST_LM_CLK_MODE_NORMAL 0x03 +#define MHL_DST_LM_PATH_EN_MASK 0x08 +#define MHL_DST_LM_PATH_ENABLED 0x08 +#define MHL_DST_LM_PATH_DISABLED 0x00 +#define MHL_DST_LM_MUTED_MASK 0x10 + +/* Extended Device Status Registers */ +enum { + MHL_XDS_CURR_ECBUS_MODE, + MHL_XDS_AVLINK_MODE_STATUS, + MHL_XDS_AVLINK_MODE_CONTROL, + MHL_XDS_MULTI_SINK_STATUS, + MHL_XDS_SIZE +}; + +/* Offset of XDEVSTAT registers */ +#define MHL_XDS_OFFSET 0x90 +#define MHL_XDS_REG(name) (MHL_XDS_OFFSET + MHL_XDS_##name) + +/* MHL_XDS_REG_CURR_ECBUS_MODE flags */ +#define MHL_XDS_SLOT_MODE_8BIT 0x00 +#define MHL_XDS_SLOT_MODE_6BIT 0x01 +#define MHL_XDS_ECBUS_S 0x04 +#define MHL_XDS_ECBUS_D 0x08 + +#define MHL_XDS_LINK_CLOCK_75MHZ 0x00 +#define MHL_XDS_LINK_CLOCK_150MHZ 0x10 +#define MHL_XDS_LINK_CLOCK_300MHZ 0x20 +#define MHL_XDS_LINK_CLOCK_600MHZ 0x30 + +#define MHL_XDS_LINK_STATUS_NO_SIGNAL 0x00 +#define MHL_XDS_LINK_STATUS_CRU_LOCKED 0x01 +#define MHL_XDS_LINK_STATUS_TMDS_NORMAL 0x02 +#define MHL_XDS_LINK_STATUS_TMDS_RESERVED 0x03 + +#define MHL_XDS_LINK_RATE_1_5_GBPS 0x00 +#define MHL_XDS_LINK_RATE_3_0_GBPS 0x01 +#define MHL_XDS_LINK_RATE_6_0_GBPS 0x02 +#define MHL_XDS_ATT_CAPABLE 0x08 + +#define MHL_XDS_SINK_STATUS_1_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_1_HPD_HIGH 0x01 +#define MHL_XDS_SINK_STATUS_2_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_2_HPD_HIGH 0x04 +#define MHL_XDS_SINK_STATUS_3_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_3_HPD_HIGH 0x10 +#define MHL_XDS_SINK_STATUS_4_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_4_HPD_HIGH 0x40 + +/* Interrupt Registers */ +enum { + MHL_INT_RCHANGE, + MHL_INT_DCHANGE, + MHL_INT_SIZE +}; + +/* Offset of DEVSTAT registers */ +#define MHL_INT_OFFSET 0x20 +#define MHL_INT_REG(name) (MHL_INT_OFFSET + MHL_INT_##name) + +#define MHL_INT_RC_DCAP_CHG 0x01 +#define MHL_INT_RC_DSCR_CHG 0x02 +#define MHL_INT_RC_REQ_WRT 0x04 +#define MHL_INT_RC_GRT_WRT 0x08 +#define MHL_INT_RC_3D_REQ 0x10 +#define MHL_INT_RC_FEAT_REQ 0x20 +#define MHL_INT_RC_FEAT_COMPLETE 0x40 + +#define MHL_INT_DC_EDID_CHG 0x02 + +enum { + MHL_ACK = 0x33, /* Command or Data byte acknowledge */ + MHL_NACK = 0x34, /* Command or Data byte not acknowledge */ + MHL_ABORT = 0x35, /* Transaction abort */ + MHL_WRITE_STAT = 0xe0, /* Write one status register */ + MHL_SET_INT = 0x60, /* Write one interrupt register */ + MHL_READ_DEVCAP_REG = 0x61, /* Read one register */ + MHL_GET_STATE = 0x62, /* Read CBUS revision level from follower */ + MHL_GET_VENDOR_ID = 0x63, /* Read vendor ID value from follower */ + MHL_SET_HPD = 0x64, /* Set Hot Plug 
Detect in follower */ + MHL_CLR_HPD = 0x65, /* Clear Hot Plug Detect in follower */ + MHL_SET_CAP_ID = 0x66, /* Set Capture ID for downstream device */ + MHL_GET_CAP_ID = 0x67, /* Get Capture ID from downstream device */ + MHL_MSC_MSG = 0x68, /* VS command to send RCP sub-commands */ + MHL_GET_SC1_ERRORCODE = 0x69, /* Get Vendor-Specific error code */ + MHL_GET_DDC_ERRORCODE = 0x6A, /* Get DDC channel command error code */ + MHL_GET_MSC_ERRORCODE = 0x6B, /* Get MSC command error code */ + MHL_WRITE_BURST = 0x6C, /* Write 1-16 bytes to responder's scratchpad */ + MHL_GET_SC3_ERRORCODE = 0x6D, /* Get channel 3 command error code */ + MHL_WRITE_XSTAT = 0x70, /* Write one extended status register */ + MHL_READ_XDEVCAP_REG = 0x71, /* Read one extended devcap register */ + /* let the rest of these float, they are software specific */ + MHL_READ_EDID_BLOCK, + MHL_SEND_3D_REQ_OR_FEAT_REQ, + MHL_READ_DEVCAP, + MHL_READ_XDEVCAP +}; + +/* MSC message types */ +enum { + MHL_MSC_MSG_RCP = 0x10, /* RCP sub-command */ + MHL_MSC_MSG_RCPK = 0x11, /* RCP Acknowledge sub-command */ + MHL_MSC_MSG_RCPE = 0x12, /* RCP Error sub-command */ + MHL_MSC_MSG_RAP = 0x20, /* Mode Change Warning sub-command */ + MHL_MSC_MSG_RAPK = 0x21, /* MCW Acknowledge sub-command */ + MHL_MSC_MSG_RBP = 0x22, /* Remote Button Protocol sub-command */ + MHL_MSC_MSG_RBPK = 0x23, /* RBP Acknowledge sub-command */ + MHL_MSC_MSG_RBPE = 0x24, /* RBP Error sub-command */ + MHL_MSC_MSG_UCP = 0x30, /* UCP sub-command */ + MHL_MSC_MSG_UCPK = 0x31, /* UCP Acknowledge sub-command */ + MHL_MSC_MSG_UCPE = 0x32, /* UCP Error sub-command */ + MHL_MSC_MSG_RUSB = 0x40, /* Request USB host role */ + MHL_MSC_MSG_RUSBK = 0x41, /* Acknowledge request for USB host role */ + MHL_MSC_MSG_RHID = 0x42, /* Request HID host role */ + MHL_MSC_MSG_RHIDK = 0x43, /* Acknowledge request for HID host role */ + MHL_MSC_MSG_ATT = 0x50, /* Request attention sub-command */ + MHL_MSC_MSG_ATTK = 0x51, /* ATT Acknowledge sub-command */ + MHL_MSC_MSG_BIST_TRIGGER = 0x60, + MHL_MSC_MSG_BIST_REQUEST_STAT = 0x61, + MHL_MSC_MSG_BIST_READY = 0x62, + MHL_MSC_MSG_BIST_STOP = 0x63, +}; + +/* RAP action codes */ +#define MHL_RAP_POLL 0x00 /* Just do an ack */ +#define MHL_RAP_CONTENT_ON 0x10 /* Turn content stream ON */ +#define MHL_RAP_CONTENT_OFF 0x11 /* Turn content stream OFF */ +#define MHL_RAP_CBUS_MODE_DOWN 0x20 +#define MHL_RAP_CBUS_MODE_UP 0x21 + +/* RAPK status codes */ +#define MHL_RAPK_NO_ERR 0x00 /* RAP action recognized & supported */ +#define MHL_RAPK_UNRECOGNIZED 0x01 /* Unknown RAP action code received */ +#define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */ +#define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */ + +/* Bit masks for RCP messages */ +#define MHL_RCP_KEY_RELEASED_MASK 0x80 +#define MHL_RCP_KEY_ID_MASK 0x7F + +/* + * Error status codes for RCPE messages + */ +/* No error. (Not allowed in RCPE messages) */ +#define MHL_RCPE_STATUS_NO_ERROR 0x00 +/* Unsupported/unrecognized key code */ +#define MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 +/* Responder busy. Initiator may retry message */ +#define MHL_RCPE_STATUS_BUSY 0x02 + +/* + * Error status codes for RBPE messages + */ +/* No error. (Not allowed in RBPE messages) */ +#define MHL_RBPE_STATUS_NO_ERROR 0x00 +/* Unsupported/unrecognized button code */ +#define MHL_RBPE_STATUS_INEFFECTIVE_BUTTON_CODE 0x01 +/* Responder busy. Initiator may retry message */ +#define MHL_RBPE_STATUS_BUSY 0x02 + +/* + * Error status codes for UCPE messages + */ +/* No error. 
(Not allowed in UCPE messages) */ +#define MHL_UCPE_STATUS_NO_ERROR 0x00 +/* Unsupported/unrecognized key code */ +#define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 + +enum mhl_burst_id { + MHL_BURST_ID_3D_VIC = 0x10, + MHL_BURST_ID_3D_DTD = 0x11, + MHL_BURST_ID_HEV_VIC = 0x20, + MHL_BURST_ID_HEV_DTDA = 0x21, + MHL_BURST_ID_HEV_DTDB = 0x22, + MHL_BURST_ID_VC_ASSIGN = 0x38, + MHL_BURST_ID_VC_CONFIRM = 0x39, + MHL_BURST_ID_AUD_DELAY = 0x40, + MHL_BURST_ID_ADT_BURSTID = 0x41, + MHL_BURST_ID_BIST_SETUP = 0x51, + MHL_BURST_ID_BIST_RETURN_STAT = 0x52, + MHL_BURST_ID_EMSC_SUPPORT = 0x61, + MHL_BURST_ID_HID_PAYLOAD = 0x62, + MHL_BURST_ID_BLK_RCV_BUFFER_INFO = 0x63, + MHL_BURST_ID_BITS_PER_PIXEL_FMT = 0x64, +}; + +struct mhl_burst_blk_rcv_buffer_info { + __be16 id; + __le16 size; +} __packed; + +struct mhl3_burst_header { + __be16 id; + u8 checksum; + u8 total_entries; + u8 sequence_index; +} __packed; + +struct mhl_burst_bits_per_pixel_fmt { + struct mhl3_burst_header hdr; + u8 num_entries; + struct { + u8 stream_id; + u8 pixel_format; + } __packed desc[0]; +} __packed; + +struct mhl_burst_emsc_support { + struct mhl3_burst_header hdr; + u8 num_entries; + __be16 burst_id[0]; +} __packed; + +struct mhl_burst_audio_descr { + struct mhl3_burst_header hdr; + u8 flags; + u8 short_desc[9]; +} __packed; + +/* + * MHL3 infoframe related definitions + */ + +#define MHL3_IEEE_OUI 0x7ca61d +#define MHL3_INFOFRAME_SIZE 15 + +enum mhl3_video_format { + MHL3_VIDEO_FORMAT_NONE, + MHL3_VIDEO_FORMAT_3D, + MHL3_VIDEO_FORMAT_MULTI_VIEW, + MHL3_VIDEO_FORMAT_DUAL_3D +}; + +enum mhl3_3d_format_type { + MHL3_3D_FORMAT_TYPE_FS, /* frame sequential */ + MHL3_3D_FORMAT_TYPE_TB, /* top-bottom */ + MHL3_3D_FORMAT_TYPE_LR, /* left-right */ + MHL3_3D_FORMAT_TYPE_FS_TB, /* frame sequential, top-bottom */ + MHL3_3D_FORMAT_TYPE_FS_LR, /* frame sequential, left-right */ + MHL3_3D_FORMAT_TYPE_TB_LR /* top-bottom, left-right */ +}; + +struct mhl3_infoframe { + unsigned char version; + enum mhl3_video_format video_format; + enum mhl3_3d_format_type format_type; + bool sep_audio; + int hev_format; + int av_delay; +}; + +#endif /* __MHL_H__ */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h new file mode 100644 index 000000000..f7a19c2a7 --- /dev/null +++ b/include/drm/drmP.h @@ -0,0 +1,116 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. (Rik) Faith + * Author: Gareth Hughes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_P_H_ +#define _DRM_P_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct module; + +struct device_node; +struct videomode; +struct reservation_object; +struct dma_buf_attachment; + +struct pci_dev; +struct pci_controller; + +#define DRM_IF_VERSION(maj, min) (maj << 16 | min) + +#define DRM_SWITCH_POWER_ON 0 +#define DRM_SWITCH_POWER_OFF 1 +#define DRM_SWITCH_POWER_CHANGING 2 +#define DRM_SWITCH_POWER_DYNAMIC_OFF 3 + +/* returns true if currently okay to sleep */ +static inline bool drm_can_sleep(void) +{ + if (in_atomic() || in_dbg_master() || irqs_disabled()) + return false; + return true; +} + +/* helper for handling conditionals in various for_each macros */ +#define for_each_if(condition) if (!(condition)) {} else + +#endif diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h new file mode 100644 index 000000000..b05e46227 --- /dev/null +++ b/include/drm/drm_agpsupport.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DRM_AGPSUPPORT_H_ +#define _DRM_AGPSUPPORT_H_ + +#include +#include +#include +#include +#include +#include +#include + +struct drm_device; +struct drm_file; + +struct drm_agp_head { + struct agp_kern_info agp_info; + struct list_head memory; + unsigned long mode; + struct agp_bridge_data *bridge; + int enabled; + int acquired; + unsigned long base; + int agp_mtrr; + int cant_use_aperture; + unsigned long page_mask; +}; + +#if IS_ENABLED(CONFIG_AGP) + +void drm_free_agp(struct agp_memory * handle, int pages); +int drm_bind_agp(struct agp_memory * handle, unsigned int start); +int drm_unbind_agp(struct agp_memory * handle); +struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, + struct page **pages, + unsigned long num_pages, + uint32_t gtt_offset, + uint32_t type); + +struct drm_agp_head *drm_agp_init(struct drm_device *dev); +void drm_legacy_agp_clear(struct drm_device *dev); +int drm_agp_acquire(struct drm_device *dev); +int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_release(struct drm_device *dev); +int drm_agp_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); +int drm_agp_enable_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); +int drm_agp_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_agp_free_ioctl(struct drm_device *dev, void *data, + struct 
drm_file *file_priv); +int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); +int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); +int drm_agp_bind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +#else /* CONFIG_AGP */ + +static inline void drm_free_agp(struct agp_memory * handle, int pages) +{ +} + +static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start) +{ + return -ENODEV; +} + +static inline int drm_unbind_agp(struct agp_memory * handle) +{ + return -ENODEV; +} + +static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, + struct page **pages, + unsigned long num_pages, + uint32_t gtt_offset, + uint32_t type) +{ + return NULL; +} + +static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev) +{ + return NULL; +} + +static inline void drm_legacy_agp_clear(struct drm_device *dev) +{ +} + +static inline int drm_agp_acquire(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_agp_release(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_agp_enable(struct drm_device *dev, + struct drm_agp_mode mode) +{ + return -ENODEV; +} + +static inline int drm_agp_info(struct drm_device *dev, + struct drm_agp_info *info) +{ + return -ENODEV; +} + +static inline int drm_agp_alloc(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_agp_free(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_agp_unbind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} + +static inline int drm_agp_bind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} + +#endif /* CONFIG_AGP */ + +#endif /* _DRM_AGPSUPPORT_H_ */ diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h new file mode 100644 index 000000000..1e713154f --- /dev/null +++ b/include/drm/drm_atomic.h @@ -0,0 +1,916 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Rob Clark + * Daniel Vetter + */ + +#ifndef DRM_ATOMIC_H_ +#define DRM_ATOMIC_H_ + +#include + +/** + * struct drm_crtc_commit - track modeset commits on a CRTC + * + * This structure is used to track pending modeset changes and atomic commit on + * a per-CRTC basis. Since updating the list should never block this structure + * is reference counted to allow waiters to safely wait on an event to complete, + * without holding any locks. + * + * It has 3 different events in total to allow a fine-grained synchronization + * between outstanding updates:: + * + * atomic commit thread hardware + * + * write new state into hardware ----> ... + * signal hw_done + * switch to new state on next + * ... v/hblank + * + * wait for buffers to show up ... + * + * ... send completion irq + * irq handler signals flip_done + * cleanup old buffers + * + * signal cleanup_done + * + * wait for flip_done <---- + * clean up atomic state + * + * The important bit to know is that cleanup_done is the terminal event, but the + * ordering between flip_done and hw_done is entirely up to the specific driver + * and modeset state change. + * + * For an implementation of how to use this look at + * drm_atomic_helper_setup_commit() from the atomic helper library. + */ +struct drm_crtc_commit { + /** + * @crtc: + * + * DRM CRTC for this commit. + */ + struct drm_crtc *crtc; + + /** + * @ref: + * + * Reference count for this structure. Needed to allow blocking on + * completions without the risk of the completion disappearing + * meanwhile. + */ + struct kref ref; + + /** + * @flip_done: + * + * Will be signaled when the hardware has flipped to the new set of + * buffers. Signals at the same time as when the drm event for this + * commit is sent to userspace, or when an out-fence is singalled. Note + * that for most hardware, in most cases this happens after @hw_done is + * signalled. + */ + struct completion flip_done; + + /** + * @hw_done: + * + * Will be signalled when all hw register changes for this commit have + * been written out. Especially when disabling a pipe this can be much + * later than than @flip_done, since that can signal already when the + * screen goes black, whereas to fully shut down a pipe more register + * I/O is required. + * + * Note that this does not need to include separately reference-counted + * resources like backing storage buffer pinning, or runtime pm + * management. + */ + struct completion hw_done; + + /** + * @cleanup_done: + * + * Will be signalled after old buffers have been cleaned up by calling + * drm_atomic_helper_cleanup_planes(). Since this can only happen after + * a vblank wait completed it might be a bit later. This completion is + * useful to throttle updates and avoid hardware updates getting ahead + * of the buffer cleanup too much. + */ + struct completion cleanup_done; + + /** + * @commit_entry: + * + * Entry on the per-CRTC &drm_crtc.commit_list. Protected by + * $drm_crtc.commit_lock. + */ + struct list_head commit_entry; + + /** + * @event: + * + * &drm_pending_vblank_event pointer to clean up private events. + */ + struct drm_pending_vblank_event *event; + + /** + * @abort_completion: + * + * A flag that's set after drm_atomic_helper_setup_commit takes a second + * reference for the completion of $drm_crtc_state.event. It's used by + * the free code to remove the second reference if commit fails. 
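The event ordering described above is easiest to see in a driver's commit tail. Below is a condensed sketch built from the standard atomic helpers, which signal hw_done and flip_done on each CRTC commit internally; it shows one common ordering under simplifying assumptions, not the only valid one.

static void my_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	/* all register writes done: signals hw_done on every CRTC commit */
	drm_atomic_helper_commit_hw_done(old_state);

	/* blocks until flip_done, i.e. the new buffers are on the screen */
	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	/* releases the old buffers and signals cleanup_done */
	drm_atomic_helper_cleanup_planes(dev, old_state);
}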
+ */ + bool abort_completion; +}; + +struct __drm_planes_state { + struct drm_plane *ptr; + struct drm_plane_state *state, *old_state, *new_state; +}; + +struct __drm_crtcs_state { + struct drm_crtc *ptr; + struct drm_crtc_state *state, *old_state, *new_state; + + /** + * @commit: + * + * A reference to the CRTC commit object that is kept for use by + * drm_atomic_helper_wait_for_flip_done() after + * drm_atomic_helper_commit_hw_done() is called. This ensures that a + * concurrent commit won't free a commit object that is still in use. + */ + struct drm_crtc_commit *commit; + + s32 __user *out_fence_ptr; + u64 last_vblank_count; +}; + +struct __drm_connnectors_state { + struct drm_connector *ptr; + struct drm_connector_state *state, *old_state, *new_state; + /** + * @out_fence_ptr: + * + * User-provided pointer which the kernel uses to return a sync_file + * file descriptor. Used by writeback connectors to signal completion of + * the writeback. + */ + s32 __user *out_fence_ptr; +}; + +struct drm_private_obj; +struct drm_private_state; + +/** + * struct drm_private_state_funcs - atomic state functions for private objects + * + * These hooks are used by atomic helpers to create, swap and destroy states of + * private objects. The structure itself is used as a vtable to identify the + * associated private object type. Each private object type that needs to be + * added to the atomic states is expected to have an implementation of these + * hooks and pass a pointer to it's drm_private_state_funcs struct to + * drm_atomic_get_private_obj_state(). + */ +struct drm_private_state_funcs { + /** + * @atomic_duplicate_state: + * + * Duplicate the current state of the private object and return it. It + * is an error to call this before obj->state has been initialized. + * + * RETURNS: + * + * Duplicated atomic state or NULL when obj->state is not + * initialized or allocation failed. + */ + struct drm_private_state *(*atomic_duplicate_state)(struct drm_private_obj *obj); + + /** + * @atomic_destroy_state: + * + * Frees the private object state created with @atomic_duplicate_state. + */ + void (*atomic_destroy_state)(struct drm_private_obj *obj, + struct drm_private_state *state); +}; + +/** + * struct drm_private_obj - base struct for driver private atomic object + * + * A driver private object is initialized by calling + * drm_atomic_private_obj_init() and cleaned up by calling + * drm_atomic_private_obj_fini(). + * + * Currently only tracks the state update functions and the opaque driver + * private state itself, but in the future might also track which + * &drm_modeset_lock is required to duplicate and update this object's state. + */ +struct drm_private_obj { + /** + * @state: Current atomic state for this driver private object. + */ + struct drm_private_state *state; + + /** + * @funcs: + * + * Functions to manipulate the state of this driver private object, see + * &drm_private_state_funcs. + */ + const struct drm_private_state_funcs *funcs; +}; + +/** + * struct drm_private_state - base struct for driver private object state + * @state: backpointer to global drm_atomic_state + * + * Currently only contains a backpointer to the overall atomic update, but in + * the future also might hold synchronization information similar to e.g. + * &drm_crtc.commit. 
+ */ +struct drm_private_state { + struct drm_atomic_state *state; +}; + +struct __drm_private_objs_state { + struct drm_private_obj *ptr; + struct drm_private_state *state, *old_state, *new_state; +}; + +/** + * struct drm_atomic_state - the global state object for atomic updates + * @ref: count of all references to this state (will not be freed until zero) + * @dev: parent DRM device + * @allow_modeset: allow full modeset + * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics + * @async_update: hint for asynchronous plane update + * @planes: pointer to array of structures with per-plane data + * @crtcs: pointer to array of CRTC pointers + * @num_connector: size of the @connectors and @connector_states arrays + * @connectors: pointer to array of structures with per-connector data + * @num_private_objs: size of the @private_objs array + * @private_objs: pointer to array of private object pointers + * @acquire_ctx: acquire context for this atomic modeset state update + * + * States are added to an atomic update by calling drm_atomic_get_crtc_state(), + * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for + * private state structures, drm_atomic_get_private_obj_state(). + */ +struct drm_atomic_state { + struct kref ref; + + struct drm_device *dev; + bool allow_modeset : 1; + bool legacy_cursor_update : 1; + bool async_update : 1; + struct __drm_planes_state *planes; + struct __drm_crtcs_state *crtcs; + int num_connector; + struct __drm_connnectors_state *connectors; + int num_private_objs; + struct __drm_private_objs_state *private_objs; + + struct drm_modeset_acquire_ctx *acquire_ctx; + + /** + * @fake_commit: + * + * Used for signaling unbound planes/connectors. + * When a connector or plane is not bound to any CRTC, it's still important + * to preserve linearity to prevent the atomic states from being freed to early. + * + * This commit (if set) is not bound to any crtc, but will be completed when + * drm_atomic_helper_commit_hw_done() is called. + */ + struct drm_crtc_commit *fake_commit; + + /** + * @commit_work: + * + * Work item which can be used by the driver or helpers to execute the + * commit without blocking. + */ + struct work_struct commit_work; +}; + +void __drm_crtc_commit_free(struct kref *kref); + +/** + * drm_crtc_commit_get - acquire a reference to the CRTC commit + * @commit: CRTC commit + * + * Increases the reference of @commit. + * + * Returns: + * The pointer to @commit, with reference increased. + */ +static inline struct drm_crtc_commit *drm_crtc_commit_get(struct drm_crtc_commit *commit) +{ + kref_get(&commit->ref); + return commit; +} + +/** + * drm_crtc_commit_put - release a reference to the CRTC commmit + * @commit: CRTC commit + * + * This releases a reference to @commit which is freed after removing the + * final reference. No locking required and callable from any context. 
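Taken together with the completions documented in struct drm_crtc_commit, the get/put pair above lets code sleep on a commit without racing against its release, which is essentially the pattern drm_atomic_helper_wait_for_flip_done() applies per CRTC. A minimal sketch (the caller is assumed to hold a valid commit pointer):

static void my_wait_for_flip(struct drm_crtc_commit *commit)
{
	if (!commit)
		return;

	/* hold a reference so the commit cannot be freed while we sleep */
	drm_crtc_commit_get(commit);
	wait_for_completion(&commit->flip_done);
	drm_crtc_commit_put(commit);
}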
+ */ +static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit) +{ + kref_put(&commit->ref, __drm_crtc_commit_free); +} + +struct drm_atomic_state * __must_check +drm_atomic_state_alloc(struct drm_device *dev); +void drm_atomic_state_clear(struct drm_atomic_state *state); + +/** + * drm_atomic_state_get - acquire a reference to the atomic state + * @state: The atomic state + * + * Returns a new reference to the @state + */ +static inline struct drm_atomic_state * +drm_atomic_state_get(struct drm_atomic_state *state) +{ + kref_get(&state->ref); + return state; +} + +void __drm_atomic_state_free(struct kref *ref); + +/** + * drm_atomic_state_put - release a reference to the atomic state + * @state: The atomic state + * + * This releases a reference to @state which is freed after removing the + * final reference. No locking required and callable from any context. + */ +static inline void drm_atomic_state_put(struct drm_atomic_state *state) +{ + kref_put(&state->ref, __drm_atomic_state_free); +} + +int __must_check +drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state); +void drm_atomic_state_default_clear(struct drm_atomic_state *state); +void drm_atomic_state_default_release(struct drm_atomic_state *state); + +struct drm_crtc_state * __must_check +drm_atomic_get_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc); +int drm_atomic_crtc_set_property(struct drm_crtc *crtc, + struct drm_crtc_state *state, struct drm_property *property, + uint64_t val); +struct drm_plane_state * __must_check +drm_atomic_get_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane); +struct drm_connector_state * __must_check +drm_atomic_get_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector); + +void drm_atomic_private_obj_init(struct drm_private_obj *obj, + struct drm_private_state *state, + const struct drm_private_state_funcs *funcs); +void drm_atomic_private_obj_fini(struct drm_private_obj *obj); + +struct drm_private_state * __must_check +drm_atomic_get_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj); + +/** + * drm_atomic_get_existing_crtc_state - get crtc state, if it exists + * @state: global atomic state object + * @crtc: crtc to grab + * + * This function returns the crtc state for the given crtc, or NULL + * if the crtc is not part of the global atomic state. + * + * This function is deprecated, @drm_atomic_get_old_crtc_state or + * @drm_atomic_get_new_crtc_state should be used instead. + */ +static inline struct drm_crtc_state * +drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + return state->crtcs[drm_crtc_index(crtc)].state; +} + +/** + * drm_atomic_get_old_crtc_state - get old crtc state, if it exists + * @state: global atomic state object + * @crtc: crtc to grab + * + * This function returns the old crtc state for the given crtc, or + * NULL if the crtc is not part of the global atomic state. + */ +static inline struct drm_crtc_state * +drm_atomic_get_old_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + return state->crtcs[drm_crtc_index(crtc)].old_state; +} +/** + * drm_atomic_get_new_crtc_state - get new crtc state, if it exists + * @state: global atomic state object + * @crtc: crtc to grab + * + * This function returns the new crtc state for the given crtc, or + * NULL if the crtc is not part of the global atomic state. 
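The allocation and reference helpers above combine into the usual in-kernel pattern for building an update: allocate a state, pull object states into it, commit, then drop the initial reference. A simplified sketch that switches one CRTC off; "ctx" is an acquire context the caller already set up, and -EDEADLK back-off handling is omitted for brevity.

static int my_disable_crtc(struct drm_crtc *crtc,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}
	crtc_state->active = false;

	ret = drm_atomic_commit(state);
out:
	drm_atomic_state_put(state);	/* drops the reference from _alloc() */
	return ret;
}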
+ */ +static inline struct drm_crtc_state * +drm_atomic_get_new_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + return state->crtcs[drm_crtc_index(crtc)].new_state; +} + +/** + * drm_atomic_get_existing_plane_state - get plane state, if it exists + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the plane state for the given plane, or NULL + * if the plane is not part of the global atomic state. + * + * This function is deprecated, @drm_atomic_get_old_plane_state or + * @drm_atomic_get_new_plane_state should be used instead. + */ +static inline struct drm_plane_state * +drm_atomic_get_existing_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + return state->planes[drm_plane_index(plane)].state; +} + +/** + * drm_atomic_get_old_plane_state - get plane state, if it exists + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the old plane state for the given plane, or + * NULL if the plane is not part of the global atomic state. + */ +static inline struct drm_plane_state * +drm_atomic_get_old_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + return state->planes[drm_plane_index(plane)].old_state; +} + +/** + * drm_atomic_get_new_plane_state - get plane state, if it exists + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the new plane state for the given plane, or + * NULL if the plane is not part of the global atomic state. + */ +static inline struct drm_plane_state * +drm_atomic_get_new_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + return state->planes[drm_plane_index(plane)].new_state; +} + +/** + * drm_atomic_get_existing_connector_state - get connector state, if it exists + * @state: global atomic state object + * @connector: connector to grab + * + * This function returns the connector state for the given connector, + * or NULL if the connector is not part of the global atomic state. + * + * This function is deprecated, @drm_atomic_get_old_connector_state or + * @drm_atomic_get_new_connector_state should be used instead. + */ +static inline struct drm_connector_state * +drm_atomic_get_existing_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int index = drm_connector_index(connector); + + if (index >= state->num_connector) + return NULL; + + return state->connectors[index].state; +} + +/** + * drm_atomic_get_old_connector_state - get connector state, if it exists + * @state: global atomic state object + * @connector: connector to grab + * + * This function returns the old connector state for the given connector, + * or NULL if the connector is not part of the global atomic state. + */ +static inline struct drm_connector_state * +drm_atomic_get_old_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int index = drm_connector_index(connector); + + if (index >= state->num_connector) + return NULL; + + return state->connectors[index].old_state; +} + +/** + * drm_atomic_get_new_connector_state - get connector state, if it exists + * @state: global atomic state object + * @connector: connector to grab + * + * This function returns the new connector state for the given connector, + * or NULL if the connector is not part of the global atomic state. 
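A typical consumer of these accessors is a plane's atomic_check hook, which needs to look at the state of the CRTC the plane is being placed on within the same update. An illustrative fragment (driver-specific checks reduced to a single enable test):

static int my_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;	/* plane is being disabled, nothing to check */

	/* the CRTC state is pulled into the update when the plane is assigned */
	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	if (!crtc_state->enable)
		return -EINVAL;

	return 0;
}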
+ */ +static inline struct drm_connector_state * +drm_atomic_get_new_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int index = drm_connector_index(connector); + + if (index >= state->num_connector) + return NULL; + + return state->connectors[index].new_state; +} + +/** + * __drm_atomic_get_current_plane_state - get current plane state + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the plane state for the given plane, either from + * @state, or if the plane isn't part of the atomic state update, from @plane. + * This is useful in atomic check callbacks, when drivers need to peek at, but + * not change, state of other planes, since it avoids threading an error code + * back up the call chain. + * + * WARNING: + * + * Note that this function is in general unsafe since it doesn't check for the + * required locking for access state structures. Drivers must ensure that it is + * safe to access the returned state structure through other means. One common + * example is when planes are fixed to a single CRTC, and the driver knows that + * the CRTC lock is held already. In that case holding the CRTC lock gives a + * read-lock on all planes connected to that CRTC. But if planes can be + * reassigned things get more tricky. In that case it's better to use + * drm_atomic_get_plane_state and wire up full error handling. + * + * Returns: + * + * Read-only pointer to the current plane state. + */ +static inline const struct drm_plane_state * +__drm_atomic_get_current_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + if (state->planes[drm_plane_index(plane)].state) + return state->planes[drm_plane_index(plane)].state; + + return plane->state; +} + +int __must_check +drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, + const struct drm_display_mode *mode); +int __must_check +drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, + struct drm_property_blob *blob); +int __must_check +drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, + struct drm_crtc *crtc); +void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, + struct drm_framebuffer *fb); +void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, + struct dma_fence *fence); +int __must_check +drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, + struct drm_crtc *crtc); +int drm_atomic_set_writeback_fb_for_connector( + struct drm_connector_state *conn_state, + struct drm_framebuffer *fb); +int __must_check +drm_atomic_add_affected_connectors(struct drm_atomic_state *state, + struct drm_crtc *crtc); +int __must_check +drm_atomic_add_affected_planes(struct drm_atomic_state *state, + struct drm_crtc *crtc); + +int __must_check drm_atomic_check_only(struct drm_atomic_state *state); +int __must_check drm_atomic_commit(struct drm_atomic_state *state); +int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); + +void drm_state_dump(struct drm_device *dev, struct drm_printer *p); + +/** + * for_each_oldnew_connector_in_state - iterate over all connectors in an atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @old_connector_state: &struct drm_connector_state iteration cursor for the + * old state + * @new_connector_state: &struct drm_connector_state iteration cursor for the + * new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all 
connectors in an atomic update, tracking both old and + * new state. This is useful in places where the state delta needs to be + * considered, for example in atomic check functions. + */ +#define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (old_connector_state) = (__state)->connectors[__i].old_state, \ + (new_connector_state) = (__state)->connectors[__i].new_state, 1)) + +/** + * for_each_old_connector_in_state - iterate over all connectors in an atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @old_connector_state: &struct drm_connector_state iteration cursor for the + * old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all connectors in an atomic update, tracking only the old + * state. This is useful in disable functions, where we need the old state the + * hardware is still in. + */ +#define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (old_connector_state) = (__state)->connectors[__i].old_state, 1)) + +/** + * for_each_new_connector_in_state - iterate over all connectors in an atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @new_connector_state: &struct drm_connector_state iteration cursor for the + * new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all connectors in an atomic update, tracking only the new + * state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (new_connector_state) = (__state)->connectors[__i].new_state, 1)) + +/** + * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state + * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update, tracking both old and + * new state. This is useful in places where the state delta needs to be + * considered, for example in atomic check functions. 
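+ *
+ * A minimal usage sketch (hypothetical driver code, not part of this header;
+ * my_atomic_check() and MY_MAX_ACTIVE_CRTCS are illustrative names, and the
+ * final call into drm_atomic_helper_check() is just one possible choice)::
+ *
+ *   static int my_atomic_check(struct drm_device *dev,
+ *                              struct drm_atomic_state *state)
+ *   {
+ *           struct drm_crtc *crtc;
+ *           struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ *           int i, enabling = 0;
+ *
+ *           for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ *                                         new_crtc_state, i) {
+ *                   if (!old_crtc_state->active && new_crtc_state->active)
+ *                           enabling++;
+ *           }
+ *
+ *           if (enabling > MY_MAX_ACTIVE_CRTCS)
+ *                   return -EINVAL;
+ *
+ *           return drm_atomic_helper_check(dev, state);
+ *   }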
+ */ +#define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i)++) \ + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (old_crtc_state) = (__state)->crtcs[__i].old_state, \ + (new_crtc_state) = (__state)->crtcs[__i].new_state, 1)) + +/** + * for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update, tracking only the old + * state. This is useful in disable functions, where we need the old state the + * hardware is still in. + */ +#define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i)++) \ + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (old_crtc_state) = (__state)->crtcs[__i].old_state, 1)) + +/** + * for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update, tracking only the new + * state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i)++) \ + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (new_crtc_state) = (__state)->crtcs[__i].new_state, 1)) + +/** + * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @old_plane_state: &struct drm_plane_state iteration cursor for the old state + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update, tracking both old and + * new state. This is useful in places where the state delta needs to be + * considered, for example in atomic check functions. 
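+ *
+ * A minimal sketch of comparing old and new state (hypothetical driver code;
+ * my_handle_fb_change() is an illustrative helper, not part of this header)::
+ *
+ *   static void my_note_fb_changes(struct drm_atomic_state *state)
+ *   {
+ *           struct drm_plane *plane;
+ *           struct drm_plane_state *old_state, *new_state;
+ *           int i;
+ *
+ *           for_each_oldnew_plane_in_state(state, plane, old_state,
+ *                                          new_state, i) {
+ *                   if (old_state->fb != new_state->fb)
+ *                           my_handle_fb_change(plane, new_state);
+ *           }
+ *   }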
+ */ +#define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ + (__i)++) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (old_plane_state) = (__state)->planes[__i].old_state,\ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) + +/** + * for_each_oldnew_plane_in_state_reverse - iterate over all planes in an atomic + * update in reverse order + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @old_plane_state: &struct drm_plane_state iteration cursor for the old state + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update in reverse order, + * tracking both old and new state. This is useful in places where the + * state delta needs to be considered, for example in atomic check functions. + */ +#define for_each_oldnew_plane_in_state_reverse(__state, plane, old_plane_state, new_plane_state, __i) \ + for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \ + (__i) >= 0; \ + (__i)--) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (old_plane_state) = (__state)->planes[__i].old_state,\ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) + +/** + * for_each_old_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @old_plane_state: &struct drm_plane_state iteration cursor for the old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update, tracking only the old + * state. This is useful in disable functions, where we need the old state the + * hardware is still in. + */ +#define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ + (__i)++) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (old_plane_state) = (__state)->planes[__i].old_state, 1)) +/** + * for_each_new_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update, tracking only the new + * state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. 
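+ *
+ * A minimal sketch of a commit-time loop (hypothetical driver code;
+ * my_program_plane() is an illustrative helper, not part of this header)::
+ *
+ *   static void my_commit_planes(struct drm_atomic_state *state)
+ *   {
+ *           struct drm_plane *plane;
+ *           struct drm_plane_state *new_state;
+ *           int i;
+ *
+ *           for_each_new_plane_in_state(state, plane, new_state, i) {
+ *                   if (new_state->visible)
+ *                           my_program_plane(plane, new_state);
+ *           }
+ *   }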
+ */ +#define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ + (__i)++) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) + +/** + * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @old_obj_state: &struct drm_private_state iteration cursor for the old state + * @new_obj_state: &struct drm_private_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking both + * old and new state. This is useful in places where the state delta needs + * to be considered, for example in atomic check functions. + */ +#define for_each_oldnew_private_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (old_obj_state) = (__state)->private_objs[__i].old_state, \ + (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ + (__i)++) + +/** + * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @old_obj_state: &struct drm_private_state iteration cursor for the old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking only + * the old state. This is useful in disable functions, where we need the old + * state the hardware is still in. + */ +#define for_each_old_private_obj_in_state(__state, obj, old_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \ + (__i)++) + +/** + * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @new_obj_state: &struct drm_private_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking only + * the new state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_private_obj_in_state(__state, obj, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ + (__i)++) + +/** + * drm_atomic_crtc_needs_modeset - compute combined modeset need + * @state: &drm_crtc_state for the CRTC + * + * To give drivers flexibility &struct drm_crtc_state has 3 booleans to track + * whether the state CRTC changed enough to need a full modeset cycle: + * mode_changed, active_changed and connectors_changed. This helper simply + * combines these three to compute the overall need for a modeset for @state. 
+ * + * The atomic helper code sets these booleans, but drivers can and should + * change them appropriately to accurately represent whether a modeset is + * really needed. In general, drivers should avoid full modesets whenever + * possible. + * + * For example if the CRTC mode has changed, and the hardware is able to enact + * the requested mode change without going through a full modeset, the driver + * should clear mode_changed in its &drm_mode_config_funcs.atomic_check + * implementation. + */ +static inline bool +drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) +{ + return state->mode_changed || state->active_changed || + state->connectors_changed; +} + +#endif /* DRM_ATOMIC_H_ */ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h new file mode 100644 index 000000000..99e2a5297 --- /dev/null +++ b/include/drm/drm_atomic_helper.h @@ -0,0 +1,265 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Rob Clark + * Daniel Vetter + */ + +#ifndef DRM_ATOMIC_HELPER_H_ +#define DRM_ATOMIC_HELPER_H_ + +#include +#include +#include + +struct drm_atomic_state; +struct drm_private_obj; +struct drm_private_state; + +int drm_atomic_helper_check_modeset(struct drm_device *dev, + struct drm_atomic_state *state); +int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, + const struct drm_crtc_state *crtc_state, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled); +int drm_atomic_helper_check_planes(struct drm_device *dev, + struct drm_atomic_state *state); +int drm_atomic_helper_check(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_tail(struct drm_atomic_state *state); +void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state); +int drm_atomic_helper_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock); +int drm_atomic_helper_async_check(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_async_commit(struct drm_device *dev, + struct drm_atomic_state *state); + +int drm_atomic_helper_wait_for_fences(struct drm_device *dev, + struct drm_atomic_state *state, + bool pre_swap); + +void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void +drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, + struct drm_atomic_state *old_state); + +int drm_atomic_helper_prepare_planes(struct drm_device *dev, + struct drm_atomic_state *state); + +#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0) +#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1) + +void drm_atomic_helper_commit_planes(struct drm_device *dev, + struct drm_atomic_state *state, + uint32_t flags); +void drm_atomic_helper_cleanup_planes(struct drm_device *dev, + struct drm_atomic_state *old_state); +void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); +void +drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, + bool atomic); + +int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state, + bool stall); + +/* nonblocking commit helpers */ +int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, + bool nonblock); +void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state); +void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state); +void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state); +void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state); + +/* implementations for legacy interfaces */ +int drm_atomic_helper_update_plane(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + struct drm_modeset_acquire_ctx *ctx); +int drm_atomic_helper_disable_plane(struct drm_plane *plane, + struct drm_modeset_acquire_ctx *ctx); +int __drm_atomic_helper_disable_plane(struct drm_plane *plane, + struct drm_plane_state *plane_state); +int drm_atomic_helper_set_config(struct drm_mode_set *set, 
+ struct drm_modeset_acquire_ctx *ctx); +int __drm_atomic_helper_set_config(struct drm_mode_set *set, + struct drm_atomic_state *state); + +int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); +void drm_atomic_helper_shutdown(struct drm_device *dev); +struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev); +int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, + struct drm_modeset_acquire_ctx *ctx); +int drm_atomic_helper_resume(struct drm_device *dev, + struct drm_atomic_state *state); + +int drm_atomic_helper_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + struct drm_modeset_acquire_ctx *ctx); +int drm_atomic_helper_page_flip_target( + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + uint32_t target, + struct drm_modeset_acquire_ctx *ctx); +struct drm_encoder * +drm_atomic_helper_best_encoder(struct drm_connector *connector); + +/* default implementations for state handling */ +void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); +struct drm_crtc_state * +drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state); +void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); + +void drm_atomic_helper_plane_reset(struct drm_plane *plane); +void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, + struct drm_plane_state *state); +struct drm_plane_state * +drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane); +void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state); +void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state); + +void __drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state); +void drm_atomic_helper_connector_reset(struct drm_connector *connector); +void +__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, + struct drm_connector_state *state); +struct drm_connector_state * +drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector); +struct drm_atomic_state * +drm_atomic_helper_duplicate_state(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); +void +__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); +void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state); +int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, + u16 *red, u16 *green, u16 *blue, + uint32_t size, + struct drm_modeset_acquire_ctx *ctx); +void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj, + struct drm_private_state *state); + +/** + * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC + * @plane: the loop cursor + * @crtc: the crtc whose planes are iterated + * + * This iterates over the current state, useful (for example) when applying + * atomic state after it has been checked and swapped. To iterate over the + * planes which *will* be attached (more useful in code called from + * &drm_mode_config_funcs.atomic_check) see + * drm_atomic_crtc_state_for_each_plane(). 
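+ *
+ * A minimal sketch (hypothetical driver code; my_flush_plane() is an
+ * illustrative helper, not part of this header)::
+ *
+ *   static void my_crtc_flush(struct drm_crtc *crtc)
+ *   {
+ *           struct drm_plane *plane;
+ *
+ *           drm_atomic_crtc_for_each_plane(plane, crtc)
+ *                   my_flush_plane(plane);
+ *   }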
+ */
+#define drm_atomic_crtc_for_each_plane(plane, crtc) \
+ drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
+
+/**
+ * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_atomic_crtc_for_each_plane(), but iterates the planes that
+ * will be attached if the specified state is applied. Useful, for example,
+ * in code called from &drm_mode_config_funcs.atomic_check operations, to
+ * validate the incoming state.
+ */
+#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
+ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+
+/**
+ * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @plane_state: loop cursor for the plane's state, must be const
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_atomic_crtc_for_each_plane(), but iterates the planes that
+ * will be attached if the specified state is applied. Useful, for example,
+ * in code called from &drm_mode_config_funcs.atomic_check operations, to
+ * validate the incoming state.
+ *
+ * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
+ * const plane_state. This is useful when a driver just wants to peek at other
+ * active planes on this crtc, but does not need to change it.
+ */
+#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \
+ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \
+ for_each_if ((plane_state = \
+ __drm_atomic_get_current_plane_state((crtc_state)->state, \
+ plane)))
+
+/**
+ * drm_atomic_plane_disabling - check whether a plane is being disabled
+ * @old_plane_state: old atomic plane state
+ * @new_plane_state: new atomic plane state
+ *
+ * Checks the atomic state of a plane to determine whether it's being disabled
+ * or not. This also WARNs if it detects an invalid state (both CRTC and FB
+ * need to either both be NULL or both be non-NULL).
+ *
+ * RETURNS:
+ * True if the plane is being disabled, false otherwise.
+ */
+static inline bool
+drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ /*
+ * When disabling a plane, CRTC and FB should always be NULL together.
+ * Anything else should be considered a bug in the atomic core, so we
+ * gently warn about it.
+ */
+ WARN_ON((new_plane_state->crtc == NULL && new_plane_state->fb != NULL) ||
+ (new_plane_state->crtc != NULL && new_plane_state->fb == NULL));
+
+ return old_plane_state->crtc && !new_plane_state->crtc;
+}
+
+#endif /* DRM_ATOMIC_HELPER_H_ */
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
new file mode 100644
index 000000000..4923b0032
--- /dev/null
+++ b/include/drm/drm_audio_component.h
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: MIT
+// Copyright © 2014 Intel Corporation
+
+#ifndef _DRM_AUDIO_COMPONENT_H_
+#define _DRM_AUDIO_COMPONENT_H_
+
+struct drm_audio_component;
+
+/**
+ * struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver
+ */
+struct drm_audio_component_ops {
+ /**
+ * @owner: drm module to pin down
+ */
+ struct module *owner;
+ /**
+ * @get_power: get the POWER_DOMAIN_AUDIO power well
+ *
+ * Request the power well to be turned on.
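+ *
+ * As a hedged sketch, a DRM driver that manages this power well through
+ * runtime PM might back @get_power and @put_power roughly like this
+ * (my_get_power()/my_put_power() are made-up names, and the assumption
+ * that runtime PM covers the audio power well is driver specific)::
+ *
+ *   static void my_get_power(struct device *kdev)
+ *   {
+ *           pm_runtime_get_sync(kdev);
+ *   }
+ *
+ *   static void my_put_power(struct device *kdev)
+ *   {
+ *           pm_runtime_put(kdev);
+ *   }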
+ */ + void (*get_power)(struct device *); + /** + * @put_power: put the POWER_DOMAIN_AUDIO power well + * + * Allow the power well to be turned off. + */ + void (*put_power)(struct device *); + /** + * @codec_wake_override: Enable/disable codec wake signal + */ + void (*codec_wake_override)(struct device *, bool enable); + /** + * @get_cdclk_freq: Get the Core Display Clock in kHz + */ + int (*get_cdclk_freq)(struct device *); + /** + * @sync_audio_rate: set n/cts based on the sample rate + * + * Called from audio driver. After audio driver sets the + * sample rate, it will call this function to set n/cts + */ + int (*sync_audio_rate)(struct device *, int port, int pipe, int rate); + /** + * @get_eld: fill the audio state and ELD bytes for the given port + * + * Called from audio driver to get the HDMI/DP audio state of the given + * digital port, and also fetch ELD bytes to the given pointer. + * + * It returns the byte size of the original ELD (not the actually + * copied size), zero for an invalid ELD, or a negative error code. + * + * Note that the returned size may be over @max_bytes. Then it + * implies that only a part of ELD has been copied to the buffer. + */ + int (*get_eld)(struct device *, int port, int pipe, bool *enabled, + unsigned char *buf, int max_bytes); +}; + +/** + * struct drm_audio_component_audio_ops - Ops implemented by hda driver, called by DRM driver + */ +struct drm_audio_component_audio_ops { + /** + * @audio_ptr: Pointer to be used in call to pin_eld_notify + */ + void *audio_ptr; + /** + * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed + * + * Called when the DRM driver has set up audio pipeline or has just + * begun to tear it down. This allows the HDA driver to update its + * status accordingly (even when the HDA controller is in power save + * mode). + */ + void (*pin_eld_notify)(void *audio_ptr, int port, int pipe); + /** + * @pin2port: Check and convert from pin node to port number + * + * Called by HDA driver to check and convert from the pin widget node + * number to a port number in the graphics side. + */ + int (*pin2port)(void *audio_ptr, int pin); + /** + * @master_bind: (Optional) component master bind callback + * + * Called at binding master component, for HDA codec-specific + * handling of dynamic binding. + */ + int (*master_bind)(struct device *dev, struct drm_audio_component *); + /** + * @master_unbind: (Optional) component master unbind callback + * + * Called at unbinding master component, for HDA codec-specific + * handling of dynamic unbinding. 
+ */ + void (*master_unbind)(struct device *dev, struct drm_audio_component *); +}; + +/** + * struct drm_audio_component - Used for direct communication between DRM and hda drivers + */ +struct drm_audio_component { + /** + * @dev: DRM device, used as parameter for ops + */ + struct device *dev; + /** + * @ops: Ops implemented by DRM driver, called by hda driver + */ + const struct drm_audio_component_ops *ops; + /** + * @audio_ops: Ops implemented by hda driver, called by DRM driver + */ + const struct drm_audio_component_audio_ops *audio_ops; +}; + +#endif /* _DRM_AUDIO_COMPONENT_H_ */ diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h new file mode 100644 index 000000000..86bff9841 --- /dev/null +++ b/include/drm/drm_auth.h @@ -0,0 +1,106 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 2016 Intel Corporation + * + * Author: Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_AUTH_H_ +#define _DRM_AUTH_H_ + +/* + * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for + * include ordering reasons. + * + * DO NOT USE. + */ +struct drm_lock_data { + struct drm_hw_lock *hw_lock; + struct drm_file *file_priv; + wait_queue_head_t lock_queue; + unsigned long lock_time; + spinlock_t spinlock; + uint32_t kernel_waiters; + uint32_t user_waiters; + int idle_has_lock; +}; + +/** + * struct drm_master - drm master structure + * + * @refcount: Refcount for this master object. + * @dev: Link back to the DRM device + * @lock: DRI1 lock information. + * @driver_priv: Pointer to driver-private information. + * @lessor: Lease holder + * @lessee_id: id for lessees. Owners always have id 0 + * @lessee_list: other lessees of the same master + * @lessees: drm_masters leasing from this one + * @leases: Objects leased to this drm_master. + * @lessee_idr: All lessees under this owner (only used where lessor == NULL) + * + * Note that master structures are only relevant for the legacy/primary device + * nodes, hence there can only be one per device, not one per drm_minor. + */ +struct drm_master { + struct kref refcount; + struct drm_device *dev; + /** + * @unique: Unique identifier: e.g. busid. Protected by + * &drm_device.master_mutex. + */ + char *unique; + /** + * @unique_len: Length of unique field. Protected by + * &drm_device.master_mutex. + */ + int unique_len; + /** + * @magic_map: Map of used authentication tokens. 
Protected by + * &drm_device.master_mutex. + */ + struct idr magic_map; + struct drm_lock_data lock; + void *driver_priv; + + /* Tree of display resource leases, each of which is a drm_master struct + * All of these get activated simultaneously, so drm_device master points + * at the top of the tree (for which lessor is NULL). Protected by + * &drm_device.mode_config.idr_mutex. + */ + + struct drm_master *lessor; + int lessee_id; + struct list_head lessee_list; + struct list_head lessees; + struct idr leases; + struct idr lessee_idr; +}; + +struct drm_master *drm_master_get(struct drm_master *master); +void drm_master_put(struct drm_master **master); +bool drm_is_current_master(struct drm_file *fpriv); + +struct drm_master *drm_master_create(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h new file mode 100644 index 000000000..330c561c4 --- /dev/null +++ b/include/drm/drm_blend.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef __DRM_BLEND_H__ +#define __DRM_BLEND_H__ + +#include +#include +#include + +struct drm_device; +struct drm_atomic_state; +struct drm_plane; + +static inline bool drm_rotation_90_or_270(unsigned int rotation) +{ + return rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270); +} + +#define DRM_BLEND_ALPHA_OPAQUE 0xffff + +int drm_plane_create_alpha_property(struct drm_plane *plane); +int drm_plane_create_rotation_property(struct drm_plane *plane, + unsigned int rotation, + unsigned int supported_rotations); +unsigned int drm_rotation_simplify(unsigned int rotation, + unsigned int supported_rotations); + +int drm_plane_create_zpos_property(struct drm_plane *plane, + unsigned int zpos, + unsigned int min, unsigned int max); +int drm_plane_create_zpos_immutable_property(struct drm_plane *plane, + unsigned int zpos); +int drm_atomic_normalize_zpos(struct drm_device *dev, + struct drm_atomic_state *state); +#endif diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h new file mode 100644 index 000000000..bd850747c --- /dev/null +++ b/include/drm/drm_bridge.h @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_BRIDGE_H__ +#define __DRM_BRIDGE_H__ + +#include +#include +#include +#include + +struct drm_bridge; +struct drm_bridge_timings; +struct drm_panel; + +/** + * struct drm_bridge_funcs - drm_bridge control functions + */ +struct drm_bridge_funcs { + /** + * @attach: + * + * This callback is invoked whenever our bridge is being attached to a + * &drm_encoder. + * + * The attach callback is optional. + * + * RETURNS: + * + * Zero on success, error code on failure. + */ + int (*attach)(struct drm_bridge *bridge); + + /** + * @detach: + * + * This callback is invoked whenever our bridge is being detached from a + * &drm_encoder. + * + * The detach callback is optional. + */ + void (*detach)(struct drm_bridge *bridge); + + /** + * @mode_valid: + * + * This callback is used to check if a specific mode is valid in this + * bridge. This should be implemented if the bridge has some sort of + * restriction in the modes it can display. For example, a given bridge + * may be responsible to set a clock value. If the clock can not + * produce all the values for the available modes then this callback + * can be used to restrict the number of modes to only the ones that + * can be displayed. 
+ *
+ * This hook is used by the probe helpers to filter the mode list in
+ * drm_helper_probe_single_connector_modes(), and it is used by the
+ * atomic helpers to validate modes supplied by userspace in
+ * drm_atomic_helper_check_modeset().
+ *
+ * This function is optional.
+ *
+ * NOTE:
+ *
+ * Since this function is both called from the check phase of an atomic
+ * commit and the mode validation in the probe paths, it is not allowed
+ * to look at anything else but the passed-in mode, and validate it
+ * against configuration-invariant hardware constraints. Any further
+ * limits which depend upon the configuration can only be checked in
+ * @mode_fixup.
+ *
+ * RETURNS:
+ *
+ * drm_mode_status Enum
+ */
+ enum drm_mode_status (*mode_valid)(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode);
+
+ /**
+ * @mode_fixup:
+ *
+ * This callback is used to validate and adjust a mode. The parameter
+ * mode is the display mode that should be fed to the next element in
+ * the display chain, either the final &drm_connector or the next
+ * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
+ * requires. It can be modified by this callback and does not need to
+ * match mode. See also &drm_crtc_state.adjusted_mode for more details.
+ *
+ * This is the only hook that allows a bridge to reject a modeset. If
+ * this function passes, all other callbacks must succeed for this
+ * configuration.
+ *
+ * The mode_fixup callback is optional.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of atomic modesets, which
+ * can be aborted for any reason (including on userspace's request to
+ * just check whether a configuration would be possible). Drivers MUST
+ * NOT touch any persistent state (hardware or software) or data
+ * structures except the passed in @state parameter.
+ *
+ * Also beware that userspace can request its own custom modes; neither
+ * core nor helpers filter modes to the list of probe modes reported by
+ * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure
+ * that modes are filtered consistently, put any bridge constraints and
+ * limits checks into @mode_valid.
+ *
+ * RETURNS:
+ *
+ * True if an acceptable configuration is possible, false if the modeset
+ * operation should be rejected.
+ */
+ bool (*mode_fixup)(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ /**
+ * @disable:
+ *
+ * This callback should disable the bridge. It is called right before
+ * the preceding element in the display pipe is disabled. If the
+ * preceding element is a bridge this means it's called before that
+ * bridge's @disable vfunc. If the preceding element is a &drm_encoder
+ * it's called right before the &drm_encoder_helper_funcs.disable,
+ * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms
+ * hook.
+ *
+ * The bridge can assume that the display pipe (i.e. clocks and timing
+ * signals) feeding it is still running when this callback is called.
+ *
+ * The disable callback is optional.
+ */
+ void (*disable)(struct drm_bridge *bridge);
+
+ /**
+ * @post_disable:
+ *
+ * This callback should disable the bridge. It is called right after the
+ * preceding element in the display pipe is disabled. If the preceding
+ * element is a bridge this means it's called after that bridge's
+ * @post_disable function.
If the preceding element is a &drm_encoder
+ * it's called right after the encoder's
+ * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare
+ * or &drm_encoder_helper_funcs.dpms hook.
+ *
+ * The bridge must assume that the display pipe (i.e. clocks and timing
+ * signals) feeding it is no longer running when this callback is
+ * called.
+ *
+ * The post_disable callback is optional.
+ */
+ void (*post_disable)(struct drm_bridge *bridge);
+
+ /**
+ * @mode_set:
+ *
+ * This callback should set the given mode on the bridge. It is called
+ * after the @mode_set callback for the preceding element in the display
+ * pipeline has been called already. If the bridge is the first element
+ * then this would be &drm_encoder_helper_funcs.mode_set. The display
+ * pipe (i.e. clocks and timing signals) is off when this function is
+ * called.
+ *
+ * The adjusted_mode parameter is the mode output by the CRTC for the
+ * first bridge in the chain. It can be different from the mode
+ * parameter that contains the desired mode for the connector at the end
+ * of the bridges chain, for instance when the first bridge in the chain
+ * performs scaling. The adjusted mode is mostly useful for the first
+ * bridge in the chain and is likely irrelevant for the other bridges.
+ *
+ * For atomic drivers the adjusted_mode is the mode stored in
+ * &drm_crtc_state.adjusted_mode.
+ *
+ * NOTE:
+ *
+ * If a need arises to store and access modes adjusted for other
+ * locations than the connection between the CRTC and the first bridge,
+ * the DRM framework will have to be extended with DRM bridge states.
+ */
+ void (*mode_set)(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ /**
+ * @pre_enable:
+ *
+ * This callback should enable the bridge. It is called right before
+ * the preceding element in the display pipe is enabled. If the
+ * preceding element is a bridge this means it's called before that
+ * bridge's @pre_enable function. If the preceding element is a
+ * &drm_encoder it's called right before the encoder's
+ * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
+ * &drm_encoder_helper_funcs.dpms hook.
+ *
+ * The display pipe (i.e. clocks and timing signals) feeding this bridge
+ * will not yet be running when this callback is called. The bridge must
+ * not enable the display link feeding the next bridge in the chain (if
+ * there is one) when this callback is called.
+ *
+ * The pre_enable callback is optional.
+ */
+ void (*pre_enable)(struct drm_bridge *bridge);
+
+ /**
+ * @enable:
+ *
+ * This callback should enable the bridge. It is called right after
+ * the preceding element in the display pipe is enabled. If the
+ * preceding element is a bridge this means it's called after that
+ * bridge's @enable function. If the preceding element is a
+ * &drm_encoder it's called right after the encoder's
+ * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
+ * &drm_encoder_helper_funcs.dpms hook.
+ *
+ * The bridge can assume that the display pipe (i.e. clocks and timing
+ * signals) feeding it is running when this callback is called. This
+ * callback must enable the display link feeding the next bridge in the
+ * chain if there is one.
+ *
+ * The enable callback is optional.
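+ *
+ * As a rough sketch, a bridge driver wires these hooks up through a
+ * &drm_bridge_funcs table (hypothetical code; the my_bridge_* functions
+ * are illustrative, and a driver only needs to fill in the hooks it uses)::
+ *
+ *   static const struct drm_bridge_funcs my_bridge_funcs = {
+ *           .attach = my_bridge_attach,
+ *           .mode_valid = my_bridge_mode_valid,
+ *           .mode_set = my_bridge_mode_set,
+ *           .pre_enable = my_bridge_pre_enable,
+ *           .enable = my_bridge_enable,
+ *           .disable = my_bridge_disable,
+ *           .post_disable = my_bridge_post_disable,
+ *   };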
+ */ + void (*enable)(struct drm_bridge *bridge); +}; + +/** + * struct drm_bridge_timings - timing information for the bridge + */ +struct drm_bridge_timings { + /** + * @sampling_edge: + * + * Tells whether the bridge samples the digital input signal + * from the display engine on the positive or negative edge of the + * clock, this should reuse the DRM_BUS_FLAG_PIXDATA_[POS|NEG]EDGE + * bitwise flags from the DRM connector (bit 2 and 3 valid). + */ + u32 sampling_edge; + /** + * @setup_time_ps: + * + * Defines the time in picoseconds the input data lines must be + * stable before the clock edge. + */ + u32 setup_time_ps; + /** + * @hold_time_ps: + * + * Defines the time in picoseconds taken for the bridge to sample the + * input signal after the clock edge. + */ + u32 hold_time_ps; +}; + +/** + * struct drm_bridge - central DRM bridge control structure + */ +struct drm_bridge { + /** @dev: DRM device this bridge belongs to */ + struct drm_device *dev; + /** @encoder: encoder to which this bridge is connected */ + struct drm_encoder *encoder; + /** @next: the next bridge in the encoder chain */ + struct drm_bridge *next; +#ifdef CONFIG_OF + /** @of_node: device node pointer to the bridge */ + struct device_node *of_node; +#endif + /** @list: to keep track of all added bridges */ + struct list_head list; + /** + * @timings: + * + * the timing specification for the bridge, if any (may be NULL) + */ + const struct drm_bridge_timings *timings; + /** @funcs: control functions */ + const struct drm_bridge_funcs *funcs; + /** @driver_private: pointer to the bridge driver's internal context */ + void *driver_private; +}; + +void drm_bridge_add(struct drm_bridge *bridge); +void drm_bridge_remove(struct drm_bridge *bridge); +struct drm_bridge *of_drm_find_bridge(struct device_node *np); +int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, + struct drm_bridge *previous); + +bool drm_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_mode *mode); +void drm_bridge_disable(struct drm_bridge *bridge); +void drm_bridge_post_disable(struct drm_bridge *bridge); +void drm_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void drm_bridge_pre_enable(struct drm_bridge *bridge); +void drm_bridge_enable(struct drm_bridge *bridge); + +#ifdef CONFIG_DRM_PANEL_BRIDGE +struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel, + u32 connector_type); +void drm_panel_bridge_remove(struct drm_bridge *bridge); +struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, + struct drm_panel *panel, + u32 connector_type); +#endif + +#endif diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h new file mode 100644 index 000000000..97fc498dc --- /dev/null +++ b/include/drm/drm_cache.h @@ -0,0 +1,73 @@ +/************************************************************************** + * + * Copyright 2009 Red Hat Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Authors: + * Dave Airlie + */ + +#ifndef _DRM_CACHE_H_ +#define _DRM_CACHE_H_ + +#include + +void drm_clflush_pages(struct page *pages[], unsigned long num_pages); +void drm_clflush_sg(struct sg_table *st); +void drm_clflush_virt_range(void *addr, unsigned long length); +u64 drm_get_max_iomem(void); + + +static inline bool drm_arch_can_wc_memory(void) +{ +#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE) + return false; +#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3) + return false; +#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) + /* + * The DRM driver stack is designed to work with cache coherent devices + * only, but permits an optimization to be enabled in some cases, where + * for some buffers, both the CPU and the GPU use uncached mappings, + * removing the need for DMA snooping and allocation in the CPU caches. + * + * The use of uncached GPU mappings relies on the correct implementation + * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU + * will use cached mappings nonetheless. On x86 platforms, this does not + * seem to matter, as uncached CPU mappings will snoop the caches in any + * case. However, on ARM and arm64, enabling this optimization on a + * platform where NoSnoop is ignored results in loss of coherency, which + * breaks correct operation of the device. Since we have no way of + * detecting whether NoSnoop works or not, just disable this + * optimization entirely for ARM and arm64. + */ + return false; +#else + return true; +#endif +} + +#endif diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h new file mode 100644 index 000000000..971bb7853 --- /dev/null +++ b/include/drm/drm_client.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DRM_CLIENT_H_ +#define _DRM_CLIENT_H_ + +#include + +struct drm_client_dev; +struct drm_device; +struct drm_file; +struct drm_framebuffer; +struct drm_gem_object; +struct drm_minor; +struct module; + +/** + * struct drm_client_funcs - DRM client callbacks + */ +struct drm_client_funcs { + /** + * @owner: The module owner + */ + struct module *owner; + + /** + * @unregister: + * + * Called when &drm_device is unregistered. The client should respond by + * releasing it's resources using drm_client_release(). 
+ * + * This callback is optional. + */ + void (*unregister)(struct drm_client_dev *client); + + /** + * @restore: + * + * Called on drm_lastclose(). The first client instance in the list that + * returns zero gets the privilege to restore and no more clients are + * called. This callback is not called after @unregister has been called. + * + * This callback is optional. + */ + int (*restore)(struct drm_client_dev *client); + + /** + * @hotplug: + * + * Called on drm_kms_helper_hotplug_event(). + * This callback is not called after @unregister has been called. + * + * This callback is optional. + */ + int (*hotplug)(struct drm_client_dev *client); +}; + +/** + * struct drm_client_dev - DRM client instance + */ +struct drm_client_dev { + /** + * @dev: DRM device + */ + struct drm_device *dev; + + /** + * @name: Name of the client. + */ + const char *name; + + /** + * @list: + * + * List of all clients of a DRM device, linked into + * &drm_device.clientlist. Protected by &drm_device.clientlist_mutex. + */ + struct list_head list; + + /** + * @funcs: DRM client functions (optional) + */ + const struct drm_client_funcs *funcs; + + /** + * @file: DRM file + */ + struct drm_file *file; +}; + +int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, + const char *name, const struct drm_client_funcs *funcs); +void drm_client_release(struct drm_client_dev *client); +void drm_client_add(struct drm_client_dev *client); + +void drm_client_dev_unregister(struct drm_device *dev); +void drm_client_dev_hotplug(struct drm_device *dev); +void drm_client_dev_restore(struct drm_device *dev); + +/** + * struct drm_client_buffer - DRM client buffer + */ +struct drm_client_buffer { + /** + * @client: DRM client + */ + struct drm_client_dev *client; + + /** + * @handle: Buffer handle + */ + u32 handle; + + /** + * @pitch: Buffer pitch + */ + u32 pitch; + + /** + * @gem: GEM object backing this buffer + */ + struct drm_gem_object *gem; + + /** + * @vaddr: Virtual address for the buffer + */ + void *vaddr; + + /** + * @fb: DRM framebuffer + */ + struct drm_framebuffer *fb; +}; + +struct drm_client_buffer * +drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); +void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); + +int drm_client_debugfs_init(struct drm_minor *minor); + +#endif diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h new file mode 100644 index 000000000..44f04233e --- /dev/null +++ b/include/drm/drm_color_mgmt.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_COLOR_MGMT_H__ +#define __DRM_COLOR_MGMT_H__ + +#include + +struct drm_crtc; +struct drm_plane; + +uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision); + +void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, + uint degamma_lut_size, + bool has_ctm, + uint gamma_lut_size); + +int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, + int gamma_size); + +/** + * drm_color_lut_size - calculate the number of entries in the LUT + * @blob: blob containing the LUT + * + * Returns: + * The number of entries in the color LUT stored in @blob. + */ +static inline int drm_color_lut_size(const struct drm_property_blob *blob) +{ + return blob->length / sizeof(struct drm_color_lut); +} + +enum drm_color_encoding { + DRM_COLOR_YCBCR_BT601, + DRM_COLOR_YCBCR_BT709, + DRM_COLOR_YCBCR_BT2020, + DRM_COLOR_ENCODING_MAX, +}; + +enum drm_color_range { + DRM_COLOR_YCBCR_LIMITED_RANGE, + DRM_COLOR_YCBCR_FULL_RANGE, + DRM_COLOR_RANGE_MAX, +}; + +int drm_plane_create_color_properties(struct drm_plane *plane, + u32 supported_encodings, + u32 supported_ranges, + enum drm_color_encoding default_encoding, + enum drm_color_range default_range); +#endif diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h new file mode 100644 index 000000000..e5f641cda --- /dev/null +++ b/include/drm/drm_connector.h @@ -0,0 +1,1344 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */
+
+#ifndef __DRM_CONNECTOR_H__
+#define __DRM_CONNECTOR_H__
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+struct drm_connector_helper_funcs;
+struct drm_modeset_acquire_ctx;
+struct drm_device;
+struct drm_crtc;
+struct drm_encoder;
+struct drm_property;
+struct drm_property_blob;
+struct drm_printer;
+struct edid;
+
+enum drm_connector_force {
+ DRM_FORCE_UNSPECIFIED,
+ DRM_FORCE_OFF,
+ DRM_FORCE_ON, /* force on analog part normally */
+ DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
+/**
+ * enum drm_connector_status - status for a &drm_connector
+ *
+ * This enum is used to track the connector status. There are no separate
+ * #defines for the uapi!
+ */
+enum drm_connector_status {
+ /**
+ * @connector_status_connected: The connector is definitely connected to
+ * a sink device, and can be enabled.
+ */
+ connector_status_connected = 1,
+ /**
+ * @connector_status_disconnected: The connector isn't connected to a
+ * sink device which can be autodetected. For digital outputs like DP or
+ * HDMI (which can be reliably probed) this means there's really
+ * nothing there. It is driver-dependent whether a connector with this
+ * status can be lit up or not.
+ */
+ connector_status_disconnected = 2,
+ /**
+ * @connector_status_unknown: The connector's status could not be
+ * reliably detected. This happens when probing would either cause
+ * flicker (like load-detection when the connector is in use), or when a
+ * hardware resource isn't available (like when load-detection needs a
+ * free CRTC). It should be possible to light up the connector with one
+ * of the listed fallback modes. For default configuration, userspace
+ * should only try to light up connectors with unknown status when
+ * there's no connector with @connector_status_connected.
+ */
+ connector_status_unknown = 3,
+};
+
+/**
+ * enum drm_connector_registration_state - userspace registration status for
+ * a &drm_connector
+ *
+ * This enum is used to track the status of initializing a connector and
+ * registering it with userspace, so that DRM can prevent bogus modesets on
+ * connectors that no longer exist.
+ */
+enum drm_connector_registration_state {
+ /**
+ * @DRM_CONNECTOR_INITIALIZING: The connector has just been created,
+ * but has yet to be exposed to userspace. There should be no
+ * additional restrictions to how the state of this connector may be
+ * modified.
+ */
+ DRM_CONNECTOR_INITIALIZING = 0,
+
+ /**
+ * @DRM_CONNECTOR_REGISTERED: The connector has been fully initialized
+ * and registered with sysfs, as such it has been exposed to
+ * userspace. There should be no additional restrictions to how the
+ * state of this connector may be modified.
+ */
+ DRM_CONNECTOR_REGISTERED = 1,
+
+ /**
+ * @DRM_CONNECTOR_UNREGISTERED: The connector has either been exposed
+ * to userspace and has since been unregistered and removed from
+ * userspace, or the connector was unregistered before it had a chance
+ * to be exposed to userspace (e.g. still in the
+ * @DRM_CONNECTOR_INITIALIZING state). When a connector is
+ * unregistered, there are additional restrictions to how its state
+ * may be modified:
+ *
+ * - An unregistered connector may only have its DPMS changed from
+ * On->Off. Once DPMS is changed to Off, it may not be switched back
+ * to On.
+ * - Modesets are not allowed on unregistered connectors, unless they
+ * would result in disabling its assigned CRTCs. This means
+ * disabling a CRTC on an unregistered connector is OK, but enabling
+ * one is not.
+ * - Removing a CRTC from an unregistered connector is OK, but new + * CRTCs may never be assigned to an unregistered connector. + */ + DRM_CONNECTOR_UNREGISTERED = 2, +}; + +enum subpixel_order { + SubPixelUnknown = 0, + SubPixelHorizontalRGB, + SubPixelHorizontalBGR, + SubPixelVerticalRGB, + SubPixelVerticalBGR, + SubPixelNone, + +}; + +/** + * struct drm_scrambling: sink's scrambling support. + */ +struct drm_scrambling { + /** + * @supported: scrambling supported for rates > 340 Mhz. + */ + bool supported; + /** + * @low_rates: scrambling supported for rates <= 340 Mhz. + */ + bool low_rates; +}; + +/* + * struct drm_scdc - Information about scdc capabilities of a HDMI 2.0 sink + * + * Provides SCDC register support and capabilities related information on a + * HDMI 2.0 sink. In case of a HDMI 1.4 sink, all parameter must be 0. + */ +struct drm_scdc { + /** + * @supported: status control & data channel present. + */ + bool supported; + /** + * @read_request: sink is capable of generating scdc read request. + */ + bool read_request; + /** + * @scrambling: sink's scrambling capabilities + */ + struct drm_scrambling scrambling; +}; + + +/** + * struct drm_hdmi_info - runtime information about the connected HDMI sink + * + * Describes if a given display supports advanced HDMI 2.0 features. + * This information is available in CEA-861-F extension blocks (like HF-VSDB). + */ +struct drm_hdmi_info { + /** @scdc: sink's scdc support and capabilities */ + struct drm_scdc scdc; + + /** + * @y420_vdb_modes: bitmap of modes which can support ycbcr420 + * output only (not normal RGB/YCBCR444/422 outputs). There are total + * 107 VICs defined by CEA-861-F spec, so the size is 128 bits to map + * upto 128 VICs; + */ + unsigned long y420_vdb_modes[BITS_TO_LONGS(128)]; + + /** + * @y420_cmdb_modes: bitmap of modes which can support ycbcr420 + * output also, along with normal HDMI outputs. There are total 107 + * VICs defined by CEA-861-F spec, so the size is 128 bits to map upto + * 128 VICs; + */ + unsigned long y420_cmdb_modes[BITS_TO_LONGS(128)]; + + /** @y420_cmdb_map: bitmap of SVD index, to extraxt vcb modes */ + u64 y420_cmdb_map; + + /** @y420_dc_modes: bitmap of deep color support index */ + u8 y420_dc_modes; +}; + +/** + * enum drm_link_status - connector's link_status property value + * + * This enum is used as the connector's link status property value. + * It is set to the values defined in uapi. + * + * @DRM_LINK_STATUS_GOOD: DP Link is Good as a result of successful + * link training + * @DRM_LINK_STATUS_BAD: DP Link is BAD as a result of link training + * failure + */ +enum drm_link_status { + DRM_LINK_STATUS_GOOD = DRM_MODE_LINK_STATUS_GOOD, + DRM_LINK_STATUS_BAD = DRM_MODE_LINK_STATUS_BAD, +}; + +/** + * enum drm_panel_orientation - panel_orientation info for &drm_display_info + * + * This enum is used to track the (LCD) panel orientation. There are no + * separate #defines for the uapi! + * + * @DRM_MODE_PANEL_ORIENTATION_UNKNOWN: The drm driver has not provided any + * panel orientation information (normal + * for non panels) in this case the "panel + * orientation" connector prop will not be + * attached. + * @DRM_MODE_PANEL_ORIENTATION_NORMAL: The top side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP: The top side of the panel matches the + * bottom side of the device's casing, iow + * the panel is mounted upside-down. 
+ * @DRM_MODE_PANEL_ORIENTATION_LEFT_UP: The left side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_RIGHT_UP: The right side of the panel matches the + * top side of the device's casing. + */ +enum drm_panel_orientation { + DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1, + DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, + DRM_MODE_PANEL_ORIENTATION_LEFT_UP, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + +/** + * struct drm_display_info - runtime data about the connected sink + * + * Describes a given display (e.g. CRT or flat panel) and its limitations. For + * fixed display sinks like built-in panels there's not much difference between + * this and &struct drm_connector. But for sinks with a real cable this + * structure is meant to describe all the things at the other end of the cable. + * + * For sinks which provide an EDID this can be filled out by calling + * drm_add_edid_modes(). + */ +struct drm_display_info { + /** + * @name: Name of the display. + */ + char name[DRM_DISPLAY_INFO_LEN]; + + /** + * @width_mm: Physical width in mm. + */ + unsigned int width_mm; + /** + * @height_mm: Physical height in mm. + */ + unsigned int height_mm; + + /** + * @pixel_clock: Maximum pixel clock supported by the sink, in units of + * 100Hz. This mismatches the clock in &drm_display_mode (which is in + * kHZ), because that's what the EDID uses as base unit. + */ + unsigned int pixel_clock; + /** + * @bpc: Maximum bits per color channel. Used by HDMI and DP outputs. + */ + unsigned int bpc; + + /** + * @subpixel_order: Subpixel order of LCD panels. + */ + enum subpixel_order subpixel_order; + +#define DRM_COLOR_FORMAT_RGB444 (1<<0) +#define DRM_COLOR_FORMAT_YCRCB444 (1<<1) +#define DRM_COLOR_FORMAT_YCRCB422 (1<<2) +#define DRM_COLOR_FORMAT_YCRCB420 (1<<3) + + /** + * @panel_orientation: Read only connector property for built-in panels, + * indicating the orientation of the panel vs the device's casing. + * drm_connector_init() sets this to DRM_MODE_PANEL_ORIENTATION_UNKNOWN. + * When not UNKNOWN this gets used by the drm_fb_helpers to rotate the + * fb to compensate and gets exported as prop to userspace. + */ + int panel_orientation; + + /** + * @color_formats: HDMI Color formats, selects between RGB and YCrCb + * modes. Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones + * as used to describe the pixel format in framebuffers, and also don't + * match the formats in @bus_formats which are shared with v4l. + */ + u32 color_formats; + + /** + * @bus_formats: Pixel data format on the wire, somewhat redundant with + * @color_formats. Array of size @num_bus_formats encoded using + * MEDIA_BUS_FMT\_ defines shared with v4l and media drivers. + */ + const u32 *bus_formats; + /** + * @num_bus_formats: Size of @bus_formats array. + */ + unsigned int num_bus_formats; + +#define DRM_BUS_FLAG_DE_LOW (1<<0) +#define DRM_BUS_FLAG_DE_HIGH (1<<1) +/* drive data on pos. edge */ +#define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2) +/* drive data on neg. edge */ +#define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3) +/* data is transmitted MSB to LSB on the bus */ +#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4) +/* data is transmitted LSB to MSB on the bus */ +#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5) +/* drive sync on pos. edge */ +#define DRM_BUS_FLAG_SYNC_POSEDGE (1<<6) +/* drive sync on neg. 
edge */ +#define DRM_BUS_FLAG_SYNC_NEGEDGE (1<<7) + + /** + * @bus_flags: Additional information (like pixel signal polarity) for + * the pixel data on the bus, using DRM_BUS_FLAGS\_ defines. + */ + u32 bus_flags; + + /** + * @max_tmds_clock: Maximum TMDS clock rate supported by the + * sink in kHz. 0 means undefined. + */ + int max_tmds_clock; + + /** + * @dvi_dual: Dual-link DVI sink? + */ + bool dvi_dual; + + /** + * @has_hdmi_infoframe: Does the sink support the HDMI infoframe? + */ + bool has_hdmi_infoframe; + + /** + * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even + * more stuff redundant with @bus_formats. + */ + u8 edid_hdmi_dc_modes; + + /** + * @cea_rev: CEA revision of the HDMI sink. + */ + u8 cea_rev; + + /** + * @hdmi: advance features of a HDMI sink. + */ + struct drm_hdmi_info hdmi; + + /** + * @non_desktop: Non desktop display (HMD). + */ + bool non_desktop; +}; + +int drm_display_info_set_bus_formats(struct drm_display_info *info, + const u32 *formats, + unsigned int num_formats); + +/** + * struct drm_tv_connector_state - TV connector related states + * @subconnector: selected subconnector + * @margins: margins + * @margins.left: left margin + * @margins.right: right margin + * @margins.top: top margin + * @margins.bottom: bottom margin + * @mode: TV mode + * @brightness: brightness in percent + * @contrast: contrast in percent + * @flicker_reduction: flicker reduction in percent + * @overscan: overscan in percent + * @saturation: saturation in percent + * @hue: hue in percent + */ +struct drm_tv_connector_state { + enum drm_mode_subconnector subconnector; + struct { + unsigned int left; + unsigned int right; + unsigned int top; + unsigned int bottom; + } margins; + unsigned int mode; + unsigned int brightness; + unsigned int contrast; + unsigned int flicker_reduction; + unsigned int overscan; + unsigned int saturation; + unsigned int hue; +}; + +/** + * struct drm_connector_state - mutable connector state + */ +struct drm_connector_state { + /** @connector: backpointer to the connector */ + struct drm_connector *connector; + + /** + * @crtc: CRTC to connect connector to, NULL if disabled. + * + * Do not change this directly, use drm_atomic_set_crtc_for_connector() + * instead. + */ + struct drm_crtc *crtc; + + /** + * @best_encoder: + * + * Used by the atomic helpers to select the encoder, through the + * &drm_connector_helper_funcs.atomic_best_encoder or + * &drm_connector_helper_funcs.best_encoder callbacks. + */ + struct drm_encoder *best_encoder; + + /** + * @link_status: Connector link_status to keep track of whether link is + * GOOD or BAD to notify userspace if retraining is necessary. + */ + enum drm_link_status link_status; + + /** @state: backpointer to global drm_atomic_state */ + struct drm_atomic_state *state; + + /** + * @commit: Tracks the pending commit to prevent use-after-free conditions. + * + * Is only set when @crtc is NULL. + */ + struct drm_crtc_commit *commit; + + /** @tv: TV connector state */ + struct drm_tv_connector_state tv; + + /** + * @picture_aspect_ratio: Connector property to control the + * HDMI infoframe aspect ratio setting. + * + * The %DRM_MODE_PICTURE_ASPECT_\* values much match the + * values for &enum hdmi_picture_aspect + */ + enum hdmi_picture_aspect picture_aspect_ratio; + + /** + * @content_type: Connector property to control the + * HDMI infoframe content type setting. + * The %DRM_MODE_CONTENT_TYPE_\* values much + * match the values. 
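+	 *
+	 * A driver that builds HDMI AVI infoframes can propagate this
+	 * setting with drm_hdmi_avi_infoframe_content_type(), declared
+	 * later in this header. A minimal sketch (infoframe setup omitted):
+	 *
+	 *	drm_hdmi_avi_infoframe_content_type(&frame, conn_state);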
+ */ + unsigned int content_type; + + /** + * @scaling_mode: Connector property to control the + * upscaling, mostly used for built-in panels. + */ + unsigned int scaling_mode; + + /** + * @content_protection: Connector property to request content + * protection. This is most commonly used for HDCP. + */ + unsigned int content_protection; + + /** + * @writeback_job: Writeback job for writeback connectors + * + * Holds the framebuffer and out-fence for a writeback connector. As + * the writeback completion may be asynchronous to the normal commit + * cycle, the writeback job lifetime is managed separately from the + * normal atomic state by this object. + * + * See also: drm_writeback_queue_job() and + * drm_writeback_signal_completion() + */ + struct drm_writeback_job *writeback_job; +}; + +/** + * struct drm_connector_funcs - control connectors on a given device + * + * Each CRTC may have one or more connectors attached to it. The functions + * below allow the core DRM code to control connectors, enumerate available modes, + * etc. + */ +struct drm_connector_funcs { + /** + * @dpms: + * + * Legacy entry point to set the per-connector DPMS state. Legacy DPMS + * is exposed as a standard property on the connector, but diverted to + * this callback in the drm core. Note that atomic drivers don't + * implement the 4 level DPMS support on the connector any more, but + * instead only have an on/off "ACTIVE" property on the CRTC object. + * + * This hook is not used by atomic drivers, remapping of the legacy DPMS + * property is entirely handled in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*dpms)(struct drm_connector *connector, int mode); + + /** + * @reset: + * + * Reset connector hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_connector_reset() to reset + * atomic state using this hook. + */ + void (*reset)(struct drm_connector *connector); + + /** + * @detect: + * + * Check to see if anything is attached to the connector. The parameter + * force is set to false whilst polling, true when checking the + * connector due to a user request. force can be used by the driver to + * avoid expensive, destructive operations during automated probing. + * + * This callback is optional, if not implemented the connector will be + * considered as always being attached. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + * + * Note that the helper library will already hold + * &drm_mode_config.connection_mutex. Drivers which need to grab additional + * locks to avoid races with concurrent modeset changes need to use + * &drm_connector_helper_funcs.detect_ctx instead. + * + * RETURNS: + * + * drm_connector_status indicating the connector's status. + */ + enum drm_connector_status (*detect)(struct drm_connector *connector, + bool force); + + /** + * @force: + * + * This function is called to update internal encoder state when the + * connector is forced to a certain state by userspace, either through + * the sysfs interfaces or on the kernel cmdline. In that case the + * @detect callback isn't called. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. 
It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + */ + void (*force)(struct drm_connector *connector); + + /** + * @fill_modes: + * + * Entry point for output detection and basic mode validation. The + * driver should reprobe the output if needed (e.g. when hotplug + * handling is unreliable), add all detected modes to &drm_connector.modes + * and filter out any the device can't support in any configuration. It + * also needs to filter out any modes wider or higher than the + * parameters max_width and max_height indicate. + * + * The drivers must also prune any modes no longer valid from + * &drm_connector.modes. Furthermore it must update + * &drm_connector.status and &drm_connector.edid. If no EDID has been + * received for this output connector->edid must be NULL. + * + * Drivers using the probe helpers should use + * drm_helper_probe_single_connector_modes() to implement this + * function. + * + * RETURNS: + * + * The number of modes detected and filled into &drm_connector.modes. + */ + int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * connector. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. For atomic drivers it is not used because + * property handling is done entirely in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_property)(struct drm_connector *connector, struct drm_property *property, + uint64_t val); + + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the connector, light backlight control, i2c, + * DP aux or similar interfaces. It is called late in the driver load + * sequence from drm_connector_register() when registering all the + * core drm connector interfaces. Everything added from this callback + * should be unregistered in the early_unregister callback. + * + * This is called while holding &drm_connector.mutex. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_connector *connector); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the connector from + * late_register(). It is called from drm_connector_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + * + * This is called while holding &drm_connector.mutex. + */ + void (*early_unregister)(struct drm_connector *connector); + + /** + * @destroy: + * + * Clean up connector resources. This is called at driver unload time + * through drm_mode_config_cleanup(). It can also be called at runtime + * when a connector is being hot-unplugged for drivers that support + * connector hotplugging (e.g. DisplayPort MST). + */ + void (*destroy)(struct drm_connector *connector); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this connector and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. 
not transferred to the + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. + * + * This callback is mandatory for atomic drivers. + * + * Atomic drivers which don't subclass &struct drm_connector_state should use + * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before &drm_connector.state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ + struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + * + * This callback is mandatory for atomic drivers. + */ + void (*atomic_destroy_state)(struct drm_connector *connector, + struct drm_connector_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_connector_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this connector). No other validation + * is allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. 
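+	 *
+	 * A rough sketch for a driver with a single private property
+	 * (all foo_* names are placeholders):
+	 *
+	 *	static int foo_atomic_set_property(struct drm_connector *connector,
+	 *					   struct drm_connector_state *state,
+	 *					   struct drm_property *property,
+	 *					   uint64_t val)
+	 *	{
+	 *		struct foo_connector_state *foo_state = to_foo_state(state);
+	 *
+	 *		if (property == to_foo_connector(connector)->overscan_property) {
+	 *			foo_state->overscan = val;
+	 *			return 0;
+	 *		}
+	 *		return -EINVAL;
+	 *	}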
+ */ + int (*atomic_set_property)(struct drm_connector *connector, + struct drm_connector_state *state, + struct drm_property *property, + uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCONNECTOR IOCTL. + * + * Do not call this function directly, use + * drm_atomic_connector_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which shouldn't ever happen, the core only asks for + * properties attached to this connector). + */ + int (*atomic_get_property)(struct drm_connector *connector, + const struct drm_connector_state *state, + struct drm_property *property, + uint64_t *val); + + /** + * @atomic_print_state: + * + * If driver subclasses &struct drm_connector_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_connector_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_connector_state *state); +}; + +/* mode specified on the command line */ +struct drm_cmdline_mode { + bool specified; + bool refresh_specified; + bool bpp_specified; + int xres, yres; + int bpp; + int refresh; + bool rb; + bool interlace; + bool cvt; + bool margins; + enum drm_connector_force force; +}; + +/** + * struct drm_connector - central DRM connector control structure + * + * Each connector may be connected to one or more CRTCs, or may be clonable by + * another connector if they can share a CRTC. Each connector also has a specific + * position in the broader display (referred to as a 'screen' though it could + * span multiple monitors). + */ +struct drm_connector { + /** @dev: parent DRM device */ + struct drm_device *dev; + /** @kdev: kernel device for sysfs attributes */ + struct device *kdev; + /** @attr: sysfs attributes */ + struct device_attribute *attr; + + /** + * @head: + * + * List of all connectors on a @dev, linked from + * &drm_mode_config.connector_list. Protected by + * &drm_mode_config.connector_list_lock, but please only use + * &drm_connector_list_iter to walk this list. + */ + struct list_head head; + + /** @base: base KMS object */ + struct drm_mode_object base; + + /** @name: human readable name, can be overwritten by the driver */ + char *name; + + /** + * @mutex: Lock for general connector state, but currently only protects + * @registered. Most of the connector state is still protected by + * &drm_mode_config.mutex. + */ + struct mutex mutex; + + /** + * @index: Compacted connector index, which matches the position inside + * the mode_config.list for drivers not supporting hot-add/removing. Can + * be used as an array index. It is invariant over the lifetime of the + * connector. + */ + unsigned index; + + /** + * @connector_type: + * one of the DRM_MODE_CONNECTOR_ types from drm_mode.h + */ + int connector_type; + /** @connector_type_id: index into connector type enum */ + int connector_type_id; + /** + * @interlace_allowed: + * Can this connector handle interlaced modes? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ + bool interlace_allowed; + /** + * @doublescan_allowed: + * Can this connector handle doublescan? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. 
+ */ + bool doublescan_allowed; + /** + * @stereo_allowed: + * Can this connector handle stereo modes? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ + bool stereo_allowed; + + /** + * @ycbcr_420_allowed : This bool indicates if this connector is + * capable of handling YCBCR 420 output. While parsing the EDID + * blocks, its very helpful to know, if the source is capable of + * handling YCBCR 420 outputs. + */ + bool ycbcr_420_allowed; + + /** + * @registration_state: Is this connector initializing, exposed + * (registered) with userspace, or unregistered? + * + * Protected by @mutex. + */ + enum drm_connector_registration_state registration_state; + + /** + * @modes: + * Modes available on this connector (from fill_modes() + user). + * Protected by &drm_mode_config.mutex. + */ + struct list_head modes; + + /** + * @status: + * One of the drm_connector_status enums (connected, not, or unknown). + * Protected by &drm_mode_config.mutex. + */ + enum drm_connector_status status; + + /** + * @probed_modes: + * These are modes added by probing with DDC or the BIOS, before + * filtering is applied. Used by the probe helpers. Protected by + * &drm_mode_config.mutex. + */ + struct list_head probed_modes; + + /** + * @display_info: Display information is filled from EDID information + * when a display is detected. For non hot-pluggable displays such as + * flat panels in embedded systems, the driver should initialize the + * &drm_display_info.width_mm and &drm_display_info.height_mm fields + * with the physical size of the display. + * + * Protected by &drm_mode_config.mutex. + */ + struct drm_display_info display_info; + + /** @funcs: connector control functions */ + const struct drm_connector_funcs *funcs; + + /** + * @edid_blob_ptr: DRM property containing EDID if present. Protected by + * &drm_mode_config.mutex. This should be updated only by calling + * drm_connector_update_edid_property(). + */ + struct drm_property_blob *edid_blob_ptr; + + /** @properties: property tracking for this connector */ + struct drm_object_properties properties; + + /** + * @scaling_mode_property: Optional atomic property to control the + * upscaling. See drm_connector_attach_content_protection_property(). + */ + struct drm_property *scaling_mode_property; + + /** + * @content_protection_property: DRM ENUM property for content + * protection. See drm_connector_attach_content_protection_property(). + */ + struct drm_property *content_protection_property; + + /** + * @path_blob_ptr: + * + * DRM blob property data for the DP MST path property. This should only + * be updated by calling drm_connector_set_path_property(). + */ + struct drm_property_blob *path_blob_ptr; + +#define DRM_CONNECTOR_POLL_HPD (1 << 0) +#define DRM_CONNECTOR_POLL_CONNECT (1 << 1) +#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) + + /** + * @polled: + * + * Connector polling mode, a combination of + * + * DRM_CONNECTOR_POLL_HPD + * The connector generates hotplug events and doesn't need to be + * periodically polled. The CONNECT and DISCONNECT flags must not + * be set together with the HPD flag. + * + * DRM_CONNECTOR_POLL_CONNECT + * Periodically poll the connector for connection. + * + * DRM_CONNECTOR_POLL_DISCONNECT + * Periodically poll the connector for disconnection, without + * causing flickering even when the connector is in use. DACs should + * rarely do this without a lot of testing. + * + * Set to 0 for connectors that don't support connection status + * discovery. 
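+	 *
+	 * For example, a driver whose connector has no hotplug interrupt
+	 * line would typically set something like:
+	 *
+	 *	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+	 *			    DRM_CONNECTOR_POLL_DISCONNECT;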
+ */ + uint8_t polled; + + /** + * @dpms: Current dpms state. For legacy drivers the + * &drm_connector_funcs.dpms callback must update this. For atomic + * drivers, this is handled by the core atomic code, and drivers must + * only take &drm_crtc_state.active into account. + */ + int dpms; + + /** @helper_private: mid-layer private data */ + const struct drm_connector_helper_funcs *helper_private; + + /** @cmdline_mode: mode line parsed from the kernel cmdline for this connector */ + struct drm_cmdline_mode cmdline_mode; + /** @force: a DRM_FORCE_ state for forced mode sets */ + enum drm_connector_force force; + /** @override_edid: has the EDID been overwritten through debugfs for testing? */ + bool override_edid; + +#define DRM_CONNECTOR_MAX_ENCODER 3 + /** + * @encoder_ids: Valid encoders for this connector. Please only use + * drm_connector_for_each_possible_encoder() to enumerate these. + */ + uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; + + /** + * @encoder: Currently bound encoder driving this connector, if any. + * Only really meaningful for non-atomic drivers. Atomic drivers should + * instead look at &drm_connector_state.best_encoder, and in case they + * need the CRTC driving this output, &drm_connector_state.crtc. + */ + struct drm_encoder *encoder; + +#define MAX_ELD_BYTES 128 + /** @eld: EDID-like data, if present */ + uint8_t eld[MAX_ELD_BYTES]; + /** @latency_present: AV delay info from ELD, if found */ + bool latency_present[2]; + /** + * @video_latency: Video latency info from ELD, if found. + * [0]: progressive, [1]: interlaced + */ + int video_latency[2]; + /** + * @audio_latency: audio latency info from ELD, if found + * [0]: progressive, [1]: interlaced + */ + int audio_latency[2]; + /** + * @null_edid_counter: track sinks that give us all zeros for the EDID. + * Needed to workaround some HW bugs where we get all 0s + */ + int null_edid_counter; + + /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */ + unsigned bad_edid_counter; + + /** + * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used + * in Displayport compliance testing - Displayport Link CTS Core 1.2 + * rev1.1 4.2.2.6 + */ + bool edid_corrupt; + + /** @debugfs_entry: debugfs directory for this connector */ + struct dentry *debugfs_entry; + + /** + * @state: + * + * Current atomic state for this connector. + * + * This is protected by &drm_mode_config.connection_mutex. Note that + * nonblocking atomic commits access the current connector state without + * taking locks. Either by going through the &struct drm_atomic_state + * pointers, see for_each_oldnew_connector_in_state(), + * for_each_old_connector_in_state() and + * for_each_new_connector_in_state(). Or through careful ordering of + * atomic commit operations as implemented in the atomic helpers, see + * &struct drm_crtc_commit. + */ + struct drm_connector_state *state; + + /* DisplayID bits. FIXME: Extract into a substruct? */ + + /** + * @tile_blob_ptr: + * + * DRM blob property data for the tile property (used mostly by DP MST). + * This is meant for screens which are driven through separate display + * pipelines represented by &drm_crtc, which might not be running with + * genlocked clocks. For tiled panels which are genlocked, like + * dual-link LVDS or dual-link DSI, the driver should try to not expose + * the tiling and virtualize both &drm_crtc and &drm_plane if needed. + * + * This should only be updated by calling + * drm_connector_set_tile_property(). 
+ */ + struct drm_property_blob *tile_blob_ptr; + + /** @has_tile: is this connector connected to a tiled monitor */ + bool has_tile; + /** @tile_group: tile group for the connected monitor */ + struct drm_tile_group *tile_group; + /** @tile_is_single_monitor: whether the tile is one monitor housing */ + bool tile_is_single_monitor; + + /** @num_h_tile: number of horizontal tiles in the tile group */ + /** @num_v_tile: number of vertical tiles in the tile group */ + uint8_t num_h_tile, num_v_tile; + /** @tile_h_loc: horizontal location of this tile */ + /** @tile_v_loc: vertical location of this tile */ + uint8_t tile_h_loc, tile_v_loc; + /** @tile_h_size: horizontal size of this tile. */ + /** @tile_v_size: vertical size of this tile. */ + uint16_t tile_h_size, tile_v_size; + + /** + * @free_node: + * + * List used only by &drm_connector_list_iter to be able to clean up a + * connector from any context, in conjunction with + * &drm_mode_config.connector_free_work. + */ + struct llist_node free_node; +}; + +#define obj_to_connector(x) container_of(x, struct drm_connector, base) + +int drm_connector_init(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type); +int drm_connector_register(struct drm_connector *connector); +void drm_connector_unregister(struct drm_connector *connector); +int drm_connector_attach_encoder(struct drm_connector *connector, + struct drm_encoder *encoder); + +void drm_connector_cleanup(struct drm_connector *connector); + +static inline unsigned int drm_connector_index(const struct drm_connector *connector) +{ + return connector->index; +} + +static inline u32 drm_connector_mask(const struct drm_connector *connector) +{ + return 1 << connector->index; +} + +/** + * drm_connector_lookup - lookup connector object + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: connector object id + * + * This function looks up the connector object specified by id + * add takes a reference to it. + */ +static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CONNECTOR); + return mo ? obj_to_connector(mo) : NULL; +} + +/** + * drm_connector_get - acquire a connector reference + * @connector: DRM connector + * + * This function increments the connector's refcount. + */ +static inline void drm_connector_get(struct drm_connector *connector) +{ + drm_mode_object_get(&connector->base); +} + +/** + * drm_connector_put - release a connector reference + * @connector: DRM connector + * + * This function decrements the connector's reference count and frees the + * object if the reference count drops to zero. + */ +static inline void drm_connector_put(struct drm_connector *connector) +{ + drm_mode_object_put(&connector->base); +} + +/** + * drm_connector_reference - acquire a connector reference + * @connector: DRM connector + * + * This is a compatibility alias for drm_connector_get() and should not be + * used by new code. + */ +static inline void drm_connector_reference(struct drm_connector *connector) +{ + drm_connector_get(connector); +} + +/** + * drm_connector_unreference - release a connector reference + * @connector: DRM connector + * + * This is a compatibility alias for drm_connector_put() and should not be + * used by new code. 
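+ *
+ * New code should pair drm_connector_lookup() or drm_connector_get()
+ * with drm_connector_put() instead, roughly along these lines:
+ *
+ *	connector = drm_connector_lookup(dev, file_priv, id);
+ *	if (!connector)
+ *		return -ENOENT;
+ *	... use the connector ...
+ *	drm_connector_put(connector);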
+ */ +static inline void drm_connector_unreference(struct drm_connector *connector) +{ + drm_connector_put(connector); +} + +/** + * drm_connector_is_unregistered - has the connector been unregistered from + * userspace? + * @connector: DRM connector + * + * Checks whether or not @connector has been unregistered from userspace. + * + * Returns: + * True if the connector was unregistered, false if the connector is + * registered or has not yet been registered with userspace. + */ +static inline bool +drm_connector_is_unregistered(struct drm_connector *connector) +{ + return READ_ONCE(connector->registration_state) == + DRM_CONNECTOR_UNREGISTERED; +} + +const char *drm_get_connector_status_name(enum drm_connector_status status); +const char *drm_get_subpixel_order_name(enum subpixel_order order); +const char *drm_get_dpms_name(int val); +const char *drm_get_dvi_i_subconnector_name(int val); +const char *drm_get_dvi_i_select_name(int val); +const char *drm_get_tv_subconnector_name(int val); +const char *drm_get_tv_select_name(int val); +const char *drm_get_content_protection_name(int val); + +int drm_mode_create_dvi_i_properties(struct drm_device *dev); +int drm_mode_create_tv_properties(struct drm_device *dev, + unsigned int num_modes, + const char * const modes[]); +int drm_mode_create_scaling_mode_property(struct drm_device *dev); +int drm_connector_attach_content_type_property(struct drm_connector *dev); +int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, + u32 scaling_mode_mask); +int drm_connector_attach_content_protection_property( + struct drm_connector *connector); +int drm_mode_create_aspect_ratio_property(struct drm_device *dev); +int drm_mode_create_content_type_property(struct drm_device *dev); +void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); + +int drm_mode_create_suggested_offset_properties(struct drm_device *dev); + +int drm_connector_set_path_property(struct drm_connector *connector, + const char *path); +int drm_connector_set_tile_property(struct drm_connector *connector); +int drm_connector_update_edid_property(struct drm_connector *connector, + const struct edid *edid); +void drm_connector_set_link_status_property(struct drm_connector *connector, + uint64_t link_status); +int drm_connector_init_panel_orientation_property( + struct drm_connector *connector, int width, int height); + +/** + * struct drm_tile_group - Tile group metadata + * @refcount: reference count + * @dev: DRM device + * @id: tile group id exposed to userspace + * @group_data: Sink-private data identifying this group + * + * @group_data corresponds to displayid vend/prod/serial for external screens + * with an EDID. + */ +struct drm_tile_group { + struct kref refcount; + struct drm_device *dev; + int id; + u8 group_data[8]; +}; + +struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, + char topology[8]); +struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, + char topology[8]); +void drm_mode_put_tile_group(struct drm_device *dev, + struct drm_tile_group *tg); + +/** + * struct drm_connector_list_iter - connector_list iterator + * + * This iterator tracks state needed to be able to walk the connector_list + * within struct drm_mode_config. Only use together with + * drm_connector_list_iter_begin(), drm_connector_list_iter_end() and + * drm_connector_list_iter_next() respectively the convenience macro + * drm_for_each_connector_iter(). 
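+ *
+ * A typical walk over all connectors looks roughly like this:
+ *
+ *	struct drm_connector_list_iter iter;
+ *	struct drm_connector *connector;
+ *
+ *	drm_connector_list_iter_begin(dev, &iter);
+ *	drm_for_each_connector_iter(connector, &iter) {
+ *		... inspect connector ...
+ *	}
+ *	drm_connector_list_iter_end(&iter);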
+ */ +struct drm_connector_list_iter { +/* private: */ + struct drm_device *dev; + struct drm_connector *conn; +}; + +void drm_connector_list_iter_begin(struct drm_device *dev, + struct drm_connector_list_iter *iter); +struct drm_connector * +drm_connector_list_iter_next(struct drm_connector_list_iter *iter); +void drm_connector_list_iter_end(struct drm_connector_list_iter *iter); + +bool drm_connector_has_possible_encoder(struct drm_connector *connector, + struct drm_encoder *encoder); + +/** + * drm_for_each_connector_iter - connector_list iterator macro + * @connector: &struct drm_connector pointer used as cursor + * @iter: &struct drm_connector_list_iter + * + * Note that @connector is only valid within the list body, if you want to use + * @connector after calling drm_connector_list_iter_end() then you need to grab + * your own reference first using drm_connector_get(). + */ +#define drm_for_each_connector_iter(connector, iter) \ + while ((connector = drm_connector_list_iter_next(iter))) + +/** + * drm_connector_for_each_possible_encoder - iterate connector's possible encoders + * @connector: &struct drm_connector pointer + * @encoder: &struct drm_encoder pointer used as cursor + * @__i: int iteration cursor, for macro-internal use + */ +#define drm_connector_for_each_possible_encoder(connector, encoder, __i) \ + for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \ + (connector)->encoder_ids[(__i)] != 0; (__i)++) \ + for_each_if((encoder) = \ + drm_encoder_find((connector)->dev, NULL, \ + (connector)->encoder_ids[(__i)])) \ + +#endif diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h new file mode 100644 index 000000000..92e7fc7f0 --- /dev/null +++ b/include/drm/drm_crtc.h @@ -0,0 +1,1141 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __DRM_CRTC_H__ +#define __DRM_CRTC_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct drm_device; +struct drm_mode_set; +struct drm_file; +struct drm_clip_rect; +struct drm_printer; +struct device_node; +struct dma_fence; +struct edid; + +static inline int64_t U642I64(uint64_t val) +{ + return (int64_t)*((int64_t *)&val); +} +static inline uint64_t I642U64(int64_t val) +{ + return (uint64_t)*((uint64_t *)&val); +} + +struct drm_crtc; +struct drm_pending_vblank_event; +struct drm_plane; +struct drm_bridge; +struct drm_atomic_state; + +struct drm_crtc_helper_funcs; +struct drm_plane_helper_funcs; + +/** + * struct drm_crtc_state - mutable CRTC state + * + * Note that the distinction between @enable and @active is rather subtile: + * Flipping @active while @enable is set without changing anything else may + * never return in a failure from the &drm_mode_config_funcs.atomic_check + * callback. Userspace assumes that a DPMS On will always succeed. In other + * words: @enable controls resource assignment, @active controls the actual + * hardware state. + * + * The three booleans active_changed, connectors_changed and mode_changed are + * intended to indicate whether a full modeset is needed, rather than strictly + * describing what has changed in a commit. See also: + * drm_atomic_crtc_needs_modeset() + * + * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or + * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control + * state like @plane_mask so drivers not converted over to atomic helpers should + * not rely on these being accurate! + */ +struct drm_crtc_state { + /** @crtc: backpointer to the CRTC */ + struct drm_crtc *crtc; + + /** + * @enable: Whether the CRTC should be enabled, gates all other state. + * This controls reservations of shared resources. Actual hardware state + * is controlled by @active. + */ + bool enable; + + /** + * @active: Whether the CRTC is actively displaying (used for DPMS). + * Implies that @enable is set. The driver must not release any shared + * resources if @active is set to false but @enable still true, because + * userspace expects that a DPMS ON always succeeds. + * + * Hence drivers must not consult @active in their various + * &drm_mode_config_funcs.atomic_check callback to reject an atomic + * commit. They can consult it to aid in the computation of derived + * hardware state, since even in the DPMS OFF state the display hardware + * should be as much powered down as when the CRTC is completely + * disabled through setting @enable to false. + */ + bool active; + + /** + * @planes_changed: Planes on this crtc are updated. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. + */ + bool planes_changed : 1; + + /** + * @mode_changed: @mode or @enable has been changed. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset(). + * + * Drivers are supposed to set this for any CRTC state changes that + * require a full modeset. They can also reset it to false if e.g. a + * @mode change can be done without a full modeset by only changing + * scaler settings. + */ + bool mode_changed : 1; + + /** + * @active_changed: @active has been toggled. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. 
See also + * drm_atomic_crtc_needs_modeset(). + */ + bool active_changed : 1; + + /** + * @connectors_changed: Connectors to this crtc have been updated, + * either in their state or routing. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset(). + * + * Drivers are supposed to set this as-needed from their own atomic + * check code, e.g. from &drm_encoder_helper_funcs.atomic_check + */ + bool connectors_changed : 1; + /** + * @zpos_changed: zpos values of planes on this crtc have been updated. + * Used by the atomic helpers and drivers to steer the atomic commit + * control flow. + */ + bool zpos_changed : 1; + /** + * @color_mgmt_changed: Color management properties have changed + * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and + * drivers to steer the atomic commit control flow. + */ + bool color_mgmt_changed : 1; + + /** + * @no_vblank: + * + * Reflects the ability of a CRTC to send VBLANK events. This state + * usually depends on the pipeline configuration, and the main usuage + * is CRTCs feeding a writeback connector operating in oneshot mode. + * In this case the VBLANK event is only generated when a job is queued + * to the writeback connector, and we want the core to fake VBLANK + * events when this part of the pipeline hasn't changed but others had + * or when the CRTC and connectors are being disabled. + * + * __drm_atomic_helper_crtc_duplicate_state() will not reset the value + * from the current state, the CRTC driver is then responsible for + * updating this field when needed. + * + * Note that the combination of &drm_crtc_state.event == NULL and + * &drm_crtc_state.no_blank == true is valid and usually used when the + * writeback connector attached to the CRTC has a new job queued. In + * this case the driver will send the VBLANK event on its own when the + * writeback job is complete. + */ + bool no_vblank : 1; + + /** + * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to + * this CRTC. + */ + u32 plane_mask; + + /** + * @connector_mask: Bitmask of drm_connector_mask(connector) of + * connectors attached to this CRTC. + */ + u32 connector_mask; + + /** + * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders + * attached to this CRTC. + */ + u32 encoder_mask; + + /** + * @adjusted_mode: + * + * Internal display timings which can be used by the driver to handle + * differences between the mode requested by userspace in @mode and what + * is actually programmed into the hardware. + * + * For drivers using &drm_bridge, this stores hardware display timings + * used between the CRTC and the first bridge. For other drivers, the + * meaning of the adjusted_mode field is purely driver implementation + * defined information, and will usually be used to store the hardware + * display timings used between the CRTC and encoder blocks. + */ + struct drm_display_mode adjusted_mode; + + /** + * @mode: + * + * Display timings requested by userspace. The driver should try to + * match the refresh rate as close as possible (but note that it's + * undefined what exactly is close enough, e.g. some of the HDMI modes + * only differ in less than 1% of the refresh rate). The active width + * and height as observed by userspace for positioning planes must match + * exactly. + * + * For external connectors where the sink isn't fixed (like with a + * built-in panel), this mode here should match the physical mode on the + * wire to the last details (i.e. 
including sync polarities and + * everything). + */ + struct drm_display_mode mode; + + /** + * @mode_blob: &drm_property_blob for @mode, for exposing the mode to + * atomic userspace. + */ + struct drm_property_blob *mode_blob; + + /** + * @degamma_lut: + * + * Lookup table for converting framebuffer pixel data before apply the + * color conversion matrix @ctm. See drm_crtc_enable_color_mgmt(). The + * blob (if not NULL) is an array of &struct drm_color_lut. + */ + struct drm_property_blob *degamma_lut; + + /** + * @ctm: + * + * Color transformation matrix. See drm_crtc_enable_color_mgmt(). The + * blob (if not NULL) is a &struct drm_color_ctm. + */ + struct drm_property_blob *ctm; + + /** + * @gamma_lut: + * + * Lookup table for converting pixel data after the color conversion + * matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not + * NULL) is an array of &struct drm_color_lut. + */ + struct drm_property_blob *gamma_lut; + + /** + * @target_vblank: + * + * Target vertical blank period when a page flip + * should take effect. + */ + u32 target_vblank; + + /** + * @pageflip_flags: + * + * DRM_MODE_PAGE_FLIP_* flags, as passed to the page flip ioctl. + * Zero in any other case. + */ + u32 pageflip_flags; + + /** + * @event: + * + * Optional pointer to a DRM event to signal upon completion of the + * state update. The driver must send out the event when the atomic + * commit operation completes. There are two cases: + * + * - The event is for a CRTC which is being disabled through this + * atomic commit. In that case the event can be send out any time + * after the hardware has stopped scanning out the current + * framebuffers. It should contain the timestamp and counter for the + * last vblank before the display pipeline was shut off. The simplest + * way to achieve that is calling drm_crtc_send_vblank_event() + * somewhen after drm_crtc_vblank_off() has been called. + * + * - For a CRTC which is enabled at the end of the commit (even when it + * undergoes an full modeset) the vblank timestamp and counter must + * be for the vblank right before the first frame that scans out the + * new set of buffers. Again the event can only be sent out after the + * hardware has stopped scanning out the old buffers. + * + * - Events for disabled CRTCs are not allowed, and drivers can ignore + * that case. + * + * This can be handled by the drm_crtc_send_vblank_event() function, + * which the driver should call on the provided event upon completion of + * the atomic commit. Note that if the driver supports vblank signalling + * and timestamping the vblank counters and timestamps must agree with + * the ones returned from page flip events. With the current vblank + * helper infrastructure this can be achieved by holding a vblank + * reference while the page flip is pending, acquired through + * drm_crtc_vblank_get() and released with drm_crtc_vblank_put(). + * Drivers are free to implement their own vblank counter and timestamp + * tracking though, e.g. if they have accurate timestamp registers in + * hardware. + * + * For hardware which supports some means to synchronize vblank + * interrupt delivery with committing display state there's also + * drm_crtc_arm_vblank_event(). See the documentation of that function + * for a detailed discussion of the constraints it needs to be used + * safely. + * + * If the device can't notify of flip completion in a race-free way + * at all, then the event should be armed just after the page flip is + * committed. 
In the worst case the driver will send the event to + * userspace one frame too late. This doesn't allow for a real atomic + * update, but it should avoid tearing. + */ + struct drm_pending_vblank_event *event; + + /** + * @commit: + * + * This tracks how the commit for this update proceeds through the + * various phases. This is never cleared, except when we destroy the + * state, so that subsequent commits can synchronize with previous ones. + */ + struct drm_crtc_commit *commit; + + /** @state: backpointer to global drm_atomic_state */ + struct drm_atomic_state *state; +}; + +/** + * struct drm_crtc_funcs - control CRTCs for a given device + * + * The drm_crtc_funcs structure is the central CRTC management structure + * in the DRM. Each CRTC controls one or more connectors (note that the name + * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc. + * connectors, not just CRTs). + * + * Each driver is responsible for filling out this structure at startup time, + * in addition to providing other modesetting features, like i2c and DDC + * bus accessors. + */ +struct drm_crtc_funcs { + /** + * @reset: + * + * Reset CRTC hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_crtc_reset() to reset + * atomic state using this hook. + */ + void (*reset)(struct drm_crtc *crtc); + + /** + * @cursor_set: + * + * Update the cursor image. The cursor position is relative to the CRTC + * and can be partially or fully outside of the visible area. + * + * Note that contrary to all other KMS functions the legacy cursor entry + * points don't take a framebuffer object, but instead take directly a + * raw buffer object id from the driver's buffer manager (which is + * either GEM or TTM for current drivers). + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height); + + /** + * @cursor_set2: + * + * Update the cursor image, including hotspot information. The hotspot + * must not affect the cursor position in CRTC coordinates, but is only + * meant as a hint for virtualized display hardware to coordinate the + * guests and hosts cursor position. The cursor hotspot is relative to + * the cursor image. Otherwise this works exactly like @cursor_set. + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height, + int32_t hot_x, int32_t hot_y); + + /** + * @cursor_move: + * + * Update the cursor position. The cursor does not need to be visible + * when this hook is called. + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. 
+ * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*cursor_move)(struct drm_crtc *crtc, int x, int y); + + /** + * @gamma_set: + * + * Set gamma on the CRTC. + * + * This callback is optional. + * + * Atomic drivers who want to support gamma tables should implement the + * atomic color management support, enabled by calling + * drm_crtc_enable_color_mgmt(), which then supports the legacy gamma + * interface through the drm_atomic_helper_legacy_gamma_set() + * compatibility implementation. + */ + int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t size, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @destroy: + * + * Clean up plane resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a CRTC cannot be hotplugged + * in DRM. + */ + void (*destroy)(struct drm_crtc *crtc); + + /** + * @set_config: + * + * This is the main legacy entry point to change the modeset state on a + * CRTC. All the details of the desired configuration are passed in a + * &struct drm_mode_set - see there for details. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_set_config() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_config)(struct drm_mode_set *set, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @page_flip: + * + * Legacy entry point to schedule a flip to the given framebuffer. + * + * Page flipping is a synchronization mechanism that replaces the frame + * buffer being scanned out by the CRTC with a new frame buffer during + * vertical blanking, avoiding tearing (except when requested otherwise + * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application + * requests a page flip the DRM core verifies that the new frame buffer + * is large enough to be scanned out by the CRTC in the currently + * configured mode and then calls this hook with a pointer to the new + * frame buffer. + * + * The driver must wait for any pending rendering to the new framebuffer + * to complete before executing the flip. It should also wait for any + * pending rendering from other drivers if the underlying buffer is a + * shared dma-buf. + * + * An application can request to be notified when the page flip has + * completed. The drm core will supply a &struct drm_event in the event + * parameter in this case. This can be handled by the + * drm_crtc_send_vblank_event() function, which the driver should call on + * the provided event upon completion of the flip. Note that if + * the driver supports vblank signalling and timestamping the vblank + * counters and timestamps must agree with the ones returned from page + * flip events. With the current vblank helper infrastructure this can + * be achieved by holding a vblank reference while the page flip is + * pending, acquired through drm_crtc_vblank_get() and released with + * drm_crtc_vblank_put(). Drivers are free to implement their own vblank + * counter and timestamp tracking though, e.g. if they have accurate + * timestamp registers in hardware. + * + * This callback is optional. + * + * NOTE: + * + * Very early versions of the KMS ABI mandated that the driver must + * block (but not reject) any rendering to the old framebuffer until the + * flip operation has completed and the old framebuffer is no longer + * visible. 
This requirement has been lifted, and userspace is instead + * expected to request delivery of an event and wait with recycling old + * buffers until such has been received. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. Note that if a + * page flip operation is already pending the callback should return + * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode + * or just runtime disabled through DPMS respectively the new atomic + * "ACTIVE" state) should result in an -EINVAL error code. Note that + * drm_atomic_helper_page_flip() checks this already for atomic drivers. + */ + int (*page_flip)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @page_flip_target: + * + * Same as @page_flip but with an additional parameter specifying the + * absolute target vertical blank period (as reported by + * drm_crtc_vblank_count()) when the flip should take effect. + * + * Note that the core code calls drm_crtc_vblank_get before this entry + * point, and will call drm_crtc_vblank_put if this entry point returns + * any non-0 error code. It's the driver's responsibility to call + * drm_crtc_vblank_put after this entry point returns 0, typically when + * the flip completes. + */ + int (*page_flip_target)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, uint32_t target, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * CRTC. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. For atomic drivers it is not used because + * property handling is done entirely in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_property)(struct drm_crtc *crtc, + struct drm_property *property, uint64_t val); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this CRTC and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. + * + * This callback is mandatory for atomic drivers. + * + * Atomic drivers which don't subclass &struct drm_crtc_state should use + * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before &drm_crtc.state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ + struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + * + * This callback is mandatory for atomic drivers. 
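As a rough illustration of the atomic-helper wiring recommended above (drm_atomic_helper_set_config(), drm_atomic_helper_page_flip(), drm_atomic_helper_crtc_duplicate_state() and friends), here is a minimal sketch of how a fictional "foo" driver might fill this table and register the CRTC with real primary and cursor planes instead of the deprecated cursor hooks; all foo_* names are illustrative, and the gamma entry only does something useful once drm_mode_crtc_set_gamma_size() and drm_crtc_enable_color_mgmt() have been called.

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <linux/slab.h>

static void foo_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

/* Legacy entry points delegated to the atomic helpers. */
static const struct drm_crtc_funcs foo_crtc_funcs = {
	.reset			= drm_atomic_helper_crtc_reset,
	.destroy		= foo_crtc_destroy,
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};

/* Register the CRTC with real primary/cursor planes, making the
 * deprecated cursor_set()/cursor_move() hooks unnecessary. */
static int foo_crtc_create(struct drm_device *dev, struct drm_plane *primary,
			   struct drm_plane *cursor)
{
	struct drm_crtc *crtc;
	int ret;

	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
	if (!crtc)
		return -ENOMEM;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					&foo_crtc_funcs, NULL);
	if (ret)
		kfree(crtc);
	return ret;
}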
+ */ + void (*atomic_destroy_state)(struct drm_crtc *crtc, + struct drm_crtc_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_crtc_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which should never happen, the core only + * asks for properties attached to this CRTC). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ + int (*atomic_set_property)(struct drm_crtc *crtc, + struct drm_crtc_state *state, + struct drm_property *property, + uint64_t val); + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCRTC IOCTL. + * + * Do not call this function directly, use + * drm_atomic_crtc_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this CRTC). + */ + int (*atomic_get_property)(struct drm_crtc *crtc, + const struct drm_crtc_state *state, + struct drm_property *property, + uint64_t *val); + + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the crtc like debugfs interfaces. + * It is called late in the driver load sequence from drm_dev_register(). + * Everything added from this callback should be unregistered in + * the early_unregister callback. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_crtc *crtc); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the crtc from + * @late_register. 
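The state-subclassing pattern behind @atomic_duplicate_state, @atomic_destroy_state and @atomic_set_property can be sketched as below, assuming a hypothetical foo driver with a single driver-private "dither" property; @atomic_get_property would simply read the value back out of the same subclass.

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical CRTC wrapper and state subclass. */
struct foo_crtc {
	struct drm_crtc base;
	struct drm_property *dither_property;
};
#define to_foo_crtc(c) container_of(c, struct foo_crtc, base)

struct foo_crtc_state {
	struct drm_crtc_state base;
	u32 dither;		/* decoded driver-private property value */
};
#define to_foo_crtc_state(s) container_of(s, struct foo_crtc_state, base)

static struct drm_crtc_state *foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	/* Copy the driver-private part, then let the shared helper copy
	 * the common fields and grab references on the blob properties. */
	state->dither = to_foo_crtc_state(crtc->state)->dither;
	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	return &state->base;
}

static void foo_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_foo_crtc_state(state));
}

static int foo_crtc_atomic_set_property(struct drm_crtc *crtc,
					struct drm_crtc_state *state,
					struct drm_property *property,
					uint64_t val)
{
	if (property == to_foo_crtc(crtc)->dither_property) {
		to_foo_crtc_state(state)->dither = val;
		return 0;
	}

	return -EINVAL;
}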
It is called from drm_dev_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + */ + void (*early_unregister)(struct drm_crtc *crtc); + + /** + * @set_crc_source: + * + * Changes the source of CRC checksums of frames at the request of + * userspace, typically for testing purposes. The sources available are + * specific of each driver and a %NULL value indicates that CRC + * generation is to be switched off. + * + * When CRC generation is enabled, the driver should call + * drm_crtc_add_crc_entry() at each frame, providing any information + * that characterizes the frame contents in the crcN arguments, as + * provided from the configured source. Drivers must accept an "auto" + * source name that will select a default source for this CRTC. + * + * Note that "auto" can depend upon the current modeset configuration, + * e.g. it could pick an encoder or output specific CRC sampling point. + * + * This callback is optional if the driver does not support any CRC + * generation functionality. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_crc_source)(struct drm_crtc *crtc, const char *source, + size_t *values_cnt); + + /** + * @atomic_print_state: + * + * If driver subclasses &struct drm_crtc_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_crtc_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_crtc_state *state); + + /** + * @get_vblank_counter: + * + * Driver callback for fetching a raw hardware vblank counter for the + * CRTC. It's meant to be used by new drivers as the replacement of + * &drm_driver.get_vblank_counter hook. + * + * This callback is optional. If a device doesn't have a hardware + * counter, the driver can simply leave the hook as NULL. The DRM core + * will account for missed vblank events while interrupts where disabled + * based on system timestamps. + * + * Wraparound handling and loss of events due to modesetting is dealt + * with in the DRM core code, as long as drivers call + * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or + * enabling a CRTC. + * + * See also &drm_device.vblank_disable_immediate and + * &drm_device.max_vblank_count. + * + * Returns: + * + * Raw vblank counter value. + */ + u32 (*get_vblank_counter)(struct drm_crtc *crtc); + + /** + * @enable_vblank: + * + * Enable vblank interrupts for the CRTC. It's meant to be used by + * new drivers as the replacement of &drm_driver.enable_vblank hook. + * + * Returns: + * + * Zero on success, appropriate errno if the vblank interrupt cannot + * be enabled. + */ + int (*enable_vblank)(struct drm_crtc *crtc); + + /** + * @disable_vblank: + * + * Disable vblank interrupts for the CRTC. It's meant to be used by + * new drivers as the replacement of &drm_driver.disable_vblank hook. + */ + void (*disable_vblank)(struct drm_crtc *crtc); +}; + +/** + * struct drm_crtc - central CRTC control structure + * + * Each CRTC may have one or more connectors associated with it. This structure + * allows the CRTC to be controlled. + */ +struct drm_crtc { + /** @dev: parent DRM device */ + struct drm_device *dev; + /** @port: OF node used by drm_of_find_possible_crtcs(). */ + struct device_node *port; + /** + * @head: + * + * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list. 
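For the vblank hooks just described, a sketch against imaginary hardware: the FOO_* register offsets and the foo_crtc wrapper are made up, and the driver's interrupt handler would additionally call drm_crtc_handle_vblank() for the CRTC on each vblank interrupt.

#include <drm/drm_crtc.h>
#include <linux/bitops.h>
#include <linux/io.h>

/* Hypothetical register layout. */
#define FOO_REG_VBLANK_COUNT	0x0040
#define FOO_REG_IRQ_ENABLE	0x0044
#define FOO_IRQ_VBLANK		BIT(0)

struct foo_crtc {
	struct drm_crtc base;
	void __iomem *mmio;
};
#define to_foo_crtc(c) container_of(c, struct foo_crtc, base)

static u32 foo_get_vblank_counter(struct drm_crtc *crtc)
{
	return readl(to_foo_crtc(crtc)->mmio + FOO_REG_VBLANK_COUNT);
}

static int foo_enable_vblank(struct drm_crtc *crtc)
{
	void __iomem *reg = to_foo_crtc(crtc)->mmio + FOO_REG_IRQ_ENABLE;

	writel(readl(reg) | FOO_IRQ_VBLANK, reg);
	return 0;
}

static void foo_disable_vblank(struct drm_crtc *crtc)
{
	void __iomem *reg = to_foo_crtc(crtc)->mmio + FOO_REG_IRQ_ENABLE;

	writel(readl(reg) & ~FOO_IRQ_VBLANK, reg);
}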
+ * Invariant over the lifetime of @dev and therefore does not need + * locking. + */ + struct list_head head; + + /** @name: human readable name, can be overwritten by the driver */ + char *name; + + /** + * @mutex: + * + * This provides a read lock for the overall CRTC state (mode, dpms + * state, ...) and a write lock for everything which can be update + * without a full modeset (fb, cursor data, CRTC properties ...). A full + * modeset also need to grab &drm_mode_config.connection_mutex. + * + * For atomic drivers specifically this protects @state. + */ + struct drm_modeset_lock mutex; + + /** @base: base KMS object for ID tracking etc. */ + struct drm_mode_object base; + + /** + * @primary: + * Primary plane for this CRTC. Note that this is only + * relevant for legacy IOCTL, it specifies the plane implicitly used by + * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance + * beyond that. + */ + struct drm_plane *primary; + + /** + * @cursor: + * Cursor plane for this CRTC. Note that this is only relevant for + * legacy IOCTL, it specifies the plane implicitly used by the SETCURSOR + * and SETCURSOR2 IOCTLs. It does not have any significance + * beyond that. + */ + struct drm_plane *cursor; + + /** + * @index: Position inside the mode_config.list, can be used as an array + * index. It is invariant over the lifetime of the CRTC. + */ + unsigned index; + + /** + * @cursor_x: Current x position of the cursor, used for universal + * cursor planes because the SETCURSOR IOCTL only can update the + * framebuffer without supplying the coordinates. Drivers should not use + * this directly, atomic drivers should look at &drm_plane_state.crtc_x + * of the cursor plane instead. + */ + int cursor_x; + /** + * @cursor_y: Current y position of the cursor, used for universal + * cursor planes because the SETCURSOR IOCTL only can update the + * framebuffer without supplying the coordinates. Drivers should not use + * this directly, atomic drivers should look at &drm_plane_state.crtc_y + * of the cursor plane instead. + */ + int cursor_y; + + /** + * @enabled: + * + * Is this CRTC enabled? Should only be used by legacy drivers, atomic + * drivers should instead consult &drm_crtc_state.enable and + * &drm_crtc_state.active. Atomic drivers can update this by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + bool enabled; + + /** + * @mode: + * + * Current mode timings. Should only be used by legacy drivers, atomic + * drivers should instead consult &drm_crtc_state.mode. Atomic drivers + * can update this by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + struct drm_display_mode mode; + + /** + * @hwmode: + * + * Programmed mode in hw, after adjustments for encoders, crtc, panel + * scaling etc. Should only be used by legacy drivers, for high + * precision vblank timestamps in + * drm_calc_vbltimestamp_from_scanoutpos(). + * + * Note that atomic drivers should not use this, but instead use + * &drm_crtc_state.adjusted_mode. And for high-precision timestamps + * drm_calc_vbltimestamp_from_scanoutpos() used &drm_vblank_crtc.hwmode, + * which is filled out by calling drm_calc_timestamping_constants(). + */ + struct drm_display_mode hwmode; + + /** + * @x: + * x position on screen. Should only be used by legacy drivers, atomic + * drivers should look at &drm_plane_state.crtc_x of the primary plane + * instead. Updated by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + int x; + /** + * @y: + * y position on screen. 
Should only be used by legacy drivers, atomic + * drivers should look at &drm_plane_state.crtc_y of the primary plane + * instead. Updated by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + int y; + + /** @funcs: CRTC control functions */ + const struct drm_crtc_funcs *funcs; + + /** + * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up + * by calling drm_mode_crtc_set_gamma_size(). + */ + uint32_t gamma_size; + + /** + * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and + * GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size(). + */ + uint16_t *gamma_store; + + /** @helper_private: mid-layer private data */ + const struct drm_crtc_helper_funcs *helper_private; + + /** @properties: property tracking for this CRTC */ + struct drm_object_properties properties; + + /** + * @state: + * + * Current atomic state for this CRTC. + * + * This is protected by @mutex. Note that nonblocking atomic commits + * access the current CRTC state without taking locks. Either by going + * through the &struct drm_atomic_state pointers, see + * for_each_oldnew_crtc_in_state(), for_each_old_crtc_in_state() and + * for_each_new_crtc_in_state(). Or through careful ordering of atomic + * commit operations as implemented in the atomic helpers, see + * &struct drm_crtc_commit. + */ + struct drm_crtc_state *state; + + /** + * @commit_list: + * + * List of &drm_crtc_commit structures tracking pending commits. + * Protected by @commit_lock. This list holds its own full reference, + * as does the ongoing commit. + * + * "Note that the commit for a state change is also tracked in + * &drm_crtc_state.commit. For accessing the immediately preceding + * commit in an atomic update it is recommended to just use that + * pointer in the old CRTC state, since accessing that doesn't need + * any locking or list-walking. @commit_list should only be used to + * stall for framebuffer cleanup that's signalled through + * &drm_crtc_commit.cleanup_done." + */ + struct list_head commit_list; + + /** + * @commit_lock: + * + * Spinlock to protect @commit_list. + */ + spinlock_t commit_lock; + +#ifdef CONFIG_DEBUG_FS + /** + * @debugfs_entry: + * + * Debugfs directory for this CRTC. + */ + struct dentry *debugfs_entry; +#endif + + /** + * @crc: + * + * Configuration settings of CRC capture. + */ + struct drm_crtc_crc crc; + + /** + * @fence_context: + * + * timeline context used for fence operations. + */ + unsigned int fence_context; + + /** + * @fence_lock: + * + * spinlock to protect the fences in the fence_context. + */ + spinlock_t fence_lock; + /** + * @fence_seqno: + * + * Seqno variable used as monotonic counter for the fences + * created on the CRTC's timeline. + */ + unsigned long fence_seqno; + + /** + * @timeline_name: + * + * The name of the CRTC's fence timeline. + */ + char timeline_name[32]; +}; + +/** + * struct drm_mode_set - new values for a CRTC config change + * @fb: framebuffer to use for new config + * @crtc: CRTC whose configuration we're about to change + * @mode: mode timings to use + * @x: position of this CRTC relative to @fb + * @y: position of this CRTC relative to @fb + * @connectors: array of connectors to drive with this CRTC if possible + * @num_connectors: size of @connectors array + * + * This represents a modeset configuration for the legacy SETCRTC ioctl and is + * also used internally. Atomic drivers instead use &drm_atomic_state. 
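A simplified sketch of how a driver's commit path can walk the per-CRTC states through the iterators mentioned for @state; real drivers usually arm &drm_crtc_state.event and complete it from the vblank interrupt rather than sending it synchronously as done here, and the actual hardware programming is elided.

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <linux/spinlock.h>

static void foo_commit_crtcs(struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state, *new_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) {
		DRM_DEBUG_KMS("[CRTC:%u:%s] modeset: %d, active: %d -> %d\n",
			      crtc->base.id, crtc->name,
			      drm_atomic_crtc_needs_modeset(new_state),
			      old_state->active, new_state->active);

		/* ... program the hardware for new_state here ... */

		if (new_state->event) {
			spin_lock_irq(&crtc->dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_state->event);
			spin_unlock_irq(&crtc->dev->event_lock);
			new_state->event = NULL;
		}
	}
}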
+ */ +struct drm_mode_set { + struct drm_framebuffer *fb; + struct drm_crtc *crtc; + struct drm_display_mode *mode; + + uint32_t x; + uint32_t y; + + struct drm_connector **connectors; + size_t num_connectors; +}; + +#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) + +__printf(6, 7) +int drm_crtc_init_with_planes(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const char *name, ...); +void drm_crtc_cleanup(struct drm_crtc *crtc); + +/** + * drm_crtc_index - find the index of a registered CRTC + * @crtc: CRTC to find index for + * + * Given a registered CRTC, return the index of that CRTC within a DRM + * device's list of CRTCs. + */ +static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc) +{ + return crtc->index; +} + +/** + * drm_crtc_mask - find the mask of a registered CRTC + * @crtc: CRTC to find mask for + * + * Given a registered CRTC, return the mask bit of that CRTC for the + * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields. + */ +static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc) +{ + return 1 << drm_crtc_index(crtc); +} + +int drm_crtc_force_disable(struct drm_crtc *crtc); +int drm_crtc_force_disable_all(struct drm_device *dev); + +int drm_mode_set_config_internal(struct drm_mode_set *set); +struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx); + +/** + * drm_crtc_find - look up a CRTC object from its ID + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: &drm_mode_object ID + * + * This can be used to look up a CRTC from its userspace ID. Only used by + * drivers for legacy IOCTLs and interface, nowadays extensions to the KMS + * userspace interface should be done using &drm_property. + */ +static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CRTC); + return mo ? obj_to_crtc(mo) : NULL; +} + +/** + * drm_for_each_crtc - iterate over all CRTCs + * @crtc: a &struct drm_crtc as the loop cursor + * @dev: the &struct drm_device + * + * Iterate over all CRTCs of @dev. + */ +#define drm_for_each_crtc(crtc, dev) \ + list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) + +#endif /* __DRM_CRTC_H__ */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h new file mode 100644 index 000000000..691463303 --- /dev/null +++ b/include/drm/drm_crtc_helper.h @@ -0,0 +1,82 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * The DRM mode setting helper functions are common code for drivers to use if + * they wish. Drivers are not forced to use this code in their + * implementations but it would be useful if they code they do use at least + * provides a consistent interface and operation to userspace + */ + +#ifndef __DRM_CRTC_HELPER_H__ +#define __DRM_CRTC_HELPER_H__ + +#include +#include +#include + +#include + +#include +#include +#include + +void drm_helper_disable_unused_functions(struct drm_device *dev); +int drm_crtc_helper_set_config(struct drm_mode_set *set, + struct drm_modeset_acquire_ctx *ctx); +bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, + struct drm_framebuffer *old_fb); +bool drm_helper_crtc_in_use(struct drm_crtc *crtc); +bool drm_helper_encoder_in_use(struct drm_encoder *encoder); + +int drm_helper_connector_dpms(struct drm_connector *connector, int mode); + +void drm_helper_resume_force_mode(struct drm_device *dev); + +int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb); +int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); + +/* drm_probe_helper.c */ +int drm_helper_probe_single_connector_modes(struct drm_connector + *connector, uint32_t maxX, + uint32_t maxY); +int drm_helper_probe_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force); +void drm_kms_helper_poll_init(struct drm_device *dev); +void drm_kms_helper_poll_fini(struct drm_device *dev); +bool drm_helper_hpd_irq_event(struct drm_device *dev); +void drm_kms_helper_hotplug_event(struct drm_device *dev); + +void drm_kms_helper_poll_disable(struct drm_device *dev); +void drm_kms_helper_poll_enable(struct drm_device *dev); +bool drm_kms_helper_is_poll_worker(void); + +#endif diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h new file mode 100644 index 000000000..ac0f75df1 --- /dev/null +++ b/include/drm/drm_debugfs.h @@ -0,0 +1,101 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. 
(Rik) Faith + * Author: Gareth Hughes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_DEBUGFS_H_ +#define _DRM_DEBUGFS_H_ + +/** + * struct drm_info_list - debugfs info list entry + * + * This structure represents a debugfs file to be created by the drm + * core. + */ +struct drm_info_list { + /** @name: file name */ + const char *name; + /** + * @show: + * + * Show callback. &seq_file->private will be set to the &struct + * drm_info_node corresponding to the instance of this info on a given + * &struct drm_minor. + */ + int (*show)(struct seq_file*, void*); + /** @driver_features: Required driver features for this entry */ + u32 driver_features; + /** @data: Driver-private data, should not be device-specific. */ + void *data; +}; + +/** + * struct drm_info_node - Per-minor debugfs node structure + * + * This structure represents a debugfs file, as an instantiation of a &struct + * drm_info_list on a &struct drm_minor. + * + * FIXME: + * + * No it doesn't make a hole lot of sense that we duplicate debugfs entries for + * both the render and the primary nodes, but that's how this has organically + * grown. It should probably be fixed, with a compatibility link, if needed. + */ +struct drm_info_node { + /** @minor: &struct drm_minor for this node. */ + struct drm_minor *minor; + /** @info_ent: template for this node. */ + const struct drm_info_list *info_ent; + /* private: */ + struct list_head list; + struct dentry *dent; +}; + +#if defined(CONFIG_DEBUG_FS) +int drm_debugfs_create_files(const struct drm_info_list *files, + int count, struct dentry *root, + struct drm_minor *minor); +int drm_debugfs_remove_files(const struct drm_info_list *files, + int count, struct drm_minor *minor); +#else +static inline int drm_debugfs_create_files(const struct drm_info_list *files, + int count, struct dentry *root, + struct drm_minor *minor) +{ + return 0; +} + +static inline int drm_debugfs_remove_files(const struct drm_info_list *files, + int count, struct drm_minor *minor) +{ + return 0; +} +#endif + +#endif /* _DRM_DEBUGFS_H_ */ diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h new file mode 100644 index 000000000..b225eeb30 --- /dev/null +++ b/include/drm/drm_debugfs_crc.h @@ -0,0 +1,74 @@ +/* + * Copyright © 2016 Collabora Ltd. 
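For the drm_info_list/drm_info_node machinery above, a sketch of how a driver might expose a single debugfs file through &drm_driver.debugfs_init; the foo names and the file contents are purely illustrative.

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

static int foo_status_show(struct seq_file *m, void *unused)
{
	/* &seq_file->private is the &struct drm_info_node, see @show above. */
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	seq_printf(m, "bound to %s\n", dev_name(dev->dev));
	return 0;
}

static const struct drm_info_list foo_debugfs_files[] = {
	{ .name = "foo_status", .show = foo_status_show },
};

/* Hooked up as &drm_driver.debugfs_init. */
static int foo_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(foo_debugfs_files,
					ARRAY_SIZE(foo_debugfs_files),
					minor->debugfs_root, minor);
}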
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __DRM_DEBUGFS_CRC_H__ +#define __DRM_DEBUGFS_CRC_H__ + +#define DRM_MAX_CRC_NR 10 + +/** + * struct drm_crtc_crc_entry - entry describing a frame's content + * @has_frame_counter: whether the source was able to provide a frame number + * @frame: number of the frame this CRC is about, if @has_frame_counter is true + * @crc: array of values that characterize the frame + */ +struct drm_crtc_crc_entry { + bool has_frame_counter; + uint32_t frame; + uint32_t crcs[DRM_MAX_CRC_NR]; +}; + +#define DRM_CRC_ENTRIES_NR 128 + +/** + * struct drm_crtc_crc - data supporting CRC capture on a given CRTC + * @lock: protects the fields in this struct + * @source: name of the currently configured source of CRCs + * @opened: whether userspace has opened the data file for reading + * @overflow: whether an overflow occured. + * @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR + * @head: head of circular queue + * @tail: tail of circular queue + * @values_cnt: number of CRC values per entry, up to %DRM_MAX_CRC_NR + * @wq: workqueue used to synchronize reading and writing + */ +struct drm_crtc_crc { + spinlock_t lock; + const char *source; + bool opened, overflow; + struct drm_crtc_crc_entry *entries; + int head, tail; + size_t values_cnt; + wait_queue_head_t wq; +}; + +#if defined(CONFIG_DEBUG_FS) +int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, + uint32_t frame, uint32_t *crcs); +#else +static inline int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, + uint32_t frame, uint32_t *crcs) +{ + return -EINVAL; +} +#endif /* defined(CONFIG_DEBUG_FS) */ + +#endif /* __DRM_DEBUGFS_CRC_H__ */ diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h new file mode 100644 index 000000000..fa117e114 --- /dev/null +++ b/include/drm/drm_device.h @@ -0,0 +1,232 @@ +#ifndef _DRM_DEVICE_H_ +#define _DRM_DEVICE_H_ + +#include +#include +#include +#include + +#include +#include + +struct drm_driver; +struct drm_minor; +struct drm_master; +struct drm_device_dma; +struct drm_vblank_crtc; +struct drm_sg_mem; +struct drm_local_map; +struct drm_vma_offset_manager; +struct drm_fb_helper; + +struct inode; + +struct pci_dev; +struct pci_controller; + +/** + * DRM device structure. This structure represent a complete card that + * may contain multiple heads. 
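Tying the CRC structures above back to &drm_crtc_funcs.set_crc_source, a sketch of a driver with a single hardware CRC source feeding drm_crtc_add_crc_entry(); the foo_crtc bookkeeping and the frame-done path that reads the CRC back from hardware are hypothetical.

#include <drm/drm_crtc.h>
#include <drm/drm_debugfs_crc.h>
#include <linux/kernel.h>
#include <linux/string.h>

struct foo_crtc {
	struct drm_crtc base;
	bool crc_enabled;
};
#define to_foo_crtc(c) container_of(c, struct foo_crtc, base)

static int foo_crtc_set_crc_source(struct drm_crtc *crtc, const char *source,
				   size_t *values_cnt)
{
	/* Only the mandatory "auto" source (or NULL to switch CRCs off). */
	if (source && strcmp(source, "auto") != 0)
		return -EINVAL;

	to_foo_crtc(crtc)->crc_enabled = source != NULL;
	*values_cnt = 1;	/* one CRC value per frame */
	return 0;
}

/* Called from the driver's frame-done/vblank path once the frame's CRC
 * has been read back from hardware. */
static void foo_crtc_report_crc(struct drm_crtc *crtc, u32 frame, u32 crc)
{
	uint32_t crcs[DRM_MAX_CRC_NR] = { crc };

	if (to_foo_crtc(crtc)->crc_enabled)
		drm_crtc_add_crc_entry(crtc, true, frame, crcs);
}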
+ */ +struct drm_device { + struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ + int if_version; /**< Highest interface version set */ + + /** \name Lifetime Management */ + /*@{ */ + struct kref ref; /**< Object ref-count */ + struct device *dev; /**< Device structure of bus-device */ + struct drm_driver *driver; /**< DRM driver managing the device */ + void *dev_private; /**< DRM driver private data */ + struct drm_minor *primary; /**< Primary node */ + struct drm_minor *render; /**< Render node */ + bool registered; + + /* currently active master for this device. Protected by master_mutex */ + struct drm_master *master; + + /** + * @unplugged: + * + * Flag to tell if the device has been unplugged. + * See drm_dev_enter() and drm_dev_is_unplugged(). + */ + bool unplugged; + + struct inode *anon_inode; /**< inode for private address-space */ + char *unique; /**< unique name of the device */ + /*@} */ + + /** \name Locks */ + /*@{ */ + struct mutex struct_mutex; /**< For others */ + struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ + /*@} */ + + /** \name Usage Counters */ + /*@{ */ + int open_count; /**< Outstanding files open, protected by drm_global_mutex. */ + spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */ + int buf_use; /**< Buffers in use -- cannot alloc */ + atomic_t buf_alloc; /**< Buffer allocation in progress */ + /*@} */ + + struct mutex filelist_mutex; + struct list_head filelist; + + /** + * @filelist_internal: + * + * List of open DRM files for in-kernel clients. Protected by @filelist_mutex. + */ + struct list_head filelist_internal; + + /** + * @clientlist_mutex: + * + * Protects @clientlist access. + */ + struct mutex clientlist_mutex; + + /** + * @clientlist: + * + * List of in-kernel clients. Protected by @clientlist_mutex. + */ + struct list_head clientlist; + + /** \name Memory management */ + /*@{ */ + struct list_head maplist; /**< Linked list of regions */ + struct drm_open_hash map_hash; /**< User token hash table for maps */ + + /** \name Context handle management */ + /*@{ */ + struct list_head ctxlist; /**< Linked list of context handles */ + struct mutex ctxlist_mutex; /**< For ctxlist */ + + struct idr ctx_idr; + + struct list_head vmalist; /**< List of vmas (for debugging) */ + + /*@} */ + + /** \name DMA support */ + /*@{ */ + struct drm_device_dma *dma; /**< Optional pointer for DMA support */ + /*@} */ + + /** \name Context support */ + /*@{ */ + + __volatile__ long context_flag; /**< Context swapping flag */ + int last_context; /**< Last current context */ + /*@} */ + + /** + * @irq_enabled: + * + * Indicates that interrupt handling is enabled, specifically vblank + * handling. Drivers which don't use drm_irq_install() need to set this + * to true manually. + */ + bool irq_enabled; + int irq; + + /** + * @vblank_disable_immediate: + * + * If true, vblank interrupt will be disabled immediately when the + * refcount drops to zero, as opposed to via the vblank disable + * timer. + * + * This can be set to true it the hardware has a working vblank counter + * with high-precision timestamping (otherwise there are races) and the + * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off() + * appropriately. See also @max_vblank_count and + * &drm_crtc_funcs.get_vblank_counter. + */ + bool vblank_disable_immediate; + + /** + * @vblank: + * + * Array of vblank tracking structures, one per &struct drm_crtc. 
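A sketch of vblank bring-up matching the @irq_enabled and @vblank_disable_immediate notes above, assuming hardware with a usable frame counter exposed through &drm_crtc_funcs.get_vblank_counter; the 24-bit counter width is made up, and see @max_vblank_count below for what that field means.

#include <drm/drm_device.h>
#include <drm/drm_vblank.h>

static int foo_init_vblank(struct drm_device *dev, unsigned int num_crtcs)
{
	int ret;

	ret = drm_vblank_init(dev, num_crtcs);
	if (ret)
		return ret;

	/* Hypothetical 24-bit hardware frame counter, read back through
	 * &drm_crtc_funcs.get_vblank_counter. */
	dev->max_vblank_count = 0xffffff;

	/* Only safe with a hardware counter and precise timestamps. */
	dev->vblank_disable_immediate = true;

	/* The vblank interrupt handler then calls drm_crtc_handle_vblank(). */
	return 0;
}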
For + * historical reasons (vblank support predates kernel modesetting) this + * is free-standing and not part of &struct drm_crtc itself. It must be + * initialized explicitly by calling drm_vblank_init(). + */ + struct drm_vblank_crtc *vblank; + + spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ + spinlock_t vbl_lock; + + /** + * @max_vblank_count: + * + * Maximum value of the vblank registers. This value +1 will result in a + * wrap-around of the vblank register. It is used by the vblank core to + * handle wrap-arounds. + * + * If set to zero the vblank core will try to guess the elapsed vblanks + * between times when the vblank interrupt is disabled through + * high-precision timestamps. That approach is suffering from small + * races and imprecision over longer time periods, hence exposing a + * hardware vblank counter is always recommended. + * + * This is the statically configured device wide maximum. The driver + * can instead choose to use a runtime configurable per-crtc value + * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count + * must be left at zero. See drm_crtc_set_max_vblank_count() on how + * to use the per-crtc value. + * + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set. + */ + u32 max_vblank_count; /**< size of vblank counter register */ + + /** + * List of events + */ + struct list_head vblank_event_list; + spinlock_t event_lock; + + /*@} */ + + struct drm_agp_head *agp; /**< AGP data */ + + struct pci_dev *pdev; /**< PCI device structure */ +#ifdef __alpha__ + struct pci_controller *hose; +#endif + + struct drm_sg_mem *sg; /**< Scatter gather memory */ + unsigned int num_crtcs; /**< Number of CRTCs on this device */ + + struct { + int context; + struct drm_hw_lock *lock; + } sigdata; + + struct drm_local_map *agp_buffer_map; + unsigned int agp_buffer_token; + + struct drm_mode_config mode_config; /**< Current mode config */ + + /** \name GEM information */ + /*@{ */ + struct mutex object_name_lock; + struct idr object_name_idr; + struct drm_vma_offset_manager *vma_offset_manager; + /*@} */ + int switch_power_state; + + /** + * @fb_helper: + * + * Pointer to the fbdev emulation structure. + * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini(). + */ + struct drm_fb_helper *fb_helper; +}; + +#endif diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h new file mode 100644 index 000000000..9d3b745c3 --- /dev/null +++ b/include/drm/drm_displayid.h @@ -0,0 +1,103 @@ +/* + * Copyright © 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef DRM_DISPLAYID_H +#define DRM_DISPLAYID_H + +#define DATA_BLOCK_PRODUCT_ID 0x00 +#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01 +#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02 +#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03 +#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04 +#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05 +#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06 +#define DATA_BLOCK_VESA_TIMING 0x07 +#define DATA_BLOCK_CEA_TIMING 0x08 +#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09 +#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a +#define DATA_BLOCK_GP_ASCII_STRING 0x0b +#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c +#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d +#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e +#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f +#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10 +#define DATA_BLOCK_TILED_DISPLAY 0x12 +#define DATA_BLOCK_CTA 0x81 + +#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f + +#define PRODUCT_TYPE_EXTENSION 0 +#define PRODUCT_TYPE_TEST 1 +#define PRODUCT_TYPE_PANEL 2 +#define PRODUCT_TYPE_MONITOR 3 +#define PRODUCT_TYPE_TV 4 +#define PRODUCT_TYPE_REPEATER 5 +#define PRODUCT_TYPE_DIRECT_DRIVE 6 + +struct displayid_hdr { + u8 rev; + u8 bytes; + u8 prod_id; + u8 ext_count; +} __packed; + +struct displayid_block { + u8 tag; + u8 rev; + u8 num_bytes; +} __packed; + +struct displayid_tiled_block { + struct displayid_block base; + u8 tile_cap; + u8 topo[3]; + u8 tile_size[4]; + u8 tile_pixel_bezel[5]; + u8 topology_id[8]; +} __packed; + +struct displayid_detailed_timings_1 { + u8 pixel_clock[3]; + u8 flags; + u8 hactive[2]; + u8 hblank[2]; + u8 hsync[2]; + u8 hsw[2]; + u8 vactive[2]; + u8 vblank[2]; + u8 vsync[2]; + u8 vsw[2]; +} __packed; + +struct displayid_detailed_timing_block { + struct displayid_block base; + struct displayid_detailed_timings_1 timings[0]; +}; + +#define for_each_displayid_db(displayid, block, idx, length) \ + for ((block) = (struct displayid_block *)&(displayid)[idx]; \ + (idx) + sizeof(struct displayid_block) <= (length) && \ + (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \ + (block)->num_bytes > 0; \ + (idx) += (block)->num_bytes + sizeof(struct displayid_block), \ + (block) = (struct displayid_block *)&(displayid)[idx]) + +#endif diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h new file mode 100644 index 000000000..4c42db81f --- /dev/null +++ b/include/drm/drm_dp_dual_mode_helper.h @@ -0,0 +1,119 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
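A sketch of consuming the for_each_displayid_db() iterator defined above, assuming buf points directly at a validated DisplayID section so the first data block follows the struct displayid_hdr; when the section is embedded in an EDID extension block the starting offset differs.

#include <drm/drm_displayid.h>
#include <linux/types.h>

static void foo_parse_displayid(u8 *buf, int length)
{
	struct displayid_block *block;
	int idx = sizeof(struct displayid_hdr);	/* first data block */

	for_each_displayid_db(buf, block, idx, length) {
		switch (block->tag) {
		case DATA_BLOCK_TILED_DISPLAY:
			/* decode struct displayid_tiled_block here */
			break;
		case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
			/* decode struct displayid_detailed_timing_block */
			break;
		default:
			break;
		}
	}
}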
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef DRM_DP_DUAL_MODE_HELPER_H +#define DRM_DP_DUAL_MODE_HELPER_H + +#include + +/* + * Optional for type 1 DVI adaptors + * Mandatory for type 1 HDMI and type 2 adaptors + */ +#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */ +#define DP_DUAL_MODE_HDMI_ID_LEN 16 +/* + * Optional for type 1 adaptors + * Mandatory for type 2 adaptors + */ +#define DP_DUAL_MODE_ADAPTOR_ID 0x10 +#define DP_DUAL_MODE_REV_MASK 0x07 +#define DP_DUAL_MODE_REV_TYPE2 0x00 +#define DP_DUAL_MODE_TYPE_MASK 0xf0 +#define DP_DUAL_MODE_TYPE_TYPE2 0xa0 +/* This field is marked reserved in dual mode spec, used in LSPCON */ +#define DP_DUAL_MODE_TYPE_HAS_DPCD 0x08 +#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/ +#define DP_DUAL_IEEE_OUI_LEN 3 +#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */ +#define DP_DUAL_DEVICE_ID_LEN 6 +#define DP_DUAL_MODE_HARDWARE_REV 0x1a +#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b +#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c +#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d +#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e +#define DP_DUAL_MODE_TMDS_OEN 0x20 +#define DP_DUAL_MODE_TMDS_DISABLE 0x01 +#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21 +#define DP_DUAL_MODE_CEC_ENABLE 0x01 +#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22 + +/* LSPCON specific registers, defined by MCA */ +#define DP_DUAL_MODE_LSPCON_MODE_CHANGE 0x40 +#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41 +#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1 + +struct i2c_adapter; + +ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, + u8 offset, void *buffer, size_t size); +ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, + u8 offset, const void *buffer, size_t size); + +/** + * enum drm_lspcon_mode + * @DRM_LSPCON_MODE_INVALID: No LSPCON. + * @DRM_LSPCON_MODE_LS: Level shifter mode of LSPCON + * which drives DP++ to HDMI 1.4 conversion. + * @DRM_LSPCON_MODE_PCON: Protocol converter mode of LSPCON + * which drives DP++ to HDMI 2.0 active conversion. 
+ */ +enum drm_lspcon_mode { + DRM_LSPCON_MODE_INVALID, + DRM_LSPCON_MODE_LS, + DRM_LSPCON_MODE_PCON, +}; + +/** + * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor + * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor + * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor + * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor + * @DRM_DP_DUAL_MODE_LSPCON: Level shifter / protocol converter + */ +enum drm_dp_dual_mode_type { + DRM_DP_DUAL_MODE_NONE, + DRM_DP_DUAL_MODE_UNKNOWN, + DRM_DP_DUAL_MODE_TYPE1_DVI, + DRM_DP_DUAL_MODE_TYPE1_HDMI, + DRM_DP_DUAL_MODE_TYPE2_DVI, + DRM_DP_DUAL_MODE_TYPE2_HDMI, + DRM_DP_DUAL_MODE_LSPCON, +}; + +enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter); +int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter); +int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool *enabled); +int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool enable); +const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); + +int drm_lspcon_get_mode(struct i2c_adapter *adapter, + enum drm_lspcon_mode *current_mode); +int drm_lspcon_set_mode(struct i2c_adapter *adapter, + enum drm_lspcon_mode reqd_mode); +#endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h new file mode 100644 index 000000000..05cc31b5d --- /dev/null +++ b/include/drm/drm_dp_helper.h @@ -0,0 +1,1317 @@ +/* + * Copyright © 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef _DRM_DP_HELPER_H_ +#define _DRM_DP_HELPER_H_ + +#include +#include +#include + +/* + * Unless otherwise noted, all values are from the DP 1.1a spec. Note that + * DP and DPCD versions are independent. Differences from 1.0 are not noted, + * 1.0 devices basically don't exist in the wild. + * + * Abbreviations, in chronological order: + * + * eDP: Embedded DisplayPort version 1 + * DPI: DisplayPort Interoperability Guideline v1.1a + * 1.2: DisplayPort 1.2 + * MST: Multistream Transport - part of DP 1.2a + * + * 1.2 formally includes both eDP and DPI definitions. 
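A sketch of how a driver might use the dual-mode helpers declared above to probe a DP++ adaptor over its DDC bus and, for an LSPCON device, switch it into PCON mode; error handling is trimmed to the essentials.

#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_print.h>
#include <linux/i2c.h>

static int foo_probe_dp_dual_mode(struct i2c_adapter *ddc)
{
	enum drm_dp_dual_mode_type type;
	enum drm_lspcon_mode mode;
	int max_tmds_clock;

	type = drm_dp_dual_mode_detect(ddc);
	if (type == DRM_DP_DUAL_MODE_NONE)
		return 0;	/* no adaptor, native HDMI/DVI limits apply */

	max_tmds_clock = drm_dp_dual_mode_max_tmds_clock(type, ddc);
	if (max_tmds_clock > 0)
		DRM_DEBUG_KMS("%s adaptor limits TMDS clock to %d kHz\n",
			      drm_dp_get_dual_mode_type_name(type),
			      max_tmds_clock);

	/* LSPCON devices often come up in LS mode; switch to PCON here. */
	if (type == DRM_DP_DUAL_MODE_LSPCON &&
	    drm_lspcon_get_mode(ddc, &mode) == 0 &&
	    mode != DRM_LSPCON_MODE_PCON)
		return drm_lspcon_set_mode(ddc, DRM_LSPCON_MODE_PCON);

	return 0;
}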
+ */ + +#define DP_AUX_MAX_PAYLOAD_BYTES 16 + +#define DP_AUX_I2C_WRITE 0x0 +#define DP_AUX_I2C_READ 0x1 +#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2 +#define DP_AUX_I2C_MOT 0x4 +#define DP_AUX_NATIVE_WRITE 0x8 +#define DP_AUX_NATIVE_READ 0x9 + +#define DP_AUX_NATIVE_REPLY_ACK (0x0 << 0) +#define DP_AUX_NATIVE_REPLY_NACK (0x1 << 0) +#define DP_AUX_NATIVE_REPLY_DEFER (0x2 << 0) +#define DP_AUX_NATIVE_REPLY_MASK (0x3 << 0) + +#define DP_AUX_I2C_REPLY_ACK (0x0 << 2) +#define DP_AUX_I2C_REPLY_NACK (0x1 << 2) +#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2) +#define DP_AUX_I2C_REPLY_MASK (0x3 << 2) + +/* AUX CH addresses */ +/* DPCD */ +#define DP_DPCD_REV 0x000 +# define DP_DPCD_REV_10 0x10 +# define DP_DPCD_REV_11 0x11 +# define DP_DPCD_REV_12 0x12 +# define DP_DPCD_REV_13 0x13 +# define DP_DPCD_REV_14 0x14 + +#define DP_MAX_LINK_RATE 0x001 + +#define DP_MAX_LANE_COUNT 0x002 +# define DP_MAX_LANE_COUNT_MASK 0x1f +# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ +# define DP_ENHANCED_FRAME_CAP (1 << 7) + +#define DP_MAX_DOWNSPREAD 0x003 +# define DP_MAX_DOWNSPREAD_0_5 (1 << 0) +# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6) +# define DP_TPS4_SUPPORTED (1 << 7) + +#define DP_NORP 0x004 + +#define DP_DOWNSTREAMPORT_PRESENT 0x005 +# define DP_DWN_STRM_PORT_PRESENT (1 << 0) +# define DP_DWN_STRM_PORT_TYPE_MASK 0x06 +# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1) +# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1) +# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1) +# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1) +# define DP_FORMAT_CONVERSION (1 << 3) +# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ + +#define DP_MAIN_LINK_CHANNEL_CODING 0x006 + +#define DP_DOWN_STREAM_PORT_COUNT 0x007 +# define DP_PORT_COUNT_MASK 0x0f +# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */ +# define DP_OUI_SUPPORT (1 << 7) + +#define DP_RECEIVE_PORT_0_CAP_0 0x008 +# define DP_LOCAL_EDID_PRESENT (1 << 1) +# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2) + +#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009 + +#define DP_RECEIVE_PORT_1_CAP_0 0x00a +#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b + +#define DP_I2C_SPEED_CAP 0x00c /* DPI */ +# define DP_I2C_SPEED_1K 0x01 +# define DP_I2C_SPEED_5K 0x02 +# define DP_I2C_SPEED_10K 0x04 +# define DP_I2C_SPEED_100K 0x08 +# define DP_I2C_SPEED_400K 0x10 +# define DP_I2C_SPEED_1M 0x20 + +#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */ +# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0) +# define DP_FRAMING_CHANGE_CAP (1 << 1) +# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */ + +#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ +# define DP_TRAINING_AUX_RD_MASK 0x7F /* XXX 1.2? 
*/ + +#define DP_ADAPTER_CAP 0x00f /* 1.2 */ +# define DP_FORCE_LOAD_SENSE_CAP (1 << 0) +# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1) + +#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */ +# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */ + +/* Multiple stream transport */ +#define DP_FAUX_CAP 0x020 /* 1.2 */ +# define DP_FAUX_CAP_1 (1 << 0) + +#define DP_MSTM_CAP 0x021 /* 1.2 */ +# define DP_MST_CAP (1 << 0) + +#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */ + +/* AV_SYNC_DATA_BLOCK 1.2 */ +#define DP_AV_GRANULARITY 0x023 +# define DP_AG_FACTOR_MASK (0xf << 0) +# define DP_AG_FACTOR_3MS (0 << 0) +# define DP_AG_FACTOR_2MS (1 << 0) +# define DP_AG_FACTOR_1MS (2 << 0) +# define DP_AG_FACTOR_500US (3 << 0) +# define DP_AG_FACTOR_200US (4 << 0) +# define DP_AG_FACTOR_100US (5 << 0) +# define DP_AG_FACTOR_10US (6 << 0) +# define DP_AG_FACTOR_1US (7 << 0) +# define DP_VG_FACTOR_MASK (0xf << 4) +# define DP_VG_FACTOR_3MS (0 << 4) +# define DP_VG_FACTOR_2MS (1 << 4) +# define DP_VG_FACTOR_1MS (2 << 4) +# define DP_VG_FACTOR_500US (3 << 4) +# define DP_VG_FACTOR_200US (4 << 4) +# define DP_VG_FACTOR_100US (5 << 4) + +#define DP_AUD_DEC_LAT0 0x024 +#define DP_AUD_DEC_LAT1 0x025 + +#define DP_AUD_PP_LAT0 0x026 +#define DP_AUD_PP_LAT1 0x027 + +#define DP_VID_INTER_LAT 0x028 + +#define DP_VID_PROG_LAT 0x029 + +#define DP_REP_LAT 0x02a + +#define DP_AUD_DEL_INS0 0x02b +#define DP_AUD_DEL_INS1 0x02c +#define DP_AUD_DEL_INS2 0x02d +/* End of AV_SYNC_DATA_BLOCK */ + +#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */ +# define DP_ALPM_CAP (1 << 0) + +#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_CAP (1 << 0) + +#define DP_GUID 0x030 /* 1.2 */ + +#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */ +# define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0) + +#define DP_DSC_REV 0x061 +# define DP_DSC_MAJOR_MASK (0xf << 0) +# define DP_DSC_MINOR_MASK (0xf << 4) +# define DP_DSC_MAJOR_SHIFT 0 +# define DP_DSC_MINOR_SHIFT 4 + +#define DP_DSC_RC_BUF_BLK_SIZE 0x062 +# define DP_DSC_RC_BUF_BLK_SIZE_1 0x0 +# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1 +# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2 +# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3 + +#define DP_DSC_RC_BUF_SIZE 0x063 + +#define DP_DSC_SLICE_CAP_1 0x064 +# define DP_DSC_1_PER_DP_DSC_SINK (1 << 0) +# define DP_DSC_2_PER_DP_DSC_SINK (1 << 1) +# define DP_DSC_4_PER_DP_DSC_SINK (1 << 3) +# define DP_DSC_6_PER_DP_DSC_SINK (1 << 4) +# define DP_DSC_8_PER_DP_DSC_SINK (1 << 5) +# define DP_DSC_10_PER_DP_DSC_SINK (1 << 6) +# define DP_DSC_12_PER_DP_DSC_SINK (1 << 7) + +#define DP_DSC_LINE_BUF_BIT_DEPTH 0x065 +# define DP_DSC_LINE_BUF_BIT_DEPTH_MASK (0xf << 0) +# define DP_DSC_LINE_BUF_BIT_DEPTH_9 0x0 +# define DP_DSC_LINE_BUF_BIT_DEPTH_10 0x1 +# define DP_DSC_LINE_BUF_BIT_DEPTH_11 0x2 +# define DP_DSC_LINE_BUF_BIT_DEPTH_12 0x3 +# define DP_DSC_LINE_BUF_BIT_DEPTH_13 0x4 +# define DP_DSC_LINE_BUF_BIT_DEPTH_14 0x5 +# define DP_DSC_LINE_BUF_BIT_DEPTH_15 0x6 +# define DP_DSC_LINE_BUF_BIT_DEPTH_16 0x7 +# define DP_DSC_LINE_BUF_BIT_DEPTH_8 0x8 + +#define DP_DSC_BLK_PREDICTION_SUPPORT 0x066 +# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0) + +#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */ + +#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */ + +#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069 +# define DP_DSC_RGB (1 << 0) +# define DP_DSC_YCbCr444 (1 << 1) +# define DP_DSC_YCbCr422_Simple (1 << 2) +# define DP_DSC_YCbCr422_Native (1 << 3) +# define DP_DSC_YCbCr420_Native (1 << 4) + +#define DP_DSC_DEC_COLOR_DEPTH_CAP 0x06A +# define 
DP_DSC_8_BPC (1 << 1) +# define DP_DSC_10_BPC (1 << 2) +# define DP_DSC_12_BPC (1 << 3) + +#define DP_DSC_PEAK_THROUGHPUT 0x06B +# define DP_DSC_THROUGHPUT_MODE_0_MASK (0xf << 0) +# define DP_DSC_THROUGHPUT_MODE_0_SHIFT 0 +# define DP_DSC_THROUGHPUT_MODE_0_340 (1 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_400 (2 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_450 (3 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_500 (4 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_550 (5 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_600 (6 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_650 (7 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_700 (8 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_750 (9 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_800 (10 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_850 (11 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0) +# define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4) +# define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4 +# define DP_DSC_THROUGHPUT_MODE_1_340 (1 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_400 (2 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_450 (3 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_500 (4 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_550 (5 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_600 (6 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_650 (7 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_700 (8 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_750 (9 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_800 (10 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_850 (11 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_900 (12 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_950 (13 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4) + +#define DP_DSC_MAX_SLICE_WIDTH 0x06C + +#define DP_DSC_SLICE_CAP_2 0x06D +# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0) +# define DP_DSC_20_PER_DP_DSC_SINK (1 << 1) +# define DP_DSC_24_PER_DP_DSC_SINK (1 << 2) + +#define DP_DSC_BITS_PER_PIXEL_INC 0x06F +# define DP_DSC_BITS_PER_PIXEL_1_16 0x0 +# define DP_DSC_BITS_PER_PIXEL_1_8 0x1 +# define DP_DSC_BITS_PER_PIXEL_1_4 0x2 +# define DP_DSC_BITS_PER_PIXEL_1_2 0x3 +# define DP_DSC_BITS_PER_PIXEL_1 0x4 + +#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ +# define DP_PSR_IS_SUPPORTED 1 +# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */ +# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */ + +#define DP_PSR_CAPS 0x071 /* XXX 1.2? */ +# define DP_PSR_NO_TRAIN_ON_EXIT 1 +# define DP_PSR_SETUP_TIME_330 (0 << 1) +# define DP_PSR_SETUP_TIME_275 (1 << 1) +# define DP_PSR_SETUP_TIME_220 (2 << 1) +# define DP_PSR_SETUP_TIME_165 (3 << 1) +# define DP_PSR_SETUP_TIME_110 (4 << 1) +# define DP_PSR_SETUP_TIME_55 (5 << 1) +# define DP_PSR_SETUP_TIME_0 (6 << 1) +# define DP_PSR_SETUP_TIME_MASK (7 << 1) +# define DP_PSR_SETUP_TIME_SHIFT 1 +# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */ +# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */ +/* + * 0x80-0x8f describe downstream port capabilities, but there are two layouts + * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not, + * each port's descriptor is one byte wide. If it was set, each port's is + * four bytes wide, starting with the one byte from the base info. As of + * DP interop v1.1a only VGA defines additional detail. 
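A simplified decode of the first downstream-port descriptor following the two layouts just described, using the DP_DS_* fields defined below; it assumes dpcd holds the receiver capabilities starting at DP_DPCD_REV, ds holds the bytes read from DP_DOWNSTREAM_PORT_0, and the max-bpc field is only meaningful for converter port types when the detailed four-byte layout is present.

#include <drm/drm_dp_helper.h>
#include <linux/types.h>

static int foo_downstream_max_bpc(const u8 dpcd[16], const u8 ds[4])
{
	/* Without detailed info each descriptor is one byte wide and
	 * carries no colour-depth information. */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE))
		return 0;

	switch (ds[2] & DP_DS_MAX_BPC_MASK) {
	case DP_DS_8BPC:
		return 8;
	case DP_DS_10BPC:
		return 10;
	case DP_DS_12BPC:
		return 12;
	case DP_DS_16BPC:
		return 16;
	default:
		return 0;
	}
}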
+ */ + +/* offset 0 */ +#define DP_DOWNSTREAM_PORT_0 0x80 +# define DP_DS_PORT_TYPE_MASK (7 << 0) +# define DP_DS_PORT_TYPE_DP 0 +# define DP_DS_PORT_TYPE_VGA 1 +# define DP_DS_PORT_TYPE_DVI 2 +# define DP_DS_PORT_TYPE_HDMI 3 +# define DP_DS_PORT_TYPE_NON_EDID 4 +# define DP_DS_PORT_TYPE_DP_DUALMODE 5 +# define DP_DS_PORT_TYPE_WIRELESS 6 +# define DP_DS_PORT_HPD (1 << 3) +/* offset 1 for VGA is maximum megapixels per second / 8 */ +/* offset 2 */ +# define DP_DS_MAX_BPC_MASK (3 << 0) +# define DP_DS_8BPC 0 +# define DP_DS_10BPC 1 +# define DP_DS_12BPC 2 +# define DP_DS_16BPC 3 + +/* DP Forward error Correction Registers */ +#define DP_FEC_CAPABILITY 0x090 /* 1.4 */ +# define DP_FEC_CAPABLE (1 << 0) +# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1) +# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2) +# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3) + +/* link configuration */ +#define DP_LINK_BW_SET 0x100 +# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */ +# define DP_LINK_BW_1_62 0x06 +# define DP_LINK_BW_2_7 0x0a +# define DP_LINK_BW_5_4 0x14 /* 1.2 */ +# define DP_LINK_BW_8_1 0x1e /* 1.4 */ + +#define DP_LANE_COUNT_SET 0x101 +# define DP_LANE_COUNT_MASK 0x0f +# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) + +#define DP_TRAINING_PATTERN_SET 0x102 +# define DP_TRAINING_PATTERN_DISABLE 0 +# define DP_TRAINING_PATTERN_1 1 +# define DP_TRAINING_PATTERN_2 2 +# define DP_TRAINING_PATTERN_3 3 /* 1.2 */ +# define DP_TRAINING_PATTERN_4 7 /* 1.4 */ +# define DP_TRAINING_PATTERN_MASK 0x3 +# define DP_TRAINING_PATTERN_MASK_1_4 0xf + +/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */ +# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2) +# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2) +# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2) +# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2) +# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2) + +# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) +# define DP_LINK_SCRAMBLING_DISABLE (1 << 5) + +# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) +# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) +# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) +# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) + +#define DP_TRAINING_LANE0_SET 0x103 +#define DP_TRAINING_LANE1_SET 0x104 +#define DP_TRAINING_LANE2_SET 0x105 +#define DP_TRAINING_LANE3_SET 0x106 + +# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 +# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 +# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0) + +# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3) + +# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 +# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) + +#define DP_DOWNSPREAD_CTRL 0x107 +# define DP_SPREAD_AMP_0_5 (1 << 4) +# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */ + +#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 +# define DP_SET_ANSI_8B10B (1 << 0) + +#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */ +/* bitmask as for DP_I2C_SPEED_CAP */ + +#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? 
*/ +# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0) +# define DP_FRAMING_CHANGE_ENABLE (1 << 1) +# define DP_PANEL_SELF_TEST_ENABLE (1 << 7) + +#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */ +#define DP_LINK_QUAL_LANE1_SET 0x10c +#define DP_LINK_QUAL_LANE2_SET 0x10d +#define DP_LINK_QUAL_LANE3_SET 0x10e +# define DP_LINK_QUAL_PATTERN_DISABLE 0 +# define DP_LINK_QUAL_PATTERN_D10_2 1 +# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2 +# define DP_LINK_QUAL_PATTERN_PRBS7 3 +# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4 +# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5 +# define DP_LINK_QUAL_PATTERN_MASK 7 + +#define DP_TRAINING_LANE0_1_SET2 0x10f +#define DP_TRAINING_LANE2_3_SET2 0x110 +# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0) +# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2) +# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4) +# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6) + +#define DP_MSTM_CTRL 0x111 /* 1.2 */ +# define DP_MST_EN (1 << 0) +# define DP_UP_REQ_EN (1 << 1) +# define DP_UPSTREAM_IS_SRC (1 << 2) + +#define DP_AUDIO_DELAY0 0x112 /* 1.2 */ +#define DP_AUDIO_DELAY1 0x113 +#define DP_AUDIO_DELAY2 0x114 + +#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */ +# define DP_LINK_RATE_SET_SHIFT 0 +# define DP_LINK_RATE_SET_MASK (7 << 0) + +#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */ +# define DP_ALPM_ENABLE (1 << 0) +# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1) + +#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0) +# define DP_IRQ_HPD_ENABLE (1 << 1) + +#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */ +# define DP_PWR_NOT_NEEDED (1 << 0) + +#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */ +# define DP_FEC_READY (1 << 0) +# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1) +# define DP_FEC_ERR_COUNT_DIS (0 << 1) +# define DP_FEC_UNCORR_BLK_ERROR_COUNT (1 << 1) +# define DP_FEC_CORR_BLK_ERROR_COUNT (2 << 1) +# define DP_FEC_BIT_ERROR_COUNT (3 << 1) +# define DP_FEC_LANE_SELECT_MASK (3 << 4) +# define DP_FEC_LANE_0_SELECT (0 << 4) +# define DP_FEC_LANE_1_SELECT (1 << 4) +# define DP_FEC_LANE_2_SELECT (2 << 4) +# define DP_FEC_LANE_3_SELECT (3 << 4) + +#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_VALID (1 << 0) + +#define DP_DSC_ENABLE 0x160 /* DP 1.4 */ + +#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? 
*/ +# define DP_PSR_ENABLE (1 << 0) +# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) +# define DP_PSR_CRC_VERIFICATION (1 << 2) +# define DP_PSR_FRAME_CAPTURE (1 << 3) +# define DP_PSR_SELECTIVE_UPDATE (1 << 4) +# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) +# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */ + +#define DP_ADAPTER_CTRL 0x1a0 +# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) + +#define DP_BRANCH_DEVICE_CTRL 0x1a1 +# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0) + +#define DP_PAYLOAD_ALLOCATE_SET 0x1c0 +#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 +#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2 + +#define DP_SINK_COUNT 0x200 +/* prior to 1.2 bit 7 was reserved mbz */ +# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) +# define DP_SINK_CP_READY (1 << 6) + +#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201 +# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) +# define DP_AUTOMATED_TEST_REQUEST (1 << 1) +# define DP_CP_IRQ (1 << 2) +# define DP_MCCS_IRQ (1 << 3) +# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */ +# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */ +# define DP_SINK_SPECIFIC_IRQ (1 << 6) + +#define DP_LANE0_1_STATUS 0x202 +#define DP_LANE2_3_STATUS 0x203 +# define DP_LANE_CR_DONE (1 << 0) +# define DP_LANE_CHANNEL_EQ_DONE (1 << 1) +# define DP_LANE_SYMBOL_LOCKED (1 << 2) + +#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \ + DP_LANE_CHANNEL_EQ_DONE | \ + DP_LANE_SYMBOL_LOCKED) + +#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 + +#define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) +#define DP_LINK_STATUS_UPDATED (1 << 7) + +#define DP_SINK_STATUS 0x205 + +#define DP_RECEIVE_PORT_0_STATUS (1 << 0) +#define DP_RECEIVE_PORT_1_STATUS (1 << 1) + +#define DP_ADJUST_REQUEST_LANE0_1 0x206 +#define DP_ADJUST_REQUEST_LANE2_3 0x207 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 +# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c +# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 + +#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c + +#define DP_TEST_REQUEST 0x218 +# define DP_TEST_LINK_TRAINING (1 << 0) +# define DP_TEST_LINK_VIDEO_PATTERN (1 << 1) +# define DP_TEST_LINK_EDID_READ (1 << 2) +# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ +# define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */ + +#define DP_TEST_LINK_RATE 0x219 +# define DP_LINK_RATE_162 (0x6) +# define DP_LINK_RATE_27 (0xa) + +#define DP_TEST_LANE_COUNT 0x220 + +#define DP_TEST_PATTERN 0x221 +# define DP_NO_TEST_PATTERN 0x0 +# define DP_COLOR_RAMP 0x1 +# define DP_BLACK_AND_WHITE_VERTICAL_LINES 0x2 +# define DP_COLOR_SQUARE 0x3 + +#define DP_TEST_H_TOTAL_HI 0x222 +#define DP_TEST_H_TOTAL_LO 0x223 + +#define DP_TEST_V_TOTAL_HI 0x224 +#define DP_TEST_V_TOTAL_LO 0x225 + +#define DP_TEST_H_START_HI 0x226 +#define DP_TEST_H_START_LO 0x227 + +#define DP_TEST_V_START_HI 0x228 +#define DP_TEST_V_START_LO 0x229 + +#define DP_TEST_HSYNC_HI 0x22A +# define DP_TEST_HSYNC_POLARITY (1 << 7) +# define DP_TEST_HSYNC_WIDTH_HI_MASK (127 << 0) +#define DP_TEST_HSYNC_WIDTH_LO 0x22B + +#define DP_TEST_VSYNC_HI 0x22C +# define DP_TEST_VSYNC_POLARITY (1 << 7) +# define DP_TEST_VSYNC_WIDTH_HI_MASK (127 << 0) +#define DP_TEST_VSYNC_WIDTH_LO 0x22D + +#define DP_TEST_H_WIDTH_HI 0x22E +#define DP_TEST_H_WIDTH_LO 0x22F + +#define 
DP_TEST_V_HEIGHT_HI 0x230 +#define DP_TEST_V_HEIGHT_LO 0x231 + +#define DP_TEST_MISC0 0x232 +# define DP_TEST_SYNC_CLOCK (1 << 0) +# define DP_TEST_COLOR_FORMAT_MASK (3 << 1) +# define DP_TEST_COLOR_FORMAT_SHIFT 1 +# define DP_COLOR_FORMAT_RGB (0 << 1) +# define DP_COLOR_FORMAT_YCbCr422 (1 << 1) +# define DP_COLOR_FORMAT_YCbCr444 (2 << 1) +# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3) +# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4) +# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4) +# define DP_YCBCR_COEFFICIENTS_ITU709 (1 << 4) +# define DP_TEST_BIT_DEPTH_MASK (7 << 5) +# define DP_TEST_BIT_DEPTH_SHIFT 5 +# define DP_TEST_BIT_DEPTH_6 (0 << 5) +# define DP_TEST_BIT_DEPTH_8 (1 << 5) +# define DP_TEST_BIT_DEPTH_10 (2 << 5) +# define DP_TEST_BIT_DEPTH_12 (3 << 5) +# define DP_TEST_BIT_DEPTH_16 (4 << 5) + +#define DP_TEST_MISC1 0x233 +# define DP_TEST_REFRESH_DENOMINATOR (1 << 0) +# define DP_TEST_INTERLACED (1 << 1) + +#define DP_TEST_REFRESH_RATE_NUMERATOR 0x234 + +#define DP_TEST_MISC0 0x232 + +#define DP_TEST_CRC_R_CR 0x240 +#define DP_TEST_CRC_G_Y 0x242 +#define DP_TEST_CRC_B_CB 0x244 + +#define DP_TEST_SINK_MISC 0x246 +# define DP_TEST_CRC_SUPPORTED (1 << 5) +# define DP_TEST_COUNT_MASK 0xf + +#define DP_TEST_PHY_PATTERN 0x248 +#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250 +#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251 +#define DP_TEST_80BIT_CUSTOM_PATTERN_23_16 0x252 +#define DP_TEST_80BIT_CUSTOM_PATTERN_31_24 0x253 +#define DP_TEST_80BIT_CUSTOM_PATTERN_39_32 0x254 +#define DP_TEST_80BIT_CUSTOM_PATTERN_47_40 0x255 +#define DP_TEST_80BIT_CUSTOM_PATTERN_55_48 0x256 +#define DP_TEST_80BIT_CUSTOM_PATTERN_63_56 0x257 +#define DP_TEST_80BIT_CUSTOM_PATTERN_71_64 0x258 +#define DP_TEST_80BIT_CUSTOM_PATTERN_79_72 0x259 + +#define DP_TEST_RESPONSE 0x260 +# define DP_TEST_ACK (1 << 0) +# define DP_TEST_NAK (1 << 1) +# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) + +#define DP_TEST_EDID_CHECKSUM 0x261 + +#define DP_TEST_SINK 0x270 +# define DP_TEST_SINK_START (1 << 0) + +#define DP_FEC_STATUS 0x280 /* 1.4 */ +# define DP_FEC_DECODE_EN_DETECTED (1 << 0) +# define DP_FEC_DECODE_DIS_DETECTED (1 << 1) + +#define DP_FEC_ERROR_COUNT_LSB 0x0281 /* 1.4 */ + +#define DP_FEC_ERROR_COUNT_MSB 0x0282 /* 1.4 */ +# define DP_FEC_ERROR_COUNT_MASK 0x7F +# define DP_FEC_ERR_COUNT_VALID (1 << 7) + +#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */ +# define DP_PAYLOAD_TABLE_UPDATED (1 << 0) +# define DP_PAYLOAD_ACT_HANDLED (1 << 1) + +#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */ +/* up to ID_SLOT_63 at 0x2ff */ + +#define DP_SOURCE_OUI 0x300 +#define DP_SINK_OUI 0x400 +#define DP_BRANCH_OUI 0x500 +#define DP_BRANCH_ID 0x503 +#define DP_BRANCH_REVISION_START 0x509 +#define DP_BRANCH_HW_REV 0x509 +#define DP_BRANCH_SW_REV 0x50A + +#define DP_SET_POWER 0x600 +# define DP_SET_POWER_D0 0x1 +# define DP_SET_POWER_D3 0x2 +# define DP_SET_POWER_MASK 0x3 +# define DP_SET_POWER_D3_AUX_ON 0x5 + +#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */ +# define DP_EDP_11 0x00 +# define DP_EDP_12 0x01 +# define DP_EDP_13 0x02 +# define DP_EDP_14 0x03 + +#define DP_EDP_GENERAL_CAP_1 0x701 +# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2) +# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3) +# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4) +# define DP_EDP_FRC_ENABLE_CAP (1 << 5) +# define DP_EDP_COLOR_ENGINE_CAP (1 << 6) +# define DP_EDP_SET_POWER_CAP (1 << 7) + +#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702 +# 
define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2) +# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5) +# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7) + +#define DP_EDP_GENERAL_CAP_2 0x703 +# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0) + +#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ +# define DP_EDP_X_REGION_CAP_MASK (0xf << 0) +# define DP_EDP_X_REGION_CAP_SHIFT 0 +# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4) +# define DP_EDP_Y_REGION_CAP_SHIFT 4 + +#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720 +# define DP_EDP_BACKLIGHT_ENABLE (1 << 0) +# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1) +# define DP_EDP_FRC_ENABLE (1 << 2) +# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7) + +#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721 +# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3) +# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4) +# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5) +# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */ + +#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722 +#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723 + +#define DP_EDP_PWMGEN_BIT_COUNT 0x724 +#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725 +#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726 +# define DP_EDP_PWMGEN_BIT_COUNT_MASK (0x1f << 0) + +#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727 + +#define DP_EDP_BACKLIGHT_FREQ_SET 0x728 +# define DP_EDP_BACKLIGHT_FREQ_BASE_KHZ 27000 + +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c + +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f + +#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732 +#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733 + +#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */ +#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */ + +#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */ + +#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */ +/* 0-5 sink count */ +# define DP_SINK_COUNT_CP_READY (1 << 6) + +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */ + +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */ +# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0) +# define DP_LOCK_ACQUISITION_REQUEST (1 << 1) +# define DP_CEC_IRQ (1 << 2) + +#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */ + +#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ +# define DP_PSR_LINK_CRC_ERROR (1 << 0) +# define DP_PSR_RFB_STORAGE_ERROR (1 << 1) +# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */ + +#define DP_PSR_ESI 0x2007 /* XXX 1.2? 
*/ +# define DP_PSR_CAPS_CHANGE (1 << 0) + +#define DP_PSR_STATUS 0x2008 /* XXX 1.2? */ +# define DP_PSR_SINK_INACTIVE 0 +# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1 +# define DP_PSR_SINK_ACTIVE_RFB 2 +# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3 +# define DP_PSR_SINK_ACTIVE_RESYNC 4 +# define DP_PSR_SINK_INTERNAL_ERROR 7 +# define DP_PSR_SINK_STATE_MASK 0x07 + +#define DP_SYNCHRONIZATION_LATENCY_IN_SINK 0x2009 /* edp 1.4 */ +# define DP_MAX_RESYNC_FRAME_COUNT_MASK (0xf << 0) +# define DP_MAX_RESYNC_FRAME_COUNT_SHIFT 0 +# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4) +# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4 + +#define DP_LAST_RECEIVED_PSR_SDP 0x200a /* eDP 1.2 */ +# define DP_PSR_STATE_BIT (1 << 0) /* eDP 1.2 */ +# define DP_UPDATE_RFB_BIT (1 << 1) /* eDP 1.2 */ +# define DP_CRC_VALID_BIT (1 << 2) /* eDP 1.2 */ +# define DP_SU_VALID (1 << 3) /* eDP 1.4 */ +# define DP_FIRST_SCAN_LINE_SU_REGION (1 << 4) /* eDP 1.4 */ +# define DP_LAST_SCAN_LINE_SU_REGION (1 << 5) /* eDP 1.4 */ +# define DP_Y_COORDINATE_VALID (1 << 6) /* eDP 1.4a */ + +#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */ +# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0) + +#define DP_LANE0_1_STATUS_ESI 0x200c /* status same as 0x202 */ +#define DP_LANE2_3_STATUS_ESI 0x200d /* status same as 0x203 */ +#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */ +#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */ + +#define DP_DP13_DPCD_REV 0x2200 +#define DP_DP13_MAX_LINK_RATE 0x2201 + +#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */ +# define DP_GTC_CAP (1 << 0) /* DP 1.3 */ +# define DP_SST_SPLIT_SDP_CAP (1 << 1) /* DP 1.4 */ +# define DP_AV_SYNC_CAP (1 << 2) /* DP 1.3 */ +# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED (1 << 3) /* DP 1.3 */ +# define DP_VSC_EXT_VESA_SDP_SUPPORTED (1 << 4) /* DP 1.4 */ +# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED (1 << 5) /* DP 1.4 */ +# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */ +# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */ + +/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */ +#define DP_CEC_TUNNELING_CAPABILITY 0x3000 +# define DP_CEC_TUNNELING_CAPABLE (1 << 0) +# define DP_CEC_SNOOPING_CAPABLE (1 << 1) +# define DP_CEC_MULTIPLE_LA_CAPABLE (1 << 2) + +#define DP_CEC_TUNNELING_CONTROL 0x3001 +# define DP_CEC_TUNNELING_ENABLE (1 << 0) +# define DP_CEC_SNOOPING_ENABLE (1 << 1) + +#define DP_CEC_RX_MESSAGE_INFO 0x3002 +# define DP_CEC_RX_MESSAGE_LEN_MASK (0xf << 0) +# define DP_CEC_RX_MESSAGE_LEN_SHIFT 0 +# define DP_CEC_RX_MESSAGE_HPD_STATE (1 << 4) +# define DP_CEC_RX_MESSAGE_HPD_LOST (1 << 5) +# define DP_CEC_RX_MESSAGE_ACKED (1 << 6) +# define DP_CEC_RX_MESSAGE_ENDED (1 << 7) + +#define DP_CEC_TX_MESSAGE_INFO 0x3003 +# define DP_CEC_TX_MESSAGE_LEN_MASK (0xf << 0) +# define DP_CEC_TX_MESSAGE_LEN_SHIFT 0 +# define DP_CEC_TX_RETRY_COUNT_MASK (0x7 << 4) +# define DP_CEC_TX_RETRY_COUNT_SHIFT 4 +# define DP_CEC_TX_MESSAGE_SEND (1 << 7) + +#define DP_CEC_TUNNELING_IRQ_FLAGS 0x3004 +# define DP_CEC_RX_MESSAGE_INFO_VALID (1 << 0) +# define DP_CEC_RX_MESSAGE_OVERFLOW (1 << 1) +# define DP_CEC_TX_MESSAGE_SENT (1 << 4) +# define DP_CEC_TX_LINE_ERROR (1 << 5) +# define DP_CEC_TX_ADDRESS_NACK_ERROR (1 << 6) +# define DP_CEC_TX_DATA_NACK_ERROR (1 << 7) + +#define DP_CEC_LOGICAL_ADDRESS_MASK 0x300E /* 0x300F word */ +# define DP_CEC_LOGICAL_ADDRESS_0 (1 << 0) +# define DP_CEC_LOGICAL_ADDRESS_1 (1 << 1) +# define DP_CEC_LOGICAL_ADDRESS_2 (1 << 2) +# define DP_CEC_LOGICAL_ADDRESS_3 (1 
<< 3) +# define DP_CEC_LOGICAL_ADDRESS_4 (1 << 4) +# define DP_CEC_LOGICAL_ADDRESS_5 (1 << 5) +# define DP_CEC_LOGICAL_ADDRESS_6 (1 << 6) +# define DP_CEC_LOGICAL_ADDRESS_7 (1 << 7) +#define DP_CEC_LOGICAL_ADDRESS_MASK_2 0x300F /* 0x300E word */ +# define DP_CEC_LOGICAL_ADDRESS_8 (1 << 0) +# define DP_CEC_LOGICAL_ADDRESS_9 (1 << 1) +# define DP_CEC_LOGICAL_ADDRESS_10 (1 << 2) +# define DP_CEC_LOGICAL_ADDRESS_11 (1 << 3) +# define DP_CEC_LOGICAL_ADDRESS_12 (1 << 4) +# define DP_CEC_LOGICAL_ADDRESS_13 (1 << 5) +# define DP_CEC_LOGICAL_ADDRESS_14 (1 << 6) +# define DP_CEC_LOGICAL_ADDRESS_15 (1 << 7) + +#define DP_CEC_RX_MESSAGE_BUFFER 0x3010 +#define DP_CEC_TX_MESSAGE_BUFFER 0x3020 +#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10 + +#define DP_AUX_HDCP_BKSV 0x68000 +#define DP_AUX_HDCP_RI_PRIME 0x68005 +#define DP_AUX_HDCP_AKSV 0x68007 +#define DP_AUX_HDCP_AN 0x6800C +#define DP_AUX_HDCP_V_PRIME(h) (0x68014 + h * 4) +#define DP_AUX_HDCP_BCAPS 0x68028 +# define DP_BCAPS_REPEATER_PRESENT BIT(1) +# define DP_BCAPS_HDCP_CAPABLE BIT(0) +#define DP_AUX_HDCP_BSTATUS 0x68029 +# define DP_BSTATUS_REAUTH_REQ BIT(3) +# define DP_BSTATUS_LINK_FAILURE BIT(2) +# define DP_BSTATUS_R0_PRIME_READY BIT(1) +# define DP_BSTATUS_READY BIT(0) +#define DP_AUX_HDCP_BINFO 0x6802A +#define DP_AUX_HDCP_KSV_FIFO 0x6802C +#define DP_AUX_HDCP_AINFO 0x6803B + +/* DP 1.2 Sideband message defines */ +/* peer device type - DP 1.2a Table 2-92 */ +#define DP_PEER_DEVICE_NONE 0x0 +#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1 +#define DP_PEER_DEVICE_MST_BRANCHING 0x2 +#define DP_PEER_DEVICE_SST_SINK 0x3 +#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4 + +/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */ +#define DP_LINK_ADDRESS 0x01 +#define DP_CONNECTION_STATUS_NOTIFY 0x02 +#define DP_ENUM_PATH_RESOURCES 0x10 +#define DP_ALLOCATE_PAYLOAD 0x11 +#define DP_QUERY_PAYLOAD 0x12 +#define DP_RESOURCE_STATUS_NOTIFY 0x13 +#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14 +#define DP_REMOTE_DPCD_READ 0x20 +#define DP_REMOTE_DPCD_WRITE 0x21 +#define DP_REMOTE_I2C_READ 0x22 +#define DP_REMOTE_I2C_WRITE 0x23 +#define DP_POWER_UP_PHY 0x24 +#define DP_POWER_DOWN_PHY 0x25 +#define DP_SINK_EVENT_NOTIFY 0x30 +#define DP_QUERY_STREAM_ENC_STATUS 0x38 + +/* DP 1.2 MST sideband nak reasons - table 2.84 */ +#define DP_NAK_WRITE_FAILURE 0x01 +#define DP_NAK_INVALID_READ 0x02 +#define DP_NAK_CRC_FAILURE 0x03 +#define DP_NAK_BAD_PARAM 0x04 +#define DP_NAK_DEFER 0x05 +#define DP_NAK_LINK_FAILURE 0x06 +#define DP_NAK_NO_RESOURCES 0x07 +#define DP_NAK_DPCD_FAIL 0x08 +#define DP_NAK_I2C_NAK 0x09 +#define DP_NAK_ALLOCATE_FAIL 0x0a + +#define MODE_I2C_START 1 +#define MODE_I2C_WRITE 2 +#define MODE_I2C_READ 4 +#define MODE_I2C_STOP 8 + +/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ +#define DP_MST_PHYSICAL_PORT_0 0 +#define DP_MST_LOGICAL_PORT_0 8 + +#define DP_LINK_STATUS_SIZE 6 +bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); +u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); + +#define DP_BRANCH_OUI_HEADER_SIZE 0xc +#define DP_RECEIVER_CAP_SIZE 0xf +#define EDP_PSR_RECEIVER_CAP_SIZE 2 +#define EDP_DISPLAY_CTL_CAP_SIZE 3 + +void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); + +u8 
drm_dp_link_rate_to_bw_code(int link_rate); +int drm_dp_bw_code_to_link_rate(u8 link_bw); + +#define DP_SDP_AUDIO_TIMESTAMP 0x01 +#define DP_SDP_AUDIO_STREAM 0x02 +#define DP_SDP_EXTENSION 0x04 /* DP 1.1 */ +#define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */ +#define DP_SDP_ISRC 0x06 /* DP 1.2 */ +#define DP_SDP_VSC 0x07 /* DP 1.2 */ +#define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */ +#define DP_SDP_PPS 0x10 /* DP 1.4 */ +#define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */ +#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */ +/* 0x80+ CEA-861 infoframe types */ + +struct dp_sdp_header { + u8 HB0; /* Secondary Data Packet ID */ + u8 HB1; /* Secondary Data Packet Type */ + u8 HB2; /* Secondary Data Packet Specific header, Byte 0 */ + u8 HB3; /* Secondary Data packet Specific header, Byte 1 */ +} __packed; + +#define EDP_SDP_HEADER_REVISION_MASK 0x1F +#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F + +struct edp_vsc_psr { + struct dp_sdp_header sdp_header; + u8 DB0; /* Stereo Interface */ + u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */ + u8 DB2; /* CRC value bits 7:0 of the R or Cr component */ + u8 DB3; /* CRC value bits 15:8 of the R or Cr component */ + u8 DB4; /* CRC value bits 7:0 of the G or Y component */ + u8 DB5; /* CRC value bits 15:8 of the G or Y component */ + u8 DB6; /* CRC value bits 7:0 of the B or Cb component */ + u8 DB7; /* CRC value bits 15:8 of the B or Cb component */ + u8 DB8_31[24]; /* Reserved */ +} __packed; + +#define EDP_VSC_PSR_STATE_ACTIVE (1<<0) +#define EDP_VSC_PSR_UPDATE_RFB (1<<1) +#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2) + +int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]); + +static inline int +drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); +} + +static inline u8 +drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; +} + +static inline bool +drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 && + (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); +} + +static inline bool +drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x12 && + dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; +} + +static inline bool +drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x14 && + dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED; +} + +static inline u8 +drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return (dpcd[DP_DPCD_REV] >= 0x14) ? 
DP_TRAINING_PATTERN_MASK_1_4 : + DP_TRAINING_PATTERN_MASK; +} + +static inline bool +drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT; +} + +/* + * DisplayPort AUX channel + */ + +/** + * struct drm_dp_aux_msg - DisplayPort AUX channel transaction + * @address: address of the (first) register to access + * @request: contains the type of transaction (see DP_AUX_* macros) + * @reply: upon completion, contains the reply type of the transaction + * @buffer: pointer to a transmission or reception buffer + * @size: size of @buffer + */ +struct drm_dp_aux_msg { + unsigned int address; + u8 request; + u8 reply; + void *buffer; + size_t size; +}; + +struct cec_adapter; +struct edid; + +/** + * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX + * @lock: mutex protecting this struct + * @adap: the CEC adapter for CEC-Tunneling-over-AUX support. + * @name: name of the CEC adapter + * @parent: parent device of the CEC adapter + * @unregister_work: unregister the CEC adapter + */ +struct drm_dp_aux_cec { + struct mutex lock; + struct cec_adapter *adap; + const char *name; + struct device *parent; + struct delayed_work unregister_work; +}; + +/** + * struct drm_dp_aux - DisplayPort AUX channel + * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter + * @ddc: I2C adapter that can be used for I2C-over-AUX communication + * @dev: pointer to struct device that is the parent for this AUX channel + * @crtc: backpointer to the crtc that is currently using this AUX channel + * @hw_mutex: internal mutex used for locking transfers + * @crc_work: worker that captures CRCs for each frame + * @crc_count: counter of captured frame CRCs + * @transfer: transfers a message representing a single AUX transaction + * + * The .dev field should be set to a pointer to the device that implements + * the AUX channel. + * + * The .name field may be used to specify the name of the I2C adapter. If set to + * NULL, dev_name() of .dev will be used. + * + * Drivers provide a hardware-specific implementation of how transactions + * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg + * structure describing the transaction is passed into this function. Upon + * success, the implementation should return the number of payload bytes + * that were transferred, or a negative error-code on failure. Helpers + * propagate errors from the .transfer() function, with the exception of + * the -EBUSY error, which causes a transaction to be retried. On a short, + * helpers will return -EPROTO to make it simpler to check for failure. + * + * An AUX channel can also be used to transport I2C messages to a sink. A + * typical application of that is to access an EDID that's present in the + * sink device. The .transfer() function can also be used to execute such + * transactions. The drm_dp_aux_register() function registers an I2C + * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers + * should call drm_dp_aux_unregister() to remove the I2C adapter. + * The I2C adapter uses long transfers by default; if a partial response is + * received, the adapter will drop down to the size given by the partial + * response for this transaction only. + * + * Note that the aux helper code assumes that the .transfer() function + * only modifies the reply field of the drm_dp_aux_msg structure. The + * retry logic and i2c helpers assume this is the case. 
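 *
 * A minimal wiring sketch, for illustration only and not part of the
 * upstream header; foo_aux_transfer(), the foo pointer and the hardware
 * access are placeholders:
 *
 *	static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
 *					struct drm_dp_aux_msg *msg)
 *	{
 *		// program the request from msg->request, msg->address and
 *		// msg->buffer / msg->size, then wait for completion
 *		msg->reply = DP_AUX_NATIVE_REPLY_ACK;	// as reported by the hw
 *		return msg->size;	// payload bytes actually transferred
 *	}
 *
 *	foo->aux.name = "foo AUX A";
 *	foo->aux.dev = foo->drm->dev;
 *	foo->aux.transfer = foo_aux_transfer;
 *	drm_dp_aux_init(&foo->aux);
 *	// ... drm_dp_aux_register() once the connector exists; after that
 *	// native DPCD access works, e.g.:
 *	ret = drm_dp_dpcd_readb(&foo->aux, DP_DPCD_REV, &rev);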
+ */ +struct drm_dp_aux { + const char *name; + struct i2c_adapter ddc; + struct device *dev; + struct drm_crtc *crtc; + struct mutex hw_mutex; + struct work_struct crc_work; + u8 crc_count; + ssize_t (*transfer)(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg); + /** + * @i2c_nack_count: Counts I2C NACKs, used for DP validation. + */ + unsigned i2c_nack_count; + /** + * @i2c_defer_count: Counts I2C DEFERs, used for DP validation. + */ + unsigned i2c_defer_count; + /** + * @cec: struct containing fields used for CEC-Tunneling-over-AUX. + */ + struct drm_dp_aux_cec cec; +}; + +ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size); +ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size); + +/** + * drm_dp_dpcd_readb() - read a single byte from the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to read + * @valuep: location where the value of the register will be stored + * + * Returns the number of bytes transferred (1) on success, or a negative + * error code on failure. + */ +static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, + unsigned int offset, u8 *valuep) +{ + return drm_dp_dpcd_read(aux, offset, valuep, 1); +} + +/** + * drm_dp_dpcd_writeb() - write a single byte to the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to write + * @value: value to write to the register + * + * Returns the number of bytes transferred (1) on success, or a negative + * error code on failure. + */ +static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, + unsigned int offset, u8 value) +{ + return drm_dp_dpcd_write(aux, offset, &value, 1); +} + +int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, + u8 status[DP_LINK_STATUS_SIZE]); + +/* + * DisplayPort link + */ +#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) + +struct drm_dp_link { + unsigned char revision; + unsigned int rate; + unsigned int num_lanes; + unsigned long capabilities; +}; + +int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); +void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], struct drm_dp_aux *aux); + +void drm_dp_aux_init(struct drm_dp_aux *aux); +int drm_dp_aux_register(struct drm_dp_aux *aux); +void drm_dp_aux_unregister(struct drm_dp_aux *aux); + +int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); +int drm_dp_stop_crc(struct drm_dp_aux *aux); + +struct drm_dp_dpcd_ident { + u8 oui[3]; + u8 device_id[6]; + u8 hw_rev; + u8 sw_major_rev; + u8 sw_minor_rev; +} __packed; + +/** + * struct drm_dp_desc - DP branch/sink device descriptor + * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). + * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. 
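 *
 * A hedged usage sketch (not upstream code); apply_m_n_workaround() and the
 * foo names are placeholders:
 *
 *	struct drm_dp_desc desc;
 *
 *	if (!drm_dp_read_desc(&foo->aux, &desc, drm_dp_is_branch(foo->dpcd)) &&
 *	    drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_LIMITED_M_N))
 *		apply_m_n_workaround(foo);	// clamp Mvid/Nvid to 16 bits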
+ */ +struct drm_dp_desc { + struct drm_dp_dpcd_ident ident; + u32 quirks; +}; + +int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, + bool is_branch); + +/** + * enum drm_dp_quirk - Display Port sink/branch device specific quirks + * + * Display Port sink and branch devices in the wild have a variety of bugs, try + * to collect them here. The quirks are shared, but it's up to the drivers to + * implement workarounds for them. + */ +enum drm_dp_quirk { + /** + * @DP_DPCD_QUIRK_LIMITED_M_N: + * + * The device requires main link attributes Mvid and Nvid to be limited + * to 16 bits. + */ + DP_DPCD_QUIRK_LIMITED_M_N, +}; + +/** + * drm_dp_has_quirk() - does the DP device have a specific quirk + * @desc: Device decriptor filled by drm_dp_read_desc() + * @quirk: Quirk to query for + * + * Return true if DP device identified by @desc has @quirk. + */ +static inline bool +drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) +{ + return desc->quirks & BIT(quirk); +} + +#ifdef CONFIG_DRM_DP_CEC +void drm_dp_cec_irq(struct drm_dp_aux *aux); +void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name, + struct device *parent); +void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); +void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); +void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); +#else +static inline void drm_dp_cec_irq(struct drm_dp_aux *aux) +{ +} + +static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux, + const char *name, + struct device *parent) +{ +} + +static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) +{ +} + +static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, + const struct edid *edid) +{ +} + +static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux) +{ +} + +#endif + +#endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h new file mode 100644 index 000000000..0f7439f0b --- /dev/null +++ b/include/drm/drm_dp_mst_helper.h @@ -0,0 +1,637 @@ +/* + * Copyright © 2014 Red Hat. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ +#ifndef _DRM_DP_MST_HELPER_H_ +#define _DRM_DP_MST_HELPER_H_ + +#include +#include +#include + +struct drm_dp_mst_branch; + +/** + * struct drm_dp_vcpi - Virtual Channel Payload Identifier + * @vcpi: Virtual channel ID. 
+ * @pbn: Payload Bandwidth Number for this channel + * @aligned_pbn: PBN aligned with slot size + * @num_slots: number of slots for this PBN + */ +struct drm_dp_vcpi { + int vcpi; + int pbn; + int aligned_pbn; + int num_slots; +}; + +/** + * struct drm_dp_mst_port - MST port + * @kref: reference count for this port. + * @port_num: port number + * @input: if this port is an input port. + * @mcs: message capability status - DP 1.2 spec. + * @ddps: DisplayPort Device Plug Status - DP 1.2 + * @pdt: Peer Device Type + * @ldps: Legacy Device Plug Status + * @dpcd_rev: DPCD revision of device on this port + * @num_sdp_streams: Number of simultaneous streams + * @num_sdp_stream_sinks: Number of stream sinks + * @available_pbn: Available bandwidth for this port. + * @next: link to next port on this branch device + * @mstb: branch device attach below this port + * @aux: i2c aux transport to talk to device connected to this port. + * @parent: branch device parent of this port + * @vcpi: Virtual Channel Payload info for this port. + * @connector: DRM connector this port is connected to. + * @mgr: topology manager this port lives under. + * + * This structure represents an MST port endpoint on a device somewhere + * in the MST topology. + */ +struct drm_dp_mst_port { + struct kref kref; + + u8 port_num; + bool input; + bool mcs; + bool ddps; + u8 pdt; + bool ldps; + u8 dpcd_rev; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + uint16_t available_pbn; + struct list_head next; + struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */ + struct drm_dp_aux aux; /* i2c bus for this port? */ + struct drm_dp_mst_branch *parent; + + struct drm_dp_vcpi vcpi; + struct drm_connector *connector; + struct drm_dp_mst_topology_mgr *mgr; + + /** + * @cached_edid: for DP logical ports - make tiling work by ensuring + * that the EDID for all connectors is read immediately. + */ + struct edid *cached_edid; + /** + * @has_audio: Tracks whether the sink connector to this port is + * audio-capable. + */ + bool has_audio; +}; + +/** + * struct drm_dp_mst_branch - MST branch device. + * @kref: reference count for this port. + * @rad: Relative Address to talk to this branch device. + * @lct: Link count total to talk to this branch device. + * @num_ports: number of ports on the branch. + * @msg_slots: one bit per transmitted msg slot. + * @ports: linked list of ports on this branch. + * @port_parent: pointer to the port parent, NULL if toplevel. + * @mgr: topology manager for this branch device. + * @tx_slots: transmission slots for this device. + * @last_seqno: last sequence number used to talk to this. + * @link_address_sent: if a link address message has been sent to this device yet. + * @guid: guid for DP 1.2 branch device. port under this branch can be + * identified by port #. + * + * This structure represents an MST branch device, there is one + * primary branch device at the root, along with any other branches connected + * to downstream port of parent branches. 
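 *
 * For illustration only (not upstream code): an MST aware driver normally
 * forwards connector probing for a port somewhere below a branch device to
 * the topology manager; foo_connector and to_foo_connector() are made-up
 * names.
 *
 *	static enum drm_connector_status
 *	foo_mst_detect(struct drm_connector *connector, bool force)
 *	{
 *		struct foo_connector *c = to_foo_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, c->mgr, c->port);
 *	}
 *
 * with EDID-based mode probing handled the same way through
 * drm_dp_mst_get_edid().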
+ */ +struct drm_dp_mst_branch { + struct kref kref; + u8 rad[8]; + u8 lct; + int num_ports; + + int msg_slots; + struct list_head ports; + + /* list of tx ops queue for this port */ + struct drm_dp_mst_port *port_parent; + struct drm_dp_mst_topology_mgr *mgr; + + /* slots are protected by mstb->mgr->qlock */ + struct drm_dp_sideband_msg_tx *tx_slots[2]; + int last_seqno; + bool link_address_sent; + + /* global unique identifier to identify branch devices */ + u8 guid[16]; +}; + + +/* sideband msg header - not bit struct */ +struct drm_dp_sideband_msg_hdr { + u8 lct; + u8 lcr; + u8 rad[8]; + bool broadcast; + bool path_msg; + u8 msg_len; + bool somt; + bool eomt; + bool seqno; +}; + +struct drm_dp_nak_reply { + u8 guid[16]; + u8 reason; + u8 nak_data; +}; + +struct drm_dp_link_address_ack_reply { + u8 guid[16]; + u8 nports; + struct drm_dp_link_addr_reply_port { + bool input_port; + u8 peer_device_type; + u8 port_number; + bool mcs; + bool ddps; + bool legacy_device_plug_status; + u8 dpcd_revision; + u8 peer_guid[16]; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + } ports[16]; +}; + +struct drm_dp_remote_dpcd_read_ack_reply { + u8 port_number; + u8 num_bytes; + u8 bytes[255]; +}; + +struct drm_dp_remote_dpcd_write_ack_reply { + u8 port_number; +}; + +struct drm_dp_remote_dpcd_write_nak_reply { + u8 port_number; + u8 reason; + u8 bytes_written_before_failure; +}; + +struct drm_dp_remote_i2c_read_ack_reply { + u8 port_number; + u8 num_bytes; + u8 bytes[255]; +}; + +struct drm_dp_remote_i2c_read_nak_reply { + u8 port_number; + u8 nak_reason; + u8 i2c_nak_transaction; +}; + +struct drm_dp_remote_i2c_write_ack_reply { + u8 port_number; +}; + + +struct drm_dp_sideband_msg_rx { + u8 chunk[48]; + u8 msg[256]; + u8 curchunk_len; + u8 curchunk_idx; /* chunk we are parsing now */ + u8 curchunk_hdrlen; + u8 curlen; /* total length of the msg */ + bool have_somt; + bool have_eomt; + struct drm_dp_sideband_msg_hdr initial_hdr; +}; + +#define DRM_DP_MAX_SDP_STREAMS 16 +struct drm_dp_allocate_payload { + u8 port_number; + u8 number_sdp_streams; + u8 vcpi; + u16 pbn; + u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS]; +}; + +struct drm_dp_allocate_payload_ack_reply { + u8 port_number; + u8 vcpi; + u16 allocated_pbn; +}; + +struct drm_dp_connection_status_notify { + u8 guid[16]; + u8 port_number; + bool legacy_device_plug_status; + bool displayport_device_plug_status; + bool message_capability_status; + bool input_port; + u8 peer_device_type; +}; + +struct drm_dp_remote_dpcd_read { + u8 port_number; + u32 dpcd_address; + u8 num_bytes; +}; + +struct drm_dp_remote_dpcd_write { + u8 port_number; + u32 dpcd_address; + u8 num_bytes; + u8 *bytes; +}; + +#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 +struct drm_dp_remote_i2c_read { + u8 num_transactions; + u8 port_number; + struct { + u8 i2c_dev_id; + u8 num_bytes; + u8 *bytes; + u8 no_stop_bit; + u8 i2c_transaction_delay; + } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; + u8 read_i2c_device_id; + u8 num_bytes_read; +}; + +struct drm_dp_remote_i2c_write { + u8 port_number; + u8 write_i2c_device_id; + u8 num_bytes; + u8 *bytes; +}; + +/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */ +struct drm_dp_port_number_req { + u8 port_number; +}; + +struct drm_dp_enum_path_resources_ack_reply { + u8 port_number; + u16 full_payload_bw_number; + u16 avail_payload_bw_number; +}; + +/* covers POWER_DOWN_PHY, POWER_UP_PHY */ +struct drm_dp_port_number_rep { + u8 port_number; +}; + +struct drm_dp_query_payload { + u8 port_number; + u8 vcpi; +}; + +struct 
drm_dp_resource_status_notify { + u8 port_number; + u8 guid[16]; + u16 available_pbn; +}; + +struct drm_dp_query_payload_ack_reply { + u8 port_number; + u16 allocated_pbn; +}; + +struct drm_dp_sideband_msg_req_body { + u8 req_type; + union ack_req { + struct drm_dp_connection_status_notify conn_stat; + struct drm_dp_port_number_req port_num; + struct drm_dp_resource_status_notify resource_stat; + + struct drm_dp_query_payload query_payload; + struct drm_dp_allocate_payload allocate_payload; + + struct drm_dp_remote_dpcd_read dpcd_read; + struct drm_dp_remote_dpcd_write dpcd_write; + + struct drm_dp_remote_i2c_read i2c_read; + struct drm_dp_remote_i2c_write i2c_write; + } u; +}; + +struct drm_dp_sideband_msg_reply_body { + u8 reply_type; + u8 req_type; + union ack_replies { + struct drm_dp_nak_reply nak; + struct drm_dp_link_address_ack_reply link_addr; + struct drm_dp_port_number_rep port_number; + + struct drm_dp_enum_path_resources_ack_reply path_resources; + struct drm_dp_allocate_payload_ack_reply allocate_payload; + struct drm_dp_query_payload_ack_reply query_payload; + + struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack; + struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack; + struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack; + + struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack; + struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack; + struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack; + } u; +}; + +/* msg is queued to be put into a slot */ +#define DRM_DP_SIDEBAND_TX_QUEUED 0 +/* msg has started transmitting on a slot - still on msgq */ +#define DRM_DP_SIDEBAND_TX_START_SEND 1 +/* msg has finished transmitting on a slot - removed from msgq only in slot */ +#define DRM_DP_SIDEBAND_TX_SENT 2 +/* msg has received a response - removed from slot */ +#define DRM_DP_SIDEBAND_TX_RX 3 +#define DRM_DP_SIDEBAND_TX_TIMEOUT 4 + +struct drm_dp_sideband_msg_tx { + u8 msg[256]; + u8 chunk[48]; + u8 cur_offset; + u8 cur_len; + struct drm_dp_mst_branch *dst; + struct list_head next; + int seqno; + int state; + bool path_msg; + struct drm_dp_sideband_msg_reply_body reply; +}; + +/* sideband msg handler */ +struct drm_dp_mst_topology_mgr; +struct drm_dp_mst_topology_cbs { + /* create a connector for a port */ + struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + void (*register_connector)(struct drm_connector *connector); + void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector); + void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); + +}; + +#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8) + +#define DP_PAYLOAD_LOCAL 1 +#define DP_PAYLOAD_REMOTE 2 +#define DP_PAYLOAD_DELETE_LOCAL 3 + +struct drm_dp_payload { + int payload_state; + int start_slot; + int num_slots; + int vcpi; +}; + +#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) + +struct drm_dp_mst_topology_state { + struct drm_private_state base; + int avail_slots; + struct drm_atomic_state *state; + struct drm_dp_mst_topology_mgr *mgr; +}; + +#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) + +/** + * struct drm_dp_mst_topology_mgr - DisplayPort MST manager + * + * This struct represents the toplevel displayport MST topology manager. + * There should be one instance of this for every MST capable DP connector + * on the GPU. 
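 *
 * A rough, illustrative sketch only (not upstream code); the transaction
 * and payload limits and all foo names are placeholders:
 *
 *	drm_dp_mst_topology_mgr_init(&foo->mst_mgr, drm_dev, &foo->aux,
 *				     16, 4, foo->connector.base.id);
 *
 *	// once an MST capable sink is detected:
 *	drm_dp_mst_topology_mgr_set_mst(&foo->mst_mgr, true);
 *
 *	// from the short-pulse/HPD handler, after reading the ESI registers:
 *	drm_dp_mst_hpd_irq(&foo->mst_mgr, esi, &handled);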
+ */ +struct drm_dp_mst_topology_mgr { + /** + * @base: Base private object for atomic + */ + struct drm_private_obj base; + + /** + * @dev: device pointer for adding i2c devices etc. + */ + struct drm_device *dev; + /** + * @cbs: callbacks for connector addition and destruction. + */ + const struct drm_dp_mst_topology_cbs *cbs; + /** + * @max_dpcd_transaction_bytes: maximum number of bytes to read/write + * in one go. + */ + int max_dpcd_transaction_bytes; + /** + * @aux: AUX channel for the DP MST connector this topolgy mgr is + * controlling. + */ + struct drm_dp_aux *aux; + /** + * @max_payloads: maximum number of payloads the GPU can generate. + */ + int max_payloads; + /** + * @conn_base_id: DRM connector ID this mgr is connected to. Only used + * to build the MST connector path value. + */ + int conn_base_id; + + /** + * @down_rep_recv: Message receiver state for down replies. This and + * @up_req_recv are only ever access from the work item, which is + * serialised. + */ + struct drm_dp_sideband_msg_rx down_rep_recv; + /** + * @up_req_recv: Message receiver state for up requests. This and + * @down_rep_recv are only ever access from the work item, which is + * serialised. + */ + struct drm_dp_sideband_msg_rx up_req_recv; + + /** + * @lock: protects mst state, primary, dpcd. + */ + struct mutex lock; + + /** + * @mst_state: If this manager is enabled for an MST capable port. False + * if no MST sink/branch devices is connected. + */ + bool mst_state; + /** + * @mst_primary: Pointer to the primary/first branch device. + */ + struct drm_dp_mst_branch *mst_primary; + + /** + * @dpcd: Cache of DPCD for primary port. + */ + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + /** + * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0. + */ + u8 sink_count; + /** + * @pbn_div: PBN to slots divisor. + */ + int pbn_div; + + /** + * @state: State information for topology manager + */ + struct drm_dp_mst_topology_state *state; + + /** + * @funcs: Atomic helper callbacks + */ + const struct drm_private_state_funcs *funcs; + + /** + * @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.txslost and + * &drm_dp_sideband_msg_tx.state once they are queued + */ + struct mutex qlock; + /** + * @tx_msg_downq: List of pending down replies. + */ + struct list_head tx_msg_downq; + + /** + * @payload_lock: Protect payload information. + */ + struct mutex payload_lock; + /** + * @proposed_vcpis: Array of pointers for the new VCPI allocation. The + * VCPI structure itself is &drm_dp_mst_port.vcpi. + */ + struct drm_dp_vcpi **proposed_vcpis; + /** + * @payloads: Array of payloads. + */ + struct drm_dp_payload *payloads; + /** + * @payload_mask: Elements of @payloads actually in use. Since + * reallocation of active outputs isn't possible gaps can be created by + * disabling outputs out of order compared to how they've been enabled. + */ + unsigned long payload_mask; + /** + * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis. + */ + unsigned long vcpi_mask; + + /** + * @tx_waitq: Wait to queue stall for the tx worker. + */ + wait_queue_head_t tx_waitq; + /** + * @work: Probe work. + */ + struct work_struct work; + /** + * @tx_work: Sideband transmit worker. This can nest within the main + * @work worker for each transaction @work launches. + */ + struct work_struct tx_work; + + /** + * @destroy_connector_list: List of to be destroyed connectors. + */ + struct list_head destroy_connector_list; + /** + * @destroy_connector_lock: Protects @connector_list. 
+ */ + struct mutex destroy_connector_lock; + /** + * @destroy_connector_work: Work item to destroy connectors. Needed to + * avoid locking inversion. + */ + struct work_struct destroy_connector_work; +}; + +int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, + struct drm_device *dev, struct drm_dp_aux *aux, + int max_dpcd_transaction_bytes, + int max_payloads, int conn_base_id); + +void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); + + +int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); + + +int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); + + +enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + +bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); +struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + + +int drm_dp_calc_pbn_mode(int clock, int bpp); + + +bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, int pbn, int slots); + +int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + + +void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + + +void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + + +int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, + int pbn); + + +int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr); + + +int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr); + +int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_dump_topology(struct seq_file *m, + struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); +int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); +struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, int pbn); +int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + int slots); +int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, bool power_up); + +#endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h new file mode 100644 index 000000000..152b3055e --- /dev/null +++ b/include/drm/drm_drv.h @@ -0,0 +1,685 @@ +/* + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * Copyright 2016 Intel Corp. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_DRV_H_ +#define _DRM_DRV_H_ + +#include +#include + +#include + +struct drm_file; +struct drm_gem_object; +struct drm_master; +struct drm_minor; +struct dma_buf_attachment; +struct drm_display_mode; +struct drm_mode_create_dumb; +struct drm_printer; + +/* driver capabilities and requirements mask */ +#define DRIVER_USE_AGP 0x1 +#define DRIVER_LEGACY 0x2 +#define DRIVER_PCI_DMA 0x8 +#define DRIVER_SG 0x10 +#define DRIVER_HAVE_DMA 0x20 +#define DRIVER_HAVE_IRQ 0x40 +#define DRIVER_IRQ_SHARED 0x80 +#define DRIVER_GEM 0x1000 +#define DRIVER_MODESET 0x2000 +#define DRIVER_PRIME 0x4000 +#define DRIVER_RENDER 0x8000 +#define DRIVER_ATOMIC 0x10000 +#define DRIVER_KMS_LEGACY_CONTEXT 0x20000 +#define DRIVER_SYNCOBJ 0x40000 +#define DRIVER_PREFER_XBGR_30BPP 0x80000 + +/** + * struct drm_driver - DRM driver structure + * + * This structure represent the common code for a family of cards. There will + * one drm_device for each card present in this family. It contains lots of + * vfunc entries, and a pile of those probably should be moved to more + * appropriate places like &drm_mode_config_funcs or into a new operations + * structure for GEM drivers. + */ +struct drm_driver { + /** + * @load: + * + * Backward-compatible driver callback to complete + * initialization steps after the driver is registered. For + * this reason, may suffer from race conditions and its use is + * deprecated for new drivers. It is therefore only supported + * for existing drivers not yet converted to the new scheme. + * See drm_dev_init() and drm_dev_register() for proper and + * race-free way to set up a &struct drm_device. + * + * This is deprecated, do not use! + * + * Returns: + * + * Zero on success, non-zero value on failure. + */ + int (*load) (struct drm_device *, unsigned long flags); + + /** + * @open: + * + * Driver callback when a new &struct drm_file is opened. Useful for + * setting up driver-private data structures like buffer allocators, + * execution contexts or similar things. Such driver-private resources + * must be released again in @postclose. + * + * Since the display/modeset side of DRM can only be owned by exactly + * one &struct drm_file (see &drm_file.is_master and &drm_device.master) + * there should never be a need to set up any modeset related resources + * in this callback. Doing so would be a driver design bug. 
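 *
 * A hedged sketch of the usual pattern; struct foo_file is a made-up,
 * driver-private type, and the matching kfree() belongs in @postclose:
 *
 *	static int foo_open(struct drm_device *dev, struct drm_file *file)
 *	{
 *		struct foo_file *fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
 *
 *		if (!fpriv)
 *			return -ENOMEM;
 *
 *		file->driver_priv = fpriv;
 *		return 0;
 *	}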
+ * + * Returns: + * + * 0 on success, a negative error code on failure, which will be + * promoted to userspace as the result of the open() system call. + */ + int (*open) (struct drm_device *, struct drm_file *); + + /** + * @postclose: + * + * One of the driver callbacks when a new &struct drm_file is closed. + * Useful for tearing down driver-private data structures allocated in + * @open like buffer allocators, execution contexts or similar things. + * + * Since the display/modeset side of DRM can only be owned by exactly + * one &struct drm_file (see &drm_file.is_master and &drm_device.master) + * there should never be a need to tear down any modeset related + * resources in this callback. Doing so would be a driver design bug. + */ + void (*postclose) (struct drm_device *, struct drm_file *); + + /** + * @lastclose: + * + * Called when the last &struct drm_file has been closed and there's + * currently no userspace client for the &struct drm_device. + * + * Modern drivers should only use this to force-restore the fbdev + * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked(). + * Anything else would indicate there's something seriously wrong. + * Modern drivers can also use this to execute delayed power switching + * state changes, e.g. in conjunction with the :ref:`vga_switcheroo` + * infrastructure. + * + * This is called after @postclose hook has been called. + * + * NOTE: + * + * All legacy drivers use this callback to de-initialize the hardware. + * This is purely because of the shadow-attach model, where the DRM + * kernel driver does not really own the hardware. Instead ownershipe is + * handled with the help of userspace through an inheritedly racy dance + * to set/unset the VT into raw mode. + * + * Legacy drivers initialize the hardware in the @firstopen callback, + * which isn't even called for modern drivers. + */ + void (*lastclose) (struct drm_device *); + + /** + * @unload: + * + * Reverse the effects of the driver load callback. Ideally, + * the clean up performed by the driver should happen in the + * reverse order of the initialization. Similarly to the load + * hook, this handler is deprecated and its usage should be + * dropped in favor of an open-coded teardown function at the + * driver layer. See drm_dev_unregister() and drm_dev_put() + * for the proper way to remove a &struct drm_device. + * + * The unload() hook is called right after unregistering + * the device. + * + */ + void (*unload) (struct drm_device *); + + /** + * @release: + * + * Optional callback for destroying device data after the final + * reference is released, i.e. the device is being destroyed. Drivers + * using this callback are responsible for calling drm_dev_fini() + * to finalize the device and then freeing the struct themselves. + */ + void (*release) (struct drm_device *); + + /** + * @get_vblank_counter: + * + * Driver callback for fetching a raw hardware vblank counter for the + * CRTC specified with the pipe argument. If a device doesn't have a + * hardware counter, the driver can simply leave the hook as NULL. + * The DRM core will account for missed vblank events while interrupts + * where disabled based on system timestamps. + * + * Wraparound handling and loss of events due to modesetting is dealt + * with in the DRM core code, as long as drivers call + * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or + * enabling a CRTC. + * + * This is deprecated and should not be used by new drivers. + * Use &drm_crtc_funcs.get_vblank_counter instead. 
+ * + * Returns: + * + * Raw vblank counter value. + */ + u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe); + + /** + * @enable_vblank: + * + * Enable vblank interrupts for the CRTC specified with the pipe + * argument. + * + * This is deprecated and should not be used by new drivers. + * Use &drm_crtc_funcs.enable_vblank instead. + * + * Returns: + * + * Zero on success, appropriate errno if the given @crtc's vblank + * interrupt cannot be enabled. + */ + int (*enable_vblank) (struct drm_device *dev, unsigned int pipe); + + /** + * @disable_vblank: + * + * Disable vblank interrupts for the CRTC specified with the pipe + * argument. + * + * This is deprecated and should not be used by new drivers. + * Use &drm_crtc_funcs.disable_vblank instead. + */ + void (*disable_vblank) (struct drm_device *dev, unsigned int pipe); + + /** + * @get_scanout_position: + * + * Called by vblank timestamping code. + * + * Returns the current display scanout position from a crtc, and an + * optional accurate ktime_get() timestamp of when position was + * measured. Note that this is a helper callback which is only used if a + * driver uses drm_calc_vbltimestamp_from_scanoutpos() for the + * @get_vblank_timestamp callback. + * + * Parameters: + * + * dev: + * DRM device. + * pipe: + * Id of the crtc to query. + * in_vblank_irq: + * True when called from drm_crtc_handle_vblank(). Some drivers + * need to apply some workarounds for gpu-specific vblank irq quirks + * if flag is set. + * vpos: + * Target location for current vertical scanout position. + * hpos: + * Target location for current horizontal scanout position. + * stime: + * Target location for timestamp taken immediately before + * scanout position query. Can be NULL to skip timestamp. + * etime: + * Target location for timestamp taken immediately after + * scanout position query. Can be NULL to skip timestamp. + * mode: + * Current display timings. + * + * Returns vpos as a positive number while in active scanout area. + * Returns vpos as a negative number inside vblank, counting the number + * of scanlines to go until end of vblank, e.g., -1 means "one scanline + * until start of active scanout / end of vblank." + * + * Returns: + * + * True on success, false if a reliable scanout position counter could + * not be read out. + * + * FIXME: + * + * Since this is a helper to implement @get_vblank_timestamp, we should + * move it to &struct drm_crtc_helper_funcs, like all the other + * helper-internal hooks. + */ + bool (*get_scanout_position) (struct drm_device *dev, unsigned int pipe, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); + + /** + * @get_vblank_timestamp: + * + * Called by drm_get_last_vbltimestamp(). Should return a precise + * timestamp when the most recent VBLANK interval ended or will end. + * + * Specifically, the timestamp in @vblank_time should correspond as + * closely as possible to the time when the first video scanline of + * the video frame after the end of VBLANK will start scanning out, + * the time immediately after end of the VBLANK interval. If the + * @crtc is currently inside VBLANK, this will be a time in the future. + * If the @crtc is currently scanning out a frame, this will be the + * past start time of the current scanout. This is meant to adhere + * to the OpenML OML_sync_control extension specification. + * + * Paramters: + * + * dev: + * dev DRM device handle. + * pipe: + * crtc for which timestamp should be returned. 
+ * max_error: + * Maximum allowable timestamp error in nanoseconds. + * Implementation should strive to provide timestamp + * with an error of at most max_error nanoseconds. + * Returns true upper bound on error for timestamp. + * vblank_time: + * Target location for returned vblank timestamp. + * in_vblank_irq: + * True when called from drm_crtc_handle_vblank(). Some drivers + * need to apply some workarounds for gpu-specific vblank irq quirks + * if flag is set. + * + * Returns: + * + * True on success, false on failure, which means the core should + * fallback to a simple timestamp taken in drm_crtc_handle_vblank(). + * + * FIXME: + * + * We should move this hook to &struct drm_crtc_funcs like all the other + * vblank hooks. + */ + bool (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq); + + /** + * @irq_handler: + * + * Interrupt handler called when using drm_irq_install(). Not used by + * drivers which implement their own interrupt handling. + */ + irqreturn_t(*irq_handler) (int irq, void *arg); + + /** + * @irq_preinstall: + * + * Optional callback used by drm_irq_install() which is called before + * the interrupt handler is registered. This should be used to clear out + * any pending interrupts (from e.g. firmware based drives) and reset + * the interrupt handling registers. + */ + void (*irq_preinstall) (struct drm_device *dev); + + /** + * @irq_postinstall: + * + * Optional callback used by drm_irq_install() which is called after + * the interrupt handler is registered. This should be used to enable + * interrupt generation in the hardware. + */ + int (*irq_postinstall) (struct drm_device *dev); + + /** + * @irq_uninstall: + * + * Optional callback used by drm_irq_uninstall() which is called before + * the interrupt handler is unregistered. This should be used to disable + * interrupt generation in the hardware. + */ + void (*irq_uninstall) (struct drm_device *dev); + + /** + * @master_create: + * + * Called whenever a new master is created. Only used by vmwgfx. + */ + int (*master_create)(struct drm_device *dev, struct drm_master *master); + + /** + * @master_destroy: + * + * Called whenever a master is destroyed. Only used by vmwgfx. + */ + void (*master_destroy)(struct drm_device *dev, struct drm_master *master); + + /** + * @master_set: + * + * Called whenever the minor master is set. Only used by vmwgfx. + */ + int (*master_set)(struct drm_device *dev, struct drm_file *file_priv, + bool from_open); + /** + * @master_drop: + * + * Called whenever the minor master is dropped. Only used by vmwgfx. + */ + void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); + + /** + * @debugfs_init: + * + * Allows drivers to create driver-specific debugfs files. + */ + int (*debugfs_init)(struct drm_minor *minor); + + /** + * @gem_free_object: deconstructor for drm_gem_objects + * + * This is deprecated and should not be used by new drivers. Use + * @gem_free_object_unlocked instead. + */ + void (*gem_free_object) (struct drm_gem_object *obj); + + /** + * @gem_free_object_unlocked: deconstructor for drm_gem_objects + * + * This is for drivers which are not encumbered with &drm_device.struct_mutex + * legacy locking schemes. Use this hook instead of @gem_free_object. 
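As a rough illustration of the intended split between the @irq_preinstall, @irq_postinstall and @irq_uninstall hooks documented above, consider the following sketch; the wrapper structure and register offsets are invented, only the hook signatures come from this header:

#include <drm/drm_drv.h>
#include <linux/io.h>
#include <linux/kernel.h>

struct my_device {				/* hypothetical driver structure */
	struct drm_device drm;
	void __iomem *mmio;
};
#define MY_IRQ_STATUS	0x00			/* made-up register offsets */
#define MY_IRQ_ENABLE	0x04

static void my_irq_preinstall(struct drm_device *dev)
{
	struct my_device *mydev = container_of(dev, struct my_device, drm);

	writel(~0u, mydev->mmio + MY_IRQ_STATUS);	/* ack anything left pending */
	writel(0, mydev->mmio + MY_IRQ_ENABLE);		/* keep sources masked */
}

static int my_irq_postinstall(struct drm_device *dev)
{
	struct my_device *mydev = container_of(dev, struct my_device, drm);

	writel(~0u, mydev->mmio + MY_IRQ_ENABLE);	/* handler registered, unmask */
	return 0;
}

static void my_irq_uninstall(struct drm_device *dev)
{
	struct my_device *mydev = container_of(dev, struct my_device, drm);

	writel(0, mydev->mmio + MY_IRQ_ENABLE);		/* mask before the handler goes away */
}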
+ */ + void (*gem_free_object_unlocked) (struct drm_gem_object *obj); + + /** + * @gem_open_object: + * + * Driver hook called upon gem handle creation + */ + int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); + + /** + * @gem_close_object: + * + * Driver hook called upon gem handle release + */ + void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); + + /** + * @gem_print_info: + * + * If driver subclasses struct &drm_gem_object, it can implement this + * optional hook for printing additional driver specific info. + * + * drm_printf_indent() should be used in the callback passing it the + * indent argument. + * + * This callback is called from drm_gem_print_info(). + */ + void (*gem_print_info)(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + + /** + * @gem_create_object: constructor for gem objects + * + * Hook for allocating the GEM object struct, for use by core + * helpers. + */ + struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, + size_t size); + + /* prime: */ + /** + * @prime_handle_to_fd: + * + * export handle -> fd (see drm_gem_prime_handle_to_fd() helper) + */ + int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, + uint32_t handle, uint32_t flags, int *prime_fd); + /** + * @prime_fd_to_handle: + * + * import fd -> handle (see drm_gem_prime_fd_to_handle() helper) + */ + int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, + int prime_fd, uint32_t *handle); + /** + * @gem_prime_export: + * + * export GEM -> dmabuf + */ + struct dma_buf * (*gem_prime_export)(struct drm_device *dev, + struct drm_gem_object *obj, int flags); + /** + * @gem_prime_import: + * + * import dmabuf -> GEM + */ + struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, + struct dma_buf *dma_buf); + int (*gem_prime_pin)(struct drm_gem_object *obj); + void (*gem_prime_unpin)(struct drm_gem_object *obj); + struct reservation_object * (*gem_prime_res_obj)( + struct drm_gem_object *obj); + struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); + struct drm_gem_object *(*gem_prime_import_sg_table)( + struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + void *(*gem_prime_vmap)(struct drm_gem_object *obj); + void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); + int (*gem_prime_mmap)(struct drm_gem_object *obj, + struct vm_area_struct *vma); + + /** + * @dumb_create: + * + * This creates a new dumb buffer in the driver's backing storage manager (GEM, + * TTM or something else entirely) and returns the resulting buffer handle. This + * handle can then be wrapped up into a framebuffer modeset object. + * + * Note that userspace is not allowed to use such objects for render + * acceleration - drivers must create their own private ioctls for such a use + * case. + * + * Width, height and depth are specified in the &drm_mode_create_dumb + * argument. The callback needs to fill the handle, pitch and size for + * the created buffer. + * + * Called by the user via ioctl. + * + * Returns: + * + * Zero on success, negative errno on failure. + */ + int (*dumb_create)(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + /** + * @dumb_map_offset: + * + * Allocate an offset in the drm device node's address space to be able to + * memory map a dumb buffer. GEM-based drivers must use + * drm_gem_create_mmap_offset() to implement this. + * + * Called by the user via ioctl. 
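For reference, this is roughly what the userspace side of @dumb_create and @dumb_map_offset looks like; a sketch assuming libdrm's drmIoctl() and an already opened DRM file descriptor:

#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

/* Create a 32bpp dumb buffer and map it; returns MAP_FAILED on error. */
static void *map_dumb_buffer(int fd, uint32_t width, uint32_t height,
			     uint32_t *handle, uint32_t *pitch)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))	/* ends up in @dumb_create */
		return MAP_FAILED;
	*handle = create.handle;
	*pitch = create.pitch;

	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))	/* ends up in @dumb_map_offset */
		return MAP_FAILED;

	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}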
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+ /**
+ * @dumb_destroy:
+ *
+ * This destroys the userspace handle for the given dumb backing storage buffer.
+ * Since buffer objects must be reference counted in the kernel a buffer object
+ * won't be immediately freed if a framebuffer modeset object still uses it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ *
+ * Zero on success, negative errno on failure.
+ */
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
+ /**
+ * @gem_vm_ops: Driver private ops for this object
+ */
+ const struct vm_operations_struct *gem_vm_ops;
+
+ /** @major: driver major number */
+ int major;
+ /** @minor: driver minor number */
+ int minor;
+ /** @patchlevel: driver patch level */
+ int patchlevel;
+ /** @name: driver name */
+ char *name;
+ /** @desc: driver description */
+ char *desc;
+ /** @date: driver date */
+ char *date;
+
+ /** @driver_features: driver features */
+ u32 driver_features;
+
+ /**
+ * @ioctls:
+ *
+ * Array of driver-private IOCTL description entries. See the chapter on
+ * :ref:`IOCTL support in the userland interfaces
+ * chapter` for the full details.
+ */
+
+ const struct drm_ioctl_desc *ioctls;
+ /** @num_ioctls: Number of entries in @ioctls. */
+ int num_ioctls;
+
+ /**
+ * @fops:
+ *
+ * File operations for the DRM device node. See the discussion in
+ * :ref:`file operations` for in-depth coverage and
+ * some examples.
+ */
+ const struct file_operations *fops;
+
+ /* Everything below here is for legacy drivers, never use! */
+ /* private: */
+
+ /* List of devices hanging off this driver with stealth attach. */
+ struct list_head legacy_dev_list;
+ int (*firstopen) (struct drm_device *);
+ void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
+ int (*dma_quiescent) (struct drm_device *);
+ int (*context_dtor) (struct drm_device *dev, int context);
+ int dev_priv_size;
+};
+
+extern unsigned int drm_debug;
+
+int drm_dev_init(struct drm_device *dev,
+ struct drm_driver *driver,
+ struct device *parent);
+void drm_dev_fini(struct drm_device *dev);
+
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent);
+int drm_dev_register(struct drm_device *dev, unsigned long flags);
+void drm_dev_unregister(struct drm_device *dev);
+
+void drm_dev_get(struct drm_device *dev);
+void drm_dev_put(struct drm_device *dev);
+void drm_dev_unref(struct drm_device *dev);
+void drm_put_dev(struct drm_device *dev);
+bool drm_dev_enter(struct drm_device *dev, int *idx);
+void drm_dev_exit(int idx);
+void drm_dev_unplug(struct drm_device *dev);
+
+/**
+ * drm_dev_is_unplugged - is a DRM device unplugged
+ * @dev: DRM device
+ *
+ * This function can be called to check whether a hotpluggable device is
+ * unplugged. Unplugging itself is signalled through drm_dev_unplug().
If a device is + * unplugged, these two functions guarantee that any store before calling + * drm_dev_unplug() is visible to callers of this function after it completes + */ +static inline bool drm_dev_is_unplugged(struct drm_device *dev) +{ + int idx; + + if (drm_dev_enter(dev, &idx)) { + drm_dev_exit(idx); + return false; + } + + return true; +} + +/** + * drm_core_check_feature - check driver feature flags + * @dev: DRM device to check + * @feature: feature flag + * + * This checks @dev for driver features, see &drm_driver.driver_features and the + * various DRIVER_\* flags. + * + * Returns true if the @feature is supported, false otherwise. + */ +static inline bool drm_core_check_feature(struct drm_device *dev, int feature) +{ + return dev->driver->driver_features & feature; +} + +/** + * drm_drv_uses_atomic_modeset - check if the driver implements + * atomic_commit() + * @dev: DRM device + * + * This check is useful if drivers do not have DRIVER_ATOMIC set but + * have atomic modesetting internally implemented. + */ +static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) +{ + return drm_core_check_feature(dev, DRIVER_ATOMIC) || + (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); +} + + +int drm_dev_set_unique(struct drm_device *dev, const char *name); + + +#endif diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h new file mode 100644 index 000000000..8b9678bff --- /dev/null +++ b/include/drm/drm_edid.h @@ -0,0 +1,492 @@ +/* + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __DRM_EDID_H__ +#define __DRM_EDID_H__ + +#include +#include + +struct drm_device; +struct i2c_adapter; + +#define EDID_LENGTH 128 +#define DDC_ADDR 0x50 +#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */ + +#define CEA_EXT 0x02 +#define VTB_EXT 0x10 +#define DI_EXT 0x40 +#define LS_EXT 0x50 +#define MI_EXT 0x60 +#define DISPLAYID_EXT 0x70 + +struct est_timings { + u8 t1; + u8 t2; + u8 mfg_rsvd; +} __attribute__((packed)); + +/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */ +#define EDID_TIMING_ASPECT_SHIFT 6 +#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT) + +/* need to add 60 */ +#define EDID_TIMING_VFREQ_SHIFT 0 +#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT) + +struct std_timing { + u8 hsize; /* need to multiply by 8 then add 248 */ + u8 vfreq_aspect; +} __attribute__((packed)); + +#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) +#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) +#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3) +#define DRM_EDID_PT_STEREO (1 << 5) +#define DRM_EDID_PT_INTERLACED (1 << 7) + +/* If detailed data is pixel timing */ +struct detailed_pixel_timing { + u8 hactive_lo; + u8 hblank_lo; + u8 hactive_hblank_hi; + u8 vactive_lo; + u8 vblank_lo; + u8 vactive_vblank_hi; + u8 hsync_offset_lo; + u8 hsync_pulse_width_lo; + u8 vsync_offset_pulse_width_lo; + u8 hsync_vsync_offset_pulse_width_hi; + u8 width_mm_lo; + u8 height_mm_lo; + u8 width_height_mm_hi; + u8 hborder; + u8 vborder; + u8 misc; +} __attribute__((packed)); + +/* If it's not pixel timing, it'll be one of the below */ +struct detailed_data_string { + u8 str[13]; +} __attribute__((packed)); + +struct detailed_data_monitor_range { + u8 min_vfreq; + u8 max_vfreq; + u8 min_hfreq_khz; + u8 max_hfreq_khz; + u8 pixel_clock_mhz; /* need to multiply by 10 */ + u8 flags; + union { + struct { + u8 reserved; + u8 hfreq_start_khz; /* need to multiply by 2 */ + u8 c; /* need to divide by 2 */ + __le16 m; + u8 k; + u8 j; /* need to divide by 2 */ + } __attribute__((packed)) gtf2; + struct { + u8 version; + u8 data1; /* high 6 bits: extra clock resolution */ + u8 data2; /* plus low 2 of above: max hactive */ + u8 supported_aspects; + u8 flags; /* preferred aspect and blanking support */ + u8 supported_scalings; + u8 preferred_refresh; + } __attribute__((packed)) cvt; + } __attribute__((packed)) formula; +} __attribute__((packed)); + +struct detailed_data_wpindex { + u8 white_yx_lo; /* Lower 2 bits each */ + u8 white_x_hi; + u8 white_y_hi; + u8 gamma; /* need to divide by 100 then add 1 */ +} __attribute__((packed)); + +struct detailed_data_color_point { + u8 windex1; + u8 wpindex1[3]; + u8 windex2; + u8 wpindex2[3]; +} __attribute__((packed)); + +struct cvt_timing { + u8 code[3]; +} __attribute__((packed)); + +struct detailed_non_pixel { + u8 pad1; + u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name + fb=color point data, fa=standard timing data, + f9=undefined, f8=mfg. 
reserved */ + u8 pad2; + union { + struct detailed_data_string str; + struct detailed_data_monitor_range range; + struct detailed_data_wpindex color; + struct std_timing timings[6]; + struct cvt_timing cvt[4]; + } __attribute__((packed)) data; +} __attribute__((packed)); + +#define EDID_DETAIL_EST_TIMINGS 0xf7 +#define EDID_DETAIL_CVT_3BYTE 0xf8 +#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9 +#define EDID_DETAIL_STD_MODES 0xfa +#define EDID_DETAIL_MONITOR_CPDATA 0xfb +#define EDID_DETAIL_MONITOR_NAME 0xfc +#define EDID_DETAIL_MONITOR_RANGE 0xfd +#define EDID_DETAIL_MONITOR_STRING 0xfe +#define EDID_DETAIL_MONITOR_SERIAL 0xff + +struct detailed_timing { + __le16 pixel_clock; /* need to multiply by 10 KHz */ + union { + struct detailed_pixel_timing pixel_data; + struct detailed_non_pixel other_data; + } __attribute__((packed)) data; +} __attribute__((packed)); + +#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0) +#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1) +#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2) +#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3) +#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4) +#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5) +#define DRM_EDID_INPUT_DIGITAL (1 << 7) +#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) +#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) +#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) +#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) +#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) +#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) +#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) +#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) +#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) +#define DRM_EDID_DIGITAL_TYPE_UNDEF (0) +#define DRM_EDID_DIGITAL_TYPE_DVI (1) +#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2) +#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3) +#define DRM_EDID_DIGITAL_TYPE_MDDI (4) +#define DRM_EDID_DIGITAL_TYPE_DP (5) + +#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) +#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1) +#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2) +/* If analog */ +#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */ +/* If digital */ +#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3) +#define DRM_EDID_FEATURE_RGB (0 << 3) +#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3) +#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3) +#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */ + +#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5) +#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6) +#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7) + +#define DRM_EDID_HDMI_DC_48 (1 << 6) +#define DRM_EDID_HDMI_DC_36 (1 << 5) +#define DRM_EDID_HDMI_DC_30 (1 << 4) +#define DRM_EDID_HDMI_DC_Y444 (1 << 3) + +/* YCBCR 420 deep color modes */ +#define DRM_EDID_YCBCR420_DC_48 (1 << 2) +#define DRM_EDID_YCBCR420_DC_36 (1 << 1) +#define DRM_EDID_YCBCR420_DC_30 (1 << 0) +#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \ + DRM_EDID_YCBCR420_DC_36 | \ + DRM_EDID_YCBCR420_DC_30) + +/* ELD Header Block */ +#define DRM_ELD_HEADER_BLOCK_SIZE 4 + +#define DRM_ELD_VER 0 +# define DRM_ELD_VER_SHIFT 3 +# define DRM_ELD_VER_MASK (0x1f << 3) +# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ +# define DRM_ELD_VER_CANNED (0x1f << 3) + +#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! 
*/ + +/* ELD Baseline Block for ELD_Ver == 2 */ +#define DRM_ELD_CEA_EDID_VER_MNL 4 +# define DRM_ELD_CEA_EDID_VER_SHIFT 5 +# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) +# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) +# define DRM_ELD_MNL_SHIFT 0 +# define DRM_ELD_MNL_MASK (0x1f << 0) + +#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 +# define DRM_ELD_SAD_COUNT_SHIFT 4 +# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) +# define DRM_ELD_CONN_TYPE_SHIFT 2 +# define DRM_ELD_CONN_TYPE_MASK (3 << 2) +# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) +# define DRM_ELD_CONN_TYPE_DP (1 << 2) +# define DRM_ELD_SUPPORTS_AI (1 << 1) +# define DRM_ELD_SUPPORTS_HDCP (1 << 0) + +#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ +# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ + +#define DRM_ELD_SPEAKER 7 +# define DRM_ELD_SPEAKER_MASK 0x7f +# define DRM_ELD_SPEAKER_RLRC (1 << 6) +# define DRM_ELD_SPEAKER_FLRC (1 << 5) +# define DRM_ELD_SPEAKER_RC (1 << 4) +# define DRM_ELD_SPEAKER_RLR (1 << 3) +# define DRM_ELD_SPEAKER_FC (1 << 2) +# define DRM_ELD_SPEAKER_LFE (1 << 1) +# define DRM_ELD_SPEAKER_FLR (1 << 0) + +#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ +# define DRM_ELD_PORT_ID_LEN 8 + +#define DRM_ELD_MANUFACTURER_NAME0 16 +#define DRM_ELD_MANUFACTURER_NAME1 17 + +#define DRM_ELD_PRODUCT_CODE0 18 +#define DRM_ELD_PRODUCT_CODE1 19 + +#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ + +#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) + +struct edid { + u8 header[8]; + /* Vendor & product info */ + u8 mfg_id[2]; + u8 prod_code[2]; + u32 serial; /* FIXME: byte order */ + u8 mfg_week; + u8 mfg_year; + /* EDID version */ + u8 version; + u8 revision; + /* Display info: */ + u8 input; + u8 width_cm; + u8 height_cm; + u8 gamma; + u8 features; + /* Color characteristics */ + u8 red_green_lo; + u8 black_white_lo; + u8 red_x; + u8 red_y; + u8 green_x; + u8 green_y; + u8 blue_x; + u8 blue_y; + u8 white_x; + u8 white_y; + /* Est. timings and mfg rsvd timings*/ + struct est_timings established_timings; + /* Standard timings 1-8*/ + struct std_timing standard_timings[8]; + /* Detailing timings 1-4 */ + struct detailed_timing detailed_timings[4]; + /* Number of 128 byte ext. 
blocks */ + u8 extensions; + /* Checksum */ + u8 checksum; +} __attribute__((packed)); + +#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) + +/* Short Audio Descriptor */ +struct cea_sad { + u8 format; + u8 channels; /* max number of channels - 1 */ + u8 freq; + u8 byte2; /* meaning depends on format */ +}; + +struct drm_encoder; +struct drm_connector; +struct drm_display_mode; + +int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); +int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); +int drm_av_sync_delay(struct drm_connector *connector, + const struct drm_display_mode *mode); + +#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE +struct edid *drm_load_edid_firmware(struct drm_connector *connector); +int __drm_set_edid_firmware_path(const char *path); +int __drm_get_edid_firmware_path(char *buf, size_t bufsize); +#else +static inline struct edid * +drm_load_edid_firmware(struct drm_connector *connector) +{ + return ERR_PTR(-ENOENT); +} +#endif + +int +drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, + const struct drm_display_mode *mode, + bool is_hdmi2_sink); +int +drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, + struct drm_connector *connector, + const struct drm_display_mode *mode); +void +drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, + const struct drm_display_mode *mode, + enum hdmi_quantization_range rgb_quant_range, + bool rgb_quant_range_selectable, + bool is_hdmi2_sink); + +/** + * drm_eld_mnl - Get ELD monitor name length in bytes. + * @eld: pointer to an eld memory structure with mnl set + */ +static inline int drm_eld_mnl(const uint8_t *eld) +{ + return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; +} + +/** + * drm_eld_sad - Get ELD SAD structures. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline const uint8_t *drm_eld_sad(const uint8_t *eld) +{ + unsigned int ver, mnl; + + ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; + if (ver != 2 && ver != 31) + return NULL; + + mnl = drm_eld_mnl(eld); + if (mnl > 16) + return NULL; + + return eld + DRM_ELD_CEA_SAD(mnl, 0); +} + +/** + * drm_eld_sad_count - Get ELD SAD count. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline int drm_eld_sad_count(const uint8_t *eld) +{ + return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> + DRM_ELD_SAD_COUNT_SHIFT; +} + +/** + * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes + * @eld: pointer to an eld memory structure with mnl and sad_count set + * + * This is a helper for determining the payload size of the baseline block, in + * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. + */ +static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) +{ + return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + + drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; +} + +/** + * drm_eld_size - Get ELD size in bytes + * @eld: pointer to a complete eld memory structure + * + * The returned value does not include the vendor block. It's vendor specific, + * and comprises of the remaining bytes in the ELD memory buffer after + * drm_eld_size() bytes of header and baseline block. + * + * The returned value is guaranteed to be a multiple of 4. 
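The ELD accessors above compose naturally; for example, a sketch that walks the Short Audio Descriptors of an ELD buffer (the SAD bit layout follows CEA-861; the function name is made up):

#include <drm/drm_edid.h>
#include <linux/printk.h>

static void my_dump_eld_sads(const uint8_t *eld)
{
	const uint8_t *sad = drm_eld_sad(eld);
	int i, count = drm_eld_sad_count(eld);

	if (!sad)
		return;		/* unknown ELD version or bogus monitor name length */

	for (i = 0; i < count; i++, sad += 3) {
		uint8_t format   = (sad[0] & 0x78) >> 3;	/* CEA-861 audio format code */
		uint8_t channels = (sad[0] & 0x07) + 1;		/* stored as channels - 1 */

		pr_info("SAD %d: format %u, up to %u channels\n", i, format, channels);
	}
}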
+ */ +static inline int drm_eld_size(const uint8_t *eld) +{ + return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; +} + +/** + * drm_eld_get_spk_alloc - Get speaker allocation + * @eld: pointer to an ELD memory structure + * + * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER + * field definitions to identify speakers. + */ +static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) +{ + return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; +} + +/** + * drm_eld_get_conn_type - Get device type hdmi/dp connected + * @eld: pointer to an ELD memory structure + * + * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to + * identify the display type connected. + */ +static inline u8 drm_eld_get_conn_type(const uint8_t *eld) +{ + return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; +} + +bool drm_probe_ddc(struct i2c_adapter *adapter); +struct edid *drm_do_get_edid(struct drm_connector *connector, + int (*get_edid_block)(void *data, u8 *buf, unsigned int block, + size_t len), + void *data); +struct edid *drm_get_edid(struct drm_connector *connector, + struct i2c_adapter *adapter); +struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, + struct i2c_adapter *adapter); +struct edid *drm_edid_duplicate(const struct edid *edid); +int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); +int drm_add_override_edid_modes(struct drm_connector *connector); + +u8 drm_match_cea_mode(const struct drm_display_mode *to_match); +enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code); +bool drm_detect_hdmi_monitor(struct edid *edid); +bool drm_detect_monitor_audio(struct edid *edid); +bool drm_rgb_quant_range_selectable(struct edid *edid); +enum hdmi_quantization_range +drm_default_rgb_quant_range(const struct drm_display_mode *mode); +int drm_add_modes_noedid(struct drm_connector *connector, + int hdisplay, int vdisplay); +void drm_set_preferred_mode(struct drm_connector *connector, + int hpref, int vpref); + +int drm_edid_header_is_valid(const u8 *raw_edid); +bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid, + bool *edid_corrupt); +bool drm_edid_is_valid(struct edid *edid); +void drm_edid_get_monitor_name(struct edid *edid, char *name, + int buflen); +struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, + int hsize, int vsize, int fresh, + bool rb); +#endif /* __DRM_EDID_H__ */ diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h new file mode 100644 index 000000000..4f597c073 --- /dev/null +++ b/include/drm/drm_encoder.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_ENCODER_H__ +#define __DRM_ENCODER_H__ + +#include +#include +#include +#include +#include + +struct drm_encoder; + +/** + * struct drm_encoder_funcs - encoder controls + * + * Encoders sit between CRTCs and connectors. + */ +struct drm_encoder_funcs { + /** + * @reset: + * + * Reset encoder hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + */ + void (*reset)(struct drm_encoder *encoder); + + /** + * @destroy: + * + * Clean up encoder resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since an encoder cannot be + * hotplugged in DRM. + */ + void (*destroy)(struct drm_encoder *encoder); + + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the encoder like debugfs interfaces. + * It is called late in the driver load sequence from drm_dev_register(). + * Everything added from this callback should be unregistered in + * the early_unregister callback. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_encoder *encoder); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the encoder from + * @late_register. It is called from drm_dev_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + */ + void (*early_unregister)(struct drm_encoder *encoder); +}; + +/** + * struct drm_encoder - central DRM encoder structure + * @dev: parent DRM device + * @head: list management + * @base: base KMS object + * @name: human readable name, can be overwritten by the driver + * @bridge: bridge associated to the encoder + * @funcs: control functions + * @helper_private: mid-layer private data + * + * CRTCs drive pixels to encoders, which convert them into signals + * appropriate for a given connector or set of connectors. + */ +struct drm_encoder { + struct drm_device *dev; + struct list_head head; + + struct drm_mode_object base; + char *name; + /** + * @encoder_type: + * + * One of the DRM_MODE_ENCODER_ types in drm_mode.h. The following + * encoder types are defined thus far: + * + * - DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A. + * + * - DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort. + * + * - DRM_MODE_ENCODER_LVDS for display panels, or in general any panel + * with a proprietary parallel connector. + * + * - DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, + * Component, SCART). + * + * - DRM_MODE_ENCODER_VIRTUAL for virtual machine displays + * + * - DRM_MODE_ENCODER_DSI for panels connected using the DSI serial bus. + * + * - DRM_MODE_ENCODER_DPI for panels connected using the DPI parallel + * bus. 
+ *
+ * - DRM_MODE_ENCODER_DPMST for special fake encoders used to allow
+ * multiple DP MST streams to share one physical encoder.
+ */
+ int encoder_type;
+
+ /**
+ * @index: Position inside the mode_config.list, can be used as an array
+ * index. It is invariant over the lifetime of the encoder.
+ */
+ unsigned index;
+
+ /**
+ * @possible_crtcs: Bitmask of potential CRTC bindings, using
+ * drm_crtc_index() as the index into the bitfield. The driver must set
+ * the bits for all &drm_crtc objects this encoder can be connected to
+ * before calling drm_encoder_init().
+ *
+ * In reality almost every driver gets this wrong.
+ *
+ * Note that since CRTC objects can't be hotplugged the assigned indices
+ * are stable and hence known before registering all objects.
+ */
+ uint32_t possible_crtcs;
+
+ /**
+ * @possible_clones: Bitmask of potential sibling encoders for cloning,
+ * using drm_encoder_index() as the index into the bitfield. The driver
+ * must set the bits for all &drm_encoder objects which can clone a
+ * &drm_crtc together with this encoder before calling
+ * drm_encoder_init(). Drivers should set the bit representing the
+ * encoder itself, too. Cloning bits should be set such that when two
+ * encoders can be used in a cloned configuration, they both have each
+ * other's bit set.
+ *
+ * In reality almost every driver gets this wrong.
+ *
+ * Note that since encoder objects can't be hotplugged the assigned indices
+ * are stable and hence known before registering all objects.
+ */
+ uint32_t possible_clones;
+
+ /**
+ * @crtc: Currently bound CRTC, only really meaningful for non-atomic
+ * drivers. Atomic drivers should instead check
+ * &drm_connector_state.crtc.
+ */
+ struct drm_crtc *crtc;
+ struct drm_bridge *bridge;
+ const struct drm_encoder_funcs *funcs;
+ const struct drm_encoder_helper_funcs *helper_private;
+};
+
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+
+__printf(5, 6)
+int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...);
+
+/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder)
+{
+ return encoder->index;
+}
+
+/**
+ * drm_encoder_mask - find the mask of a registered encoder
+ * @encoder: encoder to find mask for
+ *
+ * Given a registered encoder, return the mask bit of that encoder for an
+ * encoder's possible_clones field.
+ */
+static inline u32 drm_encoder_mask(const struct drm_encoder *encoder)
+{
+ return 1 << drm_encoder_index(encoder);
+}
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Returns false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
+}
+
+/**
+ * drm_encoder_find - find a &drm_encoder
+ * @dev: DRM device
+ * @file_priv: drm file to check for lease against.
+ * @id: encoder id
+ *
+ * Returns the encoder with @id, NULL if it doesn't exist. Simple wrapper around
+ * drm_mode_object_find().
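A minimal sketch of how @possible_crtcs, @possible_clones and drm_encoder_init() fit together; the my_* names are hypothetical, and wiring @destroy to drm_encoder_cleanup() is simply the common pattern for encoders without extra state:

#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>

static const struct drm_encoder_funcs my_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int my_encoder_create(struct drm_device *dev, struct drm_encoder *encoder,
			     struct drm_crtc *crtc)
{
	int ret;

	/* This encoder can only be fed by the given CRTC. */
	encoder->possible_crtcs = drm_crtc_mask(crtc);

	ret = drm_encoder_init(dev, encoder, &my_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret)
		return ret;

	/* No cloning beyond the encoder itself; its index is known after init. */
	encoder->possible_clones = drm_encoder_mask(encoder);
	return 0;
}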
+ */ +static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_ENCODER); + + return mo ? obj_to_encoder(mo) : NULL; +} + +void drm_encoder_cleanup(struct drm_encoder *encoder); + +/** + * drm_for_each_encoder_mask - iterate over encoders specified by bitmask + * @encoder: the loop cursor + * @dev: the DRM device + * @encoder_mask: bitmask of encoder indices + * + * Iterate over all encoders specified by bitmask. + */ +#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \ + list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \ + for_each_if ((encoder_mask) & drm_encoder_mask(encoder)) + +/** + * drm_for_each_encoder - iterate over all encoders + * @encoder: the loop cursor + * @dev: the DRM device + * + * Iterate over all encoders of @dev. + */ +#define drm_for_each_encoder(encoder, dev) \ + list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head) + +#endif diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h new file mode 100644 index 000000000..1107b4b1c --- /dev/null +++ b/include/drm/drm_encoder_slave.h @@ -0,0 +1,183 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_ENCODER_SLAVE_H__ +#define __DRM_ENCODER_SLAVE_H__ + +#include +#include +#include + +/** + * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver + * @set_config: Initialize any encoder-specific modesetting parameters. + * The meaning of the @params parameter is implementation + * dependent. It will usually be a structure with DVO port + * data format settings or timings. It's not required for + * the new parameters to take effect until the next mode + * is set. + * + * Most of its members are analogous to the function pointers in + * &drm_encoder_helper_funcs and they can optionally be used to + * initialize the latter. Connector-like methods (e.g. @get_modes and + * @set_property) will typically be wrapped around and only be called + * if the encoder is the currently selected one for the connector. 
+ */ +struct drm_encoder_slave_funcs { + void (*set_config)(struct drm_encoder *encoder, + void *params); + + void (*destroy)(struct drm_encoder *encoder); + void (*dpms)(struct drm_encoder *encoder, int mode); + void (*save)(struct drm_encoder *encoder); + void (*restore)(struct drm_encoder *encoder); + bool (*mode_fixup)(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + int (*mode_valid)(struct drm_encoder *encoder, + struct drm_display_mode *mode); + void (*mode_set)(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + enum drm_connector_status (*detect)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*get_modes)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*create_resources)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*set_property)(struct drm_encoder *encoder, + struct drm_connector *connector, + struct drm_property *property, + uint64_t val); + +}; + +/** + * struct drm_encoder_slave - Slave encoder struct + * @base: DRM encoder object. + * @slave_funcs: Slave encoder callbacks. + * @slave_priv: Slave encoder private data. + * @bus_priv: Bus specific data. + * + * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the + * ones in @base. The former are never actually called by the common + * CRTC code, it's just a convenience for splitting the encoder + * functions in an upper, GPU-specific layer and a (hopefully) + * GPU-agnostic lower layer: It's the GPU driver responsibility to + * call the slave methods when appropriate. + * + * drm_i2c_encoder_init() provides a way to get an implementation of + * this. + */ +struct drm_encoder_slave { + struct drm_encoder base; + + const struct drm_encoder_slave_funcs *slave_funcs; + void *slave_priv; + void *bus_priv; +}; +#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base) + +int drm_i2c_encoder_init(struct drm_device *dev, + struct drm_encoder_slave *encoder, + struct i2c_adapter *adap, + const struct i2c_board_info *info); + + +/** + * struct drm_i2c_encoder_driver + * + * Describes a device driver for an encoder connected to the GPU + * through an I2C bus. In addition to the entry points in @i2c_driver + * an @encoder_init function should be provided. It will be called to + * give the driver an opportunity to allocate any per-encoder data + * structures and to initialize the @slave_funcs and (optionally) + * @slave_priv members of @encoder. + */ +struct drm_i2c_encoder_driver { + struct i2c_driver i2c_driver; + + int (*encoder_init)(struct i2c_client *client, + struct drm_device *dev, + struct drm_encoder_slave *encoder); + +}; +#define to_drm_i2c_encoder_driver(x) container_of((x), \ + struct drm_i2c_encoder_driver, \ + i2c_driver) + +/** + * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder + */ +static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder) +{ + return (struct i2c_client *)to_encoder_slave(encoder)->bus_priv; +} + +/** + * drm_i2c_encoder_register - Register an I2C encoder driver + * @owner: Module containing the driver. + * @driver: Driver to be registered. 
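Putting the pieces together, a skeleton of a hypothetical I2C slave encoder driver ("myenc" and its callbacks are invented; only the structures and registration helpers come from this header):

#include <drm/drm_encoder_slave.h>
#include <linux/i2c.h>
#include <linux/module.h>

static void myenc_dpms(struct drm_encoder *encoder, int mode)
{
	/* program the external chip's power state via I2C ... */
}

static const struct drm_encoder_slave_funcs myenc_slave_funcs = {
	.dpms = myenc_dpms,
};

static int myenc_encoder_init(struct i2c_client *client, struct drm_device *dev,
			      struct drm_encoder_slave *encoder)
{
	encoder->slave_funcs = &myenc_slave_funcs;
	return 0;
}

static const struct i2c_device_id myenc_ids[] = {
	{ "myenc", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, myenc_ids);

static struct drm_i2c_encoder_driver myenc_driver = {
	.i2c_driver = {
		.driver = { .name = "myenc" },
		.id_table = myenc_ids,
	},
	.encoder_init = myenc_encoder_init,
};

static int __init myenc_module_init(void)
{
	return drm_i2c_encoder_register(THIS_MODULE, &myenc_driver);
}
module_init(myenc_module_init);

static void __exit myenc_module_exit(void)
{
	drm_i2c_encoder_unregister(&myenc_driver);
}
module_exit(myenc_module_exit);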
+ */ +static inline int drm_i2c_encoder_register(struct module *owner, + struct drm_i2c_encoder_driver *driver) +{ + return i2c_register_driver(owner, &driver->i2c_driver); +} + +/** + * drm_i2c_encoder_unregister - Unregister an I2C encoder driver + * @driver: Driver to be unregistered. + */ +static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver) +{ + i2c_del_driver(&driver->i2c_driver); +} + +void drm_i2c_encoder_destroy(struct drm_encoder *encoder); + + +/* + * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs: + */ + +void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode); +bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void drm_i2c_encoder_prepare(struct drm_encoder *encoder); +void drm_i2c_encoder_commit(struct drm_encoder *encoder); +void drm_i2c_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder, + struct drm_connector *connector); +void drm_i2c_encoder_save(struct drm_encoder *encoder); +void drm_i2c_encoder_restore(struct drm_encoder *encoder); + + +#endif diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h new file mode 100644 index 000000000..96e26e3b9 --- /dev/null +++ b/include/drm/drm_fb_cma_helper.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_FB_CMA_HELPER_H__ +#define __DRM_FB_CMA_HELPER_H__ + +struct drm_fbdev_cma; +struct drm_gem_cma_object; + +struct drm_fb_helper_surface_size; +struct drm_framebuffer_funcs; +struct drm_fb_helper_funcs; +struct drm_framebuffer; +struct drm_fb_helper; +struct drm_device; +struct drm_file; +struct drm_mode_fb_cmd2; +struct drm_plane; +struct drm_plane_state; + +int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp, + unsigned int max_conn_count); +void drm_fb_cma_fbdev_fini(struct drm_device *dev); + +struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, + unsigned int preferred_bpp, unsigned int max_conn_count); +void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); + +void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); +void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); +void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state); +void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma, + bool state); + +struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, + unsigned int plane); + +dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, + struct drm_plane_state *state, + unsigned int plane); + +#endif + diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h new file mode 100644 index 000000000..5db08c8f1 --- /dev/null +++ b/include/drm/drm_fb_helper.h @@ -0,0 +1,618 @@ +/* + * Copyright (c) 2006-2009 Red Hat Inc. 
+ * Copyright (c) 2006-2008 Intel Corporation + * Copyright (c) 2007 Dave Airlie + * + * DRM framebuffer helper functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + * + * Authors: + * Dave Airlie + * Jesse Barnes + */ +#ifndef DRM_FB_HELPER_H +#define DRM_FB_HELPER_H + +struct drm_fb_helper; + +#include +#include +#include +#include + +enum mode_set_atomic { + LEAVE_ATOMIC_MODE_SET, + ENTER_ATOMIC_MODE_SET, +}; + +struct drm_fb_offset { + int x, y; +}; + +struct drm_fb_helper_crtc { + struct drm_mode_set mode_set; + struct drm_display_mode *desired_mode; + int x, y; + int rotation; +}; + +/** + * struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size + * @fb_width: fbdev width + * @fb_height: fbdev height + * @surface_width: scanout buffer width + * @surface_height: scanout buffer height + * @surface_bpp: scanout buffer bpp + * @surface_depth: scanout buffer depth + * + * Note that the scanout surface width/height may be larger than the fbdev + * width/height. In case of multiple displays, the scanout surface is sized + * according to the largest width/height (so it is large enough for all CRTCs + * to scanout). But the fbdev width/height is sized to the minimum width/ + * height of all the displays. This ensures that fbcon fits on the smallest + * of the attached displays. + * + * So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height, + * rather than the surface size. + */ +struct drm_fb_helper_surface_size { + u32 fb_width; + u32 fb_height; + u32 surface_width; + u32 surface_height; + u32 surface_bpp; + u32 surface_depth; +}; + +/** + * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library + * + * Driver callbacks used by the fbdev emulation helper library. + */ +struct drm_fb_helper_funcs { + /** + * @fb_probe: + * + * Driver callback to allocate and initialize the fbdev info structure. + * Furthermore it also needs to allocate the DRM framebuffer used to + * back the fbdev. + * + * This callback is mandatory. + * + * RETURNS: + * + * The driver should return 0 on success and a negative error code on + * failure. + */ + int (*fb_probe)(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); + + /** + * @initial_config: + * + * Driver callback to setup an initial fbdev display configuration. 
+ * Drivers can use this callback to tell the fbdev emulation what the + * preferred initial configuration is. This is useful to implement + * smooth booting where the fbdev (and subsequently all userspace) never + * changes the mode, but always inherits the existing configuration. + * + * This callback is optional. + * + * RETURNS: + * + * The driver should return true if a suitable initial configuration has + * been filled out and false when the fbdev helper should fall back to + * the default probing logic. + */ + bool (*initial_config)(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_crtc **crtcs, + struct drm_display_mode **modes, + struct drm_fb_offset *offsets, + bool *enabled, int width, int height); +}; + +struct drm_fb_helper_connector { + struct drm_connector *connector; +}; + +/** + * struct drm_fb_helper - main structure to emulate fbdev on top of KMS + * @fb: Scanout framebuffer object + * @dev: DRM device + * @crtc_count: number of possible CRTCs + * @crtc_info: per-CRTC helper state (mode, x/y offset, etc) + * @connector_count: number of connected connectors + * @connector_info_alloc_count: size of connector_info + * @funcs: driver callbacks for fb helper + * @fbdev: emulated fbdev device info struct + * @pseudo_palette: fake palette of 16 colors + * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to + * the screen buffer + * @dirty_lock: spinlock protecting @dirty_clip + * @dirty_work: worker used to flush the framebuffer + * @resume_work: worker used during resume if the console lock is already taken + * + * This is the main structure used by the fbdev helpers. Drivers supporting + * fbdev emulation should embedded this into their overall driver structure. + * Drivers must also fill out a &struct drm_fb_helper_funcs with a few + * operations. + */ +struct drm_fb_helper { + /** + * @client: + * + * DRM client used by the generic fbdev emulation. + */ + struct drm_client_dev client; + + /** + * @buffer: + * + * Framebuffer used by the generic fbdev emulation. + */ + struct drm_client_buffer *buffer; + + struct drm_framebuffer *fb; + struct drm_device *dev; + int crtc_count; + struct drm_fb_helper_crtc *crtc_info; + int connector_count; + int connector_info_alloc_count; + /** + * @sw_rotations: + * Bitmask of all rotations requested for panel-orientation which + * could not be handled in hardware. If only one bit is set + * fbdev->fbcon_rotate_hint gets set to the requested rotation. + */ + int sw_rotations; + /** + * @connector_info: + * + * Array of per-connector information. Do not iterate directly, but use + * drm_fb_helper_for_each_connector. + */ + struct drm_fb_helper_connector **connector_info; + const struct drm_fb_helper_funcs *funcs; + struct fb_info *fbdev; + u32 pseudo_palette[17]; + struct drm_clip_rect dirty_clip; + spinlock_t dirty_lock; + struct work_struct dirty_work; + struct work_struct resume_work; + + /** + * @lock: + * + * Top-level FBDEV helper lock. This protects all internal data + * structures and lists, such as @connector_info and @crtc_info. + * + * FIXME: fbdev emulation locking is a mess and long term we want to + * protect all helper internal state with this lock as well as reduce + * core KMS locking as much as possible. + */ + struct mutex lock; + + /** + * @kernel_fb_list: + * + * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit. + */ + struct list_head kernel_fb_list; + + /** + * @delayed_hotplug: + * + * A hotplug was received while fbdev wasn't in control of the DRM + * device, i.e. 
another KMS master was active. The output configuration + * needs to be reprobe when fbdev is in control again. + */ + bool delayed_hotplug; + + /** + * @deferred_setup: + * + * If no outputs are connected (disconnected or unknown) the FB helper + * code will defer setup until at least one of the outputs shows up. + * This field keeps track of the status so that setup can be retried + * at every hotplug event until it succeeds eventually. + * + * Protected by @lock. + */ + bool deferred_setup; + + /** + * @preferred_bpp: + * + * Temporary storage for the driver's preferred BPP setting passed to + * FB helper initialization. This needs to be tracked so that deferred + * FB helper setup can pass this on. + * + * See also: @deferred_setup + */ + int preferred_bpp; +}; + +static inline struct drm_fb_helper * +drm_fb_helper_from_client(struct drm_client_dev *client) +{ + return container_of(client, struct drm_fb_helper, client); +} + +/** + * define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers + * + * Helper define to register default implementations of drm_fb_helper + * functions. To be used in struct fb_ops of drm drivers. + */ +#define DRM_FB_HELPER_DEFAULT_OPS \ + .fb_check_var = drm_fb_helper_check_var, \ + .fb_set_par = drm_fb_helper_set_par, \ + .fb_setcmap = drm_fb_helper_setcmap, \ + .fb_blank = drm_fb_helper_blank, \ + .fb_pan_display = drm_fb_helper_pan_display, \ + .fb_debug_enter = drm_fb_helper_debug_enter, \ + .fb_debug_leave = drm_fb_helper_debug_leave, \ + .fb_ioctl = drm_fb_helper_ioctl + +#ifdef CONFIG_DRM_FBDEV_EMULATION +void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, + const struct drm_fb_helper_funcs *funcs); +int drm_fb_helper_init(struct drm_device *dev, + struct drm_fb_helper *helper, int max_conn); +void drm_fb_helper_fini(struct drm_fb_helper *helper); +int drm_fb_helper_blank(int blank, struct fb_info *info); +int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info); +int drm_fb_helper_set_par(struct fb_info *info); +int drm_fb_helper_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); + +int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); + +struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, + uint32_t fb_width, uint32_t fb_height); +void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth); + +void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper); + +void drm_fb_helper_deferred_io(struct fb_info *info, + struct list_head *pagelist); +int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper); + +ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos); +ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos); + +void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect); +void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area); +void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image); + +void drm_fb_helper_cfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect); +void drm_fb_helper_cfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area); +void drm_fb_helper_cfb_imageblit(struct fb_info *info, + const struct fb_image *image); + 
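For a driver built on these helpers, fbdev emulation is typically enabled right after registering the device; a sketch using drm_fbdev_generic_setup() (declared below), where my_driver_register() and the choice of 32 bpp are illustrative:

#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_print.h>

static int my_driver_register(struct drm_device *dev)
{
	int ret;

	ret = drm_dev_register(dev, 0);
	if (ret)
		return ret;

	/* fbdev emulation is best effort; a failure here is not fatal. */
	ret = drm_fbdev_generic_setup(dev, 32);
	if (ret)
		DRM_DEV_ERROR(dev->dev, "fbdev setup failed: %d\n", ret);

	return 0;
}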
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend); +void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, + bool suspend); + +int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); + +int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg); + +int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); +int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); +int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); +int drm_fb_helper_debug_enter(struct fb_info *info); +int drm_fb_helper_debug_leave(struct fb_info *info); +struct drm_display_mode * +drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, + int width, int height); +struct drm_display_mode * +drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn); + +int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); +int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector); + +int drm_fb_helper_fbdev_setup(struct drm_device *dev, + struct drm_fb_helper *fb_helper, + const struct drm_fb_helper_funcs *funcs, + unsigned int preferred_bpp, + unsigned int max_conn_count); +void drm_fb_helper_fbdev_teardown(struct drm_device *dev); + +void drm_fb_helper_lastclose(struct drm_device *dev); +void drm_fb_helper_output_poll_changed(struct drm_device *dev); + +int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); +int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp); +#else +static inline void drm_fb_helper_prepare(struct drm_device *dev, + struct drm_fb_helper *helper, + const struct drm_fb_helper_funcs *funcs) +{ +} + +static inline int drm_fb_helper_init(struct drm_device *dev, + struct drm_fb_helper *helper, + int max_conn) +{ + /* So drivers can use it to free the struct */ + helper->dev = dev; + dev->fb_helper = helper; + + return 0; +} + +static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) +{ + if (helper && helper->dev) + helper->dev->fb_helper = NULL; +} + +static inline int drm_fb_helper_blank(int blank, struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_set_par(struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static inline int +drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline struct fb_info * +drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) +{ + return NULL; +} + +static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) +{ +} + +static inline void drm_fb_helper_fill_var(struct fb_info *info, + struct drm_fb_helper *fb_helper, + uint32_t fb_width, uint32_t fb_height) +{ +} + +static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth) +{ +} + +static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap, + struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + return 0; +} + +static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) +{ +} + +static inline void 
drm_fb_helper_deferred_io(struct fb_info *info, + struct list_head *pagelist) +{ +} + +static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) +{ + return -ENODEV; +} + +static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, + char __user *buf, size_t count, + loff_t *ppos) +{ + return -ENODEV; +} + +static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info, + const char __user *buf, + size_t count, loff_t *ppos) +{ + return -ENODEV; +} + +static inline void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ +} + +static inline void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ +} + +static inline void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image) +{ +} + +static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ +} + +static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ +} + +static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ +} + +static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, + bool suspend) +{ +} + +static inline void +drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend) +{ +} + +static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, + int bpp_sel) +{ + return 0; +} + +static inline int +drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline int drm_fb_helper_debug_enter(struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_debug_leave(struct fb_info *info) +{ + return 0; +} + +static inline struct drm_display_mode * +drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, + int width, int height) +{ + return NULL; +} + +static inline struct drm_display_mode * +drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, + int width, int height) +{ + return NULL; +} + +static inline int +drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + return 0; +} + +static inline int +drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + return 0; +} + +static inline int +drm_fb_helper_fbdev_setup(struct drm_device *dev, + struct drm_fb_helper *fb_helper, + const struct drm_fb_helper_funcs *funcs, + unsigned int preferred_bpp, + unsigned int max_conn_count) +{ + /* So drivers can use it to free the struct */ + dev->fb_helper = fb_helper; + + return 0; +} + +static inline void drm_fb_helper_fbdev_teardown(struct drm_device *dev) +{ + dev->fb_helper = NULL; +} + +static inline void drm_fb_helper_lastclose(struct drm_device *dev) +{ +} + +static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev) +{ +} + +static inline int +drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + return 0; +} + +static inline int +drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) +{ + return 0; +} + +#endif + +static inline int +drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a, + const char *name, bool primary) +{ +#if IS_REACHABLE(CONFIG_FB) + return remove_conflicting_framebuffers(a, name, primary); +#else + return 0; 
+#endif +} + +#endif diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h new file mode 100644 index 000000000..26485acc5 --- /dev/null +++ b/include/drm/drm_file.h @@ -0,0 +1,387 @@ +/* + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. (Rik) Faith + * Author: Gareth Hughes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_FILE_H_ +#define _DRM_FILE_H_ + +#include +#include + +#include + +#include + +struct dma_fence; +struct drm_file; +struct drm_device; +struct device; + +/* + * FIXME: Not sure we want to have drm_minor here in the end, but to avoid + * header include loops we need it here for now. + */ + +/* Note that the order of this enum is ABI (it determines + * /dev/dri/renderD* numbers). + */ +enum drm_minor_type { + DRM_MINOR_PRIMARY, + DRM_MINOR_CONTROL, + DRM_MINOR_RENDER, +}; + +/** + * struct drm_minor - DRM device minor structure + * + * This structure represents a DRM minor number for device nodes in /dev. + * Entirely opaque to drivers and should never be inspected directly by drivers. + * Drivers instead should only interact with &struct drm_file and of course + * &struct drm_device, which is also where driver-private data and resources can + * be attached to. + */ +struct drm_minor { + /* private: */ + int index; /* Minor device number */ + int type; /* Control or render */ + struct device *kdev; /* Linux device */ + struct drm_device *dev; + + struct dentry *debugfs_root; + + struct list_head debugfs_list; + struct mutex debugfs_lock; /* Protects debugfs_list. */ +}; + +/** + * struct drm_pending_event - Event queued up for userspace to read + * + * This represents a DRM event. Drivers can use this as a generic completion + * mechanism, which supports kernel-internal &struct completion, &struct dma_fence + * and also the DRM-specific &struct drm_event delivery mechanism. + */ +struct drm_pending_event { + /** + * @completion: + * + * Optional pointer to a kernel internal completion signalled when + * drm_send_event() is called, useful to internally synchronize with + * nonblocking operations. 
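A minimal sketch of how such a pending event is usually reserved and sent (the foo_* names and the event type value are hypothetical), using drm_event_reserve_init() and drm_send_event() declared later in this header:

#include <linux/slab.h>
#include <drm/drm_file.h>

struct foo_done_event {
	struct drm_pending_event base;
	struct drm_event payload;	/* copied to userspace by drm_read() */
};

static int foo_queue_done_event(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct foo_done_event *e;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->payload.type = 0x80000000;	/* made-up driver event type */
	e->payload.length = sizeof(e->payload);

	/* accounts against the file's event space, ties event to file_priv */
	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->payload);
	if (ret) {
		kfree(e);
		return ret;
	}

	/* usually done later, e.g. from the interrupt handler */
	drm_send_event(dev, &e->base);
	return 0;
}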
+ */ + struct completion *completion; + + /** + * @completion_release: + * + * Optional callback currently only used by the atomic modeset helpers + * to clean up the reference count for the structure @completion is + * stored in. + */ + void (*completion_release)(struct completion *completion); + + /** + * @event: + * + * Pointer to the actual event that should be sent to userspace to be + * read using drm_read(). Can be optional, since nowadays events are + * also used to signal kernel internal threads with @completion or DMA + * transactions using @fence. + */ + struct drm_event *event; + + /** + * @fence: + * + * Optional DMA fence to unblock other hardware transactions which + * depend upon the nonblocking DRM operation this event represents. + */ + struct dma_fence *fence; + + /** + * @file_priv: + * + * &struct drm_file where @event should be delivered to. Only set when + * @event is set. + */ + struct drm_file *file_priv; + + /** + * @link: + * + * Double-linked list to keep track of this event. Can be used by the + * driver up to the point when it calls drm_send_event(), after that + * this list entry is owned by the core for its own book-keeping. + */ + struct list_head link; + + /** + * @pending_link: + * + * Entry on &drm_file.pending_event_list, to keep track of all pending + * events for @file_priv, to allow correct unwinding of them when + * userspace closes the file before the event is delivered. + */ + struct list_head pending_link; +}; + +/** + * struct drm_file - DRM file private data + * + * This structure tracks DRM state per open file descriptor. + */ +struct drm_file { + /** + * @authenticated: + * + * Whether the client is allowed to submit rendering, which for legacy + * nodes means it must be authenticated. + * + * See also the :ref:`section on primary nodes and authentication + * `. + */ + unsigned authenticated :1; + + /** + * @stereo_allowed: + * + * True when the client has asked us to expose stereo 3D mode flags. + */ + unsigned stereo_allowed :1; + + /** + * @universal_planes: + * + * True if client understands CRTC primary planes and cursor planes + * in the plane list. Automatically set when @atomic is set. + */ + unsigned universal_planes:1; + + /** @atomic: True if client understands atomic properties. */ + unsigned atomic:1; + + /** + * @aspect_ratio_allowed: + * + * True, if client can handle picture aspect ratios, and has requested + * to pass this information along with the mode. + */ + unsigned aspect_ratio_allowed:1; + + /** + * @writeback_connectors: + * + * True if client understands writeback connectors + */ + unsigned writeback_connectors:1; + + /** + * @is_master: + * + * This client is the creator of @master. Protected by struct + * &drm_device.master_mutex. + * + * See also the :ref:`section on primary nodes and authentication + * `. + */ + unsigned is_master:1; + + /** + * @master: + * + * Master this node is currently associated with. Only relevant if + * drm_is_primary_client() returns true. Note that this only + * matches &drm_device.master if the master is the currently active one. + * + * See also @authentication and @is_master and the :ref:`section on + * primary nodes and authentication `. + */ + struct drm_master *master; + + /** @pid: Process that opened this file. */ + struct pid *pid; + + /** @magic: Authentication magic, see @authenticated. */ + drm_magic_t magic; + + /** + * @lhead: + * + * List of all open files of a DRM device, linked into + * &drm_device.filelist. Protected by &drm_device.filelist_mutex. 
+ */ + struct list_head lhead; + + /** @minor: &struct drm_minor for this file. */ + struct drm_minor *minor; + + /** + * @object_idr: + * + * Mapping of mm object handles to object pointers. Used by the GEM + * subsystem. Protected by @table_lock. + */ + struct idr object_idr; + + /** @table_lock: Protects @object_idr. */ + spinlock_t table_lock; + + /** @syncobj_idr: Mapping of sync object handles to object pointers. */ + struct idr syncobj_idr; + /** @syncobj_table_lock: Protects @syncobj_idr. */ + spinlock_t syncobj_table_lock; + + /** @filp: Pointer to the core file structure. */ + struct file *filp; + + /** + * @driver_priv: + * + * Optional pointer for driver private data. Can be allocated in + * &drm_driver.open and should be freed in &drm_driver.postclose. + */ + void *driver_priv; + + /** + * @fbs: + * + * List of &struct drm_framebuffer associated with this file, using the + * &drm_framebuffer.filp_head entry. + * + * Protected by @fbs_lock. Note that the @fbs list holds a reference on + * the framebuffer object to prevent it from untimely disappearing. + */ + struct list_head fbs; + + /** @fbs_lock: Protects @fbs. */ + struct mutex fbs_lock; + + /** + * @blobs: + * + * User-created blob properties; this retains a reference on the + * property. + * + * Protected by @drm_mode_config.blob_lock; + */ + struct list_head blobs; + + /** @event_wait: Waitqueue for new events added to @event_list. */ + wait_queue_head_t event_wait; + + /** + * @pending_event_list: + * + * List of pending &struct drm_pending_event, used to clean up pending + * events in case this file gets closed before the event is signalled. + * Uses the &drm_pending_event.pending_link entry. + * + * Protect by &drm_device.event_lock. + */ + struct list_head pending_event_list; + + /** + * @event_list: + * + * List of &struct drm_pending_event, ready for delivery to userspace + * through drm_read(). Uses the &drm_pending_event.link entry. + * + * Protect by &drm_device.event_lock. + */ + struct list_head event_list; + + /** + * @event_space: + * + * Available event space to prevent userspace from + * exhausting kernel memory. Currently limited to the fairly arbitrary + * value of 4KB. + */ + int event_space; + + /** @event_read_lock: Serializes drm_read(). */ + struct mutex event_read_lock; + + /** + * @prime: + * + * Per-file buffer caches used by the PRIME buffer sharing code. + */ + struct drm_prime_file_private prime; + + /* private: */ + unsigned long lock_count; /* DRI1 legacy lock count */ +}; + +/** + * drm_is_primary_client - is this an open file of the primary node + * @file_priv: DRM file + * + * Returns true if this is an open file of the primary node, i.e. + * &drm_file.minor of @file_priv is a primary minor. + * + * See also the :ref:`section on primary nodes and authentication + * `. + */ +static inline bool drm_is_primary_client(const struct drm_file *file_priv) +{ + return file_priv->minor->type == DRM_MINOR_PRIMARY; +} + +/** + * drm_is_render_client - is this an open file of the render node + * @file_priv: DRM file + * + * Returns true if this is an open file of the render node, i.e. + * &drm_file.minor of @file_priv is a render minor. + * + * See also the :ref:`section on render nodes `. 
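A minimal sketch of the @driver_priv lifecycle described above (the foo_* names are hypothetical): allocate per-open state in &drm_driver.open and free it in &drm_driver.postclose:

#include <linux/slab.h>
#include <drm/drm_file.h>

struct foo_file {
	int counter;	/* hypothetical per-open driver state */
};

static int foo_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct foo_file *fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);

	if (!fpriv)
		return -ENOMEM;
	file->driver_priv = fpriv;
	return 0;
}

static void foo_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	kfree(file->driver_priv);
}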
+ */ +static inline bool drm_is_render_client(const struct drm_file *file_priv) +{ + return file_priv->minor->type == DRM_MINOR_RENDER; +} + +int drm_open(struct inode *inode, struct file *filp); +ssize_t drm_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset); +int drm_release(struct inode *inode, struct file *filp); +__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait); +int drm_event_reserve_init_locked(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e); +int drm_event_reserve_init(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e); +void drm_event_cancel_free(struct drm_device *dev, + struct drm_pending_event *p); +void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e); +void drm_send_event(struct drm_device *dev, struct drm_pending_event *e); + +#endif /* _DRM_FILE_H_ */ diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h new file mode 100644 index 000000000..553210c02 --- /dev/null +++ b/include/drm/drm_fixed.h @@ -0,0 +1,211 @@ +/* + * Copyright 2009 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Christian König + */ +#ifndef DRM_FIXED_H +#define DRM_FIXED_H + +#include + +typedef union dfixed { + u32 full; +} fixed20_12; + + +#define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ +#define dfixed_const_half(A) (u32)(((A) << 12) + 2048) +#define dfixed_const_666(A) (u32)(((A) << 12) + 2731) +#define dfixed_const_8(A) (u32)(((A) << 12) + 3277) +#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) +#define dfixed_init(A) { .full = dfixed_const((A)) } +#define dfixed_init_half(A) { .full = dfixed_const_half((A)) } +#define dfixed_trunc(A) ((A).full >> 12) +#define dfixed_frac(A) ((A).full & ((1 << 12) - 1)) + +static inline u32 dfixed_floor(fixed20_12 A) +{ + u32 non_frac = dfixed_trunc(A); + + return dfixed_const(non_frac); +} + +static inline u32 dfixed_ceil(fixed20_12 A) +{ + u32 non_frac = dfixed_trunc(A); + + if (A.full > dfixed_const(non_frac)) + return dfixed_const(non_frac + 1); + else + return dfixed_const(non_frac); +} + +static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) +{ + u64 tmp = ((u64)A.full << 13); + + do_div(tmp, B.full); + tmp += 1; + tmp /= 2; + return lower_32_bits(tmp); +} + +#define DRM_FIXED_POINT 32 +#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT) +#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1) +#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK) +#define DRM_FIXED_EPSILON 1LL +#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON) + +static inline s64 drm_int2fixp(int a) +{ + return ((s64)a) << DRM_FIXED_POINT; +} + +static inline int drm_fixp2int(s64 a) +{ + return ((s64)a) >> DRM_FIXED_POINT; +} + +static inline int drm_fixp2int_ceil(s64 a) +{ + if (a > 0) + return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE); + else + return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE); +} + +static inline unsigned drm_fixp_msbset(s64 a) +{ + unsigned shift, sign = (a >> 63) & 1; + + for (shift = 62; shift > 0; --shift) + if (((a >> shift) & 1) != sign) + return shift; + + return 0; +} + +static inline s64 drm_fixp_mul(s64 a, s64 b) +{ + unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b); + s64 result; + + if (shift > 61) { + shift = shift - 61; + a >>= (shift >> 1) + (shift & 1); + b >>= shift >> 1; + } else + shift = 0; + + result = a * b; + + if (shift > DRM_FIXED_POINT) + return result << (shift - DRM_FIXED_POINT); + + if (shift < DRM_FIXED_POINT) + return result >> (DRM_FIXED_POINT - shift); + + return result; +} + +static inline s64 drm_fixp_div(s64 a, s64 b) +{ + unsigned shift = 62 - drm_fixp_msbset(a); + s64 result; + + a <<= shift; + + if (shift < DRM_FIXED_POINT) + b >>= (DRM_FIXED_POINT - shift); + + result = div64_s64(a, b); + + if (shift > DRM_FIXED_POINT) + return result >> (shift - DRM_FIXED_POINT); + + return result; +} + +static inline s64 drm_fixp_from_fraction(s64 a, s64 b) +{ + s64 res; + bool a_neg = a < 0; + bool b_neg = b < 0; + u64 a_abs = a_neg ? -a : a; + u64 b_abs = b_neg ? 
-b : b; + u64 rem; + + /* determine integer part */ + u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem); + + /* determine fractional part */ + { + u32 i = DRM_FIXED_POINT; + + do { + rem <<= 1; + res_abs <<= 1; + if (rem >= b_abs) { + res_abs |= 1; + rem -= b_abs; + } + } while (--i != 0); + } + + /* round up LSB */ + { + u64 summand = (rem << 1) >= b_abs; + + res_abs += summand; + } + + res = (s64) res_abs; + if (a_neg ^ b_neg) + res = -res; + return res; +} + +static inline s64 drm_fixp_exp(s64 x) +{ + s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000); + s64 sum = DRM_FIXED_ONE, term, y = x; + u64 count = 1; + + if (x < 0) + y = -1 * x; + + term = y; + + while (term >= tolerance) { + sum = sum + term; + count = count + 1; + term = drm_fixp_mul(term, div64_s64(y, count)); + } + + if (x < 0) + sum = drm_fixp_div(DRM_FIXED_ONE, sum); + + return sum; +} + +#endif diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h new file mode 100644 index 000000000..21c3d512d --- /dev/null +++ b/include/drm/drm_flip_work.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2013 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_FLIP_WORK_H +#define DRM_FLIP_WORK_H + +#include +#include +#include + +/** + * DOC: flip utils + * + * Util to queue up work to run from work-queue context after flip/vblank. + * Typically this can be used to defer unref of framebuffer's, cursor + * bo's, etc until after vblank. The APIs are all thread-safe. + * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called + * in atomic context. + */ + +struct drm_flip_work; + +/* + * drm_flip_func_t - callback function + * + * @work: the flip work + * @val: value queued via drm_flip_work_queue() + * + * Callback function to be called for each of the queue'd work items after + * drm_flip_work_commit() is called. 
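To make the two fixed-point representations in drm_fixed.h concrete, here is a small worked sketch (the foo_* name is hypothetical) using the 20.12 dfixed_* macros and the signed 32.32 helpers defined above:

#include <drm/drm_fixed.h>

static int foo_fixed_point_demo(void)
{
	fixed20_12 a = dfixed_init_half(2);	/* 2.5 == (2 << 12) + 2048 */
	fixed20_12 b = dfixed_init(3);		/* 3.0 */
	fixed20_12 r;
	s64 scale;

	r.full = dfixed_mul(a, b);		/* 7.5 in 20.12 */
	/* dfixed_trunc(r) == 7, dfixed_frac(r) == 2048, i.e. 0.5 */

	scale = drm_fixp_from_fraction(3, 2);	/* 1.5 in signed 32.32 */
	/* drm_fixp2int(drm_fixp_mul(scale, drm_int2fixp(10))) == 15 */

	return dfixed_trunc(r) +
	       drm_fixp2int(drm_fixp_mul(scale, drm_int2fixp(10)));
	/* returns 22 */
}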
+ */ +typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); + +/** + * struct drm_flip_task - flip work task + * @node: list entry element + * @data: data to pass to &drm_flip_work.func + */ +struct drm_flip_task { + struct list_head node; + void *data; +}; + +/** + * struct drm_flip_work - flip work queue + * @name: debug name + * @func: callback fxn called for each committed item + * @worker: worker which calls @func + * @queued: queued tasks + * @commited: commited tasks + * @lock: lock to access queued and commited lists + */ +struct drm_flip_work { + const char *name; + drm_flip_func_t func; + struct work_struct worker; + struct list_head queued; + struct list_head commited; + spinlock_t lock; +}; + +struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags); +void drm_flip_work_queue_task(struct drm_flip_work *work, + struct drm_flip_task *task); +void drm_flip_work_queue(struct drm_flip_work *work, void *val); +void drm_flip_work_commit(struct drm_flip_work *work, + struct workqueue_struct *wq); +void drm_flip_work_init(struct drm_flip_work *work, + const char *name, drm_flip_func_t func); +void drm_flip_work_cleanup(struct drm_flip_work *work); + +#endif /* DRM_FLIP_WORK_H */ diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h new file mode 100644 index 000000000..f9c15845f --- /dev/null +++ b/include/drm/drm_fourcc.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2016 Laurent Pinchart + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ +#ifndef __DRM_FOURCC_H__ +#define __DRM_FOURCC_H__ + +#include +#include + +struct drm_device; +struct drm_mode_fb_cmd2; + +/** + * struct drm_format_info - information about a DRM format + * @format: 4CC format identifier (DRM_FORMAT_*) + * @depth: Color depth (number of bits per pixel excluding padding bits), + * valid for a subset of RGB formats only. This is a legacy field, do not + * use in new code and set to 0 for new formats. + * @num_planes: Number of color planes (1 to 3) + * @cpp: Number of bytes per pixel (per plane) + * @hsub: Horizontal chroma subsampling factor + * @vsub: Vertical chroma subsampling factor + * @has_alpha: Does the format embeds an alpha component? + * @is_yuv: Is it a YUV format? 
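A rough usage sketch for the flip-work API declared above (the foo_* names are hypothetical): queue a framebuffer unref from atomic context at flip time and commit the queued items to a workqueue from the vblank interrupt:

#include <linux/workqueue.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>

static struct drm_flip_work foo_unref_work;

/* runs from workqueue context for every committed value */
static void foo_unref_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_put(val);
}

static void foo_init(void)
{
	drm_flip_work_init(&foo_unref_work, "fb unref", foo_unref_worker);
}

/* at flip time, possibly in atomic context: defer the unref... */
static void foo_flip(struct drm_framebuffer *old_fb)
{
	drm_flip_work_queue(&foo_unref_work, old_fb);
}

/* ...and hand the committed items to a workqueue from the vblank irq */
static void foo_vblank_irq(void)
{
	drm_flip_work_commit(&foo_unref_work, system_unbound_wq);
}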
+ */ +struct drm_format_info { + u32 format; + u8 depth; + u8 num_planes; + u8 cpp[3]; + u8 hsub; + u8 vsub; + bool has_alpha; + bool is_yuv; +}; + +/** + * struct drm_format_name_buf - name of a DRM format + * @str: string buffer containing the format name + */ +struct drm_format_name_buf { + char str[32]; +}; + +const struct drm_format_info *__drm_format_info(u32 format); +const struct drm_format_info *drm_format_info(u32 format); +const struct drm_format_info * +drm_get_format_info(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd); +uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); +int drm_format_num_planes(uint32_t format); +int drm_format_plane_cpp(uint32_t format, int plane); +int drm_format_horz_chroma_subsampling(uint32_t format); +int drm_format_vert_chroma_subsampling(uint32_t format); +int drm_format_plane_width(int width, uint32_t format, int plane); +int drm_format_plane_height(int height, uint32_t format, int plane); +const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf); + +#endif /* __DRM_FOURCC_H__ */ diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h new file mode 100644 index 000000000..c50502c65 --- /dev/null +++ b/include/drm/drm_framebuffer.h @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_FRAMEBUFFER_H__ +#define __DRM_FRAMEBUFFER_H__ + +#include +#include +#include + +struct drm_framebuffer; +struct drm_file; +struct drm_device; + +/** + * struct drm_framebuffer_funcs - framebuffer hooks + */ +struct drm_framebuffer_funcs { + /** + * @destroy: + * + * Clean up framebuffer resources, specifically also unreference the + * backing storage. The core guarantees to call this function for every + * framebuffer successfully created by calling + * &drm_mode_config_funcs.fb_create. Drivers must also call + * drm_framebuffer_cleanup() to release DRM core resources for this + * framebuffer. + */ + void (*destroy)(struct drm_framebuffer *framebuffer); + + /** + * @create_handle: + * + * Create a buffer handle in the driver-specific buffer manager (either + * GEM or TTM) valid for the passed-in &struct drm_file. This is used by + * the core to implement the GETFB IOCTL, which returns (for + * sufficiently priviledged user) also a native buffer handle. 
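A small sketch (the foo_* name is hypothetical) of querying format metadata through the lookup helpers declared above; DRM_FORMAT_NV12, for example, reports two planes and 2x2 chroma subsampling:

#include <linux/kernel.h>
#include <drm/drm_fourcc.h>

static void foo_dump_format(u32 fourcc)
{
	const struct drm_format_info *info = drm_format_info(fourcc);
	struct drm_format_name_buf name;

	if (!info)
		return;

	pr_info("%s: %u plane(s), cpp[0]=%u, subsampling %ux%u, %s\n",
		drm_get_format_name(fourcc, &name),
		info->num_planes, info->cpp[0], info->hsub, info->vsub,
		info->is_yuv ? "YUV" : "RGB");
}

/* e.g. foo_dump_format(DRM_FORMAT_NV12) */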
This can + * be used for seamless transitions between modesetting clients by + * copying the current screen contents to a private buffer and blending + * between that and the new contents. + * + * GEM based drivers should call drm_gem_handle_create() to create the + * handle. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*create_handle)(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle); + /** + * @dirty: + * + * Optional callback for the dirty fb IOCTL. + * + * Userspace can notify the driver via this callback that an area of the + * framebuffer has changed and should be flushed to the display + * hardware. This can also be used internally, e.g. by the fbdev + * emulation, though that's not the case currently. + * + * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd + * for more information as all the semantics and arguments have a one to + * one mapping on this function. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*dirty)(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, unsigned flags, + unsigned color, struct drm_clip_rect *clips, + unsigned num_clips); +}; + +/** + * struct drm_framebuffer - frame buffer object + * + * Note that the fb is refcounted for the benefit of driver internals, + * for example on some hw, disabling a CRTC/plane is asynchronous, and + * scanout does not actually complete until the next vblank. So some + * cleanup (like releasing the reference(s) on the backing GEM bo(s)) + * should be deferred. In cases like this, the driver would like to + * hold a ref to the fb even though it has already been removed from + * the userspace perspective. See drm_framebuffer_get() and + * drm_framebuffer_put(). + * + * The refcount is stored inside the mode object @base. + */ +struct drm_framebuffer { + /** + * @dev: DRM device this framebuffer belongs to + */ + struct drm_device *dev; + /** + * @head: Place on the &drm_mode_config.fb_list, access protected by + * &drm_mode_config.fb_lock. + */ + struct list_head head; + + /** + * @base: base modeset object structure, contains the reference count. + */ + struct drm_mode_object base; + + /** + * @comm: Name of the process allocating the fb, used for fb dumping. + */ + char comm[TASK_COMM_LEN]; + + /** + * @format: framebuffer format information + */ + const struct drm_format_info *format; + /** + * @funcs: framebuffer vfunc table + */ + const struct drm_framebuffer_funcs *funcs; + /** + * @pitches: Line stride per buffer. For userspace created object this + * is copied from drm_mode_fb_cmd2. + */ + unsigned int pitches[4]; + /** + * @offsets: Offset from buffer start to the actual pixel data in bytes, + * per buffer. For userspace created object this is copied from + * drm_mode_fb_cmd2. + * + * Note that this is a linear offset and does not take into account + * tiling or buffer layout per @modifier. It is meant to be used when the + * actual pixel data for this framebuffer plane starts at an offset, + * e.g. when multiple planes are allocated within the same backing + * storage buffer object. For tiled layouts this generally means + * @offsets must at least be tile-size aligned, but hardware often has + * stricter requirements. + * + * This should not be used to specify x/y pixel offsets into the buffer + * data (even for linear buffers). Specifying an x/y pixel offset is + * instead done through the source rectangle in &struct drm_plane_state.
+ */ + unsigned int offsets[4]; + /** + * @modifier: Data layout modifier. This is used to describe + * tiling, or also special layouts (like compression) of auxiliary + * buffers. For userspace created object this is copied from + * drm_mode_fb_cmd2. + */ + uint64_t modifier; + /** + * @width: Logical width of the visible area of the framebuffer, in + * pixels. + */ + unsigned int width; + /** + * @height: Logical height of the visible area of the framebuffer, in + * pixels. + */ + unsigned int height; + /** + * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or + * DRM_MODE_FB_MODIFIERS. + */ + int flags; + /** + * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor + * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR + * universal plane. + */ + int hot_x; + /** + * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor + * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR + * universal plane. + */ + int hot_y; + /** + * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. + */ + struct list_head filp_head; + /** + * @obj: GEM objects backing the framebuffer, one per plane (optional). + * + * This is used by the GEM framebuffer helpers, see e.g. + * drm_gem_fb_create(). + */ + struct drm_gem_object *obj[4]; +}; + +#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) + +int drm_framebuffer_init(struct drm_device *dev, + struct drm_framebuffer *fb, + const struct drm_framebuffer_funcs *funcs); +struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id); +void drm_framebuffer_remove(struct drm_framebuffer *fb); +void drm_framebuffer_cleanup(struct drm_framebuffer *fb); +void drm_framebuffer_unregister_private(struct drm_framebuffer *fb); + +/** + * drm_framebuffer_get - acquire a framebuffer reference + * @fb: DRM framebuffer + * + * This function increments the framebuffer's reference count. + */ +static inline void drm_framebuffer_get(struct drm_framebuffer *fb) +{ + drm_mode_object_get(&fb->base); +} + +/** + * drm_framebuffer_put - release a framebuffer reference + * @fb: DRM framebuffer + * + * This function decrements the framebuffer's reference count and frees the + * framebuffer if the reference count drops to zero. + */ +static inline void drm_framebuffer_put(struct drm_framebuffer *fb) +{ + drm_mode_object_put(&fb->base); +} + +/** + * drm_framebuffer_reference - acquire a framebuffer reference + * @fb: DRM framebuffer + * + * This is a compatibility alias for drm_framebuffer_get() and should not be + * used by new code. + */ +static inline void drm_framebuffer_reference(struct drm_framebuffer *fb) +{ + drm_framebuffer_get(fb); +} + +/** + * drm_framebuffer_unreference - release a framebuffer reference + * @fb: DRM framebuffer + * + * This is a compatibility alias for drm_framebuffer_put() and should not be + * used by new code. + */ +static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb) +{ + drm_framebuffer_put(fb); +} + +/** + * drm_framebuffer_read_refcount - read the framebuffer reference count. + * @fb: framebuffer + * + * This functions returns the framebuffer's reference count. 
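A minimal sketch of the lookup/put pattern for the refcounted framebuffer object (the foo_* name is hypothetical): drm_framebuffer_lookup() returns its own reference, which must be dropped with drm_framebuffer_put():

#include <linux/kernel.h>
#include <linux/errno.h>
#include <drm/drm_framebuffer.h>

static int foo_inspect_fb(struct drm_device *dev, struct drm_file *file_priv,
			  u32 id)
{
	struct drm_framebuffer *fb;

	/* on success the lookup returns its own reference... */
	fb = drm_framebuffer_lookup(dev, file_priv, id);
	if (!fb)
		return -ENOENT;

	pr_info("fb %u: %ux%u\n", id, fb->width, fb->height);

	/* ...which must be dropped again once we are done with it */
	drm_framebuffer_put(fb);
	return 0;
}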
+ */ +static inline uint32_t drm_framebuffer_read_refcount(const struct drm_framebuffer *fb) +{ + return kref_read(&fb->base.refcount); +} + +/** + * drm_framebuffer_assign - store a reference to the fb + * @p: location to store framebuffer + * @fb: new framebuffer (maybe NULL) + * + * This functions sets the location to store a reference to the framebuffer, + * unreferencing the framebuffer that was previously stored in that location. + */ +static inline void drm_framebuffer_assign(struct drm_framebuffer **p, + struct drm_framebuffer *fb) +{ + if (fb) + drm_framebuffer_get(fb); + if (*p) + drm_framebuffer_put(*p); + *p = fb; +} + +/* + * drm_for_each_fb - iterate over all framebuffers + * @fb: the loop cursor + * @dev: the DRM device + * + * Iterate over all framebuffers of @dev. User must hold + * &drm_mode_config.fb_lock. + */ +#define drm_for_each_fb(fb, dev) \ + for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \ + fb = list_first_entry(&(dev)->mode_config.fb_list, \ + struct drm_framebuffer, head); \ + &fb->head != (&(dev)->mode_config.fb_list); \ + fb = list_next_entry(fb, head)) + +int drm_framebuffer_plane_width(int width, + const struct drm_framebuffer *fb, int plane); +int drm_framebuffer_plane_height(int height, + const struct drm_framebuffer *fb, int plane); + +#endif diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h new file mode 100644 index 000000000..3583b98a1 --- /dev/null +++ b/include/drm/drm_gem.h @@ -0,0 +1,296 @@ +#ifndef __DRM_GEM_H__ +#define __DRM_GEM_H__ + +/* + * GEM Graphics Execution Manager Driver Interfaces + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * Copyright © 2014 Intel Corporation + * Daniel Vetter + * + * Author: Rickard E. (Rik) Faith + * Author: Gareth Hughes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include + +#include + +/** + * struct drm_gem_object - GEM buffer object + * + * This structure defines the generic parts for GEM buffer objects, which are + * mostly around handling mmap and userspace handles. + * + * Buffer objects are often abbreviated to BO. 
+ */ +struct drm_gem_object { + /** + * @refcount: + * + * Reference count of this object + * + * Please use drm_gem_object_get() to acquire and drm_gem_object_put() + * or drm_gem_object_put_unlocked() to release a reference to a GEM + * buffer object. + */ + struct kref refcount; + + /** + * @handle_count: + * + * This is the GEM file_priv handle count of this object. + * + * Each handle also holds a reference. Note that when the handle_count + * drops to 0 any global names (e.g. the id in the flink namespace) will + * be cleared. + * + * Protected by &drm_device.object_name_lock. + */ + unsigned handle_count; + + /** + * @dev: DRM dev this object belongs to. + */ + struct drm_device *dev; + + /** + * @filp: + * + * SHMEM file node used as backing storage for swappable buffer objects. + * GEM also supports driver private objects with driver-specific backing + * storage (contiguous CMA memory, special reserved blocks). In this + * case @filp is NULL. + */ + struct file *filp; + + /** + * @vma_node: + * + * Mapping info for this object to support mmap. Drivers are supposed to + * allocate the mmap offset using drm_gem_create_mmap_offset(). The + * offset itself can be retrieved using drm_vma_node_offset_addr(). + * + * Memory mapping itself is handled by drm_gem_mmap(), which also checks + * that userspace is allowed to access the object. + */ + struct drm_vma_offset_node vma_node; + + /** + * @size: + * + * Size of the object, in bytes. Immutable over the object's + * lifetime. + */ + size_t size; + + /** + * @name: + * + * Global name for this object, starts at 1. 0 means unnamed. + * Access is covered by &drm_device.object_name_lock. This is used by + * the GEM_FLINK and GEM_OPEN ioctls. + */ + int name; + + /** + * @dma_buf: + * + * dma-buf associated with this GEM object. + * + * Pointer to the dma-buf associated with this gem object (either + * through importing or exporting). We break the resulting reference + * loop when the last gem handle for this object is released. + * + * Protected by &drm_device.object_name_lock. + */ + struct dma_buf *dma_buf; + + /** + * @import_attach: + * + * dma-buf attachment backing this object. + * + * Any foreign dma_buf imported as a gem object has this set to the + * attachment point for the device. This is invariant over the lifetime + * of a gem object. + * + * The &drm_driver.gem_free_object callback is responsible for cleaning + * up the dma_buf attachment and references acquired at import time. + * + * Note that the drm gem/prime core does not depend upon drivers setting + * this field any more. So for drivers where this doesn't make sense + * (e.g. virtual devices or a displaylink behind an usb bus) they can + * simply leave it as NULL. + */ + struct dma_buf_attachment *import_attach; +}; + +/** + * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers + * @name: name for the generated structure + * + * This macro autogenerates a suitable &struct file_operations for GEM based + * drivers, which can be assigned to &drm_driver.fops. Note that this structure + * cannot be shared between drivers, because it contains a reference to the + * current module using THIS_MODULE. + * + * Note that the declaration is already marked as static - if you need a + * non-static version of this you're probably doing it wrong and will break the + * THIS_MODULE reference by accident. 
+ */ +#define DEFINE_DRM_GEM_FOPS(name) \ + static const struct file_operations name = {\ + .owner = THIS_MODULE,\ + .open = drm_open,\ + .release = drm_release,\ + .unlocked_ioctl = drm_ioctl,\ + .compat_ioctl = drm_compat_ioctl,\ + .poll = drm_poll,\ + .read = drm_read,\ + .llseek = noop_llseek,\ + .mmap = drm_gem_mmap,\ + } + +void drm_gem_object_release(struct drm_gem_object *obj); +void drm_gem_object_free(struct kref *kref); +int drm_gem_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size); +void drm_gem_private_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size); +void drm_gem_vm_open(struct vm_area_struct *vma); +void drm_gem_vm_close(struct vm_area_struct *vma); +int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, + struct vm_area_struct *vma); +int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); + +/** + * drm_gem_object_get - acquire a GEM buffer object reference + * @obj: GEM buffer object + * + * This function acquires an additional reference to @obj. It is illegal to + * call this without already holding a reference. No locks required. + */ +static inline void drm_gem_object_get(struct drm_gem_object *obj) +{ + kref_get(&obj->refcount); +} + +/** + * __drm_gem_object_put - raw function to release a GEM buffer object reference + * @obj: GEM buffer object + * + * This function is meant to be used by drivers which are not encumbered with + * &drm_device.struct_mutex legacy locking and which are using the + * gem_free_object_unlocked callback. It avoids all the locking checks and + * locking overhead of drm_gem_object_put() and drm_gem_object_put_unlocked(). + * + * Drivers should never call this directly in their code. Instead they should + * wrap it up into a ``driver_gem_object_put(struct driver_gem_object *obj)`` + * wrapper function, and use that. Shared code should never call this, to + * avoid breaking drivers by accident which still depend upon + * &drm_device.struct_mutex locking. + */ +static inline void +__drm_gem_object_put(struct drm_gem_object *obj) +{ + kref_put(&obj->refcount, drm_gem_object_free); +} + +void drm_gem_object_put_unlocked(struct drm_gem_object *obj); +void drm_gem_object_put(struct drm_gem_object *obj); + +/** + * drm_gem_object_reference - acquire a GEM buffer object reference + * @obj: GEM buffer object + * + * This is a compatibility alias for drm_gem_object_get() and should not be + * used by new code. + */ +static inline void drm_gem_object_reference(struct drm_gem_object *obj) +{ + drm_gem_object_get(obj); +} + +/** + * __drm_gem_object_unreference - raw function to release a GEM buffer object + * reference + * @obj: GEM buffer object + * + * This is a compatibility alias for __drm_gem_object_put() and should not be + * used by new code. + */ +static inline void __drm_gem_object_unreference(struct drm_gem_object *obj) +{ + __drm_gem_object_put(obj); +} + +/** + * drm_gem_object_unreference_unlocked - release a GEM buffer object reference + * @obj: GEM buffer object + * + * This is a compatibility alias for drm_gem_object_put_unlocked() and should + * not be used by new code. + */ +static inline void +drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) +{ + drm_gem_object_put_unlocked(obj); +} + +/** + * drm_gem_object_unreference - release a GEM buffer object reference + * @obj: GEM buffer object + * + * This is a compatibility alias for drm_gem_object_put() and should not be + * used by new code. 
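A short sketch of wiring the generated file operations into a driver structure (the foo_* names, description and version numbers are hypothetical), assuming a GEM based modesetting driver:

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>

DEFINE_DRM_GEM_FOPS(foo_fops);

static struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	.fops			= &foo_fops,
	.name			= "foo",
	.desc			= "hypothetical example driver",
	.date			= "20180101",
	.major			= 1,
	.minor			= 0,
};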
+ */ +static inline void drm_gem_object_unreference(struct drm_gem_object *obj) +{ + drm_gem_object_put(obj); +} + +int drm_gem_handle_create(struct drm_file *file_priv, + struct drm_gem_object *obj, + u32 *handlep); +int drm_gem_handle_delete(struct drm_file *filp, u32 handle); + + +void drm_gem_free_mmap_offset(struct drm_gem_object *obj); +int drm_gem_create_mmap_offset(struct drm_gem_object *obj); +int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); + +struct page **drm_gem_get_pages(struct drm_gem_object *obj); +void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, + bool dirty, bool accessed); + +struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); +int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + u32 handle, u64 *offset); +int drm_gem_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle); + +#endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h new file mode 100644 index 000000000..19777145c --- /dev/null +++ b/include/drm/drm_gem_cma_helper.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_GEM_CMA_HELPER_H__ +#define __DRM_GEM_CMA_HELPER_H__ + +#include +#include + +/** + * struct drm_gem_cma_object - GEM object backed by CMA memory allocations + * @base: base GEM object + * @paddr: physical address of the backing memory + * @sgt: scatter/gather table for imported PRIME buffers. The table can have + * more than one entry but they are guaranteed to have contiguous + * DMA addresses. + * @vaddr: kernel virtual address of the backing memory + */ +struct drm_gem_cma_object { + struct drm_gem_object base; + dma_addr_t paddr; + struct sg_table *sgt; + + /* For objects with DMA memory allocated by GEM CMA */ + void *vaddr; +}; + +#define to_drm_gem_cma_obj(gem_obj) \ + container_of(gem_obj, struct drm_gem_cma_object, base) + +#ifndef CONFIG_MMU +#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ + .get_unmapped_area = drm_gem_cma_get_unmapped_area, +#else +#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS +#endif + +/** + * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers + * @name: name for the generated structure + * + * This macro autogenerates a suitable &struct file_operations for CMA based + * drivers, which can be assigned to &drm_driver.fops. Note that this structure + * cannot be shared between drivers, because it contains a reference to the + * current module using THIS_MODULE. + * + * Note that the declaration is already marked as static - if you need a + * non-static version of this you're probably doing it wrong and will break the + * THIS_MODULE reference by accident. 
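A minimal sketch of resolving a userspace handle in a driver ioctl with drm_gem_object_lookup() and drm_gem_object_put_unlocked() from drm_gem.h above (the foo_* names and the ioctl payload layout are hypothetical):

#include <linux/errno.h>
#include <linux/types.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

struct foo_get_size_args {	/* hypothetical ioctl payload */
	u32 handle;
	u64 size;
};

static int foo_get_size_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct foo_get_size_args *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	args->size = obj->size;

	/* the lookup returned a reference, drop it when done */
	drm_gem_object_put_unlocked(obj);
	return 0;
}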
+ */ +#define DEFINE_DRM_GEM_CMA_FOPS(name) \ + static const struct file_operations name = {\ + .owner = THIS_MODULE,\ + .open = drm_open,\ + .release = drm_release,\ + .unlocked_ioctl = drm_ioctl,\ + .compat_ioctl = drm_compat_ioctl,\ + .poll = drm_poll,\ + .read = drm_read,\ + .llseek = noop_llseek,\ + .mmap = drm_gem_cma_mmap,\ + DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ + } + +/* free GEM object */ +void drm_gem_cma_free_object(struct drm_gem_object *gem_obj); + +/* create memory region for DRM framebuffer */ +int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args); + +/* create memory region for DRM framebuffer */ +int drm_gem_cma_dumb_create(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args); + +/* set vm_flags and we can change the VM attribute to other one at here */ +int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); + +/* allocate physical memory */ +struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, + size_t size); + +extern const struct vm_operations_struct drm_gem_cma_vm_ops; + +#ifndef CONFIG_MMU +unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#endif + +void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + +struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object * +drm_gem_cma_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); +int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma); +void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj); +void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr); + +#endif /* __DRM_GEM_CMA_HELPER_H__ */ diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h new file mode 100644 index 000000000..a38de7eb5 --- /dev/null +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -0,0 +1,40 @@ +#ifndef __DRM_GEM_FB_HELPER_H__ +#define __DRM_GEM_FB_HELPER_H__ + +struct drm_device; +struct drm_fb_helper_surface_size; +struct drm_file; +struct drm_framebuffer; +struct drm_framebuffer_funcs; +struct drm_gem_object; +struct drm_mode_fb_cmd2; +struct drm_plane; +struct drm_plane_state; +struct drm_simple_display_pipe; + +struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb, + unsigned int plane); +void drm_gem_fb_destroy(struct drm_framebuffer *fb); +int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file, + unsigned int *handle); + +struct drm_framebuffer * +drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_framebuffer_funcs *funcs); +struct drm_framebuffer * +drm_gem_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd); + +int drm_gem_fb_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state); +int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + +struct drm_framebuffer * +drm_gem_fbdev_fb_create(struct drm_device *dev, + struct drm_fb_helper_surface_size *sizes, + unsigned int pitch_align, struct drm_gem_object *obj, + const struct drm_framebuffer_funcs *funcs); + +#endif diff --git a/include/drm/drm_global.h 
b/include/drm/drm_global.h new file mode 100644 index 000000000..3a830602a --- /dev/null +++ b/include/drm/drm_global.h @@ -0,0 +1,53 @@ +/************************************************************************** + * + * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _DRM_GLOBAL_H_ +#define _DRM_GLOBAL_H_ +enum drm_global_types { + DRM_GLOBAL_TTM_MEM = 0, + DRM_GLOBAL_TTM_BO, + DRM_GLOBAL_TTM_OBJECT, + DRM_GLOBAL_NUM +}; + +struct drm_global_reference { + enum drm_global_types global_type; + size_t size; + void *object; + int (*init) (struct drm_global_reference *); + void (*release) (struct drm_global_reference *); +}; + +void drm_global_init(void); +void drm_global_release(void); +int drm_global_item_ref(struct drm_global_reference *ref); +void drm_global_item_unref(struct drm_global_reference *ref); + +#endif diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h new file mode 100644 index 000000000..bb95ff011 --- /dev/null +++ b/include/drm/drm_hashtab.h @@ -0,0 +1,79 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Simple open hash tab implementation. + * + * Authors: + * Thomas Hellström + */ + +#ifndef DRM_HASHTAB_H +#define DRM_HASHTAB_H + +#include + +#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) + +struct drm_hash_item { + struct hlist_node head; + unsigned long key; +}; + +struct drm_open_hash { + struct hlist_head *table; + u8 order; +}; + +int drm_ht_create(struct drm_open_hash *ht, unsigned int order); +int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); +int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, + unsigned long seed, int bits, int shift, + unsigned long add); +int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); + +void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); +int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); +int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); +void drm_ht_remove(struct drm_open_hash *ht); + +/* + * RCU-safe interface + * + * The user of this API needs to make sure that two or more instances of the + * hash table manipulation functions are never run simultaneously. + * The lookup function drm_ht_find_item_rcu may, however, run simultaneously + * with any of the manipulation functions as long as it's called from within + * an RCU read-locked section. + */ +#define drm_ht_insert_item_rcu drm_ht_insert_item +#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please +#define drm_ht_remove_key_rcu drm_ht_remove_key +#define drm_ht_remove_item_rcu drm_ht_remove_item +#define drm_ht_find_item_rcu drm_ht_find_item + +#endif diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h new file mode 100644 index 000000000..98e63d870 --- /dev/null +++ b/include/drm/drm_hdcp.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2017 Google, Inc. 
+ * + * Authors: + * Sean Paul + */ + +#ifndef _DRM_HDCP_H_INCLUDED_ +#define _DRM_HDCP_H_INCLUDED_ + +/* Period of hdcp checks (to ensure we're still authenticated) */ +#define DRM_HDCP_CHECK_PERIOD_MS (128 * 16) + +/* Shared lengths/masks between HDMI/DVI/DisplayPort */ +#define DRM_HDCP_AN_LEN 8 +#define DRM_HDCP_BSTATUS_LEN 2 +#define DRM_HDCP_KSV_LEN 5 +#define DRM_HDCP_RI_LEN 2 +#define DRM_HDCP_V_PRIME_PART_LEN 4 +#define DRM_HDCP_V_PRIME_NUM_PARTS 5 +#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f) +#define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) +#define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7)) + +/* Slave address for the HDCP registers in the receiver */ +#define DRM_HDCP_DDC_ADDR 0x3A + +/* HDCP register offsets for HDMI/DVI devices */ +#define DRM_HDCP_DDC_BKSV 0x00 +#define DRM_HDCP_DDC_RI_PRIME 0x08 +#define DRM_HDCP_DDC_AKSV 0x10 +#define DRM_HDCP_DDC_AN 0x18 +#define DRM_HDCP_DDC_V_PRIME(h) (0x20 + h * 4) +#define DRM_HDCP_DDC_BCAPS 0x40 +#define DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT BIT(6) +#define DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY BIT(5) +#define DRM_HDCP_DDC_BSTATUS 0x41 +#define DRM_HDCP_DDC_KSV_FIFO 0x43 + +#endif diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h new file mode 100644 index 000000000..b722757a5 --- /dev/null +++ b/include/drm/drm_ioctl.h @@ -0,0 +1,183 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. (Rik) Faith + * Author: Gareth Hughes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_IOCTL_H_ +#define _DRM_IOCTL_H_ + +#include +#include + +#include + +struct drm_device; +struct drm_file; +struct file; + +/** + * drm_ioctl_t - DRM ioctl function type. + * @dev: DRM device inode + * @data: private pointer of the ioctl call + * @file_priv: DRM file this ioctl was made on + * + * This is the DRM ioctl typedef. Note that drm_ioctl() has alrady copied @data + * into kernel-space, and will also copy it back, depending upon the read/write + * settings in the ioctl command code. + */ +typedef int drm_ioctl_t(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/** + * drm_ioctl_compat_t - compatibility DRM ioctl function type. 
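As a usage sketch for the repeater-topology macros above: given the two BSTATUS bytes read back from an HDCP repeater, a driver can reject over-large topologies and extract the downstream device count. The foo_ name is hypothetical; the byte layout follows how these macros are applied by existing users (device count and MAX_DEVS in byte 0, cascade depth and MAX_CASCADE in byte 1).

#include <linux/errno.h>
#include <linux/types.h>
#include <drm/drm_hdcp.h>

static int foo_hdcp_downstream_count(const u8 bstatus[DRM_HDCP_BSTATUS_LEN])
{
	/* Topologies beyond the HDCP 1.x limits cannot be authenticated. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1]))
		return -E2BIG;

	return DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
}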
+ * @filp: file pointer + * @cmd: ioctl command code + * @arg: DRM file this ioctl was made on + * + * Just a typedef to make declaring an array of compatibility handlers easier. + * New drivers shouldn't screw up the structure layout for their ioctl + * structures and hence never need this. + */ +typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, + unsigned long arg); + +#define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n) +#define DRM_MAJOR 226 + +/** + * enum drm_ioctl_flags - DRM ioctl flags + * + * Various flags that can be set in &drm_ioctl_desc.flags to control how + * userspace can use a given ioctl. + */ +enum drm_ioctl_flags { + /** + * @DRM_AUTH: + * + * This is for ioctl which are used for rendering, and require that the + * file descriptor is either for a render node, or if it's a + * legacy/primary node, then it must be authenticated. + */ + DRM_AUTH = BIT(0), + /** + * @DRM_MASTER: + * + * This must be set for any ioctl which can change the modeset or + * display state. Userspace must call the ioctl through a primary node, + * while it is the active master. + * + * Note that read-only modeset ioctl can also be called by + * unauthenticated clients, or when a master is not the currently active + * one. + */ + DRM_MASTER = BIT(1), + /** + * @DRM_ROOT_ONLY: + * + * Anything that could potentially wreak a master file descriptor needs + * to have this flag set. Current that's only for the SETMASTER and + * DROPMASTER ioctl, which e.g. logind can call to force a non-behaving + * master (display compositor) into compliance. + * + * This is equivalent to callers with the SYSADMIN capability. + */ + DRM_ROOT_ONLY = BIT(2), + /** + * @DRM_UNLOCKED: + * + * Whether &drm_ioctl_desc.func should be called with the DRM BKL held + * or not. Enforced as the default for all modern drivers, hence there + * should never be a need to set this flag. + */ + DRM_UNLOCKED = BIT(4), + /** + * @DRM_RENDER_ALLOW: + * + * This is used for all ioctl needed for rendering only, for drivers + * which support render nodes. This should be all new render drivers, + * and hence it should be always set for any ioctl with DRM_AUTH set. + * Note though that read-only query ioctl might have this set, but have + * not set DRM_AUTH because they do not require authentication. + */ + DRM_RENDER_ALLOW = BIT(5), +}; + +/** + * struct drm_ioctl_desc - DRM driver ioctl entry + * @cmd: ioctl command number, without flags + * @flags: a bitmask of &enum drm_ioctl_flags + * @func: handler for this ioctl + * @name: user-readable name for debug output + * + * For convenience it's easier to create these using the DRM_IOCTL_DEF_DRV() + * macro. + */ +struct drm_ioctl_desc { + unsigned int cmd; + enum drm_ioctl_flags flags; + drm_ioctl_t *func; + const char *name; +}; + +/** + * DRM_IOCTL_DEF_DRV() - helper macro to fill out a &struct drm_ioctl_desc + * @ioctl: ioctl command suffix + * @_func: handler for the ioctl + * @_flags: a bitmask of &enum drm_ioctl_flags + * + * Small helper macro to create a &struct drm_ioctl_desc entry. The ioctl + * command number is constructed by prepending ``DRM_IOCTL\_`` and passing that + * to DRM_IOCTL_NR(). 
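A hedged sketch of a driver-private ioctl table built from the descriptor and flags documented above, using the DRM_IOCTL_DEF_DRV() helper defined right after this documentation block. Everything named "foo" here, including the UAPI command and payload struct, is hypothetical and would normally live in the driver's own uapi header; the table would then be wired up through the driver's &struct drm_driver (its .ioctls/.num_ioctls fields), which is not shown.

#include <linux/errno.h>
#include <linux/types.h>
#include <drm/drm_ioctl.h>
#include <uapi/drm/drm.h>		/* DRM_COMMAND_BASE, DRM_IOWR() */

/* Hypothetical UAPI, normally in the driver's uapi header. */
struct drm_foo_submit {
	__u64 commands;
	__u32 size;
	__u32 pad;
};
#define DRM_FOO_SUBMIT		0x00
#define DRM_IOCTL_FOO_SUBMIT	DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_SUBMIT, \
					 struct drm_foo_submit)

/* Hypothetical handler with the drm_ioctl_t signature documented above. */
static int foo_submit_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_foo_submit *args = data;	/* already copied in by drm_ioctl() */

	return args->size ? 0 : -EINVAL;
}

/* Rendering ioctl: authenticated primary nodes and render nodes may call it. */
static const struct drm_ioctl_desc foo_ioctls[] = {
	DRM_IOCTL_DEF_DRV(FOO_SUBMIT, foo_submit_ioctl,
			  DRM_AUTH | DRM_RENDER_ALLOW),
};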
+ */ +#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ + [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \ + .cmd = DRM_IOCTL_##ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } + +int drm_ioctl_permit(u32 flags, struct drm_file *file_priv); +long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32); +#ifdef CONFIG_COMPAT +long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +#else +/* Let drm_compat_ioctl be assigned to .compat_ioctl unconditionally */ +#define drm_compat_ioctl NULL +#endif +bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); + +int drm_noop(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_invalid_op(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +#endif /* _DRM_IOCTL_H_ */ diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h new file mode 100644 index 000000000..d77f6e65b --- /dev/null +++ b/include/drm/drm_irq.h @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_IRQ_H_ +#define _DRM_IRQ_H_ + +struct drm_device; + +int drm_irq_install(struct drm_device *dev, int irq); +int drm_irq_uninstall(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_lease.h b/include/drm/drm_lease.h new file mode 100644 index 000000000..fbc0ab548 --- /dev/null +++ b/include/drm/drm_lease.h @@ -0,0 +1,46 @@ +/* + * Copyright © 2017 Keith Packard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef _DRM_LEASE_H_ +#define _DRM_LEASE_H_ + +struct drm_file; +struct drm_device; +struct drm_master; + +struct drm_master *drm_lease_owner(struct drm_master *master); + +void drm_lease_destroy(struct drm_master *lessee); + +bool drm_lease_held(struct drm_file *file_priv, int id); + +bool _drm_lease_held(struct drm_file *file_priv, int id); + +void drm_lease_revoke(struct drm_master *master); + +uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs); + +int drm_mode_create_lease_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_list_lessees_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_get_lease_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_revoke_lease_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +#endif /* _DRM_LEASE_H_ */ diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h new file mode 100644 index 000000000..8fad66f88 --- /dev/null +++ b/include/drm/drm_legacy.h @@ -0,0 +1,207 @@ +#ifndef __DRM_DRM_LEGACY_H__ +#define __DRM_DRM_LEGACY_H__ + +#include + +/* + * Legacy driver interfaces for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * Copyright © 2014 Intel Corporation + * Daniel Vetter + * + * Author: Rickard E. (Rik) Faith + * Author: Gareth Hughes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + + +/* + * Legacy Support for palateontologic DRM drivers + * + * If you add a new driver and it uses any of these functions or structures, + * you're doing it terribly wrong. + */ + +/** + * DMA buffer. 
+ */ +struct drm_buf { + int idx; /**< Index into master buflist */ + int total; /**< Buffer size */ + int order; /**< log-base-2(total) */ + int used; /**< Amount of buffer in use (for DMA) */ + unsigned long offset; /**< Byte offset (used internally) */ + void *address; /**< Address of buffer */ + unsigned long bus_address; /**< Bus address of buffer */ + struct drm_buf *next; /**< Kernel-only: used for free list */ + __volatile__ int waiting; /**< On kernel DMA queue */ + __volatile__ int pending; /**< On hardware DMA queue */ + struct drm_file *file_priv; /**< Private of holding file descr */ + int context; /**< Kernel queue for this buffer */ + int while_locked; /**< Dispatch this buffer while locked */ + enum { + DRM_LIST_NONE = 0, + DRM_LIST_FREE = 1, + DRM_LIST_WAIT = 2, + DRM_LIST_PEND = 3, + DRM_LIST_PRIO = 4, + DRM_LIST_RECLAIM = 5 + } list; /**< Which list we're on */ + + int dev_priv_size; /**< Size of buffer private storage */ + void *dev_private; /**< Per-buffer private storage */ +}; + +typedef struct drm_dma_handle { + dma_addr_t busaddr; + void *vaddr; + size_t size; +} drm_dma_handle_t; + +/** + * Buffer entry. There is one of this for each buffer size order. + */ +struct drm_buf_entry { + int buf_size; /**< size */ + int buf_count; /**< number of buffers */ + struct drm_buf *buflist; /**< buffer list */ + int seg_count; + int page_order; + struct drm_dma_handle **seglist; + + int low_mark; /**< Low water mark */ + int high_mark; /**< High water mark */ +}; + +/** + * DMA data. + */ +struct drm_device_dma { + + struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ + int buf_count; /**< total number of buffers */ + struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ + int seg_count; + int page_count; /**< number of pages */ + unsigned long *pagelist; /**< page list */ + unsigned long byte_count; + enum { + _DRM_DMA_USE_AGP = 0x01, + _DRM_DMA_USE_SG = 0x02, + _DRM_DMA_USE_FB = 0x04, + _DRM_DMA_USE_PCI_RO = 0x08 + } flags; + +}; + +/** + * Scatter-gather memory. 
+ */ +struct drm_sg_mem { + unsigned long handle; + void *virtual; + int pages; + struct page **pagelist; + dma_addr_t *busaddr; +}; + +/** + * Kernel side of a mapping + */ +struct drm_local_map { + resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ + unsigned long size; /**< Requested physical size (bytes) */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ + void *handle; /**< User-space: "Handle" to pass to mmap() */ + /**< Kernel-space: kernel-virtual address */ + int mtrr; /**< MTRR slot used */ +}; + +typedef struct drm_local_map drm_local_map_t; + +/** + * Mappings list + */ +struct drm_map_list { + struct list_head head; /**< list head */ + struct drm_hash_item hash; + struct drm_local_map *map; /**< mapping */ + uint64_t user_token; + struct drm_master *master; +}; + +int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, struct drm_local_map **map_p); +void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); +int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); +void drm_legacy_master_rmmaps(struct drm_device *dev, + struct drm_master *master); +struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); +int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma); + +int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req); +int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req); + +/** + * Test that the hardware lock is held by the caller, returning otherwise. + * + * \param dev DRM device. + * \param filp file pointer of the caller. + */ +#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ +do { \ + if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ + _file_priv->master->lock.file_priv != _file_priv) { \ + DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ + __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ + _file_priv->master->lock.file_priv, _file_priv); \ + return -EINVAL; \ + } \ +} while (0) + +void drm_legacy_idlelock_take(struct drm_lock_data *lock); +void drm_legacy_idlelock_release(struct drm_lock_data *lock); + +/* drm_pci.c dma alloc wrappers */ +void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); + +/* drm_memory.c */ +void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); +void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); +void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); + +static inline struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, + unsigned int token) +{ + struct drm_map_list *_entry; + list_for_each_entry(_entry, &dev->maplist, head) + if (_entry->user_token == token) + return _entry->map; + return NULL; +} + +#endif /* __DRM_DRM_LEGACY_H__ */ diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h new file mode 100644 index 000000000..4fef19064 --- /dev/null +++ b/include/drm/drm_mipi_dsi.h @@ -0,0 +1,319 @@ +/* + * MIPI DSI Bus + * + * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd. + * Andrzej Hajda + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __DRM_MIPI_DSI_H__ +#define __DRM_MIPI_DSI_H__ + +#include + +struct mipi_dsi_host; +struct mipi_dsi_device; + +/* request ACK from peripheral */ +#define MIPI_DSI_MSG_REQ_ACK BIT(0) +/* use Low Power Mode to transmit message */ +#define MIPI_DSI_MSG_USE_LPM BIT(1) + +/** + * struct mipi_dsi_msg - read/write DSI buffer + * @channel: virtual channel id + * @type: payload data type + * @flags: flags controlling this message transmission + * @tx_len: length of @tx_buf + * @tx_buf: data to be written + * @rx_len: length of @rx_buf + * @rx_buf: data to be read, or NULL + */ +struct mipi_dsi_msg { + u8 channel; + u8 type; + u16 flags; + + size_t tx_len; + const void *tx_buf; + + size_t rx_len; + void *rx_buf; +}; + +bool mipi_dsi_packet_format_is_short(u8 type); +bool mipi_dsi_packet_format_is_long(u8 type); + +/** + * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format + * @size: size (in bytes) of the packet + * @header: the four bytes that make up the header (Data ID, Word Count or + * Packet Data, and ECC) + * @payload_length: number of bytes in the payload + * @payload: a pointer to a buffer containing the payload, if any + */ +struct mipi_dsi_packet { + size_t size; + u8 header[4]; + size_t payload_length; + const u8 *payload; +}; + +int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, + const struct mipi_dsi_msg *msg); + +/** + * struct mipi_dsi_host_ops - DSI bus operations + * @attach: attach DSI device to DSI host + * @detach: detach DSI device from DSI host + * @transfer: transmit a DSI packet + * + * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg + * structures. This structure contains information about the type of packet + * being transmitted as well as the transmit and receive buffers. When an + * error is encountered during transmission, this function will return a + * negative error code. On success it shall return the number of bytes + * transmitted for write packets or the number of bytes received for read + * packets. + * + * Note that typically DSI packet transmission is atomic, so the .transfer() + * function will seldomly return anything other than the number of bytes + * contained in the transmit buffer on success. 
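A minimal sketch of the .transfer() contract described above (the ops structure itself is defined immediately after this comment block). It assumes mipi_dsi_create_packet(), declared earlier in this header, and a hypothetical foo_hw_push_fifo() standing in for the real controller programming; ACK requests and read-back handling are deliberately left out.

#include <linux/types.h>
#include <drm/drm_mipi_dsi.h>

/* Stand-in for the actual controller FIFO/register programming. */
static void foo_hw_push_fifo(struct mipi_dsi_host *host,
			     const void *buf, size_t len)
{
	/* hypothetical hardware access elided */
}

static ssize_t foo_dsi_transfer(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet packet;
	int ret;

	/* Build the 4-byte header and, for long packets, the payload view. */
	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret < 0)
		return ret;

	foo_hw_push_fifo(host, packet.header, sizeof(packet.header));
	if (packet.payload_length)
		foo_hw_push_fifo(host, packet.payload, packet.payload_length);

	/*
	 * Per the contract above: number of bytes transmitted for a write.
	 * A real implementation would also honour MIPI_DSI_MSG_REQ_ACK and
	 * fill msg->rx_buf for read packets.
	 */
	return msg->tx_len;
}

static const struct mipi_dsi_host_ops foo_dsi_host_ops = {
	.transfer = foo_dsi_transfer,
	/* .attach and .detach omitted from this sketch */
};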
+ */ +struct mipi_dsi_host_ops { + int (*attach)(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi); + int (*detach)(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi); + ssize_t (*transfer)(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +}; + +/** + * struct mipi_dsi_host - DSI host device + * @dev: driver model device node for this DSI host + * @ops: DSI host operations + * @list: list management + */ +struct mipi_dsi_host { + struct device *dev; + const struct mipi_dsi_host_ops *ops; + struct list_head list; +}; + +int mipi_dsi_host_register(struct mipi_dsi_host *host); +void mipi_dsi_host_unregister(struct mipi_dsi_host *host); +struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node); + +/* DSI mode flags */ + +/* video mode */ +#define MIPI_DSI_MODE_VIDEO BIT(0) +/* video burst mode */ +#define MIPI_DSI_MODE_VIDEO_BURST BIT(1) +/* video pulse mode */ +#define MIPI_DSI_MODE_VIDEO_SYNC_PULSE BIT(2) +/* enable auto vertical count mode */ +#define MIPI_DSI_MODE_VIDEO_AUTO_VERT BIT(3) +/* enable hsync-end packets in vsync-pulse and v-porch area */ +#define MIPI_DSI_MODE_VIDEO_HSE BIT(4) +/* disable hfront-porch area */ +#define MIPI_DSI_MODE_VIDEO_HFP BIT(5) +/* disable hback-porch area */ +#define MIPI_DSI_MODE_VIDEO_HBP BIT(6) +/* disable hsync-active area */ +#define MIPI_DSI_MODE_VIDEO_HSA BIT(7) +/* flush display FIFO on vsync pulse */ +#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8) +/* disable EoT packets in HS mode */ +#define MIPI_DSI_MODE_EOT_PACKET BIT(9) +/* device supports non-continuous clock behavior (DSI spec 5.6.1) */ +#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10) +/* transmit data in low power */ +#define MIPI_DSI_MODE_LPM BIT(11) + +enum mipi_dsi_pixel_format { + MIPI_DSI_FMT_RGB888, + MIPI_DSI_FMT_RGB666, + MIPI_DSI_FMT_RGB666_PACKED, + MIPI_DSI_FMT_RGB565, +}; + +#define DSI_DEV_NAME_SIZE 20 + +/** + * struct mipi_dsi_device_info - template for creating a mipi_dsi_device + * @type: DSI peripheral chip type + * @channel: DSI virtual channel assigned to peripheral + * @node: pointer to OF device node or NULL + * + * This is populated and passed to mipi_dsi_device_new to create a new + * DSI device + */ +struct mipi_dsi_device_info { + char type[DSI_DEV_NAME_SIZE]; + u32 channel; + struct device_node *node; +}; + +/** + * struct mipi_dsi_device - DSI peripheral device + * @host: DSI host for this peripheral + * @dev: driver model device node for this peripheral + * @name: DSI peripheral chip type + * @channel: virtual channel assigned to the peripheral + * @format: pixel format for video mode + * @lanes: number of active data lanes + * @mode_flags: DSI operation mode related flags + */ +struct mipi_dsi_device { + struct mipi_dsi_host *host; + struct device dev; + + char name[DSI_DEV_NAME_SIZE]; + unsigned int channel; + unsigned int lanes; + enum mipi_dsi_pixel_format format; + unsigned long mode_flags; +}; + +#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:" + +static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) +{ + return container_of(dev, struct mipi_dsi_device, dev); +} + +/** + * mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any + * given pixel format defined by the MIPI DSI + * specification + * @fmt: MIPI DSI pixel format + * + * Returns: The number of bits per pixel of the given pixel format. 
+ */ +static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt) +{ + switch (fmt) { + case MIPI_DSI_FMT_RGB888: + case MIPI_DSI_FMT_RGB666: + return 24; + + case MIPI_DSI_FMT_RGB666_PACKED: + return 18; + + case MIPI_DSI_FMT_RGB565: + return 16; + } + + return -EINVAL; +} + +struct mipi_dsi_device * +mipi_dsi_device_register_full(struct mipi_dsi_host *host, + const struct mipi_dsi_device_info *info); +void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi); +struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np); +int mipi_dsi_attach(struct mipi_dsi_device *dsi); +int mipi_dsi_detach(struct mipi_dsi_device *dsi); +int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi); +int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi); +int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, + u16 value); + +ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload, + size_t size); +ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params, + size_t num_params, void *data, size_t size); + +/** + * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode + * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking + * information only + * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK : the TE output line consists of both + * V-Blanking and H-Blanking information + */ +enum mipi_dsi_dcs_tear_mode { + MIPI_DSI_DCS_TEAR_MODE_VBLANK, + MIPI_DSI_DCS_TEAR_MODE_VHBLANK, +}; + +#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2) +#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3) +#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4) +#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5) +#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6) + +ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi, + const void *data, size_t len); +ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd, + const void *data, size_t len); +ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, + size_t len); +int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode); +int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format); +int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start, + u16 end); +int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, + u16 end); +int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, + enum mipi_dsi_dcs_tear_mode mode); +int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format); +int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline); +int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi, + u16 brightness); +int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi, + u16 *brightness); + +/** + * struct mipi_dsi_driver - DSI driver + * @driver: device driver model driver + * @probe: callback for device binding + * @remove: callback for device unbinding + * @shutdown: called at shutdown time to quiesce the device + */ +struct mipi_dsi_driver { + struct device_driver driver; + int(*probe)(struct 
mipi_dsi_device *dsi); + int(*remove)(struct mipi_dsi_device *dsi); + void (*shutdown)(struct mipi_dsi_device *dsi); +}; + +static inline struct mipi_dsi_driver * +to_mipi_dsi_driver(struct device_driver *driver) +{ + return container_of(driver, struct mipi_dsi_driver, driver); +} + +static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi) +{ + return dev_get_drvdata(&dsi->dev); +} + +static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data) +{ + dev_set_drvdata(&dsi->dev, data); +} + +int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver, + struct module *owner); +void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver); + +#define mipi_dsi_driver_register(driver) \ + mipi_dsi_driver_register_full(driver, THIS_MODULE) + +#define module_mipi_dsi_driver(__mipi_dsi_driver) \ + module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \ + mipi_dsi_driver_unregister) + +#endif /* __DRM_MIPI_DSI__ */ diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h new file mode 100644 index 000000000..2c3bbb43c --- /dev/null +++ b/include/drm/drm_mm.h @@ -0,0 +1,549 @@ +/************************************************************************** + * + * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. + * Copyright 2016 Intel Corporation + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Authors: + * Thomas Hellstrom + */ + +#ifndef _DRM_MM_H_ +#define _DRM_MM_H_ + +/* + * Generic range manager structs + */ +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_DRM_DEBUG_MM +#include +#endif +#include + +#ifdef CONFIG_DRM_DEBUG_MM +#define DRM_MM_BUG_ON(expr) BUG_ON(expr) +#else +#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) +#endif + +/** + * enum drm_mm_insert_mode - control search and allocation behaviour + * + * The &struct drm_mm range manager supports finding a suitable modes using + * a number of search trees. These trees are oranised by size, by address and + * in most recent eviction order. This allows the user to find either the + * smallest hole to reuse, the lowest or highest address to reuse, or simply + * reuse the most recent eviction that fits. 
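Tying the mipi_dsi_device/mipi_dsi_driver pieces above together, a hedged sketch of a minimal peripheral (panel-style) driver. All "foo" names are illustrative, the link parameters are arbitrary, and the DCS bring-up helper is shown only to demonstrate the mipi_dsi_dcs_*() calls; real drivers issue it from their enable path.

#include <linux/module.h>
#include <drm/drm_mipi_dsi.h>

static int foo_panel_probe(struct mipi_dsi_device *dsi)
{
	/* Describe the link before attaching to the host. */
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;

	return mipi_dsi_attach(dsi);
}

static int foo_panel_remove(struct mipi_dsi_device *dsi)
{
	return mipi_dsi_detach(dsi);
}

/* Typically called from a panel/bridge enable hook, not from probe. */
static void foo_panel_enable(struct mipi_dsi_device *dsi)
{
	mipi_dsi_dcs_exit_sleep_mode(dsi);
	mipi_dsi_dcs_set_display_on(dsi);
}

static struct mipi_dsi_driver foo_panel_driver = {
	.probe = foo_panel_probe,
	.remove = foo_panel_remove,
	.driver = {
		.name = "foo-panel",
	},
};
module_mipi_dsi_driver(foo_panel_driver);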
When allocating the &drm_mm_node + * from within the hole, the &drm_mm_insert_mode also dictate whether to + * allocate the lowest matching address or the highest. + */ +enum drm_mm_insert_mode { + /** + * @DRM_MM_INSERT_BEST: + * + * Search for the smallest hole (within the search range) that fits + * the desired node. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_BEST = 0, + + /** + * @DRM_MM_INSERT_LOW: + * + * Search for the lowest hole (address closest to 0, within the search + * range) that fits the desired node. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_LOW, + + /** + * @DRM_MM_INSERT_HIGH: + * + * Search for the highest hole (address closest to U64_MAX, within the + * search range) that fits the desired node. + * + * Allocates the node from the *top* of the found hole. The specified + * alignment for the node is applied to the base of the node + * (&drm_mm_node.start). + */ + DRM_MM_INSERT_HIGH, + + /** + * @DRM_MM_INSERT_EVICT: + * + * Search for the most recently evicted hole (within the search range) + * that fits the desired node. This is appropriate for use immediately + * after performing an eviction scan (see drm_mm_scan_init()) and + * removing the selected nodes to form a hole. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_EVICT, + + /** + * @DRM_MM_INSERT_ONCE: + * + * Only check the first hole for suitablity and report -ENOSPC + * immediately otherwise, rather than check every hole until a + * suitable one is found. Can only be used in conjunction with another + * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW. + */ + DRM_MM_INSERT_ONCE = BIT(31), + + /** + * @DRM_MM_INSERT_HIGHEST: + * + * Only check the highest hole (the hole with the largest address) and + * insert the node at the top of the hole or report -ENOSPC if + * unsuitable. + * + * Does not search all holes. + */ + DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE, + + /** + * @DRM_MM_INSERT_LOWEST: + * + * Only check the lowest hole (the hole with the smallest address) and + * insert the node at the bottom of the hole or report -ENOSPC if + * unsuitable. + * + * Does not search all holes. + */ + DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE, +}; + +/** + * struct drm_mm_node - allocated block in the DRM allocator + * + * This represents an allocated block in a &drm_mm allocator. Except for + * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is + * entirely opaque and should only be accessed through the provided funcions. + * Since allocation of these nodes is entirely handled by the driver they can be + * embedded. + */ +struct drm_mm_node { + /** @color: Opaque driver-private tag. */ + unsigned long color; + /** @start: Start address of the allocated block. */ + u64 start; + /** @size: Size of the allocated block. */ + u64 size; + /* private: */ + struct drm_mm *mm; + struct list_head node_list; + struct list_head hole_stack; + struct rb_node rb; + struct rb_node rb_hole_size; + struct rb_node rb_hole_addr; + u64 __subtree_last; + u64 hole_size; + bool allocated : 1; + bool scanned_block : 1; +#ifdef CONFIG_DRM_DEBUG_MM + depot_stack_handle_t stack; +#endif +}; + +/** + * struct drm_mm - DRM allocator + * + * DRM range allocator with a few special functions and features geared towards + * managing GPU memory. 
Except for the @color_adjust callback the structure is + * entirely opaque and should only be accessed through the provided functions + * and macros. This structure can be embedded into larger driver structures. + */ +struct drm_mm { + /** + * @color_adjust: + * + * Optional driver callback to further apply restrictions on a hole. The + * node argument points at the node containing the hole from which the + * block would be allocated (see drm_mm_hole_follows() and friends). The + * other arguments are the size of the block to be allocated. The driver + * can adjust the start and end as needed to e.g. insert guard pages. + */ + void (*color_adjust)(const struct drm_mm_node *node, + unsigned long color, + u64 *start, u64 *end); + + /* private: */ + /* List of all memory nodes that immediately precede a free hole. */ + struct list_head hole_stack; + /* head_node.node_list is the list of all memory nodes, ordered + * according to the (increasing) start address of the memory node. */ + struct drm_mm_node head_node; + /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */ + struct rb_root_cached interval_tree; + struct rb_root_cached holes_size; + struct rb_root holes_addr; + + unsigned long scan_active; +}; + +/** + * struct drm_mm_scan - DRM allocator eviction roaster data + * + * This structure tracks data needed for the eviction roaster set up using + * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and + * drm_mm_scan_remove_block(). The structure is entirely opaque and should only + * be accessed through the provided functions and macros. It is meant to be + * allocated temporarily by the driver on the stack. + */ +struct drm_mm_scan { + /* private: */ + struct drm_mm *mm; + + u64 size; + u64 alignment; + u64 remainder_mask; + + u64 range_start; + u64 range_end; + + u64 hit_start; + u64 hit_end; + + unsigned long color; + enum drm_mm_insert_mode mode; +}; + +/** + * drm_mm_node_allocated - checks whether a node is allocated + * @node: drm_mm_node to check + * + * Drivers are required to clear a node prior to using it with the + * drm_mm range manager. + * + * Drivers should use this helper for proper encapsulation of drm_mm + * internals. + * + * Returns: + * True if the @node is allocated. + */ +static inline bool drm_mm_node_allocated(const struct drm_mm_node *node) +{ + return node->allocated; +} + +/** + * drm_mm_initialized - checks whether an allocator is initialized + * @mm: drm_mm to check + * + * Drivers should clear the struct drm_mm prior to initialisation if they + * want to use this function. + * + * Drivers should use this helper for proper encapsulation of drm_mm + * internals. + * + * Returns: + * True if the @mm is initialized. + */ +static inline bool drm_mm_initialized(const struct drm_mm *mm) +{ + return mm->hole_stack.next; +} + +/** + * drm_mm_hole_follows - checks whether a hole follows this node + * @node: drm_mm_node to check + * + * Holes are embedded into the drm_mm using the tail of a drm_mm_node. + * If you wish to know whether a hole follows this particular node, + * query this function. See also drm_mm_hole_node_start() and + * drm_mm_hole_node_end(). + * + * Returns: + * True if a hole follows the @node. 
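The drm_mm_node_allocated() helper above is typically used as a guard in teardown paths, since nodes must be zeroed before first use and may or may not have been inserted yet. A small sketch (struct foo_vma is hypothetical; drm_mm_remove_node() is declared further down in this header):

#include <drm/drm_mm.h>

struct foo_vma {
	struct drm_mm_node node;	/* zeroed when the object is allocated */
	/* ... */
};

static void foo_vma_unbind(struct foo_vma *vma)
{
	/* Only nodes that were actually inserted may be removed. */
	if (drm_mm_node_allocated(&vma->node))
		drm_mm_remove_node(&vma->node);
}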
+ */ +static inline bool drm_mm_hole_follows(const struct drm_mm_node *node) +{ + return node->hole_size; +} + +static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) +{ + return hole_node->start + hole_node->size; +} + +/** + * drm_mm_hole_node_start - computes the start of the hole following @node + * @hole_node: drm_mm_node which implicitly tracks the following hole + * + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows() + * + * Returns: + * Start of the subsequent hole. + */ +static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node) +{ + DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node)); + return __drm_mm_hole_node_start(hole_node); +} + +static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) +{ + return list_next_entry(hole_node, node_list)->start; +} + +/** + * drm_mm_hole_node_end - computes the end of the hole following @node + * @hole_node: drm_mm_node which implicitly tracks the following hole + * + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows(). + * + * Returns: + * End of the subsequent hole. + */ +static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) +{ + return __drm_mm_hole_node_end(hole_node); +} + +/** + * drm_mm_nodes - list of nodes under the drm_mm range manager + * @mm: the struct drm_mm range manger + * + * As the drm_mm range manager hides its node_list deep with its + * structure, extracting it looks painful and repetitive. This is + * not expected to be used outside of the drm_mm_for_each_node() + * macros and similar internal functions. + * + * Returns: + * The node list, may be empty. + */ +#define drm_mm_nodes(mm) (&(mm)->head_node.node_list) + +/** + * drm_mm_for_each_node - iterator to walk over all allocated nodes + * @entry: &struct drm_mm_node to assign to in each iteration step + * @mm: &drm_mm allocator to walk + * + * This iterator walks over all nodes in the range allocator. It is implemented + * with list_for_each(), so not save against removal of elements. + */ +#define drm_mm_for_each_node(entry, mm) \ + list_for_each_entry(entry, drm_mm_nodes(mm), node_list) + +/** + * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes + * @entry: &struct drm_mm_node to assign to in each iteration step + * @next: &struct drm_mm_node to store the next step + * @mm: &drm_mm allocator to walk + * + * This iterator walks over all nodes in the range allocator. It is implemented + * with list_for_each_safe(), so save against removal of elements. + */ +#define drm_mm_for_each_node_safe(entry, next, mm) \ + list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list) + +/** + * drm_mm_for_each_hole - iterator to walk over all holes + * @pos: &drm_mm_node used internally to track progress + * @mm: &drm_mm allocator to walk + * @hole_start: ulong variable to assign the hole start to on each iteration + * @hole_end: ulong variable to assign the hole end to on each iteration + * + * This iterator walks over all holes in the range allocator. It is implemented + * with list_for_each(), so not save against removal of elements. @entry is used + * internally and will not reflect a real drm_mm_node for the very first hole. 
+ * Hence users of this iterator may not access it. + * + * Implementation Note: + * We need to inline list_for_each_entry in order to be able to set hole_start + * and hole_end on each iteration while keeping the macro sane. + */ +#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \ + for (pos = list_first_entry(&(mm)->hole_stack, \ + typeof(*pos), hole_stack); \ + &pos->hole_stack != &(mm)->hole_stack ? \ + hole_start = drm_mm_hole_node_start(pos), \ + hole_end = hole_start + pos->hole_size, \ + 1 : 0; \ + pos = list_next_entry(pos, hole_stack)) + +/* + * Basic range manager support (drm_mm.c) + */ +int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); +int drm_mm_insert_node_in_range(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size, + u64 alignment, + unsigned long color, + u64 start, + u64 end, + enum drm_mm_insert_mode mode); + +/** + * drm_mm_insert_node_generic - search for space and insert @node + * @mm: drm_mm to allocate from + * @node: preallocate node to insert + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for this node + * @mode: fine-tune the allocation search and placement + * + * This is a simplified version of drm_mm_insert_node_in_range() with no + * range restrictions applied. + * + * The preallocated node must be cleared to 0. + * + * Returns: + * 0 on success, -ENOSPC if there's no suitable hole. + */ +static inline int +drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, + u64 size, u64 alignment, + unsigned long color, + enum drm_mm_insert_mode mode) +{ + return drm_mm_insert_node_in_range(mm, node, + size, alignment, color, + 0, U64_MAX, mode); +} + +/** + * drm_mm_insert_node - search for space and insert @node + * @mm: drm_mm to allocate from + * @node: preallocate node to insert + * @size: size of the allocation + * + * This is a simplified version of drm_mm_insert_node_generic() with @color set + * to 0. + * + * The preallocated node must be cleared to 0. + * + * Returns: + * 0 on success, -ENOSPC if there's no suitable hole. + */ +static inline int drm_mm_insert_node(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size) +{ + return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0); +} + +void drm_mm_remove_node(struct drm_mm_node *node); +void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); +void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); +void drm_mm_takedown(struct drm_mm *mm); + +/** + * drm_mm_clean - checks whether an allocator is clean + * @mm: drm_mm allocator to check + * + * Returns: + * True if the allocator is completely free, false if there's still a node + * allocated in it. + */ +static inline bool drm_mm_clean(const struct drm_mm *mm) +{ + return list_empty(drm_mm_nodes(mm)); +} + +struct drm_mm_node * +__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last); + +/** + * drm_mm_for_each_node_in_range - iterator to walk over a range of + * allocated nodes + * @node__: drm_mm_node structure to assign to in each iteration step + * @mm__: drm_mm allocator to walk + * @start__: starting offset, the first node will overlap this + * @end__: ending offset, the last node will start before this (but may overlap) + * + * This iterator walks over all nodes in the range allocator that lie + * between @start and @end. 
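A minimal sketch of the basic allocator lifecycle built from the functions above, assuming a fictional 256 MiB driver-managed address range and arbitrary 4 KiB alignment; the foo_* wrappers are illustrative only.

#include <linux/bug.h>
#include <linux/string.h>
#include <drm/drm_mm.h>

static struct drm_mm foo_mm;

static void foo_mm_setup(void)
{
	/* Manage the hypothetical range [0, 256 MiB). */
	drm_mm_init(&foo_mm, 0, 256 * 1024 * 1024);
}

static int foo_alloc_range(struct drm_mm_node *node, u64 size)
{
	/* Nodes must be zeroed before their first insertion. */
	memset(node, 0, sizeof(*node));

	/* Bottom-up search, 4 KiB alignment, no colouring. */
	return drm_mm_insert_node_generic(&foo_mm, node, size, 4096, 0,
					  DRM_MM_INSERT_LOW);
}

static void foo_free_range(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

static void foo_mm_teardown(void)
{
	/* All nodes are expected to have been removed by now. */
	WARN_ON(!drm_mm_clean(&foo_mm));
	drm_mm_takedown(&foo_mm);
}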
It is implemented similarly to list_for_each(), + * but using the internal interval tree to accelerate the search for the + * starting node, and so not safe against removal of elements. It assumes + * that @end is within (or is the upper limit of) the drm_mm allocator. + * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk + * over the special _unallocated_ &drm_mm.head_node, and may even continue + * indefinitely. + */ +#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \ + for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \ + node__->start < (end__); \ + node__ = list_next_entry(node__, node_list)) + +void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, u64 alignment, unsigned long color, + u64 start, u64 end, + enum drm_mm_insert_mode mode); + +/** + * drm_mm_scan_init - initialize lru scanning + * @scan: scan state + * @mm: drm_mm to scan + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for the allocation + * @mode: fine-tune the allocation search and placement + * + * This is a simplified version of drm_mm_scan_init_with_range() with no range + * restrictions applied. + * + * This simply sets up the scanning routines with the parameters for the desired + * hole. + * + * Warning: + * As long as the scan list is non-empty, no other operations than + * adding/removing nodes to/from the scan list are allowed. + */ +static inline void drm_mm_scan_init(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, + u64 alignment, + unsigned long color, + enum drm_mm_insert_mode mode) +{ + drm_mm_scan_init_with_range(scan, mm, + size, alignment, color, + 0, U64_MAX, mode); +} + +bool drm_mm_scan_add_block(struct drm_mm_scan *scan, + struct drm_mm_node *node); +bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, + struct drm_mm_node *node); +struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan); + +void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p); + +#endif diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h new file mode 100644 index 000000000..a0b202e1d --- /dev/null +++ b/include/drm/drm_mode_config.h @@ -0,0 +1,859 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
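A hedged sketch of the eviction-scan flow around drm_mm_scan_init(), drm_mm_scan_add_block() and drm_mm_scan_remove_block() declared above. struct foo_obj, the driver LRU list and foo_obj_evict() (assumed to unbind the object and remove its node from the drm_mm) are all hypothetical; the key points are that every block added to the scan must be removed again, in reverse order of addition, and that only blocks reported back as hits are actually evicted. After a successful run, the caller would typically insert its own node with DRM_MM_INSERT_EVICT.

#include <linux/errno.h>
#include <linux/list.h>
#include <drm/drm_mm.h>

struct foo_obj {
	struct drm_mm_node node;
	struct list_head lru_link;
};

/* Hypothetical: unbinds the object and calls drm_mm_remove_node(&obj->node). */
static void foo_obj_evict(struct foo_obj *obj);

static int foo_evict_for(struct drm_mm *mm, struct list_head *lru,
			 u64 size, u64 alignment)
{
	struct drm_mm_scan scan;
	struct foo_obj *obj, *next;
	LIST_HEAD(scanned);
	LIST_HEAD(evict);
	bool found = false;

	drm_mm_scan_init(&scan, mm, size, alignment, 0, DRM_MM_INSERT_LOW);

	/* Feed LRU candidates into the scan until a suitable hole appears. */
	list_for_each_entry_safe(obj, next, lru, lru_link) {
		/* list_move() prepends, so @scanned ends up in reverse order. */
		list_move(&obj->lru_link, &scanned);
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true;
			break;
		}
	}

	/* Remove every scanned block again, newest first, collecting hits. */
	list_for_each_entry_safe(obj, next, &scanned, lru_link) {
		bool hit = drm_mm_scan_remove_block(&scan, &obj->node);

		if (found && hit)
			list_move(&obj->lru_link, &evict);
		else
			list_move_tail(&obj->lru_link, lru);
	}

	/* Evict the selected blocks to actually open up the hole. */
	list_for_each_entry_safe(obj, next, &evict, lru_link) {
		list_move_tail(&obj->lru_link, lru);
		foo_obj_evict(obj);
	}

	return found ? 0 : -ENOSPC;
}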
+ */ + +#ifndef __DRM_MODE_CONFIG_H__ +#define __DRM_MODE_CONFIG_H__ + +#include +#include +#include +#include +#include + +#include + +struct drm_file; +struct drm_device; +struct drm_atomic_state; +struct drm_mode_fb_cmd2; +struct drm_format_info; +struct drm_display_mode; + +/** + * struct drm_mode_config_funcs - basic driver provided mode setting functions + * + * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that + * involve drivers. + */ +struct drm_mode_config_funcs { + /** + * @fb_create: + * + * Create a new framebuffer object. The core does basic checks on the + * requested metadata, but most of that is left to the driver. See + * &struct drm_mode_fb_cmd2 for details. + * + * If the parameters are deemed valid and the backing storage objects in + * the underlying memory manager all exist, then the driver allocates + * a new &drm_framebuffer structure, subclassed to contain + * driver-specific information (like the internal native buffer object + * references). It also needs to fill out all relevant metadata, which + * should be done by calling drm_helper_mode_fill_fb_struct(). + * + * The initialization is finalized by calling drm_framebuffer_init(), + * which registers the framebuffer and makes it accessible to other + * threads. + * + * RETURNS: + * + * A new framebuffer with an initial reference count of 1 or a negative + * error code encoded with ERR_PTR(). + */ + struct drm_framebuffer *(*fb_create)(struct drm_device *dev, + struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd); + + /** + * @get_format_info: + * + * Allows a driver to return custom format information for special + * fb layouts (eg. ones with auxiliary compression control planes). + * + * RETURNS: + * + * The format information specific to the given fb metadata, or + * NULL if none is found. + */ + const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd); + + /** + * @output_poll_changed: + * + * Callback used by helpers to inform the driver of output configuration + * changes. + * + * Drivers implementing fbdev emulation with the helpers can call + * drm_fb_helper_hotplug_changed from this hook to inform the fbdev + * helper of output changes. + * + * FIXME: + * + * Except that there's no vtable for device-level helper callbacks + * there's no reason this is a core function. + */ + void (*output_poll_changed)(struct drm_device *dev); + + /** + * @mode_valid: + * + * Device specific validation of display modes. Can be used to reject + * modes that can never be supported. Only device wide constraints can + * be checked here. crtc/encoder/bridge/connector specific constraints + * should be checked in the .mode_valid() hook for each specific object. + */ + enum drm_mode_status (*mode_valid)(struct drm_device *dev, + const struct drm_display_mode *mode); + + /** + * @atomic_check: + * + * This is the only hook to validate an atomic modeset update. This + * function must reject any modeset and state changes which the hardware + * or driver doesn't support. This includes but is of course not limited + * to: + * + * - Checking that the modes, framebuffers, scaling and placement + * requirements and so on are within the limits of the hardware. + * + * - Checking that any hidden shared resources are not oversubscribed. + * This can be shared PLLs, shared lanes, overall memory bandwidth, + * display fifo space (where shared between planes or maybe even + * CRTCs). 
+ * + * - Checking that virtualized resources exported to userspace are not + * oversubscribed. For various reasons it can make sense to expose + * more planes, crtcs or encoders than which are physically there. One + * example is dual-pipe operations (which generally should be hidden + * from userspace if when lockstepped in hardware, exposed otherwise), + * where a plane might need 1 hardware plane (if it's just on one + * pipe), 2 hardware planes (when it spans both pipes) or maybe even + * shared a hardware plane with a 2nd plane (if there's a compatible + * plane requested on the area handled by the other pipe). + * + * - Check that any transitional state is possible and that if + * requested, the update can indeed be done in the vblank period + * without temporarily disabling some functions. + * + * - Check any other constraints the driver or hardware might have. + * + * - This callback also needs to correctly fill out the &drm_crtc_state + * in this update to make sure that drm_atomic_crtc_needs_modeset() + * reflects the nature of the possible update and returns true if and + * only if the update cannot be applied without tearing within one + * vblank on that CRTC. The core uses that information to reject + * updates which require a full modeset (i.e. blanking the screen, or + * at least pausing updates for a substantial amount of time) if + * userspace has disallowed that in its request. + * + * - The driver also does not need to repeat basic input validation + * like done for the corresponding legacy entry points. The core does + * that before calling this hook. + * + * See the documentation of @atomic_commit for an exhaustive list of + * error conditions which don't have to be checked at the in this + * callback. + * + * See the documentation for &struct drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_check(), or one of the exported sub-functions of + * it. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EINVAL, if any of the above constraints are violated. + * + * - -EDEADLK, when returned from an attempt to acquire an additional + * &drm_modeset_lock through drm_modeset_lock(). + * + * - -ENOMEM, if allocating additional state sub-structures failed due + * to lack of memory. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point all errors are + * treated equally. + */ + int (*atomic_check)(struct drm_device *dev, + struct drm_atomic_state *state); + + /** + * @atomic_commit: + * + * This is the only hook to commit an atomic modeset update. The core + * guarantees that @atomic_check has been called successfully before + * calling this function, and that nothing has been changed in the + * interim. + * + * See the documentation for &struct drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_commit(), or one of the exported sub-functions of + * it. + * + * Nonblocking commits (as indicated with the nonblock parameter) must + * do any preparatory work which might result in an unsuccessful commit + * in the context of this callback. The only exceptions are hardware + * errors resulting in -EIO. 
But even in that case the driver must + * ensure that the display pipe is at least running, to avoid + * compositors crashing when pageflips don't work. Anything else, + * specifically committing the update to the hardware, should be done + * without blocking the caller. For updates which do not require a + * modeset this must be guaranteed. + * + * The driver must wait for any pending rendering to the new + * framebuffers to complete before executing the flip. It should also + * wait for any pending rendering from other drivers if the underlying + * buffer is a shared dma-buf. Nonblocking commits must not wait for + * rendering in the context of this callback. + * + * An application can request to be notified when the atomic commit has + * completed. These events are per-CRTC and can be distinguished by the + * CRTC index supplied in &drm_event to userspace. + * + * The drm core will supply a &struct drm_event in each CRTC's + * &drm_crtc_state.event. See the documentation for + * &drm_crtc_state.event for more details about the precise semantics of + * this event. + * + * NOTE: + * + * Drivers are not allowed to shut down any display pipe successfully + * enabled through an atomic commit on their own. Doing so can result in + * compositors crashing if a page flip is suddenly rejected because the + * pipe is off. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EBUSY, if a nonblocking updated is requested and there is + * an earlier updated pending. Drivers are allowed to support a queue + * of outstanding updates, but currently no driver supports that. + * Note that drivers must wait for preceding updates to complete if a + * synchronous update is requested, they are not allowed to fail the + * commit in that case. + * + * - -ENOMEM, if the driver failed to allocate memory. Specifically + * this can happen when trying to pin framebuffers, which must only + * be done when committing the state. + * + * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate + * that the driver has run out of vram, iommu space or similar GPU + * address space needed for framebuffer. + * + * - -EIO, if the hardware completely died. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point of view all errors are + * treated equally. + * + * This list is exhaustive. Specifically this hook is not allowed to + * return -EINVAL (any invalid requests should be caught in + * @atomic_check) or -EDEADLK (this function must not acquire + * additional modeset locks). + */ + int (*atomic_commit)(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock); + + /** + * @atomic_state_alloc: + * + * This optional hook can be used by drivers that want to subclass struct + * &drm_atomic_state to be able to track their own driver-private global + * state easily. If this hook is implemented, drivers must also + * implement @atomic_state_clear and @atomic_state_free. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + * + * RETURNS: + * + * A new &drm_atomic_state on success or NULL on failure. 
+ */ + struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev); + + /** + * @atomic_state_clear: + * + * This hook must clear any driver private state duplicated into the + * passed-in &drm_atomic_state. This hook is called when the caller + * encountered a &drm_modeset_lock deadlock and needs to drop all + * already acquired locks as part of the deadlock avoidance dance + * implemented in drm_modeset_backoff(). + * + * Any duplicated state must be invalidated since a concurrent atomic + * update might change it, and the drm atomic interfaces always apply + * updates as relative changes to the current state. + * + * Drivers that implement this must call drm_atomic_state_default_clear() + * to clear common state. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + */ + void (*atomic_state_clear)(struct drm_atomic_state *state); + + /** + * @atomic_state_free: + * + * This hook needs to free driver-private resources and the + * &drm_atomic_state itself. Note that the core first calls + * drm_atomic_state_clear() to avoid code duplication between the clear + * and free hooks. + * + * Drivers that implement this must call + * drm_atomic_state_default_release() to release common resources. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + */ + void (*atomic_state_free)(struct drm_atomic_state *state); +}; + +/** + * struct drm_mode_config - Mode configuration control structure + * @min_width: minimum fb pixel width on this device + * @min_height: minimum fb pixel height on this device + * @max_width: maximum fb pixel width on this device + * @max_height: maximum fb pixel height on this device + * @funcs: core driver provided mode setting functions + * @fb_base: base address of the framebuffer + * @poll_enabled: track polling support for this device + * @poll_running: track polling status for this device + * @delayed_event: track delayed poll uevent delivery for this device + * @output_poll_work: delayed work for polling in process context + * @preferred_depth: preferred RGB pixel depth, used by fb helpers + * @prefer_shadow: hint to userspace to prefer shadow-fb rendering + * @cursor_width: hint to userspace for max cursor width + * @cursor_height: hint to userspace for max cursor height + * @helper_private: mid-layer private data + * + * Core mode resource tracking structure. All CRTCs, encoders, and connectors + * enumerated by the driver are added here, as are global properties. Some + * global restrictions are also here, e.g. dimension restrictions. + */ +struct drm_mode_config { + /** + * @mutex: + * + * This is the big scary modeset BKL which protects everything that + * isn't protected otherwise. Scope is unclear and fuzzy, try to remove + * anything from under its protection and move it into more well-scoped + * locks. + * + * The one important thing this protects is the use of @acquire_ctx. + */ + struct mutex mutex; + + /** + * @connection_mutex: + * + * This protects connector state and the connector to encoder to CRTC + * routing chain. + * + * For atomic drivers specifically this protects &drm_connector.state. + */ + struct drm_modeset_lock connection_mutex; + + /** + * @acquire_ctx: + * + * Global implicit acquire context used by atomic drivers for legacy + * IOCTLs. Deprecated, since implicit locking contexts make it + * impossible to use driver-private &struct drm_modeset_lock. Users of + * this must hold @mutex.
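+ *
+ * For reference, the explicit pattern that replaces this implicit
+ * context looks roughly as follows. This is only a sketch, with error
+ * handling and the actual state inspection left out::
+ *
+ *	struct drm_modeset_acquire_ctx ctx;
+ *	int ret;
+ *
+ *	drm_modeset_acquire_init(&ctx, 0);
+ * retry:
+ *	ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ *	if (ret == -EDEADLK) {
+ *		drm_modeset_backoff(&ctx);
+ *		goto retry;
+ *	}
+ *
+ *	/* ... inspect or update state protected by crtc->mutex ... */
+ *
+ *	drm_modeset_drop_locks(&ctx);
+ *	drm_modeset_acquire_fini(&ctx);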
+ */ + struct drm_modeset_acquire_ctx *acquire_ctx; + + /** + * @idr_mutex: + * + * Mutex for KMS ID allocation and management. Protects both @crtc_idr + * and @tile_idr. + */ + struct mutex idr_mutex; + + /** + * @crtc_idr: + * + * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc, + * connector, modes - just makes life easier to have only one. + */ + struct idr crtc_idr; + + /** + * @tile_idr: + * + * Use this idr for allocating new IDs for tiled sinks like those used + * in some high-res DP MST screens. + */ + struct idr tile_idr; + + /** @fb_lock: Mutex to protect the global @fb_list and @num_fb. */ + struct mutex fb_lock; + /** @num_fb: Number of entries on @fb_list. */ + int num_fb; + /** @fb_list: List of all &struct drm_framebuffer. */ + struct list_head fb_list; + + /** + * @connector_list_lock: Protects @num_connector and + * @connector_list and @connector_free_list. + */ + spinlock_t connector_list_lock; + /** + * @num_connector: Number of connectors on this device. Protected by + * @connector_list_lock. + */ + int num_connector; + /** + * @connector_ida: ID allocator for connector indices. + */ + struct ida connector_ida; + /** + * @connector_list: + * + * List of connector objects linked with &drm_connector.head. Protected + * by @connector_list_lock. Only use drm_for_each_connector_iter() and + * &struct drm_connector_list_iter to walk this list. + */ + struct list_head connector_list; + /** + * @connector_free_list: + * + * List of connector objects linked with &drm_connector.free_head. + * Protected by @connector_list_lock. Used by + * drm_for_each_connector_iter() and + * &struct drm_connector_list_iter to safely free connectors using + * @connector_free_work. + */ + struct llist_head connector_free_list; + /** + * @connector_free_work: Work to clean up @connector_free_list. + */ + struct work_struct connector_free_work; + + /** + * @num_encoder: + * + * Number of encoders on this device. This is invariant over the + * lifetime of a device and hence doesn't need any locks. + */ + int num_encoder; + /** + * @encoder_list: + * + * List of encoder objects linked with &drm_encoder.head. This is + * invariant over the lifetime of a device and hence doesn't need any + * locks. + */ + struct list_head encoder_list; + + /** + * @num_total_plane: + * + * Number of universal (i.e. with primary/cursor) planes on this device. + * This is invariant over the lifetime of a device and hence doesn't + * need any locks. + */ + int num_total_plane; + /** + * @plane_list: + * + * List of plane objects linked with &drm_plane.head. This is invariant + * over the lifetime of a device and hence doesn't need any locks. + */ + struct list_head plane_list; + + /** + * @num_crtc: + * + * Number of CRTCs on this device. This is invariant over the lifetime + * of a device and hence doesn't need any locks. + */ + int num_crtc; + /** + * @crtc_list: + * + * List of CRTC objects linked with &drm_crtc.head. This is invariant + * over the lifetime of a device and hence doesn't need any locks. + */ + struct list_head crtc_list; + + /** + * @property_list: + * + * List of property type objects linked with &drm_property.head. This is + * invariant over the lifetime of a device and hence doesn't need any + * locks.
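+ *
+ * As a reference for the iteration rule spelled out in the
+ * @connector_list documentation above, a walk over all connectors looks
+ * roughly like this (sketch only)::
+ *
+ *	struct drm_connector_list_iter iter;
+ *	struct drm_connector *connector;
+ *
+ *	drm_connector_list_iter_begin(dev, &iter);
+ *	drm_for_each_connector_iter(connector, &iter) {
+ *		/* connector is protected against concurrent removal here */
+ *	}
+ *	drm_connector_list_iter_end(&iter);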
+ */ + struct list_head property_list; + + int min_width, min_height; + int max_width, max_height; + const struct drm_mode_config_funcs *funcs; + resource_size_t fb_base; + + /* output poll support */ + bool poll_enabled; + bool poll_running; + bool delayed_event; + struct delayed_work output_poll_work; + + /** + * @blob_lock: + * + * Mutex for blob property allocation and management, protects + * @property_blob_list and &drm_file.blobs. + */ + struct mutex blob_lock; + + /** + * @property_blob_list: + * + * List of all the blob property objects linked with + * &drm_property_blob.head. Protected by @blob_lock. + */ + struct list_head property_blob_list; + + /* pointers to standard properties */ + + /** + * @edid_property: Default connector property to hold the EDID of the + * currently connected sink, if any. + */ + struct drm_property *edid_property; + /** + * @dpms_property: Default connector property to control the + * connector's DPMS state. + */ + struct drm_property *dpms_property; + /** + * @path_property: Default connector property to hold the DP MST path + * for the port. + */ + struct drm_property *path_property; + /** + * @tile_property: Default connector property to store the tile + * position of a tiled screen, for sinks which need to be driven with + * multiple CRTCs. + */ + struct drm_property *tile_property; + /** + * @link_status_property: Default connector property for link status + * of a connector. + */ + struct drm_property *link_status_property; + /** + * @plane_type_property: Default plane property to differentiate + * CURSOR, PRIMARY and OVERLAY legacy uses of planes. + */ + struct drm_property *plane_type_property; + /** + * @prop_src_x: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. + */ + struct drm_property *prop_src_x; + /** + * @prop_src_y: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. + */ + struct drm_property *prop_src_y; + /** + * @prop_src_w: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. + */ + struct drm_property *prop_src_w; + /** + * @prop_src_h: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. + */ + struct drm_property *prop_src_h; + /** + * @prop_crtc_x: Default atomic plane property for the plane destination + * position in the &drm_crtc it is being shown on. + */ + struct drm_property *prop_crtc_x; + /** + * @prop_crtc_y: Default atomic plane property for the plane destination + * position in the &drm_crtc it is being shown on. + */ + struct drm_property *prop_crtc_y; + /** + * @prop_crtc_w: Default atomic plane property for the plane destination + * position in the &drm_crtc it is being shown on. + */ + struct drm_property *prop_crtc_w; + /** + * @prop_crtc_h: Default atomic plane property for the plane destination + * position in the &drm_crtc it is being shown on. + */ + struct drm_property *prop_crtc_h; + /** + * @prop_fb_id: Default atomic plane property to specify the + * &drm_framebuffer. + */ + struct drm_property *prop_fb_id; + /** + * @prop_in_fence_fd: Sync File fd representing the incoming fences + * for a Plane. + */ + struct drm_property *prop_in_fence_fd; + /** + * @prop_out_fence_ptr: Sync File fd pointer representing the + * outgoing fences for a CRTC. Userspace should provide a pointer to a + * value of type s32, and then cast that pointer to u64.
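+ *
+ * A minimal userspace sketch of this, using libdrm (the property and
+ * object ids are assumed to have been looked up beforehand)::
+ *
+ *	int32_t out_fence_fd = -1;
+ *
+ *	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop_id,
+ *				 (uint64_t)(uintptr_t)&out_fence_fd);
+ *	if (drmModeAtomicCommit(fd, req, 0, NULL) == 0) {
+ *		/* out_fence_fd now holds a sync_file fd for this commit */
+ *	}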
+ */ + struct drm_property *prop_out_fence_ptr; + /** + * @prop_crtc_id: Default atomic plane property to specify the + * &drm_crtc. + */ + struct drm_property *prop_crtc_id; + /** + * @prop_active: Default atomic CRTC property to control the active + * state, which is the simplified implementation for DPMS in atomic + * drivers. + */ + struct drm_property *prop_active; + /** + * @prop_mode_id: Default atomic CRTC property to set the mode for a + * CRTC. A 0 mode implies that the CRTC is entirely disabled - all + * connectors must be off and the active property must be set to + * false, too. + */ + struct drm_property *prop_mode_id; + + /** + * @dvi_i_subconnector_property: Optional DVI-I property to + * differentiate between analog or digital mode. + */ + struct drm_property *dvi_i_subconnector_property; + /** + * @dvi_i_select_subconnector_property: Optional DVI-I property to + * select between analog or digital mode. + */ + struct drm_property *dvi_i_select_subconnector_property; + + /** + * @tv_subconnector_property: Optional TV property to differentiate + * between different TV connector types. + */ + struct drm_property *tv_subconnector_property; + /** + * @tv_select_subconnector_property: Optional TV property to select + * between different TV connector types. + */ + struct drm_property *tv_select_subconnector_property; + /** + * @tv_mode_property: Optional TV property to select + * the output TV mode. + */ + struct drm_property *tv_mode_property; + /** + * @tv_left_margin_property: Optional TV property to set the left + * margin. + */ + struct drm_property *tv_left_margin_property; + /** + * @tv_right_margin_property: Optional TV property to set the right + * margin. + */ + struct drm_property *tv_right_margin_property; + /** + * @tv_top_margin_property: Optional TV property to set the top + * margin. + */ + struct drm_property *tv_top_margin_property; + /** + * @tv_bottom_margin_property: Optional TV property to set the bottom + * margin. + */ + struct drm_property *tv_bottom_margin_property; + /** + * @tv_brightness_property: Optional TV property to set the + * brightness. + */ + struct drm_property *tv_brightness_property; + /** + * @tv_contrast_property: Optional TV property to set the + * contrast. + */ + struct drm_property *tv_contrast_property; + /** + * @tv_flicker_reduction_property: Optional TV property to control the + * flicker reduction mode. + */ + struct drm_property *tv_flicker_reduction_property; + /** + * @tv_overscan_property: Optional TV property to control the overscan + * setting. + */ + struct drm_property *tv_overscan_property; + /** + * @tv_saturation_property: Optional TV property to set the + * saturation. + */ + struct drm_property *tv_saturation_property; + /** + * @tv_hue_property: Optional TV property to set the hue. + */ + struct drm_property *tv_hue_property; + + /** + * @scaling_mode_property: Optional connector property to control the + * upscaling, mostly used for built-in panels. + */ + struct drm_property *scaling_mode_property; + /** + * @aspect_ratio_property: Optional connector property to control the + * HDMI infoframe aspect ratio setting. + */ + struct drm_property *aspect_ratio_property; + /** + * @content_type_property: Optional connector property to control the + * HDMI infoframe content type setting. + */ + struct drm_property *content_type_property; + /** + * @degamma_lut_property: Optional CRTC property to set the LUT used to + * convert the framebuffer's colors to linear gamma.
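+ *
+ * Note that this and the related CTM/gamma properties below are only
+ * exposed on a CRTC once the driver opts in. A sketch of that opt-in
+ * call, with made-up LUT sizes::
+ *
+ *	/* from the driver's CRTC init code, sizes are hypothetical */
+ *	drm_crtc_enable_color_mgmt(crtc, 256, true, 1024);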
+ */ + struct drm_property *degamma_lut_property; + /** + * @degamma_lut_size_property: Optional CRTC property for the size of + * the degamma LUT as supported by the driver (read-only). + */ + struct drm_property *degamma_lut_size_property; + /** + * @ctm_property: Optional CRTC property to set the + * matrix used to convert colors after the lookup in the + * degamma LUT. + */ + struct drm_property *ctm_property; + /** + * @gamma_lut_property: Optional CRTC property to set the LUT used to + * convert the colors, after the CTM matrix, to the gamma space of the + * connected screen. + */ + struct drm_property *gamma_lut_property; + /** + * @gamma_lut_size_property: Optional CRTC property for the size of the + * gamma LUT as supported by the driver (read-only). + */ + struct drm_property *gamma_lut_size_property; + + /** + * @suggested_x_property: Optional connector property with a hint for + * the position of the output on the host's screen. + */ + struct drm_property *suggested_x_property; + /** + * @suggested_y_property: Optional connector property with a hint for + * the position of the output on the host's screen. + */ + struct drm_property *suggested_y_property; + + /** + * @non_desktop_property: Optional connector property with a hint + * that device isn't a standard display, and the console/desktop, + * should not be displayed on it. + */ + struct drm_property *non_desktop_property; + + /** + * @panel_orientation_property: Optional connector property indicating + * how the lcd-panel is mounted inside the casing (e.g. normal or + * upside-down). + */ + struct drm_property *panel_orientation_property; + + /** + * @writeback_fb_id_property: Property for writeback connectors, storing + * the ID of the output framebuffer. + * See also: drm_writeback_connector_init() + */ + struct drm_property *writeback_fb_id_property; + + /** + * @writeback_pixel_formats_property: Property for writeback connectors, + * storing an array of the supported pixel formats for the writeback + * engine (read-only). + * See also: drm_writeback_connector_init() + */ + struct drm_property *writeback_pixel_formats_property; + /** + * @writeback_out_fence_ptr_property: Property for writeback connectors, + * fd pointer representing the outgoing fences for a writeback + * connector. Userspace should provide a pointer to a value of type s32, + * and then cast that pointer to u64. + * See also: drm_writeback_connector_init() + */ + struct drm_property *writeback_out_fence_ptr_property; + + /* dumb ioctl parameters */ + uint32_t preferred_depth, prefer_shadow; + + /** + * @async_page_flip: Does this device support async flips on the primary + * plane? + */ + bool async_page_flip; + + /** + * @allow_fb_modifiers: + * + * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call. + */ + bool allow_fb_modifiers; + + /** + * @normalize_zpos: + * + * If true the drm core will call drm_atomic_normalize_zpos() as part of + * atomic mode checking from drm_atomic_helper_check() + */ + bool normalize_zpos; + + /** + * @modifiers_property: Plane property to list support modifier/format + * combination. + */ + struct drm_property *modifiers_property; + + /* cursor size */ + uint32_t cursor_width, cursor_height; + + /** + * @suspend_state: + * + * Atomic state when suspended. + * Set by drm_mode_config_helper_suspend() and cleared by + * drm_mode_config_helper_resume(). 
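+ *
+ * A sketch of how a driver would typically use these helpers from its
+ * PM ops (the foo_* names are made up, and the &drm_device is assumed
+ * to be stored as drvdata)::
+ *
+ *	static int foo_pm_suspend(struct device *dev)
+ *	{
+ *		struct drm_device *drm = dev_get_drvdata(dev);
+ *
+ *		return drm_mode_config_helper_suspend(drm);
+ *	}
+ *
+ *	static int foo_pm_resume(struct device *dev)
+ *	{
+ *		struct drm_device *drm = dev_get_drvdata(dev);
+ *
+ *		return drm_mode_config_helper_resume(drm);
+ *	}
+ *
+ *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);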
+ */ + struct drm_atomic_state *suspend_state; + + const struct drm_mode_config_helper_funcs *helper_private; +}; + +void drm_mode_config_init(struct drm_device *dev); +void drm_mode_config_reset(struct drm_device *dev); +void drm_mode_config_cleanup(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h new file mode 100644 index 000000000..c34a3e803 --- /dev/null +++ b/include/drm/drm_mode_object.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_MODESET_H__ +#define __DRM_MODESET_H__ + +#include +#include +struct drm_object_properties; +struct drm_property; +struct drm_device; +struct drm_file; + +/** + * struct drm_mode_object - base structure for modeset objects + * @id: userspace visible identifier + * @type: type of the object, one of DRM_MODE_OBJECT\_\* + * @properties: properties attached to this object, including values + * @refcount: reference count for objects which with dynamic lifetime + * @free_cb: free function callback, only set for objects with dynamic lifetime + * + * Base structure for modeset objects visible to userspace. Objects can be + * looked up using drm_mode_object_find(). Besides basic uapi interface + * properties like @id and @type it provides two services: + * + * - It tracks attached properties and their values. This is used by &drm_crtc, + * &drm_plane and &drm_connector. Properties are attached by calling + * drm_object_attach_property() before the object is visible to userspace. + * + * - For objects with dynamic lifetimes (as indicated by a non-NULL @free_cb) it + * provides reference counting through drm_mode_object_get() and + * drm_mode_object_put(). This is used by &drm_framebuffer, &drm_connector + * and &drm_property_blob. These objects provide specialized reference + * counting wrappers. + */ +struct drm_mode_object { + uint32_t id; + uint32_t type; + struct drm_object_properties *properties; + struct kref refcount; + void (*free_cb)(struct kref *kref); +}; + +#define DRM_OBJECT_MAX_PROPERTY 24 +/** + * struct drm_object_properties - property tracking for &drm_mode_object + */ +struct drm_object_properties { + /** + * @count: number of valid properties, must be less than or equal to + * DRM_OBJECT_MAX_PROPERTY. + */ + + int count; + /** + * @properties: Array of pointers to &drm_property. 
+ * + * NOTE: if we ever start dynamically destroying properties (ie. + * not at drm_mode_config_cleanup() time), then we'd have to do + * a better job of detaching property from mode objects to avoid + * dangling property pointers: + */ + struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY]; + + /** + * @values: Array to store the property values, matching @properties. Do + * not read/write values directly, but use + * drm_object_property_get_value() and drm_object_property_set_value(). + * + * Note that atomic drivers do not store mutable properties in this + * array, but only the decoded values in the corresponding state + * structure. The decoding is done using the &drm_crtc.atomic_get_property and + * &drm_crtc.atomic_set_property hooks for &struct drm_crtc. For + * &struct drm_plane the hooks are &drm_plane_funcs.atomic_get_property and + * &drm_plane_funcs.atomic_set_property. And for &struct drm_connector + * the hooks are &drm_connector_funcs.atomic_get_property and + * &drm_connector_funcs.atomic_set_property . + * + * Hence atomic drivers should not use drm_object_property_set_value() + * and drm_object_property_get_value() on mutable objects, i.e. those + * without the DRM_MODE_PROP_IMMUTABLE flag set. + */ + uint64_t values[DRM_OBJECT_MAX_PROPERTY]; +}; + +/* Avoid boilerplate. I'm tired of typing. */ +#define DRM_ENUM_NAME_FN(fnname, list) \ + const char *fnname(int val) \ + { \ + int i; \ + for (i = 0; i < ARRAY_SIZE(list); i++) { \ + if (list[i].type == val) \ + return list[i].name; \ + } \ + return "(unknown)"; \ + } + +struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id, uint32_t type); +void drm_mode_object_get(struct drm_mode_object *obj); +void drm_mode_object_put(struct drm_mode_object *obj); + +int drm_object_property_set_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t val); +int drm_object_property_get_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t *value); + +void drm_object_attach_property(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t init_val); + +bool drm_mode_object_lease_required(uint32_t type); +#endif diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h new file mode 100644 index 000000000..baded6514 --- /dev/null +++ b/include/drm/drm_modes.h @@ -0,0 +1,544 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes + * Copyright © 2014 Intel Corporation + * Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __DRM_MODES_H__ +#define __DRM_MODES_H__ + +#include + +#include +#include + +struct videomode; + +/* + * Note on terminology: here, for brevity and convenience, we refer to connector + * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, + * DVI, etc. And 'screen' refers to the whole of the visible display, which + * may span multiple monitors (and therefore multiple CRTC and connector + * structures). + */ + +/** + * enum drm_mode_status - hardware support status of a mode + * @MODE_OK: Mode OK + * @MODE_HSYNC: hsync out of range + * @MODE_VSYNC: vsync out of range + * @MODE_H_ILLEGAL: mode has illegal horizontal timings + * @MODE_V_ILLEGAL: mode has illegal horizontal timings + * @MODE_BAD_WIDTH: requires an unsupported linepitch + * @MODE_NOMODE: no mode with a matching name + * @MODE_NO_INTERLACE: interlaced mode not supported + * @MODE_NO_DBLESCAN: doublescan mode not supported + * @MODE_NO_VSCAN: multiscan mode not supported + * @MODE_MEM: insufficient video memory + * @MODE_VIRTUAL_X: mode width too large for specified virtual size + * @MODE_VIRTUAL_Y: mode height too large for specified virtual size + * @MODE_MEM_VIRT: insufficient video memory given virtual size + * @MODE_NOCLOCK: no fixed clock available + * @MODE_CLOCK_HIGH: clock required is too high + * @MODE_CLOCK_LOW: clock required is too low + * @MODE_CLOCK_RANGE: clock/mode isn't in a ClockRange + * @MODE_BAD_HVALUE: horizontal timing was out of range + * @MODE_BAD_VVALUE: vertical timing was out of range + * @MODE_BAD_VSCAN: VScan value out of range + * @MODE_HSYNC_NARROW: horizontal sync too narrow + * @MODE_HSYNC_WIDE: horizontal sync too wide + * @MODE_HBLANK_NARROW: horizontal blanking too narrow + * @MODE_HBLANK_WIDE: horizontal blanking too wide + * @MODE_VSYNC_NARROW: vertical sync too narrow + * @MODE_VSYNC_WIDE: vertical sync too wide + * @MODE_VBLANK_NARROW: vertical blanking too narrow + * @MODE_VBLANK_WIDE: vertical blanking too wide + * @MODE_PANEL: exceeds panel dimensions + * @MODE_INTERLACE_WIDTH: width too large for interlaced mode + * @MODE_ONE_WIDTH: only one width is supported + * @MODE_ONE_HEIGHT: only one height is supported + * @MODE_ONE_SIZE: only one resolution is supported + * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking + * @MODE_NO_STEREO: stereo modes not supported + * @MODE_NO_420: ycbcr 420 modes not supported + * @MODE_STALE: mode has become stale + * @MODE_BAD: unspecified reason + * @MODE_ERROR: error condition + * + * This enum is used to filter out modes not supported by the driver/hardware + * combination. 
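+ *
+ * For instance, a connector's &drm_connector_helper_funcs.mode_valid hook
+ * would typically return these values along the following lines (a sketch
+ * only; the clock limit is made up)::
+ *
+ *	static enum drm_mode_status
+ *	foo_connector_mode_valid(struct drm_connector *connector,
+ *				 struct drm_display_mode *mode)
+ *	{
+ *		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ *			return MODE_NO_INTERLACE;
+ *
+ *		if (mode->clock > 165000)
+ *			return MODE_CLOCK_HIGH;
+ *
+ *		return MODE_OK;
+ *	}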
+ */ +enum drm_mode_status { + MODE_OK = 0, + MODE_HSYNC, + MODE_VSYNC, + MODE_H_ILLEGAL, + MODE_V_ILLEGAL, + MODE_BAD_WIDTH, + MODE_NOMODE, + MODE_NO_INTERLACE, + MODE_NO_DBLESCAN, + MODE_NO_VSCAN, + MODE_MEM, + MODE_VIRTUAL_X, + MODE_VIRTUAL_Y, + MODE_MEM_VIRT, + MODE_NOCLOCK, + MODE_CLOCK_HIGH, + MODE_CLOCK_LOW, + MODE_CLOCK_RANGE, + MODE_BAD_HVALUE, + MODE_BAD_VVALUE, + MODE_BAD_VSCAN, + MODE_HSYNC_NARROW, + MODE_HSYNC_WIDE, + MODE_HBLANK_NARROW, + MODE_HBLANK_WIDE, + MODE_VSYNC_NARROW, + MODE_VSYNC_WIDE, + MODE_VBLANK_NARROW, + MODE_VBLANK_WIDE, + MODE_PANEL, + MODE_INTERLACE_WIDTH, + MODE_ONE_WIDTH, + MODE_ONE_HEIGHT, + MODE_ONE_SIZE, + MODE_NO_REDUCED, + MODE_NO_STEREO, + MODE_NO_420, + MODE_STALE = -3, + MODE_BAD = -2, + MODE_ERROR = -1 +}; + +#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ + .name = nm, .status = 0, .type = (t), .clock = (c), \ + .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ + .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ + .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ + .vscan = (vs), .flags = (f), \ + .base.type = DRM_MODE_OBJECT_MODE + +#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ +#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ +#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */ +#define CRTC_NO_VSCAN (1 << 3) /* don't adjust doublescan */ +#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN) + +#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF + +#define DRM_MODE_MATCH_TIMINGS (1 << 0) +#define DRM_MODE_MATCH_CLOCK (1 << 1) +#define DRM_MODE_MATCH_FLAGS (1 << 2) +#define DRM_MODE_MATCH_3D_FLAGS (1 << 3) +#define DRM_MODE_MATCH_ASPECT_RATIO (1 << 4) + +/** + * struct drm_display_mode - DRM kernel-internal display mode structure + * @hdisplay: horizontal display size + * @hsync_start: horizontal sync start + * @hsync_end: horizontal sync end + * @htotal: horizontal total size + * @hskew: horizontal skew?! + * @vdisplay: vertical display size + * @vsync_start: vertical sync start + * @vsync_end: vertical sync end + * @vtotal: vertical total size + * @vscan: vertical scan?! + * @crtc_hdisplay: hardware mode horizontal display size + * @crtc_hblank_start: hardware mode horizontal blank start + * @crtc_hblank_end: hardware mode horizontal blank end + * @crtc_hsync_start: hardware mode horizontal sync start + * @crtc_hsync_end: hardware mode horizontal sync end + * @crtc_htotal: hardware mode horizontal total size + * @crtc_hskew: hardware mode horizontal skew?! + * @crtc_vdisplay: hardware mode vertical display size + * @crtc_vblank_start: hardware mode vertical blank start + * @crtc_vblank_end: hardware mode vertical blank end + * @crtc_vsync_start: hardware mode vertical sync start + * @crtc_vsync_end: hardware mode vertical sync end + * @crtc_vtotal: hardware mode vertical total size + * + * The horizontal and vertical timings are defined per the following diagram. + * + * :: + * + * + * Active Front Sync Back + * Region Porch Porch + * <-----------------------><----------------><-------------><--------------> + * //////////////////////| + * ////////////////////// | + * ////////////////////// |.................. ................ 
+ * _______________ + * <----- [hv]display -----> + * <------------- [hv]sync_start ------------> + * <--------------------- [hv]sync_end ---------------------> + * <-------------------------------- [hv]total ----------------------------->* + * + * This structure contains two copies of timings. First are the plain timings, + * which specify the logical mode, as it would be for a progressive 1:1 scanout + * at the refresh rate userspace can observe through vblank timestamps. Then + * there's the hardware timings, which are corrected for interlacing, + * double-clocking and similar things. They are provided as a convenience, and + * can be appropriately computed using drm_mode_set_crtcinfo(). + * + * For printing you can use %DRM_MODE_FMT and DRM_MODE_ARG(). + */ +struct drm_display_mode { + /** + * @head: + * + * struct list_head for mode lists. + */ + struct list_head head; + + /** + * @base: + * + * A display mode is a normal modeset object, possibly including public + * userspace id. + * + * FIXME: + * + * This can probably be removed since the entire concept of userspace + * managing modes explicitly has never landed in upstream kernel mode + * setting support. + */ + struct drm_mode_object base; + + /** + * @name: + * + * Human-readable name of the mode, filled out with drm_mode_set_name(). + */ + char name[DRM_DISPLAY_MODE_LEN]; + + /** + * @status: + * + * Status of the mode, used to filter out modes not supported by the + * hardware. See enum &drm_mode_status. + */ + enum drm_mode_status status; + + /** + * @type: + * + * A bitmask of flags, mostly about the source of a mode. Possible flags + * are: + * + * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native + * resolution of an LCD panel. There should only be one preferred + * mode per connector at any given time. + * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of + * them really. Drivers must set this bit for all modes they create + * and expose to userspace. + * - DRM_MODE_TYPE_USERDEF: Mode defined via kernel command line + * + * Plus a big list of flags which shouldn't be used at all, but are + * still around since these flags are also used in the userspace ABI. + * We no longer accept modes with these types though: + * + * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused. + * Use DRM_MODE_TYPE_DRIVER instead. + * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use + * DRM_MODE_TYPE_PREFERRED instead. + * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers + * which are stuck around for hysterical raisins only. No one has an + * idea what they were meant for. Don't use. + */ + unsigned int type; + + /** + * @clock: + * + * Pixel clock in kHz. + */ + int clock; /* in kHz */ + int hdisplay; + int hsync_start; + int hsync_end; + int htotal; + int hskew; + int vdisplay; + int vsync_start; + int vsync_end; + int vtotal; + int vscan; + /** + * @flags: + * + * Sync and timing flags: + * + * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high. + * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low. + * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high. + * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low. + * - DRM_MODE_FLAG_INTERLACE: mode is interlaced. + * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan. + * - DRM_MODE_FLAG_CSYNC: mode uses composite sync. + * - DRM_MODE_FLAG_PCSYNC: composite sync is active high. + * - DRM_MODE_FLAG_NCSYNC: composite sync is active low. + * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?). 
+ * - DRM_MODE_FLAG_BCAST: + * - DRM_MODE_FLAG_PIXMUX: + * - DRM_MODE_FLAG_DBLCLK: double-clocked mode. + * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode. + * + * Additionally there's flags to specify how 3D modes are packed: + * + * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode. + * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right. + * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields. + * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames. + * - DRM_MODE_FLAG_3D_L_DEPTH: ? + * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ? + * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom + * parts. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and + * right parts. + */ + unsigned int flags; + + /** + * @width_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ + int width_mm; + + /** + * @height_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ + int height_mm; + + /** + * @crtc_clock: + * + * Actual pixel or dot clock in the hardware. This differs from the + * logical @clock when e.g. using interlacing, double-clocking, stereo + * modes or other fancy stuff that changes the timings and signals + * actually sent over the wire. + * + * This is again in kHz. + * + * Note that with digital outputs like HDMI or DP there's usually a + * massive confusion between the dot clock and the signal clock at the + * bit encoding level. Especially when a 8b/10b encoding is used and the + * difference is exactly a factor of 10. + */ + int crtc_clock; + int crtc_hdisplay; + int crtc_hblank_start; + int crtc_hblank_end; + int crtc_hsync_start; + int crtc_hsync_end; + int crtc_htotal; + int crtc_hskew; + int crtc_vdisplay; + int crtc_vblank_start; + int crtc_vblank_end; + int crtc_vsync_start; + int crtc_vsync_end; + int crtc_vtotal; + + /** + * @private: + * + * Pointer for driver private data. This can only be used for mode + * objects passed to drivers in modeset operations. It shouldn't be used + * by atomic drivers since they can store any additional data by + * subclassing state structures. + */ + int *private; + + /** + * @private_flags: + * + * Similar to @private, but just an integer. + */ + int private_flags; + + /** + * @vrefresh: + * + * Vertical refresh rate, for debug output in human readable form. Not + * used in a functional way. + * + * This value is in Hz. + */ + int vrefresh; + + /** + * @hsync: + * + * Horizontal refresh rate, for debug output in human readable form. Not + * used in a functional way. + * + * This value is in kHz. + */ + int hsync; + + /** + * @picture_aspect_ratio: + * + * Field for setting the HDMI picture aspect ratio of a mode. + */ + enum hdmi_picture_aspect picture_aspect_ratio; + + /** + * @export_head: + * + * struct list_head for modes to be exposed to the userspace. + * This is to maintain a list of exposed modes while preparing + * user-mode's list in drm_mode_getconnector ioctl. The purpose of this + * list_head only lies in the ioctl function, and is not expected to be + * used outside the function. + * Once used, the stale pointers are not reset, but left as it is, to + * avoid overhead of protecting it by mode_config.mutex. 
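+ *
+ * As a reference for the DRM_MODE() initializer defined earlier in this
+ * header, a fixed mode is typically declared like this (the timings below
+ * are made up)::
+ *
+ *	static const struct drm_display_mode foo_panel_mode = {
+ *		DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71100,
+ *			 1280, 1328, 1360, 1440, 0,
+ *			 800, 803, 809, 823, 0,
+ *			 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ *		.width_mm = 217,
+ *		.height_mm = 136,
+ *		.vrefresh = 60,
+ *	};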
+ */ + struct list_head export_head; +}; + +/** + * DRM_MODE_FMT - printf string for &struct drm_display_mode + */ +#define DRM_MODE_FMT "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x" + +/** + * DRM_MODE_ARG - printf arguments for &struct drm_display_mode + * @m: display mode + */ +#define DRM_MODE_ARG(m) \ + (m)->base.id, (m)->name, (m)->vrefresh, (m)->clock, \ + (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \ + (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \ + (m)->type, (m)->flags + +#define obj_to_mode(x) container_of(x, struct drm_display_mode, base) + +/** + * drm_mode_is_stereo - check for stereo mode flags + * @mode: drm_display_mode to check + * + * Returns: + * True if the mode is one of the stereo modes (like side-by-side), false if + * not. + */ +static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode) +{ + return mode->flags & DRM_MODE_FLAG_3D_MASK; +} + +struct drm_connector; +struct drm_cmdline_mode; + +struct drm_display_mode *drm_mode_create(struct drm_device *dev); +void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); +void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, + const struct drm_display_mode *in); +int drm_mode_convert_umode(struct drm_device *dev, + struct drm_display_mode *out, + const struct drm_mode_modeinfo *in); +void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); +void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); +bool drm_mode_is_420_only(const struct drm_display_info *display, + const struct drm_display_mode *mode); +bool drm_mode_is_420_also(const struct drm_display_info *display, + const struct drm_display_mode *mode); +bool drm_mode_is_420(const struct drm_display_info *display, + const struct drm_display_mode *mode); + +struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, + int hdisplay, int vdisplay, int vrefresh, + bool reduced, bool interlaced, + bool margins); +struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, + int hdisplay, int vdisplay, int vrefresh, + bool interlaced, int margins); +struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, + int hdisplay, int vdisplay, + int vrefresh, bool interlaced, + int margins, + int GTF_M, int GTF_2C, + int GTF_K, int GTF_2J); +void drm_display_mode_from_videomode(const struct videomode *vm, + struct drm_display_mode *dmode); +void drm_display_mode_to_videomode(const struct drm_display_mode *dmode, + struct videomode *vm); +void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags); +int of_get_drm_display_mode(struct device_node *np, + struct drm_display_mode *dmode, u32 *bus_flags, + int index); + +void drm_mode_set_name(struct drm_display_mode *mode); +int drm_mode_hsync(const struct drm_display_mode *mode); +int drm_mode_vrefresh(const struct drm_display_mode *mode); +void drm_mode_get_hv_timing(const struct drm_display_mode *mode, + int *hdisplay, int *vdisplay); + +void drm_mode_set_crtcinfo(struct drm_display_mode *p, + int adjust_flags); +void drm_mode_copy(struct drm_display_mode *dst, + const struct drm_display_mode *src); +struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, + const struct drm_display_mode *mode); +bool drm_mode_match(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2, + unsigned int match_flags); +bool drm_mode_equal(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); +bool drm_mode_equal_no_clocks(const 
struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); +bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); + +/* for use by the crtc helper probe functions */ +enum drm_mode_status drm_mode_validate_driver(struct drm_device *dev, + const struct drm_display_mode *mode); +enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, + int maxX, int maxY); +enum drm_mode_status +drm_mode_validate_ycbcr420(const struct drm_display_mode *mode, + struct drm_connector *connector); +void drm_mode_prune_invalid(struct drm_device *dev, + struct list_head *mode_list, bool verbose); +void drm_mode_sort(struct list_head *mode_list); +void drm_connector_list_update(struct drm_connector *connector); + +/* parsing cmdline modes */ +bool +drm_mode_parse_command_line_for_connector(const char *mode_option, + struct drm_connector *connector, + struct drm_cmdline_mode *mode); +struct drm_display_mode * +drm_mode_create_from_cmdline_mode(struct drm_device *dev, + struct drm_cmdline_mode *cmd); + +#endif /* __DRM_MODES_H__ */ diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h new file mode 100644 index 000000000..efa337f03 --- /dev/null +++ b/include/drm/drm_modeset_helper.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef __DRM_KMS_HELPER_H__ +#define __DRM_KMS_HELPER_H__ + +#include + +void drm_helper_move_panel_connectors_to_head(struct drm_device *); + +void drm_helper_mode_fill_fb_struct(struct drm_device *dev, + struct drm_framebuffer *fb, + const struct drm_mode_fb_cmd2 *mode_cmd); + +int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, + const struct drm_crtc_funcs *funcs); + +int drm_mode_config_helper_suspend(struct drm_device *dev); +int drm_mode_config_helper_resume(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h new file mode 100644 index 000000000..0eb3372d0 --- /dev/null +++ b/include/drm/drm_modeset_helper_vtables.h @@ -0,0 +1,1252 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes + * Copyright © 2011-2013 Intel Corporation + * Copyright © 2015 Intel Corporation + * Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DRM_MODESET_HELPER_VTABLES_H__ +#define __DRM_MODESET_HELPER_VTABLES_H__ + +#include +#include + +/** + * DOC: overview + * + * The DRM mode setting helper functions are common code for drivers to use if + * they wish. Drivers are not forced to use this code in their + * implementations but it would be useful if the code they do use at least + * provides a consistent interface and operation to userspace. Therefore it is + * highly recommended to use the provided helpers as much as possible. + * + * Because there is only one pointer per modeset object to hold a vfunc table + * for helper libraries they are by necessity shared among the different + * helpers. + * + * To make this clear all the helper vtables are pulled together in this location here. + */ + +enum mode_set_atomic; + +/** + * struct drm_crtc_helper_funcs - helper operations for CRTCs + * + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. + */ +struct drm_crtc_helper_funcs { + /** + * @dpms: + * + * Callback to control power levels on the CRTC. If the mode passed in + * is unsupported, the provider must use the next lowest power level. + * This is used by the legacy CRTC helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable a CRTC by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. 
+ * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling a CRTC to + * facilitate transitions to atomic, but it is deprecated. Instead + * @atomic_enable and @atomic_disable should be used. + */ + void (*dpms)(struct drm_crtc *crtc, int mode); + + /** + * @prepare: + * + * This callback should prepare the CRTC for a subsequent modeset, which + * in practice means the driver should disable the CRTC if it is + * running. Most drivers ended up implementing this by calling their + * @dpms hook with DRM_MODE_DPMS_OFF. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for disabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @atomic_disable + * should be used. + */ + void (*prepare)(struct drm_crtc *crtc); + + /** + * @commit: + * + * This callback should commit the new mode on the CRTC after a modeset, + * which in practice means the driver should enable the CRTC. Most + * drivers ended up implementing this by calling their @dpms hook with + * DRM_MODE_DPMS_ON. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @atomic_enable + * should be used. + */ + void (*commit)(struct drm_crtc *crtc); + + /** + * @mode_valid: + * + * This callback is used to check if a specific mode is valid in this + * crtc. This should be implemented if the crtc has some sort of + * restriction in the modes it can display. For example, a given crtc + * may be responsible to set a clock value. If the clock can not + * produce all the values for the available modes then this callback + * can be used to restrict the number of modes to only the ones that + * can be displayed. + * + * This hook is used by the probe helpers to filter the mode list in + * drm_helper_probe_single_connector_modes(), and it is used by the + * atomic helpers to validate modes supplied by userspace in + * drm_atomic_helper_check_modeset(). + * + * This function is optional. + * + * NOTE: + * + * Since this function is both called from the check phase of an atomic + * commit, and the mode validation in the probe paths it is not allowed + * to look at anything else but the passed-in mode, and validate it + * against configuration-invariant hardward constraints. Any further + * limits which depend upon the configuration can only be checked in + * @mode_fixup or @atomic_check. + * + * RETURNS: + * + * drm_mode_status Enum + */ + enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc, + const struct drm_display_mode *mode); + + /** + * @mode_fixup: + * + * This callback is used to validate a mode. The parameter mode is the + * display mode that userspace requested, adjusted_mode is the mode the + * encoders need to be fed with. Note that this is the inverse semantics + * of the meaning for the &drm_encoder and &drm_bridge_funcs.mode_fixup + * vfunc. If the CRTC cannot support the requested conversion from mode + * to adjusted_mode it should reject the modeset. See also + * &drm_crtc_state.adjusted_mode for more details. + * + * This function is used by both legacy CRTC helpers and atomic helpers. + * With atomic helpers it is optional. 
+ * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback, but note that they're not + * perfectly equivalent: @mode_valid is called from + * drm_atomic_helper_check_modeset(), but @atomic_check is called from + * drm_atomic_helper_check_planes(), because originally it was meant for + * plane update checks only. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any CRTC constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ + bool (*mode_fixup)(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @mode_set: + * + * This callback is used by the legacy CRTC helpers to set a new mode, + * position and framebuffer. Since it ties the primary plane to every + * mode change it is incompatible with universal plane support. And + * since it can't update other planes it's incompatible with atomic + * modeset support. + * + * This callback is only used by CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb); + + /** + * @mode_set_nofb: + * + * This callback is used to update the display mode of a CRTC without + * changing anything of the primary plane configuration. This fits the + * requirement of atomic and hence is used by the atomic helpers. It is + * also used by the transitional plane helpers to implement a + * @mode_set hook in drm_helper_crtc_mode_set(). + * + * Note that the display pipe is completely off when this function is + * called. Atomic drivers which need hardware to be running before they + * program the new display mode (e.g. because they implement runtime PM) + * should not use this hook. This is because the helper library calls + * this hook only once per mode change and not every time the display + * pipeline is suspended using either DPMS or the new "ACTIVE" property. + * Which means register values set in this callback might get reset when + * the CRTC is suspended, but not restored. Such drivers should instead + * move all their CRTC setup into the @atomic_enable callback. + * + * This callback is optional. + */ + void (*mode_set_nofb)(struct drm_crtc *crtc); + + /** + * @mode_set_base: + * + * This callback is used by the legacy CRTC helpers to set a new + * framebuffer and scanout position. It is optional and used as an + * optimized fast-path instead of a full mode set operation with all the + * resulting flickering. If it is not present + * drm_crtc_helper_set_config() will fall back to a full modeset, using + * the @mode_set callback. 
Since it can't update other planes it's + * incompatible with atomic modeset support. + * + * This callback is only used by the CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); + + /** + * @mode_set_base_atomic: + * + * This callback is used by the fbdev helpers to set a new framebuffer + * and scanout without sleeping, i.e. from an atomic calling context. It + * is only used to implement kgdb support. + * + * This callback is optional and only needed for kgdb support in the fbdev + * helpers. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set_base_atomic)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, int x, int y, + enum mode_set_atomic); + + /** + * @disable: + * + * This callback should be used to disable the CRTC. With the atomic + * drivers it is called after all encoders connected to this CRTC have + * been shut off already using their own + * &drm_encoder_helper_funcs.disable hook. If that sequence is too + * simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). + * + * This hook is used both by legacy CRTC helpers and atomic helpers. + * Atomic drivers don't need to implement it if there's no need to + * disable anything at the CRTC level. To ensure that runtime PM + * handling (using either DPMS or the new "ACTIVE" property) works + * @disable must be the inverse of @atomic_enable for atomic drivers. + * Atomic drivers should consider to use @atomic_disable instead of + * this one. + * + * NOTE: + * + * With legacy CRTC helpers there's a big semantic difference between + * @disable and other hooks (like @prepare or @dpms) used to shut down a + * CRTC: @disable is only called when also logically disabling the + * display pipeline and needs to release any resources acquired in + * @mode_set (like shared PLLs, or again release pinned framebuffers). + * + * Therefore @disable must be the inverse of @mode_set plus @commit for + * drivers still using legacy CRTC helpers, which is different from the + * rules under atomic. + */ + void (*disable)(struct drm_crtc *crtc); + + /** + * @atomic_check: + * + * Drivers should check plane-update related CRTC constraints in this + * hook. They can also check mode related limitations but need to be + * aware of the calling order, since this hook is used by + * drm_atomic_helper_check_planes() whereas the preparations needed to + * check output routing and the display mode is done in + * drm_atomic_helper_check_modeset(). Therefore drivers that want to + * check output routing and display mode constraints in this callback + * must ensure that drm_atomic_helper_check_modeset() has been called + * beforehand. This is calling order used by the default helper + * implementation in drm_atomic_helper_check(). + * + * When using drm_atomic_helper_check_planes() this hook is called + * after the &drm_plane_helper_funcs.atomic_check hook for planes, which + * allows drivers to assign shared resources requested by planes in this + * callback here. For more complicated dependencies the driver can call + * the provided check helpers multiple times until the computed state + * has a final configuration and everything has been checked. 
+ * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check and compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any CRTC constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_crtc *crtc, + struct drm_crtc_state *state); + + /** + * @atomic_begin: + * + * Drivers should prepare for an atomic update of multiple planes on + * a CRTC in this hook. Depending upon hardware this might be vblank + * evasion, blocking updates by setting bits or doing preparatory work + * for e.g. manual update display. + * + * This hook is called before any plane commit functions are called. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_begin)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + /** + * @atomic_flush: + * + * Drivers should finalize an atomic update of multiple planes on + * a CRTC in this hook. Depending upon hardware this might include + * checking that vblank evasion was successful, unblocking updates by + * setting bits or setting the GO bit to flush out all updates. + * + * Simple hardware or hardware with special requirements can commit and + * flush out all updates for all planes from this hook and forgo all the + * other commit hooks for plane updates. + * + * This hook is called after any plane commit functions are called. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_flush)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + + /** + * @atomic_enable: + * + * This callback should be used to enable the CRTC. 
With the atomic + * drivers it is called before all encoders connected to this CRTC are + * enabled through the encoder's own &drm_encoder_helper_funcs.enable + * hook. If that sequence is too simple drivers can just add their own + * hooks and call it from this CRTC callback here by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with + * @atomic_disable. Atomic drivers don't need to implement it if there's + * no need to enable anything at the CRTC level. To ensure that runtime + * PM handling (using either DPMS or the new "ACTIVE" property) works + * @atomic_enable must be the inverse of @atomic_disable for atomic + * drivers. + * + * Drivers can use the @old_crtc_state input parameter if the operations + * needed to enable the CRTC don't depend solely on the new state but + * also on the transition between the old state and the new state. + */ + void (*atomic_enable)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + + /** + * @atomic_disable: + * + * This callback should be used to disable the CRTC. With the atomic + * drivers it is called after all encoders connected to this CRTC have + * been shut off already using their own + * &drm_encoder_helper_funcs.disable hook. If that sequence is too + * simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers. Atomic drivers don't + * need to implement it if there's no need to disable anything at the + * CRTC level. + * + * Comparing to @disable, this one provides the additional input + * parameter @old_crtc_state which could be used to access the old + * state. Atomic drivers should consider to use this one instead + * of @disable. + */ + void (*atomic_disable)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); +}; + +/** + * drm_crtc_helper_add - sets the helper vtable for a crtc + * @crtc: DRM CRTC + * @funcs: helper vtable to set for @crtc + */ +static inline void drm_crtc_helper_add(struct drm_crtc *crtc, + const struct drm_crtc_helper_funcs *funcs) +{ + crtc->helper_private = funcs; +} + +/** + * struct drm_encoder_helper_funcs - helper operations for encoders + * + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. + */ +struct drm_encoder_helper_funcs { + /** + * @dpms: + * + * Callback to control power levels on the encoder. If the mode passed in + * is unsupported, the provider must use the next lowest power level. + * This is used by the legacy encoder helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable an encoder by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling an encoder to + * facilitate transitions to atomic, but it is deprecated. Instead + * @enable and @disable should be used. + */ + void (*dpms)(struct drm_encoder *encoder, int mode); + + /** + * @mode_valid: + * + * This callback is used to check if a specific mode is valid in this + * encoder. This should be implemented if the encoder has some sort + * of restriction in the modes it can display. For example, a given + * encoder may be responsible to set a clock value. 
If the clock can + * not produce all the values for the available modes then this callback + * can be used to restrict the number of modes to only the ones that + * can be displayed. + * + * This hook is used by the probe helpers to filter the mode list in + * drm_helper_probe_single_connector_modes(), and it is used by the + * atomic helpers to validate modes supplied by userspace in + * drm_atomic_helper_check_modeset(). + * + * This function is optional. + * + * NOTE: + * + * Since this function is both called from the check phase of an atomic + * commit, and the mode validation in the probe paths it is not allowed + * to look at anything else but the passed-in mode, and validate it + * against configuration-invariant hardward constraints. Any further + * limits which depend upon the configuration can only be checked in + * @mode_fixup or @atomic_check. + * + * RETURNS: + * + * drm_mode_status Enum + */ + enum drm_mode_status (*mode_valid)(struct drm_encoder *crtc, + const struct drm_display_mode *mode); + + /** + * @mode_fixup: + * + * This callback is used to validate and adjust a mode. The parameter + * mode is the display mode that should be fed to the next element in + * the display chain, either the final &drm_connector or a &drm_bridge. + * The parameter adjusted_mode is the input mode the encoder requires. It + * can be modified by this callback and does not need to match mode. See + * also &drm_crtc_state.adjusted_mode for more details. + * + * This function is used by both legacy CRTC helpers and atomic helpers. + * This hook is optional. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback. If @atomic_check is used, + * this hook isn't called since @atomic_check allows a strict superset + * of the functionality of @mode_fixup. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any encoder constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ + bool (*mode_fixup)(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @prepare: + * + * This callback should prepare the encoder for a subsequent modeset, + * which in practice means the driver should disable the encoder if it + * is running. Most drivers ended up implementing this by calling their + * @dpms hook with DRM_MODE_DPMS_OFF. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for disabling an encoder to facilitate + * transitions to atomic, but it is deprecated. Instead @disable should + * be used. 
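Tying the @mode_valid description above to code, a hypothetical encoder hook that checks only configuration-invariant limits might be:

/* Hypothetical sketch: a TMDS-style encoder that can only drive pixel
 * clocks between 25 MHz and 165 MHz, independent of the rest of the
 * configuration. */
static enum drm_mode_status
foo_encoder_mode_valid(struct drm_encoder *encoder,
		       const struct drm_display_mode *mode)
{
	if (mode->clock < 25000)
		return MODE_CLOCK_LOW;
	if (mode->clock > 165000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

Anything that depends on the rest of the configuration belongs in @mode_fixup or @atomic_check instead, as noted above.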
+ */ + void (*prepare)(struct drm_encoder *encoder); + + /** + * @commit: + * + * This callback should commit the new mode on the encoder after a modeset, + * which in practice means the driver should enable the encoder. Most + * drivers ended up implementing this by calling their @dpms hook with + * DRM_MODE_DPMS_ON. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling an encoder to facilitate + * transitions to atomic, but it is deprecated. Instead @enable should + * be used. + */ + void (*commit)(struct drm_encoder *encoder); + + /** + * @mode_set: + * + * This callback is used to update the display mode of an encoder. + * + * Note that the display pipe is completely off when this function is + * called. Drivers which need hardware to be running before they program + * the new display mode (because they implement runtime PM) should not + * use this hook, because the helper library calls it only once and not + * every time the display pipeline is suspend using either DPMS or the + * new "ACTIVE" property. Such drivers should instead move all their + * encoder setup into the @enable callback. + * + * This callback is used both by the legacy CRTC helpers and the atomic + * modeset helpers. It is optional in the atomic helpers. + * + * NOTE: + * + * If the driver uses the atomic modeset helpers and needs to inspect + * the connector state or connector display info during mode setting, + * @atomic_mode_set can be used instead. + */ + void (*mode_set)(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @atomic_mode_set: + * + * This callback is used to update the display mode of an encoder. + * + * Note that the display pipe is completely off when this function is + * called. Drivers which need hardware to be running before they program + * the new display mode (because they implement runtime PM) should not + * use this hook, because the helper library calls it only once and not + * every time the display pipeline is suspended using either DPMS or the + * new "ACTIVE" property. Such drivers should instead move all their + * encoder setup into the @enable callback. + * + * This callback is used by the atomic modeset helpers in place of the + * @mode_set callback, if set by the driver. It is optional and should + * be used instead of @mode_set if the driver needs to inspect the + * connector state or display info, since there is no direct way to + * go from the encoder to the current connector. + */ + void (*atomic_mode_set)(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); + + /** + * @get_crtc: + * + * This callback is used by the legacy CRTC helpers to work around + * deficiencies in its own book-keeping. + * + * Do not use, use atomic helpers instead, which get the book keeping + * right. + * + * FIXME: + * + * Currently only nouveau is using this, and as soon as nouveau is + * atomic we can ditch this hook. + */ + struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder); + + /** + * @detect: + * + * This callback can be used by drivers who want to do detection on the + * encoder object instead of in connector functions. + * + * It is not used by any helper and therefore has purely driver-specific + * semantics. New drivers shouldn't use this and instead just implement + * their own private callbacks. + * + * FIXME: + * + * This should just be converted into a pile of driver vfuncs. 
+ * Currently radeon, amdgpu and nouveau are using it. + */ + enum drm_connector_status (*detect)(struct drm_encoder *encoder, + struct drm_connector *connector); + + /** + * @disable: + * + * This callback should be used to disable the encoder. With the atomic + * drivers it is called before this encoder's CRTC has been shut off + * using their own &drm_crtc_helper_funcs.disable hook. If that + * sequence is too simple drivers can just add their own driver private + * encoder hooks and call them from CRTC's callback by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). + * + * This hook is used both by legacy CRTC helpers and atomic helpers. + * Atomic drivers don't need to implement it if there's no need to + * disable anything at the encoder level. To ensure that runtime PM + * handling (using either DPMS or the new "ACTIVE" property) works + * @disable must be the inverse of @enable for atomic drivers. + * + * NOTE: + * + * With legacy CRTC helpers there's a big semantic difference between + * @disable and other hooks (like @prepare or @dpms) used to shut down a + * encoder: @disable is only called when also logically disabling the + * display pipeline and needs to release any resources acquired in + * @mode_set (like shared PLLs, or again release pinned framebuffers). + * + * Therefore @disable must be the inverse of @mode_set plus @commit for + * drivers still using legacy CRTC helpers, which is different from the + * rules under atomic. + */ + void (*disable)(struct drm_encoder *encoder); + + /** + * @enable: + * + * This callback should be used to enable the encoder. With the atomic + * drivers it is called after this encoder's CRTC has been enabled using + * their own &drm_crtc_helper_funcs.enable hook. If that sequence is + * too simple drivers can just add their own driver private encoder + * hooks and call them from CRTC's callback by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with @disable. + * Atomic drivers don't need to implement it if there's no need to + * enable anything at the encoder level. To ensure that runtime PM handling + * (using either DPMS or the new "ACTIVE" property) works + * @enable must be the inverse of @disable for atomic drivers. + */ + void (*enable)(struct drm_encoder *encoder); + + /** + * @atomic_check: + * + * This callback is used to validate encoder state for atomic drivers. + * Since the encoder is the object connecting the CRTC and connector it + * gets passed both states, to be able to validate interactions and + * update the CRTC to match what the encoder needs for the requested + * connector. + * + * Since this provides a strict superset of the functionality of + * @mode_fixup (the requested and adjusted modes are both available + * through the passed in &struct drm_crtc_state) @mode_fixup is not + * called when @atomic_check is implemented. + * + * This function is used by the atomic helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. 
To ensure + * that modes are filtered consistently put any encoder constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); +}; + +/** + * drm_encoder_helper_add - sets the helper vtable for an encoder + * @encoder: DRM encoder + * @funcs: helper vtable to set for @encoder + */ +static inline void drm_encoder_helper_add(struct drm_encoder *encoder, + const struct drm_encoder_helper_funcs *funcs) +{ + encoder->helper_private = funcs; +} + +/** + * struct drm_connector_helper_funcs - helper operations for connectors + * + * These functions are used by the atomic and legacy modeset helpers and by the + * probe helpers. + */ +struct drm_connector_helper_funcs { + /** + * @get_modes: + * + * This function should fill in all modes currently valid for the sink + * into the &drm_connector.probed_modes list. It should also update the + * EDID property by calling drm_connector_update_edid_property(). + * + * The usual way to implement this is to cache the EDID retrieved in the + * probe callback somewhere in the driver-private connector structure. + * In this function drivers then parse the modes in the EDID and add + * them by calling drm_add_edid_modes(). But connectors that driver a + * fixed panel can also manually add specific modes using + * drm_mode_probed_add(). Drivers which manually add modes should also + * make sure that the &drm_connector.display_info, + * &drm_connector.width_mm and &drm_connector.height_mm fields are + * filled in. + * + * Virtual drivers that just want some standard VESA mode with a given + * resolution can call drm_add_modes_noedid(), and mark the preferred + * one using drm_set_preferred_mode(). + * + * This function is only called after the @detect hook has indicated + * that a sink is connected and when the EDID isn't overridden through + * sysfs or the kernel commandline. + * + * This callback is used by the probe helpers in e.g. + * drm_helper_probe_single_connector_modes(). + * + * To avoid races with concurrent connector state updates, the helper + * libraries always call this with the &drm_mode_config.connection_mutex + * held. Because of this it's safe to inspect &drm_connector->state. + * + * RETURNS: + * + * The number of modes added by calling drm_mode_probed_add(). + */ + int (*get_modes)(struct drm_connector *connector); + + /** + * @detect_ctx: + * + * Check to see if anything is attached to the connector. The parameter + * force is set to false whilst polling, true when checking the + * connector due to a user request. force can be used by the driver to + * avoid expensive, destructive operations during automated probing. + * + * This callback is optional, if not implemented the connector will be + * considered as always being attached. + * + * This is the atomic version of &drm_connector_funcs.detect. + * + * To avoid races against concurrent connector state updates, the + * helper libraries always call this with ctx set to a valid context, + * and &drm_mode_config.connection_mutex will always be locked with + * the ctx parameter set to this ctx. This allows taking additional + * locks as required. 
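As a sketch of the @detect_ctx contract just described (struct foo_device and foo_read_hpd() are made-up driver internals, not part of any DRM API):

struct foo_device;				/* hypothetical driver structure   */
bool foo_read_hpd(struct foo_device *foo);	/* hypothetical hot-plug register read */

static int foo_connector_detect_ctx(struct drm_connector *connector,
				    struct drm_modeset_acquire_ctx *ctx,
				    bool force)
{
	struct foo_device *foo = connector->dev->dev_private;

	/* ctx is only needed if further modeset locks must be taken here. */
	return foo_read_hpd(foo) ? connector_status_connected :
				   connector_status_disconnected;
}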
+ * + * RETURNS: + * + * &drm_connector_status indicating the connector's status, + * or the error code returned by drm_modeset_lock(), -EDEADLK. + */ + int (*detect_ctx)(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force); + + /** + * @mode_valid: + * + * Callback to validate a mode for a connector, irrespective of the + * specific display configuration. + * + * This callback is used by the probe helpers to filter the mode list + * (which is usually derived from the EDID data block from the sink). + * See e.g. drm_helper_probe_single_connector_modes(). + * + * This function is optional. + * + * NOTE: + * + * This only filters the mode list supplied to userspace in the + * GETCONNECTOR IOCTL. Compared to &drm_encoder_helper_funcs.mode_valid, + * &drm_crtc_helper_funcs.mode_valid and &drm_bridge_funcs.mode_valid, + * which are also called by the atomic helpers from + * drm_atomic_helper_check_modeset(). This allows userspace to force and + * ignore sink constraint (like the pixel clock limits in the screen's + * EDID), which is useful for e.g. testing, or working around a broken + * EDID. Any source hardware constraint (which always need to be + * enforced) therefore should be checked in one of the above callbacks, + * and not this one here. + * + * To avoid races with concurrent connector state updates, the helper + * libraries always call this with the &drm_mode_config.connection_mutex + * held. Because of this it's safe to inspect &drm_connector->state. + * + * RETURNS: + * + * Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum + * drm_mode_status. + */ + enum drm_mode_status (*mode_valid)(struct drm_connector *connector, + struct drm_display_mode *mode); + /** + * @best_encoder: + * + * This function should select the best encoder for the given connector. + * + * This function is used by both the atomic helpers (in the + * drm_atomic_helper_check_modeset() function) and in the legacy CRTC + * helpers. + * + * NOTE: + * + * In atomic drivers this function is called in the check phase of an + * atomic update. The driver is not allowed to change or inspect + * anything outside of arguments passed-in. Atomic drivers which need to + * inspect dynamic configuration state should instead use + * @atomic_best_encoder. + * + * You can leave this function to NULL if the connector is only + * attached to a single encoder and you are using the atomic helpers. + * In this case, the core will call drm_atomic_helper_best_encoder() + * for you. + * + * RETURNS: + * + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice, drivers should not check + * for this. + */ + struct drm_encoder *(*best_encoder)(struct drm_connector *connector); + + /** + * @atomic_best_encoder: + * + * This is the atomic version of @best_encoder for atomic drivers which + * need to select the best encoder depending upon the desired + * configuration and can't select it statically. + * + * This function is used by drm_atomic_helper_check_modeset(). + * If it is not implemented, the core will fallback to @best_encoder + * (or drm_atomic_helper_best_encoder() if @best_encoder is NULL). + * + * NOTE: + * + * This function is called in the check phase of an atomic update. 
The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice, drivers should not check + * for this. + */ + struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, + struct drm_connector_state *connector_state); + + /** + * @atomic_check: + * + * This hook is used to validate connector state. This function is + * called from &drm_atomic_helper_check_modeset, and is called when + * a connector property is set, or a modeset on the crtc is forced. + * + * Because &drm_atomic_helper_check_modeset may be called multiple times, + * this function should handle being called multiple times as well. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check and compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_connector *connector, + struct drm_connector_state *state); + + /** + * @atomic_commit: + * + * This hook is to be used by drivers implementing writeback connectors + * that need a point when to commit the writeback job to the hardware. + * The writeback_job to commit is available in + * &drm_connector_state.writeback_job. + * + * This hook is optional. + * + * This callback is used by the atomic modeset helpers. + */ + void (*atomic_commit)(struct drm_connector *connector, + struct drm_connector_state *state); +}; + +/** + * drm_connector_helper_add - sets the helper vtable for a connector + * @connector: DRM connector + * @funcs: helper vtable to set for @connector + */ +static inline void drm_connector_helper_add(struct drm_connector *connector, + const struct drm_connector_helper_funcs *funcs) +{ + connector->helper_private = funcs; +} + +/** + * struct drm_plane_helper_funcs - helper operations for planes + * + * These functions are used by the atomic helpers and by the transitional plane + * helpers. + */ +struct drm_plane_helper_funcs { + /** + * @prepare_fb: + * + * This hook is to prepare a framebuffer for scanout by e.g. pinning + * it's backing storage or relocating it into a contiguous block of + * VRAM. Other possible preparatory work includes flushing caches. + * + * This function must not block for outstanding rendering, since it is + * called in the context of the atomic IOCTL even for async commits to + * be able to return any errors to userspace. Instead the recommended + * way is to fill out the &drm_plane_state.fence of the passed-in + * &drm_plane_state. 
If the driver doesn't support native fences then + * equivalent functionality should be implemented through private + * members in the plane structure. + * + * Drivers which always have their buffers pinned should use + * drm_gem_fb_prepare_fb() for this hook. + * + * The helpers will call @cleanup_fb with matching arguments for every + * successful call to this hook. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * RETURNS: + * + * 0 on success or one of the following negative error codes allowed by + * the &drm_mode_config_funcs.atomic_commit vfunc. When using helpers + * this callback is the only one which can fail an atomic commit, + * everything else must complete successfully. + */ + int (*prepare_fb)(struct drm_plane *plane, + struct drm_plane_state *new_state); + /** + * @cleanup_fb: + * + * This hook is called to clean up any resources allocated for the given + * framebuffer and plane configuration in @prepare_fb. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*cleanup_fb)(struct drm_plane *plane, + struct drm_plane_state *old_state); + + /** + * @atomic_check: + * + * Drivers should check plane specific constraints in this hook. + * + * When using drm_atomic_helper_check_planes() plane's @atomic_check + * hooks are called before the ones for CRTCs, which allows drivers to + * request shared resources that the CRTC controls here. For more + * complicated dependencies the driver can call the provided check helpers + * multiple times until the computed state has a final configuration and + * everything has been checked. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check and compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_update: + * + * Drivers should use this function to update the plane state. This + * hook is called in-between the &drm_crtc_helper_funcs.atomic_begin and + * drm_crtc_helper_funcs.atomic_flush callbacks. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. 
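For the plane @atomic_check described earlier, a hypothetical implementation that delegates clipping and scaling checks to drm_atomic_helper_check_plane_state() (assuming that helper's signature in this kernel version) could read:

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state;

	if (!state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	/* Made-up hardware: no scaling, no positioning outside the CRTC,
	 * no updates while the CRTC is disabled. */
	return drm_atomic_helper_check_plane_state(state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   false, false);
}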
+ */ + void (*atomic_update)(struct drm_plane *plane, + struct drm_plane_state *old_state); + /** + * @atomic_disable: + * + * Drivers should use this function to unconditionally disable a plane. + * This hook is called in-between the + * &drm_crtc_helper_funcs.atomic_begin and + * drm_crtc_helper_funcs.atomic_flush callbacks. It is an alternative to + * @atomic_update, which will be called for disabling planes, too, if + * the @atomic_disable hook isn't implemented. + * + * This hook is also useful to disable planes in preparation of a modeset, + * by calling drm_atomic_helper_disable_planes_on_crtc() from the + * &drm_crtc_helper_funcs.disable hook. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_disable)(struct drm_plane *plane, + struct drm_plane_state *old_state); + + /** + * @atomic_async_check: + * + * Drivers should set this function pointer to check if the plane state + * can be updated in a async fashion. Here async means "not vblank + * synchronized". + * + * This hook is called by drm_atomic_async_check() to establish if a + * given update can be committed asynchronously, that is, if it can + * jump ahead of the state currently queued for update. + * + * RETURNS: + * + * Return 0 on success and any error returned indicates that the update + * can not be applied in asynchronous manner. + */ + int (*atomic_async_check)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_async_update: + * + * Drivers should set this function pointer to perform asynchronous + * updates of planes, that is, jump ahead of the currently queued + * state and update the plane. Here async means "not vblank + * synchronized". + * + * This hook is called by drm_atomic_helper_async_commit(). + * + * An async update will happen on legacy cursor updates. An async + * update won't happen if there is an outstanding commit modifying + * the same plane. + * + * Note that unlike &drm_plane_helper_funcs.atomic_update this hook + * takes the new &drm_plane_state as parameter. When doing async_update + * drivers shouldn't replace the &drm_plane_state but update the + * current one with the new plane configurations in the new + * plane_state. + * + * Drivers should also swap the framebuffers between current plane + * state (&drm_plane.state) and new_state. + * This is required since cleanup for async commits is performed on + * the new state, rather than old state like for traditional commits. + * Since we want to give up the reference on the current (old) fb + * instead of our brand new one, swap them in the driver during the + * async commit. + * + * FIXME: + * - It only works for single plane updates + * - Async Pageflips are not supported yet + * - Some hw might still scan out the old buffer until the next + * vblank, however we let go of the fb references as soon as + * we run this hook. For now drivers must implement their own workers + * for deferring if needed, until a common solution is created. 
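To make the swapping requirement above concrete, a hypothetical @atomic_async_update could update the current state in place and hand over the framebuffer like this (foo_plane_write_regs() is a made-up register-programming helper, error handling omitted):

static void foo_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_plane_state *new_state)
{
	struct drm_plane_state *state = plane->state;

	/* Update the current state object in place, don't replace it. */
	state->crtc_x = new_state->crtc_x;
	state->crtc_y = new_state->crtc_y;
	state->src_x = new_state->src_x;
	state->src_y = new_state->src_y;

	/* Swap fbs so cleanup releases the buffer we are giving up. */
	swap(state->fb, new_state->fb);

	foo_plane_write_regs(plane);	/* hypothetical: latch the new scanout */
}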
+ */ + void (*atomic_async_update)(struct drm_plane *plane, + struct drm_plane_state *new_state); +}; + +/** + * drm_plane_helper_add - sets the helper vtable for a plane + * @plane: DRM plane + * @funcs: helper vtable to set for @plane + */ +static inline void drm_plane_helper_add(struct drm_plane *plane, + const struct drm_plane_helper_funcs *funcs) +{ + plane->helper_private = funcs; +} + +/** + * struct drm_mode_config_helper_funcs - global modeset helper operations + * + * These helper functions are used by the atomic helpers. + */ +struct drm_mode_config_helper_funcs { + /** + * @atomic_commit_tail: + * + * This hook is used by the default atomic_commit() hook implemented in + * drm_atomic_helper_commit() together with the nonblocking commit + * helpers (see drm_atomic_helper_setup_commit() for a starting point) + * to implement blocking and nonblocking commits easily. It is not used + * by the atomic helpers. + * + * This function is called when the new atomic state has already been + * swapped into the various state pointers. The passed in state + * therefore contains copies of the old/previous state. This hook should + * commit the new state into hardware. Note that the helpers have + * already waited for preceding atomic commits and fences, but drivers + * can add more waiting calls at the start of their implementation, e.g. + * to wait for driver-internal requests for implicit syncing, before + * starting to commit the update to the hardware. + * + * After the atomic update is committed to the hardware this hook needs + * to call drm_atomic_helper_commit_hw_done(). Then wait for the update + * to be executed by the hardware, for example using + * drm_atomic_helper_wait_for_vblanks() or + * drm_atomic_helper_wait_for_flip_done(), and then clean up the old + * framebuffers using drm_atomic_helper_cleanup_planes(). + * + * When disabling a CRTC this hook _must_ stall for the commit to + * complete. Vblank waits don't work on a disabled CRTC, hence the core + * can't take care of this. And it also can't rely on the vblank event, + * since that can be signalled already when the screen shows black, + * which can happen much earlier than the last hardware access needed to + * shut off the display pipeline completely. + * + * This hook is optional; the default implementation is + * drm_atomic_helper_commit_tail(). + */ + void (*atomic_commit_tail)(struct drm_atomic_state *state); +}; + +#endif diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h new file mode 100644 index 000000000..a685d1bb2 --- /dev/null +++ b/include/drm/drm_modeset_lock.h @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2014 Red Hat + * Author: Rob Clark + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef DRM_MODESET_LOCK_H_ +#define DRM_MODESET_LOCK_H_ + +#include + +struct drm_modeset_lock; + +/** + * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx) + * @ww_ctx: base acquire ctx + * @contended: used internally for -EDEADLK handling + * @locked: list of held locks + * @trylock_only: trylock mode used in atomic contexts/panic notifiers + * @interruptible: whether interruptible locking should be used. + * + * Each thread competing for a set of locks must use one acquire + * ctx. And if any lock fxn returns -EDEADLK, it must backoff and + * retry. + */ +struct drm_modeset_acquire_ctx { + + struct ww_acquire_ctx ww_ctx; + + /* + * Contended lock: if a lock is contended you should only call + * drm_modeset_backoff() which drops locks and slow-locks the + * contended lock. + */ + struct drm_modeset_lock *contended; + + /* + * list of held locks (drm_modeset_lock) + */ + struct list_head locked; + + /* + * Trylock mode, use only for panic handlers! + */ + bool trylock_only; + + /* Perform interruptible waits on this context. */ + bool interruptible; +}; + +/** + * struct drm_modeset_lock - used for locking modeset resources. + * @mutex: resource locking + * @head: used to hold its place on the &drm_atomic_state.locked list when + * part of an atomic update + * + * Used for locking CRTCs and other modeset resources. + */ +struct drm_modeset_lock { + /* + * modeset lock + */ + struct ww_mutex mutex; + + /* + * Resources that are locked as part of an atomic update are added + * to a list (so we know what to unlock at the end).
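The acquire-context dance described above (back off and retry on -EDEADLK) typically looks like the following sketch around a single lock; foo_touch_crtc() is hypothetical:

static void foo_touch_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		/* Drop everything, slow-lock the contended lock, try again. */
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	if (!ret) {
		/* ... inspect or update state protected by crtc->mutex ... */
		;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}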
+ */ + struct list_head head; +}; + +#define DRM_MODESET_ACQUIRE_INTERRUPTIBLE BIT(0) + +void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx, + uint32_t flags); +void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx); +void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx); +int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx); + +void drm_modeset_lock_init(struct drm_modeset_lock *lock); + +/** + * drm_modeset_lock_fini - cleanup lock + * @lock: lock to cleanup + */ +static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock) +{ + WARN_ON(!list_empty(&lock->head)); +} + +/** + * drm_modeset_is_locked - equivalent to mutex_is_locked() + * @lock: lock to check + */ +static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock) +{ + return ww_mutex_is_locked(&lock->mutex); +} + +int drm_modeset_lock(struct drm_modeset_lock *lock, + struct drm_modeset_acquire_ctx *ctx); +int __must_check drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock); +void drm_modeset_unlock(struct drm_modeset_lock *lock); + +struct drm_device; +struct drm_crtc; +struct drm_plane; + +void drm_modeset_lock_all(struct drm_device *dev); +void drm_modeset_unlock_all(struct drm_device *dev); +void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); + +int drm_modeset_lock_all_ctx(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); + +#endif /* DRM_MODESET_LOCK_H_ */ diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h new file mode 100644 index 000000000..ead34ab5c --- /dev/null +++ b/include/drm/drm_of.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_OF_H__ +#define __DRM_OF_H__ + +#include +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) +#include +#endif + +struct component_master_ops; +struct component_match; +struct device; +struct drm_device; +struct drm_encoder; +struct drm_panel; +struct drm_bridge; +struct device_node; + +#ifdef CONFIG_OF +uint32_t drm_of_crtc_port_mask(struct drm_device *dev, + struct device_node *port); +uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, + struct device_node *port); +void drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node); +int drm_of_component_probe(struct device *dev, + int (*compare_of)(struct device *, void *), + const struct component_master_ops *m_ops); +int drm_of_encoder_active_endpoint(struct device_node *node, + struct drm_encoder *encoder, + struct of_endpoint *endpoint); +int drm_of_find_panel_or_bridge(const struct device_node *np, + int port, int endpoint, + struct drm_panel **panel, + struct drm_bridge **bridge); +#else +static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev, + struct device_node *port) +{ + return 0; +} + +static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, + struct device_node *port) +{ + return 0; +} + +static inline void +drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node) +{ +} + +static inline int +drm_of_component_probe(struct device *dev, + int (*compare_of)(struct device *, void *), + const struct component_master_ops *m_ops) +{ + return -EINVAL; +} + +static inline int drm_of_encoder_active_endpoint(struct device_node *node, + struct drm_encoder *encoder, + struct of_endpoint *endpoint) +{ + return -EINVAL; +} +static inline int 
drm_of_find_panel_or_bridge(const struct device_node *np, + int port, int endpoint, + struct drm_panel **panel, + struct drm_bridge **bridge) +{ + return -EINVAL; +} +#endif + +/* + * drm_of_panel_bridge_remove - remove panel bridge + * @np: device tree node containing panel bridge output ports + * + * Remove the panel bridge of a given DT node's port and endpoint number + * + * Returns zero if successful, or one of the standard error codes if it fails. + */ +static inline int drm_of_panel_bridge_remove(const struct device_node *np, + int port, int endpoint) +{ +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) + struct drm_bridge *bridge; + struct device_node *remote; + + remote = of_graph_get_remote_node(np, port, endpoint); + if (!remote) + return -ENODEV; + + bridge = of_drm_find_bridge(remote); + drm_panel_bridge_remove(bridge); + + return 0; +#else + return -EINVAL; +#endif +} + +static inline int drm_of_encoder_active_endpoint_id(struct device_node *node, + struct drm_encoder *encoder) +{ + struct of_endpoint endpoint; + int ret = drm_of_encoder_active_endpoint(node, encoder, + &endpoint); + + return ret ?: endpoint.id; +} + +static inline int drm_of_encoder_active_port_id(struct device_node *node, + struct drm_encoder *encoder) +{ + struct of_endpoint endpoint; + int ret = drm_of_encoder_active_endpoint(node, encoder, + &endpoint); + + return ret ?: endpoint.port; +} + +#endif /* __DRM_OF_H__ */ diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h new file mode 100644 index 000000000..ee8d61b64 --- /dev/null +++ b/include/drm/drm_os_linux.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * \file drm_os_linux.h + * OS abstraction macros. + */ + +#include /* For task queue support */ +#include +#include +#include + +/** Current process ID */ +#define DRM_CURRENTPID task_pid_nr(current) +#define DRM_UDELAY(d) udelay(d) +/** Read a byte from a MMIO region */ +#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset)) +/** Read a word from a MMIO region */ +#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset)) +/** Read a dword from a MMIO region */ +#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset)) +/** Write a byte into a MMIO region */ +#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset)) +/** Write a word into a MMIO region */ +#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset)) +/** Write a dword into a MMIO region */ +#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset)) + +/** Read a qword from a MMIO region - be careful using these unless you really understand them */ +#define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset)) +/** Write a qword into a MMIO region */ +#define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset)) + +#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ +do { \ + DECLARE_WAITQUEUE(entry, current); \ + unsigned long end = jiffies + (timeout); \ + add_wait_queue(&(queue), &entry); \ + \ + for (;;) { \ + __set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (time_after_eq(jiffies, end)) { \ + ret = -EBUSY; \ + break; \ + } \ + schedule_timeout((HZ/100 > 1) ? 
HZ/100 : 1); \ + if (signal_pending(current)) { \ + ret = -EINTR; \ + break; \ + } \ + } \ + __set_current_state(TASK_RUNNING); \ + remove_wait_queue(&(queue), &entry); \ +} while (0) diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h new file mode 100644 index 000000000..675aa1e87 --- /dev/null +++ b/include/drm/drm_panel.h @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2013, NVIDIA Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DRM_PANEL_H__ +#define __DRM_PANEL_H__ + +#include +#include +#include + +struct device_node; +struct drm_connector; +struct drm_device; +struct drm_panel; +struct display_timing; + +/** + * struct drm_panel_funcs - perform operations on a given panel + * @disable: disable panel (turn off back light, etc.) + * @unprepare: turn off panel + * @prepare: turn on panel and perform set up + * @enable: enable panel (turn on back light, etc.) + * @get_modes: add modes to the connector that the panel is attached to and + * return the number of modes added + * @get_timings: copy display timings into the provided array and return + * the number of display timings available + * + * The .prepare() function is typically called before the display controller + * starts to transmit video data. Panel drivers can use this to turn the panel + * on and wait for it to become ready. If additional configuration is required + * (via a control bus such as I2C, SPI or DSI for example) this is a good time + * to do that. + * + * After the display controller has started transmitting video data, it's safe + * to call the .enable() function. This will typically enable the backlight to + * make the image on screen visible. Some panels require a certain amount of + * time or frames before the image is displayed. This function is responsible + * for taking this into account before enabling the backlight to avoid visual + * glitches. + * + * Before stopping video transmission from the display controller it can be + * necessary to turn off the panel to avoid visual glitches. This is done in + * the .disable() function. Analogously to .enable() this typically involves + * turning off the backlight and waiting for some time to make sure no image + * is visible on the panel. It is then safe for the display controller to + * cease transmission of video data. + * + * To save power when no video data is transmitted, a driver can power down + * the panel. 
This is the job of the .unprepare() function. + */ +struct drm_panel_funcs { + int (*disable)(struct drm_panel *panel); + int (*unprepare)(struct drm_panel *panel); + int (*prepare)(struct drm_panel *panel); + int (*enable)(struct drm_panel *panel); + int (*get_modes)(struct drm_panel *panel); + int (*get_timings)(struct drm_panel *panel, unsigned int num_timings, + struct display_timing *timings); +}; + +/** + * struct drm_panel - DRM panel object + * @drm: DRM device owning the panel + * @connector: DRM connector that the panel is attached to + * @dev: parent device of the panel + * @funcs: operations that can be performed on the panel + * @list: panel entry in registry + */ +struct drm_panel { + struct drm_device *drm; + struct drm_connector *connector; + struct device *dev; + + const struct drm_panel_funcs *funcs; + + struct list_head list; +}; + +/** + * drm_panel_unprepare - power off a panel + * @panel: DRM panel + * + * Calling this function will completely power off a panel (assert the panel's + * reset, turn off power supplies, ...). After this function has completed, it + * is usually no longer possible to communicate with the panel until another + * call to drm_panel_prepare(). + * + * Return: 0 on success or a negative error code on failure. + */ +static inline int drm_panel_unprepare(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->unprepare) + return panel->funcs->unprepare(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_disable - disable a panel + * @panel: DRM panel + * + * This will typically turn off the panel's backlight or disable the display + * drivers. For smart panels it should still be possible to communicate with + * the integrated circuitry via any command bus after this call. + * + * Return: 0 on success or a negative error code on failure. + */ +static inline int drm_panel_disable(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->disable) + return panel->funcs->disable(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_prepare - power on a panel + * @panel: DRM panel + * + * Calling this function will enable power and deassert any reset signals to + * the panel. After this has completed it is possible to communicate with any + * integrated circuitry via a command bus. + * + * Return: 0 on success or a negative error code on failure. + */ +static inline int drm_panel_prepare(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->prepare) + return panel->funcs->prepare(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_enable - enable a panel + * @panel: DRM panel + * + * Calling this function will cause the panel display drivers to be turned on + * and the backlight to be enabled. Content will be visible on screen after + * this call completes. + * + * Return: 0 on success or a negative error code on failure. + */ +static inline int drm_panel_enable(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->enable) + return panel->funcs->enable(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +/** + * drm_panel_get_modes - probe the available display modes of a panel + * @panel: DRM panel + * + * The modes probed from the panel are automatically added to the connector + * that the panel is attached to. + * + * Return: The number of modes available from the panel on success or a + * negative error code on failure.
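Putting the prepare/enable/disable/unprepare ordering described above into a sketch, as a display controller driver might call it (struct foo_pipe, foo_start_video() and foo_stop_video() are hypothetical, return values unchecked for brevity):

struct foo_pipe {				/* hypothetical pipeline state  */
	struct drm_panel *panel;
};

void foo_start_video(struct foo_pipe *pipe);	/* hypothetical: start scanout  */
void foo_stop_video(struct foo_pipe *pipe);	/* hypothetical: stop scanout   */

static void foo_pipe_enable(struct foo_pipe *pipe)
{
	drm_panel_prepare(pipe->panel);		/* power up; panel accepts commands */
	foo_start_video(pipe);			/* controller starts sending video  */
	drm_panel_enable(pipe->panel);		/* backlight on; image visible      */
}

static void foo_pipe_disable(struct foo_pipe *pipe)
{
	drm_panel_disable(pipe->panel);		/* backlight off                    */
	foo_stop_video(pipe);			/* stop video transmission          */
	drm_panel_unprepare(pipe->panel);	/* power the panel back down        */
}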
+ */ +static inline int drm_panel_get_modes(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->get_modes) + return panel->funcs->get_modes(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +void drm_panel_init(struct drm_panel *panel); + +int drm_panel_add(struct drm_panel *panel); +void drm_panel_remove(struct drm_panel *panel); + +int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); +int drm_panel_detach(struct drm_panel *panel); + +#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL) +struct drm_panel *of_drm_find_panel(const struct device_node *np); +#else +static inline struct drm_panel *of_drm_find_panel(const struct device_node *np) +{ + return ERR_PTR(-ENODEV); +} +#endif + +#endif diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h new file mode 100644 index 000000000..8181e9e7c --- /dev/null +++ b/include/drm/drm_pci.h @@ -0,0 +1,61 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. (Rik) Faith + * Author: Gareth Hughes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DRM_PCI_H_ +#define _DRM_PCI_H_ + +#include + +struct drm_dma_handle; +struct drm_device; +struct drm_driver; +struct drm_master; + +struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, + size_t align); +void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); + +int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); +void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); +#ifdef CONFIG_PCI +int drm_get_pci_dev(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct drm_driver *driver); +#else +static inline int drm_get_pci_dev(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct drm_driver *driver) +{ + return -ENOSYS; +} +#endif + +#endif /* _DRM_PCI_H_ */ diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h new file mode 100644 index 000000000..683742826 --- /dev/null +++ b/include/drm/drm_pciids.h @@ -0,0 +1,814 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#define radeon_PCI_IDS \ + {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + 
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ + {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \ + {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \ + {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ + {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ + {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C59, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ + {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ + {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ + {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ + {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ + {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ + {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ + {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4a, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 
0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6703, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6704, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6705, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6706, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6707, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6708, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6709, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6718, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x671f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6725, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6726, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6728, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6743, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6745, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6746, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 
0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6763, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6764, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6765, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6766, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + 
{0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x684C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x793f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7941, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7942, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 
+ {0x1002, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9402, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9403, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9444, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9450, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x945E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x947B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9480, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 
0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x948A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9495, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x949F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9504, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9505, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9506, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9507, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9508, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9509, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x950F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9517, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9540, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9542, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x954E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x954F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x955f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9586, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9587, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9589, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9590, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9593, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9595, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9596, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9597, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9599, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x959B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9611, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9642, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 
0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 
0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9904, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9905, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9906, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0, 0, 0} + +#define r128_PCI_IDS \ + {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define mga_PCI_IDS \ + {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ + {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ + {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \ + {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \ + {0, 0, 0} + +#define sisdrv_PCI_IDS \ + {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ + {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ + {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ + {0, 0, 0} + +#define tdfx_PCI_IDS \ + {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 
0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define viadrv_PCI_IDS \ + {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ + {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \ + {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ + {0, 0, 0} + +#define i810_PCI_IDS \ + {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define savage_PCI_IDS \ + {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ + {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ + {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ + {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ + {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ + {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ + {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ + {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ + {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ + {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ + {0, 0, 0} diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h new file mode 100644 index 000000000..8a152dc16 --- /dev/null +++ b/include/drm/drm_plane.h @@ -0,0 +1,783 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_PLANE_H__ +#define __DRM_PLANE_H__ + +#include +#include +#include +#include + +struct drm_crtc; +struct drm_printer; +struct drm_modeset_acquire_ctx; + +/** + * struct drm_plane_state - mutable plane state + * + * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and + * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the + * raw coordinates provided by userspace. Drivers should use + * drm_atomic_helper_check_plane_state() and only use the derived rectangles in + * @src and @dst to program the hardware. + */ +struct drm_plane_state { + /** @plane: backpointer to the plane */ + struct drm_plane *plane; + + /** + * @crtc: + * + * Currently bound CRTC, NULL if disabled. Do not write this directly, + * use drm_atomic_set_crtc_for_plane() + */ + struct drm_crtc *crtc; + + /** + * @fb: + * + * Currently bound framebuffer. Do not write this directly, use + * drm_atomic_set_fb_for_plane() + */ + struct drm_framebuffer *fb; + + /** + * @fence: + * + * Optional fence to wait for before scanning out @fb. The core atomic + * code will set this when userspace is using explicit fencing. Do not + * write this directly for a driver's implicit fence, use + * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is + * preserved. + * + * Drivers should store any implicit fence in this from their + * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb() + * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers. + */ + struct dma_fence *fence; + + /** + * @crtc_x: + * + * Left position of visible portion of plane on crtc, signed dest + * location allows it to be partially off screen. + */ + + int32_t crtc_x; + /** + * @crtc_y: + * + * Upper position of visible portion of plane on crtc, signed dest + * location allows it to be partially off screen. + */ + int32_t crtc_y; + + /** @crtc_w: width of visible portion of plane on crtc */ + /** @crtc_h: height of visible portion of plane on crtc */ + uint32_t crtc_w, crtc_h; + + /** + * @src_x: left position of visible portion of plane within plane (in + * 16.16 fixed point). + */ + uint32_t src_x; + /** + * @src_y: upper position of visible portion of plane within plane (in + * 16.16 fixed point). + */ + uint32_t src_y; + /** @src_w: width of visible portion of plane (in 16.16) */ + /** @src_h: height of visible portion of plane (in 16.16) */ + uint32_t src_h, src_w; + + /** + * @alpha: + * Opacity of the plane with 0 as completely transparent and 0xffff as + * completely opaque. See drm_plane_create_alpha_property() for more + * details. + */ + u16 alpha; + + /** + * @rotation: + * Rotation of the plane. See drm_plane_create_rotation_property() for + * more details. + */ + unsigned int rotation; + + /** + * @zpos: + * Priority of the given plane on crtc (optional). + * + * Note that multiple active planes on the same crtc can have an + * identical zpos value.
The rule for resolving the conflict is to compare + * the plane object IDs; the plane with a higher ID must be stacked on + * top of a plane with a lower ID. + * + * See drm_plane_create_zpos_property() and + * drm_plane_create_zpos_immutable_property() for more details. + */ + unsigned int zpos; + + /** + * @normalized_zpos: + * Normalized value of zpos: unique, range from 0 to N-1 where N is the + * number of active planes for the given crtc. Note that the driver must set + * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to + * update this before it can be trusted. + */ + unsigned int normalized_zpos; + + /** + * @color_encoding: + * + * Color encoding for non-RGB formats + */ + enum drm_color_encoding color_encoding; + + /** + * @color_range: + * + * Color range for non-RGB formats + */ + enum drm_color_range color_range; + + /** @src: clipped source coordinates of the plane (in 16.16) */ + /** @dst: clipped destination coordinates of the plane */ + struct drm_rect src, dst; + + /** + * @visible: + * + * Visibility of the plane. This can be false even if fb!=NULL and + * crtc!=NULL, due to clipping. + */ + bool visible; + + /** + * @commit: Tracks the pending commit to prevent use-after-free conditions, + * and for async plane updates. + * + * May be NULL. + */ + struct drm_crtc_commit *commit; + + /** @state: backpointer to global drm_atomic_state */ + struct drm_atomic_state *state; +}; + +static inline struct drm_rect +drm_plane_state_src(const struct drm_plane_state *state) +{ + struct drm_rect src = { + .x1 = state->src_x, + .y1 = state->src_y, + .x2 = state->src_x + state->src_w, + .y2 = state->src_y + state->src_h, + }; + return src; +} + +static inline struct drm_rect +drm_plane_state_dest(const struct drm_plane_state *state) +{ + struct drm_rect dest = { + .x1 = state->crtc_x, + .y1 = state->crtc_y, + .x2 = state->crtc_x + state->crtc_w, + .y2 = state->crtc_y + state->crtc_h, + }; + return dest; +} + +/** + * struct drm_plane_funcs - driver plane control functions + */ +struct drm_plane_funcs { + /** + * @update_plane: + * + * This is the legacy entry point to enable and configure the plane for + * the given CRTC and framebuffer. It is never called to disable the + * plane, i.e. the passed-in crtc and fb parameters are never NULL. + * + * The source rectangle in frame buffer memory coordinates is given by + * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point + * values). Devices that don't support subpixel plane coordinates can + * ignore the fractional part. + * + * The destination rectangle in CRTC coordinates is given by the + * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values). + * Devices scale the source rectangle to the destination rectangle. If + * scaling is not supported, and the source rectangle size doesn't match + * the destination rectangle size, the driver must return a + * -EINVAL error. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_update_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*update_plane)(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @disable_plane: + * + * This is the legacy entry point to disable the plane.
The DRM core + * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call + * with the frame buffer ID set to 0. Disabled planes must not be + * processed by the CRTC. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_disable_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*disable_plane)(struct drm_plane *plane, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @destroy: + * + * Clean up plane resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a plane cannot be hotplugged + * in DRM. + */ + void (*destroy)(struct drm_plane *plane); + + /** + * @reset: + * + * Reset plane hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_plane_reset() to reset + * atomic state using this hook. + */ + void (*reset)(struct drm_plane *plane); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * plane. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. For atomic drivers it is not used because + * property handling is done entirely in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_property)(struct drm_plane *plane, + struct drm_property *property, uint64_t val); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this plane and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. + * + * This callback is mandatory for atomic drivers. + * + * Atomic drivers which don't subclass &struct drm_plane_state should use + * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before &drm_plane.state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ + struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + * + * This callback is mandatory for atomic drivers. + */ + void (*atomic_destroy_state)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. 
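+ *
+ * As a rough sketch only (the "foo" driver, its subclassed plane state
+ * and the sharpness property below are made up for illustration and are
+ * not part of this API), such a hook might decode one driver-private
+ * property like this::
+ *
+ *	static int foo_plane_atomic_set_property(struct drm_plane *plane,
+ *						 struct drm_plane_state *state,
+ *						 struct drm_property *property,
+ *						 uint64_t val)
+ *	{
+ *		struct foo_plane *fp = to_foo_plane(plane);
+ *		struct foo_plane_state *fs = to_foo_plane_state(state);
+ *
+ *		if (property == fp->sharpness_property)
+ *			fs->sharpness = val;
+ *		else
+ *			return -EINVAL;
+ *
+ *		return 0;
+ *	}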
+ * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize the atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_plane_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed-in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this plane). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ + int (*atomic_set_property)(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETPLANE IOCTL. + * + * Do not call this function directly, use + * drm_atomic_plane_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this plane). + */ + int (*atomic_get_property)(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val); + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the plane like debugfs interfaces. + * It is called late in the driver load sequence from drm_dev_register(). + * Everything added from this callback should be unregistered in + * the early_unregister callback. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_plane *plane); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the plane from + * @late_register. It is called from drm_dev_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torn down. + */ + void (*early_unregister)(struct drm_plane *plane); + + /** + * @atomic_print_state: + * + * If the driver subclasses &struct drm_plane_state, it should implement + * this optional hook for printing additional driver-specific state. + * + * Do not call this directly, use drm_atomic_plane_print_state() + * instead.
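+ *
+ * A minimal sketch (the "foo" state subclass and its sharpness field are
+ * made up for illustration; only drm_printf() and the hook prototype
+ * below come from these headers)::
+ *
+ *	static void foo_plane_atomic_print_state(struct drm_printer *p,
+ *						 const struct drm_plane_state *state)
+ *	{
+ *		const struct foo_plane_state *fs = to_foo_plane_state(state);
+ *
+ *		drm_printf(p, "\tsharpness=%u\n", fs->sharpness);
+ *	}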
+ */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_plane_state *state); + + /** + * @format_mod_supported: + * + * This optional hook is used by the DRM core to determine if the given + * format/modifier combination is valid for the plane. This allows the + * DRM to generate the correct format bitmask (which formats apply to + * which modifier), and to validate modifiers at atomic_check time. + * + * If not present, then any modifier in the plane's modifier + * list is allowed with any of the plane's formats. + * + * Returns: + * + * True if the given modifier is valid for that format on the plane. + * False otherwise. + */ + bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format, + uint64_t modifier); +}; + +/** + * enum drm_plane_type - uapi plane type enumeration + * + * For historical reasons not all planes are made the same. This enumeration is + * used to tell the different types of planes apart to implement the different + * uapi semantics for them. For userspace which is universal plane aware and + * which is using the atomic IOCTL there's no difference between these planes + * (beyond what the driver and hardware can support, of course). + * + * For compatibility with legacy userspace, only overlay planes are made + * available to userspace by default. Userspace clients may set the + * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they + * wish to receive a universal plane list containing all plane types. See also + * drm_for_each_legacy_plane(). + * + * WARNING: The values of this enum are UABI since they're exposed in the "type" + * property. + */ +enum drm_plane_type { + /** + * @DRM_PLANE_TYPE_OVERLAY: + * + * Overlay planes represent all non-primary, non-cursor planes. Some + * drivers refer to these types of planes as "sprites" internally. + */ + DRM_PLANE_TYPE_OVERLAY, + + /** + * @DRM_PLANE_TYPE_PRIMARY: + * + * Primary planes represent a "main" plane for a CRTC. Primary planes + * are the planes operated upon by CRTC modesetting and flipping + * operations described in the &drm_crtc_funcs.page_flip and + * &drm_crtc_funcs.set_config hooks. + */ + DRM_PLANE_TYPE_PRIMARY, + + /** + * @DRM_PLANE_TYPE_CURSOR: + * + * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes + * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and + * DRM_IOCTL_MODE_CURSOR2 IOCTLs. + */ + DRM_PLANE_TYPE_CURSOR, +}; + + +/** + * struct drm_plane - central DRM plane control structure + * + * Planes represent the scanout hardware of a display block. They receive their + * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control + * the color conversion, see `Plane Composition Properties`_ for more details, + * and are also involved in the color conversion of input pixels, see `Color + * Management Properties`_ for details on that. + */ +struct drm_plane { + /** @dev: DRM device this plane belongs to */ + struct drm_device *dev; + + /** + * @head: + * + * List of all planes on @dev, linked from &drm_mode_config.plane_list. + * Invariant over the lifetime of @dev and therefore does not need + * locking. + */ + struct list_head head; + + /** @name: human readable name, can be overwritten by the driver */ + char *name; + + /** + * @mutex: + * + * Protects modeset plane state, together with the &drm_crtc.mutex of + * the CRTC this plane is linked to (when active, getting activated or + * getting disabled). + * + * For atomic drivers specifically this protects @state.
+ */ + struct drm_modeset_lock mutex; + + /** @base: base mode object */ + struct drm_mode_object base; + + /** + * @possible_crtcs: pipes this plane can be bound to, constructed from + * drm_crtc_mask() + */ + uint32_t possible_crtcs; + /** @format_types: array of formats supported by this plane */ + uint32_t *format_types; + /** @format_count: Size of the array pointed at by @format_types. */ + unsigned int format_count; + /** + * @format_default: driver hasn't supplied supported formats for the + * plane. Used by the drm_plane_init compatibility wrapper only. + */ + bool format_default; + + /** @modifiers: array of modifiers supported by this plane */ + uint64_t *modifiers; + /** @modifier_count: Size of the array pointed at by @modifiers. */ + unsigned int modifier_count; + + /** + * @crtc: + * + * Currently bound CRTC, only meaningful for non-atomic drivers. For + * atomic drivers this is forced to be NULL, atomic drivers should + * instead check &drm_plane_state.crtc. + */ + struct drm_crtc *crtc; + + /** + * @fb: + * + * Currently bound framebuffer, only meaningful for non-atomic drivers. + * For atomic drivers this is forced to be NULL, atomic drivers should + * instead check &drm_plane_state.fb. + */ + struct drm_framebuffer *fb; + + /** + * @old_fb: + * + * Temporary tracking of the old fb while a modeset is ongoing. Only + * used by non-atomic drivers, forced to be NULL for atomic drivers. + */ + struct drm_framebuffer *old_fb; + + /** @funcs: plane control functions */ + const struct drm_plane_funcs *funcs; + + /** @properties: property tracking for this plane */ + struct drm_object_properties properties; + + /** @type: Type of plane, see &enum drm_plane_type for details. */ + enum drm_plane_type type; + + /** + * @index: Position inside the mode_config.list, can be used as an array + * index. It is invariant over the lifetime of the plane. + */ + unsigned index; + + /** @helper_private: mid-layer private data */ + const struct drm_plane_helper_funcs *helper_private; + + /** + * @state: + * + * Current atomic state for this plane. + * + * This is protected by @mutex. Note that nonblocking atomic commits + * access the current plane state without taking locks. Either by going + * through the &struct drm_atomic_state pointers, see + * for_each_oldnew_plane_in_state(), for_each_old_plane_in_state() and + * for_each_new_plane_in_state(). Or through careful ordering of atomic + * commit operations as implemented in the atomic helpers, see + * &struct drm_crtc_commit. + */ + struct drm_plane_state *state; + + /** + * @alpha_property: + * Optional alpha property for this plane. See + * drm_plane_create_alpha_property(). + */ + struct drm_property *alpha_property; + /** + * @zpos_property: + * Optional zpos property for this plane. See + * drm_plane_create_zpos_property(). + */ + struct drm_property *zpos_property; + /** + * @rotation_property: + * Optional rotation property for this plane. See + * drm_plane_create_rotation_property(). + */ + struct drm_property *rotation_property; + + /** + * @color_encoding_property: + * + * Optional "COLOR_ENCODING" enum property for specifying + * color encoding for non-RGB formats. + * See drm_plane_create_color_properties(). + */ + struct drm_property *color_encoding_property; + /** + * @color_range_property: + * + * Optional "COLOR_RANGE" enum property for specifying + * color range for non-RGB formats. + * See drm_plane_create_color_properties().
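+ *
+ * As a sketch, a driver whose planes accept BT.601 and BT.709 content
+ * in both limited and full range might create both color properties
+ * from its plane initialization code roughly like this (error handling
+ * omitted)::
+ *
+ *	drm_plane_create_color_properties(plane,
+ *					  BIT(DRM_COLOR_YCBCR_BT601) |
+ *					  BIT(DRM_COLOR_YCBCR_BT709),
+ *					  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ *					  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ *					  DRM_COLOR_YCBCR_BT709,
+ *					  DRM_COLOR_YCBCR_LIMITED_RANGE);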
+ */ + struct drm_property *color_range_property; +}; + +#define obj_to_plane(x) container_of(x, struct drm_plane, base) + +__printf(9, 10) +int drm_universal_plane_init(struct drm_device *dev, + struct drm_plane *plane, + uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, + unsigned int format_count, + const uint64_t *format_modifiers, + enum drm_plane_type type, + const char *name, ...); +int drm_plane_init(struct drm_device *dev, + struct drm_plane *plane, + uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + bool is_primary); +void drm_plane_cleanup(struct drm_plane *plane); + +/** + * drm_plane_index - find the index of a registered plane + * @plane: plane to find index for + * + * Given a registered plane, return the index of that plane within a DRM + * device's list of planes. + */ +static inline unsigned int drm_plane_index(const struct drm_plane *plane) +{ + return plane->index; +} + +/** + * drm_plane_mask - find the mask of a registered plane + * @plane: plane to find mask for + */ +static inline u32 drm_plane_mask(const struct drm_plane *plane) +{ + return 1 << drm_plane_index(plane); +} + +struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); +void drm_plane_force_disable(struct drm_plane *plane); + +int drm_mode_plane_set_obj_prop(struct drm_plane *plane, + struct drm_property *property, + uint64_t value); + +/** + * drm_plane_find - find a &drm_plane + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: plane id + * + * Returns the plane with @id, NULL if it doesn't exist. Simple wrapper around + * drm_mode_object_find(). + */ +static inline struct drm_plane *drm_plane_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PLANE); + return mo ? obj_to_plane(mo) : NULL; +} + +/** + * drm_for_each_plane_mask - iterate over planes specified by bitmask + * @plane: the loop cursor + * @dev: the DRM device + * @plane_mask: bitmask of plane indices + * + * Iterate over all planes specified by bitmask. + */ +#define drm_for_each_plane_mask(plane, dev, plane_mask) \ + list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ + for_each_if ((plane_mask) & drm_plane_mask(plane)) + +/** + * drm_for_each_legacy_plane - iterate over all planes for legacy userspace + * @plane: the loop cursor + * @dev: the DRM device + * + * Iterate over all legacy planes of @dev, excluding primary and cursor planes. + * This is useful for implementing userspace apis when userspace is not + * universal plane aware. See also &enum drm_plane_type. + */ +#define drm_for_each_legacy_plane(plane, dev) \ + list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ + for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY) + +/** + * drm_for_each_plane - iterate over all planes + * @plane: the loop cursor + * @dev: the DRM device + * + * Iterate over all planes of @dev, include primary and cursor planes. 
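+ *
+ * For example, to dump the name of every plane to a &drm_printer that
+ * the caller has already set up (the printer "p" here is assumed, not
+ * part of this macro)::
+ *
+ *	struct drm_plane *plane;
+ *
+ *	drm_for_each_plane(plane, dev)
+ *		drm_printf(&p, "plane %u: %s\n", plane->index, plane->name);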
+ */ +#define drm_for_each_plane(plane, dev) \ + list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) + + +#endif diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h new file mode 100644 index 000000000..26cee2934 --- /dev/null +++ b/include/drm/drm_plane_helper.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2011-2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_PLANE_HELPER_H +#define DRM_PLANE_HELPER_H + +#include +#include +#include +#include + +/* + * Drivers that don't allow primary plane scaling may pass this macro in place + * of the min/max scale parameters of the update checker function. + * + * Due to src being in 16.16 fixed point and dest being in integer pixels, + * 1<<16 represents no scaling. + */ +#define DRM_PLANE_HELPER_NO_SCALING (1<<16) + +int drm_plane_helper_check_update(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_rect *src, + struct drm_rect *dest, + unsigned int rotation, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled, + bool *visible); +int drm_primary_helper_update(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + struct drm_modeset_acquire_ctx *ctx); +int drm_primary_helper_disable(struct drm_plane *plane, + struct drm_modeset_acquire_ctx *ctx); +void drm_primary_helper_destroy(struct drm_plane *plane); +extern const struct drm_plane_funcs drm_primary_helper_funcs; + +int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + struct drm_modeset_acquire_ctx *ctx); +int drm_plane_helper_disable(struct drm_plane *plane, + struct drm_modeset_acquire_ctx *ctx); + +/* For use by drm_crtc_helper.c */ +int drm_plane_helper_commit(struct drm_plane *plane, + struct drm_plane_state *plane_state, + struct drm_framebuffer *old_fb); +#endif diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h new file mode 100644 index 000000000..d716d653b --- /dev/null +++ b/include/drm/drm_prime.h @@ -0,0 +1,107 @@ +/* + * Copyright © 2012 Red Hat + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dave Airlie + * Rob Clark + * + */ + +#ifndef __DRM_PRIME_H__ +#define __DRM_PRIME_H__ + +#include +#include +#include + +/** + * struct drm_prime_file_private - per-file tracking for PRIME + * + * This just contains the internal &struct dma_buf and handle caches for each + * &struct drm_file used by the PRIME core code. + */ + +struct drm_prime_file_private { +/* private: */ + struct mutex lock; + struct rb_root dmabufs; + struct rb_root handles; +}; + +struct device; + +struct dma_buf_export_info; +struct dma_buf; +struct dma_buf_attachment; + +enum dma_data_direction; + +struct drm_device; +struct drm_gem_object; +struct drm_file; + +struct device; + +struct dma_buf *drm_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, + int flags); +int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, uint32_t flags, + int *prime_fd); +struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + +struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, + struct dma_buf *dma_buf, + struct device *attach_dev); + +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, uint32_t *handle); +struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, + struct dma_buf_export_info *exp_info); +void drm_gem_dmabuf_release(struct dma_buf *dma_buf); +int drm_gem_map_attach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach); +void drm_gem_map_detach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach); +struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir); +void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir); +void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf); +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); +void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num); +void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, + void *addr); +int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); + +int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, + dma_addr_t *addrs, int 
max_pages); +struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); +void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); + + +#endif /* __DRM_PRIME_H__ */ diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h new file mode 100644 index 000000000..f3e6eed3e --- /dev/null +++ b/include/drm/drm_print.h @@ -0,0 +1,426 @@ +/* + * Copyright (C) 2016 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark + */ + +#ifndef DRM_PRINT_H_ +#define DRM_PRINT_H_ + +#include +#include +#include +#include + +/** + * DOC: print + * + * A simple wrapper for dev_printk(), seq_printf(), etc. Allows same + * debug code to be used for both debugfs and printk logging. + * + * For example:: + * + * void log_some_info(struct drm_printer *p) + * { + * drm_printf(p, "foo=%d\n", foo); + * drm_printf(p, "bar=%d\n", bar); + * } + * + * #ifdef CONFIG_DEBUG_FS + * void debugfs_show(struct seq_file *f) + * { + * struct drm_printer p = drm_seq_file_printer(f); + * log_some_info(&p); + * } + * #endif + * + * void some_other_function(...) + * { + * struct drm_printer p = drm_info_printer(drm->dev); + * log_some_info(&p); + * } + */ + +/** + * struct drm_printer - drm output "stream" + * + * Do not use struct members directly. Use drm_printer_seq_file(), + * drm_printer_info(), etc to initialize. And drm_printf() for output. 
+ */ +struct drm_printer { + /* private: */ + void (*printfn)(struct drm_printer *p, struct va_format *vaf); + void (*puts)(struct drm_printer *p, const char *str); + void *arg; + const char *prefix; +}; + +void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf); +void __drm_puts_coredump(struct drm_printer *p, const char *str); +void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); +void __drm_puts_seq_file(struct drm_printer *p, const char *str); +void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf); + +__printf(2, 3) +void drm_printf(struct drm_printer *p, const char *f, ...); +void drm_puts(struct drm_printer *p, const char *str); + +__printf(2, 0) +/** + * drm_vprintf - print to a &drm_printer stream + * @p: the &drm_printer + * @fmt: format string + * @va: the va_list + */ +static inline void +drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va) +{ + struct va_format vaf = { .fmt = fmt, .va = va }; + + p->printfn(p, &vaf); +} + +/** + * drm_printf_indent - Print to a &drm_printer stream with indentation + * @printer: DRM printer + * @indent: Tab indentation level (max 5) + * @fmt: Format string + */ +#define drm_printf_indent(printer, indent, fmt, ...) \ + drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__) + +/** + * struct drm_print_iterator - local struct used with drm_printer_coredump + * @data: Pointer to the devcoredump output buffer + * @start: The offset within the buffer to start writing + * @remain: The number of bytes to write for this iteration + */ +struct drm_print_iterator { + void *data; + ssize_t start; + ssize_t remain; + /* private: */ + ssize_t offset; +}; + +/** + * drm_coredump_printer - construct a &drm_printer that can output to a buffer + * from the read function for devcoredump + * @iter: A pointer to a struct drm_print_iterator for the read instance + * + * This wrapper extends drm_printf() to work with a dev_coredumpm() callback + * function. The passed in drm_print_iterator struct contains the buffer + * pointer, size and offset as passed in from devcoredump. + * + * For example:: + * + * void coredump_read(char *buffer, loff_t offset, size_t count, + * void *data, size_t datalen) + * { + * struct drm_print_iterator iter; + * struct drm_printer p; + * + * iter.data = buffer; + * iter.start = offset; + * iter.remain = count; + * + * p = drm_coredump_printer(&iter); + * + * drm_printf(p, "foo=%d\n", foo); + * } + * + * void makecoredump(...) + * { + * ... + * dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL, + * coredump_read, ...) 
+ * } + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer +drm_coredump_printer(struct drm_print_iterator *iter) +{ + struct drm_printer p = { + .printfn = __drm_printfn_coredump, + .puts = __drm_puts_coredump, + .arg = iter, + }; + + /* Set the internal offset of the iterator to zero */ + iter->offset = 0; + + return p; +} + +/** + * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file + * @f: the &struct seq_file to output to + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_seq_file_printer(struct seq_file *f) +{ + struct drm_printer p = { + .printfn = __drm_printfn_seq_file, + .puts = __drm_puts_seq_file, + .arg = f, + }; + return p; +} + +/** + * drm_info_printer - construct a &drm_printer that outputs to dev_printk() + * @dev: the &struct device pointer + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_info_printer(struct device *dev) +{ + struct drm_printer p = { + .printfn = __drm_printfn_info, + .arg = dev, + }; + return p; +} + +/** + * drm_debug_printer - construct a &drm_printer that outputs to pr_debug() + * @prefix: debug output prefix + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_debug_printer(const char *prefix) +{ + struct drm_printer p = { + .printfn = __drm_printfn_debug, + .prefix = prefix + }; + return p; +} + +/* + * The following categories are defined: + * + * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... + * This is the category used by the DRM_DEBUG() macro. + * + * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ... + * This is the category used by the DRM_DEBUG_DRIVER() macro. + * + * KMS: used in the modesetting code. + * This is the category used by the DRM_DEBUG_KMS() macro. + * + * PRIME: used in the prime code. + * This is the category used by the DRM_DEBUG_PRIME() macro. + * + * ATOMIC: used in the atomic code. + * This is the category used by the DRM_DEBUG_ATOMIC() macro. + * + * VBL: used for verbose debug message in the vblank code + * This is the category used by the DRM_DEBUG_VBL() macro. + * + * Enabling verbose debug messages is done through the drm.debug parameter, + * each category being enabled by a bit. + * + * drm.debug=0x1 will enable CORE messages + * drm.debug=0x2 will enable DRIVER messages + * drm.debug=0x3 will enable CORE and DRIVER messages + * ... + * drm.debug=0x3f will enable all messages + * + * An interesting feature is that it's possible to enable verbose logging at + * run-time by echoing the debug value in its sysfs node: + * # echo 0xf > /sys/module/drm/parameters/debug + */ +#define DRM_UT_NONE 0x00 +#define DRM_UT_CORE 0x01 +#define DRM_UT_DRIVER 0x02 +#define DRM_UT_KMS 0x04 +#define DRM_UT_PRIME 0x08 +#define DRM_UT_ATOMIC 0x10 +#define DRM_UT_VBL 0x20 +#define DRM_UT_STATE 0x40 +#define DRM_UT_LEASE 0x80 +#define DRM_UT_DP 0x100 + +__printf(3, 4) +void drm_dev_printk(const struct device *dev, const char *level, + const char *format, ...); +__printf(3, 4) +void drm_dev_dbg(const struct device *dev, unsigned int category, + const char *format, ...); + +__printf(2, 3) +void drm_dbg(unsigned int category, const char *format, ...); +__printf(1, 2) +void drm_err(const char *format, ...); + +/* Macros to make printk easier */ + +#define _DRM_PRINTK(once, level, fmt, ...) \ + printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__) + +#define DRM_INFO(fmt, ...) 
\ + _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE(fmt, ...) \ + _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN(fmt, ...) \ + _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) + +#define DRM_INFO_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) + +/** + * Error output. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_ERROR(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__) +#define DRM_ERROR(fmt, ...) \ + drm_err(fmt, ##__VA_ARGS__) + +/** + * Rate limited error output. Like DRM_ERROR() but won't flood the log. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ +}) +#define DRM_ERROR_RATELIMITED(fmt, ...) \ + DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +#define DRM_DEV_INFO(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) + +#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + if (!__print_once) { \ + __print_once = true; \ + DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ + } \ +}) + +/** + * Debug output. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__) +#define DRM_DEBUG(fmt, ...) \ + drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) +#define DRM_DEBUG_DRIVER(fmt, ...) \ + drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__) +#define DRM_DEBUG_KMS(fmt, ...) \ + drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__) +#define DRM_DEBUG_PRIME(fmt, ...) \ + drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) +#define DRM_DEBUG_ATOMIC(fmt, ...) \ + drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__) +#define DRM_DEBUG_VBL(fmt, ...) \ + drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_LEASE(fmt, ...) \ + drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__) +#define DRM_DEBUG_DP(dev, fmt, ...) \ + drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__) + +#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__); \ +}) + +/** + * Rate limited debug output. Like DRM_DEBUG() but won't flood the log. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \ + _DEV_DRM_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE, \ + fmt, ##__VA_ARGS__) +#define DRM_DEBUG_RATELIMITED(fmt, ...) 
\ + DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \ + fmt, ##__VA_ARGS__) +#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \ + DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \ + fmt, ##__VA_ARGS__) +#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \ + DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \ + fmt, ##__VA_ARGS__) +#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \ + DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +#endif /* DRM_PRINT_H_ */ diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h new file mode 100644 index 000000000..c030f6cca --- /dev/null +++ b/include/drm/drm_property.h @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_PROPERTY_H__ +#define __DRM_PROPERTY_H__ + +#include +#include +#include + +/** + * struct drm_property_enum - symbolic values for enumerations + * @value: numeric property value for this enum entry + * @head: list of enum values, linked to &drm_property.enum_list + * @name: symbolic name for the enum + * + * For enumeration and bitmask properties this structure stores the symbolic + * decoding for each value. This is used for example for the rotation property. + */ +struct drm_property_enum { + uint64_t value; + struct list_head head; + char name[DRM_PROP_NAME_LEN]; +}; + +/** + * struct drm_property - modeset object property + * + * This structure represents a modeset object property. It combines the name + * of the property with the set of permissible values. This means that when a + * driver wants to use a property with the same name on different objects, but + * with different value ranges, then it must create a property for each one. An + * example would be rotation of &drm_plane, when e.g. the primary plane cannot + * be rotated. But if both the name and the value range match, then the same + * property structure can be instantiated multiple times for the same object.
+ * Userspace must be able to cope with this and cannot assume that the same + * symbolic property will have the same modeset object ID on all modeset + * objects. + * + * Properties are created by one of the special functions, as explained in + * detail in the @flags structure member. + * + * To actually expose a property it must be attached to each object using + * drm_object_attach_property(). Currently properties can only be attached to + * &drm_connector, &drm_crtc and &drm_plane. + * + * Properties are also used as the generic metadata transport for the atomic + * IOCTL. Everything that was set directly in structures in the legacy modeset + * IOCTLs (like the plane source or destination windows, or e.g. the links to + * the CRTC) is exposed as a property with the DRM_MODE_PROP_ATOMIC flag set. + */ +struct drm_property { + /** + * @head: per-device list of properties, for cleanup. + */ + struct list_head head; + + /** + * @base: base KMS object + */ + struct drm_mode_object base; + + /** + * @flags: + * + * Property flags and type. A property needs to be one of the following + * types: + * + * DRM_MODE_PROP_RANGE + * Range properties report their minimum and maximum admissible unsigned values. + * The KMS core verifies that values set by the application fit in that + * range. The range is unsigned. Range properties are created using + * drm_property_create_range(). + * + * DRM_MODE_PROP_SIGNED_RANGE + * Range properties report their minimum and maximum admissible signed values. + * The KMS core verifies that values set by the application fit in that + * range. The range is signed. Range properties are created using + * drm_property_create_signed_range(). + * + * DRM_MODE_PROP_ENUM + * Enumerated properties take a numerical value that ranges from 0 to + * the number of enumerated values defined by the property minus one, + * and associate a free-formed string name to each value. Applications + * can retrieve the list of defined value-name pairs and use the + * numerical value to get and set property instance values. Enum + * properties are created using drm_property_create_enum(). + * + * DRM_MODE_PROP_BITMASK + * Bitmask properties are enumeration properties that additionally + * restrict all enumerated values to the 0..63 range. Bitmask property + * instance values combine one or more of the enumerated bits defined + * by the property. Bitmask properties are created using + * drm_property_create_bitmask(). + * + * DRM_MODE_PROP_OBJECT + * Object properties are used to link modeset objects. This is used + * extensively in the atomic support to create the display pipeline, + * by linking &drm_framebuffer to &drm_plane, &drm_plane to + * &drm_crtc and &drm_connector to &drm_crtc. An object property can + * only link to a specific type of &drm_mode_object, this limit is + * enforced by the core. Object properties are created using + * drm_property_create_object(). + * + * Object properties work like blob properties, but in a more + * general fashion. They are limited to atomic drivers and must have + * the DRM_MODE_PROP_ATOMIC flag set. + * + * DRM_MODE_PROP_BLOB + * Blob properties store a binary blob without any format restriction. + * The binary blobs are created as KMS standalone objects, and blob + * property instance values store the ID of their associated blob + * object. Blob properties are created by calling + * drm_property_create() with DRM_MODE_PROP_BLOB as the type.
+ * + * Actual blob objects to contain blob data are created using + * drm_property_create_blob(), or through the corresponding IOCTL. + * + * Besides the built-in limit to only accept blob objects blob + * properties work exactly like object properties. The only reasons + * blob properties exist is backwards compatibility with existing + * userspace. + * + * In addition a property can have any combination of the below flags: + * + * DRM_MODE_PROP_ATOMIC + * Set for properties which encode atomic modeset state. Such + * properties are not exposed to legacy userspace. + * + * DRM_MODE_PROP_IMMUTABLE + * Set for properties whose values cannot be changed by + * userspace. The kernel is allowed to update the value of these + * properties. This is generally used to expose probe state to + * userspace, e.g. the EDID, or the connector path property on DP + * MST sinks. + */ + uint32_t flags; + + /** + * @name: symbolic name of the properties + */ + char name[DRM_PROP_NAME_LEN]; + + /** + * @num_values: size of the @values array. + */ + uint32_t num_values; + + /** + * @values: + * + * Array with limits and values for the property. The + * interpretation of these limits is dependent upon the type per @flags. + */ + uint64_t *values; + + /** + * @dev: DRM device + */ + struct drm_device *dev; + + /** + * @enum_list: + * + * List of &drm_prop_enum_list structures with the symbolic names for + * enum and bitmask values. + */ + struct list_head enum_list; +}; + +/** + * struct drm_property_blob - Blob data for &drm_property + * @base: base KMS object + * @dev: DRM device + * @head_global: entry on the global blob list in + * &drm_mode_config.property_blob_list. + * @head_file: entry on the per-file blob list in &drm_file.blobs list. + * @length: size of the blob in bytes, invariant over the lifetime of the object + * @data: actual data, embedded at the end of this structure + * + * Blobs are used to store bigger values than what fits directly into the 64 + * bits available for a &drm_property. + * + * Blobs are reference counted using drm_property_blob_get() and + * drm_property_blob_put(). They are created using drm_property_create_blob(). + */ +struct drm_property_blob { + struct drm_mode_object base; + struct drm_device *dev; + struct list_head head_global; + struct list_head head_file; + size_t length; + void *data; +}; + +struct drm_prop_enum_list { + int type; + const char *name; +}; + +#define obj_to_property(x) container_of(x, struct drm_property, base) +#define obj_to_blob(x) container_of(x, struct drm_property_blob, base) + +/** + * drm_property_type_is - check the type of a property + * @property: property to check + * @type: property type to compare with + * + * This is a helper function becauase the uapi encoding of property types is + * a bit special for historical reasons. + */ +static inline bool drm_property_type_is(struct drm_property *property, + uint32_t type) +{ + /* instanceof for props.. 
handles extended type vs original types: */ + if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) + return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type; + return property->flags & type; +} + +struct drm_property *drm_property_create(struct drm_device *dev, + u32 flags, const char *name, + int num_values); +struct drm_property *drm_property_create_enum(struct drm_device *dev, + u32 flags, const char *name, + const struct drm_prop_enum_list *props, + int num_values); +struct drm_property *drm_property_create_bitmask(struct drm_device *dev, + u32 flags, const char *name, + const struct drm_prop_enum_list *props, + int num_props, + uint64_t supported_bits); +struct drm_property *drm_property_create_range(struct drm_device *dev, + u32 flags, const char *name, + uint64_t min, uint64_t max); +struct drm_property *drm_property_create_signed_range(struct drm_device *dev, + u32 flags, const char *name, + int64_t min, int64_t max); +struct drm_property *drm_property_create_object(struct drm_device *dev, + u32 flags, const char *name, + uint32_t type); +struct drm_property *drm_property_create_bool(struct drm_device *dev, + u32 flags, const char *name); +int drm_property_add_enum(struct drm_property *property, + uint64_t value, const char *name); +void drm_property_destroy(struct drm_device *dev, struct drm_property *property); + +struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, + size_t length, + const void *data); +struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev, + uint32_t id); +int drm_property_replace_global_blob(struct drm_device *dev, + struct drm_property_blob **replace, + size_t length, + const void *data, + struct drm_mode_object *obj_holds_id, + struct drm_property *prop_holds_id); +bool drm_property_replace_blob(struct drm_property_blob **blob, + struct drm_property_blob *new_blob); +struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob); +void drm_property_blob_put(struct drm_property_blob *blob); + +/** + * drm_property_find - find property object + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: property object id + * + * This function looks up the property object specified by id and returns it. + */ +static inline struct drm_property *drm_property_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PROPERTY); + return mo ? obj_to_property(mo) : NULL; +} + +#endif diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h new file mode 100644 index 000000000..6c54544a4 --- /dev/null +++ b/include/drm/drm_rect.h @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2011-2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_RECT_H +#define DRM_RECT_H + +/** + * DOC: rect utils + * + * Utility functions to help manage rectangular areas for + * clipping, scaling, etc. calculations. + */ + +/** + * struct drm_rect - two dimensional rectangle + * @x1: horizontal starting coordinate (inclusive) + * @x2: horizontal ending coordinate (exclusive) + * @y1: vertical starting coordinate (inclusive) + * @y2: vertical ending coordinate (exclusive) + */ +struct drm_rect { + int x1, y1, x2, y2; +}; + +/** + * DRM_RECT_FMT - printf string for &struct drm_rect + */ +#define DRM_RECT_FMT "%dx%d%+d%+d" +/** + * DRM_RECT_ARG - printf arguments for &struct drm_rect + * @r: rectangle struct + */ +#define DRM_RECT_ARG(r) drm_rect_width(r), drm_rect_height(r), (r)->x1, (r)->y1 + +/** + * DRM_RECT_FP_FMT - printf string for &struct drm_rect in 16.16 fixed point + */ +#define DRM_RECT_FP_FMT "%d.%06ux%d.%06u%+d.%06u%+d.%06u" +/** + * DRM_RECT_FP_ARG - printf arguments for &struct drm_rect in 16.16 fixed point + * @r: rectangle struct + * + * This is useful for e.g. printing plane source rectangles, which are in 16.16 + * fixed point. + */ +#define DRM_RECT_FP_ARG(r) \ + drm_rect_width(r) >> 16, ((drm_rect_width(r) & 0xffff) * 15625) >> 10, \ + drm_rect_height(r) >> 16, ((drm_rect_height(r) & 0xffff) * 15625) >> 10, \ + (r)->x1 >> 16, (((r)->x1 & 0xffff) * 15625) >> 10, \ + (r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10 + +/** + * drm_rect_adjust_size - adjust the size of the rectangle + * @r: rectangle to be adjusted + * @dw: horizontal adjustment + * @dh: vertical adjustment + * + * Change the size of rectangle @r by @dw in the horizontal direction, + * and by @dh in the vertical direction, while keeping the center + * of @r stationary. + * + * Positive @dw and @dh increase the size, negative values decrease it. + */ +static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh) +{ + r->x1 -= dw >> 1; + r->y1 -= dh >> 1; + r->x2 += (dw + 1) >> 1; + r->y2 += (dh + 1) >> 1; +} + +/** + * drm_rect_translate - translate the rectangle + * @r: rectangle to be tranlated + * @dx: horizontal translation + * @dy: vertical translation + * + * Move rectangle @r by @dx in the horizontal direction, + * and by @dy in the vertical direction. + */ +static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy) +{ + r->x1 += dx; + r->y1 += dy; + r->x2 += dx; + r->y2 += dy; +} + +/** + * drm_rect_downscale - downscale a rectangle + * @r: rectangle to be downscaled + * @horz: horizontal downscale factor + * @vert: vertical downscale factor + * + * Divide the coordinates of rectangle @r by @horz and @vert. + */ +static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert) +{ + r->x1 /= horz; + r->y1 /= vert; + r->x2 /= horz; + r->y2 /= vert; +} + +/** + * drm_rect_width - determine the rectangle width + * @r: rectangle whose width is returned + * + * RETURNS: + * The width of the rectangle. 
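+ *
+ * A small worked example, with values picked purely for illustration:
+ *
+ *	struct drm_rect r = { .x1 = 16, .y1 = 0, .x2 = 272, .y2 = 144 };
+ *
+ * gives drm_rect_width(&r) == 256 and drm_rect_height(&r) == 144, and
+ * printing it with DRM_RECT_FMT/DRM_RECT_ARG() yields "256x144+16+0".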
+ */ +static inline int drm_rect_width(const struct drm_rect *r) +{ + return r->x2 - r->x1; +} + +/** + * drm_rect_height - determine the rectangle height + * @r: rectangle whose height is returned + * + * RETURNS: + * The height of the rectangle. + */ +static inline int drm_rect_height(const struct drm_rect *r) +{ + return r->y2 - r->y1; +} + +/** + * drm_rect_visible - determine if the the rectangle is visible + * @r: rectangle whose visibility is returned + * + * RETURNS: + * %true if the rectangle is visible, %false otherwise. + */ +static inline bool drm_rect_visible(const struct drm_rect *r) +{ + return drm_rect_width(r) > 0 && drm_rect_height(r) > 0; +} + +/** + * drm_rect_equals - determine if two rectangles are equal + * @r1: first rectangle + * @r2: second rectangle + * + * RETURNS: + * %true if the rectangles are equal, %false otherwise. + */ +static inline bool drm_rect_equals(const struct drm_rect *r1, + const struct drm_rect *r2) +{ + return r1->x1 == r2->x1 && r1->x2 == r2->x2 && + r1->y1 == r2->y1 && r1->y2 == r2->y2; +} + +bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); +bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, + const struct drm_rect *clip); +int drm_rect_calc_hscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_hscale, int max_hscale); +int drm_rect_calc_vscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_vscale, int max_vscale); +int drm_rect_calc_hscale_relaxed(struct drm_rect *src, + struct drm_rect *dst, + int min_hscale, int max_hscale); +int drm_rect_calc_vscale_relaxed(struct drm_rect *src, + struct drm_rect *dst, + int min_vscale, int max_vscale); +void drm_rect_debug_print(const char *prefix, + const struct drm_rect *r, bool fixed_point); +void drm_rect_rotate(struct drm_rect *r, + int width, int height, + unsigned int rotation); +void drm_rect_rotate_inv(struct drm_rect *r, + int width, int height, + unsigned int rotation); + +#endif diff --git a/include/drm/drm_scdc_helper.h b/include/drm/drm_scdc_helper.h new file mode 100644 index 000000000..f92eb2094 --- /dev/null +++ b/include/drm/drm_scdc_helper.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2015 NVIDIA Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef DRM_SCDC_HELPER_H
+#define DRM_SCDC_HELPER_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#define SCDC_SINK_VERSION 0x01
+
+#define SCDC_SOURCE_VERSION 0x02
+
+#define SCDC_UPDATE_0 0x10
+#define SCDC_READ_REQUEST_TEST (1 << 2)
+#define SCDC_CED_UPDATE (1 << 1)
+#define SCDC_STATUS_UPDATE (1 << 0)
+
+#define SCDC_UPDATE_1 0x11
+
+#define SCDC_TMDS_CONFIG 0x20
+#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 (1 << 1)
+#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_10 (0 << 1)
+#define SCDC_SCRAMBLING_ENABLE (1 << 0)
+
+#define SCDC_SCRAMBLER_STATUS 0x21
+#define SCDC_SCRAMBLING_STATUS (1 << 0)
+
+#define SCDC_CONFIG_0 0x30
+#define SCDC_READ_REQUEST_ENABLE (1 << 0)
+
+#define SCDC_STATUS_FLAGS_0 0x40
+#define SCDC_CH2_LOCK (1 << 3)
+#define SCDC_CH1_LOCK (1 << 2)
+#define SCDC_CH0_LOCK (1 << 1)
+#define SCDC_CH_LOCK_MASK (SCDC_CH2_LOCK | SCDC_CH1_LOCK | SCDC_CH0_LOCK)
+#define SCDC_CLOCK_DETECT (1 << 0)
+
+#define SCDC_STATUS_FLAGS_1 0x41
+
+#define SCDC_ERR_DET_0_L 0x50
+#define SCDC_ERR_DET_0_H 0x51
+#define SCDC_ERR_DET_1_L 0x52
+#define SCDC_ERR_DET_1_H 0x53
+#define SCDC_ERR_DET_2_L 0x54
+#define SCDC_ERR_DET_2_H 0x55
+#define SCDC_CHANNEL_VALID (1 << 7)
+
+#define SCDC_ERR_DET_CHECKSUM 0x56
+
+#define SCDC_TEST_CONFIG_0 0xc0
+#define SCDC_TEST_READ_REQUEST (1 << 7)
+#define SCDC_TEST_READ_REQUEST_DELAY(x) ((x) & 0x7f)
+
+#define SCDC_MANUFACTURER_IEEE_OUI 0xd0
+#define SCDC_MANUFACTURER_IEEE_OUI_SIZE 3
+
+#define SCDC_DEVICE_ID 0xd3
+#define SCDC_DEVICE_ID_SIZE 8
+
+#define SCDC_DEVICE_HARDWARE_REVISION 0xdb
+#define SCDC_GET_DEVICE_HARDWARE_REVISION_MAJOR(x) (((x) >> 4) & 0xf)
+#define SCDC_GET_DEVICE_HARDWARE_REVISION_MINOR(x) (((x) >> 0) & 0xf)
+
+#define SCDC_DEVICE_SOFTWARE_MAJOR_REVISION 0xdc
+#define SCDC_DEVICE_SOFTWARE_MINOR_REVISION 0xdd
+
+#define SCDC_MANUFACTURER_SPECIFIC 0xde
+#define SCDC_MANUFACTURER_SPECIFIC_SIZE 34
+
+ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
+		      size_t size);
+ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
+		       const void *buffer, size_t size);
+
+/**
+ * drm_scdc_readb - read a single byte from SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to read
+ * @value: return location for the register value
+ *
+ * Reads a single byte from SCDC. This is a convenience wrapper around the
+ * drm_scdc_read() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset,
+				 u8 *value)
+{
+	return drm_scdc_read(adapter, offset, value, sizeof(*value));
+}
+
+/**
+ * drm_scdc_writeb - write a single byte to SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to write
+ * @value: value to write
+ *
+ * Writes a single byte to SCDC. This is a convenience wrapper around the
+ * drm_scdc_write() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
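+ *
+ * A minimal usage sketch, with a hypothetical adapter and error handling
+ * omitted:
+ *
+ *	u8 config;
+ *
+ *	drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
+ *	config |= SCDC_SCRAMBLING_ENABLE;
+ *	drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
+ *
+ * In practice drivers would usually reach for the higher-level helpers such
+ * as drm_scdc_set_scrambling() declared below.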
+ */ +static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset, + u8 value) +{ + return drm_scdc_write(adapter, offset, &value, sizeof(value)); +} + +bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter); + +bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable); +bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set); +#endif diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h new file mode 100644 index 000000000..451960438 --- /dev/null +++ b/include/drm/drm_simple_kms_helper.h @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H +#define __LINUX_DRM_SIMPLE_KMS_HELPER_H + +#include +#include +#include + +struct drm_simple_display_pipe; + +/** + * struct drm_simple_display_pipe_funcs - helper operations for a simple + * display pipeline + */ +struct drm_simple_display_pipe_funcs { + /** + * @mode_valid: + * + * This callback is used to check if a specific mode is valid in the + * crtc used in this simple display pipe. This should be implemented + * if the display pipe has some sort of restriction in the modes + * it can display. For example, a given display pipe may be responsible + * to set a clock value. If the clock can not produce all the values + * for the available modes then this callback can be used to restrict + * the number of modes to only the ones that can be displayed. Another + * reason can be bandwidth mitigation: the memory port on the display + * controller can have bandwidth limitations not allowing pixel data + * to be fetched at any rate. + * + * This hook is used by the probe helpers to filter the mode list in + * drm_helper_probe_single_connector_modes(), and it is used by the + * atomic helpers to validate modes supplied by userspace in + * drm_atomic_helper_check_modeset(). + * + * This function is optional. + * + * NOTE: + * + * Since this function is both called from the check phase of an atomic + * commit, and the mode validation in the probe paths it is not allowed + * to look at anything else but the passed-in mode, and validate it + * against configuration-invariant hardware constraints. + * + * RETURNS: + * + * drm_mode_status Enum + */ + enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc, + const struct drm_display_mode *mode); + + /** + * @enable: + * + * This function should be used to enable the pipeline. + * It is called when the underlying crtc is enabled. + * This hook is optional. + */ + void (*enable)(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plane_state); + /** + * @disable: + * + * This function should be used to disable the pipeline. + * It is called when the underlying crtc is disabled. + * This hook is optional. + */ + void (*disable)(struct drm_simple_display_pipe *pipe); + + /** + * @check: + * + * This function is called in the check phase of an atomic update, + * specifically when the underlying plane is checked. + * The simple display pipeline helpers already check that the plane is + * not scaled, fills the entire visible area and is always enabled + * when the crtc is also enabled. + * This hook is optional. 
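+ *
+ * A minimal sketch of such a hook; the 4096 byte stride limit checked here
+ * is made up purely for illustration:
+ *
+ *	static int my_pipe_check(struct drm_simple_display_pipe *pipe,
+ *				 struct drm_plane_state *plane_state,
+ *				 struct drm_crtc_state *crtc_state)
+ *	{
+ *		if (plane_state->fb && plane_state->fb->pitches[0] > 4096)
+ *			return -EINVAL;
+ *
+ *		return 0;
+ *	}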
+ * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*check)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state, + struct drm_crtc_state *crtc_state); + /** + * @update: + * + * This function is called when the underlying plane state is updated. + * This hook is optional. + * + * This is the function drivers should submit the + * &drm_pending_vblank_event from. Using either + * drm_crtc_arm_vblank_event(), when the driver supports vblank + * interrupt handling, or drm_crtc_send_vblank_event() directly in case + * the hardware lacks vblank support entirely. + */ + void (*update)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_plane_state); + + /** + * @prepare_fb: + * + * Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read + * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for + * more details. + * + * Drivers which always have their buffers pinned should use + * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook. + */ + int (*prepare_fb)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + + /** + * @cleanup_fb: + * + * Optional, called by &drm_plane_helper_funcs.cleanup_fb. Please read + * the documentation for the &drm_plane_helper_funcs.cleanup_fb hook for + * more details. + */ + void (*cleanup_fb)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + + /** + * @enable_vblank: + * + * Optional, called by &drm_crtc_funcs.enable_vblank. Please read + * the documentation for the &drm_crtc_funcs.enable_vblank hook for + * more details. + */ + int (*enable_vblank)(struct drm_simple_display_pipe *pipe); + + /** + * @disable_vblank: + * + * Optional, called by &drm_crtc_funcs.disable_vblank. Please read + * the documentation for the &drm_crtc_funcs.disable_vblank hook for + * more details. + */ + void (*disable_vblank)(struct drm_simple_display_pipe *pipe); +}; + +/** + * struct drm_simple_display_pipe - simple display pipeline + * @crtc: CRTC control structure + * @plane: Plane control structure + * @encoder: Encoder control structure + * @connector: Connector control structure + * @funcs: Pipeline control functions (optional) + * + * Simple display pipeline with plane, crtc and encoder collapsed into one + * entity. It should be initialized by calling drm_simple_display_pipe_init(). 
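+ *
+ * A rough initialization sketch; priv, my_pipe_funcs and the connector used
+ * here are driver-specific placeholders, not defined by this header:
+ *
+ *	static const uint32_t my_formats[] = { DRM_FORMAT_XRGB8888 };
+ *
+ *	ret = drm_simple_display_pipe_init(drm, &priv->pipe, &my_pipe_funcs,
+ *					   my_formats, ARRAY_SIZE(my_formats),
+ *					   NULL, &priv->connector);
+ *	if (ret)
+ *		return ret;
+ *
+ * Passing NULL format modifiers is the common case for drivers without
+ * modifier support.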
+ */ +struct drm_simple_display_pipe { + struct drm_crtc crtc; + struct drm_plane plane; + struct drm_encoder encoder; + struct drm_connector *connector; + + const struct drm_simple_display_pipe_funcs *funcs; +}; + +int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe, + struct drm_bridge *bridge); + +int drm_simple_display_pipe_init(struct drm_device *dev, + struct drm_simple_display_pipe *pipe, + const struct drm_simple_display_pipe_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + const uint64_t *format_modifiers, + struct drm_connector *connector); + +#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */ diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h new file mode 100644 index 000000000..398060247 --- /dev/null +++ b/include/drm/drm_syncobj.h @@ -0,0 +1,151 @@ +/* + * Copyright © 2017 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * + */ +#ifndef __DRM_SYNCOBJ_H__ +#define __DRM_SYNCOBJ_H__ + +#include "linux/dma-fence.h" + +struct drm_syncobj_cb; + +/** + * struct drm_syncobj - sync object. + * + * This structure defines a generic sync object which wraps a &dma_fence. + */ +struct drm_syncobj { + /** + * @refcount: Reference count of this object. + */ + struct kref refcount; + /** + * @fence: + * NULL or a pointer to the fence bound to this object. + * + * This field should not be used directly. Use drm_syncobj_fence_get() + * and drm_syncobj_replace_fence() instead. + */ + struct dma_fence __rcu *fence; + /** + * @cb_list: List of callbacks to call when the &fence gets replaced. + */ + struct list_head cb_list; + /** + * @lock: Protects &cb_list and write-locks &fence. + */ + spinlock_t lock; + /** + * @file: A file backing for this syncobj. + */ + struct file *file; +}; + +typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb); + +/** + * struct drm_syncobj_cb - callback for drm_syncobj_add_callback + * @node: used by drm_syncob_add_callback to append this struct to + * &drm_syncobj.cb_list + * @func: drm_syncobj_func_t to call + * + * This struct will be initialized by drm_syncobj_add_callback, additional + * data can be passed along by embedding drm_syncobj_cb in another struct. + * The callback will get called the next time drm_syncobj_replace_fence is + * called. 
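+ *
+ * A hedged sketch of that embedding pattern; struct my_waiter and
+ * my_cb_func() are made-up names, and the waiter is assumed to be allocated
+ * by the caller:
+ *
+ *	struct my_waiter {
+ *		struct drm_syncobj_cb cb;
+ *		struct task_struct *task;
+ *	};
+ *
+ *	static void my_cb_func(struct drm_syncobj *syncobj,
+ *			       struct drm_syncobj_cb *cb)
+ *	{
+ *		struct my_waiter *waiter = container_of(cb, struct my_waiter, cb);
+ *
+ *		wake_up_process(waiter->task);
+ *	}
+ *
+ *	drm_syncobj_add_callback(syncobj, &waiter->cb, my_cb_func);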
+ */ +struct drm_syncobj_cb { + struct list_head node; + drm_syncobj_func_t func; +}; + +void drm_syncobj_free(struct kref *kref); + +/** + * drm_syncobj_get - acquire a syncobj reference + * @obj: sync object + * + * This acquires an additional reference to @obj. It is illegal to call this + * without already holding a reference. No locks required. + */ +static inline void +drm_syncobj_get(struct drm_syncobj *obj) +{ + kref_get(&obj->refcount); +} + +/** + * drm_syncobj_put - release a reference to a sync object. + * @obj: sync object. + */ +static inline void +drm_syncobj_put(struct drm_syncobj *obj) +{ + kref_put(&obj->refcount, drm_syncobj_free); +} + +/** + * drm_syncobj_fence_get - get a reference to a fence in a sync object + * @syncobj: sync object. + * + * This acquires additional reference to &drm_syncobj.fence contained in @obj, + * if not NULL. It is illegal to call this without already holding a reference. + * No locks required. + * + * Returns: + * Either the fence of @obj or NULL if there's none. + */ +static inline struct dma_fence * +drm_syncobj_fence_get(struct drm_syncobj *syncobj) +{ + struct dma_fence *fence; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&syncobj->fence); + rcu_read_unlock(); + + return fence; +} + +struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, + u32 handle); +void drm_syncobj_add_callback(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb, + drm_syncobj_func_t func); +void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb); +void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, + struct dma_fence *fence); +int drm_syncobj_find_fence(struct drm_file *file_private, + u32 handle, + struct dma_fence **fence); +void drm_syncobj_free(struct kref *kref); +int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, + struct dma_fence *fence); +int drm_syncobj_get_handle(struct drm_file *file_private, + struct drm_syncobj *syncobj, u32 *handle); +int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd); + +#endif diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h new file mode 100644 index 000000000..4f311e836 --- /dev/null +++ b/include/drm/drm_sysfs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DRM_SYSFS_H_ +#define _DRM_SYSFS_H_ + +struct drm_device; +struct device; + +int drm_class_device_register(struct device *dev); +void drm_class_device_unregister(struct device *dev); + +void drm_sysfs_hotplug_event(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h new file mode 100644 index 000000000..a803988d8 --- /dev/null +++ b/include/drm/drm_utils.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Function prototypes for misc. drm utility functions. + * Specifically this file is for function prototypes for functions which + * may also be used outside of drm code (e.g. in fbdev drivers). + * + * Copyright (C) 2017 Hans de Goede + */ + +#ifndef __DRM_UTILS_H__ +#define __DRM_UTILS_H__ + +int drm_get_panel_orientation_quirk(int width, int height); + +#endif diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h new file mode 100644 index 000000000..e9c676381 --- /dev/null +++ b/include/drm/drm_vblank.h @@ -0,0 +1,231 @@ +/* + * Copyright 2016 Intel Corp. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_VBLANK_H_ +#define _DRM_VBLANK_H_ + +#include +#include +#include + +#include +#include +#include + +struct drm_device; +struct drm_crtc; + +/** + * struct drm_pending_vblank_event - pending vblank event tracking + */ +struct drm_pending_vblank_event { + /** + * @base: Base structure for tracking pending DRM events. + */ + struct drm_pending_event base; + /** + * @pipe: drm_crtc_index() of the &drm_crtc this event is for. + */ + unsigned int pipe; + /** + * @sequence: frame event should be triggered at + */ + u64 sequence; + /** + * @event: Actual event which will be sent to userspace. + */ + union { + /** + * @event.base: DRM event base class. + */ + struct drm_event base; + + /** + * @event.vbl: + * + * Event payload for vblank events, requested through + * either the MODE_PAGE_FLIP or MODE_ATOMIC IOCTL. Also + * generated by the legacy WAIT_VBLANK IOCTL, but new userspace + * should use MODE_QUEUE_SEQUENCE and &event.seq instead. + */ + struct drm_event_vblank vbl; + + /** + * @event.seq: Event payload for the MODE_QUEUEU_SEQUENCE IOCTL. + */ + struct drm_event_crtc_sequence seq; + } event; +}; + +/** + * struct drm_vblank_crtc - vblank tracking for a CRTC + * + * This structure tracks the vblank state for one CRTC. + * + * Note that for historical reasons - the vblank handling code is still shared + * with legacy/non-kms drivers - this is a free-standing structure not directly + * connected to &struct drm_crtc. But all public interface functions are taking + * a &struct drm_crtc to hide this implementation detail. + */ +struct drm_vblank_crtc { + /** + * @dev: Pointer to the &drm_device. + */ + struct drm_device *dev; + /** + * @queue: Wait queue for vblank waiters. + */ + wait_queue_head_t queue; /**< VBLANK wait queue */ + /** + * @disable_timer: Disable timer for the delayed vblank disabling + * hysteresis logic. Vblank disabling is controlled through the + * drm_vblank_offdelay module option and the setting of the + * &drm_device.max_vblank_count value. + */ + struct timer_list disable_timer; + + /** + * @seqlock: Protect vblank count and time. + */ + seqlock_t seqlock; /* protects vblank count and time */ + + /** + * @count: Current software vblank counter. + */ + u64 count; + /** + * @time: Vblank timestamp corresponding to @count. + */ + ktime_t time; + + /** + * @refcount: Number of users/waiters of the vblank interrupt. 
Only when + * this refcount reaches 0 can the hardware interrupt be disabled using + * @disable_timer. + */ + atomic_t refcount; /* number of users of vblank interruptsper crtc */ + /** + * @last: Protected by &drm_device.vbl_lock, used for wraparound handling. + */ + u32 last; + /** + * @max_vblank_count: + * + * Maximum value of the vblank registers for this crtc. This value +1 + * will result in a wrap-around of the vblank register. It is used + * by the vblank core to handle wrap-arounds. + * + * If set to zero the vblank core will try to guess the elapsed vblanks + * between times when the vblank interrupt is disabled through + * high-precision timestamps. That approach is suffering from small + * races and imprecision over longer time periods, hence exposing a + * hardware vblank counter is always recommended. + * + * This is the runtime configurable per-crtc maximum set through + * drm_crtc_set_max_vblank_count(). If this is used the driver + * must leave the device wide &drm_device.max_vblank_count at zero. + * + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set. + */ + u32 max_vblank_count; + /** + * @inmodeset: Tracks whether the vblank is disabled due to a modeset. + * For legacy driver bit 2 additionally tracks whether an additional + * temporary vblank reference has been acquired to paper over the + * hardware counter resetting/jumping. KMS drivers should instead just + * call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly + * save and restore the vblank count. + */ + unsigned int inmodeset; /* Display driver is setting mode */ + /** + * @pipe: drm_crtc_index() of the &drm_crtc corresponding to this + * structure. + */ + unsigned int pipe; + /** + * @framedur_ns: Frame/Field duration in ns, used by + * drm_calc_vbltimestamp_from_scanoutpos() and computed by + * drm_calc_timestamping_constants(). + */ + int framedur_ns; + /** + * @linedur_ns: Line duration in ns, used by + * drm_calc_vbltimestamp_from_scanoutpos() and computed by + * drm_calc_timestamping_constants(). + */ + int linedur_ns; + + /** + * @hwmode: + * + * Cache of the current hardware display mode. Only valid when @enabled + * is set. This is used by helpers like + * drm_calc_vbltimestamp_from_scanoutpos(). We can't just access the + * hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode, + * because that one is really hard to get from interrupt context. + */ + struct drm_display_mode hwmode; + + /** + * @enabled: Tracks the enabling state of the corresponding &drm_crtc to + * avoid double-disabling and hence corrupting saved state. Needed by + * drivers not using atomic KMS, since those might go through their CRTC + * disabling functions multiple times. 
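+ *
+ * The drm_crtc_vblank_on()/drm_crtc_vblank_off() calls mentioned above are
+ * what keep this state consistent; a sketch for an atomic driver, where
+ * my_hw_enable()/my_hw_disable() stand in for the actual hardware
+ * programming:
+ *
+ *	static void my_crtc_atomic_enable(struct drm_crtc *crtc,
+ *					  struct drm_crtc_state *old_state)
+ *	{
+ *		my_hw_enable(crtc);
+ *		drm_crtc_vblank_on(crtc);
+ *	}
+ *
+ *	static void my_crtc_atomic_disable(struct drm_crtc *crtc,
+ *					   struct drm_crtc_state *old_state)
+ *	{
+ *		drm_crtc_vblank_off(crtc);
+ *		my_hw_disable(crtc);
+ *	}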
+ */ + bool enabled; +}; + +int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); +u64 drm_crtc_vblank_count(struct drm_crtc *crtc); +u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, + ktime_t *vblanktime); +void drm_crtc_send_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +void drm_vblank_set_event(struct drm_pending_vblank_event *e, + u64 *seq, + ktime_t *now); +bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); +bool drm_crtc_handle_vblank(struct drm_crtc *crtc); +int drm_crtc_vblank_get(struct drm_crtc *crtc); +void drm_crtc_vblank_put(struct drm_crtc *crtc); +void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); +void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); +void drm_crtc_vblank_off(struct drm_crtc *crtc); +void drm_crtc_vblank_reset(struct drm_crtc *crtc); +void drm_crtc_vblank_on(struct drm_crtc *crtc); +u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); +void drm_vblank_restore(struct drm_device *dev, unsigned int pipe); +void drm_crtc_vblank_restore(struct drm_crtc *crtc); + +bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, + unsigned int pipe, int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq); +void drm_calc_timestamping_constants(struct drm_crtc *crtc, + const struct drm_display_mode *mode); +wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc); +void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, + u32 max_vblank_count); +#endif diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h new file mode 100644 index 000000000..c7987daea --- /dev/null +++ b/include/drm/drm_vma_manager.h @@ -0,0 +1,234 @@ +#ifndef __DRM_VMA_MANAGER_H__ +#define __DRM_VMA_MANAGER_H__ + +/* + * Copyright (c) 2013 David Herrmann + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include + +struct drm_file; + +struct drm_vma_offset_file { + struct rb_node vm_rb; + struct drm_file *vm_tag; + unsigned long vm_count; +}; + +struct drm_vma_offset_node { + rwlock_t vm_lock; + struct drm_mm_node vm_node; + struct rb_root vm_files; + bool readonly:1; +}; + +struct drm_vma_offset_manager { + rwlock_t vm_lock; + struct drm_mm vm_addr_space_mm; +}; + +void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr, + unsigned long page_offset, unsigned long size); +void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr); + +struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages); +int drm_vma_offset_add(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node, unsigned long pages); +void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node); + +int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag); +void drm_vma_node_revoke(struct drm_vma_offset_node *node, + struct drm_file *tag); +bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node, + struct drm_file *tag); + +/** + * drm_vma_offset_exact_lookup_locked() - Look up node by exact address + * @mgr: Manager object + * @start: Start address (page-based, not byte-based) + * @pages: Size of object (page-based) + * + * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node. + * It only returns the exact object with the given start address. + * + * RETURNS: + * Node at exact start address @start. + */ +static inline struct drm_vma_offset_node * +drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages) +{ + struct drm_vma_offset_node *node; + + node = drm_vma_offset_lookup_locked(mgr, start, pages); + return (node && node->vm_node.start == start) ? node : NULL; +} + +/** + * drm_vma_offset_lock_lookup() - Lock lookup for extended private use + * @mgr: Manager object + * + * Lock VMA manager for extended lookups. Only locked VMA function calls + * are allowed while holding this lock. All other contexts are blocked from VMA + * until the lock is released via drm_vma_offset_unlock_lookup(). + * + * Use this if you need to take a reference to the objects returned by + * drm_vma_offset_lookup_locked() before releasing this lock again. + * + * This lock must not be used for anything else than extended lookups. You must + * not call any other VMA helpers while holding this lock. + * + * Note: You're in atomic-context while holding this lock! + */ +static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr) +{ + read_lock(&mgr->vm_lock); +} + +/** + * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use + * @mgr: Manager object + * + * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information. + */ +static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr) +{ + read_unlock(&mgr->vm_lock); +} + +/** + * drm_vma_node_reset() - Initialize or reset node object + * @node: Node to initialize or reset + * + * Reset a node to its initial state. This must be called before using it with + * any VMA offset manager. + * + * This must not be called on an already allocated node, or you will leak + * memory. 
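+ *
+ * A simplified GEM-style usage sketch, where mgr, obj->vma_node and
+ * obj->size are hypothetical driver-side names:
+ *
+ *	drm_vma_node_reset(&obj->vma_node);
+ *
+ *	ret = drm_vma_offset_add(mgr, &obj->vma_node,
+ *				 obj->size >> PAGE_SHIFT);
+ *	if (ret)
+ *		return ret;
+ *
+ *	offset = drm_vma_node_offset_addr(&obj->vma_node);
+ *
+ * The resulting offset is what gets handed to userspace for use with mmap().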
+ */ +static inline void drm_vma_node_reset(struct drm_vma_offset_node *node) +{ + memset(node, 0, sizeof(*node)); + node->vm_files = RB_ROOT; + rwlock_init(&node->vm_lock); +} + +/** + * drm_vma_node_start() - Return start address for page-based addressing + * @node: Node to inspect + * + * Return the start address of the given node. This can be used as offset into + * the linear VM space that is provided by the VMA offset manager. Note that + * this can only be used for page-based addressing. If you need a proper offset + * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the + * drm_vma_node_offset_addr() helper instead. + * + * RETURNS: + * Start address of @node for page-based addressing. 0 if the node does not + * have an offset allocated. + */ +static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node) +{ + return node->vm_node.start; +} + +/** + * drm_vma_node_size() - Return size (page-based) + * @node: Node to inspect + * + * Return the size as number of pages for the given node. This is the same size + * that was passed to drm_vma_offset_add(). If no offset is allocated for the + * node, this is 0. + * + * RETURNS: + * Size of @node as number of pages. 0 if the node does not have an offset + * allocated. + */ +static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node) +{ + return node->vm_node.size; +} + +/** + * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps + * @node: Linked offset node + * + * Same as drm_vma_node_start() but returns the address as a valid offset that + * can be used for user-space mappings during mmap(). + * This must not be called on unlinked nodes. + * + * RETURNS: + * Offset of @node for byte-based addressing. 0 if the node does not have an + * object allocated. + */ +static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node) +{ + return ((__u64)node->vm_node.start) << PAGE_SHIFT; +} + +/** + * drm_vma_node_unmap() - Unmap offset node + * @node: Offset node + * @file_mapping: Address space to unmap @node from + * + * Unmap all userspace mappings for a given offset node. The mappings must be + * associated with the @file_mapping address-space. If no offset exists + * nothing is done. + * + * This call is unlocked. The caller must guarantee that drm_vma_offset_remove() + * is not called on this node concurrently. + */ +static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, + struct address_space *file_mapping) +{ + if (drm_mm_node_allocated(&node->vm_node)) + unmap_mapping_range(file_mapping, + drm_vma_node_offset_addr(node), + drm_vma_node_size(node) << PAGE_SHIFT, 1); +} + +/** + * drm_vma_node_verify_access() - Access verification helper for TTM + * @node: Offset node + * @tag: Tag of file to check + * + * This checks whether @tag is granted access to @node. It is the same as + * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM + * verify_access() callbacks. + * + * RETURNS: + * 0 if access is granted, -EACCES otherwise. + */ +static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node, + struct drm_file *tag) +{ + return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES; +} + +#endif /* __DRM_VMA_MANAGER_H__ */ diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h new file mode 100644 index 000000000..23df9d463 --- /dev/null +++ b/include/drm/drm_writeback.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. 
+ * Author: Brian Starkey + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + */ + +#ifndef __DRM_WRITEBACK_H__ +#define __DRM_WRITEBACK_H__ +#include +#include +#include + +struct drm_writeback_connector { + struct drm_connector base; + + /** + * @encoder: Internal encoder used by the connector to fulfill + * the DRM framework requirements. The users of the + * @drm_writeback_connector control the behaviour of the @encoder + * by passing the @enc_funcs parameter to drm_writeback_connector_init() + * function. + */ + struct drm_encoder encoder; + + /** + * @pixel_formats_blob_ptr: + * + * DRM blob property data for the pixel formats list on writeback + * connectors + * See also drm_writeback_connector_init() + */ + struct drm_property_blob *pixel_formats_blob_ptr; + + /** @job_lock: Protects job_queue */ + spinlock_t job_lock; + + /** + * @job_queue: + * + * Holds a list of a connector's writeback jobs; the last item is the + * most recent. The first item may be either waiting for the hardware + * to begin writing, or currently being written. + * + * See also: drm_writeback_queue_job() and + * drm_writeback_signal_completion() + */ + struct list_head job_queue; + + /** + * @fence_context: + * + * timeline context used for fence operations. + */ + unsigned int fence_context; + /** + * @fence_lock: + * + * spinlock to protect the fences in the fence_context. + */ + spinlock_t fence_lock; + /** + * @fence_seqno: + * + * Seqno variable used as monotonic counter for the fences + * created on the connector's timeline. + */ + unsigned long fence_seqno; + /** + * @timeline_name: + * + * The name of the connector's fence timeline. + */ + char timeline_name[32]; +}; + +struct drm_writeback_job { + /** + * @cleanup_work: + * + * Used to allow drm_writeback_signal_completion to defer dropping the + * framebuffer reference to a workqueue + */ + struct work_struct cleanup_work; + + /** + * @list_entry: + * + * List item for the writeback connector's @job_queue + */ + struct list_head list_entry; + + /** + * @fb: + * + * Framebuffer to be written to by the writeback connector. 
Do not set + * directly, use drm_atomic_set_writeback_fb_for_connector() + */ + struct drm_framebuffer *fb; + + /** + * @out_fence: + * + * Fence which will signal once the writeback has completed + */ + struct dma_fence *out_fence; +}; + +static inline struct drm_writeback_connector * +drm_connector_to_writeback(struct drm_connector *connector) +{ + return container_of(connector, struct drm_writeback_connector, base); +} + +int drm_writeback_connector_init(struct drm_device *dev, + struct drm_writeback_connector *wb_connector, + const struct drm_connector_funcs *con_funcs, + const struct drm_encoder_helper_funcs *enc_helper_funcs, + const u32 *formats, int n_formats); + +void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector, + struct drm_writeback_job *job); + +void drm_writeback_cleanup_job(struct drm_writeback_job *job); + +void +drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector, + int status); + +struct dma_fence * +drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector); +#endif diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h new file mode 100644 index 000000000..87ac5e6ca --- /dev/null +++ b/include/drm/gma_drm.h @@ -0,0 +1,25 @@ +/************************************************************************** + * Copyright (c) 2007-2011, Intel Corporation. + * All Rights Reserved. + * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + **************************************************************************/ + +#ifndef _GMA_DRM_H_ +#define _GMA_DRM_H_ + +#endif diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h new file mode 100644 index 000000000..21c648b0b --- /dev/null +++ b/include/drm/gpu_scheduler.h @@ -0,0 +1,311 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _DRM_GPU_SCHEDULER_H_ +#define _DRM_GPU_SCHEDULER_H_ + +#include +#include + +#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) + +struct drm_gpu_scheduler; +struct drm_sched_rq; + +enum drm_sched_priority { + DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_NORMAL, + DRM_SCHED_PRIORITY_HIGH_SW, + DRM_SCHED_PRIORITY_HIGH_HW, + DRM_SCHED_PRIORITY_KERNEL, + DRM_SCHED_PRIORITY_MAX, + DRM_SCHED_PRIORITY_INVALID = -1, + DRM_SCHED_PRIORITY_UNSET = -2 +}; + +/** + * struct drm_sched_entity - A wrapper around a job queue (typically + * attached to the DRM file_priv). + * + * @list: used to append this struct to the list of entities in the + * runqueue. + * @rq: runqueue to which this entity belongs. + * @rq_lock: lock to modify the runqueue to which this entity belongs. + * @job_queue: the list of jobs of this entity. + * @fence_seq: a linearly increasing seqno incremented with each + * new &drm_sched_fence which is part of the entity. + * @fence_context: a unique context for all the fences which belong + * to this entity. + * The &drm_sched_fence.scheduled uses the + * fence_context but &drm_sched_fence.finished uses + * fence_context + 1. + * @dependency: the dependency fence of the job which is on the top + * of the job queue. + * @cb: callback for the dependency fence above. + * @guilty: points to ctx's guilty. + * @fini_status: contains the exit status in case the process was signalled. + * @last_scheduled: points to the finished fence of the last scheduled job. + * @last_user: last group leader pushing a job into the entity. + * + * Entities will emit jobs in order to their corresponding hardware + * ring, and the scheduler will alternate between entities based on + * scheduling policy. + */ +struct drm_sched_entity { + struct list_head list; + struct drm_sched_rq *rq; + spinlock_t rq_lock; + + struct spsc_queue job_queue; + + atomic_t fence_seq; + uint64_t fence_context; + + struct dma_fence *dependency; + struct dma_fence_cb cb; + atomic_t *guilty; + struct dma_fence *last_scheduled; + struct task_struct *last_user; +}; + +/** + * struct drm_sched_rq - queue of entities to be scheduled. + * + * @lock: to modify the entities list. + * @sched: the scheduler to which this rq belongs to. + * @entities: list of the entities to be scheduled. + * @current_entity: the entity which is to be scheduled. + * + * Run queue is a set of entities scheduling command submissions for + * one specific ring. It implements the scheduling policy that selects + * the next entity to emit commands from. + */ +struct drm_sched_rq { + spinlock_t lock; + struct drm_gpu_scheduler *sched; + struct list_head entities; + struct drm_sched_entity *current_entity; +}; + +/** + * struct drm_sched_fence - fences corresponding to the scheduling of a job. + */ +struct drm_sched_fence { + /** + * @scheduled: this fence is what will be signaled by the scheduler + * when the job is scheduled. + */ + struct dma_fence scheduled; + + /** + * @finished: this fence is what will be signaled by the scheduler + * when the job is completed. 
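+ *
+ * A driver would typically grab its own reference to it roughly as follows,
+ * a sketch only, with job being an already initialized &drm_sched_job:
+ *
+ *	out_fence = dma_fence_get(&job->s_fence->finished);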
+ * + * When setting up an out fence for the job, you should use + * this, since it's available immediately upon + * drm_sched_job_init(), and the fence returned by the driver + * from run_job() won't be created until the dependencies have + * resolved. + */ + struct dma_fence finished; + + /** + * @cb: the callback for the parent fence below. + */ + struct dma_fence_cb cb; + /** + * @parent: the fence returned by &drm_sched_backend_ops.run_job + * when scheduling the job on hardware. We signal the + * &drm_sched_fence.finished fence once parent is signalled. + */ + struct dma_fence *parent; + /** + * @sched: the scheduler instance to which the job having this struct + * belongs to. + */ + struct drm_gpu_scheduler *sched; + /** + * @lock: the lock used by the scheduled and the finished fences. + */ + spinlock_t lock; + /** + * @owner: job owner for debugging + */ + void *owner; +}; + +struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); + +/** + * struct drm_sched_job - A job to be run by an entity. + * + * @queue_node: used to append this struct to the queue of jobs in an entity. + * @sched: the scheduler instance on which this job is scheduled. + * @s_fence: contains the fences for the scheduling of job. + * @finish_cb: the callback for the finished fence. + * @finish_work: schedules the function @drm_sched_job_finish once the job has + * finished to remove the job from the + * @drm_gpu_scheduler.ring_mirror_list. + * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list. + * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout + * interval is over. + * @id: a unique id assigned to each job scheduled on the scheduler. + * @karma: increment on every hang caused by this job. If this exceeds the hang + * limit of the scheduler then the job is marked guilty and will not + * be scheduled further. + * @s_priority: the priority of the job. + * @entity: the entity to which this job belongs. + * + * A job is created by the driver using drm_sched_job_init(), and + * should call drm_sched_entity_push_job() once it wants the scheduler + * to schedule the job. + */ +struct drm_sched_job { + struct spsc_node queue_node; + struct drm_gpu_scheduler *sched; + struct drm_sched_fence *s_fence; + struct dma_fence_cb finish_cb; + struct work_struct finish_work; + struct list_head node; + struct delayed_work work_tdr; + uint64_t id; + atomic_t karma; + enum drm_sched_priority s_priority; + struct drm_sched_entity *entity; +}; + +static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, + int threshold) +{ + return (s_job && atomic_inc_return(&s_job->karma) > threshold); +} + +/** + * struct drm_sched_backend_ops + * + * Define the backend operations called by the scheduler, + * these functions should be implemented in driver side. + */ +struct drm_sched_backend_ops { + /** + * @dependency: Called when the scheduler is considering scheduling + * this job next, to get another struct dma_fence for this job to + * block on. Once it returns NULL, run_job() may be called. + */ + struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity); + + /** + * @run_job: Called to execute the job once all of the dependencies + * have been resolved. This may be called multiple times, if + * timedout_job() has happened and drm_sched_job_recovery() + * decides to try it again. 
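+ *
+ * A bare-bones sketch of such a hook, where struct my_job is assumed to
+ * embed the &drm_sched_job as @base and my_ring_submit() stands in for the
+ * driver's actual command submission:
+ *
+ *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
+ *	{
+ *		struct my_job *job = container_of(sched_job, struct my_job, base);
+ *
+ *		return my_ring_submit(job);
+ *	}
+ *
+ * The returned fence ends up as &drm_sched_fence.parent, and once it
+ * signals the scheduler signals &drm_sched_fence.finished.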
+ */ + struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); + + /** + * @timedout_job: Called when a job has taken too long to execute, + * to trigger GPU recovery. + */ + void (*timedout_job)(struct drm_sched_job *sched_job); + + /** + * @free_job: Called once the job's finished fence has been signaled + * and it's time to clean it up. + */ + void (*free_job)(struct drm_sched_job *sched_job); +}; + +/** + * struct drm_gpu_scheduler + * + * @ops: backend operations provided by the driver. + * @hw_submission_limit: the max size of the hardware queue. + * @timeout: the time after which a job is removed from the scheduler. + * @name: name of the ring for which this scheduler is being used. + * @sched_rq: priority wise array of run queues. + * @wake_up_worker: the wait queue on which the scheduler sleeps until a job + * is ready to be scheduled. + * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler + * waits on this wait queue until all the scheduled jobs are + * finished. + * @hw_rq_count: the number of jobs currently in the hardware queue. + * @job_id_count: used to assign unique id to the each job. + * @thread: the kthread on which the scheduler which run. + * @ring_mirror_list: the list of jobs which are currently in the job queue. + * @job_list_lock: lock to protect the ring_mirror_list. + * @hang_limit: once the hangs by a job crosses this limit then it is marked + * guilty and it will be considered for scheduling further. + * + * One scheduler is implemented for each hardware ring. + */ +struct drm_gpu_scheduler { + const struct drm_sched_backend_ops *ops; + uint32_t hw_submission_limit; + long timeout; + const char *name; + struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX]; + wait_queue_head_t wake_up_worker; + wait_queue_head_t job_scheduled; + atomic_t hw_rq_count; + atomic64_t job_id_count; + struct task_struct *thread; + struct list_head ring_mirror_list; + spinlock_t job_list_lock; + int hang_limit; +}; + +int drm_sched_init(struct drm_gpu_scheduler *sched, + const struct drm_sched_backend_ops *ops, + uint32_t hw_submission, unsigned hang_limit, long timeout, + const char *name); +void drm_sched_fini(struct drm_gpu_scheduler *sched); + +int drm_sched_entity_init(struct drm_sched_entity *entity, + struct drm_sched_rq **rq_list, + unsigned int num_rq_list, + atomic_t *guilty); +long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout); +void drm_sched_entity_fini(struct drm_sched_entity *entity); +void drm_sched_entity_destroy(struct drm_sched_entity *entity); +void drm_sched_entity_push_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *entity); +void drm_sched_entity_set_rq(struct drm_sched_entity *entity, + struct drm_sched_rq *rq); + +struct drm_sched_fence *drm_sched_fence_create( + struct drm_sched_entity *s_entity, void *owner); +void drm_sched_fence_scheduled(struct drm_sched_fence *fence); +void drm_sched_fence_finished(struct drm_sched_fence *fence); +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_sched_entity *entity, + void *owner); +void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job); +void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity); +void drm_sched_job_kickout(struct drm_sched_job *s_job); + +#endif diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h new file mode 100644 index 000000000..8390b437a --- /dev/null +++ 
b/include/drm/i2c/ch7006.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_I2C_CH7006_H__ +#define __DRM_I2C_CH7006_H__ + +/** + * struct ch7006_encoder_params + * + * Describes how the ch7006 is wired up with the GPU. It should be + * used as the @params parameter of its @set_config method. + * + * See "http://www.chrontel.com/pdf/7006.pdf" for their precise + * meaning. + */ +struct ch7006_encoder_params { + enum { + CH7006_FORMAT_RGB16 = 0, + CH7006_FORMAT_YCrCb24m16, + CH7006_FORMAT_RGB24m16, + CH7006_FORMAT_RGB15, + CH7006_FORMAT_RGB24m12C, + CH7006_FORMAT_RGB24m12I, + CH7006_FORMAT_RGB24m8, + CH7006_FORMAT_RGB16m8, + CH7006_FORMAT_RGB15m8, + CH7006_FORMAT_YCrCb24m8, + } input_format; + + enum { + CH7006_CLOCK_SLAVE = 0, + CH7006_CLOCK_MASTER, + } clock_mode; + + enum { + CH7006_CLOCK_EDGE_NEG = 0, + CH7006_CLOCK_EDGE_POS, + } clock_edge; + + int xcm, pcm; + + enum { + CH7006_SYNC_SLAVE = 0, + CH7006_SYNC_MASTER, + } sync_direction; + + enum { + CH7006_SYNC_SEPARATED = 0, + CH7006_SYNC_EMBEDDED, + } sync_encoding; + + enum { + CH7006_POUT_1_8V = 0, + CH7006_POUT_3_3V, + } pout_level; + + enum { + CH7006_ACTIVE_HSYNC = 0, + CH7006_ACTIVE_DSTART, + } active_detect; +}; + +#endif diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h new file mode 100644 index 000000000..205e27384 --- /dev/null +++ b/include/drm/i2c/sil164.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
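[Usage note, not part of the patch] For ch7006_encoder_params above: the anonymous enums are intended to be used as ordinary designated initializers when the driver (nouveau's TV-out path is the in-tree user) describes the board wiring and hands it to the encoder's set_config hook. The values below are invented; real ones come from the board schematics and the Chrontel datasheet:

#include <drm/i2c/ch7006.h>

/* Hypothetical wiring description for one board. */
static const struct ch7006_encoder_params example_ch7006_params = {
        .input_format   = CH7006_FORMAT_RGB16,
        .clock_mode     = CH7006_CLOCK_MASTER,
        .clock_edge     = CH7006_CLOCK_EDGE_NEG,
        .xcm            = 0,
        .pcm            = 0,
        .sync_direction = CH7006_SYNC_SLAVE,
        .sync_encoding  = CH7006_SYNC_SEPARATED,
        .pout_level     = CH7006_POUT_3_3V,
        .active_detect  = CH7006_ACTIVE_HSYNC,
};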
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_I2C_SIL164_H__ +#define __DRM_I2C_SIL164_H__ + +/** + * struct sil164_encoder_params + * + * Describes how the sil164 is connected to the GPU. It should be used + * as the @params parameter of its @set_config method. + * + * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf". + */ +struct sil164_encoder_params { + enum { + SIL164_INPUT_EDGE_FALLING = 0, + SIL164_INPUT_EDGE_RISING + } input_edge; + + enum { + SIL164_INPUT_WIDTH_12BIT = 0, + SIL164_INPUT_WIDTH_24BIT + } input_width; + + enum { + SIL164_INPUT_SINGLE_EDGE = 0, + SIL164_INPUT_DUAL_EDGE + } input_dual; + + enum { + SIL164_PLL_FILTER_ON = 0, + SIL164_PLL_FILTER_OFF, + } pll_filter; + + int input_skew; /** < Allowed range [-4, 3], use 0 for no de-skew. */ + int duallink_skew; /** < Allowed range [-4, 3]. */ +}; + +#endif diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h new file mode 100644 index 000000000..3cb25ccbe --- /dev/null +++ b/include/drm/i2c/tda998x.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_I2C_TDA998X_H__ +#define __DRM_I2C_TDA998X_H__ + +#include +#include + +enum { + AFMT_UNUSED = 0, + AFMT_SPDIF = TDA998x_SPDIF, + AFMT_I2S = TDA998x_I2S, +}; + +struct tda998x_audio_params { + u8 config; + u8 format; + unsigned sample_width; + unsigned sample_rate; + struct hdmi_audio_infoframe cea; + u8 status[5]; +}; + +struct tda998x_encoder_params { + u8 swap_b:3; + u8 mirr_b:1; + u8 swap_a:3; + u8 mirr_a:1; + u8 swap_d:3; + u8 mirr_d:1; + u8 swap_c:3; + u8 mirr_c:1; + u8 swap_f:3; + u8 mirr_f:1; + u8 swap_e:3; + u8 mirr_e:1; + + struct tda998x_audio_params audio_params; +}; + +#endif diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h new file mode 100644 index 000000000..fca22d463 --- /dev/null +++ b/include/drm/i915_component.h @@ -0,0 +1,49 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef _I915_COMPONENT_H_ +#define _I915_COMPONENT_H_ + +#include "drm_audio_component.h" + +/* MAX_PORT is the number of port + * It must be sync with I915_MAX_PORTS defined i915_drv.h + */ +#define MAX_PORTS 6 + +/** + * struct i915_audio_component - Used for direct communication between i915 and hda drivers + */ +struct i915_audio_component { + /** + * @base: the drm_audio_component base class + */ + struct drm_audio_component base; + + /** + * @aud_sample_rate: the array of audio sample rate per port + */ + int aud_sample_rate[MAX_PORTS]; +}; + +#endif /* _I915_COMPONENT_H_ */ diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h new file mode 100644 index 000000000..c44703f47 --- /dev/null +++ b/include/drm/i915_drm.h @@ -0,0 +1,103 @@ +/* + * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _I915_DRM_H_ +#define _I915_DRM_H_ + +#include +#include + +/* For use by IPS driver */ +extern unsigned long i915_read_mch_val(void); +extern bool i915_gpu_raise(void); +extern bool i915_gpu_lower(void); +extern bool i915_gpu_busy(void); +extern bool i915_gpu_turbo_disable(void); + +/* Exported from arch/x86/kernel/early-quirks.c */ +extern struct resource intel_graphics_stolen_res; + +/* + * The Bridge device's PCI config space has information about the + * fb aperture size and the amount of pre-reserved memory. + * This is all handled in the intel-gtt.ko module. i915.ko only + * cares about the vga bit for the vga rbiter. 
+ */ +#define INTEL_GMCH_CTRL 0x52 +#define INTEL_GMCH_VGA_DISABLE (1 << 1) +#define SNB_GMCH_CTRL 0x50 +#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ +#define SNB_GMCH_GGMS_MASK 0x3 +#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ +#define SNB_GMCH_GMS_MASK 0x1f +#define BDW_GMCH_GGMS_SHIFT 6 +#define BDW_GMCH_GGMS_MASK 0x3 +#define BDW_GMCH_GMS_SHIFT 8 +#define BDW_GMCH_GMS_MASK 0xff + +#define I830_GMCH_CTRL 0x52 + +#define I830_GMCH_GMS_MASK 0x70 +#define I830_GMCH_GMS_LOCAL 0x10 +#define I830_GMCH_GMS_STOLEN_512 0x20 +#define I830_GMCH_GMS_STOLEN_1024 0x30 +#define I830_GMCH_GMS_STOLEN_8192 0x40 + +#define I855_GMCH_GMS_MASK 0xF0 +#define I855_GMCH_GMS_STOLEN_0M 0x0 +#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) +#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) +#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) +#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) +#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) +#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) +#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) +#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) +#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) +#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) +#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) +#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) +#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) + +#define I830_DRB3 0x63 +#define I85X_DRB3 0x43 +#define I865_TOUD 0xc4 + +#define I830_ESMRAMC 0x91 +#define I845_ESMRAMC 0x9e +#define I85X_ESMRAMC 0x61 +#define TSEG_ENABLE (1 << 0) +#define I830_TSEG_SIZE_512K (0 << 1) +#define I830_TSEG_SIZE_1M (1 << 1) +#define I845_TSEG_SIZE_MASK (3 << 1) +#define I845_TSEG_SIZE_512K (2 << 1) +#define I845_TSEG_SIZE_1M (3 << 1) + +#define INTEL_BSM 0x5c +#define INTEL_GEN11_BSM_DW0 0xc0 +#define INTEL_GEN11_BSM_DW1 0xc4 +#define INTEL_BSM_MASK (-(1u << 20)) + +#endif /* _I915_DRM_H_ */ diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h new file mode 100644 index 000000000..fd965ffbb --- /dev/null +++ b/include/drm/i915_pciids.h @@ -0,0 +1,461 @@ +/* + * Copyright 2013 Intel Corporation + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _I915_PCIIDS_H +#define _I915_PCIIDS_H + +/* + * A pci_device_id struct { + * __u32 vendor, device; + * __u32 subvendor, subdevice; + * __u32 class, class_mask; + * kernel_ulong_t driver_data; + * }; + * Don't use C99 here because "class" is reserved and we want to + * give userspace flexibility. 
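[Usage note, not part of the patch] Referring back to the GMCH control definitions just above: the *_SHIFT/*_MASK pairs are meant to be applied to a PCI config-space read. A hedged sketch of extracting the Sandybridge fields; which PCI device the word is read from, and how the raw field values translate into sizes, is generation-specific and left to the real i915/intel-gtt code (the function name is invented):

#include <linux/pci.h>
#include <drm/i915_drm.h>

static void example_read_snb_gmch(struct pci_dev *pdev)
{
        u16 gmch_ctrl;
        unsigned int ggms, gms;

        pci_read_config_word(pdev, SNB_GMCH_CTRL, &gmch_ctrl);
        ggms = (gmch_ctrl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;
        gms  = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT)  & SNB_GMCH_GMS_MASK;

        pr_info("raw GGMS=%u, raw GMS=%u\n", ggms, gms);
}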
+ */ +#define INTEL_VGA_DEVICE(id, info) { \ + 0x8086, id, \ + ~0, ~0, \ + 0x030000, 0xff0000, \ + (unsigned long) info } + +#define INTEL_QUANTA_VGA_DEVICE(info) { \ + 0x8086, 0x16a, \ + 0x152d, 0x8990, \ + 0x030000, 0xff0000, \ + (unsigned long) info } + +#define INTEL_I810_IDS(info) \ + INTEL_VGA_DEVICE(0x7121, info), /* I810 */ \ + INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */ \ + INTEL_VGA_DEVICE(0x7125, info) /* I810_E */ + +#define INTEL_I815_IDS(info) \ + INTEL_VGA_DEVICE(0x1132, info) /* I815*/ + +#define INTEL_I830_IDS(info) \ + INTEL_VGA_DEVICE(0x3577, info) + +#define INTEL_I845G_IDS(info) \ + INTEL_VGA_DEVICE(0x2562, info) + +#define INTEL_I85X_IDS(info) \ + INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \ + INTEL_VGA_DEVICE(0x358e, info) + +#define INTEL_I865G_IDS(info) \ + INTEL_VGA_DEVICE(0x2572, info) /* I865_G */ + +#define INTEL_I915G_IDS(info) \ + INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \ + INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */ + +#define INTEL_I915GM_IDS(info) \ + INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */ + +#define INTEL_I945G_IDS(info) \ + INTEL_VGA_DEVICE(0x2772, info) /* I945_G */ + +#define INTEL_I945GM_IDS(info) \ + INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \ + INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */ + +#define INTEL_I965G_IDS(info) \ + INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \ + INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \ + INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \ + INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */ + +#define INTEL_G33_IDS(info) \ + INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \ + INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \ + INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */ + +#define INTEL_I965GM_IDS(info) \ + INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \ + INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */ + +#define INTEL_GM45_IDS(info) \ + INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */ + +#define INTEL_G45_IDS(info) \ + INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \ + INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \ + INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \ + INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \ + INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \ + INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */ + +#define INTEL_PINEVIEW_IDS(info) \ + INTEL_VGA_DEVICE(0xa001, info), \ + INTEL_VGA_DEVICE(0xa011, info) + +#define INTEL_IRONLAKE_D_IDS(info) \ + INTEL_VGA_DEVICE(0x0042, info) + +#define INTEL_IRONLAKE_M_IDS(info) \ + INTEL_VGA_DEVICE(0x0046, info) + +#define INTEL_SNB_D_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0102, info), \ + INTEL_VGA_DEVICE(0x010A, info) + +#define INTEL_SNB_D_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0112, info), \ + INTEL_VGA_DEVICE(0x0122, info) + +#define INTEL_SNB_D_IDS(info) \ + INTEL_SNB_D_GT1_IDS(info), \ + INTEL_SNB_D_GT2_IDS(info) + +#define INTEL_SNB_M_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0106, info) + +#define INTEL_SNB_M_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0116, info), \ + INTEL_VGA_DEVICE(0x0126, info) + +#define INTEL_SNB_M_IDS(info) \ + INTEL_SNB_M_GT1_IDS(info), \ + INTEL_SNB_M_GT2_IDS(info) + +#define INTEL_IVB_M_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */ + +#define INTEL_IVB_M_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */ + +#define INTEL_IVB_M_IDS(info) \ + INTEL_IVB_M_GT1_IDS(info), \ + INTEL_IVB_M_GT2_IDS(info) + +#define INTEL_IVB_D_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \ + INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */ + +#define INTEL_IVB_D_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ 
\ + INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */ + +#define INTEL_IVB_D_IDS(info) \ + INTEL_IVB_D_GT1_IDS(info), \ + INTEL_IVB_D_GT2_IDS(info) + +#define INTEL_IVB_Q_IDS(info) \ + INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */ + +#define INTEL_HSW_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \ + INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \ + INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \ + INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \ + INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \ + INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \ + INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */ + +#define INTEL_HSW_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \ + INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \ + INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \ + INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \ + INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \ + INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \ + INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */ + +#define INTEL_HSW_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \ + INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \ + INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \ + INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \ + INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \ + INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \ + INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C26, 
info), /* SDV GT3 mobile */ \ + INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ + INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */ + +#define INTEL_HSW_IDS(info) \ + INTEL_HSW_GT1_IDS(info), \ + INTEL_HSW_GT2_IDS(info), \ + INTEL_HSW_GT3_IDS(info) + +#define INTEL_VLV_IDS(info) \ + INTEL_VGA_DEVICE(0x0f30, info), \ + INTEL_VGA_DEVICE(0x0f31, info), \ + INTEL_VGA_DEVICE(0x0f32, info), \ + INTEL_VGA_DEVICE(0x0f33, info), \ + INTEL_VGA_DEVICE(0x0157, info), \ + INTEL_VGA_DEVICE(0x0155, info) + +#define INTEL_BDW_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \ + INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \ + INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \ + INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \ + INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \ + INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */ + +#define INTEL_BDW_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \ + INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \ + INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \ + INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \ + INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \ + INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */ + +#define INTEL_BDW_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \ + INTEL_VGA_DEVICE(0x162E, info), /* ULX */\ + INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ + INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ + +#define INTEL_BDW_RSVD_IDS(info) \ + INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \ + INTEL_VGA_DEVICE(0x163E, info), /* ULX */ \ + INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ + INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ + +#define INTEL_BDW_IDS(info) \ + INTEL_BDW_GT1_IDS(info), \ + INTEL_BDW_GT2_IDS(info), \ + INTEL_BDW_GT3_IDS(info), \ + INTEL_BDW_RSVD_IDS(info) + +#define INTEL_CHV_IDS(info) \ + INTEL_VGA_DEVICE(0x22b0, info), \ + INTEL_VGA_DEVICE(0x22b1, info), \ + INTEL_VGA_DEVICE(0x22b2, info), \ + INTEL_VGA_DEVICE(0x22b3, info) + +#define INTEL_SKL_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */ + +#define INTEL_SKL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \ + INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \ + INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ + +#define INTEL_SKL_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ + INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3 */ + +#define INTEL_SKL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ + INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \ + INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \ + INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \ + INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4e */ + +#define INTEL_SKL_IDS(info) \ + INTEL_SKL_GT1_IDS(info), \ + INTEL_SKL_GT2_IDS(info), \ + 
INTEL_SKL_GT3_IDS(info), \ + INTEL_SKL_GT4_IDS(info) + +#define INTEL_BXT_IDS(info) \ + INTEL_VGA_DEVICE(0x0A84, info), \ + INTEL_VGA_DEVICE(0x1A84, info), \ + INTEL_VGA_DEVICE(0x1A85, info), \ + INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ + INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ + +#define INTEL_GLK_IDS(info) \ + INTEL_VGA_DEVICE(0x3184, info), \ + INTEL_VGA_DEVICE(0x3185, info) + +#define INTEL_KBL_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ + INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ + +#define INTEL_KBL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \ + INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \ + INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \ + INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ + +#define INTEL_KBL_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */ + +#define INTEL_KBL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */ + +/* AML/KBL Y GT2 */ +#define INTEL_AML_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \ + INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */ + +#define INTEL_KBL_IDS(info) \ + INTEL_KBL_GT1_IDS(info), \ + INTEL_KBL_GT2_IDS(info), \ + INTEL_KBL_GT3_IDS(info), \ + INTEL_KBL_GT4_IDS(info), \ + INTEL_AML_GT2_IDS(info) + +/* CFL S */ +#define INTEL_CFL_S_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */ + +#define INTEL_CFL_S_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ + +/* CFL H */ +#define INTEL_CFL_H_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */ + +/* CFL U GT2 */ +#define INTEL_CFL_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA9, info) + +/* CFL U GT3 */ +#define INTEL_CFL_U_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */ + +/* WHL/CFL U GT1 */ +#define INTEL_WHL_U_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA1, info) + +/* WHL/CFL U GT2 */ +#define INTEL_WHL_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA0, info) + +/* WHL/CFL U GT3 */ +#define INTEL_WHL_U_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA2, info), \ + INTEL_VGA_DEVICE(0x3EA3, info), \ + INTEL_VGA_DEVICE(0x3EA4, info) + +#define INTEL_CFL_IDS(info) \ + INTEL_CFL_S_GT1_IDS(info), \ + INTEL_CFL_S_GT2_IDS(info), \ + INTEL_CFL_H_GT2_IDS(info), \ + INTEL_CFL_U_GT2_IDS(info), \ + INTEL_CFL_U_GT3_IDS(info), \ + INTEL_WHL_U_GT1_IDS(info), \ + INTEL_WHL_U_GT2_IDS(info), \ + INTEL_WHL_U_GT3_IDS(info) + +/* CNL */ +#define INTEL_CNL_IDS(info) \ + 
INTEL_VGA_DEVICE(0x5A51, info), \ + INTEL_VGA_DEVICE(0x5A59, info), \ + INTEL_VGA_DEVICE(0x5A41, info), \ + INTEL_VGA_DEVICE(0x5A49, info), \ + INTEL_VGA_DEVICE(0x5A52, info), \ + INTEL_VGA_DEVICE(0x5A5A, info), \ + INTEL_VGA_DEVICE(0x5A42, info), \ + INTEL_VGA_DEVICE(0x5A4A, info), \ + INTEL_VGA_DEVICE(0x5A50, info), \ + INTEL_VGA_DEVICE(0x5A40, info), \ + INTEL_VGA_DEVICE(0x5A54, info), \ + INTEL_VGA_DEVICE(0x5A5C, info), \ + INTEL_VGA_DEVICE(0x5A44, info), \ + INTEL_VGA_DEVICE(0x5A4C, info) + +/* ICL */ +#define INTEL_ICL_11_IDS(info) \ + INTEL_VGA_DEVICE(0x8A50, info), \ + INTEL_VGA_DEVICE(0x8A51, info), \ + INTEL_VGA_DEVICE(0x8A5C, info), \ + INTEL_VGA_DEVICE(0x8A5D, info), \ + INTEL_VGA_DEVICE(0x8A52, info), \ + INTEL_VGA_DEVICE(0x8A5A, info), \ + INTEL_VGA_DEVICE(0x8A5B, info), \ + INTEL_VGA_DEVICE(0x8A71, info), \ + INTEL_VGA_DEVICE(0x8A70, info) + +#endif /* _I915_PCIIDS_H */ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h new file mode 100644 index 000000000..2324c84a2 --- /dev/null +++ b/include/drm/intel-gtt.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Common header for intel-gtt.ko and i915.ko */ + +#ifndef _DRM_INTEL_GTT_H +#define _DRM_INTEL_GTT_H + +void intel_gtt_get(u64 *gtt_total, + phys_addr_t *mappable_base, + resource_size_t *mappable_end); + +int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, + struct agp_bridge_data *bridge); +void intel_gmch_remove(void); + +bool intel_enable_gtt(void); + +void intel_gtt_chipset_flush(void); +void intel_gtt_insert_page(dma_addr_t addr, + unsigned int pg, + unsigned int flags); +void intel_gtt_insert_sg_entries(struct sg_table *st, + unsigned int pg_start, + unsigned int flags); +void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); + +/* Special gtt memory types */ +#define AGP_DCACHE_MEMORY 1 +#define AGP_PHYS_MEMORY 2 + +/* flag for GFDT type */ +#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) + +#ifdef CONFIG_INTEL_IOMMU +extern int intel_iommu_gfx_mapped; +#endif + +#endif diff --git a/include/drm/intel_lpe_audio.h b/include/drm/intel_lpe_audio.h new file mode 100644 index 000000000..b6121c8fe --- /dev/null +++ b/include/drm/intel_lpe_audio.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
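[Usage note, not part of the patch] Circling back to i915_pciids.h above: the INTEL_*_IDS() macros are designed to be expanded inside a struct pci_device_id table, with the per-platform info pointer carried through as driver_data. A hedged sketch; struct example_device_info and the table name are invented:

#include <linux/module.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>

struct example_device_info {
        int gen;
};

static const struct example_device_info example_gen9_info = { .gen = 9 };

static const struct pci_device_id example_pciidlist[] = {
        INTEL_SKL_IDS(&example_gen9_info),
        INTEL_KBL_IDS(&example_gen9_info),
        { }     /* terminator */
};
MODULE_DEVICE_TABLE(pci, example_pciidlist);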
+ */ + +#ifndef _INTEL_LPE_AUDIO_H_ +#define _INTEL_LPE_AUDIO_H_ + +#include +#include + +struct platform_device; + +#define HDMI_MAX_ELD_BYTES 128 + +struct intel_hdmi_lpe_audio_port_pdata { + u8 eld[HDMI_MAX_ELD_BYTES]; + int port; + int pipe; + int ls_clock; + bool dp_output; +}; + +struct intel_hdmi_lpe_audio_pdata { + struct intel_hdmi_lpe_audio_port_pdata port[3]; /* for ports B,C,D */ + int num_ports; + int num_pipes; + + void (*notify_audio_lpe)(struct platform_device *pdev, int port); /* port: 0==B,1==C,2==D */ + spinlock_t lpe_audio_slock; +}; + +#endif /* _I915_LPE_AUDIO_H_ */ diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h new file mode 100644 index 000000000..125f096c8 --- /dev/null +++ b/include/drm/spsc_queue.h @@ -0,0 +1,122 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_ +#define DRM_SCHEDULER_SPSC_QUEUE_H_ + +#include +#include + +/** SPSC lockless queue */ + +struct spsc_node { + + /* Stores spsc_node* */ + struct spsc_node *next; +}; + +struct spsc_queue { + + struct spsc_node *head; + + /* atomic pointer to struct spsc_node* */ + atomic_long_t tail; + + atomic_t job_count; +}; + +static inline void spsc_queue_init(struct spsc_queue *queue) +{ + queue->head = NULL; + atomic_long_set(&queue->tail, (long)&queue->head); + atomic_set(&queue->job_count, 0); +} + +static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) +{ + return queue->head; +} + +static inline int spsc_queue_count(struct spsc_queue *queue) +{ + return atomic_read(&queue->job_count); +} + +static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) +{ + struct spsc_node **tail; + + node->next = NULL; + + preempt_disable(); + + tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); + WRITE_ONCE(*tail, node); + atomic_inc(&queue->job_count); + + /* + * In case of first element verify new node will be visible to the consumer + * thread when we ping the kernel thread that there is new work to do. 
+ */ + smp_wmb(); + + preempt_enable(); + + return tail == &queue->head; +} + + +static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) +{ + struct spsc_node *next, *node; + + /* Verify reading from memory and not the cache */ + smp_rmb(); + + node = READ_ONCE(queue->head); + + if (!node) + return NULL; + + next = READ_ONCE(node->next); + WRITE_ONCE(queue->head, next); + + if (unlikely(!next)) { + /* slowpath for the last element in the queue */ + + if (atomic_long_cmpxchg(&queue->tail, + (long)&node->next, (long) &queue->head) != (long)&node->next) { + /* Updating tail failed wait for new next to appear */ + do { + smp_rmb(); + } while (unlikely(!(queue->head = READ_ONCE(node->next)))); + } + } + + atomic_dec(&queue->job_count); + return node; +} + + + +#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */ diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h new file mode 100644 index 000000000..bcc98bd44 --- /dev/null +++ b/include/drm/tinydrm/mipi-dbi.h @@ -0,0 +1,109 @@ +/* + * MIPI Display Bus Interface (DBI) LCD controller support + * + * Copyright 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_MIPI_DBI_H +#define __LINUX_MIPI_DBI_H + +#include + +struct spi_device; +struct gpio_desc; +struct regulator; + +/** + * struct mipi_dbi - MIPI DBI controller + * @tinydrm: tinydrm base + * @spi: SPI device + * @enabled: Pipeline is enabled + * @cmdlock: Command lock + * @command: Bus specific callback executing commands. + * @read_commands: Array of read commands terminated by a zero entry. + * Reading is disabled if this is NULL. + * @dc: Optional D/C gpio. + * @tx_buf: Buffer used for transfer (copy clip rect area) + * @tx_buf9: Buffer used for Option 1 9-bit conversion + * @tx_buf9_len: Size of tx_buf9. 
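[Usage note, not part of the patch] On the spsc_queue just added (it backs drm_sched_entity.job_queue in gpu_scheduler.h): the producer embeds a spsc_node in its element and pushes it, the single consumer pops and uses container_of() to get the element back, and the push return value tells the producer whether the queue was previously empty so it can wake the consumer. A minimal sketch with an invented element type; spsc_queue_init() must have run before first use:

#include <linux/kernel.h>       /* container_of() */
#include <drm/spsc_queue.h>

struct example_item {
        struct spsc_node node;
        int payload;
};

static struct spsc_queue example_q;     /* spsc_queue_init(&example_q) first */

/* Producer side: */
static void example_submit(struct example_item *item)
{
        if (spsc_queue_push(&example_q, &item->node)) {
                /* queue was empty: wake the consumer thread here */
        }
}

/* Consumer side (exactly one consumer, e.g. a kthread): */
static struct example_item *example_fetch(void)
{
        struct spsc_node *node = spsc_queue_pop(&example_q);

        return node ? container_of(node, struct example_item, node) : NULL;
}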
+ * @swap_bytes: Swap bytes in buffer before transfer + * @reset: Optional reset gpio + * @rotation: initial rotation in degrees Counter Clock Wise + * @backlight: backlight device (optional) + * @regulator: power regulator (optional) + */ +struct mipi_dbi { + struct tinydrm_device tinydrm; + struct spi_device *spi; + bool enabled; + struct mutex cmdlock; + int (*command)(struct mipi_dbi *mipi, u8 *cmd, u8 *param, size_t num); + const u8 *read_commands; + struct gpio_desc *dc; + u16 *tx_buf; + void *tx_buf9; + size_t tx_buf9_len; + bool swap_bytes; + struct gpio_desc *reset; + unsigned int rotation; + struct backlight_device *backlight; + struct regulator *regulator; +}; + +static inline struct mipi_dbi * +mipi_dbi_from_tinydrm(struct tinydrm_device *tdev) +{ + return container_of(tdev, struct mipi_dbi, tinydrm); +} + +int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi, + struct gpio_desc *dc); +int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, + const struct drm_simple_display_pipe_funcs *pipe_funcs, + struct drm_driver *driver, + const struct drm_display_mode *mode, unsigned int rotation); +void mipi_dbi_enable_flush(struct mipi_dbi *mipi, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plan_state); +void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); +void mipi_dbi_hw_reset(struct mipi_dbi *mipi); +bool mipi_dbi_display_is_on(struct mipi_dbi *mipi); +int mipi_dbi_poweron_reset(struct mipi_dbi *mipi); +int mipi_dbi_poweron_conditional_reset(struct mipi_dbi *mipi); +u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len); + +int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val); +int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); +int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); +int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, + struct drm_clip_rect *clip, bool swap); +/** + * mipi_dbi_command - MIPI DCS command with optional parameter(s) + * @mipi: MIPI structure + * @cmd: Command + * @seq...: Optional parameter(s) + * + * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for + * get/read. + * + * Returns: + * Zero on success, negative error code on failure. + */ +#define mipi_dbi_command(mipi, cmd, seq...) \ +({ \ + u8 d[] = { seq }; \ + mipi_dbi_command_stackbuf(mipi, cmd, d, ARRAY_SIZE(d)); \ +}) + +#ifdef CONFIG_DEBUG_FS +int mipi_dbi_debugfs_init(struct drm_minor *minor); +#else +#define mipi_dbi_debugfs_init NULL +#endif + +#endif /* __LINUX_MIPI_DBI_H */ diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h new file mode 100644 index 000000000..5b96f0b12 --- /dev/null +++ b/include/drm/tinydrm/tinydrm-helpers.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
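[Usage note, not part of the patch] As an illustration of mipi_dbi_command() above: the variadic arguments become the DCS parameter buffer, so a panel enable path typically reads like the sketch below. The command opcodes are the standard ones from include/video/mipi_display.h; the concrete sequence and the delay are made up rather than taken from any particular panel:

#include <linux/delay.h>
#include <video/mipi_display.h>
#include <drm/tinydrm/mipi-dbi.h>

static void example_panel_enable(struct mipi_dbi *mipi)
{
        mipi_dbi_hw_reset(mipi);

        mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
        msleep(120);    /* panel-specific wake-up time */

        /* Parameters simply follow the command byte. */
        mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT,
                         MIPI_DCS_PIXEL_FMT_16BIT);
        mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
}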
+ */ + +#ifndef __LINUX_TINYDRM_HELPERS_H +#define __LINUX_TINYDRM_HELPERS_H + +struct backlight_device; +struct tinydrm_device; +struct drm_clip_rect; +struct spi_transfer; +struct spi_message; +struct spi_device; +struct device; + +/** + * tinydrm_machine_little_endian - Machine is little endian + * + * Returns: + * true if *defined(__LITTLE_ENDIAN)*, false otherwise + */ +static inline bool tinydrm_machine_little_endian(void) +{ +#if defined(__LITTLE_ENDIAN) + return true; +#else + return false; +#endif +} + +bool tinydrm_merge_clips(struct drm_clip_rect *dst, + struct drm_clip_rect *src, unsigned int num_clips, + unsigned int flags, u32 max_width, u32 max_height); +int tinydrm_fb_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips); +void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip); +void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip); +void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr, + struct drm_framebuffer *fb, + struct drm_clip_rect *clip, bool swap); +void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip); + +size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len); +bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw); +int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz, + struct spi_transfer *header, u8 bpw, const void *buf, + size_t len); +void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m); + +#ifdef DEBUG +/** + * tinydrm_dbg_spi_message - Dump SPI message + * @spi: SPI device + * @m: SPI message + * + * Dumps info about the transfers in a SPI message including buffer content. + * DEBUG has to be defined for this function to be enabled alongside setting + * the DRM_UT_DRIVER bit of &drm_debug. + */ +static inline void tinydrm_dbg_spi_message(struct spi_device *spi, + struct spi_message *m) +{ + if (drm_debug & DRM_UT_DRIVER) + _tinydrm_dbg_spi_message(spi, m); +} +#else +static inline void tinydrm_dbg_spi_message(struct spi_device *spi, + struct spi_message *m) +{ +} +#endif /* DEBUG */ + +#endif /* __LINUX_TINYDRM_HELPERS_H */ diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h new file mode 100644 index 000000000..fe9827d0c --- /dev/null +++ b/include/drm/tinydrm/tinydrm.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
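[Usage note, not part of the patch] One note on the conversion helpers above: the swap argument exists because these panels are usually fed 16-bit pixels most-significant-byte first, so on a little-endian CPU the driver either asks the SPI controller for 16 bits per word or byte-swaps while converting. A hedged fragment; dst, vaddr, fb and clip stand in for the driver's real transfer buffer, framebuffer vmap and damage rectangle, and real drivers also consult tinydrm_spi_bpw_supported() before deciding:

#include <drm/drm_framebuffer.h>
#include <drm/tinydrm/tinydrm-helpers.h>

static void example_convert(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
                            struct drm_clip_rect *clip)
{
        /* Swap bytes while converting when the CPU is little endian and the
         * transfer will be done 8 bits per word. */
        bool swap = tinydrm_machine_little_endian();

        tinydrm_xrgb8888_to_rgb565(dst, vaddr, fb, clip, swap);
}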
+ */ + +#ifndef __LINUX_TINYDRM_H +#define __LINUX_TINYDRM_H + +#include +#include +#include + +/** + * struct tinydrm_device - tinydrm device + */ +struct tinydrm_device { + /** + * @drm: DRM device + */ + struct drm_device *drm; + + /** + * @pipe: Display pipe structure + */ + struct drm_simple_display_pipe pipe; + + /** + * @dirty_lock: Serializes framebuffer flushing + */ + struct mutex dirty_lock; + + /** + * @fb_funcs: Framebuffer functions used when creating framebuffers + */ + const struct drm_framebuffer_funcs *fb_funcs; + + /** + * @fb_dirty: Framebuffer dirty callback + */ + int (*fb_dirty)(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, unsigned flags, + unsigned color, struct drm_clip_rect *clips, + unsigned num_clips); +}; + +static inline struct tinydrm_device * +pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) +{ + return container_of(pipe, struct tinydrm_device, pipe); +} + +/** + * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations + * + * This macro provides a shortcut for setting the tinydrm GEM operations in + * the &drm_driver structure. + */ +#define TINYDRM_GEM_DRIVER_OPS \ + .gem_free_object_unlocked = tinydrm_gem_cma_free_object, \ + .gem_print_info = drm_gem_cma_print_info, \ + .gem_vm_ops = &drm_gem_cma_vm_ops, \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import = drm_gem_prime_import, \ + .gem_prime_export = drm_gem_prime_export, \ + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, \ + .gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \ + .gem_prime_vmap = drm_gem_cma_prime_vmap, \ + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \ + .gem_prime_mmap = drm_gem_cma_prime_mmap, \ + .dumb_create = drm_gem_cma_dumb_create + +/** + * TINYDRM_MODE - tinydrm display mode + * @hd: Horizontal resolution, width + * @vd: Vertical resolution, height + * @hd_mm: Display width in millimeters + * @vd_mm: Display height in millimeters + * + * This macro creates a &drm_display_mode for use with tinydrm. 
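[Usage note, not part of the patch] The two macros here are meant to be dropped straight into a driver's static data: TINYDRM_GEM_DRIVER_OPS into its struct drm_driver, and TINYDRM_MODE (defined immediately below) into a struct drm_display_mode. A hedged sketch with invented names and made-up panel dimensions; the feature flags follow common tinydrm-driver practice rather than anything mandated by this header:

#include <drm/drmP.h>
#include <drm/tinydrm/tinydrm.h>

/* 320x240 panel with a 58x43 mm active area (numbers invented). */
static const struct drm_display_mode example_mode = {
        TINYDRM_MODE(320, 240, 58, 43),
};

static struct drm_driver example_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET |
                           DRIVER_PRIME | DRIVER_ATOMIC,
        TINYDRM_GEM_DRIVER_OPS,
        .name            = "example",
        .desc            = "Example tinydrm driver",
        .date            = "20180101",
        .major           = 1,
        .minor           = 0,
};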
+ */ +#define TINYDRM_MODE(hd, vd, hd_mm, vd_mm) \ + .hdisplay = (hd), \ + .hsync_start = (hd), \ + .hsync_end = (hd), \ + .htotal = (hd), \ + .vdisplay = (vd), \ + .vsync_start = (vd), \ + .vsync_end = (vd), \ + .vtotal = (vd), \ + .width_mm = (hd_mm), \ + .height_mm = (vd_mm), \ + .type = DRM_MODE_TYPE_DRIVER, \ + .clock = 1 /* pass validation */ + +void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj); +struct drm_gem_object * +tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, + struct dma_buf_attachment *attach, + struct sg_table *sgt); +int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, + const struct drm_framebuffer_funcs *fb_funcs, + struct drm_driver *driver); +int devm_tinydrm_register(struct tinydrm_device *tdev); +void tinydrm_shutdown(struct tinydrm_device *tdev); + +void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state); +int +tinydrm_display_pipe_init(struct tinydrm_device *tdev, + const struct drm_simple_display_pipe_funcs *funcs, + int connector_type, + const uint32_t *formats, + unsigned int format_count, + const struct drm_display_mode *mode, + unsigned int rotation); + +#endif /* __LINUX_TINYDRM_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h new file mode 100644 index 000000000..a01ba2032 --- /dev/null +++ b/include/drm/ttm/ttm_bo_api.h @@ -0,0 +1,767 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _TTM_BO_API_H_ +#define _TTM_BO_API_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct ttm_bo_global; + +struct ttm_bo_device; + +struct drm_mm_node; + +struct ttm_placement; + +struct ttm_place; + +/** + * struct ttm_bus_placement + * + * @addr: mapped virtual address + * @base: bus base address + * @is_iomem: is this io memory ? + * @size: size in byte + * @offset: offset from the base address + * @io_reserved_vm: The VM system has a refcount in @io_reserved_count + * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve + * + * Structure indicating the bus placement of an object. 
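[Usage note, not part of the patch] Circling back to the tinydrm entry points declared above: a probe path is expected to initialize the device with devm_tinydrm_init(), set up the single display pipe, then register. A hedged sketch of that flow; example_driver, example_fb_funcs, example_pipe_funcs, example_formats and example_mode are the driver's own static tables (invented here and not shown):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <drm/tinydrm/tinydrm.h>

static int example_probe(struct spi_device *spi)
{
        struct tinydrm_device *tdev;
        int ret;

        tdev = devm_kzalloc(&spi->dev, sizeof(*tdev), GFP_KERNEL);
        if (!tdev)
                return -ENOMEM;

        ret = devm_tinydrm_init(&spi->dev, tdev, &example_fb_funcs,
                                &example_driver);
        if (ret)
                return ret;

        ret = tinydrm_display_pipe_init(tdev, &example_pipe_funcs,
                                        DRM_MODE_CONNECTOR_VIRTUAL,
                                        example_formats,
                                        ARRAY_SIZE(example_formats),
                                        &example_mode, 0);
        if (ret)
                return ret;

        return devm_tinydrm_register(tdev);
}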
+ */ +struct ttm_bus_placement { + void *addr; + phys_addr_t base; + unsigned long size; + unsigned long offset; + bool is_iomem; + bool io_reserved_vm; + uint64_t io_reserved_count; +}; + + +/** + * struct ttm_mem_reg + * + * @mm_node: Memory manager node. + * @size: Requested size of memory region. + * @num_pages: Actual size of memory region in pages. + * @page_alignment: Page alignment. + * @placement: Placement flags. + * @bus: Placement on io bus accessible to the CPU + * + * Structure indicating the placement and space resources used by a + * buffer object. + */ + +struct ttm_mem_reg { + void *mm_node; + unsigned long start; + unsigned long size; + unsigned long num_pages; + uint32_t page_alignment; + uint32_t mem_type; + uint32_t placement; + struct ttm_bus_placement bus; +}; + +/** + * enum ttm_bo_type + * + * @ttm_bo_type_device: These are 'normal' buffers that can + * be mmapped by user space. Each of these bos occupy a slot in the + * device address space, that can be used for normal vm operations. + * + * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers, + * but they cannot be accessed from user-space. For kernel-only use. + * + * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another + * driver. + */ + +enum ttm_bo_type { + ttm_bo_type_device, + ttm_bo_type_kernel, + ttm_bo_type_sg +}; + +struct ttm_tt; + +/** + * struct ttm_buffer_object + * + * @bdev: Pointer to the buffer object device structure. + * @type: The bo type. + * @destroy: Destruction function. If NULL, kfree is used. + * @num_pages: Actual number of pages. + * @acc_size: Accounted size for this object. + * @kref: Reference count of this buffer object. When this refcount reaches + * zero, the object is put on the delayed delete list. + * @list_kref: List reference count of this buffer object. This member is + * used to avoid destruction while the buffer object is still on a list. + * Lru lists may keep one refcount, the delayed delete list, and kref != 0 + * keeps one refcount. When this refcount reaches zero, + * the object is destroyed. + * @mem: structure describing current placement. + * @persistent_swap_storage: Usually the swap storage is deleted for buffers + * pinned in physical memory. If this behaviour is not desired, this member + * holds a pointer to a persistent shmem object. + * @ttm: TTM structure holding system pages. + * @evicted: Whether the object was evicted without user-space knowing. + * @cpu_writes: For synchronization. Number of cpu writers. + * @lru: List head for the lru list. + * @ddestroy: List head for the delayed destroy list. + * @swap: List head for swap LRU list. + * @moving: Fence set when BO is moving + * @vma_node: Address space manager node. + * @offset: The current GPU offset, which can have different meanings + * depending on the memory type. For SYSTEM type memory, it should be 0. + * @cur_placement: Hint of current placement. + * @wu_mutex: Wait unreserved mutex. + * + * Base class for TTM buffer object, that deals with data placement and CPU + * mappings. GPU mappings are really up to the driver, but for simpler GPUs + * the driver can usually use the placement offset @offset directly as the + * GPU virtual address. For drivers implementing multiple + * GPU memory manager contexts, the driver should manage the address space + * in these contexts separately and use these objects to get the correct + * placement and caching for these GPU maps. 
This makes it possible to use + * these objects for even quite elaborate memory management schemes. + * The destroy member, the API visibility of this object makes it possible + * to derive driver specific types. + */ + +struct ttm_buffer_object { + /** + * Members constant at init. + */ + + struct ttm_bo_device *bdev; + enum ttm_bo_type type; + void (*destroy) (struct ttm_buffer_object *); + unsigned long num_pages; + size_t acc_size; + + /** + * Members not needing protection. + */ + + struct kref kref; + struct kref list_kref; + + /** + * Members protected by the bo::resv::reserved lock. + */ + + struct ttm_mem_reg mem; + struct file *persistent_swap_storage; + struct ttm_tt *ttm; + bool evicted; + + /** + * Members protected by the bo::reserved lock only when written to. + */ + + atomic_t cpu_writers; + + /** + * Members protected by the bdev::lru_lock. + */ + + struct list_head lru; + struct list_head ddestroy; + struct list_head swap; + struct list_head io_reserve_lru; + + /** + * Members protected by a bo reservation. + */ + + struct dma_fence *moving; + + struct drm_vma_offset_node vma_node; + + unsigned priority; + + /** + * Special members that are protected by the reserve lock + * and the bo::lock when written to. Can be read with + * either of these locks held. + */ + + uint64_t offset; /* GPU address space is independent of CPU word size */ + + struct sg_table *sg; + + struct reservation_object *resv; + struct reservation_object ttm_resv; + struct mutex wu_mutex; +}; + +/** + * struct ttm_bo_kmap_obj + * + * @virtual: The current kernel virtual address. + * @page: The page when kmap'ing a single page. + * @bo_kmap_type: Type of bo_kmap. + * + * Object describing a kernel mapping. Since a TTM bo may be located + * in various memory types with various caching policies, the + * mapping can either be an ioremap, a vmap, a kmap or part of a + * premapped region. + */ + +#define TTM_BO_MAP_IOMEM_MASK 0x80 +struct ttm_bo_kmap_obj { + void *virtual; + struct page *page; + enum { + ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK, + ttm_bo_map_vmap = 2, + ttm_bo_map_kmap = 3, + ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK, + } bo_kmap_type; + struct ttm_buffer_object *bo; +}; + +/** + * struct ttm_operation_ctx + * + * @interruptible: Sleep interruptible if sleeping. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @resv: Reservation object to allow reserved evictions with. + * @flags: Including the following flags + * + * Context for TTM operations like changing buffer placement or general memory + * allocation. + */ +struct ttm_operation_ctx { + bool interruptible; + bool no_wait_gpu; + struct reservation_object *resv; + uint64_t bytes_moved; + uint32_t flags; +}; + +/* Allow eviction of reserved BOs */ +#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1 +/* when serving page fault or suspend, allow alloc anyway */ +#define TTM_OPT_FLAG_FORCE_ALLOC 0x2 + +/** + * ttm_bo_get - reference a struct ttm_buffer_object + * + * @bo: The buffer object. + */ +static inline void ttm_bo_get(struct ttm_buffer_object *bo) +{ + kref_get(&bo->kref); +} + +/** + * ttm_bo_reference - reference a struct ttm_buffer_object + * + * @bo: The buffer object. + * + * Returns a refcounted pointer to a buffer object. + * + * This function is deprecated. Use @ttm_bo_get instead. + */ + +static inline struct ttm_buffer_object * +ttm_bo_reference(struct ttm_buffer_object *bo) +{ + ttm_bo_get(bo); + return bo; +} + +/** + * ttm_bo_wait - wait for buffer idle. + * + * @bo: The buffer object. 
+ * @interruptible: Use interruptible wait. + * @no_wait: Return immediately if buffer is busy. + * + * This function must be called with the bo::mutex held, and makes + * sure any previous rendering to the buffer is completed. + * Note: It might be necessary to block validations before the + * wait by reserving the buffer. + * Returns -EBUSY if no_wait is true and the buffer is busy. + * Returns -ERESTARTSYS if interrupted by a signal. + */ +int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); + +/** + * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo + * + * @placement: Return immediately if buffer is busy. + * @mem: The struct ttm_mem_reg indicating the region where the bo resides + * @new_flags: Describes compatible placement found + * + * Returns true if the placement is compatible + */ +bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem, + uint32_t *new_flags); + +/** + * ttm_bo_validate + * + * @bo: The buffer object. + * @placement: Proposed placement for the buffer object. + * @ctx: validation parameters. + * + * Changes placement and caching policy of the buffer object + * according proposed placement. + * Returns + * -EINVAL on invalid proposed placement. + * -ENOMEM on out-of-memory condition. + * -EBUSY if no_wait is true and buffer busy. + * -ERESTARTSYS if interrupted by a signal. + */ +int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_operation_ctx *ctx); + +/** + * ttm_bo_put + * + * @bo: The buffer object. + * + * Unreference a buffer object. + */ +void ttm_bo_put(struct ttm_buffer_object *bo); + +/** + * ttm_bo_unref + * + * @bo: The buffer object. + * + * Unreference and clear a pointer to a buffer object. + * + * This function is deprecated. Use @ttm_bo_put instead. + */ +void ttm_bo_unref(struct ttm_buffer_object **bo); + +/** + * ttm_bo_add_to_lru + * + * @bo: The buffer object. + * + * Add this bo to the relevant mem type lru and, if it's backed by + * system pages (ttms) to the swap list. + * This function must be called with struct ttm_bo_global::lru_lock held, and + * is typically called immediately prior to unreserving a bo. + */ +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); + +/** + * ttm_bo_del_from_lru + * + * @bo: The buffer object. + * + * Remove this bo from all lru lists used to lookup and reserve an object. + * This function must be called with struct ttm_bo_global::lru_lock held, + * and is usually called just immediately after the bo has been reserved to + * avoid recursive reservation from lru lists. + */ +void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); + +/** + * ttm_bo_move_to_lru_tail + * + * @bo: The buffer object. + * + * Move this BO to the tail of all lru lists used to lookup and reserve an + * object. This function must be called with struct ttm_bo_global::lru_lock + * held, and is used to make a BO less likely to be considered for eviction. + */ +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); + +/** + * ttm_bo_lock_delayed_workqueue + * + * Prevent the delayed workqueue from running. + * Returns + * True if the workqueue was queued at the time + */ +int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); + +/** + * ttm_bo_unlock_delayed_workqueue + * + * Allows the delayed workqueue to run. 
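+ *
+ * A minimal usage sketch (hypothetical driver teardown code, not part of
+ * this header); the value returned by ttm_bo_lock_delayed_workqueue() is
+ * passed back as @resched so the work is only rescheduled if it was
+ * actually queued:
+ *
+ *   int resched = ttm_bo_lock_delayed_workqueue(bdev);
+ *   ... tear down or suspend device state ...
+ *   ttm_bo_unlock_delayed_workqueue(bdev, resched);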
+ */ +void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); + +/** + * ttm_bo_eviction_valuable + * + * @bo: The buffer object to evict + * @place: the placement we need to make room for + * + * Check if it is valuable to evict the BO to make room for the given placement. + */ +bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, + const struct ttm_place *place); + +/** + * ttm_bo_synccpu_write_grab + * + * @bo: The buffer object: + * @no_wait: Return immediately if buffer is busy. + * + * Synchronizes a buffer object for CPU RW access. This means + * command submission that affects the buffer will return -EBUSY + * until ttm_bo_synccpu_write_release is called. + * + * Returns + * -EBUSY if the buffer is busy and no_wait is true. + * -ERESTARTSYS if interrupted by a signal. + */ +int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); + +/** + * ttm_bo_synccpu_write_release: + * + * @bo : The buffer object. + * + * Releases a synccpu lock. + */ +void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); + +/** + * ttm_bo_acc_size + * + * @bdev: Pointer to a ttm_bo_device struct. + * @bo_size: size of the buffer object in byte. + * @struct_size: size of the structure holding buffer object datas + * + * Returns size to account for a buffer object + */ +size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, + unsigned long bo_size, + unsigned struct_size); +size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, + unsigned long bo_size, + unsigned struct_size); + +/** + * ttm_bo_init_reserved + * + * @bdev: Pointer to a ttm_bo_device struct. + * @bo: Pointer to a ttm_buffer_object to be initialized. + * @size: Requested size of buffer object. + * @type: Requested type of buffer object. + * @flags: Initial placement flags. + * @page_alignment: Data alignment in pages. + * @ctx: TTM operation context for memory allocation. + * @acc_size: Accounted size for this object. + * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. + * @destroy: Destroy function. Use NULL for kfree(). + * + * This function initializes a pre-allocated struct ttm_buffer_object. + * As this object may be part of a larger structure, this function, + * together with the @destroy function, + * enables driver-specific objects derived from a ttm_buffer_object. + * + * On successful return, the caller owns an object kref to @bo. The kref and + * list_kref are usually set to 1, but note that in some situations, other + * tasks may already be holding references to @bo as well. + * Furthermore, if resv == NULL, the buffer's reservation lock will be held, + * and it is the caller's responsibility to call ttm_bo_unreserve. + * + * If a failure occurs, the function will call the @destroy function, or + * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is + * illegal and will likely cause memory corruption. + * + * Returns + * -ENOMEM: Out of memory. + * -EINVAL: Invalid placement flags. + * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. + */ + +int ttm_bo_init_reserved(struct ttm_bo_device *bdev, + struct ttm_buffer_object *bo, + unsigned long size, + enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, + struct ttm_operation_ctx *ctx, + size_t acc_size, + struct sg_table *sg, + struct reservation_object *resv, + void (*destroy) (struct ttm_buffer_object *)); + +/** + * ttm_bo_init + * + * @bdev: Pointer to a ttm_bo_device struct. 
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep to wait for GPU resources,
+ * sleep interruptible.
+ * @acc_size: Accounted size for this object.
+ * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref and
+ * list_kref are usually set to 1, but note that in some situations, other
+ * tasks may already be holding references to @bo as well.
+ *
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
+ unsigned long size, enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment, bool interruptible, size_t acc_size,
+ struct sg_table *sg, struct reservation_object *resv,
+ void (*destroy) (struct ttm_buffer_object *));
+
+/**
+ * ttm_bo_create
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep while waiting for GPU resources,
+ * sleep interruptible.
+ * @p_bo: On successful completion *p_bo points to the created object.
+ *
+ * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
+ * on that object. The destroy function is set to kfree().
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while waiting for resources.
+ */
+int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t page_alignment, bool interruptible,
+ struct ttm_buffer_object **p_bo);
+
+/**
+ * ttm_bo_init_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ * @p_size: Size of the managed area in pages.
+ *
+ * Initialize a manager for a given memory type.
+ * Note: if part of driver firstopen, it must be protected from a
+ * potentially racing lastclose.
+ * Returns:
+ * -EINVAL: invalid size or memory type.
+ * -ENOMEM: Not enough memory.
+ * May also return driver-specified errors.
+ */
+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+ unsigned long p_size);
+
+/**
+ * ttm_bo_clean_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Take down a manager for a given memory type after first walking
+ * the LRU list to evict any buffers left alive.
+ *
+ * Normally, this function is part of lastclose() or unload(), and at that
+ * point there shouldn't be any buffers left created by user-space, since
+ * they should have been removed by the file descriptor release() method.
+ * However, before this function is run, make sure to signal all sync objects,
+ * and verify that the delayed delete queue is empty. The driver must also
+ * make sure that there are no NO_EVICT buffers present in this memory type
+ * when the call is made.
+ *
+ * If this function is part of a VT switch, the caller must make sure that
+ * there are no applications currently validating buffers before this
+ * function is called. The caller can do that by first taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: invalid or uninitialized memory type.
+ * -EBUSY: There are still buffers left in this memory type.
+ */
+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_bo_evict_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Evicts all buffers on the lru list of the memory type.
+ * This is normally part of a VT switch or an
+ * out-of-memory-space-due-to-fragmentation handler.
+ * The caller must make sure that there are no other processes
+ * currently validating buffers, and can do that by taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: Invalid or uninitialized memory type.
+ * -ERESTARTSYS: The call was interrupted by a signal while waiting to
+ * evict a buffer.
+ */
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_kmap_obj_virtual
+ *
+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
+ * @is_iomem: Pointer to a boolean that on return indicates true if the
+ * virtual map is io memory, false if normal memory.
+ *
+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
+ * If *is_iomem is true on return, the virtual address points to an io memory
+ * area that should strictly be accessed by the iowriteXX() and similar
+ * functions.
+ */
+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
+ bool *is_iomem)
+{
+ *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
+ return map->virtual;
+}
+
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
+int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
+
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
+
+/**
+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ *
+ * @vma: vma as input from the fbdev mmap method.
+ * @bo: The bo backing the address space. The address space will
+ * have the same size as the bo, and start at offset 0.
+ * + * This function is intended to be called by the fbdev mmap method + * if the fbdev address space is to be backed by a bo. + */ +int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo); + +/** + * ttm_bo_mmap - mmap out of the ttm device address space. + * + * @filp: filp as input from the mmap method. + * @vma: vma as input from the mmap method. + * @bdev: Pointer to the ttm_bo_device with the address space manager. + * + * This function is intended to be called by the device mmap method. + * if the device address space is to be backed by the bo manager. + */ +int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, + struct ttm_bo_device *bdev); + +void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot); + +void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot); + +/** + * ttm_bo_io + * + * @bdev: Pointer to the struct ttm_bo_device. + * @filp: Pointer to the struct file attempting to read / write. + * @wbuf: User-space pointer to address of buffer to write. NULL on read. + * @rbuf: User-space pointer to address of buffer to read into. + * Null on write. + * @count: Number of bytes to read / write. + * @f_pos: Pointer to current file position. + * @write: 1 for read, 0 for write. + * + * This function implements read / write into ttm buffer objects, and is + * intended to + * be called from the fops::read and fops::write method. + * Returns: + * See man (2) write, man(2) read. In particular, + * the function may return -ERESTARTSYS if + * interrupted by a signal. + */ +ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, + const char __user *wbuf, char __user *rbuf, + size_t count, loff_t *f_pos, bool write); + +int ttm_bo_swapout(struct ttm_bo_global *glob, + struct ttm_operation_ctx *ctx); +void ttm_bo_swapout_all(struct ttm_bo_device *bdev); +int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); +#endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h new file mode 100644 index 000000000..3234cc322 --- /dev/null +++ b/include/drm/ttm/ttm_bo_driver.h @@ -0,0 +1,872 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+#ifndef _TTM_BO_DRIVER_H_
+#define _TTM_BO_DRIVER_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ttm_bo_api.h"
+#include "ttm_memory.h"
+#include "ttm_module.h"
+#include "ttm_placement.h"
+#include "ttm_tt.h"
+
+#define TTM_MAX_BO_PRIORITY 4U
+
+#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
+#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
+#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
+
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+ /**
+ * struct ttm_mem_type_manager member init
+ *
+ * @man: Pointer to a memory type manager.
+ * @p_size: Implementation dependent, but typically the size of the
+ * range to be managed in pages.
+ *
+ * Called to initialize a private range manager. The function is
+ * expected to initialize the man::priv member.
+ * Returns 0 on success, negative error code on failure.
+ */
+ int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+
+ /**
+ * struct ttm_mem_type_manager member takedown
+ *
+ * @man: Pointer to a memory type manager.
+ *
+ * Called to undo the setup done in init. All allocated resources
+ * should be freed.
+ */
+ int (*takedown)(struct ttm_mem_type_manager *man);
+
+ /**
+ * struct ttm_mem_type_manager member get_node
+ *
+ * @man: Pointer to a memory type manager.
+ * @bo: Pointer to the buffer object we're allocating space for.
+ * @place: Placement details.
+ * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+ *
+ * This function should allocate space in the memory type managed
+ * by @man. Placement details if
+ * applicable are given by @place. If successful,
+ * @mem::mm_node should be set to a non-null value, and
+ * @mem::start should be set to a value identifying the beginning
+ * of the range allocated, and the function should return zero.
+ * If the memory region cannot accommodate the buffer object,
+ * @mem::mm_node should be set to NULL, and the function should return 0.
+ * If a system error occurred, preventing the request from being
+ * fulfilled, the function should return a negative error code.
+ *
+ * Note that @mem::mm_node will only be dereferenced by
+ * struct ttm_mem_type_manager functions and optionally by the driver,
+ * which has knowledge of the underlying type.
+ *
+ * This function may not be called from within atomic context, so
+ * an implementation must use either a mutex or a spinlock to
+ * protect any data structures managing the space.
+ */
+ int (*get_node)(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem);
+
+ /**
+ * struct ttm_mem_type_manager member put_node
+ *
+ * @man: Pointer to a memory type manager.
+ * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+ *
+ * This function frees memory type resources previously allocated
+ * and that are identified by @mem::mm_node and @mem::start. May not
+ * be called from within atomic context.
+ */
+ void (*put_node)(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem);
+
+ /**
+ * struct ttm_mem_type_manager member debug
+ *
+ * @man: Pointer to a memory type manager.
+ * @printer: Pointer to the struct drm_printer used for the output.
+ *
+ * This function is called to print out the state of the memory
+ * type manager to aid debugging of out-of-memory conditions.
+ * It may not be called from within atomic context.
+ */
+ void (*debug)(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer);
+};
+
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above.
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @move_lock: lock for the move fence.
+ * @lru: The lru list for this memory type.
+ * @move: The fence of the last pipelined move operation.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+struct ttm_mem_type_manager {
+ struct ttm_bo_device *bdev;
+
+ /*
+ * No protection. Constant from start.
+ */
+
+ bool has_type;
+ bool use_type;
+ uint32_t flags;
+ uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
+ uint64_t size;
+ uint32_t available_caching;
+ uint32_t default_caching;
+ const struct ttm_mem_type_manager_func *func;
+ void *priv;
+ struct mutex io_reserve_mutex;
+ bool use_io_reserve_lru;
+ bool io_reserve_fastpath;
+ spinlock_t move_lock;
+
+ /*
+ * Protected by @io_reserve_mutex:
+ */
+
+ struct list_head io_reserve_lru;
+
+ /*
+ * Protected by the global->lru_lock.
+ */
+
+ struct list_head lru[TTM_MAX_BO_PRIORITY];
+
+ /*
+ * Protected by @move_lock.
+ */
+ struct dma_fence *move;
+};
+
+/**
+ * struct ttm_bo_driver
+ *
+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
+ * has been evicted.
+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
+ * structure.
+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
+ * @move: Callback for a driver to hook in accelerated functions to
+ * move a buffer.
+ * If set to NULL, a potentially slow memcpy() move is used.
+ */
+
+struct ttm_bo_driver {
+ /**
+ * ttm_tt_create
+ *
+ * @bo: The buffer object to create the ttm for.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Allocate all backing pages.
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Free all backing pages.
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_bo_driver member invalidate_caches
+ *
+ * @bdev: the buffer object device.
+ * @flags: new placement of the rebound buffer object.
+ *
+ * A previously evicted buffer has been rebound in a
+ * potentially new location. Tell the driver that it might
+ * consider invalidating read (texture) caches on the next command
+ * submission as a consequence.
+ */
+
+ int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
+ int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man);
+
+ /**
+ * struct ttm_bo_driver member eviction_valuable
+ *
+ * @bo: the buffer object to be evicted
+ * @place: placement we need room for
+ *
+ * Check with the driver if it is valuable to evict a BO to make room
+ * for a certain placement.
+ */
+ bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+ /**
+ * struct ttm_bo_driver member evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ */
+
+ void (*evict_flags)(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
+ /**
+ * struct ttm_bo_driver member move:
+ *
+ * @bo: the buffer to move
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ * @ctx: context for this move with parameters
+ * @new_mem: the new memory region receiving the buffer
+ *
+ * Move a buffer between two memory regions.
+ */
+ int (*move)(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_mem_reg *new_mem);
+
+ /**
+ * struct ttm_bo_driver member verify_access
+ *
+ * @bo: Pointer to a buffer object.
+ * @filp: Pointer to a struct file trying to access the object.
+ *
+ * Called from the map / write / read methods to verify that the
+ * caller is permitted to access the buffer object.
+ * This member may be set to NULL, which will refuse this kind of
+ * access for all buffer objects.
+ * This function should return 0 if access is granted, -EPERM otherwise.
+ */
+ int (*verify_access)(struct ttm_buffer_object *bo,
+ struct file *filp);
+
+ /**
+ * Hook to notify the driver about a buffer move so it
+ * can do tiling things and book-keeping.
+ *
+ * @evict: whether this move is evicting the buffer from the graphics
+ * address space
+ */
+ void (*move_notify)(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_mem_reg *new_mem);
+ /* notify the driver we are taking a fault on this BO
+ * and have reserved it */
+ int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * notify the driver that we're about to swap out this bo
+ */
+ void (*swap_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Driver callback invoked when mapping io memory (for bo_move_memcpy
+ * for instance). TTM will take care to call io_mem_free whenever
+ * the mapping is no longer used. io_mem_reserve & io_mem_free
+ * are balanced.
+ */ + int (*io_mem_reserve)(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem); + void (*io_mem_free)(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem); + + /** + * Return the pfn for a given page_offset inside the BO. + * + * @bo: the BO to look up the pfn for + * @page_offset: the offset to look up + */ + unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, + unsigned long page_offset); + + /** + * Read/write memory buffers for ptrace access + * + * @bo: the BO to access + * @offset: the offset from the start of the BO + * @buf: pointer to source/destination buffer + * @len: number of bytes to copy + * @write: whether to read (0) from or write (non-0) to BO + * + * If successful, this function should return the number of + * bytes copied, -EIO otherwise. If the number of bytes + * returned is < len, the function may be called again with + * the remainder of the buffer to copy. + */ + int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset, + void *buf, int len, int write); +}; + +/** + * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global. + */ + +struct ttm_bo_global_ref { + struct drm_global_reference ref; + struct ttm_mem_global *mem_glob; +}; + +/** + * struct ttm_bo_global - Buffer object driver global data. + * + * @mem_glob: Pointer to a struct ttm_mem_global object for accounting. + * @dummy_read_page: Pointer to a dummy page used for mapping requests + * of unpopulated pages. + * @shrink: A shrink callback object used for buffer object swap. + * @device_list_mutex: Mutex protecting the device list. + * This mutex is held while traversing the device list for pm options. + * @lru_lock: Spinlock protecting the bo subsystem lru lists. + * @device_list: List of buffer object devices. + * @swap_lru: Lru list of buffer objects used for swapping. + */ + +struct ttm_bo_global { + + /** + * Constant after init. + */ + + struct kobject kobj; + struct ttm_mem_global *mem_glob; + struct page *dummy_read_page; + struct mutex device_list_mutex; + spinlock_t lru_lock; + + /** + * Protected by device_list_mutex. + */ + struct list_head device_list; + + /** + * Protected by the lru_lock. + */ + struct list_head swap_lru[TTM_MAX_BO_PRIORITY]; + + /** + * Internal protection. + */ + atomic_t bo_count; +}; + + +#define TTM_NUM_MEM_TYPES 8 + +/** + * struct ttm_bo_device - Buffer object driver device-specific data. + * + * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. + * @man: An array of mem_type_managers. + * @vma_manager: Address space manager + * lru_lock: Spinlock that protects the buffer+device lru lists and + * ddestroy lists. + * @dev_mapping: A pointer to the struct address_space representing the + * device address space. + * @wq: Work queue structure for the delayed delete workqueue. + * @no_retry: Don't retry allocation if it fails + * + */ + +struct ttm_bo_device { + + /* + * Constant after bo device init / atomic. + */ + struct list_head device_list; + struct ttm_bo_global *glob; + struct ttm_bo_driver *driver; + struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; + + /* + * Protected by internal locks. + */ + struct drm_vma_offset_manager vma_manager; + + /* + * Protected by the global:lru lock. + */ + struct list_head ddestroy; + + /* + * Protected by load / firstopen / lastclose /unload sync. + */ + + struct address_space *dev_mapping; + + /* + * Internal protection. 
+ */
+
+ struct delayed_work wq;
+
+ bool need_dma32;
+
+ bool no_retry;
+};
+
+/**
+ * ttm_flag_masked
+ *
+ * @old: Pointer to the result and original value.
+ * @new: New value of bits.
+ * @mask: Mask of bits to change.
+ *
+ * Convenience function to change a number of bits identified by a mask.
+ */
+
+static inline uint32_t
+ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
+{
+ *old ^= (*old ^ new) & mask;
+ return *old;
+}
+
+/*
+ * ttm_bo.c
+ */
+
+/**
+ * ttm_mem_reg_is_pci
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @mem: A valid struct ttm_mem_reg.
+ *
+ * Returns true if the memory described by @mem is PCI memory,
+ * false otherwise.
+ */
+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+
+/**
+ * ttm_bo_mem_space
+ *
+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
+ * we want to allocate space for.
+ * @placement: Proposed new placement for the buffer object.
+ * @mem: A struct ttm_mem_reg.
+ * @ctx: TTM operation context; controls interruptible sleeping and
+ * whether to return immediately if the GPU is busy.
+ *
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @mem, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == 1).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ struct ttm_operation_ctx *ctx);
+
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
+void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+
+void ttm_bo_global_release(struct drm_global_reference *ref);
+int ttm_bo_global_init(struct drm_global_reference *ref);
+
+int ttm_bo_device_release(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_device_init
+ *
+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
+ * @glob: A pointer to an initialized struct ttm_bo_global.
+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @mapping: The address space to use for this bo.
+ * @file_page_offset: Offset into the device address space that is available
+ * for buffer data. This ensures compatibility with other users of the
+ * address space.
+ *
+ * Initializes a struct ttm_bo_device.
+ * Returns:
+ * !0: Failure.
+ */
+int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
+ struct ttm_bo_driver *driver,
+ struct address_space *mapping,
+ uint64_t file_page_offset, bool need_dma32);
+
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ */
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_unmap_virtual_locked
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
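+ *
+ * A minimal sketch of the expected calling pattern (hypothetical caller,
+ * not part of this header):
+ *
+ *   struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type];
+ *
+ *   ttm_mem_io_lock(man, false);
+ *   ttm_bo_unmap_virtual_locked(bo);
+ *   ttm_mem_io_unlock(man);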
+ */ +void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); + +int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); +void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); +int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible); +void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); + +void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); + +/** + * __ttm_bo_reserve: + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. + * @ticket: ticket used to acquire the ww_mutex. + * + * Will not remove reserved buffers from the lru lists. + * Otherwise identical to ttm_bo_reserve. + * + * Returns: + * -EDEADLK: The reservation may cause a deadlock. + * Release all buffer reservations, wait for @bo to become unreserved and + * try again. (only if use_sequence == 1). + * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by + * a signal. Release all buffer reservations and return to user-space. + * -EBUSY: The function needed to sleep, but @no_wait was true + * -EALREADY: Bo already reserved using @ticket. This error code will only + * be returned if @use_ticket is set to true. + */ +static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait, + struct ww_acquire_ctx *ticket) +{ + int ret = 0; + + if (no_wait) { + bool success; + if (WARN_ON(ticket)) + return -EBUSY; + + success = reservation_object_trylock(bo->resv); + return success ? 0 : -EBUSY; + } + + if (interruptible) + ret = reservation_object_lock_interruptible(bo->resv, ticket); + else + ret = reservation_object_lock(bo->resv, ticket); + if (ret == -EINTR) + return -ERESTARTSYS; + return ret; +} + +/** + * ttm_bo_reserve: + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. + * @ticket: ticket used to acquire the ww_mutex. + * + * Locks a buffer object for validation. (Or prevents other processes from + * locking it for validation) and removes it from lru lists, while taking + * a number of measures to prevent deadlocks. + * + * Deadlocks may occur when two processes try to reserve multiple buffers in + * different order, either by will or as a result of a buffer being evicted + * to make room for a buffer already reserved. (Buffers are reserved before + * they are evicted). The following algorithm prevents such deadlocks from + * occurring: + * Processes attempting to reserve multiple buffers other than for eviction, + * (typically execbuf), should first obtain a unique 32-bit + * validation sequence number, + * and call this function with @use_ticket == 1 and @ticket->stamp == the unique + * sequence number. If upon call of this function, the buffer object is already + * reserved, the validation sequence is checked against the validation + * sequence of the process currently reserving the buffer, + * and if the current validation sequence is greater than that of the process + * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps + * waiting for the buffer to become unreserved, after which it retries + * reserving. + * The caller should, when receiving an -EDEADLK error + * release all its buffer reservations, wait for @bo to become unreserved, and + * then rerun the validation with the same validation sequence. 
This procedure + * will always guarantee that the process with the lowest validation sequence + * will eventually succeed, preventing both deadlocks and starvation. + * + * Returns: + * -EDEADLK: The reservation may cause a deadlock. + * Release all buffer reservations, wait for @bo to become unreserved and + * try again. (only if use_sequence == 1). + * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by + * a signal. Release all buffer reservations and return to user-space. + * -EBUSY: The function needed to sleep, but @no_wait was true + * -EALREADY: Bo already reserved using @ticket. This error code will only + * be returned if @use_ticket is set to true. + */ +static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait, + struct ww_acquire_ctx *ticket) +{ + int ret; + + WARN_ON(!kref_read(&bo->kref)); + + ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket); + if (likely(ret == 0)) + ttm_bo_del_sub_from_lru(bo); + + return ret; +} + +/** + * ttm_bo_reserve_slowpath: + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @sequence: Set (@bo)->sequence to this value after lock + * + * This is called after ttm_bo_reserve returns -EAGAIN and we backed off + * from all our other reservations. Because there are no other reservations + * held by us, this function cannot deadlock any more. + */ +static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, + bool interruptible, + struct ww_acquire_ctx *ticket) +{ + int ret = 0; + + WARN_ON(!kref_read(&bo->kref)); + + if (interruptible) + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + ticket); + else + ww_mutex_lock_slow(&bo->resv->lock, ticket); + + if (likely(ret == 0)) + ttm_bo_del_sub_from_lru(bo); + else if (ret == -EINTR) + ret = -ERESTARTSYS; + + return ret; +} + +/** + * ttm_bo_unreserve + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Unreserve a previous reservation of @bo. + */ +static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) +{ + if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { + spin_lock(&bo->bdev->glob->lru_lock); + ttm_bo_add_to_lru(bo); + spin_unlock(&bo->bdev->glob->lru_lock); + } + reservation_object_unlock(bo->resv); +} + +/* + * ttm_bo_util.c + */ + +int ttm_mem_io_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem); +void ttm_mem_io_free(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem); +/** + * ttm_bo_move_ttm + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @new_mem: struct ttm_mem_reg indicating where to move. + * + * Optimized move function for a buffer object with both old and + * new placement backed by a TTM. The function will, if successful, + * free any old aperture space, and set (@new_mem)->mm_node to NULL, + * and update the (@bo)->mem placement flags. If unsuccessful, the old + * data remains untouched, and it's up to the caller to free the + * memory space indicated by @new_mem. + * Returns: + * !0: Failure. + */ + +int ttm_bo_move_ttm(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_mem_reg *new_mem); + +/** + * ttm_bo_move_memcpy + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @new_mem: struct ttm_mem_reg indicating where to move. 
+ * + * Fallback move function for a mappable buffer object in mappable memory. + * The function will, if successful, + * free any old aperture space, and set (@new_mem)->mm_node to NULL, + * and update the (@bo)->mem placement flags. If unsuccessful, the old + * data remains untouched, and it's up to the caller to free the + * memory space indicated by @new_mem. + * Returns: + * !0: Failure. + */ + +int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_mem_reg *new_mem); + +/** + * ttm_bo_free_old_node + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Utility function to free an old placement after a successful move. + */ +void ttm_bo_free_old_node(struct ttm_buffer_object *bo); + +/** + * ttm_bo_move_accel_cleanup. + * + * @bo: A pointer to a struct ttm_buffer_object. + * @fence: A fence object that signals when moving is complete. + * @evict: This is an evict move. Don't return until the buffer is idle. + * @new_mem: struct ttm_mem_reg indicating where to move. + * + * Accelerated move function to be called when an accelerated move + * has been scheduled. The function will create a new temporary buffer object + * representing the old placement, and put the sync object on both buffer + * objects. After that the newly created buffer object is unref'd to be + * destroyed when the move is complete. This will help pipeline + * buffer moves. + */ +int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + struct dma_fence *fence, bool evict, + struct ttm_mem_reg *new_mem); + +/** + * ttm_bo_pipeline_move. + * + * @bo: A pointer to a struct ttm_buffer_object. + * @fence: A fence object that signals when moving is complete. + * @evict: This is an evict move. Don't return until the buffer is idle. + * @new_mem: struct ttm_mem_reg indicating where to move. + * + * Function for pipelining accelerated moves. Either free the memory + * immediately or hang it on a temporary buffer object. + */ +int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, + struct dma_fence *fence, bool evict, + struct ttm_mem_reg *new_mem); + +/** + * ttm_bo_pipeline_gutting. + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Pipelined gutting a BO of it's backing store. + */ +int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo); + +/** + * ttm_io_prot + * + * @c_state: Caching state. + * @tmp: Page protection flag for a normal, cached mapping. + * + * Utility function that returns the pgprot_t that should be used for + * setting up a PTE with the caching model indicated by @c_state. + */ +pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); + +extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; + +#endif diff --git a/include/drm/ttm/ttm_debug.h b/include/drm/ttm/ttm_debug.h new file mode 100644 index 000000000..b5e460fa5 --- /dev/null +++ b/include/drm/ttm/ttm_debug.h @@ -0,0 +1,31 @@ +/************************************************************************** + * + * Copyright (c) 2017 Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Tom St Denis + */ +extern void ttm_trace_dma_map(struct device *dev, struct ttm_dma_tt *tt); +extern void ttm_trace_dma_unmap(struct device *dev, struct ttm_dma_tt *tt); diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h new file mode 100644 index 000000000..b0fdd1980 --- /dev/null +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -0,0 +1,120 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _TTM_EXECBUF_UTIL_H_ +#define _TTM_EXECBUF_UTIL_H_ + +#include + +#include "ttm_bo_api.h" + +/** + * struct ttm_validate_buffer + * + * @head: list head for thread-private list. + * @bo: refcounted buffer object pointer. + * @shared: should the fence be added shared? 
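+ *
+ * A minimal sketch of the intended flow (hypothetical driver code; @list,
+ * @fence and the error handling are assumptions, not part of this header):
+ *
+ *   struct ww_acquire_ctx ticket;
+ *   int ret;
+ *
+ *   ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ *   if (ret)
+ *       return ret;
+ *   ... build and submit the command stream, obtaining @fence ...
+ *   ttm_eu_fence_buffer_objects(&ticket, &list, fence);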
+ */ + +struct ttm_validate_buffer { + struct list_head head; + struct ttm_buffer_object *bo; + bool shared; +}; + +/** + * function ttm_eu_backoff_reservation + * + * @ticket: ww_acquire_ctx from reserve call + * @list: thread private list of ttm_validate_buffer structs. + * + * Undoes all buffer validation reservations for bos pointed to by + * the list entries. + */ + +extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, + struct list_head *list); + +/** + * function ttm_eu_reserve_buffers + * + * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only + * non-blocking reserves should be tried. + * @list: thread private list of ttm_validate_buffer structs. + * @intr: should the wait be interruptible + * @dups: [out] optional list of duplicates. + * + * Tries to reserve bos pointed to by the list entries for validation. + * If the function returns 0, all buffers are marked as "unfenced", + * taken off the lru lists and are not synced for write CPU usage. + * + * If the function detects a deadlock due to multiple threads trying to + * reserve the same buffers in reverse order, all threads except one will + * back off and retry. This function may sleep while waiting for + * CPU write reservations to be cleared, and for other threads to + * unreserve their buffers. + * + * If intr is set to true, this function may return -ERESTARTSYS if the + * calling process receives a signal while waiting. In that case, no + * buffers on the list will be reserved upon return. + * + * If dups is non NULL all buffers already reserved by the current thread + * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned + * on the first already reserved buffer and all buffers from the list are + * unreserved again. + * + * Buffers reserved by this function should be unreserved by + * a call to either ttm_eu_backoff_reservation() or + * ttm_eu_fence_buffer_objects() when command submission is complete or + * has failed. + */ + +extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, + struct list_head *list, bool intr, + struct list_head *dups); + +/** + * function ttm_eu_fence_buffer_objects. + * + * @ticket: ww_acquire_ctx from reserve call + * @list: thread private list of ttm_validate_buffer structs. + * @fence: The new exclusive fence for the buffers. + * + * This function should be called when command submission is complete, and + * it will add a new sync object to bos pointed to by entries on @list. + * It also unreserves all buffers, putting them on lru lists. + * + */ + +extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, + struct list_head *list, + struct dma_fence *fence); + +#endif diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h new file mode 100644 index 000000000..0c3af9836 --- /dev/null +++ b/include/drm/ttm/ttm_lock.h @@ -0,0 +1,248 @@ +/************************************************************************** + * + * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +/** @file ttm_lock.h + * This file implements a simple replacement for the buffer manager use + * of the DRM heavyweight hardware lock. + * The lock is a read-write lock. Taking it in read mode and write mode + * is relatively fast, and intended for in-kernel use only. + * + * The vt mode is used only when there is a need to block all + * user-space processes from validating buffers. + * It's allowed to leave kernel space with the vt lock held. + * If a user-space process dies while having the vt-lock, + * it will be released during the file descriptor release. The vt lock + * excludes write lock and read lock. + * + * The suspend mode is used to lock out all TTM users when preparing for + * and executing suspend operations. + * + */ + +#ifndef _TTM_LOCK_H_ +#define _TTM_LOCK_H_ + +#include +#include + +#include "ttm_object.h" + +/** + * struct ttm_lock + * + * @base: ttm base object used solely to release the lock if the client + * holding the lock dies. + * @queue: Queue for processes waiting for lock change-of-status. + * @lock: Spinlock protecting some lock members. + * @rw: Read-write lock counter. Protected by @lock. + * @flags: Lock state. Protected by @lock. + * @kill_takers: Boolean whether to kill takers of the lock. + * @signal: Signal to send when kill_takers is true. + */ + +struct ttm_lock { + struct ttm_base_object base; + wait_queue_head_t queue; + spinlock_t lock; + int32_t rw; + uint32_t flags; + bool kill_takers; + int signal; + struct ttm_object_file *vt_holder; +}; + + +/** + * ttm_lock_init + * + * @lock: Pointer to a struct ttm_lock + * Initializes the lock. + */ +extern void ttm_lock_init(struct ttm_lock *lock); + +/** + * ttm_read_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a read lock. + */ +extern void ttm_read_unlock(struct ttm_lock *lock); + +/** + * ttm_read_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Takes the lock in read mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. 
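+ *
+ * A minimal usage sketch (hypothetical caller, not part of this header):
+ *
+ *   ret = ttm_read_lock(lock, true);
+ *   if (ret)
+ *       return ret;
+ *   ... validate buffers ...
+ *   ttm_read_unlock(lock);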
+ */ +extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_read_trylock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Tries to take the lock in read mode. If the lock is already held + * in write mode, the function will return -EBUSY. If the lock is held + * in vt or suspend mode, the function will sleep until these modes + * are unlocked. + * + * Returns: + * -EBUSY The lock was already held in write mode. + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_write_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a write lock. + */ +extern void ttm_write_unlock(struct ttm_lock *lock); + +/** + * ttm_write_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Takes the lock in write mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_lock_downgrade + * + * @lock: Pointer to a struct ttm_lock + * + * Downgrades a write lock to a read lock. + */ +extern void ttm_lock_downgrade(struct ttm_lock *lock); + +/** + * ttm_suspend_lock + * + * @lock: Pointer to a struct ttm_lock + * + * Takes the lock in suspend mode. Excludes read and write mode. + */ +extern void ttm_suspend_lock(struct ttm_lock *lock); + +/** + * ttm_suspend_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a suspend lock + */ +extern void ttm_suspend_unlock(struct ttm_lock *lock); + +/** + * ttm_vt_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * @tfile: Pointer to a struct ttm_object_file to register the lock with. + * + * Takes the lock in vt mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + * -ENOMEM: Out of memory when locking. + */ +extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible, + struct ttm_object_file *tfile); + +/** + * ttm_vt_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a vt lock. + * Returns: + * -EINVAL If the lock was not held. + */ +extern int ttm_vt_unlock(struct ttm_lock *lock); + +/** + * ttm_write_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a write lock. + */ +extern void ttm_write_unlock(struct ttm_lock *lock); + +/** + * ttm_write_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Takes the lock in write mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_lock_set_kill + * + * @lock: Pointer to a struct ttm_lock + * @val: Boolean whether to kill processes taking the lock. + * @signal: Signal to send to the process taking the lock. + * + * The kill-when-taking-lock functionality is used to kill processes that keep + * on using the TTM functionality when its resources has been taken down, for + * example when the X server exits. A typical sequence would look like this: + * - X server takes lock in write mode. + * - ttm_lock_set_kill() is called with @val set to true. + * - As part of X server exit, TTM resources are taken down. + * - X server releases the lock on file release. 
+ * - Another dri client wants to render, takes the lock and is killed. + * + */ +static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, + int signal) +{ + lock->kill_takers = val; + if (val) + lock->signal = signal; +} + +#endif diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h new file mode 100644 index 000000000..737b5fed8 --- /dev/null +++ b/include/drm/ttm/ttm_memory.h @@ -0,0 +1,98 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef TTM_MEMORY_H +#define TTM_MEMORY_H + +#include +#include +#include +#include +#include +#include +#include +#include "ttm_bo_api.h" + +/** + * struct ttm_mem_global - Global memory accounting structure. + * + * @shrink: A single callback to shrink TTM memory usage. Extend this + * to a linked list to be able to handle multiple callbacks when needed. + * @swap_queue: A workqueue to handle shrinking in low memory situations. We + * need a separate workqueue since it will spend a lot of time waiting + * for the GPU, and this will otherwise block other workqueue tasks(?) + * At this point we use only a single-threaded workqueue. + * @work: The workqueue callback for the shrink queue. + * @lock: Lock to protect the @shrink - and the memory accounting members, + * that is, essentially the whole structure with some exceptions. + * @lower_mem_limit: include lower limit of swap space and lower limit of + * system memory. + * @zones: Array of pointers to accounting zones. + * @num_zones: Number of populated entries in the @zones array. + * @zone_kernel: Pointer to the kernel zone. + * @zone_highmem: Pointer to the highmem zone if there is one. + * @zone_dma32: Pointer to the dma32 zone if there is one. + * + * Note that this structure is not per device. It should be global for all + * graphics devices. 
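+ *
+ * Illustrative accounting sequence (a hypothetical caller sketch, not part
+ * of the original header; see the function declarations below): memory is
+ * accounted before it is actually allocated, and released from the
+ * accounting again when it is freed.
+ *
+ *     ret = ttm_mem_global_alloc(glob, size, &ctx);
+ *     if (unlikely(ret != 0))
+ *             return ret;
+ *     (allocate the size bytes that were just accounted)
+ *     ...
+ *     ttm_mem_global_free(glob, size);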
+ */ + +#define TTM_MEM_MAX_ZONES 2 +struct ttm_mem_zone; +struct ttm_mem_global { + struct kobject kobj; + struct ttm_bo_global *bo_glob; + struct workqueue_struct *swap_queue; + struct work_struct work; + spinlock_t lock; + uint64_t lower_mem_limit; + struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; + unsigned int num_zones; + struct ttm_mem_zone *zone_kernel; +#ifdef CONFIG_HIGHMEM + struct ttm_mem_zone *zone_highmem; +#else + struct ttm_mem_zone *zone_dma32; +#endif +}; + +extern int ttm_mem_global_init(struct ttm_mem_global *glob); +extern void ttm_mem_global_release(struct ttm_mem_global *glob); +extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, + struct ttm_operation_ctx *ctx); +extern void ttm_mem_global_free(struct ttm_mem_global *glob, + uint64_t amount); +extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size, + struct ttm_operation_ctx *ctx); +extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size); +extern size_t ttm_round_pot(size_t size); +extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob); +extern bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, + uint64_t num_pages, struct ttm_operation_ctx *ctx); +#endif diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h new file mode 100644 index 000000000..45fa318c1 --- /dev/null +++ b/include/drm/ttm/ttm_module.h @@ -0,0 +1,40 @@ +/************************************************************************** + * + * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _TTM_MODULE_H_ +#define _TTM_MODULE_H_ + +#include +struct kobject; + +#define TTM_PFX "[TTM] " +extern struct kobject *ttm_get_kobj(void); + +#endif /* _TTM_MODULE_H_ */ diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h new file mode 100644 index 000000000..a98bfeb42 --- /dev/null +++ b/include/drm/ttm/ttm_object.h @@ -0,0 +1,354 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ +/** @file ttm_object.h + * + * Base- and reference object implementation for the various + * ttm objects. Implements reference counting, minimal security checks + * and release on file close. + */ + +#ifndef _TTM_OBJECT_H_ +#define _TTM_OBJECT_H_ + +#include +#include +#include +#include +#include + +#include "ttm_memory.h" + +/** + * enum ttm_ref_type + * + * Describes what type of reference a ref object holds. + * + * TTM_REF_USAGE is a simple refcount on a base object. + * + * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a + * buffer object. + * + * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a + * buffer object. + * + */ + +enum ttm_ref_type { + TTM_REF_USAGE, + TTM_REF_SYNCCPU_READ, + TTM_REF_SYNCCPU_WRITE, + TTM_REF_NUM +}; + +/** + * enum ttm_object_type + * + * One entry per ttm object type. + * Device-specific types should use the + * ttm_driver_typex types. + */ + +enum ttm_object_type { + ttm_fence_type, + ttm_buffer_type, + ttm_lock_type, + ttm_prime_type, + ttm_driver_type0 = 256, + ttm_driver_type1, + ttm_driver_type2, + ttm_driver_type3, + ttm_driver_type4, + ttm_driver_type5 +}; + +struct ttm_object_file; +struct ttm_object_device; + +/** + * struct ttm_base_object + * + * @hash: hash entry for the per-device object hash. + * @type: derived type this object is base class for. + * @shareable: Other ttm_object_files can access this object. + * + * @tfile: Pointer to ttm_object_file of the creator. + * NULL if the object was not created by a user request. + * (kernel object). + * + * @refcount: Number of references to this object, not + * including the hash entry. A reference to a base object can + * only be held by a ref object. + * + * @refcount_release: A function to be called when there are + * no more references to this object. This function should + * destroy the object (or make sure destruction eventually happens), + * and when it is called, the object has + * already been taken out of the per-device hash. The parameter + * "base" should be set to NULL by the function. + * + * @ref_obj_release: A function to be called when a reference object + * with another ttm_ref_type than TTM_REF_USAGE is deleted. + * This function may, for example, release a lock held by a user-space + * process. 
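+ *
+ * Illustrative @refcount_release implementation (a hypothetical driver
+ * sketch, not part of the original header; "struct my_object" and
+ * my_object_release() are assumed names):
+ *
+ *     static void my_object_release(struct ttm_base_object **p_base)
+ *     {
+ *             struct ttm_base_object *base = *p_base;
+ *             struct my_object *obj =
+ *                     container_of(base, struct my_object, base);
+ *
+ *             *p_base = NULL;
+ *             kfree(obj);
+ *     }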
+ * + * This struct is intended to be used as a base struct for objects that + * are visible to user-space. It provides a global name, race-safe + * access and refcounting, minimal access contol and hooks for unref actions. + */ + +struct ttm_base_object { + struct rcu_head rhead; + struct drm_hash_item hash; + enum ttm_object_type object_type; + bool shareable; + struct ttm_object_file *tfile; + struct kref refcount; + void (*refcount_release) (struct ttm_base_object **base); + void (*ref_obj_release) (struct ttm_base_object *base, + enum ttm_ref_type ref_type); +}; + + +/** + * struct ttm_prime_object - Modified base object that is prime-aware + * + * @base: struct ttm_base_object that we derive from + * @mutex: Mutex protecting the @dma_buf member. + * @size: Size of the dma_buf associated with this object + * @real_type: Type of the underlying object. Needed since we're setting + * the value of @base::object_type to ttm_prime_type + * @dma_buf: Non ref-coutned pointer to a struct dma_buf created from this + * object. + * @refcount_release: The underlying object's release method. Needed since + * we set @base::refcount_release to our own release method. + */ + +struct ttm_prime_object { + struct ttm_base_object base; + struct mutex mutex; + size_t size; + enum ttm_object_type real_type; + struct dma_buf *dma_buf; + void (*refcount_release) (struct ttm_base_object **); +}; + +/** + * ttm_base_object_init + * + * @tfile: Pointer to a struct ttm_object_file. + * @base: The struct ttm_base_object to initialize. + * @shareable: This object is shareable with other applcations. + * (different @tfile pointers.) + * @type: The object type. + * @refcount_release: See the struct ttm_base_object description. + * @ref_obj_release: See the struct ttm_base_object description. + * + * Initializes a struct ttm_base_object. + */ + +extern int ttm_base_object_init(struct ttm_object_file *tfile, + struct ttm_base_object *base, + bool shareable, + enum ttm_object_type type, + void (*refcount_release) (struct ttm_base_object + **), + void (*ref_obj_release) (struct ttm_base_object + *, + enum ttm_ref_type + ref_type)); + +/** + * ttm_base_object_lookup + * + * @tfile: Pointer to a struct ttm_object_file. + * @key: Hash key + * + * Looks up a struct ttm_base_object with the key @key. + */ + +extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file + *tfile, uint32_t key); + +/** + * ttm_base_object_lookup_for_ref + * + * @tdev: Pointer to a struct ttm_object_device. + * @key: Hash key + * + * Looks up a struct ttm_base_object with the key @key. + * This function should only be used when the struct tfile associated with the + * caller doesn't yet have a reference to the base object. + */ + +extern struct ttm_base_object * +ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key); + +/** + * ttm_base_object_unref + * + * @p_base: Pointer to a pointer referencing a struct ttm_base_object. + * + * Decrements the base object refcount and clears the pointer pointed to by + * p_base. + */ + +extern void ttm_base_object_unref(struct ttm_base_object **p_base); + +/** + * ttm_ref_object_add. + * + * @tfile: A struct ttm_object_file representing the application owning the + * ref_object. + * @base: The base object to reference. + * @ref_type: The type of reference. + * @existed: Upon completion, indicates that an identical reference object + * already existed, and the refcount was upped on that object instead. 
+ * @require_existed: Fail with -EPERM if an identical ref object didn't + * already exist. + * + * Checks that the base object is shareable and adds a ref object to it. + * + * Adding a ref object to a base object is basically like referencing the + * base object, but a user-space application holds the reference. When the + * file corresponding to @tfile is closed, all its reference objects are + * deleted. A reference object can have different types depending on what + * it's intended for. It can be refcounting to prevent object destruction, + * When user-space takes a lock, it can add a ref object to that lock to + * make sure the lock is released if the application dies. A ref object + * will hold a single reference on a base object. + */ +extern int ttm_ref_object_add(struct ttm_object_file *tfile, + struct ttm_base_object *base, + enum ttm_ref_type ref_type, bool *existed, + bool require_existed); + +extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, + struct ttm_base_object *base); + +/** + * ttm_ref_object_base_unref + * + * @key: Key representing the base object. + * @ref_type: Ref type of the ref object to be dereferenced. + * + * Unreference a ref object with type @ref_type + * on the base object identified by @key. If there are no duplicate + * references, the ref object will be destroyed and the base object + * will be unreferenced. + */ +extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile, + unsigned long key, + enum ttm_ref_type ref_type); + +/** + * ttm_object_file_init - initialize a struct ttm_object file + * + * @tdev: A struct ttm_object device this file is initialized on. + * @hash_order: Order of the hash table used to hold the reference objects. + * + * This is typically called by the file_ops::open function. + */ + +extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device + *tdev, + unsigned int hash_order); + +/** + * ttm_object_file_release - release data held by a ttm_object_file + * + * @p_tfile: Pointer to pointer to the ttm_object_file object to release. + * *p_tfile will be set to NULL by this function. + * + * Releases all data associated by a ttm_object_file. + * Typically called from file_ops::release. The caller must + * ensure that there are no concurrent users of tfile. + */ + +extern void ttm_object_file_release(struct ttm_object_file **p_tfile); + +/** + * ttm_object device init - initialize a struct ttm_object_device + * + * @mem_glob: struct ttm_mem_global for memory accounting. + * @hash_order: Order of hash table used to hash the base objects. + * @ops: DMA buf ops for prime objects of this device. + * + * This function is typically called on device initialization to prepare + * data structures needed for ttm base and ref objects. + */ + +extern struct ttm_object_device * +ttm_object_device_init(struct ttm_mem_global *mem_glob, + unsigned int hash_order, + const struct dma_buf_ops *ops); + +/** + * ttm_object_device_release - release data held by a ttm_object_device + * + * @p_tdev: Pointer to pointer to the ttm_object_device object to release. + * *p_tdev will be set to NULL by this function. + * + * Releases all data associated by a ttm_object_device. + * Typically called from driver::unload before the destruction of the + * device private data structure. 
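+ *
+ * Illustrative init/release pairing (a hypothetical driver sketch, not part
+ * of the original header; the hash orders and &my_dma_buf_ops are assumed
+ * values):
+ *
+ *     tdev = ttm_object_device_init(mem_glob, 12, &my_dma_buf_ops);
+ *     tfile = ttm_object_file_init(tdev, 10);    (per opened file)
+ *     ...
+ *     ttm_object_file_release(&tfile);           (from file_ops::release)
+ *     ttm_object_device_release(&tdev);          (from driver unload)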
+ */ + +extern void ttm_object_device_release(struct ttm_object_device **p_tdev); + +#define ttm_base_object_kfree(__object, __base)\ + kfree_rcu(__object, __base.rhead) + +extern int ttm_prime_object_init(struct ttm_object_file *tfile, + size_t size, + struct ttm_prime_object *prime, + bool shareable, + enum ttm_object_type type, + void (*refcount_release) + (struct ttm_base_object **), + void (*ref_obj_release) + (struct ttm_base_object *, + enum ttm_ref_type ref_type)); + +static inline enum ttm_object_type +ttm_base_object_type(struct ttm_base_object *base) +{ + return (base->object_type == ttm_prime_type) ? + container_of(base, struct ttm_prime_object, base)->real_type : + base->object_type; +} +extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, + int fd, u32 *handle); +extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, + uint32_t handle, uint32_t flags, + int *prime_fd); + +#define ttm_prime_object_kfree(__obj, __prime) \ + kfree_rcu(__obj, __prime.base.rhead) +#endif diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h new file mode 100644 index 000000000..4d9b019d2 --- /dev/null +++ b/include/drm/ttm/ttm_page_alloc.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) Red Hat Inc. + + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Jerome Glisse + */ +#ifndef TTM_PAGE_ALLOC +#define TTM_PAGE_ALLOC + +#include +#include + +struct device; + +/** + * Initialize pool allocator. + */ +int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); +/** + * Free pool allocator. + */ +void ttm_page_alloc_fini(void); + +/** + * ttm_pool_populate: + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Add backing pages to all of @ttm + */ +int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); + +/** + * ttm_pool_unpopulate: + * + * @ttm: The struct ttm_tt which to free backing pages. 
+ * + * Free all pages of @ttm + */ +void ttm_pool_unpopulate(struct ttm_tt *ttm); + +/** + * Populates and DMA maps pages to fullfil a ttm_dma_populate() request + */ +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, + struct ttm_operation_ctx *ctx); + +/** + * Unpopulates and DMA unmaps pages as part of a + * ttm_dma_unpopulate() request */ +void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt); + +/** + * Output the state of pools to debugfs file + */ +int ttm_page_alloc_debugfs(struct seq_file *m, void *data); + +#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) +/** + * Initialize pool allocator. + */ +int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); + +/** + * Free pool allocator. + */ +void ttm_dma_page_alloc_fini(void); + +/** + * Output the state of pools to debugfs file + */ +int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); + +int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, + struct ttm_operation_ctx *ctx); +void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); + +#else +static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, + unsigned max_pages) +{ + return -ENODEV; +} + +static inline void ttm_dma_page_alloc_fini(void) { return; } + +static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) +{ + return 0; +} +static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, + struct device *dev, + struct ttm_operation_ctx *ctx) +{ + return -ENOMEM; +} +static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, + struct device *dev) +{ +} +#endif + +#endif diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h new file mode 100644 index 000000000..e88a8e397 --- /dev/null +++ b/include/drm/ttm/ttm_placement.h @@ -0,0 +1,108 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _TTM_PLACEMENT_H_ +#define _TTM_PLACEMENT_H_ + +#include + +/* + * Memory regions for data placement. 
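+ *
+ * Illustrative combination (not part of the original header): a
+ * write-combined VRAM placement that may never be evicted would be
+ * expressed in the flags member of a struct ttm_place (defined below) as
+ *
+ *     TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_NO_EVICT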
+ */ + +#define TTM_PL_SYSTEM 0 +#define TTM_PL_TT 1 +#define TTM_PL_VRAM 2 +#define TTM_PL_PRIV 3 + +#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM) +#define TTM_PL_FLAG_TT (1 << TTM_PL_TT) +#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM) +#define TTM_PL_FLAG_PRIV (1 << TTM_PL_PRIV) +#define TTM_PL_MASK_MEM 0x0000FFFF + +/* + * Other flags that affects data placement. + * TTM_PL_FLAG_CACHED indicates cache-coherent mappings + * if available. + * TTM_PL_FLAG_SHARED means that another application may + * reference the buffer. + * TTM_PL_FLAG_NO_EVICT means that the buffer may never + * be evicted to make room for other buffers. + * TTM_PL_FLAG_TOPDOWN requests to be placed from the + * top of the memory area, instead of the bottom. + */ + +#define TTM_PL_FLAG_CACHED (1 << 16) +#define TTM_PL_FLAG_UNCACHED (1 << 17) +#define TTM_PL_FLAG_WC (1 << 18) +#define TTM_PL_FLAG_CONTIGUOUS (1 << 19) +#define TTM_PL_FLAG_NO_EVICT (1 << 21) +#define TTM_PL_FLAG_TOPDOWN (1 << 22) + +#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \ + TTM_PL_FLAG_UNCACHED | \ + TTM_PL_FLAG_WC) + +#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING) + +/** + * struct ttm_place + * + * @fpfn: first valid page frame number to put the object + * @lpfn: last valid page frame number to put the object + * @flags: memory domain and caching flags for the object + * + * Structure indicating a possible place to put an object. + */ +struct ttm_place { + unsigned fpfn; + unsigned lpfn; + uint32_t flags; +}; + +/** + * struct ttm_placement + * + * @num_placement: number of preferred placements + * @placement: preferred placements + * @num_busy_placement: number of preferred placements when need to evict buffer + * @busy_placement: preferred placements when need to evict buffer + * + * Structure indicating the placement you request for an object. + */ +struct ttm_placement { + unsigned num_placement; + const struct ttm_place *placement; + unsigned num_busy_placement; + const struct ttm_place *busy_placement; +}; + +#endif diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h new file mode 100644 index 000000000..7c492b49e --- /dev/null +++ b/include/drm/ttm/ttm_set_memory.h @@ -0,0 +1,150 @@ +/************************************************************************** + * + * Copyright (c) 2018 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ +/* + * Authors: Huang Rui + */ + +#ifndef TTM_SET_MEMORY +#define TTM_SET_MEMORY + +#include + +#ifdef CONFIG_X86 + +#include + +static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray) +{ + return set_pages_array_wb(pages, addrinarray); +} + +static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray) +{ + return set_pages_array_wc(pages, addrinarray); +} + +static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray) +{ + return set_pages_array_uc(pages, addrinarray); +} + +static inline int ttm_set_pages_wb(struct page *page, int numpages) +{ + return set_pages_wb(page, numpages); +} + +static inline int ttm_set_pages_wc(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_wc(addr, numpages); +} + +static inline int ttm_set_pages_uc(struct page *page, int numpages) +{ + return set_pages_uc(page, numpages); +} + +#else /* for CONFIG_X86 */ + +#if IS_ENABLED(CONFIG_AGP) + +#include + +static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray) +{ + int i; + + for (i = 0; i < addrinarray; i++) + unmap_page_from_agp(pages[i]); + return 0; +} + +static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray) +{ + int i; + + for (i = 0; i < addrinarray; i++) + map_page_into_agp(pages[i]); + return 0; +} + +static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray) +{ + int i; + + for (i = 0; i < addrinarray; i++) + map_page_into_agp(pages[i]); + return 0; +} + +static inline int ttm_set_pages_wb(struct page *page, int numpages) +{ + int i; + + for (i = 0; i < numpages; i++) + unmap_page_from_agp(page++); + return 0; +} + +#else /* for CONFIG_AGP */ + +static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray) +{ + return 0; +} + +static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray) +{ + return 0; +} + +static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray) +{ + return 0; +} + +static inline int ttm_set_pages_wb(struct page *page, int numpages) +{ + return 0; +} + +#endif /* for CONFIG_AGP */ + +static inline int ttm_set_pages_wc(struct page *page, int numpages) +{ + return 0; +} + +static inline int ttm_set_pages_uc(struct page *page, int numpages) +{ + return 0; +} + +#endif /* for CONFIG_X86 */ + +#endif diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h new file mode 100644 index 000000000..c0e928abf --- /dev/null +++ b/include/drm/ttm/ttm_tt.h @@ -0,0 +1,272 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +#ifndef _TTM_TT_H_ +#define _TTM_TT_H_ + +#include + +struct ttm_tt; +struct ttm_mem_reg; +struct ttm_buffer_object; +struct ttm_operation_ctx; + +#define TTM_PAGE_FLAG_WRITE (1 << 3) +#define TTM_PAGE_FLAG_SWAPPED (1 << 4) +#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5) +#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6) +#define TTM_PAGE_FLAG_DMA32 (1 << 7) +#define TTM_PAGE_FLAG_SG (1 << 8) +#define TTM_PAGE_FLAG_NO_RETRY (1 << 9) + +enum ttm_caching_state { + tt_uncached, + tt_wc, + tt_cached +}; + +struct ttm_backend_func { + /** + * struct ttm_backend_func member bind + * + * @ttm: Pointer to a struct ttm_tt. + * @bo_mem: Pointer to a struct ttm_mem_reg describing the + * memory type and location for binding. + * + * Bind the backend pages into the aperture in the location + * indicated by @bo_mem. This function should be able to handle + * differences between aperture and system page sizes. + */ + int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); + + /** + * struct ttm_backend_func member unbind + * + * @ttm: Pointer to a struct ttm_tt. + * + * Unbind previously bound backend pages. This function should be + * able to handle differences between aperture and system page sizes. + */ + int (*unbind) (struct ttm_tt *ttm); + + /** + * struct ttm_backend_func member destroy + * + * @ttm: Pointer to a struct ttm_tt. + * + * Destroy the backend. This will be call back from ttm_tt_destroy so + * don't call ttm_tt_destroy from the callback or infinite loop. + */ + void (*destroy) (struct ttm_tt *ttm); +}; + +/** + * struct ttm_tt + * + * @bdev: Pointer to a struct ttm_bo_device. + * @func: Pointer to a struct ttm_backend_func that describes + * the backend methods. + * pointer. + * @pages: Array of pages backing the data. + * @num_pages: Number of pages in the page array. + * @bdev: Pointer to the current struct ttm_bo_device. + * @be: Pointer to the ttm backend. + * @swap_storage: Pointer to shmem struct file for swap storage. + * @caching_state: The current caching state of the pages. + * @state: The current binding state of the pages. + * + * This is a structure holding the pages, caching- and aperture binding + * status for a buffer object that isn't backed by fixed (VRAM / AGP) + * memory. + */ +struct ttm_tt { + struct ttm_bo_device *bdev; + struct ttm_backend_func *func; + struct page **pages; + uint32_t page_flags; + unsigned long num_pages; + struct sg_table *sg; /* for SG objects via dma-buf */ + struct file *swap_storage; + enum ttm_caching_state caching_state; + enum { + tt_bound, + tt_unbound, + tt_unpopulated, + } state; +}; + +/** + * struct ttm_dma_tt + * + * @ttm: Base ttm_tt struct. + * @dma_address: The DMA (bus) addresses of the pages + * @pages_list: used by some page allocation backend + * + * This is a structure holding the pages, caching- and aperture binding + * status for a buffer object that isn't backed by fixed (VRAM / AGP) + * memory. 
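+ *
+ * Illustrative usage (a hypothetical driver sketch, not part of the
+ * original header; "gtt" is an assumed driver structure whose "ttm" member
+ * is this struct):
+ *
+ *     if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags))
+ *             (handle the allocation failure)
+ *     ...
+ *     ttm_dma_tt_fini(&gtt->ttm);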
+ */ +struct ttm_dma_tt { + struct ttm_tt ttm; + dma_addr_t *dma_address; + struct list_head pages_list; +}; + +/** + * ttm_tt_create + * + * @bo: pointer to a struct ttm_buffer_object + * @zero_alloc: true if allocated pages needs to be zeroed + * + * Make sure we have a TTM structure allocated for the given BO. + * No pages are actually allocated. + */ +int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc); + +/** + * ttm_tt_init + * + * @ttm: The struct ttm_tt. + * @bo: The buffer object we create the ttm for. + * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * + * Create a struct ttm_tt to back data with system memory pages. + * No pages are actually allocated. + * Returns: + * NULL: Out of memory. + */ +int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, + uint32_t page_flags); +int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, + uint32_t page_flags); +int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, + uint32_t page_flags); + +/** + * ttm_tt_fini + * + * @ttm: the ttm_tt structure. + * + * Free memory of ttm_tt structure + */ +void ttm_tt_fini(struct ttm_tt *ttm); +void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); + +/** + * ttm_ttm_bind: + * + * @ttm: The struct ttm_tt containing backing pages. + * @bo_mem: The struct ttm_mem_reg identifying the binding location. + * + * Bind the pages of @ttm to an aperture location identified by @bo_mem + */ +int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem, + struct ttm_operation_ctx *ctx); + +/** + * ttm_ttm_destroy: + * + * @ttm: The struct ttm_tt. + * + * Unbind, unpopulate and destroy common struct ttm_tt. + */ +void ttm_tt_destroy(struct ttm_tt *ttm); + +/** + * ttm_ttm_unbind: + * + * @ttm: The struct ttm_tt. + * + * Unbind a struct ttm_tt. + */ +void ttm_tt_unbind(struct ttm_tt *ttm); + +/** + * ttm_tt_swapin: + * + * @ttm: The struct ttm_tt. + * + * Swap in a previously swap out ttm_tt. + */ +int ttm_tt_swapin(struct ttm_tt *ttm); + +/** + * ttm_tt_set_placement_caching: + * + * @ttm A struct ttm_tt the backing pages of which will change caching policy. + * @placement: Flag indicating the desired caching policy. + * + * This function will change caching policy of any default kernel mappings of + * the pages backing @ttm. If changing from cached to uncached or + * write-combined, + * all CPU caches will first be flushed to make sure the data of the pages + * hit RAM. This function may be very costly as it involves global TLB + * and cache flushes and potential page splitting / combining. + */ +int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); +int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage); + +/** + * ttm_tt_populate - allocate pages for a ttm + * + * @ttm: Pointer to the ttm_tt structure + * + * Calls the driver method to allocate pages for a ttm + */ +int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); + +/** + * ttm_tt_unpopulate - free pages from a ttm + * + * @ttm: Pointer to the ttm_tt structure + * + * Calls the driver method to free all pages from a ttm + */ +void ttm_tt_unpopulate(struct ttm_tt *ttm); + +#if IS_ENABLED(CONFIG_AGP) +#include + +/** + * ttm_agp_tt_create + * + * @bo: Buffer object we allocate the ttm for. + * @bridge: The agp bridge this device is sitting on. + * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * + * + * Create a TTM backend that uses the indicated AGP bridge as an aperture + * for TT memory. 
This function uses the linux agpgart interface to + * bind and unbind memory backing a ttm_tt. + */ +struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, + struct agp_bridge_data *bridge, + uint32_t page_flags); +int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); +void ttm_agp_tt_unpopulate(struct ttm_tt *ttm); +#endif + +#endif diff --git a/include/dt-bindings/arm/ux500_pm_domains.h b/include/dt-bindings/arm/ux500_pm_domains.h new file mode 100644 index 000000000..398a6c028 --- /dev/null +++ b/include/dt-bindings/arm/ux500_pm_domains.h @@ -0,0 +1,15 @@ +/* + * Copyright (C) 2014 Linaro Ltd. + * + * Author: Ulf Hansson + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef _DT_BINDINGS_ARM_UX500_PM_DOMAINS_H +#define _DT_BINDINGS_ARM_UX500_PM_DOMAINS_H + +#define DOMAIN_VAPE 0 + +/* Number of PM domains. */ +#define NR_DOMAINS (DOMAIN_VAPE + 1) + +#endif diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h new file mode 100644 index 000000000..7138384e2 --- /dev/null +++ b/include/dt-bindings/bus/ti-sysc.h @@ -0,0 +1,24 @@ +/* TI sysc interconnect target module defines */ + +/* Generic sysc found on omap2 and later, also known as type1 */ +#define SYSC_OMAP2_CLOCKACTIVITY (3 << 8) +#define SYSC_OMAP2_EMUFREE (1 << 5) +#define SYSC_OMAP2_ENAWAKEUP (1 << 2) +#define SYSC_OMAP2_SOFTRESET (1 << 1) +#define SYSC_OMAP2_AUTOIDLE (1 << 0) + +/* Generic sysc found on omap4 and later, also known as type2 */ +#define SYSC_OMAP4_DMADISABLE (1 << 16) +#define SYSC_OMAP4_FREEEMU (1 << 1) /* Also known as EMUFREE */ +#define SYSC_OMAP4_SOFTRESET (1 << 0) + +/* SmartReflex sysc found on 36xx and later */ +#define SYSC_OMAP3_SR_ENAWAKEUP (1 << 26) + +#define SYSC_DRA7_MCAN_ENAWAKEUP (1 << 4) + +/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */ +#define SYSC_IDLE_FORCE 0 +#define SYSC_IDLE_NO 1 +#define SYSC_IDLE_SMART 2 +#define SYSC_IDLE_SMART_WKUP 3 diff --git a/include/dt-bindings/clk/ti-dra7-atl.h b/include/dt-bindings/clk/ti-dra7-atl.h new file mode 100644 index 000000000..42dd4164f --- /dev/null +++ b/include/dt-bindings/clk/ti-dra7-atl.h @@ -0,0 +1,40 @@ +/* + * This header provides constants for DRA7 ATL (Audio Tracking Logic) + * + * The constants defined in this header are used in dts files + * + * Copyright (C) 2013 Texas Instruments, Inc. + * + * Peter Ujfalusi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_DRA7_ATL_H +#define _DT_BINDINGS_CLK_DRA7_ATL_H + +#define DRA7_ATL_WS_MCASP1_FSR 0 +#define DRA7_ATL_WS_MCASP1_FSX 1 +#define DRA7_ATL_WS_MCASP2_FSR 2 +#define DRA7_ATL_WS_MCASP2_FSX 3 +#define DRA7_ATL_WS_MCASP3_FSX 4 +#define DRA7_ATL_WS_MCASP4_FSX 5 +#define DRA7_ATL_WS_MCASP5_FSX 6 +#define DRA7_ATL_WS_MCASP6_FSX 7 +#define DRA7_ATL_WS_MCASP7_FSX 8 +#define DRA7_ATL_WS_MCASP8_FSX 9 +#define DRA7_ATL_WS_MCASP8_AHCLKX 10 +#define DRA7_ATL_WS_XREF_CLK3 11 +#define DRA7_ATL_WS_XREF_CLK0 12 +#define DRA7_ATL_WS_XREF_CLK1 13 +#define DRA7_ATL_WS_XREF_CLK2 14 +#define DRA7_ATL_WS_OSC1_X1 15 + +#endif diff --git a/include/dt-bindings/clock/actions,s700-cmu.h b/include/dt-bindings/clock/actions,s700-cmu.h new file mode 100644 index 000000000..3e1942996 --- /dev/null +++ b/include/dt-bindings/clock/actions,s700-cmu.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Device Tree binding constants for Actions Semi S700 Clock Management Unit + * + * Copyright (c) 2014 Actions Semi Inc. + * Author: David Liu + * + * Author: Pathiban Nallathambi + * Author: Saravanan Sekar + */ + +#ifndef __DT_BINDINGS_CLOCK_S700_H +#define __DT_BINDINGS_CLOCK_S700_H + +#define CLK_NONE 0 + +/* pll clocks */ +#define CLK_CORE_PLL 1 +#define CLK_DEV_PLL 2 +#define CLK_DDR_PLL 3 +#define CLK_NAND_PLL 4 +#define CLK_DISPLAY_PLL 5 +#define CLK_TVOUT_PLL 6 +#define CLK_CVBS_PLL 7 +#define CLK_AUDIO_PLL 8 +#define CLK_ETHERNET_PLL 9 + +/* system clock */ +#define CLK_CPU 10 +#define CLK_DEV 11 +#define CLK_AHB 12 +#define CLK_APB 13 +#define CLK_DMAC 14 +#define CLK_NOC0_CLK_MUX 15 +#define CLK_NOC1_CLK_MUX 16 +#define CLK_HP_CLK_MUX 17 +#define CLK_HP_CLK_DIV 18 +#define CLK_NOC1_CLK_DIV 19 +#define CLK_NOC0 20 +#define CLK_NOC1 21 +#define CLK_SENOR_SRC 22 + +/* peripheral device clock */ +#define CLK_GPIO 23 +#define CLK_TIMER 24 +#define CLK_DSI 25 +#define CLK_CSI 26 +#define CLK_SI 27 +#define CLK_DE 28 +#define CLK_HDE 29 +#define CLK_VDE 30 +#define CLK_VCE 31 +#define CLK_NAND 32 +#define CLK_SD0 33 +#define CLK_SD1 34 +#define CLK_SD2 35 + +#define CLK_UART0 36 +#define CLK_UART1 37 +#define CLK_UART2 38 +#define CLK_UART3 39 +#define CLK_UART4 40 +#define CLK_UART5 41 +#define CLK_UART6 42 + +#define CLK_PWM0 43 +#define CLK_PWM1 44 +#define CLK_PWM2 45 +#define CLK_PWM3 46 +#define CLK_PWM4 47 +#define CLK_PWM5 48 +#define CLK_GPU3D 49 + +#define CLK_I2C0 50 +#define CLK_I2C1 51 +#define CLK_I2C2 52 +#define CLK_I2C3 53 + +#define CLK_SPI0 54 +#define CLK_SPI1 55 +#define CLK_SPI2 56 +#define CLK_SPI3 57 + +#define CLK_USB3_480MPLL0 58 +#define CLK_USB3_480MPHY0 59 +#define CLK_USB3_5GPHY 60 +#define CLK_USB3_CCE 61 +#define CLK_USB3_MAC 62 + +#define CLK_LCD 63 +#define CLK_HDMI_AUDIO 64 +#define CLK_I2SRX 65 +#define CLK_I2STX 66 + +#define CLK_SENSOR0 67 +#define CLK_SENSOR1 68 + +#define CLK_HDMI_DEV 69 + +#define CLK_ETHERNET 70 +#define CLK_RMII_REF 71 + +#define CLK_USB2H0_PLLEN 72 +#define CLK_USB2H0_PHY 73 +#define CLK_USB2H0_CCE 74 +#define CLK_USB2H1_PLLEN 75 +#define CLK_USB2H1_PHY 76 +#define CLK_USB2H1_CCE 77 + +#define CLK_TVOUT 78 + +#define CLK_THERMAL_SENSOR 79 + +#define CLK_IRC_SWITCH 80 +#define CLK_PCM1 81 +#define CLK_NR_CLKS (CLK_PCM1 + 1) + +#endif /* __DT_BINDINGS_CLOCK_S700_H */ diff --git a/include/dt-bindings/clock/actions,s900-cmu.h b/include/dt-bindings/clock/actions,s900-cmu.h new file mode 100644 index 000000000..7c1251565 --- /dev/null +++ b/include/dt-bindings/clock/actions,s900-cmu.h @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: 
GPL-2.0+ +// +// Device Tree binding constants for Actions Semi S900 Clock Management Unit +// +// Copyright (c) 2014 Actions Semi Inc. +// Copyright (c) 2018 Linaro Ltd. + +#ifndef __DT_BINDINGS_CLOCK_S900_CMU_H +#define __DT_BINDINGS_CLOCK_S900_CMU_H + +#define CLK_NONE 0 + +/* fixed rate clocks */ +#define CLK_LOSC 1 +#define CLK_HOSC 2 + +/* pll clocks */ +#define CLK_CORE_PLL 3 +#define CLK_DEV_PLL 4 +#define CLK_DDR_PLL 5 +#define CLK_NAND_PLL 6 +#define CLK_DISPLAY_PLL 7 +#define CLK_DSI_PLL 8 +#define CLK_ASSIST_PLL 9 +#define CLK_AUDIO_PLL 10 + +/* system clock */ +#define CLK_CPU 15 +#define CLK_DEV 16 +#define CLK_NOC 17 +#define CLK_NOC_MUX 18 +#define CLK_NOC_DIV 19 +#define CLK_AHB 20 +#define CLK_APB 21 +#define CLK_DMAC 22 + +/* peripheral device clock */ +#define CLK_GPIO 23 + +#define CLK_BISP 24 +#define CLK_CSI0 25 +#define CLK_CSI1 26 + +#define CLK_DE0 27 +#define CLK_DE1 28 +#define CLK_DE2 29 +#define CLK_DE3 30 +#define CLK_DSI 32 + +#define CLK_GPU 33 +#define CLK_GPU_CORE 34 +#define CLK_GPU_MEM 35 +#define CLK_GPU_SYS 36 + +#define CLK_HDE 37 +#define CLK_I2C0 38 +#define CLK_I2C1 39 +#define CLK_I2C2 40 +#define CLK_I2C3 41 +#define CLK_I2C4 42 +#define CLK_I2C5 43 +#define CLK_I2SRX 44 +#define CLK_I2STX 45 +#define CLK_IMX 46 +#define CLK_LCD 47 +#define CLK_NAND0 48 +#define CLK_NAND1 49 +#define CLK_PWM0 50 +#define CLK_PWM1 51 +#define CLK_PWM2 52 +#define CLK_PWM3 53 +#define CLK_PWM4 54 +#define CLK_PWM5 55 +#define CLK_SD0 56 +#define CLK_SD1 57 +#define CLK_SD2 58 +#define CLK_SD3 59 +#define CLK_SENSOR 60 +#define CLK_SPEED_SENSOR 61 +#define CLK_SPI0 62 +#define CLK_SPI1 63 +#define CLK_SPI2 64 +#define CLK_SPI3 65 +#define CLK_THERMAL_SENSOR 66 +#define CLK_UART0 67 +#define CLK_UART1 68 +#define CLK_UART2 69 +#define CLK_UART3 70 +#define CLK_UART4 71 +#define CLK_UART5 72 +#define CLK_UART6 73 +#define CLK_VCE 74 +#define CLK_VDE 75 + +#define CLK_USB3_480MPLL0 76 +#define CLK_USB3_480MPHY0 77 +#define CLK_USB3_5GPHY 78 +#define CLK_USB3_CCE 79 +#define CLK_USB3_MAC 80 + +#define CLK_TIMER 83 + +#define CLK_HDMI_AUDIO 84 + +#define CLK_24M 85 + +#define CLK_EDP 86 + +#define CLK_24M_EDP 87 +#define CLK_EDP_PLL 88 +#define CLK_EDP_LINK 89 + +#define CLK_USB2H0_PLLEN 90 +#define CLK_USB2H0_PHY 91 +#define CLK_USB2H0_CCE 92 +#define CLK_USB2H1_PLLEN 93 +#define CLK_USB2H1_PHY 94 +#define CLK_USB2H1_CCE 95 + +#define CLK_DDR0 96 +#define CLK_DDR1 97 +#define CLK_DMM 98 + +#define CLK_ETH_MAC 99 +#define CLK_RMII_REF 100 + +#define CLK_NR_CLKS (CLK_RMII_REF + 1) + +#endif /* __DT_BINDINGS_CLOCK_S900_CMU_H */ diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h new file mode 100644 index 000000000..04e8db27d --- /dev/null +++ b/include/dt-bindings/clock/alphascale,asm9260.h @@ -0,0 +1,97 @@ +/* + * Copyright 2014 Oleksij Rempel + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_ASM9260_H +#define _DT_BINDINGS_CLK_ASM9260_H + +/* ahb gate */ +#define CLKID_AHB_ROM 0 +#define CLKID_AHB_RAM 1 +#define CLKID_AHB_GPIO 2 +#define CLKID_AHB_MAC 3 +#define CLKID_AHB_EMI 4 +#define CLKID_AHB_USB0 5 +#define CLKID_AHB_USB1 6 +#define CLKID_AHB_DMA0 7 +#define CLKID_AHB_DMA1 8 +#define CLKID_AHB_UART0 9 +#define CLKID_AHB_UART1 10 +#define CLKID_AHB_UART2 11 +#define CLKID_AHB_UART3 12 +#define CLKID_AHB_UART4 13 +#define CLKID_AHB_UART5 14 +#define CLKID_AHB_UART6 15 +#define CLKID_AHB_UART7 16 +#define CLKID_AHB_UART8 17 +#define CLKID_AHB_UART9 18 +#define CLKID_AHB_I2S0 19 +#define CLKID_AHB_I2C0 20 +#define CLKID_AHB_I2C1 21 +#define CLKID_AHB_SSP0 22 +#define CLKID_AHB_IOCONFIG 23 +#define CLKID_AHB_WDT 24 +#define CLKID_AHB_CAN0 25 +#define CLKID_AHB_CAN1 26 +#define CLKID_AHB_MPWM 27 +#define CLKID_AHB_SPI0 28 +#define CLKID_AHB_SPI1 29 +#define CLKID_AHB_QEI 30 +#define CLKID_AHB_QUADSPI0 31 +#define CLKID_AHB_CAMIF 32 +#define CLKID_AHB_LCDIF 33 +#define CLKID_AHB_TIMER0 34 +#define CLKID_AHB_TIMER1 35 +#define CLKID_AHB_TIMER2 36 +#define CLKID_AHB_TIMER3 37 +#define CLKID_AHB_IRQ 38 +#define CLKID_AHB_RTC 39 +#define CLKID_AHB_NAND 40 +#define CLKID_AHB_ADC0 41 +#define CLKID_AHB_LED 42 +#define CLKID_AHB_DAC0 43 +#define CLKID_AHB_LCD 44 +#define CLKID_AHB_I2S1 45 +#define CLKID_AHB_MAC1 46 + +/* devider */ +#define CLKID_SYS_CPU 47 +#define CLKID_SYS_AHB 48 +#define CLKID_SYS_I2S0M 49 +#define CLKID_SYS_I2S0S 50 +#define CLKID_SYS_I2S1M 51 +#define CLKID_SYS_I2S1S 52 +#define CLKID_SYS_UART0 53 +#define CLKID_SYS_UART1 54 +#define CLKID_SYS_UART2 55 +#define CLKID_SYS_UART3 56 +#define CLKID_SYS_UART4 56 +#define CLKID_SYS_UART5 57 +#define CLKID_SYS_UART6 58 +#define CLKID_SYS_UART7 59 +#define CLKID_SYS_UART8 60 +#define CLKID_SYS_UART9 61 +#define CLKID_SYS_SPI0 62 +#define CLKID_SYS_SPI1 63 +#define CLKID_SYS_QUADSPI 64 +#define CLKID_SYS_SSP0 65 +#define CLKID_SYS_NAND 66 +#define CLKID_SYS_TRACE 67 +#define CLKID_SYS_CAMM 68 +#define CLKID_SYS_WDT 69 +#define CLKID_SYS_CLKOUT 70 +#define CLKID_SYS_MAC 71 +#define CLKID_SYS_LCD 72 +#define CLKID_SYS_ADCANA 73 + +#define MAX_CLKS 74 +#endif diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h new file mode 100644 index 000000000..b396f00e4 --- /dev/null +++ b/include/dt-bindings/clock/am3.h @@ -0,0 +1,108 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_AM3_H +#define __DT_BINDINGS_CLK_AM3_H + +#define AM3_CLKCTRL_OFFSET 0x0 +#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET) + +/* l4_per clocks */ +#define AM3_L4_PER_CLKCTRL_OFFSET 0x14 +#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET) +#define AM3_CPGMAC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14) +#define AM3_LCDC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x18) +#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c) +#define AM3_TPTC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x24) +#define AM3_EMIF_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x28) +#define AM3_OCMCRAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x2c) +#define AM3_GPMC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x30) +#define AM3_MCASP0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x34) +#define AM3_UART6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x38) +#define AM3_MMC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x3c) +#define AM3_ELM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x40) +#define AM3_I2C3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x44) +#define AM3_I2C2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x48) +#define AM3_SPI0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x4c) +#define AM3_SPI1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x50) +#define AM3_L4_LS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x60) +#define AM3_MCASP1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x68) +#define AM3_UART2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x6c) +#define AM3_UART3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x70) +#define AM3_UART4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x74) +#define AM3_UART5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x78) +#define AM3_TIMER7_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x7c) +#define AM3_TIMER2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x80) +#define AM3_TIMER3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x84) +#define AM3_TIMER4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x88) +#define AM3_RNG_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x90) +#define AM3_AES_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x94) +#define AM3_SHAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xa0) +#define AM3_GPIO2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xac) +#define AM3_GPIO3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb0) +#define AM3_GPIO4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb4) +#define AM3_TPCC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xbc) +#define AM3_D_CAN0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc0) +#define AM3_D_CAN1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc4) +#define AM3_EPWMSS1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xcc) +#define AM3_EPWMSS0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd4) +#define AM3_EPWMSS2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd8) +#define AM3_L3_INSTR_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xdc) +#define AM3_L3_MAIN_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe0) +#define AM3_PRUSS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe8) +#define AM3_TIMER5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xec) +#define AM3_TIMER6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf0) +#define AM3_MMC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf4) +#define AM3_MMC3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf8) +#define AM3_TPTC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xfc) +#define AM3_TPTC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x100) +#define AM3_SPINLOCK_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x10c) +#define AM3_MAILBOX_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x110) +#define AM3_L4_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x120) +#define AM3_OCPWP_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x130) +#define AM3_CLKDIV32K_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14c) + +/* l4_wkup clocks */ +#define AM3_L4_WKUP_CLKCTRL_OFFSET 0x4 +#define AM3_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET) +#define AM3_CONTROL_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x4) +#define AM3_GPIO1_CLKCTRL 
AM3_L4_WKUP_CLKCTRL_INDEX(0x8) +#define AM3_L4_WKUP_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc) +#define AM3_DEBUGSS_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x14) +#define AM3_WKUP_M3_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb0) +#define AM3_UART1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb4) +#define AM3_I2C1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb8) +#define AM3_ADC_TSC_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xbc) +#define AM3_SMARTREFLEX0_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc0) +#define AM3_TIMER1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc4) +#define AM3_SMARTREFLEX1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc8) +#define AM3_WD_TIMER2_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xd4) + +/* mpu clocks */ +#define AM3_MPU_CLKCTRL_OFFSET 0x4 +#define AM3_MPU_CLKCTRL_INDEX(offset) ((offset) - AM3_MPU_CLKCTRL_OFFSET) +#define AM3_MPU_CLKCTRL AM3_MPU_CLKCTRL_INDEX(0x4) + +/* l4_rtc clocks */ +#define AM3_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0) + +/* gfx_l3 clocks */ +#define AM3_GFX_L3_CLKCTRL_OFFSET 0x4 +#define AM3_GFX_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET) +#define AM3_GFX_CLKCTRL AM3_GFX_L3_CLKCTRL_INDEX(0x4) + +/* l4_cefuse clocks */ +#define AM3_L4_CEFUSE_CLKCTRL_OFFSET 0x20 +#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET) +#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20) + +#endif diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h new file mode 100644 index 000000000..d21df00b3 --- /dev/null +++ b/include/dt-bindings/clock/am4.h @@ -0,0 +1,113 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_AM4_H +#define __DT_BINDINGS_CLK_AM4_H + +#define AM4_CLKCTRL_OFFSET 0x20 +#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET) + +/* l4_wkup clocks */ +#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120) +#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220) +#define AM4_WKUP_M3_CLKCTRL AM4_CLKCTRL_INDEX(0x228) +#define AM4_COUNTER_32K_CLKCTRL AM4_CLKCTRL_INDEX(0x230) +#define AM4_TIMER1_CLKCTRL AM4_CLKCTRL_INDEX(0x328) +#define AM4_WD_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x338) +#define AM4_I2C1_CLKCTRL AM4_CLKCTRL_INDEX(0x340) +#define AM4_UART1_CLKCTRL AM4_CLKCTRL_INDEX(0x348) +#define AM4_SMARTREFLEX0_CLKCTRL AM4_CLKCTRL_INDEX(0x350) +#define AM4_SMARTREFLEX1_CLKCTRL AM4_CLKCTRL_INDEX(0x358) +#define AM4_CONTROL_CLKCTRL AM4_CLKCTRL_INDEX(0x360) +#define AM4_GPIO1_CLKCTRL AM4_CLKCTRL_INDEX(0x368) + +/* mpu clocks */ +#define AM4_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* gfx_l3 clocks */ +#define AM4_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l4_rtc clocks */ +#define AM4_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l4_per clocks */ +#define AM4_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20) +#define AM4_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28) +#define AM4_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30) +#define AM4_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40) +#define AM4_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50) +#define AM4_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58) +#define AM4_VPFE0_CLKCTRL AM4_CLKCTRL_INDEX(0x68) +#define AM4_VPFE1_CLKCTRL AM4_CLKCTRL_INDEX(0x70) +#define AM4_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78) +#define AM4_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80) +#define AM4_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88) +#define AM4_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90) +#define AM4_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0) +#define AM4_GPMC_CLKCTRL AM4_CLKCTRL_INDEX(0x220) +#define AM4_MCASP0_CLKCTRL AM4_CLKCTRL_INDEX(0x238) +#define AM4_MCASP1_CLKCTRL AM4_CLKCTRL_INDEX(0x240) +#define AM4_MMC3_CLKCTRL AM4_CLKCTRL_INDEX(0x248) +#define AM4_QSPI_CLKCTRL AM4_CLKCTRL_INDEX(0x258) +#define AM4_USB_OTG_SS0_CLKCTRL AM4_CLKCTRL_INDEX(0x260) +#define AM4_USB_OTG_SS1_CLKCTRL AM4_CLKCTRL_INDEX(0x268) +#define AM4_PRUSS_CLKCTRL AM4_CLKCTRL_INDEX(0x320) +#define AM4_L4_LS_CLKCTRL AM4_CLKCTRL_INDEX(0x420) +#define AM4_D_CAN0_CLKCTRL AM4_CLKCTRL_INDEX(0x428) +#define AM4_D_CAN1_CLKCTRL AM4_CLKCTRL_INDEX(0x430) +#define AM4_EPWMSS0_CLKCTRL AM4_CLKCTRL_INDEX(0x438) +#define AM4_EPWMSS1_CLKCTRL AM4_CLKCTRL_INDEX(0x440) +#define AM4_EPWMSS2_CLKCTRL AM4_CLKCTRL_INDEX(0x448) +#define AM4_EPWMSS3_CLKCTRL AM4_CLKCTRL_INDEX(0x450) +#define AM4_EPWMSS4_CLKCTRL AM4_CLKCTRL_INDEX(0x458) +#define AM4_EPWMSS5_CLKCTRL AM4_CLKCTRL_INDEX(0x460) +#define AM4_ELM_CLKCTRL AM4_CLKCTRL_INDEX(0x468) +#define AM4_GPIO2_CLKCTRL AM4_CLKCTRL_INDEX(0x478) +#define AM4_GPIO3_CLKCTRL AM4_CLKCTRL_INDEX(0x480) +#define AM4_GPIO4_CLKCTRL AM4_CLKCTRL_INDEX(0x488) +#define AM4_GPIO5_CLKCTRL AM4_CLKCTRL_INDEX(0x490) +#define AM4_GPIO6_CLKCTRL AM4_CLKCTRL_INDEX(0x498) +#define AM4_HDQ1W_CLKCTRL AM4_CLKCTRL_INDEX(0x4a0) +#define AM4_I2C2_CLKCTRL AM4_CLKCTRL_INDEX(0x4a8) +#define AM4_I2C3_CLKCTRL AM4_CLKCTRL_INDEX(0x4b0) +#define AM4_MAILBOX_CLKCTRL AM4_CLKCTRL_INDEX(0x4b8) +#define AM4_MMC1_CLKCTRL AM4_CLKCTRL_INDEX(0x4c0) +#define AM4_MMC2_CLKCTRL AM4_CLKCTRL_INDEX(0x4c8) +#define AM4_RNG_CLKCTRL AM4_CLKCTRL_INDEX(0x4e0) +#define AM4_SPI0_CLKCTRL AM4_CLKCTRL_INDEX(0x500) +#define AM4_SPI1_CLKCTRL AM4_CLKCTRL_INDEX(0x508) +#define AM4_SPI2_CLKCTRL AM4_CLKCTRL_INDEX(0x510) +#define AM4_SPI3_CLKCTRL AM4_CLKCTRL_INDEX(0x518) +#define 
AM4_SPI4_CLKCTRL AM4_CLKCTRL_INDEX(0x520) +#define AM4_SPINLOCK_CLKCTRL AM4_CLKCTRL_INDEX(0x528) +#define AM4_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x530) +#define AM4_TIMER3_CLKCTRL AM4_CLKCTRL_INDEX(0x538) +#define AM4_TIMER4_CLKCTRL AM4_CLKCTRL_INDEX(0x540) +#define AM4_TIMER5_CLKCTRL AM4_CLKCTRL_INDEX(0x548) +#define AM4_TIMER6_CLKCTRL AM4_CLKCTRL_INDEX(0x550) +#define AM4_TIMER7_CLKCTRL AM4_CLKCTRL_INDEX(0x558) +#define AM4_TIMER8_CLKCTRL AM4_CLKCTRL_INDEX(0x560) +#define AM4_TIMER9_CLKCTRL AM4_CLKCTRL_INDEX(0x568) +#define AM4_TIMER10_CLKCTRL AM4_CLKCTRL_INDEX(0x570) +#define AM4_TIMER11_CLKCTRL AM4_CLKCTRL_INDEX(0x578) +#define AM4_UART2_CLKCTRL AM4_CLKCTRL_INDEX(0x580) +#define AM4_UART3_CLKCTRL AM4_CLKCTRL_INDEX(0x588) +#define AM4_UART4_CLKCTRL AM4_CLKCTRL_INDEX(0x590) +#define AM4_UART5_CLKCTRL AM4_CLKCTRL_INDEX(0x598) +#define AM4_UART6_CLKCTRL AM4_CLKCTRL_INDEX(0x5a0) +#define AM4_OCP2SCP0_CLKCTRL AM4_CLKCTRL_INDEX(0x5b8) +#define AM4_OCP2SCP1_CLKCTRL AM4_CLKCTRL_INDEX(0x5c0) +#define AM4_EMIF_CLKCTRL AM4_CLKCTRL_INDEX(0x720) +#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20) +#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20) + +#endif diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h new file mode 100644 index 000000000..f43738607 --- /dev/null +++ b/include/dt-bindings/clock/aspeed-clock.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ + +#ifndef DT_BINDINGS_ASPEED_CLOCK_H +#define DT_BINDINGS_ASPEED_CLOCK_H + +#define ASPEED_CLK_GATE_ECLK 0 +#define ASPEED_CLK_GATE_GCLK 1 +#define ASPEED_CLK_GATE_MCLK 2 +#define ASPEED_CLK_GATE_VCLK 3 +#define ASPEED_CLK_GATE_BCLK 4 +#define ASPEED_CLK_GATE_DCLK 5 +#define ASPEED_CLK_GATE_REFCLK 6 +#define ASPEED_CLK_GATE_USBPORT2CLK 7 +#define ASPEED_CLK_GATE_LCLK 8 +#define ASPEED_CLK_GATE_USBUHCICLK 9 +#define ASPEED_CLK_GATE_D1CLK 10 +#define ASPEED_CLK_GATE_YCLK 11 +#define ASPEED_CLK_GATE_USBPORT1CLK 12 +#define ASPEED_CLK_GATE_UART1CLK 13 +#define ASPEED_CLK_GATE_UART2CLK 14 +#define ASPEED_CLK_GATE_UART5CLK 15 +#define ASPEED_CLK_GATE_ESPICLK 16 +#define ASPEED_CLK_GATE_MAC1CLK 17 +#define ASPEED_CLK_GATE_MAC2CLK 18 +#define ASPEED_CLK_GATE_RSACLK 19 +#define ASPEED_CLK_GATE_UART3CLK 20 +#define ASPEED_CLK_GATE_UART4CLK 21 +#define ASPEED_CLK_GATE_SDCLK 22 +#define ASPEED_CLK_GATE_LHCCLK 23 +#define ASPEED_CLK_HPLL 24 +#define ASPEED_CLK_AHB 25 +#define ASPEED_CLK_APB 26 +#define ASPEED_CLK_UART 27 +#define ASPEED_CLK_SDIO 28 +#define ASPEED_CLK_ECLK 29 +#define ASPEED_CLK_ECLK_MUX 30 +#define ASPEED_CLK_LHCLK 31 +#define ASPEED_CLK_MAC 32 +#define ASPEED_CLK_BCLK 33 +#define ASPEED_CLK_MPLL 34 +#define ASPEED_CLK_24M 35 + +#define ASPEED_RESET_XDMA 0 +#define ASPEED_RESET_MCTP 1 +#define ASPEED_RESET_ADC 2 +#define ASPEED_RESET_JTAG_MASTER 3 +#define ASPEED_RESET_MIC 4 +#define ASPEED_RESET_PWM 5 +#define ASPEED_RESET_PECI 6 +#define ASPEED_RESET_I2C 7 +#define ASPEED_RESET_AHB 8 +#define ASPEED_RESET_CRT1 9 + +#endif diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h new file mode 100644 index 000000000..ab3ee241d --- /dev/null +++ b/include/dt-bindings/clock/at91.h @@ -0,0 +1,23 @@ +/* + * This header provides constants for AT91 pmc status. + * + * The constants defined in this header are being used in dts. + * + * Licensed under GPLv2 or later. 
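To show how the ASPEED gate, clock and reset indices above are referenced from a DTS (addresses and labels follow the usual AST2500 layout but are only illustrative), the SCU syscon acts as both clock and reset provider with single-cell specifiers:

	syscon: syscon@1e6e2000 {
		compatible = "aspeed,ast2500-scu", "syscon", "simple-mfd";
		reg = <0x1e6e2000 0x1a8>;
		#clock-cells = <1>;
		#reset-cells = <1>;
	};

	uart1: serial@1e783000 {
		clocks = <&syscon ASPEED_CLK_GATE_UART1CLK>;
	};

	i2c0: i2c-bus@40 {
		clocks = <&syscon ASPEED_CLK_APB>;
		resets = <&syscon ASPEED_RESET_I2C>;
	};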
+ */ + +#ifndef _DT_BINDINGS_CLK_AT91_H +#define _DT_BINDINGS_CLK_AT91_H + +#define AT91_PMC_MOSCS 0 /* MOSCS Flag */ +#define AT91_PMC_LOCKA 1 /* PLLA Lock */ +#define AT91_PMC_LOCKB 2 /* PLLB Lock */ +#define AT91_PMC_MCKRDY 3 /* Master Clock */ +#define AT91_PMC_LOCKU 6 /* UPLL Lock */ +#define AT91_PMC_PCKRDY(id) (8 + (id)) /* Programmable Clock */ +#define AT91_PMC_MOSCSELS 16 /* Main Oscillator Selection */ +#define AT91_PMC_MOSCRCS 17 /* Main On-Chip RC */ +#define AT91_PMC_CFDEV 18 /* Clock Failure Detector Event */ +#define AT91_PMC_GCKRDY 24 /* Generated Clocks */ + +#endif diff --git a/include/dt-bindings/clock/ath79-clk.h b/include/dt-bindings/clock/ath79-clk.h new file mode 100644 index 000000000..27359ad83 --- /dev/null +++ b/include/dt-bindings/clock/ath79-clk.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2014, 2016 Antony Pavlov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __DT_BINDINGS_ATH79_CLK_H +#define __DT_BINDINGS_ATH79_CLK_H + +#define ATH79_CLK_CPU 0 +#define ATH79_CLK_DDR 1 +#define ATH79_CLK_AHB 2 + +#define ATH79_CLK_END 3 + +#endif /* __DT_BINDINGS_ATH79_CLK_H */ diff --git a/include/dt-bindings/clock/axg-aoclkc.h b/include/dt-bindings/clock/axg-aoclkc.h new file mode 100644 index 000000000..61955016a --- /dev/null +++ b/include/dt-bindings/clock/axg-aoclkc.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (c) 2016 BayLibre, SAS + * Author: Neil Armstrong + * + * Copyright (c) 2018 Amlogic, inc. + * Author: Qiufang Dai + */ + +#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_AXG_AOCLK +#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_AXG_AOCLK + +#define CLKID_AO_REMOTE 0 +#define CLKID_AO_I2C_MASTER 1 +#define CLKID_AO_I2C_SLAVE 2 +#define CLKID_AO_UART1 3 +#define CLKID_AO_UART2 4 +#define CLKID_AO_IR_BLASTER 5 +#define CLKID_AO_SAR_ADC 6 +#define CLKID_AO_CLK81 7 +#define CLKID_AO_SAR_ADC_SEL 8 +#define CLKID_AO_SAR_ADC_DIV 9 +#define CLKID_AO_SAR_ADC_CLK 10 +#define CLKID_AO_ALT_XTAL 11 + +#endif diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h new file mode 100644 index 000000000..fd9c36209 --- /dev/null +++ b/include/dt-bindings/clock/axg-audio-clkc.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (c) 2018 Baylibre SAS. 
+ * Author: Jerome Brunet + */ + +#ifndef __AXG_AUDIO_CLKC_BINDINGS_H +#define __AXG_AUDIO_CLKC_BINDINGS_H + +#define AUD_CLKID_SLV_SCLK0 9 +#define AUD_CLKID_SLV_SCLK1 10 +#define AUD_CLKID_SLV_SCLK2 11 +#define AUD_CLKID_SLV_SCLK3 12 +#define AUD_CLKID_SLV_SCLK4 13 +#define AUD_CLKID_SLV_SCLK5 14 +#define AUD_CLKID_SLV_SCLK6 15 +#define AUD_CLKID_SLV_SCLK7 16 +#define AUD_CLKID_SLV_SCLK8 17 +#define AUD_CLKID_SLV_SCLK9 18 +#define AUD_CLKID_SLV_LRCLK0 19 +#define AUD_CLKID_SLV_LRCLK1 20 +#define AUD_CLKID_SLV_LRCLK2 21 +#define AUD_CLKID_SLV_LRCLK3 22 +#define AUD_CLKID_SLV_LRCLK4 23 +#define AUD_CLKID_SLV_LRCLK5 24 +#define AUD_CLKID_SLV_LRCLK6 25 +#define AUD_CLKID_SLV_LRCLK7 26 +#define AUD_CLKID_SLV_LRCLK8 27 +#define AUD_CLKID_SLV_LRCLK9 28 +#define AUD_CLKID_DDR_ARB 29 +#define AUD_CLKID_PDM 30 +#define AUD_CLKID_TDMIN_A 31 +#define AUD_CLKID_TDMIN_B 32 +#define AUD_CLKID_TDMIN_C 33 +#define AUD_CLKID_TDMIN_LB 34 +#define AUD_CLKID_TDMOUT_A 35 +#define AUD_CLKID_TDMOUT_B 36 +#define AUD_CLKID_TDMOUT_C 37 +#define AUD_CLKID_FRDDR_A 38 +#define AUD_CLKID_FRDDR_B 39 +#define AUD_CLKID_FRDDR_C 40 +#define AUD_CLKID_TODDR_A 41 +#define AUD_CLKID_TODDR_B 42 +#define AUD_CLKID_TODDR_C 43 +#define AUD_CLKID_LOOPBACK 44 +#define AUD_CLKID_SPDIFIN 45 +#define AUD_CLKID_SPDIFOUT 46 +#define AUD_CLKID_RESAMPLE 47 +#define AUD_CLKID_POWER_DETECT 48 +#define AUD_CLKID_MST_A_MCLK 49 +#define AUD_CLKID_MST_B_MCLK 50 +#define AUD_CLKID_MST_C_MCLK 51 +#define AUD_CLKID_MST_D_MCLK 52 +#define AUD_CLKID_MST_E_MCLK 53 +#define AUD_CLKID_MST_F_MCLK 54 +#define AUD_CLKID_SPDIFOUT_CLK 55 +#define AUD_CLKID_SPDIFIN_CLK 56 +#define AUD_CLKID_PDM_DCLK 57 +#define AUD_CLKID_PDM_SYSCLK 58 +#define AUD_CLKID_MST_A_SCLK 79 +#define AUD_CLKID_MST_B_SCLK 80 +#define AUD_CLKID_MST_C_SCLK 81 +#define AUD_CLKID_MST_D_SCLK 82 +#define AUD_CLKID_MST_E_SCLK 83 +#define AUD_CLKID_MST_F_SCLK 84 +#define AUD_CLKID_MST_A_LRCLK 86 +#define AUD_CLKID_MST_B_LRCLK 87 +#define AUD_CLKID_MST_C_LRCLK 88 +#define AUD_CLKID_MST_D_LRCLK 89 +#define AUD_CLKID_MST_E_LRCLK 90 +#define AUD_CLKID_MST_F_LRCLK 91 +#define AUD_CLKID_TDMIN_A_SCLK_SEL 116 +#define AUD_CLKID_TDMIN_B_SCLK_SEL 117 +#define AUD_CLKID_TDMIN_C_SCLK_SEL 118 +#define AUD_CLKID_TDMIN_LB_SCLK_SEL 119 +#define AUD_CLKID_TDMOUT_A_SCLK_SEL 120 +#define AUD_CLKID_TDMOUT_B_SCLK_SEL 121 +#define AUD_CLKID_TDMOUT_C_SCLK_SEL 122 +#define AUD_CLKID_TDMIN_A_SCLK 123 +#define AUD_CLKID_TDMIN_B_SCLK 124 +#define AUD_CLKID_TDMIN_C_SCLK 125 +#define AUD_CLKID_TDMIN_LB_SCLK 126 +#define AUD_CLKID_TDMOUT_A_SCLK 127 +#define AUD_CLKID_TDMOUT_B_SCLK 128 +#define AUD_CLKID_TDMOUT_C_SCLK 129 +#define AUD_CLKID_TDMIN_A_LRCLK 130 +#define AUD_CLKID_TDMIN_B_LRCLK 131 +#define AUD_CLKID_TDMIN_C_LRCLK 132 +#define AUD_CLKID_TDMIN_LB_LRCLK 133 +#define AUD_CLKID_TDMOUT_A_LRCLK 134 +#define AUD_CLKID_TDMOUT_B_LRCLK 135 +#define AUD_CLKID_TDMOUT_C_LRCLK 136 + +#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */ diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h new file mode 100644 index 000000000..fd1f938c3 --- /dev/null +++ b/include/dt-bindings/clock/axg-clkc.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ +/* + * Meson-AXG clock tree IDs + * + * Copyright (c) 2017 Amlogic, Inc. All rights reserved. 
+ */ + +#ifndef __AXG_CLKC_H +#define __AXG_CLKC_H + +#define CLKID_SYS_PLL 0 +#define CLKID_FIXED_PLL 1 +#define CLKID_FCLK_DIV2 2 +#define CLKID_FCLK_DIV3 3 +#define CLKID_FCLK_DIV4 4 +#define CLKID_FCLK_DIV5 5 +#define CLKID_FCLK_DIV7 6 +#define CLKID_GP0_PLL 7 +#define CLKID_CLK81 10 +#define CLKID_MPLL0 11 +#define CLKID_MPLL1 12 +#define CLKID_MPLL2 13 +#define CLKID_MPLL3 14 +#define CLKID_DDR 15 +#define CLKID_AUDIO_LOCKER 16 +#define CLKID_MIPI_DSI_HOST 17 +#define CLKID_ISA 18 +#define CLKID_PL301 19 +#define CLKID_PERIPHS 20 +#define CLKID_SPICC0 21 +#define CLKID_I2C 22 +#define CLKID_RNG0 23 +#define CLKID_UART0 24 +#define CLKID_MIPI_DSI_PHY 25 +#define CLKID_SPICC1 26 +#define CLKID_PCIE_A 27 +#define CLKID_PCIE_B 28 +#define CLKID_HIU_IFACE 29 +#define CLKID_ASSIST_MISC 30 +#define CLKID_SD_EMMC_B 31 +#define CLKID_SD_EMMC_C 32 +#define CLKID_DMA 33 +#define CLKID_SPI 34 +#define CLKID_AUDIO 35 +#define CLKID_ETH 36 +#define CLKID_UART1 37 +#define CLKID_G2D 38 +#define CLKID_USB0 39 +#define CLKID_USB1 40 +#define CLKID_RESET 41 +#define CLKID_USB 42 +#define CLKID_AHB_ARB0 43 +#define CLKID_EFUSE 44 +#define CLKID_BOOT_ROM 45 +#define CLKID_AHB_DATA_BUS 46 +#define CLKID_AHB_CTRL_BUS 47 +#define CLKID_USB1_DDR_BRIDGE 48 +#define CLKID_USB0_DDR_BRIDGE 49 +#define CLKID_MMC_PCLK 50 +#define CLKID_VPU_INTR 51 +#define CLKID_SEC_AHB_AHB3_BRIDGE 52 +#define CLKID_GIC 53 +#define CLKID_AO_MEDIA_CPU 54 +#define CLKID_AO_AHB_SRAM 55 +#define CLKID_AO_AHB_BUS 56 +#define CLKID_AO_IFACE 57 +#define CLKID_AO_I2C 58 +#define CLKID_SD_EMMC_B_CLK0 59 +#define CLKID_SD_EMMC_C_CLK0 60 +#define CLKID_HIFI_PLL 69 +#define CLKID_PCIE_CML_EN0 79 +#define CLKID_PCIE_CML_EN1 80 +#define CLKID_MIPI_ENABLE 81 +#define CLKID_GEN_CLK 84 + +#endif /* __AXG_CLKC_H */ diff --git a/include/dt-bindings/clock/axis,artpec6-clkctrl.h b/include/dt-bindings/clock/axis,artpec6-clkctrl.h new file mode 100644 index 000000000..f9f04dccc --- /dev/null +++ b/include/dt-bindings/clock/axis,artpec6-clkctrl.h @@ -0,0 +1,38 @@ +/* + * ARTPEC-6 clock controller indexes + * + * Copyright 2016 Axis Comunications AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H +#define DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H + +#define ARTPEC6_CLK_CPU 0 +#define ARTPEC6_CLK_CPU_PERIPH 1 +#define ARTPEC6_CLK_NAND_CLKA 2 +#define ARTPEC6_CLK_NAND_CLKB 3 +#define ARTPEC6_CLK_ETH_ACLK 4 +#define ARTPEC6_CLK_DMA_ACLK 5 +#define ARTPEC6_CLK_PTP_REF 6 +#define ARTPEC6_CLK_SD_PCLK 7 +#define ARTPEC6_CLK_SD_IMCLK 8 +#define ARTPEC6_CLK_I2S_HST 9 +#define ARTPEC6_CLK_I2S0_CLK 10 +#define ARTPEC6_CLK_I2S1_CLK 11 +#define ARTPEC6_CLK_UART_PCLK 12 +#define ARTPEC6_CLK_UART_REFCLK 13 +#define ARTPEC6_CLK_I2C 14 +#define ARTPEC6_CLK_SPI_PCLK 15 +#define ARTPEC6_CLK_SPI_SSPCLK 16 +#define ARTPEC6_CLK_SYS_TIMER 17 +#define ARTPEC6_CLK_FRACDIV_IN 18 +#define ARTPEC6_CLK_DBG_PCLK 19 + +/* This must be the highest clock index plus one. */ +#define ARTPEC6_CLK_NUMCLOCKS 20 + +#endif diff --git a/include/dt-bindings/clock/bcm-cygnus.h b/include/dt-bindings/clock/bcm-cygnus.h new file mode 100644 index 000000000..62ac5d782 --- /dev/null +++ b/include/dt-bindings/clock/bcm-cygnus.h @@ -0,0 +1,74 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2014 Broadcom Corporation. All rights reserved. 
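The Meson-AXG IDs listed above are plain single-cell indices; a minimal, illustrative provider/consumer pairing (the peripheral node and its address are hypothetical) looks like:

	clkc: clock-controller {
		compatible = "amlogic,axg-clkc";
		#clock-cells = <1>;
	};

	uart_A: serial@24000 {				/* placeholder unit address */
		clocks = <&clkc CLKID_UART0>;
		clock-names = "pclk";
	};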
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CLOCK_BCM_CYGNUS_H +#define _CLOCK_BCM_CYGNUS_H + +/* GENPLL clock ID */ +#define BCM_CYGNUS_GENPLL 0 +#define BCM_CYGNUS_GENPLL_AXI21_CLK 1 +#define BCM_CYGNUS_GENPLL_250MHZ_CLK 2 +#define BCM_CYGNUS_GENPLL_IHOST_SYS_CLK 3 +#define BCM_CYGNUS_GENPLL_ENET_SW_CLK 4 +#define BCM_CYGNUS_GENPLL_AUDIO_125_CLK 5 +#define BCM_CYGNUS_GENPLL_CAN_CLK 6 + +/* LCPLL0 clock ID */ +#define BCM_CYGNUS_LCPLL0 0 +#define BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK 1 +#define BCM_CYGNUS_LCPLL0_DDR_PHY_CLK 2 +#define BCM_CYGNUS_LCPLL0_SDIO_CLK 3 +#define BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK 4 +#define BCM_CYGNUS_LCPLL0_SMART_CARD_CLK 5 +#define BCM_CYGNUS_LCPLL0_CH5_UNUSED 6 + +/* MIPI PLL clock ID */ +#define BCM_CYGNUS_MIPIPLL 0 +#define BCM_CYGNUS_MIPIPLL_CH0_UNUSED 1 +#define BCM_CYGNUS_MIPIPLL_CH1_LCD 2 +#define BCM_CYGNUS_MIPIPLL_CH2_V3D 3 +#define BCM_CYGNUS_MIPIPLL_CH3_UNUSED 4 +#define BCM_CYGNUS_MIPIPLL_CH4_UNUSED 5 +#define BCM_CYGNUS_MIPIPLL_CH5_UNUSED 6 + +/* ASIU clock ID */ +#define BCM_CYGNUS_ASIU_KEYPAD_CLK 0 +#define BCM_CYGNUS_ASIU_ADC_CLK 1 +#define BCM_CYGNUS_ASIU_PWM_CLK 2 + +/* AUDIO clock ID */ +#define BCM_CYGNUS_AUDIOPLL 0 +#define BCM_CYGNUS_AUDIOPLL_CH0 1 +#define BCM_CYGNUS_AUDIOPLL_CH1 2 +#define BCM_CYGNUS_AUDIOPLL_CH2 3 + +#endif /* _CLOCK_BCM_CYGNUS_H */ diff --git a/include/dt-bindings/clock/bcm-ns2.h b/include/dt-bindings/clock/bcm-ns2.h new file mode 100644 index 000000000..d99c7a2e7 --- /dev/null +++ b/include/dt-bindings/clock/bcm-ns2.h @@ -0,0 +1,72 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2015 Broadcom Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CLOCK_BCM_NS2_H +#define _CLOCK_BCM_NS2_H + +/* GENPLL SCR clock channel ID */ +#define BCM_NS2_GENPLL_SCR 0 +#define BCM_NS2_GENPLL_SCR_SCR_CLK 1 +#define BCM_NS2_GENPLL_SCR_FS_CLK 2 +#define BCM_NS2_GENPLL_SCR_AUDIO_CLK 3 +#define BCM_NS2_GENPLL_SCR_CH3_UNUSED 4 +#define BCM_NS2_GENPLL_SCR_CH4_UNUSED 5 +#define BCM_NS2_GENPLL_SCR_CH5_UNUSED 6 + +/* GENPLL SW clock channel ID */ +#define BCM_NS2_GENPLL_SW 0 +#define BCM_NS2_GENPLL_SW_RPE_CLK 1 +#define BCM_NS2_GENPLL_SW_250_CLK 2 +#define BCM_NS2_GENPLL_SW_NIC_CLK 3 +#define BCM_NS2_GENPLL_SW_CHIMP_CLK 4 +#define BCM_NS2_GENPLL_SW_PORT_CLK 5 +#define BCM_NS2_GENPLL_SW_SDIO_CLK 6 + +/* LCPLL DDR clock channel ID */ +#define BCM_NS2_LCPLL_DDR 0 +#define BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK 1 +#define BCM_NS2_LCPLL_DDR_DDR_CLK 2 +#define BCM_NS2_LCPLL_DDR_CH2_UNUSED 3 +#define BCM_NS2_LCPLL_DDR_CH3_UNUSED 4 +#define BCM_NS2_LCPLL_DDR_CH4_UNUSED 5 +#define BCM_NS2_LCPLL_DDR_CH5_UNUSED 6 + +/* LCPLL PORTS clock channel ID */ +#define BCM_NS2_LCPLL_PORTS 0 +#define BCM_NS2_LCPLL_PORTS_WAN_CLK 1 +#define BCM_NS2_LCPLL_PORTS_RGMII_CLK 2 +#define BCM_NS2_LCPLL_PORTS_CH2_UNUSED 3 +#define BCM_NS2_LCPLL_PORTS_CH3_UNUSED 4 +#define BCM_NS2_LCPLL_PORTS_CH4_UNUSED 5 +#define BCM_NS2_LCPLL_PORTS_CH5_UNUSED 6 + +#endif /* _CLOCK_BCM_NS2_H */ diff --git a/include/dt-bindings/clock/bcm-nsp.h b/include/dt-bindings/clock/bcm-nsp.h new file mode 100644 index 000000000..ad5827cde --- /dev/null +++ b/include/dt-bindings/clock/bcm-nsp.h @@ -0,0 +1,51 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2015 Broadcom Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CLOCK_BCM_NSP_H +#define _CLOCK_BCM_NSP_H + +/* GENPLL clock channel ID */ +#define BCM_NSP_GENPLL 0 +#define BCM_NSP_GENPLL_PHY_CLK 1 +#define BCM_NSP_GENPLL_ENET_SW_CLK 2 +#define BCM_NSP_GENPLL_USB_PHY_REF_CLK 3 +#define BCM_NSP_GENPLL_IPROCFAST_CLK 4 +#define BCM_NSP_GENPLL_SATA1_CLK 5 +#define BCM_NSP_GENPLL_SATA2_CLK 6 + +/* LCPLL0 clock channel ID */ +#define BCM_NSP_LCPLL0 0 +#define BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK 1 +#define BCM_NSP_LCPLL0_SDIO_CLK 2 +#define BCM_NSP_LCPLL0_DDR_PHY_CLK 3 + +#endif /* _CLOCK_BCM_NSP_H */ diff --git a/include/dt-bindings/clock/bcm-sr.h b/include/dt-bindings/clock/bcm-sr.h new file mode 100644 index 000000000..419011ba1 --- /dev/null +++ b/include/dt-bindings/clock/bcm-sr.h @@ -0,0 +1,111 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2017 Broadcom. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _CLOCK_BCM_SR_H +#define _CLOCK_BCM_SR_H + +/* GENPLL 0 clock channel ID SCR HSLS FS PCIE */ +#define BCM_SR_GENPLL0 0 +#define BCM_SR_GENPLL0_125M_CLK 1 +#define BCM_SR_GENPLL0_SCR_CLK 2 +#define BCM_SR_GENPLL0_250M_CLK 3 +#define BCM_SR_GENPLL0_PCIE_AXI_CLK 4 +#define BCM_SR_GENPLL0_PAXC_AXI_X2_CLK 5 +#define BCM_SR_GENPLL0_PAXC_AXI_CLK 6 + +/* GENPLL 1 clock channel ID MHB PCIE NITRO */ +#define BCM_SR_GENPLL1 0 +#define BCM_SR_GENPLL1_PCIE_TL_CLK 1 +#define BCM_SR_GENPLL1_MHB_APB_CLK 2 + +/* GENPLL 2 clock channel ID NITRO MHB*/ +#define BCM_SR_GENPLL2 0 +#define BCM_SR_GENPLL2_NIC_CLK 1 +#define BCM_SR_GENPLL2_TS_500_CLK 2 +#define BCM_SR_GENPLL2_125_NITRO_CLK 3 +#define BCM_SR_GENPLL2_CHIMP_CLK 4 +#define BCM_SR_GENPLL2_NIC_FLASH_CLK 5 +#define BCM_SR_GENPLL2_FS4_CLK 6 + +/* GENPLL 3 HSLS clock channel ID */ +#define BCM_SR_GENPLL3 0 +#define BCM_SR_GENPLL3_HSLS_CLK 1 +#define BCM_SR_GENPLL3_SDIO_CLK 2 + +/* GENPLL 4 SCR clock channel ID */ +#define BCM_SR_GENPLL4 0 +#define BCM_SR_GENPLL4_CCN_CLK 1 +#define BCM_SR_GENPLL4_TPIU_PLL_CLK 2 +#define BCM_SR_GENPLL4_NOC_CLK 3 +#define BCM_SR_GENPLL4_CHCLK_FS4_CLK 4 +#define BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK 5 + +/* GENPLL 5 FS4 clock channel ID */ +#define BCM_SR_GENPLL5 0 +#define BCM_SR_GENPLL5_FS4_HF_CLK 1 +#define BCM_SR_GENPLL5_CRYPTO_AE_CLK 2 +#define BCM_SR_GENPLL5_RAID_AE_CLK 3 + +/* GENPLL 6 NITRO clock channel ID */ +#define BCM_SR_GENPLL6 0 +#define BCM_SR_GENPLL6_48_USB_CLK 1 + +/* LCPLL0 clock channel ID */ +#define BCM_SR_LCPLL0 0 +#define BCM_SR_LCPLL0_SATA_REFP_CLK 1 +#define BCM_SR_LCPLL0_SATA_REFN_CLK 2 +#define BCM_SR_LCPLL0_SATA_350_CLK 3 +#define BCM_SR_LCPLL0_SATA_500_CLK 4 + +/* LCPLL1 clock channel ID */ +#define BCM_SR_LCPLL1 0 +#define BCM_SR_LCPLL1_WAN_CLK 1 +#define BCM_SR_LCPLL1_USB_REF_CLK 2 +#define BCM_SR_LCPLL1_CRMU_TS_CLK 3 + +/* LCPLL PCIE clock channel ID */ +#define BCM_SR_LCPLL_PCIE 0 +#define BCM_SR_LCPLL_PCIE_PHY_REF_CLK 1 + +/* GENPLL EMEM0 clock channel ID */ +#define BCM_SR_EMEMPLL0 0 +#define BCM_SR_EMEMPLL0_EMEM_CLK 1 + +/* GENPLL EMEM0 clock channel ID */ +#define BCM_SR_EMEMPLL1 0 +#define BCM_SR_EMEMPLL1_EMEM_CLK 1 + +/* GENPLL EMEM0 clock channel ID */ +#define BCM_SR_EMEMPLL2 0 +#define BCM_SR_EMEMPLL2_EMEM_CLK 1 + +#endif /* _CLOCK_BCM_SR_H */ diff --git a/include/dt-bindings/clock/bcm21664.h b/include/dt-bindings/clock/bcm21664.h new file mode 100644 index 000000000..5a7f0e475 --- /dev/null +++ b/include/dt-bindings/clock/bcm21664.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CLOCK_BCM21664_H +#define _CLOCK_BCM21664_H + +/* + * This file defines the values used to specify clocks provided by + * the clock control units (CCUs) on Broadcom BCM21664 family SoCs. 
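The iProc-family headers above (Cygnus, NS2, NSP and Stingray) all follow the same convention: index 0 selects the PLL itself and the remaining indices select its output channels, consumed through a single clock cell. A hypothetical Stingray fragment (compatible string, label and register range are assumed for illustration):

	genpll0: clock-controller@1d104 {
		compatible = "brcm,sr-genpll0";		/* assumed; must match the clk driver */
		reg = <0x1d104 0x32>;			/* placeholder */
		#clock-cells = <1>;
		clocks = <&osc>;
	};

	some_ip: peripheral@0 {				/* placeholder consumer */
		clocks = <&genpll0 BCM_SR_GENPLL0_125M_CLK>;
	};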
+ */ + +/* bcm21664 CCU device tree "compatible" strings */ +#define BCM21664_DT_ROOT_CCU_COMPAT "brcm,bcm21664-root-ccu" +#define BCM21664_DT_AON_CCU_COMPAT "brcm,bcm21664-aon-ccu" +#define BCM21664_DT_MASTER_CCU_COMPAT "brcm,bcm21664-master-ccu" +#define BCM21664_DT_SLAVE_CCU_COMPAT "brcm,bcm21664-slave-ccu" + +/* root CCU clock ids */ + +#define BCM21664_ROOT_CCU_FRAC_1M 0 +#define BCM21664_ROOT_CCU_CLOCK_COUNT 1 + +/* aon CCU clock ids */ + +#define BCM21664_AON_CCU_HUB_TIMER 0 +#define BCM21664_AON_CCU_CLOCK_COUNT 1 + +/* master CCU clock ids */ + +#define BCM21664_MASTER_CCU_SDIO1 0 +#define BCM21664_MASTER_CCU_SDIO2 1 +#define BCM21664_MASTER_CCU_SDIO3 2 +#define BCM21664_MASTER_CCU_SDIO4 3 +#define BCM21664_MASTER_CCU_SDIO1_SLEEP 4 +#define BCM21664_MASTER_CCU_SDIO2_SLEEP 5 +#define BCM21664_MASTER_CCU_SDIO3_SLEEP 6 +#define BCM21664_MASTER_CCU_SDIO4_SLEEP 7 +#define BCM21664_MASTER_CCU_CLOCK_COUNT 8 + +/* slave CCU clock ids */ + +#define BCM21664_SLAVE_CCU_UARTB 0 +#define BCM21664_SLAVE_CCU_UARTB2 1 +#define BCM21664_SLAVE_CCU_UARTB3 2 +#define BCM21664_SLAVE_CCU_BSC1 3 +#define BCM21664_SLAVE_CCU_BSC2 4 +#define BCM21664_SLAVE_CCU_BSC3 5 +#define BCM21664_SLAVE_CCU_BSC4 6 +#define BCM21664_SLAVE_CCU_CLOCK_COUNT 7 + +#endif /* _CLOCK_BCM21664_H */ diff --git a/include/dt-bindings/clock/bcm281xx.h b/include/dt-bindings/clock/bcm281xx.h new file mode 100644 index 000000000..a763460cf --- /dev/null +++ b/include/dt-bindings/clock/bcm281xx.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CLOCK_BCM281XX_H +#define _CLOCK_BCM281XX_H + +/* + * This file defines the values used to specify clocks provided by + * the clock control units (CCUs) on Broadcom BCM281XX family SoCs. + */ + +/* + * These are the bcm281xx CCU device tree "compatible" strings. + * We're stuck with using "bcm11351" in the string because wild + * cards aren't allowed, and that name was the first one defined + * in this family of devices. 
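Tying the BCM21664 compatible strings and per-CCU clock ids above together, a condensed DTS sketch (register range, labels and output names are placeholders) would be:

	slave_ccu: slave_ccu@3e011000 {
		compatible = BCM21664_DT_SLAVE_CCU_COMPAT;	/* expands to "brcm,bcm21664-slave-ccu" */
		reg = <0x3e011000 0x0f00>;			/* placeholder */
		#clock-cells = <1>;
		clock-output-names = "uartb", "uartb2", "uartb3",
				     "bsc1", "bsc2", "bsc3", "bsc4";
	};

	uartb: serial@3e000000 {				/* placeholder */
		clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB>;
	};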
+ */ +#define BCM281XX_DT_ROOT_CCU_COMPAT "brcm,bcm11351-root-ccu" +#define BCM281XX_DT_AON_CCU_COMPAT "brcm,bcm11351-aon-ccu" +#define BCM281XX_DT_HUB_CCU_COMPAT "brcm,bcm11351-hub-ccu" +#define BCM281XX_DT_MASTER_CCU_COMPAT "brcm,bcm11351-master-ccu" +#define BCM281XX_DT_SLAVE_CCU_COMPAT "brcm,bcm11351-slave-ccu" + +/* root CCU clock ids */ + +#define BCM281XX_ROOT_CCU_FRAC_1M 0 +#define BCM281XX_ROOT_CCU_CLOCK_COUNT 1 + +/* aon CCU clock ids */ + +#define BCM281XX_AON_CCU_HUB_TIMER 0 +#define BCM281XX_AON_CCU_PMU_BSC 1 +#define BCM281XX_AON_CCU_PMU_BSC_VAR 2 +#define BCM281XX_AON_CCU_CLOCK_COUNT 3 + +/* hub CCU clock ids */ + +#define BCM281XX_HUB_CCU_TMON_1M 0 +#define BCM281XX_HUB_CCU_CLOCK_COUNT 1 + +/* master CCU clock ids */ + +#define BCM281XX_MASTER_CCU_SDIO1 0 +#define BCM281XX_MASTER_CCU_SDIO2 1 +#define BCM281XX_MASTER_CCU_SDIO3 2 +#define BCM281XX_MASTER_CCU_SDIO4 3 +#define BCM281XX_MASTER_CCU_USB_IC 4 +#define BCM281XX_MASTER_CCU_HSIC2_48M 5 +#define BCM281XX_MASTER_CCU_HSIC2_12M 6 +#define BCM281XX_MASTER_CCU_CLOCK_COUNT 7 + +/* slave CCU clock ids */ + +#define BCM281XX_SLAVE_CCU_UARTB 0 +#define BCM281XX_SLAVE_CCU_UARTB2 1 +#define BCM281XX_SLAVE_CCU_UARTB3 2 +#define BCM281XX_SLAVE_CCU_UARTB4 3 +#define BCM281XX_SLAVE_CCU_SSP0 4 +#define BCM281XX_SLAVE_CCU_SSP2 5 +#define BCM281XX_SLAVE_CCU_BSC1 6 +#define BCM281XX_SLAVE_CCU_BSC2 7 +#define BCM281XX_SLAVE_CCU_BSC3 8 +#define BCM281XX_SLAVE_CCU_PWM 9 +#define BCM281XX_SLAVE_CCU_CLOCK_COUNT 10 + +#endif /* _CLOCK_BCM281XX_H */ diff --git a/include/dt-bindings/clock/bcm2835-aux.h b/include/dt-bindings/clock/bcm2835-aux.h new file mode 100644 index 000000000..d91156e26 --- /dev/null +++ b/include/dt-bindings/clock/bcm2835-aux.h @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define BCM2835_AUX_CLOCK_UART 0 +#define BCM2835_AUX_CLOCK_SPI1 1 +#define BCM2835_AUX_CLOCK_SPI2 2 +#define BCM2835_AUX_CLOCK_COUNT 3 diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h new file mode 100644 index 000000000..a0c812b0f --- /dev/null +++ b/include/dt-bindings/clock/bcm2835.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define BCM2835_PLLA 0 +#define BCM2835_PLLB 1 +#define BCM2835_PLLC 2 +#define BCM2835_PLLD 3 +#define BCM2835_PLLH 4 + +#define BCM2835_PLLA_CORE 5 +#define BCM2835_PLLA_PER 6 +#define BCM2835_PLLB_ARM 7 +#define BCM2835_PLLC_CORE0 8 +#define BCM2835_PLLC_CORE1 9 +#define BCM2835_PLLC_CORE2 10 +#define BCM2835_PLLC_PER 11 +#define BCM2835_PLLD_CORE 12 +#define BCM2835_PLLD_PER 13 +#define BCM2835_PLLH_RCAL 14 +#define BCM2835_PLLH_AUX 15 +#define BCM2835_PLLH_PIX 16 + +#define BCM2835_CLOCK_TIMER 17 +#define BCM2835_CLOCK_OTP 18 +#define BCM2835_CLOCK_UART 19 +#define BCM2835_CLOCK_VPU 20 +#define BCM2835_CLOCK_V3D 21 +#define BCM2835_CLOCK_ISP 22 +#define BCM2835_CLOCK_H264 23 +#define BCM2835_CLOCK_VEC 24 +#define BCM2835_CLOCK_HSM 25 +#define BCM2835_CLOCK_SDRAM 26 +#define BCM2835_CLOCK_TSENS 27 +#define BCM2835_CLOCK_EMMC 28 +#define BCM2835_CLOCK_PERI_IMAGE 29 +#define BCM2835_CLOCK_PWM 30 +#define BCM2835_CLOCK_PCM 31 + +#define BCM2835_PLLA_DSI0 32 +#define BCM2835_PLLA_CCP2 33 +#define BCM2835_PLLD_DSI0 34 +#define BCM2835_PLLD_DSI1 35 + +#define BCM2835_CLOCK_AVEO 36 +#define BCM2835_CLOCK_DFT 37 +#define BCM2835_CLOCK_GP0 38 +#define BCM2835_CLOCK_GP1 39 +#define BCM2835_CLOCK_GP2 40 +#define BCM2835_CLOCK_SLIM 41 +#define BCM2835_CLOCK_SMI 42 +#define BCM2835_CLOCK_TEC 43 +#define BCM2835_CLOCK_DPI 44 +#define BCM2835_CLOCK_CAM0 45 +#define BCM2835_CLOCK_CAM1 46 +#define BCM2835_CLOCK_DSI0E 47 +#define BCM2835_CLOCK_DSI1E 48 +#define BCM2835_CLOCK_DSI0P 49 +#define BCM2835_CLOCK_DSI1P 50 diff --git a/include/dt-bindings/clock/berlin2.h b/include/dt-bindings/clock/berlin2.h new file mode 100644 index 000000000..b07b8efab --- /dev/null +++ b/include/dt-bindings/clock/berlin2.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Berlin2 BG2/BG2CD clock tree IDs + */ + +#define CLKID_SYS 0 +#define CLKID_CPU 1 +#define CLKID_DRMFIGO 2 +#define CLKID_CFG 3 +#define CLKID_GFX 4 +#define CLKID_ZSP 5 +#define CLKID_PERIF 6 +#define CLKID_PCUBE 7 +#define CLKID_VSCOPE 8 +#define CLKID_NFC_ECC 9 +#define CLKID_VPP 10 +#define CLKID_APP 11 +#define CLKID_AUDIO0 12 +#define CLKID_AUDIO2 13 +#define CLKID_AUDIO3 14 +#define CLKID_AUDIO1 15 +#define CLKID_GFX3D_CORE 16 +#define CLKID_GFX3D_SYS 17 +#define CLKID_ARC 18 +#define CLKID_VIP 19 +#define CLKID_SDIO0XIN 20 +#define CLKID_SDIO1XIN 21 +#define CLKID_GFX3D_EXTRA 22 +#define CLKID_GC360 23 +#define CLKID_SDIO_DLLMST 24 +#define CLKID_GETH0 25 +#define CLKID_GETH1 26 +#define CLKID_SATA 27 +#define CLKID_AHBAPB 28 +#define CLKID_USB0 29 +#define CLKID_USB1 30 +#define CLKID_PBRIDGE 31 +#define CLKID_SDIO0 32 +#define CLKID_SDIO1 33 +#define CLKID_NFC 34 +#define CLKID_SMEMC 35 +#define CLKID_AUDIOHD 36 +#define CLKID_VIDEO0 37 +#define CLKID_VIDEO1 38 +#define CLKID_VIDEO2 39 +#define CLKID_TWD 40 diff --git a/include/dt-bindings/clock/berlin2q.h b/include/dt-bindings/clock/berlin2q.h new file mode 100644 index 000000000..44b4ac382 --- /dev/null +++ b/include/dt-bindings/clock/berlin2q.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Berlin2 BG2Q clock tree IDs + */ + +#define CLKID_SYS 0 +#define CLKID_DRMFIGO 1 +#define CLKID_CFG 2 +#define CLKID_GFX2D 3 +#define CLKID_ZSP 4 +#define CLKID_PERIF 5 +#define CLKID_PCUBE 6 +#define CLKID_VSCOPE 7 +#define CLKID_NFC_ECC 8 +#define CLKID_VPP 9 +#define CLKID_APP 10 +#define CLKID_SDIO0XIN 11 +#define CLKID_SDIO1XIN 12 +#define CLKID_GFX2DAXI 13 +#define CLKID_GETH0 14 +#define CLKID_SATA 15 +#define CLKID_AHBAPB 16 +#define CLKID_USB0 17 +#define 
CLKID_USB1 18 +#define CLKID_USB2 19 +#define CLKID_USB3 20 +#define CLKID_PBRIDGE 21 +#define CLKID_SDIO 22 +#define CLKID_NFC 23 +#define CLKID_SMEMC 24 +#define CLKID_PCIE 25 +#define CLKID_TWD 26 +#define CLKID_CPU 27 diff --git a/include/dt-bindings/clock/boston-clock.h b/include/dt-bindings/clock/boston-clock.h new file mode 100644 index 000000000..a6f009821 --- /dev/null +++ b/include/dt-bindings/clock/boston-clock.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2016 Imagination Technologies + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__ +#define __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__ + +#define BOSTON_CLK_INPUT 0 +#define BOSTON_CLK_SYS 1 +#define BOSTON_CLK_CPU 2 + +#endif /* __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__ */ diff --git a/include/dt-bindings/clock/clps711x-clock.h b/include/dt-bindings/clock/clps711x-clock.h new file mode 100644 index 000000000..0c4c80b63 --- /dev/null +++ b/include/dt-bindings/clock/clps711x-clock.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2014 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __DT_BINDINGS_CLOCK_CLPS711X_H +#define __DT_BINDINGS_CLOCK_CLPS711X_H + +#define CLPS711X_CLK_DUMMY 0 +#define CLPS711X_CLK_CPU 1 +#define CLPS711X_CLK_BUS 2 +#define CLPS711X_CLK_PLL 3 +#define CLPS711X_CLK_TIMERREF 4 +#define CLPS711X_CLK_TIMER1 5 +#define CLPS711X_CLK_TIMER2 6 +#define CLPS711X_CLK_PWM 7 +#define CLPS711X_CLK_SPIREF 8 +#define CLPS711X_CLK_SPI 9 +#define CLPS711X_CLK_UART 10 +#define CLPS711X_CLK_TICK 11 +#define CLPS711X_CLK_MAX 12 + +#endif diff --git a/include/dt-bindings/clock/cortina,gemini-clock.h b/include/dt-bindings/clock/cortina,gemini-clock.h new file mode 100644 index 000000000..04c3404b2 --- /dev/null +++ b/include/dt-bindings/clock/cortina,gemini-clock.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_CORTINA_GEMINI_CLOCK_H +#define DT_BINDINGS_CORTINA_GEMINI_CLOCK_H + +/* RTC, AHB, APB, CPU, PCI, TVC, UART clocks and 13 gates */ +#define GEMINI_NUM_CLKS 20 + +#define GEMINI_CLK_RTC 0 +#define GEMINI_CLK_AHB 1 +#define GEMINI_CLK_APB 2 +#define GEMINI_CLK_CPU 3 +#define GEMINI_CLK_PCI 4 +#define GEMINI_CLK_TVC 5 +#define GEMINI_CLK_UART 6 +#define GEMINI_CLK_GATES 7 +#define GEMINI_CLK_GATE_SECURITY 7 +#define GEMINI_CLK_GATE_GMAC0 8 +#define GEMINI_CLK_GATE_GMAC1 9 +#define GEMINI_CLK_GATE_SATA0 10 +#define GEMINI_CLK_GATE_SATA1 11 +#define GEMINI_CLK_GATE_USB0 12 +#define GEMINI_CLK_GATE_USB1 13 +#define GEMINI_CLK_GATE_IDE 14 +#define GEMINI_CLK_GATE_PCI 15 +#define GEMINI_CLK_GATE_DDR 16 +#define GEMINI_CLK_GATE_FLASH 17 +#define GEMINI_CLK_GATE_TVC 18 +#define GEMINI_CLK_GATE_BOOT 19 + +#endif /* DT_BINDINGS_CORTINA_GEMINI_CLOCK_H */ diff --git a/include/dt-bindings/clock/dm814.h b/include/dt-bindings/clock/dm814.h new file mode 100644 index 000000000..0e7099a34 --- /dev/null +++ b/include/dt-bindings/clock/dm814.h @@ -0,0 +1,45 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ +#ifndef __DT_BINDINGS_CLK_DM814_H +#define __DT_BINDINGS_CLK_DM814_H + +#define DM814_CLKCTRL_OFFSET 0x0 +#define DM814_CLKCTRL_INDEX(offset) ((offset) - DM814_CLKCTRL_OFFSET) + +/* default clocks */ +#define DM814_USB_OTG_HS_CLKCTRL DM814_CLKCTRL_INDEX(0x58) + +/* alwon clocks */ +#define DM814_UART1_CLKCTRL DM814_CLKCTRL_INDEX(0x150) +#define DM814_UART2_CLKCTRL DM814_CLKCTRL_INDEX(0x154) +#define DM814_UART3_CLKCTRL DM814_CLKCTRL_INDEX(0x158) +#define DM814_GPIO1_CLKCTRL DM814_CLKCTRL_INDEX(0x15c) +#define DM814_GPIO2_CLKCTRL DM814_CLKCTRL_INDEX(0x160) +#define DM814_I2C1_CLKCTRL DM814_CLKCTRL_INDEX(0x164) +#define DM814_I2C2_CLKCTRL DM814_CLKCTRL_INDEX(0x168) +#define DM814_WD_TIMER_CLKCTRL DM814_CLKCTRL_INDEX(0x18c) +#define DM814_MCSPI1_CLKCTRL DM814_CLKCTRL_INDEX(0x190) +#define DM814_GPMC_CLKCTRL DM814_CLKCTRL_INDEX(0x1d0) +#define DM814_CPGMAC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1d4) +#define DM814_MPU_CLKCTRL DM814_CLKCTRL_INDEX(0x1dc) +#define DM814_RTC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f0) +#define DM814_TPCC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f4) +#define DM814_TPTC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1f8) +#define DM814_TPTC1_CLKCTRL DM814_CLKCTRL_INDEX(0x1fc) +#define DM814_TPTC2_CLKCTRL DM814_CLKCTRL_INDEX(0x200) +#define DM814_TPTC3_CLKCTRL DM814_CLKCTRL_INDEX(0x204) +#define DM814_MMC1_CLKCTRL DM814_CLKCTRL_INDEX(0x21c) +#define DM814_MMC2_CLKCTRL DM814_CLKCTRL_INDEX(0x220) +#define DM814_MMC3_CLKCTRL DM814_CLKCTRL_INDEX(0x224) + +#endif diff --git a/include/dt-bindings/clock/dm816.h b/include/dt-bindings/clock/dm816.h new file mode 100644 index 000000000..69e8a36d7 --- /dev/null +++ b/include/dt-bindings/clock/dm816.h @@ -0,0 +1,53 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_DM816_H +#define __DT_BINDINGS_CLK_DM816_H + +#define DM816_CLKCTRL_OFFSET 0x0 +#define DM816_CLKCTRL_INDEX(offset) ((offset) - DM816_CLKCTRL_OFFSET) + +/* default clocks */ +#define DM816_USB_OTG_HS_CLKCTRL DM816_CLKCTRL_INDEX(0x58) + +/* alwon clocks */ +#define DM816_UART1_CLKCTRL DM816_CLKCTRL_INDEX(0x150) +#define DM816_UART2_CLKCTRL DM816_CLKCTRL_INDEX(0x154) +#define DM816_UART3_CLKCTRL DM816_CLKCTRL_INDEX(0x158) +#define DM816_GPIO1_CLKCTRL DM816_CLKCTRL_INDEX(0x15c) +#define DM816_GPIO2_CLKCTRL DM816_CLKCTRL_INDEX(0x160) +#define DM816_I2C1_CLKCTRL DM816_CLKCTRL_INDEX(0x164) +#define DM816_I2C2_CLKCTRL DM816_CLKCTRL_INDEX(0x168) +#define DM816_TIMER1_CLKCTRL DM816_CLKCTRL_INDEX(0x170) +#define DM816_TIMER2_CLKCTRL DM816_CLKCTRL_INDEX(0x174) +#define DM816_TIMER3_CLKCTRL DM816_CLKCTRL_INDEX(0x178) +#define DM816_TIMER4_CLKCTRL DM816_CLKCTRL_INDEX(0x17c) +#define DM816_TIMER5_CLKCTRL DM816_CLKCTRL_INDEX(0x180) +#define DM816_TIMER6_CLKCTRL DM816_CLKCTRL_INDEX(0x184) +#define DM816_TIMER7_CLKCTRL DM816_CLKCTRL_INDEX(0x188) +#define DM816_WD_TIMER_CLKCTRL DM816_CLKCTRL_INDEX(0x18c) +#define DM816_MCSPI1_CLKCTRL DM816_CLKCTRL_INDEX(0x190) +#define DM816_MAILBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x194) +#define DM816_SPINBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x198) +#define DM816_MMC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1b0) +#define DM816_GPMC_CLKCTRL DM816_CLKCTRL_INDEX(0x1d0) +#define DM816_DAVINCI_MDIO_CLKCTRL DM816_CLKCTRL_INDEX(0x1d4) +#define DM816_EMAC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1d8) +#define DM816_MPU_CLKCTRL DM816_CLKCTRL_INDEX(0x1dc) +#define DM816_RTC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f0) +#define DM816_TPCC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f4) +#define DM816_TPTC0_CLKCTRL DM816_CLKCTRL_INDEX(0x1f8) +#define DM816_TPTC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1fc) +#define DM816_TPTC2_CLKCTRL DM816_CLKCTRL_INDEX(0x200) +#define DM816_TPTC3_CLKCTRL DM816_CLKCTRL_INDEX(0x204) + +#endif diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h new file mode 100644 index 000000000..d7549c57c --- /dev/null +++ b/include/dt-bindings/clock/dra7.h @@ -0,0 +1,173 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_DRA7_H +#define __DT_BINDINGS_CLK_DRA7_H + +#define DRA7_CLKCTRL_OFFSET 0x20 +#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET) + +/* mpu clocks */ +#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* ipu clocks */ +#define DRA7_IPU_CLKCTRL_OFFSET 0x40 +#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET) +#define DRA7_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50) +#define DRA7_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58) +#define DRA7_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60) +#define DRA7_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68) +#define DRA7_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70) +#define DRA7_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78) +#define DRA7_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80) + +/* rtc clocks */ +#define DRA7_RTC_CLKCTRL_OFFSET 0x40 +#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET) +#define DRA7_RTCSS_CLKCTRL DRA7_RTC_CLKCTRL_INDEX(0x44) + +/* coreaon clocks */ +#define DRA7_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) + +/* l3main1 clocks */ +#define DRA7_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) + +/* dma clocks */ +#define DRA7_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* emif clocks */ +#define DRA7_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* atl clocks */ +#define DRA7_ATL_CLKCTRL_OFFSET 0x0 +#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET) +#define DRA7_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0) + +/* l4cfg clocks */ +#define DRA7_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58) +#define DRA7_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60) +#define DRA7_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68) +#define DRA7_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) +#define DRA7_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98) +#define DRA7_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) + +/* l3instr clocks */ +#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) + +/* dss clocks */ +#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) + +/* l3init clocks */ +#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_PCIE1_CLKCTRL DRA7_CLKCTRL_INDEX(0xb0) +#define DRA7_PCIE2_CLKCTRL DRA7_CLKCTRL_INDEX(0xb8) +#define DRA7_GMAC_CLKCTRL DRA7_CLKCTRL_INDEX(0xd0) +#define DRA7_OCP2SCP1_CLKCTRL 
DRA7_CLKCTRL_INDEX(0xe0) +#define DRA7_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8) +#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0) + +/* l4per clocks */ +#define DRA7_L4PER_CLKCTRL_OFFSET 0x0 +#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET) +#define DRA7_L4_PER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc) +#define DRA7_L4_PER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x14) +#define DRA7_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28) +#define DRA7_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30) +#define DRA7_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38) +#define DRA7_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40) +#define DRA7_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48) +#define DRA7_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50) +#define DRA7_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58) +#define DRA7_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60) +#define DRA7_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68) +#define DRA7_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70) +#define DRA7_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78) +#define DRA7_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80) +#define DRA7_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88) +#define DRA7_EPWMSS1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x90) +#define DRA7_EPWMSS2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x98) +#define DRA7_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0) +#define DRA7_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8) +#define DRA7_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0) +#define DRA7_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8) +#define DRA7_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0) +#define DRA7_EPWMSS0_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc4) +#define DRA7_TIMER13_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc8) +#define DRA7_TIMER14_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd0) +#define DRA7_TIMER15_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd8) +#define DRA7_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0) +#define DRA7_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8) +#define DRA7_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100) +#define DRA7_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108) +#define DRA7_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110) +#define DRA7_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118) +#define DRA7_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120) +#define DRA7_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128) +#define DRA7_TIMER16_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x130) +#define DRA7_QSPI_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x138) +#define DRA7_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140) +#define DRA7_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148) +#define DRA7_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150) +#define DRA7_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158) +#define DRA7_MCASP2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x160) +#define DRA7_MCASP3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x168) +#define DRA7_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170) +#define DRA7_MCASP5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x178) +#define DRA7_MCASP8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x190) +#define DRA7_MCASP4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x198) +#define DRA7_AES1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a0) +#define DRA7_AES2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a8) +#define DRA7_DES_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1b0) +#define DRA7_RNG_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c0) +#define DRA7_SHAM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c8) +#define DRA7_UART7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1d0) +#define DRA7_UART8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e0) +#define DRA7_UART9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e8) +#define DRA7_DCAN2_CLKCTRL 
DRA7_L4PER_CLKCTRL_INDEX(0x1f0) +#define DRA7_MCASP6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x204) +#define DRA7_MCASP7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x208) + +/* wkupaon clocks */ +#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) +#define DRA7_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) + +#endif diff --git a/include/dt-bindings/clock/efm32-cmu.h b/include/dt-bindings/clock/efm32-cmu.h new file mode 100644 index 000000000..4b48d15fe --- /dev/null +++ b/include/dt-bindings/clock/efm32-cmu.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_CLOCK_EFM32_CMU_H +#define __DT_BINDINGS_CLOCK_EFM32_CMU_H + +#define clk_HFXO 0 +#define clk_HFRCO 1 +#define clk_LFXO 2 +#define clk_LFRCO 3 +#define clk_ULFRCO 4 +#define clk_AUXHFRCO 5 +#define clk_HFCLKNODIV 6 +#define clk_HFCLK 7 +#define clk_HFPERCLK 8 +#define clk_HFCORECLK 9 +#define clk_LFACLK 10 +#define clk_LFBCLK 11 +#define clk_WDOGCLK 12 +#define clk_HFCORECLKDMA 13 +#define clk_HFCORECLKAES 14 +#define clk_HFCORECLKUSBC 15 +#define clk_HFCORECLKUSB 16 +#define clk_HFCORECLKLE 17 +#define clk_HFCORECLKEBI 18 +#define clk_HFPERCLKUSART0 19 +#define clk_HFPERCLKUSART1 20 +#define clk_HFPERCLKUSART2 21 +#define clk_HFPERCLKUART0 22 +#define clk_HFPERCLKUART1 23 +#define clk_HFPERCLKTIMER0 24 +#define clk_HFPERCLKTIMER1 25 +#define clk_HFPERCLKTIMER2 26 +#define clk_HFPERCLKTIMER3 27 +#define clk_HFPERCLKACMP0 28 +#define clk_HFPERCLKACMP1 29 +#define clk_HFPERCLKI2C0 30 +#define clk_HFPERCLKI2C1 31 +#define clk_HFPERCLKGPIO 32 +#define clk_HFPERCLKVCMP 33 +#define clk_HFPERCLKPRS 34 +#define clk_HFPERCLKADC0 35 +#define clk_HFPERCLKDAC0 36 + +#endif /* __DT_BINDINGS_CLOCK_EFM32_CMU_H */ diff --git a/include/dt-bindings/clock/exynos-audss-clk.h b/include/dt-bindings/clock/exynos-audss-clk.h new file mode 100644 index 000000000..eee9fcc6e --- /dev/null +++ b/include/dt-bindings/clock/exynos-audss-clk.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for Samsung audio subsystem + * clock controller. + * + * The constants defined in this header are being used in dts + * and exynos audss driver. + */ + +#ifndef _DT_BINDINGS_CLK_EXYNOS_AUDSS_H +#define _DT_BINDINGS_CLK_EXYNOS_AUDSS_H + +#define EXYNOS_MOUT_AUDSS 0 +#define EXYNOS_MOUT_I2S 1 +#define EXYNOS_DOUT_SRP 2 +#define EXYNOS_DOUT_AUD_BUS 3 +#define EXYNOS_DOUT_I2S 4 +#define EXYNOS_SRP_CLK 5 +#define EXYNOS_I2S_BUS 6 +#define EXYNOS_SCLK_I2S 7 +#define EXYNOS_PCM_BUS 8 +#define EXYNOS_SCLK_PCM 9 +#define EXYNOS_ADMA 10 + +#define EXYNOS_AUDSS_MAX_CLKS 11 + +#endif diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h new file mode 100644 index 000000000..c796ff02c --- /dev/null +++ b/include/dt-bindings/clock/exynos3250.h @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Tomasz Figa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants for Samsung Exynos3250 clock controllers. 
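For the Samsung audio-subsystem ids listed a few lines above, the AUDSS block is itself a clock consumer and re-exports the EXYNOS_* indices through one clock cell; a trimmed, illustrative pairing (compatible variant, addresses and parent list are SoC specific and assumed here):

	clock_audss: audss-clock-controller@3810000 {
		compatible = "samsung,exynos5420-audss-clock";	/* variant assumed */
		reg = <0x03810000 0x0c>;
		#clock-cells = <1>;
		clocks = <&fin_pll>;				/* parent list trimmed */
		clock-names = "pll_ref";
	};

	i2s0: i2s@3830000 {
		clocks = <&clock_audss EXYNOS_I2S_BUS>;
		clock-names = "iis";
	};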
+ */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to lookup the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. + */ + + +/* + * Main CMU + */ + +#define CLK_OSCSEL 1 +#define CLK_FIN_PLL 2 +#define CLK_FOUT_APLL 3 +#define CLK_FOUT_VPLL 4 +#define CLK_FOUT_UPLL 5 +#define CLK_FOUT_MPLL 6 +#define CLK_ARM_CLK 7 + +/* Muxes */ +#define CLK_MOUT_MPLL_USER_L 16 +#define CLK_MOUT_GDL 17 +#define CLK_MOUT_MPLL_USER_R 18 +#define CLK_MOUT_GDR 19 +#define CLK_MOUT_EBI 20 +#define CLK_MOUT_ACLK_200 21 +#define CLK_MOUT_ACLK_160 22 +#define CLK_MOUT_ACLK_100 23 +#define CLK_MOUT_ACLK_266_1 24 +#define CLK_MOUT_ACLK_266_0 25 +#define CLK_MOUT_ACLK_266 26 +#define CLK_MOUT_VPLL 27 +#define CLK_MOUT_EPLL_USER 28 +#define CLK_MOUT_EBI_1 29 +#define CLK_MOUT_UPLL 30 +#define CLK_MOUT_ACLK_400_MCUISP_SUB 31 +#define CLK_MOUT_MPLL 32 +#define CLK_MOUT_ACLK_400_MCUISP 33 +#define CLK_MOUT_VPLLSRC 34 +#define CLK_MOUT_CAM1 35 +#define CLK_MOUT_CAM_BLK 36 +#define CLK_MOUT_MFC 37 +#define CLK_MOUT_MFC_1 38 +#define CLK_MOUT_MFC_0 39 +#define CLK_MOUT_G3D 40 +#define CLK_MOUT_G3D_1 41 +#define CLK_MOUT_G3D_0 42 +#define CLK_MOUT_MIPI0 43 +#define CLK_MOUT_FIMD0 44 +#define CLK_MOUT_UART_ISP 45 +#define CLK_MOUT_SPI1_ISP 46 +#define CLK_MOUT_SPI0_ISP 47 +#define CLK_MOUT_TSADC 48 +#define CLK_MOUT_MMC1 49 +#define CLK_MOUT_MMC0 50 +#define CLK_MOUT_UART1 51 +#define CLK_MOUT_UART0 52 +#define CLK_MOUT_SPI1 53 +#define CLK_MOUT_SPI0 54 +#define CLK_MOUT_AUDIO 55 +#define CLK_MOUT_MPLL_USER_C 56 +#define CLK_MOUT_HPM 57 +#define CLK_MOUT_CORE 58 +#define CLK_MOUT_APLL 59 +#define CLK_MOUT_ACLK_266_SUB 60 +#define CLK_MOUT_UART2 61 +#define CLK_MOUT_MMC2 62 + +/* Dividers */ +#define CLK_DIV_GPL 64 +#define CLK_DIV_GDL 65 +#define CLK_DIV_GPR 66 +#define CLK_DIV_GDR 67 +#define CLK_DIV_MPLL_PRE 68 +#define CLK_DIV_ACLK_400_MCUISP 69 +#define CLK_DIV_EBI 70 +#define CLK_DIV_ACLK_200 71 +#define CLK_DIV_ACLK_160 72 +#define CLK_DIV_ACLK_100 73 +#define CLK_DIV_ACLK_266 74 +#define CLK_DIV_CAM1 75 +#define CLK_DIV_CAM_BLK 76 +#define CLK_DIV_MFC 77 +#define CLK_DIV_G3D 78 +#define CLK_DIV_MIPI0_PRE 79 +#define CLK_DIV_MIPI0 80 +#define CLK_DIV_FIMD0 81 +#define CLK_DIV_UART_ISP 82 +#define CLK_DIV_SPI1_ISP_PRE 83 +#define CLK_DIV_SPI1_ISP 84 +#define CLK_DIV_SPI0_ISP_PRE 85 +#define CLK_DIV_SPI0_ISP 86 +#define CLK_DIV_TSADC_PRE 87 +#define CLK_DIV_TSADC 88 +#define CLK_DIV_MMC1_PRE 89 +#define CLK_DIV_MMC1 90 +#define CLK_DIV_MMC0_PRE 91 +#define CLK_DIV_MMC0 92 +#define CLK_DIV_UART1 93 +#define CLK_DIV_UART0 94 +#define CLK_DIV_SPI1_PRE 95 +#define CLK_DIV_SPI1 96 +#define CLK_DIV_SPI0_PRE 97 +#define CLK_DIV_SPI0 98 +#define CLK_DIV_PCM 99 +#define CLK_DIV_AUDIO 100 +#define CLK_DIV_I2S 101 +#define CLK_DIV_CORE2 102 +#define CLK_DIV_APLL 103 +#define CLK_DIV_PCLK_DBG 104 +#define CLK_DIV_ATB 105 +#define CLK_DIV_COREM 106 +#define CLK_DIV_CORE 107 +#define CLK_DIV_HPM 108 +#define CLK_DIV_COPY 109 +#define CLK_DIV_UART2 110 +#define CLK_DIV_MMC2_PRE 111 +#define CLK_DIV_MMC2 112 + +/* Gates */ +#define CLK_ASYNC_G3D 128 +#define CLK_ASYNC_MFCL 129 +#define CLK_PPMULEFT 130 +#define CLK_GPIO_LEFT 131 +#define CLK_ASYNC_ISPMX 132 +#define CLK_ASYNC_FSYSD 133 +#define CLK_ASYNC_LCD0X 
134 +#define CLK_ASYNC_CAMX 135 +#define CLK_PPMURIGHT 136 +#define CLK_GPIO_RIGHT 137 +#define CLK_MONOCNT 138 +#define CLK_TZPC6 139 +#define CLK_PROVISIONKEY1 140 +#define CLK_PROVISIONKEY0 141 +#define CLK_CMU_ISPPART 142 +#define CLK_TMU_APBIF 143 +#define CLK_KEYIF 144 +#define CLK_RTC 145 +#define CLK_WDT 146 +#define CLK_MCT 147 +#define CLK_SECKEY 148 +#define CLK_TZPC5 149 +#define CLK_TZPC4 150 +#define CLK_TZPC3 151 +#define CLK_TZPC2 152 +#define CLK_TZPC1 153 +#define CLK_TZPC0 154 +#define CLK_CMU_COREPART 155 +#define CLK_CMU_TOPPART 156 +#define CLK_PMU_APBIF 157 +#define CLK_SYSREG 158 +#define CLK_CHIP_ID 159 +#define CLK_QEJPEG 160 +#define CLK_PIXELASYNCM1 161 +#define CLK_PIXELASYNCM0 162 +#define CLK_PPMUCAMIF 163 +#define CLK_QEM2MSCALER 164 +#define CLK_QEGSCALER1 165 +#define CLK_QEGSCALER0 166 +#define CLK_SMMUJPEG 167 +#define CLK_SMMUM2M2SCALER 168 +#define CLK_SMMUGSCALER1 169 +#define CLK_SMMUGSCALER0 170 +#define CLK_JPEG 171 +#define CLK_M2MSCALER 172 +#define CLK_GSCALER1 173 +#define CLK_GSCALER0 174 +#define CLK_QEMFC 175 +#define CLK_PPMUMFC_L 176 +#define CLK_SMMUMFC_L 177 +#define CLK_MFC 178 +#define CLK_SMMUG3D 179 +#define CLK_QEG3D 180 +#define CLK_PPMUG3D 181 +#define CLK_G3D 182 +#define CLK_QE_CH1_LCD 183 +#define CLK_QE_CH0_LCD 184 +#define CLK_PPMULCD0 185 +#define CLK_SMMUFIMD0 186 +#define CLK_DSIM0 187 +#define CLK_FIMD0 188 +#define CLK_CAM1 189 +#define CLK_UART_ISP_TOP 190 +#define CLK_SPI1_ISP_TOP 191 +#define CLK_SPI0_ISP_TOP 192 +#define CLK_TSADC 193 +#define CLK_PPMUFILE 194 +#define CLK_USBOTG 195 +#define CLK_USBHOST 196 +#define CLK_SROMC 197 +#define CLK_SDMMC1 198 +#define CLK_SDMMC0 199 +#define CLK_PDMA1 200 +#define CLK_PDMA0 201 +#define CLK_PWM 202 +#define CLK_PCM 203 +#define CLK_I2S 204 +#define CLK_SPI1 205 +#define CLK_SPI0 206 +#define CLK_I2C7 207 +#define CLK_I2C6 208 +#define CLK_I2C5 209 +#define CLK_I2C4 210 +#define CLK_I2C3 211 +#define CLK_I2C2 212 +#define CLK_I2C1 213 +#define CLK_I2C0 214 +#define CLK_UART1 215 +#define CLK_UART0 216 +#define CLK_BLOCK_LCD 217 +#define CLK_BLOCK_G3D 218 +#define CLK_BLOCK_MFC 219 +#define CLK_BLOCK_CAM 220 +#define CLK_SMIES 221 +#define CLK_UART2 222 +#define CLK_SDMMC2 223 + +/* Special clocks */ +#define CLK_SCLK_JPEG 224 +#define CLK_SCLK_M2MSCALER 225 +#define CLK_SCLK_GSCALER1 226 +#define CLK_SCLK_GSCALER0 227 +#define CLK_SCLK_MFC 228 +#define CLK_SCLK_G3D 229 +#define CLK_SCLK_MIPIDPHY2L 230 +#define CLK_SCLK_MIPI0 231 +#define CLK_SCLK_FIMD0 232 +#define CLK_SCLK_CAM1 233 +#define CLK_SCLK_UART_ISP 234 +#define CLK_SCLK_SPI1_ISP 235 +#define CLK_SCLK_SPI0_ISP 236 +#define CLK_SCLK_UPLL 237 +#define CLK_SCLK_TSADC 238 +#define CLK_SCLK_EBI 239 +#define CLK_SCLK_MMC1 240 +#define CLK_SCLK_MMC0 241 +#define CLK_SCLK_I2S 242 +#define CLK_SCLK_PCM 243 +#define CLK_SCLK_SPI1 244 +#define CLK_SCLK_SPI0 245 +#define CLK_SCLK_UART1 246 +#define CLK_SCLK_UART0 247 +#define CLK_SCLK_UART2 248 +#define CLK_SCLK_MMC2 249 + +/* + * Total number of clocks of main CMU. + * NOTE: Must be equal to last clock ID increased by one. + */ +#define CLK_NR_CLKS 250 + +/* + * CMU DMC + */ + +#define CLK_FOUT_BPLL 1 +#define CLK_FOUT_EPLL 2 + +/* Muxes */ +#define CLK_MOUT_MPLL_MIF 8 +#define CLK_MOUT_BPLL 9 +#define CLK_MOUT_DPHY 10 +#define CLK_MOUT_DMC_BUS 11 +#define CLK_MOUT_EPLL 12 + +/* Dividers */ +#define CLK_DIV_DMC 16 +#define CLK_DIV_DPHY 17 +#define CLK_DIV_DMC_PRE 18 +#define CLK_DIV_DMCP 19 +#define CLK_DIV_DMCD 20 + +/* + * Total number of clocks of main CMU. 
+ * NOTE: Must be equal to last clock ID increased by one. + */ +#define NR_CLKS_DMC 21 + +/* + * CMU ISP + */ + +/* Dividers */ + +#define CLK_DIV_ISP1 1 +#define CLK_DIV_ISP0 2 +#define CLK_DIV_MCUISP1 3 +#define CLK_DIV_MCUISP0 4 +#define CLK_DIV_MPWM 5 + +/* Gates */ + +#define CLK_UART_ISP 8 +#define CLK_WDT_ISP 9 +#define CLK_PWM_ISP 10 +#define CLK_I2C1_ISP 11 +#define CLK_I2C0_ISP 12 +#define CLK_MPWM_ISP 13 +#define CLK_MCUCTL_ISP 14 +#define CLK_PPMUISPX 15 +#define CLK_PPMUISPMX 16 +#define CLK_QE_LITE1 17 +#define CLK_QE_LITE0 18 +#define CLK_QE_FD 19 +#define CLK_QE_DRC 20 +#define CLK_QE_ISP 21 +#define CLK_CSIS1 22 +#define CLK_SMMU_LITE1 23 +#define CLK_SMMU_LITE0 24 +#define CLK_SMMU_FD 25 +#define CLK_SMMU_DRC 26 +#define CLK_SMMU_ISP 27 +#define CLK_GICISP 28 +#define CLK_CSIS0 29 +#define CLK_MCUISP 30 +#define CLK_LITE1 31 +#define CLK_LITE0 32 +#define CLK_FD 33 +#define CLK_DRC 34 +#define CLK_ISP 35 +#define CLK_QE_ISPCX 36 +#define CLK_QE_SCALERP 37 +#define CLK_QE_SCALERC 38 +#define CLK_SMMU_SCALERP 39 +#define CLK_SMMU_SCALERC 40 +#define CLK_SCALERP 41 +#define CLK_SCALERC 42 +#define CLK_SPI1_ISP 43 +#define CLK_SPI0_ISP 44 +#define CLK_SMMU_ISPCX 45 +#define CLK_ASYNCAXIM 46 +#define CLK_SCLK_MPWM_ISP 47 + +/* + * Total number of clocks of CMU_ISP. + * NOTE: Must be equal to last clock ID increased by one. + */ +#define NR_CLKS_ISP 48 + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */ diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h new file mode 100644 index 000000000..e9f9d400c --- /dev/null +++ b/include/dt-bindings/clock/exynos4.h @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Andrzej Hajda + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants for Exynos4 clock controller. 
+*/ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS_4_H +#define _DT_BINDINGS_CLOCK_EXYNOS_4_H + +/* core clocks */ +#define CLK_XXTI 1 +#define CLK_XUSBXTI 2 +#define CLK_FIN_PLL 3 +#define CLK_FOUT_APLL 4 +#define CLK_FOUT_MPLL 5 +#define CLK_FOUT_EPLL 6 +#define CLK_FOUT_VPLL 7 +#define CLK_SCLK_APLL 8 +#define CLK_SCLK_MPLL 9 +#define CLK_SCLK_EPLL 10 +#define CLK_SCLK_VPLL 11 +#define CLK_ARM_CLK 12 +#define CLK_ACLK200 13 +#define CLK_ACLK100 14 +#define CLK_ACLK160 15 +#define CLK_ACLK133 16 +#define CLK_MOUT_MPLL_USER_T 17 /* Exynos4x12 only */ +#define CLK_MOUT_MPLL_USER_C 18 /* Exynos4x12 only */ +#define CLK_MOUT_CORE 19 +#define CLK_MOUT_APLL 20 +#define CLK_SCLK_HDMIPHY 22 +#define CLK_OUT_DMC 23 +#define CLK_OUT_TOP 24 +#define CLK_OUT_LEFTBUS 25 +#define CLK_OUT_RIGHTBUS 26 +#define CLK_OUT_CPU 27 + +/* gate for special clocks (sclk) */ +#define CLK_SCLK_FIMC0 128 +#define CLK_SCLK_FIMC1 129 +#define CLK_SCLK_FIMC2 130 +#define CLK_SCLK_FIMC3 131 +#define CLK_SCLK_CAM0 132 +#define CLK_SCLK_CAM1 133 +#define CLK_SCLK_CSIS0 134 +#define CLK_SCLK_CSIS1 135 +#define CLK_SCLK_HDMI 136 +#define CLK_SCLK_MIXER 137 +#define CLK_SCLK_DAC 138 +#define CLK_SCLK_PIXEL 139 +#define CLK_SCLK_FIMD0 140 +#define CLK_SCLK_MDNIE0 141 /* Exynos4412 only */ +#define CLK_SCLK_MDNIE_PWM0 142 +#define CLK_SCLK_MIPI0 143 +#define CLK_SCLK_AUDIO0 144 +#define CLK_SCLK_MMC0 145 +#define CLK_SCLK_MMC1 146 +#define CLK_SCLK_MMC2 147 +#define CLK_SCLK_MMC3 148 +#define CLK_SCLK_MMC4 149 +#define CLK_SCLK_SATA 150 /* Exynos4210 only */ +#define CLK_SCLK_UART0 151 +#define CLK_SCLK_UART1 152 +#define CLK_SCLK_UART2 153 +#define CLK_SCLK_UART3 154 +#define CLK_SCLK_UART4 155 +#define CLK_SCLK_AUDIO1 156 +#define CLK_SCLK_AUDIO2 157 +#define CLK_SCLK_SPDIF 158 +#define CLK_SCLK_SPI0 159 +#define CLK_SCLK_SPI1 160 +#define CLK_SCLK_SPI2 161 +#define CLK_SCLK_SLIMBUS 162 +#define CLK_SCLK_FIMD1 163 /* Exynos4210 only */ +#define CLK_SCLK_MIPI1 164 /* Exynos4210 only */ +#define CLK_SCLK_PCM1 165 +#define CLK_SCLK_PCM2 166 +#define CLK_SCLK_I2S1 167 +#define CLK_SCLK_I2S2 168 +#define CLK_SCLK_MIPIHSI 169 /* Exynos4412 only */ +#define CLK_SCLK_MFC 170 +#define CLK_SCLK_PCM0 171 +#define CLK_SCLK_G3D 172 +#define CLK_SCLK_PWM_ISP 173 /* Exynos4x12 only */ +#define CLK_SCLK_SPI0_ISP 174 /* Exynos4x12 only */ +#define CLK_SCLK_SPI1_ISP 175 /* Exynos4x12 only */ +#define CLK_SCLK_UART_ISP 176 /* Exynos4x12 only */ +#define CLK_SCLK_FIMG2D 177 + +/* gate clocks */ +#define CLK_SSS 255 +#define CLK_FIMC0 256 +#define CLK_FIMC1 257 +#define CLK_FIMC2 258 +#define CLK_FIMC3 259 +#define CLK_CSIS0 260 +#define CLK_CSIS1 261 +#define CLK_JPEG 262 +#define CLK_SMMU_FIMC0 263 +#define CLK_SMMU_FIMC1 264 +#define CLK_SMMU_FIMC2 265 +#define CLK_SMMU_FIMC3 266 +#define CLK_SMMU_JPEG 267 +#define CLK_VP 268 +#define CLK_MIXER 269 +#define CLK_TVENC 270 /* Exynos4210 only */ +#define CLK_HDMI 271 +#define CLK_SMMU_TV 272 +#define CLK_MFC 273 +#define CLK_SMMU_MFCL 274 +#define CLK_SMMU_MFCR 275 +#define CLK_G3D 276 +#define CLK_G2D 277 +#define CLK_ROTATOR 278 +#define CLK_MDMA 279 +#define CLK_SMMU_G2D 280 +#define CLK_SMMU_ROTATOR 281 +#define CLK_SMMU_MDMA 282 +#define CLK_FIMD0 283 +#define CLK_MIE0 284 +#define CLK_MDNIE0 285 /* Exynos4412 only */ +#define CLK_DSIM0 286 +#define CLK_SMMU_FIMD0 287 +#define CLK_FIMD1 288 /* Exynos4210 only */ +#define CLK_MIE1 289 /* Exynos4210 only */ +#define CLK_DSIM1 290 /* Exynos4210 only */ +#define CLK_SMMU_FIMD1 291 /* Exynos4210 only */ +#define CLK_PDMA0 292 +#define CLK_PDMA1 293 
+#define CLK_PCIE_PHY 294 +#define CLK_SATA_PHY 295 /* Exynos4210 only */ +#define CLK_TSI 296 +#define CLK_SDMMC0 297 +#define CLK_SDMMC1 298 +#define CLK_SDMMC2 299 +#define CLK_SDMMC3 300 +#define CLK_SDMMC4 301 +#define CLK_SATA 302 /* Exynos4210 only */ +#define CLK_SROMC 303 +#define CLK_USB_HOST 304 +#define CLK_USB_DEVICE 305 +#define CLK_PCIE 306 +#define CLK_ONENAND 307 +#define CLK_NFCON 308 +#define CLK_SMMU_PCIE 309 +#define CLK_GPS 310 +#define CLK_SMMU_GPS 311 +#define CLK_UART0 312 +#define CLK_UART1 313 +#define CLK_UART2 314 +#define CLK_UART3 315 +#define CLK_UART4 316 +#define CLK_I2C0 317 +#define CLK_I2C1 318 +#define CLK_I2C2 319 +#define CLK_I2C3 320 +#define CLK_I2C4 321 +#define CLK_I2C5 322 +#define CLK_I2C6 323 +#define CLK_I2C7 324 +#define CLK_I2C_HDMI 325 +#define CLK_TSADC 326 +#define CLK_SPI0 327 +#define CLK_SPI1 328 +#define CLK_SPI2 329 +#define CLK_I2S1 330 +#define CLK_I2S2 331 +#define CLK_PCM0 332 +#define CLK_I2S0 333 +#define CLK_PCM1 334 +#define CLK_PCM2 335 +#define CLK_PWM 336 +#define CLK_SLIMBUS 337 +#define CLK_SPDIF 338 +#define CLK_AC97 339 +#define CLK_MODEMIF 340 +#define CLK_CHIPID 341 +#define CLK_SYSREG 342 +#define CLK_HDMI_CEC 343 +#define CLK_MCT 344 +#define CLK_WDT 345 +#define CLK_RTC 346 +#define CLK_KEYIF 347 +#define CLK_AUDSS 348 +#define CLK_MIPI_HSI 349 /* Exynos4210 only */ +#define CLK_PIXELASYNCM0 351 +#define CLK_PIXELASYNCM1 352 +#define CLK_FIMC_LITE0 353 /* Exynos4x12 only */ +#define CLK_FIMC_LITE1 354 /* Exynos4x12 only */ +#define CLK_PPMUISPX 355 /* Exynos4x12 only */ +#define CLK_PPMUISPMX 356 /* Exynos4x12 only */ +#define CLK_FIMC_ISP 357 /* Exynos4x12 only */ +#define CLK_FIMC_DRC 358 /* Exynos4x12 only */ +#define CLK_FIMC_FD 359 /* Exynos4x12 only */ +#define CLK_MCUISP 360 /* Exynos4x12 only */ +#define CLK_GICISP 361 /* Exynos4x12 only */ +#define CLK_SMMU_ISP 362 /* Exynos4x12 only */ +#define CLK_SMMU_DRC 363 /* Exynos4x12 only */ +#define CLK_SMMU_FD 364 /* Exynos4x12 only */ +#define CLK_SMMU_LITE0 365 /* Exynos4x12 only */ +#define CLK_SMMU_LITE1 366 /* Exynos4x12 only */ +#define CLK_MCUCTL_ISP 367 /* Exynos4x12 only */ +#define CLK_MPWM_ISP 368 /* Exynos4x12 only */ +#define CLK_I2C0_ISP 369 /* Exynos4x12 only */ +#define CLK_I2C1_ISP 370 /* Exynos4x12 only */ +#define CLK_MTCADC_ISP 371 /* Exynos4x12 only */ +#define CLK_PWM_ISP 372 /* Exynos4x12 only */ +#define CLK_WDT_ISP 373 /* Exynos4x12 only */ +#define CLK_UART_ISP 374 /* Exynos4x12 only */ +#define CLK_ASYNCAXIM 375 /* Exynos4x12 only */ +#define CLK_SMMU_ISPCX 376 /* Exynos4x12 only */ +#define CLK_SPI0_ISP 377 /* Exynos4x12 only */ +#define CLK_SPI1_ISP 378 /* Exynos4x12 only */ +#define CLK_PWM_ISP_SCLK 379 /* Exynos4x12 only */ +#define CLK_SPI0_ISP_SCLK 380 /* Exynos4x12 only */ +#define CLK_SPI1_ISP_SCLK 381 /* Exynos4x12 only */ +#define CLK_UART_ISP_SCLK 382 /* Exynos4x12 only */ +#define CLK_TMU_APBIF 383 + +/* mux clocks */ +#define CLK_MOUT_FIMC0 384 +#define CLK_MOUT_FIMC1 385 +#define CLK_MOUT_FIMC2 386 +#define CLK_MOUT_FIMC3 387 +#define CLK_MOUT_CAM0 388 +#define CLK_MOUT_CAM1 389 +#define CLK_MOUT_CSIS0 390 +#define CLK_MOUT_CSIS1 391 +#define CLK_MOUT_G3D0 392 +#define CLK_MOUT_G3D1 393 +#define CLK_MOUT_G3D 394 +#define CLK_ACLK400_MCUISP 395 /* Exynos4x12 only */ +#define CLK_MOUT_HDMI 396 +#define CLK_MOUT_MIXER 397 + +/* gate clocks - ppmu */ +#define CLK_PPMULEFT 400 +#define CLK_PPMURIGHT 401 +#define CLK_PPMUCAMIF 402 +#define CLK_PPMUTV 403 +#define CLK_PPMUMFC_L 404 +#define CLK_PPMUMFC_R 405 +#define 
CLK_PPMUG3D 406 +#define CLK_PPMUIMAGE 407 +#define CLK_PPMULCD0 408 +#define CLK_PPMULCD1 409 /* Exynos4210 only */ +#define CLK_PPMUFILE 410 +#define CLK_PPMUGPS 411 +#define CLK_PPMUDMC0 412 +#define CLK_PPMUDMC1 413 +#define CLK_PPMUCPU 414 +#define CLK_PPMUACP 415 + +/* div clocks */ +#define CLK_DIV_ISP0 450 /* Exynos4x12 only */ +#define CLK_DIV_ISP1 451 /* Exynos4x12 only */ +#define CLK_DIV_MCUISP0 452 /* Exynos4x12 only */ +#define CLK_DIV_MCUISP1 453 /* Exynos4x12 only */ +#define CLK_DIV_ACLK200 454 /* Exynos4x12 only */ +#define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */ +#define CLK_DIV_ACP 456 +#define CLK_DIV_DMC 457 +#define CLK_DIV_C2C 458 /* Exynos4x12 only */ +#define CLK_DIV_GDL 459 +#define CLK_DIV_GDR 460 + +/* must be greater than maximal clock id */ +#define CLK_NR_CLKS 461 + +/* Exynos4x12 ISP clocks */ +#define CLK_ISP_FIMC_ISP 1 +#define CLK_ISP_FIMC_DRC 2 +#define CLK_ISP_FIMC_FD 3 +#define CLK_ISP_FIMC_LITE0 4 +#define CLK_ISP_FIMC_LITE1 5 +#define CLK_ISP_MCUISP 6 +#define CLK_ISP_GICISP 7 +#define CLK_ISP_SMMU_ISP 8 +#define CLK_ISP_SMMU_DRC 9 +#define CLK_ISP_SMMU_FD 10 +#define CLK_ISP_SMMU_LITE0 11 +#define CLK_ISP_SMMU_LITE1 12 +#define CLK_ISP_PPMUISPMX 13 +#define CLK_ISP_PPMUISPX 14 +#define CLK_ISP_MCUCTL_ISP 15 +#define CLK_ISP_MPWM_ISP 16 +#define CLK_ISP_I2C0_ISP 17 +#define CLK_ISP_I2C1_ISP 18 +#define CLK_ISP_MTCADC_ISP 19 +#define CLK_ISP_PWM_ISP 20 +#define CLK_ISP_WDT_ISP 21 +#define CLK_ISP_UART_ISP 22 +#define CLK_ISP_ASYNCAXIM 23 +#define CLK_ISP_SMMU_ISPCX 24 +#define CLK_ISP_SPI0_ISP 25 +#define CLK_ISP_SPI1_ISP 26 + +#define CLK_ISP_DIV_ISP0 27 +#define CLK_ISP_DIV_ISP1 28 +#define CLK_ISP_DIV_MCUISP0 29 +#define CLK_ISP_DIV_MCUISP1 30 + +#define CLK_NR_ISP_CLKS 31 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */ diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h new file mode 100644 index 000000000..15508adcd --- /dev/null +++ b/include/dt-bindings/clock/exynos5250.h @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Andrzej Hajda + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants for Exynos5250 clock controller. 
+*/ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5250_H +#define _DT_BINDINGS_CLOCK_EXYNOS_5250_H + +/* core clocks */ +#define CLK_FIN_PLL 1 +#define CLK_FOUT_APLL 2 +#define CLK_FOUT_MPLL 3 +#define CLK_FOUT_BPLL 4 +#define CLK_FOUT_GPLL 5 +#define CLK_FOUT_CPLL 6 +#define CLK_FOUT_EPLL 7 +#define CLK_FOUT_VPLL 8 +#define CLK_ARM_CLK 9 + +/* gate for special clocks (sclk) */ +#define CLK_SCLK_CAM_BAYER 128 +#define CLK_SCLK_CAM0 129 +#define CLK_SCLK_CAM1 130 +#define CLK_SCLK_GSCL_WA 131 +#define CLK_SCLK_GSCL_WB 132 +#define CLK_SCLK_FIMD1 133 +#define CLK_SCLK_MIPI1 134 +#define CLK_SCLK_DP 135 +#define CLK_SCLK_HDMI 136 +#define CLK_SCLK_PIXEL 137 +#define CLK_SCLK_AUDIO0 138 +#define CLK_SCLK_MMC0 139 +#define CLK_SCLK_MMC1 140 +#define CLK_SCLK_MMC2 141 +#define CLK_SCLK_MMC3 142 +#define CLK_SCLK_SATA 143 +#define CLK_SCLK_USB3 144 +#define CLK_SCLK_JPEG 145 +#define CLK_SCLK_UART0 146 +#define CLK_SCLK_UART1 147 +#define CLK_SCLK_UART2 148 +#define CLK_SCLK_UART3 149 +#define CLK_SCLK_PWM 150 +#define CLK_SCLK_AUDIO1 151 +#define CLK_SCLK_AUDIO2 152 +#define CLK_SCLK_SPDIF 153 +#define CLK_SCLK_SPI0 154 +#define CLK_SCLK_SPI1 155 +#define CLK_SCLK_SPI2 156 +#define CLK_DIV_I2S1 157 +#define CLK_DIV_I2S2 158 +#define CLK_SCLK_HDMIPHY 159 +#define CLK_DIV_PCM0 160 + +/* gate clocks */ +#define CLK_GSCL0 256 +#define CLK_GSCL1 257 +#define CLK_GSCL2 258 +#define CLK_GSCL3 259 +#define CLK_GSCL_WA 260 +#define CLK_GSCL_WB 261 +#define CLK_SMMU_GSCL0 262 +#define CLK_SMMU_GSCL1 263 +#define CLK_SMMU_GSCL2 264 +#define CLK_SMMU_GSCL3 265 +#define CLK_MFC 266 +#define CLK_SMMU_MFCL 267 +#define CLK_SMMU_MFCR 268 +#define CLK_ROTATOR 269 +#define CLK_JPEG 270 +#define CLK_MDMA1 271 +#define CLK_SMMU_ROTATOR 272 +#define CLK_SMMU_JPEG 273 +#define CLK_SMMU_MDMA1 274 +#define CLK_PDMA0 275 +#define CLK_PDMA1 276 +#define CLK_SATA 277 +#define CLK_USBOTG 278 +#define CLK_MIPI_HSI 279 +#define CLK_SDMMC0 280 +#define CLK_SDMMC1 281 +#define CLK_SDMMC2 282 +#define CLK_SDMMC3 283 +#define CLK_SROMC 284 +#define CLK_USB2 285 +#define CLK_USB3 286 +#define CLK_SATA_PHYCTRL 287 +#define CLK_SATA_PHYI2C 288 +#define CLK_UART0 289 +#define CLK_UART1 290 +#define CLK_UART2 291 +#define CLK_UART3 292 +#define CLK_UART4 293 +#define CLK_I2C0 294 +#define CLK_I2C1 295 +#define CLK_I2C2 296 +#define CLK_I2C3 297 +#define CLK_I2C4 298 +#define CLK_I2C5 299 +#define CLK_I2C6 300 +#define CLK_I2C7 301 +#define CLK_I2C_HDMI 302 +#define CLK_ADC 303 +#define CLK_SPI0 304 +#define CLK_SPI1 305 +#define CLK_SPI2 306 +#define CLK_I2S1 307 +#define CLK_I2S2 308 +#define CLK_PCM1 309 +#define CLK_PCM2 310 +#define CLK_PWM 311 +#define CLK_SPDIF 312 +#define CLK_AC97 313 +#define CLK_HSI2C0 314 +#define CLK_HSI2C1 315 +#define CLK_HSI2C2 316 +#define CLK_HSI2C3 317 +#define CLK_CHIPID 318 +#define CLK_SYSREG 319 +#define CLK_PMU 320 +#define CLK_CMU_TOP 321 +#define CLK_CMU_CORE 322 +#define CLK_CMU_MEM 323 +#define CLK_TZPC0 324 +#define CLK_TZPC1 325 +#define CLK_TZPC2 326 +#define CLK_TZPC3 327 +#define CLK_TZPC4 328 +#define CLK_TZPC5 329 +#define CLK_TZPC6 330 +#define CLK_TZPC7 331 +#define CLK_TZPC8 332 +#define CLK_TZPC9 333 +#define CLK_HDMI_CEC 334 +#define CLK_MCT 335 +#define CLK_WDT 336 +#define CLK_RTC 337 +#define CLK_TMU 338 +#define CLK_FIMD1 339 +#define CLK_MIE1 340 +#define CLK_DSIM0 341 +#define CLK_DP 342 +#define CLK_MIXER 343 +#define CLK_HDMI 344 +#define CLK_G2D 345 +#define CLK_MDMA0 346 +#define CLK_SMMU_MDMA0 347 +#define CLK_SSS 348 +#define CLK_G3D 349 +#define CLK_SMMU_TV 350 +#define 
CLK_SMMU_FIMD1 351 +#define CLK_SMMU_2D 352 +#define CLK_SMMU_FIMC_ISP 353 +#define CLK_SMMU_FIMC_DRC 354 +#define CLK_SMMU_FIMC_SCC 355 +#define CLK_SMMU_FIMC_SCP 356 +#define CLK_SMMU_FIMC_FD 357 +#define CLK_SMMU_FIMC_MCU 358 +#define CLK_SMMU_FIMC_ODC 359 +#define CLK_SMMU_FIMC_DIS0 360 +#define CLK_SMMU_FIMC_DIS1 361 +#define CLK_SMMU_FIMC_3DNR 362 +#define CLK_SMMU_FIMC_LITE0 363 +#define CLK_SMMU_FIMC_LITE1 364 +#define CLK_CAMIF_TOP 365 + +/* mux clocks */ +#define CLK_MOUT_HDMI 1024 +#define CLK_MOUT_GPLL 1025 +#define CLK_MOUT_ACLK200_DISP1_SUB 1026 +#define CLK_MOUT_ACLK300_DISP1_SUB 1027 + +/* must be greater than maximal clock id */ +#define CLK_NR_CLKS 1028 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */ diff --git a/include/dt-bindings/clock/exynos5260-clk.h b/include/dt-bindings/clock/exynos5260-clk.h new file mode 100644 index 000000000..a4bac9a17 --- /dev/null +++ b/include/dt-bindings/clock/exynos5260-clk.h @@ -0,0 +1,469 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Rahul Sharma + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Provides Constants for Exynos5260 clocks. +*/ + +#ifndef _DT_BINDINGS_CLK_EXYNOS5260_H +#define _DT_BINDINGS_CLK_EXYNOS5260_H + +/* Clock names: */ + +/* List Of Clocks For CMU_TOP */ + +#define TOP_FOUT_DISP_PLL 1 +#define TOP_FOUT_AUD_PLL 2 +#define TOP_MOUT_AUDTOP_PLL_USER 3 +#define TOP_MOUT_AUD_PLL 4 +#define TOP_MOUT_DISP_PLL 5 +#define TOP_MOUT_BUSTOP_PLL_USER 6 +#define TOP_MOUT_MEMTOP_PLL_USER 7 +#define TOP_MOUT_MEDIATOP_PLL_USER 8 +#define TOP_MOUT_DISP_DISP_333 9 +#define TOP_MOUT_ACLK_DISP_333 10 +#define TOP_MOUT_DISP_DISP_222 11 +#define TOP_MOUT_ACLK_DISP_222 12 +#define TOP_MOUT_DISP_MEDIA_PIXEL 13 +#define TOP_MOUT_FIMD1 14 +#define TOP_MOUT_SCLK_PERI_SPI0_CLK 15 +#define TOP_MOUT_SCLK_PERI_SPI1_CLK 16 +#define TOP_MOUT_SCLK_PERI_SPI2_CLK 17 +#define TOP_MOUT_SCLK_PERI_UART0_UCLK 18 +#define TOP_MOUT_SCLK_PERI_UART2_UCLK 19 +#define TOP_MOUT_SCLK_PERI_UART1_UCLK 20 +#define TOP_MOUT_BUS4_BUSTOP_100 21 +#define TOP_MOUT_BUS4_BUSTOP_400 22 +#define TOP_MOUT_BUS3_BUSTOP_100 23 +#define TOP_MOUT_BUS3_BUSTOP_400 24 +#define TOP_MOUT_BUS2_BUSTOP_400 25 +#define TOP_MOUT_BUS2_BUSTOP_100 26 +#define TOP_MOUT_BUS1_BUSTOP_100 27 +#define TOP_MOUT_BUS1_BUSTOP_400 28 +#define TOP_MOUT_SCLK_FSYS_USB 29 +#define TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_A 30 +#define TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_A 31 +#define TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_A 32 +#define TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_B 33 +#define TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_B 34 +#define TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_B 35 +#define TOP_MOUT_ACLK_ISP1_266 36 +#define TOP_MOUT_ISP1_MEDIA_266 37 +#define TOP_MOUT_ACLK_ISP1_400 38 +#define TOP_MOUT_ISP1_MEDIA_400 39 +#define TOP_MOUT_SCLK_ISP1_SPI0 40 +#define TOP_MOUT_SCLK_ISP1_SPI1 41 +#define TOP_MOUT_SCLK_ISP1_UART 42 +#define TOP_MOUT_SCLK_ISP1_SENSOR2 43 +#define TOP_MOUT_SCLK_ISP1_SENSOR1 44 +#define TOP_MOUT_SCLK_ISP1_SENSOR0 45 +#define TOP_MOUT_ACLK_MFC_333 46 +#define TOP_MOUT_MFC_BUSTOP_333 47 +#define TOP_MOUT_ACLK_G2D_333 48 +#define TOP_MOUT_G2D_BUSTOP_333 49 +#define TOP_MOUT_ACLK_GSCL_FIMC 50 +#define TOP_MOUT_GSCL_BUSTOP_FIMC 51 +#define TOP_MOUT_ACLK_GSCL_333 52 +#define TOP_MOUT_GSCL_BUSTOP_333 53 +#define TOP_MOUT_ACLK_GSCL_400 54 +#define TOP_MOUT_M2M_MEDIATOP_400 55 +#define TOP_DOUT_ACLK_MFC_333 56 +#define TOP_DOUT_ACLK_G2D_333 57 +#define 
TOP_DOUT_SCLK_ISP1_SENSOR2_A 58 +#define TOP_DOUT_SCLK_ISP1_SENSOR1_A 59 +#define TOP_DOUT_SCLK_ISP1_SENSOR0_A 60 +#define TOP_DOUT_ACLK_GSCL_FIMC 61 +#define TOP_DOUT_ACLK_GSCL_400 62 +#define TOP_DOUT_ACLK_GSCL_333 63 +#define TOP_DOUT_SCLK_ISP1_SPI0_B 64 +#define TOP_DOUT_SCLK_ISP1_SPI0_A 65 +#define TOP_DOUT_ACLK_ISP1_400 66 +#define TOP_DOUT_ACLK_ISP1_266 67 +#define TOP_DOUT_SCLK_ISP1_UART 68 +#define TOP_DOUT_SCLK_ISP1_SPI1_B 69 +#define TOP_DOUT_SCLK_ISP1_SPI1_A 70 +#define TOP_DOUT_SCLK_ISP1_SENSOR2_B 71 +#define TOP_DOUT_SCLK_ISP1_SENSOR1_B 72 +#define TOP_DOUT_SCLK_ISP1_SENSOR0_B 73 +#define TOP_DOUTTOP__SCLK_HPM_TARGETCLK 74 +#define TOP_DOUT_SCLK_DISP_PIXEL 75 +#define TOP_DOUT_ACLK_DISP_222 76 +#define TOP_DOUT_ACLK_DISP_333 77 +#define TOP_DOUT_ACLK_BUS4_100 78 +#define TOP_DOUT_ACLK_BUS4_400 79 +#define TOP_DOUT_ACLK_BUS3_100 80 +#define TOP_DOUT_ACLK_BUS3_400 81 +#define TOP_DOUT_ACLK_BUS2_100 82 +#define TOP_DOUT_ACLK_BUS2_400 83 +#define TOP_DOUT_ACLK_BUS1_100 84 +#define TOP_DOUT_ACLK_BUS1_400 85 +#define TOP_DOUT_SCLK_PERI_SPI1_B 86 +#define TOP_DOUT_SCLK_PERI_SPI1_A 87 +#define TOP_DOUT_SCLK_PERI_SPI0_B 88 +#define TOP_DOUT_SCLK_PERI_SPI0_A 89 +#define TOP_DOUT_SCLK_PERI_UART0 90 +#define TOP_DOUT_SCLK_PERI_UART2 91 +#define TOP_DOUT_SCLK_PERI_UART1 92 +#define TOP_DOUT_SCLK_PERI_SPI2_B 93 +#define TOP_DOUT_SCLK_PERI_SPI2_A 94 +#define TOP_DOUT_ACLK_PERI_AUD 95 +#define TOP_DOUT_ACLK_PERI_66 96 +#define TOP_DOUT_SCLK_FSYS_MMC0_SDCLKIN_B 97 +#define TOP_DOUT_SCLK_FSYS_MMC0_SDCLKIN_A 98 +#define TOP_DOUT_SCLK_FSYS_USBDRD30_SUSPEND_CLK 99 +#define TOP_DOUT_ACLK_FSYS_200 100 +#define TOP_DOUT_SCLK_FSYS_MMC2_SDCLKIN_B 101 +#define TOP_DOUT_SCLK_FSYS_MMC2_SDCLKIN_A 102 +#define TOP_DOUT_SCLK_FSYS_MMC1_SDCLKIN_B 103 +#define TOP_DOUT_SCLK_FSYS_MMC1_SDCLKIN_A 104 +#define TOP_SCLK_FIMD1 105 +#define TOP_SCLK_MMC2 106 +#define TOP_SCLK_MMC1 107 +#define TOP_SCLK_MMC0 108 +#define PHYCLK_DPTX_PHY_CH3_TXD_CLK 109 +#define PHYCLK_DPTX_PHY_CH2_TXD_CLK 110 +#define PHYCLK_DPTX_PHY_CH1_TXD_CLK 111 +#define PHYCLK_DPTX_PHY_CH0_TXD_CLK 112 +#define phyclk_hdmi_phy_tmds_clko 113 +#define PHYCLK_HDMI_PHY_PIXEL_CLKO 114 +#define PHYCLK_HDMI_LINK_O_TMDS_CLKHI 115 +#define PHYCLK_MIPI_DPHY_4L_M_TXBYTECLKHS 116 +#define PHYCLK_DPTX_PHY_O_REF_CLK_24M 117 +#define PHYCLK_DPTX_PHY_CLK_DIV2 118 +#define PHYCLK_MIPI_DPHY_4L_M_RXCLKESC0 119 +#define PHYCLK_USBHOST20_PHY_PHYCLOCK 120 +#define PHYCLK_USBHOST20_PHY_FREECLK 121 +#define PHYCLK_USBHOST20_PHY_CLK48MOHCI 122 +#define PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 123 +#define PHYCLK_USBDRD30_UDRD30_PHYCLOCK 124 +#define TOP_NR_CLK 125 + + +/* List Of Clocks For CMU_EGL */ + +#define EGL_FOUT_EGL_PLL 1 +#define EGL_FOUT_EGL_DPLL 2 +#define EGL_MOUT_EGL_B 3 +#define EGL_MOUT_EGL_PLL 4 +#define EGL_DOUT_EGL_PLL 5 +#define EGL_DOUT_EGL_PCLK_DBG 6 +#define EGL_DOUT_EGL_ATCLK 7 +#define EGL_DOUT_PCLK_EGL 8 +#define EGL_DOUT_ACLK_EGL 9 +#define EGL_DOUT_EGL2 10 +#define EGL_DOUT_EGL1 11 +#define EGL_NR_CLK 12 + + +/* List Of Clocks For CMU_KFC */ + +#define KFC_FOUT_KFC_PLL 1 +#define KFC_MOUT_KFC_PLL 2 +#define KFC_MOUT_KFC 3 +#define KFC_DOUT_KFC_PLL 4 +#define KFC_DOUT_PCLK_KFC 5 +#define KFC_DOUT_ACLK_KFC 6 +#define KFC_DOUT_KFC_PCLK_DBG 7 +#define KFC_DOUT_KFC_ATCLK 8 +#define KFC_DOUT_KFC2 9 +#define KFC_DOUT_KFC1 10 +#define KFC_NR_CLK 11 + + +/* List Of Clocks For CMU_MIF */ + +#define MIF_FOUT_MEM_PLL 1 +#define MIF_FOUT_MEDIA_PLL 2 +#define MIF_FOUT_BUS_PLL 3 +#define MIF_MOUT_CLK2X_PHY 4 +#define MIF_MOUT_MIF_DREX2X 5 +#define MIF_MOUT_CLKM_PHY 6 
+#define MIF_MOUT_MIF_DREX 7 +#define MIF_MOUT_MEDIA_PLL 8 +#define MIF_MOUT_BUS_PLL 9 +#define MIF_MOUT_MEM_PLL 10 +#define MIF_DOUT_ACLK_BUS_100 11 +#define MIF_DOUT_ACLK_BUS_200 12 +#define MIF_DOUT_ACLK_MIF_466 13 +#define MIF_DOUT_CLK2X_PHY 14 +#define MIF_DOUT_CLKM_PHY 15 +#define MIF_DOUT_BUS_PLL 16 +#define MIF_DOUT_MEM_PLL 17 +#define MIF_DOUT_MEDIA_PLL 18 +#define MIF_CLK_LPDDR3PHY_WRAP1 19 +#define MIF_CLK_LPDDR3PHY_WRAP0 20 +#define MIF_CLK_MONOCNT 21 +#define MIF_CLK_MIF_RTC 22 +#define MIF_CLK_DREX1 23 +#define MIF_CLK_DREX0 24 +#define MIF_CLK_INTMEM 25 +#define MIF_SCLK_LPDDR3PHY_WRAP_U1 26 +#define MIF_SCLK_LPDDR3PHY_WRAP_U0 27 +#define MIF_NR_CLK 28 + + +/* List Of Clocks For CMU_G3D */ + +#define G3D_FOUT_G3D_PLL 1 +#define G3D_MOUT_G3D_PLL 2 +#define G3D_DOUT_PCLK_G3D 3 +#define G3D_DOUT_ACLK_G3D 4 +#define G3D_CLK_G3D_HPM 5 +#define G3D_CLK_G3D 6 +#define G3D_NR_CLK 7 + + +/* List Of Clocks For CMU_AUD */ + +#define AUD_MOUT_SCLK_AUD_PCM 1 +#define AUD_MOUT_SCLK_AUD_I2S 2 +#define AUD_MOUT_AUD_PLL_USER 3 +#define AUD_DOUT_ACLK_AUD_131 4 +#define AUD_DOUT_SCLK_AUD_UART 5 +#define AUD_DOUT_SCLK_AUD_PCM 6 +#define AUD_DOUT_SCLK_AUD_I2S 7 +#define AUD_CLK_AUD_UART 8 +#define AUD_CLK_PCM 9 +#define AUD_CLK_I2S 10 +#define AUD_CLK_DMAC 11 +#define AUD_CLK_SRAMC 12 +#define AUD_SCLK_AUD_UART 13 +#define AUD_SCLK_PCM 14 +#define AUD_SCLK_I2S 15 +#define AUD_NR_CLK 16 + + +/* List Of Clocks For CMU_MFC */ + +#define MFC_MOUT_ACLK_MFC_333_USER 1 +#define MFC_DOUT_PCLK_MFC_83 2 +#define MFC_CLK_MFC 3 +#define MFC_CLK_SMMU2_MFCM1 4 +#define MFC_CLK_SMMU2_MFCM0 5 +#define MFC_NR_CLK 6 + + +/* List Of Clocks For CMU_GSCL */ + +#define GSCL_MOUT_ACLK_CSIS 1 +#define GSCL_MOUT_ACLK_GSCL_FIMC_USER 2 +#define GSCL_MOUT_ACLK_M2M_400_USER 3 +#define GSCL_MOUT_ACLK_GSCL_333_USER 4 +#define GSCL_DOUT_ACLK_CSIS_200 5 +#define GSCL_DOUT_PCLK_M2M_100 6 +#define GSCL_CLK_PIXEL_GSCL1 7 +#define GSCL_CLK_PIXEL_GSCL0 8 +#define GSCL_CLK_MSCL1 9 +#define GSCL_CLK_MSCL0 10 +#define GSCL_CLK_GSCL1 11 +#define GSCL_CLK_GSCL0 12 +#define GSCL_CLK_FIMC_LITE_D 13 +#define GSCL_CLK_FIMC_LITE_B 14 +#define GSCL_CLK_FIMC_LITE_A 15 +#define GSCL_CLK_CSIS1 16 +#define GSCL_CLK_CSIS0 17 +#define GSCL_CLK_SMMU3_LITE_D 18 +#define GSCL_CLK_SMMU3_LITE_B 19 +#define GSCL_CLK_SMMU3_LITE_A 20 +#define GSCL_CLK_SMMU3_GSCL0 21 +#define GSCL_CLK_SMMU3_GSCL1 22 +#define GSCL_CLK_SMMU3_MSCL0 23 +#define GSCL_CLK_SMMU3_MSCL1 24 +#define GSCL_SCLK_CSIS1_WRAP 25 +#define GSCL_SCLK_CSIS0_WRAP 26 +#define GSCL_NR_CLK 27 + + +/* List Of Clocks For CMU_FSYS */ + +#define FSYS_MOUT_PHYCLK_USBHOST20_PHYCLK_USER 1 +#define FSYS_MOUT_PHYCLK_USBHOST20_FREECLK_USER 2 +#define FSYS_MOUT_PHYCLK_USBHOST20_CLK48MOHCI_USER 3 +#define FSYS_MOUT_PHYCLK_USBDRD30_PIPE_PCLK_USER 4 +#define FSYS_MOUT_PHYCLK_USBDRD30_PHYCLOCK_USER 5 +#define FSYS_CLK_TSI 6 +#define FSYS_CLK_USBLINK 7 +#define FSYS_CLK_USBHOST20 8 +#define FSYS_CLK_USBDRD30 9 +#define FSYS_CLK_SROMC 10 +#define FSYS_CLK_PDMA 11 +#define FSYS_CLK_MMC2 12 +#define FSYS_CLK_MMC1 13 +#define FSYS_CLK_MMC0 14 +#define FSYS_CLK_RTIC 15 +#define FSYS_CLK_SMMU_RTIC 16 +#define FSYS_PHYCLK_USBDRD30 17 +#define FSYS_PHYCLK_USBHOST20 18 +#define FSYS_NR_CLK 19 + + +/* List Of Clocks For CMU_PERI */ + +#define PERI_MOUT_SCLK_SPDIF 1 +#define PERI_MOUT_SCLK_I2SCOD 2 +#define PERI_MOUT_SCLK_PCM 3 +#define PERI_DOUT_I2S 4 +#define PERI_DOUT_PCM 5 +#define PERI_CLK_WDT_KFC 6 +#define PERI_CLK_WDT_EGL 7 +#define PERI_CLK_HSIC3 8 +#define PERI_CLK_HSIC2 9 +#define PERI_CLK_HSIC1 10 +#define 
PERI_CLK_HSIC0 11 +#define PERI_CLK_PCM 12 +#define PERI_CLK_MCT 13 +#define PERI_CLK_I2S 14 +#define PERI_CLK_I2CHDMI 15 +#define PERI_CLK_I2C7 16 +#define PERI_CLK_I2C6 17 +#define PERI_CLK_I2C5 18 +#define PERI_CLK_I2C4 19 +#define PERI_CLK_I2C9 20 +#define PERI_CLK_I2C8 21 +#define PERI_CLK_I2C11 22 +#define PERI_CLK_I2C10 23 +#define PERI_CLK_HDMICEC 24 +#define PERI_CLK_EFUSE_WRITER 25 +#define PERI_CLK_ABB 26 +#define PERI_CLK_UART2 27 +#define PERI_CLK_UART1 28 +#define PERI_CLK_UART0 29 +#define PERI_CLK_ADC 30 +#define PERI_CLK_TMU4 31 +#define PERI_CLK_TMU3 32 +#define PERI_CLK_TMU2 33 +#define PERI_CLK_TMU1 34 +#define PERI_CLK_TMU0 35 +#define PERI_CLK_SPI2 36 +#define PERI_CLK_SPI1 37 +#define PERI_CLK_SPI0 38 +#define PERI_CLK_SPDIF 39 +#define PERI_CLK_PWM 40 +#define PERI_CLK_UART4 41 +#define PERI_CLK_CHIPID 42 +#define PERI_CLK_PROVKEY0 43 +#define PERI_CLK_PROVKEY1 44 +#define PERI_CLK_SECKEY 45 +#define PERI_CLK_TOP_RTC 46 +#define PERI_CLK_TZPC10 47 +#define PERI_CLK_TZPC9 48 +#define PERI_CLK_TZPC8 49 +#define PERI_CLK_TZPC7 50 +#define PERI_CLK_TZPC6 51 +#define PERI_CLK_TZPC5 52 +#define PERI_CLK_TZPC4 53 +#define PERI_CLK_TZPC3 54 +#define PERI_CLK_TZPC2 55 +#define PERI_CLK_TZPC1 56 +#define PERI_CLK_TZPC0 57 +#define PERI_SCLK_UART2 58 +#define PERI_SCLK_UART1 59 +#define PERI_SCLK_UART0 60 +#define PERI_SCLK_SPI2 61 +#define PERI_SCLK_SPI1 62 +#define PERI_SCLK_SPI0 63 +#define PERI_SCLK_SPDIF 64 +#define PERI_SCLK_I2S 65 +#define PERI_SCLK_PCM1 66 +#define PERI_NR_CLK 67 + + +/* List Of Clocks For CMU_DISP */ + +#define DISP_MOUT_SCLK_HDMI_SPDIF 1 +#define DISP_MOUT_SCLK_HDMI_PIXEL 2 +#define DISP_MOUT_PHYCLK_MIPI_DPHY_4LMRXCLK_ESC0_USER 3 +#define DISP_MOUT_PHYCLK_HDMI_PHY_TMDS_CLKO_USER 4 +#define DISP_MOUT_PHYCLK_HDMI_PHY_REF_CLKO_USER 5 +#define DISP_MOUT_HDMI_PHY_PIXEL 6 +#define DISP_MOUT_PHYCLK_HDMI_LINK_O_TMDS_CLKHI_USER 7 +#define DISP_MOUT_PHYCLK_MIPI_DPHY_4L_M_TXBYTE_CLKHS 8 +#define DISP_MOUT_PHYCLK_DPTX_PHY_O_REF_CLK_24M_USER 9 +#define DISP_MOUT_PHYCLK_DPTX_PHY_CLK_DIV2_USER 10 +#define DISP_MOUT_PHYCLK_DPTX_PHY_CH3_TXD_CLK_USER 11 +#define DISP_MOUT_PHYCLK_DPTX_PHY_CH2_TXD_CLK_USER 12 +#define DISP_MOUT_PHYCLK_DPTX_PHY_CH1_TXD_CLK_USER 13 +#define DISP_MOUT_PHYCLK_DPTX_PHY_CH0_TXD_CLK_USER 14 +#define DISP_MOUT_ACLK_DISP_222_USER 15 +#define DISP_MOUT_SCLK_DISP_PIXEL_USER 16 +#define DISP_MOUT_ACLK_DISP_333_USER 17 +#define DISP_DOUT_SCLK_HDMI_PHY_PIXEL_CLKI 18 +#define DISP_DOUT_SCLK_FIMD1_EXTCLKPLL 19 +#define DISP_DOUT_PCLK_DISP_111 20 +#define DISP_CLK_SMMU_TV 21 +#define DISP_CLK_SMMU_FIMD1M1 22 +#define DISP_CLK_SMMU_FIMD1M0 23 +#define DISP_CLK_PIXEL_MIXER 24 +#define DISP_CLK_PIXEL_DISP 25 +#define DISP_CLK_MIXER 26 +#define DISP_CLK_MIPIPHY 27 +#define DISP_CLK_HDMIPHY 28 +#define DISP_CLK_HDMI 29 +#define DISP_CLK_FIMD1 30 +#define DISP_CLK_DSIM1 31 +#define DISP_CLK_DPPHY 32 +#define DISP_CLK_DP 33 +#define DISP_SCLK_PIXEL 34 +#define DISP_MOUT_HDMI_PHY_PIXEL_USER 35 +#define DISP_NR_CLK 36 + + +/* List Of Clocks For CMU_G2D */ + +#define G2D_MOUT_ACLK_G2D_333_USER 1 +#define G2D_DOUT_PCLK_G2D_83 2 +#define G2D_CLK_SMMU3_JPEG 3 +#define G2D_CLK_MDMA 4 +#define G2D_CLK_JPEG 5 +#define G2D_CLK_G2D 6 +#define G2D_CLK_SSS 7 +#define G2D_CLK_SLIM_SSS 8 +#define G2D_CLK_SMMU_SLIM_SSS 9 +#define G2D_CLK_SMMU_SSS 10 +#define G2D_CLK_SMMU_MDMA 11 +#define G2D_CLK_SMMU3_G2D 12 +#define G2D_NR_CLK 13 + + +/* List Of Clocks For CMU_ISP */ + +#define ISP_MOUT_ISP_400_USER 1 +#define ISP_MOUT_ISP_266_USER 2 +#define ISP_DOUT_SCLK_MPWM 3 +#define 
ISP_DOUT_CA5_PCLKDBG 4 +#define ISP_DOUT_CA5_ATCLKIN 5 +#define ISP_DOUT_PCLK_ISP_133 6 +#define ISP_DOUT_PCLK_ISP_66 7 +#define ISP_CLK_GIC 8 +#define ISP_CLK_WDT 9 +#define ISP_CLK_UART 10 +#define ISP_CLK_SPI1 11 +#define ISP_CLK_SPI0 12 +#define ISP_CLK_SMMU_SCALERP 13 +#define ISP_CLK_SMMU_SCALERC 14 +#define ISP_CLK_SMMU_ISPCX 15 +#define ISP_CLK_SMMU_ISP 16 +#define ISP_CLK_SMMU_FD 17 +#define ISP_CLK_SMMU_DRC 18 +#define ISP_CLK_PWM 19 +#define ISP_CLK_MTCADC 20 +#define ISP_CLK_MPWM 21 +#define ISP_CLK_MCUCTL 22 +#define ISP_CLK_I2C1 23 +#define ISP_CLK_I2C0 24 +#define ISP_CLK_FIMC_SCALERP 25 +#define ISP_CLK_FIMC_SCALERC 26 +#define ISP_CLK_FIMC 27 +#define ISP_CLK_FIMC_FD 28 +#define ISP_CLK_FIMC_DRC 29 +#define ISP_CLK_CA5 30 +#define ISP_SCLK_SPI0_EXT 31 +#define ISP_SCLK_SPI1_EXT 32 +#define ISP_SCLK_UART_EXT 33 +#define ISP_NR_CLK 34 + +#endif diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h new file mode 100644 index 000000000..6cb4e90f8 --- /dev/null +++ b/include/dt-bindings/clock/exynos5410.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Copyright (c) 2016 Krzysztof Kozlowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants for Exynos5421 clock controller. +*/ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5410_H +#define _DT_BINDINGS_CLOCK_EXYNOS_5410_H + +/* core clocks */ +#define CLK_FIN_PLL 1 +#define CLK_FOUT_APLL 2 +#define CLK_FOUT_CPLL 3 +#define CLK_FOUT_MPLL 4 +#define CLK_FOUT_BPLL 5 +#define CLK_FOUT_KPLL 6 +#define CLK_FOUT_EPLL 7 + +/* gate for special clocks (sclk) */ +#define CLK_SCLK_UART0 128 +#define CLK_SCLK_UART1 129 +#define CLK_SCLK_UART2 130 +#define CLK_SCLK_UART3 131 +#define CLK_SCLK_MMC0 132 +#define CLK_SCLK_MMC1 133 +#define CLK_SCLK_MMC2 134 +#define CLK_SCLK_USBD300 150 +#define CLK_SCLK_USBD301 151 +#define CLK_SCLK_USBPHY300 152 +#define CLK_SCLK_USBPHY301 153 +#define CLK_SCLK_PWM 155 + +/* gate clocks */ +#define CLK_UART0 257 +#define CLK_UART1 258 +#define CLK_UART2 259 +#define CLK_I2C0 261 +#define CLK_I2C1 262 +#define CLK_I2C2 263 +#define CLK_I2C3 264 +#define CLK_USI0 265 +#define CLK_USI1 266 +#define CLK_USI2 267 +#define CLK_USI3 268 +#define CLK_UART3 260 +#define CLK_PWM 279 +#define CLK_MCT 315 +#define CLK_WDT 316 +#define CLK_RTC 317 +#define CLK_TMU 318 +#define CLK_MMC0 351 +#define CLK_MMC1 352 +#define CLK_MMC2 353 +#define CLK_PDMA0 362 +#define CLK_PDMA1 363 +#define CLK_USBH20 365 +#define CLK_USBD300 366 +#define CLK_USBD301 367 +#define CLK_SSS 471 + +#define CLK_NR_CLKS 512 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5410_H */ diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h new file mode 100644 index 000000000..2740ae042 --- /dev/null +++ b/include/dt-bindings/clock/exynos5420.h @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Andrzej Hajda + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants for Exynos5420 clock controller. 
+*/ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5420_H +#define _DT_BINDINGS_CLOCK_EXYNOS_5420_H + +/* core clocks */ +#define CLK_FIN_PLL 1 +#define CLK_FOUT_APLL 2 +#define CLK_FOUT_CPLL 3 +#define CLK_FOUT_DPLL 4 +#define CLK_FOUT_EPLL 5 +#define CLK_FOUT_RPLL 6 +#define CLK_FOUT_IPLL 7 +#define CLK_FOUT_SPLL 8 +#define CLK_FOUT_VPLL 9 +#define CLK_FOUT_MPLL 10 +#define CLK_FOUT_BPLL 11 +#define CLK_FOUT_KPLL 12 +#define CLK_ARM_CLK 13 +#define CLK_KFC_CLK 14 + +/* gate for special clocks (sclk) */ +#define CLK_SCLK_UART0 128 +#define CLK_SCLK_UART1 129 +#define CLK_SCLK_UART2 130 +#define CLK_SCLK_UART3 131 +#define CLK_SCLK_MMC0 132 +#define CLK_SCLK_MMC1 133 +#define CLK_SCLK_MMC2 134 +#define CLK_SCLK_SPI0 135 +#define CLK_SCLK_SPI1 136 +#define CLK_SCLK_SPI2 137 +#define CLK_SCLK_I2S1 138 +#define CLK_SCLK_I2S2 139 +#define CLK_SCLK_PCM1 140 +#define CLK_SCLK_PCM2 141 +#define CLK_SCLK_SPDIF 142 +#define CLK_SCLK_HDMI 143 +#define CLK_SCLK_PIXEL 144 +#define CLK_SCLK_DP1 145 +#define CLK_SCLK_MIPI1 146 +#define CLK_SCLK_FIMD1 147 +#define CLK_SCLK_MAUDIO0 148 +#define CLK_SCLK_MAUPCM0 149 +#define CLK_SCLK_USBD300 150 +#define CLK_SCLK_USBD301 151 +#define CLK_SCLK_USBPHY300 152 +#define CLK_SCLK_USBPHY301 153 +#define CLK_SCLK_UNIPRO 154 +#define CLK_SCLK_PWM 155 +#define CLK_SCLK_GSCL_WA 156 +#define CLK_SCLK_GSCL_WB 157 +#define CLK_SCLK_HDMIPHY 158 +#define CLK_MAU_EPLL 159 +#define CLK_SCLK_HSIC_12M 160 +#define CLK_SCLK_MPHY_IXTAL24 161 + +/* gate clocks */ +#define CLK_UART0 257 +#define CLK_UART1 258 +#define CLK_UART2 259 +#define CLK_UART3 260 +#define CLK_I2C0 261 +#define CLK_I2C1 262 +#define CLK_I2C2 263 +#define CLK_I2C3 264 +#define CLK_USI0 265 +#define CLK_USI1 266 +#define CLK_USI2 267 +#define CLK_USI3 268 +#define CLK_I2C_HDMI 269 +#define CLK_TSADC 270 +#define CLK_SPI0 271 +#define CLK_SPI1 272 +#define CLK_SPI2 273 +#define CLK_KEYIF 274 +#define CLK_I2S1 275 +#define CLK_I2S2 276 +#define CLK_PCM1 277 +#define CLK_PCM2 278 +#define CLK_PWM 279 +#define CLK_SPDIF 280 +#define CLK_USI4 281 +#define CLK_USI5 282 +#define CLK_USI6 283 +#define CLK_ACLK66_PSGEN 300 +#define CLK_CHIPID 301 +#define CLK_SYSREG 302 +#define CLK_TZPC0 303 +#define CLK_TZPC1 304 +#define CLK_TZPC2 305 +#define CLK_TZPC3 306 +#define CLK_TZPC4 307 +#define CLK_TZPC5 308 +#define CLK_TZPC6 309 +#define CLK_TZPC7 310 +#define CLK_TZPC8 311 +#define CLK_TZPC9 312 +#define CLK_HDMI_CEC 313 +#define CLK_SECKEY 314 +#define CLK_MCT 315 +#define CLK_WDT 316 +#define CLK_RTC 317 +#define CLK_TMU 318 +#define CLK_TMU_GPU 319 +#define CLK_PCLK66_GPIO 330 +#define CLK_ACLK200_FSYS2 350 +#define CLK_MMC0 351 +#define CLK_MMC1 352 +#define CLK_MMC2 353 +#define CLK_SROMC 354 +#define CLK_UFS 355 +#define CLK_ACLK200_FSYS 360 +#define CLK_TSI 361 +#define CLK_PDMA0 362 +#define CLK_PDMA1 363 +#define CLK_RTIC 364 +#define CLK_USBH20 365 +#define CLK_USBD300 366 +#define CLK_USBD301 367 +#define CLK_ACLK400_MSCL 380 +#define CLK_MSCL0 381 +#define CLK_MSCL1 382 +#define CLK_MSCL2 383 +#define CLK_SMMU_MSCL0 384 +#define CLK_SMMU_MSCL1 385 +#define CLK_SMMU_MSCL2 386 +#define CLK_ACLK333 400 +#define CLK_MFC 401 +#define CLK_SMMU_MFCL 402 +#define CLK_SMMU_MFCR 403 +#define CLK_ACLK200_DISP1 410 +#define CLK_DSIM1 411 +#define CLK_DP1 412 +#define CLK_HDMI 413 +#define CLK_ACLK300_DISP1 420 +#define CLK_FIMD1 421 +#define CLK_SMMU_FIMD1M0 422 +#define CLK_SMMU_FIMD1M1 423 +#define CLK_ACLK166 430 +#define CLK_MIXER 431 +#define CLK_ACLK266 440 +#define CLK_ROTATOR 441 +#define CLK_MDMA1 442 +#define 
CLK_SMMU_ROTATOR 443 +#define CLK_SMMU_MDMA1 444 +#define CLK_ACLK300_JPEG 450 +#define CLK_JPEG 451 +#define CLK_JPEG2 452 +#define CLK_SMMU_JPEG 453 +#define CLK_SMMU_JPEG2 454 +#define CLK_ACLK300_GSCL 460 +#define CLK_SMMU_GSCL0 461 +#define CLK_SMMU_GSCL1 462 +#define CLK_GSCL_WA 463 +#define CLK_GSCL_WB 464 +#define CLK_GSCL0 465 +#define CLK_GSCL1 466 +#define CLK_FIMC_3AA 467 +#define CLK_ACLK266_G2D 470 +#define CLK_SSS 471 +#define CLK_SLIM_SSS 472 +#define CLK_MDMA0 473 +#define CLK_ACLK333_G2D 480 +#define CLK_G2D 481 +#define CLK_ACLK333_432_GSCL 490 +#define CLK_SMMU_3AA 491 +#define CLK_SMMU_FIMCL0 492 +#define CLK_SMMU_FIMCL1 493 +#define CLK_SMMU_FIMCL3 494 +#define CLK_FIMC_LITE3 495 +#define CLK_FIMC_LITE0 496 +#define CLK_FIMC_LITE1 497 +#define CLK_ACLK_G3D 500 +#define CLK_G3D 501 +#define CLK_SMMU_MIXER 502 +#define CLK_SMMU_G2D 503 +#define CLK_SMMU_MDMA0 504 +#define CLK_MC 505 +#define CLK_TOP_RTC 506 +#define CLK_SCLK_UART_ISP 510 +#define CLK_SCLK_SPI0_ISP 511 +#define CLK_SCLK_SPI1_ISP 512 +#define CLK_SCLK_PWM_ISP 513 +#define CLK_SCLK_ISP_SENSOR0 514 +#define CLK_SCLK_ISP_SENSOR1 515 +#define CLK_SCLK_ISP_SENSOR2 516 +#define CLK_ACLK432_SCALER 517 +#define CLK_ACLK432_CAM 518 +#define CLK_ACLK_FL1550_CAM 519 +#define CLK_ACLK550_CAM 520 + +/* mux clocks */ +#define CLK_MOUT_HDMI 640 +#define CLK_MOUT_G3D 641 +#define CLK_MOUT_VPLL 642 +#define CLK_MOUT_MAUDIO0 643 +#define CLK_MOUT_USER_ACLK333 644 +#define CLK_MOUT_SW_ACLK333 645 +#define CLK_MOUT_USER_ACLK200_DISP1 646 +#define CLK_MOUT_SW_ACLK200 647 +#define CLK_MOUT_USER_ACLK300_DISP1 648 +#define CLK_MOUT_SW_ACLK300 649 +#define CLK_MOUT_USER_ACLK400_DISP1 650 +#define CLK_MOUT_SW_ACLK400 651 +#define CLK_MOUT_USER_ACLK300_GSCL 652 +#define CLK_MOUT_SW_ACLK300_GSCL 653 +#define CLK_MOUT_MCLK_CDREX 654 +#define CLK_MOUT_BPLL 655 +#define CLK_MOUT_MX_MSPLL_CCORE 656 +#define CLK_MOUT_EPLL 657 +#define CLK_MOUT_MAU_EPLL 658 +#define CLK_MOUT_USER_MAU_EPLL 659 + +/* divider clocks */ +#define CLK_DOUT_PIXEL 768 +#define CLK_DOUT_ACLK400_WCORE 769 +#define CLK_DOUT_ACLK400_ISP 770 +#define CLK_DOUT_ACLK400_MSCL 771 +#define CLK_DOUT_ACLK200 772 +#define CLK_DOUT_ACLK200_FSYS2 773 +#define CLK_DOUT_ACLK100_NOC 774 +#define CLK_DOUT_PCLK200_FSYS 775 +#define CLK_DOUT_ACLK200_FSYS 776 +#define CLK_DOUT_ACLK333_432_GSCL 777 +#define CLK_DOUT_ACLK333_432_ISP 778 +#define CLK_DOUT_ACLK66 779 +#define CLK_DOUT_ACLK333_432_ISP0 780 +#define CLK_DOUT_ACLK266 781 +#define CLK_DOUT_ACLK166 782 +#define CLK_DOUT_ACLK333 783 +#define CLK_DOUT_ACLK333_G2D 784 +#define CLK_DOUT_ACLK266_G2D 785 +#define CLK_DOUT_ACLK_G3D 786 +#define CLK_DOUT_ACLK300_JPEG 787 +#define CLK_DOUT_ACLK300_DISP1 788 +#define CLK_DOUT_ACLK300_GSCL 789 +#define CLK_DOUT_ACLK400_DISP1 790 +#define CLK_DOUT_PCLK_CDREX 791 +#define CLK_DOUT_SCLK_CDREX 792 +#define CLK_DOUT_ACLK_CDREX1 793 +#define CLK_DOUT_CCLK_DREX0 794 +#define CLK_DOUT_CLK2X_PHY0 795 +#define CLK_DOUT_PCLK_CORE_MEM 796 + +/* must be greater than maximal clock id */ +#define CLK_NR_CLKS 797 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */ diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h new file mode 100644 index 000000000..be39d23e6 --- /dev/null +++ b/include/dt-bindings/clock/exynos5433.h @@ -0,0 +1,1412 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. 
+ * Author: Chanwoo Choi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS5433_H +#define _DT_BINDINGS_CLOCK_EXYNOS5433_H + +/* CMU_TOP */ +#define CLK_FOUT_ISP_PLL 1 +#define CLK_FOUT_AUD_PLL 2 + +#define CLK_MOUT_AUD_PLL 10 +#define CLK_MOUT_ISP_PLL 11 +#define CLK_MOUT_AUD_PLL_USER_T 12 +#define CLK_MOUT_MPHY_PLL_USER 13 +#define CLK_MOUT_MFC_PLL_USER 14 +#define CLK_MOUT_BUS_PLL_USER 15 +#define CLK_MOUT_ACLK_HEVC_400 16 +#define CLK_MOUT_ACLK_CAM1_333 17 +#define CLK_MOUT_ACLK_CAM1_552_B 18 +#define CLK_MOUT_ACLK_CAM1_552_A 19 +#define CLK_MOUT_ACLK_ISP_DIS_400 20 +#define CLK_MOUT_ACLK_ISP_400 21 +#define CLK_MOUT_ACLK_BUS0_400 22 +#define CLK_MOUT_ACLK_MSCL_400_B 23 +#define CLK_MOUT_ACLK_MSCL_400_A 24 +#define CLK_MOUT_ACLK_GSCL_333 25 +#define CLK_MOUT_ACLK_G2D_400_B 26 +#define CLK_MOUT_ACLK_G2D_400_A 27 +#define CLK_MOUT_SCLK_JPEG_C 28 +#define CLK_MOUT_SCLK_JPEG_B 29 +#define CLK_MOUT_SCLK_JPEG_A 30 +#define CLK_MOUT_SCLK_MMC2_B 31 +#define CLK_MOUT_SCLK_MMC2_A 32 +#define CLK_MOUT_SCLK_MMC1_B 33 +#define CLK_MOUT_SCLK_MMC1_A 34 +#define CLK_MOUT_SCLK_MMC0_D 35 +#define CLK_MOUT_SCLK_MMC0_C 36 +#define CLK_MOUT_SCLK_MMC0_B 37 +#define CLK_MOUT_SCLK_MMC0_A 38 +#define CLK_MOUT_SCLK_SPI4 39 +#define CLK_MOUT_SCLK_SPI3 40 +#define CLK_MOUT_SCLK_UART2 41 +#define CLK_MOUT_SCLK_UART1 42 +#define CLK_MOUT_SCLK_UART0 43 +#define CLK_MOUT_SCLK_SPI2 44 +#define CLK_MOUT_SCLK_SPI1 45 +#define CLK_MOUT_SCLK_SPI0 46 +#define CLK_MOUT_ACLK_MFC_400_C 47 +#define CLK_MOUT_ACLK_MFC_400_B 48 +#define CLK_MOUT_ACLK_MFC_400_A 49 +#define CLK_MOUT_SCLK_ISP_SENSOR2 50 +#define CLK_MOUT_SCLK_ISP_SENSOR1 51 +#define CLK_MOUT_SCLK_ISP_SENSOR0 52 +#define CLK_MOUT_SCLK_ISP_UART 53 +#define CLK_MOUT_SCLK_ISP_SPI1 54 +#define CLK_MOUT_SCLK_ISP_SPI0 55 +#define CLK_MOUT_SCLK_PCIE_100 56 +#define CLK_MOUT_SCLK_UFSUNIPRO 57 +#define CLK_MOUT_SCLK_USBHOST30 58 +#define CLK_MOUT_SCLK_USBDRD30 59 +#define CLK_MOUT_SCLK_SLIMBUS 60 +#define CLK_MOUT_SCLK_SPDIF 61 +#define CLK_MOUT_SCLK_AUDIO1 62 +#define CLK_MOUT_SCLK_AUDIO0 63 +#define CLK_MOUT_SCLK_HDMI_SPDIF 64 + +#define CLK_DIV_ACLK_FSYS_200 100 +#define CLK_DIV_ACLK_IMEM_SSSX_266 101 +#define CLK_DIV_ACLK_IMEM_200 102 +#define CLK_DIV_ACLK_IMEM_266 103 +#define CLK_DIV_ACLK_PERIC_66_B 104 +#define CLK_DIV_ACLK_PERIC_66_A 105 +#define CLK_DIV_ACLK_PERIS_66_B 106 +#define CLK_DIV_ACLK_PERIS_66_A 107 +#define CLK_DIV_SCLK_MMC1_B 108 +#define CLK_DIV_SCLK_MMC1_A 109 +#define CLK_DIV_SCLK_MMC0_B 110 +#define CLK_DIV_SCLK_MMC0_A 111 +#define CLK_DIV_SCLK_MMC2_B 112 +#define CLK_DIV_SCLK_MMC2_A 113 +#define CLK_DIV_SCLK_SPI1_B 114 +#define CLK_DIV_SCLK_SPI1_A 115 +#define CLK_DIV_SCLK_SPI0_B 116 +#define CLK_DIV_SCLK_SPI0_A 117 +#define CLK_DIV_SCLK_SPI2_B 118 +#define CLK_DIV_SCLK_SPI2_A 119 +#define CLK_DIV_SCLK_UART2 120 +#define CLK_DIV_SCLK_UART1 121 +#define CLK_DIV_SCLK_UART0 122 +#define CLK_DIV_SCLK_SPI4_B 123 +#define CLK_DIV_SCLK_SPI4_A 124 +#define CLK_DIV_SCLK_SPI3_B 125 +#define CLK_DIV_SCLK_SPI3_A 126 +#define CLK_DIV_SCLK_I2S1 127 +#define CLK_DIV_SCLK_PCM1 128 +#define CLK_DIV_SCLK_AUDIO1 129 +#define CLK_DIV_SCLK_AUDIO0 130 +#define CLK_DIV_ACLK_GSCL_111 131 +#define CLK_DIV_ACLK_GSCL_333 132 +#define CLK_DIV_ACLK_HEVC_400 133 +#define CLK_DIV_ACLK_MFC_400 134 +#define CLK_DIV_ACLK_G2D_266 135 +#define CLK_DIV_ACLK_G2D_400 136 +#define CLK_DIV_ACLK_G3D_400 137 
+#define CLK_DIV_ACLK_BUS0_400 138 +#define CLK_DIV_ACLK_BUS1_400 139 +#define CLK_DIV_SCLK_PCIE_100 140 +#define CLK_DIV_SCLK_USBHOST30 141 +#define CLK_DIV_SCLK_UFSUNIPRO 142 +#define CLK_DIV_SCLK_USBDRD30 143 +#define CLK_DIV_SCLK_JPEG 144 +#define CLK_DIV_ACLK_MSCL_400 145 +#define CLK_DIV_ACLK_ISP_DIS_400 146 +#define CLK_DIV_ACLK_ISP_400 147 +#define CLK_DIV_ACLK_CAM0_333 148 +#define CLK_DIV_ACLK_CAM0_400 149 +#define CLK_DIV_ACLK_CAM0_552 150 +#define CLK_DIV_ACLK_CAM1_333 151 +#define CLK_DIV_ACLK_CAM1_400 152 +#define CLK_DIV_ACLK_CAM1_552 153 +#define CLK_DIV_SCLK_ISP_UART 154 +#define CLK_DIV_SCLK_ISP_SPI1_B 155 +#define CLK_DIV_SCLK_ISP_SPI1_A 156 +#define CLK_DIV_SCLK_ISP_SPI0_B 157 +#define CLK_DIV_SCLK_ISP_SPI0_A 158 +#define CLK_DIV_SCLK_ISP_SENSOR2_B 159 +#define CLK_DIV_SCLK_ISP_SENSOR2_A 160 +#define CLK_DIV_SCLK_ISP_SENSOR1_B 161 +#define CLK_DIV_SCLK_ISP_SENSOR1_A 162 +#define CLK_DIV_SCLK_ISP_SENSOR0_B 163 +#define CLK_DIV_SCLK_ISP_SENSOR0_A 164 + +#define CLK_ACLK_PERIC_66 200 +#define CLK_ACLK_PERIS_66 201 +#define CLK_ACLK_FSYS_200 202 +#define CLK_SCLK_MMC2_FSYS 203 +#define CLK_SCLK_MMC1_FSYS 204 +#define CLK_SCLK_MMC0_FSYS 205 +#define CLK_SCLK_SPI4_PERIC 206 +#define CLK_SCLK_SPI3_PERIC 207 +#define CLK_SCLK_UART2_PERIC 208 +#define CLK_SCLK_UART1_PERIC 209 +#define CLK_SCLK_UART0_PERIC 210 +#define CLK_SCLK_SPI2_PERIC 211 +#define CLK_SCLK_SPI1_PERIC 212 +#define CLK_SCLK_SPI0_PERIC 213 +#define CLK_SCLK_SPDIF_PERIC 214 +#define CLK_SCLK_I2S1_PERIC 215 +#define CLK_SCLK_PCM1_PERIC 216 +#define CLK_SCLK_SLIMBUS 217 +#define CLK_SCLK_AUDIO1 218 +#define CLK_SCLK_AUDIO0 219 +#define CLK_ACLK_G2D_266 220 +#define CLK_ACLK_G2D_400 221 +#define CLK_ACLK_G3D_400 222 +#define CLK_ACLK_IMEM_SSX_266 223 +#define CLK_ACLK_BUS0_400 224 +#define CLK_ACLK_BUS1_400 225 +#define CLK_ACLK_IMEM_200 226 +#define CLK_ACLK_IMEM_266 227 +#define CLK_SCLK_PCIE_100_FSYS 228 +#define CLK_SCLK_UFSUNIPRO_FSYS 229 +#define CLK_SCLK_USBHOST30_FSYS 230 +#define CLK_SCLK_USBDRD30_FSYS 231 +#define CLK_ACLK_GSCL_111 232 +#define CLK_ACLK_GSCL_333 233 +#define CLK_SCLK_JPEG_MSCL 234 +#define CLK_ACLK_MSCL_400 235 +#define CLK_ACLK_MFC_400 236 +#define CLK_ACLK_HEVC_400 237 +#define CLK_ACLK_ISP_DIS_400 238 +#define CLK_ACLK_ISP_400 239 +#define CLK_ACLK_CAM0_333 240 +#define CLK_ACLK_CAM0_400 241 +#define CLK_ACLK_CAM0_552 242 +#define CLK_ACLK_CAM1_333 243 +#define CLK_ACLK_CAM1_400 244 +#define CLK_ACLK_CAM1_552 245 +#define CLK_SCLK_ISP_SENSOR2 246 +#define CLK_SCLK_ISP_SENSOR1 247 +#define CLK_SCLK_ISP_SENSOR0 248 +#define CLK_SCLK_ISP_MCTADC_CAM1 249 +#define CLK_SCLK_ISP_UART_CAM1 250 +#define CLK_SCLK_ISP_SPI1_CAM1 251 +#define CLK_SCLK_ISP_SPI0_CAM1 252 +#define CLK_SCLK_HDMI_SPDIF_DISP 253 + +#define TOP_NR_CLK 254 + +/* CMU_CPIF */ +#define CLK_FOUT_MPHY_PLL 1 + +#define CLK_MOUT_MPHY_PLL 2 + +#define CLK_DIV_SCLK_MPHY 10 + +#define CLK_SCLK_MPHY_PLL 11 +#define CLK_SCLK_UFS_MPHY 11 + +#define CPIF_NR_CLK 12 + +/* CMU_MIF */ +#define CLK_FOUT_MEM0_PLL 1 +#define CLK_FOUT_MEM1_PLL 2 +#define CLK_FOUT_BUS_PLL 3 +#define CLK_FOUT_MFC_PLL 4 +#define CLK_DOUT_MFC_PLL 5 +#define CLK_DOUT_BUS_PLL 6 +#define CLK_DOUT_MEM1_PLL 7 +#define CLK_DOUT_MEM0_PLL 8 + +#define CLK_MOUT_MFC_PLL_DIV2 10 +#define CLK_MOUT_BUS_PLL_DIV2 11 +#define CLK_MOUT_MEM1_PLL_DIV2 12 +#define CLK_MOUT_MEM0_PLL_DIV2 13 +#define CLK_MOUT_MFC_PLL 14 +#define CLK_MOUT_BUS_PLL 15 +#define CLK_MOUT_MEM1_PLL 16 +#define CLK_MOUT_MEM0_PLL 17 +#define CLK_MOUT_CLK2X_PHY_C 18 +#define CLK_MOUT_CLK2X_PHY_B 19 +#define 
CLK_MOUT_CLK2X_PHY_A 20 +#define CLK_MOUT_CLKM_PHY_C 21 +#define CLK_MOUT_CLKM_PHY_B 22 +#define CLK_MOUT_CLKM_PHY_A 23 +#define CLK_MOUT_ACLK_MIFNM_200 24 +#define CLK_MOUT_ACLK_MIFNM_400 25 +#define CLK_MOUT_ACLK_DISP_333_B 26 +#define CLK_MOUT_ACLK_DISP_333_A 27 +#define CLK_MOUT_SCLK_DECON_VCLK_C 28 +#define CLK_MOUT_SCLK_DECON_VCLK_B 29 +#define CLK_MOUT_SCLK_DECON_VCLK_A 30 +#define CLK_MOUT_SCLK_DECON_ECLK_C 31 +#define CLK_MOUT_SCLK_DECON_ECLK_B 32 +#define CLK_MOUT_SCLK_DECON_ECLK_A 33 +#define CLK_MOUT_SCLK_DECON_TV_ECLK_C 34 +#define CLK_MOUT_SCLK_DECON_TV_ECLK_B 35 +#define CLK_MOUT_SCLK_DECON_TV_ECLK_A 36 +#define CLK_MOUT_SCLK_DSD_C 37 +#define CLK_MOUT_SCLK_DSD_B 38 +#define CLK_MOUT_SCLK_DSD_A 39 +#define CLK_MOUT_SCLK_DSIM0_C 40 +#define CLK_MOUT_SCLK_DSIM0_B 41 +#define CLK_MOUT_SCLK_DSIM0_A 42 +#define CLK_MOUT_SCLK_DECON_TV_VCLK_C 46 +#define CLK_MOUT_SCLK_DECON_TV_VCLK_B 47 +#define CLK_MOUT_SCLK_DECON_TV_VCLK_A 48 +#define CLK_MOUT_SCLK_DSIM1_C 49 +#define CLK_MOUT_SCLK_DSIM1_B 50 +#define CLK_MOUT_SCLK_DSIM1_A 51 + +#define CLK_DIV_SCLK_HPM_MIF 55 +#define CLK_DIV_ACLK_DREX1 56 +#define CLK_DIV_ACLK_DREX0 57 +#define CLK_DIV_CLK2XPHY 58 +#define CLK_DIV_ACLK_MIF_266 59 +#define CLK_DIV_ACLK_MIFND_133 60 +#define CLK_DIV_ACLK_MIF_133 61 +#define CLK_DIV_ACLK_MIFNM_200 62 +#define CLK_DIV_ACLK_MIF_200 63 +#define CLK_DIV_ACLK_MIF_400 64 +#define CLK_DIV_ACLK_BUS2_400 65 +#define CLK_DIV_ACLK_DISP_333 66 +#define CLK_DIV_ACLK_CPIF_200 67 +#define CLK_DIV_SCLK_DSIM1 68 +#define CLK_DIV_SCLK_DECON_TV_VCLK 69 +#define CLK_DIV_SCLK_DSIM0 70 +#define CLK_DIV_SCLK_DSD 71 +#define CLK_DIV_SCLK_DECON_TV_ECLK 72 +#define CLK_DIV_SCLK_DECON_VCLK 73 +#define CLK_DIV_SCLK_DECON_ECLK 74 +#define CLK_DIV_MIF_PRE 75 + +#define CLK_CLK2X_PHY1 80 +#define CLK_CLK2X_PHY0 81 +#define CLK_CLKM_PHY1 82 +#define CLK_CLKM_PHY0 83 +#define CLK_RCLK_DREX1 84 +#define CLK_RCLK_DREX0 85 +#define CLK_ACLK_DREX1_TZ 86 +#define CLK_ACLK_DREX0_TZ 87 +#define CLK_ACLK_DREX1_PEREV 88 +#define CLK_ACLK_DREX0_PEREV 89 +#define CLK_ACLK_DREX1_MEMIF 90 +#define CLK_ACLK_DREX0_MEMIF 91 +#define CLK_ACLK_DREX1_SCH 92 +#define CLK_ACLK_DREX0_SCH 93 +#define CLK_ACLK_DREX1_BUSIF 94 +#define CLK_ACLK_DREX0_BUSIF 95 +#define CLK_ACLK_DREX1_BUSIF_RD 96 +#define CLK_ACLK_DREX0_BUSIF_RD 97 +#define CLK_ACLK_DREX1 98 +#define CLK_ACLK_DREX0 99 +#define CLK_ACLK_ASYNCAXIM_ATLAS_CCIX 100 +#define CLK_ACLK_ASYNCAXIS_ATLAS_MIF 101 +#define CLK_ACLK_ASYNCAXIM_ATLAS_MIF 102 +#define CLK_ACLK_ASYNCAXIS_MIF_IMEM 103 +#define CLK_ACLK_ASYNCAXIS_NOC_P_CCI 104 +#define CLK_ACLK_ASYNCAXIM_NOC_P_CCI 105 +#define CLK_ACLK_ASYNCAXIS_CP1 106 +#define CLK_ACLK_ASYNCAXIM_CP1 107 +#define CLK_ACLK_ASYNCAXIS_CP0 108 +#define CLK_ACLK_ASYNCAXIM_CP0 109 +#define CLK_ACLK_ASYNCAXIS_DREX1_3 110 +#define CLK_ACLK_ASYNCAXIM_DREX1_3 111 +#define CLK_ACLK_ASYNCAXIS_DREX1_1 112 +#define CLK_ACLK_ASYNCAXIM_DREX1_1 113 +#define CLK_ACLK_ASYNCAXIS_DREX1_0 114 +#define CLK_ACLK_ASYNCAXIM_DREX1_0 115 +#define CLK_ACLK_ASYNCAXIS_DREX0_3 116 +#define CLK_ACLK_ASYNCAXIM_DREX0_3 117 +#define CLK_ACLK_ASYNCAXIS_DREX0_1 118 +#define CLK_ACLK_ASYNCAXIM_DREX0_1 119 +#define CLK_ACLK_ASYNCAXIS_DREX0_0 120 +#define CLK_ACLK_ASYNCAXIM_DREX0_0 121 +#define CLK_ACLK_AHB2APB_MIF2P 122 +#define CLK_ACLK_AHB2APB_MIF1P 123 +#define CLK_ACLK_AHB2APB_MIF0P 124 +#define CLK_ACLK_IXIU_CCI 125 +#define CLK_ACLK_XIU_MIFSFRX 126 +#define CLK_ACLK_MIFNP_133 127 +#define CLK_ACLK_MIFNM_200 128 +#define CLK_ACLK_MIFND_133 129 +#define CLK_ACLK_MIFND_400 130 +#define 
CLK_ACLK_CCI 131 +#define CLK_ACLK_MIFND_266 132 +#define CLK_ACLK_PPMU_DREX1S3 133 +#define CLK_ACLK_PPMU_DREX1S1 134 +#define CLK_ACLK_PPMU_DREX1S0 135 +#define CLK_ACLK_PPMU_DREX0S3 136 +#define CLK_ACLK_PPMU_DREX0S1 137 +#define CLK_ACLK_PPMU_DREX0S0 138 +#define CLK_ACLK_BTS_APOLLO 139 +#define CLK_ACLK_BTS_ATLAS 140 +#define CLK_ACLK_ACE_SEL_APOLL 141 +#define CLK_ACLK_ACE_SEL_ATLAS 142 +#define CLK_ACLK_AXIDS_CCI_MIFSFRX 143 +#define CLK_ACLK_AXIUS_ATLAS_CCI 144 +#define CLK_ACLK_AXISYNCDNS_CCI 145 +#define CLK_ACLK_AXISYNCDN_CCI 146 +#define CLK_ACLK_AXISYNCDN_NOC_D 147 +#define CLK_ACLK_ASYNCACEM_APOLLO_CCI 148 +#define CLK_ACLK_ASYNCACEM_ATLAS_CCI 149 +#define CLK_ACLK_ASYNCAPBS_MIF_CSSYS 150 +#define CLK_ACLK_BUS2_400 151 +#define CLK_ACLK_DISP_333 152 +#define CLK_ACLK_CPIF_200 153 +#define CLK_PCLK_PPMU_DREX1S3 154 +#define CLK_PCLK_PPMU_DREX1S1 155 +#define CLK_PCLK_PPMU_DREX1S0 156 +#define CLK_PCLK_PPMU_DREX0S3 157 +#define CLK_PCLK_PPMU_DREX0S1 158 +#define CLK_PCLK_PPMU_DREX0S0 159 +#define CLK_PCLK_BTS_APOLLO 160 +#define CLK_PCLK_BTS_ATLAS 161 +#define CLK_PCLK_ASYNCAXI_NOC_P_CCI 162 +#define CLK_PCLK_ASYNCAXI_CP1 163 +#define CLK_PCLK_ASYNCAXI_CP0 164 +#define CLK_PCLK_ASYNCAXI_DREX1_3 165 +#define CLK_PCLK_ASYNCAXI_DREX1_1 166 +#define CLK_PCLK_ASYNCAXI_DREX1_0 167 +#define CLK_PCLK_ASYNCAXI_DREX0_3 168 +#define CLK_PCLK_ASYNCAXI_DREX0_1 169 +#define CLK_PCLK_ASYNCAXI_DREX0_0 170 +#define CLK_PCLK_MIFSRVND_133 171 +#define CLK_PCLK_PMU_MIF 172 +#define CLK_PCLK_SYSREG_MIF 173 +#define CLK_PCLK_GPIO_ALIVE 174 +#define CLK_PCLK_ABB 175 +#define CLK_PCLK_PMU_APBIF 176 +#define CLK_PCLK_DDR_PHY1 177 +#define CLK_PCLK_DREX1 178 +#define CLK_PCLK_DDR_PHY0 179 +#define CLK_PCLK_DREX0 180 +#define CLK_PCLK_DREX0_TZ 181 +#define CLK_PCLK_DREX1_TZ 182 +#define CLK_PCLK_MONOTONIC_CNT 183 +#define CLK_PCLK_RTC 184 +#define CLK_SCLK_DSIM1_DISP 185 +#define CLK_SCLK_DECON_TV_VCLK_DISP 186 +#define CLK_SCLK_FREQ_DET_BUS_PLL 187 +#define CLK_SCLK_FREQ_DET_MFC_PLL 188 +#define CLK_SCLK_FREQ_DET_MEM0_PLL 189 +#define CLK_SCLK_FREQ_DET_MEM1_PLL 190 +#define CLK_SCLK_DSIM0_DISP 191 +#define CLK_SCLK_DSD_DISP 192 +#define CLK_SCLK_DECON_TV_ECLK_DISP 193 +#define CLK_SCLK_DECON_VCLK_DISP 194 +#define CLK_SCLK_DECON_ECLK_DISP 195 +#define CLK_SCLK_HPM_MIF 196 +#define CLK_SCLK_MFC_PLL 197 +#define CLK_SCLK_BUS_PLL 198 +#define CLK_SCLK_BUS_PLL_APOLLO 199 +#define CLK_SCLK_BUS_PLL_ATLAS 200 + +#define MIF_NR_CLK 201 + +/* CMU_PERIC */ +#define CLK_PCLK_SPI2 1 +#define CLK_PCLK_SPI1 2 +#define CLK_PCLK_SPI0 3 +#define CLK_PCLK_UART2 4 +#define CLK_PCLK_UART1 5 +#define CLK_PCLK_UART0 6 +#define CLK_PCLK_HSI2C3 7 +#define CLK_PCLK_HSI2C2 8 +#define CLK_PCLK_HSI2C1 9 +#define CLK_PCLK_HSI2C0 10 +#define CLK_PCLK_I2C7 11 +#define CLK_PCLK_I2C6 12 +#define CLK_PCLK_I2C5 13 +#define CLK_PCLK_I2C4 14 +#define CLK_PCLK_I2C3 15 +#define CLK_PCLK_I2C2 16 +#define CLK_PCLK_I2C1 17 +#define CLK_PCLK_I2C0 18 +#define CLK_PCLK_SPI4 19 +#define CLK_PCLK_SPI3 20 +#define CLK_PCLK_HSI2C11 21 +#define CLK_PCLK_HSI2C10 22 +#define CLK_PCLK_HSI2C9 23 +#define CLK_PCLK_HSI2C8 24 +#define CLK_PCLK_HSI2C7 25 +#define CLK_PCLK_HSI2C6 26 +#define CLK_PCLK_HSI2C5 27 +#define CLK_PCLK_HSI2C4 28 +#define CLK_SCLK_SPI4 29 +#define CLK_SCLK_SPI3 30 +#define CLK_SCLK_SPI2 31 +#define CLK_SCLK_SPI1 32 +#define CLK_SCLK_SPI0 33 +#define CLK_SCLK_UART2 34 +#define CLK_SCLK_UART1 35 +#define CLK_SCLK_UART0 36 +#define CLK_ACLK_AHB2APB_PERIC2P 37 +#define CLK_ACLK_AHB2APB_PERIC1P 38 +#define CLK_ACLK_AHB2APB_PERIC0P 39 
+#define CLK_ACLK_PERICNP_66 40 +#define CLK_PCLK_SCI 41 +#define CLK_PCLK_GPIO_FINGER 42 +#define CLK_PCLK_GPIO_ESE 43 +#define CLK_PCLK_PWM 44 +#define CLK_PCLK_SPDIF 45 +#define CLK_PCLK_PCM1 46 +#define CLK_PCLK_I2S1 47 +#define CLK_PCLK_ADCIF 48 +#define CLK_PCLK_GPIO_TOUCH 49 +#define CLK_PCLK_GPIO_NFC 50 +#define CLK_PCLK_GPIO_PERIC 51 +#define CLK_PCLK_PMU_PERIC 52 +#define CLK_PCLK_SYSREG_PERIC 53 +#define CLK_SCLK_IOCLK_SPI4 54 +#define CLK_SCLK_IOCLK_SPI3 55 +#define CLK_SCLK_SCI 56 +#define CLK_SCLK_SC_IN 57 +#define CLK_SCLK_PWM 58 +#define CLK_SCLK_IOCLK_SPI2 59 +#define CLK_SCLK_IOCLK_SPI1 60 +#define CLK_SCLK_IOCLK_SPI0 61 +#define CLK_SCLK_IOCLK_I2S1_BCLK 62 +#define CLK_SCLK_SPDIF 63 +#define CLK_SCLK_PCM1 64 +#define CLK_SCLK_I2S1 65 + +#define CLK_DIV_SCLK_SCI 70 +#define CLK_DIV_SCLK_SC_IN 71 + +#define PERIC_NR_CLK 72 + +/* CMU_PERIS */ +#define CLK_PCLK_HPM_APBIF 1 +#define CLK_PCLK_TMU1_APBIF 2 +#define CLK_PCLK_TMU0_APBIF 3 +#define CLK_PCLK_PMU_PERIS 4 +#define CLK_PCLK_SYSREG_PERIS 5 +#define CLK_PCLK_CMU_TOP_APBIF 6 +#define CLK_PCLK_WDT_APOLLO 7 +#define CLK_PCLK_WDT_ATLAS 8 +#define CLK_PCLK_MCT 9 +#define CLK_PCLK_HDMI_CEC 10 +#define CLK_ACLK_AHB2APB_PERIS1P 11 +#define CLK_ACLK_AHB2APB_PERIS0P 12 +#define CLK_ACLK_PERISNP_66 13 +#define CLK_PCLK_TZPC12 14 +#define CLK_PCLK_TZPC11 15 +#define CLK_PCLK_TZPC10 16 +#define CLK_PCLK_TZPC9 17 +#define CLK_PCLK_TZPC8 18 +#define CLK_PCLK_TZPC7 19 +#define CLK_PCLK_TZPC6 20 +#define CLK_PCLK_TZPC5 21 +#define CLK_PCLK_TZPC4 22 +#define CLK_PCLK_TZPC3 23 +#define CLK_PCLK_TZPC2 24 +#define CLK_PCLK_TZPC1 25 +#define CLK_PCLK_TZPC0 26 +#define CLK_PCLK_SECKEY_APBIF 27 +#define CLK_PCLK_CHIPID_APBIF 28 +#define CLK_PCLK_TOPRTC 29 +#define CLK_PCLK_CUSTOM_EFUSE_APBIF 30 +#define CLK_PCLK_ANTIRBK_CNT_APBIF 31 +#define CLK_PCLK_OTP_CON_APBIF 32 +#define CLK_SCLK_ASV_TB 33 +#define CLK_SCLK_TMU1 34 +#define CLK_SCLK_TMU0 35 +#define CLK_SCLK_SECKEY 36 +#define CLK_SCLK_CHIPID 37 +#define CLK_SCLK_TOPRTC 38 +#define CLK_SCLK_CUSTOM_EFUSE 39 +#define CLK_SCLK_ANTIRBK_CNT 40 +#define CLK_SCLK_OTP_CON 41 + +#define PERIS_NR_CLK 42 + +/* CMU_FSYS */ +#define CLK_MOUT_ACLK_FSYS_200_USER 1 +#define CLK_MOUT_SCLK_MMC2_USER 2 +#define CLK_MOUT_SCLK_MMC1_USER 3 +#define CLK_MOUT_SCLK_MMC0_USER 4 +#define CLK_MOUT_SCLK_UFS_MPHY_USER 5 +#define CLK_MOUT_SCLK_PCIE_100_USER 6 +#define CLK_MOUT_SCLK_UFSUNIPRO_USER 7 +#define CLK_MOUT_SCLK_USBHOST30_USER 8 +#define CLK_MOUT_SCLK_USBDRD30_USER 9 +#define CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_USER 10 +#define CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_USER 11 +#define CLK_MOUT_PHYCLK_USBHOST20_PHY_HSIC1_USER 12 +#define CLK_MOUT_PHYCLK_USBHOST20_PHY_CLK48MOHCI_USER 13 +#define CLK_MOUT_PHYCLK_USBHOST20_PHY_PHYCLOCK_USER 14 +#define CLK_MOUT_PHYCLK_USBHOST20_PHY_PHY_FREECLK_USER 15 +#define CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_USER 16 +#define CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_USER 17 +#define CLK_MOUT_PHYCLK_UFS_RX1_SYMBOL_USER 18 +#define CLK_MOUT_PHYCLK_UFS_RX0_SYMBOL_USER 19 +#define CLK_MOUT_PHYCLK_UFS_TX1_SYMBOL_USER 20 +#define CLK_MOUT_PHYCLK_UFS_TX0_SYMBOL_USER 21 +#define CLK_MOUT_PHYCLK_LLI_MPHY_TO_UFS_USER 22 +#define CLK_MOUT_SCLK_MPHY 23 + +#define CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY 25 +#define CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_PHY 26 +#define CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_PHY 27 +#define CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_PHY 28 +#define CLK_PHYCLK_USBHOST20_PHY_FREECLK_PHY 29 +#define CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK_PHY 30 +#define 
CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI_PHY 31 +#define CLK_PHYCLK_USBHOST20_PHY_HSIC1_PHY 32 +#define CLK_PHYCLK_UFS_TX0_SYMBOL_PHY 33 +#define CLK_PHYCLK_UFS_RX0_SYMBOL_PHY 34 +#define CLK_PHYCLK_UFS_TX1_SYMBOL_PHY 35 +#define CLK_PHYCLK_UFS_RX1_SYMBOL_PHY 36 +#define CLK_PHYCLK_LLI_MPHY_TO_UFS_PHY 37 + +#define CLK_ACLK_PCIE 50 +#define CLK_ACLK_PDMA1 51 +#define CLK_ACLK_TSI 52 +#define CLK_ACLK_MMC2 53 +#define CLK_ACLK_MMC1 54 +#define CLK_ACLK_MMC0 55 +#define CLK_ACLK_UFS 56 +#define CLK_ACLK_USBHOST20 57 +#define CLK_ACLK_USBHOST30 58 +#define CLK_ACLK_USBDRD30 59 +#define CLK_ACLK_PDMA0 60 +#define CLK_SCLK_MMC2 61 +#define CLK_SCLK_MMC1 62 +#define CLK_SCLK_MMC0 63 +#define CLK_PDMA1 64 +#define CLK_PDMA0 65 +#define CLK_ACLK_XIU_FSYSPX 66 +#define CLK_ACLK_AHB_USBLINKH1 67 +#define CLK_ACLK_SMMU_PDMA1 68 +#define CLK_ACLK_BTS_PCIE 69 +#define CLK_ACLK_AXIUS_PDMA1 70 +#define CLK_ACLK_SMMU_PDMA0 71 +#define CLK_ACLK_BTS_UFS 72 +#define CLK_ACLK_BTS_USBHOST30 73 +#define CLK_ACLK_BTS_USBDRD30 74 +#define CLK_ACLK_AXIUS_PDMA0 75 +#define CLK_ACLK_AXIUS_USBHS 76 +#define CLK_ACLK_AXIUS_FSYSSX 77 +#define CLK_ACLK_AHB2APB_FSYSP 78 +#define CLK_ACLK_AHB2AXI_USBHS 79 +#define CLK_ACLK_AHB_USBLINKH0 80 +#define CLK_ACLK_AHB_USBHS 81 +#define CLK_ACLK_AHB_FSYSH 82 +#define CLK_ACLK_XIU_FSYSX 83 +#define CLK_ACLK_XIU_FSYSSX 84 +#define CLK_ACLK_FSYSNP_200 85 +#define CLK_ACLK_FSYSND_200 86 +#define CLK_PCLK_PCIE_CTRL 87 +#define CLK_PCLK_SMMU_PDMA1 88 +#define CLK_PCLK_PCIE_PHY 89 +#define CLK_PCLK_BTS_PCIE 90 +#define CLK_PCLK_SMMU_PDMA0 91 +#define CLK_PCLK_BTS_UFS 92 +#define CLK_PCLK_BTS_USBHOST30 93 +#define CLK_PCLK_BTS_USBDRD30 94 +#define CLK_PCLK_GPIO_FSYS 95 +#define CLK_PCLK_PMU_FSYS 96 +#define CLK_PCLK_SYSREG_FSYS 97 +#define CLK_SCLK_PCIE_100 98 +#define CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK 99 +#define CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK 100 +#define CLK_PHYCLK_UFS_RX1_SYMBOL 101 +#define CLK_PHYCLK_UFS_RX0_SYMBOL 102 +#define CLK_PHYCLK_UFS_TX1_SYMBOL 103 +#define CLK_PHYCLK_UFS_TX0_SYMBOL 104 +#define CLK_PHYCLK_USBHOST20_PHY_HSIC1 105 +#define CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI 106 +#define CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK 107 +#define CLK_PHYCLK_USBHOST20_PHY_FREECLK 108 +#define CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 109 +#define CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK 110 +#define CLK_SCLK_MPHY 111 +#define CLK_SCLK_UFSUNIPRO 112 +#define CLK_SCLK_USBHOST30 113 +#define CLK_SCLK_USBDRD30 114 +#define CLK_PCIE 115 + +#define FSYS_NR_CLK 116 + +/* CMU_G2D */ +#define CLK_MUX_ACLK_G2D_266_USER 1 +#define CLK_MUX_ACLK_G2D_400_USER 2 + +#define CLK_DIV_PCLK_G2D 3 + +#define CLK_ACLK_SMMU_MDMA1 4 +#define CLK_ACLK_BTS_MDMA1 5 +#define CLK_ACLK_BTS_G2D 6 +#define CLK_ACLK_ALB_G2D 7 +#define CLK_ACLK_AXIUS_G2DX 8 +#define CLK_ACLK_ASYNCAXI_SYSX 9 +#define CLK_ACLK_AHB2APB_G2D1P 10 +#define CLK_ACLK_AHB2APB_G2D0P 11 +#define CLK_ACLK_XIU_G2DX 12 +#define CLK_ACLK_G2DNP_133 13 +#define CLK_ACLK_G2DND_400 14 +#define CLK_ACLK_MDMA1 15 +#define CLK_ACLK_G2D 16 +#define CLK_ACLK_SMMU_G2D 17 +#define CLK_PCLK_SMMU_MDMA1 18 +#define CLK_PCLK_BTS_MDMA1 19 +#define CLK_PCLK_BTS_G2D 20 +#define CLK_PCLK_ALB_G2D 21 +#define CLK_PCLK_ASYNCAXI_SYSX 22 +#define CLK_PCLK_PMU_G2D 23 +#define CLK_PCLK_SYSREG_G2D 24 +#define CLK_PCLK_G2D 25 +#define CLK_PCLK_SMMU_G2D 26 + +#define G2D_NR_CLK 27 + +/* CMU_DISP */ +#define CLK_FOUT_DISP_PLL 1 + +#define CLK_MOUT_DISP_PLL 2 +#define CLK_MOUT_SCLK_DSIM1_USER 3 +#define CLK_MOUT_SCLK_DSIM0_USER 4 +#define CLK_MOUT_SCLK_DSD_USER 5 +#define 
CLK_MOUT_SCLK_DECON_TV_ECLK_USER 6 +#define CLK_MOUT_SCLK_DECON_VCLK_USER 7 +#define CLK_MOUT_SCLK_DECON_ECLK_USER 8 +#define CLK_MOUT_SCLK_DECON_TV_VCLK_USER 9 +#define CLK_MOUT_ACLK_DISP_333_USER 10 +#define CLK_MOUT_PHYCLK_MIPIDPHY1_BITCLKDIV8_USER 11 +#define CLK_MOUT_PHYCLK_MIPIDPHY1_RXCLKESC0_USER 12 +#define CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER 13 +#define CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER 14 +#define CLK_MOUT_PHYCLK_HDMIPHY_TMDS_CLKO_USER 15 +#define CLK_MOUT_PHYCLK_HDMIPHY_PIXEL_CLKO_USER 16 +#define CLK_MOUT_SCLK_DSIM0 17 +#define CLK_MOUT_SCLK_DECON_TV_ECLK 18 +#define CLK_MOUT_SCLK_DECON_VCLK 19 +#define CLK_MOUT_SCLK_DECON_ECLK 20 +#define CLK_MOUT_SCLK_DSIM1_B_DISP 21 +#define CLK_MOUT_SCLK_DSIM1_A_DISP 22 +#define CLK_MOUT_SCLK_DECON_TV_VCLK_C_DISP 23 +#define CLK_MOUT_SCLK_DECON_TV_VCLK_B_DISP 24 +#define CLK_MOUT_SCLK_DECON_TV_VCLK_A_DISP 25 + +#define CLK_DIV_SCLK_DSIM1_DISP 30 +#define CLK_DIV_SCLK_DECON_TV_VCLK_DISP 31 +#define CLK_DIV_SCLK_DSIM0_DISP 32 +#define CLK_DIV_SCLK_DECON_TV_ECLK_DISP 33 +#define CLK_DIV_SCLK_DECON_VCLK_DISP 34 +#define CLK_DIV_SCLK_DECON_ECLK_DISP 35 +#define CLK_DIV_PCLK_DISP 36 + +#define CLK_ACLK_DECON_TV 40 +#define CLK_ACLK_DECON 41 +#define CLK_ACLK_SMMU_TV1X 42 +#define CLK_ACLK_SMMU_TV0X 43 +#define CLK_ACLK_SMMU_DECON1X 44 +#define CLK_ACLK_SMMU_DECON0X 45 +#define CLK_ACLK_BTS_DECON_TV_M3 46 +#define CLK_ACLK_BTS_DECON_TV_M2 47 +#define CLK_ACLK_BTS_DECON_TV_M1 48 +#define CLK_ACLK_BTS_DECON_TV_M0 49 +#define CLK_ACLK_BTS_DECON_NM4 50 +#define CLK_ACLK_BTS_DECON_NM3 51 +#define CLK_ACLK_BTS_DECON_NM2 52 +#define CLK_ACLK_BTS_DECON_NM1 53 +#define CLK_ACLK_BTS_DECON_NM0 54 +#define CLK_ACLK_AHB2APB_DISPSFR2P 55 +#define CLK_ACLK_AHB2APB_DISPSFR1P 56 +#define CLK_ACLK_AHB2APB_DISPSFR0P 57 +#define CLK_ACLK_AHB_DISPH 58 +#define CLK_ACLK_XIU_TV1X 59 +#define CLK_ACLK_XIU_TV0X 60 +#define CLK_ACLK_XIU_DECON1X 61 +#define CLK_ACLK_XIU_DECON0X 62 +#define CLK_ACLK_XIU_DISP1X 63 +#define CLK_ACLK_XIU_DISPNP_100 64 +#define CLK_ACLK_DISP1ND_333 65 +#define CLK_ACLK_DISP0ND_333 66 +#define CLK_PCLK_SMMU_TV1X 67 +#define CLK_PCLK_SMMU_TV0X 68 +#define CLK_PCLK_SMMU_DECON1X 69 +#define CLK_PCLK_SMMU_DECON0X 70 +#define CLK_PCLK_BTS_DECON_TV_M3 71 +#define CLK_PCLK_BTS_DECON_TV_M2 72 +#define CLK_PCLK_BTS_DECON_TV_M1 73 +#define CLK_PCLK_BTS_DECON_TV_M0 74 +#define CLK_PCLK_BTS_DECONM4 75 +#define CLK_PCLK_BTS_DECONM3 76 +#define CLK_PCLK_BTS_DECONM2 77 +#define CLK_PCLK_BTS_DECONM1 78 +#define CLK_PCLK_BTS_DECONM0 79 +#define CLK_PCLK_MIC1 80 +#define CLK_PCLK_PMU_DISP 81 +#define CLK_PCLK_SYSREG_DISP 82 +#define CLK_PCLK_HDMIPHY 83 +#define CLK_PCLK_HDMI 84 +#define CLK_PCLK_MIC0 85 +#define CLK_PCLK_DSIM1 86 +#define CLK_PCLK_DSIM0 87 +#define CLK_PCLK_DECON_TV 88 +#define CLK_PHYCLK_MIPIDPHY1_BITCLKDIV8 89 +#define CLK_PHYCLK_MIPIDPHY1_RXCLKESC0 90 +#define CLK_SCLK_RGB_TV_VCLK_TO_DSIM1 91 +#define CLK_SCLK_RGB_TV_VCLK_TO_MIC1 92 +#define CLK_SCLK_DSIM1 93 +#define CLK_SCLK_DECON_TV_VCLK 94 +#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8 95 +#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0 96 +#define CLK_PHYCLK_HDMIPHY_TMDS_CLKO 97 +#define CLK_PHYCLK_HDMI_PIXEL 98 +#define CLK_SCLK_RGB_VCLK_TO_SMIES 99 +#define CLK_SCLK_FREQ_DET_DISP_PLL 100 +#define CLK_SCLK_RGB_VCLK_TO_DSIM0 101 +#define CLK_SCLK_RGB_VCLK_TO_MIC0 102 +#define CLK_SCLK_DSD 103 +#define CLK_SCLK_HDMI_SPDIF 104 +#define CLK_SCLK_DSIM0 105 +#define CLK_SCLK_DECON_TV_ECLK 106 +#define CLK_SCLK_DECON_VCLK 107 +#define CLK_SCLK_DECON_ECLK 108 +#define CLK_SCLK_RGB_VCLK 109 +#define 
CLK_SCLK_RGB_TV_VCLK 110 + +#define CLK_PHYCLK_HDMIPHY_PIXEL_CLKO_PHY 111 +#define CLK_PHYCLK_HDMIPHY_TMDS_CLKO_PHY 112 + +#define CLK_PCLK_DECON 113 + +#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114 +#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115 + +#define DISP_NR_CLK 116 + +/* CMU_AUD */ +#define CLK_MOUT_AUD_PLL_USER 1 +#define CLK_MOUT_SCLK_AUD_PCM 2 +#define CLK_MOUT_SCLK_AUD_I2S 3 + +#define CLK_DIV_ATCLK_AUD 4 +#define CLK_DIV_PCLK_DBG_AUD 5 +#define CLK_DIV_ACLK_AUD 6 +#define CLK_DIV_AUD_CA5 7 +#define CLK_DIV_SCLK_AUD_SLIMBUS 8 +#define CLK_DIV_SCLK_AUD_UART 9 +#define CLK_DIV_SCLK_AUD_PCM 10 +#define CLK_DIV_SCLK_AUD_I2S 11 + +#define CLK_ACLK_INTR_CTRL 12 +#define CLK_ACLK_AXIDS2_LPASSP 13 +#define CLK_ACLK_AXIDS1_LPASSP 14 +#define CLK_ACLK_AXI2APB1_LPASSP 15 +#define CLK_ACLK_AXI2APH_LPASSP 16 +#define CLK_ACLK_SMMU_LPASSX 17 +#define CLK_ACLK_AXIDS0_LPASSP 18 +#define CLK_ACLK_AXI2APB0_LPASSP 19 +#define CLK_ACLK_XIU_LPASSX 20 +#define CLK_ACLK_AUDNP_133 21 +#define CLK_ACLK_AUDND_133 22 +#define CLK_ACLK_SRAMC 23 +#define CLK_ACLK_DMAC 24 +#define CLK_PCLK_WDT1 25 +#define CLK_PCLK_WDT0 26 +#define CLK_PCLK_SFR1 27 +#define CLK_PCLK_SMMU_LPASSX 28 +#define CLK_PCLK_GPIO_AUD 29 +#define CLK_PCLK_PMU_AUD 30 +#define CLK_PCLK_SYSREG_AUD 31 +#define CLK_PCLK_AUD_SLIMBUS 32 +#define CLK_PCLK_AUD_UART 33 +#define CLK_PCLK_AUD_PCM 34 +#define CLK_PCLK_AUD_I2S 35 +#define CLK_PCLK_TIMER 36 +#define CLK_PCLK_SFR0_CTRL 37 +#define CLK_ATCLK_AUD 38 +#define CLK_PCLK_DBG_AUD 39 +#define CLK_SCLK_AUD_CA5 40 +#define CLK_SCLK_JTAG_TCK 41 +#define CLK_SCLK_SLIMBUS_CLKIN 42 +#define CLK_SCLK_AUD_SLIMBUS 43 +#define CLK_SCLK_AUD_UART 44 +#define CLK_SCLK_AUD_PCM 45 +#define CLK_SCLK_I2S_BCLK 46 +#define CLK_SCLK_AUD_I2S 47 + +#define AUD_NR_CLK 48 + +/* CMU_BUS{0|1|2} */ +#define CLK_DIV_PCLK_BUS_133 1 + +#define CLK_ACLK_AHB2APB_BUSP 2 +#define CLK_ACLK_BUSNP_133 3 +#define CLK_ACLK_BUSND_400 4 +#define CLK_PCLK_BUSSRVND_133 5 +#define CLK_PCLK_PMU_BUS 6 +#define CLK_PCLK_SYSREG_BUS 7 + +#define CLK_MOUT_ACLK_BUS2_400_USER 8 /* Only CMU_BUS2 */ +#define CLK_ACLK_BUS2BEND_400 9 /* Only CMU_BUS2 */ +#define CLK_ACLK_BUS2RTND_400 10 /* Only CMU_BUS2 */ + +#define BUSx_NR_CLK 11 + +/* CMU_G3D */ +#define CLK_FOUT_G3D_PLL 1 + +#define CLK_MOUT_ACLK_G3D_400 2 +#define CLK_MOUT_G3D_PLL 3 + +#define CLK_DIV_SCLK_HPM_G3D 4 +#define CLK_DIV_PCLK_G3D 5 +#define CLK_DIV_ACLK_G3D 6 +#define CLK_ACLK_BTS_G3D1 7 +#define CLK_ACLK_BTS_G3D0 8 +#define CLK_ACLK_ASYNCAPBS_G3D 9 +#define CLK_ACLK_ASYNCAPBM_G3D 10 +#define CLK_ACLK_AHB2APB_G3DP 11 +#define CLK_ACLK_G3DNP_150 12 +#define CLK_ACLK_G3DND_600 13 +#define CLK_ACLK_G3D 14 +#define CLK_PCLK_BTS_G3D1 15 +#define CLK_PCLK_BTS_G3D0 16 +#define CLK_PCLK_PMU_G3D 17 +#define CLK_PCLK_SYSREG_G3D 18 +#define CLK_SCLK_HPM_G3D 19 + +#define G3D_NR_CLK 20 + +/* CMU_GSCL */ +#define CLK_MOUT_ACLK_GSCL_111_USER 1 +#define CLK_MOUT_ACLK_GSCL_333_USER 2 + +#define CLK_ACLK_BTS_GSCL2 3 +#define CLK_ACLK_BTS_GSCL1 4 +#define CLK_ACLK_BTS_GSCL0 5 +#define CLK_ACLK_AHB2APB_GSCLP 6 +#define CLK_ACLK_XIU_GSCLX 7 +#define CLK_ACLK_GSCLNP_111 8 +#define CLK_ACLK_GSCLRTND_333 9 +#define CLK_ACLK_GSCLBEND_333 10 +#define CLK_ACLK_GSD 11 +#define CLK_ACLK_GSCL2 12 +#define CLK_ACLK_GSCL1 13 +#define CLK_ACLK_GSCL0 14 +#define CLK_ACLK_SMMU_GSCL0 15 +#define CLK_ACLK_SMMU_GSCL1 16 +#define CLK_ACLK_SMMU_GSCL2 17 +#define CLK_PCLK_BTS_GSCL2 18 +#define CLK_PCLK_BTS_GSCL1 19 +#define CLK_PCLK_BTS_GSCL0 20 +#define CLK_PCLK_PMU_GSCL 21 +#define CLK_PCLK_SYSREG_GSCL 22 +#define 
CLK_PCLK_GSCL2 23 +#define CLK_PCLK_GSCL1 24 +#define CLK_PCLK_GSCL0 25 +#define CLK_PCLK_SMMU_GSCL0 26 +#define CLK_PCLK_SMMU_GSCL1 27 +#define CLK_PCLK_SMMU_GSCL2 28 + +#define GSCL_NR_CLK 29 + +/* CMU_APOLLO */ +#define CLK_FOUT_APOLLO_PLL 1 + +#define CLK_MOUT_APOLLO_PLL 2 +#define CLK_MOUT_BUS_PLL_APOLLO_USER 3 +#define CLK_MOUT_APOLLO 4 + +#define CLK_DIV_CNTCLK_APOLLO 5 +#define CLK_DIV_PCLK_DBG_APOLLO 6 +#define CLK_DIV_ATCLK_APOLLO 7 +#define CLK_DIV_PCLK_APOLLO 8 +#define CLK_DIV_ACLK_APOLLO 9 +#define CLK_DIV_APOLLO2 10 +#define CLK_DIV_APOLLO1 11 +#define CLK_DIV_SCLK_HPM_APOLLO 12 +#define CLK_DIV_APOLLO_PLL 13 + +#define CLK_ACLK_ATBDS_APOLLO_3 14 +#define CLK_ACLK_ATBDS_APOLLO_2 15 +#define CLK_ACLK_ATBDS_APOLLO_1 16 +#define CLK_ACLK_ATBDS_APOLLO_0 17 +#define CLK_ACLK_ASATBSLV_APOLLO_3_CSSYS 18 +#define CLK_ACLK_ASATBSLV_APOLLO_2_CSSYS 19 +#define CLK_ACLK_ASATBSLV_APOLLO_1_CSSYS 20 +#define CLK_ACLK_ASATBSLV_APOLLO_0_CSSYS 21 +#define CLK_ACLK_ASYNCACES_APOLLO_CCI 22 +#define CLK_ACLK_AHB2APB_APOLLOP 23 +#define CLK_ACLK_APOLLONP_200 24 +#define CLK_PCLK_ASAPBMST_CSSYS_APOLLO 25 +#define CLK_PCLK_PMU_APOLLO 26 +#define CLK_PCLK_SYSREG_APOLLO 27 +#define CLK_CNTCLK_APOLLO 28 +#define CLK_SCLK_HPM_APOLLO 29 +#define CLK_SCLK_APOLLO 30 + +#define APOLLO_NR_CLK 31 + +/* CMU_ATLAS */ +#define CLK_FOUT_ATLAS_PLL 1 + +#define CLK_MOUT_ATLAS_PLL 2 +#define CLK_MOUT_BUS_PLL_ATLAS_USER 3 +#define CLK_MOUT_ATLAS 4 + +#define CLK_DIV_CNTCLK_ATLAS 5 +#define CLK_DIV_PCLK_DBG_ATLAS 6 +#define CLK_DIV_ATCLK_ATLASO 7 +#define CLK_DIV_PCLK_ATLAS 8 +#define CLK_DIV_ACLK_ATLAS 9 +#define CLK_DIV_ATLAS2 10 +#define CLK_DIV_ATLAS1 11 +#define CLK_DIV_SCLK_HPM_ATLAS 12 +#define CLK_DIV_ATLAS_PLL 13 + +#define CLK_ACLK_ATB_AUD_CSSYS 14 +#define CLK_ACLK_ATB_APOLLO3_CSSYS 15 +#define CLK_ACLK_ATB_APOLLO2_CSSYS 16 +#define CLK_ACLK_ATB_APOLLO1_CSSYS 17 +#define CLK_ACLK_ATB_APOLLO0_CSSYS 18 +#define CLK_ACLK_ASYNCAHBS_CSSYS_SSS 19 +#define CLK_ACLK_ASYNCAXIS_CSSYS_CCIX 20 +#define CLK_ACLK_ASYNCACES_ATLAS_CCI 21 +#define CLK_ACLK_AHB2APB_ATLASP 22 +#define CLK_ACLK_ATLASNP_200 23 +#define CLK_PCLK_ASYNCAPB_AUD_CSSYS 24 +#define CLK_PCLK_ASYNCAPB_ISP_CSSYS 25 +#define CLK_PCLK_ASYNCAPB_APOLLO_CSSYS 26 +#define CLK_PCLK_PMU_ATLAS 27 +#define CLK_PCLK_SYSREG_ATLAS 28 +#define CLK_PCLK_SECJTAG 29 +#define CLK_CNTCLK_ATLAS 30 +#define CLK_SCLK_FREQ_DET_ATLAS_PLL 31 +#define CLK_SCLK_HPM_ATLAS 32 +#define CLK_TRACECLK 33 +#define CLK_CTMCLK 34 +#define CLK_HCLK_CSSYS 35 +#define CLK_PCLK_DBG_CSSYS 36 +#define CLK_PCLK_DBG 37 +#define CLK_ATCLK 38 +#define CLK_SCLK_ATLAS 39 + +#define ATLAS_NR_CLK 40 + +/* CMU_MSCL */ +#define CLK_MOUT_SCLK_JPEG_USER 1 +#define CLK_MOUT_ACLK_MSCL_400_USER 2 +#define CLK_MOUT_SCLK_JPEG 3 + +#define CLK_DIV_PCLK_MSCL 4 + +#define CLK_ACLK_BTS_JPEG 5 +#define CLK_ACLK_BTS_M2MSCALER1 6 +#define CLK_ACLK_BTS_M2MSCALER0 7 +#define CLK_ACLK_AHB2APB_MSCL0P 8 +#define CLK_ACLK_XIU_MSCLX 9 +#define CLK_ACLK_MSCLNP_100 10 +#define CLK_ACLK_MSCLND_400 11 +#define CLK_ACLK_JPEG 12 +#define CLK_ACLK_M2MSCALER1 13 +#define CLK_ACLK_M2MSCALER0 14 +#define CLK_ACLK_SMMU_M2MSCALER0 15 +#define CLK_ACLK_SMMU_M2MSCALER1 16 +#define CLK_ACLK_SMMU_JPEG 17 +#define CLK_PCLK_BTS_JPEG 18 +#define CLK_PCLK_BTS_M2MSCALER1 19 +#define CLK_PCLK_BTS_M2MSCALER0 20 +#define CLK_PCLK_PMU_MSCL 21 +#define CLK_PCLK_SYSREG_MSCL 22 +#define CLK_PCLK_JPEG 23 +#define CLK_PCLK_M2MSCALER1 24 +#define CLK_PCLK_M2MSCALER0 25 +#define CLK_PCLK_SMMU_M2MSCALER0 26 +#define CLK_PCLK_SMMU_M2MSCALER1 27 +#define 
CLK_PCLK_SMMU_JPEG 28 +#define CLK_SCLK_JPEG 29 + +#define MSCL_NR_CLK 30 + +/* CMU_MFC */ +#define CLK_MOUT_ACLK_MFC_400_USER 1 + +#define CLK_DIV_PCLK_MFC 2 + +#define CLK_ACLK_BTS_MFC_1 3 +#define CLK_ACLK_BTS_MFC_0 4 +#define CLK_ACLK_AHB2APB_MFCP 5 +#define CLK_ACLK_XIU_MFCX 6 +#define CLK_ACLK_MFCNP_100 7 +#define CLK_ACLK_MFCND_400 8 +#define CLK_ACLK_MFC 9 +#define CLK_ACLK_SMMU_MFC_1 10 +#define CLK_ACLK_SMMU_MFC_0 11 +#define CLK_PCLK_BTS_MFC_1 12 +#define CLK_PCLK_BTS_MFC_0 13 +#define CLK_PCLK_PMU_MFC 14 +#define CLK_PCLK_SYSREG_MFC 15 +#define CLK_PCLK_MFC 16 +#define CLK_PCLK_SMMU_MFC_1 17 +#define CLK_PCLK_SMMU_MFC_0 18 + +#define MFC_NR_CLK 19 + +/* CMU_HEVC */ +#define CLK_MOUT_ACLK_HEVC_400_USER 1 + +#define CLK_DIV_PCLK_HEVC 2 + +#define CLK_ACLK_BTS_HEVC_1 3 +#define CLK_ACLK_BTS_HEVC_0 4 +#define CLK_ACLK_AHB2APB_HEVCP 5 +#define CLK_ACLK_XIU_HEVCX 6 +#define CLK_ACLK_HEVCNP_100 7 +#define CLK_ACLK_HEVCND_400 8 +#define CLK_ACLK_HEVC 9 +#define CLK_ACLK_SMMU_HEVC_1 10 +#define CLK_ACLK_SMMU_HEVC_0 11 +#define CLK_PCLK_BTS_HEVC_1 12 +#define CLK_PCLK_BTS_HEVC_0 13 +#define CLK_PCLK_PMU_HEVC 14 +#define CLK_PCLK_SYSREG_HEVC 15 +#define CLK_PCLK_HEVC 16 +#define CLK_PCLK_SMMU_HEVC_1 17 +#define CLK_PCLK_SMMU_HEVC_0 18 + +#define HEVC_NR_CLK 19 + +/* CMU_ISP */ +#define CLK_MOUT_ACLK_ISP_DIS_400_USER 1 +#define CLK_MOUT_ACLK_ISP_400_USER 2 + +#define CLK_DIV_PCLK_ISP_DIS 3 +#define CLK_DIV_PCLK_ISP 4 +#define CLK_DIV_ACLK_ISP_D_200 5 +#define CLK_DIV_ACLK_ISP_C_200 6 + +#define CLK_ACLK_ISP_D_GLUE 7 +#define CLK_ACLK_SCALERP 8 +#define CLK_ACLK_3DNR 9 +#define CLK_ACLK_DIS 10 +#define CLK_ACLK_SCALERC 11 +#define CLK_ACLK_DRC 12 +#define CLK_ACLK_ISP 13 +#define CLK_ACLK_AXIUS_SCALERP 14 +#define CLK_ACLK_AXIUS_SCALERC 15 +#define CLK_ACLK_AXIUS_DRC 16 +#define CLK_ACLK_ASYNCAHBM_ISP2P 17 +#define CLK_ACLK_ASYNCAHBM_ISP1P 18 +#define CLK_ACLK_ASYNCAXIS_DIS1 19 +#define CLK_ACLK_ASYNCAXIS_DIS0 20 +#define CLK_ACLK_ASYNCAXIM_DIS1 21 +#define CLK_ACLK_ASYNCAXIM_DIS0 22 +#define CLK_ACLK_ASYNCAXIM_ISP2P 23 +#define CLK_ACLK_ASYNCAXIM_ISP1P 24 +#define CLK_ACLK_AHB2APB_ISP2P 25 +#define CLK_ACLK_AHB2APB_ISP1P 26 +#define CLK_ACLK_AXI2APB_ISP2P 27 +#define CLK_ACLK_AXI2APB_ISP1P 28 +#define CLK_ACLK_XIU_ISPEX1 29 +#define CLK_ACLK_XIU_ISPEX0 30 +#define CLK_ACLK_ISPND_400 31 +#define CLK_ACLK_SMMU_SCALERP 32 +#define CLK_ACLK_SMMU_3DNR 33 +#define CLK_ACLK_SMMU_DIS1 34 +#define CLK_ACLK_SMMU_DIS0 35 +#define CLK_ACLK_SMMU_SCALERC 36 +#define CLK_ACLK_SMMU_DRC 37 +#define CLK_ACLK_SMMU_ISP 38 +#define CLK_ACLK_BTS_SCALERP 39 +#define CLK_ACLK_BTS_3DR 40 +#define CLK_ACLK_BTS_DIS1 41 +#define CLK_ACLK_BTS_DIS0 42 +#define CLK_ACLK_BTS_SCALERC 43 +#define CLK_ACLK_BTS_DRC 44 +#define CLK_ACLK_BTS_ISP 45 +#define CLK_PCLK_SMMU_SCALERP 46 +#define CLK_PCLK_SMMU_3DNR 47 +#define CLK_PCLK_SMMU_DIS1 48 +#define CLK_PCLK_SMMU_DIS0 49 +#define CLK_PCLK_SMMU_SCALERC 50 +#define CLK_PCLK_SMMU_DRC 51 +#define CLK_PCLK_SMMU_ISP 52 +#define CLK_PCLK_BTS_SCALERP 53 +#define CLK_PCLK_BTS_3DNR 54 +#define CLK_PCLK_BTS_DIS1 55 +#define CLK_PCLK_BTS_DIS0 56 +#define CLK_PCLK_BTS_SCALERC 57 +#define CLK_PCLK_BTS_DRC 58 +#define CLK_PCLK_BTS_ISP 59 +#define CLK_PCLK_ASYNCAXI_DIS1 60 +#define CLK_PCLK_ASYNCAXI_DIS0 61 +#define CLK_PCLK_PMU_ISP 62 +#define CLK_PCLK_SYSREG_ISP 63 +#define CLK_PCLK_CMU_ISP_LOCAL 64 +#define CLK_PCLK_SCALERP 65 +#define CLK_PCLK_3DNR 66 +#define CLK_PCLK_DIS_CORE 67 +#define CLK_PCLK_DIS 68 +#define CLK_PCLK_SCALERC 69 +#define CLK_PCLK_DRC 70 +#define CLK_PCLK_ISP 71 
+#define CLK_SCLK_PIXELASYNCS_DIS 72 +#define CLK_SCLK_PIXELASYNCM_DIS 73 +#define CLK_SCLK_PIXELASYNCS_SCALERP 74 +#define CLK_SCLK_PIXELASYNCM_ISPD 75 +#define CLK_SCLK_PIXELASYNCS_ISPC 76 +#define CLK_SCLK_PIXELASYNCM_ISPC 77 + +#define ISP_NR_CLK 78 + +/* CMU_CAM0 */ +#define CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY 1 +#define CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY 2 + +#define CLK_MOUT_ACLK_CAM0_333_USER 3 +#define CLK_MOUT_ACLK_CAM0_400_USER 4 +#define CLK_MOUT_ACLK_CAM0_552_USER 5 +#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S4_USER 6 +#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2A_USER 7 +#define CLK_MOUT_ACLK_LITE_D_B 8 +#define CLK_MOUT_ACLK_LITE_D_A 9 +#define CLK_MOUT_ACLK_LITE_B_B 10 +#define CLK_MOUT_ACLK_LITE_B_A 11 +#define CLK_MOUT_ACLK_LITE_A_B 12 +#define CLK_MOUT_ACLK_LITE_A_A 13 +#define CLK_MOUT_ACLK_CAM0_400 14 +#define CLK_MOUT_ACLK_CSIS1_B 15 +#define CLK_MOUT_ACLK_CSIS1_A 16 +#define CLK_MOUT_ACLK_CSIS0_B 17 +#define CLK_MOUT_ACLK_CSIS0_A 18 +#define CLK_MOUT_ACLK_3AA1_B 19 +#define CLK_MOUT_ACLK_3AA1_A 20 +#define CLK_MOUT_ACLK_3AA0_B 21 +#define CLK_MOUT_ACLK_3AA0_A 22 +#define CLK_MOUT_SCLK_LITE_FREECNT_C 23 +#define CLK_MOUT_SCLK_LITE_FREECNT_B 24 +#define CLK_MOUT_SCLK_LITE_FREECNT_A 25 +#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_B 26 +#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_A 27 +#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_B 28 +#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_A 29 + +#define CLK_DIV_PCLK_CAM0_50 30 +#define CLK_DIV_ACLK_CAM0_200 31 +#define CLK_DIV_ACLK_CAM0_BUS_400 32 +#define CLK_DIV_PCLK_LITE_D 33 +#define CLK_DIV_ACLK_LITE_D 34 +#define CLK_DIV_PCLK_LITE_B 35 +#define CLK_DIV_ACLK_LITE_B 36 +#define CLK_DIV_PCLK_LITE_A 37 +#define CLK_DIV_ACLK_LITE_A 38 +#define CLK_DIV_ACLK_CSIS1 39 +#define CLK_DIV_ACLK_CSIS0 40 +#define CLK_DIV_PCLK_3AA1 41 +#define CLK_DIV_ACLK_3AA1 42 +#define CLK_DIV_PCLK_3AA0 43 +#define CLK_DIV_ACLK_3AA0 44 +#define CLK_DIV_SCLK_PIXELASYNC_LITE_C 45 +#define CLK_DIV_PCLK_PIXELASYNC_LITE_C 46 +#define CLK_DIV_SCLK_PIXELASYNC_LITE_C_INIT 47 + +#define CLK_ACLK_CSIS1 50 +#define CLK_ACLK_CSIS0 51 +#define CLK_ACLK_3AA1 52 +#define CLK_ACLK_3AA0 53 +#define CLK_ACLK_LITE_D 54 +#define CLK_ACLK_LITE_B 55 +#define CLK_ACLK_LITE_A 56 +#define CLK_ACLK_AHBSYNCDN 57 +#define CLK_ACLK_AXIUS_LITE_D 58 +#define CLK_ACLK_AXIUS_LITE_B 59 +#define CLK_ACLK_AXIUS_LITE_A 60 +#define CLK_ACLK_ASYNCAPBM_3AA1 61 +#define CLK_ACLK_ASYNCAPBS_3AA1 62 +#define CLK_ACLK_ASYNCAPBM_3AA0 63 +#define CLK_ACLK_ASYNCAPBS_3AA0 64 +#define CLK_ACLK_ASYNCAPBM_LITE_D 65 +#define CLK_ACLK_ASYNCAPBS_LITE_D 66 +#define CLK_ACLK_ASYNCAPBM_LITE_B 67 +#define CLK_ACLK_ASYNCAPBS_LITE_B 68 +#define CLK_ACLK_ASYNCAPBM_LITE_A 69 +#define CLK_ACLK_ASYNCAPBS_LITE_A 70 +#define CLK_ACLK_ASYNCAXIM_ISP0P 71 +#define CLK_ACLK_ASYNCAXIM_3AA1 72 +#define CLK_ACLK_ASYNCAXIS_3AA1 73 +#define CLK_ACLK_ASYNCAXIM_3AA0 74 +#define CLK_ACLK_ASYNCAXIS_3AA0 75 +#define CLK_ACLK_ASYNCAXIM_LITE_D 76 +#define CLK_ACLK_ASYNCAXIS_LITE_D 77 +#define CLK_ACLK_ASYNCAXIM_LITE_B 78 +#define CLK_ACLK_ASYNCAXIS_LITE_B 79 +#define CLK_ACLK_ASYNCAXIM_LITE_A 80 +#define CLK_ACLK_ASYNCAXIS_LITE_A 81 +#define CLK_ACLK_AHB2APB_ISPSFRP 82 +#define CLK_ACLK_AXI2APB_ISP0P 83 +#define CLK_ACLK_AXI2AHB_ISP0P 84 +#define CLK_ACLK_XIU_IS0X 85 +#define CLK_ACLK_XIU_ISP0EX 86 +#define CLK_ACLK_CAM0NP_276 87 +#define CLK_ACLK_CAM0ND_400 88 +#define CLK_ACLK_SMMU_3AA1 89 +#define CLK_ACLK_SMMU_3AA0 90 +#define CLK_ACLK_SMMU_LITE_D 91 +#define CLK_ACLK_SMMU_LITE_B 92 +#define CLK_ACLK_SMMU_LITE_A 93 +#define CLK_ACLK_BTS_3AA1 94 
+#define CLK_ACLK_BTS_3AA0 95 +#define CLK_ACLK_BTS_LITE_D 96 +#define CLK_ACLK_BTS_LITE_B 97 +#define CLK_ACLK_BTS_LITE_A 98 +#define CLK_PCLK_SMMU_3AA1 99 +#define CLK_PCLK_SMMU_3AA0 100 +#define CLK_PCLK_SMMU_LITE_D 101 +#define CLK_PCLK_SMMU_LITE_B 102 +#define CLK_PCLK_SMMU_LITE_A 103 +#define CLK_PCLK_BTS_3AA1 104 +#define CLK_PCLK_BTS_3AA0 105 +#define CLK_PCLK_BTS_LITE_D 106 +#define CLK_PCLK_BTS_LITE_B 107 +#define CLK_PCLK_BTS_LITE_A 108 +#define CLK_PCLK_ASYNCAXI_CAM1 109 +#define CLK_PCLK_ASYNCAXI_3AA1 110 +#define CLK_PCLK_ASYNCAXI_3AA0 111 +#define CLK_PCLK_ASYNCAXI_LITE_D 112 +#define CLK_PCLK_ASYNCAXI_LITE_B 113 +#define CLK_PCLK_ASYNCAXI_LITE_A 114 +#define CLK_PCLK_PMU_CAM0 115 +#define CLK_PCLK_SYSREG_CAM0 116 +#define CLK_PCLK_CMU_CAM0_LOCAL 117 +#define CLK_PCLK_CSIS1 118 +#define CLK_PCLK_CSIS0 119 +#define CLK_PCLK_3AA1 120 +#define CLK_PCLK_3AA0 121 +#define CLK_PCLK_LITE_D 122 +#define CLK_PCLK_LITE_B 123 +#define CLK_PCLK_LITE_A 124 +#define CLK_PHYCLK_RXBYTECLKHS0_S4 125 +#define CLK_PHYCLK_RXBYTECLKHS0_S2A 126 +#define CLK_SCLK_LITE_FREECNT 127 +#define CLK_SCLK_PIXELASYNCM_3AA1 128 +#define CLK_SCLK_PIXELASYNCM_3AA0 129 +#define CLK_SCLK_PIXELASYNCS_3AA0 130 +#define CLK_SCLK_PIXELASYNCM_LITE_C 131 +#define CLK_SCLK_PIXELASYNCM_LITE_C_INIT 132 +#define CLK_SCLK_PIXELASYNCS_LITE_C_INIT 133 + +#define CAM0_NR_CLK 134 + +/* CMU_CAM1 */ +#define CLK_PHYCLK_RXBYTEECLKHS0_S2B 1 + +#define CLK_MOUT_SCLK_ISP_UART_USER 2 +#define CLK_MOUT_SCLK_ISP_SPI1_USER 3 +#define CLK_MOUT_SCLK_ISP_SPI0_USER 4 +#define CLK_MOUT_ACLK_CAM1_333_USER 5 +#define CLK_MOUT_ACLK_CAM1_400_USER 6 +#define CLK_MOUT_ACLK_CAM1_552_USER 7 +#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2B_USER 8 +#define CLK_MOUT_ACLK_CSIS2_B 9 +#define CLK_MOUT_ACLK_CSIS2_A 10 +#define CLK_MOUT_ACLK_FD_B 11 +#define CLK_MOUT_ACLK_FD_A 12 +#define CLK_MOUT_ACLK_LITE_C_B 13 +#define CLK_MOUT_ACLK_LITE_C_A 14 + +#define CLK_DIV_SCLK_ISP_MPWM 15 +#define CLK_DIV_PCLK_CAM1_83 16 +#define CLK_DIV_PCLK_CAM1_166 17 +#define CLK_DIV_PCLK_DBG_CAM1 18 +#define CLK_DIV_ATCLK_CAM1 19 +#define CLK_DIV_ACLK_CSIS2 20 +#define CLK_DIV_PCLK_FD 21 +#define CLK_DIV_ACLK_FD 22 +#define CLK_DIV_PCLK_LITE_C 23 +#define CLK_DIV_ACLK_LITE_C 24 + +#define CLK_ACLK_ISP_GIC 25 +#define CLK_ACLK_FD 26 +#define CLK_ACLK_LITE_C 27 +#define CLK_ACLK_CSIS2 28 +#define CLK_ACLK_ASYNCAPBM_FD 29 +#define CLK_ACLK_ASYNCAPBS_FD 30 +#define CLK_ACLK_ASYNCAPBM_LITE_C 31 +#define CLK_ACLK_ASYNCAPBS_LITE_C 32 +#define CLK_ACLK_ASYNCAHBS_SFRISP2H2 33 +#define CLK_ACLK_ASYNCAHBS_SFRISP2H1 34 +#define CLK_ACLK_ASYNCAXIM_CA5 35 +#define CLK_ACLK_ASYNCAXIS_CA5 36 +#define CLK_ACLK_ASYNCAXIS_ISPX2 37 +#define CLK_ACLK_ASYNCAXIS_ISPX1 38 +#define CLK_ACLK_ASYNCAXIS_ISPX0 39 +#define CLK_ACLK_ASYNCAXIM_ISPEX 40 +#define CLK_ACLK_ASYNCAXIM_ISP3P 41 +#define CLK_ACLK_ASYNCAXIS_ISP3P 42 +#define CLK_ACLK_ASYNCAXIM_FD 43 +#define CLK_ACLK_ASYNCAXIS_FD 44 +#define CLK_ACLK_ASYNCAXIM_LITE_C 45 +#define CLK_ACLK_ASYNCAXIS_LITE_C 46 +#define CLK_ACLK_AHB2APB_ISP5P 47 +#define CLK_ACLK_AHB2APB_ISP3P 48 +#define CLK_ACLK_AXI2APB_ISP3P 49 +#define CLK_ACLK_AHB_SFRISP2H 50 +#define CLK_ACLK_AXI_ISP_HX_R 51 +#define CLK_ACLK_AXI_ISP_CX_R 52 +#define CLK_ACLK_AXI_ISP_HX 53 +#define CLK_ACLK_AXI_ISP_CX 54 +#define CLK_ACLK_XIU_ISPX 55 +#define CLK_ACLK_XIU_ISPEX 56 +#define CLK_ACLK_CAM1NP_333 57 +#define CLK_ACLK_CAM1ND_400 58 +#define CLK_ACLK_SMMU_ISPCPU 59 +#define CLK_ACLK_SMMU_FD 60 +#define CLK_ACLK_SMMU_LITE_C 61 +#define CLK_ACLK_BTS_ISP3P 62 +#define CLK_ACLK_BTS_FD 63 
+#define CLK_ACLK_BTS_LITE_C 64 +#define CLK_ACLK_AHBDN_SFRISP2H 65 +#define CLK_ACLK_AHBDN_ISP5P 66 +#define CLK_ACLK_AXIUS_ISP3P 67 +#define CLK_ACLK_AXIUS_FD 68 +#define CLK_ACLK_AXIUS_LITE_C 69 +#define CLK_PCLK_SMMU_ISPCPU 70 +#define CLK_PCLK_SMMU_FD 71 +#define CLK_PCLK_SMMU_LITE_C 72 +#define CLK_PCLK_BTS_ISP3P 73 +#define CLK_PCLK_BTS_FD 74 +#define CLK_PCLK_BTS_LITE_C 75 +#define CLK_PCLK_ASYNCAXIM_CA5 76 +#define CLK_PCLK_ASYNCAXIM_ISPEX 77 +#define CLK_PCLK_ASYNCAXIM_ISP3P 78 +#define CLK_PCLK_ASYNCAXIM_FD 79 +#define CLK_PCLK_ASYNCAXIM_LITE_C 80 +#define CLK_PCLK_PMU_CAM1 81 +#define CLK_PCLK_SYSREG_CAM1 82 +#define CLK_PCLK_CMU_CAM1_LOCAL 83 +#define CLK_PCLK_ISP_MCTADC 84 +#define CLK_PCLK_ISP_WDT 85 +#define CLK_PCLK_ISP_PWM 86 +#define CLK_PCLK_ISP_UART 87 +#define CLK_PCLK_ISP_MCUCTL 88 +#define CLK_PCLK_ISP_SPI1 89 +#define CLK_PCLK_ISP_SPI0 90 +#define CLK_PCLK_ISP_I2C2 91 +#define CLK_PCLK_ISP_I2C1 92 +#define CLK_PCLK_ISP_I2C0 93 +#define CLK_PCLK_ISP_MPWM 94 +#define CLK_PCLK_FD 95 +#define CLK_PCLK_LITE_C 96 +#define CLK_PCLK_CSIS2 97 +#define CLK_SCLK_ISP_I2C2 98 +#define CLK_SCLK_ISP_I2C1 99 +#define CLK_SCLK_ISP_I2C0 100 +#define CLK_SCLK_ISP_PWM 101 +#define CLK_PHYCLK_RXBYTECLKHS0_S2B 102 +#define CLK_SCLK_LITE_C_FREECNT 103 +#define CLK_SCLK_PIXELASYNCM_FD 104 +#define CLK_SCLK_ISP_MCTADC 105 +#define CLK_SCLK_ISP_UART 106 +#define CLK_SCLK_ISP_SPI1 107 +#define CLK_SCLK_ISP_SPI0 108 +#define CLK_SCLK_ISP_MPWM 109 +#define CLK_PCLK_DBG_ISP 110 +#define CLK_ATCLK_ISP 111 +#define CLK_SCLK_ISP_CA5 112 + +#define CAM1_NR_CLK 113 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */ diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h new file mode 100644 index 000000000..10c558611 --- /dev/null +++ b/include/dt-bindings/clock/exynos7-clk.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Naveen Krishna Ch + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS7_H +#define _DT_BINDINGS_CLOCK_EXYNOS7_H + +/* TOPC */ +#define DOUT_ACLK_PERIS 1 +#define DOUT_SCLK_BUS0_PLL 2 +#define DOUT_SCLK_BUS1_PLL 3 +#define DOUT_SCLK_CC_PLL 4 +#define DOUT_SCLK_MFC_PLL 5 +#define DOUT_ACLK_CCORE_133 6 +#define DOUT_ACLK_MSCL_532 7 +#define ACLK_MSCL_532 8 +#define DOUT_SCLK_AUD_PLL 9 +#define FOUT_AUD_PLL 10 +#define SCLK_AUD_PLL 11 +#define SCLK_MFC_PLL_B 12 +#define SCLK_MFC_PLL_A 13 +#define SCLK_BUS1_PLL_B 14 +#define SCLK_BUS1_PLL_A 15 +#define SCLK_BUS0_PLL_B 16 +#define SCLK_BUS0_PLL_A 17 +#define SCLK_CC_PLL_B 18 +#define SCLK_CC_PLL_A 19 +#define ACLK_CCORE_133 20 +#define ACLK_PERIS_66 21 +#define TOPC_NR_CLK 22 + +/* TOP0 */ +#define DOUT_ACLK_PERIC1 1 +#define DOUT_ACLK_PERIC0 2 +#define CLK_SCLK_UART0 3 +#define CLK_SCLK_UART1 4 +#define CLK_SCLK_UART2 5 +#define CLK_SCLK_UART3 6 +#define CLK_SCLK_SPI0 7 +#define CLK_SCLK_SPI1 8 +#define CLK_SCLK_SPI2 9 +#define CLK_SCLK_SPI3 10 +#define CLK_SCLK_SPI4 11 +#define CLK_SCLK_SPDIF 12 +#define CLK_SCLK_PCM1 13 +#define CLK_SCLK_I2S1 14 +#define CLK_ACLK_PERIC0_66 15 +#define CLK_ACLK_PERIC1_66 16 +#define TOP0_NR_CLK 17 + +/* TOP1 */ +#define DOUT_ACLK_FSYS1_200 1 +#define DOUT_ACLK_FSYS0_200 2 +#define DOUT_SCLK_MMC2 3 +#define DOUT_SCLK_MMC1 4 +#define DOUT_SCLK_MMC0 5 +#define CLK_SCLK_MMC2 6 +#define CLK_SCLK_MMC1 7 +#define CLK_SCLK_MMC0 8 +#define CLK_ACLK_FSYS0_200 9 +#define CLK_ACLK_FSYS1_200 10 +#define CLK_SCLK_PHY_FSYS1 11 +#define CLK_SCLK_PHY_FSYS1_26M 12 +#define MOUT_SCLK_UFSUNIPRO20 13 +#define DOUT_SCLK_UFSUNIPRO20 14 +#define CLK_SCLK_UFSUNIPRO20 15 +#define DOUT_SCLK_PHY_FSYS1 16 +#define DOUT_SCLK_PHY_FSYS1_26M 17 +#define TOP1_NR_CLK 18 + +/* CCORE */ +#define PCLK_RTC 1 +#define CCORE_NR_CLK 2 + +/* PERIC0 */ +#define PCLK_UART0 1 +#define SCLK_UART0 2 +#define PCLK_HSI2C0 3 +#define PCLK_HSI2C1 4 +#define PCLK_HSI2C4 5 +#define PCLK_HSI2C5 6 +#define PCLK_HSI2C9 7 +#define PCLK_HSI2C10 8 +#define PCLK_HSI2C11 9 +#define PCLK_PWM 10 +#define SCLK_PWM 11 +#define PCLK_ADCIF 12 +#define PERIC0_NR_CLK 13 + +/* PERIC1 */ +#define PCLK_UART1 1 +#define PCLK_UART2 2 +#define PCLK_UART3 3 +#define SCLK_UART1 4 +#define SCLK_UART2 5 +#define SCLK_UART3 6 +#define PCLK_HSI2C2 7 +#define PCLK_HSI2C3 8 +#define PCLK_HSI2C6 9 +#define PCLK_HSI2C7 10 +#define PCLK_HSI2C8 11 +#define PCLK_SPI0 12 +#define PCLK_SPI1 13 +#define PCLK_SPI2 14 +#define PCLK_SPI3 15 +#define PCLK_SPI4 16 +#define SCLK_SPI0 17 +#define SCLK_SPI1 18 +#define SCLK_SPI2 19 +#define SCLK_SPI3 20 +#define SCLK_SPI4 21 +#define PCLK_I2S1 22 +#define PCLK_PCM1 23 +#define PCLK_SPDIF 24 +#define SCLK_I2S1 25 +#define SCLK_PCM1 26 +#define SCLK_SPDIF 27 +#define PERIC1_NR_CLK 28 + +/* PERIS */ +#define PCLK_CHIPID 1 +#define SCLK_CHIPID 2 +#define PCLK_WDT 3 +#define PCLK_TMU 4 +#define SCLK_TMU 5 +#define PERIS_NR_CLK 6 + +/* FSYS0 */ +#define ACLK_MMC2 1 +#define ACLK_AXIUS_USBDRD30X_FSYS0X 2 +#define ACLK_USBDRD300 3 +#define SCLK_USBDRD300_SUSPENDCLK 4 +#define SCLK_USBDRD300_REFCLK 5 +#define PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER 6 +#define PHYCLK_USBDRD300_UDRD30_PHYCLK_USER 7 +#define OSCCLK_PHY_CLKOUT_USB30_PHY 8 +#define ACLK_PDMA0 9 +#define ACLK_PDMA1 10 +#define FSYS0_NR_CLK 11 + +/* FSYS1 */ +#define ACLK_MMC1 1 +#define ACLK_MMC0 2 +#define PHYCLK_UFS20_TX0_SYMBOL 3 +#define PHYCLK_UFS20_RX0_SYMBOL 4 +#define PHYCLK_UFS20_RX1_SYMBOL 5 +#define ACLK_UFS20_LINK 6 +#define SCLK_UFSUNIPRO20_USER 7 +#define PHYCLK_UFS20_RX1_SYMBOL_USER 8 +#define PHYCLK_UFS20_RX0_SYMBOL_USER 
9 +#define PHYCLK_UFS20_TX0_SYMBOL_USER 10 +#define OSCCLK_PHY_CLKOUT_EMBEDDED_COMBO_PHY 11 +#define SCLK_COMBO_PHY_EMBEDDED_26M 12 +#define DOUT_PCLK_FSYS1 13 +#define PCLK_GPIO_FSYS1 14 +#define MOUT_FSYS1_PHYCLK_SEL1 15 +#define FSYS1_NR_CLK 16 + +/* MSCL */ +#define USERMUX_ACLK_MSCL_532 1 +#define DOUT_PCLK_MSCL 2 +#define ACLK_MSCL_0 3 +#define ACLK_MSCL_1 4 +#define ACLK_JPEG 5 +#define ACLK_G2D 6 +#define ACLK_LH_ASYNC_SI_MSCL_0 7 +#define ACLK_LH_ASYNC_SI_MSCL_1 8 +#define ACLK_AXI2ACEL_BRIDGE 9 +#define ACLK_XIU_MSCLX_0 10 +#define ACLK_XIU_MSCLX_1 11 +#define ACLK_QE_MSCL_0 12 +#define ACLK_QE_MSCL_1 13 +#define ACLK_QE_JPEG 14 +#define ACLK_QE_G2D 15 +#define ACLK_PPMU_MSCL_0 16 +#define ACLK_PPMU_MSCL_1 17 +#define ACLK_MSCLNP_133 18 +#define ACLK_AHB2APB_MSCL0P 19 +#define ACLK_AHB2APB_MSCL1P 20 + +#define PCLK_MSCL_0 21 +#define PCLK_MSCL_1 22 +#define PCLK_JPEG 23 +#define PCLK_G2D 24 +#define PCLK_QE_MSCL_0 25 +#define PCLK_QE_MSCL_1 26 +#define PCLK_QE_JPEG 27 +#define PCLK_QE_G2D 28 +#define PCLK_PPMU_MSCL_0 29 +#define PCLK_PPMU_MSCL_1 30 +#define PCLK_AXI2ACEL_BRIDGE 31 +#define PCLK_PMU_MSCL 32 +#define MSCL_NR_CLK 33 + +/* AUD */ +#define SCLK_I2S 1 +#define SCLK_PCM 2 +#define PCLK_I2S 3 +#define PCLK_PCM 4 +#define ACLK_ADMA 5 +#define AUD_NR_CLK 6 +#endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */ diff --git a/include/dt-bindings/clock/gxbb-aoclkc.h b/include/dt-bindings/clock/gxbb-aoclkc.h new file mode 100644 index 000000000..9d15e2221 --- /dev/null +++ b/include/dt-bindings/clock/gxbb-aoclkc.h @@ -0,0 +1,67 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_GXBB_AOCLK +#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_GXBB_AOCLK + +#define CLKID_AO_REMOTE 0 +#define CLKID_AO_I2C_MASTER 1 +#define CLKID_AO_I2C_SLAVE 2 +#define CLKID_AO_UART1 3 +#define CLKID_AO_UART2 4 +#define CLKID_AO_IR_BLASTER 5 +#define CLKID_AO_CEC_32K 6 + +#endif diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h new file mode 100644 index 000000000..3979d48c0 --- /dev/null +++ b/include/dt-bindings/clock/gxbb-clkc.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * GXBB clock tree IDs + */ + +#ifndef __GXBB_CLKC_H +#define __GXBB_CLKC_H + +#define CLKID_SYS_PLL 0 +#define CLKID_HDMI_PLL 2 +#define CLKID_FIXED_PLL 3 +#define CLKID_FCLK_DIV2 4 +#define CLKID_FCLK_DIV3 5 +#define CLKID_FCLK_DIV4 6 +#define CLKID_FCLK_DIV5 7 +#define CLKID_FCLK_DIV7 8 +#define CLKID_GP0_PLL 9 +#define CLKID_CLK81 12 +#define CLKID_MPLL0 13 +#define CLKID_MPLL1 14 +#define CLKID_MPLL2 15 +#define CLKID_DDR 16 +#define CLKID_DOS 17 +#define CLKID_ISA 18 +#define CLKID_PL301 19 +#define CLKID_PERIPHS 20 +#define CLKID_SPICC 21 +#define CLKID_I2C 22 +#define CLKID_SAR_ADC 23 +#define CLKID_SMART_CARD 24 +#define CLKID_RNG0 25 +#define CLKID_UART0 26 +#define CLKID_SDHC 27 +#define CLKID_STREAM 28 +#define CLKID_ASYNC_FIFO 29 +#define CLKID_SDIO 30 +#define CLKID_ABUF 31 +#define CLKID_HIU_IFACE 32 +#define CLKID_ASSIST_MISC 33 +#define CLKID_SPI 34 +#define CLKID_ETH 36 +#define CLKID_I2S_SPDIF 35 +#define CLKID_DEMUX 37 +#define CLKID_AIU_GLUE 38 +#define CLKID_IEC958 39 +#define CLKID_I2S_OUT 40 +#define CLKID_AMCLK 41 +#define CLKID_AIFIFO2 42 +#define CLKID_MIXER 43 +#define CLKID_MIXER_IFACE 44 +#define CLKID_ADC 45 +#define CLKID_BLKMV 46 +#define CLKID_AIU 47 +#define CLKID_UART1 48 +#define CLKID_G2D 49 +#define CLKID_USB0 50 +#define CLKID_USB1 51 +#define CLKID_RESET 52 +#define CLKID_NAND 53 +#define CLKID_DOS_PARSER 54 +#define CLKID_USB 55 +#define CLKID_VDIN1 56 +#define CLKID_AHB_ARB0 57 +#define CLKID_EFUSE 58 +#define CLKID_BOOT_ROM 59 +#define CLKID_AHB_DATA_BUS 60 +#define CLKID_AHB_CTRL_BUS 61 +#define CLKID_HDMI_INTR_SYNC 62 +#define CLKID_HDMI_PCLK 63 +#define CLKID_USB1_DDR_BRIDGE 64 +#define CLKID_USB0_DDR_BRIDGE 65 +#define CLKID_MMC_PCLK 66 +#define CLKID_DVIN 67 +#define CLKID_UART2 68 +#define CLKID_SANA 69 +#define CLKID_VPU_INTR 70 +#define CLKID_SEC_AHB_AHB3_BRIDGE 71 +#define CLKID_CLK81_A53 72 +#define CLKID_VCLK2_VENCI0 73 +#define CLKID_VCLK2_VENCI1 74 +#define CLKID_VCLK2_VENCP0 75 +#define CLKID_VCLK2_VENCP1 76 +#define CLKID_GCLK_VENCI_INT0 77 +#define CLKID_GCLK_VENCI_INT 78 +#define CLKID_DAC_CLK 79 +#define CLKID_AOCLK_GATE 80 +#define CLKID_IEC958_GATE 81 +#define CLKID_ENC480P 82 +#define CLKID_RNG1 83 +#define CLKID_GCLK_VENCI_INT1 84 +#define CLKID_VCLK2_VENCLMCC 85 +#define CLKID_VCLK2_VENCL 86 +#define CLKID_VCLK_OTHER 87 +#define CLKID_EDP 88 +#define CLKID_AO_MEDIA_CPU 89 +#define CLKID_AO_AHB_SRAM 90 +#define 
CLKID_AO_AHB_BUS 91 +#define CLKID_AO_IFACE 92 +#define CLKID_AO_I2C 93 +#define CLKID_SD_EMMC_A 94 +#define CLKID_SD_EMMC_B 95 +#define CLKID_SD_EMMC_C 96 +#define CLKID_SAR_ADC_CLK 97 +#define CLKID_SAR_ADC_SEL 98 +#define CLKID_MALI_0_SEL 100 +#define CLKID_MALI_0 102 +#define CLKID_MALI_1_SEL 103 +#define CLKID_MALI_1 105 +#define CLKID_MALI 106 +#define CLKID_CTS_AMCLK 107 +#define CLKID_CTS_MCLK_I958 110 +#define CLKID_CTS_I958 113 +#define CLKID_32K_CLK 114 +#define CLKID_SD_EMMC_A_CLK0 119 +#define CLKID_SD_EMMC_B_CLK0 122 +#define CLKID_SD_EMMC_C_CLK0 125 +#define CLKID_VPU_0_SEL 126 +#define CLKID_VPU_0 128 +#define CLKID_VPU_1_SEL 129 +#define CLKID_VPU_1 131 +#define CLKID_VPU 132 +#define CLKID_VAPB_0_SEL 133 +#define CLKID_VAPB_0 135 +#define CLKID_VAPB_1_SEL 136 +#define CLKID_VAPB_1 138 +#define CLKID_VAPB_SEL 139 +#define CLKID_VAPB 140 +#define CLKID_VDEC_1 153 +#define CLKID_VDEC_HEVC 156 +#define CLKID_GEN_CLK 159 + +#endif /* __GXBB_CLKC_H */ diff --git a/include/dt-bindings/clock/hi3516cv300-clock.h b/include/dt-bindings/clock/hi3516cv300-clock.h new file mode 100644 index 000000000..5ba51b838 --- /dev/null +++ b/include/dt-bindings/clock/hi3516cv300-clock.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __DTS_HI3516CV300_CLOCK_H +#define __DTS_HI3516CV300_CLOCK_H + +/* hi3516CV300 core CRG */ +#define HI3516CV300_APB_CLK 0 +#define HI3516CV300_UART0_CLK 1 +#define HI3516CV300_UART1_CLK 2 +#define HI3516CV300_UART2_CLK 3 +#define HI3516CV300_SPI0_CLK 4 +#define HI3516CV300_SPI1_CLK 5 +#define HI3516CV300_FMC_CLK 6 +#define HI3516CV300_MMC0_CLK 7 +#define HI3516CV300_MMC1_CLK 8 +#define HI3516CV300_MMC2_CLK 9 +#define HI3516CV300_MMC3_CLK 10 +#define HI3516CV300_ETH_CLK 11 +#define HI3516CV300_ETH_MACIF_CLK 12 +#define HI3516CV300_DMAC_CLK 13 +#define HI3516CV300_PWM_CLK 14 +#define HI3516CV300_USB2_BUS_CLK 15 +#define HI3516CV300_USB2_OHCI48M_CLK 16 +#define HI3516CV300_USB2_OHCI12M_CLK 17 +#define HI3516CV300_USB2_OTG_UTMI_CLK 18 +#define HI3516CV300_USB2_HST_PHY_CLK 19 +#define HI3516CV300_USB2_UTMI0_CLK 20 +#define HI3516CV300_USB2_PHY_CLK 21 + +/* hi3516CV300 sysctrl CRG */ +#define HI3516CV300_WDT_CLK 1 + +#endif /* __DTS_HI3516CV300_CLOCK_H */ diff --git a/include/dt-bindings/clock/hi3519-clock.h b/include/dt-bindings/clock/hi3519-clock.h new file mode 100644 index 000000000..14f4d2184 --- /dev/null +++ b/include/dt-bindings/clock/hi3519-clock.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2015 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __DTS_HI3519_CLOCK_H +#define __DTS_HI3519_CLOCK_H + +#define HI3519_FMC_CLK 1 +#define HI3519_SPI0_CLK 2 +#define HI3519_SPI1_CLK 3 +#define HI3519_SPI2_CLK 4 +#define HI3519_UART0_CLK 5 +#define HI3519_UART1_CLK 6 +#define HI3519_UART2_CLK 7 +#define HI3519_UART3_CLK 8 +#define HI3519_UART4_CLK 9 +#define HI3519_PWM_CLK 10 +#define HI3519_DMA_CLK 11 +#define HI3519_IR_CLK 12 +#define HI3519_ETH_PHY_CLK 13 +#define HI3519_ETH_MAC_CLK 14 +#define HI3519_ETH_MACIF_CLK 15 +#define HI3519_USB2_BUS_CLK 16 +#define HI3519_USB2_PORT_CLK 17 +#define HI3519_USB3_CLK 18 + +#endif /* __DTS_HI3519_CLOCK_H */ diff --git a/include/dt-bindings/clock/hi3620-clock.h b/include/dt-bindings/clock/hi3620-clock.h new file mode 100644 index 000000000..21b9d0e2e --- /dev/null +++ b/include/dt-bindings/clock/hi3620-clock.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2012-2013 Hisilicon Limited. + * Copyright (c) 2012-2013 Linaro Limited. + * + * Author: Haojian Zhuang + * Xin Li + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#ifndef __DTS_HI3620_CLOCK_H +#define __DTS_HI3620_CLOCK_H + +#define HI3620_NONE_CLOCK 0 + +/* fixed rate & fixed factor clocks */ +#define HI3620_OSC32K 1 +#define HI3620_OSC26M 2 +#define HI3620_PCLK 3 +#define HI3620_PLL_ARM0 4 +#define HI3620_PLL_ARM1 5 +#define HI3620_PLL_PERI 6 +#define HI3620_PLL_USB 7 +#define HI3620_PLL_HDMI 8 +#define HI3620_PLL_GPU 9 +#define HI3620_RCLK_TCXO 10 +#define HI3620_RCLK_CFGAXI 11 +#define HI3620_RCLK_PICO 12 + +/* mux clocks */ +#define HI3620_TIMER0_MUX 32 +#define HI3620_TIMER1_MUX 33 +#define HI3620_TIMER2_MUX 34 +#define HI3620_TIMER3_MUX 35 +#define HI3620_TIMER4_MUX 36 +#define HI3620_TIMER5_MUX 37 +#define HI3620_TIMER6_MUX 38 +#define HI3620_TIMER7_MUX 39 +#define HI3620_TIMER8_MUX 40 +#define HI3620_TIMER9_MUX 41 +#define HI3620_UART0_MUX 42 +#define HI3620_UART1_MUX 43 +#define HI3620_UART2_MUX 44 +#define HI3620_UART3_MUX 45 +#define HI3620_UART4_MUX 46 +#define HI3620_SPI0_MUX 47 +#define HI3620_SPI1_MUX 48 +#define HI3620_SPI2_MUX 49 +#define HI3620_SAXI_MUX 50 +#define HI3620_PWM0_MUX 51 +#define HI3620_PWM1_MUX 52 +#define HI3620_SD_MUX 53 +#define HI3620_MMC1_MUX 54 +#define HI3620_MMC1_MUX2 55 +#define HI3620_G2D_MUX 56 +#define HI3620_VENC_MUX 57 +#define HI3620_VDEC_MUX 58 +#define HI3620_VPP_MUX 59 +#define HI3620_EDC0_MUX 60 +#define HI3620_LDI0_MUX 61 +#define HI3620_EDC1_MUX 62 +#define HI3620_LDI1_MUX 63 +#define HI3620_RCLK_HSIC 64 +#define HI3620_MMC2_MUX 65 +#define HI3620_MMC3_MUX 66 + +/* divider clocks */ +#define HI3620_SHAREAXI_DIV 128 +#define HI3620_CFGAXI_DIV 129 +#define HI3620_SD_DIV 130 +#define HI3620_MMC1_DIV 131 +#define HI3620_HSIC_DIV 132 +#define HI3620_MMC2_DIV 133 +#define HI3620_MMC3_DIV 134 + +/* gate clocks */ +#define HI3620_TIMERCLK01 160 +#define HI3620_TIMER_RCLK01 161 +#define HI3620_TIMERCLK23 162 +#define HI3620_TIMER_RCLK23 163 +#define HI3620_TIMERCLK45 164 +#define HI3620_TIMERCLK67 165 +#define HI3620_TIMERCLK89 166 +#define HI3620_RTCCLK 167 +#define HI3620_KPC_CLK 168 +#define HI3620_GPIOCLK0 169 +#define HI3620_GPIOCLK1 170 +#define HI3620_GPIOCLK2 171 +#define HI3620_GPIOCLK3 172 +#define HI3620_GPIOCLK4 173 +#define HI3620_GPIOCLK5 174 +#define HI3620_GPIOCLK6 175 +#define HI3620_GPIOCLK7 176 +#define HI3620_GPIOCLK8 177 +#define HI3620_GPIOCLK9 178 +#define HI3620_GPIOCLK10 179 +#define HI3620_GPIOCLK11 180 +#define HI3620_GPIOCLK12 181 +#define HI3620_GPIOCLK13 182 +#define HI3620_GPIOCLK14 183 +#define HI3620_GPIOCLK15 184 +#define HI3620_GPIOCLK16 185 +#define HI3620_GPIOCLK17 186 +#define HI3620_GPIOCLK18 187 +#define HI3620_GPIOCLK19 188 +#define HI3620_GPIOCLK20 189 +#define HI3620_GPIOCLK21 190 +#define HI3620_DPHY0_CLK 191 +#define HI3620_DPHY1_CLK 192 +#define HI3620_DPHY2_CLK 193 +#define HI3620_USBPHY_CLK 194 +#define HI3620_ACP_CLK 195 +#define HI3620_PWMCLK0 196 +#define HI3620_PWMCLK1 197 +#define HI3620_UARTCLK0 198 +#define HI3620_UARTCLK1 199 +#define HI3620_UARTCLK2 200 +#define HI3620_UARTCLK3 201 +#define HI3620_UARTCLK4 202 +#define HI3620_SPICLK0 203 +#define HI3620_SPICLK1 204 +#define HI3620_SPICLK2 205 +#define HI3620_I2CCLK0 206 +#define HI3620_I2CCLK1 207 +#define HI3620_I2CCLK2 208 +#define HI3620_I2CCLK3 209 +#define HI3620_SCI_CLK 210 +#define HI3620_DDRC_PER_CLK 211 +#define HI3620_DMAC_CLK 212 +#define HI3620_USB2DVC_CLK 213 +#define HI3620_SD_CLK 214 +#define HI3620_MMC_CLK1 215 +#define HI3620_MMC_CLK2 216 +#define HI3620_MMC_CLK3 217 +#define HI3620_MCU_CLK 218 + +#define HI3620_SD_CIUCLK 0 +#define HI3620_MMC_CIUCLK1 1 +#define 
HI3620_MMC_CIUCLK2 2 +#define HI3620_MMC_CIUCLK3 3 + +#define HI3620_NR_CLKS 219 + +#endif /* __DTS_HI3620_CLOCK_H */ diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h new file mode 100644 index 000000000..75d583eb8 --- /dev/null +++ b/include/dt-bindings/clock/hi3660-clock.h @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2016-2017 Linaro Ltd. + * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __DTS_HI3660_CLOCK_H +#define __DTS_HI3660_CLOCK_H + +/* fixed rate clocks */ +#define HI3660_CLKIN_SYS 0 +#define HI3660_CLKIN_REF 1 +#define HI3660_CLK_FLL_SRC 2 +#define HI3660_CLK_PPLL0 3 +#define HI3660_CLK_PPLL1 4 +#define HI3660_CLK_PPLL2 5 +#define HI3660_CLK_PPLL3 6 +#define HI3660_CLK_SCPLL 7 +#define HI3660_PCLK 8 +#define HI3660_CLK_UART0_DBG 9 +#define HI3660_CLK_UART6 10 +#define HI3660_OSC32K 11 +#define HI3660_OSC19M 12 +#define HI3660_CLK_480M 13 +#define HI3660_CLK_INV 14 + +/* clk in crgctrl */ +#define HI3660_FACTOR_UART3 15 +#define HI3660_CLK_FACTOR_MMC 16 +#define HI3660_CLK_GATE_I2C0 17 +#define HI3660_CLK_GATE_I2C1 18 +#define HI3660_CLK_GATE_I2C2 19 +#define HI3660_CLK_GATE_I2C6 20 +#define HI3660_CLK_DIV_SYSBUS 21 +#define HI3660_CLK_DIV_320M 22 +#define HI3660_CLK_DIV_A53 23 +#define HI3660_CLK_GATE_SPI0 24 +#define HI3660_CLK_GATE_SPI2 25 +#define HI3660_PCIEPHY_REF 26 +#define HI3660_CLK_ABB_USB 27 +#define HI3660_HCLK_GATE_SDIO0 28 +#define HI3660_HCLK_GATE_SD 29 +#define HI3660_CLK_GATE_AOMM 30 +#define HI3660_PCLK_GPIO0 31 +#define HI3660_PCLK_GPIO1 32 +#define HI3660_PCLK_GPIO2 33 +#define HI3660_PCLK_GPIO3 34 +#define HI3660_PCLK_GPIO4 35 +#define HI3660_PCLK_GPIO5 36 +#define HI3660_PCLK_GPIO6 37 +#define HI3660_PCLK_GPIO7 38 +#define HI3660_PCLK_GPIO8 39 +#define HI3660_PCLK_GPIO9 40 +#define HI3660_PCLK_GPIO10 41 +#define HI3660_PCLK_GPIO11 42 +#define HI3660_PCLK_GPIO12 43 +#define HI3660_PCLK_GPIO13 44 +#define HI3660_PCLK_GPIO14 45 +#define HI3660_PCLK_GPIO15 46 +#define HI3660_PCLK_GPIO16 47 +#define HI3660_PCLK_GPIO17 48 +#define HI3660_PCLK_GPIO18 49 +#define HI3660_PCLK_GPIO19 50 +#define HI3660_PCLK_GPIO20 51 +#define HI3660_PCLK_GPIO21 52 +#define HI3660_CLK_GATE_SPI3 53 +#define HI3660_CLK_GATE_I2C7 54 +#define HI3660_CLK_GATE_I2C3 55 +#define HI3660_CLK_GATE_SPI1 56 +#define HI3660_CLK_GATE_UART1 57 +#define HI3660_CLK_GATE_UART2 58 +#define HI3660_CLK_GATE_UART4 59 +#define HI3660_CLK_GATE_UART5 60 +#define HI3660_CLK_GATE_I2C4 61 +#define HI3660_CLK_GATE_DMAC 62 +#define HI3660_PCLK_GATE_DSS 63 +#define HI3660_ACLK_GATE_DSS 64 +#define HI3660_CLK_GATE_LDI1 65 +#define HI3660_CLK_GATE_LDI0 66 +#define HI3660_CLK_GATE_VIVOBUS 67 +#define HI3660_CLK_GATE_EDC0 68 +#define HI3660_CLK_GATE_TXDPHY0_CFG 69 +#define HI3660_CLK_GATE_TXDPHY0_REF 70 +#define HI3660_CLK_GATE_TXDPHY1_CFG 71 +#define HI3660_CLK_GATE_TXDPHY1_REF 72 +#define HI3660_ACLK_GATE_USB3OTG 73 +#define HI3660_CLK_GATE_SPI4 74 +#define HI3660_CLK_GATE_SD 75 +#define HI3660_CLK_GATE_SDIO0 76 +#define HI3660_CLK_GATE_UFS_SUBSYS 77 +#define HI3660_PCLK_GATE_DSI0 78 +#define HI3660_PCLK_GATE_DSI1 79 +#define HI3660_ACLK_GATE_PCIE 80 +#define HI3660_PCLK_GATE_PCIE_SYS 81 +#define HI3660_CLK_GATE_PCIEAUX 82 +#define HI3660_PCLK_GATE_PCIE_PHY 83 +#define HI3660_CLK_ANDGT_LDI0 84 
+#define HI3660_CLK_ANDGT_LDI1 85
+#define HI3660_CLK_ANDGT_EDC0 86
+#define HI3660_CLK_GATE_UFSPHY_GT 87
+#define HI3660_CLK_ANDGT_MMC 88
+#define HI3660_CLK_ANDGT_SD 89
+#define HI3660_CLK_A53HPM_ANDGT 90
+#define HI3660_CLK_ANDGT_SDIO 91
+#define HI3660_CLK_ANDGT_UART0 92
+#define HI3660_CLK_ANDGT_UART1 93
+#define HI3660_CLK_ANDGT_UARTH 94
+#define HI3660_CLK_ANDGT_SPI 95
+#define HI3660_CLK_VIVOBUS_ANDGT 96
+#define HI3660_CLK_AOMM_ANDGT 97
+#define HI3660_CLK_320M_PLL_GT 98
+#define HI3660_AUTODIV_EMMC0BUS 99
+#define HI3660_AUTODIV_SYSBUS 100
+#define HI3660_CLK_GATE_UFSPHY_CFG 101
+#define HI3660_CLK_GATE_UFSIO_REF 102
+#define HI3660_CLK_MUX_SYSBUS 103
+#define HI3660_CLK_MUX_UART0 104
+#define HI3660_CLK_MUX_UART1 105
+#define HI3660_CLK_MUX_UARTH 106
+#define HI3660_CLK_MUX_SPI 107
+#define HI3660_CLK_MUX_I2C 108
+#define HI3660_CLK_MUX_MMC_PLL 109
+#define HI3660_CLK_MUX_LDI1 110
+#define HI3660_CLK_MUX_LDI0 111
+#define HI3660_CLK_MUX_SD_PLL 112
+#define HI3660_CLK_MUX_SD_SYS 113
+#define HI3660_CLK_MUX_EDC0 114
+#define HI3660_CLK_MUX_SDIO_SYS 115
+#define HI3660_CLK_MUX_SDIO_PLL 116
+#define HI3660_CLK_MUX_VIVOBUS 117
+#define HI3660_CLK_MUX_A53HPM 118
+#define HI3660_CLK_MUX_320M 119
+#define HI3660_CLK_MUX_IOPERI 120
+#define HI3660_CLK_DIV_UART0 121
+#define HI3660_CLK_DIV_UART1 122
+#define HI3660_CLK_DIV_UARTH 123
+#define HI3660_CLK_DIV_MMC 124
+#define HI3660_CLK_DIV_SD 125
+#define HI3660_CLK_DIV_EDC0 126
+#define HI3660_CLK_DIV_LDI0 127
+#define HI3660_CLK_DIV_SDIO 128
+#define HI3660_CLK_DIV_LDI1 129
+#define HI3660_CLK_DIV_SPI 130
+#define HI3660_CLK_DIV_VIVOBUS 131
+#define HI3660_CLK_DIV_I2C 132
+#define HI3660_CLK_DIV_UFSPHY 133
+#define HI3660_CLK_DIV_CFGBUS 134
+#define HI3660_CLK_DIV_MMC0BUS 135
+#define HI3660_CLK_DIV_MMC1BUS 136
+#define HI3660_CLK_DIV_UFSPERI 137
+#define HI3660_CLK_DIV_AOMM 138
+#define HI3660_CLK_DIV_IOPERI 139
+#define HI3660_VENC_VOLT_HOLD 140
+#define HI3660_PERI_VOLT_HOLD 141
+#define HI3660_CLK_GATE_VENC 142
+#define HI3660_CLK_GATE_VDEC 143
+#define HI3660_CLK_ANDGT_VENC 144
+#define HI3660_CLK_ANDGT_VDEC 145
+#define HI3660_CLK_MUX_VENC 146
+#define HI3660_CLK_MUX_VDEC 147
+#define HI3660_CLK_DIV_VENC 148
+#define HI3660_CLK_DIV_VDEC 149
+#define HI3660_CLK_FAC_ISP_SNCLK 150
+#define HI3660_CLK_GATE_ISP_SNCLK0 151
+#define HI3660_CLK_GATE_ISP_SNCLK1 152
+#define HI3660_CLK_GATE_ISP_SNCLK2 153
+#define HI3660_CLK_ANGT_ISP_SNCLK 154
+#define HI3660_CLK_MUX_ISP_SNCLK 155
+#define HI3660_CLK_DIV_ISP_SNCLK 156
+
+/* clk in pmuctrl */
+#define HI3660_GATE_ABB_192 0
+
+/* clk in pctrl */
+#define HI3660_GATE_UFS_TCXO_EN 0
+#define HI3660_GATE_USB_TCXO_EN 1
+
+/* clk in sctrl */
+#define HI3660_PCLK_AO_GPIO0 0
+#define HI3660_PCLK_AO_GPIO1 1
+#define HI3660_PCLK_AO_GPIO2 2
+#define HI3660_PCLK_AO_GPIO3 3
+#define HI3660_PCLK_AO_GPIO4 4
+#define HI3660_PCLK_AO_GPIO5 5
+#define HI3660_PCLK_AO_GPIO6 6
+#define HI3660_PCLK_GATE_MMBUF 7
+#define HI3660_CLK_GATE_DSS_AXI_MM 8
+#define HI3660_PCLK_MMBUF_ANDGT 9
+#define HI3660_CLK_MMBUF_PLL_ANDGT 10
+#define HI3660_CLK_FLL_MMBUF_ANDGT 11
+#define HI3660_CLK_SYS_MMBUF_ANDGT 12
+#define HI3660_CLK_GATE_PCIEPHY_GT 13
+#define HI3660_ACLK_MUX_MMBUF 14
+#define HI3660_CLK_SW_MMBUF 15
+#define HI3660_CLK_DIV_AOBUS 16
+#define HI3660_PCLK_DIV_MMBUF 17
+#define HI3660_ACLK_DIV_MMBUF 18
+#define HI3660_CLK_DIV_PCIEPHY 19
+
+/* clk in iomcu */
+#define HI3660_CLK_I2C0_IOMCU 0
+#define HI3660_CLK_I2C1_IOMCU 1
+#define HI3660_CLK_I2C2_IOMCU 2
+#define HI3660_CLK_I2C6_IOMCU 3
+#define HI3660_CLK_IOMCU_PERI0 4
+
+/* clk in stub clock */
+#define HI3660_CLK_STUB_CLUSTER0 0
+#define HI3660_CLK_STUB_CLUSTER1 1
+#define HI3660_CLK_STUB_GPU 2
+#define HI3660_CLK_STUB_DDR 3
+#define HI3660_CLK_STUB_NUM 4
+
+#endif /* __DTS_HI3660_CLOCK_H */
diff --git a/include/dt-bindings/clock/hi6220-clock.h b/include/dt-bindings/clock/hi6220-clock.h
new file mode 100644
index 000000000..409cc02cd
--- /dev/null
+++ b/include/dt-bindings/clock/hi6220-clock.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2015 Hisilicon Limited.
+ *
+ * Author: Bintian Wang
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_HI6220_H
+#define __DT_BINDINGS_CLOCK_HI6220_H
+
+/* clk in Hi6220 AO (always on) controller */
+#define HI6220_NONE_CLOCK 0
+
+/* fixed rate clocks */
+#define HI6220_REF32K 1
+#define HI6220_CLK_TCXO 2
+#define HI6220_MMC1_PAD 3
+#define HI6220_MMC2_PAD 4
+#define HI6220_MMC0_PAD 5
+#define HI6220_PLL_BBP 6
+#define HI6220_PLL_GPU 7
+#define HI6220_PLL1_DDR 8
+#define HI6220_PLL_SYS 9
+#define HI6220_PLL_SYS_MEDIA 10
+#define HI6220_DDR_SRC 11
+#define HI6220_PLL_MEDIA 12
+#define HI6220_PLL_DDR 13
+
+/* fixed factor clocks */
+#define HI6220_300M 14
+#define HI6220_150M 15
+#define HI6220_PICOPHY_SRC 16
+#define HI6220_MMC0_SRC_SEL 17
+#define HI6220_MMC1_SRC_SEL 18
+#define HI6220_MMC2_SRC_SEL 19
+#define HI6220_VPU_CODEC 20
+#define HI6220_MMC0_SMP 21
+#define HI6220_MMC1_SMP 22
+#define HI6220_MMC2_SMP 23
+
+/* gate clocks */
+#define HI6220_WDT0_PCLK 24
+#define HI6220_WDT1_PCLK 25
+#define HI6220_WDT2_PCLK 26
+#define HI6220_TIMER0_PCLK 27
+#define HI6220_TIMER1_PCLK 28
+#define HI6220_TIMER2_PCLK 29
+#define HI6220_TIMER3_PCLK 30
+#define HI6220_TIMER4_PCLK 31
+#define HI6220_TIMER5_PCLK 32
+#define HI6220_TIMER6_PCLK 33
+#define HI6220_TIMER7_PCLK 34
+#define HI6220_TIMER8_PCLK 35
+#define HI6220_UART0_PCLK 36
+#define HI6220_RTC0_PCLK 37
+#define HI6220_RTC1_PCLK 38
+#define HI6220_AO_NR_CLKS 39
+
+/* clk in Hi6220 systrl */
+/* gate clock */
+#define HI6220_MMC0_CLK 1
+#define HI6220_MMC0_CIUCLK 2
+#define HI6220_MMC1_CLK 3
+#define HI6220_MMC1_CIUCLK 4
+#define HI6220_MMC2_CLK 5
+#define HI6220_MMC2_CIUCLK 6
+#define HI6220_USBOTG_HCLK 7
+#define HI6220_CLK_PICOPHY 8
+#define HI6220_HIFI 9
+#define HI6220_DACODEC_PCLK 10
+#define HI6220_EDMAC_ACLK 11
+#define HI6220_CS_ATB 12
+#define HI6220_I2C0_CLK 13
+#define HI6220_I2C1_CLK 14
+#define HI6220_I2C2_CLK 15
+#define HI6220_I2C3_CLK 16
+#define HI6220_UART1_PCLK 17
+#define HI6220_UART2_PCLK 18
+#define HI6220_UART3_PCLK 19
+#define HI6220_UART4_PCLK 20
+#define HI6220_SPI_CLK 21
+#define HI6220_TSENSOR_CLK 22
+#define HI6220_MMU_CLK 23
+#define HI6220_HIFI_SEL 24
+#define HI6220_MMC0_SYSPLL 25
+#define HI6220_MMC1_SYSPLL 26
+#define HI6220_MMC2_SYSPLL 27
+#define HI6220_MMC0_SEL 28
+#define HI6220_MMC1_SEL 29
+#define HI6220_BBPPLL_SEL 30
+#define HI6220_MEDIA_PLL_SRC 31
+#define HI6220_MMC2_SEL 32
+#define HI6220_CS_ATB_SYSPLL 33
+
+/* mux clocks */
+#define HI6220_MMC0_SRC 34
+#define HI6220_MMC0_SMP_IN 35
+#define HI6220_MMC1_SRC 36
+#define HI6220_MMC1_SMP_IN 37
+#define HI6220_MMC2_SRC 38
+#define HI6220_MMC2_SMP_IN 39
+#define HI6220_HIFI_SRC 40
+#define HI6220_UART1_SRC 41
+#define HI6220_UART2_SRC 42
+#define HI6220_UART3_SRC 43
+#define HI6220_UART4_SRC 44
+#define HI6220_MMC0_MUX0 45
+#define HI6220_MMC1_MUX0 46
+#define HI6220_MMC2_MUX0 47
+#define HI6220_MMC0_MUX1 48
+#define HI6220_MMC1_MUX1 49 +#define HI6220_MMC2_MUX1 50 + +/* divider clocks */ +#define HI6220_CLK_BUS 51 +#define HI6220_MMC0_DIV 52 +#define HI6220_MMC1_DIV 53 +#define HI6220_MMC2_DIV 54 +#define HI6220_HIFI_DIV 55 +#define HI6220_BBPPLL0_DIV 56 +#define HI6220_CS_DAPB 57 +#define HI6220_CS_ATB_DIV 58 + +/* gate clock */ +#define HI6220_DAPB_CLK 59 + +#define HI6220_SYS_NR_CLKS 60 + +/* clk in Hi6220 media controller */ +/* gate clocks */ +#define HI6220_DSI_PCLK 1 +#define HI6220_G3D_PCLK 2 +#define HI6220_ACLK_CODEC_VPU 3 +#define HI6220_ISP_SCLK 4 +#define HI6220_ADE_CORE 5 +#define HI6220_MED_MMU 6 +#define HI6220_CFG_CSI4PHY 7 +#define HI6220_CFG_CSI2PHY 8 +#define HI6220_ISP_SCLK_GATE 9 +#define HI6220_ISP_SCLK_GATE1 10 +#define HI6220_ADE_CORE_GATE 11 +#define HI6220_CODEC_VPU_GATE 12 +#define HI6220_MED_SYSPLL 13 + +/* mux clocks */ +#define HI6220_1440_1200 14 +#define HI6220_1000_1200 15 +#define HI6220_1000_1440 16 + +/* divider clocks */ +#define HI6220_CODEC_JPEG 17 +#define HI6220_ISP_SCLK_SRC 18 +#define HI6220_ISP_SCLK1 19 +#define HI6220_ADE_CORE_SRC 20 +#define HI6220_ADE_PIX_SRC 21 +#define HI6220_G3D_CLK 22 +#define HI6220_CODEC_VPU_SRC 23 + +#define HI6220_MEDIA_NR_CLKS 24 + +/* clk in Hi6220 power controller */ +/* gate clocks */ +#define HI6220_PLL_GPU_GATE 1 +#define HI6220_PLL1_DDR_GATE 2 +#define HI6220_PLL_DDR_GATE 3 +#define HI6220_PLL_MEDIA_GATE 4 +#define HI6220_PLL0_BBP_GATE 5 + +/* divider clocks */ +#define HI6220_DDRC_SRC 6 +#define HI6220_DDRC_AXI1 7 + +#define HI6220_POWER_NR_CLKS 8 + +/* clk in Hi6220 acpu sctrl */ +#define HI6220_ACPU_SFT_AT_S 0 + +#endif diff --git a/include/dt-bindings/clock/hip04-clock.h b/include/dt-bindings/clock/hip04-clock.h new file mode 100644 index 000000000..695e61cd1 --- /dev/null +++ b/include/dt-bindings/clock/hip04-clock.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2013-2014 Hisilicon Limited. + * Copyright (c) 2013-2014 Linaro Limited. + * + * Author: Haojian Zhuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#ifndef __DTS_HIP04_CLOCK_H +#define __DTS_HIP04_CLOCK_H + +#define HIP04_NONE_CLOCK 0 + +/* fixed rate & fixed factor clocks */ +#define HIP04_OSC50M 1 +#define HIP04_CLK_50M 2 +#define HIP04_CLK_168M 3 + +#define HIP04_NR_CLKS 64 + +#endif /* __DTS_HIP04_CLOCK_H */ diff --git a/include/dt-bindings/clock/histb-clock.h b/include/dt-bindings/clock/histb-clock.h new file mode 100644 index 000000000..136de2473 --- /dev/null +++ b/include/dt-bindings/clock/histb-clock.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DTS_HISTB_CLOCK_H
+#define __DTS_HISTB_CLOCK_H
+
+/* clocks provided by core CRG */
+#define HISTB_OSC_CLK 0
+#define HISTB_APB_CLK 1
+#define HISTB_AHB_CLK 2
+#define HISTB_UART1_CLK 3
+#define HISTB_UART2_CLK 4
+#define HISTB_UART3_CLK 5
+#define HISTB_I2C0_CLK 6
+#define HISTB_I2C1_CLK 7
+#define HISTB_I2C2_CLK 8
+#define HISTB_I2C3_CLK 9
+#define HISTB_I2C4_CLK 10
+#define HISTB_I2C5_CLK 11
+#define HISTB_SPI0_CLK 12
+#define HISTB_SPI1_CLK 13
+#define HISTB_SPI2_CLK 14
+#define HISTB_SCI_CLK 15
+#define HISTB_FMC_CLK 16
+#define HISTB_MMC_BIU_CLK 17
+#define HISTB_MMC_CIU_CLK 18
+#define HISTB_MMC_DRV_CLK 19
+#define HISTB_MMC_SAMPLE_CLK 20
+#define HISTB_SDIO0_BIU_CLK 21
+#define HISTB_SDIO0_CIU_CLK 22
+#define HISTB_SDIO0_DRV_CLK 23
+#define HISTB_SDIO0_SAMPLE_CLK 24
+#define HISTB_PCIE_AUX_CLK 25
+#define HISTB_PCIE_PIPE_CLK 26
+#define HISTB_PCIE_SYS_CLK 27
+#define HISTB_PCIE_BUS_CLK 28
+#define HISTB_ETH0_MAC_CLK 29
+#define HISTB_ETH0_MACIF_CLK 30
+#define HISTB_ETH1_MAC_CLK 31
+#define HISTB_ETH1_MACIF_CLK 32
+#define HISTB_COMBPHY1_CLK 33
+#define HISTB_USB2_BUS_CLK 34
+#define HISTB_USB2_PHY_CLK 35
+#define HISTB_USB2_UTMI_CLK 36
+#define HISTB_USB2_12M_CLK 37
+#define HISTB_USB2_48M_CLK 38
+#define HISTB_USB2_OTG_UTMI_CLK 39
+#define HISTB_USB2_PHY1_REF_CLK 40
+#define HISTB_USB2_PHY2_REF_CLK 41
+#define HISTB_COMBPHY0_CLK 42
+#define HISTB_USB3_BUS_CLK 43
+#define HISTB_USB3_UTMI_CLK 44
+#define HISTB_USB3_PIPE_CLK 45
+#define HISTB_USB3_SUSPEND_CLK 46
+#define HISTB_USB3_BUS_CLK1 47
+#define HISTB_USB3_UTMI_CLK1 48
+#define HISTB_USB3_PIPE_CLK1 49
+#define HISTB_USB3_SUSPEND_CLK1 50
+
+/* clocks provided by mcu CRG */
+#define HISTB_MCE_CLK 1
+#define HISTB_IR_CLK 2
+#define HISTB_TIMER01_CLK 3
+#define HISTB_LEDC_CLK 4
+#define HISTB_UART0_CLK 5
+#define HISTB_LSADC_CLK 6
+
+#endif /* __DTS_HISTB_CLOCK_H */
diff --git a/include/dt-bindings/clock/hix5hd2-clock.h b/include/dt-bindings/clock/hix5hd2-clock.h
new file mode 100644
index 000000000..fd29c174b
--- /dev/null
+++ b/include/dt-bindings/clock/hix5hd2-clock.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */ + +#ifndef __DTS_HIX5HD2_CLOCK_H +#define __DTS_HIX5HD2_CLOCK_H + +/* fixed rate */ +#define HIX5HD2_FIXED_1200M 1 +#define HIX5HD2_FIXED_400M 2 +#define HIX5HD2_FIXED_48M 3 +#define HIX5HD2_FIXED_24M 4 +#define HIX5HD2_FIXED_600M 5 +#define HIX5HD2_FIXED_300M 6 +#define HIX5HD2_FIXED_75M 7 +#define HIX5HD2_FIXED_200M 8 +#define HIX5HD2_FIXED_100M 9 +#define HIX5HD2_FIXED_40M 10 +#define HIX5HD2_FIXED_150M 11 +#define HIX5HD2_FIXED_1728M 12 +#define HIX5HD2_FIXED_28P8M 13 +#define HIX5HD2_FIXED_432M 14 +#define HIX5HD2_FIXED_345P6M 15 +#define HIX5HD2_FIXED_288M 16 +#define HIX5HD2_FIXED_60M 17 +#define HIX5HD2_FIXED_750M 18 +#define HIX5HD2_FIXED_500M 19 +#define HIX5HD2_FIXED_54M 20 +#define HIX5HD2_FIXED_27M 21 +#define HIX5HD2_FIXED_1500M 22 +#define HIX5HD2_FIXED_375M 23 +#define HIX5HD2_FIXED_187M 24 +#define HIX5HD2_FIXED_250M 25 +#define HIX5HD2_FIXED_125M 26 +#define HIX5HD2_FIXED_2P02M 27 +#define HIX5HD2_FIXED_50M 28 +#define HIX5HD2_FIXED_25M 29 +#define HIX5HD2_FIXED_83M 30 + +/* mux clocks */ +#define HIX5HD2_SFC_MUX 64 +#define HIX5HD2_MMC_MUX 65 +#define HIX5HD2_FEPHY_MUX 66 +#define HIX5HD2_SD_MUX 67 + +/* gate clocks */ +#define HIX5HD2_SFC_RST 128 +#define HIX5HD2_SFC_CLK 129 +#define HIX5HD2_MMC_CIU_CLK 130 +#define HIX5HD2_MMC_BIU_CLK 131 +#define HIX5HD2_MMC_CIU_RST 132 +#define HIX5HD2_FWD_BUS_CLK 133 +#define HIX5HD2_FWD_SYS_CLK 134 +#define HIX5HD2_MAC0_PHY_CLK 135 +#define HIX5HD2_SD_CIU_CLK 136 +#define HIX5HD2_SD_BIU_CLK 137 +#define HIX5HD2_SD_CIU_RST 138 +#define HIX5HD2_WDG0_CLK 139 +#define HIX5HD2_WDG0_RST 140 +#define HIX5HD2_I2C0_CLK 141 +#define HIX5HD2_I2C0_RST 142 +#define HIX5HD2_I2C1_CLK 143 +#define HIX5HD2_I2C1_RST 144 +#define HIX5HD2_I2C2_CLK 145 +#define HIX5HD2_I2C2_RST 146 +#define HIX5HD2_I2C3_CLK 147 +#define HIX5HD2_I2C3_RST 148 +#define HIX5HD2_I2C4_CLK 149 +#define HIX5HD2_I2C4_RST 150 +#define HIX5HD2_I2C5_CLK 151 +#define HIX5HD2_I2C5_RST 152 + +/* complex */ +#define HIX5HD2_MAC0_CLK 192 +#define HIX5HD2_MAC1_CLK 193 +#define HIX5HD2_SATA_CLK 194 +#define HIX5HD2_USB_CLK 195 + +#define HIX5HD2_NR_CLKS 256 +#endif /* __DTS_HIX5HD2_CLOCK_H */ diff --git a/include/dt-bindings/clock/imx1-clock.h b/include/dt-bindings/clock/imx1-clock.h new file mode 100644 index 000000000..607bf01a3 --- /dev/null +++ b/include/dt-bindings/clock/imx1-clock.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2014 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX1_H +#define __DT_BINDINGS_CLOCK_IMX1_H + +#define IMX1_CLK_DUMMY 0 +#define IMX1_CLK_CLK32 1 +#define IMX1_CLK_CLK16M_EXT 2 +#define IMX1_CLK_CLK16M 3 +#define IMX1_CLK_CLK32_PREMULT 4 +#define IMX1_CLK_PREM 5 +#define IMX1_CLK_MPLL 6 +#define IMX1_CLK_MPLL_GATE 7 +#define IMX1_CLK_SPLL 8 +#define IMX1_CLK_SPLL_GATE 9 +#define IMX1_CLK_MCU 10 +#define IMX1_CLK_FCLK 11 +#define IMX1_CLK_HCLK 12 +#define IMX1_CLK_CLK48M 13 +#define IMX1_CLK_PER1 14 +#define IMX1_CLK_PER2 15 +#define IMX1_CLK_PER3 16 +#define IMX1_CLK_CLKO 17 +#define IMX1_CLK_UART3_GATE 18 +#define IMX1_CLK_SSI2_GATE 19 +#define IMX1_CLK_BROM_GATE 20 +#define IMX1_CLK_DMA_GATE 21 +#define IMX1_CLK_CSI_GATE 22 +#define IMX1_CLK_MMA_GATE 23 +#define IMX1_CLK_USBD_GATE 24 +#define IMX1_CLK_MAX 25 + +#endif diff --git a/include/dt-bindings/clock/imx21-clock.h b/include/dt-bindings/clock/imx21-clock.h new file mode 100644 index 000000000..b13596cf5 --- /dev/null +++ b/include/dt-bindings/clock/imx21-clock.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2014 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX21_H +#define __DT_BINDINGS_CLOCK_IMX21_H + +#define IMX21_CLK_DUMMY 0 +#define IMX21_CLK_CKIL 1 +#define IMX21_CLK_CKIH 2 +#define IMX21_CLK_FPM 3 +#define IMX21_CLK_CKIH_DIV1P5 4 +#define IMX21_CLK_MPLL_GATE 5 +#define IMX21_CLK_SPLL_GATE 6 +#define IMX21_CLK_FPM_GATE 7 +#define IMX21_CLK_CKIH_GATE 8 +#define IMX21_CLK_MPLL_OSC_SEL 9 +#define IMX21_CLK_IPG 10 +#define IMX21_CLK_HCLK 11 +#define IMX21_CLK_MPLL_SEL 12 +#define IMX21_CLK_SPLL_SEL 13 +#define IMX21_CLK_SSI1_SEL 14 +#define IMX21_CLK_SSI2_SEL 15 +#define IMX21_CLK_USB_DIV 16 +#define IMX21_CLK_FCLK 17 +#define IMX21_CLK_MPLL 18 +#define IMX21_CLK_SPLL 19 +#define IMX21_CLK_NFC_DIV 20 +#define IMX21_CLK_SSI1_DIV 21 +#define IMX21_CLK_SSI2_DIV 22 +#define IMX21_CLK_PER1 23 +#define IMX21_CLK_PER2 24 +#define IMX21_CLK_PER3 25 +#define IMX21_CLK_PER4 26 +#define IMX21_CLK_UART1_IPG_GATE 27 +#define IMX21_CLK_UART2_IPG_GATE 28 +#define IMX21_CLK_UART3_IPG_GATE 29 +#define IMX21_CLK_UART4_IPG_GATE 30 +#define IMX21_CLK_CSPI1_IPG_GATE 31 +#define IMX21_CLK_CSPI2_IPG_GATE 32 +#define IMX21_CLK_SSI1_GATE 33 +#define IMX21_CLK_SSI2_GATE 34 +#define IMX21_CLK_SDHC1_IPG_GATE 35 +#define IMX21_CLK_SDHC2_IPG_GATE 36 +#define IMX21_CLK_GPIO_GATE 37 +#define IMX21_CLK_I2C_GATE 38 +#define IMX21_CLK_DMA_GATE 39 +#define IMX21_CLK_USB_GATE 40 +#define IMX21_CLK_EMMA_GATE 41 +#define IMX21_CLK_SSI2_BAUD_GATE 42 +#define IMX21_CLK_SSI1_BAUD_GATE 43 +#define IMX21_CLK_LCDC_IPG_GATE 44 +#define IMX21_CLK_NFC_GATE 45 +#define IMX21_CLK_LCDC_HCLK_GATE 46 +#define IMX21_CLK_PER4_GATE 47 +#define IMX21_CLK_BMI_GATE 48 +#define IMX21_CLK_USB_HCLK_GATE 49 +#define IMX21_CLK_SLCDC_GATE 50 +#define IMX21_CLK_SLCDC_HCLK_GATE 51 +#define IMX21_CLK_EMMA_HCLK_GATE 52 +#define IMX21_CLK_BROM_GATE 53 +#define IMX21_CLK_DMA_HCLK_GATE 54 +#define IMX21_CLK_CSI_HCLK_GATE 55 +#define IMX21_CLK_CSPI3_IPG_GATE 56 +#define IMX21_CLK_WDOG_GATE 57 +#define IMX21_CLK_GPT1_IPG_GATE 58 +#define IMX21_CLK_GPT2_IPG_GATE 59 +#define IMX21_CLK_GPT3_IPG_GATE 60 +#define IMX21_CLK_PWM_IPG_GATE 61 +#define IMX21_CLK_RTC_GATE 62 +#define IMX21_CLK_KPP_GATE 63 +#define IMX21_CLK_OWIRE_GATE 64 +#define IMX21_CLK_MAX 65 + +#endif diff --git a/include/dt-bindings/clock/imx27-clock.h 
b/include/dt-bindings/clock/imx27-clock.h new file mode 100644 index 000000000..148b053e5 --- /dev/null +++ b/include/dt-bindings/clock/imx27-clock.h @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2014 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX27_H +#define __DT_BINDINGS_CLOCK_IMX27_H + +#define IMX27_CLK_DUMMY 0 +#define IMX27_CLK_CKIH 1 +#define IMX27_CLK_CKIL 2 +#define IMX27_CLK_MPLL 3 +#define IMX27_CLK_SPLL 4 +#define IMX27_CLK_MPLL_MAIN2 5 +#define IMX27_CLK_AHB 6 +#define IMX27_CLK_IPG 7 +#define IMX27_CLK_NFC_DIV 8 +#define IMX27_CLK_PER1_DIV 9 +#define IMX27_CLK_PER2_DIV 10 +#define IMX27_CLK_PER3_DIV 11 +#define IMX27_CLK_PER4_DIV 12 +#define IMX27_CLK_VPU_SEL 13 +#define IMX27_CLK_VPU_DIV 14 +#define IMX27_CLK_USB_DIV 15 +#define IMX27_CLK_CPU_SEL 16 +#define IMX27_CLK_CLKO_SEL 17 +#define IMX27_CLK_CPU_DIV 18 +#define IMX27_CLK_CLKO_DIV 19 +#define IMX27_CLK_SSI1_SEL 20 +#define IMX27_CLK_SSI2_SEL 21 +#define IMX27_CLK_SSI1_DIV 22 +#define IMX27_CLK_SSI2_DIV 23 +#define IMX27_CLK_CLKO_EN 24 +#define IMX27_CLK_SSI2_IPG_GATE 25 +#define IMX27_CLK_SSI1_IPG_GATE 26 +#define IMX27_CLK_SLCDC_IPG_GATE 27 +#define IMX27_CLK_SDHC3_IPG_GATE 28 +#define IMX27_CLK_SDHC2_IPG_GATE 29 +#define IMX27_CLK_SDHC1_IPG_GATE 30 +#define IMX27_CLK_SCC_IPG_GATE 31 +#define IMX27_CLK_SAHARA_IPG_GATE 32 +#define IMX27_CLK_RTC_IPG_GATE 33 +#define IMX27_CLK_PWM_IPG_GATE 34 +#define IMX27_CLK_OWIRE_IPG_GATE 35 +#define IMX27_CLK_LCDC_IPG_GATE 36 +#define IMX27_CLK_KPP_IPG_GATE 37 +#define IMX27_CLK_IIM_IPG_GATE 38 +#define IMX27_CLK_I2C2_IPG_GATE 39 +#define IMX27_CLK_I2C1_IPG_GATE 40 +#define IMX27_CLK_GPT6_IPG_GATE 41 +#define IMX27_CLK_GPT5_IPG_GATE 42 +#define IMX27_CLK_GPT4_IPG_GATE 43 +#define IMX27_CLK_GPT3_IPG_GATE 44 +#define IMX27_CLK_GPT2_IPG_GATE 45 +#define IMX27_CLK_GPT1_IPG_GATE 46 +#define IMX27_CLK_GPIO_IPG_GATE 47 +#define IMX27_CLK_FEC_IPG_GATE 48 +#define IMX27_CLK_EMMA_IPG_GATE 49 +#define IMX27_CLK_DMA_IPG_GATE 50 +#define IMX27_CLK_CSPI3_IPG_GATE 51 +#define IMX27_CLK_CSPI2_IPG_GATE 52 +#define IMX27_CLK_CSPI1_IPG_GATE 53 +#define IMX27_CLK_NFC_BAUD_GATE 54 +#define IMX27_CLK_SSI2_BAUD_GATE 55 +#define IMX27_CLK_SSI1_BAUD_GATE 56 +#define IMX27_CLK_VPU_BAUD_GATE 57 +#define IMX27_CLK_PER4_GATE 58 +#define IMX27_CLK_PER3_GATE 59 +#define IMX27_CLK_PER2_GATE 60 +#define IMX27_CLK_PER1_GATE 61 +#define IMX27_CLK_USB_AHB_GATE 62 +#define IMX27_CLK_SLCDC_AHB_GATE 63 +#define IMX27_CLK_SAHARA_AHB_GATE 64 +#define IMX27_CLK_LCDC_AHB_GATE 65 +#define IMX27_CLK_VPU_AHB_GATE 66 +#define IMX27_CLK_FEC_AHB_GATE 67 +#define IMX27_CLK_EMMA_AHB_GATE 68 +#define IMX27_CLK_EMI_AHB_GATE 69 +#define IMX27_CLK_DMA_AHB_GATE 70 +#define IMX27_CLK_CSI_AHB_GATE 71 +#define IMX27_CLK_BROM_AHB_GATE 72 +#define IMX27_CLK_ATA_AHB_GATE 73 +#define IMX27_CLK_WDOG_IPG_GATE 74 +#define IMX27_CLK_USB_IPG_GATE 75 +#define IMX27_CLK_UART6_IPG_GATE 76 +#define IMX27_CLK_UART5_IPG_GATE 77 +#define IMX27_CLK_UART4_IPG_GATE 78 +#define IMX27_CLK_UART3_IPG_GATE 79 +#define IMX27_CLK_UART2_IPG_GATE 80 +#define IMX27_CLK_UART1_IPG_GATE 81 +#define IMX27_CLK_CKIH_DIV1P5 82 +#define IMX27_CLK_FPM 83 +#define IMX27_CLK_MPLL_OSC_SEL 84 +#define IMX27_CLK_MPLL_SEL 85 +#define IMX27_CLK_SPLL_GATE 86 +#define IMX27_CLK_MSHC_DIV 87 +#define IMX27_CLK_RTIC_IPG_GATE 88 +#define IMX27_CLK_MSHC_IPG_GATE 89 +#define 
IMX27_CLK_RTIC_AHB_GATE 90 +#define IMX27_CLK_MSHC_BAUD_GATE 91 +#define IMX27_CLK_CKIH_GATE 92 +#define IMX27_CLK_MAX 93 + +#endif diff --git a/include/dt-bindings/clock/imx5-clock.h b/include/dt-bindings/clock/imx5-clock.h new file mode 100644 index 000000000..d382fc71a --- /dev/null +++ b/include/dt-bindings/clock/imx5-clock.h @@ -0,0 +1,219 @@ +/* + * Copyright 2013 Lucas Stach, Pengutronix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX5_H +#define __DT_BINDINGS_CLOCK_IMX5_H + +#define IMX5_CLK_DUMMY 0 +#define IMX5_CLK_CKIL 1 +#define IMX5_CLK_OSC 2 +#define IMX5_CLK_CKIH1 3 +#define IMX5_CLK_CKIH2 4 +#define IMX5_CLK_AHB 5 +#define IMX5_CLK_IPG 6 +#define IMX5_CLK_AXI_A 7 +#define IMX5_CLK_AXI_B 8 +#define IMX5_CLK_UART_PRED 9 +#define IMX5_CLK_UART_ROOT 10 +#define IMX5_CLK_ESDHC_A_PRED 11 +#define IMX5_CLK_ESDHC_B_PRED 12 +#define IMX5_CLK_ESDHC_C_SEL 13 +#define IMX5_CLK_ESDHC_D_SEL 14 +#define IMX5_CLK_EMI_SEL 15 +#define IMX5_CLK_EMI_SLOW_PODF 16 +#define IMX5_CLK_NFC_PODF 17 +#define IMX5_CLK_ECSPI_PRED 18 +#define IMX5_CLK_ECSPI_PODF 19 +#define IMX5_CLK_USBOH3_PRED 20 +#define IMX5_CLK_USBOH3_PODF 21 +#define IMX5_CLK_USB_PHY_PRED 22 +#define IMX5_CLK_USB_PHY_PODF 23 +#define IMX5_CLK_CPU_PODF 24 +#define IMX5_CLK_DI_PRED 25 +#define IMX5_CLK_TVE_SEL 27 +#define IMX5_CLK_UART1_IPG_GATE 28 +#define IMX5_CLK_UART1_PER_GATE 29 +#define IMX5_CLK_UART2_IPG_GATE 30 +#define IMX5_CLK_UART2_PER_GATE 31 +#define IMX5_CLK_UART3_IPG_GATE 32 +#define IMX5_CLK_UART3_PER_GATE 33 +#define IMX5_CLK_I2C1_GATE 34 +#define IMX5_CLK_I2C2_GATE 35 +#define IMX5_CLK_GPT_IPG_GATE 36 +#define IMX5_CLK_PWM1_IPG_GATE 37 +#define IMX5_CLK_PWM1_HF_GATE 38 +#define IMX5_CLK_PWM2_IPG_GATE 39 +#define IMX5_CLK_PWM2_HF_GATE 40 +#define IMX5_CLK_GPT_HF_GATE 41 +#define IMX5_CLK_FEC_GATE 42 +#define IMX5_CLK_USBOH3_PER_GATE 43 +#define IMX5_CLK_ESDHC1_IPG_GATE 44 +#define IMX5_CLK_ESDHC2_IPG_GATE 45 +#define IMX5_CLK_ESDHC3_IPG_GATE 46 +#define IMX5_CLK_ESDHC4_IPG_GATE 47 +#define IMX5_CLK_SSI1_IPG_GATE 48 +#define IMX5_CLK_SSI2_IPG_GATE 49 +#define IMX5_CLK_SSI3_IPG_GATE 50 +#define IMX5_CLK_ECSPI1_IPG_GATE 51 +#define IMX5_CLK_ECSPI1_PER_GATE 52 +#define IMX5_CLK_ECSPI2_IPG_GATE 53 +#define IMX5_CLK_ECSPI2_PER_GATE 54 +#define IMX5_CLK_CSPI_IPG_GATE 55 +#define IMX5_CLK_SDMA_GATE 56 +#define IMX5_CLK_EMI_SLOW_GATE 57 +#define IMX5_CLK_IPU_SEL 58 +#define IMX5_CLK_IPU_GATE 59 +#define IMX5_CLK_NFC_GATE 60 +#define IMX5_CLK_IPU_DI1_GATE 61 +#define IMX5_CLK_VPU_SEL 62 +#define IMX5_CLK_VPU_GATE 63 +#define IMX5_CLK_VPU_REFERENCE_GATE 64 +#define IMX5_CLK_UART4_IPG_GATE 65 +#define IMX5_CLK_UART4_PER_GATE 66 +#define IMX5_CLK_UART5_IPG_GATE 67 +#define IMX5_CLK_UART5_PER_GATE 68 +#define IMX5_CLK_TVE_GATE 69 +#define IMX5_CLK_TVE_PRED 70 +#define IMX5_CLK_ESDHC1_PER_GATE 71 +#define IMX5_CLK_ESDHC2_PER_GATE 72 +#define IMX5_CLK_ESDHC3_PER_GATE 73 +#define IMX5_CLK_ESDHC4_PER_GATE 74 +#define IMX5_CLK_USB_PHY_GATE 75 +#define IMX5_CLK_HSI2C_GATE 76 +#define IMX5_CLK_MIPI_HSC1_GATE 77 +#define IMX5_CLK_MIPI_HSC2_GATE 78 +#define IMX5_CLK_MIPI_ESC_GATE 79 +#define IMX5_CLK_MIPI_HSP_GATE 80 +#define IMX5_CLK_LDB_DI1_DIV_3_5 81 +#define IMX5_CLK_LDB_DI1_DIV 82 +#define IMX5_CLK_LDB_DI0_DIV_3_5 83 +#define IMX5_CLK_LDB_DI0_DIV 84 +#define IMX5_CLK_LDB_DI1_GATE 85 +#define IMX5_CLK_CAN2_SERIAL_GATE 86 +#define 
IMX5_CLK_CAN2_IPG_GATE 87 +#define IMX5_CLK_I2C3_GATE 88 +#define IMX5_CLK_LP_APM 89 +#define IMX5_CLK_PERIPH_APM 90 +#define IMX5_CLK_MAIN_BUS 91 +#define IMX5_CLK_AHB_MAX 92 +#define IMX5_CLK_AIPS_TZ1 93 +#define IMX5_CLK_AIPS_TZ2 94 +#define IMX5_CLK_TMAX1 95 +#define IMX5_CLK_TMAX2 96 +#define IMX5_CLK_TMAX3 97 +#define IMX5_CLK_SPBA 98 +#define IMX5_CLK_UART_SEL 99 +#define IMX5_CLK_ESDHC_A_SEL 100 +#define IMX5_CLK_ESDHC_B_SEL 101 +#define IMX5_CLK_ESDHC_A_PODF 102 +#define IMX5_CLK_ESDHC_B_PODF 103 +#define IMX5_CLK_ECSPI_SEL 104 +#define IMX5_CLK_USBOH3_SEL 105 +#define IMX5_CLK_USB_PHY_SEL 106 +#define IMX5_CLK_IIM_GATE 107 +#define IMX5_CLK_USBOH3_GATE 108 +#define IMX5_CLK_EMI_FAST_GATE 109 +#define IMX5_CLK_IPU_DI0_GATE 110 +#define IMX5_CLK_GPC_DVFS 111 +#define IMX5_CLK_PLL1_SW 112 +#define IMX5_CLK_PLL2_SW 113 +#define IMX5_CLK_PLL3_SW 114 +#define IMX5_CLK_IPU_DI0_SEL 115 +#define IMX5_CLK_IPU_DI1_SEL 116 +#define IMX5_CLK_TVE_EXT_SEL 117 +#define IMX5_CLK_MX51_MIPI 118 +#define IMX5_CLK_PLL4_SW 119 +#define IMX5_CLK_LDB_DI1_SEL 120 +#define IMX5_CLK_DI_PLL4_PODF 121 +#define IMX5_CLK_LDB_DI0_SEL 122 +#define IMX5_CLK_LDB_DI0_GATE 123 +#define IMX5_CLK_USB_PHY1_GATE 124 +#define IMX5_CLK_USB_PHY2_GATE 125 +#define IMX5_CLK_PER_LP_APM 126 +#define IMX5_CLK_PER_PRED1 127 +#define IMX5_CLK_PER_PRED2 128 +#define IMX5_CLK_PER_PODF 129 +#define IMX5_CLK_PER_ROOT 130 +#define IMX5_CLK_SSI_APM 131 +#define IMX5_CLK_SSI1_ROOT_SEL 132 +#define IMX5_CLK_SSI2_ROOT_SEL 133 +#define IMX5_CLK_SSI3_ROOT_SEL 134 +#define IMX5_CLK_SSI_EXT1_SEL 135 +#define IMX5_CLK_SSI_EXT2_SEL 136 +#define IMX5_CLK_SSI_EXT1_COM_SEL 137 +#define IMX5_CLK_SSI_EXT2_COM_SEL 138 +#define IMX5_CLK_SSI1_ROOT_PRED 139 +#define IMX5_CLK_SSI1_ROOT_PODF 140 +#define IMX5_CLK_SSI2_ROOT_PRED 141 +#define IMX5_CLK_SSI2_ROOT_PODF 142 +#define IMX5_CLK_SSI_EXT1_PRED 143 +#define IMX5_CLK_SSI_EXT1_PODF 144 +#define IMX5_CLK_SSI_EXT2_PRED 145 +#define IMX5_CLK_SSI_EXT2_PODF 146 +#define IMX5_CLK_SSI1_ROOT_GATE 147 +#define IMX5_CLK_SSI2_ROOT_GATE 148 +#define IMX5_CLK_SSI3_ROOT_GATE 149 +#define IMX5_CLK_SSI_EXT1_GATE 150 +#define IMX5_CLK_SSI_EXT2_GATE 151 +#define IMX5_CLK_EPIT1_IPG_GATE 152 +#define IMX5_CLK_EPIT1_HF_GATE 153 +#define IMX5_CLK_EPIT2_IPG_GATE 154 +#define IMX5_CLK_EPIT2_HF_GATE 155 +#define IMX5_CLK_CAN_SEL 156 +#define IMX5_CLK_CAN1_SERIAL_GATE 157 +#define IMX5_CLK_CAN1_IPG_GATE 158 +#define IMX5_CLK_OWIRE_GATE 159 +#define IMX5_CLK_GPU3D_SEL 160 +#define IMX5_CLK_GPU2D_SEL 161 +#define IMX5_CLK_GPU3D_GATE 162 +#define IMX5_CLK_GPU2D_GATE 163 +#define IMX5_CLK_GARB_GATE 164 +#define IMX5_CLK_CKO1_SEL 165 +#define IMX5_CLK_CKO1_PODF 166 +#define IMX5_CLK_CKO1 167 +#define IMX5_CLK_CKO2_SEL 168 +#define IMX5_CLK_CKO2_PODF 169 +#define IMX5_CLK_CKO2 170 +#define IMX5_CLK_SRTC_GATE 171 +#define IMX5_CLK_PATA_GATE 172 +#define IMX5_CLK_SATA_GATE 173 +#define IMX5_CLK_SPDIF_XTAL_SEL 174 +#define IMX5_CLK_SPDIF0_SEL 175 +#define IMX5_CLK_SPDIF1_SEL 176 +#define IMX5_CLK_SPDIF0_PRED 177 +#define IMX5_CLK_SPDIF0_PODF 178 +#define IMX5_CLK_SPDIF1_PRED 179 +#define IMX5_CLK_SPDIF1_PODF 180 +#define IMX5_CLK_SPDIF0_COM_SEL 181 +#define IMX5_CLK_SPDIF1_COM_SEL 182 +#define IMX5_CLK_SPDIF0_GATE 183 +#define IMX5_CLK_SPDIF1_GATE 184 +#define IMX5_CLK_SPDIF_IPG_GATE 185 +#define IMX5_CLK_OCRAM 186 +#define IMX5_CLK_SAHARA_IPG_GATE 187 +#define IMX5_CLK_SATA_REF 188 +#define IMX5_CLK_STEP_SEL 189 +#define IMX5_CLK_CPU_PODF_SEL 190 +#define IMX5_CLK_ARM 191 +#define IMX5_CLK_FIRI_PRED 192 +#define IMX5_CLK_FIRI_SEL 193 
+#define IMX5_CLK_FIRI_PODF 194 +#define IMX5_CLK_FIRI_SERIAL_GATE 195 +#define IMX5_CLK_FIRI_IPG_GATE 196 +#define IMX5_CLK_CSI0_MCLK1_PRED 197 +#define IMX5_CLK_CSI0_MCLK1_SEL 198 +#define IMX5_CLK_CSI0_MCLK1_PODF 199 +#define IMX5_CLK_CSI0_MCLK1_GATE 200 +#define IMX5_CLK_IEEE1588_PRED 201 +#define IMX5_CLK_IEEE1588_SEL 202 +#define IMX5_CLK_IEEE1588_PODF 203 +#define IMX5_CLK_IEEE1588_GATE 204 +#define IMX5_CLK_END 205 + +#endif /* __DT_BINDINGS_CLOCK_IMX5_H */ diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h new file mode 100644 index 000000000..7ad171b8f --- /dev/null +++ b/include/dt-bindings/clock/imx6qdl-clock.h @@ -0,0 +1,278 @@ +/* + * Copyright 2014 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX6QDL_H +#define __DT_BINDINGS_CLOCK_IMX6QDL_H + +#define IMX6QDL_CLK_DUMMY 0 +#define IMX6QDL_CLK_CKIL 1 +#define IMX6QDL_CLK_CKIH 2 +#define IMX6QDL_CLK_OSC 3 +#define IMX6QDL_CLK_PLL2_PFD0_352M 4 +#define IMX6QDL_CLK_PLL2_PFD1_594M 5 +#define IMX6QDL_CLK_PLL2_PFD2_396M 6 +#define IMX6QDL_CLK_PLL3_PFD0_720M 7 +#define IMX6QDL_CLK_PLL3_PFD1_540M 8 +#define IMX6QDL_CLK_PLL3_PFD2_508M 9 +#define IMX6QDL_CLK_PLL3_PFD3_454M 10 +#define IMX6QDL_CLK_PLL2_198M 11 +#define IMX6QDL_CLK_PLL3_120M 12 +#define IMX6QDL_CLK_PLL3_80M 13 +#define IMX6QDL_CLK_PLL3_60M 14 +#define IMX6QDL_CLK_TWD 15 +#define IMX6QDL_CLK_STEP 16 +#define IMX6QDL_CLK_PLL1_SW 17 +#define IMX6QDL_CLK_PERIPH_PRE 18 +#define IMX6QDL_CLK_PERIPH2_PRE 19 +#define IMX6QDL_CLK_PERIPH_CLK2_SEL 20 +#define IMX6QDL_CLK_PERIPH2_CLK2_SEL 21 +#define IMX6QDL_CLK_AXI_SEL 22 +#define IMX6QDL_CLK_ESAI_SEL 23 +#define IMX6QDL_CLK_ASRC_SEL 24 +#define IMX6QDL_CLK_SPDIF_SEL 25 +#define IMX6QDL_CLK_GPU2D_AXI 26 +#define IMX6QDL_CLK_GPU3D_AXI 27 +#define IMX6QDL_CLK_GPU2D_CORE_SEL 28 +#define IMX6QDL_CLK_GPU3D_CORE_SEL 29 +#define IMX6QDL_CLK_GPU3D_SHADER_SEL 30 +#define IMX6QDL_CLK_IPU1_SEL 31 +#define IMX6QDL_CLK_IPU2_SEL 32 +#define IMX6QDL_CLK_LDB_DI0_SEL 33 +#define IMX6QDL_CLK_LDB_DI1_SEL 34 +#define IMX6QDL_CLK_IPU1_DI0_PRE_SEL 35 +#define IMX6QDL_CLK_IPU1_DI1_PRE_SEL 36 +#define IMX6QDL_CLK_IPU2_DI0_PRE_SEL 37 +#define IMX6QDL_CLK_IPU2_DI1_PRE_SEL 38 +#define IMX6QDL_CLK_IPU1_DI0_SEL 39 +#define IMX6QDL_CLK_IPU1_DI1_SEL 40 +#define IMX6QDL_CLK_IPU2_DI0_SEL 41 +#define IMX6QDL_CLK_IPU2_DI1_SEL 42 +#define IMX6QDL_CLK_HSI_TX_SEL 43 +#define IMX6QDL_CLK_PCIE_AXI_SEL 44 +#define IMX6QDL_CLK_SSI1_SEL 45 +#define IMX6QDL_CLK_SSI2_SEL 46 +#define IMX6QDL_CLK_SSI3_SEL 47 +#define IMX6QDL_CLK_USDHC1_SEL 48 +#define IMX6QDL_CLK_USDHC2_SEL 49 +#define IMX6QDL_CLK_USDHC3_SEL 50 +#define IMX6QDL_CLK_USDHC4_SEL 51 +#define IMX6QDL_CLK_ENFC_SEL 52 +#define IMX6QDL_CLK_EIM_SEL 53 +#define IMX6QDL_CLK_EIM_SLOW_SEL 54 +#define IMX6QDL_CLK_VDO_AXI_SEL 55 +#define IMX6QDL_CLK_VPU_AXI_SEL 56 +#define IMX6QDL_CLK_CKO1_SEL 57 +#define IMX6QDL_CLK_PERIPH 58 +#define IMX6QDL_CLK_PERIPH2 59 +#define IMX6QDL_CLK_PERIPH_CLK2 60 +#define IMX6QDL_CLK_PERIPH2_CLK2 61 +#define IMX6QDL_CLK_IPG 62 +#define IMX6QDL_CLK_IPG_PER 63 +#define IMX6QDL_CLK_ESAI_PRED 64 +#define IMX6QDL_CLK_ESAI_PODF 65 +#define IMX6QDL_CLK_ASRC_PRED 66 +#define IMX6QDL_CLK_ASRC_PODF 67 +#define IMX6QDL_CLK_SPDIF_PRED 68 +#define IMX6QDL_CLK_SPDIF_PODF 69 +#define IMX6QDL_CLK_CAN_ROOT 70 +#define IMX6QDL_CLK_ECSPI_ROOT 71 +#define 
IMX6QDL_CLK_GPU2D_CORE_PODF 72 +#define IMX6QDL_CLK_GPU3D_CORE_PODF 73 +#define IMX6QDL_CLK_GPU3D_SHADER 74 +#define IMX6QDL_CLK_IPU1_PODF 75 +#define IMX6QDL_CLK_IPU2_PODF 76 +#define IMX6QDL_CLK_LDB_DI0_PODF 77 +#define IMX6QDL_CLK_LDB_DI1_PODF 78 +#define IMX6QDL_CLK_IPU1_DI0_PRE 79 +#define IMX6QDL_CLK_IPU1_DI1_PRE 80 +#define IMX6QDL_CLK_IPU2_DI0_PRE 81 +#define IMX6QDL_CLK_IPU2_DI1_PRE 82 +#define IMX6QDL_CLK_HSI_TX_PODF 83 +#define IMX6QDL_CLK_SSI1_PRED 84 +#define IMX6QDL_CLK_SSI1_PODF 85 +#define IMX6QDL_CLK_SSI2_PRED 86 +#define IMX6QDL_CLK_SSI2_PODF 87 +#define IMX6QDL_CLK_SSI3_PRED 88 +#define IMX6QDL_CLK_SSI3_PODF 89 +#define IMX6QDL_CLK_UART_SERIAL_PODF 90 +#define IMX6QDL_CLK_USDHC1_PODF 91 +#define IMX6QDL_CLK_USDHC2_PODF 92 +#define IMX6QDL_CLK_USDHC3_PODF 93 +#define IMX6QDL_CLK_USDHC4_PODF 94 +#define IMX6QDL_CLK_ENFC_PRED 95 +#define IMX6QDL_CLK_ENFC_PODF 96 +#define IMX6QDL_CLK_EIM_PODF 97 +#define IMX6QDL_CLK_EIM_SLOW_PODF 98 +#define IMX6QDL_CLK_VPU_AXI_PODF 99 +#define IMX6QDL_CLK_CKO1_PODF 100 +#define IMX6QDL_CLK_AXI 101 +#define IMX6QDL_CLK_MMDC_CH0_AXI_PODF 102 +#define IMX6QDL_CLK_MMDC_CH1_AXI_PODF 103 +#define IMX6QDL_CLK_ARM 104 +#define IMX6QDL_CLK_AHB 105 +#define IMX6QDL_CLK_APBH_DMA 106 +#define IMX6QDL_CLK_ASRC 107 +#define IMX6QDL_CLK_CAN1_IPG 108 +#define IMX6QDL_CLK_CAN1_SERIAL 109 +#define IMX6QDL_CLK_CAN2_IPG 110 +#define IMX6QDL_CLK_CAN2_SERIAL 111 +#define IMX6QDL_CLK_ECSPI1 112 +#define IMX6QDL_CLK_ECSPI2 113 +#define IMX6QDL_CLK_ECSPI3 114 +#define IMX6QDL_CLK_ECSPI4 115 +#define IMX6Q_CLK_ECSPI5 116 +#define IMX6DL_CLK_I2C4 116 +#define IMX6QDL_CLK_ENET 117 +#define IMX6QDL_CLK_ESAI_EXTAL 118 +#define IMX6QDL_CLK_GPT_IPG 119 +#define IMX6QDL_CLK_GPT_IPG_PER 120 +#define IMX6QDL_CLK_GPU2D_CORE 121 +#define IMX6QDL_CLK_GPU3D_CORE 122 +#define IMX6QDL_CLK_HDMI_IAHB 123 +#define IMX6QDL_CLK_HDMI_ISFR 124 +#define IMX6QDL_CLK_I2C1 125 +#define IMX6QDL_CLK_I2C2 126 +#define IMX6QDL_CLK_I2C3 127 +#define IMX6QDL_CLK_IIM 128 +#define IMX6QDL_CLK_ENFC 129 +#define IMX6QDL_CLK_IPU1 130 +#define IMX6QDL_CLK_IPU1_DI0 131 +#define IMX6QDL_CLK_IPU1_DI1 132 +#define IMX6QDL_CLK_IPU2 133 +#define IMX6QDL_CLK_IPU2_DI0 134 +#define IMX6QDL_CLK_LDB_DI0 135 +#define IMX6QDL_CLK_LDB_DI1 136 +#define IMX6QDL_CLK_IPU2_DI1 137 +#define IMX6QDL_CLK_HSI_TX 138 +#define IMX6QDL_CLK_MLB 139 +#define IMX6QDL_CLK_MMDC_CH0_AXI 140 +#define IMX6QDL_CLK_MMDC_CH1_AXI 141 +#define IMX6QDL_CLK_OCRAM 142 +#define IMX6QDL_CLK_OPENVG_AXI 143 +#define IMX6QDL_CLK_PCIE_AXI 144 +#define IMX6QDL_CLK_PWM1 145 +#define IMX6QDL_CLK_PWM2 146 +#define IMX6QDL_CLK_PWM3 147 +#define IMX6QDL_CLK_PWM4 148 +#define IMX6QDL_CLK_PER1_BCH 149 +#define IMX6QDL_CLK_GPMI_BCH_APB 150 +#define IMX6QDL_CLK_GPMI_BCH 151 +#define IMX6QDL_CLK_GPMI_IO 152 +#define IMX6QDL_CLK_GPMI_APB 153 +#define IMX6QDL_CLK_SATA 154 +#define IMX6QDL_CLK_SDMA 155 +#define IMX6QDL_CLK_SPBA 156 +#define IMX6QDL_CLK_SSI1 157 +#define IMX6QDL_CLK_SSI2 158 +#define IMX6QDL_CLK_SSI3 159 +#define IMX6QDL_CLK_UART_IPG 160 +#define IMX6QDL_CLK_UART_SERIAL 161 +#define IMX6QDL_CLK_USBOH3 162 +#define IMX6QDL_CLK_USDHC1 163 +#define IMX6QDL_CLK_USDHC2 164 +#define IMX6QDL_CLK_USDHC3 165 +#define IMX6QDL_CLK_USDHC4 166 +#define IMX6QDL_CLK_VDO_AXI 167 +#define IMX6QDL_CLK_VPU_AXI 168 +#define IMX6QDL_CLK_CKO1 169 +#define IMX6QDL_CLK_PLL1_SYS 170 +#define IMX6QDL_CLK_PLL2_BUS 171 +#define IMX6QDL_CLK_PLL3_USB_OTG 172 +#define IMX6QDL_CLK_PLL4_AUDIO 173 +#define IMX6QDL_CLK_PLL5_VIDEO 174 +#define IMX6QDL_CLK_PLL8_MLB 175 +#define 
IMX6QDL_CLK_PLL7_USB_HOST 176 +#define IMX6QDL_CLK_PLL6_ENET 177 +#define IMX6QDL_CLK_SSI1_IPG 178 +#define IMX6QDL_CLK_SSI2_IPG 179 +#define IMX6QDL_CLK_SSI3_IPG 180 +#define IMX6QDL_CLK_ROM 181 +#define IMX6QDL_CLK_USBPHY1 182 +#define IMX6QDL_CLK_USBPHY2 183 +#define IMX6QDL_CLK_LDB_DI0_DIV_3_5 184 +#define IMX6QDL_CLK_LDB_DI1_DIV_3_5 185 +#define IMX6QDL_CLK_SATA_REF 186 +#define IMX6QDL_CLK_SATA_REF_100M 187 +#define IMX6QDL_CLK_PCIE_REF 188 +#define IMX6QDL_CLK_PCIE_REF_125M 189 +#define IMX6QDL_CLK_ENET_REF 190 +#define IMX6QDL_CLK_USBPHY1_GATE 191 +#define IMX6QDL_CLK_USBPHY2_GATE 192 +#define IMX6QDL_CLK_PLL4_POST_DIV 193 +#define IMX6QDL_CLK_PLL5_POST_DIV 194 +#define IMX6QDL_CLK_PLL5_VIDEO_DIV 195 +#define IMX6QDL_CLK_EIM_SLOW 196 +#define IMX6QDL_CLK_SPDIF 197 +#define IMX6QDL_CLK_CKO2_SEL 198 +#define IMX6QDL_CLK_CKO2_PODF 199 +#define IMX6QDL_CLK_CKO2 200 +#define IMX6QDL_CLK_CKO 201 +#define IMX6QDL_CLK_VDOA 202 +#define IMX6QDL_CLK_PLL4_AUDIO_DIV 203 +#define IMX6QDL_CLK_LVDS1_SEL 204 +#define IMX6QDL_CLK_LVDS2_SEL 205 +#define IMX6QDL_CLK_LVDS1_GATE 206 +#define IMX6QDL_CLK_LVDS2_GATE 207 +#define IMX6QDL_CLK_ESAI_IPG 208 +#define IMX6QDL_CLK_ESAI_MEM 209 +#define IMX6QDL_CLK_ASRC_IPG 210 +#define IMX6QDL_CLK_ASRC_MEM 211 +#define IMX6QDL_CLK_LVDS1_IN 212 +#define IMX6QDL_CLK_LVDS2_IN 213 +#define IMX6QDL_CLK_ANACLK1 214 +#define IMX6QDL_CLK_ANACLK2 215 +#define IMX6QDL_PLL1_BYPASS_SRC 216 +#define IMX6QDL_PLL2_BYPASS_SRC 217 +#define IMX6QDL_PLL3_BYPASS_SRC 218 +#define IMX6QDL_PLL4_BYPASS_SRC 219 +#define IMX6QDL_PLL5_BYPASS_SRC 220 +#define IMX6QDL_PLL6_BYPASS_SRC 221 +#define IMX6QDL_PLL7_BYPASS_SRC 222 +#define IMX6QDL_CLK_PLL1 223 +#define IMX6QDL_CLK_PLL2 224 +#define IMX6QDL_CLK_PLL3 225 +#define IMX6QDL_CLK_PLL4 226 +#define IMX6QDL_CLK_PLL5 227 +#define IMX6QDL_CLK_PLL6 228 +#define IMX6QDL_CLK_PLL7 229 +#define IMX6QDL_PLL1_BYPASS 230 +#define IMX6QDL_PLL2_BYPASS 231 +#define IMX6QDL_PLL3_BYPASS 232 +#define IMX6QDL_PLL4_BYPASS 233 +#define IMX6QDL_PLL5_BYPASS 234 +#define IMX6QDL_PLL6_BYPASS 235 +#define IMX6QDL_PLL7_BYPASS 236 +#define IMX6QDL_CLK_GPT_3M 237 +#define IMX6QDL_CLK_VIDEO_27M 238 +#define IMX6QDL_CLK_MIPI_CORE_CFG 239 +#define IMX6QDL_CLK_MIPI_IPG 240 +#define IMX6QDL_CLK_CAAM_MEM 241 +#define IMX6QDL_CLK_CAAM_ACLK 242 +#define IMX6QDL_CLK_CAAM_IPG 243 +#define IMX6QDL_CLK_SPDIF_GCLK 244 +#define IMX6QDL_CLK_UART_SEL 245 +#define IMX6QDL_CLK_IPG_PER_SEL 246 +#define IMX6QDL_CLK_ECSPI_SEL 247 +#define IMX6QDL_CLK_CAN_SEL 248 +#define IMX6QDL_CLK_MMDC_CH1_AXI_CG 249 +#define IMX6QDL_CLK_PRE0 250 +#define IMX6QDL_CLK_PRE1 251 +#define IMX6QDL_CLK_PRE2 252 +#define IMX6QDL_CLK_PRE3 253 +#define IMX6QDL_CLK_PRG0_AXI 254 +#define IMX6QDL_CLK_PRG1_AXI 255 +#define IMX6QDL_CLK_PRG0_APB 256 +#define IMX6QDL_CLK_PRG1_APB 257 +#define IMX6QDL_CLK_PRE_AXI 258 +#define IMX6QDL_CLK_MLB_SEL 259 +#define IMX6QDL_CLK_MLB_PODF 260 +#define IMX6QDL_CLK_EPIT1 261 +#define IMX6QDL_CLK_EPIT2 262 +#define IMX6QDL_CLK_END 263 + +#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h new file mode 100644 index 000000000..e14573e29 --- /dev/null +++ b/include/dt-bindings/clock/imx6sl-clock.h @@ -0,0 +1,180 @@ +/* + * Copyright 2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX6SL_H +#define __DT_BINDINGS_CLOCK_IMX6SL_H + +#define IMX6SL_CLK_DUMMY 0 +#define IMX6SL_CLK_CKIL 1 +#define IMX6SL_CLK_OSC 2 +#define IMX6SL_CLK_PLL1_SYS 3 +#define IMX6SL_CLK_PLL2_BUS 4 +#define IMX6SL_CLK_PLL3_USB_OTG 5 +#define IMX6SL_CLK_PLL4_AUDIO 6 +#define IMX6SL_CLK_PLL5_VIDEO 7 +#define IMX6SL_CLK_PLL6_ENET 8 +#define IMX6SL_CLK_PLL7_USB_HOST 9 +#define IMX6SL_CLK_USBPHY1 10 +#define IMX6SL_CLK_USBPHY2 11 +#define IMX6SL_CLK_USBPHY1_GATE 12 +#define IMX6SL_CLK_USBPHY2_GATE 13 +#define IMX6SL_CLK_PLL4_POST_DIV 14 +#define IMX6SL_CLK_PLL5_POST_DIV 15 +#define IMX6SL_CLK_PLL5_VIDEO_DIV 16 +#define IMX6SL_CLK_ENET_REF 17 +#define IMX6SL_CLK_PLL2_PFD0 18 +#define IMX6SL_CLK_PLL2_PFD1 19 +#define IMX6SL_CLK_PLL2_PFD2 20 +#define IMX6SL_CLK_PLL3_PFD0 21 +#define IMX6SL_CLK_PLL3_PFD1 22 +#define IMX6SL_CLK_PLL3_PFD2 23 +#define IMX6SL_CLK_PLL3_PFD3 24 +#define IMX6SL_CLK_PLL2_198M 25 +#define IMX6SL_CLK_PLL3_120M 26 +#define IMX6SL_CLK_PLL3_80M 27 +#define IMX6SL_CLK_PLL3_60M 28 +#define IMX6SL_CLK_STEP 29 +#define IMX6SL_CLK_PLL1_SW 30 +#define IMX6SL_CLK_OCRAM_ALT_SEL 31 +#define IMX6SL_CLK_OCRAM_SEL 32 +#define IMX6SL_CLK_PRE_PERIPH2_SEL 33 +#define IMX6SL_CLK_PRE_PERIPH_SEL 34 +#define IMX6SL_CLK_PERIPH2_CLK2_SEL 35 +#define IMX6SL_CLK_PERIPH_CLK2_SEL 36 +#define IMX6SL_CLK_CSI_SEL 37 +#define IMX6SL_CLK_LCDIF_AXI_SEL 38 +#define IMX6SL_CLK_USDHC1_SEL 39 +#define IMX6SL_CLK_USDHC2_SEL 40 +#define IMX6SL_CLK_USDHC3_SEL 41 +#define IMX6SL_CLK_USDHC4_SEL 42 +#define IMX6SL_CLK_SSI1_SEL 43 +#define IMX6SL_CLK_SSI2_SEL 44 +#define IMX6SL_CLK_SSI3_SEL 45 +#define IMX6SL_CLK_PERCLK_SEL 46 +#define IMX6SL_CLK_PXP_AXI_SEL 47 +#define IMX6SL_CLK_EPDC_AXI_SEL 48 +#define IMX6SL_CLK_GPU2D_OVG_SEL 49 +#define IMX6SL_CLK_GPU2D_SEL 50 +#define IMX6SL_CLK_LCDIF_PIX_SEL 51 +#define IMX6SL_CLK_EPDC_PIX_SEL 52 +#define IMX6SL_CLK_SPDIF0_SEL 53 +#define IMX6SL_CLK_SPDIF1_SEL 54 +#define IMX6SL_CLK_EXTERN_AUDIO_SEL 55 +#define IMX6SL_CLK_ECSPI_SEL 56 +#define IMX6SL_CLK_UART_SEL 57 +#define IMX6SL_CLK_PERIPH 58 +#define IMX6SL_CLK_PERIPH2 59 +#define IMX6SL_CLK_OCRAM_PODF 60 +#define IMX6SL_CLK_PERIPH_CLK2_PODF 61 +#define IMX6SL_CLK_PERIPH2_CLK2_PODF 62 +#define IMX6SL_CLK_IPG 63 +#define IMX6SL_CLK_CSI_PODF 64 +#define IMX6SL_CLK_LCDIF_AXI_PODF 65 +#define IMX6SL_CLK_USDHC1_PODF 66 +#define IMX6SL_CLK_USDHC2_PODF 67 +#define IMX6SL_CLK_USDHC3_PODF 68 +#define IMX6SL_CLK_USDHC4_PODF 69 +#define IMX6SL_CLK_SSI1_PRED 70 +#define IMX6SL_CLK_SSI1_PODF 71 +#define IMX6SL_CLK_SSI2_PRED 72 +#define IMX6SL_CLK_SSI2_PODF 73 +#define IMX6SL_CLK_SSI3_PRED 74 +#define IMX6SL_CLK_SSI3_PODF 75 +#define IMX6SL_CLK_PERCLK 76 +#define IMX6SL_CLK_PXP_AXI_PODF 77 +#define IMX6SL_CLK_EPDC_AXI_PODF 78 +#define IMX6SL_CLK_GPU2D_OVG_PODF 79 +#define IMX6SL_CLK_GPU2D_PODF 80 +#define IMX6SL_CLK_LCDIF_PIX_PRED 81 +#define IMX6SL_CLK_EPDC_PIX_PRED 82 +#define IMX6SL_CLK_LCDIF_PIX_PODF 83 +#define IMX6SL_CLK_EPDC_PIX_PODF 84 +#define IMX6SL_CLK_SPDIF0_PRED 85 +#define IMX6SL_CLK_SPDIF0_PODF 86 +#define IMX6SL_CLK_SPDIF1_PRED 87 +#define IMX6SL_CLK_SPDIF1_PODF 88 +#define IMX6SL_CLK_EXTERN_AUDIO_PRED 89 +#define IMX6SL_CLK_EXTERN_AUDIO_PODF 90 +#define IMX6SL_CLK_ECSPI_ROOT 91 +#define IMX6SL_CLK_UART_ROOT 92 +#define IMX6SL_CLK_AHB 93 +#define IMX6SL_CLK_MMDC_ROOT 94 +#define IMX6SL_CLK_ARM 95 +#define IMX6SL_CLK_ECSPI1 96 +#define IMX6SL_CLK_ECSPI2 97 +#define IMX6SL_CLK_ECSPI3 98 +#define IMX6SL_CLK_ECSPI4 99 +#define IMX6SL_CLK_EPIT1 100 +#define IMX6SL_CLK_EPIT2 101 +#define 
IMX6SL_CLK_EXTERN_AUDIO 102 +#define IMX6SL_CLK_GPT 103 +#define IMX6SL_CLK_GPT_SERIAL 104 +#define IMX6SL_CLK_GPU2D_OVG 105 +#define IMX6SL_CLK_I2C1 106 +#define IMX6SL_CLK_I2C2 107 +#define IMX6SL_CLK_I2C3 108 +#define IMX6SL_CLK_OCOTP 109 +#define IMX6SL_CLK_CSI 110 +#define IMX6SL_CLK_PXP_AXI 111 +#define IMX6SL_CLK_EPDC_AXI 112 +#define IMX6SL_CLK_LCDIF_AXI 113 +#define IMX6SL_CLK_LCDIF_PIX 114 +#define IMX6SL_CLK_EPDC_PIX 115 +#define IMX6SL_CLK_OCRAM 116 +#define IMX6SL_CLK_PWM1 117 +#define IMX6SL_CLK_PWM2 118 +#define IMX6SL_CLK_PWM3 119 +#define IMX6SL_CLK_PWM4 120 +#define IMX6SL_CLK_SDMA 121 +#define IMX6SL_CLK_SPDIF 122 +#define IMX6SL_CLK_SSI1 123 +#define IMX6SL_CLK_SSI2 124 +#define IMX6SL_CLK_SSI3 125 +#define IMX6SL_CLK_UART 126 +#define IMX6SL_CLK_UART_SERIAL 127 +#define IMX6SL_CLK_USBOH3 128 +#define IMX6SL_CLK_USDHC1 129 +#define IMX6SL_CLK_USDHC2 130 +#define IMX6SL_CLK_USDHC3 131 +#define IMX6SL_CLK_USDHC4 132 +#define IMX6SL_CLK_PLL4_AUDIO_DIV 133 +#define IMX6SL_CLK_SPBA 134 +#define IMX6SL_CLK_ENET 135 +#define IMX6SL_CLK_LVDS1_SEL 136 +#define IMX6SL_CLK_LVDS1_OUT 137 +#define IMX6SL_CLK_LVDS1_IN 138 +#define IMX6SL_CLK_ANACLK1 139 +#define IMX6SL_PLL1_BYPASS_SRC 140 +#define IMX6SL_PLL2_BYPASS_SRC 141 +#define IMX6SL_PLL3_BYPASS_SRC 142 +#define IMX6SL_PLL4_BYPASS_SRC 143 +#define IMX6SL_PLL5_BYPASS_SRC 144 +#define IMX6SL_PLL6_BYPASS_SRC 145 +#define IMX6SL_PLL7_BYPASS_SRC 146 +#define IMX6SL_CLK_PLL1 147 +#define IMX6SL_CLK_PLL2 148 +#define IMX6SL_CLK_PLL3 149 +#define IMX6SL_CLK_PLL4 150 +#define IMX6SL_CLK_PLL5 151 +#define IMX6SL_CLK_PLL6 152 +#define IMX6SL_CLK_PLL7 153 +#define IMX6SL_PLL1_BYPASS 154 +#define IMX6SL_PLL2_BYPASS 155 +#define IMX6SL_PLL3_BYPASS 156 +#define IMX6SL_PLL4_BYPASS 157 +#define IMX6SL_PLL5_BYPASS 158 +#define IMX6SL_PLL6_BYPASS 159 +#define IMX6SL_PLL7_BYPASS 160 +#define IMX6SL_CLK_SSI1_IPG 161 +#define IMX6SL_CLK_SSI2_IPG 162 +#define IMX6SL_CLK_SSI3_IPG 163 +#define IMX6SL_CLK_SPDIF_GCLK 164 +#define IMX6SL_CLK_END 165 + +#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */ diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h new file mode 100644 index 000000000..1036475f9 --- /dev/null +++ b/include/dt-bindings/clock/imx6sll-clock.h @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2018 NXP. 
+ * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX6SLL_H +#define __DT_BINDINGS_CLOCK_IMX6SLL_H + +#define IMX6SLL_CLK_DUMMY 0 +#define IMX6SLL_CLK_CKIL 1 +#define IMX6SLL_CLK_OSC 2 +#define IMX6SLL_PLL1_BYPASS_SRC 3 +#define IMX6SLL_PLL2_BYPASS_SRC 4 +#define IMX6SLL_PLL3_BYPASS_SRC 5 +#define IMX6SLL_PLL4_BYPASS_SRC 6 +#define IMX6SLL_PLL5_BYPASS_SRC 7 +#define IMX6SLL_PLL6_BYPASS_SRC 8 +#define IMX6SLL_PLL7_BYPASS_SRC 9 +#define IMX6SLL_CLK_PLL1 10 +#define IMX6SLL_CLK_PLL2 11 +#define IMX6SLL_CLK_PLL3 12 +#define IMX6SLL_CLK_PLL4 13 +#define IMX6SLL_CLK_PLL5 14 +#define IMX6SLL_CLK_PLL6 15 +#define IMX6SLL_CLK_PLL7 16 +#define IMX6SLL_PLL1_BYPASS 17 +#define IMX6SLL_PLL2_BYPASS 18 +#define IMX6SLL_PLL3_BYPASS 19 +#define IMX6SLL_PLL4_BYPASS 20 +#define IMX6SLL_PLL5_BYPASS 21 +#define IMX6SLL_PLL6_BYPASS 22 +#define IMX6SLL_PLL7_BYPASS 23 +#define IMX6SLL_CLK_PLL1_SYS 24 +#define IMX6SLL_CLK_PLL2_BUS 25 +#define IMX6SLL_CLK_PLL3_USB_OTG 26 +#define IMX6SLL_CLK_PLL4_AUDIO 27 +#define IMX6SLL_CLK_PLL5_VIDEO 28 +#define IMX6SLL_CLK_PLL6_ENET 29 +#define IMX6SLL_CLK_PLL7_USB_HOST 30 +#define IMX6SLL_CLK_USBPHY1 31 +#define IMX6SLL_CLK_USBPHY2 32 +#define IMX6SLL_CLK_USBPHY1_GATE 33 +#define IMX6SLL_CLK_USBPHY2_GATE 34 +#define IMX6SLL_CLK_PLL2_PFD0 35 +#define IMX6SLL_CLK_PLL2_PFD1 36 +#define IMX6SLL_CLK_PLL2_PFD2 37 +#define IMX6SLL_CLK_PLL2_PFD3 38 +#define IMX6SLL_CLK_PLL3_PFD0 39 +#define IMX6SLL_CLK_PLL3_PFD1 40 +#define IMX6SLL_CLK_PLL3_PFD2 41 +#define IMX6SLL_CLK_PLL3_PFD3 42 +#define IMX6SLL_CLK_PLL4_POST_DIV 43 +#define IMX6SLL_CLK_PLL4_AUDIO_DIV 44 +#define IMX6SLL_CLK_PLL5_POST_DIV 45 +#define IMX6SLL_CLK_PLL5_VIDEO_DIV 46 +#define IMX6SLL_CLK_PLL2_198M 47 +#define IMX6SLL_CLK_PLL3_120M 48 +#define IMX6SLL_CLK_PLL3_80M 49 +#define IMX6SLL_CLK_PLL3_60M 50 +#define IMX6SLL_CLK_STEP 51 +#define IMX6SLL_CLK_PLL1_SW 52 +#define IMX6SLL_CLK_AXI_ALT_SEL 53 +#define IMX6SLL_CLK_AXI_SEL 54 +#define IMX6SLL_CLK_PERIPH_PRE 55 +#define IMX6SLL_CLK_PERIPH2_PRE 56 +#define IMX6SLL_CLK_PERIPH_CLK2_SEL 57 +#define IMX6SLL_CLK_PERIPH2_CLK2_SEL 58 +#define IMX6SLL_CLK_PERCLK_SEL 59 +#define IMX6SLL_CLK_USDHC1_SEL 60 +#define IMX6SLL_CLK_USDHC2_SEL 61 +#define IMX6SLL_CLK_USDHC3_SEL 62 +#define IMX6SLL_CLK_SSI1_SEL 63 +#define IMX6SLL_CLK_SSI2_SEL 64 +#define IMX6SLL_CLK_SSI3_SEL 65 +#define IMX6SLL_CLK_PXP_SEL 66 +#define IMX6SLL_CLK_LCDIF_PRE_SEL 67 +#define IMX6SLL_CLK_LCDIF_SEL 68 +#define IMX6SLL_CLK_EPDC_PRE_SEL 69 +#define IMX6SLL_CLK_SPDIF_SEL 70 +#define IMX6SLL_CLK_ECSPI_SEL 71 +#define IMX6SLL_CLK_UART_SEL 72 +#define IMX6SLL_CLK_ARM 73 +#define IMX6SLL_CLK_PERIPH 74 +#define IMX6SLL_CLK_PERIPH2 75 +#define IMX6SLL_CLK_PERIPH2_CLK2 76 +#define IMX6SLL_CLK_PERIPH_CLK2 77 +#define IMX6SLL_CLK_MMDC_PODF 78 +#define IMX6SLL_CLK_AXI_PODF 79 +#define IMX6SLL_CLK_AHB 80 +#define IMX6SLL_CLK_IPG 81 +#define IMX6SLL_CLK_PERCLK 82 +#define IMX6SLL_CLK_USDHC1_PODF 83 +#define IMX6SLL_CLK_USDHC2_PODF 84 +#define IMX6SLL_CLK_USDHC3_PODF 85 +#define IMX6SLL_CLK_SSI1_PRED 86 +#define IMX6SLL_CLK_SSI2_PRED 87 +#define IMX6SLL_CLK_SSI3_PRED 88 +#define IMX6SLL_CLK_SSI1_PODF 89 +#define IMX6SLL_CLK_SSI2_PODF 90 +#define IMX6SLL_CLK_SSI3_PODF 91 +#define IMX6SLL_CLK_PXP_PODF 92 +#define IMX6SLL_CLK_LCDIF_PRED 93 +#define IMX6SLL_CLK_LCDIF_PODF 94 +#define IMX6SLL_CLK_EPDC_SEL 95 +#define IMX6SLL_CLK_EPDC_PODF 96 +#define IMX6SLL_CLK_SPDIF_PRED 97 +#define IMX6SLL_CLK_SPDIF_PODF 98 +#define IMX6SLL_CLK_ECSPI_PODF 99 +#define IMX6SLL_CLK_UART_PODF 100 + +/* CCGR 0 */ +#define IMX6SLL_CLK_AIPSTZ1 101 +#define 
IMX6SLL_CLK_AIPSTZ2 102 +#define IMX6SLL_CLK_DCP 103 +#define IMX6SLL_CLK_UART2_IPG 104 +#define IMX6SLL_CLK_UART2_SERIAL 105 + +/* CCGR 1 */ +#define IMX6SLL_CLK_ECSPI1 106 +#define IMX6SLL_CLK_ECSPI2 107 +#define IMX6SLL_CLK_ECSPI3 108 +#define IMX6SLL_CLK_ECSPI4 109 +#define IMX6SLL_CLK_UART3_IPG 110 +#define IMX6SLL_CLK_UART3_SERIAL 111 +#define IMX6SLL_CLK_UART4_IPG 112 +#define IMX6SLL_CLK_UART4_SERIAL 113 +#define IMX6SLL_CLK_EPIT1 114 +#define IMX6SLL_CLK_EPIT2 115 +#define IMX6SLL_CLK_GPT_BUS 116 +#define IMX6SLL_CLK_GPT_SERIAL 117 + +/* CCGR2 */ +#define IMX6SLL_CLK_CSI 118 +#define IMX6SLL_CLK_I2C1 119 +#define IMX6SLL_CLK_I2C2 120 +#define IMX6SLL_CLK_I2C3 121 +#define IMX6SLL_CLK_OCOTP 122 +#define IMX6SLL_CLK_LCDIF_APB 123 +#define IMX6SLL_CLK_PXP 124 + +/* CCGR3 */ +#define IMX6SLL_CLK_UART5_IPG 125 +#define IMX6SLL_CLK_UART5_SERIAL 126 +#define IMX6SLL_CLK_EPDC_AXI 127 +#define IMX6SLL_CLK_EPDC_PIX 128 +#define IMX6SLL_CLK_LCDIF_PIX 129 +#define IMX6SLL_CLK_WDOG1 130 +#define IMX6SLL_CLK_MMDC_P0_FAST 131 +#define IMX6SLL_CLK_MMDC_P0_IPG 132 +#define IMX6SLL_CLK_OCRAM 133 + +/* CCGR4 */ +#define IMX6SLL_CLK_PWM1 134 +#define IMX6SLL_CLK_PWM2 135 +#define IMX6SLL_CLK_PWM3 136 +#define IMX6SLL_CLK_PWM4 137 + +/* CCGR 5 */ +#define IMX6SLL_CLK_ROM 138 +#define IMX6SLL_CLK_SDMA 139 +#define IMX6SLL_CLK_KPP 140 +#define IMX6SLL_CLK_WDOG2 141 +#define IMX6SLL_CLK_SPBA 142 +#define IMX6SLL_CLK_SPDIF 143 +#define IMX6SLL_CLK_SPDIF_GCLK 144 +#define IMX6SLL_CLK_SSI1 145 +#define IMX6SLL_CLK_SSI1_IPG 146 +#define IMX6SLL_CLK_SSI2 147 +#define IMX6SLL_CLK_SSI2_IPG 148 +#define IMX6SLL_CLK_SSI3 149 +#define IMX6SLL_CLK_SSI3_IPG 150 +#define IMX6SLL_CLK_UART1_IPG 151 +#define IMX6SLL_CLK_UART1_SERIAL 152 + +/* CCGR 6 */ +#define IMX6SLL_CLK_USBOH3 153 +#define IMX6SLL_CLK_USDHC1 154 +#define IMX6SLL_CLK_USDHC2 155 +#define IMX6SLL_CLK_USDHC3 156 + +#define IMX6SLL_CLK_IPP_DI0 157 +#define IMX6SLL_CLK_IPP_DI1 158 +#define IMX6SLL_CLK_LDB_DI0_SEL 159 +#define IMX6SLL_CLK_LDB_DI0_DIV_3_5 160 +#define IMX6SLL_CLK_LDB_DI0_DIV_7 161 +#define IMX6SLL_CLK_LDB_DI0_DIV_SEL 162 +#define IMX6SLL_CLK_LDB_DI0 163 +#define IMX6SLL_CLK_LDB_DI1_SEL 164 +#define IMX6SLL_CLK_LDB_DI1_DIV_3_5 165 +#define IMX6SLL_CLK_LDB_DI1_DIV_7 166 +#define IMX6SLL_CLK_LDB_DI1_DIV_SEL 167 +#define IMX6SLL_CLK_LDB_DI1 168 +#define IMX6SLL_CLK_EXTERN_AUDIO_SEL 169 +#define IMX6SLL_CLK_EXTERN_AUDIO_PRED 170 +#define IMX6SLL_CLK_EXTERN_AUDIO_PODF 171 +#define IMX6SLL_CLK_EXTERN_AUDIO 172 + +#define IMX6SLL_CLK_GPIO1 173 +#define IMX6SLL_CLK_GPIO2 174 +#define IMX6SLL_CLK_GPIO3 175 +#define IMX6SLL_CLK_GPIO4 176 +#define IMX6SLL_CLK_GPIO5 177 +#define IMX6SLL_CLK_GPIO6 178 + +#define IMX6SLL_CLK_END 179 + +#endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */ diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h new file mode 100644 index 000000000..cd2d6c570 --- /dev/null +++ b/include/dt-bindings/clock/imx6sx-clock.h @@ -0,0 +1,284 @@ +/* + * Copyright (C) 2014 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX6SX_H +#define __DT_BINDINGS_CLOCK_IMX6SX_H + +#define IMX6SX_CLK_DUMMY 0 +#define IMX6SX_CLK_CKIL 1 +#define IMX6SX_CLK_CKIH 2 +#define IMX6SX_CLK_OSC 3 +#define IMX6SX_CLK_PLL1_SYS 4 +#define IMX6SX_CLK_PLL2_BUS 5 +#define IMX6SX_CLK_PLL3_USB_OTG 6 +#define IMX6SX_CLK_PLL4_AUDIO 7 +#define IMX6SX_CLK_PLL5_VIDEO 8 +#define IMX6SX_CLK_PLL6_ENET 9 +#define IMX6SX_CLK_PLL7_USB_HOST 10 +#define IMX6SX_CLK_USBPHY1 11 +#define IMX6SX_CLK_USBPHY2 12 +#define IMX6SX_CLK_USBPHY1_GATE 13 +#define IMX6SX_CLK_USBPHY2_GATE 14 +#define IMX6SX_CLK_PCIE_REF 15 +#define IMX6SX_CLK_PCIE_REF_125M 16 +#define IMX6SX_CLK_ENET_REF 17 +#define IMX6SX_CLK_PLL2_PFD0 18 +#define IMX6SX_CLK_PLL2_PFD1 19 +#define IMX6SX_CLK_PLL2_PFD2 20 +#define IMX6SX_CLK_PLL2_PFD3 21 +#define IMX6SX_CLK_PLL3_PFD0 22 +#define IMX6SX_CLK_PLL3_PFD1 23 +#define IMX6SX_CLK_PLL3_PFD2 24 +#define IMX6SX_CLK_PLL3_PFD3 25 +#define IMX6SX_CLK_PLL2_198M 26 +#define IMX6SX_CLK_PLL3_120M 27 +#define IMX6SX_CLK_PLL3_80M 28 +#define IMX6SX_CLK_PLL3_60M 29 +#define IMX6SX_CLK_TWD 30 +#define IMX6SX_CLK_PLL4_POST_DIV 31 +#define IMX6SX_CLK_PLL4_AUDIO_DIV 32 +#define IMX6SX_CLK_PLL5_POST_DIV 33 +#define IMX6SX_CLK_PLL5_VIDEO_DIV 34 +#define IMX6SX_CLK_STEP 35 +#define IMX6SX_CLK_PLL1_SW 36 +#define IMX6SX_CLK_OCRAM_SEL 37 +#define IMX6SX_CLK_PERIPH_PRE 38 +#define IMX6SX_CLK_PERIPH2_PRE 39 +#define IMX6SX_CLK_PERIPH_CLK2_SEL 40 +#define IMX6SX_CLK_PERIPH2_CLK2_SEL 41 +#define IMX6SX_CLK_PCIE_AXI_SEL 42 +#define IMX6SX_CLK_GPU_AXI_SEL 43 +#define IMX6SX_CLK_GPU_CORE_SEL 44 +#define IMX6SX_CLK_EIM_SLOW_SEL 45 +#define IMX6SX_CLK_USDHC1_SEL 46 +#define IMX6SX_CLK_USDHC2_SEL 47 +#define IMX6SX_CLK_USDHC3_SEL 48 +#define IMX6SX_CLK_USDHC4_SEL 49 +#define IMX6SX_CLK_SSI1_SEL 50 +#define IMX6SX_CLK_SSI2_SEL 51 +#define IMX6SX_CLK_SSI3_SEL 52 +#define IMX6SX_CLK_QSPI1_SEL 53 +#define IMX6SX_CLK_PERCLK_SEL 54 +#define IMX6SX_CLK_VID_SEL 55 +#define IMX6SX_CLK_ESAI_SEL 56 +#define IMX6SX_CLK_LDB_DI0_DIV_SEL 57 +#define IMX6SX_CLK_LDB_DI1_DIV_SEL 58 +#define IMX6SX_CLK_CAN_SEL 59 +#define IMX6SX_CLK_UART_SEL 60 +#define IMX6SX_CLK_QSPI2_SEL 61 +#define IMX6SX_CLK_LDB_DI1_SEL 62 +#define IMX6SX_CLK_LDB_DI0_SEL 63 +#define IMX6SX_CLK_SPDIF_SEL 64 +#define IMX6SX_CLK_AUDIO_SEL 65 +#define IMX6SX_CLK_ENET_PRE_SEL 66 +#define IMX6SX_CLK_ENET_SEL 67 +#define IMX6SX_CLK_M4_PRE_SEL 68 +#define IMX6SX_CLK_M4_SEL 69 +#define IMX6SX_CLK_ECSPI_SEL 70 +#define IMX6SX_CLK_LCDIF1_PRE_SEL 71 +#define IMX6SX_CLK_LCDIF2_PRE_SEL 72 +#define IMX6SX_CLK_LCDIF1_SEL 73 +#define IMX6SX_CLK_LCDIF2_SEL 74 +#define IMX6SX_CLK_DISPLAY_SEL 75 +#define IMX6SX_CLK_CSI_SEL 76 +#define IMX6SX_CLK_CKO1_SEL 77 +#define IMX6SX_CLK_CKO2_SEL 78 +#define IMX6SX_CLK_CKO 79 +#define IMX6SX_CLK_PERIPH_CLK2 80 +#define IMX6SX_CLK_PERIPH2_CLK2 81 +#define IMX6SX_CLK_IPG 82 +#define IMX6SX_CLK_GPU_CORE_PODF 83 +#define IMX6SX_CLK_GPU_AXI_PODF 84 +#define IMX6SX_CLK_LCDIF1_PODF 85 +#define IMX6SX_CLK_QSPI1_PODF 86 +#define IMX6SX_CLK_EIM_SLOW_PODF 87 +#define IMX6SX_CLK_LCDIF2_PODF 88 +#define IMX6SX_CLK_PERCLK 89 +#define IMX6SX_CLK_VID_PODF 90 +#define IMX6SX_CLK_CAN_PODF 91 +#define IMX6SX_CLK_USDHC1_PODF 92 +#define IMX6SX_CLK_USDHC2_PODF 93 +#define IMX6SX_CLK_USDHC3_PODF 94 +#define IMX6SX_CLK_USDHC4_PODF 95 +#define IMX6SX_CLK_UART_PODF 96 +#define IMX6SX_CLK_ESAI_PRED 97 +#define IMX6SX_CLK_ESAI_PODF 98 +#define IMX6SX_CLK_SSI3_PRED 99 +#define IMX6SX_CLK_SSI3_PODF 100 +#define IMX6SX_CLK_SSI1_PRED 101 +#define IMX6SX_CLK_SSI1_PODF 102 +#define 
IMX6SX_CLK_QSPI2_PRED 103 +#define IMX6SX_CLK_QSPI2_PODF 104 +#define IMX6SX_CLK_SSI2_PRED 105 +#define IMX6SX_CLK_SSI2_PODF 106 +#define IMX6SX_CLK_SPDIF_PRED 107 +#define IMX6SX_CLK_SPDIF_PODF 108 +#define IMX6SX_CLK_AUDIO_PRED 109 +#define IMX6SX_CLK_AUDIO_PODF 110 +#define IMX6SX_CLK_ENET_PODF 111 +#define IMX6SX_CLK_M4_PODF 112 +#define IMX6SX_CLK_ECSPI_PODF 113 +#define IMX6SX_CLK_LCDIF1_PRED 114 +#define IMX6SX_CLK_LCDIF2_PRED 115 +#define IMX6SX_CLK_DISPLAY_PODF 116 +#define IMX6SX_CLK_CSI_PODF 117 +#define IMX6SX_CLK_LDB_DI0_DIV_3_5 118 +#define IMX6SX_CLK_LDB_DI0_DIV_7 119 +#define IMX6SX_CLK_LDB_DI1_DIV_3_5 120 +#define IMX6SX_CLK_LDB_DI1_DIV_7 121 +#define IMX6SX_CLK_CKO1_PODF 122 +#define IMX6SX_CLK_CKO2_PODF 123 +#define IMX6SX_CLK_PERIPH 124 +#define IMX6SX_CLK_PERIPH2 125 +#define IMX6SX_CLK_OCRAM 126 +#define IMX6SX_CLK_AHB 127 +#define IMX6SX_CLK_MMDC_PODF 128 +#define IMX6SX_CLK_ARM 129 +#define IMX6SX_CLK_AIPS_TZ1 130 +#define IMX6SX_CLK_AIPS_TZ2 131 +#define IMX6SX_CLK_APBH_DMA 132 +#define IMX6SX_CLK_ASRC_GATE 133 +#define IMX6SX_CLK_CAAM_MEM 134 +#define IMX6SX_CLK_CAAM_ACLK 135 +#define IMX6SX_CLK_CAAM_IPG 136 +#define IMX6SX_CLK_CAN1_IPG 137 +#define IMX6SX_CLK_CAN1_SERIAL 138 +#define IMX6SX_CLK_CAN2_IPG 139 +#define IMX6SX_CLK_CAN2_SERIAL 140 +#define IMX6SX_CLK_CPU_DEBUG 141 +#define IMX6SX_CLK_DCIC1 142 +#define IMX6SX_CLK_DCIC2 143 +#define IMX6SX_CLK_AIPS_TZ3 144 +#define IMX6SX_CLK_ECSPI1 145 +#define IMX6SX_CLK_ECSPI2 146 +#define IMX6SX_CLK_ECSPI3 147 +#define IMX6SX_CLK_ECSPI4 148 +#define IMX6SX_CLK_ECSPI5 149 +#define IMX6SX_CLK_EPIT1 150 +#define IMX6SX_CLK_EPIT2 151 +#define IMX6SX_CLK_ESAI_EXTAL 152 +#define IMX6SX_CLK_WAKEUP 153 +#define IMX6SX_CLK_GPT_BUS 154 +#define IMX6SX_CLK_GPT_SERIAL 155 +#define IMX6SX_CLK_GPU 156 +#define IMX6SX_CLK_OCRAM_S 157 +#define IMX6SX_CLK_CANFD 158 +#define IMX6SX_CLK_CSI 159 +#define IMX6SX_CLK_I2C1 160 +#define IMX6SX_CLK_I2C2 161 +#define IMX6SX_CLK_I2C3 162 +#define IMX6SX_CLK_OCOTP 163 +#define IMX6SX_CLK_IOMUXC 164 +#define IMX6SX_CLK_IPMUX1 165 +#define IMX6SX_CLK_IPMUX2 166 +#define IMX6SX_CLK_IPMUX3 167 +#define IMX6SX_CLK_TZASC1 168 +#define IMX6SX_CLK_LCDIF_APB 169 +#define IMX6SX_CLK_PXP_AXI 170 +#define IMX6SX_CLK_M4 171 +#define IMX6SX_CLK_ENET 172 +#define IMX6SX_CLK_DISPLAY_AXI 173 +#define IMX6SX_CLK_LCDIF2_PIX 174 +#define IMX6SX_CLK_LCDIF1_PIX 175 +#define IMX6SX_CLK_LDB_DI0 176 +#define IMX6SX_CLK_QSPI1 177 +#define IMX6SX_CLK_MLB 178 +#define IMX6SX_CLK_MMDC_P0_FAST 179 +#define IMX6SX_CLK_MMDC_P0_IPG 180 +#define IMX6SX_CLK_AXI 181 +#define IMX6SX_CLK_PCIE_AXI 182 +#define IMX6SX_CLK_QSPI2 183 +#define IMX6SX_CLK_PER1_BCH 184 +#define IMX6SX_CLK_PER2_MAIN 185 +#define IMX6SX_CLK_PWM1 186 +#define IMX6SX_CLK_PWM2 187 +#define IMX6SX_CLK_PWM3 188 +#define IMX6SX_CLK_PWM4 189 +#define IMX6SX_CLK_GPMI_BCH_APB 190 +#define IMX6SX_CLK_GPMI_BCH 191 +#define IMX6SX_CLK_GPMI_IO 192 +#define IMX6SX_CLK_GPMI_APB 193 +#define IMX6SX_CLK_ROM 194 +#define IMX6SX_CLK_SDMA 195 +#define IMX6SX_CLK_SPBA 196 +#define IMX6SX_CLK_SPDIF 197 +#define IMX6SX_CLK_SSI1_IPG 198 +#define IMX6SX_CLK_SSI2_IPG 199 +#define IMX6SX_CLK_SSI3_IPG 200 +#define IMX6SX_CLK_SSI1 201 +#define IMX6SX_CLK_SSI2 202 +#define IMX6SX_CLK_SSI3 203 +#define IMX6SX_CLK_UART_IPG 204 +#define IMX6SX_CLK_UART_SERIAL 205 +#define IMX6SX_CLK_SAI1 206 +#define IMX6SX_CLK_SAI2 207 +#define IMX6SX_CLK_USBOH3 208 +#define IMX6SX_CLK_USDHC1 209 +#define IMX6SX_CLK_USDHC2 210 +#define IMX6SX_CLK_USDHC3 211 +#define IMX6SX_CLK_USDHC4 212 +#define 
IMX6SX_CLK_EIM_SLOW 213 +#define IMX6SX_CLK_PWM8 214 +#define IMX6SX_CLK_VADC 215 +#define IMX6SX_CLK_GIS 216 +#define IMX6SX_CLK_I2C4 217 +#define IMX6SX_CLK_PWM5 218 +#define IMX6SX_CLK_PWM6 219 +#define IMX6SX_CLK_PWM7 220 +#define IMX6SX_CLK_CKO1 221 +#define IMX6SX_CLK_CKO2 222 +#define IMX6SX_CLK_IPP_DI0 223 +#define IMX6SX_CLK_IPP_DI1 224 +#define IMX6SX_CLK_ENET_AHB 225 +#define IMX6SX_CLK_OCRAM_PODF 226 +#define IMX6SX_CLK_GPT_3M 227 +#define IMX6SX_CLK_ENET_PTP 228 +#define IMX6SX_CLK_ENET_PTP_REF 229 +#define IMX6SX_CLK_ENET2_REF 230 +#define IMX6SX_CLK_ENET2_REF_125M 231 +#define IMX6SX_CLK_AUDIO 232 +#define IMX6SX_CLK_LVDS1_SEL 233 +#define IMX6SX_CLK_LVDS1_OUT 234 +#define IMX6SX_CLK_ASRC_IPG 235 +#define IMX6SX_CLK_ASRC_MEM 236 +#define IMX6SX_CLK_SAI1_IPG 237 +#define IMX6SX_CLK_SAI2_IPG 238 +#define IMX6SX_CLK_ESAI_IPG 239 +#define IMX6SX_CLK_ESAI_MEM 240 +#define IMX6SX_CLK_LVDS1_IN 241 +#define IMX6SX_CLK_ANACLK1 242 +#define IMX6SX_PLL1_BYPASS_SRC 243 +#define IMX6SX_PLL2_BYPASS_SRC 244 +#define IMX6SX_PLL3_BYPASS_SRC 245 +#define IMX6SX_PLL4_BYPASS_SRC 246 +#define IMX6SX_PLL5_BYPASS_SRC 247 +#define IMX6SX_PLL6_BYPASS_SRC 248 +#define IMX6SX_PLL7_BYPASS_SRC 249 +#define IMX6SX_CLK_PLL1 250 +#define IMX6SX_CLK_PLL2 251 +#define IMX6SX_CLK_PLL3 252 +#define IMX6SX_CLK_PLL4 253 +#define IMX6SX_CLK_PLL5 254 +#define IMX6SX_CLK_PLL6 255 +#define IMX6SX_CLK_PLL7 256 +#define IMX6SX_PLL1_BYPASS 257 +#define IMX6SX_PLL2_BYPASS 258 +#define IMX6SX_PLL3_BYPASS 259 +#define IMX6SX_PLL4_BYPASS 260 +#define IMX6SX_PLL5_BYPASS 261 +#define IMX6SX_PLL6_BYPASS 262 +#define IMX6SX_PLL7_BYPASS 263 +#define IMX6SX_CLK_SPDIF_GCLK 264 +#define IMX6SX_CLK_LVDS2_SEL 265 +#define IMX6SX_CLK_LVDS2_OUT 266 +#define IMX6SX_CLK_LVDS2_IN 267 +#define IMX6SX_CLK_ANACLK2 268 +#define IMX6SX_CLK_CLK_END 269 + +#endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */ diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h new file mode 100644 index 000000000..f8e0476a3 --- /dev/null +++ b/include/dt-bindings/clock/imx6ul-clock.h @@ -0,0 +1,265 @@ +/* + * Copyright (C) 2015 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX6UL_H +#define __DT_BINDINGS_CLOCK_IMX6UL_H + +#define IMX6UL_CLK_DUMMY 0 +#define IMX6UL_CLK_CKIL 1 +#define IMX6UL_CLK_CKIH 2 +#define IMX6UL_CLK_OSC 3 +#define IMX6UL_PLL1_BYPASS_SRC 4 +#define IMX6UL_PLL2_BYPASS_SRC 5 +#define IMX6UL_PLL3_BYPASS_SRC 6 +#define IMX6UL_PLL4_BYPASS_SRC 7 +#define IMX6UL_PLL5_BYPASS_SRC 8 +#define IMX6UL_PLL6_BYPASS_SRC 9 +#define IMX6UL_PLL7_BYPASS_SRC 10 +#define IMX6UL_CLK_PLL1 11 +#define IMX6UL_CLK_PLL2 12 +#define IMX6UL_CLK_PLL3 13 +#define IMX6UL_CLK_PLL4 14 +#define IMX6UL_CLK_PLL5 15 +#define IMX6UL_CLK_PLL6 16 +#define IMX6UL_CLK_PLL7 17 +#define IMX6UL_PLL1_BYPASS 18 +#define IMX6UL_PLL2_BYPASS 19 +#define IMX6UL_PLL3_BYPASS 20 +#define IMX6UL_PLL4_BYPASS 21 +#define IMX6UL_PLL5_BYPASS 22 +#define IMX6UL_PLL6_BYPASS 23 +#define IMX6UL_PLL7_BYPASS 24 +#define IMX6UL_CLK_PLL1_SYS 25 +#define IMX6UL_CLK_PLL2_BUS 26 +#define IMX6UL_CLK_PLL3_USB_OTG 27 +#define IMX6UL_CLK_PLL4_AUDIO 28 +#define IMX6UL_CLK_PLL5_VIDEO 29 +#define IMX6UL_CLK_PLL6_ENET 30 +#define IMX6UL_CLK_PLL7_USB_HOST 31 +#define IMX6UL_CLK_USBPHY1 32 +#define IMX6UL_CLK_USBPHY2 33 +#define IMX6UL_CLK_USBPHY1_GATE 34 +#define IMX6UL_CLK_USBPHY2_GATE 35 +#define IMX6UL_CLK_PLL2_PFD0 36 +#define IMX6UL_CLK_PLL2_PFD1 37 +#define IMX6UL_CLK_PLL2_PFD2 38 +#define IMX6UL_CLK_PLL2_PFD3 39 +#define IMX6UL_CLK_PLL3_PFD0 40 +#define IMX6UL_CLK_PLL3_PFD1 41 +#define IMX6UL_CLK_PLL3_PFD2 42 +#define IMX6UL_CLK_PLL3_PFD3 43 +#define IMX6UL_CLK_ENET_REF 44 +#define IMX6UL_CLK_ENET2_REF 45 +#define IMX6UL_CLK_ENET2_REF_125M 46 +#define IMX6UL_CLK_ENET_PTP_REF 47 +#define IMX6UL_CLK_ENET_PTP 48 +#define IMX6UL_CLK_PLL4_POST_DIV 49 +#define IMX6UL_CLK_PLL4_AUDIO_DIV 50 +#define IMX6UL_CLK_PLL5_POST_DIV 51 +#define IMX6UL_CLK_PLL5_VIDEO_DIV 52 +#define IMX6UL_CLK_PLL2_198M 53 +#define IMX6UL_CLK_PLL3_80M 54 +#define IMX6UL_CLK_PLL3_60M 55 +#define IMX6UL_CLK_STEP 56 +#define IMX6UL_CLK_PLL1_SW 57 +#define IMX6UL_CLK_AXI_ALT_SEL 58 +#define IMX6UL_CLK_AXI_SEL 59 +#define IMX6UL_CLK_PERIPH_PRE 60 +#define IMX6UL_CLK_PERIPH2_PRE 61 +#define IMX6UL_CLK_PERIPH_CLK2_SEL 62 +#define IMX6UL_CLK_PERIPH2_CLK2_SEL 63 +#define IMX6UL_CLK_USDHC1_SEL 64 +#define IMX6UL_CLK_USDHC2_SEL 65 +#define IMX6UL_CLK_BCH_SEL 66 +#define IMX6UL_CLK_GPMI_SEL 67 +#define IMX6UL_CLK_EIM_SLOW_SEL 68 +#define IMX6UL_CLK_SPDIF_SEL 69 +#define IMX6UL_CLK_SAI1_SEL 70 +#define IMX6UL_CLK_SAI2_SEL 71 +#define IMX6UL_CLK_SAI3_SEL 72 +#define IMX6UL_CLK_LCDIF_PRE_SEL 73 +#define IMX6UL_CLK_SIM_PRE_SEL 74 +#define IMX6UL_CLK_LDB_DI0_SEL 75 +#define IMX6UL_CLK_LDB_DI1_SEL 76 +#define IMX6UL_CLK_ENFC_SEL 77 +#define IMX6UL_CLK_CAN_SEL 78 +#define IMX6UL_CLK_ECSPI_SEL 79 +#define IMX6UL_CLK_UART_SEL 80 +#define IMX6UL_CLK_QSPI1_SEL 81 +#define IMX6UL_CLK_PERCLK_SEL 82 +#define IMX6UL_CLK_LCDIF_SEL 83 +#define IMX6UL_CLK_SIM_SEL 84 +#define IMX6UL_CLK_PERIPH 85 +#define IMX6UL_CLK_PERIPH2 86 +#define IMX6UL_CLK_LDB_DI0_DIV_3_5 87 +#define IMX6UL_CLK_LDB_DI0_DIV_7 88 +#define IMX6UL_CLK_LDB_DI1_DIV_3_5 89 +#define IMX6UL_CLK_LDB_DI1_DIV_7 90 +#define IMX6UL_CLK_LDB_DI0_DIV_SEL 91 +#define IMX6UL_CLK_LDB_DI1_DIV_SEL 92 +#define IMX6UL_CLK_ARM 93 +#define IMX6UL_CLK_PERIPH_CLK2 94 +#define IMX6UL_CLK_PERIPH2_CLK2 95 +#define IMX6UL_CLK_AHB 96 +#define IMX6UL_CLK_MMDC_PODF 97 +#define IMX6UL_CLK_AXI_PODF 98 +#define IMX6UL_CLK_PERCLK 99 +#define IMX6UL_CLK_IPG 100 +#define IMX6UL_CLK_USDHC1_PODF 101 +#define IMX6UL_CLK_USDHC2_PODF 102 +#define IMX6UL_CLK_BCH_PODF 103 +#define IMX6UL_CLK_GPMI_PODF 104 
+#define IMX6UL_CLK_EIM_SLOW_PODF 105 +#define IMX6UL_CLK_SPDIF_PRED 106 +#define IMX6UL_CLK_SPDIF_PODF 107 +#define IMX6UL_CLK_SAI1_PRED 108 +#define IMX6UL_CLK_SAI1_PODF 109 +#define IMX6UL_CLK_SAI2_PRED 110 +#define IMX6UL_CLK_SAI2_PODF 111 +#define IMX6UL_CLK_SAI3_PRED 112 +#define IMX6UL_CLK_SAI3_PODF 113 +#define IMX6UL_CLK_LCDIF_PRED 114 +#define IMX6UL_CLK_LCDIF_PODF 115 +#define IMX6UL_CLK_SIM_PODF 116 +#define IMX6UL_CLK_QSPI1_PDOF 117 +#define IMX6UL_CLK_ENFC_PRED 118 +#define IMX6UL_CLK_ENFC_PODF 119 +#define IMX6UL_CLK_CAN_PODF 120 +#define IMX6UL_CLK_ECSPI_PODF 121 +#define IMX6UL_CLK_UART_PODF 122 +#define IMX6UL_CLK_ADC1 123 +#define IMX6UL_CLK_ADC2 124 +#define IMX6UL_CLK_AIPSTZ1 125 +#define IMX6UL_CLK_AIPSTZ2 126 +#define IMX6UL_CLK_AIPSTZ3 127 +#define IMX6UL_CLK_APBHDMA 128 +#define IMX6UL_CLK_ASRC_IPG 129 +#define IMX6UL_CLK_ASRC_MEM 130 +#define IMX6UL_CLK_GPMI_BCH_APB 131 +#define IMX6UL_CLK_GPMI_BCH 132 +#define IMX6UL_CLK_GPMI_IO 133 +#define IMX6UL_CLK_GPMI_APB 134 +#define IMX6UL_CLK_CAAM_MEM 135 +#define IMX6UL_CLK_CAAM_ACLK 136 +#define IMX6UL_CLK_CAAM_IPG 137 +#define IMX6UL_CLK_CSI 138 +#define IMX6UL_CLK_ECSPI1 139 +#define IMX6UL_CLK_ECSPI2 140 +#define IMX6UL_CLK_ECSPI3 141 +#define IMX6UL_CLK_ECSPI4 142 +#define IMX6UL_CLK_EIM 143 +#define IMX6UL_CLK_ENET 144 +#define IMX6UL_CLK_ENET_AHB 145 +#define IMX6UL_CLK_EPIT1 146 +#define IMX6UL_CLK_EPIT2 147 +#define IMX6UL_CLK_CAN1_IPG 148 +#define IMX6UL_CLK_CAN1_SERIAL 149 +#define IMX6UL_CLK_CAN2_IPG 150 +#define IMX6UL_CLK_CAN2_SERIAL 151 +#define IMX6UL_CLK_GPT1_BUS 152 +#define IMX6UL_CLK_GPT1_SERIAL 153 +#define IMX6UL_CLK_GPT2_BUS 154 +#define IMX6UL_CLK_GPT2_SERIAL 155 +#define IMX6UL_CLK_I2C1 156 +#define IMX6UL_CLK_I2C2 157 +#define IMX6UL_CLK_I2C3 158 +#define IMX6UL_CLK_I2C4 159 +#define IMX6UL_CLK_IOMUXC 160 +#define IMX6UL_CLK_LCDIF_APB 161 +#define IMX6UL_CLK_LCDIF_PIX 162 +#define IMX6UL_CLK_MMDC_P0_FAST 163 +#define IMX6UL_CLK_MMDC_P0_IPG 164 +#define IMX6UL_CLK_OCOTP 165 +#define IMX6UL_CLK_OCRAM 166 +#define IMX6UL_CLK_PWM1 167 +#define IMX6UL_CLK_PWM2 168 +#define IMX6UL_CLK_PWM3 169 +#define IMX6UL_CLK_PWM4 170 +#define IMX6UL_CLK_PWM5 171 +#define IMX6UL_CLK_PWM6 172 +#define IMX6UL_CLK_PWM7 173 +#define IMX6UL_CLK_PWM8 174 +#define IMX6UL_CLK_PXP 175 +#define IMX6UL_CLK_QSPI 176 +#define IMX6UL_CLK_ROM 177 +#define IMX6UL_CLK_SAI1 178 +#define IMX6UL_CLK_SAI1_IPG 179 +#define IMX6UL_CLK_SAI2 180 +#define IMX6UL_CLK_SAI2_IPG 181 +#define IMX6UL_CLK_SAI3 182 +#define IMX6UL_CLK_SAI3_IPG 183 +#define IMX6UL_CLK_SDMA 184 +#define IMX6UL_CLK_SIM 185 +#define IMX6UL_CLK_SIM_S 186 +#define IMX6UL_CLK_SPBA 187 +#define IMX6UL_CLK_SPDIF 188 +#define IMX6UL_CLK_UART1_IPG 189 +#define IMX6UL_CLK_UART1_SERIAL 190 +#define IMX6UL_CLK_UART2_IPG 191 +#define IMX6UL_CLK_UART2_SERIAL 192 +#define IMX6UL_CLK_UART3_IPG 193 +#define IMX6UL_CLK_UART3_SERIAL 194 +#define IMX6UL_CLK_UART4_IPG 195 +#define IMX6UL_CLK_UART4_SERIAL 196 +#define IMX6UL_CLK_UART5_IPG 197 +#define IMX6UL_CLK_UART5_SERIAL 198 +#define IMX6UL_CLK_UART6_IPG 199 +#define IMX6UL_CLK_UART6_SERIAL 200 +#define IMX6UL_CLK_UART7_IPG 201 +#define IMX6UL_CLK_UART7_SERIAL 202 +#define IMX6UL_CLK_UART8_IPG 203 +#define IMX6UL_CLK_UART8_SERIAL 204 +#define IMX6UL_CLK_USBOH3 205 +#define IMX6UL_CLK_USDHC1 206 +#define IMX6UL_CLK_USDHC2 207 +#define IMX6UL_CLK_WDOG1 208 +#define IMX6UL_CLK_WDOG2 209 +#define IMX6UL_CLK_WDOG3 210 +#define IMX6UL_CLK_LDB_DI0 211 +#define IMX6UL_CLK_AXI 212 +#define IMX6UL_CLK_SPDIF_GCLK 213 +#define IMX6UL_CLK_GPT_3M 
214 +#define IMX6UL_CLK_SIM2 215 +#define IMX6UL_CLK_SIM1 216 +#define IMX6UL_CLK_IPP_DI0 217 +#define IMX6UL_CLK_IPP_DI1 218 +#define IMX6UL_CA7_SECONDARY_SEL 219 +#define IMX6UL_CLK_PER_BCH 220 +#define IMX6UL_CLK_CSI_SEL 221 +#define IMX6UL_CLK_CSI_PODF 222 +#define IMX6UL_CLK_PLL3_120M 223 +#define IMX6UL_CLK_KPP 224 +#define IMX6ULL_CLK_ESAI_PRED 225 +#define IMX6ULL_CLK_ESAI_PODF 226 +#define IMX6ULL_CLK_ESAI_EXTAL 227 +#define IMX6ULL_CLK_ESAI_MEM 228 +#define IMX6ULL_CLK_ESAI_IPG 229 +#define IMX6ULL_CLK_DCP_CLK 230 +#define IMX6ULL_CLK_EPDC_PRE_SEL 231 +#define IMX6ULL_CLK_EPDC_SEL 232 +#define IMX6ULL_CLK_EPDC_PODF 233 +#define IMX6ULL_CLK_EPDC_ACLK 234 +#define IMX6ULL_CLK_EPDC_PIX 235 +#define IMX6ULL_CLK_ESAI_SEL 236 +#define IMX6UL_CLK_CKO1_SEL 237 +#define IMX6UL_CLK_CKO1_PODF 238 +#define IMX6UL_CLK_CKO1 239 +#define IMX6UL_CLK_CKO2_SEL 240 +#define IMX6UL_CLK_CKO2_PODF 241 +#define IMX6UL_CLK_CKO2 242 +#define IMX6UL_CLK_CKO 243 +#define IMX6UL_CLK_GPIO1 244 +#define IMX6UL_CLK_GPIO2 245 +#define IMX6UL_CLK_GPIO3 246 +#define IMX6UL_CLK_GPIO4 247 +#define IMX6UL_CLK_GPIO5 248 + +#define IMX6UL_CLK_END 249 + +#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */ diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h new file mode 100644 index 000000000..0d67f53bb --- /dev/null +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -0,0 +1,459 @@ +/* + * Copyright (C) 2014-2015 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX7D_H +#define __DT_BINDINGS_CLOCK_IMX7D_H + +#define IMX7D_OSC_24M_CLK 0 +#define IMX7D_PLL_ARM_MAIN 1 +#define IMX7D_PLL_ARM_MAIN_CLK 2 +#define IMX7D_PLL_ARM_MAIN_SRC 3 +#define IMX7D_PLL_ARM_MAIN_BYPASS 4 +#define IMX7D_PLL_SYS_MAIN 5 +#define IMX7D_PLL_SYS_MAIN_CLK 6 +#define IMX7D_PLL_SYS_MAIN_SRC 7 +#define IMX7D_PLL_SYS_MAIN_BYPASS 8 +#define IMX7D_PLL_SYS_MAIN_480M 9 +#define IMX7D_PLL_SYS_MAIN_240M 10 +#define IMX7D_PLL_SYS_MAIN_120M 11 +#define IMX7D_PLL_SYS_MAIN_480M_CLK 12 +#define IMX7D_PLL_SYS_MAIN_240M_CLK 13 +#define IMX7D_PLL_SYS_MAIN_120M_CLK 14 +#define IMX7D_PLL_SYS_PFD0_392M_CLK 15 +#define IMX7D_PLL_SYS_PFD0_196M 16 +#define IMX7D_PLL_SYS_PFD0_196M_CLK 17 +#define IMX7D_PLL_SYS_PFD1_332M_CLK 18 +#define IMX7D_PLL_SYS_PFD1_166M 19 +#define IMX7D_PLL_SYS_PFD1_166M_CLK 20 +#define IMX7D_PLL_SYS_PFD2_270M_CLK 21 +#define IMX7D_PLL_SYS_PFD2_135M 22 +#define IMX7D_PLL_SYS_PFD2_135M_CLK 23 +#define IMX7D_PLL_SYS_PFD3_CLK 24 +#define IMX7D_PLL_SYS_PFD4_CLK 25 +#define IMX7D_PLL_SYS_PFD5_CLK 26 +#define IMX7D_PLL_SYS_PFD6_CLK 27 +#define IMX7D_PLL_SYS_PFD7_CLK 28 +#define IMX7D_PLL_ENET_MAIN 29 +#define IMX7D_PLL_ENET_MAIN_CLK 30 +#define IMX7D_PLL_ENET_MAIN_SRC 31 +#define IMX7D_PLL_ENET_MAIN_BYPASS 32 +#define IMX7D_PLL_ENET_MAIN_500M 33 +#define IMX7D_PLL_ENET_MAIN_250M 34 +#define IMX7D_PLL_ENET_MAIN_125M 35 +#define IMX7D_PLL_ENET_MAIN_100M 36 +#define IMX7D_PLL_ENET_MAIN_50M 37 +#define IMX7D_PLL_ENET_MAIN_40M 38 +#define IMX7D_PLL_ENET_MAIN_25M 39 +#define IMX7D_PLL_ENET_MAIN_500M_CLK 40 +#define IMX7D_PLL_ENET_MAIN_250M_CLK 41 +#define IMX7D_PLL_ENET_MAIN_125M_CLK 42 +#define IMX7D_PLL_ENET_MAIN_100M_CLK 43 +#define IMX7D_PLL_ENET_MAIN_50M_CLK 44 +#define IMX7D_PLL_ENET_MAIN_40M_CLK 45 +#define IMX7D_PLL_ENET_MAIN_25M_CLK 46 +#define IMX7D_PLL_DRAM_MAIN 47 +#define IMX7D_PLL_DRAM_MAIN_CLK 48 
+#define IMX7D_PLL_DRAM_MAIN_SRC 49 +#define IMX7D_PLL_DRAM_MAIN_BYPASS 50 +#define IMX7D_PLL_DRAM_MAIN_533M 51 +#define IMX7D_PLL_DRAM_MAIN_533M_CLK 52 +#define IMX7D_PLL_AUDIO_MAIN 53 +#define IMX7D_PLL_AUDIO_MAIN_CLK 54 +#define IMX7D_PLL_AUDIO_MAIN_SRC 55 +#define IMX7D_PLL_AUDIO_MAIN_BYPASS 56 +#define IMX7D_PLL_VIDEO_MAIN_CLK 57 +#define IMX7D_PLL_VIDEO_MAIN 58 +#define IMX7D_PLL_VIDEO_MAIN_SRC 59 +#define IMX7D_PLL_VIDEO_MAIN_BYPASS 60 +#define IMX7D_USB_MAIN_480M_CLK 61 +#define IMX7D_ARM_A7_ROOT_CLK 62 +#define IMX7D_ARM_A7_ROOT_SRC 63 +#define IMX7D_ARM_A7_ROOT_CG 64 +#define IMX7D_ARM_A7_ROOT_DIV 65 +#define IMX7D_ARM_M4_ROOT_CLK 66 +#define IMX7D_ARM_M4_ROOT_SRC 67 +#define IMX7D_ARM_M4_ROOT_CG 68 +#define IMX7D_ARM_M4_ROOT_DIV 69 +#define IMX7D_ARM_M0_ROOT_CLK 70 /* unused */ +#define IMX7D_ARM_M0_ROOT_SRC 71 /* unused */ +#define IMX7D_ARM_M0_ROOT_CG 72 /* unused */ +#define IMX7D_ARM_M0_ROOT_DIV 73 /* unused */ +#define IMX7D_MAIN_AXI_ROOT_CLK 74 +#define IMX7D_MAIN_AXI_ROOT_SRC 75 +#define IMX7D_MAIN_AXI_ROOT_CG 76 +#define IMX7D_MAIN_AXI_ROOT_DIV 77 +#define IMX7D_DISP_AXI_ROOT_CLK 78 +#define IMX7D_DISP_AXI_ROOT_SRC 79 +#define IMX7D_DISP_AXI_ROOT_CG 80 +#define IMX7D_DISP_AXI_ROOT_DIV 81 +#define IMX7D_ENET_AXI_ROOT_CLK 82 +#define IMX7D_ENET_AXI_ROOT_SRC 83 +#define IMX7D_ENET_AXI_ROOT_CG 84 +#define IMX7D_ENET_AXI_ROOT_DIV 85 +#define IMX7D_NAND_USDHC_BUS_ROOT_CLK 86 +#define IMX7D_NAND_USDHC_BUS_ROOT_SRC 87 +#define IMX7D_NAND_USDHC_BUS_ROOT_CG 88 +#define IMX7D_NAND_USDHC_BUS_ROOT_DIV 89 +#define IMX7D_AHB_CHANNEL_ROOT_CLK 90 +#define IMX7D_AHB_CHANNEL_ROOT_SRC 91 +#define IMX7D_AHB_CHANNEL_ROOT_CG 92 +#define IMX7D_AHB_CHANNEL_ROOT_DIV 93 +#define IMX7D_DRAM_PHYM_ROOT_CLK 94 +#define IMX7D_DRAM_PHYM_ROOT_SRC 95 +#define IMX7D_DRAM_PHYM_ROOT_CG 96 +#define IMX7D_DRAM_PHYM_ROOT_DIV 97 +#define IMX7D_DRAM_ROOT_CLK 98 +#define IMX7D_DRAM_ROOT_SRC 99 +#define IMX7D_DRAM_ROOT_CG 100 +#define IMX7D_DRAM_ROOT_DIV 101 +#define IMX7D_DRAM_PHYM_ALT_ROOT_CLK 102 +#define IMX7D_DRAM_PHYM_ALT_ROOT_SRC 103 +#define IMX7D_DRAM_PHYM_ALT_ROOT_CG 104 +#define IMX7D_DRAM_PHYM_ALT_ROOT_DIV 105 +#define IMX7D_DRAM_ALT_ROOT_CLK 106 +#define IMX7D_DRAM_ALT_ROOT_SRC 107 +#define IMX7D_DRAM_ALT_ROOT_CG 108 +#define IMX7D_DRAM_ALT_ROOT_DIV 109 +#define IMX7D_USB_HSIC_ROOT_CLK 110 +#define IMX7D_USB_HSIC_ROOT_SRC 111 +#define IMX7D_USB_HSIC_ROOT_CG 112 +#define IMX7D_USB_HSIC_ROOT_DIV 113 +#define IMX7D_PCIE_CTRL_ROOT_CLK 114 +#define IMX7D_PCIE_CTRL_ROOT_SRC 115 +#define IMX7D_PCIE_CTRL_ROOT_CG 116 +#define IMX7D_PCIE_CTRL_ROOT_DIV 117 +#define IMX7D_PCIE_PHY_ROOT_CLK 118 +#define IMX7D_PCIE_PHY_ROOT_SRC 119 +#define IMX7D_PCIE_PHY_ROOT_CG 120 +#define IMX7D_PCIE_PHY_ROOT_DIV 121 +#define IMX7D_EPDC_PIXEL_ROOT_CLK 122 +#define IMX7D_EPDC_PIXEL_ROOT_SRC 123 +#define IMX7D_EPDC_PIXEL_ROOT_CG 124 +#define IMX7D_EPDC_PIXEL_ROOT_DIV 125 +#define IMX7D_LCDIF_PIXEL_ROOT_CLK 126 +#define IMX7D_LCDIF_PIXEL_ROOT_SRC 127 +#define IMX7D_LCDIF_PIXEL_ROOT_CG 128 +#define IMX7D_LCDIF_PIXEL_ROOT_DIV 129 +#define IMX7D_MIPI_DSI_ROOT_CLK 130 +#define IMX7D_MIPI_DSI_ROOT_SRC 131 +#define IMX7D_MIPI_DSI_ROOT_CG 132 +#define IMX7D_MIPI_DSI_ROOT_DIV 133 +#define IMX7D_MIPI_CSI_ROOT_CLK 134 +#define IMX7D_MIPI_CSI_ROOT_SRC 135 +#define IMX7D_MIPI_CSI_ROOT_CG 136 +#define IMX7D_MIPI_CSI_ROOT_DIV 137 +#define IMX7D_MIPI_DPHY_ROOT_CLK 138 +#define IMX7D_MIPI_DPHY_ROOT_SRC 139 +#define IMX7D_MIPI_DPHY_ROOT_CG 140 +#define IMX7D_MIPI_DPHY_ROOT_DIV 141 +#define IMX7D_SAI1_ROOT_CLK 142 +#define IMX7D_SAI1_ROOT_SRC 143 
+#define IMX7D_SAI1_ROOT_CG 144 +#define IMX7D_SAI1_ROOT_DIV 145 +#define IMX7D_SAI2_ROOT_CLK 146 +#define IMX7D_SAI2_ROOT_SRC 147 +#define IMX7D_SAI2_ROOT_CG 148 +#define IMX7D_SAI2_ROOT_DIV 149 +#define IMX7D_SAI3_ROOT_CLK 150 +#define IMX7D_SAI3_ROOT_SRC 151 +#define IMX7D_SAI3_ROOT_CG 152 +#define IMX7D_SAI3_ROOT_DIV 153 +#define IMX7D_SPDIF_ROOT_CLK 154 +#define IMX7D_SPDIF_ROOT_SRC 155 +#define IMX7D_SPDIF_ROOT_CG 156 +#define IMX7D_SPDIF_ROOT_DIV 157 +#define IMX7D_ENET1_IPG_ROOT_CLK 158 +#define IMX7D_ENET1_REF_ROOT_SRC 159 +#define IMX7D_ENET1_REF_ROOT_CG 160 +#define IMX7D_ENET1_REF_ROOT_DIV 161 +#define IMX7D_ENET1_TIME_ROOT_CLK 162 +#define IMX7D_ENET1_TIME_ROOT_SRC 163 +#define IMX7D_ENET1_TIME_ROOT_CG 164 +#define IMX7D_ENET1_TIME_ROOT_DIV 165 +#define IMX7D_ENET2_IPG_ROOT_CLK 166 +#define IMX7D_ENET2_REF_ROOT_SRC 167 +#define IMX7D_ENET2_REF_ROOT_CG 168 +#define IMX7D_ENET2_REF_ROOT_DIV 169 +#define IMX7D_ENET2_TIME_ROOT_CLK 170 +#define IMX7D_ENET2_TIME_ROOT_SRC 171 +#define IMX7D_ENET2_TIME_ROOT_CG 172 +#define IMX7D_ENET2_TIME_ROOT_DIV 173 +#define IMX7D_ENET_PHY_REF_ROOT_CLK 174 +#define IMX7D_ENET_PHY_REF_ROOT_SRC 175 +#define IMX7D_ENET_PHY_REF_ROOT_CG 176 +#define IMX7D_ENET_PHY_REF_ROOT_DIV 177 +#define IMX7D_EIM_ROOT_CLK 178 +#define IMX7D_EIM_ROOT_SRC 179 +#define IMX7D_EIM_ROOT_CG 180 +#define IMX7D_EIM_ROOT_DIV 181 +#define IMX7D_NAND_ROOT_CLK 182 +#define IMX7D_NAND_ROOT_SRC 183 +#define IMX7D_NAND_ROOT_CG 184 +#define IMX7D_NAND_ROOT_DIV 185 +#define IMX7D_QSPI_ROOT_CLK 186 +#define IMX7D_QSPI_ROOT_SRC 187 +#define IMX7D_QSPI_ROOT_CG 188 +#define IMX7D_QSPI_ROOT_DIV 189 +#define IMX7D_USDHC1_ROOT_CLK 190 +#define IMX7D_USDHC1_ROOT_SRC 191 +#define IMX7D_USDHC1_ROOT_CG 192 +#define IMX7D_USDHC1_ROOT_DIV 193 +#define IMX7D_USDHC2_ROOT_CLK 194 +#define IMX7D_USDHC2_ROOT_SRC 195 +#define IMX7D_USDHC2_ROOT_CG 196 +#define IMX7D_USDHC2_ROOT_DIV 197 +#define IMX7D_USDHC3_ROOT_CLK 198 +#define IMX7D_USDHC3_ROOT_SRC 199 +#define IMX7D_USDHC3_ROOT_CG 200 +#define IMX7D_USDHC3_ROOT_DIV 201 +#define IMX7D_CAN1_ROOT_CLK 202 +#define IMX7D_CAN1_ROOT_SRC 203 +#define IMX7D_CAN1_ROOT_CG 204 +#define IMX7D_CAN1_ROOT_DIV 205 +#define IMX7D_CAN2_ROOT_CLK 206 +#define IMX7D_CAN2_ROOT_SRC 207 +#define IMX7D_CAN2_ROOT_CG 208 +#define IMX7D_CAN2_ROOT_DIV 209 +#define IMX7D_I2C1_ROOT_CLK 210 +#define IMX7D_I2C1_ROOT_SRC 211 +#define IMX7D_I2C1_ROOT_CG 212 +#define IMX7D_I2C1_ROOT_DIV 213 +#define IMX7D_I2C2_ROOT_CLK 214 +#define IMX7D_I2C2_ROOT_SRC 215 +#define IMX7D_I2C2_ROOT_CG 216 +#define IMX7D_I2C2_ROOT_DIV 217 +#define IMX7D_I2C3_ROOT_CLK 218 +#define IMX7D_I2C3_ROOT_SRC 219 +#define IMX7D_I2C3_ROOT_CG 220 +#define IMX7D_I2C3_ROOT_DIV 221 +#define IMX7D_I2C4_ROOT_CLK 222 +#define IMX7D_I2C4_ROOT_SRC 223 +#define IMX7D_I2C4_ROOT_CG 224 +#define IMX7D_I2C4_ROOT_DIV 225 +#define IMX7D_UART1_ROOT_CLK 226 +#define IMX7D_UART1_ROOT_SRC 227 +#define IMX7D_UART1_ROOT_CG 228 +#define IMX7D_UART1_ROOT_DIV 229 +#define IMX7D_UART2_ROOT_CLK 230 +#define IMX7D_UART2_ROOT_SRC 231 +#define IMX7D_UART2_ROOT_CG 232 +#define IMX7D_UART2_ROOT_DIV 233 +#define IMX7D_UART3_ROOT_CLK 234 +#define IMX7D_UART3_ROOT_SRC 235 +#define IMX7D_UART3_ROOT_CG 236 +#define IMX7D_UART3_ROOT_DIV 237 +#define IMX7D_UART4_ROOT_CLK 238 +#define IMX7D_UART4_ROOT_SRC 239 +#define IMX7D_UART4_ROOT_CG 240 +#define IMX7D_UART4_ROOT_DIV 241 +#define IMX7D_UART5_ROOT_CLK 242 +#define IMX7D_UART5_ROOT_SRC 243 +#define IMX7D_UART5_ROOT_CG 244 +#define IMX7D_UART5_ROOT_DIV 245 +#define IMX7D_UART6_ROOT_CLK 246 +#define 
IMX7D_UART6_ROOT_SRC 247 +#define IMX7D_UART6_ROOT_CG 248 +#define IMX7D_UART6_ROOT_DIV 249 +#define IMX7D_UART7_ROOT_CLK 250 +#define IMX7D_UART7_ROOT_SRC 251 +#define IMX7D_UART7_ROOT_CG 252 +#define IMX7D_UART7_ROOT_DIV 253 +#define IMX7D_ECSPI1_ROOT_CLK 254 +#define IMX7D_ECSPI1_ROOT_SRC 255 +#define IMX7D_ECSPI1_ROOT_CG 256 +#define IMX7D_ECSPI1_ROOT_DIV 257 +#define IMX7D_ECSPI2_ROOT_CLK 258 +#define IMX7D_ECSPI2_ROOT_SRC 259 +#define IMX7D_ECSPI2_ROOT_CG 260 +#define IMX7D_ECSPI2_ROOT_DIV 261 +#define IMX7D_ECSPI3_ROOT_CLK 262 +#define IMX7D_ECSPI3_ROOT_SRC 263 +#define IMX7D_ECSPI3_ROOT_CG 264 +#define IMX7D_ECSPI3_ROOT_DIV 265 +#define IMX7D_ECSPI4_ROOT_CLK 266 +#define IMX7D_ECSPI4_ROOT_SRC 267 +#define IMX7D_ECSPI4_ROOT_CG 268 +#define IMX7D_ECSPI4_ROOT_DIV 269 +#define IMX7D_PWM1_ROOT_CLK 270 +#define IMX7D_PWM1_ROOT_SRC 271 +#define IMX7D_PWM1_ROOT_CG 272 +#define IMX7D_PWM1_ROOT_DIV 273 +#define IMX7D_PWM2_ROOT_CLK 274 +#define IMX7D_PWM2_ROOT_SRC 275 +#define IMX7D_PWM2_ROOT_CG 276 +#define IMX7D_PWM2_ROOT_DIV 277 +#define IMX7D_PWM3_ROOT_CLK 278 +#define IMX7D_PWM3_ROOT_SRC 279 +#define IMX7D_PWM3_ROOT_CG 280 +#define IMX7D_PWM3_ROOT_DIV 281 +#define IMX7D_PWM4_ROOT_CLK 282 +#define IMX7D_PWM4_ROOT_SRC 283 +#define IMX7D_PWM4_ROOT_CG 284 +#define IMX7D_PWM4_ROOT_DIV 285 +#define IMX7D_FLEXTIMER1_ROOT_CLK 286 +#define IMX7D_FLEXTIMER1_ROOT_SRC 287 +#define IMX7D_FLEXTIMER1_ROOT_CG 288 +#define IMX7D_FLEXTIMER1_ROOT_DIV 289 +#define IMX7D_FLEXTIMER2_ROOT_CLK 290 +#define IMX7D_FLEXTIMER2_ROOT_SRC 291 +#define IMX7D_FLEXTIMER2_ROOT_CG 292 +#define IMX7D_FLEXTIMER2_ROOT_DIV 293 +#define IMX7D_SIM1_ROOT_CLK 294 +#define IMX7D_SIM1_ROOT_SRC 295 +#define IMX7D_SIM1_ROOT_CG 296 +#define IMX7D_SIM1_ROOT_DIV 297 +#define IMX7D_SIM2_ROOT_CLK 298 +#define IMX7D_SIM2_ROOT_SRC 299 +#define IMX7D_SIM2_ROOT_CG 300 +#define IMX7D_SIM2_ROOT_DIV 301 +#define IMX7D_GPT1_ROOT_CLK 302 +#define IMX7D_GPT1_ROOT_SRC 303 +#define IMX7D_GPT1_ROOT_CG 304 +#define IMX7D_GPT1_ROOT_DIV 305 +#define IMX7D_GPT2_ROOT_CLK 306 +#define IMX7D_GPT2_ROOT_SRC 307 +#define IMX7D_GPT2_ROOT_CG 308 +#define IMX7D_GPT2_ROOT_DIV 309 +#define IMX7D_GPT3_ROOT_CLK 310 +#define IMX7D_GPT3_ROOT_SRC 311 +#define IMX7D_GPT3_ROOT_CG 312 +#define IMX7D_GPT3_ROOT_DIV 313 +#define IMX7D_GPT4_ROOT_CLK 314 +#define IMX7D_GPT4_ROOT_SRC 315 +#define IMX7D_GPT4_ROOT_CG 316 +#define IMX7D_GPT4_ROOT_DIV 317 +#define IMX7D_TRACE_ROOT_CLK 318 +#define IMX7D_TRACE_ROOT_SRC 319 +#define IMX7D_TRACE_ROOT_CG 320 +#define IMX7D_TRACE_ROOT_DIV 321 +#define IMX7D_WDOG1_ROOT_CLK 322 +#define IMX7D_WDOG_ROOT_SRC 323 +#define IMX7D_WDOG_ROOT_CG 324 +#define IMX7D_WDOG_ROOT_DIV 325 +#define IMX7D_CSI_MCLK_ROOT_CLK 326 +#define IMX7D_CSI_MCLK_ROOT_SRC 327 +#define IMX7D_CSI_MCLK_ROOT_CG 328 +#define IMX7D_CSI_MCLK_ROOT_DIV 329 +#define IMX7D_AUDIO_MCLK_ROOT_CLK 330 +#define IMX7D_AUDIO_MCLK_ROOT_SRC 331 +#define IMX7D_AUDIO_MCLK_ROOT_CG 332 +#define IMX7D_AUDIO_MCLK_ROOT_DIV 333 +#define IMX7D_WRCLK_ROOT_CLK 334 +#define IMX7D_WRCLK_ROOT_SRC 335 +#define IMX7D_WRCLK_ROOT_CG 336 +#define IMX7D_WRCLK_ROOT_DIV 337 +#define IMX7D_CLKO1_ROOT_SRC 338 +#define IMX7D_CLKO1_ROOT_CG 339 +#define IMX7D_CLKO1_ROOT_DIV 340 +#define IMX7D_CLKO2_ROOT_SRC 341 +#define IMX7D_CLKO2_ROOT_CG 342 +#define IMX7D_CLKO2_ROOT_DIV 343 +#define IMX7D_MAIN_AXI_ROOT_PRE_DIV 344 +#define IMX7D_DISP_AXI_ROOT_PRE_DIV 345 +#define IMX7D_ENET_AXI_ROOT_PRE_DIV 346 +#define IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV 347 +#define IMX7D_AHB_CHANNEL_ROOT_PRE_DIV 348 +#define 
IMX7D_USB_HSIC_ROOT_PRE_DIV 349 +#define IMX7D_PCIE_CTRL_ROOT_PRE_DIV 350 +#define IMX7D_PCIE_PHY_ROOT_PRE_DIV 351 +#define IMX7D_EPDC_PIXEL_ROOT_PRE_DIV 352 +#define IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV 353 +#define IMX7D_MIPI_DSI_ROOT_PRE_DIV 354 +#define IMX7D_MIPI_CSI_ROOT_PRE_DIV 355 +#define IMX7D_MIPI_DPHY_ROOT_PRE_DIV 356 +#define IMX7D_SAI1_ROOT_PRE_DIV 357 +#define IMX7D_SAI2_ROOT_PRE_DIV 358 +#define IMX7D_SAI3_ROOT_PRE_DIV 359 +#define IMX7D_SPDIF_ROOT_PRE_DIV 360 +#define IMX7D_ENET1_REF_ROOT_PRE_DIV 361 +#define IMX7D_ENET1_TIME_ROOT_PRE_DIV 362 +#define IMX7D_ENET2_REF_ROOT_PRE_DIV 363 +#define IMX7D_ENET2_TIME_ROOT_PRE_DIV 364 +#define IMX7D_ENET_PHY_REF_ROOT_PRE_DIV 365 +#define IMX7D_EIM_ROOT_PRE_DIV 366 +#define IMX7D_NAND_ROOT_PRE_DIV 367 +#define IMX7D_QSPI_ROOT_PRE_DIV 368 +#define IMX7D_USDHC1_ROOT_PRE_DIV 369 +#define IMX7D_USDHC2_ROOT_PRE_DIV 370 +#define IMX7D_USDHC3_ROOT_PRE_DIV 371 +#define IMX7D_CAN1_ROOT_PRE_DIV 372 +#define IMX7D_CAN2_ROOT_PRE_DIV 373 +#define IMX7D_I2C1_ROOT_PRE_DIV 374 +#define IMX7D_I2C2_ROOT_PRE_DIV 375 +#define IMX7D_I2C3_ROOT_PRE_DIV 376 +#define IMX7D_I2C4_ROOT_PRE_DIV 377 +#define IMX7D_UART1_ROOT_PRE_DIV 378 +#define IMX7D_UART2_ROOT_PRE_DIV 379 +#define IMX7D_UART3_ROOT_PRE_DIV 380 +#define IMX7D_UART4_ROOT_PRE_DIV 381 +#define IMX7D_UART5_ROOT_PRE_DIV 382 +#define IMX7D_UART6_ROOT_PRE_DIV 383 +#define IMX7D_UART7_ROOT_PRE_DIV 384 +#define IMX7D_ECSPI1_ROOT_PRE_DIV 385 +#define IMX7D_ECSPI2_ROOT_PRE_DIV 386 +#define IMX7D_ECSPI3_ROOT_PRE_DIV 387 +#define IMX7D_ECSPI4_ROOT_PRE_DIV 388 +#define IMX7D_PWM1_ROOT_PRE_DIV 389 +#define IMX7D_PWM2_ROOT_PRE_DIV 390 +#define IMX7D_PWM3_ROOT_PRE_DIV 391 +#define IMX7D_PWM4_ROOT_PRE_DIV 392 +#define IMX7D_FLEXTIMER1_ROOT_PRE_DIV 393 +#define IMX7D_FLEXTIMER2_ROOT_PRE_DIV 394 +#define IMX7D_SIM1_ROOT_PRE_DIV 395 +#define IMX7D_SIM2_ROOT_PRE_DIV 396 +#define IMX7D_GPT1_ROOT_PRE_DIV 397 +#define IMX7D_GPT2_ROOT_PRE_DIV 398 +#define IMX7D_GPT3_ROOT_PRE_DIV 399 +#define IMX7D_GPT4_ROOT_PRE_DIV 400 +#define IMX7D_TRACE_ROOT_PRE_DIV 401 +#define IMX7D_WDOG_ROOT_PRE_DIV 402 +#define IMX7D_CSI_MCLK_ROOT_PRE_DIV 403 +#define IMX7D_AUDIO_MCLK_ROOT_PRE_DIV 404 +#define IMX7D_WRCLK_ROOT_PRE_DIV 405 +#define IMX7D_CLKO1_ROOT_PRE_DIV 406 +#define IMX7D_CLKO2_ROOT_PRE_DIV 407 +#define IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV 408 +#define IMX7D_DRAM_ALT_ROOT_PRE_DIV 409 +#define IMX7D_LVDS1_IN_CLK 410 +#define IMX7D_LVDS1_OUT_SEL 411 +#define IMX7D_LVDS1_OUT_CLK 412 +#define IMX7D_CLK_DUMMY 413 +#define IMX7D_GPT_3M_CLK 414 +#define IMX7D_OCRAM_CLK 415 +#define IMX7D_OCRAM_S_CLK 416 +#define IMX7D_WDOG2_ROOT_CLK 417 +#define IMX7D_WDOG3_ROOT_CLK 418 +#define IMX7D_WDOG4_ROOT_CLK 419 +#define IMX7D_SDMA_CORE_CLK 420 +#define IMX7D_USB1_MAIN_480M_CLK 421 +#define IMX7D_USB_CTRL_CLK 422 +#define IMX7D_USB_PHY1_CLK 423 +#define IMX7D_USB_PHY2_CLK 424 +#define IMX7D_IPG_ROOT_CLK 425 +#define IMX7D_SAI1_IPG_CLK 426 +#define IMX7D_SAI2_IPG_CLK 427 +#define IMX7D_SAI3_IPG_CLK 428 +#define IMX7D_PLL_AUDIO_TEST_DIV 429 +#define IMX7D_PLL_AUDIO_POST_DIV 430 +#define IMX7D_PLL_VIDEO_TEST_DIV 431 +#define IMX7D_PLL_VIDEO_POST_DIV 432 +#define IMX7D_MU_ROOT_CLK 433 +#define IMX7D_SEMA4_HS_ROOT_CLK 434 +#define IMX7D_PLL_DRAM_TEST_DIV 435 +#define IMX7D_ADC_ROOT_CLK 436 +#define IMX7D_CLK_ARM 437 +#define IMX7D_CKIL 438 +#define IMX7D_OCOTP_CLK 439 +#define IMX7D_NAND_RAWNAND_CLK 440 +#define IMX7D_NAND_USDHC_BUS_RAWNAND_CLK 441 +#define IMX7D_SNVS_CLK 442 +#define IMX7D_CAAM_CLK 443 +#define IMX7D_KPP_ROOT_CLK 444 +#define IMX7D_CLK_END 
445 +#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/jz4740-cgu.h new file mode 100644 index 000000000..6ed83f926 --- /dev/null +++ b/include/dt-bindings/clock/jz4740-cgu.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4740-cgu DT binding. + * + * They are roughly ordered as: + * - external clocks + * - PLLs + * - muxes/dividers in the order they appear in the jz4740 programmers manual + * - gates in order of their bit in the CLKGR* registers + */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ + +#define JZ4740_CLK_EXT 0 +#define JZ4740_CLK_RTC 1 +#define JZ4740_CLK_PLL 2 +#define JZ4740_CLK_PLL_HALF 3 +#define JZ4740_CLK_CCLK 4 +#define JZ4740_CLK_HCLK 5 +#define JZ4740_CLK_PCLK 6 +#define JZ4740_CLK_MCLK 7 +#define JZ4740_CLK_LCD 8 +#define JZ4740_CLK_LCD_PCLK 9 +#define JZ4740_CLK_I2S 10 +#define JZ4740_CLK_SPI 11 +#define JZ4740_CLK_MMC 12 +#define JZ4740_CLK_UHC 13 +#define JZ4740_CLK_UDC 14 +#define JZ4740_CLK_UART0 15 +#define JZ4740_CLK_UART1 16 +#define JZ4740_CLK_DMA 17 +#define JZ4740_CLK_IPU 18 +#define JZ4740_CLK_ADC 19 +#define JZ4740_CLK_I2C 20 +#define JZ4740_CLK_AIC 21 + +#endif /* __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ */ diff --git a/include/dt-bindings/clock/jz4770-cgu.h b/include/dt-bindings/clock/jz4770-cgu.h new file mode 100644 index 000000000..d68a7695a --- /dev/null +++ b/include/dt-bindings/clock/jz4770-cgu.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4770-cgu DT binding. + */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ + +#define JZ4770_CLK_EXT 0 +#define JZ4770_CLK_OSC32K 1 +#define JZ4770_CLK_PLL0 2 +#define JZ4770_CLK_PLL1 3 +#define JZ4770_CLK_CCLK 4 +#define JZ4770_CLK_H0CLK 5 +#define JZ4770_CLK_H1CLK 6 +#define JZ4770_CLK_H2CLK 7 +#define JZ4770_CLK_C1CLK 8 +#define JZ4770_CLK_PCLK 9 +#define JZ4770_CLK_MMC0_MUX 10 +#define JZ4770_CLK_MMC0 11 +#define JZ4770_CLK_MMC1_MUX 12 +#define JZ4770_CLK_MMC1 13 +#define JZ4770_CLK_MMC2_MUX 14 +#define JZ4770_CLK_MMC2 15 +#define JZ4770_CLK_CIM 16 +#define JZ4770_CLK_UHC 17 +#define JZ4770_CLK_GPU 18 +#define JZ4770_CLK_BCH 19 +#define JZ4770_CLK_LPCLK_MUX 20 +#define JZ4770_CLK_GPS 21 +#define JZ4770_CLK_SSI_MUX 22 +#define JZ4770_CLK_PCM_MUX 23 +#define JZ4770_CLK_I2S 24 +#define JZ4770_CLK_OTG 25 +#define JZ4770_CLK_SSI0 26 +#define JZ4770_CLK_SSI1 27 +#define JZ4770_CLK_SSI2 28 +#define JZ4770_CLK_PCM0 29 +#define JZ4770_CLK_PCM1 30 +#define JZ4770_CLK_DMA 31 +#define JZ4770_CLK_I2C0 32 +#define JZ4770_CLK_I2C1 33 +#define JZ4770_CLK_I2C2 34 +#define JZ4770_CLK_UART0 35 +#define JZ4770_CLK_UART1 36 +#define JZ4770_CLK_UART2 37 +#define JZ4770_CLK_UART3 38 +#define JZ4770_CLK_IPU 39 +#define JZ4770_CLK_ADC 40 +#define JZ4770_CLK_AIC 41 +#define JZ4770_CLK_AUX 42 +#define JZ4770_CLK_VPU 43 +#define JZ4770_CLK_UHC_PHY 44 +#define JZ4770_CLK_OTG_PHY 45 +#define JZ4770_CLK_EXT512 46 +#define JZ4770_CLK_RTC 47 + +#endif /* __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ */ diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/jz4780-cgu.h new file mode 100644 index 000000000..1859ce53e --- /dev/null +++ b/include/dt-bindings/clock/jz4780-cgu.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4780-cgu DT binding. 
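+ * A consumer node selects one of these clocks by index; for example
+ * (illustrative only, assuming the CGU node is labelled &cgu in the
+ * board .dts):
+ *   clocks = <&cgu JZ4780_CLK_UART0>;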
+ * + * They are roughly ordered as: + * - external clocks + * - PLLs + * - muxes/dividers in the order they appear in the jz4780 programmers manual + * - gates in order of their bit in the CLKGR* registers + */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ + +#define JZ4780_CLK_EXCLK 0 +#define JZ4780_CLK_RTCLK 1 +#define JZ4780_CLK_APLL 2 +#define JZ4780_CLK_MPLL 3 +#define JZ4780_CLK_EPLL 4 +#define JZ4780_CLK_VPLL 5 +#define JZ4780_CLK_OTGPHY 6 +#define JZ4780_CLK_SCLKA 7 +#define JZ4780_CLK_CPUMUX 8 +#define JZ4780_CLK_CPU 9 +#define JZ4780_CLK_L2CACHE 10 +#define JZ4780_CLK_AHB0 11 +#define JZ4780_CLK_AHB2PMUX 12 +#define JZ4780_CLK_AHB2 13 +#define JZ4780_CLK_PCLK 14 +#define JZ4780_CLK_DDR 15 +#define JZ4780_CLK_VPU 16 +#define JZ4780_CLK_I2SPLL 17 +#define JZ4780_CLK_I2S 18 +#define JZ4780_CLK_LCD0PIXCLK 19 +#define JZ4780_CLK_LCD1PIXCLK 20 +#define JZ4780_CLK_MSCMUX 21 +#define JZ4780_CLK_MSC0 22 +#define JZ4780_CLK_MSC1 23 +#define JZ4780_CLK_MSC2 24 +#define JZ4780_CLK_UHC 25 +#define JZ4780_CLK_SSIPLL 26 +#define JZ4780_CLK_SSI 27 +#define JZ4780_CLK_CIMMCLK 28 +#define JZ4780_CLK_PCMPLL 29 +#define JZ4780_CLK_PCM 30 +#define JZ4780_CLK_GPU 31 +#define JZ4780_CLK_HDMI 32 +#define JZ4780_CLK_BCH 33 +#define JZ4780_CLK_NEMC 34 +#define JZ4780_CLK_OTG0 35 +#define JZ4780_CLK_SSI0 36 +#define JZ4780_CLK_SMB0 37 +#define JZ4780_CLK_SMB1 38 +#define JZ4780_CLK_SCC 39 +#define JZ4780_CLK_AIC 40 +#define JZ4780_CLK_TSSI0 41 +#define JZ4780_CLK_OWI 42 +#define JZ4780_CLK_KBC 43 +#define JZ4780_CLK_SADC 44 +#define JZ4780_CLK_UART0 45 +#define JZ4780_CLK_UART1 46 +#define JZ4780_CLK_UART2 47 +#define JZ4780_CLK_UART3 48 +#define JZ4780_CLK_SSI1 49 +#define JZ4780_CLK_SSI2 50 +#define JZ4780_CLK_PDMA 51 +#define JZ4780_CLK_GPS 52 +#define JZ4780_CLK_MAC 53 +#define JZ4780_CLK_SMB2 54 +#define JZ4780_CLK_CIM 55 +#define JZ4780_CLK_LCD 56 +#define JZ4780_CLK_TVE 57 +#define JZ4780_CLK_IPU 58 +#define JZ4780_CLK_DDR0 59 +#define JZ4780_CLK_DDR1 60 +#define JZ4780_CLK_SMB3 61 +#define JZ4780_CLK_TSSI1 62 +#define JZ4780_CLK_COMPRESS 63 +#define JZ4780_CLK_AIC1 64 +#define JZ4780_CLK_GPVLC 65 +#define JZ4780_CLK_OTG1 66 +#define JZ4780_CLK_UART4 67 +#define JZ4780_CLK_AHBMON 68 +#define JZ4780_CLK_SMB4 69 +#define JZ4780_CLK_DES 70 +#define JZ4780_CLK_X2D 71 +#define JZ4780_CLK_CORE1 72 + +#endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */ diff --git a/include/dt-bindings/clock/lpc18xx-ccu.h b/include/dt-bindings/clock/lpc18xx-ccu.h new file mode 100644 index 000000000..bbfe00b6a --- /dev/null +++ b/include/dt-bindings/clock/lpc18xx-ccu.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2015 Joachim Eastwood + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the licence that better fits your requirements. 
+ * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + * + */ + +/* Clock Control Unit 1 (CCU1) clock offsets */ +#define CLK_APB3_BUS 0x100 +#define CLK_APB3_I2C1 0x108 +#define CLK_APB3_DAC 0x110 +#define CLK_APB3_ADC0 0x118 +#define CLK_APB3_ADC1 0x120 +#define CLK_APB3_CAN0 0x128 +#define CLK_APB1_BUS 0x200 +#define CLK_APB1_MOTOCON_PWM 0x208 +#define CLK_APB1_I2C0 0x210 +#define CLK_APB1_I2S 0x218 +#define CLK_APB1_CAN1 0x220 +#define CLK_SPIFI 0x300 +#define CLK_CPU_BUS 0x400 +#define CLK_CPU_SPIFI 0x408 +#define CLK_CPU_GPIO 0x410 +#define CLK_CPU_LCD 0x418 +#define CLK_CPU_ETHERNET 0x420 +#define CLK_CPU_USB0 0x428 +#define CLK_CPU_EMC 0x430 +#define CLK_CPU_SDIO 0x438 +#define CLK_CPU_DMA 0x440 +#define CLK_CPU_CORE 0x448 +#define CLK_CPU_SCT 0x468 +#define CLK_CPU_USB1 0x470 +#define CLK_CPU_EMCDIV 0x478 +#define CLK_CPU_FLASHA 0x480 +#define CLK_CPU_FLASHB 0x488 +#define CLK_CPU_M0APP 0x490 +#define CLK_CPU_ADCHS 0x498 +#define CLK_CPU_EEPROM 0x4a0 +#define CLK_CPU_WWDT 0x500 +#define CLK_CPU_UART0 0x508 +#define CLK_CPU_UART1 0x510 +#define CLK_CPU_SSP0 0x518 +#define CLK_CPU_TIMER0 0x520 +#define CLK_CPU_TIMER1 0x528 +#define CLK_CPU_SCU 0x530 +#define CLK_CPU_CREG 0x538 +#define CLK_CPU_RITIMER 0x600 +#define CLK_CPU_UART2 0x608 +#define CLK_CPU_UART3 0x610 +#define CLK_CPU_TIMER2 0x618 +#define CLK_CPU_TIMER3 0x620 +#define CLK_CPU_SSP1 0x628 +#define CLK_CPU_QEI 0x630 +#define CLK_PERIPH_BUS 0x700 +#define CLK_PERIPH_CORE 0x710 +#define CLK_PERIPH_SGPIO 0x718 +#define CLK_USB0 0x800 +#define CLK_USB1 0x900 +#define CLK_SPI 0xA00 +#define CLK_ADCHS 0xB00 + +/* Clock Control Unit 2 (CCU2) clock offsets */ +#define CLK_AUDIO 0x100 +#define CLK_APB2_UART3 0x200 +#define CLK_APB2_UART2 0x300 +#define CLK_APB0_UART1 0x400 +#define CLK_APB0_UART0 0x500 +#define CLK_APB2_SSP1 0x600 +#define CLK_APB0_SSP0 0x700 +#define CLK_SDIO 0x800 diff --git a/include/dt-bindings/clock/lpc18xx-cgu.h b/include/dt-bindings/clock/lpc18xx-cgu.h new file mode 100644 index 000000000..6e57c6d2c --- /dev/null +++ b/include/dt-bindings/clock/lpc18xx-cgu.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2015 Joachim Eastwood + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the licence that better fits your requirements. 
+ * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + * + */ + +/* LPC18xx/43xx base clock ids */ +#define BASE_SAFE_CLK 0 +#define BASE_USB0_CLK 1 +#define BASE_PERIPH_CLK 2 +#define BASE_USB1_CLK 3 +#define BASE_CPU_CLK 4 +#define BASE_SPIFI_CLK 5 +#define BASE_SPI_CLK 6 +#define BASE_PHY_RX_CLK 7 +#define BASE_PHY_TX_CLK 8 +#define BASE_APB1_CLK 9 +#define BASE_APB3_CLK 10 +#define BASE_LCD_CLK 11 +#define BASE_ADCHS_CLK 12 +#define BASE_SDIO_CLK 13 +#define BASE_SSP0_CLK 14 +#define BASE_SSP1_CLK 15 +#define BASE_UART0_CLK 16 +#define BASE_UART1_CLK 17 +#define BASE_UART2_CLK 18 +#define BASE_UART3_CLK 19 +#define BASE_OUT_CLK 20 +#define BASE_RES1_CLK 21 +#define BASE_RES2_CLK 22 +#define BASE_RES3_CLK 23 +#define BASE_RES4_CLK 24 +#define BASE_AUDIO_CLK 25 +#define BASE_CGU_OUT0_CLK 26 +#define BASE_CGU_OUT1_CLK 27 +#define BASE_CLK_MAX (BASE_CGU_OUT1_CLK + 1) diff --git a/include/dt-bindings/clock/lpc32xx-clock.h b/include/dt-bindings/clock/lpc32xx-clock.h new file mode 100644 index 000000000..e624d3a52 --- /dev/null +++ b/include/dt-bindings/clock/lpc32xx-clock.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2015 Vladimir Zapolskiy + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the licence that better fits your requirements. + * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + * + */ + +#ifndef __DT_BINDINGS_LPC32XX_CLOCK_H +#define __DT_BINDINGS_LPC32XX_CLOCK_H + +/* LPC32XX System Control Block clocks */ +#define LPC32XX_CLK_RTC 1 +#define LPC32XX_CLK_DMA 2 +#define LPC32XX_CLK_MLC 3 +#define LPC32XX_CLK_SLC 4 +#define LPC32XX_CLK_LCD 5 +#define LPC32XX_CLK_MAC 6 +#define LPC32XX_CLK_SD 7 +#define LPC32XX_CLK_DDRAM 8 +#define LPC32XX_CLK_SSP0 9 +#define LPC32XX_CLK_SSP1 10 +#define LPC32XX_CLK_UART3 11 +#define LPC32XX_CLK_UART4 12 +#define LPC32XX_CLK_UART5 13 +#define LPC32XX_CLK_UART6 14 +#define LPC32XX_CLK_IRDA 15 +#define LPC32XX_CLK_I2C1 16 +#define LPC32XX_CLK_I2C2 17 +#define LPC32XX_CLK_TIMER0 18 +#define LPC32XX_CLK_TIMER1 19 +#define LPC32XX_CLK_TIMER2 20 +#define LPC32XX_CLK_TIMER3 21 +#define LPC32XX_CLK_TIMER4 22 +#define LPC32XX_CLK_TIMER5 23 +#define LPC32XX_CLK_WDOG 24 +#define LPC32XX_CLK_I2S0 25 +#define LPC32XX_CLK_I2S1 26 +#define LPC32XX_CLK_SPI1 27 +#define LPC32XX_CLK_SPI2 28 +#define LPC32XX_CLK_MCPWM 29 +#define LPC32XX_CLK_HSTIMER 30 +#define LPC32XX_CLK_KEY 31 +#define LPC32XX_CLK_PWM1 32 +#define LPC32XX_CLK_PWM2 33 +#define LPC32XX_CLK_ADC 34 +#define LPC32XX_CLK_HCLK_PLL 35 +#define LPC32XX_CLK_PERIPH 36 + +/* LPC32XX USB clocks */ +#define LPC32XX_USB_CLK_I2C 1 +#define LPC32XX_USB_CLK_DEVICE 2 +#define LPC32XX_USB_CLK_HOST 3 + +#endif /* __DT_BINDINGS_LPC32XX_CLOCK_H */ diff --git a/include/dt-bindings/clock/lsi,axm5516-clks.h b/include/dt-bindings/clock/lsi,axm5516-clks.h new file mode 100644 index 000000000..beb41ace5 --- /dev/null +++ b/include/dt-bindings/clock/lsi,axm5516-clks.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2014 LSI Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ */ + +#ifndef _DT_BINDINGS_CLK_AXM5516_H +#define _DT_BINDINGS_CLK_AXM5516_H + +#define AXXIA_CLK_FAB_PLL 0 +#define AXXIA_CLK_CPU_PLL 1 +#define AXXIA_CLK_SYS_PLL 2 +#define AXXIA_CLK_SM0_PLL 3 +#define AXXIA_CLK_SM1_PLL 4 +#define AXXIA_CLK_FAB_DIV 5 +#define AXXIA_CLK_SYS_DIV 6 +#define AXXIA_CLK_NRCP_DIV 7 +#define AXXIA_CLK_CPU0_DIV 8 +#define AXXIA_CLK_CPU1_DIV 9 +#define AXXIA_CLK_CPU2_DIV 10 +#define AXXIA_CLK_CPU3_DIV 11 +#define AXXIA_CLK_PER_DIV 12 +#define AXXIA_CLK_MMC_DIV 13 +#define AXXIA_CLK_FAB 14 +#define AXXIA_CLK_SYS 15 +#define AXXIA_CLK_NRCP 16 +#define AXXIA_CLK_CPU0 17 +#define AXXIA_CLK_CPU1 18 +#define AXXIA_CLK_CPU2 19 +#define AXXIA_CLK_CPU3 20 +#define AXXIA_CLK_PER 21 +#define AXXIA_CLK_MMC 22 + +#endif diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h new file mode 100644 index 000000000..228a5e234 --- /dev/null +++ b/include/dt-bindings/clock/marvell,mmp2.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DTS_MARVELL_MMP2_CLOCK_H +#define __DTS_MARVELL_MMP2_CLOCK_H + +/* fixed clocks and plls */ +#define MMP2_CLK_CLK32 1 +#define MMP2_CLK_VCTCXO 2 +#define MMP2_CLK_PLL1 3 +#define MMP2_CLK_PLL1_2 8 +#define MMP2_CLK_PLL1_4 9 +#define MMP2_CLK_PLL1_8 10 +#define MMP2_CLK_PLL1_16 11 +#define MMP2_CLK_PLL1_3 12 +#define MMP2_CLK_PLL1_6 13 +#define MMP2_CLK_PLL1_12 14 +#define MMP2_CLK_PLL1_20 15 +#define MMP2_CLK_PLL2 16 +#define MMP2_CLK_PLL2_2 17 +#define MMP2_CLK_PLL2_4 18 +#define MMP2_CLK_PLL2_8 19 +#define MMP2_CLK_PLL2_16 20 +#define MMP2_CLK_PLL2_3 21 +#define MMP2_CLK_PLL2_6 22 +#define MMP2_CLK_PLL2_12 23 +#define MMP2_CLK_VCTCXO_2 24 +#define MMP2_CLK_VCTCXO_4 25 +#define MMP2_CLK_UART_PLL 26 +#define MMP2_CLK_USB_PLL 27 + +/* apb periphrals */ +#define MMP2_CLK_TWSI0 60 +#define MMP2_CLK_TWSI1 61 +#define MMP2_CLK_TWSI2 62 +#define MMP2_CLK_TWSI3 63 +#define MMP2_CLK_TWSI4 64 +#define MMP2_CLK_TWSI5 65 +#define MMP2_CLK_GPIO 66 +#define MMP2_CLK_KPC 67 +#define MMP2_CLK_RTC 68 +#define MMP2_CLK_PWM0 69 +#define MMP2_CLK_PWM1 70 +#define MMP2_CLK_PWM2 71 +#define MMP2_CLK_PWM3 72 +#define MMP2_CLK_UART0 73 +#define MMP2_CLK_UART1 74 +#define MMP2_CLK_UART2 75 +#define MMP2_CLK_UART3 76 +#define MMP2_CLK_SSP0 77 +#define MMP2_CLK_SSP1 78 +#define MMP2_CLK_SSP2 79 +#define MMP2_CLK_SSP3 80 +#define MMP2_CLK_TIMER 81 + +/* axi periphrals */ +#define MMP2_CLK_SDH0 101 +#define MMP2_CLK_SDH1 102 +#define MMP2_CLK_SDH2 103 +#define MMP2_CLK_SDH3 104 +#define MMP2_CLK_USB 105 +#define MMP2_CLK_DISP0 106 +#define MMP2_CLK_DISP0_MUX 107 +#define MMP2_CLK_DISP0_SPHY 108 +#define MMP2_CLK_DISP1 109 +#define MMP2_CLK_DISP1_MUX 110 +#define MMP2_CLK_CCIC_ARBITER 111 +#define MMP2_CLK_CCIC0 112 +#define MMP2_CLK_CCIC0_MIX 113 +#define MMP2_CLK_CCIC0_PHY 114 +#define MMP2_CLK_CCIC0_SPHY 115 +#define MMP2_CLK_CCIC1 116 +#define MMP2_CLK_CCIC1_MIX 117 +#define MMP2_CLK_CCIC1_PHY 118 +#define MMP2_CLK_CCIC1_SPHY 119 + +#define MMP2_NR_CLKS 200 +#endif diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h new file mode 100644 index 000000000..caf90436b --- /dev/null +++ b/include/dt-bindings/clock/marvell,pxa168.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DTS_MARVELL_PXA168_CLOCK_H +#define __DTS_MARVELL_PXA168_CLOCK_H + +/* fixed clocks and plls */ +#define PXA168_CLK_CLK32 1 +#define PXA168_CLK_VCTCXO 2 +#define PXA168_CLK_PLL1 3 +#define PXA168_CLK_PLL1_2 8 +#define PXA168_CLK_PLL1_4 9 +#define PXA168_CLK_PLL1_8 10 +#define 
PXA168_CLK_PLL1_16 11 +#define PXA168_CLK_PLL1_6 12 +#define PXA168_CLK_PLL1_12 13 +#define PXA168_CLK_PLL1_24 14 +#define PXA168_CLK_PLL1_48 15 +#define PXA168_CLK_PLL1_96 16 +#define PXA168_CLK_PLL1_13 17 +#define PXA168_CLK_PLL1_13_1_5 18 +#define PXA168_CLK_PLL1_2_1_5 19 +#define PXA168_CLK_PLL1_3_16 20 +#define PXA168_CLK_PLL1_192 21 +#define PXA168_CLK_UART_PLL 27 +#define PXA168_CLK_USB_PLL 28 + +/* apb periphrals */ +#define PXA168_CLK_TWSI0 60 +#define PXA168_CLK_TWSI1 61 +#define PXA168_CLK_TWSI2 62 +#define PXA168_CLK_TWSI3 63 +#define PXA168_CLK_GPIO 64 +#define PXA168_CLK_KPC 65 +#define PXA168_CLK_RTC 66 +#define PXA168_CLK_PWM0 67 +#define PXA168_CLK_PWM1 68 +#define PXA168_CLK_PWM2 69 +#define PXA168_CLK_PWM3 70 +#define PXA168_CLK_UART0 71 +#define PXA168_CLK_UART1 72 +#define PXA168_CLK_UART2 73 +#define PXA168_CLK_SSP0 74 +#define PXA168_CLK_SSP1 75 +#define PXA168_CLK_SSP2 76 +#define PXA168_CLK_SSP3 77 +#define PXA168_CLK_SSP4 78 +#define PXA168_CLK_TIMER 79 + +/* axi periphrals */ +#define PXA168_CLK_DFC 100 +#define PXA168_CLK_SDH0 101 +#define PXA168_CLK_SDH1 102 +#define PXA168_CLK_SDH2 103 +#define PXA168_CLK_USB 104 +#define PXA168_CLK_SPH 105 +#define PXA168_CLK_DISP0 106 +#define PXA168_CLK_CCIC0 107 +#define PXA168_CLK_CCIC0_PHY 108 +#define PXA168_CLK_CCIC0_SPHY 109 + +#define PXA168_NR_CLKS 200 +#endif diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h new file mode 100644 index 000000000..5dca48202 --- /dev/null +++ b/include/dt-bindings/clock/marvell,pxa1928.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DTS_MARVELL_PXA1928_CLOCK_H +#define __DTS_MARVELL_PXA1928_CLOCK_H + +/* + * Clock ID values here correspond to the control register offset/4. 
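+ * As a worked example (derived from that rule, not from the datasheet):
+ * PXA1928_CLK_UART0 below is 0x0b, so its control register would sit at
+ * offset 0x0b * 4 = 0x2c.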
+ */ + +/* apb peripherals */ +#define PXA1928_CLK_RTC 0x00 +#define PXA1928_CLK_TWSI0 0x01 +#define PXA1928_CLK_TWSI1 0x02 +#define PXA1928_CLK_TWSI2 0x03 +#define PXA1928_CLK_TWSI3 0x04 +#define PXA1928_CLK_OWIRE 0x05 +#define PXA1928_CLK_KPC 0x06 +#define PXA1928_CLK_TB_ROTARY 0x07 +#define PXA1928_CLK_SW_JTAG 0x08 +#define PXA1928_CLK_TIMER1 0x09 +#define PXA1928_CLK_UART0 0x0b +#define PXA1928_CLK_UART1 0x0c +#define PXA1928_CLK_UART2 0x0d +#define PXA1928_CLK_GPIO 0x0e +#define PXA1928_CLK_PWM0 0x0f +#define PXA1928_CLK_PWM1 0x10 +#define PXA1928_CLK_PWM2 0x11 +#define PXA1928_CLK_PWM3 0x12 +#define PXA1928_CLK_SSP0 0x13 +#define PXA1928_CLK_SSP1 0x14 +#define PXA1928_CLK_SSP2 0x15 + +#define PXA1928_CLK_TWSI4 0x1f +#define PXA1928_CLK_TWSI5 0x20 +#define PXA1928_CLK_UART3 0x22 +#define PXA1928_CLK_THSENS_GLOB 0x24 +#define PXA1928_CLK_THSENS_CPU 0x26 +#define PXA1928_CLK_THSENS_VPU 0x27 +#define PXA1928_CLK_THSENS_GC 0x28 +#define PXA1928_APBC_NR_CLKS 0x30 + + +/* axi peripherals */ +#define PXA1928_CLK_SDH0 0x15 +#define PXA1928_CLK_SDH1 0x16 +#define PXA1928_CLK_USB 0x17 +#define PXA1928_CLK_NAND 0x18 +#define PXA1928_CLK_DMA 0x19 + +#define PXA1928_CLK_SDH2 0x3a +#define PXA1928_CLK_SDH3 0x3b +#define PXA1928_CLK_HSIC 0x3e +#define PXA1928_CLK_SDH4 0x57 +#define PXA1928_CLK_GC3D 0x5d +#define PXA1928_CLK_GC2D 0x5f + +#define PXA1928_APMU_NR_CLKS 0x60 + +#endif diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h new file mode 100644 index 000000000..7bf462389 --- /dev/null +++ b/include/dt-bindings/clock/marvell,pxa910.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DTS_MARVELL_PXA910_CLOCK_H +#define __DTS_MARVELL_PXA910_CLOCK_H + +/* fixed clocks and plls */ +#define PXA910_CLK_CLK32 1 +#define PXA910_CLK_VCTCXO 2 +#define PXA910_CLK_PLL1 3 +#define PXA910_CLK_PLL1_2 8 +#define PXA910_CLK_PLL1_4 9 +#define PXA910_CLK_PLL1_8 10 +#define PXA910_CLK_PLL1_16 11 +#define PXA910_CLK_PLL1_6 12 +#define PXA910_CLK_PLL1_12 13 +#define PXA910_CLK_PLL1_24 14 +#define PXA910_CLK_PLL1_48 15 +#define PXA910_CLK_PLL1_96 16 +#define PXA910_CLK_PLL1_13 17 +#define PXA910_CLK_PLL1_13_1_5 18 +#define PXA910_CLK_PLL1_2_1_5 19 +#define PXA910_CLK_PLL1_3_16 20 +#define PXA910_CLK_PLL1_192 21 +#define PXA910_CLK_UART_PLL 27 +#define PXA910_CLK_USB_PLL 28 + +/* apb periphrals */ +#define PXA910_CLK_TWSI0 60 +#define PXA910_CLK_TWSI1 61 +#define PXA910_CLK_TWSI2 62 +#define PXA910_CLK_TWSI3 63 +#define PXA910_CLK_GPIO 64 +#define PXA910_CLK_KPC 65 +#define PXA910_CLK_RTC 66 +#define PXA910_CLK_PWM0 67 +#define PXA910_CLK_PWM1 68 +#define PXA910_CLK_PWM2 69 +#define PXA910_CLK_PWM3 70 +#define PXA910_CLK_UART0 71 +#define PXA910_CLK_UART1 72 +#define PXA910_CLK_UART2 73 +#define PXA910_CLK_SSP0 74 +#define PXA910_CLK_SSP1 75 +#define PXA910_CLK_TIMER0 76 +#define PXA910_CLK_TIMER1 77 + +/* axi periphrals */ +#define PXA910_CLK_DFC 100 +#define PXA910_CLK_SDH0 101 +#define PXA910_CLK_SDH1 102 +#define PXA910_CLK_SDH2 103 +#define PXA910_CLK_USB 104 +#define PXA910_CLK_SPH 105 +#define PXA910_CLK_DISP0 106 +#define PXA910_CLK_CCIC0 107 +#define PXA910_CLK_CCIC0_PHY 108 +#define PXA910_CLK_CCIC0_SPHY 109 + +#define PXA910_NR_CLKS 200 +#endif diff --git a/include/dt-bindings/clock/maxim,max77620.h b/include/dt-bindings/clock/maxim,max77620.h new file mode 100644 index 000000000..82aba2849 --- /dev/null +++ b/include/dt-bindings/clock/maxim,max77620.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants clocks for the Maxim 77620 PMIC. + */ + +#ifndef _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H +#define _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H + +/* Fixed rate clocks. */ + +#define MAX77620_CLK_32K_OUT0 0 + +/* Total number of clocks. */ +#define MAX77620_CLKS_NUM (MAX77620_CLK_32K_OUT0 + 1) + +#endif /* _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H */ diff --git a/include/dt-bindings/clock/maxim,max77686.h b/include/dt-bindings/clock/maxim,max77686.h new file mode 100644 index 000000000..7b28b0905 --- /dev/null +++ b/include/dt-bindings/clock/maxim,max77686.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants clocks for the Maxim 77686 PMIC. + */ + +#ifndef _DT_BINDINGS_CLOCK_MAXIM_MAX77686_CLOCK_H +#define _DT_BINDINGS_CLOCK_MAXIM_MAX77686_CLOCK_H + +/* Fixed rate clocks. */ + +#define MAX77686_CLK_AP 0 +#define MAX77686_CLK_CP 1 +#define MAX77686_CLK_PMIC 2 + +/* Total number of clocks. */ +#define MAX77686_CLKS_NUM (MAX77686_CLK_PMIC + 1) + +#endif /* _DT_BINDINGS_CLOCK_MAXIM_MAX77686_CLOCK_H */ diff --git a/include/dt-bindings/clock/maxim,max77802.h b/include/dt-bindings/clock/maxim,max77802.h new file mode 100644 index 000000000..997312edc --- /dev/null +++ b/include/dt-bindings/clock/maxim,max77802.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2014 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants clocks for the Maxim 77802 PMIC. + */ + +#ifndef _DT_BINDINGS_CLOCK_MAXIM_MAX77802_CLOCK_H +#define _DT_BINDINGS_CLOCK_MAXIM_MAX77802_CLOCK_H + +/* Fixed rate clocks. */ + +#define MAX77802_CLK_32K_AP 0 +#define MAX77802_CLK_32K_CP 1 + +/* Total number of clocks. */ +#define MAX77802_CLKS_NUM (MAX77802_CLK_32K_CP + 1) + +#endif /* _DT_BINDINGS_CLOCK_MAXIM_MAX77802_CLOCK_H */ diff --git a/include/dt-bindings/clock/maxim,max9485.h b/include/dt-bindings/clock/maxim,max9485.h new file mode 100644 index 000000000..185b09ce1 --- /dev/null +++ b/include/dt-bindings/clock/maxim,max9485.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2018 Daniel Mack + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __DT_BINDINGS_MAX9485_CLK_H +#define __DT_BINDINGS_MAX9485_CLK_H + +#define MAX9485_MCLKOUT 0 +#define MAX9485_CLKOUT 1 +#define MAX9485_CLKOUT1 2 +#define MAX9485_CLKOUT2 3 + +#endif /* __DT_BINDINGS_MAX9485_CLK_H */ diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h new file mode 100644 index 000000000..a60f47b49 --- /dev/null +++ b/include/dt-bindings/clock/meson8b-clkc.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Meson8b clock tree IDs + */ + +#ifndef __MESON8B_CLKC_H +#define __MESON8B_CLKC_H + +#define CLKID_UNUSED 0 +#define CLKID_XTAL 1 +#define CLKID_PLL_FIXED 2 +#define CLKID_PLL_VID 3 +#define CLKID_PLL_SYS 4 +#define CLKID_FCLK_DIV2 5 +#define CLKID_FCLK_DIV3 6 +#define CLKID_FCLK_DIV4 7 +#define CLKID_FCLK_DIV5 8 +#define CLKID_FCLK_DIV7 9 +#define CLKID_CLK81 10 +#define CLKID_MALI 11 +#define CLKID_CPUCLK 12 +#define CLKID_ZERO 13 +#define CLKID_MPEG_SEL 14 +#define CLKID_MPEG_DIV 15 +#define CLKID_DDR 16 +#define CLKID_DOS 17 +#define CLKID_ISA 18 +#define CLKID_PL301 19 +#define CLKID_PERIPHS 20 +#define CLKID_SPICC 21 +#define CLKID_I2C 22 +#define CLKID_SAR_ADC 23 +#define CLKID_SMART_CARD 24 +#define CLKID_RNG0 25 +#define CLKID_UART0 26 +#define CLKID_SDHC 27 +#define CLKID_STREAM 28 +#define CLKID_ASYNC_FIFO 29 +#define CLKID_SDIO 30 +#define CLKID_ABUF 31 +#define CLKID_HIU_IFACE 32 +#define CLKID_ASSIST_MISC 33 +#define CLKID_SPI 34 +#define CLKID_I2S_SPDIF 35 +#define CLKID_ETH 36 +#define CLKID_DEMUX 37 +#define CLKID_AIU_GLUE 38 +#define CLKID_IEC958 39 +#define CLKID_I2S_OUT 40 +#define CLKID_AMCLK 41 +#define CLKID_AIFIFO2 42 +#define CLKID_MIXER 43 +#define CLKID_MIXER_IFACE 44 +#define CLKID_ADC 45 +#define CLKID_BLKMV 46 +#define CLKID_AIU 47 +#define CLKID_UART1 48 +#define CLKID_G2D 49 +#define CLKID_USB0 50 +#define CLKID_USB1 51 +#define CLKID_RESET 52 +#define CLKID_NAND 53 +#define CLKID_DOS_PARSER 54 +#define CLKID_USB 55 +#define CLKID_VDIN1 56 +#define CLKID_AHB_ARB0 57 +#define CLKID_EFUSE 58 +#define CLKID_BOOT_ROM 59 +#define CLKID_AHB_DATA_BUS 60 +#define CLKID_AHB_CTRL_BUS 61 +#define CLKID_HDMI_INTR_SYNC 62 +#define CLKID_HDMI_PCLK 63 +#define CLKID_USB1_DDR_BRIDGE 64 +#define CLKID_USB0_DDR_BRIDGE 65 +#define CLKID_MMC_PCLK 66 +#define CLKID_DVIN 67 +#define CLKID_UART2 68 +#define CLKID_SANA 69 +#define CLKID_VPU_INTR 70 +#define CLKID_SEC_AHB_AHB3_BRIDGE 71 +#define CLKID_CLK81_A9 72 +#define CLKID_VCLK2_VENCI0 73 +#define CLKID_VCLK2_VENCI1 74 +#define CLKID_VCLK2_VENCP0 75 +#define CLKID_VCLK2_VENCP1 76 +#define CLKID_GCLK_VENCI_INT 77 +#define CLKID_GCLK_VENCP_INT 78 +#define CLKID_DAC_CLK 79 +#define CLKID_AOCLK_GATE 80 +#define CLKID_IEC958_GATE 81 +#define CLKID_ENC480P 82 +#define CLKID_RNG1 83 +#define CLKID_GCLK_VENCL_INT 84 +#define CLKID_VCLK2_VENCLMCC 85 +#define CLKID_VCLK2_VENCL 86 +#define CLKID_VCLK2_OTHER 87 +#define CLKID_EDP 88 +#define CLKID_AO_MEDIA_CPU 89 +#define CLKID_AO_AHB_SRAM 90 +#define CLKID_AO_AHB_BUS 91 +#define CLKID_AO_IFACE 92 +#define CLKID_MPLL0 93 +#define CLKID_MPLL1 94 +#define CLKID_MPLL2 95 +#define CLKID_NAND_CLK 112 + +#endif /* __MESON8B_CLKC_H */ diff --git a/include/dt-bindings/clock/microchip,pic32-clock.h b/include/dt-bindings/clock/microchip,pic32-clock.h new file mode 100644 index 000000000..184647a6a --- /dev/null +++ b/include/dt-bindings/clock/microchip,pic32-clock.h @@ -0,0 +1,42 @@ +/* + * Purna Chandra Mandal, + * Copyright (C) 2015 Microchip Technology Inc. All rights reserved. 
+ * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ +#define _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ + +/* clock output indices */ +#define POSCCLK 0 +#define FRCCLK 1 +#define BFRCCLK 2 +#define LPRCCLK 3 +#define SOSCCLK 4 +#define FRCDIVCLK 5 +#define PLLCLK 6 +#define SCLK 7 +#define PB1CLK 8 +#define PB2CLK 9 +#define PB3CLK 10 +#define PB4CLK 11 +#define PB5CLK 12 +#define PB6CLK 13 +#define PB7CLK 14 +#define REF1CLK 15 +#define REF2CLK 16 +#define REF3CLK 17 +#define REF4CLK 18 +#define REF5CLK 19 +#define UPLLCLK 20 +#define MAXCLKS 21 + +#endif /* _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ */ diff --git a/include/dt-bindings/clock/mpc512x-clock.h b/include/dt-bindings/clock/mpc512x-clock.h new file mode 100644 index 000000000..13c316bf2 --- /dev/null +++ b/include/dt-bindings/clock/mpc512x-clock.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for MPC512x clock specs in DT bindings. + */ + +#ifndef _DT_BINDINGS_CLOCK_MPC512x_CLOCK_H +#define _DT_BINDINGS_CLOCK_MPC512x_CLOCK_H + +#define MPC512x_CLK_DUMMY 0 +#define MPC512x_CLK_REF 1 +#define MPC512x_CLK_SYS 2 +#define MPC512x_CLK_DIU 3 +#define MPC512x_CLK_VIU 4 +#define MPC512x_CLK_CSB 5 +#define MPC512x_CLK_E300 6 +#define MPC512x_CLK_IPS 7 +#define MPC512x_CLK_FEC 8 +#define MPC512x_CLK_SATA 9 +#define MPC512x_CLK_PATA 10 +#define MPC512x_CLK_NFC 11 +#define MPC512x_CLK_LPC 12 +#define MPC512x_CLK_MBX_BUS 13 +#define MPC512x_CLK_MBX 14 +#define MPC512x_CLK_MBX_3D 15 +#define MPC512x_CLK_AXE 16 +#define MPC512x_CLK_USB1 17 +#define MPC512x_CLK_USB2 18 +#define MPC512x_CLK_I2C 19 +#define MPC512x_CLK_MSCAN0_MCLK 20 +#define MPC512x_CLK_MSCAN1_MCLK 21 +#define MPC512x_CLK_MSCAN2_MCLK 22 +#define MPC512x_CLK_MSCAN3_MCLK 23 +#define MPC512x_CLK_BDLC 24 +#define MPC512x_CLK_SDHC 25 +#define MPC512x_CLK_PCI 26 +#define MPC512x_CLK_PSC_MCLK_IN 27 +#define MPC512x_CLK_SPDIF_TX 28 +#define MPC512x_CLK_SPDIF_RX 29 +#define MPC512x_CLK_SPDIF_MCLK 30 +#define MPC512x_CLK_SPDIF 31 +#define MPC512x_CLK_AC97 32 +#define MPC512x_CLK_PSC0_MCLK 33 +#define MPC512x_CLK_PSC1_MCLK 34 +#define MPC512x_CLK_PSC2_MCLK 35 +#define MPC512x_CLK_PSC3_MCLK 36 +#define MPC512x_CLK_PSC4_MCLK 37 +#define MPC512x_CLK_PSC5_MCLK 38 +#define MPC512x_CLK_PSC6_MCLK 39 +#define MPC512x_CLK_PSC7_MCLK 40 +#define MPC512x_CLK_PSC8_MCLK 41 +#define MPC512x_CLK_PSC9_MCLK 42 +#define MPC512x_CLK_PSC10_MCLK 43 +#define MPC512x_CLK_PSC11_MCLK 44 +#define MPC512x_CLK_PSC_FIFO 45 +#define MPC512x_CLK_PSC0 46 +#define MPC512x_CLK_PSC1 47 +#define MPC512x_CLK_PSC2 48 +#define MPC512x_CLK_PSC3 49 +#define MPC512x_CLK_PSC4 50 +#define MPC512x_CLK_PSC5 51 +#define MPC512x_CLK_PSC6 52 +#define MPC512x_CLK_PSC7 53 +#define MPC512x_CLK_PSC8 54 +#define MPC512x_CLK_PSC9 55 +#define MPC512x_CLK_PSC10 56 +#define MPC512x_CLK_PSC11 57 +#define MPC512x_CLK_SDHC2 58 +#define MPC512x_CLK_FEC2 59 +#define MPC512x_CLK_OUT0_CLK 60 +#define MPC512x_CLK_OUT1_CLK 61 +#define MPC512x_CLK_OUT2_CLK 62 +#define MPC512x_CLK_OUT3_CLK 63 +#define MPC512x_CLK_CAN_CLK_IN 64 + +#define MPC512x_CLK_LAST_PUBLIC 64 + +#endif diff --git 
a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h new file mode 100644 index 000000000..9ac2f2b57 --- /dev/null +++ b/include/dt-bindings/clock/mt2701-clk.h @@ -0,0 +1,492 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Shunli Wang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MT2701_H +#define _DT_BINDINGS_CLK_MT2701_H + +/* TOPCKGEN */ +#define CLK_TOP_SYSPLL 1 +#define CLK_TOP_SYSPLL_D2 2 +#define CLK_TOP_SYSPLL_D3 3 +#define CLK_TOP_SYSPLL_D5 4 +#define CLK_TOP_SYSPLL_D7 5 +#define CLK_TOP_SYSPLL1_D2 6 +#define CLK_TOP_SYSPLL1_D4 7 +#define CLK_TOP_SYSPLL1_D8 8 +#define CLK_TOP_SYSPLL1_D16 9 +#define CLK_TOP_SYSPLL2_D2 10 +#define CLK_TOP_SYSPLL2_D4 11 +#define CLK_TOP_SYSPLL2_D8 12 +#define CLK_TOP_SYSPLL3_D2 13 +#define CLK_TOP_SYSPLL3_D4 14 +#define CLK_TOP_SYSPLL4_D2 15 +#define CLK_TOP_SYSPLL4_D4 16 +#define CLK_TOP_UNIVPLL 17 +#define CLK_TOP_UNIVPLL_D2 18 +#define CLK_TOP_UNIVPLL_D3 19 +#define CLK_TOP_UNIVPLL_D5 20 +#define CLK_TOP_UNIVPLL_D7 21 +#define CLK_TOP_UNIVPLL_D26 22 +#define CLK_TOP_UNIVPLL_D52 23 +#define CLK_TOP_UNIVPLL_D108 24 +#define CLK_TOP_USB_PHY48M 25 +#define CLK_TOP_UNIVPLL1_D2 26 +#define CLK_TOP_UNIVPLL1_D4 27 +#define CLK_TOP_UNIVPLL1_D8 28 +#define CLK_TOP_UNIVPLL2_D2 29 +#define CLK_TOP_UNIVPLL2_D4 30 +#define CLK_TOP_UNIVPLL2_D8 31 +#define CLK_TOP_UNIVPLL2_D16 32 +#define CLK_TOP_UNIVPLL2_D32 33 +#define CLK_TOP_UNIVPLL3_D2 34 +#define CLK_TOP_UNIVPLL3_D4 35 +#define CLK_TOP_UNIVPLL3_D8 36 +#define CLK_TOP_MSDCPLL 37 +#define CLK_TOP_MSDCPLL_D2 38 +#define CLK_TOP_MSDCPLL_D4 39 +#define CLK_TOP_MSDCPLL_D8 40 +#define CLK_TOP_MMPLL 41 +#define CLK_TOP_MMPLL_D2 42 +#define CLK_TOP_DMPLL 43 +#define CLK_TOP_DMPLL_D2 44 +#define CLK_TOP_DMPLL_D4 45 +#define CLK_TOP_DMPLL_X2 46 +#define CLK_TOP_TVDPLL 47 +#define CLK_TOP_TVDPLL_D2 48 +#define CLK_TOP_TVDPLL_D4 49 +#define CLK_TOP_TVD2PLL 50 +#define CLK_TOP_TVD2PLL_D2 51 +#define CLK_TOP_HADDS2PLL_98M 52 +#define CLK_TOP_HADDS2PLL_294M 53 +#define CLK_TOP_HADDS2_FB 54 +#define CLK_TOP_MIPIPLL_D2 55 +#define CLK_TOP_MIPIPLL_D4 56 +#define CLK_TOP_HDMIPLL 57 +#define CLK_TOP_HDMIPLL_D2 58 +#define CLK_TOP_HDMIPLL_D3 59 +#define CLK_TOP_HDMI_SCL_RX 60 +#define CLK_TOP_HDMI_0_PIX340M 61 +#define CLK_TOP_HDMI_0_DEEP340M 62 +#define CLK_TOP_HDMI_0_PLL340M 63 +#define CLK_TOP_AUD1PLL_98M 64 +#define CLK_TOP_AUD2PLL_90M 65 +#define CLK_TOP_AUDPLL 66 +#define CLK_TOP_AUDPLL_D4 67 +#define CLK_TOP_AUDPLL_D8 68 +#define CLK_TOP_AUDPLL_D16 69 +#define CLK_TOP_AUDPLL_D24 70 +#define CLK_TOP_ETHPLL_500M 71 +#define CLK_TOP_VDECPLL 72 +#define CLK_TOP_VENCPLL 73 +#define CLK_TOP_MIPIPLL 74 +#define CLK_TOP_ARMPLL_1P3G 75 + +#define CLK_TOP_MM_SEL 76 +#define CLK_TOP_DDRPHYCFG_SEL 77 +#define CLK_TOP_MEM_SEL 78 +#define CLK_TOP_AXI_SEL 79 +#define CLK_TOP_CAMTG_SEL 80 +#define CLK_TOP_MFG_SEL 81 +#define CLK_TOP_VDEC_SEL 82 +#define CLK_TOP_PWM_SEL 83 +#define CLK_TOP_MSDC30_0_SEL 84 +#define CLK_TOP_USB20_SEL 85 +#define CLK_TOP_SPI0_SEL 86 +#define CLK_TOP_UART_SEL 87 +#define CLK_TOP_AUDINTBUS_SEL 88 +#define CLK_TOP_AUDIO_SEL 89 +#define 
CLK_TOP_MSDC30_2_SEL 90 +#define CLK_TOP_MSDC30_1_SEL 91 +#define CLK_TOP_DPI1_SEL 92 +#define CLK_TOP_DPI0_SEL 93 +#define CLK_TOP_SCP_SEL 94 +#define CLK_TOP_PMICSPI_SEL 95 +#define CLK_TOP_APLL_SEL 96 +#define CLK_TOP_HDMI_SEL 97 +#define CLK_TOP_TVE_SEL 98 +#define CLK_TOP_EMMC_HCLK_SEL 99 +#define CLK_TOP_NFI2X_SEL 100 +#define CLK_TOP_RTC_SEL 101 +#define CLK_TOP_OSD_SEL 102 +#define CLK_TOP_NR_SEL 103 +#define CLK_TOP_DI_SEL 104 +#define CLK_TOP_FLASH_SEL 105 +#define CLK_TOP_ASM_M_SEL 106 +#define CLK_TOP_ASM_I_SEL 107 +#define CLK_TOP_INTDIR_SEL 108 +#define CLK_TOP_HDMIRX_BIST_SEL 109 +#define CLK_TOP_ETHIF_SEL 110 +#define CLK_TOP_MS_CARD_SEL 111 +#define CLK_TOP_ASM_H_SEL 112 +#define CLK_TOP_SPI1_SEL 113 +#define CLK_TOP_CMSYS_SEL 114 +#define CLK_TOP_MSDC30_3_SEL 115 +#define CLK_TOP_HDMIRX26_24_SEL 116 +#define CLK_TOP_AUD2DVD_SEL 117 +#define CLK_TOP_8BDAC_SEL 118 +#define CLK_TOP_SPI2_SEL 119 +#define CLK_TOP_AUD_MUX1_SEL 120 +#define CLK_TOP_AUD_MUX2_SEL 121 +#define CLK_TOP_AUDPLL_MUX_SEL 122 +#define CLK_TOP_AUD_K1_SRC_SEL 123 +#define CLK_TOP_AUD_K2_SRC_SEL 124 +#define CLK_TOP_AUD_K3_SRC_SEL 125 +#define CLK_TOP_AUD_K4_SRC_SEL 126 +#define CLK_TOP_AUD_K5_SRC_SEL 127 +#define CLK_TOP_AUD_K6_SRC_SEL 128 +#define CLK_TOP_PADMCLK_SEL 129 +#define CLK_TOP_AUD_EXTCK1_DIV 130 +#define CLK_TOP_AUD_EXTCK2_DIV 131 +#define CLK_TOP_AUD_MUX1_DIV 132 +#define CLK_TOP_AUD_MUX2_DIV 133 +#define CLK_TOP_AUD_K1_SRC_DIV 134 +#define CLK_TOP_AUD_K2_SRC_DIV 135 +#define CLK_TOP_AUD_K3_SRC_DIV 136 +#define CLK_TOP_AUD_K4_SRC_DIV 137 +#define CLK_TOP_AUD_K5_SRC_DIV 138 +#define CLK_TOP_AUD_K6_SRC_DIV 139 +#define CLK_TOP_AUD_I2S1_MCLK 140 +#define CLK_TOP_AUD_I2S2_MCLK 141 +#define CLK_TOP_AUD_I2S3_MCLK 142 +#define CLK_TOP_AUD_I2S4_MCLK 143 +#define CLK_TOP_AUD_I2S5_MCLK 144 +#define CLK_TOP_AUD_I2S6_MCLK 145 +#define CLK_TOP_AUD_48K_TIMING 146 +#define CLK_TOP_AUD_44K_TIMING 147 + +#define CLK_TOP_32K_INTERNAL 148 +#define CLK_TOP_32K_EXTERNAL 149 +#define CLK_TOP_CLK26M_D8 150 +#define CLK_TOP_8BDAC 151 +#define CLK_TOP_WBG_DIG_416M 152 +#define CLK_TOP_DPI 153 +#define CLK_TOP_DSI0_LNTC_DSI 154 +#define CLK_TOP_AUD_EXT1 155 +#define CLK_TOP_AUD_EXT2 156 +#define CLK_TOP_NFI1X_PAD 157 +#define CLK_TOP_AXISEL_D4 158 +#define CLK_TOP_NR 159 + +/* APMIXEDSYS */ + +#define CLK_APMIXED_ARMPLL 1 +#define CLK_APMIXED_MAINPLL 2 +#define CLK_APMIXED_UNIVPLL 3 +#define CLK_APMIXED_MMPLL 4 +#define CLK_APMIXED_MSDCPLL 5 +#define CLK_APMIXED_TVDPLL 6 +#define CLK_APMIXED_AUD1PLL 7 +#define CLK_APMIXED_TRGPLL 8 +#define CLK_APMIXED_ETHPLL 9 +#define CLK_APMIXED_VDECPLL 10 +#define CLK_APMIXED_HADDS2PLL 11 +#define CLK_APMIXED_AUD2PLL 12 +#define CLK_APMIXED_TVD2PLL 13 +#define CLK_APMIXED_HDMI_REF 14 +#define CLK_APMIXED_NR 15 + +/* DDRPHY */ + +#define CLK_DDRPHY_VENCPLL 1 +#define CLK_DDRPHY_NR 2 + +/* INFRACFG */ + +#define CLK_INFRA_DBG 1 +#define CLK_INFRA_SMI 2 +#define CLK_INFRA_QAXI_CM4 3 +#define CLK_INFRA_AUD_SPLIN_B 4 +#define CLK_INFRA_AUDIO 5 +#define CLK_INFRA_EFUSE 6 +#define CLK_INFRA_L2C_SRAM 7 +#define CLK_INFRA_M4U 8 +#define CLK_INFRA_CONNMCU 9 +#define CLK_INFRA_TRNG 10 +#define CLK_INFRA_RAMBUFIF 11 +#define CLK_INFRA_CPUM 12 +#define CLK_INFRA_KP 13 +#define CLK_INFRA_CEC 14 +#define CLK_INFRA_IRRX 15 +#define CLK_INFRA_PMICSPI 16 +#define CLK_INFRA_PMICWRAP 17 +#define CLK_INFRA_DDCCI 18 +#define CLK_INFRA_CLK_13M 19 +#define CLK_INFRA_CPUSEL 20 +#define CLK_INFRA_NR 21 + +/* PERICFG */ + +#define CLK_PERI_NFI 1 +#define CLK_PERI_THERM 2 +#define CLK_PERI_PWM1 3 +#define 
CLK_PERI_PWM2 4 +#define CLK_PERI_PWM3 5 +#define CLK_PERI_PWM4 6 +#define CLK_PERI_PWM5 7 +#define CLK_PERI_PWM6 8 +#define CLK_PERI_PWM7 9 +#define CLK_PERI_PWM 10 +#define CLK_PERI_USB0 11 +#define CLK_PERI_USB1 12 +#define CLK_PERI_AP_DMA 13 +#define CLK_PERI_MSDC30_0 14 +#define CLK_PERI_MSDC30_1 15 +#define CLK_PERI_MSDC30_2 16 +#define CLK_PERI_MSDC30_3 17 +#define CLK_PERI_MSDC50_3 18 +#define CLK_PERI_NLI 19 +#define CLK_PERI_UART0 20 +#define CLK_PERI_UART1 21 +#define CLK_PERI_UART2 22 +#define CLK_PERI_UART3 23 +#define CLK_PERI_BTIF 24 +#define CLK_PERI_I2C0 25 +#define CLK_PERI_I2C1 26 +#define CLK_PERI_I2C2 27 +#define CLK_PERI_I2C3 28 +#define CLK_PERI_AUXADC 29 +#define CLK_PERI_SPI0 30 +#define CLK_PERI_ETH 31 +#define CLK_PERI_USB0_MCU 32 + +#define CLK_PERI_USB1_MCU 33 +#define CLK_PERI_USB_SLV 34 +#define CLK_PERI_GCPU 35 +#define CLK_PERI_NFI_ECC 36 +#define CLK_PERI_NFI_PAD 37 +#define CLK_PERI_FLASH 38 +#define CLK_PERI_HOST89_INT 39 +#define CLK_PERI_HOST89_SPI 40 +#define CLK_PERI_HOST89_DVD 41 +#define CLK_PERI_SPI1 42 +#define CLK_PERI_SPI2 43 +#define CLK_PERI_FCI 44 + +#define CLK_PERI_UART0_SEL 45 +#define CLK_PERI_UART1_SEL 46 +#define CLK_PERI_UART2_SEL 47 +#define CLK_PERI_UART3_SEL 48 +#define CLK_PERI_NR 49 + +/* AUDIO */ + +#define CLK_AUD_AFE 1 +#define CLK_AUD_LRCK_DETECT 2 +#define CLK_AUD_I2S 3 +#define CLK_AUD_APLL_TUNER 4 +#define CLK_AUD_HDMI 5 +#define CLK_AUD_SPDF 6 +#define CLK_AUD_SPDF2 7 +#define CLK_AUD_APLL 8 +#define CLK_AUD_TML 9 +#define CLK_AUD_AHB_IDLE_EXT 10 +#define CLK_AUD_AHB_IDLE_INT 11 + +#define CLK_AUD_I2SIN1 12 +#define CLK_AUD_I2SIN2 13 +#define CLK_AUD_I2SIN3 14 +#define CLK_AUD_I2SIN4 15 +#define CLK_AUD_I2SIN5 16 +#define CLK_AUD_I2SIN6 17 +#define CLK_AUD_I2SO1 18 +#define CLK_AUD_I2SO2 19 +#define CLK_AUD_I2SO3 20 +#define CLK_AUD_I2SO4 21 +#define CLK_AUD_I2SO5 22 +#define CLK_AUD_I2SO6 23 +#define CLK_AUD_ASRCI1 24 +#define CLK_AUD_ASRCI2 25 +#define CLK_AUD_ASRCO1 26 +#define CLK_AUD_ASRCO2 27 +#define CLK_AUD_ASRC11 28 +#define CLK_AUD_ASRC12 29 +#define CLK_AUD_HDMIRX 30 +#define CLK_AUD_INTDIR 31 +#define CLK_AUD_A1SYS 32 +#define CLK_AUD_A2SYS 33 +#define CLK_AUD_AFE_CONN 34 +#define CLK_AUD_AFE_PCMIF 35 +#define CLK_AUD_AFE_MRGIF 36 + +#define CLK_AUD_MMIF_UL1 37 +#define CLK_AUD_MMIF_UL2 38 +#define CLK_AUD_MMIF_UL3 39 +#define CLK_AUD_MMIF_UL4 40 +#define CLK_AUD_MMIF_UL5 41 +#define CLK_AUD_MMIF_UL6 42 +#define CLK_AUD_MMIF_DL1 43 +#define CLK_AUD_MMIF_DL2 44 +#define CLK_AUD_MMIF_DL3 45 +#define CLK_AUD_MMIF_DL4 46 +#define CLK_AUD_MMIF_DL5 47 +#define CLK_AUD_MMIF_DL6 48 +#define CLK_AUD_MMIF_DLMCH 49 +#define CLK_AUD_MMIF_ARB1 50 +#define CLK_AUD_MMIF_AWB1 51 +#define CLK_AUD_MMIF_AWB2 52 +#define CLK_AUD_MMIF_DAI 53 + +#define CLK_AUD_DMIC1 54 +#define CLK_AUD_DMIC2 55 +#define CLK_AUD_ASRCI3 56 +#define CLK_AUD_ASRCI4 57 +#define CLK_AUD_ASRCI5 58 +#define CLK_AUD_ASRCI6 59 +#define CLK_AUD_ASRCO3 60 +#define CLK_AUD_ASRCO4 61 +#define CLK_AUD_ASRCO5 62 +#define CLK_AUD_ASRCO6 63 +#define CLK_AUD_MEM_ASRC1 64 +#define CLK_AUD_MEM_ASRC2 65 +#define CLK_AUD_MEM_ASRC3 66 +#define CLK_AUD_MEM_ASRC4 67 +#define CLK_AUD_MEM_ASRC5 68 +#define CLK_AUD_DSD_ENC 69 +#define CLK_AUD_ASRC_BRG 70 +#define CLK_AUD_NR 71 + +/* MMSYS */ + +#define CLK_MM_SMI_COMMON 1 +#define CLK_MM_SMI_LARB0 2 +#define CLK_MM_CMDQ 3 +#define CLK_MM_MUTEX 4 +#define CLK_MM_DISP_COLOR 5 +#define CLK_MM_DISP_BLS 6 +#define CLK_MM_DISP_WDMA 7 +#define CLK_MM_DISP_RDMA 8 +#define CLK_MM_DISP_OVL 9 +#define CLK_MM_MDP_TDSHP 10 +#define 
CLK_MM_MDP_WROT 11 +#define CLK_MM_MDP_WDMA 12 +#define CLK_MM_MDP_RSZ1 13 +#define CLK_MM_MDP_RSZ0 14 +#define CLK_MM_MDP_RDMA 15 +#define CLK_MM_MDP_BLS_26M 16 +#define CLK_MM_CAM_MDP 17 +#define CLK_MM_FAKE_ENG 18 +#define CLK_MM_MUTEX_32K 19 +#define CLK_MM_DISP_RDMA1 20 +#define CLK_MM_DISP_UFOE 21 + +#define CLK_MM_DSI_ENGINE 22 +#define CLK_MM_DSI_DIG 23 +#define CLK_MM_DPI_DIGL 24 +#define CLK_MM_DPI_ENGINE 25 +#define CLK_MM_DPI1_DIGL 26 +#define CLK_MM_DPI1_ENGINE 27 +#define CLK_MM_TVE_OUTPUT 28 +#define CLK_MM_TVE_INPUT 29 +#define CLK_MM_HDMI_PIXEL 30 +#define CLK_MM_HDMI_PLL 31 +#define CLK_MM_HDMI_AUDIO 32 +#define CLK_MM_HDMI_SPDIF 33 +#define CLK_MM_TVE_FMM 34 +#define CLK_MM_NR 35 + +/* IMGSYS */ + +#define CLK_IMG_SMI_COMM 1 +#define CLK_IMG_RESZ 2 +#define CLK_IMG_JPGDEC_SMI 3 +#define CLK_IMG_JPGDEC 4 +#define CLK_IMG_VENC_LT 5 +#define CLK_IMG_VENC 6 +#define CLK_IMG_NR 7 + +/* VDEC */ + +#define CLK_VDEC_CKGEN 1 +#define CLK_VDEC_LARB 2 +#define CLK_VDEC_NR 3 + +/* HIFSYS */ + +#define CLK_HIFSYS_USB0PHY 1 +#define CLK_HIFSYS_USB1PHY 2 +#define CLK_HIFSYS_PCIE0 3 +#define CLK_HIFSYS_PCIE1 4 +#define CLK_HIFSYS_PCIE2 5 +#define CLK_HIFSYS_NR 6 + +/* ETHSYS */ +#define CLK_ETHSYS_HSDMA 1 +#define CLK_ETHSYS_ESW 2 +#define CLK_ETHSYS_GP2 3 +#define CLK_ETHSYS_GP1 4 +#define CLK_ETHSYS_PCM 5 +#define CLK_ETHSYS_GDMA 6 +#define CLK_ETHSYS_I2S 7 +#define CLK_ETHSYS_CRYPTO 8 +#define CLK_ETHSYS_NR 9 + +/* G3DSYS */ +#define CLK_G3DSYS_CORE 1 +#define CLK_G3DSYS_NR 2 + +/* BDP */ + +#define CLK_BDP_BRG_BA 1 +#define CLK_BDP_BRG_DRAM 2 +#define CLK_BDP_LARB_DRAM 3 +#define CLK_BDP_WR_VDI_PXL 4 +#define CLK_BDP_WR_VDI_DRAM 5 +#define CLK_BDP_WR_B 6 +#define CLK_BDP_DGI_IN 7 +#define CLK_BDP_DGI_OUT 8 +#define CLK_BDP_FMT_MAST_27 9 +#define CLK_BDP_FMT_B 10 +#define CLK_BDP_OSD_B 11 +#define CLK_BDP_OSD_DRAM 12 +#define CLK_BDP_OSD_AGENT 13 +#define CLK_BDP_OSD_PXL 14 +#define CLK_BDP_RLE_B 15 +#define CLK_BDP_RLE_AGENT 16 +#define CLK_BDP_RLE_DRAM 17 +#define CLK_BDP_F27M 18 +#define CLK_BDP_F27M_VDOUT 19 +#define CLK_BDP_F27_74_74 20 +#define CLK_BDP_F2FS 21 +#define CLK_BDP_F2FS74_148 22 +#define CLK_BDP_FB 23 +#define CLK_BDP_VDO_DRAM 24 +#define CLK_BDP_VDO_2FS 25 +#define CLK_BDP_VDO_B 26 +#define CLK_BDP_WR_DI_PXL 27 +#define CLK_BDP_WR_DI_DRAM 28 +#define CLK_BDP_WR_DI_B 29 +#define CLK_BDP_NR_PXL 30 +#define CLK_BDP_NR_DRAM 31 +#define CLK_BDP_NR_B 32 + +#define CLK_BDP_RX_F 33 +#define CLK_BDP_RX_X 34 +#define CLK_BDP_RXPDT 35 +#define CLK_BDP_RX_CSCL_N 36 +#define CLK_BDP_RX_CSCL 37 +#define CLK_BDP_RX_DDCSCL_N 38 +#define CLK_BDP_RX_DDCSCL 39 +#define CLK_BDP_RX_VCO 40 +#define CLK_BDP_RX_DP 41 +#define CLK_BDP_RX_P 42 +#define CLK_BDP_RX_M 43 +#define CLK_BDP_RX_PLL 44 +#define CLK_BDP_BRG_RT_B 45 +#define CLK_BDP_BRG_RT_DRAM 46 +#define CLK_BDP_LARBRT_DRAM 47 +#define CLK_BDP_TMDS_SYN 48 +#define CLK_BDP_HDMI_MON 49 +#define CLK_BDP_NR 50 + +#endif /* _DT_BINDINGS_CLK_MT2701_H */ diff --git a/include/dt-bindings/clock/mt2712-clk.h b/include/dt-bindings/clock/mt2712-clk.h new file mode 100644 index 000000000..76265836a --- /dev/null +++ b/include/dt-bindings/clock/mt2712-clk.h @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Weiyi Lu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MT2712_H +#define _DT_BINDINGS_CLK_MT2712_H + +/* APMIXEDSYS */ + +#define CLK_APMIXED_MAINPLL 0 +#define CLK_APMIXED_UNIVPLL 1 +#define CLK_APMIXED_VCODECPLL 2 +#define CLK_APMIXED_VENCPLL 3 +#define CLK_APMIXED_APLL1 4 +#define CLK_APMIXED_APLL2 5 +#define CLK_APMIXED_LVDSPLL 6 +#define CLK_APMIXED_LVDSPLL2 7 +#define CLK_APMIXED_MSDCPLL 8 +#define CLK_APMIXED_MSDCPLL2 9 +#define CLK_APMIXED_TVDPLL 10 +#define CLK_APMIXED_MMPLL 11 +#define CLK_APMIXED_ARMCA35PLL 12 +#define CLK_APMIXED_ARMCA72PLL 13 +#define CLK_APMIXED_ETHERPLL 14 +#define CLK_APMIXED_NR_CLK 15 + +/* TOPCKGEN */ + +#define CLK_TOP_ARMCA35PLL 0 +#define CLK_TOP_ARMCA35PLL_600M 1 +#define CLK_TOP_ARMCA35PLL_400M 2 +#define CLK_TOP_ARMCA72PLL 3 +#define CLK_TOP_SYSPLL 4 +#define CLK_TOP_SYSPLL_D2 5 +#define CLK_TOP_SYSPLL1_D2 6 +#define CLK_TOP_SYSPLL1_D4 7 +#define CLK_TOP_SYSPLL1_D8 8 +#define CLK_TOP_SYSPLL1_D16 9 +#define CLK_TOP_SYSPLL_D3 10 +#define CLK_TOP_SYSPLL2_D2 11 +#define CLK_TOP_SYSPLL2_D4 12 +#define CLK_TOP_SYSPLL_D5 13 +#define CLK_TOP_SYSPLL3_D2 14 +#define CLK_TOP_SYSPLL3_D4 15 +#define CLK_TOP_SYSPLL_D7 16 +#define CLK_TOP_SYSPLL4_D2 17 +#define CLK_TOP_SYSPLL4_D4 18 +#define CLK_TOP_UNIVPLL 19 +#define CLK_TOP_UNIVPLL_D7 20 +#define CLK_TOP_UNIVPLL_D26 21 +#define CLK_TOP_UNIVPLL_D52 22 +#define CLK_TOP_UNIVPLL_D104 23 +#define CLK_TOP_UNIVPLL_D208 24 +#define CLK_TOP_UNIVPLL_D2 25 +#define CLK_TOP_UNIVPLL1_D2 26 +#define CLK_TOP_UNIVPLL1_D4 27 +#define CLK_TOP_UNIVPLL1_D8 28 +#define CLK_TOP_UNIVPLL_D3 29 +#define CLK_TOP_UNIVPLL2_D2 30 +#define CLK_TOP_UNIVPLL2_D4 31 +#define CLK_TOP_UNIVPLL2_D8 32 +#define CLK_TOP_UNIVPLL_D5 33 +#define CLK_TOP_UNIVPLL3_D2 34 +#define CLK_TOP_UNIVPLL3_D4 35 +#define CLK_TOP_UNIVPLL3_D8 36 +#define CLK_TOP_F_MP0_PLL1 37 +#define CLK_TOP_F_MP0_PLL2 38 +#define CLK_TOP_F_BIG_PLL1 39 +#define CLK_TOP_F_BIG_PLL2 40 +#define CLK_TOP_F_BUS_PLL1 41 +#define CLK_TOP_F_BUS_PLL2 42 +#define CLK_TOP_APLL1 43 +#define CLK_TOP_APLL1_D2 44 +#define CLK_TOP_APLL1_D4 45 +#define CLK_TOP_APLL1_D8 46 +#define CLK_TOP_APLL1_D16 47 +#define CLK_TOP_APLL2 48 +#define CLK_TOP_APLL2_D2 49 +#define CLK_TOP_APLL2_D4 50 +#define CLK_TOP_APLL2_D8 51 +#define CLK_TOP_APLL2_D16 52 +#define CLK_TOP_LVDSPLL 53 +#define CLK_TOP_LVDSPLL_D2 54 +#define CLK_TOP_LVDSPLL_D4 55 +#define CLK_TOP_LVDSPLL_D8 56 +#define CLK_TOP_LVDSPLL2 57 +#define CLK_TOP_LVDSPLL2_D2 58 +#define CLK_TOP_LVDSPLL2_D4 59 +#define CLK_TOP_LVDSPLL2_D8 60 +#define CLK_TOP_ETHERPLL_125M 61 +#define CLK_TOP_ETHERPLL_50M 62 +#define CLK_TOP_CVBS 63 +#define CLK_TOP_CVBS_D2 64 +#define CLK_TOP_SYS_26M 65 +#define CLK_TOP_MMPLL 66 +#define CLK_TOP_MMPLL_D2 67 +#define CLK_TOP_VENCPLL 68 +#define CLK_TOP_VENCPLL_D2 69 +#define CLK_TOP_VCODECPLL 70 +#define CLK_TOP_VCODECPLL_D2 71 +#define CLK_TOP_TVDPLL 72 +#define CLK_TOP_TVDPLL_D2 73 +#define CLK_TOP_TVDPLL_D4 74 +#define CLK_TOP_TVDPLL_D8 75 +#define CLK_TOP_TVDPLL_429M 76 +#define CLK_TOP_TVDPLL_429M_D2 77 +#define CLK_TOP_TVDPLL_429M_D4 78 +#define CLK_TOP_MSDCPLL 79 +#define CLK_TOP_MSDCPLL_D2 80 +#define CLK_TOP_MSDCPLL_D4 81 +#define CLK_TOP_MSDCPLL2 82 +#define CLK_TOP_MSDCPLL2_D2 83 +#define CLK_TOP_MSDCPLL2_D4 84 +#define CLK_TOP_CLK26M_D2 85 +#define CLK_TOP_D2A_ULCLK_6P5M 86 +#define 
CLK_TOP_VPLL3_DPIX 87 +#define CLK_TOP_VPLL_DPIX 88 +#define CLK_TOP_LTEPLL_FS26M 89 +#define CLK_TOP_DMPLL 90 +#define CLK_TOP_DSI0_LNTC 91 +#define CLK_TOP_DSI1_LNTC 92 +#define CLK_TOP_LVDSTX3_CLKDIG_CTS 93 +#define CLK_TOP_LVDSTX_CLKDIG_CTS 94 +#define CLK_TOP_CLKRTC_EXT 95 +#define CLK_TOP_CLKRTC_INT 96 +#define CLK_TOP_CSI0 97 +#define CLK_TOP_CVBSPLL 98 +#define CLK_TOP_AXI_SEL 99 +#define CLK_TOP_MEM_SEL 100 +#define CLK_TOP_MM_SEL 101 +#define CLK_TOP_PWM_SEL 102 +#define CLK_TOP_VDEC_SEL 103 +#define CLK_TOP_VENC_SEL 104 +#define CLK_TOP_MFG_SEL 105 +#define CLK_TOP_CAMTG_SEL 106 +#define CLK_TOP_UART_SEL 107 +#define CLK_TOP_SPI_SEL 108 +#define CLK_TOP_USB20_SEL 109 +#define CLK_TOP_USB30_SEL 110 +#define CLK_TOP_MSDC50_0_HCLK_SEL 111 +#define CLK_TOP_MSDC50_0_SEL 112 +#define CLK_TOP_MSDC30_1_SEL 113 +#define CLK_TOP_MSDC30_2_SEL 114 +#define CLK_TOP_MSDC30_3_SEL 115 +#define CLK_TOP_AUDIO_SEL 116 +#define CLK_TOP_AUD_INTBUS_SEL 117 +#define CLK_TOP_PMICSPI_SEL 118 +#define CLK_TOP_DPILVDS1_SEL 119 +#define CLK_TOP_ATB_SEL 120 +#define CLK_TOP_NR_SEL 121 +#define CLK_TOP_NFI2X_SEL 122 +#define CLK_TOP_IRDA_SEL 123 +#define CLK_TOP_CCI400_SEL 124 +#define CLK_TOP_AUD_1_SEL 125 +#define CLK_TOP_AUD_2_SEL 126 +#define CLK_TOP_MEM_MFG_IN_AS_SEL 127 +#define CLK_TOP_AXI_MFG_IN_AS_SEL 128 +#define CLK_TOP_SCAM_SEL 129 +#define CLK_TOP_NFIECC_SEL 130 +#define CLK_TOP_PE2_MAC_P0_SEL 131 +#define CLK_TOP_PE2_MAC_P1_SEL 132 +#define CLK_TOP_DPILVDS_SEL 133 +#define CLK_TOP_MSDC50_3_HCLK_SEL 134 +#define CLK_TOP_HDCP_SEL 135 +#define CLK_TOP_HDCP_24M_SEL 136 +#define CLK_TOP_RTC_SEL 137 +#define CLK_TOP_SPINOR_SEL 138 +#define CLK_TOP_APLL_SEL 139 +#define CLK_TOP_APLL2_SEL 140 +#define CLK_TOP_A1SYS_HP_SEL 141 +#define CLK_TOP_A2SYS_HP_SEL 142 +#define CLK_TOP_ASM_L_SEL 143 +#define CLK_TOP_ASM_M_SEL 144 +#define CLK_TOP_ASM_H_SEL 145 +#define CLK_TOP_I2SO1_SEL 146 +#define CLK_TOP_I2SO2_SEL 147 +#define CLK_TOP_I2SO3_SEL 148 +#define CLK_TOP_TDMO0_SEL 149 +#define CLK_TOP_TDMO1_SEL 150 +#define CLK_TOP_I2SI1_SEL 151 +#define CLK_TOP_I2SI2_SEL 152 +#define CLK_TOP_I2SI3_SEL 153 +#define CLK_TOP_ETHER_125M_SEL 154 +#define CLK_TOP_ETHER_50M_SEL 155 +#define CLK_TOP_JPGDEC_SEL 156 +#define CLK_TOP_SPISLV_SEL 157 +#define CLK_TOP_ETHER_50M_RMII_SEL 158 +#define CLK_TOP_CAM2TG_SEL 159 +#define CLK_TOP_DI_SEL 160 +#define CLK_TOP_TVD_SEL 161 +#define CLK_TOP_I2C_SEL 162 +#define CLK_TOP_PWM_INFRA_SEL 163 +#define CLK_TOP_MSDC0P_AES_SEL 164 +#define CLK_TOP_CMSYS_SEL 165 +#define CLK_TOP_GCPU_SEL 166 +#define CLK_TOP_AUD_APLL1_SEL 167 +#define CLK_TOP_AUD_APLL2_SEL 168 +#define CLK_TOP_DA_AUDULL_VTX_6P5M_SEL 169 +#define CLK_TOP_APLL_DIV0 170 +#define CLK_TOP_APLL_DIV1 171 +#define CLK_TOP_APLL_DIV2 172 +#define CLK_TOP_APLL_DIV3 173 +#define CLK_TOP_APLL_DIV4 174 +#define CLK_TOP_APLL_DIV5 175 +#define CLK_TOP_APLL_DIV6 176 +#define CLK_TOP_APLL_DIV7 177 +#define CLK_TOP_APLL_DIV_PDN0 178 +#define CLK_TOP_APLL_DIV_PDN1 179 +#define CLK_TOP_APLL_DIV_PDN2 180 +#define CLK_TOP_APLL_DIV_PDN3 181 +#define CLK_TOP_APLL_DIV_PDN4 182 +#define CLK_TOP_APLL_DIV_PDN5 183 +#define CLK_TOP_APLL_DIV_PDN6 184 +#define CLK_TOP_APLL_DIV_PDN7 185 +#define CLK_TOP_APLL1_D3 186 +#define CLK_TOP_APLL1_REF_SEL 187 +#define CLK_TOP_APLL2_REF_SEL 188 +#define CLK_TOP_NFI2X_EN 189 +#define CLK_TOP_NFIECC_EN 190 +#define CLK_TOP_NFI1X_CK_EN 191 +#define CLK_TOP_NR_CLK 192 + +/* INFRACFG */ + +#define CLK_INFRA_DBGCLK 0 +#define CLK_INFRA_GCE 1 +#define CLK_INFRA_M4U 2 +#define CLK_INFRA_KP 3 +#define 
CLK_INFRA_AO_SPI0 4 +#define CLK_INFRA_AO_SPI1 5 +#define CLK_INFRA_AO_UART5 6 +#define CLK_INFRA_NR_CLK 7 + +/* PERICFG */ + +#define CLK_PERI_NFI 0 +#define CLK_PERI_THERM 1 +#define CLK_PERI_PWM0 2 +#define CLK_PERI_PWM1 3 +#define CLK_PERI_PWM2 4 +#define CLK_PERI_PWM3 5 +#define CLK_PERI_PWM4 6 +#define CLK_PERI_PWM5 7 +#define CLK_PERI_PWM6 8 +#define CLK_PERI_PWM7 9 +#define CLK_PERI_PWM 10 +#define CLK_PERI_AP_DMA 11 +#define CLK_PERI_MSDC30_0 12 +#define CLK_PERI_MSDC30_1 13 +#define CLK_PERI_MSDC30_2 14 +#define CLK_PERI_MSDC30_3 15 +#define CLK_PERI_UART0 16 +#define CLK_PERI_UART1 17 +#define CLK_PERI_UART2 18 +#define CLK_PERI_UART3 19 +#define CLK_PERI_I2C0 20 +#define CLK_PERI_I2C1 21 +#define CLK_PERI_I2C2 22 +#define CLK_PERI_I2C3 23 +#define CLK_PERI_I2C4 24 +#define CLK_PERI_AUXADC 25 +#define CLK_PERI_SPI0 26 +#define CLK_PERI_SPI 27 +#define CLK_PERI_I2C5 28 +#define CLK_PERI_SPI2 29 +#define CLK_PERI_SPI3 30 +#define CLK_PERI_SPI5 31 +#define CLK_PERI_UART4 32 +#define CLK_PERI_SFLASH 33 +#define CLK_PERI_GMAC 34 +#define CLK_PERI_PCIE0 35 +#define CLK_PERI_PCIE1 36 +#define CLK_PERI_GMAC_PCLK 37 +#define CLK_PERI_MSDC50_0_EN 38 +#define CLK_PERI_MSDC30_1_EN 39 +#define CLK_PERI_MSDC30_2_EN 40 +#define CLK_PERI_MSDC30_3_EN 41 +#define CLK_PERI_MSDC50_0_HCLK_EN 42 +#define CLK_PERI_MSDC50_3_HCLK_EN 43 +#define CLK_PERI_MSDC30_0_QTR_EN 44 +#define CLK_PERI_MSDC30_3_QTR_EN 45 +#define CLK_PERI_NR_CLK 46 + +/* MCUCFG */ + +#define CLK_MCU_MP0_SEL 0 +#define CLK_MCU_MP2_SEL 1 +#define CLK_MCU_BUS_SEL 2 +#define CLK_MCU_NR_CLK 3 + +/* MFGCFG */ + +#define CLK_MFG_BG3D 0 +#define CLK_MFG_NR_CLK 1 + +/* MMSYS */ + +#define CLK_MM_SMI_COMMON 0 +#define CLK_MM_SMI_LARB0 1 +#define CLK_MM_CAM_MDP 2 +#define CLK_MM_MDP_RDMA0 3 +#define CLK_MM_MDP_RDMA1 4 +#define CLK_MM_MDP_RSZ0 5 +#define CLK_MM_MDP_RSZ1 6 +#define CLK_MM_MDP_RSZ2 7 +#define CLK_MM_MDP_TDSHP0 8 +#define CLK_MM_MDP_TDSHP1 9 +#define CLK_MM_MDP_CROP 10 +#define CLK_MM_MDP_WDMA 11 +#define CLK_MM_MDP_WROT0 12 +#define CLK_MM_MDP_WROT1 13 +#define CLK_MM_FAKE_ENG 14 +#define CLK_MM_MUTEX_32K 15 +#define CLK_MM_DISP_OVL0 16 +#define CLK_MM_DISP_OVL1 17 +#define CLK_MM_DISP_RDMA0 18 +#define CLK_MM_DISP_RDMA1 19 +#define CLK_MM_DISP_RDMA2 20 +#define CLK_MM_DISP_WDMA0 21 +#define CLK_MM_DISP_WDMA1 22 +#define CLK_MM_DISP_COLOR0 23 +#define CLK_MM_DISP_COLOR1 24 +#define CLK_MM_DISP_AAL 25 +#define CLK_MM_DISP_GAMMA 26 +#define CLK_MM_DISP_UFOE 27 +#define CLK_MM_DISP_SPLIT0 28 +#define CLK_MM_DISP_OD 29 +#define CLK_MM_DISP_PWM0_MM 30 +#define CLK_MM_DISP_PWM0_26M 31 +#define CLK_MM_DISP_PWM1_MM 32 +#define CLK_MM_DISP_PWM1_26M 33 +#define CLK_MM_DSI0_ENGINE 34 +#define CLK_MM_DSI0_DIGITAL 35 +#define CLK_MM_DSI1_ENGINE 36 +#define CLK_MM_DSI1_DIGITAL 37 +#define CLK_MM_DPI_PIXEL 38 +#define CLK_MM_DPI_ENGINE 39 +#define CLK_MM_DPI1_PIXEL 40 +#define CLK_MM_DPI1_ENGINE 41 +#define CLK_MM_LVDS_PIXEL 42 +#define CLK_MM_LVDS_CTS 43 +#define CLK_MM_SMI_LARB4 44 +#define CLK_MM_SMI_COMMON1 45 +#define CLK_MM_SMI_LARB5 46 +#define CLK_MM_MDP_RDMA2 47 +#define CLK_MM_MDP_TDSHP2 48 +#define CLK_MM_DISP_OVL2 49 +#define CLK_MM_DISP_WDMA2 50 +#define CLK_MM_DISP_COLOR2 51 +#define CLK_MM_DISP_AAL1 52 +#define CLK_MM_DISP_OD1 53 +#define CLK_MM_LVDS1_PIXEL 54 +#define CLK_MM_LVDS1_CTS 55 +#define CLK_MM_SMI_LARB7 56 +#define CLK_MM_MDP_RDMA3 57 +#define CLK_MM_MDP_WROT2 58 +#define CLK_MM_DSI2 59 +#define CLK_MM_DSI2_DIGITAL 60 +#define CLK_MM_DSI3 61 +#define CLK_MM_DSI3_DIGITAL 62 +#define CLK_MM_NR_CLK 63 + +/* IMGSYS */ + 
+#define CLK_IMG_SMI_LARB2 0 +#define CLK_IMG_SENINF_SCAM_EN 1 +#define CLK_IMG_SENINF_CAM_EN 2 +#define CLK_IMG_CAM_SV_EN 3 +#define CLK_IMG_CAM_SV1_EN 4 +#define CLK_IMG_CAM_SV2_EN 5 +#define CLK_IMG_NR_CLK 6 + +/* BDPSYS */ + +#define CLK_BDP_BRIDGE_B 0 +#define CLK_BDP_BRIDGE_DRAM 1 +#define CLK_BDP_LARB_DRAM 2 +#define CLK_BDP_WR_CHANNEL_VDI_PXL 3 +#define CLK_BDP_WR_CHANNEL_VDI_DRAM 4 +#define CLK_BDP_WR_CHANNEL_VDI_B 5 +#define CLK_BDP_MT_B 6 +#define CLK_BDP_DISPFMT_27M 7 +#define CLK_BDP_DISPFMT_27M_VDOUT 8 +#define CLK_BDP_DISPFMT_27_74_74 9 +#define CLK_BDP_DISPFMT_2FS 10 +#define CLK_BDP_DISPFMT_2FS_2FS74_148 11 +#define CLK_BDP_DISPFMT_B 12 +#define CLK_BDP_VDO_DRAM 13 +#define CLK_BDP_VDO_2FS 14 +#define CLK_BDP_VDO_B 15 +#define CLK_BDP_WR_CHANNEL_DI_PXL 16 +#define CLK_BDP_WR_CHANNEL_DI_DRAM 17 +#define CLK_BDP_WR_CHANNEL_DI_B 18 +#define CLK_BDP_NR_AGENT 19 +#define CLK_BDP_NR_DRAM 20 +#define CLK_BDP_NR_B 21 +#define CLK_BDP_BRIDGE_RT_B 22 +#define CLK_BDP_BRIDGE_RT_DRAM 23 +#define CLK_BDP_LARB_RT_DRAM 24 +#define CLK_BDP_TVD_TDC 25 +#define CLK_BDP_TVD_54 26 +#define CLK_BDP_TVD_CBUS 27 +#define CLK_BDP_NR_CLK 28 + +/* VDECSYS */ + +#define CLK_VDEC_CKEN 0 +#define CLK_VDEC_LARB1_CKEN 1 +#define CLK_VDEC_IMGRZ_CKEN 2 +#define CLK_VDEC_NR_CLK 3 + +/* VENCSYS */ + +#define CLK_VENC_SMI_COMMON_CON 0 +#define CLK_VENC_VENC 1 +#define CLK_VENC_SMI_LARB6 2 +#define CLK_VENC_NR_CLK 3 + +/* JPGDECSYS */ + +#define CLK_JPGDEC_JPGDEC1 0 +#define CLK_JPGDEC_JPGDEC 1 +#define CLK_JPGDEC_NR_CLK 2 + +#endif /* _DT_BINDINGS_CLK_MT2712_H */ diff --git a/include/dt-bindings/clock/mt6797-clk.h b/include/dt-bindings/clock/mt6797-clk.h new file mode 100644 index 000000000..2f25a5aca --- /dev/null +++ b/include/dt-bindings/clock/mt6797-clk.h @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Kevin Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MT6797_H +#define _DT_BINDINGS_CLK_MT6797_H + +/* TOPCKGEN */ +#define CLK_TOP_MUX_ULPOSC_AXI_CK_MUX_PRE 1 +#define CLK_TOP_MUX_ULPOSC_AXI_CK_MUX 2 +#define CLK_TOP_MUX_AXI 3 +#define CLK_TOP_MUX_MEM 4 +#define CLK_TOP_MUX_DDRPHYCFG 5 +#define CLK_TOP_MUX_MM 6 +#define CLK_TOP_MUX_PWM 7 +#define CLK_TOP_MUX_VDEC 8 +#define CLK_TOP_MUX_VENC 9 +#define CLK_TOP_MUX_MFG 10 +#define CLK_TOP_MUX_CAMTG 11 +#define CLK_TOP_MUX_UART 12 +#define CLK_TOP_MUX_SPI 13 +#define CLK_TOP_MUX_ULPOSC_SPI_CK_MUX 14 +#define CLK_TOP_MUX_USB20 15 +#define CLK_TOP_MUX_MSDC50_0_HCLK 16 +#define CLK_TOP_MUX_MSDC50_0 17 +#define CLK_TOP_MUX_MSDC30_1 18 +#define CLK_TOP_MUX_MSDC30_2 19 +#define CLK_TOP_MUX_AUDIO 20 +#define CLK_TOP_MUX_AUD_INTBUS 21 +#define CLK_TOP_MUX_PMICSPI 22 +#define CLK_TOP_MUX_SCP 23 +#define CLK_TOP_MUX_ATB 24 +#define CLK_TOP_MUX_MJC 25 +#define CLK_TOP_MUX_DPI0 26 +#define CLK_TOP_MUX_AUD_1 27 +#define CLK_TOP_MUX_AUD_2 28 +#define CLK_TOP_MUX_SSUSB_TOP_SYS 29 +#define CLK_TOP_MUX_SPM 30 +#define CLK_TOP_MUX_BSI_SPI 31 +#define CLK_TOP_MUX_AUDIO_H 32 +#define CLK_TOP_MUX_ANC_MD32 33 +#define CLK_TOP_MUX_MFG_52M 34 +#define CLK_TOP_SYSPLL_CK 35 +#define CLK_TOP_SYSPLL_D2 36 +#define CLK_TOP_SYSPLL1_D2 37 +#define CLK_TOP_SYSPLL1_D4 38 +#define CLK_TOP_SYSPLL1_D8 39 +#define CLK_TOP_SYSPLL1_D16 40 +#define CLK_TOP_SYSPLL_D3 41 +#define CLK_TOP_SYSPLL_D3_D3 42 +#define CLK_TOP_SYSPLL2_D2 43 +#define CLK_TOP_SYSPLL2_D4 44 +#define CLK_TOP_SYSPLL2_D8 45 +#define CLK_TOP_SYSPLL_D5 46 +#define CLK_TOP_SYSPLL3_D2 47 +#define CLK_TOP_SYSPLL3_D4 48 +#define CLK_TOP_SYSPLL_D7 49 +#define CLK_TOP_SYSPLL4_D2 50 +#define CLK_TOP_SYSPLL4_D4 51 +#define CLK_TOP_UNIVPLL_CK 52 +#define CLK_TOP_UNIVPLL_D7 53 +#define CLK_TOP_UNIVPLL_D26 54 +#define CLK_TOP_SSUSB_PHY_48M_CK 55 +#define CLK_TOP_USB_PHY48M_CK 56 +#define CLK_TOP_UNIVPLL_D2 57 +#define CLK_TOP_UNIVPLL1_D2 58 +#define CLK_TOP_UNIVPLL1_D4 59 +#define CLK_TOP_UNIVPLL1_D8 60 +#define CLK_TOP_UNIVPLL_D3 61 +#define CLK_TOP_UNIVPLL2_D2 62 +#define CLK_TOP_UNIVPLL2_D4 63 +#define CLK_TOP_UNIVPLL2_D8 64 +#define CLK_TOP_UNIVPLL_D5 65 +#define CLK_TOP_UNIVPLL3_D2 66 +#define CLK_TOP_UNIVPLL3_D4 67 +#define CLK_TOP_UNIVPLL3_D8 68 +#define CLK_TOP_ULPOSC_CK_ORG 69 +#define CLK_TOP_ULPOSC_CK 70 +#define CLK_TOP_ULPOSC_D2 71 +#define CLK_TOP_ULPOSC_D3 72 +#define CLK_TOP_ULPOSC_D4 73 +#define CLK_TOP_ULPOSC_D8 74 +#define CLK_TOP_ULPOSC_D10 75 +#define CLK_TOP_APLL1_CK 76 +#define CLK_TOP_APLL2_CK 77 +#define CLK_TOP_MFGPLL_CK 78 +#define CLK_TOP_MFGPLL_D2 79 +#define CLK_TOP_IMGPLL_CK 80 +#define CLK_TOP_IMGPLL_D2 81 +#define CLK_TOP_IMGPLL_D4 82 +#define CLK_TOP_CODECPLL_CK 83 +#define CLK_TOP_CODECPLL_D2 84 +#define CLK_TOP_VDECPLL_CK 85 +#define CLK_TOP_TVDPLL_CK 86 +#define CLK_TOP_TVDPLL_D2 87 +#define CLK_TOP_TVDPLL_D4 88 +#define CLK_TOP_TVDPLL_D8 89 +#define CLK_TOP_TVDPLL_D16 90 +#define CLK_TOP_MSDCPLL_CK 91 +#define CLK_TOP_MSDCPLL_D2 92 +#define CLK_TOP_MSDCPLL_D4 93 +#define CLK_TOP_MSDCPLL_D8 94 +#define CLK_TOP_NR 95 + +/* APMIXED_SYS */ +#define CLK_APMIXED_MAINPLL 1 +#define CLK_APMIXED_UNIVPLL 2 +#define CLK_APMIXED_MFGPLL 3 +#define CLK_APMIXED_MSDCPLL 4 +#define CLK_APMIXED_IMGPLL 5 +#define CLK_APMIXED_TVDPLL 6 +#define CLK_APMIXED_CODECPLL 7 +#define CLK_APMIXED_VDECPLL 8 +#define CLK_APMIXED_APLL1 9 +#define CLK_APMIXED_APLL2 10 +#define CLK_APMIXED_NR 11 + +/* INFRA_SYS */ +#define CLK_INFRA_PMIC_TMR 1 +#define CLK_INFRA_PMIC_AP 2 +#define CLK_INFRA_PMIC_MD 3 +#define CLK_INFRA_PMIC_CONN 4 +#define 
CLK_INFRA_SCP 5 +#define CLK_INFRA_SEJ 6 +#define CLK_INFRA_APXGPT 7 +#define CLK_INFRA_SEJ_13M 8 +#define CLK_INFRA_ICUSB 9 +#define CLK_INFRA_GCE 10 +#define CLK_INFRA_THERM 11 +#define CLK_INFRA_I2C0 12 +#define CLK_INFRA_I2C1 13 +#define CLK_INFRA_I2C2 14 +#define CLK_INFRA_I2C3 15 +#define CLK_INFRA_PWM_HCLK 16 +#define CLK_INFRA_PWM1 17 +#define CLK_INFRA_PWM2 18 +#define CLK_INFRA_PWM3 19 +#define CLK_INFRA_PWM4 20 +#define CLK_INFRA_PWM 21 +#define CLK_INFRA_UART0 22 +#define CLK_INFRA_UART1 23 +#define CLK_INFRA_UART2 24 +#define CLK_INFRA_UART3 25 +#define CLK_INFRA_MD2MD_CCIF_0 26 +#define CLK_INFRA_MD2MD_CCIF_1 27 +#define CLK_INFRA_MD2MD_CCIF_2 28 +#define CLK_INFRA_FHCTL 29 +#define CLK_INFRA_BTIF 30 +#define CLK_INFRA_MD2MD_CCIF_3 31 +#define CLK_INFRA_SPI 32 +#define CLK_INFRA_MSDC0 33 +#define CLK_INFRA_MD2MD_CCIF_4 34 +#define CLK_INFRA_MSDC1 35 +#define CLK_INFRA_MSDC2 36 +#define CLK_INFRA_MD2MD_CCIF_5 37 +#define CLK_INFRA_GCPU 38 +#define CLK_INFRA_TRNG 39 +#define CLK_INFRA_AUXADC 40 +#define CLK_INFRA_CPUM 41 +#define CLK_INFRA_AP_C2K_CCIF_0 42 +#define CLK_INFRA_AP_C2K_CCIF_1 43 +#define CLK_INFRA_CLDMA 44 +#define CLK_INFRA_DISP_PWM 45 +#define CLK_INFRA_AP_DMA 46 +#define CLK_INFRA_DEVICE_APC 47 +#define CLK_INFRA_L2C_SRAM 48 +#define CLK_INFRA_CCIF_AP 49 +#define CLK_INFRA_AUDIO 50 +#define CLK_INFRA_CCIF_MD 51 +#define CLK_INFRA_DRAMC_F26M 52 +#define CLK_INFRA_I2C4 53 +#define CLK_INFRA_I2C_APPM 54 +#define CLK_INFRA_I2C_GPUPM 55 +#define CLK_INFRA_I2C2_IMM 56 +#define CLK_INFRA_I2C2_ARB 57 +#define CLK_INFRA_I2C3_IMM 58 +#define CLK_INFRA_I2C3_ARB 59 +#define CLK_INFRA_I2C5 60 +#define CLK_INFRA_SYS_CIRQ 61 +#define CLK_INFRA_SPI1 62 +#define CLK_INFRA_DRAMC_B_F26M 63 +#define CLK_INFRA_ANC_MD32 64 +#define CLK_INFRA_ANC_MD32_32K 65 +#define CLK_INFRA_DVFS_SPM1 66 +#define CLK_INFRA_AES_TOP0 67 +#define CLK_INFRA_AES_TOP1 68 +#define CLK_INFRA_SSUSB_BUS 69 +#define CLK_INFRA_SPI2 70 +#define CLK_INFRA_SPI3 71 +#define CLK_INFRA_SPI4 72 +#define CLK_INFRA_SPI5 73 +#define CLK_INFRA_IRTX 74 +#define CLK_INFRA_SSUSB_SYS 75 +#define CLK_INFRA_SSUSB_REF 76 +#define CLK_INFRA_AUDIO_26M 77 +#define CLK_INFRA_AUDIO_26M_PAD_TOP 78 +#define CLK_INFRA_MODEM_TEMP_SHARE 79 +#define CLK_INFRA_VAD_WRAP_SOC 80 +#define CLK_INFRA_DRAMC_CONF 81 +#define CLK_INFRA_DRAMC_B_CONF 82 +#define CLK_INFRA_MFG_VCG 83 +#define CLK_INFRA_13M 84 +#define CLK_INFRA_NR 85 + +/* IMG_SYS */ +#define CLK_IMG_FDVT 1 +#define CLK_IMG_DPE 2 +#define CLK_IMG_DIP 3 +#define CLK_IMG_LARB6 4 +#define CLK_IMG_NR 5 + +/* MM_SYS */ +#define CLK_MM_SMI_COMMON 1 +#define CLK_MM_SMI_LARB0 2 +#define CLK_MM_SMI_LARB5 3 +#define CLK_MM_CAM_MDP 4 +#define CLK_MM_MDP_RDMA0 5 +#define CLK_MM_MDP_RDMA1 6 +#define CLK_MM_MDP_RSZ0 7 +#define CLK_MM_MDP_RSZ1 8 +#define CLK_MM_MDP_RSZ2 9 +#define CLK_MM_MDP_TDSHP 10 +#define CLK_MM_MDP_COLOR 11 +#define CLK_MM_MDP_WDMA 12 +#define CLK_MM_MDP_WROT0 13 +#define CLK_MM_MDP_WROT1 14 +#define CLK_MM_FAKE_ENG 15 +#define CLK_MM_DISP_OVL0 16 +#define CLK_MM_DISP_OVL1 17 +#define CLK_MM_DISP_OVL0_2L 18 +#define CLK_MM_DISP_OVL1_2L 19 +#define CLK_MM_DISP_RDMA0 20 +#define CLK_MM_DISP_RDMA1 21 +#define CLK_MM_DISP_WDMA0 22 +#define CLK_MM_DISP_WDMA1 23 +#define CLK_MM_DISP_COLOR 24 +#define CLK_MM_DISP_CCORR 25 +#define CLK_MM_DISP_AAL 26 +#define CLK_MM_DISP_GAMMA 27 +#define CLK_MM_DISP_OD 28 +#define CLK_MM_DISP_DITHER 29 +#define CLK_MM_DISP_UFOE 30 +#define CLK_MM_DISP_DSC 31 +#define CLK_MM_DISP_SPLIT 32 +#define CLK_MM_DSI0_MM_CLOCK 33 +#define CLK_MM_DSI1_MM_CLOCK 34 
+#define CLK_MM_DPI_MM_CLOCK 35 +#define CLK_MM_DPI_INTERFACE_CLOCK 36 +#define CLK_MM_LARB4_AXI_ASIF_MM_CLOCK 37 +#define CLK_MM_LARB4_AXI_ASIF_MJC_CLOCK 38 +#define CLK_MM_DISP_OVL0_MOUT_CLOCK 39 +#define CLK_MM_FAKE_ENG2 40 +#define CLK_MM_DSI0_INTERFACE_CLOCK 41 +#define CLK_MM_DSI1_INTERFACE_CLOCK 42 +#define CLK_MM_NR 43 + +/* VDEC_SYS */ +#define CLK_VDEC_CKEN_ENG 1 +#define CLK_VDEC_ACTIVE 2 +#define CLK_VDEC_CKEN 3 +#define CLK_VDEC_LARB1_CKEN 4 +#define CLK_VDEC_NR 5 + +/* VENC_SYS */ +#define CLK_VENC_0 1 +#define CLK_VENC_1 2 +#define CLK_VENC_2 3 +#define CLK_VENC_3 4 +#define CLK_VENC_NR 5 + +#endif /* _DT_BINDINGS_CLK_MT6797_H */ diff --git a/include/dt-bindings/clock/mt7622-clk.h b/include/dt-bindings/clock/mt7622-clk.h new file mode 100644 index 000000000..e9d77f0e8 --- /dev/null +++ b/include/dt-bindings/clock/mt7622-clk.h @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Chen Zhong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MT7622_H +#define _DT_BINDINGS_CLK_MT7622_H + +/* TOPCKGEN */ + +#define CLK_TOP_TO_U2_PHY 0 +#define CLK_TOP_TO_U2_PHY_1P 1 +#define CLK_TOP_PCIE0_PIPE_EN 2 +#define CLK_TOP_PCIE1_PIPE_EN 3 +#define CLK_TOP_SSUSB_TX250M 4 +#define CLK_TOP_SSUSB_EQ_RX250M 5 +#define CLK_TOP_SSUSB_CDR_REF 6 +#define CLK_TOP_SSUSB_CDR_FB 7 +#define CLK_TOP_SATA_ASIC 8 +#define CLK_TOP_SATA_RBC 9 +#define CLK_TOP_TO_USB3_SYS 10 +#define CLK_TOP_P1_1MHZ 11 +#define CLK_TOP_4MHZ 12 +#define CLK_TOP_P0_1MHZ 13 +#define CLK_TOP_TXCLK_SRC_PRE 14 +#define CLK_TOP_RTC 15 +#define CLK_TOP_MEMPLL 16 +#define CLK_TOP_DMPLL 17 +#define CLK_TOP_SYSPLL_D2 18 +#define CLK_TOP_SYSPLL1_D2 19 +#define CLK_TOP_SYSPLL1_D4 20 +#define CLK_TOP_SYSPLL1_D8 21 +#define CLK_TOP_SYSPLL2_D4 22 +#define CLK_TOP_SYSPLL2_D8 23 +#define CLK_TOP_SYSPLL_D5 24 +#define CLK_TOP_SYSPLL3_D2 25 +#define CLK_TOP_SYSPLL3_D4 26 +#define CLK_TOP_SYSPLL4_D2 27 +#define CLK_TOP_SYSPLL4_D4 28 +#define CLK_TOP_SYSPLL4_D16 29 +#define CLK_TOP_UNIVPLL 30 +#define CLK_TOP_UNIVPLL_D2 31 +#define CLK_TOP_UNIVPLL1_D2 32 +#define CLK_TOP_UNIVPLL1_D4 33 +#define CLK_TOP_UNIVPLL1_D8 34 +#define CLK_TOP_UNIVPLL1_D16 35 +#define CLK_TOP_UNIVPLL2_D2 36 +#define CLK_TOP_UNIVPLL2_D4 37 +#define CLK_TOP_UNIVPLL2_D8 38 +#define CLK_TOP_UNIVPLL2_D16 39 +#define CLK_TOP_UNIVPLL_D5 40 +#define CLK_TOP_UNIVPLL3_D2 41 +#define CLK_TOP_UNIVPLL3_D4 42 +#define CLK_TOP_UNIVPLL3_D16 43 +#define CLK_TOP_UNIVPLL_D7 44 +#define CLK_TOP_UNIVPLL_D80_D4 45 +#define CLK_TOP_UNIV48M 46 +#define CLK_TOP_SGMIIPLL 47 +#define CLK_TOP_SGMIIPLL_D2 48 +#define CLK_TOP_AUD1PLL 49 +#define CLK_TOP_AUD2PLL 50 +#define CLK_TOP_AUD_I2S2_MCK 51 +#define CLK_TOP_TO_USB3_REF 52 +#define CLK_TOP_PCIE1_MAC_EN 53 +#define CLK_TOP_PCIE0_MAC_EN 54 +#define CLK_TOP_ETH_500M 55 +#define CLK_TOP_AXI_SEL 56 +#define CLK_TOP_MEM_SEL 57 +#define CLK_TOP_DDRPHYCFG_SEL 58 +#define CLK_TOP_ETH_SEL 59 +#define CLK_TOP_PWM_SEL 60 +#define CLK_TOP_F10M_REF_SEL 61 +#define CLK_TOP_NFI_INFRA_SEL 62 +#define CLK_TOP_FLASH_SEL 63 +#define CLK_TOP_UART_SEL 64 +#define CLK_TOP_SPI0_SEL 65 +#define CLK_TOP_SPI1_SEL 66 +#define 
CLK_TOP_MSDC50_0_SEL 67 +#define CLK_TOP_MSDC30_0_SEL 68 +#define CLK_TOP_MSDC30_1_SEL 69 +#define CLK_TOP_A1SYS_HP_SEL 70 +#define CLK_TOP_A2SYS_HP_SEL 71 +#define CLK_TOP_INTDIR_SEL 72 +#define CLK_TOP_AUD_INTBUS_SEL 73 +#define CLK_TOP_PMICSPI_SEL 74 +#define CLK_TOP_SCP_SEL 75 +#define CLK_TOP_ATB_SEL 76 +#define CLK_TOP_HIF_SEL 77 +#define CLK_TOP_AUDIO_SEL 78 +#define CLK_TOP_U2_SEL 79 +#define CLK_TOP_AUD1_SEL 80 +#define CLK_TOP_AUD2_SEL 81 +#define CLK_TOP_IRRX_SEL 82 +#define CLK_TOP_IRTX_SEL 83 +#define CLK_TOP_ASM_L_SEL 84 +#define CLK_TOP_ASM_M_SEL 85 +#define CLK_TOP_ASM_H_SEL 86 +#define CLK_TOP_APLL1_SEL 87 +#define CLK_TOP_APLL2_SEL 88 +#define CLK_TOP_I2S0_MCK_SEL 89 +#define CLK_TOP_I2S1_MCK_SEL 90 +#define CLK_TOP_I2S2_MCK_SEL 91 +#define CLK_TOP_I2S3_MCK_SEL 92 +#define CLK_TOP_APLL1_DIV 93 +#define CLK_TOP_APLL2_DIV 94 +#define CLK_TOP_I2S0_MCK_DIV 95 +#define CLK_TOP_I2S1_MCK_DIV 96 +#define CLK_TOP_I2S2_MCK_DIV 97 +#define CLK_TOP_I2S3_MCK_DIV 98 +#define CLK_TOP_A1SYS_HP_DIV 99 +#define CLK_TOP_A2SYS_HP_DIV 100 +#define CLK_TOP_APLL1_DIV_PD 101 +#define CLK_TOP_APLL2_DIV_PD 102 +#define CLK_TOP_I2S0_MCK_DIV_PD 103 +#define CLK_TOP_I2S1_MCK_DIV_PD 104 +#define CLK_TOP_I2S2_MCK_DIV_PD 105 +#define CLK_TOP_I2S3_MCK_DIV_PD 106 +#define CLK_TOP_A1SYS_HP_DIV_PD 107 +#define CLK_TOP_A2SYS_HP_DIV_PD 108 +#define CLK_TOP_NR_CLK 109 + +/* INFRACFG */ + +#define CLK_INFRA_MUX1_SEL 0 +#define CLK_INFRA_DBGCLK_PD 1 +#define CLK_INFRA_AUDIO_PD 2 +#define CLK_INFRA_IRRX_PD 3 +#define CLK_INFRA_APXGPT_PD 4 +#define CLK_INFRA_PMIC_PD 5 +#define CLK_INFRA_TRNG 6 +#define CLK_INFRA_NR_CLK 7 + +/* PERICFG */ + +#define CLK_PERIBUS_SEL 0 +#define CLK_PERI_THERM_PD 1 +#define CLK_PERI_PWM1_PD 2 +#define CLK_PERI_PWM2_PD 3 +#define CLK_PERI_PWM3_PD 4 +#define CLK_PERI_PWM4_PD 5 +#define CLK_PERI_PWM5_PD 6 +#define CLK_PERI_PWM6_PD 7 +#define CLK_PERI_PWM7_PD 8 +#define CLK_PERI_PWM_PD 9 +#define CLK_PERI_AP_DMA_PD 10 +#define CLK_PERI_MSDC30_0_PD 11 +#define CLK_PERI_MSDC30_1_PD 12 +#define CLK_PERI_UART0_PD 13 +#define CLK_PERI_UART1_PD 14 +#define CLK_PERI_UART2_PD 15 +#define CLK_PERI_UART3_PD 16 +#define CLK_PERI_UART4_PD 17 +#define CLK_PERI_BTIF_PD 18 +#define CLK_PERI_I2C0_PD 19 +#define CLK_PERI_I2C1_PD 20 +#define CLK_PERI_I2C2_PD 21 +#define CLK_PERI_SPI1_PD 22 +#define CLK_PERI_AUXADC_PD 23 +#define CLK_PERI_SPI0_PD 24 +#define CLK_PERI_SNFI_PD 25 +#define CLK_PERI_NFI_PD 26 +#define CLK_PERI_NFIECC_PD 27 +#define CLK_PERI_FLASH_PD 28 +#define CLK_PERI_IRTX_PD 29 +#define CLK_PERI_NR_CLK 30 + +/* APMIXEDSYS */ + +#define CLK_APMIXED_ARMPLL 0 +#define CLK_APMIXED_MAINPLL 1 +#define CLK_APMIXED_UNIV2PLL 2 +#define CLK_APMIXED_ETH1PLL 3 +#define CLK_APMIXED_ETH2PLL 4 +#define CLK_APMIXED_AUD1PLL 5 +#define CLK_APMIXED_AUD2PLL 6 +#define CLK_APMIXED_TRGPLL 7 +#define CLK_APMIXED_SGMIPLL 8 +#define CLK_APMIXED_MAIN_CORE_EN 9 +#define CLK_APMIXED_NR_CLK 10 + +/* AUDIOSYS */ + +#define CLK_AUDIO_AFE 0 +#define CLK_AUDIO_HDMI 1 +#define CLK_AUDIO_SPDF 2 +#define CLK_AUDIO_APLL 3 +#define CLK_AUDIO_I2SIN1 4 +#define CLK_AUDIO_I2SIN2 5 +#define CLK_AUDIO_I2SIN3 6 +#define CLK_AUDIO_I2SIN4 7 +#define CLK_AUDIO_I2SO1 8 +#define CLK_AUDIO_I2SO2 9 +#define CLK_AUDIO_I2SO3 10 +#define CLK_AUDIO_I2SO4 11 +#define CLK_AUDIO_ASRCI1 12 +#define CLK_AUDIO_ASRCI2 13 +#define CLK_AUDIO_ASRCO1 14 +#define CLK_AUDIO_ASRCO2 15 +#define CLK_AUDIO_INTDIR 16 +#define CLK_AUDIO_A1SYS 17 +#define CLK_AUDIO_A2SYS 18 +#define CLK_AUDIO_UL1 19 +#define CLK_AUDIO_UL2 20 +#define CLK_AUDIO_UL3 21 +#define 
CLK_AUDIO_UL4 22 +#define CLK_AUDIO_UL5 23 +#define CLK_AUDIO_UL6 24 +#define CLK_AUDIO_DL1 25 +#define CLK_AUDIO_DL2 26 +#define CLK_AUDIO_DL3 27 +#define CLK_AUDIO_DL4 28 +#define CLK_AUDIO_DL5 29 +#define CLK_AUDIO_DL6 30 +#define CLK_AUDIO_DLMCH 31 +#define CLK_AUDIO_ARB1 32 +#define CLK_AUDIO_AWB 33 +#define CLK_AUDIO_AWB2 34 +#define CLK_AUDIO_DAI 35 +#define CLK_AUDIO_MOD 36 +#define CLK_AUDIO_ASRCI3 37 +#define CLK_AUDIO_ASRCI4 38 +#define CLK_AUDIO_ASRCO3 39 +#define CLK_AUDIO_ASRCO4 40 +#define CLK_AUDIO_MEM_ASRC1 41 +#define CLK_AUDIO_MEM_ASRC2 42 +#define CLK_AUDIO_MEM_ASRC3 43 +#define CLK_AUDIO_MEM_ASRC4 44 +#define CLK_AUDIO_MEM_ASRC5 45 +#define CLK_AUDIO_AFE_CONN 46 +#define CLK_AUDIO_NR_CLK 47 + +/* SSUSBSYS */ + +#define CLK_SSUSB_U2_PHY_1P_EN 0 +#define CLK_SSUSB_U2_PHY_EN 1 +#define CLK_SSUSB_REF_EN 2 +#define CLK_SSUSB_SYS_EN 3 +#define CLK_SSUSB_MCU_EN 4 +#define CLK_SSUSB_DMA_EN 5 +#define CLK_SSUSB_NR_CLK 6 + +/* PCIESYS */ + +#define CLK_PCIE_P1_AUX_EN 0 +#define CLK_PCIE_P1_OBFF_EN 1 +#define CLK_PCIE_P1_AHB_EN 2 +#define CLK_PCIE_P1_AXI_EN 3 +#define CLK_PCIE_P1_MAC_EN 4 +#define CLK_PCIE_P1_PIPE_EN 5 +#define CLK_PCIE_P0_AUX_EN 6 +#define CLK_PCIE_P0_OBFF_EN 7 +#define CLK_PCIE_P0_AHB_EN 8 +#define CLK_PCIE_P0_AXI_EN 9 +#define CLK_PCIE_P0_MAC_EN 10 +#define CLK_PCIE_P0_PIPE_EN 11 +#define CLK_SATA_AHB_EN 12 +#define CLK_SATA_AXI_EN 13 +#define CLK_SATA_ASIC_EN 14 +#define CLK_SATA_RBC_EN 15 +#define CLK_SATA_PM_EN 16 +#define CLK_PCIE_NR_CLK 17 + +/* ETHSYS */ + +#define CLK_ETH_HSDMA_EN 0 +#define CLK_ETH_ESW_EN 1 +#define CLK_ETH_GP2_EN 2 +#define CLK_ETH_GP1_EN 3 +#define CLK_ETH_GP0_EN 4 +#define CLK_ETH_NR_CLK 5 + +/* SGMIISYS */ + +#define CLK_SGMII_TX250M_EN 0 +#define CLK_SGMII_RX250M_EN 1 +#define CLK_SGMII_CDR_REF 2 +#define CLK_SGMII_CDR_FB 3 +#define CLK_SGMII_NR_CLK 4 + +#endif /* _DT_BINDINGS_CLK_MT7622_H */ + diff --git a/include/dt-bindings/clock/mt8135-clk.h b/include/dt-bindings/clock/mt8135-clk.h new file mode 100644 index 000000000..6dac6c091 --- /dev/null +++ b/include/dt-bindings/clock/mt8135-clk.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MT8135_H +#define _DT_BINDINGS_CLK_MT8135_H + +/* TOPCKGEN */ + +#define CLK_TOP_DSI0_LNTC_DSICLK 1 +#define CLK_TOP_HDMITX_CLKDIG_CTS 2 +#define CLK_TOP_CLKPH_MCK 3 +#define CLK_TOP_CPUM_TCK_IN 4 +#define CLK_TOP_MAINPLL_806M 5 +#define CLK_TOP_MAINPLL_537P3M 6 +#define CLK_TOP_MAINPLL_322P4M 7 +#define CLK_TOP_MAINPLL_230P3M 8 +#define CLK_TOP_UNIVPLL_624M 9 +#define CLK_TOP_UNIVPLL_416M 10 +#define CLK_TOP_UNIVPLL_249P6M 11 +#define CLK_TOP_UNIVPLL_178P3M 12 +#define CLK_TOP_UNIVPLL_48M 13 +#define CLK_TOP_MMPLL_D2 14 +#define CLK_TOP_MMPLL_D3 15 +#define CLK_TOP_MMPLL_D5 16 +#define CLK_TOP_MMPLL_D7 17 +#define CLK_TOP_MMPLL_D4 18 +#define CLK_TOP_MMPLL_D6 19 +#define CLK_TOP_SYSPLL_D2 20 +#define CLK_TOP_SYSPLL_D4 21 +#define CLK_TOP_SYSPLL_D6 22 +#define CLK_TOP_SYSPLL_D8 23 +#define CLK_TOP_SYSPLL_D10 24 +#define CLK_TOP_SYSPLL_D12 25 +#define CLK_TOP_SYSPLL_D16 26 +#define CLK_TOP_SYSPLL_D24 27 +#define CLK_TOP_SYSPLL_D3 28 +#define CLK_TOP_SYSPLL_D2P5 29 +#define CLK_TOP_SYSPLL_D5 30 +#define CLK_TOP_SYSPLL_D3P5 31 +#define CLK_TOP_UNIVPLL1_D2 32 +#define CLK_TOP_UNIVPLL1_D4 33 +#define CLK_TOP_UNIVPLL1_D6 34 +#define CLK_TOP_UNIVPLL1_D8 35 +#define CLK_TOP_UNIVPLL1_D10 36 +#define CLK_TOP_UNIVPLL2_D2 37 +#define CLK_TOP_UNIVPLL2_D4 38 +#define CLK_TOP_UNIVPLL2_D6 39 +#define CLK_TOP_UNIVPLL2_D8 40 +#define CLK_TOP_UNIVPLL_D3 41 +#define CLK_TOP_UNIVPLL_D5 42 +#define CLK_TOP_UNIVPLL_D7 43 +#define CLK_TOP_UNIVPLL_D10 44 +#define CLK_TOP_UNIVPLL_D26 45 +#define CLK_TOP_APLL 46 +#define CLK_TOP_APLL_D4 47 +#define CLK_TOP_APLL_D8 48 +#define CLK_TOP_APLL_D16 49 +#define CLK_TOP_APLL_D24 50 +#define CLK_TOP_LVDSPLL_D2 51 +#define CLK_TOP_LVDSPLL_D4 52 +#define CLK_TOP_LVDSPLL_D8 53 +#define CLK_TOP_LVDSTX_CLKDIG_CT 54 +#define CLK_TOP_VPLL_DPIX 55 +#define CLK_TOP_TVHDMI_H 56 +#define CLK_TOP_HDMITX_CLKDIG_D2 57 +#define CLK_TOP_HDMITX_CLKDIG_D3 58 +#define CLK_TOP_TVHDMI_D2 59 +#define CLK_TOP_TVHDMI_D4 60 +#define CLK_TOP_MEMPLL_MCK_D4 61 +#define CLK_TOP_AXI_SEL 62 +#define CLK_TOP_SMI_SEL 63 +#define CLK_TOP_MFG_SEL 64 +#define CLK_TOP_IRDA_SEL 65 +#define CLK_TOP_CAM_SEL 66 +#define CLK_TOP_AUD_INTBUS_SEL 67 +#define CLK_TOP_JPG_SEL 68 +#define CLK_TOP_DISP_SEL 69 +#define CLK_TOP_MSDC30_1_SEL 70 +#define CLK_TOP_MSDC30_2_SEL 71 +#define CLK_TOP_MSDC30_3_SEL 72 +#define CLK_TOP_MSDC30_4_SEL 73 +#define CLK_TOP_USB20_SEL 74 +#define CLK_TOP_VENC_SEL 75 +#define CLK_TOP_SPI_SEL 76 +#define CLK_TOP_UART_SEL 77 +#define CLK_TOP_MEM_SEL 78 +#define CLK_TOP_CAMTG_SEL 79 +#define CLK_TOP_AUDIO_SEL 80 +#define CLK_TOP_FIX_SEL 81 +#define CLK_TOP_VDEC_SEL 82 +#define CLK_TOP_DDRPHYCFG_SEL 83 +#define CLK_TOP_DPILVDS_SEL 84 +#define CLK_TOP_PMICSPI_SEL 85 +#define CLK_TOP_MSDC30_0_SEL 86 +#define CLK_TOP_SMI_MFG_AS_SEL 87 +#define CLK_TOP_GCPU_SEL 88 +#define CLK_TOP_DPI1_SEL 89 +#define CLK_TOP_CCI_SEL 90 +#define CLK_TOP_APLL_SEL 91 +#define CLK_TOP_HDMIPLL_SEL 92 +#define CLK_TOP_NR_CLK 93 + +/* APMIXED_SYS */ + +#define CLK_APMIXED_ARMPLL1 1 +#define CLK_APMIXED_ARMPLL2 2 +#define CLK_APMIXED_MAINPLL 3 +#define CLK_APMIXED_UNIVPLL 4 +#define CLK_APMIXED_MMPLL 5 +#define CLK_APMIXED_MSDCPLL 6 +#define CLK_APMIXED_TVDPLL 7 +#define CLK_APMIXED_LVDSPLL 8 +#define CLK_APMIXED_AUDPLL 9 +#define CLK_APMIXED_VDECPLL 10 +#define CLK_APMIXED_NR_CLK 11 + +/* INFRA_SYS */ + +#define CLK_INFRA_PMIC_WRAP 1 +#define CLK_INFRA_PMICSPI 2 +#define CLK_INFRA_CCIF1_AP_CTRL 3 +#define CLK_INFRA_CCIF0_AP_CTRL 4 +#define CLK_INFRA_KP 5 +#define CLK_INFRA_CPUM 6 +#define 
CLK_INFRA_M4U 7 +#define CLK_INFRA_MFGAXI 8 +#define CLK_INFRA_DEVAPC 9 +#define CLK_INFRA_AUDIO 10 +#define CLK_INFRA_MFG_BUS 11 +#define CLK_INFRA_SMI 12 +#define CLK_INFRA_DBGCLK 13 +#define CLK_INFRA_NR_CLK 14 + +/* PERI_SYS */ + +#define CLK_PERI_I2C5 1 +#define CLK_PERI_I2C4 2 +#define CLK_PERI_I2C3 3 +#define CLK_PERI_I2C2 4 +#define CLK_PERI_I2C1 5 +#define CLK_PERI_I2C0 6 +#define CLK_PERI_UART3 7 +#define CLK_PERI_UART2 8 +#define CLK_PERI_UART1 9 +#define CLK_PERI_UART0 10 +#define CLK_PERI_IRDA 11 +#define CLK_PERI_NLI 12 +#define CLK_PERI_MD_HIF 13 +#define CLK_PERI_AP_HIF 14 +#define CLK_PERI_MSDC30_3 15 +#define CLK_PERI_MSDC30_2 16 +#define CLK_PERI_MSDC30_1 17 +#define CLK_PERI_MSDC20_2 18 +#define CLK_PERI_MSDC20_1 19 +#define CLK_PERI_AP_DMA 20 +#define CLK_PERI_USB1 21 +#define CLK_PERI_USB0 22 +#define CLK_PERI_PWM 23 +#define CLK_PERI_PWM7 24 +#define CLK_PERI_PWM6 25 +#define CLK_PERI_PWM5 26 +#define CLK_PERI_PWM4 27 +#define CLK_PERI_PWM3 28 +#define CLK_PERI_PWM2 29 +#define CLK_PERI_PWM1 30 +#define CLK_PERI_THERM 31 +#define CLK_PERI_NFI 32 +#define CLK_PERI_USBSLV 33 +#define CLK_PERI_USB1_MCU 34 +#define CLK_PERI_USB0_MCU 35 +#define CLK_PERI_GCPU 36 +#define CLK_PERI_FHCTL 37 +#define CLK_PERI_SPI1 38 +#define CLK_PERI_AUXADC 39 +#define CLK_PERI_PERI_PWRAP 40 +#define CLK_PERI_I2C6 41 +#define CLK_PERI_UART0_SEL 42 +#define CLK_PERI_UART1_SEL 43 +#define CLK_PERI_UART2_SEL 44 +#define CLK_PERI_UART3_SEL 45 +#define CLK_PERI_NR_CLK 46 + +#endif /* _DT_BINDINGS_CLK_MT8135_H */ diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h new file mode 100644 index 000000000..8aea623dd --- /dev/null +++ b/include/dt-bindings/clock/mt8173-clk.h @@ -0,0 +1,330 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MT8173_H +#define _DT_BINDINGS_CLK_MT8173_H + +/* TOPCKGEN */ + +#define CLK_TOP_CLKPH_MCK_O 1 +#define CLK_TOP_USB_SYSPLL_125M 3 +#define CLK_TOP_HDMITX_DIG_CTS 4 +#define CLK_TOP_ARMCA7PLL_754M 5 +#define CLK_TOP_ARMCA7PLL_502M 6 +#define CLK_TOP_MAIN_H546M 7 +#define CLK_TOP_MAIN_H364M 8 +#define CLK_TOP_MAIN_H218P4M 9 +#define CLK_TOP_MAIN_H156M 10 +#define CLK_TOP_TVDPLL_445P5M 11 +#define CLK_TOP_TVDPLL_594M 12 +#define CLK_TOP_UNIV_624M 13 +#define CLK_TOP_UNIV_416M 14 +#define CLK_TOP_UNIV_249P6M 15 +#define CLK_TOP_UNIV_178P3M 16 +#define CLK_TOP_UNIV_48M 17 +#define CLK_TOP_CLKRTC_EXT 18 +#define CLK_TOP_CLKRTC_INT 19 +#define CLK_TOP_FPC 20 +#define CLK_TOP_HDMITXPLL_D2 21 +#define CLK_TOP_HDMITXPLL_D3 22 +#define CLK_TOP_ARMCA7PLL_D2 23 +#define CLK_TOP_ARMCA7PLL_D3 24 +#define CLK_TOP_APLL1 25 +#define CLK_TOP_APLL2 26 +#define CLK_TOP_DMPLL 27 +#define CLK_TOP_DMPLL_D2 28 +#define CLK_TOP_DMPLL_D4 29 +#define CLK_TOP_DMPLL_D8 30 +#define CLK_TOP_DMPLL_D16 31 +#define CLK_TOP_LVDSPLL_D2 32 +#define CLK_TOP_LVDSPLL_D4 33 +#define CLK_TOP_LVDSPLL_D8 34 +#define CLK_TOP_MMPLL 35 +#define CLK_TOP_MMPLL_D2 36 +#define CLK_TOP_MSDCPLL 37 +#define CLK_TOP_MSDCPLL_D2 38 +#define CLK_TOP_MSDCPLL_D4 39 +#define CLK_TOP_MSDCPLL2 40 +#define CLK_TOP_MSDCPLL2_D2 41 +#define CLK_TOP_MSDCPLL2_D4 42 +#define CLK_TOP_SYSPLL_D2 43 +#define CLK_TOP_SYSPLL1_D2 44 +#define CLK_TOP_SYSPLL1_D4 45 +#define CLK_TOP_SYSPLL1_D8 46 +#define CLK_TOP_SYSPLL1_D16 47 +#define CLK_TOP_SYSPLL_D3 48 +#define CLK_TOP_SYSPLL2_D2 49 +#define CLK_TOP_SYSPLL2_D4 50 +#define CLK_TOP_SYSPLL_D5 51 +#define CLK_TOP_SYSPLL3_D2 52 +#define CLK_TOP_SYSPLL3_D4 53 +#define CLK_TOP_SYSPLL_D7 54 +#define CLK_TOP_SYSPLL4_D2 55 +#define CLK_TOP_SYSPLL4_D4 56 +#define CLK_TOP_TVDPLL 57 +#define CLK_TOP_TVDPLL_D2 58 +#define CLK_TOP_TVDPLL_D4 59 +#define CLK_TOP_TVDPLL_D8 60 +#define CLK_TOP_TVDPLL_D16 61 +#define CLK_TOP_UNIVPLL_D2 62 +#define CLK_TOP_UNIVPLL1_D2 63 +#define CLK_TOP_UNIVPLL1_D4 64 +#define CLK_TOP_UNIVPLL1_D8 65 +#define CLK_TOP_UNIVPLL_D3 66 +#define CLK_TOP_UNIVPLL2_D2 67 +#define CLK_TOP_UNIVPLL2_D4 68 +#define CLK_TOP_UNIVPLL2_D8 69 +#define CLK_TOP_UNIVPLL_D5 70 +#define CLK_TOP_UNIVPLL3_D2 71 +#define CLK_TOP_UNIVPLL3_D4 72 +#define CLK_TOP_UNIVPLL3_D8 73 +#define CLK_TOP_UNIVPLL_D7 74 +#define CLK_TOP_UNIVPLL_D26 75 +#define CLK_TOP_UNIVPLL_D52 76 +#define CLK_TOP_VCODECPLL 77 +#define CLK_TOP_VCODECPLL_370P5 78 +#define CLK_TOP_VENCPLL 79 +#define CLK_TOP_VENCPLL_D2 80 +#define CLK_TOP_VENCPLL_D4 81 +#define CLK_TOP_AXI_SEL 82 +#define CLK_TOP_MEM_SEL 83 +#define CLK_TOP_DDRPHYCFG_SEL 84 +#define CLK_TOP_MM_SEL 85 +#define CLK_TOP_PWM_SEL 86 +#define CLK_TOP_VDEC_SEL 87 +#define CLK_TOP_VENC_SEL 88 +#define CLK_TOP_MFG_SEL 89 +#define CLK_TOP_CAMTG_SEL 90 +#define CLK_TOP_UART_SEL 91 +#define CLK_TOP_SPI_SEL 92 +#define CLK_TOP_USB20_SEL 93 +#define CLK_TOP_USB30_SEL 94 +#define CLK_TOP_MSDC50_0_H_SEL 95 +#define CLK_TOP_MSDC50_0_SEL 96 +#define CLK_TOP_MSDC30_1_SEL 97 +#define CLK_TOP_MSDC30_2_SEL 98 +#define CLK_TOP_MSDC30_3_SEL 99 +#define CLK_TOP_AUDIO_SEL 100 +#define CLK_TOP_AUD_INTBUS_SEL 101 +#define CLK_TOP_PMICSPI_SEL 102 +#define CLK_TOP_SCP_SEL 103 +#define CLK_TOP_ATB_SEL 104 +#define CLK_TOP_VENC_LT_SEL 105 +#define CLK_TOP_DPI0_SEL 106 +#define CLK_TOP_IRDA_SEL 107 +#define CLK_TOP_CCI400_SEL 108 +#define CLK_TOP_AUD_1_SEL 109 +#define CLK_TOP_AUD_2_SEL 110 +#define CLK_TOP_MEM_MFG_IN_SEL 111 +#define CLK_TOP_AXI_MFG_IN_SEL 112 +#define CLK_TOP_SCAM_SEL 113 
+#define CLK_TOP_SPINFI_IFR_SEL 114 +#define CLK_TOP_HDMI_SEL 115 +#define CLK_TOP_DPILVDS_SEL 116 +#define CLK_TOP_MSDC50_2_H_SEL 117 +#define CLK_TOP_HDCP_SEL 118 +#define CLK_TOP_HDCP_24M_SEL 119 +#define CLK_TOP_RTC_SEL 120 +#define CLK_TOP_APLL1_DIV0 121 +#define CLK_TOP_APLL1_DIV1 122 +#define CLK_TOP_APLL1_DIV2 123 +#define CLK_TOP_APLL1_DIV3 124 +#define CLK_TOP_APLL1_DIV4 125 +#define CLK_TOP_APLL1_DIV5 126 +#define CLK_TOP_APLL2_DIV0 127 +#define CLK_TOP_APLL2_DIV1 128 +#define CLK_TOP_APLL2_DIV2 129 +#define CLK_TOP_APLL2_DIV3 130 +#define CLK_TOP_APLL2_DIV4 131 +#define CLK_TOP_APLL2_DIV5 132 +#define CLK_TOP_I2S0_M_SEL 133 +#define CLK_TOP_I2S1_M_SEL 134 +#define CLK_TOP_I2S2_M_SEL 135 +#define CLK_TOP_I2S3_M_SEL 136 +#define CLK_TOP_I2S3_B_SEL 137 +#define CLK_TOP_DSI0_DIG 138 +#define CLK_TOP_DSI1_DIG 139 +#define CLK_TOP_LVDS_PXL 140 +#define CLK_TOP_LVDS_CTS 141 +#define CLK_TOP_NR_CLK 142 + +/* APMIXED_SYS */ + +#define CLK_APMIXED_ARMCA15PLL 1 +#define CLK_APMIXED_ARMCA7PLL 2 +#define CLK_APMIXED_MAINPLL 3 +#define CLK_APMIXED_UNIVPLL 4 +#define CLK_APMIXED_MMPLL 5 +#define CLK_APMIXED_MSDCPLL 6 +#define CLK_APMIXED_VENCPLL 7 +#define CLK_APMIXED_TVDPLL 8 +#define CLK_APMIXED_MPLL 9 +#define CLK_APMIXED_VCODECPLL 10 +#define CLK_APMIXED_APLL1 11 +#define CLK_APMIXED_APLL2 12 +#define CLK_APMIXED_LVDSPLL 13 +#define CLK_APMIXED_MSDCPLL2 14 +#define CLK_APMIXED_REF2USB_TX 15 +#define CLK_APMIXED_HDMI_REF 16 +#define CLK_APMIXED_NR_CLK 17 + +/* INFRA_SYS */ + +#define CLK_INFRA_DBGCLK 1 +#define CLK_INFRA_SMI 2 +#define CLK_INFRA_AUDIO 3 +#define CLK_INFRA_GCE 4 +#define CLK_INFRA_L2C_SRAM 5 +#define CLK_INFRA_M4U 6 +#define CLK_INFRA_CPUM 7 +#define CLK_INFRA_KP 8 +#define CLK_INFRA_CEC 9 +#define CLK_INFRA_PMICSPI 10 +#define CLK_INFRA_PMICWRAP 11 +#define CLK_INFRA_CLK_13M 12 +#define CLK_INFRA_CA53SEL 13 +#define CLK_INFRA_CA57SEL 14 +#define CLK_INFRA_NR_CLK 15 + +/* PERI_SYS */ + +#define CLK_PERI_NFI 1 +#define CLK_PERI_THERM 2 +#define CLK_PERI_PWM1 3 +#define CLK_PERI_PWM2 4 +#define CLK_PERI_PWM3 5 +#define CLK_PERI_PWM4 6 +#define CLK_PERI_PWM5 7 +#define CLK_PERI_PWM6 8 +#define CLK_PERI_PWM7 9 +#define CLK_PERI_PWM 10 +#define CLK_PERI_USB0 11 +#define CLK_PERI_USB1 12 +#define CLK_PERI_AP_DMA 13 +#define CLK_PERI_MSDC30_0 14 +#define CLK_PERI_MSDC30_1 15 +#define CLK_PERI_MSDC30_2 16 +#define CLK_PERI_MSDC30_3 17 +#define CLK_PERI_NLI_ARB 18 +#define CLK_PERI_IRDA 19 +#define CLK_PERI_UART0 20 +#define CLK_PERI_UART1 21 +#define CLK_PERI_UART2 22 +#define CLK_PERI_UART3 23 +#define CLK_PERI_I2C0 24 +#define CLK_PERI_I2C1 25 +#define CLK_PERI_I2C2 26 +#define CLK_PERI_I2C3 27 +#define CLK_PERI_I2C4 28 +#define CLK_PERI_AUXADC 29 +#define CLK_PERI_SPI0 30 +#define CLK_PERI_I2C5 31 +#define CLK_PERI_NFIECC 32 +#define CLK_PERI_SPI 33 +#define CLK_PERI_IRRX 34 +#define CLK_PERI_I2C6 35 +#define CLK_PERI_UART0_SEL 36 +#define CLK_PERI_UART1_SEL 37 +#define CLK_PERI_UART2_SEL 38 +#define CLK_PERI_UART3_SEL 39 +#define CLK_PERI_NR_CLK 40 + +/* IMG_SYS */ + +#define CLK_IMG_LARB2_SMI 1 +#define CLK_IMG_CAM_SMI 2 +#define CLK_IMG_CAM_CAM 3 +#define CLK_IMG_SEN_TG 4 +#define CLK_IMG_SEN_CAM 5 +#define CLK_IMG_CAM_SV 6 +#define CLK_IMG_FD 7 +#define CLK_IMG_NR_CLK 8 + +/* MM_SYS */ + +#define CLK_MM_SMI_COMMON 1 +#define CLK_MM_SMI_LARB0 2 +#define CLK_MM_CAM_MDP 3 +#define CLK_MM_MDP_RDMA0 4 +#define CLK_MM_MDP_RDMA1 5 +#define CLK_MM_MDP_RSZ0 6 +#define CLK_MM_MDP_RSZ1 7 +#define CLK_MM_MDP_RSZ2 8 +#define CLK_MM_MDP_TDSHP0 9 +#define CLK_MM_MDP_TDSHP1 10 +#define 
CLK_MM_MDP_WDMA 11 +#define CLK_MM_MDP_WROT0 12 +#define CLK_MM_MDP_WROT1 13 +#define CLK_MM_FAKE_ENG 14 +#define CLK_MM_MUTEX_32K 15 +#define CLK_MM_DISP_OVL0 16 +#define CLK_MM_DISP_OVL1 17 +#define CLK_MM_DISP_RDMA0 18 +#define CLK_MM_DISP_RDMA1 19 +#define CLK_MM_DISP_RDMA2 20 +#define CLK_MM_DISP_WDMA0 21 +#define CLK_MM_DISP_WDMA1 22 +#define CLK_MM_DISP_COLOR0 23 +#define CLK_MM_DISP_COLOR1 24 +#define CLK_MM_DISP_AAL 25 +#define CLK_MM_DISP_GAMMA 26 +#define CLK_MM_DISP_UFOE 27 +#define CLK_MM_DISP_SPLIT0 28 +#define CLK_MM_DISP_SPLIT1 29 +#define CLK_MM_DISP_MERGE 30 +#define CLK_MM_DISP_OD 31 +#define CLK_MM_DISP_PWM0MM 32 +#define CLK_MM_DISP_PWM026M 33 +#define CLK_MM_DISP_PWM1MM 34 +#define CLK_MM_DISP_PWM126M 35 +#define CLK_MM_DSI0_ENGINE 36 +#define CLK_MM_DSI0_DIGITAL 37 +#define CLK_MM_DSI1_ENGINE 38 +#define CLK_MM_DSI1_DIGITAL 39 +#define CLK_MM_DPI_PIXEL 40 +#define CLK_MM_DPI_ENGINE 41 +#define CLK_MM_DPI1_PIXEL 42 +#define CLK_MM_DPI1_ENGINE 43 +#define CLK_MM_HDMI_PIXEL 44 +#define CLK_MM_HDMI_PLLCK 45 +#define CLK_MM_HDMI_AUDIO 46 +#define CLK_MM_HDMI_SPDIF 47 +#define CLK_MM_LVDS_PIXEL 48 +#define CLK_MM_LVDS_CTS 49 +#define CLK_MM_SMI_LARB4 50 +#define CLK_MM_HDMI_HDCP 51 +#define CLK_MM_HDMI_HDCP24M 52 +#define CLK_MM_NR_CLK 53 + +/* VDEC_SYS */ + +#define CLK_VDEC_CKEN 1 +#define CLK_VDEC_LARB_CKEN 2 +#define CLK_VDEC_NR_CLK 3 + +/* VENC_SYS */ + +#define CLK_VENC_CKE0 1 +#define CLK_VENC_CKE1 2 +#define CLK_VENC_CKE2 3 +#define CLK_VENC_CKE3 4 +#define CLK_VENC_NR_CLK 5 + +/* VENCLT_SYS */ + +#define CLK_VENCLT_CKE0 1 +#define CLK_VENCLT_CKE1 2 +#define CLK_VENCLT_NR_CLK 3 + +#endif /* _DT_BINDINGS_CLK_MT8173_H */ diff --git a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h new file mode 100644 index 000000000..f21522605 --- /dev/null +++ b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Nuvoton NPCM7xx Clock Generator binding + * clock binding number for all clocks supportted by nuvoton,npcm7xx-clk + * + * Copyright (C) 2018 Nuvoton Technologies tali.perry@nuvoton.com + * + */ + +#ifndef __DT_BINDINGS_CLOCK_NPCM7XX_H +#define __DT_BINDINGS_CLOCK_NPCM7XX_H + + +#define NPCM7XX_CLK_CPU 0 +#define NPCM7XX_CLK_GFX_PIXEL 1 +#define NPCM7XX_CLK_MC 2 +#define NPCM7XX_CLK_ADC 3 +#define NPCM7XX_CLK_AHB 4 +#define NPCM7XX_CLK_TIMER 5 +#define NPCM7XX_CLK_UART 6 +#define NPCM7XX_CLK_MMC 7 +#define NPCM7XX_CLK_SPI3 8 +#define NPCM7XX_CLK_PCI 9 +#define NPCM7XX_CLK_AXI 10 +#define NPCM7XX_CLK_APB4 11 +#define NPCM7XX_CLK_APB3 12 +#define NPCM7XX_CLK_APB2 13 +#define NPCM7XX_CLK_APB1 14 +#define NPCM7XX_CLK_APB5 15 +#define NPCM7XX_CLK_CLKOUT 16 +#define NPCM7XX_CLK_GFX 17 +#define NPCM7XX_CLK_SU 18 +#define NPCM7XX_CLK_SU48 19 +#define NPCM7XX_CLK_SDHC 20 +#define NPCM7XX_CLK_SPI0 21 +#define NPCM7XX_CLK_SPIX 22 + +#define NPCM7XX_CLK_REFCLK 23 +#define NPCM7XX_CLK_SYSBYPCK 24 +#define NPCM7XX_CLK_MCBYPCK 25 + +#define NPCM7XX_NUM_CLOCKS (NPCM7XX_CLK_MCBYPCK+1) + +#endif diff --git a/include/dt-bindings/clock/omap4.h b/include/dt-bindings/clock/omap4.h new file mode 100644 index 000000000..e86c758e5 --- /dev/null +++ b/include/dt-bindings/clock/omap4.h @@ -0,0 +1,146 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __DT_BINDINGS_CLK_OMAP4_H +#define __DT_BINDINGS_CLK_OMAP4_H + +#define OMAP4_CLKCTRL_OFFSET 0x20 +#define OMAP4_CLKCTRL_INDEX(offset) ((offset) - OMAP4_CLKCTRL_OFFSET) + +/* mpuss clocks */ +#define OMAP4_MPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* tesla clocks */ +#define OMAP4_DSP_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* abe clocks */ +#define OMAP4_L4_ABE_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_AESS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_MCPDM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_DMIC_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) +#define OMAP4_MCASP_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40) +#define OMAP4_MCBSP1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x48) +#define OMAP4_MCBSP2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x50) +#define OMAP4_MCBSP3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x58) +#define OMAP4_SLIMBUS1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x60) +#define OMAP4_TIMER5_CLKCTRL OMAP4_CLKCTRL_INDEX(0x68) +#define OMAP4_TIMER6_CLKCTRL OMAP4_CLKCTRL_INDEX(0x70) +#define OMAP4_TIMER7_CLKCTRL OMAP4_CLKCTRL_INDEX(0x78) +#define OMAP4_TIMER8_CLKCTRL OMAP4_CLKCTRL_INDEX(0x80) +#define OMAP4_WD_TIMER3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x88) + +/* l4_ao clocks */ +#define OMAP4_SMARTREFLEX_MPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_SMARTREFLEX_IVA_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_SMARTREFLEX_CORE_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) + +/* l3_1 clocks */ +#define OMAP4_L3_MAIN_1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l3_2 clocks */ +#define OMAP4_L3_MAIN_2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_GPMC_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_OCMC_RAM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) + +/* ducati clocks */ +#define OMAP4_IPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l3_dma clocks */ +#define OMAP4_DMA_SYSTEM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l3_emif clocks */ +#define OMAP4_DMM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_EMIF1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_EMIF2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) + +/* d2d clocks */ +#define OMAP4_C2C_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l4_cfg clocks */ +#define OMAP4_L4_CFG_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_SPINLOCK_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_MAILBOX_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) + +/* l3_instr clocks */ +#define OMAP4_L3_MAIN_3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_L3_INSTR_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_OCP_WP_NOC_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40) + +/* ivahd clocks */ +#define OMAP4_IVA_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_SL2IF_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) + +/* iss clocks */ +#define OMAP4_ISS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_FDIF_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) + +/* l3_dss clocks */ +#define OMAP4_DSS_CORE_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l3_gfx clocks */ +#define OMAP4_GPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l3_init clocks */ +#define OMAP4_MMC1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_MMC2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_HSI_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) +#define OMAP4_USB_HOST_HS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x58) +#define OMAP4_USB_OTG_HS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x60) +#define OMAP4_USB_TLL_HS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x68) +#define OMAP4_USB_HOST_FS_CLKCTRL OMAP4_CLKCTRL_INDEX(0xd0) +#define 
OMAP4_OCP2SCP_USB_PHY_CLKCTRL OMAP4_CLKCTRL_INDEX(0xe0) + +/* l4_per clocks */ +#define OMAP4_TIMER10_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_TIMER11_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_TIMER2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) +#define OMAP4_TIMER3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40) +#define OMAP4_TIMER4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x48) +#define OMAP4_TIMER9_CLKCTRL OMAP4_CLKCTRL_INDEX(0x50) +#define OMAP4_ELM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x58) +#define OMAP4_GPIO2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x60) +#define OMAP4_GPIO3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x68) +#define OMAP4_GPIO4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x70) +#define OMAP4_GPIO5_CLKCTRL OMAP4_CLKCTRL_INDEX(0x78) +#define OMAP4_GPIO6_CLKCTRL OMAP4_CLKCTRL_INDEX(0x80) +#define OMAP4_HDQ1W_CLKCTRL OMAP4_CLKCTRL_INDEX(0x88) +#define OMAP4_I2C1_CLKCTRL OMAP4_CLKCTRL_INDEX(0xa0) +#define OMAP4_I2C2_CLKCTRL OMAP4_CLKCTRL_INDEX(0xa8) +#define OMAP4_I2C3_CLKCTRL OMAP4_CLKCTRL_INDEX(0xb0) +#define OMAP4_I2C4_CLKCTRL OMAP4_CLKCTRL_INDEX(0xb8) +#define OMAP4_L4_PER_CLKCTRL OMAP4_CLKCTRL_INDEX(0xc0) +#define OMAP4_MCBSP4_CLKCTRL OMAP4_CLKCTRL_INDEX(0xe0) +#define OMAP4_MCSPI1_CLKCTRL OMAP4_CLKCTRL_INDEX(0xf0) +#define OMAP4_MCSPI2_CLKCTRL OMAP4_CLKCTRL_INDEX(0xf8) +#define OMAP4_MCSPI3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x100) +#define OMAP4_MCSPI4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x108) +#define OMAP4_MMC3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x120) +#define OMAP4_MMC4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x128) +#define OMAP4_SLIMBUS2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x138) +#define OMAP4_UART1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x140) +#define OMAP4_UART2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x148) +#define OMAP4_UART3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x150) +#define OMAP4_UART4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x158) +#define OMAP4_MMC5_CLKCTRL OMAP4_CLKCTRL_INDEX(0x160) + +/* l4_wkup clocks */ +#define OMAP4_L4_WKUP_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_WD_TIMER2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_GPIO1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) +#define OMAP4_TIMER1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40) +#define OMAP4_COUNTER_32K_CLKCTRL OMAP4_CLKCTRL_INDEX(0x50) +#define OMAP4_KBD_CLKCTRL OMAP4_CLKCTRL_INDEX(0x78) + +/* emu_sys clocks */ +#define OMAP4_DEBUGSS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +#endif diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h new file mode 100644 index 000000000..f51821a91 --- /dev/null +++ b/include/dt-bindings/clock/omap5.h @@ -0,0 +1,118 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+#ifndef __DT_BINDINGS_CLK_OMAP5_H
+#define __DT_BINDINGS_CLK_OMAP5_H
+
+#define OMAP5_CLKCTRL_OFFSET 0x20
+#define OMAP5_CLKCTRL_INDEX(offset) ((offset) - OMAP5_CLKCTRL_OFFSET)
+
+/* mpu clocks */
+#define OMAP5_MPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* dsp clocks */
+#define OMAP5_MMU_DSP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* abe clocks */
+#define OMAP5_L4_ABE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_MCPDM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
+#define OMAP5_DMIC_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
+#define OMAP5_MCBSP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48)
+#define OMAP5_MCBSP2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50)
+#define OMAP5_MCBSP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58)
+#define OMAP5_TIMER5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68)
+#define OMAP5_TIMER6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70)
+#define OMAP5_TIMER7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78)
+#define OMAP5_TIMER8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80)
+
+/* l3main1 clocks */
+#define OMAP5_L3_MAIN_1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* l3main2 clocks */
+#define OMAP5_L3_MAIN_2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* ipu clocks */
+#define OMAP5_MMU_IPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* dma clocks */
+#define OMAP5_DMA_SYSTEM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* emif clocks */
+#define OMAP5_DMM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_EMIF1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
+#define OMAP5_EMIF2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
+
+/* l4cfg clocks */
+#define OMAP5_L4_CFG_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_SPINLOCK_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+#define OMAP5_MAILBOX_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
+
+/* l3instr clocks */
+#define OMAP5_L3_MAIN_3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_L3_INSTR_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+
+/* l4per clocks */
+#define OMAP5_TIMER10_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+#define OMAP5_TIMER11_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
+#define OMAP5_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
+#define OMAP5_TIMER3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40)
+#define OMAP5_TIMER4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48)
+#define OMAP5_TIMER9_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50)
+#define OMAP5_GPIO2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x60)
+#define OMAP5_GPIO3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68)
+#define OMAP5_GPIO4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70)
+#define OMAP5_GPIO5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78)
+#define OMAP5_GPIO6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80)
+#define OMAP5_I2C1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa0)
+#define OMAP5_I2C2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa8)
+#define OMAP5_I2C3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb0)
+#define OMAP5_I2C4_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb8)
+#define OMAP5_L4_PER_CLKCTRL OMAP5_CLKCTRL_INDEX(0xc0)
+#define OMAP5_MCSPI1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0)
+#define OMAP5_MCSPI2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf8)
+#define OMAP5_MCSPI3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x100)
+#define OMAP5_MCSPI4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x108)
+#define OMAP5_GPIO7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x110)
+#define OMAP5_GPIO8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x118)
+#define OMAP5_MMC3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x120)
+#define OMAP5_MMC4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x128)
+#define OMAP5_UART1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x140)
+#define OMAP5_UART2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x148)
+#define OMAP5_UART3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x150)
+#define OMAP5_UART4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x158)
+#define OMAP5_MMC5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x160)
+#define OMAP5_I2C5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x168)
+#define OMAP5_UART5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x170)
+#define OMAP5_UART6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x178)
+
+/* dss clocks */ +#define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* l3init clocks */ +#define OMAP5_MMC1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_MMC2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_USB_HOST_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58) +#define OMAP5_USB_TLL_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68) +#define OMAP5_SATA_CLKCTRL OMAP5_CLKCTRL_INDEX(0x88) +#define OMAP5_OCP2SCP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe0) +#define OMAP5_OCP2SCP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe8) +#define OMAP5_USB_OTG_SS_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0) + +/* wkupaon clocks */ +#define OMAP5_L4_WKUP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_WD_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_GPIO1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) +#define OMAP5_TIMER1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40) +#define OMAP5_COUNTER_32K_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50) +#define OMAP5_KBD_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78) + +#endif diff --git a/include/dt-bindings/clock/oxsemi,ox810se.h b/include/dt-bindings/clock/oxsemi,ox810se.h new file mode 100644 index 000000000..d5facb5e8 --- /dev/null +++ b/include/dt-bindings/clock/oxsemi,ox810se.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2016 Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef DT_CLOCK_OXSEMI_OX810SE_H +#define DT_CLOCK_OXSEMI_OX810SE_H + +#define CLK_810_LEON 0 +#define CLK_810_DMA_SGDMA 1 +#define CLK_810_CIPHER 2 +#define CLK_810_SATA 3 +#define CLK_810_AUDIO 4 +#define CLK_810_USBMPH 5 +#define CLK_810_ETHA 6 +#define CLK_810_PCIEA 7 +#define CLK_810_NAND 8 + +#endif /* DT_CLOCK_OXSEMI_OX810SE_H */ diff --git a/include/dt-bindings/clock/oxsemi,ox820.h b/include/dt-bindings/clock/oxsemi,ox820.h new file mode 100644 index 000000000..f661ecc8d --- /dev/null +++ b/include/dt-bindings/clock/oxsemi,ox820.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2016 Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef DT_CLOCK_OXSEMI_OX820_H +#define DT_CLOCK_OXSEMI_OX820_H + +/* PLLs */ +#define CLK_820_PLLA 0 +#define CLK_820_PLLB 1 + +/* Gate Clocks */ +#define CLK_820_LEON 2 +#define CLK_820_DMA_SGDMA 3 +#define CLK_820_CIPHER 4 +#define CLK_820_SD 5 +#define CLK_820_SATA 6 +#define CLK_820_AUDIO 7 +#define CLK_820_USBMPH 8 +#define CLK_820_ETHA 9 +#define CLK_820_PCIEA 10 +#define CLK_820_NAND 11 +#define CLK_820_PCIEB 12 +#define CLK_820_ETHB 13 +#define CLK_820_REF600 14 +#define CLK_820_USBDEV 15 + +#endif /* DT_CLOCK_OXSEMI_OX820_H */ diff --git a/include/dt-bindings/clock/pistachio-clk.h b/include/dt-bindings/clock/pistachio-clk.h new file mode 100644 index 000000000..039f83fac --- /dev/null +++ b/include/dt-bindings/clock/pistachio-clk.h @@ -0,0 +1,183 @@ +/* + * Copyright (C) 2014 Google, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#ifndef _DT_BINDINGS_CLOCK_PISTACHIO_H +#define _DT_BINDINGS_CLOCK_PISTACHIO_H + +/* PLLs */ +#define CLK_MIPS_PLL 0 +#define CLK_AUDIO_PLL 1 +#define CLK_RPU_V_PLL 2 +#define CLK_RPU_L_PLL 3 +#define CLK_SYS_PLL 4 +#define CLK_WIFI_PLL 5 +#define CLK_BT_PLL 6 + +/* Fixed-factor clocks */ +#define CLK_WIFI_DIV4 16 +#define CLK_WIFI_DIV8 17 + +/* Gate clocks */ +#define CLK_MIPS 32 +#define CLK_AUDIO_IN 33 +#define CLK_AUDIO 34 +#define CLK_I2S 35 +#define CLK_SPDIF 36 +#define CLK_AUDIO_DAC 37 +#define CLK_RPU_V 38 +#define CLK_RPU_L 39 +#define CLK_RPU_SLEEP 40 +#define CLK_WIFI_PLL_GATE 41 +#define CLK_RPU_CORE 42 +#define CLK_WIFI_ADC 43 +#define CLK_WIFI_DAC 44 +#define CLK_USB_PHY 45 +#define CLK_ENET_IN 46 +#define CLK_ENET 47 +#define CLK_UART0 48 +#define CLK_UART1 49 +#define CLK_PERIPH_SYS 50 +#define CLK_SPI0 51 +#define CLK_SPI1 52 +#define CLK_EVENT_TIMER 53 +#define CLK_AUX_ADC_INTERNAL 54 +#define CLK_AUX_ADC 55 +#define CLK_SD_HOST 56 +#define CLK_BT 57 +#define CLK_BT_DIV4 58 +#define CLK_BT_DIV8 59 +#define CLK_BT_1MHZ 60 + +/* Divider clocks */ +#define CLK_MIPS_INTERNAL_DIV 64 +#define CLK_MIPS_DIV 65 +#define CLK_AUDIO_DIV 66 +#define CLK_I2S_DIV 67 +#define CLK_SPDIF_DIV 68 +#define CLK_AUDIO_DAC_DIV 69 +#define CLK_RPU_V_DIV 70 +#define CLK_RPU_L_DIV 71 +#define CLK_RPU_SLEEP_DIV 72 +#define CLK_RPU_CORE_DIV 73 +#define CLK_USB_PHY_DIV 74 +#define CLK_ENET_DIV 75 +#define CLK_UART0_INTERNAL_DIV 76 +#define CLK_UART0_DIV 77 +#define CLK_UART1_INTERNAL_DIV 78 +#define CLK_UART1_DIV 79 +#define CLK_SYS_INTERNAL_DIV 80 +#define CLK_SPI0_INTERNAL_DIV 81 +#define CLK_SPI0_DIV 82 +#define CLK_SPI1_INTERNAL_DIV 83 +#define CLK_SPI1_DIV 84 +#define CLK_EVENT_TIMER_INTERNAL_DIV 85 +#define CLK_EVENT_TIMER_DIV 86 +#define CLK_AUX_ADC_INTERNAL_DIV 87 +#define CLK_AUX_ADC_DIV 88 +#define CLK_SD_HOST_DIV 89 +#define CLK_BT_DIV 90 +#define CLK_BT_DIV4_DIV 91 +#define CLK_BT_DIV8_DIV 92 +#define CLK_BT_1MHZ_INTERNAL_DIV 93 +#define CLK_BT_1MHZ_DIV 94 + +/* Mux clocks */ +#define CLK_AUDIO_REF_MUX 96 +#define CLK_MIPS_PLL_MUX 97 +#define CLK_AUDIO_PLL_MUX 98 +#define CLK_AUDIO_MUX 99 +#define CLK_RPU_V_PLL_MUX 100 +#define CLK_RPU_L_PLL_MUX 101 +#define CLK_RPU_L_MUX 102 +#define CLK_WIFI_PLL_MUX 103 +#define CLK_WIFI_DIV4_MUX 104 +#define CLK_WIFI_DIV8_MUX 105 +#define CLK_RPU_CORE_MUX 106 +#define CLK_SYS_PLL_MUX 107 +#define CLK_ENET_MUX 108 +#define CLK_EVENT_TIMER_MUX 109 +#define CLK_SD_HOST_MUX 110 +#define CLK_BT_PLL_MUX 111 +#define CLK_DEBUG_MUX 112 + 
+#define CLK_NR_CLKS 113 + +/* Peripheral gate clocks */ +#define PERIPH_CLK_SYS 0 +#define PERIPH_CLK_SYS_BUS 1 +#define PERIPH_CLK_DDR 2 +#define PERIPH_CLK_ROM 3 +#define PERIPH_CLK_COUNTER_FAST 4 +#define PERIPH_CLK_COUNTER_SLOW 5 +#define PERIPH_CLK_IR 6 +#define PERIPH_CLK_WD 7 +#define PERIPH_CLK_PDM 8 +#define PERIPH_CLK_PWM 9 +#define PERIPH_CLK_I2C0 10 +#define PERIPH_CLK_I2C1 11 +#define PERIPH_CLK_I2C2 12 +#define PERIPH_CLK_I2C3 13 + +/* Peripheral divider clocks */ +#define PERIPH_CLK_ROM_DIV 32 +#define PERIPH_CLK_COUNTER_FAST_DIV 33 +#define PERIPH_CLK_COUNTER_SLOW_PRE_DIV 34 +#define PERIPH_CLK_COUNTER_SLOW_DIV 35 +#define PERIPH_CLK_IR_PRE_DIV 36 +#define PERIPH_CLK_IR_DIV 37 +#define PERIPH_CLK_WD_PRE_DIV 38 +#define PERIPH_CLK_WD_DIV 39 +#define PERIPH_CLK_PDM_PRE_DIV 40 +#define PERIPH_CLK_PDM_DIV 41 +#define PERIPH_CLK_PWM_PRE_DIV 42 +#define PERIPH_CLK_PWM_DIV 43 +#define PERIPH_CLK_I2C0_PRE_DIV 44 +#define PERIPH_CLK_I2C0_DIV 45 +#define PERIPH_CLK_I2C1_PRE_DIV 46 +#define PERIPH_CLK_I2C1_DIV 47 +#define PERIPH_CLK_I2C2_PRE_DIV 48 +#define PERIPH_CLK_I2C2_DIV 49 +#define PERIPH_CLK_I2C3_PRE_DIV 50 +#define PERIPH_CLK_I2C3_DIV 51 + +#define PERIPH_CLK_NR_CLKS 52 + +/* System gate clocks */ +#define SYS_CLK_I2C0 0 +#define SYS_CLK_I2C1 1 +#define SYS_CLK_I2C2 2 +#define SYS_CLK_I2C3 3 +#define SYS_CLK_I2S_IN 4 +#define SYS_CLK_PAUD_OUT 5 +#define SYS_CLK_SPDIF_OUT 6 +#define SYS_CLK_SPI0_MASTER 7 +#define SYS_CLK_SPI0_SLAVE 8 +#define SYS_CLK_PWM 9 +#define SYS_CLK_UART0 10 +#define SYS_CLK_UART1 11 +#define SYS_CLK_SPI1 12 +#define SYS_CLK_MDC 13 +#define SYS_CLK_SD_HOST 14 +#define SYS_CLK_ENET 15 +#define SYS_CLK_IR 16 +#define SYS_CLK_WD 17 +#define SYS_CLK_TIMER 18 +#define SYS_CLK_I2S_OUT 24 +#define SYS_CLK_SPDIF_IN 25 +#define SYS_CLK_EVENT_TIMER 26 +#define SYS_CLK_HASH 27 + +#define SYS_CLK_NR_CLKS 28 + +/* Gates for external input clocks */ +#define EXT_CLK_AUDIO_IN 0 +#define EXT_CLK_ENET_IN 1 + +#define EXT_CLK_NR_CLKS 2 + +#endif /* _DT_BINDINGS_CLOCK_PISTACHIO_H */ diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h new file mode 100644 index 000000000..00101479f --- /dev/null +++ b/include/dt-bindings/clock/px30-cru.h @@ -0,0 +1,389 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_PX30_H +#define _DT_BINDINGS_CLK_ROCKCHIP_PX30_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_NPLL 4 +#define APLL_BOOST_H 5 +#define APLL_BOOST_L 6 +#define ARMCLK 7 + +/* sclk gates (special clocks) */ +#define USB480M 14 +#define SCLK_PDM 15 +#define SCLK_I2S0_TX 16 +#define SCLK_I2S0_TX_OUT 17 +#define SCLK_I2S0_RX 18 +#define SCLK_I2S0_RX_OUT 19 +#define SCLK_I2S1 20 +#define SCLK_I2S1_OUT 21 +#define SCLK_I2S2 22 +#define SCLK_I2S2_OUT 23 +#define SCLK_UART1 24 +#define SCLK_UART2 25 +#define SCLK_UART3 26 +#define SCLK_UART4 27 +#define SCLK_UART5 28 +#define SCLK_I2C0 29 +#define SCLK_I2C1 30 +#define SCLK_I2C2 31 +#define SCLK_I2C3 32 +#define SCLK_I2C4 33 +#define SCLK_PWM0 34 +#define SCLK_PWM1 35 +#define SCLK_SPI0 36 +#define SCLK_SPI1 37 +#define SCLK_TIMER0 38 +#define SCLK_TIMER1 39 +#define SCLK_TIMER2 40 +#define SCLK_TIMER3 41 +#define SCLK_TIMER4 42 +#define SCLK_TIMER5 43 +#define SCLK_TSADC 44 +#define SCLK_SARADC 45 +#define SCLK_OTP 46 +#define SCLK_OTP_USR 47 +#define SCLK_CRYPTO 48 +#define SCLK_CRYPTO_APK 49 +#define SCLK_DDRC 50 +#define SCLK_ISP 51 +#define SCLK_CIF_OUT 52 +#define SCLK_RGA_CORE 53 +#define SCLK_VOPB_PWM 54 +#define 
SCLK_NANDC 55 +#define SCLK_SDIO 56 +#define SCLK_EMMC 57 +#define SCLK_SFC 58 +#define SCLK_SDMMC 59 +#define SCLK_OTG_ADP 60 +#define SCLK_GMAC_SRC 61 +#define SCLK_GMAC 62 +#define SCLK_GMAC_RX_TX 63 +#define SCLK_MAC_REF 64 +#define SCLK_MAC_REFOUT 65 +#define SCLK_MAC_OUT 66 +#define SCLK_SDMMC_DRV 67 +#define SCLK_SDMMC_SAMPLE 68 +#define SCLK_SDIO_DRV 69 +#define SCLK_SDIO_SAMPLE 70 +#define SCLK_EMMC_DRV 71 +#define SCLK_EMMC_SAMPLE 72 +#define SCLK_GPU 73 +#define SCLK_PVTM 74 +#define SCLK_CORE_VPU 75 +#define SCLK_GMAC_RMII 76 +#define SCLK_UART2_SRC 77 +#define SCLK_NANDC_DIV 78 +#define SCLK_NANDC_DIV50 79 +#define SCLK_SDIO_DIV 80 +#define SCLK_SDIO_DIV50 81 +#define SCLK_EMMC_DIV 82 +#define SCLK_EMMC_DIV50 83 +#define SCLK_DDRCLK 84 +#define SCLK_UART1_SRC 85 + +/* dclk gates */ +#define DCLK_VOPB 150 +#define DCLK_VOPL 151 + +/* aclk gates */ +#define ACLK_GPU 170 +#define ACLK_BUS_PRE 171 +#define ACLK_CRYPTO 172 +#define ACLK_VI_PRE 173 +#define ACLK_VO_PRE 174 +#define ACLK_VPU 175 +#define ACLK_PERI_PRE 176 +#define ACLK_GMAC 178 +#define ACLK_CIF 179 +#define ACLK_ISP 180 +#define ACLK_VOPB 181 +#define ACLK_VOPL 182 +#define ACLK_RGA 183 +#define ACLK_GIC 184 +#define ACLK_DCF 186 +#define ACLK_DMAC 187 +#define ACLK_BUS_SRC 188 +#define ACLK_PERI_SRC 189 + +/* hclk gates */ +#define HCLK_BUS_PRE 240 +#define HCLK_CRYPTO 241 +#define HCLK_VI_PRE 242 +#define HCLK_VO_PRE 243 +#define HCLK_VPU 244 +#define HCLK_PERI_PRE 245 +#define HCLK_MMC_NAND 246 +#define HCLK_SDMMC 247 +#define HCLK_USB 248 +#define HCLK_CIF 249 +#define HCLK_ISP 250 +#define HCLK_VOPB 251 +#define HCLK_VOPL 252 +#define HCLK_RGA 253 +#define HCLK_NANDC 254 +#define HCLK_SDIO 255 +#define HCLK_EMMC 256 +#define HCLK_SFC 257 +#define HCLK_OTG 258 +#define HCLK_HOST 259 +#define HCLK_HOST_ARB 260 +#define HCLK_PDM 261 +#define HCLK_I2S0 262 +#define HCLK_I2S1 263 +#define HCLK_I2S2 264 + +/* pclk gates */ +#define PCLK_BUS_PRE 320 +#define PCLK_DDR 321 +#define PCLK_VO_PRE 322 +#define PCLK_GMAC 323 +#define PCLK_MIPI_DSI 324 +#define PCLK_MIPIDSIPHY 325 +#define PCLK_MIPICSIPHY 326 +#define PCLK_USB_GRF 327 +#define PCLK_DCF 328 +#define PCLK_UART1 329 +#define PCLK_UART2 330 +#define PCLK_UART3 331 +#define PCLK_UART4 332 +#define PCLK_UART5 333 +#define PCLK_I2C0 334 +#define PCLK_I2C1 335 +#define PCLK_I2C2 336 +#define PCLK_I2C3 337 +#define PCLK_I2C4 338 +#define PCLK_PWM0 339 +#define PCLK_PWM1 340 +#define PCLK_SPI0 341 +#define PCLK_SPI1 342 +#define PCLK_SARADC 343 +#define PCLK_TSADC 344 +#define PCLK_TIMER 345 +#define PCLK_OTP_NS 346 +#define PCLK_WDT_NS 347 +#define PCLK_GPIO1 348 +#define PCLK_GPIO2 349 +#define PCLK_GPIO3 350 +#define PCLK_ISP 351 +#define PCLK_CIF 352 +#define PCLK_OTP_PHY 353 + +#define CLK_NR_CLKS (PCLK_OTP_PHY + 1) + +/* pmu-clocks indices */ + +#define PLL_GPLL 1 + +#define SCLK_RTC32K_PMU 4 +#define SCLK_WIFI_PMU 5 +#define SCLK_UART0_PMU 6 +#define SCLK_PVTM_PMU 7 +#define PCLK_PMU_PRE 8 +#define SCLK_REF24M_PMU 9 +#define SCLK_USBPHY_REF 10 +#define SCLK_MIPIDSIPHY_REF 11 + +#define XIN24M_DIV 12 + +#define PCLK_GPIO0_PMU 20 +#define PCLK_UART0_PMU 21 + +#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NOC 
13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +#define SRST_DAP 16 +#define SRST_CORE_PVTM 17 +#define SRST_GPU 18 +#define SRST_GPU_NIU 19 +#define SRST_UPCTL2 20 +#define SRST_UPCTL2_A 21 +#define SRST_UPCTL2_P 22 +#define SRST_MSCH 23 +#define SRST_MSCH_P 24 +#define SRST_DDRMON_P 25 +#define SRST_DDRSTDBY_P 26 +#define SRST_DDRSTDBY 27 +#define SRST_DDRGRF_p 28 +#define SRST_AXI_SPLIT_A 29 +#define SRST_AXI_CMD_A 30 +#define SRST_AXI_CMD_P 31 + +#define SRST_DDRPHY 32 +#define SRST_DDRPHYDIV 33 +#define SRST_DDRPHY_P 34 +#define SRST_VPU_A 36 +#define SRST_VPU_NIU_A 37 +#define SRST_VPU_H 38 +#define SRST_VPU_NIU_H 39 +#define SRST_VI_NIU_A 40 +#define SRST_VI_NIU_H 41 +#define SRST_ISP_H 42 +#define SRST_ISP 43 +#define SRST_CIF_A 44 +#define SRST_CIF_H 45 +#define SRST_CIF_PCLKIN 46 +#define SRST_MIPICSIPHY_P 47 + +#define SRST_VO_NIU_A 48 +#define SRST_VO_NIU_H 49 +#define SRST_VO_NIU_P 50 +#define SRST_VOPB_A 51 +#define SRST_VOPB_H 52 +#define SRST_VOPB 53 +#define SRST_PWM_VOPB 54 +#define SRST_VOPL_A 55 +#define SRST_VOPL_H 56 +#define SRST_VOPL 57 +#define SRST_RGA_A 58 +#define SRST_RGA_H 59 +#define SRST_RGA 60 +#define SRST_MIPIDSI_HOST_P 61 +#define SRST_MIPIDSIPHY_P 62 +#define SRST_VPU_CORE 63 + +#define SRST_PERI_NIU_A 64 +#define SRST_USB_NIU_H 65 +#define SRST_USB2OTG_H 66 +#define SRST_USB2OTG 67 +#define SRST_USB2OTG_ADP 68 +#define SRST_USB2HOST_H 69 +#define SRST_USB2HOST_ARB_H 70 +#define SRST_USB2HOST_AUX_H 71 +#define SRST_USB2HOST_EHCI 72 +#define SRST_USB2HOST 73 +#define SRST_USBPHYPOR 74 +#define SRST_USBPHY_OTG_PORT 75 +#define SRST_USBPHY_HOST_PORT 76 +#define SRST_USBPHY_GRF 77 +#define SRST_CPU_BOOST_P 78 +#define SRST_CPU_BOOST 79 + +#define SRST_MMC_NAND_NIU_H 80 +#define SRST_SDIO_H 81 +#define SRST_EMMC_H 82 +#define SRST_SFC_H 83 +#define SRST_SFC 84 +#define SRST_SDCARD_NIU_H 85 +#define SRST_SDMMC_H 86 +#define SRST_NANDC_H 89 +#define SRST_NANDC 90 +#define SRST_GMAC_NIU_A 92 +#define SRST_GMAC_NIU_P 93 +#define SRST_GMAC_A 94 + +#define SRST_PMU_NIU_P 96 +#define SRST_PMU_SGRF_P 97 +#define SRST_PMU_GRF_P 98 +#define SRST_PMU 99 +#define SRST_PMU_MEM_P 100 +#define SRST_PMU_GPIO0_P 101 +#define SRST_PMU_UART0_P 102 +#define SRST_PMU_CRU_P 103 +#define SRST_PMU_PVTM 104 +#define SRST_PMU_UART 105 +#define SRST_PMU_NIU_H 106 +#define SRST_PMU_DDR_FAIL_SAVE 107 +#define SRST_PMU_CORE_PERF_A 108 +#define SRST_PMU_CORE_GRF_P 109 +#define SRST_PMU_GPU_PERF_A 110 +#define SRST_PMU_GPU_GRF_P 111 + +#define SRST_CRYPTO_NIU_A 112 +#define SRST_CRYPTO_NIU_H 113 +#define SRST_CRYPTO_A 114 +#define SRST_CRYPTO_H 115 +#define SRST_CRYPTO 116 +#define SRST_CRYPTO_APK 117 +#define SRST_BUS_NIU_H 120 +#define SRST_USB_NIU_P 121 +#define SRST_BUS_TOP_NIU_P 122 +#define SRST_INTMEM_A 123 +#define SRST_GIC_A 124 +#define SRST_ROM_H 126 +#define SRST_DCF_A 127 + +#define SRST_DCF_P 128 +#define SRST_PDM_H 129 +#define SRST_PDM 130 +#define SRST_I2S0_H 131 +#define SRST_I2S0_TX 132 +#define SRST_I2S1_H 133 +#define SRST_I2S1 134 +#define SRST_I2S2_H 135 +#define SRST_I2S2 136 +#define SRST_UART1_P 137 +#define SRST_UART1 138 +#define SRST_UART2_P 139 +#define SRST_UART2 140 +#define SRST_UART3_P 141 +#define SRST_UART3 142 +#define SRST_UART4_P 143 + +#define SRST_UART4 144 +#define SRST_UART5_P 145 +#define SRST_UART5 146 +#define SRST_I2C0_P 147 +#define SRST_I2C0 148 +#define SRST_I2C1_P 149 +#define SRST_I2C1 150 +#define SRST_I2C2_P 151 +#define SRST_I2C2 152 +#define SRST_I2C3_P 153 +#define SRST_I2C3 154 +#define SRST_PWM0_P 157 +#define SRST_PWM0 158 
+#define SRST_PWM1_P 159 + +#define SRST_PWM1 160 +#define SRST_SPI0_P 161 +#define SRST_SPI0 162 +#define SRST_SPI1_P 163 +#define SRST_SPI1 164 +#define SRST_SARADC_P 165 +#define SRST_SARADC 166 +#define SRST_TSADC_P 167 +#define SRST_TSADC 168 +#define SRST_TIMER_P 169 +#define SRST_TIMER0 170 +#define SRST_TIMER1 171 +#define SRST_TIMER2 172 +#define SRST_TIMER3 173 +#define SRST_TIMER4 174 +#define SRST_TIMER5 175 + +#define SRST_OTP_NS_P 176 +#define SRST_OTP_NS_SBPI 177 +#define SRST_OTP_NS_USR 178 +#define SRST_OTP_PHY_P 179 +#define SRST_OTP_PHY 180 +#define SRST_WDT_NS_P 181 +#define SRST_GPIO1_P 182 +#define SRST_GPIO2_P 183 +#define SRST_GPIO3_P 184 +#define SRST_SGRF_P 185 +#define SRST_GRF_P 186 +#define SRST_I2S0_RX 191 + +#endif diff --git a/include/dt-bindings/clock/pxa-clock.h b/include/dt-bindings/clock/pxa-clock.h new file mode 100644 index 000000000..0b0fd2b01 --- /dev/null +++ b/include/dt-bindings/clock/pxa-clock.h @@ -0,0 +1,78 @@ +/* + * Inspired by original work from pxa2xx-regs.h by Nicolas Pitre + * Copyright (C) 2014 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __DT_BINDINGS_CLOCK_PXA2XX_H__ +#define __DT_BINDINGS_CLOCK_PXA2XX_H__ + +#define CLK_NONE 0 +#define CLK_1WIRE 1 +#define CLK_AC97 2 +#define CLK_AC97CONF 3 +#define CLK_ASSP 4 +#define CLK_BOOT 5 +#define CLK_BTUART 6 +#define CLK_CAMERA 7 +#define CLK_CIR 8 +#define CLK_CORE 9 +#define CLK_DMC 10 +#define CLK_FFUART 11 +#define CLK_FICP 12 +#define CLK_GPIO 13 +#define CLK_HSIO2 14 +#define CLK_HWUART 15 +#define CLK_I2C 16 +#define CLK_I2S 17 +#define CLK_IM 18 +#define CLK_INC 19 +#define CLK_ISC 20 +#define CLK_KEYPAD 21 +#define CLK_LCD 22 +#define CLK_MEMC 23 +#define CLK_MEMSTK 24 +#define CLK_MINI_IM 25 +#define CLK_MINI_LCD 26 +#define CLK_MMC 27 +#define CLK_MMC1 28 +#define CLK_MMC2 29 +#define CLK_MMC3 30 +#define CLK_MSL 31 +#define CLK_MSL0 32 +#define CLK_MVED 33 +#define CLK_NAND 34 +#define CLK_NSSP 35 +#define CLK_OSTIMER 36 +#define CLK_PWM0 37 +#define CLK_PWM1 38 +#define CLK_PWM2 39 +#define CLK_PWM3 40 +#define CLK_PWRI2C 41 +#define CLK_PXA300_GCU 42 +#define CLK_PXA320_GCU 43 +#define CLK_SMC 44 +#define CLK_SSP 45 +#define CLK_SSP1 46 +#define CLK_SSP2 47 +#define CLK_SSP3 48 +#define CLK_SSP4 49 +#define CLK_STUART 50 +#define CLK_TOUCH 51 +#define CLK_TPM 52 +#define CLK_UDC 53 +#define CLK_USB 54 +#define CLK_USB2 55 +#define CLK_USBH 56 +#define CLK_USBHOST 57 +#define CLK_USIM 58 +#define CLK_USIM1 59 +#define CLK_USMI0 60 +#define CLK_OSC32k768 61 +#define CLK_MAX 62 + +#endif diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h new file mode 100644 index 000000000..11eed4bc9 --- /dev/null +++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H +#define _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H + +/* DISP_CC clock registers */ +#define DISP_CC_MDSS_AHB_CLK 0 +#define DISP_CC_MDSS_AXI_CLK 1 +#define DISP_CC_MDSS_BYTE0_CLK 2 +#define DISP_CC_MDSS_BYTE0_CLK_SRC 3 +#define DISP_CC_MDSS_BYTE0_INTF_CLK 4 +#define DISP_CC_MDSS_BYTE1_CLK 5 +#define DISP_CC_MDSS_BYTE1_CLK_SRC 6 +#define DISP_CC_MDSS_BYTE1_INTF_CLK 7 +#define DISP_CC_MDSS_ESC0_CLK 8 +#define DISP_CC_MDSS_ESC0_CLK_SRC 9 +#define DISP_CC_MDSS_ESC1_CLK 10 +#define DISP_CC_MDSS_ESC1_CLK_SRC 11 +#define DISP_CC_MDSS_MDP_CLK 12 +#define DISP_CC_MDSS_MDP_CLK_SRC 13 +#define DISP_CC_MDSS_MDP_LUT_CLK 14 +#define DISP_CC_MDSS_PCLK0_CLK 15 +#define DISP_CC_MDSS_PCLK0_CLK_SRC 16 +#define DISP_CC_MDSS_PCLK1_CLK 17 +#define DISP_CC_MDSS_PCLK1_CLK_SRC 18 +#define DISP_CC_MDSS_ROT_CLK 19 +#define DISP_CC_MDSS_ROT_CLK_SRC 20 +#define DISP_CC_MDSS_RSCC_AHB_CLK 21 +#define DISP_CC_MDSS_RSCC_VSYNC_CLK 22 +#define DISP_CC_MDSS_VSYNC_CLK 23 +#define DISP_CC_MDSS_VSYNC_CLK_SRC 24 +#define DISP_CC_PLL0 25 +#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 26 +#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 27 + +/* DISP_CC Reset */ +#define DISP_CC_MDSS_RSCC_BCR 0 + +/* DISP_CC GDSCR */ +#define MDSS_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h new file mode 100644 index 000000000..5aa7ebeae --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-apq8084.h @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_APQ_GCC_8084_H +#define _DT_BINDINGS_CLK_APQ_GCC_8084_H + +#define GPLL0 0 +#define GPLL0_VOTE 1 +#define GPLL1 2 +#define GPLL1_VOTE 3 +#define GPLL2 4 +#define GPLL2_VOTE 5 +#define GPLL3 6 +#define GPLL3_VOTE 7 +#define GPLL4 8 +#define GPLL4_VOTE 9 +#define CONFIG_NOC_CLK_SRC 10 +#define PERIPH_NOC_CLK_SRC 11 +#define SYSTEM_NOC_CLK_SRC 12 +#define BLSP_UART_SIM_CLK_SRC 13 +#define QDSS_TSCTR_CLK_SRC 14 +#define UFS_AXI_CLK_SRC 15 +#define RPM_CLK_SRC 16 +#define KPSS_AHB_CLK_SRC 17 +#define QDSS_AT_CLK_SRC 18 +#define BIMC_DDR_CLK_SRC 19 +#define USB30_MASTER_CLK_SRC 20 +#define USB30_SEC_MASTER_CLK_SRC 21 +#define USB_HSIC_AHB_CLK_SRC 22 +#define MMSS_BIMC_GFX_CLK_SRC 23 +#define QDSS_STM_CLK_SRC 24 +#define ACC_CLK_SRC 25 +#define SEC_CTRL_CLK_SRC 26 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 27 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 28 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 29 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 30 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 31 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 32 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 33 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 34 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 35 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 36 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 37 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 38 +#define BLSP1_UART1_APPS_CLK_SRC 39 +#define BLSP1_UART2_APPS_CLK_SRC 40 +#define BLSP1_UART3_APPS_CLK_SRC 41 +#define BLSP1_UART4_APPS_CLK_SRC 42 +#define BLSP1_UART5_APPS_CLK_SRC 43 +#define BLSP1_UART6_APPS_CLK_SRC 44 +#define BLSP2_QUP1_I2C_APPS_CLK_SRC 45 +#define BLSP2_QUP1_SPI_APPS_CLK_SRC 46 +#define BLSP2_QUP2_I2C_APPS_CLK_SRC 47 +#define BLSP2_QUP2_SPI_APPS_CLK_SRC 48 +#define BLSP2_QUP3_I2C_APPS_CLK_SRC 49 +#define BLSP2_QUP3_SPI_APPS_CLK_SRC 50 +#define BLSP2_QUP4_I2C_APPS_CLK_SRC 51 +#define BLSP2_QUP4_SPI_APPS_CLK_SRC 52 +#define BLSP2_QUP5_I2C_APPS_CLK_SRC 53 +#define BLSP2_QUP5_SPI_APPS_CLK_SRC 54 +#define BLSP2_QUP6_I2C_APPS_CLK_SRC 55 +#define BLSP2_QUP6_SPI_APPS_CLK_SRC 56 +#define BLSP2_UART1_APPS_CLK_SRC 57 +#define BLSP2_UART2_APPS_CLK_SRC 58 +#define BLSP2_UART3_APPS_CLK_SRC 59 +#define BLSP2_UART4_APPS_CLK_SRC 60 +#define BLSP2_UART5_APPS_CLK_SRC 61 +#define BLSP2_UART6_APPS_CLK_SRC 62 +#define CE1_CLK_SRC 63 +#define CE2_CLK_SRC 64 +#define CE3_CLK_SRC 65 +#define GP1_CLK_SRC 66 +#define GP2_CLK_SRC 67 +#define GP3_CLK_SRC 68 +#define PDM2_CLK_SRC 69 +#define QDSS_TRACECLKIN_CLK_SRC 70 +#define RBCPR_CLK_SRC 71 +#define SATA_ASIC0_CLK_SRC 72 +#define SATA_PMALIVE_CLK_SRC 73 +#define SATA_RX_CLK_SRC 74 +#define SATA_RX_OOB_CLK_SRC 75 +#define SDCC1_APPS_CLK_SRC 76 +#define SDCC2_APPS_CLK_SRC 77 +#define SDCC3_APPS_CLK_SRC 78 +#define SDCC4_APPS_CLK_SRC 79 +#define GCC_SNOC_BUS_TIMEOUT0_AHB_CLK 80 +#define SPMI_AHB_CLK_SRC 81 +#define SPMI_SER_CLK_SRC 82 +#define TSIF_REF_CLK_SRC 83 +#define USB30_MOCK_UTMI_CLK_SRC 84 +#define USB30_SEC_MOCK_UTMI_CLK_SRC 85 +#define USB_HS_SYSTEM_CLK_SRC 86 +#define USB_HSIC_CLK_SRC 87 +#define USB_HSIC_IO_CAL_CLK_SRC 88 +#define USB_HSIC_MOCK_UTMI_CLK_SRC 89 +#define USB_HSIC_SYSTEM_CLK_SRC 90 +#define GCC_BAM_DMA_AHB_CLK 91 +#define GCC_BAM_DMA_INACTIVITY_TIMERS_CLK 92 +#define DDR_CLK_SRC 93 +#define GCC_BIMC_CFG_AHB_CLK 94 +#define GCC_BIMC_CLK 95 +#define GCC_BIMC_KPSS_AXI_CLK 96 +#define GCC_BIMC_SLEEP_CLK 97 +#define GCC_BIMC_SYSNOC_AXI_CLK 98 +#define GCC_BIMC_XO_CLK 99 +#define GCC_BLSP1_AHB_CLK 100 +#define GCC_BLSP1_SLEEP_CLK 101 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 102 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 103 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 104 +#define 
GCC_BLSP1_QUP2_SPI_APPS_CLK 105 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 106 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 107 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 108 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 109 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 110 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 111 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 112 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 113 +#define GCC_BLSP1_UART1_APPS_CLK 114 +#define GCC_BLSP1_UART1_SIM_CLK 115 +#define GCC_BLSP1_UART2_APPS_CLK 116 +#define GCC_BLSP1_UART2_SIM_CLK 117 +#define GCC_BLSP1_UART3_APPS_CLK 118 +#define GCC_BLSP1_UART3_SIM_CLK 119 +#define GCC_BLSP1_UART4_APPS_CLK 120 +#define GCC_BLSP1_UART4_SIM_CLK 121 +#define GCC_BLSP1_UART5_APPS_CLK 122 +#define GCC_BLSP1_UART5_SIM_CLK 123 +#define GCC_BLSP1_UART6_APPS_CLK 124 +#define GCC_BLSP1_UART6_SIM_CLK 125 +#define GCC_BLSP2_AHB_CLK 126 +#define GCC_BLSP2_SLEEP_CLK 127 +#define GCC_BLSP2_QUP1_I2C_APPS_CLK 128 +#define GCC_BLSP2_QUP1_SPI_APPS_CLK 129 +#define GCC_BLSP2_QUP2_I2C_APPS_CLK 130 +#define GCC_BLSP2_QUP2_SPI_APPS_CLK 131 +#define GCC_BLSP2_QUP3_I2C_APPS_CLK 132 +#define GCC_BLSP2_QUP3_SPI_APPS_CLK 133 +#define GCC_BLSP2_QUP4_I2C_APPS_CLK 134 +#define GCC_BLSP2_QUP4_SPI_APPS_CLK 135 +#define GCC_BLSP2_QUP5_I2C_APPS_CLK 136 +#define GCC_BLSP2_QUP5_SPI_APPS_CLK 137 +#define GCC_BLSP2_QUP6_I2C_APPS_CLK 138 +#define GCC_BLSP2_QUP6_SPI_APPS_CLK 139 +#define GCC_BLSP2_UART1_APPS_CLK 140 +#define GCC_BLSP2_UART1_SIM_CLK 141 +#define GCC_BLSP2_UART2_APPS_CLK 142 +#define GCC_BLSP2_UART2_SIM_CLK 143 +#define GCC_BLSP2_UART3_APPS_CLK 144 +#define GCC_BLSP2_UART3_SIM_CLK 145 +#define GCC_BLSP2_UART4_APPS_CLK 146 +#define GCC_BLSP2_UART4_SIM_CLK 147 +#define GCC_BLSP2_UART5_APPS_CLK 148 +#define GCC_BLSP2_UART5_SIM_CLK 149 +#define GCC_BLSP2_UART6_APPS_CLK 150 +#define GCC_BLSP2_UART6_SIM_CLK 151 +#define GCC_BOOT_ROM_AHB_CLK 152 +#define GCC_CE1_AHB_CLK 153 +#define GCC_CE1_AXI_CLK 154 +#define GCC_CE1_CLK 155 +#define GCC_CE2_AHB_CLK 156 +#define GCC_CE2_AXI_CLK 157 +#define GCC_CE2_CLK 158 +#define GCC_CE3_AHB_CLK 159 +#define GCC_CE3_AXI_CLK 160 +#define GCC_CE3_CLK 161 +#define GCC_CNOC_BUS_TIMEOUT0_AHB_CLK 162 +#define GCC_CNOC_BUS_TIMEOUT1_AHB_CLK 163 +#define GCC_CNOC_BUS_TIMEOUT2_AHB_CLK 164 +#define GCC_CNOC_BUS_TIMEOUT3_AHB_CLK 165 +#define GCC_CNOC_BUS_TIMEOUT4_AHB_CLK 166 +#define GCC_CNOC_BUS_TIMEOUT5_AHB_CLK 167 +#define GCC_CNOC_BUS_TIMEOUT6_AHB_CLK 168 +#define GCC_CNOC_BUS_TIMEOUT7_AHB_CLK 169 +#define GCC_CFG_NOC_AHB_CLK 170 +#define GCC_CFG_NOC_DDR_CFG_CLK 171 +#define GCC_CFG_NOC_RPM_AHB_CLK 172 +#define GCC_COPSS_SMMU_AHB_CLK 173 +#define GCC_COPSS_SMMU_AXI_CLK 174 +#define GCC_DCD_XO_CLK 175 +#define GCC_BIMC_DDR_CH0_CLK 176 +#define GCC_BIMC_DDR_CH1_CLK 177 +#define GCC_BIMC_DDR_CPLL0_CLK 178 +#define GCC_BIMC_DDR_CPLL1_CLK 179 +#define GCC_BIMC_GFX_CLK 180 +#define GCC_DDR_DIM_CFG_CLK 181 +#define GCC_DDR_DIM_SLEEP_CLK 182 +#define GCC_DEHR_CLK 183 +#define GCC_AHB_CLK 184 +#define GCC_IM_SLEEP_CLK 185 +#define GCC_XO_CLK 186 +#define GCC_XO_DIV4_CLK 187 +#define GCC_GP1_CLK 188 +#define GCC_GP2_CLK 189 +#define GCC_GP3_CLK 190 +#define GCC_IMEM_AXI_CLK 191 +#define GCC_IMEM_CFG_AHB_CLK 192 +#define GCC_KPSS_AHB_CLK 193 +#define GCC_KPSS_AXI_CLK 194 +#define GCC_LPASS_MPORT_AXI_CLK 195 +#define GCC_LPASS_Q6_AXI_CLK 196 +#define GCC_LPASS_SWAY_CLK 197 +#define GCC_MMSS_BIMC_GFX_CLK 198 +#define GCC_MMSS_NOC_AT_CLK 199 +#define GCC_MMSS_NOC_CFG_AHB_CLK 200 +#define GCC_MMSS_VPU_MAPLE_SYS_NOC_AXI_CLK 201 +#define GCC_OCMEM_NOC_CFG_AHB_CLK 202 +#define GCC_OCMEM_SYS_NOC_AXI_CLK 
203 +#define GCC_MPM_AHB_CLK 204 +#define GCC_MSG_RAM_AHB_CLK 205 +#define GCC_NOC_CONF_XPU_AHB_CLK 206 +#define GCC_PDM2_CLK 207 +#define GCC_PDM_AHB_CLK 208 +#define GCC_PDM_XO4_CLK 209 +#define GCC_PERIPH_NOC_AHB_CLK 210 +#define GCC_PERIPH_NOC_AT_CLK 211 +#define GCC_PERIPH_NOC_CFG_AHB_CLK 212 +#define GCC_PERIPH_NOC_USB_HSIC_AHB_CLK 213 +#define GCC_PERIPH_NOC_MPU_CFG_AHB_CLK 214 +#define GCC_PERIPH_XPU_AHB_CLK 215 +#define GCC_PNOC_BUS_TIMEOUT0_AHB_CLK 216 +#define GCC_PNOC_BUS_TIMEOUT1_AHB_CLK 217 +#define GCC_PNOC_BUS_TIMEOUT2_AHB_CLK 218 +#define GCC_PNOC_BUS_TIMEOUT3_AHB_CLK 219 +#define GCC_PNOC_BUS_TIMEOUT4_AHB_CLK 220 +#define GCC_PRNG_AHB_CLK 221 +#define GCC_QDSS_AT_CLK 222 +#define GCC_QDSS_CFG_AHB_CLK 223 +#define GCC_QDSS_DAP_AHB_CLK 224 +#define GCC_QDSS_DAP_CLK 225 +#define GCC_QDSS_ETR_USB_CLK 226 +#define GCC_QDSS_STM_CLK 227 +#define GCC_QDSS_TRACECLKIN_CLK 228 +#define GCC_QDSS_TSCTR_DIV16_CLK 229 +#define GCC_QDSS_TSCTR_DIV2_CLK 230 +#define GCC_QDSS_TSCTR_DIV3_CLK 231 +#define GCC_QDSS_TSCTR_DIV4_CLK 232 +#define GCC_QDSS_TSCTR_DIV8_CLK 233 +#define GCC_QDSS_RBCPR_XPU_AHB_CLK 234 +#define GCC_RBCPR_AHB_CLK 235 +#define GCC_RBCPR_CLK 236 +#define GCC_RPM_BUS_AHB_CLK 237 +#define GCC_RPM_PROC_HCLK 238 +#define GCC_RPM_SLEEP_CLK 239 +#define GCC_RPM_TIMER_CLK 240 +#define GCC_SATA_ASIC0_CLK 241 +#define GCC_SATA_AXI_CLK 242 +#define GCC_SATA_CFG_AHB_CLK 243 +#define GCC_SATA_PMALIVE_CLK 244 +#define GCC_SATA_RX_CLK 245 +#define GCC_SATA_RX_OOB_CLK 246 +#define GCC_SDCC1_AHB_CLK 247 +#define GCC_SDCC1_APPS_CLK 248 +#define GCC_SDCC1_CDCCAL_FF_CLK 249 +#define GCC_SDCC1_CDCCAL_SLEEP_CLK 250 +#define GCC_SDCC2_AHB_CLK 251 +#define GCC_SDCC2_APPS_CLK 252 +#define GCC_SDCC2_INACTIVITY_TIMERS_CLK 253 +#define GCC_SDCC3_AHB_CLK 254 +#define GCC_SDCC3_APPS_CLK 255 +#define GCC_SDCC3_INACTIVITY_TIMERS_CLK 256 +#define GCC_SDCC4_AHB_CLK 257 +#define GCC_SDCC4_APPS_CLK 258 +#define GCC_SDCC4_INACTIVITY_TIMERS_CLK 259 +#define GCC_SEC_CTRL_ACC_CLK 260 +#define GCC_SEC_CTRL_AHB_CLK 261 +#define GCC_SEC_CTRL_BOOT_ROM_PATCH_CLK 262 +#define GCC_SEC_CTRL_CLK 263 +#define GCC_SEC_CTRL_SENSE_CLK 264 +#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK 265 +#define GCC_SNOC_BUS_TIMEOUT3_AHB_CLK 266 +#define GCC_SPDM_BIMC_CY_CLK 267 +#define GCC_SPDM_CFG_AHB_CLK 268 +#define GCC_SPDM_DEBUG_CY_CLK 269 +#define GCC_SPDM_FF_CLK 270 +#define GCC_SPDM_MSTR_AHB_CLK 271 +#define GCC_SPDM_PNOC_CY_CLK 272 +#define GCC_SPDM_RPM_CY_CLK 273 +#define GCC_SPDM_SNOC_CY_CLK 274 +#define GCC_SPMI_AHB_CLK 275 +#define GCC_SPMI_CNOC_AHB_CLK 276 +#define GCC_SPMI_SER_CLK 277 +#define GCC_SPSS_AHB_CLK 278 +#define GCC_SNOC_CNOC_AHB_CLK 279 +#define GCC_SNOC_PNOC_AHB_CLK 280 +#define GCC_SYS_NOC_AT_CLK 281 +#define GCC_SYS_NOC_AXI_CLK 282 +#define GCC_SYS_NOC_KPSS_AHB_CLK 283 +#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 284 +#define GCC_SYS_NOC_UFS_AXI_CLK 285 +#define GCC_SYS_NOC_USB3_AXI_CLK 286 +#define GCC_SYS_NOC_USB3_SEC_AXI_CLK 287 +#define GCC_TCSR_AHB_CLK 288 +#define GCC_TLMM_AHB_CLK 289 +#define GCC_TLMM_CLK 290 +#define GCC_TSIF_AHB_CLK 291 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 292 +#define GCC_TSIF_REF_CLK 293 +#define GCC_UFS_AHB_CLK 294 +#define GCC_UFS_AXI_CLK 295 +#define GCC_UFS_RX_CFG_CLK 296 +#define GCC_UFS_RX_SYMBOL_0_CLK 297 +#define GCC_UFS_RX_SYMBOL_1_CLK 298 +#define GCC_UFS_TX_CFG_CLK 299 +#define GCC_UFS_TX_SYMBOL_0_CLK 300 +#define GCC_UFS_TX_SYMBOL_1_CLK 301 +#define GCC_USB2A_PHY_SLEEP_CLK 302 +#define GCC_USB2B_PHY_SLEEP_CLK 303 +#define GCC_USB30_MASTER_CLK 304 +#define GCC_USB30_MOCK_UTMI_CLK 
305 +#define GCC_USB30_SLEEP_CLK 306 +#define GCC_USB30_SEC_MASTER_CLK 307 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 308 +#define GCC_USB30_SEC_SLEEP_CLK 309 +#define GCC_USB_HS_AHB_CLK 310 +#define GCC_USB_HS_INACTIVITY_TIMERS_CLK 311 +#define GCC_USB_HS_SYSTEM_CLK 312 +#define GCC_USB_HSIC_AHB_CLK 313 +#define GCC_USB_HSIC_CLK 314 +#define GCC_USB_HSIC_IO_CAL_CLK 315 +#define GCC_USB_HSIC_IO_CAL_SLEEP_CLK 316 +#define GCC_USB_HSIC_MOCK_UTMI_CLK 317 +#define GCC_USB_HSIC_SYSTEM_CLK 318 +#define PCIE_0_AUX_CLK_SRC 319 +#define PCIE_0_PIPE_CLK_SRC 320 +#define PCIE_1_AUX_CLK_SRC 321 +#define PCIE_1_PIPE_CLK_SRC 322 +#define GCC_PCIE_0_AUX_CLK 323 +#define GCC_PCIE_0_CFG_AHB_CLK 324 +#define GCC_PCIE_0_MSTR_AXI_CLK 325 +#define GCC_PCIE_0_PIPE_CLK 326 +#define GCC_PCIE_0_SLV_AXI_CLK 327 +#define GCC_PCIE_1_AUX_CLK 328 +#define GCC_PCIE_1_CFG_AHB_CLK 329 +#define GCC_PCIE_1_MSTR_AXI_CLK 330 +#define GCC_PCIE_1_PIPE_CLK 331 +#define GCC_PCIE_1_SLV_AXI_CLK 332 + +/* gdscs */ +#define USB_HS_HSIC_GDSC 0 +#define PCIE0_GDSC 1 +#define PCIE1_GDSC 2 +#define USB30_GDSC 3 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h new file mode 100644 index 000000000..7e8a7be6d --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h @@ -0,0 +1,169 @@ +/* Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + */ +#ifndef __QCOM_CLK_IPQ4019_H__ +#define __QCOM_CLK_IPQ4019_H__ + +#define GCC_DUMMY_CLK 0 +#define AUDIO_CLK_SRC 1 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 2 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 3 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 4 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 5 +#define BLSP1_UART1_APPS_CLK_SRC 6 +#define BLSP1_UART2_APPS_CLK_SRC 7 +#define GCC_USB3_MOCK_UTMI_CLK_SRC 8 +#define GCC_APPS_CLK_SRC 9 +#define GCC_APPS_AHB_CLK_SRC 10 +#define GP1_CLK_SRC 11 +#define GP2_CLK_SRC 12 +#define GP3_CLK_SRC 13 +#define SDCC1_APPS_CLK_SRC 14 +#define FEPHY_125M_DLY_CLK_SRC 15 +#define WCSS2G_CLK_SRC 16 +#define WCSS5G_CLK_SRC 17 +#define GCC_APSS_AHB_CLK 18 +#define GCC_AUDIO_AHB_CLK 19 +#define GCC_AUDIO_PWM_CLK 20 +#define GCC_BLSP1_AHB_CLK 21 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 22 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 23 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 24 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 25 +#define GCC_BLSP1_UART1_APPS_CLK 26 +#define GCC_BLSP1_UART2_APPS_CLK 27 +#define GCC_DCD_XO_CLK 28 +#define GCC_GP1_CLK 29 +#define GCC_GP2_CLK 30 +#define GCC_GP3_CLK 31 +#define GCC_BOOT_ROM_AHB_CLK 32 +#define GCC_CRYPTO_AHB_CLK 33 +#define GCC_CRYPTO_AXI_CLK 34 +#define GCC_CRYPTO_CLK 35 +#define GCC_ESS_CLK 36 +#define GCC_IMEM_AXI_CLK 37 +#define GCC_IMEM_CFG_AHB_CLK 38 +#define GCC_PCIE_AHB_CLK 39 +#define GCC_PCIE_AXI_M_CLK 40 +#define GCC_PCIE_AXI_S_CLK 41 +#define GCC_PCNOC_AHB_CLK 42 +#define GCC_PRNG_AHB_CLK 43 +#define GCC_QPIC_AHB_CLK 44 +#define GCC_QPIC_CLK 45 +#define GCC_SDCC1_AHB_CLK 46 +#define GCC_SDCC1_APPS_CLK 47 +#define GCC_SNOC_PCNOC_AHB_CLK 48 +#define GCC_SYS_NOC_125M_CLK 49 +#define GCC_SYS_NOC_AXI_CLK 50 +#define GCC_TCSR_AHB_CLK 51 +#define GCC_TLMM_AHB_CLK 52 +#define GCC_USB2_MASTER_CLK 53 +#define GCC_USB2_SLEEP_CLK 54 +#define GCC_USB2_MOCK_UTMI_CLK 55 +#define GCC_USB3_MASTER_CLK 56 +#define GCC_USB3_SLEEP_CLK 57 +#define GCC_USB3_MOCK_UTMI_CLK 58 +#define GCC_WCSS2G_CLK 59 +#define GCC_WCSS2G_REF_CLK 60 +#define GCC_WCSS2G_RTC_CLK 61 +#define GCC_WCSS5G_CLK 62 +#define GCC_WCSS5G_REF_CLK 63 +#define GCC_WCSS5G_RTC_CLK 64 +#define GCC_APSS_DDRPLL_VCO 65 +#define GCC_SDCC_PLLDIV_CLK 66 +#define GCC_FEPLL_VCO 67 +#define GCC_FEPLL125_CLK 68 +#define GCC_FEPLL125DLY_CLK 69 +#define GCC_FEPLL200_CLK 70 +#define GCC_FEPLL500_CLK 71 +#define GCC_FEPLL_WCSS2G_CLK 72 +#define GCC_FEPLL_WCSS5G_CLK 73 +#define GCC_APSS_CPU_PLLDIV_CLK 74 +#define GCC_PCNOC_AHB_CLK_SRC 75 + +#define WIFI0_CPU_INIT_RESET 0 +#define WIFI0_RADIO_SRIF_RESET 1 +#define WIFI0_RADIO_WARM_RESET 2 +#define WIFI0_RADIO_COLD_RESET 3 +#define WIFI0_CORE_WARM_RESET 4 +#define WIFI0_CORE_COLD_RESET 5 +#define WIFI1_CPU_INIT_RESET 6 +#define WIFI1_RADIO_SRIF_RESET 7 +#define WIFI1_RADIO_WARM_RESET 8 +#define WIFI1_RADIO_COLD_RESET 9 +#define WIFI1_CORE_WARM_RESET 10 +#define WIFI1_CORE_COLD_RESET 11 +#define USB3_UNIPHY_PHY_ARES 12 +#define USB3_HSPHY_POR_ARES 13 +#define USB3_HSPHY_S_ARES 14 +#define USB2_HSPHY_POR_ARES 15 +#define USB2_HSPHY_S_ARES 16 +#define PCIE_PHY_AHB_ARES 17 +#define PCIE_AHB_ARES 18 +#define PCIE_PWR_ARES 19 +#define PCIE_PIPE_STICKY_ARES 20 +#define PCIE_AXI_M_STICKY_ARES 21 +#define PCIE_PHY_ARES 22 +#define PCIE_PARF_XPU_ARES 23 +#define PCIE_AXI_S_XPU_ARES 24 +#define PCIE_AXI_M_VMIDMT_ARES 25 +#define PCIE_PIPE_ARES 26 +#define PCIE_AXI_S_ARES 27 +#define PCIE_AXI_M_ARES 28 +#define ESS_RESET 29 +#define GCC_BLSP1_BCR 30 +#define GCC_BLSP1_QUP1_BCR 31 +#define GCC_BLSP1_UART1_BCR 32 +#define GCC_BLSP1_QUP2_BCR 33 +#define GCC_BLSP1_UART2_BCR 34 +#define 
GCC_BIMC_BCR 35 +#define GCC_TLMM_BCR 36 +#define GCC_IMEM_BCR 37 +#define GCC_ESS_BCR 38 +#define GCC_PRNG_BCR 39 +#define GCC_BOOT_ROM_BCR 40 +#define GCC_CRYPTO_BCR 41 +#define GCC_SDCC1_BCR 42 +#define GCC_SEC_CTRL_BCR 43 +#define GCC_AUDIO_BCR 44 +#define GCC_QPIC_BCR 45 +#define GCC_PCIE_BCR 46 +#define GCC_USB2_BCR 47 +#define GCC_USB2_PHY_BCR 48 +#define GCC_USB3_BCR 49 +#define GCC_USB3_PHY_BCR 50 +#define GCC_SYSTEM_NOC_BCR 51 +#define GCC_PCNOC_BCR 52 +#define GCC_DCD_BCR 53 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 54 +#define GCC_SNOC_BUS_TIMEOUT1_BCR 55 +#define GCC_SNOC_BUS_TIMEOUT2_BCR 56 +#define GCC_SNOC_BUS_TIMEOUT3_BCR 57 +#define GCC_PCNOC_BUS_TIMEOUT0_BCR 58 +#define GCC_PCNOC_BUS_TIMEOUT1_BCR 59 +#define GCC_PCNOC_BUS_TIMEOUT2_BCR 60 +#define GCC_PCNOC_BUS_TIMEOUT3_BCR 61 +#define GCC_PCNOC_BUS_TIMEOUT4_BCR 62 +#define GCC_PCNOC_BUS_TIMEOUT5_BCR 63 +#define GCC_PCNOC_BUS_TIMEOUT6_BCR 64 +#define GCC_PCNOC_BUS_TIMEOUT7_BCR 65 +#define GCC_PCNOC_BUS_TIMEOUT8_BCR 66 +#define GCC_PCNOC_BUS_TIMEOUT9_BCR 67 +#define GCC_TCSR_BCR 68 +#define GCC_QDSS_BCR 69 +#define GCC_MPM_BCR 70 +#define GCC_SPDM_BCR 71 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h new file mode 100644 index 000000000..dc4254b8c --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_GCC_IPQ806X_H +#define _DT_BINDINGS_CLK_GCC_IPQ806X_H + +#define AFAB_CLK_SRC 0 +#define QDSS_STM_CLK 1 +#define SCSS_A_CLK 2 +#define SCSS_H_CLK 3 +#define AFAB_CORE_CLK 4 +#define SCSS_XO_SRC_CLK 5 +#define AFAB_EBI1_CH0_A_CLK 6 +#define AFAB_EBI1_CH1_A_CLK 7 +#define AFAB_AXI_S0_FCLK 8 +#define AFAB_AXI_S1_FCLK 9 +#define AFAB_AXI_S2_FCLK 10 +#define AFAB_AXI_S3_FCLK 11 +#define AFAB_AXI_S4_FCLK 12 +#define SFAB_CORE_CLK 13 +#define SFAB_AXI_S0_FCLK 14 +#define SFAB_AXI_S1_FCLK 15 +#define SFAB_AXI_S2_FCLK 16 +#define SFAB_AXI_S3_FCLK 17 +#define SFAB_AXI_S4_FCLK 18 +#define SFAB_AXI_S5_FCLK 19 +#define SFAB_AHB_S0_FCLK 20 +#define SFAB_AHB_S1_FCLK 21 +#define SFAB_AHB_S2_FCLK 22 +#define SFAB_AHB_S3_FCLK 23 +#define SFAB_AHB_S4_FCLK 24 +#define SFAB_AHB_S5_FCLK 25 +#define SFAB_AHB_S6_FCLK 26 +#define SFAB_AHB_S7_FCLK 27 +#define QDSS_AT_CLK_SRC 28 +#define QDSS_AT_CLK 29 +#define QDSS_TRACECLKIN_CLK_SRC 30 +#define QDSS_TRACECLKIN_CLK 31 +#define QDSS_TSCTR_CLK_SRC 32 +#define QDSS_TSCTR_CLK 33 +#define SFAB_ADM0_M0_A_CLK 34 +#define SFAB_ADM0_M1_A_CLK 35 +#define SFAB_ADM0_M2_H_CLK 36 +#define ADM0_CLK 37 +#define ADM0_PBUS_CLK 38 +#define IMEM0_A_CLK 39 +#define QDSS_H_CLK 40 +#define PCIE_A_CLK 41 +#define PCIE_AUX_CLK 42 +#define PCIE_H_CLK 43 +#define PCIE_PHY_CLK 44 +#define SFAB_CLK_SRC 45 +#define SFAB_LPASS_Q6_A_CLK 46 +#define SFAB_AFAB_M_A_CLK 47 +#define AFAB_SFAB_M0_A_CLK 48 +#define AFAB_SFAB_M1_A_CLK 49 +#define SFAB_SATA_S_H_CLK 50 +#define DFAB_CLK_SRC 51 +#define DFAB_CLK 52 +#define SFAB_DFAB_M_A_CLK 53 +#define DFAB_SFAB_M_A_CLK 54 +#define DFAB_SWAY0_H_CLK 55 +#define DFAB_SWAY1_H_CLK 56 +#define DFAB_ARB0_H_CLK 57 +#define DFAB_ARB1_H_CLK 58 +#define PPSS_H_CLK 59 +#define PPSS_PROC_CLK 60 +#define PPSS_TIMER0_CLK 61 +#define PPSS_TIMER1_CLK 62 +#define PMEM_A_CLK 63 +#define DMA_BAM_H_CLK 64 +#define SIC_H_CLK 65 +#define SPS_TIC_H_CLK 66 +#define CFPB_2X_CLK_SRC 67 +#define CFPB_CLK 68 +#define CFPB0_H_CLK 69 +#define CFPB1_H_CLK 70 +#define CFPB2_H_CLK 71 +#define SFAB_CFPB_M_H_CLK 72 +#define CFPB_MASTER_H_CLK 73 +#define SFAB_CFPB_S_H_CLK 74 +#define CFPB_SPLITTER_H_CLK 75 +#define TSIF_H_CLK 76 +#define TSIF_INACTIVITY_TIMERS_CLK 77 +#define TSIF_REF_SRC 78 +#define TSIF_REF_CLK 79 +#define CE1_H_CLK 80 +#define CE1_CORE_CLK 81 +#define CE1_SLEEP_CLK 82 +#define CE2_H_CLK 83 +#define CE2_CORE_CLK 84 +#define SFPB_H_CLK_SRC 85 +#define SFPB_H_CLK 86 +#define SFAB_SFPB_M_H_CLK 87 +#define SFAB_SFPB_S_H_CLK 88 +#define RPM_PROC_CLK 89 +#define RPM_BUS_H_CLK 90 +#define RPM_SLEEP_CLK 91 +#define RPM_TIMER_CLK 92 +#define RPM_MSG_RAM_H_CLK 93 +#define PMIC_ARB0_H_CLK 94 +#define PMIC_ARB1_H_CLK 95 +#define PMIC_SSBI2_SRC 96 +#define PMIC_SSBI2_CLK 97 +#define SDC1_H_CLK 98 +#define SDC2_H_CLK 99 +#define SDC3_H_CLK 100 +#define SDC4_H_CLK 101 +#define SDC1_SRC 102 +#define SDC1_CLK 103 +#define SDC2_SRC 104 +#define SDC2_CLK 105 +#define SDC3_SRC 106 +#define SDC3_CLK 107 +#define SDC4_SRC 108 +#define SDC4_CLK 109 +#define USB_HS1_H_CLK 110 +#define USB_HS1_XCVR_SRC 111 +#define USB_HS1_XCVR_CLK 112 +#define USB_HSIC_H_CLK 113 +#define USB_HSIC_XCVR_SRC 114 +#define USB_HSIC_XCVR_CLK 115 +#define USB_HSIC_SYSTEM_CLK_SRC 116 +#define USB_HSIC_SYSTEM_CLK 117 +#define CFPB0_C0_H_CLK 118 +#define CFPB0_D0_H_CLK 119 +#define CFPB0_C1_H_CLK 120 +#define CFPB0_D1_H_CLK 121 +#define USB_FS1_H_CLK 122 +#define USB_FS1_XCVR_SRC 123 +#define USB_FS1_XCVR_CLK 124 +#define USB_FS1_SYSTEM_CLK 125 +#define GSBI_COMMON_SIM_SRC 126 +#define 
GSBI1_H_CLK 127 +#define GSBI2_H_CLK 128 +#define GSBI3_H_CLK 129 +#define GSBI4_H_CLK 130 +#define GSBI5_H_CLK 131 +#define GSBI6_H_CLK 132 +#define GSBI7_H_CLK 133 +#define GSBI1_QUP_SRC 134 +#define GSBI1_QUP_CLK 135 +#define GSBI2_QUP_SRC 136 +#define GSBI2_QUP_CLK 137 +#define GSBI3_QUP_SRC 138 +#define GSBI3_QUP_CLK 139 +#define GSBI4_QUP_SRC 140 +#define GSBI4_QUP_CLK 141 +#define GSBI5_QUP_SRC 142 +#define GSBI5_QUP_CLK 143 +#define GSBI6_QUP_SRC 144 +#define GSBI6_QUP_CLK 145 +#define GSBI7_QUP_SRC 146 +#define GSBI7_QUP_CLK 147 +#define GSBI1_UART_SRC 148 +#define GSBI1_UART_CLK 149 +#define GSBI2_UART_SRC 150 +#define GSBI2_UART_CLK 151 +#define GSBI3_UART_SRC 152 +#define GSBI3_UART_CLK 153 +#define GSBI4_UART_SRC 154 +#define GSBI4_UART_CLK 155 +#define GSBI5_UART_SRC 156 +#define GSBI5_UART_CLK 157 +#define GSBI6_UART_SRC 158 +#define GSBI6_UART_CLK 159 +#define GSBI7_UART_SRC 160 +#define GSBI7_UART_CLK 161 +#define GSBI1_SIM_CLK 162 +#define GSBI2_SIM_CLK 163 +#define GSBI3_SIM_CLK 164 +#define GSBI4_SIM_CLK 165 +#define GSBI5_SIM_CLK 166 +#define GSBI6_SIM_CLK 167 +#define GSBI7_SIM_CLK 168 +#define USB_HSIC_HSIC_CLK_SRC 169 +#define USB_HSIC_HSIC_CLK 170 +#define USB_HSIC_HSIO_CAL_CLK 171 +#define SPDM_CFG_H_CLK 172 +#define SPDM_MSTR_H_CLK 173 +#define SPDM_FF_CLK_SRC 174 +#define SPDM_FF_CLK 175 +#define SEC_CTRL_CLK 176 +#define SEC_CTRL_ACC_CLK_SRC 177 +#define SEC_CTRL_ACC_CLK 178 +#define TLMM_H_CLK 179 +#define TLMM_CLK 180 +#define SATA_H_CLK 181 +#define SATA_CLK_SRC 182 +#define SATA_RXOOB_CLK 183 +#define SATA_PMALIVE_CLK 184 +#define SATA_PHY_REF_CLK 185 +#define SATA_A_CLK 186 +#define SATA_PHY_CFG_CLK 187 +#define TSSC_CLK_SRC 188 +#define TSSC_CLK 189 +#define PDM_SRC 190 +#define PDM_CLK 191 +#define GP0_SRC 192 +#define GP0_CLK 193 +#define GP1_SRC 194 +#define GP1_CLK 195 +#define GP2_SRC 196 +#define GP2_CLK 197 +#define MPM_CLK 198 +#define EBI1_CLK_SRC 199 +#define EBI1_CH0_CLK 200 +#define EBI1_CH1_CLK 201 +#define EBI1_2X_CLK 202 +#define EBI1_CH0_DQ_CLK 203 +#define EBI1_CH1_DQ_CLK 204 +#define EBI1_CH0_CA_CLK 205 +#define EBI1_CH1_CA_CLK 206 +#define EBI1_XO_CLK 207 +#define SFAB_SMPSS_S_H_CLK 208 +#define PRNG_SRC 209 +#define PRNG_CLK 210 +#define PXO_SRC 211 +#define SPDM_CY_PORT0_CLK 212 +#define SPDM_CY_PORT1_CLK 213 +#define SPDM_CY_PORT2_CLK 214 +#define SPDM_CY_PORT3_CLK 215 +#define SPDM_CY_PORT4_CLK 216 +#define SPDM_CY_PORT5_CLK 217 +#define SPDM_CY_PORT6_CLK 218 +#define SPDM_CY_PORT7_CLK 219 +#define PLL0 220 +#define PLL0_VOTE 221 +#define PLL3 222 +#define PLL3_VOTE 223 +#define PLL4_VOTE 225 +#define PLL8 226 +#define PLL8_VOTE 227 +#define PLL9 228 +#define PLL10 229 +#define PLL11 230 +#define PLL12 231 +#define PLL14 232 +#define PLL14_VOTE 233 +#define PLL18 234 +#define CE5_SRC 235 +#define CE5_H_CLK 236 +#define CE5_CORE_CLK 237 +#define CE3_SLEEP_CLK 238 +#define SFAB_AHB_S8_FCLK 239 +#define SPDM_CY_PORT8_CLK 246 +#define PCIE_ALT_REF_SRC 247 +#define PCIE_ALT_REF_CLK 248 +#define PCIE_1_A_CLK 249 +#define PCIE_1_AUX_CLK 250 +#define PCIE_1_H_CLK 251 +#define PCIE_1_PHY_CLK 252 +#define PCIE_1_ALT_REF_SRC 253 +#define PCIE_1_ALT_REF_CLK 254 +#define PCIE_2_A_CLK 255 +#define PCIE_2_AUX_CLK 256 +#define PCIE_2_H_CLK 257 +#define PCIE_2_PHY_CLK 258 +#define PCIE_2_ALT_REF_SRC 259 +#define PCIE_2_ALT_REF_CLK 260 +#define EBI2_CLK 261 +#define USB30_SLEEP_CLK 262 +#define USB30_UTMI_SRC 263 +#define USB30_0_UTMI_CLK 264 +#define USB30_1_UTMI_CLK 265 +#define USB30_MASTER_SRC 266 +#define USB30_0_MASTER_CLK 267 +#define 
USB30_1_MASTER_CLK 268 +#define GMAC_CORE1_CLK_SRC 269 +#define GMAC_CORE2_CLK_SRC 270 +#define GMAC_CORE3_CLK_SRC 271 +#define GMAC_CORE4_CLK_SRC 272 +#define GMAC_CORE1_CLK 273 +#define GMAC_CORE2_CLK 274 +#define GMAC_CORE3_CLK 275 +#define GMAC_CORE4_CLK 276 +#define UBI32_CORE1_CLK_SRC 277 +#define UBI32_CORE2_CLK_SRC 278 +#define UBI32_CORE1_CLK 279 +#define UBI32_CORE2_CLK 280 +#define EBI2_AON_CLK 281 +#define NSSTCM_CLK_SRC 282 +#define NSSTCM_CLK 283 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h new file mode 100644 index 000000000..238f872e5 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h @@ -0,0 +1,374 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_8074_H +#define _DT_BINDINGS_CLOCK_IPQ_GCC_8074_H + +#define GPLL0 0 +#define GPLL0_MAIN 1 +#define GCC_SLEEP_CLK_SRC 2 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 3 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 4 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 5 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 6 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 7 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 8 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 9 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 10 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 11 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 12 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 13 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 14 +#define BLSP1_UART1_APPS_CLK_SRC 15 +#define BLSP1_UART2_APPS_CLK_SRC 16 +#define BLSP1_UART3_APPS_CLK_SRC 17 +#define BLSP1_UART4_APPS_CLK_SRC 18 +#define BLSP1_UART5_APPS_CLK_SRC 19 +#define BLSP1_UART6_APPS_CLK_SRC 20 +#define GCC_BLSP1_AHB_CLK 21 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 22 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 23 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 24 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 25 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 26 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 27 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 28 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 29 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 30 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 31 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 32 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 33 +#define GCC_BLSP1_UART1_APPS_CLK 34 +#define GCC_BLSP1_UART2_APPS_CLK 35 +#define GCC_BLSP1_UART3_APPS_CLK 36 +#define GCC_BLSP1_UART4_APPS_CLK 37 +#define GCC_BLSP1_UART5_APPS_CLK 38 +#define GCC_BLSP1_UART6_APPS_CLK 39 +#define GCC_PRNG_AHB_CLK 40 +#define GCC_QPIC_AHB_CLK 41 +#define GCC_QPIC_CLK 42 +#define PCNOC_BFDCD_CLK_SRC 43 +#define GPLL2_MAIN 44 +#define GPLL2 45 +#define GPLL4_MAIN 46 +#define GPLL4 47 +#define GPLL6_MAIN 48 +#define GPLL6 49 +#define UBI32_PLL_MAIN 50 +#define UBI32_PLL 51 +#define NSS_CRYPTO_PLL_MAIN 52 +#define NSS_CRYPTO_PLL 53 +#define PCIE0_AXI_CLK_SRC 54 +#define PCIE0_AUX_CLK_SRC 55 +#define PCIE0_PIPE_CLK_SRC 56 +#define PCIE1_AXI_CLK_SRC 57 +#define PCIE1_AUX_CLK_SRC 58 +#define PCIE1_PIPE_CLK_SRC 59 +#define SDCC1_APPS_CLK_SRC 60 +#define SDCC1_ICE_CORE_CLK_SRC 61 +#define SDCC2_APPS_CLK_SRC 62 +#define USB0_MASTER_CLK_SRC 63 +#define USB0_AUX_CLK_SRC 64 +#define 
USB0_MOCK_UTMI_CLK_SRC 65 +#define USB0_PIPE_CLK_SRC 66 +#define USB1_MASTER_CLK_SRC 67 +#define USB1_AUX_CLK_SRC 68 +#define USB1_MOCK_UTMI_CLK_SRC 69 +#define USB1_PIPE_CLK_SRC 70 +#define GCC_XO_CLK_SRC 71 +#define SYSTEM_NOC_BFDCD_CLK_SRC 72 +#define NSS_CE_CLK_SRC 73 +#define NSS_NOC_BFDCD_CLK_SRC 74 +#define NSS_CRYPTO_CLK_SRC 75 +#define NSS_UBI0_CLK_SRC 76 +#define NSS_UBI0_DIV_CLK_SRC 77 +#define NSS_UBI1_CLK_SRC 78 +#define NSS_UBI1_DIV_CLK_SRC 79 +#define UBI_MPT_CLK_SRC 80 +#define NSS_IMEM_CLK_SRC 81 +#define NSS_PPE_CLK_SRC 82 +#define NSS_PORT1_RX_CLK_SRC 83 +#define NSS_PORT1_RX_DIV_CLK_SRC 84 +#define NSS_PORT1_TX_CLK_SRC 85 +#define NSS_PORT1_TX_DIV_CLK_SRC 86 +#define NSS_PORT2_RX_CLK_SRC 87 +#define NSS_PORT2_RX_DIV_CLK_SRC 88 +#define NSS_PORT2_TX_CLK_SRC 89 +#define NSS_PORT2_TX_DIV_CLK_SRC 90 +#define NSS_PORT3_RX_CLK_SRC 91 +#define NSS_PORT3_RX_DIV_CLK_SRC 92 +#define NSS_PORT3_TX_CLK_SRC 93 +#define NSS_PORT3_TX_DIV_CLK_SRC 94 +#define NSS_PORT4_RX_CLK_SRC 95 +#define NSS_PORT4_RX_DIV_CLK_SRC 96 +#define NSS_PORT4_TX_CLK_SRC 97 +#define NSS_PORT4_TX_DIV_CLK_SRC 98 +#define NSS_PORT5_RX_CLK_SRC 99 +#define NSS_PORT5_RX_DIV_CLK_SRC 100 +#define NSS_PORT5_TX_CLK_SRC 101 +#define NSS_PORT5_TX_DIV_CLK_SRC 102 +#define NSS_PORT6_RX_CLK_SRC 103 +#define NSS_PORT6_RX_DIV_CLK_SRC 104 +#define NSS_PORT6_TX_CLK_SRC 105 +#define NSS_PORT6_TX_DIV_CLK_SRC 106 +#define CRYPTO_CLK_SRC 107 +#define GP1_CLK_SRC 108 +#define GP2_CLK_SRC 109 +#define GP3_CLK_SRC 110 +#define GCC_PCIE0_AHB_CLK 111 +#define GCC_PCIE0_AUX_CLK 112 +#define GCC_PCIE0_AXI_M_CLK 113 +#define GCC_PCIE0_AXI_S_CLK 114 +#define GCC_PCIE0_PIPE_CLK 115 +#define GCC_SYS_NOC_PCIE0_AXI_CLK 116 +#define GCC_PCIE1_AHB_CLK 117 +#define GCC_PCIE1_AUX_CLK 118 +#define GCC_PCIE1_AXI_M_CLK 119 +#define GCC_PCIE1_AXI_S_CLK 120 +#define GCC_PCIE1_PIPE_CLK 121 +#define GCC_SYS_NOC_PCIE1_AXI_CLK 122 +#define GCC_USB0_AUX_CLK 123 +#define GCC_SYS_NOC_USB0_AXI_CLK 124 +#define GCC_USB0_MASTER_CLK 125 +#define GCC_USB0_MOCK_UTMI_CLK 126 +#define GCC_USB0_PHY_CFG_AHB_CLK 127 +#define GCC_USB0_PIPE_CLK 128 +#define GCC_USB0_SLEEP_CLK 129 +#define GCC_USB1_AUX_CLK 130 +#define GCC_SYS_NOC_USB1_AXI_CLK 131 +#define GCC_USB1_MASTER_CLK 132 +#define GCC_USB1_MOCK_UTMI_CLK 133 +#define GCC_USB1_PHY_CFG_AHB_CLK 134 +#define GCC_USB1_PIPE_CLK 135 +#define GCC_USB1_SLEEP_CLK 136 +#define GCC_SDCC1_AHB_CLK 137 +#define GCC_SDCC1_APPS_CLK 138 +#define GCC_SDCC1_ICE_CORE_CLK 139 +#define GCC_SDCC2_AHB_CLK 140 +#define GCC_SDCC2_APPS_CLK 141 +#define GCC_MEM_NOC_NSS_AXI_CLK 142 +#define GCC_NSS_CE_APB_CLK 143 +#define GCC_NSS_CE_AXI_CLK 144 +#define GCC_NSS_CFG_CLK 145 +#define GCC_NSS_CRYPTO_CLK 146 +#define GCC_NSS_CSR_CLK 147 +#define GCC_NSS_EDMA_CFG_CLK 148 +#define GCC_NSS_EDMA_CLK 149 +#define GCC_NSS_IMEM_CLK 150 +#define GCC_NSS_NOC_CLK 151 +#define GCC_NSS_PPE_BTQ_CLK 152 +#define GCC_NSS_PPE_CFG_CLK 153 +#define GCC_NSS_PPE_CLK 154 +#define GCC_NSS_PPE_IPE_CLK 155 +#define GCC_NSS_PTP_REF_CLK 156 +#define GCC_NSSNOC_CE_APB_CLK 157 +#define GCC_NSSNOC_CE_AXI_CLK 158 +#define GCC_NSSNOC_CRYPTO_CLK 159 +#define GCC_NSSNOC_PPE_CFG_CLK 160 +#define GCC_NSSNOC_PPE_CLK 161 +#define GCC_NSSNOC_QOSGEN_REF_CLK 162 +#define GCC_NSSNOC_SNOC_CLK 163 +#define GCC_NSSNOC_TIMEOUT_REF_CLK 164 +#define GCC_NSSNOC_UBI0_AHB_CLK 165 +#define GCC_NSSNOC_UBI1_AHB_CLK 166 +#define GCC_UBI0_AHB_CLK 167 +#define GCC_UBI0_AXI_CLK 168 +#define GCC_UBI0_NC_AXI_CLK 169 +#define GCC_UBI0_CORE_CLK 170 +#define GCC_UBI0_MPT_CLK 171 +#define GCC_UBI1_AHB_CLK 172 
+#define GCC_UBI1_AXI_CLK 173 +#define GCC_UBI1_NC_AXI_CLK 174 +#define GCC_UBI1_CORE_CLK 175 +#define GCC_UBI1_MPT_CLK 176 +#define GCC_CMN_12GPLL_AHB_CLK 177 +#define GCC_CMN_12GPLL_SYS_CLK 178 +#define GCC_MDIO_AHB_CLK 179 +#define GCC_UNIPHY0_AHB_CLK 180 +#define GCC_UNIPHY0_SYS_CLK 181 +#define GCC_UNIPHY1_AHB_CLK 182 +#define GCC_UNIPHY1_SYS_CLK 183 +#define GCC_UNIPHY2_AHB_CLK 184 +#define GCC_UNIPHY2_SYS_CLK 185 +#define GCC_NSS_PORT1_RX_CLK 186 +#define GCC_NSS_PORT1_TX_CLK 187 +#define GCC_NSS_PORT2_RX_CLK 188 +#define GCC_NSS_PORT2_TX_CLK 189 +#define GCC_NSS_PORT3_RX_CLK 190 +#define GCC_NSS_PORT3_TX_CLK 191 +#define GCC_NSS_PORT4_RX_CLK 192 +#define GCC_NSS_PORT4_TX_CLK 193 +#define GCC_NSS_PORT5_RX_CLK 194 +#define GCC_NSS_PORT5_TX_CLK 195 +#define GCC_NSS_PORT6_RX_CLK 196 +#define GCC_NSS_PORT6_TX_CLK 197 +#define GCC_PORT1_MAC_CLK 198 +#define GCC_PORT2_MAC_CLK 199 +#define GCC_PORT3_MAC_CLK 200 +#define GCC_PORT4_MAC_CLK 201 +#define GCC_PORT5_MAC_CLK 202 +#define GCC_PORT6_MAC_CLK 203 +#define GCC_UNIPHY0_PORT1_RX_CLK 204 +#define GCC_UNIPHY0_PORT1_TX_CLK 205 +#define GCC_UNIPHY0_PORT2_RX_CLK 206 +#define GCC_UNIPHY0_PORT2_TX_CLK 207 +#define GCC_UNIPHY0_PORT3_RX_CLK 208 +#define GCC_UNIPHY0_PORT3_TX_CLK 209 +#define GCC_UNIPHY0_PORT4_RX_CLK 210 +#define GCC_UNIPHY0_PORT4_TX_CLK 211 +#define GCC_UNIPHY0_PORT5_RX_CLK 212 +#define GCC_UNIPHY0_PORT5_TX_CLK 213 +#define GCC_UNIPHY1_PORT5_RX_CLK 214 +#define GCC_UNIPHY1_PORT5_TX_CLK 215 +#define GCC_UNIPHY2_PORT6_RX_CLK 216 +#define GCC_UNIPHY2_PORT6_TX_CLK 217 +#define GCC_CRYPTO_AHB_CLK 218 +#define GCC_CRYPTO_AXI_CLK 219 +#define GCC_CRYPTO_CLK 220 +#define GCC_GP1_CLK 221 +#define GCC_GP2_CLK 222 +#define GCC_GP3_CLK 223 + +#define GCC_BLSP1_BCR 0 +#define GCC_BLSP1_QUP1_BCR 1 +#define GCC_BLSP1_UART1_BCR 2 +#define GCC_BLSP1_QUP2_BCR 3 +#define GCC_BLSP1_UART2_BCR 4 +#define GCC_BLSP1_QUP3_BCR 5 +#define GCC_BLSP1_UART3_BCR 6 +#define GCC_BLSP1_QUP4_BCR 7 +#define GCC_BLSP1_UART4_BCR 8 +#define GCC_BLSP1_QUP5_BCR 9 +#define GCC_BLSP1_UART5_BCR 10 +#define GCC_BLSP1_QUP6_BCR 11 +#define GCC_BLSP1_UART6_BCR 12 +#define GCC_IMEM_BCR 13 +#define GCC_SMMU_BCR 14 +#define GCC_APSS_TCU_BCR 15 +#define GCC_SMMU_XPU_BCR 16 +#define GCC_PCNOC_TBU_BCR 17 +#define GCC_SMMU_CFG_BCR 18 +#define GCC_PRNG_BCR 19 +#define GCC_BOOT_ROM_BCR 20 +#define GCC_CRYPTO_BCR 21 +#define GCC_WCSS_BCR 22 +#define GCC_WCSS_Q6_BCR 23 +#define GCC_NSS_BCR 24 +#define GCC_SEC_CTRL_BCR 25 +#define GCC_ADSS_BCR 26 +#define GCC_DDRSS_BCR 27 +#define GCC_SYSTEM_NOC_BCR 28 +#define GCC_PCNOC_BCR 29 +#define GCC_TCSR_BCR 30 +#define GCC_QDSS_BCR 31 +#define GCC_DCD_BCR 32 +#define GCC_MSG_RAM_BCR 33 +#define GCC_MPM_BCR 34 +#define GCC_SPMI_BCR 35 +#define GCC_SPDM_BCR 36 +#define GCC_RBCPR_BCR 37 +#define GCC_RBCPR_MX_BCR 38 +#define GCC_TLMM_BCR 39 +#define GCC_RBCPR_WCSS_BCR 40 +#define GCC_USB0_PHY_BCR 41 +#define GCC_USB3PHY_0_PHY_BCR 42 +#define GCC_USB0_BCR 43 +#define GCC_USB1_PHY_BCR 44 +#define GCC_USB3PHY_1_PHY_BCR 45 +#define GCC_USB1_BCR 46 +#define GCC_QUSB2_0_PHY_BCR 47 +#define GCC_QUSB2_1_PHY_BCR 48 +#define GCC_SDCC1_BCR 49 +#define GCC_SDCC2_BCR 50 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 51 +#define GCC_SNOC_BUS_TIMEOUT2_BCR 52 +#define GCC_SNOC_BUS_TIMEOUT3_BCR 53 +#define GCC_PCNOC_BUS_TIMEOUT0_BCR 54 +#define GCC_PCNOC_BUS_TIMEOUT1_BCR 55 +#define GCC_PCNOC_BUS_TIMEOUT2_BCR 56 +#define GCC_PCNOC_BUS_TIMEOUT3_BCR 57 +#define GCC_PCNOC_BUS_TIMEOUT4_BCR 58 +#define GCC_PCNOC_BUS_TIMEOUT5_BCR 59 +#define GCC_PCNOC_BUS_TIMEOUT6_BCR 60 +#define 
GCC_PCNOC_BUS_TIMEOUT7_BCR 61 +#define GCC_PCNOC_BUS_TIMEOUT8_BCR 62 +#define GCC_PCNOC_BUS_TIMEOUT9_BCR 63 +#define GCC_UNIPHY0_BCR 64 +#define GCC_UNIPHY1_BCR 65 +#define GCC_UNIPHY2_BCR 66 +#define GCC_CMN_12GPLL_BCR 67 +#define GCC_QPIC_BCR 68 +#define GCC_MDIO_BCR 69 +#define GCC_PCIE1_TBU_BCR 70 +#define GCC_WCSS_CORE_TBU_BCR 71 +#define GCC_WCSS_Q6_TBU_BCR 72 +#define GCC_USB0_TBU_BCR 73 +#define GCC_USB1_TBU_BCR 74 +#define GCC_PCIE0_TBU_BCR 75 +#define GCC_NSS_NOC_TBU_BCR 76 +#define GCC_PCIE0_BCR 77 +#define GCC_PCIE0_PHY_BCR 78 +#define GCC_PCIE0PHY_PHY_BCR 79 +#define GCC_PCIE0_LINK_DOWN_BCR 80 +#define GCC_PCIE1_BCR 81 +#define GCC_PCIE1_PHY_BCR 82 +#define GCC_PCIE1PHY_PHY_BCR 83 +#define GCC_PCIE1_LINK_DOWN_BCR 84 +#define GCC_DCC_BCR 85 +#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 86 +#define GCC_APC1_VOLTAGE_DROOP_DETECTOR_BCR 87 +#define GCC_SMMU_CATS_BCR 88 +#define GCC_UBI0_AXI_ARES 89 +#define GCC_UBI0_AHB_ARES 90 +#define GCC_UBI0_NC_AXI_ARES 91 +#define GCC_UBI0_DBG_ARES 92 +#define GCC_UBI0_CORE_CLAMP_ENABLE 93 +#define GCC_UBI0_CLKRST_CLAMP_ENABLE 94 +#define GCC_UBI1_AXI_ARES 95 +#define GCC_UBI1_AHB_ARES 96 +#define GCC_UBI1_NC_AXI_ARES 97 +#define GCC_UBI1_DBG_ARES 98 +#define GCC_UBI1_CORE_CLAMP_ENABLE 99 +#define GCC_UBI1_CLKRST_CLAMP_ENABLE 100 +#define GCC_NSS_CFG_ARES 101 +#define GCC_NSS_IMEM_ARES 102 +#define GCC_NSS_NOC_ARES 103 +#define GCC_NSS_CRYPTO_ARES 104 +#define GCC_NSS_CSR_ARES 105 +#define GCC_NSS_CE_APB_ARES 106 +#define GCC_NSS_CE_AXI_ARES 107 +#define GCC_NSSNOC_CE_APB_ARES 108 +#define GCC_NSSNOC_CE_AXI_ARES 109 +#define GCC_NSSNOC_UBI0_AHB_ARES 110 +#define GCC_NSSNOC_UBI1_AHB_ARES 111 +#define GCC_NSSNOC_SNOC_ARES 112 +#define GCC_NSSNOC_CRYPTO_ARES 113 +#define GCC_NSSNOC_ATB_ARES 114 +#define GCC_NSSNOC_QOSGEN_REF_ARES 115 +#define GCC_NSSNOC_TIMEOUT_REF_ARES 116 +#define GCC_PCIE0_PIPE_ARES 117 +#define GCC_PCIE0_SLEEP_ARES 118 +#define GCC_PCIE0_CORE_STICKY_ARES 119 +#define GCC_PCIE0_AXI_MASTER_ARES 120 +#define GCC_PCIE0_AXI_SLAVE_ARES 121 +#define GCC_PCIE0_AHB_ARES 122 +#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 123 +#define GCC_PCIE1_PIPE_ARES 124 +#define GCC_PCIE1_SLEEP_ARES 125 +#define GCC_PCIE1_CORE_STICKY_ARES 126 +#define GCC_PCIE1_AXI_MASTER_ARES 127 +#define GCC_PCIE1_AXI_SLAVE_ARES 128 +#define GCC_PCIE1_AHB_ARES 129 +#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9615.h b/include/dt-bindings/clock/qcom,gcc-mdm9615.h new file mode 100644 index 000000000..787e44895 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-mdm9615.h @@ -0,0 +1,329 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * Copyright (c) BayLibre, SAS. + * Author : Neil Armstrong + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MDM_GCC_9615_H +#define _DT_BINDINGS_CLK_MDM_GCC_9615_H + +#define AFAB_CLK_SRC 0 +#define AFAB_CORE_CLK 1 +#define SFAB_MSS_Q6_SW_A_CLK 2 +#define SFAB_MSS_Q6_FW_A_CLK 3 +#define QDSS_STM_CLK 4 +#define SCSS_A_CLK 5 +#define SCSS_H_CLK 6 +#define SCSS_XO_SRC_CLK 7 +#define AFAB_EBI1_CH0_A_CLK 8 +#define AFAB_EBI1_CH1_A_CLK 9 +#define AFAB_AXI_S0_FCLK 10 +#define AFAB_AXI_S1_FCLK 11 +#define AFAB_AXI_S2_FCLK 12 +#define AFAB_AXI_S3_FCLK 13 +#define AFAB_AXI_S4_FCLK 14 +#define SFAB_CORE_CLK 15 +#define SFAB_AXI_S0_FCLK 16 +#define SFAB_AXI_S1_FCLK 17 +#define SFAB_AXI_S2_FCLK 18 +#define SFAB_AXI_S3_FCLK 19 +#define SFAB_AXI_S4_FCLK 20 +#define SFAB_AHB_S0_FCLK 21 +#define SFAB_AHB_S1_FCLK 22 +#define SFAB_AHB_S2_FCLK 23 +#define SFAB_AHB_S3_FCLK 24 +#define SFAB_AHB_S4_FCLK 25 +#define SFAB_AHB_S5_FCLK 26 +#define SFAB_AHB_S6_FCLK 27 +#define SFAB_AHB_S7_FCLK 28 +#define QDSS_AT_CLK_SRC 29 +#define QDSS_AT_CLK 30 +#define QDSS_TRACECLKIN_CLK_SRC 31 +#define QDSS_TRACECLKIN_CLK 32 +#define QDSS_TSCTR_CLK_SRC 33 +#define QDSS_TSCTR_CLK 34 +#define SFAB_ADM0_M0_A_CLK 35 +#define SFAB_ADM0_M1_A_CLK 36 +#define SFAB_ADM0_M2_H_CLK 37 +#define ADM0_CLK 38 +#define ADM0_PBUS_CLK 39 +#define MSS_XPU_CLK 40 +#define IMEM0_A_CLK 41 +#define QDSS_H_CLK 42 +#define PCIE_A_CLK 43 +#define PCIE_AUX_CLK 44 +#define PCIE_PHY_REF_CLK 45 +#define PCIE_H_CLK 46 +#define SFAB_CLK_SRC 47 +#define MAHB0_CLK 48 +#define Q6SW_CLK_SRC 49 +#define Q6SW_CLK 50 +#define Q6FW_CLK_SRC 51 +#define Q6FW_CLK 52 +#define SFAB_MSS_M_A_CLK 53 +#define SFAB_USB3_M_A_CLK 54 +#define SFAB_LPASS_Q6_A_CLK 55 +#define SFAB_AFAB_M_A_CLK 56 +#define AFAB_SFAB_M0_A_CLK 57 +#define AFAB_SFAB_M1_A_CLK 58 +#define SFAB_SATA_S_H_CLK 59 +#define DFAB_CLK_SRC 60 +#define DFAB_CLK 61 +#define SFAB_DFAB_M_A_CLK 62 +#define DFAB_SFAB_M_A_CLK 63 +#define DFAB_SWAY0_H_CLK 64 +#define DFAB_SWAY1_H_CLK 65 +#define DFAB_ARB0_H_CLK 66 +#define DFAB_ARB1_H_CLK 67 +#define PPSS_H_CLK 68 +#define PPSS_PROC_CLK 69 +#define PPSS_TIMER0_CLK 70 +#define PPSS_TIMER1_CLK 71 +#define PMEM_A_CLK 72 +#define DMA_BAM_H_CLK 73 +#define SIC_H_CLK 74 +#define SPS_TIC_H_CLK 75 +#define SLIMBUS_H_CLK 76 +#define SLIMBUS_XO_SRC_CLK 77 +#define CFPB_2X_CLK_SRC 78 +#define CFPB_CLK 79 +#define CFPB0_H_CLK 80 +#define CFPB1_H_CLK 81 +#define CFPB2_H_CLK 82 +#define SFAB_CFPB_M_H_CLK 83 +#define CFPB_MASTER_H_CLK 84 +#define SFAB_CFPB_S_H_CLK 85 +#define CFPB_SPLITTER_H_CLK 86 +#define TSIF_H_CLK 87 +#define TSIF_INACTIVITY_TIMERS_CLK 88 +#define TSIF_REF_SRC 89 +#define TSIF_REF_CLK 90 +#define CE1_H_CLK 91 +#define CE1_CORE_CLK 92 +#define CE1_SLEEP_CLK 93 +#define CE2_H_CLK 94 +#define CE2_CORE_CLK 95 +#define SFPB_H_CLK_SRC 97 +#define SFPB_H_CLK 98 +#define SFAB_SFPB_M_H_CLK 99 +#define SFAB_SFPB_S_H_CLK 100 +#define RPM_PROC_CLK 101 +#define RPM_BUS_H_CLK 102 +#define RPM_SLEEP_CLK 103 +#define RPM_TIMER_CLK 104 +#define RPM_MSG_RAM_H_CLK 105 +#define PMIC_ARB0_H_CLK 106 +#define PMIC_ARB1_H_CLK 107 +#define PMIC_SSBI2_SRC 108 +#define PMIC_SSBI2_CLK 109 +#define SDC1_H_CLK 110 +#define SDC2_H_CLK 111 +#define SDC3_H_CLK 112 +#define SDC4_H_CLK 113 +#define SDC5_H_CLK 114 +#define SDC1_SRC 115 +#define SDC2_SRC 116 +#define SDC3_SRC 117 +#define SDC4_SRC 118 +#define SDC5_SRC 119 +#define SDC1_CLK 120 +#define SDC2_CLK 121 +#define SDC3_CLK 122 +#define SDC4_CLK 123 +#define SDC5_CLK 124 +#define DFAB_A2_H_CLK 125 +#define USB_HS1_H_CLK 126 +#define USB_HS1_XCVR_SRC 127 +#define USB_HS1_XCVR_CLK 128 +#define USB_HSIC_H_CLK 129 
+#define USB_HSIC_XCVR_FS_SRC 130 +#define USB_HSIC_XCVR_FS_CLK 131 +#define USB_HSIC_SYSTEM_CLK_SRC 132 +#define USB_HSIC_SYSTEM_CLK 133 +#define CFPB0_C0_H_CLK 134 +#define CFPB0_C1_H_CLK 135 +#define CFPB0_D0_H_CLK 136 +#define CFPB0_D1_H_CLK 137 +#define USB_FS1_H_CLK 138 +#define USB_FS1_XCVR_FS_SRC 139 +#define USB_FS1_XCVR_FS_CLK 140 +#define USB_FS1_SYSTEM_CLK 141 +#define USB_FS2_H_CLK 142 +#define USB_FS2_XCVR_FS_SRC 143 +#define USB_FS2_XCVR_FS_CLK 144 +#define USB_FS2_SYSTEM_CLK 145 +#define GSBI_COMMON_SIM_SRC 146 +#define GSBI1_H_CLK 147 +#define GSBI2_H_CLK 148 +#define GSBI3_H_CLK 149 +#define GSBI4_H_CLK 150 +#define GSBI5_H_CLK 151 +#define GSBI6_H_CLK 152 +#define GSBI7_H_CLK 153 +#define GSBI8_H_CLK 154 +#define GSBI9_H_CLK 155 +#define GSBI10_H_CLK 156 +#define GSBI11_H_CLK 157 +#define GSBI12_H_CLK 158 +#define GSBI1_UART_SRC 159 +#define GSBI1_UART_CLK 160 +#define GSBI2_UART_SRC 161 +#define GSBI2_UART_CLK 162 +#define GSBI3_UART_SRC 163 +#define GSBI3_UART_CLK 164 +#define GSBI4_UART_SRC 165 +#define GSBI4_UART_CLK 166 +#define GSBI5_UART_SRC 167 +#define GSBI5_UART_CLK 168 +#define GSBI6_UART_SRC 169 +#define GSBI6_UART_CLK 170 +#define GSBI7_UART_SRC 171 +#define GSBI7_UART_CLK 172 +#define GSBI8_UART_SRC 173 +#define GSBI8_UART_CLK 174 +#define GSBI9_UART_SRC 175 +#define GSBI9_UART_CLK 176 +#define GSBI10_UART_SRC 177 +#define GSBI10_UART_CLK 178 +#define GSBI11_UART_SRC 179 +#define GSBI11_UART_CLK 180 +#define GSBI12_UART_SRC 181 +#define GSBI12_UART_CLK 182 +#define GSBI1_QUP_SRC 183 +#define GSBI1_QUP_CLK 184 +#define GSBI2_QUP_SRC 185 +#define GSBI2_QUP_CLK 186 +#define GSBI3_QUP_SRC 187 +#define GSBI3_QUP_CLK 188 +#define GSBI4_QUP_SRC 189 +#define GSBI4_QUP_CLK 190 +#define GSBI5_QUP_SRC 191 +#define GSBI5_QUP_CLK 192 +#define GSBI6_QUP_SRC 193 +#define GSBI6_QUP_CLK 194 +#define GSBI7_QUP_SRC 195 +#define GSBI7_QUP_CLK 196 +#define GSBI8_QUP_SRC 197 +#define GSBI8_QUP_CLK 198 +#define GSBI9_QUP_SRC 199 +#define GSBI9_QUP_CLK 200 +#define GSBI10_QUP_SRC 201 +#define GSBI10_QUP_CLK 202 +#define GSBI11_QUP_SRC 203 +#define GSBI11_QUP_CLK 204 +#define GSBI12_QUP_SRC 205 +#define GSBI12_QUP_CLK 206 +#define GSBI1_SIM_CLK 207 +#define GSBI2_SIM_CLK 208 +#define GSBI3_SIM_CLK 209 +#define GSBI4_SIM_CLK 210 +#define GSBI5_SIM_CLK 211 +#define GSBI6_SIM_CLK 212 +#define GSBI7_SIM_CLK 213 +#define GSBI8_SIM_CLK 214 +#define GSBI9_SIM_CLK 215 +#define GSBI10_SIM_CLK 216 +#define GSBI11_SIM_CLK 217 +#define GSBI12_SIM_CLK 218 +#define USB_HSIC_HSIC_CLK_SRC 219 +#define USB_HSIC_HSIC_CLK 220 +#define USB_HSIC_HSIO_CAL_CLK 221 +#define SPDM_CFG_H_CLK 222 +#define SPDM_MSTR_H_CLK 223 +#define SPDM_FF_CLK_SRC 224 +#define SPDM_FF_CLK 225 +#define SEC_CTRL_CLK 226 +#define SEC_CTRL_ACC_CLK_SRC 227 +#define SEC_CTRL_ACC_CLK 228 +#define TLMM_H_CLK 229 +#define TLMM_CLK 230 +#define SFAB_MSS_S_H_CLK 231 +#define MSS_SLP_CLK 232 +#define MSS_Q6SW_JTAG_CLK 233 +#define MSS_Q6FW_JTAG_CLK 234 +#define MSS_S_H_CLK 235 +#define MSS_CXO_SRC_CLK 236 +#define SATA_H_CLK 237 +#define SATA_CLK_SRC 238 +#define SATA_RXOOB_CLK 239 +#define SATA_PMALIVE_CLK 240 +#define SATA_PHY_REF_CLK 241 +#define TSSC_CLK_SRC 242 +#define TSSC_CLK 243 +#define PDM_SRC 244 +#define PDM_CLK 245 +#define GP0_SRC 246 +#define GP0_CLK 247 +#define GP1_SRC 248 +#define GP1_CLK 249 +#define GP2_SRC 250 +#define GP2_CLK 251 +#define MPM_CLK 252 +#define EBI1_CLK_SRC 253 +#define EBI1_CH0_CLK 254 +#define EBI1_CH1_CLK 255 +#define EBI1_2X_CLK 256 +#define EBI1_CH0_DQ_CLK 257 +#define EBI1_CH1_DQ_CLK 258 
+#define EBI1_CH0_CA_CLK 259 +#define EBI1_CH1_CA_CLK 260 +#define EBI1_XO_CLK 261 +#define SFAB_SMPSS_S_H_CLK 262 +#define PRNG_SRC 263 +#define PRNG_CLK 264 +#define PXO_SRC 265 +#define LPASS_CXO_CLK 266 +#define LPASS_PXO_CLK 267 +#define SPDM_CY_PORT0_CLK 268 +#define SPDM_CY_PORT1_CLK 269 +#define SPDM_CY_PORT2_CLK 270 +#define SPDM_CY_PORT3_CLK 271 +#define SPDM_CY_PORT4_CLK 272 +#define SPDM_CY_PORT5_CLK 273 +#define SPDM_CY_PORT6_CLK 274 +#define SPDM_CY_PORT7_CLK 275 +#define PLL0 276 +#define PLL0_VOTE 277 +#define PLL3 278 +#define PLL3_VOTE 279 +#define PLL4_VOTE 280 +#define PLL5 281 +#define PLL5_VOTE 282 +#define PLL6 283 +#define PLL6_VOTE 284 +#define PLL7_VOTE 285 +#define PLL8 286 +#define PLL8_VOTE 287 +#define PLL9 288 +#define PLL10 289 +#define PLL11 290 +#define PLL12 291 +#define PLL13 292 +#define PLL14 293 +#define PLL14_VOTE 294 +#define USB_HS3_H_CLK 295 +#define USB_HS3_XCVR_SRC 296 +#define USB_HS3_XCVR_CLK 297 +#define USB_HS4_H_CLK 298 +#define USB_HS4_XCVR_SRC 299 +#define USB_HS4_XCVR_CLK 300 +#define SATA_PHY_CFG_CLK 301 +#define SATA_A_CLK 302 +#define CE3_SRC 303 +#define CE3_CORE_CLK 304 +#define CE3_H_CLK 305 +#define USB_HS1_SYSTEM_CLK_SRC 306 +#define USB_HS1_SYSTEM_CLK 307 +#define EBI2_CLK 308 +#define EBI2_AON_CLK 309 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8660.h b/include/dt-bindings/clock/qcom,gcc-msm8660.h new file mode 100644 index 000000000..67665f681 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8660.h @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8660_H +#define _DT_BINDINGS_CLK_MSM_GCC_8660_H + +#define AFAB_CLK_SRC 0 +#define AFAB_CORE_CLK 1 +#define SCSS_A_CLK 2 +#define SCSS_H_CLK 3 +#define SCSS_XO_SRC_CLK 4 +#define AFAB_EBI1_CH0_A_CLK 5 +#define AFAB_EBI1_CH1_A_CLK 6 +#define AFAB_AXI_S0_FCLK 7 +#define AFAB_AXI_S1_FCLK 8 +#define AFAB_AXI_S2_FCLK 9 +#define AFAB_AXI_S3_FCLK 10 +#define AFAB_AXI_S4_FCLK 11 +#define SFAB_CORE_CLK 12 +#define SFAB_AXI_S0_FCLK 13 +#define SFAB_AXI_S1_FCLK 14 +#define SFAB_AXI_S2_FCLK 15 +#define SFAB_AXI_S3_FCLK 16 +#define SFAB_AXI_S4_FCLK 17 +#define SFAB_AHB_S0_FCLK 18 +#define SFAB_AHB_S1_FCLK 19 +#define SFAB_AHB_S2_FCLK 20 +#define SFAB_AHB_S3_FCLK 21 +#define SFAB_AHB_S4_FCLK 22 +#define SFAB_AHB_S5_FCLK 23 +#define SFAB_AHB_S6_FCLK 24 +#define SFAB_ADM0_M0_A_CLK 25 +#define SFAB_ADM0_M1_A_CLK 26 +#define SFAB_ADM0_M2_A_CLK 27 +#define ADM0_CLK 28 +#define ADM0_PBUS_CLK 29 +#define SFAB_ADM1_M0_A_CLK 30 +#define SFAB_ADM1_M1_A_CLK 31 +#define SFAB_ADM1_M2_A_CLK 32 +#define MMFAB_ADM1_M3_A_CLK 33 +#define ADM1_CLK 34 +#define ADM1_PBUS_CLK 35 +#define IMEM0_A_CLK 36 +#define MAHB0_CLK 37 +#define SFAB_LPASS_Q6_A_CLK 38 +#define SFAB_AFAB_M_A_CLK 39 +#define AFAB_SFAB_M0_A_CLK 40 +#define AFAB_SFAB_M1_A_CLK 41 +#define DFAB_CLK_SRC 42 +#define DFAB_CLK 43 +#define DFAB_CORE_CLK 44 +#define SFAB_DFAB_M_A_CLK 45 +#define DFAB_SFAB_M_A_CLK 46 +#define DFAB_SWAY0_H_CLK 47 +#define DFAB_SWAY1_H_CLK 48 +#define DFAB_ARB0_H_CLK 49 +#define DFAB_ARB1_H_CLK 50 +#define PPSS_H_CLK 51 +#define PPSS_PROC_CLK 52 +#define PPSS_TIMER0_CLK 53 +#define PPSS_TIMER1_CLK 54 +#define PMEM_A_CLK 55 +#define DMA_BAM_H_CLK 56 +#define SIC_H_CLK 57 +#define SPS_TIC_H_CLK 58 +#define SLIMBUS_H_CLK 59 +#define SLIMBUS_XO_SRC_CLK 60 +#define CFPB_2X_CLK_SRC 61 +#define CFPB_CLK 62 +#define CFPB0_H_CLK 63 +#define CFPB1_H_CLK 64 +#define CFPB2_H_CLK 65 +#define EBI2_2X_CLK 66 +#define EBI2_CLK 67 +#define SFAB_CFPB_M_H_CLK 68 +#define CFPB_MASTER_H_CLK 69 +#define SFAB_CFPB_S_HCLK 70 +#define CFPB_SPLITTER_H_CLK 71 +#define TSIF_H_CLK 72 +#define TSIF_INACTIVITY_TIMERS_CLK 73 +#define TSIF_REF_SRC 74 +#define TSIF_REF_CLK 75 +#define CE1_H_CLK 76 +#define CE2_H_CLK 77 +#define SFPB_H_CLK_SRC 78 +#define SFPB_H_CLK 79 +#define SFAB_SFPB_M_H_CLK 80 +#define SFAB_SFPB_S_H_CLK 81 +#define RPM_PROC_CLK 82 +#define RPM_BUS_H_CLK 83 +#define RPM_SLEEP_CLK 84 +#define RPM_TIMER_CLK 85 +#define MODEM_AHB1_H_CLK 86 +#define MODEM_AHB2_H_CLK 87 +#define RPM_MSG_RAM_H_CLK 88 +#define SC_H_CLK 89 +#define SC_A_CLK 90 +#define PMIC_ARB0_H_CLK 91 +#define PMIC_ARB1_H_CLK 92 +#define PMIC_SSBI2_SRC 93 +#define PMIC_SSBI2_CLK 94 +#define SDC1_H_CLK 95 +#define SDC2_H_CLK 96 +#define SDC3_H_CLK 97 +#define SDC4_H_CLK 98 +#define SDC5_H_CLK 99 +#define SDC1_SRC 100 +#define SDC2_SRC 101 +#define SDC3_SRC 102 +#define SDC4_SRC 103 +#define SDC5_SRC 104 +#define SDC1_CLK 105 +#define SDC2_CLK 106 +#define SDC3_CLK 107 +#define SDC4_CLK 108 +#define SDC5_CLK 109 +#define USB_HS1_H_CLK 110 +#define USB_HS1_XCVR_SRC 111 +#define USB_HS1_XCVR_CLK 112 +#define USB_HS2_H_CLK 113 +#define USB_HS2_XCVR_SRC 114 +#define USB_HS2_XCVR_CLK 115 +#define USB_FS1_H_CLK 116 +#define USB_FS1_XCVR_FS_SRC 117 +#define USB_FS1_XCVR_FS_CLK 118 +#define USB_FS1_SYSTEM_CLK 119 +#define USB_FS2_H_CLK 120 +#define USB_FS2_XCVR_FS_SRC 121 +#define USB_FS2_XCVR_FS_CLK 122 +#define USB_FS2_SYSTEM_CLK 123 +#define GSBI_COMMON_SIM_SRC 124 +#define GSBI1_H_CLK 125 +#define GSBI2_H_CLK 126 +#define GSBI3_H_CLK 127 +#define 
GSBI4_H_CLK 128 +#define GSBI5_H_CLK 129 +#define GSBI6_H_CLK 130 +#define GSBI7_H_CLK 131 +#define GSBI8_H_CLK 132 +#define GSBI9_H_CLK 133 +#define GSBI10_H_CLK 134 +#define GSBI11_H_CLK 135 +#define GSBI12_H_CLK 136 +#define GSBI1_UART_SRC 137 +#define GSBI1_UART_CLK 138 +#define GSBI2_UART_SRC 139 +#define GSBI2_UART_CLK 140 +#define GSBI3_UART_SRC 141 +#define GSBI3_UART_CLK 142 +#define GSBI4_UART_SRC 143 +#define GSBI4_UART_CLK 144 +#define GSBI5_UART_SRC 145 +#define GSBI5_UART_CLK 146 +#define GSBI6_UART_SRC 147 +#define GSBI6_UART_CLK 148 +#define GSBI7_UART_SRC 149 +#define GSBI7_UART_CLK 150 +#define GSBI8_UART_SRC 151 +#define GSBI8_UART_CLK 152 +#define GSBI9_UART_SRC 153 +#define GSBI9_UART_CLK 154 +#define GSBI10_UART_SRC 155 +#define GSBI10_UART_CLK 156 +#define GSBI11_UART_SRC 157 +#define GSBI11_UART_CLK 158 +#define GSBI12_UART_SRC 159 +#define GSBI12_UART_CLK 160 +#define GSBI1_QUP_SRC 161 +#define GSBI1_QUP_CLK 162 +#define GSBI2_QUP_SRC 163 +#define GSBI2_QUP_CLK 164 +#define GSBI3_QUP_SRC 165 +#define GSBI3_QUP_CLK 166 +#define GSBI4_QUP_SRC 167 +#define GSBI4_QUP_CLK 168 +#define GSBI5_QUP_SRC 169 +#define GSBI5_QUP_CLK 170 +#define GSBI6_QUP_SRC 171 +#define GSBI6_QUP_CLK 172 +#define GSBI7_QUP_SRC 173 +#define GSBI7_QUP_CLK 174 +#define GSBI8_QUP_SRC 175 +#define GSBI8_QUP_CLK 176 +#define GSBI9_QUP_SRC 177 +#define GSBI9_QUP_CLK 178 +#define GSBI10_QUP_SRC 179 +#define GSBI10_QUP_CLK 180 +#define GSBI11_QUP_SRC 181 +#define GSBI11_QUP_CLK 182 +#define GSBI12_QUP_SRC 183 +#define GSBI12_QUP_CLK 184 +#define GSBI1_SIM_CLK 185 +#define GSBI2_SIM_CLK 186 +#define GSBI3_SIM_CLK 187 +#define GSBI4_SIM_CLK 188 +#define GSBI5_SIM_CLK 189 +#define GSBI6_SIM_CLK 190 +#define GSBI7_SIM_CLK 191 +#define GSBI8_SIM_CLK 192 +#define GSBI9_SIM_CLK 193 +#define GSBI10_SIM_CLK 194 +#define GSBI11_SIM_CLK 195 +#define GSBI12_SIM_CLK 196 +#define SPDM_CFG_H_CLK 197 +#define SPDM_MSTR_H_CLK 198 +#define SPDM_FF_CLK_SRC 199 +#define SPDM_FF_CLK 200 +#define SEC_CTRL_CLK 201 +#define SEC_CTRL_ACC_CLK_SRC 202 +#define SEC_CTRL_ACC_CLK 203 +#define TLMM_H_CLK 204 +#define TLMM_CLK 205 +#define MARM_CLK_SRC 206 +#define MARM_CLK 207 +#define MAHB1_SRC 208 +#define MAHB1_CLK 209 +#define SFAB_MSS_S_H_CLK 210 +#define MAHB2_SRC 211 +#define MAHB2_CLK 212 +#define MSS_MODEM_CLK_SRC 213 +#define MSS_MODEM_CXO_CLK 214 +#define MSS_SLP_CLK 215 +#define MSS_SYS_REF_CLK 216 +#define TSSC_CLK_SRC 217 +#define TSSC_CLK 218 +#define PDM_SRC 219 +#define PDM_CLK 220 +#define GP0_SRC 221 +#define GP0_CLK 222 +#define GP1_SRC 223 +#define GP1_CLK 224 +#define GP2_SRC 225 +#define GP2_CLK 226 +#define PMEM_CLK 227 +#define MPM_CLK 228 +#define EBI1_ASFAB_SRC 229 +#define EBI1_CLK_SRC 230 +#define EBI1_CH0_CLK 231 +#define EBI1_CH1_CLK 232 +#define SFAB_SMPSS_S_H_CLK 233 +#define PRNG_SRC 234 +#define PRNG_CLK 235 +#define PXO_SRC 236 +#define LPASS_CXO_CLK 237 +#define LPASS_PXO_CLK 238 +#define SPDM_CY_PORT0_CLK 239 +#define SPDM_CY_PORT1_CLK 240 +#define SPDM_CY_PORT2_CLK 241 +#define SPDM_CY_PORT3_CLK 242 +#define SPDM_CY_PORT4_CLK 243 +#define SPDM_CY_PORT5_CLK 244 +#define SPDM_CY_PORT6_CLK 245 +#define SPDM_CY_PORT7_CLK 246 +#define PLL0 247 +#define PLL0_VOTE 248 +#define PLL5 249 +#define PLL6 250 +#define PLL6_VOTE 251 +#define PLL8 252 +#define PLL8_VOTE 253 +#define PLL9 254 +#define PLL10 255 +#define PLL11 256 +#define PLL12 257 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8916.h b/include/dt-bindings/clock/qcom,gcc-msm8916.h new file mode 100644 index 000000000..28a27a4ed 
--- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8916.h @@ -0,0 +1,187 @@ +/* + * Copyright 2015 Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8916_H +#define _DT_BINDINGS_CLK_MSM_GCC_8916_H + +#define GPLL0 0 +#define GPLL0_VOTE 1 +#define BIMC_PLL 2 +#define BIMC_PLL_VOTE 3 +#define GPLL1 4 +#define GPLL1_VOTE 5 +#define GPLL2 6 +#define GPLL2_VOTE 7 +#define PCNOC_BFDCD_CLK_SRC 8 +#define SYSTEM_NOC_BFDCD_CLK_SRC 9 +#define CAMSS_AHB_CLK_SRC 10 +#define APSS_AHB_CLK_SRC 11 +#define CSI0_CLK_SRC 12 +#define CSI1_CLK_SRC 13 +#define GFX3D_CLK_SRC 14 +#define VFE0_CLK_SRC 15 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 16 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 17 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 18 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 19 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 20 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 21 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 22 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 23 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 24 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 25 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 26 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 27 +#define BLSP1_UART1_APPS_CLK_SRC 28 +#define BLSP1_UART2_APPS_CLK_SRC 29 +#define CCI_CLK_SRC 30 +#define CAMSS_GP0_CLK_SRC 31 +#define CAMSS_GP1_CLK_SRC 32 +#define JPEG0_CLK_SRC 33 +#define MCLK0_CLK_SRC 34 +#define MCLK1_CLK_SRC 35 +#define CSI0PHYTIMER_CLK_SRC 36 +#define CSI1PHYTIMER_CLK_SRC 37 +#define CPP_CLK_SRC 38 +#define CRYPTO_CLK_SRC 39 +#define GP1_CLK_SRC 40 +#define GP2_CLK_SRC 41 +#define GP3_CLK_SRC 42 +#define BYTE0_CLK_SRC 43 +#define ESC0_CLK_SRC 44 +#define MDP_CLK_SRC 45 +#define PCLK0_CLK_SRC 46 +#define VSYNC_CLK_SRC 47 +#define PDM2_CLK_SRC 48 +#define SDCC1_APPS_CLK_SRC 49 +#define SDCC2_APPS_CLK_SRC 50 +#define APSS_TCU_CLK_SRC 51 +#define USB_HS_SYSTEM_CLK_SRC 52 +#define VCODEC0_CLK_SRC 53 +#define GCC_BLSP1_AHB_CLK 54 +#define GCC_BLSP1_SLEEP_CLK 55 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 56 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 57 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 58 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 59 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 60 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 61 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 62 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 63 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 64 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 65 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 66 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 67 +#define GCC_BLSP1_UART1_APPS_CLK 68 +#define GCC_BLSP1_UART2_APPS_CLK 69 +#define GCC_BOOT_ROM_AHB_CLK 70 +#define GCC_CAMSS_CCI_AHB_CLK 71 +#define GCC_CAMSS_CCI_CLK 72 +#define GCC_CAMSS_CSI0_AHB_CLK 73 +#define GCC_CAMSS_CSI0_CLK 74 +#define GCC_CAMSS_CSI0PHY_CLK 75 +#define GCC_CAMSS_CSI0PIX_CLK 76 +#define GCC_CAMSS_CSI0RDI_CLK 77 +#define GCC_CAMSS_CSI1_AHB_CLK 78 +#define GCC_CAMSS_CSI1_CLK 79 +#define GCC_CAMSS_CSI1PHY_CLK 80 +#define GCC_CAMSS_CSI1PIX_CLK 81 +#define GCC_CAMSS_CSI1RDI_CLK 82 +#define GCC_CAMSS_CSI_VFE0_CLK 83 +#define GCC_CAMSS_GP0_CLK 84 +#define GCC_CAMSS_GP1_CLK 85 +#define GCC_CAMSS_ISPIF_AHB_CLK 86 +#define GCC_CAMSS_JPEG0_CLK 87 +#define GCC_CAMSS_JPEG_AHB_CLK 88 +#define 
GCC_CAMSS_JPEG_AXI_CLK 89 +#define GCC_CAMSS_MCLK0_CLK 90 +#define GCC_CAMSS_MCLK1_CLK 91 +#define GCC_CAMSS_MICRO_AHB_CLK 92 +#define GCC_CAMSS_CSI0PHYTIMER_CLK 93 +#define GCC_CAMSS_CSI1PHYTIMER_CLK 94 +#define GCC_CAMSS_AHB_CLK 95 +#define GCC_CAMSS_TOP_AHB_CLK 96 +#define GCC_CAMSS_CPP_AHB_CLK 97 +#define GCC_CAMSS_CPP_CLK 98 +#define GCC_CAMSS_VFE0_CLK 99 +#define GCC_CAMSS_VFE_AHB_CLK 100 +#define GCC_CAMSS_VFE_AXI_CLK 101 +#define GCC_CRYPTO_AHB_CLK 102 +#define GCC_CRYPTO_AXI_CLK 103 +#define GCC_CRYPTO_CLK 104 +#define GCC_OXILI_GMEM_CLK 105 +#define GCC_GP1_CLK 106 +#define GCC_GP2_CLK 107 +#define GCC_GP3_CLK 108 +#define GCC_MDSS_AHB_CLK 109 +#define GCC_MDSS_AXI_CLK 110 +#define GCC_MDSS_BYTE0_CLK 111 +#define GCC_MDSS_ESC0_CLK 112 +#define GCC_MDSS_MDP_CLK 113 +#define GCC_MDSS_PCLK0_CLK 114 +#define GCC_MDSS_VSYNC_CLK 115 +#define GCC_MSS_CFG_AHB_CLK 116 +#define GCC_OXILI_AHB_CLK 117 +#define GCC_OXILI_GFX3D_CLK 118 +#define GCC_PDM2_CLK 119 +#define GCC_PDM_AHB_CLK 120 +#define GCC_PRNG_AHB_CLK 121 +#define GCC_SDCC1_AHB_CLK 122 +#define GCC_SDCC1_APPS_CLK 123 +#define GCC_SDCC2_AHB_CLK 124 +#define GCC_SDCC2_APPS_CLK 125 +#define GCC_GTCU_AHB_CLK 126 +#define GCC_JPEG_TBU_CLK 127 +#define GCC_MDP_TBU_CLK 128 +#define GCC_SMMU_CFG_CLK 129 +#define GCC_VENUS_TBU_CLK 130 +#define GCC_VFE_TBU_CLK 131 +#define GCC_USB2A_PHY_SLEEP_CLK 132 +#define GCC_USB_HS_AHB_CLK 133 +#define GCC_USB_HS_SYSTEM_CLK 134 +#define GCC_VENUS0_AHB_CLK 135 +#define GCC_VENUS0_AXI_CLK 136 +#define GCC_VENUS0_VCODEC0_CLK 137 +#define BIMC_DDR_CLK_SRC 138 +#define GCC_APSS_TCU_CLK 139 +#define GCC_GFX_TCU_CLK 140 +#define BIMC_GPU_CLK_SRC 141 +#define GCC_BIMC_GFX_CLK 142 +#define GCC_BIMC_GPU_CLK 143 +#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 144 +#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 145 +#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 146 +#define ULTAUDIO_XO_CLK_SRC 147 +#define ULTAUDIO_AHBFABRIC_CLK_SRC 148 +#define CODEC_DIGCODEC_CLK_SRC 149 +#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 150 +#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 151 +#define GCC_ULTAUDIO_AVSYNC_XO_CLK 152 +#define GCC_ULTAUDIO_STC_XO_CLK 153 +#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 154 +#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 155 +#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 156 +#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157 +#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158 +#define GCC_CODEC_DIGCODEC_CLK 159 +#define GCC_MSS_Q6_BIMC_AXI_CLK 160 + +/* Indexes for GDSCs */ +#define BIMC_GDSC 0 +#define VENUS_GDSC 1 +#define MDSS_GDSC 2 +#define JPEG_GDSC 3 +#define VFE_GDSC 4 +#define OXILI_GDSC 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8960.h b/include/dt-bindings/clock/qcom,gcc-msm8960.h new file mode 100644 index 000000000..7d20eedfe --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8960.h @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8960_H +#define _DT_BINDINGS_CLK_MSM_GCC_8960_H + +#define AFAB_CLK_SRC 0 +#define AFAB_CORE_CLK 1 +#define SFAB_MSS_Q6_SW_A_CLK 2 +#define SFAB_MSS_Q6_FW_A_CLK 3 +#define QDSS_STM_CLK 4 +#define SCSS_A_CLK 5 +#define SCSS_H_CLK 6 +#define SCSS_XO_SRC_CLK 7 +#define AFAB_EBI1_CH0_A_CLK 8 +#define AFAB_EBI1_CH1_A_CLK 9 +#define AFAB_AXI_S0_FCLK 10 +#define AFAB_AXI_S1_FCLK 11 +#define AFAB_AXI_S2_FCLK 12 +#define AFAB_AXI_S3_FCLK 13 +#define AFAB_AXI_S4_FCLK 14 +#define SFAB_CORE_CLK 15 +#define SFAB_AXI_S0_FCLK 16 +#define SFAB_AXI_S1_FCLK 17 +#define SFAB_AXI_S2_FCLK 18 +#define SFAB_AXI_S3_FCLK 19 +#define SFAB_AXI_S4_FCLK 20 +#define SFAB_AHB_S0_FCLK 21 +#define SFAB_AHB_S1_FCLK 22 +#define SFAB_AHB_S2_FCLK 23 +#define SFAB_AHB_S3_FCLK 24 +#define SFAB_AHB_S4_FCLK 25 +#define SFAB_AHB_S5_FCLK 26 +#define SFAB_AHB_S6_FCLK 27 +#define SFAB_AHB_S7_FCLK 28 +#define QDSS_AT_CLK_SRC 29 +#define QDSS_AT_CLK 30 +#define QDSS_TRACECLKIN_CLK_SRC 31 +#define QDSS_TRACECLKIN_CLK 32 +#define QDSS_TSCTR_CLK_SRC 33 +#define QDSS_TSCTR_CLK 34 +#define SFAB_ADM0_M0_A_CLK 35 +#define SFAB_ADM0_M1_A_CLK 36 +#define SFAB_ADM0_M2_H_CLK 37 +#define ADM0_CLK 38 +#define ADM0_PBUS_CLK 39 +#define MSS_XPU_CLK 40 +#define IMEM0_A_CLK 41 +#define QDSS_H_CLK 42 +#define PCIE_A_CLK 43 +#define PCIE_AUX_CLK 44 +#define PCIE_PHY_REF_CLK 45 +#define PCIE_H_CLK 46 +#define SFAB_CLK_SRC 47 +#define MAHB0_CLK 48 +#define Q6SW_CLK_SRC 49 +#define Q6SW_CLK 50 +#define Q6FW_CLK_SRC 51 +#define Q6FW_CLK 52 +#define SFAB_MSS_M_A_CLK 53 +#define SFAB_USB3_M_A_CLK 54 +#define SFAB_LPASS_Q6_A_CLK 55 +#define SFAB_AFAB_M_A_CLK 56 +#define AFAB_SFAB_M0_A_CLK 57 +#define AFAB_SFAB_M1_A_CLK 58 +#define SFAB_SATA_S_H_CLK 59 +#define DFAB_CLK_SRC 60 +#define DFAB_CLK 61 +#define SFAB_DFAB_M_A_CLK 62 +#define DFAB_SFAB_M_A_CLK 63 +#define DFAB_SWAY0_H_CLK 64 +#define DFAB_SWAY1_H_CLK 65 +#define DFAB_ARB0_H_CLK 66 +#define DFAB_ARB1_H_CLK 67 +#define PPSS_H_CLK 68 +#define PPSS_PROC_CLK 69 +#define PPSS_TIMER0_CLK 70 +#define PPSS_TIMER1_CLK 71 +#define PMEM_A_CLK 72 +#define DMA_BAM_H_CLK 73 +#define SIC_H_CLK 74 +#define SPS_TIC_H_CLK 75 +#define SLIMBUS_H_CLK 76 +#define SLIMBUS_XO_SRC_CLK 77 +#define CFPB_2X_CLK_SRC 78 +#define CFPB_CLK 79 +#define CFPB0_H_CLK 80 +#define CFPB1_H_CLK 81 +#define CFPB2_H_CLK 82 +#define SFAB_CFPB_M_H_CLK 83 +#define CFPB_MASTER_H_CLK 84 +#define SFAB_CFPB_S_H_CLK 85 +#define CFPB_SPLITTER_H_CLK 86 +#define TSIF_H_CLK 87 +#define TSIF_INACTIVITY_TIMERS_CLK 88 +#define TSIF_REF_SRC 89 +#define TSIF_REF_CLK 90 +#define CE1_H_CLK 91 +#define CE1_CORE_CLK 92 +#define CE1_SLEEP_CLK 93 +#define CE2_H_CLK 94 +#define CE2_CORE_CLK 95 +#define SFPB_H_CLK_SRC 97 +#define SFPB_H_CLK 98 +#define SFAB_SFPB_M_H_CLK 99 +#define SFAB_SFPB_S_H_CLK 100 +#define RPM_PROC_CLK 101 +#define RPM_BUS_H_CLK 102 +#define RPM_SLEEP_CLK 103 +#define RPM_TIMER_CLK 104 +#define RPM_MSG_RAM_H_CLK 105 +#define PMIC_ARB0_H_CLK 106 +#define PMIC_ARB1_H_CLK 107 +#define PMIC_SSBI2_SRC 108 +#define PMIC_SSBI2_CLK 109 +#define SDC1_H_CLK 110 +#define SDC2_H_CLK 111 +#define SDC3_H_CLK 112 +#define SDC4_H_CLK 113 +#define SDC5_H_CLK 114 +#define SDC1_SRC 115 +#define SDC2_SRC 116 +#define SDC3_SRC 117 +#define SDC4_SRC 118 +#define SDC5_SRC 119 +#define SDC1_CLK 120 +#define SDC2_CLK 121 +#define SDC3_CLK 122 +#define SDC4_CLK 123 +#define SDC5_CLK 124 +#define DFAB_A2_H_CLK 125 +#define USB_HS1_H_CLK 126 +#define USB_HS1_XCVR_SRC 127 +#define USB_HS1_XCVR_CLK 128 +#define USB_HSIC_H_CLK 129 
+#define USB_HSIC_XCVR_FS_SRC 130 +#define USB_HSIC_XCVR_FS_CLK 131 +#define USB_HSIC_SYSTEM_CLK_SRC 132 +#define USB_HSIC_SYSTEM_CLK 133 +#define CFPB0_C0_H_CLK 134 +#define CFPB0_C1_H_CLK 135 +#define CFPB0_D0_H_CLK 136 +#define CFPB0_D1_H_CLK 137 +#define USB_FS1_H_CLK 138 +#define USB_FS1_XCVR_FS_SRC 139 +#define USB_FS1_XCVR_FS_CLK 140 +#define USB_FS1_SYSTEM_CLK 141 +#define USB_FS2_H_CLK 142 +#define USB_FS2_XCVR_FS_SRC 143 +#define USB_FS2_XCVR_FS_CLK 144 +#define USB_FS2_SYSTEM_CLK 145 +#define GSBI_COMMON_SIM_SRC 146 +#define GSBI1_H_CLK 147 +#define GSBI2_H_CLK 148 +#define GSBI3_H_CLK 149 +#define GSBI4_H_CLK 150 +#define GSBI5_H_CLK 151 +#define GSBI6_H_CLK 152 +#define GSBI7_H_CLK 153 +#define GSBI8_H_CLK 154 +#define GSBI9_H_CLK 155 +#define GSBI10_H_CLK 156 +#define GSBI11_H_CLK 157 +#define GSBI12_H_CLK 158 +#define GSBI1_UART_SRC 159 +#define GSBI1_UART_CLK 160 +#define GSBI2_UART_SRC 161 +#define GSBI2_UART_CLK 162 +#define GSBI3_UART_SRC 163 +#define GSBI3_UART_CLK 164 +#define GSBI4_UART_SRC 165 +#define GSBI4_UART_CLK 166 +#define GSBI5_UART_SRC 167 +#define GSBI5_UART_CLK 168 +#define GSBI6_UART_SRC 169 +#define GSBI6_UART_CLK 170 +#define GSBI7_UART_SRC 171 +#define GSBI7_UART_CLK 172 +#define GSBI8_UART_SRC 173 +#define GSBI8_UART_CLK 174 +#define GSBI9_UART_SRC 175 +#define GSBI9_UART_CLK 176 +#define GSBI10_UART_SRC 177 +#define GSBI10_UART_CLK 178 +#define GSBI11_UART_SRC 179 +#define GSBI11_UART_CLK 180 +#define GSBI12_UART_SRC 181 +#define GSBI12_UART_CLK 182 +#define GSBI1_QUP_SRC 183 +#define GSBI1_QUP_CLK 184 +#define GSBI2_QUP_SRC 185 +#define GSBI2_QUP_CLK 186 +#define GSBI3_QUP_SRC 187 +#define GSBI3_QUP_CLK 188 +#define GSBI4_QUP_SRC 189 +#define GSBI4_QUP_CLK 190 +#define GSBI5_QUP_SRC 191 +#define GSBI5_QUP_CLK 192 +#define GSBI6_QUP_SRC 193 +#define GSBI6_QUP_CLK 194 +#define GSBI7_QUP_SRC 195 +#define GSBI7_QUP_CLK 196 +#define GSBI8_QUP_SRC 197 +#define GSBI8_QUP_CLK 198 +#define GSBI9_QUP_SRC 199 +#define GSBI9_QUP_CLK 200 +#define GSBI10_QUP_SRC 201 +#define GSBI10_QUP_CLK 202 +#define GSBI11_QUP_SRC 203 +#define GSBI11_QUP_CLK 204 +#define GSBI12_QUP_SRC 205 +#define GSBI12_QUP_CLK 206 +#define GSBI1_SIM_CLK 207 +#define GSBI2_SIM_CLK 208 +#define GSBI3_SIM_CLK 209 +#define GSBI4_SIM_CLK 210 +#define GSBI5_SIM_CLK 211 +#define GSBI6_SIM_CLK 212 +#define GSBI7_SIM_CLK 213 +#define GSBI8_SIM_CLK 214 +#define GSBI9_SIM_CLK 215 +#define GSBI10_SIM_CLK 216 +#define GSBI11_SIM_CLK 217 +#define GSBI12_SIM_CLK 218 +#define USB_HSIC_HSIC_CLK_SRC 219 +#define USB_HSIC_HSIC_CLK 220 +#define USB_HSIC_HSIO_CAL_CLK 221 +#define SPDM_CFG_H_CLK 222 +#define SPDM_MSTR_H_CLK 223 +#define SPDM_FF_CLK_SRC 224 +#define SPDM_FF_CLK 225 +#define SEC_CTRL_CLK 226 +#define SEC_CTRL_ACC_CLK_SRC 227 +#define SEC_CTRL_ACC_CLK 228 +#define TLMM_H_CLK 229 +#define TLMM_CLK 230 +#define SFAB_MSS_S_H_CLK 231 +#define MSS_SLP_CLK 232 +#define MSS_Q6SW_JTAG_CLK 233 +#define MSS_Q6FW_JTAG_CLK 234 +#define MSS_S_H_CLK 235 +#define MSS_CXO_SRC_CLK 236 +#define SATA_H_CLK 237 +#define SATA_CLK_SRC 238 +#define SATA_RXOOB_CLK 239 +#define SATA_PMALIVE_CLK 240 +#define SATA_PHY_REF_CLK 241 +#define TSSC_CLK_SRC 242 +#define TSSC_CLK 243 +#define PDM_SRC 244 +#define PDM_CLK 245 +#define GP0_SRC 246 +#define GP0_CLK 247 +#define GP1_SRC 248 +#define GP1_CLK 249 +#define GP2_SRC 250 +#define GP2_CLK 251 +#define MPM_CLK 252 +#define EBI1_CLK_SRC 253 +#define EBI1_CH0_CLK 254 +#define EBI1_CH1_CLK 255 +#define EBI1_2X_CLK 256 +#define EBI1_CH0_DQ_CLK 257 +#define EBI1_CH1_DQ_CLK 258 
+#define EBI1_CH0_CA_CLK 259 +#define EBI1_CH1_CA_CLK 260 +#define EBI1_XO_CLK 261 +#define SFAB_SMPSS_S_H_CLK 262 +#define PRNG_SRC 263 +#define PRNG_CLK 264 +#define PXO_SRC 265 +#define LPASS_CXO_CLK 266 +#define LPASS_PXO_CLK 267 +#define SPDM_CY_PORT0_CLK 268 +#define SPDM_CY_PORT1_CLK 269 +#define SPDM_CY_PORT2_CLK 270 +#define SPDM_CY_PORT3_CLK 271 +#define SPDM_CY_PORT4_CLK 272 +#define SPDM_CY_PORT5_CLK 273 +#define SPDM_CY_PORT6_CLK 274 +#define SPDM_CY_PORT7_CLK 275 +#define PLL0 276 +#define PLL0_VOTE 277 +#define PLL3 278 +#define PLL3_VOTE 279 +#define PLL4_VOTE 280 +#define PLL5 281 +#define PLL5_VOTE 282 +#define PLL6 283 +#define PLL6_VOTE 284 +#define PLL7_VOTE 285 +#define PLL8 286 +#define PLL8_VOTE 287 +#define PLL9 288 +#define PLL10 289 +#define PLL11 290 +#define PLL12 291 +#define PLL13 292 +#define PLL14 293 +#define PLL14_VOTE 294 +#define USB_HS3_H_CLK 295 +#define USB_HS3_XCVR_SRC 296 +#define USB_HS3_XCVR_CLK 297 +#define USB_HS4_H_CLK 298 +#define USB_HS4_XCVR_SRC 299 +#define USB_HS4_XCVR_CLK 300 +#define SATA_PHY_CFG_CLK 301 +#define SATA_A_CLK 302 +#define CE3_SRC 303 +#define CE3_CORE_CLK 304 +#define CE3_H_CLK 305 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8974.h b/include/dt-bindings/clock/qcom,gcc-msm8974.h new file mode 100644 index 000000000..81d32f639 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8974.h @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8974_H +#define _DT_BINDINGS_CLK_MSM_GCC_8974_H + +#define GPLL0 0 +#define GPLL0_VOTE 1 +#define CONFIG_NOC_CLK_SRC 2 +#define GPLL2 3 +#define GPLL2_VOTE 4 +#define GPLL3 5 +#define GPLL3_VOTE 6 +#define PERIPH_NOC_CLK_SRC 7 +#define BLSP_UART_SIM_CLK_SRC 8 +#define QDSS_TSCTR_CLK_SRC 9 +#define BIMC_DDR_CLK_SRC 10 +#define SYSTEM_NOC_CLK_SRC 11 +#define GPLL1 12 +#define GPLL1_VOTE 13 +#define RPM_CLK_SRC 14 +#define GCC_BIMC_CLK 15 +#define BIMC_DDR_CPLL0_ROOT_CLK_SRC 16 +#define KPSS_AHB_CLK_SRC 17 +#define QDSS_AT_CLK_SRC 18 +#define USB30_MASTER_CLK_SRC 19 +#define BIMC_DDR_CPLL1_ROOT_CLK_SRC 20 +#define QDSS_STM_CLK_SRC 21 +#define ACC_CLK_SRC 22 +#define SEC_CTRL_CLK_SRC 23 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 24 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 25 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 26 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 27 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 28 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 29 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 30 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 31 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 32 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 33 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 34 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 35 +#define BLSP1_UART1_APPS_CLK_SRC 36 +#define BLSP1_UART2_APPS_CLK_SRC 37 +#define BLSP1_UART3_APPS_CLK_SRC 38 +#define BLSP1_UART4_APPS_CLK_SRC 39 +#define BLSP1_UART5_APPS_CLK_SRC 40 +#define BLSP1_UART6_APPS_CLK_SRC 41 +#define BLSP2_QUP1_I2C_APPS_CLK_SRC 42 +#define BLSP2_QUP1_SPI_APPS_CLK_SRC 43 +#define BLSP2_QUP2_I2C_APPS_CLK_SRC 44 +#define BLSP2_QUP2_SPI_APPS_CLK_SRC 45 +#define BLSP2_QUP3_I2C_APPS_CLK_SRC 46 +#define BLSP2_QUP3_SPI_APPS_CLK_SRC 47 +#define BLSP2_QUP4_I2C_APPS_CLK_SRC 48 +#define BLSP2_QUP4_SPI_APPS_CLK_SRC 49 +#define BLSP2_QUP5_I2C_APPS_CLK_SRC 50 +#define BLSP2_QUP5_SPI_APPS_CLK_SRC 51 +#define BLSP2_QUP6_I2C_APPS_CLK_SRC 52 +#define BLSP2_QUP6_SPI_APPS_CLK_SRC 53 +#define BLSP2_UART1_APPS_CLK_SRC 54 +#define BLSP2_UART2_APPS_CLK_SRC 55 +#define BLSP2_UART3_APPS_CLK_SRC 56 +#define BLSP2_UART4_APPS_CLK_SRC 57 +#define BLSP2_UART5_APPS_CLK_SRC 58 +#define BLSP2_UART6_APPS_CLK_SRC 59 +#define CE1_CLK_SRC 60 +#define CE2_CLK_SRC 61 +#define GP1_CLK_SRC 62 +#define GP2_CLK_SRC 63 +#define GP3_CLK_SRC 64 +#define PDM2_CLK_SRC 65 +#define QDSS_TRACECLKIN_CLK_SRC 66 +#define RBCPR_CLK_SRC 67 +#define SDCC1_APPS_CLK_SRC 68 +#define SDCC2_APPS_CLK_SRC 69 +#define SDCC3_APPS_CLK_SRC 70 +#define SDCC4_APPS_CLK_SRC 71 +#define SPMI_AHB_CLK_SRC 72 +#define SPMI_SER_CLK_SRC 73 +#define TSIF_REF_CLK_SRC 74 +#define USB30_MOCK_UTMI_CLK_SRC 75 +#define USB_HS_SYSTEM_CLK_SRC 76 +#define USB_HSIC_CLK_SRC 77 +#define USB_HSIC_IO_CAL_CLK_SRC 78 +#define USB_HSIC_SYSTEM_CLK_SRC 79 +#define GCC_BAM_DMA_AHB_CLK 80 +#define GCC_BAM_DMA_INACTIVITY_TIMERS_CLK 81 +#define GCC_BIMC_CFG_AHB_CLK 82 +#define GCC_BIMC_KPSS_AXI_CLK 83 +#define GCC_BIMC_SLEEP_CLK 84 +#define GCC_BIMC_SYSNOC_AXI_CLK 85 +#define GCC_BIMC_XO_CLK 86 +#define GCC_BLSP1_AHB_CLK 87 +#define GCC_BLSP1_SLEEP_CLK 88 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 89 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 90 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 91 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 92 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 93 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 94 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 95 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 96 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 97 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 98 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 99 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 100 +#define GCC_BLSP1_UART1_APPS_CLK 101 
+#define GCC_BLSP1_UART1_SIM_CLK 102 +#define GCC_BLSP1_UART2_APPS_CLK 103 +#define GCC_BLSP1_UART2_SIM_CLK 104 +#define GCC_BLSP1_UART3_APPS_CLK 105 +#define GCC_BLSP1_UART3_SIM_CLK 106 +#define GCC_BLSP1_UART4_APPS_CLK 107 +#define GCC_BLSP1_UART4_SIM_CLK 108 +#define GCC_BLSP1_UART5_APPS_CLK 109 +#define GCC_BLSP1_UART5_SIM_CLK 110 +#define GCC_BLSP1_UART6_APPS_CLK 111 +#define GCC_BLSP1_UART6_SIM_CLK 112 +#define GCC_BLSP2_AHB_CLK 113 +#define GCC_BLSP2_SLEEP_CLK 114 +#define GCC_BLSP2_QUP1_I2C_APPS_CLK 115 +#define GCC_BLSP2_QUP1_SPI_APPS_CLK 116 +#define GCC_BLSP2_QUP2_I2C_APPS_CLK 117 +#define GCC_BLSP2_QUP2_SPI_APPS_CLK 118 +#define GCC_BLSP2_QUP3_I2C_APPS_CLK 119 +#define GCC_BLSP2_QUP3_SPI_APPS_CLK 120 +#define GCC_BLSP2_QUP4_I2C_APPS_CLK 121 +#define GCC_BLSP2_QUP4_SPI_APPS_CLK 122 +#define GCC_BLSP2_QUP5_I2C_APPS_CLK 123 +#define GCC_BLSP2_QUP5_SPI_APPS_CLK 124 +#define GCC_BLSP2_QUP6_I2C_APPS_CLK 125 +#define GCC_BLSP2_QUP6_SPI_APPS_CLK 126 +#define GCC_BLSP2_UART1_APPS_CLK 127 +#define GCC_BLSP2_UART1_SIM_CLK 128 +#define GCC_BLSP2_UART2_APPS_CLK 129 +#define GCC_BLSP2_UART2_SIM_CLK 130 +#define GCC_BLSP2_UART3_APPS_CLK 131 +#define GCC_BLSP2_UART3_SIM_CLK 132 +#define GCC_BLSP2_UART4_APPS_CLK 133 +#define GCC_BLSP2_UART4_SIM_CLK 134 +#define GCC_BLSP2_UART5_APPS_CLK 135 +#define GCC_BLSP2_UART5_SIM_CLK 136 +#define GCC_BLSP2_UART6_APPS_CLK 137 +#define GCC_BLSP2_UART6_SIM_CLK 138 +#define GCC_BOOT_ROM_AHB_CLK 139 +#define GCC_CE1_AHB_CLK 140 +#define GCC_CE1_AXI_CLK 141 +#define GCC_CE1_CLK 142 +#define GCC_CE2_AHB_CLK 143 +#define GCC_CE2_AXI_CLK 144 +#define GCC_CE2_CLK 145 +#define GCC_CNOC_BUS_TIMEOUT0_AHB_CLK 146 +#define GCC_CNOC_BUS_TIMEOUT1_AHB_CLK 147 +#define GCC_CNOC_BUS_TIMEOUT2_AHB_CLK 148 +#define GCC_CNOC_BUS_TIMEOUT3_AHB_CLK 149 +#define GCC_CNOC_BUS_TIMEOUT4_AHB_CLK 150 +#define GCC_CNOC_BUS_TIMEOUT5_AHB_CLK 151 +#define GCC_CNOC_BUS_TIMEOUT6_AHB_CLK 152 +#define GCC_CFG_NOC_AHB_CLK 153 +#define GCC_CFG_NOC_DDR_CFG_CLK 154 +#define GCC_CFG_NOC_RPM_AHB_CLK 155 +#define GCC_BIMC_DDR_CPLL0_CLK 156 +#define GCC_BIMC_DDR_CPLL1_CLK 157 +#define GCC_DDR_DIM_CFG_CLK 158 +#define GCC_DDR_DIM_SLEEP_CLK 159 +#define GCC_DEHR_CLK 160 +#define GCC_AHB_CLK 161 +#define GCC_IM_SLEEP_CLK 162 +#define GCC_XO_CLK 163 +#define GCC_XO_DIV4_CLK 164 +#define GCC_GP1_CLK 165 +#define GCC_GP2_CLK 166 +#define GCC_GP3_CLK 167 +#define GCC_IMEM_AXI_CLK 168 +#define GCC_IMEM_CFG_AHB_CLK 169 +#define GCC_KPSS_AHB_CLK 170 +#define GCC_KPSS_AXI_CLK 171 +#define GCC_LPASS_Q6_AXI_CLK 172 +#define GCC_MMSS_NOC_AT_CLK 173 +#define GCC_MMSS_NOC_CFG_AHB_CLK 174 +#define GCC_OCMEM_NOC_CFG_AHB_CLK 175 +#define GCC_OCMEM_SYS_NOC_AXI_CLK 176 +#define GCC_MPM_AHB_CLK 177 +#define GCC_MSG_RAM_AHB_CLK 178 +#define GCC_MSS_CFG_AHB_CLK 179 +#define GCC_MSS_Q6_BIMC_AXI_CLK 180 +#define GCC_NOC_CONF_XPU_AHB_CLK 181 +#define GCC_PDM2_CLK 182 +#define GCC_PDM_AHB_CLK 183 +#define GCC_PDM_XO4_CLK 184 +#define GCC_PERIPH_NOC_AHB_CLK 185 +#define GCC_PERIPH_NOC_AT_CLK 186 +#define GCC_PERIPH_NOC_CFG_AHB_CLK 187 +#define GCC_PERIPH_NOC_MPU_CFG_AHB_CLK 188 +#define GCC_PERIPH_XPU_AHB_CLK 189 +#define GCC_PNOC_BUS_TIMEOUT0_AHB_CLK 190 +#define GCC_PNOC_BUS_TIMEOUT1_AHB_CLK 191 +#define GCC_PNOC_BUS_TIMEOUT2_AHB_CLK 192 +#define GCC_PNOC_BUS_TIMEOUT3_AHB_CLK 193 +#define GCC_PNOC_BUS_TIMEOUT4_AHB_CLK 194 +#define GCC_PRNG_AHB_CLK 195 +#define GCC_QDSS_AT_CLK 196 +#define GCC_QDSS_CFG_AHB_CLK 197 +#define GCC_QDSS_DAP_AHB_CLK 198 +#define GCC_QDSS_DAP_CLK 199 +#define GCC_QDSS_ETR_USB_CLK 200 +#define 
GCC_QDSS_STM_CLK 201 +#define GCC_QDSS_TRACECLKIN_CLK 202 +#define GCC_QDSS_TSCTR_DIV16_CLK 203 +#define GCC_QDSS_TSCTR_DIV2_CLK 204 +#define GCC_QDSS_TSCTR_DIV3_CLK 205 +#define GCC_QDSS_TSCTR_DIV4_CLK 206 +#define GCC_QDSS_TSCTR_DIV8_CLK 207 +#define GCC_QDSS_RBCPR_XPU_AHB_CLK 208 +#define GCC_RBCPR_AHB_CLK 209 +#define GCC_RBCPR_CLK 210 +#define GCC_RPM_BUS_AHB_CLK 211 +#define GCC_RPM_PROC_HCLK 212 +#define GCC_RPM_SLEEP_CLK 213 +#define GCC_RPM_TIMER_CLK 214 +#define GCC_SDCC1_AHB_CLK 215 +#define GCC_SDCC1_APPS_CLK 216 +#define GCC_SDCC1_INACTIVITY_TIMERS_CLK 217 +#define GCC_SDCC2_AHB_CLK 218 +#define GCC_SDCC2_APPS_CLK 219 +#define GCC_SDCC2_INACTIVITY_TIMERS_CLK 220 +#define GCC_SDCC3_AHB_CLK 221 +#define GCC_SDCC3_APPS_CLK 222 +#define GCC_SDCC3_INACTIVITY_TIMERS_CLK 223 +#define GCC_SDCC4_AHB_CLK 224 +#define GCC_SDCC4_APPS_CLK 225 +#define GCC_SDCC4_INACTIVITY_TIMERS_CLK 226 +#define GCC_SEC_CTRL_ACC_CLK 227 +#define GCC_SEC_CTRL_AHB_CLK 228 +#define GCC_SEC_CTRL_BOOT_ROM_PATCH_CLK 229 +#define GCC_SEC_CTRL_CLK 230 +#define GCC_SEC_CTRL_SENSE_CLK 231 +#define GCC_SNOC_BUS_TIMEOUT0_AHB_CLK 232 +#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK 233 +#define GCC_SPDM_BIMC_CY_CLK 234 +#define GCC_SPDM_CFG_AHB_CLK 235 +#define GCC_SPDM_DEBUG_CY_CLK 236 +#define GCC_SPDM_FF_CLK 237 +#define GCC_SPDM_MSTR_AHB_CLK 238 +#define GCC_SPDM_PNOC_CY_CLK 239 +#define GCC_SPDM_RPM_CY_CLK 240 +#define GCC_SPDM_SNOC_CY_CLK 241 +#define GCC_SPMI_AHB_CLK 242 +#define GCC_SPMI_CNOC_AHB_CLK 243 +#define GCC_SPMI_SER_CLK 244 +#define GCC_SNOC_CNOC_AHB_CLK 245 +#define GCC_SNOC_PNOC_AHB_CLK 246 +#define GCC_SYS_NOC_AT_CLK 247 +#define GCC_SYS_NOC_AXI_CLK 248 +#define GCC_SYS_NOC_KPSS_AHB_CLK 249 +#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 250 +#define GCC_SYS_NOC_USB3_AXI_CLK 251 +#define GCC_TCSR_AHB_CLK 252 +#define GCC_TLMM_AHB_CLK 253 +#define GCC_TLMM_CLK 254 +#define GCC_TSIF_AHB_CLK 255 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 256 +#define GCC_TSIF_REF_CLK 257 +#define GCC_USB2A_PHY_SLEEP_CLK 258 +#define GCC_USB2B_PHY_SLEEP_CLK 259 +#define GCC_USB30_MASTER_CLK 260 +#define GCC_USB30_MOCK_UTMI_CLK 261 +#define GCC_USB30_SLEEP_CLK 262 +#define GCC_USB_HS_AHB_CLK 263 +#define GCC_USB_HS_INACTIVITY_TIMERS_CLK 264 +#define GCC_USB_HS_SYSTEM_CLK 265 +#define GCC_USB_HSIC_AHB_CLK 266 +#define GCC_USB_HSIC_CLK 267 +#define GCC_USB_HSIC_IO_CAL_CLK 268 +#define GCC_USB_HSIC_IO_CAL_SLEEP_CLK 269 +#define GCC_USB_HSIC_SYSTEM_CLK 270 +#define GCC_WCSS_GPLL1_CLK_SRC 271 +#define GCC_MMSS_GPLL0_CLK_SRC 272 +#define GCC_LPASS_GPLL0_CLK_SRC 273 +#define GCC_WCSS_GPLL1_CLK_SRC_SLEEP_ENA 274 +#define GCC_MMSS_GPLL0_CLK_SRC_SLEEP_ENA 275 +#define GCC_LPASS_GPLL0_CLK_SRC_SLEEP_ENA 276 +#define GCC_IMEM_AXI_CLK_SLEEP_ENA 277 +#define GCC_SYS_NOC_KPSS_AHB_CLK_SLEEP_ENA 278 +#define GCC_BIMC_KPSS_AXI_CLK_SLEEP_ENA 279 +#define GCC_KPSS_AHB_CLK_SLEEP_ENA 280 +#define GCC_KPSS_AXI_CLK_SLEEP_ENA 281 +#define GCC_MPM_AHB_CLK_SLEEP_ENA 282 +#define GCC_OCMEM_SYS_NOC_AXI_CLK_SLEEP_ENA 283 +#define GCC_BLSP1_AHB_CLK_SLEEP_ENA 284 +#define GCC_BLSP1_SLEEP_CLK_SLEEP_ENA 285 +#define GCC_BLSP2_AHB_CLK_SLEEP_ENA 286 +#define GCC_BLSP2_SLEEP_CLK_SLEEP_ENA 287 +#define GCC_PRNG_AHB_CLK_SLEEP_ENA 288 +#define GCC_BAM_DMA_AHB_CLK_SLEEP_ENA 289 +#define GCC_BAM_DMA_INACTIVITY_TIMERS_CLK_SLEEP_ENA 290 +#define GCC_BOOT_ROM_AHB_CLK_SLEEP_ENA 291 +#define GCC_MSG_RAM_AHB_CLK_SLEEP_ENA 292 +#define GCC_TLMM_AHB_CLK_SLEEP_ENA 293 +#define GCC_TLMM_CLK_SLEEP_ENA 294 +#define GCC_SPMI_CNOC_AHB_CLK_SLEEP_ENA 295 +#define GCC_CE1_CLK_SLEEP_ENA 296 +#define 
GCC_CE1_AXI_CLK_SLEEP_ENA 297 +#define GCC_CE1_AHB_CLK_SLEEP_ENA 298 +#define GCC_CE2_CLK_SLEEP_ENA 299 +#define GCC_CE2_AXI_CLK_SLEEP_ENA 300 +#define GCC_CE2_AHB_CLK_SLEEP_ENA 301 +#define GPLL4 302 +#define GPLL4_VOTE 303 +#define GCC_SDCC1_CDCCAL_SLEEP_CLK 304 +#define GCC_SDCC1_CDCCAL_FF_CLK 305 + +/* gdscs */ +#define USB_HS_HSIC_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h new file mode 100644 index 000000000..df47da086 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8994_H +#define _DT_BINDINGS_CLK_MSM_GCC_8994_H + +#define GPLL0_EARLY 0 +#define GPLL0 1 +#define GPLL4_EARLY 2 +#define GPLL4 3 +#define UFS_AXI_CLK_SRC 4 +#define USB30_MASTER_CLK_SRC 5 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 6 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 7 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 8 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 9 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 10 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 11 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 12 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 13 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 14 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 15 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 16 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 17 +#define BLSP1_UART1_APPS_CLK_SRC 18 +#define BLSP1_UART2_APPS_CLK_SRC 19 +#define BLSP1_UART3_APPS_CLK_SRC 20 +#define BLSP1_UART4_APPS_CLK_SRC 21 +#define BLSP1_UART5_APPS_CLK_SRC 22 +#define BLSP1_UART6_APPS_CLK_SRC 23 +#define BLSP2_QUP1_I2C_APPS_CLK_SRC 24 +#define BLSP2_QUP1_SPI_APPS_CLK_SRC 25 +#define BLSP2_QUP2_I2C_APPS_CLK_SRC 26 +#define BLSP2_QUP2_SPI_APPS_CLK_SRC 27 +#define BLSP2_QUP3_I2C_APPS_CLK_SRC 28 +#define BLSP2_QUP3_SPI_APPS_CLK_SRC 29 +#define BLSP2_QUP4_I2C_APPS_CLK_SRC 30 +#define BLSP2_QUP4_SPI_APPS_CLK_SRC 31 +#define BLSP2_QUP5_I2C_APPS_CLK_SRC 32 +#define BLSP2_QUP5_SPI_APPS_CLK_SRC 33 +#define BLSP2_QUP6_I2C_APPS_CLK_SRC 34 +#define BLSP2_QUP6_SPI_APPS_CLK_SRC 35 +#define BLSP2_UART1_APPS_CLK_SRC 36 +#define BLSP2_UART2_APPS_CLK_SRC 37 +#define BLSP2_UART3_APPS_CLK_SRC 38 +#define BLSP2_UART4_APPS_CLK_SRC 39 +#define BLSP2_UART5_APPS_CLK_SRC 40 +#define BLSP2_UART6_APPS_CLK_SRC 41 +#define GP1_CLK_SRC 42 +#define GP2_CLK_SRC 43 +#define GP3_CLK_SRC 44 +#define PCIE_0_AUX_CLK_SRC 45 +#define PCIE_0_PIPE_CLK_SRC 46 +#define PCIE_1_AUX_CLK_SRC 47 +#define PCIE_1_PIPE_CLK_SRC 48 +#define PDM2_CLK_SRC 49 +#define SDCC1_APPS_CLK_SRC 50 +#define SDCC2_APPS_CLK_SRC 51 +#define SDCC3_APPS_CLK_SRC 52 +#define SDCC4_APPS_CLK_SRC 53 +#define TSIF_REF_CLK_SRC 54 +#define USB30_MOCK_UTMI_CLK_SRC 55 +#define USB3_PHY_AUX_CLK_SRC 56 +#define USB_HS_SYSTEM_CLK_SRC 57 +#define GCC_BLSP1_AHB_CLK 58 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 59 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 60 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 61 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 62 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 63 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 64 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 65 
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 66 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 67 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 68 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 69 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 70 +#define GCC_BLSP1_UART1_APPS_CLK 71 +#define GCC_BLSP1_UART2_APPS_CLK 72 +#define GCC_BLSP1_UART3_APPS_CLK 73 +#define GCC_BLSP1_UART4_APPS_CLK 74 +#define GCC_BLSP1_UART5_APPS_CLK 75 +#define GCC_BLSP1_UART6_APPS_CLK 76 +#define GCC_BLSP2_AHB_CLK 77 +#define GCC_BLSP2_QUP1_I2C_APPS_CLK 78 +#define GCC_BLSP2_QUP1_SPI_APPS_CLK 79 +#define GCC_BLSP2_QUP2_I2C_APPS_CLK 80 +#define GCC_BLSP2_QUP2_SPI_APPS_CLK 81 +#define GCC_BLSP2_QUP3_I2C_APPS_CLK 82 +#define GCC_BLSP2_QUP3_SPI_APPS_CLK 83 +#define GCC_BLSP2_QUP4_I2C_APPS_CLK 84 +#define GCC_BLSP2_QUP4_SPI_APPS_CLK 85 +#define GCC_BLSP2_QUP5_I2C_APPS_CLK 86 +#define GCC_BLSP2_QUP5_SPI_APPS_CLK 87 +#define GCC_BLSP2_QUP6_I2C_APPS_CLK 88 +#define GCC_BLSP2_QUP6_SPI_APPS_CLK 89 +#define GCC_BLSP2_UART1_APPS_CLK 90 +#define GCC_BLSP2_UART2_APPS_CLK 91 +#define GCC_BLSP2_UART3_APPS_CLK 92 +#define GCC_BLSP2_UART4_APPS_CLK 93 +#define GCC_BLSP2_UART5_APPS_CLK 94 +#define GCC_BLSP2_UART6_APPS_CLK 95 +#define GCC_GP1_CLK 96 +#define GCC_GP2_CLK 97 +#define GCC_GP3_CLK 98 +#define GCC_PCIE_0_AUX_CLK 99 +#define GCC_PCIE_0_PIPE_CLK 100 +#define GCC_PCIE_1_AUX_CLK 101 +#define GCC_PCIE_1_PIPE_CLK 102 +#define GCC_PDM2_CLK 103 +#define GCC_SDCC1_APPS_CLK 104 +#define GCC_SDCC2_APPS_CLK 105 +#define GCC_SDCC3_APPS_CLK 106 +#define GCC_SDCC4_APPS_CLK 107 +#define GCC_SYS_NOC_UFS_AXI_CLK 108 +#define GCC_SYS_NOC_USB3_AXI_CLK 109 +#define GCC_TSIF_REF_CLK 110 +#define GCC_UFS_AXI_CLK 111 +#define GCC_UFS_RX_CFG_CLK 112 +#define GCC_UFS_TX_CFG_CLK 113 +#define GCC_USB30_MASTER_CLK 114 +#define GCC_USB30_MOCK_UTMI_CLK 115 +#define GCC_USB3_PHY_AUX_CLK 116 +#define GCC_USB_HS_SYSTEM_CLK 117 +#define GCC_SDCC1_AHB_CLK 118 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h new file mode 100644 index 000000000..75b07cf5e --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8996_H +#define _DT_BINDINGS_CLK_MSM_GCC_8996_H + +#define GPLL0_EARLY 0 +#define GPLL0 1 +#define GPLL1_EARLY 2 +#define GPLL1 3 +#define GPLL2_EARLY 4 +#define GPLL2 5 +#define GPLL3_EARLY 6 +#define GPLL3 7 +#define GPLL4_EARLY 8 +#define GPLL4 9 +#define SYSTEM_NOC_CLK_SRC 10 +#define CONFIG_NOC_CLK_SRC 11 +#define PERIPH_NOC_CLK_SRC 12 +#define MMSS_BIMC_GFX_CLK_SRC 13 +#define USB30_MASTER_CLK_SRC 14 +#define USB30_MOCK_UTMI_CLK_SRC 15 +#define USB3_PHY_AUX_CLK_SRC 16 +#define USB20_MASTER_CLK_SRC 17 +#define USB20_MOCK_UTMI_CLK_SRC 18 +#define SDCC1_APPS_CLK_SRC 19 +#define SDCC1_ICE_CORE_CLK_SRC 20 +#define SDCC2_APPS_CLK_SRC 21 +#define SDCC3_APPS_CLK_SRC 22 +#define SDCC4_APPS_CLK_SRC 23 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 24 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 25 +#define BLSP1_UART1_APPS_CLK_SRC 26 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 27 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 28 +#define BLSP1_UART2_APPS_CLK_SRC 29 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 30 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 31 +#define BLSP1_UART3_APPS_CLK_SRC 32 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 33 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 34 +#define BLSP1_UART4_APPS_CLK_SRC 35 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 36 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 37 +#define BLSP1_UART5_APPS_CLK_SRC 38 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 39 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 40 +#define BLSP1_UART6_APPS_CLK_SRC 41 +#define BLSP2_QUP1_SPI_APPS_CLK_SRC 42 +#define BLSP2_QUP1_I2C_APPS_CLK_SRC 43 +#define BLSP2_UART1_APPS_CLK_SRC 44 +#define BLSP2_QUP2_SPI_APPS_CLK_SRC 45 +#define BLSP2_QUP2_I2C_APPS_CLK_SRC 46 +#define BLSP2_UART2_APPS_CLK_SRC 47 +#define BLSP2_QUP3_SPI_APPS_CLK_SRC 48 +#define BLSP2_QUP3_I2C_APPS_CLK_SRC 49 +#define BLSP2_UART3_APPS_CLK_SRC 50 +#define BLSP2_QUP4_SPI_APPS_CLK_SRC 51 +#define BLSP2_QUP4_I2C_APPS_CLK_SRC 52 +#define BLSP2_UART4_APPS_CLK_SRC 53 +#define BLSP2_QUP5_SPI_APPS_CLK_SRC 54 +#define BLSP2_QUP5_I2C_APPS_CLK_SRC 55 +#define BLSP2_UART5_APPS_CLK_SRC 56 +#define BLSP2_QUP6_SPI_APPS_CLK_SRC 57 +#define BLSP2_QUP6_I2C_APPS_CLK_SRC 58 +#define BLSP2_UART6_APPS_CLK_SRC 59 +#define PDM2_CLK_SRC 60 +#define TSIF_REF_CLK_SRC 61 +#define CE1_CLK_SRC 62 +#define GCC_SLEEP_CLK_SRC 63 +#define BIMC_CLK_SRC 64 +#define HMSS_AHB_CLK_SRC 65 +#define BIMC_HMSS_AXI_CLK_SRC 66 +#define HMSS_RBCPR_CLK_SRC 67 +#define HMSS_GPLL0_CLK_SRC 68 +#define GP1_CLK_SRC 69 +#define GP2_CLK_SRC 70 +#define GP3_CLK_SRC 71 +#define PCIE_AUX_CLK_SRC 72 +#define UFS_AXI_CLK_SRC 73 +#define UFS_ICE_CORE_CLK_SRC 74 +#define QSPI_SER_CLK_SRC 75 +#define GCC_SYS_NOC_AXI_CLK 76 +#define GCC_SYS_NOC_HMSS_AHB_CLK 77 +#define GCC_SNOC_CNOC_AHB_CLK 78 +#define GCC_SNOC_PNOC_AHB_CLK 79 +#define GCC_SYS_NOC_AT_CLK 80 +#define GCC_SYS_NOC_USB3_AXI_CLK 81 +#define GCC_SYS_NOC_UFS_AXI_CLK 82 +#define GCC_CFG_NOC_AHB_CLK 83 +#define GCC_PERIPH_NOC_AHB_CLK 84 +#define GCC_PERIPH_NOC_USB20_AHB_CLK 85 +#define GCC_TIC_CLK 86 +#define GCC_IMEM_AXI_CLK 87 +#define GCC_MMSS_SYS_NOC_AXI_CLK 88 +#define GCC_MMSS_NOC_CFG_AHB_CLK 89 +#define GCC_MMSS_BIMC_GFX_CLK 90 +#define GCC_USB30_MASTER_CLK 91 +#define GCC_USB30_SLEEP_CLK 92 +#define GCC_USB30_MOCK_UTMI_CLK 93 +#define GCC_USB3_PHY_AUX_CLK 94 +#define GCC_USB3_PHY_PIPE_CLK 95 +#define GCC_USB20_MASTER_CLK 96 +#define GCC_USB20_SLEEP_CLK 97 +#define GCC_USB20_MOCK_UTMI_CLK 98 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 99 +#define GCC_SDCC1_APPS_CLK 100 +#define GCC_SDCC1_AHB_CLK 101 +#define GCC_SDCC1_ICE_CORE_CLK 102 +#define GCC_SDCC2_APPS_CLK 103 
+#define GCC_SDCC2_AHB_CLK 104 +#define GCC_SDCC3_APPS_CLK 105 +#define GCC_SDCC3_AHB_CLK 106 +#define GCC_SDCC4_APPS_CLK 107 +#define GCC_SDCC4_AHB_CLK 108 +#define GCC_BLSP1_AHB_CLK 109 +#define GCC_BLSP1_SLEEP_CLK 110 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 111 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 112 +#define GCC_BLSP1_UART1_APPS_CLK 113 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 114 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 115 +#define GCC_BLSP1_UART2_APPS_CLK 116 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 117 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 118 +#define GCC_BLSP1_UART3_APPS_CLK 119 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 120 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 121 +#define GCC_BLSP1_UART4_APPS_CLK 122 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 123 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 124 +#define GCC_BLSP1_UART5_APPS_CLK 125 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 126 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 127 +#define GCC_BLSP1_UART6_APPS_CLK 128 +#define GCC_BLSP2_AHB_CLK 129 +#define GCC_BLSP2_SLEEP_CLK 130 +#define GCC_BLSP2_QUP1_SPI_APPS_CLK 131 +#define GCC_BLSP2_QUP1_I2C_APPS_CLK 132 +#define GCC_BLSP2_UART1_APPS_CLK 133 +#define GCC_BLSP2_QUP2_SPI_APPS_CLK 134 +#define GCC_BLSP2_QUP2_I2C_APPS_CLK 135 +#define GCC_BLSP2_UART2_APPS_CLK 136 +#define GCC_BLSP2_QUP3_SPI_APPS_CLK 137 +#define GCC_BLSP2_QUP3_I2C_APPS_CLK 138 +#define GCC_BLSP2_UART3_APPS_CLK 139 +#define GCC_BLSP2_QUP4_SPI_APPS_CLK 140 +#define GCC_BLSP2_QUP4_I2C_APPS_CLK 141 +#define GCC_BLSP2_UART4_APPS_CLK 142 +#define GCC_BLSP2_QUP5_SPI_APPS_CLK 143 +#define GCC_BLSP2_QUP5_I2C_APPS_CLK 144 +#define GCC_BLSP2_UART5_APPS_CLK 145 +#define GCC_BLSP2_QUP6_SPI_APPS_CLK 146 +#define GCC_BLSP2_QUP6_I2C_APPS_CLK 147 +#define GCC_BLSP2_UART6_APPS_CLK 148 +#define GCC_PDM_AHB_CLK 149 +#define GCC_PDM_XO4_CLK 150 +#define GCC_PDM2_CLK 151 +#define GCC_PRNG_AHB_CLK 152 +#define GCC_TSIF_AHB_CLK 153 +#define GCC_TSIF_REF_CLK 154 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 155 +#define GCC_TCSR_AHB_CLK 156 +#define GCC_BOOT_ROM_AHB_CLK 157 +#define GCC_MSG_RAM_AHB_CLK 158 +#define GCC_TLMM_AHB_CLK 159 +#define GCC_TLMM_CLK 160 +#define GCC_MPM_AHB_CLK 161 +#define GCC_SPMI_SER_CLK 162 +#define GCC_SPMI_CNOC_AHB_CLK 163 +#define GCC_CE1_CLK 164 +#define GCC_CE1_AXI_CLK 165 +#define GCC_CE1_AHB_CLK 166 +#define GCC_BIMC_HMSS_AXI_CLK 167 +#define GCC_BIMC_GFX_CLK 168 +#define GCC_HMSS_AHB_CLK 169 +#define GCC_HMSS_SLV_AXI_CLK 170 +#define GCC_HMSS_MSTR_AXI_CLK 171 +#define GCC_HMSS_RBCPR_CLK 172 +#define GCC_GP1_CLK 173 +#define GCC_GP2_CLK 174 +#define GCC_GP3_CLK 175 +#define GCC_PCIE_0_SLV_AXI_CLK 176 +#define GCC_PCIE_0_MSTR_AXI_CLK 177 +#define GCC_PCIE_0_CFG_AHB_CLK 178 +#define GCC_PCIE_0_AUX_CLK 179 +#define GCC_PCIE_0_PIPE_CLK 180 +#define GCC_PCIE_1_SLV_AXI_CLK 181 +#define GCC_PCIE_1_MSTR_AXI_CLK 182 +#define GCC_PCIE_1_CFG_AHB_CLK 183 +#define GCC_PCIE_1_AUX_CLK 184 +#define GCC_PCIE_1_PIPE_CLK 185 +#define GCC_PCIE_2_SLV_AXI_CLK 186 +#define GCC_PCIE_2_MSTR_AXI_CLK 187 +#define GCC_PCIE_2_CFG_AHB_CLK 188 +#define GCC_PCIE_2_AUX_CLK 189 +#define GCC_PCIE_2_PIPE_CLK 190 +#define GCC_PCIE_PHY_CFG_AHB_CLK 191 +#define GCC_PCIE_PHY_AUX_CLK 192 +#define GCC_UFS_AXI_CLK 193 +#define GCC_UFS_AHB_CLK 194 +#define GCC_UFS_TX_CFG_CLK 195 +#define GCC_UFS_RX_CFG_CLK 196 +#define GCC_UFS_TX_SYMBOL_0_CLK 197 +#define GCC_UFS_RX_SYMBOL_0_CLK 198 +#define GCC_UFS_RX_SYMBOL_1_CLK 199 +#define GCC_UFS_UNIPRO_CORE_CLK 200 +#define GCC_UFS_ICE_CORE_CLK 201 +#define GCC_UFS_SYS_CLK_CORE_CLK 202 +#define GCC_UFS_TX_SYMBOL_CLK_CORE_CLK 203 +#define 
GCC_AGGRE0_SNOC_AXI_CLK 204 +#define GCC_AGGRE0_CNOC_AHB_CLK 205 +#define GCC_SMMU_AGGRE0_AXI_CLK 206 +#define GCC_SMMU_AGGRE0_AHB_CLK 207 +#define GCC_AGGRE1_PNOC_AHB_CLK 208 +#define GCC_AGGRE2_UFS_AXI_CLK 209 +#define GCC_AGGRE2_USB3_AXI_CLK 210 +#define GCC_QSPI_AHB_CLK 211 +#define GCC_QSPI_SER_CLK 212 +#define GCC_USB3_CLKREF_CLK 213 +#define GCC_HDMI_CLKREF_CLK 214 +#define GCC_UFS_CLKREF_CLK 215 +#define GCC_PCIE_CLKREF_CLK 216 +#define GCC_RX2_USB2_CLKREF_CLK 217 +#define GCC_RX1_USB2_CLKREF_CLK 218 +#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK 219 +#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 220 + +#define GCC_SYSTEM_NOC_BCR 0 +#define GCC_CONFIG_NOC_BCR 1 +#define GCC_PERIPH_NOC_BCR 2 +#define GCC_IMEM_BCR 3 +#define GCC_MMSS_BCR 4 +#define GCC_PIMEM_BCR 5 +#define GCC_QDSS_BCR 6 +#define GCC_USB_30_BCR 7 +#define GCC_USB_20_BCR 8 +#define GCC_QUSB2PHY_PRIM_BCR 9 +#define GCC_QUSB2PHY_SEC_BCR 10 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 11 +#define GCC_SDCC1_BCR 12 +#define GCC_SDCC2_BCR 13 +#define GCC_SDCC3_BCR 14 +#define GCC_SDCC4_BCR 15 +#define GCC_BLSP1_BCR 16 +#define GCC_BLSP1_QUP1_BCR 17 +#define GCC_BLSP1_UART1_BCR 18 +#define GCC_BLSP1_QUP2_BCR 19 +#define GCC_BLSP1_UART2_BCR 20 +#define GCC_BLSP1_QUP3_BCR 21 +#define GCC_BLSP1_UART3_BCR 22 +#define GCC_BLSP1_QUP4_BCR 23 +#define GCC_BLSP1_UART4_BCR 24 +#define GCC_BLSP1_QUP5_BCR 25 +#define GCC_BLSP1_UART5_BCR 26 +#define GCC_BLSP1_QUP6_BCR 27 +#define GCC_BLSP1_UART6_BCR 28 +#define GCC_BLSP2_BCR 29 +#define GCC_BLSP2_QUP1_BCR 30 +#define GCC_BLSP2_UART1_BCR 31 +#define GCC_BLSP2_QUP2_BCR 32 +#define GCC_BLSP2_UART2_BCR 33 +#define GCC_BLSP2_QUP3_BCR 34 +#define GCC_BLSP2_UART3_BCR 35 +#define GCC_BLSP2_QUP4_BCR 36 +#define GCC_BLSP2_UART4_BCR 37 +#define GCC_BLSP2_QUP5_BCR 38 +#define GCC_BLSP2_UART5_BCR 39 +#define GCC_BLSP2_QUP6_BCR 40 +#define GCC_BLSP2_UART6_BCR 41 +#define GCC_PDM_BCR 42 +#define GCC_PRNG_BCR 43 +#define GCC_TSIF_BCR 44 +#define GCC_TCSR_BCR 45 +#define GCC_BOOT_ROM_BCR 46 +#define GCC_MSG_RAM_BCR 47 +#define GCC_TLMM_BCR 48 +#define GCC_MPM_BCR 49 +#define GCC_SEC_CTRL_BCR 50 +#define GCC_SPMI_BCR 51 +#define GCC_SPDM_BCR 52 +#define GCC_CE1_BCR 53 +#define GCC_BIMC_BCR 54 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 55 +#define GCC_SNOC_BUS_TIMEOUT2_BCR 56 +#define GCC_SNOC_BUS_TIMEOUT1_BCR 57 +#define GCC_SNOC_BUS_TIMEOUT3_BCR 58 +#define GCC_SNOC_BUS_TIMEOUT_EXTREF_BCR 59 +#define GCC_PNOC_BUS_TIMEOUT0_BCR 60 +#define GCC_PNOC_BUS_TIMEOUT1_BCR 61 +#define GCC_PNOC_BUS_TIMEOUT2_BCR 62 +#define GCC_PNOC_BUS_TIMEOUT3_BCR 63 +#define GCC_PNOC_BUS_TIMEOUT4_BCR 64 +#define GCC_CNOC_BUS_TIMEOUT0_BCR 65 +#define GCC_CNOC_BUS_TIMEOUT1_BCR 66 +#define GCC_CNOC_BUS_TIMEOUT2_BCR 67 +#define GCC_CNOC_BUS_TIMEOUT3_BCR 68 +#define GCC_CNOC_BUS_TIMEOUT4_BCR 69 +#define GCC_CNOC_BUS_TIMEOUT5_BCR 70 +#define GCC_CNOC_BUS_TIMEOUT6_BCR 71 +#define GCC_CNOC_BUS_TIMEOUT7_BCR 72 +#define GCC_CNOC_BUS_TIMEOUT8_BCR 73 +#define GCC_CNOC_BUS_TIMEOUT9_BCR 74 +#define GCC_CNOC_BUS_TIMEOUT_EXTREF_BCR 75 +#define GCC_APB2JTAG_BCR 76 +#define GCC_RBCPR_CX_BCR 77 +#define GCC_RBCPR_MX_BCR 78 +#define GCC_PCIE_0_BCR 79 +#define GCC_PCIE_0_PHY_BCR 80 +#define GCC_PCIE_1_BCR 81 +#define GCC_PCIE_1_PHY_BCR 82 +#define GCC_PCIE_2_BCR 83 +#define GCC_PCIE_2_PHY_BCR 84 +#define GCC_PCIE_PHY_BCR 85 +#define GCC_DCD_BCR 86 +#define GCC_OBT_ODT_BCR 87 +#define GCC_UFS_BCR 88 +#define GCC_SSC_BCR 89 +#define GCC_VS_BCR 90 +#define GCC_AGGRE0_NOC_BCR 91 +#define GCC_AGGRE1_NOC_BCR 92 +#define GCC_AGGRE2_NOC_BCR 93 +#define GCC_DCC_BCR 94 +#define 
GCC_IPA_BCR 95 +#define GCC_QSPI_BCR 96 +#define GCC_SKL_BCR 97 +#define GCC_MSMPU_BCR 98 +#define GCC_MSS_Q6_BCR 99 +#define GCC_QREFS_VBG_CAL_BCR 100 +#define GCC_PCIE_PHY_COM_BCR 101 +#define GCC_PCIE_PHY_COM_NOCSR_BCR 102 +#define GCC_USB3_PHY_BCR 103 +#define GCC_USB3PHY_PHY_BCR 104 +#define GCC_MSS_RESTART 105 + + +/* Indexes for GDSCs */ +#define AGGRE0_NOC_GDSC 0 +#define HLOS1_VOTE_AGGRE0_NOC_GDSC 1 +#define HLOS1_VOTE_LPASS_ADSP_GDSC 2 +#define HLOS1_VOTE_LPASS_CORE_GDSC 3 +#define USB30_GDSC 4 +#define PCIE0_GDSC 5 +#define PCIE1_GDSC 6 +#define PCIE2_GDSC 7 +#define UFS_GDSC 8 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h new file mode 100644 index 000000000..58a242e65 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_COBALT_H +#define _DT_BINDINGS_CLK_MSM_GCC_COBALT_H + +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 0 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 1 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 2 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 3 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 4 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 5 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 6 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 7 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 8 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 9 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 10 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 11 +#define BLSP1_UART1_APPS_CLK_SRC 12 +#define BLSP1_UART2_APPS_CLK_SRC 13 +#define BLSP1_UART3_APPS_CLK_SRC 14 +#define BLSP2_QUP1_I2C_APPS_CLK_SRC 15 +#define BLSP2_QUP1_SPI_APPS_CLK_SRC 16 +#define BLSP2_QUP2_I2C_APPS_CLK_SRC 17 +#define BLSP2_QUP2_SPI_APPS_CLK_SRC 18 +#define BLSP2_QUP3_I2C_APPS_CLK_SRC 19 +#define BLSP2_QUP3_SPI_APPS_CLK_SRC 20 +#define BLSP2_QUP4_I2C_APPS_CLK_SRC 21 +#define BLSP2_QUP4_SPI_APPS_CLK_SRC 22 +#define BLSP2_QUP5_I2C_APPS_CLK_SRC 23 +#define BLSP2_QUP5_SPI_APPS_CLK_SRC 24 +#define BLSP2_QUP6_I2C_APPS_CLK_SRC 25 +#define BLSP2_QUP6_SPI_APPS_CLK_SRC 26 +#define BLSP2_UART1_APPS_CLK_SRC 27 +#define BLSP2_UART2_APPS_CLK_SRC 28 +#define BLSP2_UART3_APPS_CLK_SRC 29 +#define GCC_AGGRE1_NOC_XO_CLK 30 +#define GCC_AGGRE1_UFS_AXI_CLK 31 +#define GCC_AGGRE1_USB3_AXI_CLK 32 +#define GCC_APSS_QDSS_TSCTR_DIV2_CLK 33 +#define GCC_APSS_QDSS_TSCTR_DIV8_CLK 34 +#define GCC_BIMC_HMSS_AXI_CLK 35 +#define GCC_BIMC_MSS_Q6_AXI_CLK 36 +#define GCC_BLSP1_AHB_CLK 37 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 38 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 39 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 40 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 41 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 42 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 43 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 44 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 45 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 46 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 47 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 48 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 49 +#define GCC_BLSP1_SLEEP_CLK 50 +#define GCC_BLSP1_UART1_APPS_CLK 51 +#define GCC_BLSP1_UART2_APPS_CLK 52 +#define GCC_BLSP1_UART3_APPS_CLK 
53 +#define GCC_BLSP2_AHB_CLK 54 +#define GCC_BLSP2_QUP1_I2C_APPS_CLK 55 +#define GCC_BLSP2_QUP1_SPI_APPS_CLK 56 +#define GCC_BLSP2_QUP2_I2C_APPS_CLK 57 +#define GCC_BLSP2_QUP2_SPI_APPS_CLK 58 +#define GCC_BLSP2_QUP3_I2C_APPS_CLK 59 +#define GCC_BLSP2_QUP3_SPI_APPS_CLK 60 +#define GCC_BLSP2_QUP4_I2C_APPS_CLK 61 +#define GCC_BLSP2_QUP4_SPI_APPS_CLK 62 +#define GCC_BLSP2_QUP5_I2C_APPS_CLK 63 +#define GCC_BLSP2_QUP5_SPI_APPS_CLK 64 +#define GCC_BLSP2_QUP6_I2C_APPS_CLK 65 +#define GCC_BLSP2_QUP6_SPI_APPS_CLK 66 +#define GCC_BLSP2_SLEEP_CLK 67 +#define GCC_BLSP2_UART1_APPS_CLK 68 +#define GCC_BLSP2_UART2_APPS_CLK 69 +#define GCC_BLSP2_UART3_APPS_CLK 70 +#define GCC_CFG_NOC_USB3_AXI_CLK 71 +#define GCC_GP1_CLK 72 +#define GCC_GP2_CLK 73 +#define GCC_GP3_CLK 74 +#define GCC_GPU_BIMC_GFX_CLK 75 +#define GCC_GPU_BIMC_GFX_SRC_CLK 76 +#define GCC_GPU_CFG_AHB_CLK 77 +#define GCC_GPU_SNOC_DVM_GFX_CLK 78 +#define GCC_HMSS_AHB_CLK 79 +#define GCC_HMSS_AT_CLK 80 +#define GCC_HMSS_DVM_BUS_CLK 81 +#define GCC_HMSS_RBCPR_CLK 82 +#define GCC_HMSS_TRIG_CLK 83 +#define GCC_LPASS_AT_CLK 84 +#define GCC_LPASS_TRIG_CLK 85 +#define GCC_MMSS_NOC_CFG_AHB_CLK 86 +#define GCC_MMSS_QM_AHB_CLK 87 +#define GCC_MMSS_QM_CORE_CLK 88 +#define GCC_MMSS_SYS_NOC_AXI_CLK 89 +#define GCC_MSS_AT_CLK 90 +#define GCC_PCIE_0_AUX_CLK 91 +#define GCC_PCIE_0_CFG_AHB_CLK 92 +#define GCC_PCIE_0_MSTR_AXI_CLK 93 +#define GCC_PCIE_0_PIPE_CLK 94 +#define GCC_PCIE_0_SLV_AXI_CLK 95 +#define GCC_PCIE_PHY_AUX_CLK 96 +#define GCC_PDM2_CLK 97 +#define GCC_PDM_AHB_CLK 98 +#define GCC_PDM_XO4_CLK 99 +#define GCC_PRNG_AHB_CLK 100 +#define GCC_SDCC2_AHB_CLK 101 +#define GCC_SDCC2_APPS_CLK 102 +#define GCC_SDCC4_AHB_CLK 103 +#define GCC_SDCC4_APPS_CLK 104 +#define GCC_TSIF_AHB_CLK 105 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 106 +#define GCC_TSIF_REF_CLK 107 +#define GCC_UFS_AHB_CLK 108 +#define GCC_UFS_AXI_CLK 109 +#define GCC_UFS_ICE_CORE_CLK 110 +#define GCC_UFS_PHY_AUX_CLK 111 +#define GCC_UFS_RX_SYMBOL_0_CLK 112 +#define GCC_UFS_RX_SYMBOL_1_CLK 113 +#define GCC_UFS_TX_SYMBOL_0_CLK 114 +#define GCC_UFS_UNIPRO_CORE_CLK 115 +#define GCC_USB30_MASTER_CLK 116 +#define GCC_USB30_MOCK_UTMI_CLK 117 +#define GCC_USB30_SLEEP_CLK 118 +#define GCC_USB3_PHY_AUX_CLK 119 +#define GCC_USB3_PHY_PIPE_CLK 120 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 121 +#define GP1_CLK_SRC 122 +#define GP2_CLK_SRC 123 +#define GP3_CLK_SRC 124 +#define GPLL0 125 +#define GPLL0_OUT_EVEN 126 +#define GPLL0_OUT_MAIN 127 +#define GPLL0_OUT_ODD 128 +#define GPLL0_OUT_TEST 129 +#define GPLL1 130 +#define GPLL1_OUT_EVEN 131 +#define GPLL1_OUT_MAIN 132 +#define GPLL1_OUT_ODD 133 +#define GPLL1_OUT_TEST 134 +#define GPLL2 135 +#define GPLL2_OUT_EVEN 136 +#define GPLL2_OUT_MAIN 137 +#define GPLL2_OUT_ODD 138 +#define GPLL2_OUT_TEST 139 +#define GPLL3 140 +#define GPLL3_OUT_EVEN 141 +#define GPLL3_OUT_MAIN 142 +#define GPLL3_OUT_ODD 143 +#define GPLL3_OUT_TEST 144 +#define GPLL4 145 +#define GPLL4_OUT_EVEN 146 +#define GPLL4_OUT_MAIN 147 +#define GPLL4_OUT_ODD 148 +#define GPLL4_OUT_TEST 149 +#define GPLL6 150 +#define GPLL6_OUT_EVEN 151 +#define GPLL6_OUT_MAIN 152 +#define GPLL6_OUT_ODD 153 +#define GPLL6_OUT_TEST 154 +#define HMSS_AHB_CLK_SRC 155 +#define HMSS_RBCPR_CLK_SRC 156 +#define PCIE_AUX_CLK_SRC 157 +#define PDM2_CLK_SRC 158 +#define SDCC2_APPS_CLK_SRC 159 +#define SDCC4_APPS_CLK_SRC 160 +#define TSIF_REF_CLK_SRC 161 +#define UFS_AXI_CLK_SRC 162 +#define USB30_MASTER_CLK_SRC 163 +#define USB30_MOCK_UTMI_CLK_SRC 164 +#define USB3_PHY_AUX_CLK_SRC 165 + +#define PCIE_0_GDSC 0 +#define UFS_GDSC 
1 +#define USB_30_GDSC 2 + +#define GCC_BLSP1_QUP1_BCR 0 +#define GCC_BLSP1_QUP2_BCR 1 +#define GCC_BLSP1_QUP3_BCR 2 +#define GCC_BLSP1_QUP4_BCR 3 +#define GCC_BLSP1_QUP5_BCR 4 +#define GCC_BLSP1_QUP6_BCR 5 +#define GCC_BLSP2_QUP1_BCR 6 +#define GCC_BLSP2_QUP2_BCR 7 +#define GCC_BLSP2_QUP3_BCR 8 +#define GCC_BLSP2_QUP4_BCR 9 +#define GCC_BLSP2_QUP5_BCR 10 +#define GCC_BLSP2_QUP6_BCR 11 +#define GCC_PCIE_0_BCR 12 +#define GCC_PDM_BCR 13 +#define GCC_SDCC2_BCR 14 +#define GCC_SDCC4_BCR 15 +#define GCC_TSIF_BCR 16 +#define GCC_UFS_BCR 17 +#define GCC_USB_30_BCR 18 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h new file mode 100644 index 000000000..f96fc2dbf --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_SDM_GCC_SDM845_H +#define _DT_BINDINGS_CLK_SDM_GCC_SDM845_H + +/* GCC clock registers */ +#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0 +#define GCC_AGGRE_UFS_CARD_AXI_CLK 1 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 2 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3 +#define GCC_AGGRE_USB3_SEC_AXI_CLK 4 +#define GCC_BOOT_ROM_AHB_CLK 5 +#define GCC_CAMERA_AHB_CLK 6 +#define GCC_CAMERA_AXI_CLK 7 +#define GCC_CAMERA_XO_CLK 8 +#define GCC_CE1_AHB_CLK 9 +#define GCC_CE1_AXI_CLK 10 +#define GCC_CE1_CLK 11 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12 +#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13 +#define GCC_CPUSS_AHB_CLK 14 +#define GCC_CPUSS_AHB_CLK_SRC 15 +#define GCC_CPUSS_RBCPR_CLK 16 +#define GCC_CPUSS_RBCPR_CLK_SRC 17 +#define GCC_DDRSS_GPU_AXI_CLK 18 +#define GCC_DISP_AHB_CLK 19 +#define GCC_DISP_AXI_CLK 20 +#define GCC_DISP_GPLL0_CLK_SRC 21 +#define GCC_DISP_GPLL0_DIV_CLK_SRC 22 +#define GCC_DISP_XO_CLK 23 +#define GCC_GP1_CLK 24 +#define GCC_GP1_CLK_SRC 25 +#define GCC_GP2_CLK 26 +#define GCC_GP2_CLK_SRC 27 +#define GCC_GP3_CLK 28 +#define GCC_GP3_CLK_SRC 29 +#define GCC_GPU_CFG_AHB_CLK 30 +#define GCC_GPU_GPLL0_CLK_SRC 31 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 32 +#define GCC_GPU_MEMNOC_GFX_CLK 33 +#define GCC_GPU_SNOC_DVM_GFX_CLK 34 +#define GCC_MSS_AXIS2_CLK 35 +#define GCC_MSS_CFG_AHB_CLK 36 +#define GCC_MSS_GPLL0_DIV_CLK_SRC 37 +#define GCC_MSS_MFAB_AXIS_CLK 38 +#define GCC_MSS_Q6_MEMNOC_AXI_CLK 39 +#define GCC_MSS_SNOC_AXI_CLK 40 +#define GCC_PCIE_0_AUX_CLK 41 +#define GCC_PCIE_0_AUX_CLK_SRC 42 +#define GCC_PCIE_0_CFG_AHB_CLK 43 +#define GCC_PCIE_0_CLKREF_CLK 44 +#define GCC_PCIE_0_MSTR_AXI_CLK 45 +#define GCC_PCIE_0_PIPE_CLK 46 +#define GCC_PCIE_0_SLV_AXI_CLK 47 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48 +#define GCC_PCIE_1_AUX_CLK 49 +#define GCC_PCIE_1_AUX_CLK_SRC 50 +#define GCC_PCIE_1_CFG_AHB_CLK 51 +#define GCC_PCIE_1_CLKREF_CLK 52 +#define GCC_PCIE_1_MSTR_AXI_CLK 53 +#define GCC_PCIE_1_PIPE_CLK 54 +#define GCC_PCIE_1_SLV_AXI_CLK 55 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 56 +#define GCC_PCIE_PHY_AUX_CLK 57 +#define GCC_PCIE_PHY_REFGEN_CLK 58 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 59 +#define GCC_PDM2_CLK 60 +#define GCC_PDM2_CLK_SRC 61 +#define GCC_PDM_AHB_CLK 62 +#define GCC_PDM_XO4_CLK 63 +#define GCC_PRNG_AHB_CLK 64 +#define GCC_QMIP_CAMERA_AHB_CLK 65 +#define GCC_QMIP_DISP_AHB_CLK 66 +#define GCC_QMIP_VIDEO_AHB_CLK 67 +#define GCC_QUPV3_WRAP0_S0_CLK 68 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 69 +#define GCC_QUPV3_WRAP0_S1_CLK 70 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 71 +#define GCC_QUPV3_WRAP0_S2_CLK 72 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 73 +#define GCC_QUPV3_WRAP0_S3_CLK 74 +#define 
GCC_QUPV3_WRAP0_S3_CLK_SRC 75 +#define GCC_QUPV3_WRAP0_S4_CLK 76 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 77 +#define GCC_QUPV3_WRAP0_S5_CLK 78 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 79 +#define GCC_QUPV3_WRAP0_S6_CLK 80 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 81 +#define GCC_QUPV3_WRAP0_S7_CLK 82 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 83 +#define GCC_QUPV3_WRAP1_S0_CLK 84 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 85 +#define GCC_QUPV3_WRAP1_S1_CLK 86 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 87 +#define GCC_QUPV3_WRAP1_S2_CLK 88 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 89 +#define GCC_QUPV3_WRAP1_S3_CLK 90 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 91 +#define GCC_QUPV3_WRAP1_S4_CLK 92 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 93 +#define GCC_QUPV3_WRAP1_S5_CLK 94 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 95 +#define GCC_QUPV3_WRAP1_S6_CLK 96 +#define GCC_QUPV3_WRAP1_S6_CLK_SRC 97 +#define GCC_QUPV3_WRAP1_S7_CLK 98 +#define GCC_QUPV3_WRAP1_S7_CLK_SRC 99 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 100 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 101 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 102 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 103 +#define GCC_SDCC2_AHB_CLK 104 +#define GCC_SDCC2_APPS_CLK 105 +#define GCC_SDCC2_APPS_CLK_SRC 106 +#define GCC_SDCC4_AHB_CLK 107 +#define GCC_SDCC4_APPS_CLK 108 +#define GCC_SDCC4_APPS_CLK_SRC 109 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 110 +#define GCC_TSIF_AHB_CLK 111 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 112 +#define GCC_TSIF_REF_CLK 113 +#define GCC_TSIF_REF_CLK_SRC 114 +#define GCC_UFS_CARD_AHB_CLK 115 +#define GCC_UFS_CARD_AXI_CLK 116 +#define GCC_UFS_CARD_AXI_CLK_SRC 117 +#define GCC_UFS_CARD_CLKREF_CLK 118 +#define GCC_UFS_CARD_ICE_CORE_CLK 119 +#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 120 +#define GCC_UFS_CARD_PHY_AUX_CLK 121 +#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 122 +#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 123 +#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 124 +#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 125 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK 126 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 127 +#define GCC_UFS_MEM_CLKREF_CLK 128 +#define GCC_UFS_PHY_AHB_CLK 129 +#define GCC_UFS_PHY_AXI_CLK 130 +#define GCC_UFS_PHY_AXI_CLK_SRC 131 +#define GCC_UFS_PHY_ICE_CORE_CLK 132 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 133 +#define GCC_UFS_PHY_PHY_AUX_CLK 134 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 135 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 136 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 137 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 139 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 140 +#define GCC_USB30_PRIM_MASTER_CLK 141 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 142 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 143 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 144 +#define GCC_USB30_PRIM_SLEEP_CLK 145 +#define GCC_USB30_SEC_MASTER_CLK 146 +#define GCC_USB30_SEC_MASTER_CLK_SRC 147 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 148 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 149 +#define GCC_USB30_SEC_SLEEP_CLK 150 +#define GCC_USB3_PRIM_CLKREF_CLK 151 +#define GCC_USB3_PRIM_PHY_AUX_CLK 152 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 153 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 154 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 155 +#define GCC_USB3_SEC_CLKREF_CLK 156 +#define GCC_USB3_SEC_PHY_AUX_CLK 157 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 158 +#define GCC_USB3_SEC_PHY_PIPE_CLK 159 +#define GCC_USB3_SEC_PHY_COM_AUX_CLK 160 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 161 +#define GCC_VIDEO_AHB_CLK 162 +#define GCC_VIDEO_AXI_CLK 163 +#define GCC_VIDEO_XO_CLK 164 +#define GPLL0 165 +#define GPLL0_OUT_EVEN 166 +#define GPLL0_OUT_MAIN 167 +#define GCC_GPU_IREF_CLK 168 +#define GCC_SDCC1_AHB_CLK 
169
+#define GCC_SDCC1_APPS_CLK 170
+#define GCC_SDCC1_ICE_CORE_CLK 171
+#define GCC_SDCC1_APPS_CLK_SRC 172
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 173
+#define GCC_APC_VS_CLK 174
+#define GCC_GPU_VS_CLK 175
+#define GCC_MSS_VS_CLK 176
+#define GCC_VDDA_VS_CLK 177
+#define GCC_VDDCX_VS_CLK 178
+#define GCC_VDDMX_VS_CLK 179
+#define GCC_VS_CTRL_AHB_CLK 180
+#define GCC_VS_CTRL_CLK 181
+#define GCC_VS_CTRL_CLK_SRC 182
+#define GCC_VSENSOR_CLK_SRC 183
+#define GPLL4 184
+#define GCC_CPUSS_DVM_BUS_CLK 185
+#define GCC_CPUSS_GNOC_CLK 186
+
+/* GCC Resets */
+#define GCC_MMSS_BCR 0
+#define GCC_PCIE_0_BCR 1
+#define GCC_PCIE_1_BCR 2
+#define GCC_PCIE_PHY_BCR 3
+#define GCC_PDM_BCR 4
+#define GCC_PRNG_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_QUPV3_WRAPPER_1_BCR 7
+#define GCC_QUSB2PHY_PRIM_BCR 8
+#define GCC_QUSB2PHY_SEC_BCR 9
+#define GCC_SDCC2_BCR 10
+#define GCC_SDCC4_BCR 11
+#define GCC_TSIF_BCR 12
+#define GCC_UFS_CARD_BCR 13
+#define GCC_UFS_PHY_BCR 14
+#define GCC_USB30_PRIM_BCR 15
+#define GCC_USB30_SEC_BCR 16
+#define GCC_USB3_PHY_PRIM_BCR 17
+#define GCC_USB3PHY_PHY_PRIM_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_SEC_BCR 20
+#define GCC_USB3PHY_PHY_SEC_BCR 21
+#define GCC_USB3_DP_PHY_SEC_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_PCIE_0_PHY_BCR 24
+#define GCC_PCIE_1_PHY_BCR 25
+
+/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 6
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 7
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 8
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 10
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 12
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
new file mode 100644
index 000000000..4e944b85c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_IPQ806X_H
+#define _DT_BINDINGS_CLK_LCC_IPQ806X_H
+
+#define PLL4 0
+#define MI2S_OSR_SRC 1
+#define MI2S_OSR_CLK 2
+#define MI2S_DIV_CLK 3
+#define MI2S_BIT_DIV_CLK 4
+#define MI2S_BIT_CLK 5
+#define PCM_SRC 6
+#define PCM_CLK_OUT 7
+#define PCM_CLK 8
+#define SPDIF_SRC 9
+#define SPDIF_CLK 10
+#define AHBIX_CLK 11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-mdm9615.h b/include/dt-bindings/clock/qcom,lcc-mdm9615.h
new file mode 100644
index 000000000..cac963a2f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-mdm9615.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) BayLibre, SAS.
+ * Author : Neil Armstrong
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_MDM9615_H
+#define _DT_BINDINGS_CLK_LCC_MDM9615_H
+
+#define PLL4 0
+#define MI2S_OSR_SRC 1
+#define MI2S_OSR_CLK 2
+#define MI2S_DIV_CLK 3
+#define MI2S_BIT_DIV_CLK 4
+#define MI2S_BIT_CLK 5
+#define PCM_SRC 6
+#define PCM_CLK_OUT 7
+#define PCM_CLK 8
+#define SLIMBUS_SRC 9
+#define AUDIO_SLIMBUS_CLK 10
+#define SPS_SLIMBUS_CLK 11
+#define CODEC_I2S_MIC_OSR_SRC 12
+#define CODEC_I2S_MIC_OSR_CLK 13
+#define CODEC_I2S_MIC_DIV_CLK 14
+#define CODEC_I2S_MIC_BIT_DIV_CLK 15
+#define CODEC_I2S_MIC_BIT_CLK 16
+#define SPARE_I2S_MIC_OSR_SRC 17
+#define SPARE_I2S_MIC_OSR_CLK 18
+#define SPARE_I2S_MIC_DIV_CLK 19
+#define SPARE_I2S_MIC_BIT_DIV_CLK 20
+#define SPARE_I2S_MIC_BIT_CLK 21
+#define CODEC_I2S_SPKR_OSR_SRC 22
+#define CODEC_I2S_SPKR_OSR_CLK 23
+#define CODEC_I2S_SPKR_DIV_CLK 24
+#define CODEC_I2S_SPKR_BIT_DIV_CLK 25
+#define CODEC_I2S_SPKR_BIT_CLK 26
+#define SPARE_I2S_SPKR_OSR_SRC 27
+#define SPARE_I2S_SPKR_OSR_CLK 28
+#define SPARE_I2S_SPKR_DIV_CLK 29
+#define SPARE_I2S_SPKR_BIT_DIV_CLK 30
+#define SPARE_I2S_SPKR_BIT_CLK 31
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-msm8960.h b/include/dt-bindings/clock/qcom,lcc-msm8960.h
new file mode 100644
index 000000000..4fb2aa64d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-msm8960.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_MSM8960_H
+#define _DT_BINDINGS_CLK_LCC_MSM8960_H
+
+#define PLL4 0
+#define MI2S_OSR_SRC 1
+#define MI2S_OSR_CLK 2
+#define MI2S_DIV_CLK 3
+#define MI2S_BIT_DIV_CLK 4
+#define MI2S_BIT_CLK 5
+#define PCM_SRC 6
+#define PCM_CLK_OUT 7
+#define PCM_CLK 8
+#define SLIMBUS_SRC 9
+#define AUDIO_SLIMBUS_CLK 10
+#define SPS_SLIMBUS_CLK 11
+#define CODEC_I2S_MIC_OSR_SRC 12
+#define CODEC_I2S_MIC_OSR_CLK 13
+#define CODEC_I2S_MIC_DIV_CLK 14
+#define CODEC_I2S_MIC_BIT_DIV_CLK 15
+#define CODEC_I2S_MIC_BIT_CLK 16
+#define SPARE_I2S_MIC_OSR_SRC 17
+#define SPARE_I2S_MIC_OSR_CLK 18
+#define SPARE_I2S_MIC_DIV_CLK 19
+#define SPARE_I2S_MIC_BIT_DIV_CLK 20
+#define SPARE_I2S_MIC_BIT_CLK 21
+#define CODEC_I2S_SPKR_OSR_SRC 22
+#define CODEC_I2S_SPKR_OSR_CLK 23
+#define CODEC_I2S_SPKR_DIV_CLK 24
+#define CODEC_I2S_SPKR_BIT_DIV_CLK 25
+#define CODEC_I2S_SPKR_BIT_CLK 26
+#define SPARE_I2S_SPKR_OSR_SRC 27
+#define SPARE_I2S_SPKR_OSR_CLK 28
+#define SPARE_I2S_SPKR_DIV_CLK 29
+#define SPARE_I2S_SPKR_BIT_DIV_CLK 30
+#define SPARE_I2S_SPKR_BIT_CLK 31
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
new file mode 100644
index 000000000..03861e3f4
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#ifndef _DT_BINDINGS_CLK_APQ_MMCC_8084_H +#define _DT_BINDINGS_CLK_APQ_MMCC_8084_H + +#define MMSS_AHB_CLK_SRC 0 +#define MMSS_AXI_CLK_SRC 1 +#define MMPLL0 2 +#define MMPLL0_VOTE 3 +#define MMPLL1 4 +#define MMPLL1_VOTE 5 +#define MMPLL2 6 +#define MMPLL3 7 +#define MMPLL4 8 +#define CSI0_CLK_SRC 9 +#define CSI1_CLK_SRC 10 +#define CSI2_CLK_SRC 11 +#define CSI3_CLK_SRC 12 +#define VCODEC0_CLK_SRC 13 +#define VFE0_CLK_SRC 14 +#define VFE1_CLK_SRC 15 +#define MDP_CLK_SRC 16 +#define PCLK0_CLK_SRC 17 +#define PCLK1_CLK_SRC 18 +#define OCMEMNOC_CLK_SRC 19 +#define GFX3D_CLK_SRC 20 +#define JPEG0_CLK_SRC 21 +#define JPEG1_CLK_SRC 22 +#define JPEG2_CLK_SRC 23 +#define EDPPIXEL_CLK_SRC 24 +#define EXTPCLK_CLK_SRC 25 +#define VP_CLK_SRC 26 +#define CCI_CLK_SRC 27 +#define CAMSS_GP0_CLK_SRC 28 +#define CAMSS_GP1_CLK_SRC 29 +#define MCLK0_CLK_SRC 30 +#define MCLK1_CLK_SRC 31 +#define MCLK2_CLK_SRC 32 +#define MCLK3_CLK_SRC 33 +#define CSI0PHYTIMER_CLK_SRC 34 +#define CSI1PHYTIMER_CLK_SRC 35 +#define CSI2PHYTIMER_CLK_SRC 36 +#define CPP_CLK_SRC 37 +#define BYTE0_CLK_SRC 38 +#define BYTE1_CLK_SRC 39 +#define EDPAUX_CLK_SRC 40 +#define EDPLINK_CLK_SRC 41 +#define ESC0_CLK_SRC 42 +#define ESC1_CLK_SRC 43 +#define HDMI_CLK_SRC 44 +#define VSYNC_CLK_SRC 45 +#define MMSS_RBCPR_CLK_SRC 46 +#define RBBMTIMER_CLK_SRC 47 +#define MAPLE_CLK_SRC 48 +#define VDP_CLK_SRC 49 +#define VPU_BUS_CLK_SRC 50 +#define MMSS_CXO_CLK 51 +#define MMSS_SLEEPCLK_CLK 52 +#define AVSYNC_AHB_CLK 53 +#define AVSYNC_EDPPIXEL_CLK 54 +#define AVSYNC_EXTPCLK_CLK 55 +#define AVSYNC_PCLK0_CLK 56 +#define AVSYNC_PCLK1_CLK 57 +#define AVSYNC_VP_CLK 58 +#define CAMSS_AHB_CLK 59 +#define CAMSS_CCI_CCI_AHB_CLK 60 +#define CAMSS_CCI_CCI_CLK 61 +#define CAMSS_CSI0_AHB_CLK 62 +#define CAMSS_CSI0_CLK 63 +#define CAMSS_CSI0PHY_CLK 64 +#define CAMSS_CSI0PIX_CLK 65 +#define CAMSS_CSI0RDI_CLK 66 +#define CAMSS_CSI1_AHB_CLK 67 +#define CAMSS_CSI1_CLK 68 +#define CAMSS_CSI1PHY_CLK 69 +#define CAMSS_CSI1PIX_CLK 70 +#define CAMSS_CSI1RDI_CLK 71 +#define CAMSS_CSI2_AHB_CLK 72 +#define CAMSS_CSI2_CLK 73 +#define CAMSS_CSI2PHY_CLK 74 +#define CAMSS_CSI2PIX_CLK 75 +#define CAMSS_CSI2RDI_CLK 76 +#define CAMSS_CSI3_AHB_CLK 77 +#define CAMSS_CSI3_CLK 78 +#define CAMSS_CSI3PHY_CLK 79 +#define CAMSS_CSI3PIX_CLK 80 +#define CAMSS_CSI3RDI_CLK 81 +#define CAMSS_CSI_VFE0_CLK 82 +#define CAMSS_CSI_VFE1_CLK 83 +#define CAMSS_GP0_CLK 84 +#define CAMSS_GP1_CLK 85 +#define CAMSS_ISPIF_AHB_CLK 86 +#define CAMSS_JPEG_JPEG0_CLK 87 +#define CAMSS_JPEG_JPEG1_CLK 88 +#define CAMSS_JPEG_JPEG2_CLK 89 +#define CAMSS_JPEG_JPEG_AHB_CLK 90 +#define CAMSS_JPEG_JPEG_AXI_CLK 91 +#define CAMSS_MCLK0_CLK 92 +#define CAMSS_MCLK1_CLK 93 +#define CAMSS_MCLK2_CLK 94 +#define CAMSS_MCLK3_CLK 95 +#define CAMSS_MICRO_AHB_CLK 96 +#define CAMSS_PHY0_CSI0PHYTIMER_CLK 97 +#define CAMSS_PHY1_CSI1PHYTIMER_CLK 98 +#define CAMSS_PHY2_CSI2PHYTIMER_CLK 99 +#define CAMSS_TOP_AHB_CLK 100 +#define CAMSS_VFE_CPP_AHB_CLK 101 +#define CAMSS_VFE_CPP_CLK 102 +#define CAMSS_VFE_VFE0_CLK 103 +#define CAMSS_VFE_VFE1_CLK 104 +#define CAMSS_VFE_VFE_AHB_CLK 105 +#define CAMSS_VFE_VFE_AXI_CLK 106 +#define MDSS_AHB_CLK 107 +#define MDSS_AXI_CLK 108 +#define MDSS_BYTE0_CLK 109 +#define MDSS_BYTE1_CLK 110 +#define MDSS_EDPAUX_CLK 111 +#define MDSS_EDPLINK_CLK 112 +#define MDSS_EDPPIXEL_CLK 113 +#define MDSS_ESC0_CLK 114 +#define MDSS_ESC1_CLK 115 +#define MDSS_EXTPCLK_CLK 116 +#define MDSS_HDMI_AHB_CLK 117 +#define MDSS_HDMI_CLK 118 +#define MDSS_MDP_CLK 119 +#define MDSS_MDP_LUT_CLK 120 +#define MDSS_PCLK0_CLK 
121 +#define MDSS_PCLK1_CLK 122 +#define MDSS_VSYNC_CLK 123 +#define MMSS_RBCPR_AHB_CLK 124 +#define MMSS_RBCPR_CLK 125 +#define MMSS_SPDM_AHB_CLK 126 +#define MMSS_SPDM_AXI_CLK 127 +#define MMSS_SPDM_CSI0_CLK 128 +#define MMSS_SPDM_GFX3D_CLK 129 +#define MMSS_SPDM_JPEG0_CLK 130 +#define MMSS_SPDM_JPEG1_CLK 131 +#define MMSS_SPDM_JPEG2_CLK 132 +#define MMSS_SPDM_MDP_CLK 133 +#define MMSS_SPDM_PCLK0_CLK 134 +#define MMSS_SPDM_PCLK1_CLK 135 +#define MMSS_SPDM_VCODEC0_CLK 136 +#define MMSS_SPDM_VFE0_CLK 137 +#define MMSS_SPDM_VFE1_CLK 138 +#define MMSS_SPDM_RM_AXI_CLK 139 +#define MMSS_SPDM_RM_OCMEMNOC_CLK 140 +#define MMSS_MISC_AHB_CLK 141 +#define MMSS_MMSSNOC_AHB_CLK 142 +#define MMSS_MMSSNOC_BTO_AHB_CLK 143 +#define MMSS_MMSSNOC_AXI_CLK 144 +#define MMSS_S0_AXI_CLK 145 +#define OCMEMCX_AHB_CLK 146 +#define OCMEMCX_OCMEMNOC_CLK 147 +#define OXILI_OCMEMGX_CLK 148 +#define OXILI_GFX3D_CLK 149 +#define OXILI_RBBMTIMER_CLK 150 +#define OXILICX_AHB_CLK 151 +#define VENUS0_AHB_CLK 152 +#define VENUS0_AXI_CLK 153 +#define VENUS0_CORE0_VCODEC_CLK 154 +#define VENUS0_CORE1_VCODEC_CLK 155 +#define VENUS0_OCMEMNOC_CLK 156 +#define VENUS0_VCODEC0_CLK 157 +#define VPU_AHB_CLK 158 +#define VPU_AXI_CLK 159 +#define VPU_BUS_CLK 160 +#define VPU_CXO_CLK 161 +#define VPU_MAPLE_CLK 162 +#define VPU_SLEEP_CLK 163 +#define VPU_VDP_CLK 164 + +/* GDSCs */ +#define VENUS0_GDSC 0 +#define VENUS0_CORE0_GDSC 1 +#define VENUS0_CORE1_GDSC 2 +#define MDSS_GDSC 3 +#define CAMSS_JPEG_GDSC 4 +#define CAMSS_VFE_GDSC 5 +#define OXILI_GDSC 6 +#define OXILICX_GDSC 7 + +#endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8960.h b/include/dt-bindings/clock/qcom,mmcc-msm8960.h new file mode 100644 index 000000000..85041b28f --- /dev/null +++ b/include/dt-bindings/clock/qcom,mmcc-msm8960.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8960_H +#define _DT_BINDINGS_CLK_MSM_MMCC_8960_H + +#define MMSS_AHB_SRC 0 +#define FAB_AHB_CLK 1 +#define APU_AHB_CLK 2 +#define TV_ENC_AHB_CLK 3 +#define AMP_AHB_CLK 4 +#define DSI2_S_AHB_CLK 5 +#define JPEGD_AHB_CLK 6 +#define GFX2D0_AHB_CLK 7 +#define DSI_S_AHB_CLK 8 +#define DSI2_M_AHB_CLK 9 +#define VPE_AHB_CLK 10 +#define SMMU_AHB_CLK 11 +#define HDMI_M_AHB_CLK 12 +#define VFE_AHB_CLK 13 +#define ROT_AHB_CLK 14 +#define VCODEC_AHB_CLK 15 +#define MDP_AHB_CLK 16 +#define DSI_M_AHB_CLK 17 +#define CSI_AHB_CLK 18 +#define MMSS_IMEM_AHB_CLK 19 +#define IJPEG_AHB_CLK 20 +#define HDMI_S_AHB_CLK 21 +#define GFX3D_AHB_CLK 22 +#define GFX2D1_AHB_CLK 23 +#define MMSS_FPB_CLK 24 +#define MMSS_AXI_SRC 25 +#define MMSS_FAB_CORE 26 +#define FAB_MSP_AXI_CLK 27 +#define JPEGD_AXI_CLK 28 +#define GMEM_AXI_CLK 29 +#define MDP_AXI_CLK 30 +#define MMSS_IMEM_AXI_CLK 31 +#define IJPEG_AXI_CLK 32 +#define GFX3D_AXI_CLK 33 +#define VCODEC_AXI_CLK 34 +#define VFE_AXI_CLK 35 +#define VPE_AXI_CLK 36 +#define ROT_AXI_CLK 37 +#define VCODEC_AXI_A_CLK 38 +#define VCODEC_AXI_B_CLK 39 +#define MM_AXI_S3_FCLK 40 +#define MM_AXI_S2_FCLK 41 +#define MM_AXI_S1_FCLK 42 +#define MM_AXI_S0_FCLK 43 +#define MM_AXI_S2_CLK 44 +#define MM_AXI_S1_CLK 45 +#define MM_AXI_S0_CLK 46 +#define CSI0_SRC 47 +#define CSI0_CLK 48 +#define CSI0_PHY_CLK 49 +#define CSI1_SRC 50 +#define CSI1_CLK 51 +#define CSI1_PHY_CLK 52 +#define CSI2_SRC 53 +#define CSI2_CLK 54 +#define CSI2_PHY_CLK 55 +#define DSI_SRC 56 +#define DSI_CLK 57 +#define CSI_PIX_CLK 58 +#define CSI_RDI_CLK 59 +#define MDP_VSYNC_CLK 60 +#define HDMI_DIV_CLK 61 +#define HDMI_APP_CLK 62 +#define CSI_PIX1_CLK 63 +#define CSI_RDI2_CLK 64 +#define CSI_RDI1_CLK 65 +#define GFX2D0_SRC 66 +#define GFX2D0_CLK 67 +#define GFX2D1_SRC 68 +#define GFX2D1_CLK 69 +#define GFX3D_SRC 70 +#define GFX3D_CLK 71 +#define IJPEG_SRC 72 +#define IJPEG_CLK 73 +#define JPEGD_SRC 74 +#define JPEGD_CLK 75 +#define MDP_SRC 76 +#define MDP_CLK 77 +#define MDP_LUT_CLK 78 +#define DSI2_PIXEL_SRC 79 +#define DSI2_PIXEL_CLK 80 +#define DSI2_SRC 81 +#define DSI2_CLK 82 +#define DSI1_BYTE_SRC 83 +#define DSI1_BYTE_CLK 84 +#define DSI2_BYTE_SRC 85 +#define DSI2_BYTE_CLK 86 +#define DSI1_ESC_SRC 87 +#define DSI1_ESC_CLK 88 +#define DSI2_ESC_SRC 89 +#define DSI2_ESC_CLK 90 +#define ROT_SRC 91 +#define ROT_CLK 92 +#define TV_ENC_CLK 93 +#define TV_DAC_CLK 94 +#define HDMI_TV_CLK 95 +#define MDP_TV_CLK 96 +#define TV_SRC 97 +#define VCODEC_SRC 98 +#define VCODEC_CLK 99 +#define VFE_SRC 100 +#define VFE_CLK 101 +#define VFE_CSI_CLK 102 +#define VPE_SRC 103 +#define VPE_CLK 104 +#define DSI_PIXEL_SRC 105 +#define DSI_PIXEL_CLK 106 +#define CAMCLK0_SRC 107 +#define CAMCLK0_CLK 108 +#define CAMCLK1_SRC 109 +#define CAMCLK1_CLK 110 +#define CAMCLK2_SRC 111 +#define CAMCLK2_CLK 112 +#define CSIPHYTIMER_SRC 113 +#define CSIPHY2_TIMER_CLK 114 +#define CSIPHY1_TIMER_CLK 115 +#define CSIPHY0_TIMER_CLK 116 +#define PLL1 117 +#define PLL2 118 +#define RGB_TV_CLK 119 +#define NPL_TV_CLK 120 +#define VCAP_AHB_CLK 121 +#define VCAP_AXI_CLK 122 +#define VCAP_SRC 123 +#define VCAP_CLK 124 +#define VCAP_NPL_CLK 125 +#define PLL15 126 + +#endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h new file mode 100644 index 000000000..28651e54c --- /dev/null +++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8974_H +#define _DT_BINDINGS_CLK_MSM_MMCC_8974_H + +#define MMSS_AHB_CLK_SRC 0 +#define MMSS_AXI_CLK_SRC 1 +#define MMPLL0 2 +#define MMPLL0_VOTE 3 +#define MMPLL1 4 +#define MMPLL1_VOTE 5 +#define MMPLL2 6 +#define MMPLL3 7 +#define CSI0_CLK_SRC 8 +#define CSI1_CLK_SRC 9 +#define CSI2_CLK_SRC 10 +#define CSI3_CLK_SRC 11 +#define VFE0_CLK_SRC 12 +#define VFE1_CLK_SRC 13 +#define MDP_CLK_SRC 14 +#define GFX3D_CLK_SRC 15 +#define JPEG0_CLK_SRC 16 +#define JPEG1_CLK_SRC 17 +#define JPEG2_CLK_SRC 18 +#define PCLK0_CLK_SRC 19 +#define PCLK1_CLK_SRC 20 +#define VCODEC0_CLK_SRC 21 +#define CCI_CLK_SRC 22 +#define CAMSS_GP0_CLK_SRC 23 +#define CAMSS_GP1_CLK_SRC 24 +#define MCLK0_CLK_SRC 25 +#define MCLK1_CLK_SRC 26 +#define MCLK2_CLK_SRC 27 +#define MCLK3_CLK_SRC 28 +#define CSI0PHYTIMER_CLK_SRC 29 +#define CSI1PHYTIMER_CLK_SRC 30 +#define CSI2PHYTIMER_CLK_SRC 31 +#define CPP_CLK_SRC 32 +#define BYTE0_CLK_SRC 33 +#define BYTE1_CLK_SRC 34 +#define EDPAUX_CLK_SRC 35 +#define EDPLINK_CLK_SRC 36 +#define EDPPIXEL_CLK_SRC 37 +#define ESC0_CLK_SRC 38 +#define ESC1_CLK_SRC 39 +#define EXTPCLK_CLK_SRC 40 +#define HDMI_CLK_SRC 41 +#define VSYNC_CLK_SRC 42 +#define MMSS_RBCPR_CLK_SRC 43 +#define CAMSS_CCI_CCI_AHB_CLK 44 +#define CAMSS_CCI_CCI_CLK 45 +#define CAMSS_CSI0_AHB_CLK 46 +#define CAMSS_CSI0_CLK 47 +#define CAMSS_CSI0PHY_CLK 48 +#define CAMSS_CSI0PIX_CLK 49 +#define CAMSS_CSI0RDI_CLK 50 +#define CAMSS_CSI1_AHB_CLK 51 +#define CAMSS_CSI1_CLK 52 +#define CAMSS_CSI1PHY_CLK 53 +#define CAMSS_CSI1PIX_CLK 54 +#define CAMSS_CSI1RDI_CLK 55 +#define CAMSS_CSI2_AHB_CLK 56 +#define CAMSS_CSI2_CLK 57 +#define CAMSS_CSI2PHY_CLK 58 +#define CAMSS_CSI2PIX_CLK 59 +#define CAMSS_CSI2RDI_CLK 60 +#define CAMSS_CSI3_AHB_CLK 61 +#define CAMSS_CSI3_CLK 62 +#define CAMSS_CSI3PHY_CLK 63 +#define CAMSS_CSI3PIX_CLK 64 +#define CAMSS_CSI3RDI_CLK 65 +#define CAMSS_CSI_VFE0_CLK 66 +#define CAMSS_CSI_VFE1_CLK 67 +#define CAMSS_GP0_CLK 68 +#define CAMSS_GP1_CLK 69 +#define CAMSS_ISPIF_AHB_CLK 70 +#define CAMSS_JPEG_JPEG0_CLK 71 +#define CAMSS_JPEG_JPEG1_CLK 72 +#define CAMSS_JPEG_JPEG2_CLK 73 +#define CAMSS_JPEG_JPEG_AHB_CLK 74 +#define CAMSS_JPEG_JPEG_AXI_CLK 75 +#define CAMSS_JPEG_JPEG_OCMEMNOC_CLK 76 +#define CAMSS_MCLK0_CLK 77 +#define CAMSS_MCLK1_CLK 78 +#define CAMSS_MCLK2_CLK 79 +#define CAMSS_MCLK3_CLK 80 +#define CAMSS_MICRO_AHB_CLK 81 +#define CAMSS_PHY0_CSI0PHYTIMER_CLK 82 +#define CAMSS_PHY1_CSI1PHYTIMER_CLK 83 +#define CAMSS_PHY2_CSI2PHYTIMER_CLK 84 +#define CAMSS_TOP_AHB_CLK 85 +#define CAMSS_VFE_CPP_AHB_CLK 86 +#define CAMSS_VFE_CPP_CLK 87 +#define CAMSS_VFE_VFE0_CLK 88 +#define CAMSS_VFE_VFE1_CLK 89 +#define CAMSS_VFE_VFE_AHB_CLK 90 +#define CAMSS_VFE_VFE_AXI_CLK 91 +#define CAMSS_VFE_VFE_OCMEMNOC_CLK 92 +#define MDSS_AHB_CLK 93 +#define MDSS_AXI_CLK 94 +#define MDSS_BYTE0_CLK 95 +#define MDSS_BYTE1_CLK 96 +#define MDSS_EDPAUX_CLK 97 +#define MDSS_EDPLINK_CLK 98 +#define MDSS_EDPPIXEL_CLK 99 +#define MDSS_ESC0_CLK 100 +#define MDSS_ESC1_CLK 101 +#define MDSS_EXTPCLK_CLK 102 +#define MDSS_HDMI_AHB_CLK 103 +#define 
MDSS_HDMI_CLK 104 +#define MDSS_MDP_CLK 105 +#define MDSS_MDP_LUT_CLK 106 +#define MDSS_PCLK0_CLK 107 +#define MDSS_PCLK1_CLK 108 +#define MDSS_VSYNC_CLK 109 +#define MMSS_MISC_AHB_CLK 110 +#define MMSS_MMSSNOC_AHB_CLK 111 +#define MMSS_MMSSNOC_BTO_AHB_CLK 112 +#define MMSS_MMSSNOC_AXI_CLK 113 +#define MMSS_S0_AXI_CLK 114 +#define OCMEMCX_AHB_CLK 115 +#define OCMEMCX_OCMEMNOC_CLK 116 +#define OXILI_OCMEMGX_CLK 117 +#define OCMEMNOC_CLK 118 +#define OXILI_GFX3D_CLK 119 +#define OXILICX_AHB_CLK 120 +#define OXILICX_AXI_CLK 121 +#define VENUS0_AHB_CLK 122 +#define VENUS0_AXI_CLK 123 +#define VENUS0_OCMEMNOC_CLK 124 +#define VENUS0_VCODEC0_CLK 125 +#define OCMEMNOC_CLK_SRC 126 +#define SPDM_JPEG0 127 +#define SPDM_JPEG1 128 +#define SPDM_MDP 129 +#define SPDM_AXI 130 +#define SPDM_VCODEC0 131 +#define SPDM_VFE0 132 +#define SPDM_VFE1 133 +#define SPDM_JPEG2 134 +#define SPDM_PCLK1 135 +#define SPDM_GFX3D 136 +#define SPDM_AHB 137 +#define SPDM_PCLK0 138 +#define SPDM_OCMEMNOC 139 +#define SPDM_CSI0 140 +#define SPDM_RM_AXI 141 +#define SPDM_RM_OCMEMNOC 142 + +/* gdscs */ +#define VENUS0_GDSC 0 +#define MDSS_GDSC 1 +#define CAMSS_JPEG_GDSC 2 +#define CAMSS_VFE_GDSC 3 +#define OXILI_GDSC 4 +#define OXILICX_GDSC 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8996.h b/include/dt-bindings/clock/qcom,mmcc-msm8996.h new file mode 100644 index 000000000..5abc445ad --- /dev/null +++ b/include/dt-bindings/clock/qcom,mmcc-msm8996.h @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8996_H +#define _DT_BINDINGS_CLK_MSM_MMCC_8996_H + +#define MMPLL0_EARLY 0 +#define MMPLL0_PLL 1 +#define MMPLL1_EARLY 2 +#define MMPLL1_PLL 3 +#define MMPLL2_EARLY 4 +#define MMPLL2_PLL 5 +#define MMPLL3_EARLY 6 +#define MMPLL3_PLL 7 +#define MMPLL4_EARLY 8 +#define MMPLL4_PLL 9 +#define MMPLL5_EARLY 10 +#define MMPLL5_PLL 11 +#define MMPLL8_EARLY 12 +#define MMPLL8_PLL 13 +#define MMPLL9_EARLY 14 +#define MMPLL9_PLL 15 +#define AHB_CLK_SRC 16 +#define AXI_CLK_SRC 17 +#define MAXI_CLK_SRC 18 +#define DSA_CORE_CLK_SRC 19 +#define GFX3D_CLK_SRC 20 +#define RBBMTIMER_CLK_SRC 21 +#define ISENSE_CLK_SRC 22 +#define RBCPR_CLK_SRC 23 +#define VIDEO_CORE_CLK_SRC 24 +#define VIDEO_SUBCORE0_CLK_SRC 25 +#define VIDEO_SUBCORE1_CLK_SRC 26 +#define PCLK0_CLK_SRC 27 +#define PCLK1_CLK_SRC 28 +#define MDP_CLK_SRC 29 +#define EXTPCLK_CLK_SRC 30 +#define VSYNC_CLK_SRC 31 +#define HDMI_CLK_SRC 32 +#define BYTE0_CLK_SRC 33 +#define BYTE1_CLK_SRC 34 +#define ESC0_CLK_SRC 35 +#define ESC1_CLK_SRC 36 +#define CAMSS_GP0_CLK_SRC 37 +#define CAMSS_GP1_CLK_SRC 38 +#define MCLK0_CLK_SRC 39 +#define MCLK1_CLK_SRC 40 +#define MCLK2_CLK_SRC 41 +#define MCLK3_CLK_SRC 42 +#define CCI_CLK_SRC 43 +#define CSI0PHYTIMER_CLK_SRC 44 +#define CSI1PHYTIMER_CLK_SRC 45 +#define CSI2PHYTIMER_CLK_SRC 46 +#define CSIPHY0_3P_CLK_SRC 47 +#define CSIPHY1_3P_CLK_SRC 48 +#define CSIPHY2_3P_CLK_SRC 49 +#define JPEG0_CLK_SRC 50 +#define JPEG2_CLK_SRC 51 +#define JPEG_DMA_CLK_SRC 52 +#define VFE0_CLK_SRC 53 +#define VFE1_CLK_SRC 54 +#define CPP_CLK_SRC 55 +#define CSI0_CLK_SRC 56 +#define CSI1_CLK_SRC 57 +#define CSI2_CLK_SRC 58 +#define CSI3_CLK_SRC 59 +#define FD_CORE_CLK_SRC 60 +#define MMSS_CXO_CLK 61 +#define MMSS_SLEEPCLK_CLK 62 +#define MMSS_MMAGIC_AHB_CLK 63 +#define MMSS_MMAGIC_CFG_AHB_CLK 64 +#define MMSS_MISC_AHB_CLK 65 +#define MMSS_MISC_CXO_CLK 66 +#define MMSS_BTO_AHB_CLK 67 +#define MMSS_MMAGIC_AXI_CLK 68 +#define MMSS_S0_AXI_CLK 69 +#define MMSS_MMAGIC_MAXI_CLK 70 +#define DSA_CORE_CLK 71 +#define DSA_NOC_CFG_AHB_CLK 72 +#define MMAGIC_CAMSS_AXI_CLK 73 +#define MMAGIC_CAMSS_NOC_CFG_AHB_CLK 74 +#define THROTTLE_CAMSS_CXO_CLK 75 +#define THROTTLE_CAMSS_AHB_CLK 76 +#define THROTTLE_CAMSS_AXI_CLK 77 +#define SMMU_VFE_AHB_CLK 78 +#define SMMU_VFE_AXI_CLK 79 +#define SMMU_CPP_AHB_CLK 80 +#define SMMU_CPP_AXI_CLK 81 +#define SMMU_JPEG_AHB_CLK 82 +#define SMMU_JPEG_AXI_CLK 83 +#define MMAGIC_MDSS_AXI_CLK 84 +#define MMAGIC_MDSS_NOC_CFG_AHB_CLK 85 +#define THROTTLE_MDSS_CXO_CLK 86 +#define THROTTLE_MDSS_AHB_CLK 87 +#define THROTTLE_MDSS_AXI_CLK 88 +#define SMMU_ROT_AHB_CLK 89 +#define SMMU_ROT_AXI_CLK 90 +#define SMMU_MDP_AHB_CLK 91 +#define SMMU_MDP_AXI_CLK 92 +#define MMAGIC_VIDEO_AXI_CLK 93 +#define MMAGIC_VIDEO_NOC_CFG_AHB_CLK 94 +#define THROTTLE_VIDEO_CXO_CLK 95 +#define THROTTLE_VIDEO_AHB_CLK 96 +#define THROTTLE_VIDEO_AXI_CLK 97 +#define SMMU_VIDEO_AHB_CLK 98 +#define SMMU_VIDEO_AXI_CLK 99 +#define MMAGIC_BIMC_AXI_CLK 100 +#define MMAGIC_BIMC_NOC_CFG_AHB_CLK 101 +#define GPU_GX_GFX3D_CLK 102 +#define GPU_GX_RBBMTIMER_CLK 103 +#define GPU_AHB_CLK 104 +#define GPU_AON_ISENSE_CLK 105 +#define VMEM_MAXI_CLK 106 +#define VMEM_AHB_CLK 107 +#define MMSS_RBCPR_CLK 108 +#define MMSS_RBCPR_AHB_CLK 109 +#define VIDEO_CORE_CLK 110 +#define VIDEO_AXI_CLK 111 +#define VIDEO_MAXI_CLK 112 +#define VIDEO_AHB_CLK 113 +#define VIDEO_SUBCORE0_CLK 114 +#define VIDEO_SUBCORE1_CLK 115 +#define MDSS_AHB_CLK 116 +#define MDSS_HDMI_AHB_CLK 117 +#define MDSS_AXI_CLK 118 +#define MDSS_PCLK0_CLK 119 +#define 
MDSS_PCLK1_CLK 120 +#define MDSS_MDP_CLK 121 +#define MDSS_EXTPCLK_CLK 122 +#define MDSS_VSYNC_CLK 123 +#define MDSS_HDMI_CLK 124 +#define MDSS_BYTE0_CLK 125 +#define MDSS_BYTE1_CLK 126 +#define MDSS_ESC0_CLK 127 +#define MDSS_ESC1_CLK 128 +#define CAMSS_TOP_AHB_CLK 129 +#define CAMSS_AHB_CLK 130 +#define CAMSS_MICRO_AHB_CLK 131 +#define CAMSS_GP0_CLK 132 +#define CAMSS_GP1_CLK 133 +#define CAMSS_MCLK0_CLK 134 +#define CAMSS_MCLK1_CLK 135 +#define CAMSS_MCLK2_CLK 136 +#define CAMSS_MCLK3_CLK 137 +#define CAMSS_CCI_CLK 138 +#define CAMSS_CCI_AHB_CLK 139 +#define CAMSS_CSI0PHYTIMER_CLK 140 +#define CAMSS_CSI1PHYTIMER_CLK 141 +#define CAMSS_CSI2PHYTIMER_CLK 142 +#define CAMSS_CSIPHY0_3P_CLK 143 +#define CAMSS_CSIPHY1_3P_CLK 144 +#define CAMSS_CSIPHY2_3P_CLK 145 +#define CAMSS_JPEG0_CLK 146 +#define CAMSS_JPEG2_CLK 147 +#define CAMSS_JPEG_DMA_CLK 148 +#define CAMSS_JPEG_AHB_CLK 149 +#define CAMSS_JPEG_AXI_CLK 150 +#define CAMSS_VFE_AHB_CLK 151 +#define CAMSS_VFE_AXI_CLK 152 +#define CAMSS_VFE0_CLK 153 +#define CAMSS_VFE0_STREAM_CLK 154 +#define CAMSS_VFE0_AHB_CLK 155 +#define CAMSS_VFE1_CLK 156 +#define CAMSS_VFE1_STREAM_CLK 157 +#define CAMSS_VFE1_AHB_CLK 158 +#define CAMSS_CSI_VFE0_CLK 159 +#define CAMSS_CSI_VFE1_CLK 160 +#define CAMSS_CPP_VBIF_AHB_CLK 161 +#define CAMSS_CPP_AXI_CLK 162 +#define CAMSS_CPP_CLK 163 +#define CAMSS_CPP_AHB_CLK 164 +#define CAMSS_CSI0_CLK 165 +#define CAMSS_CSI0_AHB_CLK 166 +#define CAMSS_CSI0PHY_CLK 167 +#define CAMSS_CSI0RDI_CLK 168 +#define CAMSS_CSI0PIX_CLK 169 +#define CAMSS_CSI1_CLK 170 +#define CAMSS_CSI1_AHB_CLK 171 +#define CAMSS_CSI1PHY_CLK 172 +#define CAMSS_CSI1RDI_CLK 173 +#define CAMSS_CSI1PIX_CLK 174 +#define CAMSS_CSI2_CLK 175 +#define CAMSS_CSI2_AHB_CLK 176 +#define CAMSS_CSI2PHY_CLK 177 +#define CAMSS_CSI2RDI_CLK 178 +#define CAMSS_CSI2PIX_CLK 179 +#define CAMSS_CSI3_CLK 180 +#define CAMSS_CSI3_AHB_CLK 181 +#define CAMSS_CSI3PHY_CLK 182 +#define CAMSS_CSI3RDI_CLK 183 +#define CAMSS_CSI3PIX_CLK 184 +#define CAMSS_ISPIF_AHB_CLK 185 +#define FD_CORE_CLK 186 +#define FD_CORE_UAR_CLK 187 +#define FD_AHB_CLK 188 +#define MMSS_SPDM_CSI0_CLK 189 +#define MMSS_SPDM_JPEG_DMA_CLK 190 +#define MMSS_SPDM_CPP_CLK 191 +#define MMSS_SPDM_PCLK0_CLK 192 +#define MMSS_SPDM_AHB_CLK 193 +#define MMSS_SPDM_GFX3D_CLK 194 +#define MMSS_SPDM_PCLK1_CLK 195 +#define MMSS_SPDM_JPEG2_CLK 196 +#define MMSS_SPDM_DEBUG_CLK 197 +#define MMSS_SPDM_VFE1_CLK 198 +#define MMSS_SPDM_VFE0_CLK 199 +#define MMSS_SPDM_VIDEO_CORE_CLK 200 +#define MMSS_SPDM_AXI_CLK 201 +#define MMSS_SPDM_MDP_CLK 202 +#define MMSS_SPDM_JPEG0_CLK 203 +#define MMSS_SPDM_RM_AXI_CLK 204 +#define MMSS_SPDM_RM_MAXI_CLK 205 + +#define MMAGICAHB_BCR 0 +#define MMAGIC_CFG_BCR 1 +#define MISC_BCR 2 +#define BTO_BCR 3 +#define MMAGICAXI_BCR 4 +#define MMAGICMAXI_BCR 5 +#define DSA_BCR 6 +#define MMAGIC_CAMSS_BCR 7 +#define THROTTLE_CAMSS_BCR 8 +#define SMMU_VFE_BCR 9 +#define SMMU_CPP_BCR 10 +#define SMMU_JPEG_BCR 11 +#define MMAGIC_MDSS_BCR 12 +#define THROTTLE_MDSS_BCR 13 +#define SMMU_ROT_BCR 14 +#define SMMU_MDP_BCR 15 +#define MMAGIC_VIDEO_BCR 16 +#define THROTTLE_VIDEO_BCR 17 +#define SMMU_VIDEO_BCR 18 +#define MMAGIC_BIMC_BCR 19 +#define GPU_GX_BCR 20 +#define GPU_BCR 21 +#define GPU_AON_BCR 22 +#define VMEM_BCR 23 +#define MMSS_RBCPR_BCR 24 +#define VIDEO_BCR 25 +#define MDSS_BCR 26 +#define CAMSS_TOP_BCR 27 +#define CAMSS_AHB_BCR 28 +#define CAMSS_MICRO_BCR 29 +#define CAMSS_CCI_BCR 30 +#define CAMSS_PHY0_BCR 31 +#define CAMSS_PHY1_BCR 32 +#define CAMSS_PHY2_BCR 33 +#define CAMSS_CSIPHY0_3P_BCR 34 +#define 
CAMSS_CSIPHY1_3P_BCR 35 +#define CAMSS_CSIPHY2_3P_BCR 36 +#define CAMSS_JPEG_BCR 37 +#define CAMSS_VFE_BCR 38 +#define CAMSS_VFE0_BCR 39 +#define CAMSS_VFE1_BCR 40 +#define CAMSS_CSI_VFE0_BCR 41 +#define CAMSS_CSI_VFE1_BCR 42 +#define CAMSS_CPP_TOP_BCR 43 +#define CAMSS_CPP_BCR 44 +#define CAMSS_CSI0_BCR 45 +#define CAMSS_CSI0RDI_BCR 46 +#define CAMSS_CSI0PIX_BCR 47 +#define CAMSS_CSI1_BCR 48 +#define CAMSS_CSI1RDI_BCR 49 +#define CAMSS_CSI1PIX_BCR 50 +#define CAMSS_CSI2_BCR 51 +#define CAMSS_CSI2RDI_BCR 52 +#define CAMSS_CSI2PIX_BCR 53 +#define CAMSS_CSI3_BCR 54 +#define CAMSS_CSI3RDI_BCR 55 +#define CAMSS_CSI3PIX_BCR 56 +#define CAMSS_ISPIF_BCR 57 +#define FD_BCR 58 +#define MMSS_SPDM_RM_BCR 59 + +/* Indexes for GDSCs */ +#define MMAGIC_VIDEO_GDSC 0 +#define MMAGIC_MDSS_GDSC 1 +#define MMAGIC_CAMSS_GDSC 2 +#define GPU_GDSC 3 +#define VENUS_GDSC 4 +#define VENUS_CORE0_GDSC 5 +#define VENUS_CORE1_GDSC 6 +#define CAMSS_GDSC 7 +#define VFE0_GDSC 8 +#define VFE1_GDSC 9 +#define JPEG_GDSC 10 +#define CPP_GDSC 11 +#define FD_GDSC 12 +#define MDSS_GDSC 13 +#define GPU_GX_GDSC 14 +#define MMAGIC_BIMC_GDSC 15 + +#endif diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h new file mode 100644 index 000000000..c585b82b9 --- /dev/null +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -0,0 +1,127 @@ +/* + * Copyright 2015 Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H +#define _DT_BINDINGS_CLK_MSM_RPMCC_H + +/* RPM clocks */ +#define RPM_PXO_CLK 0 +#define RPM_PXO_A_CLK 1 +#define RPM_CXO_CLK 2 +#define RPM_CXO_A_CLK 3 +#define RPM_APPS_FABRIC_CLK 4 +#define RPM_APPS_FABRIC_A_CLK 5 +#define RPM_CFPB_CLK 6 +#define RPM_CFPB_A_CLK 7 +#define RPM_QDSS_CLK 8 +#define RPM_QDSS_A_CLK 9 +#define RPM_DAYTONA_FABRIC_CLK 10 +#define RPM_DAYTONA_FABRIC_A_CLK 11 +#define RPM_EBI1_CLK 12 +#define RPM_EBI1_A_CLK 13 +#define RPM_MM_FABRIC_CLK 14 +#define RPM_MM_FABRIC_A_CLK 15 +#define RPM_MMFPB_CLK 16 +#define RPM_MMFPB_A_CLK 17 +#define RPM_SYS_FABRIC_CLK 18 +#define RPM_SYS_FABRIC_A_CLK 19 +#define RPM_SFPB_CLK 20 +#define RPM_SFPB_A_CLK 21 +#define RPM_SMI_CLK 22 +#define RPM_SMI_A_CLK 23 +#define RPM_PLL4_CLK 24 +#define RPM_XO_D0 25 +#define RPM_XO_D1 26 +#define RPM_XO_A0 27 +#define RPM_XO_A1 28 +#define RPM_XO_A2 29 + +/* SMD RPM clocks */ +#define RPM_SMD_XO_CLK_SRC 0 +#define RPM_SMD_XO_A_CLK_SRC 1 +#define RPM_SMD_PCNOC_CLK 2 +#define RPM_SMD_PCNOC_A_CLK 3 +#define RPM_SMD_SNOC_CLK 4 +#define RPM_SMD_SNOC_A_CLK 5 +#define RPM_SMD_BIMC_CLK 6 +#define RPM_SMD_BIMC_A_CLK 7 +#define RPM_SMD_QDSS_CLK 8 +#define RPM_SMD_QDSS_A_CLK 9 +#define RPM_SMD_BB_CLK1 10 +#define RPM_SMD_BB_CLK1_A 11 +#define RPM_SMD_BB_CLK2 12 +#define RPM_SMD_BB_CLK2_A 13 +#define RPM_SMD_RF_CLK1 14 +#define RPM_SMD_RF_CLK1_A 15 +#define RPM_SMD_RF_CLK2 16 +#define RPM_SMD_RF_CLK2_A 17 +#define RPM_SMD_BB_CLK1_PIN 18 +#define RPM_SMD_BB_CLK1_A_PIN 19 +#define RPM_SMD_BB_CLK2_PIN 20 +#define RPM_SMD_BB_CLK2_A_PIN 21 +#define RPM_SMD_RF_CLK1_PIN 22 +#define RPM_SMD_RF_CLK1_A_PIN 23 +#define RPM_SMD_RF_CLK2_PIN 24 +#define RPM_SMD_RF_CLK2_A_PIN 25 +#define RPM_SMD_PNOC_CLK 26 +#define RPM_SMD_PNOC_A_CLK 27 +#define RPM_SMD_CNOC_CLK 28 +#define RPM_SMD_CNOC_A_CLK 29 +#define RPM_SMD_MMSSNOC_AHB_CLK 30 +#define RPM_SMD_MMSSNOC_AHB_A_CLK 31 +#define RPM_SMD_GFX3D_CLK_SRC 32 +#define RPM_SMD_GFX3D_A_CLK_SRC 33 +#define RPM_SMD_OCMEMGX_CLK 34 +#define RPM_SMD_OCMEMGX_A_CLK 35 +#define RPM_SMD_CXO_D0 36 +#define RPM_SMD_CXO_D0_A 37 +#define RPM_SMD_CXO_D1 38 +#define RPM_SMD_CXO_D1_A 39 +#define RPM_SMD_CXO_A0 40 +#define RPM_SMD_CXO_A0_A 41 +#define RPM_SMD_CXO_A1 42 +#define RPM_SMD_CXO_A1_A 43 +#define RPM_SMD_CXO_A2 44 +#define RPM_SMD_CXO_A2_A 45 +#define RPM_SMD_DIV_CLK1 46 +#define RPM_SMD_DIV_A_CLK1 47 +#define RPM_SMD_DIV_CLK2 48 +#define RPM_SMD_DIV_A_CLK2 49 +#define RPM_SMD_DIFF_CLK 50 +#define RPM_SMD_DIFF_A_CLK 51 +#define RPM_SMD_CXO_D0_PIN 52 +#define RPM_SMD_CXO_D0_A_PIN 53 +#define RPM_SMD_CXO_D1_PIN 54 +#define RPM_SMD_CXO_D1_A_PIN 55 +#define RPM_SMD_CXO_A0_PIN 56 +#define RPM_SMD_CXO_A0_A_PIN 57 +#define RPM_SMD_CXO_A1_PIN 58 +#define RPM_SMD_CXO_A1_A_PIN 59 +#define RPM_SMD_CXO_A2_PIN 60 +#define RPM_SMD_CXO_A2_A_PIN 61 +#define RPM_SMD_AGGR1_NOC_CLK 62 +#define RPM_SMD_AGGR1_NOC_A_CLK 63 +#define RPM_SMD_AGGR2_NOC_CLK 64 +#define RPM_SMD_AGGR2_NOC_A_CLK 65 +#define RPM_SMD_MMAXI_CLK 66 +#define RPM_SMD_MMAXI_A_CLK 67 +#define RPM_SMD_IPA_CLK 68 +#define RPM_SMD_IPA_A_CLK 69 +#define RPM_SMD_CE1_CLK 70 +#define RPM_SMD_CE1_A_CLK 71 +#define RPM_SMD_DIV_CLK3 72 +#define RPM_SMD_DIV_A_CLK3 73 +#define RPM_SMD_LN_BB_CLK 74 +#define RPM_SMD_LN_BB_A_CLK 75 + +#endif diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h new file mode 100644 index 000000000..f48fbd6f2 --- /dev/null +++ b/include/dt-bindings/clock/qcom,rpmh.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 
2018, The Linux Foundation. All rights reserved. */ + + +#ifndef _DT_BINDINGS_CLK_MSM_RPMH_H +#define _DT_BINDINGS_CLK_MSM_RPMH_H + +/* RPMh controlled clocks */ +#define RPMH_CXO_CLK 0 +#define RPMH_CXO_CLK_A 1 +#define RPMH_LN_BB_CLK2 2 +#define RPMH_LN_BB_CLK2_A 3 +#define RPMH_LN_BB_CLK3 4 +#define RPMH_LN_BB_CLK3_A 5 +#define RPMH_RF_CLK1 6 +#define RPMH_RF_CLK1_A 7 +#define RPMH_RF_CLK2 8 +#define RPMH_RF_CLK2_A 9 +#define RPMH_RF_CLK3 10 +#define RPMH_RF_CLK3_A 11 + +#endif diff --git a/include/dt-bindings/clock/qcom,videocc-sdm845.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h new file mode 100644 index 000000000..1b868165e --- /dev/null +++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_SDM_VIDEO_CC_SDM845_H +#define _DT_BINDINGS_CLK_SDM_VIDEO_CC_SDM845_H + +/* VIDEO_CC clock registers */ +#define VIDEO_CC_APB_CLK 0 +#define VIDEO_CC_AT_CLK 1 +#define VIDEO_CC_QDSS_TRIG_CLK 2 +#define VIDEO_CC_QDSS_TSCTR_DIV8_CLK 3 +#define VIDEO_CC_VCODEC0_AXI_CLK 4 +#define VIDEO_CC_VCODEC0_CORE_CLK 5 +#define VIDEO_CC_VCODEC1_AXI_CLK 6 +#define VIDEO_CC_VCODEC1_CORE_CLK 7 +#define VIDEO_CC_VENUS_AHB_CLK 8 +#define VIDEO_CC_VENUS_CLK_SRC 9 +#define VIDEO_CC_VENUS_CTL_AXI_CLK 10 +#define VIDEO_CC_VENUS_CTL_CORE_CLK 11 +#define VIDEO_PLL0 12 + +/* VIDEO_CC Resets */ +#define VIDEO_CC_VENUS_BCR 0 +#define VIDEO_CC_VCODEC0_BCR 1 +#define VIDEO_CC_VCODEC1_BCR 2 +#define VIDEO_CC_INTERFACE_BCR 3 + +/* VIDEO_CC GDSCRs */ +#define VENUS_GDSC 0 +#define VCODEC0_GDSC 1 +#define VCODEC1_GDSC 2 + +#endif diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h new file mode 100644 index 000000000..0dcb3e87d --- /dev/null +++ b/include/dt-bindings/clock/r7s72100-clock.h @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2014 Renesas Solutions Corp. + * Copyright (C) 2014 Wolfram Sang, Sang Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R7S72100_H__ +#define __DT_BINDINGS_CLOCK_R7S72100_H__ + +#define R7S72100_CLK_PLL 0 +#define R7S72100_CLK_I 1 +#define R7S72100_CLK_G 2 + +/* MSTP2 */ +#define R7S72100_CLK_CORESIGHT 0 + +/* MSTP3 */ +#define R7S72100_CLK_IEBUS 7 +#define R7S72100_CLK_IRDA 6 +#define R7S72100_CLK_LIN0 5 +#define R7S72100_CLK_LIN1 4 +#define R7S72100_CLK_MTU2 3 +#define R7S72100_CLK_CAN 2 +#define R7S72100_CLK_ADCPWR 1 +#define R7S72100_CLK_PWM 0 + +/* MSTP4 */ +#define R7S72100_CLK_SCIF0 7 +#define R7S72100_CLK_SCIF1 6 +#define R7S72100_CLK_SCIF2 5 +#define R7S72100_CLK_SCIF3 4 +#define R7S72100_CLK_SCIF4 3 +#define R7S72100_CLK_SCIF5 2 +#define R7S72100_CLK_SCIF6 1 +#define R7S72100_CLK_SCIF7 0 + +/* MSTP5 */ +#define R7S72100_CLK_SCI0 7 +#define R7S72100_CLK_SCI1 6 +#define R7S72100_CLK_SG0 5 +#define R7S72100_CLK_SG1 4 +#define R7S72100_CLK_SG2 3 +#define R7S72100_CLK_SG3 2 +#define R7S72100_CLK_OSTM0 1 +#define R7S72100_CLK_OSTM1 0 + +/* MSTP6 */ +#define R7S72100_CLK_ADC 7 +#define R7S72100_CLK_CEU 6 +#define R7S72100_CLK_DOC0 5 +#define R7S72100_CLK_DOC1 4 +#define R7S72100_CLK_DRC0 3 +#define R7S72100_CLK_DRC1 2 +#define R7S72100_CLK_JCU 1 +#define R7S72100_CLK_RTC 0 + +/* MSTP7 */ +#define R7S72100_CLK_VDEC0 7 +#define R7S72100_CLK_VDEC1 6 +#define R7S72100_CLK_ETHER 4 +#define R7S72100_CLK_NAND 3 +#define R7S72100_CLK_USB0 1 +#define R7S72100_CLK_USB1 0 + +/* MSTP8 */ +#define R7S72100_CLK_IMR0 7 +#define R7S72100_CLK_IMR1 6 +#define R7S72100_CLK_IMRDISP 5 +#define R7S72100_CLK_MMCIF 4 +#define R7S72100_CLK_MLB 3 +#define R7S72100_CLK_ETHAVB 2 +#define R7S72100_CLK_SCUX 1 + +/* MSTP9 */ +#define R7S72100_CLK_I2C0 7 +#define R7S72100_CLK_I2C1 6 +#define R7S72100_CLK_I2C2 5 +#define R7S72100_CLK_I2C3 4 +#define R7S72100_CLK_SPIBSC0 3 +#define R7S72100_CLK_SPIBSC1 2 +#define R7S72100_CLK_VDC50 1 /* and LVDS */ +#define R7S72100_CLK_VDC51 0 + +/* MSTP10 */ +#define R7S72100_CLK_SPI0 7 +#define R7S72100_CLK_SPI1 6 +#define R7S72100_CLK_SPI2 5 +#define R7S72100_CLK_SPI3 4 +#define R7S72100_CLK_SPI4 3 +#define R7S72100_CLK_CDROM 2 +#define R7S72100_CLK_SPDIF 1 +#define R7S72100_CLK_RGPVG2 0 + +/* MSTP11 */ +#define R7S72100_CLK_SSI0 5 +#define R7S72100_CLK_SSI1 4 +#define R7S72100_CLK_SSI2 3 +#define R7S72100_CLK_SSI3 2 +#define R7S72100_CLK_SSI4 1 +#define R7S72100_CLK_SSI5 0 + +/* MSTP12 */ +#define R7S72100_CLK_SDHI00 3 +#define R7S72100_CLK_SDHI01 2 +#define R7S72100_CLK_SDHI10 1 +#define R7S72100_CLK_SDHI11 0 + +/* MSTP13 */ +#define R7S72100_CLK_PIX1 2 +#define R7S72100_CLK_PIX0 1 + +#endif /* __DT_BINDINGS_CLOCK_R7S72100_H__ */ diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h new file mode 100644 index 000000000..4b3668157 --- /dev/null +++ b/include/dt-bindings/clock/r8a73a4-clock.h @@ -0,0 +1,64 @@ +/* + * Copyright 2014 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A73A4_H__ +#define __DT_BINDINGS_CLOCK_R8A73A4_H__ + +/* CPG */ +#define R8A73A4_CLK_MAIN 0 +#define R8A73A4_CLK_PLL0 1 +#define R8A73A4_CLK_PLL1 2 +#define R8A73A4_CLK_PLL2 3 +#define R8A73A4_CLK_PLL2S 4 +#define R8A73A4_CLK_PLL2H 5 +#define R8A73A4_CLK_Z 6 +#define R8A73A4_CLK_Z2 7 +#define R8A73A4_CLK_I 8 +#define R8A73A4_CLK_M3 9 +#define R8A73A4_CLK_B 10 +#define R8A73A4_CLK_M1 11 +#define R8A73A4_CLK_M2 12 +#define R8A73A4_CLK_ZX 13 +#define R8A73A4_CLK_ZS 14 +#define R8A73A4_CLK_HP 15 + +/* MSTP2 */ +#define R8A73A4_CLK_DMAC 18 +#define R8A73A4_CLK_SCIFB3 17 +#define R8A73A4_CLK_SCIFB2 16 +#define R8A73A4_CLK_SCIFB1 7 +#define R8A73A4_CLK_SCIFB0 6 +#define R8A73A4_CLK_SCIFA0 4 +#define R8A73A4_CLK_SCIFA1 3 + +/* MSTP3 */ +#define R8A73A4_CLK_CMT1 29 +#define R8A73A4_CLK_IIC1 23 +#define R8A73A4_CLK_IIC0 18 +#define R8A73A4_CLK_IIC7 17 +#define R8A73A4_CLK_IIC6 16 +#define R8A73A4_CLK_MMCIF0 15 +#define R8A73A4_CLK_SDHI0 14 +#define R8A73A4_CLK_SDHI1 13 +#define R8A73A4_CLK_SDHI2 12 +#define R8A73A4_CLK_MMCIF1 5 +#define R8A73A4_CLK_IIC2 0 + +/* MSTP4 */ +#define R8A73A4_CLK_IIC3 11 +#define R8A73A4_CLK_IIC4 10 +#define R8A73A4_CLK_IIC5 9 +#define R8A73A4_CLK_INTC_SYS 8 +#define R8A73A4_CLK_IRQC 7 + +/* MSTP5 */ +#define R8A73A4_CLK_THERMAL 22 +#define R8A73A4_CLK_IIC8 15 + +#endif /* __DT_BINDINGS_CLOCK_R8A73A4_H__ */ diff --git a/include/dt-bindings/clock/r8a7740-clock.h b/include/dt-bindings/clock/r8a7740-clock.h new file mode 100644 index 000000000..476135da0 --- /dev/null +++ b/include/dt-bindings/clock/r8a7740-clock.h @@ -0,0 +1,78 @@ +/* + * Copyright 2014 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7740_H__ +#define __DT_BINDINGS_CLOCK_R8A7740_H__ + +/* CPG */ +#define R8A7740_CLK_SYSTEM 0 +#define R8A7740_CLK_PLLC0 1 +#define R8A7740_CLK_PLLC1 2 +#define R8A7740_CLK_PLLC2 3 +#define R8A7740_CLK_R 4 +#define R8A7740_CLK_USB24S 5 +#define R8A7740_CLK_I 6 +#define R8A7740_CLK_ZG 7 +#define R8A7740_CLK_B 8 +#define R8A7740_CLK_M1 9 +#define R8A7740_CLK_HP 10 +#define R8A7740_CLK_HPP 11 +#define R8A7740_CLK_USBP 12 +#define R8A7740_CLK_S 13 +#define R8A7740_CLK_ZB 14 +#define R8A7740_CLK_M3 15 +#define R8A7740_CLK_CP 16 + +/* MSTP1 */ +#define R8A7740_CLK_CEU21 28 +#define R8A7740_CLK_CEU20 27 +#define R8A7740_CLK_TMU0 25 +#define R8A7740_CLK_LCDC1 17 +#define R8A7740_CLK_IIC0 16 +#define R8A7740_CLK_TMU1 11 +#define R8A7740_CLK_LCDC0 0 + +/* MSTP2 */ +#define R8A7740_CLK_SCIFA6 30 +#define R8A7740_CLK_INTCA 29 +#define R8A7740_CLK_SCIFA7 22 +#define R8A7740_CLK_DMAC1 18 +#define R8A7740_CLK_DMAC2 17 +#define R8A7740_CLK_DMAC3 16 +#define R8A7740_CLK_USBDMAC 14 +#define R8A7740_CLK_SCIFA5 7 +#define R8A7740_CLK_SCIFB 6 +#define R8A7740_CLK_SCIFA0 4 +#define R8A7740_CLK_SCIFA1 3 +#define R8A7740_CLK_SCIFA2 2 +#define R8A7740_CLK_SCIFA3 1 +#define R8A7740_CLK_SCIFA4 0 + +/* MSTP3 */ +#define R8A7740_CLK_CMT1 29 +#define R8A7740_CLK_FSI 28 +#define R8A7740_CLK_IIC1 23 +#define R8A7740_CLK_USBF 20 +#define R8A7740_CLK_SDHI0 14 +#define R8A7740_CLK_SDHI1 13 +#define R8A7740_CLK_MMC 12 +#define R8A7740_CLK_GETHER 9 +#define R8A7740_CLK_TPU0 4 + +/* MSTP4 */ +#define R8A7740_CLK_USBH 16 +#define R8A7740_CLK_SDHI2 15 +#define R8A7740_CLK_USBFUNC 7 +#define R8A7740_CLK_USBPHY 6 + +/* SUBCK* */ +#define R8A7740_CLK_SUBCK 9 +#define R8A7740_CLK_SUBCK2 10 + +#endif /* __DT_BINDINGS_CLOCK_R8A7740_H__ */ diff --git a/include/dt-bindings/clock/r8a7743-cpg-mssr.h b/include/dt-bindings/clock/r8a7743-cpg-mssr.h new file mode 100644 index 000000000..e1d1f3c6a --- /dev/null +++ b/include/dt-bindings/clock/r8a7743-cpg-mssr.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2016 Cogent Embedded Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__ + +#include + +/* r8a7743 CPG Core Clocks */ +#define R8A7743_CLK_Z 0 +#define R8A7743_CLK_ZG 1 +#define R8A7743_CLK_ZTR 2 +#define R8A7743_CLK_ZTRD2 3 +#define R8A7743_CLK_ZT 4 +#define R8A7743_CLK_ZX 5 +#define R8A7743_CLK_ZS 6 +#define R8A7743_CLK_HP 7 +#define R8A7743_CLK_B 9 +#define R8A7743_CLK_LB 10 +#define R8A7743_CLK_P 11 +#define R8A7743_CLK_CL 12 +#define R8A7743_CLK_M2 13 +#define R8A7743_CLK_ZB3 15 +#define R8A7743_CLK_ZB3D2 16 +#define R8A7743_CLK_DDR 17 +#define R8A7743_CLK_SDH 18 +#define R8A7743_CLK_SD0 19 +#define R8A7743_CLK_SD2 20 +#define R8A7743_CLK_SD3 21 +#define R8A7743_CLK_MMC0 22 +#define R8A7743_CLK_MP 23 +#define R8A7743_CLK_QSPI 26 +#define R8A7743_CLK_CP 27 +#define R8A7743_CLK_RCAN 28 +#define R8A7743_CLK_R 29 +#define R8A7743_CLK_OSC 30 + +#endif /* __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7745-cpg-mssr.h b/include/dt-bindings/clock/r8a7745-cpg-mssr.h new file mode 100644 index 000000000..56ad6f0c6 --- /dev/null +++ b/include/dt-bindings/clock/r8a7745-cpg-mssr.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2016 Cogent Embedded Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__ + +#include + +/* r8a7745 CPG Core Clocks */ +#define R8A7745_CLK_Z2 0 +#define R8A7745_CLK_ZG 1 +#define R8A7745_CLK_ZTR 2 +#define R8A7745_CLK_ZTRD2 3 +#define R8A7745_CLK_ZT 4 +#define R8A7745_CLK_ZX 5 +#define R8A7745_CLK_ZS 6 +#define R8A7745_CLK_HP 7 +#define R8A7745_CLK_B 9 +#define R8A7745_CLK_LB 10 +#define R8A7745_CLK_P 11 +#define R8A7745_CLK_CL 12 +#define R8A7745_CLK_CP 13 +#define R8A7745_CLK_M2 14 +#define R8A7745_CLK_ZB3 16 +#define R8A7745_CLK_ZB3D2 17 +#define R8A7745_CLK_DDR 18 +#define R8A7745_CLK_SDH 19 +#define R8A7745_CLK_SD0 20 +#define R8A7745_CLK_SD2 21 +#define R8A7745_CLK_SD3 22 +#define R8A7745_CLK_MMC0 23 +#define R8A7745_CLK_MP 24 +#define R8A7745_CLK_QSPI 25 +#define R8A7745_CLK_CPEX 26 +#define R8A7745_CLK_RCAN 27 +#define R8A7745_CLK_R 28 +#define R8A7745_CLK_OSC 29 + +#endif /* __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77470-cpg-mssr.h b/include/dt-bindings/clock/r8a77470-cpg-mssr.h new file mode 100644 index 000000000..34cba49d0 --- /dev/null +++ b/include/dt-bindings/clock/r8a77470-cpg-mssr.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__ + +#include + +/* r8a77470 CPG Core Clocks */ +#define R8A77470_CLK_Z2 0 +#define R8A77470_CLK_ZTR 1 +#define R8A77470_CLK_ZTRD2 2 +#define R8A77470_CLK_ZT 3 +#define R8A77470_CLK_ZX 4 +#define R8A77470_CLK_ZS 5 +#define R8A77470_CLK_HP 6 +#define R8A77470_CLK_B 7 +#define R8A77470_CLK_LB 8 +#define R8A77470_CLK_P 9 +#define R8A77470_CLK_CL 10 +#define R8A77470_CLK_CP 11 +#define R8A77470_CLK_M2 12 +#define R8A77470_CLK_ZB3 13 +#define R8A77470_CLK_SDH 14 +#define R8A77470_CLK_SD0 15 +#define R8A77470_CLK_SD1 16 +#define R8A77470_CLK_SD2 17 +#define R8A77470_CLK_MP 18 +#define R8A77470_CLK_QSPI 19 +#define R8A77470_CLK_CPEX 20 +#define R8A77470_CLK_RCAN 21 +#define R8A77470_CLK_R 22 +#define R8A77470_CLK_OSC 23 + +#endif /* __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7778-clock.h b/include/dt-bindings/clock/r8a7778-clock.h new file mode 100644 index 000000000..f6b07c539 --- /dev/null +++ b/include/dt-bindings/clock/r8a7778-clock.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2014 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7778_H__ +#define __DT_BINDINGS_CLOCK_R8A7778_H__ + +/* CPG */ +#define R8A7778_CLK_PLLA 0 +#define R8A7778_CLK_PLLB 1 +#define R8A7778_CLK_B 2 +#define R8A7778_CLK_OUT 3 +#define R8A7778_CLK_P 4 +#define R8A7778_CLK_S 5 +#define R8A7778_CLK_S1 6 + +/* MSTP0 */ +#define R8A7778_CLK_I2C0 30 +#define R8A7778_CLK_I2C1 29 +#define R8A7778_CLK_I2C2 28 +#define R8A7778_CLK_I2C3 27 +#define R8A7778_CLK_SCIF0 26 +#define R8A7778_CLK_SCIF1 25 +#define R8A7778_CLK_SCIF2 24 +#define R8A7778_CLK_SCIF3 23 +#define R8A7778_CLK_SCIF4 22 +#define R8A7778_CLK_SCIF5 21 +#define R8A7778_CLK_TMU0 16 +#define R8A7778_CLK_TMU1 15 +#define R8A7778_CLK_TMU2 14 +#define R8A7778_CLK_SSI0 12 +#define R8A7778_CLK_SSI1 11 +#define R8A7778_CLK_SSI2 10 +#define R8A7778_CLK_SSI3 9 +#define R8A7778_CLK_SRU 8 +#define R8A7778_CLK_HSPI 7 + +/* MSTP1 */ +#define R8A7778_CLK_ETHER 14 +#define R8A7778_CLK_VIN0 10 +#define R8A7778_CLK_VIN1 9 +#define R8A7778_CLK_USB 0 + +/* MSTP3 */ +#define R8A7778_CLK_MMC 31 +#define R8A7778_CLK_SDHI0 23 +#define R8A7778_CLK_SDHI1 22 +#define R8A7778_CLK_SDHI2 21 +#define R8A7778_CLK_SSI4 11 +#define R8A7778_CLK_SSI5 10 +#define R8A7778_CLK_SSI6 9 +#define R8A7778_CLK_SSI7 8 +#define R8A7778_CLK_SSI8 7 + +/* MSTP5 */ +#define R8A7778_CLK_SRU_SRC0 31 +#define R8A7778_CLK_SRU_SRC1 30 +#define R8A7778_CLK_SRU_SRC2 29 +#define R8A7778_CLK_SRU_SRC3 28 +#define R8A7778_CLK_SRU_SRC4 27 +#define R8A7778_CLK_SRU_SRC5 26 +#define R8A7778_CLK_SRU_SRC6 25 +#define R8A7778_CLK_SRU_SRC7 24 +#define R8A7778_CLK_SRU_SRC8 23 + +#endif /* __DT_BINDINGS_CLOCK_R8A7778_H__ */ diff --git a/include/dt-bindings/clock/r8a7779-clock.h b/include/dt-bindings/clock/r8a7779-clock.h new file mode 100644 index 000000000..381a61142 --- /dev/null +++ b/include/dt-bindings/clock/r8a7779-clock.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2013 Horms Solutions Ltd. + * + * Contact: Simon Horman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7779_H__ +#define __DT_BINDINGS_CLOCK_R8A7779_H__ + +/* CPG */ +#define R8A7779_CLK_PLLA 0 +#define R8A7779_CLK_Z 1 +#define R8A7779_CLK_ZS 2 +#define R8A7779_CLK_S 3 +#define R8A7779_CLK_S1 4 +#define R8A7779_CLK_P 5 +#define R8A7779_CLK_B 6 +#define R8A7779_CLK_OUT 7 + +/* MSTP 0 */ +#define R8A7779_CLK_HSPI 7 +#define R8A7779_CLK_TMU2 14 +#define R8A7779_CLK_TMU1 15 +#define R8A7779_CLK_TMU0 16 +#define R8A7779_CLK_HSCIF1 18 +#define R8A7779_CLK_HSCIF0 19 +#define R8A7779_CLK_SCIF5 21 +#define R8A7779_CLK_SCIF4 22 +#define R8A7779_CLK_SCIF3 23 +#define R8A7779_CLK_SCIF2 24 +#define R8A7779_CLK_SCIF1 25 +#define R8A7779_CLK_SCIF0 26 +#define R8A7779_CLK_I2C3 27 +#define R8A7779_CLK_I2C2 28 +#define R8A7779_CLK_I2C1 29 +#define R8A7779_CLK_I2C0 30 + +/* MSTP 1 */ +#define R8A7779_CLK_USB01 0 +#define R8A7779_CLK_USB2 1 +#define R8A7779_CLK_DU 3 +#define R8A7779_CLK_VIN2 8 +#define R8A7779_CLK_VIN1 9 +#define R8A7779_CLK_VIN0 10 +#define R8A7779_CLK_ETHER 14 +#define R8A7779_CLK_SATA 15 +#define R8A7779_CLK_PCIE 16 +#define R8A7779_CLK_VIN3 20 + +/* MSTP 3 */ +#define R8A7779_CLK_SDHI3 20 +#define R8A7779_CLK_SDHI2 21 +#define R8A7779_CLK_SDHI1 22 +#define R8A7779_CLK_SDHI0 23 +#define R8A7779_CLK_MMC1 30 +#define R8A7779_CLK_MMC0 31 + + +#endif /* __DT_BINDINGS_CLOCK_R8A7779_H__ */ diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h new file mode 100644 index 000000000..20641fa68 --- /dev/null +++ b/include/dt-bindings/clock/r8a7790-clock.h @@ -0,0 +1,162 @@ +/* + * Copyright 2013 Ideas On Board SPRL + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7790_H__ +#define __DT_BINDINGS_CLOCK_R8A7790_H__ + +/* CPG */ +#define R8A7790_CLK_MAIN 0 +#define R8A7790_CLK_PLL0 1 +#define R8A7790_CLK_PLL1 2 +#define R8A7790_CLK_PLL3 3 +#define R8A7790_CLK_LB 4 +#define R8A7790_CLK_QSPI 5 +#define R8A7790_CLK_SDH 6 +#define R8A7790_CLK_SD0 7 +#define R8A7790_CLK_SD1 8 +#define R8A7790_CLK_Z 9 +#define R8A7790_CLK_RCAN 10 +#define R8A7790_CLK_ADSP 11 + +/* MSTP0 */ +#define R8A7790_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7790_CLK_VCP1 0 +#define R8A7790_CLK_VCP0 1 +#define R8A7790_CLK_VPC1 2 +#define R8A7790_CLK_VPC0 3 +#define R8A7790_CLK_JPU 6 +#define R8A7790_CLK_SSP1 9 +#define R8A7790_CLK_TMU1 11 +#define R8A7790_CLK_3DG 12 +#define R8A7790_CLK_2DDMAC 15 +#define R8A7790_CLK_FDP1_2 17 +#define R8A7790_CLK_FDP1_1 18 +#define R8A7790_CLK_FDP1_0 19 +#define R8A7790_CLK_TMU3 21 +#define R8A7790_CLK_TMU2 22 +#define R8A7790_CLK_CMT0 24 +#define R8A7790_CLK_TMU0 25 +#define R8A7790_CLK_VSP1_DU1 27 +#define R8A7790_CLK_VSP1_DU0 28 +#define R8A7790_CLK_VSP1_R 30 +#define R8A7790_CLK_VSP1_S 31 + +/* MSTP2 */ +#define R8A7790_CLK_SCIFA2 2 +#define R8A7790_CLK_SCIFA1 3 +#define R8A7790_CLK_SCIFA0 4 +#define R8A7790_CLK_MSIOF2 5 +#define R8A7790_CLK_SCIFB0 6 +#define R8A7790_CLK_SCIFB1 7 +#define R8A7790_CLK_MSIOF1 8 +#define R8A7790_CLK_MSIOF3 15 +#define R8A7790_CLK_SCIFB2 16 +#define R8A7790_CLK_SYS_DMAC1 18 +#define R8A7790_CLK_SYS_DMAC0 19 + +/* MSTP3 */ +#define R8A7790_CLK_IIC2 0 +#define R8A7790_CLK_TPU0 4 +#define R8A7790_CLK_MMCIF1 5 +#define R8A7790_CLK_SCIF2 10 +#define R8A7790_CLK_SDHI3 11 +#define R8A7790_CLK_SDHI2 12 +#define R8A7790_CLK_SDHI1 13 +#define R8A7790_CLK_SDHI0 14 +#define R8A7790_CLK_MMCIF0 15 +#define R8A7790_CLK_IIC0 18 +#define R8A7790_CLK_PCIEC 19 +#define R8A7790_CLK_IIC1 23 +#define R8A7790_CLK_SSUSB 28 +#define R8A7790_CLK_CMT1 29 +#define R8A7790_CLK_USBDMAC0 30 +#define R8A7790_CLK_USBDMAC1 31 + +/* MSTP4 */ +#define R8A7790_CLK_IRQC 7 +#define R8A7790_CLK_INTC_SYS 8 + +/* MSTP5 */ +#define R8A7790_CLK_AUDIO_DMAC1 1 +#define R8A7790_CLK_AUDIO_DMAC0 2 +#define R8A7790_CLK_ADSP_MOD 6 +#define R8A7790_CLK_THERMAL 22 +#define R8A7790_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7790_CLK_EHCI 3 +#define R8A7790_CLK_HSUSB 4 +#define R8A7790_CLK_HSCIF1 16 +#define R8A7790_CLK_HSCIF0 17 +#define R8A7790_CLK_SCIF1 20 +#define R8A7790_CLK_SCIF0 21 +#define R8A7790_CLK_DU2 22 +#define R8A7790_CLK_DU1 23 +#define R8A7790_CLK_DU0 24 +#define R8A7790_CLK_LVDS1 25 +#define R8A7790_CLK_LVDS0 26 + +/* MSTP8 */ +#define R8A7790_CLK_MLB 2 +#define R8A7790_CLK_VIN3 8 +#define R8A7790_CLK_VIN2 9 +#define R8A7790_CLK_VIN1 10 +#define R8A7790_CLK_VIN0 11 +#define R8A7790_CLK_ETHERAVB 12 +#define R8A7790_CLK_ETHER 13 +#define R8A7790_CLK_SATA1 14 +#define R8A7790_CLK_SATA0 15 + +/* MSTP9 */ +#define R8A7790_CLK_GPIO5 7 +#define R8A7790_CLK_GPIO4 8 +#define R8A7790_CLK_GPIO3 9 +#define R8A7790_CLK_GPIO2 10 +#define R8A7790_CLK_GPIO1 11 +#define R8A7790_CLK_GPIO0 12 +#define R8A7790_CLK_RCAN1 15 +#define R8A7790_CLK_RCAN0 16 +#define R8A7790_CLK_QSPI_MOD 17 +#define R8A7790_CLK_IICDVFS 26 +#define R8A7790_CLK_I2C3 28 +#define R8A7790_CLK_I2C2 29 +#define R8A7790_CLK_I2C1 30 +#define R8A7790_CLK_I2C0 31 + +/* MSTP10 */ +#define R8A7790_CLK_SSI_ALL 5 +#define R8A7790_CLK_SSI9 6 +#define R8A7790_CLK_SSI8 7 +#define R8A7790_CLK_SSI7 8 +#define R8A7790_CLK_SSI6 9 +#define R8A7790_CLK_SSI5 10 +#define R8A7790_CLK_SSI4 11 +#define R8A7790_CLK_SSI3 12 +#define R8A7790_CLK_SSI2 13 +#define R8A7790_CLK_SSI1 14 +#define 
R8A7790_CLK_SSI0 15 +#define R8A7790_CLK_SCU_ALL 17 +#define R8A7790_CLK_SCU_DVC1 18 +#define R8A7790_CLK_SCU_DVC0 19 +#define R8A7790_CLK_SCU_CTU1_MIX1 20 +#define R8A7790_CLK_SCU_CTU0_MIX0 21 +#define R8A7790_CLK_SCU_SRC9 22 +#define R8A7790_CLK_SCU_SRC8 23 +#define R8A7790_CLK_SCU_SRC7 24 +#define R8A7790_CLK_SCU_SRC6 25 +#define R8A7790_CLK_SCU_SRC5 26 +#define R8A7790_CLK_SCU_SRC4 27 +#define R8A7790_CLK_SCU_SRC3 28 +#define R8A7790_CLK_SCU_SRC2 29 +#define R8A7790_CLK_SCU_SRC1 30 +#define R8A7790_CLK_SCU_SRC0 31 + +#endif /* __DT_BINDINGS_CLOCK_R8A7790_H__ */ diff --git a/include/dt-bindings/clock/r8a7790-cpg-mssr.h b/include/dt-bindings/clock/r8a7790-cpg-mssr.h new file mode 100644 index 000000000..1625b8bf3 --- /dev/null +++ b/include/dt-bindings/clock/r8a7790-cpg-mssr.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__ + +#include + +/* r8a7790 CPG Core Clocks */ +#define R8A7790_CLK_Z 0 +#define R8A7790_CLK_Z2 1 +#define R8A7790_CLK_ZG 2 +#define R8A7790_CLK_ZTR 3 +#define R8A7790_CLK_ZTRD2 4 +#define R8A7790_CLK_ZT 5 +#define R8A7790_CLK_ZX 6 +#define R8A7790_CLK_ZS 7 +#define R8A7790_CLK_HP 8 +#define R8A7790_CLK_I 9 +#define R8A7790_CLK_B 10 +#define R8A7790_CLK_LB 11 +#define R8A7790_CLK_P 12 +#define R8A7790_CLK_CL 13 +#define R8A7790_CLK_M2 14 +#define R8A7790_CLK_ADSP 15 +#define R8A7790_CLK_IMP 16 +#define R8A7790_CLK_ZB3 17 +#define R8A7790_CLK_ZB3D2 18 +#define R8A7790_CLK_DDR 19 +#define R8A7790_CLK_SDH 20 +#define R8A7790_CLK_SD0 21 +#define R8A7790_CLK_SD1 22 +#define R8A7790_CLK_SD2 23 +#define R8A7790_CLK_SD3 24 +#define R8A7790_CLK_MMC0 25 +#define R8A7790_CLK_MMC1 26 +#define R8A7790_CLK_MP 27 +#define R8A7790_CLK_SSP 28 +#define R8A7790_CLK_SSPRS 29 +#define R8A7790_CLK_QSPI 30 +#define R8A7790_CLK_CP 31 +#define R8A7790_CLK_RCAN 32 +#define R8A7790_CLK_R 33 +#define R8A7790_CLK_OSC 34 + +#endif /* __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h new file mode 100644 index 000000000..ef6921341 --- /dev/null +++ b/include/dt-bindings/clock/r8a7791-clock.h @@ -0,0 +1,165 @@ +/* + * Copyright 2013 Ideas On Board SPRL + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7791_H__ +#define __DT_BINDINGS_CLOCK_R8A7791_H__ + +/* CPG */ +#define R8A7791_CLK_MAIN 0 +#define R8A7791_CLK_PLL0 1 +#define R8A7791_CLK_PLL1 2 +#define R8A7791_CLK_PLL3 3 +#define R8A7791_CLK_LB 4 +#define R8A7791_CLK_QSPI 5 +#define R8A7791_CLK_SDH 6 +#define R8A7791_CLK_SD0 7 +#define R8A7791_CLK_Z 8 +#define R8A7791_CLK_RCAN 9 +#define R8A7791_CLK_ADSP 10 + +/* MSTP0 */ +#define R8A7791_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7791_CLK_VCP0 1 +#define R8A7791_CLK_VPC0 3 +#define R8A7791_CLK_JPU 6 +#define R8A7791_CLK_SSP1 9 +#define R8A7791_CLK_TMU1 11 +#define R8A7791_CLK_3DG 12 +#define R8A7791_CLK_2DDMAC 15 +#define R8A7791_CLK_FDP1_1 18 +#define R8A7791_CLK_FDP1_0 19 +#define R8A7791_CLK_TMU3 21 +#define R8A7791_CLK_TMU2 22 +#define R8A7791_CLK_CMT0 24 +#define R8A7791_CLK_TMU0 25 +#define R8A7791_CLK_VSP1_DU1 27 +#define R8A7791_CLK_VSP1_DU0 28 +#define R8A7791_CLK_VSP1_S 31 + +/* MSTP2 */ +#define R8A7791_CLK_SCIFA2 2 +#define R8A7791_CLK_SCIFA1 3 +#define R8A7791_CLK_SCIFA0 4 +#define R8A7791_CLK_MSIOF2 5 +#define R8A7791_CLK_SCIFB0 6 +#define R8A7791_CLK_SCIFB1 7 +#define R8A7791_CLK_MSIOF1 8 +#define R8A7791_CLK_SCIFB2 16 +#define R8A7791_CLK_SYS_DMAC1 18 +#define R8A7791_CLK_SYS_DMAC0 19 + +/* MSTP3 */ +#define R8A7791_CLK_TPU0 4 +#define R8A7791_CLK_SDHI2 11 +#define R8A7791_CLK_SDHI1 12 +#define R8A7791_CLK_SDHI0 14 +#define R8A7791_CLK_MMCIF0 15 +#define R8A7791_CLK_IIC0 18 +#define R8A7791_CLK_PCIEC 19 +#define R8A7791_CLK_IIC1 23 +#define R8A7791_CLK_SSUSB 28 +#define R8A7791_CLK_CMT1 29 +#define R8A7791_CLK_USBDMAC0 30 +#define R8A7791_CLK_USBDMAC1 31 + +/* MSTP4 */ +#define R8A7791_CLK_IRQC 7 +#define R8A7791_CLK_INTC_SYS 8 + +/* MSTP5 */ +#define R8A7791_CLK_AUDIO_DMAC1 1 +#define R8A7791_CLK_AUDIO_DMAC0 2 +#define R8A7791_CLK_ADSP_MOD 6 +#define R8A7791_CLK_THERMAL 22 +#define R8A7791_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7791_CLK_EHCI 3 +#define R8A7791_CLK_HSUSB 4 +#define R8A7791_CLK_HSCIF2 13 +#define R8A7791_CLK_SCIF5 14 +#define R8A7791_CLK_SCIF4 15 +#define R8A7791_CLK_HSCIF1 16 +#define R8A7791_CLK_HSCIF0 17 +#define R8A7791_CLK_SCIF3 18 +#define R8A7791_CLK_SCIF2 19 +#define R8A7791_CLK_SCIF1 20 +#define R8A7791_CLK_SCIF0 21 +#define R8A7791_CLK_DU1 23 +#define R8A7791_CLK_DU0 24 +#define R8A7791_CLK_LVDS0 26 + +/* MSTP8 */ +#define R8A7791_CLK_IPMMU_SGX 0 +#define R8A7791_CLK_MLB 2 +#define R8A7791_CLK_VIN2 9 +#define R8A7791_CLK_VIN1 10 +#define R8A7791_CLK_VIN0 11 +#define R8A7791_CLK_ETHERAVB 12 +#define R8A7791_CLK_ETHER 13 +#define R8A7791_CLK_SATA1 14 +#define R8A7791_CLK_SATA0 15 + +/* MSTP9 */ +#define R8A7791_CLK_GYROADC 1 +#define R8A7791_CLK_GPIO7 4 +#define R8A7791_CLK_GPIO6 5 +#define R8A7791_CLK_GPIO5 7 +#define R8A7791_CLK_GPIO4 8 +#define R8A7791_CLK_GPIO3 9 +#define R8A7791_CLK_GPIO2 10 +#define R8A7791_CLK_GPIO1 11 +#define R8A7791_CLK_GPIO0 12 +#define R8A7791_CLK_RCAN1 15 +#define R8A7791_CLK_RCAN0 16 +#define R8A7791_CLK_QSPI_MOD 17 +#define R8A7791_CLK_I2C5 25 +#define R8A7791_CLK_IICDVFS 26 +#define R8A7791_CLK_I2C4 27 +#define R8A7791_CLK_I2C3 28 +#define R8A7791_CLK_I2C2 29 +#define R8A7791_CLK_I2C1 30 +#define R8A7791_CLK_I2C0 31 + +/* MSTP10 */ +#define R8A7791_CLK_SSI_ALL 5 +#define R8A7791_CLK_SSI9 6 +#define R8A7791_CLK_SSI8 7 +#define R8A7791_CLK_SSI7 8 +#define R8A7791_CLK_SSI6 9 +#define R8A7791_CLK_SSI5 10 +#define R8A7791_CLK_SSI4 11 +#define R8A7791_CLK_SSI3 12 +#define R8A7791_CLK_SSI2 13 +#define R8A7791_CLK_SSI1 14 +#define R8A7791_CLK_SSI0 15 +#define R8A7791_CLK_SCU_ALL 17 
+#define R8A7791_CLK_SCU_DVC1 18 +#define R8A7791_CLK_SCU_DVC0 19 +#define R8A7791_CLK_SCU_CTU1_MIX1 20 +#define R8A7791_CLK_SCU_CTU0_MIX0 21 +#define R8A7791_CLK_SCU_SRC9 22 +#define R8A7791_CLK_SCU_SRC8 23 +#define R8A7791_CLK_SCU_SRC7 24 +#define R8A7791_CLK_SCU_SRC6 25 +#define R8A7791_CLK_SCU_SRC5 26 +#define R8A7791_CLK_SCU_SRC4 27 +#define R8A7791_CLK_SCU_SRC3 28 +#define R8A7791_CLK_SCU_SRC2 29 +#define R8A7791_CLK_SCU_SRC1 30 +#define R8A7791_CLK_SCU_SRC0 31 + +/* MSTP11 */ +#define R8A7791_CLK_SCIFA3 6 +#define R8A7791_CLK_SCIFA4 7 +#define R8A7791_CLK_SCIFA5 8 + +#endif /* __DT_BINDINGS_CLOCK_R8A7791_H__ */ diff --git a/include/dt-bindings/clock/r8a7791-cpg-mssr.h b/include/dt-bindings/clock/r8a7791-cpg-mssr.h new file mode 100644 index 000000000..e8823410c --- /dev/null +++ b/include/dt-bindings/clock/r8a7791-cpg-mssr.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__ + +#include + +/* r8a7791 CPG Core Clocks */ +#define R8A7791_CLK_Z 0 +#define R8A7791_CLK_ZG 1 +#define R8A7791_CLK_ZTR 2 +#define R8A7791_CLK_ZTRD2 3 +#define R8A7791_CLK_ZT 4 +#define R8A7791_CLK_ZX 5 +#define R8A7791_CLK_ZS 6 +#define R8A7791_CLK_HP 7 +#define R8A7791_CLK_I 8 +#define R8A7791_CLK_B 9 +#define R8A7791_CLK_LB 10 +#define R8A7791_CLK_P 11 +#define R8A7791_CLK_CL 12 +#define R8A7791_CLK_M2 13 +#define R8A7791_CLK_ADSP 14 +#define R8A7791_CLK_ZB3 15 +#define R8A7791_CLK_ZB3D2 16 +#define R8A7791_CLK_DDR 17 +#define R8A7791_CLK_SDH 18 +#define R8A7791_CLK_SD0 19 +#define R8A7791_CLK_SD2 20 +#define R8A7791_CLK_SD3 21 +#define R8A7791_CLK_MMC0 22 +#define R8A7791_CLK_MP 23 +#define R8A7791_CLK_SSP 24 +#define R8A7791_CLK_SSPRS 25 +#define R8A7791_CLK_QSPI 26 +#define R8A7791_CLK_CP 27 +#define R8A7791_CLK_RCAN 28 +#define R8A7791_CLK_R 29 +#define R8A7791_CLK_OSC 30 + +#endif /* __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7792-clock.h b/include/dt-bindings/clock/r8a7792-clock.h new file mode 100644 index 000000000..5be90bc23 --- /dev/null +++ b/include/dt-bindings/clock/r8a7792-clock.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2016 Cogent Embedded, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7792_H__ +#define __DT_BINDINGS_CLOCK_R8A7792_H__ + +/* CPG */ +#define R8A7792_CLK_MAIN 0 +#define R8A7792_CLK_PLL0 1 +#define R8A7792_CLK_PLL1 2 +#define R8A7792_CLK_PLL3 3 +#define R8A7792_CLK_LB 4 +#define R8A7792_CLK_QSPI 5 + +/* MSTP0 */ +#define R8A7792_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7792_CLK_JPU 6 +#define R8A7792_CLK_TMU1 11 +#define R8A7792_CLK_TMU3 21 +#define R8A7792_CLK_TMU2 22 +#define R8A7792_CLK_CMT0 24 +#define R8A7792_CLK_TMU0 25 +#define R8A7792_CLK_VSP1DU1 27 +#define R8A7792_CLK_VSP1DU0 28 +#define R8A7792_CLK_VSP1_SY 31 + +/* MSTP2 */ +#define R8A7792_CLK_MSIOF1 8 +#define R8A7792_CLK_SYS_DMAC1 18 +#define R8A7792_CLK_SYS_DMAC0 19 + +/* MSTP3 */ +#define R8A7792_CLK_TPU0 4 +#define R8A7792_CLK_SDHI0 14 +#define R8A7792_CLK_CMT1 29 + +/* MSTP4 */ +#define R8A7792_CLK_IRQC 7 +#define R8A7792_CLK_INTC_SYS 8 + +/* MSTP5 */ +#define R8A7792_CLK_AUDIO_DMAC0 2 +#define R8A7792_CLK_THERMAL 22 +#define R8A7792_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7792_CLK_HSCIF1 16 +#define R8A7792_CLK_HSCIF0 17 +#define R8A7792_CLK_SCIF3 18 +#define R8A7792_CLK_SCIF2 19 +#define R8A7792_CLK_SCIF1 20 +#define R8A7792_CLK_SCIF0 21 +#define R8A7792_CLK_DU1 23 +#define R8A7792_CLK_DU0 24 + +/* MSTP8 */ +#define R8A7792_CLK_VIN5 4 +#define R8A7792_CLK_VIN4 5 +#define R8A7792_CLK_VIN3 8 +#define R8A7792_CLK_VIN2 9 +#define R8A7792_CLK_VIN1 10 +#define R8A7792_CLK_VIN0 11 +#define R8A7792_CLK_ETHERAVB 12 + +/* MSTP9 */ +#define R8A7792_CLK_GPIO7 4 +#define R8A7792_CLK_GPIO6 5 +#define R8A7792_CLK_GPIO5 7 +#define R8A7792_CLK_GPIO4 8 +#define R8A7792_CLK_GPIO3 9 +#define R8A7792_CLK_GPIO2 10 +#define R8A7792_CLK_GPIO1 11 +#define R8A7792_CLK_GPIO0 12 +#define R8A7792_CLK_GPIO11 13 +#define R8A7792_CLK_GPIO10 14 +#define R8A7792_CLK_CAN1 15 +#define R8A7792_CLK_CAN0 16 +#define R8A7792_CLK_QSPI_MOD 17 +#define R8A7792_CLK_GPIO9 19 +#define R8A7792_CLK_GPIO8 21 +#define R8A7792_CLK_I2C5 25 +#define R8A7792_CLK_IICDVFS 26 +#define R8A7792_CLK_I2C4 27 +#define R8A7792_CLK_I2C3 28 +#define R8A7792_CLK_I2C2 29 +#define R8A7792_CLK_I2C1 30 +#define R8A7792_CLK_I2C0 31 + +/* MSTP10 */ +#define R8A7792_CLK_SSI_ALL 5 +#define R8A7792_CLK_SSI4 11 +#define R8A7792_CLK_SSI3 12 + +#endif /* __DT_BINDINGS_CLOCK_R8A7792_H__ */ diff --git a/include/dt-bindings/clock/r8a7792-cpg-mssr.h b/include/dt-bindings/clock/r8a7792-cpg-mssr.h new file mode 100644 index 000000000..72ce85cb2 --- /dev/null +++ b/include/dt-bindings/clock/r8a7792-cpg-mssr.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__ + +#include + +/* r8a7792 CPG Core Clocks */ +#define R8A7792_CLK_Z 0 +#define R8A7792_CLK_ZG 1 +#define R8A7792_CLK_ZTR 2 +#define R8A7792_CLK_ZTRD2 3 +#define R8A7792_CLK_ZT 4 +#define R8A7792_CLK_ZX 5 +#define R8A7792_CLK_ZS 6 +#define R8A7792_CLK_HP 7 +#define R8A7792_CLK_I 8 +#define R8A7792_CLK_B 9 +#define R8A7792_CLK_LB 10 +#define R8A7792_CLK_P 11 +#define R8A7792_CLK_CL 12 +#define R8A7792_CLK_M2 13 +#define R8A7792_CLK_IMP 14 +#define R8A7792_CLK_ZB3 15 +#define R8A7792_CLK_ZB3D2 16 +#define R8A7792_CLK_DDR 17 +#define R8A7792_CLK_SD 18 +#define R8A7792_CLK_MP 19 +#define R8A7792_CLK_QSPI 20 +#define R8A7792_CLK_CP 21 +#define R8A7792_CLK_CPEX 22 +#define R8A7792_CLK_RCAN 23 +#define R8A7792_CLK_R 24 +#define R8A7792_CLK_OSC 25 + +#endif /* __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h new file mode 100644 index 000000000..7318d45d4 --- /dev/null +++ b/include/dt-bindings/clock/r8a7793-clock.h @@ -0,0 +1,167 @@ +/* + * r8a7793 clock definition + * + * Copyright (C) 2014 Renesas Electronics Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7793_H__ +#define __DT_BINDINGS_CLOCK_R8A7793_H__ + +/* CPG */ +#define R8A7793_CLK_MAIN 0 +#define R8A7793_CLK_PLL0 1 +#define R8A7793_CLK_PLL1 2 +#define R8A7793_CLK_PLL3 3 +#define R8A7793_CLK_LB 4 +#define R8A7793_CLK_QSPI 5 +#define R8A7793_CLK_SDH 6 +#define R8A7793_CLK_SD0 7 +#define R8A7793_CLK_Z 8 +#define R8A7793_CLK_RCAN 9 +#define R8A7793_CLK_ADSP 10 + +/* MSTP0 */ +#define R8A7793_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7793_CLK_VCP0 1 +#define R8A7793_CLK_VPC0 3 +#define R8A7793_CLK_SSP1 9 +#define R8A7793_CLK_TMU1 11 +#define R8A7793_CLK_3DG 12 +#define R8A7793_CLK_2DDMAC 15 +#define R8A7793_CLK_FDP1_1 18 +#define R8A7793_CLK_FDP1_0 19 +#define R8A7793_CLK_TMU3 21 +#define R8A7793_CLK_TMU2 22 +#define R8A7793_CLK_CMT0 24 +#define R8A7793_CLK_TMU0 25 +#define R8A7793_CLK_VSP1_DU1 27 +#define R8A7793_CLK_VSP1_DU0 28 +#define R8A7793_CLK_VSP1_S 31 + +/* MSTP2 */ +#define R8A7793_CLK_SCIFA2 2 +#define R8A7793_CLK_SCIFA1 3 +#define R8A7793_CLK_SCIFA0 4 +#define R8A7793_CLK_MSIOF2 5 +#define R8A7793_CLK_SCIFB0 6 +#define R8A7793_CLK_SCIFB1 7 +#define R8A7793_CLK_MSIOF1 8 +#define R8A7793_CLK_SCIFB2 16 +#define R8A7793_CLK_SYS_DMAC1 18 +#define R8A7793_CLK_SYS_DMAC0 19 + +/* MSTP3 */ +#define R8A7793_CLK_TPU0 4 +#define R8A7793_CLK_SDHI2 11 +#define R8A7793_CLK_SDHI1 12 +#define R8A7793_CLK_SDHI0 14 +#define R8A7793_CLK_MMCIF0 15 +#define R8A7793_CLK_IIC0 18 +#define R8A7793_CLK_PCIEC 19 +#define R8A7793_CLK_IIC1 23 +#define R8A7793_CLK_SSUSB 28 +#define R8A7793_CLK_CMT1 29 +#define R8A7793_CLK_USBDMAC0 30 +#define R8A7793_CLK_USBDMAC1 31 + +/* MSTP4 */ +#define R8A7793_CLK_IRQC 7 +#define R8A7793_CLK_INTC_SYS 8 + +/* MSTP5 */ +#define R8A7793_CLK_AUDIO_DMAC1 1 +#define R8A7793_CLK_AUDIO_DMAC0 2 +#define R8A7793_CLK_ADSP_MOD 6 +#define R8A7793_CLK_THERMAL 22 +#define 
R8A7793_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7793_CLK_EHCI 3 +#define R8A7793_CLK_HSUSB 4 +#define R8A7793_CLK_HSCIF2 13 +#define R8A7793_CLK_SCIF5 14 +#define R8A7793_CLK_SCIF4 15 +#define R8A7793_CLK_HSCIF1 16 +#define R8A7793_CLK_HSCIF0 17 +#define R8A7793_CLK_SCIF3 18 +#define R8A7793_CLK_SCIF2 19 +#define R8A7793_CLK_SCIF1 20 +#define R8A7793_CLK_SCIF0 21 +#define R8A7793_CLK_DU1 23 +#define R8A7793_CLK_DU0 24 +#define R8A7793_CLK_LVDS0 26 + +/* MSTP8 */ +#define R8A7793_CLK_IPMMU_SGX 0 +#define R8A7793_CLK_VIN2 9 +#define R8A7793_CLK_VIN1 10 +#define R8A7793_CLK_VIN0 11 +#define R8A7793_CLK_ETHER 13 +#define R8A7793_CLK_SATA1 14 +#define R8A7793_CLK_SATA0 15 + +/* MSTP9 */ +#define R8A7793_CLK_GPIO7 4 +#define R8A7793_CLK_GPIO6 5 +#define R8A7793_CLK_GPIO5 7 +#define R8A7793_CLK_GPIO4 8 +#define R8A7793_CLK_GPIO3 9 +#define R8A7793_CLK_GPIO2 10 +#define R8A7793_CLK_GPIO1 11 +#define R8A7793_CLK_GPIO0 12 +#define R8A7793_CLK_RCAN1 15 +#define R8A7793_CLK_RCAN0 16 +#define R8A7793_CLK_QSPI_MOD 17 +#define R8A7793_CLK_I2C5 25 +#define R8A7793_CLK_IICDVFS 26 +#define R8A7793_CLK_I2C4 27 +#define R8A7793_CLK_I2C3 28 +#define R8A7793_CLK_I2C2 29 +#define R8A7793_CLK_I2C1 30 +#define R8A7793_CLK_I2C0 31 + +/* MSTP10 */ +#define R8A7793_CLK_SSI_ALL 5 +#define R8A7793_CLK_SSI9 6 +#define R8A7793_CLK_SSI8 7 +#define R8A7793_CLK_SSI7 8 +#define R8A7793_CLK_SSI6 9 +#define R8A7793_CLK_SSI5 10 +#define R8A7793_CLK_SSI4 11 +#define R8A7793_CLK_SSI3 12 +#define R8A7793_CLK_SSI2 13 +#define R8A7793_CLK_SSI1 14 +#define R8A7793_CLK_SSI0 15 +#define R8A7793_CLK_SCU_ALL 17 +#define R8A7793_CLK_SCU_DVC1 18 +#define R8A7793_CLK_SCU_DVC0 19 +#define R8A7793_CLK_SCU_CTU1_MIX1 20 +#define R8A7793_CLK_SCU_CTU0_MIX0 21 +#define R8A7793_CLK_SCU_SRC9 22 +#define R8A7793_CLK_SCU_SRC8 23 +#define R8A7793_CLK_SCU_SRC7 24 +#define R8A7793_CLK_SCU_SRC6 25 +#define R8A7793_CLK_SCU_SRC5 26 +#define R8A7793_CLK_SCU_SRC4 27 +#define R8A7793_CLK_SCU_SRC3 28 +#define R8A7793_CLK_SCU_SRC2 29 +#define R8A7793_CLK_SCU_SRC1 30 +#define R8A7793_CLK_SCU_SRC0 31 + +/* MSTP11 */ +#define R8A7793_CLK_SCIFA3 6 +#define R8A7793_CLK_SCIFA4 7 +#define R8A7793_CLK_SCIFA5 8 + +#endif /* __DT_BINDINGS_CLOCK_R8A7793_H__ */ diff --git a/include/dt-bindings/clock/r8a7793-cpg-mssr.h b/include/dt-bindings/clock/r8a7793-cpg-mssr.h new file mode 100644 index 000000000..8809b0f62 --- /dev/null +++ b/include/dt-bindings/clock/r8a7793-cpg-mssr.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__ + +#include + +/* r8a7793 CPG Core Clocks */ +#define R8A7793_CLK_Z 0 +#define R8A7793_CLK_ZG 1 +#define R8A7793_CLK_ZTR 2 +#define R8A7793_CLK_ZTRD2 3 +#define R8A7793_CLK_ZT 4 +#define R8A7793_CLK_ZX 5 +#define R8A7793_CLK_ZS 6 +#define R8A7793_CLK_HP 7 +#define R8A7793_CLK_I 8 +#define R8A7793_CLK_B 9 +#define R8A7793_CLK_LB 10 +#define R8A7793_CLK_P 11 +#define R8A7793_CLK_CL 12 +#define R8A7793_CLK_M2 13 +#define R8A7793_CLK_ADSP 14 +#define R8A7793_CLK_ZB3 15 +#define R8A7793_CLK_ZB3D2 16 +#define R8A7793_CLK_DDR 17 +#define R8A7793_CLK_SDH 18 +#define R8A7793_CLK_SD0 19 +#define R8A7793_CLK_SD2 20 +#define R8A7793_CLK_SD3 21 +#define R8A7793_CLK_MMC0 22 +#define R8A7793_CLK_MP 23 +#define R8A7793_CLK_SSP 24 +#define R8A7793_CLK_SSPRS 25 +#define R8A7793_CLK_QSPI 26 +#define R8A7793_CLK_CP 27 +#define R8A7793_CLK_RCAN 28 +#define R8A7793_CLK_R 29 +#define R8A7793_CLK_OSC 30 + +#endif /* __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h new file mode 100644 index 000000000..93e99c3ff --- /dev/null +++ b/include/dt-bindings/clock/r8a7794-clock.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright 2013 Ideas On Board SPRL + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7794_H__ +#define __DT_BINDINGS_CLOCK_R8A7794_H__ + +/* CPG */ +#define R8A7794_CLK_MAIN 0 +#define R8A7794_CLK_PLL0 1 +#define R8A7794_CLK_PLL1 2 +#define R8A7794_CLK_PLL3 3 +#define R8A7794_CLK_LB 4 +#define R8A7794_CLK_QSPI 5 +#define R8A7794_CLK_SDH 6 +#define R8A7794_CLK_SD0 7 +#define R8A7794_CLK_RCAN 8 + +/* MSTP0 */ +#define R8A7794_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7794_CLK_VCP0 1 +#define R8A7794_CLK_VPC0 3 +#define R8A7794_CLK_TMU1 11 +#define R8A7794_CLK_3DG 12 +#define R8A7794_CLK_2DDMAC 15 +#define R8A7794_CLK_FDP1_0 19 +#define R8A7794_CLK_TMU3 21 +#define R8A7794_CLK_TMU2 22 +#define R8A7794_CLK_CMT0 24 +#define R8A7794_CLK_TMU0 25 +#define R8A7794_CLK_VSP1_DU0 28 +#define R8A7794_CLK_VSP1_S 31 + +/* MSTP2 */ +#define R8A7794_CLK_SCIFA2 2 +#define R8A7794_CLK_SCIFA1 3 +#define R8A7794_CLK_SCIFA0 4 +#define R8A7794_CLK_MSIOF2 5 +#define R8A7794_CLK_SCIFB0 6 +#define R8A7794_CLK_SCIFB1 7 +#define R8A7794_CLK_MSIOF1 8 +#define R8A7794_CLK_SCIFB2 16 +#define R8A7794_CLK_SYS_DMAC1 18 +#define R8A7794_CLK_SYS_DMAC0 19 + +/* MSTP3 */ +#define R8A7794_CLK_SDHI2 11 +#define R8A7794_CLK_SDHI1 12 +#define R8A7794_CLK_SDHI0 14 +#define R8A7794_CLK_MMCIF0 15 +#define R8A7794_CLK_IIC0 18 +#define R8A7794_CLK_IIC1 23 +#define R8A7794_CLK_CMT1 29 +#define R8A7794_CLK_USBDMAC0 30 +#define R8A7794_CLK_USBDMAC1 31 + +/* MSTP4 */ +#define R8A7794_CLK_IRQC 7 +#define R8A7794_CLK_INTC_SYS 8 + +/* MSTP5 */ +#define R8A7794_CLK_AUDIO_DMAC0 2 +#define R8A7794_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7794_CLK_EHCI 3 +#define R8A7794_CLK_HSUSB 4 +#define R8A7794_CLK_HSCIF2 13 +#define R8A7794_CLK_SCIF5 14 +#define R8A7794_CLK_SCIF4 15 +#define R8A7794_CLK_HSCIF1 16 +#define R8A7794_CLK_HSCIF0 17 +#define R8A7794_CLK_SCIF3 18 +#define R8A7794_CLK_SCIF2 19 +#define R8A7794_CLK_SCIF1 20 +#define R8A7794_CLK_SCIF0 21 +#define R8A7794_CLK_DU1 23 +#define 
R8A7794_CLK_DU0 24 + +/* MSTP8 */ +#define R8A7794_CLK_VIN1 10 +#define R8A7794_CLK_VIN0 11 +#define R8A7794_CLK_ETHERAVB 12 +#define R8A7794_CLK_ETHER 13 + +/* MSTP9 */ +#define R8A7794_CLK_GPIO6 5 +#define R8A7794_CLK_GPIO5 7 +#define R8A7794_CLK_GPIO4 8 +#define R8A7794_CLK_GPIO3 9 +#define R8A7794_CLK_GPIO2 10 +#define R8A7794_CLK_GPIO1 11 +#define R8A7794_CLK_GPIO0 12 +#define R8A7794_CLK_RCAN1 15 +#define R8A7794_CLK_RCAN0 16 +#define R8A7794_CLK_QSPI_MOD 17 +#define R8A7794_CLK_I2C5 25 +#define R8A7794_CLK_I2C4 27 +#define R8A7794_CLK_I2C3 28 +#define R8A7794_CLK_I2C2 29 +#define R8A7794_CLK_I2C1 30 +#define R8A7794_CLK_I2C0 31 + +/* MSTP10 */ +#define R8A7794_CLK_SSI_ALL 5 +#define R8A7794_CLK_SSI9 6 +#define R8A7794_CLK_SSI8 7 +#define R8A7794_CLK_SSI7 8 +#define R8A7794_CLK_SSI6 9 +#define R8A7794_CLK_SSI5 10 +#define R8A7794_CLK_SSI4 11 +#define R8A7794_CLK_SSI3 12 +#define R8A7794_CLK_SSI2 13 +#define R8A7794_CLK_SSI1 14 +#define R8A7794_CLK_SSI0 15 +#define R8A7794_CLK_SCU_ALL 17 +#define R8A7794_CLK_SCU_DVC1 18 +#define R8A7794_CLK_SCU_DVC0 19 +#define R8A7794_CLK_SCU_CTU1_MIX1 20 +#define R8A7794_CLK_SCU_CTU0_MIX0 21 +#define R8A7794_CLK_SCU_SRC6 25 +#define R8A7794_CLK_SCU_SRC5 26 +#define R8A7794_CLK_SCU_SRC4 27 +#define R8A7794_CLK_SCU_SRC3 28 +#define R8A7794_CLK_SCU_SRC2 29 +#define R8A7794_CLK_SCU_SRC1 30 + +/* MSTP11 */ +#define R8A7794_CLK_SCIFA3 6 +#define R8A7794_CLK_SCIFA4 7 +#define R8A7794_CLK_SCIFA5 8 + +#endif /* __DT_BINDINGS_CLOCK_R8A7794_H__ */ diff --git a/include/dt-bindings/clock/r8a7794-cpg-mssr.h b/include/dt-bindings/clock/r8a7794-cpg-mssr.h new file mode 100644 index 000000000..9d720311a --- /dev/null +++ b/include/dt-bindings/clock/r8a7794-cpg-mssr.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__ + +#include + +/* r8a7794 CPG Core Clocks */ +#define R8A7794_CLK_Z2 0 +#define R8A7794_CLK_ZG 1 +#define R8A7794_CLK_ZTR 2 +#define R8A7794_CLK_ZTRD2 3 +#define R8A7794_CLK_ZT 4 +#define R8A7794_CLK_ZX 5 +#define R8A7794_CLK_ZS 6 +#define R8A7794_CLK_HP 7 +#define R8A7794_CLK_I 8 +#define R8A7794_CLK_B 9 +#define R8A7794_CLK_LB 10 +#define R8A7794_CLK_P 11 +#define R8A7794_CLK_CL 12 +#define R8A7794_CLK_CP 13 +#define R8A7794_CLK_M2 14 +#define R8A7794_CLK_ADSP 15 +#define R8A7794_CLK_ZB3 16 +#define R8A7794_CLK_ZB3D2 17 +#define R8A7794_CLK_DDR 18 +#define R8A7794_CLK_SDH 19 +#define R8A7794_CLK_SD0 20 +#define R8A7794_CLK_SD2 21 +#define R8A7794_CLK_SD3 22 +#define R8A7794_CLK_MMC0 23 +#define R8A7794_CLK_MP 24 +#define R8A7794_CLK_QSPI 25 +#define R8A7794_CLK_CPEX 26 +#define R8A7794_CLK_RCAN 27 +#define R8A7794_CLK_R 28 +#define R8A7794_CLK_OSC 29 + +#endif /* __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h new file mode 100644 index 000000000..f047eaf26 --- /dev/null +++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2015 Renesas Electronics Corp. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ + +#include + +/* r8a7795 CPG Core Clocks */ +#define R8A7795_CLK_Z 0 +#define R8A7795_CLK_Z2 1 +#define R8A7795_CLK_ZR 2 +#define R8A7795_CLK_ZG 3 +#define R8A7795_CLK_ZTR 4 +#define R8A7795_CLK_ZTRD2 5 +#define R8A7795_CLK_ZT 6 +#define R8A7795_CLK_ZX 7 +#define R8A7795_CLK_S0D1 8 +#define R8A7795_CLK_S0D4 9 +#define R8A7795_CLK_S1D1 10 +#define R8A7795_CLK_S1D2 11 +#define R8A7795_CLK_S1D4 12 +#define R8A7795_CLK_S2D1 13 +#define R8A7795_CLK_S2D2 14 +#define R8A7795_CLK_S2D4 15 +#define R8A7795_CLK_S3D1 16 +#define R8A7795_CLK_S3D2 17 +#define R8A7795_CLK_S3D4 18 +#define R8A7795_CLK_LB 19 +#define R8A7795_CLK_CL 20 +#define R8A7795_CLK_ZB3 21 +#define R8A7795_CLK_ZB3D2 22 +#define R8A7795_CLK_CR 23 +#define R8A7795_CLK_CRD2 24 +#define R8A7795_CLK_SD0H 25 +#define R8A7795_CLK_SD0 26 +#define R8A7795_CLK_SD1H 27 +#define R8A7795_CLK_SD1 28 +#define R8A7795_CLK_SD2H 29 +#define R8A7795_CLK_SD2 30 +#define R8A7795_CLK_SD3H 31 +#define R8A7795_CLK_SD3 32 +#define R8A7795_CLK_SSP2 33 +#define R8A7795_CLK_SSP1 34 +#define R8A7795_CLK_SSPRS 35 +#define R8A7795_CLK_RPC 36 +#define R8A7795_CLK_RPCD2 37 +#define R8A7795_CLK_MSO 38 +#define R8A7795_CLK_CANFD 39 +#define R8A7795_CLK_HDMI 40 +#define R8A7795_CLK_CSI0 41 +#define R8A7795_CLK_CSIREF 42 +#define R8A7795_CLK_CP 43 +#define R8A7795_CLK_CPEX 44 +#define R8A7795_CLK_R 45 +#define R8A7795_CLK_OSC 46 + +/* r8a7795 ES2.0 CPG Core Clocks */ +#define R8A7795_CLK_S0D2 47 +#define R8A7795_CLK_S0D3 48 +#define R8A7795_CLK_S0D6 49 +#define R8A7795_CLK_S0D8 50 +#define R8A7795_CLK_S0D12 51 + +#endif /* __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7796-cpg-mssr.h b/include/dt-bindings/clock/r8a7796-cpg-mssr.h new file mode 100644 index 000000000..1e5942695 --- /dev/null +++ b/include/dt-bindings/clock/r8a7796-cpg-mssr.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2016 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__ + +#include + +/* r8a7796 CPG Core Clocks */ +#define R8A7796_CLK_Z 0 +#define R8A7796_CLK_Z2 1 +#define R8A7796_CLK_ZR 2 +#define R8A7796_CLK_ZG 3 +#define R8A7796_CLK_ZTR 4 +#define R8A7796_CLK_ZTRD2 5 +#define R8A7796_CLK_ZT 6 +#define R8A7796_CLK_ZX 7 +#define R8A7796_CLK_S0D1 8 +#define R8A7796_CLK_S0D2 9 +#define R8A7796_CLK_S0D3 10 +#define R8A7796_CLK_S0D4 11 +#define R8A7796_CLK_S0D6 12 +#define R8A7796_CLK_S0D8 13 +#define R8A7796_CLK_S0D12 14 +#define R8A7796_CLK_S1D1 15 +#define R8A7796_CLK_S1D2 16 +#define R8A7796_CLK_S1D4 17 +#define R8A7796_CLK_S2D1 18 +#define R8A7796_CLK_S2D2 19 +#define R8A7796_CLK_S2D4 20 +#define R8A7796_CLK_S3D1 21 +#define R8A7796_CLK_S3D2 22 +#define R8A7796_CLK_S3D4 23 +#define R8A7796_CLK_LB 24 +#define R8A7796_CLK_CL 25 +#define R8A7796_CLK_ZB3 26 +#define R8A7796_CLK_ZB3D2 27 +#define R8A7796_CLK_ZB3D4 28 +#define R8A7796_CLK_CR 29 +#define R8A7796_CLK_CRD2 30 +#define R8A7796_CLK_SD0H 31 +#define R8A7796_CLK_SD0 32 +#define R8A7796_CLK_SD1H 33 +#define R8A7796_CLK_SD1 34 +#define R8A7796_CLK_SD2H 35 +#define R8A7796_CLK_SD2 36 +#define R8A7796_CLK_SD3H 37 +#define R8A7796_CLK_SD3 38 +#define R8A7796_CLK_SSP2 39 +#define R8A7796_CLK_SSP1 40 +#define R8A7796_CLK_SSPRS 41 +#define R8A7796_CLK_RPC 42 +#define R8A7796_CLK_RPCD2 43 +#define R8A7796_CLK_MSO 44 +#define R8A7796_CLK_CANFD 45 +#define R8A7796_CLK_HDMI 46 +#define R8A7796_CLK_CSI0 47 +#define R8A7796_CLK_CSIREF 48 +#define R8A7796_CLK_CP 49 +#define R8A7796_CLK_CPEX 50 +#define R8A7796_CLK_R 51 +#define R8A7796_CLK_OSC 52 + +#endif /* __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77965-cpg-mssr.h b/include/dt-bindings/clock/r8a77965-cpg-mssr.h new file mode 100644 index 000000000..6d3b5a9a6 --- /dev/null +++ b/include/dt-bindings/clock/r8a77965-cpg-mssr.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Jacopo Mondi + */ +#ifndef __DT_BINDINGS_CLOCK_R8A77965_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77965_CPG_MSSR_H__ + +#include + +/* r8a77965 CPG Core Clocks */ +#define R8A77965_CLK_Z 0 +#define R8A77965_CLK_ZR 1 +#define R8A77965_CLK_ZG 2 +#define R8A77965_CLK_ZTR 3 +#define R8A77965_CLK_ZTRD2 4 +#define R8A77965_CLK_ZT 5 +#define R8A77965_CLK_ZX 6 +#define R8A77965_CLK_S0D1 7 +#define R8A77965_CLK_S0D2 8 +#define R8A77965_CLK_S0D3 9 +#define R8A77965_CLK_S0D4 10 +#define R8A77965_CLK_S0D6 11 +#define R8A77965_CLK_S0D8 12 +#define R8A77965_CLK_S0D12 13 +#define R8A77965_CLK_S1D1 14 +#define R8A77965_CLK_S1D2 15 +#define R8A77965_CLK_S1D4 16 +#define R8A77965_CLK_S2D1 17 +#define R8A77965_CLK_S2D2 18 +#define R8A77965_CLK_S2D4 19 +#define R8A77965_CLK_S3D1 20 +#define R8A77965_CLK_S3D2 21 +#define R8A77965_CLK_S3D4 22 +#define R8A77965_CLK_LB 23 +#define R8A77965_CLK_CL 24 +#define R8A77965_CLK_ZB3 25 +#define R8A77965_CLK_ZB3D2 26 +#define R8A77965_CLK_CR 27 +#define R8A77965_CLK_CRD2 28 +#define R8A77965_CLK_SD0H 29 +#define R8A77965_CLK_SD0 30 +#define R8A77965_CLK_SD1H 31 +#define R8A77965_CLK_SD1 32 +#define R8A77965_CLK_SD2H 33 +#define R8A77965_CLK_SD2 34 +#define R8A77965_CLK_SD3H 35 +#define R8A77965_CLK_SD3 36 +#define R8A77965_CLK_SSP2 37 +#define R8A77965_CLK_SSP1 38 +#define R8A77965_CLK_SSPRS 39 +#define R8A77965_CLK_RPC 40 +#define R8A77965_CLK_RPCD2 41 +#define R8A77965_CLK_MSO 42 +#define R8A77965_CLK_CANFD 43 +#define R8A77965_CLK_HDMI 44 +#define R8A77965_CLK_CSI0 45 +#define 
R8A77965_CLK_CP 46 +#define R8A77965_CLK_CPEX 47 +#define R8A77965_CLK_R 48 +#define R8A77965_CLK_OSC 49 + +#endif /* __DT_BINDINGS_CLOCK_R8A77965_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77970-cpg-mssr.h b/include/dt-bindings/clock/r8a77970-cpg-mssr.h new file mode 100644 index 000000000..414639559 --- /dev/null +++ b/include/dt-bindings/clock/r8a77970-cpg-mssr.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2016 Renesas Electronics Corp. + * Copyright (C) 2017 Cogent Embedded, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__ + +#include + +/* r8a77970 CPG Core Clocks */ +#define R8A77970_CLK_Z2 0 +#define R8A77970_CLK_ZR 1 +#define R8A77970_CLK_ZTR 2 +#define R8A77970_CLK_ZTRD2 3 +#define R8A77970_CLK_ZT 4 +#define R8A77970_CLK_ZX 5 +#define R8A77970_CLK_S1D1 6 +#define R8A77970_CLK_S1D2 7 +#define R8A77970_CLK_S1D4 8 +#define R8A77970_CLK_S2D1 9 +#define R8A77970_CLK_S2D2 10 +#define R8A77970_CLK_S2D4 11 +#define R8A77970_CLK_LB 12 +#define R8A77970_CLK_CL 13 +#define R8A77970_CLK_ZB3 14 +#define R8A77970_CLK_ZB3D2 15 +#define R8A77970_CLK_DDR 16 +#define R8A77970_CLK_CR 17 +#define R8A77970_CLK_CRD2 18 +#define R8A77970_CLK_SD0H 19 +#define R8A77970_CLK_SD0 20 +#define R8A77970_CLK_RPC 21 +#define R8A77970_CLK_RPCD2 22 +#define R8A77970_CLK_MSO 23 +#define R8A77970_CLK_CANFD 24 +#define R8A77970_CLK_CSI0 25 +#define R8A77970_CLK_FRAY 26 +#define R8A77970_CLK_CP 27 +#define R8A77970_CLK_CPEX 28 +#define R8A77970_CLK_R 29 +#define R8A77970_CLK_OSC 30 + +#endif /* __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77980-cpg-mssr.h b/include/dt-bindings/clock/r8a77980-cpg-mssr.h new file mode 100644 index 000000000..a4c0d76c3 --- /dev/null +++ b/include/dt-bindings/clock/r8a77980-cpg-mssr.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 Renesas Electronics Corp. + * Copyright (C) 2018 Cogent Embedded, Inc. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A77980_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77980_CPG_MSSR_H__ + +#include + +/* r8a77980 CPG Core Clocks */ +#define R8A77980_CLK_Z2 0 +#define R8A77980_CLK_ZR 1 +#define R8A77980_CLK_ZTR 2 +#define R8A77980_CLK_ZTRD2 3 +#define R8A77980_CLK_ZT 4 +#define R8A77980_CLK_ZX 5 +#define R8A77980_CLK_S0D1 6 +#define R8A77980_CLK_S0D2 7 +#define R8A77980_CLK_S0D3 8 +#define R8A77980_CLK_S0D4 9 +#define R8A77980_CLK_S0D6 10 +#define R8A77980_CLK_S0D12 11 +#define R8A77980_CLK_S0D24 12 +#define R8A77980_CLK_S1D1 13 +#define R8A77980_CLK_S1D2 14 +#define R8A77980_CLK_S1D4 15 +#define R8A77980_CLK_S2D1 16 +#define R8A77980_CLK_S2D2 17 +#define R8A77980_CLK_S2D4 18 +#define R8A77980_CLK_S3D1 19 +#define R8A77980_CLK_S3D2 20 +#define R8A77980_CLK_S3D4 21 +#define R8A77980_CLK_LB 22 +#define R8A77980_CLK_CL 23 +#define R8A77980_CLK_ZB3 24 +#define R8A77980_CLK_ZB3D2 25 +#define R8A77980_CLK_ZB3D4 26 +#define R8A77980_CLK_SD0H 27 +#define R8A77980_CLK_SD0 28 +#define R8A77980_CLK_RPC 29 +#define R8A77980_CLK_RPCD2 30 +#define R8A77980_CLK_MSO 31 +#define R8A77980_CLK_CANFD 32 +#define R8A77980_CLK_CSI0 33 +#define R8A77980_CLK_CP 34 +#define R8A77980_CLK_CPEX 35 +#define R8A77980_CLK_R 36 +#define R8A77980_CLK_OSC 37 + +#endif /* __DT_BINDINGS_CLOCK_R8A77980_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77990-cpg-mssr.h b/include/dt-bindings/clock/r8a77990-cpg-mssr.h new file mode 100644 index 000000000..a596a482f --- /dev/null +++ b/include/dt-bindings/clock/r8a77990-cpg-mssr.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__ + +#include + +/* r8a77990 CPG Core Clocks */ +#define R8A77990_CLK_Z2 0 +#define R8A77990_CLK_ZR 1 +#define R8A77990_CLK_ZG 2 +#define R8A77990_CLK_ZTR 3 +#define R8A77990_CLK_ZT 4 +#define R8A77990_CLK_ZX 5 +#define R8A77990_CLK_S0D1 6 +#define R8A77990_CLK_S0D3 7 +#define R8A77990_CLK_S0D6 8 +#define R8A77990_CLK_S0D12 9 +#define R8A77990_CLK_S0D24 10 +#define R8A77990_CLK_S1D1 11 +#define R8A77990_CLK_S1D2 12 +#define R8A77990_CLK_S1D4 13 +#define R8A77990_CLK_S2D1 14 +#define R8A77990_CLK_S2D2 15 +#define R8A77990_CLK_S2D4 16 +#define R8A77990_CLK_S3D1 17 +#define R8A77990_CLK_S3D2 18 +#define R8A77990_CLK_S3D4 19 +#define R8A77990_CLK_S0D6C 20 +#define R8A77990_CLK_S3D1C 21 +#define R8A77990_CLK_S3D2C 22 +#define R8A77990_CLK_S3D4C 23 +#define R8A77990_CLK_LB 24 +#define R8A77990_CLK_CL 25 +#define R8A77990_CLK_ZB3 26 +#define R8A77990_CLK_ZB3D2 27 +#define R8A77990_CLK_CR 28 +#define R8A77990_CLK_CRD2 29 +#define R8A77990_CLK_SD0H 30 +#define R8A77990_CLK_SD0 31 +#define R8A77990_CLK_SD1H 32 +#define R8A77990_CLK_SD1 33 +#define R8A77990_CLK_SD3H 34 +#define R8A77990_CLK_SD3 35 +#define R8A77990_CLK_RPC 36 +#define R8A77990_CLK_RPCD2 37 +#define R8A77990_CLK_ZA2 38 +#define R8A77990_CLK_ZA8 39 +#define R8A77990_CLK_Z2D 40 +#define R8A77990_CLK_CANFD 41 +#define R8A77990_CLK_MSO 42 +#define R8A77990_CLK_R 43 +#define R8A77990_CLK_OSC 44 +#define R8A77990_CLK_LV0 45 +#define R8A77990_CLK_LV1 46 +#define R8A77990_CLK_CSI0 47 +#define R8A77990_CLK_CP 48 +#define R8A77990_CLK_CPEX 49 + +#endif /* __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77995-cpg-mssr.h b/include/dt-bindings/clock/r8a77995-cpg-mssr.h new file mode 100644 index 000000000..4e8ae3dee --- /dev/null +++ b/include/dt-bindings/clock/r8a77995-cpg-mssr.h 
@@ -0,0 +1,57 @@ +/* + * Copyright (C) 2017 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ + +#include + +/* r8a77995 CPG Core Clocks */ +#define R8A77995_CLK_Z2 0 +#define R8A77995_CLK_ZG 1 +#define R8A77995_CLK_ZTR 2 +#define R8A77995_CLK_ZT 3 +#define R8A77995_CLK_ZX 4 +#define R8A77995_CLK_S0D1 5 +#define R8A77995_CLK_S1D1 6 +#define R8A77995_CLK_S1D2 7 +#define R8A77995_CLK_S1D4 8 +#define R8A77995_CLK_S2D1 9 +#define R8A77995_CLK_S2D2 10 +#define R8A77995_CLK_S2D4 11 +#define R8A77995_CLK_S3D1 12 +#define R8A77995_CLK_S3D2 13 +#define R8A77995_CLK_S3D4 14 +#define R8A77995_CLK_S1D4C 15 +#define R8A77995_CLK_S3D1C 16 +#define R8A77995_CLK_S3D2C 17 +#define R8A77995_CLK_S3D4C 18 +#define R8A77995_CLK_LB 19 +#define R8A77995_CLK_CL 20 +#define R8A77995_CLK_ZB3 21 +#define R8A77995_CLK_ZB3D2 22 +#define R8A77995_CLK_CR 23 +#define R8A77995_CLK_CRD2 24 +#define R8A77995_CLK_SD0H 25 +#define R8A77995_CLK_SD0 26 +#define R8A77995_CLK_SSP2 27 +#define R8A77995_CLK_SSP1 28 +#define R8A77995_CLK_RPC 29 +#define R8A77995_CLK_RPCD2 30 +#define R8A77995_CLK_ZA2 31 +#define R8A77995_CLK_ZA8 32 +#define R8A77995_CLK_Z2D 33 +#define R8A77995_CLK_CANFD 34 +#define R8A77995_CLK_MSO 35 +#define R8A77995_CLK_R 36 +#define R8A77995_CLK_OSC 37 +#define R8A77995_CLK_LV0 38 +#define R8A77995_CLK_LV1 39 +#define R8A77995_CLK_CP 40 + +#endif /* __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r9a06g032-sysctrl.h b/include/dt-bindings/clock/r9a06g032-sysctrl.h new file mode 100644 index 000000000..90c0f3dc1 --- /dev/null +++ b/include/dt-bindings/clock/r9a06g032-sysctrl.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * R9A06G032 sysctrl IDs + * + * Copyright (C) 2018 Renesas Electronics Europe Limited + * + * Michel Pollet , + */ + +#ifndef __DT_BINDINGS_R9A06G032_SYSCTRL_H__ +#define __DT_BINDINGS_R9A06G032_SYSCTRL_H__ + +#define R9A06G032_CLK_PLL_USB 1 +#define R9A06G032_CLK_48 1 /* AKA CLK_PLL_USB */ +#define R9A06G032_MSEBIS_CLK 3 /* AKA CLKOUT_D16 */ +#define R9A06G032_MSEBIM_CLK 3 /* AKA CLKOUT_D16 */ +#define R9A06G032_CLK_DDRPHY_PLLCLK 5 /* AKA CLKOUT_D1OR2 */ +#define R9A06G032_CLK50 6 /* AKA CLKOUT_D20 */ +#define R9A06G032_CLK25 7 /* AKA CLKOUT_D40 */ +#define R9A06G032_CLK125 9 /* AKA CLKOUT_D8 */ +#define R9A06G032_CLK_P5_PG1 17 /* AKA DIV_P5_PG */ +#define R9A06G032_CLK_REF_SYNC 21 /* AKA DIV_REF_SYNC */ +#define R9A06G032_CLK_25_PG4 26 +#define R9A06G032_CLK_25_PG5 27 +#define R9A06G032_CLK_25_PG6 28 +#define R9A06G032_CLK_25_PG7 29 +#define R9A06G032_CLK_25_PG8 30 +#define R9A06G032_CLK_ADC 31 +#define R9A06G032_CLK_ECAT100 32 +#define R9A06G032_CLK_HSR100 33 +#define R9A06G032_CLK_I2C0 34 +#define R9A06G032_CLK_I2C1 35 +#define R9A06G032_CLK_MII_REF 36 +#define R9A06G032_CLK_NAND 37 +#define R9A06G032_CLK_NOUSBP2_PG6 38 +#define R9A06G032_CLK_P1_PG2 39 +#define R9A06G032_CLK_P1_PG3 40 +#define R9A06G032_CLK_P1_PG4 41 +#define R9A06G032_CLK_P4_PG3 42 +#define R9A06G032_CLK_P4_PG4 43 +#define R9A06G032_CLK_P6_PG1 44 +#define R9A06G032_CLK_P6_PG2 45 +#define R9A06G032_CLK_P6_PG3 46 +#define R9A06G032_CLK_P6_PG4 47 +#define R9A06G032_CLK_PCI_USB 48 +#define R9A06G032_CLK_QSPI0 49 +#define R9A06G032_CLK_QSPI1 50 +#define 
R9A06G032_CLK_RGMII_REF 51 +#define R9A06G032_CLK_RMII_REF 52 +#define R9A06G032_CLK_SDIO0 53 +#define R9A06G032_CLK_SDIO1 54 +#define R9A06G032_CLK_SERCOS100 55 +#define R9A06G032_CLK_SLCD 56 +#define R9A06G032_CLK_SPI0 57 +#define R9A06G032_CLK_SPI1 58 +#define R9A06G032_CLK_SPI2 59 +#define R9A06G032_CLK_SPI3 60 +#define R9A06G032_CLK_SPI4 61 +#define R9A06G032_CLK_SPI5 62 +#define R9A06G032_CLK_SWITCH 63 +#define R9A06G032_HCLK_ECAT125 65 +#define R9A06G032_HCLK_PINCONFIG 66 +#define R9A06G032_HCLK_SERCOS 67 +#define R9A06G032_HCLK_SGPIO2 68 +#define R9A06G032_HCLK_SGPIO3 69 +#define R9A06G032_HCLK_SGPIO4 70 +#define R9A06G032_HCLK_TIMER0 71 +#define R9A06G032_HCLK_TIMER1 72 +#define R9A06G032_HCLK_USBF 73 +#define R9A06G032_HCLK_USBH 74 +#define R9A06G032_HCLK_USBPM 75 +#define R9A06G032_CLK_48_PG_F 76 +#define R9A06G032_CLK_48_PG4 77 +#define R9A06G032_CLK_DDRPHY_PCLK 81 /* AKA CLK_REF_SYNC_D4 */ +#define R9A06G032_CLK_FW 81 /* AKA CLK_REF_SYNC_D4 */ +#define R9A06G032_CLK_CRYPTO 81 /* AKA CLK_REF_SYNC_D4 */ +#define R9A06G032_CLK_A7MP 84 /* AKA DIV_CA7 */ +#define R9A06G032_HCLK_CAN0 85 +#define R9A06G032_HCLK_CAN1 86 +#define R9A06G032_HCLK_DELTASIGMA 87 +#define R9A06G032_HCLK_PWMPTO 88 +#define R9A06G032_HCLK_RSV 89 +#define R9A06G032_HCLK_SGPIO0 90 +#define R9A06G032_HCLK_SGPIO1 91 +#define R9A06G032_RTOS_MDC 92 +#define R9A06G032_CLK_CM3 93 +#define R9A06G032_CLK_DDRC 94 +#define R9A06G032_CLK_ECAT25 95 +#define R9A06G032_CLK_HSR50 96 +#define R9A06G032_CLK_HW_RTOS 97 +#define R9A06G032_CLK_SERCOS50 98 +#define R9A06G032_HCLK_ADC 99 +#define R9A06G032_HCLK_CM3 100 +#define R9A06G032_HCLK_CRYPTO_EIP150 101 +#define R9A06G032_HCLK_CRYPTO_EIP93 102 +#define R9A06G032_HCLK_DDRC 103 +#define R9A06G032_HCLK_DMA0 104 +#define R9A06G032_HCLK_DMA1 105 +#define R9A06G032_HCLK_GMAC0 106 +#define R9A06G032_HCLK_GMAC1 107 +#define R9A06G032_HCLK_GPIO0 108 +#define R9A06G032_HCLK_GPIO1 109 +#define R9A06G032_HCLK_GPIO2 110 +#define R9A06G032_HCLK_HSR 111 +#define R9A06G032_HCLK_I2C0 112 +#define R9A06G032_HCLK_I2C1 113 +#define R9A06G032_HCLK_LCD 114 +#define R9A06G032_HCLK_MSEBI_M 115 +#define R9A06G032_HCLK_MSEBI_S 116 +#define R9A06G032_HCLK_NAND 117 +#define R9A06G032_HCLK_PG_I 118 +#define R9A06G032_HCLK_PG19 119 +#define R9A06G032_HCLK_PG20 120 +#define R9A06G032_HCLK_PG3 121 +#define R9A06G032_HCLK_PG4 122 +#define R9A06G032_HCLK_QSPI0 123 +#define R9A06G032_HCLK_QSPI1 124 +#define R9A06G032_HCLK_ROM 125 +#define R9A06G032_HCLK_RTC 126 +#define R9A06G032_HCLK_SDIO0 127 +#define R9A06G032_HCLK_SDIO1 128 +#define R9A06G032_HCLK_SEMAP 129 +#define R9A06G032_HCLK_SPI0 130 +#define R9A06G032_HCLK_SPI1 131 +#define R9A06G032_HCLK_SPI2 132 +#define R9A06G032_HCLK_SPI3 133 +#define R9A06G032_HCLK_SPI4 134 +#define R9A06G032_HCLK_SPI5 135 +#define R9A06G032_HCLK_SWITCH 136 +#define R9A06G032_HCLK_SWITCH_RG 137 +#define R9A06G032_HCLK_UART0 138 +#define R9A06G032_HCLK_UART1 139 +#define R9A06G032_HCLK_UART2 140 +#define R9A06G032_HCLK_UART3 141 +#define R9A06G032_HCLK_UART4 142 +#define R9A06G032_HCLK_UART5 143 +#define R9A06G032_HCLK_UART6 144 +#define R9A06G032_HCLK_UART7 145 +#define R9A06G032_CLK_UART0 146 +#define R9A06G032_CLK_UART1 147 +#define R9A06G032_CLK_UART2 148 +#define R9A06G032_CLK_UART3 149 +#define R9A06G032_CLK_UART4 150 +#define R9A06G032_CLK_UART5 151 +#define R9A06G032_CLK_UART6 152 +#define R9A06G032_CLK_UART7 153 + +#endif /* __DT_BINDINGS_R9A06G032_SYSCTRL_H__ */ diff --git a/include/dt-bindings/clock/renesas-cpg-mssr.h b/include/dt-bindings/clock/renesas-cpg-mssr.h new 
file mode 100644 index 000000000..569a3cc33 --- /dev/null +++ b/include/dt-bindings/clock/renesas-cpg-mssr.h @@ -0,0 +1,15 @@ +/* + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ + +#define CPG_CORE 0 /* Core Clock */ +#define CPG_MOD 1 /* Module Clock */ + +#endif /* __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h new file mode 100644 index 000000000..de44109a3 --- /dev/null +++ b/include/dt-bindings/clock/rk3036-cru.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2015 Rockchip Electronics Co. Ltd. + * Author: Xing Zheng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3036_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3036_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_GPLL 3 +#define ARMCLK 4 + +/* sclk gates (special clocks) */ +#define SCLK_GPU 64 +#define SCLK_SPI 65 +#define SCLK_SDMMC 68 +#define SCLK_SDIO 69 +#define SCLK_EMMC 71 +#define SCLK_NANDC 76 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_I2S 82 +#define SCLK_SPDIF 83 +#define SCLK_TIMER0 85 +#define SCLK_TIMER1 86 +#define SCLK_TIMER2 87 +#define SCLK_TIMER3 88 +#define SCLK_OTGPHY0 93 +#define SCLK_LCDC 100 +#define SCLK_HDMI 109 +#define SCLK_HEVC 111 +#define SCLK_I2S_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO_DRV 115 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO_SAMPLE 119 +#define SCLK_EMMC_SAMPLE 121 +#define SCLK_PVTM_CORE 123 +#define SCLK_PVTM_GPU 124 +#define SCLK_PVTM_VIDEO 125 +#define SCLK_MAC 151 +#define SCLK_MACREF 152 +#define SCLK_MACPLL 153 +#define SCLK_SFC 160 + +/* aclk gates */ +#define ACLK_DMAC2 194 +#define ACLK_LCDC 197 +#define ACLK_VIO 203 +#define ACLK_VCODEC 208 +#define ACLK_CPU 209 +#define ACLK_PERI 210 + +/* pclk gates */ +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GRF 329 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_SPI 338 +#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_PWM 350 +#define PCLK_TIMER 353 +#define PCLK_HDMI 360 +#define PCLK_CPU 362 +#define PCLK_PERI 363 +#define PCLK_DDRUPCTL 364 +#define PCLK_WDT 368 +#define PCLK_ACODEC 369 + +/* hclk gates */ +#define HCLK_OTG0 449 +#define HCLK_OTG1 450 +#define HCLK_NANDC 453 +#define HCLK_SDMMC 456 +#define HCLK_SDIO 457 +#define HCLK_EMMC 459 +#define HCLK_MAC 460 +#define HCLK_I2S 462 +#define HCLK_LCDC 465 +#define HCLK_ROM 467 +#define HCLK_VIO_BUS 472 +#define HCLK_VCODEC 476 +#define HCLK_CPU 477 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* 
soft-reset indices */ +#define SRST_CORE0 0 +#define SRST_CORE1 1 +#define SRST_CORE0_DBG 4 +#define SRST_CORE1_DBG 5 +#define SRST_CORE0_POR 8 +#define SRST_CORE1_POR 9 +#define SRST_L2C 12 +#define SRST_TOPDBG 13 +#define SRST_STRC_SYS_A 14 +#define SRST_PD_CORE_NIU 15 + +#define SRST_TIMER2 16 +#define SRST_CPUSYS_H 17 +#define SRST_AHB2APB_H 19 +#define SRST_TIMER3 20 +#define SRST_INTMEM 21 +#define SRST_ROM 22 +#define SRST_PERI_NIU 23 +#define SRST_I2S 24 +#define SRST_DDR_PLL 25 +#define SRST_GPU_DLL 26 +#define SRST_TIMER0 27 +#define SRST_TIMER1 28 +#define SRST_CORE_DLL 29 +#define SRST_EFUSE_P 30 +#define SRST_ACODEC_P 31 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_UART0 39 +#define SRST_UART1 40 +#define SRST_UART2 41 +#define SRST_I2C0 43 +#define SRST_I2C1 44 +#define SRST_I2C2 45 +#define SRST_SFC 47 + +#define SRST_PWM0 48 +#define SRST_DAP 51 +#define SRST_DAP_SYS 52 +#define SRST_GRF 55 +#define SRST_PERIPHSYS_A 57 +#define SRST_PERIPHSYS_H 58 +#define SRST_PERIPHSYS_P 59 +#define SRST_CPU_PERI 61 +#define SRST_EMEM_PERI 62 +#define SRST_USB_PERI 63 + +#define SRST_DMA2 64 +#define SRST_MAC 66 +#define SRST_NANDC 68 +#define SRST_USBOTG0 69 +#define SRST_OTGC0 71 +#define SRST_USBOTG1 72 +#define SRST_OTGC1 74 +#define SRST_DDRMSCH 79 + +#define SRST_MMC0 81 +#define SRST_SDIO 82 +#define SRST_EMMC 83 +#define SRST_SPI0 84 +#define SRST_WDT 86 +#define SRST_DDRPHY 88 +#define SRST_DDRPHY_P 89 +#define SRST_DDRCTRL 90 +#define SRST_DDRCTRL_P 91 + +#define SRST_HDMI_P 96 +#define SRST_VIO_BUS_H 99 +#define SRST_UTMI0 103 +#define SRST_UTMI1 104 +#define SRST_USBPOR 105 + +#define SRST_VCODEC_A 112 +#define SRST_VCODEC_H 113 +#define SRST_VIO1_A 114 +#define SRST_HEVC 115 +#define SRST_VCODEC_NIU_A 116 +#define SRST_LCDC1_A 117 +#define SRST_LCDC1_H 118 +#define SRST_LCDC1_D 119 +#define SRST_GPU 120 +#define SRST_GPU_NIU_A 122 + +#define SRST_DBG_P 131 + +#endif diff --git a/include/dt-bindings/clock/rk3066a-cru.h b/include/dt-bindings/clock/rk3066a-cru.h new file mode 100644 index 000000000..d3a9824ef --- /dev/null +++ b/include/dt-bindings/clock/rk3066a-cru.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2014 MundoReader S.L. + * Author: Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H + +#include + +/* soft-reset indices */ +#define SRST_SRST1 0 +#define SRST_SRST2 1 + +#define SRST_L2MEM 18 +#define SRST_I2S0 23 +#define SRST_I2S1 24 +#define SRST_I2S2 25 +#define SRST_TIMER2 29 + +#define SRST_GPIO4 36 +#define SRST_GPIO6 38 + +#define SRST_TSADC 92 + +#define SRST_HDMI 96 +#define SRST_HDMI_APB 97 +#define SRST_CIF1 111 + +#endif diff --git a/include/dt-bindings/clock/rk3128-cru.h b/include/dt-bindings/clock/rk3128-cru.h new file mode 100644 index 000000000..92894f430 --- /dev/null +++ b/include/dt-bindings/clock/rk3128-cru.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2017 Rockchip Electronics Co. Ltd. 
+ * Author: Elaine + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3128_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3128_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define ARMCLK 5 +#define PLL_GPLL_DIV2 6 +#define PLL_GPLL_DIV3 7 + +/* sclk gates (special clocks) */ +#define SCLK_SPI0 65 +#define SCLK_NANDC 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO 69 +#define SCLK_EMMC 71 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_I2S0 80 +#define SCLK_I2S1 81 +#define SCLK_SPDIF 83 +#define SCLK_TIMER0 85 +#define SCLK_TIMER1 86 +#define SCLK_TIMER2 87 +#define SCLK_TIMER3 88 +#define SCLK_TIMER4 89 +#define SCLK_TIMER5 90 +#define SCLK_SARADC 91 +#define SCLK_I2S_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO_DRV 115 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO_SAMPLE 119 +#define SCLK_EMMC_SAMPLE 121 +#define SCLK_VOP 122 +#define SCLK_MAC_SRC 124 +#define SCLK_MAC 126 +#define SCLK_MAC_REFOUT 127 +#define SCLK_MAC_REF 128 +#define SCLK_MAC_RX 129 +#define SCLK_MAC_TX 130 +#define SCLK_HEVC_CORE 134 +#define SCLK_RGA 135 +#define SCLK_CRYPTO 138 +#define SCLK_TSP 139 +#define SCLK_OTGPHY0 142 +#define SCLK_OTGPHY1 143 +#define SCLK_DDRC 144 +#define SCLK_PVTM_FUNC 145 +#define SCLK_PVTM_CORE 146 +#define SCLK_PVTM_GPU 147 +#define SCLK_MIPI_24M 148 +#define SCLK_PVTM 149 +#define SCLK_CIF_SRC 150 +#define SCLK_CIF_OUT_SRC 151 +#define SCLK_CIF_OUT 152 +#define SCLK_SFC 153 +#define SCLK_USB480M 154 + +/* dclk gates */ +#define DCLK_VOP 190 +#define DCLK_EBC 191 + +/* aclk gates */ +#define ACLK_VIO0 192 +#define ACLK_VIO1 193 +#define ACLK_DMAC 194 +#define ACLK_CPU 195 +#define ACLK_VEPU 196 +#define ACLK_VDPU 197 +#define ACLK_CIF 198 +#define ACLK_IEP 199 +#define ACLK_LCDC0 204 +#define ACLK_RGA 205 +#define ACLK_PERI 210 +#define ACLK_VOP 211 +#define ACLK_GMAC 212 +#define ACLK_GPU 213 + +/* pclk gates */ +#define PCLK_SARADC 318 +#define PCLK_WDT 319 +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GPIO3 323 +#define PCLK_VIO_H2P 324 +#define PCLK_MIPI 325 +#define PCLK_EFUSE 326 +#define PCLK_HDMI 327 +#define PCLK_ACODEC 328 +#define PCLK_GRF 329 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_I2C3 335 +#define PCLK_SPI0 338 +#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_TSADC 344 +#define PCLK_PWM 350 +#define PCLK_TIMER 353 +#define PCLK_CPU 354 +#define PCLK_PERI 363 +#define PCLK_GMAC 367 +#define PCLK_PMU_PRE 368 +#define PCLK_SIM_CARD 369 + +/* hclk gates */ +#define HCLK_SPDIF 440 +#define HCLK_GPS 441 +#define HCLK_USBHOST 442 +#define HCLK_I2S_8CH 443 +#define HCLK_I2S_2CH 444 +#define HCLK_VOP 452 +#define HCLK_NANDC 453 +#define HCLK_SDMMC 456 +#define HCLK_SDIO 457 +#define HCLK_EMMC 459 +#define HCLK_CPU 460 +#define HCLK_VEPU 461 +#define HCLK_VDPU 462 +#define HCLK_LCDC0 463 +#define HCLK_EBC 465 +#define HCLK_VIO 466 +#define 
HCLK_RGA 467 +#define HCLK_IEP 468 +#define HCLK_VIO_H2P 469 +#define HCLK_CIF 470 +#define HCLK_HOST2 473 +#define HCLK_OTG 474 +#define HCLK_TSP 475 +#define HCLK_CRYPTO 476 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_ACLK_CORE 13 +#define SRST_STRC_SYS_A 14 +#define SRST_L2C 15 + +#define SRST_CPUSYS_H 18 +#define SRST_AHB2APBSYS_H 19 +#define SRST_SPDIF 20 +#define SRST_INTMEM 21 +#define SRST_ROM 22 +#define SRST_PERI_NIU 23 +#define SRST_I2S_2CH 24 +#define SRST_I2S_8CH 25 +#define SRST_GPU_PVTM 26 +#define SRST_FUNC_PVTM 27 +#define SRST_CORE_PVTM 29 +#define SRST_EFUSE_P 30 +#define SRST_ACODEC_P 31 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 +#define SRST_MIPIPHY_P 36 +#define SRST_UART0 39 +#define SRST_UART1 40 +#define SRST_UART2 41 +#define SRST_I2C0 43 +#define SRST_I2C1 44 +#define SRST_I2C2 45 +#define SRST_I2C3 46 +#define SRST_SFC 47 + +#define SRST_PWM 48 +#define SRST_DAP_PO 50 +#define SRST_DAP 51 +#define SRST_DAP_SYS 52 +#define SRST_CRYPTO 53 +#define SRST_GRF 55 +#define SRST_GMAC 56 +#define SRST_PERIPH_SYS_A 57 +#define SRST_PERIPH_SYS_H 58 +#define SRST_PERIPH_SYS_P 59 +#define SRST_SMART_CARD 60 +#define SRST_CPU_PERI 61 +#define SRST_EMEM_PERI 62 +#define SRST_USB_PERI 63 + +#define SRST_DMA 64 +#define SRST_GPS 67 +#define SRST_NANDC 68 +#define SRST_USBOTG0 69 +#define SRST_OTGC0 71 +#define SRST_USBOTG1 72 +#define SRST_OTGC1 74 +#define SRST_DDRMSCH 79 + +#define SRST_SDMMC 81 +#define SRST_SDIO 82 +#define SRST_EMMC 83 +#define SRST_SPI 84 +#define SRST_WDT 86 +#define SRST_SARADC 87 +#define SRST_DDRPHY 88 +#define SRST_DDRPHY_P 89 +#define SRST_DDRCTRL 90 +#define SRST_DDRCTRL_P 91 +#define SRST_TSP 92 +#define SRST_TSP_CLKIN 93 +#define SRST_HOST0_ECHI 94 + +#define SRST_HDMI_P 96 +#define SRST_VIO_ARBI_H 97 +#define SRST_VIO0_A 98 +#define SRST_VIO_BUS_H 99 +#define SRST_VOP_A 100 +#define SRST_VOP_H 101 +#define SRST_VOP_D 102 +#define SRST_UTMI0 103 +#define SRST_UTMI1 104 +#define SRST_USBPOR 105 +#define SRST_IEP_A 106 +#define SRST_IEP_H 107 +#define SRST_RGA_A 108 +#define SRST_RGA_H 109 +#define SRST_CIF0 110 +#define SRST_PMU 111 + +#define SRST_VCODEC_A 112 +#define SRST_VCODEC_H 113 +#define SRST_VIO1_A 114 +#define SRST_HEVC_CORE 115 +#define SRST_VCODEC_NIU_A 116 +#define SRST_PMU_NIU_P 117 +#define SRST_LCDC0_S 119 +#define SRST_GPU 120 +#define SRST_GPU_NIU_A 122 +#define SRST_EBC_A 123 +#define SRST_EBC_H 124 + +#define SRST_CORE_DBG 128 +#define SRST_DBG_P 129 +#define SRST_TIMER0 130 +#define SRST_TIMER1 131 +#define SRST_TIMER2 132 +#define SRST_TIMER3 133 +#define SRST_TIMER4 134 +#define SRST_TIMER5 135 +#define SRST_VIO_H2P 136 +#define SRST_VIO_MIPI_DSI 137 + +#endif diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h new file mode 100644 index 000000000..b9462b7d3 --- /dev/null +++ b/include/dt-bindings/clock/rk3188-cru-common.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2014 MundoReader S.L. 
+ * Author: Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H + +/* core clocks from */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define CORE_PERI 5 +#define CORE_L2C 6 +#define ARMCLK 7 + +/* sclk gates (special clocks) */ +#define SCLK_UART0 64 +#define SCLK_UART1 65 +#define SCLK_UART2 66 +#define SCLK_UART3 67 +#define SCLK_MAC 68 +#define SCLK_SPI0 69 +#define SCLK_SPI1 70 +#define SCLK_SARADC 71 +#define SCLK_SDMMC 72 +#define SCLK_SDIO 73 +#define SCLK_EMMC 74 +#define SCLK_I2S0 75 +#define SCLK_I2S1 76 +#define SCLK_I2S2 77 +#define SCLK_SPDIF 78 +#define SCLK_CIF0 79 +#define SCLK_CIF1 80 +#define SCLK_OTGPHY0 81 +#define SCLK_OTGPHY1 82 +#define SCLK_HSADC 83 +#define SCLK_TIMER0 84 +#define SCLK_TIMER1 85 +#define SCLK_TIMER2 86 +#define SCLK_TIMER3 87 +#define SCLK_TIMER4 88 +#define SCLK_TIMER5 89 +#define SCLK_TIMER6 90 +#define SCLK_JTAG 91 +#define SCLK_SMC 92 +#define SCLK_TSADC 93 + +#define DCLK_LCDC0 190 +#define DCLK_LCDC1 191 + +/* aclk gates */ +#define ACLK_DMA1 192 +#define ACLK_DMA2 193 +#define ACLK_GPS 194 +#define ACLK_LCDC0 195 +#define ACLK_LCDC1 196 +#define ACLK_GPU 197 +#define ACLK_SMC 198 +#define ACLK_CIF1 199 +#define ACLK_IPP 200 +#define ACLK_RGA 201 +#define ACLK_CIF0 202 +#define ACLK_CPU 203 +#define ACLK_PERI 204 +#define ACLK_VEPU 205 +#define ACLK_VDPU 206 + +/* pclk gates */ +#define PCLK_GRF 320 +#define PCLK_PMU 321 +#define PCLK_TIMER0 322 +#define PCLK_TIMER1 323 +#define PCLK_TIMER2 324 +#define PCLK_TIMER3 325 +#define PCLK_PWM01 326 +#define PCLK_PWM23 327 +#define PCLK_SPI0 328 +#define PCLK_SPI1 329 +#define PCLK_SARADC 330 +#define PCLK_WDT 331 +#define PCLK_UART0 332 +#define PCLK_UART1 333 +#define PCLK_UART2 334 +#define PCLK_UART3 335 +#define PCLK_I2C0 336 +#define PCLK_I2C1 337 +#define PCLK_I2C2 338 +#define PCLK_I2C3 339 +#define PCLK_I2C4 340 +#define PCLK_GPIO0 341 +#define PCLK_GPIO1 342 +#define PCLK_GPIO2 343 +#define PCLK_GPIO3 344 +#define PCLK_GPIO4 345 +#define PCLK_GPIO6 346 +#define PCLK_EFUSE 347 +#define PCLK_TZPC 348 +#define PCLK_TSADC 349 +#define PCLK_CPU 350 +#define PCLK_PERI 351 +#define PCLK_DDRUPCTL 352 +#define PCLK_PUBL 353 + +/* hclk gates */ +#define HCLK_SDMMC 448 +#define HCLK_SDIO 449 +#define HCLK_EMMC 450 +#define HCLK_OTG0 451 +#define HCLK_EMAC 452 +#define HCLK_SPDIF 453 +#define HCLK_I2S0 454 +#define HCLK_I2S1 455 +#define HCLK_I2S2 456 +#define HCLK_OTG1 457 +#define HCLK_HSIC 458 +#define HCLK_HSADC 459 +#define HCLK_PIDF 460 +#define HCLK_LCDC0 461 +#define HCLK_LCDC1 462 +#define HCLK_ROM 463 +#define HCLK_CIF0 464 +#define HCLK_IPP 465 +#define HCLK_RGA 466 +#define HCLK_NANDC0 467 +#define HCLK_CPU 468 +#define HCLK_PERI 469 +#define HCLK_CIF1 470 +#define HCLK_VEPU 471 +#define HCLK_VDPU 472 + +#define CLK_NR_CLKS (HCLK_VDPU + 1) + +/* soft-reset indices */ +#define SRST_MCORE 2 +#define SRST_CORE0 3 +#define SRST_CORE1 4 +#define SRST_MCORE_DBG 7 +#define 
SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE0_WDT 12 +#define SRST_CORE1_WDT 13 +#define SRST_STRC_SYS 14 +#define SRST_L2C 15 + +#define SRST_CPU_AHB 17 +#define SRST_AHB2APB 19 +#define SRST_DMA1 20 +#define SRST_INTMEM 21 +#define SRST_ROM 22 +#define SRST_SPDIF 26 +#define SRST_TIMER0 27 +#define SRST_TIMER1 28 +#define SRST_EFUSE 30 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 + +#define SRST_UART0 39 +#define SRST_UART1 40 +#define SRST_UART2 41 +#define SRST_UART3 42 +#define SRST_I2C0 43 +#define SRST_I2C1 44 +#define SRST_I2C2 45 +#define SRST_I2C3 46 +#define SRST_I2C4 47 + +#define SRST_PWM0 48 +#define SRST_PWM1 49 +#define SRST_DAP_PO 50 +#define SRST_DAP 51 +#define SRST_DAP_SYS 52 +#define SRST_TPIU_ATB 53 +#define SRST_PMU_APB 54 +#define SRST_GRF 55 +#define SRST_PMU 56 +#define SRST_PERI_AXI 57 +#define SRST_PERI_AHB 58 +#define SRST_PERI_APB 59 +#define SRST_PERI_NIU 60 +#define SRST_CPU_PERI 61 +#define SRST_EMEM_PERI 62 +#define SRST_USB_PERI 63 + +#define SRST_DMA2 64 +#define SRST_SMC 65 +#define SRST_MAC 66 +#define SRST_NANC0 68 +#define SRST_USBOTG0 69 +#define SRST_USBPHY0 70 +#define SRST_OTGC0 71 +#define SRST_USBOTG1 72 +#define SRST_USBPHY1 73 +#define SRST_OTGC1 74 +#define SRST_HSADC 76 +#define SRST_PIDFILTER 77 +#define SRST_DDR_MSCH 79 + +#define SRST_TZPC 80 +#define SRST_SDMMC 81 +#define SRST_SDIO 82 +#define SRST_EMMC 83 +#define SRST_SPI0 84 +#define SRST_SPI1 85 +#define SRST_WDT 86 +#define SRST_SARADC 87 +#define SRST_DDRPHY 88 +#define SRST_DDRPHY_APB 89 +#define SRST_DDRCTL 90 +#define SRST_DDRCTL_APB 91 +#define SRST_DDRPUB 93 + +#define SRST_VIO0_AXI 98 +#define SRST_VIO0_AHB 99 +#define SRST_LCDC0_AXI 100 +#define SRST_LCDC0_AHB 101 +#define SRST_LCDC0_DCLK 102 +#define SRST_LCDC1_AXI 103 +#define SRST_LCDC1_AHB 104 +#define SRST_LCDC1_DCLK 105 +#define SRST_IPP_AXI 106 +#define SRST_IPP_AHB 107 +#define SRST_RGA_AXI 108 +#define SRST_RGA_AHB 109 +#define SRST_CIF0 110 + +#define SRST_VCODEC_AXI 112 +#define SRST_VCODEC_AHB 113 +#define SRST_VIO1_AXI 114 +#define SRST_VCODEC_CPU 115 +#define SRST_VCODEC_NIU 116 +#define SRST_GPU 120 +#define SRST_GPU_NIU 122 +#define SRST_TFUN_ATB 125 +#define SRST_TFUN_APB 126 +#define SRST_CTI4_APB 127 + +#define SRST_TPIU_APB 128 +#define SRST_TRACE 129 +#define SRST_CORE_DBG 130 +#define SRST_DBG_APB 131 +#define SRST_CTI0 132 +#define SRST_CTI0_APB 133 +#define SRST_CTI1 134 +#define SRST_CTI1_APB 135 +#define SRST_PTM_CORE0 136 +#define SRST_PTM_CORE1 137 +#define SRST_PTM0 138 +#define SRST_PTM0_ATB 139 +#define SRST_PTM1 140 +#define SRST_PTM1_ATB 141 +#define SRST_CTM 142 +#define SRST_TS 143 + +#endif diff --git a/include/dt-bindings/clock/rk3188-cru.h b/include/dt-bindings/clock/rk3188-cru.h new file mode 100644 index 000000000..9f2e631f2 --- /dev/null +++ b/include/dt-bindings/clock/rk3188-cru.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2014 MundoReader S.L. + * Author: Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H + +#include + +/* soft-reset indices */ +#define SRST_PTM_CORE2 0 +#define SRST_PTM_CORE3 1 +#define SRST_CORE2 5 +#define SRST_CORE3 6 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 + +#define SRST_TIMER2 16 +#define SRST_TIMER4 23 +#define SRST_I2S0 24 +#define SRST_TIMER5 25 +#define SRST_TIMER3 29 +#define SRST_TIMER6 31 + +#define SRST_PTM3 36 +#define SRST_PTM3_ATB 37 + +#define SRST_GPS 67 +#define SRST_HSICPHY 75 +#define SRST_TIMER 78 + +#define SRST_PTM2 92 +#define SRST_CORE2_WDT 94 +#define SRST_CORE3_WDT 95 + +#define SRST_PTM2_ATB 111 + +#define SRST_HSIC 117 +#define SRST_CTI2 118 +#define SRST_CTI2_APB 119 +#define SRST_GPU_BRIDGE 121 +#define SRST_CTI3 123 +#define SRST_CTI3_APB 124 + +#endif diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h new file mode 100644 index 000000000..55655ab0a --- /dev/null +++ b/include/dt-bindings/clock/rk3228-cru.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2015 Rockchip Electronics Co. Ltd. + * Author: Jeffy Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3228_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3228_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define ARMCLK 5 + +/* sclk gates (special clocks) */ +#define SCLK_SPI0 65 +#define SCLK_NANDC 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO 69 +#define SCLK_EMMC 71 +#define SCLK_TSADC 72 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_I2S0 80 +#define SCLK_I2S1 81 +#define SCLK_I2S2 82 +#define SCLK_SPDIF 83 +#define SCLK_TIMER0 85 +#define SCLK_TIMER1 86 +#define SCLK_TIMER2 87 +#define SCLK_TIMER3 88 +#define SCLK_TIMER4 89 +#define SCLK_TIMER5 90 +#define SCLK_I2S_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO_DRV 115 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO_SAMPLE 119 +#define SCLK_SDIO_SRC 120 +#define SCLK_EMMC_SAMPLE 121 +#define SCLK_VOP 122 +#define SCLK_HDMI_HDCP 123 +#define SCLK_MAC_SRC 124 +#define SCLK_MAC_EXTCLK 125 +#define SCLK_MAC 126 +#define SCLK_MAC_REFOUT 127 +#define SCLK_MAC_REF 128 +#define SCLK_MAC_RX 129 +#define SCLK_MAC_TX 130 +#define SCLK_MAC_PHY 131 +#define SCLK_MAC_OUT 132 +#define SCLK_VDEC_CABAC 133 +#define SCLK_VDEC_CORE 134 +#define SCLK_RGA 135 +#define SCLK_HDCP 136 +#define SCLK_HDMI_CEC 137 +#define SCLK_CRYPTO 138 +#define SCLK_TSP 139 +#define SCLK_HSADC 140 +#define SCLK_WIFI 141 +#define SCLK_OTGPHY0 142 +#define SCLK_OTGPHY1 143 + +/* dclk gates */ +#define DCLK_VOP 190 +#define DCLK_HDMI_PHY 191 + +/* aclk gates */ +#define ACLK_DMAC 194 +#define ACLK_CPU 195 +#define ACLK_VPU_PRE 196 +#define ACLK_RKVDEC_PRE 197 +#define ACLK_RGA_PRE 198 +#define ACLK_IEP_PRE 199 +#define ACLK_HDCP_PRE 200 +#define ACLK_VOP_PRE 201 +#define ACLK_VPU 202 +#define ACLK_RKVDEC 203 +#define ACLK_IEP 204 +#define ACLK_RGA 205 +#define ACLK_HDCP 206 +#define ACLK_PERI 210 +#define ACLK_VOP 
211 +#define ACLK_GMAC 212 +#define ACLK_GPU 213 + +/* pclk gates */ +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GPIO3 323 +#define PCLK_VIO_H2P 324 +#define PCLK_HDCP 325 +#define PCLK_EFUSE_1024 326 +#define PCLK_EFUSE_256 327 +#define PCLK_GRF 329 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_I2C3 335 +#define PCLK_SPI0 338 +#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_TSADC 344 +#define PCLK_PWM 350 +#define PCLK_TIMER 353 +#define PCLK_CPU 354 +#define PCLK_PERI 363 +#define PCLK_HDMI_CTRL 364 +#define PCLK_HDMI_PHY 365 +#define PCLK_GMAC 367 + +/* hclk gates */ +#define HCLK_I2S0_8CH 442 +#define HCLK_I2S1_8CH 443 +#define HCLK_I2S2_2CH 444 +#define HCLK_SPDIF_8CH 445 +#define HCLK_VOP 452 +#define HCLK_NANDC 453 +#define HCLK_SDMMC 456 +#define HCLK_SDIO 457 +#define HCLK_EMMC 459 +#define HCLK_CPU 460 +#define HCLK_VPU_PRE 461 +#define HCLK_RKVDEC_PRE 462 +#define HCLK_VIO_PRE 463 +#define HCLK_VPU 464 +#define HCLK_RKVDEC 465 +#define HCLK_VIO 466 +#define HCLK_RGA 467 +#define HCLK_IEP 468 +#define HCLK_VIO_H2P 469 +#define HCLK_HDCP_MMU 470 +#define HCLK_HOST0 471 +#define HCLK_HOST1 472 +#define HCLK_HOST2 473 +#define HCLK_OTG 474 +#define HCLK_TSP 475 +#define HCLK_M_CRYPTO 476 +#define HCLK_S_CRYPTO 477 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_ACLK_CORE 13 +#define SRST_NOC 14 +#define SRST_L2C 15 + +#define SRST_CPUSYS_H 18 +#define SRST_BUSSYS_H 19 +#define SRST_SPDIF 20 +#define SRST_INTMEM 21 +#define SRST_ROM 22 +#define SRST_OTG_ADP 23 +#define SRST_I2S0 24 +#define SRST_I2S1 25 +#define SRST_I2S2 26 +#define SRST_ACODEC_P 27 +#define SRST_DFIMON 28 +#define SRST_MSCH 29 +#define SRST_EFUSE1024 30 +#define SRST_EFUSE256 31 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 +#define SRST_PERIPH_NOC_A 36 +#define SRST_PERIPH_NOC_BUS_H 37 +#define SRST_PERIPH_NOC_P 38 +#define SRST_UART0 39 +#define SRST_UART1 40 +#define SRST_UART2 41 +#define SRST_PHYNOC 42 +#define SRST_I2C0 43 +#define SRST_I2C1 44 +#define SRST_I2C2 45 +#define SRST_I2C3 46 + +#define SRST_PWM 48 +#define SRST_A53_GIC 49 +#define SRST_DAP 51 +#define SRST_DAP_NOC 52 +#define SRST_CRYPTO 53 +#define SRST_SGRF 54 +#define SRST_GRF 55 +#define SRST_GMAC 56 +#define SRST_PERIPH_NOC_H 58 +#define SRST_MACPHY 63 + +#define SRST_DMA 64 +#define SRST_NANDC 68 +#define SRST_USBOTG 69 +#define SRST_OTGC 70 +#define SRST_USBHOST0 71 +#define SRST_HOST_CTRL0 72 +#define SRST_USBHOST1 73 +#define SRST_HOST_CTRL1 74 +#define SRST_USBHOST2 75 +#define SRST_HOST_CTRL2 76 +#define SRST_USBPOR0 77 +#define SRST_USBPOR1 78 +#define SRST_DDRMSCH 79 + +#define SRST_SMART_CARD 80 +#define SRST_SDMMC 81 +#define SRST_SDIO 82 +#define SRST_EMMC 83 +#define SRST_SPI 84 +#define SRST_TSP_H 85 +#define SRST_TSP 86 +#define SRST_TSADC 87 +#define SRST_DDRPHY 88 +#define SRST_DDRPHY_P 89 +#define SRST_DDRCTRL 90 +#define SRST_DDRCTRL_P 91 +#define SRST_HOST0_ECHI 92 +#define SRST_HOST1_ECHI 93 +#define SRST_HOST2_ECHI 94 +#define SRST_VOP_NOC_A 95 + +#define SRST_HDMI_P 96 +#define 
SRST_VIO_ARBI_H 97 +#define SRST_IEP_NOC_A 98 +#define SRST_VIO_NOC_H 99 +#define SRST_VOP_A 100 +#define SRST_VOP_H 101 +#define SRST_VOP_D 102 +#define SRST_UTMI0 103 +#define SRST_UTMI1 104 +#define SRST_UTMI2 105 +#define SRST_UTMI3 106 +#define SRST_RGA 107 +#define SRST_RGA_NOC_A 108 +#define SRST_RGA_A 109 +#define SRST_RGA_H 110 +#define SRST_HDCP_A 111 + +#define SRST_VPU_A 112 +#define SRST_VPU_H 113 +#define SRST_VPU_NOC_A 116 +#define SRST_VPU_NOC_H 117 +#define SRST_RKVDEC_A 118 +#define SRST_RKVDEC_NOC_A 119 +#define SRST_RKVDEC_H 120 +#define SRST_RKVDEC_NOC_H 121 +#define SRST_RKVDEC_CORE 122 +#define SRST_RKVDEC_CABAC 123 +#define SRST_IEP_A 124 +#define SRST_IEP_H 125 +#define SRST_GPU_A 126 +#define SRST_GPU_NOC_A 127 + +#define SRST_CORE_DBG 128 +#define SRST_DBG_P 129 +#define SRST_TIMER0 130 +#define SRST_TIMER1 131 +#define SRST_TIMER2 132 +#define SRST_TIMER3 133 +#define SRST_TIMER4 134 +#define SRST_TIMER5 135 +#define SRST_VIO_H2P 136 +#define SRST_HDMIPHY 139 +#define SRST_VDAC 140 +#define SRST_TIMER_6CH_P 141 + +#endif diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h new file mode 100644 index 000000000..d7b6c83ea --- /dev/null +++ b/include/dt-bindings/clock/rk3288-cru.h @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2014 MundoReader S.L. + * Author: Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define PLL_NPLL 5 +#define ARMCLK 6 + +/* sclk gates (special clocks) */ +#define SCLK_GPU 64 +#define SCLK_SPI0 65 +#define SCLK_SPI1 66 +#define SCLK_SPI2 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO0 69 +#define SCLK_SDIO1 70 +#define SCLK_EMMC 71 +#define SCLK_TSADC 72 +#define SCLK_SARADC 73 +#define SCLK_PS2C 74 +#define SCLK_NANDC0 75 +#define SCLK_NANDC1 76 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_UART3 80 +#define SCLK_UART4 81 +#define SCLK_I2S0 82 +#define SCLK_SPDIF 83 +#define SCLK_SPDIF8CH 84 +#define SCLK_TIMER0 85 +#define SCLK_TIMER1 86 +#define SCLK_TIMER2 87 +#define SCLK_TIMER3 88 +#define SCLK_TIMER4 89 +#define SCLK_TIMER5 90 +#define SCLK_TIMER6 91 +#define SCLK_HSADC 92 +#define SCLK_OTGPHY0 93 +#define SCLK_OTGPHY1 94 +#define SCLK_OTGPHY2 95 +#define SCLK_OTG_ADP 96 +#define SCLK_HSICPHY480M 97 +#define SCLK_HSICPHY12M 98 +#define SCLK_MACREF 99 +#define SCLK_LCDC_PWM0 100 +#define SCLK_LCDC_PWM1 101 +#define SCLK_MAC_RX 102 +#define SCLK_MAC_TX 103 +#define SCLK_EDP_24M 104 +#define SCLK_EDP 105 +#define SCLK_RGA 106 +#define SCLK_ISP 107 +#define SCLK_ISP_JPE 108 +#define SCLK_HDMI_HDCP 109 +#define SCLK_HDMI_CEC 110 +#define SCLK_HEVC_CABAC 111 +#define SCLK_HEVC_CORE 112 +#define SCLK_I2S0_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO0_DRV 115 +#define SCLK_SDIO1_DRV 116 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO0_SAMPLE 119 +#define SCLK_SDIO1_SAMPLE 120 +#define SCLK_EMMC_SAMPLE 121 +#define SCLK_USBPHY480M_SRC 122 +#define SCLK_PVTM_CORE 123 +#define SCLK_PVTM_GPU 124 +#define SCLK_CRYPTO 125 +#define SCLK_MIPIDSI_24M 126 +#define SCLK_VIP_OUT 127 + +#define SCLK_MAC 151 +#define SCLK_MACREF_OUT 152 + +#define DCLK_VOP0 190 +#define DCLK_VOP1 191 + +/* aclk gates */ +#define ACLK_GPU 192 +#define ACLK_DMAC1 193 +#define ACLK_DMAC2 194 +#define ACLK_MMU 195 +#define ACLK_GMAC 196 +#define ACLK_VOP0 197 +#define ACLK_VOP1 198 +#define ACLK_CRYPTO 199 +#define ACLK_RGA 200 +#define ACLK_RGA_NIU 201 +#define ACLK_IEP 202 +#define ACLK_VIO0_NIU 203 +#define ACLK_VIP 204 +#define ACLK_ISP 205 +#define ACLK_VIO1_NIU 206 +#define ACLK_HEVC 207 +#define ACLK_VCODEC 208 +#define ACLK_CPU 209 +#define ACLK_PERI 210 + +/* pclk gates */ +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GPIO3 323 +#define PCLK_GPIO4 324 +#define PCLK_GPIO5 325 +#define PCLK_GPIO6 326 +#define PCLK_GPIO7 327 +#define PCLK_GPIO8 328 +#define PCLK_GRF 329 +#define PCLK_SGRF 330 +#define PCLK_PMU 331 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_I2C3 335 +#define PCLK_I2C4 336 +#define PCLK_I2C5 337 +#define PCLK_SPI0 338 +#define PCLK_SPI1 339 +#define PCLK_SPI2 340 +#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_UART3 344 +#define PCLK_UART4 345 +#define PCLK_TSADC 346 +#define PCLK_SARADC 347 +#define PCLK_SIM 348 +#define PCLK_GMAC 349 +#define PCLK_PWM 350 +#define PCLK_RKPWM 351 +#define PCLK_PS2C 352 +#define PCLK_TIMER 353 +#define PCLK_TZPC 354 +#define PCLK_EDP_CTRL 355 +#define PCLK_MIPI_DSI0 356 +#define PCLK_MIPI_DSI1 357 +#define PCLK_MIPI_CSI 358 +#define PCLK_LVDS_PHY 359 +#define PCLK_HDMI_CTRL 360 +#define PCLK_VIO2_H2P 361 +#define PCLK_CPU 362 +#define PCLK_PERI 363 +#define 
PCLK_DDRUPCTL0 364 +#define PCLK_PUBL0 365 +#define PCLK_DDRUPCTL1 366 +#define PCLK_PUBL1 367 +#define PCLK_WDT 368 +#define PCLK_EFUSE256 369 +#define PCLK_EFUSE1024 370 +#define PCLK_ISP_IN 371 + +/* hclk gates */ +#define HCLK_GPS 448 +#define HCLK_OTG0 449 +#define HCLK_USBHOST0 450 +#define HCLK_USBHOST1 451 +#define HCLK_HSIC 452 +#define HCLK_NANDC0 453 +#define HCLK_NANDC1 454 +#define HCLK_TSP 455 +#define HCLK_SDMMC 456 +#define HCLK_SDIO0 457 +#define HCLK_SDIO1 458 +#define HCLK_EMMC 459 +#define HCLK_HSADC 460 +#define HCLK_CRYPTO 461 +#define HCLK_I2S0 462 +#define HCLK_SPDIF 463 +#define HCLK_SPDIF8CH 464 +#define HCLK_VOP0 465 +#define HCLK_VOP1 466 +#define HCLK_ROM 467 +#define HCLK_IEP 468 +#define HCLK_ISP 469 +#define HCLK_RGA 470 +#define HCLK_VIO_AHB_ARBI 471 +#define HCLK_VIO_NIU 472 +#define HCLK_VIP 473 +#define HCLK_VIO2_H2P 474 +#define HCLK_HEVC 475 +#define HCLK_VCODEC 476 +#define HCLK_CPU 477 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* soft-reset indices */ +#define SRST_CORE0 0 +#define SRST_CORE1 1 +#define SRST_CORE2 2 +#define SRST_CORE3 3 +#define SRST_CORE0_PO 4 +#define SRST_CORE1_PO 5 +#define SRST_CORE2_PO 6 +#define SRST_CORE3_PO 7 +#define SRST_PDCORE_STRSYS 8 +#define SRST_PDBUS_STRSYS 9 +#define SRST_L2C 10 +#define SRST_TOPDBG 11 +#define SRST_CORE0_DBG 12 +#define SRST_CORE1_DBG 13 +#define SRST_CORE2_DBG 14 +#define SRST_CORE3_DBG 15 + +#define SRST_PDBUG_AHB_ARBITOR 16 +#define SRST_EFUSE256 17 +#define SRST_DMAC1 18 +#define SRST_INTMEM 19 +#define SRST_ROM 20 +#define SRST_SPDIF8CH 21 +#define SRST_TIMER 22 +#define SRST_I2S0 23 +#define SRST_SPDIF 24 +#define SRST_TIMER0 25 +#define SRST_TIMER1 26 +#define SRST_TIMER2 27 +#define SRST_TIMER3 28 +#define SRST_TIMER4 29 +#define SRST_TIMER5 30 +#define SRST_EFUSE 31 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 +#define SRST_GPIO4 36 +#define SRST_GPIO5 37 +#define SRST_GPIO6 38 +#define SRST_GPIO7 39 +#define SRST_GPIO8 40 +#define SRST_I2C0 42 +#define SRST_I2C1 43 +#define SRST_I2C2 44 +#define SRST_I2C3 45 +#define SRST_I2C4 46 +#define SRST_I2C5 47 + +#define SRST_DWPWM 48 +#define SRST_MMC_PERI 49 +#define SRST_PERIPH_MMU 50 +#define SRST_DAP 51 +#define SRST_DAP_SYS 52 +#define SRST_TPIU 53 +#define SRST_PMU_APB 54 +#define SRST_GRF 55 +#define SRST_PMU 56 +#define SRST_PERIPH_AXI 57 +#define SRST_PERIPH_AHB 58 +#define SRST_PERIPH_APB 59 +#define SRST_PERIPH_NIU 60 +#define SRST_PDPERI_AHB_ARBI 61 +#define SRST_EMEM 62 +#define SRST_USB_PERI 63 + +#define SRST_DMAC2 64 +#define SRST_MAC 66 +#define SRST_GPS 67 +#define SRST_RKPWM 69 +#define SRST_CCP 71 +#define SRST_USBHOST0 72 +#define SRST_HSIC 73 +#define SRST_HSIC_AUX 74 +#define SRST_HSIC_PHY 75 +#define SRST_HSADC 76 +#define SRST_NANDC0 77 +#define SRST_NANDC1 78 + +#define SRST_TZPC 80 +#define SRST_SPI0 83 +#define SRST_SPI1 84 +#define SRST_SPI2 85 +#define SRST_SARADC 87 +#define SRST_PDALIVE_NIU 88 +#define SRST_PDPMU_INTMEM 89 +#define SRST_PDPMU_NIU 90 +#define SRST_SGRF 91 + +#define SRST_VIO_ARBI 96 +#define SRST_RGA_NIU 97 +#define SRST_VIO0_NIU_AXI 98 +#define SRST_VIO_NIU_AHB 99 +#define SRST_LCDC0_AXI 100 +#define SRST_LCDC0_AHB 101 +#define SRST_LCDC0_DCLK 102 +#define SRST_VIO1_NIU_AXI 103 +#define SRST_VIP 104 +#define SRST_RGA_CORE 105 +#define SRST_IEP_AXI 106 +#define SRST_IEP_AHB 107 +#define SRST_RGA_AXI 108 +#define SRST_RGA_AHB 109 +#define SRST_ISP 110 +#define SRST_EDP 111 + +#define SRST_VCODEC_AXI 112 +#define SRST_VCODEC_AHB 
113 +#define SRST_VIO_H2P 114 +#define SRST_MIPIDSI0 115 +#define SRST_MIPIDSI1 116 +#define SRST_MIPICSI 117 +#define SRST_LVDS_PHY 118 +#define SRST_LVDS_CON 119 +#define SRST_GPU 120 +#define SRST_HDMI 121 +#define SRST_CORE_PVTM 124 +#define SRST_GPU_PVTM 125 + +#define SRST_MMC0 128 +#define SRST_SDIO0 129 +#define SRST_SDIO1 130 +#define SRST_EMMC 131 +#define SRST_USBOTG_AHB 132 +#define SRST_USBOTG_PHY 133 +#define SRST_USBOTG_CON 134 +#define SRST_USBHOST0_AHB 135 +#define SRST_USBHOST0_PHY 136 +#define SRST_USBHOST0_CON 137 +#define SRST_USBHOST1_AHB 138 +#define SRST_USBHOST1_PHY 139 +#define SRST_USBHOST1_CON 140 +#define SRST_USB_ADP 141 +#define SRST_ACC_EFUSE 142 + +#define SRST_CORESIGHT 144 +#define SRST_PD_CORE_AHB_NOC 145 +#define SRST_PD_CORE_APB_NOC 146 +#define SRST_PD_CORE_MP_AXI 147 +#define SRST_GIC 148 +#define SRST_LCDC_PWM0 149 +#define SRST_LCDC_PWM1 150 +#define SRST_VIO0_H2P_BRG 151 +#define SRST_VIO1_H2P_BRG 152 +#define SRST_RGA_H2P_BRG 153 +#define SRST_HEVC 154 +#define SRST_TSADC 159 + +#define SRST_DDRPHY0 160 +#define SRST_DDRPHY0_APB 161 +#define SRST_DDRCTRL0 162 +#define SRST_DDRCTRL0_APB 163 +#define SRST_DDRPHY0_CTRL 164 +#define SRST_DDRPHY1 165 +#define SRST_DDRPHY1_APB 166 +#define SRST_DDRCTRL1 167 +#define SRST_DDRCTRL1_APB 168 +#define SRST_DDRPHY1_CTRL 169 +#define SRST_DDRMSCH0 170 +#define SRST_DDRMSCH1 171 +#define SRST_CRYPTO 174 +#define SRST_C2C_HOST 175 + +#define SRST_LCDC1_AXI 176 +#define SRST_LCDC1_AHB 177 +#define SRST_LCDC1_DCLK 178 +#define SRST_UART0 179 +#define SRST_UART1 180 +#define SRST_UART2 181 +#define SRST_UART3 182 +#define SRST_UART4 183 +#define SRST_SIMC 186 +#define SRST_PS2C 187 +#define SRST_TSP 188 +#define SRST_TSP_CLKIN0 189 +#define SRST_TSP_CLKIN1 190 +#define SRST_TSP_27M 191 + +#endif diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h new file mode 100644 index 000000000..9d5f79946 --- /dev/null +++ b/include/dt-bindings/clock/rk3328-cru.h @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. + * Author: Elaine + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define PLL_NPLL 5 +#define ARMCLK 6 + +/* sclk gates (special clocks) */ +#define SCLK_RTC32K 30 +#define SCLK_SDMMC_EXT 31 +#define SCLK_SPI 32 +#define SCLK_SDMMC 33 +#define SCLK_SDIO 34 +#define SCLK_EMMC 35 +#define SCLK_TSADC 36 +#define SCLK_SARADC 37 +#define SCLK_UART0 38 +#define SCLK_UART1 39 +#define SCLK_UART2 40 +#define SCLK_I2S0 41 +#define SCLK_I2S1 42 +#define SCLK_I2S2 43 +#define SCLK_I2S1_OUT 44 +#define SCLK_I2S2_OUT 45 +#define SCLK_SPDIF 46 +#define SCLK_TIMER0 47 +#define SCLK_TIMER1 48 +#define SCLK_TIMER2 49 +#define SCLK_TIMER3 50 +#define SCLK_TIMER4 51 +#define SCLK_TIMER5 52 +#define SCLK_WIFI 53 +#define SCLK_CIF_OUT 54 +#define SCLK_I2C0 55 +#define SCLK_I2C1 56 +#define SCLK_I2C2 57 +#define SCLK_I2C3 58 +#define SCLK_CRYPTO 59 +#define SCLK_PWM 60 +#define SCLK_PDM 61 +#define SCLK_EFUSE 62 +#define SCLK_OTP 63 +#define SCLK_DDRCLK 64 +#define SCLK_VDEC_CABAC 65 +#define SCLK_VDEC_CORE 66 +#define SCLK_VENC_DSP 67 +#define SCLK_VENC_CORE 68 +#define SCLK_RGA 69 +#define SCLK_HDMI_SFC 70 +#define SCLK_HDMI_CEC 71 +#define SCLK_USB3_REF 72 +#define SCLK_USB3_SUSPEND 73 +#define SCLK_SDMMC_DRV 74 +#define SCLK_SDIO_DRV 75 +#define SCLK_EMMC_DRV 76 +#define SCLK_SDMMC_EXT_DRV 77 +#define SCLK_SDMMC_SAMPLE 78 +#define SCLK_SDIO_SAMPLE 79 +#define SCLK_EMMC_SAMPLE 80 +#define SCLK_SDMMC_EXT_SAMPLE 81 +#define SCLK_VOP 82 +#define SCLK_MAC2PHY_RXTX 83 +#define SCLK_MAC2PHY_SRC 84 +#define SCLK_MAC2PHY_REF 85 +#define SCLK_MAC2PHY_OUT 86 +#define SCLK_MAC2IO_RX 87 +#define SCLK_MAC2IO_TX 88 +#define SCLK_MAC2IO_REFOUT 89 +#define SCLK_MAC2IO_REF 90 +#define SCLK_MAC2IO_OUT 91 +#define SCLK_TSP 92 +#define SCLK_HSADC_TSP 93 +#define SCLK_USB3PHY_REF 94 +#define SCLK_REF_USB3OTG 95 +#define SCLK_USB3OTG_REF 96 +#define SCLK_USB3OTG_SUSPEND 97 +#define SCLK_REF_USB3OTG_SRC 98 +#define SCLK_MAC2IO_SRC 99 +#define SCLK_MAC2IO 100 +#define SCLK_MAC2PHY 101 +#define SCLK_MAC2IO_EXT 102 + +/* dclk gates */ +#define DCLK_LCDC 120 +#define DCLK_HDMIPHY 121 +#define HDMIPHY 122 +#define USB480M 123 +#define DCLK_LCDC_SRC 124 + +/* aclk gates */ +#define ACLK_AXISRAM 130 +#define ACLK_VOP_PRE 131 +#define ACLK_USB3OTG 132 +#define ACLK_RGA_PRE 133 +#define ACLK_DMAC 134 +#define ACLK_GPU 135 +#define ACLK_BUS_PRE 136 +#define ACLK_PERI_PRE 137 +#define ACLK_RKVDEC_PRE 138 +#define ACLK_RKVDEC 139 +#define ACLK_RKVENC 140 +#define ACLK_VPU_PRE 141 +#define ACLK_VIO_PRE 142 +#define ACLK_VPU 143 +#define ACLK_VIO 144 +#define ACLK_VOP 145 +#define ACLK_GMAC 146 +#define ACLK_H265 147 +#define ACLK_H264 148 +#define ACLK_MAC2PHY 149 +#define ACLK_MAC2IO 150 +#define ACLK_DCF 151 +#define ACLK_TSP 152 +#define ACLK_PERI 153 +#define ACLK_RGA 154 +#define ACLK_IEP 155 +#define ACLK_CIF 156 +#define ACLK_HDCP 157 + +/* pclk gates */ +#define PCLK_GPIO0 200 +#define PCLK_GPIO1 201 +#define PCLK_GPIO2 202 +#define PCLK_GPIO3 203 +#define PCLK_GRF 204 +#define PCLK_I2C0 205 +#define PCLK_I2C1 206 +#define PCLK_I2C2 207 +#define PCLK_I2C3 208 +#define PCLK_SPI 209 +#define PCLK_UART0 210 +#define PCLK_UART1 211 +#define PCLK_UART2 212 +#define PCLK_TSADC 213 +#define PCLK_PWM 214 +#define PCLK_TIMER 215 +#define PCLK_BUS_PRE 216 +#define PCLK_PERI_PRE 217 +#define PCLK_HDMI_CTRL 218 +#define PCLK_HDMI_PHY 219 +#define PCLK_GMAC 220 +#define PCLK_H265 221 +#define PCLK_MAC2PHY 222 +#define 
PCLK_MAC2IO 223 +#define PCLK_USB3PHY_OTG 224 +#define PCLK_USB3PHY_PIPE 225 +#define PCLK_USB3_GRF 226 +#define PCLK_USB2_GRF 227 +#define PCLK_HDMIPHY 228 +#define PCLK_DDR 229 +#define PCLK_PERI 230 +#define PCLK_HDMI 231 +#define PCLK_HDCP 232 +#define PCLK_DCF 233 +#define PCLK_SARADC 234 + +/* hclk gates */ +#define HCLK_PERI 308 +#define HCLK_TSP 309 +#define HCLK_GMAC 310 +#define HCLK_I2S0_8CH 311 +#define HCLK_I2S1_8CH 312 +#define HCLK_I2S2_2CH 313 +#define HCLK_SPDIF_8CH 314 +#define HCLK_VOP 315 +#define HCLK_NANDC 316 +#define HCLK_SDMMC 317 +#define HCLK_SDIO 318 +#define HCLK_EMMC 319 +#define HCLK_SDMMC_EXT 320 +#define HCLK_RKVDEC_PRE 321 +#define HCLK_RKVDEC 322 +#define HCLK_RKVENC 323 +#define HCLK_VPU_PRE 324 +#define HCLK_VIO_PRE 325 +#define HCLK_VPU 326 +#define HCLK_BUS_PRE 328 +#define HCLK_PERI_PRE 329 +#define HCLK_H264 330 +#define HCLK_CIF 331 +#define HCLK_OTG_PMU 332 +#define HCLK_OTG 333 +#define HCLK_HOST0 334 +#define HCLK_HOST0_ARB 335 +#define HCLK_CRYPTO_MST 336 +#define HCLK_CRYPTO_SLV 337 +#define HCLK_PDM 338 +#define HCLK_IEP 339 +#define HCLK_RGA 340 +#define HCLK_HDCP 341 + +#define CLK_NR_CLKS (HCLK_HDCP + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NIU 13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +#define SRST_A53_GIC 18 +#define SRST_DAP 19 +#define SRST_PMU_P 21 +#define SRST_EFUSE 22 +#define SRST_BUSSYS_H 23 +#define SRST_BUSSYS_P 24 +#define SRST_SPDIF 25 +#define SRST_INTMEM 26 +#define SRST_ROM 27 +#define SRST_GPIO0 28 +#define SRST_GPIO1 29 +#define SRST_GPIO2 30 +#define SRST_GPIO3 31 + +#define SRST_I2S0 32 +#define SRST_I2S1 33 +#define SRST_I2S2 34 +#define SRST_I2S0_H 35 +#define SRST_I2S1_H 36 +#define SRST_I2S2_H 37 +#define SRST_UART0 38 +#define SRST_UART1 39 +#define SRST_UART2 40 +#define SRST_UART0_P 41 +#define SRST_UART1_P 42 +#define SRST_UART2_P 43 +#define SRST_I2C0 44 +#define SRST_I2C1 45 +#define SRST_I2C2 46 +#define SRST_I2C3 47 + +#define SRST_I2C0_P 48 +#define SRST_I2C1_P 49 +#define SRST_I2C2_P 50 +#define SRST_I2C3_P 51 +#define SRST_EFUSE_SE_P 52 +#define SRST_EFUSE_NS_P 53 +#define SRST_PWM0 54 +#define SRST_PWM0_P 55 +#define SRST_DMA 56 +#define SRST_TSP_A 57 +#define SRST_TSP_H 58 +#define SRST_TSP 59 +#define SRST_TSP_HSADC 60 +#define SRST_DCF_A 61 +#define SRST_DCF_P 62 + +#define SRST_SCR 64 +#define SRST_SPI 65 +#define SRST_TSADC 66 +#define SRST_TSADC_P 67 +#define SRST_CRYPTO 68 +#define SRST_SGRF 69 +#define SRST_GRF 70 +#define SRST_USB_GRF 71 +#define SRST_TIMER_6CH_P 72 +#define SRST_TIMER0 73 +#define SRST_TIMER1 74 +#define SRST_TIMER2 75 +#define SRST_TIMER3 76 +#define SRST_TIMER4 77 +#define SRST_TIMER5 78 +#define SRST_USB3GRF 79 + +#define SRST_PHYNIU 80 +#define SRST_HDMIPHY 81 +#define SRST_VDAC 82 +#define SRST_ACODEC_p 83 +#define SRST_SARADC 85 +#define SRST_SARADC_P 86 +#define SRST_GRF_DDR 87 +#define SRST_DFIMON 88 +#define SRST_MSCH 89 +#define SRST_DDRMSCH 91 +#define SRST_DDRCTRL 92 +#define SRST_DDRCTRL_P 93 +#define SRST_DDRPHY 94 +#define SRST_DDRPHY_P 95 + +#define SRST_GMAC_NIU_A 96 +#define SRST_GMAC_NIU_P 97 +#define SRST_GMAC2PHY_A 98 +#define SRST_GMAC2IO_A 99 +#define SRST_MACPHY 100 +#define SRST_OTP_PHY 101 +#define SRST_GPU_A 102 +#define 
SRST_GPU_NIU_A 103 +#define SRST_SDMMCEXT 104 +#define SRST_PERIPH_NIU_A 105 +#define SRST_PERIHP_NIU_H 106 +#define SRST_PERIHP_P 107 +#define SRST_PERIPHSYS_H 108 +#define SRST_MMC0 109 +#define SRST_SDIO 110 +#define SRST_EMMC 111 + +#define SRST_USB2OTG_H 112 +#define SRST_USB2OTG 113 +#define SRST_USB2OTG_ADP 114 +#define SRST_USB2HOST_H 115 +#define SRST_USB2HOST_ARB 116 +#define SRST_USB2HOST_AUX 117 +#define SRST_USB2HOST_EHCIPHY 118 +#define SRST_USB2HOST_UTMI 119 +#define SRST_USB3OTG 120 +#define SRST_USBPOR 121 +#define SRST_USB2OTG_UTMI 122 +#define SRST_USB2HOST_PHY_UTMI 123 +#define SRST_USB3OTG_UTMI 124 +#define SRST_USB3PHY_U2 125 +#define SRST_USB3PHY_U3 126 +#define SRST_USB3PHY_PIPE 127 + +#define SRST_VIO_A 128 +#define SRST_VIO_BUS_H 129 +#define SRST_VIO_H2P_H 130 +#define SRST_VIO_ARBI_H 131 +#define SRST_VOP_NIU_A 132 +#define SRST_VOP_A 133 +#define SRST_VOP_H 134 +#define SRST_VOP_D 135 +#define SRST_RGA 136 +#define SRST_RGA_NIU_A 137 +#define SRST_RGA_A 138 +#define SRST_RGA_H 139 +#define SRST_IEP_A 140 +#define SRST_IEP_H 141 +#define SRST_HDMI 142 +#define SRST_HDMI_P 143 + +#define SRST_HDCP_A 144 +#define SRST_HDCP 145 +#define SRST_HDCP_H 146 +#define SRST_CIF_A 147 +#define SRST_CIF_H 148 +#define SRST_CIF_P 149 +#define SRST_OTP_P 150 +#define SRST_OTP_SBPI 151 +#define SRST_OTP_USER 152 +#define SRST_DDRCTRL_A 153 +#define SRST_DDRSTDY_P 154 +#define SRST_DDRSTDY 155 +#define SRST_PDM_H 156 +#define SRST_PDM 157 +#define SRST_USB3PHY_OTG_P 158 +#define SRST_USB3PHY_PIPE_P 159 + +#define SRST_VCODEC_A 160 +#define SRST_VCODEC_NIU_A 161 +#define SRST_VCODEC_H 162 +#define SRST_VCODEC_NIU_H 163 +#define SRST_VDEC_A 164 +#define SRST_VDEC_NIU_A 165 +#define SRST_VDEC_H 166 +#define SRST_VDEC_NIU_H 167 +#define SRST_VDEC_CORE 168 +#define SRST_VDEC_CABAC 169 +#define SRST_DDRPHYDIV 175 + +#define SRST_RKVENC_NIU_A 176 +#define SRST_RKVENC_NIU_H 177 +#define SRST_RKVENC_H265_A 178 +#define SRST_RKVENC_H265_P 179 +#define SRST_RKVENC_H265_CORE 180 +#define SRST_RKVENC_H265_DSP 181 +#define SRST_RKVENC_H264_A 182 +#define SRST_RKVENC_H264_H 183 +#define SRST_RKVENC_INTMEM 184 + +#endif diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h new file mode 100644 index 000000000..a0063ed72 --- /dev/null +++ b/include/dt-bindings/clock/rk3368-cru.h @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2015 Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H + +/* core clocks */ +#define PLL_APLLB 1 +#define PLL_APLLL 2 +#define PLL_DPLL 3 +#define PLL_CPLL 4 +#define PLL_GPLL 5 +#define PLL_NPLL 6 +#define ARMCLKB 7 +#define ARMCLKL 8 + +/* sclk gates (special clocks) */ +#define SCLK_GPU_CORE 64 +#define SCLK_SPI0 65 +#define SCLK_SPI1 66 +#define SCLK_SPI2 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO0 69 +#define SCLK_EMMC 71 +#define SCLK_TSADC 72 +#define SCLK_SARADC 73 +#define SCLK_NANDC0 75 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_UART3 80 +#define SCLK_UART4 81 +#define SCLK_I2S_8CH 82 +#define SCLK_SPDIF_8CH 83 +#define SCLK_I2S_2CH 84 +#define SCLK_TIMER00 85 +#define SCLK_TIMER01 86 +#define SCLK_TIMER02 87 +#define SCLK_TIMER03 88 +#define SCLK_TIMER04 89 +#define SCLK_TIMER05 90 +#define SCLK_OTGPHY0 93 +#define SCLK_OTG_ADP 96 +#define SCLK_HSICPHY480M 97 +#define SCLK_HSICPHY12M 98 +#define SCLK_MACREF 99 +#define SCLK_VOP0_PWM 100 +#define SCLK_MAC_RX 102 +#define SCLK_MAC_TX 103 +#define SCLK_EDP_24M 104 +#define SCLK_EDP 105 +#define SCLK_RGA 106 +#define SCLK_ISP 107 +#define SCLK_HDCP 108 +#define SCLK_HDMI_HDCP 109 +#define SCLK_HDMI_CEC 110 +#define SCLK_HEVC_CABAC 111 +#define SCLK_HEVC_CORE 112 +#define SCLK_I2S_8CH_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO0_DRV 115 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO0_SAMPLE 119 +#define SCLK_EMMC_SAMPLE 121 +#define SCLK_USBPHY480M 122 +#define SCLK_PVTM_CORE 123 +#define SCLK_PVTM_GPU 124 +#define SCLK_PVTM_PMU 125 +#define SCLK_SFC 126 +#define SCLK_MAC 127 +#define SCLK_MACREF_OUT 128 +#define SCLK_TIMER10 133 +#define SCLK_TIMER11 134 +#define SCLK_TIMER12 135 +#define SCLK_TIMER13 136 +#define SCLK_TIMER14 137 +#define SCLK_TIMER15 138 + +#define DCLK_VOP 190 +#define MCLK_CRYPTO 191 + +/* aclk gates */ +#define ACLK_GPU_MEM 192 +#define ACLK_GPU_CFG 193 +#define ACLK_DMAC_BUS 194 +#define ACLK_DMAC_PERI 195 +#define ACLK_PERI_MMU 196 +#define ACLK_GMAC 197 +#define ACLK_VOP 198 +#define ACLK_VOP_IEP 199 +#define ACLK_RGA 200 +#define ACLK_HDCP 201 +#define ACLK_IEP 202 +#define ACLK_VIO0_NOC 203 +#define ACLK_VIP 204 +#define ACLK_ISP 205 +#define ACLK_VIO1_NOC 206 +#define ACLK_VIDEO 208 +#define ACLK_BUS 209 +#define ACLK_PERI 210 + +/* pclk gates */ +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GPIO3 323 +#define PCLK_PMUGRF 324 +#define PCLK_MAILBOX 325 +#define PCLK_GRF 329 +#define PCLK_SGRF 330 +#define PCLK_PMU 331 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_I2C3 335 +#define PCLK_I2C4 336 +#define PCLK_I2C5 337 +#define PCLK_SPI0 338 +#define PCLK_SPI1 339 +#define PCLK_SPI2 340 +#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_UART3 344 +#define PCLK_UART4 345 +#define PCLK_TSADC 346 +#define PCLK_SARADC 347 +#define PCLK_SIM 348 +#define PCLK_GMAC 349 +#define PCLK_PWM0 350 +#define PCLK_PWM1 351 +#define PCLK_TIMER0 353 +#define PCLK_TIMER1 354 +#define PCLK_EDP_CTRL 355 +#define PCLK_MIPI_DSI0 356 +#define PCLK_MIPI_CSI 358 +#define PCLK_HDCP 359 +#define PCLK_HDMI_CTRL 360 +#define PCLK_VIO_H2P 361 +#define PCLK_BUS 362 +#define PCLK_PERI 363 +#define PCLK_DDRUPCTL 364 +#define PCLK_DDRPHY 365 +#define PCLK_ISP 366 +#define PCLK_VIP 367 +#define PCLK_WDT 368 +#define PCLK_EFUSE256 369 + +/* hclk gates */ +#define HCLK_SFC 448 +#define HCLK_OTG0 449 +#define 
HCLK_HOST0 450 +#define HCLK_HOST1 451 +#define HCLK_HSIC 452 +#define HCLK_NANDC0 453 +#define HCLK_TSP 455 +#define HCLK_SDMMC 456 +#define HCLK_SDIO0 457 +#define HCLK_EMMC 459 +#define HCLK_HSADC 460 +#define HCLK_CRYPTO 461 +#define HCLK_I2S_2CH 462 +#define HCLK_I2S_8CH 463 +#define HCLK_SPDIF 464 +#define HCLK_VOP 465 +#define HCLK_ROM 467 +#define HCLK_IEP 468 +#define HCLK_ISP 469 +#define HCLK_RGA 470 +#define HCLK_VIO_AHB_ARBI 471 +#define HCLK_VIO_NOC 472 +#define HCLK_VIP 473 +#define HCLK_VIO_H2P 474 +#define HCLK_VIO_HDCPMMU 475 +#define HCLK_VIDEO 476 +#define HCLK_BUS 477 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* soft-reset indices */ +#define SRST_CORE_B0 0 +#define SRST_CORE_B1 1 +#define SRST_CORE_B2 2 +#define SRST_CORE_B3 3 +#define SRST_CORE_B0_PO 4 +#define SRST_CORE_B1_PO 5 +#define SRST_CORE_B2_PO 6 +#define SRST_CORE_B3_PO 7 +#define SRST_L2_B 8 +#define SRST_ADB_B 9 +#define SRST_PD_CORE_B_NIU 10 +#define SRST_PDBUS_STRSYS 11 +#define SRST_SOCDBG_B 14 +#define SRST_CORE_B_DBG 15 + +#define SRST_DMAC1 18 +#define SRST_INTMEM 19 +#define SRST_ROM 20 +#define SRST_SPDIF8CH 21 +#define SRST_I2S8CH 23 +#define SRST_MAILBOX 24 +#define SRST_I2S2CH 25 +#define SRST_EFUSE_256 26 +#define SRST_MCU_SYS 28 +#define SRST_MCU_PO 29 +#define SRST_MCU_NOC 30 +#define SRST_EFUSE 31 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 +#define SRST_GPIO4 36 +#define SRST_PMUGRF 41 +#define SRST_I2C0 42 +#define SRST_I2C1 43 +#define SRST_I2C2 44 +#define SRST_I2C3 45 +#define SRST_I2C4 46 +#define SRST_I2C5 47 + +#define SRST_DWPWM 48 +#define SRST_MMC_PERI 49 +#define SRST_PERIPH_MMU 50 +#define SRST_GRF 55 +#define SRST_PMU 56 +#define SRST_PERIPH_AXI 57 +#define SRST_PERIPH_AHB 58 +#define SRST_PERIPH_APB 59 +#define SRST_PERIPH_NIU 60 +#define SRST_PDPERI_AHB_ARBI 61 +#define SRST_EMEM 62 +#define SRST_USB_PERI 63 + +#define SRST_DMAC2 64 +#define SRST_MAC 66 +#define SRST_GPS 67 +#define SRST_RKPWM 69 +#define SRST_USBHOST0 72 +#define SRST_HSIC 73 +#define SRST_HSIC_AUX 74 +#define SRST_HSIC_PHY 75 +#define SRST_HSADC 76 +#define SRST_NANDC0 77 +#define SRST_SFC 79 + +#define SRST_SPI0 83 +#define SRST_SPI1 84 +#define SRST_SPI2 85 +#define SRST_SARADC 87 +#define SRST_PDALIVE_NIU 88 +#define SRST_PDPMU_INTMEM 89 +#define SRST_PDPMU_NIU 90 +#define SRST_SGRF 91 + +#define SRST_VIO_ARBI 96 +#define SRST_RGA_NIU 97 +#define SRST_VIO0_NIU_AXI 98 +#define SRST_VIO_NIU_AHB 99 +#define SRST_LCDC0_AXI 100 +#define SRST_LCDC0_AHB 101 +#define SRST_LCDC0_DCLK 102 +#define SRST_VIP 104 +#define SRST_RGA_CORE 105 +#define SRST_IEP_AXI 106 +#define SRST_IEP_AHB 107 +#define SRST_RGA_AXI 108 +#define SRST_RGA_AHB 109 +#define SRST_ISP 110 +#define SRST_EDP_24M 111 + +#define SRST_VIDEO_AXI 112 +#define SRST_VIDEO_AHB 113 +#define SRST_MIPIDPHYTX 114 +#define SRST_MIPIDSI0 115 +#define SRST_MIPIDPHYRX 116 +#define SRST_MIPICSI 117 +#define SRST_GPU 120 +#define SRST_HDMI 121 +#define SRST_EDP 122 +#define SRST_PMU_PVTM 123 +#define SRST_CORE_PVTM 124 +#define SRST_GPU_PVTM 125 +#define SRST_GPU_SYS 126 +#define SRST_GPU_MEM_NIU 127 + +#define SRST_MMC0 128 +#define SRST_SDIO0 129 +#define SRST_EMMC 131 +#define SRST_USBOTG_AHB 132 +#define SRST_USBOTG_PHY 133 +#define SRST_USBOTG_CON 134 +#define SRST_USBHOST0_AHB 135 +#define SRST_USBHOST0_PHY 136 +#define SRST_USBHOST0_CON 137 +#define SRST_USBOTG_UTMI 138 +#define SRST_USBHOST1_UTMI 139 +#define SRST_USB_ADP 141 + +#define SRST_CORESIGHT 144 +#define 
SRST_PD_CORE_AHB_NOC 145 +#define SRST_PD_CORE_APB_NOC 146 +#define SRST_GIC 148 +#define SRST_LCDC_PWM0 149 +#define SRST_RGA_H2P_BRG 153 +#define SRST_VIDEO 154 +#define SRST_GPU_CFG_NIU 157 +#define SRST_TSADC 159 + +#define SRST_DDRPHY0 160 +#define SRST_DDRPHY0_APB 161 +#define SRST_DDRCTRL0 162 +#define SRST_DDRCTRL0_APB 163 +#define SRST_VIDEO_NIU 165 +#define SRST_VIDEO_NIU_AHB 167 +#define SRST_DDRMSCH0 170 +#define SRST_PDBUS_AHB 173 +#define SRST_CRYPTO 174 + +#define SRST_UART0 179 +#define SRST_UART1 180 +#define SRST_UART2 181 +#define SRST_UART3 182 +#define SRST_UART4 183 +#define SRST_SIMC 186 +#define SRST_TSP 188 +#define SRST_TSP_CLKIN0 189 + +#define SRST_CORE_L0 192 +#define SRST_CORE_L1 193 +#define SRST_CORE_L2 194 +#define SRST_CORE_L3 195 +#define SRST_CORE_L0_PO 195 +#define SRST_CORE_L1_PO 197 +#define SRST_CORE_L2_PO 198 +#define SRST_CORE_L3_PO 199 +#define SRST_L2_L 200 +#define SRST_ADB_L 201 +#define SRST_PD_CORE_L_NIU 202 +#define SRST_CCI_SYS 203 +#define SRST_CCI_DDR 204 +#define SRST_CCI 205 +#define SRST_SOCDBG_L 206 +#define SRST_CORE_L_DBG 207 + +#define SRST_CORE_B0_NC 208 +#define SRST_CORE_B0_PO_NC 209 +#define SRST_L2_B_NC 210 +#define SRST_ADB_B_NC 211 +#define SRST_PD_CORE_B_NIU_NC 212 +#define SRST_PDBUS_STRSYS_NC 213 +#define SRST_CORE_L0_NC 214 +#define SRST_CORE_L0_PO_NC 215 +#define SRST_L2_L_NC 216 +#define SRST_ADB_L_NC 217 +#define SRST_PD_CORE_L_NIU_NC 218 +#define SRST_CCI_SYS_NC 219 +#define SRST_CCI_DDR_NC 220 +#define SRST_CCI_NC 221 +#define SRST_TRACE_NC 222 + +#define SRST_TIMER00 224 +#define SRST_TIMER01 225 +#define SRST_TIMER02 226 +#define SRST_TIMER03 227 +#define SRST_TIMER04 228 +#define SRST_TIMER05 229 +#define SRST_TIMER10 230 +#define SRST_TIMER11 231 +#define SRST_TIMER12 232 +#define SRST_TIMER13 233 +#define SRST_TIMER14 234 +#define SRST_TIMER15 235 +#define SRST_TIMER0_APB 236 +#define SRST_TIMER1_APB 237 + +#endif diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h new file mode 100644 index 000000000..22cb1dfa9 --- /dev/null +++ b/include/dt-bindings/clock/rk3399-cru.h @@ -0,0 +1,760 @@ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. + * Author: Xing Zheng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3399_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3399_H + +/* core clocks */ +#define PLL_APLLL 1 +#define PLL_APLLB 2 +#define PLL_DPLL 3 +#define PLL_CPLL 4 +#define PLL_GPLL 5 +#define PLL_NPLL 6 +#define PLL_VPLL 7 +#define ARMCLKL 8 +#define ARMCLKB 9 + +/* sclk gates (special clocks) */ +#define SCLK_I2C1 65 +#define SCLK_I2C2 66 +#define SCLK_I2C3 67 +#define SCLK_I2C5 68 +#define SCLK_I2C6 69 +#define SCLK_I2C7 70 +#define SCLK_SPI0 71 +#define SCLK_SPI1 72 +#define SCLK_SPI2 73 +#define SCLK_SPI4 74 +#define SCLK_SPI5 75 +#define SCLK_SDMMC 76 +#define SCLK_SDIO 77 +#define SCLK_EMMC 78 +#define SCLK_TSADC 79 +#define SCLK_SARADC 80 +#define SCLK_UART0 81 +#define SCLK_UART1 82 +#define SCLK_UART2 83 +#define SCLK_UART3 84 +#define SCLK_SPDIF_8CH 85 +#define SCLK_I2S0_8CH 86 +#define SCLK_I2S1_8CH 87 +#define SCLK_I2S2_8CH 88 +#define SCLK_I2S_8CH_OUT 89 +#define SCLK_TIMER00 90 +#define SCLK_TIMER01 91 +#define SCLK_TIMER02 92 +#define SCLK_TIMER03 93 +#define SCLK_TIMER04 94 +#define SCLK_TIMER05 95 +#define SCLK_TIMER06 96 +#define SCLK_TIMER07 97 +#define SCLK_TIMER08 98 +#define SCLK_TIMER09 99 +#define SCLK_TIMER10 100 +#define SCLK_TIMER11 101 +#define SCLK_MACREF 102 +#define SCLK_MAC_RX 103 +#define SCLK_MAC_TX 104 +#define SCLK_MAC 105 +#define SCLK_MACREF_OUT 106 +#define SCLK_VOP0_PWM 107 +#define SCLK_VOP1_PWM 108 +#define SCLK_RGA_CORE 109 +#define SCLK_ISP0 110 +#define SCLK_ISP1 111 +#define SCLK_HDMI_CEC 112 +#define SCLK_HDMI_SFR 113 +#define SCLK_DP_CORE 114 +#define SCLK_PVTM_CORE_L 115 +#define SCLK_PVTM_CORE_B 116 +#define SCLK_PVTM_GPU 117 +#define SCLK_PVTM_DDR 118 +#define SCLK_MIPIDPHY_REF 119 +#define SCLK_MIPIDPHY_CFG 120 +#define SCLK_HSICPHY 121 +#define SCLK_USBPHY480M 122 +#define SCLK_USB2PHY0_REF 123 +#define SCLK_USB2PHY1_REF 124 +#define SCLK_UPHY0_TCPDPHY_REF 125 +#define SCLK_UPHY0_TCPDCORE 126 +#define SCLK_UPHY1_TCPDPHY_REF 127 +#define SCLK_UPHY1_TCPDCORE 128 +#define SCLK_USB3OTG0_REF 129 +#define SCLK_USB3OTG1_REF 130 +#define SCLK_USB3OTG0_SUSPEND 131 +#define SCLK_USB3OTG1_SUSPEND 132 +#define SCLK_CRYPTO0 133 +#define SCLK_CRYPTO1 134 +#define SCLK_CCI_TRACE 135 +#define SCLK_CS 136 +#define SCLK_CIF_OUT 137 +#define SCLK_PCIEPHY_REF 138 +#define SCLK_PCIE_CORE 139 +#define SCLK_M0_PERILP 140 +#define SCLK_M0_PERILP_DEC 141 +#define SCLK_CM0S 142 +#define SCLK_DBG_NOC 143 +#define SCLK_DBG_PD_CORE_B 144 +#define SCLK_DBG_PD_CORE_L 145 +#define SCLK_DFIMON0_TIMER 146 +#define SCLK_DFIMON1_TIMER 147 +#define SCLK_INTMEM0 148 +#define SCLK_INTMEM1 149 +#define SCLK_INTMEM2 150 +#define SCLK_INTMEM3 151 +#define SCLK_INTMEM4 152 +#define SCLK_INTMEM5 153 +#define SCLK_SDMMC_DRV 154 +#define SCLK_SDMMC_SAMPLE 155 +#define SCLK_SDIO_DRV 156 +#define SCLK_SDIO_SAMPLE 157 +#define SCLK_VDU_CORE 158 +#define SCLK_VDU_CA 159 +#define SCLK_PCIE_PM 160 +#define SCLK_SPDIF_REC_DPTX 161 +#define SCLK_DPHY_PLL 162 +#define SCLK_DPHY_TX0_CFG 163 +#define SCLK_DPHY_TX1RX1_CFG 164 +#define SCLK_DPHY_RX0_CFG 165 +#define SCLK_RMII_SRC 166 +#define SCLK_PCIEPHY_REF100M 167 +#define SCLK_DDRC 168 +#define SCLK_TESTCLKOUT1 169 +#define SCLK_TESTCLKOUT2 170 + +#define DCLK_VOP0 180 +#define DCLK_VOP1 181 +#define DCLK_VOP0_DIV 182 +#define DCLK_VOP1_DIV 183 +#define DCLK_M0_PERILP 184 +#define DCLK_VOP0_FRAC 185 +#define DCLK_VOP1_FRAC 186 + +#define FCLK_CM0S 190 + +/* aclk gates */ +#define ACLK_PERIHP 192 +#define ACLK_PERIHP_NOC 193 +#define ACLK_PERILP0 194 +#define ACLK_PERILP0_NOC 195 +#define ACLK_PERF_PCIE 196 
+#define ACLK_PCIE 197 +#define ACLK_INTMEM 198 +#define ACLK_TZMA 199 +#define ACLK_DCF 200 +#define ACLK_CCI 201 +#define ACLK_CCI_NOC0 202 +#define ACLK_CCI_NOC1 203 +#define ACLK_CCI_GRF 204 +#define ACLK_CENTER 205 +#define ACLK_CENTER_MAIN_NOC 206 +#define ACLK_CENTER_PERI_NOC 207 +#define ACLK_GPU 208 +#define ACLK_PERF_GPU 209 +#define ACLK_GPU_GRF 210 +#define ACLK_DMAC0_PERILP 211 +#define ACLK_DMAC1_PERILP 212 +#define ACLK_GMAC 213 +#define ACLK_GMAC_NOC 214 +#define ACLK_PERF_GMAC 215 +#define ACLK_VOP0_NOC 216 +#define ACLK_VOP0 217 +#define ACLK_VOP1_NOC 218 +#define ACLK_VOP1 219 +#define ACLK_RGA 220 +#define ACLK_RGA_NOC 221 +#define ACLK_HDCP 222 +#define ACLK_HDCP_NOC 223 +#define ACLK_HDCP22 224 +#define ACLK_IEP 225 +#define ACLK_IEP_NOC 226 +#define ACLK_VIO 227 +#define ACLK_VIO_NOC 228 +#define ACLK_ISP0 229 +#define ACLK_ISP1 230 +#define ACLK_ISP0_NOC 231 +#define ACLK_ISP1_NOC 232 +#define ACLK_ISP0_WRAPPER 233 +#define ACLK_ISP1_WRAPPER 234 +#define ACLK_VCODEC 235 +#define ACLK_VCODEC_NOC 236 +#define ACLK_VDU 237 +#define ACLK_VDU_NOC 238 +#define ACLK_PERI 239 +#define ACLK_EMMC 240 +#define ACLK_EMMC_CORE 241 +#define ACLK_EMMC_NOC 242 +#define ACLK_EMMC_GRF 243 +#define ACLK_USB3 244 +#define ACLK_USB3_NOC 245 +#define ACLK_USB3OTG0 246 +#define ACLK_USB3OTG1 247 +#define ACLK_USB3_RKSOC_AXI_PERF 248 +#define ACLK_USB3_GRF 249 +#define ACLK_GIC 250 +#define ACLK_GIC_NOC 251 +#define ACLK_GIC_ADB400_CORE_L_2_GIC 252 +#define ACLK_GIC_ADB400_CORE_B_2_GIC 253 +#define ACLK_GIC_ADB400_GIC_2_CORE_L 254 +#define ACLK_GIC_ADB400_GIC_2_CORE_B 255 +#define ACLK_CORE_ADB400_CORE_L_2_CCI500 256 +#define ACLK_CORE_ADB400_CORE_B_2_CCI500 257 +#define ACLK_ADB400M_PD_CORE_L 258 +#define ACLK_ADB400M_PD_CORE_B 259 +#define ACLK_PERF_CORE_L 260 +#define ACLK_PERF_CORE_B 261 +#define ACLK_GIC_PRE 262 +#define ACLK_VOP0_PRE 263 +#define ACLK_VOP1_PRE 264 + +/* pclk gates */ +#define PCLK_PERIHP 320 +#define PCLK_PERIHP_NOC 321 +#define PCLK_PERILP0 322 +#define PCLK_PERILP1 323 +#define PCLK_PERILP1_NOC 324 +#define PCLK_PERILP_SGRF 325 +#define PCLK_PERIHP_GRF 326 +#define PCLK_PCIE 327 +#define PCLK_SGRF 328 +#define PCLK_INTR_ARB 329 +#define PCLK_CENTER_MAIN_NOC 330 +#define PCLK_CIC 331 +#define PCLK_COREDBG_B 332 +#define PCLK_COREDBG_L 333 +#define PCLK_DBG_CXCS_PD_CORE_B 334 +#define PCLK_DCF 335 +#define PCLK_GPIO2 336 +#define PCLK_GPIO3 337 +#define PCLK_GPIO4 338 +#define PCLK_GRF 339 +#define PCLK_HSICPHY 340 +#define PCLK_I2C1 341 +#define PCLK_I2C2 342 +#define PCLK_I2C3 343 +#define PCLK_I2C5 344 +#define PCLK_I2C6 345 +#define PCLK_I2C7 346 +#define PCLK_SPI0 347 +#define PCLK_SPI1 348 +#define PCLK_SPI2 349 +#define PCLK_SPI4 350 +#define PCLK_SPI5 351 +#define PCLK_UART0 352 +#define PCLK_UART1 353 +#define PCLK_UART2 354 +#define PCLK_UART3 355 +#define PCLK_TSADC 356 +#define PCLK_SARADC 357 +#define PCLK_GMAC 358 +#define PCLK_GMAC_NOC 359 +#define PCLK_TIMER0 360 +#define PCLK_TIMER1 361 +#define PCLK_EDP 362 +#define PCLK_EDP_NOC 363 +#define PCLK_EDP_CTRL 364 +#define PCLK_VIO 365 +#define PCLK_VIO_NOC 366 +#define PCLK_VIO_GRF 367 +#define PCLK_MIPI_DSI0 368 +#define PCLK_MIPI_DSI1 369 +#define PCLK_HDCP 370 +#define PCLK_HDCP_NOC 371 +#define PCLK_HDMI_CTRL 372 +#define PCLK_DP_CTRL 373 +#define PCLK_HDCP22 374 +#define PCLK_GASKET 375 +#define PCLK_DDR 376 +#define PCLK_DDR_MON 377 +#define PCLK_DDR_SGRF 378 +#define PCLK_ISP1_WRAPPER 379 +#define PCLK_WDT 380 +#define PCLK_EFUSE1024NS 381 +#define PCLK_EFUSE1024S 382 +#define PCLK_PMU_INTR_ARB 
383 +#define PCLK_MAILBOX0 384 +#define PCLK_USBPHY_MUX_G 385 +#define PCLK_UPHY0_TCPHY_G 386 +#define PCLK_UPHY0_TCPD_G 387 +#define PCLK_UPHY1_TCPHY_G 388 +#define PCLK_UPHY1_TCPD_G 389 +#define PCLK_ALIVE 390 + +/* hclk gates */ +#define HCLK_PERIHP 448 +#define HCLK_PERILP0 449 +#define HCLK_PERILP1 450 +#define HCLK_PERILP0_NOC 451 +#define HCLK_PERILP1_NOC 452 +#define HCLK_M0_PERILP 453 +#define HCLK_M0_PERILP_NOC 454 +#define HCLK_AHB1TOM 455 +#define HCLK_HOST0 456 +#define HCLK_HOST0_ARB 457 +#define HCLK_HOST1 458 +#define HCLK_HOST1_ARB 459 +#define HCLK_HSIC 460 +#define HCLK_SD 461 +#define HCLK_SDMMC 462 +#define HCLK_SDMMC_NOC 463 +#define HCLK_M_CRYPTO0 464 +#define HCLK_M_CRYPTO1 465 +#define HCLK_S_CRYPTO0 466 +#define HCLK_S_CRYPTO1 467 +#define HCLK_I2S0_8CH 468 +#define HCLK_I2S1_8CH 469 +#define HCLK_I2S2_8CH 470 +#define HCLK_SPDIF 471 +#define HCLK_VOP0_NOC 472 +#define HCLK_VOP0 473 +#define HCLK_VOP1_NOC 474 +#define HCLK_VOP1 475 +#define HCLK_ROM 476 +#define HCLK_IEP 477 +#define HCLK_IEP_NOC 478 +#define HCLK_ISP0 479 +#define HCLK_ISP1 480 +#define HCLK_ISP0_NOC 481 +#define HCLK_ISP1_NOC 482 +#define HCLK_ISP0_WRAPPER 483 +#define HCLK_ISP1_WRAPPER 484 +#define HCLK_RGA 485 +#define HCLK_RGA_NOC 486 +#define HCLK_HDCP 487 +#define HCLK_HDCP_NOC 488 +#define HCLK_HDCP22 489 +#define HCLK_VCODEC 490 +#define HCLK_VCODEC_NOC 491 +#define HCLK_VDU 492 +#define HCLK_VDU_NOC 493 +#define HCLK_SDIO 494 +#define HCLK_SDIO_NOC 495 +#define HCLK_SDIOAUDIO_NOC 496 + +#define CLK_NR_CLKS (HCLK_SDIOAUDIO_NOC + 1) + +/* pmu-clocks indices */ + +#define PLL_PPLL 1 + +#define SCLK_32K_SUSPEND_PMU 2 +#define SCLK_SPI3_PMU 3 +#define SCLK_TIMER12_PMU 4 +#define SCLK_TIMER13_PMU 5 +#define SCLK_UART4_PMU 6 +#define SCLK_PVTM_PMU 7 +#define SCLK_WIFI_PMU 8 +#define SCLK_I2C0_PMU 9 +#define SCLK_I2C4_PMU 10 +#define SCLK_I2C8_PMU 11 + +#define PCLK_SRC_PMU 19 +#define PCLK_PMU 20 +#define PCLK_PMUGRF_PMU 21 +#define PCLK_INTMEM1_PMU 22 +#define PCLK_GPIO0_PMU 23 +#define PCLK_GPIO1_PMU 24 +#define PCLK_SGRF_PMU 25 +#define PCLK_NOC_PMU 26 +#define PCLK_I2C0_PMU 27 +#define PCLK_I2C4_PMU 28 +#define PCLK_I2C8_PMU 29 +#define PCLK_RKPWM_PMU 30 +#define PCLK_SPI3_PMU 31 +#define PCLK_TIMER_PMU 32 +#define PCLK_MAILBOX_PMU 33 +#define PCLK_UART4_PMU 34 +#define PCLK_WDT_M0_PMU 35 + +#define FCLK_CM0S_SRC_PMU 44 +#define FCLK_CM0S_PMU 45 +#define SCLK_CM0S_PMU 46 +#define HCLK_CM0S_PMU 47 +#define DCLK_CM0S_PMU 48 +#define PCLK_INTR_ARB_PMU 49 +#define HCLK_NOC_PMU 50 + +#define CLKPMU_NR_CLKS (HCLK_NOC_PMU + 1) + +/* soft-reset indices */ + +/* cru_softrst_con0 */ +#define SRST_CORE_L0 0 +#define SRST_CORE_B0 1 +#define SRST_CORE_PO_L0 2 +#define SRST_CORE_PO_B0 3 +#define SRST_L2_L 4 +#define SRST_L2_B 5 +#define SRST_ADB_L 6 +#define SRST_ADB_B 7 +#define SRST_A_CCI 8 +#define SRST_A_CCIM0_NOC 9 +#define SRST_A_CCIM1_NOC 10 +#define SRST_DBG_NOC 11 + +/* cru_softrst_con1 */ +#define SRST_CORE_L0_T 16 +#define SRST_CORE_L1 17 +#define SRST_CORE_L2 18 +#define SRST_CORE_L3 19 +#define SRST_CORE_PO_L0_T 20 +#define SRST_CORE_PO_L1 21 +#define SRST_CORE_PO_L2 22 +#define SRST_CORE_PO_L3 23 +#define SRST_A_ADB400_GIC2COREL 24 +#define SRST_A_ADB400_COREL2GIC 25 +#define SRST_P_DBG_L 26 +#define SRST_L2_L_T 28 +#define SRST_ADB_L_T 29 +#define SRST_A_RKPERF_L 30 +#define SRST_PVTM_CORE_L 31 + +/* cru_softrst_con2 */ +#define SRST_CORE_B0_T 32 +#define SRST_CORE_B1 33 +#define SRST_CORE_PO_B0_T 36 +#define SRST_CORE_PO_B1 37 +#define SRST_A_ADB400_GIC2COREB 40 +#define 
SRST_A_ADB400_COREB2GIC 41 +#define SRST_P_DBG_B 42 +#define SRST_L2_B_T 43 +#define SRST_ADB_B_T 45 +#define SRST_A_RKPERF_B 46 +#define SRST_PVTM_CORE_B 47 + +/* cru_softrst_con3 */ +#define SRST_A_CCI_T 50 +#define SRST_A_CCIM0_NOC_T 51 +#define SRST_A_CCIM1_NOC_T 52 +#define SRST_A_ADB400M_PD_CORE_B_T 53 +#define SRST_A_ADB400M_PD_CORE_L_T 54 +#define SRST_DBG_NOC_T 55 +#define SRST_DBG_CXCS 56 +#define SRST_CCI_TRACE 57 +#define SRST_P_CCI_GRF 58 + +/* cru_softrst_con4 */ +#define SRST_A_CENTER_MAIN_NOC 64 +#define SRST_A_CENTER_PERI_NOC 65 +#define SRST_P_CENTER_MAIN 66 +#define SRST_P_DDRMON 67 +#define SRST_P_CIC 68 +#define SRST_P_CENTER_SGRF 69 +#define SRST_DDR0_MSCH 70 +#define SRST_DDRCFG0_MSCH 71 +#define SRST_DDR0 72 +#define SRST_DDRPHY0 73 +#define SRST_DDR1_MSCH 74 +#define SRST_DDRCFG1_MSCH 75 +#define SRST_DDR1 76 +#define SRST_DDRPHY1 77 +#define SRST_DDR_CIC 78 +#define SRST_PVTM_DDR 79 + +/* cru_softrst_con5 */ +#define SRST_A_VCODEC_NOC 80 +#define SRST_A_VCODEC 81 +#define SRST_H_VCODEC_NOC 82 +#define SRST_H_VCODEC 83 +#define SRST_A_VDU_NOC 88 +#define SRST_A_VDU 89 +#define SRST_H_VDU_NOC 90 +#define SRST_H_VDU 91 +#define SRST_VDU_CORE 92 +#define SRST_VDU_CA 93 + +/* cru_softrst_con6 */ +#define SRST_A_IEP_NOC 96 +#define SRST_A_VOP_IEP 97 +#define SRST_A_IEP 98 +#define SRST_H_IEP_NOC 99 +#define SRST_H_IEP 100 +#define SRST_A_RGA_NOC 102 +#define SRST_A_RGA 103 +#define SRST_H_RGA_NOC 104 +#define SRST_H_RGA 105 +#define SRST_RGA_CORE 106 +#define SRST_EMMC_NOC 108 +#define SRST_EMMC 109 +#define SRST_EMMC_GRF 110 + +/* cru_softrst_con7 */ +#define SRST_A_PERIHP_NOC 112 +#define SRST_P_PERIHP_GRF 113 +#define SRST_H_PERIHP_NOC 114 +#define SRST_USBHOST0 115 +#define SRST_HOSTC0_AUX 116 +#define SRST_HOST0_ARB 117 +#define SRST_USBHOST1 118 +#define SRST_HOSTC1_AUX 119 +#define SRST_HOST1_ARB 120 +#define SRST_SDIO0 121 +#define SRST_SDMMC 122 +#define SRST_HSIC 123 +#define SRST_HSIC_AUX 124 +#define SRST_AHB1TOM 125 +#define SRST_P_PERIHP_NOC 126 +#define SRST_HSICPHY 127 + +/* cru_softrst_con8 */ +#define SRST_A_PCIE 128 +#define SRST_P_PCIE 129 +#define SRST_PCIE_CORE 130 +#define SRST_PCIE_MGMT 131 +#define SRST_PCIE_MGMT_STICKY 132 +#define SRST_PCIE_PIPE 133 +#define SRST_PCIE_PM 134 +#define SRST_PCIEPHY 135 +#define SRST_A_GMAC_NOC 136 +#define SRST_A_GMAC 137 +#define SRST_P_GMAC_NOC 138 +#define SRST_P_GMAC_GRF 140 +#define SRST_HSICPHY_POR 142 +#define SRST_HSICPHY_UTMI 143 + +/* cru_softrst_con9 */ +#define SRST_USB2PHY0_POR 144 +#define SRST_USB2PHY0_UTMI_PORT0 145 +#define SRST_USB2PHY0_UTMI_PORT1 146 +#define SRST_USB2PHY0_EHCIPHY 147 +#define SRST_UPHY0_PIPE_L00 148 +#define SRST_UPHY0 149 +#define SRST_UPHY0_TCPDPWRUP 150 +#define SRST_USB2PHY1_POR 152 +#define SRST_USB2PHY1_UTMI_PORT0 153 +#define SRST_USB2PHY1_UTMI_PORT1 154 +#define SRST_USB2PHY1_EHCIPHY 155 +#define SRST_UPHY1_PIPE_L00 156 +#define SRST_UPHY1 157 +#define SRST_UPHY1_TCPDPWRUP 158 + +/* cru_softrst_con10 */ +#define SRST_A_PERILP0_NOC 160 +#define SRST_A_DCF 161 +#define SRST_GIC500 162 +#define SRST_DMAC0_PERILP0 163 +#define SRST_DMAC1_PERILP0 164 +#define SRST_TZMA 165 +#define SRST_INTMEM 166 +#define SRST_ADB400_MST0 167 +#define SRST_ADB400_MST1 168 +#define SRST_ADB400_SLV0 169 +#define SRST_ADB400_SLV1 170 +#define SRST_H_PERILP0 171 +#define SRST_H_PERILP0_NOC 172 +#define SRST_ROM 173 +#define SRST_CRYPTO_S 174 +#define SRST_CRYPTO_M 175 + +/* cru_softrst_con11 */ +#define SRST_P_DCF 176 +#define SRST_CM0S_NOC 177 +#define SRST_CM0S 178 +#define SRST_CM0S_DBG 
179 +#define SRST_CM0S_PO 180 +#define SRST_CRYPTO 181 +#define SRST_P_PERILP1_SGRF 182 +#define SRST_P_PERILP1_GRF 183 +#define SRST_CRYPTO1_S 184 +#define SRST_CRYPTO1_M 185 +#define SRST_CRYPTO1 186 +#define SRST_GIC_NOC 188 +#define SRST_SD_NOC 189 +#define SRST_SDIOAUDIO_BRG 190 + +/* cru_softrst_con12 */ +#define SRST_H_PERILP1 192 +#define SRST_H_PERILP1_NOC 193 +#define SRST_H_I2S0_8CH 194 +#define SRST_H_I2S1_8CH 195 +#define SRST_H_I2S2_8CH 196 +#define SRST_H_SPDIF_8CH 197 +#define SRST_P_PERILP1_NOC 198 +#define SRST_P_EFUSE_1024 199 +#define SRST_P_EFUSE_1024S 200 +#define SRST_P_I2C0 201 +#define SRST_P_I2C1 202 +#define SRST_P_I2C2 203 +#define SRST_P_I2C3 204 +#define SRST_P_I2C4 205 +#define SRST_P_I2C5 206 +#define SRST_P_MAILBOX0 207 + +/* cru_softrst_con13 */ +#define SRST_P_UART0 208 +#define SRST_P_UART1 209 +#define SRST_P_UART2 210 +#define SRST_P_UART3 211 +#define SRST_P_SARADC 212 +#define SRST_P_TSADC 213 +#define SRST_P_SPI0 214 +#define SRST_P_SPI1 215 +#define SRST_P_SPI2 216 +#define SRST_P_SPI3 217 +#define SRST_P_SPI4 218 +#define SRST_SPI0 219 +#define SRST_SPI1 220 +#define SRST_SPI2 221 +#define SRST_SPI3 222 +#define SRST_SPI4 223 + +/* cru_softrst_con14 */ +#define SRST_I2S0_8CH 224 +#define SRST_I2S1_8CH 225 +#define SRST_I2S2_8CH 226 +#define SRST_SPDIF_8CH 227 +#define SRST_UART0 228 +#define SRST_UART1 229 +#define SRST_UART2 230 +#define SRST_UART3 231 +#define SRST_TSADC 232 +#define SRST_I2C0 233 +#define SRST_I2C1 234 +#define SRST_I2C2 235 +#define SRST_I2C3 236 +#define SRST_I2C4 237 +#define SRST_I2C5 238 +#define SRST_SDIOAUDIO_NOC 239 + +/* cru_softrst_con15 */ +#define SRST_A_VIO_NOC 240 +#define SRST_A_HDCP_NOC 241 +#define SRST_A_HDCP 242 +#define SRST_H_HDCP_NOC 243 +#define SRST_H_HDCP 244 +#define SRST_P_HDCP_NOC 245 +#define SRST_P_HDCP 246 +#define SRST_P_HDMI_CTRL 247 +#define SRST_P_DP_CTRL 248 +#define SRST_S_DP_CTRL 249 +#define SRST_C_DP_CTRL 250 +#define SRST_P_MIPI_DSI0 251 +#define SRST_P_MIPI_DSI1 252 +#define SRST_DP_CORE 253 +#define SRST_DP_I2S 254 + +/* cru_softrst_con16 */ +#define SRST_GASKET 256 +#define SRST_VIO_GRF 258 +#define SRST_DPTX_SPDIF_REC 259 +#define SRST_HDMI_CTRL 260 +#define SRST_HDCP_CTRL 261 +#define SRST_A_ISP0_NOC 262 +#define SRST_A_ISP1_NOC 263 +#define SRST_H_ISP0_NOC 266 +#define SRST_H_ISP1_NOC 267 +#define SRST_H_ISP0 268 +#define SRST_H_ISP1 269 +#define SRST_ISP0 270 +#define SRST_ISP1 271 + +/* cru_softrst_con17 */ +#define SRST_A_VOP0_NOC 272 +#define SRST_A_VOP1_NOC 273 +#define SRST_A_VOP0 274 +#define SRST_A_VOP1 275 +#define SRST_H_VOP0_NOC 276 +#define SRST_H_VOP1_NOC 277 +#define SRST_H_VOP0 278 +#define SRST_H_VOP1 279 +#define SRST_D_VOP0 280 +#define SRST_D_VOP1 281 +#define SRST_VOP0_PWM 282 +#define SRST_VOP1_PWM 283 +#define SRST_P_EDP_NOC 284 +#define SRST_P_EDP_CTRL 285 + +/* cru_softrst_con18 */ +#define SRST_A_GPU 288 +#define SRST_A_GPU_NOC 289 +#define SRST_A_GPU_GRF 290 +#define SRST_PVTM_GPU 291 +#define SRST_A_USB3_NOC 292 +#define SRST_A_USB3_OTG0 293 +#define SRST_A_USB3_OTG1 294 +#define SRST_A_USB3_GRF 295 +#define SRST_PMU 296 + +/* cru_softrst_con19 */ +#define SRST_P_TIMER0_5 304 +#define SRST_TIMER0 305 +#define SRST_TIMER1 306 +#define SRST_TIMER2 307 +#define SRST_TIMER3 308 +#define SRST_TIMER4 309 +#define SRST_TIMER5 310 +#define SRST_P_TIMER6_11 311 +#define SRST_TIMER6 312 +#define SRST_TIMER7 313 +#define SRST_TIMER8 314 +#define SRST_TIMER9 315 +#define SRST_TIMER10 316 +#define SRST_TIMER11 317 +#define SRST_P_INTR_ARB_PMU 318 +#define 
SRST_P_ALIVE_SGRF 319 + +/* cru_softrst_con20 */ +#define SRST_P_GPIO2 320 +#define SRST_P_GPIO3 321 +#define SRST_P_GPIO4 322 +#define SRST_P_GRF 323 +#define SRST_P_ALIVE_NOC 324 +#define SRST_P_WDT0 325 +#define SRST_P_WDT1 326 +#define SRST_P_INTR_ARB 327 +#define SRST_P_UPHY0_DPTX 328 +#define SRST_P_UPHY0_APB 330 +#define SRST_P_UPHY0_TCPHY 332 +#define SRST_P_UPHY1_TCPHY 333 +#define SRST_P_UPHY0_TCPDCTRL 334 +#define SRST_P_UPHY1_TCPDCTRL 335 + +/* pmu soft-reset indices */ + +/* pmu_cru_softrst_con0 */ +#define SRST_P_NOC 0 +#define SRST_P_INTMEM 1 +#define SRST_H_CM0S 2 +#define SRST_H_CM0S_NOC 3 +#define SRST_DBG_CM0S 4 +#define SRST_PO_CM0S 5 +#define SRST_P_SPI6 6 +#define SRST_SPI6 7 +#define SRST_P_TIMER_0_1 8 +#define SRST_P_TIMER_0 9 +#define SRST_P_TIMER_1 10 +#define SRST_P_UART4 11 +#define SRST_UART4 12 +#define SRST_P_WDT 13 + +/* pmu_cru_softrst_con1 */ +#define SRST_P_I2C6 16 +#define SRST_P_I2C7 17 +#define SRST_P_I2C8 18 +#define SRST_P_MAILBOX 19 +#define SRST_P_RKPWM 20 +#define SRST_P_PMUGRF 21 +#define SRST_P_SGRF 22 +#define SRST_P_GPIO0 23 +#define SRST_P_GPIO1 24 +#define SRST_P_CRU 25 +#define SRST_P_INTR 26 +#define SRST_PVTM 27 +#define SRST_I2C6 28 +#define SRST_I2C7 29 +#define SRST_I2C8 30 + +#endif diff --git a/include/dt-bindings/clock/rk3399-ddr.h b/include/dt-bindings/clock/rk3399-ddr.h new file mode 100644 index 000000000..ed2280844 --- /dev/null +++ b/include/dt-bindings/clock/rk3399-ddr.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ + +#ifndef DT_BINDINGS_DDR_H +#define DT_BINDINGS_DDR_H + +/* + * DDR3 SDRAM Standard Speed Bins include tCK, tRCD, tRP, tRAS and tRC for + * each corresponding bin. + */ + +/* DDR3-800 (5-5-5) */ +#define DDR3_800D 0 +/* DDR3-800 (6-6-6) */ +#define DDR3_800E 1 +/* DDR3-1066 (6-6-6) */ +#define DDR3_1066E 2 +/* DDR3-1066 (7-7-7) */ +#define DDR3_1066F 3 +/* DDR3-1066 (8-8-8) */ +#define DDR3_1066G 4 +/* DDR3-1333 (7-7-7) */ +#define DDR3_1333F 5 +/* DDR3-1333 (8-8-8) */ +#define DDR3_1333G 6 +/* DDR3-1333 (9-9-9) */ +#define DDR3_1333H 7 +/* DDR3-1333 (10-10-10) */ +#define DDR3_1333J 8 +/* DDR3-1600 (8-8-8) */ +#define DDR3_1600G 9 +/* DDR3-1600 (9-9-9) */ +#define DDR3_1600H 10 +/* DDR3-1600 (10-10-10) */ +#define DDR3_1600J 11 +/* DDR3-1600 (11-11-11) */ +#define DDR3_1600K 12 +/* DDR3-1866 (10-10-10) */ +#define DDR3_1866J 13 +/* DDR3-1866 (11-11-11) */ +#define DDR3_1866K 14 +/* DDR3-1866 (12-12-12) */ +#define DDR3_1866L 15 +/* DDR3-1866 (13-13-13) */ +#define DDR3_1866M 16 +/* DDR3-2133 (11-11-11) */ +#define DDR3_2133K 17 +/* DDR3-2133 (12-12-12) */ +#define DDR3_2133L 18 +/* DDR3-2133 (13-13-13) */ +#define DDR3_2133M 19 +/* DDR3-2133 (14-14-14) */ +#define DDR3_2133N 20 +/* DDR3 ATF default */ +#define DDR3_DEFAULT 21 + +#endif diff --git a/include/dt-bindings/clock/rockchip,rk808.h b/include/dt-bindings/clock/rockchip,rk808.h new file mode 100644 index 000000000..75dabfc6a --- /dev/null +++ b/include/dt-bindings/clock/rockchip,rk808.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants clk index RK808 pmic clkout + */ +#ifndef _CLK_ROCKCHIP_RK808 +#define _CLK_ROCKCHIP_RK808 + +/* CLOCKOUT index */ +#define RK808_CLKOUT0 0 +#define RK808_CLKOUT1 1 + +#endif diff --git a/include/dt-bindings/clock/rv1108-cru.h b/include/dt-bindings/clock/rv1108-cru.h new file mode 100644 index 000000000..d8d0e0456 --- /dev/null +++ b/include/dt-bindings/clock/rv1108-cru.h @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. 
+ * Author: Shawn Lin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RV1108_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RV1108_H + +/* pll id */ +#define PLL_APLL 0 +#define PLL_DPLL 1 +#define PLL_GPLL 2 +#define ARMCLK 3 + +/* sclk gates (special clocks) */ +#define SCLK_SPI0 65 +#define SCLK_NANDC 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO 69 +#define SCLK_EMMC 71 +#define SCLK_UART0 72 +#define SCLK_UART1 73 +#define SCLK_UART2 74 +#define SCLK_I2S0 75 +#define SCLK_I2S1 76 +#define SCLK_I2S2 77 +#define SCLK_TIMER0 78 +#define SCLK_TIMER1 79 +#define SCLK_SFC 80 +#define SCLK_SDMMC_DRV 81 +#define SCLK_SDIO_DRV 82 +#define SCLK_EMMC_DRV 83 +#define SCLK_SDMMC_SAMPLE 84 +#define SCLK_SDIO_SAMPLE 85 +#define SCLK_EMMC_SAMPLE 86 +#define SCLK_VENC_CORE 87 +#define SCLK_HEVC_CORE 88 +#define SCLK_HEVC_CABAC 89 +#define SCLK_PWM0_PMU 90 +#define SCLK_I2C0_PMU 91 +#define SCLK_WIFI 92 +#define SCLK_CIFOUT 93 +#define SCLK_MIPI_CSI_OUT 94 +#define SCLK_CIF0 95 +#define SCLK_CIF1 96 +#define SCLK_CIF2 97 +#define SCLK_CIF3 98 +#define SCLK_DSP 99 +#define SCLK_DSP_IOP 100 +#define SCLK_DSP_EPP 101 +#define SCLK_DSP_EDP 102 +#define SCLK_DSP_EDAP 103 +#define SCLK_CVBS_HOST 104 +#define SCLK_HDMI_SFR 105 +#define SCLK_HDMI_CEC 106 +#define SCLK_CRYPTO 107 +#define SCLK_SPI 108 +#define SCLK_SARADC 109 +#define SCLK_TSADC 110 +#define SCLK_MAC_PRE 111 +#define SCLK_MAC 112 +#define SCLK_MAC_RX 113 +#define SCLK_MAC_REF 114 +#define SCLK_MAC_REFOUT 115 +#define SCLK_DSP_PFM 116 +#define SCLK_RGA 117 +#define SCLK_I2C1 118 +#define SCLK_I2C2 119 +#define SCLK_I2C3 120 +#define SCLK_PWM 121 +#define SCLK_ISP 122 +#define SCLK_USBPHY 123 +#define SCLK_I2S0_SRC 124 +#define SCLK_I2S1_SRC 125 +#define SCLK_I2S2_SRC 126 +#define SCLK_UART0_SRC 127 +#define SCLK_UART1_SRC 128 +#define SCLK_UART2_SRC 129 + +#define DCLK_VOP_SRC 185 +#define DCLK_HDMIPHY 186 +#define DCLK_VOP 187 + +/* aclk gates */ +#define ACLK_DMAC 192 +#define ACLK_PRE 193 +#define ACLK_CORE 194 +#define ACLK_ENMCORE 195 +#define ACLK_RKVENC 196 +#define ACLK_RKVDEC 197 +#define ACLK_VPU 198 +#define ACLK_CIF0 199 +#define ACLK_VIO0 200 +#define ACLK_VIO1 201 +#define ACLK_VOP 202 +#define ACLK_IEP 203 +#define ACLK_RGA 204 +#define ACLK_ISP 205 +#define ACLK_CIF1 206 +#define ACLK_CIF2 207 +#define ACLK_CIF3 208 +#define ACLK_PERI 209 +#define ACLK_GMAC 210 + +/* pclk gates */ +#define PCLK_GPIO1 256 +#define PCLK_GPIO2 257 +#define PCLK_GPIO3 258 +#define PCLK_GRF 259 +#define PCLK_I2C1 260 +#define PCLK_I2C2 261 +#define PCLK_I2C3 262 +#define PCLK_SPI 263 +#define PCLK_SFC 264 +#define PCLK_UART0 265 +#define PCLK_UART1 266 +#define PCLK_UART2 267 +#define PCLK_TSADC 268 +#define PCLK_PWM 269 +#define PCLK_TIMER 270 +#define PCLK_PERI 271 +#define PCLK_GPIO0_PMU 272 +#define PCLK_I2C0_PMU 273 +#define PCLK_PWM0_PMU 274 +#define PCLK_ISP 275 +#define PCLK_VIO 276 +#define PCLK_MIPI_DSI 277 +#define PCLK_HDMI_CTRL 278 +#define PCLK_SARADC 279 +#define PCLK_DSP_CFG 280 +#define PCLK_BUS 281 +#define PCLK_EFUSE0 282 +#define PCLK_EFUSE1 283 
+#define PCLK_WDT 284 +#define PCLK_GMAC 285 + +/* hclk gates */ +#define HCLK_I2S0_8CH 320 +#define HCLK_I2S1_2CH 321 +#define HCLK_I2S2_2CH 322 +#define HCLK_NANDC 323 +#define HCLK_SDMMC 324 +#define HCLK_SDIO 325 +#define HCLK_EMMC 326 +#define HCLK_PERI 327 +#define HCLK_SFC 328 +#define HCLK_RKVENC 329 +#define HCLK_RKVDEC 330 +#define HCLK_CIF0 331 +#define HCLK_VIO 332 +#define HCLK_VOP 333 +#define HCLK_IEP 334 +#define HCLK_RGA 335 +#define HCLK_ISP 336 +#define HCLK_CRYPTO_MST 337 +#define HCLK_CRYPTO_SLV 338 +#define HCLK_HOST0 339 +#define HCLK_OTG 340 +#define HCLK_CIF1 341 +#define HCLK_CIF2 342 +#define HCLK_CIF3 343 +#define HCLK_BUS 344 +#define HCLK_VPU 345 + +#define CLK_NR_CLKS (HCLK_VPU + 1) + +/* reset id */ +#define SRST_CORE_PO_AD 0 +#define SRST_CORE_AD 1 +#define SRST_L2_AD 2 +#define SRST_CPU_NIU_AD 3 +#define SRST_CORE_PO 4 +#define SRST_CORE 5 +#define SRST_L2 6 +#define SRST_CORE_DBG 8 +#define PRST_DBG 9 +#define RST_DAP 10 +#define PRST_DBG_NIU 11 +#define ARST_STRC_SYS_AD 15 + +#define SRST_DDRPHY_CLKDIV 16 +#define SRST_DDRPHY 17 +#define PRST_DDRPHY 18 +#define PRST_HDMIPHY 19 +#define PRST_VDACPHY 20 +#define PRST_VADCPHY 21 +#define PRST_MIPI_CSI_PHY 22 +#define PRST_MIPI_DSI_PHY 23 +#define PRST_ACODEC 24 +#define ARST_BUS_NIU 25 +#define PRST_TOP_NIU 26 +#define ARST_INTMEM 27 +#define HRST_ROM 28 +#define ARST_DMAC 29 +#define SRST_MSCH_NIU 30 +#define PRST_MSCH_NIU 31 + +#define PRST_DDRUPCTL 32 +#define NRST_DDRUPCTL 33 +#define PRST_DDRMON 34 +#define HRST_I2S0_8CH 35 +#define MRST_I2S0_8CH 36 +#define HRST_I2S1_2CH 37 +#define MRST_IS21_2CH 38 +#define HRST_I2S2_2CH 39 +#define MRST_I2S2_2CH 40 +#define HRST_CRYPTO 41 +#define SRST_CRYPTO 42 +#define PRST_SPI 43 +#define SRST_SPI 44 +#define PRST_UART0 45 +#define PRST_UART1 46 +#define PRST_UART2 47 + +#define SRST_UART0 48 +#define SRST_UART1 49 +#define SRST_UART2 50 +#define PRST_I2C1 51 +#define PRST_I2C2 52 +#define PRST_I2C3 53 +#define SRST_I2C1 54 +#define SRST_I2C2 55 +#define SRST_I2C3 56 +#define PRST_PWM1 58 +#define SRST_PWM1 60 +#define PRST_WDT 61 +#define PRST_GPIO1 62 +#define PRST_GPIO2 63 + +#define PRST_GPIO3 64 +#define PRST_GRF 65 +#define PRST_EFUSE 66 +#define PRST_EFUSE512 67 +#define PRST_TIMER0 68 +#define SRST_TIMER0 69 +#define SRST_TIMER1 70 +#define PRST_TSADC 71 +#define SRST_TSADC 72 +#define PRST_SARADC 73 +#define SRST_SARADC 74 +#define HRST_SYSBUS 75 +#define PRST_USBGRF 76 + +#define ARST_PERIPH_NIU 80 +#define HRST_PERIPH_NIU 81 +#define PRST_PERIPH_NIU 82 +#define HRST_PERIPH 83 +#define HRST_SDMMC 84 +#define HRST_SDIO 85 +#define HRST_EMMC 86 +#define HRST_NANDC 87 +#define NRST_NANDC 88 +#define HRST_SFC 89 +#define SRST_SFC 90 +#define ARST_GMAC 91 +#define HRST_OTG 92 +#define SRST_OTG 93 +#define SRST_OTG_ADP 94 +#define HRST_HOST0 95 + +#define HRST_HOST0_AUX 96 +#define HRST_HOST0_ARB 97 +#define SRST_HOST0_EHCIPHY 98 +#define SRST_HOST0_UTMI 99 +#define SRST_USBPOR 100 +#define SRST_UTMI0 101 +#define SRST_UTMI1 102 + +#define ARST_VIO0_NIU 102 +#define ARST_VIO1_NIU 103 +#define HRST_VIO_NIU 104 +#define PRST_VIO_NIU 105 +#define ARST_VOP 106 +#define HRST_VOP 107 +#define DRST_VOP 108 +#define ARST_IEP 109 +#define HRST_IEP 110 +#define ARST_RGA 111 +#define HRST_RGA 112 +#define SRST_RGA 113 +#define PRST_CVBS 114 +#define PRST_HDMI 115 +#define SRST_HDMI 116 +#define PRST_MIPI_DSI 117 + +#define ARST_ISP_NIU 118 +#define HRST_ISP_NIU 119 +#define HRST_ISP 120 +#define SRST_ISP 121 +#define ARST_VIP0 122 +#define HRST_VIP0 123 +#define 
PRST_VIP0 124 +#define ARST_VIP1 125 +#define HRST_VIP1 126 +#define PRST_VIP1 127 +#define ARST_VIP2 128 +#define HRST_VIP2 129 +#define PRST_VIP2 120 +#define ARST_VIP3 121 +#define HRST_VIP3 122 +#define PRST_VIP4 123 + +#define PRST_CIF1TO4 124 +#define SRST_CVBS_CLK 125 +#define HRST_CVBS 126 + +#define ARST_VPU_NIU 140 +#define HRST_VPU_NIU 141 +#define ARST_VPU 142 +#define HRST_VPU 143 +#define ARST_RKVDEC_NIU 144 +#define HRST_RKVDEC_NIU 145 +#define ARST_RKVDEC 146 +#define HRST_RKVDEC 147 +#define SRST_RKVDEC_CABAC 148 +#define SRST_RKVDEC_CORE 149 +#define ARST_RKVENC_NIU 150 +#define HRST_RKVENC_NIU 151 +#define ARST_RKVENC 152 +#define HRST_RKVENC 153 +#define SRST_RKVENC_CORE 154 + +#define SRST_DSP_CORE 156 +#define SRST_DSP_SYS 157 +#define SRST_DSP_GLOBAL 158 +#define SRST_DSP_OECM 159 +#define PRST_DSP_IOP_NIU 160 +#define ARST_DSP_EPP_NIU 161 +#define ARST_DSP_EDP_NIU 162 +#define PRST_DSP_DBG_NIU 163 +#define PRST_DSP_CFG_NIU 164 +#define PRST_DSP_GRF 165 +#define PRST_DSP_MAILBOX 166 +#define PRST_DSP_INTC 167 +#define PRST_DSP_PFM_MON 169 +#define SRST_DSP_PFM_MON 170 +#define ARST_DSP_EDAP_NIU 171 + +#define SRST_PMU 172 +#define SRST_PMU_I2C0 173 +#define PRST_PMU_I2C0 174 +#define PRST_PMU_GPIO0 175 +#define PRST_PMU_INTMEM 176 +#define PRST_PMU_PWM0 177 +#define SRST_PMU_PWM0 178 +#define PRST_PMU_GRF 179 +#define SRST_PMU_NIU 180 +#define SRST_PMU_PVTM 181 +#define ARST_DSP_EDP_PERF 184 +#define ARST_DSP_EPP_PERF 185 + +#endif /* _DT_BINDINGS_CLK_ROCKCHIP_RV1108_H */ diff --git a/include/dt-bindings/clock/s3c2410.h b/include/dt-bindings/clock/s3c2410.h new file mode 100644 index 000000000..352a7673f --- /dev/null +++ b/include/dt-bindings/clock/s3c2410.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2013 Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants clock controllers of Samsung S3C2410 and later. + */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to lookup the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. + */ + +/* Core clocks. */ + +/* id 1 is reserved */ +#define MPLL 2 +#define UPLL 3 +#define FCLK 4 +#define HCLK 5 +#define PCLK 6 +#define UCLK 7 +#define ARMCLK 8 + +/* pclk-gates */ +#define PCLK_UART0 16 +#define PCLK_UART1 17 +#define PCLK_UART2 18 +#define PCLK_I2C 19 +#define PCLK_SDI 20 +#define PCLK_SPI 21 +#define PCLK_ADC 22 +#define PCLK_AC97 23 +#define PCLK_I2S 24 +#define PCLK_PWM 25 +#define PCLK_RTC 26 +#define PCLK_GPIO 27 + + +/* hclk-gates */ +#define HCLK_LCD 32 +#define HCLK_USBH 33 +#define HCLK_USBD 34 +#define HCLK_NAND 35 +#define HCLK_CAM 36 + + +#define CAMIF 40 + + +/* Total number of clocks. 
*/ +#define NR_CLKS (CAMIF + 1) + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H */ diff --git a/include/dt-bindings/clock/s3c2412.h b/include/dt-bindings/clock/s3c2412.h new file mode 100644 index 000000000..aac1dcfda --- /dev/null +++ b/include/dt-bindings/clock/s3c2412.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013 Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants clock controllers of Samsung S3C2412. + */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to lookup the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. + */ + +/* Core clocks. */ + +/* id 1 is reserved */ +#define MPLL 2 +#define UPLL 3 +#define MDIVCLK 4 +#define MSYSCLK 5 +#define USYSCLK 6 +#define HCLK 7 +#define PCLK 8 +#define ARMDIV 9 +#define ARMCLK 10 + + +/* Special clocks */ +#define SCLK_CAM 16 +#define SCLK_UART 17 +#define SCLK_I2S 18 +#define SCLK_USBD 19 +#define SCLK_USBH 20 + +/* pclk-gates */ +#define PCLK_WDT 32 +#define PCLK_SPI 33 +#define PCLK_I2S 34 +#define PCLK_I2C 35 +#define PCLK_ADC 36 +#define PCLK_RTC 37 +#define PCLK_GPIO 38 +#define PCLK_UART2 39 +#define PCLK_UART1 40 +#define PCLK_UART0 41 +#define PCLK_SDI 42 +#define PCLK_PWM 43 +#define PCLK_USBD 44 + +/* hclk-gates */ +#define HCLK_HALF 48 +#define HCLK_X2 49 +#define HCLK_SDRAM 50 +#define HCLK_USBH 51 +#define HCLK_LCD 52 +#define HCLK_NAND 53 +#define HCLK_DMA3 54 +#define HCLK_DMA2 55 +#define HCLK_DMA1 56 +#define HCLK_DMA0 57 + +/* Total number of clocks. */ +#define NR_CLKS (HCLK_DMA0 + 1) + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H */ diff --git a/include/dt-bindings/clock/s3c2443.h b/include/dt-bindings/clock/s3c2443.h new file mode 100644 index 000000000..f3ba68a25 --- /dev/null +++ b/include/dt-bindings/clock/s3c2443.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2013 Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants clock controllers of Samsung S3C2443 and later. + */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to lookup the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. + */ + +/* Core clocks. 
*/ +#define MSYSCLK 1 +#define ESYSCLK 2 +#define ARMDIV 3 +#define ARMCLK 4 +#define HCLK 5 +#define PCLK 6 +#define MPLL 7 +#define EPLL 8 + +/* Special clocks */ +#define SCLK_HSSPI0 16 +#define SCLK_FIMD 17 +#define SCLK_I2S0 18 +#define SCLK_I2S1 19 +#define SCLK_HSMMC1 20 +#define SCLK_HSMMC_EXT 21 +#define SCLK_CAM 22 +#define SCLK_UART 23 +#define SCLK_USBH 24 + +/* Muxes */ +#define MUX_HSSPI0 32 +#define MUX_HSSPI1 33 +#define MUX_HSMMC0 34 +#define MUX_HSMMC1 35 + +/* hclk-gates */ +#define HCLK_DMA0 48 +#define HCLK_DMA1 49 +#define HCLK_DMA2 50 +#define HCLK_DMA3 51 +#define HCLK_DMA4 52 +#define HCLK_DMA5 53 +#define HCLK_DMA6 54 +#define HCLK_DMA7 55 +#define HCLK_CAM 56 +#define HCLK_LCD 57 +#define HCLK_USBH 58 +#define HCLK_USBD 59 +#define HCLK_IROM 60 +#define HCLK_HSMMC0 61 +#define HCLK_HSMMC1 62 +#define HCLK_CFC 63 +#define HCLK_SSMC 64 +#define HCLK_DRAM 65 +#define HCLK_2D 66 + +/* pclk-gates */ +#define PCLK_UART0 72 +#define PCLK_UART1 73 +#define PCLK_UART2 74 +#define PCLK_UART3 75 +#define PCLK_I2C0 76 +#define PCLK_SDI 77 +#define PCLK_SPI0 78 +#define PCLK_ADC 79 +#define PCLK_AC97 80 +#define PCLK_I2S0 81 +#define PCLK_PWM 82 +#define PCLK_WDT 83 +#define PCLK_RTC 84 +#define PCLK_GPIO 85 +#define PCLK_SPI1 86 +#define PCLK_CHIPID 87 +#define PCLK_I2C1 88 +#define PCLK_I2S1 89 +#define PCLK_PCM 90 + +/* Total number of clocks. */ +#define NR_CLKS (PCLK_PCM + 1) + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H */ diff --git a/include/dt-bindings/clock/s5pv210-audss.h b/include/dt-bindings/clock/s5pv210-audss.h new file mode 100644 index 000000000..fe57406e2 --- /dev/null +++ b/include/dt-bindings/clock/s5pv210-audss.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2014 Tomasz Figa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This header provides constants for Samsung audio subsystem + * clock controller. + * + * The constants defined in this header are being used in dts + * and s5pv210 audss driver. + */ + +#ifndef _DT_BINDINGS_CLOCK_S5PV210_AUDSS_H +#define _DT_BINDINGS_CLOCK_S5PV210_AUDSS_H + +#define CLK_MOUT_AUDSS 0 +#define CLK_MOUT_I2S_A 1 + +#define CLK_DOUT_AUD_BUS 2 +#define CLK_DOUT_I2S_A 3 + +#define CLK_I2S 4 +#define CLK_HCLK_I2S 5 +#define CLK_HCLK_UART 6 +#define CLK_HCLK_HWA 7 +#define CLK_HCLK_DMA 8 +#define CLK_HCLK_BUF 9 +#define CLK_HCLK_RP 10 + +#define AUDSS_MAX_CLKS 11 + +#endif diff --git a/include/dt-bindings/clock/s5pv210.h b/include/dt-bindings/clock/s5pv210.h new file mode 100644 index 000000000..e88986b7c --- /dev/null +++ b/include/dt-bindings/clock/s5pv210.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Mateusz Krawczuk + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants for Samsung S5PV210 clock controller. + */ + +#ifndef _DT_BINDINGS_CLOCK_S5PV210_H +#define _DT_BINDINGS_CLOCK_S5PV210_H + +/* Core clocks. */ +#define FIN_PLL 1 +#define FOUT_APLL 2 +#define FOUT_MPLL 3 +#define FOUT_EPLL 4 +#define FOUT_VPLL 5 + +/* Muxes. 
*/ +#define MOUT_FLASH 6 +#define MOUT_PSYS 7 +#define MOUT_DSYS 8 +#define MOUT_MSYS 9 +#define MOUT_VPLL 10 +#define MOUT_EPLL 11 +#define MOUT_MPLL 12 +#define MOUT_APLL 13 +#define MOUT_VPLLSRC 14 +#define MOUT_CSIS 15 +#define MOUT_FIMD 16 +#define MOUT_CAM1 17 +#define MOUT_CAM0 18 +#define MOUT_DAC 19 +#define MOUT_MIXER 20 +#define MOUT_HDMI 21 +#define MOUT_G2D 22 +#define MOUT_MFC 23 +#define MOUT_G3D 24 +#define MOUT_FIMC2 25 +#define MOUT_FIMC1 26 +#define MOUT_FIMC0 27 +#define MOUT_UART3 28 +#define MOUT_UART2 29 +#define MOUT_UART1 30 +#define MOUT_UART0 31 +#define MOUT_MMC3 32 +#define MOUT_MMC2 33 +#define MOUT_MMC1 34 +#define MOUT_MMC0 35 +#define MOUT_PWM 36 +#define MOUT_SPI0 37 +#define MOUT_SPI1 38 +#define MOUT_DMC0 39 +#define MOUT_PWI 40 +#define MOUT_HPM 41 +#define MOUT_SPDIF 42 +#define MOUT_AUDIO2 43 +#define MOUT_AUDIO1 44 +#define MOUT_AUDIO0 45 + +/* Dividers. */ +#define DOUT_PCLKP 46 +#define DOUT_HCLKP 47 +#define DOUT_PCLKD 48 +#define DOUT_HCLKD 49 +#define DOUT_PCLKM 50 +#define DOUT_HCLKM 51 +#define DOUT_A2M 52 +#define DOUT_APLL 53 +#define DOUT_CSIS 54 +#define DOUT_FIMD 55 +#define DOUT_CAM1 56 +#define DOUT_CAM0 57 +#define DOUT_TBLK 58 +#define DOUT_G2D 59 +#define DOUT_MFC 60 +#define DOUT_G3D 61 +#define DOUT_FIMC2 62 +#define DOUT_FIMC1 63 +#define DOUT_FIMC0 64 +#define DOUT_UART3 65 +#define DOUT_UART2 66 +#define DOUT_UART1 67 +#define DOUT_UART0 68 +#define DOUT_MMC3 69 +#define DOUT_MMC2 70 +#define DOUT_MMC1 71 +#define DOUT_MMC0 72 +#define DOUT_PWM 73 +#define DOUT_SPI1 74 +#define DOUT_SPI0 75 +#define DOUT_DMC0 76 +#define DOUT_PWI 77 +#define DOUT_HPM 78 +#define DOUT_COPY 79 +#define DOUT_FLASH 80 +#define DOUT_AUDIO2 81 +#define DOUT_AUDIO1 82 +#define DOUT_AUDIO0 83 +#define DOUT_DPM 84 +#define DOUT_DVSEM 85 + +/* Gates */ +#define SCLK_FIMC 86 +#define CLK_CSIS 87 +#define CLK_ROTATOR 88 +#define CLK_FIMC2 89 +#define CLK_FIMC1 90 +#define CLK_FIMC0 91 +#define CLK_MFC 92 +#define CLK_G2D 93 +#define CLK_G3D 94 +#define CLK_IMEM 95 +#define CLK_PDMA1 96 +#define CLK_PDMA0 97 +#define CLK_MDMA 98 +#define CLK_DMC1 99 +#define CLK_DMC0 100 +#define CLK_NFCON 101 +#define CLK_SROMC 102 +#define CLK_CFCON 103 +#define CLK_NANDXL 104 +#define CLK_USB_HOST 105 +#define CLK_USB_OTG 106 +#define CLK_HDMI 107 +#define CLK_TVENC 108 +#define CLK_MIXER 109 +#define CLK_VP 110 +#define CLK_DSIM 111 +#define CLK_FIMD 112 +#define CLK_TZIC3 113 +#define CLK_TZIC2 114 +#define CLK_TZIC1 115 +#define CLK_TZIC0 116 +#define CLK_VIC3 117 +#define CLK_VIC2 118 +#define CLK_VIC1 119 +#define CLK_VIC0 120 +#define CLK_TSI 121 +#define CLK_HSMMC3 122 +#define CLK_HSMMC2 123 +#define CLK_HSMMC1 124 +#define CLK_HSMMC0 125 +#define CLK_JTAG 126 +#define CLK_MODEMIF 127 +#define CLK_CORESIGHT 128 +#define CLK_SDM 129 +#define CLK_SECSS 130 +#define CLK_PCM2 131 +#define CLK_PCM1 132 +#define CLK_PCM0 133 +#define CLK_SYSCON 134 +#define CLK_GPIO 135 +#define CLK_TSADC 136 +#define CLK_PWM 137 +#define CLK_WDT 138 +#define CLK_KEYIF 139 +#define CLK_UART3 140 +#define CLK_UART2 141 +#define CLK_UART1 142 +#define CLK_UART0 143 +#define CLK_SYSTIMER 144 +#define CLK_RTC 145 +#define CLK_SPI1 146 +#define CLK_SPI0 147 +#define CLK_I2C_HDMI_PHY 148 +#define CLK_I2C1 149 +#define CLK_I2C2 150 +#define CLK_I2C0 151 +#define CLK_I2S1 152 +#define CLK_I2S2 153 +#define CLK_I2S0 154 +#define CLK_AC97 155 +#define CLK_SPDIF 156 +#define CLK_TZPC3 157 +#define CLK_TZPC2 158 +#define CLK_TZPC1 159 +#define CLK_TZPC0 160 +#define CLK_SECKEY 161 +#define 
CLK_IEM_APC 162 +#define CLK_IEM_IEC 163 +#define CLK_CHIPID 164 +#define CLK_JPEG 163 + +/* Special clocks*/ +#define SCLK_PWI 164 +#define SCLK_SPDIF 165 +#define SCLK_AUDIO2 166 +#define SCLK_AUDIO1 167 +#define SCLK_AUDIO0 168 +#define SCLK_PWM 169 +#define SCLK_SPI1 170 +#define SCLK_SPI0 171 +#define SCLK_UART3 172 +#define SCLK_UART2 173 +#define SCLK_UART1 174 +#define SCLK_UART0 175 +#define SCLK_MMC3 176 +#define SCLK_MMC2 177 +#define SCLK_MMC1 178 +#define SCLK_MMC0 179 +#define SCLK_FINVPLL 180 +#define SCLK_CSIS 181 +#define SCLK_FIMD 182 +#define SCLK_CAM1 183 +#define SCLK_CAM0 184 +#define SCLK_DAC 185 +#define SCLK_MIXER 186 +#define SCLK_HDMI 187 +#define SCLK_FIMC2 188 +#define SCLK_FIMC1 189 +#define SCLK_FIMC0 190 +#define SCLK_HDMI27M 191 +#define SCLK_HDMIPHY 192 +#define SCLK_USBPHY0 193 +#define SCLK_USBPHY1 194 + +/* S5P6442-specific clocks */ +#define MOUT_D0SYNC 195 +#define MOUT_D1SYNC 196 +#define DOUT_MIXER 197 +#define CLK_ETB 198 +#define CLK_ETM 199 + +/* CLKOUT */ +#define FOUT_APLL_CLKOUT 200 +#define FOUT_MPLL_CLKOUT 201 +#define DOUT_APLL_CLKOUT 202 +#define MOUT_CLKSEL 203 +#define DOUT_CLKOUT 204 +#define MOUT_CLKOUT 205 + +/* Total number of clocks. */ +#define NR_CLKS 206 + +#endif /* _DT_BINDINGS_CLOCK_S5PV210_H */ diff --git a/include/dt-bindings/clock/samsung,s2mps11.h b/include/dt-bindings/clock/samsung,s2mps11.h new file mode 100644 index 000000000..b903d7de2 --- /dev/null +++ b/include/dt-bindings/clock/samsung,s2mps11.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2015 Markus Reichl + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants clocks for the Samsung S2MPS11 PMIC. + */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H + +/* Fixed rate clocks. */ + +#define S2MPS11_CLK_AP 0 +#define S2MPS11_CLK_CP 1 +#define S2MPS11_CLK_BT 2 + +/* Total number of clocks. */ +#define S2MPS11_CLKS_NUM (S2MPS11_CLK_BT + 1) + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H */ diff --git a/include/dt-bindings/clock/samsung,s3c64xx-clock.h b/include/dt-bindings/clock/samsung,s3c64xx-clock.h new file mode 100644 index 000000000..ad95c7f50 --- /dev/null +++ b/include/dt-bindings/clock/samsung,s3c64xx-clock.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2013 Tomasz Figa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Device Tree binding constants for Samsung S3C64xx clock controller. +*/ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to lookup the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. + */ + +/* Core clocks. */ +#define CLK27M 1 +#define CLK48M 2 +#define FOUT_APLL 3 +#define FOUT_MPLL 4 +#define FOUT_EPLL 5 +#define ARMCLK 6 +#define HCLKX2 7 +#define HCLK 8 +#define PCLK 9 + +/* HCLK bus clocks. 
*/ +#define HCLK_3DSE 16 +#define HCLK_UHOST 17 +#define HCLK_SECUR 18 +#define HCLK_SDMA1 19 +#define HCLK_SDMA0 20 +#define HCLK_IROM 21 +#define HCLK_DDR1 22 +#define HCLK_MEM1 23 +#define HCLK_MEM0 24 +#define HCLK_USB 25 +#define HCLK_HSMMC2 26 +#define HCLK_HSMMC1 27 +#define HCLK_HSMMC0 28 +#define HCLK_MDP 29 +#define HCLK_DHOST 30 +#define HCLK_IHOST 31 +#define HCLK_DMA1 32 +#define HCLK_DMA0 33 +#define HCLK_JPEG 34 +#define HCLK_CAMIF 35 +#define HCLK_SCALER 36 +#define HCLK_2D 37 +#define HCLK_TV 38 +#define HCLK_POST0 39 +#define HCLK_ROT 40 +#define HCLK_LCD 41 +#define HCLK_TZIC 42 +#define HCLK_INTC 43 +#define HCLK_MFC 44 +#define HCLK_DDR0 45 + +/* PCLK bus clocks. */ +#define PCLK_IIC1 48 +#define PCLK_IIS2 49 +#define PCLK_SKEY 50 +#define PCLK_CHIPID 51 +#define PCLK_SPI1 52 +#define PCLK_SPI0 53 +#define PCLK_HSIRX 54 +#define PCLK_HSITX 55 +#define PCLK_GPIO 56 +#define PCLK_IIC0 57 +#define PCLK_IIS1 58 +#define PCLK_IIS0 59 +#define PCLK_AC97 60 +#define PCLK_TZPC 61 +#define PCLK_TSADC 62 +#define PCLK_KEYPAD 63 +#define PCLK_IRDA 64 +#define PCLK_PCM1 65 +#define PCLK_PCM0 66 +#define PCLK_PWM 67 +#define PCLK_RTC 68 +#define PCLK_WDT 69 +#define PCLK_UART3 70 +#define PCLK_UART2 71 +#define PCLK_UART1 72 +#define PCLK_UART0 73 +#define PCLK_MFC 74 + +/* Special clocks. */ +#define SCLK_UHOST 80 +#define SCLK_MMC2_48 81 +#define SCLK_MMC1_48 82 +#define SCLK_MMC0_48 83 +#define SCLK_MMC2 84 +#define SCLK_MMC1 85 +#define SCLK_MMC0 86 +#define SCLK_SPI1_48 87 +#define SCLK_SPI0_48 88 +#define SCLK_SPI1 89 +#define SCLK_SPI0 90 +#define SCLK_DAC27 91 +#define SCLK_TV27 92 +#define SCLK_SCALER27 93 +#define SCLK_SCALER 94 +#define SCLK_LCD27 95 +#define SCLK_LCD 96 +#define SCLK_FIMC 97 +#define SCLK_POST0_27 98 +#define SCLK_AUDIO2 99 +#define SCLK_POST0 100 +#define SCLK_AUDIO1 101 +#define SCLK_AUDIO0 102 +#define SCLK_SECUR 103 +#define SCLK_IRDA 104 +#define SCLK_UART 105 +#define SCLK_MFC 106 +#define SCLK_CAM 107 +#define SCLK_JPEG 108 +#define SCLK_ONENAND 109 + +/* MEM0 bus clocks - S3C6410-specific. */ +#define MEM0_CFCON 112 +#define MEM0_ONENAND1 113 +#define MEM0_ONENAND0 114 +#define MEM0_NFCON 115 +#define MEM0_SROM 116 + +/* Muxes. */ +#define MOUT_APLL 128 +#define MOUT_MPLL 129 +#define MOUT_EPLL 130 +#define MOUT_MFC 131 +#define MOUT_AUDIO0 132 +#define MOUT_AUDIO1 133 +#define MOUT_UART 134 +#define MOUT_SPI0 135 +#define MOUT_SPI1 136 +#define MOUT_MMC0 137 +#define MOUT_MMC1 138 +#define MOUT_MMC2 139 +#define MOUT_UHOST 140 +#define MOUT_IRDA 141 +#define MOUT_LCD 142 +#define MOUT_SCALER 143 +#define MOUT_DAC27 144 +#define MOUT_TV27 145 +#define MOUT_AUDIO2 146 + +/* Dividers. */ +#define DOUT_MPLL 160 +#define DOUT_SECUR 161 +#define DOUT_CAM 162 +#define DOUT_JPEG 163 +#define DOUT_MFC 164 +#define DOUT_MMC0 165 +#define DOUT_MMC1 166 +#define DOUT_MMC2 167 +#define DOUT_LCD 168 +#define DOUT_SCALER 169 +#define DOUT_UHOST 170 +#define DOUT_SPI0 171 +#define DOUT_SPI1 172 +#define DOUT_AUDIO0 173 +#define DOUT_AUDIO1 174 +#define DOUT_UART 175 +#define DOUT_IRDA 176 +#define DOUT_FIMC 177 +#define DOUT_AUDIO2 178 + +/* Total number of clocks. 
*/ +#define NR_CLKS (DOUT_AUDIO2 + 1) + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H */ diff --git a/include/dt-bindings/clock/sh73a0-clock.h b/include/dt-bindings/clock/sh73a0-clock.h new file mode 100644 index 000000000..2eca353a2 --- /dev/null +++ b/include/dt-bindings/clock/sh73a0-clock.h @@ -0,0 +1,86 @@ +/* + * Copyright 2014 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __DT_BINDINGS_CLOCK_SH73A0_H__ +#define __DT_BINDINGS_CLOCK_SH73A0_H__ + +/* CPG */ +#define SH73A0_CLK_MAIN 0 +#define SH73A0_CLK_PLL0 1 +#define SH73A0_CLK_PLL1 2 +#define SH73A0_CLK_PLL2 3 +#define SH73A0_CLK_PLL3 4 +#define SH73A0_CLK_DSI0PHY 5 +#define SH73A0_CLK_DSI1PHY 6 +#define SH73A0_CLK_ZG 7 +#define SH73A0_CLK_M3 8 +#define SH73A0_CLK_B 9 +#define SH73A0_CLK_M1 10 +#define SH73A0_CLK_M2 11 +#define SH73A0_CLK_Z 12 +#define SH73A0_CLK_ZX 13 +#define SH73A0_CLK_HP 14 + +/* MSTP0 */ +#define SH73A0_CLK_IIC2 1 +#define SH73A0_CLK_MSIOF0 0 + +/* MSTP1 */ +#define SH73A0_CLK_CEU1 29 +#define SH73A0_CLK_CSI2_RX1 28 +#define SH73A0_CLK_CEU0 27 +#define SH73A0_CLK_CSI2_RX0 26 +#define SH73A0_CLK_TMU0 25 +#define SH73A0_CLK_DSITX0 18 +#define SH73A0_CLK_IIC0 16 +#define SH73A0_CLK_SGX 12 +#define SH73A0_CLK_LCDC0 0 + +/* MSTP2 */ +#define SH73A0_CLK_SCIFA7 19 +#define SH73A0_CLK_SY_DMAC 18 +#define SH73A0_CLK_MP_DMAC 17 +#define SH73A0_CLK_MSIOF3 15 +#define SH73A0_CLK_MSIOF1 8 +#define SH73A0_CLK_SCIFA5 7 +#define SH73A0_CLK_SCIFB 6 +#define SH73A0_CLK_MSIOF2 5 +#define SH73A0_CLK_SCIFA0 4 +#define SH73A0_CLK_SCIFA1 3 +#define SH73A0_CLK_SCIFA2 2 +#define SH73A0_CLK_SCIFA3 1 +#define SH73A0_CLK_SCIFA4 0 + +/* MSTP3 */ +#define SH73A0_CLK_SCIFA6 31 +#define SH73A0_CLK_CMT1 29 +#define SH73A0_CLK_FSI 28 +#define SH73A0_CLK_IRDA 25 +#define SH73A0_CLK_IIC1 23 +#define SH73A0_CLK_USB 22 +#define SH73A0_CLK_FLCTL 15 +#define SH73A0_CLK_SDHI0 14 +#define SH73A0_CLK_SDHI1 13 +#define SH73A0_CLK_MMCIF0 12 +#define SH73A0_CLK_SDHI2 11 +#define SH73A0_CLK_TPU0 4 +#define SH73A0_CLK_TPU1 3 +#define SH73A0_CLK_TPU2 2 +#define SH73A0_CLK_TPU3 1 +#define SH73A0_CLK_TPU4 0 + +/* MSTP4 */ +#define SH73A0_CLK_IIC3 11 +#define SH73A0_CLK_IIC4 10 +#define SH73A0_CLK_KEYSC 3 + +/* MSTP5 */ +#define SH73A0_CLK_INTCA0 8 + +#endif diff --git a/include/dt-bindings/clock/sprd,sc9860-clk.h b/include/dt-bindings/clock/sprd,sc9860-clk.h new file mode 100644 index 000000000..f2ab4631d --- /dev/null +++ b/include/dt-bindings/clock/sprd,sc9860-clk.h @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +// +// Spreadtrum SC9860 platform clocks +// +// Copyright (C) 2017, Spreadtrum Communications Inc. 
+ +#ifndef _DT_BINDINGS_CLK_SC9860_H_ +#define _DT_BINDINGS_CLK_SC9860_H_ + +#define CLK_FAC_4M 0 +#define CLK_FAC_2M 1 +#define CLK_FAC_1M 2 +#define CLK_FAC_250K 3 +#define CLK_FAC_RPLL0_26M 4 +#define CLK_FAC_RPLL1_26M 5 +#define CLK_FAC_RCO25M 6 +#define CLK_FAC_RCO4M 7 +#define CLK_FAC_RCO2M 8 +#define CLK_FAC_3K2 9 +#define CLK_FAC_1K 10 +#define CLK_MPLL0_GATE 11 +#define CLK_MPLL1_GATE 12 +#define CLK_DPLL0_GATE 13 +#define CLK_DPLL1_GATE 14 +#define CLK_LTEPLL0_GATE 15 +#define CLK_TWPLL_GATE 16 +#define CLK_LTEPLL1_GATE 17 +#define CLK_RPLL0_GATE 18 +#define CLK_RPLL1_GATE 19 +#define CLK_CPPLL_GATE 20 +#define CLK_GPLL_GATE 21 +#define CLK_PMU_GATE_NUM (CLK_GPLL_GATE + 1) + +#define CLK_MPLL0 0 +#define CLK_MPLL1 1 +#define CLK_DPLL0 2 +#define CLK_DPLL1 3 +#define CLK_RPLL0 4 +#define CLK_RPLL1 5 +#define CLK_TWPLL 6 +#define CLK_LTEPLL0 7 +#define CLK_LTEPLL1 8 +#define CLK_GPLL 9 +#define CLK_CPPLL 10 +#define CLK_GPLL_42M5 11 +#define CLK_TWPLL_768M 12 +#define CLK_TWPLL_384M 13 +#define CLK_TWPLL_192M 14 +#define CLK_TWPLL_96M 15 +#define CLK_TWPLL_48M 16 +#define CLK_TWPLL_24M 17 +#define CLK_TWPLL_12M 18 +#define CLK_TWPLL_512M 19 +#define CLK_TWPLL_256M 20 +#define CLK_TWPLL_128M 21 +#define CLK_TWPLL_64M 22 +#define CLK_TWPLL_307M2 23 +#define CLK_TWPLL_153M6 24 +#define CLK_TWPLL_76M8 25 +#define CLK_TWPLL_51M2 26 +#define CLK_TWPLL_38M4 27 +#define CLK_TWPLL_19M2 28 +#define CLK_L0_614M4 29 +#define CLK_L0_409M6 30 +#define CLK_L0_38M 31 +#define CLK_L1_38M 32 +#define CLK_RPLL0_192M 33 +#define CLK_RPLL0_96M 34 +#define CLK_RPLL0_48M 35 +#define CLK_RPLL1_468M 36 +#define CLK_RPLL1_192M 37 +#define CLK_RPLL1_96M 38 +#define CLK_RPLL1_64M 39 +#define CLK_RPLL1_48M 40 +#define CLK_DPLL0_50M 41 +#define CLK_DPLL1_50M 42 +#define CLK_CPPLL_50M 43 +#define CLK_M0_39M 44 +#define CLK_M1_63M 45 +#define CLK_PLL_NUM (CLK_M1_63M + 1) + + +#define CLK_AP_APB 0 +#define CLK_AP_USB3 1 +#define CLK_UART0 2 +#define CLK_UART1 3 +#define CLK_UART2 4 +#define CLK_UART3 5 +#define CLK_UART4 6 +#define CLK_I2C0 7 +#define CLK_I2C1 8 +#define CLK_I2C2 9 +#define CLK_I2C3 10 +#define CLK_I2C4 11 +#define CLK_I2C5 12 +#define CLK_SPI0 13 +#define CLK_SPI1 14 +#define CLK_SPI2 15 +#define CLK_SPI3 16 +#define CLK_IIS0 17 +#define CLK_IIS1 18 +#define CLK_IIS2 19 +#define CLK_IIS3 20 +#define CLK_AP_CLK_NUM (CLK_IIS3 + 1) + +#define CLK_AON_APB 0 +#define CLK_AUX0 1 +#define CLK_AUX1 2 +#define CLK_AUX2 3 +#define CLK_PROBE 4 +#define CLK_SP_AHB 5 +#define CLK_CCI 6 +#define CLK_GIC 7 +#define CLK_CSSYS 8 +#define CLK_SDIO0_2X 9 +#define CLK_SDIO1_2X 10 +#define CLK_SDIO2_2X 11 +#define CLK_EMMC_2X 12 +#define CLK_SDIO0_1X 13 +#define CLK_SDIO1_1X 14 +#define CLK_SDIO2_1X 15 +#define CLK_EMMC_1X 16 +#define CLK_ADI 17 +#define CLK_PWM0 18 +#define CLK_PWM1 19 +#define CLK_PWM2 20 +#define CLK_PWM3 21 +#define CLK_EFUSE 22 +#define CLK_CM3_UART0 23 +#define CLK_CM3_UART1 24 +#define CLK_THM 25 +#define CLK_CM3_I2C0 26 +#define CLK_CM3_I2C1 27 +#define CLK_CM4_SPI 28 +#define CLK_AON_I2C 29 +#define CLK_AVS 30 +#define CLK_CA53_DAP 31 +#define CLK_CA53_TS 32 +#define CLK_DJTAG_TCK 33 +#define CLK_PMU 34 +#define CLK_PMU_26M 35 +#define CLK_DEBOUNCE 36 +#define CLK_OTG2_REF 37 +#define CLK_USB3_REF 38 +#define CLK_AP_AXI 39 +#define CLK_AON_PREDIV_NUM (CLK_AP_AXI + 1) + +#define CLK_USB3_EB 0 +#define CLK_USB3_SUSPEND_EB 1 +#define CLK_USB3_REF_EB 2 +#define CLK_DMA_EB 3 +#define CLK_SDIO0_EB 4 +#define CLK_SDIO1_EB 5 +#define CLK_SDIO2_EB 6 +#define CLK_EMMC_EB 7 +#define CLK_ROM_EB 8 
+#define CLK_BUSMON_EB 9 +#define CLK_CC63S_EB 10 +#define CLK_CC63P_EB 11 +#define CLK_CE0_EB 12 +#define CLK_CE1_EB 13 +#define CLK_APAHB_GATE_NUM (CLK_CE1_EB + 1) + +#define CLK_AVS_LIT_EB 0 +#define CLK_AVS_BIG_EB 1 +#define CLK_AP_INTC5_EB 2 +#define CLK_GPIO_EB 3 +#define CLK_PWM0_EB 4 +#define CLK_PWM1_EB 5 +#define CLK_PWM2_EB 6 +#define CLK_PWM3_EB 7 +#define CLK_KPD_EB 8 +#define CLK_AON_SYS_EB 9 +#define CLK_AP_SYS_EB 10 +#define CLK_AON_TMR_EB 11 +#define CLK_AP_TMR0_EB 12 +#define CLK_EFUSE_EB 13 +#define CLK_EIC_EB 14 +#define CLK_PUB1_REG_EB 15 +#define CLK_ADI_EB 16 +#define CLK_AP_INTC0_EB 17 +#define CLK_AP_INTC1_EB 18 +#define CLK_AP_INTC2_EB 19 +#define CLK_AP_INTC3_EB 20 +#define CLK_AP_INTC4_EB 21 +#define CLK_SPLK_EB 22 +#define CLK_MSPI_EB 23 +#define CLK_PUB0_REG_EB 24 +#define CLK_PIN_EB 25 +#define CLK_AON_CKG_EB 26 +#define CLK_GPU_EB 27 +#define CLK_APCPU_TS0_EB 28 +#define CLK_APCPU_TS1_EB 29 +#define CLK_DAP_EB 30 +#define CLK_I2C_EB 31 +#define CLK_PMU_EB 32 +#define CLK_THM_EB 33 +#define CLK_AUX0_EB 34 +#define CLK_AUX1_EB 35 +#define CLK_AUX2_EB 36 +#define CLK_PROBE_EB 37 +#define CLK_GPU0_AVS_EB 38 +#define CLK_GPU1_AVS_EB 39 +#define CLK_APCPU_WDG_EB 40 +#define CLK_AP_TMR1_EB 41 +#define CLK_AP_TMR2_EB 42 +#define CLK_DISP_EMC_EB 43 +#define CLK_ZIP_EMC_EB 44 +#define CLK_GSP_EMC_EB 45 +#define CLK_OSC_AON_EB 46 +#define CLK_LVDS_TRX_EB 47 +#define CLK_LVDS_TCXO_EB 48 +#define CLK_MDAR_EB 49 +#define CLK_RTC4M0_CAL_EB 50 +#define CLK_RCT100M_CAL_EB 51 +#define CLK_DJTAG_EB 52 +#define CLK_MBOX_EB 53 +#define CLK_AON_DMA_EB 54 +#define CLK_DBG_EMC_EB 55 +#define CLK_LVDS_PLL_DIV_EN 56 +#define CLK_DEF_EB 57 +#define CLK_AON_APB_RSV0 58 +#define CLK_ORP_JTAG_EB 59 +#define CLK_VSP_EB 60 +#define CLK_CAM_EB 61 +#define CLK_DISP_EB 62 +#define CLK_DBG_AXI_IF_EB 63 +#define CLK_SDIO0_2X_EN 64 +#define CLK_SDIO1_2X_EN 65 +#define CLK_SDIO2_2X_EN 66 +#define CLK_EMMC_2X_EN 67 +#define CLK_ARCH_RTC_EB 68 +#define CLK_KPB_RTC_EB 69 +#define CLK_AON_SYST_RTC_EB 70 +#define CLK_AP_SYST_RTC_EB 71 +#define CLK_AON_TMR_RTC_EB 72 +#define CLK_AP_TMR0_RTC_EB 73 +#define CLK_EIC_RTC_EB 74 +#define CLK_EIC_RTCDV5_EB 75 +#define CLK_AP_WDG_RTC_EB 76 +#define CLK_AP_TMR1_RTC_EB 77 +#define CLK_AP_TMR2_RTC_EB 78 +#define CLK_DCXO_TMR_RTC_EB 79 +#define CLK_BB_CAL_RTC_EB 80 +#define CLK_AVS_BIG_RTC_EB 81 +#define CLK_AVS_LIT_RTC_EB 82 +#define CLK_AVS_GPU0_RTC_EB 83 +#define CLK_AVS_GPU1_RTC_EB 84 +#define CLK_GPU_TS_EB 85 +#define CLK_RTCDV10_EB 86 +#define CLK_AON_GATE_NUM (CLK_RTCDV10_EB + 1) + +#define CLK_LIT_MCU 0 +#define CLK_BIG_MCU 1 +#define CLK_AONSECURE_NUM (CLK_BIG_MCU + 1) + +#define CLK_AGCP_IIS0_EB 0 +#define CLK_AGCP_IIS1_EB 1 +#define CLK_AGCP_IIS2_EB 2 +#define CLK_AGCP_IIS3_EB 3 +#define CLK_AGCP_UART_EB 4 +#define CLK_AGCP_DMACP_EB 5 +#define CLK_AGCP_DMAAP_EB 6 +#define CLK_AGCP_ARC48K_EB 7 +#define CLK_AGCP_SRC44P1K_EB 8 +#define CLK_AGCP_MCDT_EB 9 +#define CLK_AGCP_VBCIFD_EB 10 +#define CLK_AGCP_VBC_EB 11 +#define CLK_AGCP_SPINLOCK_EB 12 +#define CLK_AGCP_ICU_EB 13 +#define CLK_AGCP_AP_ASHB_EB 14 +#define CLK_AGCP_CP_ASHB_EB 15 +#define CLK_AGCP_AUD_EB 16 +#define CLK_AGCP_AUDIF_EB 17 +#define CLK_AGCP_GATE_NUM (CLK_AGCP_AUDIF_EB + 1) + +#define CLK_GPU 0 +#define CLK_GPU_NUM (CLK_GPU + 1) + +#define CLK_AHB_VSP 0 +#define CLK_VSP 1 +#define CLK_VSP_ENC 2 +#define CLK_VPP 3 +#define CLK_VSP_26M 4 +#define CLK_VSP_NUM (CLK_VSP_26M + 1) + +#define CLK_VSP_DEC_EB 0 +#define CLK_VSP_CKG_EB 1 +#define CLK_VSP_MMU_EB 2 +#define CLK_VSP_ENC_EB 3 +#define 
CLK_VPP_EB 4 +#define CLK_VSP_26M_EB 5 +#define CLK_VSP_AXI_GATE 6 +#define CLK_VSP_ENC_GATE 7 +#define CLK_VPP_AXI_GATE 8 +#define CLK_VSP_BM_GATE 9 +#define CLK_VSP_ENC_BM_GATE 10 +#define CLK_VPP_BM_GATE 11 +#define CLK_VSP_GATE_NUM (CLK_VPP_BM_GATE + 1) + +#define CLK_AHB_CAM 0 +#define CLK_SENSOR0 1 +#define CLK_SENSOR1 2 +#define CLK_SENSOR2 3 +#define CLK_MIPI_CSI0_EB 4 +#define CLK_MIPI_CSI1_EB 5 +#define CLK_CAM_NUM (CLK_MIPI_CSI1_EB + 1) + +#define CLK_DCAM0_EB 0 +#define CLK_DCAM1_EB 1 +#define CLK_ISP0_EB 2 +#define CLK_CSI0_EB 3 +#define CLK_CSI1_EB 4 +#define CLK_JPG0_EB 5 +#define CLK_JPG1_EB 6 +#define CLK_CAM_CKG_EB 7 +#define CLK_CAM_MMU_EB 8 +#define CLK_ISP1_EB 9 +#define CLK_CPP_EB 10 +#define CLK_MMU_PF_EB 11 +#define CLK_ISP2_EB 12 +#define CLK_DCAM2ISP_IF_EB 13 +#define CLK_ISP2DCAM_IF_EB 14 +#define CLK_ISP_LCLK_EB 15 +#define CLK_ISP_ICLK_EB 16 +#define CLK_ISP_MCLK_EB 17 +#define CLK_ISP_PCLK_EB 18 +#define CLK_ISP_ISP2DCAM_EB 19 +#define CLK_DCAM0_IF_EB 20 +#define CLK_CLK26M_IF_EB 21 +#define CLK_CPHY0_GATE 22 +#define CLK_MIPI_CSI0_GATE 23 +#define CLK_CPHY1_GATE 24 +#define CLK_MIPI_CSI1 25 +#define CLK_DCAM0_AXI_GATE 26 +#define CLK_DCAM1_AXI_GATE 27 +#define CLK_SENSOR0_GATE 28 +#define CLK_SENSOR1_GATE 29 +#define CLK_JPG0_AXI_GATE 30 +#define CLK_GPG1_AXI_GATE 31 +#define CLK_ISP0_AXI_GATE 32 +#define CLK_ISP1_AXI_GATE 33 +#define CLK_ISP2_AXI_GATE 34 +#define CLK_CPP_AXI_GATE 35 +#define CLK_D0_IF_AXI_GATE 36 +#define CLK_D2I_IF_AXI_GATE 37 +#define CLK_I2D_IF_AXI_GATE 38 +#define CLK_SPARE_AXI_GATE 39 +#define CLK_SENSOR2_GATE 40 +#define CLK_D0IF_IN_D_EN 41 +#define CLK_D1IF_IN_D_EN 42 +#define CLK_D0IF_IN_D2I_EN 43 +#define CLK_D1IF_IN_D2I_EN 44 +#define CLK_IA_IN_D2I_EN 45 +#define CLK_IB_IN_D2I_EN 46 +#define CLK_IC_IN_D2I_EN 47 +#define CLK_IA_IN_I_EN 48 +#define CLK_IB_IN_I_EN 49 +#define CLK_IC_IN_I_EN 50 +#define CLK_CAM_GATE_NUM (CLK_IC_IN_I_EN + 1) + +#define CLK_AHB_DISP 0 +#define CLK_DISPC0_DPI 1 +#define CLK_DISPC1_DPI 2 +#define CLK_DISP_NUM (CLK_DISPC1_DPI + 1) + +#define CLK_DISPC0_EB 0 +#define CLK_DISPC1_EB 1 +#define CLK_DISPC_MMU_EB 2 +#define CLK_GSP0_EB 3 +#define CLK_GSP1_EB 4 +#define CLK_GSP0_MMU_EB 5 +#define CLK_GSP1_MMU_EB 6 +#define CLK_DSI0_EB 7 +#define CLK_DSI1_EB 8 +#define CLK_DISP_CKG_EB 9 +#define CLK_DISP_GPU_EB 10 +#define CLK_GPU_MTX_EB 11 +#define CLK_GSP_MTX_EB 12 +#define CLK_TMC_MTX_EB 13 +#define CLK_DISPC_MTX_EB 14 +#define CLK_DPHY0_GATE 15 +#define CLK_DPHY1_GATE 16 +#define CLK_GSP0_A_GATE 17 +#define CLK_GSP1_A_GATE 18 +#define CLK_GSP0_F_GATE 19 +#define CLK_GSP1_F_GATE 20 +#define CLK_D_MTX_F_GATE 21 +#define CLK_D_MTX_A_GATE 22 +#define CLK_D_NOC_F_GATE 23 +#define CLK_D_NOC_A_GATE 24 +#define CLK_GSP_MTX_F_GATE 25 +#define CLK_GSP_MTX_A_GATE 26 +#define CLK_GSP_NOC_F_GATE 27 +#define CLK_GSP_NOC_A_GATE 28 +#define CLK_DISPM0IDLE_GATE 29 +#define CLK_GSPM0IDLE_GATE 30 +#define CLK_DISP_GATE_NUM (CLK_GSPM0IDLE_GATE + 1) + +#define CLK_SIM0_EB 0 +#define CLK_IIS0_EB 1 +#define CLK_IIS1_EB 2 +#define CLK_IIS2_EB 3 +#define CLK_IIS3_EB 4 +#define CLK_SPI0_EB 5 +#define CLK_SPI1_EB 6 +#define CLK_SPI2_EB 7 +#define CLK_I2C0_EB 8 +#define CLK_I2C1_EB 9 +#define CLK_I2C2_EB 10 +#define CLK_I2C3_EB 11 +#define CLK_I2C4_EB 12 +#define CLK_I2C5_EB 13 +#define CLK_UART0_EB 14 +#define CLK_UART1_EB 15 +#define CLK_UART2_EB 16 +#define CLK_UART3_EB 17 +#define CLK_UART4_EB 18 +#define CLK_AP_CKG_EB 19 +#define CLK_SPI3_EB 20 +#define CLK_APAPB_GATE_NUM (CLK_SPI3_EB + 1) + +#endif /* _DT_BINDINGS_CLK_SC9860_H_ */ 
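The headers in this patch only pin down numeric clock indices; consumers pick them up from device tree sources, which pull the header in through the C preprocessor and use the constant as the index cell of a clock specifier. Below is a minimal sketch of that usage against the SC9860 header above; the node name, unit address, compatible string and the &ap_clk / &apapb_gate provider labels are illustrative assumptions, not taken from this patch.

#include <dt-bindings/clock/sprd,sc9860-clk.h>

/* Hypothetical consumer node: the first cell of each entry is the phandle of
 * a clock provider, the second is an index defined in the header above
 * (CLK_UART0 from the AP clocks, CLK_UART0_EB from the AP-APB gates).
 */
serial@70000000 {
	compatible = "vendor,example-uart";	/* placeholder */
	reg = <0x70000000 0x100>;
	clocks = <&ap_clk CLK_UART0>, <&apapb_gate CLK_UART0_EB>;
	clock-names = "uart", "enable";
};

Because compiled .dtb files carry these raw numbers, the indices are an ABI (the S3C64xx header above says so explicitly): new clocks are added in free slots or appended at the end rather than renumbered, which is why several of these headers leave gaps between groups.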
diff --git a/include/dt-bindings/clock/ste-ab8500.h b/include/dt-bindings/clock/ste-ab8500.h new file mode 100644 index 000000000..fb42dd0ca --- /dev/null +++ b/include/dt-bindings/clock/ste-ab8500.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __STE_CLK_AB8500_H__ +#define __STE_CLK_AB8500_H__ + +#define AB8500_SYSCLK_BUF2 0 +#define AB8500_SYSCLK_BUF3 1 +#define AB8500_SYSCLK_BUF4 2 +#define AB8500_SYSCLK_ULP 3 +#define AB8500_SYSCLK_INT 4 +#define AB8500_SYSCLK_AUDIO 5 + +#endif diff --git a/include/dt-bindings/clock/stih407-clks.h b/include/dt-bindings/clock/stih407-clks.h new file mode 100644 index 000000000..f0936c133 --- /dev/null +++ b/include/dt-bindings/clock/stih407-clks.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants clk index STMicroelectronics + * STiH407 SoC. + */ +#ifndef _DT_BINDINGS_CLK_STIH407 +#define _DT_BINDINGS_CLK_STIH407 + +/* CLOCKGEN A0 */ +#define CLK_IC_LMI0 0 +#define CLK_IC_LMI1 1 + +/* CLOCKGEN C0 */ +#define CLK_ICN_GPU 0 +#define CLK_FDMA 1 +#define CLK_NAND 2 +#define CLK_HVA 3 +#define CLK_PROC_STFE 4 +#define CLK_PROC_TP 5 +#define CLK_RX_ICN_DMU 6 +#define CLK_RX_ICN_DISP_0 6 +#define CLK_RX_ICN_DISP_1 6 +#define CLK_RX_ICN_HVA 7 +#define CLK_RX_ICN_TS 7 +#define CLK_ICN_CPU 8 +#define CLK_TX_ICN_DMU 9 +#define CLK_TX_ICN_HVA 9 +#define CLK_TX_ICN_TS 9 +#define CLK_ICN_COMPO 9 +#define CLK_MMC_0 10 +#define CLK_MMC_1 11 +#define CLK_JPEGDEC 12 +#define CLK_ICN_REG 13 +#define CLK_TRACE_A9 13 +#define CLK_PTI_STM 13 +#define CLK_EXT2F_A9 13 +#define CLK_IC_BDISP_0 14 +#define CLK_IC_BDISP_1 15 +#define CLK_PP_DMU 16 +#define CLK_VID_DMU 17 +#define CLK_DSS_LPC 18 +#define CLK_ST231_AUD_0 19 +#define CLK_ST231_GP_0 19 +#define CLK_ST231_GP_1 20 +#define CLK_ST231_DMU 21 +#define CLK_ICN_LMI 22 +#define CLK_TX_ICN_DISP_0 23 +#define CLK_TX_ICN_DISP_1 23 +#define CLK_ICN_SBC 24 +#define CLK_STFE_FRC2 25 +#define CLK_ETH_PHY 26 +#define CLK_ETH_REF_PHYCLK 27 +#define CLK_FLASH_PROMIP 28 +#define CLK_MAIN_DISP 29 +#define CLK_AUX_DISP 30 +#define CLK_COMPO_DVP 31 + +/* CLOCKGEN D0 */ +#define CLK_PCM_0 0 +#define CLK_PCM_1 1 +#define CLK_PCM_2 2 +#define CLK_SPDIFF 3 + +/* CLOCKGEN D2 */ +#define CLK_PIX_MAIN_DISP 0 +#define CLK_PIX_PIP 1 +#define CLK_PIX_GDP1 2 +#define CLK_PIX_GDP2 3 +#define CLK_PIX_GDP3 4 +#define CLK_PIX_GDP4 5 +#define CLK_PIX_AUX_DISP 6 +#define CLK_DENC 7 +#define CLK_PIX_HDDAC 8 +#define CLK_HDDAC 9 +#define CLK_SDDAC 10 +#define CLK_PIX_DVO 11 +#define CLK_DVO 12 +#define CLK_PIX_HDMI 13 +#define CLK_TMDS_HDMI 14 +#define CLK_REF_HDMIPHY 15 + +/* CLOCKGEN D3 */ +#define CLK_STFE_FRC1 0 +#define CLK_TSOUT_0 1 +#define CLK_TSOUT_1 2 +#define CLK_MCHI 3 +#define CLK_VSENS_COMPO 4 +#define CLK_FRC1_REMOTE 5 +#define CLK_LPC_0 6 +#define CLK_LPC_1 7 +#endif diff --git a/include/dt-bindings/clock/stih410-clks.h b/include/dt-bindings/clock/stih410-clks.h new file mode 100644 index 000000000..90cbe6154 --- /dev/null +++ b/include/dt-bindings/clock/stih410-clks.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants clk index STMicroelectronics + * STiH410 SoC. 
+ */ +#ifndef _DT_BINDINGS_CLK_STIH410 +#define _DT_BINDINGS_CLK_STIH410 + +#include "stih407-clks.h" + +/* STiH410 introduces new clock outputs compared to STiH407 */ + +/* CLOCKGEN C0 */ +#define CLK_TX_ICN_HADES 32 +#define CLK_RX_ICN_HADES 33 +#define CLK_ICN_REG_16 34 +#define CLK_PP_HADES 35 +#define CLK_CLUST_HADES 36 +#define CLK_HWPE_HADES 37 +#define CLK_FC_HADES 38 + +/* CLOCKGEN D0 */ +#define CLK_PCMR10_MASTER 4 +#define CLK_USB2_PHY 5 + +#endif diff --git a/include/dt-bindings/clock/stih416-clks.h b/include/dt-bindings/clock/stih416-clks.h new file mode 100644 index 000000000..743022780 --- /dev/null +++ b/include/dt-bindings/clock/stih416-clks.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants clk index STMicroelectronics + * STiH416 SoC. + */ +#ifndef _CLK_STIH416 +#define _CLK_STIH416 + +/* CLOCKGEN A0 */ +#define CLK_ICN_REG 0 +#define CLK_ETH1_PHY 4 + +/* CLOCKGEN A1 */ +#define CLK_ICN_IF_2 0 +#define CLK_GMAC0_PHY 3 + +#endif diff --git a/include/dt-bindings/clock/stih418-clks.h b/include/dt-bindings/clock/stih418-clks.h new file mode 100644 index 000000000..0e7fba0c5 --- /dev/null +++ b/include/dt-bindings/clock/stih418-clks.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants clk index STMicroelectronics + * STiH418 SoC. + */ +#ifndef _DT_BINDINGS_CLK_STIH418 +#define _DT_BINDINGS_CLK_STIH418 + +#include "stih410-clks.h" + +/* STiH418 introduces new clock outputs compared to STiH410 */ + +/* CLOCKGEN C0 */ +#define CLK_PROC_BDISP_0 14 +#define CLK_PROC_BDISP_1 15 +#define CLK_TX_ICN_1 23 +#define CLK_ETH_PHYREF 27 +#define CLK_PP_HEVC 35 +#define CLK_CLUST_HEVC 36 +#define CLK_HWPE_HEVC 37 +#define CLK_FC_HEVC 38 +#define CLK_PROC_MIXER 39 +#define CLK_PROC_SC 40 +#define CLK_AVSP_HEVC 41 + +/* CLOCKGEN D2 */ +#undef CLK_PIX_PIP +#undef CLK_PIX_GDP1 +#undef CLK_PIX_GDP2 +#undef CLK_PIX_GDP3 +#undef CLK_PIX_GDP4 + +#define CLK_TMDS_HDMI_DIV2 5 +#define CLK_VP9 47 +#endif diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h new file mode 100644 index 000000000..58d8b515b --- /dev/null +++ b/include/dt-bindings/clock/stm32fx-clock.h @@ -0,0 +1,60 @@ +/* + * stm32fx-clock.h + * + * Copyright (C) 2016 STMicroelectronics + * Author: Gabriel Fernandez for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +/* + * List of clocks wich are not derived from system clock (SYSCLOCK) + * + * The index of these clocks is the secondary index of DT bindings + * (see Documentatoin/devicetree/bindings/clock/st,stm32-rcc.txt) + * + * e.g: + ; +*/ + +#ifndef _DT_BINDINGS_CLK_STMFX_H +#define _DT_BINDINGS_CLK_STMFX_H + +#define SYSTICK 0 +#define FCLK 1 +#define CLK_LSI 2 +#define CLK_LSE 3 +#define CLK_HSE_RTC 4 +#define CLK_RTC 5 +#define PLL_VCO_I2S 6 +#define PLL_VCO_SAI 7 +#define CLK_LCD 8 +#define CLK_I2S 9 +#define CLK_SAI1 10 +#define CLK_SAI2 11 +#define CLK_I2SQ_PDIV 12 +#define CLK_SAIQ_PDIV 13 +#define CLK_HSI 14 +#define CLK_SYSCLK 15 +#define CLK_F469_DSI 16 + +#define END_PRIMARY_CLK 17 + +#define CLK_HDMI_CEC 16 +#define CLK_SPDIF 17 +#define CLK_USART1 18 +#define CLK_USART2 19 +#define CLK_USART3 20 +#define CLK_UART4 21 +#define CLK_UART5 22 +#define CLK_USART6 23 +#define CLK_UART7 24 +#define CLK_UART8 25 +#define CLK_I2C1 26 +#define CLK_I2C2 27 +#define CLK_I2C3 28 +#define CLK_I2C4 29 +#define CLK_LPTIMER 30 + +#define END_PRIMARY_CLK_F7 31 + +#endif diff --git a/include/dt-bindings/clock/stm32h7-clks.h b/include/dt-bindings/clock/stm32h7-clks.h new file mode 100644 index 000000000..6637272b3 --- /dev/null +++ b/include/dt-bindings/clock/stm32h7-clks.h @@ -0,0 +1,165 @@ +/* SYS, CORE AND BUS CLOCKS */ +#define SYS_D1CPRE 0 +#define HCLK 1 +#define PCLK1 2 +#define PCLK2 3 +#define PCLK3 4 +#define PCLK4 5 +#define HSI_DIV 6 +#define HSE_1M 7 +#define I2S_CKIN 8 +#define CK_DSI_PHY 9 +#define HSE_CK 10 +#define LSE_CK 11 +#define CSI_KER_DIV122 12 +#define RTC_CK 13 +#define CPU_SYSTICK 14 + +/* OSCILLATOR BANK */ +#define OSC_BANK 18 +#define HSI_CK 18 +#define HSI_KER_CK 19 +#define CSI_CK 20 +#define CSI_KER_CK 21 +#define RC48_CK 22 +#define LSI_CK 23 + +/* MCLOCK BANK */ +#define MCLK_BANK 28 +#define PER_CK 28 +#define PLLSRC 29 +#define SYS_CK 30 +#define TRACEIN_CK 31 + +/* ODF BANK */ +#define ODF_BANK 32 +#define PLL1_P 32 +#define PLL1_Q 33 +#define PLL1_R 34 +#define PLL2_P 35 +#define PLL2_Q 36 +#define PLL2_R 37 +#define PLL3_P 38 +#define PLL3_Q 39 +#define PLL3_R 40 + +/* MCO BANK */ +#define MCO_BANK 41 +#define MCO1 41 +#define MCO2 42 + +/* PERIF BANK */ +#define PERIF_BANK 50 +#define D1SRAM1_CK 50 +#define ITCM_CK 51 +#define DTCM2_CK 52 +#define DTCM1_CK 53 +#define FLITF_CK 54 +#define JPGDEC_CK 55 +#define DMA2D_CK 56 +#define MDMA_CK 57 +#define USB2ULPI_CK 58 +#define USB1ULPI_CK 59 +#define ETH1RX_CK 60 +#define ETH1TX_CK 61 +#define ETH1MAC_CK 62 +#define ART_CK 63 +#define DMA2_CK 64 +#define DMA1_CK 65 +#define D2SRAM3_CK 66 +#define D2SRAM2_CK 67 +#define D2SRAM1_CK 68 +#define HASH_CK 69 +#define CRYPT_CK 70 +#define CAMITF_CK 71 +#define BKPRAM_CK 72 +#define HSEM_CK 73 +#define BDMA_CK 74 +#define CRC_CK 75 +#define GPIOK_CK 76 +#define GPIOJ_CK 77 +#define GPIOI_CK 78 +#define GPIOH_CK 79 +#define GPIOG_CK 80 +#define GPIOF_CK 81 +#define GPIOE_CK 82 +#define GPIOD_CK 83 +#define GPIOC_CK 84 +#define GPIOB_CK 85 +#define GPIOA_CK 86 +#define WWDG1_CK 87 +#define DAC12_CK 88 +#define WWDG2_CK 89 +#define TIM14_CK 90 +#define TIM13_CK 91 +#define TIM12_CK 92 +#define TIM7_CK 93 +#define TIM6_CK 94 +#define TIM5_CK 95 +#define TIM4_CK 96 +#define TIM3_CK 97 +#define TIM2_CK 98 +#define MDIOS_CK 99 +#define OPAMP_CK 100 +#define CRS_CK 101 +#define TIM17_CK 102 +#define TIM16_CK 103 +#define TIM15_CK 104 +#define TIM8_CK 105 +#define TIM1_CK 106 +#define TMPSENS_CK 107 +#define 
RTCAPB_CK 108 +#define VREF_CK 109 +#define COMP12_CK 110 +#define SYSCFG_CK 111 + +/* KERNEL BANK */ +#define KERN_BANK 120 +#define SDMMC1_CK 120 +#define QUADSPI_CK 121 +#define FMC_CK 122 +#define USB2OTG_CK 123 +#define USB1OTG_CK 124 +#define ADC12_CK 125 +#define SDMMC2_CK 126 +#define RNG_CK 127 +#define ADC3_CK 128 +#define DSI_CK 129 +#define LTDC_CK 130 +#define USART8_CK 131 +#define USART7_CK 132 +#define HDMICEC_CK 133 +#define I2C3_CK 134 +#define I2C2_CK 135 +#define I2C1_CK 136 +#define UART5_CK 137 +#define UART4_CK 138 +#define USART3_CK 139 +#define USART2_CK 140 +#define SPDIFRX_CK 141 +#define SPI3_CK 142 +#define SPI2_CK 143 +#define LPTIM1_CK 144 +#define FDCAN_CK 145 +#define SWP_CK 146 +#define HRTIM_CK 147 +#define DFSDM1_CK 148 +#define SAI3_CK 149 +#define SAI2_CK 150 +#define SAI1_CK 151 +#define SPI5_CK 152 +#define SPI4_CK 153 +#define SPI1_CK 154 +#define USART6_CK 155 +#define USART1_CK 156 +#define SAI4B_CK 157 +#define SAI4A_CK 158 +#define LPTIM5_CK 159 +#define LPTIM4_CK 160 +#define LPTIM3_CK 161 +#define LPTIM2_CK 162 +#define I2C4_CK 163 +#define SPI6_CK 164 +#define LPUART1_CK 165 + +#define STM32H7_MAX_CLKS 166 diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h new file mode 100644 index 000000000..90ec780bf --- /dev/null +++ b/include/dt-bindings/clock/stm32mp1-clks.h @@ -0,0 +1,254 @@ +/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Author: Gabriel Fernandez for STMicroelectronics. + */ + +#ifndef _DT_BINDINGS_STM32MP1_CLKS_H_ +#define _DT_BINDINGS_STM32MP1_CLKS_H_ + +/* OSCILLATOR clocks */ +#define CK_HSE 0 +#define CK_CSI 1 +#define CK_LSI 2 +#define CK_LSE 3 +#define CK_HSI 4 +#define CK_HSE_DIV2 5 + +/* Bus clocks */ +#define TIM2 6 +#define TIM3 7 +#define TIM4 8 +#define TIM5 9 +#define TIM6 10 +#define TIM7 11 +#define TIM12 12 +#define TIM13 13 +#define TIM14 14 +#define LPTIM1 15 +#define SPI2 16 +#define SPI3 17 +#define USART2 18 +#define USART3 19 +#define UART4 20 +#define UART5 21 +#define UART7 22 +#define UART8 23 +#define I2C1 24 +#define I2C2 25 +#define I2C3 26 +#define I2C5 27 +#define SPDIF 28 +#define CEC 29 +#define DAC12 30 +#define MDIO 31 +#define TIM1 32 +#define TIM8 33 +#define TIM15 34 +#define TIM16 35 +#define TIM17 36 +#define SPI1 37 +#define SPI4 38 +#define SPI5 39 +#define USART6 40 +#define SAI1 41 +#define SAI2 42 +#define SAI3 43 +#define DFSDM 44 +#define FDCAN 45 +#define LPTIM2 46 +#define LPTIM3 47 +#define LPTIM4 48 +#define LPTIM5 49 +#define SAI4 50 +#define SYSCFG 51 +#define VREF 52 +#define TMPSENS 53 +#define PMBCTRL 54 +#define HDP 55 +#define LTDC 56 +#define DSI 57 +#define IWDG2 58 +#define USBPHY 59 +#define STGENRO 60 +#define SPI6 61 +#define I2C4 62 +#define I2C6 63 +#define USART1 64 +#define RTCAPB 65 +#define TZC1 66 +#define TZPC 67 +#define IWDG1 68 +#define BSEC 69 +#define STGEN 70 +#define DMA1 71 +#define DMA2 72 +#define DMAMUX 73 +#define ADC12 74 +#define USBO 75 +#define SDMMC3 76 +#define DCMI 77 +#define CRYP2 78 +#define HASH2 79 +#define RNG2 80 +#define CRC2 81 +#define HSEM 82 +#define IPCC 83 +#define GPIOA 84 +#define GPIOB 85 +#define GPIOC 86 +#define GPIOD 87 +#define GPIOE 88 +#define GPIOF 89 +#define GPIOG 90 +#define GPIOH 91 +#define GPIOI 92 +#define GPIOJ 93 +#define GPIOK 94 +#define GPIOZ 95 +#define CRYP1 96 +#define HASH1 97 +#define RNG1 98 +#define BKPSRAM 99 +#define MDMA 100 +#define GPU 101 +#define ETHCK 102 +#define 
ETHTX 103 +#define ETHRX 104 +#define ETHMAC 105 +#define FMC 106 +#define QSPI 107 +#define SDMMC1 108 +#define SDMMC2 109 +#define CRC1 110 +#define USBH 111 +#define ETHSTP 112 +#define TZC2 113 + +/* Kernel clocks */ +#define SDMMC1_K 118 +#define SDMMC2_K 119 +#define SDMMC3_K 120 +#define FMC_K 121 +#define QSPI_K 122 +#define ETHCK_K 123 +#define RNG1_K 124 +#define RNG2_K 125 +#define GPU_K 126 +#define USBPHY_K 127 +#define STGEN_K 128 +#define SPDIF_K 129 +#define SPI1_K 130 +#define SPI2_K 131 +#define SPI3_K 132 +#define SPI4_K 133 +#define SPI5_K 134 +#define SPI6_K 135 +#define CEC_K 136 +#define I2C1_K 137 +#define I2C2_K 138 +#define I2C3_K 139 +#define I2C4_K 140 +#define I2C5_K 141 +#define I2C6_K 142 +#define LPTIM1_K 143 +#define LPTIM2_K 144 +#define LPTIM3_K 145 +#define LPTIM4_K 146 +#define LPTIM5_K 147 +#define USART1_K 148 +#define USART2_K 149 +#define USART3_K 150 +#define UART4_K 151 +#define UART5_K 152 +#define USART6_K 153 +#define UART7_K 154 +#define UART8_K 155 +#define DFSDM_K 156 +#define FDCAN_K 157 +#define SAI1_K 158 +#define SAI2_K 159 +#define SAI3_K 160 +#define SAI4_K 161 +#define ADC12_K 162 +#define DSI_K 163 +#define DSI_PX 164 +#define ADFSDM_K 165 +#define USBO_K 166 +#define LTDC_PX 167 +#define DAC12_K 168 +#define ETHPTP_K 169 + +/* PLL */ +#define PLL1 176 +#define PLL2 177 +#define PLL3 178 +#define PLL4 179 + +/* ODF */ +#define PLL1_P 180 +#define PLL1_Q 181 +#define PLL1_R 182 +#define PLL2_P 183 +#define PLL2_Q 184 +#define PLL2_R 185 +#define PLL3_P 186 +#define PLL3_Q 187 +#define PLL3_R 188 +#define PLL4_P 189 +#define PLL4_Q 190 +#define PLL4_R 191 + +/* AUX */ +#define RTC 192 + +/* MCLK */ +#define CK_PER 193 +#define CK_MPU 194 +#define CK_AXI 195 +#define CK_MCU 196 + +/* Time base */ +#define TIM2_K 197 +#define TIM3_K 198 +#define TIM4_K 199 +#define TIM5_K 200 +#define TIM6_K 201 +#define TIM7_K 202 +#define TIM12_K 203 +#define TIM13_K 204 +#define TIM14_K 205 +#define TIM1_K 206 +#define TIM8_K 207 +#define TIM15_K 208 +#define TIM16_K 209 +#define TIM17_K 210 + +/* MCO clocks */ +#define CK_MCO1 211 +#define CK_MCO2 212 + +/* TRACE & DEBUG clocks */ +#define CK_DBG 214 +#define CK_TRACE 215 + +/* DDR */ +#define DDRC1 220 +#define DDRC1LP 221 +#define DDRC2 222 +#define DDRC2LP 223 +#define DDRPHYC 224 +#define DDRPHYCLP 225 +#define DDRCAPB 226 +#define DDRCAPBLP 227 +#define AXIDCG 228 +#define DDRPHYCAPB 229 +#define DDRPHYCAPBLP 230 +#define DDRPERFM 231 + +#define STM32MP1_LAST_CLK 232 + +#define LTDC_K LTDC_PX +#define ETHMAC_K ETHCK_K + +#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */ diff --git a/include/dt-bindings/clock/stratix10-clock.h b/include/dt-bindings/clock/stratix10-clock.h new file mode 100644 index 000000000..0ac1c90a1 --- /dev/null +++ b/include/dt-bindings/clock/stratix10-clock.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2017, Intel Corporation + */ + +#ifndef __STRATIX10_CLOCK_H +#define __STRATIX10_CLOCK_H + +/* fixed rate clocks */ +#define STRATIX10_OSC1 0 +#define STRATIX10_CB_INTOSC_HS_DIV2_CLK 1 +#define STRATIX10_CB_INTOSC_LS_CLK 2 +#define STRATIX10_F2S_FREE_CLK 3 + +/* fixed factor clocks */ +#define STRATIX10_L4_SYS_FREE_CLK 4 +#define STRATIX10_MPU_PERIPH_CLK 5 +#define STRATIX10_MPU_L2RAM_CLK 6 +#define STRATIX10_SDMMC_CIU_CLK 7 + +/* PLL clocks */ +#define STRATIX10_MAIN_PLL_CLK 8 +#define STRATIX10_PERIPH_PLL_CLK 9 +#define STRATIX10_BOOT_CLK 10 + +/* Periph clocks */ +#define STRATIX10_MAIN_MPU_BASE_CLK 11 +#define STRATIX10_MAIN_NOC_BASE_CLK 12 
+#define STRATIX10_MAIN_EMACA_CLK 13 +#define STRATIX10_MAIN_EMACB_CLK 14 +#define STRATIX10_MAIN_EMAC_PTP_CLK 15 +#define STRATIX10_MAIN_GPIO_DB_CLK 16 +#define STRATIX10_MAIN_SDMMC_CLK 17 +#define STRATIX10_MAIN_S2F_USR0_CLK 18 +#define STRATIX10_MAIN_S2F_USR1_CLK 19 +#define STRATIX10_MAIN_PSI_REF_CLK 20 + +#define STRATIX10_PERI_MPU_BASE_CLK 21 +#define STRATIX10_PERI_NOC_BASE_CLK 22 +#define STRATIX10_PERI_EMACA_CLK 23 +#define STRATIX10_PERI_EMACB_CLK 24 +#define STRATIX10_PERI_EMAC_PTP_CLK 25 +#define STRATIX10_PERI_GPIO_DB_CLK 26 +#define STRATIX10_PERI_SDMMC_CLK 27 +#define STRATIX10_PERI_S2F_USR0_CLK 28 +#define STRATIX10_PERI_S2F_USR1_CLK 29 +#define STRATIX10_PERI_PSI_REF_CLK 30 + +#define STRATIX10_MPU_FREE_CLK 31 +#define STRATIX10_NOC_FREE_CLK 32 +#define STRATIX10_S2F_USR0_CLK 33 +#define STRATIX10_NOC_CLK 34 +#define STRATIX10_EMAC_A_FREE_CLK 35 +#define STRATIX10_EMAC_B_FREE_CLK 36 +#define STRATIX10_EMAC_PTP_FREE_CLK 37 +#define STRATIX10_GPIO_DB_FREE_CLK 38 +#define STRATIX10_SDMMC_FREE_CLK 39 +#define STRATIX10_S2F_USER1_FREE_CLK 40 +#define STRATIX10_PSI_REF_FREE_CLK 41 + +/* Gate clocks */ +#define STRATIX10_MPU_CLK 42 +#define STRATIX10_L4_MAIN_CLK 43 +#define STRATIX10_L4_MP_CLK 44 +#define STRATIX10_L4_SP_CLK 45 +#define STRATIX10_CS_AT_CLK 46 +#define STRATIX10_CS_TRACE_CLK 47 +#define STRATIX10_CS_PDBG_CLK 48 +#define STRATIX10_CS_TIMER_CLK 49 +#define STRATIX10_S2F_USER0_CLK 50 +#define STRATIX10_S2F_USER1_CLK 51 +#define STRATIX10_EMAC0_CLK 52 +#define STRATIX10_EMAC1_CLK 53 +#define STRATIX10_EMAC2_CLK 54 +#define STRATIX10_EMAC_PTP_CLK 55 +#define STRATIX10_GPIO_DB_CLK 56 +#define STRATIX10_SDMMC_CLK 57 +#define STRATIX10_PSI_REF_CLK 58 +#define STRATIX10_USB_CLK 59 +#define STRATIX10_SPI_M_CLK 60 +#define STRATIX10_NAND_CLK 61 +#define STRATIX10_NUM_CLKS 62 + +#endif /* __STRATIX10_CLOCK_H */ diff --git a/include/dt-bindings/clock/sun4i-a10-ccu.h b/include/dt-bindings/clock/sun4i-a10-ccu.h new file mode 100644 index 000000000..e4fa61be5 --- /dev/null +++ b/include/dt-bindings/clock/sun4i-a10-ccu.h @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2017 Priit Laes + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN4I_A10_H_ +#define _DT_BINDINGS_CLK_SUN4I_A10_H_ + +#define CLK_HOSC 1 +#define CLK_PLL_VIDEO0_2X 9 +#define CLK_PLL_VIDEO1_2X 18 +#define CLK_CPU 20 + +/* AHB Gates */ +#define CLK_AHB_OTG 26 +#define CLK_AHB_EHCI0 27 +#define CLK_AHB_OHCI0 28 +#define CLK_AHB_EHCI1 29 +#define CLK_AHB_OHCI1 30 +#define CLK_AHB_SS 31 +#define CLK_AHB_DMA 32 +#define CLK_AHB_BIST 33 +#define CLK_AHB_MMC0 34 +#define CLK_AHB_MMC1 35 +#define CLK_AHB_MMC2 36 +#define CLK_AHB_MMC3 37 +#define CLK_AHB_MS 38 +#define CLK_AHB_NAND 39 +#define CLK_AHB_SDRAM 40 +#define CLK_AHB_ACE 41 +#define CLK_AHB_EMAC 42 +#define CLK_AHB_TS 43 +#define CLK_AHB_SPI0 44 +#define CLK_AHB_SPI1 45 +#define CLK_AHB_SPI2 46 +#define CLK_AHB_SPI3 47 +#define CLK_AHB_PATA 48 +#define CLK_AHB_SATA 49 +#define CLK_AHB_GPS 50 +#define CLK_AHB_HSTIMER 51 +#define CLK_AHB_VE 52 +#define CLK_AHB_TVD 53 +#define CLK_AHB_TVE0 54 +#define CLK_AHB_TVE1 55 +#define CLK_AHB_LCD0 56 +#define CLK_AHB_LCD1 57 +#define CLK_AHB_CSI0 58 +#define CLK_AHB_CSI1 59 +#define CLK_AHB_HDMI0 60 +#define CLK_AHB_HDMI1 61 +#define CLK_AHB_DE_BE0 62 +#define CLK_AHB_DE_BE1 63 +#define CLK_AHB_DE_FE0 64 +#define CLK_AHB_DE_FE1 65 +#define CLK_AHB_GMAC 66 +#define CLK_AHB_MP 67 +#define CLK_AHB_GPU 68 + +/* APB0 Gates */ +#define CLK_APB0_CODEC 69 +#define CLK_APB0_SPDIF 70 +#define CLK_APB0_I2S0 71 +#define CLK_APB0_AC97 72 +#define CLK_APB0_I2S1 73 +#define CLK_APB0_PIO 74 +#define CLK_APB0_IR0 75 +#define CLK_APB0_IR1 76 +#define CLK_APB0_I2S2 77 +#define CLK_APB0_KEYPAD 78 + +/* APB1 Gates */ +#define CLK_APB1_I2C0 79 +#define CLK_APB1_I2C1 80 +#define CLK_APB1_I2C2 81 +#define CLK_APB1_I2C3 82 +#define CLK_APB1_CAN 83 +#define CLK_APB1_SCR 84 +#define CLK_APB1_PS20 85 +#define CLK_APB1_PS21 86 +#define CLK_APB1_I2C4 87 +#define CLK_APB1_UART0 88 +#define CLK_APB1_UART1 89 +#define CLK_APB1_UART2 90 +#define CLK_APB1_UART3 91 +#define CLK_APB1_UART4 92 +#define CLK_APB1_UART5 93 +#define CLK_APB1_UART6 94 +#define CLK_APB1_UART7 95 + +/* IP clocks */ +#define CLK_NAND 96 +#define CLK_MS 97 +#define CLK_MMC0 98 +#define CLK_MMC0_OUTPUT 99 +#define CLK_MMC0_SAMPLE 100 +#define CLK_MMC1 101 +#define CLK_MMC1_OUTPUT 102 +#define CLK_MMC1_SAMPLE 103 +#define CLK_MMC2 104 +#define CLK_MMC2_OUTPUT 105 +#define CLK_MMC2_SAMPLE 106 +#define CLK_MMC3 107 +#define CLK_MMC3_OUTPUT 108 +#define CLK_MMC3_SAMPLE 109 +#define CLK_TS 110 +#define CLK_SS 111 +#define CLK_SPI0 112 +#define CLK_SPI1 113 +#define CLK_SPI2 114 +#define CLK_PATA 115 +#define CLK_IR0 116 +#define CLK_IR1 117 +#define CLK_I2S0 118 +#define CLK_AC97 119 +#define CLK_SPDIF 120 +#define CLK_KEYPAD 121 +#define CLK_SATA 122 +#define CLK_USB_OHCI0 123 +#define CLK_USB_OHCI1 124 +#define CLK_USB_PHY 125 +#define CLK_GPS 126 +#define CLK_SPI3 127 +#define CLK_I2S1 128 +#define CLK_I2S2 129 + +/* DRAM Gates */ +#define CLK_DRAM_VE 130 +#define CLK_DRAM_CSI0 131 +#define CLK_DRAM_CSI1 132 +#define CLK_DRAM_TS 133 +#define CLK_DRAM_TVD 134 +#define CLK_DRAM_TVE0 135 +#define CLK_DRAM_TVE1 136 +#define CLK_DRAM_OUT 137 +#define CLK_DRAM_DE_FE1 138 
+#define CLK_DRAM_DE_FE0 139 +#define CLK_DRAM_DE_BE0 140 +#define CLK_DRAM_DE_BE1 141 +#define CLK_DRAM_MP 142 +#define CLK_DRAM_ACE 143 + +/* Display Engine Clocks */ +#define CLK_DE_BE0 144 +#define CLK_DE_BE1 145 +#define CLK_DE_FE0 146 +#define CLK_DE_FE1 147 +#define CLK_DE_MP 148 +#define CLK_TCON0_CH0 149 +#define CLK_TCON1_CH0 150 +#define CLK_CSI_SCLK 151 +#define CLK_TVD_SCLK2 152 +#define CLK_TVD 153 +#define CLK_TCON0_CH1_SCLK2 154 +#define CLK_TCON0_CH1 155 +#define CLK_TCON1_CH1_SCLK2 156 +#define CLK_TCON1_CH1 157 +#define CLK_CSI0 158 +#define CLK_CSI1 159 +#define CLK_CODEC 160 +#define CLK_VE 161 +#define CLK_AVS 162 +#define CLK_ACE 163 +#define CLK_HDMI 164 +#define CLK_GPU 165 + +#endif /* _DT_BINDINGS_CLK_SUN4I_A10_H_ */ diff --git a/include/dt-bindings/clock/sun4i-a10-pll2.h b/include/dt-bindings/clock/sun4i-a10-pll2.h new file mode 100644 index 000000000..071c8112d --- /dev/null +++ b/include/dt-bindings/clock/sun4i-a10-pll2.h @@ -0,0 +1,53 @@ +/* + * Copyright 2015 Maxime Ripard + * + * Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_ +#define __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_ + +#define SUN4I_A10_PLL2_1X 0 +#define SUN4I_A10_PLL2_2X 1 +#define SUN4I_A10_PLL2_4X 2 +#define SUN4I_A10_PLL2_8X 3 + +#endif /* __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_ */ diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h new file mode 100644 index 000000000..d66432c6e --- /dev/null +++ b/include/dt-bindings/clock/sun50i-a64-ccu.h @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ +#define _DT_BINDINGS_CLK_SUN50I_A64_H_ + +#define CLK_PLL_PERIPH0 11 + +#define CLK_BUS_MIPI_DSI 28 +#define CLK_BUS_CE 29 +#define CLK_BUS_DMA 30 +#define CLK_BUS_MMC0 31 +#define CLK_BUS_MMC1 32 +#define CLK_BUS_MMC2 33 +#define CLK_BUS_NAND 34 +#define CLK_BUS_DRAM 35 +#define CLK_BUS_EMAC 36 +#define CLK_BUS_TS 37 +#define CLK_BUS_HSTIMER 38 +#define CLK_BUS_SPI0 39 +#define CLK_BUS_SPI1 40 +#define CLK_BUS_OTG 41 +#define CLK_BUS_EHCI0 42 +#define CLK_BUS_EHCI1 43 +#define CLK_BUS_OHCI0 44 +#define CLK_BUS_OHCI1 45 +#define CLK_BUS_VE 46 +#define CLK_BUS_TCON0 47 +#define CLK_BUS_TCON1 48 +#define CLK_BUS_DEINTERLACE 49 +#define CLK_BUS_CSI 50 +#define CLK_BUS_HDMI 51 +#define CLK_BUS_DE 52 +#define CLK_BUS_GPU 53 +#define CLK_BUS_MSGBOX 54 +#define CLK_BUS_SPINLOCK 55 +#define CLK_BUS_CODEC 56 +#define CLK_BUS_SPDIF 57 +#define CLK_BUS_PIO 58 +#define CLK_BUS_THS 59 +#define CLK_BUS_I2S0 60 +#define CLK_BUS_I2S1 61 +#define CLK_BUS_I2S2 62 +#define CLK_BUS_I2C0 63 +#define CLK_BUS_I2C1 64 +#define CLK_BUS_I2C2 65 +#define CLK_BUS_SCR 66 +#define CLK_BUS_UART0 67 +#define CLK_BUS_UART1 68 +#define CLK_BUS_UART2 69 +#define CLK_BUS_UART3 70 +#define CLK_BUS_UART4 71 +#define CLK_BUS_DBG 72 +#define CLK_THS 73 +#define CLK_NAND 74 +#define CLK_MMC0 75 +#define CLK_MMC1 76 +#define CLK_MMC2 77 +#define CLK_TS 78 +#define CLK_CE 79 +#define CLK_SPI0 80 +#define CLK_SPI1 81 +#define CLK_I2S0 82 +#define CLK_I2S1 83 +#define CLK_I2S2 84 +#define CLK_SPDIF 85 +#define CLK_USB_PHY0 86 +#define CLK_USB_PHY1 87 +#define CLK_USB_HSIC 88 +#define CLK_USB_HSIC_12M 89 + +#define CLK_USB_OHCI0 91 + +#define CLK_USB_OHCI1 93 + +#define CLK_DRAM_VE 95 +#define CLK_DRAM_CSI 96 +#define CLK_DRAM_DEINTERLACE 97 +#define CLK_DRAM_TS 98 +#define CLK_DE 99 +#define CLK_TCON0 100 +#define CLK_TCON1 101 +#define CLK_DEINTERLACE 102 +#define CLK_CSI_MISC 103 +#define CLK_CSI_SCLK 104 +#define CLK_CSI_MCLK 105 +#define CLK_VE 106 +#define CLK_AC_DIG 107 +#define CLK_AC_DIG_4X 108 +#define CLK_AVS 109 +#define CLK_HDMI 110 +#define CLK_HDMI_DDC 111 + +#define CLK_DSI_DPHY 113 +#define CLK_GPU 114 + +#endif /* _DT_BINDINGS_CLK_SUN50I_H_ */ diff --git a/include/dt-bindings/clock/sun50i-h6-ccu.h b/include/dt-bindings/clock/sun50i-h6-ccu.h new file mode 100644 index 000000000..a1545cd60 --- /dev/null +++ b/include/dt-bindings/clock/sun50i-h6-ccu.h @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: (GPL-2.0+ or MIT) +/* + * Copyright (C) 2017 Icenowy Zheng + */ + +#ifndef _DT_BINDINGS_CLK_SUN50I_H6_H_ +#define _DT_BINDINGS_CLK_SUN50I_H6_H_ + +#define CLK_PLL_PERIPH0 3 + +#define CLK_CPUX 21 + +#define CLK_APB1 26 + +#define CLK_DE 29 +#define CLK_BUS_DE 30 +#define CLK_DEINTERLACE 31 +#define CLK_BUS_DEINTERLACE 32 +#define CLK_GPU 33 +#define CLK_BUS_GPU 34 +#define CLK_CE 35 +#define CLK_BUS_CE 36 +#define CLK_VE 37 +#define CLK_BUS_VE 38 +#define CLK_EMCE 39 +#define CLK_BUS_EMCE 40 +#define CLK_VP9 41 +#define CLK_BUS_VP9 42 +#define CLK_BUS_DMA 43 +#define CLK_BUS_MSGBOX 44 +#define CLK_BUS_SPINLOCK 45 +#define CLK_BUS_HSTIMER 46 +#define CLK_AVS 47 +#define CLK_BUS_DBG 48 +#define CLK_BUS_PSI 49 +#define CLK_BUS_PWM 50 +#define CLK_BUS_IOMMU 51 + +#define CLK_MBUS_DMA 53 +#define CLK_MBUS_VE 54 +#define CLK_MBUS_CE 55 +#define CLK_MBUS_TS 56 +#define CLK_MBUS_NAND 57 +#define CLK_MBUS_CSI 58 +#define CLK_MBUS_DEINTERLACE 59 + +#define CLK_NAND0 61 +#define CLK_NAND1 62 +#define CLK_BUS_NAND 63 +#define CLK_MMC0 64 +#define CLK_MMC1 65 +#define CLK_MMC2 66 +#define 
CLK_BUS_MMC0 67 +#define CLK_BUS_MMC1 68 +#define CLK_BUS_MMC2 69 +#define CLK_BUS_UART0 70 +#define CLK_BUS_UART1 71 +#define CLK_BUS_UART2 72 +#define CLK_BUS_UART3 73 +#define CLK_BUS_I2C0 74 +#define CLK_BUS_I2C1 75 +#define CLK_BUS_I2C2 76 +#define CLK_BUS_I2C3 77 +#define CLK_BUS_SCR0 78 +#define CLK_BUS_SCR1 79 +#define CLK_SPI0 80 +#define CLK_SPI1 81 +#define CLK_BUS_SPI0 82 +#define CLK_BUS_SPI1 83 +#define CLK_BUS_EMAC 84 +#define CLK_TS 85 +#define CLK_BUS_TS 86 +#define CLK_IR_TX 87 +#define CLK_BUS_IR_TX 88 +#define CLK_BUS_THS 89 +#define CLK_I2S3 90 +#define CLK_I2S0 91 +#define CLK_I2S1 92 +#define CLK_I2S2 93 +#define CLK_BUS_I2S0 94 +#define CLK_BUS_I2S1 95 +#define CLK_BUS_I2S2 96 +#define CLK_BUS_I2S3 97 +#define CLK_SPDIF 98 +#define CLK_BUS_SPDIF 99 +#define CLK_DMIC 100 +#define CLK_BUS_DMIC 101 +#define CLK_AUDIO_HUB 102 +#define CLK_BUS_AUDIO_HUB 103 +#define CLK_USB_OHCI0 104 +#define CLK_USB_PHY0 105 +#define CLK_USB_PHY1 106 +#define CLK_USB_OHCI3 107 +#define CLK_USB_PHY3 108 +#define CLK_USB_HSIC_12M 109 +#define CLK_USB_HSIC 110 +#define CLK_BUS_OHCI0 111 +#define CLK_BUS_OHCI3 112 +#define CLK_BUS_EHCI0 113 +#define CLK_BUS_XHCI 114 +#define CLK_BUS_EHCI3 115 +#define CLK_BUS_OTG 116 +#define CLK_PCIE_REF_100M 117 +#define CLK_PCIE_REF 118 +#define CLK_PCIE_REF_OUT 119 +#define CLK_PCIE_MAXI 120 +#define CLK_PCIE_AUX 121 +#define CLK_BUS_PCIE 122 +#define CLK_HDMI 123 +#define CLK_HDMI_SLOW 124 +#define CLK_HDMI_CEC 125 +#define CLK_BUS_HDMI 126 +#define CLK_BUS_TCON_TOP 127 +#define CLK_TCON_LCD0 128 +#define CLK_BUS_TCON_LCD0 129 +#define CLK_TCON_TV0 130 +#define CLK_BUS_TCON_TV0 131 +#define CLK_CSI_CCI 132 +#define CLK_CSI_TOP 133 +#define CLK_CSI_MCLK 134 +#define CLK_BUS_CSI 135 +#define CLK_HDCP 136 +#define CLK_BUS_HDCP 137 + +#endif /* _DT_BINDINGS_CLK_SUN50I_H6_H_ */ diff --git a/include/dt-bindings/clock/sun50i-h6-r-ccu.h b/include/dt-bindings/clock/sun50i-h6-r-ccu.h new file mode 100644 index 000000000..76136132a --- /dev/null +++ b/include/dt-bindings/clock/sun50i-h6-r-ccu.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017 Icenowy Zheng + */ + +#ifndef _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ +#define _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ + +#define CLK_AR100 0 + +#define CLK_R_APB1 2 + +#define CLK_R_APB1_TIMER 4 +#define CLK_R_APB1_TWD 5 +#define CLK_R_APB1_PWM 6 +#define CLK_R_APB2_UART 7 +#define CLK_R_APB2_I2C 8 +#define CLK_R_APB1_IR 9 +#define CLK_R_APB1_W1 10 + +#define CLK_IR 11 +#define CLK_W1 12 + +#endif /* _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun5i-ccu.h b/include/dt-bindings/clock/sun5i-ccu.h new file mode 100644 index 000000000..81f34d477 --- /dev/null +++ b/include/dt-bindings/clock/sun5i-ccu.h @@ -0,0 +1,106 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN5I_H_ +#define _DT_BINDINGS_CLK_SUN5I_H_ + +#define CLK_HOSC 1 + +#define CLK_PLL_VIDEO0_2X 9 + +#define CLK_PLL_VIDEO1_2X 16 +#define CLK_CPU 17 + +#define CLK_AHB_OTG 23 +#define CLK_AHB_EHCI 24 +#define CLK_AHB_OHCI 25 +#define CLK_AHB_SS 26 +#define CLK_AHB_DMA 27 +#define CLK_AHB_BIST 28 +#define CLK_AHB_MMC0 29 +#define CLK_AHB_MMC1 30 +#define CLK_AHB_MMC2 31 +#define CLK_AHB_NAND 32 +#define CLK_AHB_SDRAM 33 +#define CLK_AHB_EMAC 34 +#define CLK_AHB_TS 35 +#define CLK_AHB_SPI0 36 +#define CLK_AHB_SPI1 37 +#define CLK_AHB_SPI2 38 +#define CLK_AHB_GPS 39 +#define CLK_AHB_HSTIMER 40 +#define CLK_AHB_VE 41 +#define CLK_AHB_TVE 42 +#define CLK_AHB_LCD 43 +#define CLK_AHB_CSI 44 +#define CLK_AHB_HDMI 45 +#define CLK_AHB_DE_BE 46 +#define CLK_AHB_DE_FE 47 +#define CLK_AHB_IEP 48 +#define CLK_AHB_GPU 49 +#define CLK_APB0_CODEC 50 +#define CLK_APB0_SPDIF 51 +#define CLK_APB0_I2S 52 +#define CLK_APB0_PIO 53 +#define CLK_APB0_IR 54 +#define CLK_APB0_KEYPAD 55 +#define CLK_APB1_I2C0 56 +#define CLK_APB1_I2C1 57 +#define CLK_APB1_I2C2 58 +#define CLK_APB1_UART0 59 +#define CLK_APB1_UART1 60 +#define CLK_APB1_UART2 61 +#define CLK_APB1_UART3 62 +#define CLK_NAND 63 +#define CLK_MMC0 64 +#define CLK_MMC1 65 +#define CLK_MMC2 66 +#define CLK_TS 67 +#define CLK_SS 68 +#define CLK_SPI0 69 +#define CLK_SPI1 70 +#define CLK_SPI2 71 +#define CLK_IR 72 +#define CLK_I2S 73 +#define CLK_SPDIF 74 +#define CLK_KEYPAD 75 +#define CLK_USB_OHCI 76 +#define CLK_USB_PHY0 77 +#define CLK_USB_PHY1 78 +#define CLK_GPS 79 +#define CLK_DRAM_VE 80 +#define CLK_DRAM_CSI 81 +#define CLK_DRAM_TS 82 +#define CLK_DRAM_TVE 83 +#define CLK_DRAM_DE_FE 84 +#define CLK_DRAM_DE_BE 85 +#define CLK_DRAM_ACE 86 +#define CLK_DRAM_IEP 87 +#define CLK_DE_BE 88 +#define CLK_DE_FE 89 +#define CLK_TCON_CH0 90 + +#define CLK_TCON_CH1 92 +#define CLK_CSI 93 +#define CLK_VE 94 +#define CLK_CODEC 95 +#define CLK_AVS 96 +#define CLK_HDMI 97 +#define CLK_GPU 98 + +#define CLK_IEP 100 + +#endif /* _DT_BINDINGS_CLK_SUN5I_H_ */ diff --git a/include/dt-bindings/clock/sun6i-a31-ccu.h b/include/dt-bindings/clock/sun6i-a31-ccu.h new file mode 100644 index 000000000..c5d133401 --- /dev/null +++ b/include/dt-bindings/clock/sun6i-a31-ccu.h @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN6I_A31_H_ +#define _DT_BINDINGS_CLK_SUN6I_A31_H_ + +#define CLK_PLL_VIDEO0_2X 7 + +#define CLK_PLL_PERIPH 10 + +#define CLK_PLL_VIDEO1_2X 13 + +#define CLK_CPU 18 + +#define CLK_AHB1_MIPIDSI 23 +#define CLK_AHB1_SS 24 +#define CLK_AHB1_DMA 25 +#define CLK_AHB1_MMC0 26 +#define CLK_AHB1_MMC1 27 +#define CLK_AHB1_MMC2 28 +#define CLK_AHB1_MMC3 29 +#define CLK_AHB1_NAND1 30 +#define CLK_AHB1_NAND0 31 +#define CLK_AHB1_SDRAM 32 +#define CLK_AHB1_EMAC 33 +#define CLK_AHB1_TS 34 +#define CLK_AHB1_HSTIMER 35 +#define CLK_AHB1_SPI0 36 +#define CLK_AHB1_SPI1 37 +#define CLK_AHB1_SPI2 38 +#define CLK_AHB1_SPI3 39 +#define CLK_AHB1_OTG 40 +#define CLK_AHB1_EHCI0 41 +#define CLK_AHB1_EHCI1 42 +#define CLK_AHB1_OHCI0 43 +#define CLK_AHB1_OHCI1 44 +#define CLK_AHB1_OHCI2 45 +#define CLK_AHB1_VE 46 +#define CLK_AHB1_LCD0 47 +#define CLK_AHB1_LCD1 48 +#define CLK_AHB1_CSI 49 +#define CLK_AHB1_HDMI 50 +#define CLK_AHB1_BE0 51 +#define CLK_AHB1_BE1 52 +#define CLK_AHB1_FE0 53 +#define CLK_AHB1_FE1 54 +#define CLK_AHB1_MP 55 +#define CLK_AHB1_GPU 56 +#define CLK_AHB1_DEU0 57 +#define CLK_AHB1_DEU1 58 +#define CLK_AHB1_DRC0 59 +#define CLK_AHB1_DRC1 60 + +#define CLK_APB1_CODEC 61 +#define CLK_APB1_SPDIF 62 +#define CLK_APB1_DIGITAL_MIC 63 +#define CLK_APB1_PIO 64 +#define CLK_APB1_DAUDIO0 65 +#define CLK_APB1_DAUDIO1 66 + +#define CLK_APB2_I2C0 67 +#define CLK_APB2_I2C1 68 +#define CLK_APB2_I2C2 69 +#define CLK_APB2_I2C3 70 +#define CLK_APB2_UART0 71 +#define CLK_APB2_UART1 72 +#define CLK_APB2_UART2 73 +#define CLK_APB2_UART3 74 +#define CLK_APB2_UART4 75 +#define CLK_APB2_UART5 76 + +#define CLK_NAND0 77 +#define CLK_NAND1 78 +#define CLK_MMC0 79 +#define CLK_MMC0_SAMPLE 80 +#define CLK_MMC0_OUTPUT 81 +#define CLK_MMC1 82 +#define CLK_MMC1_SAMPLE 83 +#define CLK_MMC1_OUTPUT 84 +#define CLK_MMC2 85 +#define CLK_MMC2_SAMPLE 86 +#define CLK_MMC2_OUTPUT 87 +#define CLK_MMC3 88 +#define CLK_MMC3_SAMPLE 89 +#define CLK_MMC3_OUTPUT 90 +#define CLK_TS 91 +#define CLK_SS 92 +#define CLK_SPI0 93 +#define CLK_SPI1 94 +#define CLK_SPI2 95 +#define CLK_SPI3 96 +#define CLK_DAUDIO0 97 +#define CLK_DAUDIO1 98 +#define CLK_SPDIF 99 +#define CLK_USB_PHY0 100 +#define CLK_USB_PHY1 101 +#define CLK_USB_PHY2 102 +#define CLK_USB_OHCI0 103 +#define CLK_USB_OHCI1 104 +#define CLK_USB_OHCI2 105 + +#define CLK_DRAM_VE 110 +#define CLK_DRAM_CSI_ISP 111 +#define CLK_DRAM_TS 112 
+#define CLK_DRAM_DRC0 113 +#define CLK_DRAM_DRC1 114 +#define CLK_DRAM_DEU0 115 +#define CLK_DRAM_DEU1 116 +#define CLK_DRAM_FE0 117 +#define CLK_DRAM_FE1 118 +#define CLK_DRAM_BE0 119 +#define CLK_DRAM_BE1 120 +#define CLK_DRAM_MP 121 + +#define CLK_BE0 122 +#define CLK_BE1 123 +#define CLK_FE0 124 +#define CLK_FE1 125 +#define CLK_MP 126 +#define CLK_LCD0_CH0 127 +#define CLK_LCD1_CH0 128 +#define CLK_LCD0_CH1 129 +#define CLK_LCD1_CH1 130 +#define CLK_CSI0_SCLK 131 +#define CLK_CSI0_MCLK 132 +#define CLK_CSI1_MCLK 133 +#define CLK_VE 134 +#define CLK_CODEC 135 +#define CLK_AVS 136 +#define CLK_DIGITAL_MIC 137 +#define CLK_HDMI 138 +#define CLK_HDMI_DDC 139 +#define CLK_PS 140 + +#define CLK_MIPI_DSI 143 +#define CLK_MIPI_DSI_DPHY 144 +#define CLK_MIPI_CSI_DPHY 145 +#define CLK_IEP_DRC0 146 +#define CLK_IEP_DRC1 147 +#define CLK_IEP_DEU0 148 +#define CLK_IEP_DEU1 149 +#define CLK_GPU_CORE 150 +#define CLK_GPU_MEMORY 151 +#define CLK_GPU_HYD 152 +#define CLK_ATS 153 +#define CLK_TRACE 154 + +#define CLK_OUT_A 155 +#define CLK_OUT_B 156 +#define CLK_OUT_C 157 + +#endif /* _DT_BINDINGS_CLK_SUN6I_A31_H_ */ diff --git a/include/dt-bindings/clock/sun7i-a20-ccu.h b/include/dt-bindings/clock/sun7i-a20-ccu.h new file mode 100644 index 000000000..045a5178d --- /dev/null +++ b/include/dt-bindings/clock/sun7i-a20-ccu.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2017 Priit Laes + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN7I_A20_H_ +#define _DT_BINDINGS_CLK_SUN7I_A20_H_ + +#include <dt-bindings/clock/sun4i-a10-ccu.h> + +#define CLK_MBUS 166 +#define CLK_HDMI1_SLOW 167 +#define CLK_HDMI1 168 +#define CLK_OUT_A 169 +#define CLK_OUT_B 170 + +#endif /* _DT_BINDINGS_CLK_SUN7I_A20_H_ */ diff --git a/include/dt-bindings/clock/sun8i-a23-a33-ccu.h b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h new file mode 100644 index 000000000..f8222b6b2 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_A23_A33_H_ +#define _DT_BINDINGS_CLK_SUN8I_A23_A33_H_ + +#define CLK_CPUX 18 + +#define CLK_BUS_MIPI_DSI 23 +#define CLK_BUS_SS 24 +#define CLK_BUS_DMA 25 +#define CLK_BUS_MMC0 26 +#define CLK_BUS_MMC1 27 +#define CLK_BUS_MMC2 28 +#define CLK_BUS_NAND 29 +#define CLK_BUS_DRAM 30 +#define CLK_BUS_HSTIMER 31 +#define CLK_BUS_SPI0 32 +#define CLK_BUS_SPI1 33 +#define CLK_BUS_OTG 34 +#define CLK_BUS_EHCI 35 +#define CLK_BUS_OHCI 36 +#define CLK_BUS_VE 37 +#define CLK_BUS_LCD 38 +#define CLK_BUS_CSI 39 +#define CLK_BUS_DE_BE 40 +#define CLK_BUS_DE_FE 41 +#define CLK_BUS_GPU 42 +#define CLK_BUS_MSGBOX 43 +#define CLK_BUS_SPINLOCK 44 +#define CLK_BUS_DRC 45 +#define CLK_BUS_SAT 46 +#define CLK_BUS_CODEC 47 +#define CLK_BUS_PIO 48 +#define CLK_BUS_I2S0 49 +#define CLK_BUS_I2S1 50 +#define CLK_BUS_I2C0 51 +#define CLK_BUS_I2C1 52 +#define CLK_BUS_I2C2 53 +#define CLK_BUS_UART0 54 +#define CLK_BUS_UART1 55 +#define CLK_BUS_UART2 56 +#define CLK_BUS_UART3 57 +#define CLK_BUS_UART4 58 +#define CLK_NAND 59 +#define CLK_MMC0 60 +#define CLK_MMC0_SAMPLE 61 +#define CLK_MMC0_OUTPUT 62 +#define CLK_MMC1 63 +#define CLK_MMC1_SAMPLE 64 +#define CLK_MMC1_OUTPUT 65 +#define CLK_MMC2 66 +#define CLK_MMC2_SAMPLE 67 +#define CLK_MMC2_OUTPUT 68 +#define CLK_SS 69 +#define CLK_SPI0 70 +#define CLK_SPI1 71 +#define CLK_I2S0 72 +#define CLK_I2S1 73 +#define CLK_USB_PHY0 74 +#define CLK_USB_PHY1 75 +#define CLK_USB_HSIC 76 +#define CLK_USB_HSIC_12M 77 +#define CLK_USB_OHCI 78 + +#define CLK_DRAM_VE 80 +#define CLK_DRAM_CSI 81 +#define CLK_DRAM_DRC 82 +#define CLK_DRAM_DE_FE 83 +#define CLK_DRAM_DE_BE 84 +#define CLK_DE_BE 85 +#define CLK_DE_FE 86 +#define CLK_LCD_CH0 87 +#define CLK_LCD_CH1 88 +#define CLK_CSI_SCLK 89 +#define CLK_CSI_MCLK 90 +#define CLK_VE 91 +#define CLK_AC_DIG 92 +#define CLK_AC_DIG_4X 93 +#define CLK_AVS 94 + +#define CLK_DSI_SCLK 96 +#define CLK_DSI_DPHY 97 +#define CLK_DRC 98 +#define CLK_GPU 99 +#define CLK_ATS 100 + +#endif /* _DT_BINDINGS_CLK_SUN8I_A23_A33_H_ */ diff --git a/include/dt-bindings/clock/sun8i-a83t-ccu.h b/include/dt-bindings/clock/sun8i-a83t-ccu.h new file mode 100644 index 000000000..78af5085f --- /dev/null +++ b/include/dt-bindings/clock/sun8i-a83t-ccu.h @@ -0,0 +1,140 @@ +/* + * Copyright (C) 2017 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN8I_A83T_CCU_H_ +#define _DT_BINDINGS_CLOCK_SUN8I_A83T_CCU_H_ + +#define CLK_PLL_PERIPH 6 + +#define CLK_PLL_DE 9 + +#define CLK_C0CPUX 11 +#define CLK_C1CPUX 12 + +#define CLK_BUS_MIPI_DSI 19 +#define CLK_BUS_SS 20 +#define CLK_BUS_DMA 21 +#define CLK_BUS_MMC0 22 +#define CLK_BUS_MMC1 23 +#define CLK_BUS_MMC2 24 +#define CLK_BUS_NAND 25 +#define CLK_BUS_DRAM 26 +#define CLK_BUS_EMAC 27 +#define CLK_BUS_HSTIMER 28 +#define CLK_BUS_SPI0 29 +#define CLK_BUS_SPI1 30 +#define CLK_BUS_OTG 31 +#define CLK_BUS_EHCI0 32 +#define CLK_BUS_EHCI1 33 +#define CLK_BUS_OHCI0 34 + +#define CLK_BUS_VE 35 +#define CLK_BUS_TCON0 36 +#define CLK_BUS_TCON1 37 +#define CLK_BUS_CSI 38 +#define CLK_BUS_HDMI 39 +#define CLK_BUS_DE 40 +#define CLK_BUS_GPU 41 +#define CLK_BUS_MSGBOX 42 +#define CLK_BUS_SPINLOCK 43 + +#define CLK_BUS_SPDIF 44 +#define CLK_BUS_PIO 45 +#define CLK_BUS_I2S0 46 +#define CLK_BUS_I2S1 47 +#define CLK_BUS_I2S2 48 +#define CLK_BUS_TDM 49 + +#define CLK_BUS_I2C0 50 +#define CLK_BUS_I2C1 51 +#define CLK_BUS_I2C2 52 +#define CLK_BUS_UART0 53 +#define CLK_BUS_UART1 54 +#define CLK_BUS_UART2 55 +#define CLK_BUS_UART3 56 +#define CLK_BUS_UART4 57 + +#define CLK_NAND 59 +#define CLK_MMC0 60 +#define CLK_MMC0_SAMPLE 61 +#define CLK_MMC0_OUTPUT 62 +#define CLK_MMC1 63 +#define CLK_MMC1_SAMPLE 64 +#define CLK_MMC1_OUTPUT 65 +#define CLK_MMC2 66 +#define CLK_MMC2_SAMPLE 67 +#define CLK_MMC2_OUTPUT 68 +#define CLK_SS 69 +#define CLK_SPI0 70 +#define CLK_SPI1 71 +#define CLK_I2S0 72 +#define CLK_I2S1 73 +#define CLK_I2S2 74 +#define CLK_TDM 75 +#define CLK_SPDIF 76 +#define CLK_USB_PHY0 77 +#define CLK_USB_PHY1 78 +#define CLK_USB_HSIC 79 +#define CLK_USB_HSIC_12M 80 +#define CLK_USB_OHCI0 81 + +#define CLK_DRAM_VE 83 +#define CLK_DRAM_CSI 84 + +#define CLK_TCON0 85 +#define CLK_TCON1 86 +#define CLK_CSI_MISC 87 +#define CLK_MIPI_CSI 88 +#define CLK_CSI_MCLK 89 +#define CLK_CSI_SCLK 90 +#define CLK_VE 91 +#define CLK_AVS 92 +#define CLK_HDMI 93 +#define CLK_HDMI_SLOW 94 + +#define CLK_MIPI_DSI0 96 +#define CLK_MIPI_DSI1 97 +#define CLK_GPU_CORE 98 +#define CLK_GPU_MEMORY 99 +#define CLK_GPU_HYD 100 + +#endif /* _DT_BINDINGS_CLOCK_SUN8I_A83T_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun8i-de2.h b/include/dt-bindings/clock/sun8i-de2.h new file mode 100644 index 000000000..3bed63b52 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-de2.h @@ -0,0 
+1,18 @@ +/* + * Copyright (C) 2016 Icenowy Zheng + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ +#define _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ + +#define CLK_BUS_MIXER0 0 +#define CLK_BUS_MIXER1 1 +#define CLK_BUS_WB 2 + +#define CLK_MIXER0 6 +#define CLK_MIXER1 7 +#define CLK_WB 8 + +#endif /* _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ */ diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h new file mode 100644 index 000000000..c5f7e9a70 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-h3-ccu.h @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_ +#define _DT_BINDINGS_CLK_SUN8I_H3_H_ + +#define CLK_PLL_VIDEO 6 + +#define CLK_PLL_PERIPH0 9 + +#define CLK_CPUX 14 + +#define CLK_BUS_CE 20 +#define CLK_BUS_DMA 21 +#define CLK_BUS_MMC0 22 +#define CLK_BUS_MMC1 23 +#define CLK_BUS_MMC2 24 +#define CLK_BUS_NAND 25 +#define CLK_BUS_DRAM 26 +#define CLK_BUS_EMAC 27 +#define CLK_BUS_TS 28 +#define CLK_BUS_HSTIMER 29 +#define CLK_BUS_SPI0 30 +#define CLK_BUS_SPI1 31 +#define CLK_BUS_OTG 32 +#define CLK_BUS_EHCI0 33 +#define CLK_BUS_EHCI1 34 +#define CLK_BUS_EHCI2 35 +#define CLK_BUS_EHCI3 36 +#define CLK_BUS_OHCI0 37 +#define CLK_BUS_OHCI1 38 +#define CLK_BUS_OHCI2 39 +#define CLK_BUS_OHCI3 40 +#define CLK_BUS_VE 41 +#define CLK_BUS_TCON0 42 +#define CLK_BUS_TCON1 43 +#define CLK_BUS_DEINTERLACE 44 +#define CLK_BUS_CSI 45 +#define CLK_BUS_TVE 46 +#define CLK_BUS_HDMI 47 +#define CLK_BUS_DE 48 +#define CLK_BUS_GPU 49 +#define CLK_BUS_MSGBOX 50 +#define CLK_BUS_SPINLOCK 51 +#define CLK_BUS_CODEC 52 +#define CLK_BUS_SPDIF 53 +#define CLK_BUS_PIO 54 +#define CLK_BUS_THS 55 +#define CLK_BUS_I2S0 56 +#define CLK_BUS_I2S1 57 +#define CLK_BUS_I2S2 58 +#define CLK_BUS_I2C0 59 +#define CLK_BUS_I2C1 60 +#define CLK_BUS_I2C2 61 +#define CLK_BUS_UART0 62 +#define CLK_BUS_UART1 63 +#define CLK_BUS_UART2 64 +#define CLK_BUS_UART3 65 +#define CLK_BUS_SCR0 66 +#define CLK_BUS_EPHY 67 +#define CLK_BUS_DBG 68 + +#define CLK_THS 69 +#define CLK_NAND 70 +#define CLK_MMC0 71 +#define CLK_MMC0_SAMPLE 72 +#define CLK_MMC0_OUTPUT 73 +#define CLK_MMC1 74 +#define CLK_MMC1_SAMPLE 75 +#define CLK_MMC1_OUTPUT 76 +#define CLK_MMC2 77 +#define CLK_MMC2_SAMPLE 78 +#define CLK_MMC2_OUTPUT 79 +#define CLK_TS 80 +#define CLK_CE 81 +#define CLK_SPI0 82 +#define CLK_SPI1 83 +#define CLK_I2S0 84 +#define CLK_I2S1 85 +#define CLK_I2S2 86 +#define CLK_SPDIF 87 +#define CLK_USB_PHY0 88 +#define CLK_USB_PHY1 89 +#define CLK_USB_PHY2 90 +#define CLK_USB_PHY3 91 +#define CLK_USB_OHCI0 92 +#define CLK_USB_OHCI1 93 +#define CLK_USB_OHCI2 94 +#define CLK_USB_OHCI3 95 + +#define CLK_DRAM_VE 97 +#define CLK_DRAM_CSI 98 +#define CLK_DRAM_DEINTERLACE 99 +#define CLK_DRAM_TS 100 +#define CLK_DE 101 +#define CLK_TCON0 102 +#define CLK_TVE 103 +#define CLK_DEINTERLACE 104 +#define CLK_CSI_MISC 105 +#define CLK_CSI_SCLK 106 +#define CLK_CSI_MCLK 107 +#define CLK_VE 108 +#define CLK_AC_DIG 109 +#define CLK_AVS 110 +#define CLK_HDMI 111 +#define CLK_HDMI_DDC 112 + +#define CLK_GPU 114 + +/* New clocks imported in H5 */ +#define CLK_BUS_SCR1 115 + +#endif /* _DT_BINDINGS_CLK_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/clock/sun8i-r-ccu.h b/include/dt-bindings/clock/sun8i-r-ccu.h new file mode 100644 index 000000000..779d20aa0 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-r-ccu.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2016 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_R_CCU_H_ +#define _DT_BINDINGS_CLK_SUN8I_R_CCU_H_ + +#define CLK_AR100 0 + +#define CLK_APB0_PIO 3 +#define CLK_APB0_IR 4 +#define CLK_APB0_TIMER 5 +#define CLK_APB0_RSB 6 +#define CLK_APB0_UART 7 +/* 8 is reserved for CLK_APB0_W1 on A31 */ +#define CLK_APB0_I2C 9 +#define CLK_APB0_TWD 10 + +#define CLK_IR 11 + +#endif /* _DT_BINDINGS_CLK_SUN8I_R_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun8i-r40-ccu.h b/include/dt-bindings/clock/sun8i-r40-ccu.h new file mode 100644 index 000000000..f9e15a235 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-r40-ccu.h @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2017 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_R40_H_ +#define _DT_BINDINGS_CLK_SUN8I_R40_H_ + +#define CLK_PLL_VIDEO0 7 + +#define CLK_PLL_VIDEO1 16 + +#define CLK_CPU 24 + +#define CLK_BUS_MIPI_DSI 29 +#define CLK_BUS_CE 30 +#define CLK_BUS_DMA 31 +#define CLK_BUS_MMC0 32 +#define CLK_BUS_MMC1 33 +#define CLK_BUS_MMC2 34 +#define CLK_BUS_MMC3 35 +#define CLK_BUS_NAND 36 +#define CLK_BUS_DRAM 37 +#define CLK_BUS_EMAC 38 +#define CLK_BUS_TS 39 +#define CLK_BUS_HSTIMER 40 +#define CLK_BUS_SPI0 41 +#define CLK_BUS_SPI1 42 +#define CLK_BUS_SPI2 43 +#define CLK_BUS_SPI3 44 +#define CLK_BUS_SATA 45 +#define CLK_BUS_OTG 46 +#define CLK_BUS_EHCI0 47 +#define CLK_BUS_EHCI1 48 +#define CLK_BUS_EHCI2 49 +#define CLK_BUS_OHCI0 50 +#define CLK_BUS_OHCI1 51 +#define CLK_BUS_OHCI2 52 +#define CLK_BUS_VE 53 +#define CLK_BUS_MP 54 +#define CLK_BUS_DEINTERLACE 55 +#define CLK_BUS_CSI0 56 +#define CLK_BUS_CSI1 57 +#define CLK_BUS_HDMI1 58 +#define CLK_BUS_HDMI0 59 +#define CLK_BUS_DE 60 +#define CLK_BUS_TVE0 61 +#define CLK_BUS_TVE1 62 +#define CLK_BUS_TVE_TOP 63 +#define CLK_BUS_GMAC 64 +#define CLK_BUS_GPU 65 +#define CLK_BUS_TVD0 66 +#define CLK_BUS_TVD1 67 +#define CLK_BUS_TVD2 68 +#define CLK_BUS_TVD3 69 +#define CLK_BUS_TVD_TOP 70 +#define CLK_BUS_TCON_LCD0 71 +#define CLK_BUS_TCON_LCD1 72 +#define CLK_BUS_TCON_TV0 73 +#define CLK_BUS_TCON_TV1 74 +#define CLK_BUS_TCON_TOP 75 +#define CLK_BUS_CODEC 76 +#define CLK_BUS_SPDIF 77 +#define CLK_BUS_AC97 78 +#define CLK_BUS_PIO 79 +#define CLK_BUS_IR0 80 +#define CLK_BUS_IR1 81 +#define CLK_BUS_THS 82 +#define CLK_BUS_KEYPAD 83 +#define CLK_BUS_I2S0 84 +#define CLK_BUS_I2S1 85 +#define CLK_BUS_I2S2 86 +#define CLK_BUS_I2C0 87 +#define CLK_BUS_I2C1 88 +#define CLK_BUS_I2C2 89 +#define CLK_BUS_I2C3 90 +#define CLK_BUS_CAN 91 +#define CLK_BUS_SCR 92 +#define CLK_BUS_PS20 93 +#define CLK_BUS_PS21 94 +#define CLK_BUS_I2C4 95 +#define CLK_BUS_UART0 96 +#define CLK_BUS_UART1 97 +#define CLK_BUS_UART2 98 +#define CLK_BUS_UART3 99 +#define CLK_BUS_UART4 100 +#define CLK_BUS_UART5 101 +#define CLK_BUS_UART6 102 +#define CLK_BUS_UART7 103 +#define CLK_BUS_DBG 104 + +#define CLK_THS 105 +#define CLK_NAND 106 +#define CLK_MMC0 107 +#define CLK_MMC1 108 +#define CLK_MMC2 109 +#define CLK_MMC3 110 +#define CLK_TS 111 +#define CLK_CE 112 +#define CLK_SPI0 113 +#define CLK_SPI1 114 +#define CLK_SPI2 115 +#define CLK_SPI3 116 +#define CLK_I2S0 117 +#define CLK_I2S1 118 +#define CLK_I2S2 119 +#define CLK_AC97 120 +#define CLK_SPDIF 121 +#define CLK_KEYPAD 122 +#define CLK_SATA 123 +#define CLK_USB_PHY0 124 +#define CLK_USB_PHY1 125 +#define CLK_USB_PHY2 126 +#define CLK_USB_OHCI0 127 +#define CLK_USB_OHCI1 128 +#define CLK_USB_OHCI2 129 +#define CLK_IR0 130 +#define CLK_IR1 131 + +#define CLK_DRAM_VE 133 +#define CLK_DRAM_CSI0 134 +#define CLK_DRAM_CSI1 135 +#define CLK_DRAM_TS 136 +#define CLK_DRAM_TVD 137 +#define CLK_DRAM_MP 138 +#define CLK_DRAM_DEINTERLACE 139 +#define CLK_DE 140 +#define CLK_MP 141 +#define CLK_TCON_LCD0 142 +#define CLK_TCON_LCD1 143 +#define CLK_TCON_TV0 144 +#define CLK_TCON_TV1 145 +#define CLK_DEINTERLACE 146 +#define CLK_CSI1_MCLK 147 +#define CLK_CSI_SCLK 148 +#define CLK_CSI0_MCLK 149 +#define CLK_VE 150 +#define CLK_CODEC 151 +#define CLK_AVS 152 +#define CLK_HDMI 153 +#define 
CLK_HDMI_SLOW 154 + +#define CLK_DSI_DPHY 156 +#define CLK_TVE0 157 +#define CLK_TVE1 158 +#define CLK_TVD0 159 +#define CLK_TVD1 160 +#define CLK_TVD2 161 +#define CLK_TVD3 162 +#define CLK_GPU 163 +#define CLK_OUTA 164 +#define CLK_OUTB 165 + +#endif /* _DT_BINDINGS_CLK_SUN8I_R40_H_ */ diff --git a/include/dt-bindings/clock/sun8i-tcon-top.h b/include/dt-bindings/clock/sun8i-tcon-top.h new file mode 100644 index 000000000..25164d767 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-tcon-top.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ +/* Copyright (C) 2018 Jernej Skrabec */ + +#ifndef _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ +#define _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ + +#define CLK_TCON_TOP_TV0 0 +#define CLK_TCON_TOP_TV1 1 +#define CLK_TCON_TOP_DSI 2 + +#endif /* _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ */ diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h new file mode 100644 index 000000000..c0d5d5599 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2016 Icenowy Zheng + * + * Based on sun8i-h3-ccu.h, which is: + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_V3S_H_ +#define _DT_BINDINGS_CLK_SUN8I_V3S_H_ + +#define CLK_CPU 14 + +#define CLK_BUS_CE 20 +#define CLK_BUS_DMA 21 +#define CLK_BUS_MMC0 22 +#define CLK_BUS_MMC1 23 +#define CLK_BUS_MMC2 24 +#define CLK_BUS_DRAM 25 +#define CLK_BUS_EMAC 26 +#define CLK_BUS_HSTIMER 27 +#define CLK_BUS_SPI0 28 +#define CLK_BUS_OTG 29 +#define CLK_BUS_EHCI0 30 +#define CLK_BUS_OHCI0 31 +#define CLK_BUS_VE 32 +#define CLK_BUS_TCON0 33 +#define CLK_BUS_CSI 34 +#define CLK_BUS_DE 35 +#define CLK_BUS_CODEC 36 +#define CLK_BUS_PIO 37 +#define CLK_BUS_I2C0 38 +#define CLK_BUS_I2C1 39 +#define CLK_BUS_UART0 40 +#define CLK_BUS_UART1 41 +#define CLK_BUS_UART2 42 +#define CLK_BUS_EPHY 43 +#define CLK_BUS_DBG 44 + +#define CLK_MMC0 45 +#define CLK_MMC0_SAMPLE 46 +#define CLK_MMC0_OUTPUT 47 +#define CLK_MMC1 48 +#define CLK_MMC1_SAMPLE 49 +#define CLK_MMC1_OUTPUT 50 +#define CLK_MMC2 51 +#define CLK_MMC2_SAMPLE 52 +#define CLK_MMC2_OUTPUT 53 +#define CLK_CE 54 +#define CLK_SPI0 55 +#define CLK_USB_PHY0 56 +#define CLK_USB_OHCI0 57 + +#define CLK_DRAM_VE 59 +#define CLK_DRAM_CSI 60 +#define CLK_DRAM_EHCI 61 +#define CLK_DRAM_OHCI 62 +#define CLK_DE 63 +#define CLK_TCON0 64 +#define CLK_CSI_MISC 65 +#define CLK_CSI0_MCLK 66 +#define CLK_CSI1_SCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_VE 69 +#define CLK_AC_DIG 70 +#define CLK_AVS 71 + +#define CLK_MIPI_CSI 73 + +#endif /* _DT_BINDINGS_CLK_SUN8I_V3S_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-ccu.h b/include/dt-bindings/clock/sun9i-a80-ccu.h new file mode 100644 index 000000000..6ea1492a7 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-ccu.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ + +#define CLK_PLL_AUDIO 2 +#define CLK_PLL_PERIPH0 3 + +#define CLK_C0CPUX 12 +#define CLK_C1CPUX 13 + +#define CLK_OUT_A 27 +#define CLK_OUT_B 28 + +#define CLK_NAND0_0 29 +#define CLK_NAND0_1 30 +#define CLK_NAND1_0 31 +#define CLK_NAND1_1 32 +#define CLK_MMC0 33 +#define CLK_MMC0_SAMPLE 34 +#define CLK_MMC0_OUTPUT 35 +#define CLK_MMC1 36 +#define CLK_MMC1_SAMPLE 37 +#define CLK_MMC1_OUTPUT 38 +#define CLK_MMC2 39 +#define CLK_MMC2_SAMPLE 40 +#define CLK_MMC2_OUTPUT 41 +#define CLK_MMC3 42 +#define CLK_MMC3_SAMPLE 43 +#define CLK_MMC3_OUTPUT 44 +#define CLK_TS 45 +#define CLK_SS 46 +#define CLK_SPI0 47 +#define CLK_SPI1 48 +#define CLK_SPI2 49 +#define CLK_SPI3 50 +#define CLK_I2S0 51 +#define CLK_I2S1 52 +#define CLK_SPDIF 53 +#define CLK_SDRAM 54 +#define CLK_DE 55 +#define CLK_EDP 56 +#define CLK_MP 57 +#define CLK_LCD0 58 +#define CLK_LCD1 59 +#define CLK_MIPI_DSI0 60 +#define CLK_MIPI_DSI1 61 +#define CLK_HDMI 62 +#define CLK_HDMI_SLOW 63 +#define CLK_MIPI_CSI 64 +#define CLK_CSI_ISP 65 +#define CLK_CSI_MISC 66 +#define CLK_CSI0_MCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_FD 69 +#define CLK_VE 70 +#define CLK_AVS 71 +#define CLK_GPU_CORE 72 +#define CLK_GPU_MEMORY 73 +#define CLK_GPU_AXI 74 +#define CLK_SATA 75 +#define CLK_AC97 76 +#define CLK_MIPI_HSI 77 +#define CLK_GPADC 78 +#define CLK_CIR_TX 79 + +#define CLK_BUS_FD 80 +#define CLK_BUS_VE 81 +#define CLK_BUS_GPU_CTRL 82 +#define CLK_BUS_SS 83 +#define CLK_BUS_MMC 84 +#define CLK_BUS_NAND0 85 +#define CLK_BUS_NAND1 86 +#define CLK_BUS_SDRAM 87 +#define CLK_BUS_MIPI_HSI 88 +#define CLK_BUS_SATA 89 +#define CLK_BUS_TS 90 +#define CLK_BUS_SPI0 91 +#define CLK_BUS_SPI1 92 +#define CLK_BUS_SPI2 93 +#define CLK_BUS_SPI3 94 + +#define CLK_BUS_OTG 95 +#define CLK_BUS_USB 96 +#define CLK_BUS_GMAC 97 +#define CLK_BUS_MSGBOX 98 +#define CLK_BUS_SPINLOCK 99 +#define CLK_BUS_HSTIMER 100 +#define CLK_BUS_DMA 101 + +#define CLK_BUS_LCD0 102 +#define CLK_BUS_LCD1 103 +#define CLK_BUS_EDP 104 +#define CLK_BUS_CSI 105 +#define CLK_BUS_HDMI 106 +#define CLK_BUS_DE 107 +#define CLK_BUS_MP 108 +#define CLK_BUS_MIPI_DSI 109 + +#define CLK_BUS_SPDIF 110 +#define CLK_BUS_PIO 111 +#define CLK_BUS_AC97 112 +#define CLK_BUS_I2S0 113 +#define CLK_BUS_I2S1 114 +#define CLK_BUS_LRADC 115 +#define CLK_BUS_GPADC 116 +#define CLK_BUS_TWD 117 +#define CLK_BUS_CIR_TX 118 + +#define CLK_BUS_I2C0 119 +#define CLK_BUS_I2C1 120 +#define CLK_BUS_I2C2 121 +#define CLK_BUS_I2C3 122 +#define CLK_BUS_I2C4 123 +#define CLK_BUS_UART0 124 +#define CLK_BUS_UART1 125 +#define CLK_BUS_UART2 126 +#define CLK_BUS_UART3 127 +#define CLK_BUS_UART4 128 +#define CLK_BUS_UART5 129 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-de.h b/include/dt-bindings/clock/sun9i-a80-de.h new file mode 100644 index 000000000..3dad6c3cd --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-de.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ + +#define CLK_FE0 0 +#define CLK_FE1 1 +#define CLK_FE2 2 +#define CLK_IEP_DEU0 3 +#define CLK_IEP_DEU1 4 +#define CLK_BE0 5 +#define CLK_BE1 6 +#define CLK_BE2 7 +#define CLK_IEP_DRC0 8 +#define CLK_IEP_DRC1 9 +#define CLK_MERGE 10 + +#define CLK_DRAM_FE0 11 +#define CLK_DRAM_FE1 12 +#define CLK_DRAM_FE2 13 +#define CLK_DRAM_DEU0 14 +#define CLK_DRAM_DEU1 15 +#define CLK_DRAM_BE0 16 +#define CLK_DRAM_BE1 17 +#define CLK_DRAM_BE2 18 +#define CLK_DRAM_DRC0 19 +#define CLK_DRAM_DRC1 20 + +#define CLK_BUS_FE0 21 +#define CLK_BUS_FE1 22 +#define CLK_BUS_FE2 23 +#define CLK_BUS_DEU0 24 +#define CLK_BUS_DEU1 25 +#define CLK_BUS_BE0 26 +#define CLK_BUS_BE1 27 +#define CLK_BUS_BE2 28 +#define CLK_BUS_DRC0 29 +#define CLK_BUS_DRC1 30 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-usb.h b/include/dt-bindings/clock/sun9i-a80-usb.h new file mode 100644 index 000000000..783a60d2c --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-usb.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ + +#define CLK_BUS_HCI0 0 +#define CLK_USB_OHCI0 1 +#define CLK_BUS_HCI1 2 +#define CLK_BUS_HCI2 3 +#define CLK_USB_OHCI2 4 + +#define CLK_USB0_PHY 5 +#define CLK_USB1_HSIC 6 +#define CLK_USB1_PHY 7 +#define CLK_USB2_HSIC 8 +#define CLK_USB2_PHY 9 +#define CLK_USB_HSIC 10 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ */ diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h new file mode 100644 index 000000000..bb5c2c999 --- /dev/null +++ b/include/dt-bindings/clock/tegra114-car.h @@ -0,0 +1,344 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra114-car. + * + * The first 160 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 160 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 160 and + * above. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA114_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA114_CAR_H + +/* 0 */ +/* 1 */ +/* 2 */ +/* 3 */ +#define TEGRA114_CLK_RTC 4 +#define TEGRA114_CLK_TIMER 5 +#define TEGRA114_CLK_UARTA 6 +/* 7 (register bit affects uartb and vfir) */ +/* 8 */ +#define TEGRA114_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA114_CLK_I2S1 11 +#define TEGRA114_CLK_I2C1 12 +#define TEGRA114_CLK_NDFLASH 13 +#define TEGRA114_CLK_SDMMC1 14 +#define TEGRA114_CLK_SDMMC4 15 +/* 16 */ +#define TEGRA114_CLK_PWM 17 +#define TEGRA114_CLK_I2S2 18 +#define TEGRA114_CLK_EPP 19 +/* 20 (register bit affects vi and vi_sensor) */ +#define TEGRA114_CLK_GR2D 21 +#define TEGRA114_CLK_USBD 22 +#define TEGRA114_CLK_ISP 23 +#define TEGRA114_CLK_GR3D 24 +/* 25 */ +#define TEGRA114_CLK_DISP2 26 +#define TEGRA114_CLK_DISP1 27 +#define TEGRA114_CLK_HOST1X 28 +#define TEGRA114_CLK_VCP 29 +#define TEGRA114_CLK_I2S0 30 +/* 31 */ + +#define TEGRA114_CLK_MC 32 +/* 33 */ +#define TEGRA114_CLK_APBDMA 34 +/* 35 */ +#define TEGRA114_CLK_KBC 36 +/* 37 */ +/* 38 */ +/* 39 (register bit affects fuse and fuse_burn) */ +#define TEGRA114_CLK_KFUSE 40 +#define TEGRA114_CLK_SBC1 41 +#define TEGRA114_CLK_NOR 42 +/* 43 */ +#define TEGRA114_CLK_SBC2 44 +/* 45 */ +#define TEGRA114_CLK_SBC3 46 +#define TEGRA114_CLK_I2C5 47 +#define TEGRA114_CLK_DSIA 48 +/* 49 */ +#define TEGRA114_CLK_MIPI 50 +#define TEGRA114_CLK_HDMI 51 +#define TEGRA114_CLK_CSI 52 +/* 53 */ +#define TEGRA114_CLK_I2C2 54 +#define TEGRA114_CLK_UARTC 55 +#define TEGRA114_CLK_MIPI_CAL 56 +#define TEGRA114_CLK_EMC 57 +#define TEGRA114_CLK_USB2 58 +#define TEGRA114_CLK_USB3 59 +/* 60 */ +#define TEGRA114_CLK_VDE 61 +#define TEGRA114_CLK_BSEA 62 +#define TEGRA114_CLK_BSEV 63 + +/* 64 */ +#define TEGRA114_CLK_UARTD 65 +/* 66 */ +#define TEGRA114_CLK_I2C3 67 +#define TEGRA114_CLK_SBC4 68 +#define TEGRA114_CLK_SDMMC3 69 +/* 70 */ +#define TEGRA114_CLK_OWR 71 +/* 72 */ +#define TEGRA114_CLK_CSITE 73 +/* 74 */ +/* 75 */ +#define TEGRA114_CLK_LA 76 +#define TEGRA114_CLK_TRACE 77 +#define TEGRA114_CLK_SOC_THERM 78 +#define TEGRA114_CLK_DTV 79 +#define TEGRA114_CLK_NDSPEED 80 +#define TEGRA114_CLK_I2CSLOW 81 +#define TEGRA114_CLK_DSIB 82 +#define TEGRA114_CLK_TSEC 83 +/* 84 */ +/* 85 */ +/* 86 */ +/* 87 */ +/* 88 */ +#define TEGRA114_CLK_XUSB_HOST 89 +/* 90 */ +#define TEGRA114_CLK_MSENC 91 +#define TEGRA114_CLK_CSUS 92 +/* 93 */ +/* 94 */ +/* 95 (bit affects xusb_dev and xusb_dev_src) */ + +/* 96 */ +/* 97 */ +/* 98 */ +#define TEGRA114_CLK_MSELECT 99 +#define TEGRA114_CLK_TSENSOR 100 +#define TEGRA114_CLK_I2S3 101 +#define TEGRA114_CLK_I2S4 102 +#define TEGRA114_CLK_I2C4 103 +#define TEGRA114_CLK_SBC5 104 +#define TEGRA114_CLK_SBC6 105 +#define TEGRA114_CLK_D_AUDIO 106 +#define TEGRA114_CLK_APBIF 107 +#define TEGRA114_CLK_DAM0 108 +#define TEGRA114_CLK_DAM1 109 +#define TEGRA114_CLK_DAM2 110 +#define TEGRA114_CLK_HDA2CODEC_2X 111 +/* 112 */ +#define TEGRA114_CLK_AUDIO0_2X 113 +#define TEGRA114_CLK_AUDIO1_2X 114 +#define TEGRA114_CLK_AUDIO2_2X 115 +#define TEGRA114_CLK_AUDIO3_2X 116 +#define TEGRA114_CLK_AUDIO4_2X 117 +#define TEGRA114_CLK_SPDIF_2X 118 +#define TEGRA114_CLK_ACTMON 119 +#define TEGRA114_CLK_EXTERN1 120 +#define TEGRA114_CLK_EXTERN2 121 +#define TEGRA114_CLK_EXTERN3 122 +/* 123 */ +/* 124 */ +#define TEGRA114_CLK_HDA 125 +/* 126 */ +#define TEGRA114_CLK_SE 127 + +#define TEGRA114_CLK_HDA2HDMI 128 +/* 129 */ +/* 130 */ +/* 131 */ +/* 132 */ +/* 133 */ +/* 134 */ +/* 135 */ +#define TEGRA114_CLK_CEC 136 +/* 137 */ +/* 138 */ 
+/* 139 */ +/* 140 */ +/* 141 */ +/* 142 */ +/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */ +/* xusb_host_src and xusb_ss_src) */ +#define TEGRA114_CLK_CILAB 144 +#define TEGRA114_CLK_CILCD 145 +#define TEGRA114_CLK_CILE 146 +#define TEGRA114_CLK_DSIALP 147 +#define TEGRA114_CLK_DSIBLP 148 +/* 149 */ +#define TEGRA114_CLK_DDS 150 +/* 151 */ +#define TEGRA114_CLK_DP2 152 +#define TEGRA114_CLK_AMX 153 +#define TEGRA114_CLK_ADX 154 +/* 155 (bit affects dfll_ref and dfll_soc) */ +#define TEGRA114_CLK_XUSB_SS 156 +/* 157 */ +/* 158 */ +/* 159 */ + +/* 160 */ +/* 161 */ +/* 162 */ +/* 163 */ +/* 164 */ +/* 165 */ +/* 166 */ +/* 167 */ +/* 168 */ +/* 169 */ +/* 170 */ +/* 171 */ +/* 172 */ +/* 173 */ +/* 174 */ +/* 175 */ +/* 176 */ +/* 177 */ +/* 178 */ +/* 179 */ +/* 180 */ +/* 181 */ +/* 182 */ +/* 183 */ +/* 184 */ +/* 185 */ +/* 186 */ +/* 187 */ +/* 188 */ +/* 189 */ +/* 190 */ +/* 191 */ + +#define TEGRA114_CLK_UARTB 192 +#define TEGRA114_CLK_VFIR 193 +#define TEGRA114_CLK_SPDIF_IN 194 +#define TEGRA114_CLK_SPDIF_OUT 195 +#define TEGRA114_CLK_VI 196 +#define TEGRA114_CLK_VI_SENSOR 197 +#define TEGRA114_CLK_FUSE 198 +#define TEGRA114_CLK_FUSE_BURN 199 +#define TEGRA114_CLK_CLK_32K 200 +#define TEGRA114_CLK_CLK_M 201 +#define TEGRA114_CLK_CLK_M_DIV2 202 +#define TEGRA114_CLK_CLK_M_DIV4 203 +#define TEGRA114_CLK_PLL_REF 204 +#define TEGRA114_CLK_PLL_C 205 +#define TEGRA114_CLK_PLL_C_OUT1 206 +#define TEGRA114_CLK_PLL_C2 207 +#define TEGRA114_CLK_PLL_C3 208 +#define TEGRA114_CLK_PLL_M 209 +#define TEGRA114_CLK_PLL_M_OUT1 210 +#define TEGRA114_CLK_PLL_P 211 +#define TEGRA114_CLK_PLL_P_OUT1 212 +#define TEGRA114_CLK_PLL_P_OUT2 213 +#define TEGRA114_CLK_PLL_P_OUT3 214 +#define TEGRA114_CLK_PLL_P_OUT4 215 +#define TEGRA114_CLK_PLL_A 216 +#define TEGRA114_CLK_PLL_A_OUT0 217 +#define TEGRA114_CLK_PLL_D 218 +#define TEGRA114_CLK_PLL_D_OUT0 219 +#define TEGRA114_CLK_PLL_D2 220 +#define TEGRA114_CLK_PLL_D2_OUT0 221 +#define TEGRA114_CLK_PLL_U 222 +#define TEGRA114_CLK_PLL_U_480M 223 + +#define TEGRA114_CLK_PLL_U_60M 224 +#define TEGRA114_CLK_PLL_U_48M 225 +#define TEGRA114_CLK_PLL_U_12M 226 +#define TEGRA114_CLK_PLL_X 227 +#define TEGRA114_CLK_PLL_X_OUT0 228 +#define TEGRA114_CLK_PLL_RE_VCO 229 +#define TEGRA114_CLK_PLL_RE_OUT 230 +#define TEGRA114_CLK_PLL_E_OUT0 231 +#define TEGRA114_CLK_SPDIF_IN_SYNC 232 +#define TEGRA114_CLK_I2S0_SYNC 233 +#define TEGRA114_CLK_I2S1_SYNC 234 +#define TEGRA114_CLK_I2S2_SYNC 235 +#define TEGRA114_CLK_I2S3_SYNC 236 +#define TEGRA114_CLK_I2S4_SYNC 237 +#define TEGRA114_CLK_VIMCLK_SYNC 238 +#define TEGRA114_CLK_AUDIO0 239 +#define TEGRA114_CLK_AUDIO1 240 +#define TEGRA114_CLK_AUDIO2 241 +#define TEGRA114_CLK_AUDIO3 242 +#define TEGRA114_CLK_AUDIO4 243 +#define TEGRA114_CLK_SPDIF 244 +#define TEGRA114_CLK_CLK_OUT_1 245 +#define TEGRA114_CLK_CLK_OUT_2 246 +#define TEGRA114_CLK_CLK_OUT_3 247 +#define TEGRA114_CLK_BLINK 248 +/* 249 */ +/* 250 */ +/* 251 */ +#define TEGRA114_CLK_XUSB_HOST_SRC 252 +#define TEGRA114_CLK_XUSB_FALCON_SRC 253 +#define TEGRA114_CLK_XUSB_FS_SRC 254 +#define TEGRA114_CLK_XUSB_SS_SRC 255 + +#define TEGRA114_CLK_XUSB_DEV_SRC 256 +#define TEGRA114_CLK_XUSB_DEV 257 +#define TEGRA114_CLK_XUSB_HS_SRC 258 +#define TEGRA114_CLK_SCLK 259 +#define TEGRA114_CLK_HCLK 260 +#define TEGRA114_CLK_PCLK 261 +#define TEGRA114_CLK_CCLK_G 262 +#define TEGRA114_CLK_CCLK_LP 263 +#define TEGRA114_CLK_DFLL_REF 264 +#define TEGRA114_CLK_DFLL_SOC 265 +/* 266 */ +/* 267 */ +/* 268 */ +/* 269 */ +/* 270 */ +/* 271 */ +/* 272 */ +/* 273 */ +/* 274 */ +/* 275 */ +/* 276 */ 
+/* 277 */ +/* 278 */ +/* 279 */ +/* 280 */ +/* 281 */ +/* 282 */ +/* 283 */ +/* 284 */ +/* 285 */ +/* 286 */ +/* 287 */ + +/* 288 */ +/* 289 */ +/* 290 */ +/* 291 */ +/* 292 */ +/* 293 */ +/* 294 */ +/* 295 */ +/* 296 */ +/* 297 */ +/* 298 */ +/* 299 */ +#define TEGRA114_CLK_AUDIO0_MUX 300 +#define TEGRA114_CLK_AUDIO1_MUX 301 +#define TEGRA114_CLK_AUDIO2_MUX 302 +#define TEGRA114_CLK_AUDIO3_MUX 303 +#define TEGRA114_CLK_AUDIO4_MUX 304 +#define TEGRA114_CLK_SPDIF_MUX 305 +#define TEGRA114_CLK_CLK_OUT_1_MUX 306 +#define TEGRA114_CLK_CLK_OUT_2_MUX 307 +#define TEGRA114_CLK_CLK_OUT_3_MUX 308 +#define TEGRA114_CLK_DSIA_MUX 309 +#define TEGRA114_CLK_DSIB_MUX 310 +#define TEGRA114_CLK_XUSB_SS_DIV2 311 +#define TEGRA114_CLK_CLK_MAX 312 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA114_CAR_H */ diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h new file mode 100644 index 000000000..4331f1df6 --- /dev/null +++ b/include/dt-bindings/clock/tegra124-car-common.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra124-car or + * nvidia,tegra132-car. + * + * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 185 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 185 and + * above. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H +#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H + +/* 0 */ +/* 1 */ +/* 2 */ +#define TEGRA124_CLK_ISPB 3 +#define TEGRA124_CLK_RTC 4 +#define TEGRA124_CLK_TIMER 5 +#define TEGRA124_CLK_UARTA 6 +/* 7 (register bit affects uartb and vfir) */ +/* 8 */ +#define TEGRA124_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA124_CLK_I2S1 11 +#define TEGRA124_CLK_I2C1 12 +/* 13 */ +#define TEGRA124_CLK_SDMMC1 14 +#define TEGRA124_CLK_SDMMC4 15 +/* 16 */ +#define TEGRA124_CLK_PWM 17 +#define TEGRA124_CLK_I2S2 18 +/* 20 (register bit affects vi and vi_sensor) */ +/* 21 */ +#define TEGRA124_CLK_USBD 22 +#define TEGRA124_CLK_ISP 23 +/* 26 */ +/* 25 */ +#define TEGRA124_CLK_DISP2 26 +#define TEGRA124_CLK_DISP1 27 +#define TEGRA124_CLK_HOST1X 28 +#define TEGRA124_CLK_VCP 29 +#define TEGRA124_CLK_I2S0 30 +/* 31 */ + +#define TEGRA124_CLK_MC 32 +/* 33 */ +#define TEGRA124_CLK_APBDMA 34 +/* 35 */ +#define TEGRA124_CLK_KBC 36 +/* 37 */ +/* 38 */ +/* 39 (register bit affects fuse and fuse_burn) */ +#define TEGRA124_CLK_KFUSE 40 +#define TEGRA124_CLK_SBC1 41 +#define TEGRA124_CLK_NOR 42 +/* 43 */ +#define TEGRA124_CLK_SBC2 44 +/* 45 */ +#define TEGRA124_CLK_SBC3 46 +#define TEGRA124_CLK_I2C5 47 +#define TEGRA124_CLK_DSIA 48 +/* 49 */ +#define TEGRA124_CLK_MIPI 50 +#define TEGRA124_CLK_HDMI 51 +#define TEGRA124_CLK_CSI 52 +/* 53 */ +#define TEGRA124_CLK_I2C2 54 +#define TEGRA124_CLK_UARTC 55 +#define TEGRA124_CLK_MIPI_CAL 56 +#define TEGRA124_CLK_EMC 57 +#define TEGRA124_CLK_USB2 58 +#define TEGRA124_CLK_USB3 59 +/* 60 */ +#define TEGRA124_CLK_VDE 61 +#define TEGRA124_CLK_BSEA 62 +#define TEGRA124_CLK_BSEV 63 + +/* 64 */ +#define TEGRA124_CLK_UARTD 65 +/* 66 */ +#define TEGRA124_CLK_I2C3 67 +#define TEGRA124_CLK_SBC4 68 +#define TEGRA124_CLK_SDMMC3 69 +#define TEGRA124_CLK_PCIE 70 +#define TEGRA124_CLK_OWR 71 +#define TEGRA124_CLK_AFI 72 +#define TEGRA124_CLK_CSITE 73 +/* 74 */ +/* 75 */ +#define TEGRA124_CLK_LA 76 +#define TEGRA124_CLK_TRACE 77 +#define TEGRA124_CLK_SOC_THERM 78 +#define TEGRA124_CLK_DTV 79 +/* 80 */ +#define TEGRA124_CLK_I2CSLOW 81 +#define TEGRA124_CLK_DSIB 82 +#define TEGRA124_CLK_TSEC 83 +/* 84 */ +/* 85 */ +/* 86 */ +/* 87 */ +/* 88 */ +#define TEGRA124_CLK_XUSB_HOST 89 +/* 90 */ +#define TEGRA124_CLK_MSENC 91 +#define TEGRA124_CLK_CSUS 92 +/* 93 */ +/* 94 */ +/* 95 (bit affects xusb_dev and xusb_dev_src) */ + +/* 96 */ +/* 97 */ +/* 98 */ +#define TEGRA124_CLK_MSELECT 99 +#define TEGRA124_CLK_TSENSOR 100 +#define TEGRA124_CLK_I2S3 101 +#define TEGRA124_CLK_I2S4 102 +#define TEGRA124_CLK_I2C4 103 +#define TEGRA124_CLK_SBC5 104 +#define TEGRA124_CLK_SBC6 105 +#define TEGRA124_CLK_D_AUDIO 106 +#define TEGRA124_CLK_APBIF 107 +#define TEGRA124_CLK_DAM0 108 +#define TEGRA124_CLK_DAM1 109 +#define TEGRA124_CLK_DAM2 110 +#define TEGRA124_CLK_HDA2CODEC_2X 111 +/* 112 */ +#define TEGRA124_CLK_AUDIO0_2X 113 +#define TEGRA124_CLK_AUDIO1_2X 114 +#define TEGRA124_CLK_AUDIO2_2X 115 +#define TEGRA124_CLK_AUDIO3_2X 116 +#define TEGRA124_CLK_AUDIO4_2X 117 +#define TEGRA124_CLK_SPDIF_2X 118 +#define TEGRA124_CLK_ACTMON 119 +#define TEGRA124_CLK_EXTERN1 120 +#define TEGRA124_CLK_EXTERN2 121 +#define TEGRA124_CLK_EXTERN3 122 +#define TEGRA124_CLK_SATA_OOB 123 +#define TEGRA124_CLK_SATA 124 +#define TEGRA124_CLK_HDA 125 +/* 126 */ +#define TEGRA124_CLK_SE 127 + +#define TEGRA124_CLK_HDA2HDMI 128 +#define TEGRA124_CLK_SATA_COLD 129 +/* 130 */ +/* 131 */ +/* 132 */ +/* 133 */ +/* 134 */ +/* 135 */ +#define 
TEGRA124_CLK_CEC 136 +/* 137 */ +/* 138 */ +/* 139 */ +/* 140 */ +/* 141 */ +/* 142 */ +/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */ +/* xusb_host_src and xusb_ss_src) */ +#define TEGRA124_CLK_CILAB 144 +#define TEGRA124_CLK_CILCD 145 +#define TEGRA124_CLK_CILE 146 +#define TEGRA124_CLK_DSIALP 147 +#define TEGRA124_CLK_DSIBLP 148 +#define TEGRA124_CLK_ENTROPY 149 +#define TEGRA124_CLK_DDS 150 +/* 151 */ +#define TEGRA124_CLK_DP2 152 +#define TEGRA124_CLK_AMX 153 +#define TEGRA124_CLK_ADX 154 +/* 155 (bit affects dfll_ref and dfll_soc) */ +#define TEGRA124_CLK_XUSB_SS 156 +/* 157 */ +/* 158 */ +/* 159 */ + +/* 160 */ +/* 161 */ +/* 162 */ +/* 163 */ +/* 164 */ +/* 165 */ +#define TEGRA124_CLK_I2C6 166 +/* 167 */ +/* 168 */ +/* 169 */ +/* 170 */ +#define TEGRA124_CLK_VIM2_CLK 171 +/* 172 */ +/* 173 */ +/* 174 */ +/* 175 */ +#define TEGRA124_CLK_HDMI_AUDIO 176 +#define TEGRA124_CLK_CLK72MHZ 177 +#define TEGRA124_CLK_VIC03 178 +/* 179 */ +#define TEGRA124_CLK_ADX1 180 +#define TEGRA124_CLK_DPAUX 181 +#define TEGRA124_CLK_SOR0 182 +/* 183 */ +#define TEGRA124_CLK_GPU 184 +#define TEGRA124_CLK_AMX1 185 +/* 186 */ +/* 187 */ +/* 188 */ +/* 189 */ +/* 190 */ +/* 191 */ +#define TEGRA124_CLK_UARTB 192 +#define TEGRA124_CLK_VFIR 193 +#define TEGRA124_CLK_SPDIF_IN 194 +#define TEGRA124_CLK_SPDIF_OUT 195 +#define TEGRA124_CLK_VI 196 +#define TEGRA124_CLK_VI_SENSOR 197 +#define TEGRA124_CLK_FUSE 198 +#define TEGRA124_CLK_FUSE_BURN 199 +#define TEGRA124_CLK_CLK_32K 200 +#define TEGRA124_CLK_CLK_M 201 +#define TEGRA124_CLK_CLK_M_DIV2 202 +#define TEGRA124_CLK_CLK_M_DIV4 203 +#define TEGRA124_CLK_PLL_REF 204 +#define TEGRA124_CLK_PLL_C 205 +#define TEGRA124_CLK_PLL_C_OUT1 206 +#define TEGRA124_CLK_PLL_C2 207 +#define TEGRA124_CLK_PLL_C3 208 +#define TEGRA124_CLK_PLL_M 209 +#define TEGRA124_CLK_PLL_M_OUT1 210 +#define TEGRA124_CLK_PLL_P 211 +#define TEGRA124_CLK_PLL_P_OUT1 212 +#define TEGRA124_CLK_PLL_P_OUT2 213 +#define TEGRA124_CLK_PLL_P_OUT3 214 +#define TEGRA124_CLK_PLL_P_OUT4 215 +#define TEGRA124_CLK_PLL_A 216 +#define TEGRA124_CLK_PLL_A_OUT0 217 +#define TEGRA124_CLK_PLL_D 218 +#define TEGRA124_CLK_PLL_D_OUT0 219 +#define TEGRA124_CLK_PLL_D2 220 +#define TEGRA124_CLK_PLL_D2_OUT0 221 +#define TEGRA124_CLK_PLL_U 222 +#define TEGRA124_CLK_PLL_U_480M 223 + +#define TEGRA124_CLK_PLL_U_60M 224 +#define TEGRA124_CLK_PLL_U_48M 225 +#define TEGRA124_CLK_PLL_U_12M 226 +/* 227 */ +/* 228 */ +#define TEGRA124_CLK_PLL_RE_VCO 229 +#define TEGRA124_CLK_PLL_RE_OUT 230 +#define TEGRA124_CLK_PLL_E 231 +#define TEGRA124_CLK_SPDIF_IN_SYNC 232 +#define TEGRA124_CLK_I2S0_SYNC 233 +#define TEGRA124_CLK_I2S1_SYNC 234 +#define TEGRA124_CLK_I2S2_SYNC 235 +#define TEGRA124_CLK_I2S3_SYNC 236 +#define TEGRA124_CLK_I2S4_SYNC 237 +#define TEGRA124_CLK_VIMCLK_SYNC 238 +#define TEGRA124_CLK_AUDIO0 239 +#define TEGRA124_CLK_AUDIO1 240 +#define TEGRA124_CLK_AUDIO2 241 +#define TEGRA124_CLK_AUDIO3 242 +#define TEGRA124_CLK_AUDIO4 243 +#define TEGRA124_CLK_SPDIF 244 +#define TEGRA124_CLK_CLK_OUT_1 245 +#define TEGRA124_CLK_CLK_OUT_2 246 +#define TEGRA124_CLK_CLK_OUT_3 247 +#define TEGRA124_CLK_BLINK 248 +/* 249 */ +/* 250 */ +/* 251 */ +#define TEGRA124_CLK_XUSB_HOST_SRC 252 +#define TEGRA124_CLK_XUSB_FALCON_SRC 253 +#define TEGRA124_CLK_XUSB_FS_SRC 254 +#define TEGRA124_CLK_XUSB_SS_SRC 255 + +#define TEGRA124_CLK_XUSB_DEV_SRC 256 +#define TEGRA124_CLK_XUSB_DEV 257 +#define TEGRA124_CLK_XUSB_HS_SRC 258 +#define TEGRA124_CLK_SCLK 259 +#define TEGRA124_CLK_HCLK 260 +#define TEGRA124_CLK_PCLK 261 +/* 262 */ +/* 263 */ +#define 
TEGRA124_CLK_DFLL_REF 264 +#define TEGRA124_CLK_DFLL_SOC 265 +#define TEGRA124_CLK_VI_SENSOR2 266 +#define TEGRA124_CLK_PLL_P_OUT5 267 +#define TEGRA124_CLK_CML0 268 +#define TEGRA124_CLK_CML1 269 +#define TEGRA124_CLK_PLL_C4 270 +#define TEGRA124_CLK_PLL_DP 271 +#define TEGRA124_CLK_PLL_E_MUX 272 +#define TEGRA124_CLK_PLL_D_DSI_OUT 273 +/* 274 */ +/* 275 */ +/* 276 */ +/* 277 */ +/* 278 */ +/* 279 */ +/* 280 */ +/* 281 */ +/* 282 */ +/* 283 */ +/* 284 */ +/* 285 */ +/* 286 */ +/* 287 */ + +/* 288 */ +/* 289 */ +/* 290 */ +/* 291 */ +/* 292 */ +/* 293 */ +/* 294 */ +/* 295 */ +/* 296 */ +/* 297 */ +/* 298 */ +/* 299 */ +#define TEGRA124_CLK_AUDIO0_MUX 300 +#define TEGRA124_CLK_AUDIO1_MUX 301 +#define TEGRA124_CLK_AUDIO2_MUX 302 +#define TEGRA124_CLK_AUDIO3_MUX 303 +#define TEGRA124_CLK_AUDIO4_MUX 304 +#define TEGRA124_CLK_SPDIF_MUX 305 +#define TEGRA124_CLK_CLK_OUT_1_MUX 306 +#define TEGRA124_CLK_CLK_OUT_2_MUX 307 +#define TEGRA124_CLK_CLK_OUT_3_MUX 308 +/* 309 */ +/* 310 */ +#define TEGRA124_CLK_SOR0_LVDS 311 +#define TEGRA124_CLK_XUSB_SS_DIV2 312 + +#define TEGRA124_CLK_PLL_M_UD 313 +#define TEGRA124_CLK_PLL_C_UD 314 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H */ diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h new file mode 100644 index 000000000..c520ee231 --- /dev/null +++ b/include/dt-bindings/clock/tegra124-car.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides Tegra124-specific constants for binding + * nvidia,tegra124-car. + */ + +#include + +#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_H + +#define TEGRA124_CLK_PLL_X 227 +#define TEGRA124_CLK_PLL_X_OUT0 228 + +#define TEGRA124_CLK_CCLK_G 262 +#define TEGRA124_CLK_CCLK_LP 263 + +#define TEGRA124_CLK_CLK_MAX 315 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */ diff --git a/include/dt-bindings/clock/tegra186-clock.h b/include/dt-bindings/clock/tegra186-clock.h new file mode 100644 index 000000000..d6b525f45 --- /dev/null +++ b/include/dt-bindings/clock/tegra186-clock.h @@ -0,0 +1,941 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** @file */ + +#ifndef _MACH_T186_CLK_T186_H +#define _MACH_T186_CLK_T186_H + +/** + * @defgroup clock_ids Clock Identifiers + * @{ + * @defgroup extern_input external input clocks + * @{ + * @def TEGRA186_CLK_OSC + * @def TEGRA186_CLK_CLK_32K + * @def TEGRA186_CLK_DTV_INPUT + * @def TEGRA186_CLK_SOR0_PAD_CLKOUT + * @def TEGRA186_CLK_SOR1_PAD_CLKOUT + * @def TEGRA186_CLK_I2S1_SYNC_INPUT + * @def TEGRA186_CLK_I2S2_SYNC_INPUT + * @def TEGRA186_CLK_I2S3_SYNC_INPUT + * @def TEGRA186_CLK_I2S4_SYNC_INPUT + * @def TEGRA186_CLK_I2S5_SYNC_INPUT + * @def TEGRA186_CLK_I2S6_SYNC_INPUT + * @def TEGRA186_CLK_SPDIFIN_SYNC_INPUT + * @} + * + * @defgroup extern_output external output clocks + * @{ + * @def TEGRA186_CLK_EXTPERIPH1 + * @def TEGRA186_CLK_EXTPERIPH2 + * @def TEGRA186_CLK_EXTPERIPH3 + * @def TEGRA186_CLK_EXTPERIPH4 + * @} + * + * @defgroup display_clks display related clocks + * @{ + * @def TEGRA186_CLK_CEC + * @def TEGRA186_CLK_DSIC + * @def TEGRA186_CLK_DSIC_LP + * @def TEGRA186_CLK_DSID + * @def TEGRA186_CLK_DSID_LP + * @def TEGRA186_CLK_DPAUX1 + * @def TEGRA186_CLK_DPAUX + * @def TEGRA186_CLK_HDA2HDMICODEC + * @def TEGRA186_CLK_NVDISPLAY_DISP + * @def TEGRA186_CLK_NVDISPLAY_DSC + * @def TEGRA186_CLK_NVDISPLAY_P0 + * @def TEGRA186_CLK_NVDISPLAY_P1 + * @def TEGRA186_CLK_NVDISPLAY_P2 + * @def TEGRA186_CLK_NVDISPLAYHUB + * @def TEGRA186_CLK_SOR_SAFE + * @def TEGRA186_CLK_SOR0 + * 
@def TEGRA186_CLK_SOR0_OUT + * @def TEGRA186_CLK_SOR1 + * @def TEGRA186_CLK_SOR1_OUT + * @def TEGRA186_CLK_DSI + * @def TEGRA186_CLK_MIPI_CAL + * @def TEGRA186_CLK_DSIA_LP + * @def TEGRA186_CLK_DSIB + * @def TEGRA186_CLK_DSIB_LP + * @} + * + * @defgroup camera_clks camera related clocks + * @{ + * @def TEGRA186_CLK_NVCSI + * @def TEGRA186_CLK_NVCSILP + * @def TEGRA186_CLK_VI + * @} + * + * @defgroup audio_clks audio related clocks + * @{ + * @def TEGRA186_CLK_ACLK + * @def TEGRA186_CLK_ADSP + * @def TEGRA186_CLK_ADSPNEON + * @def TEGRA186_CLK_AHUB + * @def TEGRA186_CLK_APE + * @def TEGRA186_CLK_APB2APE + * @def TEGRA186_CLK_AUD_MCLK + * @def TEGRA186_CLK_DMIC1 + * @def TEGRA186_CLK_DMIC2 + * @def TEGRA186_CLK_DMIC3 + * @def TEGRA186_CLK_DMIC4 + * @def TEGRA186_CLK_DSPK1 + * @def TEGRA186_CLK_DSPK2 + * @def TEGRA186_CLK_HDA + * @def TEGRA186_CLK_HDA2CODEC_2X + * @def TEGRA186_CLK_I2S1 + * @def TEGRA186_CLK_I2S2 + * @def TEGRA186_CLK_I2S3 + * @def TEGRA186_CLK_I2S4 + * @def TEGRA186_CLK_I2S5 + * @def TEGRA186_CLK_I2S6 + * @def TEGRA186_CLK_MAUD + * @def TEGRA186_CLK_PLL_A_OUT0 + * @def TEGRA186_CLK_SPDIF_DOUBLER + * @def TEGRA186_CLK_SPDIF_IN + * @def TEGRA186_CLK_SPDIF_OUT + * @def TEGRA186_CLK_SYNC_DMIC1 + * @def TEGRA186_CLK_SYNC_DMIC2 + * @def TEGRA186_CLK_SYNC_DMIC3 + * @def TEGRA186_CLK_SYNC_DMIC4 + * @def TEGRA186_CLK_SYNC_DMIC5 + * @def TEGRA186_CLK_SYNC_DSPK1 + * @def TEGRA186_CLK_SYNC_DSPK2 + * @def TEGRA186_CLK_SYNC_I2S1 + * @def TEGRA186_CLK_SYNC_I2S2 + * @def TEGRA186_CLK_SYNC_I2S3 + * @def TEGRA186_CLK_SYNC_I2S4 + * @def TEGRA186_CLK_SYNC_I2S5 + * @def TEGRA186_CLK_SYNC_I2S6 + * @def TEGRA186_CLK_SYNC_SPDIF + * @} + * + * @defgroup uart_clks UART clocks + * @{ + * @def TEGRA186_CLK_AON_UART_FST_MIPI_CAL + * @def TEGRA186_CLK_UARTA + * @def TEGRA186_CLK_UARTB + * @def TEGRA186_CLK_UARTC + * @def TEGRA186_CLK_UARTD + * @def TEGRA186_CLK_UARTE + * @def TEGRA186_CLK_UARTF + * @def TEGRA186_CLK_UARTG + * @def TEGRA186_CLK_UART_FST_MIPI_CAL + * @} + * + * @defgroup i2c_clks I2C clocks + * @{ + * @def TEGRA186_CLK_AON_I2C_SLOW + * @def TEGRA186_CLK_I2C1 + * @def TEGRA186_CLK_I2C2 + * @def TEGRA186_CLK_I2C3 + * @def TEGRA186_CLK_I2C4 + * @def TEGRA186_CLK_I2C5 + * @def TEGRA186_CLK_I2C6 + * @def TEGRA186_CLK_I2C8 + * @def TEGRA186_CLK_I2C9 + * @def TEGRA186_CLK_I2C1 + * @def TEGRA186_CLK_I2C12 + * @def TEGRA186_CLK_I2C13 + * @def TEGRA186_CLK_I2C14 + * @def TEGRA186_CLK_I2C_SLOW + * @def TEGRA186_CLK_VI_I2C + * @} + * + * @defgroup spi_clks SPI clocks + * @{ + * @def TEGRA186_CLK_SPI1 + * @def TEGRA186_CLK_SPI2 + * @def TEGRA186_CLK_SPI3 + * @def TEGRA186_CLK_SPI4 + * @} + * + * @defgroup storage storage related clocks + * @{ + * @def TEGRA186_CLK_SATA + * @def TEGRA186_CLK_SATA_OOB + * @def TEGRA186_CLK_SATA_IOBIST + * @def TEGRA186_CLK_SDMMC_LEGACY_TM + * @def TEGRA186_CLK_SDMMC1 + * @def TEGRA186_CLK_SDMMC2 + * @def TEGRA186_CLK_SDMMC3 + * @def TEGRA186_CLK_SDMMC4 + * @def TEGRA186_CLK_QSPI + * @def TEGRA186_CLK_QSPI_OUT + * @def TEGRA186_CLK_UFSDEV_REF + * @def TEGRA186_CLK_UFSHC + * @} + * + * @defgroup pwm_clks PWM clocks + * @{ + * @def TEGRA186_CLK_PWM1 + * @def TEGRA186_CLK_PWM2 + * @def TEGRA186_CLK_PWM3 + * @def TEGRA186_CLK_PWM4 + * @def TEGRA186_CLK_PWM5 + * @def TEGRA186_CLK_PWM6 + * @def TEGRA186_CLK_PWM7 + * @def TEGRA186_CLK_PWM8 + * @} + * + * @defgroup plls PLLs and related clocks + * @{ + * @def TEGRA186_CLK_PLLREFE_OUT_GATED + * @def TEGRA186_CLK_PLLREFE_OUT1 + * @def TEGRA186_CLK_PLLD_OUT1 + * @def TEGRA186_CLK_PLLP_OUT0 + * @def TEGRA186_CLK_PLLP_OUT5 + * @def 
TEGRA186_CLK_PLLA + * @def TEGRA186_CLK_PLLE_PWRSEQ + * @def TEGRA186_CLK_PLLA_OUT1 + * @def TEGRA186_CLK_PLLREFE_REF + * @def TEGRA186_CLK_UPHY_PLL0_PWRSEQ + * @def TEGRA186_CLK_UPHY_PLL1_PWRSEQ + * @def TEGRA186_CLK_PLLREFE_PLLE_PASSTHROUGH + * @def TEGRA186_CLK_PLLREFE_PEX + * @def TEGRA186_CLK_PLLREFE_IDDQ + * @def TEGRA186_CLK_PLLC_OUT_AON + * @def TEGRA186_CLK_PLLC_OUT_ISP + * @def TEGRA186_CLK_PLLC_OUT_VE + * @def TEGRA186_CLK_PLLC4_OUT + * @def TEGRA186_CLK_PLLREFE_OUT + * @def TEGRA186_CLK_PLLREFE_PLL_REF + * @def TEGRA186_CLK_PLLE + * @def TEGRA186_CLK_PLLC + * @def TEGRA186_CLK_PLLP + * @def TEGRA186_CLK_PLLD + * @def TEGRA186_CLK_PLLD2 + * @def TEGRA186_CLK_PLLREFE_VCO + * @def TEGRA186_CLK_PLLC2 + * @def TEGRA186_CLK_PLLC3 + * @def TEGRA186_CLK_PLLDP + * @def TEGRA186_CLK_PLLC4_VCO + * @def TEGRA186_CLK_PLLA1 + * @def TEGRA186_CLK_PLLNVCSI + * @def TEGRA186_CLK_PLLDISPHUB + * @def TEGRA186_CLK_PLLD3 + * @def TEGRA186_CLK_PLLBPMPCAM + * @def TEGRA186_CLK_PLLAON + * @def TEGRA186_CLK_PLLU + * @def TEGRA186_CLK_PLLC4_VCO_DIV2 + * @def TEGRA186_CLK_PLL_REF + * @def TEGRA186_CLK_PLLREFE_OUT1_DIV5 + * @def TEGRA186_CLK_UTMIP_PLL_PWRSEQ + * @def TEGRA186_CLK_PLL_U_48M + * @def TEGRA186_CLK_PLL_U_480M + * @def TEGRA186_CLK_PLLC4_OUT0 + * @def TEGRA186_CLK_PLLC4_OUT1 + * @def TEGRA186_CLK_PLLC4_OUT2 + * @def TEGRA186_CLK_PLLC4_OUT_MUX + * @def TEGRA186_CLK_DFLLDISP_DIV + * @def TEGRA186_CLK_PLLDISPHUB_DIV + * @def TEGRA186_CLK_PLLP_DIV8 + * @} + * + * @defgroup nafll_clks NAFLL clock sources + * @{ + * @def TEGRA186_CLK_NAFLL_AXI_CBB + * @def TEGRA186_CLK_NAFLL_BCPU + * @def TEGRA186_CLK_NAFLL_BPMP + * @def TEGRA186_CLK_NAFLL_DISP + * @def TEGRA186_CLK_NAFLL_GPU + * @def TEGRA186_CLK_NAFLL_ISP + * @def TEGRA186_CLK_NAFLL_MCPU + * @def TEGRA186_CLK_NAFLL_NVDEC + * @def TEGRA186_CLK_NAFLL_NVENC + * @def TEGRA186_CLK_NAFLL_NVJPG + * @def TEGRA186_CLK_NAFLL_SCE + * @def TEGRA186_CLK_NAFLL_SE + * @def TEGRA186_CLK_NAFLL_TSEC + * @def TEGRA186_CLK_NAFLL_TSECB + * @def TEGRA186_CLK_NAFLL_VI + * @def TEGRA186_CLK_NAFLL_VIC + * @} + * + * @defgroup mphy MPHY related clocks + * @{ + * @def TEGRA186_CLK_MPHY_L0_RX_SYMB + * @def TEGRA186_CLK_MPHY_L0_RX_LS_BIT + * @def TEGRA186_CLK_MPHY_L0_TX_SYMB + * @def TEGRA186_CLK_MPHY_L0_TX_LS_3XBIT + * @def TEGRA186_CLK_MPHY_L0_RX_ANA + * @def TEGRA186_CLK_MPHY_L1_RX_ANA + * @def TEGRA186_CLK_MPHY_IOBIST + * @def TEGRA186_CLK_MPHY_TX_1MHZ_REF + * @def TEGRA186_CLK_MPHY_CORE_PLL_FIXED + * @} + * + * @defgroup eavb EAVB related clocks + * @{ + * @def TEGRA186_CLK_EQOS_AXI + * @def TEGRA186_CLK_EQOS_PTP_REF + * @def TEGRA186_CLK_EQOS_RX + * @def TEGRA186_CLK_EQOS_RX_INPUT + * @def TEGRA186_CLK_EQOS_TX + * @} + * + * @defgroup usb USB related clocks + * @{ + * @def TEGRA186_CLK_PEX_USB_PAD0_MGMT + * @def TEGRA186_CLK_PEX_USB_PAD1_MGMT + * @def TEGRA186_CLK_HSIC_TRK + * @def TEGRA186_CLK_USB2_TRK + * @def TEGRA186_CLK_USB2_HSIC_TRK + * @def TEGRA186_CLK_XUSB_CORE_SS + * @def TEGRA186_CLK_XUSB_CORE_DEV + * @def TEGRA186_CLK_XUSB_FALCON + * @def TEGRA186_CLK_XUSB_FS + * @def TEGRA186_CLK_XUSB + * @def TEGRA186_CLK_XUSB_DEV + * @def TEGRA186_CLK_XUSB_HOST + * @def TEGRA186_CLK_XUSB_SS + * @} + * + * @defgroup bigblock compute block related clocks + * @{ + * @def TEGRA186_CLK_GPCCLK + * @def TEGRA186_CLK_GPC2CLK + * @def TEGRA186_CLK_GPU + * @def TEGRA186_CLK_HOST1X + * @def TEGRA186_CLK_ISP + * @def TEGRA186_CLK_NVDEC + * @def TEGRA186_CLK_NVENC + * @def TEGRA186_CLK_NVJPG + * @def TEGRA186_CLK_SE + * @def TEGRA186_CLK_TSEC + * @def TEGRA186_CLK_TSECB + * @def 
TEGRA186_CLK_VIC + * @} + * + * @defgroup can CAN bus related clocks + * @{ + * @def TEGRA186_CLK_CAN1 + * @def TEGRA186_CLK_CAN1_HOST + * @def TEGRA186_CLK_CAN2 + * @def TEGRA186_CLK_CAN2_HOST + * @} + * + * @defgroup system basic system clocks + * @{ + * @def TEGRA186_CLK_ACTMON + * @def TEGRA186_CLK_AON_APB + * @def TEGRA186_CLK_AON_CPU_NIC + * @def TEGRA186_CLK_AON_NIC + * @def TEGRA186_CLK_AXI_CBB + * @def TEGRA186_CLK_BPMP_APB + * @def TEGRA186_CLK_BPMP_CPU_NIC + * @def TEGRA186_CLK_BPMP_NIC_RATE + * @def TEGRA186_CLK_CLK_M + * @def TEGRA186_CLK_EMC + * @def TEGRA186_CLK_MSS_ENCRYPT + * @def TEGRA186_CLK_SCE_APB + * @def TEGRA186_CLK_SCE_CPU_NIC + * @def TEGRA186_CLK_SCE_NIC + * @def TEGRA186_CLK_TSC + * @} + * + * @defgroup pcie_clks PCIe related clocks + * @{ + * @def TEGRA186_CLK_AFI + * @def TEGRA186_CLK_PCIE + * @def TEGRA186_CLK_PCIE2_IOBIST + * @def TEGRA186_CLK_PCIERX0 + * @def TEGRA186_CLK_PCIERX1 + * @def TEGRA186_CLK_PCIERX2 + * @def TEGRA186_CLK_PCIERX3 + * @def TEGRA186_CLK_PCIERX4 + * @} + */ + +/** @brief output of gate CLK_ENB_FUSE */ +#define TEGRA186_CLK_FUSE 0 +/** + * @brief It's not what you think + * @details output of gate CLK_ENB_GPU. This output connects to the GPU + * pwrclk. @warning: This is almost certainly not the clock you think + * it is. If you're looking for the clock of the graphics engine, see + * TEGRA186_GPCCLK + */ +#define TEGRA186_CLK_GPU 1 +/** @brief output of gate CLK_ENB_PCIE */ +#define TEGRA186_CLK_PCIE 3 +/** @brief output of the divider IPFS_CLK_DIVISOR */ +#define TEGRA186_CLK_AFI 4 +/** @brief output of gate CLK_ENB_PCIE2_IOBIST */ +#define TEGRA186_CLK_PCIE2_IOBIST 5 +/** @brief output of gate CLK_ENB_PCIERX0*/ +#define TEGRA186_CLK_PCIERX0 6 +/** @brief output of gate CLK_ENB_PCIERX1*/ +#define TEGRA186_CLK_PCIERX1 7 +/** @brief output of gate CLK_ENB_PCIERX2*/ +#define TEGRA186_CLK_PCIERX2 8 +/** @brief output of gate CLK_ENB_PCIERX3*/ +#define TEGRA186_CLK_PCIERX3 9 +/** @brief output of gate CLK_ENB_PCIERX4*/ +#define TEGRA186_CLK_PCIERX4 10 +/** @brief output branch of PLL_C for ISP, controlled by gate CLK_ENB_PLLC_OUT_ISP */ +#define TEGRA186_CLK_PLLC_OUT_ISP 11 +/** @brief output branch of PLL_C for VI, controlled by gate CLK_ENB_PLLC_OUT_VE */ +#define TEGRA186_CLK_PLLC_OUT_VE 12 +/** @brief output branch of PLL_C for AON domain, controlled by gate CLK_ENB_PLLC_OUT_AON */ +#define TEGRA186_CLK_PLLC_OUT_AON 13 +/** @brief output of gate CLK_ENB_SOR_SAFE */ +#define TEGRA186_CLK_SOR_SAFE 39 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S2 */ +#define TEGRA186_CLK_I2S2 42 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S3 */ +#define TEGRA186_CLK_I2S3 43 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPDF_IN */ +#define TEGRA186_CLK_SPDIF_IN 44 +/** @brief output of gate CLK_ENB_SPDIF_DOUBLER */ +#define TEGRA186_CLK_SPDIF_DOUBLER 45 +/** @clkdesc{spi_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_SPI3} */ +#define TEGRA186_CLK_SPI3 46 +/** @clkdesc{i2c_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_I2C1} */ +#define TEGRA186_CLK_I2C1 47 +/** @clkdesc{i2c_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_I2C5} */ +#define TEGRA186_CLK_I2C5 48 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI1 */ +#define TEGRA186_CLK_SPI1 49 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ISP */ +#define TEGRA186_CLK_ISP 50 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI */ +#define TEGRA186_CLK_VI 51 +/** 
@brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC1 */ +#define TEGRA186_CLK_SDMMC1 52 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC2 */ +#define TEGRA186_CLK_SDMMC2 53 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */ +#define TEGRA186_CLK_SDMMC4 54 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */ +#define TEGRA186_CLK_UARTA 55 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTB */ +#define TEGRA186_CLK_UARTB 56 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */ +#define TEGRA186_CLK_HOST1X 57 +/** + * @brief controls the EMC clock frequency. + * @details Doing a clk_set_rate on this clock will select the + * appropriate clock source, program the source rate and execute a + * specific sequence to switch to the new clock source for both memory + * controllers. This can be used to control the balance between memory + * throughput and memory controller power. + */ +#define TEGRA186_CLK_EMC 58 +/* @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH4 */ +#define TEGRA186_CLK_EXTPERIPH4 73 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI4 */ +#define TEGRA186_CLK_SPI4 74 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C3 */ +#define TEGRA186_CLK_I2C3 75 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC3 */ +#define TEGRA186_CLK_SDMMC3 76 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTD */ +#define TEGRA186_CLK_UARTD 77 +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S1 */ +#define TEGRA186_CLK_I2S1 79 +/** output of gate CLK_ENB_DTV */ +#define TEGRA186_CLK_DTV 80 +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSEC */ +#define TEGRA186_CLK_TSEC 81 +/** @brief output of gate CLK_ENB_DP2 */ +#define TEGRA186_CLK_DP2 82 +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S4 */ +#define TEGRA186_CLK_I2S4 84 +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S5 */ +#define TEGRA186_CLK_I2S5 85 +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C4 */ +#define TEGRA186_CLK_I2C4 86 +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AHUB */ +#define TEGRA186_CLK_AHUB 87 +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA2CODEC_2X */ +#define TEGRA186_CLK_HDA2CODEC_2X 88 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH1 */ +#define TEGRA186_CLK_EXTPERIPH1 89 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH2 */ +#define TEGRA186_CLK_EXTPERIPH2 90 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH3 */ +#define TEGRA186_CLK_EXTPERIPH3 91 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C_SLOW */ +#define TEGRA186_CLK_I2C_SLOW 92 +/** @brief output of the SOR1_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */ +#define TEGRA186_CLK_SOR1 93 +/** @brief output of gate CLK_ENB_CEC */ +#define TEGRA186_CLK_CEC 94 +/** @brief output of gate CLK_ENB_DPAUX1 */ +#define TEGRA186_CLK_DPAUX1 95 +/** @brief output of gate CLK_ENB_DPAUX */ +#define TEGRA186_CLK_DPAUX 96 +/** @brief output of the SOR0_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */ +#define TEGRA186_CLK_SOR0 97 +/** @brief output of gate CLK_ENB_HDA2HDMICODEC */ +#define TEGRA186_CLK_HDA2HDMICODEC 98 +/** @brief output of mux controlled by 
CLK_RST_CONTROLLER_CLK_SOURCE_SATA */ +#define TEGRA186_CLK_SATA 99 +/** @brief output of gate CLK_ENB_SATA_OOB */ +#define TEGRA186_CLK_SATA_OOB 100 +/** @brief output of gate CLK_ENB_SATA_IOBIST */ +#define TEGRA186_CLK_SATA_IOBIST 101 +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA */ +#define TEGRA186_CLK_HDA 102 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SE */ +#define TEGRA186_CLK_SE 103 +/** @brief output of gate CLK_ENB_APB2APE */ +#define TEGRA186_CLK_APB2APE 104 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_APE */ +#define TEGRA186_CLK_APE 105 +/** @brief output of gate CLK_ENB_IQC1 */ +#define TEGRA186_CLK_IQC1 106 +/** @brief output of gate CLK_ENB_IQC2 */ +#define TEGRA186_CLK_IQC2 107 +/** divide by 2 version of TEGRA186_CLK_PLLREFE_VCO */ +#define TEGRA186_CLK_PLLREFE_OUT 108 +/** @brief output of gate CLK_ENB_PLLREFE_PLL_REF */ +#define TEGRA186_CLK_PLLREFE_PLL_REF 109 +/** @brief output of gate CLK_ENB_PLLC4_OUT */ +#define TEGRA186_CLK_PLLC4_OUT 110 +/** @brief output of mux xusb_core_clk_switch on page 67 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB 111 +/** controls xusb_dev_ce signal on page 66 and 67 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_DEV 112 +/** controls xusb_host_ce signal on page 67 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_HOST 113 +/** controls xusb_ss_ce signal on page 67 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_SS 114 +/** @brief output of gate CLK_ENB_DSI */ +#define TEGRA186_CLK_DSI 115 +/** @brief output of gate CLK_ENB_MIPI_CAL */ +#define TEGRA186_CLK_MIPI_CAL 116 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIA_LP */ +#define TEGRA186_CLK_DSIA_LP 117 +/** @brief output of gate CLK_ENB_DSIB */ +#define TEGRA186_CLK_DSIB 118 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIB_LP */ +#define TEGRA186_CLK_DSIB_LP 119 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC1 */ +#define TEGRA186_CLK_DMIC1 122 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC2 */ +#define TEGRA186_CLK_DMIC2 123 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AUD_MCLK */ +#define TEGRA186_CLK_AUD_MCLK 124 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */ +#define TEGRA186_CLK_I2C6 125 +/**output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UART_FST_MIPI_CAL */ +#define TEGRA186_CLK_UART_FST_MIPI_CAL 126 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */ +#define TEGRA186_CLK_VIC 127 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM */ +#define TEGRA186_CLK_SDMMC_LEGACY_TM 128 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDEC */ +#define TEGRA186_CLK_NVDEC 129 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG */ +#define TEGRA186_CLK_NVJPG 130 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVENC */ +#define TEGRA186_CLK_NVENC 131 +/** @brief output of the QSPI_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI */ +#define TEGRA186_CLK_QSPI 132 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI_I2C */ +#define TEGRA186_CLK_VI_I2C 133 +/** @brief output of gate CLK_ENB_HSIC_TRK */ +#define TEGRA186_CLK_HSIC_TRK 134 +/** @brief output of gate CLK_ENB_USB2_TRK */ +#define TEGRA186_CLK_USB2_TRK 135 +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_MAUD 
*/ +#define TEGRA186_CLK_MAUD 136 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSECB */ +#define TEGRA186_CLK_TSECB 137 +/** @brief output of gate CLK_ENB_ADSP */ +#define TEGRA186_CLK_ADSP 138 +/** @brief output of gate CLK_ENB_ADSPNEON */ +#define TEGRA186_CLK_ADSPNEON 139 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_RX_LS_SYMB */ +#define TEGRA186_CLK_MPHY_L0_RX_SYMB 140 +/** @brief output of gate CLK_ENB_MPHY_L0_RX_LS_BIT */ +#define TEGRA186_CLK_MPHY_L0_RX_LS_BIT 141 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_TX_LS_SYMB */ +#define TEGRA186_CLK_MPHY_L0_TX_SYMB 142 +/** @brief output of gate CLK_ENB_MPHY_L0_TX_LS_3XBIT */ +#define TEGRA186_CLK_MPHY_L0_TX_LS_3XBIT 143 +/** @brief output of gate CLK_ENB_MPHY_L0_RX_ANA */ +#define TEGRA186_CLK_MPHY_L0_RX_ANA 144 +/** @brief output of gate CLK_ENB_MPHY_L1_RX_ANA */ +#define TEGRA186_CLK_MPHY_L1_RX_ANA 145 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_IOBIST */ +#define TEGRA186_CLK_MPHY_IOBIST 146 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_TX_1MHZ_REF */ +#define TEGRA186_CLK_MPHY_TX_1MHZ_REF 147 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_CORE_PLL_FIXED */ +#define TEGRA186_CLK_MPHY_CORE_PLL_FIXED 148 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AXI_CBB */ +#define TEGRA186_CLK_AXI_CBB 149 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC3 */ +#define TEGRA186_CLK_DMIC3 150 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC4 */ +#define TEGRA186_CLK_DMIC4 151 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK1 */ +#define TEGRA186_CLK_DSPK1 152 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK2 */ +#define TEGRA186_CLK_DSPK2 153 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */ +#define TEGRA186_CLK_I2S6 154 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P0 */ +#define TEGRA186_CLK_NVDISPLAY_P0 155 +/** @brief output of the NVDISPLAY_DISP_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP */ +#define TEGRA186_CLK_NVDISPLAY_DISP 156 +/** @brief output of gate CLK_ENB_NVDISPLAY_DSC */ +#define TEGRA186_CLK_NVDISPLAY_DSC 157 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAYHUB */ +#define TEGRA186_CLK_NVDISPLAYHUB 158 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P1 */ +#define TEGRA186_CLK_NVDISPLAY_P1 159 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P2 */ +#define TEGRA186_CLK_NVDISPLAY_P2 160 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH */ +#define TEGRA186_CLK_TACH 166 +/** @brief output of gate CLK_ENB_EQOS */ +#define TEGRA186_CLK_EQOS_AXI 167 +/** @brief output of gate CLK_ENB_EQOS_RX */ +#define TEGRA186_CLK_EQOS_RX 168 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSHC_CG_SYS */ +#define TEGRA186_CLK_UFSHC 178 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSDEV_REF */ +#define TEGRA186_CLK_UFSDEV_REF 179 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSI */ +#define TEGRA186_CLK_NVCSI 180 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSILP */ +#define TEGRA186_CLK_NVCSILP 181 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C7 */ +#define 
TEGRA186_CLK_I2C7 182 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C9 */ +#define TEGRA186_CLK_I2C9 183 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C12 */ +#define TEGRA186_CLK_I2C12 184 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C13 */ +#define TEGRA186_CLK_I2C13 185 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C14 */ +#define TEGRA186_CLK_I2C14 186 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM1 */ +#define TEGRA186_CLK_PWM1 187 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM2 */ +#define TEGRA186_CLK_PWM2 188 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM3 */ +#define TEGRA186_CLK_PWM3 189 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM5 */ +#define TEGRA186_CLK_PWM5 190 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM6 */ +#define TEGRA186_CLK_PWM6 191 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM7 */ +#define TEGRA186_CLK_PWM7 192 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM8 */ +#define TEGRA186_CLK_PWM8 193 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTE */ +#define TEGRA186_CLK_UARTE 194 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTF */ +#define TEGRA186_CLK_UARTF 195 +/** @deprecated */ +#define TEGRA186_CLK_DBGAPB 196 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_CPU_NIC */ +#define TEGRA186_CLK_BPMP_CPU_NIC 197 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_APB */ +#define TEGRA186_CLK_BPMP_APB 199 +/** @brief output of mux controlled by TEGRA186_CLK_SOC_ACTMON */ +#define TEGRA186_CLK_ACTMON 201 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_CPU_NIC */ +#define TEGRA186_CLK_AON_CPU_NIC 208 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN1 */ +#define TEGRA186_CLK_CAN1 210 +/** @brief output of gate CLK_ENB_CAN1_HOST */ +#define TEGRA186_CLK_CAN1_HOST 211 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN2 */ +#define TEGRA186_CLK_CAN2 212 +/** @brief output of gate CLK_ENB_CAN2_HOST */ +#define TEGRA186_CLK_CAN2_HOST 213 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_APB */ +#define TEGRA186_CLK_AON_APB 214 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTC */ +#define TEGRA186_CLK_UARTC 215 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTG */ +#define TEGRA186_CLK_UARTG 216 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_UART_FST_MIPI_CAL */ +#define TEGRA186_CLK_AON_UART_FST_MIPI_CAL 217 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C2 */ +#define TEGRA186_CLK_I2C2 218 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C8 */ +#define TEGRA186_CLK_I2C8 219 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C10 */ +#define TEGRA186_CLK_I2C10 220 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_I2C_SLOW */ +#define TEGRA186_CLK_AON_I2C_SLOW 221 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI2 */ +#define TEGRA186_CLK_SPI2 222 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC5 */ +#define TEGRA186_CLK_DMIC5 223 +/** @brief output of mux controlled by 
CLK_RST_CONTROLLER_CLK_SOURCE_AON_TOUCH */ +#define TEGRA186_CLK_AON_TOUCH 224 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM4 */ +#define TEGRA186_CLK_PWM4 225 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSC. This clock object is read only and is used for all timers in the system. */ +#define TEGRA186_CLK_TSC 226 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_MSS_ENCRYPT */ +#define TEGRA186_CLK_MSS_ENCRYPT 227 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_CPU_NIC */ +#define TEGRA186_CLK_SCE_CPU_NIC 228 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_APB */ +#define TEGRA186_CLK_SCE_APB 230 +/** @brief output of gate CLK_ENB_DSIC */ +#define TEGRA186_CLK_DSIC 231 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIC_LP */ +#define TEGRA186_CLK_DSIC_LP 232 +/** @brief output of gate CLK_ENB_DSID */ +#define TEGRA186_CLK_DSID 233 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSID_LP */ +#define TEGRA186_CLK_DSID_LP 234 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_SATA_USB_RX_BYP */ +#define TEGRA186_CLK_PEX_SATA_USB_RX_BYP 236 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_OUT */ +#define TEGRA186_CLK_SPDIF_OUT 238 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_PTP_REF_CLK_0 */ +#define TEGRA186_CLK_EQOS_PTP_REF 239 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK */ +#define TEGRA186_CLK_EQOS_TX 240 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_USB2_HSIC_TRK */ +#define TEGRA186_CLK_USB2_HSIC_TRK 241 +/** @brief output of mux xusb_ss_clk_switch on page 66 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_CORE_SS 242 +/** @brief output of mux xusb_core_dev_clk_switch on page 67 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_CORE_DEV 243 +/** @brief output of mux xusb_core_falcon_clk_switch on page 67 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_FALCON 244 +/** @brief output of mux xusb_fs_clk_switch on page 66 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_FS 245 +/** @brief output of the divider CLK_RST_CONTROLLER_PLLA_OUT */ +#define TEGRA186_CLK_PLL_A_OUT0 246 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S1 */ +#define TEGRA186_CLK_SYNC_I2S1 247 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S2 */ +#define TEGRA186_CLK_SYNC_I2S2 248 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S3 */ +#define TEGRA186_CLK_SYNC_I2S3 249 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S4 */ +#define TEGRA186_CLK_SYNC_I2S4 250 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S5 */ +#define TEGRA186_CLK_SYNC_I2S5 251 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S6 */ +#define TEGRA186_CLK_SYNC_I2S6 252 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK1 */ +#define TEGRA186_CLK_SYNC_DSPK1 253 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK2 */ +#define TEGRA186_CLK_SYNC_DSPK2 254 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC1 */ +#define TEGRA186_CLK_SYNC_DMIC1 255 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC2 */ +#define TEGRA186_CLK_SYNC_DMIC2 256 +/** @brief output of mux controlled by 
CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC3 */ +#define TEGRA186_CLK_SYNC_DMIC3 257 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC4 */ +#define TEGRA186_CLK_SYNC_DMIC4 259 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_SPDIF */ +#define TEGRA186_CLK_SYNC_SPDIF 260 +/** @brief output of gate CLK_ENB_PLLREFE_OUT */ +#define TEGRA186_CLK_PLLREFE_OUT_GATED 261 +/** @brief output of the divider PLLREFE_DIVP in CLK_RST_CONTROLLER_PLLREFE_BASE. PLLREFE has 2 outputs: + * * VCO/pdiv defined by this clock object + * * VCO/2 defined by TEGRA186_CLK_PLLREFE_OUT + */ +#define TEGRA186_CLK_PLLREFE_OUT1 262 +#define TEGRA186_CLK_PLLD_OUT1 267 +/** @brief output of the divider PLLP_DIVP in CLK_RST_CONTROLLER_PLLP_BASE */ +#define TEGRA186_CLK_PLLP_OUT0 269 +/** @brief output of the divider CLK_RST_CONTROLLER_PLLP_OUTC */ +#define TEGRA186_CLK_PLLP_OUT5 270 +/** PLL controlled by CLK_RST_CONTROLLER_PLLA_BASE for use by audio clocks */ +#define TEGRA186_CLK_PLLA 271 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_ACLK_BURST_POLICY divided by the divider controlled by ACLK_CLK_DIVISOR in CLK_RST_CONTROLLER_SUPER_ACLK_DIVIDER */ +#define TEGRA186_CLK_ACLK 273 +/** fixed 48MHz clock divided down from TEGRA186_CLK_PLL_U */ +#define TEGRA186_CLK_PLL_U_48M 274 +/** fixed 480MHz clock divided down from TEGRA186_CLK_PLL_U */ +#define TEGRA186_CLK_PLL_U_480M 275 +/** @brief output of the divider PLLC4_DIVP in CLK_RST_CONTROLLER_PLLC4_BASE. Output frequency is TEGRA186_CLK_PLLC4_VCO/PLLC4_DIVP */ +#define TEGRA186_CLK_PLLC4_OUT0 276 +/** fixed /3 divider. Output frequency of this clock is TEGRA186_CLK_PLLC4_VCO/3 */ +#define TEGRA186_CLK_PLLC4_OUT1 277 +/** fixed /5 divider. Output frequency of this clock is TEGRA186_CLK_PLLC4_VCO/5 */ +#define TEGRA186_CLK_PLLC4_OUT2 278 +/** @brief output of mux controlled by PLLC4_CLK_SEL in CLK_RST_CONTROLLER_PLLC4_MISC1 */ +#define TEGRA186_CLK_PLLC4_OUT_MUX 279 +/** @brief output of divider NVDISPLAY_DISP_CLK_DIVISOR in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP when DFLLDISP_DIV is selected in NVDISPLAY_DISP_CLK_SRC */ +#define TEGRA186_CLK_DFLLDISP_DIV 284 +/** @brief output of divider NVDISPLAY_DISP_CLK_DIVISOR in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP when PLLDISPHUB_DIV is selected in NVDISPLAY_DISP_CLK_SRC */ +#define TEGRA186_CLK_PLLDISPHUB_DIV 285 +/** fixed /8 divider which is used as the input for TEGRA186_CLK_SOR_SAFE */ +#define TEGRA186_CLK_PLLP_DIV8 286 +/** @brief output of divider CLK_RST_CONTROLLER_BPMP_NIC_RATE */ +#define TEGRA186_CLK_BPMP_NIC 287 +/** @brief output of the divider CLK_RST_CONTROLLER_PLLA1_OUT1 */ +#define TEGRA186_CLK_PLL_A_OUT1 288 +/** @deprecated */ +#define TEGRA186_CLK_GPC2CLK 289 +/** A fake clock which must be enabled during KFUSE read operations to ensure adequate VDD_CORE voltage. */ +#define TEGRA186_CLK_KFUSE 293 +/** + * @brief controls the PLLE hardware sequencer. + * @details This clock only has enable and disable methods. When the + * PLLE hw sequencer is enabled, PLLE, will be enabled or disabled by + * hw based on the control signals from the PCIe, SATA and XUSB + * clocks. When the PLLE hw sequencer is disabled, the state of PLLE + * is controlled by sw using clk_enable/clk_disable on + * TEGRA186_CLK_PLLE. 
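To make the software-controlled path described above concrete, here is a minimal consumer-side sketch using the common clock framework. It only illustrates the choice the comment describes; the device pointer and the "plle" / "plle_pwrseq" con_ids are hypothetical assumptions and not part of this binding.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/types.h>

/* Sketch only: pick hardware-sequenced or software-gated PLLE. */
static int example_plle_control(struct device *dev, bool use_hw_sequencer)
{
	struct clk *plle = devm_clk_get(dev, "plle");               /* hypothetical con_id */
	struct clk *plle_pwrseq = devm_clk_get(dev, "plle_pwrseq"); /* hypothetical con_id */

	if (IS_ERR(plle))
		return PTR_ERR(plle);
	if (IS_ERR(plle_pwrseq))
		return PTR_ERR(plle_pwrseq);

	if (use_hw_sequencer)
		/* PCIe, SATA and XUSB request signals then gate PLLE in hardware. */
		return clk_prepare_enable(plle_pwrseq);

	/* Sequencer left disabled: PLLE is gated explicitly by software. */
	return clk_prepare_enable(plle);
}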
+ */
+#define TEGRA186_CLK_PLLE_PWRSEQ 294
+/** fixed 60MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLLREFE_REF 295
+/** @brief output of mux controlled by SOR0_CLK_SEL0 and SOR0_CLK_SEL1 in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0_OUT 296
+/** @brief output of mux controlled by SOR1_CLK_SEL0 and SOR1_CLK_SEL1 in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1_OUT 297
+/** @brief fixed /5 divider. Output frequency of this clock is TEGRA186_CLK_PLLREFE_OUT1/5. Used as input for TEGRA186_CLK_EQOS_AXI */
+#define TEGRA186_CLK_PLLREFE_OUT1_DIV5 298
+/** @brief controls the UTMIP_PLL (aka PLLU) hardware sequencer */
+#define TEGRA186_CLK_UTMIP_PLL_PWRSEQ 301
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL0_MGMT */
+#define TEGRA186_CLK_PEX_USB_PAD0_MGMT 302
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL1_MGMT */
+#define TEGRA186_CLK_PEX_USB_PAD1_MGMT 303
+/** @brief controls the UPHY_PLL0 hardware sequencer */
+#define TEGRA186_CLK_UPHY_PLL0_PWRSEQ 304
+/** @brief controls the UPHY_PLL1 hardware sequencer */
+#define TEGRA186_CLK_UPHY_PLL1_PWRSEQ 305
+/** @brief control for PLLREFE_IDDQ in CLK_RST_CONTROLLER_PLLREFE_MISC so the bypass output can even be used when the PLL is disabled */
+#define TEGRA186_CLK_PLLREFE_PLLE_PASSTHROUGH 306
+/** @brief output of the mux controlled by PLLREFE_SEL_CLKIN_PEX in CLK_RST_CONTROLLER_PLLREFE_MISC */
+#define TEGRA186_CLK_PLLREFE_PEX 307
+/** @brief control for PLLREFE_IDDQ in CLK_RST_CONTROLLER_PLLREFE_MISC to turn on the PLL when enabled */
+#define TEGRA186_CLK_PLLREFE_IDDQ 308
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI */
+#define TEGRA186_CLK_QSPI_OUT 309
+/**
+ * @brief GPC2CLK-div-2
+ * @details fixed /2 divider. Output frequency is
+ * TEGRA186_CLK_GPC2CLK/2. The frequency of this clock is the
+ * frequency at which the GPU graphics engine runs. */
+#define TEGRA186_CLK_GPCCLK 310
+/** @brief output of divider CLK_RST_CONTROLLER_AON_NIC_RATE */
+#define TEGRA186_CLK_AON_NIC 450
+/** @brief output of divider CLK_RST_CONTROLLER_SCE_NIC_RATE */
+#define TEGRA186_CLK_SCE_NIC 451
+/** Fixed 100MHz PLL for PCIe, SATA and superspeed USB */
+#define TEGRA186_CLK_PLLE 512
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC_BASE */
+#define TEGRA186_CLK_PLLC 513
+/** Fixed 408MHz PLL for use by peripheral clocks */
+#define TEGRA186_CLK_PLLP 516
+/** @deprecated */
+#define TEGRA186_CLK_PLL_P TEGRA186_CLK_PLLP
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD_BASE for use by DSI */
+#define TEGRA186_CLK_PLLD 518
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD2_BASE for use by HDMI or DP */
+#define TEGRA186_CLK_PLLD2 519
+/**
+ * @brief PLL controlled by CLK_RST_CONTROLLER_PLLREFE_BASE.
+ * @details Note that this clock only controls the VCO output, before
+ * the post-divider. See TEGRA186_CLK_PLLREFE_OUT1 for more
+ * information.
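The TEGRA186_CLK_EMC description earlier in this file notes that a single clk_set_rate() call selects the source, programs it and runs the switch sequence on both memory controllers. A minimal consumer sketch of that call is shown below; the "emc" con_id and the 1.6 GHz target are illustrative assumptions, not taken from this binding.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch only: scale the external memory clock through the clock framework. */
static int example_emc_set_rate(struct device *dev)
{
	struct clk *emc = devm_clk_get(dev, "emc"); /* hypothetical con_id */
	long rate;

	if (IS_ERR(emc))
		return PTR_ERR(emc);

	/* Let the framework pick the closest rate the EMC tables support. */
	rate = clk_round_rate(emc, 1600000000UL);
	if (rate < 0)
		return rate;

	/* One call: select the source, program it, switch both memory controllers. */
	return clk_set_rate(emc, rate);
}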
+ */ +#define TEGRA186_CLK_PLLREFE_VCO 520 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC2_BASE */ +#define TEGRA186_CLK_PLLC2 521 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC3_BASE */ +#define TEGRA186_CLK_PLLC3 522 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLDP_BASE for use as the DP link clock */ +#define TEGRA186_CLK_PLLDP 523 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */ +#define TEGRA186_CLK_PLLC4_VCO 524 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLA1_BASE for use by audio clocks */ +#define TEGRA186_CLK_PLLA1 525 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVCSI_BASE */ +#define TEGRA186_CLK_PLLNVCSI 526 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLDISPHUB_BASE */ +#define TEGRA186_CLK_PLLDISPHUB 527 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD3_BASE for use by HDMI or DP */ +#define TEGRA186_CLK_PLLD3 528 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLBPMPCAM_BASE */ +#define TEGRA186_CLK_PLLBPMPCAM 531 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLAON_BASE for use by IP blocks in the AON domain */ +#define TEGRA186_CLK_PLLAON 532 +/** Fixed frequency 960MHz PLL for USB and EAVB */ +#define TEGRA186_CLK_PLLU 533 +/** fixed /2 divider. Output frequency is TEGRA186_CLK_PLLC4_VCO/2 */ +#define TEGRA186_CLK_PLLC4_VCO_DIV2 535 +/** @brief NAFLL clock source for AXI_CBB */ +#define TEGRA186_CLK_NAFLL_AXI_CBB 564 +/** @brief NAFLL clock source for BPMP */ +#define TEGRA186_CLK_NAFLL_BPMP 565 +/** @brief NAFLL clock source for ISP */ +#define TEGRA186_CLK_NAFLL_ISP 566 +/** @brief NAFLL clock source for NVDEC */ +#define TEGRA186_CLK_NAFLL_NVDEC 567 +/** @brief NAFLL clock source for NVENC */ +#define TEGRA186_CLK_NAFLL_NVENC 568 +/** @brief NAFLL clock source for NVJPG */ +#define TEGRA186_CLK_NAFLL_NVJPG 569 +/** @brief NAFLL clock source for SCE */ +#define TEGRA186_CLK_NAFLL_SCE 570 +/** @brief NAFLL clock source for SE */ +#define TEGRA186_CLK_NAFLL_SE 571 +/** @brief NAFLL clock source for TSEC */ +#define TEGRA186_CLK_NAFLL_TSEC 572 +/** @brief NAFLL clock source for TSECB */ +#define TEGRA186_CLK_NAFLL_TSECB 573 +/** @brief NAFLL clock source for VI */ +#define TEGRA186_CLK_NAFLL_VI 574 +/** @brief NAFLL clock source for VIC */ +#define TEGRA186_CLK_NAFLL_VIC 575 +/** @brief NAFLL clock source for DISP */ +#define TEGRA186_CLK_NAFLL_DISP 576 +/** @brief NAFLL clock source for GPU */ +#define TEGRA186_CLK_NAFLL_GPU 577 +/** @brief NAFLL clock source for M-CPU cluster */ +#define TEGRA186_CLK_NAFLL_MCPU 578 +/** @brief NAFLL clock source for B-CPU cluster */ +#define TEGRA186_CLK_NAFLL_BCPU 579 +/** @brief input from Tegra's CLK_32K_IN pad */ +#define TEGRA186_CLK_CLK_32K 608 +/** @brief output of divider CLK_RST_CONTROLLER_CLK_M_DIVIDE */ +#define TEGRA186_CLK_CLK_M 609 +/** @brief output of divider PLL_REF_DIV in CLK_RST_CONTROLLER_OSC_CTRL */ +#define TEGRA186_CLK_PLL_REF 610 +/** @brief input from Tegra's XTAL_IN */ +#define TEGRA186_CLK_OSC 612 +/** @brief clock recovered from EAVB input */ +#define TEGRA186_CLK_EQOS_RX_INPUT 613 +/** @brief clock recovered from DTV input */ +#define TEGRA186_CLK_DTV_INPUT 614 +/** @brief SOR0 brick output which feeds into SOR0_CLK_SEL mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0*/ +#define TEGRA186_CLK_SOR0_PAD_CLKOUT 615 +/** @brief SOR1 brick output which feeds into SOR1_CLK_SEL mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1*/ +#define TEGRA186_CLK_SOR1_PAD_CLKOUT 616 +/** @brief clock recovered from I2S1 input */ +#define TEGRA186_CLK_I2S1_SYNC_INPUT 617 +/** @brief 
clock recovered from I2S2 input */ +#define TEGRA186_CLK_I2S2_SYNC_INPUT 618 +/** @brief clock recovered from I2S3 input */ +#define TEGRA186_CLK_I2S3_SYNC_INPUT 619 +/** @brief clock recovered from I2S4 input */ +#define TEGRA186_CLK_I2S4_SYNC_INPUT 620 +/** @brief clock recovered from I2S5 input */ +#define TEGRA186_CLK_I2S5_SYNC_INPUT 621 +/** @brief clock recovered from I2S6 input */ +#define TEGRA186_CLK_I2S6_SYNC_INPUT 622 +/** @brief clock recovered from SPDIFIN input */ +#define TEGRA186_CLK_SPDIFIN_SYNC_INPUT 623 + +/** + * @brief subject to change + * @details maximum clock identifier value plus one. + */ +#define TEGRA186_CLK_CLK_MAX 624 + +/** @} */ + +#endif diff --git a/include/dt-bindings/clock/tegra194-clock.h b/include/dt-bindings/clock/tegra194-clock.h new file mode 100644 index 000000000..a2ff66342 --- /dev/null +++ b/include/dt-bindings/clock/tegra194-clock.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __ABI_MACH_T194_CLOCK_H +#define __ABI_MACH_T194_CLOCK_H + +#define TEGRA194_CLK_ACTMON 1 +#define TEGRA194_CLK_ADSP 2 +#define TEGRA194_CLK_ADSPNEON 3 +#define TEGRA194_CLK_AHUB 4 +#define TEGRA194_CLK_APB2APE 5 +#define TEGRA194_CLK_APE 6 +#define TEGRA194_CLK_AUD_MCLK 7 +#define TEGRA194_CLK_AXI_CBB 8 +#define TEGRA194_CLK_CAN1 9 +#define TEGRA194_CLK_CAN1_HOST 10 +#define TEGRA194_CLK_CAN2 11 +#define TEGRA194_CLK_CAN2_HOST 12 +#define TEGRA194_CLK_CEC 13 +#define TEGRA194_CLK_CLK_M 14 +#define TEGRA194_CLK_DMIC1 15 +#define TEGRA194_CLK_DMIC2 16 +#define TEGRA194_CLK_DMIC3 17 +#define TEGRA194_CLK_DMIC4 18 +#define TEGRA194_CLK_DPAUX 19 +#define TEGRA194_CLK_DPAUX1 20 +#define TEGRA194_CLK_ACLK 21 +#define TEGRA194_CLK_MSS_ENCRYPT 22 +#define TEGRA194_CLK_EQOS_RX_INPUT 23 +#define TEGRA194_CLK_IQC2 24 +#define TEGRA194_CLK_AON_APB 25 +#define TEGRA194_CLK_AON_NIC 26 +#define TEGRA194_CLK_AON_CPU_NIC 27 +#define TEGRA194_CLK_PLLA1 28 +#define TEGRA194_CLK_DSPK1 29 +#define TEGRA194_CLK_DSPK2 30 +#define TEGRA194_CLK_EMC 31 +#define TEGRA194_CLK_EQOS_AXI 32 +#define TEGRA194_CLK_EQOS_PTP_REF 33 +#define TEGRA194_CLK_EQOS_RX 34 +#define TEGRA194_CLK_EQOS_TX 35 +#define TEGRA194_CLK_EXTPERIPH1 36 +#define TEGRA194_CLK_EXTPERIPH2 37 +#define TEGRA194_CLK_EXTPERIPH3 38 +#define TEGRA194_CLK_EXTPERIPH4 39 +#define TEGRA194_CLK_FUSE 40 +#define TEGRA194_CLK_GPCCLK 41 +#define TEGRA194_CLK_GPU_PWR 42 +#define TEGRA194_CLK_HDA 43 +#define TEGRA194_CLK_HDA2CODEC_2X 44 +#define TEGRA194_CLK_HDA2HDMICODEC 45 +#define TEGRA194_CLK_HOST1X 46 +#define TEGRA194_CLK_HSIC_TRK 47 +#define TEGRA194_CLK_I2C1 48 +#define TEGRA194_CLK_I2C2 49 +#define TEGRA194_CLK_I2C3 50 +#define TEGRA194_CLK_I2C4 51 +#define TEGRA194_CLK_I2C6 52 +#define TEGRA194_CLK_I2C7 53 +#define TEGRA194_CLK_I2C8 54 +#define TEGRA194_CLK_I2C9 55 +#define TEGRA194_CLK_I2S1 56 +#define TEGRA194_CLK_I2S1_SYNC_INPUT 57 +#define TEGRA194_CLK_I2S2 58 +#define TEGRA194_CLK_I2S2_SYNC_INPUT 59 +#define TEGRA194_CLK_I2S3 60 +#define TEGRA194_CLK_I2S3_SYNC_INPUT 61 +#define TEGRA194_CLK_I2S4 62 +#define TEGRA194_CLK_I2S4_SYNC_INPUT 63 +#define TEGRA194_CLK_I2S5 64 +#define TEGRA194_CLK_I2S5_SYNC_INPUT 65 +#define TEGRA194_CLK_I2S6 66 +#define TEGRA194_CLK_I2S6_SYNC_INPUT 67 +#define TEGRA194_CLK_IQC1 68 +#define TEGRA194_CLK_ISP 69 +#define TEGRA194_CLK_KFUSE 70 +#define TEGRA194_CLK_MAUD 71 +#define TEGRA194_CLK_MIPI_CAL 72 +#define TEGRA194_CLK_MPHY_CORE_PLL_FIXED 73 +#define TEGRA194_CLK_MPHY_L0_RX_ANA 74 +#define 
TEGRA194_CLK_MPHY_L0_RX_LS_BIT 75 +#define TEGRA194_CLK_MPHY_L0_RX_SYMB 76 +#define TEGRA194_CLK_MPHY_L0_TX_LS_3XBIT 77 +#define TEGRA194_CLK_MPHY_L0_TX_SYMB 78 +#define TEGRA194_CLK_MPHY_L1_RX_ANA 79 +#define TEGRA194_CLK_MPHY_TX_1MHZ_REF 80 +#define TEGRA194_CLK_NVCSI 81 +#define TEGRA194_CLK_NVCSILP 82 +#define TEGRA194_CLK_NVDEC 83 +#define TEGRA194_CLK_NVDISPLAYHUB 84 +#define TEGRA194_CLK_NVDISPLAY_DISP 85 +#define TEGRA194_CLK_NVDISPLAY_P0 86 +#define TEGRA194_CLK_NVDISPLAY_P1 87 +#define TEGRA194_CLK_NVDISPLAY_P2 88 +#define TEGRA194_CLK_NVENC 89 +#define TEGRA194_CLK_NVJPG 90 +#define TEGRA194_CLK_OSC 91 +#define TEGRA194_CLK_AON_TOUCH 92 +#define TEGRA194_CLK_PLLA 93 +#define TEGRA194_CLK_PLLAON 94 +#define TEGRA194_CLK_PLLD 95 +#define TEGRA194_CLK_PLLD2 96 +#define TEGRA194_CLK_PLLD3 97 +#define TEGRA194_CLK_PLLDP 98 +#define TEGRA194_CLK_PLLD4 99 +#define TEGRA194_CLK_PLLE 100 +#define TEGRA194_CLK_PLLP 101 +#define TEGRA194_CLK_PLLP_OUT0 102 +#define TEGRA194_CLK_UTMIPLL 103 +#define TEGRA194_CLK_PLLA_OUT0 104 +#define TEGRA194_CLK_PWM1 105 +#define TEGRA194_CLK_PWM2 106 +#define TEGRA194_CLK_PWM3 107 +#define TEGRA194_CLK_PWM4 108 +#define TEGRA194_CLK_PWM5 109 +#define TEGRA194_CLK_PWM6 110 +#define TEGRA194_CLK_PWM7 111 +#define TEGRA194_CLK_PWM8 112 +#define TEGRA194_CLK_RCE_CPU_NIC 113 +#define TEGRA194_CLK_RCE_NIC 114 +#define TEGRA194_CLK_SATA 115 +#define TEGRA194_CLK_SATA_OOB 116 +#define TEGRA194_CLK_AON_I2C_SLOW 117 +#define TEGRA194_CLK_SCE_CPU_NIC 118 +#define TEGRA194_CLK_SCE_NIC 119 +#define TEGRA194_CLK_SDMMC1 120 +#define TEGRA194_CLK_UPHY_PLL3 121 +#define TEGRA194_CLK_SDMMC3 122 +#define TEGRA194_CLK_SDMMC4 123 +#define TEGRA194_CLK_SE 124 +#define TEGRA194_CLK_SOR0_OUT 125 +#define TEGRA194_CLK_SOR0_REF 126 +#define TEGRA194_CLK_SOR0_PAD_CLKOUT 127 +#define TEGRA194_CLK_SOR1_OUT 128 +#define TEGRA194_CLK_SOR1_REF 129 +#define TEGRA194_CLK_SOR1_PAD_CLKOUT 130 +#define TEGRA194_CLK_SOR_SAFE 131 +#define TEGRA194_CLK_IQC1_IN 132 +#define TEGRA194_CLK_IQC2_IN 133 +#define TEGRA194_CLK_DMIC5 134 +#define TEGRA194_CLK_SPI1 135 +#define TEGRA194_CLK_SPI2 136 +#define TEGRA194_CLK_SPI3 137 +#define TEGRA194_CLK_I2C_SLOW 138 +#define TEGRA194_CLK_SYNC_DMIC1 139 +#define TEGRA194_CLK_SYNC_DMIC2 140 +#define TEGRA194_CLK_SYNC_DMIC3 141 +#define TEGRA194_CLK_SYNC_DMIC4 142 +#define TEGRA194_CLK_SYNC_DSPK1 143 +#define TEGRA194_CLK_SYNC_DSPK2 144 +#define TEGRA194_CLK_SYNC_I2S1 145 +#define TEGRA194_CLK_SYNC_I2S2 146 +#define TEGRA194_CLK_SYNC_I2S3 147 +#define TEGRA194_CLK_SYNC_I2S4 148 +#define TEGRA194_CLK_SYNC_I2S5 149 +#define TEGRA194_CLK_SYNC_I2S6 150 +#define TEGRA194_CLK_MPHY_FORCE_LS_MODE 151 +#define TEGRA194_CLK_TACH 152 +#define TEGRA194_CLK_TSEC 153 +#define TEGRA194_CLK_TSECB 154 +#define TEGRA194_CLK_UARTA 155 +#define TEGRA194_CLK_UARTB 156 +#define TEGRA194_CLK_UARTC 157 +#define TEGRA194_CLK_UARTD 158 +#define TEGRA194_CLK_UARTE 159 +#define TEGRA194_CLK_UARTF 160 +#define TEGRA194_CLK_UARTG 161 +#define TEGRA194_CLK_UART_FST_MIPI_CAL 162 +#define TEGRA194_CLK_UFSDEV_REF 163 +#define TEGRA194_CLK_UFSHC 164 +#define TEGRA194_CLK_USB2_TRK 165 +#define TEGRA194_CLK_VI 166 +#define TEGRA194_CLK_VIC 167 +#define TEGRA194_CLK_PVA0_AXI 168 +#define TEGRA194_CLK_PVA0_VPS0 169 +#define TEGRA194_CLK_PVA0_VPS1 170 +#define TEGRA194_CLK_PVA1_AXI 171 +#define TEGRA194_CLK_PVA1_VPS0 172 +#define TEGRA194_CLK_PVA1_VPS1 173 +#define TEGRA194_CLK_DLA0_FALCON 174 +#define TEGRA194_CLK_DLA0_CORE 175 +#define TEGRA194_CLK_DLA1_FALCON 176 +#define TEGRA194_CLK_DLA1_CORE 
177 +#define TEGRA194_CLK_SOR2_OUT 178 +#define TEGRA194_CLK_SOR2_REF 179 +#define TEGRA194_CLK_SOR2_PAD_CLKOUT 180 +#define TEGRA194_CLK_SOR3_OUT 181 +#define TEGRA194_CLK_SOR3_REF 182 +#define TEGRA194_CLK_SOR3_PAD_CLKOUT 183 +#define TEGRA194_CLK_NVDISPLAY_P3 184 +#define TEGRA194_CLK_DPAUX2 185 +#define TEGRA194_CLK_DPAUX3 186 +#define TEGRA194_CLK_NVDEC1 187 +#define TEGRA194_CLK_NVENC1 188 +#define TEGRA194_CLK_SE_FREE 189 +#define TEGRA194_CLK_UARTH 190 +#define TEGRA194_CLK_FUSE_SERIAL 191 +#define TEGRA194_CLK_QSPI0 192 +#define TEGRA194_CLK_QSPI1 193 +#define TEGRA194_CLK_QSPI0_PM 194 +#define TEGRA194_CLK_QSPI1_PM 195 +#define TEGRA194_CLK_VI_CONST 196 +#define TEGRA194_CLK_NAFLL_BPMP 197 +#define TEGRA194_CLK_NAFLL_SCE 198 +#define TEGRA194_CLK_NAFLL_NVDEC 199 +#define TEGRA194_CLK_NAFLL_NVJPG 200 +#define TEGRA194_CLK_NAFLL_TSEC 201 +#define TEGRA194_CLK_NAFLL_TSECB 202 +#define TEGRA194_CLK_NAFLL_VI 203 +#define TEGRA194_CLK_NAFLL_SE 204 +#define TEGRA194_CLK_NAFLL_NVENC 205 +#define TEGRA194_CLK_NAFLL_ISP 206 +#define TEGRA194_CLK_NAFLL_VIC 207 +#define TEGRA194_CLK_NAFLL_NVDISPLAYHUB 208 +#define TEGRA194_CLK_NAFLL_AXICBB 209 +#define TEGRA194_CLK_NAFLL_DLA 210 +#define TEGRA194_CLK_NAFLL_PVA_CORE 211 +#define TEGRA194_CLK_NAFLL_PVA_VPS 212 +#define TEGRA194_CLK_NAFLL_CVNAS 213 +#define TEGRA194_CLK_NAFLL_RCE 214 +#define TEGRA194_CLK_NAFLL_NVENC1 215 +#define TEGRA194_CLK_NAFLL_DLA_FALCON 216 +#define TEGRA194_CLK_NAFLL_NVDEC1 217 +#define TEGRA194_CLK_NAFLL_GPU 218 +#define TEGRA194_CLK_SDMMC_LEGACY_TM 219 +#define TEGRA194_CLK_PEX0_CORE_0 220 +#define TEGRA194_CLK_PEX0_CORE_1 221 +#define TEGRA194_CLK_PEX0_CORE_2 222 +#define TEGRA194_CLK_PEX0_CORE_3 223 +#define TEGRA194_CLK_PEX0_CORE_4 224 +#define TEGRA194_CLK_PEX1_CORE_5 225 +#define TEGRA194_CLK_PEX_REF1 226 +#define TEGRA194_CLK_PEX_REF2 227 +#define TEGRA194_CLK_CSI_A 229 +#define TEGRA194_CLK_CSI_B 230 +#define TEGRA194_CLK_CSI_C 231 +#define TEGRA194_CLK_CSI_D 232 +#define TEGRA194_CLK_CSI_E 233 +#define TEGRA194_CLK_CSI_F 234 +#define TEGRA194_CLK_CSI_G 235 +#define TEGRA194_CLK_CSI_H 236 +#define TEGRA194_CLK_PLLC4 237 +#define TEGRA194_CLK_PLLC4_OUT 238 +#define TEGRA194_CLK_PLLC4_OUT1 239 +#define TEGRA194_CLK_PLLC4_OUT2 240 +#define TEGRA194_CLK_PLLC4_MUXED 241 +#define TEGRA194_CLK_PLLC4_VCO_DIV2 242 +#define TEGRA194_CLK_CSI_A_PAD 244 +#define TEGRA194_CLK_CSI_B_PAD 245 +#define TEGRA194_CLK_CSI_C_PAD 246 +#define TEGRA194_CLK_CSI_D_PAD 247 +#define TEGRA194_CLK_CSI_E_PAD 248 +#define TEGRA194_CLK_CSI_F_PAD 249 +#define TEGRA194_CLK_CSI_G_PAD 250 +#define TEGRA194_CLK_CSI_H_PAD 251 +#define TEGRA194_CLK_PEX_SATA_USB_RX_BYP 254 +#define TEGRA194_CLK_PEX_USB_PAD_PLL0_MGMT 255 +#define TEGRA194_CLK_PEX_USB_PAD_PLL1_MGMT 256 +#define TEGRA194_CLK_PEX_USB_PAD_PLL2_MGMT 257 +#define TEGRA194_CLK_PEX_USB_PAD_PLL3_MGMT 258 +#define TEGRA194_CLK_XUSB_CORE_DEV 265 +#define TEGRA194_CLK_XUSB_CORE_MUX 266 +#define TEGRA194_CLK_XUSB_CORE_HOST 267 +#define TEGRA194_CLK_XUSB_CORE_SS 268 +#define TEGRA194_CLK_XUSB_FALCON 269 +#define TEGRA194_CLK_XUSB_FALCON_HOST 270 +#define TEGRA194_CLK_XUSB_FALCON_SS 271 +#define TEGRA194_CLK_XUSB_FS 272 +#define TEGRA194_CLK_XUSB_FS_HOST 273 +#define TEGRA194_CLK_XUSB_FS_DEV 274 +#define TEGRA194_CLK_XUSB_SS 275 +#define TEGRA194_CLK_XUSB_SS_DEV 276 +#define TEGRA194_CLK_XUSB_SS_SUPERSPEED 277 +#define TEGRA194_CLK_PLLDISPHUB 278 +#define TEGRA194_CLK_PLLDISPHUB_DIV 279 +#define TEGRA194_CLK_NAFLL_CLUSTER0 280 +#define TEGRA194_CLK_NAFLL_CLUSTER1 281 +#define 
TEGRA194_CLK_NAFLL_CLUSTER2 282 +#define TEGRA194_CLK_NAFLL_CLUSTER3 283 +#define TEGRA194_CLK_CAN1_CORE 284 +#define TEGRA194_CLK_CAN2_CORE 285 +#define TEGRA194_CLK_PLLA1_OUT1 286 +#define TEGRA194_CLK_PLLREFE_VCOOUT 288 +#define TEGRA194_CLK_CLK_32K 289 +#define TEGRA194_CLK_SPDIFIN_SYNC_INPUT 290 +#define TEGRA194_CLK_UTMIPLL_CLKOUT48 291 +#define TEGRA194_CLK_UTMIPLL_CLKOUT480 292 +#define TEGRA194_CLK_CVNAS 293 +#define TEGRA194_CLK_PLLNVCSI 294 +#define TEGRA194_CLK_PVA0_CPU_AXI 295 +#define TEGRA194_CLK_PVA1_CPU_AXI 296 +#define TEGRA194_CLK_PVA0_VPS 297 +#define TEGRA194_CLK_PVA1_VPS 298 +#define TEGRA194_CLK_DLA0_FALCON_MUX 299 +#define TEGRA194_CLK_DLA1_FALCON_MUX 300 +#define TEGRA194_CLK_DLA0_CORE_MUX 301 +#define TEGRA194_CLK_DLA1_CORE_MUX 302 +#define TEGRA194_CLK_UTMIPLL_HPS 304 +#define TEGRA194_CLK_I2C5 305 +#define TEGRA194_CLK_I2C10 306 +#define TEGRA194_CLK_BPMP_CPU_NIC 307 +#define TEGRA194_CLK_BPMP_APB 308 +#define TEGRA194_CLK_TSC 309 +#define TEGRA194_CLK_EMCSA 310 +#define TEGRA194_CLK_EMCSB 311 +#define TEGRA194_CLK_EMCSC 312 +#define TEGRA194_CLK_EMCSD 313 +#define TEGRA194_CLK_PLLC 314 +#define TEGRA194_CLK_PLLC2 315 +#define TEGRA194_CLK_PLLC3 316 +#define TEGRA194_CLK_TSC_REF 317 +#define TEGRA194_CLK_FUSE_BURN 318 +#define TEGRA194_CLK_PEX0_CORE_0M 319 +#define TEGRA194_CLK_PEX0_CORE_1M 320 +#define TEGRA194_CLK_PEX0_CORE_2M 321 +#define TEGRA194_CLK_PEX0_CORE_3M 322 +#define TEGRA194_CLK_PEX0_CORE_4M 323 +#define TEGRA194_CLK_PEX1_CORE_5M 324 +#define TEGRA194_CLK_PLLE_HPS 326 + +#endif diff --git a/include/dt-bindings/clock/tegra20-car.h b/include/dt-bindings/clock/tegra20-car.h new file mode 100644 index 000000000..b21a0eb32 --- /dev/null +++ b/include/dt-bindings/clock/tegra20-car.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra20-car. + * + * The first 96 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 95 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 96 and + * above. 
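The numbering rule above (IDs below 96 mirror bits in the CLK_OUT_ENB/RST_DEVICES registers, everything else sits at 96 and up) can be expressed directly in code. A small, self-contained sketch of the mapping an implementation might use follows; the struct, function name and the L/H/U bank labels are assumptions made for illustration, and the shared-bit cases called out above still need explicit handling by any real implementation.

#include <stdbool.h>

/* Which 32-bit CLK_OUT_ENB bank a low clock ID lives in, and which bit. */
struct tegra20_enb_bit {
	unsigned int bank; /* assumed: 0 = CLK_OUT_ENB_L, 1 = CLK_OUT_ENB_H, 2 = CLK_OUT_ENB_U */
	unsigned int bit;  /* bit position inside that bank */
};

/* Returns false for IDs of 96 and above, which do not correspond to enable bits. */
static bool tegra20_clk_id_to_enb_bit(unsigned int id, struct tegra20_enb_bit *out)
{
	if (id >= 96)
		return false;

	out->bank = id / 32;
	out->bit = id % 32;
	return true;
}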
+ */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA20_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA20_CAR_H + +#define TEGRA20_CLK_CPU 0 +/* 1 */ +/* 2 */ +#define TEGRA20_CLK_AC97 3 +#define TEGRA20_CLK_RTC 4 +#define TEGRA20_CLK_TIMER 5 +#define TEGRA20_CLK_UARTA 6 +/* 7 (register bit affects uart2 and vfir) */ +#define TEGRA20_CLK_GPIO 8 +#define TEGRA20_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA20_CLK_I2S1 11 +#define TEGRA20_CLK_I2C1 12 +#define TEGRA20_CLK_NDFLASH 13 +#define TEGRA20_CLK_SDMMC1 14 +#define TEGRA20_CLK_SDMMC4 15 +#define TEGRA20_CLK_TWC 16 +#define TEGRA20_CLK_PWM 17 +#define TEGRA20_CLK_I2S2 18 +#define TEGRA20_CLK_EPP 19 +/* 20 (register bit affects vi and vi_sensor) */ +#define TEGRA20_CLK_GR2D 21 +#define TEGRA20_CLK_USBD 22 +#define TEGRA20_CLK_ISP 23 +#define TEGRA20_CLK_GR3D 24 +#define TEGRA20_CLK_IDE 25 +#define TEGRA20_CLK_DISP2 26 +#define TEGRA20_CLK_DISP1 27 +#define TEGRA20_CLK_HOST1X 28 +#define TEGRA20_CLK_VCP 29 +/* 30 */ +#define TEGRA20_CLK_CACHE2 31 + +#define TEGRA20_CLK_MC 32 +#define TEGRA20_CLK_AHBDMA 33 +#define TEGRA20_CLK_APBDMA 34 +/* 35 */ +#define TEGRA20_CLK_KBC 36 +#define TEGRA20_CLK_STAT_MON 37 +#define TEGRA20_CLK_PMC 38 +#define TEGRA20_CLK_FUSE 39 +#define TEGRA20_CLK_KFUSE 40 +#define TEGRA20_CLK_SBC1 41 +#define TEGRA20_CLK_NOR 42 +#define TEGRA20_CLK_SPI 43 +#define TEGRA20_CLK_SBC2 44 +#define TEGRA20_CLK_XIO 45 +#define TEGRA20_CLK_SBC3 46 +#define TEGRA20_CLK_DVC 47 +#define TEGRA20_CLK_DSI 48 +/* 49 (register bit affects tvo and cve) */ +#define TEGRA20_CLK_MIPI 50 +#define TEGRA20_CLK_HDMI 51 +#define TEGRA20_CLK_CSI 52 +#define TEGRA20_CLK_TVDAC 53 +#define TEGRA20_CLK_I2C2 54 +#define TEGRA20_CLK_UARTC 55 +/* 56 */ +#define TEGRA20_CLK_EMC 57 +#define TEGRA20_CLK_USB2 58 +#define TEGRA20_CLK_USB3 59 +#define TEGRA20_CLK_MPE 60 +#define TEGRA20_CLK_VDE 61 +#define TEGRA20_CLK_BSEA 62 +#define TEGRA20_CLK_BSEV 63 + +#define TEGRA20_CLK_SPEEDO 64 +#define TEGRA20_CLK_UARTD 65 +#define TEGRA20_CLK_UARTE 66 +#define TEGRA20_CLK_I2C3 67 +#define TEGRA20_CLK_SBC4 68 +#define TEGRA20_CLK_SDMMC3 69 +#define TEGRA20_CLK_PEX 70 +#define TEGRA20_CLK_OWR 71 +#define TEGRA20_CLK_AFI 72 +#define TEGRA20_CLK_CSITE 73 +/* 74 */ +#define TEGRA20_CLK_AVPUCQ 75 +#define TEGRA20_CLK_LA 76 +/* 77 */ +/* 78 */ +/* 79 */ +/* 80 */ +/* 81 */ +/* 82 */ +/* 83 */ +#define TEGRA20_CLK_IRAMA 84 +#define TEGRA20_CLK_IRAMB 85 +#define TEGRA20_CLK_IRAMC 86 +#define TEGRA20_CLK_IRAMD 87 +#define TEGRA20_CLK_CRAM2 88 +#define TEGRA20_CLK_AUDIO_2X 89 /* a/k/a audio_2x_sync_clk */ +#define TEGRA20_CLK_CLK_D 90 +/* 91 */ +#define TEGRA20_CLK_CSUS 92 +#define TEGRA20_CLK_CDEV2 93 +#define TEGRA20_CLK_CDEV1 94 +/* 95 */ + +#define TEGRA20_CLK_UARTB 96 +#define TEGRA20_CLK_VFIR 97 +#define TEGRA20_CLK_SPDIF_IN 98 +#define TEGRA20_CLK_SPDIF_OUT 99 +#define TEGRA20_CLK_VI 100 +#define TEGRA20_CLK_VI_SENSOR 101 +#define TEGRA20_CLK_TVO 102 +#define TEGRA20_CLK_CVE 103 +#define TEGRA20_CLK_OSC 104 +#define TEGRA20_CLK_CLK_32K 105 /* a/k/a clk_s */ +#define TEGRA20_CLK_CLK_M 106 +#define TEGRA20_CLK_SCLK 107 +#define TEGRA20_CLK_CCLK 108 +#define TEGRA20_CLK_HCLK 109 +#define TEGRA20_CLK_PCLK 110 +#define TEGRA20_CLK_BLINK 111 +#define TEGRA20_CLK_PLL_A 112 +#define TEGRA20_CLK_PLL_A_OUT0 113 +#define TEGRA20_CLK_PLL_C 114 +#define TEGRA20_CLK_PLL_C_OUT1 115 +#define TEGRA20_CLK_PLL_D 116 +#define TEGRA20_CLK_PLL_D_OUT0 117 +#define TEGRA20_CLK_PLL_E 118 +#define TEGRA20_CLK_PLL_M 119 +#define TEGRA20_CLK_PLL_M_OUT1 120 +#define 
TEGRA20_CLK_PLL_P 121 +#define TEGRA20_CLK_PLL_P_OUT1 122 +#define TEGRA20_CLK_PLL_P_OUT2 123 +#define TEGRA20_CLK_PLL_P_OUT3 124 +#define TEGRA20_CLK_PLL_P_OUT4 125 +#define TEGRA20_CLK_PLL_S 126 +#define TEGRA20_CLK_PLL_U 127 + +#define TEGRA20_CLK_PLL_X 128 +#define TEGRA20_CLK_COP 129 /* a/k/a avp */ +#define TEGRA20_CLK_AUDIO 130 /* a/k/a audio_sync_clk */ +#define TEGRA20_CLK_PLL_REF 131 +#define TEGRA20_CLK_TWD 132 +#define TEGRA20_CLK_CLK_MAX 133 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA20_CAR_H */ diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h new file mode 100644 index 000000000..6b77e721f --- /dev/null +++ b/include/dt-bindings/clock/tegra210-car.h @@ -0,0 +1,412 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra210-car. + * + * The first 224 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 224 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 224 and + * above. + */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA210_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA210_CAR_H + +/* 0 */ +/* 1 */ +/* 2 */ +#define TEGRA210_CLK_ISPB 3 +#define TEGRA210_CLK_RTC 4 +#define TEGRA210_CLK_TIMER 5 +#define TEGRA210_CLK_UARTA 6 +/* 7 (register bit affects uartb and vfir) */ +#define TEGRA210_CLK_GPIO 8 +#define TEGRA210_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA210_CLK_I2S1 11 +#define TEGRA210_CLK_I2C1 12 +/* 13 */ +#define TEGRA210_CLK_SDMMC1 14 +#define TEGRA210_CLK_SDMMC4 15 +/* 16 */ +#define TEGRA210_CLK_PWM 17 +#define TEGRA210_CLK_I2S2 18 +/* 19 */ +/* 20 (register bit affects vi and vi_sensor) */ +/* 21 */ +#define TEGRA210_CLK_USBD 22 +#define TEGRA210_CLK_ISPA 23 +/* 24 */ +/* 25 */ +#define TEGRA210_CLK_DISP2 26 +#define TEGRA210_CLK_DISP1 27 +#define TEGRA210_CLK_HOST1X 28 +/* 29 */ +#define TEGRA210_CLK_I2S0 30 +/* 31 */ + +#define TEGRA210_CLK_MC 32 +#define TEGRA210_CLK_AHBDMA 33 +#define TEGRA210_CLK_APBDMA 34 +/* 35 */ +/* 36 */ +/* 37 */ +#define TEGRA210_CLK_PMC 38 +/* 39 (register bit affects fuse and fuse_burn) */ +#define TEGRA210_CLK_KFUSE 40 +#define TEGRA210_CLK_SBC1 41 +/* 42 */ +/* 43 */ +#define TEGRA210_CLK_SBC2 44 +/* 45 */ +#define TEGRA210_CLK_SBC3 46 +#define TEGRA210_CLK_I2C5 47 +#define TEGRA210_CLK_DSIA 48 +/* 49 */ +/* 50 */ +/* 51 */ +#define TEGRA210_CLK_CSI 52 +/* 53 */ +#define TEGRA210_CLK_I2C2 54 +#define TEGRA210_CLK_UARTC 55 +#define TEGRA210_CLK_MIPI_CAL 56 +#define TEGRA210_CLK_EMC 57 +#define TEGRA210_CLK_USB2 58 +/* 59 */ +/* 60 */ +/* 61 */ +/* 62 */ +#define TEGRA210_CLK_BSEV 63 + +/* 64 */ +#define TEGRA210_CLK_UARTD 65 +/* 66 */ +#define TEGRA210_CLK_I2C3 67 +#define TEGRA210_CLK_SBC4 68 +#define TEGRA210_CLK_SDMMC3 69 +#define TEGRA210_CLK_PCIE 70 +#define TEGRA210_CLK_OWR 71 +#define TEGRA210_CLK_AFI 72 +#define TEGRA210_CLK_CSITE 73 +/* 74 */ +/* 75 */ +#define TEGRA210_CLK_LA 76 +/* 77 */ +#define TEGRA210_CLK_SOC_THERM 78 +#define TEGRA210_CLK_DTV 79 +/* 80 */ +#define TEGRA210_CLK_I2CSLOW 81 +#define TEGRA210_CLK_DSIB 82 +#define TEGRA210_CLK_TSEC 83 +/* 84 
*/ +/* 85 */ +/* 86 */ +/* 87 */ +/* 88 */ +#define TEGRA210_CLK_XUSB_HOST 89 +/* 90 */ +/* 91 */ +#define TEGRA210_CLK_CSUS 92 +/* 93 */ +/* 94 */ +/* 95 (bit affects xusb_dev and xusb_dev_src) */ + +/* 96 */ +/* 97 */ +/* 98 */ +#define TEGRA210_CLK_MSELECT 99 +#define TEGRA210_CLK_TSENSOR 100 +#define TEGRA210_CLK_I2S3 101 +#define TEGRA210_CLK_I2S4 102 +#define TEGRA210_CLK_I2C4 103 +/* 104 */ +/* 105 */ +#define TEGRA210_CLK_D_AUDIO 106 +#define TEGRA210_CLK_APB2APE 107 +/* 108 */ +/* 109 */ +/* 110 */ +#define TEGRA210_CLK_HDA2CODEC_2X 111 +/* 112 */ +/* 113 */ +/* 114 */ +/* 115 */ +/* 116 */ +/* 117 */ +#define TEGRA210_CLK_SPDIF_2X 118 +#define TEGRA210_CLK_ACTMON 119 +#define TEGRA210_CLK_EXTERN1 120 +#define TEGRA210_CLK_EXTERN2 121 +#define TEGRA210_CLK_EXTERN3 122 +#define TEGRA210_CLK_SATA_OOB 123 +#define TEGRA210_CLK_SATA 124 +#define TEGRA210_CLK_HDA 125 +/* 126 */ +/* 127 */ + +#define TEGRA210_CLK_HDA2HDMI 128 +/* 129 */ +/* 130 */ +/* 131 */ +/* 132 */ +/* 133 */ +/* 134 */ +/* 135 */ +#define TEGRA210_CLK_CEC 136 +/* 137 */ +/* 138 */ +/* 139 */ +/* 140 */ +/* 141 */ +/* 142 */ +/* (bit affects xusb_falcon_src, xusb_fs_src, xusb_host_src and xusb_ss_src) */ +#define TEGRA210_CLK_XUSB_GATE 143 +#define TEGRA210_CLK_CILAB 144 +#define TEGRA210_CLK_CILCD 145 +#define TEGRA210_CLK_CILE 146 +#define TEGRA210_CLK_DSIALP 147 +#define TEGRA210_CLK_DSIBLP 148 +#define TEGRA210_CLK_ENTROPY 149 +/* 150 */ +/* 151 */ +#define TEGRA210_CLK_DP2 152 +/* 153 */ +/* 154 */ +/* 155 (bit affects dfll_ref and dfll_soc) */ +#define TEGRA210_CLK_XUSB_SS 156 +/* 157 */ +/* 158 */ +/* 159 */ + +/* 160 */ +#define TEGRA210_CLK_DMIC1 161 +#define TEGRA210_CLK_DMIC2 162 +/* 163 */ +/* 164 */ +/* 165 */ +#define TEGRA210_CLK_I2C6 166 +/* 167 */ +/* 168 */ +/* 169 */ +/* 170 */ +#define TEGRA210_CLK_VIM2_CLK 171 +/* 172 */ +#define TEGRA210_CLK_MIPIBIF 173 +/* 174 */ +/* 175 */ +/* 176 */ +#define TEGRA210_CLK_CLK72MHZ 177 +#define TEGRA210_CLK_VIC03 178 +/* 179 */ +/* 180 */ +#define TEGRA210_CLK_DPAUX 181 +#define TEGRA210_CLK_SOR0 182 +#define TEGRA210_CLK_SOR1 183 +#define TEGRA210_CLK_GPU 184 +#define TEGRA210_CLK_DBGAPB 185 +/* 186 */ +#define TEGRA210_CLK_PLL_P_OUT_ADSP 187 +/* 188 ((bit affects pll_a_out_adsp and pll_a_out0_out_adsp)*/ +#define TEGRA210_CLK_PLL_G_REF 189 +/* 190 */ +/* 191 */ + +/* 192 */ +#define TEGRA210_CLK_SDMMC_LEGACY 193 +#define TEGRA210_CLK_NVDEC 194 +#define TEGRA210_CLK_NVJPG 195 +/* 196 */ +#define TEGRA210_CLK_DMIC3 197 +#define TEGRA210_CLK_APE 198 +#define TEGRA210_CLK_ADSP 199 +/* 200 */ +/* 201 */ +#define TEGRA210_CLK_MAUD 202 +/* 203 */ +/* 204 */ +/* 205 */ +#define TEGRA210_CLK_TSECB 206 +#define TEGRA210_CLK_DPAUX1 207 +#define TEGRA210_CLK_VI_I2C 208 +#define TEGRA210_CLK_HSIC_TRK 209 +#define TEGRA210_CLK_USB2_TRK 210 +#define TEGRA210_CLK_QSPI 211 +#define TEGRA210_CLK_UARTAPE 212 +/* 213 */ +/* 214 */ +/* 215 */ +/* 216 */ +/* 217 */ +#define TEGRA210_CLK_ADSP_NEON 218 +#define TEGRA210_CLK_NVENC 219 +#define TEGRA210_CLK_IQC2 220 +#define TEGRA210_CLK_IQC1 221 +#define TEGRA210_CLK_SOR_SAFE 222 +#define TEGRA210_CLK_PLL_P_OUT_CPU 223 + + +#define TEGRA210_CLK_UARTB 224 +#define TEGRA210_CLK_VFIR 225 +#define TEGRA210_CLK_SPDIF_IN 226 +#define TEGRA210_CLK_SPDIF_OUT 227 +#define TEGRA210_CLK_VI 228 +#define TEGRA210_CLK_VI_SENSOR 229 +#define TEGRA210_CLK_FUSE 230 +#define TEGRA210_CLK_FUSE_BURN 231 +#define TEGRA210_CLK_CLK_32K 232 +#define TEGRA210_CLK_CLK_M 233 +#define TEGRA210_CLK_CLK_M_DIV2 234 +#define TEGRA210_CLK_CLK_M_DIV4 235 +#define 
TEGRA210_CLK_PLL_REF 236 +#define TEGRA210_CLK_PLL_C 237 +#define TEGRA210_CLK_PLL_C_OUT1 238 +#define TEGRA210_CLK_PLL_C2 239 +#define TEGRA210_CLK_PLL_C3 240 +#define TEGRA210_CLK_PLL_M 241 +#define TEGRA210_CLK_PLL_M_OUT1 242 +#define TEGRA210_CLK_PLL_P 243 +#define TEGRA210_CLK_PLL_P_OUT1 244 +#define TEGRA210_CLK_PLL_P_OUT2 245 +#define TEGRA210_CLK_PLL_P_OUT3 246 +#define TEGRA210_CLK_PLL_P_OUT4 247 +#define TEGRA210_CLK_PLL_A 248 +#define TEGRA210_CLK_PLL_A_OUT0 249 +#define TEGRA210_CLK_PLL_D 250 +#define TEGRA210_CLK_PLL_D_OUT0 251 +#define TEGRA210_CLK_PLL_D2 252 +#define TEGRA210_CLK_PLL_D2_OUT0 253 +#define TEGRA210_CLK_PLL_U 254 +#define TEGRA210_CLK_PLL_U_480M 255 + +#define TEGRA210_CLK_PLL_U_60M 256 +#define TEGRA210_CLK_PLL_U_48M 257 +/* 258 */ +#define TEGRA210_CLK_PLL_X 259 +#define TEGRA210_CLK_PLL_X_OUT0 260 +#define TEGRA210_CLK_PLL_RE_VCO 261 +#define TEGRA210_CLK_PLL_RE_OUT 262 +#define TEGRA210_CLK_PLL_E 263 +#define TEGRA210_CLK_SPDIF_IN_SYNC 264 +#define TEGRA210_CLK_I2S0_SYNC 265 +#define TEGRA210_CLK_I2S1_SYNC 266 +#define TEGRA210_CLK_I2S2_SYNC 267 +#define TEGRA210_CLK_I2S3_SYNC 268 +#define TEGRA210_CLK_I2S4_SYNC 269 +#define TEGRA210_CLK_VIMCLK_SYNC 270 +#define TEGRA210_CLK_AUDIO0 271 +#define TEGRA210_CLK_AUDIO1 272 +#define TEGRA210_CLK_AUDIO2 273 +#define TEGRA210_CLK_AUDIO3 274 +#define TEGRA210_CLK_AUDIO4 275 +#define TEGRA210_CLK_SPDIF 276 +#define TEGRA210_CLK_CLK_OUT_1 277 +#define TEGRA210_CLK_CLK_OUT_2 278 +#define TEGRA210_CLK_CLK_OUT_3 279 +#define TEGRA210_CLK_BLINK 280 +/* 281 */ +#define TEGRA210_CLK_SOR1_SRC 282 +#define TEGRA210_CLK_SOR1_OUT 282 +/* 283 */ +#define TEGRA210_CLK_XUSB_HOST_SRC 284 +#define TEGRA210_CLK_XUSB_FALCON_SRC 285 +#define TEGRA210_CLK_XUSB_FS_SRC 286 +#define TEGRA210_CLK_XUSB_SS_SRC 287 + +#define TEGRA210_CLK_XUSB_DEV_SRC 288 +#define TEGRA210_CLK_XUSB_DEV 289 +#define TEGRA210_CLK_XUSB_HS_SRC 290 +#define TEGRA210_CLK_SCLK 291 +#define TEGRA210_CLK_HCLK 292 +#define TEGRA210_CLK_PCLK 293 +#define TEGRA210_CLK_CCLK_G 294 +#define TEGRA210_CLK_CCLK_LP 295 +#define TEGRA210_CLK_DFLL_REF 296 +#define TEGRA210_CLK_DFLL_SOC 297 +#define TEGRA210_CLK_VI_SENSOR2 298 +#define TEGRA210_CLK_PLL_P_OUT5 299 +#define TEGRA210_CLK_CML0 300 +#define TEGRA210_CLK_CML1 301 +#define TEGRA210_CLK_PLL_C4 302 +#define TEGRA210_CLK_PLL_DP 303 +#define TEGRA210_CLK_PLL_E_MUX 304 +#define TEGRA210_CLK_PLL_MB 305 +#define TEGRA210_CLK_PLL_A1 306 +#define TEGRA210_CLK_PLL_D_DSI_OUT 307 +#define TEGRA210_CLK_PLL_C4_OUT0 308 +#define TEGRA210_CLK_PLL_C4_OUT1 309 +#define TEGRA210_CLK_PLL_C4_OUT2 310 +#define TEGRA210_CLK_PLL_C4_OUT3 311 +#define TEGRA210_CLK_PLL_U_OUT 312 +#define TEGRA210_CLK_PLL_U_OUT1 313 +#define TEGRA210_CLK_PLL_U_OUT2 314 +#define TEGRA210_CLK_USB2_HSIC_TRK 315 +#define TEGRA210_CLK_PLL_P_OUT_HSIO 316 +#define TEGRA210_CLK_PLL_P_OUT_XUSB 317 +#define TEGRA210_CLK_XUSB_SSP_SRC 318 +#define TEGRA210_CLK_PLL_RE_OUT1 319 +/* 320 */ +/* 321 */ +#define TEGRA210_CLK_ISP 322 +#define TEGRA210_CLK_PLL_A_OUT_ADSP 323 +#define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324 +/* 325 */ +/* 326 */ +/* 327 */ +/* 328 */ +/* 329 */ +/* 330 */ +/* 331 */ +/* 332 */ +/* 333 */ +/* 334 */ +/* 335 */ +/* 336 */ +/* 337 */ +/* 338 */ +/* 339 */ +/* 340 */ +/* 341 */ +/* 342 */ +/* 343 */ +/* 344 */ +/* 345 */ +/* 346 */ +/* 347 */ +/* 348 */ +/* 349 */ + +#define TEGRA210_CLK_AUDIO0_MUX 350 +#define TEGRA210_CLK_AUDIO1_MUX 351 +#define TEGRA210_CLK_AUDIO2_MUX 352 +#define TEGRA210_CLK_AUDIO3_MUX 353 +#define TEGRA210_CLK_AUDIO4_MUX 354 +#define 
TEGRA210_CLK_SPDIF_MUX 355 +#define TEGRA210_CLK_CLK_OUT_1_MUX 356 +#define TEGRA210_CLK_CLK_OUT_2_MUX 357 +#define TEGRA210_CLK_CLK_OUT_3_MUX 358 +#define TEGRA210_CLK_DSIA_MUX 359 +#define TEGRA210_CLK_DSIB_MUX 360 +#define TEGRA210_CLK_SOR0_LVDS 361 +#define TEGRA210_CLK_XUSB_SS_DIV2 362 + +#define TEGRA210_CLK_PLL_M_UD 363 +#define TEGRA210_CLK_PLL_C_UD 364 +#define TEGRA210_CLK_SCLK_MUX 365 + +#define TEGRA210_CLK_ACLK 370 + +#define TEGRA210_CLK_DMIC1_SYNC_CLK 388 +#define TEGRA210_CLK_DMIC1_SYNC_CLK_MUX 389 +#define TEGRA210_CLK_DMIC2_SYNC_CLK 390 +#define TEGRA210_CLK_DMIC2_SYNC_CLK_MUX 391 +#define TEGRA210_CLK_DMIC3_SYNC_CLK 392 +#define TEGRA210_CLK_DMIC3_SYNC_CLK_MUX 393 + +#define TEGRA210_CLK_CLK_MAX 394 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA210_CAR_H */ diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h new file mode 100644 index 000000000..3c90f1535 --- /dev/null +++ b/include/dt-bindings/clock/tegra30-car.h @@ -0,0 +1,274 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra30-car. + * + * The first 130 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 160 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 160 and + * above. + */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA30_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA30_CAR_H + +#define TEGRA30_CLK_CPU 0 +/* 1 */ +/* 2 */ +/* 3 */ +#define TEGRA30_CLK_RTC 4 +#define TEGRA30_CLK_TIMER 5 +#define TEGRA30_CLK_UARTA 6 +/* 7 (register bit affects uartb and vfir) */ +#define TEGRA30_CLK_GPIO 8 +#define TEGRA30_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA30_CLK_I2S1 11 +#define TEGRA30_CLK_I2C1 12 +#define TEGRA30_CLK_NDFLASH 13 +#define TEGRA30_CLK_SDMMC1 14 +#define TEGRA30_CLK_SDMMC4 15 +/* 16 */ +#define TEGRA30_CLK_PWM 17 +#define TEGRA30_CLK_I2S2 18 +#define TEGRA30_CLK_EPP 19 +/* 20 (register bit affects vi and vi_sensor) */ +#define TEGRA30_CLK_GR2D 21 +#define TEGRA30_CLK_USBD 22 +#define TEGRA30_CLK_ISP 23 +#define TEGRA30_CLK_GR3D 24 +/* 25 */ +#define TEGRA30_CLK_DISP2 26 +#define TEGRA30_CLK_DISP1 27 +#define TEGRA30_CLK_HOST1X 28 +#define TEGRA30_CLK_VCP 29 +#define TEGRA30_CLK_I2S0 30 +#define TEGRA30_CLK_COP_CACHE 31 + +#define TEGRA30_CLK_MC 32 +#define TEGRA30_CLK_AHBDMA 33 +#define TEGRA30_CLK_APBDMA 34 +/* 35 */ +#define TEGRA30_CLK_KBC 36 +#define TEGRA30_CLK_STATMON 37 +#define TEGRA30_CLK_PMC 38 +/* 39 (register bit affects fuse and fuse_burn) */ +#define TEGRA30_CLK_KFUSE 40 +#define TEGRA30_CLK_SBC1 41 +#define TEGRA30_CLK_NOR 42 +/* 43 */ +#define TEGRA30_CLK_SBC2 44 +/* 45 */ +#define TEGRA30_CLK_SBC3 46 +#define TEGRA30_CLK_I2C5 47 +#define TEGRA30_CLK_DSIA 48 +/* 49 (register bit affects cve and tvo) */ +#define TEGRA30_CLK_MIPI 50 +#define TEGRA30_CLK_HDMI 51 +#define TEGRA30_CLK_CSI 52 +#define TEGRA30_CLK_TVDAC 53 +#define TEGRA30_CLK_I2C2 54 +#define TEGRA30_CLK_UARTC 55 +/* 56 */ +#define TEGRA30_CLK_EMC 57 +#define TEGRA30_CLK_USB2 58 +#define TEGRA30_CLK_USB3 59 +#define TEGRA30_CLK_MPE 60 +#define TEGRA30_CLK_VDE 61 
+#define TEGRA30_CLK_BSEA 62 +#define TEGRA30_CLK_BSEV 63 + +#define TEGRA30_CLK_SPEEDO 64 +#define TEGRA30_CLK_UARTD 65 +#define TEGRA30_CLK_UARTE 66 +#define TEGRA30_CLK_I2C3 67 +#define TEGRA30_CLK_SBC4 68 +#define TEGRA30_CLK_SDMMC3 69 +#define TEGRA30_CLK_PCIE 70 +#define TEGRA30_CLK_OWR 71 +#define TEGRA30_CLK_AFI 72 +#define TEGRA30_CLK_CSITE 73 +/* 74 */ +#define TEGRA30_CLK_AVPUCQ 75 +#define TEGRA30_CLK_LA 76 +/* 77 */ +/* 78 */ +#define TEGRA30_CLK_DTV 79 +#define TEGRA30_CLK_NDSPEED 80 +#define TEGRA30_CLK_I2CSLOW 81 +#define TEGRA30_CLK_DSIB 82 +/* 83 */ +#define TEGRA30_CLK_IRAMA 84 +#define TEGRA30_CLK_IRAMB 85 +#define TEGRA30_CLK_IRAMC 86 +#define TEGRA30_CLK_IRAMD 87 +#define TEGRA30_CLK_CRAM2 88 +/* 89 */ +#define TEGRA30_CLK_AUDIO_2X 90 /* a/k/a audio_2x_sync_clk */ +/* 91 */ +#define TEGRA30_CLK_CSUS 92 +#define TEGRA30_CLK_CDEV2 93 +#define TEGRA30_CLK_CDEV1 94 +/* 95 */ + +#define TEGRA30_CLK_CPU_G 96 +#define TEGRA30_CLK_CPU_LP 97 +#define TEGRA30_CLK_GR3D2 98 +#define TEGRA30_CLK_MSELECT 99 +#define TEGRA30_CLK_TSENSOR 100 +#define TEGRA30_CLK_I2S3 101 +#define TEGRA30_CLK_I2S4 102 +#define TEGRA30_CLK_I2C4 103 +#define TEGRA30_CLK_SBC5 104 +#define TEGRA30_CLK_SBC6 105 +#define TEGRA30_CLK_D_AUDIO 106 +#define TEGRA30_CLK_APBIF 107 +#define TEGRA30_CLK_DAM0 108 +#define TEGRA30_CLK_DAM1 109 +#define TEGRA30_CLK_DAM2 110 +#define TEGRA30_CLK_HDA2CODEC_2X 111 +#define TEGRA30_CLK_ATOMICS 112 +#define TEGRA30_CLK_AUDIO0_2X 113 +#define TEGRA30_CLK_AUDIO1_2X 114 +#define TEGRA30_CLK_AUDIO2_2X 115 +#define TEGRA30_CLK_AUDIO3_2X 116 +#define TEGRA30_CLK_AUDIO4_2X 117 +#define TEGRA30_CLK_SPDIF_2X 118 +#define TEGRA30_CLK_ACTMON 119 +#define TEGRA30_CLK_EXTERN1 120 +#define TEGRA30_CLK_EXTERN2 121 +#define TEGRA30_CLK_EXTERN3 122 +#define TEGRA30_CLK_SATA_OOB 123 +#define TEGRA30_CLK_SATA 124 +#define TEGRA30_CLK_HDA 125 +/* 126 */ +#define TEGRA30_CLK_SE 127 + +#define TEGRA30_CLK_HDA2HDMI 128 +#define TEGRA30_CLK_SATA_COLD 129 +/* 130 */ +/* 131 */ +/* 132 */ +/* 133 */ +/* 134 */ +/* 135 */ +#define TEGRA30_CLK_CEC 136 +/* 137 */ +/* 138 */ +/* 139 */ +/* 140 */ +/* 141 */ +/* 142 */ +/* 143 */ +/* 144 */ +/* 145 */ +/* 146 */ +/* 147 */ +/* 148 */ +/* 149 */ +/* 150 */ +/* 151 */ +/* 152 */ +/* 153 */ +/* 154 */ +/* 155 */ +/* 156 */ +/* 157 */ +/* 158 */ +/* 159 */ + +#define TEGRA30_CLK_UARTB 160 +#define TEGRA30_CLK_VFIR 161 +#define TEGRA30_CLK_SPDIF_IN 162 +#define TEGRA30_CLK_SPDIF_OUT 163 +#define TEGRA30_CLK_VI 164 +#define TEGRA30_CLK_VI_SENSOR 165 +#define TEGRA30_CLK_FUSE 166 +#define TEGRA30_CLK_FUSE_BURN 167 +#define TEGRA30_CLK_CVE 168 +#define TEGRA30_CLK_TVO 169 +#define TEGRA30_CLK_CLK_32K 170 +#define TEGRA30_CLK_CLK_M 171 +#define TEGRA30_CLK_CLK_M_DIV2 172 +#define TEGRA30_CLK_CLK_M_DIV4 173 +#define TEGRA30_CLK_PLL_REF 174 +#define TEGRA30_CLK_PLL_C 175 +#define TEGRA30_CLK_PLL_C_OUT1 176 +#define TEGRA30_CLK_PLL_M 177 +#define TEGRA30_CLK_PLL_M_OUT1 178 +#define TEGRA30_CLK_PLL_P 179 +#define TEGRA30_CLK_PLL_P_OUT1 180 +#define TEGRA30_CLK_PLL_P_OUT2 181 +#define TEGRA30_CLK_PLL_P_OUT3 182 +#define TEGRA30_CLK_PLL_P_OUT4 183 +#define TEGRA30_CLK_PLL_A 184 +#define TEGRA30_CLK_PLL_A_OUT0 185 +#define TEGRA30_CLK_PLL_D 186 +#define TEGRA30_CLK_PLL_D_OUT0 187 +#define TEGRA30_CLK_PLL_D2 188 +#define TEGRA30_CLK_PLL_D2_OUT0 189 +#define TEGRA30_CLK_PLL_U 190 +#define TEGRA30_CLK_PLL_X 191 + +#define TEGRA30_CLK_PLL_X_OUT0 192 +#define TEGRA30_CLK_PLL_E 193 +#define TEGRA30_CLK_SPDIF_IN_SYNC 194 +#define TEGRA30_CLK_I2S0_SYNC 195 +#define 
TEGRA30_CLK_I2S1_SYNC 196 +#define TEGRA30_CLK_I2S2_SYNC 197 +#define TEGRA30_CLK_I2S3_SYNC 198 +#define TEGRA30_CLK_I2S4_SYNC 199 +#define TEGRA30_CLK_VIMCLK_SYNC 200 +#define TEGRA30_CLK_AUDIO0 201 +#define TEGRA30_CLK_AUDIO1 202 +#define TEGRA30_CLK_AUDIO2 203 +#define TEGRA30_CLK_AUDIO3 204 +#define TEGRA30_CLK_AUDIO4 205 +#define TEGRA30_CLK_SPDIF 206 +#define TEGRA30_CLK_CLK_OUT_1 207 /* (extern1) */ +#define TEGRA30_CLK_CLK_OUT_2 208 /* (extern2) */ +#define TEGRA30_CLK_CLK_OUT_3 209 /* (extern3) */ +#define TEGRA30_CLK_SCLK 210 +#define TEGRA30_CLK_BLINK 211 +#define TEGRA30_CLK_CCLK_G 212 +#define TEGRA30_CLK_CCLK_LP 213 +#define TEGRA30_CLK_TWD 214 +#define TEGRA30_CLK_CML0 215 +#define TEGRA30_CLK_CML1 216 +#define TEGRA30_CLK_HCLK 217 +#define TEGRA30_CLK_PCLK 218 +/* 219 */ +/* 220 */ +/* 221 */ +/* 222 */ +/* 223 */ + +/* 288 */ +/* 289 */ +/* 290 */ +/* 291 */ +/* 292 */ +/* 293 */ +/* 294 */ +/* 295 */ +/* 296 */ +/* 297 */ +/* 298 */ +/* 299 */ +#define TEGRA30_CLK_CLK_OUT_1_MUX 300 +#define TEGRA30_CLK_CLK_OUT_2_MUX 301 +#define TEGRA30_CLK_CLK_OUT_3_MUX 302 +#define TEGRA30_CLK_AUDIO0_MUX 303 +#define TEGRA30_CLK_AUDIO1_MUX 304 +#define TEGRA30_CLK_AUDIO2_MUX 305 +#define TEGRA30_CLK_AUDIO3_MUX 306 +#define TEGRA30_CLK_AUDIO4_MUX 307 +#define TEGRA30_CLK_SPDIF_MUX 308 +#define TEGRA30_CLK_CLK_MAX 309 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA30_CAR_H */ diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h new file mode 100644 index 000000000..45997750c --- /dev/null +++ b/include/dt-bindings/clock/vf610-clock.h @@ -0,0 +1,204 @@ +/* + * Copyright 2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_VF610_H +#define __DT_BINDINGS_CLOCK_VF610_H + +#define VF610_CLK_DUMMY 0 +#define VF610_CLK_SIRC_128K 1 +#define VF610_CLK_SIRC_32K 2 +#define VF610_CLK_FIRC 3 +#define VF610_CLK_SXOSC 4 +#define VF610_CLK_FXOSC 5 +#define VF610_CLK_FXOSC_HALF 6 +#define VF610_CLK_SLOW_CLK_SEL 7 +#define VF610_CLK_FASK_CLK_SEL 8 +#define VF610_CLK_AUDIO_EXT 9 +#define VF610_CLK_ENET_EXT 10 +#define VF610_CLK_PLL1_SYS 11 +#define VF610_CLK_PLL1_PFD1 12 +#define VF610_CLK_PLL1_PFD2 13 +#define VF610_CLK_PLL1_PFD3 14 +#define VF610_CLK_PLL1_PFD4 15 +#define VF610_CLK_PLL2_BUS 16 +#define VF610_CLK_PLL2_PFD1 17 +#define VF610_CLK_PLL2_PFD2 18 +#define VF610_CLK_PLL2_PFD3 19 +#define VF610_CLK_PLL2_PFD4 20 +#define VF610_CLK_PLL3_USB_OTG 21 +#define VF610_CLK_PLL3_PFD1 22 +#define VF610_CLK_PLL3_PFD2 23 +#define VF610_CLK_PLL3_PFD3 24 +#define VF610_CLK_PLL3_PFD4 25 +#define VF610_CLK_PLL4_AUDIO 26 +#define VF610_CLK_PLL5_ENET 27 +#define VF610_CLK_PLL6_VIDEO 28 +#define VF610_CLK_PLL3_MAIN_DIV 29 +#define VF610_CLK_PLL4_MAIN_DIV 30 +#define VF610_CLK_PLL6_MAIN_DIV 31 +#define VF610_CLK_PLL1_PFD_SEL 32 +#define VF610_CLK_PLL2_PFD_SEL 33 +#define VF610_CLK_SYS_SEL 34 +#define VF610_CLK_DDR_SEL 35 +#define VF610_CLK_SYS_BUS 36 +#define VF610_CLK_PLATFORM_BUS 37 +#define VF610_CLK_IPG_BUS 38 +#define VF610_CLK_UART0 39 +#define VF610_CLK_UART1 40 +#define VF610_CLK_UART2 41 +#define VF610_CLK_UART3 42 +#define VF610_CLK_UART4 43 +#define VF610_CLK_UART5 44 +#define VF610_CLK_PIT 45 +#define VF610_CLK_I2C0 46 +#define VF610_CLK_I2C1 47 +#define VF610_CLK_I2C2 48 +#define VF610_CLK_I2C3 49 +#define VF610_CLK_FTM0_EXT_SEL 50 +#define VF610_CLK_FTM0_FIX_SEL 51 +#define VF610_CLK_FTM0_EXT_FIX_EN 52 +#define VF610_CLK_FTM1_EXT_SEL 53 +#define VF610_CLK_FTM1_FIX_SEL 54 +#define VF610_CLK_FTM1_EXT_FIX_EN 55 +#define VF610_CLK_FTM2_EXT_SEL 56 +#define VF610_CLK_FTM2_FIX_SEL 57 +#define VF610_CLK_FTM2_EXT_FIX_EN 58 +#define VF610_CLK_FTM3_EXT_SEL 59 +#define VF610_CLK_FTM3_FIX_SEL 60 +#define VF610_CLK_FTM3_EXT_FIX_EN 61 +#define VF610_CLK_FTM0 62 +#define VF610_CLK_FTM1 63 +#define VF610_CLK_FTM2 64 +#define VF610_CLK_FTM3 65 +#define VF610_CLK_ENET_50M 66 +#define VF610_CLK_ENET_25M 67 +#define VF610_CLK_ENET_SEL 68 +#define VF610_CLK_ENET 69 +#define VF610_CLK_ENET_TS_SEL 70 +#define VF610_CLK_ENET_TS 71 +#define VF610_CLK_DSPI0 72 +#define VF610_CLK_DSPI1 73 +#define VF610_CLK_DSPI2 74 +#define VF610_CLK_DSPI3 75 +#define VF610_CLK_WDT 76 +#define VF610_CLK_ESDHC0_SEL 77 +#define VF610_CLK_ESDHC0_EN 78 +#define VF610_CLK_ESDHC0_DIV 79 +#define VF610_CLK_ESDHC0 80 +#define VF610_CLK_ESDHC1_SEL 81 +#define VF610_CLK_ESDHC1_EN 82 +#define VF610_CLK_ESDHC1_DIV 83 +#define VF610_CLK_ESDHC1 84 +#define VF610_CLK_DCU0_SEL 85 +#define VF610_CLK_DCU0_EN 86 +#define VF610_CLK_DCU0_DIV 87 +#define VF610_CLK_DCU0 88 +#define VF610_CLK_DCU1_SEL 89 +#define VF610_CLK_DCU1_EN 90 +#define VF610_CLK_DCU1_DIV 91 +#define VF610_CLK_DCU1 92 +#define VF610_CLK_ESAI_SEL 93 +#define VF610_CLK_ESAI_EN 94 +#define VF610_CLK_ESAI_DIV 95 +#define VF610_CLK_ESAI 96 +#define VF610_CLK_SAI0_SEL 97 +#define VF610_CLK_SAI0_EN 98 +#define VF610_CLK_SAI0_DIV 99 +#define VF610_CLK_SAI0 100 +#define VF610_CLK_SAI1_SEL 101 +#define VF610_CLK_SAI1_EN 102 +#define VF610_CLK_SAI1_DIV 103 +#define VF610_CLK_SAI1 104 +#define VF610_CLK_SAI2_SEL 105 +#define VF610_CLK_SAI2_EN 106 +#define VF610_CLK_SAI2_DIV 107 +#define VF610_CLK_SAI2 108 +#define VF610_CLK_SAI3_SEL 109 +#define VF610_CLK_SAI3_EN 110 +#define 
VF610_CLK_SAI3_DIV 111 +#define VF610_CLK_SAI3 112 +#define VF610_CLK_USBC0 113 +#define VF610_CLK_USBC1 114 +#define VF610_CLK_QSPI0_SEL 115 +#define VF610_CLK_QSPI0_EN 116 +#define VF610_CLK_QSPI0_X4_DIV 117 +#define VF610_CLK_QSPI0_X2_DIV 118 +#define VF610_CLK_QSPI0_X1_DIV 119 +#define VF610_CLK_QSPI1_SEL 120 +#define VF610_CLK_QSPI1_EN 121 +#define VF610_CLK_QSPI1_X4_DIV 122 +#define VF610_CLK_QSPI1_X2_DIV 123 +#define VF610_CLK_QSPI1_X1_DIV 124 +#define VF610_CLK_QSPI0 125 +#define VF610_CLK_QSPI1 126 +#define VF610_CLK_NFC_SEL 127 +#define VF610_CLK_NFC_EN 128 +#define VF610_CLK_NFC_PRE_DIV 129 +#define VF610_CLK_NFC_FRAC_DIV 130 +#define VF610_CLK_NFC_INV 131 +#define VF610_CLK_NFC 132 +#define VF610_CLK_VADC_SEL 133 +#define VF610_CLK_VADC_EN 134 +#define VF610_CLK_VADC_DIV 135 +#define VF610_CLK_VADC_DIV_HALF 136 +#define VF610_CLK_VADC 137 +#define VF610_CLK_ADC0 138 +#define VF610_CLK_ADC1 139 +#define VF610_CLK_DAC0 140 +#define VF610_CLK_DAC1 141 +#define VF610_CLK_FLEXCAN0 142 +#define VF610_CLK_FLEXCAN1 143 +#define VF610_CLK_ASRC 144 +#define VF610_CLK_GPU_SEL 145 +#define VF610_CLK_GPU_EN 146 +#define VF610_CLK_GPU2D 147 +#define VF610_CLK_ENET0 148 +#define VF610_CLK_ENET1 149 +#define VF610_CLK_DMAMUX0 150 +#define VF610_CLK_DMAMUX1 151 +#define VF610_CLK_DMAMUX2 152 +#define VF610_CLK_DMAMUX3 153 +#define VF610_CLK_FLEXCAN0_EN 154 +#define VF610_CLK_FLEXCAN1_EN 155 +#define VF610_CLK_PLL7_USB_HOST 156 +#define VF610_CLK_USBPHY0 157 +#define VF610_CLK_USBPHY1 158 +#define VF610_CLK_LVDS1_IN 159 +#define VF610_CLK_ANACLK1 160 +#define VF610_CLK_PLL1_BYPASS_SRC 161 +#define VF610_CLK_PLL2_BYPASS_SRC 162 +#define VF610_CLK_PLL3_BYPASS_SRC 163 +#define VF610_CLK_PLL4_BYPASS_SRC 164 +#define VF610_CLK_PLL5_BYPASS_SRC 165 +#define VF610_CLK_PLL6_BYPASS_SRC 166 +#define VF610_CLK_PLL7_BYPASS_SRC 167 +#define VF610_CLK_PLL1 168 +#define VF610_CLK_PLL2 169 +#define VF610_CLK_PLL3 170 +#define VF610_CLK_PLL4 171 +#define VF610_CLK_PLL5 172 +#define VF610_CLK_PLL6 173 +#define VF610_CLK_PLL7 174 +#define VF610_PLL1_BYPASS 175 +#define VF610_PLL2_BYPASS 176 +#define VF610_PLL3_BYPASS 177 +#define VF610_PLL4_BYPASS 178 +#define VF610_PLL5_BYPASS 179 +#define VF610_PLL6_BYPASS 180 +#define VF610_PLL7_BYPASS 181 +#define VF610_CLK_SNVS 182 +#define VF610_CLK_DAP 183 +#define VF610_CLK_OCOTP 184 +#define VF610_CLK_DDRMC 185 +#define VF610_CLK_WKPU 186 +#define VF610_CLK_TCON0 187 +#define VF610_CLK_TCON1 188 +#define VF610_CLK_END 189 + +#endif /* __DT_BINDINGS_CLOCK_VF610_H */ diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h new file mode 100644 index 000000000..26ee564b0 --- /dev/null +++ b/include/dt-bindings/clock/zx296702-clock.h @@ -0,0 +1,183 @@ +/* + * Copyright 2014 Linaro Ltd. + * Copyright (C) 2014 ZTE Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_ZX296702_H +#define __DT_BINDINGS_CLOCK_ZX296702_H + +#define ZX296702_OSC 0 +#define ZX296702_PLL_A9 1 +#define ZX296702_PLL_A9_350M 2 +#define ZX296702_PLL_MAC_1000M 3 +#define ZX296702_PLL_MAC_333M 4 +#define ZX296702_PLL_MM0_1188M 5 +#define ZX296702_PLL_MM0_396M 6 +#define ZX296702_PLL_MM0_198M 7 +#define ZX296702_PLL_MM1_108M 8 +#define ZX296702_PLL_MM1_72M 9 +#define ZX296702_PLL_MM1_54M 10 +#define ZX296702_PLL_LSP_104M 11 +#define ZX296702_PLL_LSP_26M 12 +#define ZX296702_PLL_AUDIO_294M912 13 +#define ZX296702_PLL_DDR_266M 14 +#define ZX296702_CLK_148M5 15 +#define ZX296702_MATRIX_ACLK 16 +#define ZX296702_MAIN_HCLK 17 +#define ZX296702_MAIN_PCLK 18 +#define ZX296702_CLK_500 19 +#define ZX296702_CLK_250 20 +#define ZX296702_CLK_125 21 +#define ZX296702_CLK_74M25 22 +#define ZX296702_A9_WCLK 23 +#define ZX296702_A9_AS1_ACLK_MUX 24 +#define ZX296702_A9_TRACE_CLKIN_MUX 25 +#define ZX296702_A9_AS1_ACLK_DIV 26 +#define ZX296702_CLK_2 27 +#define ZX296702_CLK_27 28 +#define ZX296702_DECPPU_ACLK_MUX 29 +#define ZX296702_PPU_ACLK_MUX 30 +#define ZX296702_MALI400_ACLK_MUX 31 +#define ZX296702_VOU_ACLK_MUX 32 +#define ZX296702_VOU_MAIN_WCLK_MUX 33 +#define ZX296702_VOU_AUX_WCLK_MUX 34 +#define ZX296702_VOU_SCALER_WCLK_MUX 35 +#define ZX296702_R2D_ACLK_MUX 36 +#define ZX296702_R2D_WCLK_MUX 37 +#define ZX296702_CLK_50 38 +#define ZX296702_CLK_25 39 +#define ZX296702_CLK_12 40 +#define ZX296702_CLK_16M384 41 +#define ZX296702_CLK_32K768 42 +#define ZX296702_SEC_WCLK_DIV 43 +#define ZX296702_DDR_WCLK_MUX 44 +#define ZX296702_NAND_WCLK_MUX 45 +#define ZX296702_LSP_26_WCLK_MUX 46 +#define ZX296702_A9_AS0_ACLK 47 +#define ZX296702_A9_AS1_ACLK 48 +#define ZX296702_A9_TRACE_CLKIN 49 +#define ZX296702_DECPPU_AXI_M_ACLK 50 +#define ZX296702_DECPPU_AHB_S_HCLK 51 +#define ZX296702_PPU_AXI_M_ACLK 52 +#define ZX296702_PPU_AHB_S_HCLK 53 +#define ZX296702_VOU_AXI_M_ACLK 54 +#define ZX296702_VOU_APB_PCLK 55 +#define ZX296702_VOU_MAIN_CHANNEL_WCLK 56 +#define ZX296702_VOU_AUX_CHANNEL_WCLK 57 +#define ZX296702_VOU_HDMI_OSCLK_CEC 58 +#define ZX296702_VOU_SCALER_WCLK 59 +#define ZX296702_MALI400_AXI_M_ACLK 60 +#define ZX296702_MALI400_APB_PCLK 61 +#define ZX296702_R2D_WCLK 62 +#define ZX296702_R2D_AXI_M_ACLK 63 +#define ZX296702_R2D_AHB_HCLK 64 +#define ZX296702_DDR3_AXI_S0_ACLK 65 +#define ZX296702_DDR3_APB_PCLK 66 +#define ZX296702_DDR3_WCLK 67 +#define ZX296702_USB20_0_AHB_HCLK 68 +#define ZX296702_USB20_0_EXTREFCLK 69 +#define ZX296702_USB20_1_AHB_HCLK 70 +#define ZX296702_USB20_1_EXTREFCLK 71 +#define ZX296702_USB20_2_AHB_HCLK 72 +#define ZX296702_USB20_2_EXTREFCLK 73 +#define ZX296702_GMAC_AXI_M_ACLK 74 +#define ZX296702_GMAC_APB_PCLK 75 +#define ZX296702_GMAC_125_CLKIN 76 +#define ZX296702_GMAC_RMII_CLKIN 77 +#define ZX296702_GMAC_25M_CLK 78 +#define ZX296702_NANDFLASH_AHB_HCLK 79 +#define ZX296702_NANDFLASH_WCLK 80 +#define ZX296702_LSP0_APB_PCLK 81 +#define ZX296702_LSP0_AHB_HCLK 82 +#define ZX296702_LSP0_26M_WCLK 83 +#define ZX296702_LSP0_104M_WCLK 84 +#define ZX296702_LSP0_16M384_WCLK 85 +#define ZX296702_LSP1_APB_PCLK 86 +#define ZX296702_LSP1_26M_WCLK 87 +#define ZX296702_LSP1_104M_WCLK 88 +#define ZX296702_LSP1_32K_CLK 89 +#define ZX296702_AON_HCLK 90 +#define ZX296702_SYS_CTRL_PCLK 91 +#define ZX296702_DMA_PCLK 92 +#define ZX296702_DMA_ACLK 93 +#define ZX296702_SEC_HCLK 94 +#define ZX296702_AES_WCLK 95 +#define ZX296702_DES_WCLK 96 +#define ZX296702_IRAM_ACLK 97 +#define ZX296702_IROM_ACLK 98 +#define ZX296702_BOOT_CTRL_HCLK 99 +#define ZX296702_EFUSE_CLK_30 100 
+#define ZX296702_VOU_MAIN_CHANNEL_DIV 101 +#define ZX296702_VOU_AUX_CHANNEL_DIV 102 +#define ZX296702_VOU_TV_ENC_HD_DIV 103 +#define ZX296702_VOU_TV_ENC_SD_DIV 104 +#define ZX296702_VL0_MUX 105 +#define ZX296702_VL1_MUX 106 +#define ZX296702_VL2_MUX 107 +#define ZX296702_GL0_MUX 108 +#define ZX296702_GL1_MUX 109 +#define ZX296702_GL2_MUX 110 +#define ZX296702_WB_MUX 111 +#define ZX296702_HDMI_MUX 112 +#define ZX296702_VOU_TV_ENC_HD_MUX 113 +#define ZX296702_VOU_TV_ENC_SD_MUX 114 +#define ZX296702_VL0_CLK 115 +#define ZX296702_VL1_CLK 116 +#define ZX296702_VL2_CLK 117 +#define ZX296702_GL0_CLK 118 +#define ZX296702_GL1_CLK 119 +#define ZX296702_GL2_CLK 120 +#define ZX296702_WB_CLK 121 +#define ZX296702_CL_CLK 122 +#define ZX296702_MAIN_MIX_CLK 123 +#define ZX296702_AUX_MIX_CLK 124 +#define ZX296702_HDMI_CLK 125 +#define ZX296702_VOU_TV_ENC_HD_DAC_CLK 126 +#define ZX296702_VOU_TV_ENC_SD_DAC_CLK 127 +#define ZX296702_A9_PERIPHCLK 128 +#define ZX296702_TOPCLK_END 129 + +#define ZX296702_SDMMC1_WCLK_MUX 0 +#define ZX296702_SDMMC1_WCLK_DIV 1 +#define ZX296702_SDMMC1_WCLK 2 +#define ZX296702_SDMMC1_PCLK 3 +#define ZX296702_SPDIF0_WCLK_MUX 4 +#define ZX296702_SPDIF0_WCLK 5 +#define ZX296702_SPDIF0_PCLK 6 +#define ZX296702_SPDIF0_DIV 7 +#define ZX296702_I2S0_WCLK_MUX 8 +#define ZX296702_I2S0_WCLK 9 +#define ZX296702_I2S0_PCLK 10 +#define ZX296702_I2S0_DIV 11 +#define ZX296702_I2S1_WCLK_MUX 12 +#define ZX296702_I2S1_WCLK 13 +#define ZX296702_I2S1_PCLK 14 +#define ZX296702_I2S1_DIV 15 +#define ZX296702_I2S2_WCLK_MUX 16 +#define ZX296702_I2S2_WCLK 17 +#define ZX296702_I2S2_PCLK 18 +#define ZX296702_I2S2_DIV 19 +#define ZX296702_GPIO_CLK 20 +#define ZX296702_LSP0CLK_END 21 + +#define ZX296702_UART0_WCLK_MUX 0 +#define ZX296702_UART0_WCLK 1 +#define ZX296702_UART0_PCLK 2 +#define ZX296702_UART1_WCLK_MUX 3 +#define ZX296702_UART1_WCLK 4 +#define ZX296702_UART1_PCLK 5 +#define ZX296702_SDMMC0_WCLK_MUX 6 +#define ZX296702_SDMMC0_WCLK_DIV 7 +#define ZX296702_SDMMC0_WCLK 8 +#define ZX296702_SDMMC0_PCLK 9 +#define ZX296702_SPDIF1_WCLK_MUX 10 +#define ZX296702_SPDIF1_WCLK 11 +#define ZX296702_SPDIF1_PCLK 12 +#define ZX296702_SPDIF1_DIV 13 +#define ZX296702_LSP1CLK_END 14 + +#endif /* __DT_BINDINGS_CLOCK_ZX296702_H */ diff --git a/include/dt-bindings/clock/zx296718-clock.h b/include/dt-bindings/clock/zx296718-clock.h new file mode 100644 index 000000000..092c9751a --- /dev/null +++ b/include/dt-bindings/clock/zx296718-clock.h @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2015 - 2016 ZTE Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_ZX296718_H +#define __DT_BINDINGS_CLOCK_ZX296718_H + +/* PLL */ +#define ZX296718_PLL_CPU 1 +#define ZX296718_PLL_MAC 2 +#define ZX296718_PLL_MM0 3 +#define ZX296718_PLL_MM1 4 +#define ZX296718_PLL_VGA 5 +#define ZX296718_PLL_DDR 6 +#define ZX296718_PLL_AUDIO 7 +#define ZX296718_PLL_HSIC 8 +#define CPU_DBG_GATE 9 +#define A72_GATE 10 +#define CPU_PERI_GATE 11 +#define A53_GATE 12 +#define DDR1_GATE 13 +#define DDR0_GATE 14 +#define SD1_WCLK 15 +#define SD1_AHB 16 +#define SD0_WCLK 17 +#define SD0_AHB 18 +#define EMMC_WCLK 19 +#define EMMC_NAND_AXI 20 +#define NAND_WCLK 21 +#define EMMC_NAND_AHB 22 +#define LSP1_148M5 23 +#define LSP1_99M 24 +#define LSP1_24M 25 +#define LSP0_74M25 26 +#define LSP0_32K 27 +#define LSP0_148M5 28 +#define LSP0_99M 29 +#define LSP0_24M 30 +#define DEMUX_AXI 31 +#define DEMUX_APB 32 +#define DEMUX_148M5 33 +#define DEMUX_108M 34 +#define AUDIO_APB 35 +#define AUDIO_99M 36 +#define AUDIO_24M 37 +#define AUDIO_16M384 38 +#define AUDIO_32K 39 +#define WDT_WCLK 40 +#define TIMER_WCLK 41 +#define VDE_ACLK 42 +#define VCE_ACLK 43 +#define HDE_ACLK 44 +#define GPU_ACLK 45 +#define SAPPU_ACLK 46 +#define SAPPU_WCLK 47 +#define VOU_ACLK 48 +#define VOU_MAIN_WCLK 49 +#define VOU_AUX_WCLK 50 +#define VOU_PPU_WCLK 51 +#define MIPI_CFG_CLK 52 +#define VGA_I2C_WCLK 53 +#define MIPI_REF_CLK 54 +#define HDMI_OSC_CEC 55 +#define HDMI_OSC_CLK 56 +#define HDMI_XCLK 57 +#define VIU_M0_ACLK 58 +#define VIU_M1_ACLK 59 +#define VIU_WCLK 60 +#define VIU_JPEG_WCLK 61 +#define VIU_CFG_CLK 62 +#define TS_SYS_WCLK 63 +#define TS_SYS_108M 64 +#define USB20_HCLK 65 +#define USB20_PHY_CLK 66 +#define USB21_HCLK 67 +#define USB21_PHY_CLK 68 +#define GMAC_RMIICLK 69 +#define GMAC_PCLK 70 +#define GMAC_ACLK 71 +#define GMAC_RFCLK 72 +#define TEMPSENSOR_GATE 73 + +#define TOP_NR_CLKS 74 + + +#define LSP0_TIMER3_PCLK 1 +#define LSP0_TIMER3_WCLK 2 +#define LSP0_TIMER4_PCLK 3 +#define LSP0_TIMER4_WCLK 4 +#define LSP0_TIMER5_PCLK 5 +#define LSP0_TIMER5_WCLK 6 +#define LSP0_UART3_PCLK 7 +#define LSP0_UART3_WCLK 8 +#define LSP0_UART1_PCLK 9 +#define LSP0_UART1_WCLK 10 +#define LSP0_UART2_PCLK 11 +#define LSP0_UART2_WCLK 12 +#define LSP0_SPIFC0_PCLK 13 +#define LSP0_SPIFC0_WCLK 14 +#define LSP0_I2C4_PCLK 15 +#define LSP0_I2C4_WCLK 16 +#define LSP0_I2C5_PCLK 17 +#define LSP0_I2C5_WCLK 18 +#define LSP0_SSP0_PCLK 19 +#define LSP0_SSP0_WCLK 20 +#define LSP0_SSP1_PCLK 21 +#define LSP0_SSP1_WCLK 22 +#define LSP0_USIM_PCLK 23 +#define LSP0_USIM_WCLK 24 +#define LSP0_GPIO_PCLK 25 +#define LSP0_GPIO_WCLK 26 +#define LSP0_I2C3_PCLK 27 +#define LSP0_I2C3_WCLK 28 + +#define LSP0_NR_CLKS 29 + + +#define LSP1_UART4_PCLK 1 +#define LSP1_UART4_WCLK 2 +#define LSP1_UART5_PCLK 3 +#define LSP1_UART5_WCLK 4 +#define LSP1_PWM_PCLK 5 +#define LSP1_PWM_WCLK 6 +#define LSP1_I2C2_PCLK 7 +#define LSP1_I2C2_WCLK 8 +#define LSP1_SSP2_PCLK 9 +#define LSP1_SSP2_WCLK 10 +#define LSP1_SSP3_PCLK 11 +#define LSP1_SSP3_WCLK 12 +#define LSP1_SSP4_PCLK 13 +#define LSP1_SSP4_WCLK 14 +#define LSP1_USIM1_PCLK 15 +#define LSP1_USIM1_WCLK 16 + +#define LSP1_NR_CLKS 17 + + +#define AUDIO_I2S0_WCLK 1 +#define AUDIO_I2S0_PCLK 2 +#define AUDIO_I2S1_WCLK 3 +#define AUDIO_I2S1_PCLK 4 +#define AUDIO_I2S2_WCLK 5 +#define AUDIO_I2S2_PCLK 6 +#define AUDIO_I2S3_WCLK 7 +#define AUDIO_I2S3_PCLK 8 +#define AUDIO_I2C0_WCLK 9 +#define AUDIO_I2C0_PCLK 10 +#define AUDIO_SPDIF0_WCLK 11 +#define AUDIO_SPDIF0_PCLK 12 +#define AUDIO_SPDIF1_WCLK 13 +#define AUDIO_SPDIF1_PCLK 14 +#define AUDIO_TIMER_WCLK 15 +#define AUDIO_TIMER_PCLK 
16 +#define AUDIO_TDM_WCLK 17 +#define AUDIO_TDM_PCLK 18 +#define AUDIO_TS_PCLK 19 +#define I2S0_WCLK_MUX 20 +#define I2S1_WCLK_MUX 21 +#define I2S2_WCLK_MUX 22 +#define I2S3_WCLK_MUX 23 + +#define AUDIO_NR_CLKS 24 + +#endif diff --git a/include/dt-bindings/display/tda998x.h b/include/dt-bindings/display/tda998x.h new file mode 100644 index 000000000..746831ff3 --- /dev/null +++ b/include/dt-bindings/display/tda998x.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_TDA998X_H +#define _DT_BINDINGS_TDA998X_H + +#define TDA998x_SPDIF 1 +#define TDA998x_I2S 2 + +#endif /*_DT_BINDINGS_TDA998X_H */ diff --git a/include/dt-bindings/dma/at91.h b/include/dt-bindings/dma/at91.h new file mode 100644 index 000000000..ab6cbba45 --- /dev/null +++ b/include/dt-bindings/dma/at91.h @@ -0,0 +1,52 @@ +/* + * This header provides macros for at91 dma bindings. + * + * Copyright (C) 2013 Ludovic Desroches + * + * GPLv2 only + */ + +#ifndef __DT_BINDINGS_AT91_DMA_H__ +#define __DT_BINDINGS_AT91_DMA_H__ + +/* ---------- HDMAC ---------- */ + +/* + * Source and/or destination peripheral ID + */ +#define AT91_DMA_CFG_PER_ID_MASK (0xff) +#define AT91_DMA_CFG_PER_ID(id) (id & AT91_DMA_CFG_PER_ID_MASK) + +/* + * FIFO configuration: it defines when a request is serviced. + */ +#define AT91_DMA_CFG_FIFOCFG_OFFSET (8) +#define AT91_DMA_CFG_FIFOCFG_MASK (0xf << AT91_DMA_CFG_FIFOCFG_OFFSET) +#define AT91_DMA_CFG_FIFOCFG_HALF (0x0 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* half FIFO (default behavior) */ +#define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */ +#define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */ + + +/* ---------- XDMAC ---------- */ +#define AT91_XDMAC_DT_MEM_IF_MASK (0x1) +#define AT91_XDMAC_DT_MEM_IF_OFFSET (13) +#define AT91_XDMAC_DT_MEM_IF(mem_if) (((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \ + << AT91_XDMAC_DT_MEM_IF_OFFSET) +#define AT91_XDMAC_DT_GET_MEM_IF(cfg) (((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \ + & AT91_XDMAC_DT_MEM_IF_MASK) + +#define AT91_XDMAC_DT_PER_IF_MASK (0x1) +#define AT91_XDMAC_DT_PER_IF_OFFSET (14) +#define AT91_XDMAC_DT_PER_IF(per_if) (((per_if) & AT91_XDMAC_DT_PER_IF_MASK) \ + << AT91_XDMAC_DT_PER_IF_OFFSET) +#define AT91_XDMAC_DT_GET_PER_IF(cfg) (((cfg) >> AT91_XDMAC_DT_PER_IF_OFFSET) \ + & AT91_XDMAC_DT_PER_IF_MASK) + +#define AT91_XDMAC_DT_PERID_MASK (0x7f) +#define AT91_XDMAC_DT_PERID_OFFSET (24) +#define AT91_XDMAC_DT_PERID(perid) (((perid) & AT91_XDMAC_DT_PERID_MASK) \ + << AT91_XDMAC_DT_PERID_OFFSET) +#define AT91_XDMAC_DT_GET_PERID(cfg) (((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \ + & AT91_XDMAC_DT_PERID_MASK) + +#endif /* __DT_BINDINGS_AT91_DMA_H__ */ diff --git a/include/dt-bindings/dma/axi-dmac.h b/include/dt-bindings/dma/axi-dmac.h new file mode 100644 index 000000000..ad9e6ecb9 --- /dev/null +++ b/include/dt-bindings/dma/axi-dmac.h @@ -0,0 +1,48 @@ +/* + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DT_BINDINGS_DMA_AXI_DMAC_H__ +#define __DT_BINDINGS_DMA_AXI_DMAC_H__ + +#define AXI_DMAC_BUS_TYPE_AXI_MM 0 +#define AXI_DMAC_BUS_TYPE_AXI_STREAM 1 +#define AXI_DMAC_BUS_TYPE_FIFO 2 + +#endif diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h new file mode 100644 index 000000000..df017fdfb --- /dev/null +++ b/include/dt-bindings/dma/jz4780-dma.h @@ -0,0 +1,49 @@ +#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__ +#define __DT_BINDINGS_DMA_JZ4780_DMA_H__ + +/* + * Request type numbers for the JZ4780 DMA controller (written to the DRTn + * register for the channel). 
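+ *
+ * Usage sketch (the surrounding node, the &dma phandle and the 0xffffffff
+ * "any channel" mask are illustrative assumptions, not defined in this
+ * header): a client passes the request type as the first cell of its
+ * "dmas" specifier:
+ *
+ *	mmc@13450000 {
+ *		dmas = <&dma JZ4780_DMA_MSC0_RX 0xffffffff>,
+ *		       <&dma JZ4780_DMA_MSC0_TX 0xffffffff>;
+ *		dma-names = "rx", "tx";
+ *	};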
+ */ +#define JZ4780_DMA_I2S1_TX 0x4 +#define JZ4780_DMA_I2S1_RX 0x5 +#define JZ4780_DMA_I2S0_TX 0x6 +#define JZ4780_DMA_I2S0_RX 0x7 +#define JZ4780_DMA_AUTO 0x8 +#define JZ4780_DMA_SADC_RX 0x9 +#define JZ4780_DMA_UART4_TX 0xc +#define JZ4780_DMA_UART4_RX 0xd +#define JZ4780_DMA_UART3_TX 0xe +#define JZ4780_DMA_UART3_RX 0xf +#define JZ4780_DMA_UART2_TX 0x10 +#define JZ4780_DMA_UART2_RX 0x11 +#define JZ4780_DMA_UART1_TX 0x12 +#define JZ4780_DMA_UART1_RX 0x13 +#define JZ4780_DMA_UART0_TX 0x14 +#define JZ4780_DMA_UART0_RX 0x15 +#define JZ4780_DMA_SSI0_TX 0x16 +#define JZ4780_DMA_SSI0_RX 0x17 +#define JZ4780_DMA_SSI1_TX 0x18 +#define JZ4780_DMA_SSI1_RX 0x19 +#define JZ4780_DMA_MSC0_TX 0x1a +#define JZ4780_DMA_MSC0_RX 0x1b +#define JZ4780_DMA_MSC1_TX 0x1c +#define JZ4780_DMA_MSC1_RX 0x1d +#define JZ4780_DMA_MSC2_TX 0x1e +#define JZ4780_DMA_MSC2_RX 0x1f +#define JZ4780_DMA_PCM0_TX 0x20 +#define JZ4780_DMA_PCM0_RX 0x21 +#define JZ4780_DMA_SMB0_TX 0x24 +#define JZ4780_DMA_SMB0_RX 0x25 +#define JZ4780_DMA_SMB1_TX 0x26 +#define JZ4780_DMA_SMB1_RX 0x27 +#define JZ4780_DMA_SMB2_TX 0x28 +#define JZ4780_DMA_SMB2_RX 0x29 +#define JZ4780_DMA_SMB3_TX 0x2a +#define JZ4780_DMA_SMB3_RX 0x2b +#define JZ4780_DMA_SMB4_TX 0x2c +#define JZ4780_DMA_SMB4_RX 0x2d +#define JZ4780_DMA_DES_TX 0x2e +#define JZ4780_DMA_DES_RX 0x2f + +#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */ diff --git a/include/dt-bindings/dma/nbpfaxi.h b/include/dt-bindings/dma/nbpfaxi.h new file mode 100644 index 000000000..c1a5b9e0d --- /dev/null +++ b/include/dt-bindings/dma/nbpfaxi.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. + * Author: Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + */ + +#ifndef DT_BINDINGS_NBPFAXI_H +#define DT_BINDINGS_NBPFAXI_H + +/** + * Use "#dma-cells = <2>;" with the second integer defining slave DMA flags: + */ +#define NBPF_SLAVE_RQ_HIGH 1 +#define NBPF_SLAVE_RQ_LOW 2 +#define NBPF_SLAVE_RQ_LEVEL 4 + +#endif diff --git a/include/dt-bindings/dma/sun4i-a10.h b/include/dt-bindings/dma/sun4i-a10.h new file mode 100644 index 000000000..8caba9ef7 --- /dev/null +++ b/include/dt-bindings/dma/sun4i-a10.h @@ -0,0 +1,56 @@ +/* + * Copyright 2014 Maxime Ripard + * + * Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public + * License along with this file; if not, write to the Free + * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + * MA 02110-1301 USA + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DT_BINDINGS_DMA_SUN4I_A10_H_ +#define __DT_BINDINGS_DMA_SUN4I_A10_H_ + +#define SUN4I_DMA_NORMAL 0 +#define SUN4I_DMA_DEDICATED 1 + +#endif /* __DT_BINDINGS_DMA_SUN4I_A10_H_ */ diff --git a/include/dt-bindings/gce/mt8173-gce.h b/include/dt-bindings/gce/mt8173-gce.h new file mode 100644 index 000000000..ffcf94ba9 --- /dev/null +++ b/include/dt-bindings/gce/mt8173-gce.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 MediaTek Inc. + * Author: Houlong Wei + * + */ + +#ifndef _DT_BINDINGS_GCE_MT8173_H +#define _DT_BINDINGS_GCE_MT8173_H + +/* GCE HW thread priority */ +#define CMDQ_THR_PRIO_LOWEST 0 +#define CMDQ_THR_PRIO_HIGHEST 1 + +/* GCE SUBSYS */ +#define SUBSYS_1400XXXX 1 +#define SUBSYS_1401XXXX 2 +#define SUBSYS_1402XXXX 3 + +/* GCE HW EVENT */ +#define CMDQ_EVENT_DISP_OVL0_SOF 11 +#define CMDQ_EVENT_DISP_OVL1_SOF 12 +#define CMDQ_EVENT_DISP_RDMA0_SOF 13 +#define CMDQ_EVENT_DISP_RDMA1_SOF 14 +#define CMDQ_EVENT_DISP_RDMA2_SOF 15 +#define CMDQ_EVENT_DISP_WDMA0_SOF 16 +#define CMDQ_EVENT_DISP_WDMA1_SOF 17 +#define CMDQ_EVENT_DISP_OVL0_EOF 39 +#define CMDQ_EVENT_DISP_OVL1_EOF 40 +#define CMDQ_EVENT_DISP_RDMA0_EOF 41 +#define CMDQ_EVENT_DISP_RDMA1_EOF 42 +#define CMDQ_EVENT_DISP_RDMA2_EOF 43 +#define CMDQ_EVENT_DISP_WDMA0_EOF 44 +#define CMDQ_EVENT_DISP_WDMA1_EOF 45 +#define CMDQ_EVENT_MUTEX0_STREAM_EOF 53 +#define CMDQ_EVENT_MUTEX1_STREAM_EOF 54 +#define CMDQ_EVENT_MUTEX2_STREAM_EOF 55 +#define CMDQ_EVENT_MUTEX3_STREAM_EOF 56 +#define CMDQ_EVENT_MUTEX4_STREAM_EOF 57 +#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 63 +#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 64 +#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 65 + +#endif diff --git a/include/dt-bindings/gpio/aspeed-gpio.h b/include/dt-bindings/gpio/aspeed-gpio.h new file mode 100644 index 000000000..56fc4889b --- /dev/null +++ b/include/dt-bindings/gpio/aspeed-gpio.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * This header provides constants for binding aspeed,*-gpio. + * + * The first cell in Aspeed's GPIO specifier is the GPIO ID. The macros below + * provide names for this. + * + * The second cell contains standard flag values specified in gpio.h. 
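+ *
+ * Usage sketch (the property name and &gpio phandle are illustrative): a
+ * consumer combines both cells, for example to request line 7 of bank A
+ * as active low:
+ *
+ *	enable-gpios = <&gpio ASPEED_GPIO(A, 7) GPIO_ACTIVE_LOW>;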
+ */ + +#ifndef _DT_BINDINGS_GPIO_ASPEED_GPIO_H +#define _DT_BINDINGS_GPIO_ASPEED_GPIO_H + +#include + +#define ASPEED_GPIO_PORT_A 0 +#define ASPEED_GPIO_PORT_B 1 +#define ASPEED_GPIO_PORT_C 2 +#define ASPEED_GPIO_PORT_D 3 +#define ASPEED_GPIO_PORT_E 4 +#define ASPEED_GPIO_PORT_F 5 +#define ASPEED_GPIO_PORT_G 6 +#define ASPEED_GPIO_PORT_H 7 +#define ASPEED_GPIO_PORT_I 8 +#define ASPEED_GPIO_PORT_J 9 +#define ASPEED_GPIO_PORT_K 10 +#define ASPEED_GPIO_PORT_L 11 +#define ASPEED_GPIO_PORT_M 12 +#define ASPEED_GPIO_PORT_N 13 +#define ASPEED_GPIO_PORT_O 14 +#define ASPEED_GPIO_PORT_P 15 +#define ASPEED_GPIO_PORT_Q 16 +#define ASPEED_GPIO_PORT_R 17 +#define ASPEED_GPIO_PORT_S 18 +#define ASPEED_GPIO_PORT_T 19 +#define ASPEED_GPIO_PORT_U 20 +#define ASPEED_GPIO_PORT_V 21 +#define ASPEED_GPIO_PORT_W 22 +#define ASPEED_GPIO_PORT_X 23 +#define ASPEED_GPIO_PORT_Y 24 +#define ASPEED_GPIO_PORT_Z 25 +#define ASPEED_GPIO_PORT_AA 26 +#define ASPEED_GPIO_PORT_AB 27 +#define ASPEED_GPIO_PORT_AC 28 + +#define ASPEED_GPIO(port, offset) \ + ((ASPEED_GPIO_PORT_##port * 8) + offset) + +#endif diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h new file mode 100644 index 000000000..2cc10ae4b --- /dev/null +++ b/include/dt-bindings/gpio/gpio.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for most GPIO bindings. + * + * Most GPIO bindings include a flags cell as part of the GPIO specifier. + * In most cases, the format of the flags cell uses the standard values + * defined in this header. + */ + +#ifndef _DT_BINDINGS_GPIO_GPIO_H +#define _DT_BINDINGS_GPIO_GPIO_H + +/* Bit 0 express polarity */ +#define GPIO_ACTIVE_HIGH 0 +#define GPIO_ACTIVE_LOW 1 + +/* Bit 1 express single-endedness */ +#define GPIO_PUSH_PULL 0 +#define GPIO_SINGLE_ENDED 2 + +/* Bit 2 express Open drain or open source */ +#define GPIO_LINE_OPEN_SOURCE 0 +#define GPIO_LINE_OPEN_DRAIN 4 + +/* + * Open Drain/Collector is the combination of single-ended open drain interface. + * Open Source/Emitter is the combination of single-ended open source interface. + */ +#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN) +#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE) + +/* Bit 3 express GPIO suspend/resume and reset persistence */ +#define GPIO_PERSISTENT 0 +#define GPIO_TRANSITORY 8 + +#endif diff --git a/include/dt-bindings/gpio/meson-axg-gpio.h b/include/dt-bindings/gpio/meson-axg-gpio.h new file mode 100644 index 000000000..25bb1fffa --- /dev/null +++ b/include/dt-bindings/gpio/meson-axg-gpio.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2017 Amlogic, Inc. All rights reserved. 
+ * Author: Xingyu Chen + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#ifndef _DT_BINDINGS_MESON_AXG_GPIO_H +#define _DT_BINDINGS_MESON_AXG_GPIO_H + +/* First GPIO chip */ +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOAO_12 12 +#define GPIOAO_13 13 +#define GPIO_TEST_N 14 + +/* Second GPIO chip */ +#define GPIOZ_0 0 +#define GPIOZ_1 1 +#define GPIOZ_2 2 +#define GPIOZ_3 3 +#define GPIOZ_4 4 +#define GPIOZ_5 5 +#define GPIOZ_6 6 +#define GPIOZ_7 7 +#define GPIOZ_8 8 +#define GPIOZ_9 9 +#define GPIOZ_10 10 +#define BOOT_0 11 +#define BOOT_1 12 +#define BOOT_2 13 +#define BOOT_3 14 +#define BOOT_4 15 +#define BOOT_5 16 +#define BOOT_6 17 +#define BOOT_7 18 +#define BOOT_8 19 +#define BOOT_9 20 +#define BOOT_10 21 +#define BOOT_11 22 +#define BOOT_12 23 +#define BOOT_13 24 +#define BOOT_14 25 +#define GPIOA_0 26 +#define GPIOA_1 27 +#define GPIOA_2 28 +#define GPIOA_3 29 +#define GPIOA_4 30 +#define GPIOA_5 31 +#define GPIOA_6 32 +#define GPIOA_7 33 +#define GPIOA_8 34 +#define GPIOA_9 35 +#define GPIOA_10 36 +#define GPIOA_11 37 +#define GPIOA_12 38 +#define GPIOA_13 39 +#define GPIOA_14 40 +#define GPIOA_15 41 +#define GPIOA_16 42 +#define GPIOA_17 43 +#define GPIOA_18 44 +#define GPIOA_19 45 +#define GPIOA_20 46 +#define GPIOX_0 47 +#define GPIOX_1 48 +#define GPIOX_2 49 +#define GPIOX_3 50 +#define GPIOX_4 51 +#define GPIOX_5 52 +#define GPIOX_6 53 +#define GPIOX_7 54 +#define GPIOX_8 55 +#define GPIOX_9 56 +#define GPIOX_10 57 +#define GPIOX_11 58 +#define GPIOX_12 59 +#define GPIOX_13 60 +#define GPIOX_14 61 +#define GPIOX_15 62 +#define GPIOX_16 63 +#define GPIOX_17 64 +#define GPIOX_18 65 +#define GPIOX_19 66 +#define GPIOX_20 67 +#define GPIOX_21 68 +#define GPIOX_22 69 +#define GPIOY_0 70 +#define GPIOY_1 71 +#define GPIOY_2 72 +#define GPIOY_3 73 +#define GPIOY_4 74 +#define GPIOY_5 75 +#define GPIOY_6 76 +#define GPIOY_7 77 +#define GPIOY_8 78 +#define GPIOY_9 79 +#define GPIOY_10 80 +#define GPIOY_11 81 +#define GPIOY_12 82 +#define GPIOY_13 83 +#define GPIOY_14 84 +#define GPIOY_15 85 + +#endif /* _DT_BINDINGS_MESON_AXG_GPIO_H */ diff --git a/include/dt-bindings/gpio/meson-gxbb-gpio.h b/include/dt-bindings/gpio/meson-gxbb-gpio.h new file mode 100644 index 000000000..43a68a111 --- /dev/null +++ b/include/dt-bindings/gpio/meson-gxbb-gpio.h @@ -0,0 +1,154 @@ +/* + * GPIO definitions for Amlogic Meson GXBB SoCs + * + * Copyright (C) 2016 Endless Mobile, Inc. + * Author: Carlo Caione + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _DT_BINDINGS_MESON_GXBB_GPIO_H +#define _DT_BINDINGS_MESON_GXBB_GPIO_H + +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOAO_12 12 +#define GPIOAO_13 13 +#define GPIO_TEST_N 14 + +#define GPIOZ_0 0 +#define GPIOZ_1 1 +#define GPIOZ_2 2 +#define GPIOZ_3 3 +#define GPIOZ_4 4 +#define GPIOZ_5 5 +#define GPIOZ_6 6 +#define GPIOZ_7 7 +#define GPIOZ_8 8 +#define GPIOZ_9 9 +#define GPIOZ_10 10 +#define GPIOZ_11 11 +#define GPIOZ_12 12 +#define GPIOZ_13 13 +#define GPIOZ_14 14 +#define GPIOZ_15 15 +#define GPIOH_0 16 +#define GPIOH_1 17 +#define GPIOH_2 18 +#define GPIOH_3 19 +#define BOOT_0 20 +#define BOOT_1 21 +#define BOOT_2 22 +#define BOOT_3 23 +#define BOOT_4 24 +#define BOOT_5 25 +#define BOOT_6 26 +#define BOOT_7 27 +#define BOOT_8 28 +#define BOOT_9 29 +#define BOOT_10 30 +#define BOOT_11 31 +#define BOOT_12 32 +#define BOOT_13 33 +#define BOOT_14 34 +#define BOOT_15 35 +#define BOOT_16 36 +#define BOOT_17 37 +#define CARD_0 38 +#define CARD_1 39 +#define CARD_2 40 +#define CARD_3 41 +#define CARD_4 42 +#define CARD_5 43 +#define CARD_6 44 +#define GPIODV_0 45 +#define GPIODV_1 46 +#define GPIODV_2 47 +#define GPIODV_3 48 +#define GPIODV_4 49 +#define GPIODV_5 50 +#define GPIODV_6 51 +#define GPIODV_7 52 +#define GPIODV_8 53 +#define GPIODV_9 54 +#define GPIODV_10 55 +#define GPIODV_11 56 +#define GPIODV_12 57 +#define GPIODV_13 58 +#define GPIODV_14 59 +#define GPIODV_15 60 +#define GPIODV_16 61 +#define GPIODV_17 62 +#define GPIODV_18 63 +#define GPIODV_19 64 +#define GPIODV_20 65 +#define GPIODV_21 66 +#define GPIODV_22 67 +#define GPIODV_23 68 +#define GPIODV_24 69 +#define GPIODV_25 70 +#define GPIODV_26 71 +#define GPIODV_27 72 +#define GPIODV_28 73 +#define GPIODV_29 74 +#define GPIOY_0 75 +#define GPIOY_1 76 +#define GPIOY_2 77 +#define GPIOY_3 78 +#define GPIOY_4 79 +#define GPIOY_5 80 +#define GPIOY_6 81 +#define GPIOY_7 82 +#define GPIOY_8 83 +#define GPIOY_9 84 +#define GPIOY_10 85 +#define GPIOY_11 86 +#define GPIOY_12 87 +#define GPIOY_13 88 +#define GPIOY_14 89 +#define GPIOY_15 90 +#define GPIOY_16 91 +#define GPIOX_0 92 +#define GPIOX_1 93 +#define GPIOX_2 94 +#define GPIOX_3 95 +#define GPIOX_4 96 +#define GPIOX_5 97 +#define GPIOX_6 98 +#define GPIOX_7 99 +#define GPIOX_8 100 +#define GPIOX_9 101 +#define GPIOX_10 102 +#define GPIOX_11 103 +#define GPIOX_12 104 +#define GPIOX_13 105 +#define GPIOX_14 106 +#define GPIOX_15 107 +#define GPIOX_16 108 +#define GPIOX_17 109 +#define GPIOX_18 110 +#define GPIOX_19 111 +#define GPIOX_20 112 +#define GPIOX_21 113 +#define GPIOX_22 114 +#define GPIOCLK_0 115 +#define GPIOCLK_1 116 +#define GPIOCLK_2 117 +#define GPIOCLK_3 118 + +#endif diff --git a/include/dt-bindings/gpio/meson-gxl-gpio.h b/include/dt-bindings/gpio/meson-gxl-gpio.h new file mode 100644 index 000000000..01f2a2abd --- /dev/null +++ b/include/dt-bindings/gpio/meson-gxl-gpio.h @@ -0,0 +1,131 @@ +/* + * GPIO definitions for Amlogic Meson GXL SoCs + * + * Copyright (C) 2016 Endless Mobile, Inc. + * Author: Carlo Caione + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _DT_BINDINGS_MESON_GXL_GPIO_H +#define _DT_BINDINGS_MESON_GXL_GPIO_H + +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIO_TEST_N 10 + +#define GPIOZ_0 0 +#define GPIOZ_1 1 +#define GPIOZ_2 2 +#define GPIOZ_3 3 +#define GPIOZ_4 4 +#define GPIOZ_5 5 +#define GPIOZ_6 6 +#define GPIOZ_7 7 +#define GPIOZ_8 8 +#define GPIOZ_9 9 +#define GPIOZ_10 10 +#define GPIOZ_11 11 +#define GPIOZ_12 12 +#define GPIOZ_13 13 +#define GPIOZ_14 14 +#define GPIOZ_15 15 +#define GPIOH_0 16 +#define GPIOH_1 17 +#define GPIOH_2 18 +#define GPIOH_3 19 +#define GPIOH_4 20 +#define GPIOH_5 21 +#define GPIOH_6 22 +#define GPIOH_7 23 +#define GPIOH_8 24 +#define GPIOH_9 25 +#define BOOT_0 26 +#define BOOT_1 27 +#define BOOT_2 28 +#define BOOT_3 29 +#define BOOT_4 30 +#define BOOT_5 31 +#define BOOT_6 32 +#define BOOT_7 33 +#define BOOT_8 34 +#define BOOT_9 35 +#define BOOT_10 36 +#define BOOT_11 37 +#define BOOT_12 38 +#define BOOT_13 39 +#define BOOT_14 40 +#define BOOT_15 41 +#define CARD_0 42 +#define CARD_1 43 +#define CARD_2 44 +#define CARD_3 45 +#define CARD_4 46 +#define CARD_5 47 +#define CARD_6 48 +#define GPIODV_0 49 +#define GPIODV_1 50 +#define GPIODV_2 51 +#define GPIODV_3 52 +#define GPIODV_4 53 +#define GPIODV_5 54 +#define GPIODV_6 55 +#define GPIODV_7 56 +#define GPIODV_8 57 +#define GPIODV_9 58 +#define GPIODV_10 59 +#define GPIODV_11 60 +#define GPIODV_12 61 +#define GPIODV_13 62 +#define GPIODV_14 63 +#define GPIODV_15 64 +#define GPIODV_16 65 +#define GPIODV_17 66 +#define GPIODV_18 67 +#define GPIODV_19 68 +#define GPIODV_20 69 +#define GPIODV_21 70 +#define GPIODV_22 71 +#define GPIODV_23 72 +#define GPIODV_24 73 +#define GPIODV_25 74 +#define GPIODV_26 75 +#define GPIODV_27 76 +#define GPIODV_28 77 +#define GPIODV_29 78 +#define GPIOX_0 79 +#define GPIOX_1 80 +#define GPIOX_2 81 +#define GPIOX_3 82 +#define GPIOX_4 83 +#define GPIOX_5 84 +#define GPIOX_6 85 +#define GPIOX_7 86 +#define GPIOX_8 87 +#define GPIOX_9 88 +#define GPIOX_10 89 +#define GPIOX_11 90 +#define GPIOX_12 91 +#define GPIOX_13 92 +#define GPIOX_14 93 +#define GPIOX_15 94 +#define GPIOX_16 95 +#define GPIOX_17 96 +#define GPIOX_18 97 +#define GPIOCLK_0 98 +#define GPIOCLK_1 99 + +#endif diff --git a/include/dt-bindings/gpio/meson8-gpio.h b/include/dt-bindings/gpio/meson8-gpio.h new file mode 100644 index 000000000..fdaeb5cbf --- /dev/null +++ b/include/dt-bindings/gpio/meson8-gpio.h @@ -0,0 +1,157 @@ +/* + * GPIO definitions for Amlogic Meson8 SoCs + * + * Copyright (C) 2014 Beniamino Galvani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _DT_BINDINGS_MESON8_GPIO_H +#define _DT_BINDINGS_MESON8_GPIO_H + +/* First GPIO chip */ +#define GPIOX_0 0 +#define GPIOX_1 1 +#define GPIOX_2 2 +#define GPIOX_3 3 +#define GPIOX_4 4 +#define GPIOX_5 5 +#define GPIOX_6 6 +#define GPIOX_7 7 +#define GPIOX_8 8 +#define GPIOX_9 9 +#define GPIOX_10 10 +#define GPIOX_11 11 +#define GPIOX_12 12 +#define GPIOX_13 13 +#define GPIOX_14 14 +#define GPIOX_15 15 +#define GPIOX_16 16 +#define GPIOX_17 17 +#define GPIOX_18 18 +#define GPIOX_19 19 +#define GPIOX_20 20 +#define GPIOX_21 21 +#define GPIOY_0 22 +#define GPIOY_1 23 +#define GPIOY_2 24 +#define GPIOY_3 25 +#define GPIOY_4 26 +#define GPIOY_5 27 +#define GPIOY_6 28 +#define GPIOY_7 29 +#define GPIOY_8 30 +#define GPIOY_9 31 +#define GPIOY_10 32 +#define GPIOY_11 33 +#define GPIOY_12 34 +#define GPIOY_13 35 +#define GPIOY_14 36 +#define GPIOY_15 37 +#define GPIOY_16 38 +#define GPIODV_0 39 +#define GPIODV_1 40 +#define GPIODV_2 41 +#define GPIODV_3 42 +#define GPIODV_4 43 +#define GPIODV_5 44 +#define GPIODV_6 45 +#define GPIODV_7 46 +#define GPIODV_8 47 +#define GPIODV_9 48 +#define GPIODV_10 49 +#define GPIODV_11 50 +#define GPIODV_12 51 +#define GPIODV_13 52 +#define GPIODV_14 53 +#define GPIODV_15 54 +#define GPIODV_16 55 +#define GPIODV_17 56 +#define GPIODV_18 57 +#define GPIODV_19 58 +#define GPIODV_20 59 +#define GPIODV_21 60 +#define GPIODV_22 61 +#define GPIODV_23 62 +#define GPIODV_24 63 +#define GPIODV_25 64 +#define GPIODV_26 65 +#define GPIODV_27 66 +#define GPIODV_28 67 +#define GPIODV_29 68 +#define GPIOH_0 69 +#define GPIOH_1 70 +#define GPIOH_2 71 +#define GPIOH_3 72 +#define GPIOH_4 73 +#define GPIOH_5 74 +#define GPIOH_6 75 +#define GPIOH_7 76 +#define GPIOH_8 77 +#define GPIOH_9 78 +#define GPIOZ_0 79 +#define GPIOZ_1 80 +#define GPIOZ_2 81 +#define GPIOZ_3 82 +#define GPIOZ_4 83 +#define GPIOZ_5 84 +#define GPIOZ_6 85 +#define GPIOZ_7 86 +#define GPIOZ_8 87 +#define GPIOZ_9 88 +#define GPIOZ_10 89 +#define GPIOZ_11 90 +#define GPIOZ_12 91 +#define GPIOZ_13 92 +#define GPIOZ_14 93 +#define CARD_0 94 +#define CARD_1 95 +#define CARD_2 96 +#define CARD_3 97 +#define CARD_4 98 +#define CARD_5 99 +#define CARD_6 100 +#define BOOT_0 101 +#define BOOT_1 102 +#define BOOT_2 103 +#define BOOT_3 104 +#define BOOT_4 105 +#define BOOT_5 106 +#define BOOT_6 107 +#define BOOT_7 108 +#define BOOT_8 109 +#define BOOT_9 110 +#define BOOT_10 111 +#define BOOT_11 112 +#define BOOT_12 113 +#define BOOT_13 114 +#define BOOT_14 115 +#define BOOT_15 116 +#define BOOT_16 117 +#define BOOT_17 118 +#define BOOT_18 119 + +/* Second GPIO chip */ +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOAO_12 12 +#define GPIOAO_13 13 +#define GPIO_BSD_EN 14 +#define GPIO_TEST_N 15 + +#endif /* _DT_BINDINGS_MESON8_GPIO_H */ diff --git a/include/dt-bindings/gpio/meson8b-gpio.h b/include/dt-bindings/gpio/meson8b-gpio.h new file mode 100644 index 000000000..bf0d76fa0 --- /dev/null +++ b/include/dt-bindings/gpio/meson8b-gpio.h @@ -0,0 +1,127 @@ +/* + * GPIO definitions for Amlogic Meson8b SoCs + * + * Copyright (C) 2015 Endless Mobile, Inc. + * Author: Carlo Caione + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _DT_BINDINGS_MESON8B_GPIO_H +#define _DT_BINDINGS_MESON8B_GPIO_H + +/* EE (CBUS) GPIO chip */ +#define GPIOX_0 0 +#define GPIOX_1 1 +#define GPIOX_2 2 +#define GPIOX_3 3 +#define GPIOX_4 4 +#define GPIOX_5 5 +#define GPIOX_6 6 +#define GPIOX_7 7 +#define GPIOX_8 8 +#define GPIOX_9 9 +#define GPIOX_10 10 +#define GPIOX_11 11 +#define GPIOX_16 12 +#define GPIOX_17 13 +#define GPIOX_18 14 +#define GPIOX_19 15 +#define GPIOX_20 16 +#define GPIOX_21 17 + +#define GPIOY_0 18 +#define GPIOY_1 19 +#define GPIOY_3 20 +#define GPIOY_6 21 +#define GPIOY_7 22 +#define GPIOY_8 23 +#define GPIOY_9 24 +#define GPIOY_10 25 +#define GPIOY_11 26 +#define GPIOY_12 27 +#define GPIOY_13 28 +#define GPIOY_14 29 + +#define GPIODV_9 30 +#define GPIODV_24 31 +#define GPIODV_25 32 +#define GPIODV_26 33 +#define GPIODV_27 34 +#define GPIODV_28 35 +#define GPIODV_29 36 + +#define GPIOH_0 37 +#define GPIOH_1 38 +#define GPIOH_2 39 +#define GPIOH_3 40 +#define GPIOH_4 41 +#define GPIOH_5 42 +#define GPIOH_6 43 +#define GPIOH_7 44 +#define GPIOH_8 45 +#define GPIOH_9 46 + +#define CARD_0 47 +#define CARD_1 48 +#define CARD_2 49 +#define CARD_3 50 +#define CARD_4 51 +#define CARD_5 52 +#define CARD_6 53 + +#define BOOT_0 54 +#define BOOT_1 55 +#define BOOT_2 56 +#define BOOT_3 57 +#define BOOT_4 58 +#define BOOT_5 59 +#define BOOT_6 60 +#define BOOT_7 61 +#define BOOT_8 62 +#define BOOT_9 63 +#define BOOT_10 64 +#define BOOT_11 65 +#define BOOT_12 66 +#define BOOT_13 67 +#define BOOT_14 68 +#define BOOT_15 69 +#define BOOT_16 70 +#define BOOT_17 71 +#define BOOT_18 72 + +#define DIF_0_P 73 +#define DIF_0_N 74 +#define DIF_1_P 75 +#define DIF_1_N 76 +#define DIF_2_P 77 +#define DIF_2_N 78 +#define DIF_3_P 79 +#define DIF_3_N 80 +#define DIF_4_P 81 +#define DIF_4_N 82 + +/* AO GPIO chip */ +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOAO_12 12 +#define GPIOAO_13 13 +#define GPIO_BSD_EN 14 +#define GPIO_TEST_N 15 + +#endif /* _DT_BINDINGS_MESON8B_GPIO_H */ diff --git a/include/dt-bindings/gpio/tegra-gpio.h b/include/dt-bindings/gpio/tegra-gpio.h new file mode 100644 index 000000000..7625dbc57 --- /dev/null +++ b/include/dt-bindings/gpio/tegra-gpio.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra*-gpio. + * + * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below + * provide names for this. + * + * The second cell contains standard flag values specified in gpio.h. 
+ */ + +#ifndef _DT_BINDINGS_GPIO_TEGRA_GPIO_H +#define _DT_BINDINGS_GPIO_TEGRA_GPIO_H + +#include + +#define TEGRA_GPIO_PORT_A 0 +#define TEGRA_GPIO_PORT_B 1 +#define TEGRA_GPIO_PORT_C 2 +#define TEGRA_GPIO_PORT_D 3 +#define TEGRA_GPIO_PORT_E 4 +#define TEGRA_GPIO_PORT_F 5 +#define TEGRA_GPIO_PORT_G 6 +#define TEGRA_GPIO_PORT_H 7 +#define TEGRA_GPIO_PORT_I 8 +#define TEGRA_GPIO_PORT_J 9 +#define TEGRA_GPIO_PORT_K 10 +#define TEGRA_GPIO_PORT_L 11 +#define TEGRA_GPIO_PORT_M 12 +#define TEGRA_GPIO_PORT_N 13 +#define TEGRA_GPIO_PORT_O 14 +#define TEGRA_GPIO_PORT_P 15 +#define TEGRA_GPIO_PORT_Q 16 +#define TEGRA_GPIO_PORT_R 17 +#define TEGRA_GPIO_PORT_S 18 +#define TEGRA_GPIO_PORT_T 19 +#define TEGRA_GPIO_PORT_U 20 +#define TEGRA_GPIO_PORT_V 21 +#define TEGRA_GPIO_PORT_W 22 +#define TEGRA_GPIO_PORT_X 23 +#define TEGRA_GPIO_PORT_Y 24 +#define TEGRA_GPIO_PORT_Z 25 +#define TEGRA_GPIO_PORT_AA 26 +#define TEGRA_GPIO_PORT_BB 27 +#define TEGRA_GPIO_PORT_CC 28 +#define TEGRA_GPIO_PORT_DD 29 +#define TEGRA_GPIO_PORT_EE 30 +#define TEGRA_GPIO_PORT_FF 31 + +#define TEGRA_GPIO(port, offset) \ + ((TEGRA_GPIO_PORT_##port * 8) + offset) + +#endif diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h new file mode 100644 index 000000000..463ad398f --- /dev/null +++ b/include/dt-bindings/gpio/tegra186-gpio.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra186-gpio*. + * + * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below + * provide names for this. + * + * The second cell contains standard flag values specified in gpio.h. + */ + +#ifndef _DT_BINDINGS_GPIO_TEGRA_GPIO_H +#define _DT_BINDINGS_GPIO_TEGRA_GPIO_H + +#include + +/* GPIOs implemented by main GPIO controller */ +#define TEGRA_MAIN_GPIO_PORT_A 0 +#define TEGRA_MAIN_GPIO_PORT_B 1 +#define TEGRA_MAIN_GPIO_PORT_C 2 +#define TEGRA_MAIN_GPIO_PORT_D 3 +#define TEGRA_MAIN_GPIO_PORT_E 4 +#define TEGRA_MAIN_GPIO_PORT_F 5 +#define TEGRA_MAIN_GPIO_PORT_G 6 +#define TEGRA_MAIN_GPIO_PORT_H 7 +#define TEGRA_MAIN_GPIO_PORT_I 8 +#define TEGRA_MAIN_GPIO_PORT_J 9 +#define TEGRA_MAIN_GPIO_PORT_K 10 +#define TEGRA_MAIN_GPIO_PORT_L 11 +#define TEGRA_MAIN_GPIO_PORT_M 12 +#define TEGRA_MAIN_GPIO_PORT_N 13 +#define TEGRA_MAIN_GPIO_PORT_O 14 +#define TEGRA_MAIN_GPIO_PORT_P 15 +#define TEGRA_MAIN_GPIO_PORT_Q 16 +#define TEGRA_MAIN_GPIO_PORT_R 17 +#define TEGRA_MAIN_GPIO_PORT_T 18 +#define TEGRA_MAIN_GPIO_PORT_X 19 +#define TEGRA_MAIN_GPIO_PORT_Y 20 +#define TEGRA_MAIN_GPIO_PORT_BB 21 +#define TEGRA_MAIN_GPIO_PORT_CC 22 + +#define TEGRA_MAIN_GPIO(port, offset) \ + ((TEGRA_MAIN_GPIO_PORT_##port * 8) + offset) + +/* GPIOs implemented by AON GPIO controller */ +#define TEGRA_AON_GPIO_PORT_S 0 +#define TEGRA_AON_GPIO_PORT_U 1 +#define TEGRA_AON_GPIO_PORT_V 2 +#define TEGRA_AON_GPIO_PORT_W 3 +#define TEGRA_AON_GPIO_PORT_Z 4 +#define TEGRA_AON_GPIO_PORT_AA 5 +#define TEGRA_AON_GPIO_PORT_EE 6 +#define TEGRA_AON_GPIO_PORT_FF 7 + +#define TEGRA_AON_GPIO(port, offset) \ + ((TEGRA_AON_GPIO_PORT_##port * 8) + offset) + +#endif diff --git a/include/dt-bindings/gpio/tegra194-gpio.h b/include/dt-bindings/gpio/tegra194-gpio.h new file mode 100644 index 000000000..ede860225 --- /dev/null +++ b/include/dt-bindings/gpio/tegra194-gpio.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */ + +/* + * This header provides constants for binding nvidia,tegra194-gpio*. 
+ * + * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below + * provide names for this. + * + * The second cell contains standard flag values specified in gpio.h. + */ + +#ifndef _DT_BINDINGS_GPIO_TEGRA194_GPIO_H +#define _DT_BINDINGS_GPIO_TEGRA194_GPIO_H + +#include + +/* GPIOs implemented by main GPIO controller */ +#define TEGRA194_MAIN_GPIO_PORT_A 0 +#define TEGRA194_MAIN_GPIO_PORT_B 1 +#define TEGRA194_MAIN_GPIO_PORT_C 2 +#define TEGRA194_MAIN_GPIO_PORT_D 3 +#define TEGRA194_MAIN_GPIO_PORT_E 4 +#define TEGRA194_MAIN_GPIO_PORT_F 5 +#define TEGRA194_MAIN_GPIO_PORT_G 6 +#define TEGRA194_MAIN_GPIO_PORT_H 7 +#define TEGRA194_MAIN_GPIO_PORT_I 8 +#define TEGRA194_MAIN_GPIO_PORT_J 9 +#define TEGRA194_MAIN_GPIO_PORT_K 10 +#define TEGRA194_MAIN_GPIO_PORT_L 11 +#define TEGRA194_MAIN_GPIO_PORT_M 12 +#define TEGRA194_MAIN_GPIO_PORT_N 13 +#define TEGRA194_MAIN_GPIO_PORT_O 14 +#define TEGRA194_MAIN_GPIO_PORT_P 15 +#define TEGRA194_MAIN_GPIO_PORT_Q 16 +#define TEGRA194_MAIN_GPIO_PORT_R 17 +#define TEGRA194_MAIN_GPIO_PORT_S 18 +#define TEGRA194_MAIN_GPIO_PORT_T 19 +#define TEGRA194_MAIN_GPIO_PORT_U 20 +#define TEGRA194_MAIN_GPIO_PORT_V 21 +#define TEGRA194_MAIN_GPIO_PORT_W 22 +#define TEGRA194_MAIN_GPIO_PORT_X 23 +#define TEGRA194_MAIN_GPIO_PORT_Y 24 +#define TEGRA194_MAIN_GPIO_PORT_Z 25 +#define TEGRA194_MAIN_GPIO_PORT_FF 26 +#define TEGRA194_MAIN_GPIO_PORT_GG 27 + +#define TEGRA194_MAIN_GPIO(port, offset) \ + ((TEGRA194_MAIN_GPIO_PORT_##port * 8) + offset) + +/* GPIOs implemented by AON GPIO controller */ +#define TEGRA194_AON_GPIO_PORT_AA 0 +#define TEGRA194_AON_GPIO_PORT_BB 1 +#define TEGRA194_AON_GPIO_PORT_CC 2 +#define TEGRA194_AON_GPIO_PORT_DD 3 +#define TEGRA194_AON_GPIO_PORT_EE 4 + +#define TEGRA194_AON_GPIO(port, offset) \ + ((TEGRA194_AON_GPIO_PORT_##port * 8) + offset) + +#endif diff --git a/include/dt-bindings/gpio/uniphier-gpio.h b/include/dt-bindings/gpio/uniphier-gpio.h new file mode 100644 index 000000000..9f0ad174f --- /dev/null +++ b/include/dt-bindings/gpio/uniphier-gpio.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2017 Socionext Inc. 
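The Tegra GPIO headers above all use the same numbering scheme: the first cell of the GPIO specifier is a flat pin number computed as port index * 8 + bit offset within the port, which is exactly what TEGRA_GPIO(), TEGRA_MAIN_GPIO(), TEGRA_AON_GPIO(), TEGRA194_MAIN_GPIO() and TEGRA194_AON_GPIO() expand to. Below is a minimal standalone C sketch of that arithmetic (not part of the patch); the macro body is copied from tegra-gpio.h so it builds outside a kernel tree, and the pins used are only examples.

/*
 * Standalone sketch: verifies the port*8 + offset encoding used by the
 * TEGRA_GPIO()-style helpers shown above.  Macro bodies are copied here
 * so the file compiles without the kernel include path.
 */
#include <stdio.h>

#define TEGRA_GPIO_PORT_B 1
#define TEGRA_GPIO_PORT_Z 25

#define TEGRA_GPIO(port, offset) \
	((TEGRA_GPIO_PORT_##port * 8) + offset)

int main(void)
{
	/* pin "PB5" is port B (index 1), bit 5 -> flat number 13 */
	_Static_assert(TEGRA_GPIO(B, 5) == 13, "PB5 should be 13");
	/* pin "PZ0" is port Z (index 25), bit 0 -> flat number 200 */
	_Static_assert(TEGRA_GPIO(Z, 0) == 200, "PZ0 should be 200");

	printf("TEGRA_GPIO(B, 5) = %d\n", TEGRA_GPIO(B, 5));
	return 0;
}

In a device tree source the result is typically paired with a flag from dt-bindings/gpio/gpio.h as the second cell, e.g. gpios = <&gpio TEGRA_GPIO(B, 5) GPIO_ACTIVE_LOW>;.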
+ * Author: Masahiro Yamada + */ + +#ifndef _DT_BINDINGS_GPIO_UNIPHIER_H +#define _DT_BINDINGS_GPIO_UNIPHIER_H + +#define UNIPHIER_GPIO_LINES_PER_BANK 8 + +#define UNIPHIER_GPIO_IRQ_OFFSET ((UNIPHIER_GPIO_LINES_PER_BANK) * 15) + +#define UNIPHIER_GPIO_PORT(bank, line) \ + ((UNIPHIER_GPIO_LINES_PER_BANK) * (bank) + (line)) + +#define UNIPHIER_GPIO_IRQ(n) ((UNIPHIER_GPIO_IRQ_OFFSET) + (n)) + +#endif /* _DT_BINDINGS_GPIO_UNIPHIER_H */ diff --git a/include/dt-bindings/i2c/i2c.h b/include/dt-bindings/i2c/i2c.h new file mode 100644 index 000000000..1d5da81d9 --- /dev/null +++ b/include/dt-bindings/i2c/i2c.h @@ -0,0 +1,18 @@ +/* + * This header provides constants for I2C bindings + * + * Copyright (C) 2015 by Sang Engineering + * Copyright (C) 2015 by Renesas Electronics Corporation + * + * Wolfram Sang + * + * GPLv2 only + */ + +#ifndef _DT_BINDINGS_I2C_I2C_H +#define _DT_BINDINGS_I2C_I2C_H + +#define I2C_TEN_BIT_ADDRESS (1 << 31) +#define I2C_OWN_SLAVE_ADDRESS (1 << 30) + +#endif diff --git a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h new file mode 100644 index 000000000..70f99dbdb --- /dev/null +++ b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for configuring the AT91 SAMA5D2 ADC + */ + +#ifndef _DT_BINDINGS_IIO_ADC_AT91_SAMA5D2_ADC_H +#define _DT_BINDINGS_IIO_ADC_AT91_SAMA5D2_ADC_H + +/* X relative position channel index */ +#define AT91_SAMA5D2_ADC_X_CHANNEL 24 +/* Y relative position channel index */ +#define AT91_SAMA5D2_ADC_Y_CHANNEL 25 +/* pressure channel index */ +#define AT91_SAMA5D2_ADC_P_CHANNEL 26 + +#endif diff --git a/include/dt-bindings/iio/adc/fsl-imx25-gcq.h b/include/dt-bindings/iio/adc/fsl-imx25-gcq.h new file mode 100644 index 000000000..08ef4d298 --- /dev/null +++ b/include/dt-bindings/iio/adc/fsl-imx25-gcq.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for configuring the I.MX25 ADC + */ + +#ifndef _DT_BINDINGS_IIO_ADC_FS_IMX25_GCQ_H +#define _DT_BINDINGS_IIO_ADC_FS_IMX25_GCQ_H + +#define MX25_ADC_REFP_YP 0 /* YP voltage reference */ +#define MX25_ADC_REFP_XP 1 /* XP voltage reference */ +#define MX25_ADC_REFP_EXT 2 /* External voltage reference */ +#define MX25_ADC_REFP_INT 3 /* Internal voltage reference */ + +#define MX25_ADC_REFN_XN 0 /* XN ground reference */ +#define MX25_ADC_REFN_YN 1 /* YN ground reference */ +#define MX25_ADC_REFN_NGND 2 /* Internal ground reference */ +#define MX25_ADC_REFN_NGND2 3 /* External ground reference */ + +#endif diff --git a/include/dt-bindings/iio/adi,ad5592r.h b/include/dt-bindings/iio/adi,ad5592r.h new file mode 100644 index 000000000..9f8c7b808 --- /dev/null +++ b/include/dt-bindings/iio/adi,ad5592r.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_ADI_AD5592R_H +#define _DT_BINDINGS_ADI_AD5592R_H + +#define CH_MODE_UNUSED 0 +#define CH_MODE_ADC 1 +#define CH_MODE_DAC 2 +#define CH_MODE_DAC_AND_ADC 3 +#define CH_MODE_GPIO 8 + +#define CH_OFFSTATE_PULLDOWN 0 +#define CH_OFFSTATE_OUT_LOW 1 +#define CH_OFFSTATE_OUT_HIGH 2 +#define CH_OFFSTATE_OUT_TRISTATE 3 + +#endif /* _DT_BINDINGS_ADI_AD5592R_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h new file mode 100644 index 000000000..42121fa23 --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-vadc.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. 
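uniphier-gpio.h above encodes a pin as bank * 8 + line and places interrupt-capable lines at a fixed offset of 8 * 15 = 120. A small standalone sketch of that arithmetic follows (macros copied verbatim from the header; the bank/line values are arbitrary examples, not taken from any board):

/*
 * Standalone sketch of the UniPhier GPIO helper arithmetic: ports are
 * numbered bank*8 + line, and IRQ lines start at offset 8*15 = 120.
 */
#include <stdio.h>

#define UNIPHIER_GPIO_LINES_PER_BANK	8

#define UNIPHIER_GPIO_IRQ_OFFSET	((UNIPHIER_GPIO_LINES_PER_BANK) * 15)

#define UNIPHIER_GPIO_PORT(bank, line)	\
	((UNIPHIER_GPIO_LINES_PER_BANK) * (bank) + (line))

#define UNIPHIER_GPIO_IRQ(n)	((UNIPHIER_GPIO_IRQ_OFFSET) + (n))

int main(void)
{
	_Static_assert(UNIPHIER_GPIO_PORT(2, 3) == 19, "bank 2, line 3 -> 19");
	_Static_assert(UNIPHIER_GPIO_IRQ(4) == 124, "IRQ line 4 -> 124");

	printf("PORT(2, 3) = %d, IRQ(4) = %d\n",
	       UNIPHIER_GPIO_PORT(2, 3), UNIPHIER_GPIO_IRQ(4));
	return 0;
}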
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_H + +/* Voltage ADC channels */ +#define VADC_USBIN 0x00 +#define VADC_DCIN 0x01 +#define VADC_VCHG_SNS 0x02 +#define VADC_SPARE1_03 0x03 +#define VADC_USB_ID_MV 0x04 +#define VADC_VCOIN 0x05 +#define VADC_VBAT_SNS 0x06 +#define VADC_VSYS 0x07 +#define VADC_DIE_TEMP 0x08 +#define VADC_REF_625MV 0x09 +#define VADC_REF_1250MV 0x0a +#define VADC_CHG_TEMP 0x0b +#define VADC_SPARE1 0x0c +#define VADC_SPARE2 0x0d +#define VADC_GND_REF 0x0e +#define VADC_VDD_VADC 0x0f + +#define VADC_P_MUX1_1_1 0x10 +#define VADC_P_MUX2_1_1 0x11 +#define VADC_P_MUX3_1_1 0x12 +#define VADC_P_MUX4_1_1 0x13 +#define VADC_P_MUX5_1_1 0x14 +#define VADC_P_MUX6_1_1 0x15 +#define VADC_P_MUX7_1_1 0x16 +#define VADC_P_MUX8_1_1 0x17 +#define VADC_P_MUX9_1_1 0x18 +#define VADC_P_MUX10_1_1 0x19 +#define VADC_P_MUX11_1_1 0x1a +#define VADC_P_MUX12_1_1 0x1b +#define VADC_P_MUX13_1_1 0x1c +#define VADC_P_MUX14_1_1 0x1d +#define VADC_P_MUX15_1_1 0x1e +#define VADC_P_MUX16_1_1 0x1f + +#define VADC_P_MUX1_1_3 0x20 +#define VADC_P_MUX2_1_3 0x21 +#define VADC_P_MUX3_1_3 0x22 +#define VADC_P_MUX4_1_3 0x23 +#define VADC_P_MUX5_1_3 0x24 +#define VADC_P_MUX6_1_3 0x25 +#define VADC_P_MUX7_1_3 0x26 +#define VADC_P_MUX8_1_3 0x27 +#define VADC_P_MUX9_1_3 0x28 +#define VADC_P_MUX10_1_3 0x29 +#define VADC_P_MUX11_1_3 0x2a +#define VADC_P_MUX12_1_3 0x2b +#define VADC_P_MUX13_1_3 0x2c +#define VADC_P_MUX14_1_3 0x2d +#define VADC_P_MUX15_1_3 0x2e +#define VADC_P_MUX16_1_3 0x2f + +#define VADC_LR_MUX1_BAT_THERM 0x30 +#define VADC_LR_MUX2_BAT_ID 0x31 +#define VADC_LR_MUX3_XO_THERM 0x32 +#define VADC_LR_MUX4_AMUX_THM1 0x33 +#define VADC_LR_MUX5_AMUX_THM2 0x34 +#define VADC_LR_MUX6_AMUX_THM3 0x35 +#define VADC_LR_MUX7_HW_ID 0x36 +#define VADC_LR_MUX8_AMUX_THM4 0x37 +#define VADC_LR_MUX9_AMUX_THM5 0x38 +#define VADC_LR_MUX10_USB_ID 0x39 +#define VADC_AMUX_PU1 0x3a +#define VADC_AMUX_PU2 0x3b +#define VADC_LR_MUX3_BUF_XO_THERM 0x3c + +#define VADC_LR_MUX1_PU1_BAT_THERM 0x70 +#define VADC_LR_MUX2_PU1_BAT_ID 0x71 +#define VADC_LR_MUX3_PU1_XO_THERM 0x72 +#define VADC_LR_MUX4_PU1_AMUX_THM1 0x73 +#define VADC_LR_MUX5_PU1_AMUX_THM2 0x74 +#define VADC_LR_MUX6_PU1_AMUX_THM3 0x75 +#define VADC_LR_MUX7_PU1_AMUX_HW_ID 0x76 +#define VADC_LR_MUX8_PU1_AMUX_THM4 0x77 +#define VADC_LR_MUX9_PU1_AMUX_THM5 0x78 +#define VADC_LR_MUX10_PU1_AMUX_USB_ID 0x79 +#define VADC_LR_MUX3_BUF_PU1_XO_THERM 0x7c + +#define VADC_LR_MUX1_PU2_BAT_THERM 0xb0 +#define VADC_LR_MUX2_PU2_BAT_ID 0xb1 +#define VADC_LR_MUX3_PU2_XO_THERM 0xb2 +#define VADC_LR_MUX4_PU2_AMUX_THM1 0xb3 +#define VADC_LR_MUX5_PU2_AMUX_THM2 0xb4 +#define VADC_LR_MUX6_PU2_AMUX_THM3 0xb5 +#define VADC_LR_MUX7_PU2_AMUX_HW_ID 0xb6 +#define VADC_LR_MUX8_PU2_AMUX_THM4 0xb7 +#define VADC_LR_MUX9_PU2_AMUX_THM5 0xb8 +#define VADC_LR_MUX10_PU2_AMUX_USB_ID 0xb9 +#define VADC_LR_MUX3_BUF_PU2_XO_THERM 0xbc + +#define VADC_LR_MUX1_PU1_PU2_BAT_THERM 0xf0 +#define VADC_LR_MUX2_PU1_PU2_BAT_ID 0xf1 +#define VADC_LR_MUX3_PU1_PU2_XO_THERM 0xf2 +#define VADC_LR_MUX4_PU1_PU2_AMUX_THM1 0xf3 +#define 
VADC_LR_MUX5_PU1_PU2_AMUX_THM2 0xf4 +#define VADC_LR_MUX6_PU1_PU2_AMUX_THM3 0xf5 +#define VADC_LR_MUX7_PU1_PU2_AMUX_HW_ID 0xf6 +#define VADC_LR_MUX8_PU1_PU2_AMUX_THM4 0xf7 +#define VADC_LR_MUX9_PU1_PU2_AMUX_THM5 0xf8 +#define VADC_LR_MUX10_PU1_PU2_AMUX_USB_ID 0xf9 +#define VADC_LR_MUX3_BUF_PU1_PU2_XO_THERM 0xfc + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */ diff --git a/include/dt-bindings/input/gpio-keys.h b/include/dt-bindings/input/gpio-keys.h new file mode 100644 index 000000000..8962df79e --- /dev/null +++ b/include/dt-bindings/input/gpio-keys.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for gpio keys bindings. + */ + +#ifndef _DT_BINDINGS_GPIO_KEYS_H +#define _DT_BINDINGS_GPIO_KEYS_H + +#define EV_ACT_ANY 0x00 /* asserted or deasserted */ +#define EV_ACT_ASSERTED 0x01 /* asserted */ +#define EV_ACT_DEASSERTED 0x02 /* deasserted */ + +#endif /* _DT_BINDINGS_GPIO_KEYS_H */ diff --git a/include/dt-bindings/input/input.h b/include/dt-bindings/input/input.h new file mode 100644 index 000000000..bcf0ae100 --- /dev/null +++ b/include/dt-bindings/input/input.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for most input bindings. + * + * Most input bindings include key code, matrix key code format. + * In most cases, key code and matrix key code format uses + * the standard values/macro defined in this header. + */ + +#ifndef _DT_BINDINGS_INPUT_INPUT_H +#define _DT_BINDINGS_INPUT_INPUT_H + +#include "linux-event-codes.h" + +#define MATRIX_KEY(row, col, code) \ + ((((row) & 0xFF) << 24) | (((col) & 0xFF) << 16) | ((code) & 0xFFFF)) + +#endif /* _DT_BINDINGS_INPUT_INPUT_H */ diff --git a/include/dt-bindings/input/linux-event-codes.h b/include/dt-bindings/input/linux-event-codes.h new file mode 120000 index 000000000..693bbcd26 --- /dev/null +++ b/include/dt-bindings/input/linux-event-codes.h @@ -0,0 +1 @@ +../../uapi/linux/input-event-codes.h \ No newline at end of file diff --git a/include/dt-bindings/input/ti-drv260x.h b/include/dt-bindings/input/ti-drv260x.h new file mode 100644 index 000000000..2626e6d9f --- /dev/null +++ b/include/dt-bindings/input/ti-drv260x.h @@ -0,0 +1,36 @@ +/* + * DRV260X haptics driver family + * + * Author: Dan Murphy + * + * Copyright: (C) 2014 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
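The MATRIX_KEY() helper in input.h above packs one keymap entry into a single 32-bit cell: row in bits 31:24, column in bits 23:16 and the key code in bits 15:0. Here is a standalone sketch of the packing; the macro is copied from the header, and KEY_ENTER = 28 is the standard Linux input event code, used only as an example value.

/*
 * Standalone sketch of the MATRIX_KEY() cell layout used by matrix-keymap
 * bindings: row 31:24, column 23:16, key code 15:0.
 */
#include <stdio.h>

#define KEY_ENTER 28	/* from the input event codes, example only */

#define MATRIX_KEY(row, col, code)	\
	((((row) & 0xFF) << 24) | (((col) & 0xFF) << 16) | ((code) & 0xFFFF))

int main(void)
{
	unsigned int cell = MATRIX_KEY(3, 5, KEY_ENTER);

	/* row 3 -> 0x03000000, col 5 -> 0x00050000, ENTER -> 0x001C */
	_Static_assert(MATRIX_KEY(3, 5, 28) == 0x0305001C, "packed cell value");

	printf("linux,keymap cell = 0x%08X\n", cell);
	return 0;
}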
+ */ + +#ifndef _DT_BINDINGS_TI_DRV260X_H +#define _DT_BINDINGS_TI_DRV260X_H + +/* Calibration Types */ +#define DRV260X_LRA_MODE 0x00 +#define DRV260X_LRA_NO_CAL_MODE 0x01 +#define DRV260X_ERM_MODE 0x02 + +/* Library Selection */ +#define DRV260X_LIB_EMPTY 0x00 +#define DRV260X_ERM_LIB_A 0x01 +#define DRV260X_ERM_LIB_B 0x02 +#define DRV260X_ERM_LIB_C 0x03 +#define DRV260X_ERM_LIB_D 0x04 +#define DRV260X_ERM_LIB_E 0x05 +#define DRV260X_LIB_LRA 0x06 +#define DRV260X_ERM_LIB_F 0x07 + +#endif diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h new file mode 100644 index 000000000..0c85f65c8 --- /dev/null +++ b/include/dt-bindings/interrupt-controller/arm-gic.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the ARM GIC. + */ + +#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ARM_GIC_H +#define _DT_BINDINGS_INTERRUPT_CONTROLLER_ARM_GIC_H + +#include + +/* interrupt specifier cell 0 */ + +#define GIC_SPI 0 +#define GIC_PPI 1 + +/* + * Interrupt specifier cell 2. + * The flags in irq.h are valid, plus those below. + */ +#define GIC_CPU_MASK_RAW(x) ((x) << 8) +#define GIC_CPU_MASK_SIMPLE(num) GIC_CPU_MASK_RAW((1 << (num)) - 1) + +#endif diff --git a/include/dt-bindings/interrupt-controller/irq-st.h b/include/dt-bindings/interrupt-controller/irq-st.h new file mode 100644 index 000000000..4c59aceb9 --- /dev/null +++ b/include/dt-bindings/interrupt-controller/irq-st.h @@ -0,0 +1,30 @@ +/* + * include/linux/irqchip/irq-st.h + * + * Copyright (C) 2014 STMicroelectronics – All Rights Reserved + * + * Author: Lee Jones + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ST_H +#define _DT_BINDINGS_INTERRUPT_CONTROLLER_ST_H + +#define ST_IRQ_SYSCFG_EXT_0 0 +#define ST_IRQ_SYSCFG_EXT_1 1 +#define ST_IRQ_SYSCFG_EXT_2 2 +#define ST_IRQ_SYSCFG_CTI_0 3 +#define ST_IRQ_SYSCFG_CTI_1 4 +#define ST_IRQ_SYSCFG_PMU_0 5 +#define ST_IRQ_SYSCFG_PMU_1 6 +#define ST_IRQ_SYSCFG_pl310_L2 7 +#define ST_IRQ_SYSCFG_DISABLED 0xFFFFFFFF + +#define ST_IRQ_SYSCFG_EXT_1_INV 0x1 +#define ST_IRQ_SYSCFG_EXT_2_INV 0x2 +#define ST_IRQ_SYSCFG_EXT_3_INV 0x4 + +#endif diff --git a/include/dt-bindings/interrupt-controller/irq.h b/include/dt-bindings/interrupt-controller/irq.h new file mode 100644 index 000000000..a8b310555 --- /dev/null +++ b/include/dt-bindings/interrupt-controller/irq.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for most IRQ bindings. + * + * Most IRQ bindings include a flags cell as part of the IRQ specifier. + * In most cases, the format of the flags cell uses the standard values + * defined in this header. 
+ */ + +#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_IRQ_H +#define _DT_BINDINGS_INTERRUPT_CONTROLLER_IRQ_H + +#define IRQ_TYPE_NONE 0 +#define IRQ_TYPE_EDGE_RISING 1 +#define IRQ_TYPE_EDGE_FALLING 2 +#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) +#define IRQ_TYPE_LEVEL_HIGH 4 +#define IRQ_TYPE_LEVEL_LOW 8 + +#endif diff --git a/include/dt-bindings/interrupt-controller/mips-gic.h b/include/dt-bindings/interrupt-controller/mips-gic.h new file mode 100644 index 000000000..bd45cee0c --- /dev/null +++ b/include/dt-bindings/interrupt-controller/mips-gic.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H +#define _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H + +#include + +#define GIC_SHARED 0 +#define GIC_LOCAL 1 + +#endif diff --git a/include/dt-bindings/interrupt-controller/mvebu-icu.h b/include/dt-bindings/interrupt-controller/mvebu-icu.h new file mode 100644 index 000000000..bb5217c64 --- /dev/null +++ b/include/dt-bindings/interrupt-controller/mvebu-icu.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the MVEBU ICU driver. + */ + +#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MVEBU_ICU_H +#define _DT_BINDINGS_INTERRUPT_CONTROLLER_MVEBU_ICU_H + +/* interrupt specifier cell 0 */ + +#define ICU_GRP_NSR 0x0 +#define ICU_GRP_SR 0x1 +#define ICU_GRP_SEI 0x4 +#define ICU_GRP_REI 0x5 + +#endif diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h new file mode 100644 index 000000000..e171d0a6b --- /dev/null +++ b/include/dt-bindings/leds/common.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides macros for the common LEDs device tree bindings. + * + * Copyright (C) 2015, Samsung Electronics Co., Ltd. + * + * Author: Jacek Anaszewski + */ + +#ifndef __DT_BINDINGS_LEDS_H +#define __DT_BINDINGS_LEDS_H + +/* External trigger type */ +#define LEDS_TRIG_TYPE_EDGE 0 +#define LEDS_TRIG_TYPE_LEVEL 1 + +/* Boost modes */ +#define LEDS_BOOST_OFF 0 +#define LEDS_BOOST_ADAPTIVE 1 +#define LEDS_BOOST_FIXED 2 + +#endif /* __DT_BINDINGS_LEDS_H */ diff --git a/include/dt-bindings/leds/leds-netxbig.h b/include/dt-bindings/leds/leds-netxbig.h new file mode 100644 index 000000000..92658b031 --- /dev/null +++ b/include/dt-bindings/leds/leds-netxbig.h @@ -0,0 +1,18 @@ +/* + * This header provides constants for netxbig LED bindings. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
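Taken together, arm-gic.h and irq.h above describe the usual three-cell GIC interrupt specifier: an interrupt type (GIC_SPI or GIC_PPI), the interrupt number, and a flags cell that combines a trigger type with, for PPIs, a CPU mask in bits 15:8. A standalone sketch of the flag arithmetic follows (macros copied from the two headers; the per-CPU timer shown in the comment is only an illustrative pattern, not taken from this patch):

/*
 * Standalone sketch of the GIC specifier flags: trigger type in the low
 * bits, CPU mask (for PPIs) shifted into bits 15:8.
 */
#include <stdio.h>

#define GIC_SPI 0
#define GIC_PPI 1

#define IRQ_TYPE_EDGE_RISING	1
#define IRQ_TYPE_EDGE_FALLING	2
#define IRQ_TYPE_EDGE_BOTH	(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
#define IRQ_TYPE_LEVEL_LOW	8

#define GIC_CPU_MASK_RAW(x)	((x) << 8)
#define GIC_CPU_MASK_SIMPLE(num) GIC_CPU_MASK_RAW((1 << (num)) - 1)

int main(void)
{
	/* "both edges" is simply the OR of the two edge flags */
	_Static_assert(IRQ_TYPE_EDGE_BOTH == 3, "edge both");
	/* a 4-CPU mask occupies bits 11:8 of the flags cell */
	_Static_assert(GIC_CPU_MASK_SIMPLE(4) == 0xf00, "4-cpu mask");

	/*
	 * Example flags cell as it might appear in a PPI specifier such as
	 * <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>.
	 */
	printf("flags = 0x%x\n", GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW);
	return 0;
}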
+ */ + +#ifndef _DT_BINDINGS_LEDS_NETXBIG_H +#define _DT_BINDINGS_LEDS_NETXBIG_H + +#define NETXBIG_LED_OFF 0 +#define NETXBIG_LED_ON 1 +#define NETXBIG_LED_SATA 2 +#define NETXBIG_LED_TIMER1 3 +#define NETXBIG_LED_TIMER2 4 + +#endif /* _DT_BINDINGS_LEDS_NETXBIG_H */ diff --git a/include/dt-bindings/leds/leds-ns2.h b/include/dt-bindings/leds/leds-ns2.h new file mode 100644 index 000000000..fd615749e --- /dev/null +++ b/include/dt-bindings/leds/leds-ns2.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_LEDS_NS2_H +#define _DT_BINDINGS_LEDS_NS2_H + +#define NS_V2_LED_OFF 0 +#define NS_V2_LED_ON 1 +#define NS_V2_LED_SATA 2 + +#endif diff --git a/include/dt-bindings/leds/leds-pca9532.h b/include/dt-bindings/leds/leds-pca9532.h new file mode 100644 index 000000000..4d917aab7 --- /dev/null +++ b/include/dt-bindings/leds/leds-pca9532.h @@ -0,0 +1,18 @@ +/* + * This header provides constants for pca9532 LED bindings. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _DT_BINDINGS_LEDS_PCA9532_H +#define _DT_BINDINGS_LEDS_PCA9532_H + +#define PCA9532_TYPE_NONE 0 +#define PCA9532_TYPE_LED 1 +#define PCA9532_TYPE_N2100_BEEP 2 +#define PCA9532_TYPE_GPIO 3 +#define PCA9532_LED_TIMER2 4 + +#endif /* _DT_BINDINGS_LEDS_PCA9532_H */ diff --git a/include/dt-bindings/leds/leds-pca955x.h b/include/dt-bindings/leds/leds-pca955x.h new file mode 100644 index 000000000..78cb7e979 --- /dev/null +++ b/include/dt-bindings/leds/leds-pca955x.h @@ -0,0 +1,16 @@ +/* + * This header provides constants for pca955x LED bindings. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _DT_BINDINGS_LEDS_PCA955X_H +#define _DT_BINDINGS_LEDS_PCA955X_H + +#define PCA955X_TYPE_NONE 0 +#define PCA955X_TYPE_LED 1 +#define PCA955X_TYPE_GPIO 2 + +#endif /* _DT_BINDINGS_LEDS_PCA955X_H */ diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h new file mode 100644 index 000000000..bcab5b7ca --- /dev/null +++ b/include/dt-bindings/mailbox/tegra186-hsp.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra186-hsp. + */ + +#ifndef _DT_BINDINGS_MAILBOX_TEGRA186_HSP_H +#define _DT_BINDINGS_MAILBOX_TEGRA186_HSP_H + +/* + * These define the type of mailbox that is to be used (doorbell, shared + * mailbox, shared semaphore or arbitrated semaphore). + */ +#define TEGRA_HSP_MBOX_TYPE_DB 0x0 +#define TEGRA_HSP_MBOX_TYPE_SM 0x1 +#define TEGRA_HSP_MBOX_TYPE_SS 0x2 +#define TEGRA_HSP_MBOX_TYPE_AS 0x3 + +/* + * These defines represent the bit associated with the given master ID in the + * doorbell registers. 
+ */ +#define TEGRA_HSP_DB_MASTER_CCPLEX 17 +#define TEGRA_HSP_DB_MASTER_BPMP 19 + +#endif diff --git a/include/dt-bindings/media/c8sectpfe.h b/include/dt-bindings/media/c8sectpfe.h new file mode 100644 index 000000000..6b1fb6f54 --- /dev/null +++ b/include/dt-bindings/media/c8sectpfe.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_C8SECTPFE_H +#define __DT_C8SECTPFE_H + +#define STV0367_TDA18212_NIMA_1 0 +#define STV0367_TDA18212_NIMA_2 1 +#define STV0367_TDA18212_NIMB_1 2 +#define STV0367_TDA18212_NIMB_2 3 + +#define STV0903_6110_LNB24_NIMA 4 +#define STV0903_6110_LNB24_NIMB 5 + +#endif /* __DT_C8SECTPFE_H */ diff --git a/include/dt-bindings/media/omap3-isp.h b/include/dt-bindings/media/omap3-isp.h new file mode 100644 index 000000000..b18c60e46 --- /dev/null +++ b/include/dt-bindings/media/omap3-isp.h @@ -0,0 +1,22 @@ +/* + * include/dt-bindings/media/omap3-isp.h + * + * Copyright (C) 2015 Sakari Ailus + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef __DT_BINDINGS_OMAP3_ISP_H__ +#define __DT_BINDINGS_OMAP3_ISP_H__ + +#define OMAP3ISP_PHY_TYPE_COMPLEX_IO 0 +#define OMAP3ISP_PHY_TYPE_CSIPHY 1 + +#endif /* __DT_BINDINGS_OMAP3_ISP_H__ */ diff --git a/include/dt-bindings/media/tda1997x.h b/include/dt-bindings/media/tda1997x.h new file mode 100644 index 000000000..bd9fbd718 --- /dev/null +++ b/include/dt-bindings/media/tda1997x.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2017 Gateworks Corporation + */ +#ifndef _DT_BINDINGS_MEDIA_TDA1997X_H +#define _DT_BINDINGS_MEDIA_TDA1997X_H + +/* TDA19973 36bit Video Port control registers */ +#define TDA1997X_VP36_35_32 0 +#define TDA1997X_VP36_31_28 1 +#define TDA1997X_VP36_27_24 2 +#define TDA1997X_VP36_23_20 3 +#define TDA1997X_VP36_19_16 4 +#define TDA1997X_VP36_15_12 5 +#define TDA1997X_VP36_11_08 6 +#define TDA1997X_VP36_07_04 7 +#define TDA1997X_VP36_03_00 8 + +/* TDA19971 24bit Video Port control registers */ +#define TDA1997X_VP24_V23_20 0 +#define TDA1997X_VP24_V19_16 1 +#define TDA1997X_VP24_V15_12 3 +#define TDA1997X_VP24_V11_08 4 +#define TDA1997X_VP24_V07_04 6 +#define TDA1997X_VP24_V03_00 7 + +/* Pin groups */ +#define TDA1997X_VP_OUT_EN 0x80 /* enable output group */ +#define TDA1997X_VP_HIZ 0x40 /* hi-Z output group when not used */ +#define TDA1997X_VP_SWP 0x10 /* pin-swap output group */ +#define TDA1997X_R_CR_CBCR_3_0 (0 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ) +#define TDA1997X_R_CR_CBCR_7_4 (1 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ) +#define TDA1997X_R_CR_CBCR_11_8 (2 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ) +#define TDA1997X_B_CB_3_0 (3 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ) +#define TDA1997X_B_CB_7_4 (4 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ) +#define TDA1997X_B_CB_11_8 (5 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ) +#define TDA1997X_G_Y_3_0 (6 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ) +#define TDA1997X_G_Y_7_4 (7 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ) +#define TDA1997X_G_Y_11_8 (8 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ) +/* pinswapped groups */ +#define TDA1997X_R_CR_CBCR_3_0_S (TDA1997X_R_CR_CBCR_3_0 | TDA1997X_VP_SWAP) +#define TDA1997X_R_CR_CBCR_7_4_S 
(TDA1997X_R_CR_CBCR_7_4 | TDA1997X_VP_SWAP) +#define TDA1997X_R_CR_CBCR_11_8_S (TDA1997X_R_CR_CBCR_11_8 | TDA1997X_VP_SWAP) +#define TDA1997X_B_CB_3_0_S (TDA1997X_B_CB_3_0 | TDA1997X_VP_SWAP) +#define TDA1997X_B_CB_7_4_S (TDA1997X_B_CB_7_4 | TDA1997X_VP_SWAP) +#define TDA1997X_B_CB_11_8_S (TDA1997X_B_CB_11_8 | TDA1997X_VP_SWAP) +#define TDA1997X_G_Y_3_0_S (TDA1997X_G_Y_3_0 | TDA1997X_VP_SWAP) +#define TDA1997X_G_Y_7_4_S (TDA1997X_G_Y_7_4 | TDA1997X_VP_SWAP) +#define TDA1997X_G_Y_11_8_S (TDA1997X_G_Y_11_8 | TDA1997X_VP_SWAP) + +/* Audio bus DAI format */ +#define TDA1997X_I2S16 1 /* I2S 16bit */ +#define TDA1997X_I2S32 2 /* I2S 32bit */ +#define TDA1997X_SPDIF 3 /* SPDIF */ +#define TDA1997X_OBA 4 /* One Bit Audio */ +#define TDA1997X_DST 5 /* Direct Stream Transfer */ +#define TDA1997X_I2S16_HBR 6 /* HBR straight in I2S 16bit mode */ +#define TDA1997X_I2S16_HBR_DEMUX 7 /* HBR demux in I2S 16bit mode */ +#define TDA1997X_I2S32_HBR_DEMUX 8 /* HBR demux in I2S 32bit mode */ +#define TDA1997X_SPDIF_HBR_DEMUX 9 /* HBR demux in SPDIF mode */ + +/* Audio bus channel layout */ +#define TDA1997X_LAYOUT0 0 /* 2-channel */ +#define TDA1997X_LAYOUT1 1 /* 8-channel */ + +/* Audio bus clock */ +#define TDA1997X_ACLK_16FS 0 +#define TDA1997X_ACLK_32FS 1 +#define TDA1997X_ACLK_64FS 2 +#define TDA1997X_ACLK_128FS 3 +#define TDA1997X_ACLK_256FS 4 +#define TDA1997X_ACLK_512FS 5 + +#endif /* _DT_BINDINGS_MEDIA_TDA1997X_H */ diff --git a/include/dt-bindings/media/tvp5150.h b/include/dt-bindings/media/tvp5150.h new file mode 100644 index 000000000..c852a35e9 --- /dev/null +++ b/include/dt-bindings/media/tvp5150.h @@ -0,0 +1,35 @@ +/* + tvp5150.h - definition for tvp5150 inputs + + Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef _DT_BINDINGS_MEDIA_TVP5150_H +#define _DT_BINDINGS_MEDIA_TVP5150_H + +/* TVP5150 HW inputs */ +#define TVP5150_COMPOSITE0 0 +#define TVP5150_COMPOSITE1 1 +#define TVP5150_SVIDEO 2 + +#define TVP5150_INPUT_NUM 3 + +/* TVP5150 HW outputs */ +#define TVP5150_NORMAL 0 +#define TVP5150_BLACK_SCREEN 1 + +#endif /* _DT_BINDINGS_MEDIA_TVP5150_H */ diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h new file mode 100644 index 000000000..6298fec00 --- /dev/null +++ b/include/dt-bindings/media/xilinx-vip.h @@ -0,0 +1,39 @@ +/* + * Xilinx Video IP Core + * + * Copyright (C) 2013-2015 Ideas on Board + * Copyright (C) 2013-2015 Xilinx, Inc. + * + * Contacts: Hyun Kwon + * Laurent Pinchart + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __DT_BINDINGS_MEDIA_XILINX_VIP_H__ +#define __DT_BINDINGS_MEDIA_XILINX_VIP_H__ + +/* + * Video format codes as defined in "AXI4-Stream Video IP and System Design + * Guide". 
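In tda1997x.h above, each video-port group value is a small group index OR'd with control bits: TDA1997X_VP_OUT_EN (0x80), TDA1997X_VP_HIZ (0x40) and a pin-swap bit. The pin-swapped *_S variants reference TDA1997X_VP_SWAP while the header defines the swap bit as TDA1997X_VP_SWP (0x10); the sketch below assumes they refer to the same bit, and the other values are copied from the header.

/*
 * Standalone sketch of the TDA1997x video-port group encoding: group index
 * in the low bits, control flags (output enable, hi-Z, pin swap) above.
 */
#include <stdio.h>

#define TDA1997X_VP_OUT_EN	0x80	/* enable output group */
#define TDA1997X_VP_HIZ		0x40	/* hi-Z output group when not used */
#define TDA1997X_VP_SWP		0x10	/* pin-swap output group */

#define TDA1997X_G_Y_11_8	(8 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
#define TDA1997X_G_Y_11_8_S	(TDA1997X_G_Y_11_8 | TDA1997X_VP_SWP)

int main(void)
{
	_Static_assert(TDA1997X_G_Y_11_8 == 0xc8, "group 8, enabled, hi-Z");
	_Static_assert(TDA1997X_G_Y_11_8_S == 0xd8, "same group, pins swapped");

	printf("G_Y_11_8 = 0x%02x, swapped = 0x%02x\n",
	       TDA1997X_G_Y_11_8, TDA1997X_G_Y_11_8_S);
	return 0;
}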
+ */ +#define XVIP_VF_YUV_422 0 +#define XVIP_VF_YUV_444 1 +#define XVIP_VF_RBG 2 +#define XVIP_VF_YUV_420 3 +#define XVIP_VF_YUVA_422 4 +#define XVIP_VF_YUVA_444 5 +#define XVIP_VF_RGBA 6 +#define XVIP_VF_YUVA_420 7 +#define XVIP_VF_YUVD_422 8 +#define XVIP_VF_YUVD_444 9 +#define XVIP_VF_RGBD 10 +#define XVIP_VF_YUVD_420 11 +#define XVIP_VF_MONO_SENSOR 12 +#define XVIP_VF_CUSTOM2 13 +#define XVIP_VF_CUSTOM3 14 +#define XVIP_VF_CUSTOM4 15 + +#endif /* __DT_BINDINGS_MEDIA_XILINX_VIP_H__ */ diff --git a/include/dt-bindings/memory/mt2701-larb-port.h b/include/dt-bindings/memory/mt2701-larb-port.h new file mode 100644 index 000000000..6764d7447 --- /dev/null +++ b/include/dt-bindings/memory/mt2701-larb-port.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2015 MediaTek Inc. + * Author: Honghui Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _MT2701_LARB_PORT_H_ +#define _MT2701_LARB_PORT_H_ + +/* + * Mediatek m4u generation 1 such as mt2701 has flat m4u port numbers, + * the first port's id for larb[N] would be the last port's id of larb[N - 1] + * plus one while larb[0]'s first port number is 0. The definition of + * MT2701_M4U_ID_LARBx is following HW register spec. + * But m4u generation 2 like mt8173 have different port number, it use fixed + * offset for each larb, the first port's id for larb[N] would be (N * 32). + */ +#define LARB0_PORT_OFFSET 0 +#define LARB1_PORT_OFFSET 11 +#define LARB2_PORT_OFFSET 21 +#define LARB3_PORT_OFFSET 44 + +#define MT2701_M4U_ID_LARB0(port) ((port) + LARB0_PORT_OFFSET) +#define MT2701_M4U_ID_LARB1(port) ((port) + LARB1_PORT_OFFSET) +#define MT2701_M4U_ID_LARB2(port) ((port) + LARB2_PORT_OFFSET) + +/* Port define for larb0 */ +#define MT2701_M4U_PORT_DISP_OVL_0 MT2701_M4U_ID_LARB0(0) +#define MT2701_M4U_PORT_DISP_RDMA1 MT2701_M4U_ID_LARB0(1) +#define MT2701_M4U_PORT_DISP_RDMA MT2701_M4U_ID_LARB0(2) +#define MT2701_M4U_PORT_DISP_WDMA MT2701_M4U_ID_LARB0(3) +#define MT2701_M4U_PORT_MM_CMDQ MT2701_M4U_ID_LARB0(4) +#define MT2701_M4U_PORT_MDP_RDMA MT2701_M4U_ID_LARB0(5) +#define MT2701_M4U_PORT_MDP_WDMA MT2701_M4U_ID_LARB0(6) +#define MT2701_M4U_PORT_MDP_ROTO MT2701_M4U_ID_LARB0(7) +#define MT2701_M4U_PORT_MDP_ROTCO MT2701_M4U_ID_LARB0(8) +#define MT2701_M4U_PORT_MDP_ROTVO MT2701_M4U_ID_LARB0(9) +#define MT2701_M4U_PORT_MDP_RDMA1 MT2701_M4U_ID_LARB0(10) + +/* Port define for larb1 */ +#define MT2701_M4U_PORT_VDEC_MC_EXT MT2701_M4U_ID_LARB1(0) +#define MT2701_M4U_PORT_VDEC_PP_EXT MT2701_M4U_ID_LARB1(1) +#define MT2701_M4U_PORT_VDEC_PPWRAP_EXT MT2701_M4U_ID_LARB1(2) +#define MT2701_M4U_PORT_VDEC_AVC_MV_EXT MT2701_M4U_ID_LARB1(3) +#define MT2701_M4U_PORT_VDEC_PRED_RD_EXT MT2701_M4U_ID_LARB1(4) +#define MT2701_M4U_PORT_VDEC_PRED_WR_EXT MT2701_M4U_ID_LARB1(5) +#define MT2701_M4U_PORT_VDEC_VLD_EXT MT2701_M4U_ID_LARB1(6) +#define MT2701_M4U_PORT_VDEC_VLD2_EXT MT2701_M4U_ID_LARB1(7) +#define MT2701_M4U_PORT_VDEC_TILE_EXT MT2701_M4U_ID_LARB1(8) +#define MT2701_M4U_PORT_VDEC_IMG_RESZ_EXT MT2701_M4U_ID_LARB1(9) + +/* Port define for larb2 */ +#define MT2701_M4U_PORT_VENC_RCPU MT2701_M4U_ID_LARB2(0) +#define MT2701_M4U_PORT_VENC_REC_FRM MT2701_M4U_ID_LARB2(1) +#define 
MT2701_M4U_PORT_VENC_BSDMA MT2701_M4U_ID_LARB2(2) +#define MT2701_M4U_PORT_JPGENC_RDMA MT2701_M4U_ID_LARB2(3) +#define MT2701_M4U_PORT_VENC_LT_RCPU MT2701_M4U_ID_LARB2(4) +#define MT2701_M4U_PORT_VENC_LT_REC_FRM MT2701_M4U_ID_LARB2(5) +#define MT2701_M4U_PORT_VENC_LT_BSDMA MT2701_M4U_ID_LARB2(6) +#define MT2701_M4U_PORT_JPGDEC_BSDMA MT2701_M4U_ID_LARB2(7) +#define MT2701_M4U_PORT_VENC_SV_COMV MT2701_M4U_ID_LARB2(8) +#define MT2701_M4U_PORT_VENC_RD_COMV MT2701_M4U_ID_LARB2(9) +#define MT2701_M4U_PORT_JPGENC_BSDMA MT2701_M4U_ID_LARB2(10) +#define MT2701_M4U_PORT_VENC_CUR_LUMA MT2701_M4U_ID_LARB2(11) +#define MT2701_M4U_PORT_VENC_CUR_CHROMA MT2701_M4U_ID_LARB2(12) +#define MT2701_M4U_PORT_VENC_REF_LUMA MT2701_M4U_ID_LARB2(13) +#define MT2701_M4U_PORT_VENC_REF_CHROMA MT2701_M4U_ID_LARB2(14) +#define MT2701_M4U_PORT_IMG_RESZ MT2701_M4U_ID_LARB2(15) +#define MT2701_M4U_PORT_VENC_LT_SV_COMV MT2701_M4U_ID_LARB2(16) +#define MT2701_M4U_PORT_VENC_LT_RD_COMV MT2701_M4U_ID_LARB2(17) +#define MT2701_M4U_PORT_VENC_LT_CUR_LUMA MT2701_M4U_ID_LARB2(18) +#define MT2701_M4U_PORT_VENC_LT_CUR_CHROMA MT2701_M4U_ID_LARB2(19) +#define MT2701_M4U_PORT_VENC_LT_REF_LUMA MT2701_M4U_ID_LARB2(20) +#define MT2701_M4U_PORT_VENC_LT_REF_CHROMA MT2701_M4U_ID_LARB2(21) +#define MT2701_M4U_PORT_JPGDEC_WDMA MT2701_M4U_ID_LARB2(22) + +#endif diff --git a/include/dt-bindings/memory/mt2712-larb-port.h b/include/dt-bindings/memory/mt2712-larb-port.h new file mode 100644 index 000000000..6f9aa7349 --- /dev/null +++ b/include/dt-bindings/memory/mt2712-larb-port.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Yong Wu + */ +#ifndef __DTS_IOMMU_PORT_MT2712_H +#define __DTS_IOMMU_PORT_MT2712_H + +#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) + +#define M4U_LARB0_ID 0 +#define M4U_LARB1_ID 1 +#define M4U_LARB2_ID 2 +#define M4U_LARB3_ID 3 +#define M4U_LARB4_ID 4 +#define M4U_LARB5_ID 5 +#define M4U_LARB6_ID 6 +#define M4U_LARB7_ID 7 +#define M4U_LARB8_ID 8 +#define M4U_LARB9_ID 9 + +/* larb0 */ +#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0) +#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1) +#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2) +#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 3) +#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 4) +#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5) +#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 6) +#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB0_ID, 7) + +/* larb1 */ +#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB1_ID, 0) +#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB1_ID, 1) +#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB1_ID, 2) +#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB1_ID, 3) +#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB1_ID, 4) +#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB1_ID, 5) +#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB1_ID, 6) +#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB1_ID, 7) +#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB1_ID, 8) +#define M4U_PORT_HW_VDEC_TILE MTK_M4U_ID(M4U_LARB1_ID, 9) +#define M4U_PORT_HW_IMG_RESZ_EXT MTK_M4U_ID(M4U_LARB1_ID, 10) + +/* larb2 */ +#define M4U_PORT_CAM_DMA0 MTK_M4U_ID(M4U_LARB2_ID, 0) +#define M4U_PORT_CAM_DMA1 MTK_M4U_ID(M4U_LARB2_ID, 1) +#define M4U_PORT_CAM_DMA2 MTK_M4U_ID(M4U_LARB2_ID, 2) + +/* larb3 */ +#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0) +#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1) +#define M4U_PORT_VENC_BSDMA 
MTK_M4U_ID(M4U_LARB3_ID, 2) +#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3) +#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4) +#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 5) +#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 6) +#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 7) +#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 8) + +/* larb4 */ +#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB4_ID, 0) +#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 1) +#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB4_ID, 2) +#define M4U_PORT_DISP_OD1_R MTK_M4U_ID(M4U_LARB4_ID, 3) +#define M4U_PORT_DISP_OD1_W MTK_M4U_ID(M4U_LARB4_ID, 4) +#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 5) +#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB4_ID, 6) + +/* larb5 */ +#define M4U_PORT_DISP_OVL2 MTK_M4U_ID(M4U_LARB5_ID, 0) +#define M4U_PORT_DISP_WDMA2 MTK_M4U_ID(M4U_LARB5_ID, 1) +#define M4U_PORT_MDP_RDMA2 MTK_M4U_ID(M4U_LARB5_ID, 2) +#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB5_ID, 3) + +/* larb6 */ +#define M4U_PORT_JPGDEC_WDMA_0 MTK_M4U_ID(M4U_LARB6_ID, 0) +#define M4U_PORT_JPGDEC_WDMA_1 MTK_M4U_ID(M4U_LARB6_ID, 1) +#define M4U_PORT_JPGDEC_BSDMA_0 MTK_M4U_ID(M4U_LARB6_ID, 2) +#define M4U_PORT_JPGDEC_BSDMA_1 MTK_M4U_ID(M4U_LARB6_ID, 3) + +/* larb7 */ +#define M4U_PORT_MDP_RDMA3 MTK_M4U_ID(M4U_LARB7_ID, 0) +#define M4U_PORT_MDP_WROT2 MTK_M4U_ID(M4U_LARB7_ID, 1) + +/* larb8 */ +#define M4U_PORT_VDO MTK_M4U_ID(M4U_LARB8_ID, 0) +#define M4U_PORT_NR MTK_M4U_ID(M4U_LARB8_ID, 1) +#define M4U_PORT_WR_CHANNEL0 MTK_M4U_ID(M4U_LARB8_ID, 2) + +/* larb9 */ +#define M4U_PORT_TVD MTK_M4U_ID(M4U_LARB9_ID, 0) +#define M4U_PORT_WR_CHANNEL1 MTK_M4U_ID(M4U_LARB9_ID, 1) + +#endif diff --git a/include/dt-bindings/memory/mt8173-larb-port.h b/include/dt-bindings/memory/mt8173-larb-port.h new file mode 100644 index 000000000..111b4b0ec --- /dev/null +++ b/include/dt-bindings/memory/mt8173-larb-port.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2015-2016 MediaTek Inc. + * Author: Yong Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
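The comment in mt2701-larb-port.h above spells out the difference between the two MediaTek M4U generations: generation 1 (mt2701) numbers ports flatly, so larb N's first port continues where larb N-1 ended (hence the per-larb offsets 0, 11, 21, 44), while generation 2 (mt2712, mt8173) gives every larb a fixed 32-port window via MTK_M4U_ID(larb, port) = (larb << 5) | port. A standalone sketch contrasting the two follows (macros copied from the headers; the larb/port values are arbitrary examples):

/*
 * Standalone sketch of the two MediaTek M4U port-numbering schemes:
 * gen1 uses flat numbers with per-larb offsets, gen2 shifts the larb
 * index left by 5 so each larb owns a fixed 32-port window.
 */
#include <stdio.h>

/* gen1: mt2701 */
#define LARB0_PORT_OFFSET	0
#define LARB1_PORT_OFFSET	11
#define LARB2_PORT_OFFSET	21
#define MT2701_M4U_ID_LARB1(port)	((port) + LARB1_PORT_OFFSET)

/* gen2: mt2712 / mt8173 */
#define MTK_M4U_ID(larb, port)	(((larb) << 5) | (port))

int main(void)
{
	/* gen1: larb1 port 3 follows the 11 ports of larb0 -> 14 */
	_Static_assert(MT2701_M4U_ID_LARB1(3) == 14, "flat numbering");
	/* gen2: larb1 port 3 -> (1 << 5) | 3 = 35, regardless of larb0's size */
	_Static_assert(MTK_M4U_ID(1, 3) == 35, "fixed 32-port stride");

	printf("gen1: %d, gen2: %d\n",
	       MT2701_M4U_ID_LARB1(3), MTK_M4U_ID(1, 3));
	return 0;
}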
+ */ +#ifndef __DTS_IOMMU_PORT_MT8173_H +#define __DTS_IOMMU_PORT_MT8173_H + +#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) + +#define M4U_LARB0_ID 0 +#define M4U_LARB1_ID 1 +#define M4U_LARB2_ID 2 +#define M4U_LARB3_ID 3 +#define M4U_LARB4_ID 4 +#define M4U_LARB5_ID 5 + +/* larb0 */ +#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0) +#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1) +#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2) +#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 3) +#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 4) +#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5) +#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 6) +#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 7) + +/* larb1 */ +#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB1_ID, 0) +#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB1_ID, 1) +#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB1_ID, 2) +#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB1_ID, 3) +#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB1_ID, 4) +#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB1_ID, 5) +#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB1_ID, 6) +#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB1_ID, 7) +#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB1_ID, 8) +#define M4U_PORT_HW_VDEC_TILE MTK_M4U_ID(M4U_LARB1_ID, 9) + +/* larb2 */ +#define M4U_PORT_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0) +#define M4U_PORT_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1) +#define M4U_PORT_AAO MTK_M4U_ID(M4U_LARB2_ID, 2) +#define M4U_PORT_LCSO MTK_M4U_ID(M4U_LARB2_ID, 3) +#define M4U_PORT_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4) +#define M4U_PORT_IMGO_D MTK_M4U_ID(M4U_LARB2_ID, 5) +#define M4U_PORT_LSCI MTK_M4U_ID(M4U_LARB2_ID, 6) +#define M4U_PORT_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 7) +#define M4U_PORT_BPCI MTK_M4U_ID(M4U_LARB2_ID, 8) +#define M4U_PORT_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 9) +#define M4U_PORT_UFDI MTK_M4U_ID(M4U_LARB2_ID, 10) +#define M4U_PORT_IMGI MTK_M4U_ID(M4U_LARB2_ID, 11) +#define M4U_PORT_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 12) +#define M4U_PORT_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 13) +#define M4U_PORT_VIPI MTK_M4U_ID(M4U_LARB2_ID, 14) +#define M4U_PORT_VIP2I MTK_M4U_ID(M4U_LARB2_ID, 15) +#define M4U_PORT_VIP3I MTK_M4U_ID(M4U_LARB2_ID, 16) +#define M4U_PORT_LCEI MTK_M4U_ID(M4U_LARB2_ID, 17) +#define M4U_PORT_RB MTK_M4U_ID(M4U_LARB2_ID, 18) +#define M4U_PORT_RP MTK_M4U_ID(M4U_LARB2_ID, 19) +#define M4U_PORT_WR MTK_M4U_ID(M4U_LARB2_ID, 20) + +/* larb3 */ +#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0) +#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1) +#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2) +#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3) +#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4) +#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB3_ID, 5) +#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 6) +#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 7) +#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 8) +#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 9) +#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 10) +#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 11) +#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 12) +#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 13) +#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 14) + +/* larb4 */ +#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB4_ID, 0) +#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 1) +#define M4U_PORT_DISP_RDMA2 
MTK_M4U_ID(M4U_LARB4_ID, 2) +#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB4_ID, 3) +#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 4) +#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB4_ID, 5) + +/* larb5 */ +#define M4U_PORT_VENC_RCPU_SET2 MTK_M4U_ID(M4U_LARB5_ID, 0) +#define M4U_PORT_VENC_REC_FRM_SET2 MTK_M4U_ID(M4U_LARB5_ID, 1) +#define M4U_PORT_VENC_REF_LUMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 2) +#define M4U_PORT_VENC_REC_CHROMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 3) +#define M4U_PORT_VENC_BSDMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 4) +#define M4U_PORT_VENC_CUR_LUMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 5) +#define M4U_PORT_VENC_CUR_CHROMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 6) +#define M4U_PORT_VENC_RD_COMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 7) +#define M4U_PORT_VENC_SV_COMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 8) + +#endif diff --git a/include/dt-bindings/memory/tegra114-mc.h b/include/dt-bindings/memory/tegra114-mc.h new file mode 100644 index 000000000..dfe99c8a5 --- /dev/null +++ b/include/dt-bindings/memory/tegra114-mc.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_MEMORY_TEGRA114_MC_H +#define DT_BINDINGS_MEMORY_TEGRA114_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_EPP 3 +#define TEGRA_SWGROUP_G2 4 +#define TEGRA_SWGROUP_AVPC 5 +#define TEGRA_SWGROUP_NV 6 +#define TEGRA_SWGROUP_HDA 7 +#define TEGRA_SWGROUP_HC 8 +#define TEGRA_SWGROUP_MSENC 9 +#define TEGRA_SWGROUP_PPCS 10 +#define TEGRA_SWGROUP_VDE 11 +#define TEGRA_SWGROUP_MPCORELP 12 +#define TEGRA_SWGROUP_MPCORE 13 +#define TEGRA_SWGROUP_VI 14 +#define TEGRA_SWGROUP_ISP 15 +#define TEGRA_SWGROUP_XUSB_HOST 16 +#define TEGRA_SWGROUP_XUSB_DEV 17 +#define TEGRA_SWGROUP_EMUCIF 18 +#define TEGRA_SWGROUP_TSEC 19 + +#define TEGRA114_MC_RESET_AVPC 0 +#define TEGRA114_MC_RESET_DC 1 +#define TEGRA114_MC_RESET_DCB 2 +#define TEGRA114_MC_RESET_EPP 3 +#define TEGRA114_MC_RESET_2D 4 +#define TEGRA114_MC_RESET_HC 5 +#define TEGRA114_MC_RESET_HDA 6 +#define TEGRA114_MC_RESET_ISP 7 +#define TEGRA114_MC_RESET_MPCORE 8 +#define TEGRA114_MC_RESET_MPCORELP 9 +#define TEGRA114_MC_RESET_MPE 10 +#define TEGRA114_MC_RESET_3D 11 +#define TEGRA114_MC_RESET_3D2 12 +#define TEGRA114_MC_RESET_PPCS 13 +#define TEGRA114_MC_RESET_VDE 14 +#define TEGRA114_MC_RESET_VI 15 + +#endif diff --git a/include/dt-bindings/memory/tegra124-mc.h b/include/dt-bindings/memory/tegra124-mc.h new file mode 100644 index 000000000..186e6b7e9 --- /dev/null +++ b/include/dt-bindings/memory/tegra124-mc.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_MEMORY_TEGRA124_MC_H +#define DT_BINDINGS_MEMORY_TEGRA124_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_AFI 3 +#define TEGRA_SWGROUP_AVPC 4 +#define TEGRA_SWGROUP_HDA 5 +#define TEGRA_SWGROUP_HC 6 +#define TEGRA_SWGROUP_MSENC 7 +#define TEGRA_SWGROUP_PPCS 8 +#define TEGRA_SWGROUP_SATA 9 +#define TEGRA_SWGROUP_VDE 10 +#define TEGRA_SWGROUP_MPCORELP 11 +#define TEGRA_SWGROUP_MPCORE 12 +#define TEGRA_SWGROUP_ISP2 13 +#define TEGRA_SWGROUP_XUSB_HOST 14 +#define TEGRA_SWGROUP_XUSB_DEV 15 +#define TEGRA_SWGROUP_ISP2B 16 +#define TEGRA_SWGROUP_TSEC 17 +#define TEGRA_SWGROUP_A9AVP 18 +#define TEGRA_SWGROUP_GPU 19 +#define TEGRA_SWGROUP_SDMMC1A 20 +#define TEGRA_SWGROUP_SDMMC2A 21 +#define TEGRA_SWGROUP_SDMMC3A 22 +#define TEGRA_SWGROUP_SDMMC4A 23 +#define TEGRA_SWGROUP_VIC 24 +#define TEGRA_SWGROUP_VI 25 + +#define TEGRA124_MC_RESET_AFI 0 +#define TEGRA124_MC_RESET_AVPC 1 +#define 
TEGRA124_MC_RESET_DC 2 +#define TEGRA124_MC_RESET_DCB 3 +#define TEGRA124_MC_RESET_HC 4 +#define TEGRA124_MC_RESET_HDA 5 +#define TEGRA124_MC_RESET_ISP2 6 +#define TEGRA124_MC_RESET_MPCORE 7 +#define TEGRA124_MC_RESET_MPCORELP 8 +#define TEGRA124_MC_RESET_MSENC 9 +#define TEGRA124_MC_RESET_PPCS 10 +#define TEGRA124_MC_RESET_SATA 11 +#define TEGRA124_MC_RESET_VDE 12 +#define TEGRA124_MC_RESET_VI 13 +#define TEGRA124_MC_RESET_VIC 14 +#define TEGRA124_MC_RESET_XUSB_HOST 15 +#define TEGRA124_MC_RESET_XUSB_DEV 16 +#define TEGRA124_MC_RESET_TSEC 17 +#define TEGRA124_MC_RESET_SDMMC1 18 +#define TEGRA124_MC_RESET_SDMMC2 19 +#define TEGRA124_MC_RESET_SDMMC3 20 +#define TEGRA124_MC_RESET_SDMMC4 21 +#define TEGRA124_MC_RESET_ISP2B 22 +#define TEGRA124_MC_RESET_GPU 23 + +#endif diff --git a/include/dt-bindings/memory/tegra186-mc.h b/include/dt-bindings/memory/tegra186-mc.h new file mode 100644 index 000000000..64813536a --- /dev/null +++ b/include/dt-bindings/memory/tegra186-mc.h @@ -0,0 +1,111 @@ +#ifndef DT_BINDINGS_MEMORY_TEGRA186_MC_H +#define DT_BINDINGS_MEMORY_TEGRA186_MC_H + +/* special clients */ +#define TEGRA186_SID_INVALID 0x00 +#define TEGRA186_SID_PASSTHROUGH 0x7f + +/* host1x clients */ +#define TEGRA186_SID_HOST1X 0x01 +#define TEGRA186_SID_CSI 0x02 +#define TEGRA186_SID_VIC 0x03 +#define TEGRA186_SID_VI 0x04 +#define TEGRA186_SID_ISP 0x05 +#define TEGRA186_SID_NVDEC 0x06 +#define TEGRA186_SID_NVENC 0x07 +#define TEGRA186_SID_NVJPG 0x08 +#define TEGRA186_SID_NVDISPLAY 0x09 +#define TEGRA186_SID_TSEC 0x0a +#define TEGRA186_SID_TSECB 0x0b +#define TEGRA186_SID_SE 0x0c +#define TEGRA186_SID_SE1 0x0d +#define TEGRA186_SID_SE2 0x0e +#define TEGRA186_SID_SE3 0x0f + +/* GPU clients */ +#define TEGRA186_SID_GPU 0x10 + +/* other SoC clients */ +#define TEGRA186_SID_AFI 0x11 +#define TEGRA186_SID_HDA 0x12 +#define TEGRA186_SID_ETR 0x13 +#define TEGRA186_SID_EQOS 0x14 +#define TEGRA186_SID_UFSHC 0x15 +#define TEGRA186_SID_AON 0x16 +#define TEGRA186_SID_SDMMC4 0x17 +#define TEGRA186_SID_SDMMC3 0x18 +#define TEGRA186_SID_SDMMC2 0x19 +#define TEGRA186_SID_SDMMC1 0x1a +#define TEGRA186_SID_XUSB_HOST 0x1b +#define TEGRA186_SID_XUSB_DEV 0x1c +#define TEGRA186_SID_SATA 0x1d +#define TEGRA186_SID_APE 0x1e +#define TEGRA186_SID_SCE 0x1f + +/* GPC DMA clients */ +#define TEGRA186_SID_GPCDMA_0 0x20 +#define TEGRA186_SID_GPCDMA_1 0x21 +#define TEGRA186_SID_GPCDMA_2 0x22 +#define TEGRA186_SID_GPCDMA_3 0x23 +#define TEGRA186_SID_GPCDMA_4 0x24 +#define TEGRA186_SID_GPCDMA_5 0x25 +#define TEGRA186_SID_GPCDMA_6 0x26 +#define TEGRA186_SID_GPCDMA_7 0x27 + +/* APE DMA clients */ +#define TEGRA186_SID_APE_1 0x28 +#define TEGRA186_SID_APE_2 0x29 + +/* camera RTCPU */ +#define TEGRA186_SID_RCE 0x2a + +/* camera RTCPU on host1x address space */ +#define TEGRA186_SID_RCE_1X 0x2b + +/* APE DMA clients */ +#define TEGRA186_SID_APE_3 0x2c + +/* camera RTCPU running on APE */ +#define TEGRA186_SID_APE_CAM 0x2d +#define TEGRA186_SID_APE_CAM_1X 0x2e + +/* + * The BPMP has its SID value hardcoded in the firmware. Changing it requires + * considerable effort. 
+ */ +#define TEGRA186_SID_BPMP 0x32 + +/* for SMMU tests */ +#define TEGRA186_SID_SMMU_TEST 0x33 + +/* host1x virtualization channels */ +#define TEGRA186_SID_HOST1X_CTX0 0x38 +#define TEGRA186_SID_HOST1X_CTX1 0x39 +#define TEGRA186_SID_HOST1X_CTX2 0x3a +#define TEGRA186_SID_HOST1X_CTX3 0x3b +#define TEGRA186_SID_HOST1X_CTX4 0x3c +#define TEGRA186_SID_HOST1X_CTX5 0x3d +#define TEGRA186_SID_HOST1X_CTX6 0x3e +#define TEGRA186_SID_HOST1X_CTX7 0x3f + +/* host1x command buffers */ +#define TEGRA186_SID_HOST1X_VM0 0x40 +#define TEGRA186_SID_HOST1X_VM1 0x41 +#define TEGRA186_SID_HOST1X_VM2 0x42 +#define TEGRA186_SID_HOST1X_VM3 0x43 +#define TEGRA186_SID_HOST1X_VM4 0x44 +#define TEGRA186_SID_HOST1X_VM5 0x45 +#define TEGRA186_SID_HOST1X_VM6 0x46 +#define TEGRA186_SID_HOST1X_VM7 0x47 + +/* SE data buffers */ +#define TEGRA186_SID_SE_VM0 0x48 +#define TEGRA186_SID_SE_VM1 0x49 +#define TEGRA186_SID_SE_VM2 0x4a +#define TEGRA186_SID_SE_VM3 0x4b +#define TEGRA186_SID_SE_VM4 0x4c +#define TEGRA186_SID_SE_VM5 0x4d +#define TEGRA186_SID_SE_VM6 0x4e +#define TEGRA186_SID_SE_VM7 0x4f + +#endif diff --git a/include/dt-bindings/memory/tegra20-mc.h b/include/dt-bindings/memory/tegra20-mc.h new file mode 100644 index 000000000..35e131eee --- /dev/null +++ b/include/dt-bindings/memory/tegra20-mc.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_MEMORY_TEGRA20_MC_H +#define DT_BINDINGS_MEMORY_TEGRA20_MC_H + +#define TEGRA20_MC_RESET_AVPC 0 +#define TEGRA20_MC_RESET_DC 1 +#define TEGRA20_MC_RESET_DCB 2 +#define TEGRA20_MC_RESET_EPP 3 +#define TEGRA20_MC_RESET_2D 4 +#define TEGRA20_MC_RESET_HC 5 +#define TEGRA20_MC_RESET_ISP 6 +#define TEGRA20_MC_RESET_MPCORE 7 +#define TEGRA20_MC_RESET_MPEA 8 +#define TEGRA20_MC_RESET_MPEB 9 +#define TEGRA20_MC_RESET_MPEC 10 +#define TEGRA20_MC_RESET_3D 11 +#define TEGRA20_MC_RESET_PPCS 12 +#define TEGRA20_MC_RESET_VDE 13 +#define TEGRA20_MC_RESET_VI 14 + +#endif diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h new file mode 100644 index 000000000..cacf05617 --- /dev/null +++ b/include/dt-bindings/memory/tegra210-mc.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_MEMORY_TEGRA210_MC_H +#define DT_BINDINGS_MEMORY_TEGRA210_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_AFI 3 +#define TEGRA_SWGROUP_AVPC 4 +#define TEGRA_SWGROUP_HDA 5 +#define TEGRA_SWGROUP_HC 6 +#define TEGRA_SWGROUP_NVENC 7 +#define TEGRA_SWGROUP_PPCS 8 +#define TEGRA_SWGROUP_SATA 9 +#define TEGRA_SWGROUP_MPCORE 10 +#define TEGRA_SWGROUP_ISP2 11 +#define TEGRA_SWGROUP_XUSB_HOST 12 +#define TEGRA_SWGROUP_XUSB_DEV 13 +#define TEGRA_SWGROUP_ISP2B 14 +#define TEGRA_SWGROUP_TSEC 15 +#define TEGRA_SWGROUP_A9AVP 16 +#define TEGRA_SWGROUP_GPU 17 +#define TEGRA_SWGROUP_SDMMC1A 18 +#define TEGRA_SWGROUP_SDMMC2A 19 +#define TEGRA_SWGROUP_SDMMC3A 20 +#define TEGRA_SWGROUP_SDMMC4A 21 +#define TEGRA_SWGROUP_VIC 22 +#define TEGRA_SWGROUP_VI 23 +#define TEGRA_SWGROUP_NVDEC 24 +#define TEGRA_SWGROUP_APE 25 +#define TEGRA_SWGROUP_NVJPG 26 +#define TEGRA_SWGROUP_SE 27 +#define TEGRA_SWGROUP_AXIAP 28 +#define TEGRA_SWGROUP_ETR 29 +#define TEGRA_SWGROUP_TSECB 30 + +#define TEGRA210_MC_RESET_AFI 0 +#define TEGRA210_MC_RESET_AVPC 1 +#define TEGRA210_MC_RESET_DC 2 +#define TEGRA210_MC_RESET_DCB 3 +#define TEGRA210_MC_RESET_HC 4 +#define TEGRA210_MC_RESET_HDA 5 +#define TEGRA210_MC_RESET_ISP2 6 +#define TEGRA210_MC_RESET_MPCORE 7 +#define TEGRA210_MC_RESET_NVENC 8 
+#define TEGRA210_MC_RESET_PPCS 9 +#define TEGRA210_MC_RESET_SATA 10 +#define TEGRA210_MC_RESET_VI 11 +#define TEGRA210_MC_RESET_VIC 12 +#define TEGRA210_MC_RESET_XUSB_HOST 13 +#define TEGRA210_MC_RESET_XUSB_DEV 14 +#define TEGRA210_MC_RESET_A9AVP 15 +#define TEGRA210_MC_RESET_TSEC 16 +#define TEGRA210_MC_RESET_SDMMC1 17 +#define TEGRA210_MC_RESET_SDMMC2 18 +#define TEGRA210_MC_RESET_SDMMC3 19 +#define TEGRA210_MC_RESET_SDMMC4 20 +#define TEGRA210_MC_RESET_ISP2B 21 +#define TEGRA210_MC_RESET_GPU 22 +#define TEGRA210_MC_RESET_NVDEC 23 +#define TEGRA210_MC_RESET_APE 24 +#define TEGRA210_MC_RESET_SE 25 +#define TEGRA210_MC_RESET_NVJPG 26 +#define TEGRA210_MC_RESET_AXIAP 27 +#define TEGRA210_MC_RESET_ETR 28 +#define TEGRA210_MC_RESET_TSECB 29 + +#endif diff --git a/include/dt-bindings/memory/tegra30-mc.h b/include/dt-bindings/memory/tegra30-mc.h new file mode 100644 index 000000000..169f005fb --- /dev/null +++ b/include/dt-bindings/memory/tegra30-mc.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_MEMORY_TEGRA30_MC_H +#define DT_BINDINGS_MEMORY_TEGRA30_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_EPP 3 +#define TEGRA_SWGROUP_G2 4 +#define TEGRA_SWGROUP_MPE 5 +#define TEGRA_SWGROUP_VI 6 +#define TEGRA_SWGROUP_AFI 7 +#define TEGRA_SWGROUP_AVPC 8 +#define TEGRA_SWGROUP_NV 9 +#define TEGRA_SWGROUP_NV2 10 +#define TEGRA_SWGROUP_HDA 11 +#define TEGRA_SWGROUP_HC 12 +#define TEGRA_SWGROUP_PPCS 13 +#define TEGRA_SWGROUP_SATA 14 +#define TEGRA_SWGROUP_VDE 15 +#define TEGRA_SWGROUP_MPCORELP 16 +#define TEGRA_SWGROUP_MPCORE 17 +#define TEGRA_SWGROUP_ISP 18 + +#define TEGRA30_MC_RESET_AFI 0 +#define TEGRA30_MC_RESET_AVPC 1 +#define TEGRA30_MC_RESET_DC 2 +#define TEGRA30_MC_RESET_DCB 3 +#define TEGRA30_MC_RESET_EPP 4 +#define TEGRA30_MC_RESET_2D 5 +#define TEGRA30_MC_RESET_HC 6 +#define TEGRA30_MC_RESET_HDA 7 +#define TEGRA30_MC_RESET_ISP 8 +#define TEGRA30_MC_RESET_MPCORE 9 +#define TEGRA30_MC_RESET_MPCORELP 10 +#define TEGRA30_MC_RESET_MPE 11 +#define TEGRA30_MC_RESET_3D 12 +#define TEGRA30_MC_RESET_3D2 13 +#define TEGRA30_MC_RESET_PPCS 14 +#define TEGRA30_MC_RESET_SATA 15 +#define TEGRA30_MC_RESET_VDE 16 +#define TEGRA30_MC_RESET_VI 17 + +#endif diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h new file mode 100644 index 000000000..dedf46ffd --- /dev/null +++ b/include/dt-bindings/mfd/arizona.h @@ -0,0 +1,118 @@ +/* + * Device Tree defines for Arizona devices + * + * Copyright 2015 Cirrus Logic Inc. + * + * Author: Charles Keepax + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _DT_BINDINGS_MFD_ARIZONA_H +#define _DT_BINDINGS_MFD_ARIZONA_H + +/* GPIO Function Definitions */ +#define ARIZONA_GP_FN_TXLRCLK 0x00 +#define ARIZONA_GP_FN_GPIO 0x01 +#define ARIZONA_GP_FN_IRQ1 0x02 +#define ARIZONA_GP_FN_IRQ2 0x03 +#define ARIZONA_GP_FN_OPCLK 0x04 +#define ARIZONA_GP_FN_FLL1_OUT 0x05 +#define ARIZONA_GP_FN_FLL2_OUT 0x06 +#define ARIZONA_GP_FN_PWM1 0x08 +#define ARIZONA_GP_FN_PWM2 0x09 +#define ARIZONA_GP_FN_SYSCLK_UNDERCLOCKED 0x0A +#define ARIZONA_GP_FN_ASYNCCLK_UNDERCLOCKED 0x0B +#define ARIZONA_GP_FN_FLL1_LOCK 0x0C +#define ARIZONA_GP_FN_FLL2_LOCK 0x0D +#define ARIZONA_GP_FN_FLL1_CLOCK_OK 0x0F +#define ARIZONA_GP_FN_FLL2_CLOCK_OK 0x10 +#define ARIZONA_GP_FN_HEADPHONE_DET 0x12 +#define ARIZONA_GP_FN_MIC_DET 0x13 +#define ARIZONA_GP_FN_WSEQ_STATUS 0x15 +#define ARIZONA_GP_FN_CIF_ADDRESS_ERROR 0x16 +#define ARIZONA_GP_FN_ASRC1_LOCK 0x1A +#define ARIZONA_GP_FN_ASRC2_LOCK 0x1B +#define ARIZONA_GP_FN_ASRC_CONFIG_ERROR 0x1C +#define ARIZONA_GP_FN_DRC1_SIGNAL_DETECT 0x1D +#define ARIZONA_GP_FN_DRC1_ANTICLIP 0x1E +#define ARIZONA_GP_FN_DRC1_DECAY 0x1F +#define ARIZONA_GP_FN_DRC1_NOISE 0x20 +#define ARIZONA_GP_FN_DRC1_QUICK_RELEASE 0x21 +#define ARIZONA_GP_FN_DRC2_SIGNAL_DETECT 0x22 +#define ARIZONA_GP_FN_DRC2_ANTICLIP 0x23 +#define ARIZONA_GP_FN_DRC2_DECAY 0x24 +#define ARIZONA_GP_FN_DRC2_NOISE 0x25 +#define ARIZONA_GP_FN_DRC2_QUICK_RELEASE 0x26 +#define ARIZONA_GP_FN_MIXER_DROPPED_SAMPLE 0x27 +#define ARIZONA_GP_FN_AIF1_CONFIG_ERROR 0x28 +#define ARIZONA_GP_FN_AIF2_CONFIG_ERROR 0x29 +#define ARIZONA_GP_FN_AIF3_CONFIG_ERROR 0x2A +#define ARIZONA_GP_FN_SPK_TEMP_SHUTDOWN 0x2B +#define ARIZONA_GP_FN_SPK_TEMP_WARNING 0x2C +#define ARIZONA_GP_FN_UNDERCLOCKED 0x2D +#define ARIZONA_GP_FN_OVERCLOCKED 0x2E +#define ARIZONA_GP_FN_DSP_IRQ1 0x35 +#define ARIZONA_GP_FN_DSP_IRQ2 0x36 +#define ARIZONA_GP_FN_ASYNC_OPCLK 0x3D +#define ARIZONA_GP_FN_BOOT_DONE 0x44 +#define ARIZONA_GP_FN_DSP1_RAM_READY 0x45 +#define ARIZONA_GP_FN_SYSCLK_ENA_STATUS 0x4B +#define ARIZONA_GP_FN_ASYNCCLK_ENA_STATUS 0x4C + +/* GPIO Configuration Bits */ +#define ARIZONA_GPN_DIR 0x8000 +#define ARIZONA_GPN_PU 0x4000 +#define ARIZONA_GPN_PD 0x2000 +#define ARIZONA_GPN_LVL 0x0800 +#define ARIZONA_GPN_POL 0x0400 +#define ARIZONA_GPN_OP_CFG 0x0200 +#define ARIZONA_GPN_DB 0x0100 + +/* Provide some defines for the most common configs */ +#define ARIZONA_GP_DEFAULT 0xffffffff +#define ARIZONA_GP_OUTPUT (ARIZONA_GP_FN_GPIO) +#define ARIZONA_GP_INPUT (ARIZONA_GP_FN_GPIO | \ + ARIZONA_GPN_DIR) + +#define ARIZONA_32KZ_MCLK1 1 +#define ARIZONA_32KZ_MCLK2 2 +#define ARIZONA_32KZ_NONE 3 + +#define ARIZONA_DMIC_MICVDD 0 +#define ARIZONA_DMIC_MICBIAS1 1 +#define ARIZONA_DMIC_MICBIAS2 2 +#define ARIZONA_DMIC_MICBIAS3 3 + +#define ARIZONA_INMODE_DIFF 0 +#define ARIZONA_INMODE_SE 1 +#define ARIZONA_INMODE_DMIC 2 + +#define ARIZONA_MICD_TIME_CONTINUOUS 0 +#define ARIZONA_MICD_TIME_250US 1 +#define ARIZONA_MICD_TIME_500US 2 +#define ARIZONA_MICD_TIME_1MS 3 +#define ARIZONA_MICD_TIME_2MS 4 +#define ARIZONA_MICD_TIME_4MS 5 +#define ARIZONA_MICD_TIME_8MS 6 +#define ARIZONA_MICD_TIME_16MS 7 +#define ARIZONA_MICD_TIME_32MS 8 +#define ARIZONA_MICD_TIME_64MS 9 +#define ARIZONA_MICD_TIME_128MS 10 +#define ARIZONA_MICD_TIME_256MS 11 +#define ARIZONA_MICD_TIME_512MS 12 + +#define ARIZONA_ACCDET_MODE_MIC 0 +#define ARIZONA_ACCDET_MODE_HPL 1 +#define ARIZONA_ACCDET_MODE_HPR 2 +#define ARIZONA_ACCDET_MODE_HPM 4 +#define ARIZONA_ACCDET_MODE_ADC 7 + +#define ARIZONA_GPSW_OPEN 0 +#define ARIZONA_GPSW_CLOSED 1 +#define 
ARIZONA_GPSW_CLAMP_ENABLED 2 +#define ARIZONA_GPSW_CLAMP_DISABLED 3 + +#endif diff --git a/include/dt-bindings/mfd/as3722.h b/include/dt-bindings/mfd/as3722.h new file mode 100644 index 000000000..9ef0cba90 --- /dev/null +++ b/include/dt-bindings/mfd/as3722.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides macros for ams AS3722 device bindings. + * + * Copyright (c) 2013, NVIDIA Corporation. + * + * Author: Laxman Dewangan + * + */ + +#ifndef __DT_BINDINGS_AS3722_H__ +#define __DT_BINDINGS_AS3722_H__ + +/* External control pins */ +#define AS3722_EXT_CONTROL_PIN_ENABLE1 1 +#define AS3722_EXT_CONTROL_PIN_ENABLE2 2 +#define AS3722_EXT_CONTROL_PIN_ENABLE3 3 + +/* Interrupt numbers for AS3722 */ +#define AS3722_IRQ_LID 0 +#define AS3722_IRQ_ACOK 1 +#define AS3722_IRQ_ENABLE1 2 +#define AS3722_IRQ_OCCUR_ALARM_SD0 3 +#define AS3722_IRQ_ONKEY_LONG_PRESS 4 +#define AS3722_IRQ_ONKEY 5 +#define AS3722_IRQ_OVTMP 6 +#define AS3722_IRQ_LOWBAT 7 +#define AS3722_IRQ_SD0_LV 8 +#define AS3722_IRQ_SD1_LV 9 +#define AS3722_IRQ_SD2_LV 10 +#define AS3722_IRQ_PWM1_OV_PROT 11 +#define AS3722_IRQ_PWM2_OV_PROT 12 +#define AS3722_IRQ_ENABLE2 13 +#define AS3722_IRQ_SD6_LV 14 +#define AS3722_IRQ_RTC_REP 15 +#define AS3722_IRQ_RTC_ALARM 16 +#define AS3722_IRQ_GPIO1 17 +#define AS3722_IRQ_GPIO2 18 +#define AS3722_IRQ_GPIO3 19 +#define AS3722_IRQ_GPIO4 20 +#define AS3722_IRQ_GPIO5 21 +#define AS3722_IRQ_WATCHDOG 22 +#define AS3722_IRQ_ENABLE3 23 +#define AS3722_IRQ_TEMP_SD0_SHUTDOWN 24 +#define AS3722_IRQ_TEMP_SD1_SHUTDOWN 25 +#define AS3722_IRQ_TEMP_SD2_SHUTDOWN 26 +#define AS3722_IRQ_TEMP_SD0_ALARM 27 +#define AS3722_IRQ_TEMP_SD1_ALARM 28 +#define AS3722_IRQ_TEMP_SD6_ALARM 29 +#define AS3722_IRQ_OCCUR_ALARM_SD6 30 +#define AS3722_IRQ_ADC 31 + +#endif /* __DT_BINDINGS_AS3722_H__ */ diff --git a/include/dt-bindings/mfd/atmel-flexcom.h b/include/dt-bindings/mfd/atmel-flexcom.h new file mode 100644 index 000000000..a266fe4ee --- /dev/null +++ b/include/dt-bindings/mfd/atmel-flexcom.h @@ -0,0 +1,26 @@ +/* + * This header provides macros for Atmel Flexcom DT bindings. + * + * Copyright (C) 2015 Cyrille Pitchen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __DT_BINDINGS_ATMEL_FLEXCOM_H__ +#define __DT_BINDINGS_ATMEL_FLEXCOM_H__ + +#define ATMEL_FLEXCOM_MODE_USART 1 +#define ATMEL_FLEXCOM_MODE_SPI 2 +#define ATMEL_FLEXCOM_MODE_TWI 3 + +#endif /* __DT_BINDINGS_ATMEL_FLEXCOM_H__ */ diff --git a/include/dt-bindings/mfd/dbx500-prcmu.h b/include/dt-bindings/mfd/dbx500-prcmu.h new file mode 100644 index 000000000..0404bcc47 --- /dev/null +++ b/include/dt-bindings/mfd/dbx500-prcmu.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the PRCMU bindings. + * + */ + +#ifndef _DT_BINDINGS_MFD_PRCMU_H +#define _DT_BINDINGS_MFD_PRCMU_H + +/* + * Clock identifiers. + */ +#define ARMCLK 0 +#define PRCMU_ACLK 1 +#define PRCMU_SVAMMCSPCLK 2 +#define PRCMU_SDMMCHCLK 2 /* DBx540 only. 
*/ +#define PRCMU_SIACLK 3 +#define PRCMU_SIAMMDSPCLK 3 /* DBx540 only. */ +#define PRCMU_SGACLK 4 +#define PRCMU_UARTCLK 5 +#define PRCMU_MSP02CLK 6 +#define PRCMU_MSP1CLK 7 +#define PRCMU_I2CCLK 8 +#define PRCMU_SDMMCCLK 9 +#define PRCMU_SLIMCLK 10 +#define PRCMU_CAMCLK 10 /* DBx540 only. */ +#define PRCMU_PER1CLK 11 +#define PRCMU_PER2CLK 12 +#define PRCMU_PER3CLK 13 +#define PRCMU_PER5CLK 14 +#define PRCMU_PER6CLK 15 +#define PRCMU_PER7CLK 16 +#define PRCMU_LCDCLK 17 +#define PRCMU_BMLCLK 18 +#define PRCMU_HSITXCLK 19 +#define PRCMU_HSIRXCLK 20 +#define PRCMU_HDMICLK 21 +#define PRCMU_APEATCLK 22 +#define PRCMU_APETRACECLK 23 +#define PRCMU_MCDECLK 24 +#define PRCMU_IPI2CCLK 25 +#define PRCMU_DSIALTCLK 26 +#define PRCMU_DMACLK 27 +#define PRCMU_B2R2CLK 28 +#define PRCMU_TVCLK 29 +#define SPARE_UNIPROCLK 30 +#define PRCMU_SSPCLK 31 +#define PRCMU_RNGCLK 32 +#define PRCMU_UICCCLK 33 +#define PRCMU_G1CLK 34 /* DBx540 only. */ +#define PRCMU_HVACLK 35 /* DBx540 only. */ +#define PRCMU_SPARE1CLK 36 +#define PRCMU_SPARE2CLK 37 + +#define PRCMU_NUM_REG_CLOCKS 38 + +#define PRCMU_RTCCLK PRCMU_NUM_REG_CLOCKS +#define PRCMU_SYSCLK 39 +#define PRCMU_CDCLK 40 +#define PRCMU_TIMCLK 41 +#define PRCMU_PLLSOC0 42 +#define PRCMU_PLLSOC1 43 +#define PRCMU_ARMSS 44 +#define PRCMU_PLLDDR 45 + +/* DSI Clocks */ +#define PRCMU_PLLDSI 46 +#define PRCMU_DSI0CLK 47 +#define PRCMU_DSI1CLK 48 +#define PRCMU_DSI0ESCCLK 49 +#define PRCMU_DSI1ESCCLK 50 +#define PRCMU_DSI2ESCCLK 51 + +/* LCD DSI PLL - Ux540 only */ +#define PRCMU_PLLDSI_LCD 52 +#define PRCMU_DSI0CLK_LCD 53 +#define PRCMU_DSI1CLK_LCD 54 +#define PRCMU_DSI0ESCCLK_LCD 55 +#define PRCMU_DSI1ESCCLK_LCD 56 +#define PRCMU_DSI2ESCCLK_LCD 57 + +#define PRCMU_NUM_CLKS 58 + +#endif diff --git a/include/dt-bindings/mfd/max77620.h b/include/dt-bindings/mfd/max77620.h new file mode 100644 index 000000000..1e19c5f90 --- /dev/null +++ b/include/dt-bindings/mfd/max77620.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides macros for MAXIM MAX77620 device bindings. + * + * Copyright (c) 2016, NVIDIA Corporation. 
+ * Author: Laxman Dewangan + */ + +#ifndef _DT_BINDINGS_MFD_MAX77620_H +#define _DT_BINDINGS_MFD_MAX77620_H + +/* MAX77620 interrupts */ +#define MAX77620_IRQ_TOP_GLBL 0 /* Low-Battery */ +#define MAX77620_IRQ_TOP_SD 1 /* SD power fail */ +#define MAX77620_IRQ_TOP_LDO 2 /* LDO power fail */ +#define MAX77620_IRQ_TOP_GPIO 3 /* GPIO internal int to MAX77620 */ +#define MAX77620_IRQ_TOP_RTC 4 /* RTC */ +#define MAX77620_IRQ_TOP_32K 5 /* 32kHz oscillator */ +#define MAX77620_IRQ_TOP_ONOFF 6 /* ON/OFF oscillator */ +#define MAX77620_IRQ_LBT_MBATLOW 7 /* Thermal alarm status, > 120C */ +#define MAX77620_IRQ_LBT_TJALRM1 8 /* Thermal alarm status, > 120C */ +#define MAX77620_IRQ_LBT_TJALRM2 9 /* Thermal alarm status, > 140C */ + +/* FPS event source */ +#define MAX77620_FPS_EVENT_SRC_EN0 0 +#define MAX77620_FPS_EVENT_SRC_EN1 1 +#define MAX77620_FPS_EVENT_SRC_SW 2 + +/* Device state when FPS event LOW */ +#define MAX77620_FPS_INACTIVE_STATE_SLEEP 0 +#define MAX77620_FPS_INACTIVE_STATE_LOW_POWER 1 + +/* FPS source */ +#define MAX77620_FPS_SRC_0 0 +#define MAX77620_FPS_SRC_1 1 +#define MAX77620_FPS_SRC_2 2 +#define MAX77620_FPS_SRC_NONE 3 +#define MAX77620_FPS_SRC_DEF 4 + +#endif diff --git a/include/dt-bindings/mfd/palmas.h b/include/dt-bindings/mfd/palmas.h new file mode 100644 index 000000000..c4f1d57ff --- /dev/null +++ b/include/dt-bindings/mfd/palmas.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides macros for Palmas device bindings. + * + * Copyright (c) 2013, NVIDIA Corporation. + * + * Author: Laxman Dewangan + * + */ + +#ifndef __DT_BINDINGS_PALMAS_H +#define __DT_BINDINGS_PALMAS_H + +/* External control pins */ +#define PALMAS_EXT_CONTROL_PIN_ENABLE1 1 +#define PALMAS_EXT_CONTROL_PIN_ENABLE2 2 +#define PALMAS_EXT_CONTROL_PIN_NSLEEP 3 + +#endif /* __DT_BINDINGS_PALMAS_H */ diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h new file mode 100644 index 000000000..c9204c4df --- /dev/null +++ b/include/dt-bindings/mfd/qcom-rpm.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the Qualcomm RPM bindings. + */ + +#ifndef _DT_BINDINGS_MFD_QCOM_RPM_H +#define _DT_BINDINGS_MFD_QCOM_RPM_H + +/* + * Constants use to identify individual resources in the RPM. 
+ */ +#define QCOM_RPM_APPS_FABRIC_ARB 1 +#define QCOM_RPM_APPS_FABRIC_CLK 2 +#define QCOM_RPM_APPS_FABRIC_HALT 3 +#define QCOM_RPM_APPS_FABRIC_IOCTL 4 +#define QCOM_RPM_APPS_FABRIC_MODE 5 +#define QCOM_RPM_APPS_L2_CACHE_CTL 6 +#define QCOM_RPM_CFPB_CLK 7 +#define QCOM_RPM_CXO_BUFFERS 8 +#define QCOM_RPM_CXO_CLK 9 +#define QCOM_RPM_DAYTONA_FABRIC_CLK 10 +#define QCOM_RPM_DDR_DMM 11 +#define QCOM_RPM_EBI1_CLK 12 +#define QCOM_RPM_HDMI_SWITCH 13 +#define QCOM_RPM_MMFPB_CLK 14 +#define QCOM_RPM_MM_FABRIC_ARB 15 +#define QCOM_RPM_MM_FABRIC_CLK 16 +#define QCOM_RPM_MM_FABRIC_HALT 17 +#define QCOM_RPM_MM_FABRIC_IOCTL 18 +#define QCOM_RPM_MM_FABRIC_MODE 19 +#define QCOM_RPM_PLL_4 20 +#define QCOM_RPM_PM8058_LDO0 21 +#define QCOM_RPM_PM8058_LDO1 22 +#define QCOM_RPM_PM8058_LDO2 23 +#define QCOM_RPM_PM8058_LDO3 24 +#define QCOM_RPM_PM8058_LDO4 25 +#define QCOM_RPM_PM8058_LDO5 26 +#define QCOM_RPM_PM8058_LDO6 27 +#define QCOM_RPM_PM8058_LDO7 28 +#define QCOM_RPM_PM8058_LDO8 29 +#define QCOM_RPM_PM8058_LDO9 30 +#define QCOM_RPM_PM8058_LDO10 31 +#define QCOM_RPM_PM8058_LDO11 32 +#define QCOM_RPM_PM8058_LDO12 33 +#define QCOM_RPM_PM8058_LDO13 34 +#define QCOM_RPM_PM8058_LDO14 35 +#define QCOM_RPM_PM8058_LDO15 36 +#define QCOM_RPM_PM8058_LDO16 37 +#define QCOM_RPM_PM8058_LDO17 38 +#define QCOM_RPM_PM8058_LDO18 39 +#define QCOM_RPM_PM8058_LDO19 40 +#define QCOM_RPM_PM8058_LDO20 41 +#define QCOM_RPM_PM8058_LDO21 42 +#define QCOM_RPM_PM8058_LDO22 43 +#define QCOM_RPM_PM8058_LDO23 44 +#define QCOM_RPM_PM8058_LDO24 45 +#define QCOM_RPM_PM8058_LDO25 46 +#define QCOM_RPM_PM8058_LVS0 47 +#define QCOM_RPM_PM8058_LVS1 48 +#define QCOM_RPM_PM8058_NCP 49 +#define QCOM_RPM_PM8058_SMPS0 50 +#define QCOM_RPM_PM8058_SMPS1 51 +#define QCOM_RPM_PM8058_SMPS2 52 +#define QCOM_RPM_PM8058_SMPS3 53 +#define QCOM_RPM_PM8058_SMPS4 54 +#define QCOM_RPM_PM8821_LDO1 55 +#define QCOM_RPM_PM8821_SMPS1 56 +#define QCOM_RPM_PM8821_SMPS2 57 +#define QCOM_RPM_PM8901_LDO0 58 +#define QCOM_RPM_PM8901_LDO1 59 +#define QCOM_RPM_PM8901_LDO2 60 +#define QCOM_RPM_PM8901_LDO3 61 +#define QCOM_RPM_PM8901_LDO4 62 +#define QCOM_RPM_PM8901_LDO5 63 +#define QCOM_RPM_PM8901_LDO6 64 +#define QCOM_RPM_PM8901_LVS0 65 +#define QCOM_RPM_PM8901_LVS1 66 +#define QCOM_RPM_PM8901_LVS2 67 +#define QCOM_RPM_PM8901_LVS3 68 +#define QCOM_RPM_PM8901_MVS 69 +#define QCOM_RPM_PM8901_SMPS0 70 +#define QCOM_RPM_PM8901_SMPS1 71 +#define QCOM_RPM_PM8901_SMPS2 72 +#define QCOM_RPM_PM8901_SMPS3 73 +#define QCOM_RPM_PM8901_SMPS4 74 +#define QCOM_RPM_PM8921_CLK1 75 +#define QCOM_RPM_PM8921_CLK2 76 +#define QCOM_RPM_PM8921_LDO1 77 +#define QCOM_RPM_PM8921_LDO2 78 +#define QCOM_RPM_PM8921_LDO3 79 +#define QCOM_RPM_PM8921_LDO4 80 +#define QCOM_RPM_PM8921_LDO5 81 +#define QCOM_RPM_PM8921_LDO6 82 +#define QCOM_RPM_PM8921_LDO7 83 +#define QCOM_RPM_PM8921_LDO8 84 +#define QCOM_RPM_PM8921_LDO9 85 +#define QCOM_RPM_PM8921_LDO10 86 +#define QCOM_RPM_PM8921_LDO11 87 +#define QCOM_RPM_PM8921_LDO12 88 +#define QCOM_RPM_PM8921_LDO13 89 +#define QCOM_RPM_PM8921_LDO14 90 +#define QCOM_RPM_PM8921_LDO15 91 +#define QCOM_RPM_PM8921_LDO16 92 +#define QCOM_RPM_PM8921_LDO17 93 +#define QCOM_RPM_PM8921_LDO18 94 +#define QCOM_RPM_PM8921_LDO19 95 +#define QCOM_RPM_PM8921_LDO20 96 +#define QCOM_RPM_PM8921_LDO21 97 +#define QCOM_RPM_PM8921_LDO22 98 +#define QCOM_RPM_PM8921_LDO23 99 +#define QCOM_RPM_PM8921_LDO24 100 +#define QCOM_RPM_PM8921_LDO25 101 +#define QCOM_RPM_PM8921_LDO26 102 +#define QCOM_RPM_PM8921_LDO27 103 +#define QCOM_RPM_PM8921_LDO28 104 +#define QCOM_RPM_PM8921_LDO29 105 +#define 
QCOM_RPM_PM8921_LVS1 106 +#define QCOM_RPM_PM8921_LVS2 107 +#define QCOM_RPM_PM8921_LVS3 108 +#define QCOM_RPM_PM8921_LVS4 109 +#define QCOM_RPM_PM8921_LVS5 110 +#define QCOM_RPM_PM8921_LVS6 111 +#define QCOM_RPM_PM8921_LVS7 112 +#define QCOM_RPM_PM8921_MVS 113 +#define QCOM_RPM_PM8921_NCP 114 +#define QCOM_RPM_PM8921_SMPS1 115 +#define QCOM_RPM_PM8921_SMPS2 116 +#define QCOM_RPM_PM8921_SMPS3 117 +#define QCOM_RPM_PM8921_SMPS4 118 +#define QCOM_RPM_PM8921_SMPS5 119 +#define QCOM_RPM_PM8921_SMPS6 120 +#define QCOM_RPM_PM8921_SMPS7 121 +#define QCOM_RPM_PM8921_SMPS8 122 +#define QCOM_RPM_PXO_CLK 123 +#define QCOM_RPM_QDSS_CLK 124 +#define QCOM_RPM_SFPB_CLK 125 +#define QCOM_RPM_SMI_CLK 126 +#define QCOM_RPM_SYS_FABRIC_ARB 127 +#define QCOM_RPM_SYS_FABRIC_CLK 128 +#define QCOM_RPM_SYS_FABRIC_HALT 129 +#define QCOM_RPM_SYS_FABRIC_IOCTL 130 +#define QCOM_RPM_SYS_FABRIC_MODE 131 +#define QCOM_RPM_USB_OTG_SWITCH 132 +#define QCOM_RPM_VDDMIN_GPIO 133 +#define QCOM_RPM_NSS_FABRIC_0_CLK 134 +#define QCOM_RPM_NSS_FABRIC_1_CLK 135 +#define QCOM_RPM_SMB208_S1a 136 +#define QCOM_RPM_SMB208_S1b 137 +#define QCOM_RPM_SMB208_S2a 138 +#define QCOM_RPM_SMB208_S2b 139 +#define QCOM_RPM_PM8018_SMPS1 140 +#define QCOM_RPM_PM8018_SMPS2 141 +#define QCOM_RPM_PM8018_SMPS3 142 +#define QCOM_RPM_PM8018_SMPS4 143 +#define QCOM_RPM_PM8018_SMPS5 144 +#define QCOM_RPM_PM8018_LDO1 145 +#define QCOM_RPM_PM8018_LDO2 146 +#define QCOM_RPM_PM8018_LDO3 147 +#define QCOM_RPM_PM8018_LDO4 148 +#define QCOM_RPM_PM8018_LDO5 149 +#define QCOM_RPM_PM8018_LDO6 150 +#define QCOM_RPM_PM8018_LDO7 151 +#define QCOM_RPM_PM8018_LDO8 152 +#define QCOM_RPM_PM8018_LDO9 153 +#define QCOM_RPM_PM8018_LDO10 154 +#define QCOM_RPM_PM8018_LDO11 155 +#define QCOM_RPM_PM8018_LDO12 156 +#define QCOM_RPM_PM8018_LDO13 157 +#define QCOM_RPM_PM8018_LDO14 158 +#define QCOM_RPM_PM8018_LVS1 159 +#define QCOM_RPM_PM8018_NCP 160 +#define QCOM_RPM_VOLTAGE_CORNER 161 + +/* + * Constants used to select force mode for regulators. 
+ */ +#define QCOM_RPM_FORCE_MODE_NONE 0 +#define QCOM_RPM_FORCE_MODE_LPM 1 +#define QCOM_RPM_FORCE_MODE_HPM 2 +#define QCOM_RPM_FORCE_MODE_AUTO 3 +#define QCOM_RPM_FORCE_MODE_BYPASS 4 + +#endif diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h new file mode 100644 index 000000000..88a7f5684 --- /dev/null +++ b/include/dt-bindings/mfd/st-lpc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides shared DT/Driver defines for ST's LPC device + * + * Copyright (C) 2014 STMicroelectronics -- All Rights Reserved + * + * Author: Lee Jones for STMicroelectronics + */ + +#ifndef __DT_BINDINGS_ST_LPC_H__ +#define __DT_BINDINGS_ST_LPC_H__ + +#define ST_LPC_MODE_RTC 0 +#define ST_LPC_MODE_WDT 1 +#define ST_LPC_MODE_CLKSRC 2 + +#endif /* __DT_BINDINGS_ST_LPC_H__ */ diff --git a/include/dt-bindings/mfd/stm32f4-rcc.h b/include/dt-bindings/mfd/stm32f4-rcc.h new file mode 100644 index 000000000..309e8c79f --- /dev/null +++ b/include/dt-bindings/mfd/stm32f4-rcc.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the STM32F4 RCC IP + */ + +#ifndef _DT_BINDINGS_MFD_STM32F4_RCC_H +#define _DT_BINDINGS_MFD_STM32F4_RCC_H + +/* AHB1 */ +#define STM32F4_RCC_AHB1_GPIOA 0 +#define STM32F4_RCC_AHB1_GPIOB 1 +#define STM32F4_RCC_AHB1_GPIOC 2 +#define STM32F4_RCC_AHB1_GPIOD 3 +#define STM32F4_RCC_AHB1_GPIOE 4 +#define STM32F4_RCC_AHB1_GPIOF 5 +#define STM32F4_RCC_AHB1_GPIOG 6 +#define STM32F4_RCC_AHB1_GPIOH 7 +#define STM32F4_RCC_AHB1_GPIOI 8 +#define STM32F4_RCC_AHB1_GPIOJ 9 +#define STM32F4_RCC_AHB1_GPIOK 10 +#define STM32F4_RCC_AHB1_CRC 12 +#define STM32F4_RCC_AHB1_BKPSRAM 18 +#define STM32F4_RCC_AHB1_CCMDATARAM 20 +#define STM32F4_RCC_AHB1_DMA1 21 +#define STM32F4_RCC_AHB1_DMA2 22 +#define STM32F4_RCC_AHB1_DMA2D 23 +#define STM32F4_RCC_AHB1_ETHMAC 25 +#define STM32F4_RCC_AHB1_ETHMACTX 26 +#define STM32F4_RCC_AHB1_ETHMACRX 27 +#define STM32F4_RCC_AHB1_ETHMACPTP 28 +#define STM32F4_RCC_AHB1_OTGHS 29 +#define STM32F4_RCC_AHB1_OTGHSULPI 30 + +#define STM32F4_AHB1_RESET(bit) (STM32F4_RCC_AHB1_##bit + (0x10 * 8)) +#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit) + + +/* AHB2 */ +#define STM32F4_RCC_AHB2_DCMI 0 +#define STM32F4_RCC_AHB2_CRYP 4 +#define STM32F4_RCC_AHB2_HASH 5 +#define STM32F4_RCC_AHB2_RNG 6 +#define STM32F4_RCC_AHB2_OTGFS 7 + +#define STM32F4_AHB2_RESET(bit) (STM32F4_RCC_AHB2_##bit + (0x14 * 8)) +#define STM32F4_AHB2_CLOCK(bit) (STM32F4_RCC_AHB2_##bit + 0x20) + +/* AHB3 */ +#define STM32F4_RCC_AHB3_FMC 0 +#define STM32F4_RCC_AHB3_QSPI 1 + +#define STM32F4_AHB3_RESET(bit) (STM32F4_RCC_AHB3_##bit + (0x18 * 8)) +#define STM32F4_AHB3_CLOCK(bit) (STM32F4_RCC_AHB3_##bit + 0x40) + +/* APB1 */ +#define STM32F4_RCC_APB1_TIM2 0 +#define STM32F4_RCC_APB1_TIM3 1 +#define STM32F4_RCC_APB1_TIM4 2 +#define STM32F4_RCC_APB1_TIM5 3 +#define STM32F4_RCC_APB1_TIM6 4 +#define STM32F4_RCC_APB1_TIM7 5 +#define STM32F4_RCC_APB1_TIM12 6 +#define STM32F4_RCC_APB1_TIM13 7 +#define STM32F4_RCC_APB1_TIM14 8 +#define STM32F4_RCC_APB1_WWDG 11 +#define STM32F4_RCC_APB1_SPI2 14 +#define STM32F4_RCC_APB1_SPI3 15 +#define STM32F4_RCC_APB1_UART2 17 +#define STM32F4_RCC_APB1_UART3 18 +#define STM32F4_RCC_APB1_UART4 19 +#define STM32F4_RCC_APB1_UART5 20 +#define STM32F4_RCC_APB1_I2C1 21 +#define STM32F4_RCC_APB1_I2C2 22 +#define STM32F4_RCC_APB1_I2C3 23 +#define STM32F4_RCC_APB1_CAN1 25 +#define STM32F4_RCC_APB1_CAN2 26 +#define STM32F4_RCC_APB1_PWR 28 +#define STM32F4_RCC_APB1_DAC 29 +#define STM32F4_RCC_APB1_UART7 30 
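The STM32F4 RESET() and CLOCK() helpers in the stm32f4-rcc.h hunk above fold a peripheral's bank into its bit position: the reset index is a fixed per-bank addend (0x10, 0x14, 0x18, 0x20, 0x24 multiplied by eight, which presumably tracks the bank's reset-register byte offset in the RCC block) plus the bit, and the clock index adds a per-bank base of 0, 0x20, 0x40, 0x80 or 0xA0. A minimal standalone C sketch of that arithmetic, using only macro values copied from the header and GPIOA/UART7 as hypothetical worked examples, is:

#include <stdio.h>

/* Values copied from the stm32f4-rcc.h hunk above; only these two clients are used here. */
#define STM32F4_RCC_AHB1_GPIOA	0
#define STM32F4_RCC_APB1_UART7	30

#define STM32F4_AHB1_RESET(bit)	(STM32F4_RCC_AHB1_##bit + (0x10 * 8))
#define STM32F4_AHB1_CLOCK(bit)	(STM32F4_RCC_AHB1_##bit)
#define STM32F4_APB1_RESET(bit)	(STM32F4_RCC_APB1_##bit + (0x20 * 8))
#define STM32F4_APB1_CLOCK(bit)	(STM32F4_RCC_APB1_##bit + 0x80)

int main(void)
{
	/* GPIOA is AHB1 bit 0: clock index 0, reset index 0x10 * 8 + 0 = 128. */
	printf("AHB1 GPIOA: clock %d, reset %d\n",
	       STM32F4_AHB1_CLOCK(GPIOA), STM32F4_AHB1_RESET(GPIOA));

	/* UART7 is APB1 bit 30: clock index 0x80 + 30 = 158, reset index 0x20 * 8 + 30 = 286. */
	printf("APB1 UART7: clock %d, reset %d\n",
	       STM32F4_APB1_CLOCK(UART7), STM32F4_APB1_RESET(UART7));
	return 0;
}

The same pattern recurs in the stm32f7-rcc.h and stm32h7-rcc.h hunks that follow, only with different per-bank offsets.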
+#define STM32F4_RCC_APB1_UART8 31 + +#define STM32F4_APB1_RESET(bit) (STM32F4_RCC_APB1_##bit + (0x20 * 8)) +#define STM32F4_APB1_CLOCK(bit) (STM32F4_RCC_APB1_##bit + 0x80) + +/* APB2 */ +#define STM32F4_RCC_APB2_TIM1 0 +#define STM32F4_RCC_APB2_TIM8 1 +#define STM32F4_RCC_APB2_USART1 4 +#define STM32F4_RCC_APB2_USART6 5 +#define STM32F4_RCC_APB2_ADC1 8 +#define STM32F4_RCC_APB2_ADC2 9 +#define STM32F4_RCC_APB2_ADC3 10 +#define STM32F4_RCC_APB2_SDIO 11 +#define STM32F4_RCC_APB2_SPI1 12 +#define STM32F4_RCC_APB2_SPI4 13 +#define STM32F4_RCC_APB2_SYSCFG 14 +#define STM32F4_RCC_APB2_TIM9 16 +#define STM32F4_RCC_APB2_TIM10 17 +#define STM32F4_RCC_APB2_TIM11 18 +#define STM32F4_RCC_APB2_SPI5 20 +#define STM32F4_RCC_APB2_SPI6 21 +#define STM32F4_RCC_APB2_SAI1 22 +#define STM32F4_RCC_APB2_LTDC 26 +#define STM32F4_RCC_APB2_DSI 27 + +#define STM32F4_APB2_RESET(bit) (STM32F4_RCC_APB2_##bit + (0x24 * 8)) +#define STM32F4_APB2_CLOCK(bit) (STM32F4_RCC_APB2_##bit + 0xA0) + +#endif /* _DT_BINDINGS_MFD_STM32F4_RCC_H */ diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h new file mode 100644 index 000000000..a90f3613c --- /dev/null +++ b/include/dt-bindings/mfd/stm32f7-rcc.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the STM32F7 RCC IP + */ + +#ifndef _DT_BINDINGS_MFD_STM32F7_RCC_H +#define _DT_BINDINGS_MFD_STM32F7_RCC_H + +/* AHB1 */ +#define STM32F7_RCC_AHB1_GPIOA 0 +#define STM32F7_RCC_AHB1_GPIOB 1 +#define STM32F7_RCC_AHB1_GPIOC 2 +#define STM32F7_RCC_AHB1_GPIOD 3 +#define STM32F7_RCC_AHB1_GPIOE 4 +#define STM32F7_RCC_AHB1_GPIOF 5 +#define STM32F7_RCC_AHB1_GPIOG 6 +#define STM32F7_RCC_AHB1_GPIOH 7 +#define STM32F7_RCC_AHB1_GPIOI 8 +#define STM32F7_RCC_AHB1_GPIOJ 9 +#define STM32F7_RCC_AHB1_GPIOK 10 +#define STM32F7_RCC_AHB1_CRC 12 +#define STM32F7_RCC_AHB1_BKPSRAM 18 +#define STM32F7_RCC_AHB1_DTCMRAM 20 +#define STM32F7_RCC_AHB1_DMA1 21 +#define STM32F7_RCC_AHB1_DMA2 22 +#define STM32F7_RCC_AHB1_DMA2D 23 +#define STM32F7_RCC_AHB1_ETHMAC 25 +#define STM32F7_RCC_AHB1_ETHMACTX 26 +#define STM32F7_RCC_AHB1_ETHMACRX 27 +#define STM32FF_RCC_AHB1_ETHMACPTP 28 +#define STM32F7_RCC_AHB1_OTGHS 29 +#define STM32F7_RCC_AHB1_OTGHSULPI 30 + +#define STM32F7_AHB1_RESET(bit) (STM32F7_RCC_AHB1_##bit + (0x10 * 8)) +#define STM32F7_AHB1_CLOCK(bit) (STM32F7_RCC_AHB1_##bit) + + +/* AHB2 */ +#define STM32F7_RCC_AHB2_DCMI 0 +#define STM32F7_RCC_AHB2_CRYP 4 +#define STM32F7_RCC_AHB2_HASH 5 +#define STM32F7_RCC_AHB2_RNG 6 +#define STM32F7_RCC_AHB2_OTGFS 7 + +#define STM32F7_AHB2_RESET(bit) (STM32F7_RCC_AHB2_##bit + (0x14 * 8)) +#define STM32F7_AHB2_CLOCK(bit) (STM32F7_RCC_AHB2_##bit + 0x20) + +/* AHB3 */ +#define STM32F7_RCC_AHB3_FMC 0 +#define STM32F7_RCC_AHB3_QSPI 1 + +#define STM32F7_AHB3_RESET(bit) (STM32F7_RCC_AHB3_##bit + (0x18 * 8)) +#define STM32F7_AHB3_CLOCK(bit) (STM32F7_RCC_AHB3_##bit + 0x40) + +/* APB1 */ +#define STM32F7_RCC_APB1_TIM2 0 +#define STM32F7_RCC_APB1_TIM3 1 +#define STM32F7_RCC_APB1_TIM4 2 +#define STM32F7_RCC_APB1_TIM5 3 +#define STM32F7_RCC_APB1_TIM6 4 +#define STM32F7_RCC_APB1_TIM7 5 +#define STM32F7_RCC_APB1_TIM12 6 +#define STM32F7_RCC_APB1_TIM13 7 +#define STM32F7_RCC_APB1_TIM14 8 +#define STM32F7_RCC_APB1_LPTIM1 9 +#define STM32F7_RCC_APB1_WWDG 11 +#define STM32F7_RCC_APB1_SPI2 14 +#define STM32F7_RCC_APB1_SPI3 15 +#define STM32F7_RCC_APB1_SPDIFRX 16 +#define STM32F7_RCC_APB1_UART2 17 +#define STM32F7_RCC_APB1_UART3 18 +#define STM32F7_RCC_APB1_UART4 19 +#define STM32F7_RCC_APB1_UART5 20 +#define 
STM32F7_RCC_APB1_I2C1 21 +#define STM32F7_RCC_APB1_I2C2 22 +#define STM32F7_RCC_APB1_I2C3 23 +#define STM32F7_RCC_APB1_I2C4 24 +#define STM32F7_RCC_APB1_CAN1 25 +#define STM32F7_RCC_APB1_CAN2 26 +#define STM32F7_RCC_APB1_CEC 27 +#define STM32F7_RCC_APB1_PWR 28 +#define STM32F7_RCC_APB1_DAC 29 +#define STM32F7_RCC_APB1_UART7 30 +#define STM32F7_RCC_APB1_UART8 31 + +#define STM32F7_APB1_RESET(bit) (STM32F7_RCC_APB1_##bit + (0x20 * 8)) +#define STM32F7_APB1_CLOCK(bit) (STM32F7_RCC_APB1_##bit + 0x80) + +/* APB2 */ +#define STM32F7_RCC_APB2_TIM1 0 +#define STM32F7_RCC_APB2_TIM8 1 +#define STM32F7_RCC_APB2_USART1 4 +#define STM32F7_RCC_APB2_USART6 5 +#define STM32F7_RCC_APB2_SDMMC2 7 +#define STM32F7_RCC_APB2_ADC1 8 +#define STM32F7_RCC_APB2_ADC2 9 +#define STM32F7_RCC_APB2_ADC3 10 +#define STM32F7_RCC_APB2_SDMMC1 11 +#define STM32F7_RCC_APB2_SPI1 12 +#define STM32F7_RCC_APB2_SPI4 13 +#define STM32F7_RCC_APB2_SYSCFG 14 +#define STM32F7_RCC_APB2_TIM9 16 +#define STM32F7_RCC_APB2_TIM10 17 +#define STM32F7_RCC_APB2_TIM11 18 +#define STM32F7_RCC_APB2_SPI5 20 +#define STM32F7_RCC_APB2_SPI6 21 +#define STM32F7_RCC_APB2_SAI1 22 +#define STM32F7_RCC_APB2_SAI2 23 +#define STM32F7_RCC_APB2_LTDC 26 + +#define STM32F7_APB2_RESET(bit) (STM32F7_RCC_APB2_##bit + (0x24 * 8)) +#define STM32F7_APB2_CLOCK(bit) (STM32F7_RCC_APB2_##bit + 0xA0) + +#endif /* _DT_BINDINGS_MFD_STM32F7_RCC_H */ diff --git a/include/dt-bindings/mfd/stm32h7-rcc.h b/include/dt-bindings/mfd/stm32h7-rcc.h new file mode 100644 index 000000000..461a8e044 --- /dev/null +++ b/include/dt-bindings/mfd/stm32h7-rcc.h @@ -0,0 +1,136 @@ +/* + * This header provides constants for the STM32H7 RCC IP + */ + +#ifndef _DT_BINDINGS_MFD_STM32H7_RCC_H +#define _DT_BINDINGS_MFD_STM32H7_RCC_H + +/* AHB3 */ +#define STM32H7_RCC_AHB3_MDMA 0 +#define STM32H7_RCC_AHB3_DMA2D 4 +#define STM32H7_RCC_AHB3_JPGDEC 5 +#define STM32H7_RCC_AHB3_FMC 12 +#define STM32H7_RCC_AHB3_QUADSPI 14 +#define STM32H7_RCC_AHB3_SDMMC1 16 +#define STM32H7_RCC_AHB3_CPU 31 + +#define STM32H7_AHB3_RESET(bit) (STM32H7_RCC_AHB3_##bit + (0x7C * 8)) + +/* AHB1 */ +#define STM32H7_RCC_AHB1_DMA1 0 +#define STM32H7_RCC_AHB1_DMA2 1 +#define STM32H7_RCC_AHB1_ADC12 5 +#define STM32H7_RCC_AHB1_ART 14 +#define STM32H7_RCC_AHB1_ETH1MAC 15 +#define STM32H7_RCC_AHB1_USB1OTG 25 +#define STM32H7_RCC_AHB1_USB2OTG 27 + +#define STM32H7_AHB1_RESET(bit) (STM32H7_RCC_AHB1_##bit + (0x80 * 8)) + +/* AHB2 */ +#define STM32H7_RCC_AHB2_CAMITF 0 +#define STM32H7_RCC_AHB2_CRYPT 4 +#define STM32H7_RCC_AHB2_HASH 5 +#define STM32H7_RCC_AHB2_RNG 6 +#define STM32H7_RCC_AHB2_SDMMC2 9 + +#define STM32H7_AHB2_RESET(bit) (STM32H7_RCC_AHB2_##bit + (0x84 * 8)) + +/* AHB4 */ +#define STM32H7_RCC_AHB4_GPIOA 0 +#define STM32H7_RCC_AHB4_GPIOB 1 +#define STM32H7_RCC_AHB4_GPIOC 2 +#define STM32H7_RCC_AHB4_GPIOD 3 +#define STM32H7_RCC_AHB4_GPIOE 4 +#define STM32H7_RCC_AHB4_GPIOF 5 +#define STM32H7_RCC_AHB4_GPIOG 6 +#define STM32H7_RCC_AHB4_GPIOH 7 +#define STM32H7_RCC_AHB4_GPIOI 8 +#define STM32H7_RCC_AHB4_GPIOJ 9 +#define STM32H7_RCC_AHB4_GPIOK 10 +#define STM32H7_RCC_AHB4_CRC 19 +#define STM32H7_RCC_AHB4_BDMA 21 +#define STM32H7_RCC_AHB4_ADC3 24 +#define STM32H7_RCC_AHB4_HSEM 25 + +#define STM32H7_AHB4_RESET(bit) (STM32H7_RCC_AHB4_##bit + (0x88 * 8)) + +/* APB3 */ +#define STM32H7_RCC_APB3_LTDC 3 +#define STM32H7_RCC_APB3_DSI 4 + +#define STM32H7_APB3_RESET(bit) (STM32H7_RCC_APB3_##bit + (0x8C * 8)) + +/* APB1L */ +#define STM32H7_RCC_APB1L_TIM2 0 +#define STM32H7_RCC_APB1L_TIM3 1 +#define STM32H7_RCC_APB1L_TIM4 2 +#define 
STM32H7_RCC_APB1L_TIM5 3 +#define STM32H7_RCC_APB1L_TIM6 4 +#define STM32H7_RCC_APB1L_TIM7 5 +#define STM32H7_RCC_APB1L_TIM12 6 +#define STM32H7_RCC_APB1L_TIM13 7 +#define STM32H7_RCC_APB1L_TIM14 8 +#define STM32H7_RCC_APB1L_LPTIM1 9 +#define STM32H7_RCC_APB1L_SPI2 14 +#define STM32H7_RCC_APB1L_SPI3 15 +#define STM32H7_RCC_APB1L_SPDIF_RX 16 +#define STM32H7_RCC_APB1L_USART2 17 +#define STM32H7_RCC_APB1L_USART3 18 +#define STM32H7_RCC_APB1L_UART4 19 +#define STM32H7_RCC_APB1L_UART5 20 +#define STM32H7_RCC_APB1L_I2C1 21 +#define STM32H7_RCC_APB1L_I2C2 22 +#define STM32H7_RCC_APB1L_I2C3 23 +#define STM32H7_RCC_APB1L_HDMICEC 27 +#define STM32H7_RCC_APB1L_DAC12 29 +#define STM32H7_RCC_APB1L_USART7 30 +#define STM32H7_RCC_APB1L_USART8 31 + +#define STM32H7_APB1L_RESET(bit) (STM32H7_RCC_APB1L_##bit + (0x90 * 8)) + +/* APB1H */ +#define STM32H7_RCC_APB1H_CRS 1 +#define STM32H7_RCC_APB1H_SWP 2 +#define STM32H7_RCC_APB1H_OPAMP 4 +#define STM32H7_RCC_APB1H_MDIOS 5 +#define STM32H7_RCC_APB1H_FDCAN 8 + +#define STM32H7_APB1H_RESET(bit) (STM32H7_RCC_APB1H_##bit + (0x94 * 8)) + +/* APB2 */ +#define STM32H7_RCC_APB2_TIM1 0 +#define STM32H7_RCC_APB2_TIM8 1 +#define STM32H7_RCC_APB2_USART1 4 +#define STM32H7_RCC_APB2_USART6 5 +#define STM32H7_RCC_APB2_SPI1 12 +#define STM32H7_RCC_APB2_SPI4 13 +#define STM32H7_RCC_APB2_TIM15 16 +#define STM32H7_RCC_APB2_TIM16 17 +#define STM32H7_RCC_APB2_TIM17 18 +#define STM32H7_RCC_APB2_SPI5 20 +#define STM32H7_RCC_APB2_SAI1 22 +#define STM32H7_RCC_APB2_SAI2 23 +#define STM32H7_RCC_APB2_SAI3 24 +#define STM32H7_RCC_APB2_DFSDM1 28 +#define STM32H7_RCC_APB2_HRTIM 29 + +#define STM32H7_APB2_RESET(bit) (STM32H7_RCC_APB2_##bit + (0x98 * 8)) + +/* APB4 */ +#define STM32H7_RCC_APB4_SYSCFG 1 +#define STM32H7_RCC_APB4_LPUART1 3 +#define STM32H7_RCC_APB4_SPI6 5 +#define STM32H7_RCC_APB4_I2C4 7 +#define STM32H7_RCC_APB4_LPTIM2 9 +#define STM32H7_RCC_APB4_LPTIM3 10 +#define STM32H7_RCC_APB4_LPTIM4 11 +#define STM32H7_RCC_APB4_LPTIM5 12 +#define STM32H7_RCC_APB4_COMP12 14 +#define STM32H7_RCC_APB4_VREF 15 +#define STM32H7_RCC_APB4_SAI4 21 +#define STM32H7_RCC_APB4_TMPSENS 26 + +#define STM32H7_APB4_RESET(bit) (STM32H7_RCC_APB4_##bit + (0x9C * 8)) + +#endif /* _DT_BINDINGS_MFD_STM32H7_RCC_H */ diff --git a/include/dt-bindings/mips/lantiq_rcu_gphy.h b/include/dt-bindings/mips/lantiq_rcu_gphy.h new file mode 100644 index 000000000..fa1a63773 --- /dev/null +++ b/include/dt-bindings/mips/lantiq_rcu_gphy.h @@ -0,0 +1,15 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2016 Martin Blumenstingl + * Copyright (C) 2017 Hauke Mehrtens + */ +#ifndef _DT_BINDINGS_MIPS_LANTIQ_RCU_GPHY_H +#define _DT_BINDINGS_MIPS_LANTIQ_RCU_GPHY_H + +#define GPHY_MODE_GE 1 +#define GPHY_MODE_FE 2 + +#endif /* _DT_BINDINGS_MIPS_LANTIQ_RCU_GPHY_H */ diff --git a/include/dt-bindings/mux/mux.h b/include/dt-bindings/mux/mux.h new file mode 100644 index 000000000..042719218 --- /dev/null +++ b/include/dt-bindings/mux/mux.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for most Multiplexer bindings. + * + * Most Multiplexer bindings specify an idle state. In most cases, the + * the multiplexer can be left as is when idle, and in some cases it can + * disconnect the input/output and leave the multiplexer in a high + * impedance state. 
+ */ + +#ifndef _DT_BINDINGS_MUX_MUX_H +#define _DT_BINDINGS_MUX_MUX_H + +#define MUX_IDLE_AS_IS (-1) +#define MUX_IDLE_DISCONNECT (-2) + +#endif diff --git a/include/dt-bindings/net/microchip-lan78xx.h b/include/dt-bindings/net/microchip-lan78xx.h new file mode 100644 index 000000000..0742ff075 --- /dev/null +++ b/include/dt-bindings/net/microchip-lan78xx.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_MICROCHIP_LAN78XX_H +#define _DT_BINDINGS_MICROCHIP_LAN78XX_H + +/* LED modes for LAN7800/LAN7850 embedded PHY */ + +#define LAN78XX_LINK_ACTIVITY 0 +#define LAN78XX_LINK_1000_ACTIVITY 1 +#define LAN78XX_LINK_100_ACTIVITY 2 +#define LAN78XX_LINK_10_ACTIVITY 3 +#define LAN78XX_LINK_100_1000_ACTIVITY 4 +#define LAN78XX_LINK_10_1000_ACTIVITY 5 +#define LAN78XX_LINK_10_100_ACTIVITY 6 +#define LAN78XX_DUPLEX_COLLISION 8 +#define LAN78XX_COLLISION 9 +#define LAN78XX_ACTIVITY 10 +#define LAN78XX_AUTONEG_FAULT 12 +#define LAN78XX_FORCE_LED_OFF 14 +#define LAN78XX_FORCE_LED_ON 15 + +#endif diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h new file mode 100644 index 000000000..697161f80 --- /dev/null +++ b/include/dt-bindings/net/mscc-phy-vsc8531.h @@ -0,0 +1,29 @@ +/* + * Device Tree constants for Microsemi VSC8531 PHY + * + * Author: Nagaraju Lakkaraju + * + * License: Dual MIT/GPL + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _DT_BINDINGS_MSCC_VSC8531_H +#define _DT_BINDINGS_MSCC_VSC8531_H + +/* PHY LED Modes */ +#define VSC8531_LINK_ACTIVITY 0 +#define VSC8531_LINK_1000_ACTIVITY 1 +#define VSC8531_LINK_100_ACTIVITY 2 +#define VSC8531_LINK_10_ACTIVITY 3 +#define VSC8531_LINK_100_1000_ACTIVITY 4 +#define VSC8531_LINK_10_1000_ACTIVITY 5 +#define VSC8531_LINK_10_100_ACTIVITY 6 +#define VSC8531_DUPLEX_COLLISION 8 +#define VSC8531_COLLISION 9 +#define VSC8531_ACTIVITY 10 +#define VSC8531_AUTONEG_FAULT 12 +#define VSC8531_SERIAL_MODE 13 +#define VSC8531_FORCE_LED_OFF 14 +#define VSC8531_FORCE_LED_ON 15 + +#endif diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h new file mode 100644 index 000000000..7b1656427 --- /dev/null +++ b/include/dt-bindings/net/ti-dp83867.h @@ -0,0 +1,59 @@ +/* + * Device Tree constants for the Texas Instruments DP83867 PHY + * + * Author: Dan Murphy + * + * Copyright: (C) 2015 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_TI_DP83867_H +#define _DT_BINDINGS_TI_DP83867_H + +/* PHY CTRL bits */ +#define DP83867_PHYCR_FIFO_DEPTH_3_B_NIB 0x00 +#define DP83867_PHYCR_FIFO_DEPTH_4_B_NIB 0x01 +#define DP83867_PHYCR_FIFO_DEPTH_6_B_NIB 0x02 +#define DP83867_PHYCR_FIFO_DEPTH_8_B_NIB 0x03 + +/* RGMIIDCTL internal delay for rx and tx */ +#define DP83867_RGMIIDCTL_250_PS 0x0 +#define DP83867_RGMIIDCTL_500_PS 0x1 +#define DP83867_RGMIIDCTL_750_PS 0x2 +#define DP83867_RGMIIDCTL_1_NS 0x3 +#define DP83867_RGMIIDCTL_1_25_NS 0x4 +#define DP83867_RGMIIDCTL_1_50_NS 0x5 +#define DP83867_RGMIIDCTL_1_75_NS 0x6 +#define DP83867_RGMIIDCTL_2_00_NS 0x7 +#define DP83867_RGMIIDCTL_2_25_NS 0x8 +#define DP83867_RGMIIDCTL_2_50_NS 0x9 +#define DP83867_RGMIIDCTL_2_75_NS 0xa +#define DP83867_RGMIIDCTL_3_00_NS 0xb +#define DP83867_RGMIIDCTL_3_25_NS 0xc +#define DP83867_RGMIIDCTL_3_50_NS 0xd +#define DP83867_RGMIIDCTL_3_75_NS 0xe +#define DP83867_RGMIIDCTL_4_00_NS 0xf + +/* IO_MUX_CFG - Clock output selection */ +#define DP83867_CLK_O_SEL_CHN_A_RCLK 0x0 +#define DP83867_CLK_O_SEL_CHN_B_RCLK 0x1 +#define DP83867_CLK_O_SEL_CHN_C_RCLK 0x2 +#define DP83867_CLK_O_SEL_CHN_D_RCLK 0x3 +#define DP83867_CLK_O_SEL_CHN_A_RCLK_DIV5 0x4 +#define DP83867_CLK_O_SEL_CHN_B_RCLK_DIV5 0x5 +#define DP83867_CLK_O_SEL_CHN_C_RCLK_DIV5 0x6 +#define DP83867_CLK_O_SEL_CHN_D_RCLK_DIV5 0x7 +#define DP83867_CLK_O_SEL_CHN_A_TCLK 0x8 +#define DP83867_CLK_O_SEL_CHN_B_TCLK 0x9 +#define DP83867_CLK_O_SEL_CHN_C_TCLK 0xA +#define DP83867_CLK_O_SEL_CHN_D_TCLK 0xB +#define DP83867_CLK_O_SEL_REF_CLK 0xC +#endif diff --git a/include/dt-bindings/phy/phy-pistachio-usb.h b/include/dt-bindings/phy/phy-pistachio-usb.h new file mode 100644 index 000000000..d1877aa0a --- /dev/null +++ b/include/dt-bindings/phy/phy-pistachio-usb.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#ifndef _DT_BINDINGS_PHY_PISTACHIO +#define _DT_BINDINGS_PHY_PISTACHIO + +#define REFCLK_XO_CRYSTAL 0x0 +#define REFCLK_X0_EXT_CLK 0x1 +#define REFCLK_CLK_CORE 0x2 + +#endif /* _DT_BINDINGS_PHY_PISTACHIO */ diff --git a/include/dt-bindings/phy/phy-qcom-qusb2.h b/include/dt-bindings/phy/phy-qcom-qusb2.h new file mode 100644 index 000000000..5c5e4d800 --- /dev/null +++ b/include/dt-bindings/phy/phy-qcom-qusb2.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_QCOM_PHY_QUSB2_H_ +#define _DT_BINDINGS_QCOM_PHY_QUSB2_H_ + +/* PHY HSTX TRIM bit values (24mA to 15mA) */ +#define QUSB2_V2_HSTX_TRIM_24_0_MA 0x0 +#define QUSB2_V2_HSTX_TRIM_23_4_MA 0x1 +#define QUSB2_V2_HSTX_TRIM_22_8_MA 0x2 +#define QUSB2_V2_HSTX_TRIM_22_2_MA 0x3 +#define QUSB2_V2_HSTX_TRIM_21_6_MA 0x4 +#define QUSB2_V2_HSTX_TRIM_21_0_MA 0x5 +#define QUSB2_V2_HSTX_TRIM_20_4_MA 0x6 +#define QUSB2_V2_HSTX_TRIM_19_8_MA 0x7 +#define QUSB2_V2_HSTX_TRIM_19_2_MA 0x8 +#define QUSB2_V2_HSTX_TRIM_18_6_MA 0x9 +#define QUSB2_V2_HSTX_TRIM_18_0_MA 0xa +#define QUSB2_V2_HSTX_TRIM_17_4_MA 0xb +#define QUSB2_V2_HSTX_TRIM_16_8_MA 0xc +#define QUSB2_V2_HSTX_TRIM_16_2_MA 0xd +#define QUSB2_V2_HSTX_TRIM_15_6_MA 0xe +#define QUSB2_V2_HSTX_TRIM_15_0_MA 0xf + +/* PHY PREEMPHASIS bit values */ +#define QUSB2_V2_PREEMPHASIS_NONE 0 +#define QUSB2_V2_PREEMPHASIS_5_PERCENT 1 +#define QUSB2_V2_PREEMPHASIS_10_PERCENT 2 +#define QUSB2_V2_PREEMPHASIS_15_PERCENT 3 + +/* PHY PREEMPHASIS-WIDTH bit values */ +#define QUSB2_V2_PREEMPHASIS_WIDTH_FULL_BIT 0 +#define QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT 1 + +#endif diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h new file mode 100644 index 000000000..d16e8755f --- /dev/null +++ b/include/dt-bindings/phy/phy.h @@ -0,0 +1,20 @@ +/* + * + * This header provides constants for the phy framework + * + * Copyright (C) 2014 STMicroelectronics + * Author: Gabriel Fernandez + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _DT_BINDINGS_PHY +#define _DT_BINDINGS_PHY + +#define PHY_NONE 0 +#define PHY_TYPE_SATA 1 +#define PHY_TYPE_PCIE 2 +#define PHY_TYPE_USB2 3 +#define PHY_TYPE_USB3 4 +#define PHY_TYPE_UFS 5 + +#endif /* _DT_BINDINGS_PHY */ diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h new file mode 100644 index 000000000..7d947a597 --- /dev/null +++ b/include/dt-bindings/pinctrl/am33xx.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants specific to AM33XX pinctrl bindings. + */ + +#ifndef _DT_BINDINGS_PINCTRL_AM33XX_H +#define _DT_BINDINGS_PINCTRL_AM33XX_H + +#include + +/* am33xx specific mux bit defines */ +#undef PULL_ENA +#undef INPUT_EN + +#define PULL_DISABLE (1 << 3) +#define INPUT_EN (1 << 5) +#define SLEWCTRL_SLOW (1 << 6) +#define SLEWCTRL_FAST 0 + +/* update macro depending on INPUT_EN and PULL_ENA */ +#undef PIN_OUTPUT +#undef PIN_OUTPUT_PULLUP +#undef PIN_OUTPUT_PULLDOWN +#undef PIN_INPUT +#undef PIN_INPUT_PULLUP +#undef PIN_INPUT_PULLDOWN + +#define PIN_OUTPUT (PULL_DISABLE) +#define PIN_OUTPUT_PULLUP (PULL_UP) +#define PIN_OUTPUT_PULLDOWN 0 +#define PIN_INPUT (INPUT_EN | PULL_DISABLE) +#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (INPUT_EN) + +/* undef non-existing modes */ +#undef PIN_OFF_NONE +#undef PIN_OFF_OUTPUT_HIGH +#undef PIN_OFF_OUTPUT_LOW +#undef PIN_OFF_INPUT_PULLUP +#undef PIN_OFF_INPUT_PULLDOWN +#undef PIN_OFF_WAKEUPENABLE + +#endif + diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h new file mode 100644 index 000000000..6ce4a32f7 --- /dev/null +++ b/include/dt-bindings/pinctrl/am43xx.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants specific to AM43XX pinctrl bindings. 
+ */ + +#ifndef _DT_BINDINGS_PINCTRL_AM43XX_H +#define _DT_BINDINGS_PINCTRL_AM43XX_H + +#define MUX_MODE0 0 +#define MUX_MODE1 1 +#define MUX_MODE2 2 +#define MUX_MODE3 3 +#define MUX_MODE4 4 +#define MUX_MODE5 5 +#define MUX_MODE6 6 +#define MUX_MODE7 7 +#define MUX_MODE8 8 +#define MUX_MODE9 9 + +#define PULL_DISABLE (1 << 16) +#define PULL_UP (1 << 17) +#define INPUT_EN (1 << 18) +#define SLEWCTRL_SLOW (1 << 19) +#define SLEWCTRL_FAST 0 +#define DS0_FORCE_OFF_MODE (1 << 24) +#define DS0_INPUT (1 << 25) +#define DS0_FORCE_OUT_HIGH (1 << 26) +#define DS0_PULL_UP_DOWN_EN (0 << 27) +#define DS0_PULL_UP_DOWN_DIS (1 << 27) +#define DS0_PULL_UP_SEL (1 << 28) +#define WAKEUP_ENABLE (1 << 29) + +#define DS0_PIN_OUTPUT (DS0_FORCE_OFF_MODE) +#define DS0_PIN_OUTPUT_HIGH (DS0_FORCE_OFF_MODE | DS0_FORCE_OUT_HIGH) +#define DS0_PIN_OUTPUT_PULLUP (DS0_FORCE_OFF_MODE | DS0_PULL_UP_DOWN_EN | DS0_PULL_UP_SEL) +#define DS0_PIN_OUTPUT_PULLDOWN (DS0_FORCE_OFF_MODE | DS0_PULL_UP_DOWN_EN) +#define DS0_PIN_INPUT (DS0_FORCE_OFF_MODE | DS0_INPUT) +#define DS0_PIN_INPUT_PULLUP (DS0_FORCE_OFF_MODE | DS0_INPUT | DS0_PULL_UP_DOWN_EN | DS0_PULL_UP_SEL) +#define DS0_PIN_INPUT_PULLDOWN (DS0_FORCE_OFF_MODE | DS0_INPUT | DS0_PULL_UP_DOWN_EN) + +#define PIN_OUTPUT (PULL_DISABLE) +#define PIN_OUTPUT_PULLUP (PULL_UP) +#define PIN_OUTPUT_PULLDOWN 0 +#define PIN_INPUT (INPUT_EN | PULL_DISABLE) +#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (INPUT_EN) + +/* + * Macro to allow using the absolute physical address instead of the + * padconf registers instead of the offset from padconf base. + */ +#define AM4372_IOPAD(pa, val) (((pa) & 0xffff) - 0x0800) (val) + +#endif + diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h new file mode 100644 index 000000000..eb81867ea --- /dev/null +++ b/include/dt-bindings/pinctrl/at91.h @@ -0,0 +1,46 @@ +/* + * This header provides constants for most at91 pinctrl bindings. + * + * Copyright (C) 2013 Jean-Christophe PLAGNIOL-VILLARD + * + * GPLv2 only + */ + +#ifndef __DT_BINDINGS_AT91_PINCTRL_H__ +#define __DT_BINDINGS_AT91_PINCTRL_H__ + +#define AT91_PINCTRL_NONE (0 << 0) +#define AT91_PINCTRL_PULL_UP (1 << 0) +#define AT91_PINCTRL_MULTI_DRIVE (1 << 1) +#define AT91_PINCTRL_DEGLITCH (1 << 2) +#define AT91_PINCTRL_PULL_DOWN (1 << 3) +#define AT91_PINCTRL_DIS_SCHMIT (1 << 4) +#define AT91_PINCTRL_OUTPUT (1 << 7) +#define AT91_PINCTRL_OUTPUT_VAL(x) ((x & 0x1) << 8) +#define AT91_PINCTRL_DEBOUNCE (1 << 16) +#define AT91_PINCTRL_DEBOUNCE_VAL(x) (x << 17) + +#define AT91_PINCTRL_PULL_UP_DEGLITCH (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DEGLITCH) + +#define AT91_PINCTRL_DRIVE_STRENGTH_DEFAULT (0x0 << 5) +#define AT91_PINCTRL_DRIVE_STRENGTH_LOW (0x1 << 5) +#define AT91_PINCTRL_DRIVE_STRENGTH_MED (0x2 << 5) +#define AT91_PINCTRL_DRIVE_STRENGTH_HI (0x3 << 5) + +#define AT91_PIOA 0 +#define AT91_PIOB 1 +#define AT91_PIOC 2 +#define AT91_PIOD 3 +#define AT91_PIOE 4 + +#define AT91_PERIPH_GPIO 0 +#define AT91_PERIPH_A 1 +#define AT91_PERIPH_B 2 +#define AT91_PERIPH_C 3 +#define AT91_PERIPH_D 4 + +#define ATMEL_PIO_DRVSTR_LO 1 +#define ATMEL_PIO_DRVSTR_ME 2 +#define ATMEL_PIO_DRVSTR_HI 3 + +#endif /* __DT_BINDINGS_AT91_PINCTRL_H__ */ diff --git a/include/dt-bindings/pinctrl/bcm2835.h b/include/dt-bindings/pinctrl/bcm2835.h new file mode 100644 index 000000000..e4e4fdf5d --- /dev/null +++ b/include/dt-bindings/pinctrl/bcm2835.h @@ -0,0 +1,32 @@ +/* + * Header providing constants for bcm2835 pinctrl bindings. 
+ * + * Copyright (C) 2015 Stefan Wahren + * + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#ifndef __DT_BINDINGS_PINCTRL_BCM2835_H__ +#define __DT_BINDINGS_PINCTRL_BCM2835_H__ + +/* brcm,function property */ +#define BCM2835_FSEL_GPIO_IN 0 +#define BCM2835_FSEL_GPIO_OUT 1 +#define BCM2835_FSEL_ALT5 2 +#define BCM2835_FSEL_ALT4 3 +#define BCM2835_FSEL_ALT0 4 +#define BCM2835_FSEL_ALT1 5 +#define BCM2835_FSEL_ALT2 6 +#define BCM2835_FSEL_ALT3 7 + +/* brcm,pull property */ +#define BCM2835_PUD_OFF 0 +#define BCM2835_PUD_DOWN 1 +#define BCM2835_PUD_UP 2 + +#endif /* __DT_BINDINGS_PINCTRL_BCM2835_H__ */ diff --git a/include/dt-bindings/pinctrl/brcm,pinctrl-stingray.h b/include/dt-bindings/pinctrl/brcm,pinctrl-stingray.h new file mode 100644 index 000000000..caa6c664b --- /dev/null +++ b/include/dt-bindings/pinctrl/brcm,pinctrl-stingray.h @@ -0,0 +1,68 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2017 Broadcom Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __DT_BINDINGS_PINCTRL_BRCM_STINGRAY_H__ +#define __DT_BINDINGS_PINCTRL_BRCM_STINGRAY_H__ + +/* Alternate functions available in MUX controller */ +#define MODE_NITRO 0 +#define MODE_NAND 1 +#define MODE_PNOR 2 +#define MODE_GPIO 3 + +/* Pad configuration attribute */ +#define PAD_SLEW_RATE_ENA (1 << 0) +#define PAD_SLEW_RATE_ENA_MASK (1 << 0) + +#define PAD_DRIVE_STRENGTH_2_MA (0 << 1) +#define PAD_DRIVE_STRENGTH_4_MA (1 << 1) +#define PAD_DRIVE_STRENGTH_6_MA (2 << 1) +#define PAD_DRIVE_STRENGTH_8_MA (3 << 1) +#define PAD_DRIVE_STRENGTH_10_MA (4 << 1) +#define PAD_DRIVE_STRENGTH_12_MA (5 << 1) +#define PAD_DRIVE_STRENGTH_14_MA (6 << 1) +#define PAD_DRIVE_STRENGTH_16_MA (7 << 1) +#define PAD_DRIVE_STRENGTH_MASK (7 << 1) + +#define PAD_PULL_UP_ENA (1 << 4) +#define PAD_PULL_UP_ENA_MASK (1 << 4) + +#define PAD_PULL_DOWN_ENA (1 << 5) +#define PAD_PULL_DOWN_ENA_MASK (1 << 5) + +#define PAD_INPUT_PATH_DIS (1 << 6) +#define PAD_INPUT_PATH_DIS_MASK (1 << 6) + +#define PAD_HYSTERESIS_ENA (1 << 7) +#define PAD_HYSTERESIS_ENA_MASK (1 << 7) + +#endif diff --git a/include/dt-bindings/pinctrl/dm814x.h b/include/dt-bindings/pinctrl/dm814x.h new file mode 100644 index 000000000..afbabbc4d --- /dev/null +++ b/include/dt-bindings/pinctrl/dm814x.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants specific to DM814X pinctrl bindings. + */ + +#ifndef _DT_BINDINGS_PINCTRL_DM814X_H +#define _DT_BINDINGS_PINCTRL_DM814X_H + +#include + +#undef INPUT_EN +#undef PULL_UP +#undef PULL_ENA + +/* + * Note that dm814x silicon revision 2.1 and older require input enabled + * (bit 18 set) for all 3.3V I/Os to avoid cumulative hardware damage. For + * more info, see errata advisory 2.1.87. We leave bit 18 out of + * function-mask in dm814x.h and rely on the bootloader for it. + */ +#define INPUT_EN (1 << 18) +#define PULL_UP (1 << 17) +#define PULL_DISABLE (1 << 16) + +/* update macro depending on INPUT_EN and PULL_ENA */ +#undef PIN_OUTPUT +#undef PIN_OUTPUT_PULLUP +#undef PIN_OUTPUT_PULLDOWN +#undef PIN_INPUT +#undef PIN_INPUT_PULLUP +#undef PIN_INPUT_PULLDOWN + +#define PIN_OUTPUT (PULL_DISABLE) +#define PIN_OUTPUT_PULLUP (PULL_UP) +#define PIN_OUTPUT_PULLDOWN 0 +#define PIN_INPUT (INPUT_EN | PULL_DISABLE) +#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (INPUT_EN) + +/* undef non-existing modes */ +#undef PIN_OFF_NONE +#undef PIN_OFF_OUTPUT_HIGH +#undef PIN_OFF_OUTPUT_LOW +#undef PIN_OFF_INPUT_PULLUP +#undef PIN_OFF_INPUT_PULLDOWN +#undef PIN_OFF_WAKEUPENABLE + +#endif + diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h new file mode 100644 index 000000000..18ec5df5a --- /dev/null +++ b/include/dt-bindings/pinctrl/dra.h @@ -0,0 +1,80 @@ +/* + * This header provides constants for DRA pinctrl bindings. + * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Rajendra Nayak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _DT_BINDINGS_PINCTRL_DRA_H +#define _DT_BINDINGS_PINCTRL_DRA_H + +/* DRA7 mux mode options for each pin. 
See TRM for options */ +#define MUX_MODE0 0x0 +#define MUX_MODE1 0x1 +#define MUX_MODE2 0x2 +#define MUX_MODE3 0x3 +#define MUX_MODE4 0x4 +#define MUX_MODE5 0x5 +#define MUX_MODE6 0x6 +#define MUX_MODE7 0x7 +#define MUX_MODE8 0x8 +#define MUX_MODE9 0x9 +#define MUX_MODE10 0xa +#define MUX_MODE11 0xb +#define MUX_MODE12 0xc +#define MUX_MODE13 0xd +#define MUX_MODE14 0xe +#define MUX_MODE15 0xf + +/* Certain pins need virtual mode, but note: they may glitch */ +#define MUX_VIRTUAL_MODE0 (MODE_SELECT | (0x0 << 4)) +#define MUX_VIRTUAL_MODE1 (MODE_SELECT | (0x1 << 4)) +#define MUX_VIRTUAL_MODE2 (MODE_SELECT | (0x2 << 4)) +#define MUX_VIRTUAL_MODE3 (MODE_SELECT | (0x3 << 4)) +#define MUX_VIRTUAL_MODE4 (MODE_SELECT | (0x4 << 4)) +#define MUX_VIRTUAL_MODE5 (MODE_SELECT | (0x5 << 4)) +#define MUX_VIRTUAL_MODE6 (MODE_SELECT | (0x6 << 4)) +#define MUX_VIRTUAL_MODE7 (MODE_SELECT | (0x7 << 4)) +#define MUX_VIRTUAL_MODE8 (MODE_SELECT | (0x8 << 4)) +#define MUX_VIRTUAL_MODE9 (MODE_SELECT | (0x9 << 4)) +#define MUX_VIRTUAL_MODE10 (MODE_SELECT | (0xa << 4)) +#define MUX_VIRTUAL_MODE11 (MODE_SELECT | (0xb << 4)) +#define MUX_VIRTUAL_MODE12 (MODE_SELECT | (0xc << 4)) +#define MUX_VIRTUAL_MODE13 (MODE_SELECT | (0xd << 4)) +#define MUX_VIRTUAL_MODE14 (MODE_SELECT | (0xe << 4)) +#define MUX_VIRTUAL_MODE15 (MODE_SELECT | (0xf << 4)) + +#define MODE_SELECT (1 << 8) + +#define PULL_ENA (0 << 16) +#define PULL_DIS (1 << 16) +#define PULL_UP (1 << 17) +#define INPUT_EN (1 << 18) +#define SLEWCONTROL (1 << 19) +#define WAKEUP_EN (1 << 24) +#define WAKEUP_EVENT (1 << 25) + +/* Active pin states */ +#define PIN_OUTPUT (0 | PULL_DIS) +#define PIN_OUTPUT_PULLUP (PULL_UP) +#define PIN_OUTPUT_PULLDOWN (0) +#define PIN_INPUT (INPUT_EN | PULL_DIS) +#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL) +#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN) + +/* + * Macro to allow using the absolute physical address instead of the + * padconf registers instead of the offset from padconf base. + */ +#define DRA7XX_CORE_IOPAD(pa, val) (((pa) & 0xffff) - 0x3400) (val) + +/* DRA7 IODELAY configuration parameters */ +#define A_DELAY_PS(val) ((val) & 0xffff) +#define G_DELAY_PS(val) ((val) & 0xffff) +#endif + diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h new file mode 100644 index 000000000..0359bfdc9 --- /dev/null +++ b/include/dt-bindings/pinctrl/hisi.h @@ -0,0 +1,74 @@ +/* + * This header provides constants for hisilicon pinctrl bindings. + * + * Copyright (c) 2015 Hisilicon Limited. + * Copyright (c) 2015 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_PINCTRL_HISI_H +#define _DT_BINDINGS_PINCTRL_HISI_H + +/* iomg bit definition */ +#define MUX_M0 0 +#define MUX_M1 1 +#define MUX_M2 2 +#define MUX_M3 3 +#define MUX_M4 4 +#define MUX_M5 5 +#define MUX_M6 6 +#define MUX_M7 7 + +/* iocg bit definition */ +#define PULL_MASK (3) +#define PULL_DIS (0) +#define PULL_UP (1 << 0) +#define PULL_DOWN (1 << 1) + +/* drive strength definition */ +#define DRIVE_MASK (7 << 4) +#define DRIVE1_02MA (0 << 4) +#define DRIVE1_04MA (1 << 4) +#define DRIVE1_08MA (2 << 4) +#define DRIVE1_10MA (3 << 4) +#define DRIVE2_02MA (0 << 4) +#define DRIVE2_04MA (1 << 4) +#define DRIVE2_08MA (2 << 4) +#define DRIVE2_10MA (3 << 4) +#define DRIVE3_04MA (0 << 4) +#define DRIVE3_08MA (1 << 4) +#define DRIVE3_12MA (2 << 4) +#define DRIVE3_16MA (3 << 4) +#define DRIVE3_20MA (4 << 4) +#define DRIVE3_24MA (5 << 4) +#define DRIVE3_32MA (6 << 4) +#define DRIVE3_40MA (7 << 4) +#define DRIVE4_02MA (0 << 4) +#define DRIVE4_04MA (2 << 4) +#define DRIVE4_08MA (4 << 4) +#define DRIVE4_10MA (6 << 4) + +/* drive strength definition for hi3660 */ +#define DRIVE6_MASK (15 << 4) +#define DRIVE6_04MA (0 << 4) +#define DRIVE6_12MA (4 << 4) +#define DRIVE6_19MA (8 << 4) +#define DRIVE6_27MA (10 << 4) +#define DRIVE6_32MA (15 << 4) +#define DRIVE7_02MA (0 << 4) +#define DRIVE7_04MA (1 << 4) +#define DRIVE7_06MA (2 << 4) +#define DRIVE7_08MA (3 << 4) +#define DRIVE7_10MA (4 << 4) +#define DRIVE7_12MA (5 << 4) +#define DRIVE7_14MA (6 << 4) +#define DRIVE7_16MA (7 << 4) +#endif diff --git a/include/dt-bindings/pinctrl/keystone.h b/include/dt-bindings/pinctrl/keystone.h new file mode 100644 index 000000000..7f97d776a --- /dev/null +++ b/include/dt-bindings/pinctrl/keystone.h @@ -0,0 +1,39 @@ +/* + * This header provides constants for Keystone pinctrl bindings. + * + * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_PINCTRL_KEYSTONE_H +#define _DT_BINDINGS_PINCTRL_KEYSTONE_H + +#define MUX_MODE0 0 +#define MUX_MODE1 1 +#define MUX_MODE2 2 +#define MUX_MODE3 3 +#define MUX_MODE4 4 +#define MUX_MODE5 5 + +#define BUFFER_CLASS_B (0 << 19) +#define BUFFER_CLASS_C (1 << 19) +#define BUFFER_CLASS_D (2 << 19) +#define BUFFER_CLASS_E (3 << 19) + +#define PULL_DISABLE (1 << 16) +#define PIN_PULLUP (1 << 17) +#define PIN_PULLDOWN (0 << 17) + +#define KEYSTONE_IOPAD_OFFSET(pa, offset) (((pa) & 0xffff) - (offset)) + +#define K2G_CORE_IOPAD(pa) KEYSTONE_IOPAD_OFFSET((pa), 0x1000) + +#endif diff --git a/include/dt-bindings/pinctrl/mt6397-pinfunc.h b/include/dt-bindings/pinctrl/mt6397-pinfunc.h new file mode 100644 index 000000000..f393fbd68 --- /dev/null +++ b/include/dt-bindings/pinctrl/mt6397-pinfunc.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DTS_MT6397_PINFUNC_H +#define __DTS_MT6397_PINFUNC_H + +#include + +#define MT6397_PIN_0_INT__FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define MT6397_PIN_0_INT__FUNC_INT (MTK_PIN_NO(0) | 1) + +#define MT6397_PIN_1_SRCVOLTEN__FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define MT6397_PIN_1_SRCVOLTEN__FUNC_SRCVOLTEN (MTK_PIN_NO(1) | 1) +#define MT6397_PIN_1_SRCVOLTEN__FUNC_TEST_CK1 (MTK_PIN_NO(1) | 6) + +#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_SRCLKEN_PERI (MTK_PIN_NO(2) | 1) +#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_TEST_CK2 (MTK_PIN_NO(2) | 6) + +#define MT6397_PIN_3_RTC_32K1V8__FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define MT6397_PIN_3_RTC_32K1V8__FUNC_RTC_32K1V8 (MTK_PIN_NO(3) | 1) +#define MT6397_PIN_3_RTC_32K1V8__FUNC_TEST_CK3 (MTK_PIN_NO(3) | 6) + +#define MT6397_PIN_4_WRAP_EVENT__FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define MT6397_PIN_4_WRAP_EVENT__FUNC_WRAP_EVENT (MTK_PIN_NO(4) | 1) + +#define MT6397_PIN_5_SPI_CLK__FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define MT6397_PIN_5_SPI_CLK__FUNC_SPI_CLK (MTK_PIN_NO(5) | 1) + +#define MT6397_PIN_6_SPI_CSN__FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define MT6397_PIN_6_SPI_CSN__FUNC_SPI_CSN (MTK_PIN_NO(6) | 1) + +#define MT6397_PIN_7_SPI_MOSI__FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define MT6397_PIN_7_SPI_MOSI__FUNC_SPI_MOSI (MTK_PIN_NO(7) | 1) + +#define MT6397_PIN_8_SPI_MISO__FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define MT6397_PIN_8_SPI_MISO__FUNC_SPI_MISO (MTK_PIN_NO(8) | 1) + +#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_AUD_CLK (MTK_PIN_NO(9) | 1) +#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_IN0 (MTK_PIN_NO(9) | 6) +#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_OUT0 (MTK_PIN_NO(9) | 7) + +#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_AUD_MISO (MTK_PIN_NO(10) | 1) +#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_IN1 (MTK_PIN_NO(10) | 6) +#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_OUT1 (MTK_PIN_NO(10) | 7) + +#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_AUD_MOSI (MTK_PIN_NO(11) | 1) +#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_IN2 (MTK_PIN_NO(11) | 6) +#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_OUT2 (MTK_PIN_NO(11) | 7) + +#define MT6397_PIN_12_COL0__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define MT6397_PIN_12_COL0__FUNC_COL0_USBDL (MTK_PIN_NO(12) | 1) +#define MT6397_PIN_12_COL0__FUNC_EINT10_1X (MTK_PIN_NO(12) | 2) +#define MT6397_PIN_12_COL0__FUNC_PWM1_3X (MTK_PIN_NO(12) | 3) +#define MT6397_PIN_12_COL0__FUNC_TEST_IN3 (MTK_PIN_NO(12) | 6) +#define 
MT6397_PIN_12_COL0__FUNC_TEST_OUT3 (MTK_PIN_NO(12) | 7) + +#define MT6397_PIN_13_COL1__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define MT6397_PIN_13_COL1__FUNC_COL1 (MTK_PIN_NO(13) | 1) +#define MT6397_PIN_13_COL1__FUNC_EINT11_1X (MTK_PIN_NO(13) | 2) +#define MT6397_PIN_13_COL1__FUNC_SCL0_2X (MTK_PIN_NO(13) | 3) +#define MT6397_PIN_13_COL1__FUNC_TEST_IN4 (MTK_PIN_NO(13) | 6) +#define MT6397_PIN_13_COL1__FUNC_TEST_OUT4 (MTK_PIN_NO(13) | 7) + +#define MT6397_PIN_14_COL2__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define MT6397_PIN_14_COL2__FUNC_COL2 (MTK_PIN_NO(14) | 1) +#define MT6397_PIN_14_COL2__FUNC_EINT12_1X (MTK_PIN_NO(14) | 2) +#define MT6397_PIN_14_COL2__FUNC_SDA0_2X (MTK_PIN_NO(14) | 3) +#define MT6397_PIN_14_COL2__FUNC_TEST_IN5 (MTK_PIN_NO(14) | 6) +#define MT6397_PIN_14_COL2__FUNC_TEST_OUT5 (MTK_PIN_NO(14) | 7) + +#define MT6397_PIN_15_COL3__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define MT6397_PIN_15_COL3__FUNC_COL3 (MTK_PIN_NO(15) | 1) +#define MT6397_PIN_15_COL3__FUNC_EINT13_1X (MTK_PIN_NO(15) | 2) +#define MT6397_PIN_15_COL3__FUNC_SCL1_2X (MTK_PIN_NO(15) | 3) +#define MT6397_PIN_15_COL3__FUNC_TEST_IN6 (MTK_PIN_NO(15) | 6) +#define MT6397_PIN_15_COL3__FUNC_TEST_OUT6 (MTK_PIN_NO(15) | 7) + +#define MT6397_PIN_16_COL4__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define MT6397_PIN_16_COL4__FUNC_COL4 (MTK_PIN_NO(16) | 1) +#define MT6397_PIN_16_COL4__FUNC_EINT14_1X (MTK_PIN_NO(16) | 2) +#define MT6397_PIN_16_COL4__FUNC_SDA1_2X (MTK_PIN_NO(16) | 3) +#define MT6397_PIN_16_COL4__FUNC_TEST_IN7 (MTK_PIN_NO(16) | 6) +#define MT6397_PIN_16_COL4__FUNC_TEST_OUT7 (MTK_PIN_NO(16) | 7) + +#define MT6397_PIN_17_COL5__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define MT6397_PIN_17_COL5__FUNC_COL5 (MTK_PIN_NO(17) | 1) +#define MT6397_PIN_17_COL5__FUNC_EINT15_1X (MTK_PIN_NO(17) | 2) +#define MT6397_PIN_17_COL5__FUNC_SCL2_2X (MTK_PIN_NO(17) | 3) +#define MT6397_PIN_17_COL5__FUNC_TEST_IN8 (MTK_PIN_NO(17) | 6) +#define MT6397_PIN_17_COL5__FUNC_TEST_OUT8 (MTK_PIN_NO(17) | 7) + +#define MT6397_PIN_18_COL6__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define MT6397_PIN_18_COL6__FUNC_COL6 (MTK_PIN_NO(18) | 1) +#define MT6397_PIN_18_COL6__FUNC_EINT16_1X (MTK_PIN_NO(18) | 2) +#define MT6397_PIN_18_COL6__FUNC_SDA2_2X (MTK_PIN_NO(18) | 3) +#define MT6397_PIN_18_COL6__FUNC_GPIO32K_0 (MTK_PIN_NO(18) | 4) +#define MT6397_PIN_18_COL6__FUNC_GPIO26M_0 (MTK_PIN_NO(18) | 5) +#define MT6397_PIN_18_COL6__FUNC_TEST_IN9 (MTK_PIN_NO(18) | 6) +#define MT6397_PIN_18_COL6__FUNC_TEST_OUT9 (MTK_PIN_NO(18) | 7) + +#define MT6397_PIN_19_COL7__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define MT6397_PIN_19_COL7__FUNC_COL7 (MTK_PIN_NO(19) | 1) +#define MT6397_PIN_19_COL7__FUNC_EINT17_1X (MTK_PIN_NO(19) | 2) +#define MT6397_PIN_19_COL7__FUNC_PWM2_3X (MTK_PIN_NO(19) | 3) +#define MT6397_PIN_19_COL7__FUNC_GPIO32K_1 (MTK_PIN_NO(19) | 4) +#define MT6397_PIN_19_COL7__FUNC_GPIO26M_1 (MTK_PIN_NO(19) | 5) +#define MT6397_PIN_19_COL7__FUNC_TEST_IN10 (MTK_PIN_NO(19) | 6) +#define MT6397_PIN_19_COL7__FUNC_TEST_OUT10 (MTK_PIN_NO(19) | 7) + +#define MT6397_PIN_20_ROW0__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define MT6397_PIN_20_ROW0__FUNC_ROW0 (MTK_PIN_NO(20) | 1) +#define MT6397_PIN_20_ROW0__FUNC_EINT18_1X (MTK_PIN_NO(20) | 2) +#define MT6397_PIN_20_ROW0__FUNC_SCL0_3X (MTK_PIN_NO(20) | 3) +#define MT6397_PIN_20_ROW0__FUNC_TEST_IN11 (MTK_PIN_NO(20) | 6) +#define MT6397_PIN_20_ROW0__FUNC_TEST_OUT11 (MTK_PIN_NO(20) | 7) + +#define MT6397_PIN_21_ROW1__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define MT6397_PIN_21_ROW1__FUNC_ROW1 (MTK_PIN_NO(21) | 1) +#define MT6397_PIN_21_ROW1__FUNC_EINT19_1X (MTK_PIN_NO(21) | 
2) +#define MT6397_PIN_21_ROW1__FUNC_SDA0_3X (MTK_PIN_NO(21) | 3) +#define MT6397_PIN_21_ROW1__FUNC_AUD_TSTCK (MTK_PIN_NO(21) | 4) +#define MT6397_PIN_21_ROW1__FUNC_TEST_IN12 (MTK_PIN_NO(21) | 6) +#define MT6397_PIN_21_ROW1__FUNC_TEST_OUT12 (MTK_PIN_NO(21) | 7) + +#define MT6397_PIN_22_ROW2__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define MT6397_PIN_22_ROW2__FUNC_ROW2 (MTK_PIN_NO(22) | 1) +#define MT6397_PIN_22_ROW2__FUNC_EINT20_1X (MTK_PIN_NO(22) | 2) +#define MT6397_PIN_22_ROW2__FUNC_SCL1_3X (MTK_PIN_NO(22) | 3) +#define MT6397_PIN_22_ROW2__FUNC_TEST_IN13 (MTK_PIN_NO(22) | 6) +#define MT6397_PIN_22_ROW2__FUNC_TEST_OUT13 (MTK_PIN_NO(22) | 7) + +#define MT6397_PIN_23_ROW3__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define MT6397_PIN_23_ROW3__FUNC_ROW3 (MTK_PIN_NO(23) | 1) +#define MT6397_PIN_23_ROW3__FUNC_EINT21_1X (MTK_PIN_NO(23) | 2) +#define MT6397_PIN_23_ROW3__FUNC_SDA1_3X (MTK_PIN_NO(23) | 3) +#define MT6397_PIN_23_ROW3__FUNC_TEST_IN14 (MTK_PIN_NO(23) | 6) +#define MT6397_PIN_23_ROW3__FUNC_TEST_OUT14 (MTK_PIN_NO(23) | 7) + +#define MT6397_PIN_24_ROW4__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define MT6397_PIN_24_ROW4__FUNC_ROW4 (MTK_PIN_NO(24) | 1) +#define MT6397_PIN_24_ROW4__FUNC_EINT22_1X (MTK_PIN_NO(24) | 2) +#define MT6397_PIN_24_ROW4__FUNC_SCL2_3X (MTK_PIN_NO(24) | 3) +#define MT6397_PIN_24_ROW4__FUNC_TEST_IN15 (MTK_PIN_NO(24) | 6) +#define MT6397_PIN_24_ROW4__FUNC_TEST_OUT15 (MTK_PIN_NO(24) | 7) + +#define MT6397_PIN_25_ROW5__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define MT6397_PIN_25_ROW5__FUNC_ROW5 (MTK_PIN_NO(25) | 1) +#define MT6397_PIN_25_ROW5__FUNC_EINT23_1X (MTK_PIN_NO(25) | 2) +#define MT6397_PIN_25_ROW5__FUNC_SDA2_3X (MTK_PIN_NO(25) | 3) +#define MT6397_PIN_25_ROW5__FUNC_TEST_IN16 (MTK_PIN_NO(25) | 6) +#define MT6397_PIN_25_ROW5__FUNC_TEST_OUT16 (MTK_PIN_NO(25) | 7) + +#define MT6397_PIN_26_ROW6__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define MT6397_PIN_26_ROW6__FUNC_ROW6 (MTK_PIN_NO(26) | 1) +#define MT6397_PIN_26_ROW6__FUNC_EINT24_1X (MTK_PIN_NO(26) | 2) +#define MT6397_PIN_26_ROW6__FUNC_PWM3_3X (MTK_PIN_NO(26) | 3) +#define MT6397_PIN_26_ROW6__FUNC_GPIO32K_2 (MTK_PIN_NO(26) | 4) +#define MT6397_PIN_26_ROW6__FUNC_GPIO26M_2 (MTK_PIN_NO(26) | 5) +#define MT6397_PIN_26_ROW6__FUNC_TEST_IN17 (MTK_PIN_NO(26) | 6) +#define MT6397_PIN_26_ROW6__FUNC_TEST_OUT17 (MTK_PIN_NO(26) | 7) + +#define MT6397_PIN_27_ROW7__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define MT6397_PIN_27_ROW7__FUNC_ROW7 (MTK_PIN_NO(27) | 1) +#define MT6397_PIN_27_ROW7__FUNC_EINT3_1X (MTK_PIN_NO(27) | 2) +#define MT6397_PIN_27_ROW7__FUNC_CBUS (MTK_PIN_NO(27) | 3) +#define MT6397_PIN_27_ROW7__FUNC_GPIO32K_3 (MTK_PIN_NO(27) | 4) +#define MT6397_PIN_27_ROW7__FUNC_GPIO26M_3 (MTK_PIN_NO(27) | 5) +#define MT6397_PIN_27_ROW7__FUNC_TEST_IN18 (MTK_PIN_NO(27) | 6) +#define MT6397_PIN_27_ROW7__FUNC_TEST_OUT18 (MTK_PIN_NO(27) | 7) + +#define MT6397_PIN_28_PWM1__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define MT6397_PIN_28_PWM1__FUNC_PWM1 (MTK_PIN_NO(28) | 1) +#define MT6397_PIN_28_PWM1__FUNC_EINT4_1X (MTK_PIN_NO(28) | 2) +#define MT6397_PIN_28_PWM1__FUNC_GPIO32K_4 (MTK_PIN_NO(28) | 4) +#define MT6397_PIN_28_PWM1__FUNC_GPIO26M_4 (MTK_PIN_NO(28) | 5) +#define MT6397_PIN_28_PWM1__FUNC_TEST_IN19 (MTK_PIN_NO(28) | 6) +#define MT6397_PIN_28_PWM1__FUNC_TEST_OUT19 (MTK_PIN_NO(28) | 7) + +#define MT6397_PIN_29_PWM2__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define MT6397_PIN_29_PWM2__FUNC_PWM2 (MTK_PIN_NO(29) | 1) +#define MT6397_PIN_29_PWM2__FUNC_EINT5_1X (MTK_PIN_NO(29) | 2) +#define MT6397_PIN_29_PWM2__FUNC_GPIO32K_5 (MTK_PIN_NO(29) | 4) +#define 
MT6397_PIN_29_PWM2__FUNC_GPIO26M_5 (MTK_PIN_NO(29) | 5) +#define MT6397_PIN_29_PWM2__FUNC_TEST_IN20 (MTK_PIN_NO(29) | 6) +#define MT6397_PIN_29_PWM2__FUNC_TEST_OUT20 (MTK_PIN_NO(29) | 7) + +#define MT6397_PIN_30_PWM3__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define MT6397_PIN_30_PWM3__FUNC_PWM3 (MTK_PIN_NO(30) | 1) +#define MT6397_PIN_30_PWM3__FUNC_EINT6_1X (MTK_PIN_NO(30) | 2) +#define MT6397_PIN_30_PWM3__FUNC_COL0 (MTK_PIN_NO(30) | 3) +#define MT6397_PIN_30_PWM3__FUNC_GPIO32K_6 (MTK_PIN_NO(30) | 4) +#define MT6397_PIN_30_PWM3__FUNC_GPIO26M_6 (MTK_PIN_NO(30) | 5) +#define MT6397_PIN_30_PWM3__FUNC_TEST_IN21 (MTK_PIN_NO(30) | 6) +#define MT6397_PIN_30_PWM3__FUNC_TEST_OUT21 (MTK_PIN_NO(30) | 7) + +#define MT6397_PIN_31_SCL0__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define MT6397_PIN_31_SCL0__FUNC_SCL0 (MTK_PIN_NO(31) | 1) +#define MT6397_PIN_31_SCL0__FUNC_EINT7_1X (MTK_PIN_NO(31) | 2) +#define MT6397_PIN_31_SCL0__FUNC_PWM1_2X (MTK_PIN_NO(31) | 3) +#define MT6397_PIN_31_SCL0__FUNC_TEST_IN22 (MTK_PIN_NO(31) | 6) +#define MT6397_PIN_31_SCL0__FUNC_TEST_OUT22 (MTK_PIN_NO(31) | 7) + +#define MT6397_PIN_32_SDA0__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define MT6397_PIN_32_SDA0__FUNC_SDA0 (MTK_PIN_NO(32) | 1) +#define MT6397_PIN_32_SDA0__FUNC_EINT8_1X (MTK_PIN_NO(32) | 2) +#define MT6397_PIN_32_SDA0__FUNC_TEST_IN23 (MTK_PIN_NO(32) | 6) +#define MT6397_PIN_32_SDA0__FUNC_TEST_OUT23 (MTK_PIN_NO(32) | 7) + +#define MT6397_PIN_33_SCL1__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define MT6397_PIN_33_SCL1__FUNC_SCL1 (MTK_PIN_NO(33) | 1) +#define MT6397_PIN_33_SCL1__FUNC_EINT9_1X (MTK_PIN_NO(33) | 2) +#define MT6397_PIN_33_SCL1__FUNC_PWM2_2X (MTK_PIN_NO(33) | 3) +#define MT6397_PIN_33_SCL1__FUNC_TEST_IN24 (MTK_PIN_NO(33) | 6) +#define MT6397_PIN_33_SCL1__FUNC_TEST_OUT24 (MTK_PIN_NO(33) | 7) + +#define MT6397_PIN_34_SDA1__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define MT6397_PIN_34_SDA1__FUNC_SDA1 (MTK_PIN_NO(34) | 1) +#define MT6397_PIN_34_SDA1__FUNC_EINT0_1X (MTK_PIN_NO(34) | 2) +#define MT6397_PIN_34_SDA1__FUNC_TEST_IN25 (MTK_PIN_NO(34) | 6) +#define MT6397_PIN_34_SDA1__FUNC_TEST_OUT25 (MTK_PIN_NO(34) | 7) + +#define MT6397_PIN_35_SCL2__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define MT6397_PIN_35_SCL2__FUNC_SCL2 (MTK_PIN_NO(35) | 1) +#define MT6397_PIN_35_SCL2__FUNC_EINT1_1X (MTK_PIN_NO(35) | 2) +#define MT6397_PIN_35_SCL2__FUNC_PWM3_2X (MTK_PIN_NO(35) | 3) +#define MT6397_PIN_35_SCL2__FUNC_TEST_IN26 (MTK_PIN_NO(35) | 6) +#define MT6397_PIN_35_SCL2__FUNC_TEST_OUT26 (MTK_PIN_NO(35) | 7) + +#define MT6397_PIN_36_SDA2__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define MT6397_PIN_36_SDA2__FUNC_SDA2 (MTK_PIN_NO(36) | 1) +#define MT6397_PIN_36_SDA2__FUNC_EINT2_1X (MTK_PIN_NO(36) | 2) +#define MT6397_PIN_36_SDA2__FUNC_TEST_IN27 (MTK_PIN_NO(36) | 6) +#define MT6397_PIN_36_SDA2__FUNC_TEST_OUT27 (MTK_PIN_NO(36) | 7) + +#define MT6397_PIN_37_HDMISD__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define MT6397_PIN_37_HDMISD__FUNC_HDMISD (MTK_PIN_NO(37) | 1) +#define MT6397_PIN_37_HDMISD__FUNC_TEST_IN28 (MTK_PIN_NO(37) | 6) +#define MT6397_PIN_37_HDMISD__FUNC_TEST_OUT28 (MTK_PIN_NO(37) | 7) + +#define MT6397_PIN_38_HDMISCK__FUNC_GPIO38 (MTK_PIN_NO(38) | 0) +#define MT6397_PIN_38_HDMISCK__FUNC_HDMISCK (MTK_PIN_NO(38) | 1) +#define MT6397_PIN_38_HDMISCK__FUNC_TEST_IN29 (MTK_PIN_NO(38) | 6) +#define MT6397_PIN_38_HDMISCK__FUNC_TEST_OUT29 (MTK_PIN_NO(38) | 7) + +#define MT6397_PIN_39_HTPLG__FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define MT6397_PIN_39_HTPLG__FUNC_HTPLG (MTK_PIN_NO(39) | 1) +#define MT6397_PIN_39_HTPLG__FUNC_TEST_IN30 (MTK_PIN_NO(39) | 6) +#define 
MT6397_PIN_39_HTPLG__FUNC_TEST_OUT30 (MTK_PIN_NO(39) | 7) + +#define MT6397_PIN_40_CEC__FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define MT6397_PIN_40_CEC__FUNC_CEC (MTK_PIN_NO(40) | 1) +#define MT6397_PIN_40_CEC__FUNC_TEST_IN31 (MTK_PIN_NO(40) | 6) +#define MT6397_PIN_40_CEC__FUNC_TEST_OUT31 (MTK_PIN_NO(40) | 7) + +#endif /* __DTS_MT6397_PINFUNC_H */ diff --git a/include/dt-bindings/pinctrl/mt65xx.h b/include/dt-bindings/pinctrl/mt65xx.h new file mode 100644 index 000000000..1198f4541 --- /dev/null +++ b/include/dt-bindings/pinctrl/mt65xx.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Hongzhou.Yang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_PINCTRL_MT65XX_H +#define _DT_BINDINGS_PINCTRL_MT65XX_H + +#define MTK_PIN_NO(x) ((x) << 8) +#define MTK_GET_PIN_NO(x) ((x) >> 8) +#define MTK_GET_PIN_FUNC(x) ((x) & 0xf) + +#define MTK_PUPD_SET_R1R0_00 100 +#define MTK_PUPD_SET_R1R0_01 101 +#define MTK_PUPD_SET_R1R0_10 102 +#define MTK_PUPD_SET_R1R0_11 103 + +#define MTK_DRIVE_2mA 2 +#define MTK_DRIVE_4mA 4 +#define MTK_DRIVE_6mA 6 +#define MTK_DRIVE_8mA 8 +#define MTK_DRIVE_10mA 10 +#define MTK_DRIVE_12mA 12 +#define MTK_DRIVE_14mA 14 +#define MTK_DRIVE_16mA 16 +#define MTK_DRIVE_20mA 20 +#define MTK_DRIVE_24mA 24 +#define MTK_DRIVE_28mA 28 +#define MTK_DRIVE_32mA 32 + +#endif /* _DT_BINDINGS_PINCTRL_MT65XX_H */ diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h new file mode 100644 index 000000000..604fe781c --- /dev/null +++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h @@ -0,0 +1,651 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DTS_MT7623_PINFUNC_H +#define __DTS_MT7623_PINFUNC_H + +#include + +#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_PWRAP_SPIDO (MTK_PIN_NO(0) | 1) +#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_PWRAP_SPIDI (MTK_PIN_NO(0) | 2) + +#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_PWRAP_SPIDI (MTK_PIN_NO(1) | 1) +#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_PWRAP_SPIDO (MTK_PIN_NO(1) | 2) + +#define MT7623_PIN_2_PWRAP_INT_FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define MT7623_PIN_2_PWRAP_INT_FUNC_PWRAP_INT (MTK_PIN_NO(2) | 1) + +#define MT7623_PIN_3_PWRAP_SPI0_CK_FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define MT7623_PIN_3_PWRAP_SPI0_CK_FUNC_PWRAP_SPICK_I (MTK_PIN_NO(3) | 1) + +#define MT7623_PIN_4_PWRAP_SPI0_CSN_FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define MT7623_PIN_4_PWRAP_SPI0_CSN_FUNC_PWRAP_SPICS_B_I (MTK_PIN_NO(4) | 1) + +#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_PWRAP_SPICK2_I (MTK_PIN_NO(5) | 1) +#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_ANT_SEL1 (MTK_PIN_NO(5) | 5) + +#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_PWRAP_SPICS2_B_I (MTK_PIN_NO(6) | 1) +#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_ANT_SEL0 (MTK_PIN_NO(6) | 5) + +#define MT7623_PIN_7_SPI1_CSN_FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS (MTK_PIN_NO(7) | 1) 
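
[Editorial aside, not part of the upstream patch: the MediaTek pinfunc constants in this hunk and in the MT6397 hunk above all pack the pad number into bits 8 and up and the mux function into the low nibble, per the MTK_PIN_NO()/MTK_GET_PIN_NO()/MTK_GET_PIN_FUNC() macros from mt65xx.h just above. A minimal C sketch of that bit layout follows; the standalone program and its printf demo are illustrative only, while the macro bodies are copied from the mt65xx.h hunk.]

#include <stdio.h>

/* Encoding macros as defined in mt65xx.h in this patch */
#define MTK_PIN_NO(x)       ((x) << 8)
#define MTK_GET_PIN_NO(x)   ((x) >> 8)
#define MTK_GET_PIN_FUNC(x) ((x) & 0xf)

/* Example constant from the MT7623 hunk: pad 7 muxed as SPI1_CS (function 1) */
#define MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS (MTK_PIN_NO(7) | 1)

int main(void)
{
	unsigned int v = MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS;

	/* Decodes the packed value back into pad number and mux function:
	 * prints "pin 7 function 1".
	 */
	printf("pin %u function %u\n", MTK_GET_PIN_NO(v), MTK_GET_PIN_FUNC(v));
	return 0;
}

[In practice these constants are referenced from device-tree pin configuration nodes and decoded by the pinctrl driver, not by board code; the program above is only meant to make the bit layout concrete.]
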
+#define MT7623_PIN_7_SPI1_CSN_FUNC_KCOL0 (MTK_PIN_NO(7) | 4) + +#define MT7623_PIN_8_SPI1_MI_FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MI (MTK_PIN_NO(8) | 1) +#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MO (MTK_PIN_NO(8) | 2) +#define MT7623_PIN_8_SPI1_MI_FUNC_KCOL1 (MTK_PIN_NO(8) | 4) + +#define MT7623_PIN_9_SPI1_MO_FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MO (MTK_PIN_NO(9) | 1) +#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MI (MTK_PIN_NO(9) | 2) +#define MT7623_PIN_9_SPI1_MO_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(9) | 3) +#define MT7623_PIN_9_SPI1_MO_FUNC_KCOL2 (MTK_PIN_NO(9) | 4) + +#define MT7623_PIN_10_RTC32K_CK_FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define MT7623_PIN_10_RTC32K_CK_FUNC_RTC32K_CK (MTK_PIN_NO(10) | 1) + +#define MT7623_PIN_11_WATCHDOG_FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define MT7623_PIN_11_WATCHDOG_FUNC_WATCHDOG (MTK_PIN_NO(11) | 1) + +#define MT7623_PIN_12_SRCLKENA_FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define MT7623_PIN_12_SRCLKENA_FUNC_SRCLKENA (MTK_PIN_NO(12) | 1) + +#define MT7623_PIN_13_SRCLKENAI_FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define MT7623_PIN_13_SRCLKENAI_FUNC_SRCLKENAI (MTK_PIN_NO(13) | 1) + +#define MT7623_PIN_14_GPIO14_FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define MT7623_PIN_14_GPIO14_FUNC_URXD2 (MTK_PIN_NO(14) | 1) +#define MT7623_PIN_14_GPIO14_FUNC_UTXD2 (MTK_PIN_NO(14) | 2) +#define MT7623_PIN_14_GPIO14_FUNC_SRCCLKENAI2 (MTK_PIN_NO(14) | 5) + +#define MT7623_PIN_15_GPIO15_FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define MT7623_PIN_15_GPIO15_FUNC_UTXD2 (MTK_PIN_NO(15) | 1) +#define MT7623_PIN_15_GPIO15_FUNC_URXD2 (MTK_PIN_NO(15) | 2) + +#define MT7623_PIN_18_PCM_CLK_FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define MT7623_PIN_18_PCM_CLK_FUNC_PCM_CLK0 (MTK_PIN_NO(18) | 1) +#define MT7623_PIN_18_PCM_CLK_FUNC_MRG_CLK (MTK_PIN_NO(18) | 2) +#define MT7623_PIN_18_PCM_CLK_FUNC_MM_TEST_CK (MTK_PIN_NO(18) | 4) +#define MT7623_PIN_18_PCM_CLK_FUNC_CONN_DSP_JCK (MTK_PIN_NO(18) | 5) +#define MT7623_PIN_18_PCM_CLK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(18) | 6) + +#define MT7623_PIN_19_PCM_SYNC_FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define MT7623_PIN_19_PCM_SYNC_FUNC_PCM_SYNC (MTK_PIN_NO(19) | 1) +#define MT7623_PIN_19_PCM_SYNC_FUNC_MRG_SYNC (MTK_PIN_NO(19) | 2) +#define MT7623_PIN_19_PCM_SYNC_FUNC_CONN_DSP_JINTP (MTK_PIN_NO(19) | 5) +#define MT7623_PIN_19_PCM_SYNC_FUNC_AP_PCM_SYNC (MTK_PIN_NO(19) | 6) + +#define MT7623_PIN_20_PCM_RX_FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define MT7623_PIN_20_PCM_RX_FUNC_PCM_RX (MTK_PIN_NO(20) | 1) +#define MT7623_PIN_20_PCM_RX_FUNC_MRG_RX (MTK_PIN_NO(20) | 2) +#define MT7623_PIN_20_PCM_RX_FUNC_MRG_TX (MTK_PIN_NO(20) | 3) +#define MT7623_PIN_20_PCM_RX_FUNC_PCM_TX (MTK_PIN_NO(20) | 4) +#define MT7623_PIN_20_PCM_RX_FUNC_CONN_DSP_JDI (MTK_PIN_NO(20) | 5) +#define MT7623_PIN_20_PCM_RX_FUNC_AP_PCM_RX (MTK_PIN_NO(20) | 6) + +#define MT7623_PIN_21_PCM_TX_FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define MT7623_PIN_21_PCM_TX_FUNC_PCM_TX (MTK_PIN_NO(21) | 1) +#define MT7623_PIN_21_PCM_TX_FUNC_MRG_TX (MTK_PIN_NO(21) | 2) +#define MT7623_PIN_21_PCM_TX_FUNC_MRG_RX (MTK_PIN_NO(21) | 3) +#define MT7623_PIN_21_PCM_TX_FUNC_PCM_RX (MTK_PIN_NO(21) | 4) +#define MT7623_PIN_21_PCM_TX_FUNC_CONN_DSP_JMS (MTK_PIN_NO(21) | 5) +#define MT7623_PIN_21_PCM_TX_FUNC_AP_PCM_TX (MTK_PIN_NO(21) | 6) + +#define MT7623_PIN_22_EINT0_FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define MT7623_PIN_22_EINT0_FUNC_UCTS0 (MTK_PIN_NO(22) | 1) +#define MT7623_PIN_22_EINT0_FUNC_PCIE0_PERST_N (MTK_PIN_NO(22) | 2) +#define MT7623_PIN_22_EINT0_FUNC_KCOL3 (MTK_PIN_NO(22) | 3) +#define 
MT7623_PIN_22_EINT0_FUNC_CONN_DSP_JDO (MTK_PIN_NO(22) | 4) +#define MT7623_PIN_22_EINT0_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(22) | 5) + +#define MT7623_PIN_23_EINT1_FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define MT7623_PIN_23_EINT1_FUNC_URTS0 (MTK_PIN_NO(23) | 1) +#define MT7623_PIN_23_EINT1_FUNC_PCIE1_PERST_N (MTK_PIN_NO(23) | 2) +#define MT7623_PIN_23_EINT1_FUNC_KCOL2 (MTK_PIN_NO(23) | 3) +#define MT7623_PIN_23_EINT1_FUNC_CONN_MCU_TDO (MTK_PIN_NO(23) | 4) +#define MT7623_PIN_23_EINT1_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5) + +#define MT7623_PIN_24_EINT2_FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define MT7623_PIN_24_EINT2_FUNC_UCTS1 (MTK_PIN_NO(24) | 1) +#define MT7623_PIN_24_EINT2_FUNC_PCIE2_PERST_N (MTK_PIN_NO(24) | 2) +#define MT7623_PIN_24_EINT2_FUNC_KCOL1 (MTK_PIN_NO(24) | 3) +#define MT7623_PIN_24_EINT2_FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(24) | 4) + +#define MT7623_PIN_25_EINT3_FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define MT7623_PIN_25_EINT3_FUNC_URTS1 (MTK_PIN_NO(25) | 1) +#define MT7623_PIN_25_EINT3_FUNC_KCOL0 (MTK_PIN_NO(25) | 3) +#define MT7623_PIN_25_EINT3_FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(25) | 4) + +#define MT7623_PIN_26_EINT4_FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define MT7623_PIN_26_EINT4_FUNC_UCTS3 (MTK_PIN_NO(26) | 1) +#define MT7623_PIN_26_EINT4_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(26) | 2) +#define MT7623_PIN_26_EINT4_FUNC_KROW3 (MTK_PIN_NO(26) | 3) +#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_TCK0 (MTK_PIN_NO(26) | 4) +#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(26) | 5) +#define MT7623_PIN_26_EINT4_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(26) | 6) + +#define MT7623_PIN_27_EINT5_FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define MT7623_PIN_27_EINT5_FUNC_URTS3 (MTK_PIN_NO(27) | 1) +#define MT7623_PIN_27_EINT5_FUNC_IDDIG_P1 (MTK_PIN_NO(27) | 2) +#define MT7623_PIN_27_EINT5_FUNC_KROW2 (MTK_PIN_NO(27) | 3) +#define MT7623_PIN_27_EINT5_FUNC_CONN_MCU_TDI (MTK_PIN_NO(27) | 4) +#define MT7623_PIN_27_EINT5_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(27) | 6) + +#define MT7623_PIN_28_EINT6_FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define MT7623_PIN_28_EINT6_FUNC_DRV_VBUS (MTK_PIN_NO(28) | 1) +#define MT7623_PIN_28_EINT6_FUNC_KROW1 (MTK_PIN_NO(28) | 3) +#define MT7623_PIN_28_EINT6_FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(28) | 4) +#define MT7623_PIN_28_EINT6_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(28) | 6) + +#define MT7623_PIN_29_EINT7_FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define MT7623_PIN_29_EINT7_FUNC_IDDIG (MTK_PIN_NO(29) | 1) +#define MT7623_PIN_29_EINT7_FUNC_MSDC1_WP (MTK_PIN_NO(29) | 2) +#define MT7623_PIN_29_EINT7_FUNC_KROW0 (MTK_PIN_NO(29) | 3) +#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_TMS (MTK_PIN_NO(29) | 4) +#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(29) | 5) +#define MT7623_PIN_29_EINT7_FUNC_PCIE2_PERST_N (MTK_PIN_NO(29) | 6) + +#define MT7623_PIN_33_I2S1_DATA_FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA (MTK_PIN_NO(33) | 1) +#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA_BYPS (MTK_PIN_NO(33) | 2) +#define MT7623_PIN_33_I2S1_DATA_FUNC_PCM_TX (MTK_PIN_NO(33) | 3) +#define MT7623_PIN_33_I2S1_DATA_FUNC_IMG_TEST_CK (MTK_PIN_NO(33) | 4) +#define MT7623_PIN_33_I2S1_DATA_FUNC_G1_RXD0 (MTK_PIN_NO(33) | 5) +#define MT7623_PIN_33_I2S1_DATA_FUNC_AP_PCM_TX (MTK_PIN_NO(33) | 6) + +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_I2S1_DATA_IN (MTK_PIN_NO(34) | 1) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(34) | 3) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_VDEC_TEST_CK (MTK_PIN_NO(34) | 4) +#define 
MT7623_PIN_34_I2S1_DATA_IN_FUNC_G1_RXD1 (MTK_PIN_NO(34) | 5) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_AP_PCM_RX (MTK_PIN_NO(34) | 6) + +#define MT7623_PIN_35_I2S1_BCK_FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define MT7623_PIN_35_I2S1_BCK_FUNC_I2S1_BCK (MTK_PIN_NO(35) | 1) +#define MT7623_PIN_35_I2S1_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(35) | 3) +#define MT7623_PIN_35_I2S1_BCK_FUNC_G1_RXD2 (MTK_PIN_NO(35) | 5) +#define MT7623_PIN_35_I2S1_BCK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(35) | 6) + +#define MT7623_PIN_36_I2S1_LRCK_FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_I2S1_LRCK (MTK_PIN_NO(36) | 1) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(36) | 3) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_G1_RXD3 (MTK_PIN_NO(36) | 5) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_AP_PCM_SYNC (MTK_PIN_NO(36) | 6) + +#define MT7623_PIN_37_I2S1_MCLK_FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define MT7623_PIN_37_I2S1_MCLK_FUNC_I2S1_MCLK (MTK_PIN_NO(37) | 1) +#define MT7623_PIN_37_I2S1_MCLK_FUNC_G1_RXDV (MTK_PIN_NO(37) | 5) + +#define MT7623_PIN_39_JTMS_FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define MT7623_PIN_39_JTMS_FUNC_JTMS (MTK_PIN_NO(39) | 1) +#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_TMS (MTK_PIN_NO(39) | 2) +#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(39) | 3) +#define MT7623_PIN_39_JTMS_FUNC_DFD_TMS_XI (MTK_PIN_NO(39) | 4) + +#define MT7623_PIN_40_JTCK_FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define MT7623_PIN_40_JTCK_FUNC_JTCK (MTK_PIN_NO(40) | 1) +#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_TCK1 (MTK_PIN_NO(40) | 2) +#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(40) | 3) +#define MT7623_PIN_40_JTCK_FUNC_DFD_TCK_XI (MTK_PIN_NO(40) | 4) + +#define MT7623_PIN_41_JTDI_FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define MT7623_PIN_41_JTDI_FUNC_JTDI (MTK_PIN_NO(41) | 1) +#define MT7623_PIN_41_JTDI_FUNC_CONN_MCU_TDI (MTK_PIN_NO(41) | 2) +#define MT7623_PIN_41_JTDI_FUNC_DFD_TDI_XI (MTK_PIN_NO(41) | 4) + +#define MT7623_PIN_42_JTDO_FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define MT7623_PIN_42_JTDO_FUNC_JTDO (MTK_PIN_NO(42) | 1) +#define MT7623_PIN_42_JTDO_FUNC_CONN_MCU_TDO (MTK_PIN_NO(42) | 2) +#define MT7623_PIN_42_JTDO_FUNC_DFD_TDO (MTK_PIN_NO(42) | 4) + +#define MT7623_PIN_43_NCLE_FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define MT7623_PIN_43_NCLE_FUNC_NCLE (MTK_PIN_NO(43) | 1) +#define MT7623_PIN_43_NCLE_FUNC_EXT_XCS2 (MTK_PIN_NO(43) | 2) + +#define MT7623_PIN_44_NCEB1_FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define MT7623_PIN_44_NCEB1_FUNC_NCEB1 (MTK_PIN_NO(44) | 1) +#define MT7623_PIN_44_NCEB1_FUNC_IDDIG (MTK_PIN_NO(44) | 2) + +#define MT7623_PIN_45_NCEB0_FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define MT7623_PIN_45_NCEB0_FUNC_NCEB0 (MTK_PIN_NO(45) | 1) +#define MT7623_PIN_45_NCEB0_FUNC_DRV_VBUS (MTK_PIN_NO(45) | 2) + +#define MT7623_PIN_46_IR_FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define MT7623_PIN_46_IR_FUNC_IR (MTK_PIN_NO(46) | 1) + +#define MT7623_PIN_47_NREB_FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define MT7623_PIN_47_NREB_FUNC_NREB (MTK_PIN_NO(47) | 1) +#define MT7623_PIN_47_NREB_FUNC_IDDIG_P1 (MTK_PIN_NO(47) | 2) + +#define MT7623_PIN_48_NRNB_FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define MT7623_PIN_48_NRNB_FUNC_NRNB (MTK_PIN_NO(48) | 1) +#define MT7623_PIN_48_NRNB_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(48) | 2) + +#define MT7623_PIN_49_I2S0_DATA_FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA (MTK_PIN_NO(49) | 1) +#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA_BYPS (MTK_PIN_NO(49) | 2) +#define MT7623_PIN_49_I2S0_DATA_FUNC_PCM_TX (MTK_PIN_NO(49) | 3) +#define MT7623_PIN_49_I2S0_DATA_FUNC_AP_I2S_DO 
(MTK_PIN_NO(49) | 6) + +#define MT7623_PIN_53_SPI0_CSN_FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define MT7623_PIN_53_SPI0_CSN_FUNC_SPI0_CS (MTK_PIN_NO(53) | 1) +#define MT7623_PIN_53_SPI0_CSN_FUNC_SPDIF (MTK_PIN_NO(53) | 3) +#define MT7623_PIN_53_SPI0_CSN_FUNC_ADC_CK (MTK_PIN_NO(53) | 4) +#define MT7623_PIN_53_SPI0_CSN_FUNC_PWM1 (MTK_PIN_NO(53) | 5) + +#define MT7623_PIN_54_SPI0_CK_FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define MT7623_PIN_54_SPI0_CK_FUNC_SPI0_CK (MTK_PIN_NO(54) | 1) +#define MT7623_PIN_54_SPI0_CK_FUNC_SPDIF_IN1 (MTK_PIN_NO(54) | 3) +#define MT7623_PIN_54_SPI0_CK_FUNC_ADC_DAT_IN (MTK_PIN_NO(54) | 4) + +#define MT7623_PIN_55_SPI0_MI_FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MI (MTK_PIN_NO(55) | 1) +#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MO (MTK_PIN_NO(55) | 2) +#define MT7623_PIN_55_SPI0_MI_FUNC_MSDC1_WP (MTK_PIN_NO(55) | 3) +#define MT7623_PIN_55_SPI0_MI_FUNC_ADC_WS (MTK_PIN_NO(55) | 4) +#define MT7623_PIN_55_SPI0_MI_FUNC_PWM2 (MTK_PIN_NO(55) | 5) + +#define MT7623_PIN_56_SPI0_MO_FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MO (MTK_PIN_NO(56) | 1) +#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MI (MTK_PIN_NO(56) | 2) +#define MT7623_PIN_56_SPI0_MO_FUNC_SPDIF_IN0 (MTK_PIN_NO(56) | 3) + +#define MT7623_PIN_57_SDA1_FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define MT7623_PIN_57_SDA1_FUNC_SDA1 (MTK_PIN_NO(57) | 1) + +#define MT7623_PIN_58_SCL1_FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define MT7623_PIN_58_SCL1_FUNC_SCL1 (MTK_PIN_NO(58) | 1) + +#define MT7623_PIN_60_WB_RSTB_FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define MT7623_PIN_60_WB_RSTB_FUNC_WB_RSTB (MTK_PIN_NO(60) | 1) + +#define MT7623_PIN_61_GPIO61_FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define MT7623_PIN_61_GPIO61_FUNC_TEST_FD (MTK_PIN_NO(61) | 1) + +#define MT7623_PIN_62_GPIO62_FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define MT7623_PIN_62_GPIO62_FUNC_TEST_FC (MTK_PIN_NO(62) | 1) + +#define MT7623_PIN_63_WB_SCLK_FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define MT7623_PIN_63_WB_SCLK_FUNC_WB_SCLK (MTK_PIN_NO(63) | 1) + +#define MT7623_PIN_64_WB_SDATA_FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define MT7623_PIN_64_WB_SDATA_FUNC_WB_SDATA (MTK_PIN_NO(64) | 1) + +#define MT7623_PIN_65_WB_SEN_FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define MT7623_PIN_65_WB_SEN_FUNC_WB_SEN (MTK_PIN_NO(65) | 1) + +#define MT7623_PIN_66_WB_CRTL0_FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define MT7623_PIN_66_WB_CRTL0_FUNC_WB_CRTL0 (MTK_PIN_NO(66) | 1) + +#define MT7623_PIN_67_WB_CRTL1_FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define MT7623_PIN_67_WB_CRTL1_FUNC_WB_CRTL1 (MTK_PIN_NO(67) | 1) + +#define MT7623_PIN_68_WB_CRTL2_FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define MT7623_PIN_68_WB_CRTL2_FUNC_WB_CRTL2 (MTK_PIN_NO(68) | 1) + +#define MT7623_PIN_69_WB_CRTL3_FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define MT7623_PIN_69_WB_CRTL3_FUNC_WB_CRTL3 (MTK_PIN_NO(69) | 1) + +#define MT7623_PIN_70_WB_CRTL4_FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define MT7623_PIN_70_WB_CRTL4_FUNC_WB_CRTL4 (MTK_PIN_NO(70) | 1) + +#define MT7623_PIN_71_WB_CRTL5_FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define MT7623_PIN_71_WB_CRTL5_FUNC_WB_CRTL5 (MTK_PIN_NO(71) | 1) + +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_I2S0_DATA_IN (MTK_PIN_NO(72) | 1) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(72) | 3) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_PWM0 (MTK_PIN_NO(72) | 4) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_DISP_PWM (MTK_PIN_NO(72) | 5) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_AP_I2S_DI (MTK_PIN_NO(72) | 6) + +#define 
MT7623_PIN_73_I2S0_LRCK_FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +#define MT7623_PIN_73_I2S0_LRCK_FUNC_I2S0_LRCK (MTK_PIN_NO(73) | 1) +#define MT7623_PIN_73_I2S0_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(73) | 3) +#define MT7623_PIN_73_I2S0_LRCK_FUNC_AP_I2S_LRCK (MTK_PIN_NO(73) | 6) + +#define MT7623_PIN_74_I2S0_BCK_FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define MT7623_PIN_74_I2S0_BCK_FUNC_I2S0_BCK (MTK_PIN_NO(74) | 1) +#define MT7623_PIN_74_I2S0_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(74) | 3) +#define MT7623_PIN_74_I2S0_BCK_FUNC_AP_I2S_BCK (MTK_PIN_NO(74) | 6) + +#define MT7623_PIN_75_SDA0_FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define MT7623_PIN_75_SDA0_FUNC_SDA0 (MTK_PIN_NO(75) | 1) + +#define MT7623_PIN_76_SCL0_FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define MT7623_PIN_76_SCL0_FUNC_SCL0 (MTK_PIN_NO(76) | 1) + +#define MT7623_PIN_77_SDA2_FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define MT7623_PIN_77_SDA2_FUNC_SDA2 (MTK_PIN_NO(77) | 1) + +#define MT7623_PIN_78_SCL2_FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define MT7623_PIN_78_SCL2_FUNC_SCL2 (MTK_PIN_NO(78) | 1) + +#define MT7623_PIN_79_URXD0_FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define MT7623_PIN_79_URXD0_FUNC_URXD0 (MTK_PIN_NO(79) | 1) +#define MT7623_PIN_79_URXD0_FUNC_UTXD0 (MTK_PIN_NO(79) | 2) + +#define MT7623_PIN_80_UTXD0_FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define MT7623_PIN_80_UTXD0_FUNC_UTXD0 (MTK_PIN_NO(80) | 1) +#define MT7623_PIN_80_UTXD0_FUNC_URXD0 (MTK_PIN_NO(80) | 2) + +#define MT7623_PIN_81_URXD1_FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define MT7623_PIN_81_URXD1_FUNC_URXD1 (MTK_PIN_NO(81) | 1) +#define MT7623_PIN_81_URXD1_FUNC_UTXD1 (MTK_PIN_NO(81) | 2) + +#define MT7623_PIN_82_UTXD1_FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define MT7623_PIN_82_UTXD1_FUNC_UTXD1 (MTK_PIN_NO(82) | 1) +#define MT7623_PIN_82_UTXD1_FUNC_URXD1 (MTK_PIN_NO(82) | 2) + +#define MT7623_PIN_83_LCM_RST_FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define MT7623_PIN_83_LCM_RST_FUNC_LCM_RST (MTK_PIN_NO(83) | 1) +#define MT7623_PIN_83_LCM_RST_FUNC_VDAC_CK_XI (MTK_PIN_NO(83) | 2) + +#define MT7623_PIN_84_DSI_TE_FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define MT7623_PIN_84_DSI_TE_FUNC_DSI_TE (MTK_PIN_NO(84) | 1) + +#define MT7623_PIN_91_MIPI_TDN3_FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define MT7623_PIN_91_MIPI_TDN3_FUNC_TDN3 (MTK_PIN_NO(91) | 1) + +#define MT7623_PIN_92_MIPI_TDP3_FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define MT7623_PIN_92_MIPI_TDP3_FUNC_TDP3 (MTK_PIN_NO(92) | 1) + +#define MT7623_PIN_93_MIPI_TDN2_FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define MT7623_PIN_93_MIPI_TDN2_FUNC_TDN2 (MTK_PIN_NO(93) | 1) + +#define MT7623_PIN_94_MIPI_TDP2_FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define MT7623_PIN_94_MIPI_TDP2_FUNC_TDP2 (MTK_PIN_NO(94) | 1) + +#define MT7623_PIN_95_MIPI_TCN_FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define MT7623_PIN_95_MIPI_TCN_FUNC_TCN (MTK_PIN_NO(95) | 1) + +#define MT7623_PIN_96_MIPI_TCP_FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define MT7623_PIN_96_MIPI_TCP_FUNC_TCP (MTK_PIN_NO(96) | 1) + +#define MT7623_PIN_97_MIPI_TDN1_FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define MT7623_PIN_97_MIPI_TDN1_FUNC_TDN1 (MTK_PIN_NO(97) | 1) + +#define MT7623_PIN_98_MIPI_TDP1_FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define MT7623_PIN_98_MIPI_TDP1_FUNC_TDP1 (MTK_PIN_NO(98) | 1) + +#define MT7623_PIN_99_MIPI_TDN0_FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define MT7623_PIN_99_MIPI_TDN0_FUNC_TDN0 (MTK_PIN_NO(99) | 1) + +#define MT7623_PIN_100_MIPI_TDP0_FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define MT7623_PIN_100_MIPI_TDP0_FUNC_TDP0 (MTK_PIN_NO(100) | 1) + +#define MT7623_PIN_101_SPI2_CSN_FUNC_GPIO101 (MTK_PIN_NO(101) | 0) +#define MT7623_PIN_101_SPI2_CSN_FUNC_SPI2_CS (MTK_PIN_NO(101) | 1) 
+#define MT7623_PIN_101_SPI2_CSN_FUNC_SCL3 (MTK_PIN_NO(101) | 3) +#define MT7623_PIN_101_SPI2_CSN_FUNC_KROW0 (MTK_PIN_NO(101) | 4) + +#define MT7623_PIN_102_SPI2_MI_FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MI (MTK_PIN_NO(102) | 1) +#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MO (MTK_PIN_NO(102) | 2) +#define MT7623_PIN_102_SPI2_MI_FUNC_SDA3 (MTK_PIN_NO(102) | 3) +#define MT7623_PIN_102_SPI2_MI_FUNC_KROW1 (MTK_PIN_NO(102) | 4) + +#define MT7623_PIN_103_SPI2_MO_FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MO (MTK_PIN_NO(103) | 1) +#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MI (MTK_PIN_NO(103) | 2) +#define MT7623_PIN_103_SPI2_MO_FUNC_SCL3 (MTK_PIN_NO(103) | 3) +#define MT7623_PIN_103_SPI2_MO_FUNC_KROW2 (MTK_PIN_NO(103) | 4) + +#define MT7623_PIN_104_SPI2_CK_FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define MT7623_PIN_104_SPI2_CK_FUNC_SPI2_CK (MTK_PIN_NO(104) | 1) +#define MT7623_PIN_104_SPI2_CK_FUNC_SDA3 (MTK_PIN_NO(104) | 3) +#define MT7623_PIN_104_SPI2_CK_FUNC_KROW3 (MTK_PIN_NO(104) | 4) + +#define MT7623_PIN_105_MSDC1_CMD_FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1) +#define MT7623_PIN_105_MSDC1_CMD_FUNC_SDA1 (MTK_PIN_NO(105) | 3) +#define MT7623_PIN_105_MSDC1_CMD_FUNC_I2SOUT_BCK (MTK_PIN_NO(105) | 6) + +#define MT7623_PIN_106_MSDC1_CLK_FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define MT7623_PIN_106_MSDC1_CLK_FUNC_MSDC1_CLK (MTK_PIN_NO(106) | 1) +#define MT7623_PIN_106_MSDC1_CLK_FUNC_SCL1 (MTK_PIN_NO(106) | 3) +#define MT7623_PIN_106_MSDC1_CLK_FUNC_I2SOUT_LRCK (MTK_PIN_NO(106) | 6) + +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_MSDC1_DAT0 (MTK_PIN_NO(107) | 1) +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_UTXD0 (MTK_PIN_NO(107) | 5) +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_I2SOUT_DATA_OUT (MTK_PIN_NO(107) | 6) + +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_MSDC1_DAT1 (MTK_PIN_NO(108) | 1) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_PWM0 (MTK_PIN_NO(108) | 3) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_URXD0 (MTK_PIN_NO(108) | 5) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_PWM1 (MTK_PIN_NO(108) | 6) + +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_MSDC1_DAT2 (MTK_PIN_NO(109) | 1) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_SDA2 (MTK_PIN_NO(109) | 3) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_UTXD1 (MTK_PIN_NO(109) | 5) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_PWM2 (MTK_PIN_NO(109) | 6) + +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_MSDC1_DAT3 (MTK_PIN_NO(110) | 1) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_SCL2 (MTK_PIN_NO(110) | 3) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_URXD1 (MTK_PIN_NO(110) | 5) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_PWM3 (MTK_PIN_NO(110) | 6) + +#define MT7623_PIN_111_MSDC0_DAT7_FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define MT7623_PIN_111_MSDC0_DAT7_FUNC_MSDC0_DAT7 (MTK_PIN_NO(111) | 1) +#define MT7623_PIN_111_MSDC0_DAT7_FUNC_NLD7 (MTK_PIN_NO(111) | 4) + +#define MT7623_PIN_112_MSDC0_DAT6_FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define MT7623_PIN_112_MSDC0_DAT6_FUNC_MSDC0_DAT6 (MTK_PIN_NO(112) | 1) +#define MT7623_PIN_112_MSDC0_DAT6_FUNC_NLD6 (MTK_PIN_NO(112) | 4) + +#define MT7623_PIN_113_MSDC0_DAT5_FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define MT7623_PIN_113_MSDC0_DAT5_FUNC_MSDC0_DAT5 (MTK_PIN_NO(113) | 1) +#define 
MT7623_PIN_113_MSDC0_DAT5_FUNC_NLD5 (MTK_PIN_NO(113) | 4) + +#define MT7623_PIN_114_MSDC0_DAT4_FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define MT7623_PIN_114_MSDC0_DAT4_FUNC_MSDC0_DAT4 (MTK_PIN_NO(114) | 1) +#define MT7623_PIN_114_MSDC0_DAT4_FUNC_NLD4 (MTK_PIN_NO(114) | 4) + +#define MT7623_PIN_115_MSDC0_RSTB_FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define MT7623_PIN_115_MSDC0_RSTB_FUNC_MSDC0_RSTB (MTK_PIN_NO(115) | 1) +#define MT7623_PIN_115_MSDC0_RSTB_FUNC_NLD8 (MTK_PIN_NO(115) | 4) + +#define MT7623_PIN_116_MSDC0_CMD_FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define MT7623_PIN_116_MSDC0_CMD_FUNC_MSDC0_CMD (MTK_PIN_NO(116) | 1) +#define MT7623_PIN_116_MSDC0_CMD_FUNC_NALE (MTK_PIN_NO(116) | 4) + +#define MT7623_PIN_117_MSDC0_CLK_FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define MT7623_PIN_117_MSDC0_CLK_FUNC_MSDC0_CLK (MTK_PIN_NO(117) | 1) +#define MT7623_PIN_117_MSDC0_CLK_FUNC_NWEB (MTK_PIN_NO(117) | 4) + +#define MT7623_PIN_118_MSDC0_DAT3_FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define MT7623_PIN_118_MSDC0_DAT3_FUNC_MSDC0_DAT3 (MTK_PIN_NO(118) | 1) +#define MT7623_PIN_118_MSDC0_DAT3_FUNC_NLD3 (MTK_PIN_NO(118) | 4) + +#define MT7623_PIN_119_MSDC0_DAT2_FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define MT7623_PIN_119_MSDC0_DAT2_FUNC_MSDC0_DAT2 (MTK_PIN_NO(119) | 1) +#define MT7623_PIN_119_MSDC0_DAT2_FUNC_NLD2 (MTK_PIN_NO(119) | 4) + +#define MT7623_PIN_120_MSDC0_DAT1_FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define MT7623_PIN_120_MSDC0_DAT1_FUNC_MSDC0_DAT1 (MTK_PIN_NO(120) | 1) +#define MT7623_PIN_120_MSDC0_DAT1_FUNC_NLD1 (MTK_PIN_NO(120) | 4) + +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_MSDC0_DAT0 (MTK_PIN_NO(121) | 1) +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_NLD0 (MTK_PIN_NO(121) | 4) +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_WATCHDOG (MTK_PIN_NO(121) | 5) + +#define MT7623_PIN_122_GPIO122_FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define MT7623_PIN_122_GPIO122_FUNC_CEC (MTK_PIN_NO(122) | 1) +#define MT7623_PIN_122_GPIO122_FUNC_SDA2 (MTK_PIN_NO(122) | 4) +#define MT7623_PIN_122_GPIO122_FUNC_URXD0 (MTK_PIN_NO(122) | 5) + +#define MT7623_PIN_123_HTPLG_FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define MT7623_PIN_123_HTPLG_FUNC_HTPLG (MTK_PIN_NO(123) | 1) +#define MT7623_PIN_123_HTPLG_FUNC_SCL2 (MTK_PIN_NO(123) | 4) +#define MT7623_PIN_123_HTPLG_FUNC_UTXD0 (MTK_PIN_NO(123) | 5) + +#define MT7623_PIN_124_GPIO124_FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define MT7623_PIN_124_GPIO124_FUNC_HDMISCK (MTK_PIN_NO(124) | 1) +#define MT7623_PIN_124_GPIO124_FUNC_SDA1 (MTK_PIN_NO(124) | 4) +#define MT7623_PIN_124_GPIO124_FUNC_PWM3 (MTK_PIN_NO(124) | 5) + +#define MT7623_PIN_125_GPIO125_FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define MT7623_PIN_125_GPIO125_FUNC_HDMISD (MTK_PIN_NO(125) | 1) +#define MT7623_PIN_125_GPIO125_FUNC_SCL1 (MTK_PIN_NO(125) | 4) +#define MT7623_PIN_125_GPIO125_FUNC_PWM4 (MTK_PIN_NO(125) | 5) + +#define MT7623_PIN_126_I2S0_MCLK_FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define MT7623_PIN_126_I2S0_MCLK_FUNC_I2S0_MCLK (MTK_PIN_NO(126) | 1) +#define MT7623_PIN_126_I2S0_MCLK_FUNC_AP_I2S_MCLK (MTK_PIN_NO(126) | 6) + +#define MT7623_PIN_199_SPI1_CK_FUNC_GPIO199 (MTK_PIN_NO(199) | 0) +#define MT7623_PIN_199_SPI1_CK_FUNC_SPI1_CK (MTK_PIN_NO(199) | 1) + +#define MT7623_PIN_200_URXD2_FUNC_GPIO200 (MTK_PIN_NO(200) | 0) +#define MT7623_PIN_200_URXD2_FUNC_URXD2 (MTK_PIN_NO(200) | 6) + +#define MT7623_PIN_201_UTXD2_FUNC_GPIO201 (MTK_PIN_NO(201) | 0) +#define MT7623_PIN_201_UTXD2_FUNC_UTXD2 (MTK_PIN_NO(201) | 6) + +#define MT7623_PIN_203_PWM0_FUNC_GPIO203 (MTK_PIN_NO(203) | 0) +#define 
MT7623_PIN_203_PWM0_FUNC_PWM0 (MTK_PIN_NO(203) | 1) +#define MT7623_PIN_203_PWM0_FUNC_DISP_PWM (MTK_PIN_NO(203) | 2) + +#define MT7623_PIN_204_PWM1_FUNC_GPIO204 (MTK_PIN_NO(204) | 0) +#define MT7623_PIN_204_PWM1_FUNC_PWM1 (MTK_PIN_NO(204) | 1) + +#define MT7623_PIN_205_PWM2_FUNC_GPIO205 (MTK_PIN_NO(205) | 0) +#define MT7623_PIN_205_PWM2_FUNC_PWM2 (MTK_PIN_NO(205) | 1) + +#define MT7623_PIN_206_PWM3_FUNC_GPIO206 (MTK_PIN_NO(206) | 0) +#define MT7623_PIN_206_PWM3_FUNC_PWM3 (MTK_PIN_NO(206) | 1) + +#define MT7623_PIN_207_PWM4_FUNC_GPIO207 (MTK_PIN_NO(207) | 0) +#define MT7623_PIN_207_PWM4_FUNC_PWM4 (MTK_PIN_NO(207) | 1) + +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_GPIO208 (MTK_PIN_NO(208) | 0) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_AUD_EXT_CK1 (MTK_PIN_NO(208) | 1) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_PWM0 (MTK_PIN_NO(208) | 2) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_PCIE0_PERST_N (MTK_PIN_NO(208) | 3) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_DISP_PWM (MTK_PIN_NO(208) | 5) + +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_GPIO209 (MTK_PIN_NO(209) | 0) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_AUD_EXT_CK2 (MTK_PIN_NO(209) | 1) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_MSDC1_WP (MTK_PIN_NO(209) | 2) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_PCIE1_PERST_N (MTK_PIN_NO(209) | 3) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_PWM1 (MTK_PIN_NO(209) | 5) + +#define MT7623_PIN_236_EXT_SDIO3_FUNC_GPIO236 (MTK_PIN_NO(236) | 0) +#define MT7623_PIN_236_EXT_SDIO3_FUNC_EXT_SDIO3 (MTK_PIN_NO(236) | 1) +#define MT7623_PIN_236_EXT_SDIO3_FUNC_IDDIG (MTK_PIN_NO(236) | 2) + +#define MT7623_PIN_237_EXT_SDIO2_FUNC_GPIO237 (MTK_PIN_NO(237) | 0) +#define MT7623_PIN_237_EXT_SDIO2_FUNC_EXT_SDIO2 (MTK_PIN_NO(237) | 1) +#define MT7623_PIN_237_EXT_SDIO2_FUNC_DRV_VBUS (MTK_PIN_NO(237) | 2) + +#define MT7623_PIN_238_EXT_SDIO1_FUNC_GPIO238 (MTK_PIN_NO(238) | 0) +#define MT7623_PIN_238_EXT_SDIO1_FUNC_EXT_SDIO1 (MTK_PIN_NO(238) | 1) + +#define MT7623_PIN_239_EXT_SDIO0_FUNC_GPIO239 (MTK_PIN_NO(239) | 0) +#define MT7623_PIN_239_EXT_SDIO0_FUNC_EXT_SDIO0 (MTK_PIN_NO(239) | 1) + +#define MT7623_PIN_240_EXT_XCS_FUNC_GPIO240 (MTK_PIN_NO(240) | 0) +#define MT7623_PIN_240_EXT_XCS_FUNC_EXT_XCS (MTK_PIN_NO(240) | 1) + +#define MT7623_PIN_241_EXT_SCK_FUNC_GPIO241 (MTK_PIN_NO(241) | 0) +#define MT7623_PIN_241_EXT_SCK_FUNC_EXT_SCK (MTK_PIN_NO(241) | 1) + +#define MT7623_PIN_242_URTS2_FUNC_GPIO242 (MTK_PIN_NO(242) | 0) +#define MT7623_PIN_242_URTS2_FUNC_URTS2 (MTK_PIN_NO(242) | 1) +#define MT7623_PIN_242_URTS2_FUNC_UTXD3 (MTK_PIN_NO(242) | 2) +#define MT7623_PIN_242_URTS2_FUNC_URXD3 (MTK_PIN_NO(242) | 3) +#define MT7623_PIN_242_URTS2_FUNC_SCL1 (MTK_PIN_NO(242) | 4) + +#define MT7623_PIN_243_UCTS2_FUNC_GPIO243 (MTK_PIN_NO(243) | 0) +#define MT7623_PIN_243_UCTS2_FUNC_UCTS2 (MTK_PIN_NO(243) | 1) +#define MT7623_PIN_243_UCTS2_FUNC_URXD3 (MTK_PIN_NO(243) | 2) +#define MT7623_PIN_243_UCTS2_FUNC_UTXD3 (MTK_PIN_NO(243) | 3) +#define MT7623_PIN_243_UCTS2_FUNC_SDA1 (MTK_PIN_NO(243) | 4) + +#define MT7623_PIN_250_GPIO250_FUNC_GPIO250 (MTK_PIN_NO(250) | 0) +#define MT7623_PIN_250_GPIO250_FUNC_TEST_MD7 (MTK_PIN_NO(250) | 1) +#define MT7623_PIN_250_GPIO250_FUNC_PCIE0_CLKREQ_N (MTK_PIN_NO(250) | 6) + +#define MT7623_PIN_251_GPIO251_FUNC_GPIO251 (MTK_PIN_NO(251) | 0) +#define MT7623_PIN_251_GPIO251_FUNC_TEST_MD6 (MTK_PIN_NO(251) | 1) +#define MT7623_PIN_251_GPIO251_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(251) | 6) + +#define MT7623_PIN_252_GPIO252_FUNC_GPIO252 (MTK_PIN_NO(252) | 0) +#define MT7623_PIN_252_GPIO252_FUNC_TEST_MD5 (MTK_PIN_NO(252) | 1) +#define 
MT7623_PIN_252_GPIO252_FUNC_PCIE1_CLKREQ_N (MTK_PIN_NO(252) | 6) + +#define MT7623_PIN_253_GPIO253_FUNC_GPIO253 (MTK_PIN_NO(253) | 0) +#define MT7623_PIN_253_GPIO253_FUNC_TEST_MD4 (MTK_PIN_NO(253) | 1) +#define MT7623_PIN_253_GPIO253_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(253) | 6) + +#define MT7623_PIN_254_GPIO254_FUNC_GPIO254 (MTK_PIN_NO(254) | 0) +#define MT7623_PIN_254_GPIO254_FUNC_TEST_MD3 (MTK_PIN_NO(254) | 1) +#define MT7623_PIN_254_GPIO254_FUNC_PCIE2_CLKREQ_N (MTK_PIN_NO(254) | 6) + +#define MT7623_PIN_255_GPIO255_FUNC_GPIO255 (MTK_PIN_NO(255) | 0) +#define MT7623_PIN_255_GPIO255_FUNC_TEST_MD2 (MTK_PIN_NO(255) | 1) +#define MT7623_PIN_255_GPIO255_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(255) | 6) + +#define MT7623_PIN_256_GPIO256_FUNC_GPIO256 (MTK_PIN_NO(256) | 0) +#define MT7623_PIN_256_GPIO256_FUNC_TEST_MD1 (MTK_PIN_NO(256) | 1) + +#define MT7623_PIN_257_GPIO257_FUNC_GPIO257 (MTK_PIN_NO(257) | 0) +#define MT7623_PIN_257_GPIO257_FUNC_TEST_MD0 (MTK_PIN_NO(257) | 1) + +#define MT7623_PIN_261_MSDC1_INS_FUNC_GPIO261 (MTK_PIN_NO(261) | 0) +#define MT7623_PIN_261_MSDC1_INS_FUNC_MSDC1_INS (MTK_PIN_NO(261) | 1) + +#define MT7623_PIN_262_G2_TXEN_FUNC_GPIO262 (MTK_PIN_NO(262) | 0) +#define MT7623_PIN_262_G2_TXEN_FUNC_G2_TXEN (MTK_PIN_NO(262) | 1) + +#define MT7623_PIN_263_G2_TXD3_FUNC_GPIO263 (MTK_PIN_NO(263) | 0) +#define MT7623_PIN_263_G2_TXD3_FUNC_G2_TXD3 (MTK_PIN_NO(263) | 1) + +#define MT7623_PIN_264_G2_TXD2_FUNC_GPIO264 (MTK_PIN_NO(264) | 0) +#define MT7623_PIN_264_G2_TXD2_FUNC_G2_TXD2 (MTK_PIN_NO(264) | 1) + +#define MT7623_PIN_265_G2_TXD1_FUNC_GPIO265 (MTK_PIN_NO(265) | 0) +#define MT7623_PIN_265_G2_TXD1_FUNC_G2_TXD1 (MTK_PIN_NO(265) | 1) + +#define MT7623_PIN_266_G2_TXD0_FUNC_GPIO266 (MTK_PIN_NO(266) | 0) +#define MT7623_PIN_266_G2_TXD0_FUNC_G2_TXD0 (MTK_PIN_NO(266) | 1) + +#define MT7623_PIN_267_G2_TXCLK_FUNC_GPIO267 (MTK_PIN_NO(267) | 0) +#define MT7623_PIN_267_G2_TXCLK_FUNC_G2_TXC (MTK_PIN_NO(267) | 1) + +#define MT7623_PIN_268_G2_RXCLK_FUNC_GPIO268 (MTK_PIN_NO(268) | 0) +#define MT7623_PIN_268_G2_RXCLK_FUNC_G2_RXC (MTK_PIN_NO(268) | 1) + +#define MT7623_PIN_269_G2_RXD0_FUNC_GPIO269 (MTK_PIN_NO(269) | 0) +#define MT7623_PIN_269_G2_RXD0_FUNC_G2_RXD0 (MTK_PIN_NO(269) | 1) + +#define MT7623_PIN_270_G2_RXD1_FUNC_GPIO270 (MTK_PIN_NO(270) | 0) +#define MT7623_PIN_270_G2_RXD1_FUNC_G2_RXD1 (MTK_PIN_NO(270) | 1) + +#define MT7623_PIN_271_G2_RXD2_FUNC_GPIO271 (MTK_PIN_NO(271) | 0) +#define MT7623_PIN_271_G2_RXD2_FUNC_G2_RXD2 (MTK_PIN_NO(271) | 1) + +#define MT7623_PIN_272_G2_RXD3_FUNC_GPIO272 (MTK_PIN_NO(272) | 0) +#define MT7623_PIN_272_G2_RXD3_FUNC_G2_RXD3 (MTK_PIN_NO(272) | 1) + +#define MT7623_PIN_274_G2_RXDV_FUNC_GPIO274 (MTK_PIN_NO(274) | 0) +#define MT7623_PIN_274_G2_RXDV_FUNC_G2_RXDV (MTK_PIN_NO(274) | 1) + +#define MT7623_PIN_275_G2_MDC_FUNC_GPIO275 (MTK_PIN_NO(275) | 0) +#define MT7623_PIN_275_G2_MDC_FUNC_MDC (MTK_PIN_NO(275) | 1) + +#define MT7623_PIN_276_G2_MDIO_FUNC_GPIO276 (MTK_PIN_NO(276) | 0) +#define MT7623_PIN_276_G2_MDIO_FUNC_MDIO (MTK_PIN_NO(276) | 1) + +#define MT7623_PIN_278_JTAG_RESET_FUNC_GPIO278 (MTK_PIN_NO(278) | 0) +#define MT7623_PIN_278_JTAG_RESET_FUNC_JTAG_RESET (MTK_PIN_NO(278) | 1) + +#endif /* __DTS_MT7623_PINFUNC_H */ diff --git a/include/dt-bindings/pinctrl/nomadik.h b/include/dt-bindings/pinctrl/nomadik.h new file mode 100644 index 000000000..638fb321a --- /dev/null +++ b/include/dt-bindings/pinctrl/nomadik.h @@ -0,0 +1,36 @@ +/* + * nomadik.h + * + * Copyright (C) ST-Ericsson SA 2013 + * Author: Gabriel Fernandez for ST-Ericsson. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#define INPUT_NOPULL 0 +#define INPUT_PULLUP 1 +#define INPUT_PULLDOWN 2 + +#define OUTPUT_LOW 0 +#define OUTPUT_HIGH 1 +#define DIR_OUTPUT 2 + +#define SLPM_DISABLED 0 +#define SLPM_ENABLED 1 + +#define SLPM_INPUT_NOPULL 0 +#define SLPM_INPUT_PULLUP 1 +#define SLPM_INPUT_PULLDOWN 2 +#define SLPM_DIR_INPUT 3 + +#define SLPM_OUTPUT_LOW 0 +#define SLPM_OUTPUT_HIGH 1 +#define SLPM_DIR_OUTPUT 2 + +#define SLPM_WAKEUP_DISABLE 0 +#define SLPM_WAKEUP_ENABLE 1 + +#define GPIOMODE_DISABLED 0 +#define GPIOMODE_ENABLED 1 + +#define SLPM_PDIS_DISABLED 0 +#define SLPM_PDIS_ENABLED 1 diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h new file mode 100644 index 000000000..49b5dea2b --- /dev/null +++ b/include/dt-bindings/pinctrl/omap.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for OMAP pinctrl bindings. + * + * Copyright (C) 2009 Nokia + * Copyright (C) 2009-2010 Texas Instruments + */ + +#ifndef _DT_BINDINGS_PINCTRL_OMAP_H +#define _DT_BINDINGS_PINCTRL_OMAP_H + +/* 34xx mux mode options for each pin. See TRM for options */ +#define MUX_MODE0 0 +#define MUX_MODE1 1 +#define MUX_MODE2 2 +#define MUX_MODE3 3 +#define MUX_MODE4 4 +#define MUX_MODE5 5 +#define MUX_MODE6 6 +#define MUX_MODE7 7 + +/* 24xx/34xx mux bit defines */ +#define PULL_ENA (1 << 3) +#define PULL_UP (1 << 4) +#define ALTELECTRICALSEL (1 << 5) + +/* omap3/4/5 specific mux bit defines */ +#define INPUT_EN (1 << 8) +#define OFF_EN (1 << 9) +#define OFFOUT_EN (1 << 10) +#define OFFOUT_VAL (1 << 11) +#define OFF_PULL_EN (1 << 12) +#define OFF_PULL_UP (1 << 13) +#define WAKEUP_EN (1 << 14) +#define WAKEUP_EVENT (1 << 15) + +/* Active pin states */ +#define PIN_OUTPUT 0 +#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP) +#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA) +#define PIN_INPUT INPUT_EN +#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN) + +/* Off mode states */ +#define PIN_OFF_NONE 0 +#define PIN_OFF_OUTPUT_HIGH (OFF_EN | OFFOUT_EN | OFFOUT_VAL) +#define PIN_OFF_OUTPUT_LOW (OFF_EN | OFFOUT_EN) +#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFFOUT_EN | OFF_PULL_EN | OFF_PULL_UP) +#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFFOUT_EN | OFF_PULL_EN) +#define PIN_OFF_WAKEUPENABLE WAKEUP_EN + +/* + * Macros to allow using the absolute physical address instead of the + * padconf registers instead of the offset from padconf base. + */ +#define OMAP_IOPAD_OFFSET(pa, offset) (((pa) & 0xffff) - (offset)) + +#define OMAP2420_CORE_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0030) (val) +#define OMAP2430_CORE_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x2030) (val) +#define OMAP3_CORE1_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x2030) (val) +#define OMAP3430_CORE2_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x25d8) (val) +#define OMAP3630_CORE2_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x25a0) (val) +#define OMAP3_WKUP_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x2a00) (val) +#define DM814X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) +#define DM816X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) +#define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) + +/* + * Macros to allow using the offset from the padconf physical address + * instead of the offset from padconf base. 
+ */ +#define OMAP_PADCONF_OFFSET(offset, base_offset) ((offset) - (base_offset)) + +#define OMAP4_IOPAD(offset, val) OMAP_PADCONF_OFFSET((offset), 0x0040) (val) +#define OMAP5_IOPAD(offset, val) OMAP_PADCONF_OFFSET((offset), 0x0040) (val) + +/* + * Define some commonly used pins configured by the boards. + * Note that some boards use alternative pins, so check + * the schematics before using these. + */ +#define OMAP3_UART1_RX 0x152 +#define OMAP3_UART2_RX 0x14a +#define OMAP3_UART3_RX 0x16e +#define OMAP4_UART2_RX 0xdc +#define OMAP4_UART3_RX 0x104 +#define OMAP4_UART4_RX 0x11c + +#endif + diff --git a/include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h b/include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h new file mode 100644 index 000000000..ac63c399b --- /dev/null +++ b/include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_PINCTRL_TEGRA_XUSB_H +#define _DT_BINDINGS_PINCTRL_TEGRA_XUSB_H 1 + +#define TEGRA_XUSB_PADCTL_PCIE 0 +#define TEGRA_XUSB_PADCTL_SATA 1 + +#endif /* _DT_BINDINGS_PINCTRL_TEGRA_XUSB_H */ diff --git a/include/dt-bindings/pinctrl/pinctrl-tegra.h b/include/dt-bindings/pinctrl/pinctrl-tegra.h new file mode 100644 index 000000000..ebafa498b --- /dev/null +++ b/include/dt-bindings/pinctrl/pinctrl-tegra.h @@ -0,0 +1,45 @@ +/* + * This header provides constants for Tegra pinctrl bindings. + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. + * + * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _DT_BINDINGS_PINCTRL_TEGRA_H +#define _DT_BINDINGS_PINCTRL_TEGRA_H + +/* + * Enable/disable for diffeent dt properties. This is applicable for + * properties nvidia,enable-input, nvidia,tristate, nvidia,open-drain, + * nvidia,lock, nvidia,rcv-sel, nvidia,high-speed-mode, nvidia,schmitt. + */ +#define TEGRA_PIN_DISABLE 0 +#define TEGRA_PIN_ENABLE 1 + +#define TEGRA_PIN_PULL_NONE 0 +#define TEGRA_PIN_PULL_DOWN 1 +#define TEGRA_PIN_PULL_UP 2 + +/* Low power mode driver */ +#define TEGRA_PIN_LP_DRIVE_DIV_8 0 +#define TEGRA_PIN_LP_DRIVE_DIV_4 1 +#define TEGRA_PIN_LP_DRIVE_DIV_2 2 +#define TEGRA_PIN_LP_DRIVE_DIV_1 3 + +/* Rising/Falling slew rate */ +#define TEGRA_PIN_SLEW_RATE_FASTEST 0 +#define TEGRA_PIN_SLEW_RATE_FAST 1 +#define TEGRA_PIN_SLEW_RATE_SLOW 2 +#define TEGRA_PIN_SLEW_RATE_SLOWEST 3 + +#endif diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h new file mode 100644 index 000000000..e5df5ce45 --- /dev/null +++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the Qualcomm PMIC GPIO binding. 
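+ *
+ * Hypothetical usage sketch (not part of the original header): a pin
+ * configuration node typically combines these constants as, e.g.,
+ *	function = PMIC_GPIO_FUNC_NORMAL;
+ *	power-source = <PM8941_GPIO_S3>;
+ *	qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ * with property names as used by the qcom,pmic-gpio pinctrl binding.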
+ */ + +#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H +#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H + +#define PMIC_GPIO_PULL_UP_30 0 +#define PMIC_GPIO_PULL_UP_1P5 1 +#define PMIC_GPIO_PULL_UP_31P5 2 +#define PMIC_GPIO_PULL_UP_1P5_30 3 + +#define PMIC_GPIO_STRENGTH_NO 0 +#define PMIC_GPIO_STRENGTH_HIGH 1 +#define PMIC_GPIO_STRENGTH_MED 2 +#define PMIC_GPIO_STRENGTH_LOW 3 + +/* + * Note: PM8018 GPIO3 and GPIO4 are supporting + * only S3 and L2 options (1.8V) + */ +#define PM8018_GPIO_L6 0 +#define PM8018_GPIO_L5 1 +#define PM8018_GPIO_S3 2 +#define PM8018_GPIO_L14 3 +#define PM8018_GPIO_L2 4 +#define PM8018_GPIO_L4 5 +#define PM8018_GPIO_VDD 6 + +/* + * Note: PM8038 GPIO7 and GPIO8 are supporting + * only L11 and L4 options (1.8V) + */ +#define PM8038_GPIO_VPH 0 +#define PM8038_GPIO_BB 1 +#define PM8038_GPIO_L11 2 +#define PM8038_GPIO_L15 3 +#define PM8038_GPIO_L4 4 +#define PM8038_GPIO_L3 5 +#define PM8038_GPIO_L17 6 + +#define PM8058_GPIO_VPH 0 +#define PM8058_GPIO_BB 1 +#define PM8058_GPIO_S3 2 +#define PM8058_GPIO_L3 3 +#define PM8058_GPIO_L7 4 +#define PM8058_GPIO_L6 5 +#define PM8058_GPIO_L5 6 +#define PM8058_GPIO_L2 7 + +/* + * Note: PM8916 GPIO1 and GPIO2 are supporting + * only L2(1.15V) and L5(1.8V) options + */ +#define PM8916_GPIO_VPH 0 +#define PM8916_GPIO_L2 2 +#define PM8916_GPIO_L5 3 + +#define PM8917_GPIO_VPH 0 +#define PM8917_GPIO_S4 2 +#define PM8917_GPIO_L15 3 +#define PM8917_GPIO_L4 4 +#define PM8917_GPIO_L3 5 +#define PM8917_GPIO_L17 6 + +#define PM8921_GPIO_VPH 0 +#define PM8921_GPIO_BB 1 +#define PM8921_GPIO_S4 2 +#define PM8921_GPIO_L15 3 +#define PM8921_GPIO_L4 4 +#define PM8921_GPIO_L3 5 +#define PM8921_GPIO_L17 6 + +/* + * Note: PM8941 gpios from 15 to 18 are supporting + * only S3 and L6 options (1.8V) + */ +#define PM8941_GPIO_VPH 0 +#define PM8941_GPIO_L1 1 +#define PM8941_GPIO_S3 2 +#define PM8941_GPIO_L6 3 + +/* + * Note: PMA8084 gpios from 15 to 18 are supporting + * only S4 and L6 options (1.8V) + */ +#define PMA8084_GPIO_VPH 0 +#define PMA8084_GPIO_L1 1 +#define PMA8084_GPIO_S4 2 +#define PMA8084_GPIO_L6 3 + +#define PM8994_GPIO_VPH 0 +#define PM8994_GPIO_S4 2 +#define PM8994_GPIO_L12 3 + +/* To be used with "function" */ +#define PMIC_GPIO_FUNC_NORMAL "normal" +#define PMIC_GPIO_FUNC_PAIRED "paired" +#define PMIC_GPIO_FUNC_FUNC1 "func1" +#define PMIC_GPIO_FUNC_FUNC2 "func2" +#define PMIC_GPIO_FUNC_FUNC3 "func3" +#define PMIC_GPIO_FUNC_FUNC4 "func4" +#define PMIC_GPIO_FUNC_DTEST1 "dtest1" +#define PMIC_GPIO_FUNC_DTEST2 "dtest2" +#define PMIC_GPIO_FUNC_DTEST3 "dtest3" +#define PMIC_GPIO_FUNC_DTEST4 "dtest4" + +#define PM8038_GPIO1_2_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO3_5V_BOOST_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO4_SSBI_ALT_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO5_6_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO10_11_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO6_7_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO9_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO6_12_KYPD_DRV PMIC_GPIO_FUNC_FUNC2 + +#define PM8058_GPIO7_8_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO7_8_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO9_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO24_26_LPG_DRV PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO33_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO34_35_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO36_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO37_UPL_OUT PMIC_GPIO_FUNC_FUNC1 +#define 
PM8058_GPIO37_UART_M_RX PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO38_39_CLK_32KHZ PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO39_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO40_EXT_BB_EN PMIC_GPIO_FUNC_FUNC1 + +#define PM8916_GPIO1_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8916_GPIO1_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 +#define PM8916_GPIO2_DIV_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8916_GPIO2_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2 +#define PM8916_GPIO3_KEYP_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8916_GPIO4_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 + +#define PM8917_GPIO9_18_KEYP_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO20_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2 +#define PM8917_GPIO25_26_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO37_38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO37_38_MP3_CLK PMIC_GPIO_FUNC_FUNC2 + +#define PM8941_GPIO9_14_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2 +#define PM8941_GPIO23_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO23_26_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2 +#define PM8941_GPIO31_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO33_36_LPG_DRV_3D PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO33_36_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2 + +#define PMA8084_GPIO4_5_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO7_10_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO5_14_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO19_21_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO22_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 + +#endif diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h new file mode 100644 index 000000000..32e66ee7e --- /dev/null +++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the Qualcomm PMIC's + * Multi-Purpose Pin binding. 
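+ *
+ * Hypothetical usage sketch (not part of the original header): an MPP
+ * configuration node typically combines these constants as, e.g.,
+ *	function = PMIC_MPP_FUNC_NORMAL;
+ *	power-source = <PM8941_MPP_S3>;
+ *	qcom,amux-route = <PMIC_MPP_AMUX_ROUTE_CH8>;
+ * with property names as used by the qcom,pmic-mpp pinctrl binding.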
+ */ + +#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H +#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H + +/* power-source */ + +/* Digital Input/Output: level [PM8058] */ +#define PM8058_MPP_VPH 0 +#define PM8058_MPP_S3 1 +#define PM8058_MPP_L2 2 +#define PM8058_MPP_L3 3 + +/* Digital Input/Output: level [PM8901] */ +#define PM8901_MPP_MSMIO 0 +#define PM8901_MPP_DIG 1 +#define PM8901_MPP_L5 2 +#define PM8901_MPP_S4 3 +#define PM8901_MPP_VPH 4 + +/* Digital Input/Output: level [PM8921] */ +#define PM8921_MPP_S4 1 +#define PM8921_MPP_L15 3 +#define PM8921_MPP_L17 4 +#define PM8921_MPP_VPH 7 + +/* Digital Input/Output: level [PM8821] */ +#define PM8821_MPP_1P8 0 +#define PM8821_MPP_VPH 7 + +/* Digital Input/Output: level [PM8018] */ +#define PM8018_MPP_L4 0 +#define PM8018_MPP_L14 1 +#define PM8018_MPP_S3 2 +#define PM8018_MPP_L6 3 +#define PM8018_MPP_L2 4 +#define PM8018_MPP_L5 5 +#define PM8018_MPP_VPH 7 + +/* Digital Input/Output: level [PM8038] */ +#define PM8038_MPP_L20 0 +#define PM8038_MPP_L11 1 +#define PM8038_MPP_L5 2 +#define PM8038_MPP_L15 3 +#define PM8038_MPP_L17 4 +#define PM8038_MPP_VPH 7 + +#define PM8841_MPP_VPH 0 +#define PM8841_MPP_S3 2 + +#define PM8916_MPP_VPH 0 +#define PM8916_MPP_L2 2 +#define PM8916_MPP_L5 3 + +#define PM8941_MPP_VPH 0 +#define PM8941_MPP_L1 1 +#define PM8941_MPP_S3 2 +#define PM8941_MPP_L6 3 + +#define PMA8084_MPP_VPH 0 +#define PMA8084_MPP_L1 1 +#define PMA8084_MPP_S4 2 +#define PMA8084_MPP_L6 3 + +#define PM8994_MPP_VPH 0 +/* Only supported for MPP_05-MPP_08 */ +#define PM8994_MPP_L19 1 +#define PM8994_MPP_S4 2 +#define PM8994_MPP_L12 3 + +/* + * Analog Input - Set the source for analog input. + * To be used with "qcom,amux-route" property + */ +#define PMIC_MPP_AMUX_ROUTE_CH5 0 +#define PMIC_MPP_AMUX_ROUTE_CH6 1 +#define PMIC_MPP_AMUX_ROUTE_CH7 2 +#define PMIC_MPP_AMUX_ROUTE_CH8 3 +#define PMIC_MPP_AMUX_ROUTE_ABUS1 4 +#define PMIC_MPP_AMUX_ROUTE_ABUS2 5 +#define PMIC_MPP_AMUX_ROUTE_ABUS3 6 +#define PMIC_MPP_AMUX_ROUTE_ABUS4 7 + +/* Analog Output: level */ +#define PMIC_MPP_AOUT_LVL_1V25 0 +#define PMIC_MPP_AOUT_LVL_1V25_2 1 +#define PMIC_MPP_AOUT_LVL_0V625 2 +#define PMIC_MPP_AOUT_LVL_0V3125 3 +#define PMIC_MPP_AOUT_LVL_MPP 4 +#define PMIC_MPP_AOUT_LVL_ABUS1 5 +#define PMIC_MPP_AOUT_LVL_ABUS2 6 +#define PMIC_MPP_AOUT_LVL_ABUS3 7 + +/* To be used with "function" */ +#define PMIC_MPP_FUNC_NORMAL "normal" +#define PMIC_MPP_FUNC_PAIRED "paired" +#define PMIC_MPP_FUNC_DTEST1 "dtest1" +#define PMIC_MPP_FUNC_DTEST2 "dtest2" +#define PMIC_MPP_FUNC_DTEST3 "dtest3" +#define PMIC_MPP_FUNC_DTEST4 "dtest4" + +#endif diff --git a/include/dt-bindings/pinctrl/r7s72100-pinctrl.h b/include/dt-bindings/pinctrl/r7s72100-pinctrl.h new file mode 100644 index 000000000..cdb950246 --- /dev/null +++ b/include/dt-bindings/pinctrl/r7s72100-pinctrl.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Defines macros and constants for Renesas RZ/A1 pin controller pin + * muxing functions. 
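+ *
+ * Worked example (added for illustration): RZA1_PINMUX(3, 0, 2) below
+ * evaluates to (3 * 16 + 0) | (2 << 16) = 0x20030, i.e. pin P3_0 with
+ * alternate function 2 encoded in the upper 16 bits.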
+ */ +#ifndef __DT_BINDINGS_PINCTRL_RENESAS_RZA1_H +#define __DT_BINDINGS_PINCTRL_RENESAS_RZA1_H + +#define RZA1_PINS_PER_PORT 16 + +/* + * Create the pin index from its bank and position numbers and store in + * the upper 16 bits the alternate function identifier + */ +#define RZA1_PINMUX(b, p, f) ((b) * RZA1_PINS_PER_PORT + (p) | (f << 16)) + +#endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA1_H */ diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h new file mode 100644 index 000000000..aaec8baaa --- /dev/null +++ b/include/dt-bindings/pinctrl/rockchip.h @@ -0,0 +1,67 @@ +/* + * Header providing constants for Rockchip pinctrl bindings. + * + * Copyright (c) 2013 MundoReader S.L. + * Author: Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DT_BINDINGS_ROCKCHIP_PINCTRL_H__ +#define __DT_BINDINGS_ROCKCHIP_PINCTRL_H__ + +#define RK_GPIO0 0 +#define RK_GPIO1 1 +#define RK_GPIO2 2 +#define RK_GPIO3 3 +#define RK_GPIO4 4 +#define RK_GPIO6 6 + +#define RK_PA0 0 +#define RK_PA1 1 +#define RK_PA2 2 +#define RK_PA3 3 +#define RK_PA4 4 +#define RK_PA5 5 +#define RK_PA6 6 +#define RK_PA7 7 +#define RK_PB0 8 +#define RK_PB1 9 +#define RK_PB2 10 +#define RK_PB3 11 +#define RK_PB4 12 +#define RK_PB5 13 +#define RK_PB6 14 +#define RK_PB7 15 +#define RK_PC0 16 +#define RK_PC1 17 +#define RK_PC2 18 +#define RK_PC3 19 +#define RK_PC4 20 +#define RK_PC5 21 +#define RK_PC6 22 +#define RK_PC7 23 +#define RK_PD0 24 +#define RK_PD1 25 +#define RK_PD2 26 +#define RK_PD3 27 +#define RK_PD4 28 +#define RK_PD5 29 +#define RK_PD6 30 +#define RK_PD7 31 + +#define RK_FUNC_GPIO 0 +#define RK_FUNC_1 1 +#define RK_FUNC_2 2 +#define RK_FUNC_3 3 +#define RK_FUNC_4 4 + +#endif diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h new file mode 100644 index 000000000..b1832506b --- /dev/null +++ b/include/dt-bindings/pinctrl/samsung.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Samsung's Exynos pinctrl bindings + * + * Copyright (c) 2016 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * Author: Krzysztof Kozlowski + */ + +#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__ +#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__ + +#define EXYNOS_PIN_PULL_NONE 0 +#define EXYNOS_PIN_PULL_DOWN 1 +#define EXYNOS_PIN_PULL_UP 3 + +#define S3C64XX_PIN_PULL_NONE 0 +#define S3C64XX_PIN_PULL_DOWN 1 +#define S3C64XX_PIN_PULL_UP 2 + +/* Pin function in power down mode */ +#define EXYNOS_PIN_PDN_OUT0 0 +#define EXYNOS_PIN_PDN_OUT1 1 +#define EXYNOS_PIN_PDN_INPUT 2 +#define EXYNOS_PIN_PDN_PREV 3 + +/* Drive strengths for Exynos3250, Exynos4 (all) and Exynos5250 */ +#define EXYNOS4_PIN_DRV_LV1 0 +#define EXYNOS4_PIN_DRV_LV2 2 +#define EXYNOS4_PIN_DRV_LV3 1 +#define EXYNOS4_PIN_DRV_LV4 3 + +/* Drive strengths for Exynos5260 */ +#define EXYNOS5260_PIN_DRV_LV1 0 +#define EXYNOS5260_PIN_DRV_LV2 1 +#define EXYNOS5260_PIN_DRV_LV4 2 +#define EXYNOS5260_PIN_DRV_LV6 3 + +/* Drive strengths for Exynos5410, Exynos542x and Exynos5800 */ +#define EXYNOS5420_PIN_DRV_LV1 0 +#define EXYNOS5420_PIN_DRV_LV2 1 +#define EXYNOS5420_PIN_DRV_LV3 2 +#define EXYNOS5420_PIN_DRV_LV4 3 + +/* Drive strengths for Exynos5433 */ +#define EXYNOS5433_PIN_DRV_FAST_SR1 0 +#define EXYNOS5433_PIN_DRV_FAST_SR2 1 +#define EXYNOS5433_PIN_DRV_FAST_SR3 2 +#define EXYNOS5433_PIN_DRV_FAST_SR4 3 +#define EXYNOS5433_PIN_DRV_FAST_SR5 4 +#define EXYNOS5433_PIN_DRV_FAST_SR6 5 +#define EXYNOS5433_PIN_DRV_SLOW_SR1 8 +#define EXYNOS5433_PIN_DRV_SLOW_SR2 9 +#define EXYNOS5433_PIN_DRV_SLOW_SR3 0xa +#define EXYNOS5433_PIN_DRV_SLOW_SR4 0xb +#define EXYNOS5433_PIN_DRV_SLOW_SR5 0xc +#define EXYNOS5433_PIN_DRV_SLOW_SR6 0xf + +#define EXYNOS_PIN_FUNC_INPUT 0 +#define EXYNOS_PIN_FUNC_OUTPUT 1 +#define EXYNOS_PIN_FUNC_2 2 +#define EXYNOS_PIN_FUNC_3 3 +#define EXYNOS_PIN_FUNC_4 4 +#define EXYNOS_PIN_FUNC_5 5 +#define EXYNOS_PIN_FUNC_6 6 +#define EXYNOS_PIN_FUNC_EINT 0xf +#define EXYNOS_PIN_FUNC_F EXYNOS_PIN_FUNC_EINT + +/* Drive strengths for Exynos7 FSYS1 block */ +#define EXYNOS7_FSYS1_PIN_DRV_LV1 0 +#define EXYNOS7_FSYS1_PIN_DRV_LV2 4 +#define EXYNOS7_FSYS1_PIN_DRV_LV3 2 +#define EXYNOS7_FSYS1_PIN_DRV_LV4 6 +#define EXYNOS7_FSYS1_PIN_DRV_LV5 1 +#define EXYNOS7_FSYS1_PIN_DRV_LV6 5 + +#endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */ diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h new file mode 100644 index 000000000..b5a2174a6 --- /dev/null +++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (C) STMicroelectronics 2017 - All Rights Reserved + * Author: Torgue Alexandre for STMicroelectronics. 
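+ *
+ * Worked example (added for illustration): with the definitions below,
+ * STM32_PINMUX('A', 9, AF7) evaluates to (9 << 8) | 0x8 = 0x908, i.e.
+ * pin PA9 muxed to alternate function 7, as a board pinmux node would
+ * typically reference it.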
+ */ + +#ifndef _DT_BINDINGS_STM32_PINFUNC_H +#define _DT_BINDINGS_STM32_PINFUNC_H + +/* define PIN modes */ +#define GPIO 0x0 +#define AF0 0x1 +#define AF1 0x2 +#define AF2 0x3 +#define AF3 0x4 +#define AF4 0x5 +#define AF5 0x6 +#define AF6 0x7 +#define AF7 0x8 +#define AF8 0x9 +#define AF9 0xa +#define AF10 0xb +#define AF11 0xc +#define AF12 0xd +#define AF13 0xe +#define AF14 0xf +#define AF15 0x10 +#define ANALOG 0x11 + +/* define Pins number*/ +#define PIN_NO(port, line) (((port) - 'A') * 0x10 + (line)) + +#define STM32_PINMUX(port, line, mode) (((PIN_NO(port, line)) << 8) | (mode)) + +#endif /* _DT_BINDINGS_STM32_PINFUNC_H */ + diff --git a/include/dt-bindings/pinctrl/sun4i-a10.h b/include/dt-bindings/pinctrl/sun4i-a10.h new file mode 100644 index 000000000..f7553c143 --- /dev/null +++ b/include/dt-bindings/pinctrl/sun4i-a10.h @@ -0,0 +1,62 @@ +/* + * Copyright 2014 Maxime Ripard + * + * Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this file; if not, write to the Free + * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + * MA 02110-1301 USA + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __DT_BINDINGS_PINCTRL_SUN4I_A10_H_ +#define __DT_BINDINGS_PINCTRL_SUN4I_A10_H_ + +#define SUN4I_PINCTRL_10_MA 0 +#define SUN4I_PINCTRL_20_MA 1 +#define SUN4I_PINCTRL_30_MA 2 +#define SUN4I_PINCTRL_40_MA 3 + +#define SUN4I_PINCTRL_NO_PULL 0 +#define SUN4I_PINCTRL_PULL_UP 1 +#define SUN4I_PINCTRL_PULL_DOWN 2 + +#endif /* __DT_BINDINGS_PINCTRL_SUN4I_A10_H_ */ diff --git a/include/dt-bindings/power/imx7-power.h b/include/dt-bindings/power/imx7-power.h new file mode 100644 index 000000000..3a181e410 --- /dev/null +++ b/include/dt-bindings/power/imx7-power.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2017 Impinj + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __DT_BINDINGS_IMX7_POWER_H__ +#define __DT_BINDINGS_IMX7_POWER_H__ + +#define IMX7_POWER_DOMAIN_MIPI_PHY 0 +#define IMX7_POWER_DOMAIN_PCIE_PHY 1 +#define IMX7_POWER_DOMAIN_USB_HSIC_PHY 2 + +#endif diff --git a/include/dt-bindings/power/mt2701-power.h b/include/dt-bindings/power/mt2701-power.h new file mode 100644 index 000000000..64cc826d6 --- /dev/null +++ b/include/dt-bindings/power/mt2701-power.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2015 MediaTek Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_POWER_MT2701_POWER_H +#define _DT_BINDINGS_POWER_MT2701_POWER_H + +#define MT2701_POWER_DOMAIN_CONN 0 +#define MT2701_POWER_DOMAIN_DISP 1 +#define MT2701_POWER_DOMAIN_MFG 2 +#define MT2701_POWER_DOMAIN_VDEC 3 +#define MT2701_POWER_DOMAIN_ISP 4 +#define MT2701_POWER_DOMAIN_BDP 5 +#define MT2701_POWER_DOMAIN_ETH 6 +#define MT2701_POWER_DOMAIN_HIF 7 +#define MT2701_POWER_DOMAIN_IFR_MSC 8 + +#endif /* _DT_BINDINGS_POWER_MT2701_POWER_H */ diff --git a/include/dt-bindings/power/mt2712-power.h b/include/dt-bindings/power/mt2712-power.h new file mode 100644 index 000000000..2c147817e --- /dev/null +++ b/include/dt-bindings/power/mt2712-power.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2017 MediaTek Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See http://www.gnu.org/licenses/gpl-2.0.html for more details. 
+ */ + +#ifndef _DT_BINDINGS_POWER_MT2712_POWER_H +#define _DT_BINDINGS_POWER_MT2712_POWER_H + +#define MT2712_POWER_DOMAIN_MM 0 +#define MT2712_POWER_DOMAIN_VDEC 1 +#define MT2712_POWER_DOMAIN_VENC 2 +#define MT2712_POWER_DOMAIN_ISP 3 +#define MT2712_POWER_DOMAIN_AUDIO 4 +#define MT2712_POWER_DOMAIN_USB 5 +#define MT2712_POWER_DOMAIN_USB2 6 +#define MT2712_POWER_DOMAIN_MFG 7 +#define MT2712_POWER_DOMAIN_MFG_SC1 8 +#define MT2712_POWER_DOMAIN_MFG_SC2 9 +#define MT2712_POWER_DOMAIN_MFG_SC3 10 + +#endif /* _DT_BINDINGS_POWER_MT2712_POWER_H */ diff --git a/include/dt-bindings/power/mt6797-power.h b/include/dt-bindings/power/mt6797-power.h new file mode 100644 index 000000000..a60c1d81c --- /dev/null +++ b/include/dt-bindings/power/mt6797-power.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Mars.C + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_POWER_MT6797_POWER_H +#define _DT_BINDINGS_POWER_MT6797_POWER_H + +#define MT6797_POWER_DOMAIN_VDEC 0 +#define MT6797_POWER_DOMAIN_VENC 1 +#define MT6797_POWER_DOMAIN_ISP 2 +#define MT6797_POWER_DOMAIN_MM 3 +#define MT6797_POWER_DOMAIN_AUDIO 4 +#define MT6797_POWER_DOMAIN_MFG_ASYNC 5 +#define MT6797_POWER_DOMAIN_MFG 6 +#define MT6797_POWER_DOMAIN_MFG_CORE0 7 +#define MT6797_POWER_DOMAIN_MFG_CORE1 8 +#define MT6797_POWER_DOMAIN_MFG_CORE2 9 +#define MT6797_POWER_DOMAIN_MFG_CORE3 10 +#define MT6797_POWER_DOMAIN_MJC 11 + +#endif /* _DT_BINDINGS_POWER_MT6797_POWER_H */ diff --git a/include/dt-bindings/power/mt7622-power.h b/include/dt-bindings/power/mt7622-power.h new file mode 100644 index 000000000..1b6392697 --- /dev/null +++ b/include/dt-bindings/power/mt7622-power.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2017 MediaTek Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */ + +#ifndef _DT_BINDINGS_POWER_MT7622_POWER_H +#define _DT_BINDINGS_POWER_MT7622_POWER_H + +#define MT7622_POWER_DOMAIN_ETHSYS 0 +#define MT7622_POWER_DOMAIN_HIF0 1 +#define MT7622_POWER_DOMAIN_HIF1 2 +#define MT7622_POWER_DOMAIN_WB 3 + +#endif /* _DT_BINDINGS_POWER_MT7622_POWER_H */ diff --git a/include/dt-bindings/power/mt7623a-power.h b/include/dt-bindings/power/mt7623a-power.h new file mode 100644 index 000000000..2544822aa --- /dev/null +++ b/include/dt-bindings/power/mt7623a-power.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_POWER_MT7623A_POWER_H +#define _DT_BINDINGS_POWER_MT7623A_POWER_H + +#define MT7623A_POWER_DOMAIN_CONN 0 +#define MT7623A_POWER_DOMAIN_ETH 1 +#define MT7623A_POWER_DOMAIN_HIF 2 +#define MT7623A_POWER_DOMAIN_IFR_MSC 3 + +#endif /* _DT_BINDINGS_POWER_MT7623A_POWER_H */ diff --git a/include/dt-bindings/power/mt8173-power.h b/include/dt-bindings/power/mt8173-power.h new file mode 100644 index 000000000..15d531aa6 --- /dev/null +++ b/include/dt-bindings/power/mt8173-power.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_POWER_MT8173_POWER_H +#define _DT_BINDINGS_POWER_MT8173_POWER_H + +#define MT8173_POWER_DOMAIN_VDEC 0 +#define MT8173_POWER_DOMAIN_VENC 1 +#define MT8173_POWER_DOMAIN_ISP 2 +#define MT8173_POWER_DOMAIN_MM 3 +#define MT8173_POWER_DOMAIN_VENC_LT 4 +#define MT8173_POWER_DOMAIN_AUDIO 5 +#define MT8173_POWER_DOMAIN_USB 6 +#define MT8173_POWER_DOMAIN_MFG_ASYNC 7 +#define MT8173_POWER_DOMAIN_MFG_2D 8 +#define MT8173_POWER_DOMAIN_MFG 9 + +#endif /* _DT_BINDINGS_POWER_MT8173_POWER_H */ diff --git a/include/dt-bindings/power/owl-s500-powergate.h b/include/dt-bindings/power/owl-s500-powergate.h new file mode 100644 index 000000000..0a1c45186 --- /dev/null +++ b/include/dt-bindings/power/owl-s500-powergate.h @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2017 Andreas Färber + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ +#ifndef DT_BINDINGS_POWER_OWL_S500_POWERGATE_H +#define DT_BINDINGS_POWER_OWL_S500_POWERGATE_H + +#define S500_PD_VDE 0 +#define S500_PD_VCE_SI 1 +#define S500_PD_USB2_1 2 +#define S500_PD_CPU2 3 +#define S500_PD_CPU3 4 +#define S500_PD_DMA 5 +#define S500_PD_DS 6 +#define S500_PD_USB3 7 +#define S500_PD_USB2_0 8 + +#endif diff --git a/include/dt-bindings/power/owl-s700-powergate.h b/include/dt-bindings/power/owl-s700-powergate.h new file mode 100644 index 000000000..4cf1aefbf --- /dev/null +++ b/include/dt-bindings/power/owl-s700-powergate.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Actions Semi S700 SPS + * + * Copyright (c) 2017 Andreas Färber + */ +#ifndef DT_BINDINGS_POWER_OWL_S700_POWERGATE_H +#define DT_BINDINGS_POWER_OWL_S700_POWERGATE_H + +#define S700_PD_VDE 0 +#define S700_PD_VCE_SI 1 +#define S700_PD_USB2_1 2 +#define S700_PD_HDE 3 +#define S700_PD_DMA 4 +#define S700_PD_DS 5 +#define S700_PD_USB3 6 +#define S700_PD_USB2_0 7 + +#endif diff --git a/include/dt-bindings/power/px30-power.h b/include/dt-bindings/power/px30-power.h new file mode 100644 index 000000000..30917a99a --- /dev/null +++ b/include/dt-bindings/power/px30-power.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_PX30_POWER_H__ +#define __DT_BINDINGS_POWER_PX30_POWER_H__ + +/* VD_CORE */ +#define PX30_PD_A35_0 0 +#define PX30_PD_A35_1 1 +#define PX30_PD_A35_2 2 +#define PX30_PD_A35_3 3 +#define PX30_PD_SCU 4 + +/* VD_LOGIC */ +#define PX30_PD_USB 5 +#define PX30_PD_DDR 6 +#define PX30_PD_SDCARD 7 +#define PX30_PD_CRYPTO 8 +#define 
PX30_PD_GMAC 9 +#define PX30_PD_MMC_NAND 10 +#define PX30_PD_VPU 11 +#define PX30_PD_VO 12 +#define PX30_PD_VI 13 +#define PX30_PD_GPU 14 + +/* VD_PMU */ +#define PX30_PD_PMU 15 + +#endif diff --git a/include/dt-bindings/power/r8a7743-sysc.h b/include/dt-bindings/power/r8a7743-sysc.h new file mode 100644 index 000000000..61cfbb290 --- /dev/null +++ b/include/dt-bindings/power/r8a7743-sysc.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2016 Cogent Embedded Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __DT_BINDINGS_POWER_R8A7743_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7743_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7743_PD_CA15_CPU0 0 +#define R8A7743_PD_CA15_CPU1 1 +#define R8A7743_PD_CA15_SCU 12 +#define R8A7743_PD_SGX 20 + +/* Always-on power area */ +#define R8A7743_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7743_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7745-sysc.h b/include/dt-bindings/power/r8a7745-sysc.h new file mode 100644 index 000000000..1844c1171 --- /dev/null +++ b/include/dt-bindings/power/r8a7745-sysc.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2016 Cogent Embedded Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __DT_BINDINGS_POWER_R8A7745_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7745_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7745_PD_CA7_CPU0 5 +#define R8A7745_PD_CA7_CPU1 6 +#define R8A7745_PD_SGX 20 +#define R8A7745_PD_CA7_SCU 21 + +/* Always-on power area */ +#define R8A7745_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7745_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77470-sysc.h b/include/dt-bindings/power/r8a77470-sysc.h new file mode 100644 index 000000000..8bf4db187 --- /dev/null +++ b/include/dt-bindings/power/r8a77470-sysc.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_POWER_R8A77470_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77470_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77470_PD_CA7_CPU0 5 +#define R8A77470_PD_CA7_CPU1 6 +#define R8A77470_PD_SGX 20 +#define R8A77470_PD_CA7_SCU 21 + +/* Always-on power area */ +#define R8A77470_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77470_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7779-sysc.h b/include/dt-bindings/power/r8a7779-sysc.h new file mode 100644 index 000000000..183571da5 --- /dev/null +++ b/include/dt-bindings/power/r8a7779-sysc.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ +#ifndef __DT_BINDINGS_POWER_R8A7779_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7779_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7779_PD_ARM1 1 +#define R8A7779_PD_ARM2 2 +#define R8A7779_PD_ARM3 3 +#define R8A7779_PD_SGX 20 +#define R8A7779_PD_VDP 21 +#define R8A7779_PD_IMP 24 + +/* Always-on power area */ +#define R8A7779_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7779_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7790-sysc.h b/include/dt-bindings/power/r8a7790-sysc.h new file mode 100644 index 000000000..6af4e9929 --- /dev/null +++ b/include/dt-bindings/power/r8a7790-sysc.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ +#ifndef __DT_BINDINGS_POWER_R8A7790_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7790_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7790_PD_CA15_CPU0 0 +#define R8A7790_PD_CA15_CPU1 1 +#define R8A7790_PD_CA15_CPU2 2 +#define R8A7790_PD_CA15_CPU3 3 +#define R8A7790_PD_CA7_CPU0 5 +#define R8A7790_PD_CA7_CPU1 6 +#define R8A7790_PD_CA7_CPU2 7 +#define R8A7790_PD_CA7_CPU3 8 +#define R8A7790_PD_CA15_SCU 12 +#define R8A7790_PD_SH_4A 16 +#define R8A7790_PD_RGX 20 +#define R8A7790_PD_CA7_SCU 21 +#define R8A7790_PD_IMP 24 + +/* Always-on power area */ +#define R8A7790_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7790_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7791-sysc.h b/include/dt-bindings/power/r8a7791-sysc.h new file mode 100644 index 000000000..1403baa05 --- /dev/null +++ b/include/dt-bindings/power/r8a7791-sysc.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ +#ifndef __DT_BINDINGS_POWER_R8A7791_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7791_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7791_PD_CA15_CPU0 0 +#define R8A7791_PD_CA15_CPU1 1 +#define R8A7791_PD_CA15_SCU 12 +#define R8A7791_PD_SH_4A 16 +#define R8A7791_PD_SGX 20 + +/* Always-on power area */ +#define R8A7791_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7791_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7792-sysc.h b/include/dt-bindings/power/r8a7792-sysc.h new file mode 100644 index 000000000..74f4a78e2 --- /dev/null +++ b/include/dt-bindings/power/r8a7792-sysc.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2016 Cogent Embedded Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ +#ifndef __DT_BINDINGS_POWER_R8A7792_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7792_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7792_PD_CA15_CPU0 0 +#define R8A7792_PD_CA15_CPU1 1 +#define R8A7792_PD_CA15_SCU 12 +#define R8A7792_PD_SGX 20 +#define R8A7792_PD_IMP 24 + +/* Always-on power area */ +#define R8A7792_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7792_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7793-sysc.h b/include/dt-bindings/power/r8a7793-sysc.h new file mode 100644 index 000000000..b5693df3d --- /dev/null +++ b/include/dt-bindings/power/r8a7793-sysc.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ +#ifndef __DT_BINDINGS_POWER_R8A7793_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7793_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + * + * Note that R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. + */ + +#define R8A7793_PD_CA15_CPU0 0 +#define R8A7793_PD_CA15_CPU1 1 +#define R8A7793_PD_CA15_SCU 12 +#define R8A7793_PD_SH_4A 16 +#define R8A7793_PD_SGX 20 + +/* Always-on power area */ +#define R8A7793_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7793_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7794-sysc.h b/include/dt-bindings/power/r8a7794-sysc.h new file mode 100644 index 000000000..862241c2d --- /dev/null +++ b/include/dt-bindings/power/r8a7794-sysc.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ +#ifndef __DT_BINDINGS_POWER_R8A7794_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7794_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7794_PD_CA7_CPU0 5 +#define R8A7794_PD_CA7_CPU1 6 +#define R8A7794_PD_SH_4A 16 +#define R8A7794_PD_SGX 20 +#define R8A7794_PD_CA7_SCU 21 + +/* Always-on power area */ +#define R8A7794_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7794_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7795-sysc.h b/include/dt-bindings/power/r8a7795-sysc.h new file mode 100644 index 000000000..ad679eeda --- /dev/null +++ b/include/dt-bindings/power/r8a7795-sysc.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ +#ifndef __DT_BINDINGS_POWER_R8A7795_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7795_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. 
SYSCISR, Interrupt Status Register) + */ + +#define R8A7795_PD_CA57_CPU0 0 +#define R8A7795_PD_CA57_CPU1 1 +#define R8A7795_PD_CA57_CPU2 2 +#define R8A7795_PD_CA57_CPU3 3 +#define R8A7795_PD_CA53_CPU0 5 +#define R8A7795_PD_CA53_CPU1 6 +#define R8A7795_PD_CA53_CPU2 7 +#define R8A7795_PD_CA53_CPU3 8 +#define R8A7795_PD_A3VP 9 +#define R8A7795_PD_CA57_SCU 12 +#define R8A7795_PD_CR7 13 +#define R8A7795_PD_A3VC 14 +#define R8A7795_PD_3DG_A 17 +#define R8A7795_PD_3DG_B 18 +#define R8A7795_PD_3DG_C 19 +#define R8A7795_PD_3DG_D 20 +#define R8A7795_PD_CA53_SCU 21 +#define R8A7795_PD_3DG_E 22 +#define R8A7795_PD_A3IR 24 +#define R8A7795_PD_A2VC0 25 /* ES1.x only */ +#define R8A7795_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A7795_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7795_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7796-sysc.h b/include/dt-bindings/power/r8a7796-sysc.h new file mode 100644 index 000000000..5b4daab44 --- /dev/null +++ b/include/dt-bindings/power/r8a7796-sysc.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2016 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ +#ifndef __DT_BINDINGS_POWER_R8A7796_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7796_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7796_PD_CA57_CPU0 0 +#define R8A7796_PD_CA57_CPU1 1 +#define R8A7796_PD_CA53_CPU0 5 +#define R8A7796_PD_CA53_CPU1 6 +#define R8A7796_PD_CA53_CPU2 7 +#define R8A7796_PD_CA53_CPU3 8 +#define R8A7796_PD_CA57_SCU 12 +#define R8A7796_PD_CR7 13 +#define R8A7796_PD_A3VC 14 +#define R8A7796_PD_3DG_A 17 +#define R8A7796_PD_3DG_B 18 +#define R8A7796_PD_CA53_SCU 21 +#define R8A7796_PD_A3IR 24 +#define R8A7796_PD_A2VC0 25 +#define R8A7796_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A7796_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7796_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77965-sysc.h b/include/dt-bindings/power/r8a77965-sysc.h new file mode 100644 index 000000000..05a4b5917 --- /dev/null +++ b/include/dt-bindings/power/r8a77965-sysc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Jacopo Mondi + * Copyright (C) 2016 Glider bvba + */ + +#ifndef __DT_BINDINGS_POWER_R8A77965_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77965_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77965_PD_CA57_CPU0 0 +#define R8A77965_PD_CA57_CPU1 1 +#define R8A77965_PD_A3VP 9 +#define R8A77965_PD_CA57_SCU 12 +#define R8A77965_PD_CR7 13 +#define R8A77965_PD_A3VC 14 +#define R8A77965_PD_3DG_A 17 +#define R8A77965_PD_3DG_B 18 +#define R8A77965_PD_A3IR 24 +#define R8A77965_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A77965_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77965_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77970-sysc.h b/include/dt-bindings/power/r8a77970-sysc.h new file mode 100644 index 000000000..9eaf824b1 --- /dev/null +++ b/include/dt-bindings/power/r8a77970-sysc.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2017 Cogent Embedded Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __DT_BINDINGS_POWER_R8A77970_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77970_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77970_PD_CA53_CPU0 5 +#define R8A77970_PD_CA53_CPU1 6 +#define R8A77970_PD_CR7 13 +#define R8A77970_PD_CA53_SCU 21 +#define R8A77970_PD_A2IR0 23 +#define R8A77970_PD_A3IR 24 +#define R8A77970_PD_A2IR1 27 +#define R8A77970_PD_A2DP 28 +#define R8A77970_PD_A2CN 29 +#define R8A77970_PD_A2SC0 30 +#define R8A77970_PD_A2SC1 31 + +/* Always-on power area */ +#define R8A77970_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77970_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77980-sysc.h b/include/dt-bindings/power/r8a77980-sysc.h new file mode 100644 index 000000000..e12c8587b --- /dev/null +++ b/include/dt-bindings/power/r8a77980-sysc.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + * Copyright (C) 2018 Cogent Embedded, Inc. + */ +#ifndef __DT_BINDINGS_POWER_R8A77980_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77980_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77980_PD_A2SC2 0 +#define R8A77980_PD_A2SC3 1 +#define R8A77980_PD_A2SC4 2 +#define R8A77980_PD_A2DP0 3 +#define R8A77980_PD_A2DP1 4 +#define R8A77980_PD_CA53_CPU0 5 +#define R8A77980_PD_CA53_CPU1 6 +#define R8A77980_PD_CA53_CPU2 7 +#define R8A77980_PD_CA53_CPU3 8 +#define R8A77980_PD_A2CN 10 +#define R8A77980_PD_A3VIP0 11 +#define R8A77980_PD_A2IR5 12 +#define R8A77980_PD_CR7 13 +#define R8A77980_PD_A2IR4 15 +#define R8A77980_PD_CA53_SCU 21 +#define R8A77980_PD_A2IR0 23 +#define R8A77980_PD_A3IR 24 +#define R8A77980_PD_A3VIP1 25 +#define R8A77980_PD_A3VIP2 26 +#define R8A77980_PD_A2IR1 27 +#define R8A77980_PD_A2IR2 28 +#define R8A77980_PD_A2IR3 29 +#define R8A77980_PD_A2SC0 30 +#define R8A77980_PD_A2SC1 31 + +/* Always-on power area */ +#define R8A77980_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77980_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77990-sysc.h b/include/dt-bindings/power/r8a77990-sysc.h new file mode 100644 index 000000000..944d85bee --- /dev/null +++ b/include/dt-bindings/power/r8a77990-sysc.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_POWER_R8A77990_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77990_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. 
SYSCISR, Interrupt Status Register) + */ + +#define R8A77990_PD_CA53_CPU0 5 +#define R8A77990_PD_CA53_CPU1 6 +#define R8A77990_PD_CR7 13 +#define R8A77990_PD_A3VC 14 +#define R8A77990_PD_3DG_A 17 +#define R8A77990_PD_3DG_B 18 +#define R8A77990_PD_CA53_SCU 21 +#define R8A77990_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A77990_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77990_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77995-sysc.h b/include/dt-bindings/power/r8a77995-sysc.h new file mode 100644 index 000000000..09d0ed575 --- /dev/null +++ b/include/dt-bindings/power/r8a77995-sysc.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2017 Glider bvba + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ +#ifndef __DT_BINDINGS_POWER_R8A77995_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77995_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77995_PD_CA53_CPU0 5 +#define R8A77995_PD_CA53_SCU 21 + +/* Always-on power area */ +#define R8A77995_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77995_SYSC_H__ */ diff --git a/include/dt-bindings/power/raspberrypi-power.h b/include/dt-bindings/power/raspberrypi-power.h new file mode 100644 index 000000000..b3ff8e09a --- /dev/null +++ b/include/dt-bindings/power/raspberrypi-power.h @@ -0,0 +1,41 @@ +/* + * Copyright © 2015 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H +#define _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H + +/* These power domain indices are the firmware interface's indices + * minus one. 
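 *
 * Hypothetical consumer example (not part of the original header): a device
 * node typically references one of these indices as
 *	power-domains = <&power RPI_POWER_DOMAIN_USB>;
 * where the "&power" label for the firmware power controller node is only
 * assumed here for illustration.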
+ */ +#define RPI_POWER_DOMAIN_I2C0 0 +#define RPI_POWER_DOMAIN_I2C1 1 +#define RPI_POWER_DOMAIN_I2C2 2 +#define RPI_POWER_DOMAIN_VIDEO_SCALER 3 +#define RPI_POWER_DOMAIN_VPU1 4 +#define RPI_POWER_DOMAIN_HDMI 5 +#define RPI_POWER_DOMAIN_USB 6 +#define RPI_POWER_DOMAIN_VEC 7 +#define RPI_POWER_DOMAIN_JPEG 8 +#define RPI_POWER_DOMAIN_H264 9 +#define RPI_POWER_DOMAIN_V3D 10 +#define RPI_POWER_DOMAIN_ISP 11 +#define RPI_POWER_DOMAIN_UNICAM0 12 +#define RPI_POWER_DOMAIN_UNICAM1 13 +#define RPI_POWER_DOMAIN_CCP2RX 14 +#define RPI_POWER_DOMAIN_CSI2 15 +#define RPI_POWER_DOMAIN_CPI 16 +#define RPI_POWER_DOMAIN_DSI0 17 +#define RPI_POWER_DOMAIN_DSI1 18 +#define RPI_POWER_DOMAIN_TRANSPOSER 19 +#define RPI_POWER_DOMAIN_CCP2TX 20 +#define RPI_POWER_DOMAIN_CDP 21 +#define RPI_POWER_DOMAIN_ARM 22 + +#define RPI_POWER_DOMAIN_COUNT 23 + +#endif /* _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H */ diff --git a/include/dt-bindings/power/rk3036-power.h b/include/dt-bindings/power/rk3036-power.h new file mode 100644 index 000000000..0bc6b5d50 --- /dev/null +++ b/include/dt-bindings/power/rk3036-power.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3036_POWER_H__ +#define __DT_BINDINGS_POWER_RK3036_POWER_H__ + +#define RK3036_PD_MSCH 0 +#define RK3036_PD_CORE 1 +#define RK3036_PD_PERI 2 +#define RK3036_PD_VIO 3 +#define RK3036_PD_VPU 4 +#define RK3036_PD_GPU 5 +#define RK3036_PD_SYS 6 + +#endif diff --git a/include/dt-bindings/power/rk3128-power.h b/include/dt-bindings/power/rk3128-power.h new file mode 100644 index 000000000..c051dc310 --- /dev/null +++ b/include/dt-bindings/power/rk3128-power.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3128_POWER_H__ +#define __DT_BINDINGS_POWER_RK3128_POWER_H__ + +/* VD_CORE */ +#define RK3128_PD_CORE 0 + +/* VD_LOGIC */ +#define RK3128_PD_VIO 1 +#define RK3128_PD_VIDEO 2 +#define RK3128_PD_GPU 3 +#define RK3128_PD_MSCH 4 + +#endif diff --git a/include/dt-bindings/power/rk3228-power.h b/include/dt-bindings/power/rk3228-power.h new file mode 100644 index 000000000..6a8dc1bf7 --- /dev/null +++ b/include/dt-bindings/power/rk3228-power.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3228_POWER_H__ +#define __DT_BINDINGS_POWER_RK3228_POWER_H__ + +/** + * RK3228 idle id Summary. + */ + +#define RK3228_PD_CORE 0 +#define RK3228_PD_MSCH 1 +#define RK3228_PD_BUS 2 +#define RK3228_PD_SYS 3 +#define RK3228_PD_VIO 4 +#define RK3228_PD_VOP 5 +#define RK3228_PD_VPU 6 +#define RK3228_PD_RKVDEC 7 +#define RK3228_PD_GPU 8 +#define RK3228_PD_PERI 9 +#define RK3228_PD_GMAC 10 + +#endif diff --git a/include/dt-bindings/power/rk3288-power.h b/include/dt-bindings/power/rk3288-power.h new file mode 100644 index 000000000..f710b56cc --- /dev/null +++ b/include/dt-bindings/power/rk3288-power.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3288_POWER_H__ +#define __DT_BINDINGS_POWER_RK3288_POWER_H__ + +/** + * RK3288 Power Domain and Voltage Domain Summary. 
+ */ + +/* VD_CORE */ +#define RK3288_PD_A17_0 0 +#define RK3288_PD_A17_1 1 +#define RK3288_PD_A17_2 2 +#define RK3288_PD_A17_3 3 +#define RK3288_PD_SCU 4 +#define RK3288_PD_DEBUG 5 +#define RK3288_PD_MEM 6 + +/* VD_LOGIC */ +#define RK3288_PD_BUS 7 +#define RK3288_PD_PERI 8 +#define RK3288_PD_VIO 9 +#define RK3288_PD_ALIVE 10 +#define RK3288_PD_HEVC 11 +#define RK3288_PD_VIDEO 12 + +/* VD_GPU */ +#define RK3288_PD_GPU 13 + +/* VD_PMU */ +#define RK3288_PD_PMU 14 + +#endif diff --git a/include/dt-bindings/power/rk3328-power.h b/include/dt-bindings/power/rk3328-power.h new file mode 100644 index 000000000..02e3d7fc1 --- /dev/null +++ b/include/dt-bindings/power/rk3328-power.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3328_POWER_H__ +#define __DT_BINDINGS_POWER_RK3328_POWER_H__ + +/** + * RK3328 idle id Summary. + */ +#define RK3328_PD_CORE 0 +#define RK3328_PD_GPU 1 +#define RK3328_PD_BUS 2 +#define RK3328_PD_MSCH 3 +#define RK3328_PD_PERI 4 +#define RK3328_PD_VIDEO 5 +#define RK3328_PD_HEVC 6 +#define RK3328_PD_SYS 7 +#define RK3328_PD_VPU 8 +#define RK3328_PD_VIO 9 + +#endif diff --git a/include/dt-bindings/power/rk3366-power.h b/include/dt-bindings/power/rk3366-power.h new file mode 100644 index 000000000..223a3dce0 --- /dev/null +++ b/include/dt-bindings/power/rk3366-power.h @@ -0,0 +1,24 @@ +#ifndef __DT_BINDINGS_POWER_RK3366_POWER_H__ +#define __DT_BINDINGS_POWER_RK3366_POWER_H__ + +/* VD_CORE */ +#define RK3366_PD_A53_0 0 +#define RK3366_PD_A53_1 1 +#define RK3366_PD_A53_2 2 +#define RK3366_PD_A53_3 3 + +/* VD_LOGIC */ +#define RK3366_PD_BUS 4 +#define RK3366_PD_PERI 5 +#define RK3366_PD_VIO 6 +#define RK3366_PD_VIDEO 7 +#define RK3366_PD_RKVDEC 8 +#define RK3366_PD_WIFIBT 9 +#define RK3366_PD_VPU 10 +#define RK3366_PD_GPU 11 +#define RK3366_PD_ALIVE 12 + +/* VD_PMU */ +#define RK3366_PD_PMU 13 + +#endif diff --git a/include/dt-bindings/power/rk3368-power.h b/include/dt-bindings/power/rk3368-power.h new file mode 100644 index 000000000..5e602dbd6 --- /dev/null +++ b/include/dt-bindings/power/rk3368-power.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3368_POWER_H__ +#define __DT_BINDINGS_POWER_RK3368_POWER_H__ + +/* VD_CORE */ +#define RK3368_PD_A53_L0 0 +#define RK3368_PD_A53_L1 1 +#define RK3368_PD_A53_L2 2 +#define RK3368_PD_A53_L3 3 +#define RK3368_PD_SCU_L 4 +#define RK3368_PD_A53_B0 5 +#define RK3368_PD_A53_B1 6 +#define RK3368_PD_A53_B2 7 +#define RK3368_PD_A53_B3 8 +#define RK3368_PD_SCU_B 9 + +/* VD_LOGIC */ +#define RK3368_PD_BUS 10 +#define RK3368_PD_PERI 11 +#define RK3368_PD_VIO 12 +#define RK3368_PD_ALIVE 13 +#define RK3368_PD_VIDEO 14 +#define RK3368_PD_GPU_0 15 +#define RK3368_PD_GPU_1 16 + +/* VD_PMU */ +#define RK3368_PD_PMU 17 + +#endif diff --git a/include/dt-bindings/power/rk3399-power.h b/include/dt-bindings/power/rk3399-power.h new file mode 100644 index 000000000..aedd8b180 --- /dev/null +++ b/include/dt-bindings/power/rk3399-power.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3399_POWER_H__ +#define __DT_BINDINGS_POWER_RK3399_POWER_H__ + +/* VD_CORE_L */ +#define RK3399_PD_A53_L0 0 +#define RK3399_PD_A53_L1 1 +#define RK3399_PD_A53_L2 2 +#define RK3399_PD_A53_L3 3 +#define RK3399_PD_SCU_L 4 + +/* VD_CORE_B */ +#define RK3399_PD_A72_B0 5 +#define RK3399_PD_A72_B1 6 +#define RK3399_PD_SCU_B 7 + +/* VD_LOGIC */ +#define RK3399_PD_TCPD0 8 +#define RK3399_PD_TCPD1 9 +#define RK3399_PD_CCI 10 +#define RK3399_PD_CCI0 11 +#define 
RK3399_PD_CCI1 12 +#define RK3399_PD_PERILP 13 +#define RK3399_PD_PERIHP 14 +#define RK3399_PD_VIO 15 +#define RK3399_PD_VO 16 +#define RK3399_PD_VOPB 17 +#define RK3399_PD_VOPL 18 +#define RK3399_PD_ISP0 19 +#define RK3399_PD_ISP1 20 +#define RK3399_PD_HDCP 21 +#define RK3399_PD_GMAC 22 +#define RK3399_PD_EMMC 23 +#define RK3399_PD_USB3 24 +#define RK3399_PD_EDP 25 +#define RK3399_PD_GIC 26 +#define RK3399_PD_SD 27 +#define RK3399_PD_SDIOAUDIO 28 +#define RK3399_PD_ALIVE 29 + +/* VD_CENTER */ +#define RK3399_PD_CENTER 30 +#define RK3399_PD_VCODEC 31 +#define RK3399_PD_VDU 32 +#define RK3399_PD_RGA 33 +#define RK3399_PD_IEP 34 + +/* VD_GPU */ +#define RK3399_PD_GPU 35 + +/* VD_PMU */ +#define RK3399_PD_PMU 36 + +#endif diff --git a/include/dt-bindings/power/tegra186-powergate.h b/include/dt-bindings/power/tegra186-powergate.h new file mode 100644 index 000000000..388d6e228 --- /dev/null +++ b/include/dt-bindings/power/tegra186-powergate.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _DT_BINDINGS_POWER_TEGRA186_POWERGATE_H +#define _DT_BINDINGS_POWER_TEGRA186_POWERGATE_H + +#define TEGRA186_POWER_DOMAIN_AUD 0 +#define TEGRA186_POWER_DOMAIN_DFD 1 +#define TEGRA186_POWER_DOMAIN_DISP 2 +#define TEGRA186_POWER_DOMAIN_DISPB 3 +#define TEGRA186_POWER_DOMAIN_DISPC 4 +#define TEGRA186_POWER_DOMAIN_ISPA 5 +#define TEGRA186_POWER_DOMAIN_NVDEC 6 +#define TEGRA186_POWER_DOMAIN_NVJPG 7 +#define TEGRA186_POWER_DOMAIN_MPE 8 +#define TEGRA186_POWER_DOMAIN_PCX 9 +#define TEGRA186_POWER_DOMAIN_SAX 10 +#define TEGRA186_POWER_DOMAIN_VE 11 +#define TEGRA186_POWER_DOMAIN_VIC 12 +#define TEGRA186_POWER_DOMAIN_XUSBA 13 +#define TEGRA186_POWER_DOMAIN_XUSBB 14 +#define TEGRA186_POWER_DOMAIN_XUSBC 15 +#define TEGRA186_POWER_DOMAIN_GPU 43 +#define TEGRA186_POWER_DOMAIN_MAX 44 + +#endif diff --git a/include/dt-bindings/power/tegra194-powergate.h b/include/dt-bindings/power/tegra194-powergate.h new file mode 100644 index 000000000..82253742a --- /dev/null +++ b/include/dt-bindings/power/tegra194-powergate.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __ABI_MACH_T194_POWERGATE_T194_H_ +#define __ABI_MACH_T194_POWERGATE_T194_H_ + +#define TEGRA194_POWER_DOMAIN_AUD 1 +#define TEGRA194_POWER_DOMAIN_DISP 2 +#define TEGRA194_POWER_DOMAIN_DISPB 3 +#define TEGRA194_POWER_DOMAIN_DISPC 4 +#define TEGRA194_POWER_DOMAIN_ISPA 5 +#define TEGRA194_POWER_DOMAIN_NVDECA 6 +#define TEGRA194_POWER_DOMAIN_NVJPG 7 +#define TEGRA194_POWER_DOMAIN_NVENCA 8 +#define TEGRA194_POWER_DOMAIN_NVENCB 9 +#define TEGRA194_POWER_DOMAIN_NVDECB 10 +#define TEGRA194_POWER_DOMAIN_SAX 11 +#define TEGRA194_POWER_DOMAIN_VE 12 +#define TEGRA194_POWER_DOMAIN_VIC 13 +#define TEGRA194_POWER_DOMAIN_XUSBA 14 +#define TEGRA194_POWER_DOMAIN_XUSBB 15 +#define TEGRA194_POWER_DOMAIN_XUSBC 16 +#define TEGRA194_POWER_DOMAIN_PCIEX8A 17 +#define TEGRA194_POWER_DOMAIN_PCIEX4A 18 +#define TEGRA194_POWER_DOMAIN_PCIEX1A 19 +#define TEGRA194_POWER_DOMAIN_PCIEX8B 21 +#define TEGRA194_POWER_DOMAIN_PVAA 22 +#define TEGRA194_POWER_DOMAIN_PVAB 23 +#define TEGRA194_POWER_DOMAIN_DLAA 24 +#define TEGRA194_POWER_DOMAIN_DLAB 25 +#define TEGRA194_POWER_DOMAIN_CV 26 +#define TEGRA194_POWER_DOMAIN_GPU 27 +#define TEGRA194_POWER_DOMAIN_MAX 27 + +#endif diff --git a/include/dt-bindings/pwm/pwm.h b/include/dt-bindings/pwm/pwm.h new file mode 100644 index 000000000..ab9a077e3 --- /dev/null +++ b/include/dt-bindings/pwm/pwm.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for most PWM bindings. + * + * Most PWM bindings can include a flags cell as part of the PWM specifier. + * In most cases, the format of the flags cell uses the standard values + * defined in this header. + */ + +#ifndef _DT_BINDINGS_PWM_PWM_H +#define _DT_BINDINGS_PWM_PWM_H + +#define PWM_POLARITY_INVERTED (1 << 0) + +#endif diff --git a/include/dt-bindings/regulator/maxim,max77802.h b/include/dt-bindings/regulator/maxim,max77802.h new file mode 100644 index 000000000..d0baba197 --- /dev/null +++ b/include/dt-bindings/regulator/maxim,max77802.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2014 Google, Inc + * + * Device Tree binding constants for the Maxim 77802 PMIC regulators + */ + +#ifndef _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H +#define _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H + +/* Regulator operating modes */ +#define MAX77802_OPMODE_LP 1 +#define MAX77802_OPMODE_NORMAL 3 + +#endif /* _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H */ diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator.h b/include/dt-bindings/regulator/qcom,rpmh-regulator.h new file mode 100644 index 000000000..86713dcf9 --- /dev/null +++ b/include/dt-bindings/regulator/qcom,rpmh-regulator.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */ + +#ifndef __QCOM_RPMH_REGULATOR_H +#define __QCOM_RPMH_REGULATOR_H + +/* + * These mode constants may be used to specify modes for various RPMh regulator + * device tree properties (e.g. regulator-initial-mode). Each type of regulator + * supports a subset of the possible modes. + * + * %RPMH_REGULATOR_MODE_RET: Retention mode in which only an extremely small + * load current is allowed. This mode is supported + * by LDO and SMPS type regulators. + * %RPMH_REGULATOR_MODE_LPM: Low power mode in which a small load current is + * allowed. This mode corresponds to PFM for SMPS + * and BOB type regulators. This mode is supported + * by LDO, HFSMPS, BOB, and PMIC4 FTSMPS type + * regulators. 
+ * %RPMH_REGULATOR_MODE_AUTO: Auto mode in which the regulator hardware + * automatically switches between LPM and HPM based + * upon the real-time load current. This mode is + * supported by HFSMPS, BOB, and PMIC4 FTSMPS type + * regulators. + * %RPMH_REGULATOR_MODE_HPM: High power mode in which the full rated current + * of the regulator is allowed. This mode + * corresponds to PWM for SMPS and BOB type + * regulators. This mode is supported by all types + * of regulators. + */ +#define RPMH_REGULATOR_MODE_RET 0 +#define RPMH_REGULATOR_MODE_LPM 1 +#define RPMH_REGULATOR_MODE_AUTO 2 +#define RPMH_REGULATOR_MODE_HPM 3 + +#endif diff --git a/include/dt-bindings/reset/altr,rst-mgr-a10.h b/include/dt-bindings/reset/altr,rst-mgr-a10.h new file mode 100644 index 000000000..acb0bbf4f --- /dev/null +++ b/include/dt-bindings/reset/altr,rst-mgr-a10.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2014, Steffen Trumtrar + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H +#define _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H + +/* MPUMODRST */ +#define CPU0_RESET 0 +#define CPU1_RESET 1 +#define WDS_RESET 2 +#define SCUPER_RESET 3 + +/* PER0MODRST */ +#define EMAC0_RESET 32 +#define EMAC1_RESET 33 +#define EMAC2_RESET 34 +#define USB0_RESET 35 +#define USB1_RESET 36 +#define NAND_RESET 37 +#define QSPI_RESET 38 +#define SDMMC_RESET 39 +#define EMAC0_OCP_RESET 40 +#define EMAC1_OCP_RESET 41 +#define EMAC2_OCP_RESET 42 +#define USB0_OCP_RESET 43 +#define USB1_OCP_RESET 44 +#define NAND_OCP_RESET 45 +#define QSPI_OCP_RESET 46 +#define SDMMC_OCP_RESET 47 +#define DMA_RESET 48 +#define SPIM0_RESET 49 +#define SPIM1_RESET 50 +#define SPIS0_RESET 51 +#define SPIS1_RESET 52 +#define DMA_OCP_RESET 53 +#define EMAC_PTP_RESET 54 +/* 55 is empty*/ +#define DMAIF0_RESET 56 +#define DMAIF1_RESET 57 +#define DMAIF2_RESET 58 +#define DMAIF3_RESET 59 +#define DMAIF4_RESET 60 +#define DMAIF5_RESET 61 +#define DMAIF6_RESET 62 +#define DMAIF7_RESET 63 + +/* PER1MODRST */ +#define L4WD0_RESET 64 +#define L4WD1_RESET 65 +#define L4SYSTIMER0_RESET 66 +#define L4SYSTIMER1_RESET 67 +#define SPTIMER0_RESET 68 +#define SPTIMER1_RESET 69 +/* 70-71 is reserved */ +#define I2C0_RESET 72 +#define I2C1_RESET 73 +#define I2C2_RESET 74 +#define I2C3_RESET 75 +#define I2C4_RESET 76 +/* 77-79 is reserved */ +#define UART0_RESET 80 +#define UART1_RESET 81 +/* 82-87 is reserved */ +#define GPIO0_RESET 88 +#define GPIO1_RESET 89 +#define GPIO2_RESET 90 + +/* BRGMODRST */ +#define HPS2FPGA_RESET 96 +#define LWHPS2FPGA_RESET 97 +#define FPGA2HPS_RESET 98 +#define F2SSDRAM0_RESET 99 +#define F2SSDRAM1_RESET 100 +#define F2SSDRAM2_RESET 101 +#define DDRSCH_RESET 102 + +/* SYSMODRST*/ +#define ROM_RESET 128 +#define OCRAM_RESET 129 +/* 130 is reserved */ +#define FPGAMGR_RESET 131 +#define S2F_RESET 132 +#define SYSDBG_RESET 133 +#define OCRAM_OCP_RESET 134 + +/* COLDMODRST */ +#define CLKMGRCOLD_RESET 160 +/* 161-162 is reserved */ +#define S2FCOLD_RESET 163 +#define TIMESTAMPCOLD_RESET 164 +#define TAPCOLD_RESET 165 +#define HMCCOLD_RESET 166 +#define IOMGRCOLD_RESET 167 + +/* NRSTMODRST */ 
+#define NRSTPINOE_RESET 192 + +/* DBGMODRST */ +#define DBG_RESET 224 +#endif diff --git a/include/dt-bindings/reset/altr,rst-mgr-a10sr.h b/include/dt-bindings/reset/altr,rst-mgr-a10sr.h new file mode 100644 index 000000000..9855925e5 --- /dev/null +++ b/include/dt-bindings/reset/altr,rst-mgr-a10sr.h @@ -0,0 +1,33 @@ +/* + * Copyright Intel Corporation (C) 2017. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + * Reset binding definitions for Altera Arria10 MAX5 System Resource Chip + * + * Adapted from altr,rst-mgr-a10.h + */ + +#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_A10SR_H +#define _DT_BINDINGS_RESET_ALTR_RST_MGR_A10SR_H + +/* Peripheral PHY resets */ +#define A10SR_RESET_ENET_HPS 0 +#define A10SR_RESET_PCIE 1 +#define A10SR_RESET_FILE 2 +#define A10SR_RESET_BQSPI 3 +#define A10SR_RESET_USB 4 + +#define A10SR_RESET_NUM 5 + +#endif diff --git a/include/dt-bindings/reset/altr,rst-mgr-s10.h b/include/dt-bindings/reset/altr,rst-mgr-s10.h new file mode 100644 index 000000000..7978c21e4 --- /dev/null +++ b/include/dt-bindings/reset/altr,rst-mgr-s10.h @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2016 Intel Corporation. All rights reserved + * Copyright (C) 2016 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + * derived from Steffen Trumtrar's "altr,rst-mgr-a10.h" + */ + +#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_S10_H +#define _DT_BINDINGS_RESET_ALTR_RST_MGR_S10_H + +/* MPUMODRST */ +#define CPU0_RESET 0 +#define CPU1_RESET 1 +#define CPU2_RESET 2 +#define CPU3_RESET 3 + +/* PER0MODRST */ +#define EMAC0_RESET 32 +#define EMAC1_RESET 33 +#define EMAC2_RESET 34 +#define USB0_RESET 35 +#define USB1_RESET 36 +#define NAND_RESET 37 +/* 38 is empty */ +#define SDMMC_RESET 39 +#define EMAC0_OCP_RESET 40 +#define EMAC1_OCP_RESET 41 +#define EMAC2_OCP_RESET 42 +#define USB0_OCP_RESET 43 +#define USB1_OCP_RESET 44 +#define NAND_OCP_RESET 45 +/* 46 is empty */ +#define SDMMC_OCP_RESET 47 +#define DMA_RESET 48 +#define SPIM0_RESET 49 +#define SPIM1_RESET 50 +#define SPIS0_RESET 51 +#define SPIS1_RESET 52 +#define DMA_OCP_RESET 53 +#define EMAC_PTP_RESET 54 +/* 55 is empty*/ +#define DMAIF0_RESET 56 +#define DMAIF1_RESET 57 +#define DMAIF2_RESET 58 +#define DMAIF3_RESET 59 +#define DMAIF4_RESET 60 +#define DMAIF5_RESET 61 +#define DMAIF6_RESET 62 +#define DMAIF7_RESET 63 + +/* PER1MODRST */ +#define WATCHDOG0_RESET 64 +#define WATCHDOG1_RESET 65 +#define WATCHDOG2_RESET 66 +#define WATCHDOG3_RESET 67 +#define L4SYSTIMER0_RESET 68 +#define L4SYSTIMER1_RESET 69 +#define SPTIMER0_RESET 70 +#define SPTIMER1_RESET 71 +#define I2C0_RESET 72 +#define I2C1_RESET 73 +#define I2C2_RESET 74 +#define I2C3_RESET 75 +#define I2C4_RESET 76 +/* 77-79 is empty */ +#define UART0_RESET 80 +#define UART1_RESET 81 +/* 82-87 is empty */ +#define GPIO0_RESET 88 +#define GPIO1_RESET 89 + +/* BRGMODRST */ +#define SOC2FPGA_RESET 96 +#define LWHPS2FPGA_RESET 97 +#define FPGA2SOC_RESET 98 +#define F2SSDRAM0_RESET 99 +#define F2SSDRAM1_RESET 100 +#define F2SSDRAM2_RESET 101 +#define DDRSCH_RESET 102 + +/* COLDMODRST */ +#define CPUPO0_RESET 160 +#define CPUPO1_RESET 161 +#define CPUPO2_RESET 162 +#define CPUPO3_RESET 163 +/* 164-167 is empty */ +#define L2_RESET 168 + +/* DBGMODRST */ +#define DBG_RESET 224 +#define CSDAP_RESET 225 + +/* TAPMODRST */ +#define TAP_RESET 256 + +#endif diff --git a/include/dt-bindings/reset/altr,rst-mgr.h b/include/dt-bindings/reset/altr,rst-mgr.h new file mode 100644 index 000000000..3f04908fb --- /dev/null +++ b/include/dt-bindings/reset/altr,rst-mgr.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2014, Steffen Trumtrar + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_H +#define _DT_BINDINGS_RESET_ALTR_RST_MGR_H + +/* MPUMODRST */ +#define CPU0_RESET 0 +#define CPU1_RESET 1 +#define WDS_RESET 2 +#define SCUPER_RESET 3 +#define L2_RESET 4 + +/* PERMODRST */ +#define EMAC0_RESET 32 +#define EMAC1_RESET 33 +#define USB0_RESET 34 +#define USB1_RESET 35 +#define NAND_RESET 36 +#define QSPI_RESET 37 +#define L4WD0_RESET 38 +#define L4WD1_RESET 39 +#define OSC1TIMER0_RESET 40 +#define OSC1TIMER1_RESET 41 +#define SPTIMER0_RESET 42 +#define SPTIMER1_RESET 43 +#define I2C0_RESET 44 +#define I2C1_RESET 45 +#define I2C2_RESET 46 +#define I2C3_RESET 47 +#define UART0_RESET 48 +#define UART1_RESET 49 +#define SPIM0_RESET 50 +#define SPIM1_RESET 51 +#define SPIS0_RESET 52 +#define SPIS1_RESET 53 +#define SDMMC_RESET 54 +#define CAN0_RESET 55 +#define CAN1_RESET 56 +#define GPIO0_RESET 57 +#define GPIO1_RESET 58 +#define GPIO2_RESET 59 +#define DMA_RESET 60 +#define SDR_RESET 61 + +/* PER2MODRST */ +#define DMAIF0_RESET 64 +#define DMAIF1_RESET 65 +#define DMAIF2_RESET 66 +#define DMAIF3_RESET 67 +#define DMAIF4_RESET 68 +#define DMAIF5_RESET 69 +#define DMAIF6_RESET 70 +#define DMAIF7_RESET 71 + +/* BRGMODRST */ +#define HPS2FPGA_RESET 96 +#define LWHPS2FPGA_RESET 97 +#define FPGA2HPS_RESET 98 + +/* MISCMODRST*/ +#define ROM_RESET 128 +#define OCRAM_RESET 129 +#define SYSMGR_RESET 130 +#define SYSMGRCOLD_RESET 131 +#define FPGAMGR_RESET 132 +#define ACPIDMAP_RESET 133 +#define S2F_RESET 134 +#define S2FCOLD_RESET 135 +#define NRSTPIN_RESET 136 +#define TIMESTAMPCOLD_RESET 137 +#define CLKMGRCOLD_RESET 138 +#define SCANMGR_RESET 139 +#define FRZCTRLCOLD_RESET 140 +#define SYSDBG_RESET 141 +#define DBG_RESET 142 +#define TAPCOLD_RESET 143 +#define SDRCOLD_RESET 144 + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h b/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h new file mode 100644 index 000000000..05c363678 --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) + * + * Copyright (c) 2018 Baylibre SAS. + * Author: Jerome Brunet + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H +#define _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H + +#define AXG_ARB_TODDR_A 0 +#define AXG_ARB_TODDR_B 1 +#define AXG_ARB_TODDR_C 2 +#define AXG_ARB_FRDDR_A 3 +#define AXG_ARB_FRDDR_B 4 +#define AXG_ARB_FRDDR_C 5 + +#endif /* _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H */ diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h new file mode 100644 index 000000000..ad6f55dab --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h @@ -0,0 +1,124 @@ +/* + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * Copyright (c) 2017 Amlogic, inc. 
+ * Author: Yixun Lan + * + * SPDX-License-Identifier: (GPL-2.0+ OR BSD) + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +#define RESET_PCIE_A 1 +#define RESET_PCIE_B 2 +#define RESET_DDR_TOP 3 +/* 4 */ +#define RESET_VIU 5 +#define RESET_PCIE_PHY 6 +#define RESET_PCIE_APB 7 +/* 8 */ +/* 9 */ +#define RESET_VENC 10 +#define RESET_ASSIST 11 +/* 12 */ +#define RESET_VCBUS 13 +/* 14 */ +/* 15 */ +#define RESET_GIC 16 +#define RESET_CAPB3_DECODE 17 +/* 18-21 */ +#define RESET_SYS_CPU_CAPB3 22 +#define RESET_CBUS_CAPB3 23 +#define RESET_AHB_CNTL 24 +#define RESET_AHB_DATA 25 +#define RESET_VCBUS_CLK81 26 +#define RESET_MMC 27 +/* 28-31 */ +/* RESET1 */ +/* 32 */ +/* 33 */ +#define RESET_USB_OTG 34 +#define RESET_DDR 35 +#define RESET_AO_RESET 36 +/* 37 */ +#define RESET_AHB_SRAM 38 +/* 39 */ +/* 40 */ +#define RESET_DMA 41 +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +/* 44 */ +#define RESET_SD_EMMC_B 45 +#define RESET_SD_EMMC_C 46 +#define RESET_ROM_BOOT 47 +#define RESET_SYS_CPU_0 48 +#define RESET_SYS_CPU_1 49 +#define RESET_SYS_CPU_2 50 +#define RESET_SYS_CPU_3 51 +#define RESET_SYS_CPU_CORE_0 52 +#define RESET_SYS_CPU_CORE_1 53 +#define RESET_SYS_CPU_CORE_2 54 +#define RESET_SYS_CPU_CORE_3 55 +#define RESET_SYS_PLL_DIV 56 +#define RESET_SYS_CPU_AXI 57 +#define RESET_SYS_CPU_L2 58 +#define RESET_SYS_CPU_P 59 +#define RESET_SYS_CPU_MBIST 60 +/* 61-63 */ +/* RESET2 */ +/* 64 */ +/* 65 */ +#define RESET_AUDIO 66 +/* 67 */ +#define RESET_MIPI_HOST 68 +#define RESET_AUDIO_LOCKER 69 +#define RESET_GE2D 70 +/* 71-76 */ +#define RESET_AO_CPU_RESET 77 +/* 78-95 */ +/* RESET3 */ +#define RESET_RING_OSCILLATOR 96 +/* 97-127 */ +/* RESET4 */ +/* 128 */ +/* 129 */ +#define RESET_MIPI_PHY 130 +/* 131-140 */ +#define RESET_VENCL 141 +#define RESET_I2C_MASTER_2 142 +#define RESET_I2C_MASTER_1 143 +/* 144-159 */ +/* RESET5 */ +/* 160-191 */ +/* RESET6 */ +#define RESET_PERIPHS_GENERAL 192 +#define RESET_PERIPHS_SPICC 193 +/* 194 */ +/* 195 */ +#define RESET_PERIPHS_I2C_MASTER_0 196 +/* 197-200 */ +#define RESET_PERIPHS_UART_0 201 +#define RESET_PERIPHS_UART_1 202 +/* 203-204 */ +#define RESET_PERIPHS_SPI_0 205 +#define RESET_PERIPHS_I2C_MASTER_3 206 +/* 207-223 */ +/* RESET7 */ +#define RESET_USB_DDR_0 224 +#define RESET_USB_DDR_1 225 +#define RESET_USB_DDR_2 226 +#define RESET_USB_DDR_3 227 +/* 228 */ +#define RESET_DEVICE_MMC_ARB 229 +/* 230 */ +#define RESET_VID_LOCK 231 +#define RESET_A9_DMC_PIPEL 232 +#define RESET_DMC_VPU_PIPEL 233 +/* 234-255 */ + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h new file mode 100644 index 000000000..524d6077a --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h @@ -0,0 +1,210 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +/* 1 */ +#define RESET_DOS_RESET 2 +#define RESET_DDR_TOP 3 +#define RESET_DCU_RESET 4 +#define RESET_VIU 5 +#define RESET_AIU 6 +#define RESET_VID_PLL_DIV 7 +/* 8 */ +#define RESET_PMUX 9 +#define RESET_VENC 10 +#define RESET_ASSIST 11 +#define RESET_AFIFO2 12 +#define RESET_VCBUS 13 +/* 14 */ +/* 15 */ +#define RESET_GIC 16 +#define RESET_CAPB3_DECODE 17 +#define RESET_NAND_CAPB3 18 +#define RESET_HDMITX_CAPB3 19 +#define RESET_MALI_CAPB3 20 +#define RESET_DOS_CAPB3 21 +#define RESET_SYS_CPU_CAPB3 22 +#define RESET_CBUS_CAPB3 23 +#define RESET_AHB_CNTL 24 +#define RESET_AHB_DATA 25 +#define RESET_VCBUS_CLK81 26 +#define RESET_MMC 27 +#define RESET_MIPI_0 28 +#define RESET_MIPI_1 29 +#define RESET_MIPI_2 30 +#define RESET_MIPI_3 31 +/* RESET1 */ +#define RESET_CPPM 32 +#define RESET_DEMUX 33 +#define RESET_USB_OTG 34 +#define RESET_DDR 35 +#define RESET_AO_RESET 36 +#define RESET_BT656 37 +#define RESET_AHB_SRAM 38 +/* 39 */ +#define RESET_PARSER 40 +#define RESET_BLKMV 41 +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +#define RESET_SD_EMMC_A 44 +#define RESET_SD_EMMC_B 45 +#define RESET_SD_EMMC_C 46 +#define RESET_ROM_BOOT 47 +#define RESET_SYS_CPU_0 48 +#define RESET_SYS_CPU_1 49 +#define RESET_SYS_CPU_2 50 +#define RESET_SYS_CPU_3 51 +#define RESET_SYS_CPU_CORE_0 52 +#define RESET_SYS_CPU_CORE_1 53 +#define RESET_SYS_CPU_CORE_2 54 +#define RESET_SYS_CPU_CORE_3 55 +#define RESET_SYS_PLL_DIV 56 +#define RESET_SYS_CPU_AXI 57 +#define RESET_SYS_CPU_L2 58 +#define RESET_SYS_CPU_P 59 +#define RESET_SYS_CPU_MBIST 60 +/* 61 */ +/* 62 */ +/* 63 */ +/* RESET2 */ +#define RESET_VD_RMEM 64 +#define RESET_AUDIN 65 +#define RESET_HDMI_TX 66 +/* 67 */ +/* 68 */ +/* 69 */ +#define RESET_GE2D 70 +#define RESET_PARSER_REG 71 +#define RESET_PARSER_FETCH 72 +#define RESET_PARSER_CTL 73 +#define RESET_PARSER_TOP 74 +/* 75 */ +/* 76 */ +#define RESET_AO_CPU_RESET 77 +#define RESET_MALI 78 +#define RESET_HDMI_SYSTEM_RESET 79 +/* 80-95 */ +/* RESET3 */ +#define RESET_RING_OSCILLATOR 96 +#define RESET_SYS_CPU 97 +#define RESET_EFUSE 98 +#define RESET_SYS_CPU_BVCI 99 +#define RESET_AIFIFO 100 +#define RESET_TVFE 101 +#define RESET_AHB_BRIDGE_CNTL 102 +/* 103 */ +#define RESET_AUDIO_DAC 104 +#define RESET_DEMUX_TOP 105 +#define RESET_DEMUX_DES 106 +#define RESET_DEMUX_S2P_0 107 +#define RESET_DEMUX_S2P_1 108 +#define RESET_DEMUX_RESET_0 109 +#define RESET_DEMUX_RESET_1 110 +#define RESET_DEMUX_RESET_2 111 +/* 112-127 */ +/* RESET4 */ +/* 128 */ +/* 129 */ +/* 130 */ +/* 131 */ +#define RESET_DVIN_RESET 132 +#define RESET_RDMA 133 +#define RESET_VENCI 134 +#define RESET_VENCP 135 +/* 136 */ +#define RESET_VDAC 137 +#define RESET_RTC 138 +/* 139 */ +#define RESET_VDI6 140 +#define RESET_VENCL 141 +#define RESET_I2C_MASTER_2 142 +#define RESET_I2C_MASTER_1 143 +/* 144-159 */ +/* RESET5 */ +/* 160-191 */ +/* RESET6 */ +#define RESET_PERIPHS_GENERAL 192 +#define RESET_PERIPHS_SPICC 193 +#define RESET_PERIPHS_SMART_CARD 194 +#define RESET_PERIPHS_SAR_ADC 195 +#define RESET_PERIPHS_I2C_MASTER_0 196 +#define RESET_SANA 197 +/* 198 */ +#define RESET_PERIPHS_STREAM_INTERFACE 199 +#define RESET_PERIPHS_SDIO 200 +#define RESET_PERIPHS_UART_0 201 +#define RESET_PERIPHS_UART_1_2 202 +#define RESET_PERIPHS_ASYNC_0 203 +#define RESET_PERIPHS_ASYNC_1 204 +#define RESET_PERIPHS_SPI_0 205 +#define RESET_PERIPHS_SDHC 206 +#define RESET_UART_SLIP 207 +/* 208-223 */ +/* RESET7 */ +#define 
RESET_USB_DDR_0 224 +#define RESET_USB_DDR_1 225 +#define RESET_USB_DDR_2 226 +#define RESET_USB_DDR_3 227 +/* 228 */ +#define RESET_DEVICE_MMC_ARB 229 +/* 230 */ +#define RESET_VID_LOCK 231 +#define RESET_A9_DMC_PIPEL 232 +/* 233-255 */ + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson8b-clkc-reset.h b/include/dt-bindings/reset/amlogic,meson8b-clkc-reset.h new file mode 100644 index 000000000..1f1b56e57 --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson8b-clkc-reset.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2017 Martin Blumenstingl . + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON8B_CLKC_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON8B_CLKC_RESET_H + +#define CLKC_RESET_L2_CACHE_SOFT_RESET 0 +#define CLKC_RESET_AXI_64_TO_128_BRIDGE_A5_SOFT_RESET 1 +#define CLKC_RESET_SCU_SOFT_RESET 2 +#define CLKC_RESET_CPU0_SOFT_RESET 3 +#define CLKC_RESET_CPU1_SOFT_RESET 4 +#define CLKC_RESET_CPU2_SOFT_RESET 5 +#define CLKC_RESET_CPU3_SOFT_RESET 6 +#define CLKC_RESET_A5_GLOBAL_RESET 7 +#define CLKC_RESET_A5_AXI_SOFT_RESET 8 +#define CLKC_RESET_A5_ABP_SOFT_RESET 9 +#define CLKC_RESET_AXI_64_TO_128_BRIDGE_MMC_SOFT_RESET 10 +#define CLKC_RESET_VID_CLK_CNTL_SOFT_RESET 11 +#define CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_POST 12 +#define CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_PRE 13 +#define CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST 14 +#define CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE 15 + +#endif /* _DT_BINDINGS_AMLOGIC_MESON8B_CLKC_RESET_H */ diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h new file mode 100644 index 000000000..a03e86fe2 --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h @@ -0,0 +1,175 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +#define RESET_VLD 1 +#define RESET_IQIDCT 2 +#define RESET_MC 3 +/* 8 */ +#define RESET_VIU 5 +#define RESET_AIU 6 +#define RESET_MCPU 7 +#define RESET_CCPU 8 +#define RESET_PMUX 9 +#define RESET_VENC 10 +#define RESET_ASSIST 11 +#define RESET_AFIFO2 12 +#define RESET_MDEC 13 +#define RESET_VLD_PART 14 +#define RESET_VIFIFO 15 +/* 16-31 */ +/* RESET1 */ +/* 32 */ +#define RESET_DEMUX 33 +#define RESET_USB_OTG 34 +#define RESET_DDR 35 +#define RESET_VDAC_1 36 +#define RESET_BT656 37 +#define RESET_AHB_SRAM 38 +#define RESET_AHB_BRIDGE 39 +#define RESET_PARSER 40 +#define RESET_BLKMV 41 +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +#define RESET_ABUF 44 +#define RESET_AHB_DATA 45 +#define RESET_AHB_CNTL 46 +#define RESET_ROM_BOOT 47 +/* 48-63 */ +/* RESET2 */ +#define RESET_VD_RMEM 64 +#define RESET_AUDIN 65 +#define RESET_DBLK 66 +#define RESET_PIC_DC 67 +#define RESET_PSC 68 +#define RESET_NAND 69 +#define RESET_GE2D 70 +#define RESET_PARSER_REG 71 +#define RESET_PARSER_FETCH 72 +#define RESET_PARSER_CTL 73 +#define RESET_PARSER_TOP 74 +#define RESET_HDMI_APB 75 +#define RESET_AUDIO_APB 76 +#define RESET_MEDIA_CPU 77 +#define RESET_MALI 78 +#define RESET_HDMI_SYSTEM_RESET 79 +/* 80-95 */ +/* RESET3 */ +#define RESET_RING_OSCILLATOR 96 +#define RESET_SYS_CPU_0 97 +#define RESET_EFUSE 98 +#define RESET_SYS_CPU_BVCI 99 +#define RESET_AIFIFO 100 +#define RESET_AUDIO_PLL_MODULATOR 101 +#define RESET_AHB_BRIDGE_CNTL 102 +#define RESET_SYS_CPU_1 103 +#define RESET_AUDIO_DAC 104 +#define RESET_DEMUX_TOP 105 +#define RESET_DEMUX_DES 106 +#define RESET_DEMUX_S2P_0 107 +#define RESET_DEMUX_S2P_1 108 +#define RESET_DEMUX_RESET_0 109 +#define RESET_DEMUX_RESET_1 110 +#define RESET_DEMUX_RESET_2 111 +/* 112-127 */ +/* RESET4 */ +#define RESET_PL310 128 +#define RESET_A5_APB 129 +#define RESET_A5_AXI 130 +#define RESET_A5 131 +#define RESET_DVIN 132 +#define RESET_RDMA 133 +#define RESET_VENCI 134 +#define RESET_VENCP 135 +#define RESET_VENCT 136 +#define RESET_VDAC_4 137 +#define RESET_RTC 138 +#define RESET_A5_DEBUG 139 +#define RESET_VDI6 140 +#define RESET_VENCL 141 +/* 142-159 */ +/* RESET5 */ +#define RESET_DDR_PLL 160 +#define RESET_MISC_PLL 161 +#define RESET_SYS_PLL 162 +#define RESET_HPLL_PLL 163 +#define RESET_AUDIO_PLL 164 +#define RESET_VID2_PLL 165 +/* 166-191 */ +/* RESET6 */ +#define RESET_PERIPHS_GENERAL 192 +#define RESET_PERIPHS_IR_REMOTE 193 +#define RESET_PERIPHS_SMART_CARD 194 +#define RESET_PERIPHS_SAR_ADC 195 +#define RESET_PERIPHS_I2C_MASTER_0 196 +#define RESET_PERIPHS_I2C_MASTER_1 197 +#define RESET_PERIPHS_I2C_SLAVE 198 +#define 
RESET_PERIPHS_STREAM_INTERFACE 199 +#define RESET_PERIPHS_SDIO 200 +#define RESET_PERIPHS_UART_0 201 +#define RESET_PERIPHS_UART_1 202 +#define RESET_PERIPHS_ASYNC_0 203 +#define RESET_PERIPHS_ASYNC_1 204 +#define RESET_PERIPHS_SPI_0 205 +#define RESET_PERIPHS_SPI_1 206 +#define RESET_PERIPHS_LED_PWM 207 +/* 208-223 */ +/* RESET7 */ +/* 224-255 */ + +#endif diff --git a/include/dt-bindings/reset/axg-aoclkc.h b/include/dt-bindings/reset/axg-aoclkc.h new file mode 100644 index 000000000..d342c0b6b --- /dev/null +++ b/include/dt-bindings/reset/axg-aoclkc.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (c) 2016 BayLibre, SAS + * Author: Neil Armstrong + * + * Copyright (c) 2018 Amlogic, inc. + * Author: Qiufang Dai + */ + +#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_AXG_AOCLK +#define DT_BINDINGS_RESET_AMLOGIC_MESON_AXG_AOCLK + +#define RESET_AO_REMOTE 0 +#define RESET_AO_I2C_MASTER 1 +#define RESET_AO_I2C_SLAVE 2 +#define RESET_AO_UART1 3 +#define RESET_AO_UART2 4 +#define RESET_AO_IR_BLASTER 5 + +#endif diff --git a/include/dt-bindings/reset/cortina,gemini-reset.h b/include/dt-bindings/reset/cortina,gemini-reset.h new file mode 100644 index 000000000..f48aff238 --- /dev/null +++ b/include/dt-bindings/reset/cortina,gemini-reset.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_RESET_CORTINA_GEMINI_H +#define _DT_BINDINGS_RESET_CORTINA_GEMINI_H + +#define GEMINI_RESET_DRAM 0 +#define GEMINI_RESET_FLASH 1 +#define GEMINI_RESET_IDE 2 +#define GEMINI_RESET_RAID 3 +#define GEMINI_RESET_SECURITY 4 +#define GEMINI_RESET_GMAC0 5 +#define GEMINI_RESET_GMAC1 6 +#define GEMINI_RESET_PCI 7 +#define GEMINI_RESET_USB0 8 +#define GEMINI_RESET_USB1 9 +#define GEMINI_RESET_DMAC 10 +#define GEMINI_RESET_APB 11 +#define GEMINI_RESET_LPC 12 +#define GEMINI_RESET_LCD 13 +#define GEMINI_RESET_INTCON0 14 +#define GEMINI_RESET_INTCON1 15 +#define GEMINI_RESET_RTC 16 +#define GEMINI_RESET_TIMER 17 +#define GEMINI_RESET_UART 18 +#define GEMINI_RESET_SSP 19 +#define GEMINI_RESET_GPIO0 20 +#define GEMINI_RESET_GPIO1 21 +#define GEMINI_RESET_GPIO2 22 +#define GEMINI_RESET_WDOG 23 +#define GEMINI_RESET_EXTERN 24 +#define GEMINI_RESET_CIR 25 +#define GEMINI_RESET_SATA0 26 +#define GEMINI_RESET_SATA1 27 +#define GEMINI_RESET_TVC 28 +#define GEMINI_RESET_CPU1 30 +#define GEMINI_RESET_GLOBAL 31 + +#endif diff --git a/include/dt-bindings/reset/gxbb-aoclkc.h b/include/dt-bindings/reset/gxbb-aoclkc.h new file mode 100644 index 000000000..9e3fd60c3 --- /dev/null +++ b/include/dt-bindings/reset/gxbb-aoclkc.h @@ -0,0 +1,66 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
+ * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_GXBB_AOCLK +#define DT_BINDINGS_RESET_AMLOGIC_MESON_GXBB_AOCLK + +#define RESET_AO_REMOTE 0 +#define RESET_AO_I2C_MASTER 1 +#define RESET_AO_I2C_SLAVE 2 +#define RESET_AO_UART1 3 +#define RESET_AO_UART2 4 +#define RESET_AO_IR_BLASTER 5 + +#endif diff --git a/include/dt-bindings/reset/hisi,hi6220-resets.h b/include/dt-bindings/reset/hisi,hi6220-resets.h new file mode 100644 index 000000000..e7c362a81 --- /dev/null +++ b/include/dt-bindings/reset/hisi,hi6220-resets.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * This header provides index for the reset controller + * based on hi6220 SoC. 
+ */ +#ifndef _DT_BINDINGS_RESET_CONTROLLER_HI6220 +#define _DT_BINDINGS_RESET_CONTROLLER_HI6220 + +#define PERIPH_RSTDIS0_MMC0 0x000 +#define PERIPH_RSTDIS0_MMC1 0x001 +#define PERIPH_RSTDIS0_MMC2 0x002 +#define PERIPH_RSTDIS0_NANDC 0x003 +#define PERIPH_RSTDIS0_USBOTG_BUS 0x004 +#define PERIPH_RSTDIS0_POR_PICOPHY 0x005 +#define PERIPH_RSTDIS0_USBOTG 0x006 +#define PERIPH_RSTDIS0_USBOTG_32K 0x007 +#define PERIPH_RSTDIS1_HIFI 0x100 +#define PERIPH_RSTDIS1_DIGACODEC 0x105 +#define PERIPH_RSTEN2_IPF 0x200 +#define PERIPH_RSTEN2_SOCP 0x201 +#define PERIPH_RSTEN2_DMAC 0x202 +#define PERIPH_RSTEN2_SECENG 0x203 +#define PERIPH_RSTEN2_ABB 0x204 +#define PERIPH_RSTEN2_HPM0 0x205 +#define PERIPH_RSTEN2_HPM1 0x206 +#define PERIPH_RSTEN2_HPM2 0x207 +#define PERIPH_RSTEN2_HPM3 0x208 +#define PERIPH_RSTEN3_CSSYS 0x300 +#define PERIPH_RSTEN3_I2C0 0x301 +#define PERIPH_RSTEN3_I2C1 0x302 +#define PERIPH_RSTEN3_I2C2 0x303 +#define PERIPH_RSTEN3_I2C3 0x304 +#define PERIPH_RSTEN3_UART1 0x305 +#define PERIPH_RSTEN3_UART2 0x306 +#define PERIPH_RSTEN3_UART3 0x307 +#define PERIPH_RSTEN3_UART4 0x308 +#define PERIPH_RSTEN3_SSP 0x309 +#define PERIPH_RSTEN3_PWM 0x30a +#define PERIPH_RSTEN3_BLPWM 0x30b +#define PERIPH_RSTEN3_TSENSOR 0x30c +#define PERIPH_RSTEN3_DAPB 0x312 +#define PERIPH_RSTEN3_HKADC 0x313 +#define PERIPH_RSTEN3_CODEC_SSI 0x314 +#define PERIPH_RSTEN3_PMUSSI1 0x316 +#define PERIPH_RSTEN8_RS0 0x400 +#define PERIPH_RSTEN8_RS2 0x401 +#define PERIPH_RSTEN8_RS3 0x402 +#define PERIPH_RSTEN8_MS0 0x403 +#define PERIPH_RSTEN8_MS2 0x405 +#define PERIPH_RSTEN8_XG2RAM0 0x406 +#define PERIPH_RSTEN8_X2SRAM_TZMA 0x407 +#define PERIPH_RSTEN8_SRAM 0x408 +#define PERIPH_RSTEN8_HARQ 0x40a +#define PERIPH_RSTEN8_DDRC 0x40c +#define PERIPH_RSTEN8_DDRC_APB 0x40d +#define PERIPH_RSTEN8_DDRPACK_APB 0x40e +#define PERIPH_RSTEN8_DDRT 0x411 +#define PERIPH_RSDIST9_CARM_DAP 0x500 +#define PERIPH_RSDIST9_CARM_ATB 0x501 +#define PERIPH_RSDIST9_CARM_LBUS 0x502 +#define PERIPH_RSDIST9_CARM_POR 0x503 +#define PERIPH_RSDIST9_CARM_CORE 0x504 +#define PERIPH_RSDIST9_CARM_DBG 0x505 +#define PERIPH_RSDIST9_CARM_L2 0x506 +#define PERIPH_RSDIST9_CARM_SOCDBG 0x507 +#define PERIPH_RSDIST9_CARM_ETM 0x508 + +#define MEDIA_G3D 0 +#define MEDIA_CODEC_VPU 2 +#define MEDIA_CODEC_JPEG 3 +#define MEDIA_ISP 4 +#define MEDIA_ADE 5 +#define MEDIA_MMU 6 +#define MEDIA_XG2RAM1 7 + +#endif /*_DT_BINDINGS_RESET_CONTROLLER_HI6220*/ diff --git a/include/dt-bindings/reset/imx7-reset.h b/include/dt-bindings/reset/imx7-reset.h new file mode 100644 index 000000000..63948170c --- /dev/null +++ b/include/dt-bindings/reset/imx7-reset.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2017 Impinj, Inc. + * + * Author: Andrey Smirnov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */
+
+#ifndef DT_BINDING_RESET_IMX7_H
+#define DT_BINDING_RESET_IMX7_H
+
+#define IMX7_RESET_A7_CORE_POR_RESET0 0
+#define IMX7_RESET_A7_CORE_POR_RESET1 1
+#define IMX7_RESET_A7_CORE_RESET0 2
+#define IMX7_RESET_A7_CORE_RESET1 3
+#define IMX7_RESET_A7_DBG_RESET0 4
+#define IMX7_RESET_A7_DBG_RESET1 5
+#define IMX7_RESET_A7_ETM_RESET0 6
+#define IMX7_RESET_A7_ETM_RESET1 7
+#define IMX7_RESET_A7_SOC_DBG_RESET 8
+#define IMX7_RESET_A7_L2RESET 9
+#define IMX7_RESET_SW_M4C_RST 10
+#define IMX7_RESET_SW_M4P_RST 11
+#define IMX7_RESET_EIM_RST 12
+#define IMX7_RESET_HSICPHY_PORT_RST 13
+#define IMX7_RESET_USBPHY1_POR 14
+#define IMX7_RESET_USBPHY1_PORT_RST 15
+#define IMX7_RESET_USBPHY2_POR 16
+#define IMX7_RESET_USBPHY2_PORT_RST 17
+#define IMX7_RESET_MIPI_PHY_MRST 18
+#define IMX7_RESET_MIPI_PHY_SRST 19
+
+/*
+ * IMX7_RESET_PCIEPHY is a logical reset line combining PCIEPHY_BTN
+ * and PCIEPHY_G_RST
+ */
+#define IMX7_RESET_PCIEPHY 20
+#define IMX7_RESET_PCIEPHY_PERST 21
+
+/*
+ * IMX7_RESET_PCIE_CTRL_APPS_EN is not strictly a reset line, but it
+ * can be used to inhibit the PCIe LTSSM, so, in a way, it can be thought
+ * of as one
+ */
+#define IMX7_RESET_PCIE_CTRL_APPS_EN 22
+#define IMX7_RESET_DDRC_PRST 23
+#define IMX7_RESET_DDRC_CORE_RST 24
+
+#define IMX7_RESET_NUM 25
+
+#endif
+
diff --git a/include/dt-bindings/reset/mt2701-resets.h b/include/dt-bindings/reset/mt2701-resets.h
new file mode 100644
index 000000000..50b7f066d
--- /dev/null
+++ b/include/dt-bindings/reset/mt2701-resets.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015 MediaTek, Shunli Wang
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
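A brief usage sketch, not part of the patch itself: consumers pick up these IMX7_RESET_* indices through the standard reset binding. The hypothetical fragment below shows how a PCIe controller node might reference the combined PHY reset and the APPS_EN line described in the imx7-reset.h comments above; the &src label (assumed to point at the i.MX7 system reset controller node) and the &pcie label are assumptions for illustration:

    #include <dt-bindings/reset/imx7-reset.h>

    &pcie {
            /* request the logical PHY reset plus the LTSSM-enable "reset" */
            resets = <&src IMX7_RESET_PCIEPHY>,
                     <&src IMX7_RESET_PCIE_CTRL_APPS_EN>;
            reset-names = "pciephy", "apps";
    };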
+ */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT2701 +#define _DT_BINDINGS_RESET_CONTROLLER_MT2701 + +/* INFRACFG resets */ +#define MT2701_INFRA_EMI_REG_RST 0 +#define MT2701_INFRA_DRAMC0_A0_RST 1 +#define MT2701_INFRA_FHCTL_RST 2 +#define MT2701_INFRA_APCIRQ_EINT_RST 3 +#define MT2701_INFRA_APXGPT_RST 4 +#define MT2701_INFRA_SCPSYS_RST 5 +#define MT2701_INFRA_KP_RST 6 +#define MT2701_INFRA_PMIC_WRAP_RST 7 +#define MT2701_INFRA_MIPI_RST 8 +#define MT2701_INFRA_IRRX_RST 9 +#define MT2701_INFRA_CEC_RST 10 +#define MT2701_INFRA_EMI_RST 32 +#define MT2701_INFRA_DRAMC0_RST 34 +#define MT2701_INFRA_TRNG_RST 37 +#define MT2701_INFRA_SYSIRQ_RST 38 + +/* PERICFG resets */ +#define MT2701_PERI_UART0_SW_RST 0 +#define MT2701_PERI_UART1_SW_RST 1 +#define MT2701_PERI_UART2_SW_RST 2 +#define MT2701_PERI_UART3_SW_RST 3 +#define MT2701_PERI_GCPU_SW_RST 5 +#define MT2701_PERI_BTIF_SW_RST 6 +#define MT2701_PERI_PWM_SW_RST 8 +#define MT2701_PERI_AUXADC_SW_RST 10 +#define MT2701_PERI_DMA_SW_RST 11 +#define MT2701_PERI_NFI_SW_RST 14 +#define MT2701_PERI_NLI_SW_RST 15 +#define MT2701_PERI_THERM_SW_RST 16 +#define MT2701_PERI_MSDC2_SW_RST 17 +#define MT2701_PERI_MSDC0_SW_RST 19 +#define MT2701_PERI_MSDC1_SW_RST 20 +#define MT2701_PERI_I2C0_SW_RST 22 +#define MT2701_PERI_I2C1_SW_RST 23 +#define MT2701_PERI_I2C2_SW_RST 24 +#define MT2701_PERI_I2C3_SW_RST 25 +#define MT2701_PERI_USB_SW_RST 28 +#define MT2701_PERI_ETH_SW_RST 29 +#define MT2701_PERI_SPI0_SW_RST 33 + +/* TOPRGU resets */ +#define MT2701_TOPRGU_INFRA_RST 0 +#define MT2701_TOPRGU_MM_RST 1 +#define MT2701_TOPRGU_MFG_RST 2 +#define MT2701_TOPRGU_ETHDMA_RST 3 +#define MT2701_TOPRGU_VDEC_RST 4 +#define MT2701_TOPRGU_VENC_IMG_RST 5 +#define MT2701_TOPRGU_DDRPHY_RST 6 +#define MT2701_TOPRGU_MD_RST 7 +#define MT2701_TOPRGU_INFRA_AO_RST 8 +#define MT2701_TOPRGU_CONN_RST 9 +#define MT2701_TOPRGU_APMIXED_RST 10 +#define MT2701_TOPRGU_HIFSYS_RST 11 +#define MT2701_TOPRGU_CONN_MCU_RST 12 +#define MT2701_TOPRGU_BDP_DISP_RST 13 + +/* HIFSYS resets */ +#define MT2701_HIFSYS_UHOST0_RST 3 +#define MT2701_HIFSYS_UHOST1_RST 4 +#define MT2701_HIFSYS_UPHY0_RST 21 +#define MT2701_HIFSYS_UPHY1_RST 22 +#define MT2701_HIFSYS_PCIE0_RST 24 +#define MT2701_HIFSYS_PCIE1_RST 25 +#define MT2701_HIFSYS_PCIE2_RST 26 + +/* ETHSYS resets */ +#define MT2701_ETHSYS_SYS_RST 0 +#define MT2701_ETHSYS_MCM_RST 2 +#define MT2701_ETHSYS_FE_RST 6 +#define MT2701_ETHSYS_GMAC_RST 23 +#define MT2701_ETHSYS_PPE_RST 31 + +/* G3DSYS resets */ +#define MT2701_G3DSYS_CORE_RST 0 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT2701 */ diff --git a/include/dt-bindings/reset/mt7622-reset.h b/include/dt-bindings/reset/mt7622-reset.h new file mode 100644 index 000000000..234052f80 --- /dev/null +++ b/include/dt-bindings/reset/mt7622-reset.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Sean Wang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7622 +#define _DT_BINDINGS_RESET_CONTROLLER_MT7622 + +/* INFRACFG resets */ +#define MT7622_INFRA_EMI_REG_RST 0 +#define MT7622_INFRA_DRAMC0_A0_RST 1 +#define MT7622_INFRA_APCIRQ_EINT_RST 3 +#define MT7622_INFRA_APXGPT_RST 4 +#define MT7622_INFRA_SCPSYS_RST 5 +#define MT7622_INFRA_PMIC_WRAP_RST 7 +#define MT7622_INFRA_IRRX_RST 9 +#define MT7622_INFRA_EMI_RST 16 +#define MT7622_INFRA_WED0_RST 17 +#define MT7622_INFRA_DRAMC_RST 18 +#define MT7622_INFRA_CCI_INTF_RST 19 +#define MT7622_INFRA_TRNG_RST 21 +#define MT7622_INFRA_SYSIRQ_RST 22 +#define MT7622_INFRA_WED1_RST 25 + +/* PERICFG Subsystem resets */ +#define MT7622_PERI_UART0_SW_RST 0 +#define MT7622_PERI_UART1_SW_RST 1 +#define MT7622_PERI_UART2_SW_RST 2 +#define MT7622_PERI_UART3_SW_RST 3 +#define MT7622_PERI_UART4_SW_RST 4 +#define MT7622_PERI_BTIF_SW_RST 6 +#define MT7622_PERI_PWM_SW_RST 8 +#define MT7622_PERI_AUXADC_SW_RST 10 +#define MT7622_PERI_DMA_SW_RST 11 +#define MT7622_PERI_IRTX_SW_RST 13 +#define MT7622_PERI_NFI_SW_RST 14 +#define MT7622_PERI_THERM_SW_RST 16 +#define MT7622_PERI_MSDC0_SW_RST 19 +#define MT7622_PERI_MSDC1_SW_RST 20 +#define MT7622_PERI_I2C0_SW_RST 22 +#define MT7622_PERI_I2C1_SW_RST 23 +#define MT7622_PERI_I2C2_SW_RST 24 +#define MT7622_PERI_SPI0_SW_RST 33 +#define MT7622_PERI_SPI1_SW_RST 34 +#define MT7622_PERI_FLASHIF_SW_RST 36 + +/* TOPRGU resets */ +#define MT7622_TOPRGU_INFRA_RST 0 +#define MT7622_TOPRGU_ETHDMA_RST 1 +#define MT7622_TOPRGU_DDRPHY_RST 6 +#define MT7622_TOPRGU_INFRA_AO_RST 8 +#define MT7622_TOPRGU_CONN_RST 9 +#define MT7622_TOPRGU_APMIXED_RST 10 +#define MT7622_TOPRGU_CONN_MCU_RST 12 + +/* PCIe/SATA Subsystem resets */ +#define MT7622_SATA_PHY_REG_RST 12 +#define MT7622_SATA_PHY_SW_RST 13 +#define MT7622_SATA_AXI_BUS_RST 15 +#define MT7622_PCIE1_CORE_RST 19 +#define MT7622_PCIE1_MMIO_RST 20 +#define MT7622_PCIE1_HRST 21 +#define MT7622_PCIE1_USER_RST 22 +#define MT7622_PCIE1_PIPE_RST 23 +#define MT7622_PCIE0_CORE_RST 27 +#define MT7622_PCIE0_MMIO_RST 28 +#define MT7622_PCIE0_HRST 29 +#define MT7622_PCIE0_USER_RST 30 +#define MT7622_PCIE0_PIPE_RST 31 + +/* SSUSB Subsystem resets */ +#define MT7622_SSUSB_PHY_PWR_RST 3 +#define MT7622_SSUSB_MAC_PWR_RST 4 + +/* ETHSYS Subsystem resets */ +#define MT7622_ETHSYS_SYS_RST 0 +#define MT7622_ETHSYS_MCM_RST 2 +#define MT7622_ETHSYS_HSDMA_RST 5 +#define MT7622_ETHSYS_FE_RST 6 +#define MT7622_ETHSYS_GMAC_RST 23 +#define MT7622_ETHSYS_EPHY_RST 24 +#define MT7622_ETHSYS_CRYPTO_RST 29 +#define MT7622_ETHSYS_PPE_RST 31 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7622 */ diff --git a/include/dt-bindings/reset/mt8135-resets.h b/include/dt-bindings/reset/mt8135-resets.h new file mode 100644 index 000000000..1fb629508 --- /dev/null +++ b/include/dt-bindings/reset/mt8135-resets.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Flora Fu, MediaTek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8135 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8135 + +/* INFRACFG resets */ +#define MT8135_INFRA_EMI_REG_RST 0 +#define MT8135_INFRA_DRAMC0_A0_RST 1 +#define MT8135_INFRA_CCIF0_RST 2 +#define MT8135_INFRA_APCIRQ_EINT_RST 3 +#define MT8135_INFRA_APXGPT_RST 4 +#define MT8135_INFRA_SCPSYS_RST 5 +#define MT8135_INFRA_CCIF1_RST 6 +#define MT8135_INFRA_PMIC_WRAP_RST 7 +#define MT8135_INFRA_KP_RST 8 +#define MT8135_INFRA_EMI_RST 32 +#define MT8135_INFRA_DRAMC0_RST 34 +#define MT8135_INFRA_SMI_RST 35 +#define MT8135_INFRA_M4U_RST 36 + +/* PERICFG resets */ +#define MT8135_PERI_UART0_SW_RST 0 +#define MT8135_PERI_UART1_SW_RST 1 +#define MT8135_PERI_UART2_SW_RST 2 +#define MT8135_PERI_UART3_SW_RST 3 +#define MT8135_PERI_IRDA_SW_RST 4 +#define MT8135_PERI_PTP_SW_RST 5 +#define MT8135_PERI_AP_HIF_SW_RST 6 +#define MT8135_PERI_GPCU_SW_RST 7 +#define MT8135_PERI_MD_HIF_SW_RST 8 +#define MT8135_PERI_NLI_SW_RST 9 +#define MT8135_PERI_AUXADC_SW_RST 10 +#define MT8135_PERI_DMA_SW_RST 11 +#define MT8135_PERI_NFI_SW_RST 14 +#define MT8135_PERI_PWM_SW_RST 15 +#define MT8135_PERI_THERM_SW_RST 16 +#define MT8135_PERI_MSDC0_SW_RST 17 +#define MT8135_PERI_MSDC1_SW_RST 18 +#define MT8135_PERI_MSDC2_SW_RST 19 +#define MT8135_PERI_MSDC3_SW_RST 20 +#define MT8135_PERI_I2C0_SW_RST 22 +#define MT8135_PERI_I2C1_SW_RST 23 +#define MT8135_PERI_I2C2_SW_RST 24 +#define MT8135_PERI_I2C3_SW_RST 25 +#define MT8135_PERI_I2C4_SW_RST 26 +#define MT8135_PERI_I2C5_SW_RST 27 +#define MT8135_PERI_I2C6_SW_RST 28 +#define MT8135_PERI_USB_SW_RST 29 +#define MT8135_PERI_SPI1_SW_RST 33 +#define MT8135_PERI_PWRAP_BRIDGE_SW_RST 34 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8135 */ diff --git a/include/dt-bindings/reset/mt8173-resets.h b/include/dt-bindings/reset/mt8173-resets.h new file mode 100644 index 000000000..9464b37cf --- /dev/null +++ b/include/dt-bindings/reset/mt8173-resets.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Flora Fu, MediaTek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8173 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8173 + +/* INFRACFG resets */ +#define MT8173_INFRA_EMI_REG_RST 0 +#define MT8173_INFRA_DRAMC0_A0_RST 1 +#define MT8173_INFRA_APCIRQ_EINT_RST 3 +#define MT8173_INFRA_APXGPT_RST 4 +#define MT8173_INFRA_SCPSYS_RST 5 +#define MT8173_INFRA_KP_RST 6 +#define MT8173_INFRA_PMIC_WRAP_RST 7 +#define MT8173_INFRA_MPIP_RST 8 +#define MT8173_INFRA_CEC_RST 9 +#define MT8173_INFRA_EMI_RST 32 +#define MT8173_INFRA_DRAMC0_RST 34 +#define MT8173_INFRA_APMIXEDSYS_RST 35 +#define MT8173_INFRA_MIPI_DSI_RST 36 +#define MT8173_INFRA_TRNG_RST 37 +#define MT8173_INFRA_SYSIRQ_RST 38 +#define MT8173_INFRA_MIPI_CSI_RST 39 +#define MT8173_INFRA_GCE_FAXI_RST 40 +#define MT8173_INFRA_MMIOMMURST 47 + + +/* PERICFG resets */ +#define MT8173_PERI_UART0_SW_RST 0 +#define MT8173_PERI_UART1_SW_RST 1 +#define MT8173_PERI_UART2_SW_RST 2 +#define MT8173_PERI_UART3_SW_RST 3 +#define MT8173_PERI_IRRX_SW_RST 4 +#define MT8173_PERI_PWM_SW_RST 8 +#define MT8173_PERI_AUXADC_SW_RST 10 +#define MT8173_PERI_DMA_SW_RST 11 +#define MT8173_PERI_I2C6_SW_RST 13 +#define MT8173_PERI_NFI_SW_RST 14 +#define MT8173_PERI_THERM_SW_RST 16 +#define MT8173_PERI_MSDC2_SW_RST 17 +#define MT8173_PERI_MSDC3_SW_RST 18 +#define MT8173_PERI_MSDC0_SW_RST 19 +#define MT8173_PERI_MSDC1_SW_RST 20 +#define MT8173_PERI_I2C0_SW_RST 22 +#define MT8173_PERI_I2C1_SW_RST 23 +#define MT8173_PERI_I2C2_SW_RST 24 +#define MT8173_PERI_I2C3_SW_RST 25 +#define MT8173_PERI_I2C4_SW_RST 26 +#define MT8173_PERI_HDMI_SW_RST 29 +#define MT8173_PERI_SPI0_SW_RST 33 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8173 */ diff --git a/include/dt-bindings/reset/oxsemi,ox810se.h b/include/dt-bindings/reset/oxsemi,ox810se.h new file mode 100644 index 000000000..960c26e45 --- /dev/null +++ b/include/dt-bindings/reset/oxsemi,ox810se.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2016 Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef DT_RESET_OXSEMI_OX810SE_H +#define DT_RESET_OXSEMI_OX810SE_H + +#define RESET_ARM 0 +#define RESET_COPRO 1 +/* Reserved 2 */ +/* Reserved 3 */ +#define RESET_USBHS 4 +#define RESET_USBHSPHY 5 +#define RESET_MAC 6 +#define RESET_PCI 7 +#define RESET_DMA 8 +#define RESET_DPE 9 +#define RESET_DDR 10 +#define RESET_SATA 11 +#define RESET_SATA_LINK 12 +#define RESET_SATA_PHY 13 + /* Reserved 14 */ +#define RESET_NAND 15 +#define RESET_GPIO 16 +#define RESET_UART1 17 +#define RESET_UART2 18 +#define RESET_MISC 19 +#define RESET_I2S 20 +#define RESET_AHB_MON 21 +#define RESET_UART3 22 +#define RESET_UART4 23 +#define RESET_SGDMA 24 +/* Reserved 25 */ +/* Reserved 26 */ +/* Reserved 27 */ +/* Reserved 28 */ +/* Reserved 29 */ +/* Reserved 30 */ +#define RESET_BUS 31 + +#endif /* DT_RESET_OXSEMI_OX810SE_H */ diff --git a/include/dt-bindings/reset/oxsemi,ox820.h b/include/dt-bindings/reset/oxsemi,ox820.h new file mode 100644 index 000000000..cc6797bf0 --- /dev/null +++ b/include/dt-bindings/reset/oxsemi,ox820.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2016 Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef DT_RESET_OXSEMI_OX820_H +#define DT_RESET_OXSEMI_OX820_H + +#define RESET_SCU 0 +#define RESET_LEON 1 +#define RESET_ARM0 2 +#define RESET_ARM1 3 +#define RESET_USBHS 4 +#define RESET_USBPHYA 5 +#define RESET_MAC 6 +#define RESET_PCIEA 7 +#define RESET_SGDMA 8 +#define RESET_CIPHER 9 +#define RESET_DDR 10 +#define RESET_SATA 11 +#define RESET_SATA_LINK 12 +#define RESET_SATA_PHY 13 +#define RESET_PCIEPHY 14 +#define RESET_NAND 15 +#define RESET_GPIO 16 +#define RESET_UART1 17 +#define RESET_UART2 18 +#define RESET_MISC 19 +#define RESET_I2S 20 +#define RESET_SD 21 +#define RESET_MAC_2 22 +#define RESET_PCIEB 23 +#define RESET_VIDEO 24 +#define RESET_DDR_PHY 25 +#define RESET_USBPHYB 26 +#define RESET_USBDEV 27 +/* Reserved 29 */ +#define RESET_ARMDBG 29 +#define RESET_PLLA 30 +#define RESET_PLLB 31 + +#endif /* DT_RESET_OXSEMI_OX820_H */ diff --git a/include/dt-bindings/reset/pistachio-resets.h b/include/dt-bindings/reset/pistachio-resets.h new file mode 100644 index 000000000..5bb4dd0d6 --- /dev/null +++ b/include/dt-bindings/reset/pistachio-resets.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the reset controller + * present in the Pistachio SoC + */ + +#ifndef _PISTACHIO_RESETS_H +#define _PISTACHIO_RESETS_H + +#define PISTACHIO_RESET_I2C0 0 +#define PISTACHIO_RESET_I2C1 1 +#define PISTACHIO_RESET_I2C2 2 +#define PISTACHIO_RESET_I2C3 3 +#define PISTACHIO_RESET_I2S_IN 4 +#define PISTACHIO_RESET_PRL_OUT 5 +#define PISTACHIO_RESET_SPDIF_OUT 6 +#define PISTACHIO_RESET_SPI 7 +#define PISTACHIO_RESET_PWM_PDM 8 +#define PISTACHIO_RESET_UART0 9 +#define PISTACHIO_RESET_UART1 10 +#define PISTACHIO_RESET_QSPI 11 +#define PISTACHIO_RESET_MDC 12 +#define PISTACHIO_RESET_SDHOST 13 +#define PISTACHIO_RESET_ETHERNET 14 +#define PISTACHIO_RESET_IR 15 +#define PISTACHIO_RESET_HASH 16 +#define 
PISTACHIO_RESET_TIMER 17 +#define PISTACHIO_RESET_I2S_OUT 18 +#define PISTACHIO_RESET_SPDIF_IN 19 +#define PISTACHIO_RESET_EVT 20 +#define PISTACHIO_RESET_USB_H 21 +#define PISTACHIO_RESET_USB_PR 22 +#define PISTACHIO_RESET_USB_PHY_PR 23 +#define PISTACHIO_RESET_USB_PHY_PON 24 +#define PISTACHIO_RESET_MAX 24 + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-apq8084.h b/include/dt-bindings/reset/qcom,gcc-apq8084.h new file mode 100644 index 000000000..527caaf48 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-apq8084.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_RESET_APQ_GCC_8084_H +#define _DT_BINDINGS_RESET_APQ_GCC_8084_H + +#define GCC_SYSTEM_NOC_BCR 0 +#define GCC_CONFIG_NOC_BCR 1 +#define GCC_PERIPH_NOC_BCR 2 +#define GCC_IMEM_BCR 3 +#define GCC_MMSS_BCR 4 +#define GCC_QDSS_BCR 5 +#define GCC_USB_30_BCR 6 +#define GCC_USB3_PHY_BCR 7 +#define GCC_USB_HS_HSIC_BCR 8 +#define GCC_USB_HS_BCR 9 +#define GCC_USB2A_PHY_BCR 10 +#define GCC_USB2B_PHY_BCR 11 +#define GCC_SDCC1_BCR 12 +#define GCC_SDCC2_BCR 13 +#define GCC_SDCC3_BCR 14 +#define GCC_SDCC4_BCR 15 +#define GCC_BLSP1_BCR 16 +#define GCC_BLSP1_QUP1_BCR 17 +#define GCC_BLSP1_UART1_BCR 18 +#define GCC_BLSP1_QUP2_BCR 19 +#define GCC_BLSP1_UART2_BCR 20 +#define GCC_BLSP1_QUP3_BCR 21 +#define GCC_BLSP1_UART3_BCR 22 +#define GCC_BLSP1_QUP4_BCR 23 +#define GCC_BLSP1_UART4_BCR 24 +#define GCC_BLSP1_QUP5_BCR 25 +#define GCC_BLSP1_UART5_BCR 26 +#define GCC_BLSP1_QUP6_BCR 27 +#define GCC_BLSP1_UART6_BCR 28 +#define GCC_BLSP2_BCR 29 +#define GCC_BLSP2_QUP1_BCR 30 +#define GCC_BLSP2_UART1_BCR 31 +#define GCC_BLSP2_QUP2_BCR 32 +#define GCC_BLSP2_UART2_BCR 33 +#define GCC_BLSP2_QUP3_BCR 34 +#define GCC_BLSP2_UART3_BCR 35 +#define GCC_BLSP2_QUP4_BCR 36 +#define GCC_BLSP2_UART4_BCR 37 +#define GCC_BLSP2_QUP5_BCR 38 +#define GCC_BLSP2_UART5_BCR 39 +#define GCC_BLSP2_QUP6_BCR 40 +#define GCC_BLSP2_UART6_BCR 41 +#define GCC_PDM_BCR 42 +#define GCC_PRNG_BCR 43 +#define GCC_BAM_DMA_BCR 44 +#define GCC_TSIF_BCR 45 +#define GCC_TCSR_BCR 46 +#define GCC_BOOT_ROM_BCR 47 +#define GCC_MSG_RAM_BCR 48 +#define GCC_TLMM_BCR 49 +#define GCC_MPM_BCR 50 +#define GCC_MPM_AHB_RESET 51 +#define GCC_MPM_NON_AHB_RESET 52 +#define GCC_SEC_CTRL_BCR 53 +#define GCC_SPMI_BCR 54 +#define GCC_SPDM_BCR 55 +#define GCC_CE1_BCR 56 +#define GCC_CE2_BCR 57 +#define GCC_BIMC_BCR 58 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 59 +#define GCC_SNOC_BUS_TIMEOUT2_BCR 60 +#define GCC_PNOC_BUS_TIMEOUT0_BCR 61 +#define GCC_PNOC_BUS_TIMEOUT1_BCR 62 +#define GCC_PNOC_BUS_TIMEOUT2_BCR 63 +#define GCC_PNOC_BUS_TIMEOUT3_BCR 64 +#define GCC_PNOC_BUS_TIMEOUT4_BCR 65 +#define GCC_CNOC_BUS_TIMEOUT0_BCR 66 +#define GCC_CNOC_BUS_TIMEOUT1_BCR 67 +#define GCC_CNOC_BUS_TIMEOUT2_BCR 68 +#define GCC_CNOC_BUS_TIMEOUT3_BCR 69 +#define GCC_CNOC_BUS_TIMEOUT4_BCR 70 +#define GCC_CNOC_BUS_TIMEOUT5_BCR 71 +#define GCC_CNOC_BUS_TIMEOUT6_BCR 72 +#define GCC_DEHR_BCR 73 +#define GCC_RBCPR_BCR 74 +#define GCC_MSS_RESTART 75 +#define GCC_LPASS_RESTART 76 +#define GCC_WCSS_RESTART 77 
+#define GCC_VENUS_RESTART 78 +#define GCC_COPSS_SMMU_BCR 79 +#define GCC_SPSS_BCR 80 +#define GCC_PCIE_0_BCR 81 +#define GCC_PCIE_0_PHY_BCR 82 +#define GCC_PCIE_1_BCR 83 +#define GCC_PCIE_1_PHY_BCR 84 +#define GCC_USB_30_SEC_BCR 85 +#define GCC_USB3_SEC_PHY_BCR 86 +#define GCC_SATA_BCR 87 +#define GCC_CE3_BCR 88 +#define GCC_UFS_BCR 89 +#define GCC_USB30_PHY_COM_BCR 90 + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h new file mode 100644 index 000000000..de9c81409 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_RESET_IPQ_806X_H +#define _DT_BINDINGS_RESET_IPQ_806X_H + +#define QDSS_STM_RESET 0 +#define AFAB_SMPSS_S_RESET 1 +#define AFAB_SMPSS_M1_RESET 2 +#define AFAB_SMPSS_M0_RESET 3 +#define AFAB_EBI1_CH0_RESET 4 +#define AFAB_EBI1_CH1_RESET 5 +#define SFAB_ADM0_M0_RESET 6 +#define SFAB_ADM0_M1_RESET 7 +#define SFAB_ADM0_M2_RESET 8 +#define ADM0_C2_RESET 9 +#define ADM0_C1_RESET 10 +#define ADM0_C0_RESET 11 +#define ADM0_PBUS_RESET 12 +#define ADM0_RESET 13 +#define QDSS_CLKS_SW_RESET 14 +#define QDSS_POR_RESET 15 +#define QDSS_TSCTR_RESET 16 +#define QDSS_HRESET_RESET 17 +#define QDSS_AXI_RESET 18 +#define QDSS_DBG_RESET 19 +#define SFAB_PCIE_M_RESET 20 +#define SFAB_PCIE_S_RESET 21 +#define PCIE_EXT_RESET 22 +#define PCIE_PHY_RESET 23 +#define PCIE_PCI_RESET 24 +#define PCIE_POR_RESET 25 +#define PCIE_HCLK_RESET 26 +#define PCIE_ACLK_RESET 27 +#define SFAB_LPASS_RESET 28 +#define SFAB_AFAB_M_RESET 29 +#define AFAB_SFAB_M0_RESET 30 +#define AFAB_SFAB_M1_RESET 31 +#define SFAB_SATA_S_RESET 32 +#define SFAB_DFAB_M_RESET 33 +#define DFAB_SFAB_M_RESET 34 +#define DFAB_SWAY0_RESET 35 +#define DFAB_SWAY1_RESET 36 +#define DFAB_ARB0_RESET 37 +#define DFAB_ARB1_RESET 38 +#define PPSS_PROC_RESET 39 +#define PPSS_RESET 40 +#define DMA_BAM_RESET 41 +#define SPS_TIC_H_RESET 42 +#define SFAB_CFPB_M_RESET 43 +#define SFAB_CFPB_S_RESET 44 +#define TSIF_H_RESET 45 +#define CE1_H_RESET 46 +#define CE1_CORE_RESET 47 +#define CE1_SLEEP_RESET 48 +#define CE2_H_RESET 49 +#define CE2_CORE_RESET 50 +#define SFAB_SFPB_M_RESET 51 +#define SFAB_SFPB_S_RESET 52 +#define RPM_PROC_RESET 53 +#define PMIC_SSBI2_RESET 54 +#define SDC1_RESET 55 +#define SDC2_RESET 56 +#define SDC3_RESET 57 +#define SDC4_RESET 58 +#define USB_HS1_RESET 59 +#define USB_HSIC_RESET 60 +#define USB_FS1_XCVR_RESET 61 +#define USB_FS1_RESET 62 +#define GSBI1_RESET 63 +#define GSBI2_RESET 64 +#define GSBI3_RESET 65 +#define GSBI4_RESET 66 +#define GSBI5_RESET 67 +#define GSBI6_RESET 68 +#define GSBI7_RESET 69 +#define SPDM_RESET 70 +#define SEC_CTRL_RESET 71 +#define TLMM_H_RESET 72 +#define SFAB_SATA_M_RESET 73 +#define SATA_RESET 74 +#define TSSC_RESET 75 +#define PDM_RESET 76 +#define MPM_H_RESET 77 +#define MPM_RESET 78 +#define SFAB_SMPSS_S_RESET 79 +#define PRNG_RESET 80 +#define SFAB_CE3_M_RESET 81 +#define SFAB_CE3_S_RESET 82 +#define CE3_SLEEP_RESET 83 +#define PCIE_1_M_RESET 84 
+#define PCIE_1_S_RESET 85 +#define PCIE_1_EXT_RESET 86 +#define PCIE_1_PHY_RESET 87 +#define PCIE_1_PCI_RESET 88 +#define PCIE_1_POR_RESET 89 +#define PCIE_1_HCLK_RESET 90 +#define PCIE_1_ACLK_RESET 91 +#define PCIE_2_M_RESET 92 +#define PCIE_2_S_RESET 93 +#define PCIE_2_EXT_RESET 94 +#define PCIE_2_PHY_RESET 95 +#define PCIE_2_PCI_RESET 96 +#define PCIE_2_POR_RESET 97 +#define PCIE_2_HCLK_RESET 98 +#define PCIE_2_ACLK_RESET 99 +#define SFAB_USB30_S_RESET 100 +#define SFAB_USB30_M_RESET 101 +#define USB30_0_PORT2_HS_PHY_RESET 102 +#define USB30_0_MASTER_RESET 103 +#define USB30_0_SLEEP_RESET 104 +#define USB30_0_UTMI_PHY_RESET 105 +#define USB30_0_POWERON_RESET 106 +#define USB30_0_PHY_RESET 107 +#define USB30_1_MASTER_RESET 108 +#define USB30_1_SLEEP_RESET 109 +#define USB30_1_UTMI_PHY_RESET 110 +#define USB30_1_POWERON_RESET 111 +#define USB30_1_PHY_RESET 112 +#define NSSFB0_RESET 113 +#define NSSFB1_RESET 114 +#define UBI32_CORE1_CLKRST_CLAMP_RESET 115 +#define UBI32_CORE1_CLAMP_RESET 116 +#define UBI32_CORE1_AHB_RESET 117 +#define UBI32_CORE1_AXI_RESET 118 +#define UBI32_CORE2_CLKRST_CLAMP_RESET 119 +#define UBI32_CORE2_CLAMP_RESET 120 +#define UBI32_CORE2_AHB_RESET 121 +#define UBI32_CORE2_AXI_RESET 122 +#define GMAC_CORE1_RESET 123 +#define GMAC_CORE2_RESET 124 +#define GMAC_CORE3_RESET 125 +#define GMAC_CORE4_RESET 126 +#define GMAC_AHB_RESET 127 +#define NSS_CH0_RST_RX_CLK_N_RESET 128 +#define NSS_CH0_RST_TX_CLK_N_RESET 129 +#define NSS_CH0_RST_RX_125M_N_RESET 130 +#define NSS_CH0_HW_RST_RX_125M_N_RESET 131 +#define NSS_CH0_RST_TX_125M_N_RESET 132 +#define NSS_CH1_RST_RX_CLK_N_RESET 133 +#define NSS_CH1_RST_TX_CLK_N_RESET 134 +#define NSS_CH1_RST_RX_125M_N_RESET 135 +#define NSS_CH1_HW_RST_RX_125M_N_RESET 136 +#define NSS_CH1_RST_TX_125M_N_RESET 137 +#define NSS_CH2_RST_RX_CLK_N_RESET 138 +#define NSS_CH2_RST_TX_CLK_N_RESET 139 +#define NSS_CH2_RST_RX_125M_N_RESET 140 +#define NSS_CH2_HW_RST_RX_125M_N_RESET 141 +#define NSS_CH2_RST_TX_125M_N_RESET 142 +#define NSS_CH3_RST_RX_CLK_N_RESET 143 +#define NSS_CH3_RST_TX_CLK_N_RESET 144 +#define NSS_CH3_RST_RX_125M_N_RESET 145 +#define NSS_CH3_HW_RST_RX_125M_N_RESET 146 +#define NSS_CH3_RST_TX_125M_N_RESET 147 +#define NSS_RST_RX_250M_125M_N_RESET 148 +#define NSS_RST_TX_250M_125M_N_RESET 149 +#define NSS_QSGMII_TXPI_RST_N_RESET 150 +#define NSS_QSGMII_CDR_RST_N_RESET 151 +#define NSS_SGMII2_CDR_RST_N_RESET 152 +#define NSS_SGMII3_CDR_RST_N_RESET 153 +#define NSS_CAL_PRBS_RST_N_RESET 154 +#define NSS_LCKDT_RST_N_RESET 155 +#define NSS_SRDS_N_RESET 156 + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-mdm9615.h b/include/dt-bindings/reset/qcom,gcc-mdm9615.h new file mode 100644 index 000000000..7f86e9a59 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-mdm9615.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * Copyright (c) BayLibre, SAS. + * Author : Neil Armstrong + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_RESET_GCC_MDM9615_H +#define _DT_BINDINGS_RESET_GCC_MDM9615_H + +#define SFAB_MSS_Q6_SW_RESET 0 +#define SFAB_MSS_Q6_FW_RESET 1 +#define QDSS_STM_RESET 2 +#define AFAB_SMPSS_S_RESET 3 +#define AFAB_SMPSS_M1_RESET 4 +#define AFAB_SMPSS_M0_RESET 5 +#define AFAB_EBI1_CH0_RESET 6 +#define AFAB_EBI1_CH1_RESET 7 +#define SFAB_ADM0_M0_RESET 8 +#define SFAB_ADM0_M1_RESET 9 +#define SFAB_ADM0_M2_RESET 10 +#define ADM0_C2_RESET 11 +#define ADM0_C1_RESET 12 +#define ADM0_C0_RESET 13 +#define ADM0_PBUS_RESET 14 +#define ADM0_RESET 15 +#define QDSS_CLKS_SW_RESET 16 +#define QDSS_POR_RESET 17 +#define QDSS_TSCTR_RESET 18 +#define QDSS_HRESET_RESET 19 +#define QDSS_AXI_RESET 20 +#define QDSS_DBG_RESET 21 +#define PCIE_A_RESET 22 +#define PCIE_AUX_RESET 23 +#define PCIE_H_RESET 24 +#define SFAB_PCIE_M_RESET 25 +#define SFAB_PCIE_S_RESET 26 +#define SFAB_MSS_M_RESET 27 +#define SFAB_USB3_M_RESET 28 +#define SFAB_RIVA_M_RESET 29 +#define SFAB_LPASS_RESET 30 +#define SFAB_AFAB_M_RESET 31 +#define AFAB_SFAB_M0_RESET 32 +#define AFAB_SFAB_M1_RESET 33 +#define SFAB_SATA_S_RESET 34 +#define SFAB_DFAB_M_RESET 35 +#define DFAB_SFAB_M_RESET 36 +#define DFAB_SWAY0_RESET 37 +#define DFAB_SWAY1_RESET 38 +#define DFAB_ARB0_RESET 39 +#define DFAB_ARB1_RESET 40 +#define PPSS_PROC_RESET 41 +#define PPSS_RESET 42 +#define DMA_BAM_RESET 43 +#define SPS_TIC_H_RESET 44 +#define SLIMBUS_H_RESET 45 +#define SFAB_CFPB_M_RESET 46 +#define SFAB_CFPB_S_RESET 47 +#define TSIF_H_RESET 48 +#define CE1_H_RESET 49 +#define CE1_CORE_RESET 50 +#define CE1_SLEEP_RESET 51 +#define CE2_H_RESET 52 +#define CE2_CORE_RESET 53 +#define SFAB_SFPB_M_RESET 54 +#define SFAB_SFPB_S_RESET 55 +#define RPM_PROC_RESET 56 +#define PMIC_SSBI2_RESET 57 +#define SDC1_RESET 58 +#define SDC2_RESET 59 +#define SDC3_RESET 60 +#define SDC4_RESET 61 +#define SDC5_RESET 62 +#define DFAB_A2_RESET 63 +#define USB_HS1_RESET 64 +#define USB_HSIC_RESET 65 +#define USB_FS1_XCVR_RESET 66 +#define USB_FS1_RESET 67 +#define USB_FS2_XCVR_RESET 68 +#define USB_FS2_RESET 69 +#define GSBI1_RESET 70 +#define GSBI2_RESET 71 +#define GSBI3_RESET 72 +#define GSBI4_RESET 73 +#define GSBI5_RESET 74 +#define GSBI6_RESET 75 +#define GSBI7_RESET 76 +#define GSBI8_RESET 77 +#define GSBI9_RESET 78 +#define GSBI10_RESET 79 +#define GSBI11_RESET 80 +#define GSBI12_RESET 81 +#define SPDM_RESET 82 +#define TLMM_H_RESET 83 +#define SFAB_MSS_S_RESET 84 +#define MSS_SLP_RESET 85 +#define MSS_Q6SW_JTAG_RESET 86 +#define MSS_Q6FW_JTAG_RESET 87 +#define MSS_RESET 88 +#define SATA_H_RESET 89 +#define SATA_RXOOB_RESE 90 +#define SATA_PMALIVE_RESET 91 +#define SATA_SFAB_M_RESET 92 +#define TSSC_RESET 93 +#define PDM_RESET 94 +#define MPM_H_RESET 95 +#define MPM_RESET 96 +#define SFAB_SMPSS_S_RESET 97 +#define PRNG_RESET 98 +#define RIVA_RESET 99 +#define USB_HS3_RESET 100 +#define USB_HS4_RESET 101 +#define CE3_RESET 102 +#define PCIE_EXT_PCI_RESET 103 +#define PCIE_PHY_RESET 104 +#define PCIE_PCI_RESET 105 +#define PCIE_POR_RESET 106 +#define PCIE_HCLK_RESET 107 +#define PCIE_ACLK_RESET 108 +#define CE3_H_RESET 109 +#define SFAB_CE3_M_RESET 110 +#define SFAB_CE3_S_RESET 111 +#define SATA_RESET 112 +#define CE3_SLEEP_RESET 113 +#define GSS_SLP_RESET 114 +#define GSS_RESET 115 + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-msm8660.h b/include/dt-bindings/reset/qcom,gcc-msm8660.h new file mode 100644 index 000000000..a83282fe5 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-msm8660.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2013, The Linux Foundation. 
All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_RESET_MSM_GCC_8660_H +#define _DT_BINDINGS_RESET_MSM_GCC_8660_H + +#define AFAB_CORE_RESET 0 +#define SCSS_SYS_RESET 1 +#define SCSS_SYS_POR_RESET 2 +#define AFAB_SMPSS_S_RESET 3 +#define AFAB_SMPSS_M1_RESET 4 +#define AFAB_SMPSS_M0_RESET 5 +#define AFAB_EBI1_S_RESET 6 +#define SFAB_CORE_RESET 7 +#define SFAB_ADM0_M0_RESET 8 +#define SFAB_ADM0_M1_RESET 9 +#define SFAB_ADM0_M2_RESET 10 +#define ADM0_C2_RESET 11 +#define ADM0_C1_RESET 12 +#define ADM0_C0_RESET 13 +#define ADM0_PBUS_RESET 14 +#define ADM0_RESET 15 +#define SFAB_ADM1_M0_RESET 16 +#define SFAB_ADM1_M1_RESET 17 +#define SFAB_ADM1_M2_RESET 18 +#define MMFAB_ADM1_M3_RESET 19 +#define ADM1_C3_RESET 20 +#define ADM1_C2_RESET 21 +#define ADM1_C1_RESET 22 +#define ADM1_C0_RESET 23 +#define ADM1_PBUS_RESET 24 +#define ADM1_RESET 25 +#define IMEM0_RESET 26 +#define SFAB_LPASS_Q6_RESET 27 +#define SFAB_AFAB_M_RESET 28 +#define AFAB_SFAB_M0_RESET 29 +#define AFAB_SFAB_M1_RESET 30 +#define DFAB_CORE_RESET 31 +#define SFAB_DFAB_M_RESET 32 +#define DFAB_SFAB_M_RESET 33 +#define DFAB_SWAY0_RESET 34 +#define DFAB_SWAY1_RESET 35 +#define DFAB_ARB0_RESET 36 +#define DFAB_ARB1_RESET 37 +#define PPSS_PROC_RESET 38 +#define PPSS_RESET 39 +#define PMEM_RESET 40 +#define DMA_BAM_RESET 41 +#define SIC_RESET 42 +#define SPS_TIC_RESET 43 +#define CFBP0_RESET 44 +#define CFBP1_RESET 45 +#define CFBP2_RESET 46 +#define EBI2_RESET 47 +#define SFAB_CFPB_M_RESET 48 +#define CFPB_MASTER_RESET 49 +#define SFAB_CFPB_S_RESET 50 +#define CFPB_SPLITTER_RESET 51 +#define TSIF_RESET 52 +#define CE1_RESET 53 +#define CE2_RESET 54 +#define SFAB_SFPB_M_RESET 55 +#define SFAB_SFPB_S_RESET 56 +#define RPM_PROC_RESET 57 +#define RPM_BUS_RESET 58 +#define RPM_MSG_RAM_RESET 59 +#define PMIC_ARB0_RESET 60 +#define PMIC_ARB1_RESET 61 +#define PMIC_SSBI2_RESET 62 +#define SDC1_RESET 63 +#define SDC2_RESET 64 +#define SDC3_RESET 65 +#define SDC4_RESET 66 +#define SDC5_RESET 67 +#define USB_HS1_RESET 68 +#define USB_HS2_XCVR_RESET 69 +#define USB_HS2_RESET 70 +#define USB_FS1_XCVR_RESET 71 +#define USB_FS1_RESET 72 +#define USB_FS2_XCVR_RESET 73 +#define USB_FS2_RESET 74 +#define GSBI1_RESET 75 +#define GSBI2_RESET 76 +#define GSBI3_RESET 77 +#define GSBI4_RESET 78 +#define GSBI5_RESET 79 +#define GSBI6_RESET 80 +#define GSBI7_RESET 81 +#define GSBI8_RESET 82 +#define GSBI9_RESET 83 +#define GSBI10_RESET 84 +#define GSBI11_RESET 85 +#define GSBI12_RESET 86 +#define SPDM_RESET 87 +#define SEC_CTRL_RESET 88 +#define TLMM_H_RESET 89 +#define TLMM_RESET 90 +#define MARRM_PWRON_RESET 91 +#define MARM_RESET 92 +#define MAHB1_RESET 93 +#define SFAB_MSS_S_RESET 94 +#define MAHB2_RESET 95 +#define MODEM_SW_AHB_RESET 96 +#define MODEM_RESET 97 +#define SFAB_MSS_MDM1_RESET 98 +#define SFAB_MSS_MDM0_RESET 99 +#define MSS_SLP_RESET 100 +#define MSS_MARM_SAW_RESET 101 +#define MSS_WDOG_RESET 102 +#define TSSC_RESET 103 +#define PDM_RESET 104 +#define SCSS_CORE0_RESET 105 +#define SCSS_CORE0_POR_RESET 106 +#define SCSS_CORE1_RESET 107 +#define SCSS_CORE1_POR_RESET 108 +#define MPM_RESET 
109 +#define EBI1_1X_DIV_RESET 110 +#define EBI1_RESET 111 +#define SFAB_SMPSS_S_RESET 112 +#define USB_PHY0_RESET 113 +#define USB_PHY1_RESET 114 +#define PRNG_RESET 115 + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-msm8916.h b/include/dt-bindings/reset/qcom,gcc-msm8916.h new file mode 100644 index 000000000..3d90410f0 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-msm8916.h @@ -0,0 +1,108 @@ +/* + * Copyright 2015 Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_RESET_MSM_GCC_8916_H +#define _DT_BINDINGS_RESET_MSM_GCC_8916_H + +#define GCC_BLSP1_BCR 0 +#define GCC_BLSP1_QUP1_BCR 1 +#define GCC_BLSP1_UART1_BCR 2 +#define GCC_BLSP1_QUP2_BCR 3 +#define GCC_BLSP1_UART2_BCR 4 +#define GCC_BLSP1_QUP3_BCR 5 +#define GCC_BLSP1_QUP4_BCR 6 +#define GCC_BLSP1_QUP5_BCR 7 +#define GCC_BLSP1_QUP6_BCR 8 +#define GCC_IMEM_BCR 9 +#define GCC_SMMU_BCR 10 +#define GCC_APSS_TCU_BCR 11 +#define GCC_SMMU_XPU_BCR 12 +#define GCC_PCNOC_TBU_BCR 13 +#define GCC_PRNG_BCR 14 +#define GCC_BOOT_ROM_BCR 15 +#define GCC_CRYPTO_BCR 16 +#define GCC_SEC_CTRL_BCR 17 +#define GCC_AUDIO_CORE_BCR 18 +#define GCC_ULT_AUDIO_BCR 19 +#define GCC_DEHR_BCR 20 +#define GCC_SYSTEM_NOC_BCR 21 +#define GCC_PCNOC_BCR 22 +#define GCC_TCSR_BCR 23 +#define GCC_QDSS_BCR 24 +#define GCC_DCD_BCR 25 +#define GCC_MSG_RAM_BCR 26 +#define GCC_MPM_BCR 27 +#define GCC_SPMI_BCR 28 +#define GCC_SPDM_BCR 29 +#define GCC_MM_SPDM_BCR 30 +#define GCC_BIMC_BCR 31 +#define GCC_RBCPR_BCR 32 +#define GCC_TLMM_BCR 33 +#define GCC_USB_HS_BCR 34 +#define GCC_USB2A_PHY_BCR 35 +#define GCC_SDCC1_BCR 36 +#define GCC_SDCC2_BCR 37 +#define GCC_PDM_BCR 38 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 39 +#define GCC_PCNOC_BUS_TIMEOUT0_BCR 40 +#define GCC_PCNOC_BUS_TIMEOUT1_BCR 41 +#define GCC_PCNOC_BUS_TIMEOUT2_BCR 42 +#define GCC_PCNOC_BUS_TIMEOUT3_BCR 43 +#define GCC_PCNOC_BUS_TIMEOUT4_BCR 44 +#define GCC_PCNOC_BUS_TIMEOUT5_BCR 45 +#define GCC_PCNOC_BUS_TIMEOUT6_BCR 46 +#define GCC_PCNOC_BUS_TIMEOUT7_BCR 47 +#define GCC_PCNOC_BUS_TIMEOUT8_BCR 48 +#define GCC_PCNOC_BUS_TIMEOUT9_BCR 49 +#define GCC_MMSS_BCR 50 +#define GCC_VENUS0_BCR 51 +#define GCC_MDSS_BCR 52 +#define GCC_CAMSS_PHY0_BCR 53 +#define GCC_CAMSS_CSI0_BCR 54 +#define GCC_CAMSS_CSI0PHY_BCR 55 +#define GCC_CAMSS_CSI0RDI_BCR 56 +#define GCC_CAMSS_CSI0PIX_BCR 57 +#define GCC_CAMSS_PHY1_BCR 58 +#define GCC_CAMSS_CSI1_BCR 59 +#define GCC_CAMSS_CSI1PHY_BCR 60 +#define GCC_CAMSS_CSI1RDI_BCR 61 +#define GCC_CAMSS_CSI1PIX_BCR 62 +#define GCC_CAMSS_ISPIF_BCR 63 +#define GCC_CAMSS_CCI_BCR 64 +#define GCC_CAMSS_MCLK0_BCR 65 +#define GCC_CAMSS_MCLK1_BCR 66 +#define GCC_CAMSS_GP0_BCR 67 +#define GCC_CAMSS_GP1_BCR 68 +#define GCC_CAMSS_TOP_BCR 69 +#define GCC_CAMSS_MICRO_BCR 70 +#define GCC_CAMSS_JPEG_BCR 71 +#define GCC_CAMSS_VFE_BCR 72 +#define GCC_CAMSS_CSI_VFE0_BCR 73 +#define GCC_OXILI_BCR 74 +#define GCC_GMEM_BCR 75 +#define GCC_CAMSS_AHB_BCR 76 +#define GCC_MDP_TBU_BCR 77 +#define GCC_GFX_TBU_BCR 78 +#define GCC_GFX_TCU_BCR 79 +#define GCC_MSS_TBU_AXI_BCR 80 +#define GCC_MSS_TBU_GSS_AXI_BCR 81 +#define GCC_MSS_TBU_Q6_AXI_BCR 82 +#define 
GCC_GTCU_AHB_BCR 83 +#define GCC_SMMU_CFG_BCR 84 +#define GCC_VFE_TBU_BCR 85 +#define GCC_VENUS_TBU_BCR 86 +#define GCC_JPEG_TBU_BCR 87 +#define GCC_PRONTO_TBU_BCR 88 +#define GCC_SMMU_CATS_BCR 89 + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-msm8960.h b/include/dt-bindings/reset/qcom,gcc-msm8960.h new file mode 100644 index 000000000..47c868695 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-msm8960.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_RESET_MSM_GCC_8960_H +#define _DT_BINDINGS_RESET_MSM_GCC_8960_H + +#define SFAB_MSS_Q6_SW_RESET 0 +#define SFAB_MSS_Q6_FW_RESET 1 +#define QDSS_STM_RESET 2 +#define AFAB_SMPSS_S_RESET 3 +#define AFAB_SMPSS_M1_RESET 4 +#define AFAB_SMPSS_M0_RESET 5 +#define AFAB_EBI1_CH0_RESET 6 +#define AFAB_EBI1_CH1_RESET 7 +#define SFAB_ADM0_M0_RESET 8 +#define SFAB_ADM0_M1_RESET 9 +#define SFAB_ADM0_M2_RESET 10 +#define ADM0_C2_RESET 11 +#define ADM0_C1_RESET 12 +#define ADM0_C0_RESET 13 +#define ADM0_PBUS_RESET 14 +#define ADM0_RESET 15 +#define QDSS_CLKS_SW_RESET 16 +#define QDSS_POR_RESET 17 +#define QDSS_TSCTR_RESET 18 +#define QDSS_HRESET_RESET 19 +#define QDSS_AXI_RESET 20 +#define QDSS_DBG_RESET 21 +#define PCIE_A_RESET 22 +#define PCIE_AUX_RESET 23 +#define PCIE_H_RESET 24 +#define SFAB_PCIE_M_RESET 25 +#define SFAB_PCIE_S_RESET 26 +#define SFAB_MSS_M_RESET 27 +#define SFAB_USB3_M_RESET 28 +#define SFAB_RIVA_M_RESET 29 +#define SFAB_LPASS_RESET 30 +#define SFAB_AFAB_M_RESET 31 +#define AFAB_SFAB_M0_RESET 32 +#define AFAB_SFAB_M1_RESET 33 +#define SFAB_SATA_S_RESET 34 +#define SFAB_DFAB_M_RESET 35 +#define DFAB_SFAB_M_RESET 36 +#define DFAB_SWAY0_RESET 37 +#define DFAB_SWAY1_RESET 38 +#define DFAB_ARB0_RESET 39 +#define DFAB_ARB1_RESET 40 +#define PPSS_PROC_RESET 41 +#define PPSS_RESET 42 +#define DMA_BAM_RESET 43 +#define SPS_TIC_H_RESET 44 +#define SLIMBUS_H_RESET 45 +#define SFAB_CFPB_M_RESET 46 +#define SFAB_CFPB_S_RESET 47 +#define TSIF_H_RESET 48 +#define CE1_H_RESET 49 +#define CE1_CORE_RESET 50 +#define CE1_SLEEP_RESET 51 +#define CE2_H_RESET 52 +#define CE2_CORE_RESET 53 +#define SFAB_SFPB_M_RESET 54 +#define SFAB_SFPB_S_RESET 55 +#define RPM_PROC_RESET 56 +#define PMIC_SSBI2_RESET 57 +#define SDC1_RESET 58 +#define SDC2_RESET 59 +#define SDC3_RESET 60 +#define SDC4_RESET 61 +#define SDC5_RESET 62 +#define DFAB_A2_RESET 63 +#define USB_HS1_RESET 64 +#define USB_HSIC_RESET 65 +#define USB_FS1_XCVR_RESET 66 +#define USB_FS1_RESET 67 +#define USB_FS2_XCVR_RESET 68 +#define USB_FS2_RESET 69 +#define GSBI1_RESET 70 +#define GSBI2_RESET 71 +#define GSBI3_RESET 72 +#define GSBI4_RESET 73 +#define GSBI5_RESET 74 +#define GSBI6_RESET 75 +#define GSBI7_RESET 76 +#define GSBI8_RESET 77 +#define GSBI9_RESET 78 +#define GSBI10_RESET 79 +#define GSBI11_RESET 80 +#define GSBI12_RESET 81 +#define SPDM_RESET 82 +#define TLMM_H_RESET 83 +#define SFAB_MSS_S_RESET 84 +#define MSS_SLP_RESET 85 +#define MSS_Q6SW_JTAG_RESET 86 +#define MSS_Q6FW_JTAG_RESET 87 +#define MSS_RESET 88 +#define SATA_H_RESET 89 +#define 
SATA_RXOOB_RESE 90 +#define SATA_PMALIVE_RESET 91 +#define SATA_SFAB_M_RESET 92 +#define TSSC_RESET 93 +#define PDM_RESET 94 +#define MPM_H_RESET 95 +#define MPM_RESET 96 +#define SFAB_SMPSS_S_RESET 97 +#define PRNG_RESET 98 +#define RIVA_RESET 99 +#define USB_HS3_RESET 100 +#define USB_HS4_RESET 101 +#define CE3_RESET 102 +#define PCIE_EXT_PCI_RESET 103 +#define PCIE_PHY_RESET 104 +#define PCIE_PCI_RESET 105 +#define PCIE_POR_RESET 106 +#define PCIE_HCLK_RESET 107 +#define PCIE_ACLK_RESET 108 +#define CE3_H_RESET 109 +#define SFAB_CE3_M_RESET 110 +#define SFAB_CE3_S_RESET 111 +#define SATA_RESET 112 +#define CE3_SLEEP_RESET 113 +#define GSS_SLP_RESET 114 +#define GSS_RESET 115 + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-msm8974.h b/include/dt-bindings/reset/qcom,gcc-msm8974.h new file mode 100644 index 000000000..9bdf54322 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-msm8974.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_RESET_MSM_GCC_8974_H +#define _DT_BINDINGS_RESET_MSM_GCC_8974_H + +#define GCC_SYSTEM_NOC_BCR 0 +#define GCC_CONFIG_NOC_BCR 1 +#define GCC_PERIPH_NOC_BCR 2 +#define GCC_IMEM_BCR 3 +#define GCC_MMSS_BCR 4 +#define GCC_QDSS_BCR 5 +#define GCC_USB_30_BCR 6 +#define GCC_USB3_PHY_BCR 7 +#define GCC_USB_HS_HSIC_BCR 8 +#define GCC_USB_HS_BCR 9 +#define GCC_USB2A_PHY_BCR 10 +#define GCC_USB2B_PHY_BCR 11 +#define GCC_SDCC1_BCR 12 +#define GCC_SDCC2_BCR 13 +#define GCC_SDCC3_BCR 14 +#define GCC_SDCC4_BCR 15 +#define GCC_BLSP1_BCR 16 +#define GCC_BLSP1_QUP1_BCR 17 +#define GCC_BLSP1_UART1_BCR 18 +#define GCC_BLSP1_QUP2_BCR 19 +#define GCC_BLSP1_UART2_BCR 20 +#define GCC_BLSP1_QUP3_BCR 21 +#define GCC_BLSP1_UART3_BCR 22 +#define GCC_BLSP1_QUP4_BCR 23 +#define GCC_BLSP1_UART4_BCR 24 +#define GCC_BLSP1_QUP5_BCR 25 +#define GCC_BLSP1_UART5_BCR 26 +#define GCC_BLSP1_QUP6_BCR 27 +#define GCC_BLSP1_UART6_BCR 28 +#define GCC_BLSP2_BCR 29 +#define GCC_BLSP2_QUP1_BCR 30 +#define GCC_BLSP2_UART1_BCR 31 +#define GCC_BLSP2_QUP2_BCR 32 +#define GCC_BLSP2_UART2_BCR 33 +#define GCC_BLSP2_QUP3_BCR 34 +#define GCC_BLSP2_UART3_BCR 35 +#define GCC_BLSP2_QUP4_BCR 36 +#define GCC_BLSP2_UART4_BCR 37 +#define GCC_BLSP2_QUP5_BCR 38 +#define GCC_BLSP2_UART5_BCR 39 +#define GCC_BLSP2_QUP6_BCR 40 +#define GCC_BLSP2_UART6_BCR 41 +#define GCC_PDM_BCR 42 +#define GCC_BAM_DMA_BCR 43 +#define GCC_TSIF_BCR 44 +#define GCC_TCSR_BCR 45 +#define GCC_BOOT_ROM_BCR 46 +#define GCC_MSG_RAM_BCR 47 +#define GCC_TLMM_BCR 48 +#define GCC_MPM_BCR 49 +#define GCC_SEC_CTRL_BCR 50 +#define GCC_SPMI_BCR 51 +#define GCC_SPDM_BCR 52 +#define GCC_CE1_BCR 53 +#define GCC_CE2_BCR 54 +#define GCC_BIMC_BCR 55 +#define GCC_MPM_NON_AHB_RESET 56 +#define GCC_MPM_AHB_RESET 57 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 58 +#define GCC_SNOC_BUS_TIMEOUT2_BCR 59 +#define GCC_PNOC_BUS_TIMEOUT0_BCR 60 +#define GCC_PNOC_BUS_TIMEOUT1_BCR 61 +#define GCC_PNOC_BUS_TIMEOUT2_BCR 62 +#define GCC_PNOC_BUS_TIMEOUT3_BCR 63 +#define GCC_PNOC_BUS_TIMEOUT4_BCR 64 +#define GCC_CNOC_BUS_TIMEOUT0_BCR 65 +#define 
GCC_CNOC_BUS_TIMEOUT1_BCR 66 +#define GCC_CNOC_BUS_TIMEOUT2_BCR 67 +#define GCC_CNOC_BUS_TIMEOUT3_BCR 68 +#define GCC_CNOC_BUS_TIMEOUT4_BCR 69 +#define GCC_CNOC_BUS_TIMEOUT5_BCR 70 +#define GCC_CNOC_BUS_TIMEOUT6_BCR 71 +#define GCC_DEHR_BCR 72 +#define GCC_RBCPR_BCR 73 +#define GCC_MSS_RESTART 74 +#define GCC_LPASS_RESTART 75 +#define GCC_WCSS_RESTART 76 +#define GCC_VENUS_RESTART 77 + +#endif diff --git a/include/dt-bindings/reset/qcom,mmcc-apq8084.h b/include/dt-bindings/reset/qcom,mmcc-apq8084.h new file mode 100644 index 000000000..c16713965 --- /dev/null +++ b/include/dt-bindings/reset/qcom,mmcc-apq8084.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_RESET_APQ_MMCC_8084_H +#define _DT_BINDINGS_RESET_APQ_MMCC_8084_H + +#define MMSS_SPDM_RESET 0 +#define MMSS_SPDM_RM_RESET 1 +#define VENUS0_RESET 2 +#define VPU_RESET 3 +#define MDSS_RESET 4 +#define AVSYNC_RESET 5 +#define CAMSS_PHY0_RESET 6 +#define CAMSS_PHY1_RESET 7 +#define CAMSS_PHY2_RESET 8 +#define CAMSS_CSI0_RESET 9 +#define CAMSS_CSI0PHY_RESET 10 +#define CAMSS_CSI0RDI_RESET 11 +#define CAMSS_CSI0PIX_RESET 12 +#define CAMSS_CSI1_RESET 13 +#define CAMSS_CSI1PHY_RESET 14 +#define CAMSS_CSI1RDI_RESET 15 +#define CAMSS_CSI1PIX_RESET 16 +#define CAMSS_CSI2_RESET 17 +#define CAMSS_CSI2PHY_RESET 18 +#define CAMSS_CSI2RDI_RESET 19 +#define CAMSS_CSI2PIX_RESET 20 +#define CAMSS_CSI3_RESET 21 +#define CAMSS_CSI3PHY_RESET 22 +#define CAMSS_CSI3RDI_RESET 23 +#define CAMSS_CSI3PIX_RESET 24 +#define CAMSS_ISPIF_RESET 25 +#define CAMSS_CCI_RESET 26 +#define CAMSS_MCLK0_RESET 27 +#define CAMSS_MCLK1_RESET 28 +#define CAMSS_MCLK2_RESET 29 +#define CAMSS_MCLK3_RESET 30 +#define CAMSS_GP0_RESET 31 +#define CAMSS_GP1_RESET 32 +#define CAMSS_TOP_RESET 33 +#define CAMSS_AHB_RESET 34 +#define CAMSS_MICRO_RESET 35 +#define CAMSS_JPEG_RESET 36 +#define CAMSS_VFE_RESET 37 +#define CAMSS_CSI_VFE0_RESET 38 +#define CAMSS_CSI_VFE1_RESET 39 +#define OXILI_RESET 40 +#define OXILICX_RESET 41 +#define OCMEMCX_RESET 42 +#define MMSS_RBCRP_RESET 43 +#define MMSSNOCAHB_RESET 44 +#define MMSSNOCAXI_RESET 45 + +#endif diff --git a/include/dt-bindings/reset/qcom,mmcc-msm8960.h b/include/dt-bindings/reset/qcom,mmcc-msm8960.h new file mode 100644 index 000000000..11741113a --- /dev/null +++ b/include/dt-bindings/reset/qcom,mmcc-msm8960.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_RESET_MSM_MMCC_8960_H +#define _DT_BINDINGS_RESET_MSM_MMCC_8960_H + +#define VPE_AXI_RESET 0 +#define IJPEG_AXI_RESET 1 +#define MPD_AXI_RESET 2 +#define VFE_AXI_RESET 3 +#define SP_AXI_RESET 4 +#define VCODEC_AXI_RESET 5 +#define ROT_AXI_RESET 6 +#define VCODEC_AXI_A_RESET 7 +#define VCODEC_AXI_B_RESET 8 +#define FAB_S3_AXI_RESET 9 +#define FAB_S2_AXI_RESET 10 +#define FAB_S1_AXI_RESET 11 +#define FAB_S0_AXI_RESET 12 +#define SMMU_GFX3D_ABH_RESET 13 +#define SMMU_VPE_AHB_RESET 14 +#define SMMU_VFE_AHB_RESET 15 +#define SMMU_ROT_AHB_RESET 16 +#define SMMU_VCODEC_B_AHB_RESET 17 +#define SMMU_VCODEC_A_AHB_RESET 18 +#define SMMU_MDP1_AHB_RESET 19 +#define SMMU_MDP0_AHB_RESET 20 +#define SMMU_JPEGD_AHB_RESET 21 +#define SMMU_IJPEG_AHB_RESET 22 +#define SMMU_GFX2D0_AHB_RESET 23 +#define SMMU_GFX2D1_AHB_RESET 24 +#define APU_AHB_RESET 25 +#define CSI_AHB_RESET 26 +#define TV_ENC_AHB_RESET 27 +#define VPE_AHB_RESET 28 +#define FABRIC_AHB_RESET 29 +#define GFX2D0_AHB_RESET 30 +#define GFX2D1_AHB_RESET 31 +#define GFX3D_AHB_RESET 32 +#define HDMI_AHB_RESET 33 +#define MSSS_IMEM_AHB_RESET 34 +#define IJPEG_AHB_RESET 35 +#define DSI_M_AHB_RESET 36 +#define DSI_S_AHB_RESET 37 +#define JPEGD_AHB_RESET 38 +#define MDP_AHB_RESET 39 +#define ROT_AHB_RESET 40 +#define VCODEC_AHB_RESET 41 +#define VFE_AHB_RESET 42 +#define DSI2_M_AHB_RESET 43 +#define DSI2_S_AHB_RESET 44 +#define CSIPHY2_RESET 45 +#define CSI_PIX1_RESET 46 +#define CSIPHY0_RESET 47 +#define CSIPHY1_RESET 48 +#define DSI2_RESET 49 +#define VFE_CSI_RESET 50 +#define MDP_RESET 51 +#define AMP_RESET 52 +#define JPEGD_RESET 53 +#define CSI1_RESET 54 +#define VPE_RESET 55 +#define MMSS_FABRIC_RESET 56 +#define VFE_RESET 57 +#define GFX2D0_RESET 58 +#define GFX2D1_RESET 59 +#define GFX3D_RESET 60 +#define HDMI_RESET 61 +#define MMSS_IMEM_RESET 62 +#define IJPEG_RESET 63 +#define CSI0_RESET 64 +#define DSI_RESET 65 +#define VCODEC_RESET 66 +#define MDP_TV_RESET 67 +#define MDP_VSYNC_RESET 68 +#define ROT_RESET 69 +#define TV_HDMI_RESET 70 +#define TV_ENC_RESET 71 +#define CSI2_RESET 72 +#define CSI_RDI1_RESET 73 +#define CSI_RDI2_RESET 74 +#define GFX3D_AXI_RESET 75 +#define VCAP_AXI_RESET 76 +#define SMMU_VCAP_AHB_RESET 77 +#define VCAP_AHB_RESET 78 +#define CSI_RDI_RESET 79 +#define CSI_PIX_RESET 80 +#define VCAP_NPL_RESET 81 +#define VCAP_RESET 82 + +#endif diff --git a/include/dt-bindings/reset/qcom,mmcc-msm8974.h b/include/dt-bindings/reset/qcom,mmcc-msm8974.h new file mode 100644 index 000000000..da3ec37f1 --- /dev/null +++ b/include/dt-bindings/reset/qcom,mmcc-msm8974.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_RESET_MSM_MMCC_8974_H +#define _DT_BINDINGS_RESET_MSM_MMCC_8974_H + +#define SPDM_RESET 0 +#define SPDM_RM_RESET 1 +#define VENUS0_RESET 2 +#define MDSS_RESET 3 +#define CAMSS_PHY0_RESET 4 +#define CAMSS_PHY1_RESET 5 +#define CAMSS_PHY2_RESET 6 +#define CAMSS_CSI0_RESET 7 +#define CAMSS_CSI0PHY_RESET 8 +#define CAMSS_CSI0RDI_RESET 9 +#define CAMSS_CSI0PIX_RESET 10 +#define CAMSS_CSI1_RESET 11 +#define CAMSS_CSI1PHY_RESET 12 +#define CAMSS_CSI1RDI_RESET 13 +#define CAMSS_CSI1PIX_RESET 14 +#define CAMSS_CSI2_RESET 15 +#define CAMSS_CSI2PHY_RESET 16 +#define CAMSS_CSI2RDI_RESET 17 +#define CAMSS_CSI2PIX_RESET 18 +#define CAMSS_CSI3_RESET 19 +#define CAMSS_CSI3PHY_RESET 20 +#define CAMSS_CSI3RDI_RESET 21 +#define CAMSS_CSI3PIX_RESET 22 +#define CAMSS_ISPIF_RESET 23 +#define CAMSS_CCI_RESET 24 +#define CAMSS_MCLK0_RESET 25 +#define CAMSS_MCLK1_RESET 26 +#define CAMSS_MCLK2_RESET 27 +#define CAMSS_MCLK3_RESET 28 +#define CAMSS_GP0_RESET 29 +#define CAMSS_GP1_RESET 30 +#define CAMSS_TOP_RESET 31 +#define CAMSS_MICRO_RESET 32 +#define CAMSS_JPEG_RESET 33 +#define CAMSS_VFE_RESET 34 +#define CAMSS_CSI_VFE0_RESET 35 +#define CAMSS_CSI_VFE1_RESET 36 +#define OXILI_RESET 37 +#define OXILICX_RESET 38 +#define OCMEMCX_RESET 39 +#define MMSS_RBCRP_RESET 40 +#define MMSSNOCAHB_RESET 41 +#define MMSSNOCAXI_RESET 42 +#define OCMEMNOC_RESET 43 + +#endif diff --git a/include/dt-bindings/reset/qcom,sdm845-aoss.h b/include/dt-bindings/reset/qcom,sdm845-aoss.h new file mode 100644 index 000000000..476c5fc87 --- /dev/null +++ b/include/dt-bindings/reset/qcom,sdm845-aoss.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_RESET_AOSS_SDM_845_H +#define _DT_BINDINGS_RESET_AOSS_SDM_845_H + +#define AOSS_CC_MSS_RESTART 0 +#define AOSS_CC_CAMSS_RESTART 1 +#define AOSS_CC_VENUS_RESTART 2 +#define AOSS_CC_GPU_RESTART 3 +#define AOSS_CC_DISPSS_RESTART 4 +#define AOSS_CC_WCSS_RESTART 5 +#define AOSS_CC_LPASS_RESTART 6 + +#endif diff --git a/include/dt-bindings/reset/snps,hsdk-reset.h b/include/dt-bindings/reset/snps,hsdk-reset.h new file mode 100644 index 000000000..e1a643e4b --- /dev/null +++ b/include/dt-bindings/reset/snps,hsdk-reset.h @@ -0,0 +1,17 @@ +/** + * This header provides index for the HSDK reset controller. + */ +#ifndef _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK +#define _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK + +#define HSDK_APB_RESET 0 +#define HSDK_AXI_RESET 1 +#define HSDK_ETH_RESET 2 +#define HSDK_USB_RESET 3 +#define HSDK_SDIO_RESET 4 +#define HSDK_HDMI_RESET 5 +#define HSDK_GFX_RESET 6 +#define HSDK_DMAC_RESET 7 +#define HSDK_EBI_RESET 8 + +#endif /*_DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK*/ diff --git a/include/dt-bindings/reset/stih407-resets.h b/include/dt-bindings/reset/stih407-resets.h new file mode 100644 index 000000000..f2a2c4f7f --- /dev/null +++ b/include/dt-bindings/reset/stih407-resets.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the reset controller + * based peripheral powerdown requests on the STMicroelectronics + * STiH407 SoC. 
+ */ +#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH407 +#define _DT_BINDINGS_RESET_CONTROLLER_STIH407 + +/* Powerdown requests control 0 */ +#define STIH407_EMISS_POWERDOWN 0 +#define STIH407_NAND_POWERDOWN 1 + +/* Synp GMAC PowerDown */ +#define STIH407_ETH1_POWERDOWN 2 + +/* Powerdown requests control 1 */ +#define STIH407_USB3_POWERDOWN 3 +#define STIH407_USB2_PORT1_POWERDOWN 4 +#define STIH407_USB2_PORT0_POWERDOWN 5 +#define STIH407_PCIE1_POWERDOWN 6 +#define STIH407_PCIE0_POWERDOWN 7 +#define STIH407_SATA1_POWERDOWN 8 +#define STIH407_SATA0_POWERDOWN 9 + +/* Reset defines */ +#define STIH407_ETH1_SOFTRESET 0 +#define STIH407_MMC1_SOFTRESET 1 +#define STIH407_PICOPHY_SOFTRESET 2 +#define STIH407_IRB_SOFTRESET 3 +#define STIH407_PCIE0_SOFTRESET 4 +#define STIH407_PCIE1_SOFTRESET 5 +#define STIH407_SATA0_SOFTRESET 6 +#define STIH407_SATA1_SOFTRESET 7 +#define STIH407_MIPHY0_SOFTRESET 8 +#define STIH407_MIPHY1_SOFTRESET 9 +#define STIH407_MIPHY2_SOFTRESET 10 +#define STIH407_SATA0_PWR_SOFTRESET 11 +#define STIH407_SATA1_PWR_SOFTRESET 12 +#define STIH407_DELTA_SOFTRESET 13 +#define STIH407_BLITTER_SOFTRESET 14 +#define STIH407_HDTVOUT_SOFTRESET 15 +#define STIH407_HDQVDP_SOFTRESET 16 +#define STIH407_VDP_AUX_SOFTRESET 17 +#define STIH407_COMPO_SOFTRESET 18 +#define STIH407_HDMI_TX_PHY_SOFTRESET 19 +#define STIH407_JPEG_DEC_SOFTRESET 20 +#define STIH407_VP8_DEC_SOFTRESET 21 +#define STIH407_GPU_SOFTRESET 22 +#define STIH407_HVA_SOFTRESET 23 +#define STIH407_ERAM_HVA_SOFTRESET 24 +#define STIH407_LPM_SOFTRESET 25 +#define STIH407_KEYSCAN_SOFTRESET 26 +#define STIH407_USB2_PORT0_SOFTRESET 27 +#define STIH407_USB2_PORT1_SOFTRESET 28 +#define STIH407_ST231_AUD_SOFTRESET 29 +#define STIH407_ST231_DMU_SOFTRESET 30 +#define STIH407_ST231_GP0_SOFTRESET 31 +#define STIH407_ST231_GP1_SOFTRESET 32 + +/* Picophy reset defines */ +#define STIH407_PICOPHY0_RESET 0 +#define STIH407_PICOPHY1_RESET 1 +#define STIH407_PICOPHY2_RESET 2 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH407 */ diff --git a/include/dt-bindings/reset/stih415-resets.h b/include/dt-bindings/reset/stih415-resets.h new file mode 100644 index 000000000..96f7831a1 --- /dev/null +++ b/include/dt-bindings/reset/stih415-resets.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the reset controller + * based peripheral powerdown requests on the STMicroelectronics + * STiH415 SoC. + */ +#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH415 +#define _DT_BINDINGS_RESET_CONTROLLER_STIH415 + +#define STIH415_EMISS_POWERDOWN 0 +#define STIH415_NAND_POWERDOWN 1 +#define STIH415_KEYSCAN_POWERDOWN 2 +#define STIH415_USB0_POWERDOWN 3 +#define STIH415_USB1_POWERDOWN 4 +#define STIH415_USB2_POWERDOWN 5 +#define STIH415_SATA0_POWERDOWN 6 +#define STIH415_SATA1_POWERDOWN 7 +#define STIH415_PCIE_POWERDOWN 8 + +#define STIH415_ETH0_SOFTRESET 0 +#define STIH415_ETH1_SOFTRESET 1 +#define STIH415_IRB_SOFTRESET 2 +#define STIH415_USB0_SOFTRESET 3 +#define STIH415_USB1_SOFTRESET 4 +#define STIH415_USB2_SOFTRESET 5 +#define STIH415_KEYSCAN_SOFTRESET 6 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH415 */ diff --git a/include/dt-bindings/reset/stih416-resets.h b/include/dt-bindings/reset/stih416-resets.h new file mode 100644 index 000000000..f682c906e --- /dev/null +++ b/include/dt-bindings/reset/stih416-resets.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the reset controller + * based peripheral powerdown requests on the STMicroelectronics + * STiH416 SoC. 
+ */ +#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH416 +#define _DT_BINDINGS_RESET_CONTROLLER_STIH416 + +#define STIH416_EMISS_POWERDOWN 0 +#define STIH416_NAND_POWERDOWN 1 +#define STIH416_KEYSCAN_POWERDOWN 2 +#define STIH416_USB0_POWERDOWN 3 +#define STIH416_USB1_POWERDOWN 4 +#define STIH416_USB2_POWERDOWN 5 +#define STIH416_USB3_POWERDOWN 6 +#define STIH416_SATA0_POWERDOWN 7 +#define STIH416_SATA1_POWERDOWN 8 +#define STIH416_PCIE0_POWERDOWN 9 +#define STIH416_PCIE1_POWERDOWN 10 + +#define STIH416_ETH0_SOFTRESET 0 +#define STIH416_ETH1_SOFTRESET 1 +#define STIH416_IRB_SOFTRESET 2 +#define STIH416_USB0_SOFTRESET 3 +#define STIH416_USB1_SOFTRESET 4 +#define STIH416_USB2_SOFTRESET 5 +#define STIH416_USB3_SOFTRESET 6 +#define STIH416_SATA0_SOFTRESET 7 +#define STIH416_SATA1_SOFTRESET 8 +#define STIH416_PCIE0_SOFTRESET 9 +#define STIH416_PCIE1_SOFTRESET 10 +#define STIH416_AUD_DAC_SOFTRESET 11 +#define STIH416_HDTVOUT_SOFTRESET 12 +#define STIH416_VTAC_M_RX_SOFTRESET 13 +#define STIH416_VTAC_A_RX_SOFTRESET 14 +#define STIH416_SYNC_HD_SOFTRESET 15 +#define STIH416_SYNC_SD_SOFTRESET 16 +#define STIH416_BLITTER_SOFTRESET 17 +#define STIH416_GPU_SOFTRESET 18 +#define STIH416_VTAC_M_TX_SOFTRESET 19 +#define STIH416_VTAC_A_TX_SOFTRESET 20 +#define STIH416_VTG_AUX_SOFTRESET 21 +#define STIH416_JPEG_DEC_SOFTRESET 22 +#define STIH416_HVA_SOFTRESET 23 +#define STIH416_COMPO_M_SOFTRESET 24 +#define STIH416_COMPO_A_SOFTRESET 25 +#define STIH416_VP8_DEC_SOFTRESET 26 +#define STIH416_VTG_MAIN_SOFTRESET 27 +#define STIH416_KEYSCAN_SOFTRESET 28 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH416 */ diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h new file mode 100644 index 000000000..f0c3aaef6 --- /dev/null +++ b/include/dt-bindings/reset/stm32mp1-resets.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Author: Gabriel Fernandez for STMicroelectronics. 
+ */ + +#ifndef _DT_BINDINGS_STM32MP1_RESET_H_ +#define _DT_BINDINGS_STM32MP1_RESET_H_ + +#define LTDC_R 3072 +#define DSI_R 3076 +#define DDRPERFM_R 3080 +#define USBPHY_R 3088 +#define SPI6_R 3136 +#define I2C4_R 3138 +#define I2C6_R 3139 +#define USART1_R 3140 +#define STGEN_R 3156 +#define GPIOZ_R 3200 +#define CRYP1_R 3204 +#define HASH1_R 3205 +#define RNG1_R 3206 +#define AXIM_R 3216 +#define GPU_R 3269 +#define ETHMAC_R 3274 +#define FMC_R 3276 +#define QSPI_R 3278 +#define SDMMC1_R 3280 +#define SDMMC2_R 3281 +#define CRC1_R 3284 +#define USBH_R 3288 +#define MDMA_R 3328 +#define MCU_R 8225 +#define TIM2_R 19456 +#define TIM3_R 19457 +#define TIM4_R 19458 +#define TIM5_R 19459 +#define TIM6_R 19460 +#define TIM7_R 19461 +#define TIM12_R 19462 +#define TIM13_R 19463 +#define TIM14_R 19464 +#define LPTIM1_R 19465 +#define SPI2_R 19467 +#define SPI3_R 19468 +#define USART2_R 19470 +#define USART3_R 19471 +#define UART4_R 19472 +#define UART5_R 19473 +#define UART7_R 19474 +#define UART8_R 19475 +#define I2C1_R 19477 +#define I2C2_R 19478 +#define I2C3_R 19479 +#define I2C5_R 19480 +#define SPDIF_R 19482 +#define CEC_R 19483 +#define DAC12_R 19485 +#define MDIO_R 19847 +#define TIM1_R 19520 +#define TIM8_R 19521 +#define TIM15_R 19522 +#define TIM16_R 19523 +#define TIM17_R 19524 +#define SPI1_R 19528 +#define SPI4_R 19529 +#define SPI5_R 19530 +#define USART6_R 19533 +#define SAI1_R 19536 +#define SAI2_R 19537 +#define SAI3_R 19538 +#define DFSDM_R 19540 +#define FDCAN_R 19544 +#define LPTIM2_R 19584 +#define LPTIM3_R 19585 +#define LPTIM4_R 19586 +#define LPTIM5_R 19587 +#define SAI4_R 19592 +#define SYSCFG_R 19595 +#define VREF_R 19597 +#define TMPSENS_R 19600 +#define PMBCTRL_R 19601 +#define DMA1_R 19648 +#define DMA2_R 19649 +#define DMAMUX_R 19650 +#define ADC12_R 19653 +#define USBO_R 19656 +#define SDMMC3_R 19664 +#define CAMITF_R 19712 +#define CRYP2_R 19716 +#define HASH2_R 19717 +#define RNG2_R 19718 +#define CRC2_R 19719 +#define HSEM_R 19723 +#define MBOX_R 19724 +#define GPIOA_R 19776 +#define GPIOB_R 19777 +#define GPIOC_R 19778 +#define GPIOD_R 19779 +#define GPIOE_R 19780 +#define GPIOF_R 19781 +#define GPIOG_R 19782 +#define GPIOH_R 19783 +#define GPIOI_R 19784 +#define GPIOJ_R 19785 +#define GPIOK_R 19786 + +#endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */ diff --git a/include/dt-bindings/reset/sun4i-a10-ccu.h b/include/dt-bindings/reset/sun4i-a10-ccu.h new file mode 100644 index 000000000..5f4480bed --- /dev/null +++ b/include/dt-bindings/reset/sun4i-a10-ccu.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2017 Priit Laes + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN4I_A10_H +#define _DT_BINDINGS_RST_SUN4I_A10_H + +#define RST_USB_PHY0 1 +#define RST_USB_PHY1 2 +#define RST_USB_PHY2 3 +#define RST_GPS 4 +#define RST_DE_BE0 5 +#define RST_DE_BE1 6 +#define RST_DE_FE0 7 +#define RST_DE_FE1 8 +#define RST_DE_MP 9 +#define RST_TVE0 10 +#define RST_TCON0 11 +#define RST_TVE1 12 +#define RST_TCON1 13 +#define RST_CSI0 14 +#define RST_CSI1 15 +#define RST_VE 16 +#define RST_ACE 17 +#define RST_LVDS 18 +#define RST_GPU 19 +#define RST_HDMI_H 20 +#define RST_HDMI_SYS 21 +#define RST_HDMI_AUDIO_DMA 22 + +#endif /* DT_BINDINGS_RST_SUN4I_A10_H */ diff --git a/include/dt-bindings/reset/sun50i-a64-ccu.h b/include/dt-bindings/reset/sun50i-a64-ccu.h new file mode 100644 index 000000000..db60b29dd --- /dev/null +++ b/include/dt-bindings/reset/sun50i-a64-ccu.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN50I_A64_H_ +#define _DT_BINDINGS_RST_SUN50I_A64_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_USB_HSIC 2 +#define RST_DRAM 3 +#define RST_MBUS 4 +#define RST_BUS_MIPI_DSI 5 +#define RST_BUS_CE 6 +#define RST_BUS_DMA 7 +#define RST_BUS_MMC0 8 +#define RST_BUS_MMC1 9 +#define RST_BUS_MMC2 10 +#define RST_BUS_NAND 11 +#define RST_BUS_DRAM 12 +#define RST_BUS_EMAC 13 +#define RST_BUS_TS 14 +#define RST_BUS_HSTIMER 15 +#define RST_BUS_SPI0 16 +#define RST_BUS_SPI1 17 +#define RST_BUS_OTG 18 +#define RST_BUS_EHCI0 19 +#define RST_BUS_EHCI1 20 +#define RST_BUS_OHCI0 21 +#define RST_BUS_OHCI1 22 +#define RST_BUS_VE 23 +#define RST_BUS_TCON0 24 +#define RST_BUS_TCON1 25 +#define RST_BUS_DEINTERLACE 26 +#define RST_BUS_CSI 27 +#define RST_BUS_HDMI0 28 +#define RST_BUS_HDMI1 29 +#define RST_BUS_DE 30 +#define RST_BUS_GPU 31 +#define RST_BUS_MSGBOX 32 +#define RST_BUS_SPINLOCK 33 +#define RST_BUS_DBG 34 +#define RST_BUS_LVDS 35 +#define RST_BUS_CODEC 36 +#define RST_BUS_SPDIF 37 +#define RST_BUS_THS 38 +#define RST_BUS_I2S0 39 +#define RST_BUS_I2S1 40 +#define RST_BUS_I2S2 41 +#define RST_BUS_I2C0 42 +#define RST_BUS_I2C1 43 +#define RST_BUS_I2C2 44 +#define RST_BUS_SCR 45 +#define RST_BUS_UART0 46 +#define RST_BUS_UART1 47 +#define RST_BUS_UART2 48 +#define RST_BUS_UART3 49 +#define RST_BUS_UART4 50 + +#endif /* _DT_BINDINGS_RST_SUN50I_A64_H_ */ diff --git a/include/dt-bindings/reset/sun50i-h6-ccu.h b/include/dt-bindings/reset/sun50i-h6-ccu.h new file mode 100644 index 000000000..81106f455 --- /dev/null +++ b/include/dt-bindings/reset/sun50i-h6-ccu.h @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: (GPL-2.0+ or MIT) +/* + * Copyright (C) 2017 Icenowy Zheng + */ + +#ifndef _DT_BINDINGS_RESET_SUN50I_H6_H_ +#define _DT_BINDINGS_RESET_SUN50I_H6_H_ + +#define RST_MBUS 0 +#define RST_BUS_DE 1 +#define RST_BUS_DEINTERLACE 2 +#define RST_BUS_GPU 3 +#define RST_BUS_CE 4 +#define RST_BUS_VE 5 +#define RST_BUS_EMCE 6 +#define RST_BUS_VP9 7 +#define RST_BUS_DMA 8 +#define RST_BUS_MSGBOX 9 +#define RST_BUS_SPINLOCK 10 +#define RST_BUS_HSTIMER 11 +#define RST_BUS_DBG 12 +#define RST_BUS_PSI 13 +#define RST_BUS_PWM 14 +#define RST_BUS_IOMMU 15 +#define RST_BUS_DRAM 16 +#define RST_BUS_NAND 17 +#define RST_BUS_MMC0 18 +#define RST_BUS_MMC1 19 +#define RST_BUS_MMC2 20 +#define RST_BUS_UART0 21 +#define RST_BUS_UART1 22 +#define RST_BUS_UART2 23 +#define RST_BUS_UART3 24 +#define RST_BUS_I2C0 25 +#define RST_BUS_I2C1 26 +#define RST_BUS_I2C2 27 +#define RST_BUS_I2C3 28 +#define RST_BUS_SCR0 29 +#define RST_BUS_SCR1 30 +#define RST_BUS_SPI0 31 +#define RST_BUS_SPI1 32 +#define RST_BUS_EMAC 33 +#define RST_BUS_TS 34 +#define RST_BUS_IR_TX 35 +#define RST_BUS_THS 36 +#define RST_BUS_I2S0 37 +#define RST_BUS_I2S1 38 +#define RST_BUS_I2S2 39 +#define RST_BUS_I2S3 40 +#define RST_BUS_SPDIF 41 +#define RST_BUS_DMIC 42 +#define RST_BUS_AUDIO_HUB 43 +#define RST_USB_PHY0 44 +#define RST_USB_PHY1 45 +#define RST_USB_PHY3 46 +#define RST_USB_HSIC 47 +#define RST_BUS_OHCI0 48 +#define 
RST_BUS_OHCI3 49 +#define RST_BUS_EHCI0 50 +#define RST_BUS_XHCI 51 +#define RST_BUS_EHCI3 52 +#define RST_BUS_OTG 53 +#define RST_BUS_PCIE 54 +#define RST_PCIE_POWERUP 55 +#define RST_BUS_HDMI 56 +#define RST_BUS_HDMI_SUB 57 +#define RST_BUS_TCON_TOP 58 +#define RST_BUS_TCON_LCD0 59 +#define RST_BUS_TCON_TV0 60 +#define RST_BUS_CSI 61 +#define RST_BUS_HDCP 62 + +#endif /* _DT_BINDINGS_RESET_SUN50I_H6_H_ */ diff --git a/include/dt-bindings/reset/sun50i-h6-r-ccu.h b/include/dt-bindings/reset/sun50i-h6-r-ccu.h new file mode 100644 index 000000000..01c84dba4 --- /dev/null +++ b/include/dt-bindings/reset/sun50i-h6-r-ccu.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (C) 2016 Icenowy Zheng + */ + +#ifndef _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_ +#define _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_ + +#define RST_R_APB1_TIMER 0 +#define RST_R_APB1_TWD 1 +#define RST_R_APB1_PWM 2 +#define RST_R_APB2_UART 3 +#define RST_R_APB2_I2C 4 +#define RST_R_APB1_IR 5 +#define RST_R_APB1_W1 6 + +#endif /* _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun5i-ccu.h b/include/dt-bindings/reset/sun5i-ccu.h new file mode 100644 index 000000000..c2b9726b5 --- /dev/null +++ b/include/dt-bindings/reset/sun5i-ccu.h @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RST_SUN5I_H_ +#define _RST_SUN5I_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_GPS 2 +#define RST_DE_BE 3 +#define RST_DE_FE 4 +#define RST_TVE 5 +#define RST_LCD 6 +#define RST_CSI 7 +#define RST_VE 8 +#define RST_GPU 9 +#define RST_IEP 10 + +#endif /* _RST_SUN5I_H_ */ diff --git a/include/dt-bindings/reset/sun6i-a31-ccu.h b/include/dt-bindings/reset/sun6i-a31-ccu.h new file mode 100644 index 000000000..fbff365ed --- /dev/null +++ b/include/dt-bindings/reset/sun6i-a31-ccu.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN6I_A31_H_ +#define _DT_BINDINGS_RST_SUN6I_A31_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_USB_PHY2 2 + +#define RST_AHB1_MIPI_DSI 3 +#define RST_AHB1_SS 4 +#define RST_AHB1_DMA 5 +#define RST_AHB1_MMC0 6 +#define RST_AHB1_MMC1 7 +#define RST_AHB1_MMC2 8 +#define RST_AHB1_MMC3 9 +#define RST_AHB1_NAND1 10 +#define RST_AHB1_NAND0 11 +#define RST_AHB1_SDRAM 12 +#define RST_AHB1_EMAC 13 +#define RST_AHB1_TS 14 +#define RST_AHB1_HSTIMER 15 +#define RST_AHB1_SPI0 16 +#define RST_AHB1_SPI1 17 +#define RST_AHB1_SPI2 18 +#define RST_AHB1_SPI3 19 +#define RST_AHB1_OTG 20 +#define RST_AHB1_EHCI0 21 +#define RST_AHB1_EHCI1 22 +#define RST_AHB1_OHCI0 23 +#define RST_AHB1_OHCI1 24 +#define RST_AHB1_OHCI2 25 +#define RST_AHB1_VE 26 +#define RST_AHB1_LCD0 27 +#define RST_AHB1_LCD1 28 +#define RST_AHB1_CSI 29 +#define RST_AHB1_HDMI 30 +#define RST_AHB1_BE0 31 +#define RST_AHB1_BE1 32 +#define RST_AHB1_FE0 33 +#define RST_AHB1_FE1 34 +#define RST_AHB1_MP 35 +#define RST_AHB1_GPU 36 +#define RST_AHB1_DEU0 37 +#define RST_AHB1_DEU1 38 +#define RST_AHB1_DRC0 39 +#define RST_AHB1_DRC1 40 +#define RST_AHB1_LVDS 41 + +#define RST_APB1_CODEC 42 +#define RST_APB1_SPDIF 43 +#define RST_APB1_DIGITAL_MIC 44 +#define RST_APB1_DAUDIO0 45 +#define RST_APB1_DAUDIO1 46 +#define RST_APB2_I2C0 47 +#define RST_APB2_I2C1 48 +#define RST_APB2_I2C2 49 +#define RST_APB2_I2C3 50 +#define RST_APB2_UART0 51 +#define RST_APB2_UART1 52 +#define RST_APB2_UART2 53 +#define RST_APB2_UART3 54 +#define RST_APB2_UART4 55 +#define RST_APB2_UART5 56 + +#endif /* _DT_BINDINGS_RST_SUN6I_A31_H_ */ diff --git a/include/dt-bindings/reset/sun8i-a23-a33-ccu.h b/include/dt-bindings/reset/sun8i-a23-a33-ccu.h new file mode 100644 index 000000000..6121f2b0c --- /dev/null +++ b/include/dt-bindings/reset/sun8i-a23-a33-ccu.h @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_A23_A33_H_ +#define _DT_BINDINGS_RST_SUN8I_A23_A33_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_USB_HSIC 2 +#define RST_MBUS 3 +#define RST_BUS_MIPI_DSI 4 +#define RST_BUS_SS 5 +#define RST_BUS_DMA 6 +#define RST_BUS_MMC0 7 +#define RST_BUS_MMC1 8 +#define RST_BUS_MMC2 9 +#define RST_BUS_NAND 10 +#define RST_BUS_DRAM 11 +#define RST_BUS_HSTIMER 12 +#define RST_BUS_SPI0 13 +#define RST_BUS_SPI1 14 +#define RST_BUS_OTG 15 +#define RST_BUS_EHCI 16 +#define RST_BUS_OHCI 17 +#define RST_BUS_VE 18 +#define RST_BUS_LCD 19 +#define RST_BUS_CSI 20 +#define RST_BUS_DE_BE 21 +#define RST_BUS_DE_FE 22 +#define RST_BUS_GPU 23 +#define RST_BUS_MSGBOX 24 +#define RST_BUS_SPINLOCK 25 +#define RST_BUS_DRC 26 +#define RST_BUS_SAT 27 +#define RST_BUS_LVDS 28 +#define RST_BUS_CODEC 29 +#define RST_BUS_I2S0 30 +#define RST_BUS_I2S1 31 +#define RST_BUS_I2C0 32 +#define RST_BUS_I2C1 33 +#define RST_BUS_I2C2 34 +#define RST_BUS_UART0 35 +#define RST_BUS_UART1 36 +#define RST_BUS_UART2 37 +#define RST_BUS_UART3 38 +#define RST_BUS_UART4 39 + +#endif /* _DT_BINDINGS_RST_SUN8I_A23_A33_H_ */ diff --git a/include/dt-bindings/reset/sun8i-a83t-ccu.h b/include/dt-bindings/reset/sun8i-a83t-ccu.h new file mode 100644 index 000000000..784f6e116 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-a83t-ccu.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2017 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN8I_A83T_CCU_H_ +#define _DT_BINDINGS_RESET_SUN8I_A83T_CCU_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_USB_HSIC 2 + +#define RST_DRAM 3 +#define RST_MBUS 4 + +#define RST_BUS_MIPI_DSI 5 +#define RST_BUS_SS 6 +#define RST_BUS_DMA 7 +#define RST_BUS_MMC0 8 +#define RST_BUS_MMC1 9 +#define RST_BUS_MMC2 10 +#define RST_BUS_NAND 11 +#define RST_BUS_DRAM 12 +#define RST_BUS_EMAC 13 +#define RST_BUS_HSTIMER 14 +#define RST_BUS_SPI0 15 +#define RST_BUS_SPI1 16 +#define RST_BUS_OTG 17 +#define RST_BUS_EHCI0 18 +#define RST_BUS_EHCI1 19 +#define RST_BUS_OHCI0 20 + +#define RST_BUS_VE 21 +#define RST_BUS_TCON0 22 +#define RST_BUS_TCON1 23 +#define RST_BUS_CSI 24 +#define RST_BUS_HDMI0 25 +#define RST_BUS_HDMI1 26 +#define RST_BUS_DE 27 +#define RST_BUS_GPU 28 +#define RST_BUS_MSGBOX 29 +#define RST_BUS_SPINLOCK 30 + +#define RST_BUS_LVDS 31 + +#define RST_BUS_SPDIF 32 +#define RST_BUS_I2S0 33 +#define RST_BUS_I2S1 34 +#define RST_BUS_I2S2 35 +#define RST_BUS_TDM 36 + +#define RST_BUS_I2C0 37 +#define RST_BUS_I2C1 38 +#define RST_BUS_I2C2 39 +#define RST_BUS_UART0 40 +#define RST_BUS_UART1 41 +#define RST_BUS_UART2 42 +#define RST_BUS_UART3 43 +#define RST_BUS_UART4 44 + +#endif /* _DT_BINDINGS_RESET_SUN8I_A83T_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun8i-de2.h b/include/dt-bindings/reset/sun8i-de2.h new file mode 100644 index 000000000..952601743 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-de2.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2016 Icenowy Zheng + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +#ifndef _DT_BINDINGS_RESET_SUN8I_DE2_H_ +#define _DT_BINDINGS_RESET_SUN8I_DE2_H_ + +#define RST_MIXER0 0 +#define RST_MIXER1 1 +#define RST_WB 2 + +#endif /* _DT_BINDINGS_RESET_SUN8I_DE2_H_ */ diff --git a/include/dt-bindings/reset/sun8i-h3-ccu.h b/include/dt-bindings/reset/sun8i-h3-ccu.h new file mode 100644 index 000000000..484c2a229 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-h3-ccu.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_H3_H_ +#define _DT_BINDINGS_RST_SUN8I_H3_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_USB_PHY2 2 +#define RST_USB_PHY3 3 + +#define RST_MBUS 4 + +#define RST_BUS_CE 5 +#define RST_BUS_DMA 6 +#define RST_BUS_MMC0 7 +#define RST_BUS_MMC1 8 +#define RST_BUS_MMC2 9 +#define RST_BUS_NAND 10 +#define RST_BUS_DRAM 11 +#define RST_BUS_EMAC 12 +#define RST_BUS_TS 13 +#define RST_BUS_HSTIMER 14 +#define RST_BUS_SPI0 15 +#define RST_BUS_SPI1 16 +#define RST_BUS_OTG 17 +#define RST_BUS_EHCI0 18 +#define RST_BUS_EHCI1 19 +#define RST_BUS_EHCI2 20 +#define RST_BUS_EHCI3 21 +#define RST_BUS_OHCI0 22 +#define RST_BUS_OHCI1 23 +#define RST_BUS_OHCI2 24 +#define RST_BUS_OHCI3 25 +#define RST_BUS_VE 26 +#define RST_BUS_TCON0 27 +#define RST_BUS_TCON1 28 +#define RST_BUS_DEINTERLACE 29 +#define RST_BUS_CSI 30 +#define RST_BUS_TVE 31 +#define RST_BUS_HDMI0 32 +#define RST_BUS_HDMI1 33 +#define RST_BUS_DE 34 +#define RST_BUS_GPU 35 +#define RST_BUS_MSGBOX 36 +#define RST_BUS_SPINLOCK 37 +#define RST_BUS_DBG 38 +#define RST_BUS_EPHY 39 +#define RST_BUS_CODEC 40 +#define RST_BUS_SPDIF 41 +#define RST_BUS_THS 42 +#define RST_BUS_I2S0 43 +#define RST_BUS_I2S1 44 +#define RST_BUS_I2S2 45 +#define RST_BUS_I2C0 46 +#define RST_BUS_I2C1 47 +#define RST_BUS_I2C2 48 +#define RST_BUS_UART0 49 +#define RST_BUS_UART1 50 +#define RST_BUS_UART2 51 +#define RST_BUS_UART3 52 +#define RST_BUS_SCR0 53 + +/* New resets imported in H5 */ +#define RST_BUS_SCR1 54 + +#endif /* _DT_BINDINGS_RST_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/reset/sun8i-r-ccu.h b/include/dt-bindings/reset/sun8i-r-ccu.h new file mode 100644 index 000000000..4ba64f3d6 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-r-ccu.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2016 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 
license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_R_CCU_H_ +#define _DT_BINDINGS_RST_SUN8I_R_CCU_H_ + +#define RST_APB0_IR 0 +#define RST_APB0_TIMER 1 +#define RST_APB0_RSB 2 +#define RST_APB0_UART 3 +/* 4 is reserved for RST_APB0_W1 on A31 */ +#define RST_APB0_I2C 5 + +#endif /* _DT_BINDINGS_RST_SUN8I_R_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun8i-r40-ccu.h b/include/dt-bindings/reset/sun8i-r40-ccu.h new file mode 100644 index 000000000..c5ebcf667 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-r40-ccu.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2017 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_R40_H_ +#define _DT_BINDINGS_RST_SUN8I_R40_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_USB_PHY2 2 + +#define RST_DRAM 3 +#define RST_MBUS 4 + +#define RST_BUS_MIPI_DSI 5 +#define RST_BUS_CE 6 +#define RST_BUS_DMA 7 +#define RST_BUS_MMC0 8 +#define RST_BUS_MMC1 9 +#define RST_BUS_MMC2 10 +#define RST_BUS_MMC3 11 +#define RST_BUS_NAND 12 +#define RST_BUS_DRAM 13 +#define RST_BUS_EMAC 14 +#define RST_BUS_TS 15 +#define RST_BUS_HSTIMER 16 +#define RST_BUS_SPI0 17 +#define RST_BUS_SPI1 18 +#define RST_BUS_SPI2 19 +#define RST_BUS_SPI3 20 +#define RST_BUS_SATA 21 +#define RST_BUS_OTG 22 +#define RST_BUS_EHCI0 23 +#define RST_BUS_EHCI1 24 +#define RST_BUS_EHCI2 25 +#define RST_BUS_OHCI0 26 +#define RST_BUS_OHCI1 27 +#define RST_BUS_OHCI2 28 +#define RST_BUS_VE 29 +#define RST_BUS_MP 30 +#define RST_BUS_DEINTERLACE 31 +#define RST_BUS_CSI0 32 +#define RST_BUS_CSI1 33 +#define RST_BUS_HDMI0 34 +#define RST_BUS_HDMI1 35 +#define RST_BUS_DE 36 +#define RST_BUS_TVE0 37 +#define RST_BUS_TVE1 38 +#define RST_BUS_TVE_TOP 39 +#define RST_BUS_GMAC 40 +#define RST_BUS_GPU 41 +#define RST_BUS_TVD0 42 +#define RST_BUS_TVD1 43 +#define RST_BUS_TVD2 44 +#define RST_BUS_TVD3 45 +#define RST_BUS_TVD_TOP 46 +#define RST_BUS_TCON_LCD0 47 +#define RST_BUS_TCON_LCD1 48 +#define RST_BUS_TCON_TV0 49 +#define RST_BUS_TCON_TV1 50 +#define RST_BUS_TCON_TOP 51 +#define RST_BUS_DBG 52 +#define RST_BUS_LVDS 53 +#define RST_BUS_CODEC 54 +#define RST_BUS_SPDIF 55 +#define RST_BUS_AC97 56 +#define RST_BUS_IR0 57 +#define RST_BUS_IR1 58 +#define RST_BUS_THS 59 +#define RST_BUS_KEYPAD 60 +#define RST_BUS_I2S0 61 +#define RST_BUS_I2S1 62 +#define RST_BUS_I2S2 63 +#define RST_BUS_I2C0 64 +#define RST_BUS_I2C1 65 +#define RST_BUS_I2C2 66 +#define RST_BUS_I2C3 67 +#define RST_BUS_CAN 68 +#define RST_BUS_SCR 69 +#define RST_BUS_PS20 70 +#define RST_BUS_PS21 71 +#define RST_BUS_I2C4 72 +#define RST_BUS_UART0 73 +#define RST_BUS_UART1 74 +#define RST_BUS_UART2 75 +#define RST_BUS_UART3 76 +#define RST_BUS_UART4 77 +#define RST_BUS_UART5 78 +#define RST_BUS_UART6 79 +#define RST_BUS_UART7 80 + +#endif /* _DT_BINDINGS_RST_SUN8I_R40_H_ */ diff --git a/include/dt-bindings/reset/sun8i-v3s-ccu.h b/include/dt-bindings/reset/sun8i-v3s-ccu.h new file mode 100644 index 000000000..b58ef21a2 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-v3s-ccu.h @@ -0,0 +1,78 @@ +/* + 
* Copyright (C) 2016 Icenowy Zheng + * + * Based on sun8i-v3s-ccu.h, which is + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_V3S_H_ +#define _DT_BINDINGS_RST_SUN8I_V3S_H_ + +#define RST_USB_PHY0 0 + +#define RST_MBUS 1 + +#define RST_BUS_CE 5 +#define RST_BUS_DMA 6 +#define RST_BUS_MMC0 7 +#define RST_BUS_MMC1 8 +#define RST_BUS_MMC2 9 +#define RST_BUS_DRAM 11 +#define RST_BUS_EMAC 12 +#define RST_BUS_HSTIMER 14 +#define RST_BUS_SPI0 15 +#define RST_BUS_OTG 17 +#define RST_BUS_EHCI0 18 +#define RST_BUS_OHCI0 22 +#define RST_BUS_VE 26 +#define RST_BUS_TCON0 27 +#define RST_BUS_CSI 30 +#define RST_BUS_DE 34 +#define RST_BUS_DBG 38 +#define RST_BUS_EPHY 39 +#define RST_BUS_CODEC 40 +#define RST_BUS_I2C0 46 +#define RST_BUS_I2C1 47 +#define RST_BUS_UART0 49 +#define RST_BUS_UART1 50 +#define RST_BUS_UART2 51 + +#endif /* _DT_BINDINGS_RST_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-ccu.h b/include/dt-bindings/reset/sun9i-a80-ccu.h new file mode 100644 index 000000000..4b8df4b36 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-ccu.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ + +#define RST_BUS_FD 0 +#define RST_BUS_VE 1 +#define RST_BUS_GPU_CTRL 2 +#define RST_BUS_SS 3 +#define RST_BUS_MMC 4 +#define RST_BUS_NAND0 5 +#define RST_BUS_NAND1 6 +#define RST_BUS_SDRAM 7 +#define RST_BUS_SATA 8 +#define RST_BUS_TS 9 +#define RST_BUS_SPI0 10 +#define RST_BUS_SPI1 11 +#define RST_BUS_SPI2 12 +#define RST_BUS_SPI3 13 + +#define RST_BUS_OTG 14 +#define RST_BUS_OTG_PHY 15 +#define RST_BUS_MIPI_HSI 16 +#define RST_BUS_GMAC 17 +#define RST_BUS_MSGBOX 18 +#define RST_BUS_SPINLOCK 19 +#define RST_BUS_HSTIMER 20 +#define RST_BUS_DMA 21 + +#define RST_BUS_LCD0 22 +#define RST_BUS_LCD1 23 +#define RST_BUS_EDP 24 +#define RST_BUS_LVDS 25 +#define RST_BUS_CSI 26 +#define RST_BUS_HDMI0 27 +#define RST_BUS_HDMI1 28 +#define RST_BUS_DE 29 +#define RST_BUS_MP 30 +#define RST_BUS_GPU 31 +#define RST_BUS_MIPI_DSI 32 + +#define RST_BUS_SPDIF 33 +#define RST_BUS_AC97 34 +#define RST_BUS_I2S0 35 +#define RST_BUS_I2S1 36 +#define RST_BUS_LRADC 37 +#define RST_BUS_GPADC 38 +#define RST_BUS_CIR_TX 39 + +#define RST_BUS_I2C0 40 +#define RST_BUS_I2C1 41 +#define RST_BUS_I2C2 42 +#define RST_BUS_I2C3 43 +#define RST_BUS_I2C4 44 +#define RST_BUS_UART0 45 +#define RST_BUS_UART1 46 +#define RST_BUS_UART2 47 +#define RST_BUS_UART3 48 +#define RST_BUS_UART4 49 +#define RST_BUS_UART5 50 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-de.h b/include/dt-bindings/reset/sun9i-a80-de.h new file mode 100644 index 000000000..205072770 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-de.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ + +#define RST_FE0 0 +#define RST_FE1 1 +#define RST_FE2 2 +#define RST_DEU0 3 +#define RST_DEU1 4 +#define RST_BE0 5 +#define RST_BE1 6 +#define RST_BE2 7 +#define RST_DRC0 8 +#define RST_DRC1 9 +#define RST_MERGE 10 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-usb.h b/include/dt-bindings/reset/sun9i-a80-usb.h new file mode 100644 index 000000000..ee492864c --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-usb.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ + +#define RST_USB0_HCI 0 +#define RST_USB1_HCI 1 +#define RST_USB2_HCI 2 + +#define RST_USB0_PHY 3 +#define RST_USB1_HSIC 4 +#define RST_USB1_PHY 5 +#define RST_USB2_HSIC 6 +#define RST_USB2_PHY 7 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ */ diff --git a/include/dt-bindings/reset/tegra124-car.h b/include/dt-bindings/reset/tegra124-car.h new file mode 100644 index 000000000..97d2f3db8 --- /dev/null +++ b/include/dt-bindings/reset/tegra124-car.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides Tegra124-specific constants for binding + * nvidia,tegra124-car. + */ + +#ifndef _DT_BINDINGS_RESET_TEGRA124_CAR_H +#define _DT_BINDINGS_RESET_TEGRA124_CAR_H + +#define TEGRA124_RESET(x) (6 * 32 + (x)) +#define TEGRA124_RST_DFLL_DVCO TEGRA124_RESET(0) + +#endif /* _DT_BINDINGS_RESET_TEGRA124_CAR_H */ diff --git a/include/dt-bindings/reset/tegra186-reset.h b/include/dt-bindings/reset/tegra186-reset.h new file mode 100644 index 000000000..8a184e357 --- /dev/null +++ b/include/dt-bindings/reset/tegra186-reset.h @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _ABI_MACH_T186_RESET_T186_H_ +#define _ABI_MACH_T186_RESET_T186_H_ + + +#define TEGRA186_RESET_ACTMON 0 +#define TEGRA186_RESET_AFI 1 +#define TEGRA186_RESET_CEC 2 +#define TEGRA186_RESET_CSITE 3 +#define TEGRA186_RESET_DP2 4 +#define TEGRA186_RESET_DPAUX 5 +#define TEGRA186_RESET_DSI 6 +#define TEGRA186_RESET_DSIB 7 +#define TEGRA186_RESET_DTV 8 +#define TEGRA186_RESET_DVFS 9 +#define TEGRA186_RESET_ENTROPY 10 +#define TEGRA186_RESET_EXTPERIPH1 11 +#define TEGRA186_RESET_EXTPERIPH2 12 +#define TEGRA186_RESET_EXTPERIPH3 13 +#define TEGRA186_RESET_GPU 14 +#define TEGRA186_RESET_HDA 15 +#define TEGRA186_RESET_HDA2CODEC_2X 16 +#define TEGRA186_RESET_HDA2HDMICODEC 17 +#define TEGRA186_RESET_HOST1X 18 +#define TEGRA186_RESET_I2C1 19 +#define TEGRA186_RESET_I2C2 20 +#define TEGRA186_RESET_I2C3 21 +#define TEGRA186_RESET_I2C4 22 +#define TEGRA186_RESET_I2C5 23 +#define TEGRA186_RESET_I2C6 24 +#define TEGRA186_RESET_ISP 25 +#define TEGRA186_RESET_KFUSE 26 +#define TEGRA186_RESET_LA 27 +#define TEGRA186_RESET_MIPI_CAL 28 +#define TEGRA186_RESET_PCIE 29 +#define TEGRA186_RESET_PCIEXCLK 30 +#define TEGRA186_RESET_SATA 31 +#define TEGRA186_RESET_SATACOLD 32 +#define TEGRA186_RESET_SDMMC1 33 +#define TEGRA186_RESET_SDMMC2 34 +#define TEGRA186_RESET_SDMMC3 35 +#define TEGRA186_RESET_SDMMC4 36 +#define TEGRA186_RESET_SE 37 +#define TEGRA186_RESET_SOC_THERM 38 +#define TEGRA186_RESET_SOR0 39 +#define TEGRA186_RESET_SPI1 40 +#define TEGRA186_RESET_SPI2 41 +#define TEGRA186_RESET_SPI3 42 +#define TEGRA186_RESET_SPI4 43 +#define TEGRA186_RESET_TMR 44 +#define TEGRA186_RESET_TRIG_SYS 45 +#define TEGRA186_RESET_TSEC 46 +#define TEGRA186_RESET_UARTA 47 +#define TEGRA186_RESET_UARTB 48 +#define TEGRA186_RESET_UARTC 49 +#define TEGRA186_RESET_UARTD 50 +#define TEGRA186_RESET_VI 51 +#define TEGRA186_RESET_VIC 52 +#define TEGRA186_RESET_XUSB_DEV 53 +#define TEGRA186_RESET_XUSB_HOST 54 +#define TEGRA186_RESET_XUSB_PADCTL 55 +#define TEGRA186_RESET_XUSB_SS 56 +#define TEGRA186_RESET_AON_APB 57 +#define TEGRA186_RESET_AXI_CBB 58 +#define TEGRA186_RESET_BPMP_APB 59 +#define TEGRA186_RESET_CAN1 60 +#define TEGRA186_RESET_CAN2 61 +#define TEGRA186_RESET_DMIC5 62 +#define TEGRA186_RESET_DSIC 63 +#define TEGRA186_RESET_DSID 64 +#define TEGRA186_RESET_EMC_EMC 65 +#define TEGRA186_RESET_EMC_MEM 66 +#define TEGRA186_RESET_EMCSB_EMC 67 +#define TEGRA186_RESET_EMCSB_MEM 68 +#define TEGRA186_RESET_EQOS 69 +#define TEGRA186_RESET_GPCDMA 70 +#define TEGRA186_RESET_GPIO_CTL0 71 +#define TEGRA186_RESET_GPIO_CTL1 72 +#define TEGRA186_RESET_GPIO_CTL2 73 +#define TEGRA186_RESET_GPIO_CTL3 74 +#define TEGRA186_RESET_GPIO_CTL4 75 +#define TEGRA186_RESET_GPIO_CTL5 76 +#define TEGRA186_RESET_I2C10 77 +#define TEGRA186_RESET_I2C12 78 +#define TEGRA186_RESET_I2C13 79 +#define TEGRA186_RESET_I2C14 80 +#define TEGRA186_RESET_I2C7 81 +#define TEGRA186_RESET_I2C8 82 +#define TEGRA186_RESET_I2C9 83 +#define TEGRA186_RESET_JTAG2AXI 84 +#define TEGRA186_RESET_MPHY_IOBIST 85 +#define TEGRA186_RESET_MPHY_L0_RX 86 +#define TEGRA186_RESET_MPHY_L0_TX 87 +#define TEGRA186_RESET_NVCSI 88 +#define TEGRA186_RESET_NVDISPLAY0_HEAD0 89 +#define TEGRA186_RESET_NVDISPLAY0_HEAD1 90 +#define TEGRA186_RESET_NVDISPLAY0_HEAD2 91 +#define TEGRA186_RESET_NVDISPLAY0_MISC 92 +#define TEGRA186_RESET_NVDISPLAY0_WGRP0 93 +#define TEGRA186_RESET_NVDISPLAY0_WGRP1 94 +#define TEGRA186_RESET_NVDISPLAY0_WGRP2 95 +#define TEGRA186_RESET_NVDISPLAY0_WGRP3 96 +#define TEGRA186_RESET_NVDISPLAY0_WGRP4 97 +#define TEGRA186_RESET_NVDISPLAY0_WGRP5 98 +#define 
TEGRA186_RESET_PWM1 99 +#define TEGRA186_RESET_PWM2 100 +#define TEGRA186_RESET_PWM3 101 +#define TEGRA186_RESET_PWM4 102 +#define TEGRA186_RESET_PWM5 103 +#define TEGRA186_RESET_PWM6 104 +#define TEGRA186_RESET_PWM7 105 +#define TEGRA186_RESET_PWM8 106 +#define TEGRA186_RESET_SCE_APB 107 +#define TEGRA186_RESET_SOR1 108 +#define TEGRA186_RESET_TACH 109 +#define TEGRA186_RESET_TSC 110 +#define TEGRA186_RESET_UARTF 111 +#define TEGRA186_RESET_UARTG 112 +#define TEGRA186_RESET_UFSHC 113 +#define TEGRA186_RESET_UFSHC_AXI_M 114 +#define TEGRA186_RESET_UPHY 115 +#define TEGRA186_RESET_ADSP 116 +#define TEGRA186_RESET_ADSPDBG 117 +#define TEGRA186_RESET_ADSPINTF 118 +#define TEGRA186_RESET_ADSPNEON 119 +#define TEGRA186_RESET_ADSPPERIPH 120 +#define TEGRA186_RESET_ADSPSCU 121 +#define TEGRA186_RESET_ADSPWDT 122 +#define TEGRA186_RESET_APE 123 +#define TEGRA186_RESET_DPAUX1 124 +#define TEGRA186_RESET_NVDEC 125 +#define TEGRA186_RESET_NVENC 126 +#define TEGRA186_RESET_NVJPG 127 +#define TEGRA186_RESET_PEX_USB_UPHY 128 +#define TEGRA186_RESET_QSPI 129 +#define TEGRA186_RESET_TSECB 130 +#define TEGRA186_RESET_VI_I2C 131 +#define TEGRA186_RESET_UARTE 132 +#define TEGRA186_RESET_TOP_GTE 133 +#define TEGRA186_RESET_SHSP 134 +#define TEGRA186_RESET_PEX_USB_UPHY_L5 135 +#define TEGRA186_RESET_PEX_USB_UPHY_L4 136 +#define TEGRA186_RESET_PEX_USB_UPHY_L3 137 +#define TEGRA186_RESET_PEX_USB_UPHY_L2 138 +#define TEGRA186_RESET_PEX_USB_UPHY_L1 139 +#define TEGRA186_RESET_PEX_USB_UPHY_L0 140 +#define TEGRA186_RESET_PEX_USB_UPHY_PLL1 141 +#define TEGRA186_RESET_PEX_USB_UPHY_PLL0 142 +#define TEGRA186_RESET_TSCTNVI 143 +#define TEGRA186_RESET_EXTPERIPH4 144 +#define TEGRA186_RESET_DSIPADCTL 145 +#define TEGRA186_RESET_AUD_MCLK 146 +#define TEGRA186_RESET_MPHY_CLK_CTL 147 +#define TEGRA186_RESET_MPHY_L1_RX 148 +#define TEGRA186_RESET_MPHY_L1_TX 149 +#define TEGRA186_RESET_UFSHC_LP 150 +#define TEGRA186_RESET_BPMP_NIC 151 +#define TEGRA186_RESET_BPMP_NSYSPORESET 152 +#define TEGRA186_RESET_BPMP_NRESET 153 +#define TEGRA186_RESET_BPMP_DBGRESETN 154 +#define TEGRA186_RESET_BPMP_PRESETDBGN 155 +#define TEGRA186_RESET_BPMP_PM 156 +#define TEGRA186_RESET_BPMP_CVC 157 +#define TEGRA186_RESET_BPMP_DMA 158 +#define TEGRA186_RESET_BPMP_HSP 159 +#define TEGRA186_RESET_TSCTNBPMP 160 +#define TEGRA186_RESET_BPMP_TKE 161 +#define TEGRA186_RESET_BPMP_GTE 162 +#define TEGRA186_RESET_BPMP_PM_ACTMON 163 +#define TEGRA186_RESET_AON_NIC 164 +#define TEGRA186_RESET_AON_NSYSPORESET 165 +#define TEGRA186_RESET_AON_NRESET 166 +#define TEGRA186_RESET_AON_DBGRESETN 167 +#define TEGRA186_RESET_AON_PRESETDBGN 168 +#define TEGRA186_RESET_AON_ACTMON 169 +#define TEGRA186_RESET_AOPM 170 +#define TEGRA186_RESET_AOVC 171 +#define TEGRA186_RESET_AON_DMA 172 +#define TEGRA186_RESET_AON_GPIO 173 +#define TEGRA186_RESET_AON_HSP 174 +#define TEGRA186_RESET_TSCTNAON 175 +#define TEGRA186_RESET_AON_TKE 176 +#define TEGRA186_RESET_AON_GTE 177 +#define TEGRA186_RESET_SCE_NIC 178 +#define TEGRA186_RESET_SCE_NSYSPORESET 179 +#define TEGRA186_RESET_SCE_NRESET 180 +#define TEGRA186_RESET_SCE_DBGRESETN 181 +#define TEGRA186_RESET_SCE_PRESETDBGN 182 +#define TEGRA186_RESET_SCE_ACTMON 183 +#define TEGRA186_RESET_SCE_PM 184 +#define TEGRA186_RESET_SCE_DMA 185 +#define TEGRA186_RESET_SCE_HSP 186 +#define TEGRA186_RESET_TSCTNSCE 187 +#define TEGRA186_RESET_SCE_TKE 188 +#define TEGRA186_RESET_SCE_GTE 189 +#define TEGRA186_RESET_SCE_CFG 190 +#define TEGRA186_RESET_ADSP_ALL 191 +/** @brief controls the power up/down sequence of UFSHC PSW partition. 
Controls LP_PWR_READY, LP_ISOL_EN, and LP_RESET_N signals */ +#define TEGRA186_RESET_UFSHC_LP_SEQ 192 +#define TEGRA186_RESET_SIZE 193 + +#endif diff --git a/include/dt-bindings/reset/tegra194-reset.h b/include/dt-bindings/reset/tegra194-reset.h new file mode 100644 index 000000000..473afaa25 --- /dev/null +++ b/include/dt-bindings/reset/tegra194-reset.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __ABI_MACH_T194_RESET_H +#define __ABI_MACH_T194_RESET_H + +#define TEGRA194_RESET_ACTMON 1 +#define TEGRA194_RESET_ADSP_ALL 2 +#define TEGRA194_RESET_AFI 3 +#define TEGRA194_RESET_CAN1 4 +#define TEGRA194_RESET_CAN2 5 +#define TEGRA194_RESET_DLA0 6 +#define TEGRA194_RESET_DLA1 7 +#define TEGRA194_RESET_DPAUX 8 +#define TEGRA194_RESET_DPAUX1 9 +#define TEGRA194_RESET_DPAUX2 10 +#define TEGRA194_RESET_DPAUX3 11 +#define TEGRA194_RESET_EQOS 17 +#define TEGRA194_RESET_GPCDMA 18 +#define TEGRA194_RESET_GPU 19 +#define TEGRA194_RESET_HDA 20 +#define TEGRA194_RESET_HDA2CODEC_2X 21 +#define TEGRA194_RESET_HDA2HDMICODEC 22 +#define TEGRA194_RESET_HOST1X 23 +#define TEGRA194_RESET_I2C1 24 +#define TEGRA194_RESET_I2C10 25 +#define TEGRA194_RESET_RSVD_26 26 +#define TEGRA194_RESET_RSVD_27 27 +#define TEGRA194_RESET_RSVD_28 28 +#define TEGRA194_RESET_I2C2 29 +#define TEGRA194_RESET_I2C3 30 +#define TEGRA194_RESET_I2C4 31 +#define TEGRA194_RESET_I2C6 32 +#define TEGRA194_RESET_I2C7 33 +#define TEGRA194_RESET_I2C8 34 +#define TEGRA194_RESET_I2C9 35 +#define TEGRA194_RESET_ISP 36 +#define TEGRA194_RESET_MIPI_CAL 37 +#define TEGRA194_RESET_MPHY_CLK_CTL 38 +#define TEGRA194_RESET_MPHY_L0_RX 39 +#define TEGRA194_RESET_MPHY_L0_TX 40 +#define TEGRA194_RESET_MPHY_L1_RX 41 +#define TEGRA194_RESET_MPHY_L1_TX 42 +#define TEGRA194_RESET_NVCSI 43 +#define TEGRA194_RESET_NVDEC 44 +#define TEGRA194_RESET_NVDISPLAY0_HEAD0 45 +#define TEGRA194_RESET_NVDISPLAY0_HEAD1 46 +#define TEGRA194_RESET_NVDISPLAY0_HEAD2 47 +#define TEGRA194_RESET_NVDISPLAY0_HEAD3 48 +#define TEGRA194_RESET_NVDISPLAY0_MISC 49 +#define TEGRA194_RESET_NVDISPLAY0_WGRP0 50 +#define TEGRA194_RESET_NVDISPLAY0_WGRP1 51 +#define TEGRA194_RESET_NVDISPLAY0_WGRP2 52 +#define TEGRA194_RESET_NVDISPLAY0_WGRP3 53 +#define TEGRA194_RESET_NVDISPLAY0_WGRP4 54 +#define TEGRA194_RESET_NVDISPLAY0_WGRP5 55 +#define TEGRA194_RESET_RSVD_56 56 +#define TEGRA194_RESET_RSVD_57 57 +#define TEGRA194_RESET_RSVD_58 58 +#define TEGRA194_RESET_NVENC 59 +#define TEGRA194_RESET_NVENC1 60 +#define TEGRA194_RESET_NVJPG 61 +#define TEGRA194_RESET_PCIE 62 +#define TEGRA194_RESET_PCIEXCLK 63 +#define TEGRA194_RESET_RSVD_64 64 +#define TEGRA194_RESET_RSVD_65 65 +#define TEGRA194_RESET_PVA0_ALL 66 +#define TEGRA194_RESET_PVA1_ALL 67 +#define TEGRA194_RESET_PWM1 68 +#define TEGRA194_RESET_PWM2 69 +#define TEGRA194_RESET_PWM3 70 +#define TEGRA194_RESET_PWM4 71 +#define TEGRA194_RESET_PWM5 72 +#define TEGRA194_RESET_PWM6 73 +#define TEGRA194_RESET_PWM7 74 +#define TEGRA194_RESET_PWM8 75 +#define TEGRA194_RESET_QSPI0 76 +#define TEGRA194_RESET_QSPI1 77 +#define TEGRA194_RESET_SATA 78 +#define TEGRA194_RESET_SATACOLD 79 +#define TEGRA194_RESET_SCE_ALL 80 +#define TEGRA194_RESET_RCE_ALL 81 +#define TEGRA194_RESET_SDMMC1 82 +#define TEGRA194_RESET_RSVD_83 83 +#define TEGRA194_RESET_SDMMC3 84 +#define TEGRA194_RESET_SDMMC4 85 +#define TEGRA194_RESET_SE 86 +#define TEGRA194_RESET_SOR0 87 +#define TEGRA194_RESET_SOR1 88 +#define TEGRA194_RESET_SOR2 89 +#define TEGRA194_RESET_SOR3 90 +#define TEGRA194_RESET_SPI1 91 
+#define TEGRA194_RESET_SPI2 92 +#define TEGRA194_RESET_SPI3 93 +#define TEGRA194_RESET_SPI4 94 +#define TEGRA194_RESET_TACH 95 +#define TEGRA194_RESET_RSVD_96 96 +#define TEGRA194_RESET_TSCTNVI 97 +#define TEGRA194_RESET_TSEC 98 +#define TEGRA194_RESET_TSECB 99 +#define TEGRA194_RESET_UARTA 100 +#define TEGRA194_RESET_UARTB 101 +#define TEGRA194_RESET_UARTC 102 +#define TEGRA194_RESET_UARTD 103 +#define TEGRA194_RESET_UARTE 104 +#define TEGRA194_RESET_UARTF 105 +#define TEGRA194_RESET_UARTG 106 +#define TEGRA194_RESET_UARTH 107 +#define TEGRA194_RESET_UFSHC 108 +#define TEGRA194_RESET_UFSHC_AXI_M 109 +#define TEGRA194_RESET_UFSHC_LP_SEQ 110 +#define TEGRA194_RESET_RSVD_111 111 +#define TEGRA194_RESET_VI 112 +#define TEGRA194_RESET_VIC 113 +#define TEGRA194_RESET_XUSB_PADCTL 114 +#define TEGRA194_RESET_NVDEC1 115 +#define TEGRA194_RESET_PEX0_CORE_0 116 +#define TEGRA194_RESET_PEX0_CORE_1 117 +#define TEGRA194_RESET_PEX0_CORE_2 118 +#define TEGRA194_RESET_PEX0_CORE_3 119 +#define TEGRA194_RESET_PEX0_CORE_4 120 +#define TEGRA194_RESET_PEX0_CORE_0_APB 121 +#define TEGRA194_RESET_PEX0_CORE_1_APB 122 +#define TEGRA194_RESET_PEX0_CORE_2_APB 123 +#define TEGRA194_RESET_PEX0_CORE_3_APB 124 +#define TEGRA194_RESET_PEX0_CORE_4_APB 125 +#define TEGRA194_RESET_PEX0_COMMON_APB 126 +#define TEGRA194_RESET_PEX1_CORE_5 129 +#define TEGRA194_RESET_PEX1_CORE_5_APB 130 +#define TEGRA194_RESET_CVNAS 131 +#define TEGRA194_RESET_CVNAS_FCM 132 +#define TEGRA194_RESET_DMIC5 144 +#define TEGRA194_RESET_APE 145 +#define TEGRA194_RESET_PEX_USB_UPHY 146 +#define TEGRA194_RESET_PEX_USB_UPHY_L0 147 +#define TEGRA194_RESET_PEX_USB_UPHY_L1 148 +#define TEGRA194_RESET_PEX_USB_UPHY_L2 149 +#define TEGRA194_RESET_PEX_USB_UPHY_L3 150 +#define TEGRA194_RESET_PEX_USB_UPHY_L4 151 +#define TEGRA194_RESET_PEX_USB_UPHY_L5 152 +#define TEGRA194_RESET_PEX_USB_UPHY_L6 153 +#define TEGRA194_RESET_PEX_USB_UPHY_L7 154 +#define TEGRA194_RESET_PEX_USB_UPHY_L8 155 +#define TEGRA194_RESET_PEX_USB_UPHY_L9 156 +#define TEGRA194_RESET_PEX_USB_UPHY_L10 157 +#define TEGRA194_RESET_PEX_USB_UPHY_L11 158 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL0 159 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL1 160 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL2 161 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL3 162 + +#endif diff --git a/include/dt-bindings/reset/tegra210-car.h b/include/dt-bindings/reset/tegra210-car.h new file mode 100644 index 000000000..9dc84ec76 --- /dev/null +++ b/include/dt-bindings/reset/tegra210-car.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides Tegra210-specific constants for binding + * nvidia,tegra210-car. + */ + +#ifndef _DT_BINDINGS_RESET_TEGRA210_CAR_H +#define _DT_BINDINGS_RESET_TEGRA210_CAR_H + +#define TEGRA210_RESET(x) (7 * 32 + (x)) +#define TEGRA210_RST_DFLL_DVCO TEGRA210_RESET(0) +#define TEGRA210_RST_ADSP TEGRA210_RESET(1) + +#endif /* _DT_BINDINGS_RESET_TEGRA210_CAR_H */ diff --git a/include/dt-bindings/reset/ti-syscon.h b/include/dt-bindings/reset/ti-syscon.h new file mode 100644 index 000000000..884fd91df --- /dev/null +++ b/include/dt-bindings/reset/ti-syscon.h @@ -0,0 +1,38 @@ +/* + * TI Syscon Reset definitions + * + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DT_BINDINGS_RESET_TI_SYSCON_H__ +#define __DT_BINDINGS_RESET_TI_SYSCON_H__ + +/* + * The reset does not support the feature and corresponding + * values are not valid + */ +#define ASSERT_NONE (1 << 0) +#define DEASSERT_NONE (1 << 1) +#define STATUS_NONE (1 << 2) + +/* When set this function is activated by setting(vs clearing) this bit */ +#define ASSERT_SET (1 << 3) +#define DEASSERT_SET (1 << 4) +#define STATUS_SET (1 << 5) + +/* The following are the inverse of the above and are added for consistency */ +#define ASSERT_CLEAR (0 << 3) +#define DEASSERT_CLEAR (0 << 4) +#define STATUS_CLEAR (0 << 5) + +#endif diff --git a/include/dt-bindings/soc/qcom,apr.h b/include/dt-bindings/soc/qcom,apr.h new file mode 100644 index 000000000..006362400 --- /dev/null +++ b/include/dt-bindings/soc/qcom,apr.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_QCOM_APR_H +#define __DT_BINDINGS_QCOM_APR_H + +/* Domain IDs */ +#define APR_DOMAIN_SIM 0x1 +#define APR_DOMAIN_PC 0x2 +#define APR_DOMAIN_MODEM 0x3 +#define APR_DOMAIN_ADSP 0x4 +#define APR_DOMAIN_APPS 0x5 +#define APR_DOMAIN_MAX 0x6 + +/* ADSP service IDs */ +#define APR_SVC_ADSP_CORE 0x3 +#define APR_SVC_AFE 0x4 +#define APR_SVC_VSM 0x5 +#define APR_SVC_VPM 0x6 +#define APR_SVC_ASM 0x7 +#define APR_SVC_ADM 0x8 +#define APR_SVC_ADSP_MVM 0x09 +#define APR_SVC_ADSP_CVS 0x0A +#define APR_SVC_ADSP_CVP 0x0B +#define APR_SVC_USM 0x0C +#define APR_SVC_LSM 0x0D +#define APR_SVC_VIDC 0x16 +#define APR_SVC_MAX 0x17 + +#endif /* __DT_BINDINGS_QCOM_APR_H */ diff --git a/include/dt-bindings/soc/qcom,gsbi.h b/include/dt-bindings/soc/qcom,gsbi.h new file mode 100644 index 000000000..7ac429233 --- /dev/null +++ b/include/dt-bindings/soc/qcom,gsbi.h @@ -0,0 +1,26 @@ +/* Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __DT_BINDINGS_QCOM_GSBI_H +#define __DT_BINDINGS_QCOM_GSBI_H + +#define GSBI_PROT_IDLE 0 +#define GSBI_PROT_I2C_UIM 1 +#define GSBI_PROT_I2C 2 +#define GSBI_PROT_SPI 3 +#define GSBI_PROT_UART_W_FC 4 +#define GSBI_PROT_UIM 5 +#define GSBI_PROT_I2C_UART 6 + +#define GSBI_CRCI_QUP 0 +#define GSBI_CRCI_UART 1 + +#endif diff --git a/include/dt-bindings/soc/qcom,rpmh-rsc.h b/include/dt-bindings/soc/qcom,rpmh-rsc.h new file mode 100644 index 000000000..868f998ea --- /dev/null +++ b/include/dt-bindings/soc/qcom,rpmh-rsc.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __DT_QCOM_RPMH_RSC_H__ +#define __DT_QCOM_RPMH_RSC_H__ + +#define SLEEP_TCS 0 +#define WAKE_TCS 1 +#define ACTIVE_TCS 2 +#define CONTROL_TCS 3 + +#endif /* __DT_QCOM_RPMH_RSC_H__ */ diff --git a/include/dt-bindings/soc/rockchip,boot-mode.h b/include/dt-bindings/soc/rockchip,boot-mode.h new file mode 100644 index 000000000..4b0914c09 --- /dev/null +++ b/include/dt-bindings/soc/rockchip,boot-mode.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ROCKCHIP_BOOT_MODE_H +#define __ROCKCHIP_BOOT_MODE_H + +/*high 24 bits is tag, low 8 bits is type*/ +#define REBOOT_FLAG 0x5242C300 +/* normal boot */ +#define BOOT_NORMAL (REBOOT_FLAG + 0) +/* enter bootloader rockusb mode */ +#define BOOT_BL_DOWNLOAD (REBOOT_FLAG + 1) +/* enter recovery */ +#define BOOT_RECOVERY (REBOOT_FLAG + 3) + /* enter fastboot mode */ +#define BOOT_FASTBOOT (REBOOT_FLAG + 9) + +#endif diff --git a/include/dt-bindings/soc/zte,pm_domains.h b/include/dt-bindings/soc/zte,pm_domains.h new file mode 100644 index 000000000..a0b4019c8 --- /dev/null +++ b/include/dt-bindings/soc/zte,pm_domains.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2017 Linaro Ltd. + * + * Author: Baoyou Xie + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H +#define _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H + +#define DM_ZX296718_SAPPU 0 +#define DM_ZX296718_VDE 1 /* g1v6 */ +#define DM_ZX296718_VCE 2 /* h1v6 */ +#define DM_ZX296718_HDE 3 /* g2v2 */ +#define DM_ZX296718_VIU 4 +#define DM_ZX296718_USB20 5 +#define DM_ZX296718_USB21 6 +#define DM_ZX296718_USB30 7 +#define DM_ZX296718_HSIC 8 +#define DM_ZX296718_GMAC 9 +#define DM_ZX296718_TS 10 +#define DM_ZX296718_VOU 11 + +#endif /* _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H */ diff --git a/include/dt-bindings/sound/apq8016-lpass.h b/include/dt-bindings/sound/apq8016-lpass.h new file mode 100644 index 000000000..3c3e16c0a --- /dev/null +++ b/include/dt-bindings/sound/apq8016-lpass.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_APQ8016_LPASS_H +#define __DT_APQ8016_LPASS_H + +#define MI2S_PRIMARY 0 +#define MI2S_SECONDARY 1 +#define MI2S_TERTIARY 2 +#define MI2S_QUATERNARY 3 + +#endif /* __DT_APQ8016_LPASS_H */ diff --git a/include/dt-bindings/sound/audio-jack-events.h b/include/dt-bindings/sound/audio-jack-events.h new file mode 100644 index 000000000..1b29b2951 --- /dev/null +++ b/include/dt-bindings/sound/audio-jack-events.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __AUDIO_JACK_EVENTS_H +#define __AUDIO_JACK_EVENTS_H + +#define JACK_HEADPHONE 1 +#define JACK_MICROPHONE 2 +#define JACK_LINEOUT 3 +#define JACK_LINEIN 4 + +#endif /* __AUDIO_JACK_EVENTS_H */ diff --git a/include/dt-bindings/sound/cs35l32.h b/include/dt-bindings/sound/cs35l32.h new file mode 100644 index 000000000..7549d5019 --- /dev/null +++ b/include/dt-bindings/sound/cs35l32.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_CS35L32_H +#define __DT_CS35L32_H + +#define CS35L32_BOOST_MGR_AUTO 0 +#define CS35L32_BOOST_MGR_AUTO_AUDIO 1 +#define CS35L32_BOOST_MGR_BYPASS 2 +#define CS35L32_BOOST_MGR_FIXED 3 + +#define CS35L32_DATA_CFG_LR_VP 0 +#define CS35L32_DATA_CFG_LR_STAT 1 +#define CS35L32_DATA_CFG_LR 2 +#define CS35L32_DATA_CFG_LR_VPSTAT 3 + +#define CS35L32_BATT_THRESH_3_1V 0 +#define CS35L32_BATT_THRESH_3_2V 1 +#define CS35L32_BATT_THRESH_3_3V 2 +#define CS35L32_BATT_THRESH_3_4V 3 + +#define CS35L32_BATT_RECOV_3_1V 0 +#define CS35L32_BATT_RECOV_3_2V 1 +#define CS35L32_BATT_RECOV_3_3V 2 +#define 
CS35L32_BATT_RECOV_3_4V 3 +#define CS35L32_BATT_RECOV_3_5V 4 +#define CS35L32_BATT_RECOV_3_6V 5 + +#endif /* __DT_CS35L32_H */ diff --git a/include/dt-bindings/sound/cs42l42.h b/include/dt-bindings/sound/cs42l42.h new file mode 100644 index 000000000..db69d84ed --- /dev/null +++ b/include/dt-bindings/sound/cs42l42.h @@ -0,0 +1,73 @@ +/* + * cs42l42.h -- CS42L42 ALSA SoC audio driver DT bindings header + * + * Copyright 2016 Cirrus Logic, Inc. + * + * Author: James Schulman + * Author: Brian Austin + * Author: Michael White + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __DT_CS42L42_H +#define __DT_CS42L42_H + +/* HPOUT Load Capacity */ +#define CS42L42_HPOUT_LOAD_1NF 0 +#define CS42L42_HPOUT_LOAD_10NF 1 + +/* HPOUT Clamp to GND Override */ +#define CS42L42_HPOUT_CLAMP_EN 0 +#define CS42L42_HPOUT_CLAMP_DIS 1 + +/* Tip Sense Inversion */ +#define CS42L42_TS_INV_DIS 0 +#define CS42L42_TS_INV_EN 1 + +/* Tip Sense Debounce */ +#define CS42L42_TS_DBNCE_0 0 +#define CS42L42_TS_DBNCE_125 1 +#define CS42L42_TS_DBNCE_250 2 +#define CS42L42_TS_DBNCE_500 3 +#define CS42L42_TS_DBNCE_750 4 +#define CS42L42_TS_DBNCE_1000 5 +#define CS42L42_TS_DBNCE_1250 6 +#define CS42L42_TS_DBNCE_1500 7 + +/* Button Press Software Debounce Times */ +#define CS42L42_BTN_DET_INIT_DBNCE_MIN 0 +#define CS42L42_BTN_DET_INIT_DBNCE_DEFAULT 100 +#define CS42L42_BTN_DET_INIT_DBNCE_MAX 200 + +#define CS42L42_BTN_DET_EVENT_DBNCE_MIN 0 +#define CS42L42_BTN_DET_EVENT_DBNCE_DEFAULT 10 +#define CS42L42_BTN_DET_EVENT_DBNCE_MAX 20 + +/* Button Detect Level Sensitivities */ +#define CS42L42_NUM_BIASES 4 + +#define CS42L42_HS_DET_LEVEL_15 0x0F +#define CS42L42_HS_DET_LEVEL_8 0x08 +#define CS42L42_HS_DET_LEVEL_4 0x04 +#define CS42L42_HS_DET_LEVEL_1 0x01 + +#define CS42L42_HS_DET_LEVEL_MIN 0 +#define CS42L42_HS_DET_LEVEL_MAX 0x3F + +/* HS Bias Ramp Rate */ + +#define CS42L42_HSBIAS_RAMP_FAST_RISE_SLOW_FALL 0 +#define CS42L42_HSBIAS_RAMP_FAST 1 +#define CS42L42_HSBIAS_RAMP_SLOW 2 +#define CS42L42_HSBIAS_RAMP_SLOWEST 3 + +#define CS42L42_HSBIAS_RAMP_TIME0 10 +#define CS42L42_HSBIAS_RAMP_TIME1 40 +#define CS42L42_HSBIAS_RAMP_TIME2 90 +#define CS42L42_HSBIAS_RAMP_TIME3 170 + +#endif /* __DT_CS42L42_H */ diff --git a/include/dt-bindings/sound/fsl-imx-audmux.h b/include/dt-bindings/sound/fsl-imx-audmux.h new file mode 100644 index 000000000..15f138beb --- /dev/null +++ b/include/dt-bindings/sound/fsl-imx-audmux.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_FSL_IMX_AUDMUX_H +#define __DT_FSL_IMX_AUDMUX_H + +#define MX27_AUDMUX_HPCR1_SSI0 0 +#define MX27_AUDMUX_HPCR2_SSI1 1 +#define MX27_AUDMUX_HPCR3_SSI_PINS_4 2 +#define MX27_AUDMUX_PPCR1_SSI_PINS_1 3 +#define MX27_AUDMUX_PPCR2_SSI_PINS_2 4 +#define MX27_AUDMUX_PPCR3_SSI_PINS_3 5 + +#define MX31_AUDMUX_PORT1_SSI0 0 +#define MX31_AUDMUX_PORT2_SSI1 1 +#define MX31_AUDMUX_PORT3_SSI_PINS_3 2 +#define MX31_AUDMUX_PORT4_SSI_PINS_4 3 +#define MX31_AUDMUX_PORT5_SSI_PINS_5 4 +#define MX31_AUDMUX_PORT6_SSI_PINS_6 5 +#define MX31_AUDMUX_PORT7_SSI_PINS_7 6 + +#define MX51_AUDMUX_PORT1_SSI0 0 +#define MX51_AUDMUX_PORT2_SSI1 1 +#define MX51_AUDMUX_PORT3 2 +#define MX51_AUDMUX_PORT4 3 +#define MX51_AUDMUX_PORT5 4 +#define MX51_AUDMUX_PORT6 5 +#define MX51_AUDMUX_PORT7 6 + +/* + * TFCSEL/RFCSEL (i.MX27) or TFSEL/TCSEL/RFSEL/RCSEL (i.MX31/51/53/6Q) + * can be sourced from Rx/Tx. 
+ */ +#define IMX_AUDMUX_RXFS 0x8 +#define IMX_AUDMUX_RXCLK 0x8 + +/* Register definitions for the i.MX21/27 Digital Audio Multiplexer */ +#define IMX_AUDMUX_V1_PCR_INMMASK(x) ((x) & 0xff) +#define IMX_AUDMUX_V1_PCR_INMEN (1 << 8) +#define IMX_AUDMUX_V1_PCR_TXRXEN (1 << 10) +#define IMX_AUDMUX_V1_PCR_SYN (1 << 12) +#define IMX_AUDMUX_V1_PCR_RXDSEL(x) (((x) & 0x7) << 13) +#define IMX_AUDMUX_V1_PCR_RFCSEL(x) (((x) & 0xf) << 20) +#define IMX_AUDMUX_V1_PCR_RCLKDIR (1 << 24) +#define IMX_AUDMUX_V1_PCR_RFSDIR (1 << 25) +#define IMX_AUDMUX_V1_PCR_TFCSEL(x) (((x) & 0xf) << 26) +#define IMX_AUDMUX_V1_PCR_TCLKDIR (1 << 30) +#define IMX_AUDMUX_V1_PCR_TFSDIR (1 << 31) + +/* Register definitions for the i.MX25/31/35/51 Digital Audio Multiplexer */ +#define IMX_AUDMUX_V2_PTCR_TFSDIR (1 << 31) +#define IMX_AUDMUX_V2_PTCR_TFSEL(x) (((x) & 0xf) << 27) +#define IMX_AUDMUX_V2_PTCR_TCLKDIR (1 << 26) +#define IMX_AUDMUX_V2_PTCR_TCSEL(x) (((x) & 0xf) << 22) +#define IMX_AUDMUX_V2_PTCR_RFSDIR (1 << 21) +#define IMX_AUDMUX_V2_PTCR_RFSEL(x) (((x) & 0xf) << 17) +#define IMX_AUDMUX_V2_PTCR_RCLKDIR (1 << 16) +#define IMX_AUDMUX_V2_PTCR_RCSEL(x) (((x) & 0xf) << 12) +#define IMX_AUDMUX_V2_PTCR_SYN (1 << 11) + +#define IMX_AUDMUX_V2_PDCR_RXDSEL(x) (((x) & 0x7) << 13) +#define IMX_AUDMUX_V2_PDCR_TXRXEN (1 << 12) +#define IMX_AUDMUX_V2_PDCR_MODE(x) (((x) & 0x3) << 8) +#define IMX_AUDMUX_V2_PDCR_INMMASK(x) ((x) & 0xff) + +#endif /* __DT_FSL_IMX_AUDMUX_H */ diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h new file mode 100644 index 000000000..e2d389224 --- /dev/null +++ b/include/dt-bindings/sound/qcom,q6afe.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_Q6_AFE_H__ +#define __DT_BINDINGS_Q6_AFE_H__ + +/* Audio Front End (AFE) virtual ports IDs */ +#define HDMI_RX 1 +#define SLIMBUS_0_RX 2 +#define SLIMBUS_0_TX 3 +#define SLIMBUS_1_RX 4 +#define SLIMBUS_1_TX 5 +#define SLIMBUS_2_RX 6 +#define SLIMBUS_2_TX 7 +#define SLIMBUS_3_RX 8 +#define SLIMBUS_3_TX 9 +#define SLIMBUS_4_RX 10 +#define SLIMBUS_4_TX 11 +#define SLIMBUS_5_RX 12 +#define SLIMBUS_5_TX 13 +#define SLIMBUS_6_RX 14 +#define SLIMBUS_6_TX 15 +#define PRIMARY_MI2S_RX 16 +#define PRIMARY_MI2S_TX 17 +#define SECONDARY_MI2S_RX 18 +#define SECONDARY_MI2S_TX 19 +#define TERTIARY_MI2S_RX 20 +#define TERTIARY_MI2S_TX 21 +#define QUATERNARY_MI2S_RX 22 +#define QUATERNARY_MI2S_TX 23 +#define PRIMARY_TDM_RX_0 24 +#define PRIMARY_TDM_TX_0 25 +#define PRIMARY_TDM_RX_1 26 +#define PRIMARY_TDM_TX_1 27 +#define PRIMARY_TDM_RX_2 28 +#define PRIMARY_TDM_TX_2 29 +#define PRIMARY_TDM_RX_3 30 +#define PRIMARY_TDM_TX_3 31 +#define PRIMARY_TDM_RX_4 32 +#define PRIMARY_TDM_TX_4 33 +#define PRIMARY_TDM_RX_5 34 +#define PRIMARY_TDM_TX_5 35 +#define PRIMARY_TDM_RX_6 36 +#define PRIMARY_TDM_TX_6 37 +#define PRIMARY_TDM_RX_7 38 +#define PRIMARY_TDM_TX_7 39 +#define SECONDARY_TDM_RX_0 40 +#define SECONDARY_TDM_TX_0 41 +#define SECONDARY_TDM_RX_1 42 +#define SECONDARY_TDM_TX_1 43 +#define SECONDARY_TDM_RX_2 44 +#define SECONDARY_TDM_TX_2 45 +#define SECONDARY_TDM_RX_3 46 +#define SECONDARY_TDM_TX_3 47 +#define SECONDARY_TDM_RX_4 48 +#define SECONDARY_TDM_TX_4 49 +#define SECONDARY_TDM_RX_5 50 +#define SECONDARY_TDM_TX_5 51 +#define SECONDARY_TDM_RX_6 52 +#define SECONDARY_TDM_TX_6 53 +#define SECONDARY_TDM_RX_7 54 +#define SECONDARY_TDM_TX_7 55 +#define TERTIARY_TDM_RX_0 56 +#define TERTIARY_TDM_TX_0 57 +#define TERTIARY_TDM_RX_1 58 +#define TERTIARY_TDM_TX_1 59 +#define TERTIARY_TDM_RX_2 60 +#define 
TERTIARY_TDM_TX_2 61 +#define TERTIARY_TDM_RX_3 62 +#define TERTIARY_TDM_TX_3 63 +#define TERTIARY_TDM_RX_4 64 +#define TERTIARY_TDM_TX_4 65 +#define TERTIARY_TDM_RX_5 66 +#define TERTIARY_TDM_TX_5 67 +#define TERTIARY_TDM_RX_6 68 +#define TERTIARY_TDM_TX_6 69 +#define TERTIARY_TDM_RX_7 70 +#define TERTIARY_TDM_TX_7 71 +#define QUATERNARY_TDM_RX_0 72 +#define QUATERNARY_TDM_TX_0 73 +#define QUATERNARY_TDM_RX_1 74 +#define QUATERNARY_TDM_TX_1 75 +#define QUATERNARY_TDM_RX_2 76 +#define QUATERNARY_TDM_TX_2 77 +#define QUATERNARY_TDM_RX_3 78 +#define QUATERNARY_TDM_TX_3 79 +#define QUATERNARY_TDM_RX_4 80 +#define QUATERNARY_TDM_TX_4 81 +#define QUATERNARY_TDM_RX_5 82 +#define QUATERNARY_TDM_TX_5 83 +#define QUATERNARY_TDM_RX_6 84 +#define QUATERNARY_TDM_TX_6 85 +#define QUATERNARY_TDM_RX_7 86 +#define QUATERNARY_TDM_TX_7 87 +#define QUINARY_TDM_RX_0 88 +#define QUINARY_TDM_TX_0 89 +#define QUINARY_TDM_RX_1 90 +#define QUINARY_TDM_TX_1 91 +#define QUINARY_TDM_RX_2 92 +#define QUINARY_TDM_TX_2 93 +#define QUINARY_TDM_RX_3 94 +#define QUINARY_TDM_TX_3 95 +#define QUINARY_TDM_RX_4 96 +#define QUINARY_TDM_TX_4 97 +#define QUINARY_TDM_RX_5 98 +#define QUINARY_TDM_TX_5 99 +#define QUINARY_TDM_RX_6 100 +#define QUINARY_TDM_TX_6 101 +#define QUINARY_TDM_RX_7 102 +#define QUINARY_TDM_TX_7 103 + +#endif /* __DT_BINDINGS_Q6_AFE_H__ */ + diff --git a/include/dt-bindings/sound/qcom,q6asm.h b/include/dt-bindings/sound/qcom,q6asm.h new file mode 100644 index 000000000..1eb77d87c --- /dev/null +++ b/include/dt-bindings/sound/qcom,q6asm.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_Q6_ASM_H__ +#define __DT_BINDINGS_Q6_ASM_H__ + +#define MSM_FRONTEND_DAI_MULTIMEDIA1 0 +#define MSM_FRONTEND_DAI_MULTIMEDIA2 1 +#define MSM_FRONTEND_DAI_MULTIMEDIA3 2 +#define MSM_FRONTEND_DAI_MULTIMEDIA4 3 +#define MSM_FRONTEND_DAI_MULTIMEDIA5 4 +#define MSM_FRONTEND_DAI_MULTIMEDIA6 5 +#define MSM_FRONTEND_DAI_MULTIMEDIA7 6 +#define MSM_FRONTEND_DAI_MULTIMEDIA8 7 +#define MSM_FRONTEND_DAI_MULTIMEDIA9 8 +#define MSM_FRONTEND_DAI_MULTIMEDIA10 9 +#define MSM_FRONTEND_DAI_MULTIMEDIA11 10 +#define MSM_FRONTEND_DAI_MULTIMEDIA12 11 +#define MSM_FRONTEND_DAI_MULTIMEDIA13 12 +#define MSM_FRONTEND_DAI_MULTIMEDIA14 13 +#define MSM_FRONTEND_DAI_MULTIMEDIA15 14 +#define MSM_FRONTEND_DAI_MULTIMEDIA16 15 + +#endif /* __DT_BINDINGS_Q6_ASM_H__ */ diff --git a/include/dt-bindings/sound/rt5640.h b/include/dt-bindings/sound/rt5640.h new file mode 100644 index 000000000..154c9b441 --- /dev/null +++ b/include/dt-bindings/sound/rt5640.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_RT5640_H +#define __DT_RT5640_H + +#define RT5640_DMIC1_DATA_PIN_NONE 0 +#define RT5640_DMIC1_DATA_PIN_IN1P 1 +#define RT5640_DMIC1_DATA_PIN_GPIO3 2 + +#define RT5640_DMIC2_DATA_PIN_NONE 0 +#define RT5640_DMIC2_DATA_PIN_IN1N 1 +#define RT5640_DMIC2_DATA_PIN_GPIO4 2 + +#define RT5640_JD_SRC_GPIO1 1 +#define RT5640_JD_SRC_JD1_IN4P 2 +#define RT5640_JD_SRC_JD2_IN4N 3 +#define RT5640_JD_SRC_GPIO2 4 +#define RT5640_JD_SRC_GPIO3 5 +#define RT5640_JD_SRC_GPIO4 6 + +#define RT5640_OVCD_SF_0P5 0 +#define RT5640_OVCD_SF_0P75 1 +#define RT5640_OVCD_SF_1P0 2 +#define RT5640_OVCD_SF_1P5 3 + +#endif /* __DT_RT5640_H */ diff --git a/include/dt-bindings/sound/rt5651.h b/include/dt-bindings/sound/rt5651.h new file mode 100644 index 000000000..2f2dac915 --- /dev/null +++ b/include/dt-bindings/sound/rt5651.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_RT5651_H +#define __DT_RT5651_H + +#define RT5651_JD_NULL 
0 +#define RT5651_JD1_1 1 +#define RT5651_JD1_2 2 +#define RT5651_JD2 3 + +#define RT5651_OVCD_SF_0P5 0 +#define RT5651_OVCD_SF_0P75 1 +#define RT5651_OVCD_SF_1P0 2 +#define RT5651_OVCD_SF_1P5 3 + +#endif /* __DT_RT5651_H */ diff --git a/include/dt-bindings/sound/samsung-i2s.h b/include/dt-bindings/sound/samsung-i2s.h new file mode 100644 index 000000000..77545f14c --- /dev/null +++ b/include/dt-bindings/sound/samsung-i2s.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_SAMSUNG_I2S_H +#define _DT_BINDINGS_SAMSUNG_I2S_H + +#define CLK_I2S_CDCLK 0 +#define CLK_I2S_RCLK_SRC 1 +#define CLK_I2S_RCLK_PSR 2 + +#endif /* _DT_BINDINGS_SAMSUNG_I2S_H */ diff --git a/include/dt-bindings/sound/tas2552.h b/include/dt-bindings/sound/tas2552.h new file mode 100644 index 000000000..0daeb8385 --- /dev/null +++ b/include/dt-bindings/sound/tas2552.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_TAS2552_H +#define __DT_TAS2552_H + +#define TAS2552_PLL_CLKIN (0) +#define TAS2552_PDM_CLK (1) +#define TAS2552_CLK_TARGET_MASK (1) + +#define TAS2552_PLL_CLKIN_MCLK ((0 << 1) | TAS2552_PLL_CLKIN) +#define TAS2552_PLL_CLKIN_BCLK ((1 << 1) | TAS2552_PLL_CLKIN) +#define TAS2552_PLL_CLKIN_IVCLKIN ((2 << 1) | TAS2552_PLL_CLKIN) +#define TAS2552_PLL_CLKIN_1_8_FIXED ((3 << 1) | TAS2552_PLL_CLKIN) + +#define TAS2552_PDM_CLK_PLL ((0 << 1) | TAS2552_PDM_CLK) +#define TAS2552_PDM_CLK_IVCLKIN ((1 << 1) | TAS2552_PDM_CLK) +#define TAS2552_PDM_CLK_BCLK ((2 << 1) | TAS2552_PDM_CLK) +#define TAS2552_PDM_CLK_MCLK ((3 << 1) | TAS2552_PDM_CLK) + +#endif /* __DT_TAS2552_H */ diff --git a/include/dt-bindings/sound/tlv320aic31xx-micbias.h b/include/dt-bindings/sound/tlv320aic31xx-micbias.h new file mode 100644 index 000000000..c6895a18a --- /dev/null +++ b/include/dt-bindings/sound/tlv320aic31xx-micbias.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_TLV320AIC31XX_MICBIAS_H +#define __DT_TLV320AIC31XX_MICBIAS_H + +#define MICBIAS_2_0V 1 +#define MICBIAS_2_5V 2 +#define MICBIAS_AVDDV 3 + +#endif /* __DT_TLV320AIC31XX_MICBIAS_H */ diff --git a/include/dt-bindings/spmi/spmi.h b/include/dt-bindings/spmi/spmi.h new file mode 100644 index 000000000..d11e1e543 --- /dev/null +++ b/include/dt-bindings/spmi/spmi.h @@ -0,0 +1,18 @@ +/* Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __DT_BINDINGS_SPMI_H +#define __DT_BINDINGS_SPMI_H + +#define SPMI_USID 0 +#define SPMI_GSID 1 + +#endif diff --git a/include/dt-bindings/thermal/lm90.h b/include/dt-bindings/thermal/lm90.h new file mode 100644 index 000000000..eed91a16c --- /dev/null +++ b/include/dt-bindings/thermal/lm90.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the LM90 thermal bindings. 
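 *
 * For illustration (a sketch; the exact cell layout is defined by the
 * LM90 device-tree binding, not by this header): a thermal zone typically
 * references the sensor with one of these constants as the channel
 * specifier, for example LM90_REMOTE_TEMPERATURE to monitor the remote
 * diode.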
+ */ + +#ifndef _DT_BINDINGS_THERMAL_LM90_H_ +#define _DT_BINDINGS_THERMAL_LM90_H_ + +#define LM90_LOCAL_TEMPERATURE 0 +#define LM90_REMOTE_TEMPERATURE 1 +#define LM90_REMOTE2_TEMPERATURE 2 + +#endif diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h new file mode 100644 index 000000000..c15e8b709 --- /dev/null +++ b/include/dt-bindings/thermal/tegra124-soctherm.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra124-soctherm. + */ + +#ifndef _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H +#define _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H + +#define TEGRA124_SOCTHERM_SENSOR_CPU 0 +#define TEGRA124_SOCTHERM_SENSOR_MEM 1 +#define TEGRA124_SOCTHERM_SENSOR_GPU 2 +#define TEGRA124_SOCTHERM_SENSOR_PLLX 3 +#define TEGRA124_SOCTHERM_SENSOR_NUM 4 + +#define TEGRA_SOCTHERM_THROT_LEVEL_LOW 0 +#define TEGRA_SOCTHERM_THROT_LEVEL_MED 1 +#define TEGRA_SOCTHERM_THROT_LEVEL_HIGH 2 +#define TEGRA_SOCTHERM_THROT_LEVEL_NONE -1 + +#endif diff --git a/include/dt-bindings/thermal/tegra186-bpmp-thermal.h b/include/dt-bindings/thermal/tegra186-bpmp-thermal.h new file mode 100644 index 000000000..a96b8fa31 --- /dev/null +++ b/include/dt-bindings/thermal/tegra186-bpmp-thermal.h @@ -0,0 +1,14 @@ +/* + * This header provides constants for binding nvidia,tegra186-bpmp-thermal. + */ + +#ifndef _DT_BINDINGS_THERMAL_TEGRA186_BPMP_THERMAL_H +#define _DT_BINDINGS_THERMAL_TEGRA186_BPMP_THERMAL_H + +#define TEGRA186_BPMP_THERMAL_ZONE_CPU 2 +#define TEGRA186_BPMP_THERMAL_ZONE_GPU 3 +#define TEGRA186_BPMP_THERMAL_ZONE_AUX 4 +#define TEGRA186_BPMP_THERMAL_ZONE_PLLX 5 +#define TEGRA186_BPMP_THERMAL_ZONE_AO 6 + +#endif diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h new file mode 100644 index 000000000..b5e6b0069 --- /dev/null +++ b/include/dt-bindings/thermal/thermal.h @@ -0,0 +1,17 @@ +/* + * This header provides constants for most thermal bindings. + * + * Copyright (C) 2013 Texas Instruments + * Eduardo Valentin + * + * GPLv2 only + */ + +#ifndef _DT_BINDINGS_THERMAL_THERMAL_H +#define _DT_BINDINGS_THERMAL_THERMAL_H + +/* On cooling devices upper and lower limits */ +#define THERMAL_NO_LIMIT (~0) + +#endif + diff --git a/include/dt-bindings/thermal/thermal_exynos.h b/include/dt-bindings/thermal/thermal_exynos.h new file mode 100644 index 000000000..0646500bc --- /dev/null +++ b/include/dt-bindings/thermal/thermal_exynos.h @@ -0,0 +1,28 @@ +/* + * thermal_exynos.h - Samsung EXYNOS TMU device tree definitions + * + * Copyright (C) 2014 Samsung Electronics + * Lukasz Majewski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _EXYNOS_THERMAL_TMU_DT_H +#define _EXYNOS_THERMAL_TMU_DT_H + +#define TYPE_ONE_POINT_TRIMMING 0 +#define TYPE_ONE_POINT_TRIMMING_25 1 +#define TYPE_ONE_POINT_TRIMMING_85 2 +#define TYPE_TWO_POINT_TRIMMING 3 +#define TYPE_NONE 4 + +#endif /* _EXYNOS_THERMAL_TMU_DT_H */ diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h new file mode 100644 index 000000000..7b7a92fef --- /dev/null +++ b/include/dt-bindings/usb/pd.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_POWER_DELIVERY_H +#define __DT_POWER_DELIVERY_H + +/* Power delivery Power Data Object definitions */ +#define PDO_TYPE_FIXED 0 +#define PDO_TYPE_BATT 1 +#define PDO_TYPE_VAR 2 +#define PDO_TYPE_APDO 3 + +#define PDO_TYPE_SHIFT 30 +#define PDO_TYPE_MASK 0x3 + +#define PDO_TYPE(t) ((t) << PDO_TYPE_SHIFT) + +#define PDO_VOLT_MASK 0x3ff +#define PDO_CURR_MASK 0x3ff +#define PDO_PWR_MASK 0x3ff + +#define PDO_FIXED_DUAL_ROLE (1 << 29) /* Power role swap supported */ +#define PDO_FIXED_SUSPEND (1 << 28) /* USB Suspend supported (Source) */ +#define PDO_FIXED_HIGHER_CAP (1 << 28) /* Requires more than vSafe5V (Sink) */ +#define PDO_FIXED_EXTPOWER (1 << 27) /* Externally powered */ +#define PDO_FIXED_USB_COMM (1 << 26) /* USB communications capable */ +#define PDO_FIXED_DATA_SWAP (1 << 25) /* Data role swap supported */ +#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */ +#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */ + +#define PDO_FIXED_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_FIXED_VOLT_SHIFT) +#define PDO_FIXED_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_FIXED_CURR_SHIFT) + +#define PDO_FIXED(mv, ma, flags) \ + (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \ + PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma)) + +#define VSAFE5V 5000 /* mv units */ + +#define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */ +#define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */ +#define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */ + +#define PDO_BATT_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MIN_VOLT_SHIFT) +#define PDO_BATT_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MAX_VOLT_SHIFT) +#define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT) + +#define PDO_BATT(min_mv, max_mv, max_mw) \ + (PDO_TYPE(PDO_TYPE_BATT) | PDO_BATT_MIN_VOLT(min_mv) | \ + PDO_BATT_MAX_VOLT(max_mv) | PDO_BATT_MAX_POWER(max_mw)) + +#define PDO_VAR_MAX_VOLT_SHIFT 20 /* 50mV units */ +#define PDO_VAR_MIN_VOLT_SHIFT 10 /* 50mV units */ +#define PDO_VAR_MAX_CURR_SHIFT 0 /* 10mA units */ + +#define PDO_VAR_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MIN_VOLT_SHIFT) +#define PDO_VAR_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MAX_VOLT_SHIFT) +#define PDO_VAR_MAX_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_VAR_MAX_CURR_SHIFT) + +#define PDO_VAR(min_mv, max_mv, max_ma) \ + (PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \ + PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma)) + + #endif /* __DT_POWER_DELIVERY_H */ diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h new file mode 100644 index 000000000..09b3b4807 --- /dev/null +++ b/include/keys/asymmetric-parser.h @@ -0,0 +1,37 @@ +/* Asymmetric public-key cryptography data parser + * + * See Documentation/crypto/asymmetric-keys.txt + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
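 *
 * [Illustration for the PDO_* helpers in dt-bindings/usb/pd.h above, a
 *  sketch only with hypothetical values:
 *
 *	PDO_FIXED(5000, 3000, PDO_FIXED_DUAL_ROLE | PDO_FIXED_USB_COMM)
 *
 *  encodes a fixed 5 V / 3 A supply that is dual-role and USB
 *  communications capable: the PDO type lands in bits 31:30,
 *  5000 / 50 = 100 fills the 50 mV voltage field (bits 19:10) and
 *  3000 / 10 = 300 fills the 10 mA current field (bits 9:0).]
 *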
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _KEYS_ASYMMETRIC_PARSER_H +#define _KEYS_ASYMMETRIC_PARSER_H + +/* + * Key data parser. Called during key instantiation. + */ +struct asymmetric_key_parser { + struct list_head link; + struct module *owner; + const char *name; + + /* Attempt to parse a key from the data blob passed to add_key() or + * keyctl_instantiate(). Should also generate a proposed description + * that the caller can optionally use for the key. + * + * Return EBADMSG if not recognised. + */ + int (*parse)(struct key_preparsed_payload *prep); +}; + +extern int register_asymmetric_key_parser(struct asymmetric_key_parser *); +extern void unregister_asymmetric_key_parser(struct asymmetric_key_parser *); + +#endif /* _KEYS_ASYMMETRIC_PARSER_H */ diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h new file mode 100644 index 000000000..e0a9c2368 --- /dev/null +++ b/include/keys/asymmetric-subtype.h @@ -0,0 +1,55 @@ +/* Asymmetric public-key cryptography key subtype + * + * See Documentation/crypto/asymmetric-keys.txt + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _KEYS_ASYMMETRIC_SUBTYPE_H +#define _KEYS_ASYMMETRIC_SUBTYPE_H + +#include +#include + +struct public_key_signature; + +/* + * Keys of this type declare a subtype that indicates the handlers and + * capabilities. + */ +struct asymmetric_key_subtype { + struct module *owner; + const char *name; + unsigned short name_len; /* length of name */ + + /* Describe a key of this subtype for /proc/keys */ + void (*describe)(const struct key *key, struct seq_file *m); + + /* Destroy a key of this subtype */ + void (*destroy)(void *payload_crypto, void *payload_auth); + + /* Verify the signature on a key of this subtype (optional) */ + int (*verify_signature)(const struct key *key, + const struct public_key_signature *sig); +}; + +/** + * asymmetric_key_subtype - Get the subtype from an asymmetric key + * @key: The key of interest. + * + * Retrieves and returns the subtype pointer of the asymmetric key from the + * type-specific data attached to the key. + */ +static inline +struct asymmetric_key_subtype *asymmetric_key_subtype(const struct key *key) +{ + return key->payload.data[asym_subtype]; +} + +#endif /* _KEYS_ASYMMETRIC_SUBTYPE_H */ diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h new file mode 100644 index 000000000..1cb77cd51 --- /dev/null +++ b/include/keys/asymmetric-type.h @@ -0,0 +1,88 @@ +/* Asymmetric Public-key cryptography key type interface + * + * See Documentation/crypto/asymmetric-keys.txt + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _KEYS_ASYMMETRIC_TYPE_H +#define _KEYS_ASYMMETRIC_TYPE_H + +#include +#include + +extern struct key_type key_type_asymmetric; + +/* + * The key payload is four words. The asymmetric-type key uses them as + * follows: + */ +enum asymmetric_payload_bits { + asym_crypto, /* The data representing the key */ + asym_subtype, /* Pointer to an asymmetric_key_subtype struct */ + asym_key_ids, /* Pointer to an asymmetric_key_ids struct */ + asym_auth /* The key's authorisation (signature, parent key ID) */ +}; + +/* + * Identifiers for an asymmetric key ID. We have three ways of looking up a + * key derived from an X.509 certificate: + * + * (1) Serial Number & Issuer. Non-optional. This is the only valid way to + * map a PKCS#7 signature to an X.509 certificate. + * + * (2) Issuer & Subject Unique IDs. Optional. These were the original way to + * match X.509 certificates, but have fallen into disuse in favour of (3). + * + * (3) Auth & Subject Key Identifiers. Optional. SKIDs are only provided on + * CA keys that are intended to sign other keys, so don't appear in end + * user certificates unless forced. + * + * We could also support an PGP key identifier, which is just a SHA1 sum of the + * public key and certain parameters, but since we don't support PGP keys at + * the moment, we shall ignore those. + * + * What we actually do is provide a place where binary identifiers can be + * stashed and then compare against them when checking for an id match. + */ +struct asymmetric_key_id { + unsigned short len; + unsigned char data[]; +}; + +struct asymmetric_key_ids { + void *id[2]; +}; + +extern bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1, + const struct asymmetric_key_id *kid2); + +extern bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1, + const struct asymmetric_key_id *kid2); + +extern struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1, + size_t len_1, + const void *val_2, + size_t len_2); +static inline +const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key) +{ + return key->payload.data[asym_key_ids]; +} + +extern struct key *find_asymmetric_key(struct key *keyring, + const struct asymmetric_key_id *id_0, + const struct asymmetric_key_id *id_1, + bool partial); + +/* + * The payload is at the discretion of the subtype. + */ + +#endif /* _KEYS_ASYMMETRIC_TYPE_H */ diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h new file mode 100644 index 000000000..a7207a965 --- /dev/null +++ b/include/keys/big_key-type.h @@ -0,0 +1,26 @@ +/* Big capacity key type. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
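 *
 * [Illustration for the asymmetric key IDs in keys/asymmetric-type.h
 *  above, a sketch only with hypothetical variable names: an X.509 parser
 *  builds the first ID from the certificate's serial number and issuer,
 *
 *	ids->id[0] = asymmetric_key_generate_id(serial, serial_len,
 *						 issuer, issuer_len);
 *
 *  and find_asymmetric_key() can later match a signature against it,
 *  optionally accepting partial matches via its "partial" argument.]
 *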
+ */ + +#ifndef _KEYS_BIG_KEY_TYPE_H +#define _KEYS_BIG_KEY_TYPE_H + +#include + +extern struct key_type key_type_big_key; + +extern int big_key_preparse(struct key_preparsed_payload *prep); +extern void big_key_free_preparse(struct key_preparsed_payload *prep); +extern void big_key_revoke(struct key *key); +extern void big_key_destroy(struct key *key); +extern void big_key_describe(const struct key *big_key, struct seq_file *m); +extern long big_key_read(const struct key *key, char *buffer, size_t buflen); + +#endif /* _KEYS_BIG_KEY_TYPE_H */ diff --git a/include/keys/ceph-type.h b/include/keys/ceph-type.h new file mode 100644 index 000000000..aa6d3e050 --- /dev/null +++ b/include/keys/ceph-type.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _KEYS_CEPH_TYPE_H +#define _KEYS_CEPH_TYPE_H + +#include + +extern struct key_type key_type_ceph; + +#endif diff --git a/include/keys/dns_resolver-type.h b/include/keys/dns_resolver-type.h new file mode 100644 index 000000000..9284a1939 --- /dev/null +++ b/include/keys/dns_resolver-type.h @@ -0,0 +1,23 @@ +/* DNS resolver key type + * + * Copyright (C) 2010 Wang Lei. All Rights Reserved. + * Written by Wang Lei (wang840925@gmail.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _KEYS_DNS_RESOLVER_TYPE_H +#define _KEYS_DNS_RESOLVER_TYPE_H + +#include + +extern struct key_type key_type_dns_resolver; + +extern int request_dns_resolver_key(const char *description, + const char *callout_info, + char **data); + +#endif /* _KEYS_DNS_RESOLVER_TYPE_H */ diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h new file mode 100644 index 000000000..1d4541370 --- /dev/null +++ b/include/keys/encrypted-type.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2010 IBM Corporation + * Copyright (C) 2010 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Authors: + * Mimi Zohar + * Roberto Sassu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + */ + +#ifndef _KEYS_ENCRYPTED_TYPE_H +#define _KEYS_ENCRYPTED_TYPE_H + +#include +#include + +struct encrypted_key_payload { + struct rcu_head rcu; + char *format; /* datablob: format */ + char *master_desc; /* datablob: master key name */ + char *datalen; /* datablob: decrypted key length */ + u8 *iv; /* datablob: iv */ + u8 *encrypted_data; /* datablob: encrypted data */ + unsigned short datablob_len; /* length of datablob */ + unsigned short decrypted_datalen; /* decrypted data length */ + unsigned short payload_datalen; /* payload data length */ + unsigned short encrypted_key_format; /* encrypted key format */ + u8 *decrypted_data; /* decrypted data */ + u8 payload_data[0]; /* payload data + datablob + hmac */ +}; + +extern struct key_type key_type_encrypted; + +#endif /* _KEYS_ENCRYPTED_TYPE_H */ diff --git a/include/keys/keyring-type.h b/include/keys/keyring-type.h new file mode 100644 index 000000000..fca5c6234 --- /dev/null +++ b/include/keys/keyring-type.h @@ -0,0 +1,18 @@ +/* Keyring key type + * + * Copyright (C) 2008, 2013 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _KEYS_KEYRING_TYPE_H +#define _KEYS_KEYRING_TYPE_H + +#include +#include + +#endif /* _KEYS_KEYRING_TYPE_H */ diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h new file mode 100644 index 000000000..a726dd3f1 --- /dev/null +++ b/include/keys/request_key_auth-type.h @@ -0,0 +1,36 @@ +/* request_key authorisation token key type + * + * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H +#define _KEYS_REQUEST_KEY_AUTH_TYPE_H + +#include + +/* + * Authorisation record for request_key(). + */ +struct request_key_auth { + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +} __randomize_layout; + +static inline struct request_key_auth *get_request_key_auth(const struct key *key) +{ + return key->payload.data[0]; +} + + +#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */ diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h new file mode 100644 index 000000000..8cf829dbf --- /dev/null +++ b/include/keys/rxrpc-type.h @@ -0,0 +1,153 @@ +/* RxRPC key type + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _KEYS_RXRPC_TYPE_H +#define _KEYS_RXRPC_TYPE_H + +#include + +/* + * key type for AF_RXRPC keys + */ +extern struct key_type key_type_rxrpc; + +extern struct key *rxrpc_get_null_key(const char *); + +/* + * RxRPC key for Kerberos IV (type-2 security) + */ +struct rxkad_key { + u32 vice_id; + u32 start; /* time at which ticket starts */ + u32 expiry; /* time at which ticket expires */ + u32 kvno; /* key version number */ + u8 primary_flag; /* T if key for primary cell for this user */ + u16 ticket_len; /* length of ticket[] */ + u8 session_key[8]; /* DES session key */ + u8 ticket[0]; /* the encrypted ticket */ +}; + +/* + * Kerberos 5 principal + * name/name/name@realm + */ +struct krb5_principal { + u8 n_name_parts; /* N of parts of the name part of the principal */ + char **name_parts; /* parts of the name part of the principal */ + char *realm; /* parts of the realm part of the principal */ +}; + +/* + * Kerberos 5 tagged data + */ +struct krb5_tagged_data { + /* for tag value, see /usr/include/krb5/krb5.h + * - KRB5_AUTHDATA_* for auth data + * - + */ + s32 tag; + u32 data_len; + u8 *data; +}; + +/* + * RxRPC key for Kerberos V (type-5 security) + */ +struct rxk5_key { + u64 authtime; /* time at which auth token generated */ + u64 starttime; /* time at which auth token starts */ + u64 endtime; /* time at which auth token expired */ + u64 renew_till; /* time to which auth token can be renewed */ + s32 is_skey; /* T if ticket is encrypted in another ticket's + * skey */ + s32 flags; /* mask of TKT_FLG_* bits (krb5/krb5.h) */ + struct krb5_principal client; /* client principal name */ + struct krb5_principal server; /* server principal name */ + u16 ticket_len; /* length of ticket */ + u16 ticket2_len; /* length of second ticket */ + u8 n_authdata; /* number of authorisation data elements */ + u8 n_addresses; /* number of addresses */ + struct krb5_tagged_data session; /* session data; tag is enctype */ + struct krb5_tagged_data *addresses; /* addresses */ + u8 *ticket; /* krb5 ticket */ + u8 *ticket2; /* second krb5 ticket, if related to ticket (via + * DUPLICATE-SKEY or ENC-TKT-IN-SKEY) */ + struct krb5_tagged_data *authdata; /* authorisation data */ +}; + +/* + * list of tokens attached to an rxrpc key + */ +struct rxrpc_key_token { + u16 security_index; /* RxRPC header security index */ + struct rxrpc_key_token *next; /* the next token in the list */ + union { + struct rxkad_key *kad; + struct rxk5_key *k5; + }; +}; + +/* + * structure of raw payloads passed to add_key() or instantiate key + */ +struct rxrpc_key_data_v1 { + u16 security_index; + u16 ticket_length; + u32 expiry; /* time_t */ + u32 kvno; + u8 session_key[8]; + u8 ticket[0]; +}; + +/* + * AF_RXRPC key payload derived from XDR format + * - based on openafs-1.4.10/src/auth/afs_token.xg + */ +#define AFSTOKEN_LENGTH_MAX 16384 /* max payload size */ +#define AFSTOKEN_STRING_MAX 256 /* max small string length */ +#define AFSTOKEN_DATA_MAX 64 /* max small data length */ +#define AFSTOKEN_CELL_MAX 64 /* max cellname length */ +#define AFSTOKEN_MAX 8 /* max tokens per payload */ +#define AFSTOKEN_BDATALN_MAX 16384 /* max big data length */ +#define AFSTOKEN_RK_TIX_MAX 12000 /* max RxKAD ticket size */ +#define AFSTOKEN_GK_KEY_MAX 64 /* max GSSAPI key size */ +#define AFSTOKEN_GK_TOKEN_MAX 16384 /* max GSSAPI token size */ +#define AFSTOKEN_K5_COMPONENTS_MAX 16 /* max K5 components */ +#define AFSTOKEN_K5_NAME_MAX 128 /* max K5 name length */ +#define AFSTOKEN_K5_REALM_MAX 64 /* max K5 realm name length */ 
+#define AFSTOKEN_K5_TIX_MAX 16384 /* max K5 ticket size */ +#define AFSTOKEN_K5_ADDRESSES_MAX 16 /* max K5 addresses */ +#define AFSTOKEN_K5_AUTHDATA_MAX 16 /* max K5 pieces of auth data */ + +/* + * Truncate a time64_t to the range from 1970 to 2106 as in the network + * protocol. + */ +static inline u32 rxrpc_time64_to_u32(time64_t time) +{ + if (time < 0) + return 0; + + if (time > UINT_MAX) + return UINT_MAX; + + return (u32)time; +} + +/* + * Extend u32 back to time64_t using the same 1970-2106 range. + */ +static inline time64_t rxrpc_u32_to_time64(u32 time) +{ + return (time64_t)time; +} + +#endif /* _KEYS_RXRPC_TYPE_H */ diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h new file mode 100644 index 000000000..359c2f936 --- /dev/null +++ b/include/keys/system_keyring.h @@ -0,0 +1,65 @@ +/* System keyring containing trusted public keys. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _KEYS_SYSTEM_KEYRING_H +#define _KEYS_SYSTEM_KEYRING_H + +#include + +#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING + +extern int restrict_link_by_builtin_trusted(struct key *keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *restriction_key); + +#else +#define restrict_link_by_builtin_trusted restrict_link_reject +#endif + +#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING +extern int restrict_link_by_builtin_and_secondary_trusted( + struct key *keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *restriction_key); +#else +#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted +#endif + +#ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING +extern int mark_hash_blacklisted(const char *hash); +extern int is_hash_blacklisted(const u8 *hash, size_t hash_len, + const char *type); +#else +static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len, + const char *type) +{ + return 0; +} +#endif + +#ifdef CONFIG_IMA_BLACKLIST_KEYRING +extern struct key *ima_blacklist_keyring; + +static inline struct key *get_ima_blacklist_keyring(void) +{ + return ima_blacklist_keyring; +} +#else +static inline struct key *get_ima_blacklist_keyring(void) +{ + return NULL; +} +#endif /* CONFIG_IMA_BLACKLIST_KEYRING */ + + +#endif /* _KEYS_SYSTEM_KEYRING_H */ diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h new file mode 100644 index 000000000..4ea7e55f2 --- /dev/null +++ b/include/keys/trusted-type.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2010 IBM Corporation + * Author: David Safford + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
+ */ + +#ifndef _KEYS_TRUSTED_TYPE_H +#define _KEYS_TRUSTED_TYPE_H + +#include +#include +#include + +#define MIN_KEY_SIZE 32 +#define MAX_KEY_SIZE 128 +#define MAX_BLOB_SIZE 512 +#define MAX_PCRINFO_SIZE 64 +#define MAX_DIGEST_SIZE 64 + +struct trusted_key_payload { + struct rcu_head rcu; + unsigned int key_len; + unsigned int blob_len; + unsigned char migratable; + unsigned char key[MAX_KEY_SIZE + 1]; + unsigned char blob[MAX_BLOB_SIZE]; +}; + +struct trusted_key_options { + uint16_t keytype; + uint32_t keyhandle; + unsigned char keyauth[TPM_DIGEST_SIZE]; + unsigned char blobauth[TPM_DIGEST_SIZE]; + uint32_t pcrinfo_len; + unsigned char pcrinfo[MAX_PCRINFO_SIZE]; + int pcrlock; + uint32_t hash; + uint32_t policydigest_len; + unsigned char policydigest[MAX_DIGEST_SIZE]; + uint32_t policyhandle; +}; + +extern struct key_type key_type_trusted; + +#endif /* _KEYS_TRUSTED_TYPE_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h new file mode 100644 index 000000000..0d8f3cd30 --- /dev/null +++ b/include/keys/user-type.h @@ -0,0 +1,62 @@ +/* user-type.h: User-defined key type + * + * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _KEYS_USER_TYPE_H +#define _KEYS_USER_TYPE_H + +#include +#include + +#ifdef CONFIG_KEYS + +/*****************************************************************************/ +/* + * the payload for a key of type "user" or "logon" + * - once filled in and attached to a key: + * - the payload struct is invariant may not be changed, only replaced + * - the payload must be read with RCU procedures or with the key semaphore + * held + * - the payload may only be replaced with the key semaphore write-locked + * - the key's data length is the size of the actual data, not including the + * payload wrapper + */ +struct user_key_payload { + struct rcu_head rcu; /* RCU destructor */ + unsigned short datalen; /* length of this data */ + char data[0] __aligned(__alignof__(u64)); /* actual data */ +}; + +extern struct key_type key_type_user; +extern struct key_type key_type_logon; + +struct key_preparsed_payload; + +extern int user_preparse(struct key_preparsed_payload *prep); +extern void user_free_preparse(struct key_preparsed_payload *prep); +extern int user_update(struct key *key, struct key_preparsed_payload *prep); +extern void user_revoke(struct key *key); +extern void user_destroy(struct key *key); +extern void user_describe(const struct key *user, struct seq_file *m); +extern long user_read(const struct key *key, char *buffer, size_t buflen); + +static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) +{ + return (struct user_key_payload *)dereference_key_rcu(key); +} + +static inline struct user_key_payload *user_key_payload_locked(const struct key *key) +{ + return (struct user_key_payload *)dereference_key_locked((struct key *)key); +} + +#endif /* CONFIG_KEYS */ + +#endif /* _KEYS_USER_TYPE_H */ diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h new file mode 100644 index 000000000..6502feb95 --- /dev/null +++ b/include/kvm/arm_arch_timer.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2012 ARM Ltd. 
+ * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_ARM_KVM_ARCH_TIMER_H +#define __ASM_ARM_KVM_ARCH_TIMER_H + +#include +#include +#include + +struct arch_timer_context { + /* Registers: control register, timer value */ + u32 cnt_ctl; + u64 cnt_cval; + + /* Timer IRQ */ + struct kvm_irq_level irq; + + /* + * We have multiple paths which can save/restore the timer state + * onto the hardware, so we need some way of keeping track of + * where the latest state is. + * + * loaded == true: State is loaded on the hardware registers. + * loaded == false: State is stored in memory. + */ + bool loaded; + + /* Virtual offset */ + u64 cntvoff; +}; + +struct arch_timer_cpu { + struct arch_timer_context vtimer; + struct arch_timer_context ptimer; + + /* Background timer used when the guest is not running */ + struct hrtimer bg_timer; + + /* Work queued with the above timer expires */ + struct work_struct expired; + + /* Physical timer emulation */ + struct hrtimer phys_timer; + + /* Is the timer enabled */ + bool enabled; +}; + +int kvm_timer_hyp_init(bool); +int kvm_timer_enable(struct kvm_vcpu *vcpu); +int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); +bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); +void kvm_timer_update_run(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); + +u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); +int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); + +int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); + +bool kvm_timer_is_pending(struct kvm_vcpu *vcpu); + +void kvm_timer_schedule(struct kvm_vcpu *vcpu); +void kvm_timer_unschedule(struct kvm_vcpu *vcpu); + +u64 kvm_phys_timer_read(void); + +void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); + +void kvm_timer_init_vhe(void); + +bool kvm_arch_timer_get_input_level(int vintid); + +#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer) +#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer) + +#endif diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h new file mode 100644 index 000000000..f87fe20fc --- /dev/null +++ b/include/kvm/arm_pmu.h @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2015 Linaro Ltd. + * Author: Shannon Zhao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ASM_ARM_KVM_PMU_H +#define __ASM_ARM_KVM_PMU_H + +#include +#include + +#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) + +#ifdef CONFIG_KVM_ARM_PMU + +struct kvm_pmc { + u8 idx; /* index into the pmu->pmc array */ + struct perf_event *perf_event; + u64 bitmask; +}; + +struct kvm_pmu { + int irq_num; + struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; + bool ready; + bool created; + bool irq_level; +}; + +#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) +#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) +u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); +void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); +u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); +void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu); +void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu); +bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu); +void kvm_pmu_update_run(struct kvm_vcpu *vcpu); +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, + u64 select_idx); +bool kvm_arm_support_pmu_v3(void); +int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu); +#else +struct kvm_pmu { +}; + +#define kvm_arm_pmu_v3_ready(v) (false) +#define kvm_arm_pmu_irq_initialized(v) (false) +static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, + u64 select_idx) +{ + return 0; +} +static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, + u64 select_idx, u64 val) {} +static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) +{ + return 0; +} +static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {} +static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu) +{ + return false; +} +static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, + u64 data, u64 select_idx) {} +static inline bool kvm_arm_support_pmu_v3(void) { return false; } +static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + 
return -ENXIO; +} +static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu) +{ + return 0; +} +#endif + +#endif diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h new file mode 100644 index 000000000..4b1548129 --- /dev/null +++ b/include/kvm/arm_psci.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __KVM_ARM_PSCI_H__ +#define __KVM_ARM_PSCI_H__ + +#include +#include + +#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) +#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) +#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) + +#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 + +/* + * We need the KVM pointer independently from the vcpu as we can call + * this from HYP, and need to apply kern_hyp_va on it... + */ +static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) +{ + /* + * Our PSCI implementation stays the same across versions from + * v0.2 onward, only adding the few mandatory functions (such + * as FEATURES with 1.0) that are required by newer + * revisions. It is thus safe to return the latest, unless + * userspace has instructed us otherwise. + */ + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) { + if (vcpu->kvm->arch.psci_version) + return vcpu->kvm->arch.psci_version; + + return KVM_ARM_PSCI_LATEST; + } + + return KVM_ARM_PSCI_0_1; +} + + +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); + +struct kvm_one_reg; + +int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu); +int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); +int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); + +#endif /* __KVM_ARM_PSCI_H__ */ diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h new file mode 100644 index 000000000..561fefc2a --- /dev/null +++ b/include/kvm/arm_vgic.h @@ -0,0 +1,411 @@ +/* + * Copyright (C) 2015, 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#ifndef __KVM_ARM_VGIC_H +#define __KVM_ARM_VGIC_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define VGIC_V3_MAX_CPUS 512 +#define VGIC_V2_MAX_CPUS 8 +#define VGIC_NR_IRQS_LEGACY 256 +#define VGIC_NR_SGIS 16 +#define VGIC_NR_PPIS 16 +#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) +#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1) +#define VGIC_MAX_SPI 1019 +#define VGIC_MAX_RESERVED 1023 +#define VGIC_MIN_LPI 8192 +#define KVM_IRQCHIP_NUM_PINS (1020 - 32) + +#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS) +#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \ + (irq) <= VGIC_MAX_SPI) + +enum vgic_type { + VGIC_V2, /* Good ol' GICv2 */ + VGIC_V3, /* New fancy GICv3 */ +}; + +/* same for all guests, as depending only on the _host's_ GIC model */ +struct vgic_global { + /* type of the host GIC */ + enum vgic_type type; + + /* Physical address of vgic virtual cpu interface */ + phys_addr_t vcpu_base; + + /* GICV mapping, kernel VA */ + void __iomem *vcpu_base_va; + /* GICV mapping, HYP VA */ + void __iomem *vcpu_hyp_va; + + /* virtual control interface mapping, kernel VA */ + void __iomem *vctrl_base; + /* virtual control interface mapping, HYP VA */ + void __iomem *vctrl_hyp; + + /* Number of implemented list registers */ + int nr_lr; + + /* Maintenance IRQ number */ + unsigned int maint_irq; + + /* maximum number of VCPUs allowed (GICv2 limits us to 8) */ + int max_gic_vcpus; + + /* Only needed for the legacy KVM_CREATE_IRQCHIP */ + bool can_emulate_gicv2; + + /* Hardware has GICv4? */ + bool has_gicv4; + + /* GIC system register CPU interface */ + struct static_key_false gicv3_cpuif; + + u32 ich_vtr_el2; +}; + +extern struct vgic_global kvm_vgic_global_state; + +#define VGIC_V2_MAX_LRS (1 << 6) +#define VGIC_V3_MAX_LRS 16 +#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) + +enum vgic_irq_config { + VGIC_CONFIG_EDGE = 0, + VGIC_CONFIG_LEVEL +}; + +struct vgic_irq { + spinlock_t irq_lock; /* Protects the content of the struct */ + struct list_head lpi_list; /* Used to link all LPIs together */ + struct list_head ap_list; + + struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU + * SPIs and LPIs: The VCPU whose ap_list + * this is queued on. + */ + + struct kvm_vcpu *target_vcpu; /* The VCPU that this interrupt should + * be sent to, as a result of the + * targets reg (v2) or the + * affinity reg (v3). + */ + + u32 intid; /* Guest visible INTID */ + bool line_level; /* Level only */ + bool pending_latch; /* The pending latch state used to calculate + * the pending state for both level + * and edge triggered IRQs. */ + bool active; /* not used for LPIs */ + bool enabled; + bool hw; /* Tied to HW IRQ */ + struct kref refcount; /* Used for LPIs */ + u32 hwintid; /* HW INTID number */ + unsigned int host_irq; /* linux irq corresponding to hwintid */ + union { + u8 targets; /* GICv2 target VCPUs mask */ + u32 mpidr; /* GICv3 target VCPU */ + }; + u8 source; /* GICv2 SGIs only */ + u8 active_source; /* GICv2 SGIs only */ + u8 priority; + u8 group; /* 0 == group 0, 1 == group 1 */ + enum vgic_irq_config config; /* Level or edge */ + + /* + * Callback function pointer to in-kernel devices that can tell us the + * state of the input level of mapped level-triggered IRQ faster than + * peaking into the physical GIC. + * + * Always called in non-preemptible section and the functions can use + * kvm_arm_get_running_vcpu() to get the vcpu pointer for private + * IRQs. 
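 *
 * For example (a sketch of the typical call, not a requirement of this
 * header): the arch timer installs its own level probe when it maps the
 * hardware interrupt,
 *
 *	kvm_vgic_map_phys_irq(vcpu, host_irq, vintid,
 *			      kvm_arch_timer_get_input_level);
 *
 * using the helper declared in kvm/arm_arch_timer.h and the mapping
 * function declared further down in this header.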
+ */ + bool (*get_input_level)(int vintid); + + void *owner; /* Opaque pointer to reserve an interrupt + for in-kernel devices. */ +}; + +struct vgic_register_region; +struct vgic_its; + +enum iodev_type { + IODEV_CPUIF, + IODEV_DIST, + IODEV_REDIST, + IODEV_ITS +}; + +struct vgic_io_device { + gpa_t base_addr; + union { + struct kvm_vcpu *redist_vcpu; + struct vgic_its *its; + }; + const struct vgic_register_region *regions; + enum iodev_type iodev_type; + int nr_regions; + struct kvm_io_device dev; +}; + +struct vgic_its { + /* The base address of the ITS control register frame */ + gpa_t vgic_its_base; + + bool enabled; + struct vgic_io_device iodev; + struct kvm_device *dev; + + /* These registers correspond to GITS_BASER{0,1} */ + u64 baser_device_table; + u64 baser_coll_table; + + /* Protects the command queue */ + struct mutex cmd_lock; + u64 cbaser; + u32 creadr; + u32 cwriter; + + /* migration ABI revision in use */ + u32 abi_rev; + + /* Protects the device and collection lists */ + struct mutex its_lock; + struct list_head device_list; + struct list_head collection_list; +}; + +struct vgic_state_iter; + +struct vgic_redist_region { + u32 index; + gpa_t base; + u32 count; /* number of redistributors or 0 if single region */ + u32 free_index; /* index of the next free redistributor */ + struct list_head list; +}; + +struct vgic_dist { + bool in_kernel; + bool ready; + bool initialized; + + /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */ + u32 vgic_model; + + /* Implementation revision as reported in the GICD_IIDR */ + u32 implementation_rev; + + /* Userspace can write to GICv2 IGROUPR */ + bool v2_groups_user_writable; + + /* Do injected MSIs require an additional device ID? */ + bool msis_require_devid; + + int nr_spis; + + /* base addresses in guest physical address space: */ + gpa_t vgic_dist_base; /* distributor */ + union { + /* either a GICv2 CPU interface */ + gpa_t vgic_cpu_base; + /* or a number of GICv3 redistributor regions */ + struct list_head rd_regions; + }; + + /* distributor enabled */ + bool enabled; + + struct vgic_irq *spis; + + struct vgic_io_device dist_iodev; + + bool has_its; + + /* + * Contains the attributes and gpa of the LPI configuration table. + * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share + * one address across all redistributors. + * GICv3 spec: 6.1.2 "LPI Configuration tables" + */ + u64 propbaser; + + /* Protects the lpi_list and the count value below. */ + raw_spinlock_t lpi_list_lock; + struct list_head lpi_list_head; + int lpi_list_count; + + /* used by vgic-debug */ + struct vgic_state_iter *iter; + + /* + * GICv4 ITS per-VM data, containing the IRQ domain, the VPE + * array, the property table pointer as well as allocation + * data. This essentially ties the Linux IRQ core and ITS + * together, and avoids leaking KVM's data structures anywhere + * else. + */ + struct its_vm its_vm; +}; + +struct vgic_v2_cpu_if { + u32 vgic_hcr; + u32 vgic_vmcr; + u32 vgic_apr; + u32 vgic_lr[VGIC_V2_MAX_LRS]; +}; + +struct vgic_v3_cpu_if { + u32 vgic_hcr; + u32 vgic_vmcr; + u32 vgic_sre; /* Restored only, change ignored */ + u32 vgic_ap0r[4]; + u32 vgic_ap1r[4]; + u64 vgic_lr[VGIC_V3_MAX_LRS]; + + /* + * GICv4 ITS per-VPE data, containing the doorbell IRQ, the + * pending table pointer, the its_vm pointer and a few other + * HW specific things. As for the its_vm structure, this is + * linking the Linux IRQ subsystem and the ITS together. 
+ */ + struct its_vpe its_vpe; +}; + +struct vgic_cpu { + /* CPU vif control registers for world switch */ + union { + struct vgic_v2_cpu_if vgic_v2; + struct vgic_v3_cpu_if vgic_v3; + }; + + unsigned int used_lrs; + struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; + + spinlock_t ap_list_lock; /* Protects the ap_list */ + + /* + * List of IRQs that this VCPU should consider because they are either + * Active or Pending (hence the name; AP list), or because they recently + * were one of the two and need to be migrated off this list to another + * VCPU. + */ + struct list_head ap_list_head; + + /* + * Members below are used with GICv3 emulation only and represent + * parts of the redistributor. + */ + struct vgic_io_device rd_iodev; + struct vgic_io_device sgi_iodev; + struct vgic_redist_region *rdreg; + + /* Contains the attributes and gpa of the LPI pending tables. */ + u64 pendbaser; + + bool lpis_enabled; + + /* Cache guest priority bits */ + u32 num_pri_bits; + + /* Cache guest interrupt ID bits */ + u32 num_id_bits; +}; + +extern struct static_key_false vgic_v2_cpuif_trap; +extern struct static_key_false vgic_v3_cpuif_trap; + +int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); +void kvm_vgic_early_init(struct kvm *kvm); +int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); +int kvm_vgic_create(struct kvm *kvm, u32 type); +void kvm_vgic_destroy(struct kvm *kvm); +void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); +int kvm_vgic_map_resources(struct kvm *kvm); +int kvm_vgic_hyp_init(void); +void kvm_vgic_init_cpu_hardware(void); + +int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, + bool level, void *owner); +int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, + u32 vintid, bool (*get_input_level)(int vindid)); +int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid); +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid); + +int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); + +void kvm_vgic_load(struct kvm_vcpu *vcpu); +void kvm_vgic_put(struct kvm_vcpu *vcpu); +void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu); + +#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) +#define vgic_initialized(k) ((k)->arch.vgic.initialized) +#define vgic_ready(k) ((k)->arch.vgic.ready) +#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \ + ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) + +bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu); +void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); +void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); +void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid); + +void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1); + +/** + * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW + * + * The host's GIC naturally limits the maximum amount of VCPUs a guest + * can use. 
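 *
 * With the constants above this is VGIC_V2_MAX_CPUS (8) on a GICv2 host
 * and up to VGIC_V3_MAX_CPUS (512) on a GICv3 host; the helper below
 * simply returns kvm_vgic_global_state.max_gic_vcpus.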
+ */ +static inline int kvm_vgic_get_max_vcpus(void) +{ + return kvm_vgic_global_state.max_gic_vcpus; +} + +int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); + +/** + * kvm_vgic_setup_default_irq_routing: + * Setup a default flat gsi routing table mapping all SPIs + */ +int kvm_vgic_setup_default_irq_routing(struct kvm *kvm); + +int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner); + +struct kvm_kernel_irq_routing_entry; + +int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq, + struct kvm_kernel_irq_routing_entry *irq_entry); + +int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq, + struct kvm_kernel_irq_routing_entry *irq_entry); + +void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu); +void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu); + +#endif /* __KVM_ARM_VGIC_H */ diff --git a/include/kvm/iodev.h b/include/kvm/iodev.h new file mode 100644 index 000000000..a6d208b91 --- /dev/null +++ b/include/kvm/iodev.h @@ -0,0 +1,76 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __KVM_IODEV_H__ +#define __KVM_IODEV_H__ + +#include +#include + +struct kvm_io_device; +struct kvm_vcpu; + +/** + * kvm_io_device_ops are called under kvm slots_lock. + * read and write handlers return 0 if the transaction has been handled, + * or non-zero to have it passed to the next device. + **/ +struct kvm_io_device_ops { + int (*read)(struct kvm_vcpu *vcpu, + struct kvm_io_device *this, + gpa_t addr, + int len, + void *val); + int (*write)(struct kvm_vcpu *vcpu, + struct kvm_io_device *this, + gpa_t addr, + int len, + const void *val); + void (*destructor)(struct kvm_io_device *this); +}; + + +struct kvm_io_device { + const struct kvm_io_device_ops *ops; +}; + +static inline void kvm_iodevice_init(struct kvm_io_device *dev, + const struct kvm_io_device_ops *ops) +{ + dev->ops = ops; +} + +static inline int kvm_iodevice_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, gpa_t addr, + int l, void *v) +{ + return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v) + : -EOPNOTSUPP; +} + +static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, gpa_t addr, + int l, const void *v) +{ + return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v) + : -EOPNOTSUPP; +} + +static inline void kvm_iodevice_destructor(struct kvm_io_device *dev) +{ + if (dev->ops->destructor) + dev->ops->destructor(dev); +} + +#endif /* __KVM_IODEV_H__ */ diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h new file mode 100644 index 000000000..9c777d2c9 --- /dev/null +++ b/include/linux/8250_pci.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for PCI support. 
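A minimal sketch of how a consumer of the iodev ops table above might wire up a trivial MMIO device; the "example" names are hypothetical, only the signatures come from the declarations:

static int example_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	memset(val, 0, len);	/* every read returns zeroes */
	return 0;		/* 0: handled, do not try the next device */
}

static int example_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			 gpa_t addr, int len, const void *val)
{
	return 0;		/* accept and discard writes */
}

static const struct kvm_io_device_ops example_ops = {
	.read	= example_read,
	.write	= example_write,
};

static struct kvm_io_device example_dev;

static void example_init(void)
{
	kvm_iodevice_init(&example_dev, &example_ops);
	/* The device would then be registered on one of KVM's I/O buses. */
}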
+ */ +#define FL_BASE_MASK 0x0007 +#define FL_BASE0 0x0000 +#define FL_BASE1 0x0001 +#define FL_BASE2 0x0002 +#define FL_BASE3 0x0003 +#define FL_BASE4 0x0004 +#define FL_GET_BASE(x) (x & FL_BASE_MASK) + +/* Use successive BARs (PCI base address registers), + else use offset into some specified BAR */ +#define FL_BASE_BARS 0x0008 + +/* do not assign an irq */ +#define FL_NOIRQ 0x0080 + +/* Use the Base address register size to cap number of ports */ +#define FL_REGION_SZ_CAP 0x0100 + +struct pciserial_board { + unsigned int flags; + unsigned int num_ports; + unsigned int base_baud; + unsigned int uart_offset; + unsigned int reg_shift; + unsigned int first_offset; +}; + +struct serial_private; + +struct serial_private * +pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board); +void pciserial_remove_ports(struct serial_private *priv); +void pciserial_suspend_ports(struct serial_private *priv); +void pciserial_resume_ports(struct serial_private *priv); diff --git a/include/linux/a.out.h b/include/linux/a.out.h new file mode 100644 index 000000000..600cf4564 --- /dev/null +++ b/include/linux/a.out.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __A_OUT_GNU_H__ +#define __A_OUT_GNU_H__ + +#include + +#ifndef __ASSEMBLY__ +#ifdef linux +#include +#if defined(__i386__) || defined(__mc68000__) +#else +#ifndef SEGMENT_SIZE +#define SEGMENT_SIZE PAGE_SIZE +#endif +#endif +#endif +#endif /*__ASSEMBLY__ */ +#endif /* __A_OUT_GNU_H__ */ diff --git a/include/linux/acct.h b/include/linux/acct.h new file mode 100644 index 000000000..bc70e8189 --- /dev/null +++ b/include/linux/acct.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BSD Process Accounting for Linux - Definitions + * + * Author: Marco van Wieringen (mvw@planets.elm.net) + * + * This header file contains the definitions needed to implement + * BSD-style process accounting. The kernel accounting code and all + * user-level programs that try to do something useful with the + * process accounting log must include this file. + * + * Copyright (C) 1995 - 1997 Marco van Wieringen - ELM Consultancy B.V. + * + */ +#ifndef _LINUX_ACCT_H +#define _LINUX_ACCT_H + +#include + + + +#ifdef CONFIG_BSD_PROCESS_ACCT +struct pid_namespace; +extern int acct_parm[]; /* for sysctl */ +extern void acct_collect(long exitcode, int group_dead); +extern void acct_process(void); +extern void acct_exit_ns(struct pid_namespace *); +#else +#define acct_collect(x,y) do { } while (0) +#define acct_process() do { } while (0) +#define acct_exit_ns(ns) do { } while (0) +#endif + +/* + * ACCT_VERSION numbers as yet defined: + * 0: old format (until 2.6.7) with 16 bit uid/gid + * 1: extended variant (binary compatible on M68K) + * 2: extended variant (binary compatible on everything except M68K) + * 3: new binary incompatible format (64 bytes) + * 4: new binary incompatible format (128 bytes) + * 5: new binary incompatible format (128 bytes, second half) + * + */ + +#undef ACCT_VERSION +#undef AHZ + +#ifdef CONFIG_BSD_PROCESS_ACCT_V3 +#define ACCT_VERSION 3 +#define AHZ 100 +typedef struct acct_v3 acct_t; +#else +#ifdef CONFIG_M68K +#define ACCT_VERSION 1 +#else +#define ACCT_VERSION 2 +#endif +#define AHZ (USER_HZ) +typedef struct acct acct_t; +#endif + +#include +/* + * Yet another set of HZ to *HZ helper functions. + * See for the original. 
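To make the pciserial flag/structure combination above concrete, here is a hedged sketch of a board description and probe call; the port count, offsets and error-pointer convention are illustrative, not taken from a real card:

static const struct pciserial_board example_board = {
	.flags		= FL_BASE0,	/* all ports decoded from BAR 0 */
	.num_ports	= 2,
	.base_baud	= 115200,
	.uart_offset	= 8,		/* successive ports 8 bytes apart */
};

static int example_setup(struct pci_dev *dev)
{
	struct serial_private *priv;

	priv = pciserial_init_ports(dev, &example_board);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	/* priv is later handed to pciserial_remove_ports() on teardown. */
	return 0;
}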
+ */ + +static inline u32 jiffies_to_AHZ(unsigned long x) +{ +#if (TICK_NSEC % (NSEC_PER_SEC / AHZ)) == 0 +# if HZ < AHZ + return x * (AHZ / HZ); +# else + return x / (HZ / AHZ); +# endif +#else + u64 tmp = (u64)x * TICK_NSEC; + do_div(tmp, (NSEC_PER_SEC / AHZ)); + return (long)tmp; +#endif +} + +static inline u64 nsec_to_AHZ(u64 x) +{ +#if (NSEC_PER_SEC % AHZ) == 0 + do_div(x, (NSEC_PER_SEC / AHZ)); +#elif (AHZ % 512) == 0 + x *= AHZ/512; + do_div(x, (NSEC_PER_SEC / 512)); +#else + /* + * max relative error 5.7e-8 (1.8s per year) for AHZ <= 1024, + * overflow after 64.99 years. + * exact for AHZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ... + */ + x *= 9; + do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (AHZ/2)) + / AHZ)); +#endif + return x; +} + +#endif /* _LINUX_ACCT_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h new file mode 100644 index 000000000..1a3774876 --- /dev/null +++ b/include/linux/acpi.h @@ -0,0 +1,1323 @@ +/* + * acpi.h - ACPI Interface + * + * Copyright (C) 2001 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef _LINUX_ACPI_H +#define _LINUX_ACPI_H + +#include +#include /* for struct resource */ +#include +#include +#include +#include + +#ifndef _LINUX +#define _LINUX +#endif +#include + +#ifdef CONFIG_ACPI + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static inline acpi_handle acpi_device_handle(struct acpi_device *adev) +{ + return adev ? adev->handle : NULL; +} + +#define ACPI_COMPANION(dev) to_acpi_device_node((dev)->fwnode) +#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ + acpi_fwnode_handle(adev) : NULL) +#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) +#define ACPI_HANDLE_FWNODE(fwnode) \ + acpi_device_handle(to_acpi_device_node(fwnode)) + +static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) +{ + struct fwnode_handle *fwnode; + + fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL); + if (!fwnode) + return NULL; + + fwnode->ops = &acpi_static_fwnode_ops; + + return fwnode; +} + +static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode) +{ + if (WARN_ON(!is_acpi_static_node(fwnode))) + return; + + kfree(fwnode); +} + +/** + * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with + * the PCI-defined class-code information + * + * @_cls : the class, subclass, prog-if triple for this device + * @_msk : the class mask for this device + * + * This macro is used to create a struct acpi_device_id that matches a + * specific PCI class. The .id and .driver_data fields will be left + * initialized with the default value. 
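As a worked instance of the conversion helpers above (a sketch assuming the common case AHZ == 100 and NSEC_PER_SEC == 1000000000, so the exact-division branch is taken):

/*
 * nsec_to_AHZ(2500000000ULL):
 *   NSEC_PER_SEC % AHZ == 0, so x is divided by NSEC_PER_SEC / AHZ = 10000000
 *   => 2500000000 / 10000000 = 250 ticks, i.e. 2.5 seconds at 100 ticks/s.
 */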
+ */ +#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk), + +static inline bool has_acpi_companion(struct device *dev) +{ + return is_acpi_device_node(dev->fwnode); +} + +static inline void acpi_preset_companion(struct device *dev, + struct acpi_device *parent, u64 addr) +{ + ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false)); +} + +static inline const char *acpi_dev_name(struct acpi_device *adev) +{ + return dev_name(&adev->dev); +} + +struct device *acpi_get_first_physical_node(struct acpi_device *adev); + +enum acpi_irq_model_id { + ACPI_IRQ_MODEL_PIC = 0, + ACPI_IRQ_MODEL_IOAPIC, + ACPI_IRQ_MODEL_IOSAPIC, + ACPI_IRQ_MODEL_PLATFORM, + ACPI_IRQ_MODEL_GIC, + ACPI_IRQ_MODEL_COUNT +}; + +extern enum acpi_irq_model_id acpi_irq_model; + +enum acpi_interrupt_id { + ACPI_INTERRUPT_PMI = 1, + ACPI_INTERRUPT_INIT, + ACPI_INTERRUPT_CPEI, + ACPI_INTERRUPT_COUNT +}; + +#define ACPI_SPACE_MEM 0 + +enum acpi_address_range_id { + ACPI_ADDRESS_RANGE_MEMORY = 1, + ACPI_ADDRESS_RANGE_RESERVED = 2, + ACPI_ADDRESS_RANGE_ACPI = 3, + ACPI_ADDRESS_RANGE_NVS = 4, + ACPI_ADDRESS_RANGE_COUNT +}; + + +/* Table Handlers */ + +typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); + +typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header, + const unsigned long end); + +/* Debugger support */ + +struct acpi_debugger_ops { + int (*create_thread)(acpi_osd_exec_callback function, void *context); + ssize_t (*write_log)(const char *msg); + ssize_t (*read_cmd)(char *buffer, size_t length); + int (*wait_command_ready)(bool single_step, char *buffer, size_t length); + int (*notify_command_complete)(void); +}; + +struct acpi_debugger { + const struct acpi_debugger_ops *ops; + struct module *owner; + struct mutex lock; +}; + +#ifdef CONFIG_ACPI_DEBUGGER +int __init acpi_debugger_init(void); +int acpi_register_debugger(struct module *owner, + const struct acpi_debugger_ops *ops); +void acpi_unregister_debugger(const struct acpi_debugger_ops *ops); +int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context); +ssize_t acpi_debugger_write_log(const char *msg); +ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length); +int acpi_debugger_wait_command_ready(void); +int acpi_debugger_notify_command_complete(void); +#else +static inline int acpi_debugger_init(void) +{ + return -ENODEV; +} + +static inline int acpi_register_debugger(struct module *owner, + const struct acpi_debugger_ops *ops) +{ + return -ENODEV; +} + +static inline void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) +{ +} + +static inline int acpi_debugger_create_thread(acpi_osd_exec_callback function, + void *context) +{ + return -ENODEV; +} + +static inline int acpi_debugger_write_log(const char *msg) +{ + return -ENODEV; +} + +static inline int acpi_debugger_read_cmd(char *buffer, u32 buffer_length) +{ + return -ENODEV; +} + +static inline int acpi_debugger_wait_command_ready(void) +{ + return -ENODEV; +} + +static inline int acpi_debugger_notify_command_complete(void) +{ + return -ENODEV; +} +#endif + +#define BAD_MADT_ENTRY(entry, end) ( \ + (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ + ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) + +struct acpi_subtable_proc { + int id; + acpi_tbl_entry_handler handler; + int count; +}; + +void __iomem *__acpi_map_table(unsigned long phys, unsigned long size); +void __acpi_unmap_table(void __iomem *map, unsigned long size); +int early_acpi_boot_init(void); +int 
acpi_boot_init (void); +void acpi_boot_table_prepare (void); +void acpi_boot_table_init (void); +int acpi_mps_check (void); +int acpi_numa_init (void); + +int acpi_locate_initial_tables (void); +void acpi_reserve_initial_tables (void); +void acpi_table_init_complete (void); +int acpi_table_init (void); +int acpi_table_parse(char *id, acpi_tbl_table_handler handler); +int __init acpi_table_parse_entries(char *id, unsigned long table_size, + int entry_id, + acpi_tbl_entry_handler handler, + unsigned int max_entries); +int __init acpi_table_parse_entries_array(char *id, unsigned long table_size, + struct acpi_subtable_proc *proc, int proc_num, + unsigned int max_entries); +int acpi_table_parse_madt(enum acpi_madt_type id, + acpi_tbl_entry_handler handler, + unsigned int max_entries); +int acpi_parse_mcfg (struct acpi_table_header *header); +void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); + +/* the following numa functions are architecture-dependent */ +void acpi_numa_slit_init (struct acpi_table_slit *slit); + +#if defined(CONFIG_X86) || defined(CONFIG_IA64) +void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); +#else +static inline void +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { } +#endif + +void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); + +#ifdef CONFIG_ARM64 +void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); +#else +static inline void +acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } +#endif + +int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); + +#ifndef PHYS_CPUID_INVALID +typedef u32 phys_cpuid_t; +#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) +#endif + +static inline bool invalid_logical_cpuid(u32 cpuid) +{ + return (int)cpuid < 0; +} + +static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) +{ + return phys_id == PHYS_CPUID_INVALID; +} + +/* Validate the processor object's proc_id */ +bool acpi_duplicate_processor_id(int proc_id); + +#ifdef CONFIG_ACPI_HOTPLUG_CPU +/* Arch dependent functions for cpu hotplug support */ +int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, + int *pcpu); +int acpi_unmap_cpu(int cpu); +#endif /* CONFIG_ACPI_HOTPLUG_CPU */ + +#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC +int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); +#endif + +int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); +int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); +int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); +void acpi_irq_stats_init(void); +extern u32 acpi_irq_handled; +extern u32 acpi_irq_not_handled; +extern unsigned int acpi_sci_irq; +extern bool acpi_no_s5; +#define INVALID_ACPI_IRQ ((unsigned)-1) +static inline bool acpi_sci_irq_valid(void) +{ + return acpi_sci_irq != INVALID_ACPI_IRQ; +} + +extern int sbf_port; +extern unsigned long acpi_realmode_flags; + +int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); +int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); +int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); + +void acpi_set_irq_model(enum acpi_irq_model_id model, + struct fwnode_handle *fwnode); + +#ifdef CONFIG_X86_IO_APIC +extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); +#else +static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) +{ + return -1; +} +#endif +/* + * This function undoes the effect of one call to acpi_register_gsi(). 
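A brief sketch of the table-parsing entry points declared above in use; the handler body is illustrative, and ACPI_SIG_MADT ("APIC") comes from the ACPICA table headers this file pulls in:

static int __init example_parse_madt(struct acpi_table_header *table)
{
	pr_info("MADT revision %u, length %u\n",
		table->revision, table->length);
	return 0;
}

static int __init example_scan_tables(void)
{
	/* Returns non-zero if the table is absent or the handler failed. */
	return acpi_table_parse(ACPI_SIG_MADT, example_parse_madt);
}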
+ * If this matches the last registration, any IRQ resources for gsi + * are freed. + */ +void acpi_unregister_gsi (u32 gsi); + +struct pci_dev; + +int acpi_pci_irq_enable (struct pci_dev *dev); +void acpi_penalize_isa_irq(int irq, int active); +bool acpi_isa_irq_available(int irq); +void acpi_penalize_sci_irq(int irq, int trigger, int polarity); +void acpi_pci_irq_disable (struct pci_dev *dev); + +extern int ec_read(u8 addr, u8 *val); +extern int ec_write(u8 addr, u8 val); +extern int ec_transaction(u8 command, + const u8 *wdata, unsigned wdata_len, + u8 *rdata, unsigned rdata_len); +extern acpi_handle ec_get_handle(void); + +extern bool acpi_is_pnp_device(struct acpi_device *); + +#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) + +typedef void (*wmi_notify_handler) (u32 value, void *context); + +extern acpi_status wmi_evaluate_method(const char *guid, u8 instance, + u32 method_id, + const struct acpi_buffer *in, + struct acpi_buffer *out); +extern acpi_status wmi_query_block(const char *guid, u8 instance, + struct acpi_buffer *out); +extern acpi_status wmi_set_block(const char *guid, u8 instance, + const struct acpi_buffer *in); +extern acpi_status wmi_install_notify_handler(const char *guid, + wmi_notify_handler handler, void *data); +extern acpi_status wmi_remove_notify_handler(const char *guid); +extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); +extern bool wmi_has_guid(const char *guid); + +#endif /* CONFIG_ACPI_WMI */ + +#define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001 +#define ACPI_VIDEO_DEVICE_POSTING 0x0002 +#define ACPI_VIDEO_ROM_AVAILABLE 0x0004 +#define ACPI_VIDEO_BACKLIGHT 0x0008 +#define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010 +#define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020 +#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040 +#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080 +#define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100 +#define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200 +#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 +#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 + +extern char acpi_video_backlight_string[]; +extern long acpi_is_video_device(acpi_handle handle); +extern int acpi_blacklisted(void); +extern void acpi_osi_setup(char *str); +extern bool acpi_osi_is_win8(void); + +#ifdef CONFIG_ACPI_NUMA +int acpi_map_pxm_to_online_node(int pxm); +int acpi_get_node(acpi_handle handle); +#else +static inline int acpi_map_pxm_to_online_node(int pxm) +{ + return 0; +} +static inline int acpi_get_node(acpi_handle handle) +{ + return 0; +} +#endif +extern int acpi_paddr_to_node(u64 start_addr, u64 size); + +extern int pnpacpi_disabled; + +#define PXM_INVAL (-1) + +bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); +bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); +bool acpi_dev_resource_address_space(struct acpi_resource *ares, + struct resource_win *win); +bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, + struct resource_win *win); +unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); +unsigned int acpi_dev_get_irq_type(int triggering, int polarity); +bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, + struct resource *res); + +void acpi_dev_free_resource_list(struct list_head *list); +int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, + int (*preproc)(struct acpi_resource *, void *), + void *preproc_data); +int acpi_dev_get_dma_resources(struct acpi_device *adev, + struct 
list_head *list); +int acpi_dev_filter_resource_type(struct acpi_resource *ares, + unsigned long types); + +static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares, + void *arg) +{ + return acpi_dev_filter_resource_type(ares, (unsigned long)arg); +} + +struct acpi_device *acpi_resource_consumer(struct resource *res); + +int acpi_check_resource_conflict(const struct resource *res); + +int acpi_check_region(resource_size_t start, resource_size_t n, + const char *name); + +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, + u32 level); + +int acpi_resources_are_enforced(void); + +#ifdef CONFIG_HIBERNATION +void __init acpi_no_s4_hw_signature(void); +#endif + +#ifdef CONFIG_PM_SLEEP +void __init acpi_old_suspend_ordering(void); +void __init acpi_nvs_nosave(void); +void __init acpi_nvs_nosave_s3(void); +void __init acpi_sleep_no_blacklist(void); +#endif /* CONFIG_PM_SLEEP */ + +struct acpi_osc_context { + char *uuid_str; /* UUID string */ + int rev; + struct acpi_buffer cap; /* list of DWORD capabilities */ + struct acpi_buffer ret; /* free by caller if success */ +}; + +acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); + +/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */ +#define OSC_QUERY_DWORD 0 /* DWORD 1 */ +#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */ +#define OSC_CONTROL_DWORD 2 /* DWORD 3 */ + +/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */ +#define OSC_QUERY_ENABLE 0x00000001 /* input */ +#define OSC_REQUEST_ERROR 0x00000002 /* return */ +#define OSC_INVALID_UUID_ERROR 0x00000004 /* return */ +#define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */ +#define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */ + +/* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */ +#define OSC_SB_PAD_SUPPORT 0x00000001 +#define OSC_SB_PPC_OST_SUPPORT 0x00000002 +#define OSC_SB_PR3_SUPPORT 0x00000004 +#define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 +#define OSC_SB_APEI_SUPPORT 0x00000010 +#define OSC_SB_CPC_SUPPORT 0x00000020 +#define OSC_SB_CPCV2_SUPPORT 0x00000040 +#define OSC_SB_PCLPI_SUPPORT 0x00000080 +#define OSC_SB_OSLPI_SUPPORT 0x00000100 +#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 + +extern bool osc_sb_apei_support_acked; +extern bool osc_pc_lpi_support_confirmed; + +/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ +#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 +#define OSC_PCI_ASPM_SUPPORT 0x00000002 +#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 +#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 +#define OSC_PCI_MSI_SUPPORT 0x00000010 +#define OSC_PCI_SUPPORT_MASKS 0x0000001f + +/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ +#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 +#define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002 +#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004 +#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 +#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 +#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020 +#define OSC_PCI_CONTROL_MASKS 0x0000003f + +#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 +#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 +#define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006 +#define ACPI_GSB_ACCESS_ATTRIB_WORD 0x00000008 +#define ACPI_GSB_ACCESS_ATTRIB_BLOCK 0x0000000A +#define ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE 0x0000000B +#define ACPI_GSB_ACCESS_ATTRIB_WORD_CALL 0x0000000C +#define ACPI_GSB_ACCESS_ATTRIB_BLOCK_CALL 0x0000000D +#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 
0x0000000E +#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F + +extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, + u32 *mask, u32 req); + +/* Enable _OST when all relevant hotplug operations are enabled */ +#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ + defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \ + defined(CONFIG_ACPI_CONTAINER) +#define ACPI_HOTPLUG_OST +#endif + +/* _OST Source Event Code (OSPM Action) */ +#define ACPI_OST_EC_OSPM_SHUTDOWN 0x100 +#define ACPI_OST_EC_OSPM_EJECT 0x103 +#define ACPI_OST_EC_OSPM_INSERTION 0x200 + +/* _OST General Processing Status Code */ +#define ACPI_OST_SC_SUCCESS 0x0 +#define ACPI_OST_SC_NON_SPECIFIC_FAILURE 0x1 +#define ACPI_OST_SC_UNRECOGNIZED_NOTIFY 0x2 + +/* _OST OS Shutdown Processing (0x100) Status Code */ +#define ACPI_OST_SC_OS_SHUTDOWN_DENIED 0x80 +#define ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS 0x81 +#define ACPI_OST_SC_OS_SHUTDOWN_COMPLETED 0x82 +#define ACPI_OST_SC_OS_SHUTDOWN_NOT_SUPPORTED 0x83 + +/* _OST Ejection Request (0x3, 0x103) Status Code */ +#define ACPI_OST_SC_EJECT_NOT_SUPPORTED 0x80 +#define ACPI_OST_SC_DEVICE_IN_USE 0x81 +#define ACPI_OST_SC_DEVICE_BUSY 0x82 +#define ACPI_OST_SC_EJECT_DEPENDENCY_BUSY 0x83 +#define ACPI_OST_SC_EJECT_IN_PROGRESS 0x84 + +/* _OST Insertion Request (0x200) Status Code */ +#define ACPI_OST_SC_INSERT_IN_PROGRESS 0x80 +#define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81 +#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 + +enum acpi_predicate { + all_versions, + less_than_or_equal, + equal, + greater_than_or_equal, +}; + +/* Table must be terminted by a NULL entry */ +struct acpi_platform_list { + char oem_id[ACPI_OEM_ID_SIZE+1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE+1]; + u32 oem_revision; + char *table; + enum acpi_predicate pred; + char *reason; + u32 data; +}; +int acpi_match_platform_list(const struct acpi_platform_list *plat); + +extern void acpi_early_init(void); +extern void acpi_subsystem_init(void); +extern void arch_post_acpi_subsys_init(void); + +extern int acpi_nvs_register(__u64 start, __u64 size); + +extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), + void *data); + +const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, + const struct device *dev); + +const void *acpi_device_get_match_data(const struct device *dev); +extern bool acpi_driver_match_device(struct device *dev, + const struct device_driver *drv); +int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); +int acpi_device_modalias(struct device *, char *, int); +void acpi_walk_dep_device_list(acpi_handle handle); + +struct platform_device *acpi_create_platform_device(struct acpi_device *, + struct property_entry *); +#define ACPI_PTR(_ptr) (_ptr) + +static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = true; +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = false; +} + +enum acpi_reconfig_event { + ACPI_RECONFIG_DEVICE_ADD = 0, + ACPI_RECONFIG_DEVICE_REMOVE, +}; + +int acpi_reconfig_notifier_register(struct notifier_block *nb); +int acpi_reconfig_notifier_unregister(struct notifier_block *nb); + +#ifdef CONFIG_ACPI_GTDT +int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count); +int acpi_gtdt_map_ppi(int type); +bool acpi_gtdt_c3stop(int type); +int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count); +#endif + +#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER +static inline u64 acpi_arch_get_root_pointer(void) +{ + 
return 0; +} +#endif + +#else /* !CONFIG_ACPI */ + +#define acpi_disabled 1 + +#define ACPI_COMPANION(dev) (NULL) +#define ACPI_COMPANION_SET(dev, adev) do { } while (0) +#define ACPI_HANDLE(dev) (NULL) +#define ACPI_HANDLE_FWNODE(fwnode) (NULL) +#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), + +struct fwnode_handle; + +static inline bool acpi_dev_found(const char *hid) +{ + return false; +} + +static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) +{ + return false; +} + +static inline const char * +acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv) +{ + return NULL; +} + +static inline bool is_acpi_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline bool is_acpi_data_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline bool acpi_data_node_match(struct fwnode_handle *fwnode, + const char *name) +{ + return false; +} + +static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) +{ + return NULL; +} + +static inline bool has_acpi_companion(struct device *dev) +{ + return false; +} + +static inline void acpi_preset_companion(struct device *dev, + struct acpi_device *parent, u64 addr) +{ +} + +static inline const char *acpi_dev_name(struct acpi_device *adev) +{ + return NULL; +} + +static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev) +{ + return NULL; +} + +static inline void acpi_early_init(void) { } +static inline void acpi_subsystem_init(void) { } + +static inline int early_acpi_boot_init(void) +{ + return 0; +} +static inline int acpi_boot_init(void) +{ + return 0; +} + +static inline void acpi_boot_table_prepare(void) +{ +} + +static inline void acpi_boot_table_init(void) +{ +} + +static inline int acpi_mps_check(void) +{ + return 0; +} + +static inline int acpi_check_resource_conflict(struct resource *res) +{ + return 0; +} + +static inline int acpi_check_region(resource_size_t start, resource_size_t n, + const char *name) +{ + return 0; +} + +struct acpi_table_header; +static inline int acpi_table_parse(char *id, + int (*handler)(struct acpi_table_header *)) +{ + return -ENODEV; +} + +static inline int acpi_nvs_register(__u64 start, __u64 size) +{ + return 0; +} + +static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), + void *data) +{ + return 0; +} + +struct acpi_device_id; + +static inline const struct acpi_device_id *acpi_match_device( + const struct acpi_device_id *ids, const struct device *dev) +{ + return NULL; +} + +static inline const void *acpi_device_get_match_data(const struct device *dev) +{ + return NULL; +} + +static inline bool acpi_driver_match_device(struct device *dev, + const struct device_driver *drv) +{ + return false; +} + +static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle, + const guid_t *guid, + int rev, int func, + union acpi_object *argv4) +{ + return NULL; +} + +static inline int acpi_device_uevent_modalias(struct device *dev, + struct kobj_uevent_env *env) +{ + return -ENODEV; +} + +static inline int acpi_device_modalias(struct device *dev, + char *buf, int size) +{ + return -ENODEV; +} + +static inline struct platform_device * 
+acpi_create_platform_device(struct acpi_device *adev, + struct property_entry *properties) +{ + return NULL; +} + +static inline bool acpi_dma_supported(struct acpi_device *adev) +{ + return false; +} + +static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) +{ + return DEV_DMA_NOT_SUPPORTED; +} + +static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr, + u64 *offset, u64 *size) +{ + return -ENODEV; +} + +static inline int acpi_dma_configure(struct device *dev, + enum dev_dma_attr attr) +{ + return 0; +} + +static inline void acpi_dma_deconfigure(struct device *dev) { } + +#define ACPI_PTR(_ptr) (NULL) + +static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ +} + +static inline int acpi_reconfig_notifier_register(struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline struct acpi_device *acpi_resource_consumer(struct resource *res) +{ + return NULL; +} + +#endif /* !CONFIG_ACPI */ + +#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC +int acpi_ioapic_add(acpi_handle root); +#else +static inline int acpi_ioapic_add(acpi_handle root) { return 0; } +#endif + +#ifdef CONFIG_ACPI +void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, + u32 pm1a_ctrl, u32 pm1b_ctrl)); + +acpi_status acpi_os_prepare_sleep(u8 sleep_state, + u32 pm1a_control, u32 pm1b_control); + +void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, + u32 val_a, u32 val_b)); + +acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, + u32 val_a, u32 val_b); + +#ifdef CONFIG_X86 +void arch_reserve_mem_area(acpi_physical_address addr, size_t size); +#else +static inline void arch_reserve_mem_area(acpi_physical_address addr, + size_t size) +{ +} +#endif /* CONFIG_X86 */ +#else +#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) +#endif + +#if defined(CONFIG_ACPI) && defined(CONFIG_PM) +int acpi_dev_suspend(struct device *dev, bool wakeup); +int acpi_dev_resume(struct device *dev); +int acpi_subsys_runtime_suspend(struct device *dev); +int acpi_subsys_runtime_resume(struct device *dev); +int acpi_dev_pm_attach(struct device *dev, bool power_on); +#else +static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } +static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } +static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } +static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } +static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) +{ + return 0; +} +#endif + +#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) +int acpi_dev_suspend_late(struct device *dev); +int acpi_subsys_prepare(struct device *dev); +void acpi_subsys_complete(struct device *dev); +int acpi_subsys_suspend_late(struct device *dev); +int acpi_subsys_suspend_noirq(struct device *dev); +int acpi_subsys_suspend(struct device *dev); +int acpi_subsys_freeze(struct device *dev); +int acpi_subsys_poweroff(struct device *dev); +#else +static inline int acpi_dev_resume_early(struct device *dev) { return 0; } +static inline int acpi_subsys_prepare(struct device *dev) { return 0; } +static inline void acpi_subsys_complete(struct device *dev) {} +static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } +static inline int acpi_subsys_suspend_noirq(struct device *dev) { 
return 0; } +static inline int acpi_subsys_suspend(struct device *dev) { return 0; } +static inline int acpi_subsys_freeze(struct device *dev) { return 0; } +static inline int acpi_subsys_poweroff(struct device *dev) { return 0; } +#endif + +#ifdef CONFIG_ACPI +__printf(3, 4) +void acpi_handle_printk(const char *level, acpi_handle handle, + const char *fmt, ...); +#else /* !CONFIG_ACPI */ +static inline __printf(3, 4) void +acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} +#endif /* !CONFIG_ACPI */ + +#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG) +__printf(3, 4) +void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...); +#else +#define __acpi_handle_debug(descriptor, handle, fmt, ...) \ + acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); +#endif + +/* + * acpi_handle_: Print message with ACPI prefix and object path + * + * These interfaces acquire the global namespace mutex to obtain an object + * path. In interrupt context, it shows the object path as . + */ +#define acpi_handle_emerg(handle, fmt, ...) \ + acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_alert(handle, fmt, ...) \ + acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_crit(handle, fmt, ...) \ + acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_err(handle, fmt, ...) \ + acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_warn(handle, fmt, ...) \ + acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_notice(handle, fmt, ...) \ + acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_info(handle, fmt, ...) \ + acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__) + +#if defined(DEBUG) +#define acpi_handle_debug(handle, fmt, ...) \ + acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__) +#else +#if defined(CONFIG_DYNAMIC_DEBUG) +#define acpi_handle_debug(handle, fmt, ...) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ + __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \ + ##__VA_ARGS__); \ +} while (0) +#else +#define acpi_handle_debug(handle, fmt, ...) 
\ +({ \ + if (0) \ + acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \ + 0; \ +}) +#endif +#endif + +struct acpi_gpio_params { + unsigned int crs_entry_index; + unsigned int line_index; + bool active_low; +}; + +struct acpi_gpio_mapping { + const char *name; + const struct acpi_gpio_params *data; + unsigned int size; + +/* Ignore IoRestriction field */ +#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) + + unsigned int quirks; +}; + +#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) +int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios); + +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) +{ + if (adev) + adev->driver_gpios = NULL; +} + +int devm_acpi_dev_add_driver_gpios(struct device *dev, + const struct acpi_gpio_mapping *gpios); +void devm_acpi_dev_remove_driver_gpios(struct device *dev); + +bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, + struct acpi_resource_gpio **agpio); +int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index); +#else +static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios) +{ + return -ENXIO; +} +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} + +static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, + const struct acpi_gpio_mapping *gpios) +{ + return -ENXIO; +} +static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {} + +static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, + struct acpi_resource_gpio **agpio) +{ + return false; +} +static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) +{ + return -ENXIO; +} +#endif + +/* Device properties */ + +#ifdef CONFIG_ACPI +int acpi_dev_get_property(const struct acpi_device *adev, const char *name, + acpi_object_type type, const union acpi_object **obj); +int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, + const char *name, size_t index, size_t num_args, + struct fwnode_reference_args *args); + +static inline int acpi_node_get_property_reference( + const struct fwnode_handle *fwnode, + const char *name, size_t index, + struct fwnode_reference_args *args) +{ + return __acpi_node_get_property_reference(fwnode, name, index, + NR_FWNODE_REFERENCE_ARGS, args); +} + +int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, + void **valptr); +int acpi_dev_prop_read_single(struct acpi_device *adev, + const char *propname, enum dev_prop_type proptype, + void *val); +int acpi_node_prop_read(const struct fwnode_handle *fwnode, + const char *propname, enum dev_prop_type proptype, + void *val, size_t nval); +int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname, + enum dev_prop_type proptype, void *val, size_t nval); + +struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, + struct fwnode_handle *child); +struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode); + +struct acpi_probe_entry; +typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, + struct acpi_probe_entry *); + +#define ACPI_TABLE_ID_LEN 5 + +/** + * struct acpi_probe_entry - boot-time probing entry + * @id: ACPI table name + * @type: Optional subtable type to match + * (if @id contains subtables) + * @subtable_valid: Optional callback to check the validity of + * the subtable + * @probe_table: Callback to the driver being probed when table + * match is successful + * 
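The GPIO mapping structures above are easiest to read with an example; a hedged sketch of a driver-side table follows, where the "reset-gpios" name and the indices are made up:

static const struct acpi_gpio_params example_reset_gpio = { 0, 0, false };

static const struct acpi_gpio_mapping example_gpios[] = {
	/* name, params, number of params (size); quirks default to 0 */
	{ "reset-gpios", &example_reset_gpio, 1 },
	{ }
};

static int example_bind_gpios(struct acpi_device *adev)
{
	/* Afterwards a "reset" gpiod lookup resolves via _CRS entry 0, line 0. */
	return acpi_dev_add_driver_gpios(adev, example_gpios);
}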
@probe_subtbl: Callback to the driver being probed when table and + * subtable match (and optional callback is successful) + * @driver_data: Sideband data provided back to the driver + */ +struct acpi_probe_entry { + __u8 id[ACPI_TABLE_ID_LEN]; + __u8 type; + acpi_probe_entry_validate_subtbl subtable_valid; + union { + acpi_tbl_table_handler probe_table; + acpi_tbl_entry_handler probe_subtbl; + }; + kernel_ulong_t driver_data; +}; + +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ + static const struct acpi_probe_entry __acpi_probe_##name \ + __used __section(__##table##_acpi_probe_table) \ + = { \ + .id = table_id, \ + .type = subtable, \ + .subtable_valid = valid, \ + .probe_table = (acpi_tbl_table_handler)fn, \ + .driver_data = data, \ + } + +#define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table +#define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end + +int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr); + +#define acpi_probe_device_table(t) \ + ({ \ + extern struct acpi_probe_entry ACPI_PROBE_TABLE(t), \ + ACPI_PROBE_TABLE_END(t); \ + __acpi_probe_device_table(&ACPI_PROBE_TABLE(t), \ + (&ACPI_PROBE_TABLE_END(t) - \ + &ACPI_PROBE_TABLE(t))); \ + }) +#else +static inline int acpi_dev_get_property(struct acpi_device *adev, + const char *name, acpi_object_type type, + const union acpi_object **obj) +{ + return -ENXIO; +} + +static inline int +__acpi_node_get_property_reference(const struct fwnode_handle *fwnode, + const char *name, size_t index, size_t num_args, + struct fwnode_reference_args *args) +{ + return -ENXIO; +} + +static inline int +acpi_node_get_property_reference(const struct fwnode_handle *fwnode, + const char *name, size_t index, + struct fwnode_reference_args *args) +{ + return -ENXIO; +} + +static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode, + const char *propname, + void **valptr) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_get(const struct acpi_device *adev, + const char *propname, + void **valptr) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read_single(const struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val) +{ + return -ENXIO; +} + +static inline int acpi_node_prop_read(const struct fwnode_handle *fwnode, + const char *propname, + enum dev_prop_type proptype, + void *val, size_t nval) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read(const struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val, size_t nval) +{ + return -ENXIO; +} + +static inline struct fwnode_handle * +acpi_get_next_subnode(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) +{ + return NULL; +} + +static inline struct fwnode_handle * +acpi_node_get_parent(const struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline struct fwnode_handle * +acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode, + struct fwnode_handle *prev) +{ + return ERR_PTR(-ENXIO); +} + +static inline int +acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode, + struct fwnode_handle **remote, + struct fwnode_handle **port, + struct fwnode_handle **endpoint) +{ + return -ENXIO; +} + +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ + static const void * __acpi_table_##name[] \ + __attribute__((unused)) \ + = { (void *) table_id, \ + (void *) subtable, \ + (void *) valid, \ + (void *) fn, \ + (void *) data } + +#define 
acpi_probe_device_table(t) ({ int __r = 0; __r;}) +#endif + +#ifdef CONFIG_ACPI_TABLE_UPGRADE +void acpi_table_upgrade(void); +#else +static inline void acpi_table_upgrade(void) { } +#endif + +#if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG) +extern bool acpi_has_watchdog(void); +#else +static inline bool acpi_has_watchdog(void) { return false; } +#endif + +#ifdef CONFIG_ACPI_SPCR_TABLE +extern bool qdf2400_e44_present; +int acpi_parse_spcr(bool enable_earlycon, bool enable_console); +#else +static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console) +{ + return 0; +} +#endif + +#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI) +int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res); +#else +static inline +int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res) +{ + return -EINVAL; +} +#endif + +#ifdef CONFIG_ACPI_LPIT +int lpit_read_residency_count_address(u64 *address); +#else +static inline int lpit_read_residency_count_address(u64 *address) +{ + return -EINVAL; +} +#endif + +#ifdef CONFIG_ACPI_PPTT +int acpi_pptt_cpu_is_thread(unsigned int cpu); +int find_acpi_cpu_topology(unsigned int cpu, int level); +int find_acpi_cpu_topology_package(unsigned int cpu); +int find_acpi_cpu_cache_topology(unsigned int cpu, int level); +#else +static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) +{ + return -EINVAL; +} +static inline int find_acpi_cpu_topology(unsigned int cpu, int level) +{ + return -EINVAL; +} +static inline int find_acpi_cpu_topology_package(unsigned int cpu) +{ + return -EINVAL; +} +static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level) +{ + return -EINVAL; +} +#endif + +#endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h new file mode 100644 index 000000000..329436d38 --- /dev/null +++ b/include/linux/acpi_dma.h @@ -0,0 +1,121 @@ +/* + * ACPI helpers for DMA request / controller + * + * Based on of_dma.h + * + * Copyright (C) 2013, Intel Corporation + * Author: Andy Shevchenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
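A small sketch of how the PPTT helpers declared above might be combined; the wrapper function is hypothetical:

static bool example_same_package(unsigned int cpu_a, unsigned int cpu_b)
{
	int pkg_a = find_acpi_cpu_topology_package(cpu_a);
	int pkg_b = find_acpi_cpu_topology_package(cpu_b);

	/* A negative return means the PPTT did not describe this CPU. */
	if (pkg_a < 0 || pkg_b < 0)
		return false;

	return pkg_a == pkg_b;
}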
+ */ + +#ifndef __LINUX_ACPI_DMA_H +#define __LINUX_ACPI_DMA_H + +#include +#include +#include +#include + +/** + * struct acpi_dma_spec - slave device DMA resources + * @chan_id: channel unique id + * @slave_id: request line unique id + * @dev: struct device of the DMA controller to be used in the filter + * function + */ +struct acpi_dma_spec { + int chan_id; + int slave_id; + struct device *dev; +}; + +/** + * struct acpi_dma - representation of the registered DMAC + * @dma_controllers: linked list node + * @dev: struct device of this controller + * @acpi_dma_xlate: callback function to find a suitable channel + * @data: private data used by a callback function + * @base_request_line: first supported request line (CSRT) + * @end_request_line: last supported request line (CSRT) + */ +struct acpi_dma { + struct list_head dma_controllers; + struct device *dev; + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *); + void *data; + unsigned short base_request_line; + unsigned short end_request_line; +}; + +/* Used with acpi_dma_simple_xlate() */ +struct acpi_dma_filter_info { + dma_cap_mask_t dma_cap; + dma_filter_fn filter_fn; +}; + +#ifdef CONFIG_DMA_ACPI + +int acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data); +int acpi_dma_controller_free(struct device *dev); +int devm_acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data); +void devm_acpi_dma_controller_free(struct device *dev); + +struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, + size_t index); +struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, + const char *name); + +struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, + struct acpi_dma *adma); +#else + +static inline int acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data) +{ + return -ENODEV; +} +static inline int acpi_dma_controller_free(struct device *dev) +{ + return -ENODEV; +} +static inline int devm_acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data) +{ + return -ENODEV; +} +static inline void devm_acpi_dma_controller_free(struct device *dev) +{ +} + +static inline struct dma_chan *acpi_dma_request_slave_chan_by_index( + struct device *dev, size_t index) +{ + return ERR_PTR(-ENODEV); +} +static inline struct dma_chan *acpi_dma_request_slave_chan_by_name( + struct device *dev, const char *name) +{ + return ERR_PTR(-ENODEV); +} + +#define acpi_dma_simple_xlate NULL + +#endif + +#define acpi_dma_request_slave_channel acpi_dma_request_slave_chan_by_index + +#endif /* __LINUX_ACPI_DMA_H */ diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h new file mode 100644 index 000000000..38cd77b39 --- /dev/null +++ b/include/linux/acpi_iort.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2016, Semihalf + * Author: Tomasz Nowicki + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
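To illustrate both sides of the interface defined above, a hedged sketch of a controller registering itself and a client requesting a channel; the translate-function body and the "tx" channel name are placeholders:

static struct dma_chan *example_xlate(struct acpi_dma_spec *spec,
				      struct acpi_dma *adma)
{
	/* A real controller would match spec->chan_id / spec->slave_id here. */
	return NULL;
}

static int example_dmac_probe(struct device *dev)
{
	return devm_acpi_dma_controller_register(dev, example_xlate, NULL);
}

static struct dma_chan *example_client_channel(struct device *dev)
{
	return acpi_dma_request_slave_chan_by_name(dev, "tx");
}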
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef __ACPI_IORT_H__ +#define __ACPI_IORT_H__ + +#include +#include +#include + +#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL) +#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL) + +int iort_register_domain_token(int trans_id, phys_addr_t base, + struct fwnode_handle *fw_node); +void iort_deregister_domain_token(int trans_id); +struct fwnode_handle *iort_find_domain_token(int trans_id); +#ifdef CONFIG_ACPI_IORT +void acpi_iort_init(void); +u32 iort_msi_map_rid(struct device *dev, u32 req_id); +struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); +void acpi_configure_pmsi_domain(struct device *dev); +int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); +/* IOMMU interface */ +void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size); +const struct iommu_ops *iort_iommu_configure(struct device *dev); +int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); +#else +static inline void acpi_iort_init(void) { } +static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) +{ return req_id; } +static inline struct irq_domain *iort_get_device_domain(struct device *dev, + u32 req_id) +{ return NULL; } +static inline void acpi_configure_pmsi_domain(struct device *dev) { } +/* IOMMU interface */ +static inline void iort_dma_setup(struct device *dev, u64 *dma_addr, + u64 *size) { } +static inline const struct iommu_ops *iort_iommu_configure( + struct device *dev) +{ return NULL; } +static inline +int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) +{ return 0; } +#endif + +#endif /* __ACPI_IORT_H__ */ diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h new file mode 100644 index 000000000..50d88bf14 --- /dev/null +++ b/include/linux/acpi_pmtmr.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ACPI_PMTMR_H_ +#define _ACPI_PMTMR_H_ + +#include + +/* Number of PMTMR ticks expected during calibration run */ +#define PMTMR_TICKS_PER_SEC 3579545 + +/* limit it to 24 bits */ +#define ACPI_PM_MASK CLOCKSOURCE_MASK(24) + +/* Overrun value */ +#define ACPI_PM_OVRRUN (1<<24) + +#ifdef CONFIG_X86_PM_TIMER + +extern u32 acpi_pm_read_verified(void); +extern u32 pmtmr_ioport; + +static inline u32 acpi_pm_read_early(void) +{ + if (!pmtmr_ioport) + return 0; + /* mask the output to 24 bits */ + return acpi_pm_read_verified() & ACPI_PM_MASK; +} + +#else + +static inline u32 acpi_pm_read_early(void) +{ + return 0; +} + +#endif + +#endif + diff --git a/include/linux/adb.h b/include/linux/adb.h new file mode 100644 index 000000000..f6306fc86 --- /dev/null +++ b/include/linux/adb.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for ADB (Apple Desktop Bus) support. 
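As a usage sketch of the PM-timer helpers above, measuring a short interval while allowing for a single 24-bit wrap; everything here is illustrative:

static u32 example_pmtmr_elapsed_ticks(void)
{
	u32 start = acpi_pm_read_early();
	u32 end;

	/* ... the code being timed runs here ... */

	end = acpi_pm_read_early();

	/* The counter is only 24 bits wide, so mask the difference. */
	return (end - start) & (u32)ACPI_PM_MASK;
}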
+ */ +#ifndef __ADB_H +#define __ADB_H + +#include + + +struct adb_request { + unsigned char data[32]; + int nbytes; + unsigned char reply[32]; + int reply_len; + unsigned char reply_expected; + unsigned char sent; + unsigned char complete; + void (*done)(struct adb_request *); + void *arg; + struct adb_request *next; +}; + +struct adb_ids { + int nids; + unsigned char id[16]; +}; + +/* Structure which encapsulates a low-level ADB driver */ + +struct adb_driver { + char name[16]; + int (*probe)(void); + int (*init)(void); + int (*send_request)(struct adb_request *req, int sync); + int (*autopoll)(int devs); + void (*poll)(void); + int (*reset_bus)(void); +}; + +/* Values for adb_request flags */ +#define ADBREQ_REPLY 1 /* expect reply */ +#define ADBREQ_SYNC 2 /* poll until done */ +#define ADBREQ_NOSEND 4 /* build the request, but don't send it */ + +/* Messages sent thru the client_list notifier. You should NOT stop + the operation, at least not with this version */ +enum adb_message { + ADB_MSG_POWERDOWN, /* Currently called before sleep only */ + ADB_MSG_PRE_RESET, /* Called before resetting the bus */ + ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */ +}; +extern struct blocking_notifier_head adb_client_list; + +int adb_request(struct adb_request *req, void (*done)(struct adb_request *), + int flags, int nbytes, ...); +int adb_register(int default_id,int handler_id,struct adb_ids *ids, + void (*handler)(unsigned char *, int, int)); +int adb_unregister(int index); +void adb_poll(void); +void adb_input(unsigned char *, int, int); +int adb_reset_bus(void); + +int adb_try_handler_change(int address, int new_id); +int adb_get_infos(int address, int *original_address, int *handler_id); + +#endif /* __ADB_H */ diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h new file mode 100644 index 000000000..4836e382a --- /dev/null +++ b/include/linux/adfs_fs.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ADFS_FS_H +#define _ADFS_FS_H + +#include + +/* + * Calculate the boot block checksum on an ADFS drive. Note that this will + * appear to be correct if the sector contains all zeros, so also check that + * the disk size is non-zero!!! + */ +static inline int adfs_checkbblk(unsigned char *ptr) +{ + unsigned int result = 0; + unsigned char *p = ptr + 511; + + do { + result = (result & 0xff) + (result >> 8); + result = result + *--p; + } while (p != ptr); + + return (result & 0xff) != ptr[511]; +} +#endif diff --git a/include/linux/aer.h b/include/linux/aer.h new file mode 100644 index 000000000..514bffa11 --- /dev/null +++ b/include/linux/aer.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2006 Intel Corp. 
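A hedged sketch of issuing a synchronous request through the ADB API above; the single command byte is a placeholder, not a real device address/register encoding:

static int example_adb_transaction(void)
{
	struct adb_request req;
	int ret;

	/* One command byte; wait for completion and expect a reply. */
	ret = adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, 0x01);
	if (ret)
		return ret;

	return req.reply_len;	/* number of bytes placed in req.reply[] */
}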
+ * Tom Long Nguyen (tom.l.nguyen@intel.com) + * Zhang Yanmin (yanmin.zhang@intel.com) + */ + +#ifndef _AER_H_ +#define _AER_H_ + +#include +#include + +#define AER_NONFATAL 0 +#define AER_FATAL 1 +#define AER_CORRECTABLE 2 +#define DPC_FATAL 3 + +struct pci_dev; + +struct aer_header_log_regs { + unsigned int dw0; + unsigned int dw1; + unsigned int dw2; + unsigned int dw3; +}; + +struct aer_capability_regs { + u32 header; + u32 uncor_status; + u32 uncor_mask; + u32 uncor_severity; + u32 cor_status; + u32 cor_mask; + u32 cap_control; + struct aer_header_log_regs header_log; + u32 root_command; + u32 root_status; + u16 cor_err_source; + u16 uncor_err_source; +}; + +#if defined(CONFIG_PCIEAER) +/* PCIe port driver needs this function to enable AER */ +int pci_enable_pcie_error_reporting(struct pci_dev *dev); +int pci_disable_pcie_error_reporting(struct pci_dev *dev); +int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); +int pci_cleanup_aer_error_status_regs(struct pci_dev *dev); +#else +static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) +{ + return -EINVAL; +} +#endif + +void cper_print_aer(struct pci_dev *dev, int aer_severity, + struct aer_capability_regs *aer); +int cper_severity_to_aer(int cper_severity); +void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, + int severity, struct aer_capability_regs *aer_regs); +#endif //_AER_H_ + diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h new file mode 100644 index 000000000..eaf6cd75a --- /dev/null +++ b/include/linux/agp_backend.h @@ -0,0 +1,109 @@ +/* + * AGPGART backend specific includes. Not for userspace consumption. + * + * Copyright (C) 2004 Silicon Graphics, Inc. + * Copyright (C) 2002-2003 Dave Jones + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
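A short sketch of the error-reporting helpers above as a PCI driver could call them from its probe/remove paths; the driver function names are hypothetical:

static int example_probe(struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_pcie_error_reporting(pdev);
	if (ret)
		dev_info(&pdev->dev, "AER reporting unavailable (%d)\n", ret);

	/* ... regular device setup continues regardless ... */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_pcie_error_reporting(pdev);
}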
+ * + */ + +#ifndef _AGP_BACKEND_H +#define _AGP_BACKEND_H 1 + +#include + +enum chipset_type { + NOT_SUPPORTED, + SUPPORTED, +}; + +struct agp_version { + u16 major; + u16 minor; +}; + +struct agp_kern_info { + struct agp_version version; + struct pci_dev *device; + enum chipset_type chipset; + unsigned long mode; + unsigned long aper_base; + size_t aper_size; + int max_memory; /* In pages */ + int current_memory; + bool cant_use_aperture; + unsigned long page_mask; + const struct vm_operations_struct *vm_ops; +}; + +/* + * The agp_memory structure has information about the block of agp memory + * allocated. A caller may manipulate the next and prev pointers to link + * each allocated item into a list. These pointers are ignored by the backend. + * Everything else should never be written to, but the caller may read any of + * the items to determine the status of this block of agp memory. + */ + +struct agp_bridge_data; + +struct agp_memory { + struct agp_memory *next; + struct agp_memory *prev; + struct agp_bridge_data *bridge; + struct page **pages; + size_t page_count; + int key; + int num_scratch_pages; + off_t pg_start; + u32 type; + u32 physical; + bool is_bound; + bool is_flushed; + /* list of agp_memory mapped to the aperture */ + struct list_head mapped_list; + /* DMA-mapped addresses */ + struct scatterlist *sg_list; + int num_sg; +}; + +#define AGP_NORMAL_MEMORY 0 + +#define AGP_USER_TYPES (1 << 16) +#define AGP_USER_MEMORY (AGP_USER_TYPES) +#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) + +extern struct agp_bridge_data *agp_bridge; +extern struct list_head agp_bridges; + +extern struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *); + +extern void agp_free_memory(struct agp_memory *); +extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, u32); +extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); +extern int agp_bind_memory(struct agp_memory *, off_t); +extern int agp_unbind_memory(struct agp_memory *); +extern void agp_enable(struct agp_bridge_data *, u32); +extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); +extern void agp_backend_release(struct agp_bridge_data *); + +#endif /* _AGP_BACKEND_H */ diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h new file mode 100644 index 000000000..c6b61ca97 --- /dev/null +++ b/include/linux/agpgart.h @@ -0,0 +1,130 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _AGP_H +#define _AGP_H 1 + +#include +#include +#include + +#define AGPGART_MINOR 175 + +struct agp_info { + struct agp_version version; /* version of the driver */ + u32 bridge_id; /* bridge vendor/device */ + u32 agp_mode; /* mode info of bridge */ + unsigned long aper_base;/* base of aperture */ + size_t aper_size; /* size of aperture */ + size_t pg_total; /* max pages (swap + system) */ + size_t pg_system; /* max pages (system) */ + size_t pg_used; /* current pages used */ +}; + +struct agp_setup { + u32 agp_mode; /* mode info of bridge */ +}; + +/* + * The "prot" down below needs still a "sleep" flag somehow ... + */ +struct agp_segment { + off_t pg_start; /* starting page to populate */ + size_t pg_count; /* number of pages */ + int prot; /* prot flags for mmap */ +}; + +struct agp_segment_priv { + off_t pg_start; + size_t pg_count; + pgprot_t prot; +}; + +struct agp_region { + pid_t pid; /* pid of process */ + size_t seg_count; /* number of segments */ + struct agp_segment *seg_list; +}; + +struct agp_allocate { + int key; /* tag of allocation */ + size_t pg_count; /* number of pages */ + u32 type; /* 0 == normal, other devspec */ + u32 physical; /* device specific (some devices + * need a phys address of the + * actual page behind the gatt + * table) */ +}; + +struct agp_bind { + int key; /* tag of allocation */ + off_t pg_start; /* starting page to populate */ +}; + +struct agp_unbind { + int key; /* tag of allocation */ + u32 priority; /* priority for paging out */ +}; + +struct agp_client { + struct agp_client *next; + struct agp_client *prev; + pid_t pid; + int num_segments; + struct agp_segment_priv **segments; +}; + +struct agp_controller { + struct agp_controller *next; + struct agp_controller *prev; + pid_t pid; + int num_clients; + struct agp_memory *pool; + struct agp_client *clients; +}; + +#define AGP_FF_ALLOW_CLIENT 0 +#define AGP_FF_ALLOW_CONTROLLER 1 +#define AGP_FF_IS_CLIENT 2 +#define AGP_FF_IS_CONTROLLER 3 +#define AGP_FF_IS_VALID 4 + +struct agp_file_private { + struct agp_file_private *next; + struct agp_file_private *prev; + pid_t my_pid; + unsigned long access_flags; /* long req'd for set_bit --RR */ +}; + +struct agp_front_data { + struct mutex agp_mutex; + struct agp_controller *current_controller; + struct agp_controller *controllers; + struct agp_file_private *file_priv_list; + bool used_by_controller; + bool backend_acquired; +}; + +#endif /* _AGP_H */ diff --git a/include/linux/ahci-remap.h b/include/linux/ahci-remap.h new file mode 100644 index 000000000..230c871ba --- /dev/null +++ b/include/linux/ahci-remap.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_AHCI_REMAP_H +#define _LINUX_AHCI_REMAP_H + +#include + +#define AHCI_VSCAP 0xa4 +#define AHCI_REMAP_CAP 0x800 + +/* device class code */ +#define AHCI_REMAP_N_DCC 0x880 + +/* remap-device base relative to ahci-bar */ +#define AHCI_REMAP_N_OFFSET SZ_16K +#define AHCI_REMAP_N_SIZE SZ_16K + +#define AHCI_MAX_REMAP 3 + +static inline unsigned int ahci_remap_dcc(int i) +{ + return AHCI_REMAP_N_DCC + i * 0x80; +} + +static inline unsigned int ahci_remap_base(int i) +{ + return AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE; +} + +#endif /* _LINUX_AHCI_REMAP_H */ diff --git 
a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h new file mode 100644 index 000000000..345c3b510 --- /dev/null +++ b/include/linux/ahci_platform.h @@ -0,0 +1,50 @@ +/* + * AHCI SATA platform driver + * + * Copyright 2004-2005 Red Hat, Inc. + * Jeff Garzik + * Copyright 2010 MontaVista Software, LLC. + * Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + */ + +#ifndef _AHCI_PLATFORM_H +#define _AHCI_PLATFORM_H + +#include + +struct device; +struct ata_port_info; +struct ahci_host_priv; +struct platform_device; +struct scsi_host_template; + +int ahci_platform_enable_phys(struct ahci_host_priv *hpriv); +void ahci_platform_disable_phys(struct ahci_host_priv *hpriv); +int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); +void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); +int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv); +void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv); +int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); +void ahci_platform_disable_resources(struct ahci_host_priv *hpriv); +struct ahci_host_priv *ahci_platform_get_resources( + struct platform_device *pdev, unsigned int flags); +int ahci_platform_init_host(struct platform_device *pdev, + struct ahci_host_priv *hpriv, + const struct ata_port_info *pi_template, + struct scsi_host_template *sht); + +void ahci_platform_shutdown(struct platform_device *pdev); + +int ahci_platform_suspend_host(struct device *dev); +int ahci_platform_resume_host(struct device *dev); +int ahci_platform_suspend(struct device *dev); +int ahci_platform_resume(struct device *dev); + +#define AHCI_PLATFORM_GET_RESETS 0x01 + +#endif /* _AHCI_PLATFORM_H */ diff --git a/include/linux/aio.h b/include/linux/aio.h new file mode 100644 index 000000000..b83e68dd0 --- /dev/null +++ b/include/linux/aio.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX__AIO_H +#define __LINUX__AIO_H + +#include + +struct kioctx; +struct kiocb; +struct mm_struct; + +typedef int (kiocb_cancel_fn)(struct kiocb *); + +/* prototypes */ +#ifdef CONFIG_AIO +extern void exit_aio(struct mm_struct *mm); +void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel); +#else +static inline void exit_aio(struct mm_struct *mm) { } +static inline void kiocb_set_cancel_fn(struct kiocb *req, + kiocb_cancel_fn *cancel) { } +#endif /* CONFIG_AIO */ + +/* for sysctl: */ +extern unsigned long aio_nr; +extern unsigned long aio_max_nr; + +#endif /* __LINUX__AIO_H */ diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h new file mode 100644 index 000000000..0760ca1cb --- /dev/null +++ b/include/linux/alarmtimer.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ALARMTIMER_H +#define _LINUX_ALARMTIMER_H + +#include +#include +#include +#include + +enum alarmtimer_type { + ALARM_REALTIME, + ALARM_BOOTTIME, + + /* Supported types end here */ + ALARM_NUMTYPE, + + /* Used for tracing information. No usable types. 
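The ahci_platform.h helpers above follow a get/enable/init pattern. A minimal probe sketch, assuming the usual libata port template and SCSI host template (example_port_info, example_sht) are defined elsewhere by the driver; every example_* name is a placeholder.

#include <linux/ahci_platform.h>
#include <linux/platform_device.h>
#include <linux/err.h>

extern const struct ata_port_info example_port_info;	/* assumed, from the driver */
extern struct scsi_host_template example_sht;		/* assumed, from the driver */

static int example_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev, AHCI_PLATFORM_GET_RESETS);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	/* Regulators, clocks and PHYs (and resets, when requested above). */
	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		return rc;

	rc = ahci_platform_init_host(pdev, hpriv, &example_port_info, &example_sht);
	if (rc)
		ahci_platform_disable_resources(hpriv);

	return rc;
}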
*/ + ALARM_REALTIME_FREEZER, + ALARM_BOOTTIME_FREEZER, +}; + +enum alarmtimer_restart { + ALARMTIMER_NORESTART, + ALARMTIMER_RESTART, +}; + + +#define ALARMTIMER_STATE_INACTIVE 0x00 +#define ALARMTIMER_STATE_ENQUEUED 0x01 + +/** + * struct alarm - Alarm timer structure + * @node: timerqueue node for adding to the event list this value + * also includes the expiration time. + * @timer: hrtimer used to schedule events while running + * @function: Function pointer to be executed when the timer fires. + * @type: Alarm type (BOOTTIME/REALTIME). + * @state: Flag that represents if the alarm is set to fire or not. + * @data: Internal data value. + */ +struct alarm { + struct timerqueue_node node; + struct hrtimer timer; + enum alarmtimer_restart (*function)(struct alarm *, ktime_t now); + enum alarmtimer_type type; + int state; + void *data; +}; + +void alarm_init(struct alarm *alarm, enum alarmtimer_type type, + enum alarmtimer_restart (*function)(struct alarm *, ktime_t)); +void alarm_start(struct alarm *alarm, ktime_t start); +void alarm_start_relative(struct alarm *alarm, ktime_t start); +void alarm_restart(struct alarm *alarm); +int alarm_try_to_cancel(struct alarm *alarm); +int alarm_cancel(struct alarm *alarm); + +u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval); +u64 alarm_forward_now(struct alarm *alarm, ktime_t interval); +ktime_t alarm_expires_remaining(const struct alarm *alarm); + +/* Provide way to access the rtc device being used by alarmtimers */ +struct rtc_device *alarmtimer_get_rtcdev(void); + +#endif diff --git a/include/linux/altera_jtaguart.h b/include/linux/altera_jtaguart.h new file mode 100644 index 000000000..527a142cd --- /dev/null +++ b/include/linux/altera_jtaguart.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * altera_jtaguart.h -- Altera JTAG UART driver defines. + */ + +#ifndef __ALTJUART_H +#define __ALTJUART_H + +#define ALTERA_JTAGUART_MAJOR 204 +#define ALTERA_JTAGUART_MINOR 186 + +struct altera_jtaguart_platform_uart { + unsigned long mapbase; /* Physical address base */ + unsigned int irq; /* Interrupt vector */ +}; + +#endif /* __ALTJUART_H */ diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h new file mode 100644 index 000000000..3eb73b8c4 --- /dev/null +++ b/include/linux/altera_uart.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * altera_uart.h -- Altera UART driver defines. + */ + +#ifndef __ALTUART_H +#define __ALTUART_H + +struct altera_uart_platform_uart { + unsigned long mapbase; /* Physical address base */ + unsigned int irq; /* Interrupt vector */ + unsigned int uartclk; /* UART clock rate */ + unsigned int bus_shift; /* Bus shift (address stride) */ +}; + +#endif /* __ALTUART_H */ diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h new file mode 100644 index 000000000..d143c13be --- /dev/null +++ b/include/linux/amba/bus.h @@ -0,0 +1,181 @@ +/* + * linux/include/amba/bus.h + * + * This device type deals with ARM PrimeCells and anything else that + * presents a proper CID (0xB105F00D) at the end of the I/O register + * region or that is derived from a PrimeCell. + * + * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
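A minimal sketch of the alarmtimer API declared above: initialise a struct alarm with a callback, then arm it relative to now. ALARM_BOOTTIME and the five-second delay are arbitrary example choices.

#include <linux/alarmtimer.h>
#include <linux/ktime.h>
#include <linux/kernel.h>

static struct alarm example_alarm;

/* Illustrative callback: log once and do not re-arm. */
static enum alarmtimer_restart example_alarm_fn(struct alarm *alarm, ktime_t now)
{
	pr_info("example alarm fired\n");
	return ALARMTIMER_NORESTART;
}

static void example_arm_alarm(void)
{
	alarm_init(&example_alarm, ALARM_BOOTTIME, example_alarm_fn);
	alarm_start_relative(&example_alarm, ktime_set(5, 0));	/* ~5 s from now */
}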
+ */ +#ifndef ASMARM_AMBA_H +#define ASMARM_AMBA_H + +#include +#include +#include +#include +#include +#include + +#define AMBA_NR_IRQS 9 +#define AMBA_CID 0xb105f00d +#define CORESIGHT_CID 0xb105900d + +struct clk; + +struct amba_device { + struct device dev; + struct resource res; + struct clk *pclk; + unsigned int periphid; + unsigned int irq[AMBA_NR_IRQS]; + char *driver_override; +}; + +struct amba_driver { + struct device_driver drv; + int (*probe)(struct amba_device *, const struct amba_id *); + int (*remove)(struct amba_device *); + void (*shutdown)(struct amba_device *); + const struct amba_id *id_table; +}; + +/* + * Constants for the designer field of the Peripheral ID register. When bit 7 + * is set to '1', bits [6:0] should be the JEP106 manufacturer identity code. + */ +enum amba_vendor { + AMBA_VENDOR_ARM = 0x41, + AMBA_VENDOR_ST = 0x80, + AMBA_VENDOR_QCOM = 0x51, + AMBA_VENDOR_LSI = 0xb6, + AMBA_VENDOR_LINUX = 0xfe, /* This value is not official */ +}; + +/* This is used to generate pseudo-ID for AMBA device */ +#define AMBA_LINUX_ID(conf, rev, part) \ + (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \ + AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff)) + +extern struct bus_type amba_bustype; + +#define to_amba_device(d) container_of(d, struct amba_device, dev) + +#define amba_get_drvdata(d) dev_get_drvdata(&d->dev) +#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p) + +int amba_driver_register(struct amba_driver *); +void amba_driver_unregister(struct amba_driver *); +struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); +void amba_device_put(struct amba_device *); +int amba_device_add(struct amba_device *, struct resource *); +int amba_device_register(struct amba_device *, struct resource *); +struct amba_device *amba_apb_device_add(struct device *parent, const char *name, + resource_size_t base, size_t size, + int irq1, int irq2, void *pdata, + unsigned int periphid); +struct amba_device *amba_ahb_device_add(struct device *parent, const char *name, + resource_size_t base, size_t size, + int irq1, int irq2, void *pdata, + unsigned int periphid); +struct amba_device * +amba_apb_device_add_res(struct device *parent, const char *name, + resource_size_t base, size_t size, int irq1, + int irq2, void *pdata, unsigned int periphid, + struct resource *resbase); +struct amba_device * +amba_ahb_device_add_res(struct device *parent, const char *name, + resource_size_t base, size_t size, int irq1, + int irq2, void *pdata, unsigned int periphid, + struct resource *resbase); +void amba_device_unregister(struct amba_device *); +struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int); +int amba_request_regions(struct amba_device *, const char *); +void amba_release_regions(struct amba_device *); + +static inline int amba_pclk_enable(struct amba_device *dev) +{ + return clk_enable(dev->pclk); +} + +static inline void amba_pclk_disable(struct amba_device *dev) +{ + clk_disable(dev->pclk); +} + +static inline int amba_pclk_prepare(struct amba_device *dev) +{ + return clk_prepare(dev->pclk); +} + +static inline void amba_pclk_unprepare(struct amba_device *dev) +{ + clk_unprepare(dev->pclk); +} + +/* Some drivers don't use the struct amba_device */ +#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) +#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) +#define AMBA_MANF_BITS(a) (((a) >> 12) & 0xff) +#define AMBA_PART_BITS(a) ((a) & 0xfff) + +#define amba_config(d) AMBA_CONFIG_BITS((d)->periphid) +#define amba_rev(d) 
AMBA_REV_BITS((d)->periphid) +#define amba_manf(d) AMBA_MANF_BITS((d)->periphid) +#define amba_part(d) AMBA_PART_BITS((d)->periphid) + +#define __AMBA_DEV(busid, data, mask) \ + { \ + .coherent_dma_mask = mask, \ + .init_name = busid, \ + .platform_data = data, \ + } + +/* + * APB devices do not themselves have the ability to address memory, + * so DMA masks should be zero (much like USB peripheral devices.) + * The DMA controller DMA masks should be used instead (much like + * USB host controllers in conventional PCs.) + */ +#define AMBA_APB_DEVICE(name, busid, id, base, irqs, data) \ +struct amba_device name##_device = { \ + .dev = __AMBA_DEV(busid, data, 0), \ + .res = DEFINE_RES_MEM(base, SZ_4K), \ + .irq = irqs, \ + .periphid = id, \ +} + +/* + * AHB devices are DMA capable, so set their DMA masks + */ +#define AMBA_AHB_DEVICE(name, busid, id, base, irqs, data) \ +struct amba_device name##_device = { \ + .dev = __AMBA_DEV(busid, data, ~0ULL), \ + .res = DEFINE_RES_MEM(base, SZ_4K), \ + .irq = irqs, \ + .periphid = id, \ +} + +/* + * module_amba_driver() - Helper macro for drivers that don't do anything + * special in module init/exit. This eliminates a lot of boilerplate. Each + * module may only use this macro once, and calling it replaces module_init() + * and module_exit() + */ +#define module_amba_driver(__amba_drv) \ + module_driver(__amba_drv, amba_driver_register, amba_driver_unregister) + +/* + * builtin_amba_driver() - Helper macro for drivers that don't do anything + * special in driver initcall. This eliminates a lot of boilerplate. Each + * driver may only use this macro once, and calling it replaces the instance + * device_initcall(). + */ +#define builtin_amba_driver(__amba_drv) \ + builtin_driver(__amba_drv, amba_driver_register) + +#endif diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h new file mode 100644 index 000000000..516a6fda8 --- /dev/null +++ b/include/linux/amba/clcd-regs.h @@ -0,0 +1,86 @@ +/* + * David A Rusling + * + * Copyright (C) 2001 ARM Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
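A skeleton AMBA driver using the structures and the module_amba_driver() helper above. The ID table entry (ARM designer code 0x41, part 0x000) and all example_* names are placeholders for a real PrimeCell ID.

#include <linux/amba/bus.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/device.h>

static const struct amba_id example_ids[] = {
	{
		.id	= 0x00041000,	/* placeholder: ARM-designed part 0x000 */
		.mask	= 0x000fffff,	/* match part + designer, any rev/config */
	},
	{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, example_ids);

static int example_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = amba_request_regions(adev, NULL);

	if (ret)
		return ret;
	dev_info(&adev->dev, "bound periphid %08x\n", adev->periphid);
	return 0;
}

static int example_remove(struct amba_device *adev)
{
	amba_release_regions(adev);
	return 0;
}

static struct amba_driver example_driver = {
	.drv		= { .name = "example-amba" },
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_ids,
};
module_amba_driver(example_driver);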
+ */ + +#ifndef AMBA_CLCD_REGS_H +#define AMBA_CLCD_REGS_H + +/* + * CLCD Controller Internal Register addresses + */ +#define CLCD_TIM0 0x00000000 +#define CLCD_TIM1 0x00000004 +#define CLCD_TIM2 0x00000008 +#define CLCD_TIM3 0x0000000c +#define CLCD_UBAS 0x00000010 +#define CLCD_LBAS 0x00000014 + +#define CLCD_PL110_IENB 0x00000018 +#define CLCD_PL110_CNTL 0x0000001c +#define CLCD_PL110_STAT 0x00000020 +#define CLCD_PL110_INTR 0x00000024 +#define CLCD_PL110_UCUR 0x00000028 +#define CLCD_PL110_LCUR 0x0000002C + +#define CLCD_PL111_CNTL 0x00000018 +#define CLCD_PL111_IENB 0x0000001c +#define CLCD_PL111_RIS 0x00000020 +#define CLCD_PL111_MIS 0x00000024 +#define CLCD_PL111_ICR 0x00000028 +#define CLCD_PL111_UCUR 0x0000002c +#define CLCD_PL111_LCUR 0x00000030 + +#define CLCD_PALL 0x00000200 +#define CLCD_PALETTE 0x00000200 + +#define TIM2_PCD_LO_MASK GENMASK(4, 0) +#define TIM2_PCD_LO_BITS 5 +#define TIM2_CLKSEL (1 << 5) +#define TIM2_IVS (1 << 11) +#define TIM2_IHS (1 << 12) +#define TIM2_IPC (1 << 13) +#define TIM2_IOE (1 << 14) +#define TIM2_BCD (1 << 26) +#define TIM2_PCD_HI_MASK GENMASK(31, 27) +#define TIM2_PCD_HI_BITS 5 +#define TIM2_PCD_HI_SHIFT 27 + +#define CNTL_LCDEN (1 << 0) +#define CNTL_LCDBPP1 (0 << 1) +#define CNTL_LCDBPP2 (1 << 1) +#define CNTL_LCDBPP4 (2 << 1) +#define CNTL_LCDBPP8 (3 << 1) +#define CNTL_LCDBPP16 (4 << 1) +#define CNTL_LCDBPP16_565 (6 << 1) +#define CNTL_LCDBPP16_444 (7 << 1) +#define CNTL_LCDBPP24 (5 << 1) +#define CNTL_LCDBW (1 << 4) +#define CNTL_LCDTFT (1 << 5) +#define CNTL_LCDMONO8 (1 << 6) +#define CNTL_LCDDUAL (1 << 7) +#define CNTL_BGR (1 << 8) +#define CNTL_BEBO (1 << 9) +#define CNTL_BEPO (1 << 10) +#define CNTL_LCDPWR (1 << 11) +#define CNTL_LCDVCOMP(x) ((x) << 12) +#define CNTL_LDMAFIFOTIME (1 << 15) +#define CNTL_WATERMARK (1 << 16) + +/* ST Microelectronics variant bits */ +#define CNTL_ST_1XBPP_444 0x0 +#define CNTL_ST_1XBPP_5551 (1 << 17) +#define CNTL_ST_1XBPP_565 (1 << 18) +#define CNTL_ST_CDWID_12 0x0 +#define CNTL_ST_CDWID_16 (1 << 19) +#define CNTL_ST_CDWID_18 (1 << 20) +#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20)) +#define CNTL_ST_CEAEN (1 << 21) +#define CNTL_ST_LCDBPP24_PACKED (6 << 1) + +#endif /* AMBA_CLCD_REGS_H */ diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h new file mode 100644 index 000000000..d0c3be77c --- /dev/null +++ b/include/linux/amba/clcd.h @@ -0,0 +1,321 @@ +/* + * linux/include/asm-arm/hardware/amba_clcd.h -- Integrator LCD panel. + * + * David A Rusling + * + * Copyright (C) 2001 ARM Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
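Two tiny helpers showing how the CLCD register fields above combine: the pixel-clock divisor is split across TIM2[4:0] and TIM2[31:27], and the CNTL_* bits are simply OR'd together. Both functions are illustrative sketches, not part of the kernel sources.

#include <linux/amba/clcd-regs.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Split a 10-bit pixel clock divisor into the low/high TIM2 fields. */
static inline u32 example_clcd_tim2_pcd(u32 pcd)
{
	u32 tim2 = pcd & TIM2_PCD_LO_MASK;

	tim2 |= ((pcd >> TIM2_PCD_LO_BITS) << TIM2_PCD_HI_SHIFT) & TIM2_PCD_HI_MASK;
	return tim2;
}

/* Control word for a powered, enabled RGB565 TFT panel. */
static inline u32 example_clcd_cntl_565_tft(void)
{
	return CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDBPP16_565 | CNTL_LCDPWR;
}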
+ */ +#include +#include + +enum { + /* individual formats */ + CLCD_CAP_RGB444 = (1 << 0), + CLCD_CAP_RGB5551 = (1 << 1), + CLCD_CAP_RGB565 = (1 << 2), + CLCD_CAP_RGB888 = (1 << 3), + CLCD_CAP_BGR444 = (1 << 4), + CLCD_CAP_BGR5551 = (1 << 5), + CLCD_CAP_BGR565 = (1 << 6), + CLCD_CAP_BGR888 = (1 << 7), + + /* connection layouts */ + CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444, + CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551, + CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565, + CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888, + + /* red/blue ordering */ + CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 | + CLCD_CAP_RGB565 | CLCD_CAP_RGB888, + CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 | + CLCD_CAP_BGR565 | CLCD_CAP_BGR888, + + CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB, +}; + +struct backlight_device; + +struct clcd_panel { + struct fb_videomode mode; + signed short width; /* width in mm */ + signed short height; /* height in mm */ + u32 tim2; + u32 tim3; + u32 cntl; + u32 caps; + unsigned int bpp:8, + fixedtimings:1, + grayscale:1; + unsigned int connector; + struct backlight_device *backlight; + /* + * If the B/R lines are switched between the CLCD + * and the panel we need to know this and not try to + * compensate with the BGR bit in the control register. + */ + bool bgr_connection; +}; + +struct clcd_regs { + u32 tim0; + u32 tim1; + u32 tim2; + u32 tim3; + u32 cntl; + unsigned long pixclock; +}; + +struct clcd_fb; + +/* + * the board-type specific routines + */ +struct clcd_board { + const char *name; + + /* + * Optional. Hardware capability flags. + */ + u32 caps; + + /* + * Optional. Check whether the var structure is acceptable + * for this display. + */ + int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var); + + /* + * Compulsory. Decode fb->fb.var into regs->*. In the case of + * fixed timing, set regs->* to the register values required. + */ + void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs); + + /* + * Optional. Disable any extra display hardware. + */ + void (*disable)(struct clcd_fb *); + + /* + * Optional. Enable any extra display hardware. + */ + void (*enable)(struct clcd_fb *); + + /* + * Setup platform specific parts of CLCD driver + */ + int (*setup)(struct clcd_fb *); + + /* + * mmap the framebuffer memory + */ + int (*mmap)(struct clcd_fb *, struct vm_area_struct *); + + /* + * Remove platform specific parts of CLCD driver + */ + void (*remove)(struct clcd_fb *); +}; + +struct amba_device; +struct clk; + +/** + * struct clcd_vendor_data - holds hardware (IP-block) vendor-specific + * variant information + * + * @clock_timregs: the CLCD needs to be clocked when accessing the + * timer registers, or the hardware will hang. + * @packed_24_bit_pixels: this variant supports 24bit packed pixel data, + * so that RGB accesses 3 bytes at a time, not just on even 32bit + * boundaries, packing the pixel data in memory. ST Microelectronics + * have this. + * @st_bitmux_control: ST Microelectronics have implemented output + * bit line multiplexing into the CLCD control register. This indicates + * that we need to use this. 
+ * @init_board: custom board init function for this variant + * @init_panel: custom panel init function for this variant + */ +struct clcd_vendor_data { + bool clock_timregs; + bool packed_24_bit_pixels; + bool st_bitmux_control; + int (*init_board)(struct amba_device *adev, + struct clcd_board *board); + int (*init_panel)(struct clcd_fb *fb, + struct device_node *panel); +}; + +/* this data structure describes each frame buffer device we find */ +struct clcd_fb { + struct fb_info fb; + struct amba_device *dev; + struct clk *clk; + struct clcd_vendor_data *vendor; + struct clcd_panel *panel; + struct clcd_board *board; + void *board_data; + void __iomem *regs; + u16 off_ienb; + u16 off_cntl; + u32 clcd_cntl; + u32 cmap[16]; + bool clk_enabled; +}; + +static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs) +{ + struct fb_var_screeninfo *var = &fb->fb.var; + u32 val, cpl; + + /* + * Program the CLCD controller registers and start the CLCD + */ + val = ((var->xres / 16) - 1) << 2; + val |= (var->hsync_len - 1) << 8; + val |= (var->right_margin - 1) << 16; + val |= (var->left_margin - 1) << 24; + regs->tim0 = val; + + val = var->yres; + if (fb->panel->cntl & CNTL_LCDDUAL) + val /= 2; + val -= 1; + val |= (var->vsync_len - 1) << 10; + val |= var->lower_margin << 16; + val |= var->upper_margin << 24; + regs->tim1 = val; + + val = fb->panel->tim2; + val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS; + val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS; + + cpl = var->xres_virtual; + if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */ + /* / 1 */; + else if (!var->grayscale) /* STN color */ + cpl = cpl * 8 / 3; + else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */ + cpl /= 8; + else /* STN monochrome, 4bit */ + cpl /= 4; + + regs->tim2 = val | ((cpl - 1) << 16); + + regs->tim3 = fb->panel->tim3; + + val = fb->panel->cntl; + if (var->grayscale) + val |= CNTL_LCDBW; + + if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) { + /* + * if board and panel supply capabilities, we can support + * changing BGR/RGB depending on supplied parameters. Here + * we switch to what the framebuffer is providing if need + * be, so if the framebuffer is BGR but the display connection + * is RGB (first case) we switch it around. Vice versa mutatis + * mutandis if the framebuffer is RGB but the display connection + * is BGR, we flip it around. + */ + if (var->red.offset == 0) + val &= ~CNTL_BGR; + else + val |= CNTL_BGR; + if (fb->panel->bgr_connection) + val ^= CNTL_BGR; + } + + switch (var->bits_per_pixel) { + case 1: + val |= CNTL_LCDBPP1; + break; + case 2: + val |= CNTL_LCDBPP2; + break; + case 4: + val |= CNTL_LCDBPP4; + break; + case 8: + val |= CNTL_LCDBPP8; + break; + case 16: + /* + * PL110 cannot choose between 5551 and 565 modes in its + * control register. It is possible to use 565 with + * custom external wiring. 
+ */ + if (amba_part(fb->dev) == 0x110 || + var->green.length == 5) + val |= CNTL_LCDBPP16; + else if (var->green.length == 6) + val |= CNTL_LCDBPP16_565; + else + val |= CNTL_LCDBPP16_444; + break; + case 24: + /* Modified variant supporting 24 bit packed pixels */ + val |= CNTL_ST_LCDBPP24_PACKED; + break; + case 32: + val |= CNTL_LCDBPP24; + break; + } + + regs->cntl = val; + regs->pixclock = var->pixclock; +} + +static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var) +{ + var->xres_virtual = var->xres = (var->xres + 15) & ~15; + var->yres_virtual = var->yres = (var->yres + 1) & ~1; + +#define CHECK(e,l,h) (var->e < l || var->e > h) + if (CHECK(right_margin, (5+1), 256) || /* back porch */ + CHECK(left_margin, (5+1), 256) || /* front porch */ + CHECK(hsync_len, (5+1), 256) || + var->xres > 4096 || + var->lower_margin > 255 || /* back porch */ + var->upper_margin > 255 || /* front porch */ + var->vsync_len > 32 || + var->yres > 1024) + return -EINVAL; +#undef CHECK + + /* single panel mode: PCD = max(PCD, 1) */ + /* dual panel mode: PCD = max(PCD, 5) */ + + /* + * You can't change the grayscale setting, and + * we can only do non-interlaced video. + */ + if (var->grayscale != fb->fb.var.grayscale || + (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED) + return -EINVAL; + +#define CHECK(e) (var->e != fb->fb.var.e) + if (fb->panel->fixedtimings && + (CHECK(xres) || + CHECK(yres) || + CHECK(bits_per_pixel) || + CHECK(pixclock) || + CHECK(left_margin) || + CHECK(right_margin) || + CHECK(upper_margin) || + CHECK(lower_margin) || + CHECK(hsync_len) || + CHECK(vsync_len) || + CHECK(sync))) + return -EINVAL; +#undef CHECK + + var->nonstd = 0; + var->accel_flags = 0; + + return 0; +} diff --git a/include/linux/amba/kmi.h b/include/linux/amba/kmi.h new file mode 100644 index 000000000..a39e5be75 --- /dev/null +++ b/include/linux/amba/kmi.h @@ -0,0 +1,92 @@ +/* + * linux/include/asm-arm/hardware/amba_kmi.h + * + * Internal header file for AMBA KMI ports + * + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
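Board code typically wires the generic clcdfb_check()/clcdfb_decode() helpers above straight into struct clcd_board; a minimal example, with the name and capability mask as placeholders.

#include <linux/amba/clcd.h>

static struct clcd_board example_clcd_board = {
	.name	= "example CLCD",
	.caps	= CLCD_CAP_565 | CLCD_CAP_888,	/* placeholder capabilities */
	.check	= clcdfb_check,
	.decode	= clcdfb_decode,
};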
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * --------------------------------------------------------------------------- + * From ARM PrimeCell(tm) PS2 Keyboard/Mouse Interface (PL050) Technical + * Reference Manual - ARM DDI 0143B - see http://www.arm.com/ + * --------------------------------------------------------------------------- + */ +#ifndef ASM_ARM_HARDWARE_AMBA_KMI_H +#define ASM_ARM_HARDWARE_AMBA_KMI_H + +/* + * KMI control register: + * KMICR_TYPE 0 = PS2/AT mode, 1 = No line control bit mode + * KMICR_RXINTREN 1 = enable RX interrupts + * KMICR_TXINTREN 1 = enable TX interrupts + * KMICR_EN 1 = enable KMI + * KMICR_FD 1 = force KMI data low + * KMICR_FC 1 = force KMI clock low + */ +#define KMICR (KMI_BASE + 0x00) +#define KMICR_TYPE (1 << 5) +#define KMICR_RXINTREN (1 << 4) +#define KMICR_TXINTREN (1 << 3) +#define KMICR_EN (1 << 2) +#define KMICR_FD (1 << 1) +#define KMICR_FC (1 << 0) + +/* + * KMI status register: + * KMISTAT_TXEMPTY 1 = transmitter register empty + * KMISTAT_TXBUSY 1 = currently sending data + * KMISTAT_RXFULL 1 = receiver register ready to be read + * KMISTAT_RXBUSY 1 = currently receiving data + * KMISTAT_RXPARITY parity of last databyte received + * KMISTAT_IC current level of KMI clock input + * KMISTAT_ID current level of KMI data input + */ +#define KMISTAT (KMI_BASE + 0x04) +#define KMISTAT_TXEMPTY (1 << 6) +#define KMISTAT_TXBUSY (1 << 5) +#define KMISTAT_RXFULL (1 << 4) +#define KMISTAT_RXBUSY (1 << 3) +#define KMISTAT_RXPARITY (1 << 2) +#define KMISTAT_IC (1 << 1) +#define KMISTAT_ID (1 << 0) + +/* + * KMI data register + */ +#define KMIDATA (KMI_BASE + 0x08) + +/* + * KMI clock divisor: to generate 8MHz internal clock + * div = (ref / 8MHz) - 1; 0 <= div <= 15 + */ +#define KMICLKDIV (KMI_BASE + 0x0c) + +/* + * KMI interrupt register: + * KMIIR_TXINTR 1 = transmit interrupt asserted + * KMIIR_RXINTR 1 = receive interrupt asserted + */ +#define KMIIR (KMI_BASE + 0x10) +#define KMIIR_TXINTR (1 << 1) +#define KMIIR_RXINTR (1 << 0) + +/* + * The size of the KMI primecell + */ +#define KMI_SIZE (0x100) + +#endif diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h new file mode 100644 index 000000000..da8357ba1 --- /dev/null +++ b/include/linux/amba/mmci.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/amba/mmci.h + */ +#ifndef AMBA_MMCI_H +#define AMBA_MMCI_H + +#include + +/** + * struct mmci_platform_data - platform configuration for the MMCI + * (also known as PL180) block. + * @ocr_mask: available voltages on the 4 pins from the block, this + * is ignored if a regulator is used, see the MMC_VDD_* masks in + * mmc/host.h + * @ios_handler: a callback function to act on specfic ios changes, + * used for example to control a levelshifter + * mask into a value to be binary (or set some other custom bits + * in MMCIPWR) or:ed and written into the MMCIPWR register of the + * block. May also control external power based on the power_mode. 
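A polled transmit sketch against the PL050 register map above. The header expresses the registers as offsets from KMI_BASE, which it does not define; here an ioremap()'d base is passed in explicitly and the 0x00/0x04/0x08 offsets correspond to KMICR, KMISTAT and KMIDATA. Illustrative only.

#include <linux/amba/kmi.h>
#include <linux/io.h>
#include <linux/types.h>

static void example_kmi_send(void __iomem *kmi_base, u8 byte)
{
	u8 stat;

	/* Enable the interface with RX/TX interrupts masked. */
	writeb(KMICR_EN, kmi_base + 0x00);		/* KMICR */

	/* Wait for the transmit register to drain, then write the byte. */
	do {
		stat = readb(kmi_base + 0x04);		/* KMISTAT */
	} while (!(stat & KMISTAT_TXEMPTY));

	writeb(byte, kmi_base + 0x08);			/* KMIDATA */
}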
+ * @status: if no GPIO read function was given to the block in + * gpio_wp (below) this function will be called to determine + * whether a card is present in the MMC slot or not + * @gpio_wp: read this GPIO pin to see if the card is write protected + * @gpio_cd: read this GPIO pin to detect card insertion + * @cd_invert: true if the gpio_cd pin value is active low + */ +struct mmci_platform_data { + unsigned int ocr_mask; + int (*ios_handler)(struct device *, struct mmc_ios *); + unsigned int (*status)(struct device *); + int gpio_wp; + int gpio_cd; + bool cd_invert; +}; + +#endif diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h new file mode 100644 index 000000000..854b7294f --- /dev/null +++ b/include/linux/amba/pl022.h @@ -0,0 +1,295 @@ +/* + * include/linux/amba/pl022.h + * + * Copyright (C) 2008-2009 ST-Ericsson AB + * Copyright (C) 2006 STMicroelectronics Pvt. Ltd. + * + * Author: Linus Walleij + * + * Initial version inspired by: + * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c + * Initial adoption to PL022 by: + * Sachin Verma + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SSP_PL022_H +#define _SSP_PL022_H + +#include + +/** + * whether SSP is in loopback mode or not + */ +enum ssp_loopback { + LOOPBACK_DISABLED, + LOOPBACK_ENABLED +}; + +/** + * enum ssp_interface - interfaces allowed for this SSP Controller + * @SSP_INTERFACE_MOTOROLA_SPI: Motorola Interface + * @SSP_INTERFACE_TI_SYNC_SERIAL: Texas Instrument Synchronous Serial + * interface + * @SSP_INTERFACE_NATIONAL_MICROWIRE: National Semiconductor Microwire + * interface + * @SSP_INTERFACE_UNIDIRECTIONAL: Unidirectional interface (STn8810 + * &STn8815 only) + */ +enum ssp_interface { + SSP_INTERFACE_MOTOROLA_SPI, + SSP_INTERFACE_TI_SYNC_SERIAL, + SSP_INTERFACE_NATIONAL_MICROWIRE, + SSP_INTERFACE_UNIDIRECTIONAL +}; + +/** + * enum ssp_hierarchy - whether SSP is configured as Master or Slave + */ +enum ssp_hierarchy { + SSP_MASTER, + SSP_SLAVE +}; + +/** + * enum ssp_clock_params - clock parameters, to set SSP clock at a + * desired freq + */ +struct ssp_clock_params { + u8 cpsdvsr; /* value from 2 to 254 (even only!) 
*/ + u8 scr; /* value from 0 to 255 */ +}; + +/** + * enum ssp_rx_endian - endianess of Rx FIFO Data + * this feature is only available in ST versionf of PL022 + */ +enum ssp_rx_endian { + SSP_RX_MSB, + SSP_RX_LSB +}; + +/** + * enum ssp_tx_endian - endianess of Tx FIFO Data + */ +enum ssp_tx_endian { + SSP_TX_MSB, + SSP_TX_LSB +}; + +/** + * enum ssp_data_size - number of bits in one data element + */ +enum ssp_data_size { + SSP_DATA_BITS_4 = 0x03, SSP_DATA_BITS_5, SSP_DATA_BITS_6, + SSP_DATA_BITS_7, SSP_DATA_BITS_8, SSP_DATA_BITS_9, + SSP_DATA_BITS_10, SSP_DATA_BITS_11, SSP_DATA_BITS_12, + SSP_DATA_BITS_13, SSP_DATA_BITS_14, SSP_DATA_BITS_15, + SSP_DATA_BITS_16, SSP_DATA_BITS_17, SSP_DATA_BITS_18, + SSP_DATA_BITS_19, SSP_DATA_BITS_20, SSP_DATA_BITS_21, + SSP_DATA_BITS_22, SSP_DATA_BITS_23, SSP_DATA_BITS_24, + SSP_DATA_BITS_25, SSP_DATA_BITS_26, SSP_DATA_BITS_27, + SSP_DATA_BITS_28, SSP_DATA_BITS_29, SSP_DATA_BITS_30, + SSP_DATA_BITS_31, SSP_DATA_BITS_32 +}; + +/** + * enum ssp_mode - SSP mode of operation (Communication modes) + */ +enum ssp_mode { + INTERRUPT_TRANSFER, + POLLING_TRANSFER, + DMA_TRANSFER +}; + +/** + * enum ssp_rx_level_trig - receive FIFO watermark level which triggers + * IT: Interrupt fires when _N_ or more elements in RX FIFO. + */ +enum ssp_rx_level_trig { + SSP_RX_1_OR_MORE_ELEM, + SSP_RX_4_OR_MORE_ELEM, + SSP_RX_8_OR_MORE_ELEM, + SSP_RX_16_OR_MORE_ELEM, + SSP_RX_32_OR_MORE_ELEM +}; + +/** + * Transmit FIFO watermark level which triggers (IT Interrupt fires + * when _N_ or more empty locations in TX FIFO) + */ +enum ssp_tx_level_trig { + SSP_TX_1_OR_MORE_EMPTY_LOC, + SSP_TX_4_OR_MORE_EMPTY_LOC, + SSP_TX_8_OR_MORE_EMPTY_LOC, + SSP_TX_16_OR_MORE_EMPTY_LOC, + SSP_TX_32_OR_MORE_EMPTY_LOC +}; + +/** + * enum SPI Clock Phase - clock phase (Motorola SPI interface only) + * @SSP_CLK_FIRST_EDGE: Receive data on first edge transition (actual direction depends on polarity) + * @SSP_CLK_SECOND_EDGE: Receive data on second edge transition (actual direction depends on polarity) + */ +enum ssp_spi_clk_phase { + SSP_CLK_FIRST_EDGE, + SSP_CLK_SECOND_EDGE +}; + +/** + * enum SPI Clock Polarity - clock polarity (Motorola SPI interface only) + * @SSP_CLK_POL_IDLE_LOW: Low inactive level + * @SSP_CLK_POL_IDLE_HIGH: High inactive level + */ +enum ssp_spi_clk_pol { + SSP_CLK_POL_IDLE_LOW, + SSP_CLK_POL_IDLE_HIGH +}; + +/** + * Microwire Conrol Lengths Command size in microwire format + */ +enum ssp_microwire_ctrl_len { + SSP_BITS_4 = 0x03, SSP_BITS_5, SSP_BITS_6, + SSP_BITS_7, SSP_BITS_8, SSP_BITS_9, + SSP_BITS_10, SSP_BITS_11, SSP_BITS_12, + SSP_BITS_13, SSP_BITS_14, SSP_BITS_15, + SSP_BITS_16, SSP_BITS_17, SSP_BITS_18, + SSP_BITS_19, SSP_BITS_20, SSP_BITS_21, + SSP_BITS_22, SSP_BITS_23, SSP_BITS_24, + SSP_BITS_25, SSP_BITS_26, SSP_BITS_27, + SSP_BITS_28, SSP_BITS_29, SSP_BITS_30, + SSP_BITS_31, SSP_BITS_32 +}; + +/** + * enum Microwire Wait State + * @SSP_MWIRE_WAIT_ZERO: No wait state inserted after last command bit + * @SSP_MWIRE_WAIT_ONE: One wait state inserted after last command bit + */ +enum ssp_microwire_wait_state { + SSP_MWIRE_WAIT_ZERO, + SSP_MWIRE_WAIT_ONE +}; + +/** + * enum ssp_duplex - whether Full/Half Duplex on microwire, only + * available in the ST Micro variant. + * @SSP_MICROWIRE_CHANNEL_FULL_DUPLEX: SSPTXD becomes bi-directional, + * SSPRXD not used + * @SSP_MICROWIRE_CHANNEL_HALF_DUPLEX: SSPTXD is an output, SSPRXD is + * an input. 
+ */ +enum ssp_duplex { + SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, + SSP_MICROWIRE_CHANNEL_HALF_DUPLEX +}; + +/** + * enum ssp_clkdelay - an optional clock delay on the feedback clock + * only available in the ST Micro PL023 variant. + * @SSP_FEEDBACK_CLK_DELAY_NONE: no delay, the data coming in from the + * slave is sampled directly + * @SSP_FEEDBACK_CLK_DELAY_1T: the incoming slave data is sampled with + * a delay of T-dt + * @SSP_FEEDBACK_CLK_DELAY_2T: dito with a delay if 2T-dt + * @SSP_FEEDBACK_CLK_DELAY_3T: dito with a delay if 3T-dt + * @SSP_FEEDBACK_CLK_DELAY_4T: dito with a delay if 4T-dt + * @SSP_FEEDBACK_CLK_DELAY_5T: dito with a delay if 5T-dt + * @SSP_FEEDBACK_CLK_DELAY_6T: dito with a delay if 6T-dt + * @SSP_FEEDBACK_CLK_DELAY_7T: dito with a delay if 7T-dt + */ +enum ssp_clkdelay { + SSP_FEEDBACK_CLK_DELAY_NONE, + SSP_FEEDBACK_CLK_DELAY_1T, + SSP_FEEDBACK_CLK_DELAY_2T, + SSP_FEEDBACK_CLK_DELAY_3T, + SSP_FEEDBACK_CLK_DELAY_4T, + SSP_FEEDBACK_CLK_DELAY_5T, + SSP_FEEDBACK_CLK_DELAY_6T, + SSP_FEEDBACK_CLK_DELAY_7T +}; + +/** + * CHIP select/deselect commands + */ +enum ssp_chip_select { + SSP_CHIP_SELECT, + SSP_CHIP_DESELECT +}; + + +struct dma_chan; +/** + * struct pl022_ssp_master - device.platform_data for SPI controller devices. + * @bus_id: identifier for this bus + * @num_chipselect: chipselects are used to distinguish individual + * SPI slaves, and are numbered from zero to num_chipselects - 1. + * each slave has a chipselect signal, but it's common that not + * every chipselect is connected to a slave. + * @enable_dma: if true enables DMA driven transfers. + * @dma_rx_param: parameter to locate an RX DMA channel. + * @dma_tx_param: parameter to locate a TX DMA channel. + * @autosuspend_delay: delay in ms following transfer completion before the + * runtime power management system suspends the device. A setting of 0 + * indicates no delay and the device will be suspended immediately. + * @rt: indicates the controller should run the message pump with realtime + * priority to minimise the transfer latency on the bus. + * @chipselects: list of chip select gpios + */ +struct pl022_ssp_controller { + u16 bus_id; + u8 num_chipselect; + u8 enable_dma:1; + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); + void *dma_rx_param; + void *dma_tx_param; + int autosuspend_delay; + bool rt; + int *chipselects; +}; + +/** + * struct ssp_config_chip - spi_board_info.controller_data for SPI + * slave devices, copied to spi_device.controller_data. + * + * @iface: Interface type(Motorola, TI, Microwire, Universal) + * @hierarchy: sets whether interface is master or slave + * @slave_tx_disable: SSPTXD is disconnected (in slave mode only) + * @clk_freq: Tune freq parameters of SSP(when in master mode) + * @com_mode: communication mode: polling, Interrupt or DMA + * @rx_lev_trig: Rx FIFO watermark level (for IT & DMA mode) + * @tx_lev_trig: Tx FIFO watermark level (for IT & DMA mode) + * @ctrl_len: Microwire interface: Control length + * @wait_state: Microwire interface: Wait state + * @duplex: Microwire interface: Full/Half duplex + * @clkdelay: on the PL023 variant, the delay in feeback clock cycles + * before sampling the incoming line + * @cs_control: function pointer to board-specific function to + * assert/deassert I/O port to control HW generation of devices chip-select. 
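A hedged example of PL022 platform data: one pl022_ssp_controller for the block itself (struct defined above) and one pl022_config_chip per SPI slave (fields documented above, struct defined just below). All values are placeholders; per the PL022 TRM the resulting bit rate works out to SSPCLK / (cpsdvsr * (1 + scr)).

#include <linux/amba/pl022.h>

static struct pl022_ssp_controller example_ssp_plat = {
	.bus_id		= 0,
	.num_chipselect	= 2,
	.enable_dma	= 0,		/* polled/interrupt transfers only */
};

static const struct pl022_config_chip example_chip_info = {
	.iface		= SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy	= SSP_MASTER,
	.com_mode	= POLLING_TRANSFER,
	.clk_freq	= { .cpsdvsr = 2, .scr = 8 },	/* placeholder divisors */
	.rx_lev_trig	= SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig	= SSP_TX_1_OR_MORE_EMPTY_LOC,
	.cs_control	= NULL,		/* board supplies GPIO chip selects instead */
};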
+ */ +struct pl022_config_chip { + enum ssp_interface iface; + enum ssp_hierarchy hierarchy; + bool slave_tx_disable; + struct ssp_clock_params clk_freq; + enum ssp_mode com_mode; + enum ssp_rx_level_trig rx_lev_trig; + enum ssp_tx_level_trig tx_lev_trig; + enum ssp_microwire_ctrl_len ctrl_len; + enum ssp_microwire_wait_state wait_state; + enum ssp_duplex duplex; + enum ssp_clkdelay clkdelay; + void (*cs_control) (u32 control); +}; + +#endif /* _SSP_PL022_H */ diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h new file mode 100644 index 000000000..ab036b6b1 --- /dev/null +++ b/include/linux/amba/pl080.h @@ -0,0 +1,220 @@ +/* include/linux/amba/pl080.h + * + * Copyright 2008 Openmoko, Inc. + * Copyright 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks + * + * ARM PrimeCell PL080 DMA controller + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +/* Note, there are some Samsung updates to this controller block which + * make it not entierly compatible with the PL080 specification from + * ARM. When in doubt, check the Samsung documentation first. + * + * The Samsung defines are PL080S, and add an extra control register, + * the ability to move more than 2^11 counts of data and some extra + * OneNAND features. +*/ + +#ifndef ASM_PL080_H +#define ASM_PL080_H + +#define PL080_INT_STATUS (0x00) +#define PL080_TC_STATUS (0x04) +#define PL080_TC_CLEAR (0x08) +#define PL080_ERR_STATUS (0x0C) +#define PL080_ERR_CLEAR (0x10) +#define PL080_RAW_TC_STATUS (0x14) +#define PL080_RAW_ERR_STATUS (0x18) +#define PL080_EN_CHAN (0x1c) +#define PL080_SOFT_BREQ (0x20) +#define PL080_SOFT_SREQ (0x24) +#define PL080_SOFT_LBREQ (0x28) +#define PL080_SOFT_LSREQ (0x2C) + +#define PL080_CONFIG (0x30) +#define PL080_CONFIG_M2_BE BIT(2) +#define PL080_CONFIG_M1_BE BIT(1) +#define PL080_CONFIG_ENABLE BIT(0) + +#define PL080_SYNC (0x34) + +/* The Faraday Technology FTDMAC020 variant registers */ +#define FTDMAC020_CH_BUSY (0x20) +/* Identical to PL080_CONFIG */ +#define FTDMAC020_CSR (0x24) +/* Identical to PL080_SYNC */ +#define FTDMAC020_SYNC (0x2C) +#define FTDMAC020_REVISION (0x30) +#define FTDMAC020_FEATURE (0x34) + +/* Per channel configuration registers */ +#define PL080_Cx_BASE(x) ((0x100 + (x * 0x20))) +#define PL080_CH_SRC_ADDR (0x00) +#define PL080_CH_DST_ADDR (0x04) +#define PL080_CH_LLI (0x08) +#define PL080_CH_CONTROL (0x0C) +#define PL080_CH_CONFIG (0x10) +#define PL080S_CH_CONTROL2 (0x10) +#define PL080S_CH_CONFIG (0x14) +/* The Faraday FTDMAC020 derivative shuffles the registers around */ +#define FTDMAC020_CH_CSR (0x00) +#define FTDMAC020_CH_CFG (0x04) +#define FTDMAC020_CH_SRC_ADDR (0x08) +#define FTDMAC020_CH_DST_ADDR (0x0C) +#define FTDMAC020_CH_LLP (0x10) +#define FTDMAC020_CH_SIZE (0x14) + +#define PL080_LLI_ADDR_MASK GENMASK(31, 2) +#define PL080_LLI_ADDR_SHIFT (2) +#define PL080_LLI_LM_AHB2 BIT(0) + +#define PL080_CONTROL_TC_IRQ_EN BIT(31) +#define PL080_CONTROL_PROT_MASK GENMASK(30, 28) +#define PL080_CONTROL_PROT_SHIFT (28) +#define PL080_CONTROL_PROT_CACHE BIT(30) +#define PL080_CONTROL_PROT_BUFF BIT(29) +#define PL080_CONTROL_PROT_SYS BIT(28) +#define PL080_CONTROL_DST_INCR BIT(27) +#define PL080_CONTROL_SRC_INCR BIT(26) +#define PL080_CONTROL_DST_AHB2 BIT(25) +#define PL080_CONTROL_SRC_AHB2 BIT(24) +#define PL080_CONTROL_DWIDTH_MASK GENMASK(23, 21) +#define PL080_CONTROL_DWIDTH_SHIFT (21) +#define 
PL080_CONTROL_SWIDTH_MASK GENMASK(20, 18) +#define PL080_CONTROL_SWIDTH_SHIFT (18) +#define PL080_CONTROL_DB_SIZE_MASK GENMASK(17, 15) +#define PL080_CONTROL_DB_SIZE_SHIFT (15) +#define PL080_CONTROL_SB_SIZE_MASK GENMASK(14, 12) +#define PL080_CONTROL_SB_SIZE_SHIFT (12) +#define PL080_CONTROL_TRANSFER_SIZE_MASK GENMASK(11, 0) +#define PL080S_CONTROL_TRANSFER_SIZE_MASK GENMASK(24, 0) +#define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0) + +#define PL080_BSIZE_1 (0x0) +#define PL080_BSIZE_4 (0x1) +#define PL080_BSIZE_8 (0x2) +#define PL080_BSIZE_16 (0x3) +#define PL080_BSIZE_32 (0x4) +#define PL080_BSIZE_64 (0x5) +#define PL080_BSIZE_128 (0x6) +#define PL080_BSIZE_256 (0x7) + +#define PL080_WIDTH_8BIT (0x0) +#define PL080_WIDTH_16BIT (0x1) +#define PL080_WIDTH_32BIT (0x2) + +#define PL080N_CONFIG_ITPROT BIT(20) +#define PL080N_CONFIG_SECPROT BIT(19) +#define PL080_CONFIG_HALT BIT(18) +#define PL080_CONFIG_ACTIVE BIT(17) /* RO */ +#define PL080_CONFIG_LOCK BIT(16) +#define PL080_CONFIG_TC_IRQ_MASK BIT(15) +#define PL080_CONFIG_ERR_IRQ_MASK BIT(14) +#define PL080_CONFIG_FLOW_CONTROL_MASK GENMASK(13, 11) +#define PL080_CONFIG_FLOW_CONTROL_SHIFT (11) +#define PL080_CONFIG_DST_SEL_MASK GENMASK(9, 6) +#define PL080_CONFIG_DST_SEL_SHIFT (6) +#define PL080_CONFIG_SRC_SEL_MASK GENMASK(4, 1) +#define PL080_CONFIG_SRC_SEL_SHIFT (1) +#define PL080_CONFIG_ENABLE BIT(0) + +#define PL080_FLOW_MEM2MEM (0x0) +#define PL080_FLOW_MEM2PER (0x1) +#define PL080_FLOW_PER2MEM (0x2) +#define PL080_FLOW_SRC2DST (0x3) +#define PL080_FLOW_SRC2DST_DST (0x4) +#define PL080_FLOW_MEM2PER_PER (0x5) +#define PL080_FLOW_PER2MEM_PER (0x6) +#define PL080_FLOW_SRC2DST_SRC (0x7) + +#define FTDMAC020_CH_CSR_TC_MSK BIT(31) +/* Later versions have a threshold in bits 24..26, */ +#define FTDMAC020_CH_CSR_FIFOTH_MSK GENMASK(26, 24) +#define FTDMAC020_CH_CSR_FIFOTH_SHIFT (24) +#define FTDMAC020_CH_CSR_CHPR1_MSK GENMASK(23, 22) +#define FTDMAC020_CH_CSR_PROT3 BIT(21) +#define FTDMAC020_CH_CSR_PROT2 BIT(20) +#define FTDMAC020_CH_CSR_PROT1 BIT(19) +#define FTDMAC020_CH_CSR_SRC_SIZE_MSK GENMASK(18, 16) +#define FTDMAC020_CH_CSR_SRC_SIZE_SHIFT (16) +#define FTDMAC020_CH_CSR_ABT BIT(15) +#define FTDMAC020_CH_CSR_SRC_WIDTH_MSK GENMASK(13, 11) +#define FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT (11) +#define FTDMAC020_CH_CSR_DST_WIDTH_MSK GENMASK(10, 8) +#define FTDMAC020_CH_CSR_DST_WIDTH_SHIFT (8) +#define FTDMAC020_CH_CSR_MODE BIT(7) +/* 00 = increase, 01 = decrease, 10 = fix */ +#define FTDMAC020_CH_CSR_SRCAD_CTL_MSK GENMASK(6, 5) +#define FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT (5) +#define FTDMAC020_CH_CSR_DSTAD_CTL_MSK GENMASK(4, 3) +#define FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT (3) +#define FTDMAC020_CH_CSR_SRC_SEL BIT(2) +#define FTDMAC020_CH_CSR_DST_SEL BIT(1) +#define FTDMAC020_CH_CSR_EN BIT(0) + +/* FIFO threshold setting */ +#define FTDMAC020_CH_CSR_FIFOTH_1 (0x0) +#define FTDMAC020_CH_CSR_FIFOTH_2 (0x1) +#define FTDMAC020_CH_CSR_FIFOTH_4 (0x2) +#define FTDMAC020_CH_CSR_FIFOTH_8 (0x3) +#define FTDMAC020_CH_CSR_FIFOTH_16 (0x4) +/* The FTDMAC020 supports 64bit wide transfers */ +#define FTDMAC020_WIDTH_64BIT (0x3) +/* Address can be increased, decreased or fixed */ +#define FTDMAC020_CH_CSR_SRCAD_CTL_INC (0x0) +#define FTDMAC020_CH_CSR_SRCAD_CTL_DEC (0x1) +#define FTDMAC020_CH_CSR_SRCAD_CTL_FIXED (0x2) + +#define FTDMAC020_CH_CFG_LLP_CNT_MASK GENMASK(19, 16) +#define FTDMAC020_CH_CFG_LLP_CNT_SHIFT (16) +#define FTDMAC020_CH_CFG_BUSY BIT(8) +#define FTDMAC020_CH_CFG_INT_ABT_MASK BIT(2) +#define FTDMAC020_CH_CFG_INT_ERR_MASK BIT(1) +#define 
FTDMAC020_CH_CFG_INT_TC_MASK BIT(0) + +/* Inside the LLIs, the applicable CSR fields are mapped differently */ +#define FTDMAC020_LLI_TC_MSK BIT(28) +#define FTDMAC020_LLI_SRC_WIDTH_MSK GENMASK(27, 25) +#define FTDMAC020_LLI_SRC_WIDTH_SHIFT (25) +#define FTDMAC020_LLI_DST_WIDTH_MSK GENMASK(24, 22) +#define FTDMAC020_LLI_DST_WIDTH_SHIFT (22) +#define FTDMAC020_LLI_SRCAD_CTL_MSK GENMASK(21, 20) +#define FTDMAC020_LLI_SRCAD_CTL_SHIFT (20) +#define FTDMAC020_LLI_DSTAD_CTL_MSK GENMASK(19, 18) +#define FTDMAC020_LLI_DSTAD_CTL_SHIFT (18) +#define FTDMAC020_LLI_SRC_SEL BIT(17) +#define FTDMAC020_LLI_DST_SEL BIT(16) +#define FTDMAC020_LLI_TRANSFER_SIZE_MASK GENMASK(11, 0) +#define FTDMAC020_LLI_TRANSFER_SIZE_SHIFT (0) + +#define FTDMAC020_CFG_LLP_CNT_MASK GENMASK(19, 16) +#define FTDMAC020_CFG_LLP_CNT_SHIFT (16) +#define FTDMAC020_CFG_BUSY BIT(8) +#define FTDMAC020_CFG_INT_ABT_MSK BIT(2) +#define FTDMAC020_CFG_INT_ERR_MSK BIT(1) +#define FTDMAC020_CFG_INT_TC_MSK BIT(0) + +/* DMA linked list chain structure */ + +struct pl080_lli { + u32 src_addr; + u32 dst_addr; + u32 next_lli; + u32 control0; +}; + +struct pl080s_lli { + u32 src_addr; + u32 dst_addr; + u32 next_lli; + u32 control0; + u32 control1; +}; + +#endif /* ASM_PL080_H */ diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h new file mode 100644 index 000000000..79d1bcee7 --- /dev/null +++ b/include/linux/amba/pl08x.h @@ -0,0 +1,133 @@ +/* + * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver + * + * Copyright (C) 2005 ARM Ltd + * Copyright (C) 2010 ST-Ericsson SA + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * pl08x information required by platform code + * + * Please credit ARM.com + * Documentation: ARM DDI 0196D + */ + +#ifndef AMBA_PL08X_H +#define AMBA_PL08X_H + +/* We need sizes of structs from this header */ +#include +#include + +struct pl08x_driver_data; +struct pl08x_phy_chan; +struct pl08x_txd; + +/* Bitmasks for selecting AHB ports for DMA transfers */ +enum { + PL08X_AHB1 = (1 << 0), + PL08X_AHB2 = (1 << 1) +}; + +/** + * struct pl08x_channel_data - data structure to pass info between + * platform and PL08x driver regarding channel configuration + * @bus_id: name of this device channel, not just a device name since + * devices may have more than one channel e.g. "foo_tx" + * @min_signal: the minimum DMA signal number to be muxed in for this + * channel (for platforms supporting muxed signals). If you have + * static assignments, make sure this is set to the assigned signal + * number, PL08x have 16 possible signals in number 0 thru 15 so + * when these are not enough they often get muxed (in hardware) + * disabling simultaneous use of the same channel for two devices. + * @max_signal: the maximum DMA signal number to be muxed in for + * the channel. Set to the same as min_signal for + * devices with static assignments + * @muxval: a number usually used to poke into some mux regiser to + * mux in the signal to this channel + * @addr: source/target address in physical memory for this DMA channel, + * can be the address of a FIFO register for burst requests for example. + * This can be left undefined if the PrimeCell API is used for configuring + * this. + * @single: the device connected to this channel will request single DMA + * transfers, not bursts. (Bursts are default.) 
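A sketch of how the PL080 control-word fields and struct pl080_lli defined in pl080.h above fit together: one linked-list item describing a 256-transfer, 32-bit wide memory-to-memory move with both addresses incrementing. The addresses and transfer size are placeholders.

#include <linux/amba/pl080.h>
#include <linux/bits.h>
#include <linux/types.h>

static void example_pl080_fill_lli(struct pl080_lli *lli, u32 src, u32 dst)
{
	lli->src_addr = src;
	lli->dst_addr = dst;
	lli->next_lli = 0;	/* last (only) item in the chain */
	lli->control0 = (256 << PL080_CONTROL_TRANSFER_SIZE_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT) |
			PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
			PL080_CONTROL_TC_IRQ_EN;
}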
+ * @periph_buses: the device connected to this channel is accessible via + * these buses (use PL08X_AHB1 | PL08X_AHB2). + */ +struct pl08x_channel_data { + const char *bus_id; + int min_signal; + int max_signal; + u32 muxval; + dma_addr_t addr; + bool single; + u8 periph_buses; +}; + +enum pl08x_burst_size { + PL08X_BURST_SZ_1, + PL08X_BURST_SZ_4, + PL08X_BURST_SZ_8, + PL08X_BURST_SZ_16, + PL08X_BURST_SZ_32, + PL08X_BURST_SZ_64, + PL08X_BURST_SZ_128, + PL08X_BURST_SZ_256, +}; + +enum pl08x_bus_width { + PL08X_BUS_WIDTH_8_BITS, + PL08X_BUS_WIDTH_16_BITS, + PL08X_BUS_WIDTH_32_BITS, +}; + +/** + * struct pl08x_platform_data - the platform configuration for the PL08x + * PrimeCells. + * @slave_channels: the channels defined for the different devices on the + * platform, all inclusive, including multiplexed channels. The available + * physical channels will be multiplexed around these signals as they are + * requested, just enumerate all possible channels. + * @num_slave_channels: number of elements in the slave channel array + * @memcpy_burst_size: the appropriate burst size for memcpy operations + * @memcpy_bus_width: memory bus width + * @memcpy_prot_buff: whether memcpy DMA is bufferable + * @memcpy_prot_cache: whether memcpy DMA is cacheable + * @get_xfer_signal: request a physical signal to be used for a DMA transfer + * immediately: if there is some multiplexing or similar blocking the use + * of the channel the transfer can be denied by returning less than zero, + * else it returns the allocated signal number + * @put_xfer_signal: indicate to the platform that this physical signal is not + * running any DMA transfer and multiplexing can be recycled + * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 + * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 + * @slave_map: DMA slave matching table + * @slave_map_len: number of elements in @slave_map + */ +struct pl08x_platform_data { + struct pl08x_channel_data *slave_channels; + unsigned int num_slave_channels; + enum pl08x_burst_size memcpy_burst_size; + enum pl08x_bus_width memcpy_bus_width; + bool memcpy_prot_buff; + bool memcpy_prot_cache; + int (*get_xfer_signal)(const struct pl08x_channel_data *); + void (*put_xfer_signal)(const struct pl08x_channel_data *, int); + u8 lli_buses; + u8 mem_buses; + const struct dma_slave_map *slave_map; + int slave_map_len; +}; + +#ifdef CONFIG_AMBA_PL08X +bool pl08x_filter_id(struct dma_chan *chan, void *chan_id); +#else +static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) +{ + return false; +} +#endif + +#endif /* AMBA_PL08X_H */ diff --git a/include/linux/amba/pl093.h b/include/linux/amba/pl093.h new file mode 100644 index 000000000..2983e3671 --- /dev/null +++ b/include/linux/amba/pl093.h @@ -0,0 +1,80 @@ +/* linux/amba/pl093.h + * + * Copyright (c) 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks + * + * AMBA PL093 SSMC (synchronous static memory controller) + * See DDI0236.pdf (r0p4) for more details + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
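An illustrative pl08x_platform_data for a board with one TX/RX channel pair, using the pl08x structures defined above; the signal numbers, FIFO address and bus_id strings are placeholders for a real platform.

#include <linux/amba/pl08x.h>
#include <linux/kernel.h>

static struct pl08x_channel_data example_pl08x_slaves[] = {
	{
		.bus_id		= "example_tx",
		.min_signal	= 0,
		.max_signal	= 0,		/* statically assigned request line */
		.addr		= 0x10000020,	/* placeholder peripheral FIFO */
		.periph_buses	= PL08X_AHB2,
	},
	{
		.bus_id		= "example_rx",
		.min_signal	= 1,
		.max_signal	= 1,
		.addr		= 0x10000020,
		.periph_buses	= PL08X_AHB2,
	},
};

static struct pl08x_platform_data example_pl08x_pd = {
	.slave_channels		= example_pl08x_slaves,
	.num_slave_channels	= ARRAY_SIZE(example_pl08x_slaves),
	.memcpy_burst_size	= PL08X_BURST_SZ_256,
	.memcpy_bus_width	= PL08X_BUS_WIDTH_32_BITS,
	.lli_buses		= PL08X_AHB1,
	.mem_buses		= PL08X_AHB1,
};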
+*/ + +#define SMB_BANK(x) ((x) * 0x20) /* each bank control set is 0x20 apart */ + +/* Offsets for SMBxxxxRy registers */ + +#define SMBIDCYR (0x00) +#define SMBWSTRDR (0x04) +#define SMBWSTWRR (0x08) +#define SMBWSTOENR (0x0C) +#define SMBWSTWENR (0x10) +#define SMBCR (0x14) +#define SMBSR (0x18) +#define SMBWSTBRDR (0x1C) + +/* Masks for SMB registers */ +#define IDCY_MASK (0xf) +#define WSTRD_MASK (0xf) +#define WSTWR_MASK (0xf) +#define WSTOEN_MASK (0xf) +#define WSTWEN_MASK (0xf) + +/* Notes from datasheet: + * WSTOEN <= WSTRD + * WSTWEN <= WSTWR + * + * WSTOEN is not used with nWAIT + */ + +/* SMBCR bit definitions */ +#define SMBCR_BIWRITEEN (1 << 21) +#define SMBCR_ADDRVALIDWRITEEN (1 << 20) +#define SMBCR_SYNCWRITE (1 << 17) +#define SMBCR_BMWRITE (1 << 16) +#define SMBCR_WRAPREAD (1 << 14) +#define SMBCR_BIREADEN (1 << 13) +#define SMBCR_ADDRVALIDREADEN (1 << 12) +#define SMBCR_SYNCREAD (1 << 9) +#define SMBCR_BMREAD (1 << 8) +#define SMBCR_SMBLSPOL (1 << 6) +#define SMBCR_WP (1 << 3) +#define SMBCR_WAITEN (1 << 2) +#define SMBCR_WAITPOL (1 << 1) +#define SMBCR_RBLE (1 << 0) + +#define SMBCR_BURSTLENWRITE_MASK (3 << 18) +#define SMBCR_BURSTLENWRITE_4 (0 << 18) +#define SMBCR_BURSTLENWRITE_8 (1 << 18) +#define SMBCR_BURSTLENWRITE_RESERVED (2 << 18) +#define SMBCR_BURSTLENWRITE_CONTINUOUS (3 << 18) + +#define SMBCR_BURSTLENREAD_MASK (3 << 10) +#define SMBCR_BURSTLENREAD_4 (0 << 10) +#define SMBCR_BURSTLENREAD_8 (1 << 10) +#define SMBCR_BURSTLENREAD_16 (2 << 10) +#define SMBCR_BURSTLENREAD_CONTINUOUS (3 << 10) + +#define SMBCR_MW_MASK (3 << 4) +#define SMBCR_MW_8BIT (0 << 4) +#define SMBCR_MW_16BIT (1 << 4) +#define SMBCR_MW_M32BIT (2 << 4) + +/* SSMC status registers */ +#define SSMCCSR (0x200) +#define SSMCCR (0x204) +#define SSMCITCR (0x208) +#define SSMCITIP (0x20C) +#define SSMCITIOP (0x210) diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h new file mode 100644 index 000000000..ad0965e21 --- /dev/null +++ b/include/linux/amba/serial.h @@ -0,0 +1,241 @@ +/* + * linux/include/asm-arm/hardware/serial_amba.h + * + * Internal header file for AMBA serial ports + * + * Copyright (C) ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H +#define ASM_ARM_HARDWARE_SERIAL_AMBA_H + +#include + +/* ------------------------------------------------------------------------------- + * From AMBA UART (PL010) Block Specification + * ------------------------------------------------------------------------------- + * UART Register Offsets. + */ +#define UART01x_DR 0x00 /* Data read or written from the interface. */ +#define UART01x_RSR 0x04 /* Receive status register (Read). */ +#define UART01x_ECR 0x04 /* Error clear register (Write). */ +#define UART010_LCRH 0x08 /* Line control register, high byte. 
*/ +#define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */ +#define UART010_LCRM 0x0C /* Line control register, middle byte. */ +#define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */ +#define UART010_LCRL 0x10 /* Line control register, low byte. */ +#define UART010_CR 0x14 /* Control register. */ +#define UART01x_FR 0x18 /* Flag register (Read only). */ +#define UART010_IIR 0x1C /* Interrupt identification register (Read). */ +#define UART010_ICR 0x1C /* Interrupt clear register (Write). */ +#define ST_UART011_LCRH_RX 0x1C /* Rx line control register. */ +#define UART01x_ILPR 0x20 /* IrDA low power counter register. */ +#define UART011_IBRD 0x24 /* Integer baud rate divisor register. */ +#define UART011_FBRD 0x28 /* Fractional baud rate divisor register. */ +#define UART011_LCRH 0x2c /* Line control register. */ +#define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */ +#define UART011_CR 0x30 /* Control register. */ +#define UART011_IFLS 0x34 /* Interrupt fifo level select. */ +#define UART011_IMSC 0x38 /* Interrupt mask. */ +#define UART011_RIS 0x3c /* Raw interrupt status. */ +#define UART011_MIS 0x40 /* Masked interrupt status. */ +#define UART011_ICR 0x44 /* Interrupt clear register. */ +#define UART011_DMACR 0x48 /* DMA control register. */ +#define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */ +#define ST_UART011_XON1 0x54 /* XON1 register. */ +#define ST_UART011_XON2 0x58 /* XON2 register. */ +#define ST_UART011_XOFF1 0x5C /* XON1 register. */ +#define ST_UART011_XOFF2 0x60 /* XON2 register. */ +#define ST_UART011_ITCR 0x80 /* Integration test control register. */ +#define ST_UART011_ITIP 0x84 /* Integration test input register. */ +#define ST_UART011_ABCR 0x100 /* Autobaud control register. */ +#define ST_UART011_ABIMSC 0x15C /* Autobaud interrupt mask/clear register. */ + +/* + * ZTE UART register offsets. This UART has a radically different address + * allocation from the ARM and ST variants, so we list all registers here. + * We assume unlisted registers do not exist. + */ +#define ZX_UART011_DR 0x04 +#define ZX_UART011_FR 0x14 +#define ZX_UART011_IBRD 0x24 +#define ZX_UART011_FBRD 0x28 +#define ZX_UART011_LCRH 0x30 +#define ZX_UART011_CR 0x34 +#define ZX_UART011_IFLS 0x38 +#define ZX_UART011_IMSC 0x40 +#define ZX_UART011_RIS 0x44 +#define ZX_UART011_MIS 0x48 +#define ZX_UART011_ICR 0x4c +#define ZX_UART011_DMACR 0x50 + +#define UART011_DR_OE (1 << 11) +#define UART011_DR_BE (1 << 10) +#define UART011_DR_PE (1 << 9) +#define UART011_DR_FE (1 << 8) + +#define UART01x_RSR_OE 0x08 +#define UART01x_RSR_BE 0x04 +#define UART01x_RSR_PE 0x02 +#define UART01x_RSR_FE 0x01 + +#define UART011_FR_RI 0x100 +#define UART011_FR_TXFE 0x080 +#define UART011_FR_RXFF 0x040 +#define UART01x_FR_TXFF 0x020 +#define UART01x_FR_RXFE 0x010 +#define UART01x_FR_BUSY 0x008 +#define UART01x_FR_DCD 0x004 +#define UART01x_FR_DSR 0x002 +#define UART01x_FR_CTS 0x001 +#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY) + +/* + * Some bits of Flag Register on ZTE device have different position from + * standard ones. 
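 *
 * Editorial sketch (not part of the original header): a driver that
 * supports both variants has to take the flag-register offset and the
 * busy bit from vendor data rather than hard-coding the standard
 * values. With "base" and "busy" as hypothetical driver locals, and
 * ZX_UART01x_FR_BUSY defined just below, the standard PL011 poll is
 *
 *	busy = readl(base + UART01x_FR) & UART01x_FR_BUSY;
 *
 * while on the ZTE variant both the register offset and the bit differ:
 *
 *	busy = readl(base + ZX_UART011_FR) & ZX_UART01x_FR_BUSY;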
+ */ +#define ZX_UART01x_FR_BUSY 0x100 +#define ZX_UART01x_FR_DSR 0x008 +#define ZX_UART01x_FR_CTS 0x002 +#define ZX_UART011_FR_RI 0x001 + +#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */ +#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */ +#define UART011_CR_OUT2 0x2000 /* OUT2 */ +#define UART011_CR_OUT1 0x1000 /* OUT1 */ +#define UART011_CR_RTS 0x0800 /* RTS */ +#define UART011_CR_DTR 0x0400 /* DTR */ +#define UART011_CR_RXE 0x0200 /* receive enable */ +#define UART011_CR_TXE 0x0100 /* transmit enable */ +#define UART011_CR_LBE 0x0080 /* loopback enable */ +#define UART010_CR_RTIE 0x0040 +#define UART010_CR_TIE 0x0020 +#define UART010_CR_RIE 0x0010 +#define UART010_CR_MSIE 0x0008 +#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */ +#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */ +#define UART01x_CR_SIREN 0x0002 /* SIR enable */ +#define UART01x_CR_UARTEN 0x0001 /* UART enable */ + +#define UART011_LCRH_SPS 0x80 +#define UART01x_LCRH_WLEN_8 0x60 +#define UART01x_LCRH_WLEN_7 0x40 +#define UART01x_LCRH_WLEN_6 0x20 +#define UART01x_LCRH_WLEN_5 0x00 +#define UART01x_LCRH_FEN 0x10 +#define UART01x_LCRH_STP2 0x08 +#define UART01x_LCRH_EPS 0x04 +#define UART01x_LCRH_PEN 0x02 +#define UART01x_LCRH_BRK 0x01 + +#define ST_UART011_DMAWM_RX_1 (0 << 3) +#define ST_UART011_DMAWM_RX_2 (1 << 3) +#define ST_UART011_DMAWM_RX_4 (2 << 3) +#define ST_UART011_DMAWM_RX_8 (3 << 3) +#define ST_UART011_DMAWM_RX_16 (4 << 3) +#define ST_UART011_DMAWM_RX_32 (5 << 3) +#define ST_UART011_DMAWM_RX_48 (6 << 3) +#define ST_UART011_DMAWM_TX_1 0 +#define ST_UART011_DMAWM_TX_2 1 +#define ST_UART011_DMAWM_TX_4 2 +#define ST_UART011_DMAWM_TX_8 3 +#define ST_UART011_DMAWM_TX_16 4 +#define ST_UART011_DMAWM_TX_32 5 +#define ST_UART011_DMAWM_TX_48 6 + +#define UART010_IIR_RTIS 0x08 +#define UART010_IIR_TIS 0x04 +#define UART010_IIR_RIS 0x02 +#define UART010_IIR_MIS 0x01 + +#define UART011_IFLS_RX1_8 (0 << 3) +#define UART011_IFLS_RX2_8 (1 << 3) +#define UART011_IFLS_RX4_8 (2 << 3) +#define UART011_IFLS_RX6_8 (3 << 3) +#define UART011_IFLS_RX7_8 (4 << 3) +#define UART011_IFLS_TX1_8 (0 << 0) +#define UART011_IFLS_TX2_8 (1 << 0) +#define UART011_IFLS_TX4_8 (2 << 0) +#define UART011_IFLS_TX6_8 (3 << 0) +#define UART011_IFLS_TX7_8 (4 << 0) +/* special values for ST vendor with deeper fifo */ +#define UART011_IFLS_RX_HALF (5 << 3) +#define UART011_IFLS_TX_HALF (5 << 0) + +#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */ +#define UART011_BEIM (1 << 9) /* break error interrupt mask */ +#define UART011_PEIM (1 << 8) /* parity error interrupt mask */ +#define UART011_FEIM (1 << 7) /* framing error interrupt mask */ +#define UART011_RTIM (1 << 6) /* receive timeout interrupt mask */ +#define UART011_TXIM (1 << 5) /* transmit interrupt mask */ +#define UART011_RXIM (1 << 4) /* receive interrupt mask */ +#define UART011_DSRMIM (1 << 3) /* DSR interrupt mask */ +#define UART011_DCDMIM (1 << 2) /* DCD interrupt mask */ +#define UART011_CTSMIM (1 << 1) /* CTS interrupt mask */ +#define UART011_RIMIM (1 << 0) /* RI interrupt mask */ + +#define UART011_OEIS (1 << 10) /* overrun error interrupt status */ +#define UART011_BEIS (1 << 9) /* break error interrupt status */ +#define UART011_PEIS (1 << 8) /* parity error interrupt status */ +#define UART011_FEIS (1 << 7) /* framing error interrupt status */ +#define UART011_RTIS (1 << 6) /* receive timeout interrupt status */ +#define UART011_TXIS (1 << 5) /* transmit interrupt status */ +#define UART011_RXIS (1 << 4) /* receive interrupt 
status */ +#define UART011_DSRMIS (1 << 3) /* DSR interrupt status */ +#define UART011_DCDMIS (1 << 2) /* DCD interrupt status */ +#define UART011_CTSMIS (1 << 1) /* CTS interrupt status */ +#define UART011_RIMIS (1 << 0) /* RI interrupt status */ + +#define UART011_OEIC (1 << 10) /* overrun error interrupt clear */ +#define UART011_BEIC (1 << 9) /* break error interrupt clear */ +#define UART011_PEIC (1 << 8) /* parity error interrupt clear */ +#define UART011_FEIC (1 << 7) /* framing error interrupt clear */ +#define UART011_RTIC (1 << 6) /* receive timeout interrupt clear */ +#define UART011_TXIC (1 << 5) /* transmit interrupt clear */ +#define UART011_RXIC (1 << 4) /* receive interrupt clear */ +#define UART011_DSRMIC (1 << 3) /* DSR interrupt clear */ +#define UART011_DCDMIC (1 << 2) /* DCD interrupt clear */ +#define UART011_CTSMIC (1 << 1) /* CTS interrupt clear */ +#define UART011_RIMIC (1 << 0) /* RI interrupt clear */ + +#define UART011_DMAONERR (1 << 2) /* disable dma on error */ +#define UART011_TXDMAE (1 << 1) /* enable transmit dma */ +#define UART011_RXDMAE (1 << 0) /* enable receive dma */ + +#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE) +#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) + +#ifndef __ASSEMBLY__ +struct amba_device; /* in uncompress this is included but amba/bus.h is not */ +struct amba_pl010_data { + void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl); +}; + +struct dma_chan; +struct amba_pl011_data { + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); + void *dma_rx_param; + void *dma_tx_param; + bool dma_rx_poll_enable; + unsigned int dma_rx_poll_rate; + unsigned int dma_rx_poll_timeout; + void (*init) (void); + void (*exit) (void); +}; +#endif + +#endif diff --git a/include/linux/amba/sp810.h b/include/linux/amba/sp810.h new file mode 100644 index 000000000..58fe9e8b6 --- /dev/null +++ b/include/linux/amba/sp810.h @@ -0,0 +1,62 @@ +/* + * ARM PrimeXsys System Controller SP810 header file + * + * Copyright (C) 2009 ST Microelectronics + * Viresh Kumar + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __AMBA_SP810_H +#define __AMBA_SP810_H + +#include + +/* sysctl registers offset */ +#define SCCTRL 0x000 +#define SCSYSSTAT 0x004 +#define SCIMCTRL 0x008 +#define SCIMSTAT 0x00C +#define SCXTALCTRL 0x010 +#define SCPLLCTRL 0x014 +#define SCPLLFCTRL 0x018 +#define SCPERCTRL0 0x01C +#define SCPERCTRL1 0x020 +#define SCPEREN 0x024 +#define SCPERDIS 0x028 +#define SCPERCLKEN 0x02C +#define SCPERSTAT 0x030 +#define SCSYSID0 0xEE0 +#define SCSYSID1 0xEE4 +#define SCSYSID2 0xEE8 +#define SCSYSID3 0xEEC +#define SCITCR 0xF00 +#define SCITIR0 0xF04 +#define SCITIR1 0xF08 +#define SCITOR 0xF0C +#define SCCNTCTRL 0xF10 +#define SCCNTDATA 0xF14 +#define SCCNTSTEP 0xF18 +#define SCPERIPHID0 0xFE0 +#define SCPERIPHID1 0xFE4 +#define SCPERIPHID2 0xFE8 +#define SCPERIPHID3 0xFEC +#define SCPCELLID0 0xFF0 +#define SCPCELLID1 0xFF4 +#define SCPCELLID2 0xFF8 +#define SCPCELLID3 0xFFC + +#define SCCTRL_TIMERENnSEL_SHIFT(n) (15 + ((n) * 2)) + +static inline void sysctl_soft_reset(void __iomem *base) +{ + /* switch to slow mode */ + writel(0x2, base + SCCTRL); + + /* writing any value to SCSYSSTAT reg will reset system */ + writel(0, base + SCSYSSTAT); +} + +#endif /* __AMBA_SP810_H */ diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h new file mode 100644 index 000000000..09751d349 --- /dev/null +++ b/include/linux/amd-iommu.h @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. + * Author: Joerg Roedel + * Leo Duran + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_X86_AMD_IOMMU_H +#define _ASM_X86_AMD_IOMMU_H + +#include + +/* + * This is mainly used to communicate information back-and-forth + * between SVM and IOMMU for setting up and tearing down posted + * interrupt + */ +struct amd_iommu_pi_data { + u32 ga_tag; + u32 prev_ga_tag; + u64 base; + bool is_guest_mode; + struct vcpu_data *vcpu_data; + void *ir_data; +}; + +#ifdef CONFIG_AMD_IOMMU + +struct task_struct; +struct pci_dev; + +extern int amd_iommu_detect(void); +extern int amd_iommu_init_hardware(void); + +/** + * amd_iommu_enable_device_erratum() - Enable erratum workaround for device + * in the IOMMUv2 driver + * @pdev: The PCI device the workaround is necessary for + * @erratum: The erratum workaround to enable + * + * The function needs to be called before amd_iommu_init_device(). 
+ * Possible values for the erratum number are for now: + * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI + * is enabled + * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI + * requests to one + */ +#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0 +#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1 + +extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum); + +/** + * amd_iommu_init_device() - Init device for use with IOMMUv2 driver + * @pdev: The PCI device to initialize + * @pasids: Number of PASIDs to support for this device + * + * This function does all setup for the device pdev so that it can be + * used with IOMMUv2. + * Returns 0 on success or negative value on error. + */ +extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids); + +/** + * amd_iommu_free_device() - Free all IOMMUv2 related device resources + * and disable IOMMUv2 usage for this device + * @pdev: The PCI device to disable IOMMUv2 usage for' + */ +extern void amd_iommu_free_device(struct pci_dev *pdev); + +/** + * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device + * @pdev: The PCI device to bind the task to + * @pasid: The PASID on the device the task should be bound to + * @task: the task to bind + * + * The function returns 0 on success or a negative value on error. + */ +extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, + struct task_struct *task); + +/** + * amd_iommu_unbind_pasid() - Unbind a PASID from its task on + * a device + * @pdev: The device of the PASID + * @pasid: The PASID to unbind + * + * When this function returns the device is no longer using the PASID + * and the PASID is no longer bound to its task. + */ +extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid); + +/** + * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed + * PRI requests + * @pdev: The PCI device the call-back should be registered for + * @cb: The call-back function + * + * The IOMMUv2 driver invokes this call-back when it is unable to + * successfully handle a PRI request. The device driver can then decide + * which PRI response the device should see. Possible return values for + * the call-back are: + * + * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device + * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device + * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device, + * the device is required to disable + * PRI when it receives this response + * + * The function returns 0 on success or negative value on error. 
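 *
 * A minimal sketch of such a call-back (editorial illustration, not part
 * of the original header; "my_invalid_ppr" is a hypothetical driver
 * function and the response codes are the macros defined just below):
 *
 *	static int my_invalid_ppr(struct pci_dev *pdev, int pasid,
 *				  unsigned long address, u16 flags)
 *	{
 *		dev_warn(&pdev->dev, "unhandled PPR: pasid %d addr %lx\n",
 *			 pasid, address);
 *		return AMD_IOMMU_INV_PRI_RSP_INVALID;
 *	}
 *
 *	err = amd_iommu_set_invalid_ppr_cb(pdev, my_invalid_ppr);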
+ */ +#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0 +#define AMD_IOMMU_INV_PRI_RSP_INVALID 1 +#define AMD_IOMMU_INV_PRI_RSP_FAIL 2 + +typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev, + int pasid, + unsigned long address, + u16); + +extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, + amd_iommu_invalid_ppr_cb cb); + +#define PPR_FAULT_EXEC (1 << 1) +#define PPR_FAULT_READ (1 << 2) +#define PPR_FAULT_WRITE (1 << 5) +#define PPR_FAULT_USER (1 << 6) +#define PPR_FAULT_RSVD (1 << 7) +#define PPR_FAULT_GN (1 << 8) + +/** + * amd_iommu_device_info() - Get information about IOMMUv2 support of a + * PCI device + * @pdev: PCI device to query information from + * @info: A pointer to an amd_iommu_device_info structure which will contain + * the information about the PCI device + * + * Returns 0 on success, negative value on error + */ + +#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */ +#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */ +#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */ +#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution + on memory pages */ +#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request + super-user privileges */ + +struct amd_iommu_device_info { + int max_pasids; + u32 flags; +}; + +extern int amd_iommu_device_info(struct pci_dev *pdev, + struct amd_iommu_device_info *info); + +/** + * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating + * a pasid context. This call-back is + * invoked when the IOMMUv2 driver needs to + * invalidate a PASID context, for example + * because the task that is bound to that + * context is about to exit. + * + * @pdev: The PCI device the call-back should be registered for + * @cb: The call-back function + */ + +typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid); + +extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev, + amd_iommu_invalidate_ctx cb); +#else /* CONFIG_AMD_IOMMU */ + +static inline int amd_iommu_detect(void) { return -ENODEV; } + +#endif /* CONFIG_AMD_IOMMU */ + +#if defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) + +/* IOMMU AVIC Function */ +extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)); + +extern int +amd_iommu_update_ga(int cpu, bool is_run, void *data); + +#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ + +static inline int +amd_iommu_register_ga_log_notifier(int (*notifier)(u32)) +{ + return 0; +} + +static inline int +amd_iommu_update_ga(int cpu, bool is_run, void *data) +{ + return 0; +} + +#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ + +#endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/amifd.h b/include/linux/amifd.h new file mode 100644 index 000000000..202a77dbe --- /dev/null +++ b/include/linux/amifd.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _AMIFD_H +#define _AMIFD_H + +/* Definitions for the Amiga floppy driver */ + +#include + +#define FD_MAX_UNITS 4 /* Max. Number of drives */ +#define FLOPPY_MAX_SECTORS 22 /* Max. 
Number of sectors per track */ + +#ifndef ASSEMBLER + +struct fd_data_type { + char *name; /* description of data type */ + int sects; /* sectors per track */ +#ifdef __STDC__ + int (*read_fkt)(int); + void (*write_fkt)(int); +#else + int (*read_fkt)(); /* read whole track */ + void (*write_fkt)(); /* write whole track */ +#endif +}; + +/* +** Floppy type descriptions +*/ + +struct fd_drive_type { + unsigned long code; /* code returned from drive */ + char *name; /* description of drive */ + unsigned int tracks; /* number of tracks */ + unsigned int heads; /* number of heads */ + unsigned int read_size; /* raw read size for one track */ + unsigned int write_size; /* raw write size for one track */ + unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */ + unsigned int precomp1; /* start track for precomp 1 */ + unsigned int precomp2; /* start track for precomp 2 */ + unsigned int step_delay; /* time (in ms) for delay after step */ + unsigned int settle_time; /* time to settle after dir change */ + unsigned int side_time; /* time needed to change sides */ +}; + +struct amiga_floppy_struct { + struct fd_drive_type *type; /* type of floppy for this unit */ + struct fd_data_type *dtype; /* type of floppy for this unit */ + int track; /* current track (-1 == unknown) */ + unsigned char *trackbuf; /* current track (kmaloc()'d */ + + int blocks; /* total # blocks on disk */ + + int changed; /* true when not known */ + int disk; /* disk in drive (-1 == unknown) */ + int motor; /* true when motor is at speed */ + int busy; /* true when drive is active */ + int dirty; /* true when trackbuf is not on disk */ + int status; /* current error code for unit */ + struct gendisk *gendisk; +}; +#endif + +#endif diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h new file mode 100644 index 000000000..9b514d05e --- /dev/null +++ b/include/linux/amifdreg.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_AMIFDREG_H +#define _LINUX_AMIFDREG_H + +/* +** CIAAPRA bits (read only) +*/ + +#define DSKRDY (0x1<<5) /* disk ready when low */ +#define DSKTRACK0 (0x1<<4) /* head at track zero when low */ +#define DSKPROT (0x1<<3) /* disk protected when low */ +#define DSKCHANGE (0x1<<2) /* low when disk removed */ + +/* +** CIAAPRB bits (read/write) +*/ + +#define DSKMOTOR (0x1<<7) /* motor on when low */ +#define DSKSEL3 (0x1<<6) /* select drive 3 when low */ +#define DSKSEL2 (0x1<<5) /* select drive 2 when low */ +#define DSKSEL1 (0x1<<4) /* select drive 1 when low */ +#define DSKSEL0 (0x1<<3) /* select drive 0 when low */ +#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */ +#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */ +#define DSKSTEP (0x1) /* pulse low to step head 1 track */ + +/* +** DSKBYTR bits (read only) +*/ + +#define DSKBYT (1<<15) /* register contains valid byte when set */ +#define DMAON (1<<14) /* disk DMA enabled */ +#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */ +#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */ +/* bits 7-0 are data */ + +/* +** ADKCON/ADKCONR bits +*/ + +#ifndef SETCLR +#define ADK_SETCLR (1<<15) /* control bit */ +#endif +#define ADK_PRECOMP1 (1<<14) /* precompensation selection */ +#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */ +#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. 
*/ +#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */ +#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */ +#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */ + +/* +** DSKLEN bits +*/ + +#define DSKLEN_DMAEN (1<<15) +#define DSKLEN_WRITE (1<<14) + +/* +** INTENA/INTREQ bits +*/ + +#define DSKINDEX (0x1<<4) /* DSKINDEX bit */ + +/* +** Misc +*/ + +#define MFM_SYNC 0x4489 /* standard MFM sync value */ + +/* Values for FD_COMMAND */ +#define FD_RECALIBRATE 0x07 /* move to track 0 */ +#define FD_SEEK 0x0F /* seek track */ +#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */ +#define FD_WRITE 0xC5 /* write with MT, MFM */ +#define FD_SENSEI 0x08 /* Sense Interrupt Status */ +#define FD_SPECIFY 0x03 /* specify HUT etc */ +#define FD_FORMAT 0x4D /* format one track */ +#define FD_VERSION 0x10 /* get version code */ +#define FD_CONFIGURE 0x13 /* configure FIFO operation */ +#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */ + +#endif /* _LINUX_AMIFDREG_H */ diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h new file mode 100644 index 000000000..d0d7d9626 --- /dev/null +++ b/include/linux/anon_inodes.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/anon_inodes.h + * + * Copyright (C) 2007 Davide Libenzi + * + */ + +#ifndef _LINUX_ANON_INODES_H +#define _LINUX_ANON_INODES_H + +struct file_operations; + +struct file *anon_inode_getfile(const char *name, + const struct file_operations *fops, + void *priv, int flags); +int anon_inode_getfd(const char *name, const struct file_operations *fops, + void *priv, int flags); + +#endif /* _LINUX_ANON_INODES_H */ + diff --git a/include/linux/apm-emulation.h b/include/linux/apm-emulation.h new file mode 100644 index 000000000..e6d800358 --- /dev/null +++ b/include/linux/apm-emulation.h @@ -0,0 +1,62 @@ +/* -*- linux-c -*- + * + * (C) 2003 zecke@handhelds.org + * + * GPL version 2 + * + * based on arch/arm/kernel/apm.c + * factor out the information needed by architectures to provide + * apm status + */ +#ifndef __LINUX_APM_EMULATION_H +#define __LINUX_APM_EMULATION_H + +#include + +/* + * This structure gets filled in by the machine specific 'get_power_status' + * implementation. Any fields which are not set default to a safe value. + */ +struct apm_power_info { + unsigned char ac_line_status; +#define APM_AC_OFFLINE 0 +#define APM_AC_ONLINE 1 +#define APM_AC_BACKUP 2 +#define APM_AC_UNKNOWN 0xff + + unsigned char battery_status; +#define APM_BATTERY_STATUS_HIGH 0 +#define APM_BATTERY_STATUS_LOW 1 +#define APM_BATTERY_STATUS_CRITICAL 2 +#define APM_BATTERY_STATUS_CHARGING 3 +#define APM_BATTERY_STATUS_NOT_PRESENT 4 +#define APM_BATTERY_STATUS_UNKNOWN 0xff + + unsigned char battery_flag; +#define APM_BATTERY_FLAG_HIGH (1 << 0) +#define APM_BATTERY_FLAG_LOW (1 << 1) +#define APM_BATTERY_FLAG_CRITICAL (1 << 2) +#define APM_BATTERY_FLAG_CHARGING (1 << 3) +#define APM_BATTERY_FLAG_NOT_PRESENT (1 << 7) +#define APM_BATTERY_FLAG_UNKNOWN 0xff + + int battery_life; + int time; + int units; +#define APM_UNITS_MINS 0 +#define APM_UNITS_SECS 1 +#define APM_UNITS_UNKNOWN -1 + +}; + +/* + * This allows machines to provide their own "apm get power status" function. 
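 *
 * A minimal sketch of a machine-specific implementation (editorial
 * illustration, not part of the original header; the function name and
 * the reported values are made up, and battery_life is a percentage):
 *
 *	static void mymachine_apm_get_power(struct apm_power_info *info)
 *	{
 *		info->ac_line_status = APM_AC_ONLINE;
 *		info->battery_status = APM_BATTERY_STATUS_CHARGING;
 *		info->battery_flag   = APM_BATTERY_FLAG_CHARGING;
 *		info->battery_life   = 80;
 *		info->time           = 120;
 *		info->units          = APM_UNITS_MINS;
 *	}
 *
 *	apm_get_power_status = mymachine_apm_get_power;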
+ */ +extern void (*apm_get_power_status)(struct apm_power_info *); + +/* + * Queue an event (APM_SYS_SUSPEND or APM_CRITICAL_SUSPEND) + */ +void apm_queue_event(apm_event_t event); + +#endif /* __LINUX_APM_EMULATION_H */ diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h new file mode 100644 index 000000000..9c3a87184 --- /dev/null +++ b/include/linux/apm_bios.h @@ -0,0 +1,101 @@ +/* + * Include file for the interface to an APM BIOS + * Copyright 1994-2001 Stephen Rothwell (sfr@canb.auug.org.au) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef _LINUX_APM_H +#define _LINUX_APM_H + +#include + + +#define APM_CS (GDT_ENTRY_APMBIOS_BASE * 8) +#define APM_CS_16 (APM_CS + 8) +#define APM_DS (APM_CS_16 + 8) + +/* Results of APM Installation Check */ +#define APM_16_BIT_SUPPORT 0x0001 +#define APM_32_BIT_SUPPORT 0x0002 +#define APM_IDLE_SLOWS_CLOCK 0x0004 +#define APM_BIOS_DISABLED 0x0008 +#define APM_BIOS_DISENGAGED 0x0010 + +/* + * Data for APM that is persistent across module unload/load + */ +struct apm_info { + struct apm_bios_info bios; + unsigned short connection_version; + int get_power_status_broken; + int get_power_status_swabinminutes; + int allow_ints; + int forbid_idle; + int realmode_power_off; + int disabled; +}; + +/* + * The APM function codes + */ +#define APM_FUNC_INST_CHECK 0x5300 +#define APM_FUNC_REAL_CONN 0x5301 +#define APM_FUNC_16BIT_CONN 0x5302 +#define APM_FUNC_32BIT_CONN 0x5303 +#define APM_FUNC_DISCONN 0x5304 +#define APM_FUNC_IDLE 0x5305 +#define APM_FUNC_BUSY 0x5306 +#define APM_FUNC_SET_STATE 0x5307 +#define APM_FUNC_ENABLE_PM 0x5308 +#define APM_FUNC_RESTORE_BIOS 0x5309 +#define APM_FUNC_GET_STATUS 0x530a +#define APM_FUNC_GET_EVENT 0x530b +#define APM_FUNC_GET_STATE 0x530c +#define APM_FUNC_ENABLE_DEV_PM 0x530d +#define APM_FUNC_VERSION 0x530e +#define APM_FUNC_ENGAGE_PM 0x530f +#define APM_FUNC_GET_CAP 0x5310 +#define APM_FUNC_RESUME_TIMER 0x5311 +#define APM_FUNC_RESUME_ON_RING 0x5312 +#define APM_FUNC_TIMER 0x5313 + +/* + * Function code for APM_FUNC_RESUME_TIMER + */ +#define APM_FUNC_DISABLE_TIMER 0 +#define APM_FUNC_GET_TIMER 1 +#define APM_FUNC_SET_TIMER 2 + +/* + * Function code for APM_FUNC_RESUME_ON_RING + */ +#define APM_FUNC_DISABLE_RING 0 +#define APM_FUNC_ENABLE_RING 1 +#define APM_FUNC_GET_RING 2 + +/* + * Function code for APM_FUNC_TIMER_STATUS + */ +#define APM_FUNC_TIMER_DISABLE 0 +#define APM_FUNC_TIMER_ENABLE 1 +#define APM_FUNC_TIMER_GET 2 + +/* + * in arch/i386/kernel/setup.c + */ +extern struct apm_info apm_info; + +/* + * This is the "All Devices" ID communicated to the BIOS + */ +#define APM_DEVICE_BALL ((apm_info.connection_version > 0x0100) ? 
\ + APM_DEVICE_ALL : APM_DEVICE_OLD_ALL) +#endif /* LINUX_APM_H */ diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h new file mode 100644 index 000000000..714186de8 --- /dev/null +++ b/include/linux/apple-gmux.h @@ -0,0 +1,50 @@ +/* + * apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro + * Copyright (C) 2015 Lukas Wunner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef LINUX_APPLE_GMUX_H +#define LINUX_APPLE_GMUX_H + +#include + +#define GMUX_ACPI_HID "APP000B" + +#if IS_ENABLED(CONFIG_APPLE_GMUX) + +/** + * apple_gmux_present() - detect if gmux is built into the machine + * + * Drivers may use this to activate quirks specific to dual GPU MacBook Pros + * and Mac Pros, e.g. for deferred probing, runtime pm and backlight. + * + * Return: %true if gmux is present and the kernel was configured + * with CONFIG_APPLE_GMUX, %false otherwise. + */ +static inline bool apple_gmux_present(void) +{ + return acpi_dev_found(GMUX_ACPI_HID); +} + +#else /* !CONFIG_APPLE_GMUX */ + +static inline bool apple_gmux_present(void) +{ + return false; +} + +#endif /* !CONFIG_APPLE_GMUX */ + +#endif /* LINUX_APPLE_GMUX_H */ diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h new file mode 100644 index 000000000..445af2e3c --- /dev/null +++ b/include/linux/apple_bl.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * apple_bl exported symbols + */ + +#ifndef _LINUX_APPLE_BL_H +#define _LINUX_APPLE_BL_H + +#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE) + +extern int apple_bl_register(void); +extern void apple_bl_unregister(void); + +#else /* !CONFIG_BACKLIGHT_APPLE */ + +static inline int apple_bl_register(void) +{ + return 0; +} + +static inline void apple_bl_unregister(void) +{ +} + +#endif /* !CONFIG_BACKLIGHT_APPLE */ + +#endif /* _LINUX_APPLE_BL_H */ diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h new file mode 100644 index 000000000..2b709416d --- /dev/null +++ b/include/linux/arch_topology.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/arch_topology.h - arch specific cpu topology information + */ +#ifndef _LINUX_ARCH_TOPOLOGY_H_ +#define _LINUX_ARCH_TOPOLOGY_H_ + +#include +#include + +void topology_normalize_cpu_scale(void); + +struct device_node; +bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); + +DECLARE_PER_CPU(unsigned long, cpu_scale); + +struct sched_domain; +static inline +unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu) +{ + return per_cpu(cpu_scale, cpu); +} + +void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); + +DECLARE_PER_CPU(unsigned long, freq_scale); + +static inline +unsigned long topology_get_freq_scale(int cpu) +{ + return per_cpu(freq_scale, cpu); +} + +#endif /* _LINUX_ARCH_TOPOLOGY_H_ */ diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h new file mode 100644 index 000000000..521ec1f2e --- /dev/null +++ b/include/linux/arm-cci.h 
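/*
 * Editorial aside for the arch_topology helpers added above (a sketch,
 * not part of the patch itself): scheduler-style code combines the two
 * per-CPU scale factors to turn a raw utilisation figure into an
 * invariant one. "util" is a hypothetical load figure, and
 * SCHED_CAPACITY_SHIFT is assumed to be the usual fixed-point shift used
 * with these scale values; passing NULL for the sched_domain is fine
 * because topology_get_cpu_scale() ignores it.
 */
static inline unsigned long example_scale_util(unsigned long util, int cpu)
{
	util = util * topology_get_freq_scale(cpu) >> SCHED_CAPACITY_SHIFT;
	return util * topology_get_cpu_scale(NULL, cpu) >> SCHED_CAPACITY_SHIFT;
}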
@@ -0,0 +1,68 @@ +/* + * CCI cache coherent interconnect support + * + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_ARM_CCI_H +#define __LINUX_ARM_CCI_H + +#include +#include + +#include + +struct device_node; + +#ifdef CONFIG_ARM_CCI +extern bool cci_probed(void); +#else +static inline bool cci_probed(void) { return false; } +#endif + +#ifdef CONFIG_ARM_CCI400_PORT_CTRL +extern int cci_ace_get_port(struct device_node *dn); +extern int cci_disable_port_by_cpu(u64 mpidr); +extern int __cci_control_port_by_device(struct device_node *dn, bool enable); +extern int __cci_control_port_by_index(u32 port, bool enable); +#else +static inline int cci_ace_get_port(struct device_node *dn) +{ + return -ENODEV; +} +static inline int cci_disable_port_by_cpu(u64 mpidr) { return -ENODEV; } +static inline int __cci_control_port_by_device(struct device_node *dn, + bool enable) +{ + return -ENODEV; +} +static inline int __cci_control_port_by_index(u32 port, bool enable) +{ + return -ENODEV; +} +#endif + +#define cci_disable_port_by_device(dev) \ + __cci_control_port_by_device(dev, false) +#define cci_enable_port_by_device(dev) \ + __cci_control_port_by_device(dev, true) +#define cci_disable_port_by_index(dev) \ + __cci_control_port_by_index(dev, false) +#define cci_enable_port_by_index(dev) \ + __cci_control_port_by_index(dev, true) + +#endif diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h new file mode 100644 index 000000000..040266891 --- /dev/null +++ b/include/linux/arm-smccc.h @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __LINUX_ARM_SMCCC_H +#define __LINUX_ARM_SMCCC_H + +#include + +/* + * This file provides common defines for ARM SMC Calling Convention as + * specified in + * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + */ + +#define ARM_SMCCC_STD_CALL _AC(0,U) +#define ARM_SMCCC_FAST_CALL _AC(1,U) +#define ARM_SMCCC_TYPE_SHIFT 31 + +#define ARM_SMCCC_SMC_32 0 +#define ARM_SMCCC_SMC_64 1 +#define ARM_SMCCC_CALL_CONV_SHIFT 30 + +#define ARM_SMCCC_OWNER_MASK 0x3F +#define ARM_SMCCC_OWNER_SHIFT 24 + +#define ARM_SMCCC_FUNC_MASK 0xFFFF + +#define ARM_SMCCC_IS_FAST_CALL(smc_val) \ + ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT)) +#define ARM_SMCCC_IS_64(smc_val) \ + ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT)) +#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK) +#define ARM_SMCCC_OWNER_NUM(smc_val) \ + (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK) + +#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \ + (((type) << ARM_SMCCC_TYPE_SHIFT) | \ + ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \ + (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \ + ((func_num) & ARM_SMCCC_FUNC_MASK)) + +#define ARM_SMCCC_OWNER_ARCH 0 +#define ARM_SMCCC_OWNER_CPU 1 +#define ARM_SMCCC_OWNER_SIP 2 +#define ARM_SMCCC_OWNER_OEM 3 +#define ARM_SMCCC_OWNER_STANDARD 4 +#define ARM_SMCCC_OWNER_TRUSTED_APP 48 +#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 +#define ARM_SMCCC_OWNER_TRUSTED_OS 50 +#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 + +#define ARM_SMCCC_QUIRK_NONE 0 +#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ + +#define ARM_SMCCC_VERSION_1_0 0x10000 +#define ARM_SMCCC_VERSION_1_1 0x10001 + +#define ARM_SMCCC_VERSION_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0) + +#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 1) + +#define ARM_SMCCC_ARCH_WORKAROUND_1 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x8000) + +#define ARM_SMCCC_ARCH_WORKAROUND_2 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x7fff) + +#define ARM_SMCCC_ARCH_WORKAROUND_3 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x3fff) + +#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 + +#ifndef __ASSEMBLY__ + +#include +#include + +enum arm_smccc_conduit { + SMCCC_CONDUIT_NONE, + SMCCC_CONDUIT_SMC, + SMCCC_CONDUIT_HVC, +}; + +/** + * arm_smccc_1_1_get_conduit() + * + * Returns the conduit to be used for SMCCCv1.1 or later. + * + * When SMCCCv1.1 is not present, returns SMCCC_CONDUIT_NONE. + */ +enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void); + +/** + * struct arm_smccc_res - Result from SMC/HVC call + * @a0-a3 result values from registers 0 to 3 + */ +struct arm_smccc_res { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; +}; + +/** + * struct arm_smccc_quirk - Contains quirk information + * @id: quirk identification + * @state: quirk specific information + * @a6: Qualcomm quirk entry for returning post-smc call contents of a6 + */ +struct arm_smccc_quirk { + int id; + union { + unsigned long a6; + } state; +}; + +/** + * __arm_smccc_smc() - make SMC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required. 
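 *
 * A usage sketch (editorial illustration, not part of the original
 * header): the arm_smccc_smc() wrapper defined further down in this file
 * supplies the trailing NULL quirk, so a simple firmware query looks like
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_smc(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res);
 *	if (res.a0 != SMCCC_RET_NOT_SUPPORTED)
 *		pr_info("SMCCC version %lx\n", res.a0);
 *
 * where SMCCC_RET_NOT_SUPPORTED is also defined later in this header.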
+ * + * This function is used to make SMC calls following SMC Calling Convention. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction. An optional + * quirk structure provides vendor specific behavior. + */ +asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res, struct arm_smccc_quirk *quirk); + +/** + * __arm_smccc_hvc() - make HVC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required. + * + * This function is used to make HVC calls following SMC Calling + * Convention. The content of the supplied param are copied to registers 0 + * to 7 prior to the HVC instruction. The return values are updated with + * the content from register 0 to 3 on return from the HVC instruction. An + * optional quirk structure provides vendor specific behavior. + */ +asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res, struct arm_smccc_quirk *quirk); + +#define arm_smccc_smc(...) __arm_smccc_smc(__VA_ARGS__, NULL) + +#define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__) + +#define arm_smccc_hvc(...) __arm_smccc_hvc(__VA_ARGS__, NULL) + +#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) + +/* SMCCC v1.1 implementation madness follows */ +#ifdef CONFIG_ARM64 + +#define SMCCC_SMC_INST "smc #0" +#define SMCCC_HVC_INST "hvc #0" + +#elif defined(CONFIG_ARM) +#include +#include + +#define SMCCC_SMC_INST __SMC(0) +#define SMCCC_HVC_INST __HVC(0) + +#endif + +#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x + +#define __count_args(...) 
\ + ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) + +#define __constraint_write_0 \ + "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_1 \ + "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_2 \ + "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) +#define __constraint_write_3 \ + "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) +#define __constraint_write_4 __constraint_write_3 +#define __constraint_write_5 __constraint_write_4 +#define __constraint_write_6 __constraint_write_5 +#define __constraint_write_7 __constraint_write_6 + +#define __constraint_read_0 +#define __constraint_read_1 +#define __constraint_read_2 +#define __constraint_read_3 +#define __constraint_read_4 "r" (r4) +#define __constraint_read_5 __constraint_read_4, "r" (r5) +#define __constraint_read_6 __constraint_read_5, "r" (r6) +#define __constraint_read_7 __constraint_read_6, "r" (r7) + +#define __declare_arg_0(a0, res) \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1"); \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3") + +#define __declare_arg_1(a0, a1, res) \ + typeof(a1) __a1 = a1; \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3") + +#define __declare_arg_2(a0, a1, a2, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2") = __a2; \ + register unsigned long r3 asm("r3") + +#define __declare_arg_3(a0, a1, a2, a3, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ + typeof(a3) __a3 = a3; \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2") = __a2; \ + register unsigned long r3 asm("r3") = __a3 + +#define __declare_arg_4(a0, a1, a2, a3, a4, res) \ + typeof(a4) __a4 = a4; \ + __declare_arg_3(a0, a1, a2, a3, res); \ + register unsigned long r4 asm("r4") = __a4 + +#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ + typeof(a5) __a5 = a5; \ + __declare_arg_4(a0, a1, a2, a3, a4, res); \ + register unsigned long r5 asm("r5") = __a5 + +#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ + typeof(a6) __a6 = a6; \ + __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ + register unsigned long r6 asm("r6") = __a6 + +#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ + typeof(a7) __a7 = a7; \ + __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ + register unsigned long r7 asm("r7") = __a7 + +#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) +#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) + +#define ___constraints(count) \ + : __constraint_write_ ## count \ + : __constraint_read_ ## count \ + : "memory" +#define __constraints(count) ___constraints(count) + +/* + * We have an output list that is not necessarily used, and GCC feels + * entitled to optimise the whole sequence away. "volatile" is what + * makes it stick. + */ +#define __arm_smccc_1_1(inst, ...) 
\ + do { \ + __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ + asm volatile(inst "\n" \ + __constraints(__count_args(__VA_ARGS__))); \ + if (___res) \ + *___res = (typeof(*___res)){r0, r1, r2, r3}; \ + } while (0) + +/* + * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. + * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make SMC calls following SMC Calling Convention v1.1. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction if not NULL. + */ +#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__) + +/* + * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. + * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make HVC calls following SMC Calling Convention v1.1. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the HVC instruction. The return values are updated with the content + * from register 0 to 3 on return from the HVC instruction if not NULL. + */ +#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) + +/* Return codes defined in ARM DEN 0070A */ +#define SMCCC_RET_SUCCESS 0 +#define SMCCC_RET_NOT_SUPPORTED -1 +#define SMCCC_RET_NOT_REQUIRED -2 + +/* + * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED. + * Used when the SMCCC conduit is not defined. The empty asm statement + * avoids compiler warnings about unused variables. + */ +#define __fail_smccc_1_1(...) \ + do { \ + __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ + asm ("" __constraints(__count_args(__VA_ARGS__))); \ + if (___res) \ + ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \ + } while (0) + +/* + * arm_smccc_1_1_invoke() - make an SMCCC v1.1 compliant call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. + * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro will make either an HVC call or an SMC call depending on the + * current SMCCC conduit. If no valid conduit is available then -1 + * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied). + * + * The return value also provides the conduit that was used. + */ +#define arm_smccc_1_1_invoke(...) 
({ \ + int method = arm_smccc_1_1_get_conduit(); \ + switch (method) { \ + case SMCCC_CONDUIT_HVC: \ + arm_smccc_1_1_hvc(__VA_ARGS__); \ + break; \ + case SMCCC_CONDUIT_SMC: \ + arm_smccc_1_1_smc(__VA_ARGS__); \ + break; \ + default: \ + __fail_smccc_1_1(__VA_ARGS__); \ + method = SMCCC_CONDUIT_NONE; \ + break; \ + } \ + method; \ + }) + +/* Paravirtualised time calls (defined by ARM DEN0057A) */ +#define ARM_SMCCC_HV_PV_TIME_FEATURES \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_STANDARD_HYP, \ + 0x20) + +#define ARM_SMCCC_HV_PV_TIME_ST \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_STANDARD_HYP, \ + 0x21) + +#endif /*__ASSEMBLY__*/ +#endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h new file mode 100644 index 000000000..942afbd54 --- /dev/null +++ b/include/linux/arm_sdei.h @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2017 Arm Ltd. +#ifndef __LINUX_ARM_SDEI_H +#define __LINUX_ARM_SDEI_H + +#include + +enum sdei_conduit_types { + CONDUIT_INVALID = 0, + CONDUIT_SMC, + CONDUIT_HVC, +}; + +#include + +/* Arch code should override this to set the entry point from firmware... */ +#ifndef sdei_arch_get_entry_point +#define sdei_arch_get_entry_point(conduit) (0) +#endif + +/* + * When an event occurs sdei_event_handler() will call a user-provided callback + * like this in NMI context on the CPU that received the event. + */ +typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg); + +/* + * Register your callback to claim an event. The event must be described + * by firmware. + */ +int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg); + +/* + * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling + * it until it succeeds. + */ +int sdei_event_unregister(u32 event_num); + +int sdei_event_enable(u32 event_num); +int sdei_event_disable(u32 event_num); + +#ifdef CONFIG_ARM_SDE_INTERFACE +/* For use by arch code when CPU hotplug notifiers are not appropriate. */ +int sdei_mask_local_cpu(void); +int sdei_unmask_local_cpu(void); +#else +static inline int sdei_mask_local_cpu(void) { return 0; } +static inline int sdei_unmask_local_cpu(void) { return 0; } +#endif /* CONFIG_ARM_SDE_INTERFACE */ + + +/* + * This struct represents an event that has been registered. The driver + * maintains a list of all events, and which ones are registered. (Private + * events have one entry in the list, but are registered on each CPU). + * A pointer to this struct is passed to firmware, and back to the event + * handler. The event handler can then use this to invoke the registered + * callback, without having to walk the list. + * + * For CPU private events, this structure is per-cpu. + */ +struct sdei_registered_event { + /* For use by arch code: */ + struct pt_regs interrupted_regs; + + sdei_event_callback *callback; + void *callback_arg; + u32 event_num; + u8 priority; +}; + +/* The arch code entry point should then call this when an event arrives. */ +int notrace sdei_event_handler(struct pt_regs *regs, + struct sdei_registered_event *arg); + +/* arch code may use this to retrieve the extra registers. 
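 *
 * A registration sketch (editorial illustration, not part of the original
 * header; "event_num" and the callback are made up, and the callback runs
 * in NMI context so it must not sleep):
 *
 *	static int my_sdei_cb(u32 event, struct pt_regs *regs, void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	err = sdei_event_register(event_num, my_sdei_cb, NULL);
 *	if (!err)
 *		err = sdei_event_enable(event_num);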
*/ +int sdei_api_event_context(u32 query, u64 *result); + +#endif /* __LINUX_ARM_SDEI_H */ diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h new file mode 100644 index 000000000..4cc402012 --- /dev/null +++ b/include/linux/ascii85.h @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2008 Intel Corporation + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + */ + +#ifndef _ASCII85_H_ +#define _ASCII85_H_ + +#include + +#define ASCII85_BUFSZ 6 + +static inline long +ascii85_encode_len(long len) +{ + return DIV_ROUND_UP(len, 4); +} + +static inline const char * +ascii85_encode(u32 in, char *out) +{ + int i; + + if (in == 0) + return "z"; + + out[5] = '\0'; + for (i = 5; i--; ) { + out[i] = '!' + in % 85; + in /= 85; + } + + return out; +} + +#endif diff --git a/include/linux/asn1.h b/include/linux/asn1.h new file mode 100644 index 000000000..eed698286 --- /dev/null +++ b/include/linux/asn1.h @@ -0,0 +1,69 @@ +/* ASN.1 BER/DER/CER encoding definitions + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_ASN1_H +#define _LINUX_ASN1_H + +/* Class */ +enum asn1_class { + ASN1_UNIV = 0, /* Universal */ + ASN1_APPL = 1, /* Application */ + ASN1_CONT = 2, /* Context */ + ASN1_PRIV = 3 /* Private */ +}; +#define ASN1_CLASS_BITS 0xc0 + + +enum asn1_method { + ASN1_PRIM = 0, /* Primitive */ + ASN1_CONS = 1 /* Constructed */ +}; +#define ASN1_CONS_BIT 0x20 + +/* Tag */ +enum asn1_tag { + ASN1_EOC = 0, /* End Of Contents or N/A */ + ASN1_BOOL = 1, /* Boolean */ + ASN1_INT = 2, /* Integer */ + ASN1_BTS = 3, /* Bit String */ + ASN1_OTS = 4, /* Octet String */ + ASN1_NULL = 5, /* Null */ + ASN1_OID = 6, /* Object Identifier */ + ASN1_ODE = 7, /* Object Description */ + ASN1_EXT = 8, /* External */ + ASN1_REAL = 9, /* Real float */ + ASN1_ENUM = 10, /* Enumerated */ + ASN1_EPDV = 11, /* Embedded PDV */ + ASN1_UTF8STR = 12, /* UTF8 String */ + ASN1_RELOID = 13, /* Relative OID */ + /* 14 - Reserved */ + /* 15 - Reserved */ + ASN1_SEQ = 16, /* Sequence and Sequence of */ + ASN1_SET = 17, /* Set and Set of */ + ASN1_NUMSTR = 18, /* Numerical String */ + ASN1_PRNSTR = 19, /* Printable String */ + ASN1_TEXSTR = 20, /* T61 String / Teletext String */ + ASN1_VIDSTR = 21, /* Videotex String */ + ASN1_IA5STR = 22, /* IA5 String */ + ASN1_UNITIM = 23, /* Universal Time */ + ASN1_GENTIM = 24, /* General Time */ + ASN1_GRASTR = 25, /* Graphic String */ + ASN1_VISSTR = 26, /* Visible String */ + ASN1_GENSTR = 27, /* General String */ + ASN1_UNISTR = 28, /* Universal String */ + ASN1_CHRSTR = 29, /* Character String */ + ASN1_BMPSTR = 30, /* BMP String */ + ASN1_LONG_TAG = 31 /* Long form tag */ +}; + +#define ASN1_INDEFINITE_LENGTH 0x80 + +#endif /* _LINUX_ASN1_H */ diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h new file mode 100644 index 000000000..ab3a6c002 --- /dev/null +++ b/include/linux/asn1_ber_bytecode.h @@ -0,0 +1,93 @@ +/* ASN.1 BER/DER/CER parsing state machine internal definitions + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_ASN1_BER_BYTECODE_H +#define _LINUX_ASN1_BER_BYTECODE_H + +#ifdef __KERNEL__ +#include +#endif +#include + +typedef int (*asn1_action_t)(void *context, + size_t hdrlen, /* In case of ANY type */ + unsigned char tag, /* In case of ANY type */ + const void *value, size_t vlen); + +struct asn1_decoder { + const unsigned char *machine; + size_t machlen; + const asn1_action_t *actions; +}; + +enum asn1_opcode { + /* The tag-matching ops come first and the odd-numbered slots + * are for OR_SKIP ops. + */ +#define ASN1_OP_MATCH__SKIP 0x01 +#define ASN1_OP_MATCH__ACT 0x02 +#define ASN1_OP_MATCH__JUMP 0x04 +#define ASN1_OP_MATCH__ANY 0x08 +#define ASN1_OP_MATCH__COND 0x10 + + ASN1_OP_MATCH = 0x00, + ASN1_OP_MATCH_OR_SKIP = 0x01, + ASN1_OP_MATCH_ACT = 0x02, + ASN1_OP_MATCH_ACT_OR_SKIP = 0x03, + ASN1_OP_MATCH_JUMP = 0x04, + ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05, + ASN1_OP_MATCH_ANY = 0x08, + ASN1_OP_MATCH_ANY_OR_SKIP = 0x09, + ASN1_OP_MATCH_ANY_ACT = 0x0a, + ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b, + /* Everything before here matches unconditionally */ + + ASN1_OP_COND_MATCH_OR_SKIP = 0x11, + ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13, + ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15, + ASN1_OP_COND_MATCH_ANY = 0x18, + ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19, + ASN1_OP_COND_MATCH_ANY_ACT = 0x1a, + ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b, + + /* Everything before here will want a tag from the data */ +#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP + + /* These are here to help fill up space */ + ASN1_OP_COND_FAIL = 0x1c, + ASN1_OP_COMPLETE = 0x1d, + ASN1_OP_ACT = 0x1e, + ASN1_OP_MAYBE_ACT = 0x1f, + + /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */ + ASN1_OP_END_SEQ = 0x20, + ASN1_OP_END_SET = 0x21, + ASN1_OP_END_SEQ_OF = 0x22, + ASN1_OP_END_SET_OF = 0x23, + ASN1_OP_END_SEQ_ACT = 0x24, + ASN1_OP_END_SET_ACT = 0x25, + ASN1_OP_END_SEQ_OF_ACT = 0x26, + ASN1_OP_END_SET_OF_ACT = 0x27, +#define ASN1_OP_END__SET 0x01 +#define ASN1_OP_END__OF 0x02 +#define ASN1_OP_END__ACT 0x04 + + ASN1_OP_RETURN = 0x28, + + ASN1_OP__NR +}; + +#define _tag(CLASS, CP, TAG) ((ASN1_##CLASS << 6) | (ASN1_##CP << 5) | ASN1_##TAG) +#define _tagn(CLASS, CP, TAG) ((ASN1_##CLASS << 6) | (ASN1_##CP << 5) | TAG) +#define _jump_target(N) (N) +#define _action(N) (N) + +#endif /* _LINUX_ASN1_BER_BYTECODE_H */ diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h new file mode 100644 index 000000000..fa2ff5bc0 --- /dev/null +++ b/include/linux/asn1_decoder.h @@ -0,0 +1,24 @@ +/* ASN.1 decoder + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
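 *
 * A usage sketch (editorial illustration, not part of the original header):
 * the bytecode and action table are normally generated from a .asn1 grammar
 * by scripts/asn1_compiler, yielding a const struct asn1_decoder such as
 * the hypothetical "my_decoder" below; the caller then feeds it raw BER/DER
 * data along with a private context for the action callbacks (ctx, data and
 * datalen are likewise placeholders here).
 *
 *	struct my_parse_ctx ctx = { };
 *	int ret;
 *
 *	ret = asn1_ber_decoder(&my_decoder, &ctx, data, datalen);
 *	if (ret < 0)
 *		return ret;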
+ */ + +#ifndef _LINUX_ASN1_DECODER_H +#define _LINUX_ASN1_DECODER_H + +#include + +struct asn1_decoder; + +extern int asn1_ber_decoder(const struct asn1_decoder *decoder, + void *context, + const unsigned char *data, + size_t datalen); + +#endif /* _LINUX_ASN1_DECODER_H */ diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h new file mode 100644 index 000000000..65e3832f9 --- /dev/null +++ b/include/linux/assoc_array.h @@ -0,0 +1,92 @@ +/* Generic associative array implementation. + * + * See Documentation/core-api/assoc_array.rst for information. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_ASSOC_ARRAY_H +#define _LINUX_ASSOC_ARRAY_H + +#ifdef CONFIG_ASSOCIATIVE_ARRAY + +#include + +#define ASSOC_ARRAY_KEY_CHUNK_SIZE BITS_PER_LONG /* Key data retrieved in chunks of this size */ + +/* + * Generic associative array. + */ +struct assoc_array { + struct assoc_array_ptr *root; /* The node at the root of the tree */ + unsigned long nr_leaves_on_tree; +}; + +/* + * Operations on objects and index keys for use by array manipulation routines. + */ +struct assoc_array_ops { + /* Method to get a chunk of an index key from caller-supplied data */ + unsigned long (*get_key_chunk)(const void *index_key, int level); + + /* Method to get a piece of an object's index key */ + unsigned long (*get_object_key_chunk)(const void *object, int level); + + /* Is this the object we're looking for? */ + bool (*compare_object)(const void *object, const void *index_key); + + /* How different is an object from an index key, to a bit position in + * their keys? (or -1 if they're the same) + */ + int (*diff_objects)(const void *object, const void *index_key); + + /* Method to free an object. */ + void (*free_object)(void *object); +}; + +/* + * Access and manipulation functions. 
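/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * driving asn1_ber_decoder(), the single entry point declared in
 * asn1_decoder.h above.  "my_decoder" stands in for a compiler-generated
 * (or hand-written, as sketched earlier) bytecode table; the wrapper name
 * is hypothetical.
 */
static int example_parse_blob(const struct asn1_decoder *my_decoder,
			      void *ctx,
			      const unsigned char *data, size_t datalen)
{
	int ret = asn1_ber_decoder(my_decoder, ctx, data, datalen);

	if (ret < 0)
		pr_err("ASN.1 parse failed: %d\n", ret);
	return ret;
}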
+ */ +struct assoc_array_edit; + +static inline void assoc_array_init(struct assoc_array *array) +{ + array->root = NULL; + array->nr_leaves_on_tree = 0; +} + +extern int assoc_array_iterate(const struct assoc_array *array, + int (*iterator)(const void *object, + void *iterator_data), + void *iterator_data); +extern void *assoc_array_find(const struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); +extern void assoc_array_destroy(struct assoc_array *array, + const struct assoc_array_ops *ops); +extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key, + void *object); +extern void assoc_array_insert_set_object(struct assoc_array_edit *edit, + void *object); +extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); +extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array, + const struct assoc_array_ops *ops); +extern void assoc_array_apply_edit(struct assoc_array_edit *edit); +extern void assoc_array_cancel_edit(struct assoc_array_edit *edit); +extern int assoc_array_gc(struct assoc_array *array, + const struct assoc_array_ops *ops, + bool (*iterator)(void *object, void *iterator_data), + void *iterator_data); + +#endif /* CONFIG_ASSOCIATIVE_ARRAY */ +#endif /* _LINUX_ASSOC_ARRAY_H */ diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h new file mode 100644 index 000000000..a00a06550 --- /dev/null +++ b/include/linux/assoc_array_priv.h @@ -0,0 +1,182 @@ +/* Private definitions for the generic associative array implementation. + * + * See Documentation/core-api/assoc_array.rst for information. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_ASSOC_ARRAY_PRIV_H +#define _LINUX_ASSOC_ARRAY_PRIV_H + +#ifdef CONFIG_ASSOCIATIVE_ARRAY + +#include + +#define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */ +#define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1) +#define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT)) +#define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1) +#define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1) +#define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG)) + +/* + * Undefined type representing a pointer with type information in the bottom + * two bits. + */ +struct assoc_array_ptr; + +/* + * An N-way node in the tree. + * + * Each slot contains one of four things: + * + * (1) Nothing (NULL). + * + * (2) A leaf object (pointer types 0). + * + * (3) A next-level node (pointer type 1, subtype 0). + * + * (4) A shortcut (pointer type 1, subtype 1). + * + * The tree is optimised for search-by-ID, but permits reasonable iteration + * also. + * + * The tree is navigated by constructing an index key consisting of an array of + * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size. + * + * The segments correspond to levels of the tree (the first segment is used at + * level 0, the second at level 1, etc.). 
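/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * the two-phase edit API declared in assoc_array.h above, in the style of
 * the keyring code.  "my_ops" is a caller-supplied assoc_array_ops
 * implementation and the wrapper name is hypothetical; IS_ERR()/PTR_ERR()
 * are assumed from <linux/err.h>.  All allocation happens while preparing
 * the edit, so applying it cannot fail.
 */
static int example_array_insert(struct assoc_array *array,
				const struct assoc_array_ops *my_ops,
				const void *index_key, void *object)
{
	struct assoc_array_edit *edit;

	edit = assoc_array_insert(array, my_ops, index_key, object);
	if (IS_ERR(edit))
		return PTR_ERR(edit);

	/* commit the preallocated changes (published via RCU) */
	assoc_array_apply_edit(edit);
	return 0;
}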
+ */ +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT]; + unsigned long nr_leaves_on_branch; +}; + +/* + * A shortcut through the index space out to where a collection of nodes/leaves + * with the same IDs live. + */ +struct assoc_array_shortcut { + struct assoc_array_ptr *back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + unsigned long index_key[]; +}; + +/* + * Preallocation cache. + */ +struct assoc_array_edit { + struct rcu_head rcu; + struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1]; +}; + +/* + * Internal tree member pointers are marked in the bottom one or two bits to + * indicate what type they are so that we don't have to look behind every + * pointer to see what it points to. + * + * We provide functions to test type annotations and to create and translate + * the annotated pointers. + */ +#define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL +#define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL /* Points to leaf (or nowhere) */ +#define ASSOC_ARRAY_PTR_META_TYPE 0x1UL /* Points to node or shortcut */ +#define ASSOC_ARRAY_PTR_SUBTYPE_MASK 0x2UL +#define ASSOC_ARRAY_PTR_NODE_SUBTYPE 0x0UL +#define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL + +static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK; +} +static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x) +{ + return !assoc_array_ptr_is_meta(x); +} +static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK; +} +static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x) +{ + return !assoc_array_ptr_is_shortcut(x); +} + +static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x) +{ + return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK); +} + +static inline +unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & + ~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK); +} +static inline +struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x) +{ + return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x); +} +static inline +struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x) +{ + return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x); +} + +static inline +struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t) +{ + return (struct assoc_array_ptr *)((unsigned long)p | t); +} +static inline +struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p) +{ + return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE); +} +static inline +struct assoc_array_ptr *assoc_array_node_to_ptr(const struct 
assoc_array_node *p) +{ + return __assoc_array_x_to_ptr( + p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE); +} +static inline +struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p) +{ + return __assoc_array_x_to_ptr( + p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE); +} + +#endif /* CONFIG_ASSOCIATIVE_ARRAY */ +#endif /* _LINUX_ASSOC_ARRAY_PRIV_H */ diff --git a/include/linux/async.h b/include/linux/async.h new file mode 100644 index 000000000..6b0226bda --- /dev/null +++ b/include/linux/async.h @@ -0,0 +1,50 @@ +/* + * async.h: Asynchronous function calls for boot performance + * + * (C) Copyright 2009 Intel Corporation + * Author: Arjan van de Ven + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#ifndef __ASYNC_H__ +#define __ASYNC_H__ + +#include +#include + +typedef u64 async_cookie_t; +typedef void (*async_func_t) (void *data, async_cookie_t cookie); +struct async_domain { + struct list_head pending; + unsigned registered:1; +}; + +/* + * domain participates in global async_synchronize_full + */ +#define ASYNC_DOMAIN(_name) \ + struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ + .registered = 1 } + +/* + * domain is free to go out of scope as soon as all pending work is + * complete, this domain does not participate in async_synchronize_full + */ +#define ASYNC_DOMAIN_EXCLUSIVE(_name) \ + struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ + .registered = 0 } + +extern async_cookie_t async_schedule(async_func_t func, void *data); +extern async_cookie_t async_schedule_domain(async_func_t func, void *data, + struct async_domain *domain); +void async_unregister_domain(struct async_domain *domain); +extern void async_synchronize_full(void); +extern void async_synchronize_full_domain(struct async_domain *domain); +extern void async_synchronize_cookie(async_cookie_t cookie); +extern void async_synchronize_cookie_domain(async_cookie_t cookie, + struct async_domain *domain); +extern bool current_is_async(void); +#endif diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h new file mode 100644 index 000000000..28e3cf146 --- /dev/null +++ b/include/linux/async_tx.h @@ -0,0 +1,208 @@ +/* + * Copyright © 2006, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ +#ifndef _ASYNC_TX_H_ +#define _ASYNC_TX_H_ +#include +#include +#include + +/* on architectures without dma-mapping capabilities we need to ensure + * that the asynchronous path compiles away + */ +#ifdef CONFIG_HAS_DMA +#define __async_inline +#else +#define __async_inline __always_inline +#endif + +/** + * dma_chan_ref - object used to manage dma channels received from the + * dmaengine core. 
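/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * scheduling work in a private domain with the async.h interface introduced
 * above.  The probe_one()/probe_all() names and my_async_domain are
 * hypothetical; an exclusive domain lets the caller wait for its own work
 * without blocking on the global async queue.
 */
static ASYNC_DOMAIN_EXCLUSIVE(my_async_domain);

static void probe_one(void *data, async_cookie_t cookie)
{
	/* runs from a worker; 'data' is the pointer passed at schedule time */
}

static void probe_all(void *devices[], int n)
{
	int i;

	for (i = 0; i < n; i++)
		async_schedule_domain(probe_one, devices[i], &my_async_domain);

	/* wait only for this domain's outstanding work */
	async_synchronize_full_domain(&my_async_domain);
}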
+ * @chan - the channel being tracked + * @node - node for the channel to be placed on async_tx_master_list + * @rcu - for list_del_rcu + * @count - number of times this channel is listed in the pool + * (for channels with multiple capabiities) + */ +struct dma_chan_ref { + struct dma_chan *chan; + struct list_head node; + struct rcu_head rcu; + atomic_t count; +}; + +/** + * async_tx_flags - modifiers for the async_* calls + * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the + * the destination address is not a source. The asynchronous case handles this + * implicitly, the synchronous case needs to zero the destination block. + * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is + * also one of the source addresses. In the synchronous case the destination + * address is an implied source, whereas the asynchronous case it must be listed + * as a source. The destination address must be the first address in the source + * array. + * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a + * dependency chain + * @ASYNC_TX_FENCE: specify that the next operation in the dependency + * chain uses this operation's result as an input + * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the + * input data. Required for rmw case. + */ +enum async_tx_flags { + ASYNC_TX_XOR_ZERO_DST = (1 << 0), + ASYNC_TX_XOR_DROP_DST = (1 << 1), + ASYNC_TX_ACK = (1 << 2), + ASYNC_TX_FENCE = (1 << 3), + ASYNC_TX_PQ_XOR_DST = (1 << 4), +}; + +/** + * struct async_submit_ctl - async_tx submission/completion modifiers + * @flags: submission modifiers + * @depend_tx: parent dependency of the current operation being submitted + * @cb_fn: callback routine to run at operation completion + * @cb_param: parameter for the callback routine + * @scribble: caller provided space for dma/page address conversions + */ +struct async_submit_ctl { + enum async_tx_flags flags; + struct dma_async_tx_descriptor *depend_tx; + dma_async_tx_callback cb_fn; + void *cb_param; + void *scribble; +}; + +#if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH) +#define async_tx_issue_pending_all dma_issue_pending_all + +/** + * async_tx_issue_pending - send pending descriptor to the hardware channel + * @tx: descriptor handle to retrieve hardware context + * + * Note: any dependent operations will have already been issued by + * async_tx_channel_switch, or (in the case of no channel switch) will + * be already pending on this channel. 
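/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * submitting one XOR offload with the async_submit_ctl modifiers described
 * above, using init_async_submit() and async_xor(), both declared further
 * down in this header.  The wrapper name, callback and scribble buffer are
 * hypothetical; raid5 parity computation follows the same pattern.
 * ASYNC_TX_XOR_ZERO_DST is used because 'dest' is not one of the sources.
 */
static struct dma_async_tx_descriptor *
example_xor(struct page *dest, struct page **srcs, int src_cnt, size_t len,
	    dma_async_tx_callback done, void *done_arg, addr_conv_t *scribble)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			  NULL, done, done_arg, scribble);
	return async_xor(dest, srcs, 0, src_cnt, len, &submit);
}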
+ */ +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + if (likely(tx)) { + struct dma_chan *chan = tx->chan; + struct dma_device *dma = chan->device; + + dma->device_issue_pending(chan); + } +} +#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL +#include +#else +#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \ + __async_tx_find_channel(dep, type) +struct dma_chan * +__async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type); +#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */ +#else +static inline void async_tx_issue_pending_all(void) +{ + do { } while (0); +} + +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + do { } while (0); +} + +static inline struct dma_chan * +async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type, struct page **dst, + int dst_count, struct page **src, int src_count, + size_t len) +{ + return NULL; +} +#endif + +/** + * async_tx_sync_epilog - actions to take if an operation is run synchronously + * @cb_fn: function to call when the transaction completes + * @cb_fn_param: parameter to pass to the callback routine + */ +static inline void +async_tx_sync_epilog(struct async_submit_ctl *submit) +{ + if (submit->cb_fn) + submit->cb_fn(submit->cb_param); +} + +typedef union { + unsigned long addr; + struct page *page; + dma_addr_t dma; +} addr_conv_t; + +static inline void +init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags, + struct dma_async_tx_descriptor *tx, + dma_async_tx_callback cb_fn, void *cb_param, + addr_conv_t *scribble) +{ + args->flags = flags; + args->depend_tx = tx; + args->cb_fn = cb_fn; + args->cb_param = cb_param; + args->scribble = scribble; +} + +void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_xor(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, enum sum_check_flags *result, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, + unsigned int src_offset, size_t len, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt, + size_t len, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt, + size_t len, enum sum_check_flags *pqres, struct page *spare, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb, + struct page **ptrs, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_raid6_datap_recov(int src_num, size_t bytes, int faila, + struct page **ptrs, struct async_submit_ctl *submit); + +void async_tx_quiesce(struct dma_async_tx_descriptor **tx); +#endif /* _ASYNC_TX_H_ */ diff --git a/include/linux/ata.h b/include/linux/ata.h new file mode 100644 index 000000000..40d150ad7 --- /dev/null +++ b/include/linux/ata.h @@ -0,0 +1,1156 @@ + +/* + * Copyright 2003-2004 Red 
Hat, Inc. All rights reserved. + * Copyright 2003-2004 Jeff Garzik + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/driver-api/libata.rst + * + * Hardware documentation available from http://www.t13.org/ + * + */ + +#ifndef __LINUX_ATA_H__ +#define __LINUX_ATA_H__ + +#include +#include +#include +#include + +/* defines only for the constants which don't work well as enums */ +#define ATA_DMA_BOUNDARY 0xffffUL +#define ATA_DMA_MASK 0xffffffffULL + +enum { + /* various global constants */ + ATA_MAX_DEVICES = 2, /* per bus/port */ + ATA_MAX_PRD = 256, /* we could make these 256/256 */ + ATA_SECT_SIZE = 512, + ATA_MAX_SECTORS_128 = 128, + ATA_MAX_SECTORS = 256, + ATA_MAX_SECTORS_1024 = 1024, + ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */ + ATA_MAX_SECTORS_TAPE = 65535, + ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */ + + ATA_ID_WORDS = 256, + ATA_ID_CONFIG = 0, + ATA_ID_CYLS = 1, + ATA_ID_HEADS = 3, + ATA_ID_SECTORS = 6, + ATA_ID_SERNO = 10, + ATA_ID_BUF_SIZE = 21, + ATA_ID_FW_REV = 23, + ATA_ID_PROD = 27, + ATA_ID_MAX_MULTSECT = 47, + ATA_ID_DWORD_IO = 48, /* before ATA-8 */ + ATA_ID_TRUSTED = 48, /* ATA-8 and later */ + ATA_ID_CAPABILITY = 49, + ATA_ID_OLD_PIO_MODES = 51, + ATA_ID_OLD_DMA_MODES = 52, + ATA_ID_FIELD_VALID = 53, + ATA_ID_CUR_CYLS = 54, + ATA_ID_CUR_HEADS = 55, + ATA_ID_CUR_SECTORS = 56, + ATA_ID_MULTSECT = 59, + ATA_ID_LBA_CAPACITY = 60, + ATA_ID_SWDMA_MODES = 62, + ATA_ID_MWDMA_MODES = 63, + ATA_ID_PIO_MODES = 64, + ATA_ID_EIDE_DMA_MIN = 65, + ATA_ID_EIDE_DMA_TIME = 66, + ATA_ID_EIDE_PIO = 67, + ATA_ID_EIDE_PIO_IORDY = 68, + ATA_ID_ADDITIONAL_SUPP = 69, + ATA_ID_QUEUE_DEPTH = 75, + ATA_ID_SATA_CAPABILITY = 76, + ATA_ID_SATA_CAPABILITY_2 = 77, + ATA_ID_FEATURE_SUPP = 78, + ATA_ID_MAJOR_VER = 80, + ATA_ID_COMMAND_SET_1 = 82, + ATA_ID_COMMAND_SET_2 = 83, + ATA_ID_CFSSE = 84, + ATA_ID_CFS_ENABLE_1 = 85, + ATA_ID_CFS_ENABLE_2 = 86, + ATA_ID_CSF_DEFAULT = 87, + ATA_ID_UDMA_MODES = 88, + ATA_ID_HW_CONFIG = 93, + ATA_ID_SPG = 98, + ATA_ID_LBA_CAPACITY_2 = 100, + ATA_ID_SECTOR_SIZE = 106, + ATA_ID_WWN = 108, + ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */ + ATA_ID_COMMAND_SET_3 = 119, + ATA_ID_COMMAND_SET_4 = 120, + ATA_ID_LAST_LUN = 126, + ATA_ID_DLF = 128, + ATA_ID_CSFO = 129, + ATA_ID_CFA_POWER = 160, + ATA_ID_CFA_KEY_MGMT = 162, + ATA_ID_CFA_MODES = 163, + ATA_ID_DATA_SET_MGMT = 169, + ATA_ID_SCT_CMD_XPORT = 206, + ATA_ID_ROT_SPEED = 217, + ATA_ID_PIO4 = (1 << 1), + + ATA_ID_SERNO_LEN = 20, + ATA_ID_FW_REV_LEN = 8, + ATA_ID_PROD_LEN = 40, + ATA_ID_WWN_LEN = 8, + + ATA_PCI_CTL_OFS = 2, + + ATA_PIO0 = (1 << 0), + ATA_PIO1 = ATA_PIO0 | (1 << 1), + ATA_PIO2 = ATA_PIO1 | (1 << 2), + ATA_PIO3 = ATA_PIO2 | (1 << 3), + ATA_PIO4 = ATA_PIO3 | (1 << 4), + ATA_PIO5 = ATA_PIO4 | (1 << 5), + 
ATA_PIO6 = ATA_PIO5 | (1 << 6), + + ATA_PIO4_ONLY = (1 << 4), + + ATA_SWDMA0 = (1 << 0), + ATA_SWDMA1 = ATA_SWDMA0 | (1 << 1), + ATA_SWDMA2 = ATA_SWDMA1 | (1 << 2), + + ATA_SWDMA2_ONLY = (1 << 2), + + ATA_MWDMA0 = (1 << 0), + ATA_MWDMA1 = ATA_MWDMA0 | (1 << 1), + ATA_MWDMA2 = ATA_MWDMA1 | (1 << 2), + ATA_MWDMA3 = ATA_MWDMA2 | (1 << 3), + ATA_MWDMA4 = ATA_MWDMA3 | (1 << 4), + + ATA_MWDMA12_ONLY = (1 << 1) | (1 << 2), + ATA_MWDMA2_ONLY = (1 << 2), + + ATA_UDMA0 = (1 << 0), + ATA_UDMA1 = ATA_UDMA0 | (1 << 1), + ATA_UDMA2 = ATA_UDMA1 | (1 << 2), + ATA_UDMA3 = ATA_UDMA2 | (1 << 3), + ATA_UDMA4 = ATA_UDMA3 | (1 << 4), + ATA_UDMA5 = ATA_UDMA4 | (1 << 5), + ATA_UDMA6 = ATA_UDMA5 | (1 << 6), + ATA_UDMA7 = ATA_UDMA6 | (1 << 7), + /* ATA_UDMA7 is just for completeness... doesn't exist (yet?). */ + + ATA_UDMA24_ONLY = (1 << 2) | (1 << 4), + + ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */ + + /* DMA-related */ + ATA_PRD_SZ = 8, + ATA_PRD_TBL_SZ = (ATA_MAX_PRD * ATA_PRD_SZ), + ATA_PRD_EOT = (1 << 31), /* end-of-table flag */ + + ATA_DMA_TABLE_OFS = 4, + ATA_DMA_STATUS = 2, + ATA_DMA_CMD = 0, + ATA_DMA_WR = (1 << 3), + ATA_DMA_START = (1 << 0), + ATA_DMA_INTR = (1 << 2), + ATA_DMA_ERR = (1 << 1), + ATA_DMA_ACTIVE = (1 << 0), + + /* bits in ATA command block registers */ + ATA_HOB = (1 << 7), /* LBA48 selector */ + ATA_NIEN = (1 << 1), /* disable-irq flag */ + ATA_LBA = (1 << 6), /* LBA28 selector */ + ATA_DEV1 = (1 << 4), /* Select Device 1 (slave) */ + ATA_DEVICE_OBS = (1 << 7) | (1 << 5), /* obs bits in dev reg */ + ATA_DEVCTL_OBS = (1 << 3), /* obsolete bit in devctl reg */ + ATA_BUSY = (1 << 7), /* BSY status bit */ + ATA_DRDY = (1 << 6), /* device ready */ + ATA_DF = (1 << 5), /* device fault */ + ATA_DSC = (1 << 4), /* drive seek complete */ + ATA_DRQ = (1 << 3), /* data request i/o */ + ATA_CORR = (1 << 2), /* corrected data error */ + ATA_SENSE = (1 << 1), /* sense code available */ + ATA_ERR = (1 << 0), /* have an error */ + ATA_SRST = (1 << 2), /* software reset */ + ATA_ICRC = (1 << 7), /* interface CRC error */ + ATA_BBK = ATA_ICRC, /* pre-EIDE: block marked bad */ + ATA_UNC = (1 << 6), /* uncorrectable media error */ + ATA_MC = (1 << 5), /* media changed */ + ATA_IDNF = (1 << 4), /* ID not found */ + ATA_MCR = (1 << 3), /* media change requested */ + ATA_ABORTED = (1 << 2), /* command aborted */ + ATA_TRK0NF = (1 << 1), /* track 0 not found */ + ATA_AMNF = (1 << 0), /* address mark not found */ + ATAPI_LFS = 0xF0, /* last failed sense */ + ATAPI_EOM = ATA_TRK0NF, /* end of media */ + ATAPI_ILI = ATA_AMNF, /* illegal length indication */ + ATAPI_IO = (1 << 1), + ATAPI_COD = (1 << 0), + + /* ATA command block registers */ + ATA_REG_DATA = 0x00, + ATA_REG_ERR = 0x01, + ATA_REG_NSECT = 0x02, + ATA_REG_LBAL = 0x03, + ATA_REG_LBAM = 0x04, + ATA_REG_LBAH = 0x05, + ATA_REG_DEVICE = 0x06, + ATA_REG_STATUS = 0x07, + + ATA_REG_FEATURE = ATA_REG_ERR, /* and their aliases */ + ATA_REG_CMD = ATA_REG_STATUS, + ATA_REG_BYTEL = ATA_REG_LBAM, + ATA_REG_BYTEH = ATA_REG_LBAH, + ATA_REG_DEVSEL = ATA_REG_DEVICE, + ATA_REG_IRQ = ATA_REG_NSECT, + + /* ATA device commands */ + ATA_CMD_DEV_RESET = 0x08, /* ATAPI device reset */ + ATA_CMD_CHK_POWER = 0xE5, /* check power mode */ + ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */ + ATA_CMD_IDLE = 0xE3, /* place in idle power mode */ + ATA_CMD_EDD = 0x90, /* execute device diagnostic */ + ATA_CMD_DOWNLOAD_MICRO = 0x92, + ATA_CMD_DOWNLOAD_MICRO_DMA = 0x93, + ATA_CMD_NOP = 0x00, + ATA_CMD_FLUSH = 0xE7, + ATA_CMD_FLUSH_EXT = 0xEA, + ATA_CMD_ID_ATA = 0xEC, + 
ATA_CMD_ID_ATAPI = 0xA1, + ATA_CMD_SERVICE = 0xA2, + ATA_CMD_READ = 0xC8, + ATA_CMD_READ_EXT = 0x25, + ATA_CMD_READ_QUEUED = 0x26, + ATA_CMD_READ_STREAM_EXT = 0x2B, + ATA_CMD_READ_STREAM_DMA_EXT = 0x2A, + ATA_CMD_WRITE = 0xCA, + ATA_CMD_WRITE_EXT = 0x35, + ATA_CMD_WRITE_QUEUED = 0x36, + ATA_CMD_WRITE_STREAM_EXT = 0x3B, + ATA_CMD_WRITE_STREAM_DMA_EXT = 0x3A, + ATA_CMD_WRITE_FUA_EXT = 0x3D, + ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E, + ATA_CMD_FPDMA_READ = 0x60, + ATA_CMD_FPDMA_WRITE = 0x61, + ATA_CMD_NCQ_NON_DATA = 0x63, + ATA_CMD_FPDMA_SEND = 0x64, + ATA_CMD_FPDMA_RECV = 0x65, + ATA_CMD_PIO_READ = 0x20, + ATA_CMD_PIO_READ_EXT = 0x24, + ATA_CMD_PIO_WRITE = 0x30, + ATA_CMD_PIO_WRITE_EXT = 0x34, + ATA_CMD_READ_MULTI = 0xC4, + ATA_CMD_READ_MULTI_EXT = 0x29, + ATA_CMD_WRITE_MULTI = 0xC5, + ATA_CMD_WRITE_MULTI_EXT = 0x39, + ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE, + ATA_CMD_SET_FEATURES = 0xEF, + ATA_CMD_SET_MULTI = 0xC6, + ATA_CMD_PACKET = 0xA0, + ATA_CMD_VERIFY = 0x40, + ATA_CMD_VERIFY_EXT = 0x42, + ATA_CMD_WRITE_UNCORR_EXT = 0x45, + ATA_CMD_STANDBYNOW1 = 0xE0, + ATA_CMD_IDLEIMMEDIATE = 0xE1, + ATA_CMD_SLEEP = 0xE6, + ATA_CMD_INIT_DEV_PARAMS = 0x91, + ATA_CMD_READ_NATIVE_MAX = 0xF8, + ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, + ATA_CMD_SET_MAX = 0xF9, + ATA_CMD_SET_MAX_EXT = 0x37, + ATA_CMD_READ_LOG_EXT = 0x2F, + ATA_CMD_WRITE_LOG_EXT = 0x3F, + ATA_CMD_READ_LOG_DMA_EXT = 0x47, + ATA_CMD_WRITE_LOG_DMA_EXT = 0x57, + ATA_CMD_TRUSTED_NONDATA = 0x5B, + ATA_CMD_TRUSTED_RCV = 0x5C, + ATA_CMD_TRUSTED_RCV_DMA = 0x5D, + ATA_CMD_TRUSTED_SND = 0x5E, + ATA_CMD_TRUSTED_SND_DMA = 0x5F, + ATA_CMD_PMP_READ = 0xE4, + ATA_CMD_PMP_READ_DMA = 0xE9, + ATA_CMD_PMP_WRITE = 0xE8, + ATA_CMD_PMP_WRITE_DMA = 0xEB, + ATA_CMD_CONF_OVERLAY = 0xB1, + ATA_CMD_SEC_SET_PASS = 0xF1, + ATA_CMD_SEC_UNLOCK = 0xF2, + ATA_CMD_SEC_ERASE_PREP = 0xF3, + ATA_CMD_SEC_ERASE_UNIT = 0xF4, + ATA_CMD_SEC_FREEZE_LOCK = 0xF5, + ATA_CMD_SEC_DISABLE_PASS = 0xF6, + ATA_CMD_CONFIG_STREAM = 0x51, + ATA_CMD_SMART = 0xB0, + ATA_CMD_MEDIA_LOCK = 0xDE, + ATA_CMD_MEDIA_UNLOCK = 0xDF, + ATA_CMD_DSM = 0x06, + ATA_CMD_CHK_MED_CRD_TYP = 0xD1, + ATA_CMD_CFA_REQ_EXT_ERR = 0x03, + ATA_CMD_CFA_WRITE_NE = 0x38, + ATA_CMD_CFA_TRANS_SECT = 0x87, + ATA_CMD_CFA_ERASE = 0xC0, + ATA_CMD_CFA_WRITE_MULT_NE = 0xCD, + ATA_CMD_REQ_SENSE_DATA = 0x0B, + ATA_CMD_SANITIZE_DEVICE = 0xB4, + ATA_CMD_ZAC_MGMT_IN = 0x4A, + ATA_CMD_ZAC_MGMT_OUT = 0x9F, + + /* marked obsolete in the ATA/ATAPI-7 spec */ + ATA_CMD_RESTORE = 0x10, + + /* Subcmds for ATA_CMD_FPDMA_RECV */ + ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 0x01, + ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 0x02, + + /* Subcmds for ATA_CMD_FPDMA_SEND */ + ATA_SUBCMD_FPDMA_SEND_DSM = 0x00, + ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02, + + /* Subcmds for ATA_CMD_NCQ_NON_DATA */ + ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0x00, + ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 0x05, + ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 0x06, + ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 0x07, + + /* Subcmds for ATA_CMD_ZAC_MGMT_IN */ + ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0x00, + + /* Subcmds for ATA_CMD_ZAC_MGMT_OUT */ + ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 0x01, + ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 0x02, + ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 0x03, + ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 0x04, + + /* READ_LOG_EXT pages */ + ATA_LOG_DIRECTORY = 0x0, + ATA_LOG_SATA_NCQ = 0x10, + ATA_LOG_NCQ_NON_DATA = 0x12, + ATA_LOG_NCQ_SEND_RECV = 0x13, + ATA_LOG_IDENTIFY_DEVICE = 0x30, + + /* Identify device log pages: */ + ATA_LOG_SECURITY = 0x06, + ATA_LOG_SATA_SETTINGS = 0x08, + 
ATA_LOG_ZONED_INFORMATION = 0x09, + + /* Identify device SATA settings log:*/ + ATA_LOG_DEVSLP_OFFSET = 0x30, + ATA_LOG_DEVSLP_SIZE = 0x08, + ATA_LOG_DEVSLP_MDAT = 0x00, + ATA_LOG_DEVSLP_MDAT_MASK = 0x1F, + ATA_LOG_DEVSLP_DETO = 0x01, + ATA_LOG_DEVSLP_VALID = 0x07, + ATA_LOG_DEVSLP_VALID_MASK = 0x80, + ATA_LOG_NCQ_PRIO_OFFSET = 0x09, + + /* NCQ send and receive log */ + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 0x04, + ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 0x08, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 0x0C, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 0x10, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = (1 << 1), + ATA_LOG_NCQ_SEND_RECV_SIZE = 0x14, + + /* NCQ Non-Data log */ + ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0x00, + ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0x00, + ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = (1 << 0), + ATA_LOG_NCQ_NON_DATA_ABORT_ALL = (1 << 1), + ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = (1 << 2), + ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = (1 << 3), + ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = (1 << 4), + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 0x1C, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = (1 << 0), + ATA_LOG_NCQ_NON_DATA_SIZE = 0x40, + + /* READ/WRITE LONG (obsolete) */ + ATA_CMD_READ_LONG = 0x22, + ATA_CMD_READ_LONG_ONCE = 0x23, + ATA_CMD_WRITE_LONG = 0x32, + ATA_CMD_WRITE_LONG_ONCE = 0x33, + + /* SETFEATURES stuff */ + SETFEATURES_XFER = 0x03, + XFER_UDMA_7 = 0x47, + XFER_UDMA_6 = 0x46, + XFER_UDMA_5 = 0x45, + XFER_UDMA_4 = 0x44, + XFER_UDMA_3 = 0x43, + XFER_UDMA_2 = 0x42, + XFER_UDMA_1 = 0x41, + XFER_UDMA_0 = 0x40, + XFER_MW_DMA_4 = 0x24, /* CFA only */ + XFER_MW_DMA_3 = 0x23, /* CFA only */ + XFER_MW_DMA_2 = 0x22, + XFER_MW_DMA_1 = 0x21, + XFER_MW_DMA_0 = 0x20, + XFER_SW_DMA_2 = 0x12, + XFER_SW_DMA_1 = 0x11, + XFER_SW_DMA_0 = 0x10, + XFER_PIO_6 = 0x0E, /* CFA only */ + XFER_PIO_5 = 0x0D, /* CFA only */ + XFER_PIO_4 = 0x0C, + XFER_PIO_3 = 0x0B, + XFER_PIO_2 = 0x0A, + XFER_PIO_1 = 0x09, + XFER_PIO_0 = 0x08, + XFER_PIO_SLOW = 0x00, + + SETFEATURES_WC_ON = 0x02, /* Enable write cache */ + SETFEATURES_WC_OFF = 0x82, /* Disable write cache */ + + SETFEATURES_RA_ON = 0xaa, /* Enable read look-ahead */ + SETFEATURES_RA_OFF = 0x55, /* Disable read look-ahead */ + + /* Enable/Disable Automatic Acoustic Management */ + SETFEATURES_AAM_ON = 0x42, + SETFEATURES_AAM_OFF = 0xC2, + + SETFEATURES_SPINUP = 0x07, /* Spin-up drive */ + SETFEATURES_SPINUP_TIMEOUT = 30000, /* 30s timeout for drive spin-up from PUIS */ + + SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */ + SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */ + + /* SETFEATURE Sector counts for SATA features */ + SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */ + SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS Auto-Activate */ + SATA_DIPM = 0x03, /* Device Initiated Power Management */ + SATA_FPDMA_IN_ORDER = 0x04, /* FPDMA in-order data delivery */ + SATA_AN = 0x05, /* Asynchronous Notification */ + SATA_SSP = 0x06, /* Software Settings Preservation */ + SATA_DEVSLP = 0x09, /* Device Sleep */ + + SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */ + + /* feature values for SET_MAX */ + ATA_SET_MAX_ADDR = 0x00, + ATA_SET_MAX_PASSWD = 0x01, + ATA_SET_MAX_LOCK = 0x02, + ATA_SET_MAX_UNLOCK = 0x03, + 
ATA_SET_MAX_FREEZE_LOCK = 0x04, + ATA_SET_MAX_PASSWD_DMA = 0x05, + ATA_SET_MAX_UNLOCK_DMA = 0x06, + + /* feature values for DEVICE CONFIGURATION OVERLAY */ + ATA_DCO_RESTORE = 0xC0, + ATA_DCO_FREEZE_LOCK = 0xC1, + ATA_DCO_IDENTIFY = 0xC2, + ATA_DCO_SET = 0xC3, + + /* feature values for SMART */ + ATA_SMART_ENABLE = 0xD8, + ATA_SMART_READ_VALUES = 0xD0, + ATA_SMART_READ_THRESHOLDS = 0xD1, + + /* feature values for Data Set Management */ + ATA_DSM_TRIM = 0x01, + + /* password used in LBA Mid / LBA High for executing SMART commands */ + ATA_SMART_LBAM_PASS = 0x4F, + ATA_SMART_LBAH_PASS = 0xC2, + + /* ATAPI stuff */ + ATAPI_PKT_DMA = (1 << 0), + ATAPI_DMADIR = (1 << 2), /* ATAPI data dir: + 0=to device, 1=to host */ + ATAPI_CDB_LEN = 16, + + /* PMP stuff */ + SATA_PMP_MAX_PORTS = 15, + SATA_PMP_CTRL_PORT = 15, + + SATA_PMP_GSCR_DWORDS = 128, + SATA_PMP_GSCR_PROD_ID = 0, + SATA_PMP_GSCR_REV = 1, + SATA_PMP_GSCR_PORT_INFO = 2, + SATA_PMP_GSCR_ERROR = 32, + SATA_PMP_GSCR_ERROR_EN = 33, + SATA_PMP_GSCR_FEAT = 64, + SATA_PMP_GSCR_FEAT_EN = 96, + + SATA_PMP_PSCR_STATUS = 0, + SATA_PMP_PSCR_ERROR = 1, + SATA_PMP_PSCR_CONTROL = 2, + + SATA_PMP_FEAT_BIST = (1 << 0), + SATA_PMP_FEAT_PMREQ = (1 << 1), + SATA_PMP_FEAT_DYNSSC = (1 << 2), + SATA_PMP_FEAT_NOTIFY = (1 << 3), + + /* cable types */ + ATA_CBL_NONE = 0, + ATA_CBL_PATA40 = 1, + ATA_CBL_PATA80 = 2, + ATA_CBL_PATA40_SHORT = 3, /* 40 wire cable to high UDMA spec */ + ATA_CBL_PATA_UNK = 4, /* don't know, maybe 80c? */ + ATA_CBL_PATA_IGN = 5, /* don't know, ignore cable handling */ + ATA_CBL_SATA = 6, + + /* SATA Status and Control Registers */ + SCR_STATUS = 0, + SCR_ERROR = 1, + SCR_CONTROL = 2, + SCR_ACTIVE = 3, + SCR_NOTIFICATION = 4, + + /* SError bits */ + SERR_DATA_RECOVERED = (1 << 0), /* recovered data error */ + SERR_COMM_RECOVERED = (1 << 1), /* recovered comm failure */ + SERR_DATA = (1 << 8), /* unrecovered data error */ + SERR_PERSISTENT = (1 << 9), /* persistent data/comm error */ + SERR_PROTOCOL = (1 << 10), /* protocol violation */ + SERR_INTERNAL = (1 << 11), /* host internal error */ + SERR_PHYRDY_CHG = (1 << 16), /* PHY RDY changed */ + SERR_PHY_INT_ERR = (1 << 17), /* PHY internal error */ + SERR_COMM_WAKE = (1 << 18), /* Comm wake */ + SERR_10B_8B_ERR = (1 << 19), /* 10b to 8b decode error */ + SERR_DISPARITY = (1 << 20), /* Disparity */ + SERR_CRC = (1 << 21), /* CRC error */ + SERR_HANDSHAKE = (1 << 22), /* Handshake error */ + SERR_LINK_SEQ_ERR = (1 << 23), /* Link sequence error */ + SERR_TRANS_ST_ERROR = (1 << 24), /* Transport state trans. 
error */ + SERR_UNRECOG_FIS = (1 << 25), /* Unrecognized FIS */ + SERR_DEV_XCHG = (1 << 26), /* device exchanged */ +}; + +enum ata_prot_flags { + /* protocol flags */ + ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */ + ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */ + ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */ + ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */ + + /* taskfile protocols */ + ATA_PROT_UNKNOWN = (u8)-1, + ATA_PROT_NODATA = 0, + ATA_PROT_PIO = ATA_PROT_FLAG_PIO, + ATA_PROT_DMA = ATA_PROT_FLAG_DMA, + ATA_PROT_NCQ_NODATA = ATA_PROT_FLAG_NCQ, + ATA_PROT_NCQ = ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ, + ATAPI_PROT_NODATA = ATA_PROT_FLAG_ATAPI, + ATAPI_PROT_PIO = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO, + ATAPI_PROT_DMA = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA, +}; + +enum ata_ioctls { + ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ + ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ +}; + +/* core structures */ + +struct ata_bmdma_prd { + __le32 addr; + __le32 flags_len; +}; + +/* + * id tests + */ +#define ata_id_is_ata(id) (((id)[ATA_ID_CONFIG] & (1 << 15)) == 0) +#define ata_id_has_lba(id) ((id)[ATA_ID_CAPABILITY] & (1 << 9)) +#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8)) +#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8)) +#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1) +#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7)) +#define ata_id_has_atapi_AN(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 5))) +#define ata_id_has_fpdma_aa(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 2))) +#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) +#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) +#define ata_id_u32(id,n) \ + (((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)])) +#define ata_id_u64(id,n) \ + ( ((u64) (id)[(n) + 3] << 48) | \ + ((u64) (id)[(n) + 2] << 32) | \ + ((u64) (id)[(n) + 1] << 16) | \ + ((u64) (id)[(n) + 0]) ) + +#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) +#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) +#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) +#define ata_id_has_ncq_autosense(id) \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)) + +static inline bool ata_id_has_hipm(const u16 *id) +{ + u16 val = id[ATA_ID_SATA_CAPABILITY]; + + if (val == 0 || val == 0xffff) + return false; + + return val & (1 << 9); +} + +static inline bool ata_id_has_dipm(const u16 *id) +{ + u16 val = id[ATA_ID_FEATURE_SUPP]; + + if (val == 0 || val == 0xffff) + return false; + + return val & (1 << 3); +} + + +static inline bool ata_id_has_fua(const u16 *id) +{ + if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFSSE] & (1 << 6); +} + +static inline bool ata_id_has_flush(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_2] & (1 << 12); +} + +static inline bool ata_id_flush_enabled(const u16 *id) +{ + if (ata_id_has_flush(id) == 0) + return false; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_2] & (1 << 12); +} + +static inline bool ata_id_has_flush_ext(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_2] & (1 << 13); +} + +static inline bool 
ata_id_flush_ext_enabled(const u16 *id) +{ + if (ata_id_has_flush_ext(id) == 0) + return false; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + /* + * some Maxtor disks have bit 13 defined incorrectly + * so check bit 10 too + */ + return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400; +} + +static inline u32 ata_id_logical_sector_size(const u16 *id) +{ + /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. + * IDENTIFY DEVICE data, word 117-118. + * 0xd000 ignores bit 13 (logical:physical > 1) + */ + if ((id[ATA_ID_SECTOR_SIZE] & 0xd000) == 0x5000) + return (((id[ATA_ID_LOGICAL_SECTOR_SIZE+1] << 16) + + id[ATA_ID_LOGICAL_SECTOR_SIZE]) * sizeof(u16)) ; + return ATA_SECT_SIZE; +} + +static inline u8 ata_id_log2_per_physical_sector(const u16 *id) +{ + /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. + * IDENTIFY DEVICE data, word 106. + * 0xe000 ignores bit 12 (logical sector > 512 bytes) + */ + if ((id[ATA_ID_SECTOR_SIZE] & 0xe000) == 0x6000) + return (id[ATA_ID_SECTOR_SIZE] & 0xf); + return 0; +} + +/* Offset of logical sectors relative to physical sectors. + * + * If device has more than one logical sector per physical sector + * (aka 512 byte emulation), vendors might offset the "sector 0" address + * so sector 63 is "naturally aligned" - e.g. FAT partition table. + * This avoids Read/Mod/Write penalties when using FAT partition table + * and updating "well aligned" (FS perspective) physical sectors on every + * transaction. + */ +static inline u16 ata_id_logical_sector_offset(const u16 *id, + u8 log2_per_phys) +{ + u16 word_209 = id[209]; + + if ((log2_per_phys > 1) && (word_209 & 0xc000) == 0x4000) { + u16 first = word_209 & 0x3fff; + if (first > 0) + return (1 << log2_per_phys) - first; + } + return 0; +} + +static inline bool ata_id_has_lba48(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + if (!ata_id_u64(id, ATA_ID_LBA_CAPACITY_2)) + return false; + return id[ATA_ID_COMMAND_SET_2] & (1 << 10); +} + +static inline bool ata_id_lba48_enabled(const u16 *id) +{ + if (ata_id_has_lba48(id) == 0) + return false; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_2] & (1 << 10); +} + +static inline bool ata_id_hpa_enabled(const u16 *id) +{ + /* Yes children, word 83 valid bits cover word 82 data */ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + /* And 87 covers 85-87 */ + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + /* Check command sets enabled as well as supported */ + if ((id[ATA_ID_CFS_ENABLE_1] & (1 << 10)) == 0) + return false; + return id[ATA_ID_COMMAND_SET_1] & (1 << 10); +} + +static inline bool ata_id_has_wcache(const u16 *id) +{ + /* Yes children, word 83 valid bits cover word 82 data */ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_1] & (1 << 5); +} + +static inline bool ata_id_has_pm(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_1] & (1 << 3); +} + +static inline bool ata_id_rahead_enabled(const u16 *id) +{ + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_1] & (1 << 6); +} + +static inline bool ata_id_wcache_enabled(const u16 *id) +{ + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_1] & (1 << 5); +} + +static inline bool ata_id_has_read_log_dma_ext(const u16 *id) +{ + /* Word 86 must have bit 15 set */ + if 
(!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + return false; + + /* READ LOG DMA EXT support can be signaled either from word 119 + * or from word 120. The format is the same for both words: Bit + * 15 must be cleared, bit 14 set and bit 3 set. + */ + if ((id[ATA_ID_COMMAND_SET_3] & 0xC008) == 0x4008 || + (id[ATA_ID_COMMAND_SET_4] & 0xC008) == 0x4008) + return true; + + return false; +} + +static inline bool ata_id_has_sense_reporting(const u16 *id) +{ + if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + return false; + return id[ATA_ID_COMMAND_SET_3] & (1 << 6); +} + +static inline bool ata_id_sense_reporting_enabled(const u16 *id) +{ + if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + return false; + return id[ATA_ID_COMMAND_SET_4] & (1 << 6); +} + +/** + * + * Word: 206 - SCT Command Transport + * 15:12 - Vendor Specific + * 11:6 - Reserved + * 5 - SCT Command Transport Data Tables supported + * 4 - SCT Command Transport Features Control supported + * 3 - SCT Command Transport Error Recovery Control supported + * 2 - SCT Command Transport Write Same supported + * 1 - SCT Command Transport Long Sector Access supported + * 0 - SCT Command Transport supported + */ +static inline bool ata_id_sct_data_tables(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false; +} + +static inline bool ata_id_sct_features_ctrl(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? true : false; +} + +static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false; +} + +static inline bool ata_id_sct_long_sector_access(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false; +} + +static inline bool ata_id_sct_supported(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false; +} + +/** + * ata_id_major_version - get ATA level of drive + * @id: Identify data + * + * Caveats: + * ATA-1 considers identify optional + * ATA-2 introduces mandatory identify + * ATA-3 introduces word 80 and accurate reporting + * + * The practical impact of this is that ata_id_major_version cannot + * reliably report on drives below ATA3. + */ + +static inline unsigned int ata_id_major_version(const u16 *id) +{ + unsigned int mver; + + if (id[ATA_ID_MAJOR_VER] == 0xFFFF) + return 0; + + for (mver = 14; mver >= 1; mver--) + if (id[ATA_ID_MAJOR_VER] & (1 << mver)) + break; + return mver; +} + +static inline bool ata_id_is_sata(const u16 *id) +{ + /* + * See if word 93 is 0 AND drive is at least ATA-5 compatible + * verifying that word 80 by casting it to a signed type -- + * this trick allows us to filter out the reserved values of + * 0x0000 and 0xffff along with the earlier ATA revisions... 
+ */ + if (id[ATA_ID_HW_CONFIG] == 0 && (short)id[ATA_ID_MAJOR_VER] >= 0x0020) + return true; + return false; +} + +static inline bool ata_id_has_tpm(const u16 *id) +{ + /* The TPM bits are only valid on ATA8 */ + if (ata_id_major_version(id) < 8) + return false; + if ((id[48] & 0xC000) != 0x4000) + return false; + return id[48] & (1 << 0); +} + +static inline bool ata_id_has_dword_io(const u16 *id) +{ + /* ATA 8 reuses this flag for "trusted" computing */ + if (ata_id_major_version(id) > 7) + return false; + return id[ATA_ID_DWORD_IO] & (1 << 0); +} + +static inline bool ata_id_has_trusted(const u16 *id) +{ + if (ata_id_major_version(id) <= 7) + return false; + return id[ATA_ID_TRUSTED] & (1 << 0); +} + +static inline bool ata_id_has_unload(const u16 *id) +{ + if (ata_id_major_version(id) >= 7 && + (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 && + id[ATA_ID_CFSSE] & (1 << 13)) + return true; + return false; +} + +static inline bool ata_id_has_wwn(const u16 *id) +{ + return (id[ATA_ID_CSF_DEFAULT] & 0xC100) == 0x4100; +} + +static inline int ata_id_form_factor(const u16 *id) +{ + u16 val = id[168]; + + if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) + return 0; + + val &= 0xf; + + if (val > 5) + return 0; + + return val; +} + +static inline int ata_id_rotation_rate(const u16 *id) +{ + u16 val = id[217]; + + if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) + return 0; + + if (val > 1 && val < 0x401) + return 0; + + return val; +} + +static inline bool ata_id_has_ncq_send_and_recv(const u16 *id) +{ + return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6); +} + +static inline bool ata_id_has_ncq_non_data(const u16 *id) +{ + return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5); +} + +static inline bool ata_id_has_ncq_prio(const u16 *id) +{ + return id[ATA_ID_SATA_CAPABILITY] & BIT(12); +} + +static inline bool ata_id_has_trim(const u16 *id) +{ + if (ata_id_major_version(id) >= 7 && + (id[ATA_ID_DATA_SET_MGMT] & 1)) + return true; + return false; +} + +static inline bool ata_id_has_zero_after_trim(const u16 *id) +{ + /* DSM supported, deterministic read, and read zero after trim set */ + if (ata_id_has_trim(id) && + (id[ATA_ID_ADDITIONAL_SUPP] & 0x4020) == 0x4020) + return true; + + return false; +} + +static inline bool ata_id_current_chs_valid(const u16 *id) +{ + /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command + has not been issued to the device then the values of + id[ATA_ID_CUR_CYLS] to id[ATA_ID_CUR_SECTORS] are vendor specific. */ + return (id[ATA_ID_FIELD_VALID] & 1) && /* Current translation valid */ + id[ATA_ID_CUR_CYLS] && /* cylinders in current translation */ + id[ATA_ID_CUR_HEADS] && /* heads in current translation */ + id[ATA_ID_CUR_HEADS] <= 16 && + id[ATA_ID_CUR_SECTORS]; /* sectors in current translation */ +} + +static inline bool ata_id_is_cfa(const u16 *id) +{ + if ((id[ATA_ID_CONFIG] == 0x848A) || /* Traditional CF */ + (id[ATA_ID_CONFIG] == 0x844A)) /* Delkin Devices CF */ + return true; + /* + * CF specs don't require specific value in the word 0 anymore and yet + * they forbid to report the ATA version in the word 80 and require the + * CFA feature set support to be indicated in the word 83 in this case. + * Unfortunately, some cards only follow either of this requirements, + * and while those that don't indicate CFA feature support need some + * sort of quirk list, it seems impractical for the ones that do... 
+ */ + return (id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004; +} + +static inline bool ata_id_is_ssd(const u16 *id) +{ + return id[ATA_ID_ROT_SPEED] == 0x01; +} + +static inline u8 ata_id_zoned_cap(const u16 *id) +{ + return (id[ATA_ID_ADDITIONAL_SUPP] & 0x3); +} + +static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio) +{ + /* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */ + if (pio > 4 && ata_id_is_cfa(id)) + return false; + /* For PIO3 and higher it is mandatory. */ + if (pio > 2) + return true; + /* Turn it on when possible. */ + return ata_id_has_iordy(id); +} + +static inline bool ata_drive_40wire(const u16 *dev_id) +{ + if (ata_id_is_sata(dev_id)) + return false; /* SATA */ + if ((dev_id[ATA_ID_HW_CONFIG] & 0xE000) == 0x6000) + return false; /* 80 wire */ + return true; +} + +static inline bool ata_drive_40wire_relaxed(const u16 *dev_id) +{ + if ((dev_id[ATA_ID_HW_CONFIG] & 0x2000) == 0x2000) + return false; /* 80 wire */ + return true; +} + +static inline int atapi_cdb_len(const u16 *dev_id) +{ + u16 tmp = dev_id[ATA_ID_CONFIG] & 0x3; + switch (tmp) { + case 0: return 12; + case 1: return 16; + default: return -1; + } +} + +static inline int atapi_command_packet_set(const u16 *dev_id) +{ + return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; +} + +static inline bool atapi_id_dmadir(const u16 *dev_id) +{ + return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000); +} + +/* + * ata_id_is_lba_capacity_ok() performs a sanity check on + * the claimed LBA capacity value for the device. + * + * Returns 1 if LBA capacity looks sensible, 0 otherwise. + * + * It is called only once for each device. + */ +static inline bool ata_id_is_lba_capacity_ok(u16 *id) +{ + unsigned long lba_sects, chs_sects, head, tail; + + /* No non-LBA info .. so valid! */ + if (id[ATA_ID_CYLS] == 0) + return true; + + lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY); + + /* + * The ATA spec tells large drives to return + * C/H/S = 16383/16/63 independent of their size. + * Some drives can be jumpered to use 15 heads instead of 16. + * Some drives can be jumpered to use 4092 cyls instead of 16383. 
+ */ + if ((id[ATA_ID_CYLS] == 16383 || + (id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) && + id[ATA_ID_SECTORS] == 63 && + (id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) && + (lba_sects >= 16383 * 63 * id[ATA_ID_HEADS])) + return true; + + chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS]; + + /* perform a rough sanity check on lba_sects: within 10% is OK */ + if (lba_sects - chs_sects < chs_sects/10) + return true; + + /* some drives have the word order reversed */ + head = (lba_sects >> 16) & 0xffff; + tail = lba_sects & 0xffff; + lba_sects = head | (tail << 16); + + if (lba_sects - chs_sects < chs_sects/10) { + *(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects); + return true; /* LBA capacity is (now) good */ + } + + return false; /* LBA capacity value may be bad */ +} + +static inline void ata_id_to_hd_driveid(u16 *id) +{ +#ifdef __BIG_ENDIAN + /* accessed in struct hd_driveid as 8-bit values */ + id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]); + id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]); + id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]); + id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]); + id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]); + + /* as 32-bit values */ + *(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY); + *(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG); + + /* as 64-bit value */ + *(u64 *)&id[ATA_ID_LBA_CAPACITY_2] = + ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); +#endif +} + +static inline bool ata_ok(u8 status) +{ + return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) + == ATA_DRDY); +} + +static inline bool lba_28_ok(u64 block, u32 n_block) +{ + /* check the ending block number: must be LESS THAN 0x0fffffff */ + return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= ATA_MAX_SECTORS); +} + +static inline bool lba_48_ok(u64 block, u32 n_block) +{ + /* check the ending block number */ + return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= ATA_MAX_SECTORS_LBA48); +} + +#define sata_pmp_gscr_vendor(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff) +#define sata_pmp_gscr_devid(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] >> 16) +#define sata_pmp_gscr_rev(gscr) (((gscr)[SATA_PMP_GSCR_REV] >> 8) & 0xff) +#define sata_pmp_gscr_ports(gscr) ((gscr)[SATA_PMP_GSCR_PORT_INFO] & 0xf) + +#endif /* __LINUX_ATA_H__ */ diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h new file mode 100644 index 000000000..ff2120215 --- /dev/null +++ b/include/linux/ata_platform.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_ATA_PLATFORM_H +#define __LINUX_ATA_PLATFORM_H + +struct pata_platform_info { + /* + * I/O port shift, for platforms with ports that are + * constantly spaced and need larger than the 1-byte + * spacing used by ata_std_ports(). 
+ */ + unsigned int ioport_shift; +}; + +struct scsi_host_template; + +extern int __pata_platform_probe(struct device *dev, + struct resource *io_res, + struct resource *ctl_res, + struct resource *irq_res, + unsigned int ioport_shift, + int __pio_mask, + struct scsi_host_template *sht); + +/* + * Marvell SATA private data + */ +struct mv_sata_platform_data { + int n_ports; /* number of sata ports */ +}; + +#endif /* __LINUX_ATA_PLATFORM_H */ diff --git a/include/linux/atalk.h b/include/linux/atalk.h new file mode 100644 index 000000000..f6034ba77 --- /dev/null +++ b/include/linux/atalk.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_ATALK_H__ +#define __LINUX_ATALK_H__ + + +#include +#include + +struct atalk_route { + struct net_device *dev; + struct atalk_addr target; + struct atalk_addr gateway; + int flags; + struct atalk_route *next; +}; + +/** + * struct atalk_iface - AppleTalk Interface + * @dev - Network device associated with this interface + * @address - Our address + * @status - What are we doing? + * @nets - Associated direct netrange + * @next - next element in the list of interfaces + */ +struct atalk_iface { + struct net_device *dev; + struct atalk_addr address; + int status; +#define ATIF_PROBE 1 /* Probing for an address */ +#define ATIF_PROBE_FAIL 2 /* Probe collided */ + struct atalk_netrange nets; + struct atalk_iface *next; +}; + +struct atalk_sock { + /* struct sock has to be the first member of atalk_sock */ + struct sock sk; + __be16 dest_net; + __be16 src_net; + unsigned char dest_node; + unsigned char src_node; + unsigned char dest_port; + unsigned char src_port; +}; + +static inline struct atalk_sock *at_sk(struct sock *sk) +{ + return (struct atalk_sock *)sk; +} + +struct ddpehdr { + __be16 deh_len_hops; /* lower 10 bits are length, next 4 - hops */ + __be16 deh_sum; + __be16 deh_dnet; + __be16 deh_snet; + __u8 deh_dnode; + __u8 deh_snode; + __u8 deh_dport; + __u8 deh_sport; + /* And netatalk apps expect to stick the type in themselves */ +}; + +static __inline__ struct ddpehdr *ddp_hdr(struct sk_buff *skb) +{ + return (struct ddpehdr *)skb_transport_header(skb); +} + +/* AppleTalk AARP headers */ +struct elapaarp { + __be16 hw_type; +#define AARP_HW_TYPE_ETHERNET 1 +#define AARP_HW_TYPE_TOKENRING 2 + __be16 pa_type; + __u8 hw_len; + __u8 pa_len; +#define AARP_PA_ALEN 4 + __be16 function; +#define AARP_REQUEST 1 +#define AARP_REPLY 2 +#define AARP_PROBE 3 + __u8 hw_src[ETH_ALEN]; + __u8 pa_src_zero; + __be16 pa_src_net; + __u8 pa_src_node; + __u8 hw_dst[ETH_ALEN]; + __u8 pa_dst_zero; + __be16 pa_dst_net; + __u8 pa_dst_node; +} __attribute__ ((packed)); + +static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb) +{ + return (struct elapaarp *)skb_transport_header(skb); +} + +/* Not specified - how long till we drop a resolved entry */ +#define AARP_EXPIRY_TIME (5 * 60 * HZ) +/* Size of hash table */ +#define AARP_HASH_SIZE 16 +/* Fast retransmission timer when resolving */ +#define AARP_TICK_TIME (HZ / 5) +/* Send 10 requests then give up (2 seconds) */ +#define AARP_RETRANSMIT_LIMIT 10 +/* + * Some value bigger than total retransmit time + a bit for last reply to + * appear and to stop continual requests + */ +#define AARP_RESOLVE_TIME (10 * HZ) + +extern struct datalink_proto *ddp_dl, *aarp_dl; +extern int aarp_proto_init(void); + +/* Inter module exports */ + +/* Give a device find its atif control structure */ +#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK) +static inline struct atalk_iface *atalk_find_dev(struct 
net_device *dev) +{ + return dev->atalk_ptr; +} +#endif + +extern struct atalk_addr *atalk_find_dev_addr(struct net_device *dev); +extern struct net_device *atrtr_get_dev(struct atalk_addr *sa); +extern int aarp_send_ddp(struct net_device *dev, + struct sk_buff *skb, + struct atalk_addr *sa, void *hwaddr); +extern void aarp_device_down(struct net_device *dev); +extern void aarp_probe_network(struct atalk_iface *atif); +extern int aarp_proxy_probe_network(struct atalk_iface *atif, + struct atalk_addr *sa); +extern void aarp_proxy_remove(struct net_device *dev, + struct atalk_addr *sa); + +extern void aarp_cleanup_module(void); + +extern struct hlist_head atalk_sockets; +extern rwlock_t atalk_sockets_lock; + +extern struct atalk_route *atalk_routes; +extern rwlock_t atalk_routes_lock; + +extern struct atalk_iface *atalk_interfaces; +extern rwlock_t atalk_interfaces_lock; + +extern struct atalk_route atrtr_default; + +struct aarp_iter_state { + int bucket; + struct aarp_entry **table; +}; + +extern const struct seq_operations aarp_seq_ops; + +extern int sysctl_aarp_expiry_time; +extern int sysctl_aarp_tick_time; +extern int sysctl_aarp_retransmit_limit; +extern int sysctl_aarp_resolve_time; + +#ifdef CONFIG_SYSCTL +extern int atalk_register_sysctl(void); +extern void atalk_unregister_sysctl(void); +#else +static inline int atalk_register_sysctl(void) +{ + return 0; +} +static inline void atalk_unregister_sysctl(void) +{ +} +#endif + +#ifdef CONFIG_PROC_FS +extern int atalk_proc_init(void); +extern void atalk_proc_exit(void); +#else +static inline int atalk_proc_init(void) +{ + return 0; +} +static inline void atalk_proc_exit(void) +{ +} +#endif /* CONFIG_PROC_FS */ + +#endif /* __LINUX_ATALK_H__ */ diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h new file mode 100644 index 000000000..76860a461 --- /dev/null +++ b/include/linux/ath9k_platform.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2009 Gabor Juhos + * Copyright (c) 2009 Imre Kaloz + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
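A minimal decoding sketch for the deh_len_hops field of struct ddpehdr in include/linux/atalk.h above: per the field comment, the lower 10 bits carry the DDP datagram length and the next 4 bits the hop count, stored big-endian on the wire. The demo_* helpers are hypothetical, not part of the header, and assume a kernel context where ntohs() is available.

static inline u16 demo_ddp_len(const struct ddpehdr *ddp)
{
	/* lower 10 bits of the host-order value */
	return ntohs(ddp->deh_len_hops) & 0x03ff;
}

static inline u8 demo_ddp_hops(const struct ddpehdr *ddp)
{
	/* next 4 bits above the length */
	return (ntohs(ddp->deh_len_hops) >> 10) & 0x0f;
}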
+ */ + +#ifndef _LINUX_ATH9K_PLATFORM_H +#define _LINUX_ATH9K_PLATFORM_H + +#define ATH9K_PLAT_EEP_MAX_WORDS 2048 + +struct ath9k_platform_data { + const char *eeprom_name; + + u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS]; + u8 *macaddr; + + int led_pin; + u32 gpio_mask; + u32 gpio_val; + + u32 bt_active_pin; + u32 bt_priority_pin; + u32 wlan_active_pin; + + bool endian_check; + bool is_clk_25mhz; + bool tx_gain_buffalo; + bool disable_2ghz; + bool disable_5ghz; + bool led_active_high; + + int (*get_mac_revision)(void); + int (*external_reset)(void); + + bool use_eeprom; +}; + +#endif /* _LINUX_ATH9K_PLATFORM_H */ diff --git a/include/linux/atm.h b/include/linux/atm.h new file mode 100644 index 000000000..4b50fd0a6 --- /dev/null +++ b/include/linux/atm.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* atm.h - general ATM declarations */ +#ifndef _LINUX_ATM_H +#define _LINUX_ATM_H + +#include + +#ifdef CONFIG_COMPAT +#include +struct compat_atmif_sioc { + int number; + int length; + compat_uptr_t arg; +}; +#endif +#endif diff --git a/include/linux/atm_suni.h b/include/linux/atm_suni.h new file mode 100644 index 000000000..84f3aab54 --- /dev/null +++ b/include/linux/atm_suni.h @@ -0,0 +1,12 @@ +/* atm_suni.h - Driver-specific declarations of the SUNI driver (for use by + driver-specific utilities) */ + +/* Written 1998,2000 by Werner Almesberger, EPFL ICA */ + + +#ifndef LINUX_ATM_SUNI_H +#define LINUX_ATM_SUNI_H + +/* everything obsoleted */ + +#endif diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h new file mode 100644 index 000000000..c8ecf6f68 --- /dev/null +++ b/include/linux/atm_tcp.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* atm_tcp.h - Driver-specific declarations of the ATMTCP driver (for use by + driver-specific utilities) */ + +/* Written 1997-2000 by Werner Almesberger, EPFL LRC/ICA */ + +#ifndef LINUX_ATM_TCP_H +#define LINUX_ATM_TCP_H + +#include + + +struct atm_tcp_ops { + int (*attach)(struct atm_vcc *vcc,int itf); + int (*create_persistent)(int itf); + int (*remove_persistent)(int itf); + struct module *owner; +}; + +extern struct atm_tcp_ops atm_tcp_ops; + +#endif diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h new file mode 100644 index 000000000..8124815eb --- /dev/null +++ b/include/linux/atmdev.h @@ -0,0 +1,334 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* atmdev.h - ATM device driver declarations and various related items */ +#ifndef LINUX_ATMDEV_H +#define LINUX_ATMDEV_H + + +#include /* wait_queue_head_t */ +#include /* struct timeval */ +#include +#include +#include /* struct sk_buff */ +#include +#include +#include +#include +#include + +#ifdef CONFIG_PROC_FS +#include + +extern struct proc_dir_entry *atm_proc_root; +#endif + +#ifdef CONFIG_COMPAT +#include +struct compat_atm_iobuf { + int length; + compat_uptr_t buffer; +}; +#endif + +struct k_atm_aal_stats { +#define __HANDLE_ITEM(i) atomic_t i + __AAL_STAT_ITEMS +#undef __HANDLE_ITEM +}; + + +struct k_atm_dev_stats { + struct k_atm_aal_stats aal0; + struct k_atm_aal_stats aal34; + struct k_atm_aal_stats aal5; +}; + +struct device; + +enum { + ATM_VF_ADDR, /* Address is in use. Set by anybody, cleared + by device driver. */ + ATM_VF_READY, /* VC is ready to transfer data. Set by device + driver, cleared by anybody. 
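The struct k_atm_aal_stats definition in include/linux/atmdev.h above relies on the __HANDLE_ITEM() pattern: the same item list is expanded under different definitions of __HANDLE_ITEM, once to declare the counters and again wherever the ATM core needs to walk them. A reduced sketch of that technique; __DEMO_STAT_ITEMS, demo_stats and demo_stats_copy() are stand-ins, the real item list is __AAL_STAT_ITEMS from the UAPI header.

#define __DEMO_STAT_ITEMS \
	__HANDLE_ITEM(tx); \
	__HANDLE_ITEM(rx); \
	__HANDLE_ITEM(rx_err);

/* expansion 1: one atomic_t member per item */
struct demo_stats {
#define __HANDLE_ITEM(i) atomic_t i
	__DEMO_STAT_ITEMS
#undef __HANDLE_ITEM
};

/* expansion 2: reuse the same list to copy the counters out */
static void demo_stats_copy(struct demo_stats *to, struct demo_stats *from)
{
#define __HANDLE_ITEM(i) atomic_set(&to->i, atomic_read(&from->i))
	__DEMO_STAT_ITEMS
#undef __HANDLE_ITEM
}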
*/ + ATM_VF_PARTIAL, /* resources are bound to PVC (partial PVC + setup), controlled by socket layer */ + ATM_VF_REGIS, /* registered with demon, controlled by SVC + socket layer */ + ATM_VF_BOUND, /* local SAP is set, controlled by SVC socket + layer */ + ATM_VF_RELEASED, /* demon has indicated/requested release, + controlled by SVC socket layer */ + ATM_VF_HASQOS, /* QOS parameters have been set */ + ATM_VF_LISTEN, /* socket is used for listening */ + ATM_VF_META, /* SVC socket isn't used for normal data + traffic and doesn't depend on signaling + to be available */ + ATM_VF_SESSION, /* VCC is p2mp session control descriptor */ + ATM_VF_HASSAP, /* SAP has been set */ + ATM_VF_CLOSE, /* asynchronous close - treat like VF_RELEASED*/ + ATM_VF_WAITING, /* waiting for reply from sigd */ + ATM_VF_IS_CLIP, /* in use by CLIP protocol */ +}; + + +#define ATM_VF2VS(flags) \ + (test_bit(ATM_VF_READY,&(flags)) ? ATM_VS_CONNECTED : \ + test_bit(ATM_VF_RELEASED,&(flags)) ? ATM_VS_CLOSING : \ + test_bit(ATM_VF_LISTEN,&(flags)) ? ATM_VS_LISTEN : \ + test_bit(ATM_VF_REGIS,&(flags)) ? ATM_VS_INUSE : \ + test_bit(ATM_VF_BOUND,&(flags)) ? ATM_VS_BOUND : ATM_VS_IDLE) + + +enum { + ATM_DF_REMOVED, /* device was removed from atm_devs list */ +}; + + +#define ATM_PHY_SIG_LOST 0 /* no carrier/light */ +#define ATM_PHY_SIG_UNKNOWN 1 /* carrier/light status is unknown */ +#define ATM_PHY_SIG_FOUND 2 /* carrier/light okay */ + +#define ATM_ATMOPT_CLP 1 /* set CLP bit */ + +struct atm_vcc { + /* struct sock has to be the first member of atm_vcc */ + struct sock sk; + unsigned long flags; /* VCC flags (ATM_VF_*) */ + short vpi; /* VPI and VCI (types must be equal */ + /* with sockaddr) */ + int vci; + unsigned long aal_options; /* AAL layer options */ + unsigned long atm_options; /* ATM layer options */ + struct atm_dev *dev; /* device back pointer */ + struct atm_qos qos; /* QOS */ + struct atm_sap sap; /* SAP */ + void (*release_cb)(struct atm_vcc *vcc); /* release_sock callback */ + void (*push)(struct atm_vcc *vcc,struct sk_buff *skb); + void (*pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* optional */ + int (*push_oam)(struct atm_vcc *vcc,void *cell); + int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); + void *dev_data; /* per-device data */ + void *proto_data; /* per-protocol data */ + struct k_atm_aal_stats *stats; /* pointer to AAL stats group */ + struct module *owner; /* owner of ->push function */ + /* SVC part --- may move later ------------------------------------- */ + short itf; /* interface number */ + struct sockaddr_atmsvc local; + struct sockaddr_atmsvc remote; + /* Multipoint part ------------------------------------------------- */ + struct atm_vcc *session; /* session VCC descriptor */ + /* Other stuff ----------------------------------------------------- */ + void *user_back; /* user backlink - not touched by */ + /* native ATM stack. Currently used */ + /* by CLIP and sch_atm. 
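A sketch of how a device driver typically drives the ATM_VF_* flag bits listed above; flags is the unsigned long in struct atm_vcc and is manipulated with the usual bit helpers. demo_open()/demo_close() are hypothetical driver callbacks and all hardware programming is elided.

static int demo_open(struct atm_vcc *vcc)
{
	set_bit(ATM_VF_ADDR, &vcc->flags);	/* VPI/VCI now in use */
	/* ... program the hardware channel ... */
	set_bit(ATM_VF_READY, &vcc->flags);	/* data may flow */
	return 0;
}

static void demo_close(struct atm_vcc *vcc)
{
	clear_bit(ATM_VF_READY, &vcc->flags);
	/* ... drain and tear down the hardware channel ... */
	clear_bit(ATM_VF_ADDR, &vcc->flags);
}

With ATM_VF_READY set, the ATM_VF2VS() macro above maps the flag word to ATM_VS_CONNECTED.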
*/ +}; + +static inline struct atm_vcc *atm_sk(struct sock *sk) +{ + return (struct atm_vcc *)sk; +} + +static inline struct atm_vcc *ATM_SD(struct socket *sock) +{ + return atm_sk(sock->sk); +} + +static inline struct sock *sk_atm(struct atm_vcc *vcc) +{ + return (struct sock *)vcc; +} + +struct atm_dev_addr { + struct sockaddr_atmsvc addr; /* ATM address */ + struct list_head entry; /* next address */ +}; + +enum atm_addr_type_t { ATM_ADDR_LOCAL, ATM_ADDR_LECS }; + +struct atm_dev { + const struct atmdev_ops *ops; /* device operations; NULL if unused */ + const struct atmphy_ops *phy; /* PHY operations, may be undefined */ + /* (NULL) */ + const char *type; /* device type name */ + int number; /* device index */ + void *dev_data; /* per-device data */ + void *phy_data; /* private PHY date */ + unsigned long flags; /* device flags (ATM_DF_*) */ + struct list_head local; /* local ATM addresses */ + struct list_head lecs; /* LECS ATM addresses learned via ILMI */ + unsigned char esi[ESI_LEN]; /* ESI ("MAC" addr) */ + struct atm_cirange ci_range; /* VPI/VCI range */ + struct k_atm_dev_stats stats; /* statistics */ + char signal; /* signal status (ATM_PHY_SIG_*) */ + int link_rate; /* link rate (default: OC3) */ + refcount_t refcnt; /* reference count */ + spinlock_t lock; /* protect internal members */ +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_entry; /* proc entry */ + char *proc_name; /* proc entry name */ +#endif + struct device class_dev; /* sysfs device */ + struct list_head dev_list; /* linkage */ +}; + + +/* OF: send_Oam Flags */ + +#define ATM_OF_IMMED 1 /* Attempt immediate delivery */ +#define ATM_OF_INRATE 2 /* Attempt in-rate delivery */ + + +/* + * ioctl, getsockopt, and setsockopt are optional and can be set to NULL. + */ + +struct atmdev_ops { /* only send is required */ + void (*dev_close)(struct atm_dev *dev); + int (*open)(struct atm_vcc *vcc); + void (*close)(struct atm_vcc *vcc); + int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg); +#ifdef CONFIG_COMPAT + int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd, + void __user *arg); +#endif + int (*getsockopt)(struct atm_vcc *vcc,int level,int optname, + void __user *optval,int optlen); + int (*setsockopt)(struct atm_vcc *vcc,int level,int optname, + void __user *optval,unsigned int optlen); + int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); + int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); + void (*phy_put)(struct atm_dev *dev,unsigned char value, + unsigned long addr); + unsigned char (*phy_get)(struct atm_dev *dev,unsigned long addr); + int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags); + int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page); + struct module *owner; +}; + +struct atmphy_ops { + int (*start)(struct atm_dev *dev); + int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg); + void (*interrupt)(struct atm_dev *dev); + int (*stop)(struct atm_dev *dev); +}; + +struct atm_skb_data { + struct atm_vcc *vcc; /* ATM VCC */ + unsigned long atm_options; /* ATM layer options */ + unsigned int acct_truesize; /* truesize accounted to vcc */ +}; + +#define VCC_HTABLE_SIZE 32 + +extern struct hlist_head vcc_hash[VCC_HTABLE_SIZE]; +extern rwlock_t vcc_sklist_lock; + +#define ATM_SKB(skb) (((struct atm_skb_data *) (skb)->cb)) + +struct atm_dev *atm_dev_register(const char *type, struct device *parent, + const struct atmdev_ops *ops, + int number, /* -1 == pick first available */ + unsigned long *flags); +struct atm_dev *atm_dev_lookup(int 
number); +void atm_dev_deregister(struct atm_dev *dev); + +/* atm_dev_signal_change + * + * Propagate lower layer signal change in atm_dev->signal to netdevice. + * The event will be sent via a notifier call chain. + */ +void atm_dev_signal_change(struct atm_dev *dev, char signal); + +void vcc_insert_socket(struct sock *sk); + +void atm_dev_release_vccs(struct atm_dev *dev); + +static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb) +{ + /* + * Because ATM skbs may not belong to a sock (and we don't + * necessarily want to), skb->truesize may be adjusted, + * escaping the hack in pskb_expand_head() which avoids + * doing so for some cases. So stash the value of truesize + * at the time we accounted it, and atm_pop_raw() can use + * that value later, in case it changes. + */ + refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); + ATM_SKB(skb)->acct_truesize = skb->truesize; + ATM_SKB(skb)->atm_options = vcc->atm_options; +} + +static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) +{ + atomic_add(truesize, &sk_atm(vcc)->sk_rmem_alloc); +} + + +static inline void atm_return(struct atm_vcc *vcc,int truesize) +{ + atomic_sub(truesize, &sk_atm(vcc)->sk_rmem_alloc); +} + + +static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size) +{ + return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) < + sk_atm(vcc)->sk_sndbuf; +} + + +static inline void atm_dev_hold(struct atm_dev *dev) +{ + refcount_inc(&dev->refcnt); +} + + +static inline void atm_dev_put(struct atm_dev *dev) +{ + if (refcount_dec_and_test(&dev->refcnt)) { + BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); + if (dev->ops->dev_close) + dev->ops->dev_close(dev); + put_device(&dev->class_dev); + } +} + + +int atm_charge(struct atm_vcc *vcc,int truesize); +struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, + gfp_t gfp_flags); +int atm_pcr_goal(const struct atm_trafprm *tp); + +void vcc_release_async(struct atm_vcc *vcc, int reply); + +struct atm_ioctl { + struct module *owner; + /* A module reference is kept if appropriate over this call. + * Return -ENOIOCTLCMD if you don't handle it. */ + int (*ioctl)(struct socket *, unsigned int cmd, unsigned long arg); + struct list_head list; +}; + +/** + * register_atm_ioctl - register handler for ioctl operations + * + * Special (non-device) handlers of ioctl's should + * register here. If you're a normal device, you should + * set .ioctl in your atmdev_ops instead. 
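A minimal registration sketch for the driver-side API above, assuming <linux/module.h> for THIS_MODULE: only .send is mandatory in struct atmdev_ops, atm_dev_register() with number == -1 picks the first free index, and atm_account_tx() charges the skb to the VCC before it goes to hardware. demo_send(), demo_probe() and the "demo" type string are made up.

static int demo_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	atm_account_tx(vcc, skb);	/* charge truesize to the VCC */
	/* ... queue skb to hardware; call vcc->pop() on completion ... */
	return 0;
}

static const struct atmdev_ops demo_ops = {
	.send	= demo_send,
	.owner	= THIS_MODULE,
};

static int demo_probe(struct device *parent)
{
	struct atm_dev *dev;

	dev = atm_dev_register("demo", parent, &demo_ops, -1, NULL);
	if (!dev)
		return -ENODEV;
	/* keep dev in driver state; atm_dev_deregister() on removal */
	return 0;
}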
+ */ +void register_atm_ioctl(struct atm_ioctl *); + +/** + * deregister_atm_ioctl - remove the ioctl handler + */ +void deregister_atm_ioctl(struct atm_ioctl *); + + +/* register_atmdevice_notifier - register atm_dev notify events + * + * Clients like br2684 will register notify events + * Currently we notify of signal found/lost + */ +int register_atmdevice_notifier(struct notifier_block *nb); +void unregister_atmdevice_notifier(struct notifier_block *nb); + +#endif diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h new file mode 100644 index 000000000..1491af38c --- /dev/null +++ b/include/linux/atmel-mci.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_ATMEL_MCI_H +#define __LINUX_ATMEL_MCI_H + +#include +#include + +#define ATMCI_MAX_NR_SLOTS 2 + +/** + * struct mci_slot_pdata - board-specific per-slot configuration + * @bus_width: Number of data lines wired up the slot + * @detect_pin: GPIO pin wired to the card detect switch + * @wp_pin: GPIO pin wired to the write protect sensor + * @detect_is_active_high: The state of the detect pin when it is active + * @non_removable: The slot is not removable, only detect once + * + * If a given slot is not present on the board, @bus_width should be + * set to 0. The other fields are ignored in this case. + * + * Any pins that aren't available should be set to a negative value. + * + * Note that support for multiple slots is experimental -- some cards + * might get upset if we don't get the clock management exactly right. + * But in most cases, it should work just fine. + */ +struct mci_slot_pdata { + unsigned int bus_width; + int detect_pin; + int wp_pin; + bool detect_is_active_high; + bool non_removable; +}; + +/** + * struct mci_platform_data - board-specific MMC/SDcard configuration + * @dma_slave: DMA slave interface to use in data transfers. + * @slot: Per-slot configuration data. 
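A board-file sketch for the slot description documented above (struct mci_platform_data itself is defined just below): slot 0 is a 4-bit slot with a card-detect GPIO and no write-protect pin, slot 1 is left out by setting bus_width to 0, exactly as the struct mci_slot_pdata comment prescribes. The GPIO number is invented for the example.

static struct mci_platform_data demo_mci_data = {
	.slot[0] = {
		.bus_width		= 4,
		.detect_pin		= 42,	/* hypothetical GPIO */
		.wp_pin			= -1,	/* pin not available */
		.detect_is_active_high	= false,
	},
	.slot[1] = {
		.bus_width		= 0,	/* slot not present */
	},
};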
+ */ +struct mci_platform_data { + void *dma_slave; + dma_filter_fn dma_filter; + struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS]; +}; + +#endif /* __LINUX_ATMEL_MCI_H */ diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h new file mode 100644 index 000000000..6091d2abc --- /dev/null +++ b/include/linux/atmel-ssc.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __INCLUDE_ATMEL_SSC_H +#define __INCLUDE_ATMEL_SSC_H + +#include +#include +#include + +struct atmel_ssc_platform_data { + int use_dma; + int has_fslen_ext; +}; + +struct ssc_device { + struct list_head list; + dma_addr_t phybase; + void __iomem *regs; + struct platform_device *pdev; + struct atmel_ssc_platform_data *pdata; + struct clk *clk; + int user; + int irq; + bool clk_from_rk_pin; + bool sound_dai; +}; + +struct ssc_device * __must_check ssc_request(unsigned int ssc_num); +void ssc_free(struct ssc_device *ssc); + +/* SSC register offsets */ + +/* SSC Control Register */ +#define SSC_CR 0x00000000 +#define SSC_CR_RXDIS_SIZE 1 +#define SSC_CR_RXDIS_OFFSET 1 +#define SSC_CR_RXEN_SIZE 1 +#define SSC_CR_RXEN_OFFSET 0 +#define SSC_CR_SWRST_SIZE 1 +#define SSC_CR_SWRST_OFFSET 15 +#define SSC_CR_TXDIS_SIZE 1 +#define SSC_CR_TXDIS_OFFSET 9 +#define SSC_CR_TXEN_SIZE 1 +#define SSC_CR_TXEN_OFFSET 8 + +/* SSC Clock Mode Register */ +#define SSC_CMR 0x00000004 +#define SSC_CMR_DIV_SIZE 12 +#define SSC_CMR_DIV_OFFSET 0 + +/* SSC Receive Clock Mode Register */ +#define SSC_RCMR 0x00000010 +#define SSC_RCMR_CKG_SIZE 2 +#define SSC_RCMR_CKG_OFFSET 6 +#define SSC_RCMR_CKI_SIZE 1 +#define SSC_RCMR_CKI_OFFSET 5 +#define SSC_RCMR_CKO_SIZE 3 +#define SSC_RCMR_CKO_OFFSET 2 +#define SSC_RCMR_CKS_SIZE 2 +#define SSC_RCMR_CKS_OFFSET 0 +#define SSC_RCMR_PERIOD_SIZE 8 +#define SSC_RCMR_PERIOD_OFFSET 24 +#define SSC_RCMR_START_SIZE 4 +#define SSC_RCMR_START_OFFSET 8 +#define SSC_RCMR_STOP_SIZE 1 +#define SSC_RCMR_STOP_OFFSET 12 +#define SSC_RCMR_STTDLY_SIZE 8 +#define SSC_RCMR_STTDLY_OFFSET 16 + +/* SSC Receive Frame Mode Register */ +#define SSC_RFMR 0x00000014 +#define SSC_RFMR_DATLEN_SIZE 5 +#define SSC_RFMR_DATLEN_OFFSET 0 +#define SSC_RFMR_DATNB_SIZE 4 +#define SSC_RFMR_DATNB_OFFSET 8 +#define SSC_RFMR_FSEDGE_SIZE 1 +#define SSC_RFMR_FSEDGE_OFFSET 24 +/* + * The FSLEN_EXT exist on at91sam9rl, at91sam9g10, + * at91sam9g20, and at91sam9g45 and newer SoCs + */ +#define SSC_RFMR_FSLEN_EXT_SIZE 4 +#define SSC_RFMR_FSLEN_EXT_OFFSET 28 +#define SSC_RFMR_FSLEN_SIZE 4 +#define SSC_RFMR_FSLEN_OFFSET 16 +#define SSC_RFMR_FSOS_SIZE 4 +#define SSC_RFMR_FSOS_OFFSET 20 +#define SSC_RFMR_LOOP_SIZE 1 +#define SSC_RFMR_LOOP_OFFSET 5 +#define SSC_RFMR_MSBF_SIZE 1 +#define SSC_RFMR_MSBF_OFFSET 7 + +/* SSC Transmit Clock Mode Register */ +#define SSC_TCMR 0x00000018 +#define SSC_TCMR_CKG_SIZE 2 +#define SSC_TCMR_CKG_OFFSET 6 +#define SSC_TCMR_CKI_SIZE 1 +#define SSC_TCMR_CKI_OFFSET 5 +#define SSC_TCMR_CKO_SIZE 3 +#define SSC_TCMR_CKO_OFFSET 2 +#define SSC_TCMR_CKS_SIZE 2 +#define SSC_TCMR_CKS_OFFSET 0 +#define SSC_TCMR_PERIOD_SIZE 8 +#define SSC_TCMR_PERIOD_OFFSET 24 +#define SSC_TCMR_START_SIZE 4 +#define SSC_TCMR_START_OFFSET 8 +#define SSC_TCMR_STTDLY_SIZE 8 +#define SSC_TCMR_STTDLY_OFFSET 16 + +/* SSC Transmit Frame Mode Register */ +#define SSC_TFMR 0x0000001c +#define SSC_TFMR_DATDEF_SIZE 1 +#define SSC_TFMR_DATDEF_OFFSET 5 +#define SSC_TFMR_DATLEN_SIZE 5 +#define SSC_TFMR_DATLEN_OFFSET 0 +#define SSC_TFMR_DATNB_SIZE 4 +#define SSC_TFMR_DATNB_OFFSET 8 +#define SSC_TFMR_FSDEN_SIZE 1 +#define SSC_TFMR_FSDEN_OFFSET 23 +#define 
SSC_TFMR_FSEDGE_SIZE 1 +#define SSC_TFMR_FSEDGE_OFFSET 24 +/* + * The FSLEN_EXT exist on at91sam9rl, at91sam9g10, + * at91sam9g20, and at91sam9g45 and newer SoCs + */ +#define SSC_TFMR_FSLEN_EXT_SIZE 4 +#define SSC_TFMR_FSLEN_EXT_OFFSET 28 +#define SSC_TFMR_FSLEN_SIZE 4 +#define SSC_TFMR_FSLEN_OFFSET 16 +#define SSC_TFMR_FSOS_SIZE 3 +#define SSC_TFMR_FSOS_OFFSET 20 +#define SSC_TFMR_MSBF_SIZE 1 +#define SSC_TFMR_MSBF_OFFSET 7 + +/* SSC Receive Hold Register */ +#define SSC_RHR 0x00000020 +#define SSC_RHR_RDAT_SIZE 32 +#define SSC_RHR_RDAT_OFFSET 0 + +/* SSC Transmit Hold Register */ +#define SSC_THR 0x00000024 +#define SSC_THR_TDAT_SIZE 32 +#define SSC_THR_TDAT_OFFSET 0 + +/* SSC Receive Sync. Holding Register */ +#define SSC_RSHR 0x00000030 +#define SSC_RSHR_RSDAT_SIZE 16 +#define SSC_RSHR_RSDAT_OFFSET 0 + +/* SSC Transmit Sync. Holding Register */ +#define SSC_TSHR 0x00000034 +#define SSC_TSHR_TSDAT_SIZE 16 +#define SSC_TSHR_RSDAT_OFFSET 0 + +/* SSC Receive Compare 0 Register */ +#define SSC_RC0R 0x00000038 +#define SSC_RC0R_CP0_SIZE 16 +#define SSC_RC0R_CP0_OFFSET 0 + +/* SSC Receive Compare 1 Register */ +#define SSC_RC1R 0x0000003c +#define SSC_RC1R_CP1_SIZE 16 +#define SSC_RC1R_CP1_OFFSET 0 + +/* SSC Status Register */ +#define SSC_SR 0x00000040 +#define SSC_SR_CP0_SIZE 1 +#define SSC_SR_CP0_OFFSET 8 +#define SSC_SR_CP1_SIZE 1 +#define SSC_SR_CP1_OFFSET 9 +#define SSC_SR_ENDRX_SIZE 1 +#define SSC_SR_ENDRX_OFFSET 6 +#define SSC_SR_ENDTX_SIZE 1 +#define SSC_SR_ENDTX_OFFSET 2 +#define SSC_SR_OVRUN_SIZE 1 +#define SSC_SR_OVRUN_OFFSET 5 +#define SSC_SR_RXBUFF_SIZE 1 +#define SSC_SR_RXBUFF_OFFSET 7 +#define SSC_SR_RXEN_SIZE 1 +#define SSC_SR_RXEN_OFFSET 17 +#define SSC_SR_RXRDY_SIZE 1 +#define SSC_SR_RXRDY_OFFSET 4 +#define SSC_SR_RXSYN_SIZE 1 +#define SSC_SR_RXSYN_OFFSET 11 +#define SSC_SR_TXBUFE_SIZE 1 +#define SSC_SR_TXBUFE_OFFSET 3 +#define SSC_SR_TXEMPTY_SIZE 1 +#define SSC_SR_TXEMPTY_OFFSET 1 +#define SSC_SR_TXEN_SIZE 1 +#define SSC_SR_TXEN_OFFSET 16 +#define SSC_SR_TXRDY_SIZE 1 +#define SSC_SR_TXRDY_OFFSET 0 +#define SSC_SR_TXSYN_SIZE 1 +#define SSC_SR_TXSYN_OFFSET 10 + +/* SSC Interrupt Enable Register */ +#define SSC_IER 0x00000044 +#define SSC_IER_CP0_SIZE 1 +#define SSC_IER_CP0_OFFSET 8 +#define SSC_IER_CP1_SIZE 1 +#define SSC_IER_CP1_OFFSET 9 +#define SSC_IER_ENDRX_SIZE 1 +#define SSC_IER_ENDRX_OFFSET 6 +#define SSC_IER_ENDTX_SIZE 1 +#define SSC_IER_ENDTX_OFFSET 2 +#define SSC_IER_OVRUN_SIZE 1 +#define SSC_IER_OVRUN_OFFSET 5 +#define SSC_IER_RXBUFF_SIZE 1 +#define SSC_IER_RXBUFF_OFFSET 7 +#define SSC_IER_RXRDY_SIZE 1 +#define SSC_IER_RXRDY_OFFSET 4 +#define SSC_IER_RXSYN_SIZE 1 +#define SSC_IER_RXSYN_OFFSET 11 +#define SSC_IER_TXBUFE_SIZE 1 +#define SSC_IER_TXBUFE_OFFSET 3 +#define SSC_IER_TXEMPTY_SIZE 1 +#define SSC_IER_TXEMPTY_OFFSET 1 +#define SSC_IER_TXRDY_SIZE 1 +#define SSC_IER_TXRDY_OFFSET 0 +#define SSC_IER_TXSYN_SIZE 1 +#define SSC_IER_TXSYN_OFFSET 10 + +/* SSC Interrupt Disable Register */ +#define SSC_IDR 0x00000048 +#define SSC_IDR_CP0_SIZE 1 +#define SSC_IDR_CP0_OFFSET 8 +#define SSC_IDR_CP1_SIZE 1 +#define SSC_IDR_CP1_OFFSET 9 +#define SSC_IDR_ENDRX_SIZE 1 +#define SSC_IDR_ENDRX_OFFSET 6 +#define SSC_IDR_ENDTX_SIZE 1 +#define SSC_IDR_ENDTX_OFFSET 2 +#define SSC_IDR_OVRUN_SIZE 1 +#define SSC_IDR_OVRUN_OFFSET 5 +#define SSC_IDR_RXBUFF_SIZE 1 +#define SSC_IDR_RXBUFF_OFFSET 7 +#define SSC_IDR_RXRDY_SIZE 1 +#define SSC_IDR_RXRDY_OFFSET 4 +#define SSC_IDR_RXSYN_SIZE 1 +#define SSC_IDR_RXSYN_OFFSET 11 +#define SSC_IDR_TXBUFE_SIZE 1 +#define SSC_IDR_TXBUFE_OFFSET 3 
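The SSC_*_SIZE/SSC_*_OFFSET pairs above describe each register field by width and position; the SSC_BF()/SSC_BIT() helpers and the ssc_readl()/ssc_writel() accessors defined further down in this header combine them into register values. A rough sketch with arbitrary field values; demo_ssc_setup_tx() is hypothetical and ssc comes from ssc_request().

static void demo_ssc_setup_tx(struct ssc_device *ssc)
{
	u32 tcmr = SSC_BF(TCMR_PERIOD, 15)	/* arbitrary frame period */
		 | SSC_BF(TCMR_STTDLY, 1)	/* arbitrary start delay */
		 | SSC_BF(TCMR_CKS, 0);		/* clock source selector */

	ssc_writel(ssc->regs, TCMR, tcmr);
	ssc_writel(ssc->regs, CR, SSC_BIT(CR_TXEN));	/* enable transmitter */
}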
+#define SSC_IDR_TXEMPTY_SIZE 1 +#define SSC_IDR_TXEMPTY_OFFSET 1 +#define SSC_IDR_TXRDY_SIZE 1 +#define SSC_IDR_TXRDY_OFFSET 0 +#define SSC_IDR_TXSYN_SIZE 1 +#define SSC_IDR_TXSYN_OFFSET 10 + +/* SSC Interrupt Mask Register */ +#define SSC_IMR 0x0000004c +#define SSC_IMR_CP0_SIZE 1 +#define SSC_IMR_CP0_OFFSET 8 +#define SSC_IMR_CP1_SIZE 1 +#define SSC_IMR_CP1_OFFSET 9 +#define SSC_IMR_ENDRX_SIZE 1 +#define SSC_IMR_ENDRX_OFFSET 6 +#define SSC_IMR_ENDTX_SIZE 1 +#define SSC_IMR_ENDTX_OFFSET 2 +#define SSC_IMR_OVRUN_SIZE 1 +#define SSC_IMR_OVRUN_OFFSET 5 +#define SSC_IMR_RXBUFF_SIZE 1 +#define SSC_IMR_RXBUFF_OFFSET 7 +#define SSC_IMR_RXRDY_SIZE 1 +#define SSC_IMR_RXRDY_OFFSET 4 +#define SSC_IMR_RXSYN_SIZE 1 +#define SSC_IMR_RXSYN_OFFSET 11 +#define SSC_IMR_TXBUFE_SIZE 1 +#define SSC_IMR_TXBUFE_OFFSET 3 +#define SSC_IMR_TXEMPTY_SIZE 1 +#define SSC_IMR_TXEMPTY_OFFSET 1 +#define SSC_IMR_TXRDY_SIZE 1 +#define SSC_IMR_TXRDY_OFFSET 0 +#define SSC_IMR_TXSYN_SIZE 1 +#define SSC_IMR_TXSYN_OFFSET 10 + +/* SSC PDC Receive Pointer Register */ +#define SSC_PDC_RPR 0x00000100 + +/* SSC PDC Receive Counter Register */ +#define SSC_PDC_RCR 0x00000104 + +/* SSC PDC Transmit Pointer Register */ +#define SSC_PDC_TPR 0x00000108 + +/* SSC PDC Receive Next Pointer Register */ +#define SSC_PDC_RNPR 0x00000110 + +/* SSC PDC Receive Next Counter Register */ +#define SSC_PDC_RNCR 0x00000114 + +/* SSC PDC Transmit Counter Register */ +#define SSC_PDC_TCR 0x0000010c + +/* SSC PDC Transmit Next Pointer Register */ +#define SSC_PDC_TNPR 0x00000118 + +/* SSC PDC Transmit Next Counter Register */ +#define SSC_PDC_TNCR 0x0000011c + +/* SSC PDC Transfer Control Register */ +#define SSC_PDC_PTCR 0x00000120 +#define SSC_PDC_PTCR_RXTDIS_SIZE 1 +#define SSC_PDC_PTCR_RXTDIS_OFFSET 1 +#define SSC_PDC_PTCR_RXTEN_SIZE 1 +#define SSC_PDC_PTCR_RXTEN_OFFSET 0 +#define SSC_PDC_PTCR_TXTDIS_SIZE 1 +#define SSC_PDC_PTCR_TXTDIS_OFFSET 9 +#define SSC_PDC_PTCR_TXTEN_SIZE 1 +#define SSC_PDC_PTCR_TXTEN_OFFSET 8 + +/* SSC PDC Transfer Status Register */ +#define SSC_PDC_PTSR 0x00000124 +#define SSC_PDC_PTSR_RXTEN_SIZE 1 +#define SSC_PDC_PTSR_RXTEN_OFFSET 0 +#define SSC_PDC_PTSR_TXTEN_SIZE 1 +#define SSC_PDC_PTSR_TXTEN_OFFSET 8 + +/* Bit manipulation macros */ +#define SSC_BIT(name) \ + (1 << SSC_##name##_OFFSET) +#define SSC_BF(name, value) \ + (((value) & ((1 << SSC_##name##_SIZE) - 1)) \ + << SSC_##name##_OFFSET) +#define SSC_BFEXT(name, value) \ + (((value) >> SSC_##name##_OFFSET) \ + & ((1 << SSC_##name##_SIZE) - 1)) +#define SSC_BFINS(name, value, old) \ + (((old) & ~(((1 << SSC_##name##_SIZE) - 1) \ + << SSC_##name##_OFFSET)) | SSC_BF(name, value)) + +/* Register access macros */ +#define ssc_readl(base, reg) __raw_readl(base + SSC_##reg) +#define ssc_writel(base, reg, value) __raw_writel((value), base + SSC_##reg) + +#endif /* __INCLUDE_ATMEL_SSC_H */ diff --git a/include/linux/atmel_pdc.h b/include/linux/atmel_pdc.h new file mode 100644 index 000000000..63499ce80 --- /dev/null +++ b/include/linux/atmel_pdc.h @@ -0,0 +1,38 @@ +/* + * include/linux/atmel_pdc.h + * + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * Peripheral Data Controller (PDC) registers. + * Based on AT91RM9200 datasheet revision E. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef ATMEL_PDC_H +#define ATMEL_PDC_H + +#define ATMEL_PDC_RPR 0x100 /* Receive Pointer Register */ +#define ATMEL_PDC_RCR 0x104 /* Receive Counter Register */ +#define ATMEL_PDC_TPR 0x108 /* Transmit Pointer Register */ +#define ATMEL_PDC_TCR 0x10c /* Transmit Counter Register */ +#define ATMEL_PDC_RNPR 0x110 /* Receive Next Pointer Register */ +#define ATMEL_PDC_RNCR 0x114 /* Receive Next Counter Register */ +#define ATMEL_PDC_TNPR 0x118 /* Transmit Next Pointer Register */ +#define ATMEL_PDC_TNCR 0x11c /* Transmit Next Counter Register */ + +#define ATMEL_PDC_PTCR 0x120 /* Transfer Control Register */ +#define ATMEL_PDC_RXTEN (1 << 0) /* Receiver Transfer Enable */ +#define ATMEL_PDC_RXTDIS (1 << 1) /* Receiver Transfer Disable */ +#define ATMEL_PDC_TXTEN (1 << 8) /* Transmitter Transfer Enable */ +#define ATMEL_PDC_TXTDIS (1 << 9) /* Transmitter Transfer Disable */ + +#define ATMEL_PDC_PTSR 0x124 /* Transfer Status Register */ + +#define ATMEL_PDC_SCND_BUF_OFF 0x10 /* Offset between first and second buffer registers */ + +#endif diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h new file mode 100644 index 000000000..468fdfa64 --- /dev/null +++ b/include/linux/atmel_tc.h @@ -0,0 +1,270 @@ +/* + * Timer/Counter Unit (TC) registers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef ATMEL_TC_H +#define ATMEL_TC_H + +#include +#include + +/* + * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds + * three general-purpose 16-bit timers. These timers share one register bank. + * Depending on the SOC, each timer may have its own clock and IRQ, or those + * may be shared by the whole TC block. + * + * These TC blocks may have up to nine external pins: TCLK0..2 signals for + * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM + * or triggering. Those pins need to be set up for use with the TC block, + * else they will be used as GPIOs or for a different controller. + * + * Although we expect each TC block to have a platform_device node, those + * nodes are not what drivers bind to. Instead, they ask for a specific + * TC block, by number ... which is a common approach on systems with many + * timers. Then they use clk_get() and platform_get_irq() to get clock and + * IRQ resources. + */ + +struct clk; + +/** + * struct atmel_tcb_config - SoC data for a Timer/Counter Block + * @counter_width: size in bits of a timer counter register + */ +struct atmel_tcb_config { + size_t counter_width; +}; + +/** + * struct atmel_tc - information about a Timer/Counter Block + * @pdev: physical device + * @regs: mapping through which the I/O registers can be accessed + * @id: block id + * @tcb_config: configuration data from SoC + * @irq: irq for each of the three channels + * @clk: internal clock source for each of the three channels + * @node: list node, for tclib internal use + * @allocated: if already used, for tclib internal use + * + * On some platforms, each TC channel has its own clocks and IRQs, + * while on others, all TC channels share the same clock and IRQ. + * Drivers should clk_enable() all the clocks they need even though + * all the entries in @clk may point to the same physical clock. 
+ * Likewise, drivers should request irqs independently for each + * channel, but they must use IRQF_SHARED in case some of the entries + * in @irq are actually the same IRQ. + */ +struct atmel_tc { + struct platform_device *pdev; + void __iomem *regs; + int id; + const struct atmel_tcb_config *tcb_config; + int irq[3]; + struct clk *clk[3]; + struct clk *slow_clk; + struct list_head node; + bool allocated; +}; + +extern struct atmel_tc *atmel_tc_alloc(unsigned block); +extern void atmel_tc_free(struct atmel_tc *tc); + +/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */ +extern const u8 atmel_tc_divisors[5]; + + +/* + * Two registers have block-wide controls. These are: configuring the three + * "external" clocks (or event sources) used by the timer channels; and + * synchronizing the timers by resetting them all at once. + * + * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2 + * signals. Or, it can mean "external to timer", using the TIOA output from + * one of the other two timers that's being run in waveform mode. + */ + +#define ATMEL_TC_BCR 0xc0 /* TC Block Control Register */ +#define ATMEL_TC_SYNC (1 << 0) /* synchronize timers */ + +#define ATMEL_TC_BMR 0xc4 /* TC Block Mode Register */ +#define ATMEL_TC_TC0XC0S (3 << 0) /* external clock 0 source */ +#define ATMEL_TC_TC0XC0S_TCLK0 (0 << 0) +#define ATMEL_TC_TC0XC0S_NONE (1 << 0) +#define ATMEL_TC_TC0XC0S_TIOA1 (2 << 0) +#define ATMEL_TC_TC0XC0S_TIOA2 (3 << 0) +#define ATMEL_TC_TC1XC1S (3 << 2) /* external clock 1 source */ +#define ATMEL_TC_TC1XC1S_TCLK1 (0 << 2) +#define ATMEL_TC_TC1XC1S_NONE (1 << 2) +#define ATMEL_TC_TC1XC1S_TIOA0 (2 << 2) +#define ATMEL_TC_TC1XC1S_TIOA2 (3 << 2) +#define ATMEL_TC_TC2XC2S (3 << 4) /* external clock 2 source */ +#define ATMEL_TC_TC2XC2S_TCLK2 (0 << 4) +#define ATMEL_TC_TC2XC2S_NONE (1 << 4) +#define ATMEL_TC_TC2XC2S_TIOA0 (2 << 4) +#define ATMEL_TC_TC2XC2S_TIOA1 (3 << 4) + + +/* + * Each TC block has three "channels", each with one counter and controls. + * + * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection + * when it's not "external") is silicon-specific. AT91 platforms use one + * set of definitions; AVR32 platforms use a different set. Don't hard-wire + * such knowledge into your code, use the global "atmel_tc_divisors" ... + * where index N is the divisor for clock N+1, else zero to indicate it uses + * the 32 KiHz clock. + * + * The timers can be chained in various ways, and operated in "waveform" + * generation mode (including PWM) or "capture" mode (to time events). In + * both modes, behavior can be configured in many ways. + * + * Each timer has two I/O pins, TIOA and TIOB. Waveform mode uses TIOA as a + * PWM output, and TIOB as either another PWM or as a trigger. Capture mode + * uses them only as inputs. 
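A consumer-side sketch for the tclib interface above, assuming <linux/clk.h> and <linux/io.h>: claim TC block 0 with atmel_tc_alloc(), enable channel 0's clock, select an input clock and start the counter by software trigger using the ATMEL_TC_* register definitions that follow below. demo_tc_start() is hypothetical and error handling is kept minimal.

static int demo_tc_start(void)
{
	struct atmel_tc *tc = atmel_tc_alloc(0);

	if (!tc)
		return -EBUSY;

	clk_prepare_enable(tc->clk[0]);
	__raw_writel(ATMEL_TC_TIMER_CLOCK3,		/* one of the divided clocks */
		     tc->regs + ATMEL_TC_REG(0, CMR));
	__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,	/* enable and trigger */
		     tc->regs + ATMEL_TC_REG(0, CCR));

	/* ... use the channel; read ATMEL_TC_REG(0, CV) for the count ... */

	clk_disable_unprepare(tc->clk[0]);
	atmel_tc_free(tc);
	return 0;
}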
+ */ +#define ATMEL_TC_CHAN(idx) ((idx)*0x40) +#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg) + +#define ATMEL_TC_CCR 0x00 /* Channel Control Register */ +#define ATMEL_TC_CLKEN (1 << 0) /* clock enable */ +#define ATMEL_TC_CLKDIS (1 << 1) /* clock disable */ +#define ATMEL_TC_SWTRG (1 << 2) /* software trigger */ + +#define ATMEL_TC_CMR 0x04 /* Channel Mode Register */ + +/* Both modes share some CMR bits */ +#define ATMEL_TC_TCCLKS (7 << 0) /* clock source */ +#define ATMEL_TC_TIMER_CLOCK1 (0 << 0) +#define ATMEL_TC_TIMER_CLOCK2 (1 << 0) +#define ATMEL_TC_TIMER_CLOCK3 (2 << 0) +#define ATMEL_TC_TIMER_CLOCK4 (3 << 0) +#define ATMEL_TC_TIMER_CLOCK5 (4 << 0) +#define ATMEL_TC_XC0 (5 << 0) +#define ATMEL_TC_XC1 (6 << 0) +#define ATMEL_TC_XC2 (7 << 0) +#define ATMEL_TC_CLKI (1 << 3) /* clock invert */ +#define ATMEL_TC_BURST (3 << 4) /* clock gating */ +#define ATMEL_TC_GATE_NONE (0 << 4) +#define ATMEL_TC_GATE_XC0 (1 << 4) +#define ATMEL_TC_GATE_XC1 (2 << 4) +#define ATMEL_TC_GATE_XC2 (3 << 4) +#define ATMEL_TC_WAVE (1 << 15) /* true = Waveform mode */ + +/* CAPTURE mode CMR bits */ +#define ATMEL_TC_LDBSTOP (1 << 6) /* counter stops on RB load */ +#define ATMEL_TC_LDBDIS (1 << 7) /* counter disable on RB load */ +#define ATMEL_TC_ETRGEDG (3 << 8) /* external trigger edge */ +#define ATMEL_TC_ETRGEDG_NONE (0 << 8) +#define ATMEL_TC_ETRGEDG_RISING (1 << 8) +#define ATMEL_TC_ETRGEDG_FALLING (2 << 8) +#define ATMEL_TC_ETRGEDG_BOTH (3 << 8) +#define ATMEL_TC_ABETRG (1 << 10) /* external trigger is TIOA? */ +#define ATMEL_TC_CPCTRG (1 << 14) /* RC compare trigger enable */ +#define ATMEL_TC_LDRA (3 << 16) /* RA loading edge (of TIOA) */ +#define ATMEL_TC_LDRA_NONE (0 << 16) +#define ATMEL_TC_LDRA_RISING (1 << 16) +#define ATMEL_TC_LDRA_FALLING (2 << 16) +#define ATMEL_TC_LDRA_BOTH (3 << 16) +#define ATMEL_TC_LDRB (3 << 18) /* RB loading edge (of TIOA) */ +#define ATMEL_TC_LDRB_NONE (0 << 18) +#define ATMEL_TC_LDRB_RISING (1 << 18) +#define ATMEL_TC_LDRB_FALLING (2 << 18) +#define ATMEL_TC_LDRB_BOTH (3 << 18) + +/* WAVEFORM mode CMR bits */ +#define ATMEL_TC_CPCSTOP (1 << 6) /* RC compare stops counter */ +#define ATMEL_TC_CPCDIS (1 << 7) /* RC compare disables counter */ +#define ATMEL_TC_EEVTEDG (3 << 8) /* external event edge */ +#define ATMEL_TC_EEVTEDG_NONE (0 << 8) +#define ATMEL_TC_EEVTEDG_RISING (1 << 8) +#define ATMEL_TC_EEVTEDG_FALLING (2 << 8) +#define ATMEL_TC_EEVTEDG_BOTH (3 << 8) +#define ATMEL_TC_EEVT (3 << 10) /* external event source */ +#define ATMEL_TC_EEVT_TIOB (0 << 10) +#define ATMEL_TC_EEVT_XC0 (1 << 10) +#define ATMEL_TC_EEVT_XC1 (2 << 10) +#define ATMEL_TC_EEVT_XC2 (3 << 10) +#define ATMEL_TC_ENETRG (1 << 12) /* external event is trigger */ +#define ATMEL_TC_WAVESEL (3 << 13) /* waveform type */ +#define ATMEL_TC_WAVESEL_UP (0 << 13) +#define ATMEL_TC_WAVESEL_UPDOWN (1 << 13) +#define ATMEL_TC_WAVESEL_UP_AUTO (2 << 13) +#define ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13) +#define ATMEL_TC_ACPA (3 << 16) /* RA compare changes TIOA */ +#define ATMEL_TC_ACPA_NONE (0 << 16) +#define ATMEL_TC_ACPA_SET (1 << 16) +#define ATMEL_TC_ACPA_CLEAR (2 << 16) +#define ATMEL_TC_ACPA_TOGGLE (3 << 16) +#define ATMEL_TC_ACPC (3 << 18) /* RC compare changes TIOA */ +#define ATMEL_TC_ACPC_NONE (0 << 18) +#define ATMEL_TC_ACPC_SET (1 << 18) +#define ATMEL_TC_ACPC_CLEAR (2 << 18) +#define ATMEL_TC_ACPC_TOGGLE (3 << 18) +#define ATMEL_TC_AEEVT (3 << 20) /* external event changes TIOA */ +#define ATMEL_TC_AEEVT_NONE (0 << 20) +#define ATMEL_TC_AEEVT_SET (1 << 20) +#define 
ATMEL_TC_AEEVT_CLEAR (2 << 20) +#define ATMEL_TC_AEEVT_TOGGLE (3 << 20) +#define ATMEL_TC_ASWTRG (3 << 22) /* software trigger changes TIOA */ +#define ATMEL_TC_ASWTRG_NONE (0 << 22) +#define ATMEL_TC_ASWTRG_SET (1 << 22) +#define ATMEL_TC_ASWTRG_CLEAR (2 << 22) +#define ATMEL_TC_ASWTRG_TOGGLE (3 << 22) +#define ATMEL_TC_BCPB (3 << 24) /* RB compare changes TIOB */ +#define ATMEL_TC_BCPB_NONE (0 << 24) +#define ATMEL_TC_BCPB_SET (1 << 24) +#define ATMEL_TC_BCPB_CLEAR (2 << 24) +#define ATMEL_TC_BCPB_TOGGLE (3 << 24) +#define ATMEL_TC_BCPC (3 << 26) /* RC compare changes TIOB */ +#define ATMEL_TC_BCPC_NONE (0 << 26) +#define ATMEL_TC_BCPC_SET (1 << 26) +#define ATMEL_TC_BCPC_CLEAR (2 << 26) +#define ATMEL_TC_BCPC_TOGGLE (3 << 26) +#define ATMEL_TC_BEEVT (3 << 28) /* external event changes TIOB */ +#define ATMEL_TC_BEEVT_NONE (0 << 28) +#define ATMEL_TC_BEEVT_SET (1 << 28) +#define ATMEL_TC_BEEVT_CLEAR (2 << 28) +#define ATMEL_TC_BEEVT_TOGGLE (3 << 28) +#define ATMEL_TC_BSWTRG (3 << 30) /* software trigger changes TIOB */ +#define ATMEL_TC_BSWTRG_NONE (0 << 30) +#define ATMEL_TC_BSWTRG_SET (1 << 30) +#define ATMEL_TC_BSWTRG_CLEAR (2 << 30) +#define ATMEL_TC_BSWTRG_TOGGLE (3 << 30) + +#define ATMEL_TC_CV 0x10 /* counter Value */ +#define ATMEL_TC_RA 0x14 /* register A */ +#define ATMEL_TC_RB 0x18 /* register B */ +#define ATMEL_TC_RC 0x1c /* register C */ + +#define ATMEL_TC_SR 0x20 /* status (read-only) */ +/* Status-only flags */ +#define ATMEL_TC_CLKSTA (1 << 16) /* clock enabled */ +#define ATMEL_TC_MTIOA (1 << 17) /* TIOA mirror */ +#define ATMEL_TC_MTIOB (1 << 18) /* TIOB mirror */ + +#define ATMEL_TC_IER 0x24 /* interrupt enable (write-only) */ +#define ATMEL_TC_IDR 0x28 /* interrupt disable (write-only) */ +#define ATMEL_TC_IMR 0x2c /* interrupt mask (read-only) */ + +/* Status and IRQ flags */ +#define ATMEL_TC_COVFS (1 << 0) /* counter overflow */ +#define ATMEL_TC_LOVRS (1 << 1) /* load overrun */ +#define ATMEL_TC_CPAS (1 << 2) /* RA compare */ +#define ATMEL_TC_CPBS (1 << 3) /* RB compare */ +#define ATMEL_TC_CPCS (1 << 4) /* RC compare */ +#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */ +#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */ +#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */ +#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \ + ATMEL_TC_CPAS | ATMEL_TC_CPBS | \ + ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \ + ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \ + /* all IRQs */ + +#endif diff --git a/include/linux/atomic.h b/include/linux/atomic.h new file mode 100644 index 000000000..1e8e88bda --- /dev/null +++ b/include/linux/atomic.h @@ -0,0 +1,1317 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Atomic operations usable in machine independent code */ +#ifndef _LINUX_ATOMIC_H +#define _LINUX_ATOMIC_H +#include + +#include +#include + +/* + * Relaxed variants of xchg, cmpxchg and some atomic operations. + * + * We support four variants: + * + * - Fully ordered: The default implementation, no suffix required. + * - Acquire: Provides ACQUIRE semantics, _acquire suffix. + * - Release: Provides RELEASE semantics, _release suffix. + * - Relaxed: No ordering guarantees, _relaxed suffix. + * + * For compound atomics performing both a load and a store, ACQUIRE + * semantics apply only to the load and RELEASE semantics only to the + * store portion of the operation. Note that a failed cmpxchg_acquire + * does -not- imply any memory ordering constraints. + * + * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. 
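A usage sketch for the four ordering variants described above, not part of this header: a toy test-and-set flag where the winner's critical section is ordered after the acquire-flavoured cmpxchg and the unlock publishes its writes through the release-flavoured store. The demo_* names are made up and cpu_relax() is assumed available.

static atomic_t demo_flag = ATOMIC_INIT(0);

static void demo_lock(void)
{
	/* 0 -> 1 transition; ACQUIRE applies to the successful swap */
	while (atomic_cmpxchg_acquire(&demo_flag, 0, 1) != 0)
		cpu_relax();
}

static void demo_unlock(void)
{
	/* RELEASE: everything before this store is visible to the
	 * next thread that acquires the flag */
	atomic_set_release(&demo_flag, 0);
}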
+ */ + +#ifndef atomic_read_acquire +#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif + +#ifndef atomic_set_release +#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#endif + +/* + * The idea here is to build acquire/release variants by adding explicit + * barriers on top of the relaxed variant. In the case where the relaxed + * variant is already fully ordered, no additional barriers are needed. + * + * If an architecture overrides __atomic_acquire_fence() it will probably + * want to define smp_mb__after_spinlock(). + */ +#ifndef __atomic_acquire_fence +#define __atomic_acquire_fence smp_mb__after_atomic +#endif + +#ifndef __atomic_release_fence +#define __atomic_release_fence smp_mb__before_atomic +#endif + +#ifndef __atomic_pre_full_fence +#define __atomic_pre_full_fence smp_mb__before_atomic +#endif + +#ifndef __atomic_post_full_fence +#define __atomic_post_full_fence smp_mb__after_atomic +#endif + +#define __atomic_op_acquire(op, args...) \ +({ \ + typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \ + __atomic_acquire_fence(); \ + __ret; \ +}) + +#define __atomic_op_release(op, args...) \ +({ \ + __atomic_release_fence(); \ + op##_relaxed(args); \ +}) + +#define __atomic_op_fence(op, args...) \ +({ \ + typeof(op##_relaxed(args)) __ret; \ + __atomic_pre_full_fence(); \ + __ret = op##_relaxed(args); \ + __atomic_post_full_fence(); \ + __ret; \ +}) + +/* atomic_add_return_relaxed */ +#ifndef atomic_add_return_relaxed +#define atomic_add_return_relaxed atomic_add_return +#define atomic_add_return_acquire atomic_add_return +#define atomic_add_return_release atomic_add_return + +#else /* atomic_add_return_relaxed */ + +#ifndef atomic_add_return_acquire +#define atomic_add_return_acquire(...) \ + __atomic_op_acquire(atomic_add_return, __VA_ARGS__) +#endif + +#ifndef atomic_add_return_release +#define atomic_add_return_release(...) \ + __atomic_op_release(atomic_add_return, __VA_ARGS__) +#endif + +#ifndef atomic_add_return +#define atomic_add_return(...) \ + __atomic_op_fence(atomic_add_return, __VA_ARGS__) +#endif +#endif /* atomic_add_return_relaxed */ + +#ifndef atomic_inc +#define atomic_inc(v) atomic_add(1, (v)) +#endif + +/* atomic_inc_return_relaxed */ +#ifndef atomic_inc_return_relaxed + +#ifndef atomic_inc_return +#define atomic_inc_return(v) atomic_add_return(1, (v)) +#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) +#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) +#define atomic_inc_return_release(v) atomic_add_return_release(1, (v)) +#else /* atomic_inc_return */ +#define atomic_inc_return_relaxed atomic_inc_return +#define atomic_inc_return_acquire atomic_inc_return +#define atomic_inc_return_release atomic_inc_return +#endif /* atomic_inc_return */ + +#else /* atomic_inc_return_relaxed */ + +#ifndef atomic_inc_return_acquire +#define atomic_inc_return_acquire(...) \ + __atomic_op_acquire(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return_release +#define atomic_inc_return_release(...) \ + __atomic_op_release(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return +#define atomic_inc_return(...) 
\ + __atomic_op_fence(atomic_inc_return, __VA_ARGS__) +#endif +#endif /* atomic_inc_return_relaxed */ + +/* atomic_sub_return_relaxed */ +#ifndef atomic_sub_return_relaxed +#define atomic_sub_return_relaxed atomic_sub_return +#define atomic_sub_return_acquire atomic_sub_return +#define atomic_sub_return_release atomic_sub_return + +#else /* atomic_sub_return_relaxed */ + +#ifndef atomic_sub_return_acquire +#define atomic_sub_return_acquire(...) \ + __atomic_op_acquire(atomic_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic_sub_return_release +#define atomic_sub_return_release(...) \ + __atomic_op_release(atomic_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic_sub_return +#define atomic_sub_return(...) \ + __atomic_op_fence(atomic_sub_return, __VA_ARGS__) +#endif +#endif /* atomic_sub_return_relaxed */ + +#ifndef atomic_dec +#define atomic_dec(v) atomic_sub(1, (v)) +#endif + +/* atomic_dec_return_relaxed */ +#ifndef atomic_dec_return_relaxed + +#ifndef atomic_dec_return +#define atomic_dec_return(v) atomic_sub_return(1, (v)) +#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) +#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v)) +#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) +#else /* atomic_dec_return */ +#define atomic_dec_return_relaxed atomic_dec_return +#define atomic_dec_return_acquire atomic_dec_return +#define atomic_dec_return_release atomic_dec_return +#endif /* atomic_dec_return */ + +#else /* atomic_dec_return_relaxed */ + +#ifndef atomic_dec_return_acquire +#define atomic_dec_return_acquire(...) \ + __atomic_op_acquire(atomic_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic_dec_return_release +#define atomic_dec_return_release(...) \ + __atomic_op_release(atomic_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic_dec_return +#define atomic_dec_return(...) \ + __atomic_op_fence(atomic_dec_return, __VA_ARGS__) +#endif +#endif /* atomic_dec_return_relaxed */ + + +/* atomic_fetch_add_relaxed */ +#ifndef atomic_fetch_add_relaxed +#define atomic_fetch_add_relaxed atomic_fetch_add +#define atomic_fetch_add_acquire atomic_fetch_add +#define atomic_fetch_add_release atomic_fetch_add + +#else /* atomic_fetch_add_relaxed */ + +#ifndef atomic_fetch_add_acquire +#define atomic_fetch_add_acquire(...) \ + __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_add_release +#define atomic_fetch_add_release(...) \ + __atomic_op_release(atomic_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_add +#define atomic_fetch_add(...) \ + __atomic_op_fence(atomic_fetch_add, __VA_ARGS__) +#endif +#endif /* atomic_fetch_add_relaxed */ + +/* atomic_fetch_inc_relaxed */ +#ifndef atomic_fetch_inc_relaxed + +#ifndef atomic_fetch_inc +#define atomic_fetch_inc(v) atomic_fetch_add(1, (v)) +#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v)) +#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v)) +#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v)) +#else /* atomic_fetch_inc */ +#define atomic_fetch_inc_relaxed atomic_fetch_inc +#define atomic_fetch_inc_acquire atomic_fetch_inc +#define atomic_fetch_inc_release atomic_fetch_inc +#endif /* atomic_fetch_inc */ + +#else /* atomic_fetch_inc_relaxed */ + +#ifndef atomic_fetch_inc_acquire +#define atomic_fetch_inc_acquire(...) \ + __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_inc_release +#define atomic_fetch_inc_release(...) 
\ + __atomic_op_release(atomic_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_inc +#define atomic_fetch_inc(...) \ + __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__) +#endif +#endif /* atomic_fetch_inc_relaxed */ + +/* atomic_fetch_sub_relaxed */ +#ifndef atomic_fetch_sub_relaxed +#define atomic_fetch_sub_relaxed atomic_fetch_sub +#define atomic_fetch_sub_acquire atomic_fetch_sub +#define atomic_fetch_sub_release atomic_fetch_sub + +#else /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_fetch_sub_acquire +#define atomic_fetch_sub_acquire(...) \ + __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_sub_release +#define atomic_fetch_sub_release(...) \ + __atomic_op_release(atomic_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_sub +#define atomic_fetch_sub(...) \ + __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__) +#endif +#endif /* atomic_fetch_sub_relaxed */ + +/* atomic_fetch_dec_relaxed */ +#ifndef atomic_fetch_dec_relaxed + +#ifndef atomic_fetch_dec +#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v)) +#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v)) +#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v)) +#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v)) +#else /* atomic_fetch_dec */ +#define atomic_fetch_dec_relaxed atomic_fetch_dec +#define atomic_fetch_dec_acquire atomic_fetch_dec +#define atomic_fetch_dec_release atomic_fetch_dec +#endif /* atomic_fetch_dec */ + +#else /* atomic_fetch_dec_relaxed */ + +#ifndef atomic_fetch_dec_acquire +#define atomic_fetch_dec_acquire(...) \ + __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_dec_release +#define atomic_fetch_dec_release(...) \ + __atomic_op_release(atomic_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_dec +#define atomic_fetch_dec(...) \ + __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__) +#endif +#endif /* atomic_fetch_dec_relaxed */ + +/* atomic_fetch_or_relaxed */ +#ifndef atomic_fetch_or_relaxed +#define atomic_fetch_or_relaxed atomic_fetch_or +#define atomic_fetch_or_acquire atomic_fetch_or +#define atomic_fetch_or_release atomic_fetch_or + +#else /* atomic_fetch_or_relaxed */ + +#ifndef atomic_fetch_or_acquire +#define atomic_fetch_or_acquire(...) \ + __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_or_release +#define atomic_fetch_or_release(...) \ + __atomic_op_release(atomic_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_or +#define atomic_fetch_or(...) \ + __atomic_op_fence(atomic_fetch_or, __VA_ARGS__) +#endif +#endif /* atomic_fetch_or_relaxed */ + +/* atomic_fetch_and_relaxed */ +#ifndef atomic_fetch_and_relaxed +#define atomic_fetch_and_relaxed atomic_fetch_and +#define atomic_fetch_and_acquire atomic_fetch_and +#define atomic_fetch_and_release atomic_fetch_and + +#else /* atomic_fetch_and_relaxed */ + +#ifndef atomic_fetch_and_acquire +#define atomic_fetch_and_acquire(...) \ + __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_and_release +#define atomic_fetch_and_release(...) \ + __atomic_op_release(atomic_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_and +#define atomic_fetch_and(...) 
\ + __atomic_op_fence(atomic_fetch_and, __VA_ARGS__) +#endif +#endif /* atomic_fetch_and_relaxed */ + +#ifndef atomic_andnot +#define atomic_andnot(i, v) atomic_and(~(int)(i), (v)) +#endif + +#ifndef atomic_fetch_andnot_relaxed + +#ifndef atomic_fetch_andnot +#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v)) +#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v)) +#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v)) +#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v)) +#else /* atomic_fetch_andnot */ +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot +#define atomic_fetch_andnot_acquire atomic_fetch_andnot +#define atomic_fetch_andnot_release atomic_fetch_andnot +#endif /* atomic_fetch_andnot */ + +#else /* atomic_fetch_andnot_relaxed */ + +#ifndef atomic_fetch_andnot_acquire +#define atomic_fetch_andnot_acquire(...) \ + __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_andnot_release +#define atomic_fetch_andnot_release(...) \ + __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_andnot +#define atomic_fetch_andnot(...) \ + __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) +#endif +#endif /* atomic_fetch_andnot_relaxed */ + +/* atomic_fetch_xor_relaxed */ +#ifndef atomic_fetch_xor_relaxed +#define atomic_fetch_xor_relaxed atomic_fetch_xor +#define atomic_fetch_xor_acquire atomic_fetch_xor +#define atomic_fetch_xor_release atomic_fetch_xor + +#else /* atomic_fetch_xor_relaxed */ + +#ifndef atomic_fetch_xor_acquire +#define atomic_fetch_xor_acquire(...) \ + __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_xor_release +#define atomic_fetch_xor_release(...) \ + __atomic_op_release(atomic_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic_fetch_xor +#define atomic_fetch_xor(...) \ + __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__) +#endif +#endif /* atomic_fetch_xor_relaxed */ + + +/* atomic_xchg_relaxed */ +#ifndef atomic_xchg_relaxed +#define atomic_xchg_relaxed atomic_xchg +#define atomic_xchg_acquire atomic_xchg +#define atomic_xchg_release atomic_xchg + +#else /* atomic_xchg_relaxed */ + +#ifndef atomic_xchg_acquire +#define atomic_xchg_acquire(...) \ + __atomic_op_acquire(atomic_xchg, __VA_ARGS__) +#endif + +#ifndef atomic_xchg_release +#define atomic_xchg_release(...) \ + __atomic_op_release(atomic_xchg, __VA_ARGS__) +#endif + +#ifndef atomic_xchg +#define atomic_xchg(...) \ + __atomic_op_fence(atomic_xchg, __VA_ARGS__) +#endif +#endif /* atomic_xchg_relaxed */ + +/* atomic_cmpxchg_relaxed */ +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_relaxed atomic_cmpxchg +#define atomic_cmpxchg_acquire atomic_cmpxchg +#define atomic_cmpxchg_release atomic_cmpxchg + +#else /* atomic_cmpxchg_relaxed */ + +#ifndef atomic_cmpxchg_acquire +#define atomic_cmpxchg_acquire(...) \ + __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic_cmpxchg_release +#define atomic_cmpxchg_release(...) \ + __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic_cmpxchg +#define atomic_cmpxchg(...) 
\ + __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) +#endif +#endif /* atomic_cmpxchg_relaxed */ + +#ifndef atomic_try_cmpxchg + +#define __atomic_try_cmpxchg(type, _p, _po, _n) \ +({ \ + typeof(_po) __po = (_po); \ + typeof(*(_po)) __r, __o = *__po; \ + __r = atomic_cmpxchg##type((_p), __o, (_n)); \ + if (unlikely(__r != __o)) \ + *__po = __r; \ + likely(__r == __o); \ +}) + +#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n) +#define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n) +#define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n) +#define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n) + +#else /* atomic_try_cmpxchg */ +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg +#define atomic_try_cmpxchg_release atomic_try_cmpxchg +#endif /* atomic_try_cmpxchg */ + +/* cmpxchg_relaxed */ +#ifndef cmpxchg_relaxed +#define cmpxchg_relaxed cmpxchg +#define cmpxchg_acquire cmpxchg +#define cmpxchg_release cmpxchg + +#else /* cmpxchg_relaxed */ + +#ifndef cmpxchg_acquire +#define cmpxchg_acquire(...) \ + __atomic_op_acquire(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg_release +#define cmpxchg_release(...) \ + __atomic_op_release(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg +#define cmpxchg(...) \ + __atomic_op_fence(cmpxchg, __VA_ARGS__) +#endif +#endif /* cmpxchg_relaxed */ + +/* cmpxchg64_relaxed */ +#ifndef cmpxchg64_relaxed +#define cmpxchg64_relaxed cmpxchg64 +#define cmpxchg64_acquire cmpxchg64 +#define cmpxchg64_release cmpxchg64 + +#else /* cmpxchg64_relaxed */ + +#ifndef cmpxchg64_acquire +#define cmpxchg64_acquire(...) \ + __atomic_op_acquire(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64_release +#define cmpxchg64_release(...) \ + __atomic_op_release(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64 +#define cmpxchg64(...) \ + __atomic_op_fence(cmpxchg64, __VA_ARGS__) +#endif +#endif /* cmpxchg64_relaxed */ + +/* xchg_relaxed */ +#ifndef xchg_relaxed +#define xchg_relaxed xchg +#define xchg_acquire xchg +#define xchg_release xchg + +#else /* xchg_relaxed */ + +#ifndef xchg_acquire +#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__) +#endif + +#ifndef xchg_release +#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__) +#endif + +#ifndef xchg +#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__) +#endif +#endif /* xchg_relaxed */ + +/** + * atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns the original value of @v. + */ +#ifndef atomic_fetch_add_unless +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = atomic_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +#endif + +/** + * atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. 
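atomic_fetch_add_unless() just above, and several helpers further down, all use the same atomic_try_cmpxchg() loop: read once, test the cached value, and retry with the refreshed value that a failed try_cmpxchg writes back through its second argument. A stand-alone sketch of that pattern as a hypothetical bounded increment; demo_inc_below() is not part of this header.

static bool demo_inc_below(atomic_t *v, int ceiling)
{
	int c = atomic_read(v);

	do {
		if (c >= ceiling)
			return false;	/* would exceed the budget */
		/* on failure, c is refreshed with the current value */
	} while (!atomic_try_cmpxchg(v, &c, c + 1));

	return true;
}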
+ */ +static inline bool atomic_add_unless(atomic_t *v, int a, int u) +{ + return atomic_fetch_add_unless(v, a, u) != u; +} + +/** + * atomic_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +#ifndef atomic_inc_not_zero +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +#endif + +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#ifndef atomic_inc_and_test +static inline bool atomic_inc_and_test(atomic_t *v) +{ + return atomic_inc_return(v) == 0; +} +#endif + +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +#ifndef atomic_dec_and_test +static inline bool atomic_dec_and_test(atomic_t *v) +{ + return atomic_dec_return(v) == 0; +} +#endif + +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +#ifndef atomic_sub_and_test +static inline bool atomic_sub_and_test(int i, atomic_t *v) +{ + return atomic_sub_return(i, v) == 0; +} +#endif + +/** + * atomic_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +#ifndef atomic_add_negative +static inline bool atomic_add_negative(int i, atomic_t *v) +{ + return atomic_add_return(i, v) < 0; +} +#endif + +#ifndef atomic_inc_unless_negative +static inline bool atomic_inc_unless_negative(atomic_t *v) +{ + int c = atomic_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#endif + +#ifndef atomic_dec_unless_positive +static inline bool atomic_dec_unless_positive(atomic_t *v) +{ + int c = atomic_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#endif + +/* + * atomic_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. 
+ */ +#ifndef atomic_dec_if_positive +static inline int atomic_dec_if_positive(atomic_t *v) +{ + int dec, c = atomic_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!atomic_try_cmpxchg(v, &c, dec)); + + return dec; +} +#endif + +#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) +#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) + +#ifdef CONFIG_GENERIC_ATOMIC64 +#include +#endif + +#ifndef atomic64_read_acquire +#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif + +#ifndef atomic64_set_release +#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#endif + +/* atomic64_add_return_relaxed */ +#ifndef atomic64_add_return_relaxed +#define atomic64_add_return_relaxed atomic64_add_return +#define atomic64_add_return_acquire atomic64_add_return +#define atomic64_add_return_release atomic64_add_return + +#else /* atomic64_add_return_relaxed */ + +#ifndef atomic64_add_return_acquire +#define atomic64_add_return_acquire(...) \ + __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return_release +#define atomic64_add_return_release(...) \ + __atomic_op_release(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return +#define atomic64_add_return(...) \ + __atomic_op_fence(atomic64_add_return, __VA_ARGS__) +#endif +#endif /* atomic64_add_return_relaxed */ + +#ifndef atomic64_inc +#define atomic64_inc(v) atomic64_add(1, (v)) +#endif + +/* atomic64_inc_return_relaxed */ +#ifndef atomic64_inc_return_relaxed + +#ifndef atomic64_inc_return +#define atomic64_inc_return(v) atomic64_add_return(1, (v)) +#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) +#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v)) +#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v)) +#else /* atomic64_inc_return */ +#define atomic64_inc_return_relaxed atomic64_inc_return +#define atomic64_inc_return_acquire atomic64_inc_return +#define atomic64_inc_return_release atomic64_inc_return +#endif /* atomic64_inc_return */ + +#else /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_inc_return_acquire +#define atomic64_inc_return_acquire(...) \ + __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return_release +#define atomic64_inc_return_release(...) \ + __atomic_op_release(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return +#define atomic64_inc_return(...) \ + __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) +#endif +#endif /* atomic64_inc_return_relaxed */ + + +/* atomic64_sub_return_relaxed */ +#ifndef atomic64_sub_return_relaxed +#define atomic64_sub_return_relaxed atomic64_sub_return +#define atomic64_sub_return_acquire atomic64_sub_return +#define atomic64_sub_return_release atomic64_sub_return + +#else /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_sub_return_acquire +#define atomic64_sub_return_acquire(...) \ + __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic64_sub_return_release +#define atomic64_sub_return_release(...) \ + __atomic_op_release(atomic64_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic64_sub_return +#define atomic64_sub_return(...) 
\ + __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) +#endif +#endif /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_dec +#define atomic64_dec(v) atomic64_sub(1, (v)) +#endif + +/* atomic64_dec_return_relaxed */ +#ifndef atomic64_dec_return_relaxed + +#ifndef atomic64_dec_return +#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) +#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) +#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v)) +#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) +#else /* atomic64_dec_return */ +#define atomic64_dec_return_relaxed atomic64_dec_return +#define atomic64_dec_return_acquire atomic64_dec_return +#define atomic64_dec_return_release atomic64_dec_return +#endif /* atomic64_dec_return */ + +#else /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_dec_return_acquire +#define atomic64_dec_return_acquire(...) \ + __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return_release +#define atomic64_dec_return_release(...) \ + __atomic_op_release(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return +#define atomic64_dec_return(...) \ + __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) +#endif +#endif /* atomic64_dec_return_relaxed */ + + +/* atomic64_fetch_add_relaxed */ +#ifndef atomic64_fetch_add_relaxed +#define atomic64_fetch_add_relaxed atomic64_fetch_add +#define atomic64_fetch_add_acquire atomic64_fetch_add +#define atomic64_fetch_add_release atomic64_fetch_add + +#else /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_fetch_add_acquire +#define atomic64_fetch_add_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_add_release +#define atomic64_fetch_add_release(...) \ + __atomic_op_release(atomic64_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_add +#define atomic64_fetch_add(...) \ + __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_add_relaxed */ + +/* atomic64_fetch_inc_relaxed */ +#ifndef atomic64_fetch_inc_relaxed + +#ifndef atomic64_fetch_inc +#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v)) +#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v)) +#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v)) +#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v)) +#else /* atomic64_fetch_inc */ +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc +#define atomic64_fetch_inc_acquire atomic64_fetch_inc +#define atomic64_fetch_inc_release atomic64_fetch_inc +#endif /* atomic64_fetch_inc */ + +#else /* atomic64_fetch_inc_relaxed */ + +#ifndef atomic64_fetch_inc_acquire +#define atomic64_fetch_inc_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_inc_release +#define atomic64_fetch_inc_release(...) \ + __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_inc +#define atomic64_fetch_inc(...) \ + __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_inc_relaxed */ + +/* atomic64_fetch_sub_relaxed */ +#ifndef atomic64_fetch_sub_relaxed +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub +#define atomic64_fetch_sub_acquire atomic64_fetch_sub +#define atomic64_fetch_sub_release atomic64_fetch_sub + +#else /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_fetch_sub_acquire +#define atomic64_fetch_sub_acquire(...) 
\ + __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_sub_release +#define atomic64_fetch_sub_release(...) \ + __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_sub +#define atomic64_fetch_sub(...) \ + __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_sub_relaxed */ + +/* atomic64_fetch_dec_relaxed */ +#ifndef atomic64_fetch_dec_relaxed + +#ifndef atomic64_fetch_dec +#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v)) +#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v)) +#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v)) +#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v)) +#else /* atomic64_fetch_dec */ +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec +#define atomic64_fetch_dec_acquire atomic64_fetch_dec +#define atomic64_fetch_dec_release atomic64_fetch_dec +#endif /* atomic64_fetch_dec */ + +#else /* atomic64_fetch_dec_relaxed */ + +#ifndef atomic64_fetch_dec_acquire +#define atomic64_fetch_dec_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_dec_release +#define atomic64_fetch_dec_release(...) \ + __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_dec +#define atomic64_fetch_dec(...) \ + __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_dec_relaxed */ + +/* atomic64_fetch_or_relaxed */ +#ifndef atomic64_fetch_or_relaxed +#define atomic64_fetch_or_relaxed atomic64_fetch_or +#define atomic64_fetch_or_acquire atomic64_fetch_or +#define atomic64_fetch_or_release atomic64_fetch_or + +#else /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_or_acquire +#define atomic64_fetch_or_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_or_release +#define atomic64_fetch_or_release(...) \ + __atomic_op_release(atomic64_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_or +#define atomic64_fetch_or(...) \ + __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_or_relaxed */ + +/* atomic64_fetch_and_relaxed */ +#ifndef atomic64_fetch_and_relaxed +#define atomic64_fetch_and_relaxed atomic64_fetch_and +#define atomic64_fetch_and_acquire atomic64_fetch_and +#define atomic64_fetch_and_release atomic64_fetch_and + +#else /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_fetch_and_acquire +#define atomic64_fetch_and_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_and_release +#define atomic64_fetch_and_release(...) \ + __atomic_op_release(atomic64_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_and +#define atomic64_fetch_and(...) 
\ + __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_andnot +#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v)) +#endif + +#ifndef atomic64_fetch_andnot_relaxed + +#ifndef atomic64_fetch_andnot +#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v)) +#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v)) +#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v)) +#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v)) +#else /* atomic64_fetch_andnot */ +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot +#define atomic64_fetch_andnot_release atomic64_fetch_andnot +#endif /* atomic64_fetch_andnot */ + +#else /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_andnot_acquire +#define atomic64_fetch_andnot_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_andnot_release +#define atomic64_fetch_andnot_release(...) \ + __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_andnot +#define atomic64_fetch_andnot(...) \ + __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_andnot_relaxed */ + +/* atomic64_fetch_xor_relaxed */ +#ifndef atomic64_fetch_xor_relaxed +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor +#define atomic64_fetch_xor_acquire atomic64_fetch_xor +#define atomic64_fetch_xor_release atomic64_fetch_xor + +#else /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_fetch_xor_acquire +#define atomic64_fetch_xor_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_xor_release +#define atomic64_fetch_xor_release(...) \ + __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_xor +#define atomic64_fetch_xor(...) \ + __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_xor_relaxed */ + + +/* atomic64_xchg_relaxed */ +#ifndef atomic64_xchg_relaxed +#define atomic64_xchg_relaxed atomic64_xchg +#define atomic64_xchg_acquire atomic64_xchg +#define atomic64_xchg_release atomic64_xchg + +#else /* atomic64_xchg_relaxed */ + +#ifndef atomic64_xchg_acquire +#define atomic64_xchg_acquire(...) \ + __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg_release +#define atomic64_xchg_release(...) \ + __atomic_op_release(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg +#define atomic64_xchg(...) \ + __atomic_op_fence(atomic64_xchg, __VA_ARGS__) +#endif +#endif /* atomic64_xchg_relaxed */ + +/* atomic64_cmpxchg_relaxed */ +#ifndef atomic64_cmpxchg_relaxed +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg +#define atomic64_cmpxchg_acquire atomic64_cmpxchg +#define atomic64_cmpxchg_release atomic64_cmpxchg + +#else /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_cmpxchg_acquire +#define atomic64_cmpxchg_acquire(...) \ + __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg_release +#define atomic64_cmpxchg_release(...) \ + __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg +#define atomic64_cmpxchg(...) 
\ + __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) +#endif +#endif /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_try_cmpxchg + +#define __atomic64_try_cmpxchg(type, _p, _po, _n) \ +({ \ + typeof(_po) __po = (_po); \ + typeof(*(_po)) __r, __o = *__po; \ + __r = atomic64_cmpxchg##type((_p), __o, (_n)); \ + if (unlikely(__r != __o)) \ + *__po = __r; \ + likely(__r == __o); \ +}) + +#define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n) +#define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n) +#define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n) +#define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n) + +#else /* atomic64_try_cmpxchg */ +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg +#endif /* atomic64_try_cmpxchg */ + +/** + * atomic64_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns the original value of @v. + */ +#ifndef atomic64_fetch_add_unless +static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a, + long long u) +{ + long long c = atomic64_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic64_try_cmpxchg(v, &c, c + a)); + + return c; +} +#endif + +/** + * atomic64_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u) +{ + return atomic64_fetch_add_unless(v, a, u) != u; +} + +/** + * atomic64_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +#ifndef atomic64_inc_not_zero +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#endif + +/** + * atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#ifndef atomic64_inc_and_test +static inline bool atomic64_inc_and_test(atomic64_t *v) +{ + return atomic64_inc_return(v) == 0; +} +#endif + +/** + * atomic64_dec_and_test - decrement and test + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +#ifndef atomic64_dec_and_test +static inline bool atomic64_dec_and_test(atomic64_t *v) +{ + return atomic64_dec_return(v) == 0; +} +#endif + +/** + * atomic64_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. 
+ */ +#ifndef atomic64_sub_and_test +static inline bool atomic64_sub_and_test(long long i, atomic64_t *v) +{ + return atomic64_sub_return(i, v) == 0; +} +#endif + +/** + * atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +#ifndef atomic64_add_negative +static inline bool atomic64_add_negative(long long i, atomic64_t *v) +{ + return atomic64_add_return(i, v) < 0; +} +#endif + +#ifndef atomic64_inc_unless_negative +static inline bool atomic64_inc_unless_negative(atomic64_t *v) +{ + long long c = atomic64_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic64_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#endif + +#ifndef atomic64_dec_unless_positive +static inline bool atomic64_dec_unless_positive(atomic64_t *v) +{ + long long c = atomic64_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic64_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#endif + +/* + * atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic64_t + * + * The function returns the old value of *v minus 1, even if + * the atomic64 variable, v, was not decremented. + */ +#ifndef atomic64_dec_if_positive +static inline long long atomic64_dec_if_positive(atomic64_t *v) +{ + long long dec, c = atomic64_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!atomic64_try_cmpxchg(v, &c, dec)); + + return dec; +} +#endif + +#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) +#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) + +#include + +#endif /* _LINUX_ATOMIC_H */ diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h new file mode 100644 index 000000000..896c6892f --- /dev/null +++ b/include/linux/attribute_container.h @@ -0,0 +1,72 @@ +/* + * attribute_container.h - a generic container for all classes + * + * Copyright (c) 2005 - James Bottomley + * + * This file is licensed under GPLv2 + */ + +#ifndef _ATTRIBUTE_CONTAINER_H_ +#define _ATTRIBUTE_CONTAINER_H_ + +#include +#include + +struct device; + +struct attribute_container { + struct list_head node; + struct klist containers; + struct class *class; + const struct attribute_group *grp; + struct device_attribute **attrs; + int (*match)(struct attribute_container *, struct device *); +#define ATTRIBUTE_CONTAINER_NO_CLASSDEVS 0x01 + unsigned long flags; +}; + +static inline int +attribute_container_no_classdevs(struct attribute_container *atc) +{ + return atc->flags & ATTRIBUTE_CONTAINER_NO_CLASSDEVS; +} + +static inline void +attribute_container_set_no_classdevs(struct attribute_container *atc) +{ + atc->flags |= ATTRIBUTE_CONTAINER_NO_CLASSDEVS; +} + +int attribute_container_register(struct attribute_container *cont); +int __must_check attribute_container_unregister(struct attribute_container *cont); +void attribute_container_create_device(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *, + struct device *)); +void attribute_container_add_device(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *, + struct device *)); +void attribute_container_remove_device(struct device *dev, + void (*fn)(struct attribute_container *, + struct device *, + struct device *)); +void attribute_container_device_trigger(struct 
device *dev, + int (*fn)(struct attribute_container *, + struct device *, + struct device *)); +void attribute_container_trigger(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *)); +int attribute_container_add_attrs(struct device *classdev); +int attribute_container_add_class_device(struct device *classdev); +int attribute_container_add_class_device_adapter(struct attribute_container *cont, + struct device *dev, + struct device *classdev); +void attribute_container_remove_attrs(struct device *classdev); +void attribute_container_class_device_del(struct device *classdev); +struct attribute_container *attribute_container_classdev_to_container(struct device *); +struct device *attribute_container_find_class_device(struct attribute_container *, struct device *); +struct device_attribute **attribute_container_classdev_to_attrs(const struct device *classdev); + +#endif diff --git a/include/linux/audit.h b/include/linux/audit.h new file mode 100644 index 000000000..9334fbef7 --- /dev/null +++ b/include/linux/audit.h @@ -0,0 +1,603 @@ +/* audit.h -- Auditing support + * + * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Written by Rickard E. 
(Rik) Faith + * + */ +#ifndef _LINUX_AUDIT_H_ +#define _LINUX_AUDIT_H_ + +#include +#include +#include + +#define AUDIT_INO_UNSET ((unsigned long)-1) +#define AUDIT_DEV_UNSET ((dev_t)-1) + +struct audit_sig_info { + uid_t uid; + pid_t pid; + char ctx[0]; +}; + +struct audit_buffer; +struct audit_context; +struct inode; +struct netlink_skb_parms; +struct path; +struct linux_binprm; +struct mq_attr; +struct mqstat; +struct audit_watch; +struct audit_tree; +struct sk_buff; + +struct audit_krule { + u32 pflags; + u32 flags; + u32 listnr; + u32 action; + u32 mask[AUDIT_BITMASK_SIZE]; + u32 buflen; /* for data alloc on list rules */ + u32 field_count; + char *filterkey; /* ties events to rules */ + struct audit_field *fields; + struct audit_field *arch_f; /* quick access to arch field */ + struct audit_field *inode_f; /* quick access to an inode field */ + struct audit_watch *watch; /* associated watch */ + struct audit_tree *tree; /* associated watched tree */ + struct audit_fsnotify_mark *exe; + struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ + struct list_head list; /* for AUDIT_LIST* purposes only */ + u64 prio; +}; + +/* Flag to indicate legacy AUDIT_LOGINUID unset usage */ +#define AUDIT_LOGINUID_LEGACY 0x1 + +struct audit_field { + u32 type; + union { + u32 val; + kuid_t uid; + kgid_t gid; + struct { + char *lsm_str; + void *lsm_rule; + }; + }; + u32 op; +}; + +extern int is_audit_feature_set(int which); + +extern int __init audit_register_class(int class, unsigned *list); +extern int audit_classify_syscall(int abi, unsigned syscall); +extern int audit_classify_arch(int arch); +/* only for compat system calls */ +extern unsigned compat_write_class[]; +extern unsigned compat_read_class[]; +extern unsigned compat_dir_class[]; +extern unsigned compat_chattr_class[]; +extern unsigned compat_signal_class[]; + +extern int audit_classify_compat_syscall(int abi, unsigned syscall); + +/* audit_names->type values */ +#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */ +#define AUDIT_TYPE_NORMAL 1 /* a "normal" audit record */ +#define AUDIT_TYPE_PARENT 2 /* a parent audit record */ +#define AUDIT_TYPE_CHILD_DELETE 3 /* a child being deleted */ +#define AUDIT_TYPE_CHILD_CREATE 4 /* a child being created */ + +/* maximized args number that audit_socketcall can process */ +#define AUDITSC_ARGS 6 + +/* bit values for ->signal->audit_tty */ +#define AUDIT_TTY_ENABLE BIT(0) +#define AUDIT_TTY_LOG_PASSWD BIT(1) + +struct filename; + +extern void audit_log_session_info(struct audit_buffer *ab); + +#define AUDIT_OFF 0 +#define AUDIT_ON 1 +#define AUDIT_LOCKED 2 +#ifdef CONFIG_AUDIT +/* These are defined in audit.c */ + /* Public API */ +extern __printf(4, 5) +void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, + const char *fmt, ...); + +extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); +extern __printf(2, 3) +void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); +extern void audit_log_end(struct audit_buffer *ab); +extern bool audit_string_contains_control(const char *string, + size_t len); +extern void audit_log_n_hex(struct audit_buffer *ab, + const unsigned char *buf, + size_t len); +extern void audit_log_n_string(struct audit_buffer *ab, + const char *buf, + size_t n); +extern void audit_log_n_untrustedstring(struct audit_buffer *ab, + const char *string, + size_t n); +extern void audit_log_untrustedstring(struct audit_buffer *ab, + const char *string); +extern void audit_log_d_path(struct audit_buffer 
*ab, + const char *prefix, + const struct path *path); +extern void audit_log_key(struct audit_buffer *ab, + char *key); +extern void audit_log_link_denied(const char *operation); +extern void audit_log_lost(const char *message); + +extern int audit_log_task_context(struct audit_buffer *ab); +extern void audit_log_task_info(struct audit_buffer *ab, + struct task_struct *tsk); + +extern int audit_update_lsm_rules(void); + + /* Private API (for audit.c only) */ +extern int audit_rule_change(int type, int seq, void *data, size_t datasz); +extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); + +extern u32 audit_enabled; +#else /* CONFIG_AUDIT */ +static inline __printf(4, 5) +void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, + const char *fmt, ...) +{ } +static inline struct audit_buffer *audit_log_start(struct audit_context *ctx, + gfp_t gfp_mask, int type) +{ + return NULL; +} +static inline __printf(2, 3) +void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) +{ } +static inline void audit_log_end(struct audit_buffer *ab) +{ } +static inline void audit_log_n_hex(struct audit_buffer *ab, + const unsigned char *buf, size_t len) +{ } +static inline void audit_log_n_string(struct audit_buffer *ab, + const char *buf, size_t n) +{ } +static inline void audit_log_n_untrustedstring(struct audit_buffer *ab, + const char *string, size_t n) +{ } +static inline void audit_log_untrustedstring(struct audit_buffer *ab, + const char *string) +{ } +static inline void audit_log_d_path(struct audit_buffer *ab, + const char *prefix, + const struct path *path) +{ } +static inline void audit_log_key(struct audit_buffer *ab, char *key) +{ } +static inline void audit_log_link_denied(const char *string) +{ } +static inline int audit_log_task_context(struct audit_buffer *ab) +{ + return 0; +} +static inline void audit_log_task_info(struct audit_buffer *ab, + struct task_struct *tsk) +{ } +#define audit_enabled AUDIT_OFF +#endif /* CONFIG_AUDIT */ + +#ifdef CONFIG_AUDIT_COMPAT_GENERIC +#define audit_is_compat(arch) (!((arch) & __AUDIT_ARCH_64BIT)) +#else +#define audit_is_compat(arch) false +#endif + +#ifdef CONFIG_AUDITSYSCALL +#include /* for syscall_get_arch() */ + +/* These are defined in auditsc.c */ + /* Public API */ +extern int audit_alloc(struct task_struct *task); +extern void __audit_free(struct task_struct *task); +extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3); +extern void __audit_syscall_exit(int ret_success, long ret_value); +extern struct filename *__audit_reusename(const __user char *uptr); +extern void __audit_getname(struct filename *name); + +#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ +#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ +extern void __audit_inode(struct filename *name, const struct dentry *dentry, + unsigned int flags); +extern void __audit_file(const struct file *); +extern void __audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type); +extern void audit_seccomp(unsigned long syscall, long signr, int code); +extern void audit_seccomp_actions_logged(const char *names, + const char *old_names, int res); +extern void __audit_ptrace(struct task_struct *t); + +static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) +{ + task->audit_context = ctx; +} + +static inline struct audit_context *audit_context(void) +{ + return current->audit_context; +} + 
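A minimal sketch of how a caller might emit a one-off record through the logging helpers declared above (audit_log_start(), audit_log_format(), audit_log_end()), assuming CONFIG_AUDIT=y; the function name and the message text are illustrative only, and AUDIT_KERNEL is the generic asynchronous record type from uapi/linux/audit.h:

static void example_audit_note(const char *what)
{
	struct audit_buffer *ab;

	/* Returns NULL when auditing is disabled or no buffer can be allocated. */
	ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_KERNEL);
	if (!ab)
		return;
	audit_log_format(ab, "example event: %s", what);
	audit_log_end(ab);	/* queue the record and release the buffer */
}

audit_log(), declared above, wraps this start/format/end sequence into a single call when no incremental formatting is needed.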
+static inline bool audit_dummy_context(void) +{ + void *p = audit_context(); + return !p || *(int *)p; +} +static inline void audit_free(struct task_struct *task) +{ + if (unlikely(task->audit_context)) + __audit_free(task); +} +static inline void audit_syscall_entry(int major, unsigned long a0, + unsigned long a1, unsigned long a2, + unsigned long a3) +{ + if (unlikely(audit_context())) + __audit_syscall_entry(major, a0, a1, a2, a3); +} +static inline void audit_syscall_exit(void *pt_regs) +{ + if (unlikely(audit_context())) { + int success = is_syscall_success(pt_regs); + long return_code = regs_return_value(pt_regs); + + __audit_syscall_exit(success, return_code); + } +} +static inline struct filename *audit_reusename(const __user char *name) +{ + if (unlikely(!audit_dummy_context())) + return __audit_reusename(name); + return NULL; +} +static inline void audit_getname(struct filename *name) +{ + if (unlikely(!audit_dummy_context())) + __audit_getname(name); +} +static inline void audit_inode(struct filename *name, + const struct dentry *dentry, + unsigned int parent) { + if (unlikely(!audit_dummy_context())) { + unsigned int flags = 0; + if (parent) + flags |= AUDIT_INODE_PARENT; + __audit_inode(name, dentry, flags); + } +} +static inline void audit_file(struct file *file) +{ + if (unlikely(!audit_dummy_context())) + __audit_file(file); +} +static inline void audit_inode_parent_hidden(struct filename *name, + const struct dentry *dentry) +{ + if (unlikely(!audit_dummy_context())) + __audit_inode(name, dentry, + AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN); +} +static inline void audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type) { + if (unlikely(!audit_dummy_context())) + __audit_inode_child(parent, dentry, type); +} +void audit_core_dumps(long signr); + +static inline void audit_ptrace(struct task_struct *t) +{ + if (unlikely(!audit_dummy_context())) + __audit_ptrace(t); +} + + /* Private API (for audit.c only) */ +extern unsigned int audit_serial(void); +extern int auditsc_get_stamp(struct audit_context *ctx, + struct timespec64 *t, unsigned int *serial); +extern int audit_set_loginuid(kuid_t loginuid); + +static inline kuid_t audit_get_loginuid(struct task_struct *tsk) +{ + return tsk->loginuid; +} + +static inline unsigned int audit_get_sessionid(struct task_struct *tsk) +{ + return tsk->sessionid; +} + +extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); +extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); +extern void __audit_bprm(struct linux_binprm *bprm); +extern int __audit_socketcall(int nargs, unsigned long *args); +extern int __audit_sockaddr(int len, void *addr); +extern void __audit_fd_pair(int fd1, int fd2); +extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr); +extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout); +extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); +extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); +extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old); +extern void __audit_log_capset(const struct cred *new, const struct cred *old); +extern void __audit_mmap_fd(int fd, int flags); +extern void __audit_log_kern_module(char *name); +extern void __audit_fanotify(unsigned int response); + +static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) +{ + if 
(unlikely(!audit_dummy_context())) + __audit_ipc_obj(ipcp); +} +static inline void audit_fd_pair(int fd1, int fd2) +{ + if (unlikely(!audit_dummy_context())) + __audit_fd_pair(fd1, fd2); +} +static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) +{ + if (unlikely(!audit_dummy_context())) + __audit_ipc_set_perm(qbytes, uid, gid, mode); +} +static inline void audit_bprm(struct linux_binprm *bprm) +{ + if (unlikely(!audit_dummy_context())) + __audit_bprm(bprm); +} +static inline int audit_socketcall(int nargs, unsigned long *args) +{ + if (unlikely(!audit_dummy_context())) + return __audit_socketcall(nargs, args); + return 0; +} + +static inline int audit_socketcall_compat(int nargs, u32 *args) +{ + unsigned long a[AUDITSC_ARGS]; + int i; + + if (audit_dummy_context()) + return 0; + + for (i = 0; i < nargs; i++) + a[i] = (unsigned long)args[i]; + return __audit_socketcall(nargs, a); +} + +static inline int audit_sockaddr(int len, void *addr) +{ + if (unlikely(!audit_dummy_context())) + return __audit_sockaddr(len, addr); + return 0; +} +static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_open(oflag, mode, attr); +} +static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); +} +static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_notify(mqdes, notification); +} +static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_getsetattr(mqdes, mqstat); +} + +static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old) +{ + if (unlikely(!audit_dummy_context())) + return __audit_log_bprm_fcaps(bprm, new, old); + return 0; +} + +static inline void audit_log_capset(const struct cred *new, + const struct cred *old) +{ + if (unlikely(!audit_dummy_context())) + __audit_log_capset(new, old); +} + +static inline void audit_mmap_fd(int fd, int flags) +{ + if (unlikely(!audit_dummy_context())) + __audit_mmap_fd(fd, flags); +} + +static inline void audit_log_kern_module(char *name) +{ + if (!audit_dummy_context()) + __audit_log_kern_module(name); +} + +static inline void audit_fanotify(unsigned int response) +{ + if (!audit_dummy_context()) + __audit_fanotify(response); +} + +extern int audit_n_rules; +extern int audit_signals; +#else /* CONFIG_AUDITSYSCALL */ +static inline int audit_alloc(struct task_struct *task) +{ + return 0; +} +static inline void audit_free(struct task_struct *task) +{ } +static inline void audit_syscall_entry(int major, unsigned long a0, + unsigned long a1, unsigned long a2, + unsigned long a3) +{ } +static inline void audit_syscall_exit(void *pt_regs) +{ } +static inline bool audit_dummy_context(void) +{ + return true; +} +static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) +{ } +static inline struct audit_context *audit_context(void) +{ + return NULL; +} +static inline struct filename *audit_reusename(const __user char *name) +{ + return NULL; +} +static inline void audit_getname(struct filename *name) +{ } +static inline void __audit_inode(struct filename *name, + const struct dentry *dentry, + unsigned int flags) +{ } +static inline void 
__audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type) +{ } +static inline void audit_inode(struct filename *name, + const struct dentry *dentry, + unsigned int parent) +{ } +static inline void audit_file(struct file *file) +{ +} +static inline void audit_inode_parent_hidden(struct filename *name, + const struct dentry *dentry) +{ } +static inline void audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type) +{ } +static inline void audit_core_dumps(long signr) +{ } +static inline void audit_seccomp(unsigned long syscall, long signr, int code) +{ } +static inline void audit_seccomp_actions_logged(const char *names, + const char *old_names, int res) +{ } +static inline int auditsc_get_stamp(struct audit_context *ctx, + struct timespec64 *t, unsigned int *serial) +{ + return 0; +} +static inline kuid_t audit_get_loginuid(struct task_struct *tsk) +{ + return INVALID_UID; +} +static inline unsigned int audit_get_sessionid(struct task_struct *tsk) +{ + return AUDIT_SID_UNSET; +} +static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) +{ } +static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, + gid_t gid, umode_t mode) +{ } +static inline void audit_bprm(struct linux_binprm *bprm) +{ } +static inline int audit_socketcall(int nargs, unsigned long *args) +{ + return 0; +} + +static inline int audit_socketcall_compat(int nargs, u32 *args) +{ + return 0; +} + +static inline void audit_fd_pair(int fd1, int fd2) +{ } +static inline int audit_sockaddr(int len, void *addr) +{ + return 0; +} +static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) +{ } +static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, + unsigned int msg_prio, + const struct timespec64 *abs_timeout) +{ } +static inline void audit_mq_notify(mqd_t mqdes, + const struct sigevent *notification) +{ } +static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) +{ } +static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old) +{ + return 0; +} +static inline void audit_log_capset(const struct cred *new, + const struct cred *old) +{ } +static inline void audit_mmap_fd(int fd, int flags) +{ } + +static inline void audit_log_kern_module(char *name) +{ +} + +static inline void audit_fanotify(unsigned int response) +{ } + +static inline void audit_ptrace(struct task_struct *t) +{ } +#define audit_n_rules 0 +#define audit_signals 0 +#endif /* CONFIG_AUDITSYSCALL */ + +static inline bool audit_loginuid_set(struct task_struct *tsk) +{ + return uid_valid(audit_get_loginuid(tsk)); +} + +static inline void audit_log_string(struct audit_buffer *ab, const char *buf) +{ + audit_log_n_string(ab, buf, strlen(buf)); +} + +#endif diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h new file mode 100644 index 000000000..28c15050e --- /dev/null +++ b/include/linux/auto_dev-ioctl.h @@ -0,0 +1,14 @@ +/* + * Copyright 2008 Red Hat, Inc. All rights reserved. + * Copyright 2008 Ian Kent + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. 
+ */ + +#ifndef _LINUX_AUTO_DEV_IOCTL_H +#define _LINUX_AUTO_DEV_IOCTL_H + +#include +#endif /* _LINUX_AUTO_DEV_IOCTL_H */ diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h new file mode 100644 index 000000000..b8f814c95 --- /dev/null +++ b/include/linux/auto_fs.h @@ -0,0 +1,15 @@ +/* + * Copyright 1997 Transmeta Corporation - All Rights Reserved + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. + */ + +#ifndef _LINUX_AUTO_FS_H +#define _LINUX_AUTO_FS_H + +#include +#include +#include +#endif /* _LINUX_AUTO_FS_H */ diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h new file mode 100644 index 000000000..f68d0ec2d --- /dev/null +++ b/include/linux/auxvec.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_AUXVEC_H +#define _LINUX_AUXVEC_H + +#include + +#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */ + /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ +#endif /* _LINUX_AUXVEC_H */ diff --git a/include/linux/average.h b/include/linux/average.h new file mode 100644 index 000000000..a1a8f0963 --- /dev/null +++ b/include/linux/average.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_AVERAGE_H +#define _LINUX_AVERAGE_H + +#include +#include +#include + +/* + * Exponentially weighted moving average (EWMA) + * + * This implements a fixed-precision EWMA algorithm, with both the + * precision and fall-off coefficient determined at compile-time + * and built into the generated helper funtions. + * + * The first argument to the macro is the name that will be used + * for the struct and helper functions. + * + * The second argument, the precision, expresses how many bits are + * used for the fractional part of the fixed-precision values. + * + * The third argument, the weight reciprocal, determines how the + * new values will be weighed vs. the old state, new values will + * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note + * that this parameter must be a power of two for efficiency. + */ + +#define DECLARE_EWMA(name, _precision, _weight_rcp) \ + struct ewma_##name { \ + unsigned long internal; \ + }; \ + static inline void ewma_##name##_init(struct ewma_##name *e) \ + { \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + /* \ + * Even if you want to feed it just 0/1 you should have \ + * some bits for the non-fractional part... \ + */ \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ + e->internal = 0; \ + } \ + static inline unsigned long \ + ewma_##name##_read(struct ewma_##name *e) \ + { \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ + return e->internal >> (_precision); \ + } \ + static inline void ewma_##name##_add(struct ewma_##name *e, \ + unsigned long val) \ + { \ + unsigned long internal = READ_ONCE(e->internal); \ + unsigned long weight_rcp = ilog2(_weight_rcp); \ + unsigned long precision = _precision; \ + \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ + \ + WRITE_ONCE(e->internal, internal ? 
\ + (((internal << weight_rcp) - internal) + \ + (val << precision)) >> weight_rcp : \ + (val << precision)); \ + } + +#endif /* _LINUX_AVERAGE_H */ diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h new file mode 100644 index 000000000..92d179fb6 --- /dev/null +++ b/include/linux/avf/virtchnl.h @@ -0,0 +1,827 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _VIRTCHNL_H_ +#define _VIRTCHNL_H_ + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the drivers for all devices starting from our 40G product line + * + * Admin queue buffer usage: + * desc->opcode is always aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * The Firmware copies the cookie fields when sending messages between the + * PF and VF, but uses all other fields internally. Due to this limitation, + * we must send all messages as "indirect", i.e. using an external buffer. + * + * All the VSI indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value + * is of status_code type, defined in the shared type.h. + * + * In general, VF driver initialization should roughly follow the order of + * these opcodes. The VF driver must first validate the API version of the + * PF driver, then request a reset, then get resources, then configure + * queues and interrupts. After these operations are complete, the VF + * driver may start its queues, optionally add MAC and VLAN filters, and + * process traffic. 
+ */ + +/* START GENERIC DEFINES + * Need to ensure the following enums and defines hold the same meaning and + * value in current and future projects + */ + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + VIRTCHNL_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_NOT_SUPPORTED = -64, +}; + +#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 +#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 +#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 +#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 +#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 +#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 + +enum virtchnl_link_speed { + VIRTCHNL_LINK_SPEED_UNKNOWN = 0, + VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), + VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), + VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), + VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), + VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), + VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), +}; + +/* for hsplit_0 field of Rx HMC context */ +/* deprecated with AVF 1.0 */ +enum virtchnl_rx_hsplit { + VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, + VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, + VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, + VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, + VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, +}; + +/* END GENERIC DEFINES */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum virtchnl_ops { +/* The PF sends status change events to VFs using + * the VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + * Use of "advanced opcode" features must be negotiated as part of capabilities + * exchange and are not considered part of base mode feature set. + */ + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_GET_STATS = 15, + VIRTCHNL_OP_RSVD = 16, + VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + VIRTCHNL_OP_SET_RSS_HENA = 26, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, + VIRTCHNL_OP_REQUEST_QUEUES = 29, + VIRTCHNL_OP_ENABLE_CHANNELS = 30, + VIRTCHNL_OP_DISABLE_CHANNELS = 31, + VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, + VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, +}; + +/* These macros are used to generate compilation errors if a structure/union + * is not exactly the correct length. It gives a divide by zero error if the + * structure/union is not of the correct size, otherwise it creates an enum + * that is never used. + */ +#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 
1 : 0) } +#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) } + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + enum virtchnl_status_code v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); + +/* Message descriptions and data structures.*/ + +/* VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define VIRTCHNL_VERSION_MAJOR 1 +#define VIRTCHNL_VERSION_MINOR 1 +#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + +struct virtchnl_version_info { + u32 major; + u32 minor; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); + +#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) +#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) + +/* VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV + * vsi_type should always be 6 for backward compatibility. Add other fields + * as needed. + */ +enum virtchnl_vsi_type { + VIRTCHNL_VSI_TYPE_INVALID = 0, + VIRTCHNL_VSI_SRIOV = 6, +}; + +/* VIRTCHNL_OP_GET_VF_RESOURCES + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities + * PF responds with an indirect message containing + * virtchnl_vf_resource and one or more + * virtchnl_vsi_resource structures. + */ + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum virtchnl_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); + +/* VF capability flags + * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including + * TX/RX Checksum offloading and TSO for non-tunnelled packets. 
+ */ +#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 +#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 +#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 +#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 +#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 + +#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_cap_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); + +/* VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; /* deprecated with AVF 1.0 */ + u64 dma_ring_addr; + u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); + +/* VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of virtchnl_rxq_info. + * PF configures requested queue and returns a status code. + */ + +/* Rx queue config info */ +struct virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; /* deprecated with AVF 1.0 */ + u32 databuffer_size; + u32 max_pkt_size; + u32 pad1; + u64 dma_ring_addr; + enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ + u32 pad2; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); + +/* VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. */ + struct virtchnl_txq_info txq; + struct virtchnl_rxq_info rxq; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); + +struct virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + u32 pad; + struct virtchnl_queue_pair_info qpair[1]; +}; + +/* VIRTCHNL_OP_REQUEST_QUEUES + * VF sends this message to request the PF to allocate additional queues to + * this VF. Each VF gets a guaranteed number of queues on init but asking for + * additional queues must be negotiated. This is a best effort request as it + * is possible the PF does not have enough queues left to support the request. + * If the PF cannot support the number requested it will respond with the + * maximum number it is able to support. 
If the request is successful, PF will + * then reset the VF to institute required changes. + */ + +/* VF resource request */ +struct virtchnl_vf_res_request { + u16 num_queue_pairs; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); + +/* VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); + +struct virtchnl_irq_map_info { + u16 num_vectors; + struct virtchnl_vector_map vecmap[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info); + +/* VIRTCHNL_OP_ENABLE_QUEUES + * VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); + +/* VIRTCHNL_OP_ADD_ETH_ADDR + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* VIRTCHNL_OP_DEL_ETH_ADDR + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. + */ + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 pad[2]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); + +/* VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info); + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct eth_stats in an external buffer. + */ + +/* VIRTCHNL_OP_CONFIG_RSS_KEY + * VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. 
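The bitmap convention in virtchnl_queue_select is easiest to see in code. A sketch that enables the first num_pairs TX/RX pairs of a VSI, assuming 1 <= num_pairs <= 32, GENMASK() from linux/bits.h, and the hypothetical my_send_to_pf() helper from the earlier sketch:

static int my_enable_first_pairs(void *adapter, u16 vsi_id, u32 num_pairs)
{
	struct virtchnl_queue_select vqs = {
		.vsi_id = vsi_id,
		/* One bit per queue: set bits 0..num_pairs-1 in both maps. */
		.rx_queues = (u32)GENMASK(num_pairs - 1, 0),
		.tx_queues = (u32)GENMASK(num_pairs - 1, 0),
	};

	return my_send_to_pf(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
			     (u8 *)&vqs, sizeof(vqs));
}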
Only supported if both PF + * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); + +struct virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); + +/* VIRTCHNL_OP_GET_RSS_HENA_CAPS + * VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + */ +struct virtchnl_rss_hena { + u64 hena; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); + +/* VIRTCHNL_OP_ENABLE_CHANNELS + * VIRTCHNL_OP_DISABLE_CHANNELS + * VF sends these messages to enable or disable channels based on + * the user specified queue count and queue offset for each traffic class. + * This struct encompasses all the information that the PF needs from + * VF to create a channel. + */ +struct virtchnl_channel_info { + u16 count; /* number of queues in a channel */ + u16 offset; /* queues in a channel start from 'offset' */ + u32 pad; + u64 max_tx_rate; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info); + +struct virtchnl_tc_info { + u32 num_tc; + u32 pad; + struct virtchnl_channel_info list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info); + +/* VIRTCHNL_ADD_CLOUD_FILTER + * VIRTCHNL_DEL_CLOUD_FILTER + * VF sends these messages to add or delete a cloud filter based on the + * user specified match and action filters. These structures encompass + * all the information that the PF needs from the VF to add/delete a + * cloud filter. + */ + +struct virtchnl_l4_spec { + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + __be16 vlan_id; + __be16 pad; /* reserved for future use */ + __be32 src_ip[4]; + __be32 dst_ip[4]; + __be16 src_port; + __be16 dst_port; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec); + +union virtchnl_flow_spec { + struct virtchnl_l4_spec tcp_spec; + u8 buffer[128]; /* reserved for future use */ +}; + +VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec); + +enum virtchnl_action { + /* action types */ + VIRTCHNL_ACTION_DROP = 0, + VIRTCHNL_ACTION_TC_REDIRECT, +}; + +enum virtchnl_flow_type { + /* flow types */ + VIRTCHNL_TCP_V4_FLOW = 0, + VIRTCHNL_TCP_V6_FLOW, +}; + +struct virtchnl_filter { + union virtchnl_flow_spec data; + union virtchnl_flow_spec mask; + enum virtchnl_flow_type flow_type; + enum virtchnl_action action; + u32 action_meta; + __u8 field_flags; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); + +/* VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. 
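One practical wrinkle worth showing: virtchnl_rss_key (and virtchnl_rss_lut) end in a one-element array, the pre-C99 flexible-array idiom, so the message buffer has to be sized by hand in the same way the validation code later in this header does. A sketch, assuming process context and the usual kzalloc()/memcpy() kernel helpers:

static struct virtchnl_rss_key *my_build_rss_key_msg(u16 vsi_id,
						     const u8 *key, u16 key_len)
{
	/* key[1] is already counted in sizeof(*vrk), hence the "- 1". */
	size_t len = sizeof(struct virtchnl_rss_key) + key_len - 1;
	struct virtchnl_rss_key *vrk = kzalloc(len, GFP_KERNEL);

	if (!vrk)
		return NULL;

	vrk->vsi_id = vsi_id;
	vrk->key_len = key_len;
	memcpy(vrk->key, key, key_len);
	return vrk;
}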
+ */ +enum virtchnl_event_codes { + VIRTCHNL_EVENT_UNKNOWN = 0, + VIRTCHNL_EVENT_LINK_CHANGE, + VIRTCHNL_EVENT_RESET_IMPENDING, + VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; + +#define PF_EVENT_SEVERITY_INFO 0 +#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct virtchnl_pf_event { + enum virtchnl_event_codes event; + union { + struct { + enum virtchnl_link_speed link_speed; + bool link_status; + } link_event; + } event_data; + + int severity; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event); + +/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP + * VF uses this message to request PF to map IWARP vectors to IWARP queues. + * The request for this originates from the VF IWARP driver through + * a client interface between VF LAN and VF IWARP driver. + * A vector could have an AEQ and CEQ attached to it although + * there is a single AEQ per VF IWARP instance in which case + * most vectors will have an INVALID_IDX for aeq and valid idx for ceq. + * There will never be a case where there will be multiple CEQs attached + * to a single vector. + * PF configures interrupt mapping and returns status. + */ + +struct virtchnl_iwarp_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); + +struct virtchnl_iwarp_qvlist_info { + u32 num_vectors; + struct virtchnl_iwarp_qv_info qv_info[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info); + +/* VF reset states - these are written into the RSTAT register: + * VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. + */ +enum virtchnl_vfr_states { + VIRTCHNL_VFR_INPROGRESS = 0, + VIRTCHNL_VFR_COMPLETED, + VIRTCHNL_VFR_VFACTIVE, +}; + +/** + * virtchnl_vc_validate_vf_msg + * @ver: Virtchnl version info + * @v_opcode: Opcode for the message + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * validate msg format against struct for each opcode + */ +static inline int +virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, + u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len = 0; + + /* Validate message length. 
*/ + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct virtchnl_version_info); + break; + case VIRTCHNL_OP_RESET_VF: + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(ver)) + valid_len = sizeof(u32); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct virtchnl_txq_info); + break; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct virtchnl_rxq_info); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct virtchnl_vsi_queue_config_info *vqc = + (struct virtchnl_vsi_queue_config_info *)msg; + valid_len += (vqc->num_queue_pairs * + sizeof(struct + virtchnl_queue_pair_info)); + if (vqc->num_queue_pairs == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct virtchnl_irq_map_info *vimi = + (struct virtchnl_irq_map_info *)msg; + valid_len += (vimi->num_vectors * + sizeof(struct virtchnl_vector_map)); + if (vimi->num_vectors == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + case VIRTCHNL_OP_DEL_ETH_ADDR: + valid_len = sizeof(struct virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct virtchnl_ether_addr_list *veal = + (struct virtchnl_ether_addr_list *)msg; + valid_len += veal->num_elements * + sizeof(struct virtchnl_ether_addr); + if (veal->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + valid_len += vfl->num_elements * sizeof(u16); + if (vfl->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct virtchnl_promisc_info); + break; + case VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_IWARP: + /* These messages are opaque to us and will be validated in + * the RDMA client code. We just need to check for nonzero + * length. The firmware will enforce max length restrictions. 
+ */ + if (msglen) + valid_len = msglen; + else + err_msg_format = true; + break; + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + break; + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); + if (msglen >= valid_len) { + struct virtchnl_iwarp_qvlist_info *qv = + (struct virtchnl_iwarp_qvlist_info *)msg; + if (qv->num_vectors == 0) { + err_msg_format = true; + break; + } + valid_len += ((qv->num_vectors - 1) * + sizeof(struct virtchnl_iwarp_qv_info)); + } + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); + if (msglen >= valid_len) { + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + valid_len += vrk->key_len - 1; + } + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct virtchnl_rss_lut); + if (msglen >= valid_len) { + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; + valid_len += vrl->lut_entries - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: + break; + case VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct virtchnl_rss_hena); + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + valid_len = sizeof(struct virtchnl_vf_res_request); + break; + case VIRTCHNL_OP_ENABLE_CHANNELS: + valid_len = sizeof(struct virtchnl_tc_info); + if (msglen >= valid_len) { + struct virtchnl_tc_info *vti = + (struct virtchnl_tc_info *)msg; + valid_len += (vti->num_tc - 1) * + sizeof(struct virtchnl_channel_info); + if (vti->num_tc == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_DISABLE_CHANNELS: + break; + case VIRTCHNL_OP_ADD_CLOUD_FILTER: + valid_len = sizeof(struct virtchnl_filter); + break; + case VIRTCHNL_OP_DEL_CLOUD_FILTER: + valid_len = sizeof(struct virtchnl_filter); + break; + /* These are always errors coming from the VF. */ + case VIRTCHNL_OP_EVENT: + case VIRTCHNL_OP_UNKNOWN: + default: + return VIRTCHNL_ERR_PARAM; + } + /* few more checks */ + if (err_msg_format || valid_len != msglen) + return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; + + return 0; +} +#endif /* _VIRTCHNL_H_ */ diff --git a/include/linux/b1pcmcia.h b/include/linux/b1pcmcia.h new file mode 100644 index 000000000..12a867c60 --- /dev/null +++ b/include/linux/b1pcmcia.h @@ -0,0 +1,21 @@ +/* $Id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai Exp $ + * + * Exported functions of module b1pcmcia to be called by + * avm_cs card services module. + * + * Copyright 1999 by Carsten Paeth (calle@calle.in-berlin.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + */ + +#ifndef _B1PCMCIA_H_ +#define _B1PCMCIA_H_ + +int b1pcmcia_addcard_b1(unsigned int port, unsigned irq); +int b1pcmcia_addcard_m1(unsigned int port, unsigned irq); +int b1pcmcia_addcard_m2(unsigned int port, unsigned irq); +int b1pcmcia_delcard(unsigned int port, unsigned irq); + +#endif /* _B1PCMCIA_H_ */ diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h new file mode 100644 index 000000000..65d475224 --- /dev/null +++ b/include/linux/backing-dev-defs.h @@ -0,0 +1,308 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BACKING_DEV_DEFS_H +#define __LINUX_BACKING_DEV_DEFS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct page; +struct device; +struct dentry; + +/* + * Bits in bdi_writeback.state + */ +enum wb_state { + WB_registered, /* bdi_register() was done */ + WB_writeback_running, /* Writeback is in progress */ + WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */ + WB_start_all, /* nr_pages == 0 (all) work pending */ +}; + +enum wb_congested_state { + WB_async_congested, /* The async (write) queue is getting full */ + WB_sync_congested, /* The sync queue is getting full */ +}; + +typedef int (congested_fn)(void *, int); + +enum wb_stat_item { + WB_RECLAIMABLE, + WB_WRITEBACK, + WB_DIRTIED, + WB_WRITTEN, + NR_WB_STAT_ITEMS +}; + +#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) + +/* + * why some writeback work was initiated + */ +enum wb_reason { + WB_REASON_BACKGROUND, + WB_REASON_VMSCAN, + WB_REASON_SYNC, + WB_REASON_PERIODIC, + WB_REASON_LAPTOP_TIMER, + WB_REASON_FREE_MORE_MEM, + WB_REASON_FS_FREE_SPACE, + /* + * There is no bdi forker thread any more and works are done + * by emergency worker, however, this is TPs userland visible + * and we'll be exposing exactly the same information, + * so it has a mismatch name. + */ + WB_REASON_FORKER_THREAD, + + WB_REASON_MAX, +}; + +/* + * For cgroup writeback, multiple wb's may map to the same blkcg. Those + * wb's can operate mostly independently but should share the congested + * state. To facilitate such sharing, the congested state is tracked using + * the following struct which is created on demand, indexed by blkcg ID on + * its bdi, and refcounted. + */ +struct bdi_writeback_congested { + unsigned long state; /* WB_[a]sync_congested flags */ + refcount_t refcnt; /* nr of attached wb's and blkg */ + +#ifdef CONFIG_CGROUP_WRITEBACK + struct backing_dev_info *__bdi; /* the associated bdi, set to NULL + * on bdi unregistration. For memcg-wb + * internal use only! */ + int blkcg_id; /* ID of the associated blkcg */ + struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */ +#endif +}; + +/* + * Each wb (bdi_writeback) can perform writeback operations, is measured + * and throttled, independently. Without cgroup writeback, each bdi + * (bdi_writeback) is served by its embedded bdi->wb. + * + * On the default hierarchy, blkcg implicitly enables memcg. This allows + * using memcg's page ownership for attributing writeback IOs, and every + * memcg - blkcg combination can be served by its own wb by assigning a + * dedicated wb to each memcg, which enables isolation across different + * cgroups and propagation of IO back pressure down from the IO layer upto + * the tasks which are generating the dirty pages to be written back. + * + * A cgroup wb is indexed on its bdi by the ID of the associated memcg, + * refcounted with the number of inodes attached to it, and pins the memcg + * and the corresponding blkcg. 
As the corresponding blkcg for a memcg may + * change as blkcg is disabled and enabled higher up in the hierarchy, a wb + * is tested for blkcg after lookup and removed from index on mismatch so + * that a new wb for the combination can be created. + */ +struct bdi_writeback { + struct backing_dev_info *bdi; /* our parent bdi */ + + unsigned long state; /* Always use atomic bitops on this */ + unsigned long last_old_flush; /* last old data flush */ + + struct list_head b_dirty; /* dirty inodes */ + struct list_head b_io; /* parked for writeback */ + struct list_head b_more_io; /* parked for more writeback */ + struct list_head b_dirty_time; /* time stamps are dirty */ + spinlock_t list_lock; /* protects the b_* lists */ + + struct percpu_counter stat[NR_WB_STAT_ITEMS]; + + struct bdi_writeback_congested *congested; + + unsigned long bw_time_stamp; /* last time write bw is updated */ + unsigned long dirtied_stamp; + unsigned long written_stamp; /* pages written at bw_time_stamp */ + unsigned long write_bandwidth; /* the estimated write bandwidth */ + unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */ + + /* + * The base dirty throttle rate, re-calculated on every 200ms. + * All the bdi tasks' dirty rate will be curbed under it. + * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit + * in small steps and is much more smooth/stable than the latter. + */ + unsigned long dirty_ratelimit; + unsigned long balanced_dirty_ratelimit; + + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + + spinlock_t work_lock; /* protects work_list & dwork scheduling */ + struct list_head work_list; + struct delayed_work dwork; /* work item used for writeback */ + + unsigned long dirty_sleep; /* last wait */ + + struct list_head bdi_node; /* anchored at bdi->wb_list */ + +#ifdef CONFIG_CGROUP_WRITEBACK + struct percpu_ref refcnt; /* used only for !root wb's */ + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; /* the associated memcg */ + struct cgroup_subsys_state *blkcg_css; /* and blkcg */ + struct list_head memcg_node; /* anchored at memcg->cgwb_list */ + struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */ + + union { + struct work_struct release_work; + struct rcu_head rcu; + }; +#endif +}; + +struct backing_dev_info { + struct list_head bdi_list; + unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ + unsigned long io_pages; /* max allowed IO size */ + congested_fn *congested_fn; /* Function pointer if device is md/dm */ + void *congested_data; /* Pointer to aux data for congested func */ + + const char *name; + + struct kref refcnt; /* Reference counter for the structure */ + unsigned int capabilities; /* Device capabilities */ + unsigned int min_ratio; + unsigned int max_ratio, max_prop_frac; + + /* + * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are + * any dirty wbs, which is depended upon by bdi_has_dirty(). 
+ */ + atomic_long_t tot_write_bandwidth; + + struct bdi_writeback wb; /* the root writeback info for this bdi */ + struct list_head wb_list; /* list of all wbs */ +#ifdef CONFIG_CGROUP_WRITEBACK + struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ + struct rb_root cgwb_congested_tree; /* their congested states */ + struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ + struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */ +#else + struct bdi_writeback_congested *wb_congested; +#endif + wait_queue_head_t wb_waitq; + + struct device *dev; + char dev_name[64]; + struct device *owner; + + struct timer_list laptop_mode_wb_timer; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_dir; + struct dentry *debug_stats; +#endif +}; + +enum { + BLK_RW_ASYNC = 0, + BLK_RW_SYNC = 1, +}; + +void clear_wb_congested(struct bdi_writeback_congested *congested, int sync); +void set_wb_congested(struct bdi_writeback_congested *congested, int sync); + +static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync) +{ + clear_wb_congested(bdi->wb.congested, sync); +} + +static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) +{ + set_wb_congested(bdi->wb.congested, sync); +} + +struct wb_lock_cookie { + bool locked; + unsigned long flags; +}; + +#ifdef CONFIG_CGROUP_WRITEBACK + +/** + * wb_tryget - try to increment a wb's refcount + * @wb: bdi_writeback to get + */ +static inline bool wb_tryget(struct bdi_writeback *wb) +{ + if (wb != &wb->bdi->wb) + return percpu_ref_tryget(&wb->refcnt); + return true; +} + +/** + * wb_get - increment a wb's refcount + * @wb: bdi_writeback to get + */ +static inline void wb_get(struct bdi_writeback *wb) +{ + if (wb != &wb->bdi->wb) + percpu_ref_get(&wb->refcnt); +} + +/** + * wb_put - decrement a wb's refcount + * @wb: bdi_writeback to put + */ +static inline void wb_put(struct bdi_writeback *wb) +{ + if (WARN_ON_ONCE(!wb->bdi)) { + /* + * A driver bug might cause a file to be removed before bdi was + * initialized. + */ + return; + } + + if (wb != &wb->bdi->wb) + percpu_ref_put(&wb->refcnt); +} + +/** + * wb_dying - is a wb dying? + * @wb: bdi_writeback of interest + * + * Returns whether @wb is unlinked and being drained. + */ +static inline bool wb_dying(struct bdi_writeback *wb) +{ + return percpu_ref_is_dying(&wb->refcnt); +} + +#else /* CONFIG_CGROUP_WRITEBACK */ + +static inline bool wb_tryget(struct bdi_writeback *wb) +{ + return true; +} + +static inline void wb_get(struct bdi_writeback *wb) +{ +} + +static inline void wb_put(struct bdi_writeback *wb) +{ +} + +static inline bool wb_dying(struct bdi_writeback *wb) +{ + return false; +} + +#endif /* CONFIG_CGROUP_WRITEBACK */ + +#endif /* __LINUX_BACKING_DEV_DEFS_H */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h new file mode 100644 index 000000000..d28d57eef --- /dev/null +++ b/include/linux/backing-dev.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/backing-dev.h + * + * low-level device information and state which is propagated up through + * to high-level code. 
+ */ + +#ifndef _LINUX_BACKING_DEV_H +#define _LINUX_BACKING_DEV_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi) +{ + kref_get(&bdi->refcnt); + return bdi; +} + +void bdi_put(struct backing_dev_info *bdi); + +__printf(2, 3) +int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...); +__printf(2, 0) +int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, + va_list args); +int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner); +void bdi_unregister(struct backing_dev_info *bdi); + +struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id); +static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask) +{ + return bdi_alloc_node(gfp_mask, NUMA_NO_NODE); +} + +void wb_start_background_writeback(struct bdi_writeback *wb); +void wb_workfn(struct work_struct *work); +void wb_wakeup_delayed(struct bdi_writeback *wb); + +extern spinlock_t bdi_lock; +extern struct list_head bdi_list; + +extern struct workqueue_struct *bdi_wq; + +static inline bool wb_has_dirty_io(struct bdi_writeback *wb) +{ + return test_bit(WB_has_dirty_io, &wb->state); +} + +static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi) +{ + /* + * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are + * any dirty wbs. See wb_update_write_bandwidth(). + */ + return atomic_long_read(&bdi->tot_write_bandwidth); +} + +static inline void __add_wb_stat(struct bdi_writeback *wb, + enum wb_stat_item item, s64 amount) +{ + percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH); +} + +static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) +{ + __add_wb_stat(wb, item, 1); +} + +static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) +{ + __add_wb_stat(wb, item, -1); +} + +static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) +{ + return percpu_counter_read_positive(&wb->stat[item]); +} + +static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item) +{ + return percpu_counter_sum_positive(&wb->stat[item]); +} + +extern void wb_writeout_inc(struct bdi_writeback *wb); + +/* + * maximal error of a stat counter. + */ +static inline unsigned long wb_stat_error(void) +{ +#ifdef CONFIG_SMP + return nr_cpu_ids * WB_STAT_BATCH; +#else + return 1; +#endif +} + +int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio); +int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); + +/* + * Flags in backing_dev_info::capability + * + * The first three flags control whether dirty pages will contribute to the + * VM's accounting and whether writepages() should be called for dirty pages + * (something that would not, for example, be appropriate for ramfs) + * + * WARNING: these flags are closely related and should not normally be + * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these + * three flags into a single convenience macro. + * + * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting + * BDI_CAP_NO_WRITEBACK: Don't write pages back + * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages + * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. + * + * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback. + * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be + * inefficient. 
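Putting the allocation and registration entry points above together, a bare-bones setup path might look like the following. This is a sketch only: the device name format and error handling are illustrative, and most drivers get their bdi through the block layer rather than calling these directly.

static struct backing_dev_info *my_setup_bdi(void)
{
	struct backing_dev_info *bdi = bdi_alloc(GFP_KERNEL);

	if (!bdi)
		return NULL;

	/* A real driver would also fill in ra_pages, io_pages, capabilities. */
	if (bdi_register(bdi, "my-bdi-%d", 0)) {
		bdi_put(bdi);
		return NULL;
	}
	return bdi;
}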
+ */ +#define BDI_CAP_NO_ACCT_DIRTY 0x00000001 +#define BDI_CAP_NO_WRITEBACK 0x00000002 +#define BDI_CAP_NO_ACCT_WB 0x00000004 +#define BDI_CAP_STABLE_WRITES 0x00000008 +#define BDI_CAP_STRICTLIMIT 0x00000010 +#define BDI_CAP_CGROUP_WRITEBACK 0x00000020 +#define BDI_CAP_SYNCHRONOUS_IO 0x00000040 + +#define BDI_CAP_NO_ACCT_AND_WRITEBACK \ + (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) + +extern struct backing_dev_info noop_backing_dev_info; + +/** + * writeback_in_progress - determine whether there is writeback in progress + * @wb: bdi_writeback of interest + * + * Determine whether there is writeback waiting to be handled against a + * bdi_writeback. + */ +static inline bool writeback_in_progress(struct bdi_writeback *wb) +{ + return test_bit(WB_writeback_running, &wb->state); +} + +static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) +{ + struct super_block *sb; + + if (!inode) + return &noop_backing_dev_info; + + sb = inode->i_sb; +#ifdef CONFIG_BLOCK + if (sb_is_blkdev_sb(sb)) + return I_BDEV(inode)->bd_bdi; +#endif + return sb->s_bdi; +} + +static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) +{ + struct backing_dev_info *bdi = wb->bdi; + + if (bdi->congested_fn) + return bdi->congested_fn(bdi->congested_data, cong_bits); + return wb->congested->state & cong_bits; +} + +long congestion_wait(int sync, long timeout); +long wait_iff_congested(int sync, long timeout); + +static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi) +{ + return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO; +} + +static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi) +{ + return bdi->capabilities & BDI_CAP_STABLE_WRITES; +} + +static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi) +{ + return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK); +} + +static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi) +{ + return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY); +} + +static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi) +{ + /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */ + return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB | + BDI_CAP_NO_WRITEBACK)); +} + +static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) +{ + return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host)); +} + +static inline bool mapping_cap_account_dirty(struct address_space *mapping) +{ + return bdi_cap_account_dirty(inode_to_bdi(mapping->host)); +} + +static inline int bdi_sched_wait(void *word) +{ + schedule(); + return 0; +} + +#ifdef CONFIG_CGROUP_WRITEBACK + +struct bdi_writeback_congested * +wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp); +void wb_congested_put(struct bdi_writeback_congested *congested); +struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, + struct cgroup_subsys_state *memcg_css, + gfp_t gfp); +void wb_memcg_offline(struct mem_cgroup *memcg); +void wb_blkcg_offline(struct blkcg *blkcg); +int inode_congested(struct inode *inode, int cong_bits); + +/** + * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode + * @inode: inode of interest + * + * cgroup writeback requires support from both the bdi and filesystem. + * Also, both memcg and iocg have to be on the default hierarchy. Test + * whether all conditions are met. + * + * Note that the test result may change dynamically on the same inode + * depending on how memcg and iocg are configured. 
+ */ +static inline bool inode_cgwb_enabled(struct inode *inode) +{ + struct backing_dev_info *bdi = inode_to_bdi(inode); + + return cgroup_subsys_on_dfl(memory_cgrp_subsys) && + cgroup_subsys_on_dfl(io_cgrp_subsys) && + bdi_cap_account_dirty(bdi) && + (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && + (inode->i_sb->s_iflags & SB_I_CGROUPWB); +} + +/** + * wb_find_current - find wb for %current on a bdi + * @bdi: bdi of interest + * + * Find the wb of @bdi which matches both the memcg and blkcg of %current. + * Must be called under rcu_read_lock() which protects the returend wb. + * NULL if not found. + */ +static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) +{ + struct cgroup_subsys_state *memcg_css; + struct bdi_writeback *wb; + + memcg_css = task_css(current, memory_cgrp_id); + if (!memcg_css->parent) + return &bdi->wb; + + wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); + + /* + * %current's blkcg equals the effective blkcg of its memcg. No + * need to use the relatively expensive cgroup_get_e_css(). + */ + if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id))) + return wb; + return NULL; +} + +/** + * wb_get_create_current - get or create wb for %current on a bdi + * @bdi: bdi of interest + * @gfp: allocation mask + * + * Equivalent to wb_get_create() on %current's memcg. This function is + * called from a relatively hot path and optimizes the common cases using + * wb_find_current(). + */ +static inline struct bdi_writeback * +wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) +{ + struct bdi_writeback *wb; + + rcu_read_lock(); + wb = wb_find_current(bdi); + if (wb && unlikely(!wb_tryget(wb))) + wb = NULL; + rcu_read_unlock(); + + if (unlikely(!wb)) { + struct cgroup_subsys_state *memcg_css; + + memcg_css = task_get_css(current, memory_cgrp_id); + wb = wb_get_create(bdi, memcg_css, gfp); + css_put(memcg_css); + } + return wb; +} + +/** + * inode_to_wb_is_valid - test whether an inode has a wb associated + * @inode: inode of interest + * + * Returns %true if @inode has a wb associated. May be called without any + * locking. + */ +static inline bool inode_to_wb_is_valid(struct inode *inode) +{ + return inode->i_wb; +} + +/** + * inode_to_wb - determine the wb of an inode + * @inode: inode of interest + * + * Returns the wb @inode is currently associated with. The caller must be + * holding either @inode->i_lock, the i_pages lock, or the + * associated wb's list_lock. + */ +static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) +{ +#ifdef CONFIG_LOCKDEP + WARN_ON_ONCE(debug_locks && + (!lockdep_is_held(&inode->i_lock) && + !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && + !lockdep_is_held(&inode->i_wb->list_lock))); +#endif + return inode->i_wb; +} + +/** + * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction + * @inode: target inode + * @cookie: output param, to be passed to the end function + * + * The caller wants to access the wb associated with @inode but isn't + * holding inode->i_lock, the i_pages lock or wb->list_lock. This + * function determines the wb associated with @inode and ensures that the + * association doesn't change until the transaction is finished with + * unlocked_inode_to_wb_end(). + * + * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and + * can't sleep during the transaction. IRQs may or may not be disabled on + * return. 
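The transaction just described pairs unlocked_inode_to_wb_begin() with unlocked_inode_to_wb_end(). A sketch of the calling pattern, modelled on how the writeback accounting code uses it; the stat update in the middle is purely illustrative:

static void my_touch_wb_stats(struct inode *inode)
{
	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;

	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	inc_wb_stat(wb, WB_DIRTIED);	/* any short, non-sleeping use of @wb */
	unlocked_inode_to_wb_end(inode, &cookie);
}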
+ */ +static inline struct bdi_writeback * +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) +{ + rcu_read_lock(); + + /* + * Paired with store_release in inode_switch_wb_work_fn() and + * ensures that we see the new wb if we see cleared I_WB_SWITCH. + */ + cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; + + if (unlikely(cookie->locked)) + xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); + + /* + * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages + * lock. inode_to_wb() will bark. Deref directly. + */ + return inode->i_wb; +} + +/** + * unlocked_inode_to_wb_end - end inode wb access transaction + * @inode: target inode + * @cookie: @cookie from unlocked_inode_to_wb_begin() + */ +static inline void unlocked_inode_to_wb_end(struct inode *inode, + struct wb_lock_cookie *cookie) +{ + if (unlikely(cookie->locked)) + xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); + + rcu_read_unlock(); +} + +#else /* CONFIG_CGROUP_WRITEBACK */ + +static inline bool inode_cgwb_enabled(struct inode *inode) +{ + return false; +} + +static inline struct bdi_writeback_congested * +wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) +{ + refcount_inc(&bdi->wb_congested->refcnt); + return bdi->wb_congested; +} + +static inline void wb_congested_put(struct bdi_writeback_congested *congested) +{ + if (refcount_dec_and_test(&congested->refcnt)) + kfree(congested); +} + +static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) +{ + return &bdi->wb; +} + +static inline struct bdi_writeback * +wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) +{ + return &bdi->wb; +} + +static inline bool inode_to_wb_is_valid(struct inode *inode) +{ + return true; +} + +static inline struct bdi_writeback *inode_to_wb(struct inode *inode) +{ + return &inode_to_bdi(inode)->wb; +} + +static inline struct bdi_writeback * +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) +{ + return inode_to_wb(inode); +} + +static inline void unlocked_inode_to_wb_end(struct inode *inode, + struct wb_lock_cookie *cookie) +{ +} + +static inline void wb_memcg_offline(struct mem_cgroup *memcg) +{ +} + +static inline void wb_blkcg_offline(struct blkcg *blkcg) +{ +} + +static inline int inode_congested(struct inode *inode, int cong_bits) +{ + return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); +} + +#endif /* CONFIG_CGROUP_WRITEBACK */ + +static inline int inode_read_congested(struct inode *inode) +{ + return inode_congested(inode, 1 << WB_sync_congested); +} + +static inline int inode_write_congested(struct inode *inode) +{ + return inode_congested(inode, 1 << WB_async_congested); +} + +static inline int inode_rw_congested(struct inode *inode) +{ + return inode_congested(inode, (1 << WB_sync_congested) | + (1 << WB_async_congested)); +} + +static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits) +{ + return wb_congested(&bdi->wb, cong_bits); +} + +static inline int bdi_read_congested(struct backing_dev_info *bdi) +{ + return bdi_congested(bdi, 1 << WB_sync_congested); +} + +static inline int bdi_write_congested(struct backing_dev_info *bdi) +{ + return bdi_congested(bdi, 1 << WB_async_congested); +} + +static inline int bdi_rw_congested(struct backing_dev_info *bdi) +{ + return bdi_congested(bdi, (1 << WB_sync_congested) | + (1 << WB_async_congested)); +} + +const char *bdi_dev_name(struct backing_dev_info *bdi); + +#endif /* _LINUX_BACKING_DEV_H */ diff 
--git a/include/linux/backlight.h b/include/linux/backlight.h new file mode 100644 index 000000000..0b5897446 --- /dev/null +++ b/include/linux/backlight.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Backlight Lowlevel Control Abstraction + * + * Copyright (C) 2003,2004 Hewlett-Packard Company + * + */ + +#ifndef _LINUX_BACKLIGHT_H +#define _LINUX_BACKLIGHT_H + +#include +#include +#include +#include + +/* Notes on locking: + * + * backlight_device->ops_lock is an internal backlight lock protecting the + * ops pointer and no code outside the core should need to touch it. + * + * Access to update_status() is serialised by the update_lock mutex since + * most drivers seem to need this and historically get it wrong. + * + * Most drivers don't need locking on their get_brightness() method. + * If yours does, you need to implement it in the driver. You can use the + * update_lock mutex if appropriate. + * + * Any other use of the locks below is probably wrong. + */ + +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY, + BACKLIGHT_UPDATE_SYSFS, +}; + +enum backlight_type { + BACKLIGHT_RAW = 1, + BACKLIGHT_PLATFORM, + BACKLIGHT_FIRMWARE, + BACKLIGHT_TYPE_MAX, +}; + +enum backlight_notification { + BACKLIGHT_REGISTERED, + BACKLIGHT_UNREGISTERED, +}; + +struct backlight_device; +struct fb_info; + +struct backlight_ops { + unsigned int options; + +#define BL_CORE_SUSPENDRESUME (1 << 0) + + /* Notify the backlight driver some property has changed */ + int (*update_status)(struct backlight_device *); + /* Return the current backlight brightness (accounting for power, + fb_blank etc.) */ + int (*get_brightness)(struct backlight_device *); + /* Check if given framebuffer device is the one bound to this backlight; + return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ + int (*check_fb)(struct backlight_device *, struct fb_info *); +}; + +/* This structure defines all the properties of a backlight */ +struct backlight_properties { + /* Current User requested brightness (0 - max_brightness) */ + int brightness; + /* Maximal value for brightness (read-only) */ + int max_brightness; + /* Current FB Power mode (0: full on, 1..3: power saving + modes; 4: full off), see FB_BLANK_XXX */ + int power; + /* FB Blanking active? (values as for power) */ + /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ + int fb_blank; + /* Backlight type */ + enum backlight_type type; + /* Flags used to signal drivers of state changes */ + unsigned int state; + +#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ +#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ + +}; + +struct backlight_device { + /* Backlight properties */ + struct backlight_properties props; + + /* Serialise access to update_status method */ + struct mutex update_lock; + + /* This protects the 'ops' field. If 'ops' is NULL, the driver that + registered this device has been unloaded, and if class_get_devdata() + points to something in the body of that driver, it is also invalid. 
*/ + struct mutex ops_lock; + const struct backlight_ops *ops; + + /* The framebuffer notifier block */ + struct notifier_block fb_notif; + + /* list entry of all registered backlight devices */ + struct list_head entry; + + struct device dev; + + /* Multiple framebuffers may share one backlight device */ + bool fb_bl_on[FB_MAX]; + + int use_count; +}; + +static inline int backlight_update_status(struct backlight_device *bd) +{ + int ret = -ENOENT; + + mutex_lock(&bd->update_lock); + if (bd->ops && bd->ops->update_status) + ret = bd->ops->update_status(bd); + mutex_unlock(&bd->update_lock); + + return ret; +} + +/** + * backlight_enable - Enable backlight + * @bd: the backlight device to enable + */ +static inline int backlight_enable(struct backlight_device *bd) +{ + if (!bd) + return 0; + + bd->props.power = FB_BLANK_UNBLANK; + bd->props.fb_blank = FB_BLANK_UNBLANK; + bd->props.state &= ~BL_CORE_FBBLANK; + + return backlight_update_status(bd); +} + +/** + * backlight_disable - Disable backlight + * @bd: the backlight device to disable + */ +static inline int backlight_disable(struct backlight_device *bd) +{ + if (!bd) + return 0; + + bd->props.power = FB_BLANK_POWERDOWN; + bd->props.fb_blank = FB_BLANK_POWERDOWN; + bd->props.state |= BL_CORE_FBBLANK; + + return backlight_update_status(bd); +} + +/** + * backlight_put - Drop backlight reference + * @bd: the backlight device to put + */ +static inline void backlight_put(struct backlight_device *bd) +{ + if (bd) + put_device(&bd->dev); +} + +extern struct backlight_device *backlight_device_register(const char *name, + struct device *dev, void *devdata, const struct backlight_ops *ops, + const struct backlight_properties *props); +extern struct backlight_device *devm_backlight_device_register( + struct device *dev, const char *name, struct device *parent, + void *devdata, const struct backlight_ops *ops, + const struct backlight_properties *props); +extern void backlight_device_unregister(struct backlight_device *bd); +extern void devm_backlight_device_unregister(struct device *dev, + struct backlight_device *bd); +extern void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); +extern int backlight_register_notifier(struct notifier_block *nb); +extern int backlight_unregister_notifier(struct notifier_block *nb); +extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness); + +#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) + +static inline void * bl_get_data(struct backlight_device *bl_dev) +{ + return dev_get_drvdata(&bl_dev->dev); +} + +struct generic_bl_info { + const char *name; + int max_intensity; + int default_intensity; + int limit_mask; + void (*set_bl_intensity)(int intensity); + void (*kick_battery)(void); +}; + +#ifdef CONFIG_OF +struct backlight_device *of_find_backlight_by_node(struct device_node *node); +#else +static inline struct backlight_device * +of_find_backlight_by_node(struct device_node *node) +{ + return NULL; +} +#endif + +#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) +struct backlight_device *of_find_backlight(struct device *dev); +struct backlight_device *devm_of_find_backlight(struct device *dev); +#else +static inline struct backlight_device *of_find_backlight(struct device *dev) +{ + return NULL; +} + +static inline struct backlight_device * +devm_of_find_backlight(struct device *dev) +{ + return NULL; +} +#endif 
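Tying the pieces of this header together: a driver typically fills in backlight_properties, provides an update_status() hook, and registers. A condensed sketch with made-up names and no error handling:

static int my_update_status(struct backlight_device *bd)
{
	/* Push bd->props.brightness to the hardware here. */
	return 0;
}

static const struct backlight_ops my_bl_ops = {
	.update_status	= my_update_status,
};

static struct backlight_device *my_register_backlight(struct device *parent)
{
	struct backlight_properties props = {
		.type		= BACKLIGHT_RAW,
		.max_brightness	= 255,
		.brightness	= 255,
	};

	return backlight_device_register("my-backlight", parent, NULL,
					 &my_bl_ops, &props);
}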
+ +#endif diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h new file mode 100644 index 000000000..2426276b9 --- /dev/null +++ b/include/linux/badblocks.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BADBLOCKS_H +#define _LINUX_BADBLOCKS_H + +#include +#include +#include +#include +#include + +#define BB_LEN_MASK (0x00000000000001FFULL) +#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL) +#define BB_ACK_MASK (0x8000000000000000ULL) +#define BB_MAX_LEN 512 +#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9) +#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1) +#define BB_ACK(x) (!!((x) & BB_ACK_MASK)) +#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63)) + +/* Bad block numbers are stored sorted in a single page. + * 64bits is used for each block or extent. + * 54 bits are sector number, 9 bits are extent size, + * 1 bit is an 'acknowledged' flag. + */ +#define MAX_BADBLOCKS (PAGE_SIZE/8) + +struct badblocks { + struct device *dev; /* set by devm_init_badblocks */ + int count; /* count of bad blocks */ + int unacked_exist; /* there probably are unacknowledged + * bad blocks. This is only cleared + * when a read discovers none + */ + int shift; /* shift from sectors to block size + * a -ve shift means badblocks are + * disabled.*/ + u64 *page; /* badblock list */ + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; /* in sectors */ +}; + +int badblocks_check(struct badblocks *bb, sector_t s, int sectors, + sector_t *first_bad, int *bad_sectors); +int badblocks_set(struct badblocks *bb, sector_t s, int sectors, + int acknowledged); +int badblocks_clear(struct badblocks *bb, sector_t s, int sectors); +void ack_all_badblocks(struct badblocks *bb); +ssize_t badblocks_show(struct badblocks *bb, char *page, int unack); +ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len, + int unack); +int badblocks_init(struct badblocks *bb, int enable); +void badblocks_exit(struct badblocks *bb); +struct device; +int devm_init_badblocks(struct device *dev, struct badblocks *bb); +static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb) +{ + if (bb->dev != dev) { + dev_WARN_ONCE(dev, 1, "%s: badblocks instance not associated\n", + __func__); + return; + } + badblocks_exit(bb); +} +#endif diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h new file mode 100644 index 000000000..53051f3d8 --- /dev/null +++ b/include/linux/balloon_compaction.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/balloon_compaction.h + * + * Common interface definitions for making balloon pages movable by compaction. + * + * Despite being perfectly possible to perform ballooned pages migration, they + * make a special corner case to compaction scans because balloon pages are not + * enlisted at any LRU list like the other pages we do compact / migrate. + * + * As the page isolation scanning step a compaction thread does is a lockless + * procedure (from a page standpoint), it might bring some racy situations while + * performing balloon page compaction. In order to sort out these racy scenarios + * and safely perform balloon's page compaction and migration we must, always, + * ensure following these three simple rules: + * + * i. 
when updating a balloon's page ->mapping element, strictly do it under + * the following lock order, independently of the far superior + * locking scheme (lru_lock, balloon_lock): + * +-page_lock(page); + * +--spin_lock_irq(&b_dev_info->pages_lock); + * ... page->mapping updates here ... + * + * ii. before isolating or dequeueing a balloon page from the balloon device + * pages list, the page reference counter must be raised by one and the + * extra refcount must be dropped when the page is enqueued back into + * the balloon device page list, thus a balloon page keeps its reference + * counter raised only while it is under our special handling; + * + * iii. after the lockless scan step have selected a potential balloon page for + * isolation, re-test the PageBalloon mark and the PagePrivate flag + * under the proper page lock, to ensure isolating a valid balloon page + * (not yet isolated, nor under release procedure) + * + * iv. isolation or dequeueing procedure must clear PagePrivate flag under + * page lock together with removing page from balloon device page list. + * + * The functions provided by this interface are placed to help on coping with + * the aforementioned balloon page corner case, as well as to ensure the simple + * set of exposed rules are satisfied while we are dealing with balloon pages + * compaction / migration. + * + * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini + */ +#ifndef _LINUX_BALLOON_COMPACTION_H +#define _LINUX_BALLOON_COMPACTION_H +#include +#include +#include +#include +#include +#include +#include + +/* + * Balloon device information descriptor. + * This struct is used to allow the common balloon compaction interface + * procedures to find the proper balloon device holding memory pages they'll + * have to cope for page compaction / migration, as well as it serves the + * balloon driver as a page book-keeper for its registered balloon devices. + */ +struct balloon_dev_info { + unsigned long isolated_pages; /* # of isolated pages for migration */ + spinlock_t pages_lock; /* Protection to pages list */ + struct list_head pages; /* Pages enqueued & handled to Host */ + int (*migratepage)(struct balloon_dev_info *, struct page *newpage, + struct page *page, enum migrate_mode mode); + struct inode *inode; +}; + +extern struct page *balloon_page_alloc(void); +extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info, + struct page *page); +extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); + +static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) +{ + balloon->isolated_pages = 0; + spin_lock_init(&balloon->pages_lock); + INIT_LIST_HEAD(&balloon->pages); + balloon->migratepage = NULL; + balloon->inode = NULL; +} + +#ifdef CONFIG_BALLOON_COMPACTION +extern const struct address_space_operations balloon_aops; +extern bool balloon_page_isolate(struct page *page, + isolate_mode_t mode); +extern void balloon_page_putback(struct page *page); +extern int balloon_page_migrate(struct address_space *mapping, + struct page *newpage, + struct page *page, enum migrate_mode mode); + +/* + * balloon_page_insert - insert a page into the balloon's page list and make + * the page->private assignment accordingly. + * @balloon : pointer to balloon device + * @page : page to be assigned as a 'balloon page' + * + * Caller must ensure the page is locked and the spin_lock protecting balloon + * pages list is held before inserting a page into the balloon device. 
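For the enqueue side, the helpers declared above are enough for a driver's inflate path. A sketch with error handling trimmed and the driver-side accounting that a real balloon performs omitted:

static int my_balloon_inflate_one(struct balloon_dev_info *b_dev_info)
{
	struct page *page = balloon_page_alloc();

	if (!page)
		return -ENOMEM;

	/* The enqueue helper handles pages_lock and the page state for us. */
	balloon_page_enqueue(b_dev_info, page);
	return 0;
}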
+ */ +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) +{ + __SetPageBalloon(page); + __SetPageMovable(page, balloon->inode->i_mapping); + set_page_private(page, (unsigned long)balloon); + list_add(&page->lru, &balloon->pages); +} + +/* + * balloon_page_delete - delete a page from balloon's page list and clear + * the page->private assignement accordingly. + * @page : page to be released from balloon's page list + * + * Caller must ensure the page is locked and the spin_lock protecting balloon + * pages list is held before deleting a page from the balloon device. + */ +static inline void balloon_page_delete(struct page *page) +{ + __ClearPageBalloon(page); + __ClearPageMovable(page); + set_page_private(page, 0); + /* + * No touch page.lru field once @page has been isolated + * because VM is using the field. + */ + if (!PageIsolated(page)) + list_del(&page->lru); +} + +/* + * balloon_page_device - get the b_dev_info descriptor for the balloon device + * that enqueues the given page. + */ +static inline struct balloon_dev_info *balloon_page_device(struct page *page) +{ + return (struct balloon_dev_info *)page_private(page); +} + +static inline gfp_t balloon_mapping_gfp_mask(void) +{ + return GFP_HIGHUSER_MOVABLE; +} + +#else /* !CONFIG_BALLOON_COMPACTION */ + +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) +{ + __SetPageBalloon(page); + list_add(&page->lru, &balloon->pages); +} + +static inline void balloon_page_delete(struct page *page) +{ + __ClearPageBalloon(page); + list_del(&page->lru); +} + +static inline bool __is_movable_balloon_page(struct page *page) +{ + return false; +} + +static inline bool balloon_page_movable(struct page *page) +{ + return false; +} + +static inline bool isolated_balloon_page(struct page *page) +{ + return false; +} + +static inline bool balloon_page_isolate(struct page *page) +{ + return false; +} + +static inline void balloon_page_putback(struct page *page) +{ + return; +} + +static inline int balloon_page_migrate(struct page *newpage, + struct page *page, enum migrate_mode mode) +{ + return 0; +} + +static inline gfp_t balloon_mapping_gfp_mask(void) +{ + return GFP_HIGHUSER; +} + +#endif /* CONFIG_BALLOON_COMPACTION */ + +/* + * balloon_page_push - insert a page into a page list. + * @head : pointer to list + * @page : page to be added + * + * Caller must ensure the page is private and protect the list. + */ +static inline void balloon_page_push(struct list_head *pages, struct page *page) +{ + list_add(&page->lru, pages); +} + +/* + * balloon_page_pop - remove a page from a page list. + * @head : pointer to list + * @page : page to be added + * + * Caller must ensure the page is private and protect the list. + */ +static inline struct page *balloon_page_pop(struct list_head *pages) +{ + struct page *page = list_first_entry_or_null(pages, struct page, lru); + + if (!page) + return NULL; + + list_del(&page->lru); + return page; +} +#endif /* _LINUX_BALLOON_COMPACTION_H */ diff --git a/include/linux/bcd.h b/include/linux/bcd.h new file mode 100644 index 000000000..118bea36d --- /dev/null +++ b/include/linux/bcd.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCD_H +#define _BCD_H + +#include + +#define bcd2bin(x) \ + (__builtin_constant_p((u8 )(x)) ? \ + const_bcd2bin(x) : \ + _bcd2bin(x)) + +#define bin2bcd(x) \ + (__builtin_constant_p((u8 )(x)) ? 
\ + const_bin2bcd(x) : \ + _bin2bcd(x)) + +#define const_bcd2bin(x) (((x) & 0x0f) + ((x) >> 4) * 10) +#define const_bin2bcd(x) ((((x) / 10) << 4) + (x) % 10) + +unsigned _bcd2bin(unsigned char val) __attribute_const__; +unsigned char _bin2bcd(unsigned val) __attribute_const__; + +#endif /* _BCD_H */ diff --git a/include/linux/bch.h b/include/linux/bch.h new file mode 100644 index 000000000..295b4ef15 --- /dev/null +++ b/include/linux/bch.h @@ -0,0 +1,79 @@ +/* + * Generic binary BCH encoding/decoding library + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright © 2011 Parrot S.A. + * + * Author: Ivan Djelic + * + * Description: + * + * This library provides runtime configurable encoding/decoding of binary + * Bose-Chaudhuri-Hocquenghem (BCH) codes. +*/ +#ifndef _BCH_H +#define _BCH_H + +#include + +/** + * struct bch_control - BCH control structure + * @m: Galois field order + * @n: maximum codeword size in bits (= 2^m-1) + * @t: error correction capability in bits + * @ecc_bits: ecc exact size in bits, i.e. generator polynomial degree (<=m*t) + * @ecc_bytes: ecc max size (m*t bits) in bytes + * @a_pow_tab: Galois field GF(2^m) exponentiation lookup table + * @a_log_tab: Galois field GF(2^m) log lookup table + * @mod8_tab: remainder generator polynomial lookup tables + * @ecc_buf: ecc parity words buffer + * @ecc_buf2: ecc parity words buffer + * @xi_tab: GF(2^m) base for solving degree 2 polynomial roots + * @syn: syndrome buffer + * @cache: log-based polynomial representation buffer + * @elp: error locator polynomial + * @poly_2t: temporary polynomials of degree 2t + */ +struct bch_control { + unsigned int m; + unsigned int n; + unsigned int t; + unsigned int ecc_bits; + unsigned int ecc_bytes; +/* private: */ + uint16_t *a_pow_tab; + uint16_t *a_log_tab; + uint32_t *mod8_tab; + uint32_t *ecc_buf; + uint32_t *ecc_buf2; + unsigned int *xi_tab; + unsigned int *syn; + int *cache; + struct gf_poly *elp; + struct gf_poly *poly_2t[4]; +}; + +struct bch_control *init_bch(int m, int t, unsigned int prim_poly); + +void free_bch(struct bch_control *bch); + +void encode_bch(struct bch_control *bch, const uint8_t *data, + unsigned int len, uint8_t *ecc); + +int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, + const uint8_t *recv_ecc, const uint8_t *calc_ecc, + const unsigned int *syn, unsigned int *errloc); + +#endif /* _BCH_H */ diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h new file mode 100644 index 000000000..a414a2b53 --- /dev/null +++ b/include/linux/bcm47xx_nvram.h @@ -0,0 +1,50 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __BCM47XX_NVRAM_H +#define __BCM47XX_NVRAM_H + +#include +#include +#include +#include + +#ifdef CONFIG_BCM47XX_NVRAM +int bcm47xx_nvram_init_from_mem(u32 base, u32 lim); +int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len); +int bcm47xx_nvram_gpio_pin(const char *name); +char *bcm47xx_nvram_get_contents(size_t *val_len); +static inline void bcm47xx_nvram_release_contents(char *nvram) +{ + vfree(nvram); +}; +#else +static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim) +{ + return -ENOTSUPP; +}; +static inline int bcm47xx_nvram_getenv(const char *name, char *val, + size_t val_len) +{ + return -ENOTSUPP; +}; +static inline int bcm47xx_nvram_gpio_pin(const char *name) +{ + return -ENOTSUPP; +}; + +static inline char *bcm47xx_nvram_get_contents(size_t *val_len) +{ + return NULL; +}; + +static inline void bcm47xx_nvram_release_contents(char *nvram) +{ +}; +#endif + +#endif /* __BCM47XX_NVRAM_H */ diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h new file mode 100644 index 000000000..c06b47c84 --- /dev/null +++ b/include/linux/bcm47xx_sprom.h @@ -0,0 +1,24 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __BCM47XX_SPROM_H +#define __BCM47XX_SPROM_H + +#include +#include +#include + +#ifdef CONFIG_BCM47XX_SPROM +int bcm47xx_sprom_register_fallbacks(void); +#else +static inline int bcm47xx_sprom_register_fallbacks(void) +{ + return -ENOTSUPP; +}; +#endif + +#endif /* __BCM47XX_SPROM_H */ diff --git a/include/linux/bcm47xx_wdt.h b/include/linux/bcm47xx_wdt.h new file mode 100644 index 000000000..fc9dcdb4b --- /dev/null +++ b/include/linux/bcm47xx_wdt.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCM47XX_WDT_H_ +#define LINUX_BCM47XX_WDT_H_ + +#include +#include +#include + + +struct bcm47xx_wdt { + u32 (*timer_set)(struct bcm47xx_wdt *, u32); + u32 (*timer_set_ms)(struct bcm47xx_wdt *, u32); + u32 max_timer_ms; + + void *driver_data; + + struct watchdog_device wdd; + + struct timer_list soft_timer; + atomic_t soft_ticks; +}; + +static inline void *bcm47xx_wdt_get_drvdata(struct bcm47xx_wdt *wdt) +{ + return wdt->driver_data; +} +#endif /* LINUX_BCM47XX_WDT_H_ */ diff --git a/include/linux/bcm963xx_nvram.h b/include/linux/bcm963xx_nvram.h new file mode 100644 index 000000000..c8c7f0115 --- /dev/null +++ b/include/linux/bcm963xx_nvram.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BCM963XX_NVRAM_H__ +#define __LINUX_BCM963XX_NVRAM_H__ + +#include +#include +#include +#include + +/* + * Broadcom BCM963xx SoC board nvram data structure. + * + * The nvram structure varies in size depending on the SoC board version. Use + * the appropriate minimum BCM963XX_NVRAM_*_SIZE define for the information + * you need instead of sizeof(struct bcm963xx_nvram) as this may change. 
+ */ + +#define BCM963XX_NVRAM_V4_SIZE 300 +#define BCM963XX_NVRAM_V5_SIZE (1 * SZ_1K) + +#define BCM963XX_DEFAULT_PSI_SIZE 64 + +enum bcm963xx_nvram_nand_part { + BCM963XX_NVRAM_NAND_PART_BOOT = 0, + BCM963XX_NVRAM_NAND_PART_ROOTFS_1, + BCM963XX_NVRAM_NAND_PART_ROOTFS_2, + BCM963XX_NVRAM_NAND_PART_DATA, + BCM963XX_NVRAM_NAND_PART_BBT, + + __BCM963XX_NVRAM_NAND_NR_PARTS +}; + +struct bcm963xx_nvram { + u32 version; + char bootline[256]; + char name[16]; + u32 main_tp_number; + u32 psi_size; + u32 mac_addr_count; + u8 mac_addr_base[ETH_ALEN]; + u8 __reserved1[2]; + u32 checksum_v4; + + u8 __reserved2[292]; + u32 nand_part_offset[__BCM963XX_NVRAM_NAND_NR_PARTS]; + u32 nand_part_size[__BCM963XX_NVRAM_NAND_NR_PARTS]; + u8 __reserved3[388]; + u32 checksum_v5; +}; + +#define BCM963XX_NVRAM_NAND_PART_OFFSET(nvram, part) \ + bcm963xx_nvram_nand_part_offset(nvram, BCM963XX_NVRAM_NAND_PART_ ##part) + +static inline u64 __pure bcm963xx_nvram_nand_part_offset( + const struct bcm963xx_nvram *nvram, + enum bcm963xx_nvram_nand_part part) +{ + return nvram->nand_part_offset[part] * SZ_1K; +} + +#define BCM963XX_NVRAM_NAND_PART_SIZE(nvram, part) \ + bcm963xx_nvram_nand_part_size(nvram, BCM963XX_NVRAM_NAND_PART_ ##part) + +static inline u64 __pure bcm963xx_nvram_nand_part_size( + const struct bcm963xx_nvram *nvram, + enum bcm963xx_nvram_nand_part part) +{ + return nvram->nand_part_size[part] * SZ_1K; +} + +/* + * bcm963xx_nvram_checksum - Verify nvram checksum + * + * @nvram: pointer to full size nvram data structure + * @expected_out: optional pointer to store expected checksum value + * @actual_out: optional pointer to store actual checksum value + * + * Return: 0 if the checksum is valid, otherwise -EINVAL + */ +static int __maybe_unused bcm963xx_nvram_checksum( + const struct bcm963xx_nvram *nvram, + u32 *expected_out, u32 *actual_out) +{ + u32 expected, actual; + size_t len; + + if (nvram->version <= 4) { + expected = nvram->checksum_v4; + len = BCM963XX_NVRAM_V4_SIZE - sizeof(u32); + } else { + expected = nvram->checksum_v5; + len = BCM963XX_NVRAM_V5_SIZE - sizeof(u32); + } + + /* + * Calculate the CRC32 value for the nvram with a checksum value + * of 0 without modifying or copying the nvram by combining: + * - The CRC32 of the nvram without the checksum value + * - The CRC32 of a zero checksum value (which is also 0) + */ + actual = crc32_le_combine( + crc32_le(~0, (u8 *)nvram, len), 0, sizeof(u32)); + + if (expected_out) + *expected_out = expected; + + if (actual_out) + *actual_out = actual; + + return expected == actual ? 
0 : -EINVAL; +}; + +#endif /* __LINUX_BCM963XX_NVRAM_H__ */ diff --git a/include/linux/bcm963xx_tag.h b/include/linux/bcm963xx_tag.h new file mode 100644 index 000000000..b87945cb6 --- /dev/null +++ b/include/linux/bcm963xx_tag.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BCM963XX_TAG_H__ +#define __LINUX_BCM963XX_TAG_H__ + +#include + +#define TAGVER_LEN 4 /* Length of Tag Version */ +#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */ +#define SIG1_LEN 20 /* Company Signature 1 Length */ +#define SIG2_LEN 14 /* Company Signature 2 Length */ +#define BOARDID_LEN 16 /* Length of BoardId */ +#define ENDIANFLAG_LEN 2 /* Endian Flag Length */ +#define CHIPID_LEN 6 /* Chip Id Length */ +#define IMAGE_LEN 10 /* Length of Length Field */ +#define ADDRESS_LEN 12 /* Length of Address field */ +#define IMAGE_SEQUENCE_LEN 4 /* Image sequence Length */ +#define RSASIG_LEN 20 /* Length of RSA Signature in tag */ +#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */ +#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */ +#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */ +#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */ + +#define NUM_PIRELLI 2 +#define IMAGETAG_CRC_START 0xFFFFFFFF + +#define PIRELLI_BOARDS { \ + "AGPF-S0", \ + "DWV-S0", \ +} + +/* Extended flash address, needs to be subtracted + * from bcm_tag flash image offsets. + */ +#define BCM963XX_EXTENDED_SIZE 0xBFC00000 + +/* + * The broadcom firmware assumes the rootfs starts the image, + * therefore uses the rootfs start (flash_image_address) + * to determine where to flash the image. Since we have the kernel first + * we have to give it the kernel address, but the crc uses the length + * associated with this address (root_length), which is added to the kernel + * length (kernel_length) to determine the length of image to flash and thus + * needs to be rootfs + deadcode (jffs2 EOF marker) +*/ + +struct bcm_tag { + /* 0-3: Version of the image tag */ + char tag_version[TAGVER_LEN]; + /* 4-23: Company Line 1 */ + char sig_1[SIG1_LEN]; + /* 24-37: Company Line 2 */ + char sig_2[SIG2_LEN]; + /* 38-43: Chip this image is for */ + char chip_id[CHIPID_LEN]; + /* 44-59: Board name */ + char board_id[BOARDID_LEN]; + /* 60-61: Map endianness -- 1 BE 0 LE */ + char big_endian[ENDIANFLAG_LEN]; + /* 62-71: Total length of image */ + char total_length[IMAGE_LEN]; + /* 72-83: Address in memory of CFE */ + char cfe__address[ADDRESS_LEN]; + /* 84-93: Size of CFE */ + char cfe_length[IMAGE_LEN]; + /* 94-105: Address in memory of image start + * (kernel for OpenWRT, rootfs for stock firmware) + */ + char flash_image_start[ADDRESS_LEN]; + /* 106-115: Size of rootfs */ + char root_length[IMAGE_LEN]; + /* 116-127: Address in memory of kernel */ + char kernel_address[ADDRESS_LEN]; + /* 128-137: Size of kernel */ + char kernel_length[IMAGE_LEN]; + /* 138-141: Image sequence number + * (to be incremented when flashed with a new image) + */ + char image_sequence[IMAGE_SEQUENCE_LEN]; + /* 142-161: RSA Signature (not used; some vendors may use this) */ + char rsa_signature[RSASIG_LEN]; + /* 162-191: Compilation and related information (not used in OpenWrt) */ + char information1[TAGINFO1_LEN]; + /* 192-195: Version flash layout */ + char flash_layout_ver[FLASHLAYOUTVER_LEN]; + /* 196-199: kernel+rootfs CRC32 */ + __u32 fskernel_crc; + /* 200-215: Unused except on Alice Gate where is is information */ + char information2[TAGINFO2_LEN]; + 
/* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */ + __u32 image_crc; + /* 220-223: CRC32 of rootfs partition */ + __u32 rootfs_crc; + /* 224-227: CRC32 of kernel partition */ + __u32 kernel_crc; + /* 228-235: Unused at present */ + char reserved1[8]; + /* 236-239: CRC32 of header excluding last 20 bytes */ + __u32 header_crc; + /* 240-255: Unused at present */ + char reserved2[16]; +}; + +#endif /* __LINUX_BCM63XX_TAG_H__ */ diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h new file mode 100644 index 000000000..ef61f3607 --- /dev/null +++ b/include/linux/bcma/bcma.h @@ -0,0 +1,494 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_H_ +#define LINUX_BCMA_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include /* SPROM sharing */ + +#include + +struct bcma_device; +struct bcma_bus; + +enum bcma_hosttype { + BCMA_HOSTTYPE_PCI, + BCMA_HOSTTYPE_SDIO, + BCMA_HOSTTYPE_SOC, +}; + +struct bcma_chipinfo { + u16 id; + u8 rev; + u8 pkg; +}; + +struct bcma_boardinfo { + u16 vendor; + u16 type; +}; + +enum bcma_clkmode { + BCMA_CLKMODE_FAST, + BCMA_CLKMODE_DYNAMIC, +}; + +struct bcma_host_ops { + u8 (*read8)(struct bcma_device *core, u16 offset); + u16 (*read16)(struct bcma_device *core, u16 offset); + u32 (*read32)(struct bcma_device *core, u16 offset); + void (*write8)(struct bcma_device *core, u16 offset, u8 value); + void (*write16)(struct bcma_device *core, u16 offset, u16 value); + void (*write32)(struct bcma_device *core, u16 offset, u32 value); +#ifdef CONFIG_BCMA_BLOCKIO + void (*block_read)(struct bcma_device *core, void *buffer, + size_t count, u16 offset, u8 reg_width); + void (*block_write)(struct bcma_device *core, const void *buffer, + size_t count, u16 offset, u8 reg_width); +#endif + /* Agent ops */ + u32 (*aread32)(struct bcma_device *core, u16 offset); + void (*awrite32)(struct bcma_device *core, u16 offset, u32 value); +}; + +/* Core manufacturers */ +#define BCMA_MANUF_ARM 0x43B +#define BCMA_MANUF_MIPS 0x4A7 +#define BCMA_MANUF_BCM 0x4BF + +/* Core class values. */ +#define BCMA_CL_SIM 0x0 +#define BCMA_CL_EROM 0x1 +#define BCMA_CL_CORESIGHT 0x9 +#define BCMA_CL_VERIF 0xB +#define BCMA_CL_OPTIMO 0xD +#define BCMA_CL_GEN 0xE +#define BCMA_CL_PRIMECELL 0xF + +/* Core-ID values. 
*/ +#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */ +#define BCMA_CORE_4706_CHIPCOMMON 0x500 +#define BCMA_CORE_NS_PCIEG2 0x501 +#define BCMA_CORE_NS_DMA 0x502 +#define BCMA_CORE_NS_SDIO3 0x503 +#define BCMA_CORE_NS_USB20 0x504 +#define BCMA_CORE_NS_USB30 0x505 +#define BCMA_CORE_NS_A9JTAG 0x506 +#define BCMA_CORE_NS_DDR23 0x507 +#define BCMA_CORE_NS_ROM 0x508 +#define BCMA_CORE_NS_NAND 0x509 +#define BCMA_CORE_NS_QSPI 0x50A +#define BCMA_CORE_NS_CHIPCOMMON_B 0x50B +#define BCMA_CORE_4706_SOC_RAM 0x50E +#define BCMA_CORE_ARMCA9 0x510 +#define BCMA_CORE_4706_MAC_GBIT 0x52D +#define BCMA_CORE_AMEMC 0x52E /* DDR1/2 memory controller core */ +#define BCMA_CORE_ALTA 0x534 /* I2S core */ +#define BCMA_CORE_4706_MAC_GBIT_COMMON 0x5DC +#define BCMA_CORE_DDR23_PHY 0x5DD +#define BCMA_CORE_INVALID 0x700 +#define BCMA_CORE_CHIPCOMMON 0x800 +#define BCMA_CORE_ILINE20 0x801 +#define BCMA_CORE_SRAM 0x802 +#define BCMA_CORE_SDRAM 0x803 +#define BCMA_CORE_PCI 0x804 +#define BCMA_CORE_MIPS 0x805 +#define BCMA_CORE_ETHERNET 0x806 +#define BCMA_CORE_V90 0x807 +#define BCMA_CORE_USB11_HOSTDEV 0x808 +#define BCMA_CORE_ADSL 0x809 +#define BCMA_CORE_ILINE100 0x80A +#define BCMA_CORE_IPSEC 0x80B +#define BCMA_CORE_UTOPIA 0x80C +#define BCMA_CORE_PCMCIA 0x80D +#define BCMA_CORE_INTERNAL_MEM 0x80E +#define BCMA_CORE_MEMC_SDRAM 0x80F +#define BCMA_CORE_OFDM 0x810 +#define BCMA_CORE_EXTIF 0x811 +#define BCMA_CORE_80211 0x812 +#define BCMA_CORE_PHY_A 0x813 +#define BCMA_CORE_PHY_B 0x814 +#define BCMA_CORE_PHY_G 0x815 +#define BCMA_CORE_MIPS_3302 0x816 +#define BCMA_CORE_USB11_HOST 0x817 +#define BCMA_CORE_USB11_DEV 0x818 +#define BCMA_CORE_USB20_HOST 0x819 +#define BCMA_CORE_USB20_DEV 0x81A +#define BCMA_CORE_SDIO_HOST 0x81B +#define BCMA_CORE_ROBOSWITCH 0x81C +#define BCMA_CORE_PARA_ATA 0x81D +#define BCMA_CORE_SATA_XORDMA 0x81E +#define BCMA_CORE_ETHERNET_GBIT 0x81F +#define BCMA_CORE_PCIE 0x820 +#define BCMA_CORE_PHY_N 0x821 +#define BCMA_CORE_SRAM_CTL 0x822 +#define BCMA_CORE_MINI_MACPHY 0x823 +#define BCMA_CORE_ARM_1176 0x824 +#define BCMA_CORE_ARM_7TDMI 0x825 +#define BCMA_CORE_PHY_LP 0x826 +#define BCMA_CORE_PMU 0x827 +#define BCMA_CORE_PHY_SSN 0x828 +#define BCMA_CORE_SDIO_DEV 0x829 +#define BCMA_CORE_ARM_CM3 0x82A +#define BCMA_CORE_PHY_HT 0x82B +#define BCMA_CORE_MIPS_74K 0x82C +#define BCMA_CORE_MAC_GBIT 0x82D +#define BCMA_CORE_DDR12_MEM_CTL 0x82E +#define BCMA_CORE_PCIE_RC 0x82F /* PCIe Root Complex */ +#define BCMA_CORE_OCP_OCP_BRIDGE 0x830 +#define BCMA_CORE_SHARED_COMMON 0x831 +#define BCMA_CORE_OCP_AHB_BRIDGE 0x832 +#define BCMA_CORE_SPI_HOST 0x833 +#define BCMA_CORE_I2S 0x834 +#define BCMA_CORE_SDR_DDR1_MEM_CTL 0x835 /* SDR/DDR1 memory controller core */ +#define BCMA_CORE_SHIM 0x837 /* SHIM component in ubus/6362 */ +#define BCMA_CORE_PHY_AC 0x83B +#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ +#define BCMA_CORE_USB30_DEV 0x83D +#define BCMA_CORE_ARM_CR4 0x83E +#define BCMA_CORE_GCI 0x840 +#define BCMA_CORE_CMEM 0x846 /* CNDS DDR2/3 memory controller */ +#define BCMA_CORE_ARM_CA7 0x847 +#define BCMA_CORE_SYS_MEM 0x849 +#define BCMA_CORE_DEFAULT 0xFFF + +#define BCMA_MAX_NR_CORES 16 +#define BCMA_CORE_SIZE 0x1000 + +/* Chip IDs of PCIe devices */ +#define BCMA_CHIP_ID_BCM4313 0x4313 +#define BCMA_CHIP_ID_BCM43142 43142 +#define BCMA_CHIP_ID_BCM43131 43131 +#define BCMA_CHIP_ID_BCM43217 43217 +#define BCMA_CHIP_ID_BCM43222 43222 +#define BCMA_CHIP_ID_BCM43224 43224 +#define BCMA_PKG_ID_BCM43224_FAB_CSM 0x8 +#define BCMA_PKG_ID_BCM43224_FAB_SMIC 0xa +#define BCMA_CHIP_ID_BCM43225 43225 
+#define BCMA_CHIP_ID_BCM43227 43227 +#define BCMA_CHIP_ID_BCM43228 43228 +#define BCMA_CHIP_ID_BCM43421 43421 +#define BCMA_CHIP_ID_BCM43428 43428 +#define BCMA_CHIP_ID_BCM43431 43431 +#define BCMA_CHIP_ID_BCM43460 43460 +#define BCMA_CHIP_ID_BCM4331 0x4331 +#define BCMA_CHIP_ID_BCM6362 0x6362 +#define BCMA_CHIP_ID_BCM4360 0x4360 +#define BCMA_CHIP_ID_BCM4352 0x4352 + +/* Chip IDs of SoCs */ +#define BCMA_CHIP_ID_BCM4706 0x5300 +#define BCMA_PKG_ID_BCM4706L 1 +#define BCMA_CHIP_ID_BCM4716 0x4716 +#define BCMA_PKG_ID_BCM4716 8 +#define BCMA_PKG_ID_BCM4717 9 +#define BCMA_PKG_ID_BCM4718 10 +#define BCMA_CHIP_ID_BCM47162 47162 +#define BCMA_CHIP_ID_BCM4748 0x4748 +#define BCMA_CHIP_ID_BCM4749 0x4749 +#define BCMA_CHIP_ID_BCM5356 0x5356 +#define BCMA_CHIP_ID_BCM5357 0x5357 +#define BCMA_PKG_ID_BCM5358 9 +#define BCMA_PKG_ID_BCM47186 10 +#define BCMA_PKG_ID_BCM5357 11 +#define BCMA_CHIP_ID_BCM53572 53572 +#define BCMA_PKG_ID_BCM47188 9 +#define BCMA_CHIP_ID_BCM4707 53010 +#define BCMA_PKG_ID_BCM4707 1 +#define BCMA_PKG_ID_BCM4708 2 +#define BCMA_PKG_ID_BCM4709 0 +#define BCMA_CHIP_ID_BCM47094 53030 +#define BCMA_CHIP_ID_BCM53018 53018 +#define BCMA_CHIP_ID_BCM53573 53573 +#define BCMA_PKG_ID_BCM53573 0 +#define BCMA_PKG_ID_BCM47189 1 + +/* Board types (on PCI usually equals to the subsystem dev id) */ +/* BCM4313 */ +#define BCMA_BOARD_TYPE_BCM94313BU 0X050F +#define BCMA_BOARD_TYPE_BCM94313HM 0X0510 +#define BCMA_BOARD_TYPE_BCM94313EPA 0X0511 +#define BCMA_BOARD_TYPE_BCM94313HMG 0X051C +/* BCM4716 */ +#define BCMA_BOARD_TYPE_BCM94716NR2 0X04CD +/* BCM43224 */ +#define BCMA_BOARD_TYPE_BCM943224X21 0X056E +#define BCMA_BOARD_TYPE_BCM943224X21_FCC 0X00D1 +#define BCMA_BOARD_TYPE_BCM943224X21B 0X00E9 +#define BCMA_BOARD_TYPE_BCM943224M93 0X008B +#define BCMA_BOARD_TYPE_BCM943224M93A 0X0090 +#define BCMA_BOARD_TYPE_BCM943224X16 0X0093 +#define BCMA_BOARD_TYPE_BCM94322X9 0X008D +#define BCMA_BOARD_TYPE_BCM94322M35E 0X008E +/* BCM43228 */ +#define BCMA_BOARD_TYPE_BCM943228BU8 0X0540 +#define BCMA_BOARD_TYPE_BCM943228BU9 0X0541 +#define BCMA_BOARD_TYPE_BCM943228BU 0X0542 +#define BCMA_BOARD_TYPE_BCM943227HM4L 0X0543 +#define BCMA_BOARD_TYPE_BCM943227HMB 0X0544 +#define BCMA_BOARD_TYPE_BCM943228HM4L 0X0545 +#define BCMA_BOARD_TYPE_BCM943228SD 0X0573 +/* BCM4331 */ +#define BCMA_BOARD_TYPE_BCM94331X19 0X00D6 +#define BCMA_BOARD_TYPE_BCM94331X28 0X00E4 +#define BCMA_BOARD_TYPE_BCM94331X28B 0X010E +#define BCMA_BOARD_TYPE_BCM94331PCIEBT3AX 0X00E4 +#define BCMA_BOARD_TYPE_BCM94331X12_2G 0X00EC +#define BCMA_BOARD_TYPE_BCM94331X12_5G 0X00ED +#define BCMA_BOARD_TYPE_BCM94331X29B 0X00EF +#define BCMA_BOARD_TYPE_BCM94331CSAX 0X00EF +#define BCMA_BOARD_TYPE_BCM94331X19C 0X00F5 +#define BCMA_BOARD_TYPE_BCM94331X33 0X00F4 +#define BCMA_BOARD_TYPE_BCM94331BU 0X0523 +#define BCMA_BOARD_TYPE_BCM94331S9BU 0X0524 +#define BCMA_BOARD_TYPE_BCM94331MC 0X0525 +#define BCMA_BOARD_TYPE_BCM94331MCI 0X0526 +#define BCMA_BOARD_TYPE_BCM94331PCIEBT4 0X0527 +#define BCMA_BOARD_TYPE_BCM94331HM 0X0574 +#define BCMA_BOARD_TYPE_BCM94331PCIEDUAL 0X059B +#define BCMA_BOARD_TYPE_BCM94331MCH5 0X05A9 +#define BCMA_BOARD_TYPE_BCM94331CS 0X05C6 +#define BCMA_BOARD_TYPE_BCM94331CD 0X05DA +/* BCM53572 */ +#define BCMA_BOARD_TYPE_BCM953572BU 0X058D +#define BCMA_BOARD_TYPE_BCM953572NR2 0X058E +#define BCMA_BOARD_TYPE_BCM947188NR2 0X058F +#define BCMA_BOARD_TYPE_BCM953572SDRNR2 0X0590 +/* BCM43142 */ +#define BCMA_BOARD_TYPE_BCM943142HM 0X05E0 + +struct bcma_device { + struct bcma_bus *bus; + struct bcma_device_id id; + + struct device dev; + 
struct device *dma_dev; + + unsigned int irq; + bool dev_registered; + + u8 core_index; + u8 core_unit; + + u32 addr; + u32 addr_s[8]; + u32 wrap; + + void __iomem *io_addr; + void __iomem *io_wrap; + + void *drvdata; + struct list_head list; +}; + +static inline void *bcma_get_drvdata(struct bcma_device *core) +{ + return core->drvdata; +} +static inline void bcma_set_drvdata(struct bcma_device *core, void *drvdata) +{ + core->drvdata = drvdata; +} + +struct bcma_driver { + const char *name; + const struct bcma_device_id *id_table; + + int (*probe)(struct bcma_device *dev); + void (*remove)(struct bcma_device *dev); + int (*suspend)(struct bcma_device *dev); + int (*resume)(struct bcma_device *dev); + void (*shutdown)(struct bcma_device *dev); + + struct device_driver drv; +}; +extern +int __bcma_driver_register(struct bcma_driver *drv, struct module *owner); +#define bcma_driver_register(drv) \ + __bcma_driver_register(drv, THIS_MODULE) + +extern void bcma_driver_unregister(struct bcma_driver *drv); + +/* module_bcma_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_bcma_driver(__bcma_driver) \ + module_driver(__bcma_driver, bcma_driver_register, \ + bcma_driver_unregister) + +/* Set a fallback SPROM. + * See kdoc at the function definition for complete documentation. */ +extern int bcma_arch_register_fallback_sprom( + int (*sprom_callback)(struct bcma_bus *bus, + struct ssb_sprom *out)); + +struct bcma_bus { + /* The MMIO area. */ + void __iomem *mmio; + + const struct bcma_host_ops *ops; + + enum bcma_hosttype hosttype; + bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */ + union { + /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */ + struct pci_dev *host_pci; + /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */ + struct sdio_func *host_sdio; + /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */ + struct platform_device *host_pdev; + }; + + struct bcma_chipinfo chipinfo; + + struct bcma_boardinfo boardinfo; + + struct bcma_device *mapped_core; + struct list_head cores; + u8 nr_cores; + u8 num; + + struct bcma_drv_cc drv_cc; + struct bcma_drv_cc_b drv_cc_b; + struct bcma_drv_pci drv_pci[2]; + struct bcma_drv_pcie2 drv_pcie2; + struct bcma_drv_mips drv_mips; + struct bcma_drv_gmac_cmn drv_gmac_cmn; + + /* We decided to share SPROM struct with SSB as long as we do not need + * any hacks for BCMA. This simplifies drivers code. 
*/ + struct ssb_sprom sprom; +}; + +static inline u32 bcma_read8(struct bcma_device *core, u16 offset) +{ + return core->bus->ops->read8(core, offset); +} +static inline u32 bcma_read16(struct bcma_device *core, u16 offset) +{ + return core->bus->ops->read16(core, offset); +} +static inline u32 bcma_read32(struct bcma_device *core, u16 offset) +{ + return core->bus->ops->read32(core, offset); +} +static inline +void bcma_write8(struct bcma_device *core, u16 offset, u32 value) +{ + core->bus->ops->write8(core, offset, value); +} +static inline +void bcma_write16(struct bcma_device *core, u16 offset, u32 value) +{ + core->bus->ops->write16(core, offset, value); +} +static inline +void bcma_write32(struct bcma_device *core, u16 offset, u32 value) +{ + core->bus->ops->write32(core, offset, value); +} +#ifdef CONFIG_BCMA_BLOCKIO +static inline void bcma_block_read(struct bcma_device *core, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + core->bus->ops->block_read(core, buffer, count, offset, reg_width); +} +static inline void bcma_block_write(struct bcma_device *core, + const void *buffer, size_t count, + u16 offset, u8 reg_width) +{ + core->bus->ops->block_write(core, buffer, count, offset, reg_width); +} +#endif +static inline u32 bcma_aread32(struct bcma_device *core, u16 offset) +{ + return core->bus->ops->aread32(core, offset); +} +static inline +void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value) +{ + core->bus->ops->awrite32(core, offset, value); +} + +static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask) +{ + bcma_write32(cc, offset, bcma_read32(cc, offset) & mask); +} +static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set) +{ + bcma_write32(cc, offset, bcma_read32(cc, offset) | set); +} +static inline void bcma_maskset32(struct bcma_device *cc, + u16 offset, u32 mask, u32 set) +{ + bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set); +} +static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask) +{ + bcma_write16(cc, offset, bcma_read16(cc, offset) & mask); +} +static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set) +{ + bcma_write16(cc, offset, bcma_read16(cc, offset) | set); +} +static inline void bcma_maskset16(struct bcma_device *cc, + u16 offset, u16 mask, u16 set) +{ + bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set); +} + +extern struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid, + u8 unit); +static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus, + u16 coreid) +{ + return bcma_find_core_unit(bus, coreid, 0); +} + +#ifdef CONFIG_BCMA_HOST_PCI +extern void bcma_host_pci_up(struct bcma_bus *bus); +extern void bcma_host_pci_down(struct bcma_bus *bus); +extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus, + struct bcma_device *core, bool enable); +#else +static inline void bcma_host_pci_up(struct bcma_bus *bus) +{ +} +static inline void bcma_host_pci_down(struct bcma_bus *bus) +{ +} +static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus, + struct bcma_device *core, bool enable) +{ + if (bus->hosttype == BCMA_HOSTTYPE_PCI) + return -ENOTSUPP; + return 0; +} +#endif + +extern bool bcma_core_is_enabled(struct bcma_device *core); +extern void bcma_core_disable(struct bcma_device *core, u32 flags); +extern int bcma_core_enable(struct bcma_device *core, u32 flags); +extern void bcma_core_set_clockmode(struct bcma_device *core, + enum bcma_clkmode clkmode); +extern void bcma_core_pll_ctl(struct bcma_device 
*core, u32 req, u32 status, + bool on); +extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset); +#define BCMA_DMA_TRANSLATION_MASK 0xC0000000 +#define BCMA_DMA_TRANSLATION_NONE 0x00000000 +#define BCMA_DMA_TRANSLATION_DMA32_CMT 0x40000000 /* Client Mode Translation for 32-bit DMA */ +#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ +extern u32 bcma_core_dma_translation(struct bcma_device *core); + +extern unsigned int bcma_core_irq(struct bcma_device *core, int num); + +#endif /* LINUX_BCMA_H_ */ diff --git a/include/linux/bcma/bcma_driver_arm_c9.h b/include/linux/bcma/bcma_driver_arm_c9.h new file mode 100644 index 000000000..688cf590c --- /dev/null +++ b/include/linux/bcma/bcma_driver_arm_c9.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_DRIVER_ARM_C9_H_ +#define LINUX_BCMA_DRIVER_ARM_C9_H_ + +/* DMU (Device Management Unit) */ +#define BCMA_DMU_CRU_USB2_CONTROL 0x0164 +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK 0x00000FFC +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT 2 +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK 0x00007000 +#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_SHIFT 12 +#define BCMA_DMU_CRU_CLKSET_KEY 0x0180 +#define BCMA_DMU_CRU_STRAPS_CTRL 0x02A0 +#define BCMA_DMU_CRU_STRAPS_CTRL_USB3 0x00000010 +#define BCMA_DMU_CRU_STRAPS_CTRL_4BYTE 0x00008000 + +#endif /* LINUX_BCMA_DRIVER_ARM_C9_H_ */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h new file mode 100644 index 000000000..d35b92060 --- /dev/null +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -0,0 +1,716 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_DRIVER_CC_H_ +#define LINUX_BCMA_DRIVER_CC_H_ + +#include +#include + +/** ChipCommon core registers. 
**/ +#define BCMA_CC_ID 0x0000 +#define BCMA_CC_ID_ID 0x0000FFFF +#define BCMA_CC_ID_ID_SHIFT 0 +#define BCMA_CC_ID_REV 0x000F0000 +#define BCMA_CC_ID_REV_SHIFT 16 +#define BCMA_CC_ID_PKG 0x00F00000 +#define BCMA_CC_ID_PKG_SHIFT 20 +#define BCMA_CC_ID_NRCORES 0x0F000000 +#define BCMA_CC_ID_NRCORES_SHIFT 24 +#define BCMA_CC_ID_TYPE 0xF0000000 +#define BCMA_CC_ID_TYPE_SHIFT 28 +#define BCMA_CC_CAP 0x0004 /* Capabilities */ +#define BCMA_CC_CAP_NRUART 0x00000003 /* # of UARTs */ +#define BCMA_CC_CAP_MIPSEB 0x00000004 /* MIPS in BigEndian Mode */ +#define BCMA_CC_CAP_UARTCLK 0x00000018 /* UART clock select */ +#define BCMA_CC_CAP_UARTCLK_INT 0x00000008 /* UARTs are driven by internal divided clock */ +#define BCMA_CC_CAP_UARTGPIO 0x00000020 /* UARTs on GPIO 15-12 */ +#define BCMA_CC_CAP_EXTBUS 0x000000C0 /* External buses present */ +#define BCMA_CC_CAP_FLASHT 0x00000700 /* Flash Type */ +#define BCMA_CC_FLASHT_NONE 0x00000000 /* No flash */ +#define BCMA_CC_FLASHT_STSER 0x00000100 /* ST serial flash */ +#define BCMA_CC_FLASHT_ATSER 0x00000200 /* Atmel serial flash */ +#define BCMA_CC_FLASHT_NAND 0x00000300 /* NAND flash */ +#define BCMA_CC_FLASHT_PARA 0x00000700 /* Parallel flash */ +#define BCMA_CC_CAP_PLLT 0x00038000 /* PLL Type */ +#define BCMA_PLLTYPE_NONE 0x00000000 +#define BCMA_PLLTYPE_1 0x00010000 /* 48Mhz base, 3 dividers */ +#define BCMA_PLLTYPE_2 0x00020000 /* 48Mhz, 4 dividers */ +#define BCMA_PLLTYPE_3 0x00030000 /* 25Mhz, 2 dividers */ +#define BCMA_PLLTYPE_4 0x00008000 /* 48Mhz, 4 dividers */ +#define BCMA_PLLTYPE_5 0x00018000 /* 25Mhz, 4 dividers */ +#define BCMA_PLLTYPE_6 0x00028000 /* 100/200 or 120/240 only */ +#define BCMA_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */ +#define BCMA_CC_CAP_PCTL 0x00040000 /* Power Control */ +#define BCMA_CC_CAP_OTPS 0x00380000 /* OTP size */ +#define BCMA_CC_CAP_OTPS_SHIFT 19 +#define BCMA_CC_CAP_OTPS_BASE 5 +#define BCMA_CC_CAP_JTAGM 0x00400000 /* JTAG master present */ +#define BCMA_CC_CAP_BROM 0x00800000 /* Internal boot ROM active */ +#define BCMA_CC_CAP_64BIT 0x08000000 /* 64-bit Backplane */ +#define BCMA_CC_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */ +#define BCMA_CC_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */ +#define BCMA_CC_CAP_SPROM 0x40000000 /* SPROM present */ +#define BCMA_CC_CAP_NFLASH 0x80000000 /* NAND flash present (rev >= 35 or BCM4706?) 
*/ +#define BCMA_CC_CORECTL 0x0008 +#define BCMA_CC_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */ +#define BCMA_CC_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */ +#define BCMA_CC_CORECTL_UARTCLKEN 0x00000008 /* UART clock enable (rev >= 21) */ +#define BCMA_CC_BIST 0x000C +#define BCMA_CC_OTPS 0x0010 /* OTP status */ +#define BCMA_CC_OTPS_PROGFAIL 0x80000000 +#define BCMA_CC_OTPS_PROTECT 0x00000007 +#define BCMA_CC_OTPS_HW_PROTECT 0x00000001 +#define BCMA_CC_OTPS_SW_PROTECT 0x00000002 +#define BCMA_CC_OTPS_CID_PROTECT 0x00000004 +#define BCMA_CC_OTPS_GU_PROG_IND 0x00000F00 /* General Use programmed indication */ +#define BCMA_CC_OTPS_GU_PROG_IND_SHIFT 8 +#define BCMA_CC_OTPS_GU_PROG_HW 0x00000100 /* HW region programmed */ +#define BCMA_CC_OTPC 0x0014 /* OTP control */ +#define BCMA_CC_OTPC_RECWAIT 0xFF000000 +#define BCMA_CC_OTPC_PROGWAIT 0x00FFFF00 +#define BCMA_CC_OTPC_PRW_SHIFT 8 +#define BCMA_CC_OTPC_MAXFAIL 0x00000038 +#define BCMA_CC_OTPC_VSEL 0x00000006 +#define BCMA_CC_OTPC_SELVL 0x00000001 +#define BCMA_CC_OTPP 0x0018 /* OTP prog */ +#define BCMA_CC_OTPP_COL 0x000000FF +#define BCMA_CC_OTPP_ROW 0x0000FF00 +#define BCMA_CC_OTPP_ROW_SHIFT 8 +#define BCMA_CC_OTPP_READERR 0x10000000 +#define BCMA_CC_OTPP_VALUE 0x20000000 +#define BCMA_CC_OTPP_READ 0x40000000 +#define BCMA_CC_OTPP_START 0x80000000 +#define BCMA_CC_OTPP_BUSY 0x80000000 +#define BCMA_CC_OTPL 0x001C /* OTP layout */ +#define BCMA_CC_OTPL_GURGN_OFFSET 0x00000FFF /* offset of general use region */ +#define BCMA_CC_IRQSTAT 0x0020 +#define BCMA_CC_IRQMASK 0x0024 +#define BCMA_CC_IRQ_GPIO 0x00000001 /* gpio intr */ +#define BCMA_CC_IRQ_EXT 0x00000002 /* ro: ext intr pin (corerev >= 3) */ +#define BCMA_CC_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */ +#define BCMA_CC_CHIPCTL 0x0028 /* Rev >= 11 only */ +#define BCMA_CC_CHIPSTAT 0x002C /* Rev >= 11 only */ +#define BCMA_CC_CHIPST_4313_SPROM_PRESENT 1 +#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2 +#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2 +#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4 +#define BCMA_CC_CHIPST_43228_ILP_DIV_EN 0x00000001 +#define BCMA_CC_CHIPST_43228_OTP_PRESENT 0x00000002 +#define BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL 0x00000004 +#define BCMA_CC_CHIPST_43228_SDIO_MODE 0x00000008 +#define BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT 0x00000010 +#define BCMA_CC_CHIPST_43228_SDIO_RESET 0x00000020 +#define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */ +#define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */ +#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */ +#define BCMA_CC_CHIPST_4706_MIPS_BENDIAN BIT(3) /* 0: little, 1: big endian */ +#define BCMA_CC_CHIPST_4706_PCIE1_DISABLE BIT(5) /* PCIE1 enable strap pin */ +#define BCMA_CC_CHIPST_5357_NAND_BOOT BIT(4) /* NAND boot, valid for CC rev 38 and/or BCM5357 */ +#define BCMA_CC_CHIPST_4360_XTAL_40MZ 0x00000001 +#define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */ +#define BCMA_CC_JCMD_START 0x80000000 +#define BCMA_CC_JCMD_BUSY 0x80000000 +#define BCMA_CC_JCMD_PAUSE 0x40000000 +#define BCMA_CC_JCMD0_ACC_MASK 0x0000F000 +#define BCMA_CC_JCMD0_ACC_IRDR 0x00000000 +#define BCMA_CC_JCMD0_ACC_DR 0x00001000 +#define BCMA_CC_JCMD0_ACC_IR 0x00002000 +#define BCMA_CC_JCMD0_ACC_RESET 0x00003000 +#define BCMA_CC_JCMD0_ACC_IRPDR 0x00004000 +#define BCMA_CC_JCMD0_ACC_PDR 0x00005000 +#define BCMA_CC_JCMD0_IRW_MASK 0x00000F00 +#define BCMA_CC_JCMD_ACC_MASK 0x000F0000 /* Changes 
for corerev 11 */ +#define BCMA_CC_JCMD_ACC_IRDR 0x00000000 +#define BCMA_CC_JCMD_ACC_DR 0x00010000 +#define BCMA_CC_JCMD_ACC_IR 0x00020000 +#define BCMA_CC_JCMD_ACC_RESET 0x00030000 +#define BCMA_CC_JCMD_ACC_IRPDR 0x00040000 +#define BCMA_CC_JCMD_ACC_PDR 0x00050000 +#define BCMA_CC_JCMD_IRW_MASK 0x00001F00 +#define BCMA_CC_JCMD_IRW_SHIFT 8 +#define BCMA_CC_JCMD_DRW_MASK 0x0000003F +#define BCMA_CC_JIR 0x0034 /* Rev >= 10 only */ +#define BCMA_CC_JDR 0x0038 /* Rev >= 10 only */ +#define BCMA_CC_JCTL 0x003C /* Rev >= 10 only */ +#define BCMA_CC_JCTL_FORCE_CLK 4 /* Force clock */ +#define BCMA_CC_JCTL_EXT_EN 2 /* Enable external targets */ +#define BCMA_CC_JCTL_EN 1 /* Enable Jtag master */ +#define BCMA_CC_FLASHCTL 0x0040 +/* Start/busy bit in flashcontrol */ +#define BCMA_CC_FLASHCTL_OPCODE 0x000000ff +#define BCMA_CC_FLASHCTL_ACTION 0x00000700 +#define BCMA_CC_FLASHCTL_CS_ACTIVE 0x00001000 /* Chip Select Active, rev >= 20 */ +#define BCMA_CC_FLASHCTL_START 0x80000000 +#define BCMA_CC_FLASHCTL_BUSY BCMA_CC_FLASHCTL_START +/* Flashcontrol action + opcodes for ST flashes */ +#define BCMA_CC_FLASHCTL_ST_WREN 0x0006 /* Write Enable */ +#define BCMA_CC_FLASHCTL_ST_WRDIS 0x0004 /* Write Disable */ +#define BCMA_CC_FLASHCTL_ST_RDSR 0x0105 /* Read Status Register */ +#define BCMA_CC_FLASHCTL_ST_WRSR 0x0101 /* Write Status Register */ +#define BCMA_CC_FLASHCTL_ST_READ 0x0303 /* Read Data Bytes */ +#define BCMA_CC_FLASHCTL_ST_PP 0x0302 /* Page Program */ +#define BCMA_CC_FLASHCTL_ST_SE 0x02d8 /* Sector Erase */ +#define BCMA_CC_FLASHCTL_ST_BE 0x00c7 /* Bulk Erase */ +#define BCMA_CC_FLASHCTL_ST_DP 0x00b9 /* Deep Power-down */ +#define BCMA_CC_FLASHCTL_ST_RES 0x03ab /* Read Electronic Signature */ +#define BCMA_CC_FLASHCTL_ST_CSA 0x1000 /* Keep chip select asserted */ +#define BCMA_CC_FLASHCTL_ST_SSE 0x0220 /* Sub-sector Erase */ +/* Flashcontrol action + opcodes for Atmel flashes */ +#define BCMA_CC_FLASHCTL_AT_READ 0x07e8 +#define BCMA_CC_FLASHCTL_AT_PAGE_READ 0x07d2 +#define BCMA_CC_FLASHCTL_AT_STATUS 0x01d7 +#define BCMA_CC_FLASHCTL_AT_BUF1_WRITE 0x0384 +#define BCMA_CC_FLASHCTL_AT_BUF2_WRITE 0x0387 +#define BCMA_CC_FLASHCTL_AT_BUF1_ERASE_PROGRAM 0x0283 +#define BCMA_CC_FLASHCTL_AT_BUF2_ERASE_PROGRAM 0x0286 +#define BCMA_CC_FLASHCTL_AT_BUF1_PROGRAM 0x0288 +#define BCMA_CC_FLASHCTL_AT_BUF2_PROGRAM 0x0289 +#define BCMA_CC_FLASHCTL_AT_PAGE_ERASE 0x0281 +#define BCMA_CC_FLASHCTL_AT_BLOCK_ERASE 0x0250 +#define BCMA_CC_FLASHCTL_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 +#define BCMA_CC_FLASHCTL_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 +#define BCMA_CC_FLASHCTL_AT_BUF1_LOAD 0x0253 +#define BCMA_CC_FLASHCTL_AT_BUF2_LOAD 0x0255 +#define BCMA_CC_FLASHCTL_AT_BUF1_COMPARE 0x0260 +#define BCMA_CC_FLASHCTL_AT_BUF2_COMPARE 0x0261 +#define BCMA_CC_FLASHCTL_AT_BUF1_REPROGRAM 0x0258 +#define BCMA_CC_FLASHCTL_AT_BUF2_REPROGRAM 0x0259 +#define BCMA_CC_FLASHADDR 0x0044 +#define BCMA_CC_FLASHDATA 0x0048 +/* Status register bits for ST flashes */ +#define BCMA_CC_FLASHDATA_ST_WIP 0x01 /* Write In Progress */ +#define BCMA_CC_FLASHDATA_ST_WEL 0x02 /* Write Enable Latch */ +#define BCMA_CC_FLASHDATA_ST_BP_MASK 0x1c /* Block Protect */ +#define BCMA_CC_FLASHDATA_ST_BP_SHIFT 2 +#define BCMA_CC_FLASHDATA_ST_SRWD 0x80 /* Status Register Write Disable */ +/* Status register bits for Atmel flashes */ +#define BCMA_CC_FLASHDATA_AT_READY 0x80 +#define BCMA_CC_FLASHDATA_AT_MISMATCH 0x40 +#define BCMA_CC_FLASHDATA_AT_ID_MASK 0x38 +#define BCMA_CC_FLASHDATA_AT_ID_SHIFT 3 +#define BCMA_CC_BCAST_ADDR 0x0050 +#define BCMA_CC_BCAST_DATA 0x0054 
+#define BCMA_CC_GPIOPULLUP 0x0058 /* Rev >= 20 only */ +#define BCMA_CC_GPIOPULLDOWN 0x005C /* Rev >= 20 only */ +#define BCMA_CC_GPIOIN 0x0060 +#define BCMA_CC_GPIOOUT 0x0064 +#define BCMA_CC_GPIOOUTEN 0x0068 +#define BCMA_CC_GPIOCTL 0x006C +#define BCMA_CC_GPIOPOL 0x0070 +#define BCMA_CC_GPIOIRQ 0x0074 +#define BCMA_CC_WATCHDOG 0x0080 +#define BCMA_CC_GPIOTIMER 0x0088 /* LED powersave (corerev >= 16) */ +#define BCMA_CC_GPIOTIMER_OFFTIME 0x0000FFFF +#define BCMA_CC_GPIOTIMER_OFFTIME_SHIFT 0 +#define BCMA_CC_GPIOTIMER_ONTIME 0xFFFF0000 +#define BCMA_CC_GPIOTIMER_ONTIME_SHIFT 16 +#define BCMA_CC_GPIOTOUTM 0x008C /* LED powersave (corerev >= 16) */ +#define BCMA_CC_CLOCK_N 0x0090 +#define BCMA_CC_CLOCK_SB 0x0094 +#define BCMA_CC_CLOCK_PCI 0x0098 +#define BCMA_CC_CLOCK_M2 0x009C +#define BCMA_CC_CLOCK_MIPS 0x00A0 +#define BCMA_CC_CLKDIV 0x00A4 /* Rev >= 3 only */ +#define BCMA_CC_CLKDIV_SFLASH 0x0F000000 +#define BCMA_CC_CLKDIV_SFLASH_SHIFT 24 +#define BCMA_CC_CLKDIV_OTP 0x000F0000 +#define BCMA_CC_CLKDIV_OTP_SHIFT 16 +#define BCMA_CC_CLKDIV_JTAG 0x00000F00 +#define BCMA_CC_CLKDIV_JTAG_SHIFT 8 +#define BCMA_CC_CLKDIV_UART 0x000000FF +#define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */ +#define BCMA_CC_CAP_EXT_SECI_PRESENT 0x00000001 +#define BCMA_CC_CAP_EXT_GSIO_PRESENT 0x00000002 +#define BCMA_CC_CAP_EXT_GCI_PRESENT 0x00000004 +#define BCMA_CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /* UART present */ +#define BCMA_CC_CAP_EXT_AOB_PRESENT 0x00000040 +#define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */ +#define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ +#define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ +#define BCMA_CC_SLOWCLKCTL_SRC 0x00000007 /* slow clock source mask */ +#define BCMA_CC_SLOWCLKCTL_SRC_LPO 0x00000000 /* source of slow clock is LPO */ +#define BCMA_CC_SLOWCLKCTL_SRC_XTAL 0x00000001 /* source of slow clock is crystal */ +#define BCMA_CC_SLOECLKCTL_SRC_PCI 0x00000002 /* source of slow clock is PCI */ +#define BCMA_CC_SLOWCLKCTL_LPOFREQ 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define BCMA_CC_SLOWCLKCTL_LPOPD 0x00000400 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */ +#define BCMA_CC_SLOWCLKCTL_FSLOW 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */ +#define BCMA_CC_SLOWCLKCTL_IPLL 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors PLL clock disable requests from core */ +#define BCMA_CC_SLOWCLKCTL_ENXTAL 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't disable crystal when appropriate */ +#define BCMA_CC_SLOWCLKCTL_XTALPU 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */ +#define BCMA_CC_SLOWCLKCTL_CLKDIV 0xFFFF0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */ +#define BCMA_CC_SLOWCLKCTL_CLKDIV_SHIFT 16 +#define BCMA_CC_SYSCLKCTL 0x00C0 /* Rev >= 3 only */ +#define BCMA_CC_SYSCLKCTL_IDLPEN 0x00000001 /* ILPen: Enable Idle Low Power */ +#define BCMA_CC_SYSCLKCTL_ALPEN 0x00000002 /* ALPen: Enable Active Low Power */ +#define BCMA_CC_SYSCLKCTL_PLLEN 0x00000004 /* ForcePLLOn */ +#define BCMA_CC_SYSCLKCTL_FORCEALP 0x00000008 /* Force ALP (or HT if ALPen is not set */ +#define BCMA_CC_SYSCLKCTL_FORCEHT 0x00000010 /* Force HT */ +#define BCMA_CC_SYSCLKCTL_CLKDIV 0xFFFF0000 /* ClkDiv (ILP = 1/(4+divisor)) */ +#define BCMA_CC_SYSCLKCTL_CLKDIV_SHIFT 16 +#define BCMA_CC_CLKSTSTR 0x00C4 /* Rev >= 3 only */ +#define BCMA_CC_EROM 0x00FC +#define BCMA_CC_PCMCIA_CFG 0x0100 +#define BCMA_CC_PCMCIA_MEMWAIT 0x0104 +#define BCMA_CC_PCMCIA_ATTRWAIT 0x0108 +#define BCMA_CC_PCMCIA_IOWAIT 0x010C 
+#define BCMA_CC_IDE_CFG 0x0110 +#define BCMA_CC_IDE_MEMWAIT 0x0114 +#define BCMA_CC_IDE_ATTRWAIT 0x0118 +#define BCMA_CC_IDE_IOWAIT 0x011C +#define BCMA_CC_PROG_CFG 0x0120 +#define BCMA_CC_PROG_WAITCNT 0x0124 +#define BCMA_CC_FLASH_CFG 0x0128 +#define BCMA_CC_FLASH_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */ +#define BCMA_CC_FLASH_WAITCNT 0x012C +#define BCMA_CC_SROM_CONTROL 0x0190 +#define BCMA_CC_SROM_CONTROL_START 0x80000000 +#define BCMA_CC_SROM_CONTROL_BUSY 0x80000000 +#define BCMA_CC_SROM_CONTROL_OPCODE 0x60000000 +#define BCMA_CC_SROM_CONTROL_OP_READ 0x00000000 +#define BCMA_CC_SROM_CONTROL_OP_WRITE 0x20000000 +#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000 +#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000 +#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010 +#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008 +#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006 +#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000 +#define BCMA_CC_SROM_CONTROL_SIZE_4K 0x00000002 +#define BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004 +#define BCMA_CC_SROM_CONTROL_SIZE_SHIFT 1 +#define BCMA_CC_SROM_CONTROL_PRESENT 0x00000001 +/* Block 0x140 - 0x190 registers are chipset specific */ +#define BCMA_CC_4706_FLASHSCFG 0x18C /* Flash struct configuration */ +#define BCMA_CC_4706_FLASHSCFG_MASK 0x000000ff +#define BCMA_CC_4706_FLASHSCFG_SF1 0x00000001 /* 2nd serial flash present */ +#define BCMA_CC_4706_FLASHSCFG_PF1 0x00000002 /* 2nd parallel flash present */ +#define BCMA_CC_4706_FLASHSCFG_SF1_TYPE 0x00000004 /* 2nd serial flash type : 0 : ST, 1 : Atmel */ +#define BCMA_CC_4706_FLASHSCFG_NF1 0x00000008 /* 2nd NAND flash present */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_MASK 0x000000f0 +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_4MB 0x00000010 /* 4MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_8MB 0x00000020 /* 8MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_16MB 0x00000030 /* 16MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_32MB 0x00000040 /* 32MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_64MB 0x00000050 /* 64MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_128MB 0x00000060 /* 128MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_256MB 0x00000070 /* 256MB */ +/* NAND flash registers for BCM4706 (corerev = 31) */ +#define BCMA_CC_NFLASH_CTL 0x01A0 +#define BCMA_CC_NFLASH_CTL_ERR 0x08000000 +#define BCMA_CC_NFLASH_CONF 0x01A4 +#define BCMA_CC_NFLASH_COL_ADDR 0x01A8 +#define BCMA_CC_NFLASH_ROW_ADDR 0x01AC +#define BCMA_CC_NFLASH_DATA 0x01B0 +#define BCMA_CC_NFLASH_WAITCNT0 0x01B4 +/* 0x1E0 is defined as shared BCMA_CLKCTLST */ +#define BCMA_CC_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */ +#define BCMA_CC_UART0_DATA 0x0300 +#define BCMA_CC_UART0_IMR 0x0304 +#define BCMA_CC_UART0_FCR 0x0308 +#define BCMA_CC_UART0_LCR 0x030C +#define BCMA_CC_UART0_MCR 0x0310 +#define BCMA_CC_UART0_LSR 0x0314 +#define BCMA_CC_UART0_MSR 0x0318 +#define BCMA_CC_UART0_SCRATCH 0x031C +#define BCMA_CC_UART1_DATA 0x0400 +#define BCMA_CC_UART1_IMR 0x0404 +#define BCMA_CC_UART1_FCR 0x0408 +#define BCMA_CC_UART1_LCR 0x040C +#define BCMA_CC_UART1_MCR 0x0410 +#define BCMA_CC_UART1_LSR 0x0414 +#define BCMA_CC_UART1_MSR 0x0418 +#define BCMA_CC_UART1_SCRATCH 0x041C +/* PMU registers (rev >= 20) */ +#define BCMA_CC_PMU_CTL 0x0600 /* PMU control */ +#define BCMA_CC_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ +#define BCMA_CC_PMU_CTL_ILP_DIV_SHIFT 16 +#define BCMA_CC_PMU_CTL_RES 0x00006000 /* reset control mask */ +#define BCMA_CC_PMU_CTL_RES_SHIFT 13 +#define BCMA_CC_PMU_CTL_RES_RELOAD 0x2 /* reload POR values */ 
+#define BCMA_CC_PMU_CTL_PLL_UPD 0x00000400 +#define BCMA_CC_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ +#define BCMA_CC_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ +#define BCMA_CC_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ +#define BCMA_CC_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */ +#define BCMA_CC_PMU_CTL_XTALFREQ_SHIFT 2 +#define BCMA_CC_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */ +#define BCMA_CC_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */ +#define BCMA_CC_PMU_CAP 0x0604 /* PMU capabilities */ +#define BCMA_CC_PMU_CAP_REVISION 0x000000FF /* Revision mask */ +#define BCMA_CC_PMU_STAT 0x0608 /* PMU status */ +#define BCMA_CC_PMU_STAT_EXT_LPO_AVAIL 0x00000100 +#define BCMA_CC_PMU_STAT_WDRESET 0x00000080 +#define BCMA_CC_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */ +#define BCMA_CC_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? */ +#define BCMA_CC_PMU_STAT_HAVEALP 0x00000008 /* ALP available */ +#define BCMA_CC_PMU_STAT_HAVEHT 0x00000004 /* HT available */ +#define BCMA_CC_PMU_STAT_RESINIT 0x00000003 /* Res init */ +#define BCMA_CC_PMU_RES_STAT 0x060C /* PMU res status */ +#define BCMA_CC_PMU_RES_PEND 0x0610 /* PMU res pending */ +#define BCMA_CC_PMU_TIMER 0x0614 /* PMU timer */ +#define BCMA_CC_PMU_MINRES_MSK 0x0618 /* PMU min res mask */ +#define BCMA_CC_PMU_MAXRES_MSK 0x061C /* PMU max res mask */ +#define BCMA_CC_PMU_RES_TABSEL 0x0620 /* PMU res table sel */ +#define BCMA_CC_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */ +#define BCMA_CC_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */ +#define BCMA_CC_PMU_RES_TIMER 0x062C /* PMU res timer */ +#define BCMA_CC_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */ +#define BCMA_CC_PMU_WATCHDOG 0x0634 /* PMU watchdog */ +#define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ +#define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */ +#define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */ +#define BCMA_CC_PMU_CHIPCTL_ADDR 0x0650 +#define BCMA_CC_PMU_CHIPCTL_DATA 0x0654 +#define BCMA_CC_PMU_REGCTL_ADDR 0x0658 +#define BCMA_CC_PMU_REGCTL_DATA 0x065C +#define BCMA_CC_PMU_PLLCTL_ADDR 0x0660 +#define BCMA_CC_PMU_PLLCTL_DATA 0x0664 +#define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */ +#define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */ +#define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF +#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_MASK 0x80000000 +#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT 31 +#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */ +/* NAND flash MLC controller registers (corerev >= 38) */ +#define BCMA_CC_NAND_REVISION 0x0C00 +#define BCMA_CC_NAND_CMD_START 0x0C04 +#define BCMA_CC_NAND_CMD_ADDR_X 0x0C08 +#define BCMA_CC_NAND_CMD_ADDR 0x0C0C +#define BCMA_CC_NAND_CMD_END_ADDR 0x0C10 +#define BCMA_CC_NAND_CS_NAND_SELECT 0x0C14 +#define BCMA_CC_NAND_CS_NAND_XOR 0x0C18 +#define BCMA_CC_NAND_SPARE_RD0 0x0C20 +#define BCMA_CC_NAND_SPARE_RD4 0x0C24 +#define BCMA_CC_NAND_SPARE_RD8 0x0C28 +#define BCMA_CC_NAND_SPARE_RD12 0x0C2C +#define BCMA_CC_NAND_SPARE_WR0 0x0C30 +#define BCMA_CC_NAND_SPARE_WR4 0x0C34 +#define BCMA_CC_NAND_SPARE_WR8 0x0C38 +#define BCMA_CC_NAND_SPARE_WR12 0x0C3C +#define BCMA_CC_NAND_ACC_CONTROL 0x0C40 +#define BCMA_CC_NAND_CONFIG 0x0C48 +#define BCMA_CC_NAND_TIMING_1 0x0C50 +#define BCMA_CC_NAND_TIMING_2 0x0C54 +#define BCMA_CC_NAND_SEMAPHORE 0x0C58 +#define BCMA_CC_NAND_DEVID 0x0C60 +#define BCMA_CC_NAND_DEVID_X 0x0C64 +#define BCMA_CC_NAND_BLOCK_LOCK_STATUS 0x0C68 +#define BCMA_CC_NAND_INTFC_STATUS 0x0C6C +#define BCMA_CC_NAND_ECC_CORR_ADDR_X 0x0C70 +#define 
BCMA_CC_NAND_ECC_CORR_ADDR 0x0C74 +#define BCMA_CC_NAND_ECC_UNC_ADDR_X 0x0C78 +#define BCMA_CC_NAND_ECC_UNC_ADDR 0x0C7C +#define BCMA_CC_NAND_READ_ERROR_COUNT 0x0C80 +#define BCMA_CC_NAND_CORR_STAT_THRESHOLD 0x0C84 +#define BCMA_CC_NAND_READ_ADDR_X 0x0C90 +#define BCMA_CC_NAND_READ_ADDR 0x0C94 +#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR_X 0x0C98 +#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR 0x0C9C +#define BCMA_CC_NAND_COPY_BACK_ADDR_X 0x0CA0 +#define BCMA_CC_NAND_COPY_BACK_ADDR 0x0CA4 +#define BCMA_CC_NAND_BLOCK_ERASE_ADDR_X 0x0CA8 +#define BCMA_CC_NAND_BLOCK_ERASE_ADDR 0x0CAC +#define BCMA_CC_NAND_INV_READ_ADDR_X 0x0CB0 +#define BCMA_CC_NAND_INV_READ_ADDR 0x0CB4 +#define BCMA_CC_NAND_BLK_WR_PROTECT 0x0CC0 +#define BCMA_CC_NAND_ACC_CONTROL_CS1 0x0CD0 +#define BCMA_CC_NAND_CONFIG_CS1 0x0CD4 +#define BCMA_CC_NAND_TIMING_1_CS1 0x0CD8 +#define BCMA_CC_NAND_TIMING_2_CS1 0x0CDC +#define BCMA_CC_NAND_SPARE_RD16 0x0D30 +#define BCMA_CC_NAND_SPARE_RD20 0x0D34 +#define BCMA_CC_NAND_SPARE_RD24 0x0D38 +#define BCMA_CC_NAND_SPARE_RD28 0x0D3C +#define BCMA_CC_NAND_CACHE_ADDR 0x0D40 +#define BCMA_CC_NAND_CACHE_DATA 0x0D44 +#define BCMA_CC_NAND_CTRL_CONFIG 0x0D48 +#define BCMA_CC_NAND_CTRL_STATUS 0x0D4C + +/* Divider allocation in 4716/47162/5356 */ +#define BCMA_CC_PMU5_MAINPLL_CPU 1 +#define BCMA_CC_PMU5_MAINPLL_MEM 2 +#define BCMA_CC_PMU5_MAINPLL_SSB 3 + +/* PLL usage in 4716/47162 */ +#define BCMA_CC_PMU4716_MAINPLL_PLL0 12 + +/* PLL usage in 5356/5357 */ +#define BCMA_CC_PMU5356_MAINPLL_PLL0 0 +#define BCMA_CC_PMU5357_MAINPLL_PLL0 0 + +/* 4706 PMU */ +#define BCMA_CC_PMU4706_MAINPLL_PLL0 0 +#define BCMA_CC_PMU6_4706_PROCPLL_OFF 4 /* The CPU PLL */ +#define BCMA_CC_PMU6_4706_PROC_P2DIV_MASK 0x000f0000 +#define BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT 16 +#define BCMA_CC_PMU6_4706_PROC_P1DIV_MASK 0x0000f000 +#define BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT 12 +#define BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8 +#define BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT 3 +#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007 +#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_SHIFT 0 + +/* PMU rev 15 */ +#define BCMA_CC_PMU15_PLL_PLLCTL0 0 +#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_MASK 0x00000003 +#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_SHIFT 0 +#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC +#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT 2 +#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000 +#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_SHIFT 22 +#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_MASK 0x07000000 +#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_SHIFT 24 +#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000 +#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_SHIFT 27 +#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_MASK 0x40000000 +#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_SHIFT 30 +#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000 +#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_SHIFT 31 + +/* ALP clock on pre-PMU chips */ +#define BCMA_CC_PMU_ALP_CLOCK 20000000 +/* HT clock for systems with PMU-enabled chipcommon */ +#define BCMA_CC_PMU_HT_CLOCK 80000000 + +/* PMU rev 5 (& 6) */ +#define BCMA_CC_PPL_P1P2_OFF 0 +#define BCMA_CC_PPL_P1_MASK 0x0f000000 +#define BCMA_CC_PPL_P1_SHIFT 24 +#define BCMA_CC_PPL_P2_MASK 0x00f00000 +#define BCMA_CC_PPL_P2_SHIFT 20 +#define BCMA_CC_PPL_M14_OFF 1 +#define BCMA_CC_PPL_MDIV_MASK 0x000000ff +#define BCMA_CC_PPL_MDIV_WIDTH 8 +#define BCMA_CC_PPL_NM5_OFF 2 +#define BCMA_CC_PPL_NDIV_MASK 0xfff00000 +#define BCMA_CC_PPL_NDIV_SHIFT 20 +#define BCMA_CC_PPL_FMAB_OFF 3 +#define BCMA_CC_PPL_MRAT_MASK 0xf0000000 +#define BCMA_CC_PPL_MRAT_SHIFT 28 
+#define BCMA_CC_PPL_ABRAT_MASK		0x08000000
+#define BCMA_CC_PPL_ABRAT_SHIFT		27
+#define BCMA_CC_PPL_FDIV_MASK		0x07ffffff
+#define BCMA_CC_PPL_PLLCTL_OFF		4
+#define BCMA_CC_PPL_PCHI_OFF		5
+#define BCMA_CC_PPL_PCHI_MASK		0x0000003f
+
+#define BCMA_CC_PMU_PLL_CTL0		0
+#define BCMA_CC_PMU_PLL_CTL1		1
+#define BCMA_CC_PMU_PLL_CTL2		2
+#define BCMA_CC_PMU_PLL_CTL3		3
+#define BCMA_CC_PMU_PLL_CTL4		4
+#define BCMA_CC_PMU_PLL_CTL5		5
+
+#define BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK	0x00f00000
+#define BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT	20
+
+#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK	0x1ff00000
+#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT	20
+
+#define BCMA_CCB_MII_MNG_CTL		0x0000
+#define BCMA_CCB_MII_MNG_CMD_DATA	0x0004
+
+/* BCM4331 ChipControl numbers. */
+#define BCMA_CHIPCTL_4331_BT_COEXIST		BIT(0)	/* 0 disable */
+#define BCMA_CHIPCTL_4331_SECI			BIT(1)	/* 0 SECI is disabled (JTAG functional) */
+#define BCMA_CHIPCTL_4331_EXT_LNA		BIT(2)	/* 0 disable */
+#define BCMA_CHIPCTL_4331_SPROM_GPIO13_15	BIT(3)	/* sprom/gpio13-15 mux */
+#define BCMA_CHIPCTL_4331_EXTPA_EN		BIT(4)	/* 0 ext pa disable, 1 ext pa enabled */
+#define BCMA_CHIPCTL_4331_GPIOCLK_ON_SPROMCS	BIT(5)	/* set drive out GPIO_CLK on sprom_cs pin */
+#define BCMA_CHIPCTL_4331_PCIE_MDIO_ON_SPROMCS	BIT(6)	/* use sprom_cs pin as PCIE mdio interface */
+#define BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5	BIT(7)	/* aband extpa will be at gpio2/5 and sprom_dout */
+#define BCMA_CHIPCTL_4331_OVR_PIPEAUXCLKEN	BIT(8)	/* override core control on pipe_AuxClkEnable */
+#define BCMA_CHIPCTL_4331_OVR_PIPEAUXPWRDOWN	BIT(9)	/* override core control on pipe_AuxPowerDown */
+#define BCMA_CHIPCTL_4331_PCIE_AUXCLKEN		BIT(10)	/* pcie_auxclkenable */
+#define BCMA_CHIPCTL_4331_PCIE_PIPE_PLLDOWN	BIT(11)	/* pcie_pipe_pllpowerdown */
+#define BCMA_CHIPCTL_4331_EXTPA_EN2		BIT(12)	/* 0 ext pa disable, 1 ext pa enabled */
+#define BCMA_CHIPCTL_4331_BT_SHD0_ON_GPIO4	BIT(16)	/* enable bt_shd0 at gpio4 */
+#define BCMA_CHIPCTL_4331_BT_SHD1_ON_GPIO5	BIT(17)	/* enable bt_shd1 at gpio5 */
+
+/* 43224 chip-specific ChipControl register bits */
+#define BCMA_CCTRL_43224_GPIO_TOGGLE		0x8000		/* gpio[3:0] pins as btcoex or s/w gpio */
+#define BCMA_CCTRL_43224A0_12MA_LED_DRIVE	0x00F000F0	/* 12 mA drive strength */
+#define BCMA_CCTRL_43224B0_12MA_LED_DRIVE	0xF0		/* 12 mA drive strength for later 43224s */
+
+/* 4313 Chip specific ChipControl register bits */
+#define BCMA_CCTRL_4313_12MA_LED_DRIVE		0x00000007	/* 12 mA drive strength for later 4313 */
+
+/* BCM5357 ChipControl register bits */
+#define BCMA_CHIPCTL_5357_EXTPA			BIT(14)
+#define BCMA_CHIPCTL_5357_ANT_MUX_2O3		BIT(15)
+#define BCMA_CHIPCTL_5357_NFLASH		BIT(16)
+#define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE	BIT(18)
+#define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE	BIT(19)
+
+#define BCMA_RES_4314_LPLDO_PU			BIT(0)
+#define BCMA_RES_4314_PMU_SLEEP_DIS		BIT(1)
+#define BCMA_RES_4314_PMU_BG_PU			BIT(2)
+#define BCMA_RES_4314_CBUCK_LPOM_PU		BIT(3)
+#define BCMA_RES_4314_CBUCK_PFM_PU		BIT(4)
+#define BCMA_RES_4314_CLDO_PU			BIT(5)
+#define BCMA_RES_4314_LPLDO2_LVM		BIT(6)
+#define BCMA_RES_4314_WL_PMU_PU			BIT(7)
+#define BCMA_RES_4314_LNLDO_PU			BIT(8)
+#define BCMA_RES_4314_LDO3P3_PU			BIT(9)
+#define BCMA_RES_4314_OTP_PU			BIT(10)
+#define BCMA_RES_4314_XTAL_PU			BIT(11)
+#define BCMA_RES_4314_WL_PWRSW_PU		BIT(12)
+#define BCMA_RES_4314_LQ_AVAIL			BIT(13)
+#define BCMA_RES_4314_LOGIC_RET			BIT(14)
+#define BCMA_RES_4314_MEM_SLEEP			BIT(15)
+#define BCMA_RES_4314_MACPHY_RET		BIT(16)
+#define BCMA_RES_4314_WL_CORE_READY		BIT(17)
+#define BCMA_RES_4314_ILP_REQ			BIT(18)
+#define
BCMA_RES_4314_ALP_AVAIL BIT(19) +#define BCMA_RES_4314_MISC_PWRSW_PU BIT(20) +#define BCMA_RES_4314_SYNTH_PWRSW_PU BIT(21) +#define BCMA_RES_4314_RX_PWRSW_PU BIT(22) +#define BCMA_RES_4314_RADIO_PU BIT(23) +#define BCMA_RES_4314_VCO_LDO_PU BIT(24) +#define BCMA_RES_4314_AFE_LDO_PU BIT(25) +#define BCMA_RES_4314_RX_LDO_PU BIT(26) +#define BCMA_RES_4314_TX_LDO_PU BIT(27) +#define BCMA_RES_4314_HT_AVAIL BIT(28) +#define BCMA_RES_4314_MACPHY_CLK_AVAIL BIT(29) + +/* Data for the PMU, if available. + * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) + */ +struct bcma_chipcommon_pmu { + struct bcma_device *core; /* Can be separated core or just ChipCommon one */ + u8 rev; /* PMU revision */ + u32 crystalfreq; /* The active crystal frequency (in kHz) */ +}; + +#ifdef CONFIG_BCMA_PFLASH +struct bcma_pflash { + bool present; +}; +#endif + +#ifdef CONFIG_BCMA_SFLASH +struct mtd_info; + +struct bcma_sflash { + bool present; + u32 blocksize; + u16 numblocks; + u32 size; +}; +#endif + +#ifdef CONFIG_BCMA_NFLASH +struct bcma_nflash { + bool present; + bool boot; /* This is the flash the SoC boots from */ +}; +#endif + +#ifdef CONFIG_BCMA_DRIVER_MIPS +struct bcma_serial_port { + void *regs; + unsigned long clockspeed; + unsigned int irq; + unsigned int baud_base; + unsigned int reg_shift; +}; +#endif /* CONFIG_BCMA_DRIVER_MIPS */ + +struct bcma_drv_cc { + struct bcma_device *core; + u32 status; + u32 capabilities; + u32 capabilities_ext; + u8 setup_done:1; + u8 early_setup_done:1; + /* Fast Powerup Delay constant */ + u16 fast_pwrup_delay; + struct bcma_chipcommon_pmu pmu; +#ifdef CONFIG_BCMA_PFLASH + struct bcma_pflash pflash; +#endif +#ifdef CONFIG_BCMA_SFLASH + struct bcma_sflash sflash; +#endif +#ifdef CONFIG_BCMA_NFLASH + struct bcma_nflash nflash; +#endif + +#ifdef CONFIG_BCMA_DRIVER_MIPS + int nr_serial_ports; + struct bcma_serial_port serial_ports[4]; +#endif /* CONFIG_BCMA_DRIVER_MIPS */ + u32 ticks_per_ms; + struct platform_device *watchdog; + + /* Lock for GPIO register access. 
*/ + spinlock_t gpio_lock; +#ifdef CONFIG_BCMA_DRIVER_GPIO + struct gpio_chip gpio; +#endif +}; + +struct bcma_drv_cc_b { + struct bcma_device *core; + u8 setup_done:1; + void __iomem *mii; +}; + +/* Register access */ +#define bcma_cc_read32(cc, offset) \ + bcma_read32((cc)->core, offset) +#define bcma_cc_write32(cc, offset, val) \ + bcma_write32((cc)->core, offset, val) + +#define bcma_cc_mask32(cc, offset, mask) \ + bcma_cc_write32(cc, offset, bcma_cc_read32(cc, offset) & (mask)) +#define bcma_cc_set32(cc, offset, set) \ + bcma_cc_write32(cc, offset, bcma_cc_read32(cc, offset) | (set)) +#define bcma_cc_maskset32(cc, offset, mask, set) \ + bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) + +/* PMU registers access */ +#define bcma_pmu_read32(cc, offset) \ + bcma_read32((cc)->pmu.core, offset) +#define bcma_pmu_write32(cc, offset, val) \ + bcma_write32((cc)->pmu.core, offset, val) + +#define bcma_pmu_mask32(cc, offset, mask) \ + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask)) +#define bcma_pmu_set32(cc, offset, set) \ + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set)) +#define bcma_pmu_maskset32(cc, offset, mask, set) \ + bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set)) + +extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); + +extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc); + +void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value); + +u32 bcma_chipco_irq_status(struct bcma_drv_cc *cc, u32 mask); + +/* Chipcommon GPIO pin access. */ +u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask); +u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value); + +/* PMU support */ +extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, + u32 value); +extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, + u32 mask, u32 set); +extern void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc, + u32 offset, u32 mask, u32 set); +extern void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, + u32 offset, u32 mask, u32 set); +extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid); + +extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc); + +void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value); + +#endif /* LINUX_BCMA_DRIVER_CC_H_ */ diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h new file mode 100644 index 000000000..420e222d7 --- /dev/null +++ b/include/linux/bcma/bcma_driver_gmac_cmn.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_DRIVER_GMAC_CMN_H_ +#define LINUX_BCMA_DRIVER_GMAC_CMN_H_ + +#include + +#define BCMA_GMAC_CMN_STAG0 0x000 +#define BCMA_GMAC_CMN_STAG1 0x004 +#define BCMA_GMAC_CMN_STAG2 0x008 +#define BCMA_GMAC_CMN_STAG3 0x00C +#define BCMA_GMAC_CMN_PARSER_CTL 0x020 +#define BCMA_GMAC_CMN_MIB_MAX_LEN 0x024 +#define BCMA_GMAC_CMN_PHY_ACCESS 0x100 +#define BCMA_GMAC_CMN_PA_DATA_MASK 0x0000ffff +#define 
BCMA_GMAC_CMN_PA_ADDR_MASK 0x001f0000 +#define BCMA_GMAC_CMN_PA_ADDR_SHIFT 16 +#define BCMA_GMAC_CMN_PA_REG_MASK 0x1f000000 +#define BCMA_GMAC_CMN_PA_REG_SHIFT 24 +#define BCMA_GMAC_CMN_PA_WRITE 0x20000000 +#define BCMA_GMAC_CMN_PA_START 0x40000000 +#define BCMA_GMAC_CMN_PHY_CTL 0x104 +#define BCMA_GMAC_CMN_PC_EPA_MASK 0x0000001f +#define BCMA_GMAC_CMN_PC_MCT_MASK 0x007f0000 +#define BCMA_GMAC_CMN_PC_MCT_SHIFT 16 +#define BCMA_GMAC_CMN_PC_MTE 0x00800000 +#define BCMA_GMAC_CMN_GMAC0_RGMII_CTL 0x110 +#define BCMA_GMAC_CMN_CFP_ACCESS 0x200 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA0 0x210 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA1 0x214 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA2 0x218 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA3 0x21C +#define BCMA_GMAC_CMN_CFP_TCAM_DATA4 0x220 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA5 0x224 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA6 0x228 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA7 0x22C +#define BCMA_GMAC_CMN_CFP_TCAM_MASK0 0x230 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK1 0x234 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK2 0x238 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK3 0x23C +#define BCMA_GMAC_CMN_CFP_TCAM_MASK4 0x240 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK5 0x244 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK6 0x248 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK7 0x24C +#define BCMA_GMAC_CMN_CFP_ACTION_DATA 0x250 +#define BCMA_GMAC_CMN_TCAM_BIST_CTL 0x2A0 +#define BCMA_GMAC_CMN_TCAM_BIST_STATUS 0x2A4 +#define BCMA_GMAC_CMN_TCAM_CMP_STATUS 0x2A8 +#define BCMA_GMAC_CMN_TCAM_DISABLE 0x2AC +#define BCMA_GMAC_CMN_TCAM_TEST_CTL 0x2F0 +#define BCMA_GMAC_CMN_UDF_0_A3_A0 0x300 +#define BCMA_GMAC_CMN_UDF_0_A7_A4 0x304 +#define BCMA_GMAC_CMN_UDF_0_A8 0x308 +#define BCMA_GMAC_CMN_UDF_1_A3_A0 0x310 +#define BCMA_GMAC_CMN_UDF_1_A7_A4 0x314 +#define BCMA_GMAC_CMN_UDF_1_A8 0x318 +#define BCMA_GMAC_CMN_UDF_2_A3_A0 0x320 +#define BCMA_GMAC_CMN_UDF_2_A7_A4 0x324 +#define BCMA_GMAC_CMN_UDF_2_A8 0x328 +#define BCMA_GMAC_CMN_UDF_0_B3_B0 0x330 +#define BCMA_GMAC_CMN_UDF_0_B7_B4 0x334 +#define BCMA_GMAC_CMN_UDF_0_B8 0x338 +#define BCMA_GMAC_CMN_UDF_1_B3_B0 0x340 +#define BCMA_GMAC_CMN_UDF_1_B7_B4 0x344 +#define BCMA_GMAC_CMN_UDF_1_B8 0x348 +#define BCMA_GMAC_CMN_UDF_2_B3_B0 0x350 +#define BCMA_GMAC_CMN_UDF_2_B7_B4 0x354 +#define BCMA_GMAC_CMN_UDF_2_B8 0x358 +#define BCMA_GMAC_CMN_UDF_0_C3_C0 0x360 +#define BCMA_GMAC_CMN_UDF_0_C7_C4 0x364 +#define BCMA_GMAC_CMN_UDF_0_C8 0x368 +#define BCMA_GMAC_CMN_UDF_1_C3_C0 0x370 +#define BCMA_GMAC_CMN_UDF_1_C7_C4 0x374 +#define BCMA_GMAC_CMN_UDF_1_C8 0x378 +#define BCMA_GMAC_CMN_UDF_2_C3_C0 0x380 +#define BCMA_GMAC_CMN_UDF_2_C7_C4 0x384 +#define BCMA_GMAC_CMN_UDF_2_C8 0x388 +#define BCMA_GMAC_CMN_UDF_0_D3_D0 0x390 +#define BCMA_GMAC_CMN_UDF_0_D7_D4 0x394 +#define BCMA_GMAC_CMN_UDF_0_D11_D8 0x394 + +struct bcma_drv_gmac_cmn { + struct bcma_device *core; + + /* Drivers accessing BCMA_GMAC_CMN_PHY_ACCESS and + * BCMA_GMAC_CMN_PHY_CTL need to take that mutex first. 
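+	 *
+	 * Rough usage sketch (illustrative, not part of the original
+	 * header; 'cmd' is a value assembled from the BCMA_GMAC_CMN_PA_*
+	 * fields above):
+	 *
+	 *	mutex_lock(&gc->phy_mutex);
+	 *	gmac_cmn_write32(gc, BCMA_GMAC_CMN_PHY_ACCESS, cmd);
+	 *	... poll until BCMA_GMAC_CMN_PA_START clears ...
+	 *	mutex_unlock(&gc->phy_mutex);
+	 *
+	 * gmac_cmn_write32() is the register accessor defined below.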
*/ + struct mutex phy_mutex; +}; + +/* Register access */ +#define gmac_cmn_read16(gc, offset) bcma_read16((gc)->core, offset) +#define gmac_cmn_read32(gc, offset) bcma_read32((gc)->core, offset) +#define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val) +#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val) + +#endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h new file mode 100644 index 000000000..798013fab --- /dev/null +++ b/include/linux/bcma/bcma_driver_mips.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_DRIVER_MIPS_H_ +#define LINUX_BCMA_DRIVER_MIPS_H_ + +#define BCMA_MIPS_IPSFLAG 0x0F08 +/* which sbflags get routed to mips interrupt 1 */ +#define BCMA_MIPS_IPSFLAG_IRQ1 0x0000003F +#define BCMA_MIPS_IPSFLAG_IRQ1_SHIFT 0 +/* which sbflags get routed to mips interrupt 2 */ +#define BCMA_MIPS_IPSFLAG_IRQ2 0x00003F00 +#define BCMA_MIPS_IPSFLAG_IRQ2_SHIFT 8 +/* which sbflags get routed to mips interrupt 3 */ +#define BCMA_MIPS_IPSFLAG_IRQ3 0x003F0000 +#define BCMA_MIPS_IPSFLAG_IRQ3_SHIFT 16 +/* which sbflags get routed to mips interrupt 4 */ +#define BCMA_MIPS_IPSFLAG_IRQ4 0x3F000000 +#define BCMA_MIPS_IPSFLAG_IRQ4_SHIFT 24 + +/* MIPS 74K core registers */ +#define BCMA_MIPS_MIPS74K_CORECTL 0x0000 +#define BCMA_MIPS_MIPS74K_EXCEPTBASE 0x0004 +#define BCMA_MIPS_MIPS74K_BIST 0x000C +#define BCMA_MIPS_MIPS74K_INTMASK_INT0 0x0014 +#define BCMA_MIPS_MIPS74K_INTMASK(int) \ + ((int) * 4 + BCMA_MIPS_MIPS74K_INTMASK_INT0) +#define BCMA_MIPS_MIPS74K_NMIMASK 0x002C +#define BCMA_MIPS_MIPS74K_GPIOSEL 0x0040 +#define BCMA_MIPS_MIPS74K_GPIOOUT 0x0044 +#define BCMA_MIPS_MIPS74K_GPIOEN 0x0048 +#define BCMA_MIPS_MIPS74K_CLKCTLST 0x01E0 + +#define BCMA_MIPS_OOBSELINA74 0x004 +#define BCMA_MIPS_OOBSELOUTA30 0x100 + +struct bcma_device; + +struct bcma_drv_mips { + struct bcma_device *core; + u8 setup_done:1; + u8 early_setup_done:1; +}; + +extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore); + +#endif /* LINUX_BCMA_DRIVER_MIPS_H_ */ diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h new file mode 100644 index 000000000..68da8dba5 --- /dev/null +++ b/include/linux/bcma/bcma_driver_pci.h @@ -0,0 +1,264 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_DRIVER_PCI_H_ +#define LINUX_BCMA_DRIVER_PCI_H_ + +#include + +struct pci_dev; + +/** PCI core registers. 
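+ *
+ * The offsets below index the PCI/PCIe core's register space and are
+ * normally used through the pcicore_read32()/pcicore_write32() helpers
+ * defined later in this header. A minimal, illustrative sketch:
+ *
+ *	u32 ctl = pcicore_read32(pc, BCMA_CORE_PCI_CTL);
+ *	pcicore_write32(pc, BCMA_CORE_PCI_CTL, ctl | BCMA_CORE_PCI_CTL_RST_OE);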
**/ +#define BCMA_CORE_PCI_CTL 0x0000 /* PCI Control */ +#define BCMA_CORE_PCI_CTL_RST_OE 0x00000001 /* PCI_RESET Output Enable */ +#define BCMA_CORE_PCI_CTL_RST 0x00000002 /* PCI_RESET driven out to pin */ +#define BCMA_CORE_PCI_CTL_CLK_OE 0x00000004 /* Clock gate Output Enable */ +#define BCMA_CORE_PCI_CTL_CLK 0x00000008 /* Gate for clock driven out to pin */ +#define BCMA_CORE_PCI_ARBCTL 0x0010 /* PCI Arbiter Control */ +#define BCMA_CORE_PCI_ARBCTL_INTERN 0x00000001 /* Use internal arbiter */ +#define BCMA_CORE_PCI_ARBCTL_EXTERN 0x00000002 /* Use external arbiter */ +#define BCMA_CORE_PCI_ARBCTL_PARKID 0x00000006 /* Mask, selects which agent is parked on an idle bus */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_LAST 0x00000000 /* Last requestor */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_4710 0x00000002 /* 4710 */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_EXT0 0x00000004 /* External requestor 0 */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_EXT1 0x00000006 /* External requestor 1 */ +#define BCMA_CORE_PCI_ISTAT 0x0020 /* Interrupt status */ +#define BCMA_CORE_PCI_ISTAT_INTA 0x00000001 /* PCI INTA# */ +#define BCMA_CORE_PCI_ISTAT_INTB 0x00000002 /* PCI INTB# */ +#define BCMA_CORE_PCI_ISTAT_SERR 0x00000004 /* PCI SERR# (write to clear) */ +#define BCMA_CORE_PCI_ISTAT_PERR 0x00000008 /* PCI PERR# (write to clear) */ +#define BCMA_CORE_PCI_ISTAT_PME 0x00000010 /* PCI PME# */ +#define BCMA_CORE_PCI_IMASK 0x0024 /* Interrupt mask */ +#define BCMA_CORE_PCI_IMASK_INTA 0x00000001 /* PCI INTA# */ +#define BCMA_CORE_PCI_IMASK_INTB 0x00000002 /* PCI INTB# */ +#define BCMA_CORE_PCI_IMASK_SERR 0x00000004 /* PCI SERR# */ +#define BCMA_CORE_PCI_IMASK_PERR 0x00000008 /* PCI PERR# */ +#define BCMA_CORE_PCI_IMASK_PME 0x00000010 /* PCI PME# */ +#define BCMA_CORE_PCI_MBOX 0x0028 /* Backplane to PCI Mailbox */ +#define BCMA_CORE_PCI_MBOX_F0_0 0x00000100 /* PCI function 0, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F0_1 0x00000200 /* PCI function 0, INT 1 */ +#define BCMA_CORE_PCI_MBOX_F1_0 0x00000400 /* PCI function 1, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F1_1 0x00000800 /* PCI function 1, INT 1 */ +#define BCMA_CORE_PCI_MBOX_F2_0 0x00001000 /* PCI function 2, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F2_1 0x00002000 /* PCI function 2, INT 1 */ +#define BCMA_CORE_PCI_MBOX_F3_0 0x00004000 /* PCI function 3, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F3_1 0x00008000 /* PCI function 3, INT 1 */ +#define BCMA_CORE_PCI_BCAST_ADDR 0x0050 /* Backplane Broadcast Address */ +#define BCMA_CORE_PCI_BCAST_ADDR_MASK 0x000000FF +#define BCMA_CORE_PCI_BCAST_DATA 0x0054 /* Backplane Broadcast Data */ +#define BCMA_CORE_PCI_GPIO_IN 0x0060 /* rev >= 2 only */ +#define BCMA_CORE_PCI_GPIO_OUT 0x0064 /* rev >= 2 only */ +#define BCMA_CORE_PCI_GPIO_ENABLE 0x0068 /* rev >= 2 only */ +#define BCMA_CORE_PCI_GPIO_CTL 0x006C /* rev >= 2 only */ +#define BCMA_CORE_PCI_SBTOPCI0 0x0100 /* Backplane to PCI translation 0 (sbtopci0) */ +#define BCMA_CORE_PCI_SBTOPCI0_MASK 0xFC000000 +#define BCMA_CORE_PCI_SBTOPCI1 0x0104 /* Backplane to PCI translation 1 (sbtopci1) */ +#define BCMA_CORE_PCI_SBTOPCI1_MASK 0xFC000000 +#define BCMA_CORE_PCI_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */ +#define BCMA_CORE_PCI_SBTOPCI2_MASK 0xC0000000 +#define BCMA_CORE_PCI_CONFIG_ADDR 0x0120 /* pcie config space access */ +#define BCMA_CORE_PCI_CONFIG_DATA 0x0124 /* pcie config space access */ +#define BCMA_CORE_PCI_MDIO_CONTROL 0x0128 /* controls the mdio access */ +#define BCMA_CORE_PCI_MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */ +#define 
BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL 0x2 +#define BCMA_CORE_PCI_MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequnce */ +#define BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE 0x100 /* Tranaction complete */ +#define BCMA_CORE_PCI_MDIO_DATA 0x012c /* Data to the mdio access */ +#define BCMA_CORE_PCI_MDIODATA_MASK 0x0000ffff /* data 2 bytes */ +#define BCMA_CORE_PCI_MDIODATA_TA 0x00020000 /* Turnaround */ +#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */ +#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */ +#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */ +#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */ +#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF 18 /* Regaddr shift */ +#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */ +#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */ +#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */ +#define BCMA_CORE_PCI_MDIODATA_WRITE 0x10000000 /* write Transaction */ +#define BCMA_CORE_PCI_MDIODATA_READ 0x20000000 /* Read Transaction */ +#define BCMA_CORE_PCI_MDIODATA_START 0x40000000 /* start of Transaction */ +#define BCMA_CORE_PCI_MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */ +#define BCMA_CORE_PCI_MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */ +#define BCMA_CORE_PCI_MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */ +#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */ +#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */ +#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */ +#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */ +#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */ +#define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */ +#define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */ +#define BCMA_CORE_PCI_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */ +#define BCMA_CORE_PCI_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */ +#define BCMA_CORE_PCI_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */ +#define BCMA_CORE_PCI_SPROM_PI_OFFSET 0 /* first word */ +#define BCMA_CORE_PCI_SPROM_PI_MASK 0xf000 /* bit 15:12 */ +#define BCMA_CORE_PCI_SPROM_PI_SHIFT 12 /* bit 15:12 */ +#define BCMA_CORE_PCI_SPROM_MISC_CONFIG 5 /* word 5 */ +#define BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */ +#define BCMA_CORE_PCI_SPROM_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */ +#define BCMA_CORE_PCI_SPROM_CLKREQ_ENB 0x0800 /* bit 11 */ + +/* SBtoPCIx */ +#define BCMA_CORE_PCI_SBTOPCI_MEM 0x00000000 +#define BCMA_CORE_PCI_SBTOPCI_IO 0x00000001 +#define BCMA_CORE_PCI_SBTOPCI_CFG0 0x00000002 +#define BCMA_CORE_PCI_SBTOPCI_CFG1 0x00000003 +#define BCMA_CORE_PCI_SBTOPCI_PREF 0x00000004 /* Prefetch enable */ +#define BCMA_CORE_PCI_SBTOPCI_BURST 0x00000008 /* Burst enable */ +#define BCMA_CORE_PCI_SBTOPCI_MRM 0x00000020 /* Memory Read Multiple */ +#define BCMA_CORE_PCI_SBTOPCI_RC 0x00000030 /* Read Command mask (rev >= 11) */ +#define BCMA_CORE_PCI_SBTOPCI_RC_READ 0x00000000 /* Memory read */ +#define BCMA_CORE_PCI_SBTOPCI_RC_READL 0x00000010 /* Memory read line */ +#define BCMA_CORE_PCI_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */ + +/* PCIE protocol PHY diagnostic registers */ +#define BCMA_CORE_PCI_PLP_MODEREG 0x200 /* Mode */ +#define 
BCMA_CORE_PCI_PLP_STATUSREG 0x204 /* Status */ +#define BCMA_CORE_PCI_PLP_POLARITYINV_STAT 0x10 /* Status reg PCIE_PLP_STATUSREG */ +#define BCMA_CORE_PCI_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */ +#define BCMA_CORE_PCI_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */ +#define BCMA_CORE_PCI_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */ +#define BCMA_CORE_PCI_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */ +#define BCMA_CORE_PCI_PLP_ATTNREG 0x218 /* Attention */ +#define BCMA_CORE_PCI_PLP_ATTNMASKREG 0x21C /* Attention Mask */ +#define BCMA_CORE_PCI_PLP_RXERRCTR 0x220 /* Rx Error */ +#define BCMA_CORE_PCI_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */ +#define BCMA_CORE_PCI_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */ +#define BCMA_CORE_PCI_PLP_TESTCTRLREG 0x22C /* Test Control reg */ +#define BCMA_CORE_PCI_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */ +#define BCMA_CORE_PCI_PLP_TIMINGOVRDREG 0x234 /* Timing param override */ +#define BCMA_CORE_PCI_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */ +#define BCMA_CORE_PCI_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */ + +/* PCIE protocol DLLP diagnostic registers */ +#define BCMA_CORE_PCI_DLLP_LCREG 0x100 /* Link Control */ +#define BCMA_CORE_PCI_DLLP_LSREG 0x104 /* Link Status */ +#define BCMA_CORE_PCI_DLLP_LAREG 0x108 /* Link Attention */ +#define BCMA_CORE_PCI_DLLP_LSREG_LINKUP (1 << 16) +#define BCMA_CORE_PCI_DLLP_LAMASKREG 0x10C /* Link Attention Mask */ +#define BCMA_CORE_PCI_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */ +#define BCMA_CORE_PCI_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */ +#define BCMA_CORE_PCI_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */ +#define BCMA_CORE_PCI_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */ +#define BCMA_CORE_PCI_DLLP_LRREG 0x120 /* Link Replay */ +#define BCMA_CORE_PCI_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */ +#define BCMA_CORE_PCI_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */ +#define BCMA_CORE_PCI_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */ +#define BCMA_CORE_PCI_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */ +#define BCMA_CORE_PCI_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */ +#define BCMA_CORE_PCI_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */ +#define BCMA_CORE_PCI_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */ +#define BCMA_CORE_PCI_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */ +#define BCMA_CORE_PCI_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */ +#define BCMA_CORE_PCI_DLLP_ERRCTRREG 0x144 /* Error Counter */ +#define BCMA_CORE_PCI_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */ +#define BCMA_CORE_PCI_DLLP_TESTREG 0x14C /* Test */ +#define BCMA_CORE_PCI_DLLP_PKTBIST 0x150 /* Packet BIST */ +#define BCMA_CORE_PCI_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */ + +/* SERDES RX registers */ +#define BCMA_CORE_PCI_SERDES_RX_CTRL 1 /* Rx cntrl */ +#define BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */ +#define BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */ +#define BCMA_CORE_PCI_SERDES_RX_TIMER1 2 /* Rx Timer1 */ +#define BCMA_CORE_PCI_SERDES_RX_CDR 6 /* CDR */ +#define BCMA_CORE_PCI_SERDES_RX_CDRBW 7 /* CDR BW */ + +/* SERDES PLL registers */ +#define BCMA_CORE_PCI_SERDES_PLL_CTRL 1 /* PLL control reg */ +#define BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */ + +/* PCIcore specific boardflags */ +#define BCMA_CORE_PCI_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ + +/* PCIE Config space accessing MACROS */ +#define 
BCMA_CORE_PCI_CFG_BUS_SHIFT 24 /* Bus shift */ +#define BCMA_CORE_PCI_CFG_SLOT_SHIFT 19 /* Slot/Device shift */ +#define BCMA_CORE_PCI_CFG_FUN_SHIFT 16 /* Function shift */ +#define BCMA_CORE_PCI_CFG_OFF_SHIFT 0 /* Register shift */ + +#define BCMA_CORE_PCI_CFG_BUS_MASK 0xff /* Bus mask */ +#define BCMA_CORE_PCI_CFG_SLOT_MASK 0x1f /* Slot/Device mask */ +#define BCMA_CORE_PCI_CFG_FUN_MASK 7 /* Function mask */ +#define BCMA_CORE_PCI_CFG_OFF_MASK 0xfff /* Register mask */ + +#define BCMA_CORE_PCI_CFG_DEVCTRL 0xd8 + +#define BCMA_CORE_PCI_ + +/* MDIO devices (SERDES modules) */ +#define BCMA_CORE_PCI_MDIO_IEEE0 0x000 +#define BCMA_CORE_PCI_MDIO_IEEE1 0x001 +#define BCMA_CORE_PCI_MDIO_BLK0 0x800 +#define BCMA_CORE_PCI_MDIO_BLK1 0x801 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT0 0x16 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT1 0x17 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT2 0x18 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT3 0x19 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT4 0x1A +#define BCMA_CORE_PCI_MDIO_BLK2 0x802 +#define BCMA_CORE_PCI_MDIO_BLK3 0x803 +#define BCMA_CORE_PCI_MDIO_BLK4 0x804 +#define BCMA_CORE_PCI_MDIO_TXPLL 0x808 /* TXPLL register block idx */ +#define BCMA_CORE_PCI_MDIO_TXCTRL0 0x820 +#define BCMA_CORE_PCI_MDIO_SERDESID 0x831 +#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840 + +/* PCIE Root Capability Register bits (Host mode only) */ +#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001 + +struct bcma_drv_pci; +struct bcma_bus; + +#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE +struct bcma_drv_pci_host { + struct bcma_drv_pci *pdev; + + u32 host_cfg_addr; + spinlock_t cfgspace_lock; + + struct pci_controller pci_controller; + struct pci_ops pci_ops; + struct resource mem_resource; + struct resource io_resource; +}; +#endif + +struct bcma_drv_pci { + struct bcma_device *core; + u8 early_setup_done:1; + u8 setup_done:1; + u8 hostmode:1; + +#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE + struct bcma_drv_pci_host *host_controller; +#endif +}; + +/* Register access */ +#define pcicore_read16(pc, offset) bcma_read16((pc)->core, offset) +#define pcicore_read32(pc, offset) bcma_read32((pc)->core, offset) +#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) +#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) + +#ifdef CONFIG_BCMA_DRIVER_PCI +extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up); +#else +static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up) +{ +} +#endif + +#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE +extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); +extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); +#else +static inline int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev) +{ + return -ENOTSUPP; +} +static inline int bcma_core_pci_plat_dev_init(struct pci_dev *dev) +{ + return -ENOTSUPP; +} +#endif + +#endif /* LINUX_BCMA_DRIVER_PCI_H_ */ diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h new file mode 100644 index 000000000..91ce515e3 --- /dev/null +++ b/include/linux/bcma/bcma_driver_pcie2.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_DRIVER_PCIE2_H_ +#define LINUX_BCMA_DRIVER_PCIE2_H_ + +#define BCMA_CORE_PCIE2_CLK_CONTROL 0x0000 +#define PCIE2_CLKC_RST_OE 0x0001 /* When set, drives PCI_RESET out to pin */ +#define PCIE2_CLKC_RST 0x0002 /* Value driven out to pin */ +#define PCIE2_CLKC_SPERST 0x0004 /* SurvivePeRst */ +#define PCIE2_CLKC_DISABLE_L1CLK_GATING 0x0010 +#define PCIE2_CLKC_DLYPERST 0x0100 /* Delay PeRst to CoE 
Core */ +#define PCIE2_CLKC_DISSPROMLD 0x0200 /* DisableSpromLoadOnPerst */ +#define PCIE2_CLKC_WAKE_MODE_L2 0x1000 /* Wake on L2 */ +#define BCMA_CORE_PCIE2_RC_PM_CONTROL 0x0004 +#define BCMA_CORE_PCIE2_RC_PM_STATUS 0x0008 +#define BCMA_CORE_PCIE2_EP_PM_CONTROL 0x000C +#define BCMA_CORE_PCIE2_EP_PM_STATUS 0x0010 +#define BCMA_CORE_PCIE2_EP_LTR_CONTROL 0x0014 +#define BCMA_CORE_PCIE2_EP_LTR_STATUS 0x0018 +#define BCMA_CORE_PCIE2_EP_OBFF_STATUS 0x001C +#define BCMA_CORE_PCIE2_PCIE_ERR_STATUS 0x0020 +#define BCMA_CORE_PCIE2_RC_AXI_CONFIG 0x0100 +#define BCMA_CORE_PCIE2_EP_AXI_CONFIG 0x0104 +#define BCMA_CORE_PCIE2_RXDEBUG_STATUS0 0x0108 +#define BCMA_CORE_PCIE2_RXDEBUG_CONTROL0 0x010C +#define BCMA_CORE_PCIE2_CONFIGINDADDR 0x0120 +#define BCMA_CORE_PCIE2_CONFIGINDDATA 0x0124 +#define BCMA_CORE_PCIE2_MDIOCONTROL 0x0128 +#define BCMA_CORE_PCIE2_MDIOWRDATA 0x012C +#define BCMA_CORE_PCIE2_MDIORDDATA 0x0130 +#define BCMA_CORE_PCIE2_DATAINTF 0x0180 +#define BCMA_CORE_PCIE2_D2H_INTRLAZY_0 0x0188 +#define BCMA_CORE_PCIE2_H2D_INTRLAZY_0 0x018c +#define BCMA_CORE_PCIE2_H2D_INTSTAT_0 0x0190 +#define BCMA_CORE_PCIE2_H2D_INTMASK_0 0x0194 +#define BCMA_CORE_PCIE2_D2H_INTSTAT_0 0x0198 +#define BCMA_CORE_PCIE2_D2H_INTMASK_0 0x019c +#define BCMA_CORE_PCIE2_LTR_STATE 0x01A0 /* Latency Tolerance Reporting */ +#define PCIE2_LTR_ACTIVE 2 +#define PCIE2_LTR_ACTIVE_IDLE 1 +#define PCIE2_LTR_SLEEP 0 +#define PCIE2_LTR_FINAL_MASK 0x300 +#define PCIE2_LTR_FINAL_SHIFT 8 +#define BCMA_CORE_PCIE2_PWR_INT_STATUS 0x01A4 +#define BCMA_CORE_PCIE2_PWR_INT_MASK 0x01A8 +#define BCMA_CORE_PCIE2_CFG_ADDR 0x01F8 +#define BCMA_CORE_PCIE2_CFG_DATA 0x01FC +#define BCMA_CORE_PCIE2_SYS_EQ_PAGE 0x0200 +#define BCMA_CORE_PCIE2_SYS_MSI_PAGE 0x0204 +#define BCMA_CORE_PCIE2_SYS_MSI_INTREN 0x0208 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL0 0x0210 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL1 0x0214 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL2 0x0218 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL3 0x021C +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL4 0x0220 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL5 0x0224 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD0 0x0250 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL0 0x0254 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD1 0x0258 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL1 0x025C +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD2 0x0260 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL2 0x0264 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD3 0x0268 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL3 0x026C +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD4 0x0270 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL4 0x0274 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD5 0x0278 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL5 0x027C +#define BCMA_CORE_PCIE2_SYS_RC_INTX_EN 0x0330 +#define BCMA_CORE_PCIE2_SYS_RC_INTX_CSR 0x0334 +#define BCMA_CORE_PCIE2_SYS_MSI_REQ 0x0340 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR_EN 0x0344 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR_CSR 0x0348 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR0 0x0350 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR1 0x0354 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR2 0x0358 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR3 0x035C +#define BCMA_CORE_PCIE2_SYS_EP_INT_EN0 0x0360 +#define BCMA_CORE_PCIE2_SYS_EP_INT_EN1 0x0364 +#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR0 0x0370 +#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR1 0x0374 +#define BCMA_CORE_PCIE2_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_0 0x0C00 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_1 0x0C04 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_2 0x0C08 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_3 0x0C0C +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_4 0x0C10 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_5 
0x0C14 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_6 0x0C18 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_7 0x0C1C +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_0 0x0C20 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_1 0x0C24 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_2 0x0C28 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_3 0x0C2C +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_4 0x0C30 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_5 0x0C34 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_6 0x0C38 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_7 0x0C3C +#define BCMA_CORE_PCIE2_FUNC0_IMAP1 0x0C80 +#define BCMA_CORE_PCIE2_FUNC1_IMAP1 0x0C88 +#define BCMA_CORE_PCIE2_FUNC0_IMAP2 0x0CC0 +#define BCMA_CORE_PCIE2_FUNC1_IMAP2 0x0CC8 +#define BCMA_CORE_PCIE2_IARR0_LOWER 0x0D00 +#define BCMA_CORE_PCIE2_IARR0_UPPER 0x0D04 +#define BCMA_CORE_PCIE2_IARR1_LOWER 0x0D08 +#define BCMA_CORE_PCIE2_IARR1_UPPER 0x0D0C +#define BCMA_CORE_PCIE2_IARR2_LOWER 0x0D10 +#define BCMA_CORE_PCIE2_IARR2_UPPER 0x0D14 +#define BCMA_CORE_PCIE2_OARR0 0x0D20 +#define BCMA_CORE_PCIE2_OARR1 0x0D28 +#define BCMA_CORE_PCIE2_OARR2 0x0D30 +#define BCMA_CORE_PCIE2_OMAP0_LOWER 0x0D40 +#define BCMA_CORE_PCIE2_OMAP0_UPPER 0x0D44 +#define BCMA_CORE_PCIE2_OMAP1_LOWER 0x0D48 +#define BCMA_CORE_PCIE2_OMAP1_UPPER 0x0D4C +#define BCMA_CORE_PCIE2_OMAP2_LOWER 0x0D50 +#define BCMA_CORE_PCIE2_OMAP2_UPPER 0x0D54 +#define BCMA_CORE_PCIE2_FUNC1_IARR1_SIZE 0x0D58 +#define BCMA_CORE_PCIE2_FUNC1_IARR2_SIZE 0x0D5C +#define BCMA_CORE_PCIE2_MEM_CONTROL 0x0F00 +#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG0 0x0F04 +#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG1 0x0F08 +#define BCMA_CORE_PCIE2_LINK_STATUS 0x0F0C +#define BCMA_CORE_PCIE2_STRAP_STATUS 0x0F10 +#define BCMA_CORE_PCIE2_RESET_STATUS 0x0F14 +#define BCMA_CORE_PCIE2_RESETEN_IN_LINKDOWN 0x0F18 +#define BCMA_CORE_PCIE2_MISC_INTR_EN 0x0F1C +#define BCMA_CORE_PCIE2_TX_DEBUG_CFG 0x0F20 +#define BCMA_CORE_PCIE2_MISC_CONFIG 0x0F24 +#define BCMA_CORE_PCIE2_MISC_STATUS 0x0F28 +#define BCMA_CORE_PCIE2_INTR_EN 0x0F30 +#define BCMA_CORE_PCIE2_INTR_CLEAR 0x0F34 +#define BCMA_CORE_PCIE2_INTR_STATUS 0x0F38 + +/* PCIE gen2 config regs */ +#define PCIE2_INTSTATUS 0x090 +#define PCIE2_INTMASK 0x094 +#define PCIE2_SBMBX 0x098 + +#define PCIE2_PMCR_REFUP 0x1814 /* Trefup time */ + +#define PCIE2_CAP_DEVSTSCTRL2_OFFSET 0xD4 +#define PCIE2_CAP_DEVSTSCTRL2_LTRENAB 0x400 +#define PCIE2_PVT_REG_PM_CLK_PERIOD 0x184c + +struct bcma_drv_pcie2 { + struct bcma_device *core; + + u16 reqsize; +}; + +#define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset) +#define pcie2_read32(pcie2, offset) bcma_read32((pcie2)->core, offset) +#define pcie2_write16(pcie2, offset, val) bcma_write16((pcie2)->core, offset, val) +#define pcie2_write32(pcie2, offset, val) bcma_write32((pcie2)->core, offset, val) + +#define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set) +#define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask) + +#endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */ diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h new file mode 100644 index 000000000..944105cbd --- /dev/null +++ b/include/linux/bcma/bcma_regs.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_REGS_H_ +#define LINUX_BCMA_REGS_H_ + +/* Some single registers are shared between many cores */ +/* BCMA_CLKCTLST: ChipCommon (rev >= 20), PCIe, 80211 */ +#define BCMA_CLKCTLST 0x01E0 /* Clock control and status */ +#define BCMA_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */ +#define BCMA_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */ +#define BCMA_CLKCTLST_FORCEILP 
0x00000004 /* Force ILP request */ +#define BCMA_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */ +#define BCMA_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */ +#define BCMA_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ +#define BCMA_CLKCTLST_HQCLKREQ 0x00000040 /* HQ Clock */ +#define BCMA_CLKCTLST_EXTRESREQ 0x00000700 /* Mask of external resource requests */ +#define BCMA_CLKCTLST_EXTRESREQ_SHIFT 8 +#define BCMA_CLKCTLST_HAVEALP 0x00010000 /* ALP available */ +#define BCMA_CLKCTLST_HAVEHT 0x00020000 /* HT available */ +#define BCMA_CLKCTLST_BP_ON_ALP 0x00040000 /* RO: running on ALP clock */ +#define BCMA_CLKCTLST_BP_ON_HT 0x00080000 /* RO: running on HT clock */ +#define BCMA_CLKCTLST_EXTRESST 0x07000000 /* Mask of external resource status */ +#define BCMA_CLKCTLST_EXTRESST_SHIFT 24 +/* Is there any BCM4328 on BCMA bus? */ +#define BCMA_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */ +#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ + +/* Agent registers (common for every core) */ +#define BCMA_OOB_SEL_OUT_A30 0x0100 +#define BCMA_IOCTL 0x0408 /* IO control */ +#define BCMA_IOCTL_CLK 0x0001 +#define BCMA_IOCTL_FGC 0x0002 +#define BCMA_IOCTL_CORE_BITS 0x3FFC +#define BCMA_IOCTL_PME_EN 0x4000 +#define BCMA_IOCTL_BIST_EN 0x8000 +#define BCMA_IOST 0x0500 /* IO status */ +#define BCMA_IOST_CORE_BITS 0x0FFF +#define BCMA_IOST_DMA64 0x1000 +#define BCMA_IOST_GATED_CLK 0x2000 +#define BCMA_IOST_BIST_ERROR 0x4000 +#define BCMA_IOST_BIST_DONE 0x8000 +#define BCMA_RESET_CTL 0x0800 +#define BCMA_RESET_CTL_RESET 0x0001 +#define BCMA_RESET_ST 0x0804 + +#define BCMA_NS_ROM_IOST_BOOT_DEV_MASK 0x0003 +#define BCMA_NS_ROM_IOST_BOOT_DEV_NOR 0x0000 +#define BCMA_NS_ROM_IOST_BOOT_DEV_NAND 0x0001 +#define BCMA_NS_ROM_IOST_BOOT_DEV_ROM 0x0002 + +/* BCMA PCI config space registers. */ +#define BCMA_PCI_PMCSR 0x44 +#define BCMA_PCI_PE 0x100 +#define BCMA_PCI_BAR0_WIN 0x80 /* Backplane address space 0 */ +#define BCMA_PCI_BAR1_WIN 0x84 /* Backplane address space 1 */ +#define BCMA_PCI_SPROMCTL 0x88 /* SPROM control */ +#define BCMA_PCI_SPROMCTL_WE 0x10 /* SPROM write enable */ +#define BCMA_PCI_BAR1_CONTROL 0x8c /* Address space 1 burst control */ +#define BCMA_PCI_IRQS 0x90 /* PCI interrupts */ +#define BCMA_PCI_IRQMASK 0x94 /* PCI IRQ control and mask (pcirev >= 6 only) */ +#define BCMA_PCI_BACKPLANE_IRQS 0x98 /* Backplane Interrupts */ +#define BCMA_PCI_BAR0_WIN2 0xAC +#define BCMA_PCI_GPIO_IN 0xB0 /* GPIO Input (pcirev >= 3 only) */ +#define BCMA_PCI_GPIO_OUT 0xB4 /* GPIO Output (pcirev >= 3 only) */ +#define BCMA_PCI_GPIO_OUT_ENABLE 0xB8 /* GPIO Output Enable/Disable (pcirev >= 3 only) */ +#define BCMA_PCI_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define BCMA_PCI_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ +#define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ + +#define BCMA_PCIE2_BAR0_WIN2 0x70 + +/* SiliconBackplane Address Map. + * All regions may not exist on all chips. 
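+ *
+ * Illustrative only, not part of the original header: an SoC host driver
+ * would typically ioremap() one of these windows before touching it, e.g.
+ *
+ *	void __iomem *win = ioremap(BCMA_SOC_FLASH2, BCMA_SOC_FLASH2_SZ);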
+ */ +#define BCMA_SOC_SDRAM_BASE 0x00000000U /* Physical SDRAM */ +#define BCMA_SOC_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */ +#define BCMA_SOC_PCI_MEM_SZ (64 * 1024 * 1024) +#define BCMA_SOC_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */ +#define BCMA_SOC_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */ +#define BCMA_SOC_SDRAM_R2 0x80000000U /* Region 2 for sdram (512 MB) */ + + +#define BCMA_SOC_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */ +#define BCMA_SOC_PCI_DMA2 0x80000000U /* Client Mode sb2pcitranslation2 (1 GB) */ +#define BCMA_SOC_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */ +#define BCMA_SOC_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), low 32 bits + */ +#define BCMA_SOC_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +#define BCMA_SOC_PCI1_MEM 0x40000000U /* Host Mode sb2pcitranslation0 (64 MB) */ +#define BCMA_SOC_PCI1_CFG 0x44000000U /* Host Mode sb2pcitranslation1 (64 MB) */ +#define BCMA_SOC_PCIE1_DMA_H32 0xc0000000U /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +#define BCMA_SOC_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */ +#define BCMA_SOC_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */ +#define BCMA_SOC_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */ +#define BCMA_SOC_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */ + +#endif /* LINUX_BCMA_REGS_H_ */ diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h new file mode 100644 index 000000000..f3c43519b --- /dev/null +++ b/include/linux/bcma/bcma_soc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_SOC_H_ +#define LINUX_BCMA_SOC_H_ + +#include + +struct bcma_soc { + struct bcma_bus bus; + struct device *dev; +}; + +int __init bcma_host_soc_register(struct bcma_soc *soc); +int __init bcma_host_soc_init(struct bcma_soc *soc); + +int bcma_bus_register(struct bcma_bus *bus); + +#endif /* LINUX_BCMA_SOC_H_ */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h new file mode 100644 index 000000000..c05f24fac --- /dev/null +++ b/include/linux/binfmts.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BINFMTS_H +#define _LINUX_BINFMTS_H + +#include +#include +#include +#include + +struct filename; + +#define CORENAME_MAX_SIZE 128 + +/* + * This structure is used to hold the arguments that are used when loading binaries. + */ +struct linux_binprm { + char buf[BINPRM_BUF_SIZE]; +#ifdef CONFIG_MMU + struct vm_area_struct *vma; + unsigned long vma_pages; +#else +# define MAX_ARG_PAGES 32 + struct page *page[MAX_ARG_PAGES]; +#endif + struct mm_struct *mm; + unsigned long p; /* current top of mem */ + unsigned int + /* + * True after the bprm_set_creds hook has been called once + * (multiple calls can be made via prepare_binprm() for + * binfmt_script/misc). + */ + called_set_creds:1, + /* + * True if most recent call to the commoncaps bprm_set_creds + * hook (due to multiple prepare_binprm() calls from the + * binfmt_script/misc handlers) resulted in elevated + * privileges. + */ + cap_elevated:1, + /* + * Set by bprm_set_creds hook to indicate a privilege-gaining + * exec has happened. Used to sanitize execution environment + * and to set AT_SECURE auxv for glibc. 
+ */ + secureexec:1; +#ifdef __alpha__ + unsigned int taso:1; +#endif + unsigned int recursion_depth; /* only for search_binary_handler() */ + struct file * file; + struct cred *cred; /* new credentials */ + int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ + unsigned int per_clear; /* bits to clear in current->personality */ + int argc, envc; + const char * filename; /* Name of binary as seen by procps */ + const char * interp; /* Name of the binary really executed. Most + of the time same as filename, but could be + different for binfmt_{misc,script} */ + unsigned interp_flags; + unsigned interp_data; + unsigned long loader, exec; + + struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */ +} __randomize_layout; + +#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 +#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT) + +/* fd of the binary should be passed to the interpreter */ +#define BINPRM_FLAGS_EXECFD_BIT 1 +#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) + +/* filename of the binary will be inaccessible after exec */ +#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 +#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) + +/* Function parameter for binfmt->coredump */ +struct coredump_params { + const siginfo_t *siginfo; + struct pt_regs *regs; + struct file *file; + unsigned long limit; + unsigned long mm_flags; + loff_t written; + loff_t pos; +}; + +/* + * This structure defines the functions that are used to load the binary formats that + * linux accepts. + */ +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *cprm); + unsigned long min_coredump; /* minimal dump size */ +} __randomize_layout; + +extern void __register_binfmt(struct linux_binfmt *fmt, int insert); + +/* Registration of default binfmt handlers */ +static inline void register_binfmt(struct linux_binfmt *fmt) +{ + __register_binfmt(fmt, 0); +} +/* Same as above, but adds a new binfmt at the top of the list */ +static inline void insert_binfmt(struct linux_binfmt *fmt) +{ + __register_binfmt(fmt, 1); +} + +extern void unregister_binfmt(struct linux_binfmt *); + +extern int prepare_binprm(struct linux_binprm *); +extern int __must_check remove_arg_zero(struct linux_binprm *); +extern int search_binary_handler(struct linux_binprm *); +extern int flush_old_exec(struct linux_binprm * bprm); +extern void setup_new_exec(struct linux_binprm * bprm); +extern void finalize_exec(struct linux_binprm *bprm); +extern void would_dump(struct linux_binprm *, struct file *); + +extern int suid_dumpable; + +/* Stack area protections */ +#define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */ +#define EXSTACK_DISABLE_X 1 /* Disable executable stacks */ +#define EXSTACK_ENABLE_X 2 /* Enable executable stacks */ + +extern int setup_arg_pages(struct linux_binprm * bprm, + unsigned long stack_top, + int executable_stack); +extern int transfer_args_to_stack(struct linux_binprm *bprm, + unsigned long *sp_location); +extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm); +extern int copy_strings_kernel(int argc, const char *const *argv, + struct linux_binprm *bprm); +extern int prepare_bprm_creds(struct linux_binprm *bprm); +extern void install_exec_creds(struct linux_binprm *bprm); +extern void set_binfmt(struct linux_binfmt *new); +extern ssize_t read_code(struct file *, unsigned long, loff_t, 
size_t); + +extern int do_execve(struct filename *, + const char __user * const __user *, + const char __user * const __user *); +extern int do_execveat(int, struct filename *, + const char __user * const __user *, + const char __user * const __user *, + int); +int do_execve_file(struct file *file, void *__argv, void *__envp); + +#endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h new file mode 100644 index 000000000..c7433a201 --- /dev/null +++ b/include/linux/bio.h @@ -0,0 +1,866 @@ +/* + * Copyright (C) 2001 Jens Axboe + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- + */ +#ifndef __LINUX_BIO_H +#define __LINUX_BIO_H + +#include +#include +#include +#include + +#ifdef CONFIG_BLOCK + +#include + +/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ +#include + +#define BIO_DEBUG + +#ifdef BIO_DEBUG +#define BIO_BUG_ON BUG_ON +#else +#define BIO_BUG_ON +#endif + +#ifdef CONFIG_THP_SWAP +#if HPAGE_PMD_NR > 256 +#define BIO_MAX_PAGES HPAGE_PMD_NR +#else +#define BIO_MAX_PAGES 256 +#endif +#else +#define BIO_MAX_PAGES 256 +#endif + +#define bio_prio(bio) (bio)->bi_ioprio +#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) + +#define bio_iter_iovec(bio, iter) \ + bvec_iter_bvec((bio)->bi_io_vec, (iter)) + +#define bio_iter_page(bio, iter) \ + bvec_iter_page((bio)->bi_io_vec, (iter)) +#define bio_iter_len(bio, iter) \ + bvec_iter_len((bio)->bi_io_vec, (iter)) +#define bio_iter_offset(bio, iter) \ + bvec_iter_offset((bio)->bi_io_vec, (iter)) + +#define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter) +#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) +#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) + +#define bio_multiple_segments(bio) \ + ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len) + +#define bvec_iter_sectors(iter) ((iter).bi_size >> 9) +#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter))) + +#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter) +#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter) + +/* + * Return the data direction, READ or WRITE. + */ +#define bio_data_dir(bio) \ + (op_is_write(bio_op(bio)) ? WRITE : READ) + +/* + * Check whether this bio carries any data or not. A NULL bio is allowed. 
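+ *
+ * Illustrative use, not part of the original header: callers usually gate
+ * data-touching paths on it, e.g.
+ *
+ *	if (bio_has_data(bio))
+ *		bytes += bio->bi_iter.bi_size;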
+ */ +static inline bool bio_has_data(struct bio *bio) +{ + if (bio && + bio->bi_iter.bi_size && + bio_op(bio) != REQ_OP_DISCARD && + bio_op(bio) != REQ_OP_SECURE_ERASE && + bio_op(bio) != REQ_OP_WRITE_ZEROES) + return true; + + return false; +} + +static inline bool bio_no_advance_iter(struct bio *bio) +{ + return bio_op(bio) == REQ_OP_DISCARD || + bio_op(bio) == REQ_OP_SECURE_ERASE || + bio_op(bio) == REQ_OP_WRITE_SAME || + bio_op(bio) == REQ_OP_WRITE_ZEROES; +} + +static inline bool bio_mergeable(struct bio *bio) +{ + if (bio->bi_opf & REQ_NOMERGE_FLAGS) + return false; + + return true; +} + +static inline unsigned int bio_cur_bytes(struct bio *bio) +{ + if (bio_has_data(bio)) + return bio_iovec(bio).bv_len; + else /* dataless requests such as discard */ + return bio->bi_iter.bi_size; +} + +static inline void *bio_data(struct bio *bio) +{ + if (bio_has_data(bio)) + return page_address(bio_page(bio)) + bio_offset(bio); + + return NULL; +} + +static inline bool bio_full(struct bio *bio) +{ + return bio->bi_vcnt >= bio->bi_max_vecs; +} + +/* + * will die + */ +#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset) + +/* + * merge helpers etc + */ + +/* Default implementation of BIOVEC_PHYS_MERGEABLE */ +#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) + +/* + * allow arch override, for eg virtualized architectures (put in asm/io.h) + */ +#ifndef BIOVEC_PHYS_MERGEABLE +#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + __BIOVEC_PHYS_MERGEABLE(vec1, vec2) +#endif + +#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ + (((addr1) | (mask)) == (((addr2) - 1) | (mask))) +#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ + __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) + +/* + * drivers should _never_ use the all version - the bio may have been split + * before it got to the driver and the driver won't own all of it + */ +#define bio_for_each_segment_all(bvl, bio, i) \ + for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++) + +static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, + unsigned bytes) +{ + iter->bi_sector += bytes >> 9; + + if (bio_no_advance_iter(bio)) { + iter->bi_size -= bytes; + iter->bi_done += bytes; + } else { + bvec_iter_advance(bio->bi_io_vec, iter, bytes); + /* TODO: It is reasonable to complete bio with error here. 
*/ + } +} + +static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter, + unsigned int bytes) +{ + iter->bi_sector -= bytes >> 9; + + if (bio_no_advance_iter(bio)) { + iter->bi_size += bytes; + iter->bi_done -= bytes; + return true; + } + + return bvec_iter_rewind(bio->bi_io_vec, iter, bytes); +} + +#define __bio_for_each_segment(bvl, bio, iter, start) \ + for (iter = (start); \ + (iter).bi_size && \ + ((bvl = bio_iter_iovec((bio), (iter))), 1); \ + bio_advance_iter((bio), &(iter), (bvl).bv_len)) + +#define bio_for_each_segment(bvl, bio, iter) \ + __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) + +#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) + +static inline unsigned bio_segments(struct bio *bio) +{ + unsigned segs = 0; + struct bio_vec bv; + struct bvec_iter iter; + + /* + * We special case discard/write same/write zeroes, because they + * interpret bi_size differently: + */ + + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + case REQ_OP_WRITE_ZEROES: + return 0; + case REQ_OP_WRITE_SAME: + return 1; + default: + break; + } + + bio_for_each_segment(bv, bio, iter) + segs++; + + return segs; +} + +/* + * get a reference to a bio, so it won't disappear. the intended use is + * something like: + * + * bio_get(bio); + * submit_bio(rw, bio); + * if (bio->bi_flags ...) + * do_something + * bio_put(bio); + * + * without the bio_get(), it could potentially complete I/O before submit_bio + * returns. and then bio would be freed memory when if (bio->bi_flags ...) + * runs + */ +static inline void bio_get(struct bio *bio) +{ + bio->bi_flags |= (1 << BIO_REFFED); + smp_mb__before_atomic(); + atomic_inc(&bio->__bi_cnt); +} + +static inline void bio_cnt_set(struct bio *bio, unsigned int count) +{ + if (count != 1) { + bio->bi_flags |= (1 << BIO_REFFED); + smp_mb(); + } + atomic_set(&bio->__bi_cnt, count); +} + +static inline bool bio_flagged(struct bio *bio, unsigned int bit) +{ + return (bio->bi_flags & (1U << bit)) != 0; +} + +static inline void bio_set_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_flags |= (1U << bit); +} + +static inline void bio_clear_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_flags &= ~(1U << bit); +} + +static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) +{ + *bv = bio_iovec(bio); +} + +static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) +{ + struct bvec_iter iter = bio->bi_iter; + int idx; + + if (unlikely(!bio_multiple_segments(bio))) { + *bv = bio_iovec(bio); + return; + } + + bio_advance_iter(bio, &iter, iter.bi_size); + + if (!iter.bi_bvec_done) + idx = iter.bi_idx - 1; + else /* in the middle of bvec */ + idx = iter.bi_idx; + + *bv = bio->bi_io_vec[idx]; + + /* + * iter.bi_bvec_done records actual length of the last bvec + * if this bio ends in the middle of one io vector + */ + if (iter.bi_bvec_done) + bv->bv_len = iter.bi_bvec_done; +} + +static inline unsigned bio_pages_all(struct bio *bio) +{ + WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); + return bio->bi_vcnt; +} + +static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) +{ + WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); + return bio->bi_io_vec; +} + +static inline struct page *bio_first_page_all(struct bio *bio) +{ + return bio_first_bvec_all(bio)->bv_page; +} + +static inline struct bio_vec *bio_last_bvec_all(struct bio *bio) +{ + WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); + return &bio->bi_io_vec[bio->bi_vcnt - 1]; +} + +enum bip_flags { + BIP_BLOCK_INTEGRITY = 1 << 0, /* 
block layer owns integrity data */ + BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ + BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */ + BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */ + BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */ +}; + +/* + * bio integrity payload + */ +struct bio_integrity_payload { + struct bio *bip_bio; /* parent bio */ + + struct bvec_iter bip_iter; + + unsigned short bip_slab; /* slab the bip came from */ + unsigned short bip_vcnt; /* # of integrity bio_vecs */ + unsigned short bip_max_vcnt; /* integrity bio_vec slots */ + unsigned short bip_flags; /* control flags */ + + struct work_struct bip_work; /* I/O completion */ + + struct bio_vec *bip_vec; + struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ +}; + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) +{ + if (bio->bi_opf & REQ_INTEGRITY) + return bio->bi_integrity; + + return NULL; +} + +static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + + if (bip) + return bip->bip_flags & flag; + + return false; +} + +static inline sector_t bip_get_seed(struct bio_integrity_payload *bip) +{ + return bip->bip_iter.bi_sector; +} + +static inline void bip_set_seed(struct bio_integrity_payload *bip, + sector_t seed) +{ + bip->bip_iter.bi_sector = seed; +} + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +extern void bio_trim(struct bio *bio, int offset, int size); +extern struct bio *bio_split(struct bio *bio, int sectors, + gfp_t gfp, struct bio_set *bs); + +/** + * bio_next_split - get next @sectors from a bio, splitting if necessary + * @bio: bio to split + * @sectors: number of sectors to split from the front of @bio + * @gfp: gfp mask + * @bs: bio set to allocate from + * + * Returns a bio representing the next @sectors of @bio - if the bio is smaller + * than @sectors, returns the original bio unchanged. 
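+ *
+ * Illustrative sketch, not part of the original header, loosely modelled
+ * on how stacking drivers cap the size of each submission:
+ *
+ *	split = bio_next_split(bio, max_sectors, GFP_NOIO, &bs);
+ *	if (split != bio) {
+ *		bio_chain(split, bio);
+ *		generic_make_request(bio);
+ *	}
+ *	bio = split;
+ *
+ * The remainder is resubmitted and only the front max_sectors worth is
+ * handled directly; max_sectors and bs are the caller's own values.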
+ */ +static inline struct bio *bio_next_split(struct bio *bio, int sectors, + gfp_t gfp, struct bio_set *bs) +{ + if (sectors >= bio_sectors(bio)) + return bio; + + return bio_split(bio, sectors, gfp, bs); +} + +enum { + BIOSET_NEED_BVECS = BIT(0), + BIOSET_NEED_RESCUER = BIT(1), +}; +extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); +extern void bioset_exit(struct bio_set *); +extern int biovec_init_pool(mempool_t *pool, int pool_entries); +extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); + +extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *); +extern void bio_put(struct bio *); + +extern void __bio_clone_fast(struct bio *, struct bio *); +extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); + +extern struct bio_set fs_bio_set; + +static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) +{ + return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); +} + +static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) +{ + return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL); +} + +extern blk_qc_t submit_bio(struct bio *); + +extern void bio_endio(struct bio *); + +static inline void bio_io_error(struct bio *bio) +{ + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); +} + +static inline void bio_wouldblock_error(struct bio *bio) +{ + bio->bi_status = BLK_STS_AGAIN; + bio_endio(bio); +} + +struct request_queue; +extern int bio_phys_segments(struct request_queue *, struct bio *); + +extern int submit_bio_wait(struct bio *bio); +extern void bio_advance(struct bio *, unsigned); + +extern void bio_init(struct bio *bio, struct bio_vec *table, + unsigned short max_vecs); +extern void bio_uninit(struct bio *); +extern void bio_reset(struct bio *); +void bio_chain(struct bio *, struct bio *); + +extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); +extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, + unsigned int, unsigned int); +bool __bio_try_merge_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int off); +void __bio_add_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int off); +int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); +struct rq_map_data; +extern struct bio *bio_map_user_iov(struct request_queue *, + struct iov_iter *, gfp_t); +extern void bio_unmap_user(struct bio *); +extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, + gfp_t); +extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, + gfp_t, int); +extern void bio_set_pages_dirty(struct bio *bio); +extern void bio_check_pages_dirty(struct bio *bio); + +void generic_start_io_acct(struct request_queue *q, int op, + unsigned long sectors, struct hd_struct *part); +void generic_end_io_acct(struct request_queue *q, int op, + struct hd_struct *part, + unsigned long start_time); + +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" +#endif +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +extern void bio_flush_dcache_pages(struct bio *bi); +#else +static inline void bio_flush_dcache_pages(struct bio *bi) +{ +} +#endif + +extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, + struct bio *src, struct bvec_iter *src_iter); +extern void bio_copy_data(struct bio *dst, struct bio *src); +extern void bio_list_copy_data(struct bio *dst, struct bio *src); +extern void 
bio_free_pages(struct bio *bio); + +extern struct bio *bio_copy_user_iov(struct request_queue *, + struct rq_map_data *, + struct iov_iter *, + gfp_t); +extern int bio_uncopy_user(struct bio *); +void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); + +static inline void zero_fill_bio(struct bio *bio) +{ + zero_fill_bio_iter(bio, bio->bi_iter); +} + +extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); +extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); +extern unsigned int bvec_nr_vecs(unsigned short idx); +extern const char *bio_devname(struct bio *bio, char *buffer); + +#define bio_set_dev(bio, bdev) \ +do { \ + if ((bio)->bi_disk != (bdev)->bd_disk) \ + bio_clear_flag(bio, BIO_THROTTLED);\ + (bio)->bi_disk = (bdev)->bd_disk; \ + (bio)->bi_partno = (bdev)->bd_partno; \ +} while (0) + +#define bio_copy_dev(dst, src) \ +do { \ + (dst)->bi_disk = (src)->bi_disk; \ + (dst)->bi_partno = (src)->bi_partno; \ +} while (0) + +#define bio_dev(bio) \ + disk_devt((bio)->bi_disk) + +#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) +int bio_associate_blkcg_from_page(struct bio *bio, struct page *page); +#else +static inline int bio_associate_blkcg_from_page(struct bio *bio, + struct page *page) { return 0; } +#endif + +#ifdef CONFIG_BLK_CGROUP +int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); +int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg); +void bio_disassociate_task(struct bio *bio); +void bio_clone_blkcg_association(struct bio *dst, struct bio *src); +#else /* CONFIG_BLK_CGROUP */ +static inline int bio_associate_blkcg(struct bio *bio, + struct cgroup_subsys_state *blkcg_css) { return 0; } +static inline void bio_disassociate_task(struct bio *bio) { } +static inline void bio_clone_blkcg_association(struct bio *dst, + struct bio *src) { } +#endif /* CONFIG_BLK_CGROUP */ + +#ifdef CONFIG_HIGHMEM +/* + * remember never ever reenable interrupts between a bvec_kmap_irq and + * bvec_kunmap_irq! + */ +static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) +{ + unsigned long addr; + + /* + * might not be a highmem page, but the preempt/irq count + * balancing is a lot nicer this way + */ + local_irq_save(*flags); + addr = (unsigned long) kmap_atomic(bvec->bv_page); + + BUG_ON(addr & ~PAGE_MASK); + + return (char *) addr + bvec->bv_offset; +} + +static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) +{ + unsigned long ptr = (unsigned long) buffer & PAGE_MASK; + + kunmap_atomic((void *) ptr); + local_irq_restore(*flags); +} + +#else +static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) +{ + return page_address(bvec->bv_page) + bvec->bv_offset; +} + +static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) +{ + *flags = 0; +} +#endif + +/* + * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. + * + * A bio_list anchors a singly-linked list of bios chained through the bi_next + * member of the bio. The bio_list also caches the last list member to allow + * fast access to the tail. 
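+ *
+ * Minimal usage sketch (illustrative, not part of the original header;
+ * process_one() is a hypothetical handler):
+ *
+ *	struct bio_list list;
+ *
+ *	bio_list_init(&list);
+ *	bio_list_add(&list, bio);
+ *	while ((bio = bio_list_pop(&list)))
+ *		process_one(bio);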
+ */ +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +static inline int bio_list_empty(const struct bio_list *bl) +{ + return bl->head == NULL; +} + +static inline void bio_list_init(struct bio_list *bl) +{ + bl->head = bl->tail = NULL; +} + +#define BIO_EMPTY_LIST { NULL, NULL } + +#define bio_list_for_each(bio, bl) \ + for (bio = (bl)->head; bio; bio = bio->bi_next) + +static inline unsigned bio_list_size(const struct bio_list *bl) +{ + unsigned sz = 0; + struct bio *bio; + + bio_list_for_each(bio, bl) + sz++; + + return sz; +} + +static inline void bio_list_add(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = NULL; + + if (bl->tail) + bl->tail->bi_next = bio; + else + bl->head = bio; + + bl->tail = bio; +} + +static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = bl->head; + + bl->head = bio; + + if (!bl->tail) + bl->tail = bio; +} + +static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->tail) + bl->tail->bi_next = bl2->head; + else + bl->head = bl2->head; + + bl->tail = bl2->tail; +} + +static inline void bio_list_merge_head(struct bio_list *bl, + struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->head) + bl2->tail->bi_next = bl->head; + else + bl->tail = bl2->tail; + + bl->head = bl2->head; +} + +static inline struct bio *bio_list_peek(struct bio_list *bl) +{ + return bl->head; +} + +static inline struct bio *bio_list_pop(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + if (bio) { + bl->head = bl->head->bi_next; + if (!bl->head) + bl->tail = NULL; + + bio->bi_next = NULL; + } + + return bio; +} + +static inline struct bio *bio_list_get(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + bl->head = bl->tail = NULL; + + return bio; +} + +/* + * Increment chain count for the bio. Make sure the CHAIN flag update + * is visible before the raised count. + */ +static inline void bio_inc_remaining(struct bio *bio) +{ + bio_set_flag(bio, BIO_CHAIN); + smp_mb__before_atomic(); + atomic_inc(&bio->__bi_remaining); +} + +/* + * bio_set is used to allow other portions of the IO system to + * allocate their own private memory pools for bio and iovec structures. + * These memory pools in turn all allocate from the bio_slab + * and the bvec_slabs[]. + */ +#define BIO_POOL_SIZE 2 + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + + mempool_t bio_pool; + mempool_t bvec_pool; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + mempool_t bio_integrity_pool; + mempool_t bvec_integrity_pool; +#endif + + /* + * Deadlock avoidance for stacking block drivers: see comments in + * bio_alloc_bioset() for details + */ + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + struct workqueue_struct *rescue_workqueue; +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +static inline bool bioset_initialized(struct bio_set *bs) +{ + return bs->bio_slab != NULL; +} + +/* + * a small number of entries is fine, not going to be performance critical. 
+ * basically we just need to survive + */ +#define BIO_SPLIT_ENTRIES 2 + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +#define bip_for_each_vec(bvl, bip, iter) \ + for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) + +#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \ + for_each_bio(_bio) \ + bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) + +extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); +extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); +extern bool bio_integrity_prep(struct bio *); +extern void bio_integrity_advance(struct bio *, unsigned int); +extern void bio_integrity_trim(struct bio *); +extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); +extern int bioset_integrity_create(struct bio_set *, int); +extern void bioset_integrity_free(struct bio_set *); +extern void bio_integrity_init(void); + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +static inline void *bio_integrity(struct bio *bio) +{ + return NULL; +} + +static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) +{ + return 0; +} + +static inline void bioset_integrity_free (struct bio_set *bs) +{ + return; +} + +static inline bool bio_integrity_prep(struct bio *bio) +{ + return true; +} + +static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, + gfp_t gfp_mask) +{ + return 0; +} + +static inline void bio_integrity_advance(struct bio *bio, + unsigned int bytes_done) +{ + return; +} + +static inline void bio_integrity_trim(struct bio *bio) +{ + return; +} + +static inline void bio_integrity_init(void) +{ + return; +} + +static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) +{ + return false; +} + +static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp, + unsigned int nr) +{ + return ERR_PTR(-EINVAL); +} + +static inline int bio_integrity_add_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int offset) +{ + return 0; +} + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +#endif /* CONFIG_BLOCK */ +#endif /* __LINUX_BIO_H */ diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h new file mode 100644 index 000000000..bbc4730a6 --- /dev/null +++ b/include/linux/bit_spinlock.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BIT_SPINLOCK_H +#define __LINUX_BIT_SPINLOCK_H + +#include +#include +#include +#include + +/* + * bit-based spin_lock() + * + * Don't use this unless you really need to: spin_lock() and spin_unlock() + * are significantly faster. + */ +static inline void bit_spin_lock(int bitnum, unsigned long *addr) +{ + /* + * Assuming the lock is uncontended, this never enters + * the body of the outer loop. If it is contended, then + * within the inner loop a non-atomic test is used to + * busywait with less bus contention for a good time to + * attempt to acquire the lock bit. 
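+ *
+ * (A hypothetical caller serializing updates to a flags word would wrap
+ * its critical section as
+ *
+ *	bit_spin_lock(MY_LOCK_BIT, &obj->flags);
+ *	...modify obj->flags or the data it protects...
+ *	bit_spin_unlock(MY_LOCK_BIT, &obj->flags);
+ *
+ * where MY_LOCK_BIT and obj are placeholders, not part of this header.)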
+ */ + preempt_disable(); +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + while (unlikely(test_and_set_bit_lock(bitnum, addr))) { + preempt_enable(); + do { + cpu_relax(); + } while (test_bit(bitnum, addr)); + preempt_disable(); + } +#endif + __acquire(bitlock); +} + +/* + * Return true if it was acquired + */ +static inline int bit_spin_trylock(int bitnum, unsigned long *addr) +{ + preempt_disable(); +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + if (unlikely(test_and_set_bit_lock(bitnum, addr))) { + preempt_enable(); + return 0; + } +#endif + __acquire(bitlock); + return 1; +} + +/* + * bit-based spin_unlock() + */ +static inline void bit_spin_unlock(int bitnum, unsigned long *addr) +{ +#ifdef CONFIG_DEBUG_SPINLOCK + BUG_ON(!test_bit(bitnum, addr)); +#endif +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + clear_bit_unlock(bitnum, addr); +#endif + preempt_enable(); + __release(bitlock); +} + +/* + * bit-based spin_unlock() + * non-atomic version, which can be used eg. if the bit lock itself is + * protecting the rest of the flags in the word. + */ +static inline void __bit_spin_unlock(int bitnum, unsigned long *addr) +{ +#ifdef CONFIG_DEBUG_SPINLOCK + BUG_ON(!test_bit(bitnum, addr)); +#endif +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + __clear_bit_unlock(bitnum, addr); +#endif + preempt_enable(); + __release(bitlock); +} + +/* + * Return true if the lock is held. + */ +static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) +{ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + return test_bit(bitnum, addr); +#elif defined CONFIG_PREEMPT_COUNT + return preempt_count(); +#else + return 1; +#endif +} + +#endif /* __LINUX_BIT_SPINLOCK_H */ + diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h new file mode 100644 index 000000000..775cd10c0 --- /dev/null +++ b/include/linux/bitfield.h @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2004 - 2009 Ivo van Doorn + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_BITFIELD_H +#define _LINUX_BITFIELD_H + +#include +#include + +/* + * Bitfield access macros + * + * FIELD_{GET,PREP} macros take as first parameter shifted mask + * from which they extract the base mask and shift amount. + * Mask must be a compilation time constant. + * + * Example: + * + * #define REG_FIELD_A GENMASK(6, 0) + * #define REG_FIELD_B BIT(7) + * #define REG_FIELD_C GENMASK(15, 8) + * #define REG_FIELD_D GENMASK(31, 16) + * + * Get: + * a = FIELD_GET(REG_FIELD_A, reg); + * b = FIELD_GET(REG_FIELD_B, reg); + * + * Set: + * reg = FIELD_PREP(REG_FIELD_A, 1) | + * FIELD_PREP(REG_FIELD_B, 0) | + * FIELD_PREP(REG_FIELD_C, c) | + * FIELD_PREP(REG_FIELD_D, 0x40); + * + * Modify: + * reg &= ~REG_FIELD_C; + * reg |= FIELD_PREP(REG_FIELD_C, c); + */ + +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ + ({ \ + BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ + _pfx "mask is not constant"); \ + BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \ + BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? 
\ + ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \ + _pfx "value too large for the field"); \ + BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \ + _pfx "type of reg too small for mask"); \ + __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ + (1ULL << __bf_shf(_mask))); \ + }) + +/** + * FIELD_FIT() - check if value fits in the field + * @_mask: shifted mask defining the field's length and position + * @_val: value to test against the field + * + * Return: true if @_val can fit inside @_mask, false if @_val is too big. + */ +#define FIELD_FIT(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ + !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ + }) + +/** + * FIELD_PREP() - prepare a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_val: value to put in the field + * + * FIELD_PREP() masks and shifts up the value. The result should + * be combined with other fields of the bitfield using logical OR. + */ +#define FIELD_PREP(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ + ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ + }) + +/** + * FIELD_GET() - extract a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_reg: value of entire bitfield + * + * FIELD_GET() extracts the field specified by @_mask from the + * bitfield passed in as @_reg by masking and shifting it down. + */ +#define FIELD_GET(_mask, _reg) \ + ({ \ + __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ + (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ + }) + +extern void __compiletime_error("value doesn't fit into mask") +__field_overflow(void); +extern void __compiletime_error("bad bitfield mask") +__bad_mask(void); +static __always_inline u64 field_multiplier(u64 field) +{ + if ((field | (field - 1)) & ((field | (field - 1)) + 1)) + __bad_mask(); + return field & -field; +} +static __always_inline u64 field_mask(u64 field) +{ + return field / field_multiplier(field); +} +#define ____MAKE_OP(type,base,to,from) \ +static __always_inline __##type type##_encode_bits(base v, base field) \ +{ \ + if (__builtin_constant_p(v) && (v & ~field_mask(field))) \ + __field_overflow(); \ + return to((v & field_mask(field)) * field_multiplier(field)); \ +} \ +static __always_inline __##type type##_replace_bits(__##type old, \ + base val, base field) \ +{ \ + return (old & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline void type##p_replace_bits(__##type *p, \ + base val, base field) \ +{ \ + *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline base type##_get_bits(__##type v, base field) \ +{ \ + return (from(v) & field)/field_multiplier(field); \ +} +#define __MAKE_OP(size) \ + ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ + ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ + ____MAKE_OP(u##size,u##size,,) +____MAKE_OP(u8,u8,,) +__MAKE_OP(16) +__MAKE_OP(32) +__MAKE_OP(64) +#undef __MAKE_OP +#undef ____MAKE_OP + +#endif diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h new file mode 100644 index 000000000..b71a033c7 --- /dev/null +++ b/include/linux/bitmap.h @@ -0,0 +1,482 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BITMAP_H +#define __LINUX_BITMAP_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +/* + * bitmaps provide bit arrays that consume one or more unsigned + * longs. 
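+ * For instance, a hypothetical user tracking 128 slot ids (slot_map is a
+ * placeholder name) could do:
+ *
+ *	DECLARE_BITMAP(slot_map, 128);
+ *
+ *	bitmap_zero(slot_map, 128);
+ *	set_bit(3, slot_map);
+ *	if (test_bit(3, slot_map))
+ *		...
+ *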
The bitmap interface and available operations are listed + * here, in bitmap.h + * + * Function implementations generic to all architectures are in + * lib/bitmap.c. Functions implementations that are architecture + * specific are in various include/asm-/bitops.h headers + * and other arch/ specific files. + * + * See lib/bitmap.c for more details. + */ + +/** + * DOC: bitmap overview + * + * The available bitmap operations and their rough meaning in the + * case that the bitmap is a single unsigned long are thus: + * + * Note that nbits should be always a compile time evaluable constant. + * Otherwise many inlines will generate horrible code. + * + * :: + * + * bitmap_zero(dst, nbits) *dst = 0UL + * bitmap_fill(dst, nbits) *dst = ~0UL + * bitmap_copy(dst, src, nbits) *dst = *src + * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 + * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 + * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 + * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) + * bitmap_complement(dst, src, nbits) *dst = ~(*src) + * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? + * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? + * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? + * bitmap_empty(src, nbits) Are all bits zero in *src? + * bitmap_full(src, nbits) Are all bits set in *src? + * bitmap_weight(src, nbits) Hamming Weight: number set bits + * bitmap_set(dst, pos, nbits) Set specified bit area + * bitmap_clear(dst, pos, nbits) Clear specified bit area + * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above + * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n + * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n + * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) + * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) + * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap + * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz + * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf + * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf + * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf + * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf + * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region + * bitmap_release_region(bitmap, pos, order) Free specified bit region + * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region + * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst + * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst + * + * Note, bitmap_zero() and bitmap_fill() operate over the region of + * unsigned longs, that is, bits behind bitmap till the unsigned long + * boundary will be zeroed or filled as well. Consider to use + * bitmap_clear() or bitmap_set() to make explicit zeroing or filling + * respectively. + */ + +/** + * DOC: bitmap bitops + * + * Also the following operations in asm/bitops.h apply to bitmaps.:: + * + * set_bit(bit, addr) *addr |= bit + * clear_bit(bit, addr) *addr &= ~bit + * change_bit(bit, addr) *addr ^= bit + * test_bit(bit, addr) Is bit set in *addr? 
+ * test_and_set_bit(bit, addr) Set bit and return old value + * test_and_clear_bit(bit, addr) Clear bit and return old value + * test_and_change_bit(bit, addr) Change bit and return old value + * find_first_zero_bit(addr, nbits) Position first zero bit in *addr + * find_first_bit(addr, nbits) Position first set bit in *addr + * find_next_zero_bit(addr, nbits, bit) + * Position next zero bit in *addr >= bit + * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit + * find_next_and_bit(addr1, addr2, nbits, bit) + * Same as find_next_bit, but in + * (*addr1 & *addr2) + * + */ + +/** + * DOC: declare bitmap + * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used + * to declare an array named 'name' of just enough unsigned longs to + * contain all bit positions from 0 to 'bits' - 1. + */ + +/* + * Allocation and deallocation of bitmap. + * Provided in lib/bitmap.c to avoid circular dependency. + */ +extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); +extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); +extern void bitmap_free(const unsigned long *bitmap); + +/* + * lib/bitmap.c provides these functions: + */ + +extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits); +extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits); +extern int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, + unsigned int nbits); +extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); +extern void __bitmap_set(unsigned long *map, unsigned int start, int len); +extern void __bitmap_clear(unsigned long *map, unsigned int start, int len); + +extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset); + +/** + * bitmap_find_next_zero_area - find a contiguous aligned zero area + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @align_mask: Alignment mask for zero area + * + * The @align_mask should be one less than a power of 2; the effect is that + * the bit offset of all zero areas this function finds is multiples of that + * power of 2. A @align_mask of 0 means no alignment is required. 
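+ *
+ * For example, a hypothetical allocator looking for 4 consecutive zero bits
+ * aligned to a multiple of 4 might do:
+ *
+ *	pos = bitmap_find_next_zero_area(map, size, 0, 4, 3);
+ *	if (pos < size)
+ *		bitmap_set(map, pos, 4);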
+ */ +static inline unsigned long +bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask) +{ + return bitmap_find_next_zero_area_off(map, size, start, nr, + align_mask, 0); +} + +extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, + unsigned long *dst, int nbits); +extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, + unsigned long *dst, int nbits); +extern int bitmap_parselist(const char *buf, unsigned long *maskp, + int nmaskbits); +extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, + unsigned long *dst, int nbits); +extern void bitmap_remap(unsigned long *dst, const unsigned long *src, + const unsigned long *old, const unsigned long *new, unsigned int nbits); +extern int bitmap_bitremap(int oldbit, + const unsigned long *old, const unsigned long *new, int bits); +extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, + const unsigned long *relmap, unsigned int bits); +extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, + unsigned int sz, unsigned int nbits); +extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); +extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); +extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); + +#ifdef __BIG_ENDIAN +extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); +#else +#define bitmap_copy_le bitmap_copy +#endif +extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); +extern int bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); + +#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) +#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) + +/* + * The static inlines below do not handle constant nbits==0 correctly, + * so make such users (should any ever turn up) call the out-of-line + * versions. + */ +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0) + +static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = 0UL; + else { + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} + +static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = ~0UL; + else { + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0xff, len); + } +} + +static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, + unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src; + else { + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memcpy(dst, src, len); + } +} + +/* + * Copy bitmap and clear tail bits in last word. + */ +static inline void bitmap_copy_clear_tail(unsigned long *dst, + const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} + +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. 
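+ *
+ * A minimal round-trip sketch through a u32 array (buf and bits are
+ * hypothetical caller-owned storage):
+ *
+ *	u32 buf[2];
+ *	DECLARE_BITMAP(bits, 64);
+ *
+ *	bitmap_to_arr32(buf, bits, 64);
+ *	bitmap_from_arr32(bits, buf, 64);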
+ */ +#if BITS_PER_LONG == 64 +extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, + unsigned int nbits); +extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, + unsigned int nbits); +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#define bitmap_to_arr32(buf, bitmap, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (buf), \ + (const unsigned long *) (bitmap), (nbits)) +#endif + +static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0; + return __bitmap_and(dst, src1, src2, nbits); +} + +static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src1 | *src2; + else + __bitmap_or(dst, src1, src2, nbits); +} + +static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src1 ^ *src2; + else + __bitmap_xor(dst, src1, src2, nbits); +} + +static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; + return __bitmap_andnot(dst, src1, src2, nbits); +} + +static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, + unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = ~(*src); + else + __bitmap_complement(dst, src, nbits); +} + +#ifdef __LITTLE_ENDIAN +#define BITMAP_MEM_ALIGNMENT 8 +#else +#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long)) +#endif +#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1) + +static inline int bitmap_equal(const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); + if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) && + IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) + return !memcmp(src1, src2, nbits / 8); + return __bitmap_equal(src1, src2, nbits); +} + +static inline int bitmap_intersects(const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; + else + return __bitmap_intersects(src1, src2, nbits); +} + +static inline int bitmap_subset(const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_subset(src1, src2, nbits); +} + +static inline int bitmap_empty(const unsigned long *src, unsigned nbits) +{ + if (small_const_nbits(nbits)) + return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); + + return find_first_bit(src, nbits) == nbits; +} + +static inline int bitmap_full(const unsigned long *src, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return ! 
(~(*src) & BITMAP_LAST_WORD_MASK(nbits)); + + return find_first_zero_bit(src, nbits) == nbits; +} + +static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); + return __bitmap_weight(src, nbits); +} + +static __always_inline void bitmap_set(unsigned long *map, unsigned int start, + unsigned int nbits) +{ + if (__builtin_constant_p(nbits) && nbits == 1) + __set_bit(start, map); + else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && + IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && + __builtin_constant_p(nbits & BITMAP_MEM_MASK) && + IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) + memset((char *)map + start / 8, 0xff, nbits / 8); + else + __bitmap_set(map, start, nbits); +} + +static __always_inline void bitmap_clear(unsigned long *map, unsigned int start, + unsigned int nbits) +{ + if (__builtin_constant_p(nbits) && nbits == 1) + __clear_bit(start, map); + else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && + IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && + __builtin_constant_p(nbits & BITMAP_MEM_MASK) && + IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) + memset((char *)map + start / 8, 0, nbits / 8); + else + __bitmap_clear(map, start, nbits); +} + +static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift; + else + __bitmap_shift_right(dst, src, shift, nbits); +} + +static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits); + else + __bitmap_shift_left(dst, src, shift, nbits); +} + +static inline int bitmap_parse(const char *buf, unsigned int buflen, + unsigned long *maskp, int nmaskbits) +{ + return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); +} + +/** + * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap. + * @n: u64 value + * + * Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit + * integers in 32-bit environment, and 64-bit integers in 64-bit one. + * + * There are four combinations of endianness and length of the word in linux + * ABIs: LE64, BE64, LE32 and BE32. + * + * On 64-bit kernels 64-bit LE and BE numbers are naturally ordered in + * bitmaps and therefore don't require any special handling. + * + * On 32-bit kernels 32-bit LE ABI orders lo word of 64-bit number in memory + * prior to hi, and 32-bit BE orders hi word prior to lo. The bitmap on the + * other hand is represented as an array of 32-bit words and the position of + * bit N may therefore be calculated as: word #(N/32) and bit #(N%32) in that + * word. For example, bit #42 is located at 10th position of 2nd word. + * It matches 32-bit LE ABI, and we can simply let the compiler store 64-bit + * values in memory as it usually does. But for BE we need to swap hi and lo + * words manually. + * + * With all that, the macro BITMAP_FROM_U64() does explicit reordering of hi and + * lo parts of u64. For LE32 it does nothing, and for BE environment it swaps + * hi and lo words, as is expected by bitmap. + */ +#if __BITS_PER_LONG == 64 +#define BITMAP_FROM_U64(n) (n) +#else +#define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \ + ((unsigned long) ((u64)(n) >> 32)) +#endif + +/** + * bitmap_from_u64 - Check and swap words within u64. 
+ * @mask: source bitmap + * @dst: destination bitmap + * + * In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]`` + * to read u64 mask, we will get the wrong word. + * That is ``(u32 *)(&val)[0]`` gets the upper 32 bits, + * but we expect the lower 32-bits of u64. + */ +static inline void bitmap_from_u64(unsigned long *dst, u64 mask) +{ + dst[0] = mask & ULONG_MAX; + + if (sizeof(mask) > sizeof(unsigned long)) + dst[1] = mask >> 32; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __LINUX_BITMAP_H */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h new file mode 100644 index 000000000..5c1522ed2 --- /dev/null +++ b/include/linux/bitops.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BITOPS_H +#define _LINUX_BITOPS_H +#include +#include + +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) + +extern unsigned int __sw_hweight8(unsigned int w); +extern unsigned int __sw_hweight16(unsigned int w); +extern unsigned int __sw_hweight32(unsigned int w); +extern unsigned long __sw_hweight64(__u64 w); + +/* + * Include this here because some architectures need generic_ffs/fls in + * scope + */ +#include + +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + +/* same as for_each_set_bit() but use bit as value to start with */ +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + +#define for_each_clear_bit(bit, addr, size) \ + for ((bit) = find_first_zero_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) + +/* same as for_each_clear_bit() but use bit as value to start with */ +#define for_each_clear_bit_from(bit, addr, size) \ + for ((bit) = find_next_zero_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) + +static inline int get_bitmask_order(unsigned int count) +{ + int order; + + order = fls(count); + return order; /* We could be slightly more clever with -1 here... */ +} + +static __always_inline unsigned long hweight_long(unsigned long w) +{ + return sizeof(w) == 4 ? 
hweight32(w) : hweight64((__u64)w); +} + +/** + * rol64 - rotate a 64-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u64 rol64(__u64 word, unsigned int shift) +{ + return (word << (shift & 63)) | (word >> ((-shift) & 63)); +} + +/** + * ror64 - rotate a 64-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u64 ror64(__u64 word, unsigned int shift) +{ + return (word >> (shift & 63)) | (word << ((-shift) & 63)); +} + +/** + * rol32 - rotate a 32-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u32 rol32(__u32 word, unsigned int shift) +{ + return (word << (shift & 31)) | (word >> ((-shift) & 31)); +} + +/** + * ror32 - rotate a 32-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u32 ror32(__u32 word, unsigned int shift) +{ + return (word >> (shift & 31)) | (word << ((-shift) & 31)); +} + +/** + * rol16 - rotate a 16-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u16 rol16(__u16 word, unsigned int shift) +{ + return (word << (shift & 15)) | (word >> ((-shift) & 15)); +} + +/** + * ror16 - rotate a 16-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u16 ror16(__u16 word, unsigned int shift) +{ + return (word >> (shift & 15)) | (word << ((-shift) & 15)); +} + +/** + * rol8 - rotate an 8-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u8 rol8(__u8 word, unsigned int shift) +{ + return (word << (shift & 7)) | (word >> ((-shift) & 7)); +} + +/** + * ror8 - rotate an 8-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u8 ror8(__u8 word, unsigned int shift) +{ + return (word >> (shift & 7)) | (word << ((-shift) & 7)); +} + +/** + * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit + * @value: value to sign extend + * @index: 0 based bit index (0<=index<32) to sign bit + * + * This is safe to use for 16- and 8-bit types as well. + */ +static inline __s32 sign_extend32(__u32 value, int index) +{ + __u8 shift = 31 - index; + return (__s32)(value << shift) >> shift; +} + +/** + * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit + * @value: value to sign extend + * @index: 0 based bit index (0<=index<64) to sign bit + */ +static inline __s64 sign_extend64(__u64 value, int index) +{ + __u8 shift = 63 - index; + return (__s64)(value << shift) >> shift; +} + +static inline unsigned fls_long(unsigned long l) +{ + if (sizeof(l) == 4) + return fls(l); + return fls64(l); +} + +static inline int get_count_order(unsigned int count) +{ + int order; + + order = fls(count) - 1; + if (count & (count - 1)) + order++; + return order; +} + +/** + * get_count_order_long - get order after rounding @l up to power of 2 + * @l: parameter + * + * it is same as get_count_order() but with long type parameter + */ +static inline int get_count_order_long(unsigned long l) +{ + if (l == 0UL) + return -1; + else if (l & (l - 1UL)) + return (int)fls_long(l); + else + return (int)fls_long(l) - 1; +} + +/** + * __ffs64 - find first set bit in a 64 bit word + * @word: The 64 bit word + * + * On 64 bit arches this is a synomyn for __ffs + * The result is not defined if no bits are set, so check that @word + * is non-zero before calling this. 
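+ *
+ * For example, __ffs64(0x8ULL) is 3 and __ffs64(1ULL << 40) is 40.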
+ */ +static inline unsigned long __ffs64(u64 word) +{ +#if BITS_PER_LONG == 32 + if (((u32)word) == 0UL) + return __ffs((u32)(word >> 32)) + 32; +#elif BITS_PER_LONG != 64 +#error BITS_PER_LONG not 32 or 64 +#endif + return __ffs((unsigned long)word); +} + +/** + * assign_bit - Assign value to a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * @value: the value to assign + */ +static __always_inline void assign_bit(long nr, volatile unsigned long *addr, + bool value) +{ + if (value) + set_bit(nr, addr); + else + clear_bit(nr, addr); +} + +static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, + bool value) +{ + if (value) + __set_bit(nr, addr); + else + __clear_bit(nr, addr); +} + +#ifdef __KERNEL__ + +#ifndef set_mask_bits +#define set_mask_bits(ptr, mask, bits) \ +({ \ + const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \ + typeof(*(ptr)) old__, new__; \ + \ + do { \ + old__ = READ_ONCE(*(ptr)); \ + new__ = (old__ & ~mask__) | bits__; \ + } while (cmpxchg(ptr, old__, new__) != old__); \ + \ + new__; \ +}) +#endif + +#ifndef bit_clear_unless +#define bit_clear_unless(ptr, _clear, _test) \ +({ \ + const typeof(*ptr) clear = (_clear), test = (_test); \ + typeof(*ptr) old, new; \ + \ + do { \ + old = READ_ONCE(*ptr); \ + new = old & ~clear; \ + } while (!(old & test) && \ + cmpxchg(ptr, old, new) != old); \ + \ + !(old & test); \ +}) +#endif + +#ifndef find_last_bit +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The number of bits to search + * + * Returns the bit number of the last set bit, or size. + */ +extern unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); +#endif + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h new file mode 100644 index 000000000..d35b8ec1c --- /dev/null +++ b/include/linux/bitrev.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BITREV_H +#define _LINUX_BITREV_H + +#include + +#ifdef CONFIG_HAVE_ARCH_BITREVERSE +#include + +#define __bitrev32 __arch_bitrev32 +#define __bitrev16 __arch_bitrev16 +#define __bitrev8 __arch_bitrev8 + +#else +extern u8 const byte_rev_table[256]; +static inline u8 __bitrev8(u8 byte) +{ + return byte_rev_table[byte]; +} + +static inline u16 __bitrev16(u16 x) +{ + return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8); +} + +static inline u32 __bitrev32(u32 x) +{ + return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16); +} + +#endif /* CONFIG_HAVE_ARCH_BITREVERSE */ + +#define __bitrev8x4(x) (__bitrev32(swab32(x))) + +#define __constant_bitrev32(x) \ +({ \ + u32 ___x = x; \ + ___x = (___x >> 16) | (___x << 16); \ + ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \ + ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ + ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ + ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ + ___x; \ +}) + +#define __constant_bitrev16(x) \ +({ \ + u16 ___x = x; \ + ___x = (___x >> 8) | (___x << 8); \ + ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \ + ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \ + ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \ + ___x; \ +}) + +#define __constant_bitrev8x4(x) \ +({ \ + u32 ___x = x; \ + ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & 
(u32)0x0F0F0F0FUL) << 4); \ + ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ + ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ + ___x; \ +}) + +#define __constant_bitrev8(x) \ +({ \ + u8 ___x = x; \ + ___x = (___x >> 4) | (___x << 4); \ + ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \ + ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \ + ___x; \ +}) + +#define bitrev32(x) \ +({ \ + u32 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev32(__x) : \ + __bitrev32(__x); \ +}) + +#define bitrev16(x) \ +({ \ + u16 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev16(__x) : \ + __bitrev16(__x); \ + }) + +#define bitrev8x4(x) \ +({ \ + u32 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev8x4(__x) : \ + __bitrev8x4(__x); \ + }) + +#define bitrev8(x) \ +({ \ + u8 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev8(__x) : \ + __bitrev8(__x) ; \ + }) +#endif /* _LINUX_BITREV_H */ diff --git a/include/linux/bits.h b/include/linux/bits.h new file mode 100644 index 000000000..2b7b532c1 --- /dev/null +++ b/include/linux/bits.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BITS_H +#define __LINUX_BITS_H +#include + +#define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) +#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) +#define BITS_PER_BYTE 8 + +/* + * Create a contiguous bitmask starting at bit position @l and ending at + * position @h. For example + * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. + */ +#define GENMASK(h, l) \ + (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + +#define GENMASK_ULL(h, l) \ + (((~0ULL) - (1ULL << (l)) + 1) & \ + (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#endif /* __LINUX_BITS_H */ diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h new file mode 100644 index 000000000..8f1be8b49 --- /dev/null +++ b/include/linux/blk-cgroup.h @@ -0,0 +1,973 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BLK_CGROUP_H +#define _BLK_CGROUP_H +/* + * Common Block IO controller cgroup interface + * + * Based on ideas and code from CFQ, CFS and BFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Copyright (C) 2009 Vivek Goyal + * Nauman Rafique + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */ +#define BLKG_STAT_CPU_BATCH (INT_MAX / 2) + +/* Max limits for throttle policy */ +#define THROTL_IOPS_MAX UINT_MAX + +#ifdef CONFIG_BLK_CGROUP + +enum blkg_rwstat_type { + BLKG_RWSTAT_READ, + BLKG_RWSTAT_WRITE, + BLKG_RWSTAT_SYNC, + BLKG_RWSTAT_ASYNC, + BLKG_RWSTAT_DISCARD, + + BLKG_RWSTAT_NR, + BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR, +}; + +struct blkcg_gq; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + + struct radix_tree_root blkg_tree; + struct blkcg_gq __rcu *blkg_hint; + struct hlist_head blkg_list; + + struct blkcg_policy_data *cpd[BLKCG_MAX_POLS]; + + struct list_head all_blkcgs_node; +#ifdef CONFIG_CGROUP_WRITEBACK + struct list_head cgwb_list; + refcount_t cgwb_refcnt; +#endif +}; + +/* + * blkg_[rw]stat->aux_cnt is excluded for local stats but included for + * recursive. 
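+ * (A policy would typically update these counters through the helpers
+ * further down in this header, e.g. a hypothetical per-policy counter
+ * pd->foo_stat bumped with blkg_stat_add(&pd->foo_stat, 1) and reported
+ * with blkg_stat_read(&pd->foo_stat).)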
Used to carry stats of dead children, and, for blkg_rwstat, + * to carry result values from read and sum operations. + */ +struct blkg_stat { + struct percpu_counter cpu_cnt; + atomic64_t aux_cnt; +}; + +struct blkg_rwstat { + struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR]; + atomic64_t aux_cnt[BLKG_RWSTAT_NR]; +}; + +/* + * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a + * request_queue (q). This is used by blkcg policies which need to track + * information per blkcg - q pair. + * + * There can be multiple active blkcg policies and each blkg:policy pair is + * represented by a blkg_policy_data which is allocated and freed by each + * policy's pd_alloc/free_fn() methods. A policy can allocate private data + * area by allocating larger data structure which embeds blkg_policy_data + * at the beginning. + */ +struct blkg_policy_data { + /* the blkg and policy id this per-policy data belongs to */ + struct blkcg_gq *blkg; + int plid; +}; + +/* + * Policies that need to keep per-blkcg data which is independent from any + * request_queue associated to it should implement cpd_alloc/free_fn() + * methods. A policy can allocate private data area by allocating larger + * data structure which embeds blkcg_policy_data at the beginning. + * cpd_init() is invoked to let each policy handle per-blkcg data. + */ +struct blkcg_policy_data { + /* the blkcg and policy id this per-policy data belongs to */ + struct blkcg *blkcg; + int plid; +}; + +/* association between a blk cgroup and a request queue */ +struct blkcg_gq { + /* Pointer to the associated request_queue */ + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + + /* + * Each blkg gets congested separately and the congestion state is + * propagated to the matching bdi_writeback_congested. + */ + struct bdi_writeback_congested *wb_congested; + + /* all non-root blkcg_gq's are guaranteed to have access to parent */ + struct blkcg_gq *parent; + + /* request allocation list for this blkcg-q pair */ + struct request_list rl; + + /* reference count */ + atomic_t refcnt; + + /* is this blkg online? 
protected by both blkcg and q locks */ + bool online; + + struct blkg_rwstat stat_bytes; + struct blkg_rwstat stat_ios; + + struct blkg_policy_data *pd[BLKCG_MAX_POLS]; + + struct rcu_head rcu_head; + + atomic_t use_delay; + atomic64_t delay_nsec; + atomic64_t delay_start; + u64 last_delay; + int last_use; +}; + +typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); +typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd); +typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd); +typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd); +typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node); +typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); +typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf, + size_t size); + +struct blkcg_policy { + int plid; + /* cgroup files for the policy */ + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + + /* operations */ + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_init_cpd_fn *cpd_init_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_bind_cpd_fn *cpd_bind_fn; + + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +extern struct blkcg blkcg_root; +extern struct cgroup_subsys_state * const blkcg_root_css; + +struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, + struct request_queue *q, bool update_hint); +struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q); +int blkcg_init_queue(struct request_queue *q); +void blkcg_drain_queue(struct request_queue *q); +void blkcg_exit_queue(struct request_queue *q); + +/* Blkio controller policy registration */ +int blkcg_policy_register(struct blkcg_policy *pol); +void blkcg_policy_unregister(struct blkcg_policy *pol); +int blkcg_activate_policy(struct request_queue *q, + const struct blkcg_policy *pol); +void blkcg_deactivate_policy(struct request_queue *q, + const struct blkcg_policy *pol); + +const char *blkg_dev_name(struct blkcg_gq *blkg); +void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, + u64 (*prfill)(struct seq_file *, + struct blkg_policy_data *, int), + const struct blkcg_policy *pol, int data, + bool show_total); +u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); +u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, + const struct blkg_rwstat *rwstat); +u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off); +u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, + int off); +int blkg_print_stat_bytes(struct seq_file *sf, void *v); +int blkg_print_stat_ios(struct seq_file *sf, void *v); +int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v); +int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v); + +u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg, + struct blkcg_policy *pol, int off); +struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, + struct blkcg_policy *pol, int off); + +struct 
blkg_conf_ctx { + struct gendisk *disk; + struct blkcg_gq *blkg; + char *body; +}; + +int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + char *input, struct blkg_conf_ctx *ctx); +void blkg_conf_finish(struct blkg_conf_ctx *ctx); + + +static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct blkcg, css) : NULL; +} + +static inline struct blkcg *bio_blkcg(struct bio *bio) +{ + struct cgroup_subsys_state *css; + + if (bio && bio->bi_css) + return css_to_blkcg(bio->bi_css); + css = kthread_blkcg(); + if (css) + return css_to_blkcg(css); + return css_to_blkcg(task_css(current, io_cgrp_id)); +} + +static inline bool blk_cgroup_congested(void) +{ + struct cgroup_subsys_state *css; + bool ret = false; + + rcu_read_lock(); + css = kthread_blkcg(); + if (!css) + css = task_css(current, io_cgrp_id); + while (css) { + if (atomic_read(&css->cgroup->congestion_count)) { + ret = true; + break; + } + css = css->parent; + } + rcu_read_unlock(); + return ret; +} + +/** + * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg + * @return: true if this bio needs to be submitted with the root blkg context. + * + * In order to avoid priority inversions we sometimes need to issue a bio as if + * it were attached to the root blkg, and then backcharge to the actual owning + * blkg. The idea is we do bio_blkcg() to look up the actual context for the + * bio and attach the appropriate blkg to the bio. Then we call this helper and + * if it is true run with the root blkg for that queue and then do any + * backcharging to the originating cgroup once the io is complete. + */ +static inline bool bio_issue_as_root_blkg(struct bio *bio) +{ + return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0; +} + +/** + * blkcg_parent - get the parent of a blkcg + * @blkcg: blkcg of interest + * + * Return the parent blkcg of @blkcg. Can be called anytime. + */ +static inline struct blkcg *blkcg_parent(struct blkcg *blkcg) +{ + return css_to_blkcg(blkcg->css.parent); +} + +/** + * __blkg_lookup - internal version of blkg_lookup() + * @blkcg: blkcg of interest + * @q: request_queue of interest + * @update_hint: whether to update lookup hint with the result or not + * + * This is internal version and shouldn't be used by policy + * implementations. Looks up blkgs for the @blkcg - @q pair regardless of + * @q's bypass state. If @update_hint is %true, the caller should be + * holding @q->queue_lock and lookup hint is updated on success. + */ +static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, + struct request_queue *q, + bool update_hint) +{ + struct blkcg_gq *blkg; + + if (blkcg == &blkcg_root) + return q->root_blkg; + + blkg = rcu_dereference(blkcg->blkg_hint); + if (blkg && blkg->q == q) + return blkg; + + return blkg_lookup_slowpath(blkcg, q, update_hint); +} + +/** + * blkg_lookup - lookup blkg for the specified blkcg - q pair + * @blkcg: blkcg of interest + * @q: request_queue of interest + * + * Lookup blkg for the @blkcg - @q pair. This function should be called + * under RCU read lock and is guaranteed to return %NULL if @q is bypassing + * - see blk_queue_bypass_start() for details. 
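+ *
+ * A minimal lookup sketch (hypothetical caller; @blkcg and @q are supplied
+ * by that caller):
+ *
+ *	rcu_read_lock();
+ *	blkg = blkg_lookup(blkcg, q);
+ *	if (blkg)
+ *		...use blkg under RCU, or take a reference with blkg_get()...
+ *	rcu_read_unlock();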
+ */ +static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, + struct request_queue *q) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (unlikely(blk_queue_bypass(q))) + return NULL; + return __blkg_lookup(blkcg, q, false); +} + +/** + * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair + * @q: request_queue of interest + * + * Lookup blkg for @q at the root level. See also blkg_lookup(). + */ +static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) +{ + return q->root_blkg; +} + +/** + * blkg_to_pdata - get policy private data + * @blkg: blkg of interest + * @pol: policy of interest + * + * Return pointer to private data associated with the @blkg-@pol pair. + */ +static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, + struct blkcg_policy *pol) +{ + return blkg ? blkg->pd[pol->plid] : NULL; +} + +static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, + struct blkcg_policy *pol) +{ + return blkcg ? blkcg->cpd[pol->plid] : NULL; +} + +/** + * pdata_to_blkg - get blkg associated with policy private data + * @pd: policy private data of interest + * + * @pd is policy private data. Determine the blkg it's associated with. + */ +static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) +{ + return pd ? pd->blkg : NULL; +} + +static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd) +{ + return cpd ? cpd->blkcg : NULL; +} + +extern void blkcg_destroy_blkgs(struct blkcg *blkcg); + +#ifdef CONFIG_CGROUP_WRITEBACK + +/** + * blkcg_cgwb_get - get a reference for blkcg->cgwb_list + * @blkcg: blkcg of interest + * + * This is used to track the number of active wb's related to a blkcg. + */ +static inline void blkcg_cgwb_get(struct blkcg *blkcg) +{ + refcount_inc(&blkcg->cgwb_refcnt); +} + +/** + * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list + * @blkcg: blkcg of interest + * + * This is used to track the number of active wb's related to a blkcg. + * When this count goes to zero, all active wb has finished so the + * blkcg can continue destruction by calling blkcg_destroy_blkgs(). + * This work may occur in cgwb_release_workfn() on the cgwb_release + * workqueue. + */ +static inline void blkcg_cgwb_put(struct blkcg *blkcg) +{ + if (refcount_dec_and_test(&blkcg->cgwb_refcnt)) + blkcg_destroy_blkgs(blkcg); +} + +#else + +static inline void blkcg_cgwb_get(struct blkcg *blkcg) { } + +static inline void blkcg_cgwb_put(struct blkcg *blkcg) +{ + /* wb isn't being accounted, so trigger destruction right away */ + blkcg_destroy_blkgs(blkcg); +} + +#endif + +/** + * blkg_path - format cgroup path of blkg + * @blkg: blkg of interest + * @buf: target buffer + * @buflen: target buffer length + * + * Format the path of the cgroup of @blkg into @buf. + */ +static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) +{ + return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen); +} + +/** + * blkg_get - get a blkg reference + * @blkg: blkg to get + * + * The caller should be holding an existing reference. + */ +static inline void blkg_get(struct blkcg_gq *blkg) +{ + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + atomic_inc(&blkg->refcnt); +} + +/** + * blkg_try_get - try and get a blkg reference + * @blkg: blkg to get + * + * This is for use when doing an RCU lookup of the blkg. We may be in the midst + * of freeing this blkg, so we can only use it if the refcnt is not zero. 
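+ *
+ * A typical pattern (sketch) pairs a successful try-get with blkg_put()
+ * once the caller is done:
+ *
+ *	blkg = blkg_try_get(blkg);
+ *	if (blkg) {
+ *		...use blkg...
+ *		blkg_put(blkg);
+ *	}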
+ */ +static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg) +{ + if (atomic_inc_not_zero(&blkg->refcnt)) + return blkg; + return NULL; +} + + +void __blkg_release_rcu(struct rcu_head *rcu); + +/** + * blkg_put - put a blkg reference + * @blkg: blkg to put + */ +static inline void blkg_put(struct blkcg_gq *blkg) +{ + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + if (atomic_dec_and_test(&blkg->refcnt)) + call_rcu(&blkg->rcu_head, __blkg_release_rcu); +} + +/** + * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants + * @d_blkg: loop cursor pointing to the current descendant + * @pos_css: used for iteration + * @p_blkg: target blkg to walk descendants of + * + * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU + * read locked. If called under either blkcg or queue lock, the iteration + * is guaranteed to include all and only online blkgs. The caller may + * update @pos_css by calling css_rightmost_descendant() to skip subtree. + * @p_blkg is included in the iteration and the first node to be visited. + */ +#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \ + css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \ + if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ + (p_blkg)->q, false))) + +/** + * blkg_for_each_descendant_post - post-order walk of a blkg's descendants + * @d_blkg: loop cursor pointing to the current descendant + * @pos_css: used for iteration + * @p_blkg: target blkg to walk descendants of + * + * Similar to blkg_for_each_descendant_pre() but performs post-order + * traversal instead. Synchronization rules are the same. @p_blkg is + * included in the iteration and the last node to be visited. + */ +#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \ + css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \ + if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ + (p_blkg)->q, false))) + +/** + * blk_get_rl - get request_list to use + * @q: request_queue of interest + * @bio: bio which will be attached to the allocated request (may be %NULL) + * + * The caller wants to allocate a request from @q to use for @bio. Find + * the request_list to use and obtain a reference on it. Should be called + * under queue_lock. This function is guaranteed to return non-%NULL + * request_list. + */ +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) +{ + struct blkcg *blkcg; + struct blkcg_gq *blkg; + + rcu_read_lock(); + + blkcg = bio_blkcg(bio); + + /* bypass blkg lookup and use @q->root_rl directly for root */ + if (blkcg == &blkcg_root) + goto root_rl; + + /* + * Try to use blkg->rl. blkg lookup may fail under memory pressure + * or if either the blkcg or queue is going away. Fall back to + * root_rl in such cases. + */ + blkg = blkg_lookup(blkcg, q); + if (unlikely(!blkg)) + goto root_rl; + + blkg_get(blkg); + rcu_read_unlock(); + return &blkg->rl; +root_rl: + rcu_read_unlock(); + return &q->root_rl; +} + +/** + * blk_put_rl - put request_list + * @rl: request_list to put + * + * Put the reference acquired by blk_get_rl(). Should be called under + * queue_lock. + */ +static inline void blk_put_rl(struct request_list *rl) +{ + if (rl->blkg->blkcg != &blkcg_root) + blkg_put(rl->blkg); +} + +/** + * blk_rq_set_rl - associate a request with a request_list + * @rq: request of interest + * @rl: target request_list + * + * Associate @rq with @rl so that accounting and freeing can know the + * request_list @rq came from. 
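+ *
+ * In a request allocation path this is typically paired with blk_get_rl()
+ * and, on free, blk_put_rl(), roughly:
+ *
+ *	rl = blk_get_rl(q, bio);
+ *	...allocate rq from rl...
+ *	blk_rq_set_rl(rq, rl);
+ *	...
+ *	blk_put_rl(blk_rq_rl(rq));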
+ */ +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) +{ + rq->rl = rl; +} + +/** + * blk_rq_rl - return the request_list a request came from + * @rq: request of interest + * + * Return the request_list @rq is allocated from. + */ +static inline struct request_list *blk_rq_rl(struct request *rq) +{ + return rq->rl; +} + +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q); +/** + * blk_queue_for_each_rl - iterate through all request_lists of a request_queue + * + * Should be used under queue_lock. + */ +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) + +static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp) +{ + int ret; + + ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp); + if (ret) + return ret; + + atomic64_set(&stat->aux_cnt, 0); + return 0; +} + +static inline void blkg_stat_exit(struct blkg_stat *stat) +{ + percpu_counter_destroy(&stat->cpu_cnt); +} + +/** + * blkg_stat_add - add a value to a blkg_stat + * @stat: target blkg_stat + * @val: value to add + * + * Add @val to @stat. The caller must ensure that IRQ on the same CPU + * don't re-enter this function for the same counter. + */ +static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) +{ + percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH); +} + +/** + * blkg_stat_read - read the current value of a blkg_stat + * @stat: blkg_stat to read + */ +static inline uint64_t blkg_stat_read(struct blkg_stat *stat) +{ + return percpu_counter_sum_positive(&stat->cpu_cnt); +} + +/** + * blkg_stat_reset - reset a blkg_stat + * @stat: blkg_stat to reset + */ +static inline void blkg_stat_reset(struct blkg_stat *stat) +{ + percpu_counter_set(&stat->cpu_cnt, 0); + atomic64_set(&stat->aux_cnt, 0); +} + +/** + * blkg_stat_add_aux - add a blkg_stat into another's aux count + * @to: the destination blkg_stat + * @from: the source + * + * Add @from's count including the aux one to @to's aux count. + */ +static inline void blkg_stat_add_aux(struct blkg_stat *to, + struct blkg_stat *from) +{ + atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt), + &to->aux_cnt); +} + +static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp) +{ + int i, ret; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) { + ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp); + if (ret) { + while (--i >= 0) + percpu_counter_destroy(&rwstat->cpu_cnt[i]); + return ret; + } + atomic64_set(&rwstat->aux_cnt[i], 0); + } + return 0; +} + +static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) +{ + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + percpu_counter_destroy(&rwstat->cpu_cnt[i]); +} + +/** + * blkg_rwstat_add - add a value to a blkg_rwstat + * @rwstat: target blkg_rwstat + * @op: REQ_OP and flags + * @val: value to add + * + * Add @val to @rwstat. The counters are chosen according to @rw. The + * caller is responsible for synchronizing calls to this function. 
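+ *
+ * For example, byte accounting for a bio is done as (see
+ * blkcg_bio_issue_check() below):
+ *
+ *	blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size);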
+ */ +static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, + unsigned int op, uint64_t val) +{ + struct percpu_counter *cnt; + + if (op_is_discard(op)) + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD]; + else if (op_is_write(op)) + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE]; + else + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; + + percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH); + + if (op_is_sync(op)) + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; + else + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; + + percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH); +} + +/** + * blkg_rwstat_read - read the current values of a blkg_rwstat + * @rwstat: blkg_rwstat to read + * + * Read the current snapshot of @rwstat and return it in the aux counts. + */ +static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) +{ + struct blkg_rwstat result; + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + atomic64_set(&result.aux_cnt[i], + percpu_counter_sum_positive(&rwstat->cpu_cnt[i])); + return result; +} + +/** + * blkg_rwstat_total - read the total count of a blkg_rwstat + * @rwstat: blkg_rwstat to read + * + * Return the total count of @rwstat regardless of the IO direction. This + * function can be called without synchronization and takes care of u64 + * atomicity. + */ +static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat) +{ + struct blkg_rwstat tmp = blkg_rwstat_read(rwstat); + + return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + + atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); +} + +/** + * blkg_rwstat_reset - reset a blkg_rwstat + * @rwstat: blkg_rwstat to reset + */ +static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) +{ + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) { + percpu_counter_set(&rwstat->cpu_cnt[i], 0); + atomic64_set(&rwstat->aux_cnt[i], 0); + } +} + +/** + * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count + * @to: the destination blkg_rwstat + * @from: the source + * + * Add @from's count including the aux one to @to's aux count. + */ +static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to, + struct blkg_rwstat *from) +{ + u64 sum[BLKG_RWSTAT_NR]; + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]); + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]), + &to->aux_cnt[i]); +} + +#ifdef CONFIG_BLK_DEV_THROTTLING +extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, + struct bio *bio); +#else +static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, + struct bio *bio) { return false; } +#endif + +static inline bool blkcg_bio_issue_check(struct request_queue *q, + struct bio *bio) +{ + struct blkcg *blkcg; + struct blkcg_gq *blkg; + bool throtl = false; + + rcu_read_lock(); + blkcg = bio_blkcg(bio); + + /* associate blkcg if bio hasn't attached one */ + bio_associate_blkcg(bio, &blkcg->css); + + blkg = blkg_lookup(blkcg, q); + if (unlikely(!blkg)) { + spin_lock_irq(q->queue_lock); + blkg = blkg_lookup_create(blkcg, q); + if (IS_ERR(blkg)) + blkg = NULL; + spin_unlock_irq(q->queue_lock); + } + + throtl = blk_throtl_bio(q, blkg, bio); + + if (!throtl) { + blkg = blkg ?: q->root_blkg; + /* + * If the bio is flagged with BIO_QUEUE_ENTERED it means this + * is a split bio and we would have already accounted for the + * size of the bio. 
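+ * Counting it again here would double count the bytes of such a split
+ * bio, so only stat_ios below is bumped unconditionally.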
+ */ + if (!bio_flagged(bio, BIO_QUEUE_ENTERED)) + blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, + bio->bi_iter.bi_size); + blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); + } + + rcu_read_unlock(); + return !throtl; +} + +static inline void blkcg_use_delay(struct blkcg_gq *blkg) +{ + if (atomic_add_return(1, &blkg->use_delay) == 1) + atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); +} + +/** + * blk_cgroup_mergeable - Determine whether to allow or disallow merges + * @rq: request to merge into + * @bio: bio to merge + * + * @bio and @rq should belong to the same cgroup and their issue_as_root should + * match. The latter is necessary as we don't want to throttle e.g. a metadata + * update because it happens to be next to a regular IO. + */ +static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) +{ + return rq->bio->bi_blkg == bio->bi_blkg && + bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio); +} + +static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) +{ + int old = atomic_read(&blkg->use_delay); + + if (old == 0) + return 0; + + /* + * We do this song and dance because we can race with somebody else + * adding or removing delay. If we just did an atomic_dec we'd end up + * negative and we'd already be in trouble. We need to subtract 1 and + * then check to see if we were the last delay so we can drop the + * congestion count on the cgroup. + */ + while (old) { + int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1); + if (cur == old) + break; + old = cur; + } + + if (old == 0) + return 0; + if (old == 1) + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); + return 1; +} + +static inline void blkcg_clear_delay(struct blkcg_gq *blkg) +{ + int old = atomic_read(&blkg->use_delay); + if (!old) + return; + /* We only want 1 person clearing the congestion count for this blkg. 
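+ * The cmpxchg() loop below swaps use_delay straight to 0; only the
+ * caller whose cmpxchg() succeeds decrements the cgroup's
+ * congestion_count, so racing clearers cannot underflow it.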
*/ + while (old) { + int cur = atomic_cmpxchg(&blkg->use_delay, old, 0); + if (cur == old) { + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); + break; + } + old = cur; + } +} + +void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); +void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); +void blkcg_maybe_throttle_current(void); +#else /* CONFIG_BLK_CGROUP */ + +struct blkcg { +}; + +struct blkg_policy_data { +}; + +struct blkcg_policy_data { +}; + +struct blkcg_gq { +}; + +struct blkcg_policy { +}; + +#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL)) + +static inline void blkcg_maybe_throttle_current(void) { } +static inline bool blk_cgroup_congested(void) { return false; } + +#ifdef CONFIG_BLOCK + +static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { } + +static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } +static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) +{ return NULL; } +static inline int blkcg_init_queue(struct request_queue *q) { return 0; } +static inline void blkcg_drain_queue(struct request_queue *q) { } +static inline void blkcg_exit_queue(struct request_queue *q) { } +static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; } +static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { } +static inline int blkcg_activate_policy(struct request_queue *q, + const struct blkcg_policy *pol) { return 0; } +static inline void blkcg_deactivate_policy(struct request_queue *q, + const struct blkcg_policy *pol) { } + +static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } + +static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, + struct blkcg_policy *pol) { return NULL; } +static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } +static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } +static inline void blkg_get(struct blkcg_gq *blkg) { } +static inline void blkg_put(struct blkcg_gq *blkg) { } + +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) { return &q->root_rl; } +static inline void blk_put_rl(struct request_list *rl) { } +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } +static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } + +static inline bool blkcg_bio_issue_check(struct request_queue *q, + struct bio *bio) { return true; } +static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; } + +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) + +#endif /* CONFIG_BLOCK */ +#endif /* CONFIG_BLK_CGROUP */ +#endif /* _BLK_CGROUP_H */ diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h new file mode 100644 index 000000000..9f4c17f0d --- /dev/null +++ b/include/linux/blk-mq-pci.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLK_MQ_PCI_H +#define _LINUX_BLK_MQ_PCI_H + +struct blk_mq_tag_set; +struct pci_dev; + +int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev, + int offset); + +#endif /* _LINUX_BLK_MQ_PCI_H */ diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h new file mode 100644 index 000000000..b4ade1980 --- /dev/null +++ b/include/linux/blk-mq-rdma.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_BLK_MQ_RDMA_H +#define _LINUX_BLK_MQ_RDMA_H + +struct 
blk_mq_tag_set; +struct ib_device; + +int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set, + struct ib_device *dev, int first_vec); + +#endif /* _LINUX_BLK_MQ_RDMA_H */ diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h new file mode 100644 index 000000000..69b4da262 --- /dev/null +++ b/include/linux/blk-mq-virtio.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLK_MQ_VIRTIO_H +#define _LINUX_BLK_MQ_VIRTIO_H + +struct blk_mq_tag_set; +struct virtio_device; + +int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set, + struct virtio_device *vdev, int first_vec); + +#endif /* _LINUX_BLK_MQ_VIRTIO_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h new file mode 100644 index 000000000..2885dce1a --- /dev/null +++ b/include/linux/blk-mq.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef BLK_MQ_H +#define BLK_MQ_H + +#include +#include +#include + +struct blk_mq_tags; +struct blk_flush_queue; + +/** + * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device + */ +struct blk_mq_hw_ctx { + struct { + spinlock_t lock; + struct list_head dispatch; + unsigned long state; /* BLK_MQ_S_* flags */ + } ____cacheline_aligned_in_smp; + + struct delayed_work run_work; + cpumask_var_t cpumask; + int next_cpu; + int next_cpu_batch; + + unsigned long flags; /* BLK_MQ_F_* flags */ + + void *sched_data; + struct request_queue *queue; + struct blk_flush_queue *fq; + + void *driver_data; + + struct sbitmap ctx_map; + + struct blk_mq_ctx *dispatch_from; + unsigned int dispatch_busy; + + unsigned int nr_ctx; + struct blk_mq_ctx **ctxs; + + spinlock_t dispatch_wait_lock; + wait_queue_entry_t dispatch_wait; + atomic_t wait_index; + + struct blk_mq_tags *tags; + struct blk_mq_tags *sched_tags; + + unsigned long queued; + unsigned long run; +#define BLK_MQ_MAX_DISPATCH_ORDER 7 + unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; + + unsigned int numa_node; + unsigned int queue_num; + + atomic_t nr_active; + unsigned int nr_expired; + + struct hlist_node cpuhp_dead; + struct kobject kobj; + + unsigned long poll_considered; + unsigned long poll_invoked; + unsigned long poll_success; + +#ifdef CONFIG_BLK_DEBUG_FS + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; +#endif + + /* Must be the last member - see also blk_mq_hw_ctx_size(). 
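+ * It is a zero-length array, so it only occupies space when the
+ * allocation side reserves room for it; blk_mq_hw_ctx_size() is
+ * expected to do that only for BLK_MQ_F_BLOCKING queues, whose
+ * ->queue_rq() may sleep and therefore needs SRCU protection.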
*/ + struct srcu_struct srcu[0]; +}; + +struct blk_mq_tag_set { + unsigned int *mq_map; + const struct blk_mq_ops *ops; + unsigned int nr_hw_queues; + unsigned int queue_depth; /* max hw supported */ + unsigned int reserved_tags; + unsigned int cmd_size; /* per-request extra data */ + int numa_node; + unsigned int timeout; + unsigned int flags; /* BLK_MQ_F_* */ + void *driver_data; + + struct blk_mq_tags **tags; + + struct mutex tag_list_lock; + struct list_head tag_list; +}; + +struct blk_mq_queue_data { + struct request *rq; + bool last; +}; + +typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *, + const struct blk_mq_queue_data *); +typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *); +typedef void (put_budget_fn)(struct blk_mq_hw_ctx *); +typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); +typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); +typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); +typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *, + unsigned int, unsigned int); +typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *, + unsigned int); + +typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, + bool); +typedef void (busy_tag_iter_fn)(struct request *, void *, bool); +typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int); +typedef int (map_queues_fn)(struct blk_mq_tag_set *set); +typedef void (cleanup_rq_fn)(struct request *); + + +struct blk_mq_ops { + /* + * Queue request + */ + queue_rq_fn *queue_rq; + + /* + * Reserve budget before queue request, once .queue_rq is + * run, it is driver's responsibility to release the + * reserved budget. Also we have to handle failure case + * of .get_budget for avoiding I/O deadlock. + */ + get_budget_fn *get_budget; + put_budget_fn *put_budget; + + /* + * Called on request timeout + */ + timeout_fn *timeout; + + /* + * Called to poll for completion of a specific tag. + */ + poll_fn *poll; + + softirq_done_fn *complete; + + /* + * Called when the block layer side of a hardware queue has been + * set up, allowing the driver to allocate/init matching structures. + * Ditto for exit/teardown. + */ + init_hctx_fn *init_hctx; + exit_hctx_fn *exit_hctx; + + /* + * Called for every command allocated by the block layer to allow + * the driver to set up driver specific data. + * + * Tag greater than or equal to queue_depth is for setting up + * flush request. + * + * Ditto for exit/teardown. + */ + init_request_fn *init_request; + exit_request_fn *exit_request; + /* Called from inside blk_get_request() */ + void (*initialize_rq_fn)(struct request *rq); + + /* + * Called before freeing one request which isn't completed yet, + * and usually for freeing the driver private data + */ + cleanup_rq_fn *cleanup_rq; + + map_queues_fn *map_queues; + +#ifdef CONFIG_BLK_DEBUG_FS + /* + * Used by the debugfs implementation to show driver-specific + * information about a request. 
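+ *
+ * A driver hook here is typically a short seq_file dump, e.g. (an
+ * illustrative sketch; "mydrv" and its command struct are hypothetical):
+ *
+ *    static void mydrv_show_rq(struct seq_file *m, struct request *rq)
+ *    {
+ *        struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ *
+ *        seq_printf(m, "mydrv: flags=0x%x\n", cmd->flags);
+ *    }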
+ */ + void (*show_rq)(struct seq_file *m, struct request *rq); +#endif +}; + +enum { + BLK_MQ_F_SHOULD_MERGE = 1 << 0, + BLK_MQ_F_TAG_SHARED = 1 << 1, + BLK_MQ_F_SG_MERGE = 1 << 2, + BLK_MQ_F_BLOCKING = 1 << 5, + BLK_MQ_F_NO_SCHED = 1 << 6, + BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, + BLK_MQ_F_ALLOC_POLICY_BITS = 1, + + BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, + BLK_MQ_S_SCHED_RESTART = 2, + + BLK_MQ_MAX_DEPTH = 10240, + + BLK_MQ_CPU_WORK_BATCH = 8, +}; +#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \ + ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \ + ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) +#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \ + ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ + << BLK_MQ_F_ALLOC_POLICY_START_BIT) + +struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); +struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, + struct request_queue *q); +int blk_mq_register_dev(struct device *, struct request_queue *); +void blk_mq_unregister_dev(struct device *, struct request_queue *); + +int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); +void blk_mq_free_tag_set(struct blk_mq_tag_set *set); + +void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); + +void blk_mq_free_request(struct request *rq); +bool blk_mq_can_queue(struct blk_mq_hw_ctx *); + +enum { + /* return when out of requests */ + BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), + /* allocate from reserved pool */ + BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), + /* allocate internal/sched tag */ + BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2), + /* set RQF_PREEMPT */ + BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3), +}; + +struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, + blk_mq_req_flags_t flags); +struct request *blk_mq_alloc_request_hctx(struct request_queue *q, + unsigned int op, blk_mq_req_flags_t flags, + unsigned int hctx_idx); +struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); + +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1, +}; + +u32 blk_mq_unique_tag(struct request *rq); + +static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag) +{ + return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS; +} + +static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) +{ + return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; +} + + +int blk_mq_request_started(struct request *rq); +void blk_mq_start_request(struct request *rq); +void blk_mq_end_request(struct request *rq, blk_status_t error); +void __blk_mq_end_request(struct request *rq, blk_status_t error); + +void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, + bool kick_requeue_list); +void blk_mq_kick_requeue_list(struct request_queue *q); +void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); +void blk_mq_complete_request(struct request *rq); +bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, + struct bio *bio); +bool blk_mq_queue_stopped(struct request_queue *q); +void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); +void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); +void blk_mq_stop_hw_queues(struct request_queue *q); +void blk_mq_start_hw_queues(struct request_queue *q); +void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); +void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool 
async); +void blk_mq_quiesce_queue(struct request_queue *q); +void blk_mq_unquiesce_queue(struct request_queue *q); +void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); +bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); +void blk_mq_run_hw_queues(struct request_queue *q, bool async); +void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, + busy_tag_iter_fn *fn, void *priv); +void blk_mq_freeze_queue(struct request_queue *q); +void blk_mq_unfreeze_queue(struct request_queue *q); +void blk_freeze_queue_start(struct request_queue *q); +void blk_mq_freeze_queue_wait(struct request_queue *q); +int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, + unsigned long timeout); + +int blk_mq_map_queues(struct blk_mq_tag_set *set); +void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); + +void blk_mq_quiesce_queue_nowait(struct request_queue *q); + +/** + * blk_mq_mark_complete() - Set request state to complete + * @rq: request to set to complete state + * + * Returns true if request state was successfully set to complete. If + * successful, the caller is responsibile for seeing this request is ended, as + * blk_mq_complete_request will not work again. + */ +static inline bool blk_mq_mark_complete(struct request *rq) +{ + return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) == + MQ_RQ_IN_FLIGHT; +} + +/* + * Driver command data is immediately after the request. So subtract request + * size to get back to the original request, add request size to get the PDU. + */ +static inline struct request *blk_mq_rq_from_pdu(void *pdu) +{ + return pdu - sizeof(struct request); +} +static inline void *blk_mq_rq_to_pdu(struct request *rq) +{ + return rq + 1; +} + +#define queue_for_each_hw_ctx(q, hctx, i) \ + for ((i) = 0; (i) < (q)->nr_hw_queues && \ + ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++) + +#define hctx_for_each_ctx(hctx, ctx, i) \ + for ((i) = 0; (i) < (hctx)->nr_ctx && \ + ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) + +static inline void blk_mq_cleanup_rq(struct request *rq) +{ + if (rq->q->mq_ops->cleanup_rq) + rq->q->mq_ops->cleanup_rq(rq); +} + +#endif diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h new file mode 100644 index 000000000..f6dfb3073 --- /dev/null +++ b/include/linux/blk_types.h @@ -0,0 +1,461 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Block data types and constants. Directly include this file only to + * break include dependency loop. + */ +#ifndef __LINUX_BLK_TYPES_H +#define __LINUX_BLK_TYPES_H + +#include +#include +#include + +struct bio_set; +struct bio; +struct bio_integrity_payload; +struct page; +struct block_device; +struct io_context; +struct cgroup_subsys_state; +typedef void (bio_end_io_t) (struct bio *); + +/* + * Block error status values. See block/blk-core:blk_errors for the details. + * Alpha cannot write a byte atomically, so we need to use 32-bit value. 
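+ *
+ * These values are what drivers return from ->queue_rq() and what the
+ * completion helpers such as blk_mq_end_request() take as the error
+ * argument, with BLK_STS_OK meaning success.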
+ */ +#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) +typedef u32 __bitwise blk_status_t; +#else +typedef u8 __bitwise blk_status_t; +#endif +#define BLK_STS_OK 0 +#define BLK_STS_NOTSUPP ((__force blk_status_t)1) +#define BLK_STS_TIMEOUT ((__force blk_status_t)2) +#define BLK_STS_NOSPC ((__force blk_status_t)3) +#define BLK_STS_TRANSPORT ((__force blk_status_t)4) +#define BLK_STS_TARGET ((__force blk_status_t)5) +#define BLK_STS_NEXUS ((__force blk_status_t)6) +#define BLK_STS_MEDIUM ((__force blk_status_t)7) +#define BLK_STS_PROTECTION ((__force blk_status_t)8) +#define BLK_STS_RESOURCE ((__force blk_status_t)9) +#define BLK_STS_IOERR ((__force blk_status_t)10) + +/* hack for device mapper, don't use elsewhere: */ +#define BLK_STS_DM_REQUEUE ((__force blk_status_t)11) + +#define BLK_STS_AGAIN ((__force blk_status_t)12) + +/* + * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if + * device related resources are unavailable, but the driver can guarantee + * that the queue will be rerun in the future once resources become + * available again. This is typically the case for device specific + * resources that are consumed for IO. If the driver fails allocating these + * resources, we know that inflight (or pending) IO will free these + * resource upon completion. + * + * This is different from BLK_STS_RESOURCE in that it explicitly references + * a device specific resource. For resources of wider scope, allocation + * failure can happen without having pending IO. This means that we can't + * rely on request completions freeing these resources, as IO may not be in + * flight. Examples of that are kernel memory allocations, DMA mappings, or + * any other system wide resources. + */ +#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) + +/** + * blk_path_error - returns true if error may be path related + * @error: status the request was completed with + * + * Description: + * This classifies block error status into non-retryable errors and ones + * that may be successful if retried on a failover path. 
+ * + * Return: + * %false - retrying failover path will not help + * %true - may succeed if retried + */ +static inline bool blk_path_error(blk_status_t error) +{ + switch (error) { + case BLK_STS_NOTSUPP: + case BLK_STS_NOSPC: + case BLK_STS_TARGET: + case BLK_STS_NEXUS: + case BLK_STS_MEDIUM: + case BLK_STS_PROTECTION: + return false; + } + + /* Anything else could be a path failure, so should be retried */ + return true; +} + +/* + * From most significant bit: + * 1 bit: reserved for other usage, see below + * 12 bits: original size of bio + * 51 bits: issue time of bio + */ +#define BIO_ISSUE_RES_BITS 1 +#define BIO_ISSUE_SIZE_BITS 12 +#define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS) +#define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS) +#define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1) +#define BIO_ISSUE_SIZE_MASK \ + (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT) +#define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1)) + +/* Reserved bit for blk-throtl */ +#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63) + +struct bio_issue { + u64 value; +}; + +static inline u64 __bio_issue_time(u64 time) +{ + return time & BIO_ISSUE_TIME_MASK; +} + +static inline u64 bio_issue_time(struct bio_issue *issue) +{ + return __bio_issue_time(issue->value); +} + +static inline sector_t bio_issue_size(struct bio_issue *issue) +{ + return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT); +} + +static inline void bio_issue_init(struct bio_issue *issue, + sector_t size) +{ + size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1; + issue->value = ((issue->value & BIO_ISSUE_RES_MASK) | + (ktime_get_ns() & BIO_ISSUE_TIME_MASK) | + ((u64)size << BIO_ISSUE_SIZE_SHIFT)); +} + +/* + * main unit of I/O for the block layer and lower layers (ie drivers and + * stacking drivers) + */ +struct bio { + struct bio *bi_next; /* request queue link */ + struct gendisk *bi_disk; + unsigned int bi_opf; /* bottom bits req flags, + * top bits REQ_OP. Use + * accessors. + */ + unsigned short bi_flags; /* status, etc and bvec pool number */ + unsigned short bi_ioprio; + unsigned short bi_write_hint; + blk_status_t bi_status; + u8 bi_partno; + + /* Number of segments in this BIO after + * physical address coalescing is performed. + */ + unsigned int bi_phys_segments; + + /* + * To keep track of the max segment size, we account for the + * sizes of the first and last mergeable segments in this bio. + */ + unsigned int bi_seg_front_size; + unsigned int bi_seg_back_size; + + struct bvec_iter bi_iter; + + atomic_t __bi_remaining; + bio_end_io_t *bi_end_io; + + void *bi_private; +#ifdef CONFIG_BLK_CGROUP + /* + * Optional ioc and css associated with this bio. Put on bio + * release. Read comment on top of bio_associate_current(). + */ + struct io_context *bi_ioc; + struct cgroup_subsys_state *bi_css; + struct blkcg_gq *bi_blkg; + struct bio_issue bi_issue; +#endif + union { +#if defined(CONFIG_BLK_DEV_INTEGRITY) + struct bio_integrity_payload *bi_integrity; /* data integrity */ +#endif + }; + + unsigned short bi_vcnt; /* how many bio_vec's */ + + /* + * Everything starting with bi_max_vecs will be preserved by bio_reset() + */ + + unsigned short bi_max_vecs; /* max bvl_vecs we can hold */ + + atomic_t __bi_cnt; /* pin count */ + + struct bio_vec *bi_io_vec; /* the actual vec list */ + + struct bio_set *bi_pool; + + /* + * We can inline a number of vecs at the end of the bio, to avoid + * double allocations for a small number of bio_vecs. 
This member
+ * MUST obviously be kept at the very end of the bio.
+ */
+ struct bio_vec bi_inline_vecs[0];
+};
+
+#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
+
+/*
+ * bio flags
+ */
+#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
+#define BIO_CLONED 2 /* doesn't own data */
+#define BIO_BOUNCED 3 /* bio is a bounce bio */
+#define BIO_USER_MAPPED 4 /* contains user pages */
+#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
+#define BIO_QUIET 6 /* Make BIO Quiet */
+#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
+#define BIO_THROTTLED 9 /* This bio has already been subjected to
+ * throttling rules. Don't do it again. */
+#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
+ * of this bio. */
+#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
+
+/* See BVEC_POOL_OFFSET below before adding new flags */
+
+/*
+ * We support 6 different bvec pools, the last one is magic in that it
+ * is backed by a mempool.
+ */
+#define BVEC_POOL_NR 6
+#define BVEC_POOL_MAX (BVEC_POOL_NR - 1)
+
+/*
+ * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
+ * 1 to the actual index so that 0 indicates that there are no bvecs to be
+ * freed.
+ */
+#define BVEC_POOL_BITS (3)
+#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
+#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
+#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
+# error "BVEC_POOL_BITS is too small"
+#endif
+
+/*
+ * Flags starting here get preserved by bio_reset() - this includes
+ * only BVEC_POOL_IDX()
+ */
+#define BIO_RESET_BITS BVEC_POOL_OFFSET
+
+typedef __u32 __bitwise blk_mq_req_flags_t;
+
+/*
+ * Operations and flags common to the bio and request structures.
+ * We use 8 bits for encoding the operation, and the remaining 24 for flags.
+ *
+ * The least significant bit of the operation number indicates the data
+ * transfer direction:
+ *
+ * - if the least significant bit is set transfers are TO the device
+ * - if the least significant bit is not set transfers are FROM the device
+ *
+ * If an operation does not transfer data the least significant bit has no
+ * meaning.
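+ *
+ * For example (illustrative), for any initialized bio:
+ *
+ *    bio_op(bio) extracts the REQ_OP_* value, and
+ *    op_is_write(bio_op(bio)) tests the direction bit described above;
+ *    REQ_OP_READ (0) transfers from the device, REQ_OP_WRITE (1) to it.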
+ */ +#define REQ_OP_BITS 8 +#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1) +#define REQ_FLAG_BITS 24 + +enum req_opf { + /* read sectors from the device */ + REQ_OP_READ = 0, + /* write sectors to the device */ + REQ_OP_WRITE = 1, + /* flush the volatile write cache */ + REQ_OP_FLUSH = 2, + /* discard sectors */ + REQ_OP_DISCARD = 3, + /* get zone information */ + REQ_OP_ZONE_REPORT = 4, + /* securely erase sectors */ + REQ_OP_SECURE_ERASE = 5, + /* seset a zone write pointer */ + REQ_OP_ZONE_RESET = 6, + /* write the same sector many times */ + REQ_OP_WRITE_SAME = 7, + /* write the zero filled sector many times */ + REQ_OP_WRITE_ZEROES = 9, + + /* SCSI passthrough using struct scsi_request */ + REQ_OP_SCSI_IN = 32, + REQ_OP_SCSI_OUT = 33, + /* Driver private requests */ + REQ_OP_DRV_IN = 34, + REQ_OP_DRV_OUT = 35, + + REQ_OP_LAST, +}; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = /* no driver retries of device errors */ + REQ_OP_BITS, + __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ + __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ + __REQ_SYNC, /* request is sync (sync write or read) */ + __REQ_META, /* metadata io request */ + __REQ_PRIO, /* boost priority in cfq */ + __REQ_NOMERGE, /* don't touch this for merging */ + __REQ_IDLE, /* anticipate more IO after this one */ + __REQ_INTEGRITY, /* I/O includes block integrity payload */ + __REQ_FUA, /* forced unit access */ + __REQ_PREFLUSH, /* request for cache flush */ + __REQ_RAHEAD, /* read ahead, can fail anytime */ + __REQ_BACKGROUND, /* background IO */ + __REQ_NOWAIT, /* Don't wait if request will block */ + + /* command specific flags for REQ_OP_WRITE_ZEROES: */ + __REQ_NOUNMAP, /* do not free blocks when zeroing */ + + /* for driver use */ + __REQ_DRV, + __REQ_SWAP, /* swapping request. */ + __REQ_NR_BITS, /* stops here */ +}; + +#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV) +#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT) +#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER) +#define REQ_SYNC (1ULL << __REQ_SYNC) +#define REQ_META (1ULL << __REQ_META) +#define REQ_PRIO (1ULL << __REQ_PRIO) +#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) +#define REQ_IDLE (1ULL << __REQ_IDLE) +#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) +#define REQ_FUA (1ULL << __REQ_FUA) +#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) +#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) +#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) +#define REQ_NOWAIT (1ULL << __REQ_NOWAIT) + +#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) + +#define REQ_DRV (1ULL << __REQ_DRV) +#define REQ_SWAP (1ULL << __REQ_SWAP) + +#define REQ_FAILFAST_MASK \ + (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) + +#define REQ_NOMERGE_FLAGS \ + (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) + +enum stat_group { + STAT_READ, + STAT_WRITE, + STAT_DISCARD, + + NR_STAT_GROUPS +}; + +#define bio_op(bio) \ + ((bio)->bi_opf & REQ_OP_MASK) +#define req_op(req) \ + ((req)->cmd_flags & REQ_OP_MASK) + +/* obsolete, don't use in new code */ +static inline void bio_set_op_attrs(struct bio *bio, unsigned op, + unsigned op_flags) +{ + bio->bi_opf = op | op_flags; +} + +static inline bool op_is_write(unsigned int op) +{ + return (op & 1); +} + +/* + * Check if the bio or request is one that needs special treatment in the + * flush state machine. 
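+ * (e.g. op_is_flush(bio->bi_opf) or op_is_flush(rq->cmd_flags); the
+ * REQ_PREFLUSH and REQ_FUA bits checked below are what make a request
+ * "special" here.)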
+ */ +static inline bool op_is_flush(unsigned int op) +{ + return op & (REQ_FUA | REQ_PREFLUSH); +} + +/* + * Reads are always treated as synchronous, as are requests with the FUA or + * PREFLUSH flag. Other operations may be marked as synchronous using the + * REQ_SYNC flag. + */ +static inline bool op_is_sync(unsigned int op) +{ + return (op & REQ_OP_MASK) == REQ_OP_READ || + (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH)); +} + +static inline bool op_is_discard(unsigned int op) +{ + return (op & REQ_OP_MASK) == REQ_OP_DISCARD; +} + +static inline int op_stat_group(unsigned int op) +{ + if (op_is_discard(op)) + return STAT_DISCARD; + return op_is_write(op); +} + +typedef unsigned int blk_qc_t; +#define BLK_QC_T_NONE -1U +#define BLK_QC_T_SHIFT 16 +#define BLK_QC_T_INTERNAL (1U << 31) + +static inline bool blk_qc_t_valid(blk_qc_t cookie) +{ + return cookie != BLK_QC_T_NONE; +} + +static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num, + bool internal) +{ + blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT); + + if (internal) + ret |= BLK_QC_T_INTERNAL; + + return ret; +} + +static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) +{ + return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT; +} + +static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) +{ + return cookie & ((1u << BLK_QC_T_SHIFT) - 1); +} + +static inline bool blk_qc_t_is_internal(blk_qc_t cookie) +{ + return (cookie & BLK_QC_T_INTERNAL) != 0; +} + +struct blk_rq_stat { + u64 mean; + u64 min; + u64 max; + u32 nr_samples; + u64 batch; +}; + +#endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h new file mode 100644 index 000000000..56fe682d9 --- /dev/null +++ b/include/linux/blkdev.h @@ -0,0 +1,2114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLKDEV_H +#define _LINUX_BLKDEV_H + +#include +#include + +#ifdef CONFIG_BLOCK + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct module; +struct scsi_ioctl_command; + +struct request_queue; +struct elevator_queue; +struct blk_trace; +struct request; +struct sg_io_hdr; +struct bsg_job; +struct blkcg_gq; +struct blk_flush_queue; +struct pr_ops; +struct rq_qos; +struct blk_queue_stats; +struct blk_stat_callback; + +#define BLKDEV_MIN_RQ 4 +#define BLKDEV_MAX_RQ 128 /* Default maximum */ + +/* Must be consistent with blk_mq_poll_stats_bkt() */ +#define BLK_MQ_POLL_STATS_BKTS 16 + +/* + * Maximum number of blkcg policies allowed to be registered concurrently. + * Defined here to simplify include dependency. 
+ */ +#define BLKCG_MAX_POLS 5 + +static inline int blk_validate_block_size(unsigned int bsize) +{ + if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) + return -EINVAL; + + return 0; +} + +typedef void (rq_end_io_fn)(struct request *, blk_status_t); + +#define BLK_RL_SYNCFULL (1U << 0) +#define BLK_RL_ASYNCFULL (1U << 1) + +struct request_list { + struct request_queue *q; /* the queue this rl belongs to */ +#ifdef CONFIG_BLK_CGROUP + struct blkcg_gq *blkg; /* blkg this request pool belongs to */ +#endif + /* + * count[], starved[], and wait[] are indexed by + * BLK_RW_SYNC/BLK_RW_ASYNC + */ + int count[2]; + int starved[2]; + mempool_t *rq_pool; + wait_queue_head_t wait[2]; + unsigned int flags; +}; + +/* + * request flags */ +typedef __u32 __bitwise req_flags_t; + +/* elevator knows about this request */ +#define RQF_SORTED ((__force req_flags_t)(1 << 0)) +/* drive already may have started this one */ +#define RQF_STARTED ((__force req_flags_t)(1 << 1)) +/* uses tagged queueing */ +#define RQF_QUEUED ((__force req_flags_t)(1 << 2)) +/* may not be passed by ioscheduler */ +#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) +/* request for flush sequence */ +#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) +/* merge of different types, fail separately */ +#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) +/* track inflight for MQ */ +#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) +/* don't call prep for this one */ +#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) +/* set for "ide_preempt" requests and also for requests for which the SCSI + "quiesce" state must be ignored. */ +#define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) +/* contains copies of user pages */ +#define RQF_COPY_USER ((__force req_flags_t)(1 << 9)) +/* vaguely specified driver internal error. Ignored by the block layer */ +#define RQF_FAILED ((__force req_flags_t)(1 << 10)) +/* don't warn about errors */ +#define RQF_QUIET ((__force req_flags_t)(1 << 11)) +/* elevator private data attached */ +#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) +/* account I/O stat */ +#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) +/* request came from our alloc pool */ +#define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) +/* runtime pm request */ +#define RQF_PM ((__force req_flags_t)(1 << 15)) +/* on IO scheduler merge hash */ +#define RQF_HASHED ((__force req_flags_t)(1 << 16)) +/* IO stats tracking on */ +#define RQF_STATS ((__force req_flags_t)(1 << 17)) +/* Look at ->special_vec for the actual data payload instead of the + bio chain. */ +#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) +/* The per-zone write lock is held for this request */ +#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) +/* already slept for hybrid poll */ +#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) +/* ->timeout has been called, don't expire again */ +#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) + +/* flags that prevent us from merging requests: */ +#define RQF_NOMERGE_FLAGS \ + (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) + +/* + * Request state for blk-mq. + */ +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +/* + * Try to put the fields that are referenced together in the same cacheline. + * + * If you modify this structure, make sure to update blk_rq_init() and + * especially blk_mq_rq_ctx_init() to take care of the added fields. 
+ */ +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + + int cpu; + unsigned int cmd_flags; /* op and common flags */ + req_flags_t rq_flags; + + int internal_tag; + + /* the following two fields are internal, NEVER access directly */ + unsigned int __data_len; /* total data len */ + int tag; + sector_t __sector; /* sector cursor */ + + struct bio *bio; + struct bio *biotail; + + struct list_head queuelist; + + /* + * The hash is used inside the scheduler, and killed once the + * request reaches the dispatch list. The ipi_list is only used + * to queue the request for softirq completion, which is long + * after the request has been unhashed (and even removed from + * the dispatch list). + */ + union { + struct hlist_node hash; /* merge hash */ + struct list_head ipi_list; + }; + + /* + * The rb_node is only used inside the io scheduler, requests + * are pruned when moved to the dispatch queue. So let the + * completion_data share space with the rb_node. + */ + union { + struct rb_node rb_node; /* sort/lookup */ + struct bio_vec special_vec; + void *completion_data; + int error_count; /* for legacy drivers, don't use */ + }; + + /* + * Three pointers are available for the IO schedulers, if they need + * more they have to dynamically allocate it. Flush requests are + * never put on the IO scheduler. So let the flush fields share + * space with the elevator data. + */ + union { + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + + struct { + unsigned int seq; + struct list_head list; + rq_end_io_fn *saved_end_io; + } flush; + }; + + struct gendisk *rq_disk; + struct hd_struct *part; + /* Time that I/O was submitted to the kernel. */ + u64 start_time_ns; + /* Time that I/O was submitted to the device. */ + u64 io_start_time_ns; + +#ifdef CONFIG_BLK_WBT + unsigned short wbt_flags; +#endif +#ifdef CONFIG_BLK_DEV_THROTTLING_LOW + unsigned short throtl_size; +#endif + + /* + * Number of scatter-gather DMA addr+len pairs after + * physical address coalescing is performed. + */ + unsigned short nr_phys_segments; + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + unsigned short nr_integrity_segments; +#endif + + unsigned short write_hint; + unsigned short ioprio; + + void *special; /* opaque pointer available for LLD use */ + + unsigned int extra_len; /* length of alignment and padding */ + + enum mq_rq_state state; + refcount_t ref; + + unsigned int timeout; + + /* access through blk_rq_set_deadline, blk_rq_deadline */ + unsigned long __deadline; + + struct list_head timeout_list; + + union { + struct __call_single_data csd; + u64 fifo_time; + }; + + /* + * completion callback. 
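+ * Invoked with this request and its blk_status_t once the request is
+ * finished (see rq_end_io_fn above); end_io_data below is available
+ * for the callback's private use.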
+ */ + rq_end_io_fn *end_io; + void *end_io_data; + + /* for bidi */ + struct request *next_rq; + +#ifdef CONFIG_BLK_CGROUP + struct request_list *rl; /* rl this rq is alloced from */ +#endif +}; + +static inline bool blk_op_is_scsi(unsigned int op) +{ + return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT; +} + +static inline bool blk_op_is_private(unsigned int op) +{ + return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; +} + +static inline bool blk_rq_is_scsi(struct request *rq) +{ + return blk_op_is_scsi(req_op(rq)); +} + +static inline bool blk_rq_is_private(struct request *rq) +{ + return blk_op_is_private(req_op(rq)); +} + +static inline bool blk_rq_is_passthrough(struct request *rq) +{ + return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); +} + +static inline bool bio_is_passthrough(struct bio *bio) +{ + unsigned op = bio_op(bio); + + return blk_op_is_scsi(op) || blk_op_is_private(op); +} + +static inline unsigned short req_get_ioprio(struct request *req) +{ + return req->ioprio; +} + +#include + +struct blk_queue_ctx; + +typedef void (request_fn_proc) (struct request_queue *q); +typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); +typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t); +typedef int (prep_rq_fn) (struct request_queue *, struct request *); +typedef void (unprep_rq_fn) (struct request_queue *, struct request *); + +struct bio_vec; +typedef void (softirq_done_fn)(struct request *); +typedef int (dma_drain_needed_fn)(struct request *); +typedef int (lld_busy_fn) (struct request_queue *q); +typedef int (bsg_job_fn) (struct bsg_job *); +typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t); +typedef void (exit_rq_fn)(struct request_queue *, struct request *); + +enum blk_eh_timer_return { + BLK_EH_DONE, /* drivers has completed the command */ + BLK_EH_RESET_TIMER, /* reset timer and try again */ +}; + +typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); + +enum blk_queue_state { + Queue_down, + Queue_up, +}; + +struct blk_queue_tag { + struct request **tag_index; /* map of busy tags */ + unsigned long *tag_map; /* bit map of free/busy tags */ + int max_depth; /* what we will send to device */ + int real_max_depth; /* what the array can hold */ + atomic_t refcnt; /* map can be shared */ + int alloc_policy; /* tag allocation policy */ + int next_tag; /* next tag */ +}; +#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ +#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ + +#define BLK_SCSI_MAX_CMDS (256) +#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) + +/* + * Zoned block device models (zoned limit). 
+ */ +enum blk_zoned_model { + BLK_ZONED_NONE, /* Regular block device */ + BLK_ZONED_HA, /* Host-aware zoned block device */ + BLK_ZONED_HM, /* Host-managed zoned block device */ +}; + +struct queue_limits { + unsigned long bounce_pfn; + unsigned long seg_boundary_mask; + unsigned long virt_boundary_mask; + + unsigned int max_hw_sectors; + unsigned int max_dev_sectors; + unsigned int chunk_sectors; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int logical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int max_discard_sectors; + unsigned int max_hw_discard_sectors; + unsigned int max_write_same_sectors; + unsigned int max_write_zeroes_sectors; + unsigned int discard_granularity; + unsigned int discard_alignment; + + unsigned short max_segments; + unsigned short max_integrity_segments; + unsigned short max_discard_segments; + + unsigned char misaligned; + unsigned char discard_misaligned; + unsigned char cluster; + unsigned char raid_partial_stripes_expensive; + enum blk_zoned_model zoned; +}; + +#ifdef CONFIG_BLK_DEV_ZONED + +struct blk_zone_report_hdr { + unsigned int nr_zones; + u8 padding[60]; +}; + +extern int blkdev_report_zones(struct block_device *bdev, + sector_t sector, struct blk_zone *zones, + unsigned int *nr_zones, gfp_t gfp_mask); +extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, + sector_t nr_sectors, gfp_t gfp_mask); + +extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); +extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); + +#else /* CONFIG_BLK_DEV_ZONED */ + +static inline int blkdev_report_zones_ioctl(struct block_device *bdev, + fmode_t mode, unsigned int cmd, + unsigned long arg) +{ + return -ENOTTY; +} + +static inline int blkdev_reset_zones_ioctl(struct block_device *bdev, + fmode_t mode, unsigned int cmd, + unsigned long arg) +{ + return -ENOTTY; +} + +#endif /* CONFIG_BLK_DEV_ZONED */ + +struct request_queue { + /* + * Together with queue_head for cacheline sharing + */ + struct list_head queue_head; + struct request *last_merge; + struct elevator_queue *elevator; + int nr_rqs[2]; /* # allocated [a]sync rqs */ + int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ + + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + + /* + * If blkcg is not used, @q->root_rl serves all requests. If blkcg + * is used, root blkg allocates from @q->root_rl and all other + * blkgs from their own blkg->rl. Which one to use should be + * determined using bio_request_list(). 
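+ * (blk_get_rl()/blk_put_rl() in blk-cgroup.h wrap this selection,
+ * together with the reference counting on the chosen blkg.)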
+ */ + struct request_list root_rl; + + request_fn_proc *request_fn; + make_request_fn *make_request_fn; + poll_q_fn *poll_fn; + prep_rq_fn *prep_rq_fn; + unprep_rq_fn *unprep_rq_fn; + softirq_done_fn *softirq_done_fn; + rq_timed_out_fn *rq_timed_out_fn; + dma_drain_needed_fn *dma_drain_needed; + lld_busy_fn *lld_busy_fn; + /* Called just after a request is allocated */ + init_rq_fn *init_rq_fn; + /* Called just before a request is freed */ + exit_rq_fn *exit_rq_fn; + /* Called from inside blk_get_request() */ + void (*initialize_rq_fn)(struct request *rq); + + const struct blk_mq_ops *mq_ops; + + unsigned int *mq_map; + + /* sw queues */ + struct blk_mq_ctx __percpu *queue_ctx; + unsigned int nr_queues; + + unsigned int queue_depth; + + /* hw dispatch queues */ + struct blk_mq_hw_ctx **queue_hw_ctx; + unsigned int nr_hw_queues; + + /* + * Dispatch queue sorting + */ + sector_t end_sector; + struct request *boundary_rq; + + /* + * Delayed queue handling + */ + struct delayed_work delay_work; + + struct backing_dev_info *backing_dev_info; + + /* + * The queue owner gets to use this for whatever they like. + * ll_rw_blk doesn't touch it. + */ + void *queuedata; + + /* + * various queue flags, see QUEUE_* below + */ + unsigned long queue_flags; + /* + * Number of contexts that have called blk_set_pm_only(). If this + * counter is above zero then only RQF_PM and RQF_PREEMPT requests are + * processed. + */ + atomic_t pm_only; + + /* + * ida allocated id for this queue. Used to index queues from + * ioctx. + */ + int id; + + /* + * queue needs bounce pages for pages above this limit + */ + gfp_t bounce_gfp; + + /* + * protects queue structures from reentrancy. ->__queue_lock should + * _never_ be used directly, it is queue private. always use + * ->queue_lock. + */ + spinlock_t __queue_lock; + spinlock_t *queue_lock; + + /* + * queue kobject + */ + struct kobject kobj; + + /* + * mq queue kobject + */ + struct kobject mq_kobj; + +#ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity integrity; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +#ifdef CONFIG_PM + struct device *dev; + int rpm_status; + unsigned int nr_pending; +#endif + + /* + * queue settings + */ + unsigned long nr_requests; /* Max # of requests */ + unsigned int nr_congestion_on; + unsigned int nr_congestion_off; + unsigned int nr_batching; + + unsigned int dma_drain_size; + void *dma_drain_buffer; + unsigned int dma_pad_mask; + unsigned int dma_alignment; + + struct blk_queue_tag *queue_tags; + + unsigned int nr_sorted; + unsigned int in_flight[2]; + + /* + * Number of active block driver functions for which blk_drain_queue() + * must wait. Must be incremented around functions that unlock the + * queue_lock internally, e.g. scsi_request_fn(). + */ + unsigned int request_fn_active; + + unsigned int rq_timeout; + int poll_nsec; + + struct blk_stat_callback *poll_cb; + struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS]; + + struct timer_list timeout; + struct work_struct timeout_work; + struct list_head timeout_list; + + struct list_head icq_list; +#ifdef CONFIG_BLK_CGROUP + DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS); + struct blkcg_gq *root_blkg; + struct list_head blkg_list; +#endif + + struct queue_limits limits; + +#ifdef CONFIG_BLK_DEV_ZONED + /* + * Zoned block device information for request dispatch control. + * nr_zones is the total number of zones of the device. This is always + * 0 for regular block devices. 
seq_zones_bitmap is a bitmap of nr_zones + * bits which indicates if a zone is conventional (bit clear) or + * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones + * bits which indicates if a zone is write locked, that is, if a write + * request targeting the zone was dispatched. All three fields are + * initialized by the low level device driver (e.g. scsi/sd.c). + * Stacking drivers (device mappers) may or may not initialize + * these fields. + * + * Reads of this information must be protected with blk_queue_enter() / + * blk_queue_exit(). Modifying this information is only allowed while + * no requests are being processed. See also blk_mq_freeze_queue() and + * blk_mq_unfreeze_queue(). + */ + unsigned int nr_zones; + unsigned long *seq_zones_bitmap; + unsigned long *seq_zones_wlock; +#endif /* CONFIG_BLK_DEV_ZONED */ + + /* + * sg stuff + */ + unsigned int sg_timeout; + unsigned int sg_reserved_size; + int node; +#ifdef CONFIG_BLK_DEV_IO_TRACE + struct blk_trace __rcu *blk_trace; + struct mutex blk_trace_mutex; +#endif + /* + * for flush operations + */ + struct blk_flush_queue *fq; + + struct list_head requeue_list; + spinlock_t requeue_lock; + struct delayed_work requeue_work; + + struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; + + int bypass_depth; + atomic_t mq_freeze_depth; + +#if defined(CONFIG_BLK_DEV_BSG) + bsg_job_fn *bsg_job_fn; + struct bsg_class_device bsg_dev; +#endif + +#ifdef CONFIG_BLK_DEV_THROTTLING + /* Throttle data */ + struct throtl_data *td; +#endif + struct rcu_head rcu_head; + wait_queue_head_t mq_freeze_wq; + struct percpu_ref q_usage_counter; + struct list_head all_q_node; + + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; + struct bio_set bio_split; + +#ifdef CONFIG_BLK_DEBUG_FS + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; +#endif + + bool mq_sysfs_init_done; + + size_t cmd_size; + void *rq_alloc_data; + + struct work_struct release_work; + +#define BLK_MAX_WRITE_HINTS 5 + u64 write_hints[BLK_MAX_WRITE_HINTS]; +}; + +#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */ +#define QUEUE_FLAG_STOPPED 1 /* queue is stopped */ +#define QUEUE_FLAG_DYING 2 /* queue being torn down */ +#define QUEUE_FLAG_BYPASS 3 /* act as dumb FIFO queue */ +#define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */ +#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */ +#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */ +#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */ +#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */ +#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ +#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */ +#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */ +#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */ +#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */ +#define QUEUE_FLAG_SECERASE 14 /* supports secure erase */ +#define QUEUE_FLAG_SAME_FORCE 15 /* force complete on same CPU */ +#define QUEUE_FLAG_DEAD 16 /* queue tear-down finished */ +#define QUEUE_FLAG_INIT_DONE 17 /* queue is initialized */ +#define QUEUE_FLAG_NO_SG_MERGE 18 /* don't attempt to merge SG segments*/ +#define QUEUE_FLAG_POLL 19 /* IO polling enabled if set */ +#define QUEUE_FLAG_WC 20 /* Write back caching */ +#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */ +#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */ +#define QUEUE_FLAG_DAX 23 /* device supports DAX */ +#define QUEUE_FLAG_STATS 24 /* track rq completion times */ +#define QUEUE_FLAG_POLL_STATS 
25 /* collecting stats for hybrid polling */ +#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */ +#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ +#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ + +#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << QUEUE_FLAG_SAME_COMP) | \ + (1 << QUEUE_FLAG_ADD_RANDOM)) + +#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << QUEUE_FLAG_SAME_COMP) | \ + (1 << QUEUE_FLAG_POLL)) + +void blk_queue_flag_set(unsigned int flag, struct request_queue *q); +void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); +bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); +bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); + +#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) +#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) +#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) +#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) +#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) +#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) +#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) +#define blk_queue_noxmerges(q) \ + test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) +#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) +#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) +#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) +#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) +#define blk_queue_secure_erase(q) \ + (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) +#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) +#define blk_queue_scsi_passthrough(q) \ + test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags) + +#define blk_noretry_request(rq) \ + ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ + REQ_FAILFAST_DRIVER)) +#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) +#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only) +#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) +#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags) + +extern void blk_set_pm_only(struct request_queue *q); +extern void blk_clear_pm_only(struct request_queue *q); + +static inline int queue_in_flight(struct request_queue *q) +{ + return q->in_flight[0] + q->in_flight[1]; +} + +static inline bool blk_account_rq(struct request *rq) +{ + return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); +} + +#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) +#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) +/* rq->queuelist of dequeued request must be list_empty() */ +#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) + +#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) + +#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) + +/* + * Driver can handle struct request, if it either has an old style + * request_fn defined, or is blk-mq based. 
+ */ +static inline bool queue_is_rq_based(struct request_queue *q) +{ + return q->request_fn || q->mq_ops; +} + +static inline unsigned int blk_queue_cluster(struct request_queue *q) +{ + return q->limits.cluster; +} + +static inline enum blk_zoned_model +blk_queue_zoned_model(struct request_queue *q) +{ + return q->limits.zoned; +} + +static inline bool blk_queue_is_zoned(struct request_queue *q) +{ + switch (blk_queue_zoned_model(q)) { + case BLK_ZONED_HA: + case BLK_ZONED_HM: + return true; + default: + return false; + } +} + +static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) +{ + return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; +} + +#ifdef CONFIG_BLK_DEV_ZONED +static inline unsigned int blk_queue_zone_no(struct request_queue *q, + sector_t sector) +{ + if (!blk_queue_is_zoned(q)) + return 0; + return sector >> ilog2(q->limits.chunk_sectors); +} + +static inline bool blk_queue_zone_is_seq(struct request_queue *q, + sector_t sector) +{ + if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap) + return false; + return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap); +} +#endif /* CONFIG_BLK_DEV_ZONED */ + +static inline bool rq_is_sync(struct request *rq) +{ + return op_is_sync(rq->cmd_flags); +} + +static inline bool blk_rl_full(struct request_list *rl, bool sync) +{ + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + return rl->flags & flag; +} + +static inline void blk_set_rl_full(struct request_list *rl, bool sync) +{ + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + rl->flags |= flag; +} + +static inline void blk_clear_rl_full(struct request_list *rl, bool sync) +{ + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + rl->flags &= ~flag; +} + +static inline bool rq_mergeable(struct request *rq) +{ + if (blk_rq_is_passthrough(rq)) + return false; + + if (req_op(rq) == REQ_OP_FLUSH) + return false; + + if (req_op(rq) == REQ_OP_WRITE_ZEROES) + return false; + + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) + return false; + if (rq->rq_flags & RQF_NOMERGE_FLAGS) + return false; + + return true; +} + +static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) +{ + if (bio_page(a) == bio_page(b) && + bio_offset(a) == bio_offset(b)) + return true; + + return false; +} + +static inline unsigned int blk_queue_depth(struct request_queue *q) +{ + if (q->queue_depth) + return q->queue_depth; + + return q->nr_requests; +} + +/* + * q->prep_rq_fn return values + */ +enum { + BLKPREP_OK, /* serve it */ + BLKPREP_KILL, /* fatal error, kill, return -EIO */ + BLKPREP_DEFER, /* leave on queue */ + BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */ +}; + +extern unsigned long blk_max_low_pfn, blk_max_pfn; + +/* + * standard bounce addresses: + * + * BLK_BOUNCE_HIGH : bounce all highmem pages + * BLK_BOUNCE_ANY : don't bounce anything + * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary + */ + +#if BITS_PER_LONG == 32 +#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) +#else +#define BLK_BOUNCE_HIGH -1ULL +#endif +#define BLK_BOUNCE_ANY (-1ULL) +#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) + +/* + * default timeout for SG_IO if none specified + */ +#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) +#define BLK_MIN_SG_TIMEOUT (7 * HZ) + +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; + unsigned long offset; + int null_mapped; + int from_user; +}; + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +/* This should not be used 
directly - use rq_for_each_segment */ +#define for_each_bio(_bio) \ + for (; _bio; _bio = _bio->bi_next) +#define __rq_for_each_bio(_bio, rq) \ + if ((rq->bio)) \ + for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) + +#define rq_for_each_segment(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bio_for_each_segment(bvl, _iter.bio, _iter.iter) + +#define rq_iter_last(bvec, _iter) \ + (_iter.bio->bi_next == NULL && \ + bio_iter_last(bvec, _iter.iter)) + +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" +#endif +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +extern void rq_flush_dcache_pages(struct request *rq); +#else +static inline void rq_flush_dcache_pages(struct request *rq) +{ +} +#endif + +extern int blk_register_queue(struct gendisk *disk); +extern void blk_unregister_queue(struct gendisk *disk); +extern blk_qc_t generic_make_request(struct bio *bio); +extern blk_qc_t direct_make_request(struct bio *bio); +extern void blk_rq_init(struct request_queue *q, struct request *rq); +extern void blk_init_request_from_bio(struct request *req, struct bio *bio); +extern void blk_put_request(struct request *); +extern void __blk_put_request(struct request_queue *, struct request *); +extern struct request *blk_get_request(struct request_queue *, unsigned int op, + blk_mq_req_flags_t flags); +extern void blk_requeue_request(struct request_queue *, struct request *); +extern int blk_lld_busy(struct request_queue *q); +extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, + struct bio_set *bs, gfp_t gfp_mask, + int (*bio_ctr)(struct bio *, struct bio *, void *), + void *data); +extern void blk_rq_unprep_clone(struct request *rq); +extern blk_status_t blk_insert_cloned_request(struct request_queue *q, + struct request *rq); +extern int blk_rq_append_bio(struct request *rq, struct bio **bio); +extern void blk_delay_queue(struct request_queue *, unsigned long); +extern void blk_queue_split(struct request_queue *, struct bio **); +extern void blk_recount_segments(struct request_queue *, struct bio *); +extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); +extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, + unsigned int, void __user *); +extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, + unsigned int, void __user *); +extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, + struct scsi_ioctl_command __user *); + +extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); +extern void blk_queue_exit(struct request_queue *q); +extern void blk_start_queue(struct request_queue *q); +extern void blk_start_queue_async(struct request_queue *q); +extern void blk_stop_queue(struct request_queue *q); +extern void blk_sync_queue(struct request_queue *q); +extern void __blk_stop_queue(struct request_queue *q); +extern void __blk_run_queue(struct request_queue *q); +extern void __blk_run_queue_uncond(struct request_queue *q); +extern void blk_run_queue(struct request_queue *); +extern void blk_run_queue_async(struct request_queue *q); +extern int blk_rq_map_user(struct request_queue *, struct request *, + struct rq_map_data *, void __user *, unsigned long, + gfp_t); +extern int blk_rq_unmap_user(struct bio *); +extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); +extern int blk_rq_map_user_iov(struct request_queue *, struct request *, + struct rq_map_data *, const struct iov_iter 
*, + gfp_t); +extern void blk_execute_rq(struct request_queue *, struct gendisk *, + struct request *, int); +extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, + struct request *, int, rq_end_io_fn *); + +int blk_status_to_errno(blk_status_t status); +blk_status_t errno_to_blk_status(int errno); + +bool blk_poll(struct request_queue *q, blk_qc_t cookie); + +static inline struct request_queue *bdev_get_queue(struct block_device *bdev) +{ + return bdev->bd_disk->queue; /* this is never NULL */ +} + +/* + * The basic unit of block I/O is a sector. It is used in a number of contexts + * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9 + * bytes. Variables of type sector_t represent an offset or size that is a + * multiple of 512 bytes. Hence these two constants. + */ +#ifndef SECTOR_SHIFT +#define SECTOR_SHIFT 9 +#endif +#ifndef SECTOR_SIZE +#define SECTOR_SIZE (1 << SECTOR_SHIFT) +#endif + +/* + * blk_rq_pos() : the current sector + * blk_rq_bytes() : bytes left in the entire request + * blk_rq_cur_bytes() : bytes left in the current segment + * blk_rq_err_bytes() : bytes left till the next error boundary + * blk_rq_sectors() : sectors left in the entire request + * blk_rq_cur_sectors() : sectors left in the current segment + */ +static inline sector_t blk_rq_pos(const struct request *rq) +{ + return rq->__sector; +} + +static inline unsigned int blk_rq_bytes(const struct request *rq) +{ + return rq->__data_len; +} + +static inline int blk_rq_cur_bytes(const struct request *rq) +{ + return rq->bio ? bio_cur_bytes(rq->bio) : 0; +} + +extern unsigned int blk_rq_err_bytes(const struct request *rq); + +static inline unsigned int blk_rq_sectors(const struct request *rq) +{ + return blk_rq_bytes(rq) >> SECTOR_SHIFT; +} + +static inline unsigned int blk_rq_cur_sectors(const struct request *rq) +{ + return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; +} + +#ifdef CONFIG_BLK_DEV_ZONED +static inline unsigned int blk_rq_zone_no(struct request *rq) +{ + return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); +} + +static inline unsigned int blk_rq_zone_is_seq(struct request *rq) +{ + return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); +} +#endif /* CONFIG_BLK_DEV_ZONED */ + +/* + * Some commands like WRITE SAME have a payload or data transfer size which + * is different from the size of the request. Any driver that supports such + * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to + * calculate the data transfer size. + */ +static inline unsigned int blk_rq_payload_bytes(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return rq->special_vec.bv_len; + return blk_rq_bytes(rq); +} + +static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, + int op) +{ + if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) + return min(q->limits.max_discard_sectors, + UINT_MAX >> SECTOR_SHIFT); + + if (unlikely(op == REQ_OP_WRITE_SAME)) + return q->limits.max_write_same_sectors; + + if (unlikely(op == REQ_OP_WRITE_ZEROES)) + return q->limits.max_write_zeroes_sectors; + + return q->limits.max_sectors; +} + +/* + * Return maximum size of a request at given offset. Only valid for + * file system requests. 
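+ *
+ * As an illustrative worked example (the numbers are hypothetical, not
+ * taken from any device): with chunk_sectors = 256 and an I/O starting
+ * 200 sectors into its chunk, 256 - (200 & 255) = 56 sectors remain up
+ * to the chunk boundary, so the result is capped at min(max_sectors, 56).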
+ */ +static inline unsigned int blk_max_size_offset(struct request_queue *q, + sector_t offset) +{ + if (!q->limits.chunk_sectors) + return q->limits.max_sectors; + + return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors - + (offset & (q->limits.chunk_sectors - 1)))); +} + +static inline unsigned int blk_rq_get_max_sectors(struct request *rq, + sector_t offset) +{ + struct request_queue *q = rq->q; + + if (blk_rq_is_passthrough(rq)) + return q->limits.max_hw_sectors; + + if (!q->limits.chunk_sectors || + req_op(rq) == REQ_OP_DISCARD || + req_op(rq) == REQ_OP_SECURE_ERASE) + return blk_queue_get_max_sectors(q, req_op(rq)); + + return min(blk_max_size_offset(q, offset), + blk_queue_get_max_sectors(q, req_op(rq))); +} + +static inline unsigned int blk_rq_count_bios(struct request *rq) +{ + unsigned int nr_bios = 0; + struct bio *bio; + + __rq_for_each_bio(bio, rq) + nr_bios++; + + return nr_bios; +} + +/* + * Request issue related functions. + */ +extern struct request *blk_peek_request(struct request_queue *q); +extern void blk_start_request(struct request *rq); +extern struct request *blk_fetch_request(struct request_queue *q); + +void blk_steal_bios(struct bio_list *list, struct request *rq); + +/* + * Request completion related functions. + * + * blk_update_request() completes given number of bytes and updates + * the request without completing it. + * + * blk_end_request() and friends. __blk_end_request() must be called + * with the request queue spinlock acquired. + * + * Several drivers define their own end_request and call + * blk_end_request() for parts of the original function. + * This prevents code duplication in drivers. + */ +extern bool blk_update_request(struct request *rq, blk_status_t error, + unsigned int nr_bytes); +extern void blk_finish_request(struct request *rq, blk_status_t error); +extern bool blk_end_request(struct request *rq, blk_status_t error, + unsigned int nr_bytes); +extern void blk_end_request_all(struct request *rq, blk_status_t error); +extern bool __blk_end_request(struct request *rq, blk_status_t error, + unsigned int nr_bytes); +extern void __blk_end_request_all(struct request *rq, blk_status_t error); +extern bool __blk_end_request_cur(struct request *rq, blk_status_t error); + +extern void blk_complete_request(struct request *); +extern void __blk_complete_request(struct request *); +extern void blk_abort_request(struct request *); +extern void blk_unprep_request(struct request *); + +/* + * Access functions for manipulating queue properties + */ +extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, + spinlock_t *lock, int node_id); +extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); +extern int blk_init_allocated_queue(struct request_queue *); +extern void blk_cleanup_queue(struct request_queue *); +extern void blk_queue_make_request(struct request_queue *, make_request_fn *); +extern void blk_queue_bounce_limit(struct request_queue *, u64); +extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); +extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); +extern void blk_queue_max_segments(struct request_queue *, unsigned short); +extern void blk_queue_max_discard_segments(struct request_queue *, + unsigned short); +extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); +extern void blk_queue_max_discard_sectors(struct request_queue *q, + unsigned int max_discard_sectors); +extern void blk_queue_max_write_same_sectors(struct 
request_queue *q, + unsigned int max_write_same_sectors); +extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, + unsigned int max_write_same_sectors); +extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); +extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); +extern void blk_queue_alignment_offset(struct request_queue *q, + unsigned int alignment); +extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); +extern void blk_queue_io_min(struct request_queue *q, unsigned int min); +extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); +extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); +extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); +extern void blk_set_default_limits(struct queue_limits *lim); +extern void blk_set_stacking_limits(struct queue_limits *lim); +extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + sector_t offset); +extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, + sector_t offset); +extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset); +extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); +extern void blk_queue_dma_pad(struct request_queue *, unsigned int); +extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); +extern int blk_queue_dma_drain(struct request_queue *q, + dma_drain_needed_fn *dma_drain_needed, + void *buf, unsigned int size); +extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); +extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); +extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); +extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); +extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); +extern void blk_queue_dma_alignment(struct request_queue *, int); +extern void blk_queue_update_dma_alignment(struct request_queue *, int); +extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); +extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); +extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); +extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); +extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); + +/* + * Number of physical segments as sent to the device. + * + * Normally this is the number of discontiguous data segments sent by the + * submitter. But for data-less command like discard we might have no + * actual data segments submitted, but the driver might have to add it's + * own special payload. In that case we still return 1 here so that this + * special payload will be mapped. + */ +static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return 1; + return rq->nr_phys_segments; +} + +/* + * Number of discard segments (or ranges) the driver needs to fill in. + * Each discard bio merged into a request is counted as one segment. 
+ */ +static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) +{ + return max_t(unsigned short, rq->nr_phys_segments, 1); +} + +extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); +extern void blk_dump_rq_flags(struct request *, char *); +extern long nr_blockdev_pages(void); + +bool __must_check blk_get_queue(struct request_queue *); +struct request_queue *blk_alloc_queue(gfp_t); +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, + spinlock_t *lock); +extern void blk_put_queue(struct request_queue *); +extern void blk_set_queue_dying(struct request_queue *); + +/* + * block layer runtime pm functions + */ +#ifdef CONFIG_PM +extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); +extern int blk_pre_runtime_suspend(struct request_queue *q); +extern void blk_post_runtime_suspend(struct request_queue *q, int err); +extern void blk_pre_runtime_resume(struct request_queue *q); +extern void blk_post_runtime_resume(struct request_queue *q, int err); +extern void blk_set_runtime_active(struct request_queue *q); +#else +static inline void blk_pm_runtime_init(struct request_queue *q, + struct device *dev) {} +static inline int blk_pre_runtime_suspend(struct request_queue *q) +{ + return -ENOSYS; +} +static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} +static inline void blk_pre_runtime_resume(struct request_queue *q) {} +static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} +static inline void blk_set_runtime_active(struct request_queue *q) {} +#endif + +/* + * blk_plug permits building a queue of related requests by holding the I/O + * fragments for a short period. This allows merging of sequential requests + * into single larger request. As the requests are moved from a per-task list to + * the device's request_queue in a batch, this results in improved scalability + * as the lock contention for request_queue lock is reduced. + * + * It is ok not to disable preemption when adding the request to the plug list + * or when attempting a merge, because blk_schedule_flush_list() will only flush + * the plug list when the task sleeps by itself. For details, please see + * schedule() where blk_schedule_flush_plug() is called. 
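+ *
+ * A minimal usage sketch (illustrative only; blk_start_plug() and
+ * blk_finish_plug() are the helpers declared below, and the middle step
+ * stands in for any bio submission path such as generic_make_request()):
+ *
+ *	struct blk_plug plug;
+ *
+ *	blk_start_plug(&plug);
+ *	...submit a batch of bios...
+ *	blk_finish_plug(&plug);
+ *
+ * Requests queued between the two calls sit on the per-task plug lists
+ * and are flushed to the device in one batch when the plug is finished
+ * (or earlier, when the task sleeps, as described above).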
+ */ +struct blk_plug { + struct list_head list; /* requests */ + struct list_head mq_list; /* blk-mq requests */ + struct list_head cb_list; /* md requires an unplug callback */ +}; +#define BLK_MAX_REQUEST_COUNT 16 +#define BLK_PLUG_FLUSH_SIZE (128 * 1024) + +struct blk_plug_cb; +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); +struct blk_plug_cb { + struct list_head list; + blk_plug_cb_fn callback; + void *data; +}; +extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, + void *data, int size); +extern void blk_start_plug(struct blk_plug *); +extern void blk_finish_plug(struct blk_plug *); +extern void blk_flush_plug_list(struct blk_plug *, bool); + +static inline void blk_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + if (plug) + blk_flush_plug_list(plug, false); +} + +static inline void blk_schedule_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + if (plug) + blk_flush_plug_list(plug, true); +} + +static inline bool blk_needs_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + return plug && + (!list_empty(&plug->list) || + !list_empty(&plug->mq_list) || + !list_empty(&plug->cb_list)); +} + +/* + * tag stuff + */ +extern int blk_queue_start_tag(struct request_queue *, struct request *); +extern struct request *blk_queue_find_tag(struct request_queue *, int); +extern void blk_queue_end_tag(struct request_queue *, struct request *); +extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int); +extern void blk_queue_free_tags(struct request_queue *); +extern int blk_queue_resize_tags(struct request_queue *, int); +extern struct blk_queue_tag *blk_init_tags(int, int); +extern void blk_free_tags(struct blk_queue_tag *); + +static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, + int tag) +{ + if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) + return NULL; + return bqt->tag_index[tag]; +} + +extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); +extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, struct page *page); + +#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ + +extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); +extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, int flags, + struct bio **biop); + +#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */ +#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */ + +extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, + unsigned flags); +extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, unsigned flags); + +static inline int sb_issue_discard(struct super_block *sb, sector_t block, + sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) +{ + return blkdev_issue_discard(sb->s_bdev, + block << (sb->s_blocksize_bits - + SECTOR_SHIFT), + nr_blocks << (sb->s_blocksize_bits - + SECTOR_SHIFT), + gfp_mask, flags); +} +static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, + sector_t nr_blocks, gfp_t gfp_mask) +{ + return blkdev_issue_zeroout(sb->s_bdev, + block << (sb->s_blocksize_bits - + SECTOR_SHIFT), + nr_blocks << 
(sb->s_blocksize_bits - + SECTOR_SHIFT), + gfp_mask, 0); +} + +extern int blk_verify_command(unsigned char *cmd, fmode_t mode); + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + BLK_SAFE_MAX_SECTORS = 255, + BLK_DEF_MAX_SECTORS = 2560, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, +}; + +static inline unsigned long queue_segment_boundary(struct request_queue *q) +{ + return q->limits.seg_boundary_mask; +} + +static inline unsigned long queue_virt_boundary(struct request_queue *q) +{ + return q->limits.virt_boundary_mask; +} + +static inline unsigned int queue_max_sectors(struct request_queue *q) +{ + return q->limits.max_sectors; +} + +static inline unsigned int queue_max_hw_sectors(struct request_queue *q) +{ + return q->limits.max_hw_sectors; +} + +static inline unsigned short queue_max_segments(struct request_queue *q) +{ + return q->limits.max_segments; +} + +static inline unsigned short queue_max_discard_segments(struct request_queue *q) +{ + return q->limits.max_discard_segments; +} + +static inline unsigned int queue_max_segment_size(struct request_queue *q) +{ + return q->limits.max_segment_size; +} + +static inline unsigned queue_logical_block_size(struct request_queue *q) +{ + int retval = 512; + + if (q && q->limits.logical_block_size) + retval = q->limits.logical_block_size; + + return retval; +} + +static inline unsigned int bdev_logical_block_size(struct block_device *bdev) +{ + return queue_logical_block_size(bdev_get_queue(bdev)); +} + +static inline unsigned int queue_physical_block_size(struct request_queue *q) +{ + return q->limits.physical_block_size; +} + +static inline unsigned int bdev_physical_block_size(struct block_device *bdev) +{ + return queue_physical_block_size(bdev_get_queue(bdev)); +} + +static inline unsigned int queue_io_min(struct request_queue *q) +{ + return q->limits.io_min; +} + +static inline int bdev_io_min(struct block_device *bdev) +{ + return queue_io_min(bdev_get_queue(bdev)); +} + +static inline unsigned int queue_io_opt(struct request_queue *q) +{ + return q->limits.io_opt; +} + +static inline int bdev_io_opt(struct block_device *bdev) +{ + return queue_io_opt(bdev_get_queue(bdev)); +} + +static inline int queue_alignment_offset(struct request_queue *q) +{ + if (q->limits.misaligned) + return -1; + + return q->limits.alignment_offset; +} + +static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) +{ + unsigned int granularity = max(lim->physical_block_size, lim->io_min); + unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT) + << SECTOR_SHIFT; + + return (granularity + lim->alignment_offset - alignment) % granularity; +} + +static inline int bdev_alignment_offset(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q->limits.misaligned) + return -1; + + if (bdev != bdev->bd_contains) + return bdev->bd_part->alignment_offset; + + return q->limits.alignment_offset; +} + +static inline int queue_discard_alignment(struct request_queue *q) +{ + if (q->limits.discard_misaligned) + return -1; + + return q->limits.discard_alignment; +} + +static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) +{ + unsigned int alignment, granularity, offset; + + if (!lim->max_discard_sectors) + return 0; + + /* Why are these in bytes, not sectors? 
*/ + alignment = lim->discard_alignment >> SECTOR_SHIFT; + granularity = lim->discard_granularity >> SECTOR_SHIFT; + if (!granularity) + return 0; + + /* Offset of the partition start in 'granularity' sectors */ + offset = sector_div(sector, granularity); + + /* And why do we do this modulus *again* in blkdev_issue_discard()? */ + offset = (granularity + alignment - offset) % granularity; + + /* Turn it back into bytes, gaah */ + return offset << SECTOR_SHIFT; +} + +static inline int bdev_discard_alignment(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (bdev != bdev->bd_contains) + return bdev->bd_part->discard_alignment; + + return q->limits.discard_alignment; +} + +static inline unsigned int bdev_write_same(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return q->limits.max_write_same_sectors; + + return 0; +} + +static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return q->limits.max_write_zeroes_sectors; + + return 0; +} + +static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_zoned_model(q); + + return BLK_ZONED_NONE; +} + +static inline bool bdev_is_zoned(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_is_zoned(q); + + return false; +} + +static inline unsigned int bdev_zone_sectors(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_zone_sectors(q); + return 0; +} + +static inline int queue_dma_alignment(struct request_queue *q) +{ + return q ? q->dma_alignment : 511; +} + +static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, + unsigned int len) +{ + unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; + return !(addr & alignment) && !(len & alignment); +} + +/* assumes size > 256 */ +static inline unsigned int blksize_bits(unsigned int size) +{ + unsigned int bits = 8; + do { + bits++; + size >>= 1; + } while (size > 256); + return bits; +} + +static inline unsigned int block_size(struct block_device *bdev) +{ + return bdev->bd_block_size; +} + +static inline bool queue_flush_queueable(struct request_queue *q) +{ + return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); +} + +typedef struct {struct page *v;} Sector; + +unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); + +static inline void put_dev_sector(Sector p) +{ + put_page(p.v); +} + +static inline bool __bvec_gap_to_prev(struct request_queue *q, + struct bio_vec *bprv, unsigned int offset) +{ + return offset || + ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); +} + +/* + * Check if adding a bio_vec after bprv with offset would create a gap in + * the SG list. Most drivers don't care about this, but some do. + */ +static inline bool bvec_gap_to_prev(struct request_queue *q, + struct bio_vec *bprv, unsigned int offset) +{ + if (!queue_virt_boundary(q)) + return false; + return __bvec_gap_to_prev(q, bprv, offset); +} + +/* + * Check if the two bvecs from two bios can be merged to one segment. + * If yes, no need to check gap between the two bios since the 1st bio + * and the 1st bvec in the 2nd bio can be handled in one segment. 
+ */ +static inline bool bios_segs_mergeable(struct request_queue *q, + struct bio *prev, struct bio_vec *prev_last_bv, + struct bio_vec *next_first_bv) +{ + if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv)) + return false; + if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv)) + return false; + if (prev->bi_seg_back_size + next_first_bv->bv_len > + queue_max_segment_size(q)) + return false; + return true; +} + +static inline bool bio_will_gap(struct request_queue *q, + struct request *prev_rq, + struct bio *prev, + struct bio *next) +{ + if (bio_has_data(prev) && queue_virt_boundary(q)) { + struct bio_vec pb, nb; + + /* + * don't merge if the 1st bio starts with non-zero + * offset, otherwise it is quite difficult to respect + * sg gap limit. We work hard to merge a huge number of small + * single bios in case of mkfs. + */ + if (prev_rq) + bio_get_first_bvec(prev_rq->bio, &pb); + else + bio_get_first_bvec(prev, &pb); + if (pb.bv_offset) + return true; + + /* + * We don't need to worry about the situation that the + * merged segment ends in unaligned virt boundary: + * + * - if 'pb' ends aligned, the merged segment ends aligned + * - if 'pb' ends unaligned, the next bio must include + * one single bvec of 'nb', otherwise the 'nb' can't + * merge with 'pb' + */ + bio_get_last_bvec(prev, &pb); + bio_get_first_bvec(next, &nb); + + if (!bios_segs_mergeable(q, prev, &pb, &nb)) + return __bvec_gap_to_prev(q, &pb, nb.bv_offset); + } + + return false; +} + +static inline bool req_gap_back_merge(struct request *req, struct bio *bio) +{ + return bio_will_gap(req->q, req, req->biotail, bio); +} + +static inline bool req_gap_front_merge(struct request *req, struct bio *bio) +{ + return bio_will_gap(req->q, NULL, bio, req->bio); +} + +int kblockd_schedule_work(struct work_struct *work); +int kblockd_schedule_work_on(int cpu, struct work_struct *work); +int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); + +#define MODULE_ALIAS_BLOCKDEV(major,minor) \ + MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) +#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ + MODULE_ALIAS("block-major-" __stringify(major) "-*") + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +enum blk_integrity_flags { + BLK_INTEGRITY_VERIFY = 1 << 0, + BLK_INTEGRITY_GENERATE = 1 << 1, + BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, + BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, +}; + +struct blk_integrity_iter { + void *prot_buf; + void *data_buf; + sector_t seed; + unsigned int data_size; + unsigned short interval; + const char *disk_name; +}; + +typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); + +struct blk_integrity_profile { + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; + const char *name; +}; + +extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); +extern void blk_integrity_unregister(struct gendisk *); +extern int blk_integrity_compare(struct gendisk *, struct gendisk *); +extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, + struct scatterlist *); +extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); +extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, + struct request *); +extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, + struct bio *); + +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + struct blk_integrity *bi = &disk->queue->integrity; + + if (!bi->profile) + return 
NULL; + + return bi; +} + +static inline +struct blk_integrity *bdev_get_integrity(struct block_device *bdev) +{ + return blk_get_integrity(bdev->bd_disk); +} + +static inline bool blk_integrity_rq(struct request *rq) +{ + return rq->cmd_flags & REQ_INTEGRITY; +} + +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ + q->limits.max_integrity_segments = segs; +} + +static inline unsigned short +queue_max_integrity_segments(struct request_queue *q) +{ + return q->limits.max_integrity_segments; +} + +static inline bool integrity_req_gap_back_merge(struct request *req, + struct bio *next) +{ + struct bio_integrity_payload *bip = bio_integrity(req->bio); + struct bio_integrity_payload *bip_next = bio_integrity(next); + + return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], + bip_next->bip_vec[0].bv_offset); +} + +static inline bool integrity_req_gap_front_merge(struct request *req, + struct bio *bio) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + struct bio_integrity_payload *bip_next = bio_integrity(req->bio); + + return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], + bip_next->bip_vec[0].bv_offset); +} + +/** + * bio_integrity_intervals - Return number of integrity intervals for a bio + * @bi: blk_integrity profile for device + * @sectors: Size of the bio in 512-byte sectors + * + * Description: The block layer calculates everything in 512 byte + * sectors but integrity metadata is done in terms of the data integrity + * interval size of the storage device. Convert the block layer sectors + * to the appropriate number of integrity intervals. + */ +static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, + unsigned int sectors) +{ + return sectors >> (bi->interval_exp - 9); +} + +static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return bio_integrity_intervals(bi, sectors) * bi->tuple_size; +} + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +struct bio; +struct block_device; +struct gendisk; +struct blk_integrity; + +static inline int blk_integrity_rq(struct request *rq) +{ + return 0; +} +static inline int blk_rq_count_integrity_sg(struct request_queue *q, + struct bio *b) +{ + return 0; +} +static inline int blk_rq_map_integrity_sg(struct request_queue *q, + struct bio *b, + struct scatterlist *s) +{ + return 0; +} +static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) +{ + return NULL; +} +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + return NULL; +} +static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) +{ + return 0; +} +static inline void blk_integrity_register(struct gendisk *d, + struct blk_integrity *b) +{ +} +static inline void blk_integrity_unregister(struct gendisk *d) +{ +} +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ +} +static inline unsigned short queue_max_integrity_segments(struct request_queue *q) +{ + return 0; +} +static inline bool blk_integrity_merge_rq(struct request_queue *rq, + struct request *r1, + struct request *r2) +{ + return true; +} +static inline bool blk_integrity_merge_bio(struct request_queue *rq, + struct request *r, + struct bio *b) +{ + return true; +} + +static inline bool integrity_req_gap_back_merge(struct request *req, + struct bio *next) +{ + return false; +} +static inline bool integrity_req_gap_front_merge(struct request *req, + struct bio *bio) +{ + 
return false; +} + +static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, + unsigned int sectors) +{ + return 0; +} + +static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return 0; +} + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +struct block_device_operations { + int (*open) (struct block_device *, fmode_t); + void (*release) (struct gendisk *, fmode_t); + int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); + int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + unsigned int (*check_events) (struct gendisk *disk, + unsigned int clearing); + /* ->media_changed() is DEPRECATED, use ->check_events() instead */ + int (*media_changed) (struct gendisk *); + void (*unlock_native_capacity) (struct gendisk *); + int (*revalidate_disk) (struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + /* this callback is with swap_lock and sometimes page table lock held */ + void (*swap_slot_free_notify) (struct block_device *, unsigned long); + struct module *owner; + const struct pr_ops *pr_ops; +}; + +extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, + unsigned long); +extern int bdev_read_page(struct block_device *, sector_t, struct page *); +extern int bdev_write_page(struct block_device *, sector_t, struct page *, + struct writeback_control *); + +#ifdef CONFIG_BLK_DEV_ZONED +bool blk_req_needs_zone_write_lock(struct request *rq); +void __blk_req_zone_write_lock(struct request *rq); +void __blk_req_zone_write_unlock(struct request *rq); + +static inline void blk_req_zone_write_lock(struct request *rq) +{ + if (blk_req_needs_zone_write_lock(rq)) + __blk_req_zone_write_lock(rq); +} + +static inline void blk_req_zone_write_unlock(struct request *rq) +{ + if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) + __blk_req_zone_write_unlock(rq); +} + +static inline bool blk_req_zone_is_write_locked(struct request *rq) +{ + return rq->q->seq_zones_wlock && + test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); +} + +static inline bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + if (!blk_req_needs_zone_write_lock(rq)) + return true; + return !blk_req_zone_is_write_locked(rq); +} +#else +static inline bool blk_req_needs_zone_write_lock(struct request *rq) +{ + return false; +} + +static inline void blk_req_zone_write_lock(struct request *rq) +{ +} + +static inline void blk_req_zone_write_unlock(struct request *rq) +{ +} +static inline bool blk_req_zone_is_write_locked(struct request *rq) +{ + return false; +} + +static inline bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + return true; +} +#endif /* CONFIG_BLK_DEV_ZONED */ + +#else /* CONFIG_BLOCK */ + +struct block_device; + +/* + * stubs for when the block layer is configured out + */ +#define buffer_heads_over_limit 0 + +static inline long nr_blockdev_pages(void) +{ + return 0; +} + +struct blk_plug { +}; + +static inline void blk_start_plug(struct blk_plug *plug) +{ +} + +static inline void blk_finish_plug(struct blk_plug *plug) +{ +} + +static inline void blk_flush_plug(struct task_struct *task) +{ +} + +static inline void blk_schedule_flush_plug(struct task_struct *task) +{ +} + + +static inline bool blk_needs_flush_plug(struct task_struct *tsk) +{ + return false; +} + +static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, + sector_t *error_sector) +{ + return 0; +} + 
+#endif /* CONFIG_BLOCK */ + +#endif diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h new file mode 100644 index 000000000..1c91753c3 --- /dev/null +++ b/include/linux/blkpg.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLKPG_H +#define _LINUX_BLKPG_H + +/* + * Partition table and disk geometry handling + */ + +#include +#include + +#ifdef CONFIG_COMPAT +/* For 32-bit/64-bit compatibility of struct blkpg_ioctl_arg */ +struct blkpg_compat_ioctl_arg { + compat_int_t op; + compat_int_t flags; + compat_int_t datalen; + compat_uptr_t data; +}; +#endif + +#endif /* _LINUX_BLKPG_H */ diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h new file mode 100644 index 000000000..3b6ff5902 --- /dev/null +++ b/include/linux/blktrace_api.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef BLKTRACE_H +#define BLKTRACE_H + +#include +#include +#include +#include +#include + +#if defined(CONFIG_BLK_DEV_IO_TRACE) + +#include + +struct blk_trace { + int trace_state; + struct rchan *rchan; + unsigned long __percpu *sequence; + unsigned char __percpu *msg_data; + u16 act_mask; + u64 start_lba; + u64 end_lba; + u32 pid; + u32 dev; + struct dentry *dir; + struct dentry *dropped_file; + struct dentry *msg_file; + struct list_head running_list; + atomic_t dropped; +}; + +struct blkcg; + +extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); +extern void blk_trace_shutdown(struct request_queue *); +extern __printf(3, 4) +void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...); + +/** + * blk_add_trace_msg - Add a (simple) message to the blktrace stream + * @q: queue the io is for + * @fmt: format to print message in + * args... Variable argument list for format + * + * Description: + * Records a (simple) message onto the blktrace stream. + * + * NOTE: BLK_TN_MAX_MSG characters are output at most. + * NOTE: Can not use 'static inline' due to presence of var args... + * + **/ +#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ + do { \ + struct blk_trace *bt; \ + \ + rcu_read_lock(); \ + bt = rcu_dereference((q)->blk_trace); \ + if (unlikely(bt)) \ + __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\ + rcu_read_unlock(); \ + } while (0) +#define blk_add_trace_msg(q, fmt, ...) 
\ + blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) +#define BLK_TN_MAX_MSG 128 + +static inline bool blk_trace_note_message_enabled(struct request_queue *q) +{ + struct blk_trace *bt; + bool ret; + + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + ret = bt && (bt->act_mask & BLK_TC_NOTIFY); + rcu_read_unlock(); + return ret; +} + +extern void blk_add_driver_data(struct request_queue *q, struct request *rq, + void *data, size_t len); +extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + struct block_device *bdev, + char __user *arg); +extern int blk_trace_startstop(struct request_queue *q, int start); +extern int blk_trace_remove(struct request_queue *q); +extern void blk_trace_remove_sysfs(struct device *dev); +extern int blk_trace_init_sysfs(struct device *dev); + +extern struct attribute_group blk_trace_attr_group; + +#else /* !CONFIG_BLK_DEV_IO_TRACE */ +# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) +# define blk_trace_shutdown(q) do { } while (0) +# define blk_add_driver_data(q, rq, data, len) do {} while (0) +# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) +# define blk_trace_startstop(q, start) (-ENOTTY) +# define blk_trace_remove(q) (-ENOTTY) +# define blk_add_trace_msg(q, fmt, ...) do { } while (0) +# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0) +# define blk_trace_remove_sysfs(dev) do { } while (0) +# define blk_trace_note_message_enabled(q) (false) +static inline int blk_trace_init_sysfs(struct device *dev) +{ + return 0; +} + +#endif /* CONFIG_BLK_DEV_IO_TRACE */ + +#ifdef CONFIG_COMPAT + +struct compat_blk_user_trace_setup { + char name[BLKTRACE_BDEV_SIZE]; + u16 act_mask; + u32 buf_size; + u32 buf_nr; + compat_u64 start_lba; + compat_u64 end_lba; + u32 pid; +}; +#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup) + +#endif + +extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); + +static inline sector_t blk_rq_trace_sector(struct request *rq) +{ + /* + * Tracing should ignore starting sector for passthrough requests and + * requests where starting sector didn't get set. + */ + if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1) + return 0; + return blk_rq_pos(rq); +} + +static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) +{ + return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq); +} + +#endif diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h new file mode 100644 index 000000000..511ab123a --- /dev/null +++ b/include/linux/blockgroup_lock.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLOCKGROUP_LOCK_H +#define _LINUX_BLOCKGROUP_LOCK_H +/* + * Per-blockgroup locking for ext2 and ext3. + * + * Simple hashed spinlocking. + */ + +#include +#include + +#ifdef CONFIG_SMP +#define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ? 
NR_CPUS : 32)) +#else +#define NR_BG_LOCKS 1 +#endif + +struct bgl_lock { + spinlock_t lock; +} ____cacheline_aligned_in_smp; + +struct blockgroup_lock { + struct bgl_lock locks[NR_BG_LOCKS]; +}; + +static inline void bgl_lock_init(struct blockgroup_lock *bgl) +{ + int i; + + for (i = 0; i < NR_BG_LOCKS; i++) + spin_lock_init(&bgl->locks[i].lock); +} + +static inline spinlock_t * +bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group) +{ + return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock; +} + +#endif diff --git a/include/linux/bma150.h b/include/linux/bma150.h new file mode 100644 index 000000000..97ade7cdc --- /dev/null +++ b/include/linux/bma150.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2011 Bosch Sensortec GmbH + * Copyright (c) 2011 Unixphere + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _BMA150_H_ +#define _BMA150_H_ + +#define BMA150_DRIVER "bma150" + +#define BMA150_RANGE_2G 0 +#define BMA150_RANGE_4G 1 +#define BMA150_RANGE_8G 2 + +#define BMA150_BW_25HZ 0 +#define BMA150_BW_50HZ 1 +#define BMA150_BW_100HZ 2 +#define BMA150_BW_190HZ 3 +#define BMA150_BW_375HZ 4 +#define BMA150_BW_750HZ 5 +#define BMA150_BW_1500HZ 6 + +struct bma150_cfg { + bool any_motion_int; /* Set to enable any-motion interrupt */ + bool hg_int; /* Set to enable high-G interrupt */ + bool lg_int; /* Set to enable low-G interrupt */ + unsigned char any_motion_dur; /* Any-motion duration */ + unsigned char any_motion_thres; /* Any-motion threshold */ + unsigned char hg_hyst; /* High-G hysterisis */ + unsigned char hg_dur; /* High-G duration */ + unsigned char hg_thres; /* High-G threshold */ + unsigned char lg_hyst; /* Low-G hysterisis */ + unsigned char lg_dur; /* Low-G duration */ + unsigned char lg_thres; /* Low-G threshold */ + unsigned char range; /* one of BMA0150_RANGE_xxx */ + unsigned char bandwidth; /* one of BMA0150_BW_xxx */ +}; + +struct bma150_platform_data { + struct bma150_cfg cfg; + int (*irq_gpio_cfg)(void); +}; + +#endif /* _BMA150_H_ */ diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h new file mode 100644 index 000000000..42515195d --- /dev/null +++ b/include/linux/bootmem.h @@ -0,0 +1,404 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 + */ +#ifndef _LINUX_BOOTMEM_H +#define _LINUX_BOOTMEM_H + +#include +#include +#include +#include + +/* + * simple boot-time physical memory area allocator. 
+ */ + +extern unsigned long max_low_pfn; +extern unsigned long min_low_pfn; + +/* + * highest page + */ +extern unsigned long max_pfn; +/* + * highest possible page + */ +extern unsigned long long max_possible_pfn; + +#ifndef CONFIG_NO_BOOTMEM +/** + * struct bootmem_data - per-node information used by the bootmem allocator + * @node_min_pfn: the starting physical address of the node's memory + * @node_low_pfn: the end physical address of the directly addressable memory + * @node_bootmem_map: is a bitmap pointer - the bits represent all physical + * memory pages (including holes) on the node. + * @last_end_off: the offset within the page of the end of the last allocation; + * if 0, the page used is full + * @hint_idx: the PFN of the page used with the last allocation; + * together with using this with the @last_end_offset field, + * a test can be made to see if allocations can be merged + * with the page used for the last allocation rather than + * using up a full new page. + * @list: list entry in the linked list ordered by the memory addresses + */ +typedef struct bootmem_data { + unsigned long node_min_pfn; + unsigned long node_low_pfn; + void *node_bootmem_map; + unsigned long last_end_off; + unsigned long hint_idx; + struct list_head list; +} bootmem_data_t; + +extern bootmem_data_t bootmem_node_data[]; +#endif + +extern unsigned long bootmem_bootmap_pages(unsigned long); + +extern unsigned long init_bootmem_node(pg_data_t *pgdat, + unsigned long freepfn, + unsigned long startpfn, + unsigned long endpfn); +extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); + +extern unsigned long free_all_bootmem(void); +extern void reset_node_managed_pages(pg_data_t *pgdat); +extern void reset_all_zones_managed_pages(void); + +extern void free_bootmem_node(pg_data_t *pgdat, + unsigned long addr, + unsigned long size); +extern void free_bootmem(unsigned long physaddr, unsigned long size); +extern void free_bootmem_late(unsigned long physaddr, unsigned long size); + +/* + * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, + * the architecture-specific code should honor this). + * + * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success). + * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory + * already was reserved. 
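+ *
+ * For example (an illustrative call, with addr and size standing in for
+ * real values): reserve_bootmem(addr, size, BOOTMEM_EXCLUSIVE) fails with
+ * -EBUSY when any part of the range was reserved earlier, while the same
+ * call with BOOTMEM_DEFAULT still returns 0.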
+ */ +#define BOOTMEM_DEFAULT 0 +#define BOOTMEM_EXCLUSIVE (1<<0) + +extern int reserve_bootmem(unsigned long addr, + unsigned long size, + int flags); +extern int reserve_bootmem_node(pg_data_t *pgdat, + unsigned long physaddr, + unsigned long size, + int flags); + +extern void *__alloc_bootmem(unsigned long size, + unsigned long align, + unsigned long goal); +extern void *__alloc_bootmem_nopanic(unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +extern void *__alloc_bootmem_node(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +void *__alloc_bootmem_node_high(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal, + unsigned long limit) __malloc; +extern void *__alloc_bootmem_low(unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +void *__alloc_bootmem_low_nopanic(unsigned long size, + unsigned long align, + unsigned long goal) __malloc; +extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal) __malloc; + +#ifdef CONFIG_NO_BOOTMEM +/* We are using top down, so it is safe to use 0 here */ +#define BOOTMEM_LOW_LIMIT 0 +#else +#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS) +#endif + +#ifndef ARCH_LOW_ADDRESS_LIMIT +#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL +#endif + +#define alloc_bootmem(x) \ + __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_align(x, align) \ + __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages(x) \ + __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages_nopanic(x) \ + __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_node(pgdat, x) \ + __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_node_nopanic(pgdat, x) \ + __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages_node(pgdat, x) \ + __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages_node_nopanic(pgdat, x) \ + __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) + +#define alloc_bootmem_low(x) \ + __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) +#define alloc_bootmem_low_pages_nopanic(x) \ + __alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0) +#define alloc_bootmem_low_pages(x) \ + __alloc_bootmem_low(x, PAGE_SIZE, 0) +#define alloc_bootmem_low_pages_node(pgdat, x) \ + __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) + + +#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) + +/* FIXME: use MEMBLOCK_ALLOC_* variants here */ +#define BOOTMEM_ALLOC_ACCESSIBLE 0 +#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0) + +/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */ +void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, + phys_addr_t max_addr, int nid); +void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size, + phys_addr_t align, phys_addr_t min_addr, + phys_addr_t max_addr, int nid); +void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align, + 
phys_addr_t min_addr, phys_addr_t max_addr, int nid); +void __memblock_free_early(phys_addr_t base, phys_addr_t size); +void __memblock_free_late(phys_addr_t base, phys_addr_t size); + +static inline void * __init memblock_virt_alloc( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_raw( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_nopanic( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid_nopanic(size, align, + BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_low( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid(size, align, + BOOTMEM_LOW_LIMIT, + ARCH_LOW_ADDRESS_LIMIT, + NUMA_NO_NODE); +} +static inline void * __init memblock_virt_alloc_low_nopanic( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid_nopanic(size, align, + BOOTMEM_LOW_LIMIT, + ARCH_LOW_ADDRESS_LIMIT, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_from_nopanic( + phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) +{ + return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr, + BOOTMEM_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_node( + phys_addr_t size, int nid) +{ + return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, nid); +} + +static inline void * __init memblock_virt_alloc_node_nopanic( + phys_addr_t size, int nid) +{ + return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, + nid); +} + +static inline void __init memblock_free_early( + phys_addr_t base, phys_addr_t size) +{ + __memblock_free_early(base, size); +} + +static inline void __init memblock_free_early_nid( + phys_addr_t base, phys_addr_t size, int nid) +{ + __memblock_free_early(base, size); +} + +static inline void __init memblock_free_late( + phys_addr_t base, phys_addr_t size) +{ + __memblock_free_late(base, size); +} + +#else + +#define BOOTMEM_ALLOC_ACCESSIBLE 0 + + +/* Fall back to all the existing bootmem APIs */ +static inline void * __init memblock_virt_alloc( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_raw( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_nopanic( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_low( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem_low(size, align, 0); +} + +static inline void * __init memblock_virt_alloc_low_nopanic( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem_low_nopanic(size, align, 0); +} + +static inline void * __init memblock_virt_alloc_from_nopanic( + phys_addr_t size, phys_addr_t align, 
phys_addr_t min_addr) +{ + return __alloc_bootmem_nopanic(size, align, min_addr); +} + +static inline void * __init memblock_virt_alloc_node( + phys_addr_t size, int nid) +{ + return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES, + BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_node_nopanic( + phys_addr_t size, int nid) +{ + return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size, + SMP_CACHE_BYTES, + BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size, + phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) +{ + return __alloc_bootmem_node_high(NODE_DATA(nid), size, align, + min_addr); +} + +static inline void * __init memblock_virt_alloc_try_nid_raw( + phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, int nid) +{ + return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align, + min_addr, max_addr); +} + +static inline void * __init memblock_virt_alloc_try_nid_nopanic( + phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, int nid) +{ + return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align, + min_addr, max_addr); +} + +static inline void __init memblock_free_early( + phys_addr_t base, phys_addr_t size) +{ + free_bootmem(base, size); +} + +static inline void __init memblock_free_early_nid( + phys_addr_t base, phys_addr_t size, int nid) +{ + free_bootmem_node(NODE_DATA(nid), base, size); +} + +static inline void __init memblock_free_late( + phys_addr_t base, phys_addr_t size) +{ + free_bootmem_late(base, size); +} +#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */ + +extern void *alloc_large_system_hash(const char *tablename, + unsigned long bucketsize, + unsigned long numentries, + int scale, + int flags, + unsigned int *_hash_shift, + unsigned int *_hash_mask, + unsigned long low_limit, + unsigned long high_limit); + +#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ +#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min + * shift passed via *_hash_shift */ +#define HASH_ZERO 0x00000004 /* Zero allocated hash table */ + +/* Only NUMA needs hash distribution. 64bit NUMA architectures have + * sufficient vmalloc space. + */ +#ifdef CONFIG_NUMA +#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) +extern int hashdist; /* Distribute hashes across NUMA nodes? 
*/ +#else +#define hashdist (0) +#endif + + +#endif /* _LINUX_BOOTMEM_H */ diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h new file mode 100644 index 000000000..a19519f42 --- /dev/null +++ b/include/linux/bottom_half.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BH_H +#define _LINUX_BH_H + +#include + +#ifdef CONFIG_TRACE_IRQFLAGS +extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); +#else +static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) +{ + preempt_count_add(cnt); + barrier(); +} +#endif + +static inline void local_bh_disable(void) +{ + __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); +} + +extern void _local_bh_enable(void); +extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt); + +static inline void local_bh_enable_ip(unsigned long ip) +{ + __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET); +} + +static inline void local_bh_enable(void) +{ + __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); +} + +#endif /* _LINUX_BH_H */ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h new file mode 100644 index 000000000..ad6b30137 --- /dev/null +++ b/include/linux/bpf-cgroup.h @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BPF_CGROUP_H +#define _BPF_CGROUP_H + +#include +#include +#include +#include +#include + +struct sock; +struct sockaddr; +struct cgroup; +struct sk_buff; +struct bpf_map; +struct bpf_prog; +struct bpf_sock_ops_kern; +struct bpf_cgroup_storage; + +#ifdef CONFIG_CGROUP_BPF + +extern struct static_key_false cgroup_bpf_enabled_key; +#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) + +DECLARE_PER_CPU(void*, bpf_cgroup_storage); + +struct bpf_cgroup_storage_map; + +struct bpf_storage_buffer { + struct rcu_head rcu; + char data[0]; +}; + +struct bpf_cgroup_storage { + struct bpf_storage_buffer *buf; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list; + struct rb_node node; + struct rcu_head rcu; +}; + +struct bpf_prog_list { + struct list_head node; + struct bpf_prog *prog; + struct bpf_cgroup_storage *storage; +}; + +struct bpf_prog_array; + +struct cgroup_bpf { + /* array of effective progs in this cgroup */ + struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE]; + + /* attached progs to this cgroup and attach flags + * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will + * have either zero or one element + * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS + */ + struct list_head progs[MAX_BPF_ATTACH_TYPE]; + u32 flags[MAX_BPF_ATTACH_TYPE]; + + /* temp storage for effective prog array used by prog_attach/detach */ + struct bpf_prog_array __rcu *inactive; +}; + +void cgroup_bpf_put(struct cgroup *cgrp); +int cgroup_bpf_inherit(struct cgroup *cgrp); + +int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type, u32 flags); +int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type, u32 flags); +int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, + union bpf_attr __user *uattr); + +/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */ +int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type, u32 flags); +int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type, u32 flags); +int cgroup_bpf_query(struct cgroup *cgrp, const union 
bpf_attr *attr, + union bpf_attr __user *uattr); + +int __cgroup_bpf_run_filter_skb(struct sock *sk, + struct sk_buff *skb, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_sk(struct sock *sk, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, + struct sockaddr *uaddr, + enum bpf_attach_type type, + void *t_ctx); + +int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, + struct bpf_sock_ops_kern *sock_ops, + enum bpf_attach_type type); + +int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, + short access, enum bpf_attach_type type); + +static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) +{ + struct bpf_storage_buffer *buf; + + if (!storage) + return; + + buf = READ_ONCE(storage->buf); + this_cpu_write(bpf_cgroup_storage, &buf->data[0]); +} + +struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog); +void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); +void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, + struct cgroup *cgroup, + enum bpf_attach_type type); +void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); +int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); +void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); + +/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */ +#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ + BPF_CGROUP_INET_INGRESS); \ + \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled && sk && sk == skb->sk) { \ + typeof(sk) __sk = sk_to_full_sk(sk); \ + if (sk_fullsock(__sk)) \ + __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ + BPF_CGROUP_INET_EGRESS); \ + } \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_SK_PROG(sk, type) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) { \ + __ret = __cgroup_bpf_run_filter_sk(sk, type); \ + } \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) + +#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) + +#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND) + +#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + NULL); \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) { \ + lock_sock(sk); \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + t_ctx); \ + release_sock(sk); \ + } \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND) + +#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND) + +#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \ + sk->sk_prot->pre_connect) + +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT) + +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT) + +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, 
BPF_CGROUP_INET4_CONNECT, NULL) + +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL) + +#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx) + +#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx) + +#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL) + +#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL) + +#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled && (sock_ops)->sk) { \ + typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \ + if (__sk && sk_fullsock(__sk)) \ + __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \ + sock_ops, \ + BPF_CGROUP_SOCK_OPS); \ + } \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \ + access, \ + BPF_CGROUP_DEVICE); \ + \ + __ret; \ +}) +int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, struct bpf_prog *prog); +int cgroup_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype); +int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr); +#else + +struct bpf_prog; +struct cgroup_bpf {}; +static inline void cgroup_bpf_put(struct cgroup *cgrp) {} +static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } + +static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) +{ + return -EINVAL; +} + +static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} + +static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {} +static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, + struct bpf_map *map) { return 0; } +static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, + struct bpf_map *map) {} +static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( + struct bpf_prog *prog) { return 0; } +static inline void bpf_cgroup_storage_free( + struct bpf_cgroup_storage *storage) {} + +#define cgroup_bpf_enabled (0) +#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) +#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) +#define 
BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) + +#endif /* CONFIG_CGROUP_BPF */ + +#endif /* _BPF_CGROUP_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h new file mode 100644 index 000000000..766ea96bf --- /dev/null +++ b/include/linux/bpf.h @@ -0,0 +1,845 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#ifndef _LINUX_BPF_H +#define _LINUX_BPF_H 1 + +#include + +#include +#include +#include +#include +#include +#include +#include + +struct bpf_verifier_env; +struct perf_event; +struct bpf_prog; +struct bpf_map; +struct sock; +struct seq_file; +struct btf_type; + +/* map is generic key/value storage optionally accesible by eBPF programs */ +struct bpf_map_ops { + /* funcs callable from userspace (via syscall) */ + int (*map_alloc_check)(union bpf_attr *attr); + struct bpf_map *(*map_alloc)(union bpf_attr *attr); + void (*map_release)(struct bpf_map *map, struct file *map_file); + void (*map_free)(struct bpf_map *map); + int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); + void (*map_release_uref)(struct bpf_map *map); + void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); + + /* funcs callable from userspace and from eBPF programs */ + void *(*map_lookup_elem)(struct bpf_map *map, void *key); + int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); + int (*map_delete_elem)(struct bpf_map *map, void *key); + + /* funcs called by prog_array and perf_event_array map */ + void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, + int fd); + void (*map_fd_put_ptr)(void *ptr); + u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); + u32 (*map_fd_sys_lookup_elem)(void *ptr); + void (*map_seq_show_elem)(struct bpf_map *map, void *key, + struct seq_file *m); + int (*map_check_btf)(const struct bpf_map *map, + const struct btf_type *key_type, + const struct btf_type *value_type); +}; + +struct bpf_map { + /* The first two cachelines with read-mostly members of which some + * are also accessed in fast-path (e.g. ops, max_entries). + */ + const struct bpf_map_ops *ops ____cacheline_aligned; + struct bpf_map *inner_map_meta; +#ifdef CONFIG_SECURITY + void *security; +#endif + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u32 map_flags; + u32 pages; + u32 id; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + struct btf *btf; + bool unpriv_array; + /* 55 bytes hole */ + + /* The 3rd and 4th cacheline with misc members to avoid false sharing + * particularly with refcounting. 
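A hedged sketch of how a map implementation plugs into the bpf_map_ops table above; the example_* callbacks are hypothetical stand-ins for what the real map types in kernel/bpf/ provide.

	static struct bpf_map *example_map_alloc(union bpf_attr *attr);
	static void example_map_free(struct bpf_map *map);
	static int example_map_get_next_key(struct bpf_map *map, void *key, void *next_key);
	static void *example_map_lookup_elem(struct bpf_map *map, void *key);
	static int example_map_update_elem(struct bpf_map *map, void *key, void *value, u64 flags);
	static int example_map_delete_elem(struct bpf_map *map, void *key);

	const struct bpf_map_ops example_map_ops = {
		.map_alloc		= example_map_alloc,		/* syscall side */
		.map_free		= example_map_free,
		.map_get_next_key	= example_map_get_next_key,
		.map_lookup_elem	= example_map_lookup_elem,	/* also callable from programs */
		.map_update_elem	= example_map_update_elem,
		.map_delete_elem	= example_map_delete_elem,
	};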
+ */ + struct user_struct *user ____cacheline_aligned; + atomic_t refcnt; + atomic_t usercnt; + struct work_struct work; + char name[BPF_OBJ_NAME_LEN]; +}; + +struct bpf_offload_dev; +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *map, + void *key, void *next_key); + int (*map_lookup_elem)(struct bpf_offloaded_map *map, + void *key, void *value); + int (*map_update_elem)(struct bpf_offloaded_map *map, + void *key, void *value, u64 flags); + int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; +}; + +static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) +{ + return container_of(map, struct bpf_offloaded_map, map); +} + +static inline bool bpf_map_offload_neutral(const struct bpf_map *map) +{ + return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; +} + +static inline bool bpf_map_support_seq_show(const struct bpf_map *map) +{ + return map->btf && map->ops->map_seq_show_elem; +} + +int map_check_no_btf(const struct bpf_map *map, + const struct btf_type *key_type, + const struct btf_type *value_type); + +extern const struct bpf_map_ops bpf_map_offload_ops; + +/* function argument constraints */ +enum bpf_arg_type { + ARG_DONTCARE = 0, /* unused argument in helper function */ + + /* the following constraints used to prototype + * bpf_map_lookup/update/delete_elem() functions + */ + ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ + ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ + ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ + + /* the following constraints used to prototype bpf_memcmp() and other + * functions that access data on eBPF program stack + */ + ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ + ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ + ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, + * helper function must fill all bytes or clear + * them in error case. + */ + + ARG_CONST_SIZE, /* number of bytes accessed from memory */ + ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ + + ARG_PTR_TO_CTX, /* pointer to context */ + ARG_ANYTHING, /* any (initialized) argument is ok */ +}; + +/* type of values returned from helper functions */ +enum bpf_return_type { + RET_INTEGER, /* function returns integer */ + RET_VOID, /* function doesn't return anything */ + RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ + RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ +}; + +/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs + * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL + * instructions after verifying + */ +struct bpf_func_proto { + u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + bool gpl_only; + bool pkt_access; + enum bpf_return_type ret_type; + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; +}; + +/* bpf_context is intentionally undefined structure. Pointer to bpf_context is + * the first argument to eBPF programs. 
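A hedged sketch of a helper prototype assembled from the argument and return constraints above, modelled on the map-lookup style of helper; the names are hypothetical.

	static u64 bpf_example_lookup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
	{
		/* r1 = map pointer, r2 = pointer to a key on the program stack */
		return 0;	/* would return a map value pointer or NULL */
	}

	static const struct bpf_func_proto bpf_example_lookup_proto = {
		.func		= bpf_example_lookup,
		.gpl_only	= false,
		.pkt_access	= false,
		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
		.arg1_type	= ARG_CONST_MAP_PTR,
		.arg2_type	= ARG_PTR_TO_MAP_KEY,
	};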
+ * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' + */ +struct bpf_context; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2 +}; + +/* types of values stored in eBPF registers */ +/* Pointer types represent: + * pointer + * pointer + imm + * pointer + (u16) var + * pointer + (u16) var + imm + * if (range > 0) then [ptr, ptr + range - off) is safe to access + * if (id > 0) means that some 'var' was added + * if (off > 0) means that 'imm' was added + */ +enum bpf_reg_type { + NOT_INIT = 0, /* nothing was written into register */ + SCALAR_VALUE, /* reg doesn't contain a valid pointer */ + PTR_TO_CTX, /* reg points to bpf_context */ + CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ + PTR_TO_MAP_VALUE, /* reg points to map element value */ + PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ + PTR_TO_STACK, /* reg == frame_pointer + offset */ + PTR_TO_PACKET_META, /* skb->data - meta_len */ + PTR_TO_PACKET, /* reg points to skb->data */ + PTR_TO_PACKET_END, /* skb->data + headlen */ +}; + +/* The information passed from prog-specific *_is_valid_access + * back to the verifier. + */ +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; + int ctx_field_size; +}; + +static inline void +bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) +{ + aux->ctx_field_size = size; +} + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr); +}; + +struct bpf_verifier_ops { + /* return eBPF function prototype for verification */ + const struct bpf_func_proto * + (*get_func_proto)(enum bpf_func_id func_id, + const struct bpf_prog *prog); + + /* return true if 'size' wide access at offset 'off' within bpf_context + * with 'type' (read or write) is allowed + */ + bool (*is_valid_access)(int off, int size, enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info); + int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, + const struct bpf_prog *prog); + int (*gen_ld_abs)(const struct bpf_insn *orig, + struct bpf_insn *insn_buf); + u32 (*convert_ctx_access)(enum bpf_access_type type, + const struct bpf_insn *src, + struct bpf_insn *dst, + struct bpf_prog *prog, u32 *target_size); +}; + +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); +}; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + void *dev_priv; + struct list_head offloads; + bool dev_state; + const struct bpf_prog_offload_ops *dev_ops; + void *jited_image; + u32 jited_len; +}; + +struct bpf_prog_aux { + atomic_t refcnt; + u32 used_map_cnt; + u32 max_ctx_offset; + u32 stack_depth; + u32 id; + u32 func_cnt; + bool offload_requested; + struct bpf_prog **func; + void *jit_data; /* JIT specific data. 
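To make the PTR_TO_PACKET / PTR_TO_PACKET_END rule above concrete, this is the bounds-check shape a program must contain before the verifier accepts a packet access. It is a hedged, program-side fragment in restricted C and assumes an XDP-style ctx with data/data_end fields and the usual XDP return codes.

	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	if (data + sizeof(*eth) > data_end)
		return XDP_DROP;	/* without this check the load below is rejected */

	/* Here the PTR_TO_PACKET register carries range >= sizeof(*eth),
	 * so [data, data + sizeof(*eth)) is known to be safe to access.
	 */
	return eth->h_proto ? XDP_PASS : XDP_DROP;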
arch dependent */ + struct latch_tree_node ksym_tnode; + struct list_head ksym_lnode; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; /* ns since boottime */ + struct bpf_map *cgroup_storage; + char name[BPF_OBJ_NAME_LEN]; +#ifdef CONFIG_SECURITY + void *security; +#endif + struct bpf_prog_offload *offload; + union { + struct work_struct work; + struct rcu_head rcu; + }; +}; + +struct bpf_array { + struct bpf_map map; + u32 elem_size; + u32 index_mask; + /* 'ownership' of prog_array is claimed by the first program that + * is going to use this map or by the first program which FD is stored + * in the map to make sure that all callers and callees have the same + * prog_type and JITed flag + */ + enum bpf_prog_type owner_prog_type; + bool owner_jited; + union { + char value[0] __aligned(8); + void *ptrs[0] __aligned(8); + void __percpu *pptrs[0] __aligned(8); + }; +}; + +#define MAX_TAIL_CALL_CNT 32 + +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct rcu_head rcu; +}; + +bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); +int bpf_prog_calc_tag(struct bpf_prog *fp); + +const struct bpf_func_proto *bpf_get_trace_printk_proto(void); + +typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, + unsigned long off, unsigned long len); + +u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, + void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); + +int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr); +int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr); + +/* an array of programs to be executed under rcu_lock. + * + * Typical usage: + * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN); + * + * the structure returned by bpf_prog_array_alloc() should be populated + * with program pointers and the last pointer must be NULL. + * The user has to keep refcnt on the program and make sure the program + * is removed from the array before bpf_prog_put(). + * The 'struct bpf_prog_array *' should only be replaced with xchg() + * since other cpus are walking the array of pointers in parallel. 
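A hedged sketch of the usage pattern the comment above describes, relying on the allocation helper and run macros declared just below; prog_a and prog_b are assumed to be bpf_prog pointers on which the caller already holds references, and it is assumed that bpf_prog_array_alloc() leaves a terminating NULL slot as the comment requires.

	struct bpf_prog_array __rcu *effective;	/* normally a field of a long-lived object */
	struct bpf_prog_array *array;
	u32 ret;

	array = bpf_prog_array_alloc(2, GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	array->items[0].prog = prog_a;
	array->items[1].prog = prog_b;
	/* items[2].prog stays NULL and terminates the walk */

	rcu_assign_pointer(effective, array);	/* replace an existing array with xchg() */

	ret = BPF_PROG_RUN_ARRAY(effective, ctx, BPF_PROG_RUN);
	/* ret is 1 only if every attached program returned 1 */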
+ */ +struct bpf_prog_array_item { + struct bpf_prog *prog; + struct bpf_cgroup_storage *cgroup_storage; +}; + +struct bpf_prog_array { + struct rcu_head rcu; + struct bpf_prog_array_item items[0]; +}; + +struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); +void bpf_prog_array_free(struct bpf_prog_array __rcu *progs); +int bpf_prog_array_length(struct bpf_prog_array __rcu *progs); +int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, + __u32 __user *prog_ids, u32 cnt); + +void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, + struct bpf_prog *old_prog); +int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, + u32 *prog_ids, u32 request_cnt, + u32 *prog_cnt); +int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, + struct bpf_prog *exclude_prog, + struct bpf_prog *include_prog, + struct bpf_prog_array **new_array); + +#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \ + ({ \ + struct bpf_prog_array_item *_item; \ + struct bpf_prog *_prog; \ + struct bpf_prog_array *_array; \ + u32 _ret = 1; \ + preempt_disable(); \ + rcu_read_lock(); \ + _array = rcu_dereference(array); \ + if (unlikely(check_non_null && !_array))\ + goto _out; \ + _item = &_array->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + if (set_cg_storage) \ + bpf_cgroup_storage_set(_item->cgroup_storage); \ + _ret &= func(_prog, ctx); \ + _item++; \ + } \ +_out: \ + rcu_read_unlock(); \ + preempt_enable(); \ + _ret; \ + }) + +#define BPF_PROG_RUN_ARRAY(array, ctx, func) \ + __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true) + +#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \ + __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false) + +#ifdef CONFIG_BPF_SYSCALL +DECLARE_PER_CPU(int, bpf_prog_active); + +extern const struct file_operations bpf_map_fops; +extern const struct file_operations bpf_prog_fops; + +#define BPF_PROG_TYPE(_id, _name) \ + extern const struct bpf_prog_ops _name ## _prog_ops; \ + extern const struct bpf_verifier_ops _name ## _verifier_ops; +#define BPF_MAP_TYPE(_id, _ops) \ + extern const struct bpf_map_ops _ops; +#include +#undef BPF_PROG_TYPE +#undef BPF_MAP_TYPE + +extern const struct bpf_prog_ops bpf_offload_prog_ops; +extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; +extern const struct bpf_verifier_ops xdp_analyzer_ops; + +struct bpf_prog *bpf_prog_get(u32 ufd); +struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, + bool attach_drv); +struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); +void bpf_prog_sub(struct bpf_prog *prog, int i); +struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); +struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); +void bpf_prog_put(struct bpf_prog *prog); +int __bpf_prog_charge(struct user_struct *user, u32 pages); +void __bpf_prog_uncharge(struct user_struct *user, u32 pages); + +void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); +void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); + +struct bpf_map *bpf_map_get_with_uref(u32 ufd); +struct bpf_map *__bpf_map_get(struct fd f); +struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); +void bpf_map_put_with_uref(struct bpf_map *map); +void bpf_map_put(struct bpf_map *map); +int bpf_map_precharge_memlock(u32 pages); +int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); +void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); +void *bpf_map_area_alloc(size_t size, int 
numa_node); +void bpf_map_area_free(void *base); +void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); + +extern int sysctl_unprivileged_bpf_disabled; + +int bpf_map_new_fd(struct bpf_map *map, int flags); +int bpf_prog_new_fd(struct bpf_prog *prog); + +int bpf_obj_pin_user(u32 ufd, const char __user *pathname); +int bpf_obj_get_user(const char __user *pathname, int flags); + +int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, + u64 flags); +int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, + u64 flags); + +int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); + +int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, + void *key, void *value, u64 map_flags); +int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); +int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, + void *key, void *value, u64 map_flags); +int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); + +int bpf_get_file_flag(int flags); +int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size, + size_t actual_size); + +/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and + * forced to use 'long' read/writes to try to atomically copy long counters. + * Best-effort only. No barriers here, since it _will_ race with concurrent + * updates from BPF programs. Called from bpf syscall and mostly used with + * size 8 or 16 bytes, so ask compiler to inline it. + */ +static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) +{ + const long *lsrc = src; + long *ldst = dst; + + size /= sizeof(long); + while (size--) + *ldst++ = *lsrc++; +} + +/* verify correctness of eBPF program */ +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); +void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); + +/* Map specifics */ +struct xdp_buff; +struct sk_buff; + +struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); +void __dev_map_insert_ctx(struct bpf_map *map, u32 index); +void __dev_map_flush(struct bpf_map *map); +int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, + struct net_device *dev_rx); +int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, + struct bpf_prog *xdp_prog); + +struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); +void __cpu_map_insert_ctx(struct bpf_map *map, u32 index); +void __cpu_map_flush(struct bpf_map *map); +int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, + struct net_device *dev_rx); + +/* Return map's numa specified by userspace */ +static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) +{ + return (attr->map_flags & BPF_F_NUMA_NODE) ? 
+ attr->numa_node : NUMA_NO_NODE; +} + +struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); +int array_map_alloc_check(union bpf_attr *attr); + +static inline bool unprivileged_ebpf_enabled(void) +{ + return !sysctl_unprivileged_bpf_disabled; +} + +#else /* !CONFIG_BPF_SYSCALL */ +static inline struct bpf_prog *bpf_prog_get(u32 ufd) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, + enum bpf_prog_type type, + bool attach_drv) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, + int i) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void bpf_prog_sub(struct bpf_prog *prog, int i) +{ +} + +static inline void bpf_prog_put(struct bpf_prog *prog) +{ +} + +static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct bpf_prog *__must_check +bpf_prog_inc_not_zero(struct bpf_prog *prog) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) +{ + return 0; +} + +static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) +{ +} + +static inline int bpf_obj_get_user(const char __user *pathname, int flags) +{ + return -EOPNOTSUPP; +} + +static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, + u32 key) +{ + return NULL; +} + +static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index) +{ +} + +static inline void __dev_map_flush(struct bpf_map *map) +{ +} + +struct xdp_buff; +struct bpf_dtab_netdev; + +static inline +int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, + struct net_device *dev_rx) +{ + return 0; +} + +struct sk_buff; + +static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, + struct sk_buff *skb, + struct bpf_prog *xdp_prog) +{ + return 0; +} + +static inline +struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) +{ + return NULL; +} + +static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index) +{ +} + +static inline void __cpu_map_flush(struct bpf_map *map) +{ +} + +static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, + struct xdp_buff *xdp, + struct net_device *dev_rx) +{ + return 0; +} + +static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, + enum bpf_prog_type type) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline bool unprivileged_ebpf_enabled(void) +{ + return false; +} + +#endif /* CONFIG_BPF_SYSCALL */ + +static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, + enum bpf_prog_type type) +{ + return bpf_prog_get_type_dev(ufd, type, false); +} + +bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); + +int bpf_prog_offload_compile(struct bpf_prog *prog); +void bpf_prog_offload_destroy(struct bpf_prog *prog); +int bpf_prog_offload_info_fill(struct bpf_prog_info *info, + struct bpf_prog *prog); + +int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); + +int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); +int bpf_map_offload_update_elem(struct bpf_map *map, + void *key, void *value, u64 flags); +int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); +int bpf_map_offload_get_next_key(struct bpf_map *map, + void *key, void *next_key); + +bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); + +struct bpf_offload_dev 
*bpf_offload_dev_create(void); +void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); +int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, + struct net_device *netdev); +void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, + struct net_device *netdev); +bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); + +void unpriv_ebpf_notify(int new_state); + +#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) +int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); + +static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) +{ + return aux->offload_requested; +} + +static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return unlikely(map->ops == &bpf_map_offload_ops); +} + +struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); +void bpf_map_offload_map_free(struct bpf_map *map); +#else +static inline int bpf_prog_offload_init(struct bpf_prog *prog, + union bpf_attr *attr) +{ + return -EOPNOTSUPP; +} + +static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) +{ + return false; +} + +static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return false; +} + +static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void bpf_map_offload_map_free(struct bpf_map *map) +{ +} +#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ + +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) +struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); +struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); +int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); +int sockmap_get_from_fd(const union bpf_attr *attr, int type, + struct bpf_prog *prog); +#else +static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) +{ + return NULL; +} + +static inline struct sock *__sock_hash_lookup_elem(struct bpf_map *map, + void *key) +{ + return NULL; +} + +static inline int sock_map_prog(struct bpf_map *map, + struct bpf_prog *prog, + u32 type) +{ + return -EOPNOTSUPP; +} + +static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type, + struct bpf_prog *prog) +{ + return -EINVAL; +} +#endif + +#if defined(CONFIG_XDP_SOCKETS) +struct xdp_sock; +struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key); +int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, + struct xdp_sock *xs); +void __xsk_map_flush(struct bpf_map *map); +#else +struct xdp_sock; +static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, + u32 key) +{ + return NULL; +} + +static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, + struct xdp_sock *xs) +{ + return -EOPNOTSUPP; +} + +static inline void __xsk_map_flush(struct bpf_map *map) +{ +} +#endif + +#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) +void bpf_sk_reuseport_detach(struct sock *sk); +int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, + void *value); +int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags); +#else +static inline void bpf_sk_reuseport_detach(struct sock *sk) +{ +} + +#ifdef CONFIG_BPF_SYSCALL +static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, + void *key, void *value) +{ + return -EOPNOTSUPP; +} + +static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, + void *key, void *value, + u64 
map_flags) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_BPF_SYSCALL */ +#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ + +/* verifier prototypes for helper functions called from eBPF programs */ +extern const struct bpf_func_proto bpf_map_lookup_elem_proto; +extern const struct bpf_func_proto bpf_map_update_elem_proto; +extern const struct bpf_func_proto bpf_map_delete_elem_proto; + +extern const struct bpf_func_proto bpf_get_prandom_u32_proto; +extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; +extern const struct bpf_func_proto bpf_get_numa_node_id_proto; +extern const struct bpf_func_proto bpf_tail_call_proto; +extern const struct bpf_func_proto bpf_ktime_get_ns_proto; +extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; +extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; +extern const struct bpf_func_proto bpf_get_current_comm_proto; +extern const struct bpf_func_proto bpf_get_stackid_proto; +extern const struct bpf_func_proto bpf_get_stack_proto; +extern const struct bpf_func_proto bpf_sock_map_update_proto; +extern const struct bpf_func_proto bpf_sock_hash_update_proto; +extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; + +extern const struct bpf_func_proto bpf_get_local_storage_proto; + +/* Shared helpers among cBPF and eBPF. */ +void bpf_user_rnd_init_once(void); +u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + +#endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h new file mode 100644 index 000000000..9d9ff755e --- /dev/null +++ b/include/linux/bpf_lirc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BPF_LIRC_H +#define _BPF_LIRC_H + +#include + +#ifdef CONFIG_BPF_LIRC_MODE2 +int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int lirc_prog_detach(const union bpf_attr *attr); +int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); +#else +static inline int lirc_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int lirc_prog_detach(const union bpf_attr *attr) +{ + return -EINVAL; +} + +static inline int lirc_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} +#endif + +#endif /* _BPF_LIRC_H */ diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h new file mode 100644 index 000000000..ddf896abc --- /dev/null +++ b/include/linux/bpf_trace.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BPF_TRACE_H__ +#define __LINUX_BPF_TRACE_H__ + +#include + +#endif /* __LINUX_BPF_TRACE_H__ */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h new file mode 100644 index 000000000..cd26c090e --- /dev/null +++ b/include/linux/bpf_types.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* internal file - do not include directly */ + +#ifdef CONFIG_NET +BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter) +BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act) +BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act) +BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local) +BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, 
sock_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg) +#endif +#ifdef CONFIG_BPF_EVENTS +BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) +BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint) +BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event) +BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint) +#endif +#ifdef CONFIG_CGROUP_BPF +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) +#endif +#ifdef CONFIG_BPF_LIRC_MODE2 +BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2) +#endif +#ifdef CONFIG_INET +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport) +#endif + +BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops) +#ifdef CONFIG_CGROUPS +BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops) +#endif +#ifdef CONFIG_CGROUP_BPF +BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops) +#endif +BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops) +#ifdef CONFIG_PERF_EVENTS +BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops) +#endif +BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) +#ifdef CONFIG_NET +BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET) +BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops) +#endif +BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) +#if defined(CONFIG_XDP_SOCKETS) +BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops) +#endif +#ifdef CONFIG_INET +BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops) +#endif +#endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h new file mode 100644 index 000000000..4acd06cca --- /dev/null +++ b/include/linux/bpf_verifier.h @@ -0,0 +1,244 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#ifndef _LINUX_BPF_VERIFIER_H +#define _LINUX_BPF_VERIFIER_H 1 + +#include /* for enum bpf_reg_type */ +#include /* for MAX_BPF_STACK */ +#include + +/* Maximum variable offset umax_value permitted when resolving memory accesses. + * In practice this is far bigger than any realistic pointer offset; this limit + * ensures that umax_value + (int)off + (int)size cannot overflow a u64. + */ +#define BPF_MAX_VAR_OFF (1 << 29) +/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures + * that converting umax_value to int cannot overflow. + */ +#define BPF_MAX_VAR_SIZ (1 << 29) + +/* Liveness marks, used for registers and spilled-regs (in stack slots). + * Read marks propagate upwards until they find a write mark; they record that + * "one of this state's descendants read this reg" (and therefore the reg is + * relevant for states_equal() checks). 
+ * Write marks collect downwards and do not propagate; they record that "the + * straight-line code that reached this state (from its parent) wrote this reg" + * (and therefore that reads propagated from this state or its descendants + * should not propagate to its parent). + * A state with a write mark can receive read marks; it just won't propagate + * them to its parent, since the write mark is a property, not of the state, + * but of the link between it and its parent. See mark_reg_read() and + * mark_stack_slot_read() in kernel/bpf/verifier.c. + */ +enum bpf_reg_liveness { + REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ + REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */ + REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */ +}; + +struct bpf_reg_state { + /* Ordering of fields matters. See states_equal() */ + enum bpf_reg_type type; + union { + /* valid when type == PTR_TO_PACKET */ + u16 range; + + /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | + * PTR_TO_MAP_VALUE_OR_NULL + */ + struct bpf_map *map_ptr; + + /* Max size from any of the above. */ + unsigned long raw; + }; + /* Fixed part of pointer offset, pointer types only */ + s32 off; + /* For PTR_TO_PACKET, used to find other pointers with the same variable + * offset, so they can share range knowledge. + * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we + * came from, when one is tested for != NULL. + */ + u32 id; + /* For scalar types (SCALAR_VALUE), this represents our knowledge of + * the actual value. + * For pointer types, this represents the variable part of the offset + * from the pointed-to object, and is shared with all bpf_reg_states + * with the same id as us. + */ + struct tnum var_off; + /* Used to determine if any memory access using this register will + * result in a bad access. + * These refer to the same value as var_off, not necessarily the actual + * contents of the register. + */ + s64 smin_value; /* minimum possible (s64)value */ + s64 smax_value; /* maximum possible (s64)value */ + u64 umin_value; /* minimum possible (u64)value */ + u64 umax_value; /* maximum possible (u64)value */ + /* parentage chain for liveness checking */ + struct bpf_reg_state *parent; + /* Inside the callee two registers can be both PTR_TO_STACK like + * R1=fp-8 and R2=fp-8, but one of them points to this function stack + * while another to the caller's stack. To differentiate them 'frameno' + * is used which is an index in bpf_verifier_state->frame[] array + * pointing to bpf_func_state. + */ + u32 frameno; + enum bpf_reg_liveness live; +}; + +enum bpf_stack_slot_type { + STACK_INVALID, /* nothing was stored in this stack slot */ + STACK_SPILL, /* register spilled into stack */ + STACK_MISC, /* BPF program wrote some data into this slot */ + STACK_ZERO, /* BPF program wrote constant zero */ +}; + +#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ + +struct bpf_stack_state { + struct bpf_reg_state spilled_ptr; + u8 slot_type[BPF_REG_SIZE]; +}; + +/* state of the program: + * type of all registers and stack info + */ +struct bpf_func_state { + struct bpf_reg_state regs[MAX_BPF_REG]; + /* index of call instruction that called into this func */ + int callsite; + /* stack frame number of this function state from pov of + * enclosing bpf_verifier_state. + * 0 = main function, 1 = first callee. 
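A hedged walkthrough of how the bounds tracked in bpf_reg_state above could evolve over a short instruction sequence; the numbers are illustrative, not taken from actual verifier output.

	/*
	 * r0 = *(u32 *)(r1 + 0)   // unknown 32-bit value:
	 *                         //   umin_value = 0, umax_value = 0xffffffff
	 * r0 &= 0xff              // var_off = (value 0x0, mask 0xff), hence
	 *                         //   umin_value = 0, umax_value = 255,
	 *                         //   smin_value = 0, smax_value = 255
	 * if r0 > 64 goto drop    // the fall-through path refines umax_value to 64
	 */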
+ */ + u32 frameno; + /* subprog number == index within subprog_stack_depth + * zero == main subprog + */ + u32 subprogno; + + /* should be second to last. See copy_func_state() */ + int allocated_stack; + struct bpf_stack_state *stack; +}; + +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +/* Maximum number of register states that can exist at once */ +#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) +#define MAX_CALL_FRAMES 8 +struct bpf_verifier_state { + /* call stack tracking */ + struct bpf_func_state *frame[MAX_CALL_FRAMES]; + u32 curframe; + bool speculative; +}; + +/* linked list of verifier states used to prune search */ +struct bpf_verifier_state_list { + struct bpf_verifier_state state; + struct bpf_verifier_state_list *next; +}; + +/* Possible states for alu_state member. */ +#define BPF_ALU_SANITIZE_SRC (1U << 0) +#define BPF_ALU_SANITIZE_DST (1U << 1) +#define BPF_ALU_NEG_VALUE (1U << 2) +#define BPF_ALU_NON_POINTER (1U << 3) +#define BPF_ALU_IMMEDIATE (1U << 4) +#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ + BPF_ALU_SANITIZE_DST) + +struct bpf_insn_aux_data { + union { + enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ + unsigned long map_state; /* pointer/poison value for maps */ + s32 call_imm; /* saved imm field of call insn */ + u32 alu_limit; /* limit for add/sub register with pointer */ + }; + int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ + bool seen; /* this insn was processed by the verifier */ + bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ + u8 alu_state; /* used in combination with alu_limit */ +}; + +#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ + +#define BPF_VERIFIER_TMP_LOG_SIZE 1024 + +struct bpf_verifier_log { + u32 level; + char kbuf[BPF_VERIFIER_TMP_LOG_SIZE]; + char __user *ubuf; + u32 len_used; + u32 len_total; +}; + +static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log) +{ + return log->len_used >= log->len_total - 1; +} + +static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) +{ + return log->level && log->ubuf && !bpf_verifier_log_full(log); +} + +#define BPF_MAX_SUBPROGS 256 + +struct bpf_subprog_info { + u32 start; /* insn idx of function entry point */ + u16 stack_depth; /* max. 
stack depth used by this function */ +}; + +/* single container for all structs + * one verifier_env per bpf_check() call + */ +struct bpf_verifier_env { + u32 insn_idx; + u32 prev_insn_idx; + struct bpf_prog *prog; /* eBPF program being verified */ + const struct bpf_verifier_ops *ops; + struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ + int stack_size; /* number of states to be processed */ + bool strict_alignment; /* perform strict pointer alignment checks */ + struct bpf_verifier_state *cur_state; /* current verifier state */ + struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ + struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ + u32 used_map_cnt; /* number of used maps */ + u32 id_gen; /* used to generate unique reg IDs */ + bool explore_alu_limits; + bool allow_ptr_leaks; + bool seen_direct_write; + struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ + struct bpf_verifier_log log; + struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; + struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE]; + u32 subprog_cnt; +}; + +__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, + const char *fmt, va_list args); +__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, + const char *fmt, ...); + +static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) +{ + struct bpf_verifier_state *cur = env->cur_state; + + return cur->frame[cur->curframe]->regs; +} + +int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); +int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); + +#endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h new file mode 100644 index 000000000..f02cee022 --- /dev/null +++ b/include/linux/bpfilter.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BPFILTER_H +#define _LINUX_BPFILTER_H + +#include + +struct sock; +int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, + unsigned int optlen); +int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, + int __user *optlen); +extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, + char __user *optval, + unsigned int optlen, bool is_set); +#endif diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h new file mode 100644 index 000000000..949e9af8d --- /dev/null +++ b/include/linux/brcmphy.h @@ -0,0 +1,267 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BRCMPHY_H +#define _LINUX_BRCMPHY_H + +#include + +/* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used + * to configure the switch internal registers via MDIO accesses. 
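A hedged sketch of what such a pseudo-PHY access looks like from a switch driver; the bus pointer and register number are placeholders.

	/* Read one switch register through the pseudo-PHY at MDIO address 30. */
	int val = mdiobus_read(priv->bus, BRCM_PSEUDO_PHY_ADDR, reg);

	if (val < 0)
		return val;	/* MDIO bus error */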
+ */ +#define BRCM_PSEUDO_PHY_ADDR 30 + +#define PHY_ID_BCM50610 0x0143bd60 +#define PHY_ID_BCM50610M 0x0143bd70 +#define PHY_ID_BCM5241 0x0143bc30 +#define PHY_ID_BCMAC131 0x0143bc70 +#define PHY_ID_BCM5481 0x0143bca0 +#define PHY_ID_BCM5395 0x0143bcf0 +#define PHY_ID_BCM54810 0x03625d00 +#define PHY_ID_BCM5482 0x0143bcb0 +#define PHY_ID_BCM5411 0x00206070 +#define PHY_ID_BCM5421 0x002060e0 +#define PHY_ID_BCM54210E 0x600d84a0 +#define PHY_ID_BCM5464 0x002060b0 +#define PHY_ID_BCM5461 0x002060c0 +#define PHY_ID_BCM54612E 0x03625e60 +#define PHY_ID_BCM54616S 0x03625d10 +#define PHY_ID_BCM57780 0x03625d90 +#define PHY_ID_BCM89610 0x03625cd0 + +#define PHY_ID_BCM7250 0xae025280 +#define PHY_ID_BCM7260 0xae025190 +#define PHY_ID_BCM7268 0xae025090 +#define PHY_ID_BCM7271 0xae0253b0 +#define PHY_ID_BCM7278 0xae0251a0 +#define PHY_ID_BCM7364 0xae025260 +#define PHY_ID_BCM7366 0x600d8490 +#define PHY_ID_BCM7346 0x600d8650 +#define PHY_ID_BCM7362 0x600d84b0 +#define PHY_ID_BCM7425 0x600d86b0 +#define PHY_ID_BCM7429 0x600d8730 +#define PHY_ID_BCM7435 0x600d8750 +#define PHY_ID_BCM74371 0xae0252e0 +#define PHY_ID_BCM7439 0x600d8480 +#define PHY_ID_BCM7439_2 0xae025080 +#define PHY_ID_BCM7445 0x600d8510 + +#define PHY_ID_BCM_CYGNUS 0xae025200 +#define PHY_ID_BCM_OMEGA 0xae025100 + +#define PHY_BCM_OUI_MASK 0xfffffc00 +#define PHY_BCM_OUI_1 0x00206000 +#define PHY_BCM_OUI_2 0x0143bc00 +#define PHY_BCM_OUI_3 0x03625c00 +#define PHY_BCM_OUI_4 0x600d8400 +#define PHY_BCM_OUI_5 0x03625e00 +#define PHY_BCM_OUI_6 0xae025000 + +#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 +#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 +#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010 +#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020 +#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100 +#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200 +#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400 +#define PHY_BRCM_STD_IBND_DISABLE 0x00000800 +#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000 +#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 +#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 +#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 +#define PHY_BRCM_EN_MASTER_MODE 0x00010000 + +/* Broadcom BCM7xxx specific workarounds */ +#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) +#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) +#define PHY_BCM_FLAGS_VALID 0x80000000 + +/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */ +#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */ +#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ +#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */ + +#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */ +#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */ + +#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */ +#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */ +#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */ +#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */ +#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */ + +#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */ +#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */ +#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */ +#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */ +#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */ +#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */ +#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */ +#define MII_BCM54XX_INT_LRS 0x0010 /* 
Local receiver status changed */ +#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */ +#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */ +#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */ +#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */ +#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */ +#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */ +#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */ +#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */ +#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */ +#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */ + +#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */ +#define MII_BCM54XX_SHD_WRITE 0x8000 +#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10) +#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0) + +/* + * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18) + */ +#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00 +#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400 +#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 + +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100 +#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 +#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 + +#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007 + +/* + * Broadcom LED source encodings. These are used in BCM5461, BCM5481, + * BCM5482, and possibly some others. + */ +#define BCM_LED_SRC_LINKSPD1 0x0 +#define BCM_LED_SRC_LINKSPD2 0x1 +#define BCM_LED_SRC_XMITLED 0x2 +#define BCM_LED_SRC_ACTIVITYLED 0x3 +#define BCM_LED_SRC_FDXLED 0x4 +#define BCM_LED_SRC_SLAVE 0x5 +#define BCM_LED_SRC_INTR 0x6 +#define BCM_LED_SRC_QUALITY 0x7 +#define BCM_LED_SRC_RCVLED 0x8 +#define BCM_LED_SRC_WIRESPEED 0x9 +#define BCM_LED_SRC_MULTICOLOR1 0xa +#define BCM_LED_SRC_OPENSHORT 0xb +#define BCM_LED_SRC_OFF 0xe /* Tied high */ +#define BCM_LED_SRC_ON 0xf /* Tied low */ + + +/* + * BCM5482: Shadow registers + * Shadow values go into bits [14:10] of register 0x1c to select a shadow + * register to access. 
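Both the 0x1c shadow block and the expansion block (select/data registers 0x17/0x15, defined above) are reached indirectly. A hedged sketch of the access pattern follows, with hypothetical helper names; production drivers add locking and restore the selector where needed.

	static int example_shadow_write(struct phy_device *phydev, u16 shadow, u16 val)
	{
		return phy_write(phydev, MII_BCM54XX_SHD,
				 MII_BCM54XX_SHD_WRITE |
				 MII_BCM54XX_SHD_VAL(shadow) |
				 MII_BCM54XX_SHD_DATA(val));
	}

	static int example_exp_read(struct phy_device *phydev, u16 regnum)
	{
		/* Select the expansion register, then read it back via the data port. */
		phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
		return phy_read(phydev, MII_BCM54XX_EXP_DATA);
	}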
+ */ + +/* 00100: Reserved control register 2 */ +#define BCM54XX_SHD_SCR2 0x04 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_DIS 0x100 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT 2 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET 2 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK 0x7 + +/* 00101: Spare Control Register 3 */ +#define BCM54XX_SHD_SCR3 0x05 +#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 +#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 +#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 + +/* 01010: Auto Power-Down */ +#define BCM54XX_SHD_APD 0x0a +#define BCM_APD_CLR_MASK 0xFE9F /* clear bits 5, 6 & 8 */ +#define BCM54XX_SHD_APD_EN 0x0020 +#define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */ +#define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */ + +#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ + /* LED3 / ~LINKSPD[2] selector */ +#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) + /* LED1 / ~LINKSPD[1] selector */ +#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) +#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */ +#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ +#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ +#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ +#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ +#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ + + +/* + * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17) + */ +#define MII_BCM54XX_EXP_AADJ1CH0 0x001f +#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200 +#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100 +#define MII_BCM54XX_EXP_AADJ1CH3 0x601f +#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002 +#define MII_BCM54XX_EXP_EXP08 0x0F08 +#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001 +#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200 +#define MII_BCM54XX_EXP_EXP75 0x0f75 +#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c +#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001 +#define MII_BCM54XX_EXP_EXP96 0x0f96 +#define MII_BCM54XX_EXP_EXP96_MYST 0x0010 +#define MII_BCM54XX_EXP_EXP97 0x0f97 +#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c + +/* + * BCM5482: Secondary SerDes registers + */ +#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */ +#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */ +#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */ +#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ +#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ + +/* BCM54810 Registers */ +#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90) +#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) +#define BCM54810_SHD_CLK_CTL 0x3 +#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) + +/* BCM54612E Registers */ +#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34) +#define BCM54612E_LED4_CLK125OUT_EN (1 << 1) + +/*****************************************************************************/ +/* Fast Ethernet Transceiver definitions. 
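As with the 0x1c shadow registers, the expansion registers above are reached indirectly. A hedged sketch of the usual select-then-access sequence follows (bcm54xx_exp_write is an invented name; phy_write() is the generic helper from <linux/phy.h>):

        static int bcm54xx_exp_write(struct phy_device *phydev, u16 exp_reg, u16 val)
        {
                int ret;

                /* Select the expansion register through MII register 0x17 ... */
                ret = phy_write(phydev, MII_BCM54XX_EXP_SEL, exp_reg);
                if (ret < 0)
                        return ret;

                /* ... then access its contents through MII register 0x15. */
                return phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
        }

Constants such as BCM54810_EXP_BROADREACH_LRE_MISC_CTL already include the selector bits, so they can be passed as exp_reg directly.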
*/ +/*****************************************************************************/ + +#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */ +#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */ +#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */ +#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */ +#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */ +#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */ + +#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */ +#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */ + + +/*** Shadow register definitions ***/ + +#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */ +#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */ + +#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */ +#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003 +#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001 + +#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ +#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ + +#define BRCM_CL45VEN_EEE_CONTROL 0x803d +#define LPI_FEATURE_EN 0x8000 +#define LPI_FEATURE_EN_DIG1000X 0x4000 + +/* Core register definitions*/ +#define MII_BRCM_CORE_BASE12 0x12 +#define MII_BRCM_CORE_BASE13 0x13 +#define MII_BRCM_CORE_BASE14 0x14 +#define MII_BRCM_CORE_BASE1E 0x1E +#define MII_BRCM_CORE_EXPB0 0xB0 +#define MII_BRCM_CORE_EXPB1 0xB1 + +#endif /* _LINUX_BRCMPHY_H */ diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h new file mode 100644 index 000000000..62b1eb348 --- /dev/null +++ b/include/linux/bsearch.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BSEARCH_H +#define _LINUX_BSEARCH_H + +#include + +void *bsearch(const void *key, const void *base, size_t num, size_t size, + int (*cmp)(const void *key, const void *elt)); + +#endif /* _LINUX_BSEARCH_H */ diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h new file mode 100644 index 000000000..6aeaf6472 --- /dev/null +++ b/include/linux/bsg-lib.h @@ -0,0 +1,79 @@ +/* + * BSG helper library + * + * Copyright (C) 2008 James Smart, Emulex Corporation + * Copyright (C) 2011 Red Hat, Inc. All rights reserved. + * Copyright (C) 2011 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
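For the bsearch() prototype declared above, a short usage sketch (cmp_u32 and table_contains are invented names): the comparison callback receives the search key first and an array element second, and the array must already be sorted.

        static int cmp_u32(const void *key, const void *elt)
        {
                u32 k = *(const u32 *)key;
                u32 e = *(const u32 *)elt;

                if (k < e)
                        return -1;
                return k > e ? 1 : 0;
        }

        /* @table must be sorted in ascending order for bsearch() to work. */
        static bool table_contains(const u32 *table, size_t num, u32 value)
        {
                return bsearch(&value, table, num, sizeof(*table), cmp_u32) != NULL;
        }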
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#ifndef _BLK_BSG_ +#define _BLK_BSG_ + +#include +#include + +struct request; +struct device; +struct scatterlist; +struct request_queue; + +struct bsg_buffer { + unsigned int payload_len; + int sg_cnt; + struct scatterlist *sg_list; +}; + +struct bsg_job { + struct device *dev; + + struct kref kref; + + unsigned int timeout; + + /* Transport/driver specific request/reply structs */ + void *request; + void *reply; + + unsigned int request_len; + unsigned int reply_len; + /* + * On entry : reply_len indicates the buffer size allocated for + * the reply. + * + * Upon completion : the message handler must set reply_len + * to indicates the size of the reply to be returned to the + * caller. + */ + + /* DMA payloads for the request/response */ + struct bsg_buffer request_payload; + struct bsg_buffer reply_payload; + + int result; + unsigned int reply_payload_rcv_len; + + void *dd_data; /* Used for driver-specific storage */ +}; + +void bsg_job_done(struct bsg_job *job, int result, + unsigned int reply_payload_rcv_len); +struct request_queue *bsg_setup_queue(struct device *dev, const char *name, + bsg_job_fn *job_fn, int dd_job_size); +void bsg_job_put(struct bsg_job *job); +int __must_check bsg_job_get(struct bsg_job *job); + +#endif diff --git a/include/linux/bsg.h b/include/linux/bsg.h new file mode 100644 index 000000000..dac37b6e0 --- /dev/null +++ b/include/linux/bsg.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BSG_H +#define _LINUX_BSG_H + +#include + +struct request; + +#ifdef CONFIG_BLK_DEV_BSG +struct bsg_ops { + int (*check_proto)(struct sg_io_v4 *hdr); + int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr, + fmode_t mode); + int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr); + void (*free_rq)(struct request *rq); +}; + +struct bsg_class_device { + struct device *class_dev; + int minor; + struct request_queue *queue; + const struct bsg_ops *ops; +}; + +int bsg_register_queue(struct request_queue *q, struct device *parent, + const char *name, const struct bsg_ops *ops); +int bsg_scsi_register_queue(struct request_queue *q, struct device *parent); +void bsg_unregister_queue(struct request_queue *q); +#else +static inline int bsg_scsi_register_queue(struct request_queue *q, + struct device *parent) +{ + return 0; +} +static inline void bsg_unregister_queue(struct request_queue *q) +{ +} +#endif /* CONFIG_BLK_DEV_BSG */ +#endif /* _LINUX_BSG_H */ diff --git a/include/linux/btf.h b/include/linux/btf.h new file mode 100644 index 000000000..e076c4697 --- /dev/null +++ b/include/linux/btf.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Facebook */ + +#ifndef _LINUX_BTF_H +#define _LINUX_BTF_H 1 + +#include + +struct btf; +struct btf_type; +union bpf_attr; + +extern const struct file_operations btf_fops; + +void btf_put(struct btf *btf); +int btf_new_fd(const union bpf_attr *attr); +struct btf *btf_get_by_fd(int fd); +int btf_get_info_by_fd(const struct btf *btf, + const union bpf_attr *attr, + union bpf_attr __user *uattr); +/* Figure out the size of a type_id. If type_id is a modifier + * (e.g. const), it will be resolved to find out the type with size. + * + * For example: + * In describing "const void *", type_id is "const" and "const" + * refers to "void *". 
The return type will be "void *". + * + * If type_id is a simple "int", then return type will be "int". + * + * @btf: struct btf object + * @type_id: Find out the size of type_id. The type_id of the return + * type is set to *type_id. + * @ret_size: It can be NULL. If not NULL, the size of the return + * type is set to *ret_size. + * Return: The btf_type (resolved to another type with size info if needed). + * NULL is returned if type_id itself does not have size info + * (e.g. void) or it cannot be resolved to another type that + * has size info. + * *type_id and *ret_size will not be changed in the + * NULL return case. + */ +const struct btf_type *btf_type_id_size(const struct btf *btf, + u32 *type_id, + u32 *ret_size); +void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, + struct seq_file *m); +int btf_get_fd_by_id(u32 id); +u32 btf_id(const struct btf *btf); + +#endif diff --git a/include/linux/btree-128.h b/include/linux/btree-128.h new file mode 100644 index 000000000..22c09f5c3 --- /dev/null +++ b/include/linux/btree-128.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +extern struct btree_geo btree_geo128; + +struct btree_head128 { struct btree_head h; }; + +static inline void btree_init_mempool128(struct btree_head128 *head, + mempool_t *mempool) +{ + btree_init_mempool(&head->h, mempool); +} + +static inline int btree_init128(struct btree_head128 *head) +{ + return btree_init(&head->h); +} + +static inline void btree_destroy128(struct btree_head128 *head) +{ + btree_destroy(&head->h); +} + +static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2) +{ + u64 key[2] = {k1, k2}; + return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key); +} + +static inline void *btree_get_prev128(struct btree_head128 *head, + u64 *k1, u64 *k2) +{ + u64 key[2] = {*k1, *k2}; + void *val; + + val = btree_get_prev(&head->h, &btree_geo128, + (unsigned long *)&key); + *k1 = key[0]; + *k2 = key[1]; + return val; +} + +static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2, + void *val, gfp_t gfp) +{ + u64 key[2] = {k1, k2}; + return btree_insert(&head->h, &btree_geo128, + (unsigned long *)&key, val, gfp); +} + +static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2, + void *val) +{ + u64 key[2] = {k1, k2}; + return btree_update(&head->h, &btree_geo128, + (unsigned long *)&key, val); +} + +static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2) +{ + u64 key[2] = {k1, k2}; + return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key); +} + +static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2) +{ + u64 key[2]; + void *val; + + val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]); + if (val) { + *k1 = key[0]; + *k2 = key[1]; + } + + return val; +} + +static inline int btree_merge128(struct btree_head128 *target, + struct btree_head128 *victim, + gfp_t gfp) +{ + return btree_merge(&target->h, &victim->h, &btree_geo128, gfp); +} + +void visitor128(void *elem, unsigned long opaque, unsigned long *__key, + size_t index, void *__func); + +typedef void (*visitor128_t)(void *elem, unsigned long opaque, + u64 key1, u64 key2, size_t index); + +static inline size_t btree_visitor128(struct btree_head128 *head, + unsigned long opaque, + visitor128_t func2) +{ + return btree_visitor(&head->h, &btree_geo128, opaque, + visitor128, func2); +} + +static inline size_t btree_grim_visitor128(struct btree_head128 *head, + unsigned long opaque, + 
visitor128_t func2) +{ + return btree_grim_visitor(&head->h, &btree_geo128, opaque, + visitor128, func2); +} + +#define btree_for_each_safe128(head, k1, k2, val) \ + for (val = btree_last128(head, &k1, &k2); \ + val; \ + val = btree_get_prev128(head, &k1, &k2)) + diff --git a/include/linux/btree-type.h b/include/linux/btree-type.h new file mode 100644 index 000000000..fb34a52c7 --- /dev/null +++ b/include/linux/btree-type.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#define __BTREE_TP(pfx, type, sfx) pfx ## type ## sfx +#define _BTREE_TP(pfx, type, sfx) __BTREE_TP(pfx, type, sfx) +#define BTREE_TP(pfx) _BTREE_TP(pfx, BTREE_TYPE_SUFFIX,) +#define BTREE_FN(name) BTREE_TP(btree_ ## name) +#define BTREE_TYPE_HEAD BTREE_TP(struct btree_head) +#define VISITOR_FN BTREE_TP(visitor) +#define VISITOR_FN_T _BTREE_TP(visitor, BTREE_TYPE_SUFFIX, _t) + +BTREE_TYPE_HEAD { + struct btree_head h; +}; + +static inline void BTREE_FN(init_mempool)(BTREE_TYPE_HEAD *head, + mempool_t *mempool) +{ + btree_init_mempool(&head->h, mempool); +} + +static inline int BTREE_FN(init)(BTREE_TYPE_HEAD *head) +{ + return btree_init(&head->h); +} + +static inline void BTREE_FN(destroy)(BTREE_TYPE_HEAD *head) +{ + btree_destroy(&head->h); +} + +static inline int BTREE_FN(merge)(BTREE_TYPE_HEAD *target, + BTREE_TYPE_HEAD *victim, + gfp_t gfp) +{ + return btree_merge(&target->h, &victim->h, BTREE_TYPE_GEO, gfp); +} + +#if (BITS_PER_LONG > BTREE_TYPE_BITS) +static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + unsigned long _key = key; + return btree_lookup(&head->h, BTREE_TYPE_GEO, &_key); +} + +static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val, gfp_t gfp) +{ + unsigned long _key = key; + return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp); +} + +static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val) +{ + unsigned long _key = key; + return btree_update(&head->h, BTREE_TYPE_GEO, &_key, val); +} + +static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + unsigned long _key = key; + return btree_remove(&head->h, BTREE_TYPE_GEO, &_key); +} + +static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + unsigned long _key; + void *val = btree_last(&head->h, BTREE_TYPE_GEO, &_key); + if (val) + *key = _key; + return val; +} + +static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + unsigned long _key = *key; + void *val = btree_get_prev(&head->h, BTREE_TYPE_GEO, &_key); + if (val) + *key = _key; + return val; +} +#else +static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + return btree_lookup(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); +} + +static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val, gfp_t gfp) +{ + return btree_insert(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, + val, gfp); +} + +static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val) +{ + return btree_update(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, val); +} + +static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + return btree_remove(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); +} + +static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + return btree_last(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); +} + +static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE 
*key) +{ + return btree_get_prev(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); +} +#endif + +void VISITOR_FN(void *elem, unsigned long opaque, unsigned long *key, + size_t index, void *__func); + +typedef void (*VISITOR_FN_T)(void *elem, unsigned long opaque, + BTREE_KEYTYPE key, size_t index); + +static inline size_t BTREE_FN(visitor)(BTREE_TYPE_HEAD *head, + unsigned long opaque, + VISITOR_FN_T func2) +{ + return btree_visitor(&head->h, BTREE_TYPE_GEO, opaque, + visitorl, func2); +} + +static inline size_t BTREE_FN(grim_visitor)(BTREE_TYPE_HEAD *head, + unsigned long opaque, + VISITOR_FN_T func2) +{ + return btree_grim_visitor(&head->h, BTREE_TYPE_GEO, opaque, + visitorl, func2); +} + +#undef VISITOR_FN +#undef VISITOR_FN_T +#undef __BTREE_TP +#undef _BTREE_TP +#undef BTREE_TP +#undef BTREE_FN +#undef BTREE_TYPE_HEAD +#undef BTREE_TYPE_SUFFIX +#undef BTREE_TYPE_GEO +#undef BTREE_KEYTYPE +#undef BTREE_TYPE_BITS diff --git a/include/linux/btree.h b/include/linux/btree.h new file mode 100644 index 000000000..68f858c83 --- /dev/null +++ b/include/linux/btree.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef BTREE_H +#define BTREE_H + +#include +#include + +/** + * DOC: B+Tree basics + * + * A B+Tree is a data structure for looking up arbitrary (currently allowing + * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure + * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not + * use binary search to find the key on lookups. + * + * Each B+Tree consists of a head, that contains bookkeeping information and + * a variable number (starting with zero) nodes. Each node contains the keys + * and pointers to sub-nodes, or, for leaf nodes, the keys and values for the + * tree entries. + * + * Each node in this implementation has the following layout: + * [key1, key2, ..., keyN] [val1, val2, ..., valN] + * + * Each key here is an array of unsigned longs, geo->no_longs in total. The + * number of keys and values (N) is geo->no_pairs. + */ + +/** + * struct btree_head - btree head + * + * @node: the first node in the tree + * @mempool: mempool used for node allocations + * @height: current of the tree + */ +struct btree_head { + unsigned long *node; + mempool_t *mempool; + int height; +}; + +/* btree geometry */ +struct btree_geo; + +/** + * btree_alloc - allocate function for the mempool + * @gfp_mask: gfp mask for the allocation + * @pool_data: unused + */ +void *btree_alloc(gfp_t gfp_mask, void *pool_data); + +/** + * btree_free - free function for the mempool + * @element: the element to free + * @pool_data: unused + */ +void btree_free(void *element, void *pool_data); + +/** + * btree_init_mempool - initialise a btree with given mempool + * + * @head: the btree head to initialise + * @mempool: the mempool to use + * + * When this function is used, there is no need to destroy + * the mempool. + */ +void btree_init_mempool(struct btree_head *head, mempool_t *mempool); + +/** + * btree_init - initialise a btree + * + * @head: the btree head to initialise + * + * This function allocates the memory pool that the + * btree needs. Returns zero or a negative error code + * (-%ENOMEM) when memory allocation fails. + * + */ +int __must_check btree_init(struct btree_head *head); + +/** + * btree_destroy - destroy mempool + * + * @head: the btree head to destroy + * + * This function destroys the internal memory pool, use only + * when using btree_init(), not with btree_init_mempool(). 
+ */ +void btree_destroy(struct btree_head *head); + +/** + * btree_lookup - look up a key in the btree + * + * @head: the btree to look in + * @geo: the btree geometry + * @key: the key to look up + * + * This function returns the value for the given key, or %NULL. + */ +void *btree_lookup(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_insert - insert an entry into the btree + * + * @head: the btree to add to + * @geo: the btree geometry + * @key: the key to add (must not already be present) + * @val: the value to add (must not be %NULL) + * @gfp: allocation flags for node allocations + * + * This function returns 0 if the item could be added, or an + * error code if it failed (may fail due to memory pressure). + */ +int __must_check btree_insert(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val, gfp_t gfp); +/** + * btree_update - update an entry in the btree + * + * @head: the btree to update + * @geo: the btree geometry + * @key: the key to update + * @val: the value to change it to (must not be %NULL) + * + * This function returns 0 if the update was successful, or + * -%ENOENT if the key could not be found. + */ +int btree_update(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val); +/** + * btree_remove - remove an entry from the btree + * + * @head: the btree to update + * @geo: the btree geometry + * @key: the key to remove + * + * This function returns the removed entry, or %NULL if the key + * could not be found. + */ +void *btree_remove(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_merge - merge two btrees + * + * @target: the tree that gets all the entries + * @victim: the tree that gets merged into @target + * @geo: the btree geometry + * @gfp: allocation flags + * + * The two trees @target and @victim may not contain the same keys, + * that is a bug and triggers a BUG(). This function returns zero + * if the trees were merged successfully, and may return a failure + * when memory allocation fails, in which case both trees might have + * been partially merged, i.e. some entries have been moved from + * @victim to @target. + */ +int btree_merge(struct btree_head *target, struct btree_head *victim, + struct btree_geo *geo, gfp_t gfp); + +/** + * btree_last - get last entry in btree + * + * @head: btree head + * @geo: btree geometry + * @key: last key + * + * Returns the last entry in the btree, and sets @key to the key + * of that entry; returns NULL if the tree is empty, in that case + * key is not changed. + */ +void *btree_last(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_get_prev - get previous entry + * + * @head: btree head + * @geo: btree geometry + * @key: pointer to key + * + * The function returns the next item right before the value pointed to by + * @key, and updates @key with its key, or returns %NULL when there is no + * entry with a key smaller than the given key. 
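A minimal usage sketch of this API (illustrative only): it relies on the u64-keyed wrappers that the btree-type.h instantiations further below generate, and the function name is invented. Error handling is abbreviated.

        static int btree_example(void *item)
        {
                struct btree_head64 tree;
                u64 key = 0x1234;
                int err;

                err = btree_init64(&tree);              /* allocates the node mempool */
                if (err)
                        return err;

                err = btree_insert64(&tree, key, item, GFP_KERNEL);
                if (!err) {
                        WARN_ON(btree_lookup64(&tree, key) != item);
                        btree_remove64(&tree, key);     /* returns the removed value */
                }

                btree_destroy64(&tree);                 /* frees the mempool */
                return err;
        }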
+ */ +void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + + +/* internal use, use btree_visitor{l,32,64,128} */ +size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, size_t index, + void *func2), + void *func2); + +/* internal use, use btree_grim_visitor{l,32,64,128} */ +size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, + size_t index, void *func2), + void *func2); + + +#include + +extern struct btree_geo btree_geo32; +#define BTREE_TYPE_SUFFIX l +#define BTREE_TYPE_BITS BITS_PER_LONG +#define BTREE_TYPE_GEO &btree_geo32 +#define BTREE_KEYTYPE unsigned long +#include + +#define btree_for_each_safel(head, key, val) \ + for (val = btree_lastl(head, &key); \ + val; \ + val = btree_get_prevl(head, &key)) + +#define BTREE_TYPE_SUFFIX 32 +#define BTREE_TYPE_BITS 32 +#define BTREE_TYPE_GEO &btree_geo32 +#define BTREE_KEYTYPE u32 +#include + +#define btree_for_each_safe32(head, key, val) \ + for (val = btree_last32(head, &key); \ + val; \ + val = btree_get_prev32(head, &key)) + +extern struct btree_geo btree_geo64; +#define BTREE_TYPE_SUFFIX 64 +#define BTREE_TYPE_BITS 64 +#define BTREE_TYPE_GEO &btree_geo64 +#define BTREE_KEYTYPE u64 +#include + +#define btree_for_each_safe64(head, key, val) \ + for (val = btree_last64(head, &key); \ + val; \ + val = btree_get_prev64(head, &key)) + +#endif diff --git a/include/linux/btrfs.h b/include/linux/btrfs.h new file mode 100644 index 000000000..9a37a45ec --- /dev/null +++ b/include/linux/btrfs.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BTRFS_H +#define _LINUX_BTRFS_H + +#include + +#endif /* _LINUX_BTRFS_H */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h new file mode 100644 index 000000000..9168fc33a --- /dev/null +++ b/include/linux/buffer_head.h @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/buffer_head.h + * + * Everything to do with buffer_heads. 
+ */ + +#ifndef _LINUX_BUFFER_HEAD_H +#define _LINUX_BUFFER_HEAD_H + +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_BLOCK + +enum bh_state_bits { + BH_Uptodate, /* Contains valid data */ + BH_Dirty, /* Is dirty */ + BH_Lock, /* Is locked */ + BH_Req, /* Has been submitted for I/O */ + BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise + * IO completion of other buffers in the page + */ + + BH_Mapped, /* Has a disk mapping */ + BH_New, /* Disk mapping was newly created by get_block */ + BH_Async_Read, /* Is under end_buffer_async_read I/O */ + BH_Async_Write, /* Is under end_buffer_async_write I/O */ + BH_Delay, /* Buffer is not yet allocated on disk */ + BH_Boundary, /* Block is followed by a discontiguity */ + BH_Write_EIO, /* I/O error on write */ + BH_Unwritten, /* Buffer is allocated on disk but not written */ + BH_Quiet, /* Buffer Error Prinks to be quiet */ + BH_Meta, /* Buffer contains metadata */ + BH_Prio, /* Buffer should be submitted with REQ_PRIO */ + BH_Defer_Completion, /* Defer AIO completion to workqueue */ + + BH_PrivateStart,/* not a state bit, but the first bit available + * for private allocation by other entities + */ +}; + +#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512) + +struct page; +struct buffer_head; +struct address_space; +typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); + +/* + * Historically, a buffer_head was used to map a single block + * within a page, and of course as the unit of I/O through the + * filesystem and block layers. Nowadays the basic I/O unit + * is the bio, and buffer_heads are used for extracting block + * mappings (via a get_block_t call), for tracking state within + * a page (via a page_mapping) and for wrapping bio submission + * for backward compatibility reasons (e.g. submit_bh). + */ +struct buffer_head { + unsigned long b_state; /* buffer state bitmap (see above) */ + struct buffer_head *b_this_page;/* circular list of page's buffers */ + struct page *b_page; /* the page this bh is mapped to */ + + sector_t b_blocknr; /* start block number */ + size_t b_size; /* size of mapping */ + char *b_data; /* pointer to data within the page */ + + struct block_device *b_bdev; + bh_end_io_t *b_end_io; /* I/O completion */ + void *b_private; /* reserved for b_end_io */ + struct list_head b_assoc_buffers; /* associated with another mapping */ + struct address_space *b_assoc_map; /* mapping this buffer is + associated with */ + atomic_t b_count; /* users using this buffer_head */ +}; + +/* + * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() + * and buffer_foo() functions. + * To avoid reset buffer flags that are already set, because that causes + * a costly cache line transition, check the flag first. 
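For orientation, a brief sketch (not part of the header) of how callers see the accessors that, for example, BUFFER_FNS(Uptodate, uptodate) and TAS_BUFFER_FNS(Dirty, dirty) below expand to; the function name is invented:

        static void example_buffer_state(struct buffer_head *bh)
        {
                /* Generated by BUFFER_FNS(Uptodate, uptodate): test and set. */
                if (!buffer_uptodate(bh))
                        set_buffer_uptodate(bh);

                /* Generated by TAS_BUFFER_FNS(Dirty, dirty): atomic test-and-set. */
                if (!test_set_buffer_dirty(bh))
                        pr_debug("buffer newly dirtied\n");
        }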
+ */ +#define BUFFER_FNS(bit, name) \ +static __always_inline void set_buffer_##name(struct buffer_head *bh) \ +{ \ + if (!test_bit(BH_##bit, &(bh)->b_state)) \ + set_bit(BH_##bit, &(bh)->b_state); \ +} \ +static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ +{ \ + clear_bit(BH_##bit, &(bh)->b_state); \ +} \ +static __always_inline int buffer_##name(const struct buffer_head *bh) \ +{ \ + return test_bit(BH_##bit, &(bh)->b_state); \ +} + +/* + * test_set_buffer_foo() and test_clear_buffer_foo() + */ +#define TAS_BUFFER_FNS(bit, name) \ +static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \ +{ \ + return test_and_set_bit(BH_##bit, &(bh)->b_state); \ +} \ +static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \ +{ \ + return test_and_clear_bit(BH_##bit, &(bh)->b_state); \ +} \ + +/* + * Emit the buffer bitops functions. Note that there are also functions + * of the form "mark_buffer_foo()". These are higher-level functions which + * do something in addition to setting a b_state bit. + */ +BUFFER_FNS(Uptodate, uptodate) +BUFFER_FNS(Dirty, dirty) +TAS_BUFFER_FNS(Dirty, dirty) +BUFFER_FNS(Lock, locked) +BUFFER_FNS(Req, req) +TAS_BUFFER_FNS(Req, req) +BUFFER_FNS(Mapped, mapped) +BUFFER_FNS(New, new) +BUFFER_FNS(Async_Read, async_read) +BUFFER_FNS(Async_Write, async_write) +BUFFER_FNS(Delay, delay) +BUFFER_FNS(Boundary, boundary) +BUFFER_FNS(Write_EIO, write_io_error) +BUFFER_FNS(Unwritten, unwritten) +BUFFER_FNS(Meta, meta) +BUFFER_FNS(Prio, prio) +BUFFER_FNS(Defer_Completion, defer_completion) + +#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) + +/* If we *know* page->private refers to buffer_heads */ +#define page_buffers(page) \ + ({ \ + BUG_ON(!PagePrivate(page)); \ + ((struct buffer_head *)page_private(page)); \ + }) +#define page_has_buffers(page) PagePrivate(page) + +void buffer_check_dirty_writeback(struct page *page, + bool *dirty, bool *writeback); + +/* + * Declarations + */ + +void mark_buffer_dirty(struct buffer_head *bh); +void mark_buffer_write_io_error(struct buffer_head *bh); +void touch_buffer(struct buffer_head *bh); +void set_bh_page(struct buffer_head *bh, + struct page *page, unsigned long offset); +int try_to_free_buffers(struct page *); +struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, + bool retry); +void create_empty_buffers(struct page *, unsigned long, + unsigned long b_state); +void end_buffer_read_sync(struct buffer_head *bh, int uptodate); +void end_buffer_write_sync(struct buffer_head *bh, int uptodate); +void end_buffer_async_write(struct buffer_head *bh, int uptodate); + +/* Things to do with buffers at mapping->private_list */ +void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); +int inode_has_buffers(struct inode *); +void invalidate_inode_buffers(struct inode *); +int remove_inode_buffers(struct inode *inode); +int sync_mapping_buffers(struct address_space *mapping); +void clean_bdev_aliases(struct block_device *bdev, sector_t block, + sector_t len); +static inline void clean_bdev_bh_alias(struct buffer_head *bh) +{ + clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1); +} + +void mark_buffer_async_write(struct buffer_head *bh); +void __wait_on_buffer(struct buffer_head *); +wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); +struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, + unsigned size); +struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, + unsigned size, gfp_t 
gfp); +void __brelse(struct buffer_head *); +void __bforget(struct buffer_head *); +void __breadahead(struct block_device *, sector_t block, unsigned int size); +void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, + gfp_t gfp); +struct buffer_head *__bread_gfp(struct block_device *, + sector_t block, unsigned size, gfp_t gfp); +void invalidate_bh_lrus(void); +struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); +void free_buffer_head(struct buffer_head * bh); +void unlock_buffer(struct buffer_head *bh); +void __lock_buffer(struct buffer_head *bh); +void ll_rw_block(int, int, int, struct buffer_head * bh[]); +int sync_dirty_buffer(struct buffer_head *bh); +int __sync_dirty_buffer(struct buffer_head *bh, int op_flags); +void write_dirty_buffer(struct buffer_head *bh, int op_flags); +int submit_bh(int, int, struct buffer_head *); +void write_boundary_block(struct block_device *bdev, + sector_t bblock, unsigned blocksize); +int bh_uptodate_or_lock(struct buffer_head *bh); +int bh_submit_read(struct buffer_head *bh); + +extern int buffer_heads_over_limit; + +/* + * Generic address_space_operations implementations for buffer_head-backed + * address_spaces. + */ +void block_invalidatepage(struct page *page, unsigned int offset, + unsigned int length); +int block_write_full_page(struct page *page, get_block_t *get_block, + struct writeback_control *wbc); +int __block_write_full_page(struct inode *inode, struct page *page, + get_block_t *get_block, struct writeback_control *wbc, + bh_end_io_t *handler); +int block_read_full_page(struct page*, get_block_t*); +int block_is_partially_uptodate(struct page *page, unsigned long from, + unsigned long count); +int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, + unsigned flags, struct page **pagep, get_block_t *get_block); +int __block_write_begin(struct page *page, loff_t pos, unsigned len, + get_block_t *get_block); +int block_write_end(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page *, void *); +int generic_write_end(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page *, void *); +void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); +void clean_page_buffers(struct page *page); +int cont_write_begin(struct file *, struct address_space *, loff_t, + unsigned, unsigned, struct page **, void **, + get_block_t *, loff_t *); +int generic_cont_expand_simple(struct inode *inode, loff_t size); +int block_commit_write(struct page *page, unsigned from, unsigned to); +int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, + get_block_t get_block); +/* Convert errno to return value from ->page_mkwrite() call */ +static inline int block_page_mkwrite_return(int err) +{ + if (err == 0) + return VM_FAULT_LOCKED; + if (err == -EFAULT || err == -EAGAIN) + return VM_FAULT_NOPAGE; + if (err == -ENOMEM) + return VM_FAULT_OOM; + /* -ENOSPC, -EDQUOT, -EIO ... 
*/ + return VM_FAULT_SIGBUS; +} +sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); +int block_truncate_page(struct address_space *, loff_t, get_block_t *); +int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, + struct page **, void **, get_block_t*); +int nobh_write_end(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page *, void *); +int nobh_truncate_page(struct address_space *, loff_t, get_block_t *); +int nobh_writepage(struct page *page, get_block_t *get_block, + struct writeback_control *wbc); + +void buffer_init(void); + +/* + * inline definitions + */ + +static inline void attach_page_buffers(struct page *page, + struct buffer_head *head) +{ + get_page(page); + SetPagePrivate(page); + set_page_private(page, (unsigned long)head); +} + +static inline void get_bh(struct buffer_head *bh) +{ + atomic_inc(&bh->b_count); +} + +static inline void put_bh(struct buffer_head *bh) +{ + smp_mb__before_atomic(); + atomic_dec(&bh->b_count); +} + +static inline void brelse(struct buffer_head *bh) +{ + if (bh) + __brelse(bh); +} + +static inline void bforget(struct buffer_head *bh) +{ + if (bh) + __bforget(bh); +} + +static inline struct buffer_head * +sb_bread(struct super_block *sb, sector_t block) +{ + return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); +} + +static inline struct buffer_head * +sb_bread_unmovable(struct super_block *sb, sector_t block) +{ + return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0); +} + +static inline void +sb_breadahead(struct super_block *sb, sector_t block) +{ + __breadahead(sb->s_bdev, block, sb->s_blocksize); +} + +static inline void +sb_breadahead_unmovable(struct super_block *sb, sector_t block) +{ + __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0); +} + +static inline struct buffer_head * +sb_getblk(struct super_block *sb, sector_t block) +{ + return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); +} + + +static inline struct buffer_head * +sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp) +{ + return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp); +} + +static inline struct buffer_head * +sb_find_get_block(struct super_block *sb, sector_t block) +{ + return __find_get_block(sb->s_bdev, block, sb->s_blocksize); +} + +static inline void +map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) +{ + set_buffer_mapped(bh); + bh->b_bdev = sb->s_bdev; + bh->b_blocknr = block; + bh->b_size = sb->s_blocksize; +} + +static inline void wait_on_buffer(struct buffer_head *bh) +{ + might_sleep(); + if (buffer_locked(bh)) + __wait_on_buffer(bh); +} + +static inline int trylock_buffer(struct buffer_head *bh) +{ + return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state)); +} + +static inline void lock_buffer(struct buffer_head *bh) +{ + might_sleep(); + if (!trylock_buffer(bh)) + __lock_buffer(bh); +} + +static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, + sector_t block, + unsigned size) +{ + return __getblk_gfp(bdev, block, size, 0); +} + +static inline struct buffer_head *__getblk(struct block_device *bdev, + sector_t block, + unsigned size) +{ + return __getblk_gfp(bdev, block, size, __GFP_MOVABLE); +} + +/** + * __bread() - reads a specified block and returns the bh + * @bdev: the block_device to read from + * @block: number of block + * @size: size (in bytes) to read + * + * Reads a specified block, and returns buffer head that contains it. 
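Tying these helpers together, a hedged sketch of the classic read/modify/write pattern a filesystem might use (the function name is invented, error handling is abbreviated):

        static int example_update_block(struct super_block *sb, sector_t block,
                                        const void *data, size_t len)
        {
                struct buffer_head *bh;

                bh = sb_bread(sb, block);       /* reads the block, returns an uptodate bh */
                if (!bh)
                        return -EIO;

                lock_buffer(bh);
                memcpy(bh->b_data, data, min_t(size_t, len, bh->b_size));
                mark_buffer_dirty(bh);          /* schedule the buffer for writeback */
                unlock_buffer(bh);

                brelse(bh);                     /* drop the reference taken by sb_bread() */
                return 0;
        }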
+ * The page cache is allocated from movable area so that it can be migrated. + * It returns NULL if the block was unreadable. + */ +static inline struct buffer_head * +__bread(struct block_device *bdev, sector_t block, unsigned size) +{ + return __bread_gfp(bdev, block, size, __GFP_MOVABLE); +} + +extern int __set_page_dirty_buffers(struct page *page); + +#else /* CONFIG_BLOCK */ + +static inline void buffer_init(void) {} +static inline int try_to_free_buffers(struct page *page) { return 1; } +static inline int inode_has_buffers(struct inode *inode) { return 0; } +static inline void invalidate_inode_buffers(struct inode *inode) {} +static inline int remove_inode_buffers(struct inode *inode) { return 1; } +static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } + +#endif /* CONFIG_BLOCK */ +#endif /* _LINUX_BUFFER_HEAD_H */ diff --git a/include/linux/bug.h b/include/linux/bug.h new file mode 100644 index 000000000..f639bd012 --- /dev/null +++ b/include/linux/bug.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BUG_H +#define _LINUX_BUG_H + +#include +#include +#include + +enum bug_trap_type { + BUG_TRAP_TYPE_NONE = 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; + +struct pt_regs; + +#ifdef __CHECKER__ +#define MAYBE_BUILD_BUG_ON(cond) (0) +#else /* __CHECKER__ */ + +#define MAYBE_BUILD_BUG_ON(cond) \ + do { \ + if (__builtin_constant_p((cond))) \ + BUILD_BUG_ON(cond); \ + else \ + BUG_ON(cond); \ + } while (0) + +#endif /* __CHECKER__ */ + +#ifdef CONFIG_GENERIC_BUG +#include + +static inline int is_warning_bug(const struct bug_entry *bug) +{ + return bug->flags & BUGFLAG_WARNING; +} + +struct bug_entry *find_bug(unsigned long bugaddr); + +enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); + +/* These are defined by the architecture */ +int is_valid_bugaddr(unsigned long addr); + +void generic_bug_clear_once(void); + +#else /* !CONFIG_GENERIC_BUG */ + +static inline void *find_bug(unsigned long bugaddr) +{ + return NULL; +} + +static inline enum bug_trap_type report_bug(unsigned long bug_addr, + struct pt_regs *regs) +{ + return BUG_TRAP_TYPE_BUG; +} + + +static inline void generic_bug_clear_once(void) {} + +#endif /* CONFIG_GENERIC_BUG */ + +/* + * Since detected data corruption should stop operation on the affected + * structures. Return value must be checked and sanely acted on by caller. + */ +static inline __must_check bool check_data_corruption(bool v) { return v; } +#define CHECK_DATA_CORRUPTION(condition, fmt, ...) 
\ + check_data_corruption(({ \ + bool corruption = unlikely(condition); \ + if (corruption) { \ + if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ + pr_err(fmt, ##__VA_ARGS__); \ + BUG(); \ + } else \ + WARN(1, fmt, ##__VA_ARGS__); \ + } \ + corruption; \ + })) + +#endif /* _LINUX_BUG_H */ diff --git a/include/linux/build-salt.h b/include/linux/build-salt.h new file mode 100644 index 000000000..bb007bd05 --- /dev/null +++ b/include/linux/build-salt.h @@ -0,0 +1,20 @@ +#ifndef __BUILD_SALT_H +#define __BUILD_SALT_H + +#include + +#define LINUX_ELFNOTE_BUILD_SALT 0x100 + +#ifdef __ASSEMBLER__ + +#define BUILD_SALT \ + ELFNOTE(Linux, LINUX_ELFNOTE_BUILD_SALT, .asciz CONFIG_BUILD_SALT) + +#else + +#define BUILD_SALT \ + ELFNOTE32("Linux", LINUX_ELFNOTE_BUILD_SALT, CONFIG_BUILD_SALT) + +#endif + +#endif /* __BUILD_SALT_H */ diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h new file mode 100644 index 000000000..6099f754a --- /dev/null +++ b/include/linux/build_bug.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BUILD_BUG_H +#define _LINUX_BUILD_BUG_H + +#include + +#ifdef __CHECKER__ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) +#define BUILD_BUG_ON_ZERO(e) (0) +#define BUILD_BUG_ON_INVALID(e) (0) +#define BUILD_BUG_ON_MSG(cond, msg) (0) +#define BUILD_BUG_ON(condition) (0) +#define BUILD_BUG() (0) +#else /* __CHECKER__ */ + +/* Force a compilation error if a constant expression is not a power of 2 */ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON(((n) & ((n) - 1)) != 0) +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) + +/* + * Force a compilation error if condition is true, but also produce a + * result (of value 0 and type size_t), so the expression can be used + * e.g. in a structure initializer (or where-ever else comma expressions + * aren't permitted). + */ +#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) + +/* + * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the + * expression but avoids the generation of any code, even if that expression + * has side-effects. + */ +#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e)))) + +/** + * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied + * error message. + * @condition: the condition which the compiler should know is false. + * + * See BUILD_BUG_ON for description. + */ +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) + +/** + * BUILD_BUG_ON - break compile if a condition is true. + * @condition: the condition which the compiler should know is false. + * + * If you have some code which relies on certain constants being equal, or + * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to + * detect if someone changes it. + * + * The implementation uses gcc's reluctance to create a negative array, but gcc + * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to + * inline functions). Luckily, in 4.3 they added the "error" function + * attribute just for this type of case. Thus, we use a negative sized array + * (should always create an error on gcc versions older than 4.4) and then call + * an undefined function with the error attribute (should always create an + * error on gcc 4.3 and later). If for some reason, neither creates a + * compile-time error, we'll still have a link-time error, which is harder to + * track down. 
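A brief usage sketch for BUILD_BUG_ON() as defined below (struct on_wire_header is invented for illustration): the check costs nothing at run time and simply breaks the build if the stated assumption stops holding.

        struct on_wire_header {                 /* hypothetical fixed-layout structure */
                __le32 magic;
                __le16 version;
                __le16 len;
        };

        static inline void on_wire_header_checks(void)
        {
                /* Fails to compile if padding ever changes the wire-format size. */
                BUILD_BUG_ON(sizeof(struct on_wire_header) != 8);
                BUILD_BUG_ON(offsetof(struct on_wire_header, len) != 6);
        }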
+ */ +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif + +/** + * BUILD_BUG - break compile if used. + * + * If you have some code that you expect the compiler to eliminate at + * build time, you should use BUILD_BUG to detect if it is + * unexpectedly used. + */ +#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") + +#endif /* __CHECKER__ */ + +#ifdef __GENKSYMS__ +/* genksyms gets confused by _Static_assert */ +#define _Static_assert(expr, ...) +#endif + +#endif /* _LINUX_BUILD_BUG_H */ diff --git a/include/linux/bvec.h b/include/linux/bvec.h new file mode 100644 index 000000000..bc1f16e9f --- /dev/null +++ b/include/linux/bvec.h @@ -0,0 +1,144 @@ +/* + * bvec iterator + * + * Copyright (C) 2001 Ming Lei + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- + */ +#ifndef __LINUX_BVEC_ITER_H +#define __LINUX_BVEC_ITER_H + +#include +#include +#include + +/* + * was unsigned short, but we might as well be ready for > 64kB I/O pages + */ +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct bvec_iter { + sector_t bi_sector; /* device address in 512 byte + sectors */ + unsigned int bi_size; /* residual I/O count */ + + unsigned int bi_idx; /* current index into bvl_vec */ + + unsigned int bi_done; /* number of bytes completed */ + + unsigned int bi_bvec_done; /* number of bytes completed in + current bvec */ +}; + +/* + * various member access, note that bio_data should of course not be used + * on highmem page vectors + */ +#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) + +#define bvec_iter_page(bvec, iter) \ + (__bvec_iter_bvec((bvec), (iter))->bv_page) + +#define bvec_iter_len(bvec, iter) \ + min((iter).bi_size, \ + __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) + +#define bvec_iter_offset(bvec, iter) \ + (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) + +#define bvec_iter_bvec(bvec, iter) \ +((struct bio_vec) { \ + .bv_page = bvec_iter_page((bvec), (iter)), \ + .bv_len = bvec_iter_len((bvec), (iter)), \ + .bv_offset = bvec_iter_offset((bvec), (iter)), \ +}) + +static inline bool bvec_iter_advance(const struct bio_vec *bv, + struct bvec_iter *iter, unsigned bytes) +{ + if (WARN_ONCE(bytes > iter->bi_size, + "Attempted to advance past end of bvec iter\n")) { + iter->bi_size = 0; + return false; + } + + while (bytes) { + unsigned iter_len = bvec_iter_len(bv, *iter); + unsigned len = min(bytes, iter_len); + + bytes -= len; + iter->bi_size -= len; + iter->bi_bvec_done += len; + iter->bi_done += len; + + if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { + iter->bi_bvec_done = 0; + iter->bi_idx++; + } + } + return true; +} + +static inline bool bvec_iter_rewind(const struct bio_vec *bv, + struct bvec_iter *iter, + 
unsigned int bytes) +{ + while (bytes) { + unsigned len = min(bytes, iter->bi_bvec_done); + + if (iter->bi_bvec_done == 0) { + if (WARN_ONCE(iter->bi_idx == 0, + "Attempted to rewind iter beyond " + "bvec's boundaries\n")) { + return false; + } + iter->bi_idx--; + iter->bi_bvec_done = __bvec_iter_bvec(bv, *iter)->bv_len; + continue; + } + bytes -= len; + iter->bi_size += len; + iter->bi_bvec_done -= len; + } + return true; +} + +static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) +{ + iter->bi_bvec_done = 0; + iter->bi_idx++; +} + +#define for_each_bvec(bvl, bio_vec, iter, start) \ + for (iter = (start); \ + (iter).bi_size && \ + ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ + (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \ + (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) + +/* for iterating one bio from start to end */ +#define BVEC_ITER_ALL_INIT (struct bvec_iter) \ +{ \ + .bi_sector = 0, \ + .bi_size = UINT_MAX, \ + .bi_idx = 0, \ + .bi_bvec_done = 0, \ +} + +#endif /* __LINUX_BVEC_ITER_H */ diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h new file mode 100644 index 000000000..d64a524d3 --- /dev/null +++ b/include/linux/byteorder/big_endian.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H +#define _LINUX_BYTEORDER_BIG_ENDIAN_H + +#include + +#ifndef CONFIG_CPU_BIG_ENDIAN +#warning inconsistent configuration, needs CONFIG_CPU_BIG_ENDIAN +#endif + +#include +#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */ diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h new file mode 100644 index 000000000..4b13e0a3e --- /dev/null +++ b/include/linux/byteorder/generic.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BYTEORDER_GENERIC_H +#define _LINUX_BYTEORDER_GENERIC_H + +/* + * linux/byteorder/generic.h + * Generic Byte-reordering support + * + * The "... p" macros, like le64_to_cpup, can be used with pointers + * to unaligned data, but there will be a performance penalty on + * some architectures. Use get_unaligned for unaligned data. + * + * Francois-Rene Rideau 19970707 + * gathered all the good ideas from all asm-foo/byteorder.h into one file, + * cleaned them up. + * I hope it is compliant with non-GCC compilers. + * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h, + * because I wasn't sure it would be ok to put it in types.h + * Upgraded it to 2.1.43 + * Francois-Rene Rideau 19971012 + * Upgraded it to 2.1.57 + * to please Linus T., replaced huge #ifdef's between little/big endian + * by nestedly #include'd files. + * Francois-Rene Rideau 19971205 + * Made it to 2.1.71; now a facelift: + * Put files under include/linux/byteorder/ + * Split swab from generic support. + * + * TODO: + * = Regular kernel maintainers could also replace all these manual + * byteswap macros that remain, disseminated among drivers, + * after some grep or the sources... + * = Linus might want to rename all these macros and files to fit his taste, + * to fit his personal naming scheme. + * = it seems that a few drivers would also appreciate + * nybble swapping support... + * = every architecture could add their byteswap macro in asm/byteorder.h + * see how some architectures already do (i386, alpha, ppc, etc) + * = cpu_to_beXX and beXX_to_cpu might some day need to be well + * distinguished throughout the kernel. This is not the case currently, + * since little endian, big endian, and pdp endian machines needn't it. 
+ * But this might be the case for, say, a port of Linux to 20/21 bit + * architectures (and F21 Linux addict around?). + */ + +/* + * The following macros are to be defined by : + * + * Conversion of long and short int between network and host format + * ntohl(__u32 x) + * ntohs(__u16 x) + * htonl(__u32 x) + * htons(__u16 x) + * It seems that some programs (which? where? or perhaps a standard? POSIX?) + * might like the above to be functions, not macros (why?). + * if that's true, then detect them, and take measures. + * Anyway, the measure is: define only ___ntohl as a macro instead, + * and in a separate file, have + * unsigned long inline ntohl(x){return ___ntohl(x);} + * + * The same for constant arguments + * __constant_ntohl(__u32 x) + * __constant_ntohs(__u16 x) + * __constant_htonl(__u32 x) + * __constant_htons(__u16 x) + * + * Conversion of XX-bit integers (16- 32- or 64-) + * between native CPU format and little/big endian format + * 64-bit stuff only defined for proper architectures + * cpu_to_[bl]eXX(__uXX x) + * [bl]eXX_to_cpu(__uXX x) + * + * The same, but takes a pointer to the value to convert + * cpu_to_[bl]eXXp(__uXX x) + * [bl]eXX_to_cpup(__uXX x) + * + * The same, but change in situ + * cpu_to_[bl]eXXs(__uXX x) + * [bl]eXX_to_cpus(__uXX x) + * + * See asm-foo/byteorder.h for examples of how to provide + * architecture-optimized versions + * + */ + +#define cpu_to_le64 __cpu_to_le64 +#define le64_to_cpu __le64_to_cpu +#define cpu_to_le32 __cpu_to_le32 +#define le32_to_cpu __le32_to_cpu +#define cpu_to_le16 __cpu_to_le16 +#define le16_to_cpu __le16_to_cpu +#define cpu_to_be64 __cpu_to_be64 +#define be64_to_cpu __be64_to_cpu +#define cpu_to_be32 __cpu_to_be32 +#define be32_to_cpu __be32_to_cpu +#define cpu_to_be16 __cpu_to_be16 +#define be16_to_cpu __be16_to_cpu +#define cpu_to_le64p __cpu_to_le64p +#define le64_to_cpup __le64_to_cpup +#define cpu_to_le32p __cpu_to_le32p +#define le32_to_cpup __le32_to_cpup +#define cpu_to_le16p __cpu_to_le16p +#define le16_to_cpup __le16_to_cpup +#define cpu_to_be64p __cpu_to_be64p +#define be64_to_cpup __be64_to_cpup +#define cpu_to_be32p __cpu_to_be32p +#define be32_to_cpup __be32_to_cpup +#define cpu_to_be16p __cpu_to_be16p +#define be16_to_cpup __be16_to_cpup +#define cpu_to_le64s __cpu_to_le64s +#define le64_to_cpus __le64_to_cpus +#define cpu_to_le32s __cpu_to_le32s +#define le32_to_cpus __le32_to_cpus +#define cpu_to_le16s __cpu_to_le16s +#define le16_to_cpus __le16_to_cpus +#define cpu_to_be64s __cpu_to_be64s +#define be64_to_cpus __be64_to_cpus +#define cpu_to_be32s __cpu_to_be32s +#define be32_to_cpus __be32_to_cpus +#define cpu_to_be16s __cpu_to_be16s +#define be16_to_cpus __be16_to_cpus + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. 
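To illustrate the conversion macros above (a sketch; struct disk_record and the function name are invented), an on-disk little-endian counter would be handled like this; the le32_add_cpu() helper defined just below does the same in one call:

        struct disk_record {
                __le32 count;                   /* stored little-endian on disk */
        };

        static inline void disk_record_add(struct disk_record *rec, u32 delta)
        {
                u32 count = le32_to_cpu(rec->count);    /* disk order -> CPU order */

                rec->count = cpu_to_le32(count + delta);        /* and back */
        }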
+ */ + +#undef ntohl +#undef ntohs +#undef htonl +#undef htons + +#define ___htonl(x) __cpu_to_be32(x) +#define ___htons(x) __cpu_to_be16(x) +#define ___ntohl(x) __be32_to_cpu(x) +#define ___ntohs(x) __be16_to_cpu(x) + +#define htonl(x) ___htonl(x) +#define ntohl(x) ___ntohl(x) +#define htons(x) ___htons(x) +#define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpu(*var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpu(*var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpu(*var) + val); +} + +/* XXX: this stuff can be optimized */ +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + __le32_to_cpus(buf); + buf++; + } +} + +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + __cpu_to_le32s(buf); + buf++; + } +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpu(*var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpu(*var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpu(*var) + val); +} + +static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + dst[i] = cpu_to_be32(src[i]); +} + +static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + dst[i] = be32_to_cpu(src[i]); +} + +#endif /* _LINUX_BYTEORDER_GENERIC_H */ diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h new file mode 100644 index 000000000..1ec650ff7 --- /dev/null +++ b/include/linux/byteorder/little_endian.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H +#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H + +#include + +#ifdef CONFIG_CPU_BIG_ENDIAN +#warning inconsistent configuration, CONFIG_CPU_BIG_ENDIAN is set +#endif + +#include +#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ diff --git a/include/linux/c2port.h b/include/linux/c2port.h new file mode 100644 index 000000000..f2736348c --- /dev/null +++ b/include/linux/c2port.h @@ -0,0 +1,62 @@ +/* + * Silicon Labs C2 port Linux support + * + * Copyright (c) 2007 Rodolfo Giometti + * Copyright (c) 2007 Eurotech S.p.A. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation + */ + +#define C2PORT_NAME_LEN 32 + +struct device; + +/* + * C2 port basic structs + */ + +/* Main struct */ +struct c2port_ops; +struct c2port_device { + unsigned int access:1; + unsigned int flash_access:1; + + int id; + char name[C2PORT_NAME_LEN]; + struct c2port_ops *ops; + struct mutex mutex; /* prevent races during read/write */ + + struct device *dev; + + void *private_data; +}; + +/* Basic operations */ +struct c2port_ops { + /* Flash layout */ + unsigned short block_size; /* flash block size in bytes */ + unsigned short blocks_num; /* flash blocks number */ + + /* Enable or disable the access to C2 port */ + void (*access)(struct c2port_device *dev, int status); + + /* Set C2D data line as input/output */ + void (*c2d_dir)(struct c2port_device *dev, int dir); + + /* Read/write C2D data line */ + int (*c2d_get)(struct c2port_device *dev); + void (*c2d_set)(struct c2port_device *dev, int status); + + /* Write C2CK clock line */ + void (*c2ck_set)(struct c2port_device *dev, int status); +}; + +/* + * Exported functions + */ + +extern struct c2port_device *c2port_device_register(char *name, + struct c2port_ops *ops, void *devdata); +extern void c2port_device_unregister(struct c2port_device *dev); diff --git a/include/linux/cache.h b/include/linux/cache.h new file mode 100644 index 000000000..750621e41 --- /dev/null +++ b/include/linux/cache.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_CACHE_H +#define __LINUX_CACHE_H + +#include +#include + +#ifndef L1_CACHE_ALIGN +#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES) +#endif + +#ifndef SMP_CACHE_BYTES +#define SMP_CACHE_BYTES L1_CACHE_BYTES +#endif + +/* + * __read_mostly is used to keep rarely changing variables out of frequently + * updated cachelines. If an architecture doesn't support it, ignore the + * hint. + */ +#ifndef __read_mostly +#define __read_mostly +#endif + +/* + * __ro_after_init is used to mark things that are read-only after init (i.e. + * after mark_rodata_ro() has been called). These are effectively read-only, + * but may get written to during init, so can't live in .rodata (via "const"). + */ +#ifndef __ro_after_init +#define __ro_after_init __attribute__((__section__(".data..ro_after_init"))) +#endif + +#ifndef ____cacheline_aligned +#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#ifndef __cacheline_aligned +#define __cacheline_aligned \ + __attribute__((__aligned__(SMP_CACHE_BYTES), \ + __section__(".data..cacheline_aligned"))) +#endif /* __cacheline_aligned */ + +#ifndef __cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define __cacheline_aligned_in_smp __cacheline_aligned +#else +#define __cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +/* + * The maximum alignment needed for some critical structures + * These could be inter-node cacheline sizes/L3 cacheline + * size etc. 
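A short sketch of how the annotations defined above are typically applied (variable and structure names invented):

        /* Read often, written rarely: keep it away from hot, frequently-written data. */
        static unsigned int poll_interval __read_mostly = 100;

        /* Written once during init, effectively read-only afterwards. */
        static void *boot_private __ro_after_init;

        /* Give each CPU's statistics their own cache line to avoid false sharing. */
        struct pcpu_stats {
                u64     packets;
                u64     bytes;
        } ____cacheline_aligned_in_smp;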
Define this in asm/cache.h for your arch + */ +#ifndef INTERNODE_CACHE_SHIFT +#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT +#endif + +#if !defined(____cacheline_internodealigned_in_smp) +#if defined(CONFIG_SMP) +#define ____cacheline_internodealigned_in_smp \ + __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) +#else +#define ____cacheline_internodealigned_in_smp +#endif +#endif + +#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE +#define cache_line_size() L1_CACHE_BYTES +#endif + +#endif /* __LINUX_CACHE_H */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h new file mode 100644 index 000000000..66654e6f9 --- /dev/null +++ b/include/linux/cacheinfo.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CACHEINFO_H +#define _LINUX_CACHEINFO_H + +#include +#include +#include + +struct device_node; +struct attribute; + +enum cache_type { + CACHE_TYPE_NOCACHE = 0, + CACHE_TYPE_INST = BIT(0), + CACHE_TYPE_DATA = BIT(1), + CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA, + CACHE_TYPE_UNIFIED = BIT(2), +}; + +/** + * struct cacheinfo - represent a cache leaf node + * @id: This cache's id. It is unique among caches with the same (type, level). + * @type: type of the cache - data, inst or unified + * @level: represents the hierarchy in the multi-level cache + * @coherency_line_size: size of each cache line usually representing + * the minimum amount of data that gets transferred from memory + * @number_of_sets: total number of sets, a set is a collection of cache + * lines sharing the same index + * @ways_of_associativity: number of ways in which a particular memory + * block can be placed in the cache + * @physical_line_partition: number of physical cache lines sharing the + * same cachetag + * @size: Total size of the cache + * @shared_cpu_map: logical cpumask representing all the cpus sharing + * this cache node + * @attributes: bitfield representing various cache attributes + * @fw_token: Unique value used to determine if different cacheinfo + * structures represent a single hardware cache instance. 
+ * @disable_sysfs: indicates whether this node is visible to the user via + * sysfs or not + * @priv: pointer to any private data structure specific to particular + * cache design + * + * While @of_node, @disable_sysfs and @priv are used for internal book + * keeping, the remaining members form the core properties of the cache + */ +struct cacheinfo { + unsigned int id; + enum cache_type type; + unsigned int level; + unsigned int coherency_line_size; + unsigned int number_of_sets; + unsigned int ways_of_associativity; + unsigned int physical_line_partition; + unsigned int size; + cpumask_t shared_cpu_map; + unsigned int attributes; +#define CACHE_WRITE_THROUGH BIT(0) +#define CACHE_WRITE_BACK BIT(1) +#define CACHE_WRITE_POLICY_MASK \ + (CACHE_WRITE_THROUGH | CACHE_WRITE_BACK) +#define CACHE_READ_ALLOCATE BIT(2) +#define CACHE_WRITE_ALLOCATE BIT(3) +#define CACHE_ALLOCATE_POLICY_MASK \ + (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE) +#define CACHE_ID BIT(4) + void *fw_token; + bool disable_sysfs; + void *priv; +}; + +struct cpu_cacheinfo { + struct cacheinfo *info_list; + unsigned int num_levels; + unsigned int num_leaves; + bool cpu_map_populated; +}; + +struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); +int init_cache_level(unsigned int cpu); +int populate_cache_leaves(unsigned int cpu); +int cache_setup_acpi(unsigned int cpu); +#ifndef CONFIG_ACPI_PPTT +/* + * acpi_find_last_cache_level is only called on ACPI enabled + * platforms using the PPTT for topology. This means that if + * the platform supports other firmware configuration methods + * we need to stub out the call when ACPI is disabled. + * ACPI enabled platforms not using PPTT won't be making calls + * to this function so we need not worry about them. + */ +static inline int acpi_find_last_cache_level(unsigned int cpu) +{ + return 0; +} +#else +int acpi_find_last_cache_level(unsigned int cpu); +#endif + +const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); + +#endif /* _LINUX_CACHEINFO_H */ diff --git a/include/linux/can/core.h b/include/linux/can/core.h new file mode 100644 index 000000000..6099bc18b --- /dev/null +++ b/include/linux/can/core.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/can/core.h + * + * Protoypes and definitions for CAN protocol modules using the PF_CAN core + * + * Authors: Oliver Hartkopp + * Urs Thuermann + * Copyright (c) 2002-2017 Volkswagen Group Electronic Research + * All rights reserved. + * + */ + +#ifndef _CAN_CORE_H +#define _CAN_CORE_H + +#include +#include +#include + +#define CAN_VERSION "20170425" + +/* increment this number each time you change some user-space interface */ +#define CAN_ABI_VERSION "9" + +#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION + +#define DNAME(dev) ((dev) ? (dev)->name : "any") + +/** + * struct can_proto - CAN protocol structure + * @type: type argument in socket() syscall, e.g. SOCK_DGRAM. + * @protocol: protocol number in socket() syscall. + * @ops: pointer to struct proto_ops for sock->ops. + * @prot: pointer to struct proto structure. 
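/*
 * A minimal sketch that walks the per-CPU cache leaves exposed by
 * get_cpu_cacheinfo() above; the printed wording is illustrative only.
 */
static void example_dump_caches(unsigned int cpu)
{
	struct cpu_cacheinfo *cci = get_cpu_cacheinfo(cpu);
	unsigned int i;

	for (i = 0; i < cci->num_leaves; i++) {
		struct cacheinfo *leaf = &cci->info_list[i];

		pr_info("cpu%u: L%u %s cache, %u bytes, %u ways\n",
			cpu, leaf->level,
			leaf->type == CACHE_TYPE_UNIFIED ? "unified" :
			leaf->type == CACHE_TYPE_INST ? "instruction" : "data",
			leaf->size, leaf->ways_of_associativity);
	}
}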
+ */ +struct can_proto { + int type; + int protocol; + const struct proto_ops *ops; + struct proto *prot; +}; + +/* function prototypes for the CAN networklayer core (af_can.c) */ + +extern int can_proto_register(const struct can_proto *cp); +extern void can_proto_unregister(const struct can_proto *cp); + +int can_rx_register(struct net *net, struct net_device *dev, + canid_t can_id, canid_t mask, + void (*func)(struct sk_buff *, void *), + void *data, char *ident, struct sock *sk); + +extern void can_rx_unregister(struct net *net, struct net_device *dev, + canid_t can_id, canid_t mask, + void (*func)(struct sk_buff *, void *), + void *data); + +extern int can_send(struct sk_buff *skb, int loop); +extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); + +#endif /* !_CAN_CORE_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h new file mode 100644 index 000000000..73199facd --- /dev/null +++ b/include/linux/can/dev.h @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/can/dev.h + * + * Definitions for the CAN network device driver interface + * + * Copyright (C) 2006 Andrey Volkov + * Varma Electronics Oy + * + * Copyright (C) 2008 Wolfgang Grandegger + * + */ + +#ifndef _CAN_DEV_H +#define _CAN_DEV_H + +#include +#include +#include +#include +#include +#include + +/* + * CAN mode + */ +enum can_mode { + CAN_MODE_STOP = 0, + CAN_MODE_START, + CAN_MODE_SLEEP +}; + +/* + * CAN common private data + */ +struct can_priv { + struct net_device *dev; + struct can_device_stats can_stats; + + struct can_bittiming bittiming, data_bittiming; + const struct can_bittiming_const *bittiming_const, + *data_bittiming_const; + const u16 *termination_const; + unsigned int termination_const_cnt; + u16 termination; + const u32 *bitrate_const; + unsigned int bitrate_const_cnt; + const u32 *data_bitrate_const; + unsigned int data_bitrate_const_cnt; + u32 bitrate_max; + struct can_clock clock; + + enum can_state state; + + /* CAN controller features - see include/uapi/linux/can/netlink.h */ + u32 ctrlmode; /* current options setting */ + u32 ctrlmode_supported; /* options that can be modified by netlink */ + u32 ctrlmode_static; /* static enabled options for driver/hardware */ + + int restart_ms; + struct delayed_work restart_work; + + int (*do_set_bittiming)(struct net_device *dev); + int (*do_set_data_bittiming)(struct net_device *dev); + int (*do_set_mode)(struct net_device *dev, enum can_mode mode); + int (*do_set_termination)(struct net_device *dev, u16 term); + int (*do_get_state)(const struct net_device *dev, + enum can_state *state); + int (*do_get_berr_counter)(const struct net_device *dev, + struct can_berr_counter *bec); + + unsigned int echo_skb_max; + struct sk_buff **echo_skb; + +#ifdef CONFIG_CAN_LEDS + struct led_trigger *tx_led_trig; + char tx_led_trig_name[CAN_LED_NAME_SZ]; + struct led_trigger *rx_led_trig; + char rx_led_trig_name[CAN_LED_NAME_SZ]; + struct led_trigger *rxtx_led_trig; + char rxtx_led_trig_name[CAN_LED_NAME_SZ]; +#endif +}; + +/* + * get_can_dlc(value) - helper macro to cast a given data length code (dlc) + * to __u8 and ensure the dlc value to be max. 8 bytes. 
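/*
 * A minimal sketch of a PF_CAN protocol module hooking a receive callback
 * with can_rx_register() declared above; the CAN ID, ident string and
 * function names are illustrative only.
 */
static void example_can_rcv(struct sk_buff *skb, void *data)
{
	const struct can_frame *cf = (const struct can_frame *)skb->data;

	pr_debug("example: frame id 0x%x, dlc %u\n", cf->can_id, cf->can_dlc);
}

static int example_can_hook(struct net *net, struct net_device *dev,
			    struct sock *sk)
{
	/* deliver only standard-frame CAN ID 0x123 to example_can_rcv() */
	return can_rx_register(net, dev, 0x123, CAN_SFF_MASK,
			       example_can_rcv, NULL, "example", sk);
}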
+ * + * To be used in the CAN netdriver receive path to ensure conformance with + * ISO 11898-1 Chapter 8.4.2.3 (DLC field) + */ +#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC)) +#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) + +/* Check for outgoing skbs that have not been created by the CAN subsystem */ +static inline bool can_skb_headroom_valid(struct net_device *dev, + struct sk_buff *skb) +{ + /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ + if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) + return false; + + /* af_packet does not apply CAN skb specific settings */ + if (skb->ip_summed == CHECKSUM_NONE) { + /* init headroom */ + can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* preform proper loopback on capable devices */ + if (dev->flags & IFF_ECHO) + skb->pkt_type = PACKET_LOOPBACK; + else + skb->pkt_type = PACKET_HOST; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + } + + return true; +} + +/* Drop a given socketbuffer if it does not contain a valid CAN frame. */ +static inline bool can_dropped_invalid_skb(struct net_device *dev, + struct sk_buff *skb) +{ + const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + + if (skb->protocol == htons(ETH_P_CAN)) { + if (unlikely(skb->len != CAN_MTU || + cfd->len > CAN_MAX_DLEN)) + goto inval_skb; + } else if (skb->protocol == htons(ETH_P_CANFD)) { + if (unlikely(skb->len != CANFD_MTU || + cfd->len > CANFD_MAX_DLEN)) + goto inval_skb; + } else + goto inval_skb; + + if (!can_skb_headroom_valid(dev, skb)) + goto inval_skb; + + return false; + +inval_skb: + kfree_skb(skb); + dev->stats.tx_dropped++; + return true; +} + +static inline bool can_is_canfd_skb(const struct sk_buff *skb) +{ + /* the CAN specific type of skb is identified by its data length */ + return skb->len == CANFD_MTU; +} + +/* helper to define static CAN controller features at device creation time */ +static inline void can_set_static_ctrlmode(struct net_device *dev, + u32 static_mode) +{ + struct can_priv *priv = netdev_priv(dev); + + /* alloc_candev() succeeded => netdev_priv() is valid at this point */ + priv->ctrlmode = static_mode; + priv->ctrlmode_static = static_mode; + + /* override MTU which was set by default in can_setup()? 
*/ + if (static_mode & CAN_CTRLMODE_FD) + dev->mtu = CANFD_MTU; +} + +/* get data length from can_dlc with sanitized can_dlc */ +u8 can_dlc2len(u8 can_dlc); + +/* map the sanitized data length to an appropriate data length code */ +u8 can_len2dlc(u8 len); + +struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, + unsigned int txqs, unsigned int rxqs); +#define alloc_candev(sizeof_priv, echo_skb_max) \ + alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1) +#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \ + alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count) +void free_candev(struct net_device *dev); + +/* a candev safe wrapper around netdev_priv */ +struct can_priv *safe_candev_priv(struct net_device *dev); + +int open_candev(struct net_device *dev); +void close_candev(struct net_device *dev); +int can_change_mtu(struct net_device *dev, int new_mtu); + +int register_candev(struct net_device *dev); +void unregister_candev(struct net_device *dev); + +int can_restart_now(struct net_device *dev); +void can_bus_off(struct net_device *dev); + +void can_change_state(struct net_device *dev, struct can_frame *cf, + enum can_state tx_state, enum can_state rx_state); + +void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + unsigned int idx); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr); +unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); +void can_free_echo_skb(struct net_device *dev, unsigned int idx); + +#ifdef CONFIG_OF +void of_can_transceiver(struct net_device *dev); +#else +static inline void of_can_transceiver(struct net_device *dev) { } +#endif + +struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); +struct sk_buff *alloc_canfd_skb(struct net_device *dev, + struct canfd_frame **cfd); +struct sk_buff *alloc_can_err_skb(struct net_device *dev, + struct can_frame **cf); + +#endif /* !_CAN_DEV_H */ diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h new file mode 100644 index 000000000..46dceef2c --- /dev/null +++ b/include/linux/can/dev/peak_canfd.h @@ -0,0 +1,308 @@ +/* + * CAN driver for PEAK System micro-CAN based adapters + * + * Copyright (C) 2003-2011 PEAK System-Technik GmbH + * Copyright (C) 2011-2013 Stephane Grosjean + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published + * by the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
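/*
 * A minimal sketch of the usual controller bring-up built on the helpers
 * above; "struct example_can_priv", the clock frequency and the echo skb
 * count are illustrative, and a real driver also fills in netdev_ops,
 * bittiming constants and so on.
 */
struct example_can_priv {
	struct can_priv can;	/* must stay the first member */
	void __iomem *regs;
};

static int example_candev_setup(void)
{
	struct net_device *ndev;
	struct example_can_priv *priv;
	int err;

	ndev = alloc_candev(sizeof(*priv), 4);	/* room for 4 echo skbs */
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->can.clock.freq = 24000000;	/* 24 MHz controller clock */

	err = register_candev(ndev);
	if (err)
		free_candev(ndev);

	return err;
}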
+ */ +#ifndef PUCAN_H +#define PUCAN_H + +/* uCAN commands opcodes list (low-order 10 bits) */ +#define PUCAN_CMD_NOP 0x000 +#define PUCAN_CMD_RESET_MODE 0x001 +#define PUCAN_CMD_NORMAL_MODE 0x002 +#define PUCAN_CMD_LISTEN_ONLY_MODE 0x003 +#define PUCAN_CMD_TIMING_SLOW 0x004 +#define PUCAN_CMD_TIMING_FAST 0x005 +#define PUCAN_CMD_SET_STD_FILTER 0x006 +#define PUCAN_CMD_RESERVED2 0x007 +#define PUCAN_CMD_FILTER_STD 0x008 +#define PUCAN_CMD_TX_ABORT 0x009 +#define PUCAN_CMD_WR_ERR_CNT 0x00a +#define PUCAN_CMD_SET_EN_OPTION 0x00b +#define PUCAN_CMD_CLR_DIS_OPTION 0x00c +#define PUCAN_CMD_RX_BARRIER 0x010 +#define PUCAN_CMD_END_OF_COLLECTION 0x3ff + +/* uCAN received messages list */ +#define PUCAN_MSG_CAN_RX 0x0001 +#define PUCAN_MSG_ERROR 0x0002 +#define PUCAN_MSG_STATUS 0x0003 +#define PUCAN_MSG_BUSLOAD 0x0004 + +#define PUCAN_MSG_CACHE_CRITICAL 0x0102 + +/* uCAN transmitted messages */ +#define PUCAN_MSG_CAN_TX 0x1000 + +/* uCAN command common header */ +struct __packed pucan_command { + __le16 opcode_channel; + u16 args[3]; +}; + +/* return the opcode from the opcode_channel field of a command */ +static inline u16 pucan_cmd_get_opcode(struct pucan_command *c) +{ + return le16_to_cpu(c->opcode_channel) & 0x3ff; +} + +#define PUCAN_TSLOW_BRP_BITS 10 +#define PUCAN_TSLOW_TSGEG1_BITS 8 +#define PUCAN_TSLOW_TSGEG2_BITS 7 +#define PUCAN_TSLOW_SJW_BITS 7 + +#define PUCAN_TSLOW_BRP_MASK ((1 << PUCAN_TSLOW_BRP_BITS) - 1) +#define PUCAN_TSLOW_TSEG1_MASK ((1 << PUCAN_TSLOW_TSGEG1_BITS) - 1) +#define PUCAN_TSLOW_TSEG2_MASK ((1 << PUCAN_TSLOW_TSGEG2_BITS) - 1) +#define PUCAN_TSLOW_SJW_MASK ((1 << PUCAN_TSLOW_SJW_BITS) - 1) + +/* uCAN TIMING_SLOW command fields */ +#define PUCAN_TSLOW_SJW_T(s, t) (((s) & PUCAN_TSLOW_SJW_MASK) | \ + ((!!(t)) << 7)) +#define PUCAN_TSLOW_TSEG2(t) ((t) & PUCAN_TSLOW_TSEG2_MASK) +#define PUCAN_TSLOW_TSEG1(t) ((t) & PUCAN_TSLOW_TSEG1_MASK) +#define PUCAN_TSLOW_BRP(b) ((b) & PUCAN_TSLOW_BRP_MASK) + +struct __packed pucan_timing_slow { + __le16 opcode_channel; + + u8 ewl; /* Error Warning limit */ + u8 sjw_t; /* Sync Jump Width + Triple sampling */ + u8 tseg2; /* Timing SEGment 2 */ + u8 tseg1; /* Timing SEGment 1 */ + + __le16 brp; /* BaudRate Prescaler */ +}; + +#define PUCAN_TFAST_BRP_BITS 10 +#define PUCAN_TFAST_TSGEG1_BITS 5 +#define PUCAN_TFAST_TSGEG2_BITS 4 +#define PUCAN_TFAST_SJW_BITS 4 + +#define PUCAN_TFAST_BRP_MASK ((1 << PUCAN_TFAST_BRP_BITS) - 1) +#define PUCAN_TFAST_TSEG1_MASK ((1 << PUCAN_TFAST_TSGEG1_BITS) - 1) +#define PUCAN_TFAST_TSEG2_MASK ((1 << PUCAN_TFAST_TSGEG2_BITS) - 1) +#define PUCAN_TFAST_SJW_MASK ((1 << PUCAN_TFAST_SJW_BITS) - 1) + +/* uCAN TIMING_FAST command fields */ +#define PUCAN_TFAST_SJW(s) ((s) & PUCAN_TFAST_SJW_MASK) +#define PUCAN_TFAST_TSEG2(t) ((t) & PUCAN_TFAST_TSEG2_MASK) +#define PUCAN_TFAST_TSEG1(t) ((t) & PUCAN_TFAST_TSEG1_MASK) +#define PUCAN_TFAST_BRP(b) ((b) & PUCAN_TFAST_BRP_MASK) + +struct __packed pucan_timing_fast { + __le16 opcode_channel; + + u8 unused; + u8 sjw; /* Sync Jump Width */ + u8 tseg2; /* Timing SEGment 2 */ + u8 tseg1; /* Timing SEGment 1 */ + + __le16 brp; /* BaudRate Prescaler */ +}; + +/* uCAN FILTER_STD command fields */ +#define PUCAN_FLTSTD_ROW_IDX_BITS 6 + +struct __packed pucan_filter_std { + __le16 opcode_channel; + + __le16 idx; + __le32 mask; /* CAN-ID bitmask in idx range */ +}; + +#define PUCAN_FLTSTD_ROW_IDX_MAX ((1 << PUCAN_FLTSTD_ROW_IDX_BITS) - 1) + +/* uCAN SET_STD_FILTER command fields */ +struct __packed pucan_std_filter { + __le16 opcode_channel; + + u8 unused; + u8 idx; + __le32 mask; /* 
CAN-ID bitmask in idx range */ +}; + +/* uCAN TX_ABORT commands fields */ +#define PUCAN_TX_ABORT_FLUSH 0x0001 + +struct __packed pucan_tx_abort { + __le16 opcode_channel; + + __le16 flags; + u32 unused; +}; + +/* uCAN WR_ERR_CNT command fields */ +#define PUCAN_WRERRCNT_TE 0x4000 /* Tx error cntr write Enable */ +#define PUCAN_WRERRCNT_RE 0x8000 /* Rx error cntr write Enable */ + +struct __packed pucan_wr_err_cnt { + __le16 opcode_channel; + + __le16 sel_mask; + u8 tx_counter; /* Tx error counter new value */ + u8 rx_counter; /* Rx error counter new value */ + + u16 unused; +}; + +/* uCAN SET_EN/CLR_DIS _OPTION command fields */ +#define PUCAN_OPTION_ERROR 0x0001 +#define PUCAN_OPTION_BUSLOAD 0x0002 +#define PUCAN_OPTION_CANDFDISO 0x0004 + +struct __packed pucan_options { + __le16 opcode_channel; + + __le16 options; + u32 unused; +}; + +/* uCAN received messages global format */ +struct __packed pucan_msg { + __le16 size; + __le16 type; + __le32 ts_low; + __le32 ts_high; +}; + +/* uCAN flags for CAN/CANFD messages */ +#define PUCAN_MSG_SELF_RECEIVE 0x80 +#define PUCAN_MSG_ERROR_STATE_IND 0x40 /* error state indicator */ +#define PUCAN_MSG_BITRATE_SWITCH 0x20 /* bitrate switch */ +#define PUCAN_MSG_EXT_DATA_LEN 0x10 /* extended data length */ +#define PUCAN_MSG_SINGLE_SHOT 0x08 +#define PUCAN_MSG_LOOPED_BACK 0x04 +#define PUCAN_MSG_EXT_ID 0x02 +#define PUCAN_MSG_RTR 0x01 + +struct __packed pucan_rx_msg { + __le16 size; + __le16 type; + __le32 ts_low; + __le32 ts_high; + __le32 tag_low; + __le32 tag_high; + u8 channel_dlc; + u8 client; + __le16 flags; + __le32 can_id; + u8 d[0]; +}; + +/* uCAN error types */ +#define PUCAN_ERMSG_BIT_ERROR 0 +#define PUCAN_ERMSG_FORM_ERROR 1 +#define PUCAN_ERMSG_STUFF_ERROR 2 +#define PUCAN_ERMSG_OTHER_ERROR 3 +#define PUCAN_ERMSG_ERR_CNT_DEC 4 + +struct __packed pucan_error_msg { + __le16 size; + __le16 type; + __le32 ts_low; + __le32 ts_high; + u8 channel_type_d; + u8 code_g; + u8 tx_err_cnt; + u8 rx_err_cnt; +}; + +static inline int pucan_error_get_channel(const struct pucan_error_msg *msg) +{ + return msg->channel_type_d & 0x0f; +} + +#define PUCAN_RX_BARRIER 0x10 +#define PUCAN_BUS_PASSIVE 0x20 +#define PUCAN_BUS_WARNING 0x40 +#define PUCAN_BUS_BUSOFF 0x80 + +struct __packed pucan_status_msg { + __le16 size; + __le16 type; + __le32 ts_low; + __le32 ts_high; + u8 channel_p_w_b; + u8 unused[3]; +}; + +static inline int pucan_status_get_channel(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & 0x0f; +} + +static inline int pucan_status_is_rx_barrier(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_RX_BARRIER; +} + +static inline int pucan_status_is_passive(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_BUS_PASSIVE; +} + +static inline int pucan_status_is_warning(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_BUS_WARNING; +} + +static inline int pucan_status_is_busoff(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_BUS_BUSOFF; +} + +/* uCAN transmitted message format */ +#define PUCAN_MSG_CHANNEL_DLC(c, d) (((c) & 0xf) | ((d) << 4)) + +struct __packed pucan_tx_msg { + __le16 size; + __le16 type; + __le32 tag_low; + __le32 tag_high; + u8 channel_dlc; + u8 client; + __le16 flags; + __le32 can_id; + u8 d[0]; +}; + +/* build the cmd opcode_channel field with respect to the correct endianness */ +static inline __le16 pucan_cmd_opcode_channel(int index, int opcode) +{ + return cpu_to_le16(((index) << 12) | ((opcode) & 0x3ff)); +} + +/* 
return the channel number part from any received message channel_dlc field */ +static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg) +{ + return msg->channel_dlc & 0xf; +} + +/* return the dlc value from any received message channel_dlc field */ +static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg) +{ + return msg->channel_dlc >> 4; +} + +static inline int pucan_ermsg_get_channel(const struct pucan_error_msg *msg) +{ + return msg->channel_type_d & 0x0f; +} + +static inline int pucan_stmsg_get_channel(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & 0x0f; +} + +#endif diff --git a/include/linux/can/led.h b/include/linux/can/led.h new file mode 100644 index 000000000..2746f7c2f --- /dev/null +++ b/include/linux/can/led.h @@ -0,0 +1,54 @@ +/* + * Copyright 2012, Fabio Baltieri + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _CAN_LED_H +#define _CAN_LED_H + +#include +#include +#include + +enum can_led_event { + CAN_LED_EVENT_OPEN, + CAN_LED_EVENT_STOP, + CAN_LED_EVENT_TX, + CAN_LED_EVENT_RX, +}; + +#ifdef CONFIG_CAN_LEDS + +/* keep space for interface name + "-tx"/"-rx"/"-rxtx" + * suffix and null terminator + */ +#define CAN_LED_NAME_SZ (IFNAMSIZ + 6) + +void can_led_event(struct net_device *netdev, enum can_led_event event); +void devm_can_led_init(struct net_device *netdev); +int __init can_led_notifier_init(void); +void __exit can_led_notifier_exit(void); + +#else + +static inline void can_led_event(struct net_device *netdev, + enum can_led_event event) +{ +} +static inline void devm_can_led_init(struct net_device *netdev) +{ +} +static inline int can_led_notifier_init(void) +{ + return 0; +} +static inline void can_led_notifier_exit(void) +{ +} + +#endif + +#endif /* !_CAN_LED_H */ diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h new file mode 100644 index 000000000..9587d6882 --- /dev/null +++ b/include/linux/can/platform/cc770.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CAN_PLATFORM_CC770_H +#define _CAN_PLATFORM_CC770_H + +/* CPU Interface Register (0x02) */ +#define CPUIF_CEN 0x01 /* Clock Out Enable */ +#define CPUIF_MUX 0x04 /* Multiplex */ +#define CPUIF_SLP 0x08 /* Sleep */ +#define CPUIF_PWD 0x10 /* Power Down Mode */ +#define CPUIF_DMC 0x20 /* Divide Memory Clock */ +#define CPUIF_DSC 0x40 /* Divide System Clock */ +#define CPUIF_RST 0x80 /* Hardware Reset Status */ + +/* Clock Out Register (0x1f) */ +#define CLKOUT_CD_MASK 0x0f /* Clock Divider mask */ +#define CLKOUT_SL_MASK 0x30 /* Slew Rate mask */ +#define CLKOUT_SL_SHIFT 4 + +/* Bus Configuration Register (0x2f) */ +#define BUSCFG_DR0 0x01 /* Disconnect RX0 Input / Select RX input */ +#define BUSCFG_DR1 0x02 /* Disconnect RX1 Input / Silent mode */ +#define BUSCFG_DT1 0x08 /* Disconnect TX1 Output */ +#define BUSCFG_POL 0x20 /* Polarity dominant or recessive */ +#define BUSCFG_CBY 0x40 /* Input Comparator Bypass */ + +struct cc770_platform_data { + u32 osc_freq; /* CAN bus oscillator frequency in Hz */ + + u8 cir; /* CPU Interface Register */ + u8 cor; /* Clock Out Register */ + u8 bcr; /* Bus Configuration Register */ +}; + +#endif /* !_CAN_PLATFORM_CC770_H */ diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h new file mode 100644 index 000000000..9e5ac27fb --- /dev/null +++ b/include/linux/can/platform/mcp251x.h 
@@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CAN_PLATFORM_MCP251X_H +#define _CAN_PLATFORM_MCP251X_H + +/* + * + * CAN bus driver for Microchip 251x CAN Controller with SPI Interface + * + */ + +#include + +/* + * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data + * @oscillator_frequency: - oscillator frequency in Hz + */ + +struct mcp251x_platform_data { + unsigned long oscillator_frequency; +}; + +#endif /* !_CAN_PLATFORM_MCP251X_H */ diff --git a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h new file mode 100644 index 000000000..a43dcd0cf --- /dev/null +++ b/include/linux/can/platform/rcar_can.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CAN_PLATFORM_RCAR_CAN_H_ +#define _CAN_PLATFORM_RCAR_CAN_H_ + +#include + +/* Clock Select Register settings */ +enum CLKR { + CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */ + CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */ + CLKR_CLKEXT = 3 /* Externally input clock */ +}; + +struct rcar_can_platform_data { + enum CLKR clock_select; /* Clock source select */ +}; + +#endif /* !_CAN_PLATFORM_RCAR_CAN_H_ */ diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h new file mode 100644 index 000000000..5755ae5a4 --- /dev/null +++ b/include/linux/can/platform/sja1000.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CAN_PLATFORM_SJA1000_H +#define _CAN_PLATFORM_SJA1000_H + +/* clock divider register */ +#define CDR_CLKOUT_MASK 0x07 +#define CDR_CLK_OFF 0x08 /* Clock off (CLKOUT pin) */ +#define CDR_RXINPEN 0x20 /* TX1 output is RX irq output */ +#define CDR_CBP 0x40 /* CAN input comparator bypass */ +#define CDR_PELICAN 0x80 /* PeliCAN mode */ + +/* output control register */ +#define OCR_MODE_BIPHASE 0x00 +#define OCR_MODE_TEST 0x01 +#define OCR_MODE_NORMAL 0x02 +#define OCR_MODE_CLOCK 0x03 +#define OCR_MODE_MASK 0x07 +#define OCR_TX0_INVERT 0x04 +#define OCR_TX0_PULLDOWN 0x08 +#define OCR_TX0_PULLUP 0x10 +#define OCR_TX0_PUSHPULL 0x18 +#define OCR_TX1_INVERT 0x20 +#define OCR_TX1_PULLDOWN 0x40 +#define OCR_TX1_PULLUP 0x80 +#define OCR_TX1_PUSHPULL 0xc0 +#define OCR_TX_MASK 0xfc +#define OCR_TX_SHIFT 2 + +struct sja1000_platform_data { + u32 osc_freq; /* CAN bus oscillator frequency in Hz */ + + u8 ocr; /* output control register */ + u8 cdr; /* clock divider register */ +}; + +#endif /* !_CAN_PLATFORM_SJA1000_H */ diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h new file mode 100644 index 000000000..8268811a6 --- /dev/null +++ b/include/linux/can/rx-offload.h @@ -0,0 +1,64 @@ +/* + * linux/can/rx-offload.h + * + * Copyright (c) 2014 David Jander, Protonic Holland + * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
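/*
 * A minimal sketch of board-support code supplying the SJA1000 platform
 * data declared above; the oscillator frequency and register settings are
 * illustrative only.
 */
static struct sja1000_platform_data example_sja1000_pdata = {
	.osc_freq = 16000000,			/* 16 MHz crystal */
	.ocr	  = OCR_MODE_NORMAL | OCR_TX0_PUSHPULL,
	.cdr	  = CDR_PELICAN | CDR_CBP,
};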
+ */ + +#ifndef _CAN_RX_OFFLOAD_H +#define _CAN_RX_OFFLOAD_H + +#include +#include + +struct can_rx_offload { + struct net_device *dev; + + unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf, + u32 *timestamp, unsigned int mb); + + struct sk_buff_head skb_queue; + u32 skb_queue_len_max; + + unsigned int mb_first; + unsigned int mb_last; + + struct napi_struct napi; + + bool inc; +}; + +int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload); +int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); +int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); +int can_rx_offload_queue_sorted(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp); +unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp); +int can_rx_offload_queue_tail(struct can_rx_offload *offload, + struct sk_buff *skb); +void can_rx_offload_reset(struct can_rx_offload *offload); +void can_rx_offload_del(struct can_rx_offload *offload); +void can_rx_offload_enable(struct can_rx_offload *offload); + +static inline void can_rx_offload_schedule(struct can_rx_offload *offload) +{ + napi_schedule(&offload->napi); +} + +static inline void can_rx_offload_disable(struct can_rx_offload *offload) +{ + napi_disable(&offload->napi); +} + +#endif /* !_CAN_RX_OFFLOAD_H */ diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h new file mode 100644 index 000000000..fd1ae7907 --- /dev/null +++ b/include/linux/can/skb.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/can/skb.h + * + * Definitions for the CAN network socket buffer + * + * Copyright (C) 2012 Oliver Hartkopp + * + */ + +#ifndef _CAN_SKB_H +#define _CAN_SKB_H + +#include +#include +#include +#include + +/* + * The struct can_skb_priv is used to transport additional information along + * with the stored struct can(fd)_frame that can not be contained in existing + * struct sk_buff elements. + * N.B. that this information must not be modified in cloned CAN sk_buffs. + * To modify the CAN frame content or the struct can_skb_priv content + * skb_copy() needs to be used instead of skb_clone(). + */ + +/** + * struct can_skb_priv - private additional data inside CAN sk_buffs + * @ifindex: ifindex of the first interface the CAN frame appeared on + * @skbcnt: atomic counter to have an unique id together with skb pointer + * @cf: align to the following CAN frame at skb->data + */ +struct can_skb_priv { + int ifindex; + int skbcnt; + struct can_frame cf[0]; +}; + +static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb) +{ + return (struct can_skb_priv *)(skb->head); +} + +static inline void can_skb_reserve(struct sk_buff *skb) +{ + skb_reserve(skb, sizeof(struct can_skb_priv)); +} + +static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) +{ + /* If the socket has already been closed by user space, the + * refcount may already be 0 (and the socket will be freed + * after the last TX skb has been freed). So only increase + * socket refcount if the refcount is > 0. 
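/*
 * A minimal sketch of an interrupt handler draining a hardware RX FIFO
 * through the rx-offload layer above; "struct example_offload_priv" and the
 * IRQ plumbing (linux/interrupt.h) are illustrative assumptions.
 */
struct example_offload_priv {
	struct can_rx_offload offload;
	/* device registers, locks, ... */
};

static irqreturn_t example_offload_irq(int irq, void *dev_id)
{
	struct example_offload_priv *priv = dev_id;

	/* reads mailboxes via the mailbox_read callback supplied at setup */
	can_rx_offload_irq_offload_fifo(&priv->offload);

	return IRQ_HANDLED;
}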
+ */ + if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { + skb->destructor = sock_efree; + skb->sk = sk; + } +} + +/* + * returns an unshared skb owned by the original sock to be echo'ed back + */ +static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) +{ + struct sk_buff *nskb; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!nskb)) { + kfree_skb(skb); + return NULL; + } + + can_skb_set_owner(nskb, skb->sk); + consume_skb(skb); + return nskb; +} + +#endif /* !_CAN_SKB_H */ diff --git a/include/linux/capability.h b/include/linux/capability.h new file mode 100644 index 000000000..f640dcbc8 --- /dev/null +++ b/include/linux/capability.h @@ -0,0 +1,254 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is + * + * Andrew G. Morgan + * Alexander Kjeldaas + * with help from Aleph1, Roland Buresund and Andrew Main. + * + * See here for the libcap library ("POSIX draft" compliance): + * + * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/ + */ +#ifndef _LINUX_CAPABILITY_H +#define _LINUX_CAPABILITY_H + +#include + + +#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 +#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 + +extern int file_caps_enabled; + +typedef struct kernel_cap_struct { + __u32 cap[_KERNEL_CAPABILITY_U32S]; +} kernel_cap_t; + +/* exact same as vfs_cap_data but in cpu endian and always filled completely */ +struct cpu_vfs_cap_data { + __u32 magic_etc; + kernel_cap_t permitted; + kernel_cap_t inheritable; +}; + +#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct)) +#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) + + +struct file; +struct inode; +struct dentry; +struct task_struct; +struct user_namespace; + +extern const kernel_cap_t __cap_empty_set; +extern const kernel_cap_t __cap_init_eff_set; + +/* + * Internal kernel functions only + */ + +#define CAP_FOR_EACH_U32(__capi) \ + for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi) + +/* + * CAP_FS_MASK and CAP_NFSD_MASKS: + * + * The fs mask is all the privileges that fsuid==0 historically meant. + * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE. + * + * It has never meant setting security.* and trusted.* xattrs. + * + * We could also define fsmask as follows: + * 1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions + * 2. 
The security.* and trusted.* xattrs are fs-related MAC permissions + */ + +# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \ + | CAP_TO_MASK(CAP_MKNOD) \ + | CAP_TO_MASK(CAP_DAC_OVERRIDE) \ + | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \ + | CAP_TO_MASK(CAP_FOWNER) \ + | CAP_TO_MASK(CAP_FSETID)) + +# define CAP_FS_MASK_B1 (CAP_TO_MASK(CAP_MAC_OVERRIDE)) + +#if _KERNEL_CAPABILITY_U32S != 2 +# error Fix up hand-coded capability macro initializers +#else /* HAND-CODED capability initializers */ + +#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1) +#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1) + +# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }}) +# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }}) +# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ + | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \ + CAP_FS_MASK_B1 } }) +# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ + | CAP_TO_MASK(CAP_SYS_RESOURCE), \ + CAP_FS_MASK_B1 } }) + +#endif /* _KERNEL_CAPABILITY_U32S != 2 */ + +# define cap_clear(c) do { (c) = __cap_empty_set; } while (0) + +#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag)) +#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag)) +#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag)) + +#define CAP_BOP_ALL(c, a, b, OP) \ +do { \ + unsigned __capi; \ + CAP_FOR_EACH_U32(__capi) { \ + c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \ + } \ +} while (0) + +#define CAP_UOP_ALL(c, a, OP) \ +do { \ + unsigned __capi; \ + CAP_FOR_EACH_U32(__capi) { \ + c.cap[__capi] = OP a.cap[__capi]; \ + } \ +} while (0) + +static inline kernel_cap_t cap_combine(const kernel_cap_t a, + const kernel_cap_t b) +{ + kernel_cap_t dest; + CAP_BOP_ALL(dest, a, b, |); + return dest; +} + +static inline kernel_cap_t cap_intersect(const kernel_cap_t a, + const kernel_cap_t b) +{ + kernel_cap_t dest; + CAP_BOP_ALL(dest, a, b, &); + return dest; +} + +static inline kernel_cap_t cap_drop(const kernel_cap_t a, + const kernel_cap_t drop) +{ + kernel_cap_t dest; + CAP_BOP_ALL(dest, a, drop, &~); + return dest; +} + +static inline kernel_cap_t cap_invert(const kernel_cap_t c) +{ + kernel_cap_t dest; + CAP_UOP_ALL(dest, c, ~); + return dest; +} + +static inline bool cap_isclear(const kernel_cap_t a) +{ + unsigned __capi; + CAP_FOR_EACH_U32(__capi) { + if (a.cap[__capi] != 0) + return false; + } + return true; +} + +/* + * Check if "a" is a subset of "set". + * return true if ALL of the capabilities in "a" are also in "set" + * cap_issubset(0101, 1111) will return true + * return false if ANY of the capabilities in "a" are not in "set" + * cap_issubset(1111, 0101) will return false + */ +static inline bool cap_issubset(const kernel_cap_t a, const kernel_cap_t set) +{ + kernel_cap_t dest; + dest = cap_drop(a, set); + return cap_isclear(dest); +} + +/* Used to decide between falling back on the old suser() or fsuser(). 
*/ + +static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a) +{ + const kernel_cap_t __cap_fs_set = CAP_FS_SET; + return cap_drop(a, __cap_fs_set); +} + +static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a, + const kernel_cap_t permitted) +{ + const kernel_cap_t __cap_fs_set = CAP_FS_SET; + return cap_combine(a, + cap_intersect(permitted, __cap_fs_set)); +} + +static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a) +{ + const kernel_cap_t __cap_fs_set = CAP_NFSD_SET; + return cap_drop(a, __cap_fs_set); +} + +static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a, + const kernel_cap_t permitted) +{ + const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET; + return cap_combine(a, + cap_intersect(permitted, __cap_nfsd_set)); +} + +#ifdef CONFIG_MULTIUSER +extern bool has_capability(struct task_struct *t, int cap); +extern bool has_ns_capability(struct task_struct *t, + struct user_namespace *ns, int cap); +extern bool has_capability_noaudit(struct task_struct *t, int cap); +extern bool has_ns_capability_noaudit(struct task_struct *t, + struct user_namespace *ns, int cap); +extern bool capable(int cap); +extern bool ns_capable(struct user_namespace *ns, int cap); +extern bool ns_capable_noaudit(struct user_namespace *ns, int cap); +#else +static inline bool has_capability(struct task_struct *t, int cap) +{ + return true; +} +static inline bool has_ns_capability(struct task_struct *t, + struct user_namespace *ns, int cap) +{ + return true; +} +static inline bool has_capability_noaudit(struct task_struct *t, int cap) +{ + return true; +} +static inline bool has_ns_capability_noaudit(struct task_struct *t, + struct user_namespace *ns, int cap) +{ + return true; +} +static inline bool capable(int cap) +{ + return true; +} +static inline bool ns_capable(struct user_namespace *ns, int cap) +{ + return true; +} +static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap) +{ + return true; +} +#endif /* CONFIG_MULTIUSER */ +extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode); +extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); +extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); +extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns); + +/* audit system wants to get cap info from files as well */ +extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); + +extern int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size); + +#endif /* !_LINUX_CAPABILITY_H */ diff --git a/include/linux/cb710.h b/include/linux/cb710.h new file mode 100644 index 000000000..8cc10411b --- /dev/null +++ b/include/linux/cb710.h @@ -0,0 +1,208 @@ +/* + * cb710/cb710.h + * + * Copyright by Michał Mirosław, 2008-2009 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
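/*
 * A minimal sketch of the usual privilege check built on capable() declared
 * above; the guarded operation is illustrative only.
 */
static int example_set_privileged_option(unsigned long val)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* ... apply the privileged change using val ... */
	return 0;
}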
+ */ +#ifndef LINUX_CB710_DRIVER_H +#define LINUX_CB710_DRIVER_H + +#include +#include +#include +#include +#include +#include + +struct cb710_slot; + +typedef int (*cb710_irq_handler_t)(struct cb710_slot *); + +/* per-virtual-slot structure */ +struct cb710_slot { + struct platform_device pdev; + void __iomem *iobase; + cb710_irq_handler_t irq_handler; +}; + +/* per-device structure */ +struct cb710_chip { + struct pci_dev *pdev; + void __iomem *iobase; + unsigned platform_id; +#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS + atomic_t slot_refs_count; +#endif + unsigned slot_mask; + unsigned slots; + spinlock_t irq_lock; + struct cb710_slot slot[0]; +}; + +/* NOTE: cb710_chip.slots is modified only during device init/exit and + * they are all serialized wrt themselves */ + +/* cb710_chip.slot_mask values */ +#define CB710_SLOT_MMC 1 +#define CB710_SLOT_MS 2 +#define CB710_SLOT_SM 4 + +/* slot port accessors - so the logic is more clear in the code */ +#define CB710_PORT_ACCESSORS(t) \ +static inline void cb710_write_port_##t(struct cb710_slot *slot, \ + unsigned port, u##t value) \ +{ \ + iowrite##t(value, slot->iobase + port); \ +} \ + \ +static inline u##t cb710_read_port_##t(struct cb710_slot *slot, \ + unsigned port) \ +{ \ + return ioread##t(slot->iobase + port); \ +} \ + \ +static inline void cb710_modify_port_##t(struct cb710_slot *slot, \ + unsigned port, u##t set, u##t clear) \ +{ \ + iowrite##t( \ + (ioread##t(slot->iobase + port) & ~clear)|set, \ + slot->iobase + port); \ +} + +CB710_PORT_ACCESSORS(8) +CB710_PORT_ACCESSORS(16) +CB710_PORT_ACCESSORS(32) + +void cb710_pci_update_config_reg(struct pci_dev *pdev, + int reg, uint32_t and, uint32_t xor); +void cb710_set_irq_handler(struct cb710_slot *slot, + cb710_irq_handler_t handler); + +/* some device struct walking */ + +static inline struct cb710_slot *cb710_pdev_to_slot( + struct platform_device *pdev) +{ + return container_of(pdev, struct cb710_slot, pdev); +} + +static inline struct cb710_chip *cb710_slot_to_chip(struct cb710_slot *slot) +{ + return dev_get_drvdata(slot->pdev.dev.parent); +} + +static inline struct device *cb710_slot_dev(struct cb710_slot *slot) +{ + return &slot->pdev.dev; +} + +static inline struct device *cb710_chip_dev(struct cb710_chip *chip) +{ + return &chip->pdev->dev; +} + +/* debugging aids */ + +#ifdef CONFIG_CB710_DEBUG +void cb710_dump_regs(struct cb710_chip *chip, unsigned dump); +#else +#define cb710_dump_regs(c, d) do {} while (0) +#endif + +#define CB710_DUMP_REGS_MMC 0x0F +#define CB710_DUMP_REGS_MS 0x30 +#define CB710_DUMP_REGS_SM 0xC0 +#define CB710_DUMP_REGS_ALL 0xFF +#define CB710_DUMP_REGS_MASK 0xFF + +#define CB710_DUMP_ACCESS_8 0x100 +#define CB710_DUMP_ACCESS_16 0x200 +#define CB710_DUMP_ACCESS_32 0x400 +#define CB710_DUMP_ACCESS_ALL 0x700 +#define CB710_DUMP_ACCESS_MASK 0x700 + +#endif /* LINUX_CB710_DRIVER_H */ +/* + * cb710/sgbuf2.h + * + * Copyright by Michał Mirosław, 2008-2009 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef LINUX_CB710_SG_H +#define LINUX_CB710_SG_H + +#include +#include + +/* + * 32-bit PIO mapping sg iterator + * + * Hides scatterlist access issues - fragment boundaries, alignment, page + * mapping - for drivers using 32-bit-word-at-a-time-PIO (ie. PCI devices + * without DMA support). 
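/*
 * A minimal sketch using the generated cb710 port accessors above; the
 * register offset and bit values are illustrative only.
 */
static void example_cb710_toggle(struct cb710_slot *slot)
{
	u8 val = cb710_read_port_8(slot, 0x10);

	/* set bit 0x01 and clear bit 0x02 in the register at offset 0x10 */
	cb710_modify_port_8(slot, 0x10, 0x01, 0x02);

	pr_debug("cb710 example: reg 0x10 was 0x%02x\n", val);
}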
+ * + * Best-case reading (transfer from device): + * sg_miter_start(, SG_MITER_TO_SG); + * cb710_sg_dwiter_write_from_io(); + * sg_miter_stop(); + * + * Best-case writing (transfer to device): + * sg_miter_start(, SG_MITER_FROM_SG); + * cb710_sg_dwiter_read_to_io(); + * sg_miter_stop(); + */ + +uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter); +void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data); + +/** + * cb710_sg_dwiter_write_from_io - transfer data to mapped buffer from 32-bit IO port + * @miter: sg mapping iter + * @port: PIO port - IO or MMIO address + * @count: number of 32-bit words to transfer + * + * Description: + * Reads @count 32-bit words from register @port and stores it in + * buffer iterated by @miter. Data that would overflow the buffer + * is silently ignored. Iterator is advanced by 4*@count bytes + * or to the buffer's end whichever is closer. + * + * Context: + * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. + */ +static inline void cb710_sg_dwiter_write_from_io(struct sg_mapping_iter *miter, + void __iomem *port, size_t count) +{ + while (count-- > 0) + cb710_sg_dwiter_write_next_block(miter, ioread32(port)); +} + +/** + * cb710_sg_dwiter_read_to_io - transfer data to 32-bit IO port from mapped buffer + * @miter: sg mapping iter + * @port: PIO port - IO or MMIO address + * @count: number of 32-bit words to transfer + * + * Description: + * Writes @count 32-bit words to register @port from buffer iterated + * through @miter. If buffer ends before @count words are written + * missing data is replaced by zeroes. @miter is advanced by 4*@count + * bytes or to the buffer's end whichever is closer. + * + * Context: + * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. + */ +static inline void cb710_sg_dwiter_read_to_io(struct sg_mapping_iter *miter, + void __iomem *port, size_t count) +{ + while (count-- > 0) + iowrite32(cb710_sg_dwiter_read_next_block(miter), port); +} + +#endif /* LINUX_CB710_SG_H */ diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h new file mode 100644 index 000000000..1d5229200 --- /dev/null +++ b/include/linux/cciss_ioctl.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CCISS_IOCTLH +#define CCISS_IOCTLH + +#include + +#ifdef CONFIG_COMPAT + +/* 32 bit compatible ioctl structs */ +typedef struct _IOCTL32_Command_struct { + LUNAddr_struct LUN_info; + RequestBlock_struct Request; + ErrorInfo_struct error_info; + WORD buf_size; /* size in bytes of the buf */ + __u32 buf; /* 32 bit pointer to data buffer */ +} IOCTL32_Command_struct; + +typedef struct _BIG_IOCTL32_Command_struct { + LUNAddr_struct LUN_info; + RequestBlock_struct Request; + ErrorInfo_struct error_info; + DWORD malloc_size; /* < MAX_KMALLOC_SIZE in cciss.c */ + DWORD buf_size; /* size in bytes of the buf */ + /* < malloc_size * MAXSGENTRIES */ + __u32 buf; /* 32 bit pointer to data buffer */ +} BIG_IOCTL32_Command_struct; + +#define CCISS_PASSTHRU32 _IOWR(CCISS_IOC_MAGIC, 11, IOCTL32_Command_struct) +#define CCISS_BIG_PASSTHRU32 _IOWR(CCISS_IOC_MAGIC, 18, BIG_IOCTL32_Command_struct) + +#endif /* CONFIG_COMPAT */ +#endif diff --git a/include/linux/ccp.h b/include/linux/ccp.h new file mode 100644 index 000000000..43ed9e77c --- /dev/null +++ b/include/linux/ccp.h @@ -0,0 +1,669 @@ +/* + * AMD Cryptographic Coprocessor (CCP) driver + * + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. 
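/*
 * A minimal sketch of the "best-case reading" sequence described above;
 * the slot, port offset and scatterlist arguments are illustrative only.
 */
static void example_pio_read_block(struct cb710_slot *slot,
				   struct scatterlist *sg, unsigned int nents,
				   size_t words)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, nents, SG_MITER_TO_SG);
	cb710_sg_dwiter_write_from_io(&miter, slot->iobase + 0x20, words);
	sg_miter_stop(&miter);
}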
+ * + * Author: Tom Lendacky + * Author: Gary R Hook + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CCP_H__ +#define __CCP_H__ + +#include +#include +#include +#include +#include + +struct ccp_device; +struct ccp_cmd; + +#if defined(CONFIG_CRYPTO_DEV_SP_CCP) + +/** + * ccp_present - check if a CCP device is present + * + * Returns zero if a CCP device is present, -ENODEV otherwise. + */ +int ccp_present(void); + +#define CCP_VSIZE 16 +#define CCP_VMASK ((unsigned int)((1 << CCP_VSIZE) - 1)) +#define CCP_VERSION(v, r) ((unsigned int)((v << CCP_VSIZE) \ + | (r & CCP_VMASK))) + +/** + * ccp_version - get the version of the CCP + * + * Returns a positive version number, or zero if no CCP + */ +unsigned int ccp_version(void); + +/** + * ccp_enqueue_cmd - queue an operation for processing by the CCP + * + * @cmd: ccp_cmd struct to be processed + * + * Refer to the ccp_cmd struct below for required fields. + * + * Queue a cmd to be processed by the CCP. If queueing the cmd + * would exceed the defined length of the cmd queue the cmd will + * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will + * result in a return code of -EBUSY. + * + * The callback routine specified in the ccp_cmd struct will be + * called to notify the caller of completion (if the cmd was not + * backlogged) or advancement out of the backlog. If the cmd has + * advanced out of the backlog the "err" value of the callback + * will be -EINPROGRESS. Any other "err" value during callback is + * the result of the operation. + * + * The cmd has been successfully queued if: + * the return code is -EINPROGRESS or + * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set + */ +int ccp_enqueue_cmd(struct ccp_cmd *cmd); + +#else /* CONFIG_CRYPTO_DEV_CCP_SP_DEV is not enabled */ + +static inline int ccp_present(void) +{ + return -ENODEV; +} + +static inline unsigned int ccp_version(void) +{ + return 0; +} + +static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) +{ + return -ENODEV; +} + +#endif /* CONFIG_CRYPTO_DEV_SP_CCP */ + + +/***** AES engine *****/ +/** + * ccp_aes_type - AES key size + * + * @CCP_AES_TYPE_128: 128-bit key + * @CCP_AES_TYPE_192: 192-bit key + * @CCP_AES_TYPE_256: 256-bit key + */ +enum ccp_aes_type { + CCP_AES_TYPE_128 = 0, + CCP_AES_TYPE_192, + CCP_AES_TYPE_256, + CCP_AES_TYPE__LAST, +}; + +/** + * ccp_aes_mode - AES operation mode + * + * @CCP_AES_MODE_ECB: ECB mode + * @CCP_AES_MODE_CBC: CBC mode + * @CCP_AES_MODE_OFB: OFB mode + * @CCP_AES_MODE_CFB: CFB mode + * @CCP_AES_MODE_CTR: CTR mode + * @CCP_AES_MODE_CMAC: CMAC mode + */ +enum ccp_aes_mode { + CCP_AES_MODE_ECB = 0, + CCP_AES_MODE_CBC, + CCP_AES_MODE_OFB, + CCP_AES_MODE_CFB, + CCP_AES_MODE_CTR, + CCP_AES_MODE_CMAC, + CCP_AES_MODE_GHASH, + CCP_AES_MODE_GCTR, + CCP_AES_MODE_GCM, + CCP_AES_MODE_GMAC, + CCP_AES_MODE__LAST, +}; + +/** + * ccp_aes_mode - AES operation mode + * + * @CCP_AES_ACTION_DECRYPT: AES decrypt operation + * @CCP_AES_ACTION_ENCRYPT: AES encrypt operation + */ +enum ccp_aes_action { + CCP_AES_ACTION_DECRYPT = 0, + CCP_AES_ACTION_ENCRYPT, + CCP_AES_ACTION__LAST, +}; +/* Overloaded field */ +#define CCP_AES_GHASHAAD CCP_AES_ACTION_DECRYPT +#define CCP_AES_GHASHFINAL CCP_AES_ACTION_ENCRYPT + +/** + * struct ccp_aes_engine - CCP AES operation + * @type: AES operation key size + * @mode: AES operation mode + * @action: AES operation (decrypt/encrypt) + * @key: key 
to be used for this AES operation + * @key_len: length in bytes of key + * @iv: IV to be used for this AES operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @cmac_final: indicates final operation when running in CMAC mode + * @cmac_key: K1/K2 key used in final CMAC operation + * @cmac_key_len: length in bytes of cmac_key + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - type, mode, action, key, key_len, src, dst, src_len + * - iv, iv_len for any mode other than ECB + * - cmac_final for CMAC mode + * - cmac_key, cmac_key_len for CMAC mode if cmac_final is non-zero + * + * The iv variable is used as both input and output. On completion of the + * AES operation the new IV overwrites the old IV. + */ +struct ccp_aes_engine { + enum ccp_aes_type type; + enum ccp_aes_mode mode; + enum ccp_aes_action action; + + u32 authsize; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ + + u32 cmac_final; /* Indicates final cmac cmd */ + struct scatterlist *cmac_key; /* K1/K2 cmac key required for + * final cmac cmd */ + u32 cmac_key_len; /* In bytes */ + + u32 aad_len; /* In bytes */ +}; + +/***** XTS-AES engine *****/ +/** + * ccp_xts_aes_unit_size - XTS unit size + * + * @CCP_XTS_AES_UNIT_SIZE_16: Unit size of 16 bytes + * @CCP_XTS_AES_UNIT_SIZE_512: Unit size of 512 bytes + * @CCP_XTS_AES_UNIT_SIZE_1024: Unit size of 1024 bytes + * @CCP_XTS_AES_UNIT_SIZE_2048: Unit size of 2048 bytes + * @CCP_XTS_AES_UNIT_SIZE_4096: Unit size of 4096 bytes + */ +enum ccp_xts_aes_unit_size { + CCP_XTS_AES_UNIT_SIZE_16 = 0, + CCP_XTS_AES_UNIT_SIZE_512, + CCP_XTS_AES_UNIT_SIZE_1024, + CCP_XTS_AES_UNIT_SIZE_2048, + CCP_XTS_AES_UNIT_SIZE_4096, + CCP_XTS_AES_UNIT_SIZE__LAST, +}; + +/** + * struct ccp_xts_aes_engine - CCP XTS AES operation + * @action: AES operation (decrypt/encrypt) + * @unit_size: unit size of the XTS operation + * @key: key to be used for this XTS AES operation + * @key_len: length in bytes of key + * @iv: IV to be used for this XTS AES operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @final: indicates final XTS operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - action, unit_size, key, key_len, iv, iv_len, src, dst, src_len, final + * + * The iv variable is used as both input and output. On completion of the + * AES operation the new IV overwrites the old IV. 
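/*
 * A minimal sketch of filling the AES engine descriptor above for a CBC
 * encrypt; the scatterlists, key length and error handling are illustrative,
 * and a real caller wraps the descriptor in a struct ccp_cmd submitted
 * through ccp_enqueue_cmd().
 */
static int example_ccp_aes_cbc(struct ccp_aes_engine *aes,
			       struct scatterlist *key, struct scatterlist *iv,
			       struct scatterlist *src, struct scatterlist *dst,
			       u64 len)
{
	if (ccp_present())
		return -ENODEV;		/* no CCP available */

	memset(aes, 0, sizeof(*aes));
	aes->type = CCP_AES_TYPE_128;
	aes->mode = CCP_AES_MODE_CBC;
	aes->action = CCP_AES_ACTION_ENCRYPT;
	aes->key = key;
	aes->key_len = 16;		/* 128-bit key, in bytes */
	aes->iv = iv;
	aes->iv_len = 16;		/* AES block size */
	aes->src = src;
	aes->dst = dst;
	aes->src_len = len;

	return 0;
}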
+ */ +struct ccp_xts_aes_engine { + enum ccp_aes_type type; + enum ccp_aes_action action; + enum ccp_xts_aes_unit_size unit_size; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ + + u32 final; +}; + +/***** SHA engine *****/ +/** + * ccp_sha_type - type of SHA operation + * + * @CCP_SHA_TYPE_1: SHA-1 operation + * @CCP_SHA_TYPE_224: SHA-224 operation + * @CCP_SHA_TYPE_256: SHA-256 operation + */ +enum ccp_sha_type { + CCP_SHA_TYPE_1 = 1, + CCP_SHA_TYPE_224, + CCP_SHA_TYPE_256, + CCP_SHA_TYPE_384, + CCP_SHA_TYPE_512, + CCP_SHA_TYPE__LAST, +}; + +/** + * struct ccp_sha_engine - CCP SHA operation + * @type: Type of SHA operation + * @ctx: current hash value + * @ctx_len: length in bytes of hash value + * @src: data to be used for this operation + * @src_len: length in bytes of data used for this operation + * @opad: data to be used for final HMAC operation + * @opad_len: length in bytes of data used for final HMAC operation + * @first: indicates first SHA operation + * @final: indicates final SHA operation + * @msg_bits: total length of the message in bits used in final SHA operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - type, ctx, ctx_len, src, src_len, final + * - msg_bits if final is non-zero + * + * The ctx variable is used as both input and output. On completion of the + * SHA operation the new hash value overwrites the old hash value. + */ +struct ccp_sha_engine { + enum ccp_sha_type type; + + struct scatterlist *ctx; + u32 ctx_len; /* In bytes */ + + struct scatterlist *src; + u64 src_len; /* In bytes */ + + struct scatterlist *opad; + u32 opad_len; /* In bytes */ + + u32 first; /* Indicates first sha cmd */ + u32 final; /* Indicates final sha cmd */ + u64 msg_bits; /* Message length in bits required for + * final sha cmd */ +}; + +/***** 3DES engine *****/ +enum ccp_des3_mode { + CCP_DES3_MODE_ECB = 0, + CCP_DES3_MODE_CBC, + CCP_DES3_MODE_CFB, + CCP_DES3_MODE__LAST, +}; + +enum ccp_des3_type { + CCP_DES3_TYPE_168 = 1, + CCP_DES3_TYPE__LAST, + }; + +enum ccp_des3_action { + CCP_DES3_ACTION_DECRYPT = 0, + CCP_DES3_ACTION_ENCRYPT, + CCP_DES3_ACTION__LAST, +}; + +/** + * struct ccp_des3_engine - CCP SHA operation + * @type: Type of 3DES operation + * @mode: cipher mode + * @action: 3DES operation (decrypt/encrypt) + * @key: key to be used for this 3DES operation + * @key_len: length of key (in bytes) + * @iv: IV to be used for this AES operation + * @iv_len: length in bytes of iv + * @src: input data to be used for this operation + * @src_len: length of input data used for this operation (in bytes) + * @dst: output data produced by this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - type, mode, action, key, key_len, src, dst, src_len + * - iv, iv_len for any mode other than ECB + * + * The iv variable is used as both input and output. On completion of the + * 3DES operation the new IV overwrites the old IV. 
+ */ +struct ccp_des3_engine { + enum ccp_des3_type type; + enum ccp_des3_mode mode; + enum ccp_des3_action action; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; + +/***** RSA engine *****/ +/** + * struct ccp_rsa_engine - CCP RSA operation + * @key_size: length in bits of RSA key + * @exp: RSA exponent + * @exp_len: length in bytes of exponent + * @mod: RSA modulus + * @mod_len: length in bytes of modulus + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - key_size, exp, exp_len, mod, mod_len, src, dst, src_len + */ +struct ccp_rsa_engine { + u32 key_size; /* In bits */ + + struct scatterlist *exp; + u32 exp_len; /* In bytes */ + + struct scatterlist *mod; + u32 mod_len; /* In bytes */ + + struct scatterlist *src, *dst; + u32 src_len; /* In bytes */ +}; + +/***** Passthru engine *****/ +/** + * ccp_passthru_bitwise - type of bitwise passthru operation + * + * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed + * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask + * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask + * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask + * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask + */ +enum ccp_passthru_bitwise { + CCP_PASSTHRU_BITWISE_NOOP = 0, + CCP_PASSTHRU_BITWISE_AND, + CCP_PASSTHRU_BITWISE_OR, + CCP_PASSTHRU_BITWISE_XOR, + CCP_PASSTHRU_BITWISE_MASK, + CCP_PASSTHRU_BITWISE__LAST, +}; + +/** + * ccp_passthru_byteswap - type of byteswap passthru operation + * + * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed + * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words + * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words + */ +enum ccp_passthru_byteswap { + CCP_PASSTHRU_BYTESWAP_NOOP = 0, + CCP_PASSTHRU_BYTESWAP_32BIT, + CCP_PASSTHRU_BYTESWAP_256BIT, + CCP_PASSTHRU_BYTESWAP__LAST, +}; + +/** + * struct ccp_passthru_engine - CCP pass-through operation + * @bit_mod: bitwise operation to perform + * @byte_swap: byteswap operation to perform + * @mask: mask to be applied to data + * @mask_len: length in bytes of mask + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @final: indicate final pass-through operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - bit_mod, byte_swap, src, dst, src_len + * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP + */ +struct ccp_passthru_engine { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; + + struct scatterlist *mask; + u32 mask_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ + + u32 final; +}; + +/** + * struct ccp_passthru_nomap_engine - CCP pass-through operation + * without performing DMA mapping + * @bit_mod: bitwise operation to perform + * @byte_swap: byteswap operation to perform + * @mask: mask to be applied to data + * @mask_len: length in bytes of mask + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @final: indicate final pass-through operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + 
* - bit_mod, byte_swap, src, dst, src_len + * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP + */ +struct ccp_passthru_nomap_engine { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; + + dma_addr_t mask; + u32 mask_len; /* In bytes */ + + dma_addr_t src_dma, dst_dma; + u64 src_len; /* In bytes */ + + u32 final; +}; + +/***** ECC engine *****/ +#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ +#define CCP_ECC_MAX_OPERANDS 6 +#define CCP_ECC_MAX_OUTPUTS 3 + +/** + * ccp_ecc_function - type of ECC function + * + * @CCP_ECC_FUNCTION_MMUL_384BIT: 384-bit modular multiplication + * @CCP_ECC_FUNCTION_MADD_384BIT: 384-bit modular addition + * @CCP_ECC_FUNCTION_MINV_384BIT: 384-bit multiplicative inverse + * @CCP_ECC_FUNCTION_PADD_384BIT: 384-bit point addition + * @CCP_ECC_FUNCTION_PMUL_384BIT: 384-bit point multiplication + * @CCP_ECC_FUNCTION_PDBL_384BIT: 384-bit point doubling + */ +enum ccp_ecc_function { + CCP_ECC_FUNCTION_MMUL_384BIT = 0, + CCP_ECC_FUNCTION_MADD_384BIT, + CCP_ECC_FUNCTION_MINV_384BIT, + CCP_ECC_FUNCTION_PADD_384BIT, + CCP_ECC_FUNCTION_PMUL_384BIT, + CCP_ECC_FUNCTION_PDBL_384BIT, +}; + +/** + * struct ccp_ecc_modular_math - CCP ECC modular math parameters + * @operand_1: first operand for the modular math operation + * @operand_1_len: length of the first operand + * @operand_2: second operand for the modular math operation + * (not used for CCP_ECC_FUNCTION_MINV_384BIT) + * @operand_2_len: length of the second operand + * (not used for CCP_ECC_FUNCTION_MINV_384BIT) + * @result: result of the modular math operation + * @result_len: length of the supplied result buffer + */ +struct ccp_ecc_modular_math { + struct scatterlist *operand_1; + unsigned int operand_1_len; /* In bytes */ + + struct scatterlist *operand_2; + unsigned int operand_2_len; /* In bytes */ + + struct scatterlist *result; + unsigned int result_len; /* In bytes */ +}; + +/** + * struct ccp_ecc_point - CCP ECC point definition + * @x: the x coordinate of the ECC point + * @x_len: the length of the x coordinate + * @y: the y coordinate of the ECC point + * @y_len: the length of the y coordinate + */ +struct ccp_ecc_point { + struct scatterlist *x; + unsigned int x_len; /* In bytes */ + + struct scatterlist *y; + unsigned int y_len; /* In bytes */ +}; + +/** + * struct ccp_ecc_point_math - CCP ECC point math parameters + * @point_1: the first point of the ECC point math operation + * @point_2: the second point of the ECC point math operation + * (only used for CCP_ECC_FUNCTION_PADD_384BIT) + * @domain_a: the a parameter of the ECC curve + * @domain_a_len: the length of the a parameter + * @scalar: the scalar parameter for the point match operation + * (only used for CCP_ECC_FUNCTION_PMUL_384BIT) + * @scalar_len: the length of the scalar parameter + * (only used for CCP_ECC_FUNCTION_PMUL_384BIT) + * @result: the point resulting from the point math operation + */ +struct ccp_ecc_point_math { + struct ccp_ecc_point point_1; + struct ccp_ecc_point point_2; + + struct scatterlist *domain_a; + unsigned int domain_a_len; /* In bytes */ + + struct scatterlist *scalar; + unsigned int scalar_len; /* In bytes */ + + struct ccp_ecc_point result; +}; + +/** + * struct ccp_ecc_engine - CCP ECC operation + * @function: ECC function to perform + * @mod: ECC modulus + * @mod_len: length in bytes of modulus + * @mm: module math parameters + * @pm: point math parameters + * @ecc_result: result of the ECC operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - 
function, mod, mod_len + * - operand, operand_len, operand_count, output, output_len, output_count + * - ecc_result + */ +struct ccp_ecc_engine { + enum ccp_ecc_function function; + + struct scatterlist *mod; + u32 mod_len; /* In bytes */ + + union { + struct ccp_ecc_modular_math mm; + struct ccp_ecc_point_math pm; + } u; + + u16 ecc_result; +}; + + +/** + * ccp_engine - CCP operation identifiers + * + * @CCP_ENGINE_AES: AES operation + * @CCP_ENGINE_XTS_AES: 128-bit XTS AES operation + * @CCP_ENGINE_RSVD1: unused + * @CCP_ENGINE_SHA: SHA operation + * @CCP_ENGINE_RSA: RSA operation + * @CCP_ENGINE_PASSTHRU: pass-through operation + * @CCP_ENGINE_ZLIB_DECOMPRESS: unused + * @CCP_ENGINE_ECC: ECC operation + */ +enum ccp_engine { + CCP_ENGINE_AES = 0, + CCP_ENGINE_XTS_AES_128, + CCP_ENGINE_DES3, + CCP_ENGINE_SHA, + CCP_ENGINE_RSA, + CCP_ENGINE_PASSTHRU, + CCP_ENGINE_ZLIB_DECOMPRESS, + CCP_ENGINE_ECC, + CCP_ENGINE__LAST, +}; + +/* Flag values for flags member of ccp_cmd */ +#define CCP_CMD_MAY_BACKLOG 0x00000001 +#define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 + +/** + * struct ccp_cmd - CCP operation request + * @entry: list element (ccp driver use only) + * @work: work element used for callbacks (ccp driver use only) + * @ccp: CCP device to be run on + * @ret: operation return code (ccp driver use only) + * @flags: cmd processing flags + * @engine: CCP operation to perform + * @engine_error: CCP engine return code + * @u: engine specific structures, refer to specific engine struct below + * @callback: operation completion callback function + * @data: parameter value to be supplied to the callback function + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - engine, callback + * - See the operation structures below for what is required for each + * operation. + */ +struct ccp_cmd { + /* The list_head, work_struct, ccp and ret variables are for use + * by the CCP driver only. 
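+ *
+ * From the caller's side, a minimal sketch of submitting a pass-through
+ * command (illustrative only: my_callback, my_ctx and the scatterlists
+ * src_sg/dst_sg are assumed to come from the caller, and error handling
+ * is elided):
+ *
+ *	struct ccp_cmd cmd;
+ *	int ret;
+ *
+ *	memset(&cmd, 0, sizeof(cmd));
+ *	cmd.engine = CCP_ENGINE_PASSTHRU;
+ *	cmd.flags = CCP_CMD_MAY_BACKLOG;
+ *	cmd.u.passthru.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ *	cmd.u.passthru.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ *	cmd.u.passthru.src = src_sg;
+ *	cmd.u.passthru.dst = dst_sg;
+ *	cmd.u.passthru.src_len = len;
+ *	cmd.callback = my_callback;
+ *	cmd.data = my_ctx;
+ *	ret = ccp_enqueue_cmd(&cmd);
+ *
+ * ccp_enqueue_cmd() typically returns -EINPROGRESS once the command has
+ * been queued, and my_callback is later invoked as my_callback(my_ctx, err).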
+ */ + struct list_head entry; + struct work_struct work; + struct ccp_device *ccp; + int ret; + + u32 flags; + + enum ccp_engine engine; + u32 engine_error; + + union { + struct ccp_aes_engine aes; + struct ccp_xts_aes_engine xts; + struct ccp_des3_engine des3; + struct ccp_sha_engine sha; + struct ccp_rsa_engine rsa; + struct ccp_passthru_engine passthru; + struct ccp_passthru_nomap_engine passthru_nomap; + struct ccp_ecc_engine ecc; + } u; + + /* Completion callback support */ + void (*callback)(void *data, int err); + void *data; +}; + +#endif diff --git a/include/linux/cdev.h b/include/linux/cdev.h new file mode 100644 index 000000000..0e8cd6293 --- /dev/null +++ b/include/linux/cdev.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CDEV_H +#define _LINUX_CDEV_H + +#include +#include +#include +#include + +struct file_operations; +struct inode; +struct module; + +struct cdev { + struct kobject kobj; + struct module *owner; + const struct file_operations *ops; + struct list_head list; + dev_t dev; + unsigned int count; +} __randomize_layout; + +void cdev_init(struct cdev *, const struct file_operations *); + +struct cdev *cdev_alloc(void); + +void cdev_put(struct cdev *p); + +int cdev_add(struct cdev *, dev_t, unsigned); + +void cdev_set_parent(struct cdev *p, struct kobject *kobj); +int cdev_device_add(struct cdev *cdev, struct device *dev); +void cdev_device_del(struct cdev *cdev, struct device *dev); + +void cdev_del(struct cdev *); + +void cd_forget(struct inode *); + +#endif diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h new file mode 100644 index 000000000..528271c60 --- /dev/null +++ b/include/linux/cdrom.h @@ -0,0 +1,317 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * -- + * General header file for linux CD-ROM drivers + * Copyright (C) 1992 David Giller, rafetmad@oxy.edu + * 1994, 1995 Eberhard Mönkeberg, emoenke@gwdg.de + * 1996 David van Leeuwen, david@tm.tno.nl + * 1997, 1998 Erik Andersen, andersee@debian.org + * 1998-2002 Jens Axboe, axboe@suse.de + */ +#ifndef _LINUX_CDROM_H +#define _LINUX_CDROM_H + +#include /* not really needed, later.. 
*/ +#include +#include +#include + +struct packet_command +{ + unsigned char cmd[CDROM_PACKET_SIZE]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct scsi_sense_hdr *sshdr; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; +}; + +/* + * _OLD will use PIO transfer on atapi devices, _BPC_* will use DMA + */ +#define CDDA_OLD 0 /* old style */ +#define CDDA_BPC_SINGLE 1 /* single frame block pc */ +#define CDDA_BPC_FULL 2 /* multi frame block pc */ + +/* Uniform cdrom data structures for cdrom.c */ +struct cdrom_device_info { + const struct cdrom_device_ops *ops; /* link to device_ops */ + struct list_head list; /* linked list of all device_info */ + struct gendisk *disk; /* matching block layer disk */ + void *handle; /* driver-dependent data */ +/* specifications */ + int mask; /* mask of capability: disables them */ + int speed; /* maximum speed for reading data */ + int capacity; /* number of discs in jukebox */ +/* device-related storage */ + unsigned int options : 30; /* options flags */ + unsigned mc_flags : 2; /* media change buffer flags */ + unsigned int vfs_events; /* cached events for vfs path */ + unsigned int ioctl_events; /* cached events for ioctl path */ + int use_count; /* number of times device opened */ + char name[20]; /* name of the device type */ +/* per-device flags */ + __u8 sanyo_slot : 2; /* Sanyo 3 CD changer support */ + __u8 keeplocked : 1; /* CDROM_LOCKDOOR status */ + __u8 reserved : 5; /* not used yet */ + int cdda_method; /* see flags */ + __u8 last_sense; + __u8 media_written; /* dirty flag, DVD+RW bookkeeping */ + unsigned short mmc3_profile; /* current MMC3 profile */ + int for_data; + int (*exit)(struct cdrom_device_info *); + int mrw_mode_page; +}; + +struct cdrom_device_ops { +/* routines */ + int (*open) (struct cdrom_device_info *, int); + void (*release) (struct cdrom_device_info *); + int (*drive_status) (struct cdrom_device_info *, int); + unsigned int (*check_events) (struct cdrom_device_info *cdi, + unsigned int clearing, int slot); + int (*media_changed) (struct cdrom_device_info *, int); + int (*tray_move) (struct cdrom_device_info *, int); + int (*lock_door) (struct cdrom_device_info *, int); + int (*select_speed) (struct cdrom_device_info *, int); + int (*select_disc) (struct cdrom_device_info *, int); + int (*get_last_session) (struct cdrom_device_info *, + struct cdrom_multisession *); + int (*get_mcn) (struct cdrom_device_info *, + struct cdrom_mcn *); + /* hard reset device */ + int (*reset) (struct cdrom_device_info *); + /* play stuff */ + int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *); + +/* driver specifications */ + const int capability; /* capability flags */ + /* handle uniform packets for scsi type devices (scsi,atapi) */ + int (*generic_packet) (struct cdrom_device_info *, + struct packet_command *); +}; + +/* the general block_device operations structure: */ +extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, + fmode_t mode); +extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode); +extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, + fmode_t mode, unsigned int cmd, unsigned long arg); +extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, + unsigned int clearing); +extern int cdrom_media_changed(struct cdrom_device_info *); + +extern int register_cdrom(struct cdrom_device_info *cdi); +extern void unregister_cdrom(struct cdrom_device_info *cdi); + +typedef struct { + 
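+	/* Per-disc track-type counts, as filled in by the cdrom core when it
+	 * reads the TOC (a hedged summary): data/audio/cdi/xa count the tracks
+	 * of each type, and error is non-zero if the TOC could not be read. */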
int data; + int audio; + int cdi; + int xa; + long error; +} tracktype; + +extern int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written); +extern int cdrom_number_of_slots(struct cdrom_device_info *cdi); +extern int cdrom_mode_select(struct cdrom_device_info *cdi, + struct packet_command *cgc); +extern int cdrom_mode_sense(struct cdrom_device_info *cdi, + struct packet_command *cgc, + int page_code, int page_control); +extern void init_cdrom_command(struct packet_command *cgc, + void *buffer, int len, int type); +extern int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, + struct packet_command *cgc); + +/* The SCSI spec says there could be 256 slots. */ +#define CDROM_MAX_SLOTS 256 + +struct cdrom_mechstat_header { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 fault : 1; + __u8 changer_state : 2; + __u8 curslot : 5; + __u8 mech_state : 3; + __u8 door_open : 1; + __u8 reserved1 : 4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 curslot : 5; + __u8 changer_state : 2; + __u8 fault : 1; + __u8 reserved1 : 4; + __u8 door_open : 1; + __u8 mech_state : 3; +#endif + __u8 curlba[3]; + __u8 nslots; + __u16 slot_tablelen; +}; + +struct cdrom_slot { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 disc_present : 1; + __u8 reserved1 : 6; + __u8 change : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 change : 1; + __u8 reserved1 : 6; + __u8 disc_present : 1; +#endif + __u8 reserved2[3]; +}; + +struct cdrom_changer_info { + struct cdrom_mechstat_header hdr; + struct cdrom_slot slots[CDROM_MAX_SLOTS]; +}; + +typedef enum { + mechtype_caddy = 0, + mechtype_tray = 1, + mechtype_popup = 2, + mechtype_individual_changer = 4, + mechtype_cartridge_changer = 5 +} mechtype_t; + +typedef struct { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 ps : 1; + __u8 reserved1 : 1; + __u8 page_code : 6; + __u8 page_length; + __u8 reserved2 : 1; + __u8 bufe : 1; + __u8 ls_v : 1; + __u8 test_write : 1; + __u8 write_type : 4; + __u8 multi_session : 2; /* or border, DVD */ + __u8 fp : 1; + __u8 copy : 1; + __u8 track_mode : 4; + __u8 reserved3 : 4; + __u8 data_block_type : 4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 page_code : 6; + __u8 reserved1 : 1; + __u8 ps : 1; + __u8 page_length; + __u8 write_type : 4; + __u8 test_write : 1; + __u8 ls_v : 1; + __u8 bufe : 1; + __u8 reserved2 : 1; + __u8 track_mode : 4; + __u8 copy : 1; + __u8 fp : 1; + __u8 multi_session : 2; /* or border, DVD */ + __u8 data_block_type : 4; + __u8 reserved3 : 4; +#endif + __u8 link_size; + __u8 reserved4; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved5 : 2; + __u8 app_code : 6; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 app_code : 6; + __u8 reserved5 : 2; +#endif + __u8 session_format; + __u8 reserved6; + __be32 packet_size; + __u16 audio_pause; + __u8 mcn[16]; + __u8 isrc[16]; + __u8 subhdr0; + __u8 subhdr1; + __u8 subhdr2; + __u8 subhdr3; +} __attribute__((packed)) write_param_page; + +struct modesel_head +{ + __u8 reserved1; + __u8 medium; + __u8 reserved2; + __u8 block_desc_length; + __u8 density; + __u8 number_of_blocks_hi; + __u8 number_of_blocks_med; + __u8 number_of_blocks_lo; + __u8 reserved3; + __u8 block_length_hi; + __u8 block_length_med; + __u8 block_length_lo; +}; + +typedef struct { + __u16 report_key_length; + __u8 reserved1; + __u8 reserved2; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 type_code : 2; + __u8 vra : 3; + __u8 ucca : 3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 ucca : 3; + __u8 vra : 3; + __u8 type_code : 2; +#endif + __u8 region_mask; + __u8 rpc_scheme; + __u8 reserved3; +} 
rpc_state_t; + +struct event_header { + __be16 data_len; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 nea : 1; + __u8 reserved1 : 4; + __u8 notification_class : 3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 notification_class : 3; + __u8 reserved1 : 4; + __u8 nea : 1; +#endif + __u8 supp_event_class; +}; + +struct media_event_desc { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved1 : 4; + __u8 media_event_code : 4; + __u8 reserved2 : 6; + __u8 media_present : 1; + __u8 door_open : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 media_event_code : 4; + __u8 reserved1 : 4; + __u8 door_open : 1; + __u8 media_present : 1; + __u8 reserved2 : 6; +#endif + __u8 start_slot; + __u8 end_slot; +}; + +extern int cdrom_get_media_event(struct cdrom_device_info *cdi, struct media_event_desc *med); + +static inline void lba_to_msf(int lba, u8 *m, u8 *s, u8 *f) +{ + lba += CD_MSF_OFFSET; + lba &= 0xffffff; /* negative lbas use only 24 bits */ + *m = lba / (CD_SECS * CD_FRAMES); + lba %= (CD_SECS * CD_FRAMES); + *s = lba / CD_FRAMES; + *f = lba % CD_FRAMES; +} + +static inline int msf_to_lba(u8 m, u8 s, u8 f) +{ + return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_MSF_OFFSET; +} +#endif /* _LINUX_CDROM_H */ diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h new file mode 100644 index 000000000..6728c2ee0 --- /dev/null +++ b/include/linux/ceph/auth.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_AUTH_H +#define _FS_CEPH_AUTH_H + +#include +#include + +/* + * Abstract interface for communicating with the authenticate module. + * There is some handshake that takes place between us and the monitor + * to acquire the necessary keys. These are used to generate an + * 'authorizer' that we use when connecting to a service (mds, osd). + */ + +struct ceph_auth_client; +struct ceph_msg; + +struct ceph_authorizer { + void (*destroy)(struct ceph_authorizer *); +}; + +struct ceph_auth_handshake { + struct ceph_authorizer *authorizer; + void *authorizer_buf; + size_t authorizer_buf_len; + void *authorizer_reply_buf; + size_t authorizer_reply_buf_len; + int (*sign_message)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); + int (*check_message_signature)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); +}; + +struct ceph_auth_client_ops { + const char *name; + + /* + * true if we are authenticated and can connect to + * services. + */ + int (*is_authenticated)(struct ceph_auth_client *ac); + + /* + * true if we should (re)authenticate, e.g., when our tickets + * are getting old and crusty. + */ + int (*should_authenticate)(struct ceph_auth_client *ac); + + /* + * build requests and process replies during monitor + * handshake. if handle_reply returns -EAGAIN, we build + * another request. + */ + int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end); + int (*handle_reply)(struct ceph_auth_client *ac, int result, + void *buf, void *end); + + /* + * Create authorizer for connecting to a service, and verify + * the response to authenticate the service. 
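+ * (A hedged note on the call flow, not a requirement of this interface:
+ * connection setup normally goes through the ceph_auth_create_authorizer()
+ * and ceph_auth_verify_authorizer_reply() wrappers declared below rather
+ * than invoking these ops directly.)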
+ */ + int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type, + struct ceph_auth_handshake *auth); + /* ensure that an existing authorizer is up to date */ + int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type, + struct ceph_auth_handshake *auth); + int (*add_authorizer_challenge)(struct ceph_auth_client *ac, + struct ceph_authorizer *a, + void *challenge_buf, + int challenge_buf_len); + int (*verify_authorizer_reply)(struct ceph_auth_client *ac, + struct ceph_authorizer *a); + void (*invalidate_authorizer)(struct ceph_auth_client *ac, + int peer_type); + + /* reset when we (re)connect to a monitor */ + void (*reset)(struct ceph_auth_client *ac); + + void (*destroy)(struct ceph_auth_client *ac); + + int (*sign_message)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); + int (*check_message_signature)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); +}; + +struct ceph_auth_client { + u32 protocol; /* CEPH_AUTH_* */ + void *private; /* for use by protocol implementation */ + const struct ceph_auth_client_ops *ops; /* null iff protocol==0 */ + + bool negotiating; /* true if negotiating protocol */ + const char *name; /* entity name */ + u64 global_id; /* our unique id in system */ + const struct ceph_crypto_key *key; /* our secret key */ + unsigned want_keys; /* which services we want */ + + struct mutex mutex; +}; + +extern struct ceph_auth_client *ceph_auth_init(const char *name, + const struct ceph_crypto_key *key); +extern void ceph_auth_destroy(struct ceph_auth_client *ac); + +extern void ceph_auth_reset(struct ceph_auth_client *ac); + +extern int ceph_auth_build_hello(struct ceph_auth_client *ac, + void *buf, size_t len); +extern int ceph_handle_auth_reply(struct ceph_auth_client *ac, + void *buf, size_t len, + void *reply_buf, size_t reply_len); +int ceph_auth_entity_name_encode(const char *name, void **p, void *end); + +extern int ceph_build_auth(struct ceph_auth_client *ac, + void *msg_buf, size_t msg_len); + +extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); +extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, + int peer_type, + struct ceph_auth_handshake *auth); +void ceph_auth_destroy_authorizer(struct ceph_authorizer *a); +extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, + int peer_type, + struct ceph_auth_handshake *a); +int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac, + struct ceph_authorizer *a, + void *challenge_buf, + int challenge_buf_len); +extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, + struct ceph_authorizer *a); +extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, + int peer_type); + +static inline int ceph_auth_sign_message(struct ceph_auth_handshake *auth, + struct ceph_msg *msg) +{ + if (auth->sign_message) + return auth->sign_message(auth, msg); + return 0; +} + +static inline +int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth, + struct ceph_msg *msg) +{ + if (auth->check_message_signature) + return auth->check_message_signature(auth, msg); + return 0; +} +#endif diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h new file mode 100644 index 000000000..11cdc7c60 --- /dev/null +++ b/include/linux/ceph/buffer.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __FS_CEPH_BUFFER_H +#define __FS_CEPH_BUFFER_H + +#include +#include +#include +#include +#include + +/* + * a simple reference counted buffer. 
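+ * take a reference with ceph_buffer_get() and drop it with
+ * ceph_buffer_put(); the final put frees the buffer via
+ * ceph_buffer_release().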
+ * + * use kmalloc for smaller sizes, vmalloc for larger sizes. + */ +struct ceph_buffer { + struct kref kref; + struct kvec vec; + size_t alloc_len; +}; + +extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp); +extern void ceph_buffer_release(struct kref *kref); + +static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) +{ + kref_get(&b->kref); + return b; +} + +static inline void ceph_buffer_put(struct ceph_buffer *b) +{ + if (b) + kref_put(&b->kref, ceph_buffer_release); +} + +extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); + +#endif diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h new file mode 100644 index 000000000..d5a5da838 --- /dev/null +++ b/include/linux/ceph/ceph_debug.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_DEBUG_H +#define _FS_CEPH_DEBUG_H + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include + +#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG + +/* + * wrap pr_debug to include a filename:lineno prefix on each line. + * this incurs some overhead (kernel size and execution time) due to + * the extra function call at each call site. + */ + +# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) +# define dout(fmt, ...) \ + pr_debug("%.*s %12.12s:%-4d : " fmt, \ + 8 - (int)sizeof(KBUILD_MODNAME), " ", \ + kbasename(__FILE__), __LINE__, ##__VA_ARGS__) +# else +/* faux printk call just to see any compiler warnings. */ +# define dout(fmt, ...) do { \ + if (0) \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ + } while (0) +# endif + +#else + +/* + * or, just wrap pr_debug + */ +# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__) + +#endif + +#endif diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h new file mode 100644 index 000000000..6b92b3395 --- /dev/null +++ b/include/linux/ceph/ceph_features.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CEPH_FEATURES +#define __CEPH_FEATURES + +/* + * Each time we reclaim bits for reuse we need to specify another bit + * that, if present, indicates we have the new incarnation of that + * feature. Base case is 1 (first use). + */ +#define CEPH_FEATURE_INCARNATION_1 (0ull) +#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL + +#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \ + static const uint64_t CEPH_FEATURE_##name = (1ULL<> 24; +} +static inline __u32 ceph_frag_value(__u32 f) +{ + return f & 0xffffffu; +} +static inline __u32 ceph_frag_mask(__u32 f) +{ + return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu; +} +static inline __u32 ceph_frag_mask_shift(__u32 f) +{ + return 24 - ceph_frag_bits(f); +} + +static inline bool ceph_frag_contains_value(__u32 f, __u32 v) +{ + return (v & ceph_frag_mask(f)) == ceph_frag_value(f); +} + +static inline __u32 ceph_frag_make_child(__u32 f, int by, int i) +{ + int newbits = ceph_frag_bits(f) + by; + return ceph_frag_make(newbits, + ceph_frag_value(f) | (i << (24 - newbits))); +} +static inline bool ceph_frag_is_leftmost(__u32 f) +{ + return ceph_frag_value(f) == 0; +} +static inline bool ceph_frag_is_rightmost(__u32 f) +{ + return ceph_frag_value(f) == ceph_frag_mask(f); +} +static inline __u32 ceph_frag_next(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f), + ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f))); +} + +/* + * comparator to sort frags logically, as when traversing the + * number space in ascending order... 
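+ *
+ * a worked example of the encoding used by the helpers above (values are
+ * illustrative): ceph_frag_make(1, 0x800000) covers the upper half of the
+ * 24-bit hash space, ceph_frag_contains_value() on it is true exactly for
+ * values with the top bit set, and ceph_frag_make_child(f, 1, 0) /
+ * ceph_frag_make_child(f, 1, 1) split it into the quarters 0x800000 and
+ * 0xc00000 (i.e. bit patterns 10... and 11...).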
+ */ +int ceph_frag_compare(__u32 a, __u32 b); + +#endif diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h new file mode 100644 index 000000000..4903deb07 --- /dev/null +++ b/include/linux/ceph/ceph_fs.h @@ -0,0 +1,828 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ceph_fs.h - Ceph constants and data types to share between kernel and + * user space. + * + * Most types in this file are defined as little-endian, and are + * primarily intended to describe data structures that pass over the + * wire or that are stored on disk. + * + * LGPL2 + */ + +#ifndef CEPH_FS_H +#define CEPH_FS_H + +#include +#include + +/* + * subprotocol versions. when specific messages types or high-level + * protocols change, bump the affected components. we keep rev + * internal cluster protocols separately from the public, + * client-facing protocol. + */ +#define CEPH_OSDC_PROTOCOL 24 /* server/client */ +#define CEPH_MDSC_PROTOCOL 32 /* server/client */ +#define CEPH_MONC_PROTOCOL 15 /* server/client */ + + +#define CEPH_INO_ROOT 1 +#define CEPH_INO_CEPH 2 /* hidden .ceph dir */ +#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */ + +/* arbitrary limit on max # of monitors (cluster of 3 is typical) */ +#define CEPH_MAX_MON 31 + +/* + * legacy ceph_file_layoute + */ +struct ceph_file_layout_legacy { + /* file -> object mapping */ + __le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple + of page size. */ + __le32 fl_stripe_count; /* over this many objects */ + __le32 fl_object_size; /* until objects are this big, then move to + new objects */ + __le32 fl_cas_hash; /* UNUSED. 0 = none; 1 = sha256 */ + + /* pg -> disk layout */ + __le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */ + + /* object -> pg layout */ + __le32 fl_unused; /* unused; used to be preferred primary for pg (-1 for none) */ + __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */ +} __attribute__ ((packed)); + +struct ceph_string; +/* + * ceph_file_layout - describe data layout for a file/inode + */ +struct ceph_file_layout { + /* file -> object mapping */ + u32 stripe_unit; /* stripe unit, in bytes */ + u32 stripe_count; /* over this many objects */ + u32 object_size; /* until objects are this big */ + s64 pool_id; /* rados pool id */ + struct ceph_string __rcu *pool_ns; /* rados pool namespace */ +}; + +extern int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); +extern void ceph_file_layout_from_legacy(struct ceph_file_layout *fl, + struct ceph_file_layout_legacy *legacy); +extern void ceph_file_layout_to_legacy(struct ceph_file_layout *fl, + struct ceph_file_layout_legacy *legacy); + +#define CEPH_MIN_STRIPE_UNIT 65536 + +struct ceph_dir_layout { + __u8 dl_dir_hash; /* see ceph_hash.h for ids */ + __u8 dl_unused1; + __u16 dl_unused2; + __u32 dl_unused3; +} __attribute__ ((packed)); + +/* crypto algorithms */ +#define CEPH_CRYPTO_NONE 0x0 +#define CEPH_CRYPTO_AES 0x1 + +#define CEPH_AES_IV "cephsageyudagreg" + +/* security/authentication protocols */ +#define CEPH_AUTH_UNKNOWN 0x0 +#define CEPH_AUTH_NONE 0x1 +#define CEPH_AUTH_CEPHX 0x2 + +#define CEPH_AUTH_UID_DEFAULT ((__u64) -1) + + +/********************************************* + * message layer + */ + +/* + * message types + */ + +/* misc */ +#define CEPH_MSG_SHUTDOWN 1 +#define CEPH_MSG_PING 2 + +/* client <-> monitor */ +#define CEPH_MSG_MON_MAP 4 +#define CEPH_MSG_MON_GET_MAP 5 +#define CEPH_MSG_STATFS 13 +#define CEPH_MSG_STATFS_REPLY 14 +#define CEPH_MSG_MON_SUBSCRIBE 15 +#define 
CEPH_MSG_MON_SUBSCRIBE_ACK 16 +#define CEPH_MSG_AUTH 17 +#define CEPH_MSG_AUTH_REPLY 18 +#define CEPH_MSG_MON_GET_VERSION 19 +#define CEPH_MSG_MON_GET_VERSION_REPLY 20 + +/* client <-> mds */ +#define CEPH_MSG_MDS_MAP 21 +#define CEPH_MSG_FS_MAP_USER 103 + +#define CEPH_MSG_CLIENT_SESSION 22 +#define CEPH_MSG_CLIENT_RECONNECT 23 + +#define CEPH_MSG_CLIENT_REQUEST 24 +#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25 +#define CEPH_MSG_CLIENT_REPLY 26 +#define CEPH_MSG_CLIENT_CAPS 0x310 +#define CEPH_MSG_CLIENT_LEASE 0x311 +#define CEPH_MSG_CLIENT_SNAP 0x312 +#define CEPH_MSG_CLIENT_CAPRELEASE 0x313 +#define CEPH_MSG_CLIENT_QUOTA 0x314 + +/* pool ops */ +#define CEPH_MSG_POOLOP_REPLY 48 +#define CEPH_MSG_POOLOP 49 + +/* mon commands */ +#define CEPH_MSG_MON_COMMAND 50 +#define CEPH_MSG_MON_COMMAND_ACK 51 + +/* osd */ +#define CEPH_MSG_OSD_MAP 41 +#define CEPH_MSG_OSD_OP 42 +#define CEPH_MSG_OSD_OPREPLY 43 +#define CEPH_MSG_WATCH_NOTIFY 44 +#define CEPH_MSG_OSD_BACKOFF 61 + + +/* watch-notify operations */ +enum { + CEPH_WATCH_EVENT_NOTIFY = 1, /* notifying watcher */ + CEPH_WATCH_EVENT_NOTIFY_COMPLETE = 2, /* notifier notified when done */ + CEPH_WATCH_EVENT_DISCONNECT = 3, /* we were disconnected */ +}; + + +struct ceph_mon_request_header { + __le64 have_version; + __le16 session_mon; + __le64 session_mon_tid; +} __attribute__ ((packed)); + +struct ceph_mon_statfs { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; + __u8 contains_data_pool; + __le64 data_pool; +} __attribute__ ((packed)); + +struct ceph_statfs { + __le64 kb, kb_used, kb_avail; + __le64 num_objects; +} __attribute__ ((packed)); + +struct ceph_mon_statfs_reply { + struct ceph_fsid fsid; + __le64 version; + struct ceph_statfs st; +} __attribute__ ((packed)); + +struct ceph_mon_command { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; + __le32 num_strs; /* always 1 */ + __le32 str_len; + char str[]; +} __attribute__ ((packed)); + +struct ceph_osd_getmap { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; + __le32 start; +} __attribute__ ((packed)); + +struct ceph_mds_getmap { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; +} __attribute__ ((packed)); + +struct ceph_client_mount { + struct ceph_mon_request_header monhdr; +} __attribute__ ((packed)); + +#define CEPH_SUBSCRIBE_ONETIME 1 /* i want only 1 update after have */ + +struct ceph_mon_subscribe_item { + __le64 start; + __u8 flags; +} __attribute__ ((packed)); + +struct ceph_mon_subscribe_ack { + __le32 duration; /* seconds */ + struct ceph_fsid fsid; +} __attribute__ ((packed)); + +#define CEPH_FS_CLUSTER_ID_NONE -1 + +/* + * mdsmap flags + */ +#define CEPH_MDSMAP_DOWN (1<<0) /* cluster deliberately down */ + +/* + * mds states + * > 0 -> in + * <= 0 -> out + */ +#define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */ +#define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees. + empty log. */ +#define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */ +#define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */ +#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */ +#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */ +#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */ +#define CEPH_MDS_STATE_REPLAYONCE -9 /* up, replaying an active node's journal */ + +#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. 
*/ +#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed + operations (import, rename, etc.) */ +#define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */ +#define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */ +#define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */ +#define CEPH_MDS_STATE_ACTIVE 13 /* up, active */ +#define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */ + +extern const char *ceph_mds_state_name(int s); + + +/* + * metadata lock types. + * - these are bitmasks.. we can compose them + * - they also define the lock ordering by the MDS + * - a few of these are internal to the mds + */ +#define CEPH_LOCK_DVERSION 1 +#define CEPH_LOCK_DN 2 +#define CEPH_LOCK_ISNAP 16 +#define CEPH_LOCK_IVERSION 32 /* mds internal */ +#define CEPH_LOCK_IFILE 64 +#define CEPH_LOCK_IAUTH 128 +#define CEPH_LOCK_ILINK 256 +#define CEPH_LOCK_IDFT 512 /* dir frag tree */ +#define CEPH_LOCK_INEST 1024 /* mds internal */ +#define CEPH_LOCK_IXATTR 2048 +#define CEPH_LOCK_IFLOCK 4096 /* advisory file locks */ +#define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */ +#define CEPH_LOCK_IPOLICY 16384 /* policy lock on dirs. MDS internal */ + +/* client_session ops */ +enum { + CEPH_SESSION_REQUEST_OPEN, + CEPH_SESSION_OPEN, + CEPH_SESSION_REQUEST_CLOSE, + CEPH_SESSION_CLOSE, + CEPH_SESSION_REQUEST_RENEWCAPS, + CEPH_SESSION_RENEWCAPS, + CEPH_SESSION_STALE, + CEPH_SESSION_RECALL_STATE, + CEPH_SESSION_FLUSHMSG, + CEPH_SESSION_FLUSHMSG_ACK, + CEPH_SESSION_FORCE_RO, + CEPH_SESSION_REJECT, +}; + +extern const char *ceph_session_op_name(int op); + +struct ceph_mds_session_head { + __le32 op; + __le64 seq; + struct ceph_timespec stamp; + __le32 max_caps, max_leases; +} __attribute__ ((packed)); + +/* client_request */ +/* + * metadata ops. + * & 0x001000 -> write op + * & 0x010000 -> follow symlink (e.g. stat(), not lstat()). + & & 0x100000 -> use weird ino/path trace + */ +#define CEPH_MDS_OP_WRITE 0x001000 +enum { + CEPH_MDS_OP_LOOKUP = 0x00100, + CEPH_MDS_OP_GETATTR = 0x00101, + CEPH_MDS_OP_LOOKUPHASH = 0x00102, + CEPH_MDS_OP_LOOKUPPARENT = 0x00103, + CEPH_MDS_OP_LOOKUPINO = 0x00104, + CEPH_MDS_OP_LOOKUPNAME = 0x00105, + + CEPH_MDS_OP_SETXATTR = 0x01105, + CEPH_MDS_OP_RMXATTR = 0x01106, + CEPH_MDS_OP_SETLAYOUT = 0x01107, + CEPH_MDS_OP_SETATTR = 0x01108, + CEPH_MDS_OP_SETFILELOCK= 0x01109, + CEPH_MDS_OP_GETFILELOCK= 0x00110, + CEPH_MDS_OP_SETDIRLAYOUT=0x0110a, + + CEPH_MDS_OP_MKNOD = 0x01201, + CEPH_MDS_OP_LINK = 0x01202, + CEPH_MDS_OP_UNLINK = 0x01203, + CEPH_MDS_OP_RENAME = 0x01204, + CEPH_MDS_OP_MKDIR = 0x01220, + CEPH_MDS_OP_RMDIR = 0x01221, + CEPH_MDS_OP_SYMLINK = 0x01222, + + CEPH_MDS_OP_CREATE = 0x01301, + CEPH_MDS_OP_OPEN = 0x00302, + CEPH_MDS_OP_READDIR = 0x00305, + + CEPH_MDS_OP_LOOKUPSNAP = 0x00400, + CEPH_MDS_OP_MKSNAP = 0x01400, + CEPH_MDS_OP_RMSNAP = 0x01401, + CEPH_MDS_OP_LSSNAP = 0x00402, + CEPH_MDS_OP_RENAMESNAP = 0x01403, +}; + +extern const char *ceph_mds_op_name(int op); + + +#define CEPH_SETATTR_MODE 1 +#define CEPH_SETATTR_UID 2 +#define CEPH_SETATTR_GID 4 +#define CEPH_SETATTR_MTIME 8 +#define CEPH_SETATTR_ATIME 16 +#define CEPH_SETATTR_SIZE 32 +#define CEPH_SETATTR_CTIME 64 + +/* + * Ceph setxattr request flags. + */ +#define CEPH_XATTR_CREATE (1 << 0) +#define CEPH_XATTR_REPLACE (1 << 1) +#define CEPH_XATTR_REMOVE (1 << 31) + +/* + * readdir request flags; + */ +#define CEPH_READDIR_REPLY_BITFLAGS (1<<0) + +/* + * readdir reply flags. 
+ */ +#define CEPH_READDIR_FRAG_END (1<<0) +#define CEPH_READDIR_FRAG_COMPLETE (1<<8) +#define CEPH_READDIR_HASH_ORDER (1<<9) +#define CEPH_READDIR_OFFSET_HASH (1<<10) + +/* + * open request flags + */ +#define CEPH_O_RDONLY 00000000 +#define CEPH_O_WRONLY 00000001 +#define CEPH_O_RDWR 00000002 +#define CEPH_O_CREAT 00000100 +#define CEPH_O_EXCL 00000200 +#define CEPH_O_TRUNC 00001000 +#define CEPH_O_DIRECTORY 00200000 +#define CEPH_O_NOFOLLOW 00400000 + +union ceph_mds_request_args { + struct { + __le32 mask; /* CEPH_CAP_* */ + } __attribute__ ((packed)) getattr; + struct { + __le32 mode; + __le32 uid; + __le32 gid; + struct ceph_timespec mtime; + struct ceph_timespec atime; + __le64 size, old_size; /* old_size needed by truncate */ + __le32 mask; /* CEPH_SETATTR_* */ + } __attribute__ ((packed)) setattr; + struct { + __le32 frag; /* which dir fragment */ + __le32 max_entries; /* how many dentries to grab */ + __le32 max_bytes; + __le16 flags; + __le32 offset_hash; + } __attribute__ ((packed)) readdir; + struct { + __le32 mode; + __le32 rdev; + } __attribute__ ((packed)) mknod; + struct { + __le32 mode; + } __attribute__ ((packed)) mkdir; + struct { + __le32 flags; + __le32 mode; + __le32 stripe_unit; /* layout for newly created file */ + __le32 stripe_count; /* ... */ + __le32 object_size; + __le32 file_replication; + __le32 mask; /* CEPH_CAP_* */ + __le32 old_size; + } __attribute__ ((packed)) open; + struct { + __le32 flags; + } __attribute__ ((packed)) setxattr; + struct { + struct ceph_file_layout_legacy layout; + } __attribute__ ((packed)) setlayout; + struct { + __u8 rule; /* currently fcntl or flock */ + __u8 type; /* shared, exclusive, remove*/ + __le64 owner; /* owner of the lock */ + __le64 pid; /* process id requesting the lock */ + __le64 start; /* initial location to lock */ + __le64 length; /* num bytes to lock from start */ + __u8 wait; /* will caller wait for lock to become available? */ + } __attribute__ ((packed)) filelock_change; +} __attribute__ ((packed)); + +#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */ +#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ + +struct ceph_mds_request_head { + __le64 oldest_client_tid; + __le32 mdsmap_epoch; /* on client */ + __le32 flags; /* CEPH_MDS_FLAG_* */ + __u8 num_retry, num_fwd; /* count retry, fwd attempts */ + __le16 num_releases; /* # include cap/lease release records */ + __le32 op; /* mds op code */ + __le32 caller_uid, caller_gid; + __le64 ino; /* use this ino for openc, mkdir, mknod, + etc. (if replaying) */ + union ceph_mds_request_args args; +} __attribute__ ((packed)); + +/* cap/lease release record */ +struct ceph_mds_request_release { + __le64 ino, cap_id; /* ino and unique cap id */ + __le32 caps, wanted; /* new issued, wanted */ + __le32 seq, issue_seq, mseq; + __le32 dname_seq; /* if releasing a dentry lease, a */ + __le32 dname_len; /* string follows. */ +} __attribute__ ((packed)); + +/* client reply */ +struct ceph_mds_reply_head { + __le32 op; + __le32 result; + __le32 mdsmap_epoch; + __u8 safe; /* true if committed to disk */ + __u8 is_dentry, is_target; /* true if dentry, target inode records + are included with reply */ +} __attribute__ ((packed)); + +/* one for each node split */ +struct ceph_frag_tree_split { + __le32 frag; /* this frag splits... 
*/ + __le32 by; /* ...by this many bits */ +} __attribute__ ((packed)); + +struct ceph_frag_tree_head { + __le32 nsplits; /* num ceph_frag_tree_split records */ + struct ceph_frag_tree_split splits[]; +} __attribute__ ((packed)); + +/* capability issue, for bundling with mds reply */ +struct ceph_mds_reply_cap { + __le32 caps, wanted; /* caps issued, wanted */ + __le64 cap_id; + __le32 seq, mseq; + __le64 realm; /* snap realm */ + __u8 flags; /* CEPH_CAP_FLAG_* */ +} __attribute__ ((packed)); + +#define CEPH_CAP_FLAG_AUTH (1 << 0) /* cap is issued by auth mds */ +#define CEPH_CAP_FLAG_RELEASE (1 << 1) /* release the cap */ + +/* inode record, for bundling with mds reply */ +struct ceph_mds_reply_inode { + __le64 ino; + __le64 snapid; + __le32 rdev; + __le64 version; /* inode version */ + __le64 xattr_version; /* version for xattr blob */ + struct ceph_mds_reply_cap cap; /* caps issued for this inode */ + struct ceph_file_layout_legacy layout; + struct ceph_timespec ctime, mtime, atime; + __le32 time_warp_seq; + __le64 size, max_size, truncate_size; + __le32 truncate_seq; + __le32 mode, uid, gid; + __le32 nlink; + __le64 files, subdirs, rbytes, rfiles, rsubdirs; /* dir stats */ + struct ceph_timespec rctime; + struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */ +} __attribute__ ((packed)); +/* followed by frag array, symlink string, dir layout, xattr blob */ + +/* reply_lease follows dname, and reply_inode */ +struct ceph_mds_reply_lease { + __le16 mask; /* lease type(s) */ + __le32 duration_ms; /* lease duration */ + __le32 seq; +} __attribute__ ((packed)); + +struct ceph_mds_reply_dirfrag { + __le32 frag; /* fragment */ + __le32 auth; /* auth mds, if this is a delegation point */ + __le32 ndist; /* number of mds' this is replicated on */ + __le32 dist[]; +} __attribute__ ((packed)); + +#define CEPH_LOCK_FCNTL 1 +#define CEPH_LOCK_FLOCK 2 +#define CEPH_LOCK_FCNTL_INTR 3 +#define CEPH_LOCK_FLOCK_INTR 4 + + +#define CEPH_LOCK_SHARED 1 +#define CEPH_LOCK_EXCL 2 +#define CEPH_LOCK_UNLOCK 4 + +struct ceph_filelock { + __le64 start;/* file offset to start lock at */ + __le64 length; /* num bytes to lock; 0 for all following start */ + __le64 client; /* which client holds the lock */ + __le64 owner; /* owner the lock */ + __le64 pid; /* process id holding the lock on the client */ + __u8 type; /* shared lock, exclusive lock, or unlock */ +} __attribute__ ((packed)); + + +/* file access modes */ +#define CEPH_FILE_MODE_PIN 0 +#define CEPH_FILE_MODE_RD 1 +#define CEPH_FILE_MODE_WR 2 +#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */ +#define CEPH_FILE_MODE_LAZY 4 /* lazy io */ +#define CEPH_FILE_MODE_BITS 4 + +int ceph_flags_to_mode(int flags); + +#define CEPH_INLINE_NONE ((__u64)-1) + +/* capability bits */ +#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */ + +/* generic cap bits */ +#define CEPH_CAP_GSHARED 1 /* client can reads */ +#define CEPH_CAP_GEXCL 2 /* client can read and update */ +#define CEPH_CAP_GCACHE 4 /* (file) client can cache reads */ +#define CEPH_CAP_GRD 8 /* (file) client can read */ +#define CEPH_CAP_GWR 16 /* (file) client can write */ +#define CEPH_CAP_GBUFFER 32 /* (file) client can buffer writes */ +#define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */ +#define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */ + +#define CEPH_CAP_SIMPLE_BITS 2 +#define CEPH_CAP_FILE_BITS 8 + +/* per-lock shift */ +#define CEPH_CAP_SAUTH 2 +#define CEPH_CAP_SLINK 4 +#define CEPH_CAP_SXATTR 6 +#define CEPH_CAP_SFILE 8 +#define 
CEPH_CAP_SFLOCK 20 + +#define CEPH_CAP_BITS 22 + +/* composed values */ +#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH) +#define CEPH_CAP_AUTH_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SAUTH) +#define CEPH_CAP_LINK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SLINK) +#define CEPH_CAP_LINK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SLINK) +#define CEPH_CAP_XATTR_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SXATTR) +#define CEPH_CAP_XATTR_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SXATTR) +#define CEPH_CAP_FILE(x) (x << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_CACHE (CEPH_CAP_GCACHE << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_RD (CEPH_CAP_GRD << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_WR (CEPH_CAP_GWR << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE) +#define CEPH_CAP_FLOCK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFLOCK) +#define CEPH_CAP_FLOCK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFLOCK) + + +/* cap masks (for getattr) */ +#define CEPH_STAT_CAP_INODE CEPH_CAP_PIN +#define CEPH_STAT_CAP_TYPE CEPH_CAP_PIN /* mode >> 12 */ +#define CEPH_STAT_CAP_SYMLINK CEPH_CAP_PIN +#define CEPH_STAT_CAP_UID CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_GID CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_MODE CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_NLINK CEPH_CAP_LINK_SHARED +#define CEPH_STAT_CAP_LAYOUT CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_MTIME CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_SIZE CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_ATIME CEPH_CAP_FILE_SHARED /* fixme */ +#define CEPH_STAT_CAP_XATTR CEPH_CAP_XATTR_SHARED +#define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN | \ + CEPH_CAP_AUTH_SHARED | \ + CEPH_CAP_LINK_SHARED | \ + CEPH_CAP_FILE_SHARED | \ + CEPH_CAP_XATTR_SHARED) +#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \ + CEPH_CAP_FILE_RD) +#define CEPH_STAT_RSTAT CEPH_CAP_FILE_WREXTEND + +#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \ + CEPH_CAP_LINK_SHARED | \ + CEPH_CAP_XATTR_SHARED | \ + CEPH_CAP_FILE_SHARED) +#define CEPH_CAP_ANY_RD (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \ + CEPH_CAP_FILE_CACHE) + +#define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \ + CEPH_CAP_LINK_EXCL | \ + CEPH_CAP_XATTR_EXCL | \ + CEPH_CAP_FILE_EXCL) +#define CEPH_CAP_ANY_FILE_RD (CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE | \ + CEPH_CAP_FILE_SHARED) +#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \ + CEPH_CAP_FILE_EXCL) +#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR) +#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \ + CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \ + CEPH_CAP_PIN) + +#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \ + CEPH_LOCK_IXATTR) + +int ceph_caps_for_mode(int mode); + +enum { + CEPH_CAP_OP_GRANT, /* mds->client grant */ + CEPH_CAP_OP_REVOKE, /* mds->client revoke */ + CEPH_CAP_OP_TRUNC, /* mds->client trunc notify */ + CEPH_CAP_OP_EXPORT, /* mds has exported the cap */ + CEPH_CAP_OP_IMPORT, /* mds has imported the cap */ + CEPH_CAP_OP_UPDATE, /* client->mds update */ + CEPH_CAP_OP_DROP, /* client->mds drop cap bits */ + CEPH_CAP_OP_FLUSH, /* client->mds cap writeback */ + CEPH_CAP_OP_FLUSH_ACK, /* mds->client flushed */ + CEPH_CAP_OP_FLUSHSNAP, /* client->mds flush snapped metadata */ + CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client 
flushed snapped metadata */ + CEPH_CAP_OP_RELEASE, /* client->mds release (clean) cap */ + CEPH_CAP_OP_RENEW, /* client->mds renewal request */ +}; + +extern const char *ceph_cap_op_name(int op); + +/* flags field in client cap messages (version >= 10) */ +#define CEPH_CLIENT_CAPS_SYNC (1<<0) +#define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1) +#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2); + +/* + * caps message, used for capability callbacks, acks, requests, etc. + */ +struct ceph_mds_caps { + __le32 op; /* CEPH_CAP_OP_* */ + __le64 ino, realm; + __le64 cap_id; + __le32 seq, issue_seq; + __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */ + __le32 migrate_seq; + __le64 snap_follows; + __le32 snap_trace_len; + + /* authlock */ + __le32 uid, gid, mode; + + /* linklock */ + __le32 nlink; + + /* xattrlock */ + __le32 xattr_len; + __le64 xattr_version; + + /* filelock */ + __le64 size, max_size, truncate_size; + __le32 truncate_seq; + struct ceph_timespec mtime, atime, ctime; + struct ceph_file_layout_legacy layout; + __le32 time_warp_seq; +} __attribute__ ((packed)); + +struct ceph_mds_cap_peer { + __le64 cap_id; + __le32 seq; + __le32 mseq; + __le32 mds; + __u8 flags; +} __attribute__ ((packed)); + +/* cap release msg head */ +struct ceph_mds_cap_release { + __le32 num; /* number of cap_items that follow */ +} __attribute__ ((packed)); + +struct ceph_mds_cap_item { + __le64 ino; + __le64 cap_id; + __le32 migrate_seq, seq; +} __attribute__ ((packed)); + +#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */ +#define CEPH_MDS_LEASE_RELEASE 2 /* client -> mds */ +#define CEPH_MDS_LEASE_RENEW 3 /* client <-> mds */ +#define CEPH_MDS_LEASE_REVOKE_ACK 4 /* client -> mds */ + +extern const char *ceph_lease_op_name(int o); + +/* lease msg header */ +struct ceph_mds_lease { + __u8 action; /* CEPH_MDS_LEASE_* */ + __le16 mask; /* which lease */ + __le64 ino; + __le64 first, last; /* snap range */ + __le32 seq; + __le32 duration_ms; /* duration of renewal */ +} __attribute__ ((packed)); +/* followed by a __le32+string for dname */ + +/* client reconnect */ +struct ceph_mds_cap_reconnect { + __le64 cap_id; + __le32 wanted; + __le32 issued; + __le64 snaprealm; + __le64 pathbase; /* base ino for our path to this ino */ + __le32 flock_len; /* size of flock state blob, if any */ +} __attribute__ ((packed)); +/* followed by flock blob */ + +struct ceph_mds_cap_reconnect_v1 { + __le64 cap_id; + __le32 wanted; + __le32 issued; + __le64 size; + struct ceph_timespec mtime, atime; + __le64 snaprealm; + __le64 pathbase; /* base ino for our path to this ino */ +} __attribute__ ((packed)); + +struct ceph_mds_snaprealm_reconnect { + __le64 ino; /* snap realm base */ + __le64 seq; /* snap seq for this snap realm */ + __le64 parent; /* parent realm */ +} __attribute__ ((packed)); + +/* + * snaps + */ +enum { + CEPH_SNAP_OP_UPDATE, /* CREATE or DESTROY */ + CEPH_SNAP_OP_CREATE, + CEPH_SNAP_OP_DESTROY, + CEPH_SNAP_OP_SPLIT, +}; + +extern const char *ceph_snap_op_name(int o); + +/* snap msg header */ +struct ceph_mds_snap_head { + __le32 op; /* CEPH_SNAP_OP_* */ + __le64 split; /* ino to split off, if any */ + __le32 num_split_inos; /* # inos belonging to new child realm */ + __le32 num_split_realms; /* # child realms udner new child realm */ + __le32 trace_len; /* size of snap trace blob */ +} __attribute__ ((packed)); +/* followed by split ino list, then split realms, then the trace blob */ + +/* + * encode info about a snaprealm, as viewed by a client + */ +struct ceph_mds_snap_realm { + __le64 ino; /* ino */ + 
__le64 created; /* snap: when created */ + __le64 parent; /* ino: parent realm */ + __le64 parent_since; /* snap: same parent since */ + __le64 seq; /* snap: version */ + __le32 num_snaps; + __le32 num_prior_parent_snaps; +} __attribute__ ((packed)); +/* followed by my snap list, then prior parent snap list */ + +/* + * quotas + */ +struct ceph_mds_quota { + __le64 ino; /* ino */ + struct ceph_timespec rctime; + __le64 rbytes; /* dir stats */ + __le64 rfiles; + __le64 rsubdirs; + __u8 struct_v; /* compat */ + __u8 struct_compat; + __le32 struct_len; + __le64 max_bytes; /* quota max. bytes */ + __le64 max_files; /* quota max. files */ +} __attribute__ ((packed)); + +#endif diff --git a/include/linux/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h new file mode 100644 index 000000000..fda474c7a --- /dev/null +++ b/include/linux/ceph/ceph_hash.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef FS_CEPH_HASH_H +#define FS_CEPH_HASH_H + +#define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */ +#define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */ + +extern unsigned ceph_str_hash_linux(const char *s, unsigned len); +extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len); + +extern unsigned ceph_str_hash(int type, const char *s, unsigned len); +extern const char *ceph_str_hash_name(int type); + +#endif diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h new file mode 100644 index 000000000..bea6c77d2 --- /dev/null +++ b/include/linux/ceph/cls_lock_client.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CEPH_CLS_LOCK_CLIENT_H +#define _LINUX_CEPH_CLS_LOCK_CLIENT_H + +#include + +enum ceph_cls_lock_type { + CEPH_CLS_LOCK_NONE = 0, + CEPH_CLS_LOCK_EXCLUSIVE = 1, + CEPH_CLS_LOCK_SHARED = 2, +}; + +struct ceph_locker_id { + struct ceph_entity_name name; /* locker's client name */ + char *cookie; /* locker's cookie */ +}; + +struct ceph_locker_info { + struct ceph_entity_addr addr; /* locker's address */ +}; + +struct ceph_locker { + struct ceph_locker_id id; + struct ceph_locker_info info; +}; + +int ceph_cls_lock(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, u8 type, char *cookie, + char *tag, char *desc, u8 flags); +int ceph_cls_unlock(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, char *cookie); +int ceph_cls_break_lock(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, char *cookie, + struct ceph_entity_name *locker); +int ceph_cls_set_cookie(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, u8 type, char *old_cookie, + char *tag, char *new_cookie); + +void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers); + +int ceph_cls_lock_info(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, u8 *type, char **tag, + struct ceph_locker **lockers, u32 *num_lockers); + +#endif diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h new file mode 100644 index 000000000..fa5f9b7f5 --- /dev/null +++ b/include/linux/ceph/debugfs.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_DEBUGFS_H +#define _FS_CEPH_DEBUGFS_H + +#include +#include + +#define CEPH_DEFINE_SHOW_FUNC(name) \ +static int name##_open(struct inode *inode, struct file 
*file) \ +{ \ + return single_open(file, name, inode->i_private); \ +} \ + \ +static const struct file_operations name##_fops = { \ + .open = name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +}; + +/* debugfs.c */ +extern int ceph_debugfs_init(void); +extern void ceph_debugfs_cleanup(void); +extern int ceph_debugfs_client_init(struct ceph_client *client); +extern void ceph_debugfs_client_cleanup(struct ceph_client *client); + +#endif + diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h new file mode 100644 index 000000000..a6c2a48d4 --- /dev/null +++ b/include/linux/ceph/decode.h @@ -0,0 +1,381 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CEPH_DECODE_H +#define __CEPH_DECODE_H + +#include +#include +#include +#include +#include + +#include + +/* + * in all cases, + * void **p pointer to position pointer + * void *end pointer to end of buffer (last byte + 1) + */ + +static inline u64 ceph_decode_64(void **p) +{ + u64 v = get_unaligned_le64(*p); + *p += sizeof(u64); + return v; +} +static inline u32 ceph_decode_32(void **p) +{ + u32 v = get_unaligned_le32(*p); + *p += sizeof(u32); + return v; +} +static inline u16 ceph_decode_16(void **p) +{ + u16 v = get_unaligned_le16(*p); + *p += sizeof(u16); + return v; +} +static inline u8 ceph_decode_8(void **p) +{ + u8 v = *(u8 *)*p; + (*p)++; + return v; +} +static inline void ceph_decode_copy(void **p, void *pv, size_t n) +{ + memcpy(pv, *p, n); + *p += n; +} + +/* + * bounds check input. + */ +static inline bool ceph_has_room(void **p, void *end, size_t n) +{ + return end >= *p && n <= end - *p; +} + +#define ceph_decode_need(p, end, n, bad) \ + do { \ + if (!likely(ceph_has_room(p, end, n))) \ + goto bad; \ + } while (0) + +#define ceph_decode_64_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u64), bad); \ + v = ceph_decode_64(p); \ + } while (0) +#define ceph_decode_32_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u32), bad); \ + v = ceph_decode_32(p); \ + } while (0) +#define ceph_decode_16_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u16), bad); \ + v = ceph_decode_16(p); \ + } while (0) +#define ceph_decode_8_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u8), bad); \ + v = ceph_decode_8(p); \ + } while (0) + +#define ceph_decode_copy_safe(p, end, pv, n, bad) \ + do { \ + ceph_decode_need(p, end, n, bad); \ + ceph_decode_copy(p, pv, n); \ + } while (0) + +/* + * Allocate a buffer big enough to hold the wire-encoded string, and + * decode the string into it. The resulting string will always be + * terminated with '\0'. If successful, *p will be advanced + * past the decoded data. Also, if lenp is not a null pointer, the + * length (not including the terminating '\0') will be recorded in + * *lenp. Note that a zero-length string is a valid return value. + * + * Returns a pointer to the newly-allocated string buffer, or a + * pointer-coded errno if an error occurs. Neither *p nor *lenp + * will have been updated if an error is returned. 
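+ *
+ * A typical call looks like this (a hedged sketch; the gfp flag and the
+ * surrounding names are the caller's choice):
+ *
+ *	size_t len;
+ *	char *name = ceph_extract_encoded_string(&p, end, &len, GFP_NOFS);
+ *
+ *	if (IS_ERR(name))
+ *		return PTR_ERR(name);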
+ * + * There are two possible failures: + * - converting the string would require accessing memory at or + * beyond the "end" pointer provided (-ERANGE) + * - memory could not be allocated for the result (-ENOMEM) + */ +static inline char *ceph_extract_encoded_string(void **p, void *end, + size_t *lenp, gfp_t gfp) +{ + u32 len; + void *sp = *p; + char *buf; + + ceph_decode_32_safe(&sp, end, len, bad); + if (!ceph_has_room(&sp, end, len)) + goto bad; + + buf = kmalloc(len + 1, gfp); + if (!buf) + return ERR_PTR(-ENOMEM); + + if (len) + memcpy(buf, sp, len); + buf[len] = '\0'; + + *p = (char *) *p + sizeof (u32) + len; + if (lenp) + *lenp = (size_t) len; + + return buf; + +bad: + return ERR_PTR(-ERANGE); +} + +/* + * skip helpers + */ +#define ceph_decode_skip_n(p, end, n, bad) \ + do { \ + ceph_decode_need(p, end, n, bad); \ + *p += n; \ + } while (0) + +#define ceph_decode_skip_64(p, end, bad) \ +ceph_decode_skip_n(p, end, sizeof(u64), bad) + +#define ceph_decode_skip_32(p, end, bad) \ +ceph_decode_skip_n(p, end, sizeof(u32), bad) + +#define ceph_decode_skip_16(p, end, bad) \ +ceph_decode_skip_n(p, end, sizeof(u16), bad) + +#define ceph_decode_skip_8(p, end, bad) \ +ceph_decode_skip_n(p, end, sizeof(u8), bad) + +#define ceph_decode_skip_string(p, end, bad) \ + do { \ + u32 len; \ + \ + ceph_decode_32_safe(p, end, len, bad); \ + ceph_decode_skip_n(p, end, len, bad); \ + } while (0) + +#define ceph_decode_skip_set(p, end, type, bad) \ + do { \ + u32 len; \ + \ + ceph_decode_32_safe(p, end, len, bad); \ + while (len--) \ + ceph_decode_skip_##type(p, end, bad); \ + } while (0) + +#define ceph_decode_skip_map(p, end, ktype, vtype, bad) \ + do { \ + u32 len; \ + \ + ceph_decode_32_safe(p, end, len, bad); \ + while (len--) { \ + ceph_decode_skip_##ktype(p, end, bad); \ + ceph_decode_skip_##vtype(p, end, bad); \ + } \ + } while (0) + +#define ceph_decode_skip_map_of_map(p, end, ktype1, ktype2, vtype2, bad) \ + do { \ + u32 len; \ + \ + ceph_decode_32_safe(p, end, len, bad); \ + while (len--) { \ + ceph_decode_skip_##ktype1(p, end, bad); \ + ceph_decode_skip_map(p, end, ktype2, vtype2, bad); \ + } \ + } while (0) + +/* + * struct ceph_timespec <-> struct timespec64 + */ +static inline void ceph_decode_timespec64(struct timespec64 *ts, + const struct ceph_timespec *tv) +{ + /* + * This will still overflow in year 2106. We could extend + * the protocol to steal two more bits from tv_nsec to + * add three more 136 year epochs after that the way ext4 + * does if necessary. 
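+	 * (The wire format stores tv_sec as an unsigned 32-bit little-endian
+	 * value, as the le32_to_cpu()/cpu_to_le32() conversions below show,
+	 * which is where the 2106 limit comes from.)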
+ */ + ts->tv_sec = (time64_t)le32_to_cpu(tv->tv_sec); + ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec); +} +static inline void ceph_encode_timespec64(struct ceph_timespec *tv, + const struct timespec64 *ts) +{ + tv->tv_sec = cpu_to_le32((u32)ts->tv_sec); + tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec); +} + +/* + * sockaddr_storage <-> ceph_sockaddr + */ +static inline void ceph_encode_addr(struct ceph_entity_addr *a) +{ + __be16 ss_family = htons(a->in_addr.ss_family); + a->in_addr.ss_family = *(__u16 *)&ss_family; +} +static inline void ceph_decode_addr(struct ceph_entity_addr *a) +{ + __be16 ss_family = *(__be16 *)&a->in_addr.ss_family; + a->in_addr.ss_family = ntohs(ss_family); + WARN_ON(a->in_addr.ss_family == 512); +} + +/* + * encoders + */ +static inline void ceph_encode_64(void **p, u64 v) +{ + put_unaligned_le64(v, (__le64 *)*p); + *p += sizeof(u64); +} +static inline void ceph_encode_32(void **p, u32 v) +{ + put_unaligned_le32(v, (__le32 *)*p); + *p += sizeof(u32); +} +static inline void ceph_encode_16(void **p, u16 v) +{ + put_unaligned_le16(v, (__le16 *)*p); + *p += sizeof(u16); +} +static inline void ceph_encode_8(void **p, u8 v) +{ + *(u8 *)*p = v; + (*p)++; +} +static inline void ceph_encode_copy(void **p, const void *s, int len) +{ + memcpy(*p, s, len); + *p += len; +} + +/* + * filepath, string encoders + */ +static inline void ceph_encode_filepath(void **p, void *end, + u64 ino, const char *path) +{ + u32 len = path ? strlen(path) : 0; + BUG_ON(*p + 1 + sizeof(ino) + sizeof(len) + len > end); + ceph_encode_8(p, 1); + ceph_encode_64(p, ino); + ceph_encode_32(p, len); + if (len) + memcpy(*p, path, len); + *p += len; +} + +static inline void ceph_encode_string(void **p, void *end, + const char *s, u32 len) +{ + BUG_ON(*p + sizeof(len) + len > end); + ceph_encode_32(p, len); + if (len) + memcpy(*p, s, len); + *p += len; +} + +/* + * version and length starting block encoders/decoders + */ + +/* current code version (u8) + compat code version (u8) + len of struct (u32) */ +#define CEPH_ENCODING_START_BLK_LEN 6 + +/** + * ceph_start_encoding - start encoding block + * @struct_v: current (code) version of the encoding + * @struct_compat: oldest code version that can decode it + * @struct_len: length of struct encoding + */ +static inline void ceph_start_encoding(void **p, u8 struct_v, u8 struct_compat, + u32 struct_len) +{ + ceph_encode_8(p, struct_v); + ceph_encode_8(p, struct_compat); + ceph_encode_32(p, struct_len); +} + +/** + * ceph_start_decoding - start decoding block + * @v: current version of the encoding that the code supports + * @name: name of the struct (free-form) + * @struct_v: out param for the encoding version + * @struct_len: out param for the length of struct encoding + * + * Validates the length of struct encoding, so unsafe ceph_decode_* + * variants can be used for decoding. 
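
As an illustrative aside (not part of the patch): a hypothetical encode/decode pair for a versioned sub-structure, using ceph_start_encoding() above and ceph_start_decoding() defined just below. The writer declares "this is version 2, decodable by readers that understand version 1"; the reader rejects anything newer than it can handle.

        static void encode_example(void **p, u32 payload_len)
        {
                ceph_start_encoding(p, 2, 1, payload_len);
                /* ... payload_len bytes of payload follow ... */
        }

        static int decode_example(void **p, void *end)
        {
                u8 struct_v;
                u32 struct_len;
                int ret;

                ret = ceph_start_decoding(p, end, 2, "example",
                                          &struct_v, &struct_len);
                if (ret)
                        return ret;

                /* struct_len bytes are guaranteed to be present here, so the
                 * unsafe ceph_decode_*() variants may be used for the payload */
                return 0;
        }
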
+ */ +static inline int ceph_start_decoding(void **p, void *end, u8 v, + const char *name, u8 *struct_v, + u32 *struct_len) +{ + u8 struct_compat; + + ceph_decode_need(p, end, CEPH_ENCODING_START_BLK_LEN, bad); + *struct_v = ceph_decode_8(p); + struct_compat = ceph_decode_8(p); + if (v < struct_compat) { + pr_warn("got struct_v %d struct_compat %d > %d of %s\n", + *struct_v, struct_compat, v, name); + return -EINVAL; + } + + *struct_len = ceph_decode_32(p); + ceph_decode_need(p, end, *struct_len, bad); + return 0; + +bad: + return -ERANGE; +} + +#define ceph_encode_need(p, end, n, bad) \ + do { \ + if (!likely(ceph_has_room(p, end, n))) \ + goto bad; \ + } while (0) + +#define ceph_encode_64_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u64), bad); \ + ceph_encode_64(p, v); \ + } while (0) +#define ceph_encode_32_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u32), bad); \ + ceph_encode_32(p, v); \ + } while (0) +#define ceph_encode_16_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u16), bad); \ + ceph_encode_16(p, v); \ + } while (0) +#define ceph_encode_8_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u8), bad); \ + ceph_encode_8(p, v); \ + } while (0) + +#define ceph_encode_copy_safe(p, end, pv, n, bad) \ + do { \ + ceph_encode_need(p, end, n, bad); \ + ceph_encode_copy(p, pv, n); \ + } while (0) +#define ceph_encode_string_safe(p, end, s, n, bad) \ + do { \ + ceph_encode_need(p, end, n, bad); \ + ceph_encode_string(p, end, s, n); \ + } while (0) + + +#endif diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h new file mode 100644 index 000000000..0b589e684 --- /dev/null +++ b/include/linux/ceph/libceph.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_LIBCEPH_H +#define _FS_CEPH_LIBCEPH_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * mount options + */ +#define CEPH_OPT_FSID (1<<0) +#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ +#define CEPH_OPT_MYIP (1<<2) /* specified my ip */ +#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ +#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ +#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ +#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ + +#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) + +#define ceph_set_opt(client, opt) \ + (client)->options->flags |= CEPH_OPT_##opt; +#define ceph_test_opt(client, opt) \ + (!!((client)->options->flags & CEPH_OPT_##opt)) + +struct ceph_options { + int flags; + struct ceph_fsid fsid; + struct ceph_entity_addr my_addr; + unsigned long mount_timeout; /* jiffies */ + unsigned long osd_idle_ttl; /* jiffies */ + unsigned long osd_keepalive_timeout; /* jiffies */ + unsigned long osd_request_timeout; /* jiffies */ + + /* + * any type that can't be simply compared or doesn't need need + * to be compared should go beyond this point, + * ceph_compare_options() should be updated accordingly + */ + + struct ceph_entity_addr *mon_addr; /* should be the first + pointer type of args */ + int num_mon; + char *name; + struct ceph_crypto_key *key; +}; + +/* + * defaults + */ +#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) +#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) +#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 
1000) +#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */ + +#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) +#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) +#define CEPH_MONC_PING_TIMEOUT msecs_to_jiffies(30 * 1000) +#define CEPH_MONC_HUNT_BACKOFF 2 +#define CEPH_MONC_HUNT_MAX_MULT 10 + +#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) +#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) + +/* + * Handle the largest possible rbd object in one message. + * There is no limit on the size of cephfs objects, but it has to obey + * rsize and wsize mount options anyway. + */ +#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024) + +#define CEPH_AUTH_NAME_DEFAULT "guest" + +/* mount state */ +enum { + CEPH_MOUNT_MOUNTING, + CEPH_MOUNT_MOUNTED, + CEPH_MOUNT_UNMOUNTING, + CEPH_MOUNT_UNMOUNTED, + CEPH_MOUNT_SHUTDOWN, +}; + +static inline unsigned long ceph_timeout_jiffies(unsigned long timeout) +{ + return timeout ?: MAX_SCHEDULE_TIMEOUT; +} + +struct ceph_mds_client; + +/* + * per client state + * + * possibly shared by multiple mount points, if they are + * mounting the same ceph filesystem/cluster. + */ +struct ceph_client { + struct ceph_fsid fsid; + bool have_fsid; + + void *private; + + struct ceph_options *options; + + struct mutex mount_mutex; /* serialize mount attempts */ + wait_queue_head_t auth_wq; + int auth_err; + + int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *); + + u64 supported_features; + u64 required_features; + + struct ceph_messenger msgr; /* messenger instance */ + struct ceph_mon_client monc; + struct ceph_osd_client osdc; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_dir; + struct dentry *debugfs_monmap; + struct dentry *debugfs_osdmap; + struct dentry *debugfs_options; +#endif +}; + +#define from_msgr(ms) container_of(ms, struct ceph_client, msgr) + + +/* + * snapshots + */ + +/* + * A "snap context" is the set of existing snapshots when we + * write data. It is used by the OSD to guide its COW behavior. + * + * The ceph_snap_context is refcounted, and attached to each dirty + * page, indicating which context the dirty data belonged when it was + * dirtied. + */ +struct ceph_snap_context { + refcount_t nref; + u64 seq; + u32 num_snaps; + u64 snaps[]; +}; + +extern struct ceph_snap_context *ceph_create_snap_context(u32 snap_count, + gfp_t gfp_flags); +extern struct ceph_snap_context *ceph_get_snap_context( + struct ceph_snap_context *sc); +extern void ceph_put_snap_context(struct ceph_snap_context *sc); + +/* + * calculate the number of pages a given length and offset map onto, + * if we align the data. + */ +static inline int calc_pages_for(u64 off, u64 len) +{ + return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) - + (off >> PAGE_SHIFT); +} + +#define RB_BYVAL(a) (a) +#define RB_BYPTR(a) (&(a)) +#define RB_CMP3WAY(a, b) ((a) < (b) ? 
-1 : (a) > (b)) + +#define DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \ +static void insert_##name(struct rb_root *root, type *t) \ +{ \ + struct rb_node **n = &root->rb_node; \ + struct rb_node *parent = NULL; \ + \ + BUG_ON(!RB_EMPTY_NODE(&t->nodefld)); \ + \ + while (*n) { \ + type *cur = rb_entry(*n, type, nodefld); \ + int cmp; \ + \ + parent = *n; \ + cmp = cmpexp(keyexp(t->keyfld), keyexp(cur->keyfld)); \ + if (cmp < 0) \ + n = &(*n)->rb_left; \ + else if (cmp > 0) \ + n = &(*n)->rb_right; \ + else \ + BUG(); \ + } \ + \ + rb_link_node(&t->nodefld, parent, n); \ + rb_insert_color(&t->nodefld, root); \ +} \ +static void erase_##name(struct rb_root *root, type *t) \ +{ \ + BUG_ON(RB_EMPTY_NODE(&t->nodefld)); \ + rb_erase(&t->nodefld, root); \ + RB_CLEAR_NODE(&t->nodefld); \ +} + +/* + * @lookup_param_type is a parameter and not constructed from (@type, + * @keyfld) with typeof() because adding const is too unwieldy. + */ +#define DEFINE_RB_LOOKUP_FUNC2(name, type, keyfld, cmpexp, keyexp, \ + lookup_param_type, nodefld) \ +static type *lookup_##name(struct rb_root *root, lookup_param_type key) \ +{ \ + struct rb_node *n = root->rb_node; \ + \ + while (n) { \ + type *cur = rb_entry(n, type, nodefld); \ + int cmp; \ + \ + cmp = cmpexp(key, keyexp(cur->keyfld)); \ + if (cmp < 0) \ + n = n->rb_left; \ + else if (cmp > 0) \ + n = n->rb_right; \ + else \ + return cur; \ + } \ + \ + return NULL; \ +} + +#define DEFINE_RB_FUNCS2(name, type, keyfld, cmpexp, keyexp, \ + lookup_param_type, nodefld) \ +DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \ +DEFINE_RB_LOOKUP_FUNC2(name, type, keyfld, cmpexp, keyexp, \ + lookup_param_type, nodefld) + +/* + * Shorthands for integer keys. + */ +#define DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ +DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, RB_CMP3WAY, RB_BYVAL, nodefld) + +#define DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) \ +extern type __lookup_##name##_key; \ +DEFINE_RB_LOOKUP_FUNC2(name, type, keyfld, RB_CMP3WAY, RB_BYVAL, \ + typeof(__lookup_##name##_key.keyfld), nodefld) + +#define DEFINE_RB_FUNCS(name, type, keyfld, nodefld) \ +DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ +DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) + +extern struct kmem_cache *ceph_inode_cachep; +extern struct kmem_cache *ceph_cap_cachep; +extern struct kmem_cache *ceph_cap_flush_cachep; +extern struct kmem_cache *ceph_dentry_cachep; +extern struct kmem_cache *ceph_file_cachep; +extern struct kmem_cache *ceph_dir_file_cachep; + +/* ceph_common.c */ +extern bool libceph_compatible(void *data); + +extern const char *ceph_msg_type_name(int type); +extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); +extern void *ceph_kvmalloc(size_t size, gfp_t flags); + +extern struct ceph_options *ceph_parse_options(char *options, + const char *dev_name, const char *dev_name_end, + int (*parse_extra_token)(char *c, void *private), + void *private); +int ceph_print_client_options(struct seq_file *m, struct ceph_client *client); +extern void ceph_destroy_options(struct ceph_options *opt); +extern int ceph_compare_options(struct ceph_options *new_opt, + struct ceph_client *client); +struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private); +struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client); +u64 ceph_client_gid(struct ceph_client *client); +extern void ceph_destroy_client(struct ceph_client *client); +extern int __ceph_open_session(struct ceph_client 
*client, + unsigned long started); +extern int ceph_open_session(struct ceph_client *client); +int ceph_wait_for_latest_osdmap(struct ceph_client *client, + unsigned long timeout); + +/* pagevec.c */ +extern void ceph_release_page_vector(struct page **pages, int num_pages); + +extern struct page **ceph_get_direct_page_vector(const void __user *data, + int num_pages, + bool write_page); +extern void ceph_put_page_vector(struct page **pages, int num_pages, + bool dirty); +extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); +extern int ceph_copy_user_to_page_vector(struct page **pages, + const void __user *data, + loff_t off, size_t len); +extern void ceph_copy_to_page_vector(struct page **pages, + const void *data, + loff_t off, size_t len); +extern void ceph_copy_from_page_vector(struct page **pages, + void *data, + loff_t off, size_t len); +extern void ceph_zero_page_vector_range(int off, int len, struct page **pages); + + +#endif /* _FS_CEPH_SUPER_H */ diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h new file mode 100644 index 000000000..0067d767c --- /dev/null +++ b/include/linux/ceph/mdsmap.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_MDSMAP_H +#define _FS_CEPH_MDSMAP_H + +#include +#include + +/* + * mds map - describe servers in the mds cluster. + * + * we limit fields to those the client actually xcares about + */ +struct ceph_mds_info { + u64 global_id; + struct ceph_entity_addr addr; + s32 state; + int num_export_targets; + bool laggy; + u32 *export_targets; +}; + +struct ceph_mdsmap { + u32 m_epoch, m_client_epoch, m_last_failure; + u32 m_root; + u32 m_session_timeout; /* seconds */ + u32 m_session_autoclose; /* seconds */ + u64 m_max_file_size; + u32 m_max_mds; /* size of m_addr, m_state arrays */ + int m_num_mds; + struct ceph_mds_info *m_info; + + /* which object pools file data can be stored in */ + int m_num_data_pg_pools; + u64 *m_data_pg_pools; + u64 m_cas_pg_pool; + + bool m_enabled; + bool m_damaged; + int m_num_laggy; +}; + +static inline struct ceph_entity_addr * +ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) +{ + if (w >= m->m_num_mds) + return NULL; + return &m->m_info[w].addr; +} + +static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w) +{ + BUG_ON(w < 0); + if (w >= m->m_num_mds) + return CEPH_MDS_STATE_DNE; + return m->m_info[w].state; +} + +static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) +{ + if (w >= 0 && w < m->m_num_mds) + return m->m_info[w].laggy; + return false; +} + +extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); +extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end); +extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); +extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m); + +#endif diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h new file mode 100644 index 000000000..fc2b4491e --- /dev/null +++ b/include/linux/ceph/messenger.h @@ -0,0 +1,392 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __FS_CEPH_MESSENGER_H +#define __FS_CEPH_MESSENGER_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct ceph_msg; +struct ceph_connection; + +/* + * Ceph defines these callbacks for handling connection events. + */ +struct ceph_connection_operations { + struct ceph_connection *(*get)(struct ceph_connection *); + void (*put)(struct ceph_connection *); + + /* handle an incoming message. 
*/ + void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m); + + /* authorize an outgoing connection */ + struct ceph_auth_handshake *(*get_authorizer) ( + struct ceph_connection *con, + int *proto, int force_new); + int (*add_authorizer_challenge)(struct ceph_connection *con, + void *challenge_buf, + int challenge_buf_len); + int (*verify_authorizer_reply) (struct ceph_connection *con); + int (*invalidate_authorizer)(struct ceph_connection *con); + + /* there was some error on the socket (disconnect, whatever) */ + void (*fault) (struct ceph_connection *con); + + /* a remote host as terminated a message exchange session, and messages + * we sent (or they tried to send us) may be lost. */ + void (*peer_reset) (struct ceph_connection *con); + + struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, + struct ceph_msg_header *hdr, + int *skip); + + void (*reencode_message) (struct ceph_msg *msg); + + int (*sign_message) (struct ceph_msg *msg); + int (*check_message_signature) (struct ceph_msg *msg); +}; + +/* use format string %s%d */ +#define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num) + +struct ceph_messenger { + struct ceph_entity_inst inst; /* my name+address */ + struct ceph_entity_addr my_enc_addr; + + atomic_t stopping; + possible_net_t net; + + /* + * the global_seq counts connections i (attempt to) initiate + * in order to disambiguate certain connect race conditions. + */ + u32 global_seq; + spinlock_t global_seq_lock; +}; + +enum ceph_msg_data_type { + CEPH_MSG_DATA_NONE, /* message contains no data payload */ + CEPH_MSG_DATA_PAGES, /* data source/destination is a page array */ + CEPH_MSG_DATA_PAGELIST, /* data source/destination is a pagelist */ +#ifdef CONFIG_BLOCK + CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */ +#endif /* CONFIG_BLOCK */ + CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */ +}; + +static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) +{ + switch (type) { + case CEPH_MSG_DATA_NONE: + case CEPH_MSG_DATA_PAGES: + case CEPH_MSG_DATA_PAGELIST: +#ifdef CONFIG_BLOCK + case CEPH_MSG_DATA_BIO: +#endif /* CONFIG_BLOCK */ + case CEPH_MSG_DATA_BVECS: + return true; + default: + return false; + } +} + +#ifdef CONFIG_BLOCK + +struct ceph_bio_iter { + struct bio *bio; + struct bvec_iter iter; +}; + +#define __ceph_bio_iter_advance_step(it, n, STEP) do { \ + unsigned int __n = (n), __cur_n; \ + \ + while (__n) { \ + BUG_ON(!(it)->iter.bi_size); \ + __cur_n = min((it)->iter.bi_size, __n); \ + (void)(STEP); \ + bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \ + if (!(it)->iter.bi_size && (it)->bio->bi_next) { \ + dout("__ceph_bio_iter_advance_step next bio\n"); \ + (it)->bio = (it)->bio->bi_next; \ + (it)->iter = (it)->bio->bi_iter; \ + } \ + __n -= __cur_n; \ + } \ +} while (0) + +/* + * Advance @it by @n bytes. + */ +#define ceph_bio_iter_advance(it, n) \ + __ceph_bio_iter_advance_step(it, n, 0) + +/* + * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec. 
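
As an illustrative aside (not part of the patch), stepping back to struct ceph_connection_operations at the top of this header: a minimal, hypothetical ops table wiring up only reference counting and message dispatch. Real users (the mon and osd clients) also implement the authorizer and fault callbacks; the example_* names are made up.

        static struct ceph_connection *example_con_get(struct ceph_connection *con)
        {
                /* take a reference on the object that owns @con; returning
                 * NULL would mean the owner is already going away */
                return con;
        }

        static void example_con_put(struct ceph_connection *con)
        {
                /* drop the reference taken in example_con_get() */
        }

        static void example_dispatch(struct ceph_connection *con,
                                     struct ceph_msg *msg)
        {
                pr_debug("received message type %d\n", le16_to_cpu(msg->hdr.type));
                ceph_msg_put(msg);      /* dispatch consumes the message ref */
        }

        static const struct ceph_connection_operations example_con_ops = {
                .get            = example_con_get,
                .put            = example_con_put,
                .dispatch       = example_dispatch,
        };
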
+ */ +#define ceph_bio_iter_advance_step(it, n, BVEC_STEP) \ + __ceph_bio_iter_advance_step(it, n, ({ \ + struct bio_vec bv; \ + struct bvec_iter __cur_iter; \ + \ + __cur_iter = (it)->iter; \ + __cur_iter.bi_size = __cur_n; \ + __bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \ + (void)(BVEC_STEP); \ + })) + +#endif /* CONFIG_BLOCK */ + +struct ceph_bvec_iter { + struct bio_vec *bvecs; + struct bvec_iter iter; +}; + +#define __ceph_bvec_iter_advance_step(it, n, STEP) do { \ + BUG_ON((n) > (it)->iter.bi_size); \ + (void)(STEP); \ + bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \ +} while (0) + +/* + * Advance @it by @n bytes. + */ +#define ceph_bvec_iter_advance(it, n) \ + __ceph_bvec_iter_advance_step(it, n, 0) + +/* + * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec. + */ +#define ceph_bvec_iter_advance_step(it, n, BVEC_STEP) \ + __ceph_bvec_iter_advance_step(it, n, ({ \ + struct bio_vec bv; \ + struct bvec_iter __cur_iter; \ + \ + __cur_iter = (it)->iter; \ + __cur_iter.bi_size = (n); \ + for_each_bvec(bv, (it)->bvecs, __cur_iter, __cur_iter) \ + (void)(BVEC_STEP); \ + })) + +#define ceph_bvec_iter_shorten(it, n) do { \ + BUG_ON((n) > (it)->iter.bi_size); \ + (it)->iter.bi_size = (n); \ +} while (0) + +struct ceph_msg_data { + struct list_head links; /* ceph_msg->data */ + enum ceph_msg_data_type type; + union { +#ifdef CONFIG_BLOCK + struct { + struct ceph_bio_iter bio_pos; + u32 bio_length; + }; +#endif /* CONFIG_BLOCK */ + struct ceph_bvec_iter bvec_pos; + struct { + struct page **pages; /* NOT OWNER. */ + size_t length; /* total # bytes */ + unsigned int alignment; /* first page */ + }; + struct ceph_pagelist *pagelist; + }; +}; + +struct ceph_msg_data_cursor { + size_t total_resid; /* across all data items */ + struct list_head *data_head; /* = &ceph_msg->data */ + + struct ceph_msg_data *data; /* current data item */ + size_t resid; /* bytes not yet consumed */ + bool last_piece; /* current is last piece */ + bool need_crc; /* crc update needed */ + union { +#ifdef CONFIG_BLOCK + struct ceph_bio_iter bio_iter; +#endif /* CONFIG_BLOCK */ + struct bvec_iter bvec_iter; + struct { /* pages */ + unsigned int page_offset; /* offset in page */ + unsigned short page_index; /* index in array */ + unsigned short page_count; /* pages in array */ + }; + struct { /* pagelist */ + struct page *page; /* page from list */ + size_t offset; /* bytes from list */ + }; + }; +}; + +/* + * a single message. it contains a header (src, dest, message type, etc.), + * footer (crc values, mainly), a "front" message body, and possibly a + * data payload (stored in some number of pages). + */ +struct ceph_msg { + struct ceph_msg_header hdr; /* header */ + union { + struct ceph_msg_footer footer; /* footer */ + struct ceph_msg_footer_old old_footer; /* old format footer */ + }; + struct kvec front; /* unaligned blobs of message */ + struct ceph_buffer *middle; + + size_t data_length; + struct list_head data; + struct ceph_msg_data_cursor cursor; + + struct ceph_connection *con; + struct list_head list_head; /* links for connection lists */ + + struct kref kref; + bool more_to_follow; + bool needs_out_seq; + int front_alloc_len; + unsigned long ack_stamp; /* tx: when we were acked */ + + struct ceph_msgpool *pool; +}; + +/* ceph connection fault delay defaults, for exponential backoff */ +#define BASE_DELAY_INTERVAL (HZ/2) +#define MAX_DELAY_INTERVAL (5 * 60 * HZ) + +/* + * A single connection with another host. 
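
As an illustrative aside (not part of the patch): a hypothetical helper showing how an existing bio_vec array is described to the messenger with struct ceph_bvec_iter; ceph_msg_data_add_bvecs() is declared further down in this header.

        static void example_add_bvecs(struct ceph_msg *msg,
                                      struct bio_vec *bvecs, u32 bytes)
        {
                struct ceph_bvec_iter it = {
                        .bvecs = bvecs,
                        .iter = { .bi_size = bytes },
                };

                ceph_msg_data_add_bvecs(msg, &it);
        }
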
+ * + * We maintain a queue of outgoing messages, and some session state to + * ensure that we can preserve the lossless, ordered delivery of + * messages in the case of a TCP disconnect. + */ +struct ceph_connection { + void *private; + + const struct ceph_connection_operations *ops; + + struct ceph_messenger *msgr; + + atomic_t sock_state; + struct socket *sock; + struct ceph_entity_addr peer_addr; /* peer address */ + struct ceph_entity_addr peer_addr_for_me; + + unsigned long flags; + unsigned long state; + const char *error_msg; /* error message, if any */ + + struct ceph_entity_name peer_name; /* peer name */ + + u64 peer_features; + u32 connect_seq; /* identify the most recent connection + attempt for this connection, client */ + u32 peer_global_seq; /* peer's global seq for this connection */ + + struct ceph_auth_handshake *auth; + int auth_retry; /* true if we need a newer authorizer */ + + struct mutex mutex; + + /* out queue */ + struct list_head out_queue; + struct list_head out_sent; /* sending or sent but unacked */ + u64 out_seq; /* last message queued for send */ + + u64 in_seq, in_seq_acked; /* last message received, acked */ + + /* connection negotiation temps */ + char in_banner[CEPH_BANNER_MAX_LEN]; + struct ceph_msg_connect out_connect; + struct ceph_msg_connect_reply in_reply; + struct ceph_entity_addr actual_peer_addr; + + /* message out temps */ + struct ceph_msg_header out_hdr; + struct ceph_msg *out_msg; /* sending message (== tail of + out_sent) */ + bool out_msg_done; + + struct kvec out_kvec[8], /* sending header/footer data */ + *out_kvec_cur; + int out_kvec_left; /* kvec's left in out_kvec */ + int out_skip; /* skip this many bytes */ + int out_kvec_bytes; /* total bytes left */ + int out_more; /* there is more data after the kvecs */ + __le64 out_temp_ack; /* for writing an ack */ + struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 + stamp */ + + /* message in temps */ + struct ceph_msg_header in_hdr; + struct ceph_msg *in_msg; + u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ + + char in_tag; /* protocol control byte */ + int in_base_pos; /* bytes read */ + __le64 in_temp_ack; /* for reading an ack */ + + struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */ + + struct delayed_work work; /* send|recv work */ + unsigned long delay; /* current delay interval */ +}; + + +extern const char *ceph_pr_addr(const struct sockaddr_storage *ss); +extern int ceph_parse_ips(const char *c, const char *end, + struct ceph_entity_addr *addr, + int max_count, int *count); + + +extern int ceph_msgr_init(void); +extern void ceph_msgr_exit(void); +extern void ceph_msgr_flush(void); + +extern void ceph_messenger_init(struct ceph_messenger *msgr, + struct ceph_entity_addr *myaddr); +extern void ceph_messenger_fini(struct ceph_messenger *msgr); + +extern void ceph_con_init(struct ceph_connection *con, void *private, + const struct ceph_connection_operations *ops, + struct ceph_messenger *msgr); +extern void ceph_con_open(struct ceph_connection *con, + __u8 entity_type, __u64 entity_num, + struct ceph_entity_addr *addr); +extern bool ceph_con_opened(struct ceph_connection *con); +extern void ceph_con_close(struct ceph_connection *con); +extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); + +extern void ceph_msg_revoke(struct ceph_msg *msg); +extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); + +extern void ceph_con_keepalive(struct ceph_connection *con); +extern bool ceph_con_keepalive_expired(struct 
ceph_connection *con, + unsigned long interval); + +extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, + size_t length, size_t alignment); +extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg, + struct ceph_pagelist *pagelist); +#ifdef CONFIG_BLOCK +void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos, + u32 length); +#endif /* CONFIG_BLOCK */ +void ceph_msg_data_add_bvecs(struct ceph_msg *msg, + struct ceph_bvec_iter *bvec_pos); + +extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, + bool can_fail); + +extern struct ceph_msg *ceph_msg_get(struct ceph_msg *msg); +extern void ceph_msg_put(struct ceph_msg *msg); + +extern void ceph_msg_dump(struct ceph_msg *msg); + +#endif diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h new file mode 100644 index 000000000..3a4688af7 --- /dev/null +++ b/include/linux/ceph/mon_client.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_MON_CLIENT_H +#define _FS_CEPH_MON_CLIENT_H + +#include +#include +#include + +#include + +struct ceph_client; +struct ceph_mount_args; +struct ceph_auth_client; + +/* + * The monitor map enumerates the set of all monitors. + */ +struct ceph_monmap { + struct ceph_fsid fsid; + u32 epoch; + u32 num_mon; + struct ceph_entity_inst mon_inst[0]; +}; + +struct ceph_mon_client; +struct ceph_mon_generic_request; + + +/* + * Generic mechanism for resending monitor requests. + */ +typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc, + int newmon); + +/* a pending monitor request */ +struct ceph_mon_request { + struct ceph_mon_client *monc; + struct delayed_work delayed_work; + unsigned long delay; + ceph_monc_request_func_t do_request; +}; + +typedef void (*ceph_monc_callback_t)(struct ceph_mon_generic_request *); + +/* + * ceph_mon_generic_request is being used for the statfs and + * mon_get_version requests which are being done a bit differently + * because we need to get data back to the caller + */ +struct ceph_mon_generic_request { + struct ceph_mon_client *monc; + struct kref kref; + u64 tid; + struct rb_node node; + int result; + + struct completion completion; + ceph_monc_callback_t complete_cb; + u64 private_data; /* r_tid/linger_id */ + + struct ceph_msg *request; /* original request */ + struct ceph_msg *reply; /* and reply */ + + union { + struct ceph_statfs *st; + u64 newest; + } u; +}; + +struct ceph_mon_client { + struct ceph_client *client; + struct ceph_monmap *monmap; + + struct mutex mutex; + struct delayed_work delayed_work; + + struct ceph_auth_client *auth; + struct ceph_msg *m_auth, *m_auth_reply, *m_subscribe, *m_subscribe_ack; + int pending_auth; + + bool hunting; + int cur_mon; /* last monitor i contacted */ + unsigned long sub_renew_after; + unsigned long sub_renew_sent; + struct ceph_connection con; + + bool had_a_connection; + int hunt_mult; /* [1..CEPH_MONC_HUNT_MAX_MULT] */ + + /* pending generic requests */ + struct rb_root generic_request_tree; + u64 last_tid; + + /* subs, indexed with CEPH_SUB_* */ + struct { + struct ceph_mon_subscribe_item item; + bool want; + u32 have; /* epoch */ + } subs[4]; + int fs_cluster_id; /* "mdsmap." 
sub */ + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_file; +#endif +}; + +extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end); +extern int ceph_monmap_contains(struct ceph_monmap *m, + struct ceph_entity_addr *addr); + +extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); +extern void ceph_monc_stop(struct ceph_mon_client *monc); + +enum { + CEPH_SUB_MONMAP = 0, + CEPH_SUB_OSDMAP, + CEPH_SUB_FSMAP, + CEPH_SUB_MDSMAP, +}; + +extern const char *ceph_sub_str[]; + +/* + * The model here is to indicate that we need a new map of at least + * epoch @epoch, and also call in when we receive a map. We will + * periodically rerequest the map from the monitor cluster until we + * get what we want. + */ +bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch, + bool continuous); +void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch); +void ceph_monc_renew_subs(struct ceph_mon_client *monc); + +extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, + unsigned long timeout); + +int ceph_monc_do_statfs(struct ceph_mon_client *monc, u64 data_pool, + struct ceph_statfs *buf); + +int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what, + u64 *newest); +int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what, + ceph_monc_callback_t cb, u64 private_data); + +int ceph_monc_blacklist_add(struct ceph_mon_client *monc, + struct ceph_entity_addr *client_addr); + +extern int ceph_monc_open_session(struct ceph_mon_client *monc); + +extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); + +#endif diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h new file mode 100644 index 000000000..76c98a512 --- /dev/null +++ b/include/linux/ceph/msgpool.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_MSGPOOL +#define _FS_CEPH_MSGPOOL + +#include + +/* + * we use memory pools for preallocating messages we may receive, to + * avoid unexpected OOM conditions. + */ +struct ceph_msgpool { + const char *name; + mempool_t *pool; + int type; /* preallocated message type */ + int front_len; /* preallocated payload size */ +}; + +extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type, + int front_len, int size, bool blocking, + const char *name); +extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); +extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *, + int front_len); +extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *); + +#endif diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h new file mode 100644 index 000000000..9e50aede4 --- /dev/null +++ b/include/linux/ceph/msgr.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CEPH_MSGR_H +#define CEPH_MSGR_H + +/* + * Data types for message passing layer used by Ceph. + */ + +#define CEPH_MON_PORT 6789 /* default monitor port */ + +/* + * client-side processes will try to bind to ports in this + * range, simply for the benefit of tools like nmap or wireshark + * that would like to identify the protocol. + */ +#define CEPH_PORT_FIRST 6789 +#define CEPH_PORT_START 6800 /* non-monitors start here */ +#define CEPH_PORT_LAST 6900 + +/* + * tcp connection banner. include a protocol version. and adjust + * whenever the wire protocol changes. try to keep this string length + * constant. 
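
As an illustrative aside (not part of the patch): a hypothetical sketch of the want/got subscription model described in mon_client.h above, asking the monitors for an osdmap newer than the one currently held and acknowledging it once applied.

        static void example_request_newer_osdmap(struct ceph_client *client,
                                                 u32 have_epoch)
        {
                struct ceph_mon_client *monc = &client->monc;

                if (ceph_monc_want_map(monc, CEPH_SUB_OSDMAP,
                                       have_epoch + 1, false))
                        ceph_monc_renew_subs(monc);
        }

        /* ... and once the new map message has been decoded and applied: */
        static void example_applied_osdmap(struct ceph_client *client,
                                           u32 new_epoch)
        {
                ceph_monc_got_map(&client->monc, CEPH_SUB_OSDMAP, new_epoch);
        }
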
+ */ +#define CEPH_BANNER "ceph v027" +#define CEPH_BANNER_MAX_LEN 30 + + +/* + * Rollover-safe type and comparator for 32-bit sequence numbers. + * Comparator returns -1, 0, or 1. + */ +typedef __u32 ceph_seq_t; + +static inline __s32 ceph_seq_cmp(__u32 a, __u32 b) +{ + return (__s32)a - (__s32)b; +} + + +/* + * entity_name -- logical name for a process participating in the + * network, e.g. 'mds0' or 'osd3'. + */ +struct ceph_entity_name { + __u8 type; /* CEPH_ENTITY_TYPE_* */ + __le64 num; +} __attribute__ ((packed)); + +#define CEPH_ENTITY_TYPE_MON 0x01 +#define CEPH_ENTITY_TYPE_MDS 0x02 +#define CEPH_ENTITY_TYPE_OSD 0x04 +#define CEPH_ENTITY_TYPE_CLIENT 0x08 +#define CEPH_ENTITY_TYPE_AUTH 0x20 + +#define CEPH_ENTITY_TYPE_ANY 0xFF + +extern const char *ceph_entity_type_name(int type); + +/* + * entity_addr -- network address + */ +struct ceph_entity_addr { + __le32 type; + __le32 nonce; /* unique id for process (e.g. pid) */ + struct sockaddr_storage in_addr; +} __attribute__ ((packed)); + +struct ceph_entity_inst { + struct ceph_entity_name name; + struct ceph_entity_addr addr; +} __attribute__ ((packed)); + + +/* used by message exchange protocol */ +#define CEPH_MSGR_TAG_READY 1 /* server->client: ready for messages */ +#define CEPH_MSGR_TAG_RESETSESSION 2 /* server->client: reset, try again */ +#define CEPH_MSGR_TAG_WAIT 3 /* server->client: wait for racing + incoming connection */ +#define CEPH_MSGR_TAG_RETRY_SESSION 4 /* server->client + cseq: try again + with higher cseq */ +#define CEPH_MSGR_TAG_RETRY_GLOBAL 5 /* server->client + gseq: try again + with higher gseq */ +#define CEPH_MSGR_TAG_CLOSE 6 /* closing pipe */ +#define CEPH_MSGR_TAG_MSG 7 /* message */ +#define CEPH_MSGR_TAG_ACK 8 /* message ack */ +#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ +#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ +#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ +#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ +#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */ +#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */ +#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */ +#define CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER 16 /* cephx v2 doing server challenge */ + +/* + * connection negotiation + */ +struct ceph_msg_connect { + __le64 features; /* supported feature bits */ + __le32 host_type; /* CEPH_ENTITY_TYPE_* */ + __le32 global_seq; /* count connections initiated by this host */ + __le32 connect_seq; /* count connections initiated in this session */ + __le32 protocol_version; + __le32 authorizer_protocol; + __le32 authorizer_len; + __u8 flags; /* CEPH_MSG_CONNECT_* */ +} __attribute__ ((packed)); + +struct ceph_msg_connect_reply { + __u8 tag; + __le64 features; /* feature bits for this session */ + __le32 global_seq; + __le32 connect_seq; + __le32 protocol_version; + __le32 authorizer_len; + __u8 flags; +} __attribute__ ((packed)); + +#define CEPH_MSG_CONNECT_LOSSY 1 /* messages i send may be safely dropped */ + + +/* + * message header + */ +struct ceph_msg_header_old { + __le64 seq; /* message seq# for this session */ + __le64 tid; /* transaction id */ + __le16 type; /* message type */ + __le16 priority; /* priority. 
higher value == higher priority */ + __le16 version; /* version of message encoding */ + + __le32 front_len; /* bytes in main payload */ + __le32 middle_len;/* bytes in middle payload */ + __le32 data_len; /* bytes of data payload */ + __le16 data_off; /* sender: include full offset; + receiver: mask against ~PAGE_MASK */ + + struct ceph_entity_inst src, orig_src; + __le32 reserved; + __le32 crc; /* header crc32c */ +} __attribute__ ((packed)); + +struct ceph_msg_header { + __le64 seq; /* message seq# for this session */ + __le64 tid; /* transaction id */ + __le16 type; /* message type */ + __le16 priority; /* priority. higher value == higher priority */ + __le16 version; /* version of message encoding */ + + __le32 front_len; /* bytes in main payload */ + __le32 middle_len;/* bytes in middle payload */ + __le32 data_len; /* bytes of data payload */ + __le16 data_off; /* sender: include full offset; + receiver: mask against ~PAGE_MASK */ + + struct ceph_entity_name src; + __le16 compat_version; + __le16 reserved; + __le32 crc; /* header crc32c */ +} __attribute__ ((packed)); + +#define CEPH_MSG_PRIO_LOW 64 +#define CEPH_MSG_PRIO_DEFAULT 127 +#define CEPH_MSG_PRIO_HIGH 196 +#define CEPH_MSG_PRIO_HIGHEST 255 + +/* + * follows data payload + */ +struct ceph_msg_footer_old { + __le32 front_crc, middle_crc, data_crc; + __u8 flags; +} __attribute__ ((packed)); + +struct ceph_msg_footer { + __le32 front_crc, middle_crc, data_crc; + // sig holds the 64 bits of the digital signature for the message PLR + __le64 sig; + __u8 flags; +} __attribute__ ((packed)); + +#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */ +#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */ +#define CEPH_MSG_FOOTER_SIGNED (1<<2) /* msg was signed */ + + +#endif diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h new file mode 100644 index 000000000..02096da01 --- /dev/null +++ b/include/linux/ceph/osd_client.h @@ -0,0 +1,548 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_OSD_CLIENT_H +#define _FS_CEPH_OSD_CLIENT_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +struct ceph_msg; +struct ceph_snap_context; +struct ceph_osd_request; +struct ceph_osd_client; + +/* + * completion callback for async writepages + */ +typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); + +#define CEPH_HOMELESS_OSD -1 + +/* a given osd we're communicating with */ +struct ceph_osd { + refcount_t o_ref; + struct ceph_osd_client *o_osdc; + int o_osd; + int o_incarnation; + struct rb_node o_node; + struct ceph_connection o_con; + struct rb_root o_requests; + struct rb_root o_linger_requests; + struct rb_root o_backoff_mappings; + struct rb_root o_backoffs_by_id; + struct list_head o_osd_lru; + struct ceph_auth_handshake o_auth; + unsigned long lru_ttl; + struct list_head o_keepalive_item; + struct mutex lock; +}; + +#define CEPH_OSD_SLAB_OPS 2 +#define CEPH_OSD_MAX_OPS 16 + +enum ceph_osd_data_type { + CEPH_OSD_DATA_TYPE_NONE = 0, + CEPH_OSD_DATA_TYPE_PAGES, + CEPH_OSD_DATA_TYPE_PAGELIST, +#ifdef CONFIG_BLOCK + CEPH_OSD_DATA_TYPE_BIO, +#endif /* CONFIG_BLOCK */ + CEPH_OSD_DATA_TYPE_BVECS, +}; + +struct ceph_osd_data { + enum ceph_osd_data_type type; + union { + struct { + struct page **pages; + u64 length; + u32 alignment; + bool pages_from_pool; + bool own_pages; + }; + struct ceph_pagelist *pagelist; +#ifdef CONFIG_BLOCK + struct { + struct ceph_bio_iter bio_pos; + u32 bio_length; + }; +#endif /* 
CONFIG_BLOCK */ + struct { + struct ceph_bvec_iter bvec_pos; + u32 num_bvecs; + }; + }; +}; + +struct ceph_osd_req_op { + u16 op; /* CEPH_OSD_OP_* */ + u32 flags; /* CEPH_OSD_OP_FLAG_* */ + u32 indata_len; /* request */ + u32 outdata_len; /* reply */ + s32 rval; + + union { + struct ceph_osd_data raw_data_in; + struct { + u64 offset, length; + u64 truncate_size; + u32 truncate_seq; + struct ceph_osd_data osd_data; + } extent; + struct { + u32 name_len; + u32 value_len; + __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ + __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ + struct ceph_osd_data osd_data; + } xattr; + struct { + const char *class_name; + const char *method_name; + struct ceph_osd_data request_info; + struct ceph_osd_data request_data; + struct ceph_osd_data response_data; + __u8 class_len; + __u8 method_len; + u32 indata_len; + } cls; + struct { + u64 cookie; + __u8 op; /* CEPH_OSD_WATCH_OP_ */ + u32 gen; + } watch; + struct { + struct ceph_osd_data request_data; + } notify_ack; + struct { + u64 cookie; + struct ceph_osd_data request_data; + struct ceph_osd_data response_data; + } notify; + struct { + struct ceph_osd_data response_data; + } list_watchers; + struct { + u64 expected_object_size; + u64 expected_write_size; + } alloc_hint; + }; +}; + +struct ceph_osd_request_target { + struct ceph_object_id base_oid; + struct ceph_object_locator base_oloc; + struct ceph_object_id target_oid; + struct ceph_object_locator target_oloc; + + struct ceph_pg pgid; /* last raw pg we mapped to */ + struct ceph_spg spgid; /* last actual spg we mapped to */ + u32 pg_num; + u32 pg_num_mask; + struct ceph_osds acting; + struct ceph_osds up; + int size; + int min_size; + bool sort_bitwise; + bool recovery_deletes; + + unsigned int flags; /* CEPH_OSD_FLAG_* */ + bool paused; + + u32 epoch; + u32 last_force_resend; + + int osd; +}; + +/* an in-flight request */ +struct ceph_osd_request { + u64 r_tid; /* unique for this client */ + struct rb_node r_node; + struct rb_node r_mc_node; /* map check */ + struct work_struct r_complete_work; + struct ceph_osd *r_osd; + + struct ceph_osd_request_target r_t; +#define r_base_oid r_t.base_oid +#define r_base_oloc r_t.base_oloc +#define r_flags r_t.flags + + struct ceph_msg *r_request, *r_reply; + u32 r_sent; /* >0 if r_request is sending/sent */ + + /* request osd ops array */ + unsigned int r_num_ops; + + int r_result; + + struct ceph_osd_client *r_osdc; + struct kref r_kref; + bool r_mempool; + struct completion r_completion; /* private to osd_client.c */ + ceph_osdc_callback_t r_callback; + struct list_head r_unsafe_item; + + struct inode *r_inode; /* for use by callbacks */ + void *r_priv; /* ditto */ + + /* set by submitter */ + u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */ + struct ceph_snap_context *r_snapc; /* for writes */ + struct timespec64 r_mtime; /* ditto */ + u64 r_data_offset; /* ditto */ + bool r_linger; /* don't resend on failure */ + + /* internal */ + unsigned long r_stamp; /* jiffies, send or check time */ + unsigned long r_start_stamp; /* jiffies */ + int r_attempts; + u32 r_map_dne_bound; + + struct ceph_osd_req_op r_ops[]; +}; + +struct ceph_request_redirect { + struct ceph_object_locator oloc; +}; + +/* + * osd request identifier + * + * caller name + incarnation# + tid to unique identify this request + */ +struct ceph_osd_reqid { + struct ceph_entity_name name; + __le64 tid; + __le32 inc; +} __packed; + +struct ceph_blkin_trace_info { + __le64 trace_id; + __le64 span_id; + __le64 parent_span_id; +} __packed; + +typedef void 
(*rados_watchcb2_t)(void *arg, u64 notify_id, u64 cookie, + u64 notifier_id, void *data, size_t data_len); +typedef void (*rados_watcherrcb_t)(void *arg, u64 cookie, int err); + +struct ceph_osd_linger_request { + struct ceph_osd_client *osdc; + u64 linger_id; + bool committed; + bool is_watch; /* watch or notify */ + + struct ceph_osd *osd; + struct ceph_osd_request *reg_req; + struct ceph_osd_request *ping_req; + unsigned long ping_sent; + unsigned long watch_valid_thru; + struct list_head pending_lworks; + + struct ceph_osd_request_target t; + u32 map_dne_bound; + + struct timespec64 mtime; + + struct kref kref; + struct mutex lock; + struct rb_node node; /* osd */ + struct rb_node osdc_node; /* osdc */ + struct rb_node mc_node; /* map check */ + struct list_head scan_item; + + struct completion reg_commit_wait; + struct completion notify_finish_wait; + int reg_commit_error; + int notify_finish_error; + int last_error; + + u32 register_gen; + u64 notify_id; + + rados_watchcb2_t wcb; + rados_watcherrcb_t errcb; + void *data; + + struct page ***preply_pages; + size_t *preply_len; +}; + +struct ceph_watch_item { + struct ceph_entity_name name; + u64 cookie; + struct ceph_entity_addr addr; +}; + +struct ceph_spg_mapping { + struct rb_node node; + struct ceph_spg spgid; + + struct rb_root backoffs; +}; + +struct ceph_hobject_id { + void *key; + size_t key_len; + void *oid; + size_t oid_len; + u64 snapid; + u32 hash; + u8 is_max; + void *nspace; + size_t nspace_len; + s64 pool; + + /* cache */ + u32 hash_reverse_bits; +}; + +static inline void ceph_hoid_build_hash_cache(struct ceph_hobject_id *hoid) +{ + hoid->hash_reverse_bits = bitrev32(hoid->hash); +} + +/* + * PG-wide backoff: [begin, end) + * per-object backoff: begin == end + */ +struct ceph_osd_backoff { + struct rb_node spg_node; + struct rb_node id_node; + + struct ceph_spg spgid; + u64 id; + struct ceph_hobject_id *begin; + struct ceph_hobject_id *end; +}; + +#define CEPH_LINGER_ID_START 0xffff000000000000ULL + +struct ceph_osd_client { + struct ceph_client *client; + + struct ceph_osdmap *osdmap; /* current map */ + struct rw_semaphore lock; + + struct rb_root osds; /* osds */ + struct list_head osd_lru; /* idle osds */ + spinlock_t osd_lru_lock; + u32 epoch_barrier; + struct ceph_osd homeless_osd; + atomic64_t last_tid; /* tid of last request */ + u64 last_linger_id; + struct rb_root linger_requests; /* lingering requests */ + struct rb_root map_checks; + struct rb_root linger_map_checks; + atomic_t num_requests; + atomic_t num_homeless; + bool abort_on_full; /* abort w/ ENOSPC when full */ + int abort_err; + struct delayed_work timeout_work; + struct delayed_work osds_timeout_work; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_file; +#endif + + mempool_t *req_mempool; + + struct ceph_msgpool msgpool_op; + struct ceph_msgpool msgpool_op_reply; + + struct workqueue_struct *notify_wq; + struct workqueue_struct *completion_wq; +}; + +static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag) +{ + return osdc->osdmap->flags & flag; +} + +extern int ceph_osdc_setup(void); +extern void ceph_osdc_cleanup(void); + +extern int ceph_osdc_init(struct ceph_osd_client *osdc, + struct ceph_client *client); +extern void ceph_osdc_stop(struct ceph_osd_client *osdc); + +extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, + struct ceph_msg *msg); +extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, + struct ceph_msg *msg); +void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb); +void 
ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err); + +extern void osd_req_op_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, u32 flags); + +extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, + unsigned int which, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); + +extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, + u64 offset, u64 length, + u64 truncate_size, u32 truncate_seq); +extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req, + unsigned int which, u64 length); +extern void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req, + unsigned int which, u64 offset_inc); + +extern struct ceph_osd_data *osd_req_op_extent_osd_data( + struct ceph_osd_request *osd_req, + unsigned int which); + +extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *, + unsigned int which, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); +extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *, + unsigned int which, + struct ceph_pagelist *pagelist); +#ifdef CONFIG_BLOCK +void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, + unsigned int which, + struct ceph_bio_iter *bio_pos, + u32 bio_length); +#endif /* CONFIG_BLOCK */ +void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req, + unsigned int which, + struct bio_vec *bvecs, u32 num_bvecs, + u32 bytes); +void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, + unsigned int which, + struct ceph_bvec_iter *bvec_pos); + +extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *, + unsigned int which, + struct ceph_pagelist *pagelist); +extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *, + unsigned int which, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); +void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, + unsigned int which, + struct bio_vec *bvecs, u32 num_bvecs, + u32 bytes); +extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, + unsigned int which, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); +extern int osd_req_op_cls_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, + const char *class, const char *method); +extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, + u16 opcode, const char *name, const void *value, + size_t size, u8 cmp_op, u8 cmp_mode); +extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, + unsigned int which, + u64 expected_object_size, + u64 expected_write_size); + +extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, + struct ceph_snap_context *snapc, + unsigned int num_ops, + bool use_mempool, + gfp_t gfp_flags); +int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp); + +extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, + struct ceph_file_layout *layout, + struct ceph_vino vino, + u64 offset, u64 *len, + unsigned int which, int num_ops, + int opcode, int flags, + struct ceph_snap_context *snapc, + u32 truncate_seq, u64 truncate_size, + bool use_mempool); + +extern void ceph_osdc_get_request(struct ceph_osd_request *req); +extern void ceph_osdc_put_request(struct ceph_osd_request *req); + +extern int 
ceph_osdc_start_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req, + bool nofail); +extern void ceph_osdc_cancel_request(struct ceph_osd_request *req); +extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req); +extern void ceph_osdc_sync(struct ceph_osd_client *osdc); + +extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc); +void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc); + +int ceph_osdc_call(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + const char *class, const char *method, + unsigned int flags, + struct page *req_page, size_t req_len, + struct page *resp_page, size_t *resp_len); + +extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + u64 off, u64 *plen, + u32 truncate_seq, u64 truncate_size, + struct page **pages, int nr_pages, + int page_align); + +extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + struct ceph_snap_context *sc, + u64 off, u64 len, + u32 truncate_seq, u64 truncate_size, + struct timespec64 *mtime, + struct page **pages, int nr_pages); + +/* watch/notify */ +struct ceph_osd_linger_request * +ceph_osdc_watch(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + rados_watchcb2_t wcb, + rados_watcherrcb_t errcb, + void *data); +int ceph_osdc_unwatch(struct ceph_osd_client *osdc, + struct ceph_osd_linger_request *lreq); + +int ceph_osdc_notify_ack(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + u64 notify_id, + u64 cookie, + void *payload, + u32 payload_len); +int ceph_osdc_notify(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + void *payload, + u32 payload_len, + u32 timeout, + struct page ***preply_pages, + size_t *preply_len); +int ceph_osdc_watch_check(struct ceph_osd_client *osdc, + struct ceph_osd_linger_request *lreq); +int ceph_osdc_list_watchers(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + struct ceph_watch_item **watchers, + u32 *num_watchers); +#endif + diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h new file mode 100644 index 000000000..43fdadeb2 --- /dev/null +++ b/include/linux/ceph/osdmap.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_OSDMAP_H +#define _FS_CEPH_OSDMAP_H + +#include +#include +#include +#include + +/* + * The osd map describes the current membership of the osd cluster and + * specifies the mapping of objects to placement groups and placement + * groups to (sets of) osds. That is, it completely specifies the + * (desired) distribution of all data objects in the system at some + * point in time. + * + * Each map version is identified by an epoch, which increases monotonically. + * + * The map can be updated either via an incremental map (diff) describing + * the change between two successive epochs, or as a fully encoded map. 
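
As an illustrative aside (not part of the patch): the submission half of the osd request lifecycle declared in osd_client.h above, as a hedged sketch. Building @req (ceph_osdc_new_request() plus the osd_req_op_*() setters) is elided, and the caller is assumed to own the reference dropped at the end.

        static int example_submit_and_wait(struct ceph_osd_client *osdc,
                                           struct ceph_osd_request *req)
        {
                int ret;

                ret = ceph_osdc_start_request(osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(osdc, req); /* r_result or error */

                ceph_osdc_put_request(req);
                return ret;
        }
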
+ */ +struct ceph_pg { + uint64_t pool; + uint32_t seed; +}; + +#define CEPH_SPG_NOSHARD -1 + +struct ceph_spg { + struct ceph_pg pgid; + s8 shard; +}; + +int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs); +int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs); + +#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id + together */ +#define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */ +#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota, + will set FULL too */ +#define CEPH_POOL_FLAG_NEARFULL (1ULL << 11) /* pool is nearfull */ + +struct ceph_pg_pool_info { + struct rb_node node; + s64 id; + u8 type; /* CEPH_POOL_TYPE_* */ + u8 size; + u8 min_size; + u8 crush_ruleset; + u8 object_hash; + u32 last_force_request_resend; + u32 pg_num, pgp_num; + int pg_num_mask, pgp_num_mask; + s64 read_tier; + s64 write_tier; /* wins for read+write ops */ + u64 flags; /* CEPH_POOL_FLAG_* */ + char *name; + + bool was_full; /* for handle_one_map() */ +}; + +static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool) +{ + switch (pool->type) { + case CEPH_POOL_TYPE_REP: + return true; + case CEPH_POOL_TYPE_EC: + return false; + default: + BUG(); + } +} + +struct ceph_object_locator { + s64 pool; + struct ceph_string *pool_ns; +}; + +static inline void ceph_oloc_init(struct ceph_object_locator *oloc) +{ + oloc->pool = -1; + oloc->pool_ns = NULL; +} + +static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc) +{ + return oloc->pool == -1; +} + +void ceph_oloc_copy(struct ceph_object_locator *dest, + const struct ceph_object_locator *src); +void ceph_oloc_destroy(struct ceph_object_locator *oloc); + +/* + * 51-char inline_name is long enough for all cephfs and all but one + * rbd requests: in ".rbd"/"rbd_id." can be + * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all + * other rbd requests fit into inline_name. + * + * Makes ceph_object_id 64 bytes on 64-bit. + */ +#define CEPH_OID_INLINE_LEN 52 + +/* + * Both inline and external buffers have space for a NUL-terminator, + * which is carried around. It's not required though - RADOS object + * names don't have to be NUL-terminated and may contain NULs. 
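
As an illustrative aside (not part of the patch): a hypothetical synchronous object-class call, combining ceph_osdc_call() from osd_client.h with the object id/locator helpers declared around this point. The class, method and object names are made up, and CEPH_OSD_FLAG_READ is assumed from rados.h.

        static int example_cls_call(struct ceph_osd_client *osdc, s64 pool_id)
        {
                CEPH_DEFINE_OID_ONSTACK(oid);
                struct ceph_object_locator oloc;
                struct page *reply_page;
                size_t reply_len = PAGE_SIZE;
                int ret;

                reply_page = alloc_page(GFP_NOIO);
                if (!reply_page)
                        return -ENOMEM;

                ceph_oloc_init(&oloc);
                oloc.pool = pool_id;
                ceph_oid_printf(&oid, "%s", "example_object");

                ret = ceph_osdc_call(osdc, &oid, &oloc, "example_cls", "get_info",
                                     CEPH_OSD_FLAG_READ, NULL, 0,
                                     reply_page, &reply_len);
                /* on success, the first reply_len bytes of reply_page hold the reply */

                ceph_oid_destroy(&oid);
                ceph_oloc_destroy(&oloc);
                __free_page(reply_page);
                return ret;
        }
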
+ */ +struct ceph_object_id { + char *name; + char inline_name[CEPH_OID_INLINE_LEN]; + int name_len; +}; + +static inline void ceph_oid_init(struct ceph_object_id *oid) +{ + oid->name = oid->inline_name; + oid->name_len = 0; +} + +#define CEPH_OID_INIT_ONSTACK(oid) \ + ({ ceph_oid_init(&oid); oid; }) +#define CEPH_DEFINE_OID_ONSTACK(oid) \ + struct ceph_object_id oid = CEPH_OID_INIT_ONSTACK(oid) + +static inline bool ceph_oid_empty(const struct ceph_object_id *oid) +{ + return oid->name == oid->inline_name && !oid->name_len; +} + +void ceph_oid_copy(struct ceph_object_id *dest, + const struct ceph_object_id *src); +__printf(2, 3) +void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...); +__printf(3, 4) +int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, + const char *fmt, ...); +void ceph_oid_destroy(struct ceph_object_id *oid); + +struct ceph_pg_mapping { + struct rb_node node; + struct ceph_pg pgid; + + union { + struct { + int len; + int osds[]; + } pg_temp, pg_upmap; + struct { + int osd; + } primary_temp; + struct { + int len; + int from_to[][2]; + } pg_upmap_items; + }; +}; + +struct ceph_osdmap { + struct ceph_fsid fsid; + u32 epoch; + struct ceph_timespec created, modified; + + u32 flags; /* CEPH_OSDMAP_* */ + + u32 max_osd; /* size of osd_state, _offload, _addr arrays */ + u32 *osd_state; /* CEPH_OSD_* */ + u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */ + struct ceph_entity_addr *osd_addr; + + struct rb_root pg_temp; + struct rb_root primary_temp; + + /* remap (post-CRUSH, pre-up) */ + struct rb_root pg_upmap; /* PG := raw set */ + struct rb_root pg_upmap_items; /* from -> to within raw set */ + + u32 *osd_primary_affinity; + + struct rb_root pg_pools; + u32 pool_max; + + /* the CRUSH map specifies the mapping of placement groups to + * the list of osds that store+replicate them. 
*/ + struct crush_map *crush; + + struct mutex crush_workspace_mutex; + void *crush_workspace; +}; + +static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd) +{ + return osd >= 0 && osd < map->max_osd && + (map->osd_state[osd] & CEPH_OSD_EXISTS); +} + +static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd) +{ + return ceph_osd_exists(map, osd) && + (map->osd_state[osd] & CEPH_OSD_UP); +} + +static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd) +{ + return !ceph_osd_is_up(map, osd); +} + +char *ceph_osdmap_state_str(char *str, int len, u32 state); +extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); + +static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map, + int osd) +{ + if (osd >= map->max_osd) + return NULL; + return &map->osd_addr[osd]; +} + +#define CEPH_PGID_ENCODING_LEN (1 + 8 + 4 + 4) + +static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid) +{ + __u8 version; + + if (!ceph_has_room(p, end, CEPH_PGID_ENCODING_LEN)) { + pr_warn("incomplete pg encoding\n"); + return -EINVAL; + } + version = ceph_decode_8(p); + if (version > 1) { + pr_warn("do not understand pg encoding %d > 1\n", + (int)version); + return -EINVAL; + } + + pgid->pool = ceph_decode_64(p); + pgid->seed = ceph_decode_32(p); + *p += 4; /* skip deprecated preferred value */ + + return 0; +} + +struct ceph_osdmap *ceph_osdmap_alloc(void); +extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end); +struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, + struct ceph_osdmap *map); +extern void ceph_osdmap_destroy(struct ceph_osdmap *map); + +struct ceph_osds { + int osds[CEPH_PG_MAX_SIZE]; + int size; + int primary; /* id, NOT index */ +}; + +static inline void ceph_osds_init(struct ceph_osds *set) +{ + set->size = 0; + set->primary = -1; +} + +void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src); + +bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num, + u32 new_pg_num); +bool ceph_is_new_interval(const struct ceph_osds *old_acting, + const struct ceph_osds *new_acting, + const struct ceph_osds *old_up, + const struct ceph_osds *new_up, + int old_size, + int new_size, + int old_min_size, + int new_min_size, + u32 old_pg_num, + u32 new_pg_num, + bool old_sort_bitwise, + bool new_sort_bitwise, + bool old_recovery_deletes, + bool new_recovery_deletes, + const struct ceph_pg *pgid); +bool ceph_osds_changed(const struct ceph_osds *old_acting, + const struct ceph_osds *new_acting, + bool any_change); + +void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi, + const struct ceph_object_id *oid, + const struct ceph_object_locator *oloc, + struct ceph_pg *raw_pgid); +int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, + const struct ceph_object_id *oid, + const struct ceph_object_locator *oloc, + struct ceph_pg *raw_pgid); + +void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap, + struct ceph_pg_pool_info *pi, + const struct ceph_pg *raw_pgid, + struct ceph_osds *up, + struct ceph_osds *acting); +bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap, + struct ceph_pg_pool_info *pi, + const struct ceph_pg *raw_pgid, + struct ceph_spg *spgid); +int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, + const struct ceph_pg *raw_pgid); + +extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, + u64 id); + +extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id); +extern int ceph_pg_poolid_by_name(struct ceph_osdmap 
*map, const char *name); +u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id); + +#endif diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h new file mode 100644 index 000000000..d02233643 --- /dev/null +++ b/include/linux/ceph/pagelist.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __FS_CEPH_PAGELIST_H +#define __FS_CEPH_PAGELIST_H + +#include +#include +#include +#include + +struct ceph_pagelist { + struct list_head head; + void *mapped_tail; + size_t length; + size_t room; + struct list_head free_list; + size_t num_pages_free; + refcount_t refcnt; +}; + +struct ceph_pagelist_cursor { + struct ceph_pagelist *pl; /* pagelist, for error checking */ + struct list_head *page_lru; /* page in list */ + size_t room; /* room remaining to reset to */ +}; + +static inline void ceph_pagelist_init(struct ceph_pagelist *pl) +{ + INIT_LIST_HEAD(&pl->head); + pl->mapped_tail = NULL; + pl->length = 0; + pl->room = 0; + INIT_LIST_HEAD(&pl->free_list); + pl->num_pages_free = 0; + refcount_set(&pl->refcnt, 1); +} + +extern void ceph_pagelist_release(struct ceph_pagelist *pl); + +extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l); + +extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space); + +extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl); + +extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, + struct ceph_pagelist_cursor *c); + +extern int ceph_pagelist_truncate(struct ceph_pagelist *pl, + struct ceph_pagelist_cursor *c); + +static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) +{ + __le64 ev = cpu_to_le64(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_32(struct ceph_pagelist *pl, u32 v) +{ + __le32 ev = cpu_to_le32(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_16(struct ceph_pagelist *pl, u16 v) +{ + __le16 ev = cpu_to_le16(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v) +{ + return ceph_pagelist_append(pl, &v, 1); +} +static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl, + char *s, u32 len) +{ + int ret = ceph_pagelist_encode_32(pl, len); + if (ret) + return ret; + if (len) + return ceph_pagelist_append(pl, s, len); + return 0; +} + +#endif diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h new file mode 100644 index 000000000..f6026bf4d --- /dev/null +++ b/include/linux/ceph/rados.h @@ -0,0 +1,507 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CEPH_RADOS_H +#define CEPH_RADOS_H + +/* + * Data types for the Ceph distributed object storage layer RADOS + * (Reliable Autonomic Distributed Object Store). + */ + +#include + +/* + * fs id + */ +struct ceph_fsid { + unsigned char fsid[16]; +}; + +static inline int ceph_fsid_compare(const struct ceph_fsid *a, + const struct ceph_fsid *b) +{ + return memcmp(a, b, sizeof(*a)); +} + +/* + * ino, object, etc. 
+ */ +typedef __le64 ceph_snapid_t; +#define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */ +#define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */ +#define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */ + +struct ceph_timespec { + __le32 tv_sec; + __le32 tv_nsec; +} __attribute__ ((packed)); + + +/* + * object layout - how objects are mapped into PGs + */ +#define CEPH_OBJECT_LAYOUT_HASH 1 +#define CEPH_OBJECT_LAYOUT_LINEAR 2 +#define CEPH_OBJECT_LAYOUT_HASHINO 3 + +/* + * pg layout -- how PGs are mapped onto (sets of) OSDs + */ +#define CEPH_PG_LAYOUT_CRUSH 0 +#define CEPH_PG_LAYOUT_HASH 1 +#define CEPH_PG_LAYOUT_LINEAR 2 +#define CEPH_PG_LAYOUT_HYBRID 3 + +#define CEPH_PG_MAX_SIZE 32 /* max # osds in a single pg */ + +/* + * placement group. + * we encode this into one __le64. + */ +struct ceph_pg_v1 { + __le16 preferred; /* preferred primary osd */ + __le16 ps; /* placement seed */ + __le32 pool; /* object pool */ +} __attribute__ ((packed)); + +/* + * pg_pool is a set of pgs storing a pool of objects + * + * pg_num -- base number of pseudorandomly placed pgs + * + * pgp_num -- effective number when calculating pg placement. this + * is used for pg_num increases. new pgs result in data being "split" + * into new pgs. for this to proceed smoothly, new pgs are intiially + * colocated with their parents; that is, pgp_num doesn't increase + * until the new pgs have successfully split. only _then_ are the new + * pgs placed independently. + * + * lpg_num -- localized pg count (per device). replicas are randomly + * selected. + * + * lpgp_num -- as above. + */ +#define CEPH_NOPOOL ((__u64) (-1)) /* pool id not defined */ + +#define CEPH_POOL_TYPE_REP 1 +#define CEPH_POOL_TYPE_RAID4 2 /* never implemented */ +#define CEPH_POOL_TYPE_EC 3 + +/* + * stable_mod func is used to control number of placement groups. + * similar to straight-up modulo, but produces a stable mapping as b + * increases over time. b is the number of bins, and bmask is the + * containing power of 2 minus 1. + * + * b <= bmask and bmask=(2**n)-1 + * e.g., b=12 -> bmask=15, b=123 -> bmask=127 + */ +static inline int ceph_stable_mod(int x, int b, int bmask) +{ + if ((x & bmask) < b) + return x & bmask; + else + return x & (bmask >> 1); +} + +/* + * object layout - how a given object should be stored. + */ +struct ceph_object_layout { + struct ceph_pg_v1 ol_pgid; /* raw pg, with _full_ ps precision. */ + __le32 ol_stripe_unit; /* for per-object parity, if any */ +} __attribute__ ((packed)); + +/* + * compound epoch+version, used by storage layer to serialize mutations + */ +struct ceph_eversion { + __le64 version; + __le32 epoch; +} __attribute__ ((packed)); + +/* + * osd map bits + */ + +/* status bits */ +#define CEPH_OSD_EXISTS (1<<0) +#define CEPH_OSD_UP (1<<1) +#define CEPH_OSD_AUTOOUT (1<<2) /* osd was automatically marked out */ +#define CEPH_OSD_NEW (1<<3) /* osd is new, never marked in */ + +extern const char *ceph_osd_state_name(int s); + +/* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */ +#define CEPH_OSD_IN 0x10000 +#define CEPH_OSD_OUT 0 + +/* osd primary-affinity. 
fixed point value: 0x10000 == baseline */ +#define CEPH_OSD_MAX_PRIMARY_AFFINITY 0x10000 +#define CEPH_OSD_DEFAULT_PRIMARY_AFFINITY 0x10000 + + +/* + * osd map flag bits + */ +#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC), + not set since ~luminous */ +#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC), + not set since ~luminous */ +#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */ +#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */ +#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */ +#define CEPH_OSDMAP_NOUP (1<<5) /* block osd boot */ +#define CEPH_OSDMAP_NODOWN (1<<6) /* block osd mark-down/failure */ +#define CEPH_OSDMAP_NOOUT (1<<7) /* block osd auto mark-out */ +#define CEPH_OSDMAP_NOIN (1<<8) /* block osd auto mark-in */ +#define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */ +#define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */ +#define CEPH_OSDMAP_NOSCRUB (1<<11) /* block periodic scrub */ +#define CEPH_OSDMAP_NODEEP_SCRUB (1<<12) /* block periodic deep-scrub */ +#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */ +#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */ +#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */ +#define CEPH_OSDMAP_REQUIRE_JEWEL (1<<16) /* require jewel for booting osds */ +#define CEPH_OSDMAP_REQUIRE_KRAKEN (1<<17) /* require kraken for booting osds */ +#define CEPH_OSDMAP_REQUIRE_LUMINOUS (1<<18) /* require l for booting osds */ +#define CEPH_OSDMAP_RECOVERY_DELETES (1<<19) /* deletes performed during recovery instead of peering */ + +/* + * The error code to return when an OSD can't handle a write + * because it is too large. + */ +#define OSD_WRITETOOBIG EMSGSIZE + +/* + * osd ops + * + * WARNING: do not use these op codes directly. Use the helpers + * defined below instead. In certain cases, op code behavior was + * redefined, resulting in special-cases in the helpers. 
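For illustration, a small sketch of how the helpers defined further down classify an opcode, including the CALL special case called out above (the wrapper function and the WARN_ON checks are illustrative only; none of them fire):

static void example_classify_osd_op(void)
{
	int op = CEPH_OSD_OP_CALL;		/* encoded as RD | EXEC | 1 */

	WARN_ON(!ceph_osd_op_type_exec(op));	/* type bits say "exec" */
	WARN_ON(ceph_osd_op_mode_read(op));	/* RD bit is set, but CALL is
						 * special-cased: not a read */
	WARN_ON(ceph_osd_op_mode_modify(op));	/* no WR bit */
}
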
+ */ +#define CEPH_OSD_OP_MODE 0xf000 +#define CEPH_OSD_OP_MODE_RD 0x1000 +#define CEPH_OSD_OP_MODE_WR 0x2000 +#define CEPH_OSD_OP_MODE_RMW 0x3000 +#define CEPH_OSD_OP_MODE_SUB 0x4000 +#define CEPH_OSD_OP_MODE_CACHE 0x8000 + +#define CEPH_OSD_OP_TYPE 0x0f00 +#define CEPH_OSD_OP_TYPE_LOCK 0x0100 +#define CEPH_OSD_OP_TYPE_DATA 0x0200 +#define CEPH_OSD_OP_TYPE_ATTR 0x0300 +#define CEPH_OSD_OP_TYPE_EXEC 0x0400 +#define CEPH_OSD_OP_TYPE_PG 0x0500 +#define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */ + +#define __CEPH_OSD_OP1(mode, nr) \ + (CEPH_OSD_OP_MODE_##mode | (nr)) + +#define __CEPH_OSD_OP(mode, type, nr) \ + (CEPH_OSD_OP_MODE_##mode | CEPH_OSD_OP_TYPE_##type | (nr)) + +#define __CEPH_FORALL_OSD_OPS(f) \ + /** data **/ \ + /* read */ \ + f(READ, __CEPH_OSD_OP(RD, DATA, 1), "read") \ + f(STAT, __CEPH_OSD_OP(RD, DATA, 2), "stat") \ + f(MAPEXT, __CEPH_OSD_OP(RD, DATA, 3), "mapext") \ + \ + /* fancy read */ \ + f(MASKTRUNC, __CEPH_OSD_OP(RD, DATA, 4), "masktrunc") \ + f(SPARSE_READ, __CEPH_OSD_OP(RD, DATA, 5), "sparse-read") \ + \ + f(NOTIFY, __CEPH_OSD_OP(RD, DATA, 6), "notify") \ + f(NOTIFY_ACK, __CEPH_OSD_OP(RD, DATA, 7), "notify-ack") \ + \ + /* versioning */ \ + f(ASSERT_VER, __CEPH_OSD_OP(RD, DATA, 8), "assert-version") \ + \ + f(LIST_WATCHERS, __CEPH_OSD_OP(RD, DATA, 9), "list-watchers") \ + \ + f(LIST_SNAPS, __CEPH_OSD_OP(RD, DATA, 10), "list-snaps") \ + \ + /* sync */ \ + f(SYNC_READ, __CEPH_OSD_OP(RD, DATA, 11), "sync_read") \ + \ + /* write */ \ + f(WRITE, __CEPH_OSD_OP(WR, DATA, 1), "write") \ + f(WRITEFULL, __CEPH_OSD_OP(WR, DATA, 2), "writefull") \ + f(TRUNCATE, __CEPH_OSD_OP(WR, DATA, 3), "truncate") \ + f(ZERO, __CEPH_OSD_OP(WR, DATA, 4), "zero") \ + f(DELETE, __CEPH_OSD_OP(WR, DATA, 5), "delete") \ + \ + /* fancy write */ \ + f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \ + f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \ + f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \ + \ + f(TMAPUP, __CEPH_OSD_OP(RMW, DATA, 10), "tmapup") \ + f(TMAPPUT, __CEPH_OSD_OP(WR, DATA, 11), "tmapput") \ + f(TMAPGET, __CEPH_OSD_OP(RD, DATA, 12), "tmapget") \ + \ + f(CREATE, __CEPH_OSD_OP(WR, DATA, 13), "create") \ + f(ROLLBACK, __CEPH_OSD_OP(WR, DATA, 14), "rollback") \ + \ + f(WATCH, __CEPH_OSD_OP(WR, DATA, 15), "watch") \ + \ + /* omap */ \ + f(OMAPGETKEYS, __CEPH_OSD_OP(RD, DATA, 17), "omap-get-keys") \ + f(OMAPGETVALS, __CEPH_OSD_OP(RD, DATA, 18), "omap-get-vals") \ + f(OMAPGETHEADER, __CEPH_OSD_OP(RD, DATA, 19), "omap-get-header") \ + f(OMAPGETVALSBYKEYS, __CEPH_OSD_OP(RD, DATA, 20), "omap-get-vals-by-keys") \ + f(OMAPSETVALS, __CEPH_OSD_OP(WR, DATA, 21), "omap-set-vals") \ + f(OMAPSETHEADER, __CEPH_OSD_OP(WR, DATA, 22), "omap-set-header") \ + f(OMAPCLEAR, __CEPH_OSD_OP(WR, DATA, 23), "omap-clear") \ + f(OMAPRMKEYS, __CEPH_OSD_OP(WR, DATA, 24), "omap-rm-keys") \ + f(OMAP_CMP, __CEPH_OSD_OP(RD, DATA, 25), "omap-cmp") \ + \ + /* tiering */ \ + f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \ + f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \ + f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \ + f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \ + f(COPY_GET, __CEPH_OSD_OP(RD, DATA, 30), "copy-get") \ + f(CACHE_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 31), "cache-flush") \ + f(CACHE_EVICT, __CEPH_OSD_OP(CACHE, DATA, 32), "cache-evict") \ + f(CACHE_TRY_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 33), "cache-try-flush") \ + \ + /* convert tmap to omap */ \ + f(TMAP2OMAP, __CEPH_OSD_OP(RMW, DATA, 34), "tmap2omap") \ + \ + /* hints */ \ + f(SETALLOCHINT, 
__CEPH_OSD_OP(WR, DATA, 35), "set-alloc-hint") \ + \ + /** multi **/ \ + f(CLONERANGE, __CEPH_OSD_OP(WR, MULTI, 1), "clonerange") \ + f(ASSERT_SRC_VERSION, __CEPH_OSD_OP(RD, MULTI, 2), "assert-src-version") \ + f(SRC_CMPXATTR, __CEPH_OSD_OP(RD, MULTI, 3), "src-cmpxattr") \ + \ + /** attrs **/ \ + /* read */ \ + f(GETXATTR, __CEPH_OSD_OP(RD, ATTR, 1), "getxattr") \ + f(GETXATTRS, __CEPH_OSD_OP(RD, ATTR, 2), "getxattrs") \ + f(CMPXATTR, __CEPH_OSD_OP(RD, ATTR, 3), "cmpxattr") \ + \ + /* write */ \ + f(SETXATTR, __CEPH_OSD_OP(WR, ATTR, 1), "setxattr") \ + f(SETXATTRS, __CEPH_OSD_OP(WR, ATTR, 2), "setxattrs") \ + f(RESETXATTRS, __CEPH_OSD_OP(WR, ATTR, 3), "resetxattrs") \ + f(RMXATTR, __CEPH_OSD_OP(WR, ATTR, 4), "rmxattr") \ + \ + /** subop **/ \ + f(PULL, __CEPH_OSD_OP1(SUB, 1), "pull") \ + f(PUSH, __CEPH_OSD_OP1(SUB, 2), "push") \ + f(BALANCEREADS, __CEPH_OSD_OP1(SUB, 3), "balance-reads") \ + f(UNBALANCEREADS, __CEPH_OSD_OP1(SUB, 4), "unbalance-reads") \ + f(SCRUB, __CEPH_OSD_OP1(SUB, 5), "scrub") \ + f(SCRUB_RESERVE, __CEPH_OSD_OP1(SUB, 6), "scrub-reserve") \ + f(SCRUB_UNRESERVE, __CEPH_OSD_OP1(SUB, 7), "scrub-unreserve") \ + f(SCRUB_STOP, __CEPH_OSD_OP1(SUB, 8), "scrub-stop") \ + f(SCRUB_MAP, __CEPH_OSD_OP1(SUB, 9), "scrub-map") \ + \ + /** lock **/ \ + f(WRLOCK, __CEPH_OSD_OP(WR, LOCK, 1), "wrlock") \ + f(WRUNLOCK, __CEPH_OSD_OP(WR, LOCK, 2), "wrunlock") \ + f(RDLOCK, __CEPH_OSD_OP(WR, LOCK, 3), "rdlock") \ + f(RDUNLOCK, __CEPH_OSD_OP(WR, LOCK, 4), "rdunlock") \ + f(UPLOCK, __CEPH_OSD_OP(WR, LOCK, 5), "uplock") \ + f(DNLOCK, __CEPH_OSD_OP(WR, LOCK, 6), "dnlock") \ + \ + /** exec **/ \ + /* note: the RD bit here is wrong; see special-case below in helper */ \ + f(CALL, __CEPH_OSD_OP(RD, EXEC, 1), "call") \ + \ + /** pg **/ \ + f(PGLS, __CEPH_OSD_OP(RD, PG, 1), "pgls") \ + f(PGLS_FILTER, __CEPH_OSD_OP(RD, PG, 2), "pgls-filter") \ + f(PG_HITSET_LS, __CEPH_OSD_OP(RD, PG, 3), "pg-hitset-ls") \ + f(PG_HITSET_GET, __CEPH_OSD_OP(RD, PG, 4), "pg-hitset-get") + +enum { +#define GENERATE_ENUM_ENTRY(op, opcode, str) CEPH_OSD_OP_##op = (opcode), +__CEPH_FORALL_OSD_OPS(GENERATE_ENUM_ENTRY) +#undef GENERATE_ENUM_ENTRY +}; + +static inline int ceph_osd_op_type_lock(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK; +} +static inline int ceph_osd_op_type_data(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA; +} +static inline int ceph_osd_op_type_attr(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR; +} +static inline int ceph_osd_op_type_exec(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC; +} +static inline int ceph_osd_op_type_pg(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG; +} +static inline int ceph_osd_op_type_multi(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_MULTI; +} + +static inline int ceph_osd_op_mode_subop(int op) +{ + return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB; +} +static inline int ceph_osd_op_mode_read(int op) +{ + return (op & CEPH_OSD_OP_MODE_RD) && + op != CEPH_OSD_OP_CALL; +} +static inline int ceph_osd_op_mode_modify(int op) +{ + return op & CEPH_OSD_OP_MODE_WR; +} + +/* + * note that the following tmap stuff is also defined in the ceph librados.h + * any modification here needs to be updated there + */ +#define CEPH_OSD_TMAP_HDR 'h' +#define CEPH_OSD_TMAP_SET 's' +#define CEPH_OSD_TMAP_CREATE 'c' /* create key */ +#define CEPH_OSD_TMAP_RM 'r' +#define CEPH_OSD_TMAP_RMSLOPPY 'R' + +extern const char *ceph_osd_op_name(int op); + +/* + * osd op flags + * + 
* An op may be READ, WRITE, or READ|WRITE. + */ +enum { + CEPH_OSD_FLAG_ACK = 0x0001, /* want (or is) "ack" ack */ + CEPH_OSD_FLAG_ONNVRAM = 0x0002, /* want (or is) "onnvram" ack */ + CEPH_OSD_FLAG_ONDISK = 0x0004, /* want (or is) "ondisk" ack */ + CEPH_OSD_FLAG_RETRY = 0x0008, /* resend attempt */ + CEPH_OSD_FLAG_READ = 0x0010, /* op may read */ + CEPH_OSD_FLAG_WRITE = 0x0020, /* op may write */ + CEPH_OSD_FLAG_ORDERSNAP = 0x0040, /* EOLDSNAP if snapc is out of order */ + CEPH_OSD_FLAG_PEERSTAT_OLD = 0x0080, /* DEPRECATED msg includes osd_peer_stat */ + CEPH_OSD_FLAG_BALANCE_READS = 0x0100, + CEPH_OSD_FLAG_PARALLELEXEC = 0x0200, /* execute op in parallel */ + CEPH_OSD_FLAG_PGOP = 0x0400, /* pg op, no object */ + CEPH_OSD_FLAG_EXEC = 0x0800, /* op may exec */ + CEPH_OSD_FLAG_EXEC_PUBLIC = 0x1000, /* DEPRECATED op may exec (public) */ + CEPH_OSD_FLAG_LOCALIZE_READS = 0x2000, /* read from nearby replica, if any */ + CEPH_OSD_FLAG_RWORDERED = 0x4000, /* order wrt concurrent reads */ + CEPH_OSD_FLAG_IGNORE_CACHE = 0x8000, /* ignore cache logic */ + CEPH_OSD_FLAG_SKIPRWLOCKS = 0x10000, /* skip rw locks */ + CEPH_OSD_FLAG_IGNORE_OVERLAY = 0x20000, /* ignore pool overlay */ + CEPH_OSD_FLAG_FLUSH = 0x40000, /* this is part of flush */ + CEPH_OSD_FLAG_MAP_SNAP_CLONE = 0x80000, /* map snap direct to clone id */ + CEPH_OSD_FLAG_ENFORCE_SNAPC = 0x100000, /* use snapc provided even if + pool uses pool snaps */ + CEPH_OSD_FLAG_REDIRECTED = 0x200000, /* op has been redirected */ + CEPH_OSD_FLAG_KNOWN_REDIR = 0x400000, /* redirect bit is authoritative */ + CEPH_OSD_FLAG_FULL_TRY = 0x800000, /* try op despite full flag */ + CEPH_OSD_FLAG_FULL_FORCE = 0x1000000, /* force op despite full flag */ +}; + +enum { + CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */ + CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */ +}; + +#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/ +#define EBLACKLISTED ESHUTDOWN /* blacklisted */ + +/* xattr comparison */ +enum { + CEPH_OSD_CMPXATTR_OP_NOP = 0, + CEPH_OSD_CMPXATTR_OP_EQ = 1, + CEPH_OSD_CMPXATTR_OP_NE = 2, + CEPH_OSD_CMPXATTR_OP_GT = 3, + CEPH_OSD_CMPXATTR_OP_GTE = 4, + CEPH_OSD_CMPXATTR_OP_LT = 5, + CEPH_OSD_CMPXATTR_OP_LTE = 6 +}; + +enum { + CEPH_OSD_CMPXATTR_MODE_STRING = 1, + CEPH_OSD_CMPXATTR_MODE_U64 = 2 +}; + +enum { + CEPH_OSD_WATCH_OP_UNWATCH = 0, + CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1, + /* note: use only ODD ids to prevent pre-giant code from + interpreting the op as UNWATCH */ + CEPH_OSD_WATCH_OP_WATCH = 3, + CEPH_OSD_WATCH_OP_RECONNECT = 5, + CEPH_OSD_WATCH_OP_PING = 7, +}; + +const char *ceph_osd_watch_op_name(int o); + +enum { + CEPH_OSD_BACKOFF_OP_BLOCK = 1, + CEPH_OSD_BACKOFF_OP_ACK_BLOCK = 2, + CEPH_OSD_BACKOFF_OP_UNBLOCK = 3, +}; + +/* + * an individual object operation. 
each may be accompanied by some data + * payload + */ +struct ceph_osd_op { + __le16 op; /* CEPH_OSD_OP_* */ + __le32 flags; /* CEPH_OSD_OP_FLAG_* */ + union { + struct { + __le64 offset, length; + __le64 truncate_size; + __le32 truncate_seq; + } __attribute__ ((packed)) extent; + struct { + __le32 name_len; + __le32 value_len; + __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ + __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ + } __attribute__ ((packed)) xattr; + struct { + __u8 class_len; + __u8 method_len; + __u8 argc; + __le32 indata_len; + } __attribute__ ((packed)) cls; + struct { + __le64 cookie, count; + } __attribute__ ((packed)) pgls; + struct { + __le64 snapid; + } __attribute__ ((packed)) snap; + struct { + __le64 cookie; + __le64 ver; /* no longer used */ + __u8 op; /* CEPH_OSD_WATCH_OP_* */ + __le32 gen; /* registration generation */ + } __attribute__ ((packed)) watch; + struct { + __le64 cookie; + } __attribute__ ((packed)) notify; + struct { + __le64 offset, length; + __le64 src_offset; + } __attribute__ ((packed)) clonerange; + struct { + __le64 expected_object_size; + __le64 expected_write_size; + } __attribute__ ((packed)) alloc_hint; + }; + __le32 payload_len; +} __attribute__ ((packed)); + + +#endif diff --git a/include/linux/ceph/string_table.h b/include/linux/ceph/string_table.h new file mode 100644 index 000000000..a4a9962d1 --- /dev/null +++ b/include/linux/ceph/string_table.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_STRING_TABLE_H +#define _FS_CEPH_STRING_TABLE_H + +#include +#include +#include +#include + +struct ceph_string { + struct kref kref; + union { + struct rb_node node; + struct rcu_head rcu; + }; + size_t len; + char str[]; +}; + +extern void ceph_release_string(struct kref *ref); +extern struct ceph_string *ceph_find_or_create_string(const char *str, + size_t len); +extern bool ceph_strings_empty(void); + +static inline struct ceph_string *ceph_get_string(struct ceph_string *str) +{ + kref_get(&str->kref); + return str; +} + +static inline void ceph_put_string(struct ceph_string *str) +{ + if (!str) + return; + kref_put(&str->kref, ceph_release_string); +} + +static inline int ceph_compare_string(struct ceph_string *cs, + const char* str, size_t len) +{ + size_t cs_len = cs ? cs->len : 0; + if (cs_len != len) + return cs_len - len; + if (len == 0) + return 0; + return strncmp(cs->str, str, len); +} + +#define ceph_try_get_string(x) \ +({ \ + struct ceph_string *___str; \ + rcu_read_lock(); \ + for (;;) { \ + ___str = rcu_dereference(x); \ + if (!___str || \ + kref_get_unless_zero(&___str->kref)) \ + break; \ + } \ + rcu_read_unlock(); \ + (___str); \ +}) + +#endif diff --git a/include/linux/ceph/striper.h b/include/linux/ceph/striper.h new file mode 100644 index 000000000..cbd0d24b7 --- /dev/null +++ b/include/linux/ceph/striper.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CEPH_STRIPER_H +#define _LINUX_CEPH_STRIPER_H + +#include +#include + +struct ceph_file_layout; + +void ceph_calc_file_object_mapping(struct ceph_file_layout *l, + u64 off, u64 len, + u64 *objno, u64 *objoff, u32 *xlen); + +struct ceph_object_extent { + struct list_head oe_item; + u64 oe_objno; + u64 oe_off; + u64 oe_len; +}; + +static inline void ceph_object_extent_init(struct ceph_object_extent *ex) +{ + INIT_LIST_HEAD(&ex->oe_item); +} + +/* + * Called for each mapped stripe unit. + * + * @bytes: number of bytes mapped, i.e. 
the minimum of the full length + * requested (file extent length) or the remainder of the stripe + * unit within an object + */ +typedef void (*ceph_object_extent_fn_t)(struct ceph_object_extent *ex, + u32 bytes, void *arg); + +int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len, + struct list_head *object_extents, + struct ceph_object_extent *alloc_fn(void *arg), + void *alloc_arg, + ceph_object_extent_fn_t action_fn, + void *action_arg); +int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len, + struct list_head *object_extents, + ceph_object_extent_fn_t action_fn, + void *action_arg); + +struct ceph_file_extent { + u64 fe_off; + u64 fe_len; +}; + +static inline u64 ceph_file_extents_bytes(struct ceph_file_extent *file_extents, + u32 num_file_extents) +{ + u64 bytes = 0; + u32 i; + + for (i = 0; i < num_file_extents; i++) + bytes += file_extents[i].fe_len; + + return bytes; +} + +int ceph_extent_to_file(struct ceph_file_layout *l, + u64 objno, u64 objoff, u64 objlen, + struct ceph_file_extent **file_extents, + u32 *num_file_extents); + +#endif diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h new file mode 100644 index 000000000..27cd973d3 --- /dev/null +++ b/include/linux/ceph/types.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_TYPES_H +#define _FS_CEPH_TYPES_H + +/* needed before including ceph_fs.h */ +#include +#include +#include +#include + +#include +#include +#include + +/* + * Identify inodes by both their ino AND snapshot id (a u64). + */ +struct ceph_vino { + u64 ino; + u64 snap; +}; + + +/* context for the caps reservation mechanism */ +struct ceph_cap_reservation { + int count; +}; + + +#endif diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h new file mode 100644 index 000000000..406000496 --- /dev/null +++ b/include/linux/cfag12864b.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Filename: cfag12864b.h + * Version: 0.1.0 + * Description: cfag12864b LCD driver header + * + * Author: Copyright (C) Miguel Ojeda Sandonis + * Date: 2006-10-12 + */ + +#ifndef _CFAG12864B_H_ +#define _CFAG12864B_H_ + +#define CFAG12864B_WIDTH (128) +#define CFAG12864B_HEIGHT (64) +#define CFAG12864B_CONTROLLERS (2) +#define CFAG12864B_PAGES (8) +#define CFAG12864B_ADDRESSES (64) +#define CFAG12864B_SIZE ((CFAG12864B_CONTROLLERS) * \ + (CFAG12864B_PAGES) * \ + (CFAG12864B_ADDRESSES)) + +/* + * The driver will blit this buffer to the LCD + * + * Its size is CFAG12864B_SIZE. + */ +extern unsigned char * cfag12864b_buffer; + +/* + * Get the refresh rate of the LCD + * + * Returns the refresh rate (hertz). + */ +extern unsigned int cfag12864b_getrate(void); + +/* + * Enable refreshing + * + * Returns 0 if successful (anyone was using it), + * or != 0 if failed (someone is using it). + */ +extern unsigned char cfag12864b_enable(void); + +/* + * Disable refreshing + * + * You should call this only when you finish using the LCD. + */ +extern void cfag12864b_disable(void); + +/* + * Is enabled refreshing? (is anyone using the module?) + * + * Returns 0 if refreshing is not enabled (anyone is using it), + * or != 0 if refreshing is enabled (someone is using it). + * + * Useful for buffer read-only modules. + */ +extern unsigned char cfag12864b_isenabled(void); + +/* + * Is the module inited? 
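A short usage sketch of the cfag12864b API documented above (the caller is hypothetical; memset() and the -EBUSY error code are illustrative choices, and a real user would keep refreshing enabled while it keeps drawing):

static int example_fill_display(void)
{
	if (!cfag12864b_isinited() || cfag12864b_enable())
		return -EBUSY;	/* driver not ready, or already in use */

	/* turn every pixel on; while refreshing is enabled the driver
	 * blits this buffer to the LCD at cfag12864b_getrate() hertz */
	memset(cfag12864b_buffer, 0xFF, CFAG12864B_SIZE);

	cfag12864b_disable();	/* done with the LCD */
	return 0;
}
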
+ */ +extern unsigned char cfag12864b_isinited(void); + +#endif /* _CFAG12864B_H_ */ + diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h new file mode 100644 index 000000000..f92264d1e --- /dev/null +++ b/include/linux/cgroup-defs.h @@ -0,0 +1,841 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/cgroup-defs.h - basic definitions for cgroup + * + * This file provides basic type and interface. Include this file directly + * only if necessary to avoid cyclic dependencies. + */ +#ifndef _LINUX_CGROUP_DEFS_H +#define _LINUX_CGROUP_DEFS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_CGROUPS + +struct cgroup; +struct cgroup_root; +struct cgroup_subsys; +struct cgroup_taskset; +struct kernfs_node; +struct kernfs_ops; +struct kernfs_open_file; +struct seq_file; + +#define MAX_CGROUP_TYPE_NAMELEN 32 +#define MAX_CGROUP_ROOT_NAMELEN 64 +#define MAX_CFTYPE_NAME 64 + +/* define the enumeration of all cgroup subsystems */ +#define SUBSYS(_x) _x ## _cgrp_id, +enum cgroup_subsys_id { +#include + CGROUP_SUBSYS_COUNT, +}; +#undef SUBSYS + +/* bits in struct cgroup_subsys_state flags field */ +enum { + CSS_NO_REF = (1 << 0), /* no reference counting for this css */ + CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ + CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ + CSS_VISIBLE = (1 << 3), /* css is visible to userland */ + CSS_DYING = (1 << 4), /* css is dying */ +}; + +/* bits in struct cgroup flags field */ +enum { + /* Control Group requires release notifications to userspace */ + CGRP_NOTIFY_ON_RELEASE, + /* + * Clone the parent's configuration when creating a new child + * cpuset cgroup. For historical reasons, this option can be + * specified at mount time and thus is implemented here. + */ + CGRP_CPUSET_CLONE_CHILDREN, +}; + +/* cgroup_root->flags */ +enum { + CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ + CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ + + /* + * Consider namespaces as delegation boundaries. If this flag is + * set, controller specific interface files in a namespace root + * aren't writeable from inside the namespace. + */ + CGRP_ROOT_NS_DELEGATE = (1 << 3), + + /* + * Enable cpuset controller in v1 cgroup to use v2 behavior. + */ + CGRP_ROOT_CPUSET_V2_MODE = (1 << 4), +}; + +/* cftype->flags */ +enum { + CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ + CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ + CFTYPE_NS_DELEGATABLE = (1 << 2), /* writeable beyond delegation boundaries */ + + CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ + CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ + + /* internal flags, do not use outside cgroup core proper */ + __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ + __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ +}; + +/* + * cgroup_file is the handle for a file instance created in a cgroup which + * is used, for example, to generate file changed notifications. This can + * be obtained by setting cftype->file_offset. + */ +struct cgroup_file { + /* do not access any fields from outside cgroup core */ + struct kernfs_node *kn; + unsigned long notified_at; + struct timer_list notify_timer; +}; + +/* + * Per-subsystem/per-cgroup state maintained by the system. 
This is the + * fundamental structural building block that controllers deal with. + * + * Fields marked with "PI:" are public and immutable and may be accessed + * directly without synchronization. + */ +struct cgroup_subsys_state { + /* PI: the cgroup that this css is attached to */ + struct cgroup *cgroup; + + /* PI: the cgroup subsystem that this css is attached to */ + struct cgroup_subsys *ss; + + /* reference count - access via css_[try]get() and css_put() */ + struct percpu_ref refcnt; + + /* siblings list anchored at the parent's ->children */ + struct list_head sibling; + struct list_head children; + + /* flush target list anchored at cgrp->rstat_css_list */ + struct list_head rstat_css_node; + + /* + * PI: Subsys-unique ID. 0 is unused and root is always 1. The + * matching css can be looked up using css_from_id(). + */ + int id; + + unsigned int flags; + + /* + * Monotonically increasing unique serial number which defines a + * uniform order among all csses. It's guaranteed that all + * ->children lists are in the ascending order of ->serial_nr and + * used to allow interrupting and resuming iterations. + */ + u64 serial_nr; + + /* + * Incremented by online self and children. Used to guarantee that + * parents are not offlined before their children. + */ + atomic_t online_cnt; + + /* percpu_ref killing and RCU release */ + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + + /* + * PI: the parent css. Placed here for cache proximity to following + * fields of the containing structure. + */ + struct cgroup_subsys_state *parent; +}; + +/* + * A css_set is a structure holding pointers to a set of + * cgroup_subsys_state objects. This saves space in the task struct + * object and speeds up fork()/exit(), since a single inc/dec and a + * list_add()/del() can bump the reference count on the entire cgroup + * set for a task. + */ +struct css_set { + /* + * Set of subsystem states, one for each subsystem. This array is + * immutable after creation apart from the init_css_set during + * subsystem registration (at boot time). + */ + struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + + /* reference count */ + refcount_t refcount; + + /* + * For a domain cgroup, the following points to self. If threaded, + * to the matching cset of the nearest domain ancestor. The + * dom_cset provides access to the domain cgroup and its csses to + * which domain level resource consumptions should be charged. + */ + struct css_set *dom_cset; + + /* the default cgroup associated with this css_set */ + struct cgroup *dfl_cgrp; + + /* internal task count, protected by css_set_lock */ + int nr_tasks; + + /* + * Lists running through all tasks using this cgroup group. + * mg_tasks lists tasks which belong to this cset but are in the + * process of being migrated out or in. Protected by + * css_set_rwsem, but, during migration, once tasks are moved to + * mg_tasks, it can be read safely while holding cgroup_mutex. + */ + struct list_head tasks; + struct list_head mg_tasks; + struct list_head dying_tasks; + + /* all css_task_iters currently walking this cset */ + struct list_head task_iters; + + /* + * On the default hierarhcy, ->subsys[ssid] may point to a css + * attached to an ancestor instead of the cgroup this css_set is + * associated with. The following node is anchored at + * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to + * iterate through all css's attached to a given cgroup. 
+ */ + struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; + + /* all threaded csets whose ->dom_cset points to this cset */ + struct list_head threaded_csets; + struct list_head threaded_csets_node; + + /* + * List running through all cgroup groups in the same hash + * slot. Protected by css_set_lock + */ + struct hlist_node hlist; + + /* + * List of cgrp_cset_links pointing at cgroups referenced from this + * css_set. Protected by css_set_lock. + */ + struct list_head cgrp_links; + + /* + * List of csets participating in the on-going migration either as + * source or destination. Protected by cgroup_mutex. + */ + struct list_head mg_preload_node; + struct list_head mg_node; + + /* + * If this cset is acting as the source of migration the following + * two fields are set. mg_src_cgrp and mg_dst_cgrp are + * respectively the source and destination cgroups of the on-going + * migration. mg_dst_cset is the destination cset the target tasks + * on this cset should be migrated to. Protected by cgroup_mutex. + */ + struct cgroup *mg_src_cgrp; + struct cgroup *mg_dst_cgrp; + struct css_set *mg_dst_cset; + + /* dead and being drained, ignore for migration */ + bool dead; + + /* For RCU-protected deletion */ + struct rcu_head rcu_head; +}; + +struct cgroup_base_stat { + struct task_cputime cputime; +}; + +/* + * rstat - cgroup scalable recursive statistics. Accounting is done + * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the + * hierarchy on reads. + * + * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are + * linked into the updated tree. On the following read, propagation only + * considers and consumes the updated tree. This makes reading O(the + * number of descendants which have been active since last read) instead of + * O(the total number of descendants). + * + * This is important because there can be a lot of (draining) cgroups which + * aren't active and stat may be read frequently. The combination can + * become very expensive. By propagating selectively, increasing reading + * frequency decreases the cost of each read. + * + * This struct hosts both the fields which implement the above - + * updated_children and updated_next - and the fields which track basic + * resource statistics on top of it - bsync, bstat and last_bstat. + */ +struct cgroup_rstat_cpu { + /* + * ->bsync protects ->bstat. These are the only fields which get + * updated in the hot path. + */ + struct u64_stats_sync bsync; + struct cgroup_base_stat bstat; + + /* + * Snapshots at the last reading. These are used to calculate the + * deltas to propagate to the global counters. + */ + struct cgroup_base_stat last_bstat; + + /* + * Child cgroups with stat updates on this cpu since the last read + * are linked on the parent's ->updated_children through + * ->updated_next. + * + * In addition to being more compact, singly-linked list pointing + * to the cgroup makes it unnecessary for each per-cpu struct to + * point back to the associated cgroup. + * + * Protected by per-cpu cgroup_rstat_cpu_lock. + */ + struct cgroup *updated_children; /* terminated by self cgroup */ + struct cgroup *updated_next; /* NULL iff not on the list */ +}; + +struct cgroup { + /* self css with NULL ->ss, points back to this cgroup */ + struct cgroup_subsys_state self; + + unsigned long flags; /* "unsigned long" so bitops work */ + + /* + * idr allocated in-hierarchy ID. + * + * ID 0 is not used, the ID of the root cgroup is always 1, and a + * new cgroup will be assigned with a smallest available ID. 
+ * + * Allocating/Removing ID must be protected by cgroup_mutex. + */ + int id; + + /* + * The depth this cgroup is at. The root is at depth zero and each + * step down the hierarchy increments the level. This along with + * ancestor_ids[] can determine whether a given cgroup is a + * descendant of another without traversing the hierarchy. + */ + int level; + + /* Maximum allowed descent tree depth */ + int max_depth; + + /* + * Keep track of total numbers of visible and dying descent cgroups. + * Dying cgroups are cgroups which were deleted by a user, + * but are still existing because someone else is holding a reference. + * max_descendants is a maximum allowed number of descent cgroups. + * + * nr_descendants and nr_dying_descendants are protected + * by cgroup_mutex and css_set_lock. It's fine to read them holding + * any of cgroup_mutex and css_set_lock; for writing both locks + * should be held. + */ + int nr_descendants; + int nr_dying_descendants; + int max_descendants; + + /* + * Each non-empty css_set associated with this cgroup contributes + * one to nr_populated_csets. The counter is zero iff this cgroup + * doesn't have any tasks. + * + * All children which have non-zero nr_populated_csets and/or + * nr_populated_children of their own contribute one to either + * nr_populated_domain_children or nr_populated_threaded_children + * depending on their type. Each counter is zero iff all cgroups + * of the type in the subtree proper don't have any tasks. + */ + int nr_populated_csets; + int nr_populated_domain_children; + int nr_populated_threaded_children; + + int nr_threaded_children; /* # of live threaded child cgroups */ + + struct kernfs_node *kn; /* cgroup kernfs entry */ + struct cgroup_file procs_file; /* handle for "cgroup.procs" */ + struct cgroup_file events_file; /* handle for "cgroup.events" */ + + /* + * The bitmask of subsystems enabled on the child cgroups. + * ->subtree_control is the one configured through + * "cgroup.subtree_control" while ->child_ss_mask is the effective + * one which may have more subsystems enabled. Controller knobs + * are made available iff it's enabled in ->subtree_control. + */ + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; + + /* Private pointers for each registered subsystem */ + struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; + + struct cgroup_root *root; + + /* + * List of cgrp_cset_links pointing at css_sets with tasks in this + * cgroup. Protected by css_set_lock. + */ + struct list_head cset_links; + + /* + * On the default hierarchy, a css_set for a cgroup with some + * susbsys disabled will point to css's which are associated with + * the closest ancestor which has the subsys enabled. The + * following lists all css_sets which point to this cgroup's css + * for the given subsystem. + */ + struct list_head e_csets[CGROUP_SUBSYS_COUNT]; + + /* + * If !threaded, self. If threaded, it points to the nearest + * domain ancestor. Inside a threaded subtree, cgroups are exempt + * from process granularity and no-internal-task constraint. + * Domain level resource consumptions which aren't tied to a + * specific task are charged to the dom_cgrp. 
+ */ + struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; /* used while enabling threaded */ + + /* per-cpu recursive resource statistics */ + struct cgroup_rstat_cpu __percpu *rstat_cpu; + struct list_head rstat_css_list; + + /* cgroup basic resource statistics */ + struct cgroup_base_stat pending_bstat; /* pending from children */ + struct cgroup_base_stat bstat; + struct prev_cputime prev_cputime; /* for printing out cputime */ + + /* + * list of pidlists, up to two for each namespace (one for procs, one + * for tasks); created on demand. + */ + struct list_head pidlists; + struct mutex pidlist_mutex; + + /* used to wait for offlining of csses */ + wait_queue_head_t offline_waitq; + + /* used to schedule release agent */ + struct work_struct release_agent_work; + + /* used to store eBPF programs */ + struct cgroup_bpf bpf; + + /* If there is block congestion on this cgroup. */ + atomic_t congestion_count; + + /* ids of the ancestors at each level including self */ + int ancestor_ids[]; +}; + +/* + * A cgroup_root represents the root of a cgroup hierarchy, and may be + * associated with a kernfs_root to form an active hierarchy. This is + * internal to cgroup core. Don't access directly from controllers. + */ +struct cgroup_root { + struct kernfs_root *kf_root; + + /* The bitmask of subsystems attached to this hierarchy */ + unsigned int subsys_mask; + + /* Unique id for this hierarchy. */ + int hierarchy_id; + + /* The root cgroup. Root is destroyed on its release. */ + struct cgroup cgrp; + + /* for cgrp->ancestor_ids[0] */ + int cgrp_ancestor_id_storage; + + /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ + atomic_t nr_cgrps; + + /* A list running through the active hierarchies */ + struct list_head root_list; + + /* Hierarchy-specific flags */ + unsigned int flags; + + /* IDs for cgroups in this hierarchy */ + struct idr cgroup_idr; + + /* The path to use for release notifications. */ + char release_agent_path[PATH_MAX]; + + /* The name for this hierarchy - may be empty */ + char name[MAX_CGROUP_ROOT_NAMELEN]; +}; + +/* + * struct cftype: handler definitions for cgroup control files + * + * When reading/writing to a file: + * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata + * - the 'cftype' of the file is file->f_path.dentry->d_fsdata + */ +struct cftype { + /* + * By convention, the name should begin with the name of the + * subsystem, followed by a period. Zero length string indicates + * end of cftype array. + */ + char name[MAX_CFTYPE_NAME]; + unsigned long private; + + /* + * The maximum length of string, excluding trailing nul, that can + * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed. + */ + size_t max_write_len; + + /* CFTYPE_* flags */ + unsigned int flags; + + /* + * If non-zero, should contain the offset from the start of css to + * a struct cgroup_file field. cgroup will record the handle of + * the created file into it. The recorded handle can be used as + * long as the containing css remains accessible. + */ + unsigned int file_offset; + + /* + * Fields used for internal bookkeeping. Initialized automatically + * during registration. + */ + struct cgroup_subsys *ss; /* NULL for cgroup core files */ + struct list_head node; /* anchored at ss->cfts */ + struct kernfs_ops *kf_ops; + + int (*open)(struct kernfs_open_file *of); + void (*release)(struct kernfs_open_file *of); + + /* + * read_u64() is a shortcut for the common case of returning a + * single integer. 
Use it in place of read() + */ + u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft); + /* + * read_s64() is a signed version of read_u64() + */ + s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft); + + /* generic seq_file read interface */ + int (*seq_show)(struct seq_file *sf, void *v); + + /* optional ops, implement all or none */ + void *(*seq_start)(struct seq_file *sf, loff_t *ppos); + void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos); + void (*seq_stop)(struct seq_file *sf, void *v); + + /* + * write_u64() is a shortcut for the common case of accepting + * a single integer (as parsed by simple_strtoull) from + * userspace. Use in place of write(); return 0 or error. + */ + int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft, + u64 val); + /* + * write_s64() is a signed version of write_u64() + */ + int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft, + s64 val); + + /* + * write() is the generic write callback which maps directly to + * kernfs write operation and overrides all other operations. + * Maximum write size is determined by ->max_write_len. Use + * of_css/cft() to access the associated css and cft. + */ + ssize_t (*write)(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lock_class_key lockdep_key; +#endif +}; + +/* + * Control Group subsystem type. + * See Documentation/cgroup-v1/cgroups.txt for details + */ +struct cgroup_subsys { + struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); + int (*css_online)(struct cgroup_subsys_state *css); + void (*css_offline)(struct cgroup_subsys_state *css); + void (*css_released)(struct cgroup_subsys_state *css); + void (*css_free)(struct cgroup_subsys_state *css); + void (*css_reset)(struct cgroup_subsys_state *css); + void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu); + int (*css_extra_stat_show)(struct seq_file *seq, + struct cgroup_subsys_state *css); + + int (*can_attach)(struct cgroup_taskset *tset); + void (*cancel_attach)(struct cgroup_taskset *tset); + void (*attach)(struct cgroup_taskset *tset); + void (*post_attach)(void); + int (*can_fork)(struct task_struct *task); + void (*cancel_fork)(struct task_struct *task); + void (*fork)(struct task_struct *task); + void (*exit)(struct task_struct *task); + void (*release)(struct task_struct *task); + void (*bind)(struct cgroup_subsys_state *root_css); + + bool early_init:1; + + /* + * If %true, the controller, on the default hierarchy, doesn't show + * up in "cgroup.controllers" or "cgroup.subtree_control", is + * implicitly enabled on all cgroups on the default hierarchy, and + * bypasses the "no internal process" constraint. This is for + * utility type controllers which is transparent to userland. + * + * An implicit controller can be stolen from the default hierarchy + * anytime and thus must be okay with offline csses from previous + * hierarchies coexisting with csses for the current one. + */ + bool implicit_on_dfl:1; + + /* + * If %true, the controller, supports threaded mode on the default + * hierarchy. In a threaded subtree, both process granularity and + * no-internal-process constraint are ignored and a threaded + * controllers should be able to handle that. + * + * Note that as an implicit controller is automatically enabled on + * all cgroups on the default hierarchy, it should also be + * threaded. implicit && !threaded is not supported. 
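To tie the cftype and cgroup_subsys pieces together, a minimal hypothetical controller might look like the sketch below (kzalloc/kfree come from <linux/slab.h>; real controllers are additionally listed in cgroup_subsys.h so that SUBSYS() generates their ID, which is omitted here):

/* per-cgroup state: css embedded first so container_of() works */
struct example_cgroup {
	struct cgroup_subsys_state css;
	u64 usage;
};

static struct cgroup_subsys_state *
example_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct example_cgroup *ex = kzalloc(sizeof(*ex), GFP_KERNEL);

	return ex ? &ex->css : ERR_PTR(-ENOMEM);
}

static void example_css_free(struct cgroup_subsys_state *css)
{
	kfree(container_of(css, struct example_cgroup, css));
}

static u64 example_usage_read(struct cgroup_subsys_state *css,
			      struct cftype *cft)
{
	return container_of(css, struct example_cgroup, css)->usage;
}

static struct cftype example_files[] = {
	{
		.name = "example.usage",
		.read_u64 = example_usage_read,
	},
	{ }	/* zero-length name terminates the array */
};

struct cgroup_subsys example_cgrp_subsys = {
	.css_alloc	= example_css_alloc,
	.css_free	= example_css_free,
	.dfl_cftypes	= example_files,
	.legacy_cftypes	= example_files,
};
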
+ */ + bool threaded:1; + + /* + * If %false, this subsystem is properly hierarchical - + * configuration, resource accounting and restriction on a parent + * cgroup cover those of its children. If %true, hierarchy support + * is broken in some ways - some subsystems ignore hierarchy + * completely while others are only implemented half-way. + * + * It's now disallowed to create nested cgroups if the subsystem is + * broken and cgroup core will emit a warning message on such + * cases. Eventually, all subsystems will be made properly + * hierarchical and this will go away. + */ + bool broken_hierarchy:1; + bool warned_broken_hierarchy:1; + + /* the following two fields are initialized automtically during boot */ + int id; + const char *name; + + /* optional, initialized automatically during boot if not set */ + const char *legacy_name; + + /* link to parent, protected by cgroup_lock() */ + struct cgroup_root *root; + + /* idr for css->id */ + struct idr css_idr; + + /* + * List of cftypes. Each entry is the first entry of an array + * terminated by zero length name. + */ + struct list_head cfts; + + /* + * Base cftypes which are automatically registered. The two can + * point to the same array. + */ + struct cftype *dfl_cftypes; /* for the default hierarchy */ + struct cftype *legacy_cftypes; /* for the legacy hierarchies */ + + /* + * A subsystem may depend on other subsystems. When such subsystem + * is enabled on a cgroup, the depended-upon subsystems are enabled + * together if available. Subsystems enabled due to dependency are + * not visible to userland until explicitly enabled. The following + * specifies the mask of subsystems that this one depends on. + */ + unsigned int depends_on; +}; + +extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; + +/** + * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups + * @tsk: target task + * + * Allows cgroup operations to synchronize against threadgroup changes + * using a percpu_rw_semaphore. + */ +static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) +{ + percpu_down_read(&cgroup_threadgroup_rwsem); +} + +/** + * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups + * @tsk: target task + * + * Counterpart of cgroup_threadcgroup_change_begin(). + */ +static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) +{ + percpu_up_read(&cgroup_threadgroup_rwsem); +} + +#else /* CONFIG_CGROUPS */ + +#define CGROUP_SUBSYS_COUNT 0 + +static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) +{ + might_sleep(); +} + +static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} + +#endif /* CONFIG_CGROUPS */ + +#ifdef CONFIG_SOCK_CGROUP_DATA + +/* + * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains + * per-socket cgroup information except for memcg association. + * + * On legacy hierarchies, net_prio and net_cls controllers directly set + * attributes on each sock which can then be tested by the network layer. + * On the default hierarchy, each sock is associated with the cgroup it was + * created in and the networking layer can match the cgroup directly. + * + * To avoid carrying all three cgroup related fields separately in sock, + * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer. 
+ * On boot, sock_cgroup_data records the cgroup that the sock was created + * in so that cgroup2 matches can be made; however, once either net_prio or + * net_cls starts being used, the area is overriden to carry prioidx and/or + * classid. The two modes are distinguished by whether the lowest bit is + * set. Clear bit indicates cgroup pointer while set bit prioidx and + * classid. + * + * While userland may start using net_prio or net_cls at any time, once + * either is used, cgroup2 matching no longer works. There is no reason to + * mix the two and this is in line with how legacy and v2 compatibility is + * handled. On mode switch, cgroup references which are already being + * pointed to by socks may be leaked. While this can be remedied by adding + * synchronization around sock_cgroup_data, given that the number of leaked + * cgroups is bound and highly unlikely to be high, this seems to be the + * better trade-off. + */ +struct sock_cgroup_data { + union { +#ifdef __LITTLE_ENDIAN + struct { + u8 is_data : 1; + u8 no_refcnt : 1; + u8 unused : 6; + u8 padding; + u16 prioidx; + u32 classid; + } __packed; +#else + struct { + u32 classid; + u16 prioidx; + u8 padding; + u8 unused : 6; + u8 no_refcnt : 1; + u8 is_data : 1; + } __packed; +#endif + u64 val; + }; +}; + +/* + * There's a theoretical window where the following accessors race with + * updaters and return part of the previous pointer as the prioidx or + * classid. Such races are short-lived and the result isn't critical. + */ +static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) +{ + /* fallback to 1 which is always the ID of the root cgroup */ + return (skcd->is_data & 1) ? skcd->prioidx : 1; +} + +static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd) +{ + /* fallback to 0 which is the unconfigured default classid */ + return (skcd->is_data & 1) ? skcd->classid : 0; +} + +/* + * If invoked concurrently, the updaters may clobber each other. The + * caller is responsible for synchronization. + */ +static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, + u16 prioidx) +{ + struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; + + if (sock_cgroup_prioidx(&skcd_buf) == prioidx) + return; + + if (!(skcd_buf.is_data & 1)) { + skcd_buf.val = 0; + skcd_buf.is_data = 1; + } + + skcd_buf.prioidx = prioidx; + WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ +} + +static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd, + u32 classid) +{ + struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; + + if (sock_cgroup_classid(&skcd_buf) == classid) + return; + + if (!(skcd_buf.is_data & 1)) { + skcd_buf.val = 0; + skcd_buf.is_data = 1; + } + + skcd_buf.classid = classid; + WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ +} + +#else /* CONFIG_SOCK_CGROUP_DATA */ + +struct sock_cgroup_data { +}; + +#endif /* CONFIG_SOCK_CGROUP_DATA */ + +#endif /* _LINUX_CGROUP_DEFS_H */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h new file mode 100644 index 000000000..02da4e1de --- /dev/null +++ b/include/linux/cgroup.h @@ -0,0 +1,889 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CGROUP_H +#define _LINUX_CGROUP_H +/* + * cgroup interface + * + * Copyright (C) 2003 BULL SA + * Copyright (C) 2004-2006 Silicon Graphics, Inc. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_CGROUPS + +/* + * All weight knobs on the default hierarhcy should use the following min, + * default and max values. The default value is the logarithmic center of + * MIN and MAX and allows 100x to be expressed in both directions. + */ +#define CGROUP_WEIGHT_MIN 1 +#define CGROUP_WEIGHT_DFL 100 +#define CGROUP_WEIGHT_MAX 10000 + +/* walk only threadgroup leaders */ +#define CSS_TASK_ITER_PROCS (1U << 0) +/* walk all threaded css_sets in the domain */ +#define CSS_TASK_ITER_THREADED (1U << 1) + +/* internal flags */ +#define CSS_TASK_ITER_SKIPPED (1U << 16) + +/* a css_task_iter should be treated as an opaque object */ +struct css_task_iter { + struct cgroup_subsys *ss; + unsigned int flags; + + struct list_head *cset_pos; + struct list_head *cset_head; + + struct list_head *tcset_pos; + struct list_head *tcset_head; + + struct list_head *task_pos; + struct list_head *tasks_head; + struct list_head *mg_tasks_head; + struct list_head *dying_tasks_head; + + struct list_head *cur_tasks_head; + struct css_set *cur_cset; + struct css_set *cur_dcset; + struct task_struct *cur_task; + struct list_head iters_node; /* css_set->task_iters */ +}; + +extern struct cgroup_root cgrp_dfl_root; +extern struct css_set init_css_set; + +#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; +#include +#undef SUBSYS + +#define SUBSYS(_x) \ + extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \ + extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key; +#include +#undef SUBSYS + +/** + * cgroup_subsys_enabled - fast test on whether a subsys is enabled + * @ss: subsystem in question + */ +#define cgroup_subsys_enabled(ss) \ + static_branch_likely(&ss ## _enabled_key) + +/** + * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy + * @ss: subsystem in question + */ +#define cgroup_subsys_on_dfl(ss) \ + static_branch_likely(&ss ## _on_dfl_key) + +bool css_has_online_children(struct cgroup_subsys_state *css); +struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); +struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, + struct cgroup_subsys *ss); +struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, + struct cgroup_subsys *ss); + +struct cgroup *cgroup_get_from_path(const char *path); +struct cgroup *cgroup_get_from_fd(int fd); + +int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); +int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); + +int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); +int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); +int cgroup_rm_cftypes(struct cftype *cfts); +void cgroup_file_notify(struct cgroup_file *cfile); + +int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); +int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); +int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *tsk); + +void cgroup_fork(struct task_struct *p); +extern int cgroup_can_fork(struct task_struct *p); +extern void cgroup_cancel_fork(struct task_struct *p); +extern void cgroup_post_fork(struct task_struct *p); +void cgroup_exit(struct task_struct *p); +void cgroup_release(struct task_struct *p); +void cgroup_free(struct task_struct *p); + +int 
cgroup_init_early(void); +int cgroup_init(void); + +/* + * Iteration helpers and macros. + */ + +struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos, + struct cgroup_subsys_state *parent); +struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos, + struct cgroup_subsys_state *css); +struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos); +struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos, + struct cgroup_subsys_state *css); + +struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset, + struct cgroup_subsys_state **dst_cssp); +struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, + struct cgroup_subsys_state **dst_cssp); + +void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, + struct css_task_iter *it); +struct task_struct *css_task_iter_next(struct css_task_iter *it); +void css_task_iter_end(struct css_task_iter *it); + +/** + * css_for_each_child - iterate through children of a css + * @pos: the css * to use as the loop cursor + * @parent: css whose children to walk + * + * Walk @parent's children. Must be called under rcu_read_lock(). + * + * If a subsystem synchronizes ->css_online() and the start of iteration, a + * css which finished ->css_online() is guaranteed to be visible in the + * future iterations and will stay visible until the last reference is put. + * A css which hasn't finished ->css_online() or already finished + * ->css_offline() may show up during traversal. It's each subsystem's + * responsibility to synchronize against on/offlining. + * + * It is allowed to temporarily drop RCU read lock during iteration. The + * caller is responsible for ensuring that @pos remains accessible until + * the start of the next iteration by, for example, bumping the css refcnt. + */ +#define css_for_each_child(pos, parent) \ + for ((pos) = css_next_child(NULL, (parent)); (pos); \ + (pos) = css_next_child((pos), (parent))) + +/** + * css_for_each_descendant_pre - pre-order walk of a css's descendants + * @pos: the css * to use as the loop cursor + * @root: css whose descendants to walk + * + * Walk @root's descendants. @root is included in the iteration and the + * first node to be visited. Must be called under rcu_read_lock(). + * + * If a subsystem synchronizes ->css_online() and the start of iteration, a + * css which finished ->css_online() is guaranteed to be visible in the + * future iterations and will stay visible until the last reference is put. + * A css which hasn't finished ->css_online() or already finished + * ->css_offline() may show up during traversal. It's each subsystem's + * responsibility to synchronize against on/offlining. + * + * For example, the following guarantees that a descendant can't escape + * state updates of its ancestors. + * + * my_online(@css) + * { + * Lock @css's parent and @css; + * Inherit state from the parent; + * Unlock both. + * } + * + * my_update_state(@css) + * { + * css_for_each_descendant_pre(@pos, @css) { + * Lock @pos; + * if (@pos == @css) + * Update @css's state; + * else + * Verify @pos is alive and inherit state from its parent; + * Unlock @pos; + * } + * } + * + * As long as the inheriting step, including checking the parent state, is + * enclosed inside @pos locking, double-locking the parent isn't necessary + * while inheriting. 
The state update to the parent is guaranteed to be + * visible by walking order and, as long as inheriting operations to the + * same @pos are atomic to each other, multiple updates racing each other + * still result in the correct state. It's guaranateed that at least one + * inheritance happens for any css after the latest update to its parent. + * + * If checking parent's state requires locking the parent, each inheriting + * iteration should lock and unlock both @pos->parent and @pos. + * + * Alternatively, a subsystem may choose to use a single global lock to + * synchronize ->css_online() and ->css_offline() against tree-walking + * operations. + * + * It is allowed to temporarily drop RCU read lock during iteration. The + * caller is responsible for ensuring that @pos remains accessible until + * the start of the next iteration by, for example, bumping the css refcnt. + */ +#define css_for_each_descendant_pre(pos, css) \ + for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \ + (pos) = css_next_descendant_pre((pos), (css))) + +/** + * css_for_each_descendant_post - post-order walk of a css's descendants + * @pos: the css * to use as the loop cursor + * @css: css whose descendants to walk + * + * Similar to css_for_each_descendant_pre() but performs post-order + * traversal instead. @root is included in the iteration and the last + * node to be visited. + * + * If a subsystem synchronizes ->css_online() and the start of iteration, a + * css which finished ->css_online() is guaranteed to be visible in the + * future iterations and will stay visible until the last reference is put. + * A css which hasn't finished ->css_online() or already finished + * ->css_offline() may show up during traversal. It's each subsystem's + * responsibility to synchronize against on/offlining. + * + * Note that the walk visibility guarantee example described in pre-order + * walk doesn't apply the same to post-order walks. + */ +#define css_for_each_descendant_post(pos, css) \ + for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \ + (pos) = css_next_descendant_post((pos), (css))) + +/** + * cgroup_taskset_for_each - iterate cgroup_taskset + * @task: the loop cursor + * @dst_css: the destination css + * @tset: taskset to iterate + * + * @tset may contain multiple tasks and they may belong to multiple + * processes. + * + * On the v2 hierarchy, there may be tasks from multiple processes and they + * may not share the source or destination csses. + * + * On traditional hierarchies, when there are multiple tasks in @tset, if a + * task of a process is in @tset, all tasks of the process are in @tset. + * Also, all are guaranteed to share the same source and destination csses. + * + * Iteration is not in any specific order. + */ +#define cgroup_taskset_for_each(task, dst_css, tset) \ + for ((task) = cgroup_taskset_first((tset), &(dst_css)); \ + (task); \ + (task) = cgroup_taskset_next((tset), &(dst_css))) + +/** + * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset + * @leader: the loop cursor + * @dst_css: the destination css + * @tset: taskset to iterate + * + * Iterate threadgroup leaders of @tset. For single-task migrations, @tset + * may not contain any. + */ +#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \ + for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \ + (leader); \ + (leader) = cgroup_taskset_next((tset), &(dst_css))) \ + if ((leader) != (leader)->group_leader) \ + ; \ + else + +/* + * Inline functions. 
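[Editor's illustrative sketch, not part of the patch] Tying the css_for_each_descendant_pre() contract above to concrete code: a minimal version of the my_online()/my_update_state() pattern for a hypothetical subsystem. example_css, example_css_of() and example_update_state() are illustrative names, and the per-css spinlock is an assumption of this sketch.

#include <linux/cgroup.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct example_css {				/* hypothetical per-css state */
	struct cgroup_subsys_state css;
	spinlock_t lock;
	unsigned long limit;
};

static struct example_css *example_css_of(struct cgroup_subsys_state *css)
{
	return container_of(css, struct example_css, css);
}

/* Push a new limit down the subtree, following the comment above. */
static void example_update_state(struct example_css *root, unsigned long limit)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, &root->css) {
		struct example_css *ec = example_css_of(pos);

		spin_lock(&ec->lock);
		if (pos == &root->css)
			ec->limit = limit;	/* update the root itself */
		else if (!css_is_dying(pos))
			/* the parent was already visited, so its limit is current */
			ec->limit = example_css_of(pos->parent)->limit;
		spin_unlock(&ec->lock);
	}
	rcu_read_unlock();
}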
+ */ + +/** + * css_get - obtain a reference on the specified css + * @css: target css + * + * The caller must already have a reference. + */ +static inline void css_get(struct cgroup_subsys_state *css) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_get(&css->refcnt); +} + +/** + * css_get_many - obtain references on the specified css + * @css: target css + * @n: number of references to get + * + * The caller must already have a reference. + */ +static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_get_many(&css->refcnt, n); +} + +/** + * css_tryget - try to obtain a reference on the specified css + * @css: target css + * + * Obtain a reference on @css unless it already has reached zero and is + * being released. This function doesn't care whether @css is on or + * offline. The caller naturally needs to ensure that @css is accessible + * but doesn't have to be holding a reference on it - IOW, RCU protected + * access is good enough for this function. Returns %true if a reference + * count was successfully obtained; %false otherwise. + */ +static inline bool css_tryget(struct cgroup_subsys_state *css) +{ + if (!(css->flags & CSS_NO_REF)) + return percpu_ref_tryget(&css->refcnt); + return true; +} + +/** + * css_tryget_online - try to obtain a reference on the specified css if online + * @css: target css + * + * Obtain a reference on @css if it's online. The caller naturally needs + * to ensure that @css is accessible but doesn't have to be holding a + * reference on it - IOW, RCU protected access is good enough for this + * function. Returns %true if a reference count was successfully obtained; + * %false otherwise. + */ +static inline bool css_tryget_online(struct cgroup_subsys_state *css) +{ + if (!(css->flags & CSS_NO_REF)) + return percpu_ref_tryget_live(&css->refcnt); + return true; +} + +/** + * css_is_dying - test whether the specified css is dying + * @css: target css + * + * Test whether @css is in the process of offlining or already offline. In + * most cases, ->css_online() and ->css_offline() callbacks should be + * enough; however, the actual offline operations are RCU delayed and this + * test returns %true also when @css is scheduled to be offlined. + * + * This is useful, for example, when the use case requires synchronous + * behavior with respect to cgroup removal. cgroup removal schedules css + * offlining but the css can seem alive while the operation is being + * delayed. If the delay affects user visible semantics, this test can be + * used to resolve the situation. + */ +static inline bool css_is_dying(struct cgroup_subsys_state *css) +{ + return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); +} + +/** + * css_put - put a css reference + * @css: target css + * + * Put a reference obtained via css_get() and css_tryget_online(). + */ +static inline void css_put(struct cgroup_subsys_state *css) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_put(&css->refcnt); +} + +/** + * css_put_many - put css references + * @css: target css + * @n: number of references to put + * + * Put references obtained via css_get() and css_tryget_online(). 
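[Editor's illustrative sketch, not part of the patch] A short usage note for the reference-counting helpers above: every successful get must eventually be paired with css_put(). example_use_css() and its subsystem-id parameter are hypothetical; task_get_css() is declared a little further below in this header.

#include <linux/cgroup.h>
#include <linux/sched.h>

static void example_use_css(struct task_struct *task, int ssid)
{
	struct cgroup_subsys_state *css;

	css = task_get_css(task, ssid);	/* always returns a pinned, valid css */
	/* ... use css->cgroup here while holding the reference ... */
	css_put(css);			/* pair the get with a put */
}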
+ */ +static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_put_many(&css->refcnt, n); +} + +static inline void cgroup_get(struct cgroup *cgrp) +{ + css_get(&cgrp->self); +} + +static inline bool cgroup_tryget(struct cgroup *cgrp) +{ + return css_tryget(&cgrp->self); +} + +static inline void cgroup_put(struct cgroup *cgrp) +{ + css_put(&cgrp->self); +} + +/** + * task_css_set_check - obtain a task's css_set with extra access conditions + * @task: the task to obtain css_set for + * @__c: extra condition expression to be passed to rcu_dereference_check() + * + * A task's css_set is RCU protected, initialized and exited while holding + * task_lock(), and can only be modified while holding both cgroup_mutex + * and task_lock() while the task is alive. This macro verifies that the + * caller is inside proper critical section and returns @task's css_set. + * + * The caller can also specify additional allowed conditions via @__c, such + * as locks used during the cgroup_subsys::attach() methods. + */ +#ifdef CONFIG_PROVE_RCU +extern struct mutex cgroup_mutex; +extern spinlock_t css_set_lock; +#define task_css_set_check(task, __c) \ + rcu_dereference_check((task)->cgroups, \ + lockdep_is_held(&cgroup_mutex) || \ + lockdep_is_held(&css_set_lock) || \ + ((task)->flags & PF_EXITING) || (__c)) +#else +#define task_css_set_check(task, __c) \ + rcu_dereference((task)->cgroups) +#endif + +/** + * task_css_check - obtain css for (task, subsys) w/ extra access conds + * @task: the target task + * @subsys_id: the target subsystem ID + * @__c: extra condition expression to be passed to rcu_dereference_check() + * + * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The + * synchronization rules are the same as task_css_set_check(). + */ +#define task_css_check(task, subsys_id, __c) \ + task_css_set_check((task), (__c))->subsys[(subsys_id)] + +/** + * task_css_set - obtain a task's css_set + * @task: the task to obtain css_set for + * + * See task_css_set_check(). + */ +static inline struct css_set *task_css_set(struct task_struct *task) +{ + return task_css_set_check(task, false); +} + +/** + * task_css - obtain css for (task, subsys) + * @task: the target task + * @subsys_id: the target subsystem ID + * + * See task_css_check(). + */ +static inline struct cgroup_subsys_state *task_css(struct task_struct *task, + int subsys_id) +{ + return task_css_check(task, subsys_id, false); +} + +/** + * task_get_css - find and get the css for (task, subsys) + * @task: the target task + * @subsys_id: the target subsystem ID + * + * Find the css for the (@task, @subsys_id) combination, increment a + * reference on and return it. This function is guaranteed to return a + * valid css. The returned css may already have been offlined. + */ +static inline struct cgroup_subsys_state * +task_get_css(struct task_struct *task, int subsys_id) +{ + struct cgroup_subsys_state *css; + + rcu_read_lock(); + while (true) { + css = task_css(task, subsys_id); + /* + * Can't use css_tryget_online() here. A task which has + * PF_EXITING set may stay associated with an offline css. + * If such task calls this function, css_tryget_online() + * will keep failing. 
+ */ + if (likely(css_tryget(css))) + break; + cpu_relax(); + } + rcu_read_unlock(); + return css; +} + +/** + * task_css_is_root - test whether a task belongs to the root css + * @task: the target task + * @subsys_id: the target subsystem ID + * + * Test whether @task belongs to the root css on the specified subsystem. + * May be invoked in any context. + */ +static inline bool task_css_is_root(struct task_struct *task, int subsys_id) +{ + return task_css_check(task, subsys_id, true) == + init_css_set.subsys[subsys_id]; +} + +static inline struct cgroup *task_cgroup(struct task_struct *task, + int subsys_id) +{ + return task_css(task, subsys_id)->cgroup; +} + +static inline struct cgroup *task_dfl_cgroup(struct task_struct *task) +{ + return task_css_set(task)->dfl_cgrp; +} + +static inline struct cgroup *cgroup_parent(struct cgroup *cgrp) +{ + struct cgroup_subsys_state *parent_css = cgrp->self.parent; + + if (parent_css) + return container_of(parent_css, struct cgroup, self); + return NULL; +} + +/** + * cgroup_is_descendant - test ancestry + * @cgrp: the cgroup to be tested + * @ancestor: possible ancestor of @cgrp + * + * Test whether @cgrp is a descendant of @ancestor. It also returns %true + * if @cgrp == @ancestor. This function is safe to call as long as @cgrp + * and @ancestor are accessible. + */ +static inline bool cgroup_is_descendant(struct cgroup *cgrp, + struct cgroup *ancestor) +{ + if (cgrp->root != ancestor->root || cgrp->level < ancestor->level) + return false; + return cgrp->ancestor_ids[ancestor->level] == ancestor->id; +} + +/** + * cgroup_ancestor - find ancestor of cgroup + * @cgrp: cgroup to find ancestor of + * @ancestor_level: level of ancestor to find starting from root + * + * Find ancestor of cgroup at specified level starting from root if it exists + * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at + * @ancestor_level. + * + * This function is safe to call as long as @cgrp is accessible. + */ +static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp, + int ancestor_level) +{ + struct cgroup *ptr; + + if (cgrp->level < ancestor_level) + return NULL; + + for (ptr = cgrp; + ptr && ptr->level > ancestor_level; + ptr = cgroup_parent(ptr)) + ; + + if (ptr && ptr->level == ancestor_level) + return ptr; + + return NULL; +} + +/** + * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry + * @task: the task to be tested + * @ancestor: possible ancestor of @task's cgroup + * + * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor. + * It follows all the same rules as cgroup_is_descendant, and only applies + * to the default hierarchy. 
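[Editor's illustrative sketch, not part of the patch] A small combination of the two helpers above (example_under_level() is a hypothetical name): resolve @root's ancestor at a given level with cgroup_ancestor(), then test membership with cgroup_is_descendant().

#include <linux/cgroup.h>

static bool example_under_level(struct cgroup *cgrp, struct cgroup *root,
				int level)
{
	struct cgroup *anc = cgroup_ancestor(root, level);

	/* NULL means @root has no ancestor at @level */
	return anc && cgroup_is_descendant(cgrp, anc);
}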
+ */ +static inline bool task_under_cgroup_hierarchy(struct task_struct *task, + struct cgroup *ancestor) +{ + struct css_set *cset = task_css_set(task); + + return cgroup_is_descendant(cset->dfl_cgrp, ancestor); +} + +/* no synchronization, the result can only be used as a hint */ +static inline bool cgroup_is_populated(struct cgroup *cgrp) +{ + return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children + + cgrp->nr_populated_threaded_children; +} + +/* returns ino associated with a cgroup */ +static inline ino_t cgroup_ino(struct cgroup *cgrp) +{ + return cgrp->kn->id.ino; +} + +/* cft/css accessors for cftype->write() operation */ +static inline struct cftype *of_cft(struct kernfs_open_file *of) +{ + return of->kn->priv; +} + +struct cgroup_subsys_state *of_css(struct kernfs_open_file *of); + +/* cft/css accessors for cftype->seq_*() operations */ +static inline struct cftype *seq_cft(struct seq_file *seq) +{ + return of_cft(seq->private); +} + +static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq) +{ + return of_css(seq->private); +} + +/* + * Name / path handling functions. All are thin wrappers around the kernfs + * counterparts and can be called under any context. + */ + +static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen) +{ + return kernfs_name(cgrp->kn, buf, buflen); +} + +static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen) +{ + return kernfs_path(cgrp->kn, buf, buflen); +} + +static inline void pr_cont_cgroup_name(struct cgroup *cgrp) +{ + pr_cont_kernfs_name(cgrp->kn); +} + +static inline void pr_cont_cgroup_path(struct cgroup *cgrp) +{ + pr_cont_kernfs_path(cgrp->kn); +} + +static inline void cgroup_init_kthreadd(void) +{ + /* + * kthreadd is inherited by all kthreads, keep it in the root so + * that the new kthreads are guaranteed to stay in the root until + * initialization is finished. + */ + current->no_cgroup_migration = 1; +} + +static inline void cgroup_kthread_ready(void) +{ + /* + * This kthread finished initialization. The creator should have + * set PF_NO_SETAFFINITY if this kthread should stay in the root. 
+ */ + current->no_cgroup_migration = 0; +} + +static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp) +{ + return &cgrp->kn->id; +} + +void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, + char *buf, size_t buflen); +#else /* !CONFIG_CGROUPS */ + +struct cgroup_subsys_state; +struct cgroup; + +static inline void css_put(struct cgroup_subsys_state *css) {} +static inline int cgroup_attach_task_all(struct task_struct *from, + struct task_struct *t) { return 0; } +static inline int cgroupstats_build(struct cgroupstats *stats, + struct dentry *dentry) { return -EINVAL; } + +static inline void cgroup_fork(struct task_struct *p) {} +static inline int cgroup_can_fork(struct task_struct *p) { return 0; } +static inline void cgroup_cancel_fork(struct task_struct *p) {} +static inline void cgroup_post_fork(struct task_struct *p) {} +static inline void cgroup_exit(struct task_struct *p) {} +static inline void cgroup_release(struct task_struct *p) {} +static inline void cgroup_free(struct task_struct *p) {} + +static inline int cgroup_init_early(void) { return 0; } +static inline int cgroup_init(void) { return 0; } +static inline void cgroup_init_kthreadd(void) {} +static inline void cgroup_kthread_ready(void) {} +static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp) +{ + return NULL; +} + +static inline bool task_under_cgroup_hierarchy(struct task_struct *task, + struct cgroup *ancestor) +{ + return true; +} + +static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, + char *buf, size_t buflen) {} +#endif /* !CONFIG_CGROUPS */ + +#ifdef CONFIG_CGROUPS +/* + * cgroup scalable recursive statistics. + */ +void cgroup_rstat_updated(struct cgroup *cgrp, int cpu); +void cgroup_rstat_flush(struct cgroup *cgrp); +void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp); +void cgroup_rstat_flush_hold(struct cgroup *cgrp); +void cgroup_rstat_flush_release(void); + +/* + * Basic resource stats. + */ +#ifdef CONFIG_CGROUP_CPUACCT +void cpuacct_charge(struct task_struct *tsk, u64 cputime); +void cpuacct_account_field(struct task_struct *tsk, int index, u64 val); +#else +static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} +static inline void cpuacct_account_field(struct task_struct *tsk, int index, + u64 val) {} +#endif + +void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec); +void __cgroup_account_cputime_field(struct cgroup *cgrp, + enum cpu_usage_stat index, u64 delta_exec); + +static inline void cgroup_account_cputime(struct task_struct *task, + u64 delta_exec) +{ + struct cgroup *cgrp; + + cpuacct_charge(task, delta_exec); + + rcu_read_lock(); + cgrp = task_dfl_cgroup(task); + if (cgroup_parent(cgrp)) + __cgroup_account_cputime(cgrp, delta_exec); + rcu_read_unlock(); +} + +static inline void cgroup_account_cputime_field(struct task_struct *task, + enum cpu_usage_stat index, + u64 delta_exec) +{ + struct cgroup *cgrp; + + cpuacct_account_field(task, index, delta_exec); + + rcu_read_lock(); + cgrp = task_dfl_cgroup(task); + if (cgroup_parent(cgrp)) + __cgroup_account_cputime_field(cgrp, index, delta_exec); + rcu_read_unlock(); +} + +#else /* CONFIG_CGROUPS */ + +static inline void cgroup_account_cputime(struct task_struct *task, + u64 delta_exec) {} +static inline void cgroup_account_cputime_field(struct task_struct *task, + enum cpu_usage_stat index, + u64 delta_exec) {} + +#endif /* CONFIG_CGROUPS */ + +/* + * sock->sk_cgrp_data handling. 
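[Editor's illustrative sketch, not part of the patch] A usage sketch for the accounting helpers above, roughly what a scheduler-side caller does when charging execution time. example_charge_runtime() and example_charge_user_time() are hypothetical wrappers; CPUTIME_USER is a real value of enum cpu_usage_stat from <linux/kernel_stat.h>.

#include <linux/cgroup.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>

static void example_charge_runtime(struct task_struct *curr, u64 delta_exec)
{
	/* charges cpuacct (v1) and, on cgroup2, the rstat tree of curr's cgroup */
	cgroup_account_cputime(curr, delta_exec);
}

static void example_charge_user_time(struct task_struct *curr, u64 cputime)
{
	/* same, but folded into a specific cpustat field */
	cgroup_account_cputime_field(curr, CPUTIME_USER, cputime);
}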
For more info, see sock_cgroup_data + * definition in cgroup-defs.h. + */ +#ifdef CONFIG_SOCK_CGROUP_DATA + +#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) +extern spinlock_t cgroup_sk_update_lock; +#endif + +void cgroup_sk_alloc_disable(void); +void cgroup_sk_alloc(struct sock_cgroup_data *skcd); +void cgroup_sk_clone(struct sock_cgroup_data *skcd); +void cgroup_sk_free(struct sock_cgroup_data *skcd); + +static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) +{ +#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) + unsigned long v; + + /* + * @skcd->val is 64bit but the following is safe on 32bit too as we + * just need the lower ulong to be written and read atomically. + */ + v = READ_ONCE(skcd->val); + + if (v & 3) + return &cgrp_dfl_root.cgrp; + + return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; +#else + return (struct cgroup *)(unsigned long)skcd->val; +#endif +} + +#else /* CONFIG_CGROUP_DATA */ + +static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} +static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {} +static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} + +#endif /* CONFIG_CGROUP_DATA */ + +struct cgroup_namespace { + refcount_t count; + struct ns_common ns; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct css_set *root_cset; +}; + +extern struct cgroup_namespace init_cgroup_ns; + +#ifdef CONFIG_CGROUPS + +void free_cgroup_ns(struct cgroup_namespace *ns); + +struct cgroup_namespace *copy_cgroup_ns(unsigned long flags, + struct user_namespace *user_ns, + struct cgroup_namespace *old_ns); + +int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen, + struct cgroup_namespace *ns); + +#else /* !CONFIG_CGROUPS */ + +static inline void free_cgroup_ns(struct cgroup_namespace *ns) { } +static inline struct cgroup_namespace * +copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns, + struct cgroup_namespace *old_ns) +{ + return old_ns; +} + +#endif /* !CONFIG_CGROUPS */ + +static inline void get_cgroup_ns(struct cgroup_namespace *ns) +{ + if (ns) + refcount_inc(&ns->count); +} + +static inline void put_cgroup_ns(struct cgroup_namespace *ns) +{ + if (ns && refcount_dec_and_test(&ns->count)) + free_cgroup_ns(ns); +} + +#endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h new file mode 100644 index 000000000..e94290b29 --- /dev/null +++ b/include/linux/cgroup_rdma.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2016 Parav Pandit + * + * This file is subject to the terms and conditions of version 2 of the GNU + * General Public License. See the file COPYING in the main directory of the + * Linux distribution for more details. + */ + +#ifndef _CGROUP_RDMA_H +#define _CGROUP_RDMA_H + +#include + +enum rdmacg_resource_type { + RDMACG_RESOURCE_HCA_HANDLE, + RDMACG_RESOURCE_HCA_OBJECT, + RDMACG_RESOURCE_MAX, +}; + +#ifdef CONFIG_CGROUP_RDMA + +struct rdma_cgroup { + struct cgroup_subsys_state css; + + /* + * head to keep track of all resource pools + * that belongs to this cgroup. 
+ */ + struct list_head rpools; +}; + +struct rdmacg_device { + struct list_head dev_node; + struct list_head rpools; + char *name; +}; + +/* + * APIs for RDMA/IB stack to publish when a device wants to + * participate in resource accounting + */ +int rdmacg_register_device(struct rdmacg_device *device); +void rdmacg_unregister_device(struct rdmacg_device *device); + +/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */ +int rdmacg_try_charge(struct rdma_cgroup **rdmacg, + struct rdmacg_device *device, + enum rdmacg_resource_type index); +void rdmacg_uncharge(struct rdma_cgroup *cg, + struct rdmacg_device *device, + enum rdmacg_resource_type index); +#endif /* CONFIG_CGROUP_RDMA */ +#endif /* _CGROUP_RDMA_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h new file mode 100644 index 000000000..acb77dcff --- /dev/null +++ b/include/linux/cgroup_subsys.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * List of cgroup subsystems. + * + * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. + */ + +/* + * This file *must* be included with SUBSYS() defined. + */ + +#if IS_ENABLED(CONFIG_CPUSETS) +SUBSYS(cpuset) +#endif + +#if IS_ENABLED(CONFIG_CGROUP_SCHED) +SUBSYS(cpu) +#endif + +#if IS_ENABLED(CONFIG_CGROUP_CPUACCT) +SUBSYS(cpuacct) +#endif + +#if IS_ENABLED(CONFIG_BLK_CGROUP) +SUBSYS(io) +#endif + +#if IS_ENABLED(CONFIG_MEMCG) +SUBSYS(memory) +#endif + +#if IS_ENABLED(CONFIG_CGROUP_DEVICE) +SUBSYS(devices) +#endif + +#if IS_ENABLED(CONFIG_CGROUP_FREEZER) +SUBSYS(freezer) +#endif + +#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID) +SUBSYS(net_cls) +#endif + +#if IS_ENABLED(CONFIG_CGROUP_PERF) +SUBSYS(perf_event) +#endif + +#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) +SUBSYS(net_prio) +#endif + +#if IS_ENABLED(CONFIG_CGROUP_HUGETLB) +SUBSYS(hugetlb) +#endif + +#if IS_ENABLED(CONFIG_CGROUP_PIDS) +SUBSYS(pids) +#endif + +#if IS_ENABLED(CONFIG_CGROUP_RDMA) +SUBSYS(rdma) +#endif + +/* + * The following subsystems are not supported on the default hierarchy. + */ +#if IS_ENABLED(CONFIG_CGROUP_DEBUG) +SUBSYS(debug) +#endif + +/* + * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. + */ diff --git a/include/linux/circ_buf.h b/include/linux/circ_buf.h new file mode 100644 index 000000000..b3233e820 --- /dev/null +++ b/include/linux/circ_buf.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * See Documentation/core-api/circular-buffers.rst for more information. + */ + +#ifndef _LINUX_CIRC_BUF_H +#define _LINUX_CIRC_BUF_H 1 + +struct circ_buf { + char *buf; + int head; + int tail; +}; + +/* Return count in buffer. */ +#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1)) + +/* Return space available, 0..size-1. We always leave one free char + as a completely full buffer has head == tail, which is the same as + empty. */ +#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size)) + +/* Return count up to the end of the buffer. Carefully avoid + accessing head and tail more than once, so they can change + underneath us without returning inconsistent results. */ +#define CIRC_CNT_TO_END(head,tail,size) \ + ({int end = (size) - (tail); \ + int n = ((head) + end) & ((size)-1); \ + n < end ? n : end;}) + +/* Return space available up to the end of the buffer. */ +#define CIRC_SPACE_TO_END(head,tail,size) \ + ({int end = (size) - 1 - (head); \ + int n = (end + (tail)) & ((size)-1); \ + n <= end ? 
n : end+1;}) + +#endif /* _LINUX_CIRC_BUF_H */ diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h new file mode 100644 index 000000000..5f5730c1d --- /dev/null +++ b/include/linux/cleancache.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CLEANCACHE_H +#define _LINUX_CLEANCACHE_H + +#include +#include +#include + +#define CLEANCACHE_NO_POOL -1 +#define CLEANCACHE_NO_BACKEND -2 +#define CLEANCACHE_NO_BACKEND_SHARED -3 + +#define CLEANCACHE_KEY_MAX 6 + +/* + * cleancache requires every file with a page in cleancache to have a + * unique key unless/until the file is removed/truncated. For some + * filesystems, the inode number is unique, but for "modern" filesystems + * an exportable filehandle is required (see exportfs.h) + */ +struct cleancache_filekey { + union { + ino_t ino; + __u32 fh[CLEANCACHE_KEY_MAX]; + u32 key[CLEANCACHE_KEY_MAX]; + } u; +}; + +struct cleancache_ops { + int (*init_fs)(size_t); + int (*init_shared_fs)(uuid_t *uuid, size_t); + int (*get_page)(int, struct cleancache_filekey, + pgoff_t, struct page *); + void (*put_page)(int, struct cleancache_filekey, + pgoff_t, struct page *); + void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t); + void (*invalidate_inode)(int, struct cleancache_filekey); + void (*invalidate_fs)(int); +}; + +extern int cleancache_register_ops(const struct cleancache_ops *ops); +extern void __cleancache_init_fs(struct super_block *); +extern void __cleancache_init_shared_fs(struct super_block *); +extern int __cleancache_get_page(struct page *); +extern void __cleancache_put_page(struct page *); +extern void __cleancache_invalidate_page(struct address_space *, struct page *); +extern void __cleancache_invalidate_inode(struct address_space *); +extern void __cleancache_invalidate_fs(struct super_block *); + +#ifdef CONFIG_CLEANCACHE +#define cleancache_enabled (1) +static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) +{ + return mapping->host->i_sb->cleancache_poolid >= 0; +} +static inline bool cleancache_fs_enabled(struct page *page) +{ + return cleancache_fs_enabled_mapping(page->mapping); +} +#else +#define cleancache_enabled (0) +#define cleancache_fs_enabled(_page) (0) +#define cleancache_fs_enabled_mapping(_page) (0) +#endif + +/* + * The shim layer provided by these inline functions allows the compiler + * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE + * is disabled, to a single global variable check if CONFIG_CLEANCACHE + * is enabled but no cleancache "backend" has dynamically enabled it, + * and, for the most frequent cleancache ops, to a single global variable + * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled + * and a cleancache backend has dynamically enabled cleancache, but the + * filesystem referenced by that cleancache op has not enabled cleancache. + * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially + * no measurable performance impact. 
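[Editor's illustrative sketch, not part of the patch] Looking back at the CIRC_* helpers in circ_buf.h above: a minimal single-threaded producer, assuming a power-of-two buffer size. example_produce() and EXAMPLE_BUF_SIZE are hypothetical, and a real producer/consumer pair also needs the memory barriers described in Documentation/core-api/circular-buffers.rst.

#include <linux/circ_buf.h>
#include <linux/string.h>

#define EXAMPLE_BUF_SIZE 256		/* must be a power of two for the masks */

/* Copy as much of @src as currently fits; returns the number of bytes queued. */
static int example_produce(struct circ_buf *cb, const char *src, int len)
{
	int queued = 0;

	while (len > 0 && CIRC_SPACE(cb->head, cb->tail, EXAMPLE_BUF_SIZE) > 0) {
		/* largest contiguous chunk before the index wraps around */
		int chunk = CIRC_SPACE_TO_END(cb->head, cb->tail, EXAMPLE_BUF_SIZE);

		if (chunk > len)
			chunk = len;
		memcpy(cb->buf + cb->head, src, chunk);
		cb->head = (cb->head + chunk) & (EXAMPLE_BUF_SIZE - 1);
		src += chunk;
		len -= chunk;
		queued += chunk;
	}
	return queued;
}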
+ */ + +static inline void cleancache_init_fs(struct super_block *sb) +{ + if (cleancache_enabled) + __cleancache_init_fs(sb); +} + +static inline void cleancache_init_shared_fs(struct super_block *sb) +{ + if (cleancache_enabled) + __cleancache_init_shared_fs(sb); +} + +static inline int cleancache_get_page(struct page *page) +{ + if (cleancache_enabled && cleancache_fs_enabled(page)) + return __cleancache_get_page(page); + return -1; +} + +static inline void cleancache_put_page(struct page *page) +{ + if (cleancache_enabled && cleancache_fs_enabled(page)) + __cleancache_put_page(page); +} + +static inline void cleancache_invalidate_page(struct address_space *mapping, + struct page *page) +{ + /* careful... page->mapping is NULL sometimes when this is called */ + if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) + __cleancache_invalidate_page(mapping, page); +} + +static inline void cleancache_invalidate_inode(struct address_space *mapping) +{ + if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) + __cleancache_invalidate_inode(mapping); +} + +static inline void cleancache_invalidate_fs(struct super_block *sb) +{ + if (cleancache_enabled) + __cleancache_invalidate_fs(sb); +} + +#endif /* _LINUX_CLEANCACHE_H */ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h new file mode 100644 index 000000000..d1b6d2c3a --- /dev/null +++ b/include/linux/clk-provider.h @@ -0,0 +1,1018 @@ +/* + * linux/include/linux/clk-provider.h + * + * Copyright (c) 2010-2011 Jeremy Kerr + * Copyright (C) 2011-2012 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_CLK_PROVIDER_H +#define __LINUX_CLK_PROVIDER_H + +#include +#include +#include + +#ifdef CONFIG_COMMON_CLK + +/* + * flags used across common struct clk. these flags should only affect the + * top-level framework. custom flags for dealing with hardware specifics + * belong in struct clk_foo + * + * Please update clk_flags[] in drivers/clk/clk.c when making changes here! + */ +#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */ +#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ +#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ +#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ + /* unused */ +#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */ +#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ +#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ +#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ +#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ +#define CLK_SET_RATE_UNGATE BIT(10) /* clock needs to run to set rate */ +#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */ +/* parents need enable during gate/ungate, set rate and re-parent */ +#define CLK_OPS_PARENT_ENABLE BIT(12) +/* duty cycle call may be forwarded to the parent clock */ +#define CLK_DUTY_CYCLE_PARENT BIT(13) + +struct clk; +struct clk_hw; +struct clk_core; +struct dentry; + +/** + * struct clk_rate_request - Structure encoding the clk constraints that + * a clock user might require. + * + * @rate: Requested clock rate. This field will be adjusted by + * clock drivers according to hardware capabilities. + * @min_rate: Minimum rate imposed by clk users. 
+ * @max_rate: Maximum rate imposed by clk users. + * @best_parent_rate: The best parent rate a parent can provide to fulfill the + * requested constraints. + * @best_parent_hw: The most appropriate parent clock that fulfills the + * requested constraints. + * + */ +struct clk_rate_request { + unsigned long rate; + unsigned long min_rate; + unsigned long max_rate; + unsigned long best_parent_rate; + struct clk_hw *best_parent_hw; +}; + +/** + * struct clk_duty - Struture encoding the duty cycle ratio of a clock + * + * @num: Numerator of the duty cycle ratio + * @den: Denominator of the duty cycle ratio + */ +struct clk_duty { + unsigned int num; + unsigned int den; +}; + +/** + * struct clk_ops - Callback operations for hardware clocks; these are to + * be provided by the clock implementation, and will be called by drivers + * through the clk_* api. + * + * @prepare: Prepare the clock for enabling. This must not return until + * the clock is fully prepared, and it's safe to call clk_enable. + * This callback is intended to allow clock implementations to + * do any initialisation that may sleep. Called with + * prepare_lock held. + * + * @unprepare: Release the clock from its prepared state. This will typically + * undo any work done in the @prepare callback. Called with + * prepare_lock held. + * + * @is_prepared: Queries the hardware to determine if the clock is prepared. + * This function is allowed to sleep. Optional, if this op is not + * set then the prepare count will be used. + * + * @unprepare_unused: Unprepare the clock atomically. Only called from + * clk_disable_unused for prepare clocks with special needs. + * Called with prepare mutex held. This function may sleep. + * + * @enable: Enable the clock atomically. This must not return until the + * clock is generating a valid clock signal, usable by consumer + * devices. Called with enable_lock held. This function must not + * sleep. + * + * @disable: Disable the clock atomically. Called with enable_lock held. + * This function must not sleep. + * + * @is_enabled: Queries the hardware to determine if the clock is enabled. + * This function must not sleep. Optional, if this op is not + * set then the enable count will be used. + * + * @disable_unused: Disable the clock atomically. Only called from + * clk_disable_unused for gate clocks with special needs. + * Called with enable_lock held. This function must not + * sleep. + * + * @recalc_rate Recalculate the rate of this clock, by querying hardware. The + * parent rate is an input parameter. It is up to the caller to + * ensure that the prepare_mutex is held across this call. + * Returns the calculated rate. Optional, but recommended - if + * this op is not set then clock rate will be initialized to 0. + * + * @round_rate: Given a target rate as input, returns the closest rate actually + * supported by the clock. The parent rate is an input/output + * parameter. + * + * @determine_rate: Given a target rate as input, returns the closest rate + * actually supported by the clock, and optionally the parent clock + * that should be used to provide the clock rate. + * + * @set_parent: Change the input source of this clock; for clocks with multiple + * possible parents specify a new parent by passing in the index + * as a u8 corresponding to the parent in either the .parent_names + * or .parents arrays. This function in affect translates an + * array index into the value programmed into the hardware. + * Returns 0 on success, -EERROR otherwise. 
+ * + * @get_parent: Queries the hardware to determine the parent of a clock. The + * return value is a u8 which specifies the index corresponding to + * the parent clock. This index can be applied to either the + * .parent_names or .parents arrays. In short, this function + * translates the parent value read from hardware into an array + * index. Currently only called when the clock is initialized by + * __clk_init. This callback is mandatory for clocks with + * multiple parents. It is optional (and unnecessary) for clocks + * with 0 or 1 parents. + * + * @set_rate: Change the rate of this clock. The requested rate is specified + * by the second argument, which should typically be the return + * of .round_rate call. The third argument gives the parent rate + * which is likely helpful for most .set_rate implementation. + * Returns 0 on success, -EERROR otherwise. + * + * @set_rate_and_parent: Change the rate and the parent of this clock. The + * requested rate is specified by the second argument, which + * should typically be the return of .round_rate call. The + * third argument gives the parent rate which is likely helpful + * for most .set_rate_and_parent implementation. The fourth + * argument gives the parent index. This callback is optional (and + * unnecessary) for clocks with 0 or 1 parents as well as + * for clocks that can tolerate switching the rate and the parent + * separately via calls to .set_parent and .set_rate. + * Returns 0 on success, -EERROR otherwise. + * + * @recalc_accuracy: Recalculate the accuracy of this clock. The clock accuracy + * is expressed in ppb (parts per billion). The parent accuracy is + * an input parameter. + * Returns the calculated accuracy. Optional - if this op is not + * set then clock accuracy will be initialized to parent accuracy + * or 0 (perfect clock) if clock has no parent. + * + * @get_phase: Queries the hardware to get the current phase of a clock. + * Returned values are 0-359 degrees on success, negative + * error codes on failure. + * + * @set_phase: Shift the phase this clock signal in degrees specified + * by the second argument. Valid values for degrees are + * 0-359. Return 0 on success, otherwise -EERROR. + * + * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio + * of a clock. Returned values denominator cannot be 0 and must be + * superior or equal to the numerator. + * + * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by + * the numerator (2nd argurment) and denominator (3rd argument). + * Argument must be a valid ratio (denominator > 0 + * and >= numerator) Return 0 on success, otherwise -EERROR. + * + * @init: Perform platform-specific initialization magic. + * This is not not used by any of the basic clock types. + * Please consider other ways of solving initialization problems + * before using this callback, as its use is discouraged. + * + * @debug_init: Set up type-specific debugfs entries for this clock. This + * is called once, after the debugfs directory entry for this + * clock has been created. The dentry pointer representing that + * directory is provided as an argument. Called with + * prepare_lock held. Returns 0 on success, -EERROR otherwise. + * + * + * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow + * implementations to split any work between atomic (enable) and sleepable + * (prepare) contexts. If enabling a clock requires code that might sleep, + * this must be done in clk_prepare. 
Clock enable code that will never be + * called in a sleepable context may be implemented in clk_enable. + * + * Typically, drivers will call clk_prepare when a clock may be needed later + * (eg. when a device is opened), and clk_enable when the clock is actually + * required (eg. from an interrupt). Note that clk_prepare MUST have been + * called before clk_enable. + */ +struct clk_ops { + int (*prepare)(struct clk_hw *hw); + void (*unprepare)(struct clk_hw *hw); + int (*is_prepared)(struct clk_hw *hw); + void (*unprepare_unused)(struct clk_hw *hw); + int (*enable)(struct clk_hw *hw); + void (*disable)(struct clk_hw *hw); + int (*is_enabled)(struct clk_hw *hw); + void (*disable_unused)(struct clk_hw *hw); + unsigned long (*recalc_rate)(struct clk_hw *hw, + unsigned long parent_rate); + long (*round_rate)(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate); + int (*determine_rate)(struct clk_hw *hw, + struct clk_rate_request *req); + int (*set_parent)(struct clk_hw *hw, u8 index); + u8 (*get_parent)(struct clk_hw *hw); + int (*set_rate)(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate); + int (*set_rate_and_parent)(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate, u8 index); + unsigned long (*recalc_accuracy)(struct clk_hw *hw, + unsigned long parent_accuracy); + int (*get_phase)(struct clk_hw *hw); + int (*set_phase)(struct clk_hw *hw, int degrees); + int (*get_duty_cycle)(struct clk_hw *hw, + struct clk_duty *duty); + int (*set_duty_cycle)(struct clk_hw *hw, + struct clk_duty *duty); + void (*init)(struct clk_hw *hw); + void (*debug_init)(struct clk_hw *hw, struct dentry *dentry); +}; + +/** + * struct clk_init_data - holds init data that's common to all clocks and is + * shared between the clock provider and the common clock framework. + * + * @name: clock name + * @ops: operations this clock supports + * @parent_names: array of string names for all possible parents + * @num_parents: number of possible parents + * @flags: framework-level hints and quirks + */ +struct clk_init_data { + const char *name; + const struct clk_ops *ops; + const char * const *parent_names; + u8 num_parents; + unsigned long flags; +}; + +/** + * struct clk_hw - handle for traversing from a struct clk to its corresponding + * hardware-specific structure. struct clk_hw should be declared within struct + * clk_foo and then referenced by the struct clk instance that uses struct + * clk_foo's clk_ops + * + * @core: pointer to the struct clk_core instance that points back to this + * struct clk_hw instance + * + * @clk: pointer to the per-user struct clk instance that can be used to call + * into the clk API + * + * @init: pointer to struct clk_init_data that contains the init data shared + * with the common clock framework. 
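[Editor's illustrative sketch, not part of the patch] To make the relationship between clk_ops, clk_init_data and clk_hw concrete: a minimal provider in which the hardware-specific struct embeds a clk_hw, the ops are wired up through clk_init_data, and devm_clk_hw_register() (declared later in this header) hands the result to the framework. All foo_* names are hypothetical.

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/kernel.h>

struct foo_clk {
	struct clk_hw hw;		/* handle the framework sees */
	unsigned long rate;		/* fixed rate reported to consumers */
};

#define to_foo_clk(_hw) container_of(_hw, struct foo_clk, hw)

static unsigned long foo_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	return to_foo_clk(hw)->rate;	/* root clock: parent_rate is unused */
}

static const struct clk_ops foo_clk_ops = {
	.recalc_rate = foo_recalc_rate,
};

static int foo_clk_register(struct device *dev, struct foo_clk *foo,
			    const char *name, unsigned long rate)
{
	struct clk_init_data init = {
		.name = name,
		.ops = &foo_clk_ops,
		.parent_names = NULL,	/* no parents: a root clock */
		.num_parents = 0,
		.flags = 0,
	};

	foo->rate = rate;
	foo->hw.init = &init;		/* only read during registration */

	return devm_clk_hw_register(dev, &foo->hw);
}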
+ */ +struct clk_hw { + struct clk_core *core; + struct clk *clk; + const struct clk_init_data *init; +}; + +/* + * DOC: Basic clock implementations common to many platforms + * + * Each basic clock hardware type is comprised of a structure describing the + * clock hardware, implementations of the relevant callbacks in struct clk_ops, + * unique flags for that hardware type, a registration function and an + * alternative macro for static initialization + */ + +/** + * struct clk_fixed_rate - fixed-rate clock + * @hw: handle between common and hardware-specific interfaces + * @fixed_rate: constant frequency of clock + */ +struct clk_fixed_rate { + struct clk_hw hw; + unsigned long fixed_rate; + unsigned long fixed_accuracy; + u8 flags; +}; + +#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw) + +extern const struct clk_ops clk_fixed_rate_ops; +struct clk *clk_register_fixed_rate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned long fixed_rate); +struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned long fixed_rate); +struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned long fixed_rate, unsigned long fixed_accuracy); +void clk_unregister_fixed_rate(struct clk *clk); +struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned long fixed_rate, unsigned long fixed_accuracy); +void clk_hw_unregister_fixed_rate(struct clk_hw *hw); + +void of_fixed_clk_setup(struct device_node *np); + +/** + * struct clk_gate - gating clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register controlling gate + * @bit_idx: single bit controlling gate + * @flags: hardware-specific flags + * @lock: register lock + * + * Clock which can gate its output. Implements .enable & .disable + * + * Flags: + * CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to + * enable the clock. Setting this flag does the opposite: setting the bit + * disable the clock and clearing it enables the clock + * CLK_GATE_HIWORD_MASK - The gate settings are only in lower 16-bit + * of this register, and mask of gate bits are in higher 16-bit of this + * register. While setting the gate bits, higher 16-bit should also be + * updated to indicate changing gate bits. 
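[Editor's illustrative sketch, not part of the patch] A registration sketch for the gate described above; the register layout, clock names and foo_* identifiers are hypothetical, and clk_hw_register_gate() is the helper declared just below.

#include <linux/clk-provider.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_gate_lock);		/* protects the shared gate register */

static struct clk_hw *foo_register_uart_gate(struct device *dev,
					     void __iomem *ctrl_reg)
{
	/*
	 * Bit 3 of @ctrl_reg enables the clock when set; if the bit were
	 * active-low instead, CLK_GATE_SET_TO_DISABLE would be passed as
	 * the clk_gate_flags argument.
	 */
	return clk_hw_register_gate(dev, "uart_gate", "periph_pll",
				    CLK_SET_RATE_PARENT,
				    ctrl_reg, 3, 0, &foo_gate_lock);
}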
+ */ +struct clk_gate { + struct clk_hw hw; + void __iomem *reg; + u8 bit_idx; + u8 flags; + spinlock_t *lock; +}; + +#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw) + +#define CLK_GATE_SET_TO_DISABLE BIT(0) +#define CLK_GATE_HIWORD_MASK BIT(1) + +extern const struct clk_ops clk_gate_ops; +struct clk *clk_register_gate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, + u8 clk_gate_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, + u8 clk_gate_flags, spinlock_t *lock); +void clk_unregister_gate(struct clk *clk); +void clk_hw_unregister_gate(struct clk_hw *hw); +int clk_gate_is_enabled(struct clk_hw *hw); + +struct clk_div_table { + unsigned int val; + unsigned int div; +}; + +/** + * struct clk_divider - adjustable divider clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register containing the divider + * @shift: shift to the divider bit field + * @width: width of the divider bit field + * @table: array of value/divider pairs, last entry should have div = 0 + * @lock: register lock + * + * Clock with an adjustable divider affecting its output frequency. Implements + * .recalc_rate, .set_rate and .round_rate + * + * Flags: + * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the + * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is + * the raw value read from the register, with the value of zero considered + * invalid, unless CLK_DIVIDER_ALLOW_ZERO is set. + * CLK_DIVIDER_POWER_OF_TWO - clock divisor is 2 raised to the value read from + * the hardware register + * CLK_DIVIDER_ALLOW_ZERO - Allow zero divisors. For dividers which have + * CLK_DIVIDER_ONE_BASED set, it is possible to end up with a zero divisor. + * Some hardware implementations gracefully handle this case and allow a + * zero divisor by not modifying their input clock + * (divide by one / bypass). + * CLK_DIVIDER_HIWORD_MASK - The divider settings are only in lower 16-bit + * of this register, and mask of divider bits are in higher 16-bit of this + * register. While setting the divider bits, higher 16-bit should also be + * updated to indicate changing divider bits. + * CLK_DIVIDER_ROUND_CLOSEST - Makes the best calculated divider to be rounded + * to the closest integer instead of the up one. + * CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should + * not be changed by the clock framework. + * CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED + * except when the value read from the register is zero, the divisor is + * 2^width of the field. 
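[Editor's illustrative sketch, not part of the patch] The divider flag semantics above are easiest to see as a value-to-divisor mapping. example_val_to_div() is a hypothetical, simplified helper; the framework's own divider_recalc_rate()/divider_get_val() declared below additionally handle lookup tables and rounding.

#include <linux/clk-provider.h>

static unsigned int example_val_to_div(unsigned int val, u8 width,
				       unsigned long flags)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return val;			/* register value is the divisor itself */
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << val;		/* divisor is 2^value */
	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return val ? val : 1 << width;	/* one-based, but 0 means 2^width */
	return val + 1;				/* default: register value plus one */
}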
+ */ +struct clk_divider { + struct clk_hw hw; + void __iomem *reg; + u8 shift; + u8 width; + u8 flags; + const struct clk_div_table *table; + spinlock_t *lock; +}; + +#define clk_div_mask(width) ((1 << (width)) - 1) +#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) + +#define CLK_DIVIDER_ONE_BASED BIT(0) +#define CLK_DIVIDER_POWER_OF_TWO BIT(1) +#define CLK_DIVIDER_ALLOW_ZERO BIT(2) +#define CLK_DIVIDER_HIWORD_MASK BIT(3) +#define CLK_DIVIDER_ROUND_CLOSEST BIT(4) +#define CLK_DIVIDER_READ_ONLY BIT(5) +#define CLK_DIVIDER_MAX_AT_ZERO BIT(6) + +extern const struct clk_ops clk_divider_ops; +extern const struct clk_ops clk_divider_ro_ops; + +unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, + unsigned int val, const struct clk_div_table *table, + unsigned long flags, unsigned long width); +long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, + unsigned long rate, unsigned long *prate, + const struct clk_div_table *table, + u8 width, unsigned long flags); +long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, + unsigned long rate, unsigned long *prate, + const struct clk_div_table *table, u8 width, + unsigned long flags, unsigned int val); +int divider_get_val(unsigned long rate, unsigned long parent_rate, + const struct clk_div_table *table, u8 width, + unsigned long flags); + +struct clk *clk_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, spinlock_t *lock); +struct clk *clk_register_divider_table(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, const struct clk_div_table *table, + spinlock_t *lock); +struct clk_hw *clk_hw_register_divider_table(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, const struct clk_div_table *table, + spinlock_t *lock); +void clk_unregister_divider(struct clk *clk); +void clk_hw_unregister_divider(struct clk_hw *hw); + +/** + * struct clk_mux - multiplexer clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register controlling multiplexer + * @table: array of register values corresponding to the parent index + * @shift: shift to multiplexer bit field + * @mask: mask of mutliplexer bit field + * @flags: hardware-specific flags + * @lock: register lock + * + * Clock with multiple selectable parents. Implements .get_parent, .set_parent + * and .recalc_rate + * + * Flags: + * CLK_MUX_INDEX_ONE - register index starts at 1, not 0 + * CLK_MUX_INDEX_BIT - register index is a single bit (power of two) + * CLK_MUX_HIWORD_MASK - The mux settings are only in lower 16-bit of this + * register, and mask of mux bits are in higher 16-bit of this register. + * While setting the mux bits, higher 16-bit should also be updated to + * indicate changing mux bits. + * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired + * frequency. 
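[Editor's illustrative sketch, not part of the patch] A registration sketch for the mux described above: a 2:1 selector in a two-bit field at shift 8. Parent names, register layout and foo_* identifiers are hypothetical; clk_hw_register_mux() is declared just below.

#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_mux_lock);		/* protects the shared mux register */

static const char * const foo_mux_parents[] = { "osc24m", "periph_pll" };

static struct clk_hw *foo_register_uart_mux(struct device *dev,
					    void __iomem *reg)
{
	return clk_hw_register_mux(dev, "uart_mux",
				   foo_mux_parents, ARRAY_SIZE(foo_mux_parents),
				   CLK_SET_RATE_PARENT,
				   reg, 8, 2,	/* shift 8, width 2 */
				   0, &foo_mux_lock);
}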
+ */ +struct clk_mux { + struct clk_hw hw; + void __iomem *reg; + u32 *table; + u32 mask; + u8 shift; + u8 flags; + spinlock_t *lock; +}; + +#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw) + +#define CLK_MUX_INDEX_ONE BIT(0) +#define CLK_MUX_INDEX_BIT BIT(1) +#define CLK_MUX_HIWORD_MASK BIT(2) +#define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */ +#define CLK_MUX_ROUND_CLOSEST BIT(4) + +extern const struct clk_ops clk_mux_ops; +extern const struct clk_ops clk_mux_ro_ops; + +struct clk *clk_register_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_mux_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_mux_flags, spinlock_t *lock); + +struct clk *clk_register_mux_table(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table, spinlock_t *lock); +struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table, spinlock_t *lock); + +int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags, + unsigned int val); +unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index); + +void clk_unregister_mux(struct clk *clk); +void clk_hw_unregister_mux(struct clk_hw *hw); + +void of_fixed_factor_clk_setup(struct device_node *node); + +/** + * struct clk_fixed_factor - fixed multiplier and divider clock + * + * @hw: handle between common and hardware-specific interfaces + * @mult: multiplier + * @div: divider + * + * Clock with a fixed multiplier and divider. The output frequency is the + * parent clock rate divided by div and multiplied by mult. + * Implements .recalc_rate, .set_rate and .round_rate + */ + +struct clk_fixed_factor { + struct clk_hw hw; + unsigned int mult; + unsigned int div; +}; + +#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw) + +extern const struct clk_ops clk_fixed_factor_ops; +struct clk *clk_register_fixed_factor(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned int mult, unsigned int div); +void clk_unregister_fixed_factor(struct clk *clk); +struct clk_hw *clk_hw_register_fixed_factor(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned int mult, unsigned int div); +void clk_hw_unregister_fixed_factor(struct clk_hw *hw); + +/** + * struct clk_fractional_divider - adjustable fractional divider clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register containing the divider + * @mshift: shift to the numerator bit field + * @mwidth: width of the numerator bit field + * @nshift: shift to the denominator bit field + * @nwidth: width of the denominator bit field + * @lock: register lock + * + * Clock with adjustable fractional divider affecting its output frequency. 
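[Editor's illustrative sketch, not part of the patch] The fractional divider's output follows the usual m/n relation, rate = parent_rate * m / n. A standalone sketch of that arithmetic with 64-bit intermediate math; example_fd_rate() is a hypothetical helper, not the framework's implementation.

#include <linux/math64.h>
#include <linux/types.h>

static unsigned long example_fd_rate(unsigned long parent_rate,
				     unsigned long m, unsigned long n)
{
	if (!n)
		return 0;	/* treat an unprogrammed divider as "no output" */

	/* keep the multiplication in 64 bits to avoid overflow */
	return (unsigned long)div64_ul((u64)parent_rate * m, n);
}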
+ */ +struct clk_fractional_divider { + struct clk_hw hw; + void __iomem *reg; + u8 mshift; + u8 mwidth; + u32 mmask; + u8 nshift; + u8 nwidth; + u32 nmask; + u8 flags; + void (*approximation)(struct clk_hw *hw, + unsigned long rate, unsigned long *parent_rate, + unsigned long *m, unsigned long *n); + spinlock_t *lock; +}; + +#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw) + +extern const struct clk_ops clk_fractional_divider_ops; +struct clk *clk_register_fractional_divider(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, + u8 clk_divider_flags, spinlock_t *lock); +struct clk_hw *clk_hw_register_fractional_divider(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, + u8 clk_divider_flags, spinlock_t *lock); +void clk_hw_unregister_fractional_divider(struct clk_hw *hw); + +/** + * struct clk_multiplier - adjustable multiplier clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register containing the multiplier + * @shift: shift to the multiplier bit field + * @width: width of the multiplier bit field + * @lock: register lock + * + * Clock with an adjustable multiplier affecting its output frequency. + * Implements .recalc_rate, .set_rate and .round_rate + * + * Flags: + * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read + * from the register, with 0 being a valid value effectively + * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is + * set, then a null multiplier will be considered as a bypass, + * leaving the parent rate unmodified. + * CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated divider to be + * rounded to the closest integer instead of the down one. 
+ */ +struct clk_multiplier { + struct clk_hw hw; + void __iomem *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; +}; + +#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw) + +#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) +#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1) + +extern const struct clk_ops clk_multiplier_ops; + +/*** + * struct clk_composite - aggregate clock of mux, divider and gate clocks + * + * @hw: handle between common and hardware-specific interfaces + * @mux_hw: handle between composite and hardware-specific mux clock + * @rate_hw: handle between composite and hardware-specific rate clock + * @gate_hw: handle between composite and hardware-specific gate clock + * @mux_ops: clock ops for mux + * @rate_ops: clock ops for rate + * @gate_ops: clock ops for gate + */ +struct clk_composite { + struct clk_hw hw; + struct clk_ops ops; + + struct clk_hw *mux_hw; + struct clk_hw *rate_hw; + struct clk_hw *gate_hw; + + const struct clk_ops *mux_ops; + const struct clk_ops *rate_ops; + const struct clk_ops *gate_ops; +}; + +#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw) + +struct clk *clk_register_composite(struct device *dev, const char *name, + const char * const *parent_names, int num_parents, + struct clk_hw *mux_hw, const struct clk_ops *mux_ops, + struct clk_hw *rate_hw, const struct clk_ops *rate_ops, + struct clk_hw *gate_hw, const struct clk_ops *gate_ops, + unsigned long flags); +void clk_unregister_composite(struct clk *clk); +struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name, + const char * const *parent_names, int num_parents, + struct clk_hw *mux_hw, const struct clk_ops *mux_ops, + struct clk_hw *rate_hw, const struct clk_ops *rate_ops, + struct clk_hw *gate_hw, const struct clk_ops *gate_ops, + unsigned long flags); +void clk_hw_unregister_composite(struct clk_hw *hw); + +/*** + * struct clk_gpio_gate - gpio gated clock + * + * @hw: handle between common and hardware-specific interfaces + * @gpiod: gpio descriptor + * + * Clock with a gpio control for enabling and disabling the parent clock. + * Implements .enable, .disable and .is_enabled + */ + +struct clk_gpio { + struct clk_hw hw; + struct gpio_desc *gpiod; +}; + +#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw) + +extern const struct clk_ops clk_gpio_gate_ops; +struct clk *clk_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, struct gpio_desc *gpiod, + unsigned long flags); +struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, struct gpio_desc *gpiod, + unsigned long flags); +void clk_hw_unregister_gpio_gate(struct clk_hw *hw); + +/** + * struct clk_gpio_mux - gpio controlled clock multiplexer + * + * @hw: see struct clk_gpio + * @gpiod: gpio descriptor to select the parent of this clock multiplexer + * + * Clock with a gpio control for selecting the parent clock. 
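[Editor's note: a sketch, not from the patch, of how the composite helper declared above is typically fed with a mux, a divider and a gate; the clk_divider and clk_gate types with clk_divider_ops/clk_gate_ops come from earlier in this header, while the field layouts, parent names and register pointer below are invented for illustration.]

#include <linux/clk-provider.h>

static struct clk_mux example_comp_mux = { .mask = 0x1, .shift = 31 };
static struct clk_divider example_comp_div = { .shift = 0, .width = 4 };
static struct clk_gate example_comp_gate = { .bit_idx = 15 };
static const char * const example_comp_parents[] = { "osc", "pll" };

static struct clk_hw *example_register_composite(struct device *dev,
						 void __iomem *reg)
{
	example_comp_mux.reg = reg;
	example_comp_div.reg = reg;
	example_comp_gate.reg = reg;

	return clk_hw_register_composite(dev, "example_comp",
			example_comp_parents,
			ARRAY_SIZE(example_comp_parents),
			&example_comp_mux.hw, &clk_mux_ops,
			&example_comp_div.hw, &clk_divider_ops,
			&example_comp_gate.hw, &clk_gate_ops,
			CLK_SET_RATE_PARENT);
}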
+ * Implements .get_parent, .set_parent and .determine_rate + */ + +extern const struct clk_ops clk_gpio_mux_ops; +struct clk *clk_register_gpio_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod, + unsigned long flags); +struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod, + unsigned long flags); +void clk_hw_unregister_gpio_mux(struct clk_hw *hw); + +/** + * clk_register - allocate a new clock, register it and return an opaque cookie + * @dev: device that is registering this clock + * @hw: link to hardware-specific clock data + * + * clk_register is the primary interface for populating the clock tree with new + * clock nodes. It returns a pointer to the newly allocated struct clk which + * cannot be dereferenced by driver code but may be used in conjuction with the + * rest of the clock API. In the event of an error clk_register will return an + * error code; drivers must test for an error code after calling clk_register. + */ +struct clk *clk_register(struct device *dev, struct clk_hw *hw); +struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw); + +int __must_check clk_hw_register(struct device *dev, struct clk_hw *hw); +int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw); + +void clk_unregister(struct clk *clk); +void devm_clk_unregister(struct device *dev, struct clk *clk); + +void clk_hw_unregister(struct clk_hw *hw); +void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw); + +/* helper functions */ +const char *__clk_get_name(const struct clk *clk); +const char *clk_hw_get_name(const struct clk_hw *hw); +struct clk_hw *__clk_get_hw(struct clk *clk); +unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); +struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw); +struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw, + unsigned int index); +unsigned int __clk_get_enable_count(struct clk *clk); +unsigned long clk_hw_get_rate(const struct clk_hw *hw); +unsigned long __clk_get_flags(struct clk *clk); +unsigned long clk_hw_get_flags(const struct clk_hw *hw); +#define clk_hw_can_set_rate_parent(hw) \ + (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) + +bool clk_hw_is_prepared(const struct clk_hw *hw); +bool clk_hw_rate_is_protected(const struct clk_hw *hw); +bool clk_hw_is_enabled(const struct clk_hw *hw); +bool __clk_is_enabled(struct clk *clk); +struct clk *__clk_lookup(const char *name); +int __clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req); +int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req); +int __clk_mux_determine_rate_closest(struct clk_hw *hw, + struct clk_rate_request *req); +int clk_mux_determine_rate_flags(struct clk_hw *hw, + struct clk_rate_request *req, + unsigned long flags); +void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); +void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, + unsigned long max_rate); + +static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) +{ + dst->clk = src->clk; + dst->core = src->core; +} + +static inline long divider_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate, + const struct clk_div_table *table, + u8 width, unsigned long flags) +{ + return divider_round_rate_parent(hw, clk_hw_get_parent(hw), + rate, prate, table, width, flags); +} + +static inline long 
divider_ro_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate, + const struct clk_div_table *table, + u8 width, unsigned long flags, + unsigned int val) +{ + return divider_ro_round_rate_parent(hw, clk_hw_get_parent(hw), + rate, prate, table, width, flags, + val); +} + +/* + * FIXME clock api without lock protection + */ +unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate); + +struct of_device_id; + +struct clk_onecell_data { + struct clk **clks; + unsigned int clk_num; +}; + +struct clk_hw_onecell_data { + unsigned int num; + struct clk_hw *hws[]; +}; + +extern struct of_device_id __clk_of_table; + +#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn) + +/* + * Use this macro when you have a driver that requires two initialization + * routines, one at of_clk_init(), and one at platform device probe + */ +#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \ + static void __init name##_of_clk_init_driver(struct device_node *np) \ + { \ + of_node_clear_flag(np, OF_POPULATED); \ + fn(np); \ + } \ + OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver) + +#define CLK_HW_INIT(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = (const char *[]) { _parent }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = NULL, \ + .num_parents = 0, \ + .ops = _ops, \ + }) + +#define CLK_FIXED_FACTOR(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + +#ifdef CONFIG_OF +int of_clk_add_provider(struct device_node *np, + struct clk *(*clk_src_get)(struct of_phandle_args *args, + void *data), + void *data); +int of_clk_add_hw_provider(struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data); +int devm_of_clk_add_hw_provider(struct device *dev, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data); +void of_clk_del_provider(struct device_node *np); +void devm_of_clk_del_provider(struct device *dev); +struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, + void *data); +struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, + void *data); +struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); +struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec, + void *data); +int of_clk_parent_fill(struct device_node *np, const char **parents, + unsigned int size); +int of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags); + +#else /* !CONFIG_OF */ + +static inline int of_clk_add_provider(struct device_node *np, + struct clk *(*clk_src_get)(struct of_phandle_args *args, + void *data), + void *data) +{ + return 0; +} +static inline int of_clk_add_hw_provider(struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data) +{ + return 0; +} +static inline int devm_of_clk_add_hw_provider(struct device *dev, + struct clk_hw *(*get)(struct 
of_phandle_args *clkspec, + void *data), + void *data) +{ + return 0; +} +static inline void of_clk_del_provider(struct device_node *np) {} +static inline void devm_of_clk_del_provider(struct device *dev) {} +static inline struct clk *of_clk_src_simple_get( + struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} +static inline struct clk_hw * +of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} +static inline struct clk *of_clk_src_onecell_get( + struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} +static inline struct clk_hw * +of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} +static inline int of_clk_parent_fill(struct device_node *np, + const char **parents, unsigned int size) +{ + return 0; +} +static inline int of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags) +{ + return 0; +} +#endif /* CONFIG_OF */ + +/* + * wrap access to peripherals in accessor routines + * for improved portability across platforms + */ + +#if IS_ENABLED(CONFIG_PPC) + +static inline u32 clk_readl(u32 __iomem *reg) +{ + return ioread32be(reg); +} + +static inline void clk_writel(u32 val, u32 __iomem *reg) +{ + iowrite32be(val, reg); +} + +#else /* platform dependent I/O accessors */ + +static inline u32 clk_readl(u32 __iomem *reg) +{ + return readl(reg); +} + +static inline void clk_writel(u32 val, u32 __iomem *reg) +{ + writel(val, reg); +} + +#endif /* platform dependent I/O accessors */ + +#endif /* CONFIG_COMMON_CLK */ +#endif /* CLK_PROVIDER_H */ diff --git a/include/linux/clk.h b/include/linux/clk.h new file mode 100644 index 000000000..4f750c481 --- /dev/null +++ b/include/linux/clk.h @@ -0,0 +1,797 @@ +/* + * linux/include/linux/clk.h + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * Copyright (C) 2011-2012 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_CLK_H +#define __LINUX_CLK_H + +#include +#include +#include + +struct device; +struct clk; +struct device_node; +struct of_phandle_args; + +/** + * DOC: clk notifier callback types + * + * PRE_RATE_CHANGE - called immediately before the clk rate is changed, + * to indicate that the rate change will proceed. Drivers must + * immediately terminate any operations that will be affected by the + * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK, + * NOTIFY_STOP or NOTIFY_BAD. + * + * ABORT_RATE_CHANGE: called if the rate change failed for some reason + * after PRE_RATE_CHANGE. In this case, all registered notifiers on + * the clk will be called with ABORT_RATE_CHANGE. Callbacks must + * always return NOTIFY_DONE or NOTIFY_OK. + * + * POST_RATE_CHANGE - called after the clk rate change has successfully + * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK. + * + */ +#define PRE_RATE_CHANGE BIT(0) +#define POST_RATE_CHANGE BIT(1) +#define ABORT_RATE_CHANGE BIT(2) + +/** + * struct clk_notifier - associate a clk with a notifier + * @clk: struct clk * to associate the notifier with + * @notifier_head: a blocking_notifier_head for this clk + * @node: linked list pointers + * + * A list of struct clk_notifier is maintained by the notifier code. + * An entry is created whenever code registers the first notifier on a + * particular @clk. 
Future notifiers on that @clk are added to the + * @notifier_head. + */ +struct clk_notifier { + struct clk *clk; + struct srcu_notifier_head notifier_head; + struct list_head node; +}; + +/** + * struct clk_notifier_data - rate data to pass to the notifier callback + * @clk: struct clk * being changed + * @old_rate: previous rate of this clk + * @new_rate: new rate of this clk + * + * For a pre-notifier, old_rate is the clk's rate before this rate + * change, and new_rate is what the rate will be in the future. For a + * post-notifier, old_rate and new_rate are both set to the clk's + * current rate (this was done to optimize the implementation). + */ +struct clk_notifier_data { + struct clk *clk; + unsigned long old_rate; + unsigned long new_rate; +}; + +/** + * struct clk_bulk_data - Data used for bulk clk operations. + * + * @id: clock consumer ID + * @clk: struct clk * to store the associated clock + * + * The CLK APIs provide a series of clk_bulk_() API calls as + * a convenience to consumers which require multiple clks. This + * structure is used to manage data for these calls. + */ +struct clk_bulk_data { + const char *id; + struct clk *clk; +}; + +#ifdef CONFIG_COMMON_CLK + +/** + * clk_notifier_register: register a clock rate-change notifier callback + * @clk: clock whose rate we are interested in + * @nb: notifier block with callback function pointer + * + * ProTip: debugging across notifier chains can be frustrating. Make sure that + * your notifier callback function prints a nice big warning in case of + * failure. + */ +int clk_notifier_register(struct clk *clk, struct notifier_block *nb); + +/** + * clk_notifier_unregister: unregister a clock rate-change notifier callback + * @clk: clock whose rate we are no longer interested in + * @nb: notifier block which will be unregistered + */ +int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); + +/** + * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion) + * for a clock source. + * @clk: clock source + * + * This gets the clock source accuracy expressed in ppb. + * A perfect clock returns 0. + */ +long clk_get_accuracy(struct clk *clk); + +/** + * clk_set_phase - adjust the phase shift of a clock signal + * @clk: clock signal source + * @degrees: number of degrees the signal is shifted + * + * Shifts the phase of a clock signal by the specified degrees. Returns 0 on + * success, -EERROR otherwise. + */ +int clk_set_phase(struct clk *clk, int degrees); + +/** + * clk_get_phase - return the phase shift of a clock signal + * @clk: clock signal source + * + * Returns the phase shift of a clock node in degrees, otherwise returns + * -EERROR. + */ +int clk_get_phase(struct clk *clk); + +/** + * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal + * @clk: clock signal source + * @num: numerator of the duty cycle ratio to be applied + * @den: denominator of the duty cycle ratio to be applied + * + * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on + * success, -EERROR otherwise. + */ +int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den); + +/** + * clk_get_duty_cycle - return the duty cycle ratio of a clock signal + * @clk: clock signal source + * @scale: scaling factor to be applied to represent the ratio as an integer + * + * Returns the duty cycle ratio multiplied by the scale provided, otherwise + * returns -EERROR. 
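[Editor's note: an illustrative sketch of a rate-change notifier built on the PRE/POST/ABORT_RATE_CHANGE events and struct clk_notifier_data described above; the 100 MHz limit and the callback/driver names are hypothetical.]

#include <linux/clk.h>
#include <linux/notifier.h>

static int example_clk_notify(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* veto rates this hypothetical device cannot follow */
		if (ndata->new_rate > 100000000)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_clk_nb = {
	.notifier_call = example_clk_notify,
};

/* in probe: clk_notifier_register(clk, &example_clk_nb); */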
+ */ +int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); + +/** + * clk_is_match - check if two clk's point to the same hardware clock + * @p: clk compared against q + * @q: clk compared against p + * + * Returns true if the two struct clk pointers both point to the same hardware + * clock node. Put differently, returns true if @p and @q + * share the same &struct clk_core object. + * + * Returns false otherwise. Note that two NULL clks are treated as matching. + */ +bool clk_is_match(const struct clk *p, const struct clk *q); + +#else + +static inline int clk_notifier_register(struct clk *clk, + struct notifier_block *nb) +{ + return -ENOTSUPP; +} + +static inline int clk_notifier_unregister(struct clk *clk, + struct notifier_block *nb) +{ + return -ENOTSUPP; +} + +static inline long clk_get_accuracy(struct clk *clk) +{ + return -ENOTSUPP; +} + +static inline long clk_set_phase(struct clk *clk, int phase) +{ + return -ENOTSUPP; +} + +static inline long clk_get_phase(struct clk *clk) +{ + return -ENOTSUPP; +} + +static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num, + unsigned int den) +{ + return -ENOTSUPP; +} + +static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk, + unsigned int scale) +{ + return 0; +} + +static inline bool clk_is_match(const struct clk *p, const struct clk *q) +{ + return p == q; +} + +#endif + +/** + * clk_prepare - prepare a clock source + * @clk: clock source + * + * This prepares the clock source for use. + * + * Must not be called from within atomic context. + */ +#ifdef CONFIG_HAVE_CLK_PREPARE +int clk_prepare(struct clk *clk); +int __must_check clk_bulk_prepare(int num_clks, + const struct clk_bulk_data *clks); +#else +static inline int clk_prepare(struct clk *clk) +{ + might_sleep(); + return 0; +} + +static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks) +{ + might_sleep(); + return 0; +} +#endif + +/** + * clk_unprepare - undo preparation of a clock source + * @clk: clock source + * + * This undoes a previously prepared clock. The caller must balance + * the number of prepare and unprepare calls. + * + * Must not be called from within atomic context. + */ +#ifdef CONFIG_HAVE_CLK_PREPARE +void clk_unprepare(struct clk *clk); +void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks); +#else +static inline void clk_unprepare(struct clk *clk) +{ + might_sleep(); +} +static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks) +{ + might_sleep(); +} +#endif + +#ifdef CONFIG_HAVE_CLK +/** + * clk_get - lookup and obtain a reference to a clock producer. + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Returns a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * Drivers must assume that the clock source is not enabled. + * + * clk_get should not be called from within interrupt context. + */ +struct clk *clk_get(struct device *dev, const char *id); + +/** + * clk_bulk_get - lookup and obtain a number of references to clock producer. + * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * This helper function allows drivers to get several clk consumers in one + * operation. 
If any of the clk cannot be acquired then any clks + * that were obtained will be freed before returning to the caller. + * + * Returns 0 if all clocks specified in clk_bulk_data table are obtained + * successfully, or valid IS_ERR() condition containing errno. + * The implementation uses @dev and @clk_bulk_data.id to determine the + * clock consumer, and thereby the clock producer. + * The clock returned is stored in each @clk_bulk_data.clk field. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_bulk_get should not be called from within interrupt context. + */ +int __must_check clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks); + +/** + * devm_clk_bulk_get - managed get multiple clk consumers + * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * Return 0 on success, an errno on failure. + * + * This helper function allows drivers to get several clk + * consumers in one operation with management, the clks will + * automatically be freed when the device is unbound. + */ +int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks); + +/** + * devm_clk_get - lookup and obtain a managed reference to a clock producer. + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Returns a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * Drivers must assume that the clock source is not enabled. + * + * devm_clk_get should not be called from within interrupt context. + * + * The clock will automatically be freed when the device is unbound + * from the bus. + */ +struct clk *devm_clk_get(struct device *dev, const char *id); + +/** + * devm_get_clk_from_child - lookup and obtain a managed reference to a + * clock producer from child node. + * @dev: device for clock "consumer" + * @np: pointer to clock consumer node + * @con_id: clock consumer ID + * + * This function parses the clocks, and uses them to look up the + * struct clk from the registered list of clock providers by using + * @np and @con_id + * + * The clock will automatically be freed when the device is unbound + * from the bus. + */ +struct clk *devm_get_clk_from_child(struct device *dev, + struct device_node *np, const char *con_id); +/** + * clk_rate_exclusive_get - get exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to get exclusive control over the rate of a + * provider. It prevents any other consumer to execute, even indirectly, + * opereation which could alter the rate of the provider or cause glitches + * + * If exlusivity is claimed more than once on clock, even by the same driver, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Must not be called from within atomic context. + * + * Returns success (0) or negative errno. 
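[Editor's note: a sketch of the bulk-get pattern described above; the "bus"/"core" consumer IDs and the probe function are invented, and clk_bulk_prepare_enable() is the helper defined further down in this header.]

#include <linux/clk.h>
#include <linux/device.h>

static struct clk_bulk_data example_clks[] = {
	{ .id = "bus" },
	{ .id = "core" },
};

static int example_probe(struct device *dev)
{
	int ret;

	/* managed lookup: the clks are released when the device unbinds */
	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
	if (ret)
		return ret;

	return clk_bulk_prepare_enable(ARRAY_SIZE(example_clks),
				       example_clks);
}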
+ */ +int clk_rate_exclusive_get(struct clk *clk); + +/** + * clk_rate_exclusive_put - release exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to release the exclusivity it previously got + * from clk_rate_exclusive_get() + * + * The caller must balance the number of clk_rate_exclusive_get() and + * clk_rate_exclusive_put() calls. + * + * Must not be called from within atomic context. + */ +void clk_rate_exclusive_put(struct clk *clk); + +/** + * clk_enable - inform the system when the clock source should be running. + * @clk: clock source + * + * If the clock can not be enabled/disabled, this should return success. + * + * May be called from atomic contexts. + * + * Returns success (0) or negative errno. + */ +int clk_enable(struct clk *clk); + +/** + * clk_bulk_enable - inform the system when the set of clks should be running. + * @num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * May be called from atomic contexts. + * + * Returns success (0) or negative errno. + */ +int __must_check clk_bulk_enable(int num_clks, + const struct clk_bulk_data *clks); + +/** + * clk_disable - inform the system when the clock source is no longer required. + * @clk: clock source + * + * Inform the system that a clock source is no longer required by + * a driver and may be shut down. + * + * May be called from atomic contexts. + * + * Implementation detail: if the clock source is shared between + * multiple drivers, clk_enable() calls must be balanced by the + * same number of clk_disable() calls for the clock source to be + * disabled. + */ +void clk_disable(struct clk *clk); + +/** + * clk_bulk_disable - inform the system when the set of clks is no + * longer required. + * @num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * Inform the system that a set of clks is no longer required by + * a driver and may be shut down. + * + * May be called from atomic contexts. + * + * Implementation detail: if the set of clks is shared between + * multiple drivers, clk_bulk_enable() calls must be balanced by the + * same number of clk_bulk_disable() calls for the clock source to be + * disabled. + */ +void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks); + +/** + * clk_get_rate - obtain the current clock rate (in Hz) for a clock source. + * This is only valid once the clock source has been enabled. + * @clk: clock source + */ +unsigned long clk_get_rate(struct clk *clk); + +/** + * clk_put - "free" the clock source + * @clk: clock source + * + * Note: drivers must ensure that all clk_enable calls made on this + * clock source are balanced by clk_disable calls prior to calling + * this function. + * + * clk_put should not be called from within interrupt context. + */ +void clk_put(struct clk *clk); + +/** + * clk_bulk_put - "free" the clock source + * @num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * Note: drivers must ensure that all clk_bulk_enable calls made on this + * clock source are balanced by clk_bulk_disable calls prior to calling + * this function. + * + * clk_bulk_put should not be called from within interrupt context. 
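[Editor's note: a sketch of the basic consumer lifecycle with balanced prepare/enable and disable/unprepare calls, using only the functions declared in this header; the "uart" consumer ID is a made-up example.]

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/printk.h>

static int example_use_clock(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "uart");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare(clk);		/* may sleep */
	if (ret)
		goto out_put;

	ret = clk_enable(clk);		/* safe in atomic context */
	if (ret)
		goto out_unprepare;

	pr_info("clock running at %lu Hz\n", clk_get_rate(clk));

	clk_disable(clk);
out_unprepare:
	clk_unprepare(clk);
out_put:
	clk_put(clk);
	return ret;
}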
+ */ +void clk_bulk_put(int num_clks, struct clk_bulk_data *clks); + +/** + * devm_clk_put - "free" a managed clock source + * @dev: device used to acquire the clock + * @clk: clock source acquired with devm_clk_get() + * + * Note: drivers must ensure that all clk_enable calls made on this + * clock source are balanced by clk_disable calls prior to calling + * this function. + * + * clk_put should not be called from within interrupt context. + */ +void devm_clk_put(struct device *dev, struct clk *clk); + +/* + * The remaining APIs are optional for machine class support. + */ + + +/** + * clk_round_rate - adjust a rate to the exact rate a clock can provide + * @clk: clock source + * @rate: desired clock rate in Hz + * + * This answers the question "if I were to pass @rate to clk_set_rate(), + * what clock rate would I end up with?" without changing the hardware + * in any way. In other words: + * + * rate = clk_round_rate(clk, r); + * + * and: + * + * clk_set_rate(clk, r); + * rate = clk_get_rate(clk); + * + * are equivalent except the former does not modify the clock hardware + * in any way. + * + * Returns rounded clock rate in Hz, or negative errno. + */ +long clk_round_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_rate - set the clock rate for a clock source + * @clk: clock source + * @rate: desired clock rate in Hz + * + * Returns success (0) or negative errno. + */ +int clk_set_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_rate_exclusive- set the clock rate and claim exclusivity over + * clock source + * @clk: clock source + * @rate: desired clock rate in Hz + * + * This helper function allows drivers to atomically set the rate of a producer + * and claim exclusivity over the rate control of the producer. + * + * It is essentially a combination of clk_set_rate() and + * clk_rate_exclusite_get(). Caller must balance this call with a call to + * clk_rate_exclusive_put() + * + * Returns success (0) or negative errno. + */ +int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); + +/** + * clk_has_parent - check if a clock is a possible parent for another + * @clk: clock source + * @parent: parent clock source + * + * This function can be used in drivers that need to check that a clock can be + * the parent of another without actually changing the parent. + * + * Returns true if @parent is a possible parent for @clk, false otherwise. + */ +bool clk_has_parent(struct clk *clk, struct clk *parent); + +/** + * clk_set_rate_range - set a rate range for a clock source + * @clk: clock source + * @min: desired minimum clock rate in Hz, inclusive + * @max: desired maximum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. + */ +int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max); + +/** + * clk_set_min_rate - set a minimum clock rate for a clock source + * @clk: clock source + * @rate: desired minimum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. + */ +int clk_set_min_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_max_rate - set a maximum clock rate for a clock source + * @clk: clock source + * @rate: desired maximum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. + */ +int clk_set_max_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_parent - set the parent clock source for this clock + * @clk: clock source + * @parent: parent clock source + * + * Returns success (0) or negative errno. 
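[Editor's note: a sketch of the clk_round_rate()/clk_set_rate() pairing described above, checking the achievable rate before committing to it; the 48 MHz target and the 1% tolerance are arbitrary example numbers.]

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static int example_set_pixel_clock(struct clk *clk)
{
	unsigned long target = 48000000;
	long rounded = clk_round_rate(clk, target);

	if (rounded < 0)
		return rounded;

	/* accept up to 1% deviation from the requested rate */
	if (abs(rounded - (long)target) > target / 100)
		return -EINVAL;

	return clk_set_rate(clk, rounded);
}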
+ */ +int clk_set_parent(struct clk *clk, struct clk *parent); + +/** + * clk_get_parent - get the parent clock source for this clock + * @clk: clock source + * + * Returns struct clk corresponding to parent clock source, or + * valid IS_ERR() condition containing errno. + */ +struct clk *clk_get_parent(struct clk *clk); + +/** + * clk_get_sys - get a clock based upon the device name + * @dev_id: device name + * @con_id: connection ID + * + * Returns a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev_id and @con_id to determine the clock consumer, and + * thereby the clock producer. In contrast to clk_get() this function + * takes the device name instead of the device itself for identification. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_get_sys should not be called from within interrupt context. + */ +struct clk *clk_get_sys(const char *dev_id, const char *con_id); + +#else /* !CONFIG_HAVE_CLK */ + +static inline struct clk *clk_get(struct device *dev, const char *id) +{ + return NULL; +} + +static inline int __must_check clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks) +{ + return 0; +} + +static inline struct clk *devm_clk_get(struct device *dev, const char *id) +{ + return NULL; +} + +static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks) +{ + return 0; +} + +static inline struct clk *devm_get_clk_from_child(struct device *dev, + struct device_node *np, const char *con_id) +{ + return NULL; +} + +static inline void clk_put(struct clk *clk) {} + +static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} + +static inline void devm_clk_put(struct device *dev, struct clk *clk) {} + + +static inline int clk_rate_exclusive_get(struct clk *clk) +{ + return 0; +} + +static inline void clk_rate_exclusive_put(struct clk *clk) {} + +static inline int clk_enable(struct clk *clk) +{ + return 0; +} + +static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks) +{ + return 0; +} + +static inline void clk_disable(struct clk *clk) {} + + +static inline void clk_bulk_disable(int num_clks, + struct clk_bulk_data *clks) {} + +static inline unsigned long clk_get_rate(struct clk *clk) +{ + return 0; +} + +static inline int clk_set_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} + +static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) +{ + return 0; +} + +static inline long clk_round_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} + +static inline bool clk_has_parent(struct clk *clk, struct clk *parent) +{ + return true; +} + +static inline int clk_set_parent(struct clk *clk, struct clk *parent) +{ + return 0; +} + +static inline struct clk *clk_get_parent(struct clk *clk) +{ + return NULL; +} + +static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) +{ + return NULL; +} +#endif + +/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ +static inline int clk_prepare_enable(struct clk *clk) +{ + int ret; + + ret = clk_prepare(clk); + if (ret) + return ret; + ret = clk_enable(clk); + if (ret) + clk_unprepare(clk); + + return ret; +} + +/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. 
*/ +static inline void clk_disable_unprepare(struct clk *clk) +{ + clk_disable(clk); + clk_unprepare(clk); +} + +static inline int __must_check clk_bulk_prepare_enable(int num_clks, + struct clk_bulk_data *clks) +{ + int ret; + + ret = clk_bulk_prepare(num_clks, clks); + if (ret) + return ret; + ret = clk_bulk_enable(num_clks, clks); + if (ret) + clk_bulk_unprepare(num_clks, clks); + + return ret; +} + +static inline void clk_bulk_disable_unprepare(int num_clks, + struct clk_bulk_data *clks) +{ + clk_bulk_disable(num_clks, clks); + clk_bulk_unprepare(num_clks, clks); +} + +#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) +struct clk *of_clk_get(struct device_node *np, int index); +struct clk *of_clk_get_by_name(struct device_node *np, const char *name); +struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec); +#else +static inline struct clk *of_clk_get(struct device_node *np, int index) +{ + return ERR_PTR(-ENOENT); +} +static inline struct clk *of_clk_get_by_name(struct device_node *np, + const char *name) +{ + return ERR_PTR(-ENOENT); +} +static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) +{ + return ERR_PTR(-ENOENT); +} +#endif + +#endif diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h new file mode 100644 index 000000000..931ab05f7 --- /dev/null +++ b/include/linux/clk/at91_pmc.h @@ -0,0 +1,228 @@ +/* + * include/linux/clk/at91_pmc.h + * + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * Power Management Controller (PMC) - System peripherals registers. + * Based on AT91RM9200 datasheet revision E. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
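[Editor's note: a sketch of the OF-based lookup declared above, combined with the clk_prepare_enable() helper from this header; the "baud" clock-names entry is hypothetical.]

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>

static int example_of_clock(struct device_node *np)
{
	/* matches a hypothetical clock-names = "baud" entry in the node */
	struct clk *clk = of_clk_get_by_name(np, "baud");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}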
+ */ + +#ifndef AT91_PMC_H +#define AT91_PMC_H + +#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */ +#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */ + +#define AT91_PMC_SCSR 0x08 /* System Clock Status Register */ +#define AT91_PMC_PCK (1 << 0) /* Processor Clock */ +#define AT91RM9200_PMC_UDP (1 << 1) /* USB Devcice Port Clock [AT91RM9200 only] */ +#define AT91RM9200_PMC_MCKUDP (1 << 2) /* USB Device Port Master Clock Automatic Disable on Suspend [AT91RM9200 only] */ +#define AT91RM9200_PMC_UHP (1 << 4) /* USB Host Port Clock [AT91RM9200 only] */ +#define AT91SAM926x_PMC_UHP (1 << 6) /* USB Host Port Clock [AT91SAM926x only] */ +#define AT91SAM926x_PMC_UDP (1 << 7) /* USB Devcice Port Clock [AT91SAM926x only] */ +#define AT91_PMC_PCK0 (1 << 8) /* Programmable Clock 0 */ +#define AT91_PMC_PCK1 (1 << 9) /* Programmable Clock 1 */ +#define AT91_PMC_PCK2 (1 << 10) /* Programmable Clock 2 */ +#define AT91_PMC_PCK3 (1 << 11) /* Programmable Clock 3 */ +#define AT91_PMC_PCK4 (1 << 12) /* Programmable Clock 4 [AT572D940HF only] */ +#define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */ +#define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */ + +#define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */ +#define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */ +#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */ + +#define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */ +#define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */ +#define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */ +#define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */ +#define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */ + +#define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */ +#define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */ +#define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */ +#define AT91_PMC_WAITMODE (1 << 2) /* Wait Mode Command */ +#define AT91_PMC_MOSCRCEN (1 << 3) /* Main On-Chip RC Oscillator Enable [some SAM9] */ +#define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */ +#define AT91_PMC_KEY_MASK (0xff << 16) +#define AT91_PMC_KEY (0x37 << 16) /* MOR Writing Key */ +#define AT91_PMC_MOSCSEL (1 << 24) /* Main Oscillator Selection [some SAM9] */ +#define AT91_PMC_CFDEN (1 << 25) /* Clock Failure Detector Enable [some SAM9] */ + +#define AT91_CKGR_MCFR 0x24 /* Main Clock Frequency Register */ +#define AT91_PMC_MAINF (0xffff << 0) /* Main Clock Frequency */ +#define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */ + +#define AT91_CKGR_PLLAR 0x28 /* PLL A Register */ +#define AT91_CKGR_PLLBR 0x2c /* PLL B Register */ +#define AT91_PMC_DIV (0xff << 0) /* Divider */ +#define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */ +#define AT91_PMC_OUT (3 << 14) /* PLL Clock Frequency Range */ +#define AT91_PMC_MUL (0x7ff << 16) /* PLL Multiplier */ +#define AT91_PMC_MUL_GET(n) ((n) >> 16 & 0x7ff) +#define AT91_PMC3_MUL (0x7f << 18) /* PLL Multiplier [SAMA5 only] */ +#define AT91_PMC3_MUL_GET(n) ((n) >> 18 & 0x7f) +#define AT91_PMC_USBDIV (3 << 28) /* USB Divisor (PLLB only) */ +#define AT91_PMC_USBDIV_1 (0 << 28) +#define AT91_PMC_USBDIV_2 (1 << 28) +#define AT91_PMC_USBDIV_4 (2 << 28) +#define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */ + +#define AT91_PMC_MCKR 0x30 /* Master Clock Register */ +#define AT91_PMC_CSS (3 << 0) /* Master Clock Selection */ +#define AT91_PMC_CSS_SLOW (0 << 0) +#define AT91_PMC_CSS_MAIN (1 << 
0) +#define AT91_PMC_CSS_PLLA (2 << 0) +#define AT91_PMC_CSS_PLLB (3 << 0) +#define AT91_PMC_CSS_UPLL (3 << 0) /* [some SAM9 only] */ +#define PMC_PRES_OFFSET 2 +#define AT91_PMC_PRES (7 << PMC_PRES_OFFSET) /* Master Clock Prescaler */ +#define AT91_PMC_PRES_1 (0 << PMC_PRES_OFFSET) +#define AT91_PMC_PRES_2 (1 << PMC_PRES_OFFSET) +#define AT91_PMC_PRES_4 (2 << PMC_PRES_OFFSET) +#define AT91_PMC_PRES_8 (3 << PMC_PRES_OFFSET) +#define AT91_PMC_PRES_16 (4 << PMC_PRES_OFFSET) +#define AT91_PMC_PRES_32 (5 << PMC_PRES_OFFSET) +#define AT91_PMC_PRES_64 (6 << PMC_PRES_OFFSET) +#define PMC_ALT_PRES_OFFSET 4 +#define AT91_PMC_ALT_PRES (7 << PMC_ALT_PRES_OFFSET) /* Master Clock Prescaler [alternate location] */ +#define AT91_PMC_ALT_PRES_1 (0 << PMC_ALT_PRES_OFFSET) +#define AT91_PMC_ALT_PRES_2 (1 << PMC_ALT_PRES_OFFSET) +#define AT91_PMC_ALT_PRES_4 (2 << PMC_ALT_PRES_OFFSET) +#define AT91_PMC_ALT_PRES_8 (3 << PMC_ALT_PRES_OFFSET) +#define AT91_PMC_ALT_PRES_16 (4 << PMC_ALT_PRES_OFFSET) +#define AT91_PMC_ALT_PRES_32 (5 << PMC_ALT_PRES_OFFSET) +#define AT91_PMC_ALT_PRES_64 (6 << PMC_ALT_PRES_OFFSET) +#define AT91_PMC_MDIV (3 << 8) /* Master Clock Division */ +#define AT91RM9200_PMC_MDIV_1 (0 << 8) /* [AT91RM9200 only] */ +#define AT91RM9200_PMC_MDIV_2 (1 << 8) +#define AT91RM9200_PMC_MDIV_3 (2 << 8) +#define AT91RM9200_PMC_MDIV_4 (3 << 8) +#define AT91SAM9_PMC_MDIV_1 (0 << 8) /* [SAM9 only] */ +#define AT91SAM9_PMC_MDIV_2 (1 << 8) +#define AT91SAM9_PMC_MDIV_4 (2 << 8) +#define AT91SAM9_PMC_MDIV_6 (3 << 8) /* [some SAM9 only] */ +#define AT91SAM9_PMC_MDIV_3 (3 << 8) /* [some SAM9 only] */ +#define AT91_PMC_PDIV (1 << 12) /* Processor Clock Division [some SAM9 only] */ +#define AT91_PMC_PDIV_1 (0 << 12) +#define AT91_PMC_PDIV_2 (1 << 12) +#define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */ +#define AT91_PMC_PLLADIV2_OFF (0 << 12) +#define AT91_PMC_PLLADIV2_ON (1 << 12) +#define AT91_PMC_H32MXDIV BIT(24) + +#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ +#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */ +#define AT91_PMC_USBS_PLLA (0 << 0) +#define AT91_PMC_USBS_UPLL (1 << 0) +#define AT91_PMC_USBS_PLLB (1 << 0) /* [AT91SAMN12 only] */ +#define AT91_PMC_OHCIUSBDIV (0xF << 8) /* Divider for USB OHCI Clock */ +#define AT91_PMC_OHCIUSBDIV_1 (0x0 << 8) +#define AT91_PMC_OHCIUSBDIV_2 (0x1 << 8) + +#define AT91_PMC_SMD 0x3c /* Soft Modem Clock Register [some SAM9 only] */ +#define AT91_PMC_SMDS (0x1 << 0) /* SMD input clock selection */ +#define AT91_PMC_SMD_DIV (0x1f << 8) /* SMD input clock divider */ +#define AT91_PMC_SMDDIV(n) (((n) << 8) & AT91_PMC_SMD_DIV) + +#define AT91_PMC_PCKR(n) (0x40 + ((n) * 4)) /* Programmable Clock 0-N Registers */ +#define AT91_PMC_ALT_PCKR_CSS (0x7 << 0) /* Programmable Clock Source Selection [alternate length] */ +#define AT91_PMC_CSS_MASTER (4 << 0) /* [some SAM9 only] */ +#define AT91_PMC_CSSMCK (0x1 << 8) /* CSS or Master Clock Selection */ +#define AT91_PMC_CSSMCK_CSS (0 << 8) +#define AT91_PMC_CSSMCK_MCK (1 << 8) + +#define AT91_PMC_IER 0x60 /* Interrupt Enable Register */ +#define AT91_PMC_IDR 0x64 /* Interrupt Disable Register */ +#define AT91_PMC_SR 0x68 /* Status Register */ +#define AT91_PMC_MOSCS (1 << 0) /* MOSCS Flag */ +#define AT91_PMC_LOCKA (1 << 1) /* PLLA Lock */ +#define AT91_PMC_LOCKB (1 << 2) /* PLLB Lock */ +#define AT91_PMC_MCKRDY (1 << 3) /* Master Clock */ +#define AT91_PMC_LOCKU (1 << 6) /* UPLL Lock [some SAM9] */ +#define AT91_PMC_OSCSEL (1 << 7) /* Slow Oscillator Selection [some 
SAM9] */ +#define AT91_PMC_PCK0RDY (1 << 8) /* Programmable Clock 0 */ +#define AT91_PMC_PCK1RDY (1 << 9) /* Programmable Clock 1 */ +#define AT91_PMC_PCK2RDY (1 << 10) /* Programmable Clock 2 */ +#define AT91_PMC_PCK3RDY (1 << 11) /* Programmable Clock 3 */ +#define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */ +#define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ +#define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ +#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */ +#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ + +#define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */ +#define AT91_PMC_FSTT(n) BIT(n) +#define AT91_PMC_RTCAL BIT(17) /* RTC Alarm Enable */ +#define AT91_PMC_USBAL BIT(18) /* USB Resume Enable */ +#define AT91_PMC_SDMMC_CD BIT(19) /* SDMMC Card Detect Enable */ +#define AT91_PMC_LPM BIT(20) /* Low-power Mode */ +#define AT91_PMC_RXLP_MCE BIT(24) /* Backup UART Receive Enable */ +#define AT91_PMC_ACC_CE BIT(25) /* ACC Enable */ + +#define AT91_PMC_FSPR 0x74 /* Fast Startup Polarity Reg */ + +#define AT91_PMC_FS_INPUT_MASK 0x7ff + +#define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */ + +#define AT91_PMC_PROT 0xe4 /* Write Protect Mode Register [some SAM9] */ +#define AT91_PMC_WPEN (0x1 << 0) /* Write Protect Enable */ +#define AT91_PMC_WPKEY (0xffffff << 8) /* Write Protect Key */ +#define AT91_PMC_PROTKEY (0x504d43 << 8) /* Activation Code */ + +#define AT91_PMC_WPSR 0xe8 /* Write Protect Status Register [some SAM9] */ +#define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */ +#define AT91_PMC_WPVSRC (0xffff << 8) /* Write Protect Violation Source */ + +#define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only]*/ +#define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Enable Register 1 */ +#define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */ + +#define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */ +#define AT91_PMC_PCR_PID_MASK 0x3f +#define AT91_PMC_PCR_GCKCSS_OFFSET 8 +#define AT91_PMC_PCR_GCKCSS_MASK (0x7 << AT91_PMC_PCR_GCKCSS_OFFSET) +#define AT91_PMC_PCR_GCKCSS(n) ((n) << AT91_PMC_PCR_GCKCSS_OFFSET) /* GCK Clock Source Selection */ +#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */ +#define AT91_PMC_PCR_DIV_OFFSET 16 +#define AT91_PMC_PCR_DIV_MASK (0x3 << AT91_PMC_PCR_DIV_OFFSET) +#define AT91_PMC_PCR_DIV(n) ((n) << AT91_PMC_PCR_DIV_OFFSET) /* Divisor Value */ +#define AT91_PMC_PCR_GCKDIV_OFFSET 20 +#define AT91_PMC_PCR_GCKDIV_MASK (0xff << AT91_PMC_PCR_GCKDIV_OFFSET) +#define AT91_PMC_PCR_GCKDIV(n) ((n) << AT91_PMC_PCR_GCKDIV_OFFSET) /* Generated Clock Divisor Value */ +#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */ +#define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */ + +#define AT91_PMC_AUDIO_PLL0 0x14c +#define AT91_PMC_AUDIO_PLL_PLLEN (1 << 0) +#define AT91_PMC_AUDIO_PLL_PADEN (1 << 1) +#define AT91_PMC_AUDIO_PLL_PMCEN (1 << 2) +#define AT91_PMC_AUDIO_PLL_RESETN (1 << 3) +#define AT91_PMC_AUDIO_PLL_ND_OFFSET 8 +#define AT91_PMC_AUDIO_PLL_ND_MASK (0x7f << AT91_PMC_AUDIO_PLL_ND_OFFSET) +#define AT91_PMC_AUDIO_PLL_ND(n) ((n) << AT91_PMC_AUDIO_PLL_ND_OFFSET) +#define AT91_PMC_AUDIO_PLL_QDPMC_OFFSET 16 +#define AT91_PMC_AUDIO_PLL_QDPMC_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET) +#define AT91_PMC_AUDIO_PLL_QDPMC(n) ((n) << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET) + +#define AT91_PMC_AUDIO_PLL1 0x150 +#define AT91_PMC_AUDIO_PLL_FRACR_MASK 0x3fffff +#define 
AT91_PMC_AUDIO_PLL_QDPAD_OFFSET 24 +#define AT91_PMC_AUDIO_PLL_QDPAD_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET) +#define AT91_PMC_AUDIO_PLL_QDPAD(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET) +#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET AT91_PMC_AUDIO_PLL_QDPAD_OFFSET +#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_MASK (0x3 << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET) +#define AT91_PMC_AUDIO_PLL_QDPAD_DIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET) +#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET 26 +#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX 0x1f +#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MASK (AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET) +#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET) + +#endif diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h new file mode 100644 index 000000000..e0c362363 --- /dev/null +++ b/include/linux/clk/clk-conf.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2014 Samsung Electronics Co., Ltd. + * Sylwester Nawrocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +struct device_node; + +#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) +int of_clk_set_defaults(struct device_node *node, bool clk_supplier); +#else +static inline int of_clk_set_defaults(struct device_node *node, + bool clk_supplier) +{ + return 0; +} +#endif diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h new file mode 100644 index 000000000..8a7b5cd7e --- /dev/null +++ b/include/linux/clk/davinci.h @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Clock drivers for TI DaVinci PLL and PSC controllers + * + * Copyright (C) 2018 David Lechner + */ + +#ifndef __LINUX_CLK_DAVINCI_PLL_H___ +#define __LINUX_CLK_DAVINCI_PLL_H___ + +#include +#include + +/* function for registering clocks in early boot */ + +#ifdef CONFIG_ARCH_DAVINCI_DA830 +int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +#endif +#ifdef CONFIG_ARCH_DAVINCI_DA850 +int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +#endif +#ifdef CONFIG_ARCH_DAVINCI_DM355 +int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +int dm355_psc_init(struct device *dev, void __iomem *base); +#endif +#ifdef CONFIG_ARCH_DAVINCI_DM365 +int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +int dm365_psc_init(struct device *dev, void __iomem *base); +#endif +#ifdef CONFIG_ARCH_DAVINCI_DM644x +int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +int dm644x_psc_init(struct device *dev, void __iomem *base); +#endif +#ifdef CONFIG_ARCH_DAVINCI_DM646x +int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +int dm646x_psc_init(struct device *dev, void __iomem *base); +#endif + +#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */ diff --git a/include/linux/clk/mmp.h b/include/linux/clk/mmp.h new file mode 100644 index 000000000..445130460 --- /dev/null +++ b/include/linux/clk/mmp.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CLK_MMP_H +#define __CLK_MMP_H + +#include + +extern void pxa168_clk_init(phys_addr_t mpmu_phys, + phys_addr_t apmu_phys, + phys_addr_t apbc_phys); +extern void 
pxa910_clk_init(phys_addr_t mpmu_phys, + phys_addr_t apmu_phys, + phys_addr_t apbc_phys, + phys_addr_t apbcp_phys); +extern void mmp2_clk_init(phys_addr_t mpmu_phys, + phys_addr_t apmu_phys, + phys_addr_t apbc_phys); + +#endif diff --git a/include/linux/clk/mxs.h b/include/linux/clk/mxs.h new file mode 100644 index 000000000..5138a90e0 --- /dev/null +++ b/include/linux/clk/mxs.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_CLK_MXS_H +#define __LINUX_CLK_MXS_H + +int mxs_saif_clkmux_select(unsigned int clkmux); + +#endif diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h new file mode 100644 index 000000000..9ebf1f824 --- /dev/null +++ b/include/linux/clk/renesas.h @@ -0,0 +1,39 @@ +/* + * Copyright 2013 Ideas On Board SPRL + * Copyright 2013, 2014 Horms Solutions Ltd. + * + * Contact: Laurent Pinchart + * Contact: Simon Horman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_CLK_RENESAS_H_ +#define __LINUX_CLK_RENESAS_H_ + +#include + +struct device; +struct device_node; +struct generic_pm_domain; + +void cpg_mstp_add_clk_domain(struct device_node *np); +#ifdef CONFIG_CLK_RENESAS_CPG_MSTP +int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev); +void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev); +#else +#define cpg_mstp_attach_dev NULL +#define cpg_mstp_detach_dev NULL +#endif + +#ifdef CONFIG_CLK_RENESAS_CPG_MSSR +int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev); +void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev); +#else +#define cpg_mssr_attach_dev NULL +#define cpg_mssr_detach_dev NULL +#endif +#endif diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h new file mode 100644 index 000000000..990f760f7 --- /dev/null +++ b/include/linux/clk/sunxi-ng.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_CLK_SUNXI_NG_H_ +#define _LINUX_CLK_SUNXI_NG_H_ + +#include + +#ifdef CONFIG_SUNXI_CCU +int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode); +int sunxi_ccu_get_mmc_timing_mode(struct clk *clk); +#else +static inline int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, + bool new_mode) +{ + return -ENOTSUPP; +} + +static inline int sunxi_ccu_get_mmc_timing_mode(struct clk *clk) +{ + return -ENOTSUPP; +} +#endif + +#endif diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h new file mode 100644 index 000000000..afb9edfa5 --- /dev/null +++ b/include/linux/clk/tegra.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2012, NVIDIA CORPORATION. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __LINUX_CLK_TEGRA_H_ +#define __LINUX_CLK_TEGRA_H_ + +#include +#include + +/* + * Tegra CPU clock and reset control ops + * + * wait_for_reset: + * keep waiting until the CPU in reset state + * put_in_reset: + * put the CPU in reset state + * out_of_reset: + * release the CPU from reset state + * enable_clock: + * CPU clock un-gate + * disable_clock: + * CPU clock gate + * rail_off_ready: + * CPU is ready for rail off + * suspend: + * save the clock settings when CPU go into low-power state + * resume: + * restore the clock settings when CPU exit low-power state + */ +struct tegra_cpu_car_ops { + void (*wait_for_reset)(u32 cpu); + void (*put_in_reset)(u32 cpu); + void (*out_of_reset)(u32 cpu); + void (*enable_clock)(u32 cpu); + void (*disable_clock)(u32 cpu); +#ifdef CONFIG_PM_SLEEP + bool (*rail_off_ready)(void); + void (*suspend)(void); + void (*resume)(void); +#endif +}; + +extern struct tegra_cpu_car_ops *tegra_cpu_car_ops; + +static inline void tegra_wait_cpu_in_reset(u32 cpu) +{ + if (WARN_ON(!tegra_cpu_car_ops->wait_for_reset)) + return; + + tegra_cpu_car_ops->wait_for_reset(cpu); +} + +static inline void tegra_put_cpu_in_reset(u32 cpu) +{ + if (WARN_ON(!tegra_cpu_car_ops->put_in_reset)) + return; + + tegra_cpu_car_ops->put_in_reset(cpu); +} + +static inline void tegra_cpu_out_of_reset(u32 cpu) +{ + if (WARN_ON(!tegra_cpu_car_ops->out_of_reset)) + return; + + tegra_cpu_car_ops->out_of_reset(cpu); +} + +static inline void tegra_enable_cpu_clock(u32 cpu) +{ + if (WARN_ON(!tegra_cpu_car_ops->enable_clock)) + return; + + tegra_cpu_car_ops->enable_clock(cpu); +} + +static inline void tegra_disable_cpu_clock(u32 cpu) +{ + if (WARN_ON(!tegra_cpu_car_ops->disable_clock)) + return; + + tegra_cpu_car_ops->disable_clock(cpu); +} + +#ifdef CONFIG_PM_SLEEP +static inline bool tegra_cpu_rail_off_ready(void) +{ + if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready)) + return false; + + return tegra_cpu_car_ops->rail_off_ready(); +} + +static inline void tegra_cpu_clock_suspend(void) +{ + if (WARN_ON(!tegra_cpu_car_ops->suspend)) + return; + + tegra_cpu_car_ops->suspend(); +} + +static inline void tegra_cpu_clock_resume(void) +{ + if (WARN_ON(!tegra_cpu_car_ops->resume)) + return; + + tegra_cpu_car_ops->resume(); +} +#endif + +extern void tegra210_xusb_pll_hw_control_enable(void); +extern void tegra210_xusb_pll_hw_sequence_start(void); +extern void tegra210_sata_pll_hw_control_enable(void); +extern void tegra210_sata_pll_hw_sequence_start(void); +extern void tegra210_set_sata_pll_seq_sw(bool state); +extern void tegra210_put_utmipll_in_iddq(void); +extern void tegra210_put_utmipll_out_iddq(void); +extern int tegra210_clk_handle_mbist_war(unsigned int id); + +#endif /* __LINUX_CLK_TEGRA_H_ */ diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h new file mode 100644 index 000000000..a8faa38b1 --- /dev/null +++ b/include/linux/clk/ti.h @@ -0,0 +1,312 @@ +/* + * TI clock drivers support + * + * 
Copyright (C) 2013 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __LINUX_CLK_TI_H__ +#define __LINUX_CLK_TI_H__ + +#include +#include + +/** + * struct clk_omap_reg - OMAP register declaration + * @offset: offset from the master IP module base address + * @index: index of the master IP module + */ +struct clk_omap_reg { + void __iomem *ptr; + u16 offset; + u8 index; + u8 flags; +}; + +/** + * struct dpll_data - DPLL registers and integration data + * @mult_div1_reg: register containing the DPLL M and N bitfields + * @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg + * @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg + * @clk_bypass: struct clk_hw pointer to the clock's bypass clock input + * @clk_ref: struct clk_hw pointer to the clock's reference clock input + * @control_reg: register containing the DPLL mode bitfield + * @enable_mask: mask of the DPLL mode bitfield in @control_reg + * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate() + * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate() + * @last_rounded_m4xen: cache of the last M4X result of + * omap4_dpll_regm4xen_round_rate() + * @last_rounded_lpmode: cache of the last lpmode result of + * omap4_dpll_lpmode_recalc() + * @max_multiplier: maximum valid non-bypass multiplier value (actual) + * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate() + * @min_divider: minimum valid non-bypass divider value (actual) + * @max_divider: maximum valid non-bypass divider value (actual) + * @max_rate: maximum clock rate for the DPLL + * @modes: possible values of @enable_mask + * @autoidle_reg: register containing the DPLL autoidle mode bitfield + * @idlest_reg: register containing the DPLL idle status bitfield + * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg + * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg + * @dcc_mask: mask of the DPLL DCC correction bitfield @mult_div1_reg + * @dcc_rate: rate atleast which DCC @dcc_mask must be set + * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg + * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg + * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg + * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg + * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs + * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs + * @flags: DPLL type/features (see below) + * + * Possible values for @flags: + * DPLL_J_TYPE: "J-type DPLL" (only some 36xx, 4xxx DPLLs) + * + * @freqsel_mask is only used on the OMAP34xx family and AM35xx. + * + * XXX Some DPLLs have multiple bypass inputs, so it's not technically + * correct to only have one @clk_bypass pointer. + * + * XXX The runtime-variable fields (@last_rounded_rate, @last_rounded_m, + * @last_rounded_n) should be separated from the runtime-fixed fields + * and placed into a different structure, so that the runtime-fixed data + * can be placed into read-only space. 
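+ *
+ * As a hedged illustration (the figures are made up, not taken from this
+ * header): in the locked state a DPLL output is conventionally
+ * clk_ref * M / (N + 1), where M comes from @mult_div1_reg masked with
+ * @mult_mask and N from the same register masked with @div1_mask. With a
+ * 19.2 MHz reference, M = 625 and N = 11 that gives
+ * 19.2 MHz * 625 / 12 = 1 GHz.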
+ */ +struct dpll_data { + struct clk_omap_reg mult_div1_reg; + u32 mult_mask; + u32 div1_mask; + struct clk_hw *clk_bypass; + struct clk_hw *clk_ref; + struct clk_omap_reg control_reg; + u32 enable_mask; + unsigned long last_rounded_rate; + u16 last_rounded_m; + u8 last_rounded_m4xen; + u8 last_rounded_lpmode; + u16 max_multiplier; + u8 last_rounded_n; + u8 min_divider; + u16 max_divider; + unsigned long max_rate; + u8 modes; + struct clk_omap_reg autoidle_reg; + struct clk_omap_reg idlest_reg; + u32 autoidle_mask; + u32 freqsel_mask; + u32 idlest_mask; + u32 dco_mask; + u32 sddiv_mask; + u32 dcc_mask; + unsigned long dcc_rate; + u32 lpmode_mask; + u32 m4xen_mask; + u8 auto_recal_bit; + u8 recal_en_bit; + u8 recal_st_bit; + u8 flags; +}; + +struct clk_hw_omap; + +/** + * struct clk_hw_omap_ops - OMAP clk ops + * @find_idlest: find idlest register information for a clock + * @find_companion: find companion clock register information for a clock, + * basically converts CM_ICLKEN* <-> CM_FCLKEN* + * @allow_idle: enables autoidle hardware functionality for a clock + * @deny_idle: prevent autoidle hardware functionality for a clock + */ +struct clk_hw_omap_ops { + void (*find_idlest)(struct clk_hw_omap *oclk, + struct clk_omap_reg *idlest_reg, + u8 *idlest_bit, u8 *idlest_val); + void (*find_companion)(struct clk_hw_omap *oclk, + struct clk_omap_reg *other_reg, + u8 *other_bit); + void (*allow_idle)(struct clk_hw_omap *oclk); + void (*deny_idle)(struct clk_hw_omap *oclk); +}; + +/** + * struct clk_hw_omap - OMAP struct clk + * @node: list_head connecting this clock into the full clock list + * @enable_reg: register to write to enable the clock (see @enable_bit) + * @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg) + * @flags: see "struct clk.flags possibilities" above + * @clksel_reg: for clksel clks, register va containing src/divisor select + * @dpll_data: for DPLLs, pointer to struct dpll_data for this clock + * @clkdm_name: clockdomain name that this clock is contained in + * @clkdm: pointer to struct clockdomain, resolved from @clkdm_name at runtime + * @ops: clock ops for this clock + */ +struct clk_hw_omap { + struct clk_hw hw; + struct list_head node; + unsigned long fixed_rate; + u8 fixed_div; + struct clk_omap_reg enable_reg; + u8 enable_bit; + u8 flags; + struct clk_omap_reg clksel_reg; + struct dpll_data *dpll_data; + const char *clkdm_name; + struct clockdomain *clkdm; + const struct clk_hw_omap_ops *ops; +}; + +/* + * struct clk_hw_omap.flags possibilities + * + * XXX document the rest of the clock flags here + * + * ENABLE_REG_32BIT: (OMAP1 only) clock control register must be accessed + * with 32bit ops, by default OMAP1 uses 16bit ops. + * CLOCK_IDLE_CONTROL: (OMAP1 only) clock has autoidle support. + * CLOCK_NO_IDLE_PARENT: (OMAP1 only) when clock is enabled, its parent + * clock is put to no-idle mode. + * ENABLE_ON_INIT: Clock is enabled on init. + * INVERT_ENABLE: By default, clock enable bit behavior is '1' enable, '0' + * disable. This inverts the behavior making '0' enable and '1' disable. + * CLOCK_CLKOUTX2: (OMAP4 only) DPLL CLKOUT and CLKOUTX2 GATE_CTRL + * bits share the same register. This flag allows the + * omap4_dpllmx*() code to determine which GATE_CTRL bit field + * should be used. This is a temporary solution - a better approach + * would be to associate clock type-specific data with the clock, + * similar to the struct dpll_data approach. 
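+ *
+ * The flags below are OR-ed together into struct clk_hw_omap.flags. A
+ * hedged example (hypothetical clock, not a real SoC entry): a clock with
+ * an active-low enable bit that must keep running from boot would use
+ *
+ *	.flags = ENABLE_ON_INIT | INVERT_ENABLE,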
+ */ +#define ENABLE_REG_32BIT (1 << 0) /* Use 32-bit access */ +#define CLOCK_IDLE_CONTROL (1 << 1) +#define CLOCK_NO_IDLE_PARENT (1 << 2) +#define ENABLE_ON_INIT (1 << 3) /* Enable upon framework init */ +#define INVERT_ENABLE (1 << 4) /* 0 enables, 1 disables */ +#define CLOCK_CLKOUTX2 (1 << 5) + +/* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */ +#define DPLL_LOW_POWER_STOP 0x1 +#define DPLL_LOW_POWER_BYPASS 0x5 +#define DPLL_LOCKED 0x7 + +/* DPLL Type and DCO Selection Flags */ +#define DPLL_J_TYPE 0x1 + +/* Static memmap indices */ +enum { + TI_CLKM_CM = 0, + TI_CLKM_CM2, + TI_CLKM_PRM, + TI_CLKM_SCRM, + TI_CLKM_CTRL, + TI_CLKM_CTRL_AUX, + TI_CLKM_PLLSS, + CLK_MAX_MEMMAPS +}; + +/** + * struct ti_clk_ll_ops - low-level ops for clocks + * @clk_readl: pointer to register read function + * @clk_writel: pointer to register write function + * @clk_rmw: pointer to register read-modify-write function + * @clkdm_clk_enable: pointer to clockdomain enable function + * @clkdm_clk_disable: pointer to clockdomain disable function + * @clkdm_lookup: pointer to clockdomain lookup function + * @cm_wait_module_ready: pointer to CM module wait ready function + * @cm_split_idlest_reg: pointer to CM module function to split idlest reg + * + * Low-level ops are generally used by the basic clock types (clk-gate, + * clk-mux, clk-divider etc.) to provide support for various low-level + * hadrware interfaces (direct MMIO, regmap etc.), and is initialized + * by board code. Low-level ops also contain some other platform specific + * operations not provided directly by clock drivers. + */ +struct ti_clk_ll_ops { + u32 (*clk_readl)(const struct clk_omap_reg *reg); + void (*clk_writel)(u32 val, const struct clk_omap_reg *reg); + void (*clk_rmw)(u32 val, u32 mask, const struct clk_omap_reg *reg); + int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk); + int (*clkdm_clk_disable)(struct clockdomain *clkdm, + struct clk *clk); + struct clockdomain * (*clkdm_lookup)(const char *name); + int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg, + u8 idlest_shift); + int (*cm_split_idlest_reg)(struct clk_omap_reg *idlest_reg, + s16 *prcm_inst, u8 *idlest_reg_id); +}; + +#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw) + +int omap2_clk_disable_autoidle_all(void); +int omap2_clk_enable_autoidle_all(void); +int omap2_clk_allow_idle(struct clk *clk); +int omap2_clk_deny_idle(struct clk *clk); +unsigned long omap2_dpllcore_recalc(struct clk_hw *hw, + unsigned long parent_rate); +int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate, + unsigned long parent_rate); +void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw); +void omap2xxx_clkt_vps_init(void); +unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk); + +void ti_dt_clk_init_retry_clks(void); +void ti_dt_clockdomains_setup(void); +int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops); + +struct regmap; + +int omap2_clk_provider_init(struct device_node *parent, int index, + struct regmap *syscon, void __iomem *mem); +void omap2_clk_legacy_provider_init(int index, void __iomem *mem); + +int omap3430_dt_clk_init(void); +int omap3630_dt_clk_init(void); +int am35xx_dt_clk_init(void); +int dm814x_dt_clk_init(void); +int dm816x_dt_clk_init(void); +int omap4xxx_dt_clk_init(void); +int omap5xxx_dt_clk_init(void); +int dra7xx_dt_clk_init(void); +int am33xx_dt_clk_init(void); +int am43xx_dt_clk_init(void); +int omap2420_dt_clk_init(void); +int omap2430_dt_clk_init(void); + +struct ti_clk_features 
{ + u32 flags; + long fint_min; + long fint_max; + long fint_band1_max; + long fint_band2_min; + u8 dpll_bypass_vals; + u8 cm_idlest_val; +}; + +#define TI_CLK_DPLL_HAS_FREQSEL BIT(0) +#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1) +#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2) +#define TI_CLK_ERRATA_I810 BIT(3) + +void ti_clk_setup_features(struct ti_clk_features *features); +const struct ti_clk_features *ti_clk_get_features(void); + +extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; + +#ifdef CONFIG_ATAGS +int omap3430_clk_legacy_init(void); +int omap3430es1_clk_legacy_init(void); +int omap36xx_clk_legacy_init(void); +int am35xx_clk_legacy_init(void); +#else +static inline int omap3430_clk_legacy_init(void) { return -ENXIO; } +static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; } +static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; } +static inline int am35xx_clk_legacy_init(void) { return -ENXIO; } +#endif + + +#endif diff --git a/include/linux/clk/zynq.h b/include/linux/clk/zynq.h new file mode 100644 index 000000000..7a5633b71 --- /dev/null +++ b/include/linux/clk/zynq.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2013 Xilinx Inc. + * Copyright (C) 2012 National Instruments + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_CLK_ZYNQ_H_ +#define __LINUX_CLK_ZYNQ_H_ + +#include + +void zynq_clock_init(void); + +struct clk *clk_register_zynq_pll(const char *name, const char *parent, + void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index, + spinlock_t *lock); +#endif diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h new file mode 100644 index 000000000..4890ff033 --- /dev/null +++ b/include/linux/clkdev.h @@ -0,0 +1,55 @@ +/* + * include/linux/clkdev.h + * + * Copyright (C) 2008 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Helper for the clk API to assist looking up a struct clk. + */ +#ifndef __CLKDEV_H +#define __CLKDEV_H + +#include + +struct clk; +struct clk_hw; +struct device; + +struct clk_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct clk *clk; + struct clk_hw *clk_hw; +}; + +#define CLKDEV_INIT(d, n, c) \ + { \ + .dev_id = d, \ + .con_id = n, \ + .clk = c, \ + } + +struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); +struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); + +void clkdev_add(struct clk_lookup *cl); +void clkdev_drop(struct clk_lookup *cl); + +struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id, + const char *dev_fmt, ...) 
__printf(3, 4); +struct clk_lookup *clkdev_hw_create(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); + +void clkdev_add_table(struct clk_lookup *, size_t); +int clk_add_alias(const char *, const char *, const char *, struct device *); + +int clk_register_clkdev(struct clk *, const char *, const char *); +int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); + +#endif diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h new file mode 100644 index 000000000..4d1019d56 --- /dev/null +++ b/include/linux/clock_cooling.h @@ -0,0 +1,65 @@ +/* + * linux/include/linux/clock_cooling.h + * + * Copyright (C) 2014 Eduardo Valentin + * + * Copyright (C) 2013 Texas Instruments Inc. + * Contact: Eduardo Valentin + * + * Highly based on cpu_cooling.c. + * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) + * Copyright (C) 2012 Amit Daniel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef __CPU_COOLING_H__ +#define __CPU_COOLING_H__ + +#include +#include +#include + +#ifdef CONFIG_CLOCK_THERMAL +/** + * clock_cooling_register - function to create clock cooling device. + * @dev: struct device pointer to the device used as clock cooling device. + * @clock_name: string containing the clock used as cooling mechanism. + */ +struct thermal_cooling_device * +clock_cooling_register(struct device *dev, const char *clock_name); + +/** + * clock_cooling_unregister - function to remove clock cooling device. + * @cdev: thermal cooling device pointer. + */ +void clock_cooling_unregister(struct thermal_cooling_device *cdev); + +unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, + unsigned long freq); +#else /* !CONFIG_CLOCK_THERMAL */ +static inline struct thermal_cooling_device * +clock_cooling_register(struct device *dev, const char *clock_name) +{ + return NULL; +} +static inline +void clock_cooling_unregister(struct thermal_cooling_device *cdev) +{ +} +static inline +unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, + unsigned long freq) +{ + return THERMAL_CSTATE_INVALID; +} +#endif /* CONFIG_CLOCK_THERMAL */ + +#endif /* __CPU_COOLING_H__ */ diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h new file mode 100644 index 000000000..8ae9a95eb --- /dev/null +++ b/include/linux/clockchips.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* linux/include/linux/clockchips.h + * + * This file contains the structure definitions for clockchips. + * + * If you are not a clockchip, or the time of day code, you should + * not be including this file! + */ +#ifndef _LINUX_CLOCKCHIPS_H +#define _LINUX_CLOCKCHIPS_H + +#ifdef CONFIG_GENERIC_CLOCKEVENTS + +# include +# include +# include +# include + +struct clock_event_device; +struct module; + +/* + * Possible states of a clock event device. + * + * DETACHED: Device is not used by clockevents core. Initial state or can be + * reached from SHUTDOWN. + * SHUTDOWN: Device is powered-off. Can be reached from PERIODIC or ONESHOT. 
+ * PERIODIC: Device is programmed to generate events periodically. Can be + * reached from DETACHED or SHUTDOWN. + * ONESHOT: Device is programmed to generate event only once. Can be reached + * from DETACHED or SHUTDOWN. + * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily + * stopped. + */ +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED, + CLOCK_EVT_STATE_SHUTDOWN, + CLOCK_EVT_STATE_PERIODIC, + CLOCK_EVT_STATE_ONESHOT, + CLOCK_EVT_STATE_ONESHOT_STOPPED, +}; + +/* + * Clock event features + */ +# define CLOCK_EVT_FEAT_PERIODIC 0x000001 +# define CLOCK_EVT_FEAT_ONESHOT 0x000002 +# define CLOCK_EVT_FEAT_KTIME 0x000004 + +/* + * x86(64) specific (mis)features: + * + * - Clockevent source stops in C3 State and needs broadcast support. + * - Local APIC timer is used as a dummy device. + */ +# define CLOCK_EVT_FEAT_C3STOP 0x000008 +# define CLOCK_EVT_FEAT_DUMMY 0x000010 + +/* + * Core shall set the interrupt affinity dynamically in broadcast mode + */ +# define CLOCK_EVT_FEAT_DYNIRQ 0x000020 +# define CLOCK_EVT_FEAT_PERCPU 0x000040 + +/* + * Clockevent device is based on a hrtimer for broadcast + */ +# define CLOCK_EVT_FEAT_HRTIMER 0x000080 + +/** + * struct clock_event_device - clock event device descriptor + * @event_handler: Assigned by the framework to be called by the low + * level handler of the event source + * @set_next_event: set next event function using a clocksource delta + * @set_next_ktime: set next event function using a direct ktime value + * @next_event: local storage for the next event in oneshot mode + * @max_delta_ns: maximum delta value in ns + * @min_delta_ns: minimum delta value in ns + * @mult: nanosecond to cycles multiplier + * @shift: nanoseconds to cycles divisor (power of two) + * @state_use_accessors:current state of the device, assigned by the core code + * @features: features + * @retries: number of forced programming retries + * @set_state_periodic: switch state to periodic + * @set_state_oneshot: switch state to oneshot + * @set_state_oneshot_stopped: switch state to oneshot_stopped + * @set_state_shutdown: switch state to shutdown + * @tick_resume: resume clkevt device + * @broadcast: function to broadcast events + * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration + * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration + * @name: ptr to clock event name + * @rating: variable to rate clock event devices + * @irq: IRQ number (only for non CPU local devices) + * @bound_on: Bound on CPU + * @cpumask: cpumask to indicate for which CPUs this device works + * @list: list head for the management code + * @owner: module reference + */ +struct clock_event_device { + void (*event_handler)(struct clock_event_device *); + int (*set_next_event)(unsigned long evt, struct clock_event_device *); + int (*set_next_ktime)(ktime_t expires, struct clock_event_device *); + ktime_t next_event; + u64 max_delta_ns; + u64 min_delta_ns; + u32 mult; + u32 shift; + enum clock_event_state state_use_accessors; + unsigned int features; + unsigned long retries; + + int (*set_state_periodic)(struct clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_oneshot_stopped)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); + int (*tick_resume)(struct clock_event_device *); + + void (*broadcast)(const struct cpumask *mask); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); + unsigned long 
min_delta_ticks; + unsigned long max_delta_ticks; + + const char *name; + int rating; + int irq; + int bound_on; + const struct cpumask *cpumask; + struct list_head list; + struct module *owner; +} ____cacheline_aligned; + +/* Helpers to verify state of a clockevent device */ +static inline bool clockevent_state_detached(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED; +} + +static inline bool clockevent_state_shutdown(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN; +} + +static inline bool clockevent_state_periodic(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC; +} + +static inline bool clockevent_state_oneshot(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT; +} + +static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED; +} + +/* + * Calculate a multiplication factor for scaled math, which is used to convert + * nanoseconds based values to clock ticks: + * + * clock_ticks = (nanoseconds * factor) >> shift. + * + * div_sc is the rearranged equation to calculate a factor from a given clock + * ticks / nanoseconds ratio: + * + * factor = (clock_ticks << shift) / nanoseconds + */ +static inline unsigned long +div_sc(unsigned long ticks, unsigned long nsec, int shift) +{ + u64 tmp = ((u64)ticks) << shift; + + do_div(tmp, nsec); + + return (unsigned long) tmp; +} + +/* Clock event layer functions */ +extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt); +extern void clockevents_register_device(struct clock_event_device *dev); +extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu); + +extern void clockevents_config_and_register(struct clock_event_device *dev, + u32 freq, unsigned long min_delta, + unsigned long max_delta); + +extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); + +static inline void +clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec) +{ + return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec); +} + +extern void clockevents_suspend(void); +extern void clockevents_resume(void); + +# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +# ifdef CONFIG_ARCH_HAS_TICK_BROADCAST +extern void tick_broadcast(const struct cpumask *mask); +# else +# define tick_broadcast NULL +# endif +extern int tick_receive_broadcast(void); +# endif + +# if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) +extern void tick_setup_hrtimer_broadcast(void); +extern int tick_check_broadcast_expired(void); +# else +static inline int tick_check_broadcast_expired(void) { return 0; } +static inline void tick_setup_hrtimer_broadcast(void) { } +# endif + +#else /* !CONFIG_GENERIC_CLOCKEVENTS: */ + +static inline void clockevents_suspend(void) { } +static inline void clockevents_resume(void) { } +static inline int tick_check_broadcast_expired(void) { return 0; } +static inline void tick_setup_hrtimer_broadcast(void) { } + +#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ + +#endif /* _LINUX_CLOCKCHIPS_H */ diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h new file mode 100644 index 000000000..308918928 --- /dev/null +++ b/include/linux/clocksource.h @@ -0,0 +1,272 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* linux/include/linux/clocksource.h + * + * This file 
contains the structure definitions for clocksources. + * + * If you are not a clocksource, or timekeeping code, you should + * not be including this file! + */ +#ifndef _LINUX_CLOCKSOURCE_H +#define _LINUX_CLOCKSOURCE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct clocksource; +struct module; + +#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA +#include +#endif + +/** + * struct clocksource - hardware abstraction for a free running counter + * Provides mostly state-free accessors to the underlying hardware. + * This is the structure used for system time. + * + * @name: ptr to clocksource name + * @list: list head for registration + * @rating: rating value for selection (higher is better) + * To avoid rating inflation the following + * list should give you a guide as to how + * to assign your clocksource a rating + * 1-99: Unfit for real use + * Only available for bootup and testing purposes. + * 100-199: Base level usability. + * Functional for real use, but not desired. + * 200-299: Good. + * A correct and usable clocksource. + * 300-399: Desired. + * A reasonably fast and accurate clocksource. + * 400-499: Perfect + * The ideal clocksource. A must-use where + * available. + * @read: returns a cycle value, passes clocksource as argument + * @enable: optional function to enable the clocksource + * @disable: optional function to disable the clocksource + * @mask: bitmask for two's complement + * subtraction of non 64 bit counters + * @mult: cycle to nanosecond multiplier + * @shift: cycle to nanosecond divisor (power of two) + * @max_idle_ns: max idle time permitted by the clocksource (nsecs) + * @maxadj: maximum adjustment value to mult (~11%) + * @max_cycles: maximum safe cycle value which won't overflow on multiplication + * @flags: flags describing special properties + * @archdata: arch-specific data + * @suspend: suspend function for the clocksource, if necessary + * @resume: resume function for the clocksource, if necessary + * @mark_unstable: Optional function to inform the clocksource driver that + * the watchdog marked the clocksource unstable + * @owner: module reference, must be set by clocksource in modules + * + * Note: This struct is not used in hotpathes of the timekeeping code + * because the timekeeper caches the hot path fields in its own data + * structure, so no line cache alignment is required, + * + * The pointer to the clocksource itself is handed to the read + * callback. If you need extra information there you can wrap struct + * clocksource into your own struct. Depending on the amount of + * information you need you should consider to cache line align that + * structure. 
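+ *
+ * A hedged sketch of that wrapping pattern (the "foo" names are purely
+ * illustrative):
+ *
+ *	struct foo_clocksource {
+ *		struct clocksource cs;
+ *		void __iomem *base;
+ *	};
+ *
+ *	static u64 foo_read(struct clocksource *cs)
+ *	{
+ *		struct foo_clocksource *foo =
+ *			container_of(cs, struct foo_clocksource, cs);
+ *
+ *		return readl(foo->base);
+ *	}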
+ */ +struct clocksource { + u64 (*read)(struct clocksource *cs); + u64 mask; + u32 mult; + u32 shift; + u64 max_idle_ns; + u32 maxadj; +#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA + struct arch_clocksource_data archdata; +#endif + u64 max_cycles; + const char *name; + struct list_head list; + int rating; + int (*enable)(struct clocksource *cs); + void (*disable)(struct clocksource *cs); + unsigned long flags; + void (*suspend)(struct clocksource *cs); + void (*resume)(struct clocksource *cs); + void (*mark_unstable)(struct clocksource *cs); + void (*tick_stable)(struct clocksource *cs); + + /* private: */ +#ifdef CONFIG_CLOCKSOURCE_WATCHDOG + /* Watchdog related data, used by the framework */ + struct list_head wd_list; + u64 cs_last; + u64 wd_last; +#endif + struct module *owner; +}; + +/* + * Clock source flags bits:: + */ +#define CLOCK_SOURCE_IS_CONTINUOUS 0x01 +#define CLOCK_SOURCE_MUST_VERIFY 0x02 + +#define CLOCK_SOURCE_WATCHDOG 0x10 +#define CLOCK_SOURCE_VALID_FOR_HRES 0x20 +#define CLOCK_SOURCE_UNSTABLE 0x40 +#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80 +#define CLOCK_SOURCE_RESELECT 0x100 + +/* simplify initialization of mask field */ +#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0) + +static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) +{ + /* freq = cyc/from + * mult/2^shift = ns/cyc + * mult = ns/cyc * 2^shift + * mult = from/freq * 2^shift + * mult = from * 2^shift / freq + * mult = (from<> shift; +} + + +extern int clocksource_unregister(struct clocksource*); +extern void clocksource_touch_watchdog(void); +extern void clocksource_change_rating(struct clocksource *cs, int rating); +extern void clocksource_suspend(void); +extern void clocksource_resume(void); +extern struct clocksource * __init clocksource_default_clock(void); +extern void clocksource_mark_unstable(struct clocksource *cs); +extern void +clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles); +extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now); + +extern u64 +clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles); +extern void +clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec); + +/* + * Don't call __clocksource_register_scale directly, use + * clocksource_register_hz/khz + */ +extern int +__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq); +extern void +__clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq); + +/* + * Don't call this unless you are a default clocksource + * (AKA: jiffies) and absolutely have to. 
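+ *
+ * Ordinary drivers use the frequency-scaled wrappers below instead; a
+ * hedged example, assuming a made-up struct clocksource my_cs driven by a
+ * 24 MHz counter, would be
+ *
+ *	clocksource_register_hz(&my_cs, 24000000);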
+ */ +static inline int __clocksource_register(struct clocksource *cs) +{ + return __clocksource_register_scale(cs, 1, 0); +} + +static inline int clocksource_register_hz(struct clocksource *cs, u32 hz) +{ + return __clocksource_register_scale(cs, 1, hz); +} + +static inline int clocksource_register_khz(struct clocksource *cs, u32 khz) +{ + return __clocksource_register_scale(cs, 1000, khz); +} + +static inline void __clocksource_update_freq_hz(struct clocksource *cs, u32 hz) +{ + __clocksource_update_freq_scale(cs, 1, hz); +} + +static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz) +{ + __clocksource_update_freq_scale(cs, 1000, khz); +} + + +extern int timekeeping_notify(struct clocksource *clock); + +extern u64 clocksource_mmio_readl_up(struct clocksource *); +extern u64 clocksource_mmio_readl_down(struct clocksource *); +extern u64 clocksource_mmio_readw_up(struct clocksource *); +extern u64 clocksource_mmio_readw_down(struct clocksource *); + +extern int clocksource_mmio_init(void __iomem *, const char *, + unsigned long, int, unsigned, u64 (*)(struct clocksource *)); + +extern int clocksource_i8253_init(void); + +#define TIMER_OF_DECLARE(name, compat, fn) \ + OF_DECLARE_1_RET(timer, name, compat, fn) + +#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ + TIMER_OF_DECLARE(name, compat, fn) + +#ifdef CONFIG_TIMER_PROBE +extern void timer_probe(void); +#else +static inline void timer_probe(void) {} +#endif + +#define TIMER_ACPI_DECLARE(name, table_id, fn) \ + ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn) + +#endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h new file mode 100644 index 000000000..ea4958e07 --- /dev/null +++ b/include/linux/cm4000_cs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CM4000_H_ +#define _CM4000_H_ + +#include + + +#define DEVICE_NAME "cmm" +#define MODULE_NAME "cm4000_cs" + +#endif /* _CM4000_H_ */ diff --git a/include/linux/cma.h b/include/linux/cma.h new file mode 100644 index 000000000..190184b5f --- /dev/null +++ b/include/linux/cma.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CMA_H__ +#define __CMA_H__ + +#include +#include + +/* + * There is always at least global CMA area and a few optional + * areas configured in kernel .config. 
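+ *
+ * Hedged example: with CONFIG_CMA_AREAS=7 (a common Kconfig default),
+ * MAX_CMA_AREAS below evaluates to 1 + 7 = 8, i.e. the global area plus
+ * up to seven extra ones.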
+ */ +#ifdef CONFIG_CMA_AREAS +#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS) + +#else +#define MAX_CMA_AREAS (0) + +#endif + +struct cma; + +extern unsigned long totalcma_pages; +extern phys_addr_t cma_get_base(const struct cma *cma); +extern unsigned long cma_get_size(const struct cma *cma); +extern const char *cma_get_name(const struct cma *cma); + +extern int __init cma_declare_contiguous(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + bool fixed, const char *name, struct cma **res_cma); +extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, + unsigned int order_per_bit, + const char *name, + struct cma **res_cma); +extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, + bool no_warn); +extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); + +extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); +#endif diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h new file mode 100644 index 000000000..68a541807 --- /dev/null +++ b/include/linux/cmdline-parser.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Parsing command line, get the partitions information. + * + * Written by Cai Zhiyong + * + */ +#ifndef CMDLINEPARSEH +#define CMDLINEPARSEH + +#include +#include +#include + +/* partition flags */ +#define PF_RDONLY 0x01 /* Device is read only */ +#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */ + +struct cmdline_subpart { + char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */ + sector_t from; + sector_t size; + int flags; + struct cmdline_subpart *next_subpart; +}; + +struct cmdline_parts { + char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */ + unsigned int nr_subparts; + struct cmdline_subpart *subpart; + struct cmdline_parts *next_parts; +}; + +void cmdline_parts_free(struct cmdline_parts **parts); + +int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline); + +struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, + const char *bdev); + +int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, + int slot, + int (*add_part)(int, struct cmdline_subpart *, void *), + void *param); + +#endif /* CMDLINEPARSEH */ diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h new file mode 100644 index 000000000..1d5b02a96 --- /dev/null +++ b/include/linux/cn_proc.h @@ -0,0 +1,58 @@ +/* + * cn_proc.h - process events connector + * + * Copyright (C) Matt Helsley, IBM Corp. 2005 + * Based on cn_fork.h by Nguyen Anh Quynh and Guillaume Thouvenin + * Copyright (C) 2005 Nguyen Anh Quynh + * Copyright (C) 2005 Guillaume Thouvenin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ */ +#ifndef CN_PROC_H +#define CN_PROC_H + +#include + +#ifdef CONFIG_PROC_EVENTS +void proc_fork_connector(struct task_struct *task); +void proc_exec_connector(struct task_struct *task); +void proc_id_connector(struct task_struct *task, int which_id); +void proc_sid_connector(struct task_struct *task); +void proc_ptrace_connector(struct task_struct *task, int which_id); +void proc_comm_connector(struct task_struct *task); +void proc_coredump_connector(struct task_struct *task); +void proc_exit_connector(struct task_struct *task); +#else +static inline void proc_fork_connector(struct task_struct *task) +{} + +static inline void proc_exec_connector(struct task_struct *task) +{} + +static inline void proc_id_connector(struct task_struct *task, + int which_id) +{} + +static inline void proc_sid_connector(struct task_struct *task) +{} + +static inline void proc_comm_connector(struct task_struct *task) +{} + +static inline void proc_ptrace_connector(struct task_struct *task, + int ptrace_id) +{} + +static inline void proc_coredump_connector(struct task_struct *task) +{} + +static inline void proc_exit_connector(struct task_struct *task) +{} +#endif /* CONFIG_PROC_EVENTS */ +#endif /* CN_PROC_H */ diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h new file mode 100644 index 000000000..aa629bce9 --- /dev/null +++ b/include/linux/cnt32_to_63.h @@ -0,0 +1,107 @@ +/* + * Extend a 32-bit counter to 63 bits + * + * Author: Nicolas Pitre + * Created: December 3, 2006 + * Copyright: MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#ifndef __LINUX_CNT32_TO_63_H__ +#define __LINUX_CNT32_TO_63_H__ + +#include +#include +#include + +/* this is used only to give gcc a clue about good code generation */ +union cnt32_to_63 { + struct { +#if defined(__LITTLE_ENDIAN) + u32 lo, hi; +#elif defined(__BIG_ENDIAN) + u32 hi, lo; +#endif + }; + u64 val; +}; + + +/** + * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter + * @cnt_lo: The low part of the counter + * + * Many hardware clock counters are only 32 bits wide and therefore have + * a relatively short period making wrap-arounds rather frequent. This + * is a problem when implementing sched_clock() for example, where a 64-bit + * non-wrapping monotonic value is expected to be returned. + * + * To overcome that limitation, let's extend a 32-bit counter to 63 bits + * in a completely lock free fashion. Bits 0 to 31 of the clock are provided + * by the hardware while bits 32 to 62 are stored in memory. The top bit in + * memory is used to synchronize with the hardware clock half-period. When + * the top bit of both counters (hardware and in memory) differ then the + * memory is updated with a new value, incrementing it when the hardware + * counter wraps around. + * + * Because a word store in memory is atomic then the incremented value will + * always be in synch with the top bit indicating to any potential concurrent + * reader if the value in memory is up to date or not with regards to the + * needed increment. And any race in updating the value in memory is harmless + * as the same value would simply be stored more than once. 
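+ *
+ * Concretely (a hedged reading of the update expression in the macro
+ * below): the top bit of the value kept in memory mirrors bit 31 of the
+ * hardware counter. When they disagree, (__x.hi ^ 0x80000000) flips that
+ * state bit and (__x.hi >> 31) adds 1 only when the old state bit was set,
+ * i.e. exactly when the hardware counter has just wrapped past zero, so
+ * the lower 31 bits of the in-memory word count completed wrap-arounds.
+ * For example, with __m_cnt_hi = 0x80000005 and cnt_lo freshly wrapped to
+ * 0x00000012, the stored word becomes 0x00000005 + 1 = 0x00000006 and the
+ * returned 63-bit value is 0x0000000600000012.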
+ * + * The restrictions for the algorithm to work properly are: + * + * 1) this code must be called at least once per each half period of the + * 32-bit counter; + * + * 2) this code must not be preempted for a duration longer than the + * 32-bit counter half period minus the longest period between two + * calls to this code; + * + * Those requirements ensure proper update to the state bit in memory. + * This is usually not a problem in practice, but if it is then a kernel + * timer should be scheduled to manage for this code to be executed often + * enough. + * + * And finally: + * + * 3) the cnt_lo argument must be seen as a globally incrementing value, + * meaning that it should be a direct reference to the counter data which + * can be evaluated according to a specific ordering within the macro, + * and not the result of a previous evaluation stored in a variable. + * + * For example, this is wrong: + * + * u32 partial = get_hw_count(); + * u64 full = cnt32_to_63(partial); + * return full; + * + * This is fine: + * + * u64 full = cnt32_to_63(get_hw_count()); + * return full; + * + * Note that the top bit (bit 63) in the returned value should be considered + * as garbage. It is not cleared here because callers are likely to use a + * multiplier on the returned value which can get rid of the top bit + * implicitly by making the multiplier even, therefore saving on a runtime + * clear-bit instruction. Otherwise caller must remember to clear the top + * bit explicitly. + */ +#define cnt32_to_63(cnt_lo) \ +({ \ + static u32 __m_cnt_hi; \ + union cnt32_to_63 __x; \ + __x.hi = __m_cnt_hi; \ + smp_rmb(); \ + __x.lo = (cnt_lo); \ + if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ + __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ + __x.val; \ +}) + +#endif diff --git a/include/linux/coda.h b/include/linux/coda.h new file mode 100644 index 000000000..0ca0c83fd --- /dev/null +++ b/include/linux/coda.h @@ -0,0 +1,64 @@ +/* + You may distribute this file under either of the two licenses that + follow at your discretion. +*/ + +/* BLURB lgpl + + Coda File System + Release 5 + + Copyright (c) 1987-1999 Carnegie Mellon University + Additional copyrights listed below + +This code is distributed "AS IS" without warranty of any kind under +the terms of the GNU Library General Public Licence Version 2, as +shown in the file LICENSE, or under the license shown below. The +technical and financial contributors to Coda are listed in the file +CREDITS. + + Additional copyrights +*/ + +/* + + Coda: an Experimental Distributed File System + Release 4.0 + + Copyright (c) 1987-1999 Carnegie Mellon University + All Rights Reserved + +Permission to use, copy, modify and distribute this software and its +documentation is hereby granted, provided that both the copyright +notice and this permission notice appear in all copies of the +software, derivative works or modified versions, and any portions +thereof, and that both notices appear in supporting documentation, and +that credit is given to Carnegie Mellon University in all documents +and publicity pertaining to direct or indirect use of this code or its +derivatives. + +CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, +SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS +FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON +DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER +RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF +ANY DERIVATIVE WORK. 
+ +Carnegie Mellon encourages users of this software to return any +improvements or extensions that they make, and to grant Carnegie +Mellon the rights to redistribute these changes without encumbrance. +*/ + +/* + * + * Based on cfs.h from Mach, but revamped for increased simplicity. + * Linux modifications by + * Peter Braam, Aug 1996 + */ +#ifndef _CODA_HEADER_ +#define _CODA_HEADER_ + +typedef unsigned long long u_quad_t; + +#include +#endif diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h new file mode 100644 index 000000000..57d2b2faf --- /dev/null +++ b/include/linux/coda_psdev.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CODA_PSDEV_H +#define __CODA_PSDEV_H + +#include +#include +#include + +struct kstatfs; + +/* communication pending/processing queues */ +struct venus_comm { + u_long vc_seq; + wait_queue_head_t vc_waitq; /* Venus wait queue */ + struct list_head vc_pending; + struct list_head vc_processing; + int vc_inuse; + struct super_block *vc_sb; + struct mutex vc_mutex; +}; + +/* messages between coda filesystem in kernel and Venus */ +struct upc_req { + struct list_head uc_chain; + caddr_t uc_data; + u_short uc_flags; + u_short uc_inSize; /* Size is at most 5000 bytes */ + u_short uc_outSize; + u_short uc_opcode; /* copied from data to save lookup */ + int uc_unique; + wait_queue_head_t uc_sleep; /* process' wait queue */ +}; + +static inline struct venus_comm *coda_vcp(struct super_block *sb) +{ + return (struct venus_comm *)((sb)->s_fs_info); +} + +/* upcalls */ +int venus_rootfid(struct super_block *sb, struct CodaFid *fidp); +int venus_getattr(struct super_block *sb, struct CodaFid *fid, + struct coda_vattr *attr); +int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *); +int venus_lookup(struct super_block *sb, struct CodaFid *fid, + const char *name, int length, int *type, + struct CodaFid *resfid); +int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, + kuid_t uid); +int venus_open(struct super_block *sb, struct CodaFid *fid, int flags, + struct file **f); +int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length, + struct CodaFid *newfid, struct coda_vattr *attrs); +int venus_create(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length, int excl, int mode, + struct CodaFid *newfid, struct coda_vattr *attrs) ; +int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length); +int venus_remove(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length); +int venus_readlink(struct super_block *sb, struct CodaFid *fid, + char *buffer, int *length); +int venus_rename(struct super_block *, struct CodaFid *new_fid, + struct CodaFid *old_fid, size_t old_length, + size_t new_length, const char *old_name, + const char *new_name); +int venus_link(struct super_block *sb, struct CodaFid *fid, + struct CodaFid *dirfid, const char *name, int len ); +int venus_symlink(struct super_block *sb, struct CodaFid *fid, + const char *name, int len, const char *symname, int symlen); +int venus_access(struct super_block *sb, struct CodaFid *fid, int mask); +int venus_pioctl(struct super_block *sb, struct CodaFid *fid, + unsigned int cmd, struct PioctlData *data); +int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out); +int venus_fsync(struct super_block *sb, struct CodaFid *fid); +int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); + +/* + * Statistics + */ + 
+extern struct venus_comm coda_comms[]; +#endif diff --git a/include/linux/compaction.h b/include/linux/compaction.h new file mode 100644 index 000000000..68250a57a --- /dev/null +++ b/include/linux/compaction.h @@ -0,0 +1,247 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_COMPACTION_H +#define _LINUX_COMPACTION_H + +/* + * Determines how hard direct compaction should try to succeed. + * Lower value means higher priority, analogically to reclaim priority. + */ +enum compact_priority { + COMPACT_PRIO_SYNC_FULL, + MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL, + COMPACT_PRIO_SYNC_LIGHT, + MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT, + DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT, + COMPACT_PRIO_ASYNC, + INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC +}; + +/* Return values for compact_zone() and try_to_compact_pages() */ +/* When adding new states, please adjust include/trace/events/compaction.h */ +enum compact_result { + /* For more detailed tracepoint output - internal to compaction */ + COMPACT_NOT_SUITABLE_ZONE, + /* + * compaction didn't start as it was not possible or direct reclaim + * was more suitable + */ + COMPACT_SKIPPED, + /* compaction didn't start as it was deferred due to past failures */ + COMPACT_DEFERRED, + + /* compaction not active last round */ + COMPACT_INACTIVE = COMPACT_DEFERRED, + + /* For more detailed tracepoint output - internal to compaction */ + COMPACT_NO_SUITABLE_PAGE, + /* compaction should continue to another pageblock */ + COMPACT_CONTINUE, + + /* + * The full zone was compacted scanned but wasn't successfull to compact + * suitable pages. + */ + COMPACT_COMPLETE, + /* + * direct compaction has scanned part of the zone but wasn't successfull + * to compact suitable pages. + */ + COMPACT_PARTIAL_SKIPPED, + + /* compaction terminated prematurely due to lock contentions */ + COMPACT_CONTENDED, + + /* + * direct compaction terminated after concluding that the allocation + * should now succeed + */ + COMPACT_SUCCESS, +}; + +struct alloc_context; /* in mm/internal.h */ + +/* + * Number of free order-0 pages that should be available above given watermark + * to make sure compaction has reasonable chance of not running out of free + * pages that it needs to isolate as migration target during its work. + */ +static inline unsigned long compact_gap(unsigned int order) +{ + /* + * Although all the isolations for migration are temporary, compaction + * free scanner may have up to 1 << order pages on its list and then + * try to split an (order - 1) free page. At that point, a gap of + * 1 << order might not be enough, so it's safer to require twice that + * amount. Note that the number of pages on the list is also + * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum + * that the migrate scanner can have isolated on migrate list, and free + * scanner is only invoked when the number of isolated free pages is + * lower than that. But it's not worth to complicate the formula here + * as a bigger gap for higher orders than strictly necessary can also + * improve chances of compaction success. 
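+ *
+ * Worked example (illustrative numbers): for an order-3 request the free
+ * scanner may hold up to 1 << 3 = 8 pages on its list, so the gap
+ * returned below is 2UL << 3 = 16 free order-0 pages above the given
+ * watermark.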
+ */ + return 2UL << order; +} + +#ifdef CONFIG_COMPACTION +extern int sysctl_compact_memory; +extern int sysctl_compaction_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos); +extern int sysctl_extfrag_threshold; +extern int sysctl_extfrag_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos); +extern int sysctl_compact_unevictable_allowed; + +extern int fragmentation_index(struct zone *zone, unsigned int order); +extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, + unsigned int order, unsigned int alloc_flags, + const struct alloc_context *ac, enum compact_priority prio); +extern void reset_isolation_suitable(pg_data_t *pgdat); +extern enum compact_result compaction_suitable(struct zone *zone, int order, + unsigned int alloc_flags, int classzone_idx); + +extern void defer_compaction(struct zone *zone, int order); +extern bool compaction_deferred(struct zone *zone, int order); +extern void compaction_defer_reset(struct zone *zone, int order, + bool alloc_success); +extern bool compaction_restarting(struct zone *zone, int order); + +/* Compaction has made some progress and retrying makes sense */ +static inline bool compaction_made_progress(enum compact_result result) +{ + /* + * Even though this might sound confusing this in fact tells us + * that the compaction successfully isolated and migrated some + * pageblocks. + */ + if (result == COMPACT_SUCCESS) + return true; + + return false; +} + +/* Compaction has failed and it doesn't make much sense to keep retrying. */ +static inline bool compaction_failed(enum compact_result result) +{ + /* All zones were scanned completely and still not result. */ + if (result == COMPACT_COMPLETE) + return true; + + return false; +} + +/* + * Compaction has backed off for some reason. It might be throttling or + * lock contention. Retrying is still worthwhile. + */ +static inline bool compaction_withdrawn(enum compact_result result) +{ + /* + * Compaction backed off due to watermark checks for order-0 + * so the regular reclaim has to try harder and reclaim something. + */ + if (result == COMPACT_SKIPPED) + return true; + + /* + * If compaction is deferred for high-order allocations, it is + * because sync compaction recently failed. If this is the case + * and the caller requested a THP allocation, we do not want + * to heavily disrupt the system, so we fail the allocation + * instead of entering direct reclaim. + */ + if (result == COMPACT_DEFERRED) + return true; + + /* + * If compaction in async mode encounters contention or blocks higher + * priority task we back off early rather than cause stalls. + */ + if (result == COMPACT_CONTENDED) + return true; + + /* + * Page scanners have met but we haven't scanned full zones so this + * is a back off in fact. 
+ */ + if (result == COMPACT_PARTIAL_SKIPPED) + return true; + + return false; +} + + +bool compaction_zonelist_suitable(struct alloc_context *ac, int order, + int alloc_flags); + +extern int kcompactd_run(int nid); +extern void kcompactd_stop(int nid); +extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); + +#else +static inline void reset_isolation_suitable(pg_data_t *pgdat) +{ +} + +static inline enum compact_result compaction_suitable(struct zone *zone, int order, + int alloc_flags, int classzone_idx) +{ + return COMPACT_SKIPPED; +} + +static inline void defer_compaction(struct zone *zone, int order) +{ +} + +static inline bool compaction_deferred(struct zone *zone, int order) +{ + return true; +} + +static inline bool compaction_made_progress(enum compact_result result) +{ + return false; +} + +static inline bool compaction_failed(enum compact_result result) +{ + return false; +} + +static inline bool compaction_withdrawn(enum compact_result result) +{ + return true; +} + +static inline int kcompactd_run(int nid) +{ + return 0; +} +static inline void kcompactd_stop(int nid) +{ +} + +static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) +{ +} + +#endif /* CONFIG_COMPACTION */ + +#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) +struct node; +extern int compaction_register_node(struct node *node); +extern void compaction_unregister_node(struct node *node); + +#else + +static inline int compaction_register_node(struct node *node) +{ + return 0; +} + +static inline void compaction_unregister_node(struct node *node) +{ +} +#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */ + +#endif /* _LINUX_COMPACTION_H */ diff --git a/include/linux/compat.h b/include/linux/compat.h new file mode 100644 index 000000000..189d0e111 --- /dev/null +++ b/include/linux/compat.h @@ -0,0 +1,1043 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_COMPAT_H +#define _LINUX_COMPAT_H +/* + * These are the type definitions for the architecture specific + * syscall compatibility layer. + */ + +#include +#include + +#include +#include /* for HZ */ +#include +#include +#include +#include +#include /* for aio_context_t */ +#include +#include + +#include + +#ifdef CONFIG_COMPAT +#include +#include +#endif + +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +/* + * It may be useful for an architecture to override the definitions of the + * COMPAT_SYSCALL_DEFINE0 and COMPAT_SYSCALL_DEFINEx() macros, in particular + * to use a different calling convention for syscalls. To allow for that, + + the prototypes for the compat_sys_*() functions below will *not* be included + * if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. + */ +#include +#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ + +#ifndef COMPAT_USE_64BIT_TIME +#define COMPAT_USE_64BIT_TIME 0 +#endif + +#ifndef __SC_DELOUSE +#define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v)) +#endif + +#ifndef COMPAT_SYSCALL_DEFINE0 +#define COMPAT_SYSCALL_DEFINE0(name) \ + asmlinkage long compat_sys_##name(void); \ + ALLOW_ERROR_INJECTION(compat_sys_##name, ERRNO); \ + asmlinkage long compat_sys_##name(void) +#endif /* COMPAT_SYSCALL_DEFINE0 */ + +#define COMPAT_SYSCALL_DEFINE1(name, ...) \ + COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE2(name, ...) \ + COMPAT_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE3(name, ...) \ + COMPAT_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE4(name, ...) 
\ + COMPAT_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE5(name, ...) \ + COMPAT_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE6(name, ...) \ + COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) + +/* + * The asmlinkage stub is aliased to a function named __se_compat_sys_*() which + * sign-extends 32-bit ints to longs whenever needed. The actual work is + * done within __do_compat_sys_*(). + */ +#ifndef COMPAT_SYSCALL_DEFINEx +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments");\ + asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ + __attribute__((alias(__stringify(__se_compat_sys##name)))); \ + ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ + asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + return ret; \ + } \ + __diag_pop(); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) +#endif /* COMPAT_SYSCALL_DEFINEx */ + +#ifdef CONFIG_COMPAT + +#ifndef compat_user_stack_pointer +#define compat_user_stack_pointer() current_user_stack_pointer() +#endif +#ifndef compat_sigaltstack /* we'll need that for MIPS */ +typedef struct compat_sigaltstack { + compat_uptr_t ss_sp; + int ss_flags; + compat_size_t ss_size; +} compat_stack_t; +#endif +#ifndef COMPAT_MINSIGSTKSZ +#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ +#endif + +#define compat_jiffies_to_clock_t(x) \ + (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) + +typedef __compat_uid32_t compat_uid_t; +typedef __compat_gid32_t compat_gid_t; + +typedef compat_ulong_t compat_aio_context_t; + +struct compat_sel_arg_struct; +struct rusage; + +struct compat_utimbuf { + compat_time_t actime; + compat_time_t modtime; +}; + +struct compat_itimerval { + struct compat_timeval it_interval; + struct compat_timeval it_value; +}; + +struct itimerval; +int get_compat_itimerval(struct itimerval *, const struct compat_itimerval __user *); +int put_compat_itimerval(struct compat_itimerval __user *, const struct itimerval *); + +struct compat_tms { + compat_clock_t tms_utime; + compat_clock_t tms_stime; + compat_clock_t tms_cutime; + compat_clock_t tms_cstime; +}; + +struct compat_timex { + compat_uint_t modes; + compat_long_t offset; + compat_long_t freq; + compat_long_t maxerror; + compat_long_t esterror; + compat_int_t status; + compat_long_t constant; + compat_long_t precision; + compat_long_t tolerance; + struct compat_timeval time; + compat_long_t tick; + compat_long_t ppsfreq; + compat_long_t jitter; + compat_int_t shift; + compat_long_t stabil; + compat_long_t jitcnt; + compat_long_t calcnt; + compat_long_t errcnt; + compat_long_t stbcnt; + compat_int_t tai; + + compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; + compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; + compat_int_t:32; compat_int_t:32; compat_int_t:32; +}; + +struct timex; +int compat_get_timex(struct timex *, const struct compat_timex __user *); +int compat_put_timex(struct compat_timex __user *, const struct timex *); + +#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) + +typedef struct { + 
compat_sigset_word sig[_COMPAT_NSIG_WORDS]; +} compat_sigset_t; + +struct compat_sigaction { +#ifndef __ARCH_HAS_IRIX_SIGACTION + compat_uptr_t sa_handler; + compat_ulong_t sa_flags; +#else + compat_uint_t sa_flags; + compat_uptr_t sa_handler; +#endif +#ifdef __ARCH_HAS_SA_RESTORER + compat_uptr_t sa_restorer; +#endif + compat_sigset_t sa_mask __packed; +}; + +typedef union compat_sigval { + compat_int_t sival_int; + compat_uptr_t sival_ptr; +} compat_sigval_t; + +typedef struct compat_siginfo { + int si_signo; +#ifndef __ARCH_HAS_SWAPPED_SIGINFO + int si_errno; + int si_code; +#else + int si_code; + int si_errno; +#endif + + union { + int _pad[128/sizeof(int) - 3]; + + /* kill() */ + struct { + compat_pid_t _pid; /* sender's pid */ + __compat_uid32_t _uid; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + struct { + compat_timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + compat_sigval_t _sigval; /* same as below */ + } _timer; + + /* POSIX.1b signals */ + struct { + compat_pid_t _pid; /* sender's pid */ + __compat_uid32_t _uid; /* sender's uid */ + compat_sigval_t _sigval; + } _rt; + + /* SIGCHLD */ + struct { + compat_pid_t _pid; /* which child */ + __compat_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + compat_clock_t _utime; + compat_clock_t _stime; + } _sigchld; + +#ifdef CONFIG_X86_X32_ABI + /* SIGCHLD (x32 version) */ + struct { + compat_pid_t _pid; /* which child */ + __compat_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + compat_s64 _utime; + compat_s64 _stime; + } _sigchld_x32; +#endif + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ + struct { + compat_uptr_t _addr; /* faulting insn/memory ref. */ +#ifdef __ARCH_SI_TRAPNO + int _trapno; /* TRAP # which caused the signal */ +#endif +#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \ + sizeof(short) : __alignof__(compat_uptr_t)) + union { + /* + * used when si_code=BUS_MCEERR_AR or + * used when si_code=BUS_MCEERR_AO + */ + short int _addr_lsb; /* Valid LSB of the reported address. */ + /* used when si_code=SEGV_BNDERR */ + struct { + char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD]; + compat_uptr_t _lower; + compat_uptr_t _upper; + } _addr_bnd; + /* used when si_code=SEGV_PKUERR */ + struct { + char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD]; + u32 _pkey; + } _addr_pkey; + }; + } _sigfault; + + /* SIGPOLL */ + struct { + compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + + struct { + compat_uptr_t _call_addr; /* calling user insn */ + int _syscall; /* triggering system call number */ + unsigned int _arch; /* AUDIT_ARCH_* of syscall */ + } _sigsys; + } _sifields; +} compat_siginfo_t; + +/* + * These functions operate on 32- or 64-bit specs depending on + * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments. 
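 *
 * For illustration, compat_get_timeval() behaves roughly as follows
 * (simplified sketch, not the literal implementation):
 *
 *	if (COMPAT_USE_64BIT_TIME)
 *		copy a native 64-bit struct timeval from the user pointer;
 *	else
 *		copy a 32-bit struct compat_timeval and widen its fields;
 *
 * which is why a single fixed pointer type cannot be used in the
 * prototypes below.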
+ */ +extern int compat_get_timespec(struct timespec *, const void __user *); +extern int compat_put_timespec(const struct timespec *, void __user *); +extern int compat_get_timeval(struct timeval *, const void __user *); +extern int compat_put_timeval(const struct timeval *, void __user *); + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +struct compat_rlimit { + compat_ulong_t rlim_cur; + compat_ulong_t rlim_max; +}; + +struct compat_rusage { + struct compat_timeval ru_utime; + struct compat_timeval ru_stime; + compat_long_t ru_maxrss; + compat_long_t ru_ixrss; + compat_long_t ru_idrss; + compat_long_t ru_isrss; + compat_long_t ru_minflt; + compat_long_t ru_majflt; + compat_long_t ru_nswap; + compat_long_t ru_inblock; + compat_long_t ru_oublock; + compat_long_t ru_msgsnd; + compat_long_t ru_msgrcv; + compat_long_t ru_nsignals; + compat_long_t ru_nvcsw; + compat_long_t ru_nivcsw; +}; + +extern int put_compat_rusage(const struct rusage *, + struct compat_rusage __user *); + +struct compat_siginfo; +struct __compat_aio_sigset; + +struct compat_dirent { + u32 d_ino; + compat_off_t d_off; + u16 d_reclen; + char d_name[256]; +}; + +struct compat_ustat { + compat_daddr_t f_tfree; + compat_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +#define COMPAT_SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3) + +typedef struct compat_sigevent { + compat_sigval_t sigev_value; + compat_int_t sigev_signo; + compat_int_t sigev_notify; + union { + compat_int_t _pad[COMPAT_SIGEV_PAD_SIZE]; + compat_int_t _tid; + + struct { + compat_uptr_t _function; + compat_uptr_t _attribute; + } _sigev_thread; + } _sigev_un; +} compat_sigevent_t; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + unsigned short base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_if_settings { + unsigned int type; /* Type of physical device or protocol */ + unsigned int size; /* Size of the data allocated by the caller */ + compat_uptr_t ifs_ifsu; /* union of pointers */ +}; + +struct compat_ifreq { + union { + char ifrn_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[IFNAMSIZ]; /* Just fits the size */ + char ifru_newname[IFNAMSIZ]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +struct compat_ifconf { + compat_int_t ifc_len; /* size of buffer */ + compat_caddr_t ifcbuf; +}; + +struct compat_robust_list { + compat_uptr_t next; +}; + +struct compat_robust_list_head { + struct compat_robust_list list; + compat_long_t futex_offset; + compat_uptr_t list_op_pending; +}; + +#ifdef CONFIG_COMPAT_OLD_SIGACTION +struct compat_old_sigaction { + compat_uptr_t sa_handler; + compat_old_sigset_t sa_mask; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; +}; +#endif + +struct compat_keyctl_kdf_params { + compat_uptr_t hashname; + compat_uptr_t otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct compat_statfs; +struct compat_statfs64; +struct compat_old_linux_dirent; +struct compat_linux_dirent; +struct linux_dirent64; +struct compat_msghdr; +struct compat_mmsghdr; +struct compat_sysinfo; +struct compat_sysctl_args; +struct compat_kexec_segment; +struct compat_mq_attr; +struct compat_msgbuf; + +#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) + +#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG) + +long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, + unsigned long bitmap_size); +long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, + unsigned long bitmap_size); +int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from); +int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); +int get_compat_sigevent(struct sigevent *event, + const struct compat_sigevent __user *u_event); + +static inline int compat_timeval_compare(struct compat_timeval *lhs, + struct compat_timeval *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_usec - rhs->tv_usec; +} + +static inline int compat_timespec_compare(struct compat_timespec *lhs, + struct compat_timespec *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_nsec - rhs->tv_nsec; +} + +extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat); + +/* + * Defined inline such that size can be compile time constant, which avoids + * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct + */ +static inline int +put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, + unsigned int size) +{ + /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */ +#ifdef __BIG_ENDIAN + compat_sigset_t v; + switch (_NSIG_WORDS) { + case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; + case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2]; + case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1]; + case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0]; + } + return copy_to_user(compat, &v, size) ? -EFAULT : 0; +#else + return copy_to_user(compat, set, size) ? 
-EFAULT : 0; +#endif +} + +extern int compat_ptrace_request(struct task_struct *child, + compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); + +extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); + +struct epoll_event; /* fortunately, this one is fixed-layout */ + +extern ssize_t compat_rw_copy_check_uvector(int type, + const struct compat_iovec __user *uvector, + unsigned long nr_segs, + unsigned long fast_segs, struct iovec *fast_pointer, + struct iovec **ret_pointer); + +extern void __user *compat_alloc_user_space(unsigned long len); + +int compat_restore_altstack(const compat_stack_t __user *uss); +int __compat_save_altstack(compat_stack_t __user *, unsigned long); +#define compat_save_altstack_ex(uss, sp) do { \ + compat_stack_t __user *__uss = uss; \ + struct task_struct *t = current; \ + put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ + put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & SS_AUTODISARM) \ + sas_ss_reset(t); \ +} while (0); + +/* + * These syscall function prototypes are kept in the same order as + * include/uapi/asm-generic/unistd.h. Deprecated or obsolete system calls + * go below. + * + * Please note that these prototypes here are only provided for information + * purposes, for static analysis, and for linking from the syscall table. + * These functions should not be called elsewhere from kernel code. + * + * As the syscall calling convention may be different from the default + * for architectures overriding the syscall calling convention, do not + * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. + */ +#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); +asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, + u32 __user *iocb); +asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id, + compat_long_t min_nr, + compat_long_t nr, + struct io_event __user *events, + struct compat_timespec __user *timeout); +asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, + compat_long_t min_nr, + compat_long_t nr, + struct io_event __user *events, + struct compat_timespec __user *timeout, + const struct __compat_aio_sigset __user *usig); + +/* fs/cookies.c */ +asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); + +/* fs/eventpoll.c */ +asmlinkage long compat_sys_epoll_pwait(int epfd, + struct epoll_event __user *events, + int maxevents, int timeout, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); + +/* fs/fcntl.c */ +asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); +asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); + +/* fs/ioctl.c */ +asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); + +/* fs/namespace.c */ +asmlinkage long compat_sys_mount(const char __user *dev_name, + const char __user *dir_name, + const char __user *type, compat_ulong_t flags, + const void __user *data); + +/* fs/open.c */ +asmlinkage long compat_sys_statfs(const char __user *pathname, + struct compat_statfs __user *buf); +asmlinkage long compat_sys_statfs64(const char __user *pathname, + compat_size_t sz, + struct compat_statfs64 __user *buf); +asmlinkage long compat_sys_fstatfs(unsigned int fd, + struct 
compat_statfs __user *buf); +asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, + struct compat_statfs64 __user *buf); +asmlinkage long compat_sys_truncate(const char __user *, compat_off_t); +asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t); +/* No generic prototype for truncate64, ftruncate64, fallocate */ +asmlinkage long compat_sys_openat(int dfd, const char __user *filename, + int flags, umode_t mode); + +/* fs/readdir.c */ +asmlinkage long compat_sys_getdents(unsigned int fd, + struct compat_linux_dirent __user *dirent, + unsigned int count); + +/* fs/read_write.c */ +asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); +asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd, + const struct compat_iovec __user *vec, compat_ulong_t vlen); +asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd, + const struct compat_iovec __user *vec, compat_ulong_t vlen); +/* No generic prototype for pread64 and pwrite64 */ +asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high); +asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high); +#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64 +asmlinkage long compat_sys_preadv64(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos); +#endif + +#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64 +asmlinkage long compat_sys_pwritev64(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos); +#endif + +/* fs/sendfile.c */ +asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, + compat_off_t __user *offset, compat_size_t count); +asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, + compat_loff_t __user *offset, compat_size_t count); + +/* fs/select.c */ +asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, + compat_ulong_t __user *outp, + compat_ulong_t __user *exp, + struct compat_timespec __user *tsp, + void __user *sig); +asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, + unsigned int nfds, + struct compat_timespec __user *tsp, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); + +/* fs/signalfd.c */ +asmlinkage long compat_sys_signalfd4(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize, int flags); + +/* fs/splice.c */ +asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *, + unsigned int nr_segs, unsigned int flags); + +/* fs/stat.c */ +asmlinkage long compat_sys_newfstatat(unsigned int dfd, + const char __user *filename, + struct compat_stat __user *statbuf, + int flag); +asmlinkage long compat_sys_newfstat(unsigned int fd, + struct compat_stat __user *statbuf); + +/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */ + +/* fs/timerfd.c */ +asmlinkage long compat_sys_timerfd_gettime(int ufd, + struct compat_itimerspec __user *otmr); +asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, + const struct compat_itimerspec __user *utmr, + struct compat_itimerspec __user *otmr); + +/* fs/utimes.c */ +asmlinkage long compat_sys_utimensat(unsigned int dfd, + const char __user *filename, + struct compat_timespec __user *t, + int flags); + +/* kernel/exit.c */ +asmlinkage long compat_sys_waitid(int, compat_pid_t, + struct compat_siginfo __user *, int, + struct compat_rusage __user *); + + + +/* kernel/futex.c */ +asmlinkage 
long compat_sys_futex(u32 __user *uaddr, int op, u32 val, + struct compat_timespec __user *utime, u32 __user *uaddr2, + u32 val3); +asmlinkage long +compat_sys_set_robust_list(struct compat_robust_list_head __user *head, + compat_size_t len); +asmlinkage long +compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, + compat_size_t __user *len_ptr); + +/* kernel/hrtimer.c */ +asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, + struct compat_timespec __user *rmtp); + +/* kernel/itimer.c */ +asmlinkage long compat_sys_getitimer(int which, + struct compat_itimerval __user *it); +asmlinkage long compat_sys_setitimer(int which, + struct compat_itimerval __user *in, + struct compat_itimerval __user *out); + +/* kernel/kexec.c */ +asmlinkage long compat_sys_kexec_load(compat_ulong_t entry, + compat_ulong_t nr_segments, + struct compat_kexec_segment __user *, + compat_ulong_t flags); + +/* kernel/posix-timers.c */ +asmlinkage long compat_sys_timer_create(clockid_t which_clock, + struct compat_sigevent __user *timer_event_spec, + timer_t __user *created_timer_id); +asmlinkage long compat_sys_timer_gettime(timer_t timer_id, + struct compat_itimerspec __user *setting); +asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, + struct compat_itimerspec __user *new, + struct compat_itimerspec __user *old); +asmlinkage long compat_sys_clock_settime(clockid_t which_clock, + struct compat_timespec __user *tp); +asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, + struct compat_timespec __user *tp); +asmlinkage long compat_sys_clock_getres(clockid_t which_clock, + struct compat_timespec __user *tp); +asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, + struct compat_timespec __user *rqtp, + struct compat_timespec __user *rmtp); + +/* kernel/ptrace.c */ +asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, + compat_long_t addr, compat_long_t data); + +/* kernel/sched/core.c */ +asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, + unsigned int len, + compat_ulong_t __user *user_mask_ptr); +asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, + unsigned int len, + compat_ulong_t __user *user_mask_ptr); +asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, + struct compat_timespec __user *interval); + +/* kernel/signal.c */ +asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, + compat_stack_t __user *uoss_ptr); +asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, + compat_size_t sigsetsize); +#ifndef CONFIG_ODD_RT_SIGACTION +asmlinkage long compat_sys_rt_sigaction(int, + const struct compat_sigaction __user *, + struct compat_sigaction __user *, + compat_size_t); +#endif +asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, + compat_sigset_t __user *oset, + compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, + compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, + struct compat_siginfo __user *uinfo, + struct compat_timespec __user *uts, compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig, + struct compat_siginfo __user *uinfo); +/* No generic prototype for rt_sigreturn */ + +/* kernel/sys.c */ +asmlinkage long compat_sys_times(struct compat_tms __user *tbuf); +asmlinkage long compat_sys_getrlimit(unsigned int resource, + struct compat_rlimit __user 
*rlim); +asmlinkage long compat_sys_setrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); + +/* kernel/time.c */ +asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, + struct timezone __user *tz); +asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, + struct timezone __user *tz); +asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); + +/* kernel/timer.c */ +asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); + +/* ipc/mqueue.c */ +asmlinkage long compat_sys_mq_open(const char __user *u_name, + int oflag, compat_mode_t mode, + struct compat_mq_attr __user *u_attr); +asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, + const char __user *u_msg_ptr, + compat_size_t msg_len, unsigned int msg_prio, + const struct compat_timespec __user *u_abs_timeout); +asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, + char __user *u_msg_ptr, + compat_size_t msg_len, unsigned int __user *u_msg_prio, + const struct compat_timespec __user *u_abs_timeout); +asmlinkage long compat_sys_mq_notify(mqd_t mqdes, + const struct compat_sigevent __user *u_notification); +asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, + const struct compat_mq_attr __user *u_mqstat, + struct compat_mq_attr __user *u_omqstat); + +/* ipc/msg.c */ +asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr); +asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp, + compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg); +asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, + compat_ssize_t msgsz, int msgflg); + +/* ipc/sem.c */ +asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); +asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, + unsigned nsems, const struct compat_timespec __user *timeout); + +/* ipc/shm.c */ +asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); +asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); + +/* net/socket.c */ +asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, + unsigned flags, struct sockaddr __user *addr, + int __user *addrlen); +asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, + char __user *optval, unsigned int optlen); +asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, + char __user *optval, int __user *optlen); +asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, + unsigned flags); +asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, + unsigned int flags); + +/* mm/filemap.c: No generic prototype for readahead */ + +/* security/keys/keyctl.c */ +asmlinkage long compat_sys_keyctl(u32 option, + u32 arg2, u32 arg3, u32 arg4, u32 arg5); + +/* arch/example/kernel/sys_example.c */ +asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp); + +/* mm/fadvise.c: No generic prototype for fadvise64_64 */ + +/* mm/, CONFIG_MMU only */ +asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, + compat_ulong_t mode, + compat_ulong_t __user *nmask, + compat_ulong_t maxnode, compat_ulong_t flags); +asmlinkage long compat_sys_get_mempolicy(int __user *policy, + compat_ulong_t __user *nmask, + compat_ulong_t maxnode, + compat_ulong_t addr, + compat_ulong_t flags); +asmlinkage long 
compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, + compat_ulong_t maxnode); +asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, + compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, + const compat_ulong_t __user *new_nodes); +asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages, + __u32 __user *pages, + const int __user *nodes, + int __user *status, + int flags); + +asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, + compat_pid_t pid, int sig, + struct compat_siginfo __user *uinfo); +asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags, + struct compat_timespec __user *timeout); +asmlinkage long compat_sys_wait4(compat_pid_t pid, + compat_uint_t __user *stat_addr, int options, + struct compat_rusage __user *ru); +asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, + int, const char __user *); +asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, + struct file_handle __user *handle, + int flags); +asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, + struct compat_timex __user *tp); +asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags); +asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, + const struct compat_iovec __user *lvec, + compat_ulong_t liovcnt, const struct compat_iovec __user *rvec, + compat_ulong_t riovcnt, compat_ulong_t flags); +asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid, + const struct compat_iovec __user *lvec, + compat_ulong_t liovcnt, const struct compat_iovec __user *rvec, + compat_ulong_t riovcnt, compat_ulong_t flags); +asmlinkage long compat_sys_execveat(int dfd, const char __user *filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp, int flags); +asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags); +asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags); +#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2 +asmlinkage long compat_sys_readv64v2(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos, rwf_t flags); +#endif + +#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2 +asmlinkage long compat_sys_pwritev64v2(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos, rwf_t flags); +#endif + + +/* + * Deprecated system calls which are still defined in + * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch + */ + +/* __ARCH_WANT_SYSCALL_NO_AT */ +asmlinkage long compat_sys_open(const char __user *filename, int flags, + umode_t mode); +asmlinkage long compat_sys_utimes(const char __user *filename, + struct compat_timeval __user *t); + +/* __ARCH_WANT_SYSCALL_NO_FLAGS */ +asmlinkage long compat_sys_signalfd(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); + +/* __ARCH_WANT_SYSCALL_OFF_T */ +asmlinkage long compat_sys_newstat(const char __user *filename, + struct compat_stat __user *statbuf); +asmlinkage long compat_sys_newlstat(const char __user *filename, + struct compat_stat __user *statbuf); + +/* __ARCH_WANT_SYSCALL_DEPRECATED */ +asmlinkage long compat_sys_time(compat_time_t __user *tloc); +asmlinkage long compat_sys_utime(const char __user *filename, + struct 
compat_utimbuf __user *t); +asmlinkage long compat_sys_futimesat(unsigned int dfd, + const char __user *filename, + struct compat_timeval __user *t); +asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, + compat_ulong_t __user *outp, compat_ulong_t __user *exp, + struct compat_timeval __user *tvp); +asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); +asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, + unsigned flags); +asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args); + +/* obsolete: fs/readdir.c */ +asmlinkage long compat_sys_old_readdir(unsigned int fd, + struct compat_old_linux_dirent __user *, + unsigned int count); + +/* obsolete: fs/select.c */ +asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg); + +/* obsolete: ipc */ +asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32); + +/* obsolete: kernel/signal.c */ +#ifdef __ARCH_WANT_SYS_SIGPENDING +asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set); +#endif + +#ifdef __ARCH_WANT_SYS_SIGPROCMASK +asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset, + compat_old_sigset_t __user *oset); +#endif +#ifdef CONFIG_COMPAT_OLD_SIGACTION +asmlinkage long compat_sys_sigaction(int sig, + const struct compat_old_sigaction __user *act, + struct compat_old_sigaction __user *oact); +#endif + +/* obsolete: kernel/time/time.c */ +asmlinkage long compat_sys_stime(compat_time_t __user *tptr); + +/* obsolete: net/socket.c */ +asmlinkage long compat_sys_socketcall(int call, u32 __user *args); + +#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ + + +/* + * For most but not all architectures, "am I in a compat syscall?" and + * "am I a compat task?" are the same question. For architectures on which + * they aren't the same question, arch code can override in_compat_syscall. + */ + +#ifndef in_compat_syscall +static inline bool in_compat_syscall(void) { return is_compat_task(); } +#endif + +/** + * ns_to_compat_timeval - Compat version of ns_to_timeval + * @nsec: the nanoseconds value to be converted + * + * Returns the compat_timeval representation of the nsec parameter. + */ +static inline struct compat_timeval ns_to_compat_timeval(s64 nsec) +{ + struct timeval tv; + struct compat_timeval ctv; + + tv = ns_to_timeval(nsec); + ctv.tv_sec = tv.tv_sec; + ctv.tv_usec = tv.tv_usec; + + return ctv; +} + +/* + * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz()) + * directly. Instead, use one of the functions which work equivalently, such + * as the kcompat_sys_xyzyyz() functions prototyped below. 
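 *
 * For instance, in-kernel code that needs the compat statfs64 behaviour
 * should call kcompat_sys_statfs64() declared just below rather than the
 * compat_sys_statfs64() syscall entry point.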
+ */ + +int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz, + struct compat_statfs64 __user * buf); +int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, + struct compat_statfs64 __user * buf); + +#else /* !CONFIG_COMPAT */ + +#define is_compat_task() (0) +#ifndef in_compat_syscall +static inline bool in_compat_syscall(void) { return false; } +#endif + +#endif /* CONFIG_COMPAT */ + +#endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h new file mode 100644 index 000000000..e70bfd1d2 --- /dev/null +++ b/include/linux/compat_time.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_COMPAT_TIME_H +#define _LINUX_COMPAT_TIME_H + +#include +#include + +typedef s32 compat_time_t; + +struct compat_timespec { + compat_time_t tv_sec; + s32 tv_nsec; +}; + +struct compat_timeval { + compat_time_t tv_sec; + s32 tv_usec; +}; + +struct compat_itimerspec { + struct compat_timespec it_interval; + struct compat_timespec it_value; +}; + +extern int compat_get_timespec64(struct timespec64 *, const void __user *); +extern int compat_put_timespec64(const struct timespec64 *, void __user *); +extern int get_compat_itimerspec64(struct itimerspec64 *its, + const struct compat_itimerspec __user *uits); +extern int put_compat_itimerspec64(const struct itimerspec64 *its, + struct compat_itimerspec __user *uits); + +#endif /* _LINUX_COMPAT_TIME_H */ diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h new file mode 100644 index 000000000..2d6e5e4bb --- /dev/null +++ b/include/linux/compiler-clang.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPILER_TYPES_H +#error "Please don't include directly, include instead." +#endif + +/* Compiler specific definitions for Clang compiler */ + +#define uninitialized_var(x) x = *(&(x)) + +/* same as gcc, this was present in clang-2.6 so we can assume it works + * with any version that can compile the kernel + */ +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +/* all clang versions usable with the kernel support KASAN ABI version 5 */ +#define KASAN_ABI_VERSION 5 + +/* emulate gcc's __SANITIZE_ADDRESS__ flag */ +#if __has_feature(address_sanitizer) +#define __SANITIZE_ADDRESS__ +#endif + +#define __no_sanitize_address __attribute__((no_sanitize("address"))) + +/* + * Not all versions of clang implement the the type-generic versions + * of the builtin overflow checkers. Fortunately, clang implements + * __has_builtin allowing us to avoid awkward version + * checks. Unfortunately, we don't know which version of gcc clang + * pretends to be, so the macro may or may not be defined. + */ +#if __has_builtin(__builtin_mul_overflow) && \ + __has_builtin(__builtin_add_overflow) && \ + __has_builtin(__builtin_sub_overflow) +#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 +#endif + +/* The following are for compatibility with GCC, from compiler-gcc.h, + * and may be redefined here because they should not be shared with other + * compilers, like ICC. + */ +#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +#define __assume_aligned(a, ...) 
\ + __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h new file mode 100644 index 000000000..a80d6de3c --- /dev/null +++ b/include/linux/compiler-gcc.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPILER_TYPES_H +#error "Please don't include directly, include instead." +#endif + +/* + * Common definitions for all gcc versions go here. + */ +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) + +#if GCC_VERSION < 40600 +# error Sorry, your compiler is too old - please upgrade it. +#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100 +/* + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293 + * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk + */ +# error Sorry, your version of GCC is too old - please use 5.1 or newer. +#endif + +/* + * This macro obfuscates arithmetic on a variable address so that gcc + * shouldn't recognize the original var, and make assumptions about it. + * + * This is needed because the C standard makes it undefined to do + * pointer arithmetic on "objects" outside their boundaries and the + * gcc optimizers assume this is the case. In particular they + * assume such arithmetic does not wrap. + * + * A miscompilation has been observed because of this on PPC. + * To work around it we hide the relationship of the pointer and the object + * using this macro. + * + * Versions of the ppc64 compiler before 4.1 had a bug where use of + * RELOC_HIDE could trash r30. The bug can be worked around by changing + * the inline assembly constraint from =g to =r, in this particular + * case either is valid. + */ +#define RELOC_HIDE(ptr, off) \ +({ \ + unsigned long __ptr; \ + __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ + (typeof(ptr)) (__ptr + (off)); \ +}) + +/* + * A trick to suppress uninitialized variable warning without generating any + * code + */ +#define uninitialized_var(x) x = x + +#ifdef __CHECKER__ +#define __must_be_array(a) 0 +#else +/* &a[0] degrades to a pointer: a different type from an array */ +#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +#endif + +#ifdef CONFIG_RETPOLINE +#define __noretpoline __attribute__((indirect_branch("keep"))) +#endif + +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +#define __optimize(level) __attribute__((__optimize__(level))) + +#define __compiletime_object_size(obj) __builtin_object_size(obj, 0) + +#ifndef __CHECKER__ +#define __compiletime_warning(message) __attribute__((warning(message))) +#define __compiletime_error(message) __attribute__((error(message))) + +#ifdef LATENT_ENTROPY_PLUGIN +#define __latent_entropy __attribute__((latent_entropy)) +#endif +#endif /* __CHECKER__ */ + +/* + * calling noreturn functions, __builtin_unreachable() and __builtin_trap() + * confuse the stack allocation in gcc, leading to overly large stack + * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365 + * + * Adding an empty inline assembly before it works around the problem + */ +#define barrier_before_unreachable() asm volatile("") + +/* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer + * control elsewhere. + * + * Early snapshots of gcc 4.5 don't support this and we can't detect + * this in the preprocessor, but we can live with this because they're + * unreleased. Really, we need to have autoconf for the kernel. 
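 *
 * An illustrative use is after an instruction sequence that can never fall
 * through, for example (x86):
 *
 *	asm volatile("ud2");
 *	unreachable();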
+ */ +#define unreachable() \ + do { \ + annotate_unreachable(); \ + barrier_before_unreachable(); \ + __builtin_unreachable(); \ + } while (0) + +/* Mark a function definition as prohibited from being cloned. */ +#define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) + +#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__) +#define __randomize_layout __attribute__((randomize_layout)) +#define __no_randomize_layout __attribute__((no_randomize_layout)) +/* This anon struct can add padding, so only enable it under randstruct. */ +#define randomized_struct_fields_start struct { +#define randomized_struct_fields_end } __randomize_layout; +#endif + +/* + * When used with Link Time Optimization, gcc can optimize away C functions or + * variables which are referenced only from assembly code. __visible tells the + * optimizer that something else uses this function or variable, thus preventing + * this. + */ +#define __visible __attribute__((externally_visible)) + +/* gcc version specific checks */ + +#if GCC_VERSION >= 40900 && !defined(__CHECKER__) +/* + * __assume_aligned(n, k): Tell the optimizer that the returned + * pointer can be assumed to be k modulo n. The second argument is + * optional (default 0), so we use a variadic macro to make the + * shorthand. + * + * Beware: Do not apply this to functions which may return + * ERR_PTRs. Also, it is probably unwise to apply it to functions + * returning extra information in the low bits (but in that case the + * compiler should see some alignment anyway, when the return value is + * massaged by 'flags = ptr & 3; ptr &= ~3;'). + */ +#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) +#endif + +/* + * GCC 'asm goto' miscompiles certain code sequences: + * + * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 + * + * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. + * + * (asm goto is automatically volatile - the naming reflects this.) + */ +#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) + +/* + * sparse (__CHECKER__) pretends to be gcc, but can't do constant + * folding in __builtin_bswap*() (yet), so don't set these for it. + */ +#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) +#define __HAVE_BUILTIN_BSWAP32__ +#define __HAVE_BUILTIN_BSWAP64__ +#if GCC_VERSION >= 40800 +#define __HAVE_BUILTIN_BSWAP16__ +#endif +#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ + +#if GCC_VERSION >= 70000 +#define KASAN_ABI_VERSION 5 +#elif GCC_VERSION >= 50000 +#define KASAN_ABI_VERSION 4 +#elif GCC_VERSION >= 40902 +#define KASAN_ABI_VERSION 3 +#endif + +#if GCC_VERSION >= 40902 +/* + * Tell the compiler that address safety instrumentation (KASAN) + * should not be applied to that function. + * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + */ +#define __no_sanitize_address __attribute__((no_sanitize_address)) +#endif + +#if GCC_VERSION >= 50100 +/* + * Mark structures as requiring designated initializers. 
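 *
 * For example, a structure type tagged __designated_init must be
 * initialized field-by-field,
 *
 *	struct foo f = { .bar = 1 };
 *
 * and gcc can then warn when a positional initializer list is used instead.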
+ * https://gcc.gnu.org/onlinedocs/gcc/Designated-Inits.html + */ +#define __designated_init __attribute__((designated_init)) +#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 +#endif + +#if GCC_VERSION >= 90100 +#define __copy(symbol) __attribute__((__copy__(symbol))) +#endif + +#if !defined(__noclone) +#define __noclone /* not needed */ +#endif + +#if !defined(__no_sanitize_address) +#define __no_sanitize_address +#endif + +/* + * Turn individual warnings and errors on and off locally, depending + * on version. + */ +#define __diag_GCC(version, severity, s) \ + __diag_GCC_ ## version(__diag_GCC_ ## severity s) + +/* Severity used in pragma directives */ +#define __diag_GCC_ignore ignored +#define __diag_GCC_warn warning +#define __diag_GCC_error error + +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) + +#if GCC_VERSION >= 80000 +#define __diag_GCC_8(s) __diag(s) +#else +#define __diag_GCC_8(s) +#endif diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h new file mode 100644 index 000000000..f1fc60f10 --- /dev/null +++ b/include/linux/compiler-intel.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPILER_TYPES_H +#error "Please don't include directly, include instead." +#endif + +#ifdef __ECC + +/* Compiler specific definitions for Intel ECC compiler */ + +#include + +/* Intel ECC compiler doesn't support gcc specific asm stmts. + * It uses intrinsics to do the equivalent things. + */ + +#define barrier() __memory_barrier() +#define barrier_data(ptr) barrier() + +#define RELOC_HIDE(ptr, off) \ + ({ unsigned long __ptr; \ + __ptr = (unsigned long) (ptr); \ + (typeof(ptr)) (__ptr + (off)); }) + +/* This should act as an optimization barrier on var. + * Given that this compiler does not have inline assembly, a compiler barrier + * is the best we can do. + */ +#define OPTIMIZER_HIDE_VAR(var) barrier() + +/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */ +#define __must_be_array(a) 0 + +#endif + +/* icc has this, but it's called _bswap16 */ +#define __HAVE_BUILTIN_BSWAP16__ +#define __builtin_bswap16 _bswap16 + +/* The following are for compatibility with GCC, from compiler-gcc.h, + * and may be redefined here because they should not be shared with other + * compilers, like clang. + */ +#define __visible __attribute__((externally_visible)) diff --git a/include/linux/compiler.h b/include/linux/compiler.h new file mode 100644 index 000000000..ab9dfb14f --- /dev/null +++ b/include/linux/compiler.h @@ -0,0 +1,430 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPILER_H +#define __LINUX_COMPILER_H + +#include + +#ifndef __ASSEMBLY__ + +#ifdef __KERNEL__ + +/* + * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code + * to disable branch tracing on a per file basis. 
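 *
 * For instance, a file can '#define DISABLE_BRANCH_PROFILING' before its
 * #includes (or get it from its Makefile CFLAGS), in which case likely()
 * and unlikely() below stay plain __builtin_expect() hints for that file.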
+ */ +#if defined(CONFIG_TRACE_BRANCH_PROFILING) \ + && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) +void ftrace_likely_update(struct ftrace_likely_data *f, int val, + int expect, int is_constant); + +#define likely_notrace(x) __builtin_expect(!!(x), 1) +#define unlikely_notrace(x) __builtin_expect(!!(x), 0) + +#define __branch_check__(x, expect, is_constant) ({ \ + long ______r; \ + static struct ftrace_likely_data \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_annotated_branch"))) \ + ______f = { \ + .data.func = __func__, \ + .data.file = __FILE__, \ + .data.line = __LINE__, \ + }; \ + ______r = __builtin_expect(!!(x), expect); \ + ftrace_likely_update(&______f, ______r, \ + expect, is_constant); \ + ______r; \ + }) + +/* + * Using __builtin_constant_p(x) to ignore cases where the return + * value is always the same. This idea is taken from a similar patch + * written by Daniel Walker. + */ +# ifndef likely +# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x))) +# endif +# ifndef unlikely +# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x))) +# endif + +#ifdef CONFIG_PROFILE_ALL_BRANCHES +/* + * "Define 'is'", Bill Clinton + * "Define 'if'", Steven Rostedt + */ +#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) +#define __trace_if(cond) \ + if (__builtin_constant_p(!!(cond)) ? !!(cond) : \ + ({ \ + int ______r; \ + static struct ftrace_branch_data \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_branch"))) \ + ______f = { \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + }; \ + ______r = !!(cond); \ + ______f.miss_hit[______r]++; \ + ______r; \ + })) +#endif /* CONFIG_PROFILE_ALL_BRANCHES */ + +#else +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +/* Optimization barrier */ +#ifndef barrier +/* The "volatile" is due to gcc bugs */ +# define barrier() __asm__ __volatile__("": : :"memory") +#endif + +#ifndef barrier_data +/* + * This version is i.e. to prevent dead stores elimination on @ptr + * where gcc and llvm may behave differently when otherwise using + * normal barrier(): while gcc behavior gets along with a normal + * barrier(), llvm needs an explicit input variable to be assumed + * clobbered. The issue is as follows: while the inline asm might + * access any memory it wants, the compiler could have fit all of + * @ptr into memory registers instead, and since @ptr never escaped + * from that, it proved that the inline asm wasn't touching any of + * it. This version works well with both compilers, i.e. we're telling + * the compiler that the inline asm absolutely may see the contents + * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 + */ +# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") +#endif + +/* workaround for GCC PR82365 if needed */ +#ifndef barrier_before_unreachable +# define barrier_before_unreachable() do { } while (0) +#endif + +/* Unreachable code */ +#ifdef CONFIG_STACK_VALIDATION +/* + * These macros help objtool understand GCC code flow for unreachable code. + * The __COUNTER__ based labels are a hack to make each instance of the macros + * unique, to convince GCC not to merge duplicate inline asm statements. 
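 *
 * Each expansion drops a small entry ('.long %c0b - .') into a .discard.*
 * section recording where in the code the annotation sits; objtool consumes
 * those entries at build time and the linker then discards the sections.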
+ */ +#define annotate_reachable() ({ \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.reachable\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#define annotate_unreachable() ({ \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.unreachable\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#define ASM_UNREACHABLE \ + "999:\n\t" \ + ".pushsection .discard.unreachable\n\t" \ + ".long 999b - .\n\t" \ + ".popsection\n\t" + +#ifdef CONFIG_DEBUG_ENTRY +/* Begin/end of an instrumentation safe region */ +#define instrumentation_begin() ({ \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.instr_begin\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) + +/* + * Because instrumentation_{begin,end}() can nest, objtool validation considers + * _begin() a +1 and _end() a -1 and computes a sum over the instructions. + * When the value is greater than 0, we consider instrumentation allowed. + * + * There is a problem with code like: + * + * noinstr void foo() + * { + * instrumentation_begin(); + * ... + * if (cond) { + * instrumentation_begin(); + * ... + * instrumentation_end(); + * } + * bar(); + * instrumentation_end(); + * } + * + * If instrumentation_end() would be an empty label, like all the other + * annotations, the inner _end(), which is at the end of a conditional block, + * would land on the instruction after the block. + * + * If we then consider the sum of the !cond path, we'll see that the call to + * bar() is with a 0-value, even though, we meant it to happen with a positive + * value. + * + * To avoid this, have _end() be a NOP instruction, this ensures it will be + * part of the condition block and does not escape. + */ +#define instrumentation_end() ({ \ + asm volatile("%c0: nop\n\t" \ + ".pushsection .discard.instr_end\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#endif /* CONFIG_DEBUG_ENTRY */ + +#else +#define annotate_reachable() +#define annotate_unreachable() +#endif + +#ifndef instrumentation_begin +#define instrumentation_begin() do { } while(0) +#define instrumentation_end() do { } while(0) +#endif + +#ifndef ASM_UNREACHABLE +# define ASM_UNREACHABLE +#endif +#ifndef unreachable +# define unreachable() do { \ + annotate_unreachable(); \ + __builtin_unreachable(); \ +} while (0) +#endif + +/* + * KENTRY - kernel entry point + * This can be used to annotate symbols (functions or data) that are used + * without their linker symbol being referenced explicitly. For example, + * interrupt vector handlers, or functions in the kernel image that are found + * programatically. + * + * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those + * are handled in their own way (with KEEP() in linker scripts). + * + * KENTRY can be avoided if the symbols in question are marked as KEEP() in the + * linker script. For example an architecture could KEEP() its entire + * boot/exception vector code rather than annotate each function and data. 
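 *
 * Illustrative use, assuming a handler that is only ever reached through a
 * vector table built at boot:
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry);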
+ */ +#ifndef KENTRY +# define KENTRY(sym) \ + extern typeof(sym) sym; \ + static const unsigned long __kentry_##sym \ + __used \ + __attribute__((section("___kentry" "+" #sym ), used)) \ + = (unsigned long)&sym; +#endif + +#ifndef RELOC_HIDE +# define RELOC_HIDE(ptr, off) \ + ({ unsigned long __ptr; \ + __ptr = (unsigned long) (ptr); \ + (typeof(ptr)) (__ptr + (off)); }) +#endif + +#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0) + +#ifndef OPTIMIZER_HIDE_VAR +/* Make the optimizer believe the variable can be manipulated arbitrarily. */ +#define OPTIMIZER_HIDE_VAR(var) \ + __asm__ ("" : "=r" (var) : "0" (var)) +#endif + +/* Not-quite-unique ID. */ +#ifndef __UNIQUE_ID +# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) +#endif + +#include + +#define __READ_ONCE_SIZE \ +({ \ + switch (size) { \ + case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ + case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ + case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ + case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ + default: \ + barrier(); \ + __builtin_memcpy((void *)res, (const void *)p, size); \ + barrier(); \ + } \ +}) + +static __always_inline +void __read_once_size(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} + +#ifdef CONFIG_KASAN +/* + * We can't declare function 'inline' because __no_sanitize_address confilcts + * with inlining. Attempt to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. + */ +# define __no_kasan_or_inline __no_sanitize_address __maybe_unused +#else +# define __no_kasan_or_inline __always_inline +#endif + +static __no_kasan_or_inline +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} + +static __always_inline void __write_once_size(volatile void *p, void *res, int size) +{ + switch (size) { + case 1: *(volatile __u8 *)p = *(__u8 *)res; break; + case 2: *(volatile __u16 *)p = *(__u16 *)res; break; + case 4: *(volatile __u32 *)p = *(__u32 *)res; break; + case 8: *(volatile __u64 *)p = *(__u64 *)res; break; + default: + barrier(); + __builtin_memcpy((void *)p, (const void *)res, size); + barrier(); + } +} + +/* + * Prevent the compiler from merging or refetching reads or writes. The + * compiler is also forbidden from reordering successive instances of + * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some + * particular ordering. One way to make the compiler aware of ordering is to + * put the two invocations of READ_ONCE or WRITE_ONCE in different C + * statements. + * + * These two macros will also work on aggregate data types like structs or + * unions. If the size of the accessed data type exceeds the word size of + * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will + * fall back to memcpy(). There's at least two memcpy()s: one for the + * __builtin_memcpy() and then one for the macro doing the copy of variable + * - '__u' allocated on the stack. + * + * Their two major use cases are: (1) Mediating communication between + * process-level code and irq/NMI handlers, all running on the same CPU, + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise + * mutilate accesses that either do not require ordering or that interact + * with an explicit memory barrier or atomic instruction that provides the + * required ordering. 
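 *
 * A minimal illustration of use case (1): process context polls a flag that
 * an interrupt handler sets, and READ_ONCE() keeps the compiler from
 * hoisting the load out of the loop:
 *
 *	while (!READ_ONCE(done))
 *		cpu_relax();
 *
 * with the interrupt handler doing WRITE_ONCE(done, 1).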
+ */ +#include +#include + +#define __READ_ONCE(x, check) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u; \ + if (check) \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + else \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ + __u.__val; \ +}) +#define READ_ONCE(x) __READ_ONCE(x, 1) + +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need + * to hide memory access from KASAN. + */ +#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) + +static __no_kasan_or_inline +unsigned long read_word_at_a_time(const void *addr) +{ + kasan_check_read(addr, 1); + return *(unsigned long *)addr; +} + +#define WRITE_ONCE(x, val) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(x)) (val) }; \ + __write_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) + +#endif /* __KERNEL__ */ + +/* + * Force the compiler to emit 'sym' as a symbol, so that we can reference + * it from inline assembler. Necessary in case 'sym' could be inlined + * otherwise, or eliminated entirely due to lack of references that are + * visible to the compiler. + */ +#define __ADDRESSABLE(sym) \ + static void * __attribute__((section(".discard.addressable"), used)) \ + __PASTE(__addressable_##sym, __LINE__) = (void *)&sym; + +/** + * offset_to_ptr - convert a relative memory offset to an absolute pointer + * @off: the address of the 32-bit offset value + */ +static inline void *offset_to_ptr(const int *off) +{ + return (void *)((unsigned long)off + *off); +} + +#endif /* __ASSEMBLY__ */ + +#ifndef __optimize +# define __optimize(level) +#endif + +/* Compile time object size, -1 for unknown */ +#ifndef __compiletime_object_size +# define __compiletime_object_size(obj) -1 +#endif +#ifndef __compiletime_warning +# define __compiletime_warning(message) +#endif +#ifndef __compiletime_error +# define __compiletime_error(message) +#endif + +#ifdef __OPTIMIZE__ +# define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (!(condition)) \ + prefix ## suffix(); \ + } while (0) +#else +# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) +#endif + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) + +/** + * compiletime_assert - break build and emit msg if condition is false + * @condition: a compile-time constant condition to check + * @msg: a message to emit if condition is false + * + * In tradition of POSIX assert, this macro will break the build if the + * supplied condition is *false*, emitting the supplied error message if the + * compiler has support to do so. + */ +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) + +#define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ + "Need native word sized stores/loads for atomicity.") + +/* + * This is needed in functions which generate the stack canary, see + * arch/x86/kernel/smpboot.c::start_secondary() for an example. 
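 *
 * Roughly: if the final call in such a function became a tail call, the
 * stack-protector epilogue would check a canary the function itself has
 * just re-seeded and panic; keeping it a real call to a function that never
 * returns means that check is never reached.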
+ */ +#define prevent_tail_call_optimization() mb() + +#endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h new file mode 100644 index 000000000..c01100318 --- /dev/null +++ b/include/linux/compiler_types.h @@ -0,0 +1,299 @@ +#ifndef __LINUX_COMPILER_TYPES_H +#define __LINUX_COMPILER_TYPES_H + +#ifndef __ASSEMBLY__ + +#ifdef __CHECKER__ +# define __user __attribute__((noderef, address_space(1))) +# define __kernel __attribute__((address_space(0))) +# define __safe __attribute__((safe)) +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __iomem __attribute__((noderef, address_space(2))) +# define __must_hold(x) __attribute__((context(x,1,1))) +# define __acquires(x) __attribute__((context(x,0,1))) +# define __releases(x) __attribute__((context(x,1,0))) +# define __acquire(x) __context__(x,1) +# define __release(x) __context__(x,-1) +# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) +# define __percpu __attribute__((noderef, address_space(3))) +# define __rcu __attribute__((noderef, address_space(4))) +# define __private __attribute__((noderef)) +extern void __chk_user_ptr(const volatile void __user *); +extern void __chk_io_ptr(const volatile void __iomem *); +# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) +#else /* __CHECKER__ */ +# ifdef STRUCTLEAK_PLUGIN +# define __user __attribute__((user)) +# else +# define __user +# endif +# define __kernel +# define __safe +# define __force +# define __nocast +# define __iomem +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +# define __builtin_warning(x, y...) (1) +# define __must_hold(x) +# define __acquires(x) +# define __releases(x) +# define __acquire(x) (void)0 +# define __release(x) (void)0 +# define __cond_lock(x,c) (c) +# define __percpu +# define __rcu +# define __private +# define ACCESS_PRIVATE(p, member) ((p)->member) +#endif /* __CHECKER__ */ + +/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ +#define ___PASTE(a,b) a##b +#define __PASTE(a,b) ___PASTE(a,b) + +#ifdef __KERNEL__ + +/* Compiler specific macros. */ +#ifdef __clang__ +#include +#elif defined(__INTEL_COMPILER) +#include +#elif defined(__GNUC__) +/* The above compilers also define __GNUC__, so order is important here. */ +#include +#else +#error "Unknown compiler" +#endif + +/* + * Some architectures need to provide custom definitions of macros provided + * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that + * conditionally rather than using an asm-generic wrapper in order to avoid + * build failures if any C compilation, which will include this file via an + * -include argument in c_flags, occurs prior to the asm-generic wrappers being + * generated. + */ +#ifdef CONFIG_HAVE_ARCH_COMPILER_H +#include +#endif + +/* + * Generic compiler-independent macros required for kernel + * build go below this comment. Actual compiler/compiler version + * specific implementations come from the above header files + */ + +struct ftrace_branch_data { + const char *func; + const char *file; + unsigned line; + union { + struct { + unsigned long correct; + unsigned long incorrect; + }; + struct { + unsigned long miss; + unsigned long hit; + }; + unsigned long miss_hit[2]; + }; +}; + +struct ftrace_likely_data { + struct ftrace_branch_data data; + unsigned long constant; +}; + +/* Don't. Just don't. 
*/ +#define __deprecated +#define __deprecated_for_modules + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +/* + * The below symbols may be defined for one or more, but not ALL, of the above + * compilers. We don't consider that to be an error, so set them to nothing. + * For example, some of them are for compiler specific plugins. + */ +#ifndef __designated_init +# define __designated_init +#endif + +#ifndef __latent_entropy +# define __latent_entropy +#endif + +#ifndef __randomize_layout +# define __randomize_layout __designated_init +#endif + +#ifndef __no_randomize_layout +# define __no_randomize_layout +#endif + +#ifndef randomized_struct_fields_start +# define randomized_struct_fields_start +# define randomized_struct_fields_end +#endif + +#ifndef __visible +#define __visible +#endif + +/* + * Assume alignment of return value. + */ +#ifndef __assume_aligned +#define __assume_aligned(a, ...) +#endif + +#ifndef asm_volatile_goto +#define asm_volatile_goto(x...) asm goto(x) +#endif + +/* Are two types/vars the same type (ignoring qualifiers)? */ +#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) + +/* Is this type a native word size -- useful for atomic operations */ +#define __native_word(t) \ + (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ + sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) + +#ifndef __attribute_const__ +#define __attribute_const__ __attribute__((__const__)) +#endif + +#ifndef __noclone +#define __noclone +#endif + +/* Helpers for emitting diagnostics in pragmas. */ +#ifndef __diag +#define __diag(string) +#endif + +#ifndef __diag_GCC +#define __diag_GCC(version, severity, string) +#endif + +#ifndef __copy +# define __copy(symbol) +#endif + +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) + +#define __diag_ignore(compiler, version, option, comment) \ + __diag_ ## compiler(version, ignore, option) +#define __diag_warn(compiler, version, option, comment) \ + __diag_ ## compiler(version, warn, option) +#define __diag_error(compiler, version, option, comment) \ + __diag_ ## compiler(version, error, option) + +/* + * From the GCC manual: + * + * Many functions have no effects except the return value and their + * return value depends only on the parameters and/or global + * variables. Such a function can be subject to common subexpression + * elimination and loop optimization just as an arithmetic operator + * would be. + * [...] 
+ */ +#define __pure __attribute__((pure)) +#define __aligned(x) __attribute__((aligned(x))) +#define __aligned_largest __attribute__((aligned)) +#define __printf(a, b) __attribute__((format(printf, a, b))) +#define __scanf(a, b) __attribute__((format(scanf, a, b))) +#define __maybe_unused __attribute__((unused)) +#define __always_unused __attribute__((unused)) +#define __mode(x) __attribute__((mode(x))) +#define __malloc __attribute__((__malloc__)) +#define __used __attribute__((__used__)) +#define __noreturn __attribute__((noreturn)) +#define __packed __attribute__((packed)) +#define __weak __attribute__((weak)) +#define __alias(symbol) __attribute__((alias(#symbol))) +#define __cold __attribute__((cold)) +#define __section(S) __attribute__((__section__(#S))) + + +#ifdef CONFIG_ENABLE_MUST_CHECK +#define __must_check __attribute__((warn_unused_result)) +#else +#define __must_check +#endif + +#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) +#define notrace __attribute__((hotpatch(0, 0))) +#else +#define notrace __attribute__((no_instrument_function)) +#endif + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +/* Section for code which can't be instrumented at all */ +#define noinstr \ + noinline notrace __attribute((__section__(".noinstr.text"))) +#endif + +/* + * it doesn't make sense on ARM (currently the only user of __naked) + * to trace naked functions because then mcount is called without + * stack and frame pointer being set up and there is no chance to + * restore the lr register to the value before mcount was called. + */ +#define __naked __attribute__((naked)) notrace + +#define __compiler_offsetof(a, b) __builtin_offsetof(a, b) + +/* + * Feature detection for gnu_inline (gnu89 extern inline semantics). Either + * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics, + * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not + * defined so the gnu89 semantics are the default. + */ +#ifdef __GNUC_STDC_INLINE__ +# define __gnu_inline __attribute__((gnu_inline)) +#else +# define __gnu_inline +#endif + +/* + * Force always-inline if the user requests it so via the .config. + * GCC does not warn about unused static inline functions for + * -Wunused-function. This turns out to avoid the need for complex #ifdef + * directives. Suppress the warning in clang as well by using "unused" + * function attribute, which is redundant but not harmful for gcc. + * Prefer gnu_inline, so that extern inline functions do not emit an + * externally visible function. This makes extern inline behave as per gnu89 + * semantics rather than c99. This prevents multiple symbol definition errors + * of extern inline functions at link time. + * A lot of inline functions can cause havoc with function tracing. + */ +#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ + !defined(CONFIG_OPTIMIZE_INLINING) +#define inline \ + inline __attribute__((always_inline, unused)) notrace __gnu_inline +#else +#define inline inline __attribute__((unused)) notrace __gnu_inline +#endif + +#define __inline__ inline +#define __inline inline +#define noinline __attribute__((noinline)) + +#ifndef __always_inline +#define __always_inline inline __attribute__((always_inline)) +#endif + +/* + * Rather then using noinline to prevent stack consumption, use + * noinline_for_stack instead. For documentation reasons. 
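An illustrative sketch, not part of the patch, of a few of the attribute wrappers defined above. The struct and the foo_* declarations are hypothetical.

#include <linux/compiler_types.h>
#include <linux/types.h>

/* Hypothetical on-wire header: no padding, placed on a 64-byte boundary. */
struct foo_wire_hdr {
	__le32 magic;
	__le16 len;
	u8     flags;
} __packed __aligned(64);

/* The format string is checked against the arguments at compile time. */
__printf(2, 3) __cold void foo_log(int level, const char *fmt, ...);

/* Callers that ignore the return value get a compiler warning. */
__must_check int foo_reserve(unsigned int nr);

/* Small helper that must never be emitted out of line. */
static __always_inline u32 foo_mask(u32 v)
{
	return v & 0xffff;
}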
+ */ +#define noinline_for_stack noinline + +#endif /* __LINUX_COMPILER_TYPES_H */ diff --git a/include/linux/completion.h b/include/linux/completion.h new file mode 100644 index 000000000..519e94915 --- /dev/null +++ b/include/linux/completion.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPLETION_H +#define __LINUX_COMPLETION_H + +/* + * (C) Copyright 2001 Linus Torvalds + * + * Atomic wait-for-completion handler data structures. + * See kernel/sched/completion.c for details. + */ + +#include + +/* + * struct completion - structure used to maintain state for a "completion" + * + * This is the opaque structure used to maintain the state for a "completion". + * Completions currently use a FIFO to queue threads that have to wait for + * the "completion" event. + * + * See also: complete(), wait_for_completion() (and friends _timeout, + * _interruptible, _interruptible_timeout, and _killable), init_completion(), + * reinit_completion(), and macros DECLARE_COMPLETION(), + * DECLARE_COMPLETION_ONSTACK(). + */ +struct completion { + unsigned int done; + wait_queue_head_t wait; +}; + +#define init_completion_map(x, m) __init_completion(x) +#define init_completion(x) __init_completion(x) +static inline void complete_acquire(struct completion *x) {} +static inline void complete_release(struct completion *x) {} + +#define COMPLETION_INITIALIZER(work) \ + { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } + +#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ + (*({ init_completion_map(&(work), &(map)); &(work); })) + +#define COMPLETION_INITIALIZER_ONSTACK(work) \ + (*({ init_completion(&work); &work; })) + +/** + * DECLARE_COMPLETION - declare and initialize a completion structure + * @work: identifier for the completion structure + * + * This macro declares and initializes a completion structure. Generally used + * for static declarations. You should use the _ONSTACK variant for automatic + * variables. + */ +#define DECLARE_COMPLETION(work) \ + struct completion work = COMPLETION_INITIALIZER(work) + +/* + * Lockdep needs to run a non-constant initializer for on-stack + * completions - so we use the _ONSTACK() variant for those that + * are on the kernel stack: + */ +/** + * DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure + * @work: identifier for the completion structure + * + * This macro declares and initializes a completion structure on the kernel + * stack. + */ +#ifdef CONFIG_LOCKDEP +# define DECLARE_COMPLETION_ONSTACK(work) \ + struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) +# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \ + struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) +#else +# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) +# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work) +#endif + +/** + * init_completion - Initialize a dynamically allocated completion + * @x: pointer to completion structure that is to be initialized + * + * This inline function will initialize a dynamically created completion + * structure. + */ +static inline void __init_completion(struct completion *x) +{ + x->done = 0; + init_waitqueue_head(&x->wait); +} + +/** + * reinit_completion - reinitialize a completion structure + * @x: pointer to completion structure that is to be reinitialized + * + * This inline function should be used to reinitialize a completion structure so it can + * be reused. This is especially important after complete_all() is used. 
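To make the completion API above concrete, a minimal sketch (not from the patch) of the usual pattern: one context initialises and waits, another signals. struct foo_dev and the three functions are hypothetical; init_completion(), complete() and wait_for_completion_timeout() are the interfaces declared in this header.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical device state embedding a completion. */
struct foo_dev {
	struct completion cmd_done;
};

static void foo_dev_init(struct foo_dev *fd)
{
	init_completion(&fd->cmd_done);
}

/* Interrupt handler (or another thread) signals that the command finished. */
static void foo_cmd_irq(struct foo_dev *fd)
{
	complete(&fd->cmd_done);
}

/* Submitter waits up to one second; call reinit_completion() before reuse. */
static int foo_cmd_wait(struct foo_dev *fd)
{
	if (!wait_for_completion_timeout(&fd->cmd_done, HZ))
		return -ETIMEDOUT;
	return 0;
}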
+ */ +static inline void reinit_completion(struct completion *x) +{ + x->done = 0; +} + +extern void wait_for_completion(struct completion *); +extern void wait_for_completion_io(struct completion *); +extern int wait_for_completion_interruptible(struct completion *x); +extern int wait_for_completion_killable(struct completion *x); +extern unsigned long wait_for_completion_timeout(struct completion *x, + unsigned long timeout); +extern unsigned long wait_for_completion_io_timeout(struct completion *x, + unsigned long timeout); +extern long wait_for_completion_interruptible_timeout( + struct completion *x, unsigned long timeout); +extern long wait_for_completion_killable_timeout( + struct completion *x, unsigned long timeout); +extern bool try_wait_for_completion(struct completion *x); +extern bool completion_done(struct completion *x); + +extern void complete(struct completion *); +extern void complete_all(struct completion *); + +#endif diff --git a/include/linux/component.h b/include/linux/component.h new file mode 100644 index 000000000..e71fbbbc7 --- /dev/null +++ b/include/linux/component.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef COMPONENT_H +#define COMPONENT_H + +#include + +struct device; + +struct component_ops { + int (*bind)(struct device *comp, struct device *master, + void *master_data); + void (*unbind)(struct device *comp, struct device *master, + void *master_data); +}; + +int component_add(struct device *, const struct component_ops *); +void component_del(struct device *, const struct component_ops *); + +int component_bind_all(struct device *master, void *master_data); +void component_unbind_all(struct device *master, void *master_data); + +struct master; + +struct component_master_ops { + int (*bind)(struct device *master); + void (*unbind)(struct device *master); +}; + +void component_master_del(struct device *, + const struct component_master_ops *); + +struct component_match; + +int component_master_add_with_match(struct device *, + const struct component_master_ops *, struct component_match *); +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), + int (*compare)(struct device *, void *), void *compare_data); + +static inline void component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), void *compare_data) +{ + component_match_add_release(master, matchptr, NULL, compare, + compare_data); +} + +#endif diff --git a/include/linux/concap.h b/include/linux/concap.h new file mode 100644 index 000000000..977acb3d1 --- /dev/null +++ b/include/linux/concap.h @@ -0,0 +1,112 @@ +/* $Id: concap.h,v 1.3.2.2 2004/01/12 23:08:35 keil Exp $ + * + * Copyright 1997 by Henner Eisen + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + */ + +#ifndef _LINUX_CONCAP_H +#define _LINUX_CONCAP_H + +#include +#include + +/* Stuff to support encapsulation protocols genericly. The encapsulation + protocol is processed at the uppermost layer of the network interface. + + Based on a ideas developed in a 'synchronous device' thread in the + linux-x25 mailing list contributed by Alan Cox, Thomasz Motylewski + and Jonathan Naylor. 
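Stepping back to component.h above: a minimal sketch, not part of the patch, of how a sub-device registers itself with the aggregate-device framework. The foo_* names are hypothetical; component_add()/component_del() and struct component_ops are the interfaces declared above.

#include <linux/component.h>
#include <linux/device.h>

/* Hypothetical sub-device, bound once the master aggregates all components. */
static int foo_bind(struct device *comp, struct device *master,
		    void *master_data)
{
	/* claim resources shared with the master, via master_data */
	return 0;
}

static void foo_unbind(struct device *comp, struct device *master,
		       void *master_data)
{
	/* undo whatever foo_bind() set up */
}

static const struct component_ops foo_component_ops = {
	.bind	= foo_bind,
	.unbind	= foo_unbind,
};

/* Typically called from the sub-device driver's probe()/remove() paths. */
static int foo_probe(struct device *dev)
{
	return component_add(dev, &foo_component_ops);
}

static void foo_remove(struct device *dev)
{
	component_del(dev, &foo_component_ops);
}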
+ + For more documentation on this refer to Documentation/isdn/README.concap +*/ + +struct concap_proto_ops; +struct concap_device_ops; + +/* this manages all data needed by the encapsulation protocol + */ +struct concap_proto{ + struct net_device *net_dev; /* net device using our service */ + struct concap_device_ops *dops; /* callbacks provided by device */ + struct concap_proto_ops *pops; /* callbacks provided by us */ + spinlock_t lock; + int flags; + void *proto_data; /* protocol specific private data, to + be accessed via *pops methods only*/ + /* + : + whatever + : + */ +}; + +/* Operations to be supported by the net device. Called by the encapsulation + * protocol entity. No receive method is offered because the encapsulation + * protocol directly calls netif_rx(). + */ +struct concap_device_ops{ + + /* request that data be submitted by the device */ + int (*data_req)(struct concap_proto *, struct sk_buff *); + + /* Control methods must be set to NULL by devices which do not + support connection control.*/ + /* request that a connection be set up */ + int (*connect_req)(struct concap_proto *); + + /* request that a connection be released */ + int (*disconn_req)(struct concap_proto *); +}; + +/* Operations to be supported by the encapsulation protocol. Called by the + * device driver. + */ +struct concap_proto_ops{ + + /* create a new encapsulation protocol instance of the same type */ + struct concap_proto * (*proto_new) (void); + + /* delete encapsulation protocol instance and free all its resources. + cprot may no longer be referenced after calling this */ + void (*proto_del)(struct concap_proto *cprot); + + /* initialize the protocol's data. To be called at interface startup + or when the device driver resets the interface. All services of the + encapsulation protocol may be used after this*/ + int (*restart)(struct concap_proto *cprot, + struct net_device *ndev, + struct concap_device_ops *dops); + + /* inactivate an encapsulation protocol instance. The encapsulation + protocol may not call any *dops methods after this. */ + int (*close)(struct concap_proto *cprot); + + /* process a frame handed down to us by the upper layer */ + int (*encap_and_xmit)(struct concap_proto *cprot, struct sk_buff *skb); + + /* to be called for each data entity received from the lower layer */ + int (*data_ind)(struct concap_proto *cprot, struct sk_buff *skb); + + /* to be called when a connection was set up/down. + Protocols that don't process these primitives might fill in + dummy methods here */ + int (*connect_ind)(struct concap_proto *cprot); + int (*disconn_ind)(struct concap_proto *cprot); + /* + Some network device support functions, like net_header(), rebuild_header(), + and others, that depend solely on the encapsulation protocol, might + be provided here, too. The net device would just fill them in its + corresponding fields when it is opened.
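A small sketch, not from the patch, of the device side of this interface: a hypothetical driver fills in struct concap_device_ops, using the concap_nop() dummy (declared just below) for the optional connection-control methods.

#include <linux/concap.h>
#include <linux/skbuff.h>

/* Hypothetical device: hand encapsulated frames to the hardware queue. */
static int foo_concap_data_req(struct concap_proto *cprot,
			       struct sk_buff *skb)
{
	/* real code would queue skb on cprot->net_dev's hardware */
	dev_kfree_skb(skb);
	return 0;
}

static struct concap_device_ops foo_concap_dops = {
	.data_req    = foo_concap_data_req,
	.connect_req = concap_nop,	/* no connection control */
	.disconn_req = concap_nop,
};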
+ */ +}; + +/* dummy restart/close/connect/reset/disconn methods + */ +extern int concap_nop(struct concap_proto *cprot); + +/* dummy submit method + */ +extern int concap_drop_skb(struct concap_proto *cprot, struct sk_buff *skb); +#endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h new file mode 100644 index 000000000..90b90f8ba --- /dev/null +++ b/include/linux/configfs.h @@ -0,0 +1,289 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * configfs.h - definitions for the device driver filesystem + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + * Based on sysfs: + * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel + * + * Based on kobject.h: + * Copyright (c) 2002-2003 Patrick Mochel + * Copyright (c) 2002-2003 Open Source Development Labs + * + * configfs Copyright (C) 2005 Oracle. All rights reserved. + * + * Please read Documentation/filesystems/configfs/configfs.txt before using + * the configfs interface, ESPECIALLY the parts about reference counts and + * item destructors. + */ + +#ifndef _CONFIGFS_H_ +#define _CONFIGFS_H_ + +#include /* S_IRUGO */ +#include /* ssize_t */ +#include /* struct list_head */ +#include /* struct kref */ +#include /* struct mutex */ + +#define CONFIGFS_ITEM_NAME_LEN 20 + +struct module; + +struct configfs_item_operations; +struct configfs_group_operations; +struct configfs_attribute; +struct configfs_bin_attribute; +struct configfs_subsystem; + +struct config_item { + char *ci_name; + char ci_namebuf[CONFIGFS_ITEM_NAME_LEN]; + struct kref ci_kref; + struct list_head ci_entry; + struct config_item *ci_parent; + struct config_group *ci_group; + const struct config_item_type *ci_type; + struct dentry *ci_dentry; +}; + +extern __printf(2, 3) +int config_item_set_name(struct config_item *, const char *, ...); + +static inline char *config_item_name(struct config_item * item) +{ + return item->ci_name; +} + +extern void config_item_init_type_name(struct config_item *item, + const char *name, + const struct config_item_type *type); + +extern struct config_item *config_item_get(struct config_item *); +extern struct config_item *config_item_get_unless_zero(struct config_item *); +extern void config_item_put(struct config_item *); + +struct config_item_type { + struct module *ct_owner; + struct configfs_item_operations *ct_item_ops; + struct configfs_group_operations *ct_group_ops; + struct configfs_attribute **ct_attrs; + struct configfs_bin_attribute **ct_bin_attrs; +}; + +/** + * group - a group of config_items of a specific type, belonging + * to a specific subsystem. 
+ */ +struct config_group { + struct config_item cg_item; + struct list_head cg_children; + struct configfs_subsystem *cg_subsys; + struct list_head default_groups; + struct list_head group_entry; +}; + +extern void config_group_init(struct config_group *group); +extern void config_group_init_type_name(struct config_group *group, + const char *name, + const struct config_item_type *type); + +static inline struct config_group *to_config_group(struct config_item *item) +{ + return item ? container_of(item,struct config_group,cg_item) : NULL; +} + +static inline struct config_group *config_group_get(struct config_group *group) +{ + return group ? to_config_group(config_item_get(&group->cg_item)) : NULL; +} + +static inline void config_group_put(struct config_group *group) +{ + config_item_put(&group->cg_item); +} + +extern struct config_item *config_group_find_item(struct config_group *, + const char *); + + +static inline void configfs_add_default_group(struct config_group *new_group, + struct config_group *group) +{ + list_add_tail(&new_group->group_entry, &group->default_groups); +} + +struct configfs_attribute { + const char *ca_name; + struct module *ca_owner; + umode_t ca_mode; + ssize_t (*show)(struct config_item *, char *); + ssize_t (*store)(struct config_item *, const char *, size_t); +}; + +#define CONFIGFS_ATTR(_pfx, _name) \ +static struct configfs_attribute _pfx##attr_##_name = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IRUGO | S_IWUSR, \ + .ca_owner = THIS_MODULE, \ + .show = _pfx##_name##_show, \ + .store = _pfx##_name##_store, \ +} + +#define CONFIGFS_ATTR_RO(_pfx, _name) \ +static struct configfs_attribute _pfx##attr_##_name = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IRUGO, \ + .ca_owner = THIS_MODULE, \ + .show = _pfx##_name##_show, \ +} + +#define CONFIGFS_ATTR_WO(_pfx, _name) \ +static struct configfs_attribute _pfx##attr_##_name = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IWUSR, \ + .ca_owner = THIS_MODULE, \ + .store = _pfx##_name##_store, \ +} + +struct file; +struct vm_area_struct; + +struct configfs_bin_attribute { + struct configfs_attribute cb_attr; /* std. attribute */ + void *cb_private; /* for user */ + size_t cb_max_size; /* max core size */ + ssize_t (*read)(struct config_item *, void *, size_t); + ssize_t (*write)(struct config_item *, const void *, size_t); +}; + +#define CONFIGFS_BIN_ATTR(_pfx, _name, _priv, _maxsz) \ +static struct configfs_bin_attribute _pfx##attr_##_name = { \ + .cb_attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IRUGO | S_IWUSR, \ + .ca_owner = THIS_MODULE, \ + }, \ + .cb_private = _priv, \ + .cb_max_size = _maxsz, \ + .read = _pfx##_name##_read, \ + .write = _pfx##_name##_write, \ +} + +#define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \ +static struct configfs_bin_attribute _pfx##attr_##_name = { \ + .cb_attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IRUGO, \ + .ca_owner = THIS_MODULE, \ + }, \ + .cb_private = _priv, \ + .cb_max_size = _maxsz, \ + .read = _pfx##_name##_read, \ +} + +#define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \ +static struct configfs_bin_attribute _pfx##attr_##_name = { \ + .cb_attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IWUSR, \ + .ca_owner = THIS_MODULE, \ + }, \ + .cb_private = _priv, \ + .cb_max_size = _maxsz, \ + .write = _pfx##_name##_write, \ +} + +/* + * If allow_link() exists, the item can symlink(2) out to other + * items. If the item is a group, it may support mkdir(2). 
+ * Groups supply one of make_group() and make_item(). If the + * group supports make_group(), one can create group children. If it + * supports make_item(), one can create config_item children. make_group() + * and make_item() return ERR_PTR() on errors. If it has + * default_groups on group->default_groups, it has automatically created + * group children. default_groups may coexist alongsize make_group() or + * make_item(), but if the group wishes to have only default_groups + * children (disallowing mkdir(2)), it need not provide either function. + * If the group has commit(), it supports pending and committed (active) + * items. + */ +struct configfs_item_operations { + void (*release)(struct config_item *); + int (*allow_link)(struct config_item *src, struct config_item *target); + void (*drop_link)(struct config_item *src, struct config_item *target); +}; + +struct configfs_group_operations { + struct config_item *(*make_item)(struct config_group *group, const char *name); + struct config_group *(*make_group)(struct config_group *group, const char *name); + int (*commit_item)(struct config_item *item); + void (*disconnect_notify)(struct config_group *group, struct config_item *item); + void (*drop_item)(struct config_group *group, struct config_item *item); +}; + +struct configfs_subsystem { + struct config_group su_group; + struct mutex su_mutex; +}; + +static inline struct configfs_subsystem *to_configfs_subsystem(struct config_group *group) +{ + return group ? + container_of(group, struct configfs_subsystem, su_group) : + NULL; +} + +int configfs_register_subsystem(struct configfs_subsystem *subsys); +void configfs_unregister_subsystem(struct configfs_subsystem *subsys); + +int configfs_register_group(struct config_group *parent_group, + struct config_group *group); +void configfs_unregister_group(struct config_group *group); + +void configfs_remove_default_groups(struct config_group *group); + +struct config_group * +configfs_register_default_group(struct config_group *parent_group, + const char *name, + const struct config_item_type *item_type); +void configfs_unregister_default_group(struct config_group *group); + +/* These functions can sleep and can alloc with GFP_KERNEL */ +/* WARNING: These cannot be called underneath configfs callbacks!! */ +int configfs_depend_item(struct configfs_subsystem *subsys, + struct config_item *target); +void configfs_undepend_item(struct config_item *target); + +/* + * These functions can sleep and can alloc with GFP_KERNEL + * NOTE: These should be called only underneath configfs callbacks. + * NOTE: First parameter is a caller's subsystem, not target's. + * WARNING: These cannot be called on newly created item + * (in make_group()/make_item() callback) + */ +int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys, + struct config_item *target); + + +static inline void configfs_undepend_item_unlocked(struct config_item *target) +{ + configfs_undepend_item(target); +} + +#endif /* _CONFIGFS_H_ */ diff --git a/include/linux/connector.h b/include/linux/connector.h new file mode 100644 index 000000000..032102b19 --- /dev/null +++ b/include/linux/connector.h @@ -0,0 +1,88 @@ +/* + * connector.h + * + * 2004-2005 Copyright (c) Evgeniy Polyakov + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
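Returning to configfs.h above: a minimal sketch, not part of the patch, of a subsystem exposing one read-only attribute via the CONFIGFS_ATTR_RO() helper and configfs_register_subsystem(). All foo_* names are hypothetical.

#include <linux/configfs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>

/* Hypothetical subsystem with a single read-only "version" attribute. */
static ssize_t foo_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "1\n");
}
CONFIGFS_ATTR_RO(foo_, version);

static struct configfs_attribute *foo_attrs[] = {
	&foo_attr_version,	/* generated by CONFIGFS_ATTR_RO() above */
	NULL,
};

static const struct config_item_type foo_type = {
	.ct_owner = THIS_MODULE,
	.ct_attrs = foo_attrs,
};

static struct configfs_subsystem foo_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "foo",
			.ci_type    = &foo_type,
		},
	},
};

/* Called from module_init() in a real driver. */
static int __init foo_configfs_init(void)
{
	config_group_init(&foo_subsys.su_group);
	mutex_init(&foo_subsys.su_mutex);
	return configfs_register_subsystem(&foo_subsys);
}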
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef __CONNECTOR_H +#define __CONNECTOR_H + + +#include + +#include +#include + +#include +#include + +#define CN_CBQ_NAMELEN 32 + +struct cn_queue_dev { + atomic_t refcnt; + unsigned char name[CN_CBQ_NAMELEN]; + + struct list_head queue_list; + spinlock_t queue_lock; + + struct sock *nls; +}; + +struct cn_callback_id { + unsigned char name[CN_CBQ_NAMELEN]; + struct cb_id id; +}; + +struct cn_callback_entry { + struct list_head callback_entry; + refcount_t refcnt; + struct cn_queue_dev *pdev; + + struct cn_callback_id id; + void (*callback) (struct cn_msg *, struct netlink_skb_parms *); + + u32 seq, group; +}; + +struct cn_dev { + struct cb_id id; + + u32 seq, groups; + struct sock *nls; + void (*input) (struct sk_buff *skb); + + struct cn_queue_dev *cbdev; +}; + +int cn_add_callback(struct cb_id *id, const char *name, + void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); +void cn_del_callback(struct cb_id *); +int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); +int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask); + +int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, + struct cb_id *id, + void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); +void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); +void cn_queue_release_callback(struct cn_callback_entry *); + +struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *); +void cn_queue_free_dev(struct cn_queue_dev *dev); + +int cn_cb_equal(struct cb_id *, struct cb_id *); + +#endif /* __CONNECTOR_H */ diff --git a/include/linux/console.h b/include/linux/console.h new file mode 100644 index 000000000..35e3cc4fb --- /dev/null +++ b/include/linux/console.h @@ -0,0 +1,234 @@ +/* + * linux/include/linux/console.h + * + * Copyright (C) 1993 Hamish Macdonald + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + * Changed: + * 10-Mar-94: Arno Griffioen: Conversion for vt100 emulator port from PC LINUX + */ + +#ifndef _LINUX_CONSOLE_H_ +#define _LINUX_CONSOLE_H_ 1 + +#include +#include + +struct vc_data; +struct console_font_op; +struct console_font; +struct module; +struct tty_struct; +struct notifier_block; + +/* + * this is what the terminal answers to a ESC-Z or csi0c query. + */ +#define VT100ID "\033[?1;2c" +#define VT102ID "\033[?6c" + +enum con_scroll { + SM_UP, + SM_DOWN, +}; + +/** + * struct consw - callbacks for consoles + * + * @con_scroll: move lines from @top to @bottom in direction @dir by @lines. + * Return true if no generic handling should be done. + * Invoked by csi_M and printing to the console. + * @con_set_palette: sets the palette of the console to @table (optional) + * @con_scrolldelta: the contents of the console should be scrolled by @lines. + * Invoked by user. 
(optional) + */ +struct consw { + struct module *owner; + const char *(*con_startup)(void); + void (*con_init)(struct vc_data *vc, int init); + void (*con_deinit)(struct vc_data *vc); + void (*con_clear)(struct vc_data *vc, int sy, int sx, int height, + int width); + void (*con_putc)(struct vc_data *vc, int c, int ypos, int xpos); + void (*con_putcs)(struct vc_data *vc, const unsigned short *s, + int count, int ypos, int xpos); + void (*con_cursor)(struct vc_data *vc, int mode); + bool (*con_scroll)(struct vc_data *vc, unsigned int top, + unsigned int bottom, enum con_scroll dir, + unsigned int lines); + int (*con_switch)(struct vc_data *vc); + int (*con_blank)(struct vc_data *vc, int blank, int mode_switch); + int (*con_font_set)(struct vc_data *vc, struct console_font *font, + unsigned int flags); + int (*con_font_get)(struct vc_data *vc, struct console_font *font); + int (*con_font_default)(struct vc_data *vc, + struct console_font *font, char *name); + int (*con_font_copy)(struct vc_data *vc, int con); + int (*con_resize)(struct vc_data *vc, unsigned int width, + unsigned int height, unsigned int user); + void (*con_set_palette)(struct vc_data *vc, + const unsigned char *table); + void (*con_scrolldelta)(struct vc_data *vc, int lines); + int (*con_set_origin)(struct vc_data *vc); + void (*con_save_screen)(struct vc_data *vc); + u8 (*con_build_attr)(struct vc_data *vc, u8 color, u8 intensity, + u8 blink, u8 underline, u8 reverse, u8 italic); + void (*con_invert_region)(struct vc_data *vc, u16 *p, int count); + u16 *(*con_screen_pos)(struct vc_data *vc, int offset); + unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position, + int *px, int *py); + /* + * Flush the video console driver's scrollback buffer + */ + void (*con_flush_scrollback)(struct vc_data *vc); + /* + * Prepare the console for the debugger. This includes, but is not + * limited to, unblanking the console, loading an appropriate + * palette, and allowing debugger generated output. + */ + int (*con_debug_enter)(struct vc_data *vc); + /* + * Restore the console to its pre-debug state as closely as possible. + */ + int (*con_debug_leave)(struct vc_data *vc); +}; + +extern const struct consw *conswitchp; + +extern const struct consw dummy_con; /* dummy console buffer */ +extern const struct consw vga_con; /* VGA text console */ +extern const struct consw newport_con; /* SGI Newport console */ +extern const struct consw prom_con; /* SPARC PROM console */ + +int con_is_bound(const struct consw *csw); +int do_unregister_con_driver(const struct consw *csw); +int do_take_over_console(const struct consw *sw, int first, int last, int deflt); +void give_up_console(const struct consw *sw); +#ifdef CONFIG_HW_CONSOLE +int con_debug_enter(struct vc_data *vc); +int con_debug_leave(void); +#else +static inline int con_debug_enter(struct vc_data *vc) +{ + return 0; +} +static inline int con_debug_leave(void) +{ + return 0; +} +#endif + +/* cursor */ +#define CM_DRAW (1) +#define CM_ERASE (2) +#define CM_MOVE (3) + +/* + * The interface for a console, or any other device that wants to capture + * console messages (printer driver?) + * + * If a console driver is marked CON_BOOT then it will be auto-unregistered + * when the first real console is registered. This is for early-printk drivers. 
+ */ + +#define CON_PRINTBUFFER (1) +#define CON_CONSDEV (2) /* Last on the command line */ +#define CON_ENABLED (4) +#define CON_BOOT (8) +#define CON_ANYTIME (16) /* Safe to call when cpu is offline */ +#define CON_BRL (32) /* Used for a braille device */ +#define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */ + +struct console { + char name[16]; + void (*write)(struct console *, const char *, unsigned); + int (*read)(struct console *, char *, unsigned); + struct tty_driver *(*device)(struct console *, int *); + void (*unblank)(void); + int (*setup)(struct console *, char *); + int (*match)(struct console *, char *name, int idx, char *options); + short flags; + short index; + int cflag; + uint ispeed; + uint ospeed; + void *data; + struct console *next; +}; + +/* + * for_each_console() allows you to iterate on each console + */ +#define for_each_console(con) \ + for (con = console_drivers; con != NULL; con = con->next) + +extern int console_set_on_cmdline; +extern struct console *early_console; + +extern int add_preferred_console(char *name, int idx, char *options); +extern void register_console(struct console *); +extern int unregister_console(struct console *); +extern struct console *console_drivers; +extern void console_lock(void); +extern int console_trylock(void); +extern void console_unlock(void); +extern void console_conditional_schedule(void); +extern void console_unblank(void); +extern void console_flush_on_panic(void); +extern struct tty_driver *console_device(int *); +extern void console_stop(struct console *); +extern void console_start(struct console *); +extern int is_console_locked(void); +extern int braille_register_console(struct console *, int index, + char *console_options, char *braille_options); +extern int braille_unregister_console(struct console *); +#ifdef CONFIG_TTY +extern void console_sysfs_notify(void); +#else +static inline void console_sysfs_notify(void) +{ } +#endif +extern bool console_suspend_enabled; + +/* Suspend and resume console messages over PM events */ +extern void suspend_console(void); +extern void resume_console(void); + +int mda_console_init(void); +void prom_con_init(void); + +void vcs_make_sysfs(int index); +void vcs_remove_sysfs(int index); + +/* Some debug stub to catch some of the obvious races in the VT code */ +#define WARN_CONSOLE_UNLOCKED() \ + WARN_ON(!atomic_read(&ignore_console_lock_warning) && \ + !is_console_locked() && !oops_in_progress) +/* + * Increment ignore_console_lock_warning if you need to quiet + * WARN_CONSOLE_UNLOCKED() for debugging purposes. + */ +extern atomic_t ignore_console_lock_warning; + +/* VESA Blanking Levels */ +#define VESA_NO_BLANKING 0 +#define VESA_VSYNC_SUSPEND 1 +#define VESA_HSYNC_SUSPEND 2 +#define VESA_POWERDOWN 3 + +#ifdef CONFIG_VGA_CONSOLE +extern bool vgacon_text_force(void); +#else +static inline bool vgacon_text_force(void) { return false; } +#endif + +extern void console_init(void); + +/* For deferred console takeover */ +void dummycon_register_output_notifier(struct notifier_block *nb); +void dummycon_unregister_output_notifier(struct notifier_block *nb); + +#endif /* _LINUX_CONSOLE_H */ diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h new file mode 100644 index 000000000..8b5bc3a47 --- /dev/null +++ b/include/linux/console_struct.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * console_struct.h + * + * Data structure describing single virtual console except for data + * used by vt.c. 
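As an illustration of the struct console interface above (not part of the patch): a hypothetical early/boot console that only implements write(). Per the comment above, CON_BOOT makes the core unregister it automatically once a real console is registered.

#include <linux/console.h>
#include <linux/init.h>

/* Hypothetical early console: push bytes straight to a fixed debug UART. */
static void foo_early_write(struct console *con, const char *s, unsigned n)
{
	/* hardware specific: write n bytes starting at s to the UART FIFO */
}

static struct console foo_early_console = {
	.name	= "fooearly",
	.write	= foo_early_write,
	.flags	= CON_PRINTBUFFER | CON_BOOT,
	.index	= -1,
};

static int __init foo_early_console_init(void)
{
	register_console(&foo_early_console);
	return 0;
}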
+ * + * Fields marked with [#] must be set by the low-level driver. + * Fields marked with [!] can be changed by the low-level driver + * to achieve effects such as fast scrolling by changing the origin. + */ + +#ifndef _LINUX_CONSOLE_STRUCT_H +#define _LINUX_CONSOLE_STRUCT_H + +#include +#include +#include + +struct uni_pagedir; +struct uni_screen; + +#define NPAR 16 + +/* + * Example: vc_data of a console that was scrolled 3 lines down. + * + * Console buffer + * vc_screenbuf ---------> +----------------------+-. + * | initializing W | \ + * | initializing X | | + * | initializing Y | > scroll-back area + * | initializing Z | | + * | | / + * vc_visible_origin ---> ^+----------------------+-: + * (changes by scroll) || Welcome to linux | \ + * || | | + * vc_rows --->< | login: root | | visible on console + * || password: | > (vc_screenbuf_size is + * vc_origin -----------> || | | vc_size_row * vc_rows) + * (start when no scroll) || Last login: 12:28 | / + * v+----------------------+-: + * | Have a lot of fun... | \ + * vc_pos -----------------|--------v | > scroll-front area + * | ~ # cat_ | / + * vc_scr_end -----------> +----------------------+-: + * (vc_origin + | | \ EMPTY, to be filled by + * vc_screenbuf_size) | | / vc_video_erase_char + * +----------------------+-' + * <---- 2 * vc_cols -----> + * <---- vc_size_row -----> + * + * Note that every character in the console buffer is accompanied with an + * attribute in the buffer right after the character. This is not depicted + * in the figure. + */ +struct vc_data { + struct tty_port port; /* Upper level data */ + + unsigned short vc_num; /* Console number */ + unsigned int vc_cols; /* [#] Console size */ + unsigned int vc_rows; + unsigned int vc_size_row; /* Bytes per row */ + unsigned int vc_scan_lines; /* # of scan lines */ + unsigned int vc_cell_height; /* CRTC character cell height */ + unsigned long vc_origin; /* [!] Start of real screen */ + unsigned long vc_scr_end; /* [!] End of real screen */ + unsigned long vc_visible_origin; /* [!] Top of visible window */ + unsigned int vc_top, vc_bottom; /* Scrolling region */ + const struct consw *vc_sw; + unsigned short *vc_screenbuf; /* In-memory character/attribute buffer */ + unsigned int vc_screenbuf_size; + unsigned char vc_mode; /* KD_TEXT, ... 
*/ + /* attributes for all characters on screen */ + unsigned char vc_attr; /* Current attributes */ + unsigned char vc_def_color; /* Default colors */ + unsigned char vc_color; /* Foreground & background */ + unsigned char vc_s_color; /* Saved foreground & background */ + unsigned char vc_ulcolor; /* Color for underline mode */ + unsigned char vc_itcolor; + unsigned char vc_halfcolor; /* Color for half intensity mode */ + /* cursor */ + unsigned int vc_cursor_type; + unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */ + unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */ + unsigned int vc_x, vc_y; /* Cursor position */ + unsigned int vc_saved_x, vc_saved_y; + unsigned long vc_pos; /* Cursor address */ + /* fonts */ + unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */ + struct console_font vc_font; /* Current VC font set */ + unsigned short vc_video_erase_char; /* Background erase character */ + /* VT terminal data */ + unsigned int vc_state; /* Escape sequence parser state */ + unsigned int vc_npar,vc_par[NPAR]; /* Parameters of current escape sequence */ + /* data for manual vt switching */ + struct vt_mode vt_mode; + struct pid *vt_pid; + int vt_newvt; + wait_queue_head_t paste_wait; + /* mode flags */ + unsigned int vc_charset : 1; /* Character set G0 / G1 */ + unsigned int vc_s_charset : 1; /* Saved character set */ + unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */ + unsigned int vc_toggle_meta : 1; /* Toggle high bit? */ + unsigned int vc_decscnm : 1; /* Screen Mode */ + unsigned int vc_decom : 1; /* Origin Mode */ + unsigned int vc_decawm : 1; /* Autowrap Mode */ + unsigned int vc_deccm : 1; /* Cursor Visible */ + unsigned int vc_decim : 1; /* Insert Mode */ + /* attribute flags */ + unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ + unsigned int vc_italic:1; + unsigned int vc_underline : 1; + unsigned int vc_blink : 1; + unsigned int vc_reverse : 1; + unsigned int vc_s_intensity : 2; /* saved rendition */ + unsigned int vc_s_italic:1; + unsigned int vc_s_underline : 1; + unsigned int vc_s_blink : 1; + unsigned int vc_s_reverse : 1; + /* misc */ + unsigned int vc_ques : 1; + unsigned int vc_need_wrap : 1; + unsigned int vc_can_do_color : 1; + unsigned int vc_report_mouse : 2; + unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */ + unsigned char vc_utf_count; + int vc_utf_char; + unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */ + unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */ + unsigned short * vc_translate; + unsigned char vc_G0_charset; + unsigned char vc_G1_charset; + unsigned char vc_saved_G0; + unsigned char vc_saved_G1; + unsigned int vc_resize_user; /* resize request from user */ + unsigned int vc_bell_pitch; /* Console bell pitch */ + unsigned int vc_bell_duration; /* Console bell duration */ + unsigned short vc_cur_blink_ms; /* Cursor blink duration */ + struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ + struct uni_pagedir *vc_uni_pagedir; + struct uni_pagedir **vc_uni_pagedir_loc; /* [!] 
Location of uni_pagedir variable for this console */ + struct uni_screen *vc_uni_screen; /* unicode screen content */ + bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */ + /* additional information is in vt_kern.h */ +}; + +struct vc { + struct vc_data *d; + struct work_struct SAK_work; + + /* might add scrmem, kbd at some time, + to have everything in one place - the disadvantage + would be that vc_cons etc can no longer be static */ +}; + +extern struct vc vc_cons [MAX_NR_CONSOLES]; +extern void vc_SAK(struct work_struct *work); + +#define CUR_DEF 0 +#define CUR_NONE 1 +#define CUR_UNDERLINE 2 +#define CUR_LOWER_THIRD 3 +#define CUR_LOWER_HALF 4 +#define CUR_TWO_THIRDS 5 +#define CUR_BLOCK 6 +#define CUR_HWMASK 0x0f +#define CUR_SWMASK 0xfff0 + +#define CUR_DEFAULT CUR_UNDERLINE + +static inline bool con_is_visible(const struct vc_data *vc) +{ + return *vc->vc_display_fg == vc; +} + +#endif /* _LINUX_CONSOLE_STRUCT_H */ diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h new file mode 100644 index 000000000..254246673 --- /dev/null +++ b/include/linux/consolemap.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * consolemap.h + * + * Interface between console.c, selection.c and consolemap.c + */ +#ifndef __LINUX_CONSOLEMAP_H__ +#define __LINUX_CONSOLEMAP_H__ + +#define LAT1_MAP 0 +#define GRAF_MAP 1 +#define IBMPC_MAP 2 +#define USER_MAP 3 + +#include + +#ifdef CONFIG_CONSOLE_TRANSLATIONS +struct vc_data; + +extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode); +extern unsigned short *set_translate(int m, struct vc_data *vc); +extern int conv_uni_to_pc(struct vc_data *conp, long ucs); +extern u32 conv_8bit_to_uni(unsigned char c); +extern int conv_uni_to_8bit(u32 uni); +void console_map_init(void); +#else +#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph) +#define set_translate(m, vc) ((unsigned short *)NULL) +#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs)) +#define conv_8bit_to_uni(c) ((uint32_t)(c)) +#define conv_uni_to_8bit(c) ((int) ((c) & 0xff)) +#define console_map_init(c) do { ; } while (0) +#endif /* CONFIG_CONSOLE_TRANSLATIONS */ + +#endif /* __LINUX_CONSOLEMAP_H__ */ diff --git a/include/linux/const.h b/include/linux/const.h new file mode 100644 index 000000000..7b55a55f5 --- /dev/null +++ b/include/linux/const.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_CONST_H +#define _LINUX_CONST_H + +#include + +#define UL(x) (_UL(x)) +#define ULL(x) (_ULL(x)) + +#endif /* _LINUX_CONST_H */ diff --git a/include/linux/container.h b/include/linux/container.h new file mode 100644 index 000000000..3c03e6fd2 --- /dev/null +++ b/include/linux/container.h @@ -0,0 +1,25 @@ +/* + * Definitions for container bus type. + * + * Copyright (C) 2013, Intel Corporation + * Author: Rafael J. Wysocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include + +/* drivers/base/power/container.c */ +extern struct bus_type container_subsys; + +struct container_dev { + struct device dev; + int (*offline)(struct container_dev *cdev); +}; + +static inline struct container_dev *to_container_dev(struct device *dev) +{ + return container_of(dev, struct container_dev, dev); +} diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h new file mode 100644 index 000000000..d05609ad3 --- /dev/null +++ b/include/linux/context_tracking.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CONTEXT_TRACKING_H +#define _LINUX_CONTEXT_TRACKING_H + +#include +#include +#include +#include + + +#ifdef CONFIG_CONTEXT_TRACKING +extern void context_tracking_cpu_set(int cpu); + +/* Called with interrupts disabled. */ +extern void __context_tracking_enter(enum ctx_state state); +extern void __context_tracking_exit(enum ctx_state state); + +extern void context_tracking_enter(enum ctx_state state); +extern void context_tracking_exit(enum ctx_state state); +extern void context_tracking_user_enter(void); +extern void context_tracking_user_exit(void); + +static inline void user_enter(void) +{ + if (context_tracking_is_enabled()) + context_tracking_enter(CONTEXT_USER); + +} +static inline void user_exit(void) +{ + if (context_tracking_is_enabled()) + context_tracking_exit(CONTEXT_USER); +} + +/* Called with interrupts disabled. */ +static inline void user_enter_irqoff(void) +{ + if (context_tracking_is_enabled()) + __context_tracking_enter(CONTEXT_USER); + +} +static inline void user_exit_irqoff(void) +{ + if (context_tracking_is_enabled()) + __context_tracking_exit(CONTEXT_USER); +} + +static inline enum ctx_state exception_enter(void) +{ + enum ctx_state prev_ctx; + + if (!context_tracking_is_enabled()) + return 0; + + prev_ctx = this_cpu_read(context_tracking.state); + if (prev_ctx != CONTEXT_KERNEL) + context_tracking_exit(prev_ctx); + + return prev_ctx; +} + +static inline void exception_exit(enum ctx_state prev_ctx) +{ + if (context_tracking_is_enabled()) { + if (prev_ctx != CONTEXT_KERNEL) + context_tracking_enter(prev_ctx); + } +} + + +/** + * ct_state() - return the current context tracking state if known + * + * Returns the current cpu's context tracking state if context tracking + * is enabled. If context tracking is disabled, returns + * CONTEXT_DISABLED. This should be used primarily for debugging. + */ +static inline enum ctx_state ct_state(void) +{ + return context_tracking_is_enabled() ? 
+ this_cpu_read(context_tracking.state) : CONTEXT_DISABLED; +} +#else +static inline void user_enter(void) { } +static inline void user_exit(void) { } +static inline void user_enter_irqoff(void) { } +static inline void user_exit_irqoff(void) { } +static inline enum ctx_state exception_enter(void) { return 0; } +static inline void exception_exit(enum ctx_state prev_ctx) { } +static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } +#endif /* !CONFIG_CONTEXT_TRACKING */ + +#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond)) + +#ifdef CONFIG_CONTEXT_TRACKING_FORCE +extern void context_tracking_init(void); +#else +static inline void context_tracking_init(void) { } +#endif /* CONFIG_CONTEXT_TRACKING_FORCE */ + + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +/* must be called with irqs disabled */ +static inline void guest_enter_irqoff(void) +{ + if (vtime_accounting_cpu_enabled()) + vtime_guest_enter(current); + else + current->flags |= PF_VCPU; + + if (context_tracking_is_enabled()) + __context_tracking_enter(CONTEXT_GUEST); + + /* KVM does not hold any references to rcu protected data when it + * switches CPU into a guest mode. In fact switching to a guest mode + * is very similar to exiting to userspace from rcu point of view. In + * addition CPU may stay in a guest mode for quite a long time (up to + * one time slice). Lets treat guest mode as quiescent state, just like + * we do with user-mode execution. + */ + if (!context_tracking_cpu_is_enabled()) + rcu_virt_note_context_switch(smp_processor_id()); +} + +static inline void guest_exit_irqoff(void) +{ + if (context_tracking_is_enabled()) + __context_tracking_exit(CONTEXT_GUEST); + + if (vtime_accounting_cpu_enabled()) + vtime_guest_exit(current); + else + current->flags &= ~PF_VCPU; +} + +#else +static inline void guest_enter_irqoff(void) +{ + /* + * This is running in ioctl context so its safe + * to assume that it's the stime pending cputime + * to flush. + */ + vtime_account_system(current); + current->flags |= PF_VCPU; + rcu_virt_note_context_switch(smp_processor_id()); +} + +static inline void guest_exit_irqoff(void) +{ + /* Flush the guest cputime we spent on the guest */ + vtime_account_system(current); + current->flags &= ~PF_VCPU; +} +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ + +static inline void guest_enter(void) +{ + unsigned long flags; + + local_irq_save(flags); + guest_enter_irqoff(); + local_irq_restore(flags); +} + +static inline void guest_exit(void) +{ + unsigned long flags; + + local_irq_save(flags); + guest_exit_irqoff(); + local_irq_restore(flags); +} + +#endif diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h new file mode 100644 index 000000000..f128dc3be --- /dev/null +++ b/include/linux/context_tracking_state.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CONTEXT_TRACKING_STATE_H +#define _LINUX_CONTEXT_TRACKING_STATE_H + +#include +#include + +struct context_tracking { + /* + * When active is false, probes are unset in order + * to minimize overhead: TIF flags are cleared + * and calls to user_enter/exit are ignored. This + * may be further optimized using static keys. 
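An aside on the context-tracking entry points above, not from the patch: guest_enter()/guest_exit() bracket time spent running a guest, and exception_enter()/exception_exit() save and restore the tracked state across an exception handler. Both callers below are hypothetical.

#include <linux/context_tracking.h>

/* Hypothetical vCPU run loop: attribute the time below to guest context. */
static void foo_vcpu_run(void)
{
	guest_enter();
	/* ... enter the guest and run until the next VM exit ... */
	guest_exit();
}

/* Hypothetical exception handler: restore the previous tracked state. */
static void foo_handle_fault(void)
{
	enum ctx_state prev_state = exception_enter();

	/* ... handle the fault, possibly scheduling ... */

	exception_exit(prev_state);
}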
+ */ + bool active; + int recursion; + enum ctx_state { + CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */ + CONTEXT_KERNEL = 0, + CONTEXT_USER, + CONTEXT_GUEST, + } state; +}; + +#ifdef CONFIG_CONTEXT_TRACKING +extern struct static_key_false context_tracking_enabled; +DECLARE_PER_CPU(struct context_tracking, context_tracking); + +static inline bool context_tracking_is_enabled(void) +{ + return static_branch_unlikely(&context_tracking_enabled); +} + +static inline bool context_tracking_cpu_is_enabled(void) +{ + return __this_cpu_read(context_tracking.active); +} + +static inline bool context_tracking_in_user(void) +{ + return __this_cpu_read(context_tracking.state) == CONTEXT_USER; +} +#else +static inline bool context_tracking_in_user(void) { return false; } +static inline bool context_tracking_active(void) { return false; } +static inline bool context_tracking_is_enabled(void) { return false; } +static inline bool context_tracking_cpu_is_enabled(void) { return false; } +#endif /* CONFIG_CONTEXT_TRACKING */ + +#endif diff --git a/include/linux/cordic.h b/include/linux/cordic.h new file mode 100644 index 000000000..cf68ca4a5 --- /dev/null +++ b/include/linux/cordic.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2011 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __CORDIC_H_ +#define __CORDIC_H_ + +#include + +/** + * struct cordic_iq - i/q coordinate. + * + * @i: real part of coordinate (in phase). + * @q: imaginary part of coordinate (quadrature). + */ +struct cordic_iq { + s32 i; + s32 q; +}; + +/** + * cordic_calc_iq() - calculates the i/q coordinate for given angle. + * + * @theta: angle in degrees for which i/q coordinate is to be calculated. + * @coord: function output parameter holding the i/q coordinate. + * + * The function calculates the i/q coordinate for a given angle using the + * CORDIC algorithm. The coordinate consists of a real (i) and an + * imaginary (q) part. The real part is essentially the cosine of the + * angle and the imaginary part is the sine of the angle. The returned + * values are scaled by 2^16 for precision. The range for theta is + * for -180 degrees to +180 degrees. Passed values outside this range are + * converted before doing the actual calculation. + */ +struct cordic_iq cordic_calc_iq(s32 theta); + +#endif /* __CORDIC_H_ */ diff --git a/include/linux/coredump.h b/include/linux/coredump.h new file mode 100644 index 000000000..207aed96a --- /dev/null +++ b/include/linux/coredump.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_COREDUMP_H +#define _LINUX_COREDUMP_H + +#include +#include +#include +#include + +/* + * These are the only things you should do on a core-file: use only these + * functions to write out all the necessary info. 
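Looking back at cordic.h above: a tiny sketch, not part of the patch, of cordic_calc_iq(). Per the comment above, the returned i/q pair approximates the cosine and sine of the angle scaled by 2^16; the caller below is hypothetical.

#include <linux/cordic.h>

/* Hypothetical caller: fixed-point cos/sin of 45 degrees. */
static void foo_cordic_demo(void)
{
	struct cordic_iq iq = cordic_calc_iq(45);

	/* iq.i ~= cos(45deg) * 65536 ~= 46341, iq.q ~= sin(45deg) * 65536 */
	(void)iq;
}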
+ */ +struct coredump_params; +extern int dump_skip(struct coredump_params *cprm, size_t nr); +extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); +extern int dump_align(struct coredump_params *cprm, int align); +extern void dump_truncate(struct coredump_params *cprm); +#ifdef CONFIG_COREDUMP +extern void do_coredump(const siginfo_t *siginfo); +#else +static inline void do_coredump(const siginfo_t *siginfo) {} +#endif + +#endif /* _LINUX_COREDUMP_H */ diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h new file mode 100644 index 000000000..a1a959ba2 --- /dev/null +++ b/include/linux/coresight-pmu.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier + */ + +#ifndef _LINUX_CORESIGHT_PMU_H +#define _LINUX_CORESIGHT_PMU_H + +#define CORESIGHT_ETM_PMU_NAME "cs_etm" +#define CORESIGHT_ETM_PMU_SEED 0x10 + +/* ETMv3.5/PTM's ETMCR config bit */ +#define ETM_OPT_CYCACC 12 +#define ETM_OPT_TS 28 +#define ETM_OPT_RETSTK 29 + +/* ETMv4 CONFIGR programming bits for the ETM OPTs */ +#define ETM4_CFG_BIT_CYCACC 4 +#define ETM4_CFG_BIT_TS 11 +#define ETM4_CFG_BIT_RETSTK 12 + +static inline int coresight_get_trace_id(int cpu) +{ + /* + * A trace ID of value 0 is invalid, so let's start at some + * random value that fits in 7 bits and go from there. Since + * the common convention is to have data trace IDs be I(N) + 1, + * set instruction trace IDs as a function of the CPU number. + */ + return (CORESIGHT_ETM_PMU_SEED + (cpu * 2)); +} + +#endif diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h new file mode 100644 index 000000000..74714b59f --- /dev/null +++ b/include/linux/coresight-stm.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_CORESIGHT_STM_H_ +#define __LINUX_CORESIGHT_STM_H_ + +#include + +#endif diff --git a/include/linux/coresight.h b/include/linux/coresight.h new file mode 100644 index 000000000..d828a6efe --- /dev/null +++ b/include/linux/coresight.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012, The Linux Foundation. All rights reserved. 
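On the dump helpers declared above (an aside, not from the patch): a binfmt core dumper emits data with dump_emit() and keeps alignment with dump_align(); both return 0 once the dump has been truncated. The record type and the helper below are hypothetical.

#include <linux/coredump.h>
#include <linux/types.h>

/* Hypothetical fixed-size record written into a core file. */
struct foo_core_rec {
	u32 type;
	u32 len;
};

static int foo_dump_record(struct coredump_params *cprm,
			   const struct foo_core_rec *rec)
{
	if (!dump_emit(cprm, rec, sizeof(*rec)))
		return 0;		/* dump truncated */
	return dump_align(cprm, 4);	/* keep following data 4-byte aligned */
}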
+ */ + +#ifndef _LINUX_CORESIGHT_H +#define _LINUX_CORESIGHT_H + +#include +#include +#include + +/* Peripheral id registers (0xFD0-0xFEC) */ +#define CORESIGHT_PERIPHIDR4 0xfd0 +#define CORESIGHT_PERIPHIDR5 0xfd4 +#define CORESIGHT_PERIPHIDR6 0xfd8 +#define CORESIGHT_PERIPHIDR7 0xfdC +#define CORESIGHT_PERIPHIDR0 0xfe0 +#define CORESIGHT_PERIPHIDR1 0xfe4 +#define CORESIGHT_PERIPHIDR2 0xfe8 +#define CORESIGHT_PERIPHIDR3 0xfeC +/* Component id registers (0xFF0-0xFFC) */ +#define CORESIGHT_COMPIDR0 0xff0 +#define CORESIGHT_COMPIDR1 0xff4 +#define CORESIGHT_COMPIDR2 0xff8 +#define CORESIGHT_COMPIDR3 0xffC + +#define ETM_ARCH_V3_3 0x23 +#define ETM_ARCH_V3_5 0x25 +#define PFT_ARCH_V1_0 0x30 +#define PFT_ARCH_V1_1 0x31 + +#define CORESIGHT_UNLOCK 0xc5acce55 + +extern struct bus_type coresight_bustype; + +enum coresight_dev_type { + CORESIGHT_DEV_TYPE_NONE, + CORESIGHT_DEV_TYPE_SINK, + CORESIGHT_DEV_TYPE_LINK, + CORESIGHT_DEV_TYPE_LINKSINK, + CORESIGHT_DEV_TYPE_SOURCE, + CORESIGHT_DEV_TYPE_HELPER, +}; + +enum coresight_dev_subtype_sink { + CORESIGHT_DEV_SUBTYPE_SINK_NONE, + CORESIGHT_DEV_SUBTYPE_SINK_PORT, + CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, +}; + +enum coresight_dev_subtype_link { + CORESIGHT_DEV_SUBTYPE_LINK_NONE, + CORESIGHT_DEV_SUBTYPE_LINK_MERG, + CORESIGHT_DEV_SUBTYPE_LINK_SPLIT, + CORESIGHT_DEV_SUBTYPE_LINK_FIFO, +}; + +enum coresight_dev_subtype_source { + CORESIGHT_DEV_SUBTYPE_SOURCE_NONE, + CORESIGHT_DEV_SUBTYPE_SOURCE_PROC, + CORESIGHT_DEV_SUBTYPE_SOURCE_BUS, + CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE, +}; + +enum coresight_dev_subtype_helper { + CORESIGHT_DEV_SUBTYPE_HELPER_NONE, + CORESIGHT_DEV_SUBTYPE_HELPER_CATU, +}; + +/** + * union coresight_dev_subtype - further characterisation of a type + * @sink_subtype: type of sink this component is, as defined + * by @coresight_dev_subtype_sink. + * @link_subtype: type of link this component is, as defined + * by @coresight_dev_subtype_link. + * @source_subtype: type of source this component is, as defined + * by @coresight_dev_subtype_source. + * @helper_subtype: type of helper this component is, as defined + * by @coresight_dev_subtype_helper. + */ +union coresight_dev_subtype { + /* We have some devices which acts as LINK and SINK */ + struct { + enum coresight_dev_subtype_sink sink_subtype; + enum coresight_dev_subtype_link link_subtype; + }; + enum coresight_dev_subtype_source source_subtype; + enum coresight_dev_subtype_helper helper_subtype; +}; + +/** + * struct coresight_platform_data - data harvested from the DT specification + * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. + * @name: name of the component as shown under sysfs. + * @nr_inport: number of input ports for this component. + * @outports: list of remote endpoint port number. + * @child_names:name of all child components connected to this device. + * @child_ports:child component port number the current component is + connected to. + * @nr_outport: number of output ports for this component. + */ +struct coresight_platform_data { + int cpu; + const char *name; + int nr_inport; + int *outports; + const char **child_names; + int *child_ports; + int nr_outport; +}; + +/** + * struct coresight_desc - description of a component required from drivers + * @type: as defined by @coresight_dev_type. + * @subtype: as defined by @coresight_dev_subtype. + * @ops: generic operations for this component, as defined + by @coresight_ops. + * @pdata: platform data collected from DT. + * @dev: The device entity associated to this component. 
+ * @groups: operations specific to this component. These will end up + in the component's sysfs sub-directory. + */ +struct coresight_desc { + enum coresight_dev_type type; + union coresight_dev_subtype subtype; + const struct coresight_ops *ops; + struct coresight_platform_data *pdata; + struct device *dev; + const struct attribute_group **groups; +}; + +/** + * struct coresight_connection - representation of a single connection + * @outport: a connection's output port number. + * @chid_name: remote component's name. + * @child_port: remote component's port number @output is connected to. + * @child_dev: a @coresight_device representation of the component + connected to @outport. + */ +struct coresight_connection { + int outport; + const char *child_name; + int child_port; + struct coresight_device *child_dev; +}; + +/** + * struct coresight_device - representation of a device as used by the framework + * @conns: array of coresight_connections associated to this component. + * @nr_inport: number of input port associated to this component. + * @nr_outport: number of output port associated to this component. + * @type: as defined by @coresight_dev_type. + * @subtype: as defined by @coresight_dev_subtype. + * @ops: generic operations for this component, as defined + by @coresight_ops. + * @dev: The device entity associated to this component. + * @refcnt: keep track of what is in use. + * @orphan: true if the component has connections that haven't been linked. + * @enable: 'true' if component is currently part of an active path. + * @activated: 'true' only if a _sink_ has been activated. A sink can be + activated but not yet enabled. Enabling for a _sink_ + happens when a source has been selected for that it. + */ +struct coresight_device { + struct coresight_connection *conns; + int nr_inport; + int nr_outport; + enum coresight_dev_type type; + union coresight_dev_subtype subtype; + const struct coresight_ops *ops; + struct device dev; + atomic_t *refcnt; + bool orphan; + bool enable; /* true only if configured as part of a path */ + bool activated; /* true only if a sink is part of a path */ +}; + +#define to_coresight_device(d) container_of(d, struct coresight_device, dev) + +#define source_ops(csdev) csdev->ops->source_ops +#define sink_ops(csdev) csdev->ops->sink_ops +#define link_ops(csdev) csdev->ops->link_ops +#define helper_ops(csdev) csdev->ops->helper_ops + +/** + * struct coresight_ops_sink - basic operations for a sink + * Operations available for sinks + * @enable: enables the sink. + * @disable: disables the sink. + * @alloc_buffer: initialises perf's ring buffer for trace collection. + * @free_buffer: release memory allocated in @get_config. + * @set_buffer: initialises buffer mechanic before a trace session. + * @reset_buffer: finalises buffer mechanic after a trace session. + * @update_buffer: update buffer pointers after a trace session. 
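A minimal sketch of how a sink driver might describe itself with struct coresight_desc before calling coresight_register() (declared further down in this header). The function and parameter names are placeholders; only the structure fields and enum values come from the header.

static struct coresight_device *
example_register_sink(struct device *dev, const struct coresight_ops *ops,
		      struct coresight_platform_data *pdata,
		      const struct attribute_group **groups)
{
	struct coresight_desc desc = { 0 };

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
	desc.ops = ops;
	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = groups;

	return coresight_register(&desc);	/* returns the new coresight_device */
}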
+ */ +struct coresight_ops_sink { + int (*enable)(struct coresight_device *csdev, u32 mode); + void (*disable)(struct coresight_device *csdev); + void *(*alloc_buffer)(struct coresight_device *csdev, int cpu, + void **pages, int nr_pages, bool overwrite); + void (*free_buffer)(void *config); + int (*set_buffer)(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config); + unsigned long (*reset_buffer)(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config); + void (*update_buffer)(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config); +}; + +/** + * struct coresight_ops_link - basic operations for a link + * Operations available for links. + * @enable: enables flow between iport and oport. + * @disable: disables flow between iport and oport. + */ +struct coresight_ops_link { + int (*enable)(struct coresight_device *csdev, int iport, int oport); + void (*disable)(struct coresight_device *csdev, int iport, int oport); +}; + +/** + * struct coresight_ops_source - basic operations for a source + * Operations available for sources. + * @cpu_id: returns the value of the CPU number this component + * is associated to. + * @trace_id: returns the value of the component's trace ID as known + * to the HW. + * @enable: enables tracing for a source. + * @disable: disables tracing for a source. + */ +struct coresight_ops_source { + int (*cpu_id)(struct coresight_device *csdev); + int (*trace_id)(struct coresight_device *csdev); + int (*enable)(struct coresight_device *csdev, + struct perf_event *event, u32 mode); + void (*disable)(struct coresight_device *csdev, + struct perf_event *event); +}; + +/** + * struct coresight_ops_helper - Operations for a helper device. + * + * All operations could pass in a device specific data, which could + * help the helper device to determine what to do. 
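To make the operation tables concrete, here is a hedged skeleton of a link implementation wired into struct coresight_ops_link above; the example_* names are invented and the function bodies are placeholders.

static int example_link_enable(struct coresight_device *csdev,
			       int iport, int oport)
{
	/* program the path from iport to oport (e.g. a funnel port enable) */
	return 0;
}

static void example_link_disable(struct coresight_device *csdev,
				 int iport, int oport)
{
	/* undo whatever example_link_enable() configured */
}

static const struct coresight_ops_link example_link_ops = {
	.enable		= example_link_enable,
	.disable	= example_link_disable,
};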
+ * + * @enable : Enable the device + * @disable : Disable the device + */ +struct coresight_ops_helper { + int (*enable)(struct coresight_device *csdev, void *data); + int (*disable)(struct coresight_device *csdev, void *data); +}; + +struct coresight_ops { + const struct coresight_ops_sink *sink_ops; + const struct coresight_ops_link *link_ops; + const struct coresight_ops_source *source_ops; + const struct coresight_ops_helper *helper_ops; +}; + +#ifdef CONFIG_CORESIGHT +extern struct coresight_device * +coresight_register(struct coresight_desc *desc); +extern void coresight_unregister(struct coresight_device *csdev); +extern int coresight_enable(struct coresight_device *csdev); +extern void coresight_disable(struct coresight_device *csdev); +extern int coresight_timeout(void __iomem *addr, u32 offset, + int position, int value); +#else +static inline struct coresight_device * +coresight_register(struct coresight_desc *desc) { return NULL; } +static inline void coresight_unregister(struct coresight_device *csdev) {} +static inline int +coresight_enable(struct coresight_device *csdev) { return -ENOSYS; } +static inline void coresight_disable(struct coresight_device *csdev) {} +static inline int coresight_timeout(void __iomem *addr, u32 offset, + int position, int value) { return 1; } +#endif + +#ifdef CONFIG_OF +extern int of_coresight_get_cpu(const struct device_node *node); +extern struct coresight_platform_data * +of_get_coresight_platform_data(struct device *dev, + const struct device_node *node); +#else +static inline int of_coresight_get_cpu(const struct device_node *node) +{ return 0; } +static inline struct coresight_platform_data *of_get_coresight_platform_data( + struct device *dev, const struct device_node *node) { return NULL; } +#endif + +#endif diff --git a/include/linux/count_zeros.h b/include/linux/count_zeros.h new file mode 100644 index 000000000..363da78c4 --- /dev/null +++ b/include/linux/count_zeros.h @@ -0,0 +1,57 @@ +/* Count leading and trailing zeros functions + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_BITOPS_COUNT_ZEROS_H_ +#define _LINUX_BITOPS_COUNT_ZEROS_H_ + +#include + +/** + * count_leading_zeros - Count the number of zeros from the MSB back + * @x: The value + * + * Count the number of leading zeros from the MSB going towards the LSB in @x. + * + * If the MSB of @x is set, the result is 0. + * If only the LSB of @x is set, then the result is BITS_PER_LONG-1. + * If @x is 0 then the result is COUNT_LEADING_ZEROS_0. + */ +static inline int count_leading_zeros(unsigned long x) +{ + if (sizeof(x) == 4) + return BITS_PER_LONG - fls(x); + else + return BITS_PER_LONG - fls64(x); +} + +#define COUNT_LEADING_ZEROS_0 BITS_PER_LONG + +/** + * count_trailing_zeros - Count the number of zeros from the LSB forwards + * @x: The value + * + * Count the number of trailing zeros from the LSB going towards the MSB in @x. + * + * If the LSB of @x is set, the result is 0. + * If only the MSB of @x is set, then the result is BITS_PER_LONG-1. + * If @x is 0 then the result is COUNT_TRAILING_ZEROS_0. 
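As a quick illustration of the semantics documented for count_leading_zeros() above, assuming BITS_PER_LONG == 64, the following checks all hold; the helper is illustrative and not part of the header.

static int count_leading_zeros_examples_hold(void)
{
	return count_leading_zeros(1UL << 63) == 0 &&			/* MSB set */
	       count_leading_zeros(1UL) == 63 &&			/* only the LSB set */
	       count_leading_zeros(0UL) == COUNT_LEADING_ZEROS_0;	/* i.e. 64 */
}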
+ */ +static inline int count_trailing_zeros(unsigned long x) +{ +#define COUNT_TRAILING_ZEROS_0 (-1) + + if (sizeof(x) == 4) + return ffs(x); + else + return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0; +} + +#endif /* _LINUX_BITOPS_COUNT_ZEROS_H_ */ diff --git a/include/linux/cper.h b/include/linux/cper.h new file mode 100644 index 000000000..9c703a0ab --- /dev/null +++ b/include/linux/cper.h @@ -0,0 +1,557 @@ +/* + * UEFI Common Platform Error Record + * + * Copyright (C) 2010, Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef LINUX_CPER_H +#define LINUX_CPER_H + +#include +#include + +/* CPER record signature and the size */ +#define CPER_SIG_RECORD "CPER" +#define CPER_SIG_SIZE 4 +/* Used in signature_end field in struct cper_record_header */ +#define CPER_SIG_END 0xffffffff + +/* + * CPER record header revision, used in revision field in struct + * cper_record_header + */ +#define CPER_RECORD_REV 0x0100 + +/* + * CPER record length contains the CPER fields which are relevant for further + * handling of a memory error in userspace (we don't carry all the fields + * defined in the UEFI spec because some of them don't make any sense.) + * Currently, a length of 256 should be more than enough. + */ +#define CPER_REC_LEN 256 +/* + * Severity difinition for error_severity in struct cper_record_header + * and section_severity in struct cper_section_descriptor + */ +enum { + CPER_SEV_RECOVERABLE, + CPER_SEV_FATAL, + CPER_SEV_CORRECTED, + CPER_SEV_INFORMATIONAL, +}; + +/* + * Validation bits difinition for validation_bits in struct + * cper_record_header. If set, corresponding fields in struct + * cper_record_header contain valid information. 
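As an illustration of how the severity values above are typically consumed (the header exports cper_severity_str() for exactly this, declared near the end of the file), a hedged sketch with made-up labels:

static const char *example_cper_severity_str(unsigned int severity)
{
	switch (severity) {
	case CPER_SEV_RECOVERABLE:	return "recoverable";
	case CPER_SEV_FATAL:		return "fatal";
	case CPER_SEV_CORRECTED:	return "corrected";
	case CPER_SEV_INFORMATIONAL:	return "info";
	default:			return "unknown";
	}
}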
+ * + * corresponds platform_id + */ +#define CPER_VALID_PLATFORM_ID 0x0001 +/* corresponds timestamp */ +#define CPER_VALID_TIMESTAMP 0x0002 +/* corresponds partition_id */ +#define CPER_VALID_PARTITION_ID 0x0004 + +/* + * Notification type used to generate error record, used in + * notification_type in struct cper_record_header + * + * Corrected Machine Check + */ +#define CPER_NOTIFY_CMC \ + GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ + 0xEB, 0xD4, 0xF8, 0x90) +/* Corrected Platform Error */ +#define CPER_NOTIFY_CPE \ + GUID_INIT(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \ + 0xF2, 0x7E, 0xBE, 0xEE) +/* Machine Check Exception */ +#define CPER_NOTIFY_MCE \ + GUID_INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \ + 0xE1, 0x49, 0x13, 0xBB) +/* PCI Express Error */ +#define CPER_NOTIFY_PCIE \ + GUID_INIT(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \ + 0xAF, 0x67, 0xC1, 0x04) +/* INIT Record (for IPF) */ +#define CPER_NOTIFY_INIT \ + GUID_INIT(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \ + 0xD3, 0x9B, 0xC9, 0x8E) +/* Non-Maskable Interrupt */ +#define CPER_NOTIFY_NMI \ + GUID_INIT(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \ + 0x85, 0xD6, 0xE9, 0x8A) +/* BOOT Error Record */ +#define CPER_NOTIFY_BOOT \ + GUID_INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \ + 0xD4, 0x64, 0xB3, 0x8F) +/* DMA Remapping Error */ +#define CPER_NOTIFY_DMAR \ + GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \ + 0x72, 0x2D, 0xEB, 0x41) + +/* + * Flags bits definitions for flags in struct cper_record_header + * If set, the error has been recovered + */ +#define CPER_HW_ERROR_FLAGS_RECOVERED 0x1 +/* If set, the error is for previous boot */ +#define CPER_HW_ERROR_FLAGS_PREVERR 0x2 +/* If set, the error is injected for testing */ +#define CPER_HW_ERROR_FLAGS_SIMULATED 0x4 + +/* + * CPER section header revision, used in revision field in struct + * cper_section_descriptor + */ +#define CPER_SEC_REV 0x0100 + +/* + * Validation bits difinition for validation_bits in struct + * cper_section_descriptor. If set, corresponding fields in struct + * cper_section_descriptor contain valid information. + * + * corresponds fru_id + */ +#define CPER_SEC_VALID_FRU_ID 0x1 +/* corresponds fru_text */ +#define CPER_SEC_VALID_FRU_TEXT 0x2 + +/* + * Flags bits definitions for flags in struct cper_section_descriptor + * + * If set, the section is associated with the error condition + * directly, and should be focused on + */ +#define CPER_SEC_PRIMARY 0x0001 +/* + * If set, the error was not contained within the processor or memory + * hierarchy and the error may have propagated to persistent storage + * or network + */ +#define CPER_SEC_CONTAINMENT_WARNING 0x0002 +/* If set, the component must be re-initialized or re-enabled prior to use */ +#define CPER_SEC_RESET 0x0004 +/* If set, Linux may choose to discontinue use of the resource */ +#define CPER_SEC_ERROR_THRESHOLD_EXCEEDED 0x0008 +/* + * If set, resource could not be queried for error information due to + * conflicts with other system software or resources. Some fields of + * the section will be invalid + */ +#define CPER_SEC_RESOURCE_NOT_ACCESSIBLE 0x0010 +/* + * If set, action has been taken to ensure error containment (such as + * poisoning data), but the error has not been fully corrected and the + * data has not been consumed. 
Linux may choose to take further + * corrective action before the data is consumed + */ +#define CPER_SEC_LATENT_ERROR 0x0020 + +/* + * Section type definitions, used in section_type field in struct + * cper_section_descriptor + * + * Processor Generic + */ +#define CPER_SEC_PROC_GENERIC \ + GUID_INIT(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \ + 0x93, 0xC4, 0xF3, 0xDB) +/* Processor Specific: X86/X86_64 */ +#define CPER_SEC_PROC_IA \ + GUID_INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \ + 0x24, 0x2B, 0x6E, 0x1D) +/* Processor Specific: IA64 */ +#define CPER_SEC_PROC_IPF \ + GUID_INIT(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \ + 0x80, 0xC7, 0x3C, 0x88, 0x81) +/* Processor Specific: ARM */ +#define CPER_SEC_PROC_ARM \ + GUID_INIT(0xE19E3D16, 0xBC11, 0x11E4, 0x9C, 0xAA, 0xC2, 0x05, \ + 0x1D, 0x5D, 0x46, 0xB0) +/* Platform Memory */ +#define CPER_SEC_PLATFORM_MEM \ + GUID_INIT(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ + 0xED, 0x7C, 0x83, 0xB1) +#define CPER_SEC_PCIE \ + GUID_INIT(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \ + 0xCB, 0x3C, 0x6F, 0x35) +/* Firmware Error Record Reference */ +#define CPER_SEC_FW_ERR_REC_REF \ + GUID_INIT(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \ + 0x9C, 0x8E, 0x69, 0xED) +/* PCI/PCI-X Bus */ +#define CPER_SEC_PCI_X_BUS \ + GUID_INIT(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \ + 0xD3, 0xF9, 0xC9, 0xDD) +/* PCI Component/Device */ +#define CPER_SEC_PCI_DEV \ + GUID_INIT(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \ + 0x8B, 0x00, 0x13, 0x26) +#define CPER_SEC_DMAR_GENERIC \ + GUID_INIT(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \ + 0xDE, 0x3E, 0x2C, 0x64) +/* Intel VT for Directed I/O specific DMAr */ +#define CPER_SEC_DMAR_VT \ + GUID_INIT(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \ + 0xDD, 0x93, 0xE8, 0xCF) +/* IOMMU specific DMAr */ +#define CPER_SEC_DMAR_IOMMU \ + GUID_INIT(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \ + 0xDF, 0xAA, 0x84, 0xEC) + +#define CPER_PROC_VALID_TYPE 0x0001 +#define CPER_PROC_VALID_ISA 0x0002 +#define CPER_PROC_VALID_ERROR_TYPE 0x0004 +#define CPER_PROC_VALID_OPERATION 0x0008 +#define CPER_PROC_VALID_FLAGS 0x0010 +#define CPER_PROC_VALID_LEVEL 0x0020 +#define CPER_PROC_VALID_VERSION 0x0040 +#define CPER_PROC_VALID_BRAND_INFO 0x0080 +#define CPER_PROC_VALID_ID 0x0100 +#define CPER_PROC_VALID_TARGET_ADDRESS 0x0200 +#define CPER_PROC_VALID_REQUESTOR_ID 0x0400 +#define CPER_PROC_VALID_RESPONDER_ID 0x0800 +#define CPER_PROC_VALID_IP 0x1000 + +#define CPER_MEM_VALID_ERROR_STATUS 0x0001 +#define CPER_MEM_VALID_PA 0x0002 +#define CPER_MEM_VALID_PA_MASK 0x0004 +#define CPER_MEM_VALID_NODE 0x0008 +#define CPER_MEM_VALID_CARD 0x0010 +#define CPER_MEM_VALID_MODULE 0x0020 +#define CPER_MEM_VALID_BANK 0x0040 +#define CPER_MEM_VALID_DEVICE 0x0080 +#define CPER_MEM_VALID_ROW 0x0100 +#define CPER_MEM_VALID_COLUMN 0x0200 +#define CPER_MEM_VALID_BIT_POSITION 0x0400 +#define CPER_MEM_VALID_REQUESTOR_ID 0x0800 +#define CPER_MEM_VALID_RESPONDER_ID 0x1000 +#define CPER_MEM_VALID_TARGET_ID 0x2000 +#define CPER_MEM_VALID_ERROR_TYPE 0x4000 +#define CPER_MEM_VALID_RANK_NUMBER 0x8000 +#define CPER_MEM_VALID_CARD_HANDLE 0x10000 +#define CPER_MEM_VALID_MODULE_HANDLE 0x20000 + +#define CPER_PCIE_VALID_PORT_TYPE 0x0001 +#define CPER_PCIE_VALID_VERSION 0x0002 +#define CPER_PCIE_VALID_COMMAND_STATUS 0x0004 +#define CPER_PCIE_VALID_DEVICE_ID 0x0008 +#define CPER_PCIE_VALID_SERIAL_NUMBER 0x0010 +#define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS 0x0020 +#define 
CPER_PCIE_VALID_CAPABILITY 0x0040 +#define CPER_PCIE_VALID_AER_INFO 0x0080 + +#define CPER_PCIE_SLOT_SHIFT 3 + +#define CPER_ARM_VALID_MPIDR BIT(0) +#define CPER_ARM_VALID_AFFINITY_LEVEL BIT(1) +#define CPER_ARM_VALID_RUNNING_STATE BIT(2) +#define CPER_ARM_VALID_VENDOR_INFO BIT(3) + +#define CPER_ARM_INFO_VALID_MULTI_ERR BIT(0) +#define CPER_ARM_INFO_VALID_FLAGS BIT(1) +#define CPER_ARM_INFO_VALID_ERR_INFO BIT(2) +#define CPER_ARM_INFO_VALID_VIRT_ADDR BIT(3) +#define CPER_ARM_INFO_VALID_PHYSICAL_ADDR BIT(4) + +#define CPER_ARM_INFO_FLAGS_FIRST BIT(0) +#define CPER_ARM_INFO_FLAGS_LAST BIT(1) +#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2) +#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3) + +#define CPER_ARM_CACHE_ERROR 0 +#define CPER_ARM_TLB_ERROR 1 +#define CPER_ARM_BUS_ERROR 2 +#define CPER_ARM_VENDOR_ERROR 3 +#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR + +#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0) +#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1) +#define CPER_ARM_ERR_VALID_LEVEL BIT(2) +#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3) +#define CPER_ARM_ERR_VALID_CORRECTED BIT(4) +#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5) +#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6) +#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7) +#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8) +#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9) +#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10) +#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11) + +#define CPER_ARM_ERR_TRANSACTION_SHIFT 16 +#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1,0) +#define CPER_ARM_ERR_OPERATION_SHIFT 18 +#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3,0) +#define CPER_ARM_ERR_LEVEL_SHIFT 22 +#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2,0) +#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25 +#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0,0) +#define CPER_ARM_ERR_CORRECTED_SHIFT 26 +#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0,0) +#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27 +#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0,0) +#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28 +#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0,0) +#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29 +#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1,0) +#define CPER_ARM_ERR_TIME_OUT_SHIFT 31 +#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0,0) +#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32 +#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1,0) +#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34 +#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8,0) +#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43 +#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0,0) + +/* + * All tables and structs must be byte-packed to match CPER + * specification, since the tables are provided by the system BIOS + */ +#pragma pack(1) + +struct cper_record_header { + char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */ + __u16 revision; /* must be CPER_RECORD_REV */ + __u32 signature_end; /* must be CPER_SIG_END */ + __u16 section_count; + __u32 error_severity; + __u32 validation_bits; + __u32 record_length; + __u64 timestamp; + guid_t platform_id; + guid_t partition_id; + guid_t creator_id; + guid_t notification_type; + __u64 record_id; + __u32 flags; + __u64 persistence_information; + __u8 reserved[12]; /* must be zero */ +}; + +struct cper_section_descriptor { + __u32 section_offset; /* Offset in bytes of the + * section body from the base + * of the record header */ + __u32 section_length; + __u16 revision; /* must be CPER_RECORD_REV */ + __u8 validation_bits; + __u8 reserved; /* must be zero */ + __u32 
flags; + guid_t section_type; + guid_t fru_id; + __u32 section_severity; + __u8 fru_text[20]; +}; + +/* Generic Processor Error Section */ +struct cper_sec_proc_generic { + __u64 validation_bits; + __u8 proc_type; + __u8 proc_isa; + __u8 proc_error_type; + __u8 operation; + __u8 flags; + __u8 level; + __u16 reserved; + __u64 cpu_version; + char cpu_brand[128]; + __u64 proc_id; + __u64 target_addr; + __u64 requestor_id; + __u64 responder_id; + __u64 ip; +}; + +/* IA32/X64 Processor Error Section */ +struct cper_sec_proc_ia { + __u64 validation_bits; + __u64 lapic_id; + __u8 cpuid[48]; +}; + +/* IA32/X64 Processor Error Information Structure */ +struct cper_ia_err_info { + guid_t err_type; + __u64 validation_bits; + __u64 check_info; + __u64 target_id; + __u64 requestor_id; + __u64 responder_id; + __u64 ip; +}; + +/* IA32/X64 Processor Context Information Structure */ +struct cper_ia_proc_ctx { + __u16 reg_ctx_type; + __u16 reg_arr_size; + __u32 msr_addr; + __u64 mm_reg_addr; +}; + +/* ARM Processor Error Section */ +struct cper_sec_proc_arm { + __u32 validation_bits; + __u16 err_info_num; /* Number of Processor Error Info */ + __u16 context_info_num; /* Number of Processor Context Info Records*/ + __u32 section_length; + __u8 affinity_level; + __u8 reserved[3]; /* must be zero */ + __u64 mpidr; + __u64 midr; + __u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */ + __u32 psci_state; +}; + +/* ARM Processor Error Information Structure */ +struct cper_arm_err_info { + __u8 version; + __u8 length; + __u16 validation_bits; + __u8 type; + __u16 multiple_error; + __u8 flags; + __u64 error_info; + __u64 virt_fault_addr; + __u64 physical_fault_addr; +}; + +/* ARM Processor Context Information Structure */ +struct cper_arm_ctx_info { + __u16 version; + __u16 type; + __u32 size; +}; + +/* Old Memory Error Section UEFI 2.1, 2.2 */ +struct cper_sec_mem_err_old { + __u64 validation_bits; + __u64 error_status; + __u64 physical_addr; + __u64 physical_addr_mask; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u8 error_type; +}; + +/* Memory Error Section UEFI >= 2.3 */ +struct cper_sec_mem_err { + __u64 validation_bits; + __u64 error_status; + __u64 physical_addr; + __u64 physical_addr_mask; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u8 error_type; + __u8 reserved; + __u16 rank; + __u16 mem_array_handle; /* card handle in UEFI 2.4 */ + __u16 mem_dev_handle; /* module handle in UEFI 2.4 */ +}; + +struct cper_mem_err_compact { + __u64 validation_bits; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u16 rank; + __u16 mem_array_handle; + __u16 mem_dev_handle; +}; + +struct cper_sec_pcie { + __u64 validation_bits; + __u32 port_type; + struct { + __u8 minor; + __u8 major; + __u8 reserved[2]; + } version; + __u16 command; + __u16 status; + __u32 reserved; + struct { + __u16 vendor_id; + __u16 device_id; + __u8 class_code[3]; + __u8 function; + __u8 device; + __u16 segment; + __u8 bus; + __u8 secondary_bus; + __u16 slot; + __u8 reserved; + } device_id; + struct { + __u32 lower; + __u32 upper; + } serial_number; + struct { + __u16 secondary_status; + __u16 control; + } bridge; + __u8 
capability[60]; + __u8 aer_info[96]; +}; + +/* Reset to default packing */ +#pragma pack() + +extern const char * const cper_proc_error_type_strs[4]; + +u64 cper_next_record_id(void); +const char *cper_severity_str(unsigned int); +const char *cper_mem_err_type_str(unsigned int); +void cper_print_bits(const char *prefix, unsigned int bits, + const char * const strs[], unsigned int strs_size); +void cper_mem_err_pack(const struct cper_sec_mem_err *, + struct cper_mem_err_compact *); +const char *cper_mem_err_unpack(struct trace_seq *, + struct cper_mem_err_compact *); +void cper_print_proc_arm(const char *pfx, + const struct cper_sec_proc_arm *proc); +void cper_print_proc_ia(const char *pfx, + const struct cper_sec_proc_ia *proc); + +#endif diff --git a/include/linux/cpu.h b/include/linux/cpu.h new file mode 100644 index 000000000..8cc06e1d4 --- /dev/null +++ b/include/linux/cpu.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/cpu.h - generic cpu definition + * + * This is mainly for topological representation. We define the + * basic 'struct cpu' here, which can be embedded in per-arch + * definitions of processors. + * + * Basic handling of the devices is done in drivers/base/cpu.c + * + * CPUs are exported via sysfs in the devices/system/cpu + * directory. + */ +#ifndef _LINUX_CPU_H_ +#define _LINUX_CPU_H_ + +#include +#include +#include +#include + +struct device; +struct device_node; +struct attribute_group; + +struct cpu { + int node_id; /* The node which contains the CPU */ + int hotpluggable; /* creates sysfs control file if hotpluggable */ + struct device dev; +}; + +extern void boot_cpu_init(void); +extern void boot_cpu_hotplug_init(void); +extern void cpu_init(void); +extern void trap_init(void); + +extern int register_cpu(struct cpu *cpu, int num); +extern struct device *get_cpu_device(unsigned cpu); +extern bool cpu_is_hotpluggable(unsigned cpu); +extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id); +extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun, + int cpu, unsigned int *thread); + +extern int cpu_add_dev_attr(struct device_attribute *attr); +extern void cpu_remove_dev_attr(struct device_attribute *attr); + +extern int cpu_add_dev_attr_group(struct attribute_group *attrs); +extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); + +extern ssize_t cpu_show_meltdown(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_l1tf(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_mds(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_tsx_async_abort(struct device *dev, + struct device_attribute *attr, + char *buf); +extern ssize_t cpu_show_itlb_multihit(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, + char *buf); + +extern __printf(4, 5) +struct device *cpu_device_create(struct device *parent, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...); +#ifdef 
CONFIG_HOTPLUG_CPU +extern void unregister_cpu(struct cpu *cpu); +extern ssize_t arch_cpu_probe(const char *, size_t); +extern ssize_t arch_cpu_release(const char *, size_t); +#endif + +/* + * These states are not related to the core CPU hotplug mechanism. They are + * used by various (sub)architectures to track internal state + */ +#define CPU_ONLINE 0x0002 /* CPU is up */ +#define CPU_UP_PREPARE 0x0003 /* CPU coming up */ +#define CPU_DEAD 0x0007 /* CPU dead */ +#define CPU_DEAD_FROZEN 0x0008 /* CPU timed out on unplug */ +#define CPU_POST_DEAD 0x0009 /* CPU successfully unplugged */ +#define CPU_BROKEN 0x000B /* CPU did not die properly */ + +#ifdef CONFIG_SMP +extern bool cpuhp_tasks_frozen; +int cpu_up(unsigned int cpu); +void notify_cpu_starting(unsigned int cpu); +extern void cpu_maps_update_begin(void); +extern void cpu_maps_update_done(void); + +#else /* CONFIG_SMP */ +#define cpuhp_tasks_frozen 0 + +static inline void cpu_maps_update_begin(void) +{ +} + +static inline void cpu_maps_update_done(void) +{ +} + +#endif /* CONFIG_SMP */ +extern struct bus_type cpu_subsys; + +#ifdef CONFIG_HOTPLUG_CPU +extern void cpus_write_lock(void); +extern void cpus_write_unlock(void); +extern void cpus_read_lock(void); +extern void cpus_read_unlock(void); +extern int cpus_read_trylock(void); +extern void lockdep_assert_cpus_held(void); +extern void cpu_hotplug_disable(void); +extern void cpu_hotplug_enable(void); +void clear_tasks_mm_cpumask(int cpu); +int cpu_down(unsigned int cpu); + +#else /* CONFIG_HOTPLUG_CPU */ + +static inline void cpus_write_lock(void) { } +static inline void cpus_write_unlock(void) { } +static inline void cpus_read_lock(void) { } +static inline void cpus_read_unlock(void) { } +static inline int cpus_read_trylock(void) { return true; } +static inline void lockdep_assert_cpus_held(void) { } +static inline void cpu_hotplug_disable(void) { } +static inline void cpu_hotplug_enable(void) { } +#endif /* !CONFIG_HOTPLUG_CPU */ + +/* Wrappers which go away once all code is converted */ +static inline void cpu_hotplug_begin(void) { cpus_write_lock(); } +static inline void cpu_hotplug_done(void) { cpus_write_unlock(); } +static inline void get_online_cpus(void) { cpus_read_lock(); } +static inline void put_online_cpus(void) { cpus_read_unlock(); } + +#ifdef CONFIG_PM_SLEEP_SMP +extern int freeze_secondary_cpus(int primary); +static inline int disable_nonboot_cpus(void) +{ + return freeze_secondary_cpus(0); +} +extern void enable_nonboot_cpus(void); +#else /* !CONFIG_PM_SLEEP_SMP */ +static inline int disable_nonboot_cpus(void) { return 0; } +static inline void enable_nonboot_cpus(void) {} +#endif /* !CONFIG_PM_SLEEP_SMP */ + +void cpu_startup_entry(enum cpuhp_state state); + +void cpu_idle_poll_ctrl(bool enable); + +/* Attach to any functions which should be considered cpuidle. 
*/ +#define __cpuidle __attribute__((__section__(".cpuidle.text"))) + +bool cpu_in_idle(unsigned long pc); + +void arch_cpu_idle(void); +void arch_cpu_idle_prepare(void); +void arch_cpu_idle_enter(void); +void arch_cpu_idle_exit(void); +void arch_cpu_idle_dead(void); + +int cpu_report_state(int cpu); +int cpu_check_up_prepare(int cpu); +void cpu_set_state_online(int cpu); +void play_idle(unsigned long duration_ms); + +#ifdef CONFIG_HOTPLUG_CPU +bool cpu_wait_death(unsigned int cpu, int seconds); +bool cpu_report_death(void); +void cpuhp_report_idle_dead(void); +#else +static inline void cpuhp_report_idle_dead(void) { } +#endif /* #ifdef CONFIG_HOTPLUG_CPU */ + +enum cpuhp_smt_control { + CPU_SMT_ENABLED, + CPU_SMT_DISABLED, + CPU_SMT_FORCE_DISABLED, + CPU_SMT_NOT_SUPPORTED, +}; + +#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) +extern enum cpuhp_smt_control cpu_smt_control; +extern void cpu_smt_disable(bool force); +extern void cpu_smt_check_topology(void); +extern int cpuhp_smt_enable(void); +extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval); +#else +# define cpu_smt_control (CPU_SMT_ENABLED) +static inline void cpu_smt_disable(bool force) { } +static inline void cpu_smt_check_topology(void) { } +static inline int cpuhp_smt_enable(void) { return 0; } +static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; } +#endif + +extern bool cpu_mitigations_off(void); +extern bool cpu_mitigations_auto_nosmt(void); + +#endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h new file mode 100644 index 000000000..de0dafb93 --- /dev/null +++ b/include/linux/cpu_cooling.h @@ -0,0 +1,76 @@ +/* + * linux/include/linux/cpu_cooling.h + * + * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) + * Copyright (C) 2012 Amit Daniel + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef __CPU_COOLING_H__ +#define __CPU_COOLING_H__ + +#include +#include +#include + +struct cpufreq_policy; + +#ifdef CONFIG_CPU_THERMAL +/** + * cpufreq_cooling_register - function to create cpufreq cooling device. + * @policy: cpufreq policy. + */ +struct thermal_cooling_device * +cpufreq_cooling_register(struct cpufreq_policy *policy); + +/** + * cpufreq_cooling_unregister - function to remove cpufreq cooling device. + * @cdev: thermal cooling device pointer. 
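A hedged sketch of the usual pattern for the hotplug read lock declared in cpu.h above: hold cpus_read_lock() across any walk that must see a stable set of online CPUs. The loop body is illustrative and <linux/cpumask.h> is assumed for for_each_online_cpu().

#include <linux/cpumask.h>

static unsigned int example_count_online_cpus(void)
{
	unsigned int cpu, n = 0;

	cpus_read_lock();
	for_each_online_cpu(cpu)	/* set is stable while the lock is held */
		n++;
	cpus_read_unlock();

	return n;
}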
+ */ +void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev); + +#else /* !CONFIG_CPU_THERMAL */ +static inline struct thermal_cooling_device * +cpufreq_cooling_register(struct cpufreq_policy *policy) +{ + return ERR_PTR(-ENOSYS); +} + +static inline +void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) +{ + return; +} +#endif /* CONFIG_CPU_THERMAL */ + +#if defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) +/** + * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. + * @policy: cpufreq policy. + */ +struct thermal_cooling_device * +of_cpufreq_cooling_register(struct cpufreq_policy *policy); +#else +static inline struct thermal_cooling_device * +of_cpufreq_cooling_register(struct cpufreq_policy *policy) +{ + return NULL; +} +#endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */ + +#endif /* __CPU_COOLING_H__ */ diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h new file mode 100644 index 000000000..455b233dd --- /dev/null +++ b/include/linux/cpu_pm.h @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2011 Google, Inc. + * + * Author: + * Colin Cross + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_CPU_PM_H +#define _LINUX_CPU_PM_H + +#include +#include + +/* + * When a CPU goes to a low power state that turns off power to the CPU's + * power domain, the contents of some blocks (floating point coprocessors, + * interrupt controllers, caches, timers) in the same power domain can + * be lost. The cpm_pm notifiers provide a method for platform idle, suspend, + * and hotplug implementations to notify the drivers for these blocks that + * they may be reset. + * + * All cpu_pm notifications must be called with interrupts disabled. + * + * The notifications are split into two classes: CPU notifications and CPU + * cluster notifications. + * + * CPU notifications apply to a single CPU and must be called on the affected + * CPU. They are used to save per-cpu context for affected blocks. + * + * CPU cluster notifications apply to all CPUs in a single power domain. They + * are used to save any global context for affected blocks, and must be called + * after all the CPUs in the power domain have been notified of the low power + * state. 
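A hedged sketch of how the registration and unregistration calls above are normally paired by a cpufreq driver; the static variable and function names are illustrative, and <linux/err.h> is assumed for IS_ERR().

#include <linux/err.h>

static struct thermal_cooling_device *example_cdev;

static void example_cooling_ready(struct cpufreq_policy *policy)
{
	example_cdev = cpufreq_cooling_register(policy);
	if (IS_ERR(example_cdev))
		example_cdev = NULL;	/* treat cooling support as optional */
}

static void example_cooling_exit(void)
{
	if (example_cdev)
		cpufreq_cooling_unregister(example_cdev);
}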
+ */ + +/* + * Event codes passed as unsigned long val to notifier calls + */ +enum cpu_pm_event { + /* A single cpu is entering a low power state */ + CPU_PM_ENTER, + + /* A single cpu failed to enter a low power state */ + CPU_PM_ENTER_FAILED, + + /* A single cpu is exiting a low power state */ + CPU_PM_EXIT, + + /* A cpu power domain is entering a low power state */ + CPU_CLUSTER_PM_ENTER, + + /* A cpu power domain failed to enter a low power state */ + CPU_CLUSTER_PM_ENTER_FAILED, + + /* A cpu power domain is exiting a low power state */ + CPU_CLUSTER_PM_EXIT, +}; + +#ifdef CONFIG_CPU_PM +int cpu_pm_register_notifier(struct notifier_block *nb); +int cpu_pm_unregister_notifier(struct notifier_block *nb); +int cpu_pm_enter(void); +int cpu_pm_exit(void); +int cpu_cluster_pm_enter(void); +int cpu_cluster_pm_exit(void); + +#else + +static inline int cpu_pm_register_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int cpu_pm_unregister_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int cpu_pm_enter(void) +{ + return 0; +} + +static inline int cpu_pm_exit(void) +{ + return 0; +} + +static inline int cpu_cluster_pm_enter(void) +{ + return 0; +} + +static inline int cpu_cluster_pm_exit(void) +{ + return 0; +} +#endif +#endif diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h new file mode 100644 index 000000000..bdd18caa6 --- /dev/null +++ b/include/linux/cpu_rmap.h @@ -0,0 +1,69 @@ +#ifndef __LINUX_CPU_RMAP_H +#define __LINUX_CPU_RMAP_H + +/* + * cpu_rmap.c: CPU affinity reverse-map support + * Copyright 2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include + +/** + * struct cpu_rmap - CPU affinity reverse-map + * @refcount: kref for object + * @size: Number of objects to be reverse-mapped + * @used: Number of objects added + * @obj: Pointer to array of object pointers + * @near: For each CPU, the index and distance to the nearest object, + * based on affinity masks + */ +struct cpu_rmap { + struct kref refcount; + u16 size, used; + void **obj; + struct { + u16 index; + u16 dist; + } near[0]; +}; +#define CPU_RMAP_DIST_INF 0xffff + +extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags); +extern int cpu_rmap_put(struct cpu_rmap *rmap); + +extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj); +extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, + const struct cpumask *affinity); + +static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu) +{ + return rmap->near[cpu].index; +} + +static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu) +{ + return rmap->obj[rmap->near[cpu].index]; +} + +/** + * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs + * @size: Number of objects to be mapped + * + * Must be called in process context. 
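To make the CPU PM notifier interface above concrete, a hedged sketch of a driver-side notifier keyed on the event codes from cpu_pm.h; the save/restore steps are placeholders and <linux/notifier.h> is assumed for NOTIFY_OK.

#include <linux/notifier.h>

static int example_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* save per-CPU hardware context here */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* restore per-CPU hardware context here */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
};

/* from probe/init code: cpu_pm_register_notifier(&example_cpu_pm_nb); */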
+ */ +static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size) +{ + return alloc_cpu_rmap(size, GFP_KERNEL); +} +extern void free_irq_cpu_rmap(struct cpu_rmap *rmap); + +extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq); + +#endif /* __LINUX_CPU_RMAP_H */ diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h new file mode 100644 index 000000000..84d3c81b5 --- /dev/null +++ b/include/linux/cpufeature.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2014 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_CPUFEATURE_H +#define __LINUX_CPUFEATURE_H + +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE + +#include +#include +#include + +/* + * Macros imported from : + * - cpu_feature(x) ordinal value of feature called 'x' + * - cpu_have_feature(u32 n) whether feature #n is available + * - MAX_CPU_FEATURES upper bound for feature ordinal values + * Optional: + * - CPU_FEATURE_TYPEFMT format string fragment for printing the cpu type + * - CPU_FEATURE_TYPEVAL set of values matching the format string above + */ + +#ifndef CPU_FEATURE_TYPEFMT +#define CPU_FEATURE_TYPEFMT "%s" +#endif + +#ifndef CPU_FEATURE_TYPEVAL +#define CPU_FEATURE_TYPEVAL ELF_PLATFORM +#endif + +/* + * Use module_cpu_feature_match(feature, module_init_function) to + * declare that + * a) the module shall be probed upon discovery of CPU feature 'feature' + * (typically at boot time using udev) + * b) the module must not be loaded if CPU feature 'feature' is not present + * (not even by manual insmod). + * + * For a list of legal values for 'feature', please consult the file + * 'asm/cpufeature.h' of your favorite architecture. + */ +#define module_cpu_feature_match(x, __initfunc) \ +static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \ + { { .feature = cpu_feature(x) }, { } }; \ +MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \ + \ +static int __init cpu_feature_match_ ## x ## _init(void) \ +{ \ + if (!cpu_have_feature(cpu_feature(x))) \ + return -ENODEV; \ + return __initfunc(); \ +} \ +module_init(cpu_feature_match_ ## x ## _init) + +#endif +#endif diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h new file mode 100644 index 000000000..336166314 --- /dev/null +++ b/include/linux/cpufreq.h @@ -0,0 +1,950 @@ +/* + * linux/include/linux/cpufreq.h + * + * Copyright (C) 2001 Russell King + * (C) 2002 - 2003 Dominik Brodowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_CPUFREQ_H +#define _LINUX_CPUFREQ_H + +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************* + * CPUFREQ INTERFACE * + *********************************************************************/ +/* + * Frequency values here are CPU kHz + * + * Maximum transition latency is in nanoseconds - if it's unknown, + * CPUFREQ_ETERNAL shall be used. + */ + +#define CPUFREQ_ETERNAL (-1) +#define CPUFREQ_NAME_LEN 16 +/* Print length for names. 
Extra 1 space for accommodating '\n' in prints */ +#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) + +struct cpufreq_governor; + +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED, + CPUFREQ_TABLE_SORTED_ASCENDING, + CPUFREQ_TABLE_SORTED_DESCENDING +}; + +struct cpufreq_freqs { + unsigned int cpu; /* cpu nr */ + unsigned int old; + unsigned int new; + u8 flags; /* flags of cpufreq_driver, see below. */ +}; + +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + + /* in 10^(-9) s = nanoseconds */ + unsigned int transition_latency; +}; + +struct cpufreq_user_policy { + unsigned int min; /* in kHz */ + unsigned int max; /* in kHz */ +}; + +struct cpufreq_policy { + /* CPUs sharing clock, require sw coordination */ + cpumask_var_t cpus; /* Online CPUs only */ + cpumask_var_t related_cpus; /* Online + Offline CPUs */ + cpumask_var_t real_cpus; /* Related and present */ + + unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs + should set cpufreq */ + unsigned int cpu; /* cpu managing this policy, must be online */ + + struct clk *clk; + struct cpufreq_cpuinfo cpuinfo;/* see above */ + + unsigned int min; /* in kHz */ + unsigned int max; /* in kHz */ + unsigned int cur; /* in kHz, only needed if cpufreq + * governors are used */ + unsigned int restore_freq; /* = policy->cur before transition */ + unsigned int suspend_freq; /* freq to set during suspend */ + + unsigned int policy; /* see above */ + unsigned int last_policy; /* policy before unplug */ + struct cpufreq_governor *governor; /* see below */ + void *governor_data; + char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */ + + struct work_struct update; /* if update_policy() needs to be + * called, but you're in IRQ context */ + + struct cpufreq_user_policy user_policy; + struct cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; + + struct list_head policy_list; + struct kobject kobj; + struct completion kobj_unregister; + + /* + * The rules for this semaphore: + * - Any routine that wants to read from the policy structure will + * do a down_read on this semaphore. + * - Any routine that will write to the policy structure and/or may take away + * the policy altogether (eg. CPU hotplug), will hold this lock in write + * mode before doing so. + */ + struct rw_semaphore rwsem; + + /* + * Fast switch flags: + * - fast_switch_possible should be set by the driver if it can + * guarantee that frequency can be changed on any CPU sharing the + * policy and that the change will affect all of the policy CPUs then. + * - fast_switch_enabled is to be set by governors that support fast + * frequency switching with the help of cpufreq_enable_fast_switch(). + */ + bool fast_switch_possible; + bool fast_switch_enabled; + + /* + * Preferred average time interval between consecutive invocations of + * the driver to set the frequency for this policy. To be set by the + * scaling driver (0, which is the default, means no preference). + */ + unsigned int transition_delay_us; + + /* + * Remote DVFS flag (Not added to the driver structure as we don't want + * to access another structure from scheduler hotpath). + * + * Should be set if CPUs can do DVFS on behalf of other CPUs from + * different cpufreq policies. + */ + bool dvfs_possible_from_any_cpu; + + /* Cached frequency lookup from cpufreq_driver_resolve_freq. 
*/ + unsigned int cached_target_freq; + int cached_resolved_idx; + + /* Synchronization for frequency transitions */ + bool transition_ongoing; /* Tracks transition status */ + spinlock_t transition_lock; + wait_queue_head_t transition_wait; + struct task_struct *transition_task; /* Task which is doing the transition */ + + /* cpufreq-stats */ + struct cpufreq_stats *stats; + + /* For cpufreq driver's internal use */ + void *driver_data; +}; + +/* Only for ACPI */ +#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ +#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ +#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ +#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ + +#ifdef CONFIG_CPU_FREQ +struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); +struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); +void cpufreq_cpu_put(struct cpufreq_policy *policy); +#else +static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) +{ + return NULL; +} +static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) +{ + return NULL; +} +static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { } +#endif + +static inline bool policy_is_shared(struct cpufreq_policy *policy) +{ + return cpumask_weight(policy->cpus) > 1; +} + +/* /sys/devices/system/cpu/cpufreq: entry point for global variables */ +extern struct kobject *cpufreq_global_kobject; + +#ifdef CONFIG_CPU_FREQ +unsigned int cpufreq_get(unsigned int cpu); +unsigned int cpufreq_quick_get(unsigned int cpu); +unsigned int cpufreq_quick_get_max(unsigned int cpu); +void disable_cpufreq(void); + +u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); +int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); +void cpufreq_update_policy(unsigned int cpu); +bool have_governor_per_policy(void); +struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); +void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); +void cpufreq_disable_fast_switch(struct cpufreq_policy *policy); +#else +static inline unsigned int cpufreq_get(unsigned int cpu) +{ + return 0; +} +static inline unsigned int cpufreq_quick_get(unsigned int cpu) +{ + return 0; +} +static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) +{ + return 0; +} +static inline void disable_cpufreq(void) { } +#endif + +#ifdef CONFIG_CPU_FREQ_STAT +void cpufreq_stats_create_table(struct cpufreq_policy *policy); +void cpufreq_stats_free_table(struct cpufreq_policy *policy); +void cpufreq_stats_record_transition(struct cpufreq_policy *policy, + unsigned int new_freq); +#else +static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { } +static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { } +static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy, + unsigned int new_freq) { } +#endif /* CONFIG_CPU_FREQ_STAT */ + +/********************************************************************* + * CPUFREQ DRIVER INTERFACE * + *********************************************************************/ + +#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */ +#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */ +#define CPUFREQ_RELATION_C 2 /* closest frequency to target */ + +struct freq_attr { + struct attribute attr; + ssize_t (*show)(struct cpufreq_policy *, char *); + ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count); +}; + 
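A hedged sketch of the get/put pairing for the policy accessors declared above; the helper name and the field being read are illustrative.

static unsigned int example_policy_max_khz(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int max = 0;

	if (policy) {
		max = policy->max;		/* in kHz, per the struct above */
		cpufreq_cpu_put(policy);	/* drop the reference taken by _get() */
	}

	return max;
}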
+#define cpufreq_freq_attr_ro(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define cpufreq_freq_attr_ro_perm(_name, _perm) \ +static struct freq_attr _name = \ +__ATTR(_name, _perm, show_##_name, NULL) + +#define cpufreq_freq_attr_rw(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +#define cpufreq_freq_attr_wo(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0200, NULL, store_##_name) + +#define define_one_global_ro(_name) \ +static struct kobj_attribute _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define define_one_global_rw(_name) \ +static struct kobj_attribute _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + + +struct cpufreq_driver { + char name[CPUFREQ_NAME_LEN]; + u8 flags; + void *driver_data; + + /* needed by all drivers */ + int (*init)(struct cpufreq_policy *policy); + int (*verify)(struct cpufreq_policy *policy); + + /* define one out of two */ + int (*setpolicy)(struct cpufreq_policy *policy); + + /* + * On failure, should always restore frequency to policy->restore_freq + * (i.e. old freq). + */ + int (*target)(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); /* Deprecated */ + int (*target_index)(struct cpufreq_policy *policy, + unsigned int index); + unsigned int (*fast_switch)(struct cpufreq_policy *policy, + unsigned int target_freq); + + /* + * Caches and returns the lowest driver-supported frequency greater than + * or equal to the target frequency, subject to any driver limitations. + * Does not set the frequency. Only to be implemented for drivers with + * target(). + */ + unsigned int (*resolve_freq)(struct cpufreq_policy *policy, + unsigned int target_freq); + + /* + * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION + * unset. + * + * get_intermediate should return a stable intermediate frequency + * platform wants to switch to and target_intermediate() should set CPU + * to to that frequency, before jumping to the frequency corresponding + * to 'index'. Core will take care of sending notifications and driver + * doesn't have to handle them in target_intermediate() or + * target_index(). + * + * Drivers can return '0' from get_intermediate() in case they don't + * wish to switch to intermediate frequency for some target frequency. + * In that case core will directly call ->target_index(). 
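As a hedged sketch of how the attribute macros above are used by scaling drivers: define a show_<name>() callback and let cpufreq_freq_attr_ro() stitch it into a struct freq_attr. The attribute name and value below are invented, and sprintf() is assumed to be available via <linux/kernel.h>.

static ssize_t show_example_max_khz(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->max);	/* made-up attribute */
}
cpufreq_freq_attr_ro(example_max_khz);

/* &example_max_khz would then be listed in the driver's ->attr table
 * (see struct cpufreq_driver below). */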
+ */ + unsigned int (*get_intermediate)(struct cpufreq_policy *policy, + unsigned int index); + int (*target_intermediate)(struct cpufreq_policy *policy, + unsigned int index); + + /* should be defined, if possible */ + unsigned int (*get)(unsigned int cpu); + + /* optional */ + int (*bios_limit)(int cpu, unsigned int *limit); + + int (*exit)(struct cpufreq_policy *policy); + void (*stop_cpu)(struct cpufreq_policy *policy); + int (*suspend)(struct cpufreq_policy *policy); + int (*resume)(struct cpufreq_policy *policy); + + /* Will be called after the driver is fully initialized */ + void (*ready)(struct cpufreq_policy *policy); + + struct freq_attr **attr; + + /* platform specific boost support code */ + bool boost_enabled; + int (*set_boost)(int state); +}; + +/* flags */ +#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if + all ->init() calls failed */ +#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other + kernel "constants" aren't + affected by frequency + transitions */ +#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume + speed mismatches */ + +/* + * This should be set by platforms having multiple clock-domains, i.e. + * supporting multiple policies. With this sysfs directories of governor would + * be created in cpu/cpu/cpufreq/ directory and so they can use the same + * governor with different tunables for different clusters. + */ +#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3) + +/* + * Driver will do POSTCHANGE notifications from outside of their ->target() + * routine and so must set cpufreq_driver->flags with this flag, so that core + * can handle them specially. + */ +#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4) + +/* + * Set by drivers which want cpufreq core to check if CPU is running at a + * frequency present in freq-table exposed by the driver. For these drivers if + * CPU is found running at an out of table freq, we will try to set it to a freq + * from the table. And if that fails, we will stop further boot process by + * issuing a BUG_ON(). + */ +#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5) + +/* + * Set by drivers to disallow use of governors with "dynamic_switching" flag + * set. 
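Pulling the driver interface together, a hedged skeleton of a frequency-table based driver; every function body is a placeholder, and only the field names, the flag, and cpufreq_generic_frequency_table_verify() / cpufreq_register_driver() (both declared elsewhere in this header) are real.

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* fill in policy->freq_table, policy->cpuinfo.transition_latency, ... */
	return 0;
}

static int example_cpufreq_target_index(struct cpufreq_policy *policy,
					unsigned int index)
{
	/* program the clock for policy->freq_table[index].frequency here */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init		= example_cpufreq_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_cpufreq_target_index,
};

/* registered at module init with cpufreq_register_driver(&example_cpufreq_driver) */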
+ */ +#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING (1 << 6) + +int cpufreq_register_driver(struct cpufreq_driver *driver_data); +int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); + +const char *cpufreq_get_current_driver(void); +void *cpufreq_get_driver_data(void); + +static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, + unsigned int min, unsigned int max) +{ + if (policy->min < min) + policy->min = min; + if (policy->max < min) + policy->max = min; + if (policy->min > max) + policy->min = max; + if (policy->max > max) + policy->max = max; + if (policy->min > policy->max) + policy->min = policy->max; + return; +} + +static inline void +cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy) +{ + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); +} + +#ifdef CONFIG_CPU_FREQ +void cpufreq_suspend(void); +void cpufreq_resume(void); +int cpufreq_generic_suspend(struct cpufreq_policy *policy); +#else +static inline void cpufreq_suspend(void) {} +static inline void cpufreq_resume(void) {} +#endif + +/********************************************************************* + * CPUFREQ NOTIFIER INTERFACE * + *********************************************************************/ + +#define CPUFREQ_TRANSITION_NOTIFIER (0) +#define CPUFREQ_POLICY_NOTIFIER (1) + +/* Transition notifiers */ +#define CPUFREQ_PRECHANGE (0) +#define CPUFREQ_POSTCHANGE (1) + +/* Policy Notifiers */ +#define CPUFREQ_ADJUST (0) +#define CPUFREQ_NOTIFY (1) + +#ifdef CONFIG_CPU_FREQ +int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); +int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); + +void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs); +void cpufreq_freq_transition_end(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs, int transition_failed); + +#else /* CONFIG_CPU_FREQ */ +static inline int cpufreq_register_notifier(struct notifier_block *nb, + unsigned int list) +{ + return 0; +} +static inline int cpufreq_unregister_notifier(struct notifier_block *nb, + unsigned int list) +{ + return 0; +} +#endif /* !CONFIG_CPU_FREQ */ + +/** + * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch + * safe) + * @old: old value + * @div: divisor + * @mult: multiplier + * + * + * new = old * mult / div + */ +static inline unsigned long cpufreq_scale(unsigned long old, u_int div, + u_int mult) +{ +#if BITS_PER_LONG == 32 + u64 result = ((u64) old) * ((u64) mult); + do_div(result, div); + return (unsigned long) result; + +#elif BITS_PER_LONG == 64 + unsigned long result = old * ((u64) mult); + result /= div; + return result; +#endif +} + +/********************************************************************* + * CPUFREQ GOVERNORS * + *********************************************************************/ + +/* + * If (cpufreq_driver->target) exists, the ->governor decides what frequency + * within the limits is used. If (cpufreq_driver->setpolicy> exists, these + * two generic policies are available: + */ +#define CPUFREQ_POLICY_POWERSAVE (1) +#define CPUFREQ_POLICY_PERFORMANCE (2) + +/* + * The polling frequency depends on the capability of the processor. Default + * polling frequency is 1000 times the transition latency of the processor. The + * ondemand governor will work on any processor with transition latency <= 10ms, + * using appropriate sampling rate. 
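A hedged sketch of hooking the transition notifications described above; the handler only logs, <linux/notifier.h> and <linux/printk.h> are assumed for NOTIFY_OK and pr_debug(), and registration is shown as a comment.

#include <linux/notifier.h>
#include <linux/printk.h>

static int example_freq_transition(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u kHz -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_freq_nb = {
	.notifier_call = example_freq_transition,
};

/* at init: cpufreq_register_notifier(&example_freq_nb, CPUFREQ_TRANSITION_NOTIFIER); */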
+ */ +#define LATENCY_MULTIPLIER (1000) + +struct cpufreq_governor { + char name[CPUFREQ_NAME_LEN]; + int (*init)(struct cpufreq_policy *policy); + void (*exit)(struct cpufreq_policy *policy); + int (*start)(struct cpufreq_policy *policy); + void (*stop)(struct cpufreq_policy *policy); + void (*limits)(struct cpufreq_policy *policy); + ssize_t (*show_setspeed) (struct cpufreq_policy *policy, + char *buf); + int (*store_setspeed) (struct cpufreq_policy *policy, + unsigned int freq); + /* For governors which change frequency dynamically by themselves */ + bool dynamic_switching; + struct list_head governor_list; + struct module *owner; +}; + +/* Pass a target to the cpufreq driver */ +unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq); +int cpufreq_driver_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); +int __cpufreq_driver_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); +unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, + unsigned int target_freq); +unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy); +int cpufreq_register_governor(struct cpufreq_governor *governor); +void cpufreq_unregister_governor(struct cpufreq_governor *governor); + +struct cpufreq_governor *cpufreq_default_governor(void); +struct cpufreq_governor *cpufreq_fallback_governor(void); + +static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy) +{ + if (policy->max < policy->cur) + __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); + else if (policy->min > policy->cur) + __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); +} + +/* Governor attribute set */ +struct gov_attr_set { + struct kobject kobj; + struct list_head policy_list; + struct mutex update_lock; + int usage_count; +}; + +/* sysfs ops for cpufreq governors */ +extern const struct sysfs_ops governor_sysfs_ops; + +void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node); +void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node); +unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node); + +/* Governor sysfs attribute */ +struct governor_attr { + struct attribute attr; + ssize_t (*show)(struct gov_attr_set *attr_set, char *buf); + ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf, + size_t count); +}; + +/********************************************************************* + * FREQUENCY TABLE HELPERS * + *********************************************************************/ + +/* Special Values of .frequency field */ +#define CPUFREQ_ENTRY_INVALID ~0u +#define CPUFREQ_TABLE_END ~1u +/* Special Values of .flags field */ +#define CPUFREQ_BOOST_FREQ (1 << 0) + +struct cpufreq_frequency_table { + unsigned int flags; + unsigned int driver_data; /* driver specific data, not used by core */ + unsigned int frequency; /* kHz - doesn't need to be in ascending + * order */ +}; + +#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) +int dev_pm_opp_init_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table); +void dev_pm_opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table); +#else +static inline int dev_pm_opp_init_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table + **table) +{ + return -EINVAL; +} + +static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, + 
struct cpufreq_frequency_table + **table) +{ +} +#endif + +/* + * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table + * @pos: the cpufreq_frequency_table * to use as a loop cursor. + * @table: the cpufreq_frequency_table * to iterate over. + */ + +#define cpufreq_for_each_entry(pos, table) \ + for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) + +/* + * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table + * with index + * @pos: the cpufreq_frequency_table * to use as a loop cursor. + * @table: the cpufreq_frequency_table * to iterate over. + * @idx: the table entry currently being processed + */ + +#define cpufreq_for_each_entry_idx(pos, table, idx) \ + for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \ + pos++, idx++) + +/* + * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table + * excluding CPUFREQ_ENTRY_INVALID frequencies. + * @pos: the cpufreq_frequency_table * to use as a loop cursor. + * @table: the cpufreq_frequency_table * to iterate over. + */ + +#define cpufreq_for_each_valid_entry(pos, table) \ + for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \ + if (pos->frequency == CPUFREQ_ENTRY_INVALID) \ + continue; \ + else + +/* + * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq + * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies. + * @pos: the cpufreq_frequency_table * to use as a loop cursor. + * @table: the cpufreq_frequency_table * to iterate over. + * @idx: the table entry currently being processed + */ + +#define cpufreq_for_each_valid_entry_idx(pos, table, idx) \ + cpufreq_for_each_entry_idx(pos, table, idx) \ + if (pos->frequency == CPUFREQ_ENTRY_INVALID) \ + continue; \ + else + + +int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); + +int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); +int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy); + +int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); +int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, + unsigned int freq); + +ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf); + +#ifdef CONFIG_CPU_FREQ +int cpufreq_boost_trigger_state(int state); +int cpufreq_boost_enabled(void); +int cpufreq_enable_boost_support(void); +bool policy_has_boost_freq(struct cpufreq_policy *policy); + +/* Find lowest freq at or above target in a table in ascending order */ +static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (freq >= target_freq) + return idx; + + best = idx; + } + + return best; +} + +/* Find lowest freq at or above target in a table in descending order */ +static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (freq == target_freq) + return idx; + + if (freq > target_freq) { + best = idx; + continue; + } + + /* No freq found 
above target_freq */ + if (best == -1) + return idx; + + return best; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_al(policy, target_freq); + else + return cpufreq_table_find_index_dl(policy, target_freq); +} + +/* Find highest freq at or below target in a table in ascending order */ +static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (freq == target_freq) + return idx; + + if (freq < target_freq) { + best = idx; + continue; + } + + /* No freq found below target_freq */ + if (best == -1) + return idx; + + return best; + } + + return best; +} + +/* Find highest freq at or below target in a table in descending order */ +static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (freq <= target_freq) + return idx; + + best = idx; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_ah(policy, target_freq); + else + return cpufreq_table_find_index_dh(policy, target_freq); +} + +/* Find closest freq to target in a table in ascending order */ +static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (freq == target_freq) + return idx; + + if (freq < target_freq) { + best = idx; + continue; + } + + /* No freq found below target_freq */ + if (best == -1) + return idx; + + /* Choose the closest freq */ + if (target_freq - table[best].frequency > freq - target_freq) + return idx; + + return best; + } + + return best; +} + +/* Find closest freq to target in a table in descending order */ +static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (freq == target_freq) + return idx; + + if (freq > target_freq) { + best = idx; + continue; + } + + /* No freq found above target_freq */ + if (best == -1) + return idx; + + /* Choose the closest freq */ + if (table[best].frequency - target_freq > target_freq - freq) + return idx; + + return best; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static 
inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_ac(policy, target_freq); + else + return cpufreq_table_find_index_dc(policy, target_freq); +} + +static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) + return cpufreq_table_index_unsorted(policy, target_freq, + relation); + + switch (relation) { + case CPUFREQ_RELATION_L: + return cpufreq_table_find_index_l(policy, target_freq); + case CPUFREQ_RELATION_H: + return cpufreq_table_find_index_h(policy, target_freq); + case CPUFREQ_RELATION_C: + return cpufreq_table_find_index_c(policy, target_freq); + default: + pr_err("%s: Invalid relation: %d\n", __func__, relation); + return -EINVAL; + } +} + +static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *pos; + int count = 0; + + if (unlikely(!policy->freq_table)) + return 0; + + cpufreq_for_each_valid_entry(pos, policy->freq_table) + count++; + + return count; +} +#else +static inline int cpufreq_boost_trigger_state(int state) +{ + return 0; +} +static inline int cpufreq_boost_enabled(void) +{ + return 0; +} + +static inline int cpufreq_enable_boost_support(void) +{ + return -EINVAL; +} + +static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) +{ + return false; +} +#endif + +extern void arch_freq_prepare_all(void); +extern unsigned int arch_freq_get_on_cpu(int cpu); + +extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, + unsigned long max_freq); + +/* the following are really really optional */ +extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; +extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; +extern struct freq_attr *cpufreq_generic_attr[]; +int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy); + +unsigned int cpufreq_generic_get(unsigned int cpu); +int cpufreq_generic_init(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int transition_latency); +#endif /* _LINUX_CPUFREQ_H */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h new file mode 100644 index 000000000..60efd9810 --- /dev/null +++ b/include/linux/cpuhotplug.h @@ -0,0 +1,399 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CPUHOTPLUG_H +#define __CPUHOTPLUG_H + +#include + +/* + * CPU-up CPU-down + * + * BP AP BP AP + * + * OFFLINE OFFLINE + * | ^ + * v | + * BRINGUP_CPU->AP_OFFLINE BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead) + * | AP_OFFLINE + * v (IRQ-off) ,---------------^ + * AP_ONLNE | (stop_machine) + * | TEARDOWN_CPU <- AP_ONLINE_IDLE + * | ^ + * v | + * AP_ACTIVE AP_ACTIVE + */ + +enum cpuhp_state { + CPUHP_INVALID = -1, + CPUHP_OFFLINE = 0, + CPUHP_CREATE_THREADS, + CPUHP_PERF_PREPARE, + CPUHP_PERF_X86_PREPARE, + CPUHP_PERF_X86_AMD_UNCORE_PREP, + CPUHP_PERF_POWER, + CPUHP_PERF_SUPERH, + CPUHP_X86_HPET_DEAD, + CPUHP_X86_APB_DEAD, + CPUHP_X86_MCE_DEAD, + CPUHP_VIRT_NET_DEAD, + CPUHP_SLUB_DEAD, + CPUHP_MM_WRITEBACK_DEAD, + CPUHP_MM_VMSTAT_DEAD, + CPUHP_SOFTIRQ_DEAD, + CPUHP_NET_MVNETA_DEAD, + CPUHP_CPUIDLE_DEAD, + CPUHP_ARM64_FPSIMD_DEAD, + CPUHP_ARM_OMAP_WAKE_DEAD, + CPUHP_IRQ_POLL_DEAD, + CPUHP_BLOCK_SOFTIRQ_DEAD, + CPUHP_ACPI_CPUDRV_DEAD, + 
CPUHP_S390_PFAULT_DEAD, + CPUHP_BLK_MQ_DEAD, + CPUHP_FS_BUFF_DEAD, + CPUHP_PRINTK_DEAD, + CPUHP_MM_MEMCQ_DEAD, + CPUHP_PERCPU_CNT_DEAD, + CPUHP_RADIX_DEAD, + CPUHP_PAGE_ALLOC_DEAD, + CPUHP_NET_DEV_DEAD, + CPUHP_PCI_XGENE_DEAD, + CPUHP_IOMMU_INTEL_DEAD, + CPUHP_LUSTRE_CFS_DEAD, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, + CPUHP_PADATA_DEAD, + CPUHP_RANDOM_PREPARE, + CPUHP_WORKQUEUE_PREP, + CPUHP_POWER_NUMA_PREPARE, + CPUHP_HRTIMERS_PREPARE, + CPUHP_PROFILE_PREPARE, + CPUHP_X2APIC_PREPARE, + CPUHP_SMPCFD_PREPARE, + CPUHP_RELAY_PREPARE, + CPUHP_SLAB_PREPARE, + CPUHP_MD_RAID5_PREPARE, + CPUHP_RCUTREE_PREP, + CPUHP_CPUIDLE_COUPLED_PREPARE, + CPUHP_POWERPC_PMAC_PREPARE, + CPUHP_POWERPC_MMU_CTX_PREPARE, + CPUHP_XEN_PREPARE, + CPUHP_XEN_EVTCHN_PREPARE, + CPUHP_ARM_SHMOBILE_SCU_PREPARE, + CPUHP_SH_SH3X_PREPARE, + CPUHP_NET_FLOW_PREPARE, + CPUHP_TOPOLOGY_PREPARE, + CPUHP_NET_IUCV_PREPARE, + CPUHP_ARM_BL_PREPARE, + CPUHP_TRACE_RB_PREPARE, + CPUHP_MM_ZS_PREPARE, + CPUHP_MM_ZSWP_MEM_PREPARE, + CPUHP_MM_ZSWP_POOL_PREPARE, + CPUHP_KVM_PPC_BOOK3S_PREPARE, + CPUHP_ZCOMP_PREPARE, + CPUHP_TIMERS_PREPARE, + CPUHP_MIPS_SOC_PREPARE, + CPUHP_BP_PREPARE_DYN, + CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, + CPUHP_BRINGUP_CPU, + CPUHP_AP_IDLE_DEAD, + CPUHP_AP_OFFLINE, + CPUHP_AP_SCHED_STARTING, + CPUHP_AP_RCUTREE_DYING, + CPUHP_AP_IRQ_GIC_STARTING, + CPUHP_AP_IRQ_HIP04_STARTING, + CPUHP_AP_IRQ_ARMADA_XP_STARTING, + CPUHP_AP_IRQ_BCM2836_STARTING, + CPUHP_AP_IRQ_MIPS_GIC_STARTING, + CPUHP_AP_ARM_MVEBU_COHERENCY, + CPUHP_AP_MICROCODE_LOADER, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, + CPUHP_AP_PERF_X86_STARTING, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING, + CPUHP_AP_PERF_X86_CQM_STARTING, + CPUHP_AP_PERF_X86_CSTATE_STARTING, + CPUHP_AP_PERF_XTENSA_STARTING, + CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, + CPUHP_AP_ARM_SDEI_STARTING, + CPUHP_AP_ARM_VFP_STARTING, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, + CPUHP_AP_PERF_ARM_ACPI_STARTING, + CPUHP_AP_PERF_ARM_STARTING, + CPUHP_AP_ARM_L2X0_STARTING, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, + CPUHP_AP_ARM_ARCH_TIMER_STARTING, + CPUHP_AP_OMAP_DM_TIMER_STARTING, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, + CPUHP_AP_JCORE_TIMER_STARTING, + CPUHP_AP_ARM_TWD_STARTING, + CPUHP_AP_QCOM_TIMER_STARTING, + CPUHP_AP_ARMADA_TIMER_STARTING, + CPUHP_AP_MARCO_TIMER_STARTING, + CPUHP_AP_MIPS_GIC_TIMER_STARTING, + CPUHP_AP_ARC_TIMER_STARTING, + CPUHP_AP_RISCV_TIMER_STARTING, + CPUHP_AP_KVM_STARTING, + CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, + CPUHP_AP_KVM_ARM_VGIC_STARTING, + CPUHP_AP_KVM_ARM_TIMER_STARTING, + /* Must be the last timer callback */ + CPUHP_AP_DUMMY_TIMER_STARTING, + CPUHP_AP_ARM_XEN_STARTING, + CPUHP_AP_ARM_CORESIGHT_STARTING, + CPUHP_AP_ARM64_ISNDEP_STARTING, + CPUHP_AP_SMPCFD_DYING, + CPUHP_AP_X86_TBOOT_DYING, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING, + CPUHP_AP_ONLINE, + CPUHP_TEARDOWN_CPU, + CPUHP_AP_ONLINE_IDLE, + CPUHP_AP_SMPBOOT_THREADS, + CPUHP_AP_X86_VDSO_VMA_ONLINE, + CPUHP_AP_IRQ_AFFINITY_ONLINE, + CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, + CPUHP_AP_PERF_ONLINE, + CPUHP_AP_PERF_X86_ONLINE, + CPUHP_AP_PERF_X86_UNCORE_ONLINE, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, + CPUHP_AP_PERF_X86_RAPL_ONLINE, + CPUHP_AP_PERF_X86_CQM_ONLINE, + CPUHP_AP_PERF_X86_CSTATE_ONLINE, + CPUHP_AP_PERF_S390_CF_ONLINE, + CPUHP_AP_PERF_S390_SF_ONLINE, + CPUHP_AP_PERF_ARM_CCI_ONLINE, + CPUHP_AP_PERF_ARM_CCN_ONLINE, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + 
CPUHP_AP_PERF_ARM_L2X0_ONLINE, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, + CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, + CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, + CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, + CPUHP_AP_WATCHDOG_ONLINE, + CPUHP_AP_WORKQUEUE_ONLINE, + CPUHP_AP_RANDOM_ONLINE, + CPUHP_AP_RCUTREE_ONLINE, + CPUHP_AP_BASE_CACHEINFO_ONLINE, + CPUHP_AP_ONLINE_DYN, + CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, + CPUHP_AP_X86_HPET_ONLINE, + CPUHP_AP_X86_KVM_CLK_ONLINE, + CPUHP_AP_ACTIVE, + CPUHP_ONLINE, +}; + +int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu), bool multi_instance); + +int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name, + bool invoke, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu), + bool multi_instance); +/** + * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks + * @state: The state for which the calls are installed + * @name: Name of the callback (will be used in debug output) + * @startup: startup callback function + * @teardown: teardown callback function + * + * Installs the callback functions and invokes the startup callback on + * the present cpus which have already reached the @state. + */ +static inline int cpuhp_setup_state(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)) +{ + return __cpuhp_setup_state(state, name, true, startup, teardown, false); +} + +static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)) +{ + return __cpuhp_setup_state_cpuslocked(state, name, true, startup, + teardown, false); +} + +/** + * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the + * callbacks + * @state: The state for which the calls are installed + * @name: Name of the callback. + * @startup: startup callback function + * @teardown: teardown callback function + * + * Same as @cpuhp_setup_state except that no calls are executed are invoked + * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n. + */ +static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)) +{ + return __cpuhp_setup_state(state, name, false, startup, teardown, + false); +} + +static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)) +{ + return __cpuhp_setup_state_cpuslocked(state, name, false, startup, + teardown, false); +} + +/** + * cpuhp_setup_state_multi - Add callbacks for multi state + * @state: The state for which the calls are installed + * @name: Name of the callback. + * @startup: startup callback function + * @teardown: teardown callback function + * + * Sets the internal multi_instance flag and prepares a state to work as a multi + * instance callback. No callbacks are invoked at this point. The callbacks are + * invoked once an instance for this state are registered via + * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls. 
+ */ +static inline int cpuhp_setup_state_multi(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu, + struct hlist_node *node), + int (*teardown)(unsigned int cpu, + struct hlist_node *node)) +{ + return __cpuhp_setup_state(state, name, false, + (void *) startup, + (void *) teardown, true); +} + +int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, + bool invoke); +int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, + struct hlist_node *node, bool invoke); + +/** + * cpuhp_state_add_instance - Add an instance for a state and invoke startup + * callback. + * @state: The state for which the instance is installed + * @node: The node for this individual state. + * + * Installs the instance for the @state and invokes the startup callback on + * the present cpus which have already reached the @state. The @state must have + * been earlier marked as multi-instance by @cpuhp_setup_state_multi. + */ +static inline int cpuhp_state_add_instance(enum cpuhp_state state, + struct hlist_node *node) +{ + return __cpuhp_state_add_instance(state, node, true); +} + +/** + * cpuhp_state_add_instance_nocalls - Add an instance for a state without + * invoking the startup callback. + * @state: The state for which the instance is installed + * @node: The node for this individual state. + * + * Installs the instance for the @state The @state must have been earlier + * marked as multi-instance by @cpuhp_setup_state_multi. + */ +static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state, + struct hlist_node *node) +{ + return __cpuhp_state_add_instance(state, node, false); +} + +static inline int +cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state, + struct hlist_node *node) +{ + return __cpuhp_state_add_instance_cpuslocked(state, node, false); +} + +void __cpuhp_remove_state(enum cpuhp_state state, bool invoke); +void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke); + +/** + * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown + * @state: The state for which the calls are removed + * + * Removes the callback functions and invokes the teardown callback on + * the present cpus which have already reached the @state. + */ +static inline void cpuhp_remove_state(enum cpuhp_state state) +{ + __cpuhp_remove_state(state, true); +} + +/** + * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking + * teardown + * @state: The state for which the calls are removed + */ +static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state) +{ + __cpuhp_remove_state(state, false); +} + +static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state) +{ + __cpuhp_remove_state_cpuslocked(state, false); +} + +/** + * cpuhp_remove_multi_state - Remove hotplug multi state callback + * @state: The state for which the calls are removed + * + * Removes the callback functions from a multi state. This is the reverse of + * cpuhp_setup_state_multi(). All instances should have been removed before + * invoking this function. + */ +static inline void cpuhp_remove_multi_state(enum cpuhp_state state) +{ + __cpuhp_remove_state(state, false); +} + +int __cpuhp_state_remove_instance(enum cpuhp_state state, + struct hlist_node *node, bool invoke); + +/** + * cpuhp_state_remove_instance - Remove hotplug instance from state and invoke + * the teardown callback + * @state: The state from which the instance is removed + * @node: The node for this individual state. 
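As a sketch of the setup/remove pairing documented above: a caller without a fixed slot in enum cpuhp_state can pass CPUHP_AP_ONLINE_DYN and keep the positive return value as its dynamically allocated state. The demo_* names and the "demo:online" string are placeholders.

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state demo_hp_state;

static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: cpu%u is now online\n", cpu);
	return 0;
}

static int demo_cpu_down(unsigned int cpu)
{
	pr_info("demo: cpu%u is going down\n", cpu);
	return 0;
}

static int __init demo_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_down);
	if (ret < 0)
		return ret;

	demo_hp_state = ret;	/* dynamically allocated state number */
	return 0;
}

static void __exit demo_hp_exit(void)
{
	cpuhp_remove_state(demo_hp_state);
}

module_init(demo_hp_init);
module_exit(demo_hp_exit);
MODULE_LICENSE("GPL");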
+ * + * Removes the instance and invokes the teardown callback on the present cpus + * which have already reached the @state. + */ +static inline int cpuhp_state_remove_instance(enum cpuhp_state state, + struct hlist_node *node) +{ + return __cpuhp_state_remove_instance(state, node, true); +} + +/** + * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state + * without invoking the reatdown callback + * @state: The state from which the instance is removed + * @node: The node for this individual state. + * + * Removes the instance without invoking the teardown callback. + */ +static inline int cpuhp_state_remove_instance_nocalls(enum cpuhp_state state, + struct hlist_node *node) +{ + return __cpuhp_state_remove_instance(state, node, false); +} + +#ifdef CONFIG_SMP +void cpuhp_online_idle(enum cpuhp_state state); +#else +static inline void cpuhp_online_idle(enum cpuhp_state state) { } +#endif + +#endif diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h new file mode 100644 index 000000000..317aecaed --- /dev/null +++ b/include/linux/cpuidle.h @@ -0,0 +1,294 @@ +/* + * cpuidle.h - a generic framework for CPU idle power management + * + * (C) 2007 Venkatesh Pallipadi + * Shaohua Li + * Adam Belay + * + * This code is licenced under the GPL. + */ + +#ifndef _LINUX_CPUIDLE_H +#define _LINUX_CPUIDLE_H + +#include +#include +#include + +#define CPUIDLE_STATE_MAX 10 +#define CPUIDLE_NAME_LEN 16 +#define CPUIDLE_DESC_LEN 32 + +struct module; + +struct cpuidle_device; +struct cpuidle_driver; + + +/**************************** + * CPUIDLE DEVICE INTERFACE * + ****************************/ + +struct cpuidle_state_usage { + unsigned long long disable; + unsigned long long usage; + unsigned long long time; /* in US */ +#ifdef CONFIG_SUSPEND + unsigned long long s2idle_usage; + unsigned long long s2idle_time; /* in US */ +#endif +}; + +struct cpuidle_state { + char name[CPUIDLE_NAME_LEN]; + char desc[CPUIDLE_DESC_LEN]; + + unsigned int flags; + unsigned int exit_latency; /* in US */ + int power_usage; /* in mW */ + unsigned int target_residency; /* in US */ + bool disabled; /* disabled on all CPUs */ + + int (*enter) (struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index); + + int (*enter_dead) (struct cpuidle_device *dev, int index); + + /* + * CPUs execute ->enter_s2idle with the local tick or entire timekeeping + * suspended, so it must not re-enable interrupts at any point (even + * temporarily) or attempt to change states of clock event devices. 
+ */ + void (*enter_s2idle) (struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index); +}; + +/* Idle State Flags */ +#define CPUIDLE_FLAG_NONE (0x00) +#define CPUIDLE_FLAG_POLLING (0x01) /* polling state */ +#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ +#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ + +#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) + +struct cpuidle_device_kobj; +struct cpuidle_state_kobj; +struct cpuidle_driver_kobj; + +struct cpuidle_device { + unsigned int registered:1; + unsigned int enabled:1; + unsigned int use_deepest_state:1; + unsigned int poll_time_limit:1; + unsigned int cpu; + + int last_residency; + struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; + struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; + struct cpuidle_driver_kobj *kobj_driver; + struct cpuidle_device_kobj *kobj_dev; + struct list_head device_list; + +#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED + cpumask_t coupled_cpus; + struct cpuidle_coupled *coupled; +#endif +}; + +DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); +DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); + +/** + * cpuidle_get_last_residency - retrieves the last state's residency time + * @dev: the target CPU + */ +static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) +{ + return dev->last_residency; +} + + +/**************************** + * CPUIDLE DRIVER INTERFACE * + ****************************/ + +struct cpuidle_driver { + const char *name; + struct module *owner; + int refcnt; + + /* used by the cpuidle framework to setup the broadcast timer */ + unsigned int bctimer:1; + /* states array must be ordered in decreasing power consumption */ + struct cpuidle_state states[CPUIDLE_STATE_MAX]; + int state_count; + int safe_state_index; + + /* the driver handles the cpus in cpumask */ + struct cpumask *cpumask; +}; + +#ifdef CONFIG_CPU_IDLE +extern void disable_cpuidle(void); +extern bool cpuidle_not_available(struct cpuidle_driver *drv, + struct cpuidle_device *dev); + +extern int cpuidle_select(struct cpuidle_driver *drv, + struct cpuidle_device *dev, + bool *stop_tick); +extern int cpuidle_enter(struct cpuidle_driver *drv, + struct cpuidle_device *dev, int index); +extern void cpuidle_reflect(struct cpuidle_device *dev, int index); + +extern int cpuidle_register_driver(struct cpuidle_driver *drv); +extern struct cpuidle_driver *cpuidle_get_driver(void); +extern struct cpuidle_driver *cpuidle_driver_ref(void); +extern void cpuidle_driver_unref(void); +extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); +extern int cpuidle_register_device(struct cpuidle_device *dev); +extern void cpuidle_unregister_device(struct cpuidle_device *dev); +extern int cpuidle_register(struct cpuidle_driver *drv, + const struct cpumask *const coupled_cpus); +extern void cpuidle_unregister(struct cpuidle_driver *drv); +extern void cpuidle_pause_and_lock(void); +extern void cpuidle_resume_and_unlock(void); +extern void cpuidle_pause(void); +extern void cpuidle_resume(void); +extern int cpuidle_enable_device(struct cpuidle_device *dev); +extern void cpuidle_disable_device(struct cpuidle_device *dev); +extern int cpuidle_play_dead(void); + +extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); +static inline struct cpuidle_device *cpuidle_get_device(void) +{return __this_cpu_read(cpuidle_devices); } +#else +static inline void disable_cpuidle(void) { } +static inline bool cpuidle_not_available(struct 
cpuidle_driver *drv, + struct cpuidle_device *dev) +{return true; } +static inline int cpuidle_select(struct cpuidle_driver *drv, + struct cpuidle_device *dev, bool *stop_tick) +{return -ENODEV; } +static inline int cpuidle_enter(struct cpuidle_driver *drv, + struct cpuidle_device *dev, int index) +{return -ENODEV; } +static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { } +static inline int cpuidle_register_driver(struct cpuidle_driver *drv) +{return -ENODEV; } +static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; } +static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; } +static inline void cpuidle_driver_unref(void) {} +static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { } +static inline int cpuidle_register_device(struct cpuidle_device *dev) +{return -ENODEV; } +static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { } +static inline int cpuidle_register(struct cpuidle_driver *drv, + const struct cpumask *const coupled_cpus) +{return -ENODEV; } +static inline void cpuidle_unregister(struct cpuidle_driver *drv) { } +static inline void cpuidle_pause_and_lock(void) { } +static inline void cpuidle_resume_and_unlock(void) { } +static inline void cpuidle_pause(void) { } +static inline void cpuidle_resume(void) { } +static inline int cpuidle_enable_device(struct cpuidle_device *dev) +{return -ENODEV; } +static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } +static inline int cpuidle_play_dead(void) {return -ENODEV; } +static inline struct cpuidle_driver *cpuidle_get_cpu_driver( + struct cpuidle_device *dev) {return NULL; } +static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } +#endif + +#ifdef CONFIG_CPU_IDLE +extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev); +extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv, + struct cpuidle_device *dev); +extern void cpuidle_use_deepest_state(bool enable); +#else +static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{return -ENODEV; } +static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{return -ENODEV; } +static inline void cpuidle_use_deepest_state(bool enable) +{ +} +#endif + +/* kernel/sched/idle.c */ +extern void sched_idle_set_state(struct cpuidle_state *idle_state); +extern void default_idle_call(void); + +#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED +void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a); +#else +static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a) +{ +} +#endif + +#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX) +void cpuidle_poll_state_init(struct cpuidle_driver *drv); +#else +static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {} +#endif + +/****************************** + * CPUIDLE GOVERNOR INTERFACE * + ******************************/ + +struct cpuidle_governor { + char name[CPUIDLE_NAME_LEN]; + struct list_head governor_list; + unsigned int rating; + + int (*enable) (struct cpuidle_driver *drv, + struct cpuidle_device *dev); + void (*disable) (struct cpuidle_driver *drv, + struct cpuidle_device *dev); + + int (*select) (struct cpuidle_driver *drv, + struct cpuidle_device *dev, + bool *stop_tick); + void (*reflect) (struct cpuidle_device *dev, int index); +}; + +#ifdef CONFIG_CPU_IDLE +extern int cpuidle_register_governor(struct 
cpuidle_governor *gov); +extern int cpuidle_governor_latency_req(unsigned int cpu); +#else +static inline int cpuidle_register_governor(struct cpuidle_governor *gov) +{return 0;} +#endif + +#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \ +({ \ + int __ret = 0; \ + \ + if (!idx) { \ + cpu_do_idle(); \ + return idx; \ + } \ + \ + if (!is_retention) \ + __ret = cpu_pm_enter(); \ + if (!__ret) { \ + __ret = low_level_idle_enter(idx); \ + if (!is_retention) \ + cpu_pm_exit(); \ + } \ + \ + __ret ? -1 : idx; \ +}) + +#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0) + +#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1) + +#endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h new file mode 100644 index 000000000..147bdec42 --- /dev/null +++ b/include/linux/cpumask.h @@ -0,0 +1,921 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_CPUMASK_H +#define __LINUX_CPUMASK_H + +/* + * Cpumasks provide a bitmap suitable for representing the + * set of CPU's in a system, one bit position per CPU number. In general, + * only nr_cpu_ids (<= NR_CPUS) bits are valid. + */ +#include +#include +#include +#include + +/* Don't assign or return these: may not be this big! */ +typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; + +/** + * cpumask_bits - get the bits in a cpumask + * @maskp: the struct cpumask * + * + * You should only assume nr_cpu_ids bits of this mask are valid. This is + * a macro so it's const-correct. + */ +#define cpumask_bits(maskp) ((maskp)->bits) + +/** + * cpumask_pr_args - printf args to output a cpumask + * @maskp: cpumask to be printed + * + * Can be used to provide arguments for '%*pb[l]' when printing a cpumask. + */ +#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp) + +#if NR_CPUS == 1 +#define nr_cpu_ids 1U +#else +extern unsigned int nr_cpu_ids; +#endif + +#ifdef CONFIG_CPUMASK_OFFSTACK +/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, + * not all bits may be allocated. */ +#define nr_cpumask_bits nr_cpu_ids +#else +#define nr_cpumask_bits ((unsigned int)NR_CPUS) +#endif + +/* + * The following particular system cpumasks and operations manage + * possible, present, active and online cpus. + * + * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable + * cpu_present_mask - has bit 'cpu' set iff cpu is populated + * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler + * cpu_active_mask - has bit 'cpu' set iff cpu available to migration + * + * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. + * + * The cpu_possible_mask is fixed at boot time, as the set of CPU id's + * that it is possible might ever be plugged in at anytime during the + * life of that system boot. The cpu_present_mask is dynamic(*), + * representing which CPUs are currently plugged in. And + * cpu_online_mask is the dynamic subset of cpu_present_mask, + * indicating those CPUs available for scheduling. + * + * If HOTPLUG is enabled, then cpu_possible_mask is forced to have + * all NR_CPUS bits set, otherwise it is just the set of CPUs that + * ACPI reports present at boot. + * + * If HOTPLUG is enabled, then cpu_present_mask varies dynamically, + * depending on what ACPI reports as currently plugged in, otherwise + * cpu_present_mask is just a copy of cpu_possible_mask. 
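A small sketch of how the system masks described above are commonly inspected: the printk format extensions "%*pb" (hex) and "%*pbl" (range list) pair with cpumask_pr_args(), and the mask pointers are defined just below. demo_dump_masks() is a hypothetical helper.

#include <linux/cpumask.h>
#include <linux/printk.h>

static void demo_dump_masks(void)
{
	/* "%*pbl" prints a range list such as "0-3,6"; "%*pb" prints a hex mask */
	pr_info("possible CPUs: %*pbl\n", cpumask_pr_args(cpu_possible_mask));
	pr_info("online CPUs  : %*pbl\n", cpumask_pr_args(cpu_online_mask));
	pr_info("present CPUs : %*pb\n", cpumask_pr_args(cpu_present_mask));
}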
+ * + * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not + * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. + * + * Subtleties: + * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode + * assumption that their single CPU is online. The UP + * cpu_{online,possible,present}_masks are placebos. Changing them + * will have no useful affect on the following num_*_cpus() + * and cpu_*() macros in the UP case. This ugliness is a UP + * optimization - don't waste any instructions or memory references + * asking if you're online or how many CPUs there are if there is + * only one CPU. + */ + +extern struct cpumask __cpu_possible_mask; +extern struct cpumask __cpu_online_mask; +extern struct cpumask __cpu_present_mask; +extern struct cpumask __cpu_active_mask; +#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask) +#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask) +#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) +#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) + +#if NR_CPUS > 1 +#define num_online_cpus() cpumask_weight(cpu_online_mask) +#define num_possible_cpus() cpumask_weight(cpu_possible_mask) +#define num_present_cpus() cpumask_weight(cpu_present_mask) +#define num_active_cpus() cpumask_weight(cpu_active_mask) +#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) +#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) +#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) +#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) +#else +#define num_online_cpus() 1U +#define num_possible_cpus() 1U +#define num_present_cpus() 1U +#define num_active_cpus() 1U +#define cpu_online(cpu) ((cpu) == 0) +#define cpu_possible(cpu) ((cpu) == 0) +#define cpu_present(cpu) ((cpu) == 0) +#define cpu_active(cpu) ((cpu) == 0) +#endif + +static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) +{ +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + WARN_ON_ONCE(cpu >= bits); +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ +} + +/* verify cpu argument to cpumask_* operators */ +static inline unsigned int cpumask_check(unsigned int cpu) +{ + cpu_max_bits_warn(cpu, nr_cpumask_bits); + return cpu; +} + +#if NR_CPUS == 1 +/* Uniprocessor. Assume all masks are "1". */ +static inline unsigned int cpumask_first(const struct cpumask *srcp) +{ + return 0; +} + +static inline unsigned int cpumask_last(const struct cpumask *srcp) +{ + return 0; +} + +/* Valid inputs for n are -1 and 0. */ +static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) +{ + return n+1; +} + +static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) +{ + return n+1; +} + +static inline unsigned int cpumask_next_and(int n, + const struct cpumask *srcp, + const struct cpumask *andp) +{ + return n+1; +} + +static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, + int start, bool wrap) +{ + /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */ + return (wrap && n == 0); +} + +/* cpu must be a valid cpu, ie 0, so there's no other choice. 
*/ +static inline unsigned int cpumask_any_but(const struct cpumask *mask, + unsigned int cpu) +{ + return 1; +} + +static inline unsigned int cpumask_local_spread(unsigned int i, int node) +{ + return 0; +} + +#define for_each_cpu(cpu, mask) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) +#define for_each_cpu_not(cpu, mask) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) +#define for_each_cpu_wrap(cpu, mask, start) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start)) +#define for_each_cpu_and(cpu, mask, and) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) +#else +/** + * cpumask_first - get the first cpu in a cpumask + * @srcp: the cpumask pointer + * + * Returns >= nr_cpu_ids if no cpus set. + */ +static inline unsigned int cpumask_first(const struct cpumask *srcp) +{ + return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_last - get the last CPU in a cpumask + * @srcp: - the cpumask pointer + * + * Returns >= nr_cpumask_bits if no CPUs set. + */ +static inline unsigned int cpumask_last(const struct cpumask *srcp) +{ + return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits); +} + +unsigned int cpumask_next(int n, const struct cpumask *srcp); + +/** + * cpumask_next_zero - get the next unset cpu in a cpumask + * @n: the cpu prior to the place to search (ie. return will be > @n) + * @srcp: the cpumask pointer + * + * Returns >= nr_cpu_ids if no further cpus unset. + */ +static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) +{ + /* -1 is a legal arg here. */ + if (n != -1) + cpumask_check(n); + return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); +} + +int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); +int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); +unsigned int cpumask_local_spread(unsigned int i, int node); + +/** + * for_each_cpu - iterate over every cpu in a mask + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask pointer + * + * After the loop, cpu is >= nr_cpu_ids. + */ +#define for_each_cpu(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = cpumask_next((cpu), (mask)), \ + (cpu) < nr_cpu_ids;) + +/** + * for_each_cpu_not - iterate over every cpu in a complemented mask + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask pointer + * + * After the loop, cpu is >= nr_cpu_ids. + */ +#define for_each_cpu_not(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = cpumask_next_zero((cpu), (mask)), \ + (cpu) < nr_cpu_ids;) + +extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); + +/** + * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask poiter + * @start: the start location + * + * The implementation does not assume any bit in @mask is set (including @start). + * + * After the loop, cpu is >= nr_cpu_ids. + */ +#define for_each_cpu_wrap(cpu, mask, start) \ + for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \ + (cpu) < nr_cpumask_bits; \ + (cpu) = cpumask_next_wrap((cpu), (mask), (start), true)) + +/** + * for_each_cpu_and - iterate over every cpu in both masks + * @cpu: the (optionally unsigned) integer iterator + * @mask: the first cpumask pointer + * @and: the second cpumask pointer + * + * This saves a temporary CPU mask in many places. 
It is equivalent to: + * struct cpumask tmp; + * cpumask_and(&tmp, &mask, &and); + * for_each_cpu(cpu, &tmp) + * ... + * + * After the loop, cpu is >= nr_cpu_ids. + */ +#define for_each_cpu_and(cpu, mask, and) \ + for ((cpu) = -1; \ + (cpu) = cpumask_next_and((cpu), (mask), (and)), \ + (cpu) < nr_cpu_ids;) +#endif /* SMP */ + +#define CPU_BITS_NONE \ +{ \ + [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ +} + +#define CPU_BITS_CPU0 \ +{ \ + [0] = 1UL \ +} + +/** + * cpumask_set_cpu - set a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @dstp: the cpumask pointer + */ +static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) +{ + set_bit(cpumask_check(cpu), cpumask_bits(dstp)); +} + +static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) +{ + __set_bit(cpumask_check(cpu), cpumask_bits(dstp)); +} + + +/** + * cpumask_clear_cpu - clear a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @dstp: the cpumask pointer + */ +static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) +{ + clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); +} + +static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) +{ + __clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); +} + +/** + * cpumask_test_cpu - test for a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @cpumask: the cpumask pointer + * + * Returns 1 if @cpu is set in @cpumask, else returns 0 + */ +static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) +{ + return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); +} + +/** + * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @cpumask: the cpumask pointer + * + * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 + * + * test_and_set_bit wrapper for cpumasks. + */ +static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) +{ + return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); +} + +/** + * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @cpumask: the cpumask pointer + * + * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 + * + * test_and_clear_bit wrapper for cpumasks. 
+ */ +static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) +{ + return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); +} + +/** + * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask + * @dstp: the cpumask pointer + */ +static inline void cpumask_setall(struct cpumask *dstp) +{ + bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask + * @dstp: the cpumask pointer + */ +static inline void cpumask_clear(struct cpumask *dstp) +{ + bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpumask_and - *dstp = *src1p & *src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + * + * If *@dstp is empty, returns 0, else returns 1 + */ +static inline int cpumask_and(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_or - *dstp = *src1p | *src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + */ +static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, + const struct cpumask *src2p) +{ + bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_xor - *dstp = *src1p ^ *src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + */ +static inline void cpumask_xor(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_andnot - *dstp = *src1p & ~*src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + * + * If *@dstp is empty, returns 0, else returns 1 + */ +static inline int cpumask_andnot(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_complement - *dstp = ~*srcp + * @dstp: the cpumask result + * @srcp: the input to invert + */ +static inline void cpumask_complement(struct cpumask *dstp, + const struct cpumask *srcp) +{ + bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp), + nr_cpumask_bits); +} + +/** + * cpumask_equal - *src1p == *src2p + * @src1p: the first input + * @src2p: the second input + */ +static inline bool cpumask_equal(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits); +} + +/** + * cpumask_intersects - (*src1p & *src2p) != 0 + * @src1p: the first input + * @src2p: the second input + */ +static inline bool cpumask_intersects(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits); +} + +/** + * cpumask_subset - (*src1p & ~*src2p) == 0 + * @src1p: the first input + * @src2p: the second input + * + * Returns 1 if *@src1p is a subset of *@src2p, else returns 0 + */ +static inline int cpumask_subset(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits); +} + +/** + * cpumask_empty - *srcp == 0 + * @srcp: the cpumask to that all cpus < nr_cpu_ids are clear. 
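To illustrate the set operations above, the sketch below intersects a caller-supplied mask with cpu_online_mask and counts the result. It leans on cpumask_var_t, the allocation helpers and cpumask_weight(), all of which appear further down in this header; demo_count_online_in() is a hypothetical helper.

#include <linux/cpumask.h>
#include <linux/gfp.h>

static unsigned int demo_count_online_in(const struct cpumask *requested)
{
	cpumask_var_t tmp;
	unsigned int nr = 0;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return 0;

	/* cpumask_and() returns non-zero when the intersection is not empty */
	if (cpumask_and(tmp, requested, cpu_online_mask))
		nr = cpumask_weight(tmp);

	free_cpumask_var(tmp);
	return nr;
}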
+ */ +static inline bool cpumask_empty(const struct cpumask *srcp) +{ + return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_full - *srcp == 0xFFFFFFFF... + * @srcp: the cpumask to that all cpus < nr_cpu_ids are set. + */ +static inline bool cpumask_full(const struct cpumask *srcp) +{ + return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_weight - Count of bits in *srcp + * @srcp: the cpumask to count bits (< nr_cpu_ids) in. + */ +static inline unsigned int cpumask_weight(const struct cpumask *srcp) +{ + return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_shift_right - *dstp = *srcp >> n + * @dstp: the cpumask result + * @srcp: the input to shift + * @n: the number of bits to shift by + */ +static inline void cpumask_shift_right(struct cpumask *dstp, + const struct cpumask *srcp, int n) +{ + bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n, + nr_cpumask_bits); +} + +/** + * cpumask_shift_left - *dstp = *srcp << n + * @dstp: the cpumask result + * @srcp: the input to shift + * @n: the number of bits to shift by + */ +static inline void cpumask_shift_left(struct cpumask *dstp, + const struct cpumask *srcp, int n) +{ + bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n, + nr_cpumask_bits); +} + +/** + * cpumask_copy - *dstp = *srcp + * @dstp: the result + * @srcp: the input cpumask + */ +static inline void cpumask_copy(struct cpumask *dstp, + const struct cpumask *srcp) +{ + bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_any - pick a "random" cpu from *srcp + * @srcp: the input cpumask + * + * Returns >= nr_cpu_ids if no cpus set. + */ +#define cpumask_any(srcp) cpumask_first(srcp) + +/** + * cpumask_first_and - return the first cpu from *srcp1 & *srcp2 + * @src1p: the first input + * @src2p: the second input + * + * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). + */ +#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p)) + +/** + * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2 + * @mask1: the first input cpumask + * @mask2: the second input cpumask + * + * Returns >= nr_cpu_ids if no cpus set. + */ +#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2)) + +/** + * cpumask_of - the cpumask containing just a given cpu + * @cpu: the cpu (<= nr_cpu_ids) + */ +#define cpumask_of(cpu) (get_cpu_mask(cpu)) + +/** + * cpumask_parse_user - extract a cpumask from a user string + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. + */ +static inline int cpumask_parse_user(const char __user *buf, int len, + struct cpumask *dstp) +{ + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpumask_parselist_user - extract a cpumask from a user string + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. + */ +static inline int cpumask_parselist_user(const char __user *buf, int len, + struct cpumask *dstp) +{ + return bitmap_parselist_user(buf, len, cpumask_bits(dstp), + nr_cpumask_bits); +} + +/** + * cpumask_parse - extract a cpumask from a string + * @buf: the buffer to extract from + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. 
+ */ +static inline int cpumask_parse(const char *buf, struct cpumask *dstp) +{ + char *nl = strchr(buf, '\n'); + unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); + + return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpulist_parse - extract a cpumask from a user string of ranges + * @buf: the buffer to extract from + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. + */ +static inline int cpulist_parse(const char *buf, struct cpumask *dstp) +{ + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpumask_size - size to allocate for a 'struct cpumask' in bytes + */ +static inline unsigned int cpumask_size(void) +{ + return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); +} + +/* + * cpumask_var_t: struct cpumask for stack usage. + * + * Oh, the wicked games we play! In order to make kernel coding a + * little more difficult, we typedef cpumask_var_t to an array or a + * pointer: doing &mask on an array is a noop, so it still works. + * + * ie. + * cpumask_var_t tmpmask; + * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) + * return -ENOMEM; + * + * ... use 'tmpmask' like a normal struct cpumask * ... + * + * free_cpumask_var(tmpmask); + * + * + * However, one notable exception is there. alloc_cpumask_var() allocates + * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has + * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t. + * + * cpumask_var_t tmpmask; + * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) + * return -ENOMEM; + * + * var = *tmpmask; + * + * This code makes NR_CPUS length memcopy and brings to a memory corruption. + * cpumask_copy() provide safe copy functionality. + * + * Note that there is another evil here: If you define a cpumask_var_t + * as a percpu variable then the way to obtain the address of the cpumask + * structure differently influences what this_cpu_* operation needs to be + * used. Please use this_cpu_cpumask_var_t in those cases. The direct use + * of this_cpu_ptr() or this_cpu_read() will lead to failures when the + * other type of cpumask_var_t implementation is configured. + * + * Please also note that __cpumask_var_read_mostly can be used to declare + * a cpumask_var_t variable itself (not its content) as read mostly. 
+ */ +#ifdef CONFIG_CPUMASK_OFFSTACK +typedef struct cpumask *cpumask_var_t; + +#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) +#define __cpumask_var_read_mostly __read_mostly + +bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); +bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); +bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); +bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); +void alloc_bootmem_cpumask_var(cpumask_var_t *mask); +void free_cpumask_var(cpumask_var_t mask); +void free_bootmem_cpumask_var(cpumask_var_t mask); + +static inline bool cpumask_available(cpumask_var_t mask) +{ + return mask != NULL; +} + +#else +typedef struct cpumask cpumask_var_t[1]; + +#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) +#define __cpumask_var_read_mostly + +static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + return true; +} + +static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, + int node) +{ + return true; +} + +static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + cpumask_clear(*mask); + return true; +} + +static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, + int node) +{ + cpumask_clear(*mask); + return true; +} + +static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) +{ +} + +static inline void free_cpumask_var(cpumask_var_t mask) +{ +} + +static inline void free_bootmem_cpumask_var(cpumask_var_t mask) +{ +} + +static inline bool cpumask_available(cpumask_var_t mask) +{ + return true; +} +#endif /* CONFIG_CPUMASK_OFFSTACK */ + +/* It's common to want to use cpu_all_mask in struct member initializers, + * so it has to refer to an address rather than a pointer. */ +extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); +#define cpu_all_mask to_cpumask(cpu_all_bits) + +/* First bits of cpu_bit_bitmap are in fact unset. */ +#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) + +#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) +#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) +#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) + +/* Wrappers for arch boot code to manipulate normally-constant masks */ +void init_cpu_present(const struct cpumask *src); +void init_cpu_possible(const struct cpumask *src); +void init_cpu_online(const struct cpumask *src); + +static inline void reset_cpu_possible_mask(void) +{ + bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS); +} + +static inline void +set_cpu_possible(unsigned int cpu, bool possible) +{ + if (possible) + cpumask_set_cpu(cpu, &__cpu_possible_mask); + else + cpumask_clear_cpu(cpu, &__cpu_possible_mask); +} + +static inline void +set_cpu_present(unsigned int cpu, bool present) +{ + if (present) + cpumask_set_cpu(cpu, &__cpu_present_mask); + else + cpumask_clear_cpu(cpu, &__cpu_present_mask); +} + +static inline void +set_cpu_online(unsigned int cpu, bool online) +{ + if (online) + cpumask_set_cpu(cpu, &__cpu_online_mask); + else + cpumask_clear_cpu(cpu, &__cpu_online_mask); +} + +static inline void +set_cpu_active(unsigned int cpu, bool active) +{ + if (active) + cpumask_set_cpu(cpu, &__cpu_active_mask); + else + cpumask_clear_cpu(cpu, &__cpu_active_mask); +} + + +/** + * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * + * @bitmap: the bitmap + * + * There are a few places where cpumask_var_t isn't appropriate and + * static cpumasks must be used (eg. 
very early boot), yet we don't + * expose the definition of 'struct cpumask'. + * + * This does the conversion, and can be used as a constant initializer. + */ +#define to_cpumask(bitmap) \ + ((struct cpumask *)(1 ? (bitmap) \ + : (void *)sizeof(__check_is_bitmap(bitmap)))) + +static inline int __check_is_bitmap(const unsigned long *bitmap) +{ + return 1; +} + +/* + * Special-case data structure for "single bit set only" constant CPU masks. + * + * We pre-generate all the 64 (or 32) possible bit positions, with enough + * padding to the left and the right, and return the constant pointer + * appropriately offset. + */ +extern const unsigned long + cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; + +static inline const struct cpumask *get_cpu_mask(unsigned int cpu) +{ + const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; + p -= cpu / BITS_PER_LONG; + return to_cpumask(p); +} + +#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) + +#if NR_CPUS <= BITS_PER_LONG +#define CPU_BITS_ALL \ +{ \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ +} + +#else /* NR_CPUS > BITS_PER_LONG */ + +#define CPU_BITS_ALL \ +{ \ + [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ +} +#endif /* NR_CPUS > BITS_PER_LONG */ + +/** + * cpumap_print_to_pagebuf - copies the cpumask into the buffer either + * as comma-separated list of cpus or hex values of cpumask + * @list: indicates whether the cpumap must be list + * @mask: the cpumask to copy + * @buf: the buffer to copy into + * + * Returns the length of the (null-terminated) @buf string, zero if + * nothing is copied. + */ +static inline ssize_t +cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) +{ + return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask), + nr_cpu_ids); +} + +#if NR_CPUS <= BITS_PER_LONG +#define CPU_MASK_ALL \ +(cpumask_t) { { \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ +} } +#else +#define CPU_MASK_ALL \ +(cpumask_t) { { \ + [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ +} } +#endif /* NR_CPUS > BITS_PER_LONG */ + +#define CPU_MASK_NONE \ +(cpumask_t) { { \ + [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ +} } + +#define CPU_MASK_CPU0 \ +(cpumask_t) { { \ + [0] = 1UL \ +} } + +#endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h new file mode 100644 index 000000000..934633a05 --- /dev/null +++ b/include/linux/cpuset.h @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CPUSET_H +#define _LINUX_CPUSET_H +/* + * cpuset interface + * + * Copyright (C) 2003 BULL SA + * Copyright (C) 2004-2006 Silicon Graphics, Inc. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_CPUSETS + +/* + * Static branch rewrites can happen in an arbitrary order for a given + * key. In code paths where we need to loop with read_mems_allowed_begin() and + * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need + * to ensure that begin() always gets rewritten before retry() in the + * disabled -> enabled transition. If not, then if local irqs are disabled + * around the loop, we can deadlock since retry() would always be + * comparing the latest value of the mems_allowed seqcount against 0 as + * begin() still would see cpusets_enabled() as false. 
The enabled -> disabled + * transition should happen in reverse order for the same reasons (want to stop + * looking at real value of mems_allowed.sequence in retry() first). + */ +extern struct static_key_false cpusets_pre_enable_key; +extern struct static_key_false cpusets_enabled_key; +static inline bool cpusets_enabled(void) +{ + return static_branch_unlikely(&cpusets_enabled_key); +} + +static inline void cpuset_inc(void) +{ + static_branch_inc(&cpusets_pre_enable_key); + static_branch_inc(&cpusets_enabled_key); +} + +static inline void cpuset_dec(void) +{ + static_branch_dec(&cpusets_enabled_key); + static_branch_dec(&cpusets_pre_enable_key); +} + +extern int cpuset_init(void); +extern void cpuset_init_smp(void); +extern void cpuset_force_rebuild(void); +extern void cpuset_update_active_cpus(void); +extern void cpuset_wait_for_hotplug(void); +extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); +extern void cpuset_cpus_allowed_fallback(struct task_struct *p); +extern nodemask_t cpuset_mems_allowed(struct task_struct *p); +#define cpuset_current_mems_allowed (current->mems_allowed) +void cpuset_init_current_mems_allowed(void); +int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); + +extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask); + +static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) +{ + if (cpusets_enabled()) + return __cpuset_node_allowed(node, gfp_mask); + return true; +} + +static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +{ + return __cpuset_node_allowed(zone_to_nid(z), gfp_mask); +} + +static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +{ + if (cpusets_enabled()) + return __cpuset_zone_allowed(z, gfp_mask); + return true; +} + +extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, + const struct task_struct *tsk2); + +#define cpuset_memory_pressure_bump() \ + do { \ + if (cpuset_memory_pressure_enabled) \ + __cpuset_memory_pressure_bump(); \ + } while (0) +extern int cpuset_memory_pressure_enabled; +extern void __cpuset_memory_pressure_bump(void); + +extern void cpuset_task_status_allowed(struct seq_file *m, + struct task_struct *task); +extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *tsk); + +extern int cpuset_mem_spread_node(void); +extern int cpuset_slab_spread_node(void); + +static inline int cpuset_do_page_mem_spread(void) +{ + return task_spread_page(current); +} + +static inline int cpuset_do_slab_mem_spread(void) +{ + return task_spread_slab(current); +} + +extern bool current_cpuset_is_being_rebound(void); + +extern void rebuild_sched_domains(void); + +extern void cpuset_print_current_mems_allowed(void); + +/* + * read_mems_allowed_begin is required when making decisions involving + * mems_allowed such as during page allocation. mems_allowed can be updated in + * parallel and depending on the new value an operation can fail potentially + * causing process failure. A retry loop with read_mems_allowed_begin and + * read_mems_allowed_retry prevents these artificial failures. + */ +static inline unsigned int read_mems_allowed_begin(void) +{ + if (!static_branch_unlikely(&cpusets_pre_enable_key)) + return 0; + + return read_seqcount_begin(¤t->mems_allowed_seq); +} + +/* + * If this returns true, the operation that took place after + * read_mems_allowed_begin may have failed artificially due to a concurrent + * update of mems_allowed. 
It is up to the caller to retry the operation if + * appropriate. + */ +static inline bool read_mems_allowed_retry(unsigned int seq) +{ + if (!static_branch_unlikely(&cpusets_enabled_key)) + return false; + + return read_seqcount_retry(¤t->mems_allowed_seq, seq); +} + +static inline void set_mems_allowed(nodemask_t nodemask) +{ + unsigned long flags; + + task_lock(current); + local_irq_save(flags); + write_seqcount_begin(¤t->mems_allowed_seq); + current->mems_allowed = nodemask; + write_seqcount_end(¤t->mems_allowed_seq); + local_irq_restore(flags); + task_unlock(current); +} + +#else /* !CONFIG_CPUSETS */ + +static inline bool cpusets_enabled(void) { return false; } + +static inline int cpuset_init(void) { return 0; } +static inline void cpuset_init_smp(void) {} + +static inline void cpuset_force_rebuild(void) { } + +static inline void cpuset_update_active_cpus(void) +{ + partition_sched_domains(1, NULL, NULL); +} + +static inline void cpuset_wait_for_hotplug(void) { } + +static inline void cpuset_cpus_allowed(struct task_struct *p, + struct cpumask *mask) +{ + cpumask_copy(mask, cpu_possible_mask); +} + +static inline void cpuset_cpus_allowed_fallback(struct task_struct *p) +{ +} + +static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) +{ + return node_possible_map; +} + +#define cpuset_current_mems_allowed (node_states[N_MEMORY]) +static inline void cpuset_init_current_mems_allowed(void) {} + +static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) +{ + return 1; +} + +static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) +{ + return true; +} + +static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +{ + return true; +} + +static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) +{ + return true; +} + +static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, + const struct task_struct *tsk2) +{ + return 1; +} + +static inline void cpuset_memory_pressure_bump(void) {} + +static inline void cpuset_task_status_allowed(struct seq_file *m, + struct task_struct *task) +{ +} + +static inline int cpuset_mem_spread_node(void) +{ + return 0; +} + +static inline int cpuset_slab_spread_node(void) +{ + return 0; +} + +static inline int cpuset_do_page_mem_spread(void) +{ + return 0; +} + +static inline int cpuset_do_slab_mem_spread(void) +{ + return 0; +} + +static inline bool current_cpuset_is_being_rebound(void) +{ + return false; +} + +static inline void rebuild_sched_domains(void) +{ + partition_sched_domains(1, NULL, NULL); +} + +static inline void cpuset_print_current_mems_allowed(void) +{ +} + +static inline void set_mems_allowed(nodemask_t nodemask) +{ +} + +static inline unsigned int read_mems_allowed_begin(void) +{ + return 0; +} + +static inline bool read_mems_allowed_retry(unsigned int seq) +{ + return false; +} + +#endif /* !CONFIG_CPUSETS */ + +#endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h new file mode 100644 index 000000000..525510a9f --- /dev/null +++ b/include/linux/crash_core.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_CRASH_CORE_H +#define LINUX_CRASH_CORE_H + +#include +#include +#include + +#define CRASH_CORE_NOTE_NAME "CORE" +#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4) +#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4) +#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4) + +/* + * The per-cpu notes area is a list of notes 
terminated by a "NULL" + * note header. For kdump, the code in vmcore.c runs in the context + * of the second kernel to combine them into one note. + */ +#define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \ + CRASH_CORE_NOTE_NAME_BYTES + \ + CRASH_CORE_NOTE_DESC_BYTES) + +#define VMCOREINFO_BYTES PAGE_SIZE +#define VMCOREINFO_NOTE_NAME "VMCOREINFO" +#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4) +#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \ + VMCOREINFO_NOTE_NAME_BYTES + \ + VMCOREINFO_BYTES) + +typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4]; + +void crash_update_vmcoreinfo_safecopy(void *ptr); +void crash_save_vmcoreinfo(void); +void arch_crash_save_vmcoreinfo(void); +__printf(1, 2) +void vmcoreinfo_append_str(const char *fmt, ...); +phys_addr_t paddr_vmcoreinfo_note(void); + +#define VMCOREINFO_OSRELEASE(value) \ + vmcoreinfo_append_str("OSRELEASE=%s\n", value) +#define VMCOREINFO_PAGESIZE(value) \ + vmcoreinfo_append_str("PAGESIZE=%ld\n", value) +#define VMCOREINFO_SYMBOL(name) \ + vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name) +#define VMCOREINFO_SYMBOL_ARRAY(name) \ + vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name) +#define VMCOREINFO_SIZE(name) \ + vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ + (unsigned long)sizeof(name)) +#define VMCOREINFO_STRUCT_SIZE(name) \ + vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ + (unsigned long)sizeof(struct name)) +#define VMCOREINFO_OFFSET(name, field) \ + vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \ + (unsigned long)offsetof(struct name, field)) +#define VMCOREINFO_LENGTH(name, value) \ + vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value) +#define VMCOREINFO_NUMBER(name) \ + vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) +#define VMCOREINFO_CONFIG(name) \ + vmcoreinfo_append_str("CONFIG_%s=y\n", #name) + +extern unsigned char *vmcoreinfo_data; +extern size_t vmcoreinfo_size; +extern u32 *vmcoreinfo_note; + +Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, + void *data, size_t data_len); +void final_note(Elf_Word *buf); + +int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, + unsigned long long *crash_size, unsigned long long *crash_base); +int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, + unsigned long long *crash_size, unsigned long long *crash_base); +int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, + unsigned long long *crash_size, unsigned long long *crash_base); + +#endif /* LINUX_CRASH_CORE_H */ diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h new file mode 100644 index 000000000..3e4ba9d75 --- /dev/null +++ b/include/linux/crash_dump.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_CRASH_DUMP_H +#define LINUX_CRASH_DUMP_H + +#include +#include +#include +#include + +#include /* for pgprot_t */ + +#ifdef CONFIG_CRASH_DUMP +#define ELFCORE_ADDR_MAX (-1ULL) +#define ELFCORE_ADDR_ERR (-2ULL) + +extern unsigned long long elfcorehdr_addr; +extern unsigned long long elfcorehdr_size; + +extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size); +extern void elfcorehdr_free(unsigned long long addr); +extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos); +extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); +extern int remap_oldmem_pfn_range(struct vm_area_struct *vma, + unsigned long from, 
unsigned long pfn, + unsigned long size, pgprot_t prot); + +extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, + unsigned long, int); +void vmcore_cleanup(void); + +/* Architecture code defines this if there are other possible ELF + * machine types, e.g. on bi-arch capable hardware. */ +#ifndef vmcore_elf_check_arch_cross +#define vmcore_elf_check_arch_cross(x) 0 +#endif + +/* + * Architecture code can redefine this if there are any special checks + * needed for 32-bit ELF or 64-bit ELF vmcores. In case of 32-bit + * only architecture, vmcore_elf64_check_arch can be set to zero. + */ +#ifndef vmcore_elf32_check_arch +#define vmcore_elf32_check_arch(x) elf_check_arch(x) +#endif + +#ifndef vmcore_elf64_check_arch +#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) +#endif + +/* + * is_kdump_kernel() checks whether this kernel is booting after a panic of + * previous kernel or not. This is determined by checking if previous kernel + * has passed the elf core header address on command line. + * + * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will + * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic + * of previous kernel. + */ + +static inline bool is_kdump_kernel(void) +{ + return elfcorehdr_addr != ELFCORE_ADDR_MAX; +} + +/* is_vmcore_usable() checks if the kernel is booting after a panic and + * the vmcore region is usable. + * + * This makes use of the fact that due to alignment -2ULL is not + * a valid pointer, much in the vain of IS_ERR(), except + * dealing directly with an unsigned long long rather than a pointer. + */ + +static inline int is_vmcore_usable(void) +{ + return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0; +} + +/* vmcore_unusable() marks the vmcore as unusable, + * without disturbing the logic of is_kdump_kernel() + */ + +static inline void vmcore_unusable(void) +{ + if (is_kdump_kernel()) + elfcorehdr_addr = ELFCORE_ADDR_ERR; +} + +#define HAVE_OLDMEM_PFN_IS_RAM 1 +extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)); +extern void unregister_oldmem_pfn_is_ram(void); + +#else /* !CONFIG_CRASH_DUMP */ +static inline bool is_kdump_kernel(void) { return 0; } +#endif /* CONFIG_CRASH_DUMP */ + +extern unsigned long saved_max_pfn; + +/* Device Dump information to be filled by drivers */ +struct vmcoredd_data { + char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */ + unsigned int size; /* Size of the dump */ + /* Driver's registered callback to be invoked to collect dump */ + int (*vmcoredd_callback)(struct vmcoredd_data *data, void *buf); +}; + +#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP +int vmcore_add_device_dump(struct vmcoredd_data *data); +#else +static inline int vmcore_add_device_dump(struct vmcoredd_data *data) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ +#endif /* LINUX_CRASHDUMP_H */ diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h new file mode 100644 index 000000000..72c92c396 --- /dev/null +++ b/include/linux/crc-ccitt.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CRC_CCITT_H +#define _LINUX_CRC_CCITT_H + +#include + +extern u16 const crc_ccitt_table[256]; +extern u16 const crc_ccitt_false_table[256]; + +extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len); +extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len); + +static inline u16 crc_ccitt_byte(u16 crc, const u8 c) +{ + return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; 
+} + +static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c) +{ + return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c]; +} + +#endif /* _LINUX_CRC_CCITT_H */ diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h new file mode 100644 index 000000000..a9953c762 --- /dev/null +++ b/include/linux/crc-itu-t.h @@ -0,0 +1,28 @@ +/* + * crc-itu-t.h - CRC ITU-T V.41 routine + * + * Implements the standard CRC ITU-T V.41: + * Width 16 + * Poly 0x1021 (x^16 + x^12 + x^15 + 1) + * Init 0 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#ifndef CRC_ITU_T_H +#define CRC_ITU_T_H + +#include + +extern u16 const crc_itu_t_table[256]; + +extern u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len); + +static inline u16 crc_itu_t_byte(u16 crc, const u8 data) +{ + return (crc << 8) ^ crc_itu_t_table[((crc >> 8) ^ data) & 0xff]; +} + +#endif /* CRC_ITU_T_H */ + diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h new file mode 100644 index 000000000..1fe0cfcde --- /dev/null +++ b/include/linux/crc-t10dif.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CRC_T10DIF_H +#define _LINUX_CRC_T10DIF_H + +#include + +#define CRC_T10DIF_DIGEST_SIZE 2 +#define CRC_T10DIF_BLOCK_SIZE 1 + +extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, + size_t len); +extern __u16 crc_t10dif(unsigned char const *, size_t); +extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t); + +#endif diff --git a/include/linux/crc16.h b/include/linux/crc16.h new file mode 100644 index 000000000..9443c084f --- /dev/null +++ b/include/linux/crc16.h @@ -0,0 +1,30 @@ +/* + * crc16.h - CRC-16 routine + * + * Implements the standard CRC-16: + * Width 16 + * Poly 0x8005 (x^16 + x^15 + x^2 + 1) + * Init 0 + * + * Copyright (c) 2005 Ben Gardner + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#ifndef __CRC16_H +#define __CRC16_H + +#include + +extern u16 const crc16_table[256]; + +extern u16 crc16(u16 crc, const u8 *buffer, size_t len); + +static inline u16 crc16_byte(u16 crc, const u8 data) +{ + return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff]; +} + +#endif /* __CRC16_H */ + diff --git a/include/linux/crc32.h b/include/linux/crc32.h new file mode 100644 index 000000000..9e8a032c1 --- /dev/null +++ b/include/linux/crc32.h @@ -0,0 +1,79 @@ +/* + * crc32.h + * See linux/lib/crc32.c for license and changes + */ +#ifndef _LINUX_CRC32_H +#define _LINUX_CRC32_H + +#include +#include + +u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len); +u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len); + +/** + * crc32_le_combine - Combine two crc32 check values into one. For two + * sequences of bytes, seq1 and seq2 with lengths len1 + * and len2, crc32_le() check values were calculated + * for each, crc1 and crc2. + * + * @crc1: crc32 of the first block + * @crc2: crc32 of the second block + * @len2: length of the second block + * + * Return: The crc32_le() check value of seq1 and seq2 concatenated, + * requiring only crc1, crc2, and len2. Note: If seq_full denotes + * the concatenated memory area of seq1 with seq2, and crc_full + * the crc32_le() value of seq_full, then crc_full == + * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded + * with the same initializer as crc1, and crc2 seed was 0. See + * also crc32_combine_test(). 
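+ *
+ * A minimal usage sketch (buffer and length names are illustrative):
+ *
+ *	u32 crc1 = crc32_le(~0, buf, len1);
+ *	u32 crc2 = crc32_le(0, buf + len1, len2);
+ *	u32 full = crc32_le_combine(crc1, crc2, len2);
+ *
+ * full is then expected to equal crc32_le(~0, buf, len1 + len2), since
+ * crc1 carries the seed and crc2 was seeded with 0 as required above.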
+ */ +u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len); + +static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2) +{ + return crc32_le_shift(crc1, len2) ^ crc2; +} + +u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len); + +/** + * __crc32c_le_combine - Combine two crc32c check values into one. For two + * sequences of bytes, seq1 and seq2 with lengths len1 + * and len2, __crc32c_le() check values were calculated + * for each, crc1 and crc2. + * + * @crc1: crc32c of the first block + * @crc2: crc32c of the second block + * @len2: length of the second block + * + * Return: The __crc32c_le() check value of seq1 and seq2 concatenated, + * requiring only crc1, crc2, and len2. Note: If seq_full denotes + * the concatenated memory area of seq1 with seq2, and crc_full + * the __crc32c_le() value of seq_full, then crc_full == + * __crc32c_le_combine(crc1, crc2, len2) when crc_full was + * seeded with the same initializer as crc1, and crc2 seed + * was 0. See also crc32c_combine_test(). + */ +u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len); + +static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2) +{ + return __crc32c_le_shift(crc1, len2) ^ crc2; +} + +#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length) + +/* + * Helpers for hash table generation of ethernet nics: + * + * Ethernet sends the least significant bit of a byte first, thus crc32_le + * is used. The output of crc32_le is bit reversed [most significant bit + * is in bit nr 0], thus it must be reversed before use. Except for + * nics that bit swap the result internally... + */ +#define ether_crc(length, data) bitrev32(crc32_le(~0, data, length)) +#define ether_crc_le(length, data) crc32_le(~0, data, length) + +#endif /* _LINUX_CRC32_H */ diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h new file mode 100644 index 000000000..bd21af828 --- /dev/null +++ b/include/linux/crc32c.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CRC32C_H +#define _LINUX_CRC32C_H + +#include + +extern u32 crc32c(u32 crc, const void *address, unsigned int length); +extern const char *crc32c_impl(void); + +/* This macro exists for backwards-compatibility. */ +#define crc32c_le crc32c + +#endif /* _LINUX_CRC32C_H */ diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h new file mode 100644 index 000000000..62c4b7790 --- /dev/null +++ b/include/linux/crc32poly.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CRC32_POLY_H +#define _LINUX_CRC32_POLY_H + +/* + * There are multiple 16-bit CRC polynomials in common use, but this is + * *the* standard CRC-32 polynomial, first popularized by Ethernet. + * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0 + */ +#define CRC32_POLY_LE 0xedb88320 +#define CRC32_POLY_BE 0x04c11db7 + +/* + * This is the CRC32c polynomial, as outlined by Castagnoli. 
+ * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+ + * x^8+x^6+x^0 + */ +#define CRC32C_POLY_LE 0x82F63B78 + +#endif /* _LINUX_CRC32_POLY_H */ diff --git a/include/linux/crc4.h b/include/linux/crc4.h new file mode 100644 index 000000000..bd2c90556 --- /dev/null +++ b/include/linux/crc4.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CRC4_H +#define _LINUX_CRC4_H + +#include + +extern uint8_t crc4(uint8_t c, uint64_t x, int bits); + +#endif /* _LINUX_CRC4_H */ diff --git a/include/linux/crc64.h b/include/linux/crc64.h new file mode 100644 index 000000000..c756e65a1 --- /dev/null +++ b/include/linux/crc64.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * See lib/crc64.c for the related specification and polynomial arithmetic. + */ +#ifndef _LINUX_CRC64_H +#define _LINUX_CRC64_H + +#include + +u64 __pure crc64_be(u64 crc, const void *p, size_t len); +#endif /* _LINUX_CRC64_H */ diff --git a/include/linux/crc7.h b/include/linux/crc7.h new file mode 100644 index 000000000..b462842f3 --- /dev/null +++ b/include/linux/crc7.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CRC7_H +#define _LINUX_CRC7_H +#include + +extern const u8 crc7_be_syndrome_table[256]; + +static inline u8 crc7_be_byte(u8 crc, u8 data) +{ + return crc7_be_syndrome_table[crc ^ data]; +} + +extern u8 crc7_be(u8 crc, const u8 *buffer, size_t len); + +#endif diff --git a/include/linux/crc8.h b/include/linux/crc8.h new file mode 100644 index 000000000..13c8dabb0 --- /dev/null +++ b/include/linux/crc8.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2011 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __CRC8_H_ +#define __CRC8_H_ + +#include + +/* see usage of this value in crc8() description */ +#define CRC8_INIT_VALUE 0xFF + +/* + * Return value of crc8() indicating valid message+crc. This is true + * if a CRC is inverted before transmission. The CRC computed over the + * whole received bitstream is _table[x], where x is the bit pattern + * of the modification (almost always 0xff). + */ +#define CRC8_GOOD_VALUE(_table) (_table[0xFF]) + +/* required table size for crc8 algorithm */ +#define CRC8_TABLE_SIZE 256 + +/* helper macro assuring right table size is used */ +#define DECLARE_CRC8_TABLE(_table) \ + static u8 _table[CRC8_TABLE_SIZE] + +/** + * crc8_populate_lsb - fill crc table for given polynomial in regular bit order. + * + * @table: table to be filled. + * @polynomial: polynomial for which table is to be filled. + * + * This function fills the provided table according the polynomial provided for + * regular bit order (lsb first). Polynomials in CRC algorithms are typically + * represented as shown below. + * + * poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1 + * + * For lsb first direction x^7 maps to the lsb. 
So the polynomial is as below. + * + * - lsb first: poly = 10101011(1) = 0xAB + */ +void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); + +/** + * crc8_populate_msb - fill crc table for given polynomial in reverse bit order. + * + * @table: table to be filled. + * @polynomial: polynomial for which table is to be filled. + * + * This function fills the provided table according the polynomial provided for + * reverse bit order (msb first). Polynomials in CRC algorithms are typically + * represented as shown below. + * + * poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1 + * + * For msb first direction x^7 maps to the msb. So the polynomial is as below. + * + * - msb first: poly = (1)11010101 = 0xD5 + */ +void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); + +/** + * crc8() - calculate a crc8 over the given input data. + * + * @table: crc table used for calculation. + * @pdata: pointer to data buffer. + * @nbytes: number of bytes in data buffer. + * @crc: previous returned crc8 value. + * + * The CRC8 is calculated using the polynomial given in crc8_populate_msb() + * or crc8_populate_lsb(). + * + * The caller provides the initial value (either %CRC8_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When validating a byte + * stream (including CRC8), a final return value of %CRC8_GOOD_VALUE + * indicates the byte stream data can be considered valid. + * + * Reference: + * "A Painless Guide to CRC Error Detection Algorithms", ver 3, Aug 1993 + * Williams, Ross N., rossross.net + * (see URL http://www.ross.net/crc/download/crc_v3.txt). + */ +u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc); + +#endif /* __CRC8_H_ */ diff --git a/include/linux/cred.h b/include/linux/cred.h new file mode 100644 index 000000000..4b081e491 --- /dev/null +++ b/include/linux/cred.h @@ -0,0 +1,418 @@ +/* Credentials management - see Documentation/security/credentials.rst + * + * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_CRED_H +#define _LINUX_CRED_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct cred; +struct inode; + +/* + * COW Supplementary groups list + */ +struct group_info { + atomic_t usage; + int ngroups; + kgid_t gid[0]; +} __randomize_layout; + +/** + * get_group_info - Get a reference to a group info structure + * @group_info: The group info to reference + * + * This gets a reference to a set of supplementary groups. + * + * If the caller is accessing a task's credentials, they must hold the RCU read + * lock when reading. 
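+ *
+ * Purely as an illustration (task and gi are caller-provided names),
+ * taking a reference on another task's supplementary groups might look
+ * like:
+ *
+ *	rcu_read_lock();
+ *	gi = get_group_info(__task_cred(task)->group_info);
+ *	rcu_read_unlock();
+ *	...
+ *	put_group_info(gi);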
+ */ +static inline struct group_info *get_group_info(struct group_info *gi) +{ + atomic_inc(&gi->usage); + return gi; +} + +/** + * put_group_info - Release a reference to a group info structure + * @group_info: The group info to release + */ +#define put_group_info(group_info) \ +do { \ + if (atomic_dec_and_test(&(group_info)->usage)) \ + groups_free(group_info); \ +} while (0) + +extern struct group_info init_groups; +#ifdef CONFIG_MULTIUSER +extern struct group_info *groups_alloc(int); +extern void groups_free(struct group_info *); + +extern int in_group_p(kgid_t); +extern int in_egroup_p(kgid_t); +extern int groups_search(const struct group_info *, kgid_t); + +extern int set_current_groups(struct group_info *); +extern void set_groups(struct cred *, struct group_info *); +extern bool may_setgroups(void); +extern void groups_sort(struct group_info *); +#else +static inline void groups_free(struct group_info *group_info) +{ +} + +static inline int in_group_p(kgid_t grp) +{ + return 1; +} +static inline int in_egroup_p(kgid_t grp) +{ + return 1; +} +static inline int groups_search(const struct group_info *group_info, kgid_t grp) +{ + return 1; +} +#endif + +/* + * The security context of a task + * + * The parts of the context break down into two categories: + * + * (1) The objective context of a task. These parts are used when some other + * task is attempting to affect this one. + * + * (2) The subjective context. These details are used when the task is acting + * upon another object, be that a file, a task, a key or whatever. + * + * Note that some members of this structure belong to both categories - the + * LSM security pointer for instance. + * + * A task has two security pointers. task->real_cred points to the objective + * context that defines that task's actual details. The objective part of this + * context is used whenever that task is acted upon. + * + * task->cred points to the subjective context that defines the details of how + * that task is going to act upon another object. This may be overridden + * temporarily to point to another security context, but normally points to the + * same context as task->real_cred. 
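+ *
+ * For illustration only, a temporary override of the subjective context
+ * typically follows this pattern (new_cred being a caller-prepared cred):
+ *
+ *	const struct cred *old = override_creds(new_cred);
+ *	...act with current->cred == new_cred...
+ *	revert_creds(old);
+ *
+ * task->real_cred is left untouched by such an override.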
+ */ +struct cred { + atomic_t usage; +#ifdef CONFIG_DEBUG_CREDENTIALS + atomic_t subscribers; /* number of processes subscribed */ + void *put_addr; + unsigned magic; +#define CRED_MAGIC 0x43736564 +#define CRED_MAGIC_DEAD 0x44656144 +#endif + kuid_t uid; /* real UID of the task */ + kgid_t gid; /* real GID of the task */ + kuid_t suid; /* saved UID of the task */ + kgid_t sgid; /* saved GID of the task */ + kuid_t euid; /* effective UID of the task */ + kgid_t egid; /* effective GID of the task */ + kuid_t fsuid; /* UID for VFS ops */ + kgid_t fsgid; /* GID for VFS ops */ + unsigned securebits; /* SUID-less security management */ + kernel_cap_t cap_inheritable; /* caps our children can inherit */ + kernel_cap_t cap_permitted; /* caps we're permitted */ + kernel_cap_t cap_effective; /* caps we can actually use */ + kernel_cap_t cap_bset; /* capability bounding set */ + kernel_cap_t cap_ambient; /* Ambient capability set */ +#ifdef CONFIG_KEYS + unsigned char jit_keyring; /* default keyring to attach requested + * keys to */ + struct key __rcu *session_keyring; /* keyring inherited over fork */ + struct key *process_keyring; /* keyring private to this process */ + struct key *thread_keyring; /* keyring private to this thread */ + struct key *request_key_auth; /* assumed request_key authority */ +#endif +#ifdef CONFIG_SECURITY + void *security; /* subjective LSM security */ +#endif + struct user_struct *user; /* real user ID subscription */ + struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ + struct group_info *group_info; /* supplementary groups for euid/fsgid */ + /* RCU deletion */ + union { + int non_rcu; /* Can we skip RCU deletion? */ + struct rcu_head rcu; /* RCU deletion hook */ + }; +} __randomize_layout; + +extern void __put_cred(struct cred *); +extern void exit_creds(struct task_struct *); +extern int copy_creds(struct task_struct *, unsigned long); +extern const struct cred *get_task_cred(struct task_struct *); +extern struct cred *cred_alloc_blank(void); +extern struct cred *prepare_creds(void); +extern struct cred *prepare_exec_creds(void); +extern int commit_creds(struct cred *); +extern void abort_creds(struct cred *); +extern const struct cred *override_creds(const struct cred *); +extern void revert_creds(const struct cred *); +extern struct cred *prepare_kernel_cred(struct task_struct *); +extern int change_create_files_as(struct cred *, struct inode *); +extern int set_security_override(struct cred *, u32); +extern int set_security_override_from_ctx(struct cred *, const char *); +extern int set_create_files_as(struct cred *, struct inode *); +extern void __init cred_init(void); + +/* + * check for validity of credentials + */ +#ifdef CONFIG_DEBUG_CREDENTIALS +extern void __invalid_creds(const struct cred *, const char *, unsigned); +extern void __validate_process_creds(struct task_struct *, + const char *, unsigned); + +extern bool creds_are_invalid(const struct cred *cred); + +static inline void __validate_creds(const struct cred *cred, + const char *file, unsigned line) +{ + if (unlikely(creds_are_invalid(cred))) + __invalid_creds(cred, file, line); +} + +#define validate_creds(cred) \ +do { \ + __validate_creds((cred), __FILE__, __LINE__); \ +} while(0) + +#define validate_process_creds() \ +do { \ + __validate_process_creds(current, __FILE__, __LINE__); \ +} while(0) + +extern void validate_creds_for_do_exit(struct task_struct *); +#else +static inline void validate_creds(const struct cred *cred) +{ +} +static inline void 
validate_creds_for_do_exit(struct task_struct *tsk) +{ +} +static inline void validate_process_creds(void) +{ +} +#endif + +static inline bool cap_ambient_invariant_ok(const struct cred *cred) +{ + return cap_issubset(cred->cap_ambient, + cap_intersect(cred->cap_permitted, + cred->cap_inheritable)); +} + +/** + * get_new_cred - Get a reference on a new set of credentials + * @cred: The new credentials to reference + * + * Get a reference on the specified set of new credentials. The caller must + * release the reference. + */ +static inline struct cred *get_new_cred(struct cred *cred) +{ + atomic_inc(&cred->usage); + return cred; +} + +/** + * get_cred - Get a reference on a set of credentials + * @cred: The credentials to reference + * + * Get a reference on the specified set of credentials. The caller must + * release the reference. If %NULL is passed, it is returned with no action. + * + * This is used to deal with a committed set of credentials. Although the + * pointer is const, this will temporarily discard the const and increment the + * usage count. The purpose of this is to attempt to catch at compile time the + * accidental alteration of a set of credentials that should be considered + * immutable. + */ +static inline const struct cred *get_cred(const struct cred *cred) +{ + struct cred *nonconst_cred = (struct cred *) cred; + if (!cred) + return cred; + validate_creds(cred); + nonconst_cred->non_rcu = 0; + return get_new_cred(nonconst_cred); +} + +/** + * put_cred - Release a reference to a set of credentials + * @cred: The credentials to release + * + * Release a reference to a set of credentials, deleting them when the last ref + * is released. If %NULL is passed, nothing is done. + * + * This takes a const pointer to a set of credentials because the credentials + * on task_struct are attached by const pointers to prevent accidental + * alteration of otherwise immutable credential sets. + */ +static inline void put_cred(const struct cred *_cred) +{ + struct cred *cred = (struct cred *) _cred; + + if (cred) { + validate_creds(cred); + if (atomic_dec_and_test(&(cred)->usage)) + __put_cred(cred); + } +} + +/** + * current_cred - Access the current task's subjective credentials + * + * Access the subjective credentials of the current task. RCU-safe, + * since nobody else can modify it. + */ +#define current_cred() \ + rcu_dereference_protected(current->cred, 1) + +/** + * current_real_cred - Access the current task's objective credentials + * + * Access the objective credentials of the current task. RCU-safe, + * since nobody else can modify it. + */ +#define current_real_cred() \ + rcu_dereference_protected(current->real_cred, 1) + +/** + * __task_cred - Access a task's objective credentials + * @task: The task to query + * + * Access the objective credentials of a task. The caller must hold the RCU + * readlock. + * + * The result of this function should not be passed directly to get_cred(); + * rather get_task_cred() should be used instead. + */ +#define __task_cred(task) \ + rcu_dereference((task)->real_cred) + +/** + * get_current_cred - Get the current task's subjective credentials + * + * Get the subjective credentials of the current task, pinning them so that + * they can't go away. Accessing the current task's credentials directly is + * not permitted. + */ +#define get_current_cred() \ + (get_cred(current_cred())) + +/** + * get_current_user - Get the current task's user_struct + * + * Get the user record of the current task, pinning it so that it can't go + * away. 
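+ *
+ * The reference is typically dropped with free_uid() when the caller is
+ * done with it, e.g. (illustrative only):
+ *
+ *	struct user_struct *user = get_current_user();
+ *	...
+ *	free_uid(user);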
+ */ +#define get_current_user() \ +({ \ + struct user_struct *__u; \ + const struct cred *__cred; \ + __cred = current_cred(); \ + __u = get_uid(__cred->user); \ + __u; \ +}) + +/** + * get_current_groups - Get the current task's supplementary group list + * + * Get the supplementary group list of the current task, pinning it so that it + * can't go away. + */ +#define get_current_groups() \ +({ \ + struct group_info *__groups; \ + const struct cred *__cred; \ + __cred = current_cred(); \ + __groups = get_group_info(__cred->group_info); \ + __groups; \ +}) + +#define task_cred_xxx(task, xxx) \ +({ \ + __typeof__(((struct cred *)NULL)->xxx) ___val; \ + rcu_read_lock(); \ + ___val = __task_cred((task))->xxx; \ + rcu_read_unlock(); \ + ___val; \ +}) + +#define task_uid(task) (task_cred_xxx((task), uid)) +#define task_euid(task) (task_cred_xxx((task), euid)) + +#define current_cred_xxx(xxx) \ +({ \ + current_cred()->xxx; \ +}) + +#define current_uid() (current_cred_xxx(uid)) +#define current_gid() (current_cred_xxx(gid)) +#define current_euid() (current_cred_xxx(euid)) +#define current_egid() (current_cred_xxx(egid)) +#define current_suid() (current_cred_xxx(suid)) +#define current_sgid() (current_cred_xxx(sgid)) +#define current_fsuid() (current_cred_xxx(fsuid)) +#define current_fsgid() (current_cred_xxx(fsgid)) +#define current_cap() (current_cred_xxx(cap_effective)) +#define current_user() (current_cred_xxx(user)) +#define current_security() (current_cred_xxx(security)) + +extern struct user_namespace init_user_ns; +#ifdef CONFIG_USER_NS +#define current_user_ns() (current_cred_xxx(user_ns)) +#else +static inline struct user_namespace *current_user_ns(void) +{ + return &init_user_ns; +} +#endif + + +#define current_uid_gid(_uid, _gid) \ +do { \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_uid) = __cred->uid; \ + *(_gid) = __cred->gid; \ +} while(0) + +#define current_euid_egid(_euid, _egid) \ +do { \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_euid) = __cred->euid; \ + *(_egid) = __cred->egid; \ +} while(0) + +#define current_fsuid_fsgid(_fsuid, _fsgid) \ +do { \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_fsuid) = __cred->fsuid; \ + *(_fsgid) = __cred->fsgid; \ +} while(0) + +#endif /* _LINUX_CRED_H */ diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h new file mode 100644 index 000000000..54741295c --- /dev/null +++ b/include/linux/crush/crush.h @@ -0,0 +1,345 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CEPH_CRUSH_CRUSH_H +#define CEPH_CRUSH_CRUSH_H + +#ifdef __KERNEL__ +# include +# include +#else +# include "crush_compat.h" +#endif + +/* + * CRUSH is a pseudo-random data distribution algorithm that + * efficiently distributes input values (typically, data objects) + * across a heterogeneous, structured storage cluster. 
+ * + * The algorithm was originally described in detail in this paper + * (although the algorithm has evolved somewhat since then): + * + * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf + * + * LGPL2 + */ + + +#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */ + +#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */ +#define CRUSH_MAX_RULESET (1<<8) /* max crush ruleset number */ +#define CRUSH_MAX_RULES CRUSH_MAX_RULESET /* should be the same as max rulesets */ + +#define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u) +#define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u) + +#define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */ +#define CRUSH_ITEM_NONE 0x7fffffff /* no result */ + +/* + * CRUSH uses user-defined "rules" to describe how inputs should be + * mapped to devices. A rule consists of sequence of steps to perform + * to generate the set of output devices. + */ +struct crush_rule_step { + __u32 op; + __s32 arg1; + __s32 arg2; +}; + +/* step op codes */ +enum { + CRUSH_RULE_NOOP = 0, + CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */ + CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */ + /* arg2 = type */ + CRUSH_RULE_CHOOSE_INDEP = 3, /* same */ + CRUSH_RULE_EMIT = 4, /* no args */ + CRUSH_RULE_CHOOSELEAF_FIRSTN = 6, + CRUSH_RULE_CHOOSELEAF_INDEP = 7, + + CRUSH_RULE_SET_CHOOSE_TRIES = 8, /* override choose_total_tries */ + CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */ + CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10, + CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11, + CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12, + CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13 +}; + +/* + * for specifying choose num (arg1) relative to the max parameter + * passed to do_rule + */ +#define CRUSH_CHOOSE_N 0 +#define CRUSH_CHOOSE_N_MINUS(x) (-(x)) + +/* + * The rule mask is used to describe what the rule is intended for. + * Given a ruleset and size of output set, we search through the + * rule list for a matching rule_mask. + */ +struct crush_rule_mask { + __u8 ruleset; + __u8 type; + __u8 min_size; + __u8 max_size; +}; + +struct crush_rule { + __u32 len; + struct crush_rule_mask mask; + struct crush_rule_step steps[0]; +}; + +#define crush_rule_size(len) (sizeof(struct crush_rule) + \ + (len)*sizeof(struct crush_rule_step)) + + + +/* + * A bucket is a named container of other items (either devices or + * other buckets). Items within a bucket are chosen using one of a + * few different algorithms. The table summarizes how the speed of + * each option measures up against mapping stability when items are + * added or removed. + * + * Bucket Alg Speed Additions Removals + * ------------------------------------------------ + * uniform O(1) poor poor + * list O(n) optimal poor + * tree O(log n) good good + * straw O(n) better better + * straw2 O(n) optimal optimal + */ +enum { + CRUSH_BUCKET_UNIFORM = 1, + CRUSH_BUCKET_LIST = 2, + CRUSH_BUCKET_TREE = 3, + CRUSH_BUCKET_STRAW = 4, + CRUSH_BUCKET_STRAW2 = 5, +}; +extern const char *crush_bucket_alg_name(int alg); + +/* + * although tree was a legacy algorithm, it has been buggy, so + * exclude it. 
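+ *
+ * The mask below is keyed by bit position, so a (purely illustrative)
+ * check that bucket b uses a legacy-compatible algorithm would be:
+ *
+ *	if (!(CRUSH_LEGACY_ALLOWED_BUCKET_ALGS & (1 << b->alg)))
+ *		...reject the bucket...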
+ */ +#define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS ( \ + (1 << CRUSH_BUCKET_UNIFORM) | \ + (1 << CRUSH_BUCKET_LIST) | \ + (1 << CRUSH_BUCKET_STRAW)) + +struct crush_bucket { + __s32 id; /* this'll be negative */ + __u16 type; /* non-zero; type=0 is reserved for devices */ + __u8 alg; /* one of CRUSH_BUCKET_* */ + __u8 hash; /* which hash function to use, CRUSH_HASH_* */ + __u32 weight; /* 16-bit fixed point */ + __u32 size; /* num items */ + __s32 *items; + +}; + +/** @ingroup API + * + * Replacement weights for each item in a bucket. The size of the + * array must be exactly the size of the straw2 bucket, just as the + * item_weights array. + * + */ +struct crush_weight_set { + __u32 *weights; /*!< 16.16 fixed point weights + in the same order as items */ + __u32 size; /*!< size of the __weights__ array */ +}; + +/** @ingroup API + * + * Replacement weights and ids for a given straw2 bucket, for + * placement purposes. + * + * When crush_do_rule() chooses the Nth item from a straw2 bucket, the + * replacement weights found at __weight_set[N]__ are used instead of + * the weights from __item_weights__. If __N__ is greater than + * __weight_set_size__, the weights found at __weight_set_size-1__ are + * used instead. For instance if __weight_set__ is: + * + * [ [ 0x10000, 0x20000 ], // position 0 + * [ 0x20000, 0x40000 ] ] // position 1 + * + * choosing the 0th item will use position 0 weights [ 0x10000, 0x20000 ] + * choosing the 1th item will use position 1 weights [ 0x20000, 0x40000 ] + * choosing the 2th item will use position 1 weights [ 0x20000, 0x40000 ] + * etc. + * + */ +struct crush_choose_arg { + __s32 *ids; /*!< values to use instead of items */ + __u32 ids_size; /*!< size of the __ids__ array */ + struct crush_weight_set *weight_set; /*!< weight replacements for + a given position */ + __u32 weight_set_size; /*!< size of the __weight_set__ array */ +}; + +/** @ingroup API + * + * Replacement weights and ids for each bucket in the crushmap. The + * __size__ of the __args__ array must be exactly the same as the + * __map->max_buckets__. + * + * The __crush_choose_arg__ at index N will be used when choosing + * an item from the bucket __map->buckets[N]__ bucket, provided it + * is a straw2 bucket. + * + */ +struct crush_choose_arg_map { +#ifdef __KERNEL__ + struct rb_node node; + s64 choose_args_index; +#endif + struct crush_choose_arg *args; /*!< replacement for each bucket + in the crushmap */ + __u32 size; /*!< size of the __args__ array */ +}; + +struct crush_bucket_uniform { + struct crush_bucket h; + __u32 item_weight; /* 16-bit fixed point; all items equally weighted */ +}; + +struct crush_bucket_list { + struct crush_bucket h; + __u32 *item_weights; /* 16-bit fixed point */ + __u32 *sum_weights; /* 16-bit fixed point. element i is sum + of weights 0..i, inclusive */ +}; + +struct crush_bucket_tree { + struct crush_bucket h; /* note: h.size is _tree_ size, not number of + actual items */ + __u8 num_nodes; + __u32 *node_weights; +}; + +struct crush_bucket_straw { + struct crush_bucket h; + __u32 *item_weights; /* 16-bit fixed point */ + __u32 *straws; /* 16-bit fixed point */ +}; + +struct crush_bucket_straw2 { + struct crush_bucket h; + __u32 *item_weights; /* 16-bit fixed point */ +}; + + + +/* + * CRUSH map includes all buckets, rules, etc. 
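+ *
+ * Bucket ids are negative; as an orientation aid only (not a guaranteed
+ * part of the API), the bucket for a given id is conventionally looked
+ * up as:
+ *
+ *	struct crush_bucket *b = map->buckets[-1 - id];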
+ */ +struct crush_map { + struct crush_bucket **buckets; + struct crush_rule **rules; + + __s32 max_buckets; + __u32 max_rules; + __s32 max_devices; + + /* choose local retries before re-descent */ + __u32 choose_local_tries; + /* choose local attempts using a fallback permutation before + * re-descent */ + __u32 choose_local_fallback_tries; + /* choose attempts before giving up */ + __u32 choose_total_tries; + /* attempt chooseleaf inner descent once for firstn mode; on + * reject retry outer descent. Note that this does *not* + * apply to a collision: in that case we will retry as we used + * to. */ + __u32 chooseleaf_descend_once; + + /* if non-zero, feed r into chooseleaf, bit-shifted right by (r-1) + * bits. a value of 1 is best for new clusters. for legacy clusters + * that want to limit reshuffling, a value of 3 or 4 will make the + * mappings line up a bit better with previous mappings. */ + __u8 chooseleaf_vary_r; + + /* if true, it makes chooseleaf firstn to return stable results (if + * no local retry) so that data migrations would be optimal when some + * device fails. */ + __u8 chooseleaf_stable; + + /* + * This value is calculated after decode or construction by + * the builder. It is exposed here (rather than having a + * 'build CRUSH working space' function) so that callers can + * reserve a static buffer, allocate space on the stack, or + * otherwise avoid calling into the heap allocator if they + * want to. The size of the working space depends on the map, + * while the size of the scratch vector passed to the mapper + * depends on the size of the desired result set. + * + * Nothing stops the caller from allocating both in one swell + * foop and passing in two points, though. + */ + size_t working_size; + +#ifndef __KERNEL__ + /* + * version 0 (original) of straw_calc has various flaws. version 1 + * fixes a few of them. + */ + __u8 straw_calc_version; + + /* + * allowed bucket algs is a bitmask, here the bit positions + * are CRUSH_BUCKET_*. note that these are *bits* and + * CRUSH_BUCKET_* values are not, so we need to or together (1 + * << CRUSH_BUCKET_WHATEVER). The 0th bit is not used to + * minimize confusion (bucket type values start at 1). + */ + __u32 allowed_bucket_algs; + + __u32 *choose_tries; +#else + /* CrushWrapper::choose_args */ + struct rb_root choose_args; +#endif +}; + + +/* crush.c */ +extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos); +extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b); +extern void crush_destroy_bucket_list(struct crush_bucket_list *b); +extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b); +extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b); +extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b); +extern void crush_destroy_bucket(struct crush_bucket *b); +extern void crush_destroy_rule(struct crush_rule *r); +extern void crush_destroy(struct crush_map *map); + +static inline int crush_calc_tree_node(int i) +{ + return ((i+1) << 1)-1; +} + +/* + * These data structures are private to the CRUSH implementation. They + * are exposed in this header file because builder needs their + * definitions to calculate the total working size. + * + * Moving this out of the crush map allow us to treat the CRUSH map as + * immutable within the mapper and removes the requirement for a CRUSH + * map lock. 
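+ *
+ * A rough sketch of the intended use (allocation strategy and gfp flags
+ * are entirely up to the caller, see crush/mapper.h for the helpers):
+ *
+ *	void *work = kmalloc(crush_work_size(map, result_max), GFP_NOIO);
+ *
+ *	crush_init_workspace(map, work);
+ *	crush_do_rule(map, ruleno, x, result, result_max,
+ *		      weight, weight_max, work, NULL);
+ *	kfree(work);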
+ */ +struct crush_work_bucket { + __u32 perm_x; /* @x for which *perm is defined */ + __u32 perm_n; /* num elements of *perm that are permuted/defined */ + __u32 *perm; /* Permutation of the bucket's items */ +}; + +struct crush_work { + struct crush_work_bucket **work; /* Per-bucket working store */ +}; + +#endif diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h new file mode 100644 index 000000000..904df41f7 --- /dev/null +++ b/include/linux/crush/hash.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CEPH_CRUSH_HASH_H +#define CEPH_CRUSH_HASH_H + +#ifdef __KERNEL__ +# include +#else +# include "crush_compat.h" +#endif + +#define CRUSH_HASH_RJENKINS1 0 + +#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1 + +extern const char *crush_hash_name(int type); + +extern __u32 crush_hash32(int type, __u32 a); +extern __u32 crush_hash32_2(int type, __u32 a, __u32 b); +extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c); +extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d); +extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, + __u32 e); + +#endif diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h new file mode 100644 index 000000000..f9b99232f --- /dev/null +++ b/include/linux/crush/mapper.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CEPH_CRUSH_MAPPER_H +#define CEPH_CRUSH_MAPPER_H + +/* + * CRUSH functions for find rules and then mapping an input to an + * output set. + * + * LGPL2 + */ + +#include "crush.h" + +extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size); +int crush_do_rule(const struct crush_map *map, + int ruleno, int x, int *result, int result_max, + const __u32 *weight, int weight_max, + void *cwin, const struct crush_choose_arg *choose_args); + +/* + * Returns the exact amount of workspace that will need to be used + * for a given combination of crush_map and result_max. The caller can + * then allocate this much on its own, either on the stack, in a + * per-thread long-lived buffer, or however it likes. + */ +static inline size_t crush_work_size(const struct crush_map *map, + int result_max) +{ + return map->working_size + result_max * 3 * sizeof(__u32); +} + +void crush_init_workspace(const struct crush_map *map, void *v); + +#endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h new file mode 100644 index 000000000..e8839d3a7 --- /dev/null +++ b/include/linux/crypto.h @@ -0,0 +1,1669 @@ +/* + * Scatterlist Cryptographic API. + * + * Copyright (c) 2002 James Morris + * Copyright (c) 2002 David S. Miller (davem@redhat.com) + * Copyright (c) 2005 Herbert Xu + * + * Portions derived from Cryptoapi, by Alexander Kjeldaas + * and Nettle, by Niels Möller. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _LINUX_CRYPTO_H +#define _LINUX_CRYPTO_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Autoloaded crypto modules should only use a prefixed name to avoid allowing + * arbitrary modules to be loaded. Loading from userspace may still need the + * unprefixed names, so retains those aliases as well. + * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3 + * gcc (e.g. 
avr32 toolchain) uses __LINE__ for uniqueness, and this macro + * expands twice on the same line. Instead, use a separate base name for the + * alias. + */ +#define MODULE_ALIAS_CRYPTO(name) \ + __MODULE_INFO(alias, alias_userspace, name); \ + __MODULE_INFO(alias, alias_crypto, "crypto-" name) + +/* + * Algorithm masks and types. + */ +#define CRYPTO_ALG_TYPE_MASK 0x0000000f +#define CRYPTO_ALG_TYPE_CIPHER 0x00000001 +#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 +#define CRYPTO_ALG_TYPE_AEAD 0x00000003 +#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 +#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 +#define CRYPTO_ALG_TYPE_KPP 0x00000008 +#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a +#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b +#define CRYPTO_ALG_TYPE_RNG 0x0000000c +#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d +#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e +#define CRYPTO_ALG_TYPE_HASH 0x0000000e +#define CRYPTO_ALG_TYPE_SHASH 0x0000000e +#define CRYPTO_ALG_TYPE_AHASH 0x0000000f + +#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e +#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e + +#define CRYPTO_ALG_LARVAL 0x00000010 +#define CRYPTO_ALG_DEAD 0x00000020 +#define CRYPTO_ALG_DYING 0x00000040 +#define CRYPTO_ALG_ASYNC 0x00000080 + +/* + * Set this bit if and only if the algorithm requires another algorithm of + * the same type to handle corner cases. + */ +#define CRYPTO_ALG_NEED_FALLBACK 0x00000100 + +/* + * This bit is set for symmetric key ciphers that have already been wrapped + * with a generic IV generator to prevent them from being wrapped again. + */ +#define CRYPTO_ALG_GENIV 0x00000200 + +/* + * Set if the algorithm has passed automated run-time testing. Note that + * if there is no run-time testing for a given algorithm it is considered + * to have passed. + */ + +#define CRYPTO_ALG_TESTED 0x00000400 + +/* + * Set if the algorithm is an instance that is built from templates. + */ +#define CRYPTO_ALG_INSTANCE 0x00000800 + +/* Set this bit if the algorithm provided is hardware accelerated but + * not available to userspace via instruction set or so. + */ +#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000 + +/* + * Mark a cipher as a service implementation only usable by another + * cipher and never by a normal user of the kernel crypto API + */ +#define CRYPTO_ALG_INTERNAL 0x00002000 + +/* + * Set if the algorithm has a ->setkey() method but can be used without + * calling it first, i.e. there is a default key. + */ +#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 + +/* + * Don't trigger module loading + */ +#define CRYPTO_NOLOAD 0x00008000 + +/* + * Transform masks and values (for crt_flags). + */ +#define CRYPTO_TFM_NEED_KEY 0x00000001 + +#define CRYPTO_TFM_REQ_MASK 0x000fff00 +#define CRYPTO_TFM_RES_MASK 0xfff00000 + +#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 +#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 +#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 +#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 +#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 +#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000 +#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000 +#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000 + +/* + * Miscellaneous stuff. 
+ */ +#define CRYPTO_MAX_ALG_NAME 128 + +/* + * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual + * declaration) is used to ensure that the crypto_tfm context structure is + * aligned correctly for the given architecture so that there are no alignment + * faults for C data types. In particular, this is required on platforms such + * as arm where pointers are 32-bit aligned but there are data types such as + * u64 which require 64-bit alignment. + */ +#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN + +#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN))) + +struct scatterlist; +struct crypto_ablkcipher; +struct crypto_async_request; +struct crypto_blkcipher; +struct crypto_tfm; +struct crypto_type; +struct skcipher_givcrypt_request; + +typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); + +/** + * DOC: Block Cipher Context Data Structures + * + * These data structures define the operating context for each block cipher + * type. + */ + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + + u32 flags; +}; + +struct ablkcipher_request { + struct crypto_async_request base; + + unsigned int nbytes; + + void *info; + + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct blkcipher_desc { + struct crypto_blkcipher *tfm; + void *info; + u32 flags; +}; + +struct cipher_desc { + struct crypto_tfm *tfm; + void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, + const u8 *src, unsigned int nbytes); + void *info; +}; + +/** + * DOC: Block Cipher Algorithm Definitions + * + * These data structures define modular crypto algorithm implementations, + * managed via crypto_register_alg() and crypto_unregister_alg(). + */ + +/** + * struct ablkcipher_alg - asynchronous block cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. 
The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. In case the + * key was stored in transformation context, the key might need to be + * re-programmed into the hardware in this function. This function + * shall not modify the transformation context, as this function may + * be called in parallel with the same transformation object. + * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt + * and the conditions are exactly the same. + * @givencrypt: Update the IV for encryption. With this function, a cipher + * implementation may provide the function on how to update the IV + * for encryption. + * @givdecrypt: Update the IV for decryption. This is the reverse of + * @givencrypt . + * @geniv: The transformation implementation may use an "IV generator" provided + * by the kernel crypto API. Several use cases have a predefined + * approach how IVs are to be updated. For such use cases, the kernel + * crypto API provides ready-to-use implementations that can be + * referenced with this variable. + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * + * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are + * mandatory and must be filled. + */ +struct ablkcipher_alg { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + int (*givencrypt)(struct skcipher_givcrypt_request *req); + int (*givdecrypt)(struct skcipher_givcrypt_request *req); + + const char *geniv; + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + +/** + * struct blkcipher_alg - synchronous block cipher definition + * @min_keysize: see struct ablkcipher_alg + * @max_keysize: see struct ablkcipher_alg + * @setkey: see struct ablkcipher_alg + * @encrypt: see struct ablkcipher_alg + * @decrypt: see struct ablkcipher_alg + * @geniv: see struct ablkcipher_alg + * @ivsize: see struct ablkcipher_alg + * + * All fields except @geniv and @ivsize are mandatory and must be filled. + */ +struct blkcipher_alg { + int (*setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + int (*decrypt)(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + + const char *geniv; + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + +/** + * struct cipher_alg - single-block symmetric ciphers definition + * @cia_min_keysize: Minimum key size supported by the transformation. This is + * the smallest key length supported by this transformation + * algorithm. This must be set to one of the pre-defined + * values as this is not hardware specific. Possible values + * for this field can be found via git grep "_MIN_KEY_SIZE" + * include/crypto/ + * @cia_max_keysize: Maximum key size supported by the transformation. This is + * the largest key length supported by this transformation + * algorithm. 
This must be set to one of the pre-defined values + * as this is not hardware specific. Possible values for this + * field can be found via git grep "_MAX_KEY_SIZE" + * include/crypto/ + * @cia_setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function + * can be called multiple times during the existence of the + * transformation object, so one must make sure the key is properly + * reprogrammed into the hardware. This function is also + * responsible for checking the key length for validity. + * @cia_encrypt: Encrypt a single block. This function is used to encrypt a + * single block of data, which must be @cra_blocksize big. This + * always operates on a full @cra_blocksize and it is not possible + * to encrypt a block of smaller size. The supplied buffers must + * therefore also be at least of @cra_blocksize size. Both the + * input and output buffers are always aligned to @cra_alignmask. + * In case either of the input or output buffer supplied by user + * of the crypto API is not aligned to @cra_alignmask, the crypto + * API will re-align the buffers. The re-alignment means that a + * new buffer will be allocated, the data will be copied into the + * new buffer, then the processing will happen on the new buffer, + * then the data will be copied back into the original buffer and + * finally the new buffer will be freed. In case a software + * fallback was put in place in the @cra_init call, this function + * might need to use the fallback if the algorithm doesn't support + * all of the key sizes. In case the key was stored in + * transformation context, the key might need to be re-programmed + * into the hardware in this function. This function shall not + * modify the transformation context, as this function may be + * called in parallel with the same transformation object. + * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to + * @cia_encrypt, and the conditions are exactly the same. + * + * All fields are mandatory and must be filled. + */ +struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +}; + +struct compress_alg { + int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen); + int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen); +}; + + +#define cra_ablkcipher cra_u.ablkcipher +#define cra_blkcipher cra_u.blkcipher +#define cra_cipher cra_u.cipher +#define cra_compress cra_u.compress + +/** + * struct crypto_alg - definition of a cryptograpic cipher algorithm + * @cra_flags: Flags describing this transformation. See include/linux/crypto.h + * CRYPTO_ALG_* flags for the flags which go in here. Those are + * used for fine-tuning the description of the transformation + * algorithm. + * @cra_blocksize: Minimum block size of this transformation. The size in bytes + * of the smallest possible unit which can be transformed with + * this algorithm. The users must respect this value. 
+ * In case of HASH transformation, it is possible for a smaller + * block than @cra_blocksize to be passed to the crypto API for + * transformation, in case of any other transformation type, an + * error will be returned upon any attempt to transform smaller + * than @cra_blocksize chunks. + * @cra_ctxsize: Size of the operational context of the transformation. This + * value informs the kernel crypto API about the memory size + * needed to be allocated for the transformation context. + * @cra_alignmask: Alignment mask for the input and output data buffer. The data + * buffer containing the input data for the algorithm must be + * aligned to this alignment mask. The data buffer for the + * output data must be aligned to this alignment mask. Note that + * the Crypto API will do the re-alignment in software, but + * only under special conditions and there is a performance hit. + * The re-alignment happens at these occasions for different + * @cra_u types: cipher -- For both input data and output data + * buffer; ahash -- For output hash destination buf; shash -- + * For output hash destination buf. + * This is needed on hardware which is flawed by design and + * cannot pick data from arbitrary addresses. + * @cra_priority: Priority of this transformation implementation. In case + * multiple transformations with same @cra_name are available to + * the Crypto API, the kernel will use the one with highest + * @cra_priority. + * @cra_name: Generic name (usable by multiple implementations) of the + * transformation algorithm. This is the name of the transformation + * itself. This field is used by the kernel when looking up the + * providers of particular transformation. + * @cra_driver_name: Unique name of the transformation provider. This is the + * name of the provider of the transformation. This can be any + * arbitrary value, but in the usual case, this contains the + * name of the chip or provider and the name of the + * transformation algorithm. + * @cra_type: Type of the cryptographic transformation. This is a pointer to + * struct crypto_type, which implements callbacks common for all + * transformation types. There are multiple options: + * &crypto_blkcipher_type, &crypto_ablkcipher_type, + * &crypto_ahash_type, &crypto_rng_type. + * This field might be empty. In that case, there are no common + * callbacks. This is the case for: cipher, compress, shash. + * @cra_u: Callbacks implementing the transformation. This is a union of + * multiple structures. Depending on the type of transformation selected + * by @cra_type and @cra_flags above, the associated structure must be + * filled with callbacks. This field might be empty. This is the case + * for ahash, shash. + * @cra_init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @cra_exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @cra_init, used to remove various changes set in + * @cra_init. + * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher + * definition. See @struct @ablkcipher_alg. 
+ * @cra_u.blkcipher: Union member which contains a synchronous block cipher + * definition See @struct @blkcipher_alg. + * @cra_u.cipher: Union member which contains a single-block symmetric cipher + * definition. See @struct @cipher_alg. + * @cra_u.compress: Union member which contains a (de)compression algorithm. + * See @struct @compress_alg. + * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE + * @cra_list: internally used + * @cra_users: internally used + * @cra_refcnt: internally used + * @cra_destroy: internally used + * + * The struct crypto_alg describes a generic Crypto API algorithm and is common + * for all of the transformations. Any variable not documented here shall not + * be used by a cipher implementation as it is internal to the Crypto API. + */ +struct crypto_alg { + struct list_head cra_list; + struct list_head cra_users; + + u32 cra_flags; + unsigned int cra_blocksize; + unsigned int cra_ctxsize; + unsigned int cra_alignmask; + + int cra_priority; + refcount_t cra_refcnt; + + char cra_name[CRYPTO_MAX_ALG_NAME]; + char cra_driver_name[CRYPTO_MAX_ALG_NAME]; + + const struct crypto_type *cra_type; + + union { + struct ablkcipher_alg ablkcipher; + struct blkcipher_alg blkcipher; + struct cipher_alg cipher; + struct compress_alg compress; + } cra_u; + + int (*cra_init)(struct crypto_tfm *tfm); + void (*cra_exit)(struct crypto_tfm *tfm); + void (*cra_destroy)(struct crypto_alg *alg); + + struct module *cra_module; +} CRYPTO_MINALIGN_ATTR; + +/* + * A helper struct for waiting for completion of async crypto ops + */ +struct crypto_wait { + struct completion completion; + int err; +}; + +/* + * Macro for declaring a crypto op async wait object on stack + */ +#define DECLARE_CRYPTO_WAIT(_wait) \ + struct crypto_wait _wait = { \ + COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 } + +/* + * Async ops completion helper functioons + */ +void crypto_req_done(struct crypto_async_request *req, int err); + +static inline int crypto_wait_req(int err, struct crypto_wait *wait) +{ + switch (err) { + case -EINPROGRESS: + case -EBUSY: + wait_for_completion(&wait->completion); + reinit_completion(&wait->completion); + err = wait->err; + break; + }; + + return err; +} + +static inline void crypto_init_wait(struct crypto_wait *wait) +{ + init_completion(&wait->completion); +} + +/* + * Algorithm registration interface. + */ +int crypto_register_alg(struct crypto_alg *alg); +int crypto_unregister_alg(struct crypto_alg *alg); +int crypto_register_algs(struct crypto_alg *algs, int count); +int crypto_unregister_algs(struct crypto_alg *algs, int count); + +/* + * Algorithm query interface. + */ +int crypto_has_alg(const char *name, u32 type, u32 mask); + +/* + * Transforms: user-instantiated objects which encapsulate algorithms + * and core processing logic. Managed via crypto_alloc_*() and + * crypto_free_*(), as well as the various helpers below. 
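+ *
+ * Editorial sketch, not part of the upstream header: before turning to the
+ * user-side transform objects below, this is roughly how a provider fills in
+ * the struct crypto_alg defined above and hands it to crypto_register_alg().
+ * Everything named "example_*" and the "xor16-example" name are hypothetical,
+ * and the transform itself is a toy, not a real cipher:
+ *
+ *	struct example_ctx {
+ *		u8 key[16];
+ *	};
+ *
+ *	static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
+ *				  unsigned int keylen)
+ *	{
+ *		struct example_ctx *ctx = crypto_tfm_ctx(tfm);
+ *
+ *		if (keylen != sizeof(ctx->key))
+ *			return -EINVAL;
+ *		memcpy(ctx->key, key, keylen);
+ *		return 0;
+ *	}
+ *
+ *	static void example_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+ *	{
+ *		struct example_ctx *ctx = crypto_tfm_ctx(tfm);
+ *		int i;
+ *
+ *		for (i = 0; i < 16; i++)
+ *			dst[i] = src[i] ^ ctx->key[i];	// toy XOR "cipher"
+ *	}
+ *
+ *	static struct crypto_alg example_alg = {
+ *		.cra_name	 = "xor16-example",
+ *		.cra_driver_name = "xor16-example-generic",
+ *		.cra_priority	 = 100,
+ *		.cra_flags	 = CRYPTO_ALG_TYPE_CIPHER,
+ *		.cra_blocksize	 = 16,
+ *		.cra_ctxsize	 = sizeof(struct example_ctx),
+ *		.cra_module	 = THIS_MODULE,
+ *		.cra_u		 = { .cipher = {
+ *			.cia_min_keysize = 16,
+ *			.cia_max_keysize = 16,
+ *			.cia_setkey	 = example_setkey,
+ *			.cia_encrypt	 = example_crypt,
+ *			.cia_decrypt	 = example_crypt,
+ *		} },
+ *	};
+ *
+ * The module's init function would call crypto_register_alg(&example_alg)
+ * and its exit function crypto_unregister_alg(&example_alg); a matching
+ * MODULE_ALIAS_CRYPTO("xor16-example") allows autoloading by name.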
+ */ + +struct ablkcipher_tfm { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + + struct crypto_ablkcipher *base; + + unsigned int ivsize; + unsigned int reqsize; +}; + +struct blkcipher_tfm { + void *iv; + int (*setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes); + int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes); +}; + +struct cipher_tfm { + int (*cit_setkey)(struct crypto_tfm *tfm, + const u8 *key, unsigned int keylen); + void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +}; + +struct compress_tfm { + int (*cot_compress)(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); + int (*cot_decompress)(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); +}; + +#define crt_ablkcipher crt_u.ablkcipher +#define crt_blkcipher crt_u.blkcipher +#define crt_cipher crt_u.cipher +#define crt_compress crt_u.compress + +struct crypto_tfm { + + u32 crt_flags; + + union { + struct ablkcipher_tfm ablkcipher; + struct blkcipher_tfm blkcipher; + struct cipher_tfm cipher; + struct compress_tfm compress; + } crt_u; + + void (*exit)(struct crypto_tfm *tfm); + + struct crypto_alg *__crt_alg; + + void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct crypto_ablkcipher { + struct crypto_tfm base; +}; + +struct crypto_blkcipher { + struct crypto_tfm base; +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + +struct crypto_comp { + struct crypto_tfm base; +}; + +enum { + CRYPTOA_UNSPEC, + CRYPTOA_ALG, + CRYPTOA_TYPE, + CRYPTOA_U32, + __CRYPTOA_MAX, +}; + +#define CRYPTOA_MAX (__CRYPTOA_MAX - 1) + +/* Maximum number of (rtattr) parameters for each template. */ +#define CRYPTO_MAX_ATTRS 32 + +struct crypto_attr_alg { + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +struct crypto_attr_u32 { + u32 num; +}; + +/* + * Transform user interface. + */ + +struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); +void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm); + +static inline void crypto_free_tfm(struct crypto_tfm *tfm) +{ + return crypto_destroy_tfm(tfm, tfm); +} + +int alg_test(const char *driver, const char *alg, u32 type, u32 mask); + +/* + * Transform helpers which query the underlying algorithm. 
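+ *
+ * Editorial sketch, not part of the upstream header: given any transform
+ * handle, for instance one obtained with crypto_alloc_base() above, these
+ * helpers report which implementation was bound.  "cbc(aes)" is only used as
+ * a familiar algorithm name; real callers normally go through the
+ * type-specific allocation wrappers further below:
+ *
+ *	struct crypto_tfm *tfm;
+ *
+ *	tfm = crypto_alloc_base("cbc(aes)", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *
+ *	pr_info("using %s (%s), blocksize %u, alignmask 0x%x\n",
+ *		crypto_tfm_alg_name(tfm),
+ *		crypto_tfm_alg_driver_name(tfm),
+ *		crypto_tfm_alg_blocksize(tfm),
+ *		crypto_tfm_alg_alignmask(tfm));
+ *
+ *	crypto_free_tfm(tfm);
+ *
+ * The helpers only read fields of the bound struct crypto_alg, so they are
+ * cheap and may be called at any point while the handle is held.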
+ */ +static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_name; +} + +static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_driver_name; +} + +static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_priority; +} + +static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; +} + +static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_blocksize; +} + +static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_alignmask; +} + +static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm) +{ + return tfm->crt_flags; +} + +static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags) +{ + tfm->crt_flags |= flags; +} + +static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags) +{ + tfm->crt_flags &= ~flags; +} + +static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) +{ + return tfm->__crt_ctx; +} + +static inline unsigned int crypto_tfm_ctx_alignment(void) +{ + struct crypto_tfm *tfm; + return __alignof__(tfm->__crt_ctx); +} + +/* + * API wrappers. + */ +static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( + struct crypto_tfm *tfm) +{ + return (struct crypto_ablkcipher *)tfm; +} + +static inline u32 crypto_skcipher_type(u32 type) +{ + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + return type; +} + +static inline u32 crypto_skcipher_mask(u32 mask) +{ + mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; + return mask; +} + +/** + * DOC: Asynchronous Block Cipher API + * + * Asynchronous block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto). + * + * Asynchronous cipher operations imply that the function invocation for a + * cipher request returns immediately before the completion of the operation. + * The cipher request is scheduled as a separate kernel thread and therefore + * load-balanced on the different CPUs via the process scheduler. To allow + * the kernel crypto API to inform the caller about the completion of a cipher + * request, the caller must provide a callback function. That function is + * invoked with the cipher handle when the request completes. + * + * To support the asynchronous operation, additional information than just the + * cipher handle must be supplied to the kernel crypto API. That additional + * information is given by filling in the ablkcipher_request data structure. + * + * For the asynchronous block cipher API, the state is maintained with the tfm + * cipher handle. A single tfm can be used across multiple calls and in + * parallel. For asynchronous block cipher calls, context data supplied and + * only used by the caller can be referenced the request data structure in + * addition to the IV used for the cipher request. The maintenance of such + * state information would be important for a crypto driver implementer to + * have, because when calling the callback function upon completion of the + * cipher operation, that callback function may need some information about + * which operation just finished if it invoked multiple in parallel. This + * state information is unused by the kernel crypto API. 
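+ *
+ * Editorial sketch, not part of the upstream header: a typical
+ * "submit and wait" use of this API combines the request helpers below with
+ * the crypto_wait helpers declared earlier in this file.  The handle "tfm" is
+ * assumed to have been allocated elsewhere, "buf", "len" and "iv" are
+ * caller-provided, and error handling is abbreviated:
+ *
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *	struct ablkcipher_request *req;
+ *	struct scatterlist sg;
+ *	int err;
+ *
+ *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *
+ *	sg_init_one(&sg, buf, len);	// in-place: same list as src and dst
+ *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ *					CRYPTO_TFM_REQ_MAY_SLEEP,
+ *					crypto_req_done, &wait);
+ *	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);
+ *
+ *	err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
+ *
+ *	ablkcipher_request_free(req);
+ *	return err;
+ *
+ * crypto_wait_req() converts the asynchronous -EINPROGRESS/-EBUSY return
+ * codes into a sleep until crypto_req_done() runs from the completion
+ * callback, so the caller sees an ordinary synchronous return value.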
+ */ + +static inline struct crypto_tfm *crypto_ablkcipher_tfm( + struct crypto_ablkcipher *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_ablkcipher() - zeroize and free cipher handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) +{ + crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); +} + +/** + * crypto_has_ablkcipher() - Search for the availability of an ablkcipher. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ablkcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the ablkcipher is known to the kernel crypto API; false + * otherwise + */ +static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, + u32 mask) +{ + return crypto_has_alg(alg_name, crypto_skcipher_type(type), + crypto_skcipher_mask(mask)); +} + +static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( + struct crypto_ablkcipher *tfm) +{ + return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; +} + +/** + * crypto_ablkcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the ablkcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_ablkcipher_ivsize( + struct crypto_ablkcipher *tfm) +{ + return crypto_ablkcipher_crt(tfm)->ivsize; +} + +/** + * crypto_ablkcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the ablkcipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_ablkcipher_blocksize( + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm)); +} + +static inline unsigned int crypto_ablkcipher_alignmask( + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm)); +} + +static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm)); +} + +static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags); +} + +static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); +} + +/** + * crypto_ablkcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the ablkcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. 
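+ *
+ * Editorial sketch, not part of the upstream header: a minimal setkey call
+ * with the usual error handling; "tfm", "key" and "keylen" are
+ * caller-provided.  On failure the CRYPTO_TFM_RES_* bits of the transform
+ * flags carry a more precise reason:
+ *
+ *	int err;
+ *
+ *	err = crypto_ablkcipher_setkey(tfm, key, keylen);
+ *	if (err)
+ *		pr_err("setkey failed: %d (flags 0x%x)\n", err,
+ *		       crypto_ablkcipher_get_flags(tfm) & CRYPTO_TFM_RES_MASK);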
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, + const u8 *key, unsigned int keylen) +{ + struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); + + return crt->setkey(crt->base, key, keylen); +} + +/** + * crypto_ablkcipher_reqtfm() - obtain cipher handle from request + * @req: ablkcipher_request out of which the cipher handle is to be obtained + * + * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request + * data structure. + * + * Return: crypto_ablkcipher handle + */ +static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( + struct ablkcipher_request *req) +{ + return __crypto_ablkcipher_cast(req->base.tfm); +} + +/** + * crypto_ablkcipher_encrypt() - encrypt plaintext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the ablkcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * ablkcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + return crt->encrypt(req); +} + +/** + * crypto_ablkcipher_decrypt() - decrypt ciphertext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the ablkcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * ablkcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + return crt->decrypt(req); +} + +/** + * DOC: Asynchronous Cipher Request Handle + * + * The ablkcipher_request data structure contains all pointers to data + * required for the asynchronous cipher operation. This includes the cipher + * handle (which can be used by multiple ablkcipher_request instances), pointer + * to plaintext and ciphertext, asynchronous callback function, etc. It acts + * as a handle to the ablkcipher_request_* API calls in a similar way as + * ablkcipher handle to the crypto_ablkcipher_* API calls. + */ + +/** + * crypto_ablkcipher_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ +static inline unsigned int crypto_ablkcipher_reqsize( + struct crypto_ablkcipher *tfm) +{ + return crypto_ablkcipher_crt(tfm)->reqsize; +} + +/** + * ablkcipher_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing ablkcipher handle in the request + * data structure with a different one. 
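+ *
+ * Editorial sketch, not part of the upstream header: this can be used to
+ * re-target one request, for instance from a hardware handle to a software
+ * fallback, provided the request was allocated with room for the larger of
+ * the two handles' request sizes.  "hw_tfm" and "sw_tfm" are hypothetical
+ * handles and "req" was originally allocated for "hw_tfm":
+ *
+ *	if (crypto_ablkcipher_reqsize(sw_tfm) <=
+ *	    crypto_ablkcipher_reqsize(hw_tfm))
+ *		ablkcipher_request_set_tfm(req, sw_tfm);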
+ */ +static inline void ablkcipher_request_set_tfm( + struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) +{ + req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base); +} + +static inline struct ablkcipher_request *ablkcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct ablkcipher_request, base); +} + +/** + * ablkcipher_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the ablkcipher + * encrypt and decrypt API calls. During the allocation, the provided ablkcipher + * handle is registered in the request data structure. + * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct ablkcipher_request *ablkcipher_request_alloc( + struct crypto_ablkcipher *tfm, gfp_t gfp) +{ + struct ablkcipher_request *req; + + req = kmalloc(sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(tfm), gfp); + + if (likely(req)) + ablkcipher_request_set_tfm(req, tfm); + + return req; +} + +/** + * ablkcipher_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void ablkcipher_request_free(struct ablkcipher_request *req) +{ + kzfree(req); +} + +/** + * ablkcipher_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once the + * cipher operation completes. + * + * The callback function is registered with the ablkcipher_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void ablkcipher_request_set_callback( + struct ablkcipher_request *req, + u32 flags, crypto_completion_t compl, void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * ablkcipher_request_set_crypt() - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @nbytes: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_ablkcipher_ivsize + * + * This function allows setting of the source data and destination data + * scatter / gather lists. 
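+ *
+ * Editorial sketch, not part of the upstream header: an out-of-place setup
+ * with separate plaintext and ciphertext buffers ("ptext", "ctext", "len" and
+ * "iv" are caller-provided, and the IV must be crypto_ablkcipher_ivsize()
+ * bytes long):
+ *
+ *	struct scatterlist src_sg, dst_sg;
+ *
+ *	sg_init_one(&src_sg, ptext, len);
+ *	sg_init_one(&dst_sg, ctext, len);
+ *	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, len, iv);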
+ * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed - the source is the ciphertext and the destination is the plaintext. + */ +static inline void ablkcipher_request_set_crypt( + struct ablkcipher_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int nbytes, void *iv) +{ + req->src = src; + req->dst = dst; + req->nbytes = nbytes; + req->info = iv; +} + +/** + * DOC: Synchronous Block Cipher API + * + * The synchronous block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto) + * + * Synchronous calls, have a context in the tfm. But since a single tfm can be + * used in multiple calls and in parallel, this info should not be changeable + * (unless a lock is used). This applies, for example, to the symmetric key. + * However, the IV is changeable, so there is an iv field in blkcipher_tfm + * structure for synchronous blkcipher api. So, its the only state info that can + * be kept for synchronous calls without using a big lock across a tfm. + * + * The block cipher API allows the use of a complete cipher, i.e. a cipher + * consisting of a template (a block chaining mode) and a single block cipher + * primitive (e.g. AES). + * + * The plaintext data buffer and the ciphertext data buffer are pointed to + * by using scatter/gather lists. The cipher operation is performed + * on all segments of the provided scatter/gather lists. + * + * The kernel crypto API supports a cipher operation "in-place" which means that + * the caller may provide the same scatter/gather list for the plaintext and + * cipher text. After the completion of the cipher operation, the plaintext + * data is replaced with the ciphertext data in case of an encryption and vice + * versa for a decryption. The caller must ensure that the scatter/gather lists + * for the output data point to sufficiently large buffers, i.e. multiples of + * the block size of the cipher. + */ + +static inline struct crypto_blkcipher *__crypto_blkcipher_cast( + struct crypto_tfm *tfm) +{ + return (struct crypto_blkcipher *)tfm; +} + +static inline struct crypto_blkcipher *crypto_blkcipher_cast( + struct crypto_tfm *tfm) +{ + BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER); + return __crypto_blkcipher_cast(tfm); +} + +/** + * crypto_alloc_blkcipher() - allocate synchronous block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * blkcipher cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a block cipher. The returned struct + * crypto_blkcipher is the cipher handle that is required for any subsequent + * API invocation for that block cipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
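+ *
+ * Editorial sketch, not part of the upstream header; "cbc(aes)" is just a
+ * familiar algorithm name, and passing CRYPTO_ALG_ASYNC in the mask requests
+ * a synchronous implementation:
+ *
+ *	struct crypto_blkcipher *tfm;
+ *
+ *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *
+ *	// ... setkey, set_iv, encrypt/decrypt ...
+ *
+ *	crypto_free_blkcipher(tfm);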
+ */ +static inline struct crypto_blkcipher *crypto_alloc_blkcipher( + const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_blkcipher_tfm( + struct crypto_blkcipher *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_blkcipher() - zeroize and free the block cipher handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) +{ + crypto_free_tfm(crypto_blkcipher_tfm(tfm)); +} + +/** + * crypto_has_blkcipher() - Search for the availability of a block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the block cipher is known to the kernel crypto API; false + * otherwise + */ +static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +/** + * crypto_blkcipher_name() - return the name / cra_name from the cipher handle + * @tfm: cipher handle + * + * Return: The character string holding the name of the cipher + */ +static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); +} + +static inline struct blkcipher_tfm *crypto_blkcipher_crt( + struct crypto_blkcipher *tfm) +{ + return &crypto_blkcipher_tfm(tfm)->crt_blkcipher; +} + +static inline struct blkcipher_alg *crypto_blkcipher_alg( + struct crypto_blkcipher *tfm) +{ + return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; +} + +/** + * crypto_blkcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the block cipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) +{ + return crypto_blkcipher_alg(tfm)->ivsize; +} + +/** + * crypto_blkcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the block cipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation. 
+ * + * Return: block size of cipher + */ +static inline unsigned int crypto_blkcipher_blocksize( + struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm)); +} + +static inline unsigned int crypto_blkcipher_alignmask( + struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm)); +} + +static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm)); +} + +static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags); +} + +static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); +} + +/** + * crypto_blkcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the block cipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm), + key, keylen); +} + +/** + * crypto_blkcipher_encrypt() - encrypt plaintext + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * ciphertext + * @src: scatter/gather list that holds the plaintext + * @nbytes: number of bytes of the plaintext to encrypt. + * + * Encrypt plaintext data using the IV set by the caller with a preceding + * call of crypto_blkcipher_set_iv. + * + * The blkcipher_desc data structure must be filled by the caller and can + * reside on the stack. The caller must fill desc as follows: desc.tfm is filled + * with the block cipher handle; desc.flags is filled with either + * CRYPTO_TFM_REQ_MAY_SLEEP or 0. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + desc->info = crypto_blkcipher_crt(desc->tfm)->iv; + return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); +} + +/** + * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * ciphertext + * @src: scatter/gather list that holds the plaintext + * @nbytes: number of bytes of the plaintext to encrypt. + * + * Encrypt plaintext data with the use of an IV that is solely used for this + * cipher operation. Any previously set IV is not used. + * + * The blkcipher_desc data structure must be filled by the caller and can + * reside on the stack. The caller must fill desc as follows: desc.tfm is filled + * with the block cipher handle; desc.info is filled with the IV to be used for + * the current operation; desc.flags is filled with either + * CRYPTO_TFM_REQ_MAY_SLEEP or 0. 
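+ *
+ * Editorial sketch, not part of the upstream header: the descriptor described
+ * above can simply live on the stack ("tfm", "iv", "sg" and "len" are
+ * caller-provided, and the IV must be crypto_blkcipher_ivsize() bytes long):
+ *
+ *	struct blkcipher_desc desc = {
+ *		.tfm	= tfm,
+ *		.info	= iv,
+ *		.flags	= CRYPTO_TFM_REQ_MAY_SLEEP,
+ *	};
+ *	int err;
+ *
+ *	err = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);	// in-place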
+ * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); +} + +/** + * crypto_blkcipher_decrypt() - decrypt ciphertext + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * plaintext + * @src: scatter/gather list that holds the ciphertext + * @nbytes: number of bytes of the ciphertext to decrypt. + * + * Decrypt ciphertext data using the IV set by the caller with a preceding + * call of crypto_blkcipher_set_iv. + * + * The blkcipher_desc data structure must be filled by the caller as documented + * for the crypto_blkcipher_encrypt call above. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + * + */ +static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + desc->info = crypto_blkcipher_crt(desc->tfm)->iv; + return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); +} + +/** + * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * plaintext + * @src: scatter/gather list that holds the ciphertext + * @nbytes: number of bytes of the ciphertext to decrypt. + * + * Decrypt ciphertext data with the use of an IV that is solely used for this + * cipher operation. Any previously set IV is not used. + * + * The blkcipher_desc data structure must be filled by the caller as documented + * for the crypto_blkcipher_encrypt_iv call above. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); +} + +/** + * crypto_blkcipher_set_iv() - set IV for cipher + * @tfm: cipher handle + * @src: buffer holding the IV + * @len: length of the IV in bytes + * + * The caller provided IV is set for the block cipher referenced by the cipher + * handle. + */ +static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, + const u8 *src, unsigned int len) +{ + memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); +} + +/** + * crypto_blkcipher_get_iv() - obtain IV from cipher + * @tfm: cipher handle + * @dst: buffer filled with the IV + * @len: length of the buffer dst + * + * The caller can obtain the IV set for the block cipher referenced by the + * cipher handle and store it into the user-provided buffer. If the buffer + * has an insufficient space, the IV is truncated to fit the buffer. + */ +static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, + u8 *dst, unsigned int len) +{ + memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); +} + +/** + * DOC: Single Block Cipher API + * + * The single block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). + * + * Using the single block cipher API calls, operations with the basic cipher + * primitive can be implemented. 
These cipher primitives exclude any block + * chaining operations including IV handling. + * + * The purpose of this single block cipher API is to support the implementation + * of templates or other concepts that only need to perform the cipher operation + * on one block at a time. Templates invoke the underlying cipher primitive + * block-wise and process either the input or the output data of these cipher + * operations. + */ + +static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_cipher *)tfm; +} + +static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) +{ + BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); + return __crypto_cipher_cast(tfm); +} + +/** + * crypto_alloc_cipher() - allocate single block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a single block cipher. The returned struct + * crypto_cipher is the cipher handle that is required for any subsequent API + * invocation for that single block cipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_cipher() - zeroize and free the single block cipher handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_cipher(struct crypto_cipher *tfm) +{ + crypto_free_tfm(crypto_cipher_tfm(tfm)); +} + +/** + * crypto_has_cipher() - Search for the availability of a single block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the single block cipher is known to the kernel crypto API; + * false otherwise + */ +static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->crt_cipher; +} + +/** + * crypto_cipher_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the single block cipher referenced with the cipher handle + * tfm is returned. 
The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); +} + +static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); +} + +static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) +{ + return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); +} + +static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); +} + +static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); +} + +/** + * crypto_cipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the single block cipher referenced by the + * cipher handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm), + key, keylen); +} + +/** + * crypto_cipher_encrypt_one() - encrypt one block of plaintext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the ciphertext + * @src: buffer holding the plaintext to be encrypted + * + * Invoke the encryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. + */ +static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src) +{ + crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm), + dst, src); +} + +/** + * crypto_cipher_decrypt_one() - decrypt one block of ciphertext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the plaintext + * @src: buffer holding the ciphertext to be decrypted + * + * Invoke the decryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. 
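+ *
+ * Editorial sketch, not part of the upstream header: a complete round trip
+ * through the single block cipher API.  "aes" is used as a familiar example;
+ * "key" is a caller-provided 16-byte key and "in" a caller-provided 16-byte
+ * plaintext block:
+ *
+ *	struct crypto_cipher *tfm;
+ *	u8 out[16], check[16];
+ *	int err;
+ *
+ *	tfm = crypto_alloc_cipher("aes", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *
+ *	err = crypto_cipher_setkey(tfm, key, 16);
+ *	if (!err) {
+ *		crypto_cipher_encrypt_one(tfm, out, in);
+ *		crypto_cipher_decrypt_one(tfm, check, out);	// check == in again
+ *	}
+ *	crypto_free_cipher(tfm);
+ *	return err;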
+ */ +static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src) +{ + crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm), + dst, src); +} + +static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_comp *)tfm; +} + +static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm) +{ + BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) & + CRYPTO_ALG_TYPE_MASK); + return __crypto_comp_cast(tfm); +} + +static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_COMPRESS; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_comp(struct crypto_comp *tfm) +{ + crypto_free_tfm(crypto_comp_tfm(tfm)); +} + +static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_COMPRESS; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline const char *crypto_comp_name(struct crypto_comp *tfm) +{ + return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); +} + +static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm) +{ + return &crypto_comp_tfm(tfm)->crt_compress; +} + +static inline int crypto_comp_compress(struct crypto_comp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm), + src, slen, dst, dlen); +} + +static inline int crypto_comp_decompress(struct crypto_comp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm), + src, slen, dst, dlen); +} + +#endif /* _LINUX_CRYPTO_H */ + diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h new file mode 100644 index 000000000..f6ba4c3e6 --- /dev/null +++ b/include/linux/cryptohash.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CRYPTOHASH_H +#define __CRYPTOHASH_H + +#include + +#define SHA_DIGEST_WORDS 5 +#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8) +#define SHA_WORKSPACE_WORDS 16 + +void sha_init(__u32 *buf); +void sha_transform(__u32 *digest, const char *data, __u32 *W); + +#endif diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h new file mode 100644 index 000000000..cfe83239d --- /dev/null +++ b/include/linux/cs5535.h @@ -0,0 +1,239 @@ +/* + * AMD CS5535/CS5536 definitions + * Copyright (C) 2006 Advanced Micro Devices, Inc. + * Copyright (C) 2009 Andres Salomon + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#ifndef _CS5535_H +#define _CS5535_H + +#include + +/* MSRs */ +#define MSR_GLIU_P2D_RO0 0x10000029 + +#define MSR_LX_GLD_MSR_CONFIG 0x48002001 +#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data + * sheet has the wrong value */ +#define MSR_GLCP_SYS_RSTPLL 0x4C000014 +#define MSR_GLCP_DOTPLL 0x4C000015 + +#define MSR_LBAR_SMB 0x5140000B +#define MSR_LBAR_GPIO 0x5140000C +#define MSR_LBAR_MFGPT 0x5140000D +#define MSR_LBAR_ACPI 0x5140000E +#define MSR_LBAR_PMS 0x5140000F + +#define MSR_DIVIL_SOFT_RESET 0x51400017 + +#define MSR_PIC_YSEL_LOW 0x51400020 +#define MSR_PIC_YSEL_HIGH 0x51400021 +#define MSR_PIC_ZSEL_LOW 0x51400022 +#define MSR_PIC_ZSEL_HIGH 0x51400023 +#define MSR_PIC_IRQM_LPC 0x51400025 + +#define MSR_MFGPT_IRQ 0x51400028 +#define MSR_MFGPT_NR 0x51400029 +#define MSR_MFGPT_SETUP 0x5140002B + +#define MSR_RTC_DOMA_OFFSET 0x51400055 +#define MSR_RTC_MONA_OFFSET 0x51400056 +#define MSR_RTC_CEN_OFFSET 0x51400057 + +#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ + +#define MSR_GX_GLD_MSR_CONFIG 0xC0002001 +#define MSR_GX_MSR_PADSEL 0xC0002011 + +static inline int cs5535_pic_unreqz_select_high(unsigned int group, + unsigned int irq) +{ + uint32_t lo, hi; + + rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi); + lo &= ~(0xF << (group * 4)); + lo |= (irq & 0xF) << (group * 4); + wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi); + return 0; +} + +/* PIC registers */ +#define CS5536_PIC_INT_SEL1 0x4d0 +#define CS5536_PIC_INT_SEL2 0x4d1 + +/* resource sizes */ +#define LBAR_GPIO_SIZE 0xFF +#define LBAR_MFGPT_SIZE 0x40 +#define LBAR_ACPI_SIZE 0x40 +#define LBAR_PMS_SIZE 0x80 + +/* + * PMC registers (PMS block) + * It is only safe to access these registers as dword accesses. + * See CS5536 Specification Update erratas 17 & 18 + */ +#define CS5536_PM_SCLK 0x10 +#define CS5536_PM_IN_SLPCTL 0x20 +#define CS5536_PM_WKXD 0x34 +#define CS5536_PM_WKD 0x30 +#define CS5536_PM_SSC 0x54 + +/* + * PM registers (ACPI block) + * It is only safe to access these registers as dword accesses. + * See CS5536 Specification Update erratas 17 & 18 + */ +#define CS5536_PM1_STS 0x00 +#define CS5536_PM1_EN 0x02 +#define CS5536_PM1_CNT 0x08 +#define CS5536_PM_GPE0_STS 0x18 +#define CS5536_PM_GPE0_EN 0x1c + +/* CS5536_PM1_STS bits */ +#define CS5536_WAK_FLAG (1 << 15) +#define CS5536_RTC_FLAG (1 << 10) +#define CS5536_PWRBTN_FLAG (1 << 8) + +/* CS5536_PM1_EN bits */ +#define CS5536_PM_PWRBTN (1 << 8) +#define CS5536_PM_RTC (1 << 10) + +/* CS5536_PM_GPE0_STS bits */ +#define CS5536_GPIOM7_PME_FLAG (1 << 31) +#define CS5536_GPIOM6_PME_FLAG (1 << 30) + +/* CS5536_PM_GPE0_EN bits */ +#define CS5536_GPIOM7_PME_EN (1 << 31) +#define CS5536_GPIOM6_PME_EN (1 << 30) + +/* VSA2 magic values */ +#define VSA_VRC_INDEX 0xAC1C +#define VSA_VRC_DATA 0xAC1E +#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ +#define VSA_VR_SIGNATURE 0x0003 +#define VSA_VR_MEM_SIZE 0x0200 +#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ +#define GSW_VSA_SIG 0x534d /* General Software signature */ + +#include + +static inline int cs5535_has_vsa2(void) +{ + static int has_vsa2 = -1; + + if (has_vsa2 == -1) { + uint16_t val; + + /* + * The VSA has virtual registers that we can query for a + * signature. 
+ */ + outw(VSA_VR_UNLOCK, VSA_VRC_INDEX); + outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX); + + val = inw(VSA_VRC_DATA); + has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG); + } + + return has_vsa2; +} + +/* GPIOs */ +#define GPIO_OUTPUT_VAL 0x00 +#define GPIO_OUTPUT_ENABLE 0x04 +#define GPIO_OUTPUT_OPEN_DRAIN 0x08 +#define GPIO_OUTPUT_INVERT 0x0C +#define GPIO_OUTPUT_AUX1 0x10 +#define GPIO_OUTPUT_AUX2 0x14 +#define GPIO_PULL_UP 0x18 +#define GPIO_PULL_DOWN 0x1C +#define GPIO_INPUT_ENABLE 0x20 +#define GPIO_INPUT_INVERT 0x24 +#define GPIO_INPUT_FILTER 0x28 +#define GPIO_INPUT_EVENT_COUNT 0x2C +#define GPIO_READ_BACK 0x30 +#define GPIO_INPUT_AUX1 0x34 +#define GPIO_EVENTS_ENABLE 0x38 +#define GPIO_LOCK_ENABLE 0x3C +#define GPIO_POSITIVE_EDGE_EN 0x40 +#define GPIO_NEGATIVE_EDGE_EN 0x44 +#define GPIO_POSITIVE_EDGE_STS 0x48 +#define GPIO_NEGATIVE_EDGE_STS 0x4C + +#define GPIO_FLTR7_AMOUNT 0xD8 + +#define GPIO_MAP_X 0xE0 +#define GPIO_MAP_Y 0xE4 +#define GPIO_MAP_Z 0xE8 +#define GPIO_MAP_W 0xEC + +#define GPIO_FE7_SEL 0xF7 + +void cs5535_gpio_set(unsigned offset, unsigned int reg); +void cs5535_gpio_clear(unsigned offset, unsigned int reg); +int cs5535_gpio_isset(unsigned offset, unsigned int reg); +int cs5535_gpio_set_irq(unsigned group, unsigned irq); +void cs5535_gpio_setup_event(unsigned offset, int pair, int pme); + +/* MFGPTs */ + +#define MFGPT_MAX_TIMERS 8 +#define MFGPT_TIMER_ANY (-1) + +#define MFGPT_DOMAIN_WORKING 1 +#define MFGPT_DOMAIN_STANDBY 2 +#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY) + +#define MFGPT_CMP1 0 +#define MFGPT_CMP2 1 + +#define MFGPT_EVENT_IRQ 0 +#define MFGPT_EVENT_NMI 1 +#define MFGPT_EVENT_RESET 3 + +#define MFGPT_REG_CMP1 0 +#define MFGPT_REG_CMP2 2 +#define MFGPT_REG_COUNTER 4 +#define MFGPT_REG_SETUP 6 + +#define MFGPT_SETUP_CNTEN (1 << 15) +#define MFGPT_SETUP_CMP2 (1 << 14) +#define MFGPT_SETUP_CMP1 (1 << 13) +#define MFGPT_SETUP_SETUP (1 << 12) +#define MFGPT_SETUP_STOPEN (1 << 11) +#define MFGPT_SETUP_EXTEN (1 << 10) +#define MFGPT_SETUP_REVEN (1 << 5) +#define MFGPT_SETUP_CLKSEL (1 << 4) + +struct cs5535_mfgpt_timer; + +extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, + uint16_t reg); +extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg, + uint16_t value); + +extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp, + int event, int enable); +extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, + int *irq, int enable); +extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer, + int domain); +extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer); + +static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer, + int cmp, int *irq) +{ + return cs5535_mfgpt_set_irq(timer, cmp, irq, 1); +} + +static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer, + int cmp, int *irq) +{ + return cs5535_mfgpt_set_irq(timer, cmp, irq, 0); +} + +#endif diff --git a/include/linux/ctype.h b/include/linux/ctype.h new file mode 100644 index 000000000..363b00442 --- /dev/null +++ b/include/linux/ctype.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CTYPE_H +#define _LINUX_CTYPE_H + +/* + * NOTE! This ctype does not handle EOF like the standard C + * library is required to. 
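The MFGPT interface declared above is small enough to show end to end. A hedged sketch that claims any free timer in the working power domain and arms compare 2; the 1000-tick count is an arbitrary illustration, and a real driver would also route an interrupt with cs5535_mfgpt_setup_irq() before enabling events:

#include <linux/cs5535.h>
#include <linux/errno.h>

/* Hedged sketch: allocate a working-domain MFGPT timer and start it counting. */
static struct cs5535_mfgpt_timer *example_mfgpt_start(void)
{
        struct cs5535_mfgpt_timer *timer;

        timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
        if (!timer)
                return NULL;

        cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, 1000);    /* illustrative compare value */
        cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0);
        cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
                           MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
        return timer;
}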
+ */ + +#define _U 0x01 /* upper */ +#define _L 0x02 /* lower */ +#define _D 0x04 /* digit */ +#define _C 0x08 /* cntrl */ +#define _P 0x10 /* punct */ +#define _S 0x20 /* white space (space/lf/tab) */ +#define _X 0x40 /* hex digit */ +#define _SP 0x80 /* hard space (0x20) */ + +extern const unsigned char _ctype[]; + +#define __ismask(x) (_ctype[(int)(unsigned char)(x)]) + +#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) +#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) +#define iscntrl(c) ((__ismask(c)&(_C)) != 0) +static inline int isdigit(int c) +{ + return '0' <= c && c <= '9'; +} +#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) +#define islower(c) ((__ismask(c)&(_L)) != 0) +#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) +#define ispunct(c) ((__ismask(c)&(_P)) != 0) +/* Note: isspace() must return false for %NUL-terminator */ +#define isspace(c) ((__ismask(c)&(_S)) != 0) +#define isupper(c) ((__ismask(c)&(_U)) != 0) +#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) + +#define isascii(c) (((unsigned char)(c))<=0x7f) +#define toascii(c) (((unsigned char)(c))&0x7f) + +static inline unsigned char __tolower(unsigned char c) +{ + if (isupper(c)) + c -= 'A'-'a'; + return c; +} + +static inline unsigned char __toupper(unsigned char c) +{ + if (islower(c)) + c -= 'a'-'A'; + return c; +} + +#define tolower(c) __tolower(c) +#define toupper(c) __toupper(c) + +/* + * Fast implementation of tolower() for internal usage. Do not use in your + * code. + */ +static inline char _tolower(const char c) +{ + return c | 0x20; +} + +/* Fast check for octal digit */ +static inline int isodigit(const char c) +{ + return c >= '0' && c <= '7'; +} + +#endif diff --git a/include/linux/cuda.h b/include/linux/cuda.h new file mode 100644 index 000000000..056867f09 --- /dev/null +++ b/include/linux/cuda.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for talking to the CUDA. The CUDA is a microcontroller + * which controls the ADB, system power, RTC, and various other things. + * + * Copyright (C) 1996 Paul Mackerras. + */ +#ifndef _LINUX_CUDA_H +#define _LINUX_CUDA_H + +#include + + +extern int find_via_cuda(void); +extern int cuda_request(struct adb_request *req, + void (*done)(struct adb_request *), int nbytes, ...); +extern void cuda_poll(void); + +#endif /* _LINUX_CUDA_H */ diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h new file mode 100644 index 000000000..05ee0f194 --- /dev/null +++ b/include/linux/cyclades.h @@ -0,0 +1,364 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $ + * linux/include/linux/cyclades.h + * + * This file was initially written by + * Randolph Bentson and is maintained by + * Ivan Passos . 
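The ctype helpers above are table-driven and safe to call on the NUL terminator, which makes simple in-place parsing straightforward. A small sketch, with the function name purely illustrative:

#include <linux/ctype.h>

/* Hedged sketch: skip leading whitespace, lower-case one token, terminate it. */
static char *example_normalize_token(char *s)
{
        char *p;

        while (isspace(*s))             /* isspace() is false for '\0' */
                s++;
        for (p = s; *p && !isspace(*p); p++)
                *p = tolower(*p);
        *p = '\0';
        return s;
}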
+ * + * This file contains the general definitions for the cyclades.c driver + *$Log: cyclades.h,v $ + *Revision 3.1 2002/01/29 11:36:16 henrique + *added throttle field on struct cyclades_port to indicate whether the + *port is throttled or not + * + *Revision 3.1 2000/04/19 18:52:52 ivan + *converted address fields to unsigned long and added fields for physical + *addresses on cyclades_card structure; + * + *Revision 3.0 1998/11/02 14:20:59 ivan + *added nports field on cyclades_card structure; + * + *Revision 2.5 1998/08/03 16:57:01 ivan + *added cyclades_idle_stats structure; + * + *Revision 2.4 1998/06/01 12:09:53 ivan + *removed closing_wait2 from cyclades_port structure; + * + *Revision 2.3 1998/03/16 18:01:12 ivan + *changes in the cyclades_port structure to get it closer to the + *standard serial port structure; + *added constants for new ioctls; + * + *Revision 2.2 1998/02/17 16:50:00 ivan + *changes in the cyclades_port structure (addition of shutdown_wait and + *chip_rev variables); + *added constants for new ioctls and for CD1400 rev. numbers. + * + *Revision 2.1 1997/10/24 16:03:00 ivan + *added rflow (which allows enabling the CD1400 special flow control + *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to + *cyclades_port structure; + *added Alpha support + * + *Revision 2.0 1997/06/30 10:30:00 ivan + *added some new doorbell command constants related to IOCTLW and + *UART error signaling + * + *Revision 1.8 1997/06/03 15:30:00 ivan + *added constant ZFIRM_HLT + *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin) + * + *Revision 1.7 1997/03/26 10:30:00 daniel + *new entries at the end of cyclades_port struct to reallocate + *variables illegally allocated within card memory. + * + *Revision 1.6 1996/09/09 18:35:30 bentson + *fold in changes for Cyclom-Z -- including structures for + *communicating with board as well modest changes to original + *structures to support new features. + * + *Revision 1.5 1995/11/13 21:13:31 bentson + *changes suggested by Michael Chastain + *to support use of this file in non-kernel applications + * + * + */ +#ifndef _LINUX_CYCLADES_H +#define _LINUX_CYCLADES_H + +#include + + +/* Per card data structure */ +struct cyclades_card { + void __iomem *base_addr; + union { + void __iomem *p9050; + struct RUNTIME_9060 __iomem *p9060; + } ctl_addr; + struct BOARD_CTRL __iomem *board_ctrl; /* cyz specific */ + int irq; + unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */ + unsigned int first_line; /* minor number of first channel on card */ + unsigned int nports; /* Number of ports in the card */ + int bus_index; /* address shift - 0 for ISA, 1 for PCI */ + int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ + u32 hw_ver; + spinlock_t card_lock; + struct cyclades_port *ports; +}; + +/*************************************** + * Memory access functions/macros * + * (required to support Alpha systems) * + ***************************************/ + +#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0) +#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0) +#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0) + +/* + * Statistics counters + */ +struct cyclades_icount { + __u32 cts, dsr, rng, dcd, tx, rx; + __u32 frame, parity, overrun, brk; + __u32 buf_overrun; +}; + +/* + * This is our internal structure for each serial port's state. + * + * Many fields are paralleled by the structure used by the serial_struct + * structure. 
+ * + * For definitions of the flags field, see tty.h + */ + +struct cyclades_port { + int magic; + struct tty_port port; + struct cyclades_card *card; + union { + struct { + void __iomem *base_addr; + } cyy; + struct { + struct CH_CTRL __iomem *ch_ctrl; + struct BUF_CTRL __iomem *buf_ctrl; + } cyz; + } u; + int line; + int flags; /* defined in tty.h */ + int type; /* UART type */ + int read_status_mask; + int ignore_status_mask; + int timeout; + int xmit_fifo_size; + int cor1,cor2,cor3,cor4,cor5; + int tbpr,tco,rbpr,rco; + int baud; + int rflow; + int rtsdtr_inv; + int chip_rev; + int custom_divisor; + u8 x_char; /* to be pushed out ASAP */ + int breakon; + int breakoff; + int xmit_head; + int xmit_tail; + int xmit_cnt; + int default_threshold; + int default_timeout; + unsigned long rflush_count; + struct cyclades_monitor mon; + struct cyclades_idle_stats idle_stats; + struct cyclades_icount icount; + struct completion shutdown_wait; + int throttle; +#ifdef CONFIG_CYZ_INTR + struct timer_list rx_full_timer; +#endif +}; + +#define CLOSING_WAIT_DELAY 30*HZ +#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE +#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF + + +#define CyMAX_CHIPS_PER_CARD 8 +#define CyMAX_CHAR_FIFO 12 +#define CyPORTS_PER_CHIP 4 +#define CD1400_MAX_SPEED 115200 + +#define CyISA_Ywin 0x2000 + +#define CyPCI_Ywin 0x4000 +#define CyPCI_Yctl 0x80 +#define CyPCI_Zctl CTRL_WINDOW_SIZE +#define CyPCI_Zwin 0x80000 +#define CyPCI_Ze_win (2 * CyPCI_Zwin) + +#define PCI_DEVICE_ID_MASK 0x06 + +/**** CD1400 registers ****/ + +#define CD1400_REV_G 0x46 +#define CD1400_REV_J 0x48 + +#define CyRegSize 0x0400 +#define Cy_HwReset 0x1400 +#define Cy_ClrIntr 0x1800 +#define Cy_EpldRev 0x1e00 + +/* Global Registers */ + +#define CyGFRCR (0x40*2) +#define CyRevE (44) +#define CyCAR (0x68*2) +#define CyCHAN_0 (0x00) +#define CyCHAN_1 (0x01) +#define CyCHAN_2 (0x02) +#define CyCHAN_3 (0x03) +#define CyGCR (0x4B*2) +#define CyCH0_SERIAL (0x00) +#define CyCH0_PARALLEL (0x80) +#define CySVRR (0x67*2) +#define CySRModem (0x04) +#define CySRTransmit (0x02) +#define CySRReceive (0x01) +#define CyRICR (0x44*2) +#define CyTICR (0x45*2) +#define CyMICR (0x46*2) +#define CyICR0 (0x00) +#define CyICR1 (0x01) +#define CyICR2 (0x02) +#define CyICR3 (0x03) +#define CyRIR (0x6B*2) +#define CyTIR (0x6A*2) +#define CyMIR (0x69*2) +#define CyIRDirEq (0x80) +#define CyIRBusy (0x40) +#define CyIRUnfair (0x20) +#define CyIRContext (0x1C) +#define CyIRChannel (0x03) +#define CyPPR (0x7E*2) +#define CyCLOCK_20_1MS (0x27) +#define CyCLOCK_25_1MS (0x31) +#define CyCLOCK_25_5MS (0xf4) +#define CyCLOCK_60_1MS (0x75) +#define CyCLOCK_60_2MS (0xea) + +/* Virtual Registers */ + +#define CyRIVR (0x43*2) +#define CyTIVR (0x42*2) +#define CyMIVR (0x41*2) +#define CyIVRMask (0x07) +#define CyIVRRxEx (0x07) +#define CyIVRRxOK (0x03) +#define CyIVRTxOK (0x02) +#define CyIVRMdmOK (0x01) +#define CyTDR (0x63*2) +#define CyRDSR (0x62*2) +#define CyTIMEOUT (0x80) +#define CySPECHAR (0x70) +#define CyBREAK (0x08) +#define CyPARITY (0x04) +#define CyFRAME (0x02) +#define CyOVERRUN (0x01) +#define CyMISR (0x4C*2) +/* see CyMCOR_ and CyMSVR_ for bits*/ +#define CyEOSRR (0x60*2) + +/* Channel Registers */ + +#define CyLIVR (0x18*2) +#define CyMscsr (0x01) +#define CyTdsr (0x02) +#define CyRgdsr (0x03) +#define CyRedsr (0x07) +#define CyCCR (0x05*2) +/* Format 1 */ +#define CyCHAN_RESET (0x80) +#define CyCHIP_RESET (0x81) +#define CyFlushTransFIFO (0x82) +/* Format 2 */ +#define CyCOR_CHANGE (0x40) +#define CyCOR1ch (0x02) +#define 
CyCOR2ch (0x04) +#define CyCOR3ch (0x08) +/* Format 3 */ +#define CySEND_SPEC_1 (0x21) +#define CySEND_SPEC_2 (0x22) +#define CySEND_SPEC_3 (0x23) +#define CySEND_SPEC_4 (0x24) +/* Format 4 */ +#define CyCHAN_CTL (0x10) +#define CyDIS_RCVR (0x01) +#define CyENB_RCVR (0x02) +#define CyDIS_XMTR (0x04) +#define CyENB_XMTR (0x08) +#define CySRER (0x06*2) +#define CyMdmCh (0x80) +#define CyRxData (0x10) +#define CyTxRdy (0x04) +#define CyTxMpty (0x02) +#define CyNNDT (0x01) +#define CyCOR1 (0x08*2) +#define CyPARITY_NONE (0x00) +#define CyPARITY_0 (0x20) +#define CyPARITY_1 (0xA0) +#define CyPARITY_E (0x40) +#define CyPARITY_O (0xC0) +#define Cy_1_STOP (0x00) +#define Cy_1_5_STOP (0x04) +#define Cy_2_STOP (0x08) +#define Cy_5_BITS (0x00) +#define Cy_6_BITS (0x01) +#define Cy_7_BITS (0x02) +#define Cy_8_BITS (0x03) +#define CyCOR2 (0x09*2) +#define CyIXM (0x80) +#define CyTxIBE (0x40) +#define CyETC (0x20) +#define CyAUTO_TXFL (0x60) +#define CyLLM (0x10) +#define CyRLM (0x08) +#define CyRtsAO (0x04) +#define CyCtsAE (0x02) +#define CyDsrAE (0x01) +#define CyCOR3 (0x0A*2) +#define CySPL_CH_DRANGE (0x80) /* special character detect range */ +#define CySPL_CH_DET1 (0x40) /* enable special character detection + on SCHR4-SCHR3 */ +#define CyFL_CTRL_TRNSP (0x20) /* Flow Control Transparency */ +#define CySPL_CH_DET2 (0x10) /* Enable special character detection + on SCHR2-SCHR1 */ +#define CyREC_FIFO (0x0F) /* Receive FIFO threshold */ +#define CyCOR4 (0x1E*2) +#define CyCOR5 (0x1F*2) +#define CyCCSR (0x0B*2) +#define CyRxEN (0x80) +#define CyRxFloff (0x40) +#define CyRxFlon (0x20) +#define CyTxEN (0x08) +#define CyTxFloff (0x04) +#define CyTxFlon (0x02) +#define CyRDCR (0x0E*2) +#define CySCHR1 (0x1A*2) +#define CySCHR2 (0x1B*2) +#define CySCHR3 (0x1C*2) +#define CySCHR4 (0x1D*2) +#define CySCRL (0x22*2) +#define CySCRH (0x23*2) +#define CyLNC (0x24*2) +#define CyMCOR1 (0x15*2) +#define CyMCOR2 (0x16*2) +#define CyRTPR (0x21*2) +#define CyMSVR1 (0x6C*2) +#define CyMSVR2 (0x6D*2) +#define CyANY_DELTA (0xF0) +#define CyDSR (0x80) +#define CyCTS (0x40) +#define CyRI (0x20) +#define CyDCD (0x10) +#define CyDTR (0x02) +#define CyRTS (0x01) +#define CyPVSR (0x6F*2) +#define CyRBPR (0x78*2) +#define CyRCOR (0x7C*2) +#define CyTBPR (0x72*2) +#define CyTCOR (0x76*2) + +/* Custom Registers */ + +#define CyPLX_VER (0x3400) +#define PLX_9050 0x0b +#define PLX_9060 0x0c +#define PLX_9080 0x0d + +/***************************************************************************/ + +#endif /* _LINUX_CYCLADES_H */ diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h new file mode 100644 index 000000000..05b97144d --- /dev/null +++ b/include/linux/davinci_emac.h @@ -0,0 +1,50 @@ +/* + * TI DaVinci EMAC platform support + * + * Author: Kevin Hilman, Deep Root Systems, LLC + * + * 2007 (c) Deep Root Systems, LLC. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ */ +#ifndef _LINUX_DAVINCI_EMAC_H +#define _LINUX_DAVINCI_EMAC_H + +#include +#include + +struct mdio_platform_data { + unsigned long bus_freq; +}; + +struct emac_platform_data { + char mac_addr[ETH_ALEN]; + u32 ctrl_reg_offset; + u32 ctrl_mod_reg_offset; + u32 ctrl_ram_offset; + u32 hw_ram_addr; + u32 ctrl_ram_size; + + /* + * phy_id can be one of the following: + * - NULL : use the first phy on the bus, + * - "" : force to 100/full, no mdio control + * - ":" : use the specified bus and phy + */ + const char *phy_id; + + u8 rmii_en; + u8 version; + bool no_bd_ram; + void (*interrupt_enable) (void); + void (*interrupt_disable) (void); +}; + +enum { + EMAC_VERSION_1, /* DM644x */ + EMAC_VERSION_2, /* DM646x */ +}; + +void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context); +#endif diff --git a/include/linux/dax.h b/include/linux/dax.h new file mode 100644 index 000000000..450b28db9 --- /dev/null +++ b/include/linux/dax.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DAX_H +#define _LINUX_DAX_H + +#include +#include +#include +#include + +struct iomap_ops; +struct dax_device; +struct dax_operations { + /* + * direct_access: translate a device-relative + * logical-page-offset into an absolute physical pfn. Return the + * number of pages available for DAX at that pfn. + */ + long (*direct_access)(struct dax_device *, pgoff_t, long, + void **, pfn_t *); + /* copy_from_iter: required operation for fs-dax direct-i/o */ + size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t, + struct iov_iter *); + /* copy_to_iter: required operation for fs-dax direct-i/o */ + size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t, + struct iov_iter *); +}; + +extern struct attribute_group dax_attribute_group; + +#if IS_ENABLED(CONFIG_DAX) +struct dax_device *dax_get_by_host(const char *host); +struct dax_device *alloc_dax(void *private, const char *host, + const struct dax_operations *ops); +void put_dax(struct dax_device *dax_dev); +void kill_dax(struct dax_device *dax_dev); +void dax_write_cache(struct dax_device *dax_dev, bool wc); +bool dax_write_cache_enabled(struct dax_device *dax_dev); +#else +static inline struct dax_device *dax_get_by_host(const char *host) +{ + return NULL; +} +static inline struct dax_device *alloc_dax(void *private, const char *host, + const struct dax_operations *ops) +{ + /* + * Callers should check IS_ENABLED(CONFIG_DAX) to know if this + * NULL is an error or expected. 
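The dax_operations/alloc_dax() pairing above is easiest to see from the driver side. Everything named example_* below is hypothetical; only the operation signatures, alloc_dax() and dax_write_cache() come from this header, and a real direct_access implementation must also fill in *kaddr and *pfn:

#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pfn_t.h>

static long example_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                                  long nr_pages, void **kaddr, pfn_t *pfn)
{
        /* a real driver translates pgoff into *kaddr/*pfn of its memory here */
        return nr_pages;
}

static size_t example_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                                     void *addr, size_t bytes, struct iov_iter *i)
{
        return copy_from_iter(addr, bytes, i);  /* persistent-memory drivers use flushing variants */
}

static size_t example_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                                   void *addr, size_t bytes, struct iov_iter *i)
{
        return copy_to_iter(addr, bytes, i);
}

static const struct dax_operations example_dax_ops = {
        .direct_access  = example_direct_access,
        .copy_from_iter = example_copy_from_iter,
        .copy_to_iter   = example_copy_to_iter,
};

static struct dax_device *example_register_dax(void *private, const char *host)
{
        struct dax_device *dax_dev;

        dax_dev = alloc_dax(private, host, &example_dax_ops);
        if (!dax_dev)   /* also NULL when CONFIG_DAX is disabled, see the stub above */
                return NULL;

        dax_write_cache(dax_dev, true); /* assume the device has a volatile write cache */
        return dax_dev;
}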
+ */ + return NULL; +} +static inline void put_dax(struct dax_device *dax_dev) +{ +} +static inline void kill_dax(struct dax_device *dax_dev) +{ +} +static inline void dax_write_cache(struct dax_device *dax_dev, bool wc) +{ +} +static inline bool dax_write_cache_enabled(struct dax_device *dax_dev) +{ + return false; +} +#endif + +struct writeback_control; +int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); +#if IS_ENABLED(CONFIG_FS_DAX) +bool __bdev_dax_supported(struct block_device *bdev, int blocksize); +static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize) +{ + return __bdev_dax_supported(bdev, blocksize); +} + +static inline struct dax_device *fs_dax_get_by_host(const char *host) +{ + return dax_get_by_host(host); +} + +static inline void fs_put_dax(struct dax_device *dax_dev) +{ + put_dax(dax_dev); +} + +struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); +int dax_writeback_mapping_range(struct address_space *mapping, + struct block_device *bdev, struct writeback_control *wbc); + +struct page *dax_layout_busy_page(struct address_space *mapping); +bool dax_lock_mapping_entry(struct page *page); +void dax_unlock_mapping_entry(struct page *page); +#else +static inline bool bdev_dax_supported(struct block_device *bdev, + int blocksize) +{ + return false; +} + +static inline struct dax_device *fs_dax_get_by_host(const char *host) +{ + return NULL; +} + +static inline void fs_put_dax(struct dax_device *dax_dev) +{ +} + +static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) +{ + return NULL; +} + +static inline struct page *dax_layout_busy_page(struct address_space *mapping) +{ + return NULL; +} + +static inline int dax_writeback_mapping_range(struct address_space *mapping, + struct block_device *bdev, struct writeback_control *wbc) +{ + return -EOPNOTSUPP; +} + +static inline bool dax_lock_mapping_entry(struct page *page) +{ + if (IS_DAX(page->mapping->host)) + return true; + return false; +} + +static inline void dax_unlock_mapping_entry(struct page *page) +{ +} +#endif + +int dax_read_lock(void); +void dax_read_unlock(int id); +bool dax_alive(struct dax_device *dax_dev); +void *dax_get_private(struct dax_device *dax_dev); +long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, + void **kaddr, pfn_t *pfn); +size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, + size_t bytes, struct iov_iter *i); +size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, + size_t bytes, struct iov_iter *i); +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); + +ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, + const struct iomap_ops *ops); +vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, + pfn_t *pfnp, int *errp, const struct iomap_ops *ops); +vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, + enum page_entry_size pe_size, pfn_t pfn); +int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); +int dax_invalidate_mapping_entry_sync(struct address_space *mapping, + pgoff_t index); + +#ifdef CONFIG_FS_DAX +int __dax_zero_page_range(struct block_device *bdev, + struct dax_device *dax_dev, sector_t sector, + unsigned int offset, unsigned int length); +#else +static inline int __dax_zero_page_range(struct block_device *bdev, + struct dax_device *dax_dev, sector_t sector, + unsigned int offset, unsigned int length) +{ + return -ENXIO; +} +#endif + +static inline 
bool dax_mapping(struct address_space *mapping) +{ + return mapping->host && IS_DAX(mapping->host); +} + +#endif diff --git a/include/linux/dca.h b/include/linux/dca.h new file mode 100644 index 000000000..ad956c2e0 --- /dev/null +++ b/include/linux/dca.h @@ -0,0 +1,82 @@ +/* + * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ +#ifndef DCA_H +#define DCA_H + +#include + +/* DCA Provider API */ + +/* DCA Notifier Interface */ +void dca_register_notify(struct notifier_block *nb); +void dca_unregister_notify(struct notifier_block *nb); + +#define DCA_PROVIDER_ADD 0x0001 +#define DCA_PROVIDER_REMOVE 0x0002 + +struct dca_provider { + struct list_head node; + const struct dca_ops *ops; + struct device *cd; + int id; +}; + +struct dca_domain { + struct list_head node; + struct list_head dca_providers; + struct pci_bus *pci_rc; +}; + +struct dca_ops { + int (*add_requester) (struct dca_provider *, struct device *); + int (*remove_requester) (struct dca_provider *, struct device *); + u8 (*get_tag) (struct dca_provider *, struct device *, + int cpu); + int (*dev_managed) (struct dca_provider *, struct device *); +}; + +struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, + int priv_size); +void free_dca_provider(struct dca_provider *dca); +int register_dca_provider(struct dca_provider *dca, struct device *dev); +void unregister_dca_provider(struct dca_provider *dca, struct device *dev); + +static inline void *dca_priv(struct dca_provider *dca) +{ + return (void *)dca + sizeof(struct dca_provider); +} + +/* Requester API */ +#define DCA_GET_TAG_TWO_ARGS +int dca_add_requester(struct device *dev); +int dca_remove_requester(struct device *dev); +u8 dca_get_tag(int cpu); +u8 dca3_get_tag(struct device *dev, int cpu); + +/* internal stuff */ +int __init dca_sysfs_init(void); +void __exit dca_sysfs_exit(void); +int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev); +void dca_sysfs_remove_provider(struct dca_provider *dca); +int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot); +void dca_sysfs_remove_req(struct dca_provider *dca, int slot); + +#endif /* DCA_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h new file mode 100644 index 000000000..02b1b40fe --- /dev/null +++ b/include/linux/dcache.h @@ -0,0 +1,602 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_DCACHE_H +#define __LINUX_DCACHE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct path; +struct vfsmount; + +/* + * linux/include/linux/dcache.h + * + * Dirent cache data structures + * + * (C) Copyright 1997 Thomas Schoebel-Theuer, + * with heavy 
changes by Linus Torvalds + */ + +#define IS_ROOT(x) ((x) == (x)->d_parent) + +/* The hash is always the low bits of hash_len */ +#ifdef __LITTLE_ENDIAN + #define HASH_LEN_DECLARE u32 hash; u32 len + #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8)) +#else + #define HASH_LEN_DECLARE u32 len; u32 hash + #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8)) +#endif + +/* + * "quick string" -- eases parameter passing, but more importantly + * saves "metadata" about the string (ie length and the hash). + * + * hash comes first so it snuggles against d_parent in the + * dentry. + */ +struct qstr { + union { + struct { + HASH_LEN_DECLARE; + }; + u64 hash_len; + }; + const unsigned char *name; +}; + +#define QSTR_INIT(n,l) { { { .len = l } }, .name = n } + +extern const struct qstr empty_name; +extern const struct qstr slash_name; + +struct dentry_stat_t { + long nr_dentry; + long nr_unused; + long age_limit; /* age in seconds */ + long want_pages; /* pages requested by system */ + long dummy[2]; +}; +extern struct dentry_stat_t dentry_stat; + +/* + * Try to keep struct dentry aligned on 64 byte cachelines (this will + * give reasonable cacheline footprint with larger lines without the + * large memory footprint increase). + */ +#ifdef CONFIG_64BIT +# define DNAME_INLINE_LEN 32 /* 192 bytes */ +#else +# ifdef CONFIG_SMP +# define DNAME_INLINE_LEN 36 /* 128 bytes */ +# else +# define DNAME_INLINE_LEN 40 /* 128 bytes */ +# endif +#endif + +#define d_lock d_lockref.lock + +struct dentry { + /* RCU lookup touched fields */ + unsigned int d_flags; /* protected by d_lock */ + seqcount_t d_seq; /* per dentry seqlock */ + struct hlist_bl_node d_hash; /* lookup hash list */ + struct dentry *d_parent; /* parent directory */ + struct qstr d_name; + struct inode *d_inode; /* Where the name belongs to - NULL is + * negative */ + unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ + + /* Ref lookup also touches following */ + struct lockref d_lockref; /* per-dentry lock and refcount */ + const struct dentry_operations *d_op; + struct super_block *d_sb; /* The root of the dentry tree */ + unsigned long d_time; /* used by d_revalidate */ + void *d_fsdata; /* fs-specific data */ + + union { + struct list_head d_lru; /* LRU list */ + wait_queue_head_t *d_wait; /* in-lookup ones only */ + }; + struct list_head d_child; /* child of parent list */ + struct list_head d_subdirs; /* our children */ + /* + * d_alias and d_rcu can share memory + */ + union { + struct hlist_node d_alias; /* inode alias list */ + struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */ + struct rcu_head d_rcu; + } d_u; +} __randomize_layout; + +/* + * dentry->d_lock spinlock nesting subclasses: + * + * 0: normal + * 1: nested + */ +enum dentry_d_lock_class +{ + DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. 
*/ + DENTRY_D_LOCK_NESTED +}; + +struct dentry_operations { + int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_weak_revalidate)(struct dentry *, unsigned int); + int (*d_hash)(const struct dentry *, struct qstr *); + int (*d_compare)(const struct dentry *, + unsigned int, const char *, const struct qstr *); + int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char *(*d_dname)(struct dentry *, char *, int); + struct vfsmount *(*d_automount)(struct path *); + int (*d_manage)(const struct path *, bool); + struct dentry *(*d_real)(struct dentry *, const struct inode *); +} ____cacheline_aligned; + +/* + * Locking rules for dentry_operations callbacks are to be found in + * Documentation/filesystems/Locking. Keep it updated! + * + * FUrther descriptions are found in Documentation/filesystems/vfs.txt. + * Keep it updated too! + */ + +/* d_flags entries */ +#define DCACHE_OP_HASH 0x00000001 +#define DCACHE_OP_COMPARE 0x00000002 +#define DCACHE_OP_REVALIDATE 0x00000004 +#define DCACHE_OP_DELETE 0x00000008 +#define DCACHE_OP_PRUNE 0x00000010 + +#define DCACHE_DISCONNECTED 0x00000020 + /* This dentry is possibly not currently connected to the dcache tree, in + * which case its parent will either be itself, or will have this flag as + * well. nfsd will not use a dentry with this bit set, but will first + * endeavour to clear the bit either by discovering that it is connected, + * or by performing lookup operations. Any filesystem which supports + * nfsd_operations MUST have a lookup function which, if it finds a + * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that + * dentry into place and return that dentry rather than the passed one, + * typically using d_splice_alias. */ + +#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. 
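A dentry_operations table is usually sparse; most filesystems implement only the hooks whose defaults do not fit. A hedged sketch of a filesystem that declines to cache dentries once their last reference is dropped; "examplefs" is an illustrative name, and such a table is typically installed via sb->s_d_op or d_set_d_op():

#include <linux/dcache.h>

static int examplefs_d_delete(const struct dentry *dentry)
{
        return 1;       /* non-zero: free on final dput() instead of keeping it on the LRU */
}

static const struct dentry_operations examplefs_dentry_ops = {
        .d_delete = examplefs_d_delete,
};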
*/ + +#define DCACHE_CANT_MOUNT 0x00000100 +#define DCACHE_GENOCIDE 0x00000200 +#define DCACHE_SHRINK_LIST 0x00000400 + +#define DCACHE_OP_WEAK_REVALIDATE 0x00000800 + +#define DCACHE_NFSFS_RENAMED 0x00001000 + /* this dentry has been "silly renamed" and has to be deleted on the last + * dput() */ +#define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */ +#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000 + /* Parent inode is watched by some fsnotify listener */ + +#define DCACHE_DENTRY_KILLED 0x00008000 + +#define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */ +#define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */ +#define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */ +#define DCACHE_MANAGED_DENTRY \ + (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) + +#define DCACHE_LRU_LIST 0x00080000 + +#define DCACHE_ENTRY_TYPE 0x00700000 +#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */ +#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */ +#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */ +#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */ +#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */ +#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */ +#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */ + +#define DCACHE_MAY_FREE 0x00800000 +#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ +#define DCACHE_ENCRYPTED_NAME 0x02000000 /* Encrypted name (dir key was unavailable) */ +#define DCACHE_OP_REAL 0x04000000 + +#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ +#define DCACHE_DENTRY_CURSOR 0x20000000 +#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */ + +extern seqlock_t rename_lock; + +/* + * These are the low-level FS interfaces to the dcache.. 
+ */ +extern void d_instantiate(struct dentry *, struct inode *); +extern void d_instantiate_new(struct dentry *, struct inode *); +extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); +extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); +extern void __d_drop(struct dentry *dentry); +extern void d_drop(struct dentry *dentry); +extern void d_delete(struct dentry *); +extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op); + +/* allocate/de-allocate */ +extern struct dentry * d_alloc(struct dentry *, const struct qstr *); +extern struct dentry * d_alloc_anon(struct super_block *); +extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); +extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, + wait_queue_head_t *); +extern struct dentry * d_splice_alias(struct inode *, struct dentry *); +extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); +extern struct dentry * d_exact_alias(struct dentry *, struct inode *); +extern struct dentry *d_find_any_alias(struct inode *inode); +extern struct dentry * d_obtain_alias(struct inode *); +extern struct dentry * d_obtain_root(struct inode *); +extern void shrink_dcache_sb(struct super_block *); +extern void shrink_dcache_parent(struct dentry *); +extern void shrink_dcache_for_umount(struct super_block *); +extern void d_invalidate(struct dentry *); + +/* only used at mount-time */ +extern struct dentry * d_make_root(struct inode *); + +/* - the ramfs-type tree */ +extern void d_genocide(struct dentry *); + +extern void d_tmpfile(struct dentry *, struct inode *); + +extern struct dentry *d_find_alias(struct inode *); +extern void d_prune_aliases(struct inode *); + +/* test whether we have any submounts in a subdir tree */ +extern int path_has_submounts(const struct path *); + +/* + * This adds the entry to the hash queues. + */ +extern void d_rehash(struct dentry *); + +extern void d_add(struct dentry *, struct inode *); + +/* used for rename() and baskets */ +extern void d_move(struct dentry *, struct dentry *); +extern void d_exchange(struct dentry *, struct dentry *); +extern struct dentry *d_ancestor(struct dentry *, struct dentry *); + +/* appendix may either be NULL or be used for transname suffixes */ +extern struct dentry *d_lookup(const struct dentry *, const struct qstr *); +extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *); +extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *); +extern struct dentry *__d_lookup_rcu(const struct dentry *parent, + const struct qstr *name, unsigned *seq); + +static inline unsigned d_count(const struct dentry *dentry) +{ + return dentry->d_lockref.count; +} + +/* + * helper function for dentry_operations.d_dname() members + */ +extern __printf(4, 5) +char *dynamic_dname(struct dentry *, char *, int, const char *, ...); +extern char *simple_dname(struct dentry *, char *, int); + +extern char *__d_path(const struct path *, const struct path *, char *, int); +extern char *d_absolute_path(const struct path *, char *, int); +extern char *d_path(const struct path *, char *, int); +extern char *dentry_path_raw(struct dentry *, char *, int); +extern char *dentry_path(struct dentry *, char *, int); + +/* Allocation counts.. 
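Of the allocation and lookup calls above, d_splice_alias() is the one most ->lookup() implementations are built around. A hedged sketch; examplefs_inode_by_name() is a hypothetical directory search, and the stub below simply treats every name as a miss so the dentry ends up hashed negative:

#include <linux/dcache.h>
#include <linux/fs.h>

/* hypothetical directory search; a real filesystem would consult its own metadata */
static struct inode *examplefs_inode_by_name(struct inode *dir, const struct qstr *name)
{
        return NULL;
}

static struct dentry *examplefs_lookup(struct inode *dir, struct dentry *dentry,
                                       unsigned int flags)
{
        struct inode *inode = examplefs_inode_by_name(dir, &dentry->d_name);

        /* d_splice_alias() hashes a negative dentry for a NULL inode and may
         * hand back an existing alias, which is what ->lookup() must return. */
        return d_splice_alias(inode, dentry);
}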
*/ + +/** + * dget, dget_dlock - get a reference to a dentry + * @dentry: dentry to get a reference to + * + * Given a dentry or %NULL pointer increment the reference count + * if appropriate and return the dentry. A dentry will not be + * destroyed when it has references. + */ +static inline struct dentry *dget_dlock(struct dentry *dentry) +{ + if (dentry) + dentry->d_lockref.count++; + return dentry; +} + +static inline struct dentry *dget(struct dentry *dentry) +{ + if (dentry) + lockref_get(&dentry->d_lockref); + return dentry; +} + +extern struct dentry *dget_parent(struct dentry *dentry); + +/** + * d_unhashed - is dentry hashed + * @dentry: entry to check + * + * Returns true if the dentry passed is not currently hashed. + */ + +static inline int d_unhashed(const struct dentry *dentry) +{ + return hlist_bl_unhashed(&dentry->d_hash); +} + +static inline int d_unlinked(const struct dentry *dentry) +{ + return d_unhashed(dentry) && !IS_ROOT(dentry); +} + +static inline int cant_mount(const struct dentry *dentry) +{ + return (dentry->d_flags & DCACHE_CANT_MOUNT); +} + +static inline void dont_mount(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + dentry->d_flags |= DCACHE_CANT_MOUNT; + spin_unlock(&dentry->d_lock); +} + +extern void __d_lookup_done(struct dentry *); + +static inline int d_in_lookup(const struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_PAR_LOOKUP; +} + +static inline void d_lookup_done(struct dentry *dentry) +{ + if (unlikely(d_in_lookup(dentry))) { + spin_lock(&dentry->d_lock); + __d_lookup_done(dentry); + spin_unlock(&dentry->d_lock); + } +} + +extern void dput(struct dentry *); + +static inline bool d_managed(const struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_MANAGED_DENTRY; +} + +static inline bool d_mountpoint(const struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_MOUNTED; +} + +/* + * Directory cache entry type accessor functions. + */ +static inline unsigned __d_entry_type(const struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_ENTRY_TYPE; +} + +static inline bool d_is_miss(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_MISS_TYPE; +} + +static inline bool d_is_whiteout(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE; +} + +static inline bool d_can_lookup(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE; +} + +static inline bool d_is_autodir(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE; +} + +static inline bool d_is_dir(const struct dentry *dentry) +{ + return d_can_lookup(dentry) || d_is_autodir(dentry); +} + +static inline bool d_is_symlink(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE; +} + +static inline bool d_is_reg(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE; +} + +static inline bool d_is_special(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE; +} + +static inline bool d_is_file(const struct dentry *dentry) +{ + return d_is_reg(dentry) || d_is_special(dentry); +} + +static inline bool d_is_negative(const struct dentry *dentry) +{ + // TODO: check d_is_whiteout(dentry) also. 
+ return d_is_miss(dentry); +} + +static inline bool d_is_positive(const struct dentry *dentry) +{ + return !d_is_negative(dentry); +} + +/** + * d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs) + * @dentry: The dentry in question + * + * Returns true if the dentry represents either an absent name or a name that + * doesn't map to an inode (ie. ->d_inode is NULL). The dentry could represent + * a true miss, a whiteout that isn't represented by a 0,0 chardev or a + * fallthrough marker in an opaque directory. + * + * Note! (1) This should be used *only* by a filesystem to examine its own + * dentries. It should not be used to look at some other filesystem's + * dentries. (2) It should also be used in combination with d_inode() to get + * the inode. (3) The dentry may have something attached to ->d_lower and the + * type field of the flags may be set to something other than miss or whiteout. + */ +static inline bool d_really_is_negative(const struct dentry *dentry) +{ + return dentry->d_inode == NULL; +} + +/** + * d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs) + * @dentry: The dentry in question + * + * Returns true if the dentry represents a name that maps to an inode + * (ie. ->d_inode is not NULL). The dentry might still represent a whiteout if + * that is represented on medium as a 0,0 chardev. + * + * Note! (1) This should be used *only* by a filesystem to examine its own + * dentries. It should not be used to look at some other filesystem's + * dentries. (2) It should also be used in combination with d_inode() to get + * the inode. + */ +static inline bool d_really_is_positive(const struct dentry *dentry) +{ + return dentry->d_inode != NULL; +} + +static inline int simple_positive(const struct dentry *dentry) +{ + return d_really_is_positive(dentry) && !d_unhashed(dentry); +} + +extern void d_set_fallthru(struct dentry *dentry); + +static inline bool d_is_fallthru(const struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_FALLTHRU; +} + + +extern int sysctl_vfs_cache_pressure; + +static inline unsigned long vfs_pressure_ratio(unsigned long val) +{ + return mult_frac(val, sysctl_vfs_cache_pressure, 100); +} + +/** + * d_inode - Get the actual inode of this dentry + * @dentry: The dentry to query + * + * This is the helper normal filesystems should use to get at their own inodes + * in their own dentries and ignore the layering superimposed upon them. + */ +static inline struct inode *d_inode(const struct dentry *dentry) +{ + return dentry->d_inode; +} + +/** + * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE() + * @dentry: The dentry to query + * + * This is the helper normal filesystems should use to get at their own inodes + * in their own dentries and ignore the layering superimposed upon them. + */ +static inline struct inode *d_inode_rcu(const struct dentry *dentry) +{ + return READ_ONCE(dentry->d_inode); +} + +/** + * d_backing_inode - Get upper or lower inode we should be using + * @upper: The upper layer + * + * This is the helper that should be used to get at the inode that will be used + * if this dentry were to be opened as a file. The inode may be on the upper + * dentry or it may be on a lower dentry pinned by the upper. + * + * Normal filesystems should not use this to access their own inodes. 
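The d_is_*() accessors above decode DCACHE_ENTRY_TYPE so callers can reason about a dentry without touching ->d_inode first. A hedged sketch; treating a nonzero i_size as "non-empty" is only an illustration:

#include <linux/dcache.h>
#include <linux/fs.h>

static bool example_is_nonempty_dir(const struct dentry *dentry)
{
        if (d_is_negative(dentry) || !d_is_dir(dentry))
                return false;

        /* d_inode() is for a filesystem looking at its own dentries */
        return i_size_read(d_inode(dentry)) != 0;
}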
+ */ +static inline struct inode *d_backing_inode(const struct dentry *upper) +{ + struct inode *inode = upper->d_inode; + + return inode; +} + +/** + * d_backing_dentry - Get upper or lower dentry we should be using + * @upper: The upper layer + * + * This is the helper that should be used to get the dentry of the inode that + * will be used if this dentry were opened as a file. It may be the upper + * dentry or it may be a lower dentry pinned by the upper. + * + * Normal filesystems should not use this to access their own dentries. + */ +static inline struct dentry *d_backing_dentry(struct dentry *upper) +{ + return upper; +} + +/** + * d_real - Return the real dentry + * @dentry: the dentry to query + * @inode: inode to select the dentry from multiple layers (can be NULL) + * + * If dentry is on a union/overlay, then return the underlying, real dentry. + * Otherwise return the dentry itself. + * + * See also: Documentation/filesystems/vfs.txt + */ +static inline struct dentry *d_real(struct dentry *dentry, + const struct inode *inode) +{ + if (unlikely(dentry->d_flags & DCACHE_OP_REAL)) + return dentry->d_op->d_real(dentry, inode); + else + return dentry; +} + +/** + * d_real_inode - Return the real inode + * @dentry: The dentry to query + * + * If dentry is on a union/overlay, then return the underlying, real inode. + * Otherwise return d_inode(). + */ +static inline struct inode *d_real_inode(const struct dentry *dentry) +{ + /* This usage of d_real() results in const dentry */ + return d_backing_inode(d_real((struct dentry *) dentry, NULL)); +} + +struct name_snapshot { + const unsigned char *name; + unsigned char inline_name[DNAME_INLINE_LEN]; +}; +void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *); +void release_dentry_name_snapshot(struct name_snapshot *); + +#endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/dccp.h b/include/linux/dccp.h new file mode 100644 index 000000000..6b64b6cc2 --- /dev/null +++ b/include/linux/dccp.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DCCP_H +#define _LINUX_DCCP_H + + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +enum dccp_state { + DCCP_OPEN = TCP_ESTABLISHED, + DCCP_REQUESTING = TCP_SYN_SENT, + DCCP_LISTEN = TCP_LISTEN, + DCCP_RESPOND = TCP_SYN_RECV, + /* + * States involved in closing a DCCP connection: + * 1) ACTIVE_CLOSEREQ is entered by a server sending a CloseReq. + * + * 2) CLOSING can have three different meanings (RFC 4340, 8.3): + * a. Client has performed active-close, has sent a Close to the server + * from state OPEN or PARTOPEN, and is waiting for the final Reset + * (in this case, SOCK_DONE == 1). + * b. Client is asked to perform passive-close, by receiving a CloseReq + * in (PART)OPEN state. It sends a Close and waits for final Reset + * (in this case, SOCK_DONE == 0). + * c. Server performs an active-close as in (a), keeps TIMEWAIT state. + * + * 3) The following intermediate states are employed to give passively + * closing nodes a chance to process their unread data: + * - PASSIVE_CLOSE (from OPEN => CLOSED) and + * - PASSIVE_CLOSEREQ (from (PART)OPEN to CLOSING; case (b) above). 
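The name_snapshot helpers above give a caller a stable copy of d_name that survives a concurrent rename. A hedged sketch; the pr_info() consumer is purely illustrative:

#include <linux/dcache.h>
#include <linux/printk.h>

static void example_log_dentry_name(struct dentry *dentry)
{
        struct name_snapshot name;

        take_dentry_name_snapshot(&name, dentry);
        pr_info("dentry name: %s\n", name.name);        /* safe even if a rename races */
        release_dentry_name_snapshot(&name);
}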
+ */ + DCCP_ACTIVE_CLOSEREQ = TCP_FIN_WAIT1, + DCCP_PASSIVE_CLOSE = TCP_CLOSE_WAIT, /* any node receiving a Close */ + DCCP_CLOSING = TCP_CLOSING, + DCCP_TIME_WAIT = TCP_TIME_WAIT, + DCCP_CLOSED = TCP_CLOSE, + DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV, + DCCP_PARTOPEN = TCP_MAX_STATES, + DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */ + DCCP_MAX_STATES +}; + +enum { + DCCPF_OPEN = TCPF_ESTABLISHED, + DCCPF_REQUESTING = TCPF_SYN_SENT, + DCCPF_LISTEN = TCPF_LISTEN, + DCCPF_RESPOND = TCPF_SYN_RECV, + DCCPF_ACTIVE_CLOSEREQ = TCPF_FIN_WAIT1, + DCCPF_CLOSING = TCPF_CLOSING, + DCCPF_TIME_WAIT = TCPF_TIME_WAIT, + DCCPF_CLOSED = TCPF_CLOSE, + DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV, + DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN), +}; + +static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb) +{ + return (struct dccp_hdr *)skb_transport_header(skb); +} + +static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen) +{ + skb_push(skb, headlen); + skb_reset_transport_header(skb); + return memset(skb_transport_header(skb), 0, headlen); +} + +static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh) +{ + return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh)); +} + +static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh) +{ + return sizeof(*dh) + (dh->dccph_x ? sizeof(struct dccp_hdr_ext) : 0); +} + +static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb) +{ + const struct dccp_hdr *dh = dccp_hdr(skb); + return __dccp_basic_hdr_len(dh); +} + +static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh) +{ + __u64 seq_nr = ntohs(dh->dccph_seq); + + if (dh->dccph_x != 0) + seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(dh)->dccph_seq_low); + else + seq_nr += (u32)dh->dccph_seq2 << 16; + + return seq_nr; +} + +static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb) +{ + return (struct dccp_hdr_request *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); +} + +static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb) +{ + return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); +} + +static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb) +{ + const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb); + return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low); +} + +static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb) +{ + return (struct dccp_hdr_response *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); +} + +static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb) +{ + return (struct dccp_hdr_reset *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); +} + +static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh) +{ + return __dccp_basic_hdr_len(dh) + + dccp_packet_hdr_len(dh->dccph_type); +} + +static inline unsigned int dccp_hdr_len(const struct sk_buff *skb) +{ + return __dccp_hdr_len(dccp_hdr(skb)); +} + +/** + * struct dccp_request_sock - represent DCCP-specific connection request + * @dreq_inet_rsk: structure inherited from + * @dreq_iss: initial sequence number, sent on the first Response (RFC 4340, 7.1) + * @dreq_gss: greatest sequence number sent (for retransmitted Responses) + * @dreq_isr: initial sequence number received in the first Request + * @dreq_gsr: greatest sequence number received (for retransmitted Request(s)) + * @dreq_service: service code present on the Request (there is just one) + * @dreq_featneg: feature 
negotiation options for this connection + * The following two fields are analogous to the ones in dccp_sock: + * @dreq_timestamp_echo: last received timestamp to echo (13.1) + * @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo + */ +struct dccp_request_sock { + struct inet_request_sock dreq_inet_rsk; + __u64 dreq_iss; + __u64 dreq_gss; + __u64 dreq_isr; + __u64 dreq_gsr; + __be32 dreq_service; + spinlock_t dreq_lock; + struct list_head dreq_featneg; + __u32 dreq_timestamp_echo; + __u32 dreq_timestamp_time; +}; + +static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req) +{ + return (struct dccp_request_sock *)req; +} + +extern struct inet_timewait_death_row dccp_death_row; + +extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, + struct sk_buff *skb); + +struct dccp_options_received { + u64 dccpor_ndp:48; + u32 dccpor_timestamp; + u32 dccpor_timestamp_echo; + u32 dccpor_elapsed_time; +}; + +struct ccid; + +enum dccp_role { + DCCP_ROLE_UNDEFINED, + DCCP_ROLE_LISTEN, + DCCP_ROLE_CLIENT, + DCCP_ROLE_SERVER, +}; + +struct dccp_service_list { + __u32 dccpsl_nr; + __be32 dccpsl_list[0]; +}; + +#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) +#define DCCP_SERVICE_CODE_IS_ABSENT 0 + +static inline bool dccp_list_has_service(const struct dccp_service_list *sl, + const __be32 service) +{ + if (likely(sl != NULL)) { + u32 i = sl->dccpsl_nr; + while (i--) + if (sl->dccpsl_list[i] == service) + return true; + } + return false; +} + +struct dccp_ackvec; + +/** + * struct dccp_sock - DCCP socket state + * + * @dccps_swl - sequence number window low + * @dccps_swh - sequence number window high + * @dccps_awl - acknowledgement number window low + * @dccps_awh - acknowledgement number window high + * @dccps_iss - initial sequence number sent + * @dccps_isr - initial sequence number received + * @dccps_osr - first OPEN sequence number received + * @dccps_gss - greatest sequence number sent + * @dccps_gsr - greatest valid sequence number received + * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss + * @dccps_service - first (passive sock) or unique (active sock) service code + * @dccps_service_list - second .. 
last service code on passive socket + * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option + * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo + * @dccps_l_ack_ratio - feature-local Ack Ratio + * @dccps_r_ack_ratio - feature-remote Ack Ratio + * @dccps_l_seq_win - local Sequence Window (influences ack number validity) + * @dccps_r_seq_win - remote Sequence Window (influences seq number validity) + * @dccps_pcslen - sender partial checksum coverage (via sockopt) + * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) + * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2) + * @dccps_ndp_count - number of Non Data Packets since last data packet + * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) + * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4) + * @dccps_featneg - tracks feature-negotiation state (mostly during handshake) + * @dccps_hc_rx_ackvec - rx half connection ack vector + * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) + * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection) + * @dccps_options_received - parsed set of retrieved options + * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy + * @dccps_tx_qlen - maximum length of the TX queue + * @dccps_role - role of this sock, one of %dccp_role + * @dccps_hc_rx_insert_options - receiver wants to add options when acking + * @dccps_hc_tx_insert_options - sender wants to add options when sending + * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3) + * @dccps_sync_scheduled - flag which signals "send out-of-band message soon" + * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets + * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing) + * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs) + */ +struct dccp_sock { + /* inet_connection_sock has to be the first member of dccp_sock */ + struct inet_connection_sock dccps_inet_connection; +#define dccps_syn_rtt dccps_inet_connection.icsk_ack.lrcvtime + __u64 dccps_swl; + __u64 dccps_swh; + __u64 dccps_awl; + __u64 dccps_awh; + __u64 dccps_iss; + __u64 dccps_isr; + __u64 dccps_osr; + __u64 dccps_gss; + __u64 dccps_gsr; + __u64 dccps_gar; + __be32 dccps_service; + __u32 dccps_mss_cache; + struct dccp_service_list *dccps_service_list; + __u32 dccps_timestamp_echo; + __u32 dccps_timestamp_time; + __u16 dccps_l_ack_ratio; + __u16 dccps_r_ack_ratio; + __u64 dccps_l_seq_win:48; + __u64 dccps_r_seq_win:48; + __u8 dccps_pcslen:4; + __u8 dccps_pcrlen:4; + __u8 dccps_send_ndp_count:1; + __u64 dccps_ndp_count:48; + unsigned long dccps_rate_last; + struct list_head dccps_featneg; + struct dccp_ackvec *dccps_hc_rx_ackvec; + struct ccid *dccps_hc_rx_ccid; + struct ccid *dccps_hc_tx_ccid; + struct dccp_options_received dccps_options_received; + __u8 dccps_qpolicy; + __u32 dccps_tx_qlen; + enum dccp_role dccps_role:2; + __u8 dccps_hc_rx_insert_options:1; + __u8 dccps_hc_tx_insert_options:1; + __u8 dccps_server_timewait:1; + __u8 dccps_sync_scheduled:1; + struct tasklet_struct dccps_xmitlet; + struct timer_list dccps_xmit_timer; +}; + +static inline struct dccp_sock *dccp_sk(const struct sock *sk) +{ + return (struct dccp_sock *)sk; +} + +static inline const char *dccp_role(const struct sock *sk) +{ + switch (dccp_sk(sk)->dccps_role) { + case DCCP_ROLE_UNDEFINED: return "undefined"; + case DCCP_ROLE_LISTEN: return "listen"; 
+ case DCCP_ROLE_SERVER: return "server"; + case DCCP_ROLE_CLIENT: return "client"; + } + return NULL; +} + +extern void dccp_syn_ack_timeout(const struct request_sock *req); + +#endif /* _LINUX_DCCP_H */ diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h new file mode 100644 index 000000000..ddfdac20c --- /dev/null +++ b/include/linux/dcookies.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * dcookies.h + * + * Persistent cookie-path mappings + * + * Copyright 2002 John Levon + */ + +#ifndef DCOOKIES_H +#define DCOOKIES_H + + +#ifdef CONFIG_PROFILING + +#include +#include + +struct dcookie_user; +struct path; + +/** + * dcookie_register - register a user of dcookies + * + * Register as a dcookie user. Returns %NULL on failure. + */ +struct dcookie_user * dcookie_register(void); + +/** + * dcookie_unregister - unregister a user of dcookies + * + * Unregister as a dcookie user. This may invalidate + * any dcookie values returned from get_dcookie(). + */ +void dcookie_unregister(struct dcookie_user * user); + +/** + * get_dcookie - acquire a dcookie + * + * Convert the given dentry/vfsmount pair into + * a cookie value. + * + * Returns -EINVAL if no living task has registered as a + * dcookie user. + * + * Returns 0 on success, with *cookie filled in + */ +int get_dcookie(const struct path *path, unsigned long *cookie); + +#else + +static inline struct dcookie_user * dcookie_register(void) +{ + return NULL; +} + +static inline void dcookie_unregister(struct dcookie_user * user) +{ + return; +} + +static inline int get_dcookie(const struct path *path, unsigned long *cookie) +{ + return -ENOSYS; +} + +#endif /* CONFIG_PROFILING */ + +#endif /* DCOOKIES_H */ diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h new file mode 100644 index 000000000..120225e9a --- /dev/null +++ b/include/linux/debug_locks.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_DEBUG_LOCKING_H +#define __LINUX_DEBUG_LOCKING_H + +#include +#include +#include + +struct task_struct; + +extern int debug_locks; +extern int debug_locks_silent; + + +static inline int __debug_locks_off(void) +{ + return xchg(&debug_locks, 0); +} + +/* + * Generic 'turn off all lock debugging' function: + */ +extern int debug_locks_off(void); + +#define DEBUG_LOCKS_WARN_ON(c) \ +({ \ + int __ret = 0; \ + \ + if (!oops_in_progress && unlikely(c)) { \ + if (debug_locks_off() && !debug_locks_silent) \ + WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \ + __ret = 1; \ + } \ + __ret; \ +}) + +#ifdef CONFIG_SMP +# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) +#else +# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS + extern void locking_selftest(void); +#else +# define locking_selftest() do { } while (0) +#endif + +struct task_struct; + +#ifdef CONFIG_LOCKDEP +extern void debug_show_all_locks(void); +extern void debug_show_held_locks(struct task_struct *task); +extern void debug_check_no_locks_freed(const void *from, unsigned long len); +extern void debug_check_no_locks_held(void); +#else +static inline void debug_show_all_locks(void) +{ +} + +static inline void debug_show_held_locks(struct task_struct *task) +{ +} + +static inline void +debug_check_no_locks_freed(const void *from, unsigned long len) +{ +} + +static inline void +debug_check_no_locks_held(void) +{ +} +#endif + +#endif diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h new file mode 100644 index 000000000..3bc1034c5 --- /dev/null +++ 
b/include/linux/debugfs.h @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * debugfs.h - a tiny little debug file system + * + * Copyright (C) 2004 Greg Kroah-Hartman + * Copyright (C) 2004 IBM Inc. + * + * debugfs is for people to use instead of /proc or /sys. + * See Documentation/filesystems/ for more details. + */ + +#ifndef _DEBUGFS_H_ +#define _DEBUGFS_H_ + +#include +#include + +#include +#include + +struct device; +struct file_operations; + +struct debugfs_blob_wrapper { + void *data; + unsigned long size; +}; + +struct debugfs_reg32 { + char *name; + unsigned long offset; +}; + +struct debugfs_regset32 { + const struct debugfs_reg32 *regs; + int nregs; + void __iomem *base; +}; + +extern struct dentry *arch_debugfs_dir; + +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ +static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + __simple_attr_check_format(__fmt, 0ull); \ + return simple_attr_open(inode, file, __get, __set, __fmt); \ +} \ +static const struct file_operations __fops = { \ + .owner = THIS_MODULE, \ + .open = __fops ## _open, \ + .release = simple_attr_release, \ + .read = debugfs_attr_read, \ + .write = debugfs_attr_write, \ + .llseek = no_llseek, \ +} + +typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); + +#if defined(CONFIG_DEBUG_FS) + +struct dentry *debugfs_lookup(const char *name, struct dentry *parent); + +struct dentry *debugfs_create_file(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); +struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); + +struct dentry *debugfs_create_file_size(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops, + loff_t file_size); + +struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); + +struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, + const char *dest); + +struct dentry *debugfs_create_automount(const char *name, + struct dentry *parent, + debugfs_automount_t f, + void *data); + +void debugfs_remove(struct dentry *dentry); +void debugfs_remove_recursive(struct dentry *dentry); + +const struct file_operations *debugfs_real_fops(const struct file *filp); + +int debugfs_file_get(struct dentry *dentry); +void debugfs_file_put(struct dentry *dentry); + +ssize_t debugfs_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); +ssize_t debugfs_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); + +struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, + struct dentry *new_dir, const char *new_name); + +struct dentry *debugfs_create_u8(const char *name, umode_t mode, + struct dentry *parent, u8 *value); +struct dentry *debugfs_create_u16(const char *name, umode_t mode, + struct dentry *parent, u16 *value); +struct dentry *debugfs_create_u32(const char *name, umode_t mode, + struct dentry *parent, u32 *value); +struct dentry *debugfs_create_u64(const char *name, umode_t mode, + struct dentry *parent, u64 *value); +struct dentry *debugfs_create_ulong(const char *name, umode_t mode, + struct dentry *parent, unsigned long *value); +struct dentry *debugfs_create_x8(const char *name, umode_t mode, + struct dentry *parent, u8 *value); +struct dentry *debugfs_create_x16(const char *name, umode_t mode, + struct dentry *parent, u16 *value); +struct 
dentry *debugfs_create_x32(const char *name, umode_t mode, + struct dentry *parent, u32 *value); +struct dentry *debugfs_create_x64(const char *name, umode_t mode, + struct dentry *parent, u64 *value); +struct dentry *debugfs_create_size_t(const char *name, umode_t mode, + struct dentry *parent, size_t *value); +struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, + struct dentry *parent, atomic_t *value); +struct dentry *debugfs_create_bool(const char *name, umode_t mode, + struct dentry *parent, bool *value); + +struct dentry *debugfs_create_blob(const char *name, umode_t mode, + struct dentry *parent, + struct debugfs_blob_wrapper *blob); + +struct dentry *debugfs_create_regset32(const char *name, umode_t mode, + struct dentry *parent, + struct debugfs_regset32 *regset); + +void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, + int nregs, void __iomem *base, char *prefix); + +struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, + struct dentry *parent, + u32 *array, u32 elements); + +struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, + struct dentry *parent, + int (*read_fn)(struct seq_file *s, + void *data)); + +bool debugfs_initialized(void); + +ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); + +ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos); + +#else + +#include + +/* + * We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled + * so users have a chance to detect if there was a real error or not. We don't + * want to duplicate the design decision mistakes of procfs and devfs again. + */ + +static inline struct dentry *debugfs_lookup(const char *name, + struct dentry *parent) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_file_unsafe(const char *name, + umode_t mode, struct dentry *parent, + void *data, + const struct file_operations *fops) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops, + loff_t file_size) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_dir(const char *name, + struct dentry *parent) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_symlink(const char *name, + struct dentry *parent, + const char *dest) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_automount(const char *name, + struct dentry *parent, + debugfs_automount_t f, + void *data) +{ + return ERR_PTR(-ENODEV); +} + +static inline void debugfs_remove(struct dentry *dentry) +{ } + +static inline void debugfs_remove_recursive(struct dentry *dentry) +{ } + +const struct file_operations *debugfs_real_fops(const struct file *filp); + +static inline int debugfs_file_get(struct dentry *dentry) +{ + return 0; +} + +static inline void debugfs_file_put(struct dentry *dentry) +{ } + +static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + return -ENODEV; +} + +static inline ssize_t debugfs_attr_write(struct file *file, + const char __user *buf, + size_t len, loff_t *ppos) +{ + 
return -ENODEV; +} + +static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, + struct dentry *new_dir, char *new_name) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode, + struct dentry *parent, + u8 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode, + struct dentry *parent, + u16 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode, + struct dentry *parent, + u32 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode, + struct dentry *parent, + u64 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_ulong(const char *name, + umode_t mode, + struct dentry *parent, + unsigned long *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode, + struct dentry *parent, + u8 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode, + struct dentry *parent, + u16 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode, + struct dentry *parent, + u32 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_x64(const char *name, umode_t mode, + struct dentry *parent, + u64 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode, + struct dentry *parent, + size_t *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, + struct dentry *parent, atomic_t *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, + struct dentry *parent, + bool *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode, + struct dentry *parent, + struct debugfs_blob_wrapper *blob) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_regset32(const char *name, + umode_t mode, struct dentry *parent, + struct debugfs_regset32 *regset) +{ + return ERR_PTR(-ENODEV); +} + +static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, + int nregs, void __iomem *base, char *prefix) +{ +} + +static inline bool debugfs_initialized(void) +{ + return false; +} + +static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, + struct dentry *parent, + u32 *array, u32 elements) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev, + const char *name, + struct dentry *parent, + int (*read_fn)(struct seq_file *s, + void *data)) +{ + return ERR_PTR(-ENODEV); +} + +static inline ssize_t debugfs_read_file_bool(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return -ENODEV; +} + +static inline ssize_t debugfs_write_file_bool(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + return -ENODEV; +} + +#endif + +#endif diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h new file mode 100644 index 000000000..afc416e5d --- /dev/null +++ b/include/linux/debugobjects.h @@ -0,0 +1,114 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_DEBUGOBJECTS_H
+#define _LINUX_DEBUGOBJECTS_H
+
+#include
+#include
+
+enum debug_obj_state {
+	ODEBUG_STATE_NONE,
+	ODEBUG_STATE_INIT,
+	ODEBUG_STATE_INACTIVE,
+	ODEBUG_STATE_ACTIVE,
+	ODEBUG_STATE_DESTROYED,
+	ODEBUG_STATE_NOTAVAILABLE,
+	ODEBUG_STATE_MAX,
+};
+
+struct debug_obj_descr;
+
+/**
+ * struct debug_obj - representation of a tracked object
+ * @node:	hlist node to link the object into the tracker list
+ * @state:	tracked object state
+ * @astate:	current active state
+ * @object:	pointer to the real object
+ * @descr:	pointer to an object type specific debug description structure
+ */
+struct debug_obj {
+	struct hlist_node node;
+	enum debug_obj_state state;
+	unsigned int astate;
+	void *object;
+	struct debug_obj_descr *descr;
+};
+
+/**
+ * struct debug_obj_descr - object type specific debug description structure
+ *
+ * @name:		name of the object type
+ * @debug_hint:		function returning the address which has an associated
+ *			kernel symbol, to allow identifying the object
+ * @is_static_object:	return true if the obj is static, otherwise return false
+ * @fixup_init:		fixup function, which is called when the init check
+ *			fails. All fixup functions must return true if fixup
+ *			was successful, otherwise return false
+ * @fixup_activate:	fixup function, which is called when the activate check
+ *			fails
+ * @fixup_destroy:	fixup function, which is called when the destroy check
+ *			fails
+ * @fixup_free:		fixup function, which is called when the free check
+ *			fails
+ * @fixup_assert_init:	fixup function, which is called when the assert_init
+ *			check fails
+ */
+struct debug_obj_descr {
+	const char *name;
+	void *(*debug_hint)(void *addr);
+	bool (*is_static_object)(void *addr);
+	bool (*fixup_init)(void *addr, enum debug_obj_state state);
+	bool (*fixup_activate)(void *addr, enum debug_obj_state state);
+	bool (*fixup_destroy)(void *addr, enum debug_obj_state state);
+	bool (*fixup_free)(void *addr, enum debug_obj_state state);
+	bool (*fixup_assert_init)(void *addr, enum debug_obj_state state);
+};
+
+#ifdef CONFIG_DEBUG_OBJECTS
+extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
+extern void
+debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
+extern int debug_object_activate (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
+extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
+
+/*
+ * Active state:
+ * - Set at 0 upon initialization.
+ * - Must return to 0 before deactivation.
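To make the init/activate/deactivate flow above concrete, here is a minimal usage sketch. The demo_work structure, the descriptor name and the fixup behaviour are hypothetical, not part of this header:

	/* sketch only: assumes <linux/debugobjects.h> */
	struct demo_work {
		int pending;			/* hypothetical per-object state */
	};

	static bool demo_fixup_init(void *addr, enum debug_obj_state state)
	{
		/* called when the init check fails; report that nothing was fixed */
		return false;
	}

	static struct debug_obj_descr demo_debug_descr = {
		.name		= "demo_work",
		.fixup_init	= demo_fixup_init,
	};

	static void demo_setup(struct demo_work *w)
	{
		debug_object_init(w, &demo_debug_descr);	/* start tracking     */
		debug_object_activate(w, &demo_debug_descr);	/* object is now live */
	}

	static void demo_teardown(struct demo_work *w)
	{
		debug_object_deactivate(w, &demo_debug_descr);	/* must precede free  */
		debug_object_free(w, &demo_debug_descr);	/* stop tracking      */
	}

With CONFIG_DEBUG_OBJECTS disabled, every one of these calls compiles to the empty stubs further down in this header, so the annotations cost nothing in production builds.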
+ */ +extern void +debug_object_active_state(void *addr, struct debug_obj_descr *descr, + unsigned int expect, unsigned int next); + +extern void debug_objects_early_init(void); +extern void debug_objects_mem_init(void); +#else +static inline void +debug_object_init (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { } +static inline int +debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; } +static inline void +debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_destroy (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_free (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { } + +static inline void debug_objects_early_init(void) { } +static inline void debug_objects_mem_init(void) { } +#endif + +#ifdef CONFIG_DEBUG_OBJECTS_FREE +extern void debug_check_no_obj_freed(const void *address, unsigned long size); +#else +static inline void +debug_check_no_obj_freed(const void *address, unsigned long size) { } +#endif + +#endif diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h new file mode 100644 index 000000000..586016394 --- /dev/null +++ b/include/linux/decompress/bunzip2.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DECOMPRESS_BUNZIP2_H +#define DECOMPRESS_BUNZIP2_H + +int bunzip2(unsigned char *inbuf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *output, + long *pos, + void(*error)(char *x)); +#endif diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h new file mode 100644 index 000000000..207d80138 --- /dev/null +++ b/include/linux/decompress/generic.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DECOMPRESS_GENERIC_H +#define DECOMPRESS_GENERIC_H + +typedef int (*decompress_fn) (unsigned char *inbuf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *outbuf, + long *posp, + void(*error)(char *x)); + +/* inbuf - input buffer + *len - len of pre-read data in inbuf + *fill - function to fill inbuf when empty + *flush - function to write out outbuf + *outbuf - output buffer + *posp - if non-null, input position (number of bytes read) will be + * returned here + * + *If len != 0, inbuf should contain all the necessary input data, and fill + *should be NULL + *If len = 0, inbuf can be NULL, in which case the decompressor will allocate + *the input buffer. If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes. + *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE + *bytes should be read per call. Replace XXX with the appropriate decompressor + *name, i.e. LZMA_IOBUF_SIZE. + * + *If flush = NULL, outbuf must be large enough to buffer all the expected + *output. If flush != NULL, the output buffer will be allocated by the + *decompressor (outbuf = NULL), and the flush function will be called to + *flush the output buffer at the appropriate time (decompressor and stream + *dependent). 
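As a rough illustration of the calling convention described above (the buffer names are hypothetical and error handling is reduced to a printk), a caller that already has the whole compressed stream in memory can pass NULL for both fill and flush and let decompress_method(), declared just below, pick the matching decompressor:

	/* sketch only: assumes <linux/decompress/generic.h>, <linux/printk.h>, <linux/errno.h> */
	static void demo_decomp_error(char *msg)
	{
		pr_err("decompression failed: %s\n", msg);
	}

	static int demo_decompress(unsigned char *in, long in_len, unsigned char *out)
	{
		const char *name;
		decompress_fn decomp = decompress_method(in, in_len, &name);

		if (!decomp)
			return -EINVAL;		/* no known stream signature found */

		/*
		 * All input is already in 'in' (so fill == NULL) and 'out' is
		 * assumed large enough for the whole stream (so flush == NULL);
		 * the return value is whatever the selected decompressor reports.
		 */
		return decomp(in, in_len, NULL, NULL, out, NULL, demo_decomp_error);
	}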
+ */ + + +/* Utility routine to detect the decompression method */ +decompress_fn decompress_method(const unsigned char *inbuf, long len, + const char **name); + +#endif diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h new file mode 100644 index 000000000..b65f24e7d --- /dev/null +++ b/include/linux/decompress/inflate.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_DECOMPRESS_INFLATE_H +#define LINUX_DECOMPRESS_INFLATE_H + +int gunzip(unsigned char *inbuf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *output, + long *pos, + void(*error_fn)(char *x)); +#endif diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h new file mode 100644 index 000000000..868e9eacd --- /dev/null +++ b/include/linux/decompress/mm.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/compr_mm.h + * + * Memory management for pre-boot and ramdisk uncompressors + * + * Authors: Alain Knaff + * + */ + +#ifndef DECOMPR_MM_H +#define DECOMPR_MM_H + +#ifdef STATIC + +/* Code active when included from pre-boot environment: */ + +/* + * Some architectures want to ensure there is no local data in their + * pre-boot environment, so that data can arbitrarily relocated (via + * GOT references). This is achieved by defining STATIC_RW_DATA to + * be null. + */ +#ifndef STATIC_RW_DATA +#define STATIC_RW_DATA static +#endif + +/* A trivial malloc implementation, adapted from + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 + */ +STATIC_RW_DATA unsigned long malloc_ptr; +STATIC_RW_DATA int malloc_count; + +static void *malloc(int size) +{ + void *p; + + if (size < 0) + return NULL; + if (!malloc_ptr) + malloc_ptr = free_mem_ptr; + + malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ + + p = (void *)malloc_ptr; + malloc_ptr += size; + + if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) + return NULL; + + malloc_count++; + return p; +} + +static void free(void *where) +{ + malloc_count--; + if (!malloc_count) + malloc_ptr = free_mem_ptr; +} + +#define large_malloc(a) malloc(a) +#define large_free(a) free(a) + +#define INIT + +#else /* STATIC */ + +/* Code active when compiled standalone for use when loading ramdisk: */ + +#include +#include +#include +#include +#include + +/* Use defines rather than static inline in order to avoid spurious + * warnings when not needed (indeed large_malloc / large_free are not + * needed by inflate */ + +#define malloc(a) kmalloc(a, GFP_KERNEL) +#define free(a) kfree(a) + +#define large_malloc(a) vmalloc(a) +#define large_free(a) vfree(a) + +#define INIT __init +#define STATIC + +#include + +#endif /* STATIC */ + +#endif /* DECOMPR_MM_H */ diff --git a/include/linux/decompress/unlz4.h b/include/linux/decompress/unlz4.h new file mode 100644 index 000000000..5a235f605 --- /dev/null +++ b/include/linux/decompress/unlz4.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DECOMPRESS_UNLZ4_H +#define DECOMPRESS_UNLZ4_H + +int unlz4(unsigned char *inbuf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *output, + long *pos, + void(*error)(char *x)); +#endif diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h new file mode 100644 index 000000000..1c930f125 --- /dev/null +++ b/include/linux/decompress/unlzma.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DECOMPRESS_UNLZMA_H +#define DECOMPRESS_UNLZMA_H + +int 
unlzma(unsigned char *, long, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *output, + long *posp, + void(*error)(char *x) + ); + +#endif diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h new file mode 100644 index 000000000..550ae8783 --- /dev/null +++ b/include/linux/decompress/unlzo.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DECOMPRESS_UNLZO_H +#define DECOMPRESS_UNLZO_H + +int unlzo(unsigned char *inbuf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *output, + long *pos, + void(*error)(char *x)); +#endif diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h new file mode 100644 index 000000000..f764e2a72 --- /dev/null +++ b/include/linux/decompress/unxz.h @@ -0,0 +1,19 @@ +/* + * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd + * + * Author: Lasse Collin + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef DECOMPRESS_UNXZ_H +#define DECOMPRESS_UNXZ_H + +int unxz(unsigned char *in, long in_size, + long (*fill)(void *dest, unsigned long size), + long (*flush)(void *src, unsigned long size), + unsigned char *out, long *in_used, + void (*error)(char *x)); + +#endif diff --git a/include/linux/delay.h b/include/linux/delay.h new file mode 100644 index 000000000..b78bab439 --- /dev/null +++ b/include/linux/delay.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DELAY_H +#define _LINUX_DELAY_H + +/* + * Copyright (C) 1993 Linus Torvalds + * + * Delay routines, using a pre-computed "loops_per_jiffy" value. + * + * Please note that ndelay(), udelay() and mdelay() may return early for + * several reasons: + * 1. computed loops_per_jiffy too low (due to the time taken to + * execute the timer interrupt.) + * 2. cache behaviour affecting the time it takes to execute the + * loop function. + * 3. CPU clock rate changes. + * + * Please see this thread: + * http://lists.openwall.net/linux-kernel/2011/01/09/56 + */ + +#include + +extern unsigned long loops_per_jiffy; + +#include + +/* + * Using udelay() for intervals greater than a few milliseconds can + * risk overflow for high loops_per_jiffy (high bogomips) machines. The + * mdelay() provides a wrapper to prevent this. For delays greater + * than MAX_UDELAY_MS milliseconds, the wrapper is used. Architecture + * specific values can be defined in asm-???/delay.h as an override. + * The 2nd mdelay() definition ensures GCC will optimize away the + * while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G. + */ + +#ifndef MAX_UDELAY_MS +#define MAX_UDELAY_MS 5 +#endif + +#ifndef mdelay +#define mdelay(n) (\ + (__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? 
udelay((n)*1000) : \ + ({unsigned long __ms=(n); while (__ms--) udelay(1000);})) +#endif + +#ifndef ndelay +static inline void ndelay(unsigned long x) +{ + udelay(DIV_ROUND_UP(x, 1000)); +} +#define ndelay(x) ndelay(x) +#endif + +extern unsigned long lpj_fine; +void calibrate_delay(void); +void msleep(unsigned int msecs); +unsigned long msleep_interruptible(unsigned int msecs); +void usleep_range(unsigned long min, unsigned long max); + +static inline void ssleep(unsigned int seconds) +{ + msleep(seconds * 1000); +} + +#endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h new file mode 100644 index 000000000..31c865d18 --- /dev/null +++ b/include/linux/delayacct.h @@ -0,0 +1,188 @@ +/* delayacct.h - per-task delay accounting + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + */ + +#ifndef _LINUX_DELAYACCT_H +#define _LINUX_DELAYACCT_H + +#include + +/* + * Per-task flags relevant to delay accounting + * maintained privately to avoid exhausting similar flags in sched.h:PF_* + * Used to set current->delays->flags + */ +#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */ +#define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */ + +#ifdef CONFIG_TASK_DELAY_ACCT +struct task_delay_info { + raw_spinlock_t lock; + unsigned int flags; /* Private per-task flags */ + + /* For each stat XXX, add following, aligned appropriately + * + * struct timespec XXX_start, XXX_end; + * u64 XXX_delay; + * u32 XXX_count; + * + * Atomicity of updates to XXX_delay, XXX_count protected by + * single lock above (split into XXX_lock if contention is an issue). + */ + + /* + * XXX_count is incremented on every XXX operation, the delay + * associated with the operation is added to XXX_delay. + * XXX_delay contains the accumulated delay time in nanoseconds. 
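Referring back to the delay helpers above, a rough rule of thumb is ndelay()/udelay() for very short busy-waits that may run in atomic context, usleep_range() for microsecond-range waits that are allowed to sleep, and msleep()/ssleep() for anything longer. A hedged sketch, with a hypothetical MMIO reset register:

	/* sketch only: 'reset_reg' is illustrative; assumes <linux/delay.h> and <linux/io.h> */
	static void demo_reset_pulse(void __iomem *reset_reg)
	{
		writel(1, reset_reg);		/* assert reset                         */
		usleep_range(100, 200);		/* 100-200 us, may sleep                */
		writel(0, reset_reg);		/* deassert reset                       */
		msleep(20);			/* coarse millisecond-scale settle time */
	}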
+ */ + u64 blkio_start; /* Shared by blkio, swapin */ + u64 blkio_delay; /* wait for sync block io completion */ + u64 swapin_delay; /* wait for swapin block io completion */ + u32 blkio_count; /* total count of the number of sync block */ + /* io operations performed */ + u32 swapin_count; /* total count of the number of swapin block */ + /* io operations performed */ + + u64 freepages_start; + u64 freepages_delay; /* wait for memory reclaim */ + u32 freepages_count; /* total count of memory reclaim */ +}; +#endif + +#include +#include + +#ifdef CONFIG_TASK_DELAY_ACCT +extern int delayacct_on; /* Delay accounting turned on/off */ +extern struct kmem_cache *delayacct_cache; +extern void delayacct_init(void); +extern void __delayacct_tsk_init(struct task_struct *); +extern void __delayacct_tsk_exit(struct task_struct *); +extern void __delayacct_blkio_start(void); +extern void __delayacct_blkio_end(struct task_struct *); +extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); +extern __u64 __delayacct_blkio_ticks(struct task_struct *); +extern void __delayacct_freepages_start(void); +extern void __delayacct_freepages_end(void); + +static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) +{ + if (p->delays) + return (p->delays->flags & DELAYACCT_PF_BLKIO); + else + return 0; +} + +static inline void delayacct_set_flag(int flag) +{ + if (current->delays) + current->delays->flags |= flag; +} + +static inline void delayacct_clear_flag(int flag) +{ + if (current->delays) + current->delays->flags &= ~flag; +} + +static inline void delayacct_tsk_init(struct task_struct *tsk) +{ + /* reinitialize in case parent's non-null pointer was dup'ed*/ + tsk->delays = NULL; + if (delayacct_on) + __delayacct_tsk_init(tsk); +} + +/* Free tsk->delays. 
Called from bad fork and __put_task_struct + * where there's no risk of tsk->delays being accessed elsewhere + */ +static inline void delayacct_tsk_free(struct task_struct *tsk) +{ + if (tsk->delays) + kmem_cache_free(delayacct_cache, tsk->delays); + tsk->delays = NULL; +} + +static inline void delayacct_blkio_start(void) +{ + delayacct_set_flag(DELAYACCT_PF_BLKIO); + if (current->delays) + __delayacct_blkio_start(); +} + +static inline void delayacct_blkio_end(struct task_struct *p) +{ + if (p->delays) + __delayacct_blkio_end(p); + delayacct_clear_flag(DELAYACCT_PF_BLKIO); +} + +static inline int delayacct_add_tsk(struct taskstats *d, + struct task_struct *tsk) +{ + if (!delayacct_on || !tsk->delays) + return 0; + return __delayacct_add_tsk(d, tsk); +} + +static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) +{ + if (tsk->delays) + return __delayacct_blkio_ticks(tsk); + return 0; +} + +static inline void delayacct_freepages_start(void) +{ + if (current->delays) + __delayacct_freepages_start(); +} + +static inline void delayacct_freepages_end(void) +{ + if (current->delays) + __delayacct_freepages_end(); +} + +#else +static inline void delayacct_set_flag(int flag) +{} +static inline void delayacct_clear_flag(int flag) +{} +static inline void delayacct_init(void) +{} +static inline void delayacct_tsk_init(struct task_struct *tsk) +{} +static inline void delayacct_tsk_free(struct task_struct *tsk) +{} +static inline void delayacct_blkio_start(void) +{} +static inline void delayacct_blkio_end(struct task_struct *p) +{} +static inline int delayacct_add_tsk(struct taskstats *d, + struct task_struct *tsk) +{ return 0; } +static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) +{ return 0; } +static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) +{ return 0; } +static inline void delayacct_freepages_start(void) +{} +static inline void delayacct_freepages_end(void) +{} + +#endif /* CONFIG_TASK_DELAY_ACCT */ + +#endif diff --git a/include/linux/delayed_call.h b/include/linux/delayed_call.h new file mode 100644 index 000000000..a26c3b95b --- /dev/null +++ b/include/linux/delayed_call.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DELAYED_CALL_H +#define _DELAYED_CALL_H + +/* + * Poor man's closures; I wish we could've done them sanely polymorphic, + * but... + */ + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +#define DEFINE_DELAYED_CALL(name) struct delayed_call name = {NULL, NULL} + +/* I really wish we had closures with sane typechecking... */ +static inline void set_delayed_call(struct delayed_call *call, + void (*fn)(void *), void *arg) +{ + call->fn = fn; + call->arg = arg; +} + +static inline void do_delayed_call(struct delayed_call *call) +{ + if (call->fn) + call->fn(call->arg); +} + +static inline void clear_delayed_call(struct delayed_call *call) +{ + call->fn = NULL; +} +#endif diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h new file mode 100644 index 000000000..925214715 --- /dev/null +++ b/include/linux/dell-led.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DELL_LED_H__ +#define __DELL_LED_H__ + +int dell_micmute_led_set(int on); + +#endif diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h new file mode 100644 index 000000000..269521f14 --- /dev/null +++ b/include/linux/devcoredump.h @@ -0,0 +1,105 @@ +/* + * This file is provided under the GPLv2 license. 
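The delayed_call helper above is easiest to see in a small sketch (the function names and the kstrdup'd payload are illustrative only): the callback armed with set_delayed_call() runs exactly once when do_delayed_call() is invoked, and a DEFINE_DELAYED_CALL() that was never armed is a harmless no-op:

	/* sketch only: assumes <linux/delayed_call.h> and <linux/slab.h> */
	static void demo_release(void *arg)
	{
		kfree(arg);				/* cleanup callback */
	}

	static void demo_use_delayed_call(void)
	{
		DEFINE_DELAYED_CALL(done);		/* fn == NULL: initially a no-op */
		char *buf = kstrdup("payload", GFP_KERNEL);

		if (!buf)
			return;

		set_delayed_call(&done, demo_release, buf);
		/* ... hand 'buf' out and use it ... */
		do_delayed_call(&done);			/* invokes demo_release(buf) once */
	}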
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + */ +#ifndef __DEVCOREDUMP_H +#define __DEVCOREDUMP_H + +#include +#include +#include + +#include +#include + +/* + * _devcd_free_sgtable - free all the memory of the given scatterlist table + * (i.e. both pages and scatterlist instances) + * NOTE: if two tables allocated and chained using the sg_chain function then + * this function should be called only once on the first table + * @table: pointer to sg_table to free + */ +static inline void _devcd_free_sgtable(struct scatterlist *table) +{ + int i; + struct page *page; + struct scatterlist *iter; + struct scatterlist *delete_iter; + + /* free pages */ + iter = table; + for_each_sg(table, iter, sg_nents(table), i) { + page = sg_page(iter); + if (page) + __free_page(page); + } + + /* then free all chained tables */ + iter = table; + delete_iter = table; /* always points on a head of a table */ + while (!sg_is_last(iter)) { + iter++; + if (sg_is_chain(iter)) { + iter = sg_chain_ptr(iter); + kfree(delete_iter); + delete_iter = iter; + } + } + + /* free the last table */ + kfree(delete_iter); +} + + +#ifdef CONFIG_DEV_COREDUMP +void dev_coredumpv(struct device *dev, void *data, size_t datalen, + gfp_t gfp); + +void dev_coredumpm(struct device *dev, struct module *owner, + void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + void *data, size_t datalen), + void (*free)(void *data)); + +void dev_coredumpsg(struct device *dev, struct scatterlist *table, + size_t datalen, gfp_t gfp); +#else +static inline void dev_coredumpv(struct device *dev, void *data, + size_t datalen, gfp_t gfp) +{ + vfree(data); +} + +static inline void +dev_coredumpm(struct device *dev, struct module *owner, + void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + void *data, size_t datalen), + void (*free)(void *data)) +{ + free(data); +} + +static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table, + size_t datalen, gfp_t gfp) +{ + _devcd_free_sgtable(table); +} +#endif /* CONFIG_DEV_COREDUMP */ + +#endif /* __DEVCOREDUMP_H */ diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h new file mode 100644 index 000000000..4db00b02c --- /dev/null +++ b/include/linux/devfreq-event.h @@ -0,0 +1,191 @@ +/* + * devfreq-event: a framework to provide raw data and events of devfreq devices + * + * Copyright (C) 2014 Samsung Electronics + * Author: Chanwoo Choi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_DEVFREQ_EVENT_H__ +#define __LINUX_DEVFREQ_EVENT_H__ + +#include + +/** + * struct devfreq_event_dev - the devfreq-event device + * + * @node : Contain the devfreq-event device that have been registered. 
+ * @dev		: the device registered by devfreq-event class. dev.parent is
+ *		  the device using devfreq-event.
+ * @lock	: a mutex to protect accessing devfreq-event.
+ * @enable_count: the number of times the enable function has been called.
+ * @desc	: the description for devfreq-event device.
+ *
+ * This structure contains devfreq-event device information.
+ */
+struct devfreq_event_dev {
+	struct list_head node;
+
+	struct device dev;
+	struct mutex lock;
+	u32 enable_count;
+
+	const struct devfreq_event_desc *desc;
+};
+
+/**
+ * struct devfreq_event_data - the devfreq-event data
+ *
+ * @load_count	: load count of devfreq-event device for the given period.
+ * @total_count	: total count of devfreq-event device for the given period.
+ *		  each count may represent a clock cycle, a time unit
+ *		  (ns/us/...), or anything the device driver wants.
+ *		  Generally, utilization is load_count / total_count.
+ *
+ * This structure contains the data of the devfreq-event device for a polling
+ * period.
+ */
+struct devfreq_event_data {
+	unsigned long load_count;
+	unsigned long total_count;
+};
+
+/**
+ * struct devfreq_event_ops - the operations of devfreq-event device
+ *
+ * @enable	: Enable the devfreq-event device.
+ * @disable	: Disable the devfreq-event device.
+ * @reset	: Reset all settings of the devfreq-event device.
+ * @set_event	: Set the specific event type for the devfreq-event device.
+ * @get_event	: Get the result of the devfreq-event device with a specific
+ *		  event type.
+ *
+ * This structure contains devfreq-event device operations which can be
+ * implemented by devfreq-event device drivers.
+ */
+struct devfreq_event_ops {
+	/* Optional functions */
+	int (*enable)(struct devfreq_event_dev *edev);
+	int (*disable)(struct devfreq_event_dev *edev);
+	int (*reset)(struct devfreq_event_dev *edev);
+
+	/* Mandatory functions */
+	int (*set_event)(struct devfreq_event_dev *edev);
+	int (*get_event)(struct devfreq_event_dev *edev,
+			 struct devfreq_event_data *edata);
+};
+
+/**
+ * struct devfreq_event_desc - the descriptor of devfreq-event device
+ *
+ * @name	: the name of devfreq-event device.
+ * @driver_data	: the private data for devfreq-event driver.
+ * @ops		: the operations to control the devfreq-event device.
+ *
+ * Each devfreq-event device is described by this structure.
+ * This structure contains the various data for the devfreq-event device.
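On the provider side, a driver fills in the mandatory set_event/get_event operations and a descriptor. A minimal sketch, assuming a hypothetical "demo" performance-counter driver (all names and counter values below are illustrative):

	/* sketch only: assumes <linux/devfreq-event.h> */
	static int demo_ppmu_set_event(struct devfreq_event_dev *edev)
	{
		/* program the hypothetical hardware counters here */
		return 0;
	}

	static int demo_ppmu_get_event(struct devfreq_event_dev *edev,
				       struct devfreq_event_data *edata)
	{
		/* in a real driver these would be read back from hardware */
		edata->load_count  = 25;
		edata->total_count = 100;	/* i.e. 25% utilization */
		return 0;
	}

	static const struct devfreq_event_ops demo_ppmu_ops = {
		/* enable/disable/reset are optional and omitted here */
		.set_event = demo_ppmu_set_event,
		.get_event = demo_ppmu_get_event,
	};

	static struct devfreq_event_desc demo_ppmu_desc = {
		.name = "demo,ppmu",
		.ops  = &demo_ppmu_ops,
	};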
+ */ +struct devfreq_event_desc { + const char *name; + void *driver_data; + + const struct devfreq_event_ops *ops; +}; + +#if defined(CONFIG_PM_DEVFREQ_EVENT) +extern int devfreq_event_enable_edev(struct devfreq_event_dev *edev); +extern int devfreq_event_disable_edev(struct devfreq_event_dev *edev); +extern bool devfreq_event_is_enabled(struct devfreq_event_dev *edev); +extern int devfreq_event_set_event(struct devfreq_event_dev *edev); +extern int devfreq_event_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata); +extern int devfreq_event_reset_event(struct devfreq_event_dev *edev); +extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle( + struct device *dev, int index); +extern int devfreq_event_get_edev_count(struct device *dev); +extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev, + struct devfreq_event_desc *desc); +extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev); +extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev, + struct devfreq_event_desc *desc); +extern void devm_devfreq_event_remove_edev(struct device *dev, + struct devfreq_event_dev *edev); +static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev) +{ + return edev->desc->driver_data; +} +#else +static inline int devfreq_event_enable_edev(struct devfreq_event_dev *edev) +{ + return -EINVAL; +} + +static inline int devfreq_event_disable_edev(struct devfreq_event_dev *edev) +{ + return -EINVAL; +} + +static inline bool devfreq_event_is_enabled(struct devfreq_event_dev *edev) +{ + return false; +} + +static inline int devfreq_event_set_event(struct devfreq_event_dev *edev) +{ + return -EINVAL; +} + +static inline int devfreq_event_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + return -EINVAL; +} + +static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev) +{ + return -EINVAL; +} + +static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle( + struct device *dev, int index) +{ + return ERR_PTR(-EINVAL); +} + +static inline int devfreq_event_get_edev_count(struct device *dev) +{ + return -EINVAL; +} + +static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev, + struct devfreq_event_desc *desc) +{ + return ERR_PTR(-EINVAL); +} + +static inline int devfreq_event_remove_edev(struct devfreq_event_dev *edev) +{ + return -EINVAL; +} + +static inline struct devfreq_event_dev *devm_devfreq_event_add_edev( + struct device *dev, + struct devfreq_event_desc *desc) +{ + return ERR_PTR(-EINVAL); +} + +static inline void devm_devfreq_event_remove_edev(struct device *dev, + struct devfreq_event_dev *edev) +{ +} + +static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev) +{ + return NULL; +} +#endif /* CONFIG_PM_DEVFREQ_EVENT */ + +#endif /* __LINUX_DEVFREQ_EVENT_H__ */ diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h new file mode 100644 index 000000000..3aae5b3af --- /dev/null +++ b/include/linux/devfreq.h @@ -0,0 +1,389 @@ +/* + * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework + * for Non-CPU Devices. + * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
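On the consumer side, the enable/set_event/get_event helpers declared above combine into a simple measurement loop. A hedged sketch (the helper name and the percentage conversion are illustrative):

	/* sketch only: assumes <linux/devfreq-event.h> */
	static int demo_read_utilization(struct devfreq_event_dev *edev,
					 unsigned long *percent)
	{
		struct devfreq_event_data edata;
		int ret;

		ret = devfreq_event_enable_edev(edev);	/* refcounted enable */
		if (ret < 0)
			return ret;

		ret = devfreq_event_set_event(edev);	/* start a measurement */
		if (!ret)
			ret = devfreq_event_get_event(edev, &edata);
		if (!ret && edata.total_count)
			*percent = edata.load_count * 100 / edata.total_count;

		devfreq_event_disable_edev(edev);
		return ret;
	}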
+ */ + +#ifndef __LINUX_DEVFREQ_H__ +#define __LINUX_DEVFREQ_H__ + +#include +#include +#include + +#define DEVFREQ_NAME_LEN 16 + +/* DEVFREQ governor name */ +#define DEVFREQ_GOV_SIMPLE_ONDEMAND "simple_ondemand" +#define DEVFREQ_GOV_PERFORMANCE "performance" +#define DEVFREQ_GOV_POWERSAVE "powersave" +#define DEVFREQ_GOV_USERSPACE "userspace" +#define DEVFREQ_GOV_PASSIVE "passive" + +/* DEVFREQ notifier interface */ +#define DEVFREQ_TRANSITION_NOTIFIER (0) + +/* Transition notifiers of DEVFREQ_TRANSITION_NOTIFIER */ +#define DEVFREQ_PRECHANGE (0) +#define DEVFREQ_POSTCHANGE (1) + +struct devfreq; +struct devfreq_governor; + +/** + * struct devfreq_dev_status - Data given from devfreq user device to + * governors. Represents the performance + * statistics. + * @total_time: The total time represented by this instance of + * devfreq_dev_status + * @busy_time: The time that the device was working among the + * total_time. + * @current_frequency: The operating frequency. + * @private_data: An entry not specified by the devfreq framework. + * A device and a specific governor may have their + * own protocol with private_data. However, because + * this is governor-specific, a governor using this + * will be only compatible with devices aware of it. + */ +struct devfreq_dev_status { + /* both since the last measure */ + unsigned long total_time; + unsigned long busy_time; + unsigned long current_frequency; + void *private_data; +}; + +/* + * The resulting frequency should be at most this. (this bound is the + * least upper bound; thus, the resulting freq should be lower or same) + * If the flag is not set, the resulting frequency should be at most the + * bound (greatest lower bound) + */ +#define DEVFREQ_FLAG_LEAST_UPPER_BOUND 0x1 + +/** + * struct devfreq_dev_profile - Devfreq's user device profile + * @initial_freq: The operating frequency when devfreq_add_device() is + * called. + * @polling_ms: The polling interval in ms. 0 disables polling. + * @target: The device should set its operating frequency at + * freq or lowest-upper-than-freq value. If freq is + * higher than any operable frequency, set maximum. + * Before returning, target function should set + * freq at the current frequency. + * The "flags" parameter's possible values are + * explained above with "DEVFREQ_FLAG_*" macros. + * @get_dev_status: The device should provide the current performance + * status to devfreq. Governors are recommended not to + * use this directly. Instead, governors are recommended + * to use devfreq_update_stats() along with + * devfreq.last_status. + * @get_cur_freq: The device should provide the current frequency + * at which it is operating. + * @exit: An optional callback that is called when devfreq + * is removing the devfreq object due to error or + * from devfreq_remove_device() call. If the user + * has registered devfreq->nb at a notifier-head, + * this is the time to unregister it. + * @freq_table: Optional list of frequencies to support statistics + * and freq_table must be generated in ascending order. + * @max_state: The size of freq_table. 
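A driver usually provides the profile structure defined just below by supplying at least target() and, for load-based governors, get_dev_status(). A minimal sketch, assuming a hypothetical 200 MHz device whose load counters are faked here:

	/* sketch only: assumes <linux/devfreq.h>; all 'demo' names are illustrative */
	static int demo_target(struct device *dev, unsigned long *freq, u32 flags)
	{
		/* round *freq to a supported OPP and program the hardware here */
		return 0;
	}

	static int demo_get_dev_status(struct device *dev,
				       struct devfreq_dev_status *stat)
	{
		stat->busy_time         = 30;		/* read from hardware in practice */
		stat->total_time        = 100;
		stat->current_frequency = 200000000;	/* 200 MHz */
		return 0;
	}

	static struct devfreq_dev_profile demo_profile = {
		.initial_freq   = 200000000,
		.polling_ms     = 100,			/* sample the load every 100 ms */
		.target         = demo_target,
		.get_dev_status = demo_get_dev_status,
	};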
+ */
+struct devfreq_dev_profile {
+	unsigned long initial_freq;
+	unsigned int polling_ms;
+
+	int (*target)(struct device *dev, unsigned long *freq, u32 flags);
+	int (*get_dev_status)(struct device *dev,
+			      struct devfreq_dev_status *stat);
+	int (*get_cur_freq)(struct device *dev, unsigned long *freq);
+	void (*exit)(struct device *dev);
+
+	unsigned long *freq_table;
+	unsigned int max_state;
+};
+
+/**
+ * struct devfreq - Device devfreq structure
+ * @node:	list node - contains the devices with devfreq that have been
+ *		registered.
+ * @lock:	a mutex to protect accessing devfreq.
+ * @dev:	device registered by devfreq class. dev.parent is the device
+ *		using devfreq.
+ * @profile:	device-specific devfreq profile
+ * @governor:	method of choosing the frequency based on the usage.
+ * @governor_name:	devfreq governor name for use with this devfreq
+ * @nb:		notifier block used to notify devfreq object that it should
+ *		reevaluate operable frequencies. Devfreq users may register
+ *		devfreq.nb with the corresponding notifier call chain.
+ * @work:	delayed work for load monitoring.
+ * @previous_freq:	previously configured frequency value.
+ * @data:	Private data of the governor. The devfreq framework does not
+ *		touch this.
+ * @min_freq:	Limit minimum frequency requested by user (0: none)
+ * @max_freq:	Limit maximum frequency requested by user (0: none)
+ * @scaling_min_freq:	Limit minimum frequency requested by OPP interface
+ * @scaling_max_freq:	Limit maximum frequency requested by OPP interface
+ * @stop_polling:	devfreq polling status of a device.
+ * @total_trans:	Number of devfreq transitions
+ * @trans_table:	Statistics of devfreq transitions
+ * @time_in_state:	Statistics of devfreq states
+ * @last_stat_updated:	The last time the statistics were updated
+ * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
+ *
+ * This structure stores the devfreq information for a given device.
+ *
+ * Note that when a governor accesses entries in struct devfreq in its
+ * functions except for the context of callbacks defined in struct
+ * devfreq_governor, the governor should protect its access with the
+ * struct mutex lock in struct devfreq. A governor may use this mutex
+ * to protect its own private data in void *data as well.
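The locking rule in the note above boils down to something like the following sketch: outside the devfreq_governor callbacks, a governor helper takes devfreq->lock before touching the fields declared just below (the helper name is hypothetical):

	/* sketch only: assumes <linux/devfreq.h> and <linux/mutex.h> */
	static unsigned long demo_read_prev_freq(struct devfreq *devfreq)
	{
		unsigned long freq;

		mutex_lock(&devfreq->lock);	/* required outside governor callbacks */
		freq = devfreq->previous_freq;
		mutex_unlock(&devfreq->lock);

		return freq;
	}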
+ */ +struct devfreq { + struct list_head node; + + struct mutex lock; + struct device dev; + struct devfreq_dev_profile *profile; + const struct devfreq_governor *governor; + char governor_name[DEVFREQ_NAME_LEN]; + struct notifier_block nb; + struct delayed_work work; + + unsigned long previous_freq; + struct devfreq_dev_status last_status; + + void *data; /* private data for governors */ + + unsigned long min_freq; + unsigned long max_freq; + unsigned long scaling_min_freq; + unsigned long scaling_max_freq; + bool stop_polling; + + /* information for device frequency transition */ + unsigned int total_trans; + unsigned int *trans_table; + unsigned long *time_in_state; + unsigned long last_stat_updated; + + struct srcu_notifier_head transition_notifier_list; +}; + +struct devfreq_freqs { + unsigned long old; + unsigned long new; +}; + +#if defined(CONFIG_PM_DEVFREQ) +extern struct devfreq *devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data); +extern int devfreq_remove_device(struct devfreq *devfreq); +extern struct devfreq *devm_devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data); +extern void devm_devfreq_remove_device(struct device *dev, + struct devfreq *devfreq); + +/* Supposed to be called by PM callbacks */ +extern int devfreq_suspend_device(struct devfreq *devfreq); +extern int devfreq_resume_device(struct devfreq *devfreq); + +/* Helper functions for devfreq user device driver with OPP. */ +extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, + unsigned long *freq, u32 flags); +extern int devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq); +extern int devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq); +extern int devm_devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq); +extern void devm_devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq); +extern int devfreq_register_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern int devfreq_unregister_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern int devm_devfreq_register_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern void devm_devfreq_unregister_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, + int index); + +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) +/** + * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq + * and devfreq_add_device + * @upthreshold: If the load is over this value, the frequency jumps. + * Specify 0 to use the default. Valid value = 0 to 100. + * @downdifferential: If the load is under upthreshold - downdifferential, + * the governor may consider slowing the frequency down. + * Specify 0 to use the default. Valid value = 0 to 100. + * downdifferential < upthreshold must hold. + * + * If the fed devfreq_simple_ondemand_data pointer is NULL to the governor, + * the governor uses the default values. 
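Putting the pieces together: a device driver typically registers its profile from probe() with devm_devfreq_add_device(), optionally passing the simple_ondemand tunables documented above (the structure itself is defined just below). This sketch reuses the hypothetical demo_profile from the earlier profile example; the threshold values are illustrative:

	/* sketch only: assumes <linux/devfreq.h> and <linux/err.h> */
	static struct devfreq_simple_ondemand_data demo_gov_data = {
		.upthreshold      = 80,	/* ramp up above 80% load              */
		.downdifferential = 10,	/* ramp down below 70% (80 - 10) load  */
	};

	static int demo_probe_devfreq(struct device *dev)
	{
		struct devfreq *df;

		df = devm_devfreq_add_device(dev, &demo_profile,
					     DEVFREQ_GOV_SIMPLE_ONDEMAND,
					     &demo_gov_data);
		return IS_ERR(df) ? PTR_ERR(df) : 0;
	}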
+ */ +struct devfreq_simple_ondemand_data { + unsigned int upthreshold; + unsigned int downdifferential; +}; +#endif + +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) +/** + * struct devfreq_passive_data - void *data fed to struct devfreq + * and devfreq_add_device + * @parent: the devfreq instance of parent device. + * @get_target_freq: Optional callback, Returns desired operating frequency + * for the device using passive governor. That is called + * when passive governor should decide the next frequency + * by using the new frequency of parent devfreq device + * using governors except for passive governor. + * If the devfreq device has the specific method to decide + * the next frequency, should use this callback. + * @this: the devfreq instance of own device. + * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER list + * + * The devfreq_passive_data have to set the devfreq instance of parent + * device with governors except for the passive governor. But, don't need to + * initialize the 'this' and 'nb' field because the devfreq core will handle + * them. + */ +struct devfreq_passive_data { + /* Should set the devfreq instance of parent device */ + struct devfreq *parent; + + /* Optional callback to decide the next frequency of passvice device */ + int (*get_target_freq)(struct devfreq *this, unsigned long *freq); + + /* For passive governor's internal use. Don't need to set them */ + struct devfreq *this; + struct notifier_block nb; +}; +#endif + +#else /* !CONFIG_PM_DEVFREQ */ +static inline struct devfreq *devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data) +{ + return ERR_PTR(-ENOSYS); +} + +static inline int devfreq_remove_device(struct devfreq *devfreq) +{ + return 0; +} + +static inline struct devfreq *devm_devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void devm_devfreq_remove_device(struct device *dev, + struct devfreq *devfreq) +{ +} + +static inline int devfreq_suspend_device(struct devfreq *devfreq) +{ + return 0; +} + +static inline int devfreq_resume_device(struct devfreq *devfreq) +{ + return 0; +} + +static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, + unsigned long *freq, u32 flags) +{ + return ERR_PTR(-EINVAL); +} + +static inline int devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq) +{ + return -EINVAL; +} + +static inline int devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq) +{ + return -EINVAL; +} + +static inline int devm_devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq) +{ + return -EINVAL; +} + +static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq) +{ +} + +static inline int devfreq_register_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline int devfreq_unregister_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline int devm_devfreq_register_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline void devm_devfreq_unregister_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ +} + +static inline struct devfreq 
*devfreq_get_devfreq_by_phandle(struct device *dev, + int index) +{ + return ERR_PTR(-ENODEV); +} + +static inline int devfreq_update_stats(struct devfreq *df) +{ + return -EINVAL; +} +#endif /* CONFIG_PM_DEVFREQ */ + +#endif /* __LINUX_DEVFREQ_H__ */ diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h new file mode 100644 index 000000000..79a6e37a1 --- /dev/null +++ b/include/linux/devfreq_cooling.h @@ -0,0 +1,103 @@ +/* + * devfreq_cooling: Thermal cooling device implementation for devices using + * devfreq + * + * Copyright (C) 2014-2015 ARM Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DEVFREQ_COOLING_H__ +#define __DEVFREQ_COOLING_H__ + +#include +#include + + +/** + * struct devfreq_cooling_power - Devfreq cooling power ops + * @get_static_power: Take voltage, in mV, and return the static power + * in mW. If NULL, the static power is assumed + * to be 0. + * @get_dynamic_power: Take voltage, in mV, and frequency, in HZ, and + * return the dynamic power draw in mW. If NULL, + * a simple power model is used. + * @dyn_power_coeff: Coefficient for the simple dynamic power model in + * mW/(MHz mV mV). + * If get_dynamic_power() is NULL, then the + * dynamic power is calculated as + * @dyn_power_coeff * frequency * voltage^2 + * @get_real_power: When this is set, the framework uses it to ask the + * device driver for the actual power. + * Some devices have more sophisticated methods + * (like power counters) to approximate the actual power + * that they use. + * This function provides more accurate data to the + * thermal governor. When the driver does not provide + * such function, framework just uses pre-calculated + * table and scale the power by 'utilization' + * (based on 'busy_time' and 'total_time' taken from + * devfreq 'last_status'). + * The value returned by this function must be lower + * or equal than the maximum power value + * for the current state + * (which can be found in power_table[state]). + * When this interface is used, the power_table holds + * max total (static + dynamic) power value for each OPP. 
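When a driver can report measured power directly, it only needs to provide get_real_power in the structure defined just below and register the cooling device. A hedged sketch; the power value, the helper names and the use of dev->of_node are illustrative:

	/* sketch only: assumes <linux/devfreq_cooling.h> and <linux/thermal.h> */
	static int demo_get_real_power(struct devfreq *df, u32 *power,
				       unsigned long freq, unsigned long voltage)
	{
		*power = 120;	/* mW, e.g. read from a hypothetical power counter */
		return 0;
	}

	static struct devfreq_cooling_power demo_cooling_power = {
		.get_real_power = demo_get_real_power,
	};

	static struct thermal_cooling_device *
	demo_register_cooling(struct device *dev, struct devfreq *df)
	{
		/* caller should check the result with IS_ERR() */
		return of_devfreq_cooling_register_power(dev->of_node, df,
							 &demo_cooling_power);
	}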
+ */ +struct devfreq_cooling_power { + unsigned long (*get_static_power)(struct devfreq *devfreq, + unsigned long voltage); + unsigned long (*get_dynamic_power)(struct devfreq *devfreq, + unsigned long freq, + unsigned long voltage); + int (*get_real_power)(struct devfreq *df, u32 *power, + unsigned long freq, unsigned long voltage); + unsigned long dyn_power_coeff; +}; + +#ifdef CONFIG_DEVFREQ_THERMAL + +struct thermal_cooling_device * +of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, + struct devfreq_cooling_power *dfc_power); +struct thermal_cooling_device * +of_devfreq_cooling_register(struct device_node *np, struct devfreq *df); +struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df); +void devfreq_cooling_unregister(struct thermal_cooling_device *dfc); + +#else /* !CONFIG_DEVFREQ_THERMAL */ + +static inline struct thermal_cooling_device * +of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, + struct devfreq_cooling_power *dfc_power) +{ + return ERR_PTR(-EINVAL); +} + +static inline struct thermal_cooling_device * +of_devfreq_cooling_register(struct device_node *np, struct devfreq *df) +{ + return ERR_PTR(-EINVAL); +} + +static inline struct thermal_cooling_device * +devfreq_cooling_register(struct devfreq *df) +{ + return ERR_PTR(-EINVAL); +} + +static inline void +devfreq_cooling_unregister(struct thermal_cooling_device *dfc) +{ +} + +#endif /* CONFIG_DEVFREQ_THERMAL */ +#endif /* __DEVFREQ_COOLING_H__ */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h new file mode 100644 index 000000000..cde670864 --- /dev/null +++ b/include/linux/device-mapper.h @@ -0,0 +1,621 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * This file is released under the LGPL. + */ + +#ifndef _LINUX_DEVICE_MAPPER_H +#define _LINUX_DEVICE_MAPPER_H + +#include +#include +#include +#include + +struct dm_dev; +struct dm_target; +struct dm_table; +struct mapped_device; +struct bio_vec; + +/* + * Type of table, mapped_device's mempool and request_queue + */ +enum dm_queue_mode { + DM_TYPE_NONE = 0, + DM_TYPE_BIO_BASED = 1, + DM_TYPE_REQUEST_BASED = 2, + DM_TYPE_MQ_REQUEST_BASED = 3, + DM_TYPE_DAX_BIO_BASED = 4, + DM_TYPE_NVME_BIO_BASED = 5, +}; + +typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; + +union map_info { + void *ptr; +}; + +/* + * In the constructor the target parameter will already have the + * table, type, begin and len fields filled in. + */ +typedef int (*dm_ctr_fn) (struct dm_target *target, + unsigned int argc, char **argv); + +/* + * The destructor doesn't need to free the dm_target, just + * anything hidden ti->private. + */ +typedef void (*dm_dtr_fn) (struct dm_target *ti); + +/* + * The map function must return: + * < 0: error + * = 0: The target will handle the io by resubmitting it later + * = 1: simple remap complete + * = 2: The target wants to push back the io + */ +typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); +typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti, + struct request *rq, + union map_info *map_context, + struct request **clone); +typedef void (*dm_release_clone_request_fn) (struct request *clone, + union map_info *map_context); + +/* + * Returns: + * < 0 : error (currently ignored) + * 0 : ended successfully + * 1 : for some reason the io has still not completed (eg, + * multipath target might want to requeue a failed io). 
+ * 2 : The target wants to push back the io + */ +typedef int (*dm_endio_fn) (struct dm_target *ti, + struct bio *bio, blk_status_t *error); +typedef int (*dm_request_endio_fn) (struct dm_target *ti, + struct request *clone, blk_status_t error, + union map_info *map_context); + +typedef void (*dm_presuspend_fn) (struct dm_target *ti); +typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti); +typedef void (*dm_postsuspend_fn) (struct dm_target *ti); +typedef int (*dm_preresume_fn) (struct dm_target *ti); +typedef void (*dm_resume_fn) (struct dm_target *ti); + +typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, + unsigned status_flags, char *result, unsigned maxlen); + +typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv, + char *result, unsigned maxlen); + +typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); + +/* + * These iteration functions are typically used to check (and combine) + * properties of underlying devices. + * E.g. Does at least one underlying device support flush? + * Does any underlying device not support WRITE_SAME? + * + * The callout function is called once for each contiguous section of + * an underlying device. State can be maintained in *data. + * Return non-zero to stop iterating through any further devices. + */ +typedef int (*iterate_devices_callout_fn) (struct dm_target *ti, + struct dm_dev *dev, + sector_t start, sector_t len, + void *data); + +/* + * This function must iterate through each section of device used by the + * target until it encounters a non-zero return code, which it then returns. + * Returns zero if no callout returned non-zero. + */ +typedef int (*dm_iterate_devices_fn) (struct dm_target *ti, + iterate_devices_callout_fn fn, + void *data); + +typedef void (*dm_io_hints_fn) (struct dm_target *ti, + struct queue_limits *limits); + +/* + * Returns: + * 0: The target can handle the next I/O immediately. + * 1: The target can't handle the next I/O immediately. + */ +typedef int (*dm_busy_fn) (struct dm_target *ti); + +/* + * Returns: + * < 0 : error + * >= 0 : the number of bytes accessible at the address + */ +typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff, + long nr_pages, void **kaddr, pfn_t *pfn); +typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff, + void *addr, size_t bytes, struct iov_iter *i); +#define PAGE_SECTORS (PAGE_SIZE / 512) + +void dm_error(const char *message); + +struct dm_dev { + struct block_device *bdev; + struct dax_device *dax_dev; + fmode_t mode; + char name[16]; +}; + +dev_t dm_get_dev_t(const char *path); + +/* + * Constructors should call these functions to ensure destination devices + * are opened/closed correctly. 
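A hedged sketch of an iterate_devices_callout_fn matching the description above; the name and the "minimum length" check are invented, and such a callout would normally be handed to ti->type->iterate_devices() by table-validation code.

/*
 * Returns non-zero (stopping the iteration) if a contiguous section of an
 * underlying device is shorter than the sector count passed via *data.
 */
static int example_section_too_small(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	sector_t *min_len = data;

	return len < *min_len;
}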
+ */ +int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, + struct dm_dev **result); +void dm_put_device(struct dm_target *ti, struct dm_dev *d); + +/* + * Information about a target type + */ + +struct target_type { + uint64_t features; + const char *name; + struct module *module; + unsigned version[3]; + dm_ctr_fn ctr; + dm_dtr_fn dtr; + dm_map_fn map; + dm_clone_and_map_request_fn clone_and_map_rq; + dm_release_clone_request_fn release_clone_rq; + dm_endio_fn end_io; + dm_request_endio_fn rq_end_io; + dm_presuspend_fn presuspend; + dm_presuspend_undo_fn presuspend_undo; + dm_postsuspend_fn postsuspend; + dm_preresume_fn preresume; + dm_resume_fn resume; + dm_status_fn status; + dm_message_fn message; + dm_prepare_ioctl_fn prepare_ioctl; + dm_busy_fn busy; + dm_iterate_devices_fn iterate_devices; + dm_io_hints_fn io_hints; + dm_dax_direct_access_fn direct_access; + dm_dax_copy_iter_fn dax_copy_from_iter; + dm_dax_copy_iter_fn dax_copy_to_iter; + + /* For internal device-mapper use. */ + struct list_head list; +}; + +/* + * Target features + */ + +/* + * Any table that contains an instance of this target must have only one. + */ +#define DM_TARGET_SINGLETON 0x00000001 +#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON) + +/* + * Indicates that a target does not support read-only devices. + */ +#define DM_TARGET_ALWAYS_WRITEABLE 0x00000002 +#define dm_target_always_writeable(type) \ + ((type)->features & DM_TARGET_ALWAYS_WRITEABLE) + +/* + * Any device that contains a table with an instance of this target may never + * have tables containing any different target type. + */ +#define DM_TARGET_IMMUTABLE 0x00000004 +#define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE) + +/* + * Indicates that a target may replace any target; even immutable targets. + * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined. + */ +#define DM_TARGET_WILDCARD 0x00000008 +#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD) + +/* + * A target implements own bio data integrity. + */ +#define DM_TARGET_INTEGRITY 0x00000010 +#define dm_target_has_integrity(type) ((type)->features & DM_TARGET_INTEGRITY) + +/* + * A target passes integrity data to the lower device. + */ +#define DM_TARGET_PASSES_INTEGRITY 0x00000020 +#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY) + +/* + * Indicates that a target supports host-managed zoned block devices. + */ +#define DM_TARGET_ZONED_HM 0x00000040 +#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM) + +struct dm_target { + struct dm_table *table; + struct target_type *type; + + /* target limits */ + sector_t begin; + sector_t len; + + /* If non-zero, maximum size of I/O submitted to a target. */ + uint32_t max_io_len; + + /* + * A number of zero-length barrier bios that will be submitted + * to the target for the purpose of flushing cache. + * + * The bio number can be accessed with dm_bio_get_target_bio_nr. + * It is a responsibility of the target driver to remap these bios + * to the real underlying devices. + */ + unsigned num_flush_bios; + + /* + * The number of discard bios that will be submitted to the target. + * The bio number can be accessed with dm_bio_get_target_bio_nr. + */ + unsigned num_discard_bios; + + /* + * The number of secure erase bios that will be submitted to the target. + * The bio number can be accessed with dm_bio_get_target_bio_nr. 
+ */ + unsigned num_secure_erase_bios; + + /* + * The number of WRITE SAME bios that will be submitted to the target. + * The bio number can be accessed with dm_bio_get_target_bio_nr. + */ + unsigned num_write_same_bios; + + /* + * The number of WRITE ZEROES bios that will be submitted to the target. + * The bio number can be accessed with dm_bio_get_target_bio_nr. + */ + unsigned num_write_zeroes_bios; + + /* + * The minimum number of extra bytes allocated in each io for the + * target to use. + */ + unsigned per_io_data_size; + + /* target specific data */ + void *private; + + /* Used to provide an error string from the ctr */ + char *error; + + /* + * Set if this target needs to receive flushes regardless of + * whether or not its underlying devices have support. + */ + bool flush_supported:1; + + /* + * Set if this target needs to receive discards regardless of + * whether or not its underlying devices have support. + */ + bool discards_supported:1; + + /* + * Set if the target required discard bios to be split + * on max_io_len boundary. + */ + bool split_discard_bios:1; + + /* + * Set if we need to limit the number of in-flight bios when swapping. + */ + bool limit_swap_bios:1; +}; + +/* Each target can link one of these into the table */ +struct dm_target_callbacks { + struct list_head list; + int (*congested_fn) (struct dm_target_callbacks *, int); +}; + +void *dm_per_bio_data(struct bio *bio, size_t data_size); +struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); +unsigned dm_bio_get_target_bio_nr(const struct bio *bio); + +int dm_register_target(struct target_type *t); +void dm_unregister_target(struct target_type *t); + +/* + * Target argument parsing. + */ +struct dm_arg_set { + unsigned argc; + char **argv; +}; + +/* + * The minimum and maximum value of a numeric argument, together with + * the error message to use if the number is found to be outside that range. + */ +struct dm_arg { + unsigned min; + unsigned max; + char *error; +}; + +/* + * Validate the next argument, either returning it as *value or, if invalid, + * returning -EINVAL and setting *error. + */ +int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, + unsigned *value, char **error); + +/* + * Process the next argument as the start of a group containing between + * arg->min and arg->max further arguments. Either return the size as + * *num_args or, if invalid, return -EINVAL and set *error. + */ +int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, + unsigned *num_args, char **error); + +/* + * Return the current argument and shift to the next. + */ +const char *dm_shift_arg(struct dm_arg_set *as); + +/* + * Move through num_args arguments. + */ +void dm_consume_args(struct dm_arg_set *as, unsigned num_args); + +/*----------------------------------------------------------------- + * Functions for creating and manipulating mapped devices. + * Drop the reference with dm_put when you finish with the object. + *---------------------------------------------------------------*/ + +/* + * DM_ANY_MINOR chooses the next available minor number. + */ +#define DM_ANY_MINOR (-1) +int dm_create(int minor, struct mapped_device **md); + +/* + * Reference counting for md. + */ +struct mapped_device *dm_get_md(dev_t dev); +void dm_get(struct mapped_device *md); +int dm_hold(struct mapped_device *md); +void dm_put(struct mapped_device *md); + +/* + * An arbitrary pointer may be stored alongside a mapped device. 
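Putting the constructor, destructor and map hooks together with dm_get_device()/dm_put_device() and dm_register_target(): a minimal, hypothetical pass-through target might look like the sketch below. It is modelled on the documented hooks rather than on any in-tree target; the names are invented, <linux/module.h>, <linux/slab.h> and <linux/bio.h> are assumed in addition to this header, and dm_table_get_mode(), dm_target_offset() and DM_MAPIO_REMAPPED are provided further down in this file.

struct example_ctx {
	struct dm_dev *dev;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_ctx *ec;

	if (argc != 1) {
		ti->error = "Exactly one device argument required";
		return -EINVAL;
	}

	ec = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev)) {
		ti->error = "Device lookup failed";
		kfree(ec);
		return -EINVAL;
	}

	ti->private = ec;
	return 0;
}

static void example_dtr(struct dm_target *ti)
{
	struct example_ctx *ec = ti->private;

	dm_put_device(ti, ec->dev);
	kfree(ec);
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_ctx *ec = ti->private;

	/* Redirect the bio to the underlying device; dm core resubmits it. */
	bio_set_dev(bio, ec->dev->bdev);
	if (bio_sectors(bio))
		bio->bi_iter.bi_sector =
			dm_target_offset(ti, bio->bi_iter.bi_sector);

	return DM_MAPIO_REMAPPED;
}

static struct target_type example_target = {
	.name    = "example-passthrough",
	.version = {1, 0, 0},
	.module  = THIS_MODULE,
	.ctr     = example_ctr,
	.dtr     = example_dtr,
	.map     = example_map,
};

/* Module init/exit would call dm_register_target()/dm_unregister_target(). */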
+ */ +void dm_set_mdptr(struct mapped_device *md, void *ptr); +void *dm_get_mdptr(struct mapped_device *md); + +/* + * A device can still be used while suspended, but I/O is deferred. + */ +int dm_suspend(struct mapped_device *md, unsigned suspend_flags); +int dm_resume(struct mapped_device *md); + +/* + * Event functions. + */ +uint32_t dm_get_event_nr(struct mapped_device *md); +int dm_wait_event(struct mapped_device *md, int event_nr); +uint32_t dm_next_uevent_seq(struct mapped_device *md); +void dm_uevent_add(struct mapped_device *md, struct list_head *elist); + +/* + * Info functions. + */ +const char *dm_device_name(struct mapped_device *md); +int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); +struct gendisk *dm_disk(struct mapped_device *md); +int dm_suspended(struct dm_target *ti); +int dm_post_suspending(struct dm_target *ti); +int dm_noflush_suspending(struct dm_target *ti); +void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); +void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, + sector_t start); +union map_info *dm_get_rq_mapinfo(struct request *rq); + +struct queue_limits *dm_get_queue_limits(struct mapped_device *md); + +/* + * Geometry functions. + */ +int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); +int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); + +/*----------------------------------------------------------------- + * Functions for manipulating device-mapper tables. + *---------------------------------------------------------------*/ + +/* + * First create an empty table. + */ +int dm_table_create(struct dm_table **result, fmode_t mode, + unsigned num_targets, struct mapped_device *md); + +/* + * Then call this once for each target. + */ +int dm_table_add_target(struct dm_table *t, const char *type, + sector_t start, sector_t len, char *params); + +/* + * Target_ctr should call this if it needs to add any callbacks. + */ +void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb); + +/* + * Target can use this to set the table's type. + * Can only ever be called from a target's ctr. + * Useful for "hybrid" target (supports both bio-based + * and request-based). + */ +void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type); + +/* + * Finally call this to make the table ready for use. + */ +int dm_table_complete(struct dm_table *t); + +/* + * Destroy the table when finished. + */ +void dm_table_destroy(struct dm_table *t); + +/* + * Target may require that it is never sent I/O larger than len. + */ +int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); + +/* + * Table reference counting. + */ +struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx); +void dm_put_live_table(struct mapped_device *md, int srcu_idx); +void dm_sync_table(struct mapped_device *md); + +/* + * Queries + */ +sector_t dm_table_get_size(struct dm_table *t); +unsigned int dm_table_get_num_targets(struct dm_table *t); +fmode_t dm_table_get_mode(struct dm_table *t); +struct mapped_device *dm_table_get_md(struct dm_table *t); + +/* + * Trigger an event. + */ +void dm_table_event(struct dm_table *t); + +/* + * Run the queue for request-based targets. + */ +void dm_table_run_md_queue_async(struct dm_table *t); + +/* + * The device must be suspended before calling this method. + * Returns the previous table, which the caller must destroy. 
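The SRCU-based table reference counting declared above is used as a get/put bracket around any inspection of the live table; a small hedged sketch (the function name is invented) of reading one property:

static sector_t example_live_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	sector_t size = 0;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (map)
		size = dm_table_get_size(map);
	dm_put_live_table(md, srcu_idx);

	return size;
}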
+ */ +struct dm_table *dm_swap_table(struct mapped_device *md, + struct dm_table *t); + +/* + * A wrapper around vmalloc. + */ +void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); + +/*----------------------------------------------------------------- + * Macros. + *---------------------------------------------------------------*/ +#define DM_NAME "device-mapper" + +#define DM_RATELIMIT(pr_func, fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&rs)) \ + pr_func(DM_FMT(fmt), ##__VA_ARGS__); \ +} while (0) + +#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n" + +#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__) + +#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__) +#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__) +#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__) +#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__) +#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) +#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__) + +#ifdef CONFIG_DM_DEBUG +#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__) +#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__) +#else +#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__) +#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__) +#endif + +#define DMEMIT(x...) sz += ((sz >= maxlen) ? \ + 0 : scnprintf(result + sz, maxlen - sz, x)) + +/* + * Definitions of return values from target end_io function. + */ +#define DM_ENDIO_DONE 0 +#define DM_ENDIO_INCOMPLETE 1 +#define DM_ENDIO_REQUEUE 2 +#define DM_ENDIO_DELAY_REQUEUE 3 + +/* + * Definitions of return values from target map function. + */ +#define DM_MAPIO_SUBMITTED 0 +#define DM_MAPIO_REMAPPED 1 +#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE +#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE +#define DM_MAPIO_KILL 4 + +#define dm_sector_div64(x, y)( \ +{ \ + u64 _res; \ + (x) = div64_u64_rem(x, y, &_res); \ + _res; \ +} \ +) + +/* + * Ceiling(n / sz) + */ +#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz)) + +#define dm_sector_div_up(n, sz) ( \ +{ \ + sector_t _r = ((n) + (sz) - 1); \ + sector_div(_r, (sz)); \ + _r; \ +} \ +) + +/* + * ceiling(n / size) * size + */ +#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) + +#define dm_array_too_big(fixed, obj, num) \ + ((num) > (UINT_MAX - (fixed)) / (obj)) + +/* + * Sector offset taken relative to the start of the target instead of + * relative to the start of the device. + */ +#define dm_target_offset(ti, sector) ((sector) - (ti)->begin) + +static inline sector_t to_sector(unsigned long long n) +{ + return (n >> SECTOR_SHIFT); +} + +static inline unsigned long to_bytes(sector_t n) +{ + return (n << SECTOR_SHIFT); +} + +#endif /* _LINUX_DEVICE_MAPPER_H */ diff --git a/include/linux/device.h b/include/linux/device.h new file mode 100644 index 000000000..37e359d81 --- /dev/null +++ b/include/linux/device.h @@ -0,0 +1,1642 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * device.h - generic, centralized driver model + * + * Copyright (c) 2001-2003 Patrick Mochel + * Copyright (c) 2004-2009 Greg Kroah-Hartman + * Copyright (c) 2008-2009 Novell Inc. + * + * See Documentation/driver-model/ for more information. 
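Rounding off the device-mapper macros above: DM_FMT() pastes in DM_MSG_PREFIX, so a target must define that before using DMERR()/DMWARN(), and DMEMIT() assumes local variables named sz, result and maxlen. A hedged status-routine sketch, reusing the invented example_ctx from the earlier pass-through sketch:

#define DM_MSG_PREFIX "example-passthrough"	/* consumed by DM_FMT() */

static void example_status(struct dm_target *ti, status_type_t type,
			   unsigned status_flags, char *result, unsigned maxlen)
{
	struct example_ctx *ec = ti->private;
	unsigned sz = 0;		/* DMEMIT() accumulates into sz */

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%s", ec->dev->name);
		break;
	}
}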
+ */ + +#ifndef _DEVICE_H_ +#define _DEVICE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct device; +struct device_private; +struct device_driver; +struct driver_private; +struct module; +struct class; +struct subsys_private; +struct bus_type; +struct device_node; +struct fwnode_handle; +struct iommu_ops; +struct iommu_group; +struct iommu_fwspec; +struct dev_pin_info; + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(struct bus_type *bus, char *buf); + ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); +}; + +#define BUS_ATTR(_name, _mode, _show, _store) \ + struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define BUS_ATTR_RW(_name) \ + struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) +#define BUS_ATTR_RO(_name) \ + struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) + +extern int __must_check bus_create_file(struct bus_type *, + struct bus_attribute *); +extern void bus_remove_file(struct bus_type *, struct bus_attribute *); + +/** + * struct bus_type - The bus type of the device + * + * @name: The name of the bus. + * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id). + * @dev_root: Default device to use as the parent. + * @bus_groups: Default attributes of the bus. + * @dev_groups: Default attributes of the devices on the bus. + * @drv_groups: Default attributes of the device drivers on the bus. + * @match: Called, perhaps multiple times, whenever a new device or driver + * is added for this bus. It should return a positive value if the + * given device can be handled by the given driver and zero + * otherwise. It may also return error code if determining that + * the driver supports the device is not possible. In case of + * -EPROBE_DEFER it will queue the device for deferred probing. + * @uevent: Called when a device is added, removed, or a few other things + * that generate uevents to add the environment variables. + * @probe: Called when a new device or driver add to this bus, and callback + * the specific driver's probe to initial the matched device. + * @remove: Called when a device removed from this bus. + * @shutdown: Called at shut-down time to quiesce the device. + * + * @online: Called to put the device back online (after offlining it). + * @offline: Called to put the device offline for hot-removal. May fail. + * + * @suspend: Called when a device on this bus wants to go to sleep mode. + * @resume: Called to bring a device on this bus out of sleep mode. + * @num_vf: Called to find out how many virtual functions a device on this + * bus supports. + * @dma_configure: Called to setup DMA configuration on a device on + * this bus. + * @pm: Power management operations of this bus, callback the specific + * device driver's pm-ops. + * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU + * driver implementations to a bus and allow the driver to do + * bus-specific setup + * @p: The private data of the driver core, only the driver core can + * touch this. + * @lock_key: Lock class key for use by the lock validator + * @need_parent_lock: When probing or removing a device on this bus, the + * device core should lock the device's parent. + * + * A bus is a channel between the processor and one or more devices. For the + * purposes of the device model, all devices are connected via a bus, even if + * it is an internal, virtual, "platform" bus. 
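A hedged sketch of a read-only bus attribute built with the BUS_ATTR_RO() helper above; the attribute name and value are invented, and __ATTR_RO() wires the show routine up by name.

/* Appears as /sys/bus/<bus>/version once added with bus_create_file(). */
static ssize_t version_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "1.0\n");
}
static BUS_ATTR_RO(version);

/* After bus_register(): bus_create_file(bus, &bus_attr_version); */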
Buses can plug into each other. + * A USB controller is usually a PCI device, for example. The device model + * represents the actual connections between buses and the devices they control. + * A bus is represented by the bus_type structure. It contains the name, the + * default attributes, the bus' methods, PM operations, and the driver core's + * private data. + */ +struct bus_type { + const char *name; + const char *dev_name; + struct device *dev_root; + const struct attribute_group **bus_groups; + const struct attribute_group **dev_groups; + const struct attribute_group **drv_groups; + + int (*match)(struct device *dev, struct device_driver *drv); + int (*uevent)(struct device *dev, struct kobj_uevent_env *env); + int (*probe)(struct device *dev); + int (*remove)(struct device *dev); + void (*shutdown)(struct device *dev); + + int (*online)(struct device *dev); + int (*offline)(struct device *dev); + + int (*suspend)(struct device *dev, pm_message_t state); + int (*resume)(struct device *dev); + + int (*num_vf)(struct device *dev); + + int (*dma_configure)(struct device *dev); + + const struct dev_pm_ops *pm; + + const struct iommu_ops *iommu_ops; + + struct subsys_private *p; + struct lock_class_key lock_key; + + bool need_parent_lock; +}; + +extern int __must_check bus_register(struct bus_type *bus); + +extern void bus_unregister(struct bus_type *bus); + +extern int __must_check bus_rescan_devices(struct bus_type *bus); + +/* iterator helpers for buses */ +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; +void subsys_dev_iter_init(struct subsys_dev_iter *iter, + struct bus_type *subsys, + struct device *start, + const struct device_type *type); +struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); +void subsys_dev_iter_exit(struct subsys_dev_iter *iter); + +int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, + int (*fn)(struct device *dev, void *data)); +struct device *bus_find_device(struct bus_type *bus, struct device *start, + void *data, + int (*match)(struct device *dev, void *data)); +struct device *bus_find_device_by_name(struct bus_type *bus, + struct device *start, + const char *name); +struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, + struct device *hint); +int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, + void *data, int (*fn)(struct device_driver *, void *)); +void bus_sort_breadthfirst(struct bus_type *bus, + int (*compare)(const struct device *a, + const struct device *b)); +/* + * Bus notifiers: Get notified of addition/removal of devices + * and binding/unbinding of drivers to devices. + * In the long run, it should be a replacement for the platform + * notify hooks. + */ +struct notifier_block; + +extern int bus_register_notifier(struct bus_type *bus, + struct notifier_block *nb); +extern int bus_unregister_notifier(struct bus_type *bus, + struct notifier_block *nb); + +/* All 4 notifers below get called with the target struct device * + * as an argument. Note that those functions are likely to be called + * with the device lock held in the core, so be careful. 
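A minimal, hypothetical bus built on the structure above; only .name and .match are filled in, which is enough for bus_register(). The names are invented and <linux/string.h> is assumed for strcmp().

static int example_bus_match(struct device *dev, struct device_driver *drv)
{
	/* Real buses usually compare IDs or match tables; match by name here. */
	return strcmp(dev_name(dev), drv->name) == 0;
}

static struct bus_type example_bus_type = {
	.name  = "example",
	.match = example_bus_match,
};

/*
 * Module init would call bus_register(&example_bus_type) and check the
 * return value; module exit calls bus_unregister(&example_bus_type).
 */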
+ */ +#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ +#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */ +#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */ +#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be + bound */ +#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */ +#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be + unbound */ +#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound + from the device */ +#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */ + +extern struct kset *bus_get_kset(struct bus_type *bus); +extern struct klist *bus_get_device_klist(struct bus_type *bus); + +/** + * enum probe_type - device driver probe type to try + * Device drivers may opt in for special handling of their + * respective probe routines. This tells the core what to + * expect and prefer. + * + * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well + * whether probed synchronously or asynchronously. + * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which + * probing order is not essential for booting the system may + * opt into executing their probes asynchronously. + * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need + * their probe routines to run synchronously with driver and + * device registration (with the exception of -EPROBE_DEFER + * handling - re-probing always ends up being done asynchronously). + * + * Note that the end goal is to switch the kernel to use asynchronous + * probing by default, so annotating drivers with + * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us + * to speed up boot process while we are validating the rest of the + * drivers. + */ +enum probe_type { + PROBE_DEFAULT_STRATEGY, + PROBE_PREFER_ASYNCHRONOUS, + PROBE_FORCE_SYNCHRONOUS, +}; + +/** + * struct device_driver - The basic device driver structure + * @name: Name of the device driver. + * @bus: The bus which the device of this driver belongs to. + * @owner: The module owner. + * @mod_name: Used for built-in modules. + * @suppress_bind_attrs: Disables bind/unbind via sysfs. + * @probe_type: Type of the probe (synchronous or asynchronous) to use. + * @of_match_table: The open firmware table. + * @acpi_match_table: The ACPI match table. + * @probe: Called to query the existence of a specific device, + * whether this driver can work with it, and bind the driver + * to a specific device. + * @remove: Called when the device is removed from the system to + * unbind a device from this driver. + * @shutdown: Called at shut-down time to quiesce the device. + * @suspend: Called to put the device to sleep mode. Usually to a + * low power state. + * @resume: Called to bring a device from sleep mode. + * @groups: Default attributes that get created by the driver core + * automatically. + * @pm: Power management operations of the device which matched + * this driver. + * @coredump: Called when sysfs entry is written to. The device driver + * is expected to call the dev_coredump API resulting in a + * uevent. + * @p: Driver core's private data, no one other than the driver + * core can touch this. + * + * The device driver-model tracks all of the drivers known to the system. + * The main reason for this tracking is to enable the driver core to match + * up drivers with new devices. Once drivers are known objects within the + * system, however, a number of other things become possible. 
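A hedged sketch of a bus notifier using the BUS_NOTIFY_* actions above together with bus_register_notifier(); the handler names are invented, <linux/notifier.h> supplies struct notifier_block and NOTIFY_OK, and dev_info() is declared further down in this header.

static int example_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;	/* notifiers receive the target device */

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		dev_info(dev, "added to example bus\n");
		break;
	case BUS_NOTIFY_REMOVED_DEVICE:
		dev_info(dev, "removed from example bus\n");
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_bus_nb = {
	.notifier_call = example_bus_notify,
};

/* Registered with bus_register_notifier(&example_bus_type, &example_bus_nb). */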
Device drivers + * can export information and configuration variables that are independent + * of any specific device. + */ +struct device_driver { + const char *name; + struct bus_type *bus; + + struct module *owner; + const char *mod_name; /* used for built-in modules */ + + bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ + enum probe_type probe_type; + + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + + int (*probe) (struct device *dev); + int (*remove) (struct device *dev); + void (*shutdown) (struct device *dev); + int (*suspend) (struct device *dev, pm_message_t state); + int (*resume) (struct device *dev); + const struct attribute_group **groups; + + const struct dev_pm_ops *pm; + void (*coredump) (struct device *dev); + + struct driver_private *p; +}; + + +extern int __must_check driver_register(struct device_driver *drv); +extern void driver_unregister(struct device_driver *drv); + +extern struct device_driver *driver_find(const char *name, + struct bus_type *bus); +extern int driver_probe_done(void); +extern void wait_for_device_probe(void); + +/* sysfs interface for exporting driver attributes */ + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *driver, char *buf); + ssize_t (*store)(struct device_driver *driver, const char *buf, + size_t count); +}; + +#define DRIVER_ATTR_RW(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) +#define DRIVER_ATTR_RO(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_RO(_name) +#define DRIVER_ATTR_WO(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_WO(_name) + +extern int __must_check driver_create_file(struct device_driver *driver, + const struct driver_attribute *attr); +extern void driver_remove_file(struct device_driver *driver, + const struct driver_attribute *attr); + +extern int __must_check driver_for_each_device(struct device_driver *drv, + struct device *start, + void *data, + int (*fn)(struct device *dev, + void *)); +struct device *driver_find_device(struct device_driver *drv, + struct device *start, void *data, + int (*match)(struct device *dev, void *data)); + +void driver_deferred_probe_add(struct device *dev); +int driver_deferred_probe_check_state(struct device *dev); + +/** + * struct subsys_interface - interfaces to device functions + * @name: name of the device function + * @subsys: subsytem of the devices to attach to + * @node: the list of functions registered at the subsystem + * @add_dev: device hookup to device function handler + * @remove_dev: device hookup to device function handler + * + * Simple interfaces attached to a subsystem. Multiple interfaces can + * attach to a subsystem and its devices. Unlike drivers, they do not + * exclusively claim or control devices. Interfaces usually represent + * a specific functionality of a subsystem/class of devices. 
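A minimal, hypothetical driver for the example bus sketched earlier, registered through driver_register(); probe() and remove() are stubs, and THIS_MODULE comes from <linux/module.h>.

static int example_drv_probe(struct device *dev)
{
	/* Bind-time setup would go here (see the devres sketch further down). */
	return 0;
}

static int example_drv_remove(struct device *dev)
{
	return 0;
}

static struct device_driver example_driver = {
	.name   = "example-widget",
	.bus    = &example_bus_type,
	.owner  = THIS_MODULE,
	.probe  = example_drv_probe,
	.remove = example_drv_remove,
};

/* driver_register(&example_driver) at module init, driver_unregister() at exit. */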
+ */ +struct subsys_interface { + const char *name; + struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *dev, struct subsys_interface *sif); + void (*remove_dev)(struct device *dev, struct subsys_interface *sif); +}; + +int subsys_interface_register(struct subsys_interface *sif); +void subsys_interface_unregister(struct subsys_interface *sif); + +int subsys_system_register(struct bus_type *subsys, + const struct attribute_group **groups); +int subsys_virtual_register(struct bus_type *subsys, + const struct attribute_group **groups); + +/** + * struct class - device classes + * @name: Name of the class. + * @owner: The module owner. + * @class_groups: Default attributes of this class. + * @dev_groups: Default attributes of the devices that belong to the class. + * @dev_kobj: The kobject that represents this class and links it into the hierarchy. + * @dev_uevent: Called when a device is added, removed from this class, or a + * few other things that generate uevents to add the environment + * variables. + * @devnode: Callback to provide the devtmpfs. + * @class_release: Called to release this class. + * @dev_release: Called to release the device. + * @shutdown_pre: Called at shut-down time before driver shutdown. + * @ns_type: Callbacks so sysfs can detemine namespaces. + * @namespace: Namespace of the device belongs to this class. + * @get_ownership: Allows class to specify uid/gid of the sysfs directories + * for the devices belonging to the class. Usually tied to + * device's namespace. + * @pm: The default device power management operations of this class. + * @p: The private data of the driver core, no one other than the + * driver core can touch this. + * + * A class is a higher-level view of a device that abstracts out low-level + * implementation details. Drivers may see a SCSI disk or an ATA disk, but, + * at the class level, they are all simply disks. Classes allow user space + * to work with devices based on what they do, rather than how they are + * connected or how they work. 
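A hedged sketch of a statically defined class matching the description above (the class name is invented); class_register() and the dynamically allocating class_create() alternative are both provided just below.

static struct class example_class = {
	.name  = "example",
	.owner = THIS_MODULE,
};

/*
 * class_register(&example_class) makes /sys/class/example appear and
 * class_unregister(&example_class) removes it again.  Alternatively,
 * class_create(THIS_MODULE, "example") allocates and registers a class
 * in one call.
 */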
+ */ +struct class { + const char *name; + struct module *owner; + + const struct attribute_group **class_groups; + const struct attribute_group **dev_groups; + struct kobject *dev_kobj; + + int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); + char *(*devnode)(struct device *dev, umode_t *mode); + + void (*class_release)(struct class *class); + void (*dev_release)(struct device *dev); + + int (*shutdown_pre)(struct device *dev); + + const struct kobj_ns_type_operations *ns_type; + const void *(*namespace)(struct device *dev); + + void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid); + + const struct dev_pm_ops *pm; + + struct subsys_private *p; +}; + +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +extern struct kobject *sysfs_dev_block_kobj; +extern struct kobject *sysfs_dev_char_kobj; +extern int __must_check __class_register(struct class *class, + struct lock_class_key *key); +extern void class_unregister(struct class *class); + +/* This is a #define to keep the compiler from merging different + * instances of the __key variable */ +#define class_register(class) \ +({ \ + static struct lock_class_key __key; \ + __class_register(class, &__key); \ +}) + +struct class_compat; +struct class_compat *class_compat_register(const char *name); +void class_compat_unregister(struct class_compat *cls); +int class_compat_create_link(struct class_compat *cls, struct device *dev, + struct device *device_link); +void class_compat_remove_link(struct class_compat *cls, struct device *dev, + struct device *device_link); + +extern void class_dev_iter_init(struct class_dev_iter *iter, + struct class *class, + struct device *start, + const struct device_type *type); +extern struct device *class_dev_iter_next(struct class_dev_iter *iter); +extern void class_dev_iter_exit(struct class_dev_iter *iter); + +extern int class_for_each_device(struct class *class, struct device *start, + void *data, + int (*fn)(struct device *dev, void *data)); +extern struct device *class_find_device(struct class *class, + struct device *start, const void *data, + int (*match)(struct device *, const void *)); + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(struct class *class, struct class_attribute *attr, + char *buf); + ssize_t (*store)(struct class *class, struct class_attribute *attr, + const char *buf, size_t count); +}; + +#define CLASS_ATTR_RW(_name) \ + struct class_attribute class_attr_##_name = __ATTR_RW(_name) +#define CLASS_ATTR_RO(_name) \ + struct class_attribute class_attr_##_name = __ATTR_RO(_name) +#define CLASS_ATTR_WO(_name) \ + struct class_attribute class_attr_##_name = __ATTR_WO(_name) + +extern int __must_check class_create_file_ns(struct class *class, + const struct class_attribute *attr, + const void *ns); +extern void class_remove_file_ns(struct class *class, + const struct class_attribute *attr, + const void *ns); + +static inline int __must_check class_create_file(struct class *class, + const struct class_attribute *attr) +{ + return class_create_file_ns(class, attr, NULL); +} + +static inline void class_remove_file(struct class *class, + const struct class_attribute *attr) +{ + return class_remove_file_ns(class, attr, NULL); +} + +/* Simple class attribute that is just a static string */ +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; + +/* Currently read-only only */ +#define _CLASS_ATTR_STRING(_name, _mode, _str) \ + { __ATTR(_name, _mode, show_class_attr_string, NULL), _str 
} +#define CLASS_ATTR_STRING(_name, _mode, _str) \ + struct class_attribute_string class_attr_##_name = \ + _CLASS_ATTR_STRING(_name, _mode, _str) + +extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, + char *buf); + +struct class_interface { + struct list_head node; + struct class *class; + + int (*add_dev) (struct device *, struct class_interface *); + void (*remove_dev) (struct device *, struct class_interface *); +}; + +extern int __must_check class_interface_register(struct class_interface *); +extern void class_interface_unregister(struct class_interface *); + +extern struct class * __must_check __class_create(struct module *owner, + const char *name, + struct lock_class_key *key); +extern void class_destroy(struct class *cls); + +/* This is a #define to keep the compiler from merging different + * instances of the __key variable */ +#define class_create(owner, name) \ +({ \ + static struct lock_class_key __key; \ + __class_create(owner, name, &__key); \ +}) + +/* + * The type of device, "struct device" is embedded in. A class + * or bus can contain devices of different types + * like "partitions" and "disks", "mouse" and "event". + * This identifies the device type and carries type-specific + * information, equivalent to the kobj_type of a kobject. + * If "name" is specified, the uevent will contain it in + * the DEVTYPE variable. + */ +struct device_type { + const char *name; + const struct attribute_group **groups; + int (*uevent)(struct device *dev, struct kobj_uevent_env *env); + char *(*devnode)(struct device *dev, umode_t *mode, + kuid_t *uid, kgid_t *gid); + void (*release)(struct device *dev); + + const struct dev_pm_ops *pm; +}; + +/* interface for exporting device attributes */ +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *dev, struct device_attribute *attr, + char *buf); + ssize_t (*store)(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +}; + +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; + +ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +ssize_t device_show_int(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t device_store_int(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); + +#define DEVICE_ATTR(_name, _mode, _show, _store) \ + struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \ + struct device_attribute dev_attr_##_name = \ + __ATTR_PREALLOC(_name, _mode, _show, _store) +#define DEVICE_ATTR_RW(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RW(_name) +#define DEVICE_ATTR_RO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RO(_name) +#define DEVICE_ATTR_WO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_WO(_name) +#define DEVICE_ULONG_ATTR(_name, _mode, _var) \ + struct dev_ext_attribute dev_attr_##_name = \ + { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) } +#define DEVICE_INT_ATTR(_name, _mode, _var) \ + struct dev_ext_attribute dev_attr_##_name = \ + { __ATTR(_name, 
_mode, device_show_int, device_store_int), &(_var) } +#define DEVICE_BOOL_ATTR(_name, _mode, _var) \ + struct dev_ext_attribute dev_attr_##_name = \ + { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) } +#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ + struct device_attribute dev_attr_##_name = \ + __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) + +extern int device_create_file(struct device *device, + const struct device_attribute *entry); +extern void device_remove_file(struct device *dev, + const struct device_attribute *attr); +extern bool device_remove_file_self(struct device *dev, + const struct device_attribute *attr); +extern int __must_check device_create_bin_file(struct device *dev, + const struct bin_attribute *attr); +extern void device_remove_bin_file(struct device *dev, + const struct bin_attribute *attr); + +/* device resource management */ +typedef void (*dr_release_t)(struct device *dev, void *res); +typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); + +#ifdef CONFIG_DEBUG_DEVRES +extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, + int nid, const char *name) __malloc; +#define devres_alloc(release, size, gfp) \ + __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) +#define devres_alloc_node(release, size, gfp, nid) \ + __devres_alloc_node(release, size, gfp, nid, #release) +#else +extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, + int nid) __malloc; +static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) +{ + return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); +} +#endif + +extern void devres_for_each_res(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data, + void (*fn)(struct device *, void *, void *), + void *data); +extern void devres_free(void *res); +extern void devres_add(struct device *dev, void *res); +extern void *devres_find(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern void *devres_get(struct device *dev, void *new_res, + dr_match_t match, void *match_data); +extern void *devres_remove(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern int devres_destroy(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern int devres_release(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); + +/* devres group */ +extern void * __must_check devres_open_group(struct device *dev, void *id, + gfp_t gfp); +extern void devres_close_group(struct device *dev, void *id); +extern void devres_remove_group(struct device *dev, void *id); +extern int devres_release_group(struct device *dev, void *id); + +/* managed devm_k.alloc/kfree for device drivers */ +extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; +extern __printf(3, 0) +char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, + va_list ap) __malloc; +extern __printf(3, 4) +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 
__malloc; +static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) +{ + return devm_kmalloc(dev, size, gfp | __GFP_ZERO); +} +static inline void *devm_kmalloc_array(struct device *dev, + size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + + return devm_kmalloc(dev, bytes, flags); +} +static inline void *devm_kcalloc(struct device *dev, + size_t n, size_t size, gfp_t flags) +{ + return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); +} +extern void devm_kfree(struct device *dev, void *p); +extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; +extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); + +extern unsigned long devm_get_free_pages(struct device *dev, + gfp_t gfp_mask, unsigned int order); +extern void devm_free_pages(struct device *dev, unsigned long addr); + +void __iomem *devm_ioremap_resource(struct device *dev, + const struct resource *res); + +void __iomem *devm_of_iomap(struct device *dev, + struct device_node *node, int index, + resource_size_t *size); + +/* allows to add/remove a custom action to devres stack */ +int devm_add_action(struct device *dev, void (*action)(void *), void *data); +void devm_remove_action(struct device *dev, void (*action)(void *), void *data); + +static inline int devm_add_action_or_reset(struct device *dev, + void (*action)(void *), void *data) +{ + int ret; + + ret = devm_add_action(dev, action, data); + if (ret) + action(data); + + return ret; +} + +/** + * devm_alloc_percpu - Resource-managed alloc_percpu + * @dev: Device to allocate per-cpu memory for + * @type: Type to allocate per-cpu memory for + * + * Managed alloc_percpu. Per-cpu memory allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. + */ +#define devm_alloc_percpu(dev, type) \ + ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \ + __alignof__(type))) + +void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, + size_t align); +void devm_free_percpu(struct device *dev, void __percpu *pdata); + +struct device_dma_parameters { + /* + * a low level driver may set these to teach IOMMU code about + * sg limitations. 
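A hedged probe() sketch using the devres helpers above: devm_kzalloc()/devm_kasprintf() allocations are released automatically on driver detach, and devm_add_action_or_reset() queues a custom undo step. All names below are invented; dev_set_drvdata() and dev_name() are declared further down in this header.

struct example_priv {
	char *label;
};

static void example_quiesce_hw(void *data)
{
	/* data is the example_priv below; hardware teardown would go here. */
}

static int example_devm_probe(struct device *dev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->label = devm_kasprintf(dev, GFP_KERNEL, "example:%s",
				     dev_name(dev));
	if (!priv->label)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	/* Runs example_quiesce_hw() on detach, or immediately on failure. */
	return devm_add_action_or_reset(dev, example_quiesce_hw, priv);
}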
+ */ + unsigned int max_segment_size; + unsigned long segment_boundary_mask; +}; + +/** + * struct device_connection - Device Connection Descriptor + * @endpoint: The names of the two devices connected together + * @id: Unique identifier for the connection + * @list: List head, private, for internal use only + */ +struct device_connection { + const char *endpoint[2]; + const char *id; + struct list_head list; +}; + +void *device_connection_find_match(struct device *dev, const char *con_id, + void *data, + void *(*match)(struct device_connection *con, + int ep, void *data)); + +struct device *device_connection_find(struct device *dev, const char *con_id); + +void device_connection_add(struct device_connection *con); +void device_connection_remove(struct device_connection *con); + +/** + * device_connections_add - Add multiple device connections at once + * @cons: Zero terminated array of device connection descriptors + */ +static inline void device_connections_add(struct device_connection *cons) +{ + struct device_connection *c; + + for (c = cons; c->endpoint[0]; c++) + device_connection_add(c); +} + +/** + * device_connections_remove - Remove multiple device connections at once + * @cons: Zero terminated array of device connection descriptors + */ +static inline void device_connections_remove(struct device_connection *cons) +{ + struct device_connection *c; + + for (c = cons; c->endpoint[0]; c++) + device_connection_remove(c); +} + +/** + * enum device_link_state - Device link states. + * @DL_STATE_NONE: The presence of the drivers is not being tracked. + * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present. + * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not. + * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present). + * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present. + * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding. + */ +enum device_link_state { + DL_STATE_NONE = -1, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE, + DL_STATE_CONSUMER_PROBE, + DL_STATE_ACTIVE, + DL_STATE_SUPPLIER_UNBIND, +}; + +/* + * Device link flags. + * + * STATELESS: The core will not remove this link automatically. + * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind. + * PM_RUNTIME: If set, the runtime PM framework will use this link. + * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. + * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind. + * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds. + * MANAGED: The core tracks presence of supplier/consumer drivers (internal). + */ +#define DL_FLAG_STATELESS BIT(0) +#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) +#define DL_FLAG_PM_RUNTIME BIT(2) +#define DL_FLAG_RPM_ACTIVE BIT(3) +#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4) +#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5) +#define DL_FLAG_MANAGED BIT(6) + +/** + * struct device_link - Device link representation. + * @supplier: The device on the supplier end of the link. + * @s_node: Hook to the supplier device's list of links to consumers. + * @consumer: The device on the consumer end of the link. + * @c_node: Hook to the consumer device's list of links to suppliers. + * @status: The state of the link (with respect to the presence of drivers). + * @flags: Link flags. + * @rpm_active: Whether or not the consumer device is runtime-PM-active. + * @kref: Count repeated addition of the same link. 
+ * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. + */ +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; +#ifdef CONFIG_SRCU + struct rcu_head rcu_head; +#endif + bool supplier_preactivated; /* Owned by consumer probe. */ +}; + +/** + * enum dl_dev_state - Device driver presence tracking information. + * @DL_DEV_NO_DRIVER: There is no driver attached to the device. + * @DL_DEV_PROBING: A driver is probing. + * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device. + * @DL_DEV_UNBINDING: The driver is unbinding from the device. + */ +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING, + DL_DEV_DRIVER_BOUND, + DL_DEV_UNBINDING, +}; + +/** + * struct dev_links_info - Device data related to device links. + * @suppliers: List of links to supplier devices. + * @consumers: List of links to consumer devices. + * @status: Driver status information. + */ +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + enum dl_dev_state status; +}; + +/** + * struct device - The basic device structure + * @parent: The device's "parent" device, the device to which it is attached. + * In most cases, a parent device is some sort of bus or host + * controller. If parent is NULL, the device, is a top-level device, + * which is not usually what you want. + * @p: Holds the private data of the driver core portions of the device. + * See the comment of the struct device_private for detail. + * @kobj: A top-level, abstract class from which other classes are derived. + * @init_name: Initial name of the device. + * @type: The type of device. + * This identifies the device type and carries type-specific + * information. + * @mutex: Mutex to synchronize calls to its driver. + * @bus: Type of bus device is on. + * @driver: Which driver has allocated this + * @platform_data: Platform data specific to the device. + * Example: For devices on custom boards, as typical of embedded + * and SOC based hardware, Linux often uses platform_data to point + * to board-specific structures describing devices and how they + * are wired. That can include what ports are available, chip + * variants, which GPIO pins act in what additional roles, and so + * on. This shrinks the "Board Support Packages" (BSPs) and + * minimizes board-specific #ifdefs in drivers. + * @driver_data: Private pointer for driver specific info. + * @links: Links to suppliers and consumers of this device. + * @power: For device power management. + * See Documentation/driver-api/pm/devices.rst for details. + * @pm_domain: Provide callbacks that are executed during system suspend, + * hibernation, system resume and during runtime PM transitions + * along with subsystem-level and driver-level callbacks. + * @pins: For device pin management. + * See Documentation/driver-api/pinctl.rst for details. + * @msi_list: Hosts MSI descriptors + * @msi_domain: The generic MSI domain this device is using. + * @numa_node: NUMA node this device is close to. + * @dma_ops: DMA mapping operations for this device. + * @dma_mask: Dma mask (if dma'ble device). + * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all + * hardware supports 64-bit addresses for consistent allocations + * such descriptors. + * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA + * limit than the device itself supports. 
+ * @dma_pfn_offset: offset of DMA memory range relatively of RAM + * @dma_parms: A low level driver may set these to teach IOMMU code about + * segment limitations. + * @dma_pools: Dma pools (if dma'ble device). + * @dma_mem: Internal for coherent mem override. + * @cma_area: Contiguous memory area for dma allocations + * @archdata: For arch-specific additions. + * @of_node: Associated device tree node. + * @fwnode: Associated device node supplied by platform firmware. + * @devt: For creating the sysfs "dev". + * @id: device instance + * @devres_lock: Spinlock to protect the resource of the device. + * @devres_head: The resources list of the device. + * @knode_class: The node used to add the device to the class list. + * @class: The class of the device. + * @groups: Optional attribute groups. + * @release: Callback to free the device after all references have + * gone away. This should be set by the allocator of the + * device (i.e. the bus driver that discovered the device). + * @iommu_group: IOMMU group the device belongs to. + * @iommu_fwspec: IOMMU-specific properties supplied by firmware. + * + * @offline_disabled: If set, the device is permanently online. + * @offline: Set after successful invocation of bus type's .offline(). + * @of_node_reused: Set if the device-tree node is shared with an ancestor + * device. + * + * At the lowest level, every device in a Linux system is represented by an + * instance of struct device. The device structure contains the information + * that the device model core needs to model the system. Most subsystems, + * however, track additional information about the devices they host. As a + * result, it is rare for devices to be represented by bare device structures; + * instead, that structure, like kobject structures, is usually embedded within + * a higher-level representation of the device. + */ +struct device { + struct device *parent; + + struct device_private *p; + + struct kobject kobj; + const char *init_name; /* initial name of the device */ + const struct device_type *type; + + struct mutex mutex; /* mutex to synchronize calls to + * its driver. + */ + + struct bus_type *bus; /* type of bus device is on */ + struct device_driver *driver; /* which driver has allocated this + device */ + void *platform_data; /* Platform specific data, device + core doesn't touch it */ + void *driver_data; /* Driver data, set and get with + dev_set/get_drvdata */ + struct dev_links_info links; + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + struct irq_domain *msi_domain; +#endif +#ifdef CONFIG_PINCTRL + struct dev_pin_info *pins; +#endif +#ifdef CONFIG_GENERIC_MSI_IRQ + raw_spinlock_t msi_lock; + struct list_head msi_list; +#endif + +#ifdef CONFIG_NUMA + int numa_node; /* NUMA node this device is close to */ +#endif + const struct dma_map_ops *dma_ops; + u64 *dma_mask; /* dma mask (if dma'able device) */ + u64 coherent_dma_mask;/* Like dma_mask, but for + alloc_coherent mappings as + not all hardware supports + 64 bit addresses for consistent + allocations such descriptors. 
*/ + u64 bus_dma_mask; /* upstream dma_mask constraint */ + unsigned long dma_pfn_offset; + + struct device_dma_parameters *dma_parms; + + struct list_head dma_pools; /* dma pools (if dma'ble) */ + + struct dma_coherent_mem *dma_mem; /* internal for coherent mem + override */ +#ifdef CONFIG_DMA_CMA + struct cma *cma_area; /* contiguous memory area for dma + allocations */ +#endif + /* arch specific additions */ + struct dev_archdata archdata; + + struct device_node *of_node; /* associated device tree node */ + struct fwnode_handle *fwnode; /* firmware device node */ + + dev_t devt; /* dev_t, creates the sysfs "dev" */ + u32 id; /* device instance */ + + spinlock_t devres_lock; + struct list_head devres_head; + + struct klist_node knode_class; + struct class *class; + const struct attribute_group **groups; /* optional groups */ + + void (*release)(struct device *dev); + struct iommu_group *iommu_group; + struct iommu_fwspec *iommu_fwspec; + + bool offline_disabled:1; + bool offline:1; + bool of_node_reused:1; +}; + +static inline struct device *kobj_to_dev(struct kobject *kobj) +{ + return container_of(kobj, struct device, kobj); +} + +/* Get the wakeup routines, which depend on struct device */ +#include + +static inline const char *dev_name(const struct device *dev) +{ + /* Use the init name until the kobject becomes available */ + if (dev->init_name) + return dev->init_name; + + return kobject_name(&dev->kobj); +} + +extern __printf(2, 3) +int dev_set_name(struct device *dev, const char *name, ...); + +#ifdef CONFIG_NUMA +static inline int dev_to_node(struct device *dev) +{ + return dev->numa_node; +} +static inline void set_dev_node(struct device *dev, int node) +{ + dev->numa_node = node; +} +#else +static inline int dev_to_node(struct device *dev) +{ + return -1; +} +static inline void set_dev_node(struct device *dev, int node) +{ +} +#endif + +static inline struct irq_domain *dev_get_msi_domain(const struct device *dev) +{ +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + return dev->msi_domain; +#else + return NULL; +#endif +} + +static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) +{ +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + dev->msi_domain = d; +#endif +} + +static inline void *dev_get_drvdata(const struct device *dev) +{ + return dev->driver_data; +} + +static inline void dev_set_drvdata(struct device *dev, void *data) +{ + dev->driver_data = data; +} + +static inline struct pm_subsys_data *dev_to_psd(struct device *dev) +{ + return dev ? 
dev->power.subsys_data : NULL; +} + +static inline unsigned int dev_get_uevent_suppress(const struct device *dev) +{ + return dev->kobj.uevent_suppress; +} + +static inline void dev_set_uevent_suppress(struct device *dev, int val) +{ + dev->kobj.uevent_suppress = val; +} + +static inline int device_is_registered(struct device *dev) +{ + return dev->kobj.state_in_sysfs; +} + +static inline void device_enable_async_suspend(struct device *dev) +{ + if (!dev->power.is_prepared) + dev->power.async_suspend = true; +} + +static inline void device_disable_async_suspend(struct device *dev) +{ + if (!dev->power.is_prepared) + dev->power.async_suspend = false; +} + +static inline bool device_async_suspend_enabled(struct device *dev) +{ + return !!dev->power.async_suspend; +} + +static inline void dev_pm_syscore_device(struct device *dev, bool val) +{ +#ifdef CONFIG_PM_SLEEP + dev->power.syscore = val; +#endif +} + +static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags) +{ + dev->power.driver_flags = flags; +} + +static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags) +{ + return !!(dev->power.driver_flags & flags); +} + +static inline void device_lock(struct device *dev) +{ + mutex_lock(&dev->mutex); +} + +static inline int device_lock_interruptible(struct device *dev) +{ + return mutex_lock_interruptible(&dev->mutex); +} + +static inline int device_trylock(struct device *dev) +{ + return mutex_trylock(&dev->mutex); +} + +static inline void device_unlock(struct device *dev) +{ + mutex_unlock(&dev->mutex); +} + +static inline void device_lock_assert(struct device *dev) +{ + lockdep_assert_held(&dev->mutex); +} + +static inline struct device_node *dev_of_node(struct device *dev) +{ + if (!IS_ENABLED(CONFIG_OF)) + return NULL; + return dev->of_node; +} + +void driver_init(void); + +/* + * High level routines for use by the bus drivers + */ +extern int __must_check device_register(struct device *dev); +extern void device_unregister(struct device *dev); +extern void device_initialize(struct device *dev); +extern int __must_check device_add(struct device *dev); +extern void device_del(struct device *dev); +extern int device_for_each_child(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +extern int device_for_each_child_reverse(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +extern struct device *device_find_child(struct device *dev, void *data, + int (*match)(struct device *dev, void *data)); +extern int device_rename(struct device *dev, const char *new_name); +extern int device_move(struct device *dev, struct device *new_parent, + enum dpm_order dpm_order); +extern const char *device_get_devnode(struct device *dev, + umode_t *mode, kuid_t *uid, kgid_t *gid, + const char **tmp); + +static inline bool device_supports_offline(struct device *dev) +{ + return dev->bus && dev->bus->offline && dev->bus->online; +} + +extern void lock_device_hotplug(void); +extern void unlock_device_hotplug(void); +extern int lock_device_hotplug_sysfs(void); +extern int device_offline(struct device *dev); +extern int device_online(struct device *dev); +extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +void device_set_of_node_from_dev(struct device *dev, const struct device *dev2); + +static inline int dev_num_vf(struct device *dev) +{ + if (dev->bus && dev->bus->num_vf) + return dev->bus->num_vf(dev); + return 0; +} 
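
A minimal, self-contained sketch (illustrative only, not part of the patch hunk above) of how a driver might combine the device.h helpers declared here -- device_initialize(), dev_set_name(), dev_set_drvdata(), device_add(), put_device() and device_for_each_child(). The example_* names are hypothetical and chosen for illustration; the calls and the error-handling pattern (put_device() after a failed device_add()) follow the API as declared in this header.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_child {
	struct device dev;
	int id;
};

static void example_child_release(struct device *dev)
{
	/* Called once the last reference is dropped; frees the embedding object. */
	kfree(container_of(dev, struct example_child, dev));
}

static int example_add_child(struct device *parent, int id, void *drvdata)
{
	struct example_child *child;
	int ret;

	child = kzalloc(sizeof(*child), GFP_KERNEL);
	if (!child)
		return -ENOMEM;

	child->id = id;
	child->dev.parent = parent;
	child->dev.release = example_child_release;	/* must be set before registration */
	device_initialize(&child->dev);

	ret = dev_set_name(&child->dev, "%s-child%d", dev_name(parent), id);
	if (ret)
		goto out_put;

	dev_set_drvdata(&child->dev, drvdata);	/* retrieved later via dev_get_drvdata() */

	ret = device_add(&child->dev);
	if (ret)
		goto out_put;

	return 0;

out_put:
	/* device_add() did not succeed: drop the reference taken by device_initialize(). */
	put_device(&child->dev);
	return ret;
}

static int example_count_children(struct device *dev, void *data)
{
	(*(int *)data)++;
	return 0;
}

/* Usage sketch: int n = 0; device_for_each_child(parent, &n, example_count_children); */
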
+ +/* + * Root device objects for grouping under /sys/devices + */ +extern struct device *__root_device_register(const char *name, + struct module *owner); + +/* This is a macro to avoid include problems with THIS_MODULE */ +#define root_device_register(name) \ + __root_device_register(name, THIS_MODULE) + +extern void root_device_unregister(struct device *root); + +static inline void *dev_get_platdata(const struct device *dev) +{ + return dev->platform_data; +} + +/* + * Manual binding of a device to driver. See drivers/base/bus.c + * for information on use. + */ +extern int __must_check device_bind_driver(struct device *dev); +extern void device_release_driver(struct device *dev); +extern int __must_check device_attach(struct device *dev); +extern int __must_check driver_attach(struct device_driver *drv); +extern void device_initial_probe(struct device *dev); +extern int __must_check device_reprobe(struct device *dev); + +extern bool device_is_bound(struct device *dev); + +/* + * Easy functions for dynamically creating devices on the fly + */ +extern __printf(5, 0) +struct device *device_create_vargs(struct class *cls, struct device *parent, + dev_t devt, void *drvdata, + const char *fmt, va_list vargs); +extern __printf(5, 6) +struct device *device_create(struct class *cls, struct device *parent, + dev_t devt, void *drvdata, + const char *fmt, ...); +extern __printf(6, 7) +struct device *device_create_with_groups(struct class *cls, + struct device *parent, dev_t devt, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...); +extern void device_destroy(struct class *cls, dev_t devt); + +extern int __must_check device_add_groups(struct device *dev, + const struct attribute_group **groups); +extern void device_remove_groups(struct device *dev, + const struct attribute_group **groups); + +static inline int __must_check device_add_group(struct device *dev, + const struct attribute_group *grp) +{ + const struct attribute_group *groups[] = { grp, NULL }; + + return device_add_groups(dev, groups); +} + +static inline void device_remove_group(struct device *dev, + const struct attribute_group *grp) +{ + const struct attribute_group *groups[] = { grp, NULL }; + + return device_remove_groups(dev, groups); +} + +extern int __must_check devm_device_add_groups(struct device *dev, + const struct attribute_group **groups); +extern void devm_device_remove_groups(struct device *dev, + const struct attribute_group **groups); +extern int __must_check devm_device_add_group(struct device *dev, + const struct attribute_group *grp); +extern void devm_device_remove_group(struct device *dev, + const struct attribute_group *grp); + +/* + * Platform "fixup" functions - allow the platform to have their say + * about devices and actions that the general device layer doesn't + * know about. + */ +/* Notify platform of device discovery */ +extern int (*platform_notify)(struct device *dev); + +extern int (*platform_notify_remove)(struct device *dev); + + +/* + * get_device - atomically increment the reference count for the device. 
+ * + */ +extern struct device *get_device(struct device *dev); +extern void put_device(struct device *dev); +extern bool kill_device(struct device *dev); + +#ifdef CONFIG_DEVTMPFS +extern int devtmpfs_create_node(struct device *dev); +extern int devtmpfs_delete_node(struct device *dev); +extern int devtmpfs_mount(const char *mntdir); +#else +static inline int devtmpfs_create_node(struct device *dev) { return 0; } +static inline int devtmpfs_delete_node(struct device *dev) { return 0; } +static inline int devtmpfs_mount(const char *mountpoint) { return 0; } +#endif + +/* drivers/base/power/shutdown.c */ +extern void device_shutdown(void); + +/* debugging and troubleshooting/diagnostic helpers. */ +extern const char *dev_driver_string(const struct device *dev); + +/* Device links interface. */ +struct device_link *device_link_add(struct device *consumer, + struct device *supplier, u32 flags); +void device_link_del(struct device_link *link); +void device_link_remove(void *consumer, struct device *supplier); + +#ifndef dev_fmt +#define dev_fmt(fmt) fmt +#endif + +#ifdef CONFIG_PRINTK + +__printf(3, 0) +int dev_vprintk_emit(int level, const struct device *dev, + const char *fmt, va_list args); +__printf(3, 4) +int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); + +__printf(3, 4) +void dev_printk(const char *level, const struct device *dev, + const char *fmt, ...); +__printf(2, 3) +void _dev_emerg(const struct device *dev, const char *fmt, ...); +__printf(2, 3) +void _dev_alert(const struct device *dev, const char *fmt, ...); +__printf(2, 3) +void _dev_crit(const struct device *dev, const char *fmt, ...); +__printf(2, 3) +void _dev_err(const struct device *dev, const char *fmt, ...); +__printf(2, 3) +void _dev_warn(const struct device *dev, const char *fmt, ...); +__printf(2, 3) +void _dev_notice(const struct device *dev, const char *fmt, ...); +__printf(2, 3) +void _dev_info(const struct device *dev, const char *fmt, ...); + +#else + +static inline __printf(3, 0) +int dev_vprintk_emit(int level, const struct device *dev, + const char *fmt, va_list args) +{ return 0; } +static inline __printf(3, 4) +int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) +{ return 0; } + +static inline void __dev_printk(const char *level, const struct device *dev, + struct va_format *vaf) +{} +static inline __printf(3, 4) +void dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) +{} + +static inline __printf(2, 3) +void _dev_emerg(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void _dev_crit(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void _dev_alert(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void _dev_err(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void _dev_warn(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void _dev_notice(const struct device *dev, const char *fmt, ...) +{} +static inline __printf(2, 3) +void _dev_info(const struct device *dev, const char *fmt, ...) +{} + +#endif + +/* + * #defines for all the dev_ macros to prefix with whatever + * possible use of #define dev_fmt(fmt) ... + */ + +#define dev_emerg(dev, fmt, ...) \ + _dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_crit(dev, fmt, ...) \ + _dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_alert(dev, fmt, ...) 
\ + _dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_err(dev, fmt, ...) \ + _dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_warn(dev, fmt, ...) \ + _dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_notice(dev, fmt, ...) \ + _dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_info(dev, fmt, ...) \ + _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__) + +#if defined(CONFIG_DYNAMIC_DEBUG) +#define dev_dbg(dev, fmt, ...) \ + dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) +#elif defined(DEBUG) +#define dev_dbg(dev, fmt, ...) \ + dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__) +#else +#define dev_dbg(dev, fmt, ...) \ +({ \ + if (0) \ + dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ +}) +#endif + +#ifdef CONFIG_PRINTK +#define dev_level_once(dev_level, dev, fmt, ...) \ +do { \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + dev_level(dev, fmt, ##__VA_ARGS__); \ + } \ +} while (0) +#else +#define dev_level_once(dev_level, dev, fmt, ...) \ +do { \ + if (0) \ + dev_level(dev, fmt, ##__VA_ARGS__); \ +} while (0) +#endif + +#define dev_emerg_once(dev, fmt, ...) \ + dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) +#define dev_alert_once(dev, fmt, ...) \ + dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) +#define dev_crit_once(dev, fmt, ...) \ + dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) +#define dev_err_once(dev, fmt, ...) \ + dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) +#define dev_warn_once(dev, fmt, ...) \ + dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) +#define dev_notice_once(dev, fmt, ...) \ + dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) +#define dev_info_once(dev, fmt, ...) \ + dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) +#define dev_dbg_once(dev, fmt, ...) \ + dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__) + +#define dev_level_ratelimited(dev_level, dev, fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + dev_level(dev, fmt, ##__VA_ARGS__); \ +} while (0) + +#define dev_emerg_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__) +#define dev_alert_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__) +#define dev_crit_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__) +#define dev_err_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__) +#define dev_warn_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__) +#define dev_notice_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__) +#define dev_info_ratelimited(dev, fmt, ...) \ + dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__) +#if defined(CONFIG_DYNAMIC_DEBUG) +/* descriptor check is first to prevent flooding with "callbacks suppressed" */ +#define dev_dbg_ratelimited(dev, fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ + __ratelimit(&_rs)) \ + __dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt), \ + ##__VA_ARGS__); \ +} while (0) +#elif defined(DEBUG) +#define dev_dbg_ratelimited(dev, fmt, ...) 
\ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ +} while (0) +#else +#define dev_dbg_ratelimited(dev, fmt, ...) \ +do { \ + if (0) \ + dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ +} while (0) +#endif + +#ifdef VERBOSE_DEBUG +#define dev_vdbg dev_dbg +#else +#define dev_vdbg(dev, fmt, ...) \ +({ \ + if (0) \ + dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ +}) +#endif + +/* + * dev_WARN*() acts like dev_printk(), but with the key difference of + * using WARN/WARN_ONCE to include file/line information and a backtrace. + */ +#define dev_WARN(dev, format, arg...) \ + WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg); + +#define dev_WARN_ONCE(dev, condition, format, arg...) \ + WARN_ONCE(condition, "%s %s: " format, \ + dev_driver_string(dev), dev_name(dev), ## arg) + +/* Create alias, so I can be autoloaded. */ +#define MODULE_ALIAS_CHARDEV(major,minor) \ + MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) +#define MODULE_ALIAS_CHARDEV_MAJOR(major) \ + MODULE_ALIAS("char-major-" __stringify(major) "-*") + +#ifdef CONFIG_SYSFS_DEPRECATED +extern long sysfs_deprecated; +#else +#define sysfs_deprecated 0 +#endif + +/** + * module_driver() - Helper macro for drivers that don't do anything + * special in module init/exit. This eliminates a lot of boilerplate. + * Each module may only use this macro once, and calling it replaces + * module_init() and module_exit(). + * + * @__driver: driver name + * @__register: register function for this driver type + * @__unregister: unregister function for this driver type + * @...: Additional arguments to be passed to __register and __unregister. + * + * Use this macro to construct bus specific macros for registering + * drivers, and do not use it on its own. + */ +#define module_driver(__driver, __register, __unregister, ...) \ +static int __init __driver##_init(void) \ +{ \ + return __register(&(__driver) , ##__VA_ARGS__); \ +} \ +module_init(__driver##_init); \ +static void __exit __driver##_exit(void) \ +{ \ + __unregister(&(__driver) , ##__VA_ARGS__); \ +} \ +module_exit(__driver##_exit); + +/** + * builtin_driver() - Helper macro for drivers that don't do anything + * special in init and have no exit. This eliminates some boilerplate. + * Each driver may only use this macro once, and calling it replaces + * device_initcall (or in some cases, the legacy __initcall). This is + * meant to be a direct parallel of module_driver() above but without + * the __exit stuff that is not used for builtin cases. + * + * @__driver: driver name + * @__register: register function for this driver type + * @...: Additional arguments to be passed to __register + * + * Use this macro to construct bus specific macros for registering + * drivers, and do not use it on its own. + */ +#define builtin_driver(__driver, __register, ...) 
\ +static int __init __driver##_init(void) \ +{ \ + return __register(&(__driver) , ##__VA_ARGS__); \ +} \ +device_initcall(__driver##_init); + +#endif /* _DEVICE_H_ */ diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h new file mode 100644 index 000000000..8557efe09 --- /dev/null +++ b/include/linux/device_cgroup.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include + +#define DEVCG_ACC_MKNOD 1 +#define DEVCG_ACC_READ 2 +#define DEVCG_ACC_WRITE 4 +#define DEVCG_ACC_MASK (DEVCG_ACC_MKNOD | DEVCG_ACC_READ | DEVCG_ACC_WRITE) + +#define DEVCG_DEV_BLOCK 1 +#define DEVCG_DEV_CHAR 2 +#define DEVCG_DEV_ALL 4 /* this represents all devices */ + +#ifdef CONFIG_CGROUP_DEVICE +extern int __devcgroup_check_permission(short type, u32 major, u32 minor, + short access); +#else +static inline int __devcgroup_check_permission(short type, u32 major, u32 minor, + short access) +{ return 0; } +#endif + +#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) +static inline int devcgroup_check_permission(short type, u32 major, u32 minor, + short access) +{ + int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access); + + if (rc) + return -EPERM; + + return __devcgroup_check_permission(type, major, minor, access); +} + +static inline int devcgroup_inode_permission(struct inode *inode, int mask) +{ + short type, access = 0; + + if (likely(!inode->i_rdev)) + return 0; + + if (S_ISBLK(inode->i_mode)) + type = DEVCG_DEV_BLOCK; + else if (S_ISCHR(inode->i_mode)) + type = DEVCG_DEV_CHAR; + else + return 0; + + if (mask & MAY_WRITE) + access |= DEVCG_ACC_WRITE; + if (mask & MAY_READ) + access |= DEVCG_ACC_READ; + + return devcgroup_check_permission(type, imajor(inode), iminor(inode), + access); +} + +static inline int devcgroup_inode_mknod(int mode, dev_t dev) +{ + short type; + + if (!S_ISBLK(mode) && !S_ISCHR(mode)) + return 0; + + if (S_ISBLK(mode)) + type = DEVCG_DEV_BLOCK; + else + type = DEVCG_DEV_CHAR; + + return devcgroup_check_permission(type, MAJOR(dev), MINOR(dev), + DEVCG_ACC_MKNOD); +} + +#else +static inline int devcgroup_inode_permission(struct inode *inode, int mask) +{ return 0; } +static inline int devcgroup_inode_mknod(int mode, dev_t dev) +{ return 0; } +#endif diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h new file mode 100644 index 000000000..100cb4343 --- /dev/null +++ b/include/linux/devpts_fs.h @@ -0,0 +1,48 @@ +/* -*- linux-c -*- --------------------------------------------------------- * + * + * linux/include/linux/devpts_fs.h + * + * Copyright 1998-2004 H. Peter Anvin -- All Rights Reserved + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. 
+ * + * ------------------------------------------------------------------------- */ + +#ifndef _LINUX_DEVPTS_FS_H +#define _LINUX_DEVPTS_FS_H + +#include + +#ifdef CONFIG_UNIX98_PTYS + +struct pts_fs_info; + +struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *); +struct pts_fs_info *devpts_acquire(struct file *); +void devpts_release(struct pts_fs_info *); + +int devpts_new_index(struct pts_fs_info *); +void devpts_kill_index(struct pts_fs_info *, int); + +/* mknod in devpts */ +struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *); +/* get private structure */ +void *devpts_get_priv(struct dentry *); +/* unlink */ +void devpts_pty_kill(struct dentry *); + +/* in pty.c */ +int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags); + +#else +static inline int +ptm_open_peer(struct file *master, struct tty_struct *tty, int flags) +{ + return -EIO; +} +#endif + + +#endif /* _LINUX_DEVPTS_FS_H */ diff --git a/include/linux/digsig.h b/include/linux/digsig.h new file mode 100644 index 000000000..6f85a070b --- /dev/null +++ b/include/linux/digsig.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2011 Nokia Corporation + * Copyright (C) 2011 Intel Corporation + * + * Author: + * Dmitry Kasatkin + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + * + */ + +#ifndef _DIGSIG_H +#define _DIGSIG_H + +#include + +enum pubkey_algo { + PUBKEY_ALGO_RSA, + PUBKEY_ALGO_MAX, +}; + +enum digest_algo { + DIGEST_ALGO_SHA1, + DIGEST_ALGO_SHA256, + DIGEST_ALGO_MAX +}; + +struct pubkey_hdr { + uint8_t version; /* key format version */ + uint32_t timestamp; /* key made, always 0 for now */ + uint8_t algo; + uint8_t nmpi; + char mpi[0]; +} __packed; + +struct signature_hdr { + uint8_t version; /* signature format version */ + uint32_t timestamp; /* signature made */ + uint8_t algo; + uint8_t hash; + uint8_t keyid[8]; + uint8_t nmpi; + char mpi[0]; +} __packed; + +#if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE) + +int digsig_verify(struct key *keyring, const char *sig, int siglen, + const char *digest, int digestlen); + +#else + +static inline int digsig_verify(struct key *keyring, const char *sig, + int siglen, const char *digest, int digestlen) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_SIGNATURE */ + +#endif /* _DIGSIG_H */ diff --git a/include/linux/dio.h b/include/linux/dio.h new file mode 100644 index 000000000..1470d1d94 --- /dev/null +++ b/include/linux/dio.h @@ -0,0 +1,281 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* header file for DIO boards for the HP300 architecture. + * Maybe this should handle DIO-II later? + * The general structure of this is vaguely based on how + * the Amiga port handles Zorro boards. + * Copyright (C) Peter Maydell 05/1998 + * Converted to driver model Jochen Friedrich + * + * The board IDs are from the NetBSD kernel, which for once provided + * helpful comments... + * + * This goes with drivers/dio/dio.c + */ + +#ifndef _LINUX_DIO_H +#define _LINUX_DIO_H + +/* The DIO boards in a system are distinguished by 'select codes' which + * range from 0-63 (DIO) and 132-255 (DIO-II). + * The DIO board with select code sc is located at physical address + * 0x600000 + sc * 0x10000 + * So DIO cards cover [0x600000-0x800000); the areas [0x200000-0x400000) and + * [0x800000-0x1000000) are for additional space required by things + * like framebuffers. 
[0x400000-0x600000) is for miscellaneous internal I/O. + * On Linux, this is currently all mapped into the virtual address space + * at 0xf0000000 on bootup. + * DIO-II boards are at 0x1000000 + (sc - 132) * 0x400000 + * which is address range [0x1000000-0x20000000) -- too big to map completely, + * so currently we just don't handle DIO-II boards. It wouldn't be hard to + * do with ioremap() though. + */ + +#include + +#ifdef __KERNEL__ + +#include + +typedef __u16 dio_id; + + /* + * DIO devices + */ + +struct dio_dev { + struct dio_bus *bus; + dio_id id; + int scode; + struct dio_driver *driver; /* which driver has allocated this device */ + struct device dev; /* Generic device interface */ + u8 ipl; + char name[64]; + struct resource resource; +}; + +#define to_dio_dev(n) container_of(n, struct dio_dev, dev) + + /* + * DIO bus + */ + +struct dio_bus { + struct list_head devices; /* list of devices on this bus */ + unsigned int num_resources; /* number of resources */ + struct resource resources[2]; /* address space routed to this bus */ + struct device dev; + char name[10]; +}; + +extern struct dio_bus dio_bus; /* Single DIO bus */ +extern struct bus_type dio_bus_type; + + /* + * DIO device IDs + */ + +struct dio_device_id { + dio_id id; /* Device ID or DIO_WILDCARD */ + unsigned long driver_data; /* Data private to the driver */ +}; + + /* + * DIO device drivers + */ + +struct dio_driver { + struct list_head node; + char *name; + const struct dio_device_id *id_table; /* NULL if wants all devices */ + int (*probe)(struct dio_dev *z, const struct dio_device_id *id); +/* New device inserted */ + void (*remove)(struct dio_dev *z); /* Device removed (NULL if not a hot-plug capable driver) */ + struct device_driver driver; +}; + +#define to_dio_driver(drv) container_of(drv, struct dio_driver, driver) + +/* DIO/DIO-II boards all have the following 8bit registers. + * These are offsets from the base of the device. + */ +#define DIO_IDOFF 0x01 /* primary device ID */ +#define DIO_IPLOFF 0x03 /* interrupt priority level */ +#define DIO_SECIDOFF 0x15 /* secondary device ID */ +#define DIOII_SIZEOFF 0x101 /* device size, DIO-II only */ +#define DIO_VIRADDRBASE 0xf0000000UL /* vir addr where IOspace is mapped */ + +#define DIO_BASE 0x600000 /* start of DIO space */ +#define DIO_END 0x1000000 /* end of DIO space */ +#define DIO_DEVSIZE 0x10000 /* size of a DIO device */ + +#define DIOII_BASE 0x01000000 /* start of DIO-II space */ +#define DIOII_END 0x20000000 /* end of DIO-II space */ +#define DIOII_DEVSIZE 0x00400000 /* size of a DIO-II device */ + +/* Highest valid select code. If we add DIO-II support this should become + * 256 for everything except HP320, which only has DIO. + */ +#define DIO_SCMAX (hp300_model == HP_320 ? 32 : 256) +#define DIOII_SCBASE 132 /* lowest DIO-II select code */ +#define DIO_SCINHOLE(scode) (((scode) >= 32) && ((scode) < DIOII_SCBASE)) +#define DIO_ISDIOII(scode) ((scode) >= 132 && (scode) < 256) + +/* macros to read device IDs, given base address */ +#define DIO_ID(baseaddr) in_8((baseaddr) + DIO_IDOFF) +#define DIO_SECID(baseaddr) in_8((baseaddr) + DIO_SECIDOFF) + +/* extract the interrupt level */ +#define DIO_IPL(baseaddr) (((in_8((baseaddr) + DIO_IPLOFF) >> 4) & 0x03) + 3) + +/* find the size of a DIO-II board's address space. + * DIO boards are all fixed length. + */ +#define DIOII_SIZE(baseaddr) ((in_8((baseaddr) + DIOII_SIZEOFF) + 1) * 0x100000) + +/* general purpose macro for both DIO and DIO-II */ +#define DIO_SIZE(scode, base) (DIO_ISDIOII((scode)) ? 
DIOII_SIZE((base)) : DIO_DEVSIZE) + +/* The hardware has primary and secondary IDs; we encode these in a single + * int as PRIMARY ID & (SECONDARY ID << 8). + * In practice this is only important for framebuffers, + * and everybody else just sets ID fields equal to the DIO_ID_FOO value. + */ +#define DIO_ENCODE_ID(pr,sec) ((((int)sec & 0xff) << 8) | ((int)pr & 0xff)) +/* macro to determine whether a given primary ID requires a secondary ID byte */ +#define DIO_NEEDSSECID(id) ((id) == DIO_ID_FBUFFER) +#define DIO_WILDCARD 0xff + +/* Now a whole slew of macros giving device IDs and descriptive strings: */ +#define DIO_ID_DCA0 0x02 /* 98644A serial */ +#define DIO_DESC_DCA0 "98644A DCA0 serial" +#define DIO_ID_DCA0REM 0x82 /* 98644A serial */ +#define DIO_DESC_DCA0REM "98644A DCA0REM serial" +#define DIO_ID_DCA1 0x42 /* 98644A serial */ +#define DIO_DESC_DCA1 "98644A DCA1 serial" +#define DIO_ID_DCA1REM 0xc2 /* 98644A serial */ +#define DIO_DESC_DCA1REM "98644A DCA1REM serial" +#define DIO_ID_DCM 0x05 /* 98642A serial MUX */ +#define DIO_DESC_DCM "98642A DCM serial MUX" +#define DIO_ID_DCMREM 0x85 /* 98642A serial MUX */ +#define DIO_DESC_DCMREM "98642A DCMREM serial MUX" +#define DIO_ID_LAN 0x15 /* 98643A LAN */ +#define DIO_DESC_LAN "98643A LANCE ethernet" +#define DIO_ID_FHPIB 0x08 /* 98625A/98625B fast HP-IB */ +#define DIO_DESC_FHPIB "98625A/98625B fast HPIB" +#define DIO_ID_NHPIB 0x01 /* 98624A HP-IB (normal ie slow) */ +#define DIO_DESC_NHPIB "98624A HPIB" +#define DIO_ID_SCSI0 0x07 /* 98265A SCSI */ +#define DIO_DESC_SCSI0 "98265A SCSI0" +#define DIO_ID_SCSI1 0x27 /* ditto */ +#define DIO_DESC_SCSI1 "98265A SCSI1" +#define DIO_ID_SCSI2 0x47 /* ditto */ +#define DIO_DESC_SCSI2 "98265A SCSI2" +#define DIO_ID_SCSI3 0x67 /* ditto */ +#define DIO_DESC_SCSI3 "98265A SCSI3" +#define DIO_ID_FBUFFER 0x39 /* framebuffer: flavour is distinguished by secondary ID */ +#define DIO_DESC_FBUFFER "bitmapped display" +/* the NetBSD kernel source is a bit unsure as to what these next IDs actually do :-> */ +#define DIO_ID_MISC0 0x03 /* 98622A */ +#define DIO_DESC_MISC0 "98622A" +#define DIO_ID_MISC1 0x04 /* 98623A */ +#define DIO_DESC_MISC1 "98623A" +#define DIO_ID_PARALLEL 0x06 /* internal parallel */ +#define DIO_DESC_PARALLEL "internal parallel" +#define DIO_ID_MISC2 0x09 /* 98287A keyboard */ +#define DIO_DESC_MISC2 "98287A keyboard" +#define DIO_ID_MISC3 0x0a /* HP98635A FP accelerator */ +#define DIO_DESC_MISC3 "HP98635A FP accelerator" +#define DIO_ID_MISC4 0x0b /* timer */ +#define DIO_DESC_MISC4 "timer" +#define DIO_ID_MISC5 0x12 /* 98640A */ +#define DIO_DESC_MISC5 "98640A" +#define DIO_ID_MISC6 0x16 /* 98659A */ +#define DIO_DESC_MISC6 "98659A" +#define DIO_ID_MISC7 0x19 /* 237 display */ +#define DIO_DESC_MISC7 "237 display" +#define DIO_ID_MISC8 0x1a /* quad-wide card */ +#define DIO_DESC_MISC8 "quad-wide card" +#define DIO_ID_MISC9 0x1b /* 98253A */ +#define DIO_DESC_MISC9 "98253A" +#define DIO_ID_MISC10 0x1c /* 98627A */ +#define DIO_DESC_MISC10 "98253A" +#define DIO_ID_MISC11 0x1d /* 98633A */ +#define DIO_DESC_MISC11 "98633A" +#define DIO_ID_MISC12 0x1e /* 98259A */ +#define DIO_DESC_MISC12 "98259A" +#define DIO_ID_MISC13 0x1f /* 8741 */ +#define DIO_DESC_MISC13 "8741" +#define DIO_ID_VME 0x31 /* 98577A VME adapter */ +#define DIO_DESC_VME "98577A VME adapter" +#define DIO_ID_DCL 0x34 /* 98628A serial */ +#define DIO_DESC_DCL "98628A DCL serial" +#define DIO_ID_DCLREM 0xb4 /* 98628A serial */ +#define DIO_DESC_DCLREM "98628A DCLREM serial" +/* These are the secondary IDs for the 
framebuffers */ +#define DIO_ID2_GATORBOX 0x01 /* 98700/98710 "gatorbox" */ +#define DIO_DESC2_GATORBOX "98700/98710 \"gatorbox\" display" +#define DIO_ID2_TOPCAT 0x02 /* 98544/98545/98547 "topcat" */ +#define DIO_DESC2_TOPCAT "98544/98545/98547 \"topcat\" display" +#define DIO_ID2_RENAISSANCE 0x04 /* 98720/98721 "renaissance" */ +#define DIO_DESC2_RENAISSANCE "98720/98721 \"renaissance\" display" +#define DIO_ID2_LRCATSEYE 0x05 /* lowres "catseye" */ +#define DIO_DESC2_LRCATSEYE "low-res catseye display" +#define DIO_ID2_HRCCATSEYE 0x06 /* highres colour "catseye" */ +#define DIO_DESC2_HRCCATSEYE "high-res color catseye display" +#define DIO_ID2_HRMCATSEYE 0x07 /* highres mono "catseye" */ +#define DIO_DESC2_HRMCATSEYE "high-res mono catseye display" +#define DIO_ID2_DAVINCI 0x08 /* 98730/98731 "davinci" */ +#define DIO_DESC2_DAVINCI "98730/98731 \"davinci\" display" +#define DIO_ID2_XXXCATSEYE 0x09 /* "catseye" */ +#define DIO_DESC2_XXXCATSEYE "catseye display" +#define DIO_ID2_HYPERION 0x0e /* A1096A "hyperion" */ +#define DIO_DESC2_HYPERION "A1096A \"hyperion\" display" +#define DIO_ID2_XGENESIS 0x0b /* "x-genesis"; no NetBSD support */ +#define DIO_DESC2_XGENESIS "\"x-genesis\" display" +#define DIO_ID2_TIGER 0x0c /* "tiger"; no NetBSD support */ +#define DIO_DESC2_TIGER "\"tiger\" display" +#define DIO_ID2_YGENESIS 0x0d /* "y-genesis"; no NetBSD support */ +#define DIO_DESC2_YGENESIS "\"y-genesis\" display" +/* if you add new IDs then you should tell dio.c about them so it can + * identify them... + */ + +extern int dio_find(int deviceid); +extern unsigned long dio_scodetophysaddr(int scode); +extern int dio_create_sysfs_dev_files(struct dio_dev *); + +/* New-style probing */ +extern int dio_register_driver(struct dio_driver *); +extern void dio_unregister_driver(struct dio_driver *); +extern const struct dio_device_id *dio_match_device(const struct dio_device_id *ids, const struct dio_dev *z); +static inline struct dio_driver *dio_dev_driver(const struct dio_dev *d) +{ + return d->driver; +} + +#define dio_resource_start(d) ((d)->resource.start) +#define dio_resource_end(d) ((d)->resource.end) +#define dio_resource_len(d) (resource_size(&(d)->resource)) +#define dio_resource_flags(d) ((d)->resource.flags) + +#define dio_request_device(d, name) \ + request_mem_region(dio_resource_start(d), dio_resource_len(d), name) +#define dio_release_device(d) \ + release_mem_region(dio_resource_start(d), dio_resource_len(d)) + +/* Similar to the helpers above, these manipulate per-dio_dev + * driver-specific data. They are really just a wrapper around + * the generic device structure functions of these calls. 
+ */ +static inline void *dio_get_drvdata (struct dio_dev *d) +{ + return dev_get_drvdata(&d->dev); +} + +static inline void dio_set_drvdata (struct dio_dev *d, void *data) +{ + dev_set_drvdata(&d->dev, data); +} + +#endif /* __KERNEL__ */ +#endif /* ndef _LINUX_DIO_H */ diff --git a/include/linux/dirent.h b/include/linux/dirent.h new file mode 100644 index 000000000..fc61f3cff --- /dev/null +++ b/include/linux/dirent.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DIRENT_H +#define _LINUX_DIRENT_H + +struct linux_dirent64 { + u64 d_ino; + s64 d_off; + unsigned short d_reclen; + unsigned char d_type; + char d_name[0]; +}; + +#endif diff --git a/include/linux/dlm.h b/include/linux/dlm.h new file mode 100644 index 000000000..d02da2c6f --- /dev/null +++ b/include/linux/dlm.h @@ -0,0 +1,172 @@ +/****************************************************************************** +******************************************************************************* +** +** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. +** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. +** +** This copyrighted material is made available to anyone wishing to use, +** modify, copy, or redistribute it subject to the terms and conditions +** of the GNU General Public License v.2. +** +******************************************************************************* +******************************************************************************/ +#ifndef __DLM_DOT_H__ +#define __DLM_DOT_H__ + +#include + + +struct dlm_slot { + int nodeid; /* 1 to MAX_INT */ + int slot; /* 1 to MAX_INT */ +}; + +/* + * recover_prep: called before the dlm begins lock recovery. + * Notfies lockspace user that locks from failed members will be granted. + * recover_slot: called after recover_prep and before recover_done. + * Identifies a failed lockspace member. + * recover_done: called after the dlm completes lock recovery. + * Identifies lockspace members and lockspace generation number. + */ + +struct dlm_lockspace_ops { + void (*recover_prep) (void *ops_arg); + void (*recover_slot) (void *ops_arg, struct dlm_slot *slot); + void (*recover_done) (void *ops_arg, struct dlm_slot *slots, + int num_slots, int our_slot, uint32_t generation); +}; + +/* + * dlm_new_lockspace + * + * Create/join a lockspace. + * + * name: lockspace name, null terminated, up to DLM_LOCKSPACE_LEN (not + * including terminating null). + * + * cluster: cluster name, null terminated, up to DLM_LOCKSPACE_LEN (not + * including terminating null). Optional. When cluster is null, it + * is not used. When set, dlm_new_lockspace() returns -EBADR if cluster + * is not equal to the dlm cluster name. + * + * flags: + * DLM_LSFL_NODIR + * The dlm should not use a resource directory, but statically assign + * resource mastery to nodes based on the name hash that is otherwise + * used to select the directory node. Must be the same on all nodes. + * DLM_LSFL_TIMEWARN + * The dlm should emit netlink messages if locks have been waiting + * for a configurable amount of time. (Unused.) + * DLM_LSFL_FS + * The lockspace user is in the kernel (i.e. filesystem). Enables + * direct bast/cast callbacks. + * DLM_LSFL_NEWEXCL + * dlm_new_lockspace() should return -EEXIST if the lockspace exists. + * + * lvblen: length of lvb in bytes. Must be multiple of 8. + * dlm_new_lockspace() returns an error if this does not match + * what other nodes are using. 
+ * + * ops: callbacks that indicate lockspace recovery points so the + * caller can coordinate its recovery and know lockspace members. + * This is only used by the initial dlm_new_lockspace() call. + * Optional. + * + * ops_arg: arg for ops callbacks. + * + * ops_result: tells caller if the ops callbacks (if provided) will + * be used or not. 0: will be used, -EXXX will not be used. + * -EOPNOTSUPP: the dlm does not have recovery_callbacks enabled. + * + * lockspace: handle for dlm functions + */ + +int dlm_new_lockspace(const char *name, const char *cluster, + uint32_t flags, int lvblen, + const struct dlm_lockspace_ops *ops, void *ops_arg, + int *ops_result, dlm_lockspace_t **lockspace); + +/* + * dlm_release_lockspace + * + * Stop a lockspace. + */ + +int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force); + +/* + * dlm_lock + * + * Make an asynchronous request to acquire or convert a lock on a named + * resource. + * + * lockspace: context for the request + * mode: the requested mode of the lock (DLM_LOCK_) + * lksb: lock status block for input and async return values + * flags: input flags (DLM_LKF_) + * name: name of the resource to lock, can be binary + * namelen: the length in bytes of the resource name (MAX_RESNAME_LEN) + * parent: the lock ID of a parent lock or 0 if none + * lockast: function DLM executes when it completes processing the request + * astarg: argument passed to lockast and bast functions + * bast: function DLM executes when this lock later blocks another request + * + * Returns: + * 0 if request is successfully queued for processing + * -EINVAL if any input parameters are invalid + * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE + * -ENOMEM if there is no memory to process request + * -ENOTCONN if there is a communication error + * + * If the call to dlm_lock returns an error then the operation has failed and + * the AST routine will not be called. If dlm_lock returns 0 it is still + * possible that the lock operation will fail. The AST routine will be called + * when the locking is complete and the status is returned in the lksb. + * + * If the AST routines or parameter are passed to a conversion operation then + * they will overwrite those values that were passed to a previous dlm_lock + * call. + * + * AST routines should not block (at least not for long), but may make + * any locking calls they please. + */ + +int dlm_lock(dlm_lockspace_t *lockspace, + int mode, + struct dlm_lksb *lksb, + uint32_t flags, + void *name, + unsigned int namelen, + uint32_t parent_lkid, + void (*lockast) (void *astarg), + void *astarg, + void (*bast) (void *astarg, int mode)); + +/* + * dlm_unlock + * + * Asynchronously release a lock on a resource. The AST routine is called + * when the resource is successfully unlocked. 
+ * + * lockspace: context for the request + * lkid: the lock ID as returned in the lksb + * flags: input flags (DLM_LKF_) + * lksb: if NULL the lksb parameter passed to last lock request is used + * astarg: the arg used with the completion ast for the unlock + * + * Returns: + * 0 if request is successfully queued for processing + * -EINVAL if any input parameters are invalid + * -ENOTEMPTY if the lock still has sublocks + * -EBUSY if the lock is waiting for a remote lock operation + * -ENOTCONN if there is a communication error + */ + +int dlm_unlock(dlm_lockspace_t *lockspace, + uint32_t lkid, + uint32_t flags, + struct dlm_lksb *lksb, + void *astarg); + +#endif /* __DLM_DOT_H__ */ diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h new file mode 100644 index 000000000..95ad387a7 --- /dev/null +++ b/include/linux/dlm_plock.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + */ +#ifndef __DLM_PLOCK_DOT_H__ +#define __DLM_PLOCK_DOT_H__ + +#include + +int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, + int cmd, struct file_lock *fl); +int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, + struct file_lock *fl); +int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, + struct file_lock *fl); +#endif diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h new file mode 100644 index 000000000..45ba37aaf --- /dev/null +++ b/include/linux/dm-bufio.h @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2009-2011 Red Hat, Inc. + * + * Author: Mikulas Patocka + * + * This file is released under the GPL. + */ + +#ifndef _LINUX_DM_BUFIO_H +#define _LINUX_DM_BUFIO_H + +#include +#include + +/*----------------------------------------------------------------*/ + +struct dm_bufio_client; +struct dm_buffer; + +/* + * Create a buffered IO cache on a given device + */ +struct dm_bufio_client * +dm_bufio_client_create(struct block_device *bdev, unsigned block_size, + unsigned reserved_buffers, unsigned aux_size, + void (*alloc_callback)(struct dm_buffer *), + void (*write_callback)(struct dm_buffer *)); + +/* + * Release a buffered IO cache. + */ +void dm_bufio_client_destroy(struct dm_bufio_client *c); + +/* + * Set the sector range. + * When this function is called, there must be no I/O in progress on the bufio + * client. + */ +void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start); + +/* + * WARNING: to avoid deadlocks, these conditions are observed: + * + * - At most one thread can hold at most "reserved_buffers" simultaneously. + * - Each other threads can hold at most one buffer. + * - Threads which call only dm_bufio_get can hold unlimited number of + * buffers. + */ + +/* + * Read a given block from disk. Returns pointer to data. Returns a + * pointer to dm_buffer that can be used to release the buffer or to make + * it dirty. + */ +void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, + struct dm_buffer **bp); + +/* + * Like dm_bufio_read, but return buffer from cache, don't read + * it. If the buffer is not in the cache, return NULL. + */ +void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, + struct dm_buffer **bp); + +/* + * Like dm_bufio_read, but don't read anything from the disk. 
It is + * expected that the caller initializes the buffer and marks it dirty. + */ +void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, + struct dm_buffer **bp); + +/* + * Prefetch the specified blocks to the cache. + * The function starts to read the blocks and returns without waiting for + * I/O to finish. + */ +void dm_bufio_prefetch(struct dm_bufio_client *c, + sector_t block, unsigned n_blocks); + +/* + * Release a reference obtained with dm_bufio_{read,get,new}. The data + * pointer and dm_buffer pointer is no longer valid after this call. + */ +void dm_bufio_release(struct dm_buffer *b); + +/* + * Mark a buffer dirty. It should be called after the buffer is modified. + * + * In case of memory pressure, the buffer may be written after + * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So + * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but + * the actual writing may occur earlier. + */ +void dm_bufio_mark_buffer_dirty(struct dm_buffer *b); + +/* + * Mark a part of the buffer dirty. + * + * The specified part of the buffer is scheduled to be written. dm-bufio may + * write the specified part of the buffer or it may write a larger superset. + */ +void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, + unsigned start, unsigned end); + +/* + * Initiate writing of dirty buffers, without waiting for completion. + */ +void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c); + +/* + * Write all dirty buffers. Guarantees that all dirty buffers created prior + * to this call are on disk when this call exits. + */ +int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c); + +/* + * Send an empty write barrier to the device to flush hardware disk cache. + */ +int dm_bufio_issue_flush(struct dm_bufio_client *c); + +/* + * Like dm_bufio_release but also move the buffer to the new + * block. dm_bufio_write_dirty_buffers is needed to commit the new block. + */ +void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block); + +/* + * Free the given buffer. + * This is just a hint, if the buffer is in use or dirty, this function + * does nothing. + */ +void dm_bufio_forget(struct dm_bufio_client *c, sector_t block); + +/* + * Set the minimum number of buffers before cleanup happens. + */ +void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n); + +unsigned dm_bufio_get_block_size(struct dm_bufio_client *c); +sector_t dm_bufio_get_device_size(struct dm_bufio_client *c); +struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c); +sector_t dm_bufio_get_block_number(struct dm_buffer *b); +void *dm_bufio_get_block_data(struct dm_buffer *b); +void *dm_bufio_get_aux_data(struct dm_buffer *b); +struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b); + +/*----------------------------------------------------------------*/ + +#endif diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h new file mode 100644 index 000000000..7084503c3 --- /dev/null +++ b/include/linux/dm-dirty-log.h @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2003 Sistina Software + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * Device-Mapper dirty region log. + * + * This file is released under the LGPL. 
+ */ + +#ifndef _LINUX_DM_DIRTY_LOG +#define _LINUX_DM_DIRTY_LOG + +#ifdef __KERNEL__ + +#include +#include + +typedef sector_t region_t; + +struct dm_dirty_log_type; + +struct dm_dirty_log { + struct dm_dirty_log_type *type; + int (*flush_callback_fn)(struct dm_target *ti); + void *context; +}; + +struct dm_dirty_log_type { + const char *name; + struct module *module; + + /* For internal device-mapper use */ + struct list_head list; + + int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti, + unsigned argc, char **argv); + void (*dtr)(struct dm_dirty_log *log); + + /* + * There are times when we don't want the log to touch + * the disk. + */ + int (*presuspend)(struct dm_dirty_log *log); + int (*postsuspend)(struct dm_dirty_log *log); + int (*resume)(struct dm_dirty_log *log); + + /* + * Retrieves the smallest size of region that the log can + * deal with. + */ + uint32_t (*get_region_size)(struct dm_dirty_log *log); + + /* + * A predicate to say whether a region is clean or not. + * May block. + */ + int (*is_clean)(struct dm_dirty_log *log, region_t region); + + /* + * Returns: 0, 1, -EWOULDBLOCK, < 0 + * + * A predicate function to check the area given by + * [sector, sector + len) is in sync. + * + * If -EWOULDBLOCK is returned the state of the region is + * unknown, typically this will result in a read being + * passed to a daemon to deal with, since a daemon is + * allowed to block. + */ + int (*in_sync)(struct dm_dirty_log *log, region_t region, + int can_block); + + /* + * Flush the current log state (eg, to disk). This + * function may block. + */ + int (*flush)(struct dm_dirty_log *log); + + /* + * Mark an area as clean or dirty. These functions may + * block, though for performance reasons blocking should + * be extremely rare (eg, allocating another chunk of + * memory for some reason). + */ + void (*mark_region)(struct dm_dirty_log *log, region_t region); + void (*clear_region)(struct dm_dirty_log *log, region_t region); + + /* + * Returns: <0 (error), 0 (no region), 1 (region) + * + * The mirrord will need perform recovery on regions of + * the mirror that are in the NOSYNC state. This + * function asks the log to tell the caller about the + * next region that this machine should recover. + * + * Do not confuse this function with 'in_sync()', one + * tells you if an area is synchronised, the other + * assigns recovery work. + */ + int (*get_resync_work)(struct dm_dirty_log *log, region_t *region); + + /* + * This notifies the log that the resync status of a region + * has changed. It also clears the region from the recovering + * list (if present). + */ + void (*set_region_sync)(struct dm_dirty_log *log, + region_t region, int in_sync); + + /* + * Returns the number of regions that are in sync. + */ + region_t (*get_sync_count)(struct dm_dirty_log *log); + + /* + * Support function for mirror status requests. + */ + int (*status)(struct dm_dirty_log *log, status_type_t status_type, + char *result, unsigned maxlen); + + /* + * is_remote_recovering is necessary for cluster mirroring. It provides + * a way to detect recovery on another node, so we aren't writing + * concurrently. This function is likely to block (when a cluster log + * is used). 
+ * + * Returns: 0, 1 + */ + int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region); +}; + +int dm_dirty_log_type_register(struct dm_dirty_log_type *type); +int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type); + +/* + * Make sure you use these two functions, rather than calling + * type->constructor/destructor() directly. + */ +struct dm_dirty_log *dm_dirty_log_create(const char *type_name, + struct dm_target *ti, + int (*flush_callback_fn)(struct dm_target *ti), + unsigned argc, char **argv); +void dm_dirty_log_destroy(struct dm_dirty_log *log); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_DM_DIRTY_LOG_H */ diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h new file mode 100644 index 000000000..a52c6580c --- /dev/null +++ b/include/linux/dm-io.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2003 Sistina Software + * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved. + * + * Device-Mapper low-level I/O. + * + * This file is released under the GPL. + */ + +#ifndef _LINUX_DM_IO_H +#define _LINUX_DM_IO_H + +#ifdef __KERNEL__ + +#include + +struct dm_io_region { + struct block_device *bdev; + sector_t sector; + sector_t count; /* If this is zero the region is ignored. */ +}; + +struct page_list { + struct page_list *next; + struct page *page; +}; + +typedef void (*io_notify_fn)(unsigned long error, void *context); + +enum dm_io_mem_type { + DM_IO_PAGE_LIST,/* Page list */ + DM_IO_BIO, /* Bio vector */ + DM_IO_VMA, /* Virtual memory area */ + DM_IO_KMEM, /* Kernel memory */ +}; + +struct dm_io_memory { + enum dm_io_mem_type type; + + unsigned offset; + + union { + struct page_list *pl; + struct bio *bio; + void *vma; + void *addr; + } ptr; +}; + +struct dm_io_notify { + io_notify_fn fn; /* Callback for asynchronous requests */ + void *context; /* Passed to callback */ +}; + +/* + * IO request structure + */ +struct dm_io_client; +struct dm_io_request { + int bi_op; /* REQ_OP */ + int bi_op_flags; /* req_flag_bits */ + struct dm_io_memory mem; /* Memory to use for io */ + struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ + struct dm_io_client *client; /* Client memory handler */ +}; + +/* + * For async io calls, users can alternatively use the dm_io() function below + * and dm_io_client_create() to create private mempools for the client. + * + * Create/destroy may block. + */ +struct dm_io_client *dm_io_client_create(void); +void dm_io_client_destroy(struct dm_io_client *client); + +/* + * IO interface using private per-client pools. + * Each bit in the optional 'sync_error_bits' bitset indicates whether an + * error occurred doing io to the corresponding region. + */ +int dm_io(struct dm_io_request *io_req, unsigned num_regions, + struct dm_io_region *region, unsigned long *sync_error_bits); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_DM_IO_H */ diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h new file mode 100644 index 000000000..e42de7750 --- /dev/null +++ b/include/linux/dm-kcopyd.h @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2001 - 2003 Sistina Software + * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved. + * + * kcopyd provides a simple interface for copying an area of one + * block-device to one or more other block-devices, either synchronous + * or with an asynchronous completion notification. + * + * This file is released under the GPL. 
+ */ + +#ifndef _LINUX_DM_KCOPYD_H +#define _LINUX_DM_KCOPYD_H + +#ifdef __KERNEL__ + +#include + +/* FIXME: make this configurable */ +#define DM_KCOPYD_MAX_REGIONS 8 + +#define DM_KCOPYD_IGNORE_ERROR 1 +#define DM_KCOPYD_WRITE_SEQ 2 + +struct dm_kcopyd_throttle { + unsigned throttle; + unsigned num_io_jobs; + unsigned io_period; + unsigned total_period; + unsigned last_jiffies; +}; + +/* + * kcopyd clients that want to support throttling must pass an initialised + * dm_kcopyd_throttle struct into dm_kcopyd_client_create(). + * Two or more clients may share the same instance of this struct between + * them if they wish to be throttled as a group. + * + * This macro also creates a corresponding module parameter to configure + * the amount of throttling. + */ +#define DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(name, description) \ +static struct dm_kcopyd_throttle dm_kcopyd_throttle = { 100, 0, 0, 0, 0 }; \ +module_param_named(name, dm_kcopyd_throttle.throttle, uint, 0644); \ +MODULE_PARM_DESC(name, description) + +/* + * To use kcopyd you must first create a dm_kcopyd_client object. + * throttle can be NULL if you don't want any throttling. + */ +struct dm_kcopyd_client; +struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle); +void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc); + +/* + * Submit a copy job to kcopyd. This is built on top of the + * previous three fns. + * + * read_err is a boolean, + * write_err is a bitset, with 1 bit for each destination region + */ +typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err, + void *context); + +void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, + unsigned num_dests, struct dm_io_region *dests, + unsigned flags, dm_kcopyd_notify_fn fn, void *context); + +/* + * Prepare a callback and submit it via the kcopyd thread. + * + * dm_kcopyd_prepare_callback allocates a callback structure and returns it. + * It must not be called from interrupt context. + * The returned value should be passed into dm_kcopyd_do_callback. + * + * dm_kcopyd_do_callback submits the callback. + * It may be called from interrupt context. + * The callback is issued from the kcopyd thread. + */ +void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc, + dm_kcopyd_notify_fn fn, void *context); +void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err); + +void dm_kcopyd_zero(struct dm_kcopyd_client *kc, + unsigned num_dests, struct dm_io_region *dests, + unsigned flags, dm_kcopyd_notify_fn fn, void *context); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_DM_KCOPYD_H */ diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h new file mode 100644 index 000000000..9e2a7a401 --- /dev/null +++ b/include/linux/dm-region-hash.h @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * Device-Mapper dirty region hash interface. + * + * This file is released under the GPL. + */ + +#ifndef DM_REGION_HASH_H +#define DM_REGION_HASH_H + +#include + +/*----------------------------------------------------------------- + * Region hash + *----------------------------------------------------------------*/ +struct dm_region_hash; +struct dm_region; + +/* + * States a region can have. + */ +enum dm_rh_region_states { + DM_RH_CLEAN = 0x01, /* No writes in flight. */ + DM_RH_DIRTY = 0x02, /* Writes in flight. */ + DM_RH_NOSYNC = 0x04, /* Out of sync. 
*/ + DM_RH_RECOVERING = 0x08, /* Under resynchronization. */ +}; + +/* + * Region hash create/destroy. + */ +struct bio_list; +struct dm_region_hash *dm_region_hash_create( + void *context, void (*dispatch_bios)(void *context, + struct bio_list *bios), + void (*wakeup_workers)(void *context), + void (*wakeup_all_recovery_waiters)(void *context), + sector_t target_begin, unsigned max_recovery, + struct dm_dirty_log *log, uint32_t region_size, + region_t nr_regions); +void dm_region_hash_destroy(struct dm_region_hash *rh); + +struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh); + +/* + * Conversion functions. + */ +region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio); +sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region); +void *dm_rh_region_context(struct dm_region *reg); + +/* + * Get region size and key (ie. number of the region). + */ +sector_t dm_rh_get_region_size(struct dm_region_hash *rh); +region_t dm_rh_get_region_key(struct dm_region *reg); + +/* + * Get/set/update region state (and dirty log). + * + */ +int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block); +void dm_rh_set_state(struct dm_region_hash *rh, region_t region, + enum dm_rh_region_states state, int may_block); + +/* Non-zero errors_handled leaves the state of the region NOSYNC */ +void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled); + +/* Flush the region hash and dirty log. */ +int dm_rh_flush(struct dm_region_hash *rh); + +/* Inc/dec pending count on regions. */ +void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios); +void dm_rh_dec(struct dm_region_hash *rh, region_t region); + +/* Delay bios on regions. */ +void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); + +void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio); + +/* + * Region recovery control. + */ + +/* Prepare some regions for recovery by starting to quiesce them. */ +void dm_rh_recovery_prepare(struct dm_region_hash *rh); + +/* Try fetching a quiesced region for recovery. */ +struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh); + +/* Report recovery end on a region. */ +void dm_rh_recovery_end(struct dm_region *reg, int error); + +/* Returns number of regions with recovery work outstanding. */ +int dm_rh_recovery_in_flight(struct dm_region_hash *rh); + +/* Start/stop recovery. */ +void dm_rh_start_recovery(struct dm_region_hash *rh); +void dm_rh_stop_recovery(struct dm_region_hash *rh); + +#endif /* DM_REGION_HASH_H */ diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h new file mode 100644 index 000000000..841925fbf --- /dev/null +++ b/include/linux/dm9000.h @@ -0,0 +1,42 @@ +/* include/linux/dm9000.h + * + * Copyright (c) 2004 Simtec Electronics + * Ben Dooks + * + * Header file for dm9000 platform data + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * +*/ + +#ifndef __DM9000_PLATFORM_DATA +#define __DM9000_PLATFORM_DATA __FILE__ + +#include + +/* IO control flags */ + +#define DM9000_PLATF_8BITONLY (0x0001) +#define DM9000_PLATF_16BITONLY (0x0002) +#define DM9000_PLATF_32BITONLY (0x0004) +#define DM9000_PLATF_EXT_PHY (0x0008) +#define DM9000_PLATF_NO_EEPROM (0x0010) +#define DM9000_PLATF_SIMPLE_PHY (0x0020) /* Use NSR to find LinkStatus */ + +/* platform data for platform device structure's platform_data field */ + +struct dm9000_plat_data { + unsigned int flags; + unsigned char dev_addr[ETH_ALEN]; + + /* allow replacement IO routines */ + + void (*inblk)(void __iomem *reg, void *data, int len); + void (*outblk)(void __iomem *reg, void *data, int len); + void (*dumpblk)(void __iomem *reg, int len); +}; + +#endif /* __DM9000_PLATFORM_DATA */ + diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h new file mode 100644 index 000000000..58725f890 --- /dev/null +++ b/include/linux/dma-buf.h @@ -0,0 +1,402 @@ +/* + * Header file for dma buffer sharing framework. + * + * Copyright(C) 2011 Linaro Limited. All rights reserved. + * Author: Sumit Semwal + * + * Many thanks to linaro-mm-sig list, and specially + * Arnd Bergmann , Rob Clark and + * Daniel Vetter for their support in creation and + * refining of this idea. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ +#ifndef __DMA_BUF_H__ +#define __DMA_BUF_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct device; +struct dma_buf; +struct dma_buf_attachment; + +/** + * struct dma_buf_ops - operations possible on struct dma_buf + * @map_atomic: [optional] maps a page from the buffer into kernel address + * space, users may not block until the subsequent unmap call. + * This callback must not sleep. + * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer. + * This Callback must not sleep. + * @map: [optional] maps a page from the buffer into kernel address space. + * @unmap: [optional] unmaps a page from the buffer. + * @vmap: [optional] creates a virtual mapping for the buffer into kernel + * address space. Same restrictions as for vmap and friends apply. + * @vunmap: [optional] unmaps a vmap from the buffer + */ +struct dma_buf_ops { + /** + * @attach: + * + * This is called from dma_buf_attach() to make sure that a given + * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters + * which support buffer objects in special locations like VRAM or + * device-specific carveout areas should check whether the buffer could + * be move to system memory (or directly accessed by the provided + * device), and otherwise need to fail the attach operation. + * + * The exporter should also in general check whether the current + * allocation fullfills the DMA constraints of the new device. If this + * is not the case, and the allocation cannot be moved, it should also + * fail the attach operation. + * + * Any exporter-private housekeeping data can be stored in the + * &dma_buf_attachment.priv pointer. 
+ * + * This callback is optional. + * + * Returns: + * + * 0 on success, negative error code on failure. It might return -EBUSY + * to signal that backing storage is already allocated and incompatible + * with the requirements of requesting device. + */ + int (*attach)(struct dma_buf *, struct dma_buf_attachment *); + + /** + * @detach: + * + * This is called by dma_buf_detach() to release a &dma_buf_attachment. + * Provided so that exporters can clean up any housekeeping for an + * &dma_buf_attachment. + * + * This callback is optional. + */ + void (*detach)(struct dma_buf *, struct dma_buf_attachment *); + + /** + * @map_dma_buf: + * + * This is called by dma_buf_map_attachment() and is used to map a + * shared &dma_buf into device address space, and it is mandatory. It + * can only be called if @attach has been called successfully. This + * essentially pins the DMA buffer into place, and it cannot be moved + * any more + * + * This call may sleep, e.g. when the backing storage first needs to be + * allocated, or moved to a location suitable for all currently attached + * devices. + * + * Note that any specific buffer attributes required for this function + * should get added to device_dma_parameters accessible via + * &device.dma_params from the &dma_buf_attachment. The @attach callback + * should also check these constraints. + * + * If this is being called for the first time, the exporter can now + * choose to scan through the list of attachments for this buffer, + * collate the requirements of the attached devices, and choose an + * appropriate backing storage for the buffer. + * + * Based on enum dma_data_direction, it might be possible to have + * multiple users accessing at the same time (for reading, maybe), or + * any other kind of sharing that the exporter might wish to make + * available to buffer-users. + * + * Returns: + * + * A &sg_table scatter list of or the backing storage of the DMA buffer, + * already mapped into the device address space of the &device attached + * with the provided &dma_buf_attachment. + * + * On failure, returns a negative error value wrapped into a pointer. + * May also return -EINTR when a signal was received while being + * blocked. + */ + struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, + enum dma_data_direction); + /** + * @unmap_dma_buf: + * + * This is called by dma_buf_unmap_attachment() and should unmap and + * release the &sg_table allocated in @map_dma_buf, and it is mandatory. + * It should also unpin the backing storage if this is the last mapping + * of the DMA buffer, it the exporter supports backing storage + * migration. + */ + void (*unmap_dma_buf)(struct dma_buf_attachment *, + struct sg_table *, + enum dma_data_direction); + + /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY + * if the call would block. + */ + + /** + * @release: + * + * Called after the last dma_buf_put to release the &dma_buf, and + * mandatory. + */ + void (*release)(struct dma_buf *); + + /** + * @begin_cpu_access: + * + * This is called from dma_buf_begin_cpu_access() and allows the + * exporter to ensure that the memory is actually available for cpu + * access - the exporter might need to allocate or swap-in and pin the + * backing storage. The exporter also needs to ensure that cpu access is + * coherent for the access direction. The direction can be used by the + * exporter to optimize the cache flushing, i.e. access with a different + * direction (read instead of write) might return stale or even bogus + * data (e.g. 
when the exporter needs to copy the data to temporary + * storage). + * + * This callback is optional. + * + * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command + * from userspace (where storage shouldn't be pinned to avoid handing + * de-factor mlock rights to userspace) and for the kernel-internal + * users of the various kmap interfaces, where the backing storage must + * be pinned to guarantee that the atomic kmap calls can succeed. Since + * there's no in-kernel users of the kmap interfaces yet this isn't a + * real problem. + * + * Returns: + * + * 0 on success or a negative error code on failure. This can for + * example fail when the backing storage can't be allocated. Can also + * return -ERESTARTSYS or -EINTR when the call has been interrupted and + * needs to be restarted. + */ + int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + + /** + * @end_cpu_access: + * + * This is called from dma_buf_end_cpu_access() when the importer is + * done accessing the CPU. The exporter can use this to flush caches and + * unpin any resources pinned in @begin_cpu_access. + * The result of any dma_buf kmap calls after end_cpu_access is + * undefined. + * + * This callback is optional. + * + * Returns: + * + * 0 on success or a negative error code on failure. Can return + * -ERESTARTSYS or -EINTR when the call has been interrupted and needs + * to be restarted. + */ + int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); + void *(*map)(struct dma_buf *, unsigned long); + void (*unmap)(struct dma_buf *, unsigned long, void *); + + /** + * @mmap: + * + * This callback is used by the dma_buf_mmap() function + * + * Note that the mapping needs to be incoherent, userspace is expected + * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface. + * + * Because dma-buf buffers have invariant size over their lifetime, the + * dma-buf core checks whether a vma is too large and rejects such + * mappings. The exporter hence does not need to duplicate this check. + * Drivers do not need to check this themselves. + * + * If an exporter needs to manually flush caches and hence needs to fake + * coherency for mmap support, it needs to be able to zap all the ptes + * pointing at the backing storage. Now linux mm needs a struct + * address_space associated with the struct file stored in vma->vm_file + * to do that with the function unmap_mapping_range. But the dma_buf + * framework only backs every dma_buf fd with the anon_file struct file, + * i.e. all dma_bufs share the same file. + * + * Hence exporters need to setup their own file (and address_space) + * association by setting vma->vm_file and adjusting vma->vm_pgoff in + * the dma_buf mmap callback. In the specific case of a gem driver the + * exporter could use the shmem file already provided by gem (and set + * vm_pgoff = 0). Exporters can then zap ptes by unmapping the + * corresponding range of the struct address_space associated with their + * own file. + * + * This callback is optional. + * + * Returns: + * + * 0 on success or a negative error code on failure. + */ + int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); + + void *(*vmap)(struct dma_buf *); + void (*vunmap)(struct dma_buf *, void *vaddr); +}; + +/** + * struct dma_buf - shared buffer object + * @size: size of the buffer + * @file: file pointer used for sharing buffers across, and for refcounting. + * @attachments: list of dma_buf_attachment that denotes all devices attached. + * @ops: dma_buf_ops associated with this buffer object. 
+ * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap + * @vmapping_counter: used internally to refcnt the vmaps + * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 + * @exp_name: name of the exporter; useful for debugging. + * @owner: pointer to exporter module; used for refcounting when exporter is a + * kernel module. + * @list_node: node for dma_buf accounting and debugging. + * @priv: exporter specific private data for this buffer object. + * @resv: reservation object linked to this dma-buf + * @poll: for userspace poll support + * @cb_excl: for userspace poll support + * @cb_shared: for userspace poll support + * + * This represents a shared buffer, created by calling dma_buf_export(). The + * userspace representation is a normal file descriptor, which can be created by + * calling dma_buf_fd(). + * + * Shared dma buffers are reference counted using dma_buf_put() and + * get_dma_buf(). + * + * Device DMA access is handled by the separate &struct dma_buf_attachment. + */ +struct dma_buf { + size_t size; + struct file *file; + struct list_head attachments; + const struct dma_buf_ops *ops; + struct mutex lock; + unsigned vmapping_counter; + void *vmap_ptr; + const char *exp_name; + struct module *owner; + struct list_head list_node; + void *priv; + struct reservation_object *resv; + + /* poll support */ + wait_queue_head_t poll; + + struct dma_buf_poll_cb_t { + struct dma_fence_cb cb; + wait_queue_head_t *poll; + + __poll_t active; + } cb_excl, cb_shared; +}; + +/** + * struct dma_buf_attachment - holds device-buffer attachment data + * @dmabuf: buffer for this attachment. + * @dev: device attached to the buffer. + * @node: list of dma_buf_attachment. + * @priv: exporter specific attachment data. + * + * This structure holds the attachment information between the dma_buf buffer + * and its user device(s). The list contains one attachment struct per device + * attached to the buffer. + * + * An attachment is created by calling dma_buf_attach(), and released again by + * calling dma_buf_detach(). The DMA mapping itself needed to initiate a + * transfer is created by dma_buf_map_attachment() and freed again by calling + * dma_buf_unmap_attachment(). + */ +struct dma_buf_attachment { + struct dma_buf *dmabuf; + struct device *dev; + struct list_head node; + void *priv; +}; + +/** + * struct dma_buf_export_info - holds information needed to export a dma_buf + * @exp_name: name of the exporter - useful for debugging. + * @owner: pointer to exporter module - used for refcounting kernel module + * @ops: Attach allocator-defined dma buf ops to the new buffer + * @size: Size of the buffer + * @flags: mode flags for the file + * @resv: reservation-object, NULL to allocate default one + * @priv: Attach private data of allocator to this buffer + * + * This structure holds the information required to export the buffer. Used + * with dma_buf_export() only. + */ +struct dma_buf_export_info { + const char *exp_name; + struct module *owner; + const struct dma_buf_ops *ops; + size_t size; + int flags; + struct reservation_object *resv; + void *priv; +}; + +/** + * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters + * @name: export-info name + * + * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info, + * zeroes it out and pre-populates exp_name in it. 
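As a usage note for the exporter-side structures above and the DEFINE_DMA_BUF_EXPORT_INFO() helper defined just below, here is a hedged sketch of how an exporter typically fills in the export info and calls dma_buf_export(). my_export() and my_dmabuf_ops are hypothetical; the ops table is assumed to be populated elsewhere in the driver.

/* assumed to be filled in elsewhere by the exporting driver */
static const struct dma_buf_ops my_dmabuf_ops;

static struct dma_buf *my_export(void *priv, size_t size)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);	/* exp_name = KBUILD_MODNAME */

	exp_info.ops   = &my_dmabuf_ops;
	exp_info.size  = size;
	exp_info.flags = O_RDWR;
	exp_info.priv  = priv;

	/* returns the new dma_buf or an ERR_PTR() on failure */
	return dma_buf_export(&exp_info);
}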
+ */ +#define DEFINE_DMA_BUF_EXPORT_INFO(name) \ + struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ + .owner = THIS_MODULE } + +/** + * get_dma_buf - convenience wrapper for get_file. + * @dmabuf: [in] pointer to dma_buf + * + * Increments the reference count on the dma-buf, needed in case of drivers + * that either need to create additional references to the dmabuf on the + * kernel side. For example, an exporter that needs to keep a dmabuf ptr + * so that subsequent exports don't create a new dmabuf. + */ +static inline void get_dma_buf(struct dma_buf *dmabuf) +{ + get_file(dmabuf->file); +} + +struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, + struct device *dev); +void dma_buf_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *dmabuf_attach); + +struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info); + +int dma_buf_fd(struct dma_buf *dmabuf, int flags); +struct dma_buf *dma_buf_get(int fd); +void dma_buf_put(struct dma_buf *dmabuf); + +struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, + enum dma_data_direction); +void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, + enum dma_data_direction); +int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, + enum dma_data_direction dir); +int dma_buf_end_cpu_access(struct dma_buf *dma_buf, + enum dma_data_direction dir); +void *dma_buf_kmap(struct dma_buf *, unsigned long); +void dma_buf_kunmap(struct dma_buf *, unsigned long, void *); + +int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, + unsigned long); +void *dma_buf_vmap(struct dma_buf *); +void dma_buf_vunmap(struct dma_buf *, void *vaddr); +#endif /* __DMA_BUF_H__ */ diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h new file mode 100644 index 000000000..f247e8aa5 --- /dev/null +++ b/include/linux/dma-contiguous.h @@ -0,0 +1,164 @@ +#ifndef __LINUX_CMA_H +#define __LINUX_CMA_H + +/* + * Contiguous Memory Allocator for DMA mapping framework + * Copyright (c) 2010-2011 by Samsung Electronics. + * Written by: + * Marek Szyprowski + * Michal Nazarewicz + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License or (at your optional) any later version of the license. + */ + +/* + * Contiguous Memory Allocator + * + * The Contiguous Memory Allocator (CMA) makes it possible to + * allocate big contiguous chunks of memory after the system has + * booted. + * + * Why is it needed? + * + * Various devices on embedded systems have no scatter-getter and/or + * IO map support and require contiguous blocks of memory to + * operate. They include devices such as cameras, hardware video + * coders, etc. + * + * Such devices often require big memory buffers (a full HD frame + * is, for instance, more then 2 mega pixels large, i.e. more than 6 + * MB of memory), which makes mechanisms such as kmalloc() or + * alloc_page() ineffective. + * + * At the same time, a solution where a big memory region is + * reserved for a device is suboptimal since often more memory is + * reserved then strictly required and, moreover, the memory is + * inaccessible to page system even if device drivers don't use it. + * + * CMA tries to solve this issue by operating on memory regions + * where only movable pages can be allocated from. 
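On the importer side, the attach/map declarations above combine into a fairly fixed pattern. A rough sketch (my_import() is hypothetical and error handling is abbreviated), assuming the usual ERR_PTR() return convention of these helpers:

#include <linux/dma-buf.h>
#include <linux/err.h>

static int my_import(struct device *dev, int fd)
{
	struct dma_buf *buf;
	struct dma_buf_attachment *att;
	struct sg_table *sgt;
	int ret = 0;

	buf = dma_buf_get(fd);			/* takes a file reference */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	att = dma_buf_attach(buf, dev);
	if (IS_ERR(att)) {
		ret = PTR_ERR(att);
		goto put;
	}

	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto detach;
	}

	/* ... program the device with the returned sg_table ... */

	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
detach:
	dma_buf_detach(buf, att);
put:
	dma_buf_put(buf);
	return ret;
}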
This way, kernel + * can use the memory for pagecache and when device driver requests + * it, allocated pages can be migrated. + * + * Driver usage + * + * CMA should not be used by the device drivers directly. It is + * only a helper framework for dma-mapping subsystem. + * + * For more information, see kernel-docs in kernel/dma/contiguous.c + */ + +#ifdef __KERNEL__ + +#include + +struct cma; +struct page; + +#ifdef CONFIG_DMA_CMA + +extern struct cma *dma_contiguous_default_area; + +static inline struct cma *dev_get_cma_area(struct device *dev) +{ + if (dev && dev->cma_area) + return dev->cma_area; + return dma_contiguous_default_area; +} + +static inline void dev_set_cma_area(struct device *dev, struct cma *cma) +{ + if (dev) + dev->cma_area = cma; +} + +static inline void dma_contiguous_set_default(struct cma *cma) +{ + dma_contiguous_default_area = cma; +} + +void dma_contiguous_reserve(phys_addr_t addr_limit); + +int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, + phys_addr_t limit, struct cma **res_cma, + bool fixed); + +/** + * dma_declare_contiguous() - reserve area for contiguous memory handling + * for particular device + * @dev: Pointer to device structure. + * @size: Size of the reserved memory. + * @base: Start address of the reserved memory (optional, 0 for any). + * @limit: End address of the reserved memory (optional, 0 for any). + * + * This function reserves memory for specified device. It should be + * called by board specific code when early allocator (memblock or bootmem) + * is still activate. + */ + +static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, + phys_addr_t base, phys_addr_t limit) +{ + struct cma *cma; + int ret; + ret = dma_contiguous_reserve_area(size, base, limit, &cma, true); + if (ret == 0) + dev_set_cma_area(dev, cma); + + return ret; +} + +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, + unsigned int order, bool no_warn); +bool dma_release_from_contiguous(struct device *dev, struct page *pages, + int count); + +#else + +static inline struct cma *dev_get_cma_area(struct device *dev) +{ + return NULL; +} + +static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { } + +static inline void dma_contiguous_set_default(struct cma *cma) { } + +static inline void dma_contiguous_reserve(phys_addr_t limit) { } + +static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, + phys_addr_t limit, struct cma **res_cma, + bool fixed) +{ + return -ENOSYS; +} + +static inline +int dma_declare_contiguous(struct device *dev, phys_addr_t size, + phys_addr_t base, phys_addr_t limit) +{ + return -ENOSYS; +} + +static inline +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, + unsigned int order, bool no_warn) +{ + return NULL; +} + +static inline +bool dma_release_from_contiguous(struct device *dev, struct page *pages, + int count) +{ + return false; +} + +#endif + +#endif + +#endif diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h new file mode 100644 index 000000000..a785f2507 --- /dev/null +++ b/include/linux/dma-debug.h @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2008 Advanced Micro Devices, Inc. + * + * Author: Joerg Roedel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
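To make the per-device hook-up above concrete: board code that wants a private CMA area for one device might do something like the following while memblock is still active. my_cam_dev is a hypothetical platform device; later allocations for that device are then served from this area via dev_get_cma_area().

/* reserve 16 MiB, placed anywhere, exclusively for this device */
static void __init my_board_reserve_cma(void)
{
	if (dma_declare_contiguous(&my_cam_dev.dev, SZ_16M, 0, 0))
		pr_warn("my_cam: no CMA area reserved\n");
}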
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __DMA_DEBUG_H +#define __DMA_DEBUG_H + +#include + +struct device; +struct scatterlist; +struct bus_type; + +#ifdef CONFIG_DMA_API_DEBUG + +extern void dma_debug_add_bus(struct bus_type *bus); + +extern int dma_debug_resize_entries(u32 num_entries); + +extern void debug_dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + int direction, dma_addr_t dma_addr, + bool map_single); + +extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); + +extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, bool map_single); + +extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction); + +extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, int dir); + +extern void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt); + +extern void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr); + +extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr, + size_t size, int direction, + dma_addr_t dma_addr); + +extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, + size_t size, int direction); + +extern void debug_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + int direction); + +extern void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction); + +extern void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction); + +extern void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, int direction); + +extern void debug_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, + int nelems, int direction); + +extern void debug_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, + int nelems, int direction); + +extern void debug_dma_dump_mappings(struct device *dev); + +extern void debug_dma_assert_idle(struct page *page); + +#else /* CONFIG_DMA_API_DEBUG */ + +static inline void dma_debug_add_bus(struct bus_type *bus) +{ +} + +static inline int dma_debug_resize_entries(u32 num_entries) +{ + return 0; +} + +static inline void debug_dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + int direction, dma_addr_t dma_addr, + bool map_single) +{ +} + +static inline void debug_dma_mapping_error(struct device *dev, + dma_addr_t dma_addr) +{ +} + +static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, + bool map_single) +{ +} + +static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction) +{ +} + +static inline void debug_dma_unmap_sg(struct device *dev, + struct scatterlist *sglist, + int nelems, int dir) +{ +} + +static inline void 
debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt) +{ +} + +static inline void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr) +{ +} + +static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr, + size_t size, int direction, + dma_addr_t dma_addr) +{ +} + +static inline void debug_dma_unmap_resource(struct device *dev, + dma_addr_t dma_addr, size_t size, + int direction) +{ +} + +static inline void debug_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction) +{ +} + +static inline void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction) +{ +} + +static inline void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction) +{ +} + +static inline void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction) +{ +} + +static inline void debug_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, + int nelems, int direction) +{ +} + +static inline void debug_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, + int nelems, int direction) +{ +} + +static inline void debug_dma_dump_mappings(struct device *dev) +{ +} + +static inline void debug_dma_assert_idle(struct page *page) +{ +} + +#endif /* CONFIG_DMA_API_DEBUG */ + +#endif /* __DMA_DEBUG_H */ diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h new file mode 100644 index 000000000..8d9f33feb --- /dev/null +++ b/include/linux/dma-direct.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DMA_DIRECT_H +#define _LINUX_DMA_DIRECT_H 1 + +#include +#include + +#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA +#include +#else +static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + dma_addr_t dev_addr = (dma_addr_t)paddr; + + return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); +} + +static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr) +{ + phys_addr_t paddr = (phys_addr_t)dev_addr; + + return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); +} + +static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) +{ + if (!dev->dma_mask) + return false; + + return addr + size - 1 <= *dev->dma_mask; +} +#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ + +/* + * If memory encryption is supported, phys_to_dma will set the memory encryption + * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma + * and __dma_to_phys versions should only be used on non-encrypted memory for + * special occasions like DMA coherent buffers. 
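As a worked illustration of the dma_pfn_offset arithmetic in the generic helpers above (the addresses are invented): a bus that makes RAM at physical 0x80000000 visible to the device at bus address 0 would set

	dev->dma_pfn_offset = 0x80000000 >> PAGE_SHIFT;

	/* with the default __phys_to_dma()/__dma_to_phys() above:     */
	/*   __phys_to_dma(dev, 0x80001000) == 0x00001000              */
	/*   __dma_to_phys(dev, 0x00001000) == 0x80001000              */
	/* dma_capable() then checks the translated address against    */
	/* dev->dma_mask before a streaming mapping is allowed.        */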
+ */ +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return __sme_set(__phys_to_dma(dev, paddr)); +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + return __sme_clr(__dma_to_phys(dev, daddr)); +} + +#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN +void dma_mark_clean(void *addr, size_t size); +#else +static inline void dma_mark_clean(void *addr, size_t size) +{ +} +#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ + +void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs); +void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs); +dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs); +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs); +int dma_direct_supported(struct device *dev, u64 mask); +int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr); +#endif /* _LINUX_DMA_DIRECT_H */ diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h new file mode 100644 index 000000000..9c96e30e6 --- /dev/null +++ b/include/linux/dma-direction.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DMA_DIRECTION_H +#define _LINUX_DMA_DIRECTION_H + +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +#endif diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h new file mode 100644 index 000000000..bc8940ca2 --- /dev/null +++ b/include/linux/dma-fence-array.h @@ -0,0 +1,91 @@ +/* + * fence-array: aggregates fence to be waited together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Authors: + * Gustavo Padovan + * Christian König + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_DMA_FENCE_ARRAY_H +#define __LINUX_DMA_FENCE_ARRAY_H + +#include +#include + +/** + * struct dma_fence_array_cb - callback helper for fence array + * @cb: fence callback structure for signaling + * @array: reference to the parent fence array object + */ +struct dma_fence_array_cb { + struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +/** + * struct dma_fence_array - fence to represent an array of fences + * @base: fence base class + * @lock: spinlock for fence handling + * @num_fences: number of fences in the array + * @num_pending: fences in the array still pending + * @fences: array of the fences + */ +struct dma_fence_array { + struct dma_fence base; + + spinlock_t lock; + unsigned num_fences; + atomic_t num_pending; + struct dma_fence **fences; + + struct irq_work work; +}; + +extern const struct dma_fence_ops dma_fence_array_ops; + +/** + * dma_fence_is_array - check if a fence is from the array subsclass + * @fence: fence to test + * + * Return true if it is a dma_fence_array and false otherwise. 
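To show how an aggregate fence is built and recognised, here is a hedged sketch (merge_two() is an invented name) using dma_fence_array_create(), which is declared a little further below and which takes over both the fences[] array and the references stored in it:

#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *merge_two(struct dma_fence *a, struct dma_fence *b)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	/* the array takes ownership of these references on success */
	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1,
				       false /* signal when all have signaled */);
	if (!array) {
		dma_fence_put(a);
		dma_fence_put(b);
		kfree(fences);
		return NULL;
	}

	/* dma_fence_is_array(&array->base) is now true */
	return &array->base;
}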
+ */ +static inline bool dma_fence_is_array(struct dma_fence *fence) +{ + return fence->ops == &dma_fence_array_ops; +} + +/** + * to_dma_fence_array - cast a fence to a dma_fence_array + * @fence: fence to cast to a dma_fence_array + * + * Returns NULL if the fence is not a dma_fence_array, + * or the dma_fence_array otherwise. + */ +static inline struct dma_fence_array * +to_dma_fence_array(struct dma_fence *fence) +{ + if (fence->ops != &dma_fence_array_ops) + return NULL; + + return container_of(fence, struct dma_fence_array, base); +} + +struct dma_fence_array *dma_fence_array_create(int num_fences, + struct dma_fence **fences, + u64 context, unsigned seqno, + bool signal_on_any); + +bool dma_fence_match_context(struct dma_fence *fence, u64 context); + +#endif /* __LINUX_DMA_FENCE_ARRAY_H */ diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h new file mode 100644 index 000000000..02dba8cd0 --- /dev/null +++ b/include/linux/dma-fence.h @@ -0,0 +1,568 @@ +/* + * Fence mechanism for dma-buf to allow for asynchronous dma access + * + * Copyright (C) 2012 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark + * Maarten Lankhorst + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_DMA_FENCE_H +#define __LINUX_DMA_FENCE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct dma_fence; +struct dma_fence_ops; +struct dma_fence_cb; + +/** + * struct dma_fence - software synchronization primitive + * @refcount: refcount for this fence + * @ops: dma_fence_ops associated with this fence + * @rcu: used for releasing fence with kfree_rcu + * @cb_list: list of all callbacks to call + * @lock: spin_lock_irqsave used for locking + * @context: execution context this fence belongs to, returned by + * dma_fence_context_alloc() + * @seqno: the sequence number of this fence inside the execution context, + * can be compared to decide which fence would be signaled later. + * @flags: A mask of DMA_FENCE_FLAG_* defined below + * @timestamp: Timestamp when the fence was signaled. + * @error: Optional, only valid if < 0, must be set before calling + * dma_fence_signal, indicates that the fence has completed with an error. + * + * the flags member must be manipulated and read using the appropriate + * atomic ops (bit_*), so taking the spinlock will not be needed most + * of the time. + * + * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled + * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling + * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called + * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the + * implementer of the fence for its own purposes. Can be used in different + * ways by different fence implementers, so do not rely on this. + * + * Since atomic bitops are used, this is not guaranteed to be the case. + * Particularly, if the bit was set, but dma_fence_signal was called right + * before this bit was set, it would have been able to set the + * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. 
+ * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting + * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that + * after dma_fence_signal was called, any enable_signaling call will have either + * been completed, or never called at all. + */ +struct dma_fence { + struct kref refcount; + const struct dma_fence_ops *ops; + struct rcu_head rcu; + struct list_head cb_list; + spinlock_t *lock; + u64 context; + unsigned seqno; + unsigned long flags; + ktime_t timestamp; + int error; +}; + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT, + DMA_FENCE_FLAG_TIMESTAMP_BIT, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + DMA_FENCE_FLAG_USER_BITS, /* must always be last member */ +}; + +typedef void (*dma_fence_func_t)(struct dma_fence *fence, + struct dma_fence_cb *cb); + +/** + * struct dma_fence_cb - callback for dma_fence_add_callback() + * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list + * @func: dma_fence_func_t to call + * + * This struct will be initialized by dma_fence_add_callback(), additional + * data can be passed along by embedding dma_fence_cb in another struct. + */ +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +/** + * struct dma_fence_ops - operations implemented for fence + * + */ +struct dma_fence_ops { + /** + * @get_driver_name: + * + * Returns the driver name. This is a callback to allow drivers to + * compute the name at runtime, without having it to store permanently + * for each fence, or build a cache of some sort. + * + * This callback is mandatory. + */ + const char * (*get_driver_name)(struct dma_fence *fence); + + /** + * @get_timeline_name: + * + * Return the name of the context this fence belongs to. This is a + * callback to allow drivers to compute the name at runtime, without + * having it to store permanently for each fence, or build a cache of + * some sort. + * + * This callback is mandatory. + */ + const char * (*get_timeline_name)(struct dma_fence *fence); + + /** + * @enable_signaling: + * + * Enable software signaling of fence. + * + * For fence implementations that have the capability for hw->hw + * signaling, they can implement this op to enable the necessary + * interrupts, or insert commands into cmdstream, etc, to avoid these + * costly operations for the common case where only hw->hw + * synchronization is required. This is called in the first + * dma_fence_wait() or dma_fence_add_callback() path to let the fence + * implementation know that there is another driver waiting on the + * signal (ie. hw->sw case). + * + * This function can be called from atomic context, but not + * from irq context, so normal spinlocks can be used. + * + * A return value of false indicates the fence already passed, + * or some failure occurred that made it impossible to enable + * signaling. True indicates successful enabling. + * + * &dma_fence.error may be set in enable_signaling, but only when false + * is returned. + * + * Since many implementations can call dma_fence_signal() even when before + * @enable_signaling has been called there's a race window, where the + * dma_fence_signal() might result in the final fence reference being + * released and its memory freed. To avoid this, implementations of this + * callback should grab their own reference using dma_fence_get(), to be + * released when the fence is signalled (through e.g. the interrupt + * handler). + * + * This callback is optional. 
If this callback is not present, then the + * driver must always have signaling enabled. + */ + bool (*enable_signaling)(struct dma_fence *fence); + + /** + * @signaled: + * + * Peek whether the fence is signaled, as a fastpath optimization for + * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this + * callback does not need to make any guarantees beyond that a fence + * once indicates as signalled must always return true from this + * callback. This callback may return false even if the fence has + * completed already, in this case information hasn't propogated throug + * the system yet. See also dma_fence_is_signaled(). + * + * May set &dma_fence.error if returning true. + * + * This callback is optional. + */ + bool (*signaled)(struct dma_fence *fence); + + /** + * @wait: + * + * Custom wait implementation, defaults to dma_fence_default_wait() if + * not set. + * + * The dma_fence_default_wait implementation should work for any fence, as long + * as @enable_signaling works correctly. This hook allows drivers to + * have an optimized version for the case where a process context is + * already available, e.g. if @enable_signaling for the general case + * needs to set up a worker thread. + * + * Must return -ERESTARTSYS if the wait is intr = true and the wait was + * interrupted, and remaining jiffies if fence has signaled, or 0 if wait + * timed out. Can also return other error values on custom implementations, + * which should be treated as if the fence is signaled. For example a hardware + * lockup could be reported like that. + * + * This callback is optional. + */ + signed long (*wait)(struct dma_fence *fence, + bool intr, signed long timeout); + + /** + * @release: + * + * Called on destruction of fence to release additional resources. + * Can be called from irq context. This callback is optional. If it is + * NULL, then dma_fence_free() is instead called as the default + * implementation. + */ + void (*release)(struct dma_fence *fence); + + /** + * @fence_value_str: + * + * Callback to fill in free-form debug info specific to this fence, like + * the sequence number. + * + * This callback is optional. + */ + void (*fence_value_str)(struct dma_fence *fence, char *str, int size); + + /** + * @timeline_value_str: + * + * Fills in the current value of the timeline as a string, like the + * sequence number. Note that the specific fence passed to this function + * should not matter, drivers should only use it to look up the + * corresponding timeline structures. + */ + void (*timeline_value_str)(struct dma_fence *fence, + char *str, int size); +}; + +void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, + spinlock_t *lock, u64 context, unsigned seqno); + +void dma_fence_release(struct kref *kref); +void dma_fence_free(struct dma_fence *fence); + +/** + * dma_fence_put - decreases refcount of the fence + * @fence: fence to reduce refcount of + */ +static inline void dma_fence_put(struct dma_fence *fence) +{ + if (fence) + kref_put(&fence->refcount, dma_fence_release); +} + +/** + * dma_fence_get - increases refcount of the fence + * @fence: fence to increase refcount of + * + * Returns the same fence, with refcount increased by 1. 
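Pulling the mandatory pieces together, a minimal driver fence might look like the sketch below (every my_* name is hypothetical). Only @get_driver_name and @get_timeline_name are provided, so, per the note above, signaling is treated as always enabled.

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "my-driver";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "my-timeline";
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name   = my_get_driver_name,
	.get_timeline_name = my_get_timeline_name,
	/* no .enable_signaling: signaling is always enabled */
	/* no .release: the default dma_fence_free() is used */
};

static DEFINE_SPINLOCK(my_fence_lock);

static struct dma_fence *my_fence_create(u64 context, unsigned seqno)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	dma_fence_init(f, &my_fence_ops, &my_fence_lock, context, seqno);
	return f;
}

/* the driver later completes it, typically from its interrupt handler: */
/*	dma_fence_signal(f);						*/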
+ */ +static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) +{ + if (fence) + kref_get(&fence->refcount); + return fence; +} + +/** + * dma_fence_get_rcu - get a fence from a reservation_object_list with + * rcu read lock + * @fence: fence to increase refcount of + * + * Function returns NULL if no refcount could be obtained, or the fence. + */ +static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) +{ + if (kref_get_unless_zero(&fence->refcount)) + return fence; + else + return NULL; +} + +/** + * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence + * @fencep: pointer to fence to increase refcount of + * + * Function returns NULL if no refcount could be obtained, or the fence. + * This function handles acquiring a reference to a fence that may be + * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU), + * so long as the caller is using RCU on the pointer to the fence. + * + * An alternative mechanism is to employ a seqlock to protect a bunch of + * fences, such as used by struct reservation_object. When using a seqlock, + * the seqlock must be taken before and checked after a reference to the + * fence is acquired (as shown here). + * + * The caller is required to hold the RCU read lock. + */ +static inline struct dma_fence * +dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep) +{ + do { + struct dma_fence *fence; + + fence = rcu_dereference(*fencep); + if (!fence) + return NULL; + + if (!dma_fence_get_rcu(fence)) + continue; + + /* The atomic_inc_not_zero() inside dma_fence_get_rcu() + * provides a full memory barrier upon success (such as now). + * This is paired with the write barrier from assigning + * to the __rcu protected fence pointer so that if that + * pointer still matches the current fence, we know we + * have successfully acquire a reference to it. If it no + * longer matches, we are holding a reference to some other + * reallocated pointer. This is possible if the allocator + * is using a freelist like SLAB_TYPESAFE_BY_RCU where the + * fence remains valid for the RCU grace period, but it + * may be reallocated. When using such allocators, we are + * responsible for ensuring the reference we get is to + * the right fence, as below. + */ + if (fence == rcu_access_pointer(*fencep)) + return rcu_pointer_handoff(fence); + + dma_fence_put(fence); + } while (1); +} + +int dma_fence_signal(struct dma_fence *fence); +int dma_fence_signal_locked(struct dma_fence *fence); +signed long dma_fence_default_wait(struct dma_fence *fence, + bool intr, signed long timeout); +int dma_fence_add_callback(struct dma_fence *fence, + struct dma_fence_cb *cb, + dma_fence_func_t func); +bool dma_fence_remove_callback(struct dma_fence *fence, + struct dma_fence_cb *cb); +void dma_fence_enable_sw_signaling(struct dma_fence *fence); + +/** + * dma_fence_is_signaled_locked - Return an indication if the fence + * is signaled yet. + * @fence: the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if dma_fence_add_callback(), dma_fence_wait() or + * dma_fence_enable_sw_signaling() haven't been called before. + * + * This function requires &dma_fence.lock to be held. + * + * See also dma_fence_is_signaled(). 
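The RCU discussion above boils down to a small pattern in practice. A sketch, assuming a hypothetical object that publishes its most recent fence through an __rcu pointer:

struct my_obj {
	struct dma_fence __rcu *fence;	/* updated by the driver under RCU */
};

static struct dma_fence *my_obj_get_fence(struct my_obj *obj)
{
	struct dma_fence *fence;

	rcu_read_lock();		/* required by dma_fence_get_rcu_safe() */
	fence = dma_fence_get_rcu_safe(&obj->fence);
	rcu_read_unlock();

	/* may be NULL; otherwise the caller now owns a reference and
	 * must eventually drop it with dma_fence_put() */
	return fence;
}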
+ */ +static inline bool +dma_fence_is_signaled_locked(struct dma_fence *fence) +{ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + dma_fence_signal_locked(fence); + return true; + } + + return false; +} + +/** + * dma_fence_is_signaled - Return an indication if the fence is signaled yet. + * @fence: the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if dma_fence_add_callback(), dma_fence_wait() or + * dma_fence_enable_sw_signaling() haven't been called before. + * + * It's recommended for seqno fences to call dma_fence_signal when the + * operation is complete, it makes it possible to prevent issues from + * wraparound between time of issue and time of use by checking the return + * value of this function before calling hardware-specific wait instructions. + * + * See also dma_fence_is_signaled_locked(). + */ +static inline bool +dma_fence_is_signaled(struct dma_fence *fence) +{ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + dma_fence_signal(fence); + return true; + } + + return false; +} + +/** + * __dma_fence_is_later - return if f1 is chronologically later than f2 + * @f1: the first fence's seqno + * @f2: the second fence's seqno from the same context + * + * Returns true if f1 is chronologically later than f2. Both fences must be + * from the same context, since a seqno is not common across contexts. + */ +static inline bool __dma_fence_is_later(u32 f1, u32 f2) +{ + return (int)(f1 - f2) > 0; +} + +/** + * dma_fence_is_later - return if f1 is chronologically later than f2 + * @f1: the first fence from the same context + * @f2: the second fence from the same context + * + * Returns true if f1 is chronologically later than f2. Both fences must be + * from the same context, since a seqno is not re-used across contexts. + */ +static inline bool dma_fence_is_later(struct dma_fence *f1, + struct dma_fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return false; + + return __dma_fence_is_later(f1->seqno, f2->seqno); +} + +/** + * dma_fence_later - return the chronologically later fence + * @f1: the first fence from the same context + * @f2: the second fence from the same context + * + * Returns NULL if both fences are signaled, otherwise the fence that would be + * signaled last. Both fences must be from the same context, since a seqno is + * not re-used across contexts. + */ +static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, + struct dma_fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return NULL; + + /* + * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never + * have been set if enable_signaling wasn't called, and enabling that + * here is overkill. + */ + if (dma_fence_is_later(f1, f2)) + return dma_fence_is_signaled(f1) ? NULL : f1; + else + return dma_fence_is_signaled(f2) ? NULL : f2; +} + +/** + * dma_fence_get_status_locked - returns the status upon completion + * @fence: the dma_fence to query + * + * Drivers can supply an optional error status condition before they signal + * the fence (to indicate whether the fence was completed due to an error + * rather than success). 
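A quick worked check of the wraparound-safe seqno comparison defined above (values invented):

	/* __dma_fence_is_later(5, 3)          -> (int)(5 - 3)  ==  2 > 0 -> true  */
	/* __dma_fence_is_later(3, 5)          -> (int)(3 - 5)  == -2 > 0 -> false */
	/* __dma_fence_is_later(2, 0xfffffffe) -> (int)(2 - 0xfffffffe) == 4 > 0 -> true */
	/* so a seqno that has just wrapped past zero still compares as later,    */
	/* provided the two values are less than 2^31 apart.                      */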
The value of the status condition is only valid + * if the fence has been signaled, dma_fence_get_status_locked() first checks + * the signal state before reporting the error status. + * + * Returns 0 if the fence has not yet been signaled, 1 if the fence has + * been signaled without an error condition, or a negative error code + * if the fence has been completed in err. + */ +static inline int dma_fence_get_status_locked(struct dma_fence *fence) +{ + if (dma_fence_is_signaled_locked(fence)) + return fence->error ?: 1; + else + return 0; +} + +int dma_fence_get_status(struct dma_fence *fence); + +/** + * dma_fence_set_error - flag an error condition on the fence + * @fence: the dma_fence + * @error: the error to store + * + * Drivers can supply an optional error status condition before they signal + * the fence, to indicate that the fence was completed due to an error + * rather than success. This must be set before signaling (so that the value + * is visible before any waiters on the signal callback are woken). This + * helper exists to help catching erroneous setting of #dma_fence.error. + */ +static inline void dma_fence_set_error(struct dma_fence *fence, + int error) +{ + WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); + WARN_ON(error >= 0 || error < -MAX_ERRNO); + + fence->error = error; +} + +signed long dma_fence_wait_timeout(struct dma_fence *, + bool intr, signed long timeout); +signed long dma_fence_wait_any_timeout(struct dma_fence **fences, + uint32_t count, + bool intr, signed long timeout, + uint32_t *idx); + +/** + * dma_fence_wait - sleep until the fence gets signaled + * @fence: the fence to wait on + * @intr: if true, do an interruptible wait + * + * This function will return -ERESTARTSYS if interrupted by a signal, + * or 0 if the fence was signaled. Other error values may be + * returned on custom implementations. + * + * Performs a synchronous wait on this fence. It is assumed the caller + * directly or indirectly holds a reference to the fence, otherwise the + * fence might be freed before return, resulting in undefined behavior. + * + * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout(). + */ +static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) +{ + signed long ret; + + /* Since dma_fence_wait_timeout cannot timeout with + * MAX_SCHEDULE_TIMEOUT, only valid return values are + * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. + */ + ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); + + return ret < 0 ? ret : 0; +} + +u64 dma_fence_context_alloc(unsigned num); + +#define DMA_FENCE_TRACE(f, fmt, args...) \ + do { \ + struct dma_fence *__ff = (f); \ + if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ + pr_info("f %llu#%u: " fmt, \ + __ff->context, __ff->seqno, ##args); \ + } while (0) + +#define DMA_FENCE_WARN(f, fmt, args...) \ + do { \ + struct dma_fence *__ff = (f); \ + pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#define DMA_FENCE_ERR(f, fmt, args...) \ + do { \ + struct dma_fence *__ff = (f); \ + pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#endif /* __LINUX_DMA_FENCE_H */ diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h new file mode 100644 index 000000000..e8ca5e654 --- /dev/null +++ b/include/linux/dma-iommu.h @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2014-2015 ARM Ltd. 
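The ordering requirement documented for dma_fence_set_error() above looks like this in a completion path (my_job_complete() is a hypothetical name):

static void my_job_complete(struct dma_fence *fence, int err)
{
	if (err < 0)
		dma_fence_set_error(fence, err);	/* must precede signaling */

	dma_fence_signal(fence);
	dma_fence_put(fence);		/* drop the reference held for the job */
}

/* a waiter then observes the outcome via dma_fence_wait() and
 * dma_fence_get_status() */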
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __DMA_IOMMU_H +#define __DMA_IOMMU_H + +#ifdef __KERNEL__ +#include +#include + +#ifdef CONFIG_IOMMU_DMA +#include +#include +#include + +int iommu_dma_init(void); + +/* Domain management interface for IOMMU drivers */ +int iommu_get_dma_cookie(struct iommu_domain *domain); +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); +void iommu_put_dma_cookie(struct iommu_domain *domain); + +/* Setup call for arch DMA mapping code */ +int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, + u64 size, struct device *dev); + +/* General helpers for DMA-API <-> IOMMU-API interaction */ +int dma_info_to_prot(enum dma_data_direction dir, bool coherent, + unsigned long attrs); + +/* + * These implement the bulk of the relevant DMA mapping callbacks, but require + * the arch code to take care of attributes and cache maintenance + */ +struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, + unsigned long attrs, int prot, dma_addr_t *handle, + void (*flush_page)(struct device *, const void *, phys_addr_t)); +void iommu_dma_free(struct device *dev, struct page **pages, size_t size, + dma_addr_t *handle); + +int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma); + +dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, int prot); +int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int prot); + +/* + * Arch code with no special attribute handling may use these + * directly as DMA mapping callbacks for simplicity + */ +void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, + enum dma_data_direction dir, unsigned long attrs); +void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs); +dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs); +void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, unsigned long attrs); +int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); + +/* The DMA API isn't _quite_ the whole story, though... 
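For the cookie management above, the expected call sites are an IOMMU driver's domain allocation and teardown paths. A rough sketch under that assumption (my_domain_alloc()/my_domain_free() are invented; real drivers embed the iommu_domain in their own structure rather than allocating it bare):

static struct iommu_domain *my_domain_alloc(unsigned int type)
{
	struct iommu_domain *domain = kzalloc(sizeof(*domain), GFP_KERNEL);

	if (!domain)
		return NULL;

	/* only DMA-API managed domains carry an IOVA cookie */
	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(domain)) {
		kfree(domain);
		return NULL;
	}
	return domain;
}

static void my_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);	/* no-op if no cookie was attached */
	kfree(domain);
}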
*/ +void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); + +#else + +struct iommu_domain; +struct msi_msg; +struct device; + +static inline int iommu_dma_init(void) +{ + return 0; +} + +static inline int iommu_get_dma_cookie(struct iommu_domain *domain) +{ + return -ENODEV; +} + +static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) +{ + return -ENODEV; +} + +static inline void iommu_put_dma_cookie(struct iommu_domain *domain) +{ +} + +static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) +{ +} + +static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) +{ +} + +#endif /* CONFIG_IOMMU_DMA */ +#endif /* __KERNEL__ */ +#endif /* __DMA_IOMMU_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h new file mode 100644 index 000000000..669cde2fa --- /dev/null +++ b/include/linux/dma-mapping.h @@ -0,0 +1,853 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DMA_MAPPING_H +#define _LINUX_DMA_MAPPING_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * List of possible attributes associated with a DMA mapping. The semantics + * of each attribute should be defined in Documentation/DMA-attributes.txt. + * + * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute + * forces all pending DMA writes to complete. + */ +#define DMA_ATTR_WRITE_BARRIER (1UL << 0) +/* + * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping + * may be weakly ordered, that is that reads and writes may pass each other. + */ +#define DMA_ATTR_WEAK_ORDERING (1UL << 1) +/* + * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be + * buffered to improve performance. + */ +#define DMA_ATTR_WRITE_COMBINE (1UL << 2) +/* + * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either + * consistent or non-consistent memory as it sees fit. + */ +#define DMA_ATTR_NON_CONSISTENT (1UL << 3) +/* + * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel + * virtual mapping for the allocated buffer. + */ +#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4) +/* + * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of + * the CPU cache for the given buffer assuming that it has been already + * transferred to 'device' domain. + */ +#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5) +/* + * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer + * in physical memory. + */ +#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6) +/* + * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem + * that it's probably not worth the time to try to allocate memory to in a way + * that gives better TLB efficiency. + */ +#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7) +/* + * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress + * allocation failure reports (similarly to __GFP_NOWARN). + */ +#define DMA_ATTR_NO_WARN (1UL << 8) + +/* + * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully + * accessible at an elevated privilege level (and ideally inaccessible or + * at least read-only at lesser-privileged levels). + */ +#define DMA_ATTR_PRIVILEGED (1UL << 9) + +/* + * A dma_addr_t can hold any valid DMA or bus address for the platform. + * It can be given to a device to use as a DMA source or target. 
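To tie the attribute bits above to the allocator entry points declared later in this header, a hypothetical driver asking for a write-combined buffer without allocation-failure warnings might do:

static void *my_alloc_wc(struct device *dev, size_t size, dma_addr_t *handle)
{
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
}

static void my_free_wc(struct device *dev, size_t size, void *cpu_addr,
		       dma_addr_t handle)
{
	/* the same attrs must be passed back when freeing */
	dma_free_attrs(dev, size, cpu_addr, handle,
		       DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
}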
A CPU cannot + * reference a dma_addr_t directly because there may be translation between + * its physical address space and the bus address space. + */ +struct dma_map_ops { + void* (*alloc)(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs); + void (*free)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + unsigned long attrs); + int (*mmap)(struct device *, struct vm_area_struct *, + void *, dma_addr_t, size_t, + unsigned long attrs); + + int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *, + dma_addr_t, size_t, unsigned long attrs); + + dma_addr_t (*map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs); + void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + /* + * map_sg returns 0 on error and a value > 0 on success. + * It should never return a value < 0. + */ + int (*map_sg)(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs); + void (*unmap_sg)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir, + unsigned long attrs); + dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + void (*sync_single_for_cpu)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); + void (*sync_single_for_device)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); + void (*sync_sg_for_cpu)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + void (*sync_sg_for_device)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + void (*cache_sync)(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction); + int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); + int (*dma_supported)(struct device *dev, u64 mask); +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK + u64 (*get_required_mask)(struct device *dev); +#endif +}; + +extern const struct dma_map_ops dma_direct_ops; +extern const struct dma_map_ops dma_noncoherent_ops; +extern const struct dma_map_ops dma_virt_ops; + +#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) + +#define DMA_MASK_NONE 0x0ULL + +static inline int valid_dma_direction(int dma_direction) +{ + return ((dma_direction == DMA_BIDIRECTIONAL) || + (dma_direction == DMA_TO_DEVICE) || + (dma_direction == DMA_FROM_DEVICE)); +} + +static inline int is_device_dma_capable(struct device *dev) +{ + return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; +} + +#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT +/* + * These three functions are only for dma allocator. + * Don't use them in device drivers. 
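+ * They are used by dma_alloc_attrs(), dma_free_attrs() and the coherent
+ * mmap path below to try a per-device coherent pool (declared with
+ * dma_declare_coherent_memory()) before falling back to the device's
+ * dma_map_ops.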
+ */ +int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle, void **ret); +int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); + +int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, size_t size, int *ret); + +void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle); +int dma_release_from_global_coherent(int order, void *vaddr); +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, + size_t size, int *ret); + +#else +#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0) +#define dma_release_from_dev_coherent(dev, order, vaddr) (0) +#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) + +static inline void *dma_alloc_from_global_coherent(ssize_t size, + dma_addr_t *dma_handle) +{ + return NULL; +} + +static inline int dma_release_from_global_coherent(int order, void *vaddr) +{ + return 0; +} + +static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, + void *cpu_addr, size_t size, + int *ret) +{ + return 0; +} +#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ + +#ifdef CONFIG_HAS_DMA +#include +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (dev && dev->dma_ops) + return dev->dma_ops; + return get_arch_dma_ops(dev ? dev->bus : NULL); +} + +static inline void set_dma_ops(struct device *dev, + const struct dma_map_ops *dma_ops) +{ + dev->dma_ops = dma_ops; +} +#else +/* + * Define the dma api to allow compilation of dma dependent code. + * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA' + * in its Kconfig, unless it already depends on || COMPILE_TEST, + * where guarantuees the availability of the dma-mapping API. + */ +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +{ + return NULL; +} +#endif + +static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, + size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + BUG_ON(!valid_dma_direction(dir)); + addr = ops->map_page(dev, virt_to_page(ptr), + offset_in_page(ptr), size, + dir, attrs); + debug_dma_map_page(dev, virt_to_page(ptr), + offset_in_page(ptr), size, + dir, addr, true); + return addr; +} + +static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, attrs); + debug_dma_unmap_page(dev, addr, size, dir, true); +} + +/* + * dma_maps_sg_attrs returns 0 on error and > 0 on success. + * It should never return a value < 0. 
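+ *
+ * Illustrative caller pattern (sgl, nents and count are driver-side names,
+ * not part of this API):
+ *
+ *    count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
+ *    if (!count)
+ *        return -EIO;
+ *    ...program the hardware with the count mapped segments...
+ *    dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
+ *
+ * Note that dma_unmap_sg() is passed the original nents, not the value
+ * returned by dma_map_sg().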
+ */ +static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + int ents; + + BUG_ON(!valid_dma_direction(dir)); + ents = ops->map_sg(dev, sg, nents, dir, attrs); + BUG_ON(ents < 0); + debug_dma_map_sg(dev, sg, nents, ents, dir); + + return ents; +} + +static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + debug_dma_unmap_sg(dev, sg, nents, dir); + if (ops->unmap_sg) + ops->unmap_sg(dev, sg, nents, dir, attrs); +} + +static inline dma_addr_t dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + BUG_ON(!valid_dma_direction(dir)); + addr = ops->map_page(dev, page, offset, size, dir, attrs); + debug_dma_map_page(dev, page, offset, size, dir, addr, false); + + return addr; +} + +static inline void dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, attrs); + debug_dma_unmap_page(dev, addr, size, dir, false); +} + +static inline dma_addr_t dma_map_resource(struct device *dev, + phys_addr_t phys_addr, + size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + BUG_ON(!valid_dma_direction(dir)); + + /* Don't allow RAM to be mapped */ + BUG_ON(pfn_valid(PHYS_PFN(phys_addr))); + + addr = phys_addr; + if (ops->map_resource) + addr = ops->map_resource(dev, phys_addr, size, dir, attrs); + + debug_dma_map_resource(dev, phys_addr, size, dir, addr); + + return addr; +} + +static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_resource) + ops->unmap_resource(dev, addr, size, dir, attrs); + debug_dma_unmap_resource(dev, addr, size, dir); +} + +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr, size, dir); + debug_dma_sync_single_for_cpu(dev, addr, size, dir); +} + +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr, size, dir); + debug_dma_sync_single_for_device(dev, addr, size, dir); +} + +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr + offset, size, dir); + 
debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); +} + +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr + offset, size, dir); + debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); +} + +static inline void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_cpu) + ops->sync_sg_for_cpu(dev, sg, nelems, dir); + debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); +} + +static inline void +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_device) + ops->sync_sg_for_device(dev, sg, nelems, dir); + debug_dma_sync_sg_for_device(dev, sg, nelems, dir); + +} + +#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) +#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) +#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) +#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) +#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) +#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) + +static inline void +dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->cache_sync) + ops->cache_sync(dev, vaddr, size, dir); +} + +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +void *dma_common_contiguous_remap(struct page *page, size_t size, + unsigned long vm_flags, + pgprot_t prot, const void *caller); + +void *dma_common_pages_remap(struct page **pages, size_t size, + unsigned long vm_flags, pgprot_t prot, + const void *caller); +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); + +/** + * dma_mmap_attrs - map a coherent DMA allocation into user space + * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices + * @vma: vm_area_struct describing requested user mapping + * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs + * @handle: device-view address returned from dma_alloc_attrs + * @size: size of memory originally requested in dma_alloc_attrs + * @attrs: attributes of mapping properties requested in dma_alloc_attrs + * + * Map a coherent DMA buffer previously allocated by dma_alloc_attrs + * into user space. The coherent DMA buffer must not be freed by the + * driver until the user space mapping has been released. 
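+ *
+ * Illustrative use from a driver's mmap file operation; my_mmap() and the
+ * my_dev fields are hypothetical, only dma_mmap_coherent() is real:
+ *
+ *    static int my_mmap(struct file *file, struct vm_area_struct *vma)
+ *    {
+ *        struct my_dev *md = file->private_data;
+ *
+ *        return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
+ *                     md->dma_handle, md->size);
+ *    }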
+ */ +static inline int +dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size, unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + if (ops->mmap) + return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); +} + +#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) + +int +dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +static inline int +dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, + dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + if (ops->get_sgtable) + return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, + attrs); + return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); +} + +#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) + +#ifndef arch_dma_alloc_attrs +#define arch_dma_alloc_attrs(dev) (true) +#endif + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + void *cpu_addr; + + BUG_ON(!ops); + WARN_ON_ONCE(dev && !dev->coherent_dma_mask); + + if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) + return cpu_addr; + + /* let the implementation decide on the zone to allocate from: */ + flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); + + if (!arch_dma_alloc_attrs(&dev)) + return NULL; + if (!ops->alloc) + return NULL; + + cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); + debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); + return cpu_addr; +} + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!ops); + + if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr)) + return; + /* + * On non-coherent platforms which implement DMA-coherent buffers via + * non-cacheable remaps, ops->free() may call vunmap(). Thus getting + * this far in IRQ context is a) at risk of a BUG_ON() or trying to + * sleep on some machines, and b) an indication that the driver is + * probably misusing the coherent API anyway. 
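+ * The WARN_ON() below exists to flag such callers.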
+ */ + WARN_ON(irqs_disabled()); + + if (!ops->free || !cpu_addr) + return; + + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + ops->free(dev, size, cpu_addr, dma_handle, attrs); +} + +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + return dma_alloc_attrs(dev, size, dma_handle, flag, 0); +} + +static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); +} + +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + debug_dma_mapping_error(dev, dma_addr); + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); + return 0; +} + +static inline void dma_check_mask(struct device *dev, u64 mask) +{ + if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) + dev_warn(dev, "SME is active, device will require DMA bounce buffers\n"); +} + +static inline int dma_supported(struct device *dev, u64 mask) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (!ops) + return 0; + if (!ops->dma_supported) + return 1; + return ops->dma_supported(dev, mask); +} + +#ifndef HAVE_ARCH_DMA_SET_MASK +static inline int dma_set_mask(struct device *dev, u64 mask) +{ + if (!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + + dma_check_mask(dev, mask); + + *dev->dma_mask = mask; + return 0; +} +#endif + +static inline u64 dma_get_mask(struct device *dev) +{ + if (dev && dev->dma_mask && *dev->dma_mask) + return *dev->dma_mask; + return DMA_BIT_MASK(32); +} + +#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK +int dma_set_coherent_mask(struct device *dev, u64 mask); +#else +static inline int dma_set_coherent_mask(struct device *dev, u64 mask) +{ + if (!dma_supported(dev, mask)) + return -EIO; + + dma_check_mask(dev, mask); + + dev->coherent_dma_mask = mask; + return 0; +} +#endif + +/* + * Set both the DMA mask and the coherent DMA mask to the same thing. + * Note that we don't check the return value from dma_set_coherent_mask() + * as the DMA API guarantees that the coherent DMA mask can be set to + * the same or smaller than the streaming DMA mask. + */ +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} + +/* + * Similar to the above, except it deals with the case where the device + * does not have dev->dma_mask appropriately setup. 
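+ *
+ * Illustrative use only (pdev stands in for a device the caller owns):
+ *
+ *    ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ *    if (ret)
+ *        return ret;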
+ */ +static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) +{ + dev->dma_mask = &dev->coherent_dma_mask; + return dma_set_mask_and_coherent(dev, mask); +} + +extern u64 dma_get_required_mask(struct device *dev); + +#ifndef arch_setup_dma_ops +static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, + u64 size, const struct iommu_ops *iommu, + bool coherent) { } +#endif + +#ifndef arch_teardown_dma_ops +static inline void arch_teardown_dma_ops(struct device *dev) { } +#endif + +static inline unsigned int dma_get_max_seg_size(struct device *dev) +{ + if (dev->dma_parms && dev->dma_parms->max_segment_size) + return dev->dma_parms->max_segment_size; + return SZ_64K; +} + +static inline int dma_set_max_seg_size(struct device *dev, unsigned int size) +{ + if (dev->dma_parms) { + dev->dma_parms->max_segment_size = size; + return 0; + } + return -EIO; +} + +static inline unsigned long dma_get_seg_boundary(struct device *dev) +{ + if (dev->dma_parms && dev->dma_parms->segment_boundary_mask) + return dev->dma_parms->segment_boundary_mask; + return DMA_BIT_MASK(32); +} + +static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) +{ + if (dev->dma_parms) { + dev->dma_parms->segment_boundary_mask = mask; + return 0; + } + return -EIO; +} + +#ifndef dma_max_pfn +static inline unsigned long dma_max_pfn(struct device *dev) +{ + return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset; +} +#endif + +static inline void *dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, + flag | __GFP_ZERO); + return ret; +} + +static inline int dma_get_cache_alignment(void) +{ +#ifdef ARCH_DMA_MINALIGN + return ARCH_DMA_MINALIGN; +#endif + return 1; +} + +/* flags for the coherent memory api */ +#define DMA_MEMORY_EXCLUSIVE 0x01 + +#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, int flags); +void dma_release_declared_memory(struct device *dev); +void *dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size); +#else +static inline int +dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, int flags) +{ + return -ENOSYS; +} + +static inline void +dma_release_declared_memory(struct device *dev) +{ +} + +static inline void * +dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size) +{ + return ERR_PTR(-EBUSY); +} +#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ + +#ifdef CONFIG_HAS_DMA +int dma_configure(struct device *dev); +void dma_deconfigure(struct device *dev); +#else +static inline int dma_configure(struct device *dev) +{ + return 0; +} + +static inline void dma_deconfigure(struct device *dev) {} +#endif + +/* + * Managed DMA API + */ +#ifdef CONFIG_HAS_DMA +extern void *dmam_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); +extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle); +#else /* !CONFIG_HAS_DMA */ +static inline void *dmam_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ return NULL; } +static inline void dmam_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) { } +#endif /* !CONFIG_HAS_DMA */ + +extern void *dmam_alloc_attrs(struct device *dev, size_t size, + 
dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs); +#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT +extern int dmam_declare_coherent_memory(struct device *dev, + phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, + int flags); +extern void dmam_release_declared_memory(struct device *dev); +#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ +static inline int dmam_declare_coherent_memory(struct device *dev, + phys_addr_t phys_addr, dma_addr_t device_addr, + size_t size, gfp_t gfp) +{ + return 0; +} + +static inline void dmam_release_declared_memory(struct device *dev) +{ +} +#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ + +static inline void *dma_alloc_wc(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t gfp) +{ + return dma_alloc_attrs(dev, size, dma_addr, gfp, + DMA_ATTR_WRITE_COMBINE); +} +#ifndef dma_alloc_writecombine +#define dma_alloc_writecombine dma_alloc_wc +#endif + +static inline void dma_free_wc(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr) +{ + return dma_free_attrs(dev, size, cpu_addr, dma_addr, + DMA_ATTR_WRITE_COMBINE); +} +#ifndef dma_free_writecombine +#define dma_free_writecombine dma_free_wc +#endif + +static inline int dma_mmap_wc(struct device *dev, + struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, + DMA_ATTR_WRITE_COMBINE); +} +#ifndef dma_mmap_writecombine +#define dma_mmap_writecombine dma_mmap_wc +#endif + +#ifdef CONFIG_NEED_DMA_MAP_STATE +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#else +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) +#define dma_unmap_addr(PTR, ADDR_NAME) (0) +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) +#define dma_unmap_len(PTR, LEN_NAME) (0) +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) +#endif + +#endif diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h new file mode 100644 index 000000000..a0aa00cc9 --- /dev/null +++ b/include/linux/dma-noncoherent.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DMA_NONCOHERENT_H +#define _LINUX_DMA_NONCOHERENT_H 1 + +#include + +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs); +void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs); + +#ifdef CONFIG_DMA_NONCOHERENT_MMAP +int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); +#else +#define arch_dma_mmap NULL +#endif /* CONFIG_DMA_NONCOHERENT_MMAP */ + +#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC +void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction); +#else +#define arch_dma_cache_sync NULL +#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */ + +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir); +#else +static inline void arch_sync_dma_for_device(struct device *dev, + phys_addr_t paddr, size_t size, 
enum dma_data_direction dir) +{ +} +#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */ + +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir); +#else +static inline void arch_sync_dma_for_cpu(struct device *dev, + phys_addr_t paddr, size_t size, enum dma_data_direction dir) +{ +} +#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */ + +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL +void arch_sync_dma_for_cpu_all(struct device *dev); +#else +static inline void arch_sync_dma_for_cpu_all(struct device *dev) +{ +} +#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */ + +#endif /* _LINUX_DMA_NONCOHERENT_H */ diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h new file mode 100644 index 000000000..e166cac8e --- /dev/null +++ b/include/linux/dma/dw.h @@ -0,0 +1,53 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2007 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * Copyright (C) 2014 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _DMA_DW_H +#define _DMA_DW_H + +#include +#include +#include + +#include + +struct dw_dma; + +/** + * struct dw_dma_chip - representation of DesignWare DMA controller hardware + * @dev: struct device of the DMA controller + * @id: instance ID + * @irq: irq line + * @regs: memory mapped I/O space + * @clk: hclk clock + * @dw: struct dw_dma that is filed by dw_dma_probe() + * @pdata: pointer to platform data + */ +struct dw_dma_chip { + struct device *dev; + int id; + int irq; + void __iomem *regs; + struct clk *clk; + struct dw_dma *dw; + + const struct dw_dma_platform_data *pdata; +}; + +/* Export to the platform drivers */ +#if IS_ENABLED(CONFIG_DW_DMAC_CORE) +int dw_dma_probe(struct dw_dma_chip *chip); +int dw_dma_remove(struct dw_dma_chip *chip); +#else +static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } +static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; } +#endif /* CONFIG_DW_DMAC_CORE */ + +#endif /* _DMA_DW_H */ diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h new file mode 100644 index 000000000..197eec63e --- /dev/null +++ b/include/linux/dma/hsu.h @@ -0,0 +1,64 @@ +/* + * Driver for the High Speed UART DMA + * + * Copyright (C) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _DMA_HSU_H +#define _DMA_HSU_H + +#include +#include + +#include + +struct hsu_dma; + +/** + * struct hsu_dma_chip - representation of HSU DMA hardware + * @dev: struct device of the DMA controller + * @irq: irq line + * @regs: memory mapped I/O space + * @length: I/O space length + * @offset: offset of the I/O space where registers are located + * @hsu: struct hsu_dma that is filed by ->probe() + * @pdata: platform data for the DMA controller if provided + */ +struct hsu_dma_chip { + struct device *dev; + int irq; + void __iomem *regs; + unsigned int length; + unsigned int offset; + struct hsu_dma *hsu; +}; + +#if IS_ENABLED(CONFIG_HSU_DMA) +/* Export to the internal users */ +int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr, + u32 *status); +int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status); + +/* Export to the platform drivers */ +int hsu_dma_probe(struct hsu_dma_chip *chip); +int hsu_dma_remove(struct hsu_dma_chip *chip); +#else +static inline int hsu_dma_get_status(struct hsu_dma_chip *chip, + unsigned short nr, u32 *status) +{ + return 0; +} +static inline int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, + u32 status) +{ + return 0; +} +static inline int hsu_dma_probe(struct hsu_dma_chip *chip) { return -ENODEV; } +static inline int hsu_dma_remove(struct hsu_dma_chip *chip) { return 0; } +#endif /* CONFIG_HSU_DMA */ + +#endif /* _DMA_HSU_H */ diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h new file mode 100644 index 000000000..18031115c --- /dev/null +++ b/include/linux/dma/ipu-dma.h @@ -0,0 +1,177 @@ +/* + * Copyright (C) 2008 + * Guennadi Liakhovetski, DENX Software Engineering, + * + * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_DMA_IPU_DMA_H +#define __LINUX_DMA_IPU_DMA_H + +#include +#include + +/* IPU DMA Controller channel definitions. */ +enum ipu_channel { + IDMAC_IC_0 = 0, /* IC (encoding task) to memory */ + IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */ + IDMAC_ADC_0 = 1, + IDMAC_IC_2 = 2, + IDMAC_ADC_1 = 2, + IDMAC_IC_3 = 3, + IDMAC_IC_4 = 4, + IDMAC_IC_5 = 5, + IDMAC_IC_6 = 6, + IDMAC_IC_7 = 7, /* IC (sensor data) to memory */ + IDMAC_IC_8 = 8, + IDMAC_IC_9 = 9, + IDMAC_IC_10 = 10, + IDMAC_IC_11 = 11, + IDMAC_IC_12 = 12, + IDMAC_IC_13 = 13, + IDMAC_SDC_0 = 14, /* Background synchronous display data */ + IDMAC_SDC_1 = 15, /* Foreground data (overlay) */ + IDMAC_SDC_2 = 16, + IDMAC_SDC_3 = 17, + IDMAC_ADC_2 = 18, + IDMAC_ADC_3 = 19, + IDMAC_ADC_4 = 20, + IDMAC_ADC_5 = 21, + IDMAC_ADC_6 = 22, + IDMAC_ADC_7 = 23, + IDMAC_PF_0 = 24, + IDMAC_PF_1 = 25, + IDMAC_PF_2 = 26, + IDMAC_PF_3 = 27, + IDMAC_PF_4 = 28, + IDMAC_PF_5 = 29, + IDMAC_PF_6 = 30, + IDMAC_PF_7 = 31, +}; + +/* Order significant! 
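+ * (presumably because the states form a lifecycle and may be compared
+ * numerically, so keep them in this order)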
*/ +enum ipu_channel_status { + IPU_CHANNEL_FREE, + IPU_CHANNEL_INITIALIZED, + IPU_CHANNEL_READY, + IPU_CHANNEL_ENABLED, +}; + +#define IPU_CHANNELS_NUM 32 + +enum pixel_fmt { + /* 1 byte */ + IPU_PIX_FMT_GENERIC, + IPU_PIX_FMT_RGB332, + IPU_PIX_FMT_YUV420P, + IPU_PIX_FMT_YUV422P, + IPU_PIX_FMT_YUV420P2, + IPU_PIX_FMT_YVU422P, + /* 2 bytes */ + IPU_PIX_FMT_RGB565, + IPU_PIX_FMT_RGB666, + IPU_PIX_FMT_BGR666, + IPU_PIX_FMT_YUYV, + IPU_PIX_FMT_UYVY, + /* 3 bytes */ + IPU_PIX_FMT_RGB24, + IPU_PIX_FMT_BGR24, + /* 4 bytes */ + IPU_PIX_FMT_GENERIC_32, + IPU_PIX_FMT_RGB32, + IPU_PIX_FMT_BGR32, + IPU_PIX_FMT_ABGR32, + IPU_PIX_FMT_BGRA32, + IPU_PIX_FMT_RGBA32, +}; + +enum ipu_color_space { + IPU_COLORSPACE_RGB, + IPU_COLORSPACE_YCBCR, + IPU_COLORSPACE_YUV +}; + +/* + * Enumeration of IPU rotation modes + */ +enum ipu_rotate_mode { + /* Note the enum values correspond to BAM value */ + IPU_ROTATE_NONE = 0, + IPU_ROTATE_VERT_FLIP = 1, + IPU_ROTATE_HORIZ_FLIP = 2, + IPU_ROTATE_180 = 3, + IPU_ROTATE_90_RIGHT = 4, + IPU_ROTATE_90_RIGHT_VFLIP = 5, + IPU_ROTATE_90_RIGHT_HFLIP = 6, + IPU_ROTATE_90_LEFT = 7, +}; + +/* + * Enumeration of DI ports for ADC. + */ +enum display_port { + DISP0, + DISP1, + DISP2, + DISP3 +}; + +struct idmac_video_param { + unsigned short in_width; + unsigned short in_height; + uint32_t in_pixel_fmt; + unsigned short out_width; + unsigned short out_height; + uint32_t out_pixel_fmt; + unsigned short out_stride; + bool graphics_combine_en; + bool global_alpha_en; + bool key_color_en; + enum display_port disp; + unsigned short out_left; + unsigned short out_top; +}; + +/* + * Union of initialization parameters for a logical channel. So far only video + * parameters are used. + */ +union ipu_channel_param { + struct idmac_video_param video; +}; + +struct idmac_tx_desc { + struct dma_async_tx_descriptor txd; + struct scatterlist *sg; /* scatterlist for this */ + unsigned int sg_len; /* tx-descriptor. 
*/ + struct list_head list; +}; + +struct idmac_channel { + struct dma_chan dma_chan; + dma_cookie_t completed; /* last completed cookie */ + union ipu_channel_param params; + enum ipu_channel link; /* input channel, linked to the output */ + enum ipu_channel_status status; + void *client; /* Only one client per channel */ + unsigned int n_tx_desc; + struct idmac_tx_desc *desc; /* allocated tx-descriptors */ + struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */ + struct list_head free_list; /* free tx-descriptors */ + struct list_head queue; /* queued tx-descriptors */ + spinlock_t lock; /* protects sg[0,1], queue */ + struct mutex chan_mutex; /* protects status, cookie, free_list */ + bool sec_chan_en; + int active_buffer; + unsigned int eof_irq; + char eof_name[16]; /* EOF IRQ name for request_irq() */ +}; + +#define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd) +#define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan) + +#endif /* __LINUX_DMA_IPU_DMA_H */ diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h new file mode 100644 index 000000000..25cab62a2 --- /dev/null +++ b/include/linux/dma/mmp-pdma.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _MMP_PDMA_H_ +#define _MMP_PDMA_H_ + +struct dma_chan; + +#ifdef CONFIG_MMP_PDMA +bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param); +#else +static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param) +{ + return false; +} +#endif + +#endif /* _MMP_PDMA_H_ */ diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h new file mode 100644 index 000000000..9fc594f69 --- /dev/null +++ b/include/linux/dma/pxa-dma.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PXA_DMA_H_ +#define _PXA_DMA_H_ + +enum pxad_chan_prio { + PXAD_PRIO_HIGHEST = 0, + PXAD_PRIO_NORMAL, + PXAD_PRIO_LOW, + PXAD_PRIO_LOWEST, +}; + +/** + * struct pxad_param - dma channel request parameters + * @drcmr: requestor line number + * @prio: minimal mandatory priority of the channel + * + * If a requested channel is granted, its priority will be at least @prio, + * ie. if PXAD_PRIO_LOW is required, the requested channel will be either + * PXAD_PRIO_LOW, PXAD_PRIO_NORMAL or PXAD_PRIO_HIGHEST. + */ +struct pxad_param { + unsigned int drcmr; + enum pxad_chan_prio prio; +}; + +struct dma_chan; + +#ifdef CONFIG_PXA_DMA +bool pxad_filter_fn(struct dma_chan *chan, void *param); +#else +static inline bool pxad_filter_fn(struct dma_chan *chan, void *param) +{ + return false; +} +#endif + +#endif /* _PXA_DMA_H_ */ diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h new file mode 100644 index 000000000..077d43a35 --- /dev/null +++ b/include/linux/dma/qcom_bam_dma.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _QCOM_BAM_DMA_H +#define _QCOM_BAM_DMA_H + +#include + +/* + * This data type corresponds to the native Command Element + * supported by BAM DMA Engine. 
+ * + * @cmd_and_addr - upper 8 bits command and lower 24 bits register address. + * @data - for write command: content to be written into peripheral register. + * for read command: dest addr to write peripheral register value. + * @mask - register mask. + * @reserved - for future usage. + * + */ +struct bam_cmd_element { + __le32 cmd_and_addr; + __le32 data; + __le32 mask; + __le32 reserved; +}; + +/* + * This enum indicates the command type in a command element + */ +enum bam_command_type { + BAM_WRITE_COMMAND = 0, + BAM_READ_COMMAND, +}; + +/* + * prep_bam_ce_le32 - Wrapper function to prepare a single BAM command + * element with the data already in le32 format. + * + * @bam_ce: bam command element + * @addr: target address + * @cmd: BAM command + * @data: actual data for write and dest addr for read in le32 + */ +static inline void +bam_prep_ce_le32(struct bam_cmd_element *bam_ce, u32 addr, + enum bam_command_type cmd, __le32 data) +{ + bam_ce->cmd_and_addr = + cpu_to_le32((addr & 0xffffff) | ((cmd & 0xff) << 24)); + bam_ce->data = data; + bam_ce->mask = cpu_to_le32(0xffffffff); +} + +/* + * bam_prep_ce - Wrapper function to prepare a single BAM command element + * with the data. + * + * @bam_ce: BAM command element + * @addr: target address + * @cmd: BAM command + * @data: actual data for write and dest addr for read + */ +static inline void +bam_prep_ce(struct bam_cmd_element *bam_ce, u32 addr, + enum bam_command_type cmd, u32 data) +{ + bam_prep_ce_le32(bam_ce, addr, cmd, cpu_to_le32(data)); +} +#endif diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h new file mode 100644 index 000000000..b0115e340 --- /dev/null +++ b/include/linux/dma/sprd-dma.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _SPRD_DMA_H_ +#define _SPRD_DMA_H_ + +#define SPRD_DMA_REQ_SHIFT 16 +#define SPRD_DMA_FLAGS(req_mode, int_type) \ + ((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type)) + +/* + * enum sprd_dma_req_mode: define the DMA request mode + * @SPRD_DMA_FRAG_REQ: fragment request mode + * @SPRD_DMA_BLK_REQ: block request mode + * @SPRD_DMA_TRANS_REQ: transaction request mode + * @SPRD_DMA_LIST_REQ: link-list request mode + * + * We have 4 types request mode: fragment mode, block mode, transaction mode + * and linklist mode. One transaction can contain several blocks, one block can + * contain several fragments. Link-list mode means we can save several DMA + * configuration into one reserved memory, then DMA can fetch each DMA + * configuration automatically to start transfer. + */ +enum sprd_dma_req_mode { + SPRD_DMA_FRAG_REQ, + SPRD_DMA_BLK_REQ, + SPRD_DMA_TRANS_REQ, + SPRD_DMA_LIST_REQ, +}; + +/* + * enum sprd_dma_int_type: define the DMA interrupt type + * @SPRD_DMA_NO_INT: do not need generate DMA interrupts. + * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request + * is done. + * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done. + * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment + * or one block request is done. + * @SPRD_DMA_TRANS_INT: tansaction done interrupt when one transaction + * request is done. + * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one + * transaction request or fragment request is done. + * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one + * transaction request or block request is done. + * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request + * is done. 
+ * @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is + * incorrect. + */ +enum sprd_dma_int_type { + SPRD_DMA_NO_INT, + SPRD_DMA_FRAG_INT, + SPRD_DMA_BLK_INT, + SPRD_DMA_BLK_FRAG_INT, + SPRD_DMA_TRANS_INT, + SPRD_DMA_TRANS_FRAG_INT, + SPRD_DMA_TRANS_BLK_INT, + SPRD_DMA_LIST_INT, + SPRD_DMA_CFGERR_INT, +}; + +#endif diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h new file mode 100644 index 000000000..5b6e61e4b --- /dev/null +++ b/include/linux/dma/xilinx_dma.h @@ -0,0 +1,49 @@ +/* + * Xilinx DMA Engine drivers support header file + * + * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __DMA_XILINX_DMA_H +#define __DMA_XILINX_DMA_H + +#include +#include + +/** + * struct xilinx_vdma_config - VDMA Configuration structure + * @frm_dly: Frame delay + * @gen_lock: Whether in gen-lock mode + * @master: Master that it syncs to + * @frm_cnt_en: Enable frame count enable + * @park: Whether wants to park + * @park_frm: Frame to park on + * @coalesc: Interrupt coalescing threshold + * @delay: Delay counter + * @reset: Reset Channel + * @ext_fsync: External Frame Sync source + * @vflip_en: Vertical Flip enable + */ +struct xilinx_vdma_config { + int frm_dly; + int gen_lock; + int master; + int frm_cnt_en; + int park; + int park_frm; + int coalesc; + int delay; + int reset; + int ext_fsync; + bool vflip_en; +}; + +int xilinx_vdma_channel_set_config(struct dma_chan *dchan, + struct xilinx_vdma_config *cfg); + +#endif diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h new file mode 100644 index 000000000..21b3e7d33 --- /dev/null +++ b/include/linux/dma_remapping.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DMA_REMAPPING_H +#define _DMA_REMAPPING_H + +/* + * VT-d hardware uses 4KiB page size regardless of host page size. 
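+ * The macros below therefore describe fixed 4KiB pages: VTD_PAGE_SIZE is
+ * 4096 and VTD_PAGE_ALIGN() rounds an address up to a 4KiB boundary, e.g.
+ * VTD_PAGE_ALIGN(0x1001) == 0x2000.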
+ */ +#define VTD_PAGE_SHIFT (12) +#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) +#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) +#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) + +#define VTD_STRIDE_SHIFT (9) +#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT) + +#define DMA_PTE_READ (1) +#define DMA_PTE_WRITE (2) +#define DMA_PTE_LARGE_PAGE (1 << 7) +#define DMA_PTE_SNP (1 << 11) + +#define CONTEXT_TT_MULTI_LEVEL 0 +#define CONTEXT_TT_DEV_IOTLB 1 +#define CONTEXT_TT_PASS_THROUGH 2 +/* Extended context entry types */ +#define CONTEXT_TT_PT_PASID 4 +#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5 +#define CONTEXT_TT_MASK (7ULL << 2) + +#define CONTEXT_DINVE (1ULL << 8) +#define CONTEXT_PRS (1ULL << 9) +#define CONTEXT_PASIDE (1ULL << 11) + +struct intel_iommu; +struct dmar_domain; +struct root_entry; + + +#ifdef CONFIG_INTEL_IOMMU +extern int iommu_calculate_agaw(struct intel_iommu *iommu); +extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); +extern int dmar_disabled; +extern int intel_iommu_enabled; +extern int intel_iommu_tboot_noforce; +#else +static inline int iommu_calculate_agaw(struct intel_iommu *iommu) +{ + return 0; +} +static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) +{ + return 0; +} +#define dmar_disabled (1) +#define intel_iommu_enabled (0) +#endif + + +#endif diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h new file mode 100644 index 000000000..50128c36f --- /dev/null +++ b/include/linux/dmaengine.h @@ -0,0 +1,1439 @@ +/* + * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ +#ifndef LINUX_DMAENGINE_H +#define LINUX_DMAENGINE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * typedef dma_cookie_t - an opaque DMA cookie + * + * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code + */ +typedef s32 dma_cookie_t; +#define DMA_MIN_COOKIE 1 + +static inline int dma_submit_error(dma_cookie_t cookie) +{ + return cookie < 0 ? cookie : 0; +} + +/** + * enum dma_status - DMA transaction status + * @DMA_COMPLETE: transaction completed + * @DMA_IN_PROGRESS: transaction not yet processed + * @DMA_PAUSED: transaction is paused + * @DMA_ERROR: transaction failed + */ +enum dma_status { + DMA_COMPLETE, + DMA_IN_PROGRESS, + DMA_PAUSED, + DMA_ERROR, +}; + +/** + * enum dma_transaction_type - DMA transaction types/indexes + * + * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is + * automatically set as dma devices are registered. 
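+ *
+ * These values index the capability mask a client builds when requesting a
+ * channel. An illustrative memcpy request, using the dma_cap_*() helpers and
+ * dma_request_channel() defined later in this header:
+ *
+ *    dma_cap_mask_t mask;
+ *    struct dma_chan *chan;
+ *
+ *    dma_cap_zero(mask);
+ *    dma_cap_set(DMA_MEMCPY, mask);
+ *    chan = dma_request_channel(mask, NULL, NULL);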
+ */ +enum dma_transaction_type { + DMA_MEMCPY, + DMA_XOR, + DMA_PQ, + DMA_XOR_VAL, + DMA_PQ_VAL, + DMA_MEMSET, + DMA_MEMSET_SG, + DMA_INTERRUPT, + DMA_PRIVATE, + DMA_ASYNC_TX, + DMA_SLAVE, + DMA_CYCLIC, + DMA_INTERLEAVE, +/* last transaction type for creation of the capabilities mask */ + DMA_TX_TYPE_END, +}; + +/** + * enum dma_transfer_direction - dma transfer mode and direction indicator + * @DMA_MEM_TO_MEM: Async/Memcpy mode + * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device + * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory + * @DMA_DEV_TO_DEV: Slave mode & From Device to Device + */ +enum dma_transfer_direction { + DMA_MEM_TO_MEM, + DMA_MEM_TO_DEV, + DMA_DEV_TO_MEM, + DMA_DEV_TO_DEV, + DMA_TRANS_NONE, +}; + +/** + * Interleaved Transfer Request + * ---------------------------- + * A chunk is collection of contiguous bytes to be transfered. + * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG). + * ICGs may or maynot change between chunks. + * A FRAME is the smallest series of contiguous {chunk,icg} pairs, + * that when repeated an integral number of times, specifies the transfer. + * A transfer template is specification of a Frame, the number of times + * it is to be repeated and other per-transfer attributes. + * + * Practically, a client driver would have ready a template for each + * type of transfer it is going to need during its lifetime and + * set only 'src_start' and 'dst_start' before submitting the requests. + * + * + * | Frame-1 | Frame-2 | ~ | Frame-'numf' | + * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...| + * + * == Chunk size + * ... ICG + */ + +/** + * struct data_chunk - Element of scatter-gather list that makes a frame. + * @size: Number of bytes to read from source. + * size_dst := fn(op, size_src), so doesn't mean much for destination. + * @icg: Number of bytes to jump after last src/dst address of this + * chunk and before first src/dst address for next chunk. + * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false. + * Ignored for src(assumed 0), if src_inc is true and src_sgl is false. + * @dst_icg: Number of bytes to jump after last dst address of this + * chunk and before the first dst address for next chunk. + * Ignored if dst_inc is true and dst_sgl is false. + * @src_icg: Number of bytes to jump after last src address of this + * chunk and before the first src address for next chunk. + * Ignored if src_inc is true and src_sgl is false. + */ +struct data_chunk { + size_t size; + size_t icg; + size_t dst_icg; + size_t src_icg; +}; + +/** + * struct dma_interleaved_template - Template to convey DMAC the transfer pattern + * and attributes. + * @src_start: Bus address of source for the first chunk. + * @dst_start: Bus address of destination for the first chunk. + * @dir: Specifies the type of Source and Destination. + * @src_inc: If the source address increments after reading from it. + * @dst_inc: If the destination address increments after writing to it. + * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read). + * Otherwise, source is read contiguously (icg ignored). + * Ignored if src_inc is false. + * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write). + * Otherwise, destination is filled contiguously (icg ignored). + * Ignored if dst_inc is false. + * @numf: Number of frames in this template. + * @frame_size: Number of chunks in a frame i.e, size of sgl[]. + * @sgl: Array of {chunk,icg} pairs that make up a frame. 
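+ *
+ * As an illustration only: a single-frame transfer made of two chunks would
+ * set numf = 1 and frame_size = 2, and describe each chunk's size and icg
+ * in sgl[0] and sgl[1].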
+ */ +struct dma_interleaved_template { + dma_addr_t src_start; + dma_addr_t dst_start; + enum dma_transfer_direction dir; + bool src_inc; + bool dst_inc; + bool src_sgl; + bool dst_sgl; + size_t numf; + size_t frame_size; + struct data_chunk sgl[0]; +}; + +/** + * enum dma_ctrl_flags - DMA flags to augment operation preparation, + * control completion, and communicate status. + * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of + * this transaction + * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client + * acknowledges receipt, i.e. has has a chance to establish any dependency + * chains + * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q + * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P + * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as + * sources that were the result of a previous operation, in the case of a PQ + * operation it continues the calculation with new sources + * @DMA_PREP_FENCE - tell the driver that subsequent operations depend + * on the result of this operation + * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till + * cleared or freed + * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command + * data and the descriptor should be in different format from normal + * data descriptors. + */ +enum dma_ctrl_flags { + DMA_PREP_INTERRUPT = (1 << 0), + DMA_CTRL_ACK = (1 << 1), + DMA_PREP_PQ_DISABLE_P = (1 << 2), + DMA_PREP_PQ_DISABLE_Q = (1 << 3), + DMA_PREP_CONTINUE = (1 << 4), + DMA_PREP_FENCE = (1 << 5), + DMA_CTRL_REUSE = (1 << 6), + DMA_PREP_CMD = (1 << 7), +}; + +/** + * enum sum_check_bits - bit position of pq_check_flags + */ +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +/** + * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations + * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise + * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise + */ +enum sum_check_flags { + SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P), + SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q), +}; + + +/** + * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. 
+ * See linux/cpumask.h + */ +typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; + +/** + * struct dma_chan_percpu - the per-CPU part of struct dma_chan + * @memcpy_count: transaction counter + * @bytes_transferred: byte counter + */ + +struct dma_chan_percpu { + /* stats */ + unsigned long memcpy_count; + unsigned long bytes_transferred; +}; + +/** + * struct dma_router - DMA router structure + * @dev: pointer to the DMA router device + * @route_free: function to be called when the route can be disconnected + */ +struct dma_router { + struct device *dev; + void (*route_free)(struct device *dev, void *route_data); +}; + +/** + * struct dma_chan - devices supply DMA channels, clients use them + * @device: ptr to the dma device who supplies this channel, always !%NULL + * @cookie: last cookie value returned to client + * @completed_cookie: last completed cookie for this channel + * @chan_id: channel ID for sysfs + * @dev: class device for sysfs + * @device_node: used to add this to the device chan list + * @local: per-cpu pointer to a struct dma_chan_percpu + * @client_count: how many clients are using this channel + * @table_count: number of appearances in the mem-to-mem allocation table + * @router: pointer to the DMA router structure + * @route_data: channel specific data for the router + * @private: private data for certain client-channel associations + */ +struct dma_chan { + struct dma_device *device; + dma_cookie_t cookie; + dma_cookie_t completed_cookie; + + /* sysfs */ + int chan_id; + struct dma_chan_dev *dev; + + struct list_head device_node; + struct dma_chan_percpu __percpu *local; + int client_count; + int table_count; + + /* DMA router */ + struct dma_router *router; + void *route_data; + + void *private; +}; + +/** + * struct dma_chan_dev - relate sysfs device node to backing channel device + * @chan: driver channel device + * @device: sysfs device + * @dev_id: parent dma_device dev_id + * @idr_ref: reference count to gate release of dma_device dev_id + */ +struct dma_chan_dev { + struct dma_chan *chan; + struct device device; + int dev_id; + atomic_t *idr_ref; +}; + +/** + * enum dma_slave_buswidth - defines bus width of the DMA slave + * device, source or target buses + */ +enum dma_slave_buswidth { + DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, + DMA_SLAVE_BUSWIDTH_1_BYTE = 1, + DMA_SLAVE_BUSWIDTH_2_BYTES = 2, + DMA_SLAVE_BUSWIDTH_3_BYTES = 3, + DMA_SLAVE_BUSWIDTH_4_BYTES = 4, + DMA_SLAVE_BUSWIDTH_8_BYTES = 8, + DMA_SLAVE_BUSWIDTH_16_BYTES = 16, + DMA_SLAVE_BUSWIDTH_32_BYTES = 32, + DMA_SLAVE_BUSWIDTH_64_BYTES = 64, +}; + +/** + * struct dma_slave_config - dma slave channel runtime config + * @direction: whether the data shall go in or out on this slave + * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are + * legal values. DEPRECATED, drivers should use the direction argument + * to the device_prep_slave_sg and device_prep_dma_cyclic functions or + * the dir field in the dma_interleaved_template structure. + * @src_addr: this is the physical address where DMA slave data + * should be read (RX), if the source is memory this argument is + * ignored. + * @dst_addr: this is the physical address where DMA slave data + * should be written (TX), if the source is memory this argument + * is ignored. + * @src_addr_width: this is the width in bytes of the source (RX) + * register where DMA data shall be read. If the source + * is memory this may be ignored depending on architecture. + * Legal values: 1, 2, 3, 4, 8, 16, 32, 64. 
+ * @dst_addr_width: same as src_addr_width but for destination + * target (TX) mutatis mutandis. + * @src_maxburst: the maximum number of words (note: words, as in + * units of the src_addr_width member, not bytes) that can be sent + * in one burst to the device. Typically something like half the + * FIFO depth on I/O peripherals so you don't overflow it. This + * may or may not be applicable on memory sources. + * @dst_maxburst: same as src_maxburst but for destination target + * mutatis mutandis. + * @src_port_window_size: The length of the register area in words the data need + * to be accessed on the device side. It is only used for devices which is using + * an area instead of a single register to receive the data. Typically the DMA + * loops in this area in order to transfer the data. + * @dst_port_window_size: same as src_port_window_size but for the destination + * port. + * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill + * with 'true' if peripheral should be flow controller. Direction will be + * selected at Runtime. + * @slave_id: Slave requester id. Only valid for slave channels. The dma + * slave peripheral will have unique id as dma requester which need to be + * pass as slave config. + * + * This struct is passed in as configuration data to a DMA engine + * in order to set up a certain channel for DMA transport at runtime. + * The DMA device/engine has to provide support for an additional + * callback in the dma_device structure, device_config and this struct + * will then be passed in as an argument to the function. + * + * The rationale for adding configuration information to this struct is as + * follows: if it is likely that more than one DMA slave controllers in + * the world will support the configuration option, then make it generic. + * If not: if it is fixed so that it be sent in static from the platform + * data, then prefer to do that. + */ +struct dma_slave_config { + enum dma_transfer_direction direction; + phys_addr_t src_addr; + phys_addr_t dst_addr; + enum dma_slave_buswidth src_addr_width; + enum dma_slave_buswidth dst_addr_width; + u32 src_maxburst; + u32 dst_maxburst; + u32 src_port_window_size; + u32 dst_port_window_size; + bool device_fc; + unsigned int slave_id; +}; + +/** + * enum dma_residue_granularity - Granularity of the reported transfer residue + * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not support. The + * DMA channel is only able to tell whether a descriptor has been completed or + * not, which means residue reporting is not supported by this channel. The + * residue field of the dma_tx_state field will always be 0. + * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully + * completed segment of the transfer (For cyclic transfers this is after each + * period). This is typically implemented by having the hardware generate an + * interrupt after each transferred segment and then the drivers updates the + * outstanding residue by the size of the segment. Another possibility is if + * the hardware supports scatter-gather and the segment descriptor has a field + * which gets set after the segment has been completed. The driver then counts + * the number of segments without the flag set to compute the residue. + * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred + * burst. This is typically only supported if the hardware has a progress + * register of some sort (E.g. 
a register with the current read/write address + * or a register with the amount of bursts/beats/bytes that have been + * transferred or still need to be transferred). + */ +enum dma_residue_granularity { + DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, + DMA_RESIDUE_GRANULARITY_SEGMENT = 1, + DMA_RESIDUE_GRANULARITY_BURST = 2, +}; + +/** + * struct dma_slave_caps - expose capabilities of a slave channel only + * @src_addr_widths: bit mask of src addr widths the channel supports. + * Width is specified in bytes, e.g. for a channel supporting + * a width of 4 the mask should have BIT(4) set. + * @dst_addr_widths: bit mask of dst addr widths the channel supports + * @directions: bit mask of slave directions the channel supports. + * Since the enum dma_transfer_direction is not defined as bit flag for + * each type, the dma controller should set BIT() and same + * should be checked by controller as well + * @max_burst: max burst capability per-transfer + * @cmd_pause: true, if pause is supported (i.e. for reading residue or + * for resume later) + * @cmd_resume: true, if resume is supported + * @cmd_terminate: true, if terminate cmd is supported + * @residue_granularity: granularity of the reported transfer residue + * @descriptor_reuse: if a descriptor can be reused by client and + * resubmitted multiple times + */ +struct dma_slave_caps { + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 max_burst; + bool cmd_pause; + bool cmd_resume; + bool cmd_terminate; + enum dma_residue_granularity residue_granularity; + bool descriptor_reuse; +}; + +static inline const char *dma_chan_name(struct dma_chan *chan) +{ + return dev_name(&chan->dev->device); +} + +void dma_chan_cleanup(struct kref *kref); + +/** + * typedef dma_filter_fn - callback filter for dma_request_channel + * @chan: channel to be reviewed + * @filter_param: opaque parameter passed through dma_request_channel + * + * When this optional parameter is specified in a call to dma_request_channel a + * suitable channel is passed to this routine for further dispositioning before + * being returned. Where 'suitable' indicates a non-busy channel that + * satisfies the given capability mask. It returns 'true' to indicate that the + * channel is suitable. 
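A minimal sketch of such a filter, assuming the client matches on the channel's private pointer; example_filter and example_grab_memcpy_chan are invented names, and dma_cap_zero(), dma_cap_set() and dma_request_channel() are defined further down in this header.

#include <linux/dmaengine.h>

/* Hypothetical filter: accept only the channel whose private data matches. */
static bool example_filter(struct dma_chan *chan, void *filter_param)
{
        return chan->private == filter_param;
}

static struct dma_chan *example_grab_memcpy_chan(void *wanted)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        /*
         * dma_request_channel() hands every non-busy channel matching the
         * capability mask to example_filter() for the final decision.
         */
        return dma_request_channel(mask, example_filter, wanted);
}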
+ */ +typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); + +typedef void (*dma_async_tx_callback)(void *dma_async_param); + +enum dmaengine_tx_result { + DMA_TRANS_NOERROR = 0, /* SUCCESS */ + DMA_TRANS_READ_FAILED, /* Source DMA read failed */ + DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */ + DMA_TRANS_ABORTED, /* Op never submitted / aborted */ +}; + +struct dmaengine_result { + enum dmaengine_tx_result result; + u32 residue; +}; + +typedef void (*dma_async_tx_callback_result)(void *dma_async_param, + const struct dmaengine_result *result); + +struct dmaengine_unmap_data { +#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) + u16 map_cnt; +#else + u8 map_cnt; +#endif + u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + +/** + * struct dma_async_tx_descriptor - async transaction descriptor + * ---dma generic offload fields--- + * @cookie: tracking cookie for this transaction, set to -EBUSY if + * this tx is sitting on a dependency list + * @flags: flags to augment operation preparation, control completion, and + * communicate status + * @phys: physical address of the descriptor + * @chan: target channel for this operation + * @tx_submit: accept the descriptor, assign ordered cookie and mark the + * descriptor pending. To be pushed on .issue_pending() call + * @callback: routine to call after this operation is complete + * @callback_param: general parameter to pass to the callback routine + * ---async_tx api specific fields--- + * @next: at completion submit this descriptor + * @parent: pointer to the next level up in the dependency chain + * @lock: protect the parent and next pointers + */ +struct dma_async_tx_descriptor { + dma_cookie_t cookie; + enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ + dma_addr_t phys; + struct dma_chan *chan; + dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); + int (*desc_free)(struct dma_async_tx_descriptor *tx); + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; + struct dmaengine_unmap_data *unmap; +#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH + struct dma_async_tx_descriptor *next; + struct dma_async_tx_descriptor *parent; + spinlock_t lock; +#endif +}; + +#ifdef CONFIG_DMA_ENGINE +static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, + struct dmaengine_unmap_data *unmap) +{ + kref_get(&unmap->kref); + tx->unmap = unmap; +} + +struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags); +void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap); +#else +static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, + struct dmaengine_unmap_data *unmap) +{ +} +static inline struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) +{ + return NULL; +} +static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) +{ +} +#endif + +static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) +{ + if (tx->unmap) { + dmaengine_unmap_put(tx->unmap); + tx->unmap = NULL; + } +} + +#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH +static inline void txd_lock(struct dma_async_tx_descriptor *txd) +{ +} +static inline void txd_unlock(struct dma_async_tx_descriptor *txd) +{ +} +static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) +{ + BUG(); +} +static inline void txd_clear_parent(struct dma_async_tx_descriptor 
*txd) +{ +} +static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) +{ +} +static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) +{ + return NULL; +} +static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) +{ + return NULL; +} + +#else +static inline void txd_lock(struct dma_async_tx_descriptor *txd) +{ + spin_lock_bh(&txd->lock); +} +static inline void txd_unlock(struct dma_async_tx_descriptor *txd) +{ + spin_unlock_bh(&txd->lock); +} +static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) +{ + txd->next = next; + next->parent = txd; +} +static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) +{ + txd->parent = NULL; +} +static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) +{ + txd->next = NULL; +} +static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) +{ + return txd->parent; +} +static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) +{ + return txd->next; +} +#endif + +/** + * struct dma_tx_state - filled in to report the status of + * a transfer. + * @last: last completed DMA cookie + * @used: last issued DMA cookie (i.e. the one in progress) + * @residue: the remaining number of bytes left to transmit + * on the selected transfer for states DMA_IN_PROGRESS and + * DMA_PAUSED if this is implemented in the driver, else 0 + */ +struct dma_tx_state { + dma_cookie_t last; + dma_cookie_t used; + u32 residue; +}; + +/** + * enum dmaengine_alignment - defines alignment of the DMA async tx + * buffers + */ +enum dmaengine_alignment { + DMAENGINE_ALIGN_1_BYTE = 0, + DMAENGINE_ALIGN_2_BYTES = 1, + DMAENGINE_ALIGN_4_BYTES = 2, + DMAENGINE_ALIGN_8_BYTES = 3, + DMAENGINE_ALIGN_16_BYTES = 4, + DMAENGINE_ALIGN_32_BYTES = 5, + DMAENGINE_ALIGN_64_BYTES = 6, +}; + +/** + * struct dma_slave_map - associates slave device and it's slave channel with + * parameter to be used by a filter function + * @devname: name of the device + * @slave: slave channel name + * @param: opaque parameter to pass to struct dma_filter.fn + */ +struct dma_slave_map { + const char *devname; + const char *slave; + void *param; +}; + +/** + * struct dma_filter - information for slave device/channel to filter_fn/param + * mapping + * @fn: filter function callback + * @mapcnt: number of slave device/channel in the map + * @map: array of channel to filter mapping data + */ +struct dma_filter { + dma_filter_fn fn; + int mapcnt; + const struct dma_slave_map *map; +}; + +/** + * struct dma_device - info on the entity supplying DMA services + * @chancnt: how many DMA channels are supported + * @privatecnt: how many DMA channels are requested by dma_request_channel + * @channels: the list of struct dma_chan + * @global_node: list_head for global dma_device_list + * @filter: information for device/slave to filter function/param mapping + * @cap_mask: one or more dma_capability flags + * @max_xor: maximum number of xor sources, 0 if no capability + * @max_pq: maximum number of PQ sources and PQ-continue capability + * @copy_align: alignment shift for memcpy operations + * @xor_align: alignment shift for xor operations + * @pq_align: alignment shift for pq operations + * @fill_align: alignment shift for memset operations + * @dev_id: unique device ID + * @dev: struct device reference for dma mapping api + * @owner: owner module (automatically set based on the provided dev) + * 
@src_addr_widths: bit mask of src addr widths the device supports. + * Width is specified in bytes, e.g. for a device supporting + * a width of 4 the mask should have BIT(4) set. + * @dst_addr_widths: bit mask of dst addr widths the device supports + * @directions: bit mask of slave directions the device supports. + * Since the enum dma_transfer_direction is not defined as bit flag for + * each type, the dma controller should set BIT() and same + * should be checked by controller as well + * @max_burst: max burst capability per-transfer + * @residue_granularity: granularity of the transfer residue reported + * by tx_status + * @device_alloc_chan_resources: allocate resources and return the + * number of allocated descriptors + * @device_free_chan_resources: release DMA channel's resources + * @device_prep_dma_memcpy: prepares a memcpy operation + * @device_prep_dma_xor: prepares a xor operation + * @device_prep_dma_xor_val: prepares a xor validation operation + * @device_prep_dma_pq: prepares a pq operation + * @device_prep_dma_pq_val: prepares a pqzero_sum operation + * @device_prep_dma_memset: prepares a memset operation + * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list + * @device_prep_dma_interrupt: prepares an end of chain interrupt operation + * @device_prep_slave_sg: prepares a slave dma operation + * @device_prep_dma_cyclic: prepares a cyclic dma operation suitable for audio. + * The function takes a buffer of size buf_len. The callback function will + * be called after period_len bytes have been transferred. + * @device_prep_interleaved_dma: Transfer expression in a generic way. + * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address + * @device_config: Pushes a new configuration to a channel, returns 0 or an error + * code + * @device_pause: Pauses any transfer happening on a channel. Returns + * 0 or an error code + * @device_resume: Resumes any transfer on a channel previously + * paused. Returns 0 or an error code + * @device_terminate_all: Aborts all transfers on a channel. Returns 0 + * or an error code + * @device_synchronize: Synchronizes the termination of all transfers to the + * current context.
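A heavily stripped-down sketch of a provider filling in this structure: the foo_* names are invented, only the memcpy hook is shown, and a real driver must also populate its channel list, device_tx_status, device_issue_pending and the rest before calling dma_async_device_register(), which is declared near the end of this header.

#include <linux/dmaengine.h>

/* Fictional provider: prepare-callback stub plus minimal registration. */
static struct dma_async_tx_descriptor *foo_prep_memcpy(struct dma_chan *chan,
                dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags)
{
        return NULL;    /* a real driver allocates and returns a descriptor */
}

static int foo_register_dma(struct device *dev, struct dma_device *dd)
{
        dma_cap_set(DMA_MEMCPY, dd->cap_mask);  /* advertise the capability */
        dd->dev = dev;
        dd->device_prep_dma_memcpy = foo_prep_memcpy;
        /* channels, device_tx_status, device_issue_pending, ... go here */
        return dma_async_device_register(dd);
}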
+ * @device_tx_status: poll for transaction completion, the optional + * txstate parameter can be supplied with a pointer to get a + * struct with auxiliary transfer status information, otherwise the call + * will just return a simple status code + * @device_issue_pending: push pending transactions to hardware + * @descriptor_reuse: a submitted transfer can be resubmitted after completion + */ +struct dma_device { + + unsigned int chancnt; + unsigned int privatecnt; + struct list_head channels; + struct list_head global_node; + struct dma_filter filter; + dma_cap_mask_t cap_mask; + unsigned short max_xor; + unsigned short max_pq; + enum dmaengine_alignment copy_align; + enum dmaengine_alignment xor_align; + enum dmaengine_alignment pq_align; + enum dmaengine_alignment fill_align; + #define DMA_HAS_PQ_CONTINUE (1 << 15) + + int dev_id; + struct device *dev; + struct module *owner; + + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 max_burst; + bool descriptor_reuse; + enum dma_residue_granularity residue_granularity; + + int (*device_alloc_chan_resources)(struct dma_chan *chan); + void (*device_free_chan_resources)(struct dma_chan *chan); + + struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( + struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_xor)( + struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( + struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, + size_t len, enum sum_check_flags *result, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_pq)( + struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( + struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_memset)( + struct dma_chan *chan, dma_addr_t dest, int value, size_t len, + unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)( + struct dma_chan *chan, struct scatterlist *sg, + unsigned int nents, int value, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( + struct dma_chan *chan, unsigned long flags); + + struct dma_async_tx_descriptor *(*device_prep_slave_sg)( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context); + struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( + struct dma_chan *chan, struct dma_interleaved_template *xt, + unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)( + struct dma_chan *chan, dma_addr_t dst, u64 data, + unsigned long flags); + + int (*device_config)(struct dma_chan *chan, + struct dma_slave_config *config); + int (*device_pause)(struct dma_chan *chan); + int (*device_resume)(struct dma_chan *chan); + int (*device_terminate_all)(struct dma_chan *chan); + void (*device_synchronize)(struct dma_chan 
*chan); + + enum dma_status (*device_tx_status)(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate); + void (*device_issue_pending)(struct dma_chan *chan); +}; + +static inline int dmaengine_slave_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + if (chan->device->device_config) + return chan->device->device_config(chan, config); + + return -ENOSYS; +} + +static inline bool is_slave_direction(enum dma_transfer_direction direction) +{ + return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM); +} + +static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( + struct dma_chan *chan, dma_addr_t buf, size_t len, + enum dma_transfer_direction dir, unsigned long flags) +{ + struct scatterlist sg; + sg_init_table(&sg, 1); + sg_dma_address(&sg) = buf; + sg_dma_len(&sg) = len; + + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + + return chan->device->device_prep_slave_sg(chan, &sg, 1, + dir, flags, NULL); +} + +static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + + return chan->device->device_prep_slave_sg(chan, sgl, sg_len, + dir, flags, NULL); +} + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +struct rio_dma_ext; +static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction dir, unsigned long flags, + struct rio_dma_ext *rio_ext) +{ + if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + + return chan->device->device_prep_slave_sg(chan, sgl, sg_len, + dir, flags, rio_ext); +} +#endif + +static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, + unsigned long flags) +{ + if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic) + return NULL; + + return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, + period_len, dir, flags); +} + +static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( + struct dma_chan *chan, struct dma_interleaved_template *xt, + unsigned long flags) +{ + if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) + return NULL; + + return chan->device->device_prep_interleaved_dma(chan, xt, flags); +} + +static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( + struct dma_chan *chan, dma_addr_t dest, int value, size_t len, + unsigned long flags) +{ + if (!chan || !chan->device || !chan->device->device_prep_dma_memset) + return NULL; + + return chan->device->device_prep_dma_memset(chan, dest, value, + len, flags); +} + +static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy) + return NULL; + + return chan->device->device_prep_dma_memcpy(chan, dest, src, + len, flags); +} + +/** + * dmaengine_terminate_all() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * This function is DEPRECATED use either dmaengine_terminate_sync() or + * dmaengine_terminate_async() instead. 
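Putting these prep helpers together, a slave client usually follows the prepare/submit/issue pattern sketched below; the names are invented, the buffer is assumed to be DMA-mapped already, and dmaengine_submit(), dma_async_issue_pending() and dma_submit_error() are defined elsewhere in this header.

#include <linux/dmaengine.h>

static void example_rx_done(void *param)
{
        /* completion callback; 'param' is whatever was put in callback_param */
}

/* Hypothetical RX start: 'buf' must already be a DMA-mapped bus address. */
static int example_start_rx(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -ENOMEM;

        desc->callback = example_rx_done;
        desc->callback_param = NULL;

        cookie = dmaengine_submit(desc);        /* queue the descriptor */
        dma_async_issue_pending(chan);          /* kick the hardware */
        return dma_submit_error(cookie) ? -EIO : 0;
}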
+ */ +static inline int dmaengine_terminate_all(struct dma_chan *chan) +{ + if (chan->device->device_terminate_all) + return chan->device->device_terminate_all(chan); + + return -ENOSYS; +} + +/** + * dmaengine_terminate_async() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * Calling this function will terminate all active and pending descriptors + * that have previously been submitted to the channel. It is not guaranteed + * though that the transfer for the active descriptor has stopped when the + * function returns. Furthermore it is possible the complete callback of a + * submitted transfer is still running when this function returns. + * + * dmaengine_synchronize() needs to be called before it is safe to free + * any memory that is accessed by previously submitted descriptors or before + * freeing any resources accessed from within the completion callback of any + * previously submitted descriptors. + * + * This function can be called from atomic context as well as from within a + * complete callback of a descriptor submitted on the same channel. + * + * If neither of the two conditions above applies, consider using + * dmaengine_terminate_sync() instead. + */ +static inline int dmaengine_terminate_async(struct dma_chan *chan) +{ + if (chan->device->device_terminate_all) + return chan->device->device_terminate_all(chan); + + return -EINVAL; +} + +/** + * dmaengine_synchronize() - Synchronize DMA channel termination + * @chan: The channel to synchronize + * + * Synchronizes the DMA channel termination to the current context. When this + * function returns it is guaranteed that all transfers for previously issued + * descriptors have stopped and it is safe to free the memory associated + * with them. Furthermore it is guaranteed that all complete callback functions + * for a previously submitted descriptor have finished running and it is safe to + * free resources accessed from within the complete callbacks. + * + * The behavior of this function is undefined if dma_async_issue_pending() has + * been called between dmaengine_terminate_async() and this function. + * + * This function must only be called from non-atomic context and must not be + * called from within a complete callback of a descriptor submitted on the same + * channel.
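A short sketch of the two-step teardown these rules imply, with invented function names: the asynchronous terminate may run in atomic context, and the synchronize step runs later from process context before any buffers are freed.

#include <linux/dmaengine.h>

/* May be called from an interrupt handler or a completion callback. */
static void example_stop_from_atomic(struct dma_chan *chan)
{
        dmaengine_terminate_async(chan);
}

/* Process context only: make the termination visible before freeing memory. */
static void example_finish_teardown(struct dma_chan *chan)
{
        dmaengine_synchronize(chan);
        /* ... now it is safe to free DMA buffers and callback resources ... */
}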
+ */ +static inline int dmaengine_terminate_sync(struct dma_chan *chan) +{ + int ret; + + ret = dmaengine_terminate_async(chan); + if (ret) + return ret; + + dmaengine_synchronize(chan); + + return 0; +} + +static inline int dmaengine_pause(struct dma_chan *chan) +{ + if (chan->device->device_pause) + return chan->device->device_pause(chan); + + return -ENOSYS; +} + +static inline int dmaengine_resume(struct dma_chan *chan) +{ + if (chan->device->device_resume) + return chan->device->device_resume(chan); + + return -ENOSYS; +} + +static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *state) +{ + return chan->device->device_tx_status(chan, cookie, state); +} + +static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc) +{ + return desc->tx_submit(desc); +} + +static inline bool dmaengine_check_align(enum dmaengine_alignment align, + size_t off1, size_t off2, size_t len) +{ + size_t mask; + + if (!align) + return true; + mask = (1 << align) - 1; + if (mask & (off1 | off2 | len)) + return false; + return true; +} + +static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->copy_align, off1, off2, len); +} + +static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->xor_align, off1, off2, len); +} + +static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->pq_align, off1, off2, len); +} + +static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->fill_align, off1, off2, len); +} + +static inline void +dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) +{ + dma->max_pq = maxpq; + if (has_pq_continue) + dma->max_pq |= DMA_HAS_PQ_CONTINUE; +} + +static inline bool dmaf_continue(enum dma_ctrl_flags flags) +{ + return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE; +} + +static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags) +{ + enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; + + return (flags & mask) == mask; +} + +static inline bool dma_dev_has_pq_continue(struct dma_device *dma) +{ + return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; +} + +static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) +{ + return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; +} + +/* dma_maxpq - reduce maxpq in the face of continued operations + * @dma - dma device with PQ capability + * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set + * + * When an engine does not support native continuation we need 3 extra + * source slots to reuse P and Q with the following coefficients: + * 1/ {00} * P : remove P from Q', but use it as a source for P' + * 2/ {01} * Q : use Q to continue Q' calculation + * 3/ {00} * Q : subtract Q from P' to cancel (2) + * + * In the case where P is disabled we only need 1 extra source: + * 1/ {01} * Q : use Q to continue Q' calculation + */ +static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) +{ + if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) + return dma_dev_to_maxpq(dma); + else if (dmaf_p_disabled_continue(flags)) + return dma_dev_to_maxpq(dma) - 1; + else if (dmaf_continue(flags)) + return dma_dev_to_maxpq(dma) - 3; + BUG(); +} + +static 
inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg, + size_t dir_icg) +{ + if (inc) { + if (dir_icg) + return dir_icg; + else if (sgl) + return icg; + } + + return 0; +} + +static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt, + struct data_chunk *chunk) +{ + return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl, + chunk->icg, chunk->dst_icg); +} + +static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt, + struct data_chunk *chunk) +{ + return dmaengine_get_icg(xt->src_inc, xt->src_sgl, + chunk->icg, chunk->src_icg); +} + +/* --- public DMA engine API --- */ + +#ifdef CONFIG_DMA_ENGINE +void dmaengine_get(void); +void dmaengine_put(void); +#else +static inline void dmaengine_get(void) +{ +} +static inline void dmaengine_put(void) +{ +} +#endif + +#ifdef CONFIG_ASYNC_TX_DMA +#define async_dmaengine_get() dmaengine_get() +#define async_dmaengine_put() dmaengine_put() +#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH +#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) +#else +#define async_dma_find_channel(type) dma_find_channel(type) +#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */ +#else +static inline void async_dmaengine_get(void) +{ +} +static inline void async_dmaengine_put(void) +{ +} +static inline struct dma_chan * +async_dma_find_channel(enum dma_transaction_type type) +{ + return NULL; +} +#endif /* CONFIG_ASYNC_TX_DMA */ +void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, + struct dma_chan *chan); + +static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) +{ + tx->flags |= DMA_CTRL_ACK; +} + +static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) +{ + tx->flags &= ~DMA_CTRL_ACK; +} + +static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) +{ + return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; +} + +#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) +static inline void +__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) +{ + set_bit(tx_type, dstp->bits); +} + +#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) +static inline void +__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) +{ + clear_bit(tx_type, dstp->bits); +} + +#define dma_cap_zero(mask) __dma_cap_zero(&(mask)) +static inline void __dma_cap_zero(dma_cap_mask_t *dstp) +{ + bitmap_zero(dstp->bits, DMA_TX_TYPE_END); +} + +#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) +static inline int +__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) +{ + return test_bit(tx_type, srcp->bits); +} + +#define for_each_dma_cap_mask(cap, mask) \ + for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END) + +/** + * dma_async_issue_pending - flush pending transactions to HW + * @chan: target DMA channel + * + * This allows drivers to push copies to HW in batches, + * reducing MMIO writes where possible. + */ +static inline void dma_async_issue_pending(struct dma_chan *chan) +{ + chan->device->device_issue_pending(chan); +} + +/** + * dma_async_is_tx_complete - poll for transaction completion + * @chan: DMA channel + * @cookie: transaction identifier to check status of + * @last: returns last completed cookie, can be NULL + * @used: returns last issued cookie, can be NULL + * + * If @last and @used are passed in, upon return they reflect the driver + * internal state and can be used with dma_async_is_complete() to check + * the status of multiple cookies without re-checking hardware state. 
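For completeness, a polling loop built on the completion helper documented here (defined just below); this is only an illustration, since most drivers rely on the completion callback instead, and the sleep interval is an arbitrary example value.

#include <linux/dmaengine.h>
#include <linux/delay.h>

/* Busy-wait until the cookie is no longer in progress. */
static enum dma_status example_poll(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;

        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (status == DMA_IN_PROGRESS)
                        usleep_range(100, 200);
        } while (status == DMA_IN_PROGRESS);

        return status;  /* DMA_COMPLETE, DMA_ERROR or DMA_PAUSED */
}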
+ */ +static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, + dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) +{ + struct dma_tx_state state; + enum dma_status status; + + status = chan->device->device_tx_status(chan, cookie, &state); + if (last) + *last = state.last; + if (used) + *used = state.used; + return status; +} + +/** + * dma_async_is_complete - test a cookie against chan state + * @cookie: transaction identifier to test status of + * @last_complete: last know completed transaction + * @last_used: last cookie value handed out + * + * dma_async_is_complete() is used in dma_async_is_tx_complete() + * the test logic is separated for lightweight testing of multiple cookies + */ +static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, + dma_cookie_t last_complete, dma_cookie_t last_used) +{ + if (last_complete <= last_used) { + if ((cookie <= last_complete) || (cookie > last_used)) + return DMA_COMPLETE; + } else { + if ((cookie <= last_complete) && (cookie > last_used)) + return DMA_COMPLETE; + } + return DMA_IN_PROGRESS; +} + +static inline void +dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) +{ + if (st) { + st->last = last; + st->used = used; + st->residue = residue; + } +} + +#ifdef CONFIG_DMA_ENGINE +struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); +enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); +enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); +void dma_issue_pending_all(void); +struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, + dma_filter_fn fn, void *fn_param); +struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); + +struct dma_chan *dma_request_chan(struct device *dev, const char *name); +struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask); + +void dma_release_channel(struct dma_chan *chan); +int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); +#else +static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) +{ + return NULL; +} +static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) +{ + return DMA_COMPLETE; +} +static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) +{ + return DMA_COMPLETE; +} +static inline void dma_issue_pending_all(void) +{ +} +static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, + dma_filter_fn fn, void *fn_param) +{ + return NULL; +} +static inline struct dma_chan *dma_request_slave_channel(struct device *dev, + const char *name) +{ + return NULL; +} +static inline struct dma_chan *dma_request_chan(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENODEV); +} +static inline struct dma_chan *dma_request_chan_by_mask( + const dma_cap_mask_t *mask) +{ + return ERR_PTR(-ENODEV); +} +static inline void dma_release_channel(struct dma_chan *chan) +{ +} +static inline int dma_get_slave_caps(struct dma_chan *chan, + struct dma_slave_caps *caps) +{ + return -ENXIO; +} +#endif + +#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name) + +static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) +{ + struct dma_slave_caps caps; + int ret; + + ret = dma_get_slave_caps(tx->chan, &caps); + if (ret) + return ret; + + if (caps.descriptor_reuse) { + tx->flags |= DMA_CTRL_REUSE; + return 0; + } else { + return -EPERM; + } +} + +static 
inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx) +{ + tx->flags &= ~DMA_CTRL_REUSE; +} + +static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx) +{ + return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE; +} + +static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc) +{ + /* this is supported for reusable desc, so check that */ + if (dmaengine_desc_test_reuse(desc)) + return desc->desc_free(desc); + else + return -EPERM; +} + +/* --- DMA device --- */ + +int dma_async_device_register(struct dma_device *device); +int dmaenginem_async_device_register(struct dma_device *device); +void dma_async_device_unregister(struct dma_device *device); +void dma_run_dependencies(struct dma_async_tx_descriptor *tx); +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); +#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) +#define dma_request_slave_channel_compat(mask, x, y, dev, name) \ + __dma_request_slave_channel_compat(&(mask), x, y, dev, name) + +static inline struct dma_chan +*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, + dma_filter_fn fn, void *fn_param, + struct device *dev, const char *name) +{ + struct dma_chan *chan; + + chan = dma_request_slave_channel(dev, name); + if (chan) + return chan; + + if (!fn || !fn_param) + return NULL; + + return __dma_request_channel(mask, fn, fn_param); +} +#endif /* DMAENGINE_H */ diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h new file mode 100644 index 000000000..f632ecfb4 --- /dev/null +++ b/include/linux/dmapool.h @@ -0,0 +1,59 @@ +/* + * include/linux/dmapool.h + * + * Allocation pools for DMAable (coherent) memory. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
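A sketch of the usual way a consumer obtains a channel through the dma_request_chan()/dma_release_channel() interface declared in dmaengine.h above; the "rx" request-line name and the function name are invented, and on DT platforms the error may be -EPROBE_DEFER.

#include <linux/dmaengine.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical probe fragment: look up a named slave channel. */
static struct dma_chan *example_get_rx_chan(struct device *dev)
{
        struct dma_chan *chan = dma_request_chan(dev, "rx");

        if (IS_ERR(chan))
                return chan;            /* propagate, e.g. -EPROBE_DEFER */

        dev_info(dev, "using DMA channel %s\n", dma_chan_name(chan));
        return chan;                    /* release with dma_release_channel() */
}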
+ */ + +#ifndef LINUX_DMAPOOL_H +#define LINUX_DMAPOOL_H + +#include +#include + +struct device; + +#ifdef CONFIG_HAS_DMA + +struct dma_pool *dma_pool_create(const char *name, struct device *dev, + size_t size, size_t align, size_t allocation); + +void dma_pool_destroy(struct dma_pool *pool); + +void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, + dma_addr_t *handle); +void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); + +/* + * Managed DMA pool + */ +struct dma_pool *dmam_pool_create(const char *name, struct device *dev, + size_t size, size_t align, size_t allocation); +void dmam_pool_destroy(struct dma_pool *pool); + +#else /* !CONFIG_HAS_DMA */ +static inline struct dma_pool *dma_pool_create(const char *name, + struct device *dev, size_t size, size_t align, size_t allocation) +{ return NULL; } +static inline void dma_pool_destroy(struct dma_pool *pool) { } +static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, + dma_addr_t *handle) { return NULL; } +static inline void dma_pool_free(struct dma_pool *pool, void *vaddr, + dma_addr_t addr) { } +static inline struct dma_pool *dmam_pool_create(const char *name, + struct device *dev, size_t size, size_t align, size_t allocation) +{ return NULL; } +static inline void dmam_pool_destroy(struct dma_pool *pool) { } +#endif /* !CONFIG_HAS_DMA */ + +static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, + dma_addr_t *handle) +{ + return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle); +} + +#endif + diff --git a/include/linux/dmar.h b/include/linux/dmar.h new file mode 100644 index 000000000..843a41ba7 --- /dev/null +++ b/include/linux/dmar.h @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2006, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. 
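Typical use of the dma_pool interface declared in dmapool.h above, as a hedged sketch: the pool name, the 64-byte descriptor size and the 16-byte alignment are invented example values.

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/gfp.h>

/* Hypothetical driver: fixed-size coherent buffers for hardware descriptors. */
static int example_use_pool(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *desc;

        pool = dma_pool_create("example-desc", dev, 64, 16, 0);
        if (!pool)
                return -ENOMEM;

        desc = dma_pool_zalloc(pool, GFP_KERNEL, &dma); /* zeroed allocation */
        if (!desc) {
                dma_pool_destroy(pool);
                return -ENOMEM;
        }

        /* ... program 'dma' into the hardware, touch 'desc' from the CPU ... */

        dma_pool_free(pool, desc, dma);
        dma_pool_destroy(pool);
        return 0;
}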
+ * + * Copyright (C) Ashok Raj + * Copyright (C) Shaohua Li + */ + +#ifndef __DMAR_H__ +#define __DMAR_H__ + +#include +#include +#include +#include +#include +#include + +struct acpi_dmar_header; + +#ifdef CONFIG_X86 +# define DMAR_UNITS_SUPPORTED MAX_IO_APICS +#else +# define DMAR_UNITS_SUPPORTED 64 +#endif + +/* DMAR Flags */ +#define DMAR_INTR_REMAP 0x1 +#define DMAR_X2APIC_OPT_OUT 0x2 + +struct intel_iommu; + +struct dmar_dev_scope { + struct device __rcu *dev; + u8 bus; + u8 devfn; +}; + +#ifdef CONFIG_DMAR_TABLE +extern struct acpi_table_header *dmar_tbl; +struct dmar_drhd_unit { + struct list_head list; /* list of drhd units */ + struct acpi_dmar_header *hdr; /* ACPI header */ + u64 reg_base_addr; /* register base address*/ + struct dmar_dev_scope *devices;/* target device array */ + int devices_cnt; /* target device count */ + u16 segment; /* PCI domain */ + u8 ignored:1; /* ignore drhd */ + u8 include_all:1; + struct intel_iommu *iommu; +}; + +struct dmar_pci_path { + u8 bus; + u8 device; + u8 function; +}; + +struct dmar_pci_notify_info { + struct pci_dev *dev; + unsigned long event; + int bus; + u16 seg; + u16 level; + struct dmar_pci_path path[]; +} __attribute__((packed)); + +extern struct rw_semaphore dmar_global_lock; +extern struct list_head dmar_drhd_units; + +#define for_each_drhd_unit(drhd) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) + +#define for_each_active_drhd_unit(drhd) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ + if (drhd->ignored) {} else + +#define for_each_active_iommu(i, drhd) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ + if (i=drhd->iommu, drhd->ignored) {} else + +#define for_each_iommu(i, drhd) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ + if (i=drhd->iommu, 0) {} else + +static inline bool dmar_rcu_check(void) +{ + return rwsem_is_locked(&dmar_global_lock) || + system_state == SYSTEM_BOOTING; +} + +#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check()) + +#define for_each_dev_scope(a, c, p, d) \ + for ((p) = 0; ((d) = (p) < (c) ? 
dmar_rcu_dereference((a)[(p)].dev) : \ + NULL, (p) < (c)); (p)++) + +#define for_each_active_dev_scope(a, c, p, d) \ + for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else + +extern int dmar_table_init(void); +extern int dmar_dev_scope_init(void); +extern void dmar_register_bus_notifier(void); +extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, + struct dmar_dev_scope **devices, u16 segment); +extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt); +extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt); +extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, + void *start, void*end, u16 segment, + struct dmar_dev_scope *devices, + int devices_cnt); +extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, + u16 segment, struct dmar_dev_scope *devices, + int count); +/* Intel IOMMU detection */ +extern int detect_intel_iommu(void); +extern int enable_drhd_fault_handling(void); +extern int dmar_device_add(acpi_handle handle); +extern int dmar_device_remove(acpi_handle handle); + +static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg) +{ + return 0; +} + +#ifdef CONFIG_INTEL_IOMMU +extern int iommu_detected, no_iommu; +extern int intel_iommu_init(void); +extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg); +extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg); +extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); +extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); +extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); +extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); +#else /* !CONFIG_INTEL_IOMMU: */ +static inline int intel_iommu_init(void) { return -ENODEV; } + +#define dmar_parse_one_rmrr dmar_res_noop +#define dmar_parse_one_atsr dmar_res_noop +#define dmar_check_one_atsr dmar_res_noop +#define dmar_release_one_atsr dmar_res_noop + +static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) +{ + return 0; +} + +static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) +{ + return 0; +} +#endif /* CONFIG_INTEL_IOMMU */ + +#ifdef CONFIG_IRQ_REMAP +extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert); +#else /* CONFIG_IRQ_REMAP */ +static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) +{ return 0; } +#endif /* CONFIG_IRQ_REMAP */ + +#else /* CONFIG_DMAR_TABLE */ + +static inline int dmar_device_add(void *handle) +{ + return 0; +} + +static inline int dmar_device_remove(void *handle) +{ + return 0; +} + +#endif /* CONFIG_DMAR_TABLE */ + +struct irte { + union { + /* Shared between remapped and posted mode*/ + struct { + __u64 present : 1, /* 0 */ + fpd : 1, /* 1 */ + __res0 : 6, /* 2 - 6 */ + avail : 4, /* 8 - 11 */ + __res1 : 3, /* 12 - 14 */ + pst : 1, /* 15 */ + vector : 8, /* 16 - 23 */ + __res2 : 40; /* 24 - 63 */ + }; + + /* Remapped mode */ + struct { + __u64 r_present : 1, /* 0 */ + r_fpd : 1, /* 1 */ + dst_mode : 1, /* 2 */ + redir_hint : 1, /* 3 */ + trigger_mode : 1, /* 4 */ + dlvry_mode : 3, /* 5 - 7 */ + r_avail : 4, /* 8 - 11 */ + r_res0 : 4, /* 12 - 15 */ + r_vector : 8, /* 16 - 23 */ + r_res1 : 8, /* 24 - 31 */ + dest_id : 32; /* 32 - 63 */ + }; + + /* Posted mode */ + struct { + __u64 p_present : 1, /* 0 */ + p_fpd : 1, /* 1 */ + p_res0 : 6, /* 2 - 7 */ + p_avail : 4, /* 8 - 11 */ + p_res1 : 2, /* 12 - 13 */ + p_urgent : 1, /* 14 */ + p_pst : 1, /* 15 */ + 
p_vector : 8, /* 16 - 23 */ + p_res2 : 14, /* 24 - 37 */ + pda_l : 26; /* 38 - 63 */ + }; + __u64 low; + }; + + union { + /* Shared between remapped and posted mode*/ + struct { + __u64 sid : 16, /* 64 - 79 */ + sq : 2, /* 80 - 81 */ + svt : 2, /* 82 - 83 */ + __res3 : 44; /* 84 - 127 */ + }; + + /* Posted mode*/ + struct { + __u64 p_sid : 16, /* 64 - 79 */ + p_sq : 2, /* 80 - 81 */ + p_svt : 2, /* 82 - 83 */ + p_res3 : 12, /* 84 - 95 */ + pda_h : 32; /* 96 - 127 */ + }; + __u64 high; + }; +}; + +static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src) +{ + dst->present = src->present; + dst->fpd = src->fpd; + dst->avail = src->avail; + dst->pst = src->pst; + dst->vector = src->vector; + dst->sid = src->sid; + dst->sq = src->sq; + dst->svt = src->svt; +} + +#define PDA_LOW_BIT 26 +#define PDA_HIGH_BIT 32 + +/* Can't use the common MSI interrupt functions + * since DMAR is not a pci device + */ +struct irq_data; +extern void dmar_msi_unmask(struct irq_data *data); +extern void dmar_msi_mask(struct irq_data *data); +extern void dmar_msi_read(int irq, struct msi_msg *msg); +extern void dmar_msi_write(int irq, struct msi_msg *msg); +extern int dmar_set_interrupt(struct intel_iommu *iommu); +extern irqreturn_t dmar_fault(int irq, void *dev_id); +extern int dmar_alloc_hwirq(int id, int node, void *arg); +extern void dmar_free_hwirq(int irq); + +#endif /* __DMAR_H__ */ diff --git a/include/linux/dmi.h b/include/linux/dmi.h new file mode 100644 index 000000000..c46fdb367 --- /dev/null +++ b/include/linux/dmi.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DMI_H__ +#define __DMI_H__ + +#include +#include +#include + +/* enum dmi_field is in mod_devicetable.h */ + +enum dmi_device_type { + DMI_DEV_TYPE_ANY = 0, + DMI_DEV_TYPE_OTHER, + DMI_DEV_TYPE_UNKNOWN, + DMI_DEV_TYPE_VIDEO, + DMI_DEV_TYPE_SCSI, + DMI_DEV_TYPE_ETHERNET, + DMI_DEV_TYPE_TOKENRING, + DMI_DEV_TYPE_SOUND, + DMI_DEV_TYPE_PATA, + DMI_DEV_TYPE_SATA, + DMI_DEV_TYPE_SAS, + DMI_DEV_TYPE_IPMI = -1, + DMI_DEV_TYPE_OEM_STRING = -2, + DMI_DEV_TYPE_DEV_ONBOARD = -3, + DMI_DEV_TYPE_DEV_SLOT = -4, +}; + +enum dmi_entry_type { + DMI_ENTRY_BIOS = 0, + DMI_ENTRY_SYSTEM, + DMI_ENTRY_BASEBOARD, + DMI_ENTRY_CHASSIS, + DMI_ENTRY_PROCESSOR, + DMI_ENTRY_MEM_CONTROLLER, + DMI_ENTRY_MEM_MODULE, + DMI_ENTRY_CACHE, + DMI_ENTRY_PORT_CONNECTOR, + DMI_ENTRY_SYSTEM_SLOT, + DMI_ENTRY_ONBOARD_DEVICE, + DMI_ENTRY_OEMSTRINGS, + DMI_ENTRY_SYSCONF, + DMI_ENTRY_BIOS_LANG, + DMI_ENTRY_GROUP_ASSOC, + DMI_ENTRY_SYSTEM_EVENT_LOG, + DMI_ENTRY_PHYS_MEM_ARRAY, + DMI_ENTRY_MEM_DEVICE, + DMI_ENTRY_32_MEM_ERROR, + DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR, + DMI_ENTRY_MEM_DEV_MAPPED_ADDR, + DMI_ENTRY_BUILTIN_POINTING_DEV, + DMI_ENTRY_PORTABLE_BATTERY, + DMI_ENTRY_SYSTEM_RESET, + DMI_ENTRY_HW_SECURITY, + DMI_ENTRY_SYSTEM_POWER_CONTROLS, + DMI_ENTRY_VOLTAGE_PROBE, + DMI_ENTRY_COOLING_DEV, + DMI_ENTRY_TEMP_PROBE, + DMI_ENTRY_ELECTRICAL_CURRENT_PROBE, + DMI_ENTRY_OOB_REMOTE_ACCESS, + DMI_ENTRY_BIS_ENTRY, + DMI_ENTRY_SYSTEM_BOOT, + DMI_ENTRY_MGMT_DEV, + DMI_ENTRY_MGMT_DEV_COMPONENT, + DMI_ENTRY_MGMT_DEV_THRES, + DMI_ENTRY_MEM_CHANNEL, + DMI_ENTRY_IPMI_DEV, + DMI_ENTRY_SYS_POWER_SUPPLY, + DMI_ENTRY_ADDITIONAL, + DMI_ENTRY_ONBOARD_DEV_EXT, + DMI_ENTRY_MGMT_CONTROLLER_HOST, + DMI_ENTRY_INACTIVE = 126, + DMI_ENTRY_END_OF_TABLE = 127, +}; + +struct dmi_header { + u8 type; + u8 length; + u16 handle; +} __packed; + +struct dmi_device { + struct list_head list; + int type; + const char *name; + void *device_data; /* Type specific data */ +}; + +#ifdef 
CONFIG_DMI + +struct dmi_dev_onboard { + struct dmi_device dev; + int instance; + int segment; + int bus; + int devfn; +}; + +extern struct kobject *dmi_kobj; +extern int dmi_check_system(const struct dmi_system_id *list); +const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); +extern const char * dmi_get_system_info(int field); +extern const struct dmi_device * dmi_find_device(int type, const char *name, + const struct dmi_device *from); +extern void dmi_scan_machine(void); +extern void dmi_memdev_walk(void); +extern void dmi_set_dump_stack_arch_desc(void); +extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp); +extern int dmi_get_bios_year(void); +extern int dmi_name_in_vendors(const char *str); +extern int dmi_name_in_serial(const char *str); +extern int dmi_available; +extern int dmi_walk(void (*decode)(const struct dmi_header *, void *), + void *private_data); +extern bool dmi_match(enum dmi_field f, const char *str); +extern void dmi_memdev_name(u16 handle, const char **bank, const char **device); +extern u64 dmi_memdev_size(u16 handle); + +#else + +static inline int dmi_check_system(const struct dmi_system_id *list) { return 0; } +static inline const char * dmi_get_system_info(int field) { return NULL; } +static inline const struct dmi_device * dmi_find_device(int type, const char *name, + const struct dmi_device *from) { return NULL; } +static inline void dmi_scan_machine(void) { return; } +static inline void dmi_memdev_walk(void) { } +static inline void dmi_set_dump_stack_arch_desc(void) { } +static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) +{ + if (yearp) + *yearp = 0; + if (monthp) + *monthp = 0; + if (dayp) + *dayp = 0; + return false; +} +static inline int dmi_get_bios_year(void) { return -ENXIO; } +static inline int dmi_name_in_vendors(const char *s) { return 0; } +static inline int dmi_name_in_serial(const char *s) { return 0; } +#define dmi_available 0 +static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), + void *private_data) { return -ENXIO; } +static inline bool dmi_match(enum dmi_field f, const char *str) + { return false; } +static inline void dmi_memdev_name(u16 handle, const char **bank, + const char **device) { } +static inline u64 dmi_memdev_size(u16 handle) { return ~0ul; } +static inline const struct dmi_system_id * + dmi_first_match(const struct dmi_system_id *list) { return NULL; } + +#endif + +#endif /* __DMI_H__ */ diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h new file mode 100644 index 000000000..0aad774be --- /dev/null +++ b/include/linux/dnotify.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DNOTIFY_H +#define _LINUX_DNOTIFY_H +/* + * Directory notification for Linux + * + * Copyright (C) 2000,2002 Stephen Rothwell + */ + +#include + +struct dnotify_struct { + struct dnotify_struct * dn_next; + __u32 dn_mask; + int dn_fd; + struct file * dn_filp; + fl_owner_t dn_owner; +}; + +#ifdef __KERNEL__ + + +#ifdef CONFIG_DNOTIFY + +#define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\ + FS_MODIFY | FS_MODIFY_CHILD |\ + FS_ACCESS | FS_ACCESS_CHILD |\ + FS_ATTRIB | FS_ATTRIB_CHILD |\ + FS_CREATE | FS_DN_RENAME |\ + FS_MOVED_FROM | FS_MOVED_TO) + +extern int dir_notify_enable; +extern void dnotify_flush(struct file *, fl_owner_t); +extern int fcntl_dirnotify(int, struct file *, unsigned long); + +#else + +static inline void dnotify_flush(struct file *filp, fl_owner_t id) +{ +} + +static inline int fcntl_dirnotify(int fd, 
struct file *filp, unsigned long arg) +{ + return -EINVAL; +} + +#endif /* CONFIG_DNOTIFY */ + +#endif /* __KERNEL __ */ + +#endif /* _LINUX_DNOTIFY_H */ diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h new file mode 100644 index 000000000..6ac3cad9a --- /dev/null +++ b/include/linux/dns_resolver.h @@ -0,0 +1,34 @@ +/* + * DNS Resolver upcall management for CIFS DFS and AFS + * Handles host name to IP address resolution and DNS query for AFSDB RR. + * + * Copyright (c) International Business Machines Corp., 2008 + * Author(s): Steve French (sfrench@us.ibm.com) + * Wang Lei (wang840925@gmail.com) + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_DNS_RESOLVER_H +#define _LINUX_DNS_RESOLVER_H + +#ifdef __KERNEL__ + +extern int dns_query(const char *type, const char *name, size_t namelen, + const char *options, char **_result, time64_t *_expiry); + +#endif /* KERNEL */ + +#endif /* _LINUX_DNS_RESOLVER_H */ diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h new file mode 100644 index 000000000..100d22a46 --- /dev/null +++ b/include/linux/dqblk_qtree.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions of structures and functions for quota formats using trie + */ + +#ifndef _LINUX_DQBLK_QTREE_H +#define _LINUX_DQBLK_QTREE_H + +#include + +/* Numbers of blocks needed for updates - we count with the smallest + * possible block size (1024) */ +#define QTREE_INIT_ALLOC 4 +#define QTREE_INIT_REWRITE 2 +#define QTREE_DEL_ALLOC 0 +#define QTREE_DEL_REWRITE 6 + +struct dquot; +struct kqid; + +/* Operations */ +struct qtree_fmt_operations { + void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); /* Convert given entry from in memory format to disk one */ + void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); /* Convert given entry from disk format to in memory one */ + int (*is_id)(void *disk, struct dquot *dquot); /* Is this structure for given id? 
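A hedged sketch of calling the dns_query() upcall declared in dns_resolver.h above: passing a NULL type requests an address lookup, the returned buffer must be freed with kfree(), and the error values mentioned are only examples.

#include <linux/dns_resolver.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical caller: resolve a host name to a textual address. */
static int example_resolve(const char *host)
{
        char *result = NULL;
        int len;

        len = dns_query(NULL, host, strlen(host), NULL, &result, NULL);
        if (len < 0)
                return len;             /* e.g. -ENODATA or -EKEYEXPIRED */

        pr_info("resolved %s to %*.*s\n", host, len, len, result);
        kfree(result);                  /* dns_query() allocates the result */
        return 0;
}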
*/ +}; + +/* Inmemory copy of version specific information */ +struct qtree_mem_dqinfo { + struct super_block *dqi_sb; /* Sb quota is on */ + int dqi_type; /* Quota type */ + unsigned int dqi_blocks; /* # of blocks in quota file */ + unsigned int dqi_free_blk; /* First block in list of free blocks */ + unsigned int dqi_free_entry; /* First block with free entry */ + unsigned int dqi_blocksize_bits; /* Block size of quota file */ + unsigned int dqi_entry_size; /* Size of quota entry in quota file */ + unsigned int dqi_usable_bs; /* Space usable in block for quota data */ + unsigned int dqi_qtree_depth; /* Precomputed depth of quota tree */ + const struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */ +}; + +int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk); +static inline int qtree_depth(struct qtree_mem_dqinfo *info) +{ + unsigned int epb = info->dqi_usable_bs >> 2; + unsigned long long entries = epb; + int i; + + for (i = 1; entries < (1ULL << 32); i++) + entries *= epb; + return i; +} +int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid); + +#endif /* _LINUX_DQBLK_QTREE_H */ diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h new file mode 100644 index 000000000..85d837a14 --- /dev/null +++ b/include/linux/dqblk_v1.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * File with in-memory structures of old quota format + */ + +#ifndef _LINUX_DQBLK_V1_H +#define _LINUX_DQBLK_V1_H + +/* Numbers of blocks needed for updates */ +#define V1_INIT_ALLOC 1 +#define V1_INIT_REWRITE 1 +#define V1_DEL_ALLOC 0 +#define V1_DEL_REWRITE 2 + +#endif /* _LINUX_DQBLK_V1_H */ diff --git a/include/linux/dqblk_v2.h b/include/linux/dqblk_v2.h new file mode 100644 index 000000000..da95932ad --- /dev/null +++ b/include/linux/dqblk_v2.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for vfsv0 quota format + */ + +#ifndef _LINUX_DQBLK_V2_H +#define _LINUX_DQBLK_V2_H + +#include + +/* Numbers of blocks needed for updates */ +#define V2_INIT_ALLOC QTREE_INIT_ALLOC +#define V2_INIT_REWRITE QTREE_INIT_REWRITE +#define V2_DEL_ALLOC QTREE_DEL_ALLOC +#define V2_DEL_REWRITE QTREE_DEL_REWRITE + +#endif /* _LINUX_DQBLK_V2_H */ diff --git a/include/linux/drbd.h b/include/linux/drbd.h new file mode 100644 index 000000000..2d0259327 --- /dev/null +++ b/include/linux/drbd.h @@ -0,0 +1,411 @@ +/* + drbd.h + Kernel module for 2.6.x Kernels + + This file is part of DRBD by Philipp Reisner and Lars Ellenberg. + + Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. + Copyright (C) 2001-2008, Philipp Reisner . + Copyright (C) 2001-2008, Lars Ellenberg . + + drbd is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + drbd is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
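A worked example of the qtree_depth() computation above, assuming dqi_usable_bs is 1024 (a plausible value for 1 KiB quota blocks; real filesystems may use other sizes).

/* With epb = 1024 >> 2 = 256 references per block, the tree addresses
 * 256, 256^2, 256^3 and 256^4 ids at depths 1..4; 256^4 = 2^32 is the
 * first power covering the whole 32-bit id space, so the loop returns 4. */
static int example_qtree_depth_1k(void)
{
        unsigned int epb = 1024 >> 2;           /* entries per block: 256 */
        unsigned long long entries = epb;
        int depth;

        for (depth = 1; entries < (1ULL << 32); depth++)
                entries *= epb;                 /* 2^16, 2^24, 2^32 */

        return depth;                           /* 4 */
}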
+ + You should have received a copy of the GNU General Public License + along with drbd; see the file COPYING. If not, write to + the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + +*/ +#ifndef DRBD_H +#define DRBD_H +#include + +#ifdef __KERNEL__ +#include +#include +#else +#include +#include +#include + +/* Although the Linux source code makes a difference between + generic endianness and the bitfields' endianness, there is no + architecture as of Linux-2.6.24-rc4 where the bitfields' endianness + does not match the generic endianness. */ + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#elif __BYTE_ORDER == __BIG_ENDIAN +#define __BIG_ENDIAN_BITFIELD +#else +# error "sorry, weird endianness on this box" +#endif + +#endif + +extern const char *drbd_buildtag(void); +#define REL_VERSION "8.4.10" +#define API_VERSION 1 +#define PRO_VERSION_MIN 86 +#define PRO_VERSION_MAX 101 + + +enum drbd_io_error_p { + EP_PASS_ON, /* FIXME should the better be named "Ignore"? */ + EP_CALL_HELPER, + EP_DETACH +}; + +enum drbd_fencing_p { + FP_NOT_AVAIL = -1, /* Not a policy */ + FP_DONT_CARE = 0, + FP_RESOURCE, + FP_STONITH +}; + +enum drbd_disconnect_p { + DP_RECONNECT, + DP_DROP_NET_CONF, + DP_FREEZE_IO +}; + +enum drbd_after_sb_p { + ASB_DISCONNECT, + ASB_DISCARD_YOUNGER_PRI, + ASB_DISCARD_OLDER_PRI, + ASB_DISCARD_ZERO_CHG, + ASB_DISCARD_LEAST_CHG, + ASB_DISCARD_LOCAL, + ASB_DISCARD_REMOTE, + ASB_CONSENSUS, + ASB_DISCARD_SECONDARY, + ASB_CALL_HELPER, + ASB_VIOLENTLY +}; + +enum drbd_on_no_data { + OND_IO_ERROR, + OND_SUSPEND_IO +}; + +enum drbd_on_congestion { + OC_BLOCK, + OC_PULL_AHEAD, + OC_DISCONNECT, +}; + +enum drbd_read_balancing { + RB_PREFER_LOCAL, + RB_PREFER_REMOTE, + RB_ROUND_ROBIN, + RB_LEAST_PENDING, + RB_CONGESTED_REMOTE, + RB_32K_STRIPING, + RB_64K_STRIPING, + RB_128K_STRIPING, + RB_256K_STRIPING, + RB_512K_STRIPING, + RB_1M_STRIPING, +}; + +/* KEEP the order, do not delete or insert. Only append. 
*/ +enum drbd_ret_code { + ERR_CODE_BASE = 100, + NO_ERROR = 101, + ERR_LOCAL_ADDR = 102, + ERR_PEER_ADDR = 103, + ERR_OPEN_DISK = 104, + ERR_OPEN_MD_DISK = 105, + ERR_DISK_NOT_BDEV = 107, + ERR_MD_NOT_BDEV = 108, + ERR_DISK_TOO_SMALL = 111, + ERR_MD_DISK_TOO_SMALL = 112, + ERR_BDCLAIM_DISK = 114, + ERR_BDCLAIM_MD_DISK = 115, + ERR_MD_IDX_INVALID = 116, + ERR_IO_MD_DISK = 118, + ERR_MD_INVALID = 119, + ERR_AUTH_ALG = 120, + ERR_AUTH_ALG_ND = 121, + ERR_NOMEM = 122, + ERR_DISCARD_IMPOSSIBLE = 123, + ERR_DISK_CONFIGURED = 124, + ERR_NET_CONFIGURED = 125, + ERR_MANDATORY_TAG = 126, + ERR_MINOR_INVALID = 127, + ERR_INTR = 129, /* EINTR */ + ERR_RESIZE_RESYNC = 130, + ERR_NO_PRIMARY = 131, + ERR_RESYNC_AFTER = 132, + ERR_RESYNC_AFTER_CYCLE = 133, + ERR_PAUSE_IS_SET = 134, + ERR_PAUSE_IS_CLEAR = 135, + ERR_PACKET_NR = 137, + ERR_NO_DISK = 138, + ERR_NOT_PROTO_C = 139, + ERR_NOMEM_BITMAP = 140, + ERR_INTEGRITY_ALG = 141, /* DRBD 8.2 only */ + ERR_INTEGRITY_ALG_ND = 142, /* DRBD 8.2 only */ + ERR_CPU_MASK_PARSE = 143, /* DRBD 8.2 only */ + ERR_CSUMS_ALG = 144, /* DRBD 8.2 only */ + ERR_CSUMS_ALG_ND = 145, /* DRBD 8.2 only */ + ERR_VERIFY_ALG = 146, /* DRBD 8.2 only */ + ERR_VERIFY_ALG_ND = 147, /* DRBD 8.2 only */ + ERR_CSUMS_RESYNC_RUNNING= 148, /* DRBD 8.2 only */ + ERR_VERIFY_RUNNING = 149, /* DRBD 8.2 only */ + ERR_DATA_NOT_CURRENT = 150, + ERR_CONNECTED = 151, /* DRBD 8.3 only */ + ERR_PERM = 152, + ERR_NEED_APV_93 = 153, + ERR_STONITH_AND_PROT_A = 154, + ERR_CONG_NOT_PROTO_A = 155, + ERR_PIC_AFTER_DEP = 156, + ERR_PIC_PEER_DEP = 157, + ERR_RES_NOT_KNOWN = 158, + ERR_RES_IN_USE = 159, + ERR_MINOR_CONFIGURED = 160, + ERR_MINOR_OR_VOLUME_EXISTS = 161, + ERR_INVALID_REQUEST = 162, + ERR_NEED_APV_100 = 163, + ERR_NEED_ALLOW_TWO_PRI = 164, + ERR_MD_UNCLEAN = 165, + ERR_MD_LAYOUT_CONNECTED = 166, + ERR_MD_LAYOUT_TOO_BIG = 167, + ERR_MD_LAYOUT_TOO_SMALL = 168, + ERR_MD_LAYOUT_NO_FIT = 169, + ERR_IMPLICIT_SHRINK = 170, + /* insert new ones above this line */ + AFTER_LAST_ERR_CODE +}; + +#define DRBD_PROT_A 1 +#define DRBD_PROT_B 2 +#define DRBD_PROT_C 3 + +enum drbd_role { + R_UNKNOWN = 0, + R_PRIMARY = 1, /* role */ + R_SECONDARY = 2, /* role */ + R_MASK = 3, +}; + +/* The order of these constants is important. + * The lower ones (=C_WF_REPORT_PARAMS ==> There is a socket + */ +enum drbd_conns { + C_STANDALONE, + C_DISCONNECTING, /* Temporal state on the way to StandAlone. */ + C_UNCONNECTED, /* >= C_UNCONNECTED -> inc_net() succeeds */ + + /* These temporal states are all used on the way + * from >= C_CONNECTED to Unconnected. + * The 'disconnect reason' states + * I do not allow to change between them. */ + C_TIMEOUT, + C_BROKEN_PIPE, + C_NETWORK_FAILURE, + C_PROTOCOL_ERROR, + C_TEAR_DOWN, + + C_WF_CONNECTION, + C_WF_REPORT_PARAMS, /* we have a socket */ + C_CONNECTED, /* we have introduced each other */ + C_STARTING_SYNC_S, /* starting full sync by admin request. */ + C_STARTING_SYNC_T, /* starting full sync by admin request. 
*/ + C_WF_BITMAP_S, + C_WF_BITMAP_T, + C_WF_SYNC_UUID, + + /* All SyncStates are tested with this comparison + * xx >= C_SYNC_SOURCE && xx <= C_PAUSED_SYNC_T */ + C_SYNC_SOURCE, + C_SYNC_TARGET, + C_VERIFY_S, + C_VERIFY_T, + C_PAUSED_SYNC_S, + C_PAUSED_SYNC_T, + + C_AHEAD, + C_BEHIND, + + C_MASK = 31 +}; + +enum drbd_disk_state { + D_DISKLESS, + D_ATTACHING, /* In the process of reading the meta-data */ + D_FAILED, /* Becomes D_DISKLESS as soon as we told it the peer */ + /* when >= D_FAILED it is legal to access mdev->ldev */ + D_NEGOTIATING, /* Late attaching state, we need to talk to the peer */ + D_INCONSISTENT, + D_OUTDATED, + D_UNKNOWN, /* Only used for the peer, never for myself */ + D_CONSISTENT, /* Might be D_OUTDATED, might be D_UP_TO_DATE ... */ + D_UP_TO_DATE, /* Only this disk state allows applications' IO ! */ + D_MASK = 15 +}; + +union drbd_state { +/* According to gcc's docs is the ... + * The order of allocation of bit-fields within a unit (C90 6.5.2.1, C99 6.7.2.1). + * Determined by ABI. + * pointed out by Maxim Uvarov q + * even though we transmit as "cpu_to_be32(state)", + * the offsets of the bitfields still need to be swapped + * on different endianness. + */ + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned role:2 ; /* 3/4 primary/secondary/unknown */ + unsigned peer:2 ; /* 3/4 primary/secondary/unknown */ + unsigned conn:5 ; /* 17/32 cstates */ + unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ + unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ + unsigned susp:1 ; /* 2/2 IO suspended no/yes (by user) */ + unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ + unsigned peer_isp:1 ; + unsigned user_isp:1 ; + unsigned susp_nod:1 ; /* IO suspended because no data */ + unsigned susp_fen:1 ; /* IO suspended because fence peer handler runs*/ + unsigned _pad:9; /* 0 unused */ +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned _pad:9; + unsigned susp_fen:1 ; + unsigned susp_nod:1 ; + unsigned user_isp:1 ; + unsigned peer_isp:1 ; + unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ + unsigned susp:1 ; /* 2/2 IO suspended no/yes */ + unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ + unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ + unsigned conn:5 ; /* 17/32 cstates */ + unsigned peer:2 ; /* 3/4 primary/secondary/unknown */ + unsigned role:2 ; /* 3/4 primary/secondary/unknown */ +#else +# error "this endianness is not supported" +#endif + }; + unsigned int i; +}; + +enum drbd_state_rv { + SS_CW_NO_NEED = 4, + SS_CW_SUCCESS = 3, + SS_NOTHING_TO_DO = 2, + SS_SUCCESS = 1, + SS_UNKNOWN_ERROR = 0, /* Used to sleep longer in _drbd_request_state */ + SS_TWO_PRIMARIES = -1, + SS_NO_UP_TO_DATE_DISK = -2, + SS_NO_LOCAL_DISK = -4, + SS_NO_REMOTE_DISK = -5, + SS_CONNECTED_OUTDATES = -6, + SS_PRIMARY_NOP = -7, + SS_RESYNC_RUNNING = -8, + SS_ALREADY_STANDALONE = -9, + SS_CW_FAILED_BY_PEER = -10, + SS_IS_DISKLESS = -11, + SS_DEVICE_IN_USE = -12, + SS_NO_NET_CONFIG = -13, + SS_NO_VERIFY_ALG = -14, /* drbd-8.2 only */ + SS_NEED_CONNECTION = -15, /* drbd-8.2 only */ + SS_LOWER_THAN_OUTDATED = -16, + SS_NOT_SUPPORTED = -17, /* drbd-8.2 only */ + SS_IN_TRANSIENT_STATE = -18, /* Retry after the next state change */ + SS_CONCURRENT_ST_CHG = -19, /* Concurrent cluster side state change! 
*/ + SS_O_VOL_PEER_PRI = -20, + SS_OUTDATE_WO_CONN = -21, + SS_AFTER_LAST_ERROR = -22, /* Keep this at bottom */ +}; + +#define SHARED_SECRET_MAX 64 + +#define MDF_CONSISTENT (1 << 0) +#define MDF_PRIMARY_IND (1 << 1) +#define MDF_CONNECTED_IND (1 << 2) +#define MDF_FULL_SYNC (1 << 3) +#define MDF_WAS_UP_TO_DATE (1 << 4) +#define MDF_PEER_OUT_DATED (1 << 5) +#define MDF_CRASHED_PRIMARY (1 << 6) +#define MDF_AL_CLEAN (1 << 7) +#define MDF_AL_DISABLED (1 << 8) + +#define MAX_PEERS 32 + +enum drbd_uuid_index { + UI_CURRENT, + UI_BITMAP, + UI_HISTORY_START, + UI_HISTORY_END, + UI_SIZE, /* nl-packet: number of dirty bits */ + UI_FLAGS, /* nl-packet: flags */ + UI_EXTENDED_SIZE /* Everything. */ +}; + +#define HISTORY_UUIDS MAX_PEERS + +enum drbd_timeout_flag { + UT_DEFAULT = 0, + UT_DEGRADED = 1, + UT_PEER_OUTDATED = 2, +}; + +enum drbd_notification_type { + NOTIFY_EXISTS, + NOTIFY_CREATE, + NOTIFY_CHANGE, + NOTIFY_DESTROY, + NOTIFY_CALL, + NOTIFY_RESPONSE, + + NOTIFY_CONTINUES = 0x8000, + NOTIFY_FLAGS = NOTIFY_CONTINUES, +}; + +enum drbd_peer_state { + P_INCONSISTENT = 3, + P_OUTDATED = 4, + P_DOWN = 5, + P_PRIMARY = 6, + P_FENCING = 7, +}; + +#define UUID_JUST_CREATED ((__u64)4) + +enum write_ordering_e { + WO_NONE, + WO_DRAIN_IO, + WO_BDEV_FLUSH, + WO_BIO_BARRIER +}; + +/* magic numbers used in meta data and network packets */ +#define DRBD_MAGIC 0x83740267 +#define DRBD_MAGIC_BIG 0x835a +#define DRBD_MAGIC_100 0x8620ec20 + +#define DRBD_MD_MAGIC_07 (DRBD_MAGIC+3) +#define DRBD_MD_MAGIC_08 (DRBD_MAGIC+4) +#define DRBD_MD_MAGIC_84_UNCLEAN (DRBD_MAGIC+5) + + +/* how I came up with this magic? + * base64 decode "actlog==" ;) */ +#define DRBD_AL_MAGIC 0x69cb65a2 + +/* these are of type "int" */ +#define DRBD_MD_INDEX_INTERNAL -1 +#define DRBD_MD_INDEX_FLEX_EXT -2 +#define DRBD_MD_INDEX_FLEX_INT -3 + +#define DRBD_CPU_MASK_SIZE 32 + +#endif diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h new file mode 100644 index 000000000..53f44b8cd --- /dev/null +++ b/include/linux/drbd_genl.h @@ -0,0 +1,536 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * General overview: + * full generic netlink message: + * |nlmsghdr|genlmsghdr| + * + * payload: + * |optional fixed size family header| + * + * sequence of netlink attributes: + * I chose to have all "top level" attributes NLA_NESTED, + * corresponding to some real struct. + * So we have a sequence of |tla, len| + * + * nested nla sequence: + * may be empty, or contain a sequence of netlink attributes + * representing the struct fields. + * + * The tag number of any field (regardless of containing struct) + * will be available as T_ ## field_name, + * so you cannot have the same field name in two differnt structs. + * + * The tag numbers themselves are per struct, though, + * so should always begin at 1 (not 0, that is the special "NLA_UNSPEC" type, + * which we won't use here). + * The tag numbers are used as index in the respective nla_policy array. 
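[Editorial illustration, not part of the patch above.] A minimal fragment showing what the layout just described corresponds to in plain netlink terms; the attribute names and the helper are invented, and nothing here is generated by the genl_magic headers. Each top level attribute is NLA_NESTED, and the per-struct field tags index a small nla_policy array:

#include <net/netlink.h>

/* hypothetical per-struct tags: 1 is a u32 field, 2 a NUL-terminated string */
enum { T_EXAMPLE_VOLUME = 1, T_EXAMPLE_NAME = 2, T_EXAMPLE_MAX = 2 };

static const struct nla_policy example_policy[T_EXAMPLE_MAX + 1] = {
	[T_EXAMPLE_VOLUME] = { .type = NLA_U32 },
	[T_EXAMPLE_NAME]   = { .type = NLA_NUL_STRING, .len = 128 },
};

/* parse one nested top-level attribute into its per-field attributes */
static int example_parse(const struct nlattr *tla, u32 *volume)
{
	struct nlattr *tb[T_EXAMPLE_MAX + 1];
	int err;

	err = nla_parse_nested(tb, T_EXAMPLE_MAX, tla, example_policy, NULL);
	if (err)
		return err;
	if (tb[T_EXAMPLE_VOLUME])
		*volume = nla_get_u32(tb[T_EXAMPLE_VOLUME]);
	return 0;
}

The comment block in the header continues below.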
+ * + * GENL_struct(tag_name, tag_number, struct name, struct fields) - struct and policy + * genl_magic_struct.h + * generates the struct declaration, + * generates an entry in the tla enum, + * genl_magic_func.h + * generates an entry in the static tla policy + * with .type = NLA_NESTED + * generates the static _nl_policy definition, + * and static conversion functions + * + * genl_magic_func.h + * + * GENL_mc_group(group) + * genl_magic_struct.h + * does nothing + * genl_magic_func.h + * defines and registers the mcast group, + * and provides a send helper + * + * GENL_notification(op_name, op_num, mcast_group, tla list) + * These are notifications to userspace. + * + * genl_magic_struct.h + * generates an entry in the genl_ops enum, + * genl_magic_func.h + * does nothing + * + * mcast group: the name of the mcast group this notification should be + * expected on + * tla list: the list of expected top level attributes, + * for documentation and sanity checking. + * + * GENL_op(op_name, op_num, flags and handler, tla list) - "genl operations" + * These are requests from userspace. + * + * _op and _notification share the same "number space", + * op_nr will be assigned to "genlmsghdr->cmd" + * + * genl_magic_struct.h + * generates an entry in the genl_ops enum, + * genl_magic_func.h + * generates an entry in the static genl_ops array, + * and static register/unregister functions to + * genl_register_family(). + * + * flags and handler: + * GENL_op_init( .doit = x, .dumpit = y, .flags = something) + * GENL_doit(x) => .dumpit = NULL, .flags = GENL_ADMIN_PERM + * tla list: the list of expected top level attributes, + * for documentation and sanity checking. + */ + +/* + * STRUCTS + */ + +/* this is sent kernel -> userland on various error conditions, and contains + * informational textual info, which is supposedly human readable. + * The computer relevant return code is in the drbd_genlmsghdr. + */ +GENL_struct(DRBD_NLA_CFG_REPLY, 1, drbd_cfg_reply, + /* "arbitrary" size strings, nla_policy.len = 0 */ + __str_field(1, DRBD_GENLA_F_MANDATORY, info_text, 0) +) + +/* Configuration requests typically need a context to operate on. + * Possible keys are device minor (fits in the drbd_genlmsghdr), + * the replication link (aka connection) name, + * and/or the replication group (aka resource) name, + * and the volume id within the resource. 
*/ +GENL_struct(DRBD_NLA_CFG_CONTEXT, 2, drbd_cfg_context, + __u32_field(1, DRBD_GENLA_F_MANDATORY, ctx_volume) + __str_field(2, DRBD_GENLA_F_MANDATORY, ctx_resource_name, 128) + __bin_field(3, DRBD_GENLA_F_MANDATORY, ctx_my_addr, 128) + __bin_field(4, DRBD_GENLA_F_MANDATORY, ctx_peer_addr, 128) +) + +GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf, + __str_field(1, DRBD_F_REQUIRED | DRBD_F_INVARIANT, backing_dev, 128) + __str_field(2, DRBD_F_REQUIRED | DRBD_F_INVARIANT, meta_dev, 128) + __s32_field(3, DRBD_F_REQUIRED | DRBD_F_INVARIANT, meta_dev_idx) + + /* use the resize command to try and change the disk_size */ + __u64_field(4, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, disk_size) + /* we could change the max_bio_bvecs, + * but it won't propagate through the stack */ + __u32_field(5, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, max_bio_bvecs) + + __u32_field_def(6, DRBD_GENLA_F_MANDATORY, on_io_error, DRBD_ON_IO_ERROR_DEF) + __u32_field_def(7, DRBD_GENLA_F_MANDATORY, fencing, DRBD_FENCING_DEF) + + __u32_field_def(8, DRBD_GENLA_F_MANDATORY, resync_rate, DRBD_RESYNC_RATE_DEF) + __s32_field_def(9, DRBD_GENLA_F_MANDATORY, resync_after, DRBD_MINOR_NUMBER_DEF) + __u32_field_def(10, DRBD_GENLA_F_MANDATORY, al_extents, DRBD_AL_EXTENTS_DEF) + __u32_field_def(11, DRBD_GENLA_F_MANDATORY, c_plan_ahead, DRBD_C_PLAN_AHEAD_DEF) + __u32_field_def(12, DRBD_GENLA_F_MANDATORY, c_delay_target, DRBD_C_DELAY_TARGET_DEF) + __u32_field_def(13, DRBD_GENLA_F_MANDATORY, c_fill_target, DRBD_C_FILL_TARGET_DEF) + __u32_field_def(14, DRBD_GENLA_F_MANDATORY, c_max_rate, DRBD_C_MAX_RATE_DEF) + __u32_field_def(15, DRBD_GENLA_F_MANDATORY, c_min_rate, DRBD_C_MIN_RATE_DEF) + __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF) + __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF) + __u32_field_def(25, 0 /* OPTIONAL */, rs_discard_granularity, DRBD_RS_DISCARD_GRANULARITY_DEF) + + __flg_field_def(16, DRBD_GENLA_F_MANDATORY, disk_barrier, DRBD_DISK_BARRIER_DEF) + __flg_field_def(17, DRBD_GENLA_F_MANDATORY, disk_flushes, DRBD_DISK_FLUSHES_DEF) + __flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF) + __flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF) + __flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF) + __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF) + __flg_field_def(26, 0 /* OPTIONAL */, disable_write_same, DRBD_DISABLE_WRITE_SAME_DEF) +) + +GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts, + __str_field_def(1, DRBD_GENLA_F_MANDATORY, cpu_mask, DRBD_CPU_MASK_SIZE) + __u32_field_def(2, DRBD_GENLA_F_MANDATORY, on_no_data, DRBD_ON_NO_DATA_DEF) +) + +GENL_struct(DRBD_NLA_NET_CONF, 5, net_conf, + __str_field_def(1, DRBD_GENLA_F_MANDATORY | DRBD_F_SENSITIVE, + shared_secret, SHARED_SECRET_MAX) + __str_field_def(2, DRBD_GENLA_F_MANDATORY, cram_hmac_alg, SHARED_SECRET_MAX) + __str_field_def(3, DRBD_GENLA_F_MANDATORY, integrity_alg, SHARED_SECRET_MAX) + __str_field_def(4, DRBD_GENLA_F_MANDATORY, verify_alg, SHARED_SECRET_MAX) + __str_field_def(5, DRBD_GENLA_F_MANDATORY, csums_alg, SHARED_SECRET_MAX) + __u32_field_def(6, DRBD_GENLA_F_MANDATORY, wire_protocol, DRBD_PROTOCOL_DEF) + __u32_field_def(7, DRBD_GENLA_F_MANDATORY, connect_int, DRBD_CONNECT_INT_DEF) + __u32_field_def(8, DRBD_GENLA_F_MANDATORY, timeout, DRBD_TIMEOUT_DEF) + __u32_field_def(9, DRBD_GENLA_F_MANDATORY, ping_int, DRBD_PING_INT_DEF) + __u32_field_def(10, DRBD_GENLA_F_MANDATORY, ping_timeo, 
DRBD_PING_TIMEO_DEF) + __u32_field_def(11, DRBD_GENLA_F_MANDATORY, sndbuf_size, DRBD_SNDBUF_SIZE_DEF) + __u32_field_def(12, DRBD_GENLA_F_MANDATORY, rcvbuf_size, DRBD_RCVBUF_SIZE_DEF) + __u32_field_def(13, DRBD_GENLA_F_MANDATORY, ko_count, DRBD_KO_COUNT_DEF) + __u32_field_def(14, DRBD_GENLA_F_MANDATORY, max_buffers, DRBD_MAX_BUFFERS_DEF) + __u32_field_def(15, DRBD_GENLA_F_MANDATORY, max_epoch_size, DRBD_MAX_EPOCH_SIZE_DEF) + __u32_field_def(16, DRBD_GENLA_F_MANDATORY, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF) + __u32_field_def(17, DRBD_GENLA_F_MANDATORY, after_sb_0p, DRBD_AFTER_SB_0P_DEF) + __u32_field_def(18, DRBD_GENLA_F_MANDATORY, after_sb_1p, DRBD_AFTER_SB_1P_DEF) + __u32_field_def(19, DRBD_GENLA_F_MANDATORY, after_sb_2p, DRBD_AFTER_SB_2P_DEF) + __u32_field_def(20, DRBD_GENLA_F_MANDATORY, rr_conflict, DRBD_RR_CONFLICT_DEF) + __u32_field_def(21, DRBD_GENLA_F_MANDATORY, on_congestion, DRBD_ON_CONGESTION_DEF) + __u32_field_def(22, DRBD_GENLA_F_MANDATORY, cong_fill, DRBD_CONG_FILL_DEF) + __u32_field_def(23, DRBD_GENLA_F_MANDATORY, cong_extents, DRBD_CONG_EXTENTS_DEF) + __flg_field_def(24, DRBD_GENLA_F_MANDATORY, two_primaries, DRBD_ALLOW_TWO_PRIMARIES_DEF) + __flg_field(25, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, discard_my_data) + __flg_field_def(26, DRBD_GENLA_F_MANDATORY, tcp_cork, DRBD_TCP_CORK_DEF) + __flg_field_def(27, DRBD_GENLA_F_MANDATORY, always_asbp, DRBD_ALWAYS_ASBP_DEF) + __flg_field(28, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, tentative) + __flg_field_def(29, DRBD_GENLA_F_MANDATORY, use_rle, DRBD_USE_RLE_DEF) + /* 9: __u32_field_def(30, DRBD_GENLA_F_MANDATORY, fencing_policy, DRBD_FENCING_DEF) */ + /* 9: __str_field_def(31, DRBD_GENLA_F_MANDATORY, name, SHARED_SECRET_MAX) */ + /* 9: __u32_field(32, DRBD_F_REQUIRED | DRBD_F_INVARIANT, peer_node_id) */ + __flg_field_def(33, 0 /* OPTIONAL */, csums_after_crash_only, DRBD_CSUMS_AFTER_CRASH_ONLY_DEF) + __u32_field_def(34, 0 /* OPTIONAL */, sock_check_timeo, DRBD_SOCKET_CHECK_TIMEO_DEF) +) + +GENL_struct(DRBD_NLA_SET_ROLE_PARMS, 6, set_role_parms, + __flg_field(1, DRBD_GENLA_F_MANDATORY, assume_uptodate) +) + +GENL_struct(DRBD_NLA_RESIZE_PARMS, 7, resize_parms, + __u64_field(1, DRBD_GENLA_F_MANDATORY, resize_size) + __flg_field(2, DRBD_GENLA_F_MANDATORY, resize_force) + __flg_field(3, DRBD_GENLA_F_MANDATORY, no_resync) + __u32_field_def(4, 0 /* OPTIONAL */, al_stripes, DRBD_AL_STRIPES_DEF) + __u32_field_def(5, 0 /* OPTIONAL */, al_stripe_size, DRBD_AL_STRIPE_SIZE_DEF) +) + +GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info, + /* the reason of the broadcast, + * if this is an event triggered broadcast. */ + __u32_field(1, DRBD_GENLA_F_MANDATORY, sib_reason) + __u32_field(2, DRBD_F_REQUIRED, current_state) + __u64_field(3, DRBD_GENLA_F_MANDATORY, capacity) + __u64_field(4, DRBD_GENLA_F_MANDATORY, ed_uuid) + + /* These are for broadcast from after state change work. + * prev_state and new_state are from the moment the state change took + * place, new_state is not neccessarily the same as current_state, + * there may have been more state changes since. Which will be + * broadcasted soon, in their respective after state change work. 
*/ + __u32_field(5, DRBD_GENLA_F_MANDATORY, prev_state) + __u32_field(6, DRBD_GENLA_F_MANDATORY, new_state) + + /* if we have a local disk: */ + __bin_field(7, DRBD_GENLA_F_MANDATORY, uuids, (UI_SIZE*sizeof(__u64))) + __u32_field(8, DRBD_GENLA_F_MANDATORY, disk_flags) + __u64_field(9, DRBD_GENLA_F_MANDATORY, bits_total) + __u64_field(10, DRBD_GENLA_F_MANDATORY, bits_oos) + /* and in case resync or online verify is active */ + __u64_field(11, DRBD_GENLA_F_MANDATORY, bits_rs_total) + __u64_field(12, DRBD_GENLA_F_MANDATORY, bits_rs_failed) + + /* for pre and post notifications of helper execution */ + __str_field(13, DRBD_GENLA_F_MANDATORY, helper, 32) + __u32_field(14, DRBD_GENLA_F_MANDATORY, helper_exit_code) + + __u64_field(15, 0, send_cnt) + __u64_field(16, 0, recv_cnt) + __u64_field(17, 0, read_cnt) + __u64_field(18, 0, writ_cnt) + __u64_field(19, 0, al_writ_cnt) + __u64_field(20, 0, bm_writ_cnt) + __u32_field(21, 0, ap_bio_cnt) + __u32_field(22, 0, ap_pending_cnt) + __u32_field(23, 0, rs_pending_cnt) +) + +GENL_struct(DRBD_NLA_START_OV_PARMS, 9, start_ov_parms, + __u64_field(1, DRBD_GENLA_F_MANDATORY, ov_start_sector) + __u64_field(2, DRBD_GENLA_F_MANDATORY, ov_stop_sector) +) + +GENL_struct(DRBD_NLA_NEW_C_UUID_PARMS, 10, new_c_uuid_parms, + __flg_field(1, DRBD_GENLA_F_MANDATORY, clear_bm) +) + +GENL_struct(DRBD_NLA_TIMEOUT_PARMS, 11, timeout_parms, + __u32_field(1, DRBD_F_REQUIRED, timeout_type) +) + +GENL_struct(DRBD_NLA_DISCONNECT_PARMS, 12, disconnect_parms, + __flg_field(1, DRBD_GENLA_F_MANDATORY, force_disconnect) +) + +GENL_struct(DRBD_NLA_DETACH_PARMS, 13, detach_parms, + __flg_field(1, DRBD_GENLA_F_MANDATORY, force_detach) +) + +GENL_struct(DRBD_NLA_RESOURCE_INFO, 15, resource_info, + __u32_field(1, 0, res_role) + __flg_field(2, 0, res_susp) + __flg_field(3, 0, res_susp_nod) + __flg_field(4, 0, res_susp_fen) + /* __flg_field(5, 0, res_weak) */ +) + +GENL_struct(DRBD_NLA_DEVICE_INFO, 16, device_info, + __u32_field(1, 0, dev_disk_state) +) + +GENL_struct(DRBD_NLA_CONNECTION_INFO, 17, connection_info, + __u32_field(1, 0, conn_connection_state) + __u32_field(2, 0, conn_role) +) + +GENL_struct(DRBD_NLA_PEER_DEVICE_INFO, 18, peer_device_info, + __u32_field(1, 0, peer_repl_state) + __u32_field(2, 0, peer_disk_state) + __u32_field(3, 0, peer_resync_susp_user) + __u32_field(4, 0, peer_resync_susp_peer) + __u32_field(5, 0, peer_resync_susp_dependency) +) + +GENL_struct(DRBD_NLA_RESOURCE_STATISTICS, 19, resource_statistics, + __u32_field(1, 0, res_stat_write_ordering) +) + +GENL_struct(DRBD_NLA_DEVICE_STATISTICS, 20, device_statistics, + __u64_field(1, 0, dev_size) /* (sectors) */ + __u64_field(2, 0, dev_read) /* (sectors) */ + __u64_field(3, 0, dev_write) /* (sectors) */ + __u64_field(4, 0, dev_al_writes) /* activity log writes (count) */ + __u64_field(5, 0, dev_bm_writes) /* bitmap writes (count) */ + __u32_field(6, 0, dev_upper_pending) /* application requests in progress */ + __u32_field(7, 0, dev_lower_pending) /* backing device requests in progress */ + __flg_field(8, 0, dev_upper_blocked) + __flg_field(9, 0, dev_lower_blocked) + __flg_field(10, 0, dev_al_suspended) /* activity log suspended */ + __u64_field(11, 0, dev_exposed_data_uuid) + __u64_field(12, 0, dev_current_uuid) + __u32_field(13, 0, dev_disk_flags) + __bin_field(14, 0, history_uuids, HISTORY_UUIDS * sizeof(__u64)) +) + +GENL_struct(DRBD_NLA_CONNECTION_STATISTICS, 21, connection_statistics, + __flg_field(1, 0, conn_congested) +) + +GENL_struct(DRBD_NLA_PEER_DEVICE_STATISTICS, 22, peer_device_statistics, + __u64_field(1, 
0, peer_dev_received) /* sectors */ + __u64_field(2, 0, peer_dev_sent) /* sectors */ + __u32_field(3, 0, peer_dev_pending) /* number of requests */ + __u32_field(4, 0, peer_dev_unacked) /* number of requests */ + __u64_field(5, 0, peer_dev_out_of_sync) /* sectors */ + __u64_field(6, 0, peer_dev_resync_failed) /* sectors */ + __u64_field(7, 0, peer_dev_bitmap_uuid) + __u32_field(9, 0, peer_dev_flags) +) + +GENL_struct(DRBD_NLA_NOTIFICATION_HEADER, 23, drbd_notification_header, + __u32_field(1, DRBD_GENLA_F_MANDATORY, nh_type) +) + +GENL_struct(DRBD_NLA_HELPER, 24, drbd_helper_info, + __str_field(1, DRBD_GENLA_F_MANDATORY, helper_name, 32) + __u32_field(2, DRBD_GENLA_F_MANDATORY, helper_status) +) + +/* + * Notifications and commands (genlmsghdr->cmd) + */ +GENL_mc_group(events) + + /* kernel -> userspace announcement of changes */ +GENL_notification( + DRBD_EVENT, 1, events, + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_STATE_INFO, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_NET_CONF, DRBD_GENLA_F_MANDATORY) + GENL_tla_expected(DRBD_NLA_DISK_CONF, DRBD_GENLA_F_MANDATORY) + GENL_tla_expected(DRBD_NLA_SYNCER_CONF, DRBD_GENLA_F_MANDATORY) +) + + /* query kernel for specific or all info */ +GENL_op( + DRBD_ADM_GET_STATUS, 2, + GENL_op_init( + .doit = drbd_adm_get_status, + .dumpit = drbd_adm_get_status_all, + /* anyone may ask for the status, + * it is broadcasted anyways */ + ), + /* To select the object .doit. + * Or a subset of objects in .dumpit. */ + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY) +) + + /* add DRBD minor devices as volumes to resources */ +GENL_op(DRBD_ADM_NEW_MINOR, 5, GENL_doit(drbd_adm_new_minor), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) +GENL_op(DRBD_ADM_DEL_MINOR, 6, GENL_doit(drbd_adm_del_minor), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) + + /* add or delete resources */ +GENL_op(DRBD_ADM_NEW_RESOURCE, 7, GENL_doit(drbd_adm_new_resource), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) +GENL_op(DRBD_ADM_DEL_RESOURCE, 8, GENL_doit(drbd_adm_del_resource), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) + +GENL_op(DRBD_ADM_RESOURCE_OPTS, 9, + GENL_doit(drbd_adm_resource_opts), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_RESOURCE_OPTS, DRBD_GENLA_F_MANDATORY) +) + +GENL_op( + DRBD_ADM_CONNECT, 10, + GENL_doit(drbd_adm_connect), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_NET_CONF, DRBD_F_REQUIRED) +) + +GENL_op( + DRBD_ADM_CHG_NET_OPTS, 29, + GENL_doit(drbd_adm_net_opts), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_NET_CONF, DRBD_F_REQUIRED) +) + +GENL_op(DRBD_ADM_DISCONNECT, 11, GENL_doit(drbd_adm_disconnect), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) + +GENL_op(DRBD_ADM_ATTACH, 12, + GENL_doit(drbd_adm_attach), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_DISK_CONF, DRBD_F_REQUIRED) +) + +GENL_op(DRBD_ADM_CHG_DISK_OPTS, 28, + GENL_doit(drbd_adm_disk_opts), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_DISK_OPTS, DRBD_F_REQUIRED) +) + +GENL_op( + DRBD_ADM_RESIZE, 13, + GENL_doit(drbd_adm_resize), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_RESIZE_PARMS, DRBD_GENLA_F_MANDATORY) +) + +GENL_op( + DRBD_ADM_PRIMARY, 14, + GENL_doit(drbd_adm_set_role), + 
GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_SET_ROLE_PARMS, DRBD_F_REQUIRED) +) + +GENL_op( + DRBD_ADM_SECONDARY, 15, + GENL_doit(drbd_adm_set_role), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_SET_ROLE_PARMS, DRBD_F_REQUIRED) +) + +GENL_op( + DRBD_ADM_NEW_C_UUID, 16, + GENL_doit(drbd_adm_new_c_uuid), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_NEW_C_UUID_PARMS, DRBD_GENLA_F_MANDATORY) +) + +GENL_op( + DRBD_ADM_START_OV, 17, + GENL_doit(drbd_adm_start_ov), + GENL_tla_expected(DRBD_NLA_START_OV_PARMS, DRBD_GENLA_F_MANDATORY) +) + +GENL_op(DRBD_ADM_DETACH, 18, GENL_doit(drbd_adm_detach), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_DETACH_PARMS, DRBD_GENLA_F_MANDATORY)) + +GENL_op(DRBD_ADM_INVALIDATE, 19, GENL_doit(drbd_adm_invalidate), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) +GENL_op(DRBD_ADM_INVAL_PEER, 20, GENL_doit(drbd_adm_invalidate_peer), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) +GENL_op(DRBD_ADM_PAUSE_SYNC, 21, GENL_doit(drbd_adm_pause_sync), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) +GENL_op(DRBD_ADM_RESUME_SYNC, 22, GENL_doit(drbd_adm_resume_sync), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) +GENL_op(DRBD_ADM_SUSPEND_IO, 23, GENL_doit(drbd_adm_suspend_io), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) +GENL_op(DRBD_ADM_RESUME_IO, 24, GENL_doit(drbd_adm_resume_io), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) +GENL_op(DRBD_ADM_OUTDATE, 25, GENL_doit(drbd_adm_outdate), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) +GENL_op(DRBD_ADM_GET_TIMEOUT_TYPE, 26, GENL_doit(drbd_adm_get_timeout_type), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) +GENL_op(DRBD_ADM_DOWN, 27, GENL_doit(drbd_adm_down), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)) + +GENL_op(DRBD_ADM_GET_RESOURCES, 30, + GENL_op_init( + .dumpit = drbd_adm_dump_resources, + ), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY) + GENL_tla_expected(DRBD_NLA_RESOURCE_INFO, DRBD_GENLA_F_MANDATORY) + GENL_tla_expected(DRBD_NLA_RESOURCE_STATISTICS, DRBD_GENLA_F_MANDATORY)) + +GENL_op(DRBD_ADM_GET_DEVICES, 31, + GENL_op_init( + .dumpit = drbd_adm_dump_devices, + .done = drbd_adm_dump_devices_done, + ), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY) + GENL_tla_expected(DRBD_NLA_DEVICE_INFO, DRBD_GENLA_F_MANDATORY) + GENL_tla_expected(DRBD_NLA_DEVICE_STATISTICS, DRBD_GENLA_F_MANDATORY)) + +GENL_op(DRBD_ADM_GET_CONNECTIONS, 32, + GENL_op_init( + .dumpit = drbd_adm_dump_connections, + .done = drbd_adm_dump_connections_done, + ), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY) + GENL_tla_expected(DRBD_NLA_CONNECTION_INFO, DRBD_GENLA_F_MANDATORY) + GENL_tla_expected(DRBD_NLA_CONNECTION_STATISTICS, DRBD_GENLA_F_MANDATORY)) + +GENL_op(DRBD_ADM_GET_PEER_DEVICES, 33, + GENL_op_init( + .dumpit = drbd_adm_dump_peer_devices, + .done = drbd_adm_dump_peer_devices_done, + ), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY) + GENL_tla_expected(DRBD_NLA_PEER_DEVICE_INFO, DRBD_GENLA_F_MANDATORY) + GENL_tla_expected(DRBD_NLA_PEER_DEVICE_STATISTICS, DRBD_GENLA_F_MANDATORY)) + +GENL_notification( + DRBD_RESOURCE_STATE, 34, events, + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_NOTIFICATION_HEADER, DRBD_F_REQUIRED) + 
GENL_tla_expected(DRBD_NLA_RESOURCE_INFO, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_RESOURCE_STATISTICS, DRBD_F_REQUIRED)) + +GENL_notification( + DRBD_DEVICE_STATE, 35, events, + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_NOTIFICATION_HEADER, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_DEVICE_INFO, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_DEVICE_STATISTICS, DRBD_F_REQUIRED)) + +GENL_notification( + DRBD_CONNECTION_STATE, 36, events, + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_NOTIFICATION_HEADER, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_CONNECTION_INFO, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_CONNECTION_STATISTICS, DRBD_F_REQUIRED)) + +GENL_notification( + DRBD_PEER_DEVICE_STATE, 37, events, + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_NOTIFICATION_HEADER, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_PEER_DEVICE_INFO, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_PEER_DEVICE_STATISTICS, DRBD_F_REQUIRED)) + +GENL_op( + DRBD_ADM_GET_INITIAL_STATE, 38, + GENL_op_init( + .dumpit = drbd_adm_get_initial_state, + ), + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)) + +GENL_notification( + DRBD_HELPER, 40, events, + GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED) + GENL_tla_expected(DRBD_NLA_HELPER, DRBD_F_REQUIRED)) + +GENL_notification( + DRBD_INITIAL_STATE_DONE, 41, events, + GENL_tla_expected(DRBD_NLA_NOTIFICATION_HEADER, DRBD_F_REQUIRED)) diff --git a/include/linux/drbd_genl_api.h b/include/linux/drbd_genl_api.h new file mode 100644 index 000000000..bd62efc29 --- /dev/null +++ b/include/linux/drbd_genl_api.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DRBD_GENL_STRUCT_H +#define DRBD_GENL_STRUCT_H + +/** + * struct drbd_genlmsghdr - DRBD specific header used in NETLINK_GENERIC requests + * @minor: + * For admin requests (user -> kernel): which minor device to operate on. + * For (unicast) replies or informational (broadcast) messages + * (kernel -> user): which minor device the information is about. + * If we do not operate on minors, but on connections or resources, + * the minor value shall be (~0), and the attribute DRBD_NLA_CFG_CONTEXT + * is used instead. + * @flags: possible operation modifiers (relevant only for user->kernel): + * DRBD_GENL_F_SET_DEFAULTS + * @volume: + * When creating a new minor (adding it to a resource), the resource needs + * to know which volume number within the resource this is supposed to be. + * The volume number corresponds to the same volume number on the remote side, + * whereas the minor number on the remote side may be different + * (union with flags). 
+ * @ret_code: kernel->userland unicast cfg reply return code (union with flags); + */ +struct drbd_genlmsghdr { + __u32 minor; + union { + __u32 flags; + __s32 ret_code; + }; +}; + +/* To be used in drbd_genlmsghdr.flags */ +enum { + DRBD_GENL_F_SET_DEFAULTS = 1, +}; + +enum drbd_state_info_bcast_reason { + SIB_GET_STATUS_REPLY = 1, + SIB_STATE_CHANGE = 2, + SIB_HELPER_PRE = 3, + SIB_HELPER_POST = 4, + SIB_SYNC_PROGRESS = 5, +}; + +/* hack around predefined gcc/cpp "linux=1", + * we cannot possibly include <1/drbd_genl.h> */ +#undef linux + +#include +#define GENL_MAGIC_VERSION API_VERSION +#define GENL_MAGIC_FAMILY drbd +#define GENL_MAGIC_FAMILY_HDRSZ sizeof(struct drbd_genlmsghdr) +#define GENL_MAGIC_INCLUDE_FILE +#include + +#endif diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h new file mode 100644 index 000000000..9e33f7038 --- /dev/null +++ b/include/linux/drbd_limits.h @@ -0,0 +1,251 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + drbd_limits.h + This file is part of DRBD by Philipp Reisner and Lars Ellenberg. +*/ + +/* + * Our current limitations. + * Some of them are hard limits, + * some of them are arbitrary range limits, that make it easier to provide + * feedback about nonsense settings for certain configurable values. + */ + +#ifndef DRBD_LIMITS_H +#define DRBD_LIMITS_H 1 + +#define DEBUG_RANGE_CHECK 0 + +#define DRBD_MINOR_COUNT_MIN 1 +#define DRBD_MINOR_COUNT_MAX 255 +#define DRBD_MINOR_COUNT_DEF 32 +#define DRBD_MINOR_COUNT_SCALE '1' + +#define DRBD_VOLUME_MAX 65535 + +#define DRBD_DIALOG_REFRESH_MIN 0 +#define DRBD_DIALOG_REFRESH_MAX 600 +#define DRBD_DIALOG_REFRESH_SCALE '1' + +/* valid port number */ +#define DRBD_PORT_MIN 1 +#define DRBD_PORT_MAX 0xffff +#define DRBD_PORT_SCALE '1' + +/* startup { */ + /* if you want more than 3.4 days, disable */ +#define DRBD_WFC_TIMEOUT_MIN 0 +#define DRBD_WFC_TIMEOUT_MAX 300000 +#define DRBD_WFC_TIMEOUT_DEF 0 +#define DRBD_WFC_TIMEOUT_SCALE '1' + +#define DRBD_DEGR_WFC_TIMEOUT_MIN 0 +#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000 +#define DRBD_DEGR_WFC_TIMEOUT_DEF 0 +#define DRBD_DEGR_WFC_TIMEOUT_SCALE '1' + +#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0 +#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000 +#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0 +#define DRBD_OUTDATED_WFC_TIMEOUT_SCALE '1' +/* }*/ + +/* net { */ + /* timeout, unit centi seconds + * more than one minute timeout is not useful */ +#define DRBD_TIMEOUT_MIN 1 +#define DRBD_TIMEOUT_MAX 600 +#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */ +#define DRBD_TIMEOUT_SCALE '1' + + /* If backing disk takes longer than disk_timeout, mark the disk as failed */ +#define DRBD_DISK_TIMEOUT_MIN 0 /* 0 = disabled */ +#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */ +#define DRBD_DISK_TIMEOUT_DEF 0 /* disabled */ +#define DRBD_DISK_TIMEOUT_SCALE '1' + + /* active connection retries when C_WF_CONNECTION */ +#define DRBD_CONNECT_INT_MIN 1 +#define DRBD_CONNECT_INT_MAX 120 +#define DRBD_CONNECT_INT_DEF 10 /* seconds */ +#define DRBD_CONNECT_INT_SCALE '1' + + /* keep-alive probes when idle */ +#define DRBD_PING_INT_MIN 1 +#define DRBD_PING_INT_MAX 120 +#define DRBD_PING_INT_DEF 10 +#define DRBD_PING_INT_SCALE '1' + + /* timeout for the ping packets.*/ +#define DRBD_PING_TIMEO_MIN 1 +#define DRBD_PING_TIMEO_MAX 300 +#define DRBD_PING_TIMEO_DEF 5 +#define DRBD_PING_TIMEO_SCALE '1' + + /* max number of write requests between write barriers */ +#define DRBD_MAX_EPOCH_SIZE_MIN 1 +#define DRBD_MAX_EPOCH_SIZE_MAX 20000 +#define DRBD_MAX_EPOCH_SIZE_DEF 2048 +#define DRBD_MAX_EPOCH_SIZE_SCALE 
'1' + + /* I don't think that a tcp send buffer of more than 10M is useful */ +#define DRBD_SNDBUF_SIZE_MIN 0 +#define DRBD_SNDBUF_SIZE_MAX (10<<20) +#define DRBD_SNDBUF_SIZE_DEF 0 +#define DRBD_SNDBUF_SIZE_SCALE '1' + +#define DRBD_RCVBUF_SIZE_MIN 0 +#define DRBD_RCVBUF_SIZE_MAX (10<<20) +#define DRBD_RCVBUF_SIZE_DEF 0 +#define DRBD_RCVBUF_SIZE_SCALE '1' + + /* @4k PageSize -> 128kB - 512MB */ +#define DRBD_MAX_BUFFERS_MIN 32 +#define DRBD_MAX_BUFFERS_MAX 131072 +#define DRBD_MAX_BUFFERS_DEF 2048 +#define DRBD_MAX_BUFFERS_SCALE '1' + + /* @4k PageSize -> 4kB - 512MB */ +#define DRBD_UNPLUG_WATERMARK_MIN 1 +#define DRBD_UNPLUG_WATERMARK_MAX 131072 +#define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16) +#define DRBD_UNPLUG_WATERMARK_SCALE '1' + + /* 0 is disabled. + * 200 should be more than enough even for very short timeouts */ +#define DRBD_KO_COUNT_MIN 0 +#define DRBD_KO_COUNT_MAX 200 +#define DRBD_KO_COUNT_DEF 7 +#define DRBD_KO_COUNT_SCALE '1' +/* } */ + +/* syncer { */ + /* FIXME allow rate to be zero? */ +#define DRBD_RESYNC_RATE_MIN 1 +/* channel bonding 10 GbE, or other hardware */ +#define DRBD_RESYNC_RATE_MAX (4 << 20) +#define DRBD_RESYNC_RATE_DEF 250 +#define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */ + +#define DRBD_AL_EXTENTS_MIN 67 + /* we use u16 as "slot number", (u16)~0 is "FREE". + * If you use >= 292 kB on-disk ring buffer, + * this is the maximum you can use: */ +#define DRBD_AL_EXTENTS_MAX 0xfffe +#define DRBD_AL_EXTENTS_DEF 1237 +#define DRBD_AL_EXTENTS_SCALE '1' + +#define DRBD_MINOR_NUMBER_MIN -1 +#define DRBD_MINOR_NUMBER_MAX ((1 << 20) - 1) +#define DRBD_MINOR_NUMBER_DEF -1 +#define DRBD_MINOR_NUMBER_SCALE '1' + +/* } */ + +/* drbdsetup XY resize -d Z + * you are free to reduce the device size to nothing, if you want to. + * the upper limit with 64bit kernel, enough ram and flexible meta data + * is 1 PiB, currently. */ +/* DRBD_MAX_SECTORS */ +#define DRBD_DISK_SIZE_MIN 0 +#define DRBD_DISK_SIZE_MAX (1 * (2LLU << 40)) +#define DRBD_DISK_SIZE_DEF 0 /* = disabled = no user size... 
*/ +#define DRBD_DISK_SIZE_SCALE 's' /* sectors */ + +#define DRBD_ON_IO_ERROR_DEF EP_DETACH +#define DRBD_FENCING_DEF FP_DONT_CARE +#define DRBD_AFTER_SB_0P_DEF ASB_DISCONNECT +#define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT +#define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT +#define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT +#define DRBD_ON_NO_DATA_DEF OND_IO_ERROR +#define DRBD_ON_CONGESTION_DEF OC_BLOCK +#define DRBD_READ_BALANCING_DEF RB_PREFER_LOCAL + +#define DRBD_MAX_BIO_BVECS_MIN 0 +#define DRBD_MAX_BIO_BVECS_MAX 128 +#define DRBD_MAX_BIO_BVECS_DEF 0 +#define DRBD_MAX_BIO_BVECS_SCALE '1' + +#define DRBD_C_PLAN_AHEAD_MIN 0 +#define DRBD_C_PLAN_AHEAD_MAX 300 +#define DRBD_C_PLAN_AHEAD_DEF 20 +#define DRBD_C_PLAN_AHEAD_SCALE '1' + +#define DRBD_C_DELAY_TARGET_MIN 1 +#define DRBD_C_DELAY_TARGET_MAX 100 +#define DRBD_C_DELAY_TARGET_DEF 10 +#define DRBD_C_DELAY_TARGET_SCALE '1' + +#define DRBD_C_FILL_TARGET_MIN 0 +#define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sec */ +#define DRBD_C_FILL_TARGET_DEF 100 /* Try to place 50KiB in socket send buffer during resync */ +#define DRBD_C_FILL_TARGET_SCALE 's' /* sectors */ + +#define DRBD_C_MAX_RATE_MIN 250 +#define DRBD_C_MAX_RATE_MAX (4 << 20) +#define DRBD_C_MAX_RATE_DEF 102400 +#define DRBD_C_MAX_RATE_SCALE 'k' /* kilobytes */ + +#define DRBD_C_MIN_RATE_MIN 0 +#define DRBD_C_MIN_RATE_MAX (4 << 20) +#define DRBD_C_MIN_RATE_DEF 250 +#define DRBD_C_MIN_RATE_SCALE 'k' /* kilobytes */ + +#define DRBD_CONG_FILL_MIN 0 +#define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */ +#define DRBD_CONG_FILL_DEF 0 +#define DRBD_CONG_FILL_SCALE 's' /* sectors */ + +#define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN +#define DRBD_CONG_EXTENTS_MAX DRBD_AL_EXTENTS_MAX +#define DRBD_CONG_EXTENTS_DEF DRBD_AL_EXTENTS_DEF +#define DRBD_CONG_EXTENTS_SCALE DRBD_AL_EXTENTS_SCALE + +#define DRBD_PROTOCOL_DEF DRBD_PROT_C + +#define DRBD_DISK_BARRIER_DEF 0 +#define DRBD_DISK_FLUSHES_DEF 1 +#define DRBD_DISK_DRAIN_DEF 1 +#define DRBD_MD_FLUSHES_DEF 1 +#define DRBD_TCP_CORK_DEF 1 +#define DRBD_AL_UPDATES_DEF 1 + +/* We used to ignore the discard_zeroes_data setting. + * To not change established (and expected) behaviour, + * by default assume that, for discard_zeroes_data=0, + * we can make that an effective discard_zeroes_data=1, + * if we only explicitly zero-out unaligned partial chunks. */ +#define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1 + +/* Some backends pretend to support WRITE SAME, + * but fail such requests when they are actually submitted. + * This is to tell DRBD to not even try. 
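[Editorial aside, not part of the patch.] The *_SCALE characters used throughout this file appear to follow the unit comments above (/* kilobytes */, /* sectors */): '1' marks a plain count, 'k' a value counted in KiB, and 's' a value counted in 512-byte sectors. That reading is an inference, so treat the following conversion helper as a hypothetical sketch rather than anything defined by drbd_limits.h:

static inline unsigned long long drbd_scaled_to_bytes(unsigned long long v, char scale)
{
	switch (scale) {
	case 'k':		/* value counts KiB */
		return v << 10;
	case 's':		/* value counts 512-byte sectors */
		return v << 9;
	case '1':		/* dimensionless count, returned unchanged */
	default:
		return v;
	}
}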
*/ +#define DRBD_DISABLE_WRITE_SAME_DEF 0 + +#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0 +#define DRBD_ALWAYS_ASBP_DEF 0 +#define DRBD_USE_RLE_DEF 1 +#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0 + +#define DRBD_AL_STRIPES_MIN 1 +#define DRBD_AL_STRIPES_MAX 1024 +#define DRBD_AL_STRIPES_DEF 1 +#define DRBD_AL_STRIPES_SCALE '1' + +#define DRBD_AL_STRIPE_SIZE_MIN 4 +#define DRBD_AL_STRIPE_SIZE_MAX 16777216 +#define DRBD_AL_STRIPE_SIZE_DEF 32 +#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */ + +#define DRBD_SOCKET_CHECK_TIMEO_MIN 0 +#define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX +#define DRBD_SOCKET_CHECK_TIMEO_DEF 0 +#define DRBD_SOCKET_CHECK_TIMEO_SCALE '1' + +#define DRBD_RS_DISCARD_GRANULARITY_MIN 0 +#define DRBD_RS_DISCARD_GRANULARITY_MAX (1<<20) /* 1MiByte */ +#define DRBD_RS_DISCARD_GRANULARITY_DEF 0 /* disabled by default */ +#define DRBD_RS_DISCARD_GRANULARITY_SCALE '1' /* bytes */ + +#endif diff --git a/include/linux/ds2782_battery.h b/include/linux/ds2782_battery.h new file mode 100644 index 000000000..fb6c97e10 --- /dev/null +++ b/include/linux/ds2782_battery.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_DS2782_BATTERY_H +#define __LINUX_DS2782_BATTERY_H + +struct ds278x_platform_data { + int rsns; +}; + +#endif diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h new file mode 100644 index 000000000..b4f22112b --- /dev/null +++ b/include/linux/dsa/lan9303.h @@ -0,0 +1,39 @@ +/* Included by drivers/net/dsa/lan9303.h and net/dsa/tag_lan9303.c */ +#include + +struct lan9303; + +struct lan9303_phy_ops { + /* PHY 1 and 2 access*/ + int (*phy_read)(struct lan9303 *chip, int port, int regnum); + int (*phy_write)(struct lan9303 *chip, int port, + int regnum, u16 val); +}; + +#define LAN9303_NUM_ALR_RECORDS 512 +struct lan9303_alr_cache_entry { + u8 mac_addr[ETH_ALEN]; + u8 port_map; /* Bitmap of ports. Zero if unused entry */ + u8 stp_override; /* non zero if set LAN9303_ALR_DAT1_AGE_OVERRID */ +}; + +struct lan9303 { + struct device *dev; + struct regmap *regmap; + struct regmap_irq_chip_data *irq_data; + struct gpio_desc *reset_gpio; + u32 reset_duration; /* in [ms] */ + int phy_addr_base; + struct dsa_switch *ds; + struct mutex indirect_mutex; /* protect indexed register access */ + struct mutex alr_mutex; /* protect ALR access */ + const struct lan9303_phy_ops *ops; + bool is_bridged; /* true if port 1 and 2 are bridged */ + + /* remember LAN9303_SWE_PORT_STATE while not bridged */ + u32 swe_port_state; + /* LAN9303 do not offer reading specific ALR entry. 
Cache all + * static entries in a flat table + **/ + struct lan9303_alr_cache_entry alr_cache[LAN9303_NUM_ALR_RECORDS]; +}; diff --git a/include/linux/dtlk.h b/include/linux/dtlk.h new file mode 100644 index 000000000..27b95e70b --- /dev/null +++ b/include/linux/dtlk.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#define DTLK_MINOR 0 +#define DTLK_IO_EXTENT 0x02 + + /* ioctl's use magic number of 0xa3 */ +#define DTLK_INTERROGATE 0xa390 /* get settings from the DoubleTalk */ +#define DTLK_STATUS 0xa391 /* get status from the DoubleTalk */ + + +#define DTLK_CLEAR 0x18 /* stops speech */ + +#define DTLK_MAX_RETRIES (loops_per_jiffy/(10000/HZ)) + + /* TTS Port Status Flags */ +#define TTS_READABLE 0x80 /* mask for bit which is nonzero if a + byte can be read from the TTS port */ +#define TTS_SPEAKING 0x40 /* mask for SYNC bit, which is nonzero + while DoubleTalk is producing + output with TTS, PCM or CVSD + synthesizers or tone generators + (that is, all but LPC) */ +#define TTS_SPEAKING2 0x20 /* mask for SYNC2 bit, + which falls to zero up to 0.4 sec + before speech stops */ +#define TTS_WRITABLE 0x10 /* mask for RDY bit, which when set to + 1, indicates the TTS port is ready + to accept a byte of data. The RDY + bit goes zero 2-3 usec after + writing, and goes 1 again 180-190 + usec later. */ +#define TTS_ALMOST_FULL 0x08 /* mask for AF bit: When set to 1, + indicates that less than 300 free + bytes are available in the TTS + input buffer. AF is always 0 in the + PCM, TGN and CVSD modes. */ +#define TTS_ALMOST_EMPTY 0x04 /* mask for AE bit: When set to 1, + indicates that less than 300 bytes + of data remain in DoubleTalk's + input (TTS or PCM) buffer. AE is + always 1 in the TGN and CVSD + modes. */ + + /* LPC speak commands */ +#define LPC_5220_NORMAL 0x60 /* 5220 format decoding table, normal rate */ +#define LPC_5220_FAST 0x64 /* 5220 format decoding table, fast rate */ +#define LPC_D6_NORMAL 0x20 /* D6 format decoding table, normal rate */ +#define LPC_D6_FAST 0x24 /* D6 format decoding table, fast rate */ + + /* LPC Port Status Flags (valid only after one of the LPC + speak commands) */ +#define LPC_SPEAKING 0x80 /* mask for TS bit: When set to 1, + indicates the LPC synthesizer is + producing speech.*/ +#define LPC_BUFFER_LOW 0x40 /* mask for BL bit: When set to 1, + indicates that the hardware LPC + data buffer has less than 30 bytes + remaining. (Total internal buffer + size = 4096 bytes.) */ +#define LPC_BUFFER_EMPTY 0x20 /* mask for BE bit: When set to 1, + indicates that the LPC data buffer + ran out of data (error condition if + TS is also 1). 
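[Editorial sketch, not part of the patch.] The TTS_* masks above are tested against the byte read from the DoubleTalk status port; a typical pattern is to poll until the RDY bit reports the port writable. The status port argument and the retry policy here are placeholders, not taken from this header:

#include <linux/delay.h>	/* loops_per_jiffy, used by DTLK_MAX_RETRIES */
#include <linux/errno.h>
#include <asm/io.h>

/* "status_port" is a hypothetical I/O port address, for illustration only */
static int dtlk_wait_tts_writable(unsigned int status_port)
{
	unsigned long retries = DTLK_MAX_RETRIES;

	while (retries--) {
		if (inb_p(status_port) & TTS_WRITABLE)	/* RDY set: one byte may be written */
			return 0;
	}
	return -EAGAIN;					/* synthesizer never became ready */
}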
*/ + + /* data returned by Interrogate command */ +struct dtlk_settings +{ + unsigned short serial_number; /* 0-7Fh:0-7Fh */ + unsigned char rom_version[24]; /* null terminated string */ + unsigned char mode; /* 0=Character; 1=Phoneme; 2=Text */ + unsigned char punc_level; /* nB; 0-7 */ + unsigned char formant_freq; /* nF; 0-9 */ + unsigned char pitch; /* nP; 0-99 */ + unsigned char speed; /* nS; 0-9 */ + unsigned char volume; /* nV; 0-9 */ + unsigned char tone; /* nX; 0-2 */ + unsigned char expression; /* nE; 0-9 */ + unsigned char ext_dict_loaded; /* 1=exception dictionary loaded */ + unsigned char ext_dict_status; /* 1=exception dictionary enabled */ + unsigned char free_ram; /* # pages (truncated) remaining for + text buffer */ + unsigned char articulation; /* nA; 0-9 */ + unsigned char reverb; /* nR; 0-9 */ + unsigned char eob; /* 7Fh value indicating end of + parameter block */ + unsigned char has_indexing; /* nonzero if indexing is implemented */ +}; diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h new file mode 100644 index 000000000..4334106f4 --- /dev/null +++ b/include/linux/dw_apb_timer.h @@ -0,0 +1,55 @@ +/* + * (C) Copyright 2009 Intel Corporation + * Author: Jacob Pan (jacob.jun.pan@intel.com) + * + * Shared with ARM platforms, Jamie Iles, Picochip 2011 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Support for the Synopsys DesignWare APB Timers. + */ +#ifndef __DW_APB_TIMER_H__ +#define __DW_APB_TIMER_H__ + +#include +#include +#include + +#define APBTMRS_REG_SIZE 0x14 + +struct dw_apb_timer { + void __iomem *base; + unsigned long freq; + int irq; +}; + +struct dw_apb_clock_event_device { + struct clock_event_device ced; + struct dw_apb_timer timer; + struct irqaction irqaction; + void (*eoi)(struct dw_apb_timer *); +}; + +struct dw_apb_clocksource { + struct dw_apb_timer timer; + struct clocksource cs; +}; + +void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced); +void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced); +void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced); +void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced); + +struct dw_apb_clock_event_device * +dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, + void __iomem *base, int irq, unsigned long freq); +struct dw_apb_clocksource * +dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, + unsigned long freq); +void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); +void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); +u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); + +#endif /* __DW_APB_TIMER_H__ */ diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h new file mode 100644 index 000000000..b3419da1a --- /dev/null +++ b/include/linux/dynamic_debug.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DYNAMIC_DEBUG_H +#define _DYNAMIC_DEBUG_H + +#if defined(CONFIG_JUMP_LABEL) +#include +#endif + +/* + * An instance of this structure is created in a special + * ELF section at every dynamic debug callsite. At runtime, + * the special section is treated as an array of these. + */ +struct _ddebug { + /* + * These fields are used to drive the user interface + * for selecting and displaying debug callsites. 
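[Editorial usage sketch, not part of the patch; the module name and message are invented.] With CONFIG_DYNAMIC_DEBUG enabled, the ordinary pr_debug() wrapper expands to dynamic_pr_debug(), so each call site gets one struct _ddebug record (as declared below) that can be switched on at runtime through the dynamic_debug control file:

#include <linux/module.h>
#include <linux/printk.h>

static int __init ddebug_demo_init(void)
{
	/* one _ddebug record is emitted for this call site; it is off by
	 * default (unless DEBUG is defined) and can be enabled at runtime */
	pr_debug("ddebug demo loaded, answer=%d\n", 42);
	return 0;
}
module_init(ddebug_demo_init);

static void __exit ddebug_demo_exit(void)
{
}
module_exit(ddebug_demo_exit);

MODULE_LICENSE("GPL");

The struct _ddebug definition continues below.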
+ */ + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno:18; + /* + * The flags field controls the behaviour at the callsite. + * The bits here are changed dynamically when the user + * writes commands to /dynamic_debug/control + */ +#define _DPRINTK_FLAGS_NONE 0 +#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */ +#define _DPRINTK_FLAGS_INCL_MODNAME (1<<1) +#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2) +#define _DPRINTK_FLAGS_INCL_LINENO (1<<3) +#define _DPRINTK_FLAGS_INCL_TID (1<<4) +#if defined DEBUG +#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT +#else +#define _DPRINTK_FLAGS_DEFAULT 0 +#endif + unsigned int flags:8; +#ifdef CONFIG_JUMP_LABEL + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; +#endif +} __attribute__((aligned(8))); + + +int ddebug_add_module(struct _ddebug *tab, unsigned int n, + const char *modname); + +#if defined(CONFIG_DYNAMIC_DEBUG) +extern int ddebug_remove_module(const char *mod_name); +extern __printf(2, 3) +void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); + +extern int ddebug_dyndbg_module_param_cb(char *param, char *val, + const char *modname); + +struct device; + +extern __printf(3, 4) +void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, + const char *fmt, ...); + +struct net_device; + +extern __printf(3, 4) +void __dynamic_netdev_dbg(struct _ddebug *descriptor, + const struct net_device *dev, + const char *fmt, ...); + +#define DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, key, init) \ + static struct _ddebug __aligned(8) \ + __attribute__((section("__verbose"))) name = { \ + .modname = KBUILD_MODNAME, \ + .function = __func__, \ + .filename = __FILE__, \ + .format = (fmt), \ + .lineno = __LINE__, \ + .flags = _DPRINTK_FLAGS_DEFAULT, \ + dd_key_init(key, init) \ + } + +#ifdef CONFIG_JUMP_LABEL + +#define dd_key_init(key, init) key = (init) + +#ifdef DEBUG +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ + DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_true, \ + (STATIC_KEY_TRUE_INIT)) + +#define DYNAMIC_DEBUG_BRANCH(descriptor) \ + static_branch_likely(&descriptor.key.dd_key_true) +#else +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ + DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_false, \ + (STATIC_KEY_FALSE_INIT)) + +#define DYNAMIC_DEBUG_BRANCH(descriptor) \ + static_branch_unlikely(&descriptor.key.dd_key_false) +#endif + +#else + +#define dd_key_init(key, init) + +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ + DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, 0, 0) + +#ifdef DEBUG +#define DYNAMIC_DEBUG_BRANCH(descriptor) \ + likely(descriptor.flags & _DPRINTK_FLAGS_PRINT) +#else +#define DYNAMIC_DEBUG_BRANCH(descriptor) \ + unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) +#endif + +#endif + +#define dynamic_pr_debug(fmt, ...) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ + __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ + ##__VA_ARGS__); \ +} while (0) + +#define dynamic_dev_dbg(dev, fmt, ...) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ + __dynamic_dev_dbg(&descriptor, dev, fmt, \ + ##__VA_ARGS__); \ +} while (0) + +#define dynamic_netdev_dbg(dev, fmt, ...) 
\ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ + __dynamic_netdev_dbg(&descriptor, dev, fmt, \ + ##__VA_ARGS__); \ +} while (0) + +#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \ + __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\ + if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ + print_hex_dump(KERN_DEBUG, prefix_str, \ + prefix_type, rowsize, groupsize, \ + buf, len, ascii); \ +} while (0) + +#else + +#include +#include + +static inline int ddebug_remove_module(const char *mod) +{ + return 0; +} + +static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, + const char *modname) +{ + if (strstr(param, "dyndbg")) { + /* avoid pr_warn(), which wants pr_fmt() fully defined */ + printk(KERN_WARNING "dyndbg param is supported only in " + "CONFIG_DYNAMIC_DEBUG builds\n"); + return 0; /* allow and ignore */ + } + return -EINVAL; +} + +#define dynamic_pr_debug(fmt, ...) \ + do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#define dynamic_dev_dbg(dev, fmt, ...) \ + do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) +#endif + +#endif diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h new file mode 100644 index 000000000..99fc06f0a --- /dev/null +++ b/include/linux/dynamic_queue_limits.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Dynamic queue limits (dql) - Definitions + * + * Copyright (c) 2011, Tom Herbert + * + * This header file contains the definitions for dynamic queue limits (dql). + * dql would be used in conjunction with a producer/consumer type queue + * (possibly a HW queue). Such a queue would have these general properties: + * + * 1) Objects are queued up to some limit specified as number of objects. + * 2) Periodically a completion process executes which retires consumed + * objects. + * 3) Starvation occurs when limit has been reached, all queued data has + * actually been consumed, but completion processing has not yet run + * so queuing new data is blocked. + * 4) Minimizing the amount of queued data is desirable. + * + * The goal of dql is to calculate the limit as the minimum number of objects + * needed to prevent starvation. + * + * The primary functions of dql are: + * dql_queued - called when objects are enqueued to record number of objects + * dql_avail - returns how many objects are available to be queued based + * on the object limit and how many objects are already enqueued + * dql_completed - called at completion time to indicate how many objects + * were retired from the queue + * + * The dql implementation does not implement any locking for the dql data + * structures, the higher layer should provide this. dql_queued should + * be serialized to prevent concurrent execution of the function; this + * is also true for dql_completed. However, dql_queued and dlq_completed can + * be executed concurrently (i.e. they can be protected by different locks). 
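[Editorial usage sketch, not part of the patch.] The producer/consumer pattern described in the comment above might look as follows; the my_* queue, hw_post_objects() and the stop/wake helpers are hypothetical stand-ins for whatever the caller uses, and locking is the caller's responsibility as noted:

/* enqueue path, serialized by the producer's lock */
static void my_xmit(struct my_queue *q, unsigned int count)
{
	hw_post_objects(q, count);		/* hand the objects to the consumer */
	dql_queued(&q->dql, count);		/* record them against the limit */
	if (dql_avail(&q->dql) < 0)		/* negative: over the current limit */
		my_stop_queue(q);		/* throttle the producer */
}

/* completion path, serialized by the consumer's lock */
static void my_tx_complete(struct my_queue *q, unsigned int completed)
{
	dql_completed(&q->dql, completed);	/* retire objects, recompute the limit */
	if (my_queue_stopped(q) && dql_avail(&q->dql) >= 0)
		my_wake_queue(q);		/* room available again below the limit */
}

The dql declarations themselves follow below.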
+ */ + +#ifndef _LINUX_DQL_H +#define _LINUX_DQL_H + +#ifdef __KERNEL__ + +struct dql { + /* Fields accessed in enqueue path (dql_queued) */ + unsigned int num_queued; /* Total ever queued */ + unsigned int adj_limit; /* limit + num_completed */ + unsigned int last_obj_cnt; /* Count at last queuing */ + + /* Fields accessed only by completion path (dql_completed) */ + + unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */ + unsigned int num_completed; /* Total ever completed */ + + unsigned int prev_ovlimit; /* Previous over limit */ + unsigned int prev_num_queued; /* Previous queue total */ + unsigned int prev_last_obj_cnt; /* Previous queuing cnt */ + + unsigned int lowest_slack; /* Lowest slack found */ + unsigned long slack_start_time; /* Time slacks seen */ + + /* Configuration */ + unsigned int max_limit; /* Max limit */ + unsigned int min_limit; /* Minimum limit */ + unsigned int slack_hold_time; /* Time to measure slack */ +}; + +/* Set some static maximums */ +#define DQL_MAX_OBJECT (UINT_MAX / 16) +#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT) + +/* + * Record number of objects queued. Assumes that caller has already checked + * availability in the queue with dql_avail. + */ +static inline void dql_queued(struct dql *dql, unsigned int count) +{ + BUG_ON(count > DQL_MAX_OBJECT); + + dql->last_obj_cnt = count; + + /* We want to force a write first, so that cpu do not attempt + * to get cache line containing last_obj_cnt, num_queued, adj_limit + * in Shared state, but directly does a Request For Ownership + * It is only a hint, we use barrier() only. + */ + barrier(); + + dql->num_queued += count; +} + +/* Returns how many objects can be queued, < 0 indicates over limit. */ +static inline int dql_avail(const struct dql *dql) +{ + return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued); +} + +/* Record number of completed objects and recalculate the limit. */ +void dql_completed(struct dql *dql, unsigned int count); + +/* Reset dql state */ +void dql_reset(struct dql *dql); + +/* Initialize dql state */ +void dql_init(struct dql *dql, unsigned int hold_time); + +#endif /* _KERNEL_ */ + +#endif /* _LINUX_DQL_H */ diff --git a/include/linux/earlycpio.h b/include/linux/earlycpio.h new file mode 100644 index 000000000..c70519267 --- /dev/null +++ b/include/linux/earlycpio.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_EARLYCPIO_H +#define _LINUX_EARLYCPIO_H + +#include + +#define MAX_CPIO_FILE_NAME 18 + +struct cpio_data { + void *data; + size_t size; + char name[MAX_CPIO_FILE_NAME]; +}; + +struct cpio_data find_cpio_data(const char *path, void *data, size_t len, + long *offset); + +#endif /* _LINUX_EARLYCPIO_H */ diff --git a/include/linux/ecryptfs.h b/include/linux/ecryptfs.h new file mode 100644 index 000000000..91e142abf --- /dev/null +++ b/include/linux/ecryptfs.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ECRYPTFS_H +#define _LINUX_ECRYPTFS_H + +/* Version verification for shared data structures w/ userspace */ +#define ECRYPTFS_VERSION_MAJOR 0x00 +#define ECRYPTFS_VERSION_MINOR 0x04 +#define ECRYPTFS_SUPPORTED_FILE_VERSION 0x03 +/* These flags indicate which features are supported by the kernel + * module; userspace tools such as the mount helper read the feature + * bits from a sysfs handle in order to determine how to behave. 
*/ +#define ECRYPTFS_VERSIONING_PASSPHRASE 0x00000001 +#define ECRYPTFS_VERSIONING_PUBKEY 0x00000002 +#define ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH 0x00000004 +#define ECRYPTFS_VERSIONING_POLICY 0x00000008 +#define ECRYPTFS_VERSIONING_XATTR 0x00000010 +#define ECRYPTFS_VERSIONING_MULTKEY 0x00000020 +#define ECRYPTFS_VERSIONING_DEVMISC 0x00000040 +#define ECRYPTFS_VERSIONING_HMAC 0x00000080 +#define ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION 0x00000100 +#define ECRYPTFS_VERSIONING_GCM 0x00000200 +#define ECRYPTFS_MAX_PASSWORD_LENGTH 64 +#define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH +#define ECRYPTFS_SALT_SIZE 8 +#define ECRYPTFS_SALT_SIZE_HEX (ECRYPTFS_SALT_SIZE*2) +/* The original signature size is only for what is stored on disk; all + * in-memory representations are expanded hex, so it better adapted to + * be passed around or referenced on the command line */ +#define ECRYPTFS_SIG_SIZE 8 +#define ECRYPTFS_SIG_SIZE_HEX (ECRYPTFS_SIG_SIZE*2) +#define ECRYPTFS_PASSWORD_SIG_SIZE ECRYPTFS_SIG_SIZE_HEX +#define ECRYPTFS_MAX_KEY_BYTES 64 +#define ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES 512 +#define ECRYPTFS_FILE_VERSION 0x03 +#define ECRYPTFS_MAX_PKI_NAME_BYTES 16 + +#define RFC2440_CIPHER_DES3_EDE 0x02 +#define RFC2440_CIPHER_CAST_5 0x03 +#define RFC2440_CIPHER_BLOWFISH 0x04 +#define RFC2440_CIPHER_AES_128 0x07 +#define RFC2440_CIPHER_AES_192 0x08 +#define RFC2440_CIPHER_AES_256 0x09 +#define RFC2440_CIPHER_TWOFISH 0x0a +#define RFC2440_CIPHER_CAST_6 0x0b + +#define RFC2440_CIPHER_RSA 0x01 + +/** + * For convenience, we may need to pass around the encrypted session + * key between kernel and userspace because the authentication token + * may not be extractable. For example, the TPM may not release the + * private key, instead requiring the encrypted data and returning the + * decrypted data. 
+ */ +struct ecryptfs_session_key { +#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT 0x00000001 +#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT 0x00000002 +#define ECRYPTFS_CONTAINS_DECRYPTED_KEY 0x00000004 +#define ECRYPTFS_CONTAINS_ENCRYPTED_KEY 0x00000008 + u32 flags; + u32 encrypted_key_size; + u32 decrypted_key_size; + u8 encrypted_key[ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES]; + u8 decrypted_key[ECRYPTFS_MAX_KEY_BYTES]; +}; + +struct ecryptfs_password { + u32 password_bytes; + s32 hash_algo; + u32 hash_iterations; + u32 session_key_encryption_key_bytes; +#define ECRYPTFS_PERSISTENT_PASSWORD 0x01 +#define ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET 0x02 + u32 flags; + /* Iterated-hash concatenation of salt and passphrase */ + u8 session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES]; + u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1]; + /* Always in expanded hex */ + u8 salt[ECRYPTFS_SALT_SIZE]; +}; + +enum ecryptfs_token_types {ECRYPTFS_PASSWORD, ECRYPTFS_PRIVATE_KEY}; + +struct ecryptfs_private_key { + u32 key_size; + u32 data_len; + u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1]; + char pki_type[ECRYPTFS_MAX_PKI_NAME_BYTES + 1]; + u8 data[]; +}; + +/* May be a password or a private key */ +struct ecryptfs_auth_tok { + u16 version; /* 8-bit major and 8-bit minor */ + u16 token_type; +#define ECRYPTFS_ENCRYPT_ONLY 0x00000001 + u32 flags; + struct ecryptfs_session_key session_key; + u8 reserved[32]; + union { + struct ecryptfs_password password; + struct ecryptfs_private_key private_key; + } token; +} __attribute__ ((packed)); + +#endif /* _LINUX_ECRYPTFS_H */ diff --git a/include/linux/edac.h b/include/linux/edac.h new file mode 100644 index 000000000..958d69332 --- /dev/null +++ b/include/linux/edac.h @@ -0,0 +1,676 @@ +/* + * Generic EDAC defs + * + * Author: Dave Jiang + * + * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + * + */ +#ifndef _LINUX_EDAC_H_ +#define _LINUX_EDAC_H_ + +#include +#include +#include +#include +#include +#include + +#define EDAC_DEVICE_NAME_LEN 31 + +struct device; + +#define EDAC_OPSTATE_INVAL -1 +#define EDAC_OPSTATE_POLL 0 +#define EDAC_OPSTATE_NMI 1 +#define EDAC_OPSTATE_INT 2 + +extern int edac_op_state; + +struct bus_type *edac_get_sysfs_subsys(void); +int edac_get_report_status(void); +void edac_set_report_status(int new); + +enum { + EDAC_REPORTING_ENABLED, + EDAC_REPORTING_DISABLED, + EDAC_REPORTING_FORCE +}; + +static inline void opstate_init(void) +{ + switch (edac_op_state) { + case EDAC_OPSTATE_POLL: + case EDAC_OPSTATE_NMI: + break; + default: + edac_op_state = EDAC_OPSTATE_POLL; + } + return; +} + +/* Max length of a DIMM label*/ +#define EDAC_MC_LABEL_LEN 31 + +/* Maximum size of the location string */ +#define LOCATION_SIZE 256 + +/* Defines the maximum number of labels that can be reported */ +#define EDAC_MAX_LABELS 8 + +/* String used to join two or more labels */ +#define OTHER_LABEL " or " + +/** + * enum dev_type - describe the type of memory DRAM chips used at the stick + * @DEV_UNKNOWN: Can't be determined, or MC doesn't support detect it + * @DEV_X1: 1 bit for data + * @DEV_X2: 2 bits for data + * @DEV_X4: 4 bits for data + * @DEV_X8: 8 bits for data + * @DEV_X16: 16 bits for data + * @DEV_X32: 32 bits for data + * @DEV_X64: 64 bits for data + * + * Typical values are x4 and x8. 
+ */ +enum dev_type { + DEV_UNKNOWN = 0, + DEV_X1, + DEV_X2, + DEV_X4, + DEV_X8, + DEV_X16, + DEV_X32, /* Do these parts exist? */ + DEV_X64 /* Do these parts exist? */ +}; + +#define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN) +#define DEV_FLAG_X1 BIT(DEV_X1) +#define DEV_FLAG_X2 BIT(DEV_X2) +#define DEV_FLAG_X4 BIT(DEV_X4) +#define DEV_FLAG_X8 BIT(DEV_X8) +#define DEV_FLAG_X16 BIT(DEV_X16) +#define DEV_FLAG_X32 BIT(DEV_X32) +#define DEV_FLAG_X64 BIT(DEV_X64) + +/** + * enum hw_event_mc_err_type - type of the detected error + * + * @HW_EVENT_ERR_CORRECTED: Corrected Error - Indicates that an ECC + * corrected error was detected + * @HW_EVENT_ERR_UNCORRECTED: Uncorrected Error - Indicates an error that + * can't be corrected by ECC, but it is not + * fatal (maybe it is on an unused memory area, + * or the memory controller could recover from + * it for example, by re-trying the operation). + * @HW_EVENT_ERR_DEFERRED: Deferred Error - Indicates an uncorrectable + * error whose handling is not urgent. This could + * be due to hardware data poisoning where the + * system can continue operation until the poisoned + * data is consumed. Preemptive measures may also + * be taken, e.g. offlining pages, etc. + * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not + * be recovered. + * @HW_EVENT_ERR_INFO: Informational - The CPER spec defines a forth + * type of error: informational logs. + */ +enum hw_event_mc_err_type { + HW_EVENT_ERR_CORRECTED, + HW_EVENT_ERR_UNCORRECTED, + HW_EVENT_ERR_DEFERRED, + HW_EVENT_ERR_FATAL, + HW_EVENT_ERR_INFO, +}; + +static inline char *mc_event_error_type(const unsigned int err_type) +{ + switch (err_type) { + case HW_EVENT_ERR_CORRECTED: + return "Corrected"; + case HW_EVENT_ERR_UNCORRECTED: + return "Uncorrected"; + case HW_EVENT_ERR_DEFERRED: + return "Deferred"; + case HW_EVENT_ERR_FATAL: + return "Fatal"; + default: + case HW_EVENT_ERR_INFO: + return "Info"; + } +} + +/** + * enum mem_type - memory types. For a more detailed reference, please see + * http://en.wikipedia.org/wiki/DRAM + * + * @MEM_EMPTY: Empty csrow + * @MEM_RESERVED: Reserved csrow type + * @MEM_UNKNOWN: Unknown csrow type + * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995. + * @MEM_EDO: EDO - Extended data out, used on systems up to 1998. + * @MEM_BEDO: BEDO - Burst Extended data out, an EDO variant. + * @MEM_SDR: SDR - Single data rate SDRAM + * http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory + * They use 3 pins for chip select: Pins 0 and 2 are + * for rank 0; pins 1 and 3 are for rank 1, if the memory + * is dual-rank. + * @MEM_RDR: Registered SDR SDRAM + * @MEM_DDR: Double data rate SDRAM + * http://en.wikipedia.org/wiki/DDR_SDRAM + * @MEM_RDDR: Registered Double data rate SDRAM + * This is a variant of the DDR memories. + * A registered memory has a buffer inside it, hiding + * part of the memory details to the memory controller. + * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers. + * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F. + * Those memories are labeled as "PC2-" instead of "PC" to + * differentiate from DDR. + * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205 + * and JESD206. + * Those memories are accessed per DIMM slot, and not by + * a chip select signal. + * @MEM_RDDR2: Registered DDR2 RAM + * This is a variant of the DDR2 memories. + * @MEM_XDR: Rambus XDR + * It is an evolution of the original RAMBUS memories, + * created to compete with DDR2. 
Weren't used on any + * x86 arch, but cell_edac PPC memory controller uses it. + * @MEM_DDR3: DDR3 RAM + * @MEM_RDDR3: Registered DDR3 RAM + * This is a variant of the DDR3 memories. + * @MEM_LRDDR3: Load-Reduced DDR3 memory. + * @MEM_DDR4: Unbuffered DDR4 RAM + * @MEM_RDDR4: Registered DDR4 RAM + * This is a variant of the DDR4 memories. + * @MEM_LRDDR4: Load-Reduced DDR4 memory. + * @MEM_NVDIMM: Non-volatile RAM + */ +enum mem_type { + MEM_EMPTY = 0, + MEM_RESERVED, + MEM_UNKNOWN, + MEM_FPM, + MEM_EDO, + MEM_BEDO, + MEM_SDR, + MEM_RDR, + MEM_DDR, + MEM_RDDR, + MEM_RMBS, + MEM_DDR2, + MEM_FB_DDR2, + MEM_RDDR2, + MEM_XDR, + MEM_DDR3, + MEM_RDDR3, + MEM_LRDDR3, + MEM_DDR4, + MEM_RDDR4, + MEM_LRDDR4, + MEM_NVDIMM, +}; + +#define MEM_FLAG_EMPTY BIT(MEM_EMPTY) +#define MEM_FLAG_RESERVED BIT(MEM_RESERVED) +#define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN) +#define MEM_FLAG_FPM BIT(MEM_FPM) +#define MEM_FLAG_EDO BIT(MEM_EDO) +#define MEM_FLAG_BEDO BIT(MEM_BEDO) +#define MEM_FLAG_SDR BIT(MEM_SDR) +#define MEM_FLAG_RDR BIT(MEM_RDR) +#define MEM_FLAG_DDR BIT(MEM_DDR) +#define MEM_FLAG_RDDR BIT(MEM_RDDR) +#define MEM_FLAG_RMBS BIT(MEM_RMBS) +#define MEM_FLAG_DDR2 BIT(MEM_DDR2) +#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) +#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) +#define MEM_FLAG_XDR BIT(MEM_XDR) +#define MEM_FLAG_DDR3 BIT(MEM_DDR3) +#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) +#define MEM_FLAG_DDR4 BIT(MEM_DDR4) +#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) +#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4) +#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) + +/** + * enum edac-type - Error Detection and Correction capabilities and mode + * @EDAC_UNKNOWN: Unknown if ECC is available + * @EDAC_NONE: Doesn't support ECC + * @EDAC_RESERVED: Reserved ECC type + * @EDAC_PARITY: Detects parity errors + * @EDAC_EC: Error Checking - no correction + * @EDAC_SECDED: Single bit error correction, Double detection + * @EDAC_S2ECD2ED: Chipkill x2 devices - do these exist? 
+ * @EDAC_S4ECD4ED: Chipkill x4 devices + * @EDAC_S8ECD8ED: Chipkill x8 devices + * @EDAC_S16ECD16ED: Chipkill x16 devices + */ +enum edac_type { + EDAC_UNKNOWN = 0, + EDAC_NONE, + EDAC_RESERVED, + EDAC_PARITY, + EDAC_EC, + EDAC_SECDED, + EDAC_S2ECD2ED, + EDAC_S4ECD4ED, + EDAC_S8ECD8ED, + EDAC_S16ECD16ED, +}; + +#define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN) +#define EDAC_FLAG_NONE BIT(EDAC_NONE) +#define EDAC_FLAG_PARITY BIT(EDAC_PARITY) +#define EDAC_FLAG_EC BIT(EDAC_EC) +#define EDAC_FLAG_SECDED BIT(EDAC_SECDED) +#define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED) +#define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED) +#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) +#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) + +/** + * enum scrub_type - scrubbing capabilities + * @SCRUB_UNKNOWN: Unknown if scrubber is available + * @SCRUB_NONE: No scrubber + * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing + * @SCRUB_SW_SRC: Software scrub only errors + * @SCRUB_SW_PROG_SRC: Progressive software scrub from an error + * @SCRUB_SW_TUNABLE: Software scrub frequency is tunable + * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing + * @SCRUB_HW_SRC: Hardware scrub only errors + * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error + * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable + */ +enum scrub_type { + SCRUB_UNKNOWN = 0, + SCRUB_NONE, + SCRUB_SW_PROG, + SCRUB_SW_SRC, + SCRUB_SW_PROG_SRC, + SCRUB_SW_TUNABLE, + SCRUB_HW_PROG, + SCRUB_HW_SRC, + SCRUB_HW_PROG_SRC, + SCRUB_HW_TUNABLE +}; + +#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG) +#define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC) +#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC) +#define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_SCRUB_TUNABLE) +#define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG) +#define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC) +#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC) +#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) + +/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ + +/* EDAC internal operation states */ +#define OP_ALLOC 0x100 +#define OP_RUNNING_POLL 0x201 +#define OP_RUNNING_INTERRUPT 0x202 +#define OP_RUNNING_POLL_INTR 0x203 +#define OP_OFFLINE 0x300 + +/** + * enum edac_mc_layer - memory controller hierarchy layer + * + * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch" + * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel" + * @EDAC_MC_LAYER_SLOT: memory layer is named "slot" + * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select" + * @EDAC_MC_LAYER_ALL_MEM: memory layout is unknown. All memory is mapped + * as a single memory area. This is used when + * retrieving errors from a firmware driven driver. + * + * This enum is used by the drivers to tell edac_mc_sysfs what name should + * be used when describing a memory stick location. + */ +enum edac_mc_layer_type { + EDAC_MC_LAYER_BRANCH, + EDAC_MC_LAYER_CHANNEL, + EDAC_MC_LAYER_SLOT, + EDAC_MC_LAYER_CHIP_SELECT, + EDAC_MC_LAYER_ALL_MEM, +}; + +/** + * struct edac_mc_layer - describes the memory controller hierarchy + * @type: layer type + * @size: number of components per layer. For example, + * if the channel layer has two channels, size = 2 + * @is_virt_csrow: This layer is part of the "csrow" when old API + * compatibility mode is enabled. Otherwise, it is + * a channel + */ +struct edac_mc_layer { + enum edac_mc_layer_type type; + unsigned size; + bool is_virt_csrow; +}; + +/* + * Maximum number of layers used by the memory controller to uniquely + * identify a single memory stick. 
+ * NOTE: Changing this constant requires not only to change the constant + * below, but also to change the existing code at the core, as there are + * some code there that are optimized for 3 layers. + */ +#define EDAC_MAX_LAYERS 3 + +/** + * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer + * array for the element given by [layer0,layer1,layer2] + * position + * + * @layers: a struct edac_mc_layer array, describing how many elements + * were allocated for each layer + * @nlayers: Number of layers at the @layers array + * @layer0: layer0 position + * @layer1: layer1 position. Unused if n_layers < 2 + * @layer2: layer2 position. Unused if n_layers < 3 + * + * For 1 layer, this macro returns "var[layer0] - var"; + * + * For 2 layers, this macro is similar to allocate a bi-dimensional array + * and to return "var[layer0][layer1] - var"; + * + * For 3 layers, this macro is similar to allocate a tri-dimensional array + * and to return "var[layer0][layer1][layer2] - var". + * + * A loop could be used here to make it more generic, but, as we only have + * 3 layers, this is a little faster. + * + * By design, layers can never be 0 or more than 3. If that ever happens, + * a NULL is returned, causing an OOPS during the memory allocation routine, + * with would point to the developer that he's doing something wrong. + */ +#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \ + int __i; \ + if ((nlayers) == 1) \ + __i = layer0; \ + else if ((nlayers) == 2) \ + __i = (layer1) + ((layers[1]).size * (layer0)); \ + else if ((nlayers) == 3) \ + __i = (layer2) + ((layers[2]).size * ((layer1) + \ + ((layers[1]).size * (layer0)))); \ + else \ + __i = -EINVAL; \ + __i; \ +}) + +/** + * EDAC_DIMM_PTR - Macro responsible to get a pointer inside a pointer array + * for the element given by [layer0,layer1,layer2] position + * + * @layers: a struct edac_mc_layer array, describing how many elements + * were allocated for each layer + * @var: name of the var where we want to get the pointer + * (like mci->dimms) + * @nlayers: Number of layers at the @layers array + * @layer0: layer0 position + * @layer1: layer1 position. Unused if n_layers < 2 + * @layer2: layer2 position. 
Unused if n_layers < 3 + * + * For 1 layer, this macro returns "var[layer0]"; + * + * For 2 layers, this macro is similar to allocate a bi-dimensional array + * and to return "var[layer0][layer1]"; + * + * For 3 layers, this macro is similar to allocate a tri-dimensional array + * and to return "var[layer0][layer1][layer2]"; + */ +#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ + typeof(*var) __p; \ + int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \ + if (___i < 0) \ + __p = NULL; \ + else \ + __p = (var)[___i]; \ + __p; \ +}) + +struct dimm_info { + struct device dev; + + char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ + + /* Memory location data */ + unsigned location[EDAC_MAX_LAYERS]; + + struct mem_ctl_info *mci; /* the parent */ + + u32 grain; /* granularity of reported error in bytes */ + enum dev_type dtype; /* memory device type */ + enum mem_type mtype; /* memory dimm type */ + enum edac_type edac_mode; /* EDAC mode for this dimm */ + + u32 nr_pages; /* number of pages on this dimm */ + + unsigned csrow, cschannel; /* Points to the old API data */ +}; + +/** + * struct rank_info - contains the information for one DIMM rank + * + * @chan_idx: channel number where the rank is (typically, 0 or 1) + * @ce_count: number of correctable errors for this rank + * @csrow: A pointer to the chip select row structure (the parent + * structure). The location of the rank is given by + * the (csrow->csrow_idx, chan_idx) vector. + * @dimm: A pointer to the DIMM structure, where the DIMM label + * information is stored. + * + * FIXME: Currently, the EDAC core model will assume one DIMM per rank. + * This is a bad assumption, but it makes this patch easier. Later + * patches in this series will fix this issue. 
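To illustrate the layer description and the lookup macros above (this sketch is not part of the patch): a driver for a controller with two channels of four DIMM slots each could describe its hierarchy with a two-entry edac_mc_layer array and then use EDAC_DIMM_PTR to reach the dimm_info for a given (channel, slot) position in mci->dimms. The NUM_CHANNELS and DIMMS_PER_CHANNEL constants and the helper name are assumptions; only the types and macros come from this header. In the EDAC core the same layers array is also what the memory-controller allocation routine consumes, which is not shown here.

#include <linux/edac.h>
#include <linux/kernel.h>

#define NUM_CHANNELS		2
#define DIMMS_PER_CHANNEL	4

/* Two layers: layer 0 = channel, layer 1 = slot within the channel. */
static struct edac_mc_layer layers[] = {
	{ .type = EDAC_MC_LAYER_CHANNEL, .size = NUM_CHANNELS,      .is_virt_csrow = false },
	{ .type = EDAC_MC_LAYER_SLOT,    .size = DIMMS_PER_CHANNEL, .is_virt_csrow = true  },
};

/* Look up the dimm_info for a (channel, slot) position; layer2 is unused. */
static struct dimm_info *example_get_dimm(struct mem_ctl_info *mci,
					  int chan, int slot)
{
	return EDAC_DIMM_PTR(layers, mci->dimms, ARRAY_SIZE(layers),
			     chan, slot, 0);
}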
+ */ +struct rank_info { + int chan_idx; + struct csrow_info *csrow; + struct dimm_info *dimm; + + u32 ce_count; /* Correctable Errors for this csrow */ +}; + +struct csrow_info { + struct device dev; + + /* Used only by edac_mc_find_csrow_by_page() */ + unsigned long first_page; /* first page number in csrow */ + unsigned long last_page; /* last page number in csrow */ + unsigned long page_mask; /* used for interleaving - + * 0UL for non intlv */ + + int csrow_idx; /* the chip-select row */ + + u32 ue_count; /* Uncorrectable Errors for this csrow */ + u32 ce_count; /* Correctable Errors for this csrow */ + + struct mem_ctl_info *mci; /* the parent */ + + /* channel information for this csrow */ + u32 nr_channels; + struct rank_info **channels; +}; + +/* + * struct errcount_attribute - used to store the several error counts + */ +struct errcount_attribute_data { + int n_layers; + int pos[EDAC_MAX_LAYERS]; + int layer0, layer1, layer2; +}; + +/** + * struct edac_raw_error_desc - Raw error report structure + * @grain: minimum granularity for an error report, in bytes + * @error_count: number of errors of the same type + * @top_layer: top layer of the error (layer[0]) + * @mid_layer: middle layer of the error (layer[1]) + * @low_layer: low layer of the error (layer[2]) + * @page_frame_number: page where the error happened + * @offset_in_page: page offset + * @syndrome: syndrome of the error (or 0 if unknown or if + * the syndrome is not applicable) + * @msg: error message + * @location: location of the error + * @label: label of the affected DIMM(s) + * @other_detail: other driver-specific detail about the error + * @enable_per_layer_report: if false, the error affects all layers + * (typically, a memory controller error) + */ +struct edac_raw_error_desc { + /* + * NOTE: everything before grain won't be cleaned by + * edac_raw_error_desc_clean() + */ + char location[LOCATION_SIZE]; + char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS]; + long grain; + + /* the vars below and grain will be cleaned on every new error report */ + u16 error_count; + int top_layer; + int mid_layer; + int low_layer; + unsigned long page_frame_number; + unsigned long offset_in_page; + unsigned long syndrome; + const char *msg; + const char *other_detail; + bool enable_per_layer_report; +}; + +/* MEMORY controller information structure + */ +struct mem_ctl_info { + struct device dev; + struct bus_type *bus; + + struct list_head link; /* for global list of mem_ctl_info structs */ + + struct module *owner; /* Module owner of this control struct */ + + unsigned long mtype_cap; /* memory types supported by mc */ + unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ + unsigned long edac_cap; /* configuration capabilities - this is + * closely related to edac_ctl_cap. The + * difference is that the controller may be + * capable of s4ecd4ed which would be listed + * in edac_ctl_cap, but if channels aren't + * capable of s4ecd4ed then the edac_cap would + * not have that capability. + */ + unsigned long scrub_cap; /* chipset scrub capabilities */ + enum scrub_type scrub_mode; /* current scrub mode */ + + /* Translates sdram memory scrub rate given in bytes/sec to the + internal representation and configures whatever else needs + to be configured. + */ + int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw); + + /* Get the current sdram memory scrub rate from the internal + representation and converts it to the closest matching + bandwidth in bytes/sec. 
+ */ + int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci); + + + /* pointer to edac checking routine */ + void (*edac_check) (struct mem_ctl_info * mci); + + /* + * Remaps memory pages: controller pages to physical pages. + * For most MC's, this will be NULL. + */ + /* FIXME - why not send the phys page to begin with? */ + unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, + unsigned long page); + int mc_idx; + struct csrow_info **csrows; + unsigned nr_csrows, num_cschannel; + + /* + * Memory Controller hierarchy + * + * There are basically two types of memory controller: the ones that + * sees memory sticks ("dimms"), and the ones that sees memory ranks. + * All old memory controllers enumerate memories per rank, but most + * of the recent drivers enumerate memories per DIMM, instead. + * When the memory controller is per rank, csbased is true. + */ + unsigned n_layers; + struct edac_mc_layer *layers; + bool csbased; + + /* + * DIMM info. Will eventually remove the entire csrows_info some day + */ + unsigned tot_dimms; + struct dimm_info **dimms; + + /* + * FIXME - what about controllers on other busses? - IDs must be + * unique. dev pointer should be sufficiently unique, but + * BUS:SLOT.FUNC numbers may not be unique. + */ + struct device *pdev; + const char *mod_name; + const char *ctl_name; + const char *dev_name; + void *pvt_info; + unsigned long start_time; /* mci load start time (in jiffies) */ + + /* + * drivers shouldn't access those fields directly, as the core + * already handles that. + */ + u32 ce_noinfo_count, ue_noinfo_count; + u32 ue_mc, ce_mc; + u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; + + struct completion complete; + + /* Additional top controller level attributes, but specified + * by the low level driver. + * + * Set by the low level driver to provide attributes at the + * controller level. + * An array of structures, NULL terminated + * + * If attributes are desired, then set to array of attributes + * If no attributes are desired, leave NULL + */ + const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; + + /* work struct for this MC */ + struct delayed_work work; + + /* + * Used to report an error - by being at the global struct + * makes the memory allocated by the EDAC core + */ + struct edac_raw_error_desc error_desc; + + /* the internal state of this controller instance */ + int op_state; + + struct dentry *debugfs; + u8 fake_inject_layer[EDAC_MAX_LAYERS]; + bool fake_inject_ue; + u16 fake_inject_count; +}; + +/* + * Maximum number of memory controllers in the coherent fabric. + */ +#define EDAC_MAX_MCS 2 * MAX_NUMNODES + +#endif diff --git a/include/linux/edd.h b/include/linux/edd.h new file mode 100644 index 000000000..83d4371ec --- /dev/null +++ b/include/linux/edd.h @@ -0,0 +1,38 @@ +/* + * linux/include/linux/edd.h + * Copyright (C) 2002, 2003, 2004 Dell Inc. + * by Matt Domsch + * + * structures and definitions for the int 13h, ax={41,48}h + * BIOS Enhanced Disk Drive Services + * This is based on the T13 group document D1572 Revision 0 (August 14 2002) + * available at http://www.t13.org/docs2002/d1572r0.pdf. It is + * very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf + * + * In a nutshell, arch/{i386,x86_64}/boot/setup.S populates a scratch + * table in the boot_params that contains a list of BIOS-enumerated + * boot devices. 
+ * In arch/{i386,x86_64}/kernel/setup.c, this information is + * transferred into the edd structure, and in drivers/firmware/edd.c, that + * information is used to identify BIOS boot disk. The code in setup.S + * is very sensitive to the size of these structures. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef _LINUX_EDD_H +#define _LINUX_EDD_H + +#include + +#ifndef __ASSEMBLY__ +extern struct edd edd; +#endif /*!__ASSEMBLY__ */ +#endif /* _LINUX_EDD_H */ diff --git a/include/linux/edma.h b/include/linux/edma.h new file mode 100644 index 000000000..a1307e782 --- /dev/null +++ b/include/linux/edma.h @@ -0,0 +1,29 @@ +/* + * TI EDMA DMA engine driver + * + * Copyright 2012 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __LINUX_EDMA_H +#define __LINUX_EDMA_H + +struct dma_chan; + +#if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE) +bool edma_filter_fn(struct dma_chan *, void *); +#else +static inline bool edma_filter_fn(struct dma_chan *chan, void *param) +{ + return false; +} +#endif + +#endif diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h new file mode 100644 index 000000000..eb0b19880 --- /dev/null +++ b/include/linux/eeprom_93cx6.h @@ -0,0 +1,86 @@ +/* + Copyright (C) 2004 - 2006 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: eeprom_93cx6 + Abstract: EEPROM reader datastructures for 93cx6 chipsets. + Supported chipsets: 93c46, 93c56 and 93c66. + */ + +/* + * EEPROM operation defines. + */ +#define PCI_EEPROM_WIDTH_93C46 6 +#define PCI_EEPROM_WIDTH_93C56 8 +#define PCI_EEPROM_WIDTH_93C66 8 +#define PCI_EEPROM_WIDTH_93C86 8 +#define PCI_EEPROM_WIDTH_OPCODE 3 +#define PCI_EEPROM_WRITE_OPCODE 0x05 +#define PCI_EEPROM_ERASE_OPCODE 0x07 +#define PCI_EEPROM_READ_OPCODE 0x06 +#define PCI_EEPROM_EWDS_OPCODE 0x10 +#define PCI_EEPROM_EWEN_OPCODE 0x13 + +/** + * struct eeprom_93cx6 - control structure for setting the commands + * for reading the eeprom data. + * @data: private pointer for the driver. 
+ * @register_read(struct eeprom_93cx6 *eeprom): handler to + * read the eeprom register, this function should set all reg_* fields. + * @register_write(struct eeprom_93cx6 *eeprom): handler to + * write to the eeprom register by using all reg_* fields. + * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines + * @drive_data: Set if we're driving the data line. + * @reg_data_in: register field to indicate data input + * @reg_data_out: register field to indicate data output + * @reg_data_clock: register field to set the data clock + * @reg_chip_select: register field to set the chip select + * + * This structure is used for the communication between the driver + * and the eeprom_93cx6 handlers for reading the eeprom. + */ +struct eeprom_93cx6 { + void *data; + + void (*register_read)(struct eeprom_93cx6 *eeprom); + void (*register_write)(struct eeprom_93cx6 *eeprom); + + int width; + + char drive_data; + char reg_data_in; + char reg_data_out; + char reg_data_clock; + char reg_chip_select; +}; + +extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, + const u8 word, u16 *data); +extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, + const u8 word, __le16 *data, const u16 words); +extern void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, + const u8 byte, u8 *data); +extern void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, + const u8 byte, u8 *data, const u16 bytes); + +extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); + +extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, + u8 addr, u16 data); diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h new file mode 100644 index 000000000..99580c22f --- /dev/null +++ b/include/linux/eeprom_93xx46.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Module: eeprom_93xx46 + * platform description for 93xx46 EEPROMs. + */ +#include + +struct eeprom_93xx46_platform_data { + unsigned char flags; +#define EE_ADDR8 0x01 /* 8 bit addr. cfg */ +#define EE_ADDR16 0x02 /* 16 bit addr. cfg */ +#define EE_READONLY 0x08 /* forbid writing */ + + unsigned int quirks; +/* Single word read transfers only; no sequential read. */ +#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) +/* Instructions such as EWEN are (addrlen + 2) in length. */ +#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) +/* Add extra cycle after address during a read */ +#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2) + + /* + * optional hooks to control additional logic + * before and after spi transfer. + */ + void (*prepare)(void *); + void (*finish)(void *); + struct gpio_desc *select; +}; diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h new file mode 100644 index 000000000..e6cd51005 --- /dev/null +++ b/include/linux/efi-bgrt.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_EFI_BGRT_H +#define _LINUX_EFI_BGRT_H + +#include + +#ifdef CONFIG_ACPI_BGRT + +void efi_bgrt_init(struct acpi_table_header *table); +int __init acpi_parse_bgrt(struct acpi_table_header *table); + +/* The BGRT data itself; only valid if bgrt_image != NULL. 
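As a usage sketch of the eeprom_93cx6 control structure declared above (this sketch is not part of the patch): register_read() mirrors the device's EEPROM control register into the reg_* fields and register_write() writes them back, then the core helpers clock the serial EEPROM through those callbacks. The struct my_dev type, its csr_base mapping, the MY_EEPROM_* register offset and bit layout, and the function names are all hypothetical; only struct eeprom_93cx6, PCI_EEPROM_WIDTH_93C46 and eeprom_93cx6_read() come from this header.

#include <linux/types.h>
#include <linux/io.h>
#include <linux/eeprom_93cx6.h>

/* Hypothetical EEPROM control register and its bit layout. */
#define MY_EEPROM_REG		0x50
#define MY_EEPROM_DATA_IN	0x01
#define MY_EEPROM_DATA_OUT	0x02
#define MY_EEPROM_CLOCK		0x04
#define MY_EEPROM_CHIP_SELECT	0x08

struct my_dev {
	void __iomem *csr_base;
};

/* Mirror the hardware register into the reg_* fields. */
static void my_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
	struct my_dev *dev = eeprom->data;
	u32 reg = readl(dev->csr_base + MY_EEPROM_REG);

	eeprom->reg_data_in = !!(reg & MY_EEPROM_DATA_IN);
	eeprom->reg_data_out = !!(reg & MY_EEPROM_DATA_OUT);
	eeprom->reg_data_clock = !!(reg & MY_EEPROM_CLOCK);
	eeprom->reg_chip_select = !!(reg & MY_EEPROM_CHIP_SELECT);
}

/* Write the reg_* fields back to the hardware register. */
static void my_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
	struct my_dev *dev = eeprom->data;
	u32 reg = 0;

	if (eeprom->reg_data_in)
		reg |= MY_EEPROM_DATA_IN;
	if (eeprom->reg_data_out)
		reg |= MY_EEPROM_DATA_OUT;
	if (eeprom->reg_data_clock)
		reg |= MY_EEPROM_CLOCK;
	if (eeprom->reg_chip_select)
		reg |= MY_EEPROM_CHIP_SELECT;

	writel(reg, dev->csr_base + MY_EEPROM_REG);
}

/* Read one 16-bit word from a 93c46-sized EEPROM. */
static u16 my_read_eeprom_word(struct my_dev *dev, u8 word)
{
	struct eeprom_93cx6 eeprom = {
		.data		= dev,
		.register_read	= my_eeprom_register_read,
		.register_write	= my_eeprom_register_write,
		.width		= PCI_EEPROM_WIDTH_93C46,
	};
	u16 value;

	eeprom_93cx6_read(&eeprom, word, &value);
	return value;
}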
*/ +extern size_t bgrt_image_size; +extern struct acpi_table_bgrt bgrt_tab; + +#else /* !CONFIG_ACPI_BGRT */ + +static inline void efi_bgrt_init(struct acpi_table_header *table) {} +static inline int __init acpi_parse_bgrt(struct acpi_table_header *table) +{ + return 0; +} + +#endif /* !CONFIG_ACPI_BGRT */ + +#endif /* _LINUX_EFI_BGRT_H */ diff --git a/include/linux/efi.h b/include/linux/efi.h new file mode 100644 index 000000000..ec89e8bcc --- /dev/null +++ b/include/linux/efi.h @@ -0,0 +1,1712 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_EFI_H +#define _LINUX_EFI_H + +/* + * Extensible Firmware Interface + * Based on 'Extensible Firmware Interface Specification' version 0.9, April 30, 1999 + * + * Copyright (C) 1999 VA Linux Systems + * Copyright (C) 1999 Walt Drummond + * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co. + * David Mosberger-Tang + * Stephane Eranian + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define EFI_SUCCESS 0 +#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1))) +#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1))) +#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1))) +#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1))) +#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1))) +#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1))) +#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1))) +#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1))) +#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1))) +#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1))) +#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1))) +#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1))) + +typedef unsigned long efi_status_t; +typedef u8 efi_bool_t; +typedef u16 efi_char16_t; /* UNICODE character */ +typedef u64 efi_physical_addr_t; +typedef void *efi_handle_t; + +typedef guid_t efi_guid_t; + +#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ + GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) + +/* + * Generic EFI table header + */ +typedef struct { + u64 signature; + u32 revision; + u32 headersize; + u32 crc32; + u32 reserved; +} efi_table_hdr_t; + +/* + * Memory map descriptor: + */ + +/* Memory types: */ +#define EFI_RESERVED_TYPE 0 +#define EFI_LOADER_CODE 1 +#define EFI_LOADER_DATA 2 +#define EFI_BOOT_SERVICES_CODE 3 +#define EFI_BOOT_SERVICES_DATA 4 +#define EFI_RUNTIME_SERVICES_CODE 5 +#define EFI_RUNTIME_SERVICES_DATA 6 +#define EFI_CONVENTIONAL_MEMORY 7 +#define EFI_UNUSABLE_MEMORY 8 +#define EFI_ACPI_RECLAIM_MEMORY 9 +#define EFI_ACPI_MEMORY_NVS 10 +#define EFI_MEMORY_MAPPED_IO 11 +#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12 +#define EFI_PAL_CODE 13 +#define EFI_PERSISTENT_MEMORY 14 +#define EFI_MAX_MEMORY_TYPE 15 + +/* Attribute values: */ +#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */ +#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */ +#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */ +#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */ +#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */ +#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ +#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ +#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ +#define EFI_MEMORY_NV ((u64)0x0000000000008000ULL) /* non-volatile */ +#define 
EFI_MEMORY_MORE_RELIABLE \ + ((u64)0x0000000000010000ULL) /* higher reliability */ +#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */ +#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ +#define EFI_MEMORY_DESCRIPTOR_VERSION 1 + +#define EFI_PAGE_SHIFT 12 +#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT) +#define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT) + +typedef struct { + u32 type; + u32 pad; + u64 phys_addr; + u64 virt_addr; + u64 num_pages; + u64 attribute; +} efi_memory_desc_t; + +typedef struct { + efi_guid_t guid; + u32 headersize; + u32 flags; + u32 imagesize; +} efi_capsule_header_t; + +struct efi_boot_memmap { + efi_memory_desc_t **map; + unsigned long *map_size; + unsigned long *desc_size; + u32 *desc_ver; + unsigned long *key_ptr; + unsigned long *buff_size; +}; + +/* + * EFI capsule flags + */ +#define EFI_CAPSULE_PERSIST_ACROSS_RESET 0x00010000 +#define EFI_CAPSULE_POPULATE_SYSTEM_TABLE 0x00020000 +#define EFI_CAPSULE_INITIATE_RESET 0x00040000 + +struct capsule_info { + efi_capsule_header_t header; + efi_capsule_header_t *capsule; + int reset_type; + long index; + size_t count; + size_t total_size; + struct page **pages; + phys_addr_t *phys; + size_t page_bytes_remain; +}; + +int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff, + size_t hdr_bytes); +int __efi_capsule_setup_info(struct capsule_info *cap_info); + +/* + * Allocation types for calls to boottime->allocate_pages. + */ +#define EFI_ALLOCATE_ANY_PAGES 0 +#define EFI_ALLOCATE_MAX_ADDRESS 1 +#define EFI_ALLOCATE_ADDRESS 2 +#define EFI_MAX_ALLOCATE_TYPE 3 + +typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg); + +/* + * Types and defines for Time Services + */ +#define EFI_TIME_ADJUST_DAYLIGHT 0x1 +#define EFI_TIME_IN_DAYLIGHT 0x2 +#define EFI_UNSPECIFIED_TIMEZONE 0x07ff + +typedef struct { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 pad1; + u32 nanosecond; + s16 timezone; + u8 daylight; + u8 pad2; +} efi_time_t; + +typedef struct { + u32 resolution; + u32 accuracy; + u8 sets_to_zero; +} efi_time_cap_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 raise_tpl; + u32 restore_tpl; + u32 allocate_pages; + u32 free_pages; + u32 get_memory_map; + u32 allocate_pool; + u32 free_pool; + u32 create_event; + u32 set_timer; + u32 wait_for_event; + u32 signal_event; + u32 close_event; + u32 check_event; + u32 install_protocol_interface; + u32 reinstall_protocol_interface; + u32 uninstall_protocol_interface; + u32 handle_protocol; + u32 __reserved; + u32 register_protocol_notify; + u32 locate_handle; + u32 locate_device_path; + u32 install_configuration_table; + u32 load_image; + u32 start_image; + u32 exit; + u32 unload_image; + u32 exit_boot_services; + u32 get_next_monotonic_count; + u32 stall; + u32 set_watchdog_timer; + u32 connect_controller; + u32 disconnect_controller; + u32 open_protocol; + u32 close_protocol; + u32 open_protocol_information; + u32 protocols_per_handle; + u32 locate_handle_buffer; + u32 locate_protocol; + u32 install_multiple_protocol_interfaces; + u32 uninstall_multiple_protocol_interfaces; + u32 calculate_crc32; + u32 copy_mem; + u32 set_mem; + u32 create_event_ex; +} __packed efi_boot_services_32_t; + +typedef struct { + efi_table_hdr_t hdr; + u64 raise_tpl; + u64 restore_tpl; + u64 allocate_pages; + u64 free_pages; + u64 get_memory_map; + u64 allocate_pool; + u64 free_pool; + u64 create_event; + u64 set_timer; + u64 wait_for_event; + u64 signal_event; + u64 close_event; + u64 
check_event; + u64 install_protocol_interface; + u64 reinstall_protocol_interface; + u64 uninstall_protocol_interface; + u64 handle_protocol; + u64 __reserved; + u64 register_protocol_notify; + u64 locate_handle; + u64 locate_device_path; + u64 install_configuration_table; + u64 load_image; + u64 start_image; + u64 exit; + u64 unload_image; + u64 exit_boot_services; + u64 get_next_monotonic_count; + u64 stall; + u64 set_watchdog_timer; + u64 connect_controller; + u64 disconnect_controller; + u64 open_protocol; + u64 close_protocol; + u64 open_protocol_information; + u64 protocols_per_handle; + u64 locate_handle_buffer; + u64 locate_protocol; + u64 install_multiple_protocol_interfaces; + u64 uninstall_multiple_protocol_interfaces; + u64 calculate_crc32; + u64 copy_mem; + u64 set_mem; + u64 create_event_ex; +} __packed efi_boot_services_64_t; + +/* + * EFI Boot Services table + */ +typedef struct { + efi_table_hdr_t hdr; + void *raise_tpl; + void *restore_tpl; + efi_status_t (*allocate_pages)(int, int, unsigned long, + efi_physical_addr_t *); + efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long); + efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *, + unsigned long *, u32 *); + efi_status_t (*allocate_pool)(int, unsigned long, void **); + efi_status_t (*free_pool)(void *); + void *create_event; + void *set_timer; + void *wait_for_event; + void *signal_event; + void *close_event; + void *check_event; + void *install_protocol_interface; + void *reinstall_protocol_interface; + void *uninstall_protocol_interface; + efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **); + void *__reserved; + void *register_protocol_notify; + efi_status_t (*locate_handle)(int, efi_guid_t *, void *, + unsigned long *, efi_handle_t *); + void *locate_device_path; + efi_status_t (*install_configuration_table)(efi_guid_t *, void *); + void *load_image; + void *start_image; + void *exit; + void *unload_image; + efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long); + void *get_next_monotonic_count; + void *stall; + void *set_watchdog_timer; + void *connect_controller; + void *disconnect_controller; + void *open_protocol; + void *close_protocol; + void *open_protocol_information; + void *protocols_per_handle; + void *locate_handle_buffer; + efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **); + void *install_multiple_protocol_interfaces; + void *uninstall_multiple_protocol_interfaces; + void *calculate_crc32; + void *copy_mem; + void *set_mem; + void *create_event_ex; +} efi_boot_services_t; + +typedef enum { + EfiPciIoWidthUint8, + EfiPciIoWidthUint16, + EfiPciIoWidthUint32, + EfiPciIoWidthUint64, + EfiPciIoWidthFifoUint8, + EfiPciIoWidthFifoUint16, + EfiPciIoWidthFifoUint32, + EfiPciIoWidthFifoUint64, + EfiPciIoWidthFillUint8, + EfiPciIoWidthFillUint16, + EfiPciIoWidthFillUint32, + EfiPciIoWidthFillUint64, + EfiPciIoWidthMaximum +} EFI_PCI_IO_PROTOCOL_WIDTH; + +typedef enum { + EfiPciIoAttributeOperationGet, + EfiPciIoAttributeOperationSet, + EfiPciIoAttributeOperationEnable, + EfiPciIoAttributeOperationDisable, + EfiPciIoAttributeOperationSupported, + EfiPciIoAttributeOperationMaximum +} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION; + +typedef struct { + u32 read; + u32 write; +} efi_pci_io_protocol_access_32_t; + +typedef struct { + u64 read; + u64 write; +} efi_pci_io_protocol_access_64_t; + +typedef struct { + void *read; + void *write; +} efi_pci_io_protocol_access_t; + +typedef struct { + u32 poll_mem; + u32 poll_io; + efi_pci_io_protocol_access_32_t 
mem; + efi_pci_io_protocol_access_32_t io; + efi_pci_io_protocol_access_32_t pci; + u32 copy_mem; + u32 map; + u32 unmap; + u32 allocate_buffer; + u32 free_buffer; + u32 flush; + u32 get_location; + u32 attributes; + u32 get_bar_attributes; + u32 set_bar_attributes; + u64 romsize; + u32 romimage; +} efi_pci_io_protocol_32_t; + +typedef struct { + u64 poll_mem; + u64 poll_io; + efi_pci_io_protocol_access_64_t mem; + efi_pci_io_protocol_access_64_t io; + efi_pci_io_protocol_access_64_t pci; + u64 copy_mem; + u64 map; + u64 unmap; + u64 allocate_buffer; + u64 free_buffer; + u64 flush; + u64 get_location; + u64 attributes; + u64 get_bar_attributes; + u64 set_bar_attributes; + u64 romsize; + u64 romimage; +} efi_pci_io_protocol_64_t; + +typedef struct { + void *poll_mem; + void *poll_io; + efi_pci_io_protocol_access_t mem; + efi_pci_io_protocol_access_t io; + efi_pci_io_protocol_access_t pci; + void *copy_mem; + void *map; + void *unmap; + void *allocate_buffer; + void *free_buffer; + void *flush; + void *get_location; + void *attributes; + void *get_bar_attributes; + void *set_bar_attributes; + uint64_t romsize; + void *romimage; +} efi_pci_io_protocol_t; + +#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001 +#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002 +#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004 +#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008 +#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010 +#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020 +#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080 +#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200 +#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800 +#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000 +#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000 +#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000 +#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000 +#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000 +#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000 +#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000 + +typedef struct { + u32 version; + u32 get; + u32 set; + u32 del; + u32 get_all; +} apple_properties_protocol_32_t; + +typedef struct { + u64 version; + u64 get; + u64 set; + u64 del; + u64 get_all; +} apple_properties_protocol_64_t; + +typedef struct { + u32 get_capability; + u32 get_event_log; + u32 hash_log_extend_event; + u32 submit_command; + u32 get_active_pcr_banks; + u32 set_active_pcr_banks; + u32 get_result_of_set_active_pcr_banks; +} efi_tcg2_protocol_32_t; + +typedef struct { + u64 get_capability; + u64 get_event_log; + u64 hash_log_extend_event; + u64 submit_command; + u64 get_active_pcr_banks; + u64 set_active_pcr_banks; + u64 get_result_of_set_active_pcr_banks; +} efi_tcg2_protocol_64_t; + +typedef u32 efi_tcg2_event_log_format; + +typedef struct { + void *get_capability; + efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format, + efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *); + void *hash_log_extend_event; + void *submit_command; + void *get_active_pcr_banks; + void *set_active_pcr_banks; + void *get_result_of_set_active_pcr_banks; +} efi_tcg2_protocol_t; + +/* + * Types and defines for EFI ResetSystem + */ +#define EFI_RESET_COLD 0 +#define EFI_RESET_WARM 1 +#define EFI_RESET_SHUTDOWN 2 + +/* + * EFI Runtime Services table + */ +#define EFI_RUNTIME_SERVICES_SIGNATURE ((u64)0x5652453544e5552ULL) +#define EFI_RUNTIME_SERVICES_REVISION 0x00010000 + +typedef 
struct { + efi_table_hdr_t hdr; + u32 get_time; + u32 set_time; + u32 get_wakeup_time; + u32 set_wakeup_time; + u32 set_virtual_address_map; + u32 convert_pointer; + u32 get_variable; + u32 get_next_variable; + u32 set_variable; + u32 get_next_high_mono_count; + u32 reset_system; + u32 update_capsule; + u32 query_capsule_caps; + u32 query_variable_info; +} efi_runtime_services_32_t; + +typedef struct { + efi_table_hdr_t hdr; + u64 get_time; + u64 set_time; + u64 get_wakeup_time; + u64 set_wakeup_time; + u64 set_virtual_address_map; + u64 convert_pointer; + u64 get_variable; + u64 get_next_variable; + u64 set_variable; + u64 get_next_high_mono_count; + u64 reset_system; + u64 update_capsule; + u64 query_capsule_caps; + u64 query_variable_info; +} efi_runtime_services_64_t; + +typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc); +typedef efi_status_t efi_set_time_t (efi_time_t *tm); +typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending, + efi_time_t *tm); +typedef efi_status_t efi_set_wakeup_time_t (efi_bool_t enabled, efi_time_t *tm); +typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, + unsigned long *data_size, void *data); +typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name, + efi_guid_t *vendor); +typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, + void *data); +typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); +typedef void efi_reset_system_t (int reset_type, efi_status_t status, + unsigned long data_size, efi_char16_t *data); +typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_size, + unsigned long descriptor_size, + u32 descriptor_version, + efi_memory_desc_t *virtual_map); +typedef efi_status_t efi_query_variable_info_t(u32 attr, + u64 *storage_space, + u64 *remaining_space, + u64 *max_variable_size); +typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **capsules, + unsigned long count, + unsigned long sg_list); +typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules, + unsigned long count, + u64 *max_size, + int *reset_type); +typedef efi_status_t efi_query_variable_store_t(u32 attributes, + unsigned long size, + bool nonblocking); + +typedef struct { + efi_table_hdr_t hdr; + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t *set_wakeup_time; + efi_set_virtual_address_map_t *set_virtual_address_map; + void *convert_pointer; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t *reset_system; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_query_variable_info_t *query_variable_info; +} efi_runtime_services_t; + +void efi_native_runtime_setup(void); + +/* + * EFI Configuration Table and GUID definitions + * + * These are all defined in a single line to make them easier to + * grep for and to see them at a glance - while still having a + * similar structure to the definitions in the spec. 
+ * + * Here's how they are structured: + * + * GUID: 12345678-1234-1234-1234-123456789012 + * Spec: + * #define EFI_SOME_PROTOCOL_GUID \ + * {0x12345678,0x1234,0x1234,\ + * {0x12,0x34,0x12,0x34,0x56,0x78,0x90,0x12}} + * Here: + * #define SOME_PROTOCOL_GUID EFI_GUID(0x12345678, 0x1234, 0x1234, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12) + * ^ tabs ^extra space + * + * Note that the 'extra space' separates the values at the same place + * where the UEFI SPEC breaks the line. + */ +#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) +#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) +#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98) +#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) +#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) +#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) +#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0) +#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a) +#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39) +#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a) +#define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) +#define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) +#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) +#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) +#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61) +#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) +#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) +#define EFI_TCG2_PROTOCOL_GUID 
EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f) + +#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) +#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23) + +/* + * This GUID is used to pass to the kernel proper the struct screen_info + * structure that was populated by the stub based on the GOP protocol instance + * associated with ConOut + */ +#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) +#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) +#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) +#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) + +typedef struct { + efi_guid_t guid; + u64 table; +} efi_config_table_64_t; + +typedef struct { + efi_guid_t guid; + u32 table; +} efi_config_table_32_t; + +typedef struct { + efi_guid_t guid; + unsigned long table; +} efi_config_table_t; + +typedef struct { + efi_guid_t guid; + const char *name; + unsigned long *ptr; +} efi_config_table_type_t; + +#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL) + +#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30)) +#define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20)) +#define EFI_2_10_SYSTEM_TABLE_REVISION ((2 << 16) | (10)) +#define EFI_2_00_SYSTEM_TABLE_REVISION ((2 << 16) | (00)) +#define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | (10)) +#define EFI_1_02_SYSTEM_TABLE_REVISION ((1 << 16) | (02)) + +typedef struct { + efi_table_hdr_t hdr; + u64 fw_vendor; /* physical addr of CHAR16 vendor string */ + u32 fw_revision; + u32 __pad1; + u64 con_in_handle; + u64 con_in; + u64 con_out_handle; + u64 con_out; + u64 stderr_handle; + u64 stderr; + u64 runtime; + u64 boottime; + u32 nr_tables; + u32 __pad2; + u64 tables; +} efi_system_table_64_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 fw_vendor; /* physical addr of CHAR16 vendor string */ + u32 fw_revision; + u32 con_in_handle; + u32 con_in; + u32 con_out_handle; + u32 con_out; + u32 stderr_handle; + u32 stderr; + u32 runtime; + u32 boottime; + u32 nr_tables; + u32 tables; +} efi_system_table_32_t; + +typedef struct { + efi_table_hdr_t hdr; + unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */ + u32 fw_revision; + unsigned long con_in_handle; + unsigned long con_in; + unsigned long con_out_handle; + unsigned long con_out; + unsigned long stderr_handle; + unsigned long stderr; + efi_runtime_services_t *runtime; + efi_boot_services_t *boottime; + unsigned long nr_tables; + unsigned long tables; +} efi_system_table_t; + +/* + * Architecture independent structure for describing a memory map for the + * benefit of efi_memmap_init_early(), saving us the need to pass four + * parameters. 
+ */ +struct efi_memory_map_data { + phys_addr_t phys_map; + unsigned long size; + unsigned long desc_version; + unsigned long desc_size; +}; + +struct efi_memory_map { + phys_addr_t phys_map; + void *map; + void *map_end; + int nr_map; + unsigned long desc_version; + unsigned long desc_size; + bool late; +}; + +struct efi_mem_range { + struct range range; + u64 attribute; +}; + +struct efi_fdt_params { + u64 system_table; + u64 mmap; + u32 mmap_size; + u32 desc_size; + u32 desc_ver; +}; + +typedef struct { + u32 revision; + u32 parent_handle; + u32 system_table; + u32 device_handle; + u32 file_path; + u32 reserved; + u32 load_options_size; + u32 load_options; + u32 image_base; + __aligned_u64 image_size; + unsigned int image_code_type; + unsigned int image_data_type; + unsigned long unload; +} efi_loaded_image_32_t; + +typedef struct { + u32 revision; + u64 parent_handle; + u64 system_table; + u64 device_handle; + u64 file_path; + u64 reserved; + u32 load_options_size; + u64 load_options; + u64 image_base; + __aligned_u64 image_size; + unsigned int image_code_type; + unsigned int image_data_type; + unsigned long unload; +} efi_loaded_image_64_t; + +typedef struct { + u32 revision; + void *parent_handle; + efi_system_table_t *system_table; + void *device_handle; + void *file_path; + void *reserved; + u32 load_options_size; + void *load_options; + void *image_base; + __aligned_u64 image_size; + unsigned int image_code_type; + unsigned int image_data_type; + unsigned long unload; +} efi_loaded_image_t; + + +typedef struct { + u64 size; + u64 file_size; + u64 phys_size; + efi_time_t create_time; + efi_time_t last_access_time; + efi_time_t modification_time; + __aligned_u64 attribute; + efi_char16_t filename[1]; +} efi_file_info_t; + +typedef struct { + u64 revision; + u32 open; + u32 close; + u32 delete; + u32 read; + u32 write; + u32 get_position; + u32 set_position; + u32 get_info; + u32 set_info; + u32 flush; +} efi_file_handle_32_t; + +typedef struct { + u64 revision; + u64 open; + u64 close; + u64 delete; + u64 read; + u64 write; + u64 get_position; + u64 set_position; + u64 get_info; + u64 set_info; + u64 flush; +} efi_file_handle_64_t; + +typedef struct _efi_file_handle { + u64 revision; + efi_status_t (*open)(struct _efi_file_handle *, + struct _efi_file_handle **, + efi_char16_t *, u64, u64); + efi_status_t (*close)(struct _efi_file_handle *); + void *delete; + efi_status_t (*read)(struct _efi_file_handle *, unsigned long *, + void *); + void *write; + void *get_position; + void *set_position; + efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *, + unsigned long *, void *); + void *set_info; + void *flush; +} efi_file_handle_t; + +typedef struct { + u64 revision; + u32 open_volume; +} efi_file_io_interface_32_t; + +typedef struct { + u64 revision; + u64 open_volume; +} efi_file_io_interface_64_t; + +typedef struct _efi_file_io_interface { + u64 revision; + int (*open_volume)(struct _efi_file_io_interface *, + efi_file_handle_t **); +} efi_file_io_interface_t; + +#define EFI_FILE_MODE_READ 0x0000000000000001 +#define EFI_FILE_MODE_WRITE 0x0000000000000002 +#define EFI_FILE_MODE_CREATE 0x8000000000000000 + +typedef struct { + u32 version; + u32 length; + u64 memory_protection_attribute; +} efi_properties_table_t; + +#define EFI_PROPERTIES_TABLE_VERSION 0x00010000 +#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1 + +#define EFI_INVALID_TABLE_ADDR (~0UL) + +typedef struct { + u32 version; + u32 num_entries; + u32 desc_size; + u32 reserved; + 
efi_memory_desc_t entry[0]; +} efi_memory_attributes_table_t; + +/* + * All runtime access to EFI goes through this structure: + */ +extern struct efi { + efi_system_table_t *systab; /* EFI system table */ + unsigned int runtime_version; /* Runtime services version */ + unsigned long mps; /* MPS table */ + unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ + unsigned long acpi20; /* ACPI table (ACPI 2.0) */ + unsigned long smbios; /* SMBIOS table (32 bit entry point) */ + unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ + unsigned long sal_systab; /* SAL system table */ + unsigned long boot_info; /* boot info table */ + unsigned long hcdp; /* HCDP table */ + unsigned long uga; /* UGA table */ + unsigned long uv_systab; /* UV system table */ + unsigned long fw_vendor; /* fw_vendor */ + unsigned long runtime; /* runtime table */ + unsigned long config_table; /* config tables */ + unsigned long esrt; /* ESRT table */ + unsigned long properties_table; /* properties table */ + unsigned long mem_attr_table; /* memory attributes table */ + unsigned long rng_seed; /* UEFI firmware random seed */ + unsigned long tpm_log; /* TPM2 Event Log table */ + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t *set_wakeup_time; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_info_t *query_variable_info; + efi_query_variable_info_t *query_variable_info_nonblocking; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t *reset_system; + efi_set_virtual_address_map_t *set_virtual_address_map; + struct efi_memory_map memmap; + unsigned long flags; +} efi; + +extern struct mm_struct efi_mm; + +static inline int +efi_guidcmp (efi_guid_t left, efi_guid_t right) +{ + return memcmp(&left, &right, sizeof (efi_guid_t)); +} + +static inline char * +efi_guid_to_str(efi_guid_t *guid, char *out) +{ + sprintf(out, "%pUl", guid->b); + return out; +} + +extern void efi_init (void); +extern void *efi_get_pal_addr (void); +extern void efi_map_pal_code (void); +extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); +extern void efi_gettimeofday (struct timespec64 *ts); +#ifdef CONFIG_EFI +extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +#else +static inline void efi_enter_virtual_mode (void) {} +#endif +#ifdef CONFIG_X86 +extern void efi_free_boot_services(void); +extern efi_status_t efi_query_variable_store(u32 attributes, + unsigned long size, + bool nonblocking); +extern void efi_find_mirror(void); +#else +static inline void efi_free_boot_services(void) {} + +static inline efi_status_t efi_query_variable_store(u32 attributes, + unsigned long size, + bool nonblocking) +{ + return EFI_SUCCESS; +} +#endif +extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); + +extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries); +extern int __init efi_memmap_init_early(struct efi_memory_map_data *data); +extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size); +extern void __init efi_memmap_unmap(void); +extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map); +extern int __init efi_memmap_split_count(efi_memory_desc_t *md, + struct range *range); +extern void __init efi_memmap_insert(struct 
efi_memory_map *old_memmap, + void *buf, struct efi_mem_range *mem); + +extern int efi_config_init(efi_config_table_type_t *arch_tables); +#ifdef CONFIG_EFI_ESRT +extern void __init efi_esrt_init(void); +#else +static inline void efi_esrt_init(void) { } +#endif +extern int efi_config_parse_tables(void *config_tables, int count, int sz, + efi_config_table_type_t *arch_tables); +extern u64 efi_get_iobase (void); +extern int efi_mem_type(unsigned long phys_addr); +extern u64 efi_mem_attributes (unsigned long phys_addr); +extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); +extern int __init efi_uart_console_only (void); +extern u64 efi_mem_desc_end(efi_memory_desc_t *md); +extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); +extern void efi_mem_reserve(phys_addr_t addr, u64 size); +extern void efi_initialize_iomem_resources(struct resource *code_resource, + struct resource *data_resource, struct resource *bss_resource); +extern void efi_reserve_boot_services(void); +extern int efi_get_fdt_params(struct efi_fdt_params *params); +extern struct kobject *efi_kobj; + +extern int efi_reboot_quirk_mode; +extern bool efi_poweroff_required(void); + +#ifdef CONFIG_EFI_FAKE_MEMMAP +extern void __init efi_fake_memmap(void); +#else +static inline void efi_fake_memmap(void) { } +#endif + +/* + * efi_memattr_perm_setter - arch specific callback function passed into + * efi_memattr_apply_permissions() that updates the + * mapping permissions described by the second + * argument in the page tables referred to by the + * first argument. + */ +typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *); + +extern int efi_memattr_init(void); +extern int efi_memattr_apply_permissions(struct mm_struct *mm, + efi_memattr_perm_setter fn); + +/* + * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor + * @map: the start of efi memmap + * @desc_size: the size of space for each EFI memmap descriptor + * @n: the index of efi memmap descriptor + * + * EFI boot service provides the GetMemoryMap() function to get a copy of the + * current memory map which is an array of memory descriptors, each of + * which describes a contiguous block of memory. It also gets the size of the + * map, and the size of each descriptor, etc. + * + * Note that per section 6.2 of UEFI Spec 2.6 Errata A, the returned size of + * each descriptor might not be equal to sizeof(efi_memory_memdesc_t), + * since efi_memory_memdesc_t may be extended in the future. Thus the OS + * MUST use the returned size of the descriptor to find the start of each + * efi_memory_memdesc_t in the memory map array. This should only be used + * during bootup since for_each_efi_memory_desc_xxx() is available after the + * kernel initializes the EFI subsystem to set up struct efi_memory_map. + */ +#define efi_early_memdesc_ptr(map, desc_size, n) \ + (efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size))) + +/* Iterate through an efi_memory_map */ +#define for_each_efi_memory_desc_in_map(m, md) \ + for ((md) = (m)->map; \ + (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end; \ + (md) = (void *)(md) + (m)->desc_size) + +/** + * for_each_efi_memory_desc - iterate over descriptors in efi.memmap + * @md: the efi_memory_desc_t * iterator + * + * Once the loop finishes @md must not be accessed. 
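To make the desc_size rule above concrete, here is a sketch of walking a boot-time copy of the memory map with efi_early_memdesc_ptr(), stepping by the firmware-reported descriptor size rather than by sizeof(efi_memory_desc_t). The map, desc_size and nr_entries parameters are assumed to come from the stub's GetMemoryMap() call, and the function itself is hypothetical.

#include <linux/efi.h>

/* Hypothetical early walker: must step by the reported desc_size. */
static u64 example_count_conventional_pages(void *map, unsigned long desc_size,
                                            unsigned long nr_entries)
{
        u64 pages = 0;
        unsigned long i;

        for (i = 0; i < nr_entries; i++) {
                efi_memory_desc_t *md = efi_early_memdesc_ptr(map, desc_size, i);

                if (md->type == EFI_CONVENTIONAL_MEMORY)
                        pages += md->num_pages;
        }
        return pages;
}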
+ */ +#define for_each_efi_memory_desc(md) \ + for_each_efi_memory_desc_in_map(&efi.memmap, md) + +/* + * Format an EFI memory descriptor's type and attributes to a user-provided + * character buffer, as per snprintf(), and return the buffer. + */ +char * __init efi_md_typeattr_format(char *buf, size_t size, + const efi_memory_desc_t *md); + +/** + * efi_range_is_wc - check the WC bit on an address range + * @start: starting kvirt address + * @len: length of range + * + * Consult the EFI memory map and make sure it's ok to set this range WC. + * Returns true or false. + */ +static inline int efi_range_is_wc(unsigned long start, unsigned long len) +{ + unsigned long i; + + for (i = 0; i < len; i += (1UL << EFI_PAGE_SHIFT)) { + unsigned long paddr = __pa(start + i); + if (!(efi_mem_attributes(paddr) & EFI_MEMORY_WC)) + return 0; + } + /* The range checked out */ + return 1; +} + +#ifdef CONFIG_EFI_PCDP +extern int __init efi_setup_pcdp_console(char *); +#endif + +/* + * We play games with efi_enabled so that the compiler will, if + * possible, remove EFI-related code altogether. + */ +#define EFI_BOOT 0 /* Were we booted from EFI? */ +#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */ +#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ +#define EFI_MEMMAP 4 /* Can we use EFI memory map? */ +#define EFI_64BIT 5 /* Is the firmware 64-bit? */ +#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */ +#define EFI_ARCH_1 7 /* First arch-specific bit */ +#define EFI_DBG 8 /* Print additional debug info at runtime */ +#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ +#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */ + +#ifdef CONFIG_EFI +/* + * Test whether the above EFI_* bits are enabled. 
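As a usage sketch, callers are expected to gate access to the runtime service pointers in struct efi on the corresponding feature bit via efi_enabled(), which is defined immediately below. The wrapper function here is invented for illustration.

#include <linux/efi.h>

/* Hypothetical caller: only touch runtime services when usable. */
static efi_status_t example_get_time(efi_time_t *tm)
{
        if (!efi_enabled(EFI_RUNTIME_SERVICES))
                return EFI_UNSUPPORTED;

        return efi.get_time(tm, NULL);
}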
+ */ +static inline bool efi_enabled(int feature) +{ + return test_bit(feature, &efi.flags) != 0; +} +extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); + +extern bool efi_is_table_address(unsigned long phys_addr); +#else +static inline bool efi_enabled(int feature) +{ + return false; +} +static inline void +efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {} + +static inline bool +efi_capsule_pending(int *reset_type) +{ + return false; +} + +static inline bool efi_is_table_address(unsigned long phys_addr) +{ + return false; +} +#endif + +extern int efi_status_to_err(efi_status_t status); + +/* + * Variable Attributes + */ +#define EFI_VARIABLE_NON_VOLATILE 0x0000000000000001 +#define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002 +#define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004 +#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x0000000000000008 +#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x0000000000000010 +#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020 +#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040 + +#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \ + EFI_VARIABLE_BOOTSERVICE_ACCESS | \ + EFI_VARIABLE_RUNTIME_ACCESS | \ + EFI_VARIABLE_HARDWARE_ERROR_RECORD | \ + EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \ + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \ + EFI_VARIABLE_APPEND_WRITE) +/* + * Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")) + * not including trailing NUL + */ +#define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN + +/* + * The type of search to perform when calling boottime->locate_handle + */ +#define EFI_LOCATE_ALL_HANDLES 0 +#define EFI_LOCATE_BY_REGISTER_NOTIFY 1 +#define EFI_LOCATE_BY_PROTOCOL 2 + +/* + * EFI Device Path information + */ +#define EFI_DEV_HW 0x01 +#define EFI_DEV_PCI 1 +#define EFI_DEV_PCCARD 2 +#define EFI_DEV_MEM_MAPPED 3 +#define EFI_DEV_VENDOR 4 +#define EFI_DEV_CONTROLLER 5 +#define EFI_DEV_ACPI 0x02 +#define EFI_DEV_BASIC_ACPI 1 +#define EFI_DEV_EXPANDED_ACPI 2 +#define EFI_DEV_MSG 0x03 +#define EFI_DEV_MSG_ATAPI 1 +#define EFI_DEV_MSG_SCSI 2 +#define EFI_DEV_MSG_FC 3 +#define EFI_DEV_MSG_1394 4 +#define EFI_DEV_MSG_USB 5 +#define EFI_DEV_MSG_USB_CLASS 15 +#define EFI_DEV_MSG_I20 6 +#define EFI_DEV_MSG_MAC 11 +#define EFI_DEV_MSG_IPV4 12 +#define EFI_DEV_MSG_IPV6 13 +#define EFI_DEV_MSG_INFINIBAND 9 +#define EFI_DEV_MSG_UART 14 +#define EFI_DEV_MSG_VENDOR 10 +#define EFI_DEV_MEDIA 0x04 +#define EFI_DEV_MEDIA_HARD_DRIVE 1 +#define EFI_DEV_MEDIA_CDROM 2 +#define EFI_DEV_MEDIA_VENDOR 3 +#define EFI_DEV_MEDIA_FILE 4 +#define EFI_DEV_MEDIA_PROTOCOL 5 +#define EFI_DEV_BIOS_BOOT 0x05 +#define EFI_DEV_END_PATH 0x7F +#define EFI_DEV_END_PATH2 0xFF +#define EFI_DEV_END_INSTANCE 0x01 +#define EFI_DEV_END_ENTIRE 0xFF + +struct efi_generic_dev_path { + u8 type; + u8 sub_type; + u16 length; +} __attribute ((packed)); + +struct efi_dev_path { + u8 type; /* can be replaced with unnamed */ + u8 sub_type; /* struct efi_generic_dev_path; */ + u16 length; /* once we've moved to -std=c11 */ + union { + struct { + u32 hid; + u32 uid; + } acpi; + struct { + u8 fn; + u8 dev; + } pci; + }; +} __attribute ((packed)); + +#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER) +struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len); +#endif + +static inline void memrange_efi_to_native(u64 *addr, u64 *npages) +{ + *npages = PFN_UP(*addr + (*npages<kobj); +} + +int efivars_register(struct efivars *efivars, + const struct efivar_operations *ops, + 
struct kobject *kobject); +int efivars_unregister(struct efivars *efivars); +struct kobject *efivars_kobject(void); + +int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), + void *data, bool duplicates, struct list_head *head); + +int efivar_entry_add(struct efivar_entry *entry, struct list_head *head); +int efivar_entry_remove(struct efivar_entry *entry); + +int __efivar_entry_delete(struct efivar_entry *entry); +int efivar_entry_delete(struct efivar_entry *entry); + +int efivar_entry_size(struct efivar_entry *entry, unsigned long *size); +int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, + unsigned long *size, void *data); +int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, + unsigned long *size, void *data); +int efivar_entry_set(struct efivar_entry *entry, u32 attributes, + unsigned long size, void *data, struct list_head *head); +int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, + unsigned long *size, void *data, bool *set); +int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, + bool block, unsigned long size, void *data); + +int efivar_entry_iter_begin(void); +void efivar_entry_iter_end(void); + +int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *), + struct list_head *head, void *data, + struct efivar_entry **prev); +int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), + struct list_head *head, void *data); + +struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, + struct list_head *head, bool remove); + +bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, + unsigned long data_size); +bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, + size_t len); + +extern struct work_struct efivar_work; +void efivar_run_worker(void); + +#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE) +int efivars_sysfs_init(void); + +#define EFIVARS_DATA_SIZE_MAX 1024 + +#endif /* CONFIG_EFI_VARS */ +extern bool efi_capsule_pending(int *reset_type); + +extern int efi_capsule_supported(efi_guid_t guid, u32 flags, + size_t size, int *reset); + +extern int efi_capsule_update(efi_capsule_header_t *capsule, + phys_addr_t *pages); + +#ifdef CONFIG_EFI_RUNTIME_MAP +int efi_runtime_map_init(struct kobject *); +int efi_get_runtime_map_size(void); +int efi_get_runtime_map_desc_size(void); +int efi_runtime_map_copy(void *buf, size_t bufsz); +#else +static inline int efi_runtime_map_init(struct kobject *kobj) +{ + return 0; +} + +static inline int efi_get_runtime_map_size(void) +{ + return 0; +} + +static inline int efi_get_runtime_map_desc_size(void) +{ + return 0; +} + +static inline int efi_runtime_map_copy(void *buf, size_t bufsz) +{ + return 0; +} + +#endif + +/* prototypes shared between arch specific and generic stub code */ + +void efi_printk(efi_system_table_t *sys_table_arg, char *str); + +void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, + unsigned long addr); + +char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, + efi_loaded_image_t *image, int *cmd_line_len); + +efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, + struct efi_boot_memmap *map); + +efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, + unsigned long size, unsigned long align, + unsigned long *addr); + +efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, + unsigned long size, unsigned long align, + unsigned long *addr, unsigned long max); + +efi_status_t 
efi_relocate_kernel(efi_system_table_t *sys_table_arg, + unsigned long *image_addr, + unsigned long image_size, + unsigned long alloc_size, + unsigned long preferred_addr, + unsigned long alignment); + +efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, + efi_loaded_image_t *image, + char *cmd_line, char *option_string, + unsigned long max_addr, + unsigned long *load_addr, + unsigned long *load_size); + +efi_status_t efi_parse_options(char const *cmdline); + +efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, + struct screen_info *si, efi_guid_t *proto, + unsigned long size); + +#ifdef CONFIG_EFI +extern bool efi_runtime_disabled(void); +#else +static inline bool efi_runtime_disabled(void) { return true; } +#endif + +extern void efi_call_virt_check_flags(unsigned long flags, const char *call); + +enum efi_secureboot_mode { + efi_secureboot_mode_unset, + efi_secureboot_mode_unknown, + efi_secureboot_mode_disabled, + efi_secureboot_mode_enabled, +}; +enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table); + +#ifdef CONFIG_RESET_ATTACK_MITIGATION +void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg); +#else +static inline void +efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { } +#endif + +void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table); + +/* + * Arch code can implement the following three template macros, avoiding + * reptition for the void/non-void return cases of {__,}efi_call_virt(): + * + * * arch_efi_call_virt_setup() + * + * Sets up the environment for the call (e.g. switching page tables, + * allowing kernel-mode use of floating point, if required). + * + * * arch_efi_call_virt() + * + * Performs the call. The last expression in the macro must be the call + * itself, allowing the logic to be shared by the void and non-void + * cases. + * + * * arch_efi_call_virt_teardown() + * + * Restores the usual kernel environment once the call has returned. + */ + +#define efi_call_virt_pointer(p, f, args...) \ +({ \ + efi_status_t __s; \ + unsigned long __flags; \ + \ + arch_efi_call_virt_setup(); \ + \ + local_save_flags(__flags); \ + __s = arch_efi_call_virt(p, f, args); \ + efi_call_virt_check_flags(__flags, __stringify(f)); \ + \ + arch_efi_call_virt_teardown(); \ + \ + __s; \ +}) + +#define __efi_call_virt_pointer(p, f, args...) 
\ +({ \ + unsigned long __flags; \ + \ + arch_efi_call_virt_setup(); \ + \ + local_save_flags(__flags); \ + arch_efi_call_virt(p, f, args); \ + efi_call_virt_check_flags(__flags, __stringify(f)); \ + \ + arch_efi_call_virt_teardown(); \ +}) + +typedef efi_status_t (*efi_exit_boot_map_processing)( + efi_system_table_t *sys_table_arg, + struct efi_boot_memmap *map, + void *priv); + +efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table, + void *handle, + struct efi_boot_memmap *map, + void *priv, + efi_exit_boot_map_processing priv_func); + +#define EFI_RANDOM_SEED_SIZE 64U + +struct linux_efi_random_seed { + u32 size; + u8 bits[]; +}; + +struct linux_efi_tpm_eventlog { + u32 size; + u8 version; + u8 log[]; +}; + +extern int efi_tpm_eventlog_init(void); + +/* efi_runtime_service() function identifiers */ +enum efi_rts_ids { + GET_TIME, + SET_TIME, + GET_WAKEUP_TIME, + SET_WAKEUP_TIME, + GET_VARIABLE, + GET_NEXT_VARIABLE, + SET_VARIABLE, + QUERY_VARIABLE_INFO, + GET_NEXT_HIGH_MONO_COUNT, + UPDATE_CAPSULE, + QUERY_CAPSULE_CAPS, +}; + +/* + * efi_runtime_work: Details of EFI Runtime Service work + * @arg<1-5>: EFI Runtime Service function arguments + * @status: Status of executing EFI Runtime Service + * @efi_rts_id: EFI Runtime Service function identifier + * @efi_rts_comp: Struct used for handling completions + */ +struct efi_runtime_work { + void *arg1; + void *arg2; + void *arg3; + void *arg4; + void *arg5; + efi_status_t status; + struct work_struct work; + enum efi_rts_ids efi_rts_id; + struct completion efi_rts_comp; +}; + +extern struct efi_runtime_work efi_rts_work; + +/* Workqueue to queue EFI Runtime Services */ +extern struct workqueue_struct *efi_rts_wq; + +#endif /* _LINUX_EFI_H */ diff --git a/include/linux/efs_vh.h b/include/linux/efs_vh.h new file mode 100644 index 000000000..206c5270f --- /dev/null +++ b/include/linux/efs_vh.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * efs_vh.h + * + * Copyright (c) 1999 Al Smith + * + * Portions derived from IRIX header files (c) 1985 MIPS Computer Systems, Inc. 
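Referring back to the arch_efi_call_virt_*() template macros documented in efi.h above, a sketch of how an architecture might fill them in. The efi_arch_enter()/efi_arch_leave() names are invented; an architecture would substitute whatever page-table or FPU handling it actually needs, and the efi_call_virt() wrapper mirrors the pattern used by the generic runtime wrappers.

/* Hypothetical arch glue; efi_arch_enter()/efi_arch_leave() are made up. */
#define arch_efi_call_virt_setup()              efi_arch_enter()
#define arch_efi_call_virt_teardown()           efi_arch_leave()
#define arch_efi_call_virt(p, f, args...)       ((p)->f(args))

/* With those in place, a runtime wrapper reduces to: */
#define efi_call_virt(f, args...) \
        efi_call_virt_pointer(efi.systab->runtime, f, args)

static efi_status_t example_virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
        return efi_call_virt(get_time, tm, tc);
}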
+ */ + +#ifndef __EFS_VH_H__ +#define __EFS_VH_H__ + +#define VHMAGIC 0xbe5a941 /* volume header magic number */ +#define NPARTAB 16 /* 16 unix partitions */ +#define NVDIR 15 /* max of 15 directory entries */ +#define BFNAMESIZE 16 /* max 16 chars in boot file name */ +#define VDNAMESIZE 8 + +struct volume_directory { + char vd_name[VDNAMESIZE]; /* name */ + __be32 vd_lbn; /* logical block number */ + __be32 vd_nbytes; /* file length in bytes */ +}; + +struct partition_table { /* one per logical partition */ + __be32 pt_nblks; /* # of logical blks in partition */ + __be32 pt_firstlbn; /* first lbn of partition */ + __be32 pt_type; /* use of partition */ +}; + +struct volume_header { + __be32 vh_magic; /* identifies volume header */ + __be16 vh_rootpt; /* root partition number */ + __be16 vh_swappt; /* swap partition number */ + char vh_bootfile[BFNAMESIZE]; /* name of file to boot */ + char pad[48]; /* device param space */ + struct volume_directory vh_vd[NVDIR]; /* other vol hdr contents */ + struct partition_table vh_pt[NPARTAB]; /* device partition layout */ + __be32 vh_csum; /* volume header checksum */ + __be32 vh_fill; /* fill out to 512 bytes */ +}; + +/* partition type sysv is used for EFS format CD-ROM partitions */ +#define SGI_SYSV 0x05 +#define SGI_EFS 0x07 +#define IS_EFS(x) (((x) == SGI_EFS) || ((x) == SGI_SYSV)) + +struct pt_types { + int pt_type; + char *pt_name; +}; + +#endif /* __EFS_VH_H__ */ + diff --git a/include/linux/eisa.h b/include/linux/eisa.h new file mode 100644 index 000000000..b012e30af --- /dev/null +++ b/include/linux/eisa.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_EISA_H +#define _LINUX_EISA_H + +#include +#include +#include + +#define EISA_MAX_SLOTS 8 + +#define EISA_MAX_RESOURCES 4 + +/* A few EISA constants/offsets... */ + +#define EISA_DMA1_STATUS 8 +#define EISA_INT1_CTRL 0x20 +#define EISA_INT1_MASK 0x21 +#define EISA_INT2_CTRL 0xA0 +#define EISA_INT2_MASK 0xA1 +#define EISA_DMA2_STATUS 0xD0 +#define EISA_DMA2_WRITE_SINGLE 0xD4 +#define EISA_EXT_NMI_RESET_CTRL 0x461 +#define EISA_INT1_EDGE_LEVEL 0x4D0 +#define EISA_INT2_EDGE_LEVEL 0x4D1 +#define EISA_VENDOR_ID_OFFSET 0xC80 +#define EISA_CONFIG_OFFSET 0xC84 + +#define EISA_CONFIG_ENABLED 1 +#define EISA_CONFIG_FORCED 2 + +/* There is not much we can say about an EISA device, apart from + * signature, slot number, and base address. dma_mask is set by + * default to parent device mask..*/ + +struct eisa_device { + struct eisa_device_id id; + int slot; + int state; + unsigned long base_addr; + struct resource res[EISA_MAX_RESOURCES]; + u64 dma_mask; + struct device dev; /* generic device */ +#ifdef CONFIG_EISA_NAMES + char pretty_name[50]; +#endif +}; + +#define to_eisa_device(n) container_of(n, struct eisa_device, dev) + +static inline int eisa_get_region_index (void *addr) +{ + unsigned long x = (unsigned long) addr; + + x &= 0xc00; + return (x >> 12); +} + +struct eisa_driver { + const struct eisa_device_id *id_table; + struct device_driver driver; +}; + +#define to_eisa_driver(drv) container_of(drv,struct eisa_driver, driver) + +/* These external functions are only available when EISA support is enabled. 
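For orientation, a minimal (hypothetical) EISA driver pairs an eisa_device_id table with the registration helpers declared right after this comment; the signature string, driver name and callbacks below are all invented for illustration.

#include <linux/eisa.h>
#include <linux/module.h>

static const struct eisa_device_id example_ids[] = {
        { "EXA0042" },                  /* made-up EISA signature */
        { "" },                         /* terminator */
};
MODULE_DEVICE_TABLE(eisa, example_ids);

static int example_probe(struct device *dev)
{
        struct eisa_device *edev = to_eisa_device(dev);

        dev_info(dev, "slot %d, base 0x%lx\n", edev->slot, edev->base_addr);
        return 0;
}

static struct eisa_driver example_driver = {
        .id_table = example_ids,
        .driver   = {
                .name  = "example-eisa",
                .probe = example_probe,
        },
};

static int __init example_init(void)
{
        return eisa_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
        eisa_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");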
*/ +#ifdef CONFIG_EISA + +extern struct bus_type eisa_bus_type; +int eisa_driver_register (struct eisa_driver *edrv); +void eisa_driver_unregister (struct eisa_driver *edrv); + +#else /* !CONFIG_EISA */ + +static inline int eisa_driver_register (struct eisa_driver *edrv) { return 0; } +static inline void eisa_driver_unregister (struct eisa_driver *edrv) { } + +#endif /* !CONFIG_EISA */ + +/* Mimics pci.h... */ +static inline void *eisa_get_drvdata (struct eisa_device *edev) +{ + return dev_get_drvdata(&edev->dev); +} + +static inline void eisa_set_drvdata (struct eisa_device *edev, void *data) +{ + dev_set_drvdata(&edev->dev, data); +} + +/* The EISA root device. There's rumours about machines with multiple + * busses (PA-RISC ?), so we try to handle that. */ + +struct eisa_root_device { + struct device *dev; /* Pointer to bridge device */ + struct resource *res; + unsigned long bus_base_addr; + int slots; /* Max slot number */ + int force_probe; /* Probe even when no slot 0 */ + u64 dma_mask; /* from bridge device */ + int bus_nr; /* Set by eisa_root_register */ + struct resource eisa_root_res; /* ditto */ +}; + +int eisa_root_register (struct eisa_root_device *root); + +#ifdef CONFIG_EISA +extern int EISA_bus; +#else +# define EISA_bus 0 +#endif + +#endif diff --git a/include/linux/elevator.h b/include/linux/elevator.h new file mode 100644 index 000000000..a2bf4a6b9 --- /dev/null +++ b/include/linux/elevator.h @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ELEVATOR_H +#define _LINUX_ELEVATOR_H + +#include +#include + +#ifdef CONFIG_BLOCK + +struct io_cq; +struct elevator_type; +#ifdef CONFIG_BLK_DEBUG_FS +struct blk_mq_debugfs_attr; +#endif + +/* + * Return values from elevator merger + */ +enum elv_merge { + ELEVATOR_NO_MERGE = 0, + ELEVATOR_FRONT_MERGE = 1, + ELEVATOR_BACK_MERGE = 2, + ELEVATOR_DISCARD_MERGE = 3, +}; + +typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **, + struct bio *); + +typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *); + +typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge); + +typedef int (elevator_allow_bio_merge_fn) (struct request_queue *, + struct request *, struct bio *); + +typedef int (elevator_allow_rq_merge_fn) (struct request_queue *, + struct request *, struct request *); + +typedef void (elevator_bio_merged_fn) (struct request_queue *, + struct request *, struct bio *); + +typedef int (elevator_dispatch_fn) (struct request_queue *, int); + +typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); +typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); +typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); +typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int); + +typedef void (elevator_init_icq_fn) (struct io_cq *); +typedef void (elevator_exit_icq_fn) (struct io_cq *); +typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, + struct bio *, gfp_t); +typedef void (elevator_put_req_fn) (struct request *); +typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *); +typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *); + +typedef int (elevator_init_fn) (struct request_queue *, + struct elevator_type *e); +typedef void (elevator_exit_fn) (struct elevator_queue *); +typedef void (elevator_registered_fn) (struct 
request_queue *); + +struct elevator_ops +{ + elevator_merge_fn *elevator_merge_fn; + elevator_merged_fn *elevator_merged_fn; + elevator_merge_req_fn *elevator_merge_req_fn; + elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn; + elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn; + elevator_bio_merged_fn *elevator_bio_merged_fn; + + elevator_dispatch_fn *elevator_dispatch_fn; + elevator_add_req_fn *elevator_add_req_fn; + elevator_activate_req_fn *elevator_activate_req_fn; + elevator_deactivate_req_fn *elevator_deactivate_req_fn; + + elevator_completed_req_fn *elevator_completed_req_fn; + + elevator_request_list_fn *elevator_former_req_fn; + elevator_request_list_fn *elevator_latter_req_fn; + + elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */ + elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */ + + elevator_set_req_fn *elevator_set_req_fn; + elevator_put_req_fn *elevator_put_req_fn; + + elevator_may_queue_fn *elevator_may_queue_fn; + + elevator_init_fn *elevator_init_fn; + elevator_exit_fn *elevator_exit_fn; + elevator_registered_fn *elevator_registered_fn; +}; + +struct blk_mq_alloc_data; +struct blk_mq_hw_ctx; + +struct elevator_mq_ops { + int (*init_sched)(struct request_queue *, struct elevator_type *); + void (*exit_sched)(struct elevator_queue *); + int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*depth_updated)(struct blk_mq_hw_ctx *); + + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); + bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); + int (*request_merge)(struct request_queue *q, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); + void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *); + void (*prepare_request)(struct request *, struct bio *bio); + void (*finish_request)(struct request *); + void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); + struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); + bool (*has_work)(struct blk_mq_hw_ctx *); + void (*completed_request)(struct request *); + void (*started_request)(struct request *); + void (*requeue_request)(struct request *); + struct request *(*former_request)(struct request_queue *, struct request *); + struct request *(*next_request)(struct request_queue *, struct request *); + void (*init_icq)(struct io_cq *); + void (*exit_icq)(struct io_cq *); +}; + +#define ELV_NAME_MAX (16) + +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); +}; + +/* + * identifies an elevator type, such as AS or deadline + */ +struct elevator_type +{ + /* managed by elevator core */ + struct kmem_cache *icq_cache; + + /* fields provided by elevator implementation */ + union { + struct elevator_ops sq; + struct elevator_mq_ops mq; + } ops; + size_t icq_size; /* see iocontext.h */ + size_t icq_align; /* ditto */ + struct elv_fs_entry *elevator_attrs; + char elevator_name[ELV_NAME_MAX]; + const char *elevator_alias; + struct module *elevator_owner; + bool uses_mq; +#ifdef CONFIG_BLK_DEBUG_FS + const struct blk_mq_debugfs_attr *queue_debugfs_attrs; + const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; +#endif + + /* managed by elevator core */ + char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */ + 
struct list_head list; +}; + +#define ELV_HASH_BITS 6 + +void elv_rqhash_del(struct request_queue *q, struct request *rq); +void elv_rqhash_add(struct request_queue *q, struct request *rq); +void elv_rqhash_reposition(struct request_queue *q, struct request *rq); +struct request *elv_rqhash_find(struct request_queue *q, sector_t offset); + +/* + * each queue has an elevator_queue associated with it + */ +struct elevator_queue +{ + struct elevator_type *type; + void *elevator_data; + struct kobject kobj; + struct mutex sysfs_lock; + unsigned int registered:1; + unsigned int uses_mq:1; + DECLARE_HASHTABLE(hash, ELV_HASH_BITS); +}; + +/* + * block elevator interface + */ +extern void elv_dispatch_sort(struct request_queue *, struct request *); +extern void elv_dispatch_add_tail(struct request_queue *, struct request *); +extern void elv_add_request(struct request_queue *, struct request *, int); +extern void __elv_add_request(struct request_queue *, struct request *, int); +extern enum elv_merge elv_merge(struct request_queue *, struct request **, + struct bio *); +extern void elv_merge_requests(struct request_queue *, struct request *, + struct request *); +extern void elv_merged_request(struct request_queue *, struct request *, + enum elv_merge); +extern void elv_bio_merged(struct request_queue *q, struct request *, + struct bio *); +extern bool elv_attempt_insert_merge(struct request_queue *, struct request *); +extern void elv_requeue_request(struct request_queue *, struct request *); +extern struct request *elv_former_request(struct request_queue *, struct request *); +extern struct request *elv_latter_request(struct request_queue *, struct request *); +extern int elv_may_queue(struct request_queue *, unsigned int); +extern void elv_completed_request(struct request_queue *, struct request *); +extern int elv_set_request(struct request_queue *q, struct request *rq, + struct bio *bio, gfp_t gfp_mask); +extern void elv_put_request(struct request_queue *, struct request *); +extern void elv_drain_elevator(struct request_queue *); + +/* + * io scheduler registration + */ +extern void __init load_default_elevator_module(void); +extern int elv_register(struct elevator_type *); +extern void elv_unregister(struct elevator_type *); + +/* + * io scheduler sysfs switching + */ +extern ssize_t elv_iosched_show(struct request_queue *, char *); +extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); + +extern bool elv_bio_merge_ok(struct request *, struct bio *); +extern struct elevator_queue *elevator_alloc(struct request_queue *, + struct elevator_type *); + +/* + * Helper functions. + */ +extern struct request *elv_rb_former_request(struct request_queue *, struct request *); +extern struct request *elv_rb_latter_request(struct request_queue *, struct request *); + +/* + * rb support functions. 
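Tying the registration interface above together, a blk-mq I/O scheduler is declared as a struct elevator_type with .ops.mq filled in and handed to elv_register(). In the sketch below the callbacks are only declared, not implemented, and every example_* name is invented; a real scheduler would also provide merge and completion hooks as needed.

#include <linux/elevator.h>
#include <linux/module.h>

/* Hypothetical scheduler callbacks, assumed to be implemented elsewhere. */
extern int example_init_sched(struct request_queue *q, struct elevator_type *e);
extern void example_exit_sched(struct elevator_queue *eq);
extern void example_insert_requests(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list, bool at_head);
extern struct request *example_dispatch_request(struct blk_mq_hw_ctx *hctx);
extern bool example_has_work(struct blk_mq_hw_ctx *hctx);

static struct elevator_type example_sched = {
        .ops.mq = {
                .init_sched             = example_init_sched,
                .exit_sched             = example_exit_sched,
                .insert_requests        = example_insert_requests,
                .dispatch_request       = example_dispatch_request,
                .has_work               = example_has_work,
        },
        .uses_mq        = true,
        .elevator_name  = "example-sched",
        .elevator_owner = THIS_MODULE,
};

static int __init example_sched_init(void)
{
        return elv_register(&example_sched);
}
module_init(example_sched_init);

static void __exit example_sched_exit(void)
{
        elv_unregister(&example_sched);
}
module_exit(example_sched_exit);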
+ */ +extern void elv_rb_add(struct rb_root *, struct request *); +extern void elv_rb_del(struct rb_root *, struct request *); +extern struct request *elv_rb_find(struct rb_root *, sector_t); + +/* + * Insertion selection + */ +#define ELEVATOR_INSERT_FRONT 1 +#define ELEVATOR_INSERT_BACK 2 +#define ELEVATOR_INSERT_SORT 3 +#define ELEVATOR_INSERT_REQUEUE 4 +#define ELEVATOR_INSERT_FLUSH 5 +#define ELEVATOR_INSERT_SORT_MERGE 6 + +/* + * return values from elevator_may_queue_fn + */ +enum { + ELV_MQUEUE_MAY, + ELV_MQUEUE_NO, + ELV_MQUEUE_MUST, +}; + +#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) +#define rb_entry_rq(node) rb_entry((node), struct request, rb_node) + +#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) +#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist) + +#else /* CONFIG_BLOCK */ + +static inline void load_default_elevator_module(void) { } + +#endif /* CONFIG_BLOCK */ +#endif diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h new file mode 100644 index 000000000..386440317 --- /dev/null +++ b/include/linux/elf-fdpic.h @@ -0,0 +1,51 @@ +/* FDPIC ELF load map + * + * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_ELF_FDPIC_H +#define _LINUX_ELF_FDPIC_H + +#include + +/* + * binfmt binary parameters structure + */ +struct elf_fdpic_params { + struct elfhdr hdr; /* ref copy of ELF header */ + struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */ + struct elf32_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */ + unsigned long elfhdr_addr; /* mapped ELF header user address */ + unsigned long ph_addr; /* mapped PT_PHDR user address */ + unsigned long map_addr; /* mapped loadmap user address */ + unsigned long entry_addr; /* mapped entry user address */ + unsigned long stack_size; /* stack size requested (PT_GNU_STACK) */ + unsigned long dynamic_addr; /* mapped PT_DYNAMIC user address */ + unsigned long load_addr; /* user address at which to map binary */ + unsigned long flags; +#define ELF_FDPIC_FLAG_ARRANGEMENT 0x0000000f /* PT_LOAD arrangement flags */ +#define ELF_FDPIC_FLAG_INDEPENDENT 0x00000000 /* PT_LOADs can be put anywhere */ +#define ELF_FDPIC_FLAG_HONOURVADDR 0x00000001 /* PT_LOAD.vaddr must be honoured */ +#define ELF_FDPIC_FLAG_CONSTDISP 0x00000002 /* PT_LOADs require constant + * displacement */ +#define ELF_FDPIC_FLAG_CONTIGUOUS 0x00000003 /* PT_LOADs should be contiguous */ +#define ELF_FDPIC_FLAG_EXEC_STACK 0x00000010 /* T if stack to be executable */ +#define ELF_FDPIC_FLAG_NOEXEC_STACK 0x00000020 /* T if stack not to be executable */ +#define ELF_FDPIC_FLAG_EXECUTABLE 0x00000040 /* T if this object is the executable */ +#define ELF_FDPIC_FLAG_PRESENT 0x80000000 /* T if this object is present */ +}; + +#ifdef CONFIG_MMU +extern void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params, + struct elf_fdpic_params *interp_params, + unsigned long *start_stack, + unsigned long *start_brk); +#endif + +#endif /* _LINUX_ELF_FDPIC_H */ diff --git a/include/linux/elf-randomize.h b/include/linux/elf-randomize.h new file mode 100644 index 000000000..da0dbb7b6 --- /dev/null +++ b/include/linux/elf-randomize.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ +#ifndef _ELF_RANDOMIZE_H +#define _ELF_RANDOMIZE_H + +struct mm_struct; + +#ifndef CONFIG_ARCH_HAS_ELF_RANDOMIZE +static inline unsigned long arch_mmap_rnd(void) { return 0; } +# if defined(arch_randomize_brk) && defined(CONFIG_COMPAT_BRK) +# define compat_brk_randomized +# endif +# ifndef arch_randomize_brk +# define arch_randomize_brk(mm) (mm->brk) +# endif +#else +extern unsigned long arch_mmap_rnd(void); +extern unsigned long arch_randomize_brk(struct mm_struct *mm); +# ifdef CONFIG_COMPAT_BRK +# define compat_brk_randomized +# endif +#endif + +#endif diff --git a/include/linux/elf.h b/include/linux/elf.h new file mode 100644 index 000000000..e3649b3e9 --- /dev/null +++ b/include/linux/elf.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ELF_H +#define _LINUX_ELF_H + +#include +#include + +#ifndef elf_read_implies_exec + /* Executables for which elf_read_implies_exec() returns TRUE will + have the READ_IMPLIES_EXEC personality flag set automatically. + Override in asm/elf.h as needed. */ +# define elf_read_implies_exec(ex, have_pt_gnu_stack) 0 +#endif +#ifndef SET_PERSONALITY +#define SET_PERSONALITY(ex) \ + set_personality(PER_LINUX | (current->personality & (~PER_MASK))) +#endif + +#ifndef SET_PERSONALITY2 +#define SET_PERSONALITY2(ex, state) \ + SET_PERSONALITY(ex) +#endif + +#if ELF_CLASS == ELFCLASS32 + +extern Elf32_Dyn _DYNAMIC []; +#define elfhdr elf32_hdr +#define elf_phdr elf32_phdr +#define elf_shdr elf32_shdr +#define elf_note elf32_note +#define elf_addr_t Elf32_Off +#define Elf_Half Elf32_Half +#define Elf_Word Elf32_Word + +#else + +extern Elf64_Dyn _DYNAMIC []; +#define elfhdr elf64_hdr +#define elf_phdr elf64_phdr +#define elf_shdr elf64_shdr +#define elf_note elf64_note +#define elf_addr_t Elf64_Off +#define Elf_Half Elf64_Half +#define Elf_Word Elf64_Word + +#endif + +/* Optional callbacks to write extra ELF notes. */ +struct file; +struct coredump_params; + +#ifndef ARCH_HAVE_EXTRA_ELF_NOTES +static inline int elf_coredump_extra_notes_size(void) { return 0; } +static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; } +#else +extern int elf_coredump_extra_notes_size(void); +extern int elf_coredump_extra_notes_write(struct coredump_params *cprm); +#endif +#endif /* _LINUX_ELF_H */ diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h new file mode 100644 index 000000000..b5f2efdd0 --- /dev/null +++ b/include/linux/elfcore-compat.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ELFCORE_COMPAT_H +#define _LINUX_ELFCORE_COMPAT_H + +#include +#include +#include + +/* + * Make sure these layouts match the linux/elfcore.h native definitions. 
+ */ + +struct compat_elf_siginfo +{ + compat_int_t si_signo; + compat_int_t si_code; + compat_int_t si_errno; +}; + +struct compat_elf_prstatus +{ + struct compat_elf_siginfo pr_info; + short pr_cursig; + compat_ulong_t pr_sigpend; + compat_ulong_t pr_sighold; + compat_pid_t pr_pid; + compat_pid_t pr_ppid; + compat_pid_t pr_pgrp; + compat_pid_t pr_sid; + struct compat_timeval pr_utime; + struct compat_timeval pr_stime; + struct compat_timeval pr_cutime; + struct compat_timeval pr_cstime; + compat_elf_gregset_t pr_reg; +#ifdef CONFIG_BINFMT_ELF_FDPIC + compat_ulong_t pr_exec_fdpic_loadmap; + compat_ulong_t pr_interp_fdpic_loadmap; +#endif + compat_int_t pr_fpvalid; +}; + +struct compat_elf_prpsinfo +{ + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + compat_ulong_t pr_flag; + __compat_uid_t pr_uid; + __compat_gid_t pr_gid; + compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; + char pr_fname[16]; + char pr_psargs[ELF_PRARGSZ]; +}; + +#endif /* _LINUX_ELFCORE_COMPAT_H */ diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h new file mode 100644 index 000000000..9d249dfba --- /dev/null +++ b/include/linux/elfcore.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ELFCORE_H +#define _LINUX_ELFCORE_H + +#include +#include +#include + +#include +#include + +struct coredump_params; + +static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs) +{ +#ifdef ELF_CORE_COPY_REGS + ELF_CORE_COPY_REGS((*elfregs), regs) +#else + BUG_ON(sizeof(*elfregs) != sizeof(*regs)); + *(struct pt_regs *)elfregs = *regs; +#endif +} + +static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs) +{ +#ifdef ELF_CORE_COPY_KERNEL_REGS + ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs); +#else + elf_core_copy_regs(elfregs, regs); +#endif +} + +static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs) +{ +#if defined (ELF_CORE_COPY_TASK_REGS) + return ELF_CORE_COPY_TASK_REGS(t, elfregs); +#elif defined (task_pt_regs) + elf_core_copy_regs(elfregs, task_pt_regs(t)); +#endif + return 0; +} + +extern int dump_fpu (struct pt_regs *, elf_fpregset_t *); + +static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_regs *regs, elf_fpregset_t *fpu) +{ +#ifdef ELF_CORE_COPY_FPREGS + return ELF_CORE_COPY_FPREGS(t, fpu); +#else + return dump_fpu(regs, fpu); +#endif +} + +#ifdef ELF_CORE_COPY_XFPREGS +static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) +{ + return ELF_CORE_COPY_XFPREGS(t, xfpu); +} +#endif + +#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64) +/* + * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out + * extra segments containing the gate DSO contents. Dumping its + * contents makes post-mortem fully interpretable later without matching up + * the same kernel and hardware config to see what PC values meant. + * Dumping its extra ELF program headers includes all the other information + * a debugger needs to easily find how the gate DSO was being used. 
+ */ +extern Elf_Half elf_core_extra_phdrs(void); +extern int +elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset); +extern int +elf_core_write_extra_data(struct coredump_params *cprm); +extern size_t elf_core_extra_data_size(void); +#else +static inline Elf_Half elf_core_extra_phdrs(void) +{ + return 0; +} + +static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) +{ + return 1; +} + +static inline int elf_core_write_extra_data(struct coredump_params *cprm) +{ + return 1; +} + +static inline size_t elf_core_extra_data_size(void) +{ + return 0; +} +#endif + +#endif /* _LINUX_ELFCORE_H */ diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h new file mode 100644 index 000000000..7fdd7f355 --- /dev/null +++ b/include/linux/elfnote.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ELFNOTE_H +#define _LINUX_ELFNOTE_H +/* + * Helper macros to generate ELF Note structures, which are put into a + * PT_NOTE segment of the final vmlinux image. These are useful for + * including name-value pairs of metadata into the kernel binary (or + * modules?) for use by external programs. + * + * Each note has three parts: a name, a type and a desc. The name is + * intended to distinguish the note's originator, so it would be a + * company, project, subsystem, etc; it must be in a suitable form for + * use in a section name. The type is an integer which is used to tag + * the data, and is considered to be within the "name" namespace (so + * "FooCo"'s type 42 is distinct from "BarProj"'s type 42). The + * "desc" field is the actual data. There are no constraints on the + * desc field's contents, though typically they're fairly small. + * + * All notes from a given NAME are put into a section named + * .note.NAME. When the kernel image is finally linked, all the notes + * are packed into a single .notes section, which is mapped into the + * PT_NOTE segment. Because notes for a given name are grouped into + * the same section, they'll all be adjacent the output file. + * + * This file defines macros for both C and assembler use. Their + * syntax is slightly different, but they're semantically similar. + * + * See the ELF specification for more detail about ELF notes. + */ + +#ifdef __ASSEMBLER__ +/* + * Generate a structure with the same shape as Elf{32,64}_Nhdr (which + * turn out to be the same size and shape), followed by the name and + * desc data with appropriate padding. The 'desctype' argument is the + * assembler pseudo op defining the type of the data e.g. .asciz while + * 'descdata' is the data itself e.g. "hello, world". + * + * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two") + * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef) + */ +#define ELFNOTE_START(name, type, flags) \ +.pushsection .note.name, flags,@note ; \ + .balign 4 ; \ + .long 2f - 1f /* namesz */ ; \ + .long 4484f - 3f /* descsz */ ; \ + .long type ; \ +1:.asciz #name ; \ +2:.balign 4 ; \ +3: + +#define ELFNOTE_END \ +4484:.balign 4 ; \ +.popsection ; + +#define ELFNOTE(name, type, desc) \ + ELFNOTE_START(name, type, "a") \ + desc ; \ + ELFNOTE_END + +#else /* !__ASSEMBLER__ */ +#include +/* + * Use an anonymous structure which matches the shape of + * Elf{32,64}_Nhdr, but includes the name and desc data. The size and + * type of name and desc depend on the macro arguments. "name" must + * be a literal string, and "desc" must be passed by value. You may + * only define one note per line, since __LINE__ is used to generate + * unique symbols. 
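A C-side usage sketch of the ELFNOTE32() wrapper defined just below; the vendor name, type number and payload are invented. The note lands in a ".note.ExampleCo" section and is folded into the final .notes segment at link time.

#include <linux/elfnote.h>

/* Emits a note named "ExampleCo", type 7, with a 32-bit payload. */
ELFNOTE32("ExampleCo", 7, 0x00010002);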
+ */ +#define _ELFNOTE_PASTE(a,b) a##b +#define _ELFNOTE(size, name, unique, type, desc) \ + static const struct { \ + struct elf##size##_note _nhdr; \ + unsigned char _name[sizeof(name)] \ + __attribute__((aligned(sizeof(Elf##size##_Word)))); \ + typeof(desc) _desc \ + __attribute__((aligned(sizeof(Elf##size##_Word)))); \ + } _ELFNOTE_PASTE(_note_, unique) \ + __used \ + __attribute__((section(".note." name), \ + aligned(sizeof(Elf##size##_Word)), \ + unused)) = { \ + { \ + sizeof(name), \ + sizeof(desc), \ + type, \ + }, \ + name, \ + desc \ + } +#define ELFNOTE(size, name, type, desc) \ + _ELFNOTE(size, name, __LINE__, type, desc) + +#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc) +#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc) +#endif /* __ASSEMBLER__ */ + +#endif /* _LINUX_ELFNOTE_H */ diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h new file mode 100644 index 000000000..a4cf57cd0 --- /dev/null +++ b/include/linux/enclosure.h @@ -0,0 +1,146 @@ +/* + * Enclosure Services + * + * Copyright (C) 2008 James Bottomley + * +**----------------------------------------------------------------------------- +** +** This program is free software; you can redistribute it and/or +** modify it under the terms of the GNU General Public License +** version 2 as published by the Free Software Foundation. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +** You should have received a copy of the GNU General Public License +** along with this program; if not, write to the Free Software +** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +** +**----------------------------------------------------------------------------- +*/ +#ifndef _LINUX_ENCLOSURE_H_ +#define _LINUX_ENCLOSURE_H_ + +#include +#include + +/* A few generic types ... 
taken from ses-2 */ +enum enclosure_component_type { + ENCLOSURE_COMPONENT_DEVICE = 0x01, + ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07, + ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14, + ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15, + ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, + ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18, +}; + +/* ses-2 common element status */ +enum enclosure_status { + ENCLOSURE_STATUS_UNSUPPORTED = 0, + ENCLOSURE_STATUS_OK, + ENCLOSURE_STATUS_CRITICAL, + ENCLOSURE_STATUS_NON_CRITICAL, + ENCLOSURE_STATUS_UNRECOVERABLE, + ENCLOSURE_STATUS_NOT_INSTALLED, + ENCLOSURE_STATUS_UNKNOWN, + ENCLOSURE_STATUS_UNAVAILABLE, + /* last element for counting purposes */ + ENCLOSURE_STATUS_MAX +}; + +/* SFF-8485 activity light settings */ +enum enclosure_component_setting { + ENCLOSURE_SETTING_DISABLED = 0, + ENCLOSURE_SETTING_ENABLED = 1, + ENCLOSURE_SETTING_BLINK_A_ON_OFF = 2, + ENCLOSURE_SETTING_BLINK_A_OFF_ON = 3, + ENCLOSURE_SETTING_BLINK_B_ON_OFF = 6, + ENCLOSURE_SETTING_BLINK_B_OFF_ON = 7, +}; + +struct enclosure_device; +struct enclosure_component; +struct enclosure_component_callbacks { + void (*get_status)(struct enclosure_device *, + struct enclosure_component *); + int (*set_status)(struct enclosure_device *, + struct enclosure_component *, + enum enclosure_status); + void (*get_fault)(struct enclosure_device *, + struct enclosure_component *); + int (*set_fault)(struct enclosure_device *, + struct enclosure_component *, + enum enclosure_component_setting); + void (*get_active)(struct enclosure_device *, + struct enclosure_component *); + int (*set_active)(struct enclosure_device *, + struct enclosure_component *, + enum enclosure_component_setting); + void (*get_locate)(struct enclosure_device *, + struct enclosure_component *); + int (*set_locate)(struct enclosure_device *, + struct enclosure_component *, + enum enclosure_component_setting); + void (*get_power_status)(struct enclosure_device *, + struct enclosure_component *); + int (*set_power_status)(struct enclosure_device *, + struct enclosure_component *, + int); + int (*show_id)(struct enclosure_device *, char *buf); +}; + + +struct enclosure_component { + void *scratch; + struct device cdev; + struct device *dev; + enum enclosure_component_type type; + int number; + int fault; + int active; + int locate; + int slot; + enum enclosure_status status; + int power_status; +}; + +struct enclosure_device { + void *scratch; + struct list_head node; + struct device edev; + struct enclosure_component_callbacks *cb; + int components; + struct enclosure_component component[0]; +}; + +static inline struct enclosure_device * +to_enclosure_device(struct device *dev) +{ + return container_of(dev, struct enclosure_device, edev); +} + +static inline struct enclosure_component * +to_enclosure_component(struct device *dev) +{ + return container_of(dev, struct enclosure_component, cdev); +} + +struct enclosure_device * +enclosure_register(struct device *, const char *, int, + struct enclosure_component_callbacks *); +void enclosure_unregister(struct enclosure_device *); +struct enclosure_component * +enclosure_component_alloc(struct enclosure_device *, unsigned int, + enum enclosure_component_type, const char *); +int enclosure_component_register(struct enclosure_component *); +int enclosure_add_device(struct enclosure_device *enclosure, int component, + struct device *dev); +int enclosure_remove_device(struct enclosure_device *, struct device *); +struct enclosure_device *enclosure_find(struct device *dev, + struct enclosure_device 
*start); +int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *), + void *data); + +#endif /* _LINUX_ENCLOSURE_H_ */ diff --git a/include/linux/err.h b/include/linux/err.h new file mode 100644 index 000000000..87be24350 --- /dev/null +++ b/include/linux/err.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ERR_H +#define _LINUX_ERR_H + +#include +#include + +#include + +/* + * Kernel pointers have redundant information, so we can use a + * scheme where we can return either an error code or a normal + * pointer with the same return value. + * + * This should be a per-architecture thing, to allow different + * error and pointer decisions. + */ +#define MAX_ERRNO 4095 + +#ifndef __ASSEMBLY__ + +#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO) + +static inline void * __must_check ERR_PTR(long error) +{ + return (void *) error; +} + +static inline long __must_check PTR_ERR(__force const void *ptr) +{ + return (long) ptr; +} + +static inline bool __must_check IS_ERR(__force const void *ptr) +{ + return IS_ERR_VALUE((unsigned long)ptr); +} + +static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr) +{ + return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr); +} + +/** + * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type + * @ptr: The pointer to cast. + * + * Explicitly cast an error-valued pointer to another pointer type in such a + * way as to make it clear that's what's going on. + */ +static inline void * __must_check ERR_CAST(__force const void *ptr) +{ + /* cast away the const */ + return (void *) ptr; +} + +static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr) +{ + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + else + return 0; +} + +/* Deprecated */ +#define PTR_RET(p) PTR_ERR_OR_ZERO(p) + +#endif + +#endif /* _LINUX_ERR_H */ diff --git a/include/linux/errno.h b/include/linux/errno.h new file mode 100644 index 000000000..3cba62757 --- /dev/null +++ b/include/linux/errno.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ERRNO_H +#define _LINUX_ERRNO_H + +#include + + +/* + * These should never be seen by user programs. To return one of ERESTART* + * codes, signal_pending() MUST be set. Note that ptrace can observe these + * at syscall exit tracing, but they will never be left for the debugged user + * process to see. + */ +#define ERESTARTSYS 512 +#define ERESTARTNOINTR 513 +#define ERESTARTNOHAND 514 /* restart if no handler.. 
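The err.h helpers a little further up rely on the top MAX_ERRNO values of the address space never being valid kernel pointers, so a single pointer return can carry either a real object or a negative errno. A usage sketch, with both functions invented for illustration:

#include <linux/err.h>
#include <linux/slab.h>

struct example_obj {
        int id;
};

/* Hypothetical constructor: returns an object or an encoded errno. */
static struct example_obj *example_obj_create(int id)
{
        struct example_obj *obj;

        if (id < 0)
                return ERR_PTR(-EINVAL);

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        obj->id = id;
        return obj;
}

/* Caller side: unpack with IS_ERR()/PTR_ERR(). */
static int example_use(int id)
{
        struct example_obj *obj = example_obj_create(id);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        kfree(obj);
        return 0;
}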
*/ +#define ENOIOCTLCMD 515 /* No ioctl command */ +#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ +#define EPROBE_DEFER 517 /* Driver requests probe retry */ +#define EOPENSTALE 518 /* open found a stale dentry */ + +/* Defined for the NFSv3 protocol */ +#define EBADHANDLE 521 /* Illegal NFS file handle */ +#define ENOTSYNC 522 /* Update synchronization mismatch */ +#define EBADCOOKIE 523 /* Cookie is stale */ +#define ENOTSUPP 524 /* Operation is not supported */ +#define ETOOSMALL 525 /* Buffer or request is too small */ +#define ESERVERFAULT 526 /* An untranslatable error occurred */ +#define EBADTYPE 527 /* Type not supported by server */ +#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ +#define EIOCBQUEUED 529 /* iocb queued, will get completion event */ +#define ERECALLCONFLICT 530 /* conflict with recalled state */ + +#endif diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h new file mode 100644 index 000000000..280c61ecb --- /dev/null +++ b/include/linux/error-injection.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ERROR_INJECTION_H +#define _LINUX_ERROR_INJECTION_H + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION + +#include + +extern bool within_error_injection_list(unsigned long addr); +extern int get_injectable_error_type(unsigned long addr); + +#else /* !CONFIG_FUNCTION_ERROR_INJECTION */ + +#include +static inline bool within_error_injection_list(unsigned long addr) +{ + return false; +} + +static inline int get_injectable_error_type(unsigned long addr) +{ + return EI_ETYPE_NONE; +} + +#endif + +#endif /* _LINUX_ERROR_INJECTION_H */ diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h new file mode 100644 index 000000000..be1cf7291 --- /dev/null +++ b/include/linux/errqueue.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ERRQUEUE_H +#define _LINUX_ERRQUEUE_H 1 + + +#include +#if IS_ENABLED(CONFIG_IPV6) +#include +#endif +#include + +#define SKB_EXT_ERR(skb) ((struct sock_exterr_skb *) ((skb)->cb)) + +struct sock_exterr_skb { + union { + struct inet_skb_parm h4; +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_skb_parm h6; +#endif + } header; + struct sock_extended_err ee; + u16 addr_offset; + __be16 port; + u8 opt_stats:1, + unused:7; +}; + +#endif diff --git a/include/linux/errseq.h b/include/linux/errseq.h new file mode 100644 index 000000000..fc2777770 --- /dev/null +++ b/include/linux/errseq.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * See Documentation/core-api/errseq.rst and lib/errseq.c + */ +#ifndef _LINUX_ERRSEQ_H +#define _LINUX_ERRSEQ_H + +typedef u32 errseq_t; + +errseq_t errseq_set(errseq_t *eseq, int err); +errseq_t errseq_sample(errseq_t *eseq); +int errseq_check(errseq_t *eseq, errseq_t since); +int errseq_check_and_advance(errseq_t *eseq, errseq_t *since); +#endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h new file mode 100644 index 000000000..e1e9eff09 --- /dev/null +++ b/include/linux/etherdevice.h @@ -0,0 +1,525 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. NET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Ethernet handlers. + * + * Version: @(#)eth.h 1.0.4 05/13/93 + * + * Authors: Ross Biro + * Fred N. 
van Kempen, + * + * Relocated to include/linux where it belongs by Alan Cox + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ +#ifndef _LINUX_ETHERDEVICE_H +#define _LINUX_ETHERDEVICE_H + +#include +#include +#include +#include +#include + +#ifdef __KERNEL__ +struct device; +int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); +unsigned char *arch_get_platform_mac_address(void); +u32 eth_get_headlen(void *data, unsigned int max_len); +__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); +extern const struct header_ops eth_header_ops; + +int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, + const void *daddr, const void *saddr, unsigned len); +int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); +int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, + __be16 type); +void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, + const unsigned char *haddr); +int eth_prepare_mac_addr_change(struct net_device *dev, void *p); +void eth_commit_mac_addr_change(struct net_device *dev, void *p); +int eth_mac_addr(struct net_device *dev, void *p); +int eth_change_mtu(struct net_device *dev, int new_mtu); +int eth_validate_addr(struct net_device *dev); + +struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, + unsigned int rxqs); +#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) +#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) + +struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, + unsigned int txqs, + unsigned int rxqs); +#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1) + +struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb); +int eth_gro_complete(struct sk_buff *skb, int nhoff); + +/* Reserved Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = +{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +#define eth_stp_addr eth_reserved_addr_base + +/** + * is_link_local_ether_addr - Determine if given Ethernet address is link-local + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per + * IEEE 802.1Q 8.6.3 Frame filtering. + * + * Please note: addr must be aligned to u16. + */ +static inline bool is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return (((*(const u32 *)addr) ^ (*(const u32 *)b)) | + (__force int)((a[2] ^ b[2]) & m)) == 0; +#else + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +#endif +} + +/** + * is_zero_ether_addr - Determine if give Ethernet address is all zeros. + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is all zeroes. + * + * Please note: addr must be aligned to u16. 
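+ *
+ * A minimal usage sketch (editorial illustration, not part of the
+ * upstream header; "mac" is a made-up local variable):
+ *
+ *	u8 mac[ETH_ALEN] = { 0 };
+ *
+ *	if (is_zero_ether_addr(mac))
+ *		eth_random_addr(mac);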
+ */ +static inline bool is_zero_ether_addr(const u8 *addr) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0; +#else + return (*(const u16 *)(addr + 0) | + *(const u16 *)(addr + 2) | + *(const u16 *)(addr + 4)) == 0; +#endif +} + +/** + * is_multicast_ether_addr - Determine if the Ethernet address is a multicast. + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is a multicast address. + * By definition the broadcast address is also a multicast address. + */ +static inline bool is_multicast_ether_addr(const u8 *addr) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + u32 a = *(const u32 *)addr; +#else + u16 a = *(const u16 *)addr; +#endif +#ifdef __BIG_ENDIAN + return 0x01 & (a >> ((sizeof(a) * 8) - 8)); +#else + return 0x01 & a; +#endif +} + +static inline bool is_multicast_ether_addr_64bits(const u8 *addr) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 +#ifdef __BIG_ENDIAN + return 0x01 & ((*(const u64 *)addr) >> 56); +#else + return 0x01 & (*(const u64 *)addr); +#endif +#else + return is_multicast_ether_addr(addr); +#endif +} + +/** + * is_local_ether_addr - Determine if the Ethernet address is locally-assigned one (IEEE 802). + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is a local address. + */ +static inline bool is_local_ether_addr(const u8 *addr) +{ + return 0x02 & addr[0]; +} + +/** + * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is the broadcast address. + * + * Please note: addr must be aligned to u16. + */ +static inline bool is_broadcast_ether_addr(const u8 *addr) +{ + return (*(const u16 *)(addr + 0) & + *(const u16 *)(addr + 2) & + *(const u16 *)(addr + 4)) == 0xffff; +} + +/** + * is_unicast_ether_addr - Determine if the Ethernet address is unicast + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is a unicast address. + */ +static inline bool is_unicast_ether_addr(const u8 *addr) +{ + return !is_multicast_ether_addr(addr); +} + +/** + * is_valid_ether_addr - Determine if the given Ethernet address is valid + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not + * a multicast address, and is not FF:FF:FF:FF:FF:FF. + * + * Return true if the address is valid. + * + * Please note: addr must be aligned to u16. + */ +static inline bool is_valid_ether_addr(const u8 *addr) +{ + /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to + * explicitly check for it here. */ + return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr); +} + +/** + * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol + * @proto: Ethertype/length value to be tested + * + * Check that the value from the Ethertype/length field is a valid Ethertype. + * + * Return true if the valid is an 802.3 supported Ethertype. 
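+ *
+ * Illustrative usage sketch (editorial, not part of the upstream header;
+ * handle_ethertype_frame() is a hypothetical helper): a receive path that
+ * needs to tell an Ethertype apart from an 802.3 length field might do
+ *
+ *	if (eth_proto_is_802_3(eth_hdr(skb)->h_proto))
+ *		handle_ethertype_frame(skb);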
+ */ +static inline bool eth_proto_is_802_3(__be16 proto) +{ +#ifndef __BIG_ENDIAN + /* if CPU is little endian mask off bits representing LSB */ + proto &= htons(0xFF00); +#endif + /* cast both to u16 and compare since LSB can be ignored */ + return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN); +} + +/** + * eth_random_addr - Generate software assigned random Ethernet address + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Generate a random Ethernet address (MAC) that is not multicast + * and has the local assigned bit set. + */ +static inline void eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + addr[0] &= 0xfe; /* clear multicast bit */ + addr[0] |= 0x02; /* set local assignment bit (IEEE802) */ +} + +#define random_ether_addr(addr) eth_random_addr(addr) + +/** + * eth_broadcast_addr - Assign broadcast address + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Assign the broadcast address to the given address array. + */ +static inline void eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} + +/** + * eth_zero_addr - Assign zero address + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Assign the zero address to the given address array. + */ +static inline void eth_zero_addr(u8 *addr) +{ + memset(addr, 0x00, ETH_ALEN); +} + +/** + * eth_hw_addr_random - Generate software assigned random Ethernet and + * set device flag + * @dev: pointer to net_device structure + * + * Generate a random Ethernet address (MAC) to be used by a net device + * and set addr_assign_type so the state can be read by sysfs and be + * used by userspace. + */ +static inline void eth_hw_addr_random(struct net_device *dev) +{ + dev->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(dev->dev_addr); +} + +/** + * ether_addr_copy - Copy an Ethernet address + * @dst: Pointer to a six-byte array Ethernet address destination + * @src: Pointer to a six-byte array Ethernet address source + * + * Please note: dst & src must both be aligned to u16. + */ +static inline void ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} + +/** + * eth_hw_addr_inherit - Copy dev_addr from another net_device + * @dst: pointer to net_device to copy dev_addr to + * @src: pointer to net_device to copy dev_addr from + * + * Copy the Ethernet address from one net_device to another along with + * the address attributes (addr_assign_type). + */ +static inline void eth_hw_addr_inherit(struct net_device *dst, + struct net_device *src) +{ + dst->addr_assign_type = src->addr_assign_type; + ether_addr_copy(dst->dev_addr, src->dev_addr); +} + +/** + * ether_addr_equal - Compare two Ethernet addresses + * @addr1: Pointer to a six-byte array containing the Ethernet address + * @addr2: Pointer other six-byte array containing the Ethernet address + * + * Compare two Ethernet addresses, returns true if equal + * + * Please note: addr1 & addr2 must both be aligned to u16. 
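+ *
+ * Illustrative sketch (editorial, not from the upstream header;
+ * drop_frame() is a made-up helper): filtering on the destination MAC
+ * could look like
+ *
+ *	if (!ether_addr_equal(eth_hdr(skb)->h_dest, dev->dev_addr))
+ *		drop_frame(skb);
+ *
+ * Both operands in this sketch are expected to meet the u16 alignment
+ * requirement noted above.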
+ */ +static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) | + ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4))); + + return fold == 0; +#else + const u16 *a = (const u16 *)addr1; + const u16 *b = (const u16 *)addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0; +#endif +} + +/** + * ether_addr_equal_64bits - Compare two Ethernet addresses + * @addr1: Pointer to an array of 8 bytes + * @addr2: Pointer to an other array of 8 bytes + * + * Compare two Ethernet addresses, returns true if equal, false otherwise. + * + * The function doesn't need any conditional branches and possibly uses + * word memory accesses on CPU allowing cheap unaligned memory reads. + * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 } + * + * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits. + */ + +static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2); + +#ifdef __BIG_ENDIAN + return (fold >> 16) == 0; +#else + return (fold << 16) == 0; +#endif +#else + return ether_addr_equal(addr1, addr2); +#endif +} + +/** + * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses + * @addr1: Pointer to a six-byte array containing the Ethernet address + * @addr2: Pointer other six-byte array containing the Ethernet address + * + * Compare two Ethernet addresses, returns true if equal + * + * Please note: Use only when any Ethernet address may not be u16 aligned. + */ +static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return ether_addr_equal(addr1, addr2); +#else + return memcmp(addr1, addr2, ETH_ALEN) == 0; +#endif +} + +/** + * ether_addr_equal_masked - Compare two Ethernet addresses with a mask + * @addr1: Pointer to a six-byte array containing the 1st Ethernet address + * @addr2: Pointer to a six-byte array containing the 2nd Ethernet address + * @mask: Pointer to a six-byte array containing the Ethernet address bitmask + * + * Compare two Ethernet addresses with a mask, returns true if for every bit + * set in the bitmask the equivalent bits in the ethernet addresses are equal. + * Using a mask with all bits set is a slower ether_addr_equal. + */ +static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, + const u8 *mask) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) { + if ((addr1[i] ^ addr2[i]) & mask[i]) + return false; + } + + return true; +} + +/** + * ether_addr_to_u64 - Convert an Ethernet address into a u64 value. + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return a u64 value of the address + */ +static inline u64 ether_addr_to_u64(const u8 *addr) +{ + u64 u = 0; + int i; + + for (i = 0; i < ETH_ALEN; i++) + u = u << 8 | addr[i]; + + return u; +} + +/** + * u64_to_ether_addr - Convert a u64 to an Ethernet address. 
+ * @u: u64 to convert to an Ethernet MAC address + * @addr: Pointer to a six-byte array to contain the Ethernet address + */ +static inline void u64_to_ether_addr(u64 u, u8 *addr) +{ + int i; + + for (i = ETH_ALEN - 1; i >= 0; i--) { + addr[i] = u & 0xff; + u = u >> 8; + } +} + +/** + * eth_addr_dec - Decrement the given MAC address + * + * @addr: Pointer to a six-byte array containing Ethernet address to decrement + */ +static inline void eth_addr_dec(u8 *addr) +{ + u64 u = ether_addr_to_u64(addr); + + u--; + u64_to_ether_addr(u, addr); +} + +/** + * is_etherdev_addr - Tell if given Ethernet address belongs to the device. + * @dev: Pointer to a device structure + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Compare passed address with all addresses of the device. Return true if the + * address if one of the device addresses. + * + * Note that this function calls ether_addr_equal_64bits() so take care of + * the right padding. + */ +static inline bool is_etherdev_addr(const struct net_device *dev, + const u8 addr[6 + 2]) +{ + struct netdev_hw_addr *ha; + bool res = false; + + rcu_read_lock(); + for_each_dev_addr(dev, ha) { + res = ether_addr_equal_64bits(addr, ha->addr); + if (res) + break; + } + rcu_read_unlock(); + return res; +} +#endif /* __KERNEL__ */ + +/** + * compare_ether_header - Compare two Ethernet headers + * @a: Pointer to Ethernet header + * @b: Pointer to Ethernet header + * + * Compare two Ethernet headers, returns 0 if equal. + * This assumes that the network header (i.e., IP header) is 4-byte + * aligned OR the platform can handle unaligned access. This is the + * case for all packets coming into netif_receive_skb or similar + * entry points. + */ + +static inline unsigned long compare_ether_header(const void *a, const void *b) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + unsigned long fold; + + /* + * We want to compare 14 bytes: + * [a0 ... a13] ^ [b0 ... b13] + * Use two long XOR, ORed together, with an overlap of two bytes. + * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] | + * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13] + * This means the [a6 a7] ^ [b6 b7] part is done two times. + */ + fold = *(unsigned long *)a ^ *(unsigned long *)b; + fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6); + return fold; +#else + u32 *a32 = (u32 *)((u8 *)a + 2); + u32 *b32 = (u32 *)((u8 *)b + 2); + + return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | + (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); +#endif +} + +/** + * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame + * @skb: Buffer to pad + * + * An Ethernet frame should have a minimum size of 60 bytes. This function + * takes short frames and pads them with zeros up to the 60 byte limit. + */ +static inline int eth_skb_pad(struct sk_buff *skb) +{ + return skb_put_padto(skb, ETH_ZLEN); +} + +#endif /* _LINUX_ETHERDEVICE_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h new file mode 100644 index 000000000..f8a2245b7 --- /dev/null +++ b/include/linux/ethtool.h @@ -0,0 +1,416 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ethtool.h: Defines for Linux ethtool. + * + * Copyright (C) 1998 David S. 
Miller (davem@redhat.com) + * Copyright 2001 Jeff Garzik + * Portions Copyright 2001 Sun Microsystems (thockin@sun.com) + * Portions Copyright 2002 Intel (eli.kupermann@intel.com, + * christopher.leech@intel.com, + * scott.feldman@intel.com) + * Portions Copyright (C) Sun Microsystems 2008 + */ +#ifndef _LINUX_ETHTOOL_H +#define _LINUX_ETHTOOL_H + +#include +#include +#include + +#ifdef CONFIG_COMPAT + +struct compat_ethtool_rx_flow_spec { + u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + compat_u64 ring_cookie; + u32 location; +}; + +struct compat_ethtool_rxnfc { + u32 cmd; + u32 flow_type; + compat_u64 data; + struct compat_ethtool_rx_flow_spec fs; + u32 rule_cnt; + u32 rule_locs[0]; +}; + +#endif /* CONFIG_COMPAT */ + +#include + +/** + * enum ethtool_phys_id_state - indicator state for physical identification + * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated + * @ETHTOOL_ID_ACTIVE: Physical ID indicator should be activated + * @ETHTOOL_ID_ON: LED should be turned on (used iff %ETHTOOL_ID_ACTIVE + * is not supported) + * @ETHTOOL_ID_OFF: LED should be turned off (used iff %ETHTOOL_ID_ACTIVE + * is not supported) + */ +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE, + ETHTOOL_ID_ACTIVE, + ETHTOOL_ID_ON, + ETHTOOL_ID_OFF +}; + +enum { + ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ + ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ + ETH_RSS_HASH_CRC32_BIT, /* Configurable RSS hash function - Crc32 */ + + /* + * Add your fresh new hash function bits above and remember to update + * rss_hash_func_strings[] in ethtool.c + */ + ETH_RSS_HASH_FUNCS_COUNT +}; + +#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) +#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT) + +#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) +#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) +#define ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32) + +#define ETH_RSS_HASH_UNKNOWN 0 +#define ETH_RSS_HASH_NO_CHANGE 0 + +struct net_device; + +/* Some generic methods drivers may use in their ethtool_ops */ +u32 ethtool_op_get_link(struct net_device *dev); +int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); + +/** + * ethtool_rxfh_indir_default - get default value for RX flow hash indirection + * @index: Index in RX flow hash indirection table + * @n_rx_rings: Number of RX rings to use + * + * This function provides the default policy for RX flow hash indirection. + */ +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} + +/* number of link mode bits/ulongs handled internally by kernel */ +#define __ETHTOOL_LINK_MODE_MASK_NBITS \ + (__ETHTOOL_LINK_MODE_LAST + 1) + +/* declare a link mode bitmap */ +#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ + DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) + +/* drivers must ignore base.cmd and base.link_mode_masks_nwords + * fields, but they are allowed to overwrite them (will be ignored). 
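+ *
+ * Rough driver-side sketch (editorial, not part of the upstream header;
+ * foo_get_link_ksettings() is a hypothetical callback) using the helper
+ * macros defined below:
+ *
+ *	static int foo_get_link_ksettings(struct net_device *dev,
+ *					  struct ethtool_link_ksettings *ks)
+ *	{
+ *		ethtool_link_ksettings_zero_link_mode(ks, supported);
+ *		ethtool_link_ksettings_add_link_mode(ks, supported,
+ *						     1000baseT_Full);
+ *		ks->base.speed = SPEED_1000;
+ *		ks->base.duplex = DUPLEX_FULL;
+ *		return 0;
+ *	}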
+ */ +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); + } link_modes; +}; + +/** + * ethtool_link_ksettings_zero_link_mode - clear link_ksettings link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name) \ + bitmap_zero((ptr)->link_modes.name, __ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * ethtool_link_ksettings_add_link_mode - set bit in link_ksettings + * link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + * @mode : one of the ETHTOOL_LINK_MODE_*_BIT + * (not atomic, no bound checking) + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \ + __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) + +/** + * ethtool_link_ksettings_del_link_mode - clear bit in link_ksettings + * link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + * @mode : one of the ETHTOOL_LINK_MODE_*_BIT + * (not atomic, no bound checking) + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) + +/** + * ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + * @mode : one of the ETHTOOL_LINK_MODE_*_BIT + * (not atomic, no bound checking) + * + * Returns true/false. + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \ + test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) + +extern int +__ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings); + +/** + * ethtool_intersect_link_masks - Given two link masks, AND them together + * @dst: first mask and where result is stored + * @src: second mask to intersect with + * + * Given two link mode masks, AND them together and save the result in dst. + */ +void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); + +void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, + u32 legacy_u32); + +/* return false if src had higher bits set. lower bits always updated. */ +bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, + const unsigned long *src); + +/** + * struct ethtool_ops - optional netdev operations + * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings + * API. Get various device settings including Ethernet link + * settings. The @cmd parameter is expected to have been cleared + * before get_settings is called. Returns a negative error code + * or zero. + * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings + * API. Set various device settings including Ethernet link + * settings. Returns a negative error code or zero. + * @get_drvinfo: Report driver/device information. Should only set the + * @driver, @version, @fw_version and @bus_info fields. If not + * implemented, the @driver and @bus_info fields will be filled in + * according to the netdev's parent device. 
+ * @get_regs_len: Get buffer length required for @get_regs + * @get_regs: Get device registers + * @get_wol: Report whether Wake-on-Lan is enabled + * @set_wol: Turn Wake-on-Lan on or off. Returns a negative error code + * or zero. + * @get_msglevel: Report driver message level. This should be the value + * of the @msg_enable field used by netif logging functions. + * @set_msglevel: Set driver message level + * @nway_reset: Restart autonegotiation. Returns a negative error code + * or zero. + * @get_link: Report whether physical link is up. Will only be called if + * the netdev is up. Should usually be set to ethtool_op_get_link(), + * which uses netif_carrier_ok(). + * @get_eeprom: Read data from the device EEPROM. + * Should fill in the magic field. Don't need to check len for zero + * or wraparound. Fill in the data argument with the eeprom values + * from offset to offset + len. Update len to the amount read. + * Returns an error or zero. + * @set_eeprom: Write data to the device EEPROM. + * Should validate the magic field. Don't need to check len for zero + * or wraparound. Update len to the amount written. Returns an error + * or zero. + * @get_coalesce: Get interrupt coalescing parameters. Returns a negative + * error code or zero. + * @set_coalesce: Set interrupt coalescing parameters. Returns a negative + * error code or zero. + * @get_ringparam: Report ring sizes + * @set_ringparam: Set ring sizes. Returns a negative error code or zero. + * @get_pauseparam: Report pause parameters + * @set_pauseparam: Set pause parameters. Returns a negative error code + * or zero. + * @self_test: Run specified self-tests + * @get_strings: Return a set of strings that describe the requested objects + * @set_phys_id: Identify the physical devices, e.g. by flashing an LED + * attached to it. The implementation may update the indicator + * asynchronously or synchronously, but in either case it must return + * quickly. It is initially called with the argument %ETHTOOL_ID_ACTIVE, + * and must either activate asynchronous updates and return zero, return + * a negative error or return a positive frequency for synchronous + * indication (e.g. 1 for one on/off cycle per second). If it returns + * a frequency then it will be called again at intervals with the + * argument %ETHTOOL_ID_ON or %ETHTOOL_ID_OFF and should set the state of + * the indicator accordingly. Finally, it is called with the argument + * %ETHTOOL_ID_INACTIVE and must deactivate the indicator. Returns a + * negative error code or zero. + * @get_ethtool_stats: Return extended statistics about the device. + * This is only useful if the device maintains statistics not + * included in &struct rtnl_link_stats64. + * @begin: Function to be called before any other operation. Returns a + * negative error code or zero. + * @complete: Function to be called after any other operation except + * @begin. Will be called even if the other operation failed. + * @get_priv_flags: Report driver-specific feature flags. + * @set_priv_flags: Set driver-specific feature flags. Returns a negative + * error code or zero. + * @get_sset_count: Get number of strings that @get_strings will write. + * @get_rxnfc: Get RX flow classification rules. Returns a negative + * error code or zero. + * @set_rxnfc: Set RX flow classification rules. Returns a negative + * error code or zero. + * @flash_device: Write a firmware image to device's flash memory. + * Returns a negative error code or zero. 
+ * @reset: Reset (part of) the device, as specified by a bitmask of + * flags from &enum ethtool_reset_flags. Returns a negative + * error code or zero. + * @get_rxfh_key_size: Get the size of the RX flow hash key. + * Returns zero if not supported for this specific device. + * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. + * Returns zero if not supported for this specific device. + * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key + * and/or hash function. + * Returns a negative error code or zero. + * @set_rxfh: Set the contents of the RX flow hash indirection table, hash + * key, and/or hash function. Arguments which are set to %NULL or zero + * will remain unchanged. + * Returns a negative error code or zero. An error code must be returned + * if at least one unsupported change was requested. + * @get_channels: Get number of channels. + * @set_channels: Set number of channels. Returns a negative error code or + * zero. + * @get_dump_flag: Get dump flag indicating current dump length, version, + * and flag of the device. + * @get_dump_data: Get dump data. + * @set_dump: Set dump specific flags to the device. + * @get_ts_info: Get the time stamping and PTP hardware clock capabilities. + * Drivers supporting transmit time stamps in software should set this to + * ethtool_op_get_ts_info(). + * @get_module_info: Get the size and type of the eeprom contained within + * a plug-in module. + * @get_module_eeprom: Get the eeprom information from the plug-in module + * @get_eee: Get Energy-Efficient (EEE) supported and status. + * @set_eee: Set EEE status (enable/disable) as well as LPI timers. + * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue. + * It must check that the given queue number is valid. If neither a RX nor + * a TX queue has this number, return -EINVAL. If only a RX queue or a TX + * queue has this number, set the inapplicable fields to ~0 and return 0. + * Returns a negative error code or zero. + * @set_per_queue_coalesce: Set interrupt coalescing parameters per queue. + * It must check that the given queue number is valid. If neither a RX nor + * a TX queue has this number, return -EINVAL. If only a RX queue or a TX + * queue has this number, ignore the inapplicable fields. + * Returns a negative error code or zero. + * @get_link_ksettings: When defined, takes precedence over the + * %get_settings method. Get various device settings + * including Ethernet link settings. The %cmd and + * %link_mode_masks_nwords fields should be ignored (use + * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any + * change to them will be overwritten by kernel. Returns a + * negative error code or zero. + * @set_link_ksettings: When defined, takes precedence over the + * %set_settings method. Set various device settings including + * Ethernet link settings. The %cmd and %link_mode_masks_nwords + * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS + * instead of the latter), any change to them will be overwritten + * by kernel. Returns a negative error code or zero. + * @get_fecparam: Get the network device Forward Error Correction parameters. + * @set_fecparam: Set the network device Forward Error Correction parameters. + * @get_ethtool_phy_stats: Return extended statistics about the PHY device. + * This is only useful if the device maintains PHY statistics and + * cannot use the standard PHY library helpers. + * + * All operations are optional (i.e. 
the function pointer may be set + * to %NULL) and callers must take this into account. Callers must + * hold the RTNL lock. + * + * See the structures used by these operations for further documentation. + * Note that for all operations using a structure ending with a zero- + * length array, the array is allocated separately in the kernel and + * is passed to the driver as an additional parameter. + * + * See &struct net_device and &struct net_device_ops for documentation + * of the generic netdev features interface. + */ +struct ethtool_ops { + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + int (*set_settings)(struct net_device *, struct ethtool_cmd *); + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, + struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, + struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, + struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, + struct ethtool_ringparam *); + void (*get_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + int (*set_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32 stringset, u8 *); + int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); + void (*get_ethtool_stats)(struct net_device *, + struct ethtool_stats *, u64 *); + int (*begin)(struct net_device *); + void (*complete)(struct net_device *); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + int (*get_rxnfc)(struct net_device *, + struct ethtool_rxnfc *, u32 *rule_locs); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); + int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_key_size)(struct net_device *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key, + u8 *hfunc); + int (*set_rxfh)(struct net_device *, const u32 *indir, + const u8 *key, const u8 hfunc); + int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key, + u8 *hfunc, u32 rss_context); + int (*set_rxfh_context)(struct net_device *, const u32 *indir, + const u8 *key, const u8 hfunc, + u32 *rss_context, bool delete); + void (*get_channels)(struct net_device *, struct ethtool_channels *); + int (*set_channels)(struct net_device *, struct ethtool_channels *); + int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); + int (*get_dump_data)(struct net_device *, + struct ethtool_dump *, void *); + int (*set_dump)(struct net_device *, struct ethtool_dump *); + int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); + int (*get_module_info)(struct net_device 
*, + struct ethtool_modinfo *); + int (*get_module_eeprom)(struct net_device *, + struct ethtool_eeprom *, u8 *); + int (*get_eee)(struct net_device *, struct ethtool_eee *); + int (*set_eee)(struct net_device *, struct ethtool_eee *); + int (*get_tunable)(struct net_device *, + const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, + const struct ethtool_tunable *, const void *); + int (*get_per_queue_coalesce)(struct net_device *, u32, + struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, + struct ethtool_coalesce *); + int (*get_link_ksettings)(struct net_device *, + struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, + const struct ethtool_link_ksettings *); + int (*get_fecparam)(struct net_device *, + struct ethtool_fecparam *); + int (*set_fecparam)(struct net_device *, + struct ethtool_fecparam *); + void (*get_ethtool_phy_stats)(struct net_device *, + struct ethtool_stats *, u64 *); +}; +#endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h new file mode 100644 index 000000000..dc4fd8a66 --- /dev/null +++ b/include/linux/eventfd.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/eventfd.h + * + * Copyright (C) 2007 Davide Libenzi + * + */ + +#ifndef _LINUX_EVENTFD_H +#define _LINUX_EVENTFD_H + +#include +#include +#include +#include +#include + +/* + * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining + * new flags, since they might collide with O_* ones. We want + * to re-use O_* flags that couldn't possibly have a meaning + * from eventfd, in order to leave a free define-space for + * shared O_* flags. + */ +#define EFD_SEMAPHORE (1 << 0) +#define EFD_CLOEXEC O_CLOEXEC +#define EFD_NONBLOCK O_NONBLOCK + +#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) +#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) + +struct eventfd_ctx; +struct file; + +#ifdef CONFIG_EVENTFD + +void eventfd_ctx_put(struct eventfd_ctx *ctx); +struct file *eventfd_fget(int fd); +struct eventfd_ctx *eventfd_ctx_fdget(int fd); +struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); +__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); +int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, + __u64 *cnt); + +DECLARE_PER_CPU(int, eventfd_wake_count); + +static inline bool eventfd_signal_count(void) +{ + return this_cpu_read(eventfd_wake_count); +} + +#else /* CONFIG_EVENTFD */ + +/* + * Ugly ugly ugly error layer to support modules that uses eventfd but + * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. 
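+ *
+ * Consumer-side sketch (editorial, not part of the upstream header; with
+ * eventfd disabled the fdget stub below yields -ENOSYS via ERR_PTR):
+ *
+ *	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
+ *
+ *	if (IS_ERR(ctx))
+ *		return PTR_ERR(ctx);
+ *	eventfd_signal(ctx, 1);
+ *	eventfd_ctx_put(ctx);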
+ */ + +static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) +{ + return ERR_PTR(-ENOSYS); +} + +static inline int eventfd_signal(struct eventfd_ctx *ctx, int n) +{ + return -ENOSYS; +} + +static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) +{ + +} + +static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, + wait_queue_entry_t *wait, __u64 *cnt) +{ + return -ENOSYS; +} + +static inline bool eventfd_signal_count(void) +{ + return false; +} + +#endif + +#endif /* _LINUX_EVENTFD_H */ + diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h new file mode 100644 index 000000000..2f14ac73d --- /dev/null +++ b/include/linux/eventpoll.h @@ -0,0 +1,76 @@ +/* + * include/linux/eventpoll.h ( Efficient event polling implementation ) + * Copyright (C) 2001,...,2006 Davide Libenzi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Davide Libenzi + * + */ +#ifndef _LINUX_EVENTPOLL_H +#define _LINUX_EVENTPOLL_H + +#include +#include + + +/* Forward declarations to avoid compiler errors */ +struct file; + + +#ifdef CONFIG_EPOLL + +#ifdef CONFIG_CHECKPOINT_RESTORE +struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff); +#endif + +/* Used to initialize the epoll bits inside the "struct file" */ +static inline void eventpoll_init_file(struct file *file) +{ + INIT_LIST_HEAD(&file->f_ep_links); + INIT_LIST_HEAD(&file->f_tfile_llink); +} + + +/* Used to release the epoll bits inside the "struct file" */ +void eventpoll_release_file(struct file *file); + +/* + * This is called from inside fs/file_table.c:__fput() to unlink files + * from the eventpoll interface. We need to have this facility to cleanup + * correctly files that are closed without being removed from the eventpoll + * interface. + */ +static inline void eventpoll_release(struct file *file) +{ + + /* + * Fast check to avoid the get/release of the semaphore. Since + * we're doing this outside the semaphore lock, it might return + * false negatives, but we don't care. It'll help in 99.99% of cases + * to avoid the semaphore lock. False positives simply cannot happen + * because the file in on the way to be removed and nobody ( but + * eventpoll ) has still a reference to this file. + */ + if (likely(list_empty(&file->f_ep_links))) + return; + + /* + * The file is being closed while it is still linked to an epoll + * descriptor. We need to handle this by correctly unlinking it + * from its containers. 
+ */ + eventpoll_release_file(file); +} + +#else + +static inline void eventpoll_init_file(struct file *file) {} +static inline void eventpoll_release(struct file *file) {} + +#endif + +#endif /* #ifndef _LINUX_EVENTPOLL_H */ diff --git a/include/linux/evm.h b/include/linux/evm.h new file mode 100644 index 000000000..8302bc29b --- /dev/null +++ b/include/linux/evm.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * evm.h + * + * Copyright (c) 2009 IBM Corporation + * Author: Mimi Zohar + */ + +#ifndef _LINUX_EVM_H +#define _LINUX_EVM_H + +#include +#include + +struct integrity_iint_cache; + +#ifdef CONFIG_EVM +extern int evm_set_key(void *key, size_t keylen); +extern enum integrity_status evm_verifyxattr(struct dentry *dentry, + const char *xattr_name, + void *xattr_value, + size_t xattr_value_len, + struct integrity_iint_cache *iint); +extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr); +extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid); +extern int evm_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size); +extern void evm_inode_post_setxattr(struct dentry *dentry, + const char *xattr_name, + const void *xattr_value, + size_t xattr_value_len); +extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name); +extern void evm_inode_post_removexattr(struct dentry *dentry, + const char *xattr_name); +extern int evm_inode_init_security(struct inode *inode, + const struct xattr *xattr_array, + struct xattr *evm); +#ifdef CONFIG_FS_POSIX_ACL +extern int posix_xattr_acl(const char *xattrname); +#else +static inline int posix_xattr_acl(const char *xattrname) +{ + return 0; +} +#endif +#else + +static inline int evm_set_key(void *key, size_t keylen) +{ + return -EOPNOTSUPP; +} + +#ifdef CONFIG_INTEGRITY +static inline enum integrity_status evm_verifyxattr(struct dentry *dentry, + const char *xattr_name, + void *xattr_value, + size_t xattr_value_len, + struct integrity_iint_cache *iint) +{ + return INTEGRITY_UNKNOWN; +} +#endif + +static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr) +{ + return 0; +} + +static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) +{ + return; +} + +static inline int evm_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size) +{ + return 0; +} + +static inline void evm_inode_post_setxattr(struct dentry *dentry, + const char *xattr_name, + const void *xattr_value, + size_t xattr_value_len) +{ + return; +} + +static inline int evm_inode_removexattr(struct dentry *dentry, + const char *xattr_name) +{ + return 0; +} + +static inline void evm_inode_post_removexattr(struct dentry *dentry, + const char *xattr_name) +{ + return; +} + +static inline int evm_inode_init_security(struct inode *inode, + const struct xattr *xattr_array, + struct xattr *evm) +{ + return 0; +} + +#endif /* CONFIG_EVM */ +#endif /* LINUX_EVM_H */ diff --git a/include/linux/export.h b/include/linux/export.h new file mode 100644 index 000000000..ce764a5d2 --- /dev/null +++ b/include/linux/export.h @@ -0,0 +1,153 @@ +#ifndef _LINUX_EXPORT_H +#define _LINUX_EXPORT_H + +/* + * Export symbols from the kernel to modules. Forked from module.h + * to reduce the amount of pointless cruft we feed to gcc when only + * exporting a simple symbol or two. + * + * Try not to add #includes here. It slows compilation and makes kernel + * hackers place grumpy comments in header files. 
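+ *
+ * Typical usage sketch (editorial, not part of the upstream header;
+ * "foo" is a made-up symbol), placed in a .c file next to the definition
+ * being exported:
+ *
+ *	int foo(void)
+ *	{
+ *		return 0;
+ *	}
+ *	EXPORT_SYMBOL_GPL(foo);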
+ */ + +#ifndef __ASSEMBLY__ +#ifdef MODULE +extern struct module __this_module; +#define THIS_MODULE (&__this_module) +#else +#define THIS_MODULE ((struct module *)0) +#endif + +#ifdef CONFIG_MODULES + +#if defined(__KERNEL__) && !defined(__GENKSYMS__) +#ifdef CONFIG_MODVERSIONS +/* Mark the CRC weak since genksyms apparently decides not to + * generate a checksums for some symbols */ +#if defined(CONFIG_MODULE_REL_CRCS) +#define __CRC_SYMBOL(sym, sec) \ + asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ + " .weak __crc_" #sym " \n" \ + " .long __crc_" #sym " - . \n" \ + " .previous \n"); +#else +#define __CRC_SYMBOL(sym, sec) \ + asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ + " .weak __crc_" #sym " \n" \ + " .long __crc_" #sym " \n" \ + " .previous \n"); +#endif +#else +#define __CRC_SYMBOL(sym, sec) +#endif + +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +#include +/* + * Emit the ksymtab entry as a pair of relative references: this reduces + * the size by half on 64-bit architectures, and eliminates the need for + * absolute relocations that require runtime processing on relocatable + * kernels. + */ +#define __KSYMTAB_ENTRY(sym, sec) \ + __ADDRESSABLE(sym) \ + asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \ + " .balign 8 \n" \ + "__ksymtab_" #sym ": \n" \ + " .long " #sym "- . \n" \ + " .long __kstrtab_" #sym "- . \n" \ + " .previous \n") + +struct kernel_symbol { + int value_offset; + int name_offset; +}; +#else +#define __KSYMTAB_ENTRY(sym, sec) \ + static const struct kernel_symbol __ksymtab_##sym \ + __attribute__((section("___ksymtab" sec "+" #sym), used)) \ + = { (unsigned long)&sym, __kstrtab_##sym } + +struct kernel_symbol { + unsigned long value; + const char *name; +}; +#endif + +/* For every exported symbol, place a struct in the __ksymtab section */ +#define ___EXPORT_SYMBOL(sym, sec) \ + extern typeof(sym) sym; \ + __CRC_SYMBOL(sym, sec) \ + static const char __kstrtab_##sym[] \ + __attribute__((section("__ksymtab_strings"), used, aligned(1))) \ + = #sym; \ + __KSYMTAB_ENTRY(sym, sec) + +#if defined(__DISABLE_EXPORTS) + +/* + * Allow symbol exports to be disabled completely so that C code may + * be reused in other execution contexts such as the UEFI stub or the + * decompressor. + */ +#define __EXPORT_SYMBOL(sym, sec) + +#elif defined(__KSYM_DEPS__) + +/* + * For fine grained build dependencies, we want to tell the build system + * about each possible exported symbol even if they're not actually exported. + * We use a string pattern that is unlikely to be valid code that the build + * system filters out from the preprocessor output (see ksym_dep_filter + * in scripts/Kbuild.include). 
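+ *
+ * For illustration (editorial note): with the definition below,
+ * EXPORT_SYMBOL(foo) leaves the marker
+ *
+ *	=== __KSYM_foo ===
+ *
+ * in the preprocessor output for the build system to pick up.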
+ */ +#define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym === + +#elif defined(CONFIG_TRIM_UNUSED_KSYMS) + +#include + +#define __EXPORT_SYMBOL(sym, sec) \ + __cond_export_sym(sym, sec, __is_defined(__KSYM_##sym)) +#define __cond_export_sym(sym, sec, conf) \ + ___cond_export_sym(sym, sec, conf) +#define ___cond_export_sym(sym, sec, enabled) \ + __cond_export_sym_##enabled(sym, sec) +#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec) +#define __cond_export_sym_0(sym, sec) /* nothing */ + +#else +#define __EXPORT_SYMBOL ___EXPORT_SYMBOL +#endif + +#define EXPORT_SYMBOL(sym) \ + __EXPORT_SYMBOL(sym, "") + +#define EXPORT_SYMBOL_GPL(sym) \ + __EXPORT_SYMBOL(sym, "_gpl") + +#define EXPORT_SYMBOL_GPL_FUTURE(sym) \ + __EXPORT_SYMBOL(sym, "_gpl_future") + +#ifdef CONFIG_UNUSED_SYMBOLS +#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") +#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") +#else +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) +#endif + +#endif /* __GENKSYMS__ */ + +#else /* !CONFIG_MODULES... */ + +#define EXPORT_SYMBOL(sym) +#define EXPORT_SYMBOL_GPL(sym) +#define EXPORT_SYMBOL_GPL_FUTURE(sym) +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) + +#endif /* CONFIG_MODULES */ +#endif /* !__ASSEMBLY__ */ + +#endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h new file mode 100644 index 000000000..0d3037419 --- /dev/null +++ b/include/linux/exportfs.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_EXPORTFS_H +#define LINUX_EXPORTFS_H 1 + +#include + +struct dentry; +struct iattr; +struct inode; +struct iomap; +struct super_block; +struct vfsmount; + +/* limit the handle size to NFSv4 handle size now */ +#define MAX_HANDLE_SZ 128 + +/* + * The fileid_type identifies how the file within the filesystem is encoded. + * In theory this is freely set and parsed by the filesystem, but we try to + * stick to conventions so we can share some generic code and don't confuse + * sniffers like ethereal/wireshark. + * + * The filesystem must not use the value '0' or '0xff'. + */ +enum fid_type { + /* + * The root, or export point, of the filesystem. + * (Never actually passed down to the filesystem. + */ + FILEID_ROOT = 0, + + /* + * 32bit inode number, 32 bit generation number. + */ + FILEID_INO32_GEN = 1, + + /* + * 32bit inode number, 32 bit generation number, + * 32 bit parent directory inode number. + */ + FILEID_INO32_GEN_PARENT = 2, + + /* + * 64 bit object ID, 64 bit root object ID, + * 32 bit generation number. + */ + FILEID_BTRFS_WITHOUT_PARENT = 0x4d, + + /* + * 64 bit object ID, 64 bit root object ID, + * 32 bit generation number, + * 64 bit parent object ID, 32 bit parent generation. + */ + FILEID_BTRFS_WITH_PARENT = 0x4e, + + /* + * 64 bit object ID, 64 bit root object ID, + * 32 bit generation number, + * 64 bit parent object ID, 32 bit parent generation, + * 64 bit parent root object ID. + */ + FILEID_BTRFS_WITH_PARENT_ROOT = 0x4f, + + /* + * 32 bit block number, 16 bit partition reference, + * 16 bit unused, 32 bit generation number. + */ + FILEID_UDF_WITHOUT_PARENT = 0x51, + + /* + * 32 bit block number, 16 bit partition reference, + * 16 bit unused, 32 bit generation number, + * 32 bit parent block number, 32 bit parent generation number + */ + FILEID_UDF_WITH_PARENT = 0x52, + + /* + * 64 bit checkpoint number, 64 bit inode number, + * 32 bit generation number. 
+ */ + FILEID_NILFS_WITHOUT_PARENT = 0x61, + + /* + * 64 bit checkpoint number, 64 bit inode number, + * 32 bit generation number, 32 bit parent generation. + * 64 bit parent inode number. + */ + FILEID_NILFS_WITH_PARENT = 0x62, + + /* + * 32 bit generation number, 40 bit i_pos. + */ + FILEID_FAT_WITHOUT_PARENT = 0x71, + + /* + * 32 bit generation number, 40 bit i_pos, + * 32 bit parent generation number, 40 bit parent i_pos + */ + FILEID_FAT_WITH_PARENT = 0x72, + + /* + * 128 bit child FID (struct lu_fid) + * 128 bit parent FID (struct lu_fid) + */ + FILEID_LUSTRE = 0x97, + + /* + * Filesystems must not use 0xff file ID. + */ + FILEID_INVALID = 0xff, +}; + +struct fid { + union { + struct { + u32 ino; + u32 gen; + u32 parent_ino; + u32 parent_gen; + } i32; + struct { + u32 block; + u16 partref; + u16 parent_partref; + u32 generation; + u32 parent_block; + u32 parent_generation; + } udf; + __u32 raw[0]; + }; +}; + +/** + * struct export_operations - for nfsd to communicate with file systems + * @encode_fh: encode a file handle fragment from a dentry + * @fh_to_dentry: find the implied object and get a dentry for it + * @fh_to_parent: find the implied object's parent and get a dentry for it + * @get_name: find the name for a given inode in a given directory + * @get_parent: find the parent of a given directory + * @commit_metadata: commit metadata changes to stable storage + * + * See Documentation/filesystems/nfs/Exporting for details on how to use + * this interface correctly. + * + * encode_fh: + * @encode_fh should store in the file handle fragment @fh (using at most + * @max_len bytes) information that can be used by @decode_fh to recover the + * file referred to by the &struct dentry @de. If the @connectable flag is + * set, the encode_fh() should store sufficient information so that a good + * attempt can be made to find not only the file but also it's place in the + * filesystem. This typically means storing a reference to de->d_parent in + * the filehandle fragment. encode_fh() should return the fileid_type on + * success and on error returns 255 (if the space needed to encode fh is + * greater than @max_len*4 bytes). On error @max_len contains the minimum + * size(in 4 byte unit) needed to encode the file handle. + * + * fh_to_dentry: + * @fh_to_dentry is given a &struct super_block (@sb) and a file handle + * fragment (@fh, @fh_len). It should return a &struct dentry which refers + * to the same file that the file handle fragment refers to. If it cannot, + * it should return a %NULL pointer if the file cannot be found, or an + * %ERR_PTR error code of %ENOMEM if a memory allocation failure occurred. + * Any other error code is treated like %NULL, and will cause an %ESTALE error + * for callers of exportfs_decode_fh(). + * Any suitable dentry can be returned including, if necessary, a new dentry + * created with d_alloc_root. The caller can then find any other extant + * dentries by following the d_alias links. + * + * fh_to_parent: + * Same as @fh_to_dentry, except that it returns a pointer to the parent + * dentry if it was encoded into the filehandle fragment by @encode_fh. + * + * get_name: + * @get_name should find a name for the given @child in the given @parent + * directory. The name should be stored in the @name (with the + * understanding that it is already pointing to a a %NAME_MAX+1 sized + * buffer. get_name() should return %0 on success, a negative error code + * or error. @get_name will be called without @parent->i_mutex held. 
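+ *
+ * As a rough illustration (editorial sketch, not part of the upstream
+ * documentation; foo_fh_to_dentry() and foo_nfs_get_inode() are made-up
+ * names), a simple filesystem often builds @fh_to_dentry on top of the
+ * generic_fh_to_dentry() helper declared at the end of this header:
+ *
+ *	static struct dentry *foo_fh_to_dentry(struct super_block *sb,
+ *			struct fid *fid, int fh_len, int fh_type)
+ *	{
+ *		return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ *					    foo_nfs_get_inode);
+ *	}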
+ * + * get_parent: + * @get_parent should find the parent directory for the given @child which + * is also a directory. In the event that it cannot be found, or storage + * space cannot be allocated, a %ERR_PTR should be returned. + * + * commit_metadata: + * @commit_metadata should commit metadata changes to stable storage. + * + * Locking rules: + * get_parent is called with child->d_inode->i_mutex down + * get_name is not (which is possibly inconsistent) + */ + +struct export_operations { + int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len, + struct inode *parent); + struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type); + struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type); + int (*get_name)(struct dentry *parent, char *name, + struct dentry *child); + struct dentry * (*get_parent)(struct dentry *child); + int (*commit_metadata)(struct inode *inode); + + int (*get_uuid)(struct super_block *sb, u8 *buf, u32 *len, u64 *offset); + int (*map_blocks)(struct inode *inode, loff_t offset, + u64 len, struct iomap *iomap, + bool write, u32 *device_generation); + int (*commit_blocks)(struct inode *inode, struct iomap *iomaps, + int nr_iomaps, struct iattr *iattr); +}; + +extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, + int *max_len, struct inode *parent); +extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, + int *max_len, int connectable); +extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid, + int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *), + void *context); + +/* + * Generic helpers for filesystems. + */ +extern struct dentry *generic_fh_to_dentry(struct super_block *sb, + struct fid *fid, int fh_len, int fh_type, + struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)); +extern struct dentry *generic_fh_to_parent(struct super_block *sb, + struct fid *fid, int fh_len, int fh_type, + struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)); + +#endif /* LINUX_EXPORTFS_H */ diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h new file mode 100644 index 000000000..1fef88569 --- /dev/null +++ b/include/linux/ext2_fs.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/ext2_fs.h + * + * Copyright (C) 1992, 1993, 1994, 1995 + * Remy Card (card@masi.ibp.fr) + * Laboratoire MASI - Institut Blaise Pascal + * Universite Pierre et Marie Curie (Paris VI) + * + * from + * + * linux/include/linux/minix_fs.h + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#ifndef _LINUX_EXT2_FS_H +#define _LINUX_EXT2_FS_H + +#include +#include + +#define EXT2_NAME_LEN 255 + +/* + * Maximal count of links to a file + */ +#define EXT2_LINK_MAX 32000 + +#define EXT2_SB_MAGIC_OFFSET 0x38 +#define EXT2_SB_BLOCKS_OFFSET 0x04 +#define EXT2_SB_BSIZE_OFFSET 0x18 + +static inline u64 ext2_image_size(void *ext2_sb) +{ + __u8 *p = ext2_sb; + if (*(__le16 *)(p + EXT2_SB_MAGIC_OFFSET) != cpu_to_le16(EXT2_SUPER_MAGIC)) + return 0; + return (u64)le32_to_cpup((__le32 *)(p + EXT2_SB_BLOCKS_OFFSET)) << + le32_to_cpup((__le32 *)(p + EXT2_SB_BSIZE_OFFSET)); +} + +#endif /* _LINUX_EXT2_FS_H */ diff --git a/include/linux/extable.h b/include/linux/extable.h new file mode 100644 index 000000000..41c5b3a25 --- /dev/null +++ b/include/linux/extable.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_EXTABLE_H +#define _LINUX_EXTABLE_H + +#include 
/* for NULL */ +#include + +struct module; +struct exception_table_entry; + +const struct exception_table_entry * +search_extable(const struct exception_table_entry *base, + const size_t num, + unsigned long value); +void sort_extable(struct exception_table_entry *start, + struct exception_table_entry *finish); +void sort_main_extable(void); +void trim_init_extable(struct module *m); + +/* Given an address, look for it in the exception tables */ +const struct exception_table_entry *search_exception_tables(unsigned long add); + +#ifdef CONFIG_MODULES +/* For extable.c to search modules' exception tables. */ +const struct exception_table_entry *search_module_extables(unsigned long addr); +#else +static inline const struct exception_table_entry * +search_module_extables(unsigned long addr) +{ + return NULL; +} +#endif /*CONFIG_MODULES*/ + +#endif /* _LINUX_EXTABLE_H */ diff --git a/include/linux/extcon-provider.h b/include/linux/extcon-provider.h new file mode 100644 index 000000000..2feca5881 --- /dev/null +++ b/include/linux/extcon-provider.h @@ -0,0 +1,142 @@ +/* + * External Connector (extcon) framework + * - linux/include/linux/extcon-provider.h for extcon provider device driver. + * + * Copyright (C) 2017 Samsung Electronics + * Author: Chanwoo Choi + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_EXTCON_PROVIDER_H__ +#define __LINUX_EXTCON_PROVIDER_H__ + +#include + +struct extcon_dev; + +#if IS_ENABLED(CONFIG_EXTCON) + +/* Following APIs register/unregister the extcon device. */ +extern int extcon_dev_register(struct extcon_dev *edev); +extern void extcon_dev_unregister(struct extcon_dev *edev); +extern int devm_extcon_dev_register(struct device *dev, + struct extcon_dev *edev); +extern void devm_extcon_dev_unregister(struct device *dev, + struct extcon_dev *edev); + +/* Following APIs allocate/free the memory of the extcon device. */ +extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable); +extern void extcon_dev_free(struct extcon_dev *edev); +extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, + const unsigned int *cable); +extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); + +/* Synchronize the state and property value for each external connector. */ +extern int extcon_sync(struct extcon_dev *edev, unsigned int id); + +/* + * Following APIs set the connected state of each external connector. + * The 'id' argument indicates the defined external connector. + */ +extern int extcon_set_state(struct extcon_dev *edev, unsigned int id, + bool state); +extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, + bool state); + +/* + * Following APIs set the property of each external connector. + * The 'id' argument indicates the defined external connector + * and the 'prop' indicates the extcon property. + * + * And extcon_set_property_capability() set the capability of the property + * for each external connector. They are used to set the capability of the + * property of each external connector based on the id and property. 
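+ *
+ * Provider-side sketch (editorial, not part of the upstream header; it
+ * assumes "edev" came from devm_extcon_dev_allocate() plus a successful
+ * extcon_dev_register(), and that the property constants come from
+ * linux/extcon.h):
+ *
+ *	union extcon_property_value val = { .intval = 1 };
+ *
+ *	extcon_set_property_capability(edev, EXTCON_USB, EXTCON_PROP_USB_VBUS);
+ *	extcon_set_state(edev, EXTCON_USB, true);
+ *	extcon_set_property(edev, EXTCON_USB, EXTCON_PROP_USB_VBUS, val);
+ *	extcon_sync(edev, EXTCON_USB);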
+ */ +extern int extcon_set_property(struct extcon_dev *edev, unsigned int id, + unsigned int prop, + union extcon_property_value prop_val); +extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id, + unsigned int prop, + union extcon_property_value prop_val); +extern int extcon_set_property_capability(struct extcon_dev *edev, + unsigned int id, unsigned int prop); + +#else /* CONFIG_EXTCON */ +static inline int extcon_dev_register(struct extcon_dev *edev) +{ + return 0; +} + +static inline void extcon_dev_unregister(struct extcon_dev *edev) { } + +static inline int devm_extcon_dev_register(struct device *dev, + struct extcon_dev *edev) +{ + return -EINVAL; +} + +static inline void devm_extcon_dev_unregister(struct device *dev, + struct extcon_dev *edev) { } + +static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void extcon_dev_free(struct extcon_dev *edev) { } + +static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, + const unsigned int *cable) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void devm_extcon_dev_free(struct extcon_dev *edev) { } + + +static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id, + bool state) +{ + return 0; +} + +static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, + bool state) +{ + return 0; +} + +static inline int extcon_sync(struct extcon_dev *edev, unsigned int id) +{ + return 0; +} + +static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id, + unsigned int prop, + union extcon_property_value prop_val) +{ + return 0; +} + +static inline int extcon_set_property_sync(struct extcon_dev *edev, + unsigned int id, unsigned int prop, + union extcon_property_value prop_val) +{ + return 0; +} + +static inline int extcon_set_property_capability(struct extcon_dev *edev, + unsigned int id, unsigned int prop) +{ + return 0; +} +#endif /* CONFIG_EXTCON */ +#endif /* __LINUX_EXTCON_PROVIDER_H__ */ diff --git a/include/linux/extcon.h b/include/linux/extcon.h new file mode 100644 index 000000000..fdef4c784 --- /dev/null +++ b/include/linux/extcon.h @@ -0,0 +1,344 @@ +/* + * External Connector (extcon) framework + * - linux/include/linux/extcon.h for extcon consumer device driver. + * + * Copyright (C) 2015 Samsung Electronics + * Author: Chanwoo Choi + * + * Copyright (C) 2012 Samsung Electronics + * Author: Donggeun Kim + * Author: MyungJoo Ham + * + * based on switch class driver + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __LINUX_EXTCON_H__ +#define __LINUX_EXTCON_H__ + +#include + +/* + * Define the type of supported external connectors + */ +#define EXTCON_TYPE_USB BIT(0) /* USB connector */ +#define EXTCON_TYPE_CHG BIT(1) /* Charger connector */ +#define EXTCON_TYPE_JACK BIT(2) /* Jack connector */ +#define EXTCON_TYPE_DISP BIT(3) /* Display connector */ +#define EXTCON_TYPE_MISC BIT(4) /* Miscellaneous connector */ + +/* + * Define the unique id of supported external connectors + */ +#define EXTCON_NONE 0 + +/* USB external connector */ +#define EXTCON_USB 1 +#define EXTCON_USB_HOST 2 + +/* + * Charging external connector + * + * When one SDP charger connector was reported, we should also report + * the USB connector, which means EXTCON_CHG_USB_SDP should always + * appear together with EXTCON_USB. The same as ACA charger connector, + * EXTCON_CHG_USB_ACA would normally appear with EXTCON_USB_HOST. + * + * The EXTCON_CHG_USB_SLOW connector can provide at least 500mA of + * current at 5V. The EXTCON_CHG_USB_FAST connector can provide at + * least 1A of current at 5V. + */ +#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */ +#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */ +#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */ +#define EXTCON_CHG_USB_ACA 8 /* Accessory Charger Adapter */ +#define EXTCON_CHG_USB_FAST 9 +#define EXTCON_CHG_USB_SLOW 10 +#define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */ +#define EXTCON_CHG_USB_PD 12 /* USB Power Delivery */ + +/* Jack external connector */ +#define EXTCON_JACK_MICROPHONE 20 +#define EXTCON_JACK_HEADPHONE 21 +#define EXTCON_JACK_LINE_IN 22 +#define EXTCON_JACK_LINE_OUT 23 +#define EXTCON_JACK_VIDEO_IN 24 +#define EXTCON_JACK_VIDEO_OUT 25 +#define EXTCON_JACK_SPDIF_IN 26 /* Sony Philips Digital InterFace */ +#define EXTCON_JACK_SPDIF_OUT 27 + +/* Display external connector */ +#define EXTCON_DISP_HDMI 40 /* High-Definition Multimedia Interface */ +#define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */ +#define EXTCON_DISP_DVI 42 /* Digital Visual Interface */ +#define EXTCON_DISP_VGA 43 /* Video Graphics Array */ +#define EXTCON_DISP_DP 44 /* Display Port */ +#define EXTCON_DISP_HMD 45 /* Head-Mounted Display */ + +/* Miscellaneous external connector */ +#define EXTCON_DOCK 60 +#define EXTCON_JIG 61 +#define EXTCON_MECHANICAL 62 + +#define EXTCON_NUM 63 + +/* + * Define the properties of supported external connectors. + * + * When adding the new extcon property, they *must* have + * the type/value/default information. Also, you *have to* + * modify the EXTCON_PROP_[type]_START/END definitions + * which mean the range of the supported properties + * for each extcon type. + * + * The naming style of property + * : EXTCON_PROP_[type]_[property name] + * + * EXTCON_PROP_USB_[property name] : USB property + * EXTCON_PROP_CHG_[property name] : Charger property + * EXTCON_PROP_JACK_[property name] : Jack property + * EXTCON_PROP_DISP_[property name] : Display property + */ + +/* + * Properties of EXTCON_TYPE_USB. 
+ * + * - EXTCON_PROP_USB_VBUS + * @type: integer (intval) + * @value: 0 (low) or 1 (high) + * @default: 0 (low) + * - EXTCON_PROP_USB_TYPEC_POLARITY + * @type: integer (intval) + * @value: 0 (normal) or 1 (flip) + * @default: 0 (normal) + * - EXTCON_PROP_USB_SS (SuperSpeed) + * @type: integer (intval) + * @value: 0 (USB/USB2) or 1 (USB3) + * @default: 0 (USB/USB2) + * + */ +#define EXTCON_PROP_USB_VBUS 0 +#define EXTCON_PROP_USB_TYPEC_POLARITY 1 +#define EXTCON_PROP_USB_SS 2 + +#define EXTCON_PROP_USB_MIN 0 +#define EXTCON_PROP_USB_MAX 2 +#define EXTCON_PROP_USB_CNT (EXTCON_PROP_USB_MAX - EXTCON_PROP_USB_MIN + 1) + +/* Properties of EXTCON_TYPE_CHG. */ +#define EXTCON_PROP_CHG_MIN 50 +#define EXTCON_PROP_CHG_MAX 50 +#define EXTCON_PROP_CHG_CNT (EXTCON_PROP_CHG_MAX - EXTCON_PROP_CHG_MIN + 1) + +/* Properties of EXTCON_TYPE_JACK. */ +#define EXTCON_PROP_JACK_MIN 100 +#define EXTCON_PROP_JACK_MAX 100 +#define EXTCON_PROP_JACK_CNT (EXTCON_PROP_JACK_MAX - EXTCON_PROP_JACK_MIN + 1) + +/* + * Properties of EXTCON_TYPE_DISP. + * + * - EXTCON_PROP_DISP_HPD (Hot Plug Detect) + * @type: integer (intval) + * @value: 0 (no hpd) or 1 (hpd) + * @default: 0 (no hpd) + * + */ +#define EXTCON_PROP_DISP_HPD 150 + +/* Properties of EXTCON_TYPE_DISP. */ +#define EXTCON_PROP_DISP_MIN 150 +#define EXTCON_PROP_DISP_MAX 151 +#define EXTCON_PROP_DISP_CNT (EXTCON_PROP_DISP_MAX - EXTCON_PROP_DISP_MIN + 1) + +/* + * Define the type of property's value. + * + * Define the property's value as union type. Because each property + * would need the different data type to store it. + */ +union extcon_property_value { + int intval; /* type : integer (intval) */ +}; + +struct extcon_dev; + +#if IS_ENABLED(CONFIG_EXTCON) +/* + * Following APIs get the connected state of each external connector. + * The 'id' argument indicates the defined external connector. + */ +extern int extcon_get_state(struct extcon_dev *edev, unsigned int id); + +/* + * Following APIs get the property of each external connector. + * The 'id' argument indicates the defined external connector + * and the 'prop' indicates the extcon property. + * + * And extcon_get_property_capability() get the capability of the property + * for each external connector. They are used to get the capability of the + * property of each external connector based on the id and property. + */ +extern int extcon_get_property(struct extcon_dev *edev, unsigned int id, + unsigned int prop, + union extcon_property_value *prop_val); +extern int extcon_get_property_capability(struct extcon_dev *edev, + unsigned int id, unsigned int prop); + +/* + * Following APIs register the notifier block in order to detect + * the change of both state and property value for each external connector. + * + * extcon_register_notifier(*edev, id, *nb) : Register a notifier block + * for specific external connector of the extcon. + * extcon_register_notifier_all(*edev, *nb) : Register a notifier block + * for all supported external connectors of the extcon. 
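As a rough consumer-side sketch of the lookup, state and notifier APIs described above (illustrative only; the callback, notifier block and bind function names are made up for the example, and NOTIFY_OK comes from <linux/notifier.h>):

static int sketch_evt(struct notifier_block *nb, unsigned long state, void *ptr)
{
	// 'state' is the new attach state of the connector id this block
	// was registered for (here EXTCON_USB).
	return NOTIFY_OK;
}

static struct notifier_block sketch_nb = {
	.notifier_call = sketch_evt,
};

static int sketch_bind(struct device *dev)
{
	struct extcon_dev *edev;
	int ret;

	// Look up the provider through the device's "extcon" phandle.
	edev = extcon_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	// Read the current state, then listen for changes of EXTCON_USB.
	ret = extcon_get_state(edev, EXTCON_USB);
	if (ret < 0)
		return ret;

	return devm_extcon_register_notifier(dev, edev, EXTCON_USB, &sketch_nb);
}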
+ */ +extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); +extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); +extern int devm_extcon_register_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); +extern void devm_extcon_unregister_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); + +extern int extcon_register_notifier_all(struct extcon_dev *edev, + struct notifier_block *nb); +extern int extcon_unregister_notifier_all(struct extcon_dev *edev, + struct notifier_block *nb); +extern int devm_extcon_register_notifier_all(struct device *dev, + struct extcon_dev *edev, + struct notifier_block *nb); +extern void devm_extcon_unregister_notifier_all(struct device *dev, + struct extcon_dev *edev, + struct notifier_block *nb); + +/* + * Following APIs get the extcon_dev from devicetree or by through extcon name. + */ +extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name); +extern struct extcon_dev *extcon_find_edev_by_node(struct device_node *node); +extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, + int index); + +/* Following API get the name of extcon device. */ +extern const char *extcon_get_edev_name(struct extcon_dev *edev); + +#else /* CONFIG_EXTCON */ +static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id) +{ + return 0; +} + +static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id, + unsigned int prop, + union extcon_property_value *prop_val) +{ + return 0; +} + +static inline int extcon_get_property_capability(struct extcon_dev *edev, + unsigned int id, unsigned int prop) +{ + return 0; +} + +static inline int extcon_register_notifier(struct extcon_dev *edev, + unsigned int id, struct notifier_block *nb) +{ + return 0; +} + +static inline int extcon_unregister_notifier(struct extcon_dev *edev, + unsigned int id, struct notifier_block *nb) +{ + return 0; +} + +static inline int devm_extcon_register_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb) +{ + return -ENOSYS; +} + +static inline void devm_extcon_unregister_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb) { } + +static inline int extcon_register_notifier_all(struct extcon_dev *edev, + struct notifier_block *nb) +{ + return 0; +} + +static inline int extcon_unregister_notifier_all(struct extcon_dev *edev, + struct notifier_block *nb) +{ + return 0; +} + +static inline int devm_extcon_register_notifier_all(struct device *dev, + struct extcon_dev *edev, + struct notifier_block *nb) +{ + return 0; +} + +static inline void devm_extcon_unregister_notifier_all(struct device *dev, + struct extcon_dev *edev, + struct notifier_block *nb) { } + +static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, + int index) +{ + return ERR_PTR(-ENODEV); +} +#endif /* CONFIG_EXTCON */ + +/* + * Following structure and API are deprecated. EXTCON remains the function + * definition to prevent the build break. 
+ */ +struct extcon_specific_cable_nb { + struct notifier_block *user_nb; + int cable_index; + struct extcon_dev *edev; + unsigned long previous_value; +}; + +static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj, + const char *extcon_name, const char *cable_name, + struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj) +{ + return -EINVAL; +} +#endif /* __LINUX_EXTCON_H__ */ diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h new file mode 100644 index 000000000..2aa32075b --- /dev/null +++ b/include/linux/extcon/extcon-adc-jack.h @@ -0,0 +1,72 @@ +/* + * include/linux/extcon/extcon-adc-jack.h + * + * Analog Jack extcon driver with ADC-based detection capability. + * + * Copyright (C) 2012 Samsung Electronics + * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef _EXTCON_ADC_JACK_H_ +#define _EXTCON_ADC_JACK_H_ __FILE__ + +#include +#include + +/** + * struct adc_jack_cond - condition to use an extcon state + * denotes the last adc_jack_cond element among the array) + * @id: the unique id of each external connector + * @min_adc: min adc value for this condition + * @max_adc: max adc value for this condition + * + * For example, if { .state = 0x3, .min_adc = 100, .max_adc = 200}, it means + * that if ADC value is between (inclusive) 100 and 200, than the cable 0 and + * 1 are attached (1<<0 | 1<<1 == 0x3) + * + * Note that you don't need to describe condition for "no cable attached" + * because when no adc_jack_cond is met, state = 0 is automatically chosen. + */ +struct adc_jack_cond { + unsigned int id; + u32 min_adc; + u32 max_adc; +}; + +/** + * struct adc_jack_pdata - platform data for adc jack device. + * @name: name of the extcon device. If null, "adc-jack" is used. + * @consumer_channel: Unique name to identify the channel on the consumer + * side. This typically describes the channels used within + * the consumer. E.g. 'battery_voltage' + * @cable_names: array of extcon id for supported cables. + * @adc_contitions: array of struct adc_jack_cond conditions ending + * with .state = 0 entry. This describes how to decode + * adc values into extcon state. + * @irq_flags: irq flags used for the @irq + * @handling_delay_ms: in some devices, we need to read ADC value some + * milli-seconds after the interrupt occurs. You may + * describe such delays with @handling_delay_ms, which + * is rounded-off by jiffies. + * @wakeup_source: flag to wake up the system for extcon events. + */ +struct adc_jack_pdata { + const char *name; + const char *consumer_channel; + + const unsigned int *cable_names; + + /* The last entry's state should be 0 */ + struct adc_jack_cond *adc_conditions; + + unsigned long irq_flags; + unsigned long handling_delay_ms; /* in ms */ + bool wakeup_source; +}; + +#endif /* _EXTCON_ADC_JACK_H */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h new file mode 100644 index 000000000..40fec5f94 --- /dev/null +++ b/include/linux/f2fs_fs.h @@ -0,0 +1,545 @@ +/** + * include/linux/f2fs_fs.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_F2FS_FS_H +#define _LINUX_F2FS_FS_H + +#include +#include + +#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */ +#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */ +#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */ +#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */ +#define F2FS_BLKSIZE 4096 /* support only 4KB block */ +#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */ +#define F2FS_MAX_EXTENSION 64 /* # of extension entries */ +#define F2FS_EXTENSION_LEN 8 /* max size of extension */ +#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS) + +#define NULL_ADDR ((block_t)0) /* used as block_t addresses */ +#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ + +#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS) +#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS) + +/* 0, 1(node nid), 2(meta nid) are reserved node id */ +#define F2FS_RESERVED_NODE_NUM 3 + +#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num) +#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num) +#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num) + +#define F2FS_MAX_QUOTAS 3 + +#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */ +#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */ +#define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */ +#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */ +#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) + +/* This flag is used by node and meta inodes, and by recovery */ +#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) + +/* + * For further optimization on multi-head logs, on-disk layout supports maximum + * 16 logs by default. The number, 16, is expected to cover all the cases + * enoughly. The implementaion currently uses no more than 6 logs. + * Half the logs are used for nodes, and the other half are used for data. 
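A quick worked example of the block-size helpers above (editorial illustration; the numbers follow directly from F2FS_BLKSIZE_BITS == 12):

// F2FS_BYTES_TO_BLK(8192) == 8192 >> 12          == 2 blocks
// F2FS_BLK_TO_BYTES(3)    == 3 << 12             == 12288 bytes
// F2FS_BLK_ALIGN(4097)    == (4097 + 4095) >> 12 == 2 blocks (rounds a byte
//                            count up to whole 4KB blocks)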
+ */ +#define MAX_ACTIVE_LOGS 16 +#define MAX_ACTIVE_NODE_LOGS 8 +#define MAX_ACTIVE_DATA_LOGS 8 + +#define VERSION_LEN 256 +#define MAX_VOLUME_NAME 512 +#define MAX_PATH_LEN 64 +#define MAX_DEVICES 8 + +/* + * For superblock + */ +struct f2fs_device { + __u8 path[MAX_PATH_LEN]; + __le32 total_segments; +} __packed; + +struct f2fs_super_block { + __le32 magic; /* Magic Number */ + __le16 major_ver; /* Major Version */ + __le16 minor_ver; /* Minor Version */ + __le32 log_sectorsize; /* log2 sector size in bytes */ + __le32 log_sectors_per_block; /* log2 # of sectors per block */ + __le32 log_blocksize; /* log2 block size in bytes */ + __le32 log_blocks_per_seg; /* log2 # of blocks per segment */ + __le32 segs_per_sec; /* # of segments per section */ + __le32 secs_per_zone; /* # of sections per zone */ + __le32 checksum_offset; /* checksum offset inside super block */ + __le64 block_count; /* total # of user blocks */ + __le32 section_count; /* total # of sections */ + __le32 segment_count; /* total # of segments */ + __le32 segment_count_ckpt; /* # of segments for checkpoint */ + __le32 segment_count_sit; /* # of segments for SIT */ + __le32 segment_count_nat; /* # of segments for NAT */ + __le32 segment_count_ssa; /* # of segments for SSA */ + __le32 segment_count_main; /* # of segments for main area */ + __le32 segment0_blkaddr; /* start block address of segment 0 */ + __le32 cp_blkaddr; /* start block address of checkpoint */ + __le32 sit_blkaddr; /* start block address of SIT */ + __le32 nat_blkaddr; /* start block address of NAT */ + __le32 ssa_blkaddr; /* start block address of SSA */ + __le32 main_blkaddr; /* start block address of main area */ + __le32 root_ino; /* root inode number */ + __le32 node_ino; /* node inode number */ + __le32 meta_ino; /* meta inode number */ + __u8 uuid[16]; /* 128-bit uuid for volume */ + __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */ + __le32 extension_count; /* # of extensions below */ + __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */ + __le32 cp_payload; + __u8 version[VERSION_LEN]; /* the kernel version */ + __u8 init_version[VERSION_LEN]; /* the initial kernel version */ + __le32 feature; /* defined features */ + __u8 encryption_level; /* versioning level for encryption */ + __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ + struct f2fs_device devs[MAX_DEVICES]; /* device list */ + __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ + __u8 hot_ext_count; /* # of hot file extension */ + __u8 reserved[314]; /* valid reserved region */ +} __packed; + +/* + * For checkpoint + */ +#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 +#define CP_NOCRC_RECOVERY_FLAG 0x00000200 +#define CP_TRIMMED_FLAG 0x00000100 +#define CP_NAT_BITS_FLAG 0x00000080 +#define CP_CRC_RECOVERY_FLAG 0x00000040 +#define CP_FASTBOOT_FLAG 0x00000020 +#define CP_FSCK_FLAG 0x00000010 +#define CP_ERROR_FLAG 0x00000008 +#define CP_COMPACT_SUM_FLAG 0x00000004 +#define CP_ORPHAN_PRESENT_FLAG 0x00000002 +#define CP_UMOUNT_FLAG 0x00000001 + +#define F2FS_CP_PACKS 2 /* # of checkpoint packs */ + +struct f2fs_checkpoint { + __le64 checkpoint_ver; /* checkpoint block version number */ + __le64 user_block_count; /* # of user blocks */ + __le64 valid_block_count; /* # of valid blocks in main area */ + __le32 rsvd_segment_count; /* # of reserved segments for gc */ + __le32 overprov_segment_count; /* # of overprovision segments */ + __le32 free_segment_count; /* # of free segments in main area */ + + /* information of current node 
segments */ + __le32 cur_node_segno[MAX_ACTIVE_NODE_LOGS]; + __le16 cur_node_blkoff[MAX_ACTIVE_NODE_LOGS]; + /* information of current data segments */ + __le32 cur_data_segno[MAX_ACTIVE_DATA_LOGS]; + __le16 cur_data_blkoff[MAX_ACTIVE_DATA_LOGS]; + __le32 ckpt_flags; /* Flags : umount and journal_present */ + __le32 cp_pack_total_block_count; /* total # of one cp pack */ + __le32 cp_pack_start_sum; /* start block number of data summary */ + __le32 valid_node_count; /* Total number of valid nodes */ + __le32 valid_inode_count; /* Total number of valid inodes */ + __le32 next_free_nid; /* Next free node number */ + __le32 sit_ver_bitmap_bytesize; /* Default value 64 */ + __le32 nat_ver_bitmap_bytesize; /* Default value 256 */ + __le32 checksum_offset; /* checksum offset inside cp block */ + __le64 elapsed_time; /* mounted time */ + /* allocation type of current segment */ + unsigned char alloc_type[MAX_ACTIVE_LOGS]; + + /* SIT and NAT version bitmap */ + unsigned char sit_nat_version_bitmap[1]; +} __packed; + +/* + * For orphan inode management + */ +#define F2FS_ORPHANS_PER_BLOCK 1020 + +#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \ + F2FS_ORPHANS_PER_BLOCK) + +struct f2fs_orphan_block { + __le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */ + __le32 reserved; /* reserved */ + __le16 blk_addr; /* block index in current CP */ + __le16 blk_count; /* Number of orphan inode blocks in CP */ + __le32 entry_count; /* Total number of orphan nodes in current CP */ + __le32 check_sum; /* CRC32 for orphan inode block */ +} __packed; + +/* + * For NODE structure + */ +struct f2fs_extent { + __le32 fofs; /* start file offset of the extent */ + __le32 blk; /* start block address of the extent */ + __le32 len; /* lengh of the extent */ +} __packed; + +#define F2FS_NAME_LEN 255 +/* 200 bytes for inline xattrs by default */ +#define DEFAULT_INLINE_XATTR_ADDRS 50 +#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ +#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \ + get_extra_isize(inode)) +#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */ +#define ADDRS_PER_INODE(inode) addrs_per_inode(inode) +#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ +#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ + +#define ADDRS_PER_PAGE(page, inode) \ + (IS_INODE(page) ? 
ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK) + +#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1) +#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2) +#define NODE_IND1_BLOCK (DEF_ADDRS_PER_INODE + 3) +#define NODE_IND2_BLOCK (DEF_ADDRS_PER_INODE + 4) +#define NODE_DIND_BLOCK (DEF_ADDRS_PER_INODE + 5) + +#define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ +#define F2FS_INLINE_DATA 0x02 /* file inline data flag */ +#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ +#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ +#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ +#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ +#define F2FS_PIN_FILE 0x40 /* file should not be gced */ + +struct f2fs_inode { + __le16 i_mode; /* file mode */ + __u8 i_advise; /* file hints */ + __u8 i_inline; /* file inline flags */ + __le32 i_uid; /* user ID */ + __le32 i_gid; /* group ID */ + __le32 i_links; /* links count */ + __le64 i_size; /* file size in bytes */ + __le64 i_blocks; /* file size in blocks */ + __le64 i_atime; /* access time */ + __le64 i_ctime; /* change time */ + __le64 i_mtime; /* modification time */ + __le32 i_atime_nsec; /* access time in nano scale */ + __le32 i_ctime_nsec; /* change time in nano scale */ + __le32 i_mtime_nsec; /* modification time in nano scale */ + __le32 i_generation; /* file version (for NFS) */ + union { + __le32 i_current_depth; /* only for directory depth */ + __le16 i_gc_failures; /* + * # of gc failures on pinned file. + * only for regular files. + */ + }; + __le32 i_xattr_nid; /* nid to save xattr */ + __le32 i_flags; /* file attributes */ + __le32 i_pino; /* parent inode number */ + __le32 i_namelen; /* file name length */ + __u8 i_name[F2FS_NAME_LEN]; /* file name for SPOR */ + __u8 i_dir_level; /* dentry_level for large dir */ + + struct f2fs_extent i_ext; /* caching a largest extent */ + + union { + struct { + __le16 i_extra_isize; /* extra inode attribute size */ + __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */ + __le32 i_projid; /* project id */ + __le32 i_inode_checksum;/* inode meta checksum */ + __le64 i_crtime; /* creation time */ + __le32 i_crtime_nsec; /* creation time in nano scale */ + __le32 i_extra_end[0]; /* for attribute size calculation */ + } __packed; + __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ + }; + __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2), + double_indirect(1) node id */ +} __packed; + +struct direct_node { + __le32 addr[ADDRS_PER_BLOCK]; /* array of data block address */ +} __packed; + +struct indirect_node { + __le32 nid[NIDS_PER_BLOCK]; /* array of data block address */ +} __packed; + +enum { + COLD_BIT_SHIFT = 0, + FSYNC_BIT_SHIFT, + DENT_BIT_SHIFT, + OFFSET_BIT_SHIFT +}; + +#define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */ + +struct node_footer { + __le32 nid; /* node id */ + __le32 ino; /* inode nunmber */ + __le32 flag; /* include cold/fsync/dentry marks and offset */ + __le64 cp_ver; /* checkpoint version */ + __le32 next_blkaddr; /* next node page block address */ +} __packed; + +struct f2fs_node { + /* can be one of three types: inode, direct, and indirect types */ + union { + struct f2fs_inode i; + struct direct_node dn; + struct indirect_node in; + }; + struct node_footer footer; +} __packed; + +/* + * For NAT entries + */ +#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) + +struct f2fs_nat_entry { + __u8 version; /* latest version of cached nat entry */ + __le32 ino; /* 
inode number */ + __le32 block_addr; /* block address */ +} __packed; + +struct f2fs_nat_block { + struct f2fs_nat_entry entries[NAT_ENTRY_PER_BLOCK]; +} __packed; + +/* + * For SIT entries + * + * Each segment is 2MB in size by default so that a bitmap for validity of + * there-in blocks should occupy 64 bytes, 512 bits. + * Not allow to change this. + */ +#define SIT_VBLOCK_MAP_SIZE 64 +#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry)) + +/* + * F2FS uses 4 bytes to represent block address. As a result, supported size of + * disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments. + */ +#define F2FS_MAX_SEGMENT ((16 * 1024 * 1024) / 2) + +/* + * Note that f2fs_sit_entry->vblocks has the following bit-field information. + * [15:10] : allocation type such as CURSEG_XXXX_TYPE + * [9:0] : valid block count + */ +#define SIT_VBLOCKS_SHIFT 10 +#define SIT_VBLOCKS_MASK ((1 << SIT_VBLOCKS_SHIFT) - 1) +#define GET_SIT_VBLOCKS(raw_sit) \ + (le16_to_cpu((raw_sit)->vblocks) & SIT_VBLOCKS_MASK) +#define GET_SIT_TYPE(raw_sit) \ + ((le16_to_cpu((raw_sit)->vblocks) & ~SIT_VBLOCKS_MASK) \ + >> SIT_VBLOCKS_SHIFT) + +struct f2fs_sit_entry { + __le16 vblocks; /* reference above */ + __u8 valid_map[SIT_VBLOCK_MAP_SIZE]; /* bitmap for valid blocks */ + __le64 mtime; /* segment age for cleaning */ +} __packed; + +struct f2fs_sit_block { + struct f2fs_sit_entry entries[SIT_ENTRY_PER_BLOCK]; +} __packed; + +/* + * For segment summary + * + * One summary block contains exactly 512 summary entries, which represents + * exactly 2MB segment by default. Not allow to change the basic units. + * + * NOTE: For initializing fields, you must use set_summary + * + * - If data page, nid represents dnode's nid + * - If node page, nid represents the node page's nid. + * + * The ofs_in_node is used by only data page. It represents offset + * from node's page's beginning to get a data block address. + * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node) + */ +#define ENTRIES_IN_SUM 512 +#define SUMMARY_SIZE (7) /* sizeof(struct summary) */ +#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */ +#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM) + +/* a summary entry for a 4KB-sized block in a segment */ +struct f2fs_summary { + __le32 nid; /* parent node id */ + union { + __u8 reserved[3]; + struct { + __u8 version; /* node version number */ + __le16 ofs_in_node; /* block index in parent node */ + } __packed; + }; +} __packed; + +/* summary block type, node or data, is stored to the summary_footer */ +#define SUM_TYPE_NODE (1) +#define SUM_TYPE_DATA (0) + +struct summary_footer { + unsigned char entry_type; /* SUM_TYPE_XXX */ + __le32 check_sum; /* summary checksum */ +} __packed; + +#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\ + SUM_ENTRY_SIZE) +#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\ + sizeof(struct nat_journal_entry)) +#define NAT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\ + sizeof(struct nat_journal_entry)) +#define SIT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\ + sizeof(struct sit_journal_entry)) +#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\ + sizeof(struct sit_journal_entry)) + +/* Reserved area should make size of f2fs_extra_info equals to + * that of nat_journal and sit_journal. 
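To make that size relationship concrete, a worked calculation (editorial note, using the packed sizes of the journal structures defined below):

// SUM_JOURNAL_SIZE   = 4096 - 5 - 7 * 512 = 507 bytes
// nat_journal_entry  = 4 + 9  = 13 bytes
//   -> NAT_JOURNAL_ENTRIES = (507 - 2) / 13 = 38, NAT_JOURNAL_RESERVED = 11
// sit_journal_entry  = 4 + (2 + 64 + 8) = 78 bytes
//   -> SIT_JOURNAL_ENTRIES = (507 - 2) / 78 = 6,  SIT_JOURNAL_RESERVED = 37
// f2fs_extra_info    = 8 + EXTRA_INFO_RESERVED = 8 + 497 = 505 bytes
// so nat_journal (38*13 + 11), sit_journal (6*78 + 37) and f2fs_extra_info
// all occupy the same 505 bytes that follow the 2-byte n_nats/n_sits count
// in f2fs_journal.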
+ */ +#define EXTRA_INFO_RESERVED (SUM_JOURNAL_SIZE - 2 - 8) + +/* + * frequently updated NAT/SIT entries can be stored in the spare area in + * summary blocks + */ +enum { + NAT_JOURNAL = 0, + SIT_JOURNAL +}; + +struct nat_journal_entry { + __le32 nid; + struct f2fs_nat_entry ne; +} __packed; + +struct nat_journal { + struct nat_journal_entry entries[NAT_JOURNAL_ENTRIES]; + __u8 reserved[NAT_JOURNAL_RESERVED]; +} __packed; + +struct sit_journal_entry { + __le32 segno; + struct f2fs_sit_entry se; +} __packed; + +struct sit_journal { + struct sit_journal_entry entries[SIT_JOURNAL_ENTRIES]; + __u8 reserved[SIT_JOURNAL_RESERVED]; +} __packed; + +struct f2fs_extra_info { + __le64 kbytes_written; + __u8 reserved[EXTRA_INFO_RESERVED]; +} __packed; + +struct f2fs_journal { + union { + __le16 n_nats; + __le16 n_sits; + }; + /* spare area is used by NAT or SIT journals or extra info */ + union { + struct nat_journal nat_j; + struct sit_journal sit_j; + struct f2fs_extra_info info; + }; +} __packed; + +/* 4KB-sized summary block structure */ +struct f2fs_summary_block { + struct f2fs_summary entries[ENTRIES_IN_SUM]; + struct f2fs_journal journal; + struct summary_footer footer; +} __packed; + +/* + * For directory operations + */ +#define F2FS_DOT_HASH 0 +#define F2FS_DDOT_HASH F2FS_DOT_HASH +#define F2FS_MAX_HASH (~((0x3ULL) << 62)) +#define F2FS_HASH_COL_BIT ((0x1ULL) << 63) + +typedef __le32 f2fs_hash_t; + +/* One directory entry slot covers 8bytes-long file name */ +#define F2FS_SLOT_LEN 8 +#define F2FS_SLOT_LEN_BITS 3 + +#define GET_DENTRY_SLOTS(x) (((x) + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS) + +/* MAX level for dir lookup */ +#define MAX_DIR_HASH_DEPTH 63 + +/* MAX buckets in one level of dir */ +#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1)) + +/* + * space utilization of regular dentry and inline dentry (w/o extra reservation) + * regular dentry inline dentry (def) inline dentry (min) + * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1 + * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1 + * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22 + * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16 + * total 4096 3488 40 + * + * Note: there are more reserved space in inline dentry than in regular + * dentry, when converting inline dentry we should handle this carefully. + */ +#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */ +#define SIZE_OF_DIR_ENTRY 11 /* by byte */ +#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \ + BITS_PER_BYTE) +#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \ + F2FS_SLOT_LEN) * \ + NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP)) +#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' 
entries */ + +/* One directory entry slot representing F2FS_SLOT_LEN-sized file name */ +struct f2fs_dir_entry { + __le32 hash_code; /* hash code of file name */ + __le32 ino; /* inode number */ + __le16 name_len; /* lengh of file name */ + __u8 file_type; /* file type */ +} __packed; + +/* 4KB-sized directory entry block */ +struct f2fs_dentry_block { + /* validity bitmap for directory entries in each block */ + __u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP]; + __u8 reserved[SIZE_OF_RESERVED]; + struct f2fs_dir_entry dentry[NR_DENTRY_IN_BLOCK]; + __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; +} __packed; + +/* file types used in inode_info->flags */ +enum { + F2FS_FT_UNKNOWN, + F2FS_FT_REG_FILE, + F2FS_FT_DIR, + F2FS_FT_CHRDEV, + F2FS_FT_BLKDEV, + F2FS_FT_FIFO, + F2FS_FT_SOCK, + F2FS_FT_SYMLINK, + F2FS_FT_MAX +}; + +#define S_SHIFT 12 + +#define F2FS_DEF_PROJID 0 /* default project ID */ + +#endif /* _LINUX_F2FS_FS_H */ diff --git a/include/linux/f75375s.h b/include/linux/f75375s.h new file mode 100644 index 000000000..e99e22500 --- /dev/null +++ b/include/linux/f75375s.h @@ -0,0 +1,21 @@ +/* + * f75375s.h - platform data structure for f75375s sensor + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007, Riku Voipio + */ + +#ifndef __LINUX_F75375S_H +#define __LINUX_F75375S_H + +/* We want to set fans spinning on systems where there is no + * BIOS to do that for us */ +struct f75375s_platform_data { + u8 pwm[2]; + u8 pwm_enable[2]; +}; + +#endif /* __LINUX_F75375S_H */ diff --git a/include/linux/falloc.h b/include/linux/falloc.h new file mode 100644 index 000000000..674d59f4d --- /dev/null +++ b/include/linux/falloc.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FALLOC_H_ +#define _FALLOC_H_ + +#include + + +/* + * Space reservation ioctls and argument structure + * are designed to be compatible with the legacy XFS ioctls. 
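As a brief userspace-side sketch of how a caller fills this XFS-compatible reservation request (illustrative only; the file descriptor, includes such as <sys/ioctl.h>, and error handling are assumptions, whether a given filesystem honours the ioctl is filesystem-dependent, and the same effect is normally obtained through fallocate(2) with FALLOC_FL_KEEP_SIZE):

// Hypothetical example: reserve 16 MiB for 'fd' without changing its size.
struct space_resv sr = {
	.l_whence = SEEK_SET,		// interpret l_start from the file start
	.l_start  = 0,
	.l_len    = 16 * 1024 * 1024,	// length of the reservation in bytes
};

if (ioctl(fd, FS_IOC_RESVSP64, &sr) != 0)
	perror("FS_IOC_RESVSP64");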
+ */ +struct space_resv { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; /* len == 0 means until end of file */ + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; /* reserved area */ +}; + +#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv) +#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv) + +#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \ + FALLOC_FL_PUNCH_HOLE | \ + FALLOC_FL_COLLAPSE_RANGE | \ + FALLOC_FL_ZERO_RANGE | \ + FALLOC_FL_INSERT_RANGE | \ + FALLOC_FL_UNSHARE_RANGE) + +#endif /* _FALLOC_H_ */ diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h new file mode 100644 index 000000000..096c96f4f --- /dev/null +++ b/include/linux/fanotify.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FANOTIFY_H +#define _LINUX_FANOTIFY_H + +#include + +/* not valid from userspace, only kernel internal */ +#define FAN_MARK_ONDIR 0x00000100 +#endif /* _LINUX_FANOTIFY_H */ diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h new file mode 100644 index 000000000..7e6c77740 --- /dev/null +++ b/include/linux/fault-inject.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FAULT_INJECT_H +#define _LINUX_FAULT_INJECT_H + +#ifdef CONFIG_FAULT_INJECTION + +#include +#include +#include +#include + +/* + * For explanation of the elements of this struct, see + * Documentation/fault-injection/fault-injection.txt + */ +struct fault_attr { + unsigned long probability; + unsigned long interval; + atomic_t times; + atomic_t space; + unsigned long verbose; + bool task_filter; + unsigned long stacktrace_depth; + unsigned long require_start; + unsigned long require_end; + unsigned long reject_start; + unsigned long reject_end; + + unsigned long count; + struct ratelimit_state ratelimit_state; + struct dentry *dname; +}; + +#define FAULT_ATTR_INITIALIZER { \ + .interval = 1, \ + .times = ATOMIC_INIT(1), \ + .require_end = ULONG_MAX, \ + .stacktrace_depth = 32, \ + .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \ + .verbose = 2, \ + .dname = NULL, \ + } + +#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER +int setup_fault_attr(struct fault_attr *attr, char *str); +bool should_fail(struct fault_attr *attr, ssize_t size); + +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + +struct dentry *fault_create_debugfs_attr(const char *name, + struct dentry *parent, struct fault_attr *attr); + +#else /* CONFIG_FAULT_INJECTION_DEBUG_FS */ + +static inline struct dentry *fault_create_debugfs_attr(const char *name, + struct dentry *parent, struct fault_attr *attr) +{ + return ERR_PTR(-ENODEV); +} + +#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ + +#endif /* CONFIG_FAULT_INJECTION */ + +struct kmem_cache; + +int should_failslab(struct kmem_cache *s, gfp_t gfpflags); +#ifdef CONFIG_FAILSLAB +extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags); +#else +static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) +{ + return false; +} +#endif /* CONFIG_FAILSLAB */ + +#endif /* _LINUX_FAULT_INJECT_H */ diff --git a/include/linux/fb.h b/include/linux/fb.h new file mode 100644 index 000000000..7bfed8460 --- /dev/null +++ b/include/linux/fb.h @@ -0,0 +1,838 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FB_H +#define _LINUX_FB_H + +#include +#include + +#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user) + +#include +#include +#include +#include +#include +#include +#include +#include + +struct vm_area_struct; +struct fb_info; +struct device; +struct 
file; +struct videomode; +struct device_node; + +/* Definitions below are used in the parsed monitor specs */ +#define FB_DPMS_ACTIVE_OFF 1 +#define FB_DPMS_SUSPEND 2 +#define FB_DPMS_STANDBY 4 + +#define FB_DISP_DDI 1 +#define FB_DISP_ANA_700_300 2 +#define FB_DISP_ANA_714_286 4 +#define FB_DISP_ANA_1000_400 8 +#define FB_DISP_ANA_700_000 16 + +#define FB_DISP_MONO 32 +#define FB_DISP_RGB 64 +#define FB_DISP_MULTI 128 +#define FB_DISP_UNKNOWN 256 + +#define FB_SIGNAL_NONE 0 +#define FB_SIGNAL_BLANK_BLANK 1 +#define FB_SIGNAL_SEPARATE 2 +#define FB_SIGNAL_COMPOSITE 4 +#define FB_SIGNAL_SYNC_ON_GREEN 8 +#define FB_SIGNAL_SERRATION_ON 16 + +#define FB_MISC_PRIM_COLOR 1 +#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */ +#define FB_MISC_HDMI 4 +struct fb_chroma { + __u32 redx; /* in fraction of 1024 */ + __u32 greenx; + __u32 bluex; + __u32 whitex; + __u32 redy; + __u32 greeny; + __u32 bluey; + __u32 whitey; +}; + +struct fb_monspecs { + struct fb_chroma chroma; + struct fb_videomode *modedb; /* mode database */ + __u8 manufacturer[4]; /* Manufacturer */ + __u8 monitor[14]; /* Monitor String */ + __u8 serial_no[14]; /* Serial Number */ + __u8 ascii[14]; /* ? */ + __u32 modedb_len; /* mode database length */ + __u32 model; /* Monitor Model */ + __u32 serial; /* Serial Number - Integer */ + __u32 year; /* Year manufactured */ + __u32 week; /* Week Manufactured */ + __u32 hfmin; /* hfreq lower limit (Hz) */ + __u32 hfmax; /* hfreq upper limit (Hz) */ + __u32 dclkmin; /* pixelclock lower limit (Hz) */ + __u32 dclkmax; /* pixelclock upper limit (Hz) */ + __u16 input; /* display type - see FB_DISP_* */ + __u16 dpms; /* DPMS support - see FB_DPMS_ */ + __u16 signal; /* Signal Type - see FB_SIGNAL_* */ + __u16 vfmin; /* vfreq lower limit (Hz) */ + __u16 vfmax; /* vfreq upper limit (Hz) */ + __u16 gamma; /* Gamma - in fractions of 100 */ + __u16 gtf : 1; /* supports GTF */ + __u16 misc; /* Misc flags - see FB_MISC_* */ + __u8 version; /* EDID version... 
*/ + __u8 revision; /* ...and revision */ + __u8 max_x; /* Maximum horizontal size (cm) */ + __u8 max_y; /* Maximum vertical size (cm) */ +}; + +struct fb_cmap_user { + __u32 start; /* First entry */ + __u32 len; /* Number of entries */ + __u16 __user *red; /* Red values */ + __u16 __user *green; + __u16 __user *blue; + __u16 __user *transp; /* transparency, can be NULL */ +}; + +struct fb_image_user { + __u32 dx; /* Where to place image */ + __u32 dy; + __u32 width; /* Size of image */ + __u32 height; + __u32 fg_color; /* Only used when a mono bitmap */ + __u32 bg_color; + __u8 depth; /* Depth of the image */ + const char __user *data; /* Pointer to image data */ + struct fb_cmap_user cmap; /* color map info */ +}; + +struct fb_cursor_user { + __u16 set; /* what to set */ + __u16 enable; /* cursor on/off */ + __u16 rop; /* bitop operation */ + const char __user *mask; /* cursor mask bits */ + struct fbcurpos hot; /* cursor hot spot */ + struct fb_image_user image; /* Cursor image */ +}; + +/* + * Register/unregister for framebuffer events + */ + +/* The resolution of the passed in fb_info about to change */ +#define FB_EVENT_MODE_CHANGE 0x01 +/* The display on this fb_info is being suspended, no access to the + * framebuffer is allowed any more after that call returns + */ +#define FB_EVENT_SUSPEND 0x02 +/* The display on this fb_info was resumed, you can restore the display + * if you own it + */ +#define FB_EVENT_RESUME 0x03 +/* An entry from the modelist was removed */ +#define FB_EVENT_MODE_DELETE 0x04 +/* A driver registered itself */ +#define FB_EVENT_FB_REGISTERED 0x05 +/* A driver unregistered itself */ +#define FB_EVENT_FB_UNREGISTERED 0x06 +/* CONSOLE-SPECIFIC: get console to framebuffer mapping */ +#define FB_EVENT_GET_CONSOLE_MAP 0x07 +/* CONSOLE-SPECIFIC: set console to framebuffer mapping */ +#define FB_EVENT_SET_CONSOLE_MAP 0x08 +/* A hardware display blank change occurred */ +#define FB_EVENT_BLANK 0x09 +/* Private modelist is to be replaced */ +#define FB_EVENT_NEW_MODELIST 0x0A +/* The resolution of the passed in fb_info about to change and + all vc's should be changed */ +#define FB_EVENT_MODE_CHANGE_ALL 0x0B +/* A software display blank change occurred */ +#define FB_EVENT_CONBLANK 0x0C +/* Get drawing requirements */ +#define FB_EVENT_GET_REQ 0x0D +/* Unbind from the console if possible */ +#define FB_EVENT_FB_UNBIND 0x0E +/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */ +#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F +/* A hardware display blank early change occurred */ +#define FB_EARLY_EVENT_BLANK 0x10 +/* A hardware display blank revert early change occurred */ +#define FB_R_EARLY_EVENT_BLANK 0x11 + +struct fb_event { + struct fb_info *info; + void *data; +}; + +struct fb_blit_caps { + u32 x; + u32 y; + u32 len; + u32 flags; +}; + +#ifdef CONFIG_FB_NOTIFY +extern int fb_register_client(struct notifier_block *nb); +extern int fb_unregister_client(struct notifier_block *nb); +extern int fb_notifier_call_chain(unsigned long val, void *v); +#else +static inline int fb_register_client(struct notifier_block *nb) +{ + return 0; +}; + +static inline int fb_unregister_client(struct notifier_block *nb) +{ + return 0; +}; + +static inline int fb_notifier_call_chain(unsigned long val, void *v) +{ + return 0; +}; +#endif + +/* + * Pixmap structure definition + * + * The purpose of this structure is to translate data + * from the hardware independent format of fbdev to what + * format the hardware needs. 
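As a small sketch of the FB_EVENT_* notifier interface a few lines above (illustrative only; the callback and notifier block names are hypothetical, and NOTIFY_OK comes from <linux/notifier.h>):

static int sketch_fb_cb(struct notifier_block *nb, unsigned long action, void *data)
{
	struct fb_event *event = data;

	if (action == FB_EVENT_BLANK)
		dev_info(event->info->device, "blank state changed\n");
	return NOTIFY_OK;
}

static struct notifier_block sketch_fb_nb = {
	.notifier_call = sketch_fb_cb,
};

// In driver init / exit:
//	fb_register_client(&sketch_fb_nb);
//	fb_unregister_client(&sketch_fb_nb);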
+ */ + +#define FB_PIXMAP_DEFAULT 1 /* used internally by fbcon */ +#define FB_PIXMAP_SYSTEM 2 /* memory is in system RAM */ +#define FB_PIXMAP_IO 4 /* memory is iomapped */ +#define FB_PIXMAP_SYNC 256 /* set if GPU can DMA */ + +struct fb_pixmap { + u8 *addr; /* pointer to memory */ + u32 size; /* size of buffer in bytes */ + u32 offset; /* current offset to buffer */ + u32 buf_align; /* byte alignment of each bitmap */ + u32 scan_align; /* alignment per scanline */ + u32 access_align; /* alignment per read/write (bits) */ + u32 flags; /* see FB_PIXMAP_* */ + u32 blit_x; /* supported bit block dimensions (1-32)*/ + u32 blit_y; /* Format: blit_x = 1 << (width - 1) */ + /* blit_y = 1 << (height - 1) */ + /* if 0, will be set to 0xffffffff (all)*/ + /* access methods */ + void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size); + void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size); +}; + +#ifdef CONFIG_FB_DEFERRED_IO +struct fb_deferred_io { + /* delay between mkwrite and deferred handler */ + unsigned long delay; + struct mutex lock; /* mutex that protects the page list */ + struct list_head pagelist; /* list of touched pages */ + /* callback */ + void (*first_io)(struct fb_info *info); + void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); +}; +#endif + +/* + * Frame buffer operations + * + * LOCKING NOTE: those functions must _ALL_ be called with the console + * semaphore held, this is the only suitable locking mechanism we have + * in 2.6. Some may be called at interrupt time at this point though. + * + * The exception to this is the debug related hooks. Putting the fb + * into a debug state (e.g. flipping to the kernel console) and restoring + * it must be done in a lock-free manner, so low level drivers should + * keep track of the initial console (if applicable) and may need to + * perform direct, unlocked hardware writes in these hooks. 
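As a minimal sketch of how a driver typically fills the operations table defined just below (illustrative only; the sketch_* callbacks are hypothetical, THIS_MODULE assumes the usual module boilerplate, and the cfb_* helpers are the generic drawing operations declared later in this header):

static struct fb_ops sketch_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= sketch_check_var,	// validate/adjust a requested mode
	.fb_set_par	= sketch_set_par,	// program the hardware from info->var
	.fb_setcolreg	= sketch_setcolreg,	// fill one (pseudo-)palette entry
	.fb_fillrect	= cfb_fillrect,		// generic mmio drawing helpers
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};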
+ */ + +struct fb_ops { + /* open/release and usage marking */ + struct module *owner; + int (*fb_open)(struct fb_info *info, int user); + int (*fb_release)(struct fb_info *info, int user); + + /* For framebuffers with strange non linear layouts or that do not + * work with normal memory mapped access + */ + ssize_t (*fb_read)(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos); + ssize_t (*fb_write)(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos); + + /* checks var and eventually tweaks it to something supported, + * DO NOT MODIFY PAR */ + int (*fb_check_var)(struct fb_var_screeninfo *var, struct fb_info *info); + + /* set the video mode according to info->var */ + int (*fb_set_par)(struct fb_info *info); + + /* set color register */ + int (*fb_setcolreg)(unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, struct fb_info *info); + + /* set color registers in batch */ + int (*fb_setcmap)(struct fb_cmap *cmap, struct fb_info *info); + + /* blank display */ + int (*fb_blank)(int blank, struct fb_info *info); + + /* pan display */ + int (*fb_pan_display)(struct fb_var_screeninfo *var, struct fb_info *info); + + /* Draws a rectangle */ + void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect); + /* Copy data from area to another */ + void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region); + /* Draws a image to the display */ + void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image); + + /* Draws cursor */ + int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor); + + /* wait for blit idle, optional */ + int (*fb_sync)(struct fb_info *info); + + /* perform fb specific ioctl (optional) */ + int (*fb_ioctl)(struct fb_info *info, unsigned int cmd, + unsigned long arg); + + /* Handle 32bit compat ioctl (optional) */ + int (*fb_compat_ioctl)(struct fb_info *info, unsigned cmd, + unsigned long arg); + + /* perform fb specific mmap */ + int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma); + + /* get capability given var */ + void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps, + struct fb_var_screeninfo *var); + + /* teardown any resources to do with this framebuffer */ + void (*fb_destroy)(struct fb_info *info); + + /* called at KDB enter and leave time to prepare the console */ + int (*fb_debug_enter)(struct fb_info *info); + int (*fb_debug_leave)(struct fb_info *info); +}; + +#ifdef CONFIG_FB_TILEBLITTING +#define FB_TILE_CURSOR_NONE 0 +#define FB_TILE_CURSOR_UNDERLINE 1 +#define FB_TILE_CURSOR_LOWER_THIRD 2 +#define FB_TILE_CURSOR_LOWER_HALF 3 +#define FB_TILE_CURSOR_TWO_THIRDS 4 +#define FB_TILE_CURSOR_BLOCK 5 + +struct fb_tilemap { + __u32 width; /* width of each tile in pixels */ + __u32 height; /* height of each tile in scanlines */ + __u32 depth; /* color depth of each tile */ + __u32 length; /* number of tiles in the map */ + const __u8 *data; /* actual tile map: a bitmap array, packed + to the nearest byte */ +}; + +struct fb_tilerect { + __u32 sx; /* origin in the x-axis */ + __u32 sy; /* origin in the y-axis */ + __u32 width; /* number of tiles in the x-axis */ + __u32 height; /* number of tiles in the y-axis */ + __u32 index; /* what tile to use: index to tile map */ + __u32 fg; /* foreground color */ + __u32 bg; /* background color */ + __u32 rop; /* raster operation */ +}; + +struct fb_tilearea { + __u32 sx; /* source origin in the x-axis */ + __u32 sy; /* source origin in the y-axis */ + __u32 dx; /* destination 
origin in the x-axis */ + __u32 dy; /* destination origin in the y-axis */ + __u32 width; /* number of tiles in the x-axis */ + __u32 height; /* number of tiles in the y-axis */ +}; + +struct fb_tileblit { + __u32 sx; /* origin in the x-axis */ + __u32 sy; /* origin in the y-axis */ + __u32 width; /* number of tiles in the x-axis */ + __u32 height; /* number of tiles in the y-axis */ + __u32 fg; /* foreground color */ + __u32 bg; /* background color */ + __u32 length; /* number of tiles to draw */ + __u32 *indices; /* array of indices to tile map */ +}; + +struct fb_tilecursor { + __u32 sx; /* cursor position in the x-axis */ + __u32 sy; /* cursor position in the y-axis */ + __u32 mode; /* 0 = erase, 1 = draw */ + __u32 shape; /* see FB_TILE_CURSOR_* */ + __u32 fg; /* foreground color */ + __u32 bg; /* background color */ +}; + +struct fb_tile_ops { + /* set tile characteristics */ + void (*fb_settile)(struct fb_info *info, struct fb_tilemap *map); + + /* all dimensions from hereon are in terms of tiles */ + + /* move a rectangular region of tiles from one area to another*/ + void (*fb_tilecopy)(struct fb_info *info, struct fb_tilearea *area); + /* fill a rectangular region with a tile */ + void (*fb_tilefill)(struct fb_info *info, struct fb_tilerect *rect); + /* copy an array of tiles */ + void (*fb_tileblit)(struct fb_info *info, struct fb_tileblit *blit); + /* cursor */ + void (*fb_tilecursor)(struct fb_info *info, + struct fb_tilecursor *cursor); + /* get maximum length of the tile map */ + int (*fb_get_tilemax)(struct fb_info *info); +}; +#endif /* CONFIG_FB_TILEBLITTING */ + +/* FBINFO_* = fb_info.flags bit flags */ +#define FBINFO_DEFAULT 0 +#define FBINFO_HWACCEL_DISABLED 0x0002 + /* When FBINFO_HWACCEL_DISABLED is set: + * Hardware acceleration is turned off. Software implementations + * of required functions (copyarea(), fillrect(), and imageblit()) + * takes over; acceleration engine should be in a quiescent state */ + +/* hints */ +#define FBINFO_VIRTFB 0x0004 /* FB is System RAM, not device. */ +#define FBINFO_PARTIAL_PAN_OK 0x0040 /* otw use pan only for double-buffering */ +#define FBINFO_READS_FAST 0x0080 /* soft-copy faster than rendering */ + +/* hardware supported ops */ +/* semantics: when a bit is set, it indicates that the operation is + * accelerated by hardware. + * required functions will still work even if the bit is not set. + * optional functions may not even exist if the flag bit is not set. + */ +#define FBINFO_HWACCEL_NONE 0x0000 +#define FBINFO_HWACCEL_COPYAREA 0x0100 /* required */ +#define FBINFO_HWACCEL_FILLRECT 0x0200 /* required */ +#define FBINFO_HWACCEL_IMAGEBLIT 0x0400 /* required */ +#define FBINFO_HWACCEL_ROTATE 0x0800 /* optional */ +#define FBINFO_HWACCEL_XPAN 0x1000 /* optional */ +#define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ +#define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ + +#define FBINFO_MISC_USEREVENT 0x10000 /* event request + from userspace */ +#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ + +/* A driver may set this flag to indicate that it does want a set_par to be + * called every time when fbcon_switch is executed. The advantage is that with + * this flag set you can really be sure that set_par is always called before + * any of the functions dependent on the correct hardware state or altering + * that state, even if you are using some broken X releases. The disadvantage + * is that it introduces unwanted delays to every console switch if set_par + * is slow. 
It is a good idea to try this flag in the drivers initialization + * code whenever there is a bug report related to switching between X and the + * framebuffer console. + */ +#define FBINFO_MISC_ALWAYS_SETPAR 0x40000 + +/* where the fb is a firmware driver, and can be replaced with a proper one */ +#define FBINFO_MISC_FIRMWARE 0x80000 +/* + * Host and GPU endianness differ. + */ +#define FBINFO_FOREIGN_ENDIAN 0x100000 +/* + * Big endian math. This is the same flags as above, but with different + * meaning, it is set by the fb subsystem depending FOREIGN_ENDIAN flag + * and host endianness. Drivers should not use this flag. + */ +#define FBINFO_BE_MATH 0x100000 + +/* report to the VT layer that this fb driver can accept forced console + output like oopses */ +#define FBINFO_CAN_FORCE_OUTPUT 0x200000 + +struct fb_info { + atomic_t count; + int node; + int flags; + /* + * -1 by default, set to a FB_ROTATE_* value by the driver, if it knows + * a lcd is not mounted upright and fbcon should rotate to compensate. + */ + int fbcon_rotate_hint; + struct mutex lock; /* Lock for open/release/ioctl funcs */ + struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ + struct fb_var_screeninfo var; /* Current var */ + struct fb_fix_screeninfo fix; /* Current fix */ + struct fb_monspecs monspecs; /* Current Monitor specs */ + struct work_struct queue; /* Framebuffer event queue */ + struct fb_pixmap pixmap; /* Image hardware mapper */ + struct fb_pixmap sprite; /* Cursor hardware mapper */ + struct fb_cmap cmap; /* Current cmap */ + struct list_head modelist; /* mode list */ + struct fb_videomode *mode; /* current mode */ + +#ifdef CONFIG_FB_BACKLIGHT + /* assigned backlight device */ + /* set before framebuffer registration, + remove after unregister */ + struct backlight_device *bl_dev; + + /* Backlight level curve */ + struct mutex bl_curve_mutex; + u8 bl_curve[FB_BACKLIGHT_LEVELS]; +#endif +#ifdef CONFIG_FB_DEFERRED_IO + struct delayed_work deferred_work; + struct fb_deferred_io *fbdefio; +#endif + + struct fb_ops *fbops; + struct device *device; /* This is the parent */ + struct device *dev; /* This is this fb device */ + int class_flag; /* private sysfs flags */ +#ifdef CONFIG_FB_TILEBLITTING + struct fb_tile_ops *tileops; /* Tile Blitting */ +#endif + union { + char __iomem *screen_base; /* Virtual address */ + char *screen_buffer; + }; + unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ + void *pseudo_palette; /* Fake palette of 16 colors */ +#define FBINFO_STATE_RUNNING 0 +#define FBINFO_STATE_SUSPENDED 1 + u32 state; /* Hardware state i.e suspend */ + void *fbcon_par; /* fbcon use-only private area */ + /* From here on everything is device dependent */ + void *par; + /* we need the PCI or similar aperture base/size not + smem_start/size as smem_start may just be an object + allocated inside the aperture so may not actually overlap */ + struct apertures_struct { + unsigned int count; + struct aperture { + resource_size_t base; + resource_size_t size; + } ranges[0]; + } *apertures; + + bool skip_vt_switch; /* no VT switch on suspend/resume required */ +}; + +static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { + struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct) + + max_num * sizeof(struct aperture), GFP_KERNEL); + if (!a) + return NULL; + a->count = max_num; + return a; +} + +#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT + +/* This will go away + * fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags + * when it wants to turn 
the acceleration engine on. This is + * really a separate operation, and should be modified via sysfs. + * But for now, we leave it broken with the following define + */ +#define STUPID_ACCELF_TEXT_SHIT + +// This will go away +#if defined(__sparc__) + +/* We map all of our framebuffers such that big-endian accesses + * are what we want, so the following is sufficient. + */ + +// This will go away +#define fb_readb sbus_readb +#define fb_readw sbus_readw +#define fb_readl sbus_readl +#define fb_readq sbus_readq +#define fb_writeb sbus_writeb +#define fb_writew sbus_writew +#define fb_writel sbus_writel +#define fb_writeq sbus_writeq +#define fb_memset sbus_memset_io +#define fb_memcpy_fromfb sbus_memcpy_fromio +#define fb_memcpy_tofb sbus_memcpy_toio + +#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \ + defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \ + defined(__arm__) || defined(__aarch64__) + +#define fb_readb __raw_readb +#define fb_readw __raw_readw +#define fb_readl __raw_readl +#define fb_readq __raw_readq +#define fb_writeb __raw_writeb +#define fb_writew __raw_writew +#define fb_writel __raw_writel +#define fb_writeq __raw_writeq +#define fb_memset memset_io +#define fb_memcpy_fromfb memcpy_fromio +#define fb_memcpy_tofb memcpy_toio + +#else + +#define fb_readb(addr) (*(volatile u8 *) (addr)) +#define fb_readw(addr) (*(volatile u16 *) (addr)) +#define fb_readl(addr) (*(volatile u32 *) (addr)) +#define fb_readq(addr) (*(volatile u64 *) (addr)) +#define fb_writeb(b,addr) (*(volatile u8 *) (addr) = (b)) +#define fb_writew(b,addr) (*(volatile u16 *) (addr) = (b)) +#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b)) +#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b)) +#define fb_memset memset +#define fb_memcpy_fromfb memcpy +#define fb_memcpy_tofb memcpy + +#endif + +#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0) +#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \ + (val) << (bits)) +#define FB_SHIFT_LOW(p, val, bits) (fb_be_math(p) ? 
(val) << (bits) : \ + (val) >> (bits)) + + /* + * `Generic' versions of the frame buffer device operations + */ + +extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var); +extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var); +extern int fb_blank(struct fb_info *info, int blank); +extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); +extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image); +/* + * Drawing operations where framebuffer is in system RAM + */ +extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area); +extern void sys_imageblit(struct fb_info *info, const struct fb_image *image); +extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos); +extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos); + +/* drivers/video/fbmem.c */ +extern int register_framebuffer(struct fb_info *fb_info); +extern int unregister_framebuffer(struct fb_info *fb_info); +extern int unlink_framebuffer(struct fb_info *fb_info); +extern int remove_conflicting_framebuffers(struct apertures_struct *a, + const char *name, bool primary); +extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); +extern int fb_show_logo(struct fb_info *fb_info, int rotate); +extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); +extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, + u32 height, u32 shift_high, u32 shift_low, u32 mod); +extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height); +extern void fb_set_suspend(struct fb_info *info, int state); +extern int fb_get_color_depth(struct fb_var_screeninfo *var, + struct fb_fix_screeninfo *fix); +extern int fb_get_options(const char *name, char **option); +extern int fb_new_modelist(struct fb_info *info); + +extern struct fb_info *registered_fb[FB_MAX]; +extern int num_registered_fb; +extern struct class *fb_class; + +#define for_each_registered_fb(i) \ + for (i = 0; i < FB_MAX; i++) \ + if (!registered_fb[i]) {} else + +extern int lock_fb_info(struct fb_info *info); + +static inline void unlock_fb_info(struct fb_info *info) +{ + mutex_unlock(&info->lock); +} + +static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, + u8 *src, u32 s_pitch, u32 height) +{ + u32 i, j; + + d_pitch -= s_pitch; + + for (i = height; i--; ) { + /* s_pitch is a few bytes at the most, memcpy is suboptimal */ + for (j = 0; j < s_pitch; j++) + *dst++ = *src++; + dst += d_pitch; + } +} + +/* drivers/video/fb_defio.c */ +int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma); +extern void fb_deferred_io_init(struct fb_info *info); +extern void fb_deferred_io_open(struct fb_info *info, + struct inode *inode, + struct file *file); +extern void fb_deferred_io_cleanup(struct fb_info *info); +extern int fb_deferred_io_fsync(struct file *file, loff_t start, + loff_t end, int datasync); + +static inline bool fb_be_math(struct fb_info *info) +{ +#ifdef CONFIG_FB_FOREIGN_ENDIAN +#if defined(CONFIG_FB_BOTH_ENDIAN) + return info->flags & FBINFO_BE_MATH; +#elif defined(CONFIG_FB_BIG_ENDIAN) + return true; +#elif defined(CONFIG_FB_LITTLE_ENDIAN) + return false; +#endif /* CONFIG_FB_BOTH_ENDIAN */ +#else +#ifdef __BIG_ENDIAN + return 
true; +#else + return false; +#endif /* __BIG_ENDIAN */ +#endif /* CONFIG_FB_FOREIGN_ENDIAN */ +} + +/* drivers/video/fbsysfs.c */ +extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); +extern void framebuffer_release(struct fb_info *info); +extern int fb_init_device(struct fb_info *fb_info); +extern void fb_cleanup_device(struct fb_info *head); +extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max); + +/* drivers/video/fbmon.c */ +#define FB_MAXTIMINGS 0 +#define FB_VSYNCTIMINGS 1 +#define FB_HSYNCTIMINGS 2 +#define FB_DCLKTIMINGS 3 +#define FB_IGNOREMON 0x100 + +#define FB_MODE_IS_UNKNOWN 0 +#define FB_MODE_IS_DETAILED 1 +#define FB_MODE_IS_STANDARD 2 +#define FB_MODE_IS_VESA 4 +#define FB_MODE_IS_CALCULATED 8 +#define FB_MODE_IS_FIRST 16 +#define FB_MODE_IS_FROM_VAR 32 + +extern int fbmon_dpms(const struct fb_info *fb_info); +extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, + struct fb_info *info); +extern int fb_validate_mode(const struct fb_var_screeninfo *var, + struct fb_info *info); +extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var); +extern const unsigned char *fb_firmware_edid(struct device *device); +extern void fb_edid_to_monspecs(unsigned char *edid, + struct fb_monspecs *specs); +extern void fb_destroy_modedb(struct fb_videomode *modedb); +extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb); +extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter); + +extern int of_get_fb_videomode(struct device_node *np, + struct fb_videomode *fb, + int index); +extern int fb_videomode_from_videomode(const struct videomode *vm, + struct fb_videomode *fbmode); + +/* drivers/video/modedb.c */ +#define VESA_MODEDB_SIZE 43 +#define DMT_SIZE 0x50 + +extern void fb_var_to_videomode(struct fb_videomode *mode, + const struct fb_var_screeninfo *var); +extern void fb_videomode_to_var(struct fb_var_screeninfo *var, + const struct fb_videomode *mode); +extern int fb_mode_is_equal(const struct fb_videomode *mode1, + const struct fb_videomode *mode2); +extern int fb_add_videomode(const struct fb_videomode *mode, + struct list_head *head); +extern void fb_delete_videomode(const struct fb_videomode *mode, + struct list_head *head); +extern const struct fb_videomode *fb_match_mode(const struct fb_var_screeninfo *var, + struct list_head *head); +extern const struct fb_videomode *fb_find_best_mode(const struct fb_var_screeninfo *var, + struct list_head *head); +extern const struct fb_videomode *fb_find_nearest_mode(const struct fb_videomode *mode, + struct list_head *head); +extern void fb_destroy_modelist(struct list_head *head); +extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num, + struct list_head *head); +extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs, + struct list_head *head); + +/* drivers/video/fbcmap.c */ +extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp); +extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags); +extern void fb_dealloc_cmap(struct fb_cmap *cmap); +extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); +extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); +extern int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *fb_info); +extern int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *fb_info); +extern const struct fb_cmap *fb_default_cmap(int len); +extern void 
fb_invert_cmaps(void); + +struct fb_videomode { + const char *name; /* optional */ + u32 refresh; /* optional */ + u32 xres; + u32 yres; + u32 pixclock; + u32 left_margin; + u32 right_margin; + u32 upper_margin; + u32 lower_margin; + u32 hsync_len; + u32 vsync_len; + u32 sync; + u32 vmode; + u32 flag; +}; + +struct dmt_videomode { + u32 dmt_id; + u32 std_2byte_code; + u32 cvt_3byte_code; + const struct fb_videomode *mode; +}; + +extern const char *fb_mode_option; +extern const struct fb_videomode vesa_modes[]; +extern const struct dmt_videomode dmt_modes[]; + +struct fb_modelist { + struct list_head list; + struct fb_videomode mode; +}; + +extern int fb_find_mode(struct fb_var_screeninfo *var, + struct fb_info *info, const char *mode_option, + const struct fb_videomode *db, + unsigned int dbsize, + const struct fb_videomode *default_mode, + unsigned int default_bpp); + +/* Convenience logging macros */ +#define fb_err(fb_info, fmt, ...) \ + pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_notice(info, fmt, ...) \ + pr_notice("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_warn(fb_info, fmt, ...) \ + pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_info(fb_info, fmt, ...) \ + pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_dbg(fb_info, fmt, ...) \ + pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) + +#endif /* _LINUX_FB_H */ diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h new file mode 100644 index 000000000..f68a7db14 --- /dev/null +++ b/include/linux/fbcon.h @@ -0,0 +1,12 @@ +#ifndef _LINUX_FBCON_H +#define _LINUX_FBCON_H + +#ifdef CONFIG_FRAMEBUFFER_CONSOLE +void __init fb_console_init(void); +void __exit fb_console_exit(void); +#else +static inline void fb_console_init(void) {} +static inline void fb_console_exit(void) {} +#endif + +#endif /* _LINUX_FBCON_H */ diff --git a/include/linux/fcdevice.h b/include/linux/fcdevice.h new file mode 100644 index 000000000..5009fa16b --- /dev/null +++ b/include/linux/fcdevice.h @@ -0,0 +1,33 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. NET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Fibre Channel handlers. + * + * Version: @(#)fcdevice.h 1.0.0 09/26/98 + * + * Authors: Vineet Abraham + * + * Relocated to include/linux where it belongs by Alan Cox + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * WARNING: This move may well be temporary. This file will get merged with others RSN. 
+ * + */ +#ifndef _LINUX_FCDEVICE_H +#define _LINUX_FCDEVICE_H + + +#include + +#ifdef __KERNEL__ +struct net_device *alloc_fcdev(int sizeof_priv); +#endif + +#endif /* _LINUX_FCDEVICE_H */ diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h new file mode 100644 index 000000000..27dc7a606 --- /dev/null +++ b/include/linux/fcntl.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FCNTL_H +#define _LINUX_FCNTL_H + +#include + +/* list of all valid flags for the open/openat flags argument: */ +#define VALID_OPEN_FLAGS \ + (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \ + O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \ + FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \ + O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) + +#ifndef force_o_largefile +#define force_o_largefile() (BITS_PER_LONG != 32) +#endif + +#if BITS_PER_LONG == 32 +#define IS_GETLK32(cmd) ((cmd) == F_GETLK) +#define IS_SETLK32(cmd) ((cmd) == F_SETLK) +#define IS_SETLKW32(cmd) ((cmd) == F_SETLKW) +#define IS_GETLK64(cmd) ((cmd) == F_GETLK64) +#define IS_SETLK64(cmd) ((cmd) == F_SETLK64) +#define IS_SETLKW64(cmd) ((cmd) == F_SETLKW64) +#else +#define IS_GETLK32(cmd) (0) +#define IS_SETLK32(cmd) (0) +#define IS_SETLKW32(cmd) (0) +#define IS_GETLK64(cmd) ((cmd) == F_GETLK) +#define IS_SETLK64(cmd) ((cmd) == F_SETLK) +#define IS_SETLKW64(cmd) ((cmd) == F_SETLKW) +#endif /* BITS_PER_LONG == 32 */ + +#define IS_GETLK(cmd) (IS_GETLK32(cmd) || IS_GETLK64(cmd)) +#define IS_SETLK(cmd) (IS_SETLK32(cmd) || IS_SETLK64(cmd)) +#define IS_SETLKW(cmd) (IS_SETLKW32(cmd) || IS_SETLKW64(cmd)) + +#endif diff --git a/include/linux/fd.h b/include/linux/fd.h new file mode 100644 index 000000000..ece5ea532 --- /dev/null +++ b/include/linux/fd.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FD_H +#define _LINUX_FD_H + +#include + +#ifdef CONFIG_COMPAT +#include + +struct compat_floppy_struct { + compat_uint_t size; + compat_uint_t sect; + compat_uint_t head; + compat_uint_t track; + compat_uint_t stretch; + unsigned char gap; + unsigned char rate; + unsigned char spec1; + unsigned char fmt_gap; + const compat_caddr_t name; +}; + +#define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct) +#endif +#endif diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h new file mode 100644 index 000000000..32c22cfb2 --- /dev/null +++ b/include/linux/fddidevice.h @@ -0,0 +1,32 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the FDDI handlers. + * + * Version: @(#)fddidevice.h 1.0.0 08/12/96 + * + * Author: Lawrence V. Stefani, + * + * fddidevice.h is based on previous trdevice.h work by + * Ross Biro + * Fred N. van Kempen, + * Alan Cox, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#ifndef _LINUX_FDDIDEVICE_H +#define _LINUX_FDDIDEVICE_H + +#include + +#ifdef __KERNEL__ +__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev); +struct net_device *alloc_fddidev(int sizeof_priv); +#endif + +#endif /* _LINUX_FDDIDEVICE_H */ diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h new file mode 100644 index 000000000..41615f38b --- /dev/null +++ b/include/linux/fdtable.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * descriptor table internals; you almost certainly want file.h instead. + */ + +#ifndef __LINUX_FDTABLE_H +#define __LINUX_FDTABLE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * The default fd array needs to be at least BITS_PER_LONG, + * as this is the granularity returned by copy_fdset(). + */ +#define NR_OPEN_DEFAULT BITS_PER_LONG + +struct fdtable { + unsigned int max_fds; + struct file __rcu **fd; /* current fd array */ + unsigned long *close_on_exec; + unsigned long *open_fds; + unsigned long *full_fds_bits; + struct rcu_head rcu; +}; + +static inline bool close_on_exec(unsigned int fd, const struct fdtable *fdt) +{ + return test_bit(fd, fdt->close_on_exec); +} + +static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt) +{ + return test_bit(fd, fdt->open_fds); +} + +/* + * Open file table structure + */ +struct files_struct { + /* + * read mostly part + */ + atomic_t count; + bool resize_in_progress; + wait_queue_head_t resize_wait; + + struct fdtable __rcu *fdt; + struct fdtable fdtab; + /* + * written part on a separate cache line in SMP + */ + spinlock_t file_lock ____cacheline_aligned_in_smp; + unsigned int next_fd; + unsigned long close_on_exec_init[1]; + unsigned long open_fds_init[1]; + unsigned long full_fds_bits_init[1]; + struct file __rcu * fd_array[NR_OPEN_DEFAULT]; +}; + +struct file_operations; +struct vfsmount; +struct dentry; + +#define rcu_dereference_check_fdtable(files, fdtfd) \ + rcu_dereference_check((fdtfd), lockdep_is_held(&(files)->file_lock)) + +#define files_fdtable(files) \ + rcu_dereference_check_fdtable((files), (files)->fdt) + +/* + * The caller must ensure that fd table isn't shared or hold rcu or file lock + */ +static inline struct file *__fcheck_files(struct files_struct *files, unsigned int fd) +{ + struct fdtable *fdt = rcu_dereference_raw(files->fdt); + + if (fd < fdt->max_fds) { + fd = array_index_nospec(fd, fdt->max_fds); + return rcu_dereference_raw(fdt->fd[fd]); + } + return NULL; +} + +static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd) +{ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && + !lockdep_is_held(&files->file_lock), + "suspicious rcu_dereference_check() usage"); + return __fcheck_files(files, fd); +} + +/* + * Check whether the specified fd has an open file. 
+ */ +#define fcheck(fd) fcheck_files(current->files, fd) + +struct task_struct; + +struct files_struct *get_files_struct(struct task_struct *); +void put_files_struct(struct files_struct *fs); +void reset_files_struct(struct files_struct *); +int unshare_files(struct files_struct **); +struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy; +void do_close_on_exec(struct files_struct *); +int iterate_fd(struct files_struct *, unsigned, + int (*)(const void *, struct file *, unsigned), + const void *); + +extern int __alloc_fd(struct files_struct *files, + unsigned start, unsigned end, unsigned flags); +extern void __fd_install(struct files_struct *files, + unsigned int fd, struct file *file); +extern int __close_fd(struct files_struct *files, + unsigned int fd); + +extern struct kmem_cache *files_cachep; + +#endif /* __LINUX_FDTABLE_H */ diff --git a/include/linux/fec.h b/include/linux/fec.h new file mode 100644 index 000000000..1454a5036 --- /dev/null +++ b/include/linux/fec.h @@ -0,0 +1,25 @@ +/* include/linux/fec.h + * + * Copyright (c) 2009 Orex Computed Radiography + * Baruch Siach + * + * Copyright (C) 2010 Freescale Semiconductor, Inc. + * + * Header file for the FEC platform data + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_FEC_H__ +#define __LINUX_FEC_H__ + +#include + +struct fec_platform_data { + phy_interface_t phy; + unsigned char mac[ETH_ALEN]; + void (*sleep_mode_enable)(int enabled); +}; + +#endif diff --git a/include/linux/file.h b/include/linux/file.h new file mode 100644 index 000000000..3fcddff56 --- /dev/null +++ b/include/linux/file.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Wrapper functions for accessing the file_struct fd array. 
+ */ + +#ifndef __LINUX_FILE_H +#define __LINUX_FILE_H + +#include +#include +#include + +struct file; + +extern void fput(struct file *); +extern void fput_many(struct file *, unsigned int); + +struct file_operations; +struct vfsmount; +struct dentry; +struct inode; +struct path; +extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *, + const char *, int flags, const struct file_operations *); +extern struct file *alloc_file_clone(struct file *, int flags, + const struct file_operations *); + +static inline void fput_light(struct file *file, int fput_needed) +{ + if (fput_needed) + fput(file); +} + +struct fd { + struct file *file; + unsigned int flags; +}; +#define FDPUT_FPUT 1 +#define FDPUT_POS_UNLOCK 2 + +static inline void fdput(struct fd fd) +{ + if (fd.flags & FDPUT_FPUT) + fput(fd.file); +} + +extern struct file *fget(unsigned int fd); +extern struct file *fget_many(unsigned int fd, unsigned int refs); +extern struct file *fget_raw(unsigned int fd); +extern unsigned long __fdget(unsigned int fd); +extern unsigned long __fdget_raw(unsigned int fd); +extern unsigned long __fdget_pos(unsigned int fd); +extern void __f_unlock_pos(struct file *); + +static inline struct fd __to_fd(unsigned long v) +{ + return (struct fd){(struct file *)(v & ~3),v & 3}; +} + +static inline struct fd fdget(unsigned int fd) +{ + return __to_fd(__fdget(fd)); +} + +static inline struct fd fdget_raw(unsigned int fd) +{ + return __to_fd(__fdget_raw(fd)); +} + +static inline struct fd fdget_pos(int fd) +{ + return __to_fd(__fdget_pos(fd)); +} + +static inline void fdput_pos(struct fd f) +{ + if (f.flags & FDPUT_POS_UNLOCK) + __f_unlock_pos(f.file); + fdput(f); +} + +extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); +extern int replace_fd(unsigned fd, struct file *file, unsigned flags); +extern void set_close_on_exec(unsigned int fd, int flag); +extern bool get_close_on_exec(unsigned int fd); +extern int get_unused_fd_flags(unsigned flags); +extern void put_unused_fd(unsigned int fd); + +extern void fd_install(unsigned int fd, struct file *file); + +extern void flush_delayed_fput(void); +extern void __fput_sync(struct file *); + +#endif /* __LINUX_FILE_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h new file mode 100644 index 000000000..89a6ef659 --- /dev/null +++ b/include/linux/filter.h @@ -0,0 +1,1142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Socket Filter Data Structures + */ +#ifndef __LINUX_FILTER_H__ +#define __LINUX_FILTER_H__ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +struct sk_buff; +struct sock; +struct seccomp_data; +struct bpf_prog_aux; +struct xdp_rxq_info; +struct xdp_buff; +struct sock_reuseport; + +/* ArgX, context and stack frame pointer register positions. Note, + * Arg1, Arg2, Arg3, etc are used as argument mappings of function + * calls in BPF_CALL instruction. + */ +#define BPF_REG_ARG1 BPF_REG_1 +#define BPF_REG_ARG2 BPF_REG_2 +#define BPF_REG_ARG3 BPF_REG_3 +#define BPF_REG_ARG4 BPF_REG_4 +#define BPF_REG_ARG5 BPF_REG_5 +#define BPF_REG_CTX BPF_REG_6 +#define BPF_REG_FP BPF_REG_10 + +/* Additional register mappings for converted user programs. 
*/ +#define BPF_REG_A BPF_REG_0 +#define BPF_REG_X BPF_REG_7 +#define BPF_REG_TMP BPF_REG_2 /* scratch reg */ +#define BPF_REG_D BPF_REG_8 /* data, callee-saved */ +#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */ + +/* Kernel hidden auxiliary/helper register. */ +#define BPF_REG_AX MAX_BPF_REG +#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1) +#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG + +/* unused opcode to mark special call to bpf_tail_call() helper */ +#define BPF_TAIL_CALL 0xf0 + +/* unused opcode to mark call to interpreter with arguments */ +#define BPF_CALL_ARGS 0xe0 + +/* unused opcode to mark speculation barrier for mitigating + * Speculative Store Bypass + */ +#define BPF_NOSPEC 0xc0 + +/* As per nm, we expose JITed images as text (code) section for + * kallsyms. That way, tools like perf can find it to match + * addresses. + */ +#define BPF_SYM_ELF_TYPE 't' + +/* BPF program can access up to 512 bytes of stack space. */ +#define MAX_BPF_STACK 512 + +/* Helper macros for filter block array initializers. */ + +/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ + +#define BPF_ALU_REG(CLASS, OP, DST, SRC) \ + ((struct bpf_insn) { \ + .code = CLASS | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +#define BPF_ALU64_REG(OP, DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +#define BPF_ALU32_REG(OP, DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ + +#define BPF_ALU64_IMM(OP, DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_ALU32_IMM(OP, DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ + +#define BPF_ENDIAN(TYPE, DST, LEN) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = LEN }) + +/* Short form of mov, dst_reg = src_reg */ + +#define BPF_MOV_REG(CLASS, DST, SRC) \ + ((struct bpf_insn) { \ + .code = CLASS | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +#define BPF_MOV64_REG(DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +#define BPF_MOV32_REG(DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +/* Short form of mov, dst_reg = imm32 */ + +#define BPF_MOV64_IMM(DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_MOV32_IMM(DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_RAW_REG(insn, DST, SRC) \ + ((struct bpf_insn) { \ + .code = (insn).code, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = (insn).off, \ + .imm = (insn).imm }) + +/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ +#define BPF_LD_IMM64(DST, IMM) \ + BPF_LD_IMM64_RAW(DST, 0, IMM) + 
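As an illustration (not part of the upstream header itself), the block-initializer macros above let small eBPF programs be hand-assembled as plain C arrays of struct bpf_insn; net/core/filter.c and the BPF selftests use macros of this form. A minimal sketch under that assumption, using BPF_MOV64_IMM and BPF_ALU64_IMM from above plus BPF_EXIT_INSN defined later in this header (the array name is hypothetical):

/* Illustrative only: r0 = 1; r0 += 2; return r0; */
static const struct bpf_insn sample_prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 1),		/* r0 = 1 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),	/* r0 += 2 */
	BPF_EXIT_INSN(),			/* return r0 */
};

Each array element encodes one instruction; the BPF_LD_IMM64 form just above is the exception, expanding to a pair of entries with the 64-bit immediate split across their imm fields.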
+#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_DW | BPF_IMM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = (__u32) (IMM) }), \ + ((struct bpf_insn) { \ + .code = 0, /* zero is reserved opcode */ \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = ((__u64) (IMM)) >> 32 }) + +/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ +#define BPF_LD_MAP_FD(DST, MAP_FD) \ + BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) + +/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ + +#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = IMM }) + +/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ + +#define BPF_LD_ABS(SIZE, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */ + +#define BPF_LD_IND(SIZE, SRC, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \ + .dst_reg = 0, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = IMM }) + +/* Memory load, dst_reg = *(uint *) (src_reg + off16) */ + +#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Memory store, *(uint *) (dst_reg + off16) = src_reg */ + +#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */ + +#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Memory store, *(uint *) (dst_reg + off16) = imm32 */ + +#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ + +#define BPF_JMP_REG(OP, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ + +#define BPF_JMP_IMM(OP, DST, IMM, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Unconditional jumps, goto pc + off16 */ + +#define BPF_JMP_A(OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_JA, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = 0 }) + +/* Relative call */ + +#define BPF_CALL_REL(TGT) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_CALL, \ + .dst_reg = 0, \ + .src_reg = BPF_PSEUDO_CALL, \ + .off = 0, \ + .imm = TGT }) + +/* Function call */ + +#define BPF_CAST_CALL(x) \ + ((u64 (*)(u64, u64, u64, u64, u64))(x)) + +#define BPF_EMIT_CALL(FUNC) \ + ((struct bpf_insn) { \ + 
.code = BPF_JMP | BPF_CALL, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = ((FUNC) - __bpf_call_base) }) + +/* Raw code statement block */ + +#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ + ((struct bpf_insn) { \ + .code = CODE, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = IMM }) + +/* Program exit */ + +#define BPF_EXIT_INSN() \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_EXIT, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = 0 }) + +/* Speculation barrier */ + +#define BPF_ST_NOSPEC() \ + ((struct bpf_insn) { \ + .code = BPF_ST | BPF_NOSPEC, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = 0 }) + +/* Internal classic blocks for direct assignment */ + +#define __BPF_STMT(CODE, K) \ + ((struct sock_filter) BPF_STMT(CODE, K)) + +#define __BPF_JUMP(CODE, K, JT, JF) \ + ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF)) + +#define bytes_to_bpf_size(bytes) \ +({ \ + int bpf_size = -EINVAL; \ + \ + if (bytes == sizeof(u8)) \ + bpf_size = BPF_B; \ + else if (bytes == sizeof(u16)) \ + bpf_size = BPF_H; \ + else if (bytes == sizeof(u32)) \ + bpf_size = BPF_W; \ + else if (bytes == sizeof(u64)) \ + bpf_size = BPF_DW; \ + \ + bpf_size; \ +}) + +#define bpf_size_to_bytes(bpf_size) \ +({ \ + int bytes = -EINVAL; \ + \ + if (bpf_size == BPF_B) \ + bytes = sizeof(u8); \ + else if (bpf_size == BPF_H) \ + bytes = sizeof(u16); \ + else if (bpf_size == BPF_W) \ + bytes = sizeof(u32); \ + else if (bpf_size == BPF_DW) \ + bytes = sizeof(u64); \ + \ + bytes; \ +}) + +#define BPF_SIZEOF(type) \ + ({ \ + const int __size = bytes_to_bpf_size(sizeof(type)); \ + BUILD_BUG_ON(__size < 0); \ + __size; \ + }) + +#define BPF_FIELD_SIZEOF(type, field) \ + ({ \ + const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \ + BUILD_BUG_ON(__size < 0); \ + __size; \ + }) + +#define BPF_LDST_BYTES(insn) \ + ({ \ + const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \ + WARN_ON(__size < 0); \ + __size; \ + }) + +#define __BPF_MAP_0(m, v, ...) v +#define __BPF_MAP_1(m, v, t, a, ...) m(t, a) +#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__) +#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__) +#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__) +#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__) + +#define __BPF_REG_0(...) __BPF_PAD(5) +#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4) +#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3) +#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2) +#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1) +#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__) + +#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__) +#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__) + +#define __BPF_CAST(t, a) \ + (__force t) \ + (__force \ + typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \ + (unsigned long)0, (t)0))) a +#define __BPF_V void +#define __BPF_N + +#define __BPF_DECL_ARGS(t, a) t a +#define __BPF_DECL_REGS(t, a) u64 a + +#define __BPF_PAD(n) \ + __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \ + u64, __ur_3, u64, __ur_4, u64, __ur_5) + +#define BPF_CALL_x(x, name, ...) 
\ + static __always_inline \ + u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ + u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \ + u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \ + { \ + return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\ + } \ + static __always_inline \ + u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)) + +#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__) +#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__) +#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__) +#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__) +#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__) +#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__) + +#define bpf_ctx_range(TYPE, MEMBER) \ + offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 +#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ + offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 + +#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ + ({ \ + BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \ + *(PTR_SIZE) = (SIZE); \ + offsetof(TYPE, MEMBER); \ + }) + +#ifdef CONFIG_COMPAT +/* A struct sock_filter is architecture independent. */ +struct compat_sock_fprog { + u16 len; + compat_uptr_t filter; /* struct sock_filter * */ +}; +#endif + +struct sock_fprog_kern { + u16 len; + struct sock_filter *filter; +}; + +struct bpf_binary_header { + u32 pages; + /* Some arches need word alignment for their instructions */ + u8 image[] __aligned(4); +}; + +struct bpf_prog { + u16 pages; /* Number of allocated pages */ + u16 jited:1, /* Is our filter JIT'ed? */ + jit_requested:1,/* archs need to JIT the prog */ + undo_set_mem:1, /* Passed set_memory_ro() checkpoint */ + gpl_compatible:1, /* Is filter GPL compatible? */ + cb_access:1, /* Is control block accessed? */ + dst_needed:1, /* Do we need dst entry? */ + blinded:1, /* Was blinded */ + is_func:1, /* program is a bpf function */ + kprobe_override:1, /* Do we override a kprobe? */ + has_callchain_buf:1; /* callchain buffer allocated? 
*/ + enum bpf_prog_type type; /* Type of BPF program */ + enum bpf_attach_type expected_attach_type; /* For some prog types */ + u32 len; /* Number of filter blocks */ + u32 jited_len; /* Size of jited insns in bytes */ + u8 tag[BPF_TAG_SIZE]; + struct bpf_prog_aux *aux; /* Auxiliary fields */ + struct sock_fprog_kern *orig_prog; /* Original BPF program */ + unsigned int (*bpf_func)(const void *ctx, + const struct bpf_insn *insn); + /* Instructions for interpreter */ + union { + struct sock_filter insns[0]; + struct bpf_insn insnsi[0]; + }; +}; + +struct sk_filter { + refcount_t refcnt; + struct rcu_head rcu; + struct bpf_prog *prog; +}; + +#define BPF_PROG_RUN(filter, ctx) (*(filter)->bpf_func)(ctx, (filter)->insnsi) + +#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN + +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_meta; + void *data_end; +}; + +struct sk_msg_buff { + void *data; + void *data_end; + __u32 apply_bytes; + __u32 cork_bytes; + int sg_copybreak; + int sg_start; + int sg_curr; + int sg_end; + struct scatterlist sg_data[MAX_SKB_FRAGS]; + bool sg_copy[MAX_SKB_FRAGS]; + __u32 flags; + struct sock *sk_redir; + struct sock *sk; + struct sk_buff *skb; + struct list_head list; +}; + +struct bpf_redirect_info { + u32 ifindex; + u32 flags; + struct bpf_map *map; + struct bpf_map *map_to_flush; + u32 kern_flags; +}; + +DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); + +/* flags for bpf_redirect_info kern_flags */ +#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ + +/* Compute the linear packet data range [data, data_end) which + * will be accessed by various program types (cls_bpf, act_bpf, + * lwt, ...). Subsystems allowing direct data access must (!) + * ensure that cb[] area can be written to when BPF program is + * invoked (otherwise cb[] save/restore is necessary). + */ +static inline void bpf_compute_data_pointers(struct sk_buff *skb) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); + cb->data_meta = skb->data - skb_metadata_len(skb); + cb->data_end = skb->data + skb_headlen(skb); +} + +static inline u8 *bpf_skb_cb(struct sk_buff *skb) +{ + /* eBPF programs may read/write skb->cb[] area to transfer meta + * data between tail calls. Since this also needs to work with + * tc, that scratch memory is mapped to qdisc_skb_cb's data area. + * + * In some socket filter cases, the cb unfortunately needs to be + * saved/restored so that protocol specific skb->cb[] data won't + * be lost. In any case, due to unpriviledged eBPF programs + * attached to sockets, we need to clear the bpf_skb_cb() area + * to not leak previous contents to user space. 
+ */ + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN); + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != + FIELD_SIZEOF(struct qdisc_skb_cb, data)); + + return qdisc_skb_cb(skb)->data; +} + +static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, + struct sk_buff *skb) +{ + u8 *cb_data = bpf_skb_cb(skb); + u8 cb_saved[BPF_SKB_CB_LEN]; + u32 res; + + if (unlikely(prog->cb_access)) { + memcpy(cb_saved, cb_data, sizeof(cb_saved)); + memset(cb_data, 0, sizeof(cb_saved)); + } + + res = BPF_PROG_RUN(prog, skb); + + if (unlikely(prog->cb_access)) + memcpy(cb_data, cb_saved, sizeof(cb_saved)); + + return res; +} + +static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, + struct sk_buff *skb) +{ + u8 *cb_data = bpf_skb_cb(skb); + + if (unlikely(prog->cb_access)) + memset(cb_data, 0, BPF_SKB_CB_LEN); + + return BPF_PROG_RUN(prog, skb); +} + +static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + /* Caller needs to hold rcu_read_lock() (!), otherwise program + * can be released while still running, or map elements could be + * freed early while still having concurrent users. XDP fastpath + * already takes rcu_read_lock() when fetching the program, so + * it's not necessary here anymore. + */ + return BPF_PROG_RUN(prog, xdp); +} + +static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) +{ + return prog->len * sizeof(struct bpf_insn); +} + +static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) +{ + return round_up(bpf_prog_insn_size(prog) + + sizeof(__be64) + 1, SHA_MESSAGE_BYTES); +} + +static inline unsigned int bpf_prog_size(unsigned int proglen) +{ + return max(sizeof(struct bpf_prog), + offsetof(struct bpf_prog, insns[proglen])); +} + +static inline bool bpf_prog_was_classic(const struct bpf_prog *prog) +{ + /* When classic BPF programs have been loaded and the arch + * does not have a classic BPF JIT (anymore), they have been + * converted via bpf_migrate_filter() to eBPF and thus always + * have an unspec program type. 
+ */ + return prog->type == BPF_PROG_TYPE_UNSPEC; +} + +static inline u32 bpf_ctx_off_adjust_machine(u32 size) +{ + const u32 size_machine = sizeof(unsigned long); + + if (size > size_machine && size % size_machine == 0) + size = size_machine; + + return size; +} + +static inline bool +bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) +{ + return size <= size_default && (size & (size - 1)) == 0; +} + +#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) + +static inline void bpf_prog_lock_ro(struct bpf_prog *fp) +{ + fp->undo_set_mem = 1; + set_memory_ro((unsigned long)fp, fp->pages); +} + +static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) +{ + if (fp->undo_set_mem) + set_memory_rw((unsigned long)fp, fp->pages); +} + +static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) +{ + set_memory_ro((unsigned long)hdr, hdr->pages); + set_memory_x((unsigned long)hdr, hdr->pages); +} + +static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) +{ + set_memory_rw((unsigned long)hdr, hdr->pages); +} + +static inline struct bpf_binary_header * +bpf_jit_binary_hdr(const struct bpf_prog *fp) +{ + unsigned long real_start = (unsigned long)fp->bpf_func; + unsigned long addr = real_start & PAGE_MASK; + + return (void *)addr; +} + +int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); +static inline int sk_filter(struct sock *sk, struct sk_buff *skb) +{ + return sk_filter_trim_cap(sk, skb, 1); +} + +struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); +void bpf_prog_free(struct bpf_prog *fp); + +bool bpf_opcode_in_insntable(u8 code); + +struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); +struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, + gfp_t gfp_extra_flags); +void __bpf_prog_free(struct bpf_prog *fp); + +static inline void bpf_prog_unlock_free(struct bpf_prog *fp) +{ + bpf_prog_unlock_ro(fp); + __bpf_prog_free(fp); +} + +typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter, + unsigned int flen); + +int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); +int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, + bpf_aux_classic_check_t trans, bool save_orig); +void bpf_prog_destroy(struct bpf_prog *fp); + +int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); +int sk_attach_bpf(u32 ufd, struct sock *sk); +int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); +int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); +void sk_reuseport_prog_free(struct bpf_prog *prog); +int sk_detach_filter(struct sock *sk); +int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, + unsigned int len); + +bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); +void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); + +u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +#define __bpf_call_base_args \ + ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ + (void *)__bpf_call_base) + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); +void bpf_jit_compile(struct bpf_prog *prog); +bool bpf_helper_changes_pkt_data(void *func); + +static inline bool bpf_dump_raw_ok(const struct cred *cred) +{ + /* Reconstruction of call-sites is dependent on kallsyms, + * thus make dump the same restriction. 
+ */ + return kallsyms_show_value(cred); +} + +struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, + const struct bpf_insn *patch, u32 len); + +void bpf_clear_redirect_map(struct bpf_map *map); + +static inline bool xdp_return_frame_no_direct(void) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + + return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT; +} + +static inline void xdp_set_return_frame_no_direct(void) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + + ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT; +} + +static inline void xdp_clear_return_frame_no_direct(void) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + + ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT; +} + +static inline int xdp_ok_fwd_dev(const struct net_device *fwd, + unsigned int pktlen) +{ + unsigned int len; + + if (unlikely(!(fwd->flags & IFF_UP))) + return -ENETDOWN; + + len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; + if (pktlen > len) + return -EMSGSIZE; + + return 0; +} + +/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the + * same cpu context. Further for best results no more than a single map + * for the do_redirect/do_flush pair should be used. This limitation is + * because we only track one map and force a flush when the map changes. + * This does not appear to be a real limitation for existing software. + */ +int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, + struct xdp_buff *xdp, struct bpf_prog *prog); +int xdp_do_redirect(struct net_device *dev, + struct xdp_buff *xdp, + struct bpf_prog *prog); +void xdp_do_flush_map(void); + +void bpf_warn_invalid_xdp_action(u32 act); + +struct sock *do_sk_redirect_map(struct sk_buff *skb); +struct sock *do_msg_redirect_map(struct sk_msg_buff *md); + +#ifdef CONFIG_INET +struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, + struct bpf_prog *prog, struct sk_buff *skb, + u32 hash); +#else +static inline struct sock * +bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, + struct bpf_prog *prog, struct sk_buff *skb, + u32 hash) +{ + return NULL; +} +#endif + +#ifdef CONFIG_BPF_JIT +extern int bpf_jit_enable; +extern int bpf_jit_harden; +extern int bpf_jit_kallsyms; +extern long bpf_jit_limit; +extern long bpf_jit_limit_max; + +typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); + +struct bpf_binary_header * +bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, + unsigned int alignment, + bpf_jit_fill_hole_t bpf_fill_ill_insns); +void bpf_jit_binary_free(struct bpf_binary_header *hdr); +u64 bpf_jit_alloc_exec_limit(void); +void *bpf_jit_alloc_exec(unsigned long size); +void bpf_jit_free_exec(void *addr); +void bpf_jit_free(struct bpf_prog *fp); + +struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); +void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); + +static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, + u32 pass, void *image) +{ + pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen, + proglen, pass, image, current->comm, task_pid_nr(current)); + + if (image) + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, + 16, 1, image, proglen, false); +} + +static inline bool bpf_jit_is_ebpf(void) +{ +# ifdef CONFIG_HAVE_EBPF_JIT + return true; +# else + return false; +# endif +} + +static inline bool ebpf_jit_enabled(void) +{ + return bpf_jit_enable && bpf_jit_is_ebpf(); +} + +static inline bool 
bpf_prog_ebpf_jited(const struct bpf_prog *fp) +{ + return fp->jited && bpf_jit_is_ebpf(); +} + +static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) +{ + /* These are the prerequisites, should someone ever have the + * idea to call blinding outside of them, we make sure to + * bail out. + */ + if (!bpf_jit_is_ebpf()) + return false; + if (!prog->jit_requested) + return false; + if (!bpf_jit_harden) + return false; + if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN)) + return false; + + return true; +} + +static inline bool bpf_jit_kallsyms_enabled(void) +{ + /* There are a couple of corner cases where kallsyms should + * not be enabled f.e. on hardening. + */ + if (bpf_jit_harden) + return false; + if (!bpf_jit_kallsyms) + return false; + if (bpf_jit_kallsyms == 1) + return true; + + return false; +} + +const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym); +bool is_bpf_text_address(unsigned long addr); +int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym); + +static inline const char * +bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym) +{ + const char *ret = __bpf_address_lookup(addr, size, off, sym); + + if (ret && modname) + *modname = NULL; + return ret; +} + +void bpf_prog_kallsyms_add(struct bpf_prog *fp); +void bpf_prog_kallsyms_del(struct bpf_prog *fp); + +#else /* CONFIG_BPF_JIT */ + +static inline bool ebpf_jit_enabled(void) +{ + return false; +} + +static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) +{ + return false; +} + +static inline void bpf_jit_free(struct bpf_prog *fp) +{ + bpf_prog_unlock_free(fp); +} + +static inline bool bpf_jit_kallsyms_enabled(void) +{ + return false; +} + +static inline const char * +__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym) +{ + return NULL; +} + +static inline bool is_bpf_text_address(unsigned long addr) +{ + return false; +} + +static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *sym) +{ + return -ERANGE; +} + +static inline const char * +bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym) +{ + return NULL; +} + +static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) +{ +} + +static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) +{ +} +#endif /* CONFIG_BPF_JIT */ + +void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp); +void bpf_prog_kallsyms_del_all(struct bpf_prog *fp); + +#define BPF_ANC BIT(15) + +static inline bool bpf_needs_clear_a(const struct sock_filter *first) +{ + switch (first->code) { + case BPF_RET | BPF_K: + case BPF_LD | BPF_W | BPF_LEN: + return false; + + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: + if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X) + return true; + return false; + + default: + return true; + } +} + +static inline u16 bpf_anc_helper(const struct sock_filter *ftest) +{ + BUG_ON(ftest->code & BPF_ANC); + + switch (ftest->code) { + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: +#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ + return BPF_ANC | SKF_AD_##CODE + switch (ftest->k) { + BPF_ANCILLARY(PROTOCOL); + BPF_ANCILLARY(PKTTYPE); + BPF_ANCILLARY(IFINDEX); + BPF_ANCILLARY(NLATTR); + BPF_ANCILLARY(NLATTR_NEST); + BPF_ANCILLARY(MARK); + BPF_ANCILLARY(QUEUE); + 
BPF_ANCILLARY(HATYPE); + BPF_ANCILLARY(RXHASH); + BPF_ANCILLARY(CPU); + BPF_ANCILLARY(ALU_XOR_X); + BPF_ANCILLARY(VLAN_TAG); + BPF_ANCILLARY(VLAN_TAG_PRESENT); + BPF_ANCILLARY(PAY_OFFSET); + BPF_ANCILLARY(RANDOM); + BPF_ANCILLARY(VLAN_TPID); + } + /* Fallthrough. */ + default: + return ftest->code; + } +} + +void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, + int k, unsigned int size); + +static inline void *bpf_load_pointer(const struct sk_buff *skb, int k, + unsigned int size, void *buffer) +{ + if (k >= 0) + return skb_header_pointer(skb, k, size, buffer); + + return bpf_internal_load_pointer_neg_helper(skb, k, size); +} + +static inline int bpf_tell_extensions(void) +{ + return SKF_AD_MAX; +} + +struct bpf_sock_addr_kern { + struct sock *sk; + struct sockaddr *uaddr; + /* Temporary "register" to make indirect stores to nested structures + * defined above. We need three registers to make such a store, but + * only two (src and dst) are available at convert_ctx_access time + */ + u64 tmp_reg; + void *t_ctx; /* Attach type specific context. */ +}; + +struct bpf_sock_ops_kern { + struct sock *sk; + u32 op; + union { + u32 args[4]; + u32 reply; + u32 replylong[4]; + }; + u32 is_fullsock; + u64 temp; /* temp and everything after is not + * initialized to 0 before calling + * the BPF program. New fields that + * should be initialized to 0 should + * be inserted before temp. + * temp is scratch storage used by + * sock_ops_convert_ctx_access + * as temporary storage of a register. + */ +}; + +#endif /* __LINUX_FILTER_H__ */ diff --git a/include/linux/fips.h b/include/linux/fips.h new file mode 100644 index 000000000..afeeece92 --- /dev/null +++ b/include/linux/fips.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FIPS_H +#define _FIPS_H + +#ifdef CONFIG_CRYPTO_FIPS +extern int fips_enabled; +#else +#define fips_enabled 0 +#endif + +#endif diff --git a/include/linux/firewire.h b/include/linux/firewire.h new file mode 100644 index 000000000..aec8f30ab --- /dev/null +++ b/include/linux/firewire.h @@ -0,0 +1,473 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FIREWIRE_H +#define _LINUX_FIREWIRE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define CSR_REGISTER_BASE 0xfffff0000000ULL + +/* register offsets are relative to CSR_REGISTER_BASE */ +#define CSR_STATE_CLEAR 0x0 +#define CSR_STATE_SET 0x4 +#define CSR_NODE_IDS 0x8 +#define CSR_RESET_START 0xc +#define CSR_SPLIT_TIMEOUT_HI 0x18 +#define CSR_SPLIT_TIMEOUT_LO 0x1c +#define CSR_CYCLE_TIME 0x200 +#define CSR_BUS_TIME 0x204 +#define CSR_BUSY_TIMEOUT 0x210 +#define CSR_PRIORITY_BUDGET 0x218 +#define CSR_BUS_MANAGER_ID 0x21c +#define CSR_BANDWIDTH_AVAILABLE 0x220 +#define CSR_CHANNELS_AVAILABLE 0x224 +#define CSR_CHANNELS_AVAILABLE_HI 0x224 +#define CSR_CHANNELS_AVAILABLE_LO 0x228 +#define CSR_MAINT_UTILITY 0x230 +#define CSR_BROADCAST_CHANNEL 0x234 +#define CSR_CONFIG_ROM 0x400 +#define CSR_CONFIG_ROM_END 0x800 +#define CSR_OMPR 0x900 +#define CSR_OPCR(i) (0x904 + (i) * 4) +#define CSR_IMPR 0x980 +#define CSR_IPCR(i) (0x984 + (i) * 4) +#define CSR_FCP_COMMAND 0xB00 +#define CSR_FCP_RESPONSE 0xD00 +#define CSR_FCP_END 0xF00 +#define CSR_TOPOLOGY_MAP 0x1000 +#define CSR_TOPOLOGY_MAP_END 0x1400 +#define CSR_SPEED_MAP 0x2000 +#define CSR_SPEED_MAP_END 0x3000 + +#define CSR_OFFSET 0x40 +#define CSR_LEAF 0x80 +#define CSR_DIRECTORY 0xc0 + +#define CSR_DESCRIPTOR 0x01 +#define CSR_VENDOR 0x03 +#define 
CSR_HARDWARE_VERSION 0x04 +#define CSR_UNIT 0x11 +#define CSR_SPECIFIER_ID 0x12 +#define CSR_VERSION 0x13 +#define CSR_DEPENDENT_INFO 0x14 +#define CSR_MODEL 0x17 +#define CSR_DIRECTORY_ID 0x20 + +struct fw_csr_iterator { + const u32 *p; + const u32 *end; +}; + +void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p); +int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value); +int fw_csr_string(const u32 *directory, int key, char *buf, size_t size); + +extern struct bus_type fw_bus_type; + +struct fw_card_driver; +struct fw_node; + +struct fw_card { + const struct fw_card_driver *driver; + struct device *device; + struct kref kref; + struct completion done; + + int node_id; + int generation; + int current_tlabel; + u64 tlabel_mask; + struct list_head transaction_list; + u64 reset_jiffies; + + u32 split_timeout_hi; + u32 split_timeout_lo; + unsigned int split_timeout_cycles; + unsigned int split_timeout_jiffies; + + unsigned long long guid; + unsigned max_receive; + int link_speed; + int config_rom_generation; + + spinlock_t lock; /* Take this lock when handling the lists in + * this struct. */ + struct fw_node *local_node; + struct fw_node *root_node; + struct fw_node *irm_node; + u8 color; /* must be u8 to match the definition in struct fw_node */ + int gap_count; + bool beta_repeaters_present; + + int index; + struct list_head link; + + struct list_head phy_receiver_list; + + struct delayed_work br_work; /* bus reset job */ + bool br_short; + + struct delayed_work bm_work; /* bus manager job */ + int bm_retries; + int bm_generation; + int bm_node_id; + bool bm_abdicate; + + bool priority_budget_implemented; /* controller feature */ + bool broadcast_channel_auto_allocated; /* controller feature */ + + bool broadcast_channel_allocated; + u32 broadcast_channel; + __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; + + __be32 maint_utility_register; +}; + +static inline struct fw_card *fw_card_get(struct fw_card *card) +{ + kref_get(&card->kref); + + return card; +} + +void fw_card_release(struct kref *kref); + +static inline void fw_card_put(struct fw_card *card) +{ + kref_put(&card->kref, fw_card_release); +} + +struct fw_attribute_group { + struct attribute_group *groups[2]; + struct attribute_group group; + struct attribute *attrs[13]; +}; + +enum fw_device_state { + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING, + FW_DEVICE_GONE, + FW_DEVICE_SHUTDOWN, +}; + +/* + * Note, fw_device.generation always has to be read before fw_device.node_id. + * Use SMP memory barriers to ensure this. Otherwise requests will be sent + * to an outdated node_id if the generation was updated in the meantime due + * to a bus reset. + * + * Likewise, fw-core will take care to update .node_id before .generation so + * that whenever fw_device.generation is current WRT the actual bus generation, + * fw_device.node_id is guaranteed to be current too. + * + * The same applies to fw_device.card->node_id vs. fw_device.generation. + * + * fw_device.config_rom and fw_device.config_rom_length may be accessed during + * the lifetime of any fw_unit belonging to the fw_device, before device_del() + * was called on the last fw_unit. Alternatively, they may be accessed while + * holding fw_device_rwsem. 
+ */ +struct fw_device { + atomic_t state; + struct fw_node *node; + int node_id; + int generation; + unsigned max_speed; + struct fw_card *card; + struct device device; + + struct mutex client_list_mutex; + struct list_head client_list; + + const u32 *config_rom; + size_t config_rom_length; + int config_rom_retries; + unsigned is_local:1; + unsigned max_rec:4; + unsigned cmc:1; + unsigned irmc:1; + unsigned bc_implemented:2; + + work_func_t workfn; + struct delayed_work work; + struct fw_attribute_group attribute_group; +}; + +static inline struct fw_device *fw_device(struct device *dev) +{ + return container_of(dev, struct fw_device, device); +} + +static inline int fw_device_is_shutdown(struct fw_device *device) +{ + return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; +} + +int fw_device_enable_phys_dma(struct fw_device *device); + +/* + * fw_unit.directory must not be accessed after device_del(&fw_unit.device). + */ +struct fw_unit { + struct device device; + const u32 *directory; + struct fw_attribute_group attribute_group; +}; + +static inline struct fw_unit *fw_unit(struct device *dev) +{ + return container_of(dev, struct fw_unit, device); +} + +static inline struct fw_unit *fw_unit_get(struct fw_unit *unit) +{ + get_device(&unit->device); + + return unit; +} + +static inline void fw_unit_put(struct fw_unit *unit) +{ + put_device(&unit->device); +} + +static inline struct fw_device *fw_parent_device(struct fw_unit *unit) +{ + return fw_device(unit->device.parent); +} + +struct ieee1394_device_id; + +struct fw_driver { + struct device_driver driver; + int (*probe)(struct fw_unit *unit, const struct ieee1394_device_id *id); + /* Called when the parent device sits through a bus reset. */ + void (*update)(struct fw_unit *unit); + void (*remove)(struct fw_unit *unit); + const struct ieee1394_device_id *id_table; +}; + +struct fw_packet; +struct fw_request; + +typedef void (*fw_packet_callback_t)(struct fw_packet *packet, + struct fw_card *card, int status); +typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, + void *data, size_t length, + void *callback_data); +/* + * This callback handles an inbound request subaction. It is called in + * RCU read-side context, therefore must not sleep. + * + * The callback should not initiate outbound request subactions directly. + * Otherwise there is a danger of recursion of inbound and outbound + * transactions from and to the local node. + * + * The callback is responsible that either fw_send_response() or kfree() + * is called on the @request, except for FCP registers for which the core + * takes care of that. + */ +typedef void (*fw_address_callback_t)(struct fw_card *card, + struct fw_request *request, + int tcode, int destination, int source, + int generation, + unsigned long long offset, + void *data, size_t length, + void *callback_data); + +struct fw_packet { + int speed; + int generation; + u32 header[4]; + size_t header_length; + void *payload; + size_t payload_length; + dma_addr_t payload_bus; + bool payload_mapped; + u32 timestamp; + + /* + * This callback is called when the packet transmission has completed. + * For successful transmission, the status code is the ack received + * from the destination. Otherwise it is one of the juju-specific + * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK. + * The callback can be called from tasklet context and thus + * must never block. 
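+	 *
+	 * Editor-added sketch of a callback that obeys this rule by only
+	 * completing a waiter (struct my_request is hypothetical):
+	 *
+	 *	static void my_packet_done(struct fw_packet *packet,
+	 *				   struct fw_card *card, int status)
+	 *	{
+	 *		struct my_request *req = packet->driver_data;
+	 *
+	 *		req->status = status;
+	 *		complete(&req->done);	/* safe in atomic context */
+	 *	}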
+ */ + fw_packet_callback_t callback; + int ack; + struct list_head link; + void *driver_data; +}; + +struct fw_transaction { + int node_id; /* The generation is implied; it is always the current. */ + int tlabel; + struct list_head link; + struct fw_card *card; + bool is_split_transaction; + struct timer_list split_timeout_timer; + + struct fw_packet packet; + + /* + * The data passed to the callback is valid only during the + * callback. + */ + fw_transaction_callback_t callback; + void *callback_data; +}; + +struct fw_address_handler { + u64 offset; + u64 length; + fw_address_callback_t address_callback; + void *callback_data; + struct list_head link; +}; + +struct fw_address_region { + u64 start; + u64 end; +}; + +extern const struct fw_address_region fw_high_memory_region; + +int fw_core_add_address_handler(struct fw_address_handler *handler, + const struct fw_address_region *region); +void fw_core_remove_address_handler(struct fw_address_handler *handler); +void fw_send_response(struct fw_card *card, + struct fw_request *request, int rcode); +int fw_get_request_speed(struct fw_request *request); +void fw_send_request(struct fw_card *card, struct fw_transaction *t, + int tcode, int destination_id, int generation, int speed, + unsigned long long offset, void *payload, size_t length, + fw_transaction_callback_t callback, void *callback_data); +int fw_cancel_transaction(struct fw_card *card, + struct fw_transaction *transaction); +int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, + int generation, int speed, unsigned long long offset, + void *payload, size_t length); +const char *fw_rcode_string(int rcode); + +static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) +{ + return tag << 14 | channel << 8 | sy; +} + +void fw_schedule_bus_reset(struct fw_card *card, bool delayed, + bool short_reset); + +struct fw_descriptor { + struct list_head link; + size_t length; + u32 immediate; + u32 key; + const u32 *data; +}; + +int fw_core_add_descriptor(struct fw_descriptor *desc); +void fw_core_remove_descriptor(struct fw_descriptor *desc); + +/* + * The iso packet format allows for an immediate header/payload part + * stored in 'header' immediately after the packet info plus an + * indirect payload part that is pointer to by the 'payload' field. + * Applications can use one or the other or both to implement simple + * low-bandwidth streaming (e.g. audio) or more advanced + * scatter-gather streaming (e.g. assembling video frame automatically). + */ +struct fw_iso_packet { + u16 payload_length; /* Length of indirect payload */ + u32 interrupt:1; /* Generate interrupt on this packet */ + u32 skip:1; /* tx: Set to not send packet at all */ + /* rx: Sync bit, wait for matching sy */ + u32 tag:2; /* tx: Tag in packet header */ + u32 sy:4; /* tx: Sy in packet header */ + u32 header_length:8; /* Length of immediate header */ + u32 header[0]; /* tx: Top of 1394 isoch. data_block */ +}; + +#define FW_ISO_CONTEXT_TRANSMIT 0 +#define FW_ISO_CONTEXT_RECEIVE 1 +#define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 + +#define FW_ISO_CONTEXT_MATCH_TAG0 1 +#define FW_ISO_CONTEXT_MATCH_TAG1 2 +#define FW_ISO_CONTEXT_MATCH_TAG2 4 +#define FW_ISO_CONTEXT_MATCH_TAG3 8 +#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15 + +/* + * An iso buffer is just a set of pages mapped for DMA in the + * specified direction. Since the pages are to be used for DMA, they + * are not mapped into the kernel virtual address space. We store the + * DMA address in the page private. 
The helper function + * fw_iso_buffer_map() will map the pages into a given vma. + */ +struct fw_iso_buffer { + enum dma_data_direction direction; + struct page **pages; + int page_count; + int page_count_mapped; +}; + +int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, + int page_count, enum dma_data_direction direction); +void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); +size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed); + +struct fw_iso_context; +typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, + u32 cycle, size_t header_length, + void *header, void *data); +typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context, + dma_addr_t completed, void *data); +struct fw_iso_context { + struct fw_card *card; + int type; + int channel; + int speed; + bool drop_overflow_headers; + size_t header_size; + union { + fw_iso_callback_t sc; + fw_iso_mc_callback_t mc; + } callback; + void *callback_data; +}; + +struct fw_iso_context *fw_iso_context_create(struct fw_card *card, + int type, int channel, int speed, size_t header_size, + fw_iso_callback_t callback, void *callback_data); +int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); +int fw_iso_context_queue(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload); +void fw_iso_context_queue_flush(struct fw_iso_context *ctx); +int fw_iso_context_flush_completions(struct fw_iso_context *ctx); +int fw_iso_context_start(struct fw_iso_context *ctx, + int cycle, int sync, int tags); +int fw_iso_context_stop(struct fw_iso_context *ctx); +void fw_iso_context_destroy(struct fw_iso_context *ctx); +void fw_iso_resource_manage(struct fw_card *card, int generation, + u64 channels_mask, int *channel, int *bandwidth, + bool allocate); + +extern struct workqueue_struct *fw_workqueue; + +#endif /* _LINUX_FIREWIRE_H */ diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h new file mode 100644 index 000000000..71d4fa721 --- /dev/null +++ b/include/linux/firmware-map.h @@ -0,0 +1,49 @@ +/* + * include/linux/firmware-map.h: + * Copyright (C) 2008 SUSE LINUX Products GmbH + * by Bernhard Walle + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef _LINUX_FIRMWARE_MAP_H +#define _LINUX_FIRMWARE_MAP_H + +#include + +/* + * provide a dummy interface if CONFIG_FIRMWARE_MEMMAP is disabled + */ +#ifdef CONFIG_FIRMWARE_MEMMAP + +int firmware_map_add_early(u64 start, u64 end, const char *type); +int firmware_map_add_hotplug(u64 start, u64 end, const char *type); +int firmware_map_remove(u64 start, u64 end, const char *type); + +#else /* CONFIG_FIRMWARE_MEMMAP */ + +static inline int firmware_map_add_early(u64 start, u64 end, const char *type) +{ + return 0; +} + +static inline int firmware_map_add_hotplug(u64 start, u64 end, const char *type) +{ + return 0; +} + +static inline int firmware_map_remove(u64 start, u64 end, const char *type) +{ + return 0; +} + +#endif /* CONFIG_FIRMWARE_MEMMAP */ + +#endif /* _LINUX_FIRMWARE_MAP_H */ diff --git a/include/linux/firmware.h b/include/linux/firmware.h new file mode 100644 index 000000000..2dd566c91 --- /dev/null +++ b/include/linux/firmware.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FIRMWARE_H +#define _LINUX_FIRMWARE_H + +#include +#include +#include + +#define FW_ACTION_NOHOTPLUG 0 +#define FW_ACTION_HOTPLUG 1 + +struct firmware { + size_t size; + const u8 *data; + struct page **pages; + + /* firmware loader private fields */ + void *priv; +}; + +struct module; +struct device; + +struct builtin_fw { + char *name; + void *data; + unsigned long size; +}; + +/* We have to play tricks here much like stringify() to get the + __COUNTER__ macro to be expanded as we want it */ +#define __fw_concat1(x, y) x##y +#define __fw_concat(x, y) __fw_concat1(x, y) + +#define DECLARE_BUILTIN_FIRMWARE(name, blob) \ + DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob)) + +#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \ + static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \ + __used __section(.builtin_fw) = { name, blob, size } + +#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) +int request_firmware(const struct firmware **fw, const char *name, + struct device *device); +int firmware_request_nowarn(const struct firmware **fw, const char *name, + struct device *device); +int request_firmware_nowait( + struct module *module, bool uevent, + const char *name, struct device *device, gfp_t gfp, void *context, + void (*cont)(const struct firmware *fw, void *context)); +int request_firmware_direct(const struct firmware **fw, const char *name, + struct device *device); +int request_firmware_into_buf(const struct firmware **firmware_p, + const char *name, struct device *device, void *buf, size_t size); + +void release_firmware(const struct firmware *fw); +#else +static inline int request_firmware(const struct firmware **fw, + const char *name, + struct device *device) +{ + return -EINVAL; +} + +static inline int firmware_request_nowarn(const struct firmware **fw, + const char *name, + struct device *device) +{ + return -EINVAL; +} + +static inline int request_firmware_nowait( + struct module *module, bool uevent, + const char *name, struct device *device, gfp_t gfp, void *context, + void (*cont)(const struct firmware *fw, void *context)) +{ + return -EINVAL; +} + +static inline void release_firmware(const struct firmware *fw) +{ +} + +static inline int request_firmware_direct(const struct firmware **fw, + const char *name, + struct device *device) +{ + return -EINVAL; +} + +static inline int request_firmware_into_buf(const struct firmware **firmware_p, + const char *name, struct device *device, void 
*buf, size_t size) +{ + return -EINVAL; +} + +#endif + +int firmware_request_cache(struct device *device, const char *name); + +#endif diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h new file mode 100644 index 000000000..37a5eaea6 --- /dev/null +++ b/include/linux/firmware/meson/meson_sm.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2016 Endless Mobile, Inc. + * Author: Carlo Caione + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _MESON_SM_FW_H_ +#define _MESON_SM_FW_H_ + +enum { + SM_EFUSE_READ, + SM_EFUSE_WRITE, + SM_EFUSE_USER_MAX, +}; + +struct meson_sm_firmware; + +int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1, + u32 arg2, u32 arg3, u32 arg4); +int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index, + u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); +int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index, + u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); + +#endif /* _MESON_SM_FW_H_ */ diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h new file mode 100644 index 000000000..d4686fe1c --- /dev/null +++ b/include/linux/fixp-arith.h @@ -0,0 +1,156 @@ +#ifndef _FIXP_ARITH_H +#define _FIXP_ARITH_H + +#include + +/* + * Simplistic fixed-point arithmetics. + * Hmm, I'm probably duplicating some code :( + * + * Copyright (c) 2002 Johann Deneux + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Should you need to contact me, the author, you can do so by + * e-mail - mail your message to + */ + +#include + +static const s32 sin_table[] = { + 0x00000000, 0x023be165, 0x04779632, 0x06b2f1d2, 0x08edc7b6, 0x0b27eb5c, + 0x0d61304d, 0x0f996a26, 0x11d06c96, 0x14060b67, 0x163a1a7d, 0x186c6ddd, + 0x1a9cd9ac, 0x1ccb3236, 0x1ef74bf2, 0x2120fb82, 0x234815ba, 0x256c6f9e, + 0x278dde6e, 0x29ac379f, 0x2bc750e8, 0x2ddf003f, 0x2ff31bdd, 0x32037a44, + 0x340ff241, 0x36185aee, 0x381c8bb5, 0x3a1c5c56, 0x3c17a4e7, 0x3e0e3ddb, + 0x3fffffff, 0x41ecc483, 0x43d464fa, 0x45b6bb5d, 0x4793a20f, 0x496af3e1, + 0x4b3c8c11, 0x4d084650, 0x4ecdfec6, 0x508d9210, 0x5246dd48, 0x53f9be04, + 0x55a6125a, 0x574bb8e5, 0x58ea90c2, 0x5a827999, 0x5c135399, 0x5d9cff82, + 0x5f1f5ea0, 0x609a52d1, 0x620dbe8a, 0x637984d3, 0x64dd894f, 0x6639b039, + 0x678dde6d, 0x68d9f963, 0x6a1de735, 0x6b598ea1, 0x6c8cd70a, 0x6db7a879, + 0x6ed9eba0, 0x6ff389de, 0x71046d3c, 0x720c8074, 0x730baeec, 0x7401e4bf, + 0x74ef0ebb, 0x75d31a5f, 0x76adf5e5, 0x777f903b, 0x7847d908, 0x7906c0af, + 0x79bc384c, 0x7a6831b8, 0x7b0a9f8c, 0x7ba3751c, 0x7c32a67c, 0x7cb82884, + 0x7d33f0c8, 0x7da5f5a3, 0x7e0e2e31, 0x7e6c924f, 0x7ec11aa3, 0x7f0bc095, + 0x7f4c7e52, 0x7f834ecf, 0x7fb02dc4, 0x7fd317b3, 0x7fec09e1, 0x7ffb025e, + 0x7fffffff +}; + +/** + * __fixp_sin32() returns the sin of an angle in degrees + * + * @degrees: angle, in degrees, from 0 to 360. + * + * The returned value ranges from -0x7fffffff to +0x7fffffff. + */ +static inline s32 __fixp_sin32(int degrees) +{ + s32 ret; + bool negative = false; + + if (degrees > 180) { + negative = true; + degrees -= 180; + } + if (degrees > 90) + degrees = 180 - degrees; + + ret = sin_table[degrees]; + + return negative ? -ret : ret; +} + +/** + * fixp_sin32() returns the sin of an angle in degrees + * + * @degrees: angle, in degrees. The angle can be positive or negative + * + * The returned value ranges from -0x7fffffff to +0x7fffffff. + */ +static inline s32 fixp_sin32(int degrees) +{ + degrees = (degrees % 360 + 360) % 360; + + return __fixp_sin32(degrees); +} + +/* cos(x) = sin(x + 90 degrees) */ +#define fixp_cos32(v) fixp_sin32((v) + 90) + +/* + * 16 bits variants + * + * The returned value ranges from -0x7fff to 0x7fff + */ + +#define fixp_sin16(v) (fixp_sin32(v) >> 16) +#define fixp_cos16(v) (fixp_cos32(v) >> 16) + +/** + * fixp_sin32_rad() - calculates the sin of an angle in radians + * + * @radians: angle, in radians + * @twopi: value to be used for 2*pi + * + * Provides a variant for the cases where just 360 + * values is not enough. This function uses linear + * interpolation to a wider range of values given by + * twopi var. + * + * Experimental tests gave a maximum difference of + * 0.000038 between the value calculated by sin() and + * the one produced by this function, when twopi is + * equal to 360000. That seems to be enough precision + * for practical purposes. + * + * Please notice that two high numbers for twopi could cause + * overflows, so the routine will not allow values of twopi + * bigger than 1^18. + */ +static inline s32 fixp_sin32_rad(u32 radians, u32 twopi) +{ + int degrees; + s32 v1, v2, dx, dy; + s64 tmp; + + /* + * Avoid too large values for twopi, as we don't want overflows. 
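+	 *
+	 * Editor-added example (not upstream text): with twopi == 65536 a
+	 * quarter turn is radians == 16384, and fixp_sin32_rad(16384, 65536)
+	 * returns sin_table[90] == 0x7fffffff, i.e. sin(90 degrees).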
+ */ + BUG_ON(twopi > 1 << 18); + + degrees = (radians * 360) / twopi; + tmp = radians - (degrees * twopi) / 360; + + degrees = (degrees % 360 + 360) % 360; + v1 = __fixp_sin32(degrees); + + v2 = fixp_sin32(degrees + 1); + + dx = twopi / 360; + dy = v2 - v1; + + tmp *= dy; + + return v1 + div_s64(tmp, dx); +} + +/* cos(x) = sin(x + pi/2 radians) */ + +#define fixp_cos32_rad(rad, twopi) \ + fixp_sin32_rad(rad + twopi / 4, twopi) + +#endif diff --git a/include/linux/flat.h b/include/linux/flat.h new file mode 100644 index 000000000..569b67d64 --- /dev/null +++ b/include/linux/flat.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2002-2003 David McCullough + * Copyright (C) 1998 Kenneth Albanowski + * The Silver Hammer Group, Ltd. + * + * This file provides the definitions and structures needed to + * support uClinux flat-format executables. + */ +#ifndef _LINUX_FLAT_H +#define _LINUX_FLAT_H + +#include +#include + +/* + * While it would be nice to keep this header clean, users of older + * tools still need this support in the kernel. So this section is + * purely for compatibility with old tool chains. + * + * DO NOT make changes or enhancements to the old format please, just work + * with the format above, except to fix bugs with old format support. + */ + +#include + +#define OLD_FLAT_VERSION 0x00000002L +#define OLD_FLAT_RELOC_TYPE_TEXT 0 +#define OLD_FLAT_RELOC_TYPE_DATA 1 +#define OLD_FLAT_RELOC_TYPE_BSS 2 + +typedef union { + unsigned long value; + struct { +# if defined(mc68000) && !defined(CONFIG_COLDFIRE) + signed long offset : 30; + unsigned long type : 2; +# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ +# elif defined(__BIG_ENDIAN_BITFIELD) + unsigned long type : 2; + signed long offset : 30; +# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ +# elif defined(__LITTLE_ENDIAN_BITFIELD) + signed long offset : 30; + unsigned long type : 2; +# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ +# else +# error "Unknown bitfield order for flat files." +# endif + } reloc; +} flat_v2_reloc_t; + +#endif /* _LINUX_FLAT_H */ diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h new file mode 100644 index 000000000..b94fa61b5 --- /dev/null +++ b/include/linux/flex_array.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FLEX_ARRAY_H +#define _FLEX_ARRAY_H + +#include +#include +#include + +#define FLEX_ARRAY_PART_SIZE PAGE_SIZE +#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE + +struct flex_array_part; + +/* + * This is meant to replace cases where an array-like + * structure has gotten too big to fit into kmalloc() + * and the developer is getting tempted to use + * vmalloc(). 
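+ *
+ * Editor-added usage sketch (error handling trimmed; 'struct foo' is
+ * hypothetical), built only on the accessors declared below:
+ *
+ *	struct flex_array *fa;
+ *	struct foo value, *p;
+ *
+ *	fa = flex_array_alloc(sizeof(struct foo), 128, GFP_KERNEL);
+ *	flex_array_prealloc(fa, 0, 128, GFP_KERNEL);
+ *	flex_array_put(fa, 42, &value, GFP_KERNEL);
+ *	p = flex_array_get(fa, 42);
+ *	flex_array_free(fa);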
+ */ + +struct flex_array { + union { + struct { + int element_size; + int total_nr_elements; + int elems_per_part; + struct reciprocal_value reciprocal_elems; + struct flex_array_part *parts[]; + }; + /* + * This little trick makes sure that + * sizeof(flex_array) == PAGE_SIZE + */ + char padding[FLEX_ARRAY_BASE_SIZE]; + }; +}; + +/* Number of bytes left in base struct flex_array, excluding metadata */ +#define FLEX_ARRAY_BASE_BYTES_LEFT \ + (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts)) + +/* Number of pointers in base to struct flex_array_part pages */ +#define FLEX_ARRAY_NR_BASE_PTRS \ + (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *)) + +/* Number of elements of size that fit in struct flex_array_part */ +#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \ + (FLEX_ARRAY_PART_SIZE / size) + +/* + * Defines a statically allocated flex array and ensures its parameters are + * valid. + */ +#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \ + struct flex_array __arrayname = { { { \ + .element_size = (__element_size), \ + .total_nr_elements = (__total), \ + } } }; \ + static inline void __arrayname##_invalid_parameter(void) \ + { \ + BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \ + FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \ + } + +/** + * flex_array_alloc() - Creates a flexible array. + * @element_size: individual object size. + * @total: maximum number of objects which can be stored. + * @flags: GFP flags + * + * Return: Returns an object of structure flex_array. + */ +struct flex_array *flex_array_alloc(int element_size, unsigned int total, + gfp_t flags); + +/** + * flex_array_prealloc() - Ensures that memory for the elements indexed in the + * range defined by start and nr_elements has been allocated. + * @fa: array to allocate memory to. + * @start: start address + * @nr_elements: number of elements to be allocated. + * @flags: GFP flags + * + */ +int flex_array_prealloc(struct flex_array *fa, unsigned int start, + unsigned int nr_elements, gfp_t flags); + +/** + * flex_array_free() - Removes all elements of a flexible array. + * @fa: array to be freed. + */ +void flex_array_free(struct flex_array *fa); + +/** + * flex_array_free_parts() - Removes all elements of a flexible array, but + * leaves the array itself in place. + * @fa: array to be emptied. + */ +void flex_array_free_parts(struct flex_array *fa); + +/** + * flex_array_put() - Stores data into a flexible array. + * @fa: array where element is to be stored. + * @element_nr: position to copy, must be less than the maximum specified when + * the array was created. + * @src: data source to be copied into the array. + * @flags: GFP flags + * + * Return: Returns zero on success, a negative error code otherwise. + */ +int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, + gfp_t flags); + +/** + * flex_array_clear() - Clears an individual element in the array, sets the + * given element to FLEX_ARRAY_FREE. + * @element_nr: element position to clear. + * @fa: array to which element to be cleared belongs. + * + * Return: Returns zero on success, -EINVAL otherwise. + */ +int flex_array_clear(struct flex_array *fa, unsigned int element_nr); + +/** + * flex_array_get() - Retrieves data into a flexible array. + * + * @element_nr: Element position to retrieve data from. + * @fa: array from which data is to be retrieved. + * + * Return: Returns a pointer to the data element, or NULL if that + * particular element has never been allocated. 
+ */ +void *flex_array_get(struct flex_array *fa, unsigned int element_nr); + +/** + * flex_array_shrink() - Reduces the allocated size of an array. + * @fa: array to shrink. + * + * Return: Returns number of pages of memory actually freed. + * + */ +int flex_array_shrink(struct flex_array *fa); + +#define flex_array_put_ptr(fa, nr, src, gfp) \ + flex_array_put(fa, nr, (void *)&(src), gfp) + +void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr); + +#endif /* _FLEX_ARRAY_H */ diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h new file mode 100644 index 000000000..c12df59d3 --- /dev/null +++ b/include/linux/flex_proportions.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Floating proportions with flexible aging period + * + * Copyright (C) 2011, SUSE, Jan Kara + */ + +#ifndef _LINUX_FLEX_PROPORTIONS_H +#define _LINUX_FLEX_PROPORTIONS_H + +#include +#include +#include +#include + +/* + * When maximum proportion of some event type is specified, this is the + * precision with which we allow limitting. Note that this creates an upper + * bound on the number of events per period like + * ULLONG_MAX >> FPROP_FRAC_SHIFT. + */ +#define FPROP_FRAC_SHIFT 10 +#define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT) + +/* + * ---- Global proportion definitions ---- + */ +struct fprop_global { + /* Number of events in the current period */ + struct percpu_counter events; + /* Current period */ + unsigned int period; + /* Synchronization with period transitions */ + seqcount_t sequence; +}; + +int fprop_global_init(struct fprop_global *p, gfp_t gfp); +void fprop_global_destroy(struct fprop_global *p); +bool fprop_new_period(struct fprop_global *p, int periods); + +/* + * ---- SINGLE ---- + */ +struct fprop_local_single { + /* the local events counter */ + unsigned long events; + /* Period in which we last updated events */ + unsigned int period; + raw_spinlock_t lock; /* Protect period and numerator */ +}; + +#define INIT_FPROP_LOCAL_SINGLE(name) \ +{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ +} + +int fprop_local_init_single(struct fprop_local_single *pl); +void fprop_local_destroy_single(struct fprop_local_single *pl); +void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl); +void fprop_fraction_single(struct fprop_global *p, + struct fprop_local_single *pl, unsigned long *numerator, + unsigned long *denominator); + +static inline +void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl) +{ + unsigned long flags; + + local_irq_save(flags); + __fprop_inc_single(p, pl); + local_irq_restore(flags); +} + +/* + * ---- PERCPU ---- + */ +struct fprop_local_percpu { + /* the local events counter */ + struct percpu_counter events; + /* Period in which we last updated events */ + unsigned int period; + raw_spinlock_t lock; /* Protect period and numerator */ +}; + +int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp); +void fprop_local_destroy_percpu(struct fprop_local_percpu *pl); +void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl); +void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl, + int max_frac); +void fprop_fraction_percpu(struct fprop_global *p, + struct fprop_local_percpu *pl, unsigned long *numerator, + unsigned long *denominator); + +static inline +void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl) +{ + unsigned long flags; + + local_irq_save(flags); + __fprop_inc_percpu(p, pl); + 
local_irq_restore(flags); +} + +#endif diff --git a/include/linux/fmc-sdb.h b/include/linux/fmc-sdb.h new file mode 100644 index 000000000..bec899f08 --- /dev/null +++ b/include/linux/fmc-sdb.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is separate from sdb.h, because I want that one to remain + * unchanged (as far as possible) from the official sdb distribution + * + * This file and associated functionality are a playground for me to + * understand stuff which will later be implemented in more generic places. + */ +#include + +/* This is the union of all currently defined types */ +union sdb_record { + struct sdb_interconnect ic; + struct sdb_device dev; + struct sdb_bridge bridge; + struct sdb_integration integr; + struct sdb_empty empty; + struct sdb_synthesis synthesis; + struct sdb_repo_url repo_url; +}; + +struct fmc_device; + +/* Every sdb table is turned into this structure */ +struct sdb_array { + int len; + int level; + unsigned long baseaddr; + struct fmc_device *fmc; /* the device that hosts it */ + struct sdb_array *parent; /* NULL at root */ + union sdb_record *record; /* copies of the struct */ + struct sdb_array **subtree; /* only valid for bridge items */ +}; + +extern int fmc_scan_sdb_tree(struct fmc_device *fmc, unsigned long address); +extern void fmc_show_sdb_tree(const struct fmc_device *fmc); +extern signed long fmc_find_sdb_device(struct sdb_array *tree, uint64_t vendor, + uint32_t device, unsigned long *sz); +extern int fmc_free_sdb_tree(struct fmc_device *fmc); diff --git a/include/linux/fmc.h b/include/linux/fmc.h new file mode 100644 index 000000000..3dc8a1b2d --- /dev/null +++ b/include/linux/fmc.h @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2012 CERN (www.cern.ch) + * Author: Alessandro Rubini + * + * Released according to the GNU GPL, version 2 or any later version. + * + * This work is part of the White Rabbit project, a research effort led + * by CERN, the European Institute for Nuclear Research. + */ +#ifndef __LINUX_FMC_H__ +#define __LINUX_FMC_H__ +#include +#include +#include +#include +#include +#include + +struct fmc_device; +struct fmc_driver; + +/* + * This bus abstraction is developed separately from drivers, so we need + * to check the version of the data structures we receive. + */ + +#define FMC_MAJOR 3 +#define FMC_MINOR 0 +#define FMC_VERSION ((FMC_MAJOR << 16) | FMC_MINOR) +#define __FMC_MAJOR(x) ((x) >> 16) +#define __FMC_MINOR(x) ((x) & 0xffff) + +/* + * The device identification, as defined by the IPMI FRU (Field Replaceable + * Unit) includes four different strings to describe the device. Here we + * only match the "Board Manufacturer" and the "Board Product Name", + * ignoring the "Board Serial Number" and "Board Part Number". All 4 are + * expected to be strings, so they are treated as zero-terminated C strings. + * Unspecified string (NULL) means "any", so if both are unspecified this + * is a catch-all driver. So null entries are allowed and we use array + * and length. This is unlike pci and usb that use null-terminated arrays + */ +struct fmc_fru_id { + char *manufacturer; + char *product_name; +}; + +/* + * If the FPGA is already programmed (think Etherbone or the second + * SVEC slot), we can match on SDB devices in the memory image. This + * match uses an array of devices that must all be present, and the + * match is based on vendor and device only. Further checks are expected + * to happen in the probe function. Zero means "any" and catch-all is allowed. 
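+ *
+ * Editor-added illustrative match table (the vendor/device numbers are made
+ * up), using the structures declared below:
+ *
+ *	static struct fmc_sdb_one_id my_cores[] = {
+ *		{ .vendor = 0x1234, .device = 0xabcd },
+ *	};
+ *	static struct fmc_sdb_id my_sdb = {
+ *		.cores = my_cores, .cores_nr = ARRAY_SIZE(my_cores),
+ *	};
+ *	static struct fmc_device_id my_id = {
+ *		.sdb_id = &my_sdb, .sdb_id_nr = 1,
+ *	};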
+ */ +struct fmc_sdb_one_id { + uint64_t vendor; + uint32_t device; +}; +struct fmc_sdb_id { + struct fmc_sdb_one_id *cores; + int cores_nr; +}; + +struct fmc_device_id { + struct fmc_fru_id *fru_id; + int fru_id_nr; + struct fmc_sdb_id *sdb_id; + int sdb_id_nr; +}; + +/* This sizes the module_param_array used by generic module parameters */ +#define FMC_MAX_CARDS 32 + +/* The driver is a pretty simple thing */ +struct fmc_driver { + unsigned long version; + struct device_driver driver; + int (*probe)(struct fmc_device *); + int (*remove)(struct fmc_device *); + const struct fmc_device_id id_table; + /* What follows is for generic module parameters */ + int busid_n; + int busid_val[FMC_MAX_CARDS]; + int gw_n; + char *gw_val[FMC_MAX_CARDS]; +}; +#define to_fmc_driver(x) container_of((x), struct fmc_driver, driver) + +/* These are the generic parameters, that drivers may instantiate */ +#define FMC_PARAM_BUSID(_d) \ + module_param_array_named(busid, _d.busid_val, int, &_d.busid_n, 0444) +#define FMC_PARAM_GATEWARE(_d) \ + module_param_array_named(gateware, _d.gw_val, charp, &_d.gw_n, 0444) + +/* + * Drivers may need to configure gpio pins in the carrier. To read input + * (a very uncommon operation, and definitely not in the hot paths), just + * configure one gpio only and get 0 or 1 as retval of the config method + */ +struct fmc_gpio { + char *carrier_name; /* name or NULL for virtual pins */ + int gpio; + int _gpio; /* internal use by the carrier */ + int mode; /* GPIOF_DIR_OUT etc, from */ + int irqmode; /* IRQF_TRIGGER_LOW and so on */ +}; + +/* The numbering of gpio pins allows access to raw pins or virtual roles */ +#define FMC_GPIO_RAW(x) (x) /* 4096 of them */ +#define __FMC_GPIO_IS_RAW(x) ((x) < 0x1000) +#define FMC_GPIO_IRQ(x) ((x) + 0x1000) /* 256 of them */ +#define FMC_GPIO_LED(x) ((x) + 0x1100) /* 256 of them */ +#define FMC_GPIO_KEY(x) ((x) + 0x1200) /* 256 of them */ +#define FMC_GPIO_TP(x) ((x) + 0x1300) /* 256 of them */ +#define FMC_GPIO_USER(x) ((x) + 0x1400) /* 256 of them */ +/* We may add SCL and SDA, or other roles if the need arises */ + +/* GPIOF_DIR_IN etc are missing before 3.0. copy from */ +#ifndef GPIOF_DIR_IN +# define GPIOF_DIR_OUT (0 << 0) +# define GPIOF_DIR_IN (1 << 0) +# define GPIOF_INIT_LOW (0 << 1) +# define GPIOF_INIT_HIGH (1 << 1) +#endif + +/* + * The operations are offered by each carrier and should make driver + * design completely independent of the carrier. Named GPIO pins may be + * the exception. 
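+ *
+ * Editor-added sketch: through these operations a mezzanine driver can, for
+ * instance, sample one carrier input pin with the fmc_gpio_config() helper
+ * declared further down (the pin number is hypothetical):
+ *
+ *	struct fmc_gpio in = {
+ *		.gpio = FMC_GPIO_RAW(3),
+ *		.mode = GPIOF_DIR_IN,
+ *	};
+ *	int level = fmc_gpio_config(fmc, &in, 1);	/* returns 0 or 1 */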
+ */ +struct fmc_operations { + uint32_t (*read32)(struct fmc_device *fmc, int offset); + void (*write32)(struct fmc_device *fmc, uint32_t value, int offset); + int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv); + int (*reprogram_raw)(struct fmc_device *f, struct fmc_driver *d, + void *gw, unsigned long len); + int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw); + int (*irq_request)(struct fmc_device *fmc, irq_handler_t h, + char *name, int flags); + void (*irq_ack)(struct fmc_device *fmc); + int (*irq_free)(struct fmc_device *fmc); + int (*gpio_config)(struct fmc_device *fmc, struct fmc_gpio *gpio, + int ngpio); + int (*read_ee)(struct fmc_device *fmc, int pos, void *d, int l); + int (*write_ee)(struct fmc_device *fmc, int pos, const void *d, int l); +}; + +/* Prefer this helper rather than calling of fmc->reprogram directly */ +int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d, + void *gw, unsigned long len, int sdb_entry); +extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw, + int sdb_entry); + +/* + * The device reports all information needed to access hw. + * + * If we have eeprom_len and not contents, the core reads it. + * Then, parsing of identifiers is done by the core which fills fmc_fru_id.. + * Similarly a device that must be matched based on SDB cores must + * fill the entry point and the core will scan the bus (FIXME: sdb match) + */ +struct fmc_device { + unsigned long version; + unsigned long flags; + struct module *owner; /* char device must pin it */ + struct fmc_fru_id id; /* for EEPROM-based match */ + struct fmc_operations *op; /* carrier-provided */ + int irq; /* according to host bus. 0 == none */ + int eeprom_len; /* Usually 8kB, may be less */ + int eeprom_addr; /* 0x50, 0x52 etc */ + uint8_t *eeprom; /* Full contents or leading part */ + char *carrier_name; /* "SPEC" or similar, for special use */ + void *carrier_data; /* "struct spec *" or equivalent */ + __iomem void *fpga_base; /* May be NULL (Etherbone) */ + __iomem void *slot_base; /* Set by the driver */ + struct fmc_device **devarray; /* Allocated by the bus */ + int slot_id; /* Index in the slot array */ + int nr_slots; /* Number of slots in this carrier */ + unsigned long memlen; /* Used for the char device */ + struct device dev; /* For Linux use */ + struct device *hwdev; /* The underlying hardware device */ + unsigned long sdbfs_entry; + struct sdb_array *sdb; + uint32_t device_id; /* Filled by the device */ + char *mezzanine_name; /* Defaults to ``fmc'' */ + void *mezzanine_data; + + struct dentry *dbg_dir; + struct dentry *dbg_sdb_dump; +}; +#define to_fmc_device(x) container_of((x), struct fmc_device, dev) + +#define FMC_DEVICE_HAS_GOLDEN 1 +#define FMC_DEVICE_HAS_CUSTOM 2 +#define FMC_DEVICE_NO_MEZZANINE 4 +#define FMC_DEVICE_MATCH_SDB 8 /* fmc-core must scan sdb in fpga */ + +/* + * If fpga_base can be used, the carrier offers no readl/writel methods, and + * this expands to a single, fast, I/O access. 
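+ *
+ * Editor-added usage sketch (register offsets are hypothetical):
+ *
+ *	u32 id = fmc_readl(fmc, 0x00);		/* read an ID register */
+ *	fmc_writel(fmc, 0x1, 0x04);		/* write a control register */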
+ */ +static inline uint32_t fmc_readl(struct fmc_device *fmc, int offset) +{ + if (unlikely(fmc->op->read32)) + return fmc->op->read32(fmc, offset); + return readl(fmc->fpga_base + offset); +} +static inline void fmc_writel(struct fmc_device *fmc, uint32_t val, int off) +{ + if (unlikely(fmc->op->write32)) + fmc->op->write32(fmc, val, off); + else + writel(val, fmc->fpga_base + off); +} + +/* pci-like naming */ +static inline void *fmc_get_drvdata(const struct fmc_device *fmc) +{ + return dev_get_drvdata(&fmc->dev); +} + +static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data) +{ + dev_set_drvdata(&fmc->dev, data); +} + +struct fmc_gateware { + void *bitstream; + unsigned long len; +}; + +/* The 5 access points */ +extern int fmc_driver_register(struct fmc_driver *drv); +extern void fmc_driver_unregister(struct fmc_driver *drv); +extern int fmc_device_register(struct fmc_device *tdev); +extern int fmc_device_register_gw(struct fmc_device *tdev, + struct fmc_gateware *gw); +extern void fmc_device_unregister(struct fmc_device *tdev); + +/* Three more for device sets, all driven by the same FPGA */ +extern int fmc_device_register_n(struct fmc_device **devs, int n); +extern int fmc_device_register_n_gw(struct fmc_device **devs, int n, + struct fmc_gateware *gw); +extern void fmc_device_unregister_n(struct fmc_device **devs, int n); + +/* Internal cross-calls between files; not exported to other modules */ +extern int fmc_match(struct device *dev, struct device_driver *drv); +extern int fmc_fill_id_info(struct fmc_device *fmc); +extern void fmc_free_id_info(struct fmc_device *fmc); +extern void fmc_dump_eeprom(const struct fmc_device *fmc); + +/* helpers for FMC operations */ +extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h, + char *name, int flags); +extern void fmc_irq_free(struct fmc_device *fmc); +extern void fmc_irq_ack(struct fmc_device *fmc); +extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv); +extern int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio, + int ngpio); +extern int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l); +extern int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l); + +/* helpers for FMC operations */ +extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h, + char *name, int flags); +extern void fmc_irq_free(struct fmc_device *fmc); +extern void fmc_irq_ack(struct fmc_device *fmc); +extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv); + +#endif /* __LINUX_FMC_H__ */ diff --git a/include/linux/font.h b/include/linux/font.h new file mode 100644 index 000000000..f85e70bd4 --- /dev/null +++ b/include/linux/font.h @@ -0,0 +1,73 @@ +/* + * font.h -- `Soft' font definitions + * + * Created 1995 by Geert Uytterhoeven + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
+ */ + +#ifndef _VIDEO_FONT_H +#define _VIDEO_FONT_H + +#include + +struct font_desc { + int idx; + const char *name; + int width, height; + const void *data; + int pref; +}; + +#define VGA8x8_IDX 0 +#define VGA8x16_IDX 1 +#define PEARL8x8_IDX 2 +#define VGA6x11_IDX 3 +#define FONT7x14_IDX 4 +#define FONT10x18_IDX 5 +#define SUN8x16_IDX 6 +#define SUN12x22_IDX 7 +#define ACORN8x8_IDX 8 +#define MINI4x6_IDX 9 +#define FONT6x10_IDX 10 + +extern const struct font_desc font_vga_8x8, + font_vga_8x16, + font_pearl_8x8, + font_vga_6x11, + font_7x14, + font_10x18, + font_sun_8x16, + font_sun_12x22, + font_acorn_8x8, + font_mini_4x6, + font_6x10; + +/* Find a font with a specific name */ + +extern const struct font_desc *find_font(const char *name); + +/* Get the default font for a specific screen size */ + +extern const struct font_desc *get_default_font(int xres, int yres, + u32 font_w, u32 font_h); + +/* Max. length for the name of a predefined font */ +#define MAX_FONT_NAME 32 + +/* Extra word getters */ +#define REFCOUNT(fd) (((int *)(fd))[-1]) +#define FNTSIZE(fd) (((int *)(fd))[-2]) +#define FNTCHARCNT(fd) (((int *)(fd))[-3]) +#define FNTSUM(fd) (((int *)(fd))[-4]) + +#define FONT_EXTRA_WORDS 4 + +struct font_data { + unsigned int extra[FONT_EXTRA_WORDS]; + const unsigned char data[]; +} __packed; + +#endif /* _VIDEO_FONT_H */ diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h new file mode 100644 index 000000000..7d4664730 --- /dev/null +++ b/include/linux/fpga/altera-pr-ip-core.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Driver for Altera Partial Reconfiguration IP Core + * + * Copyright (C) 2016 Intel Corporation + * + * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation + * by Alan Tull + */ + +#ifndef _ALT_PR_IP_CORE_H +#define _ALT_PR_IP_CORE_H +#include + +int alt_pr_register(struct device *dev, void __iomem *reg_base); +int alt_pr_unregister(struct device *dev); + +#endif /* _ALT_PR_IP_CORE_H */ diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h new file mode 100644 index 000000000..ce550fcf6 --- /dev/null +++ b/include/linux/fpga/fpga-bridge.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_FPGA_BRIDGE_H +#define _LINUX_FPGA_BRIDGE_H + +#include +#include + +struct fpga_bridge; + +/** + * struct fpga_bridge_ops - ops for low level FPGA bridge drivers + * @enable_show: returns the FPGA bridge's status + * @enable_set: set a FPGA bridge as enabled or disabled + * @fpga_bridge_remove: set FPGA into a specific state during driver remove + * @groups: optional attribute groups. 
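+ *
+ * Editor-added sketch of a minimal low level driver (names are
+ * hypothetical), wired up with the helpers declared at the end of this
+ * header:
+ *
+ *	static int my_enable_set(struct fpga_bridge *bridge, bool enable)
+ *	{
+ *		/* flip the gate via bridge->priv */
+ *		return 0;
+ *	}
+ *
+ *	static const struct fpga_bridge_ops my_br_ops = {
+ *		.enable_set = my_enable_set,
+ *	};
+ *
+ *	br = fpga_bridge_create(dev, "my-bridge", &my_br_ops, priv);
+ *	ret = br ? fpga_bridge_register(br) : -ENOMEM;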
+ */ +struct fpga_bridge_ops { + int (*enable_show)(struct fpga_bridge *bridge); + int (*enable_set)(struct fpga_bridge *bridge, bool enable); + void (*fpga_bridge_remove)(struct fpga_bridge *bridge); + const struct attribute_group **groups; +}; + +/** + * struct fpga_bridge - FPGA bridge structure + * @name: name of low level FPGA bridge + * @dev: FPGA bridge device + * @mutex: enforces exclusive reference to bridge + * @br_ops: pointer to struct of FPGA bridge ops + * @info: fpga image specific information + * @node: FPGA bridge list node + * @priv: low level driver private date + */ +struct fpga_bridge { + const char *name; + struct device dev; + struct mutex mutex; /* for exclusive reference to bridge */ + const struct fpga_bridge_ops *br_ops; + struct fpga_image_info *info; + struct list_head node; + void *priv; +}; + +#define to_fpga_bridge(d) container_of(d, struct fpga_bridge, dev) + +struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, + struct fpga_image_info *info); +struct fpga_bridge *fpga_bridge_get(struct device *dev, + struct fpga_image_info *info); +void fpga_bridge_put(struct fpga_bridge *bridge); +int fpga_bridge_enable(struct fpga_bridge *bridge); +int fpga_bridge_disable(struct fpga_bridge *bridge); + +int fpga_bridges_enable(struct list_head *bridge_list); +int fpga_bridges_disable(struct list_head *bridge_list); +void fpga_bridges_put(struct list_head *bridge_list); +int fpga_bridge_get_to_list(struct device *dev, + struct fpga_image_info *info, + struct list_head *bridge_list); +int of_fpga_bridge_get_to_list(struct device_node *np, + struct fpga_image_info *info, + struct list_head *bridge_list); + +struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name, + const struct fpga_bridge_ops *br_ops, + void *priv); +void fpga_bridge_free(struct fpga_bridge *br); +int fpga_bridge_register(struct fpga_bridge *br); +void fpga_bridge_unregister(struct fpga_bridge *br); + +#endif /* _LINUX_FPGA_BRIDGE_H */ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h new file mode 100644 index 000000000..8ab5df769 --- /dev/null +++ b/include/linux/fpga/fpga-mgr.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * FPGA Framework + * + * Copyright (C) 2013-2016 Altera Corporation + * Copyright (C) 2017 Intel Corporation + */ +#ifndef _LINUX_FPGA_MGR_H +#define _LINUX_FPGA_MGR_H + +#include +#include + +struct fpga_manager; +struct sg_table; + +/** + * enum fpga_mgr_states - fpga framework states + * @FPGA_MGR_STATE_UNKNOWN: can't determine state + * @FPGA_MGR_STATE_POWER_OFF: FPGA power is off + * @FPGA_MGR_STATE_POWER_UP: FPGA reports power is up + * @FPGA_MGR_STATE_RESET: FPGA in reset state + * @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress + * @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed + * @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming + * @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage + * @FPGA_MGR_STATE_WRITE: writing image to FPGA + * @FPGA_MGR_STATE_WRITE_ERR: Error while writing FPGA + * @FPGA_MGR_STATE_WRITE_COMPLETE: Doing post programming steps + * @FPGA_MGR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE + * @FPGA_MGR_STATE_OPERATING: FPGA is programmed and operating + */ +enum fpga_mgr_states { + /* default FPGA states */ + FPGA_MGR_STATE_UNKNOWN, + FPGA_MGR_STATE_POWER_OFF, + FPGA_MGR_STATE_POWER_UP, + FPGA_MGR_STATE_RESET, + + /* getting an image for loading */ + FPGA_MGR_STATE_FIRMWARE_REQ, + FPGA_MGR_STATE_FIRMWARE_REQ_ERR, + + /* write 
sequence: init, write, complete */ + FPGA_MGR_STATE_WRITE_INIT, + FPGA_MGR_STATE_WRITE_INIT_ERR, + FPGA_MGR_STATE_WRITE, + FPGA_MGR_STATE_WRITE_ERR, + FPGA_MGR_STATE_WRITE_COMPLETE, + FPGA_MGR_STATE_WRITE_COMPLETE_ERR, + + /* fpga is programmed and operating */ + FPGA_MGR_STATE_OPERATING, +}; + +/** + * DOC: FPGA Manager flags + * + * Flags used in the &fpga_image_info->flags field + * + * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported + * + * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting + * + * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted + * + * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first + * + * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed + */ +#define FPGA_MGR_PARTIAL_RECONFIG BIT(0) +#define FPGA_MGR_EXTERNAL_CONFIG BIT(1) +#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2) +#define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3) +#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4) + +/** + * struct fpga_image_info - information specific to a FPGA image + * @flags: boolean flags as defined above + * @enable_timeout_us: maximum time to enable traffic through bridge (uSec) + * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) + * @config_complete_timeout_us: maximum time for FPGA to switch to operating + * status in the write_complete op. + * @firmware_name: name of FPGA image firmware file + * @sgt: scatter/gather table containing FPGA image + * @buf: contiguous buffer containing FPGA image + * @count: size of buf + * @region_id: id of target region + * @dev: device that owns this + * @overlay: Device Tree overlay + */ +struct fpga_image_info { + u32 flags; + u32 enable_timeout_us; + u32 disable_timeout_us; + u32 config_complete_timeout_us; + char *firmware_name; + struct sg_table *sgt; + const char *buf; + size_t count; + int region_id; + struct device *dev; +#ifdef CONFIG_OF + struct device_node *overlay; +#endif +}; + +/** + * struct fpga_manager_ops - ops for low level fpga manager drivers + * @initial_header_size: Maximum number of bytes that should be passed into write_init + * @state: returns an enum value of the FPGA's state + * @status: returns status of the FPGA, including reconfiguration error code + * @write_init: prepare the FPGA to receive confuration data + * @write: write count bytes of configuration data to the FPGA + * @write_sg: write the scatter list of configuration data to the FPGA + * @write_complete: set FPGA to operating state after writing is done + * @fpga_remove: optional: Set FPGA into a specific state during driver remove + * @groups: optional attribute groups. + * + * fpga_manager_ops are the low level functions implemented by a specific + * fpga manager driver. The optional ones are tested for NULL before being + * called, so leaving them out is fine. 
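+ *
+ * Editor-added sketch (hook names are hypothetical; unimplemented optional
+ * hooks are simply left NULL as described above):
+ *
+ *	static const struct fpga_manager_ops my_mops = {
+ *		.state		= my_state,
+ *		.write_init	= my_write_init,
+ *		.write		= my_write,
+ *		.write_complete	= my_write_complete,
+ *	};
+ *
+ *	mgr = fpga_mgr_create(dev, "my fpga manager", &my_mops, priv);
+ *	ret = mgr ? fpga_mgr_register(mgr) : -ENOMEM;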
+ */ +struct fpga_manager_ops { + size_t initial_header_size; + enum fpga_mgr_states (*state)(struct fpga_manager *mgr); + u64 (*status)(struct fpga_manager *mgr); + int (*write_init)(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count); + int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); + int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt); + int (*write_complete)(struct fpga_manager *mgr, + struct fpga_image_info *info); + void (*fpga_remove)(struct fpga_manager *mgr); + const struct attribute_group **groups; +}; + +/* FPGA manager status: Partial/Full Reconfiguration errors */ +#define FPGA_MGR_STATUS_OPERATION_ERR BIT(0) +#define FPGA_MGR_STATUS_CRC_ERR BIT(1) +#define FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR BIT(2) +#define FPGA_MGR_STATUS_IP_PROTOCOL_ERR BIT(3) +#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4) + +/** + * struct fpga_compat_id - id for compatibility check + * + * @id_h: high 64bit of the compat_id + * @id_l: low 64bit of the compat_id + */ +struct fpga_compat_id { + u64 id_h; + u64 id_l; +}; + +/** + * struct fpga_manager - fpga manager structure + * @name: name of low level fpga manager + * @dev: fpga manager device + * @ref_mutex: only allows one reference to fpga manager + * @state: state of fpga manager + * @compat_id: FPGA manager id for compatibility check. + * @mops: pointer to struct of fpga manager ops + * @priv: low level driver private date + */ +struct fpga_manager { + const char *name; + struct device dev; + struct mutex ref_mutex; + enum fpga_mgr_states state; + struct fpga_compat_id *compat_id; + const struct fpga_manager_ops *mops; + void *priv; +}; + +#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) + +struct fpga_image_info *fpga_image_info_alloc(struct device *dev); + +void fpga_image_info_free(struct fpga_image_info *info); + +int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info); + +int fpga_mgr_lock(struct fpga_manager *mgr); +void fpga_mgr_unlock(struct fpga_manager *mgr); + +struct fpga_manager *of_fpga_mgr_get(struct device_node *node); + +struct fpga_manager *fpga_mgr_get(struct device *dev); + +void fpga_mgr_put(struct fpga_manager *mgr); + +struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name, + const struct fpga_manager_ops *mops, + void *priv); +void fpga_mgr_free(struct fpga_manager *mgr); +int fpga_mgr_register(struct fpga_manager *mgr); +void fpga_mgr_unregister(struct fpga_manager *mgr); + +#endif /*_LINUX_FPGA_MGR_H */ diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h new file mode 100644 index 000000000..0521b7f57 --- /dev/null +++ b/include/linux/fpga/fpga-region.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _FPGA_REGION_H +#define _FPGA_REGION_H + +#include +#include +#include + +/** + * struct fpga_region - FPGA Region structure + * @dev: FPGA Region device + * @mutex: enforces exclusive reference to region + * @bridge_list: list of FPGA bridges specified in region + * @mgr: FPGA manager + * @info: FPGA image info + * @compat_id: FPGA region id for compatibility check. 
+ * @priv: private data + * @get_bridges: optional function to get bridges to a list + */ +struct fpga_region { + struct device dev; + struct mutex mutex; /* for exclusive reference to region */ + struct list_head bridge_list; + struct fpga_manager *mgr; + struct fpga_image_info *info; + struct fpga_compat_id *compat_id; + void *priv; + int (*get_bridges)(struct fpga_region *region); +}; + +#define to_fpga_region(d) container_of(d, struct fpga_region, dev) + +struct fpga_region *fpga_region_class_find( + struct device *start, const void *data, + int (*match)(struct device *, const void *)); + +int fpga_region_program_fpga(struct fpga_region *region); + +struct fpga_region +*fpga_region_create(struct device *dev, struct fpga_manager *mgr, + int (*get_bridges)(struct fpga_region *)); +void fpga_region_free(struct fpga_region *region); +int fpga_region_register(struct fpga_region *region); +void fpga_region_unregister(struct fpga_region *region); + +#endif /* _FPGA_REGION_H */ diff --git a/include/linux/frame.h b/include/linux/frame.h new file mode 100644 index 000000000..02d3ca2d9 --- /dev/null +++ b/include/linux/frame.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FRAME_H +#define _LINUX_FRAME_H + +#ifdef CONFIG_STACK_VALIDATION +/* + * This macro marks the given function's stack frame as "non-standard", which + * tells objtool to ignore the function when doing stack metadata validation. + * It should only be used in special cases where you're 100% sure it won't + * affect the reliability of frame pointers and kernel stack traces. + * + * For more information, see tools/objtool/Documentation/stack-validation.txt. + */ +#define STACK_FRAME_NON_STANDARD(func) \ + static void __used __section(.discard.func_stack_frame_non_standard) \ + *__func_stack_frame_non_standard_##func = func + +#else /* !CONFIG_STACK_VALIDATION */ + +#define STACK_FRAME_NON_STANDARD(func) + +#endif /* CONFIG_STACK_VALIDATION */ + +#endif /* _LINUX_FRAME_H */ diff --git a/include/linux/freezer.h b/include/linux/freezer.h new file mode 100644 index 000000000..21f5aa0b2 --- /dev/null +++ b/include/linux/freezer.h @@ -0,0 +1,302 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Freezer declarations */ + +#ifndef FREEZER_H_INCLUDED +#define FREEZER_H_INCLUDED + +#include +#include +#include +#include + +#ifdef CONFIG_FREEZER +extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */ +extern bool pm_freezing; /* PM freezing in effect */ +extern bool pm_nosig_freezing; /* PM nosig freezing in effect */ + +/* + * Timeout for stopping processes + */ +extern unsigned int freeze_timeout_msecs; + +/* + * Check if a process has been frozen + */ +static inline bool frozen(struct task_struct *p) +{ + return p->flags & PF_FROZEN; +} + +extern bool freezing_slow_path(struct task_struct *p); + +/* + * Check if there is a request to freeze a process + */ +static inline bool freezing(struct task_struct *p) +{ + if (likely(!atomic_read(&system_freezing_cnt))) + return false; + return freezing_slow_path(p); +} + +/* Takes and releases task alloc lock using task_lock() */ +extern void __thaw_task(struct task_struct *t); + +extern bool __refrigerator(bool check_kthr_stop); +extern int freeze_processes(void); +extern int freeze_kernel_threads(void); +extern void thaw_processes(void); +extern void thaw_kernel_threads(void); + +/* + * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION + * If try_to_freeze causes a lockdep warning it means the caller may deadlock + */ +static inline bool 
try_to_freeze_unsafe(void) +{ + might_sleep(); + if (likely(!freezing(current))) + return false; + return __refrigerator(false); +} + +static inline bool try_to_freeze(void) +{ + if (!(current->flags & PF_NOFREEZE)) + debug_check_no_locks_held(); + return try_to_freeze_unsafe(); +} + +extern bool freeze_task(struct task_struct *p); +extern bool set_freezable(void); + +#ifdef CONFIG_CGROUP_FREEZER +extern bool cgroup_freezing(struct task_struct *task); +#else /* !CONFIG_CGROUP_FREEZER */ +static inline bool cgroup_freezing(struct task_struct *task) +{ + return false; +} +#endif /* !CONFIG_CGROUP_FREEZER */ + +/* + * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it + * calls wait_for_completion(&vfork) and reset right after it returns from this + * function. Next, the parent should call try_to_freeze() to freeze itself + * appropriately in case the child has exited before the freezing of tasks is + * complete. However, we don't want kernel threads to be frozen in unexpected + * places, so we allow them to block freeze_processes() instead or to set + * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the + * parent won't really block freeze_processes(), since ____call_usermodehelper() + * (the child) does a little before exec/exit and it can't be frozen before + * waking up the parent. + */ + + +/** + * freezer_do_not_count - tell freezer to ignore %current + * + * Tell freezers to ignore the current task when determining whether the + * target frozen state is reached. IOW, the current task will be + * considered frozen enough by freezers. + * + * The caller shouldn't do anything which isn't allowed for a frozen task + * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair + * wrap a scheduling operation and nothing much else. + */ +static inline void freezer_do_not_count(void) +{ + current->flags |= PF_FREEZER_SKIP; +} + +/** + * freezer_count - tell freezer to stop ignoring %current + * + * Undo freezer_do_not_count(). It tells freezers that %current should be + * considered again and tries to freeze if freezing condition is already in + * effect. + */ +static inline void freezer_count(void) +{ + current->flags &= ~PF_FREEZER_SKIP; + /* + * If freezing is in progress, the following paired with smp_mb() + * in freezer_should_skip() ensures that either we see %true + * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP. + */ + smp_mb(); + try_to_freeze(); +} + +/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ +static inline void freezer_count_unsafe(void) +{ + current->flags &= ~PF_FREEZER_SKIP; + smp_mb(); + try_to_freeze_unsafe(); +} + +/** + * freezer_should_skip - whether to skip a task when determining frozen + * state is reached + * @p: task in quesion + * + * This function is used by freezers after establishing %true freezing() to + * test whether a task should be skipped when determining the target frozen + * state is reached. IOW, if this function returns %true, @p is considered + * frozen enough. + */ +static inline bool freezer_should_skip(struct task_struct *p) +{ + /* + * The following smp_mb() paired with the one in freezer_count() + * ensures that either freezer_count() sees %true freezing() or we + * see cleared %PF_FREEZER_SKIP and return %false. This makes it + * impossible for a task to slip frozen state testing after + * clearing %PF_FREEZER_SKIP. 
+ */ + smp_mb(); + return p->flags & PF_FREEZER_SKIP; +} + +/* + * These functions are intended to be used whenever you want to allow a sleeping + * task to be frozen. Note that none of them returns any clear indication of + * whether a freeze event happened while in this function. + */ + +/* Like schedule(), but should not block the freezer. */ +static inline void freezable_schedule(void) +{ + freezer_do_not_count(); + schedule(); + freezer_count(); +} + +/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ +static inline void freezable_schedule_unsafe(void) +{ + freezer_do_not_count(); + schedule(); + freezer_count_unsafe(); +} + +/* + * Like schedule_timeout(), but should not block the freezer. Do not + * call this with locks held. + */ +static inline long freezable_schedule_timeout(long timeout) +{ + long __retval; + freezer_do_not_count(); + __retval = schedule_timeout(timeout); + freezer_count(); + return __retval; +} + +/* + * Like schedule_timeout_interruptible(), but should not block the freezer. Do not + * call this with locks held. + */ +static inline long freezable_schedule_timeout_interruptible(long timeout) +{ + long __retval; + freezer_do_not_count(); + __retval = schedule_timeout_interruptible(timeout); + freezer_count(); + return __retval; +} + +/* Like schedule_timeout_killable(), but should not block the freezer. */ +static inline long freezable_schedule_timeout_killable(long timeout) +{ + long __retval; + freezer_do_not_count(); + __retval = schedule_timeout_killable(timeout); + freezer_count(); + return __retval; +} + +/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ +static inline long freezable_schedule_timeout_killable_unsafe(long timeout) +{ + long __retval; + freezer_do_not_count(); + __retval = schedule_timeout_killable(timeout); + freezer_count_unsafe(); + return __retval; +} + +/* + * Like schedule_hrtimeout_range(), but should not block the freezer. Do not + * call this with locks held. 
+ */ +static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, + u64 delta, const enum hrtimer_mode mode) +{ + int __retval; + freezer_do_not_count(); + __retval = schedule_hrtimeout_range(expires, delta, mode); + freezer_count(); + return __retval; +} + +/* + * Freezer-friendly wrappers around wait_event_interruptible(), + * wait_event_killable() and wait_event_interruptible_timeout(), originally + * defined in + */ + +/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ +#define wait_event_freezekillable_unsafe(wq, condition) \ +({ \ + int __retval; \ + freezer_do_not_count(); \ + __retval = wait_event_killable(wq, (condition)); \ + freezer_count_unsafe(); \ + __retval; \ +}) + +#else /* !CONFIG_FREEZER */ +static inline bool frozen(struct task_struct *p) { return false; } +static inline bool freezing(struct task_struct *p) { return false; } +static inline void __thaw_task(struct task_struct *t) {} + +static inline bool __refrigerator(bool check_kthr_stop) { return false; } +static inline int freeze_processes(void) { return -ENOSYS; } +static inline int freeze_kernel_threads(void) { return -ENOSYS; } +static inline void thaw_processes(void) {} +static inline void thaw_kernel_threads(void) {} + +static inline bool try_to_freeze_nowarn(void) { return false; } +static inline bool try_to_freeze(void) { return false; } + +static inline void freezer_do_not_count(void) {} +static inline void freezer_count(void) {} +static inline int freezer_should_skip(struct task_struct *p) { return 0; } +static inline void set_freezable(void) {} + +#define freezable_schedule() schedule() + +#define freezable_schedule_unsafe() schedule() + +#define freezable_schedule_timeout(timeout) schedule_timeout(timeout) + +#define freezable_schedule_timeout_interruptible(timeout) \ + schedule_timeout_interruptible(timeout) + +#define freezable_schedule_timeout_killable(timeout) \ + schedule_timeout_killable(timeout) + +#define freezable_schedule_timeout_killable_unsafe(timeout) \ + schedule_timeout_killable(timeout) + +#define freezable_schedule_hrtimeout_range(expires, delta, mode) \ + schedule_hrtimeout_range(expires, delta, mode) + +#define wait_event_freezekillable_unsafe(wq, condition) \ + wait_event_killable(wq, condition) + +#endif /* !CONFIG_FREEZER */ + +#endif /* FREEZER_H_INCLUDED */ diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h new file mode 100644 index 000000000..011965c08 --- /dev/null +++ b/include/linux/frontswap.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FRONTSWAP_H +#define _LINUX_FRONTSWAP_H + +#include +#include +#include +#include + +struct frontswap_ops { + void (*init)(unsigned); /* this swap type was just swapon'ed */ + int (*store)(unsigned, pgoff_t, struct page *); /* store a page */ + int (*load)(unsigned, pgoff_t, struct page *); /* load a page */ + void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */ + void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */ + struct frontswap_ops *next; /* private pointer to next ops */ +}; + +extern void frontswap_register_ops(struct frontswap_ops *ops); +extern void frontswap_shrink(unsigned long); +extern unsigned long frontswap_curr_pages(void); +extern void frontswap_writethrough(bool); +#define FRONTSWAP_HAS_EXCLUSIVE_GETS +extern void frontswap_tmem_exclusive_gets(bool); + +extern bool __frontswap_test(struct swap_info_struct *, pgoff_t); +extern void __frontswap_init(unsigned type, unsigned long *map); +extern int __frontswap_store(struct page *page); 
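/*
 * Illustrative sketch, not from the upstream header: a minimal frontswap
 * backend wiring up the frontswap_ops interface declared above. All
 * "example_fs_*" names are hypothetical; a real backend (zswap, for
 * instance) would store and restore the page contents in .store/.load
 * instead of unconditionally declining.
 */
static void example_fs_init(unsigned type)
{
	/* A swap area of this type was just swapon'ed; set up per-type state. */
}

static int example_fs_store(unsigned type, pgoff_t offset, struct page *page)
{
	/* Returning non-zero tells the core to fall back to the real swap device. */
	return -1;
}

static int example_fs_load(unsigned type, pgoff_t offset, struct page *page)
{
	/* Return 0 and fill @page only if .store previously accepted this entry. */
	return -1;
}

static void example_fs_invalidate_page(unsigned type, pgoff_t offset) { }
static void example_fs_invalidate_area(unsigned type) { }

static struct frontswap_ops example_fs_ops = {
	.init		 = example_fs_init,
	.store		 = example_fs_store,
	.load		 = example_fs_load,
	.invalidate_page = example_fs_invalidate_page,
	.invalidate_area = example_fs_invalidate_area,
};

/* Typically called once from the backend's own init code. */
static inline void example_fs_register(void)
{
	frontswap_register_ops(&example_fs_ops);
}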
+extern int __frontswap_load(struct page *page); +extern void __frontswap_invalidate_page(unsigned, pgoff_t); +extern void __frontswap_invalidate_area(unsigned); + +#ifdef CONFIG_FRONTSWAP +extern struct static_key_false frontswap_enabled_key; + +static inline bool frontswap_enabled(void) +{ + return static_branch_unlikely(&frontswap_enabled_key); +} + +static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) +{ + return __frontswap_test(sis, offset); +} + +static inline void frontswap_map_set(struct swap_info_struct *p, + unsigned long *map) +{ + p->frontswap_map = map; +} + +static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) +{ + return p->frontswap_map; +} +#else +/* all inline routines become no-ops and all externs are ignored */ + +static inline bool frontswap_enabled(void) +{ + return false; +} + +static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) +{ + return false; +} + +static inline void frontswap_map_set(struct swap_info_struct *p, + unsigned long *map) +{ +} + +static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) +{ + return NULL; +} +#endif + +static inline int frontswap_store(struct page *page) +{ + if (frontswap_enabled()) + return __frontswap_store(page); + + return -1; +} + +static inline int frontswap_load(struct page *page) +{ + if (frontswap_enabled()) + return __frontswap_load(page); + + return -1; +} + +static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) +{ + if (frontswap_enabled()) + __frontswap_invalidate_page(type, offset); +} + +static inline void frontswap_invalidate_area(unsigned type) +{ + if (frontswap_enabled()) + __frontswap_invalidate_area(type); +} + +static inline void frontswap_init(unsigned type, unsigned long *map) +{ +#ifdef CONFIG_FRONTSWAP + __frontswap_init(type, map); +#endif +} + +#endif /* _LINUX_FRONTSWAP_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h new file mode 100644 index 000000000..86f884e78 --- /dev/null +++ b/include/linux/fs.h @@ -0,0 +1,3477 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FS_H +#define _LINUX_FS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct backing_dev_info; +struct bdi_writeback; +struct bio; +struct export_operations; +struct hd_geometry; +struct iovec; +struct kiocb; +struct kobject; +struct pipe_inode_info; +struct poll_table_struct; +struct kstatfs; +struct vm_area_struct; +struct vfsmount; +struct cred; +struct swap_info_struct; +struct seq_file; +struct workqueue_struct; +struct iov_iter; +struct fscrypt_info; +struct fscrypt_operations; + +extern void __init inode_init(void); +extern void __init inode_init_early(void); +extern void __init files_init(void); +extern void __init files_maxfiles_init(void); + +extern struct files_stat_struct files_stat; +extern unsigned long get_max_files(void); +extern unsigned int sysctl_nr_open; +extern struct inodes_stat_t inodes_stat; +extern int leases_enable, lease_break_time; +extern int sysctl_protected_symlinks; +extern int sysctl_protected_hardlinks; +extern int sysctl_protected_fifos; +extern int sysctl_protected_regular; + +typedef __kernel_rwf_t rwf_t; + +struct buffer_head; +typedef int 
(get_block_t)(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create); +typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, + ssize_t bytes, void *private); + +#define MAY_EXEC 0x00000001 +#define MAY_WRITE 0x00000002 +#define MAY_READ 0x00000004 +#define MAY_APPEND 0x00000008 +#define MAY_ACCESS 0x00000010 +#define MAY_OPEN 0x00000020 +#define MAY_CHDIR 0x00000040 +/* called from RCU mode, don't block */ +#define MAY_NOT_BLOCK 0x00000080 + +/* + * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond + * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open() + */ + +/* file is open for reading */ +#define FMODE_READ ((__force fmode_t)0x1) +/* file is open for writing */ +#define FMODE_WRITE ((__force fmode_t)0x2) +/* file is seekable */ +#define FMODE_LSEEK ((__force fmode_t)0x4) +/* file can be accessed using pread */ +#define FMODE_PREAD ((__force fmode_t)0x8) +/* file can be accessed using pwrite */ +#define FMODE_PWRITE ((__force fmode_t)0x10) +/* File is opened for execution with sys_execve / sys_uselib */ +#define FMODE_EXEC ((__force fmode_t)0x20) +/* File is opened with O_NDELAY (only set for block devices) */ +#define FMODE_NDELAY ((__force fmode_t)0x40) +/* File is opened with O_EXCL (only set for block devices) */ +#define FMODE_EXCL ((__force fmode_t)0x80) +/* File is opened using open(.., 3, ..) and is writeable only for ioctls + (special hack for floppy.c) */ +#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100) +/* 32bit hashes as llseek() offset (for directories) */ +#define FMODE_32BITHASH ((__force fmode_t)0x200) +/* 64bit hashes as llseek() offset (for directories) */ +#define FMODE_64BITHASH ((__force fmode_t)0x400) + +/* + * Don't update ctime and mtime. + * + * Currently a special hack for the XFS open_by_handle ioctl, but we'll + * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. + */ +#define FMODE_NOCMTIME ((__force fmode_t)0x800) + +/* Expect random access pattern */ +#define FMODE_RANDOM ((__force fmode_t)0x1000) + +/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */ +#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) + +/* File is opened with O_PATH; almost nothing can be done with it */ +#define FMODE_PATH ((__force fmode_t)0x4000) + +/* File needs atomic accesses to f_pos */ +#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) +/* Write access to underlying fs */ +#define FMODE_WRITER ((__force fmode_t)0x10000) +/* Has read method(s) */ +#define FMODE_CAN_READ ((__force fmode_t)0x20000) +/* Has write method(s) */ +#define FMODE_CAN_WRITE ((__force fmode_t)0x40000) + +#define FMODE_OPENED ((__force fmode_t)0x80000) +#define FMODE_CREATED ((__force fmode_t)0x100000) + +/* File is stream-like */ +#define FMODE_STREAM ((__force fmode_t)0x200000) + +/* File was opened by fanotify and shouldn't generate fanotify events */ +#define FMODE_NONOTIFY ((__force fmode_t)0x4000000) + +/* File is capable of returning -EAGAIN if I/O will block */ +#define FMODE_NOWAIT ((__force fmode_t)0x8000000) + +/* File does not contribute to nr_files count */ +#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) + +/* + * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector + * that indicates that they should check the contents of the iovec are + * valid, but not check the memory that the iovec elements + * point to. + */ +#define CHECK_IOVEC_ONLY -1 + +/* + * Attribute flags. These should be or-ed together to figure out what + * has been changed! 
+ */ +#define ATTR_MODE (1 << 0) +#define ATTR_UID (1 << 1) +#define ATTR_GID (1 << 2) +#define ATTR_SIZE (1 << 3) +#define ATTR_ATIME (1 << 4) +#define ATTR_MTIME (1 << 5) +#define ATTR_CTIME (1 << 6) +#define ATTR_ATIME_SET (1 << 7) +#define ATTR_MTIME_SET (1 << 8) +#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */ +#define ATTR_KILL_SUID (1 << 11) +#define ATTR_KILL_SGID (1 << 12) +#define ATTR_FILE (1 << 13) +#define ATTR_KILL_PRIV (1 << 14) +#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ +#define ATTR_TIMES_SET (1 << 16) +#define ATTR_TOUCH (1 << 17) + +/* + * Whiteout is represented by a char device. The following constants define the + * mode and device number to use. + */ +#define WHITEOUT_MODE 0 +#define WHITEOUT_DEV 0 + +/* + * This is the Inode Attributes structure, used for notify_change(). It + * uses the above definitions as flags, to know which values have changed. + * Also, in this manner, a Filesystem can look at only the values it cares + * about. Basically, these are the attributes that the VFS layer can + * request to change from the FS layer. + * + * Derek Atkins 94-10-20 + */ +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + kuid_t ia_uid; + kgid_t ia_gid; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + + /* + * Not an attribute, but an auxiliary info for filesystems wanting to + * implement an ftruncate() like method. NOTE: filesystem should + * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL). + */ + struct file *ia_file; +}; + +/* + * Includes for diskquotas. + */ +#include + +/* + * Maximum number of layers of fs stack. Needs to be limited to + * prevent kernel stack overflow + */ +#define FILESYSTEM_MAX_STACK_DEPTH 2 + +/** + * enum positive_aop_returns - aop return codes with specific semantics + * + * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has + * completed, that the page is still locked, and + * should be considered active. The VM uses this hint + * to return the page to the active list -- it won't + * be a candidate for writeback again in the near + * future. Other callers must be careful to unlock + * the page if they get this return. Returned by + * writepage(); + * + * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has + * unlocked it and the page might have been truncated. + * The caller should back up to acquiring a new page and + * trying again. The aop will be taking reasonable + * precautions not to livelock. If the caller held a page + * reference, it should drop it before retrying. Returned + * by readpage(). + * + * address_space_operation functions return these large constants to indicate + * special semantics to the caller. These are much larger than the bytes in a + * page to allow for functions that return the number of bytes operated on in a + * given page. + */ + +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 0x80000, + AOP_TRUNCATED_PAGE = 0x80001, +}; + +#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */ +#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct + * helper code (eg buffer layer) + * to clear GFP_FS from alloc */ + +/* + * oh the beauties of C type declarations. + */ +struct page; +struct address_space; +struct writeback_control; + +/* + * Write life time hint values. + * Stored in struct inode as u8. 
+ */ +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE, + WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT, + WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM, + WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG, + WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME, +}; + +#define IOCB_EVENTFD (1 << 0) +#define IOCB_APPEND (1 << 1) +#define IOCB_DIRECT (1 << 2) +#define IOCB_HIPRI (1 << 3) +#define IOCB_DSYNC (1 << 4) +#define IOCB_SYNC (1 << 5) +#define IOCB_WRITE (1 << 6) +#define IOCB_NOWAIT (1 << 7) + +struct kiocb { + struct file *ki_filp; + + /* The 'ki_filp' pointer is shared in a union for aio */ + randomized_struct_fields_start + + loff_t ki_pos; + void (*ki_complete)(struct kiocb *iocb, long ret, long ret2); + void *private; + int ki_flags; + u16 ki_hint; + u16 ki_ioprio; /* See linux/ioprio.h */ + + randomized_struct_fields_end +}; + +static inline bool is_sync_kiocb(struct kiocb *kiocb) +{ + return kiocb->ki_complete == NULL; +} + +/* + * "descriptor" for what we're up to with a read. + * This allows us to use the same read code yet + * have multiple different users of the data that + * we read from a file. + * + * The simplest case just copies the data to user + * mode. + */ +typedef struct { + size_t written; + size_t count; + union { + char __user *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*read_actor_t)(read_descriptor_t *, struct page *, + unsigned long, unsigned long); + +struct address_space_operations { + int (*writepage)(struct page *page, struct writeback_control *wbc); + int (*readpage)(struct file *, struct page *); + + /* Write back some dirty pages from this mapping. */ + int (*writepages)(struct address_space *, struct writeback_control *); + + /* Set a page dirty. Return true if this dirtied it */ + int (*set_page_dirty)(struct page *page); + + /* + * Reads in the requested pages. Unlike ->readpage(), this is + * PURELY used for read-ahead!. + */ + int (*readpages)(struct file *filp, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages); + + int (*write_begin)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); + int (*write_end)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); + + /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidatepage) (struct page *, unsigned int, unsigned int); + int (*releasepage) (struct page *, gfp_t); + void (*freepage)(struct page *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); + /* + * migrate the contents of a page to the specified target. If + * migrate_mode is MIGRATE_ASYNC, it must not block. 
+ */ + int (*migratepage) (struct address_space *, + struct page *, struct page *, enum migrate_mode); + bool (*isolate_page)(struct page *, isolate_mode_t); + void (*putback_page)(struct page *); + int (*launder_page) (struct page *); + int (*is_partially_uptodate) (struct page *, unsigned long, + unsigned long); + void (*is_dirty_writeback) (struct page *, bool *, bool *); + int (*error_remove_page)(struct address_space *, struct page *); + + /* swapfile support */ + int (*swap_activate)(struct swap_info_struct *sis, struct file *file, + sector_t *span); + void (*swap_deactivate)(struct file *file); +}; + +extern const struct address_space_operations empty_aops; + +/* + * pagecache_write_begin/pagecache_write_end must be used by general code + * to write into the pagecache. + */ +int pagecache_write_begin(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); + +int pagecache_write_end(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); + +struct address_space { + struct inode *host; /* owner: inode, block_device */ + struct radix_tree_root i_pages; /* cached pages */ + atomic_t i_mmap_writable;/* count VM_SHARED mappings */ + struct rb_root_cached i_mmap; /* tree of private and shared mappings */ + struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ + /* Protected by the i_pages lock */ + unsigned long nrpages; /* number of total pages */ + /* number of shadow or DAX exceptional entries */ + unsigned long nrexceptional; + pgoff_t writeback_index;/* writeback starts here */ + const struct address_space_operations *a_ops; /* methods */ + unsigned long flags; /* error bits */ + spinlock_t private_lock; /* for use by the address_space */ + gfp_t gfp_mask; /* implicit gfp mask for allocations */ + struct list_head private_list; /* for use by the address_space */ + void *private_data; /* ditto */ + errseq_t wb_err; +} __attribute__((aligned(sizeof(long)))) __randomize_layout; + /* + * On most architectures that alignment is already the case; but + * must be enforced here for CRIS, to let the least significant bit + * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. + */ +struct request_queue; + +struct block_device { + dev_t bd_dev; /* not a kdev_t - it's a search key */ + int bd_openers; + struct inode * bd_inode; /* will die */ + struct super_block * bd_super; + struct mutex bd_mutex; /* open/close mutex */ + void * bd_claiming; + void * bd_holder; + int bd_holders; + bool bd_write_holder; +#ifdef CONFIG_SYSFS + struct list_head bd_holder_disks; +#endif + struct block_device * bd_contains; + unsigned bd_block_size; + u8 bd_partno; + struct hd_struct * bd_part; + /* number of times partitions within this device have been opened. */ + unsigned bd_part_count; + int bd_invalidated; + struct gendisk * bd_disk; + struct request_queue * bd_queue; + struct backing_dev_info *bd_bdi; + struct list_head bd_list; + /* + * Private data. You must have bd_claim'ed the block_device + * to use this. NOTE: bd_claim allows an owner to claim + * the same device multiple times, the owner must take special + * care to not mess up bd_private for that case. 
+ */ + unsigned long bd_private; + + /* The counter of freeze processes */ + int bd_fsfreeze_count; + /* Mutex for freeze */ + struct mutex bd_fsfreeze_mutex; +} __randomize_layout; + +/* + * Radix-tree tags, for tagging dirty and writeback pages within the pagecache + * radix trees + */ +#define PAGECACHE_TAG_DIRTY 0 +#define PAGECACHE_TAG_WRITEBACK 1 +#define PAGECACHE_TAG_TOWRITE 2 + +int mapping_tagged(struct address_space *mapping, int tag); + +static inline void i_mmap_lock_write(struct address_space *mapping) +{ + down_write(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_unlock_write(struct address_space *mapping) +{ + up_write(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_lock_read(struct address_space *mapping) +{ + down_read(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_unlock_read(struct address_space *mapping) +{ + up_read(&mapping->i_mmap_rwsem); +} + +/* + * Might pages of this file be mapped into userspace? + */ +static inline int mapping_mapped(struct address_space *mapping) +{ + return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root); +} + +/* + * Might pages of this file have been modified in userspace? + * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff + * marks vma as VM_SHARED if it is shared, and the file was opened for + * writing i.e. vma may be mprotected writable even if now readonly. + * + * If i_mmap_writable is negative, no new writable mappings are allowed. You + * can only deny writable mappings, if none exists right now. + */ +static inline int mapping_writably_mapped(struct address_space *mapping) +{ + return atomic_read(&mapping->i_mmap_writable) > 0; +} + +static inline int mapping_map_writable(struct address_space *mapping) +{ + return atomic_inc_unless_negative(&mapping->i_mmap_writable) ? + 0 : -EPERM; +} + +static inline void mapping_unmap_writable(struct address_space *mapping) +{ + atomic_dec(&mapping->i_mmap_writable); +} + +static inline int mapping_deny_writable(struct address_space *mapping) +{ + return atomic_dec_unless_positive(&mapping->i_mmap_writable) ? + 0 : -EBUSY; +} + +static inline void mapping_allow_writable(struct address_space *mapping) +{ + atomic_inc(&mapping->i_mmap_writable); +} + +/* + * Use sequence counter to get consistent i_size on 32-bit processors. 
+ */ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) +#include +#define __NEED_I_SIZE_ORDERED +#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount) +#else +#define i_size_ordered_init(inode) do { } while (0) +#endif + +struct posix_acl; +#define ACL_NOT_CACHED ((void *)(-1)) +#define ACL_DONT_CACHE ((void *)(-3)) + +static inline struct posix_acl * +uncached_acl_sentinel(struct task_struct *task) +{ + return (void *)task + 1; +} + +static inline bool +is_uncached_acl(struct posix_acl *acl) +{ + return (long)acl & 1; +} + +#define IOP_FASTPERM 0x0001 +#define IOP_LOOKUP 0x0002 +#define IOP_NOFOLLOW 0x0004 +#define IOP_XATTR 0x0008 +#define IOP_DEFAULT_READLINK 0x0010 + +struct fsnotify_mark_connector; + +/* + * Keep mostly read-only and often accessed (especially for + * the RCU path lookup and 'stat' data) fields at the beginning + * of the 'struct inode' + */ +struct inode { + umode_t i_mode; + unsigned short i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + +#ifdef CONFIG_FS_POSIX_ACL + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; +#endif + + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + +#ifdef CONFIG_SECURITY + void *i_security; +#endif + + /* Stat data, not accessed from path walking */ + unsigned long i_ino; + /* + * Filesystems may only read i_nlink directly. They shall use the + * following functions for modification: + * + * (set|clear|inc|drop)_nlink + * inode_(inc|dec)_link_count + */ + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + struct timespec64 i_atime; + struct timespec64 i_mtime; + struct timespec64 i_ctime; + spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ + unsigned short i_bytes; + u8 i_blkbits; + u8 i_write_hint; + blkcnt_t i_blocks; + +#ifdef __NEED_I_SIZE_ORDERED + seqcount_t i_size_seqcount; +#endif + + /* Misc */ + unsigned long i_state; + struct rw_semaphore i_rwsem; + + unsigned long dirtied_when; /* jiffies of first dirtying */ + unsigned long dirtied_time_when; + + struct hlist_node i_hash; + struct list_head i_io_list; /* backing dev IO list */ +#ifdef CONFIG_CGROUP_WRITEBACK + struct bdi_writeback *i_wb; /* the associated cgroup wb */ + + /* foreign inode detection, see wbc_detach_inode() */ + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; +#endif + struct list_head i_lru; /* inode LRU list */ + struct list_head i_sb_list; + struct list_head i_wb_list; /* backing dev writeback list */ + union { + struct hlist_head i_dentry; + struct rcu_head i_rcu; + }; + atomic64_t i_version; + atomic64_t i_sequence; /* see futex */ + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; +#ifdef CONFIG_IMA + atomic_t i_readcount; /* struct files open RO */ +#endif + const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct block_device *i_bdev; + struct cdev *i_cdev; + char *i_link; + unsigned i_dir_seq; + }; + + __u32 i_generation; + +#ifdef CONFIG_FSNOTIFY + __u32 i_fsnotify_mask; /* all events this inode cares about */ + struct fsnotify_mark_connector __rcu *i_fsnotify_marks; +#endif + +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) + struct fscrypt_info *i_crypt_info; +#endif + + void *i_private; /* fs or device private pointer */ +} __randomize_layout; + +static inline unsigned int i_blocksize(const struct 
inode *node) +{ + return (1 << node->i_blkbits); +} + +static inline int inode_unhashed(struct inode *inode) +{ + return hlist_unhashed(&inode->i_hash); +} + +/* + * __mark_inode_dirty expects inodes to be hashed. Since we don't + * want special inodes in the fileset inode space, we make them + * appear hashed, but do not put on any lists. hlist_del() + * will work fine and require no locking. + */ +static inline void inode_fake_hash(struct inode *inode) +{ + hlist_add_fake(&inode->i_hash); +} + +/* + * inode->i_mutex nesting subclasses for the lock validator: + * + * 0: the object of the current VFS operation + * 1: parent + * 2: child/target + * 3: xattr + * 4: second non-directory + * 5: second parent (when locking independent directories in rename) + * + * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two + * non-directories at once. + * + * The locking order between these classes is + * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory + */ +enum inode_i_mutex_lock_class +{ + I_MUTEX_NORMAL, + I_MUTEX_PARENT, + I_MUTEX_CHILD, + I_MUTEX_XATTR, + I_MUTEX_NONDIR2, + I_MUTEX_PARENT2, +}; + +static inline void inode_lock(struct inode *inode) +{ + down_write(&inode->i_rwsem); +} + +static inline void inode_unlock(struct inode *inode) +{ + up_write(&inode->i_rwsem); +} + +static inline void inode_lock_shared(struct inode *inode) +{ + down_read(&inode->i_rwsem); +} + +static inline void inode_unlock_shared(struct inode *inode) +{ + up_read(&inode->i_rwsem); +} + +static inline int inode_trylock(struct inode *inode) +{ + return down_write_trylock(&inode->i_rwsem); +} + +static inline int inode_trylock_shared(struct inode *inode) +{ + return down_read_trylock(&inode->i_rwsem); +} + +static inline int inode_is_locked(struct inode *inode) +{ + return rwsem_is_locked(&inode->i_rwsem); +} + +static inline void inode_lock_nested(struct inode *inode, unsigned subclass) +{ + down_write_nested(&inode->i_rwsem, subclass); +} + +static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass) +{ + down_read_nested(&inode->i_rwsem, subclass); +} + +void lock_two_nondirectories(struct inode *, struct inode*); +void unlock_two_nondirectories(struct inode *, struct inode*); + +/* + * NOTE: in a 32bit arch with a preemptable kernel and + * an UP compile the i_size_read/write must be atomic + * with respect to the local cpu (unlike with preempt disabled), + * but they don't need to be atomic with respect to other cpus like in + * true SMP (so they need either to either locally disable irq around + * the read or for example on x86 they can be still implemented as a + * cmpxchg8b without the need of the lock prefix). For SMP compiles + * and 64bit archs it makes no difference if preempt is enabled or not. + */ +static inline loff_t i_size_read(const struct inode *inode) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + loff_t i_size; + unsigned int seq; + + do { + seq = read_seqcount_begin(&inode->i_size_seqcount); + i_size = inode->i_size; + } while (read_seqcount_retry(&inode->i_size_seqcount, seq)); + return i_size; +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) + loff_t i_size; + + preempt_disable(); + i_size = inode->i_size; + preempt_enable(); + return i_size; +#else + return inode->i_size; +#endif +} + +/* + * NOTE: unlike i_size_read(), i_size_write() does need locking around it + * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount + * can be lost, resulting in subsequent i_size_read() calls spinning forever. 
+ */ +static inline void i_size_write(struct inode *inode, loff_t i_size) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + preempt_disable(); + write_seqcount_begin(&inode->i_size_seqcount); + inode->i_size = i_size; + write_seqcount_end(&inode->i_size_seqcount); + preempt_enable(); +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) + preempt_disable(); + inode->i_size = i_size; + preempt_enable(); +#else + inode->i_size = i_size; +#endif +} + +static inline unsigned iminor(const struct inode *inode) +{ + return MINOR(inode->i_rdev); +} + +static inline unsigned imajor(const struct inode *inode) +{ + return MAJOR(inode->i_rdev); +} + +extern struct block_device *I_BDEV(struct inode *inode); + +struct fown_struct { + rwlock_t lock; /* protects pid, uid, euid fields */ + struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ + enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */ + kuid_t uid, euid; /* uid/euid of process setting the owner */ + int signum; /* posix.1b rt signal to be delivered on IO */ +}; + +/* + * Track a single file's readahead state + */ +struct file_ra_state { + pgoff_t start; /* where readahead started */ + unsigned int size; /* # of readahead pages */ + unsigned int async_size; /* do asynchronous readahead when + there are only # of pages ahead */ + + unsigned int ra_pages; /* Maximum readahead window */ + unsigned int mmap_miss; /* Cache miss stat for mmap accesses */ + loff_t prev_pos; /* Cache last read() position */ +}; + +/* + * Check if @index falls in the readahead windows. + */ +static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) +{ + return (index >= ra->start && + index < ra->start + ra->size); +} + +struct file { + union { + struct llist_node fu_llist; + struct rcu_head fu_rcuhead; + } f_u; + struct path f_path; + struct inode *f_inode; /* cached value */ + const struct file_operations *f_op; + + /* + * Protects f_ep_links, f_flags. + * Must not be taken from IRQ context. + */ + spinlock_t f_lock; + enum rw_hint f_write_hint; + atomic_long_t f_count; + unsigned int f_flags; + fmode_t f_mode; + struct mutex f_pos_lock; + loff_t f_pos; + struct fown_struct f_owner; + const struct cred *f_cred; + struct file_ra_state f_ra; + + u64 f_version; +#ifdef CONFIG_SECURITY + void *f_security; +#endif + /* needed for tty driver, and maybe others */ + void *private_data; + +#ifdef CONFIG_EPOLL + /* Used by fs/eventpoll.c to link all the hooks to this file */ + struct list_head f_ep_links; + struct list_head f_tfile_llink; +#endif /* #ifdef CONFIG_EPOLL */ + struct address_space *f_mapping; + errseq_t f_wb_err; +} __randomize_layout + __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ + +struct file_handle { + __u32 handle_bytes; + int handle_type; + /* file identifier */ + unsigned char f_handle[]; +}; + +static inline struct file *get_file(struct file *f) +{ + atomic_long_inc(&f->f_count); + return f; +} +#define get_file_rcu_many(x, cnt) \ + atomic_long_add_unless(&(x)->f_count, (cnt), 0) +#define get_file_rcu(x) get_file_rcu_many((x), 1) +#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) +#define file_count(x) atomic_long_read(&(x)->f_count) + +#define MAX_NON_LFS ((1UL<<31) - 1) + +/* Page cache limit. The filesystems should put that into their s_maxbytes + limits, otherwise bad things can happen in VM. 
*/ +#if BITS_PER_LONG==32 +#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT) +#elif BITS_PER_LONG==64 +#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX) +#endif + +#define FL_POSIX 1 +#define FL_FLOCK 2 +#define FL_DELEG 4 /* NFSv4 delegation */ +#define FL_ACCESS 8 /* not trying to lock, just looking */ +#define FL_EXISTS 16 /* when unlocking, test for existence */ +#define FL_LEASE 32 /* lease held on this file */ +#define FL_CLOSE 64 /* unlock on close */ +#define FL_SLEEP 128 /* A blocking lock */ +#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */ +#define FL_UNLOCK_PENDING 512 /* Lease is being broken */ +#define FL_OFDLCK 1024 /* lock is "owned" by struct file */ +#define FL_LAYOUT 2048 /* outstanding pNFS layout */ + +#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE) + +/* + * Special return value from posix_lock_file() and vfs_lock_file() for + * asynchronous locking. + */ +#define FILE_LOCK_DEFERRED 1 + +/* legacy typedef, should eventually be removed */ +typedef void *fl_owner_t; + +struct file_lock; + +struct file_lock_operations { + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); +}; + +struct lock_manager_operations { + int (*lm_compare_owner)(struct file_lock *, struct file_lock *); + unsigned long (*lm_owner_key)(struct file_lock *); + fl_owner_t (*lm_get_owner)(fl_owner_t); + void (*lm_put_owner)(fl_owner_t); + void (*lm_notify)(struct file_lock *); /* unblock callback */ + int (*lm_grant)(struct file_lock *, int); + bool (*lm_break)(struct file_lock *); + int (*lm_change)(struct file_lock *, int, struct list_head *); + void (*lm_setup)(struct file_lock *, void **); +}; + +struct lock_manager { + struct list_head list; + /* + * NFSv4 and up also want opens blocked during the grace period; + * NLM doesn't care: + */ + bool block_opens; +}; + +struct net; +void locks_start_grace(struct net *, struct lock_manager *); +void locks_end_grace(struct lock_manager *); +bool locks_in_grace(struct net *); +bool opens_in_grace(struct net *); + +/* that will die - we need it for nfs_lock_info */ +#include + +/* + * struct file_lock represents a generic "file lock". It's used to represent + * POSIX byte range locks, BSD (flock) locks, and leases. It's important to + * note that the same struct is used to represent both a request for a lock and + * the lock itself, but the same object is never used for both. + * + * FIXME: should we create a separate "struct lock_request" to help distinguish + * these two uses? + * + * The various i_flctx lists are ordered by: + * + * 1) lock owner + * 2) lock range start + * 3) lock range end + * + * Obviously, the last two criteria only matter for POSIX locks. + */ +struct file_lock { + struct file_lock *fl_next; /* singly linked list for this inode */ + struct list_head fl_list; /* link into file_lock_context */ + struct hlist_node fl_link; /* node in global lists */ + struct list_head fl_block; /* circular list of blocked processes */ + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + unsigned int fl_pid; + int fl_link_cpu; /* what cpu's list is this on? 
*/ + wait_queue_head_t fl_wait; + struct file *fl_file; + loff_t fl_start; + loff_t fl_end; + + struct fasync_struct * fl_fasync; /* for lease break notifications */ + /* for lease breaks: */ + unsigned long fl_break_time; + unsigned long fl_downgrade_time; + + const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ + const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; /* link in AFS vnode's pending_locks list */ + int state; /* state of grant or error if -ve */ + } afs; + } fl_u; +} __randomize_layout; + +struct file_lock_context { + spinlock_t flc_lock; + struct list_head flc_flock; + struct list_head flc_posix; + struct list_head flc_lease; +}; + +/* The following constant reflects the upper bound of the file/locking space */ +#ifndef OFFSET_MAX +#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1))) +#define OFFSET_MAX INT_LIMIT(loff_t) +#define OFFT_OFFSET_MAX INT_LIMIT(off_t) +#endif + +extern void send_sigio(struct fown_struct *fown, int fd, int band); + +#define locks_inode(f) file_inode(f) + +#ifdef CONFIG_FILE_LOCKING +extern int fcntl_getlk(struct file *, unsigned int, struct flock *); +extern int fcntl_setlk(unsigned int, struct file *, unsigned int, + struct flock *); + +#if BITS_PER_LONG == 32 +extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 *); +extern int fcntl_setlk64(unsigned int, struct file *, unsigned int, + struct flock64 *); +#endif + +extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); +extern int fcntl_getlease(struct file *filp); + +/* fs/locks.c */ +void locks_free_lock_context(struct inode *inode); +void locks_free_lock(struct file_lock *fl); +extern void locks_init_lock(struct file_lock *); +extern struct file_lock * locks_alloc_lock(void); +extern void locks_copy_lock(struct file_lock *, struct file_lock *); +extern void locks_copy_conflock(struct file_lock *, struct file_lock *); +extern void locks_remove_posix(struct file *, fl_owner_t); +extern void locks_remove_file(struct file *); +extern void locks_release_private(struct file_lock *); +extern void posix_test_lock(struct file *, struct file_lock *); +extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); +extern int posix_unblock_lock(struct file_lock *); +extern int vfs_test_lock(struct file *, struct file_lock *); +extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); +extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); +extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); +extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); +extern void lease_get_mtime(struct inode *, struct timespec64 *time); +extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); +extern int vfs_setlease(struct file *, long, struct file_lock **, void **); +extern int lease_modify(struct file_lock *, int, struct list_head *); +struct files_struct; +extern void show_fd_locks(struct seq_file *f, + struct file *filp, struct files_struct *files); +#else /* !CONFIG_FILE_LOCKING */ +static inline int fcntl_getlk(struct file *file, unsigned int cmd, + struct flock __user *user) +{ + return -EINVAL; +} + +static inline int fcntl_setlk(unsigned int fd, struct file *file, + unsigned int cmd, struct flock __user *user) +{ + return -EACCES; +} + +#if BITS_PER_LONG == 32 +static 
inline int fcntl_getlk64(struct file *file, unsigned int cmd, + struct flock64 __user *user) +{ + return -EINVAL; +} + +static inline int fcntl_setlk64(unsigned int fd, struct file *file, + unsigned int cmd, struct flock64 __user *user) +{ + return -EACCES; +} +#endif +static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) +{ + return -EINVAL; +} + +static inline int fcntl_getlease(struct file *filp) +{ + return F_UNLCK; +} + +static inline void +locks_free_lock_context(struct inode *inode) +{ +} + +static inline void locks_init_lock(struct file_lock *fl) +{ + return; +} + +static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) +{ + return; +} + +static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl) +{ + return; +} + +static inline void locks_remove_posix(struct file *filp, fl_owner_t owner) +{ + return; +} + +static inline void locks_remove_file(struct file *filp) +{ + return; +} + +static inline void posix_test_lock(struct file *filp, struct file_lock *fl) +{ + return; +} + +static inline int posix_lock_file(struct file *filp, struct file_lock *fl, + struct file_lock *conflock) +{ + return -ENOLCK; +} + +static inline int posix_unblock_lock(struct file_lock *waiter) +{ + return -ENOENT; +} + +static inline int vfs_test_lock(struct file *filp, struct file_lock *fl) +{ + return 0; +} + +static inline int vfs_lock_file(struct file *filp, unsigned int cmd, + struct file_lock *fl, struct file_lock *conf) +{ + return -ENOLCK; +} + +static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) +{ + return 0; +} + +static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) +{ + return -ENOLCK; +} + +static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) +{ + return 0; +} + +static inline void lease_get_mtime(struct inode *inode, + struct timespec64 *time) +{ + return; +} + +static inline int generic_setlease(struct file *filp, long arg, + struct file_lock **flp, void **priv) +{ + return -EINVAL; +} + +static inline int vfs_setlease(struct file *filp, long arg, + struct file_lock **lease, void **priv) +{ + return -EINVAL; +} + +static inline int lease_modify(struct file_lock *fl, int arg, + struct list_head *dispose) +{ + return -EINVAL; +} + +struct files_struct; +static inline void show_fd_locks(struct seq_file *f, + struct file *filp, struct files_struct *files) {} +#endif /* !CONFIG_FILE_LOCKING */ + +static inline struct inode *file_inode(const struct file *f) +{ + return f->f_inode; +} + +static inline struct dentry *file_dentry(const struct file *file) +{ + return d_real(file->f_path.dentry, file_inode(file)); +} + +static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) +{ + return locks_lock_inode_wait(locks_inode(filp), fl); +} + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; /* singly linked list */ + struct file *fa_file; + struct rcu_head fa_rcu; +}; + +#define FASYNC_MAGIC 0x4601 + +/* SMP safe fasync helpers: */ +extern int fasync_helper(int, struct file *, int, struct fasync_struct **); +extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *); +extern int fasync_remove_entry(struct file *, struct fasync_struct **); +extern struct fasync_struct *fasync_alloc(void); +extern void fasync_free(struct fasync_struct *); + +/* can be called from interrupts */ +extern void kill_fasync(struct 
fasync_struct **, int, int); + +extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); +extern int f_setown(struct file *filp, unsigned long arg, int force); +extern void f_delown(struct file *filp); +extern pid_t f_getown(struct file *filp); +extern int send_sigurg(struct fown_struct *fown); + +/* + * sb->s_flags. Note that these mirror the equivalent MS_* flags where + * represented in both. + */ +#define SB_RDONLY 1 /* Mount read-only */ +#define SB_NOSUID 2 /* Ignore suid and sgid bits */ +#define SB_NODEV 4 /* Disallow access to device special files */ +#define SB_NOEXEC 8 /* Disallow program execution */ +#define SB_SYNCHRONOUS 16 /* Writes are synced at once */ +#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */ +#define SB_DIRSYNC 128 /* Directory modifications are synchronous */ +#define SB_NOATIME 1024 /* Do not update access times. */ +#define SB_NODIRATIME 2048 /* Do not update directory access times */ +#define SB_SILENT 32768 +#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */ +#define SB_I_VERSION (1<<23) /* Update inode I_version field */ +#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ + +/* These sb flags are internal to the kernel */ +#define SB_SUBMOUNT (1<<26) +#define SB_NOSEC (1<<28) +#define SB_BORN (1<<29) +#define SB_ACTIVE (1<<30) +#define SB_NOUSER (1<<31) + +/* + * Umount options + */ + +#define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */ +#define MNT_DETACH 0x00000002 /* Just detach from the tree */ +#define MNT_EXPIRE 0x00000004 /* Mark for expiry */ +#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */ +#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */ + +/* sb->s_iflags */ +#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ +#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ +#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */ +#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */ + +/* sb->s_iflags to limit user namespace mounts */ +#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ +#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 +#define SB_I_UNTRUSTED_MOUNTER 0x00000040 + +/* Possible states of 'frozen' field */ +enum { + SB_UNFROZEN = 0, /* FS is unfrozen */ + SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */ + SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */ + SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop + * internal threads if needed) */ + SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */ +}; + +#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1) + +struct sb_writers { + int frozen; /* Is sb frozen? 
*/ + wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */ + struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS]; +}; + +struct super_block { + struct list_head s_list; /* Keep this first */ + dev_t s_dev; /* search index; _not_ kdev_t */ + unsigned char s_blocksize_bits; + unsigned long s_blocksize; + loff_t s_maxbytes; /* Max file size */ + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + unsigned long s_flags; + unsigned long s_iflags; /* internal SB_I_* flags */ + unsigned long s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; +#ifdef CONFIG_SECURITY + void *s_security; +#endif + const struct xattr_handler **s_xattr; +#if IS_ENABLED(CONFIG_FS_ENCRYPTION) + const struct fscrypt_operations *s_cop; +#endif + struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ + struct list_head s_mounts; /* list of mounts; _not_ for fs use */ + struct block_device *s_bdev; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; /* Bitmask of supported quota types */ + struct quota_info s_dquot; /* Diskquota specific options */ + + struct sb_writers s_writers; + + char s_id[32]; /* Informational name */ + uuid_t s_uuid; /* UUID */ + + void *s_fs_info; /* Filesystem private info */ + unsigned int s_max_links; + fmode_t s_mode; + + /* Granularity of c/m/atime in ns. + Cannot be worse than a second */ + u32 s_time_gran; + + /* + * The next field is for VFS *only*. No filesystems have any business + * even looking at it. You had been warned. + */ + struct mutex s_vfs_rename_mutex; /* Kludge */ + + /* + * Filesystem subtype. If non-empty the filesystem type field + * in /proc/mounts will be "type.subtype" + */ + char *s_subtype; + + const struct dentry_operations *s_d_op; /* default d_op for dentries */ + + /* + * Saved pool identifier for cleancache (-1 means none) + */ + int cleancache_poolid; + + struct shrinker s_shrink; /* per-sb shrinker handle */ + + /* Number of inodes with nlink == 0 but still referenced */ + atomic_long_t s_remove_count; + + /* Pending fsnotify inode refs */ + atomic_long_t s_fsnotify_inode_refs; + + /* Being remounted read-only */ + int s_readonly_remount; + + /* AIO completions deferred from interrupt context */ + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + + /* + * Owning user namespace and default context in which to + * interpret filesystem uids, gids, quotas, device nodes, + * xattrs and security labels. + */ + struct user_namespace *s_user_ns; + + /* + * Keep the lru lists last in the structure so they always sit on their + * own individual cachelines. 
+ */ + struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; + struct list_lru s_inode_lru ____cacheline_aligned_in_smp; + struct rcu_head rcu; + struct work_struct destroy_work; + + struct mutex s_sync_lock; /* sync serialisation lock */ + + /* + * Indicates how deep in a filesystem stack this SB is + */ + int s_stack_depth; + + /* s_inode_list_lock protects s_inodes */ + spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp; + struct list_head s_inodes; /* all inodes */ + + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; /* writeback inodes */ +} __randomize_layout; + +/* Helper functions so that in most cases filesystems will + * not need to deal directly with kuid_t and kgid_t and can + * instead deal with the raw numeric values that are stored + * in the filesystem. + */ +static inline uid_t i_uid_read(const struct inode *inode) +{ + return from_kuid(inode->i_sb->s_user_ns, inode->i_uid); +} + +static inline gid_t i_gid_read(const struct inode *inode) +{ + return from_kgid(inode->i_sb->s_user_ns, inode->i_gid); +} + +static inline void i_uid_write(struct inode *inode, uid_t uid) +{ + inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid); +} + +static inline void i_gid_write(struct inode *inode, gid_t gid) +{ + inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid); +} + +extern struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran); +extern struct timespec64 current_time(struct inode *inode); + +/* + * Snapshotting support. + */ + +void __sb_end_write(struct super_block *sb, int level); +int __sb_start_write(struct super_block *sb, int level, bool wait); + +#define __sb_writers_acquired(sb, lev) \ + percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) +#define __sb_writers_release(sb, lev) \ + percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) + +/** + * sb_end_write - drop write access to a superblock + * @sb: the super we wrote to + * + * Decrement number of writers to the filesystem. Wake up possible waiters + * wanting to freeze the filesystem. + */ +static inline void sb_end_write(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_WRITE); +} + +/** + * sb_end_pagefault - drop write access to a superblock from a page fault + * @sb: the super we wrote to + * + * Decrement number of processes handling write page fault to the filesystem. + * Wake up possible waiters wanting to freeze the filesystem. + */ +static inline void sb_end_pagefault(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_PAGEFAULT); +} + +/** + * sb_end_intwrite - drop write access to a superblock for internal fs purposes + * @sb: the super we wrote to + * + * Decrement fs-internal number of writers to the filesystem. Wake up possible + * waiters wanting to freeze the filesystem. + */ +static inline void sb_end_intwrite(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_FS); +} + +/** + * sb_start_write - get write access to a superblock + * @sb: the super we write to + * + * When a process wants to write data or metadata to a file system (i.e. dirty + * a page or an inode), it should embed the operation in a sb_start_write() - + * sb_end_write() pair to get exclusion against file system freezing. This + * function increments number of writers preventing freezing. If the file + * system is already frozen, the function waits until the file system is + * thawed. + * + * Since freeze protection behaves as a lock, users have to preserve + * ordering of freeze protection and other filesystem locks. 
Generally, + * freeze protection should be the outermost lock. In particular, we have: + * + * sb_start_write + * -> i_mutex (write path, truncate, directory ops, ...) + * -> s_umount (freeze_super, thaw_super) + */ +static inline void sb_start_write(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_WRITE, true); +} + +static inline int sb_start_write_trylock(struct super_block *sb) +{ + return __sb_start_write(sb, SB_FREEZE_WRITE, false); +} + +/** + * sb_start_pagefault - get write access to a superblock from a page fault + * @sb: the super we write to + * + * When a process starts handling write page fault, it should embed the + * operation into sb_start_pagefault() - sb_end_pagefault() pair to get + * exclusion against file system freezing. This is needed since the page fault + * is going to dirty a page. This function increments number of running page + * faults preventing freezing. If the file system is already frozen, the + * function waits until the file system is thawed. + * + * Since page fault freeze protection behaves as a lock, users have to preserve + * ordering of freeze protection and other filesystem locks. It is advised to + * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault + * handling code implies lock dependency: + * + * mmap_sem + * -> sb_start_pagefault + */ +static inline void sb_start_pagefault(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true); +} + +/* + * sb_start_intwrite - get write access to a superblock for internal fs purposes + * @sb: the super we write to + * + * This is the third level of protection against filesystem freezing. It is + * free for use by a filesystem. The only requirement is that it must rank + * below sb_start_pagefault. + * + * For example filesystem can call sb_start_intwrite() when starting a + * transaction which somewhat eases handling of freezing for internal sources + * of filesystem changes (internal fs threads, discarding preallocation on file + * close, etc.). + */ +static inline void sb_start_intwrite(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_FS, true); +} + +static inline int sb_start_intwrite_trylock(struct super_block *sb) +{ + return __sb_start_write(sb, SB_FREEZE_FS, false); +} + + +extern bool inode_owner_or_capable(const struct inode *inode); + +/* + * VFS helper functions.. + */ +extern int vfs_create(struct inode *, struct dentry *, umode_t, bool); +extern int vfs_mkdir(struct inode *, struct dentry *, umode_t); +extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); +extern int vfs_symlink(struct inode *, struct dentry *, const char *); +extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **); +extern int vfs_rmdir(struct inode *, struct dentry *); +extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); +extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); +extern int vfs_whiteout(struct inode *, struct dentry *); + +extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, + int open_flag); + +int vfs_mkobj(struct dentry *, umode_t, + int (*f)(struct dentry *, umode_t, void *), + void *); + +extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); + +/* + * VFS file helper functions. + */ +extern void inode_init_owner(struct inode *inode, const struct inode *dir, + umode_t mode); +extern bool may_open_dev(const struct path *path); +/* + * VFS FS_IOC_FIEMAP helper definitions. 
+ */ +struct fiemap_extent_info { + unsigned int fi_flags; /* Flags as passed from user */ + unsigned int fi_extents_mapped; /* Number of mapped extents */ + unsigned int fi_extents_max; /* Size of fiemap_extent array */ + struct fiemap_extent __user *fi_extents_start; /* Start of + fiemap_extent array */ +}; +int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, + u64 phys, u64 len, u32 flags); +int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); + +/* + * File types + * + * NOTE! These match bits 12..15 of stat.st_mode + * (ie "(i_mode >> 12) & 15"). + */ +#define DT_UNKNOWN 0 +#define DT_FIFO 1 +#define DT_CHR 2 +#define DT_DIR 4 +#define DT_BLK 6 +#define DT_REG 8 +#define DT_LNK 10 +#define DT_SOCK 12 +#define DT_WHT 14 + +/* + * This is the "filldir" function type, used by readdir() to let + * the kernel specify what kind of dirent layout it wants to have. + * This allows the kernel to read directories into kernel space or + * to have different dirent layouts depending on the binary type. + */ +struct dir_context; +typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, + unsigned); + +struct dir_context { + filldir_t actor; + loff_t pos; +}; + +struct block_device_operations; + +/* These macros are for out of kernel modules to test that + * the kernel supports the unlocked_ioctl and compat_ioctl + * fields in struct file_operations. */ +#define HAVE_COMPAT_IOCTL 1 +#define HAVE_UNLOCKED_IOCTL 1 + +/* + * These flags let !MMU mmap() govern direct device mapping vs immediate + * copying more easily for MAP_PRIVATE, especially for ROM filesystems. + * + * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE) + * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED) + * NOMMU_MAP_READ: Can be mapped for reading + * NOMMU_MAP_WRITE: Can be mapped for writing + * NOMMU_MAP_EXEC: Can be mapped for execution + */ +#define NOMMU_MAP_COPY 0x00000001 +#define NOMMU_MAP_DIRECT 0x00000008 +#define NOMMU_MAP_READ VM_MAYREAD +#define NOMMU_MAP_WRITE VM_MAYWRITE +#define NOMMU_MAP_EXEC VM_MAYEXEC + +#define NOMMU_VMFLAGS \ + (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC) + + +struct iov_iter; + +struct file_operations { + struct module *owner; + loff_t (*llseek) (struct file *, loff_t, int); + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); + ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); + int (*iterate) (struct file *, struct dir_context *); + int (*iterate_shared) (struct file *, struct dir_context *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); + long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); + long (*compat_ioctl) (struct file *, unsigned int, unsigned long); + int (*mmap) (struct file *, struct vm_area_struct *); + unsigned long mmap_supported_flags; + int (*open) (struct inode *, struct file *); + int (*flush) (struct file *, fl_owner_t id); + int (*release) (struct inode *, struct file *); + int (*fsync) (struct file *, loff_t, loff_t, int datasync); + int (*fasync) (int, struct file *, int); + int (*lock) (struct file *, int, struct file_lock *); + ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); + unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); + int (*check_flags)(int); + int (*flock) (struct file *, int, struct file_lock *); + 
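The DT_* note above amounts to a one-line conversion from st_mode; the helper below is a hypothetical sketch of that mapping (example_dt_type is not a name used by this header).

#include <linux/types.h>
#include <linux/stat.h>

/* hypothetical helper: DT_* values are exactly (i_mode >> 12) & 15,
 * e.g. (S_IFREG >> 12) == DT_REG and (S_IFCHR >> 12) == DT_CHR */
static inline unsigned char example_dt_type(umode_t mode)
{
	return (mode >> 12) & 15;
}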
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + int (*setlease)(struct file *, long, struct file_lock **, void **); + long (*fallocate)(struct file *file, int mode, loff_t offset, + loff_t len); + void (*show_fdinfo)(struct seq_file *m, struct file *f); +#ifndef CONFIG_MMU + unsigned (*mmap_capabilities)(struct file *); +#endif + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, + loff_t, size_t, unsigned int); + int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, + u64); + int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, + u64); + int (*fadvise)(struct file *, loff_t, loff_t, int); +} __randomize_layout; + +struct inode_operations { + struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); + const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); + int (*permission) (struct inode *, int); + struct posix_acl * (*get_acl)(struct inode *, int); + + int (*readlink) (struct dentry *, char __user *,int); + + int (*create) (struct inode *,struct dentry *, umode_t, bool); + int (*link) (struct dentry *,struct inode *,struct dentry *); + int (*unlink) (struct inode *,struct dentry *); + int (*symlink) (struct inode *,struct dentry *,const char *); + int (*mkdir) (struct inode *,struct dentry *,umode_t); + int (*rmdir) (struct inode *,struct dentry *); + int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t); + int (*rename) (struct inode *, struct dentry *, + struct inode *, struct dentry *, unsigned int); + int (*setattr) (struct dentry *, struct iattr *); + int (*getattr) (const struct path *, struct kstat *, u32, unsigned int); + ssize_t (*listxattr) (struct dentry *, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, + u64 len); + int (*update_time)(struct inode *, struct timespec64 *, int); + int (*atomic_open)(struct inode *, struct dentry *, + struct file *, unsigned open_flag, + umode_t create_mode); + int (*tmpfile) (struct inode *, struct dentry *, umode_t); + int (*set_acl)(struct inode *, struct posix_acl *, int); +} ____cacheline_aligned; + +static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->read_iter(kio, iter); +} + +static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->write_iter(kio, iter); +} + +static inline int call_mmap(struct file *file, struct vm_area_struct *vma) +{ + return file->f_op->mmap(file, vma); +} + +ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, + unsigned long nr_segs, unsigned long fast_segs, + struct iovec *fast_pointer, + struct iovec **ret_pointer); + +extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *); +extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); +extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); +extern ssize_t vfs_readv(struct file *, const struct iovec __user *, + unsigned long, loff_t *, rwf_t); +extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, + loff_t, size_t, unsigned int); +extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, + struct inode *inode_out, loff_t pos_out, + u64 *len, bool is_dedupe); +extern int do_clone_file_range(struct file *file_in, loff_t 
pos_in, + struct file *file_out, loff_t pos_out, u64 len); +extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, u64 len); +extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, + struct inode *dest, loff_t destoff, + loff_t len, bool *is_same); +extern int vfs_dedupe_file_range(struct file *file, + struct file_dedupe_range *same); +extern int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, + struct file *dst_file, loff_t dst_pos, + u64 len); + + +struct super_operations { + struct inode *(*alloc_inode)(struct super_block *sb); + void (*destroy_inode)(struct inode *); + + void (*dirty_inode) (struct inode *, int flags); + int (*write_inode) (struct inode *, struct writeback_control *wbc); + int (*drop_inode) (struct inode *); + void (*evict_inode) (struct inode *); + void (*put_super) (struct super_block *); + int (*sync_fs)(struct super_block *sb, int wait); + int (*freeze_super) (struct super_block *); + int (*freeze_fs) (struct super_block *); + int (*thaw_super) (struct super_block *); + int (*unfreeze_fs) (struct super_block *); + int (*statfs) (struct dentry *, struct kstatfs *); + int (*remount_fs) (struct super_block *, int *, char *); + void (*umount_begin) (struct super_block *); + + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); +#ifdef CONFIG_QUOTA + ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); + ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); + struct dquot **(*get_dquots)(struct inode *); +#endif + int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); + long (*nr_cached_objects)(struct super_block *, + struct shrink_control *); + long (*free_cached_objects)(struct super_block *, + struct shrink_control *); +}; + +/* + * Inode flags - they have no relation to superblock flags now + */ +#define S_SYNC 1 /* Writes are synced at once */ +#define S_NOATIME 2 /* Do not update access times */ +#define S_APPEND 4 /* Append-only file */ +#define S_IMMUTABLE 8 /* Immutable file */ +#define S_DEAD 16 /* removed, but still open directory */ +#define S_NOQUOTA 32 /* Inode is not counted to quota */ +#define S_DIRSYNC 64 /* Directory modifications are synchronous */ +#define S_NOCMTIME 128 /* Do not update file c/mtime */ +#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */ +#define S_PRIVATE 512 /* Inode is fs-internal */ +#define S_IMA 1024 /* Inode has an associated IMA struct */ +#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */ +#define S_NOSEC 4096 /* no suid or xattr security attributes */ +#ifdef CONFIG_FS_DAX +#define S_DAX 8192 /* Direct Access, avoiding the page cache */ +#else +#define S_DAX 0 /* Make all the DAX code disappear */ +#endif +#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */ + +/* + * Note that nosuid etc flags are inode-specific: setting some file-system + * flags just means all the inodes inherit those flags by default. It might be + * possible to override it selectively if you really wanted to with some + * ioctl() that is not currently implemented. + * + * Exception: SB_RDONLY is always applied to the entire file system. + * + * Unfortunately, it is possible to change a filesystems flags with it mounted + * with files in use. 
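A minimal sketch of a super_operations table for an in-memory filesystem, assuming the libfs helpers simple_statfs() and generic_delete_inode() declared later in this header; example_sops is a hypothetical name and the remaining callbacks are simply left NULL.

#include <linux/fs.h>

/* hypothetical minimal table for an in-memory filesystem */
static const struct super_operations example_sops = {
	.statfs		= simple_statfs,	/* libfs helper declared below */
	.drop_inode	= generic_delete_inode,	/* don't cache unreferenced inodes */
};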
This means that all of the inodes will not have their + * i_flags updated. Hence, i_flags no longer inherit the superblock mount + * flags, so these have to be checked separately. -- rmk@arm.uk.linux.org + */ +#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg)) + +static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; } +#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb) +#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \ + ((inode)->i_flags & S_SYNC)) +#define IS_DIRSYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS|SB_DIRSYNC) || \ + ((inode)->i_flags & (S_SYNC|S_DIRSYNC))) +#define IS_MANDLOCK(inode) __IS_FLG(inode, SB_MANDLOCK) +#define IS_NOATIME(inode) __IS_FLG(inode, SB_RDONLY|SB_NOATIME) +#define IS_I_VERSION(inode) __IS_FLG(inode, SB_I_VERSION) + +#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) +#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) +#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) +#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL) + +#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) +#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) +#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE) +#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE) +#define IS_IMA(inode) ((inode)->i_flags & S_IMA) +#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) +#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) +#define IS_DAX(inode) ((inode)->i_flags & S_DAX) +#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED) + +#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ + (inode)->i_rdev == WHITEOUT_DEV) + +static inline bool HAS_UNMAPPED_ID(struct inode *inode) +{ + return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid); +} + +static inline enum rw_hint file_write_hint(struct file *file) +{ + if (file->f_write_hint != WRITE_LIFE_NOT_SET) + return file->f_write_hint; + + return file_inode(file)->i_write_hint; +} + +static inline int iocb_flags(struct file *file); + +static inline u16 ki_hint_validate(enum rw_hint hint) +{ + typeof(((struct kiocb *)0)->ki_hint) max_hint = -1; + + if (hint <= max_hint) + return hint; + return 0; +} + +static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) +{ + *kiocb = (struct kiocb) { + .ki_filp = filp, + .ki_flags = iocb_flags(filp), + .ki_hint = ki_hint_validate(file_write_hint(filp)), + .ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0), + }; +} + +/* + * Inode state bits. Protected by inode->i_lock + * + * Three bits determine the dirty state of the inode, I_DIRTY_SYNC, + * I_DIRTY_DATASYNC and I_DIRTY_PAGES. + * + * Four bits define the lifetime of an inode. Initially, inodes are I_NEW, + * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at + * various stages of removing an inode. + * + * Two bits are used for locking and completion notification, I_NEW and I_SYNC. + * + * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on + * fdatasync(). i_atime is the usual cause. + * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of + * these changes separately from I_DIRTY_SYNC so that we + * don't have to write inode on fdatasync() when only + * mtime has changed in it. + * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. + * I_NEW Serves as both a mutex and completion notification. + * New inodes set I_NEW. If two processes both create + * the same inode, one of them will release its inode and + * wait for I_NEW to be released before returning. 
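init_sync_kiocb() above is what turns an iterator-based ->read_iter into a synchronous call; the sketch below shows that pattern for reads, assuming the file actually implements ->read_iter. example_sync_read_iter() is a hypothetical name.

#include <linux/fs.h>
#include <linux/uio.h>

/* hypothetical helper: drive a ->read_iter instance synchronously;
 * assumes file->f_op->read_iter is non-NULL */
static ssize_t example_sync_read_iter(struct file *file, struct iov_iter *iter,
				      loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);		/* flags/hint taken from the file */
	kiocb.ki_pos = *ppos;
	ret = call_read_iter(file, &kiocb, iter);
	if (ret > 0)
		*ppos = kiocb.ki_pos;
	return ret;
}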
+ *			Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
+ *			also cause waiting on I_NEW, without I_NEW actually
+ *			being set.  find_inode() uses this to prevent returning
+ *			nearly-dead inodes.
+ * I_WILL_FREE		Must be set when calling write_inode_now() if i_count
+ *			is zero.  I_FREEING must be set when I_WILL_FREE is
+ *			cleared.
+ * I_FREEING		Set when inode is about to be freed but still has dirty
+ *			pages or buffers attached or the inode itself is still
+ *			dirty.
+ * I_CLEAR		Added by clear_inode().  In this state the inode is
+ *			clean and can be destroyed.  Inode keeps I_FREEING.
+ *
+ *			Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
+ *			prohibited for many purposes.  iget() must wait for
+ *			the inode to be completely released, then create it
+ *			anew.  Other functions will just ignore such inodes,
+ *			if appropriate.  I_NEW is used for waiting.
+ *
+ * I_SYNC		Writeback of inode is running. The bit is set during
+ *			data writeback, and cleared with a wakeup on the bit
+ *			address once it is done. The bit is also used to pin
+ *			the inode in memory for the flusher thread.
+ *
+ * I_REFERENCED	Marks the inode as recently referenced on the LRU list.
+ *
+ * I_DIO_WAKEUP	Never set.  Only used as a key for wait_on_bit().
+ *
+ * I_WB_SWITCH		Cgroup bdi_writeback switching in progress.  Used to
+ *			synchronize competing switching instances and to tell
+ *			wb stat updates to grab the i_pages lock.  See
+ *			inode_switch_wb_work_fn() for details.
+ *
+ * I_OVL_INUSE		Used by overlayfs to get exclusive ownership on upper
+ *			and work dirs among overlayfs mounts.
+ *
+ * I_CREATING		New object's inode in the middle of setting up.
+ *
+ * I_SYNC_QUEUED	Inode is queued in b_io or b_more_io writeback lists.
+ *			Used to detect that mark_inode_dirty() should not move
+ *			inode between dirty lists.
+ *
+ * Q: What is the difference between I_WILL_FREE and I_FREEING?
+ */ +#define I_DIRTY_SYNC (1 << 0) +#define I_DIRTY_DATASYNC (1 << 1) +#define I_DIRTY_PAGES (1 << 2) +#define __I_NEW 3 +#define I_NEW (1 << __I_NEW) +#define I_WILL_FREE (1 << 4) +#define I_FREEING (1 << 5) +#define I_CLEAR (1 << 6) +#define __I_SYNC 7 +#define I_SYNC (1 << __I_SYNC) +#define I_REFERENCED (1 << 8) +#define __I_DIO_WAKEUP 9 +#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) +#define I_LINKABLE (1 << 10) +#define I_DIRTY_TIME (1 << 11) +#define I_WB_SWITCH (1 << 13) +#define I_OVL_INUSE (1 << 14) +#define I_CREATING (1 << 15) +#define I_SYNC_QUEUED (1 << 17) + +#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) +#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) +#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) + +extern void __mark_inode_dirty(struct inode *, int); +static inline void mark_inode_dirty(struct inode *inode) +{ + __mark_inode_dirty(inode, I_DIRTY); +} + +static inline void mark_inode_dirty_sync(struct inode *inode) +{ + __mark_inode_dirty(inode, I_DIRTY_SYNC); +} + +extern void inc_nlink(struct inode *inode); +extern void drop_nlink(struct inode *inode); +extern void clear_nlink(struct inode *inode); +extern void set_nlink(struct inode *inode, unsigned int nlink); + +static inline void inode_inc_link_count(struct inode *inode) +{ + inc_nlink(inode); + mark_inode_dirty(inode); +} + +static inline void inode_dec_link_count(struct inode *inode) +{ + drop_nlink(inode); + mark_inode_dirty(inode); +} + +enum file_time_flags { + S_ATIME = 1, + S_MTIME = 2, + S_CTIME = 4, + S_VERSION = 8, +}; + +extern bool atime_needs_update(const struct path *, struct inode *); +extern void touch_atime(const struct path *); +static inline void file_accessed(struct file *file) +{ + if (!(file->f_flags & O_NOATIME)) + touch_atime(&file->f_path); +} + +int sync_inode(struct inode *inode, struct writeback_control *wbc); +int sync_inode_metadata(struct inode *inode, int wait); + +struct file_system_type { + const char *name; + int fs_flags; +#define FS_REQUIRES_DEV 1 +#define FS_BINARY_MOUNTDATA 2 +#define FS_HAS_SUBTYPE 4 +#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ +#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. 
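The nlink helpers above are usually used together when directory entries are created; a small sketch, with example_account_new_dir() as a hypothetical mkdir-side helper.

#include <linux/fs.h>

/* hypothetical mkdir path: keep directory link counts consistent */
static void example_account_new_dir(struct inode *dir, struct inode *inode)
{
	set_nlink(inode, 2);		/* "." plus the entry in the parent */
	inode_inc_link_count(dir);	/* the new ".." points back at the parent */
}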
*/ + struct dentry *(*mount) (struct file_system_type *, int, + const char *, void *); + void (*kill_sb) (struct super_block *); + struct module *owner; + struct file_system_type * next; + struct hlist_head fs_supers; + + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[SB_FREEZE_LEVELS]; + + struct lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key i_mutex_dir_key; +}; + +#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) + +extern struct dentry *mount_ns(struct file_system_type *fs_type, + int flags, void *data, void *ns, struct user_namespace *user_ns, + int (*fill_super)(struct super_block *, void *, int)); +#ifdef CONFIG_BLOCK +extern struct dentry *mount_bdev(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, + int (*fill_super)(struct super_block *, void *, int)); +#else +static inline struct dentry *mount_bdev(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, + int (*fill_super)(struct super_block *, void *, int)) +{ + return ERR_PTR(-ENODEV); +} +#endif +extern struct dentry *mount_single(struct file_system_type *fs_type, + int flags, void *data, + int (*fill_super)(struct super_block *, void *, int)); +extern struct dentry *mount_nodev(struct file_system_type *fs_type, + int flags, void *data, + int (*fill_super)(struct super_block *, void *, int)); +extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path); +void generic_shutdown_super(struct super_block *sb); +#ifdef CONFIG_BLOCK +void kill_block_super(struct super_block *sb); +#else +static inline void kill_block_super(struct super_block *sb) +{ + BUG(); +} +#endif +void kill_anon_super(struct super_block *sb); +void kill_litter_super(struct super_block *sb); +void deactivate_super(struct super_block *sb); +void deactivate_locked_super(struct super_block *sb); +int set_anon_super(struct super_block *s, void *data); +int get_anon_bdev(dev_t *); +void free_anon_bdev(dev_t); +struct super_block *sget_userns(struct file_system_type *type, + int (*test)(struct super_block *,void *), + int (*set)(struct super_block *,void *), + int flags, struct user_namespace *user_ns, + void *data); +struct super_block *sget(struct file_system_type *type, + int (*test)(struct super_block *,void *), + int (*set)(struct super_block *,void *), + int flags, void *data); +extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *, + const struct super_operations *ops, + const struct xattr_handler **xattr, + const struct dentry_operations *dops, + unsigned long); + +static inline struct dentry * +mount_pseudo(struct file_system_type *fs_type, char *name, + const struct super_operations *ops, + const struct dentry_operations *dops, unsigned long magic) +{ + return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic); +} + +/* Alas, no aliases. Too much hassle with bringing module.h everywhere */ +#define fops_get(fops) \ + (((fops) && try_module_get((fops)->owner) ? (fops) : NULL)) +#define fops_put(fops) \ + do { if (fops) module_put((fops)->owner); } while(0) +/* + * This one is to be used *ONLY* from ->open() instances. + * fops must be non-NULL, pinned down *and* module dependencies + * should be sufficient to pin the caller down as well. 
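Putting struct file_system_type and the mount_*() helpers together, a minimal sketch of a virtual filesystem follows; the examplefs_* names are hypothetical, and the root directory reuses the libfs simple_dir_* operations declared later in this header.

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/module.h>
#include <linux/stat.h>

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *root;

	sb->s_magic = 0x45584d50;	/* arbitrary value for the sketch */

	root = new_inode(sb);
	if (!root)
		return -ENOMEM;
	root->i_mode = S_IFDIR | 0755;
	root->i_op = &simple_dir_inode_operations;	/* libfs defaults, declared below */
	root->i_fop = &simple_dir_operations;

	sb->s_root = d_make_root(root);		/* drops the inode on failure */
	return sb->s_root ? 0 : -ENOMEM;
}

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, examplefs_fill_super);
}

static struct file_system_type examplefs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= examplefs_mount,
	.kill_sb	= kill_anon_super,
};

A module would then pass &examplefs_type to register_filesystem() at init time and to unregister_filesystem() on exit, both declared just below.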
+ */ +#define replace_fops(f, fops) \ + do { \ + struct file *__file = (f); \ + fops_put(__file->f_op); \ + BUG_ON(!(__file->f_op = (fops))); \ + } while(0) + +extern int register_filesystem(struct file_system_type *); +extern int unregister_filesystem(struct file_system_type *); +extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); +#define kern_mount(type) kern_mount_data(type, NULL) +extern void kern_unmount(struct vfsmount *mnt); +extern int may_umount_tree(struct vfsmount *); +extern int may_umount(struct vfsmount *); +extern long do_mount(const char *, const char __user *, + const char *, unsigned long, void *); +extern struct vfsmount *collect_mounts(const struct path *); +extern void drop_collected_mounts(struct vfsmount *); +extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, + struct vfsmount *); +extern int vfs_statfs(const struct path *, struct kstatfs *); +extern int user_statfs(const char __user *, struct kstatfs *); +extern int fd_statfs(int, struct kstatfs *); +extern int freeze_super(struct super_block *super); +extern int thaw_super(struct super_block *super); +extern bool our_mnt(struct vfsmount *mnt); +extern __printf(2, 3) +int super_setup_bdi_name(struct super_block *sb, char *fmt, ...); +extern int super_setup_bdi(struct super_block *sb); + +extern int current_umask(void); + +extern void ihold(struct inode * inode); +extern void iput(struct inode *); +extern int generic_update_time(struct inode *, struct timespec64 *, int); + +/* /sys/fs */ +extern struct kobject *fs_kobj; + +#define MAX_RW_COUNT (INT_MAX & PAGE_MASK) + +#ifdef CONFIG_MANDATORY_FILE_LOCKING +extern int locks_mandatory_locked(struct file *); +extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char); + +/* + * Candidates for mandatory locking have the setgid bit set + * but no group execute bit - an otherwise meaningless combination. + */ + +static inline int __mandatory_lock(struct inode *ino) +{ + return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID; +} + +/* + * ... 
and these candidates should be on SB_MANDLOCK mounted fs, + * otherwise these will be advisory locks + */ + +static inline int mandatory_lock(struct inode *ino) +{ + return IS_MANDLOCK(ino) && __mandatory_lock(ino); +} + +static inline int locks_verify_locked(struct file *file) +{ + if (mandatory_lock(locks_inode(file))) + return locks_mandatory_locked(file); + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, + struct file *f, + loff_t size) +{ + if (!inode->i_flctx || !mandatory_lock(inode)) + return 0; + + if (size < inode->i_size) { + return locks_mandatory_area(inode, f, size, inode->i_size - 1, + F_WRLCK); + } else { + return locks_mandatory_area(inode, f, inode->i_size, size - 1, + F_WRLCK); + } +} + +#else /* !CONFIG_MANDATORY_FILE_LOCKING */ + +static inline int locks_mandatory_locked(struct file *file) +{ + return 0; +} + +static inline int locks_mandatory_area(struct inode *inode, struct file *filp, + loff_t start, loff_t end, unsigned char type) +{ + return 0; +} + +static inline int __mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_locked(struct file *file) +{ + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, struct file *filp, + size_t size) +{ + return 0; +} + +#endif /* CONFIG_MANDATORY_FILE_LOCKING */ + + +#ifdef CONFIG_FILE_LOCKING +static inline int break_lease(struct inode *inode, unsigned int mode) +{ + /* + * Since this check is lockless, we must ensure that any refcounts + * taken are done before checking i_flctx->flc_lease. Otherwise, we + * could end up racing with tasks trying to set a new lease on this + * file. + */ + smp_mb(); + if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) + return __break_lease(inode, mode, FL_LEASE); + return 0; +} + +static inline int break_deleg(struct inode *inode, unsigned int mode) +{ + /* + * Since this check is lockless, we must ensure that any refcounts + * taken are done before checking i_flctx->flc_lease. Otherwise, we + * could end up racing with tasks trying to set a new lease on this + * file. + */ + smp_mb(); + if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) + return __break_lease(inode, mode, FL_DELEG); + return 0; +} + +static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode) +{ + int ret; + + ret = break_deleg(inode, O_WRONLY|O_NONBLOCK); + if (ret == -EWOULDBLOCK && delegated_inode) { + *delegated_inode = inode; + ihold(inode); + } + return ret; +} + +static inline int break_deleg_wait(struct inode **delegated_inode) +{ + int ret; + + ret = break_deleg(*delegated_inode, O_WRONLY); + iput(*delegated_inode); + *delegated_inode = NULL; + return ret; +} + +static inline int break_layout(struct inode *inode, bool wait) +{ + smp_mb(); + if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) + return __break_lease(inode, + wait ? 
O_WRONLY : O_WRONLY | O_NONBLOCK, + FL_LAYOUT); + return 0; +} + +#else /* !CONFIG_FILE_LOCKING */ +static inline int break_lease(struct inode *inode, unsigned int mode) +{ + return 0; +} + +static inline int break_deleg(struct inode *inode, unsigned int mode) +{ + return 0; +} + +static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode) +{ + return 0; +} + +static inline int break_deleg_wait(struct inode **delegated_inode) +{ + BUG(); + return 0; +} + +static inline int break_layout(struct inode *inode, bool wait) +{ + return 0; +} + +#endif /* CONFIG_FILE_LOCKING */ + +/* fs/open.c */ +struct audit_names; +struct filename { + const char *name; /* pointer to actual string */ + const __user char *uptr; /* original userland pointer */ + int refcnt; + struct audit_names *aname; + const char iname[]; +}; + +extern long vfs_truncate(const struct path *, loff_t); +extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, + struct file *filp); +extern int vfs_fallocate(struct file *file, int mode, loff_t offset, + loff_t len); +extern long do_sys_open(int dfd, const char __user *filename, int flags, + umode_t mode); +extern struct file *file_open_name(struct filename *, int, umode_t); +extern struct file *filp_open(const char *, int, umode_t); +extern struct file *file_open_root(struct dentry *, struct vfsmount *, + const char *, int, umode_t); +extern struct file * dentry_open(const struct path *, int, const struct cred *); +extern struct file * open_with_fake_path(const struct path *, int, + struct inode*, const struct cred *); +static inline struct file *file_clone_open(struct file *file) +{ + return dentry_open(&file->f_path, file->f_flags, file->f_cred); +} +extern int filp_close(struct file *, fl_owner_t id); + +extern struct filename *getname_flags(const char __user *, int, int *); +extern struct filename *getname(const char __user *); +extern struct filename *getname_kernel(const char *); +extern void putname(struct filename *name); + +extern int finish_open(struct file *file, struct dentry *dentry, + int (*open)(struct inode *, struct file *)); +extern int finish_no_open(struct file *file, struct dentry *dentry); + +/* fs/ioctl.c */ + +extern int ioctl_preallocate(struct file *filp, void __user *argp); + +/* fs/dcache.c */ +extern void __init vfs_caches_init_early(void); +extern void __init vfs_caches_init(void); + +extern struct kmem_cache *names_cachep; + +#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) +#define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) + +#ifdef CONFIG_BLOCK +extern int register_blkdev(unsigned int, const char *); +extern void unregister_blkdev(unsigned int, const char *); +extern void bdev_unhash_inode(dev_t dev); +extern struct block_device *bdget(dev_t); +extern struct block_device *bdgrab(struct block_device *bdev); +extern void bd_set_size(struct block_device *, loff_t size); +extern void bd_forget(struct inode *inode); +extern void bdput(struct block_device *); +extern void invalidate_bdev(struct block_device *); +extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); +extern int sync_blockdev(struct block_device *bdev); +extern void kill_bdev(struct block_device *); +extern struct super_block *freeze_bdev(struct block_device *); +extern void emergency_thaw_all(void); +extern void emergency_thaw_bdev(struct super_block *sb); +extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); +extern int fsync_bdev(struct block_device *); + +extern 
struct super_block *blockdev_superblock; + +static inline bool sb_is_blkdev_sb(struct super_block *sb) +{ + return sb == blockdev_superblock; +} +#else +static inline void bd_forget(struct inode *inode) {} +static inline int sync_blockdev(struct block_device *bdev) { return 0; } +static inline void kill_bdev(struct block_device *bdev) {} +static inline void invalidate_bdev(struct block_device *bdev) {} + +static inline struct super_block *freeze_bdev(struct block_device *sb) +{ + return NULL; +} + +static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) +{ + return 0; +} + +static inline int emergency_thaw_bdev(struct super_block *sb) +{ + return 0; +} + +static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) +{ +} + +static inline bool sb_is_blkdev_sb(struct super_block *sb) +{ + return false; +} +#endif +extern int sync_filesystem(struct super_block *); +extern const struct file_operations def_blk_fops; +extern const struct file_operations def_chr_fops; +#ifdef CONFIG_BLOCK +extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); +extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); +extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); +extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); +extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder); +extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, + void *holder); +extern void blkdev_put(struct block_device *bdev, fmode_t mode); +extern int __blkdev_reread_part(struct block_device *bdev); +extern int blkdev_reread_part(struct block_device *bdev); + +#ifdef CONFIG_SYSFS +extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); +extern void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk); +#else +static inline int bd_link_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ + return 0; +} +static inline void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ +} +#endif +#endif + +/* fs/char_dev.c */ +#define CHRDEV_MAJOR_MAX 512 +/* Marks the bottom of the first segment of free char majors */ +#define CHRDEV_MAJOR_DYN_END 234 +/* Marks the top and bottom of the second segment of free char majors */ +#define CHRDEV_MAJOR_DYN_EXT_START 511 +#define CHRDEV_MAJOR_DYN_EXT_END 384 + +extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); +extern int register_chrdev_region(dev_t, unsigned, const char *); +extern int __register_chrdev(unsigned int major, unsigned int baseminor, + unsigned int count, const char *name, + const struct file_operations *fops); +extern void __unregister_chrdev(unsigned int major, unsigned int baseminor, + unsigned int count, const char *name); +extern void unregister_chrdev_region(dev_t, unsigned); +extern void chrdev_show(struct seq_file *,off_t); + +static inline int register_chrdev(unsigned int major, const char *name, + const struct file_operations *fops) +{ + return __register_chrdev(major, 0, 256, name, fops); +} + +static inline void unregister_chrdev(unsigned int major, const char *name) +{ + __unregister_chrdev(major, 0, 256, name); +} + +/* fs/block_dev.c */ +#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ +#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ + +#ifdef CONFIG_BLOCK +#define BLKDEV_MAJOR_MAX 512 +extern const char *__bdevname(dev_t, char *buffer); 
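A short sketch of the register_chrdev()/unregister_chrdev() wrappers defined above; the example_chr_* names are hypothetical, and passing 0 as the major requests dynamic allocation.

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>

static const struct file_operations example_chr_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,		/* libfs helper declared below */
	.llseek	= no_llseek,
};

static int example_chr_major;

static int __init example_chr_init(void)
{
	/* major == 0 asks for a dynamically allocated major number */
	example_chr_major = register_chrdev(0, "example_chr", &example_chr_fops);
	return example_chr_major < 0 ? example_chr_major : 0;
}

static void __exit example_chr_exit(void)
{
	unregister_chrdev(example_chr_major, "example_chr");
}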
+extern const char *bdevname(struct block_device *bdev, char *buffer); +extern struct block_device *lookup_bdev(const char *); +extern void blkdev_show(struct seq_file *,off_t); + +#else +#define BLKDEV_MAJOR_MAX 0 +#endif + +extern void init_special_inode(struct inode *, umode_t, dev_t); + +/* Invalid inode operations -- fs/bad_inode.c */ +extern void make_bad_inode(struct inode *); +extern bool is_bad_inode(struct inode *); + +#ifdef CONFIG_BLOCK +extern void check_disk_size_change(struct gendisk *disk, + struct block_device *bdev, bool verbose); +extern int revalidate_disk(struct gendisk *); +extern int check_disk_change(struct block_device *); +extern int __invalidate_device(struct block_device *, bool); +extern int invalidate_partition(struct gendisk *, int); +#endif +unsigned long invalidate_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t end); + +static inline void invalidate_remote_inode(struct inode *inode) +{ + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) + invalidate_mapping_pages(inode->i_mapping, 0, -1); +} +extern int invalidate_inode_pages2(struct address_space *mapping); +extern int invalidate_inode_pages2_range(struct address_space *mapping, + pgoff_t start, pgoff_t end); +extern int write_inode_now(struct inode *, int); +extern int filemap_fdatawrite(struct address_space *); +extern int filemap_flush(struct address_space *); +extern int filemap_fdatawait_keep_errors(struct address_space *mapping); +extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, + loff_t lend); +extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping, + loff_t start_byte, loff_t end_byte); + +static inline int filemap_fdatawait(struct address_space *mapping) +{ + return filemap_fdatawait_range(mapping, 0, LLONG_MAX); +} + +extern bool filemap_range_has_page(struct address_space *, loff_t lstart, + loff_t lend); +extern int filemap_write_and_wait(struct address_space *mapping); +extern int filemap_write_and_wait_range(struct address_space *mapping, + loff_t lstart, loff_t lend); +extern int __filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end, int sync_mode); +extern int filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end); +extern int filemap_check_errors(struct address_space *mapping); +extern void __filemap_set_wb_err(struct address_space *mapping, int err); + +extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart, + loff_t lend); +extern int __must_check file_check_and_advance_wb_err(struct file *file); +extern int __must_check file_write_and_wait_range(struct file *file, + loff_t start, loff_t end); + +static inline int file_write_and_wait(struct file *file) +{ + return file_write_and_wait_range(file, 0, LLONG_MAX); +} + +/** + * filemap_set_wb_err - set a writeback error on an address_space + * @mapping: mapping in which to set writeback error + * @err: error to be set in mapping + * + * When writeback fails in some way, we must record that error so that + * userspace can be informed when fsync and the like are called. We endeavor + * to report errors on any file that was open at the time of the error. Some + * internal callers also need to know when writeback errors have occurred. + * + * When a writeback error occurs, most filesystems will want to call + * filemap_set_wb_err to record the error in the mapping so that it will be + * automatically reported whenever fsync is called on the file. 
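For the write-and-wait helpers above, a common consumer is a filesystem ->fsync method; a minimal sketch follows, with example_fsync() as a hypothetical callback that only flushes data pages (a real implementation would also write out metadata).

#include <linux/fs.h>

/* hypothetical ->fsync: flush dirty pages in the range, wait, report errors */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	return file_write_and_wait_range(file, start, end);
}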
+ */ +static inline void filemap_set_wb_err(struct address_space *mapping, int err) +{ + /* Fastpath for common case of no error */ + if (unlikely(err)) + __filemap_set_wb_err(mapping, err); +} + +/** + * filemap_check_wb_error - has an error occurred since the mark was sampled? + * @mapping: mapping to check for writeback errors + * @since: previously-sampled errseq_t + * + * Grab the errseq_t value from the mapping, and see if it has changed "since" + * the given value was sampled. + * + * If it has then report the latest error set, otherwise return 0. + */ +static inline int filemap_check_wb_err(struct address_space *mapping, + errseq_t since) +{ + return errseq_check(&mapping->wb_err, since); +} + +/** + * filemap_sample_wb_err - sample the current errseq_t to test for later errors + * @mapping: mapping to be sampled + * + * Writeback errors are always reported relative to a particular sample point + * in the past. This function provides those sample points. + */ +static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) +{ + return errseq_sample(&mapping->wb_err); +} + +extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, + int datasync); +extern int vfs_fsync(struct file *file, int datasync); + +/* + * Sync the bytes written if this was a synchronous write. Expect ki_pos + * to already be updated for the write, and will return either the amount + * of bytes passed in, or an error if syncing the file failed. + */ +static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) +{ + if (iocb->ki_flags & IOCB_DSYNC) { + int ret = vfs_fsync_range(iocb->ki_filp, + iocb->ki_pos - count, iocb->ki_pos - 1, + (iocb->ki_flags & IOCB_SYNC) ? 0 : 1); + if (ret) + return ret; + } + + return count; +} + +extern void emergency_sync(void); +extern void emergency_remount(void); +#ifdef CONFIG_BLOCK +extern sector_t bmap(struct inode *, sector_t); +#endif +extern int notify_change(struct dentry *, struct iattr *, struct inode **); +extern int inode_permission(struct inode *, int); +extern int generic_permission(struct inode *, int); +extern int __check_sticky(struct inode *dir, struct inode *inode); + +static inline bool execute_ok(struct inode *inode) +{ + return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode); +} + +static inline void file_start_write(struct file *file) +{ + if (!S_ISREG(file_inode(file)->i_mode)) + return; + __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true); +} + +static inline bool file_start_write_trylock(struct file *file) +{ + if (!S_ISREG(file_inode(file)->i_mode)) + return true; + return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false); +} + +static inline void file_end_write(struct file *file) +{ + if (!S_ISREG(file_inode(file)->i_mode)) + return; + __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); +} + +/* + * get_write_access() gets write permission for a file. + * put_write_access() releases this write permission. + * This is used for regular files. + * We cannot support write (and maybe mmap read-write shared) accesses and + * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode + * can have the following values: + * 0: no writers, no VM_DENYWRITE mappings + * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist + * > 0: (i_writecount) users are writing to the file. + * + * Normally we operate on that counter with atomic_{inc,dec} and it's safe + * except for the cases where we don't hold i_writecount yet. 
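The errseq_t sample/check helpers above are meant to be used as a pair: sample before the work you care about, check afterwards. The sketch below shows an internal flush routine built that way; example_flush_mapping() is a hypothetical name.

#include <linux/fs.h>
#include <linux/errseq.h>

/* hypothetical internal writeback: report only errors newer than the sample */
static int example_flush_mapping(struct address_space *mapping)
{
	errseq_t since = filemap_sample_wb_err(mapping);
	int err;

	err = filemap_fdatawrite(mapping);		/* start writeback */
	if (err)
		return err;
	err = filemap_fdatawait_keep_errors(mapping);	/* wait; keeps error state */
	if (err)
		return err;
	return filemap_check_wb_err(mapping, since);	/* anything since the sample? */
}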
Then we need to + * use {get,deny}_write_access() - these functions check the sign and refuse + * to do the change if sign is wrong. + */ +static inline int get_write_access(struct inode *inode) +{ + return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY; +} +static inline int deny_write_access(struct file *file) +{ + struct inode *inode = file_inode(file); + return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY; +} +static inline void put_write_access(struct inode * inode) +{ + atomic_dec(&inode->i_writecount); +} +static inline void allow_write_access(struct file *file) +{ + if (file) + atomic_inc(&file_inode(file)->i_writecount); +} +static inline bool inode_is_open_for_write(const struct inode *inode) +{ + return atomic_read(&inode->i_writecount) > 0; +} + +#ifdef CONFIG_IMA +static inline void i_readcount_dec(struct inode *inode) +{ + BUG_ON(!atomic_read(&inode->i_readcount)); + atomic_dec(&inode->i_readcount); +} +static inline void i_readcount_inc(struct inode *inode) +{ + atomic_inc(&inode->i_readcount); +} +#else +static inline void i_readcount_dec(struct inode *inode) +{ + return; +} +static inline void i_readcount_inc(struct inode *inode) +{ + return; +} +#endif +extern int do_pipe_flags(int *, int); + +#define __kernel_read_file_id(id) \ + id(UNKNOWN, unknown) \ + id(FIRMWARE, firmware) \ + id(FIRMWARE_PREALLOC_BUFFER, firmware) \ + id(MODULE, kernel-module) \ + id(KEXEC_IMAGE, kexec-image) \ + id(KEXEC_INITRAMFS, kexec-initramfs) \ + id(POLICY, security-policy) \ + id(X509_CERTIFICATE, x509-certificate) \ + id(MAX_ID, ) + +#define __fid_enumify(ENUM, dummy) READING_ ## ENUM, +#define __fid_stringify(dummy, str) #str, + +enum kernel_read_file_id { + __kernel_read_file_id(__fid_enumify) +}; + +static const char * const kernel_read_file_str[] = { + __kernel_read_file_id(__fid_stringify) +}; + +static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) +{ + if ((unsigned)id >= READING_MAX_ID) + return kernel_read_file_str[READING_UNKNOWN]; + + return kernel_read_file_str[id]; +} + +extern int kernel_read_file(struct file *, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern int kernel_read_file_from_path(const char *, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *); +extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *); +extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *); +extern struct file * open_exec(const char *); + +/* fs/dcache.c -- generic fs support functions */ +extern bool is_subdir(struct dentry *, struct dentry *); +extern bool path_is_under(const struct path *, const struct path *); + +extern char *file_path(struct file *, char *, int); + +#include + +/* needed for stackable file system support */ +extern loff_t default_llseek(struct file *file, loff_t offset, int whence); + +extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence); + +extern int inode_init_always(struct super_block *, struct inode *); +extern void inode_init_once(struct inode *); +extern void address_space_init_once(struct address_space *mapping); +extern struct inode * igrab(struct inode *); +extern ino_t iunique(struct super_block *, ino_t); +extern int inode_needs_sync(struct inode *inode); +extern int generic_delete_inode(struct inode *inode); +static inline int generic_drop_inode(struct inode 
*inode) +{ + return !inode->i_nlink || inode_unhashed(inode); +} + +extern struct inode *ilookup5_nowait(struct super_block *sb, + unsigned long hashval, int (*test)(struct inode *, void *), + void *data); +extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, + int (*test)(struct inode *, void *), void *data); +extern struct inode *ilookup(struct super_block *sb, unsigned long ino); + +extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval, + int (*test)(struct inode *, void *), + int (*set)(struct inode *, void *), + void *data); +extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); +extern struct inode * iget_locked(struct super_block *, unsigned long); +extern struct inode *find_inode_nowait(struct super_block *, + unsigned long, + int (*match)(struct inode *, + unsigned long, void *), + void *data); +extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); +extern int insert_inode_locked(struct inode *); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void lockdep_annotate_inode_mutex_key(struct inode *inode); +#else +static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; +#endif +extern void unlock_new_inode(struct inode *); +extern void discard_new_inode(struct inode *); +extern unsigned int get_next_ino(void); +extern void evict_inodes(struct super_block *sb); + +extern void __iget(struct inode * inode); +extern void iget_failed(struct inode *); +extern void clear_inode(struct inode *); +extern void __destroy_inode(struct inode *); +extern struct inode *new_inode_pseudo(struct super_block *sb); +extern struct inode *new_inode(struct super_block *sb); +extern void free_inode_nonrcu(struct inode *inode); +extern int should_remove_suid(struct dentry *); +extern int file_remove_privs(struct file *); + +extern void __insert_inode_hash(struct inode *, unsigned long hashval); +static inline void insert_inode_hash(struct inode *inode) +{ + __insert_inode_hash(inode, inode->i_ino); +} + +extern void __remove_inode_hash(struct inode *); +static inline void remove_inode_hash(struct inode *inode) +{ + if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash)) + __remove_inode_hash(inode); +} + +extern void inode_sb_list_add(struct inode *inode); + +#ifdef CONFIG_BLOCK +extern int bdev_read_only(struct block_device *); +#endif +extern int set_blocksize(struct block_device *, int); +extern int sb_set_blocksize(struct super_block *, int); +extern int sb_min_blocksize(struct super_block *, int); + +extern int generic_file_mmap(struct file *, struct vm_area_struct *); +extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); +extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); +extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); +extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); +extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); +extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); +extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); + +ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, + rwf_t flags); +ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos, + rwf_t flags); + +/* fs/block_dev.c */ +extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to); 
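iget_locked() above is normally used with the I_NEW protocol; the sketch below shows the usual shape, with example_iget() as a hypothetical lookup helper that fills a freshly allocated inode.

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/stat.h>

/* hypothetical lookup: the usual iget_locked()/I_NEW/unlock_new_inode() dance */
static struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* found in the inode cache, already set up */

	/* a real filesystem would read the on-disk inode here */
	inode->i_mode = S_IFREG | 0644;

	unlock_new_inode(inode);	/* clears I_NEW and wakes concurrent lookups */
	return inode;
}

If reading the on-disk inode fails, the error path would call iget_failed() instead of unlock_new_inode().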
+extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); +extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, + int datasync); +extern void block_sync_page(struct page *page); + +/* fs/splice.c */ +extern ssize_t generic_file_splice_read(struct file *, loff_t *, + struct pipe_inode_info *, size_t, unsigned int); +extern ssize_t iter_file_splice_write(struct pipe_inode_info *, + struct file *, loff_t *, size_t, unsigned int); +extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, + struct file *out, loff_t *, size_t len, unsigned int flags); +extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, + loff_t *opos, size_t len, unsigned int flags); + + +extern void +file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); +extern loff_t noop_llseek(struct file *file, loff_t offset, int whence); +extern loff_t no_llseek(struct file *file, loff_t offset, int whence); +extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize); +extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence); +extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, + int whence, loff_t maxsize, loff_t eof); +extern loff_t fixed_size_llseek(struct file *file, loff_t offset, + int whence, loff_t size); +extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); +extern loff_t no_seek_end_llseek(struct file *, loff_t, int); +extern int generic_file_open(struct inode * inode, struct file * filp); +extern int nonseekable_open(struct inode * inode, struct file * filp); +extern int stream_open(struct inode * inode, struct file * filp); + +#ifdef CONFIG_BLOCK +typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, + loff_t file_offset); + +enum { + /* need locking between buffered and direct access */ + DIO_LOCKING = 0x01, + + /* filesystem does not support filling holes */ + DIO_SKIP_HOLES = 0x02, +}; + +void dio_end_io(struct bio *bio); +void dio_warn_stale_pagecache(struct file *filp); + +ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, + struct block_device *bdev, struct iov_iter *iter, + get_block_t get_block, + dio_iodone_t end_io, dio_submit_t submit_io, + int flags); + +static inline ssize_t blockdev_direct_IO(struct kiocb *iocb, + struct inode *inode, + struct iov_iter *iter, + get_block_t get_block) +{ + return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, + get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES); +} +#endif + +void inode_dio_wait(struct inode *inode); + +/* + * inode_dio_begin - signal start of a direct I/O requests + * @inode: inode the direct I/O happens on + * + * This is called once we've finished processing a direct I/O request, + * and is used to wake up callers waiting for direct I/O to be quiesced. + */ +static inline void inode_dio_begin(struct inode *inode) +{ + atomic_inc(&inode->i_dio_count); +} + +/* + * inode_dio_end - signal finish of a direct I/O requests + * @inode: inode the direct I/O happens on + * + * This is called once we've finished processing a direct I/O request, + * and is used to wake up callers waiting for direct I/O to be quiesced. 
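Under CONFIG_BLOCK, blockdev_direct_IO() above is the usual building block for an address_space ->direct_IO method. A sketch follows; example_direct_IO() and the toy example_get_block() mapping are hypothetical.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* toy 1:1 block mapping, purely for illustration */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

/* hypothetical address_space ->direct_IO built on the wrapper above */
static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	return blockdev_direct_IO(iocb, inode, iter, example_get_block);
}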
+ */ +static inline void inode_dio_end(struct inode *inode) +{ + if (atomic_dec_and_test(&inode->i_dio_count)) + wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); +} + +extern void inode_set_flags(struct inode *inode, unsigned int flags, + unsigned int mask); + +extern const struct file_operations generic_ro_fops; + +#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) + +extern int readlink_copy(char __user *, int, const char *); +extern int page_readlink(struct dentry *, char __user *, int); +extern const char *page_get_link(struct dentry *, struct inode *, + struct delayed_call *); +extern void page_put_link(void *); +extern int __page_symlink(struct inode *inode, const char *symname, int len, + int nofs); +extern int page_symlink(struct inode *inode, const char *symname, int len); +extern const struct inode_operations page_symlink_inode_operations; +extern void kfree_link(void *); +extern void generic_fillattr(struct inode *, struct kstat *); +extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); +extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); +void __inode_add_bytes(struct inode *inode, loff_t bytes); +void inode_add_bytes(struct inode *inode, loff_t bytes); +void __inode_sub_bytes(struct inode *inode, loff_t bytes); +void inode_sub_bytes(struct inode *inode, loff_t bytes); +static inline loff_t __inode_get_bytes(struct inode *inode) +{ + return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes; +} +loff_t inode_get_bytes(struct inode *inode); +void inode_set_bytes(struct inode *inode, loff_t bytes); +const char *simple_get_link(struct dentry *, struct inode *, + struct delayed_call *); +extern const struct inode_operations simple_symlink_inode_operations; + +extern int iterate_dir(struct file *, struct dir_context *); + +extern int vfs_statx(int, const char __user *, int, struct kstat *, u32); +extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int); + +static inline int vfs_stat(const char __user *filename, struct kstat *stat) +{ + return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT, + stat, STATX_BASIC_STATS); +} +static inline int vfs_lstat(const char __user *name, struct kstat *stat) +{ + return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT, + stat, STATX_BASIC_STATS); +} +static inline int vfs_fstatat(int dfd, const char __user *filename, + struct kstat *stat, int flags) +{ + return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT, + stat, STATX_BASIC_STATS); +} +static inline int vfs_fstat(int fd, struct kstat *stat) +{ + return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0); +} + + +extern const char *vfs_get_link(struct dentry *, struct delayed_call *); +extern int vfs_readlink(struct dentry *, char __user *, int); + +extern int __generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, + loff_t start, loff_t len, + get_block_t *get_block); +extern int generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, u64 start, + u64 len, get_block_t *get_block); + +extern struct file_system_type *get_filesystem(struct file_system_type *fs); +extern void put_filesystem(struct file_system_type *fs); +extern struct file_system_type *get_fs_type(const char *name); +extern struct super_block *get_super(struct block_device *); +extern struct super_block *get_super_thawed(struct block_device *); +extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev); +extern struct super_block *get_active_super(struct 
block_device *bdev); +extern void drop_super(struct super_block *sb); +extern void drop_super_exclusive(struct super_block *sb); +extern void iterate_supers(void (*)(struct super_block *, void *), void *); +extern void iterate_supers_type(struct file_system_type *, + void (*)(struct super_block *, void *), void *); + +extern int dcache_dir_open(struct inode *, struct file *); +extern int dcache_dir_close(struct inode *, struct file *); +extern loff_t dcache_dir_lseek(struct file *, loff_t, int); +extern int dcache_readdir(struct file *, struct dir_context *); +extern int simple_setattr(struct dentry *, struct iattr *); +extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int); +extern int simple_statfs(struct dentry *, struct kstatfs *); +extern int simple_open(struct inode *inode, struct file *file); +extern int simple_link(struct dentry *, struct inode *, struct dentry *); +extern int simple_unlink(struct inode *, struct dentry *); +extern int simple_rmdir(struct inode *, struct dentry *); +extern int simple_rename(struct inode *, struct dentry *, + struct inode *, struct dentry *, unsigned int); +extern int noop_fsync(struct file *, loff_t, loff_t, int); +extern int noop_set_page_dirty(struct page *page); +extern void noop_invalidatepage(struct page *page, unsigned int offset, + unsigned int length); +extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); +extern int simple_empty(struct dentry *); +extern int simple_readpage(struct file *file, struct page *page); +extern int simple_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); +extern int simple_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); +extern int always_delete_dentry(const struct dentry *); +extern struct inode *alloc_anon_inode(struct super_block *); +extern int simple_nosetlease(struct file *, long, struct file_lock **, void **); +extern const struct dentry_operations simple_dentry_operations; + +extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); +extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); +extern const struct file_operations simple_dir_operations; +extern const struct inode_operations simple_dir_inode_operations; +extern void make_empty_dir_inode(struct inode *inode); +extern bool is_empty_dir_inode(struct inode *inode); +struct tree_descr { const char *name; const struct file_operations *ops; int mode; }; +struct dentry *d_alloc_name(struct dentry *, const char *); +extern int simple_fill_super(struct super_block *, unsigned long, + const struct tree_descr *); +extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); +extern void simple_release_fs(struct vfsmount **mount, int *count); + +extern ssize_t simple_read_from_buffer(void __user *to, size_t count, + loff_t *ppos, const void *from, size_t available); +extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); + +extern int __generic_file_fsync(struct file *, loff_t, loff_t, int); +extern int generic_file_fsync(struct file *, loff_t, loff_t, int); + +extern int generic_check_addressable(unsigned, u64); + +#ifdef CONFIG_MIGRATION +extern int buffer_migrate_page(struct address_space *, + struct page *, struct page *, + enum migrate_mode); +#else +#define 
buffer_migrate_page NULL +#endif + +extern int setattr_prepare(struct dentry *, struct iattr *); +extern int inode_newsize_ok(const struct inode *, loff_t offset); +extern void setattr_copy(struct inode *inode, const struct iattr *attr); + +extern int file_update_time(struct file *file); + +static inline bool io_is_direct(struct file *filp) +{ + return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host); +} + +static inline bool vma_is_dax(struct vm_area_struct *vma) +{ + return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); +} + +static inline bool vma_is_fsdax(struct vm_area_struct *vma) +{ + struct inode *inode; + + if (!vma->vm_file) + return false; + if (!vma_is_dax(vma)) + return false; + inode = file_inode(vma->vm_file); + if (S_ISCHR(inode->i_mode)) + return false; /* device-dax */ + return true; +} + +static inline int iocb_flags(struct file *file) +{ + int res = 0; + if (file->f_flags & O_APPEND) + res |= IOCB_APPEND; + if (io_is_direct(file)) + res |= IOCB_DIRECT; + if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host)) + res |= IOCB_DSYNC; + if (file->f_flags & __O_SYNC) + res |= IOCB_SYNC; + return res; +} + +static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) +{ + if (unlikely(flags & ~RWF_SUPPORTED)) + return -EOPNOTSUPP; + + if (flags & RWF_NOWAIT) { + if (!(ki->ki_filp->f_mode & FMODE_NOWAIT)) + return -EOPNOTSUPP; + ki->ki_flags |= IOCB_NOWAIT; + } + if (flags & RWF_HIPRI) + ki->ki_flags |= IOCB_HIPRI; + if (flags & RWF_DSYNC) + ki->ki_flags |= IOCB_DSYNC; + if (flags & RWF_SYNC) + ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC); + if (flags & RWF_APPEND) + ki->ki_flags |= IOCB_APPEND; + return 0; +} + +static inline ino_t parent_ino(struct dentry *dentry) +{ + ino_t res; + + /* + * Don't strictly need d_lock here? If the parent ino could change + * then surely we'd have a deeper race in the caller? + */ + spin_lock(&dentry->d_lock); + res = dentry->d_parent->d_inode->i_ino; + spin_unlock(&dentry->d_lock); + return res; +} + +/* Transaction based IO helpers */ + +/* + * An argresp is stored in an allocated page and holds the + * size of the argument or response, along with its content + */ +struct simple_transaction_argresp { + ssize_t size; + char data[0]; +}; + +#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp)) + +char *simple_transaction_get(struct file *file, const char __user *buf, + size_t size); +ssize_t simple_transaction_read(struct file *file, char __user *buf, + size_t size, loff_t *pos); +int simple_transaction_release(struct inode *inode, struct file *file); + +void simple_transaction_set(struct file *file, size_t n); + +/* + * simple attribute files + * + * These attributes behave similar to those in sysfs: + * + * Writing to an attribute immediately sets a value, an open file can be + * written to multiple times. + * + * Reading from an attribute creates a buffer from the value that might get + * read with multiple read calls. When the attribute has been read + * completely, no further read calls are possible until the file is opened + * again. + * + * All attributes contain a text representation of a numeric value + * that are accessed with the get() and set() functions. 
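+ *
+ * A minimal usage sketch, assuming a hypothetical consumer (the names
+ * foo_val, foo_get, foo_set and foo_fops are illustrative only, not part
+ * of this header):
+ *
+ *	static u64 foo_val;
+ *
+ *	static int foo_get(void *data, u64 *val)
+ *	{
+ *		*val = foo_val;
+ *		return 0;
+ *	}
+ *
+ *	static int foo_set(void *data, u64 val)
+ *	{
+ *		foo_val = val;
+ *		return 0;
+ *	}
+ *
+ *	DEFINE_SIMPLE_ATTRIBUTE(foo_fops, foo_get, foo_set, "%llu\n");
+ *
+ * The resulting &foo_fops would then typically be handed to something like
+ * debugfs_create_file() to expose the value as a file.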
+ */ +#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ +static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + __simple_attr_check_format(__fmt, 0ull); \ + return simple_attr_open(inode, file, __get, __set, __fmt); \ +} \ +static const struct file_operations __fops = { \ + .owner = THIS_MODULE, \ + .open = __fops ## _open, \ + .release = simple_attr_release, \ + .read = simple_attr_read, \ + .write = simple_attr_write, \ + .llseek = generic_file_llseek, \ +} + +static inline __printf(1, 2) +void __simple_attr_check_format(const char *fmt, ...) +{ + /* don't do anything, just let the compiler check the arguments; */ +} + +int simple_attr_open(struct inode *inode, struct file *file, + int (*get)(void *, u64 *), int (*set)(void *, u64), + const char *fmt); +int simple_attr_release(struct inode *inode, struct file *file); +ssize_t simple_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); +ssize_t simple_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); + +struct ctl_table; +int proc_nr_files(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +int proc_nr_dentry(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +int proc_nr_inodes(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +int __init get_filesystem_list(char *buf); + +#define __FMODE_EXEC ((__force int) FMODE_EXEC) +#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY) + +#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) +#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ + (flag & __FMODE_NONOTIFY))) + +static inline bool is_sxid(umode_t mode) +{ + return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); +} + +static inline int check_sticky(struct inode *dir, struct inode *inode) +{ + if (!(dir->i_mode & S_ISVTX)) + return 0; + + return __check_sticky(dir, inode); +} + +static inline void inode_has_no_xattr(struct inode *inode) +{ + if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & SB_NOSEC)) + inode->i_flags |= S_NOSEC; +} + +static inline bool is_root_inode(struct inode *inode) +{ + return inode == inode->i_sb->s_root->d_inode; +} + +static inline bool dir_emit(struct dir_context *ctx, + const char *name, int namelen, + u64 ino, unsigned type) +{ + return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0; +} +static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx) +{ + return ctx->actor(ctx, ".", 1, ctx->pos, + file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0; +} +static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx) +{ + return ctx->actor(ctx, "..", 2, ctx->pos, + parent_ino(file->f_path.dentry), DT_DIR) == 0; +} +static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx) +{ + if (ctx->pos == 0) { + if (!dir_emit_dot(file, ctx)) + return false; + ctx->pos = 1; + } + if (ctx->pos == 1) { + if (!dir_emit_dotdot(file, ctx)) + return false; + ctx->pos = 2; + } + return true; +} +static inline bool dir_relax(struct inode *inode) +{ + inode_unlock(inode); + inode_lock(inode); + return !IS_DEADDIR(inode); +} + +static inline bool dir_relax_shared(struct inode *inode) +{ + inode_unlock_shared(inode); + inode_lock_shared(inode); + return !IS_DEADDIR(inode); +} + +extern bool path_noexec(const struct path *path); +extern void inode_nohighmem(struct inode *inode); + +/* mm/fadvise.c */ +extern int vfs_fadvise(struct file *file, 
loff_t offset, loff_t len, + int advice); + +#endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h new file mode 100644 index 000000000..77d783f71 --- /dev/null +++ b/include/linux/fs_enet_pd.h @@ -0,0 +1,165 @@ +/* + * Platform information definitions for the + * universal Freescale Ethernet driver. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef FS_ENET_PD_H +#define FS_ENET_PD_H + +#include +#include +#include +#include +#include + +#define FS_ENET_NAME "fs_enet" + +enum fs_id { + fsid_fec1, + fsid_fec2, + fsid_fcc1, + fsid_fcc2, + fsid_fcc3, + fsid_scc1, + fsid_scc2, + fsid_scc3, + fsid_scc4, +}; + +#define FS_MAX_INDEX 9 + +static inline int fs_get_fec_index(enum fs_id id) +{ + if (id >= fsid_fec1 && id <= fsid_fec2) + return id - fsid_fec1; + return -1; +} + +static inline int fs_get_fcc_index(enum fs_id id) +{ + if (id >= fsid_fcc1 && id <= fsid_fcc3) + return id - fsid_fcc1; + return -1; +} + +static inline int fs_get_scc_index(enum fs_id id) +{ + if (id >= fsid_scc1 && id <= fsid_scc4) + return id - fsid_scc1; + return -1; +} + +static inline int fs_fec_index2id(int index) +{ + int id = fsid_fec1 + index - 1; + if (id >= fsid_fec1 && id <= fsid_fec2) + return id; + return FS_MAX_INDEX; + } + +static inline int fs_fcc_index2id(int index) +{ + int id = fsid_fcc1 + index - 1; + if (id >= fsid_fcc1 && id <= fsid_fcc3) + return id; + return FS_MAX_INDEX; +} + +static inline int fs_scc_index2id(int index) +{ + int id = fsid_scc1 + index - 1; + if (id >= fsid_scc1 && id <= fsid_scc4) + return id; + return FS_MAX_INDEX; +} + +enum fs_mii_method { + fsmii_fixed, + fsmii_fec, + fsmii_bitbang, +}; + +enum fs_ioport { + fsiop_porta, + fsiop_portb, + fsiop_portc, + fsiop_portd, + fsiop_porte, +}; + +struct fs_mii_bit { + u32 offset; + u8 bit; + u8 polarity; +}; +struct fs_mii_bb_platform_info { + struct fs_mii_bit mdio_dir; + struct fs_mii_bit mdio_dat; + struct fs_mii_bit mdc_dat; + int delay; /* delay in us */ + int irq[32]; /* irqs per phy's */ +}; + +struct fs_platform_info { + + void(*init_ioports)(struct fs_platform_info *); + /* device specific information */ + int fs_no; /* controller index */ + char fs_type[4]; /* controller type */ + + u32 cp_page; /* CPM page */ + u32 cp_block; /* CPM sblock */ + u32 cp_command; /* CPM page/sblock/mcn */ + + u32 clk_trx; /* some stuff for pins & mux configuration*/ + u32 clk_rx; + u32 clk_tx; + u32 clk_route; + u32 clk_mask; + + u32 mem_offset; + u32 dpram_offset; + u32 fcc_regs_c; + + u32 device_flags; + + struct device_node *phy_node; + const struct fs_mii_bus_info *bus_info; + + int rx_ring, tx_ring; /* number of buffers on rx */ + __u8 macaddr[ETH_ALEN]; /* mac address */ + int rx_copybreak; /* limit we copy small frames */ + int napi_weight; /* NAPI weight */ + + int use_rmii; /* use RMII mode */ + int has_phy; /* if the network is phy container as well...*/ + + struct clk *clk_per; /* 'per' clock for register access */ +}; +struct fs_mii_fec_platform_info { + u32 irq[32]; + u32 mii_speed; +}; + +static inline int fs_get_id(struct fs_platform_info *fpi) +{ + if(strstr(fpi->fs_type, "SCC")) + return fs_scc_index2id(fpi->fs_no); + if(strstr(fpi->fs_type, "FCC")) + return fs_fcc_index2id(fpi->fs_no); + 
if(strstr(fpi->fs_type, "FEC")) + return fs_fec_index2id(fpi->fs_no); + return fpi->fs_no; +} + +#endif diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h new file mode 100644 index 000000000..7cab74d66 --- /dev/null +++ b/include/linux/fs_pin.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include + +struct fs_pin { + wait_queue_head_t wait; + int done; + struct hlist_node s_list; + struct hlist_node m_list; + void (*kill)(struct fs_pin *); +}; + +struct vfsmount; + +static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *)) +{ + init_waitqueue_head(&p->wait); + INIT_HLIST_NODE(&p->s_list); + INIT_HLIST_NODE(&p->m_list); + p->kill = kill; +} + +void pin_remove(struct fs_pin *); +void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *); +void pin_insert(struct fs_pin *, struct vfsmount *); +void pin_kill(struct fs_pin *); diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h new file mode 100644 index 000000000..54210a42c --- /dev/null +++ b/include/linux/fs_stack.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FS_STACK_H +#define _LINUX_FS_STACK_H + +/* This file defines generic functions used primarily by stackable + * filesystems; none of these functions require i_mutex to be held. + */ + +#include + +/* externs for fs/stack.c */ +extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src); +extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src); + +/* inlines */ +static inline void fsstack_copy_attr_atime(struct inode *dest, + const struct inode *src) +{ + dest->i_atime = src->i_atime; +} + +static inline void fsstack_copy_attr_times(struct inode *dest, + const struct inode *src) +{ + dest->i_atime = src->i_atime; + dest->i_mtime = src->i_mtime; + dest->i_ctime = src->i_ctime; +} + +#endif /* _LINUX_FS_STACK_H */ diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h new file mode 100644 index 000000000..cf1015abf --- /dev/null +++ b/include/linux/fs_struct.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FS_STRUCT_H +#define _LINUX_FS_STRUCT_H + +#include +#include +#include + +struct fs_struct { + int users; + spinlock_t lock; + seqcount_t seq; + int umask; + int in_exec; + struct path root, pwd; +} __randomize_layout; + +extern struct kmem_cache *fs_cachep; + +extern void exit_fs(struct task_struct *); +extern void set_fs_root(struct fs_struct *, const struct path *); +extern void set_fs_pwd(struct fs_struct *, const struct path *); +extern struct fs_struct *copy_fs_struct(struct fs_struct *); +extern void free_fs_struct(struct fs_struct *); +extern int unshare_fs_struct(void); + +static inline void get_fs_root(struct fs_struct *fs, struct path *root) +{ + spin_lock(&fs->lock); + *root = fs->root; + path_get(root); + spin_unlock(&fs->lock); +} + +static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) +{ + spin_lock(&fs->lock); + *pwd = fs->pwd; + path_get(pwd); + spin_unlock(&fs->lock); +} + +extern bool current_chrooted(void); + +#endif /* _LINUX_FS_STRUCT_H */ diff --git a/include/linux/fs_uart_pd.h b/include/linux/fs_uart_pd.h new file mode 100644 index 000000000..36b61ff39 --- /dev/null +++ b/include/linux/fs_uart_pd.h @@ -0,0 +1,71 @@ +/* + * Platform information definitions for the CPM Uart driver. + * + * 2006 (c) MontaVista Software, Inc. + * Vitaly Bordug + * + * This file is licensed under the terms of the GNU General Public License + * version 2. 
This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef FS_UART_PD_H +#define FS_UART_PD_H + +#include + +enum fs_uart_id { + fsid_smc1_uart, + fsid_smc2_uart, + fsid_scc1_uart, + fsid_scc2_uart, + fsid_scc3_uart, + fsid_scc4_uart, + fs_uart_nr, +}; + +static inline int fs_uart_id_scc2fsid(int id) +{ + return fsid_scc1_uart + id - 1; +} + +static inline int fs_uart_id_fsid2scc(int id) +{ + return id - fsid_scc1_uart + 1; +} + +static inline int fs_uart_id_smc2fsid(int id) +{ + return fsid_smc1_uart + id - 1; +} + +static inline int fs_uart_id_fsid2smc(int id) +{ + return id - fsid_smc1_uart + 1; +} + +struct fs_uart_platform_info { + void(*init_ioports)(struct fs_uart_platform_info *); + /* device specific information */ + int fs_no; /* controller index */ + char fs_type[4]; /* controller type */ + u32 uart_clk; + u8 tx_num_fifo; + u8 tx_buf_size; + u8 rx_num_fifo; + u8 rx_buf_size; + u8 brg; + u8 clk_rx; + u8 clk_tx; +}; + +static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi) +{ + if(strstr(fpi->fs_type, "SMC")) + return fs_uart_id_smc2fsid(fpi->fs_no); + if(strstr(fpi->fs_type, "SCC")) + return fs_uart_id_scc2fsid(fpi->fs_no); + return fpi->fs_no; +} + +#endif diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h new file mode 100644 index 000000000..610815e3f --- /dev/null +++ b/include/linux/fscache-cache.h @@ -0,0 +1,569 @@ +/* General filesystem caching backing cache interface + * + * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * NOTE!!! See: + * + * Documentation/filesystems/caching/backend-api.txt + * + * for a description of the cache backend interface declared here. + */ + +#ifndef _LINUX_FSCACHE_CACHE_H +#define _LINUX_FSCACHE_CACHE_H + +#include +#include +#include + +#define NR_MAXCACHES BITS_PER_LONG + +struct fscache_cache; +struct fscache_cache_ops; +struct fscache_object; +struct fscache_operation; + +enum fscache_obj_ref_trace { + fscache_obj_get_add_to_deps, + fscache_obj_get_queue, + fscache_obj_put_alloc_fail, + fscache_obj_put_attach_fail, + fscache_obj_put_drop_obj, + fscache_obj_put_enq_dep, + fscache_obj_put_queue, + fscache_obj_put_work, + fscache_obj_ref__nr_traces +}; + +/* + * cache tag definition + */ +struct fscache_cache_tag { + struct list_head link; + struct fscache_cache *cache; /* cache referred to by this tag */ + unsigned long flags; +#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ + atomic_t usage; + char name[0]; /* tag name */ +}; + +/* + * cache definition + */ +struct fscache_cache { + const struct fscache_cache_ops *ops; + struct fscache_cache_tag *tag; /* tag representing this cache */ + struct kobject *kobj; /* system representation of this cache */ + struct list_head link; /* link in list of caches */ + size_t max_index_size; /* maximum size of index data */ + char identifier[36]; /* cache label */ + + /* node management */ + struct work_struct op_gc; /* operation garbage collector */ + struct list_head object_list; /* list of data/index objects */ + struct list_head op_gc_list; /* list of ops to be deleted */ + spinlock_t object_list_lock; + spinlock_t op_gc_list_lock; + atomic_t object_count; /* no. 
of live objects in this cache */ + struct fscache_object *fsdef; /* object for the fsdef index */ + unsigned long flags; +#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */ +#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */ +}; + +extern wait_queue_head_t fscache_cache_cleared_wq; + +/* + * operation to be applied to a cache object + * - retrieval initiation operations are done in the context of the process + * that issued them, and not in an async thread pool + */ +typedef void (*fscache_operation_release_t)(struct fscache_operation *op); +typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); +typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op); + +enum fscache_operation_state { + FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */ + FSCACHE_OP_ST_INITIALISED, /* Op is initialised */ + FSCACHE_OP_ST_PENDING, /* Op is blocked from running */ + FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */ + FSCACHE_OP_ST_COMPLETE, /* Op is complete */ + FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */ + FSCACHE_OP_ST_DEAD /* Op is now dead */ +}; + +struct fscache_operation { + struct work_struct work; /* record for async ops */ + struct list_head pend_link; /* link in object->pending_ops */ + struct fscache_object *object; /* object to be operated upon */ + + unsigned long flags; +#define FSCACHE_OP_TYPE 0x000f /* operation type */ +#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */ +#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */ +#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ +#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ +#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */ +#define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */ +#define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */ + + enum fscache_operation_state state; + atomic_t usage; + unsigned debug_id; /* debugging ID */ + + /* operation processor callback + * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform + * the op in a non-pool thread */ + fscache_operation_processor_t processor; + + /* Operation cancellation cleanup (optional) */ + fscache_operation_cancel_t cancel; + + /* operation releaser */ + fscache_operation_release_t release; +}; + +extern atomic_t fscache_op_debug_id; +extern void fscache_op_work_func(struct work_struct *work); + +extern void fscache_enqueue_operation(struct fscache_operation *); +extern void fscache_op_complete(struct fscache_operation *, bool); +extern void fscache_put_operation(struct fscache_operation *); +extern void fscache_operation_init(struct fscache_cookie *, + struct fscache_operation *, + fscache_operation_processor_t, + fscache_operation_cancel_t, + fscache_operation_release_t); + +/* + * data read operation + */ +struct fscache_retrieval { + struct fscache_operation op; + struct fscache_cookie *cookie; /* The netfs cookie */ + struct address_space *mapping; /* netfs pages */ + fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ + void *context; /* netfs read context (pinned) */ + struct list_head to_do; /* list of things to be done by the backend */ + unsigned long start_time; /* time at which retrieval started */ + atomic_t n_pages; /* number of pages to be retrieved */ +}; + +typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op, + struct page *page, + gfp_t gfp); + +typedef int 
(*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op, + struct list_head *pages, + unsigned *nr_pages, + gfp_t gfp); + +/** + * fscache_get_retrieval - Get an extra reference on a retrieval operation + * @op: The retrieval operation to get a reference on + * + * Get an extra reference on a retrieval operation. + */ +static inline +struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op) +{ + atomic_inc(&op->op.usage); + return op; +} + +/** + * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing + * @op: The retrieval operation affected + * + * Enqueue a retrieval operation for processing by the FS-Cache thread pool. + */ +static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) +{ + fscache_enqueue_operation(&op->op); +} + +/** + * fscache_retrieval_complete - Record (partial) completion of a retrieval + * @op: The retrieval operation affected + * @n_pages: The number of pages to account for + */ +static inline void fscache_retrieval_complete(struct fscache_retrieval *op, + int n_pages) +{ + if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0) + fscache_op_complete(&op->op, false); +} + +/** + * fscache_put_retrieval - Drop a reference to a retrieval operation + * @op: The retrieval operation affected + * + * Drop a reference to a retrieval operation. + */ +static inline void fscache_put_retrieval(struct fscache_retrieval *op) +{ + fscache_put_operation(&op->op); +} + +/* + * cached page storage work item + * - used to do three things: + * - batch writes to the cache + * - do cache writes asynchronously + * - defer writes until cache object lookup completion + */ +struct fscache_storage { + struct fscache_operation op; + pgoff_t store_limit; /* don't write more than this */ +}; + +/* + * cache operations + */ +struct fscache_cache_ops { + /* name of cache provider */ + const char *name; + + /* allocate an object record for a cookie */ + struct fscache_object *(*alloc_object)(struct fscache_cache *cache, + struct fscache_cookie *cookie); + + /* look up the object for a cookie + * - return -ETIMEDOUT to be requeued + */ + int (*lookup_object)(struct fscache_object *object); + + /* finished looking up */ + void (*lookup_complete)(struct fscache_object *object); + + /* increment the usage count on this object (may fail if unmounting) */ + struct fscache_object *(*grab_object)(struct fscache_object *object, + enum fscache_obj_ref_trace why); + + /* pin an object in the cache */ + int (*pin_object)(struct fscache_object *object); + + /* unpin an object in the cache */ + void (*unpin_object)(struct fscache_object *object); + + /* check the consistency between the backing cache and the FS-Cache + * cookie */ + int (*check_consistency)(struct fscache_operation *op); + + /* store the updated auxiliary data on an object */ + void (*update_object)(struct fscache_object *object); + + /* Invalidate an object */ + void (*invalidate_object)(struct fscache_operation *op); + + /* discard the resources pinned by an object and effect retirement if + * necessary */ + void (*drop_object)(struct fscache_object *object); + + /* dispose of a reference to an object */ + void (*put_object)(struct fscache_object *object, + enum fscache_obj_ref_trace why); + + /* sync a cache */ + void (*sync_cache)(struct fscache_cache *cache); + + /* notification that the attributes of a non-index object (such as + * i_size) have changed */ + int (*attr_changed)(struct fscache_object *object); + + /* reserve space for an object's data and associated 
metadata */ + int (*reserve_space)(struct fscache_object *object, loff_t i_size); + + /* request a backing block for a page be read or allocated in the + * cache */ + fscache_page_retrieval_func_t read_or_alloc_page; + + /* request backing blocks for a list of pages be read or allocated in + * the cache */ + fscache_pages_retrieval_func_t read_or_alloc_pages; + + /* request a backing block for a page be allocated in the cache so that + * it can be written directly */ + fscache_page_retrieval_func_t allocate_page; + + /* request backing blocks for pages be allocated in the cache so that + * they can be written directly */ + fscache_pages_retrieval_func_t allocate_pages; + + /* write a page to its backing block in the cache */ + int (*write_page)(struct fscache_storage *op, struct page *page); + + /* detach backing block from a page (optional) + * - must release the cookie lock before returning + * - may sleep + */ + void (*uncache_page)(struct fscache_object *object, + struct page *page); + + /* dissociate a cache from all the pages it was backing */ + void (*dissociate_pages)(struct fscache_cache *cache); +}; + +extern struct fscache_cookie fscache_fsdef_index; + +/* + * Event list for fscache_object::{event_mask,events} + */ +enum { + FSCACHE_OBJECT_EV_NEW_CHILD, /* T if object has a new child */ + FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */ + FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */ + FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */ + FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */ + FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */ + FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */ + NR_FSCACHE_OBJECT_EVENTS +}; + +#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1) + +/* + * States for object state machine. 
+ */ +struct fscache_transition { + unsigned long events; + const struct fscache_state *transit_to; +}; + +struct fscache_state { + char name[24]; + char short_name[8]; + const struct fscache_state *(*work)(struct fscache_object *object, + int event); + const struct fscache_transition transitions[]; +}; + +/* + * on-disk cache file or index handle + */ +struct fscache_object { + const struct fscache_state *state; /* Object state machine state */ + const struct fscache_transition *oob_table; /* OOB state transition table */ + int debug_id; /* debugging ID */ + int n_children; /* number of child objects */ + int n_ops; /* number of extant ops on object */ + int n_obj_ops; /* number of object ops outstanding on object */ + int n_in_progress; /* number of ops in progress */ + int n_exclusive; /* number of exclusive ops queued or in progress */ + atomic_t n_reads; /* number of read ops in progress */ + spinlock_t lock; /* state and operations lock */ + + unsigned long lookup_jif; /* time at which lookup started */ + unsigned long oob_event_mask; /* OOB events this object is interested in */ + unsigned long event_mask; /* events this object is interested in */ + unsigned long events; /* events to be processed by this object + * (order is important - using fls) */ + + unsigned long flags; +#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ +#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */ +#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */ +#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */ +#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */ +#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ +#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ +#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ +#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */ + + struct list_head cache_link; /* link in cache->object_list */ + struct hlist_node cookie_link; /* link in cookie->backing_objects */ + struct fscache_cache *cache; /* cache that supplied this object */ + struct fscache_cookie *cookie; /* netfs's file/index object */ + struct fscache_object *parent; /* parent object */ + struct work_struct work; /* attention scheduling record */ + struct list_head dependents; /* FIFO of dependent objects */ + struct list_head dep_link; /* link in parent's dependents list */ + struct list_head pending_ops; /* unstarted operations on this object */ +#ifdef CONFIG_FSCACHE_OBJECT_LIST + struct rb_node objlist_link; /* link in global object list */ +#endif + pgoff_t store_limit; /* current storage limit */ + loff_t store_limit_l; /* current storage limit */ +}; + +extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *, + struct fscache_cache *); +extern void fscache_object_destroy(struct fscache_object *); + +extern void fscache_object_lookup_negative(struct fscache_object *object); +extern void fscache_obtained_object(struct fscache_object *object); + +static inline bool fscache_object_is_live(struct fscache_object *object) +{ + return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); +} + +static inline bool fscache_object_is_dying(struct fscache_object *object) +{ + return !fscache_object_is_live(object); +} + +static inline bool fscache_object_is_available(struct fscache_object *object) +{ + return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, 
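+ *
+ * A minimal pairing sketch (the example_* helpers are hypothetical and shown
+ * for illustration only); a cache backend typically brackets work that
+ * touches the cookie with fscache_use_cookie()/fscache_unuse_cookie():
+ *
+ *	static void example_do_work(struct fscache_object *object)
+ *	{
+ *		if (!fscache_use_cookie(object))
+ *			return;
+ *		example_touch_cookie_state(object->cookie);
+ *		fscache_unuse_cookie(object);
+ *	}
+ *
+ * If fscache_use_cookie() returns false, the cookie is already being
+ * relinquished and the work must not be started.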
&object->flags); +} + +static inline bool fscache_cache_is_broken(struct fscache_object *object) +{ + return test_bit(FSCACHE_IOERROR, &object->cache->flags); +} + +static inline bool fscache_object_is_active(struct fscache_object *object) +{ + return fscache_object_is_available(object) && + fscache_object_is_live(object) && + !fscache_cache_is_broken(object); +} + +/** + * fscache_object_destroyed - Note destruction of an object in a cache + * @cache: The cache from which the object came + * + * Note the destruction and deallocation of an object record in a cache. + */ +static inline void fscache_object_destroyed(struct fscache_cache *cache) +{ + if (atomic_dec_and_test(&cache->object_count)) + wake_up_all(&fscache_cache_cleared_wq); +} + +/** + * fscache_object_lookup_error - Note an object encountered an error + * @object: The object on which the error was encountered + * + * Note that an object encountered a fatal error (usually an I/O error) and + * that it should be withdrawn as soon as possible. + */ +static inline void fscache_object_lookup_error(struct fscache_object *object) +{ + set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events); +} + +/** + * fscache_set_store_limit - Set the maximum size to be stored in an object + * @object: The object to set the maximum on + * @i_size: The limit to set in bytes + * + * Set the maximum size an object is permitted to reach, implying the highest + * byte that may be written. Intended to be called by the attr_changed() op. + * + * See Documentation/filesystems/caching/backend-api.txt for a complete + * description. + */ +static inline +void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) +{ + object->store_limit_l = i_size; + object->store_limit = i_size >> PAGE_SHIFT; + if (i_size & ~PAGE_MASK) + object->store_limit++; +} + +/** + * fscache_end_io - End a retrieval operation on a page + * @op: The FS-Cache operation covering the retrieval + * @page: The page that was to be fetched + * @error: The error code (0 if successful) + * + * Note the end of an operation to retrieve a page, as covered by a particular + * operation record. + */ +static inline void fscache_end_io(struct fscache_retrieval *op, + struct page *page, int error) +{ + op->end_io_func(page, op->context, error); +} + +static inline void __fscache_use_cookie(struct fscache_cookie *cookie) +{ + atomic_inc(&cookie->n_active); +} + +/** + * fscache_use_cookie - Request usage of cookie attached to an object + * @object: Object description + * + * Request usage of the cookie attached to an object. NULL is returned if the + * relinquishment had reduced the cookie usage count to 0. + */ +static inline bool fscache_use_cookie(struct fscache_object *object) +{ + struct fscache_cookie *cookie = object->cookie; + return atomic_inc_not_zero(&cookie->n_active) != 0; +} + +static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie) +{ + return atomic_dec_and_test(&cookie->n_active); +} + +static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie) +{ + wake_up_var(&cookie->n_active); +} + +/** + * fscache_unuse_cookie - Cease usage of cookie attached to an object + * @object: Object description + * + * Cease usage of the cookie attached to an object. When the users count + * reaches zero then the cookie relinquishment will be permitted to proceed. 
+ */ +static inline void fscache_unuse_cookie(struct fscache_object *object) +{ + struct fscache_cookie *cookie = object->cookie; + if (__fscache_unuse_cookie(cookie)) + __fscache_wake_unused_cookie(cookie); +} + +/* + * out-of-line cache backend functions + */ +extern __printf(3, 4) +void fscache_init_cache(struct fscache_cache *cache, + const struct fscache_cache_ops *ops, + const char *idfmt, ...); + +extern int fscache_add_cache(struct fscache_cache *cache, + struct fscache_object *fsdef, + const char *tagname); +extern void fscache_withdraw_cache(struct fscache_cache *cache); + +extern void fscache_io_error(struct fscache_cache *cache); + +extern void fscache_mark_page_cached(struct fscache_retrieval *op, + struct page *page); + +extern void fscache_mark_pages_cached(struct fscache_retrieval *op, + struct pagevec *pagevec); + +extern bool fscache_object_sleep_till_congested(signed long *timeoutp); + +extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, + const void *data, + uint16_t datalen, + loff_t object_size); + +extern void fscache_object_retrying_stale(struct fscache_object *object); + +enum fscache_why_object_killed { + FSCACHE_OBJECT_IS_STALE, + FSCACHE_OBJECT_NO_SPACE, + FSCACHE_OBJECT_WAS_RETIRED, + FSCACHE_OBJECT_WAS_CULLED, +}; +extern void fscache_object_mark_killed(struct fscache_object *object, + enum fscache_why_object_killed why); + +#endif /* _LINUX_FSCACHE_CACHE_H */ diff --git a/include/linux/fscache.h b/include/linux/fscache.h new file mode 100644 index 000000000..84b90a79d --- /dev/null +++ b/include/linux/fscache.h @@ -0,0 +1,847 @@ +/* General filesystem caching interface + * + * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * NOTE!!! See: + * + * Documentation/filesystems/caching/netfs-api.txt + * + * for a description of the network filesystem interface declared here. 
+ */ + +#ifndef _LINUX_FSCACHE_H +#define _LINUX_FSCACHE_H + +#include +#include +#include +#include +#include + +#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE) +#define fscache_available() (1) +#define fscache_cookie_valid(cookie) (cookie) +#else +#define fscache_available() (0) +#define fscache_cookie_valid(cookie) (0) +#endif + + +/* + * overload PG_private_2 to give us PG_fscache - this is used to indicate that + * a page is currently backed by a local disk cache + */ +#define PageFsCache(page) PagePrivate2((page)) +#define SetPageFsCache(page) SetPagePrivate2((page)) +#define ClearPageFsCache(page) ClearPagePrivate2((page)) +#define TestSetPageFsCache(page) TestSetPagePrivate2((page)) +#define TestClearPageFsCache(page) TestClearPagePrivate2((page)) + +/* pattern used to fill dead space in an index entry */ +#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79 + +struct pagevec; +struct fscache_cache_tag; +struct fscache_cookie; +struct fscache_netfs; + +typedef void (*fscache_rw_complete_t)(struct page *page, + void *context, + int error); + +/* result of index entry consultation */ +enum fscache_checkaux { + FSCACHE_CHECKAUX_OKAY, /* entry okay as is */ + FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */ + FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */ +}; + +/* + * fscache cookie definition + */ +struct fscache_cookie_def { + /* name of cookie type */ + char name[16]; + + /* cookie type */ + uint8_t type; +#define FSCACHE_COOKIE_TYPE_INDEX 0 +#define FSCACHE_COOKIE_TYPE_DATAFILE 1 + + /* select the cache into which to insert an entry in this index + * - optional + * - should return a cache identifier or NULL to cause the cache to be + * inherited from the parent if possible or the first cache picked + * for a non-index file if not + */ + struct fscache_cache_tag *(*select_cache)( + const void *parent_netfs_data, + const void *cookie_netfs_data); + + /* consult the netfs about the state of an object + * - this function can be absent if the index carries no state data + * - the netfs data from the cookie being used as the target is + * presented, as is the auxiliary data and the object size + */ + enum fscache_checkaux (*check_aux)(void *cookie_netfs_data, + const void *data, + uint16_t datalen, + loff_t object_size); + + /* get an extra reference on a read context + * - this function can be absent if the completion function doesn't + * require a context + */ + void (*get_context)(void *cookie_netfs_data, void *context); + + /* release an extra reference on a read context + * - this function can be absent if the completion function doesn't + * require a context + */ + void (*put_context)(void *cookie_netfs_data, void *context); + + /* indicate page that now have cache metadata retained + * - this function should mark the specified page as now being cached + * - the page will have been marked with PG_fscache before this is + * called, so this is optional + */ + void (*mark_page_cached)(void *cookie_netfs_data, + struct address_space *mapping, + struct page *page); +}; + +/* + * fscache cached network filesystem type + * - name, version and ops must be filled in before registration + * - all other fields will be set during registration + */ +struct fscache_netfs { + uint32_t version; /* indexing version */ + const char *name; /* filesystem name */ + struct fscache_cookie *primary_index; +}; + +/* + * data file or index object cookie + * - a file will only appear in one cache + * - a request to cache a file may or may not be honoured, subject to + * constraints 
such as disk space + * - indices are created on disk just-in-time + */ +struct fscache_cookie { + atomic_t usage; /* number of users of this cookie */ + atomic_t n_children; /* number of children of this cookie */ + atomic_t n_active; /* number of active users of netfs ptrs */ + spinlock_t lock; + spinlock_t stores_lock; /* lock on page store tree */ + struct hlist_head backing_objects; /* object(s) backing this file/index */ + const struct fscache_cookie_def *def; /* definition */ + struct fscache_cookie *parent; /* parent of this entry */ + struct hlist_bl_node hash_link; /* Link in hash table */ + void *netfs_data; /* back pointer to netfs */ + struct radix_tree_root stores; /* pages to be stored on this cookie */ +#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ +#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */ + + unsigned long flags; +#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ +#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */ +#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */ +#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */ +#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */ +#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */ +#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */ +#define FSCACHE_COOKIE_AUX_UPDATED 8 /* T if the auxiliary data was updated */ +#define FSCACHE_COOKIE_ACQUIRED 9 /* T if cookie is in use */ +#define FSCACHE_COOKIE_RELINQUISHING 10 /* T if cookie is being relinquished */ + + u8 type; /* Type of object */ + u8 key_len; /* Length of index key */ + u8 aux_len; /* Length of auxiliary data */ + u32 key_hash; /* Hash of parent, type, key, len */ + union { + void *key; /* Index key */ + u8 inline_key[16]; /* - If the key is short enough */ + }; + union { + void *aux; /* Auxiliary data */ + u8 inline_aux[8]; /* - If the aux data is short enough */ + }; +}; + +static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie) +{ + return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags); +} + +/* + * slow-path functions for when there is actually caching available, and the + * netfs does actually have a valid token + * - these are not to be called directly + * - these are undefined symbols when FS-Cache is not configured and the + * optimiser takes care of not using them + */ +extern int __fscache_register_netfs(struct fscache_netfs *); +extern void __fscache_unregister_netfs(struct fscache_netfs *); +extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *); +extern void __fscache_release_cache_tag(struct fscache_cache_tag *); + +extern struct fscache_cookie *__fscache_acquire_cookie( + struct fscache_cookie *, + const struct fscache_cookie_def *, + const void *, size_t, + const void *, size_t, + void *, loff_t, bool); +extern void __fscache_relinquish_cookie(struct fscache_cookie *, const void *, bool); +extern int __fscache_check_consistency(struct fscache_cookie *, const void *); +extern void __fscache_update_cookie(struct fscache_cookie *, const void *); +extern int __fscache_attr_changed(struct fscache_cookie *); +extern void __fscache_invalidate(struct fscache_cookie *); +extern void __fscache_wait_on_invalidate(struct fscache_cookie *); +extern int __fscache_read_or_alloc_page(struct fscache_cookie *, + struct page *, + fscache_rw_complete_t, + void *, + gfp_t); +extern int 
__fscache_read_or_alloc_pages(struct fscache_cookie *, + struct address_space *, + struct list_head *, + unsigned *, + fscache_rw_complete_t, + void *, + gfp_t); +extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t); +extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t); +extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); +extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); +extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); +extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *, + gfp_t); +extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *, + struct inode *); +extern void __fscache_readpages_cancel(struct fscache_cookie *cookie, + struct list_head *pages); +extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool); +extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t, + bool (*)(void *), void *); + +/** + * fscache_register_netfs - Register a filesystem as desiring caching services + * @netfs: The description of the filesystem + * + * Register a filesystem as desiring caching services if they're available. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_register_netfs(struct fscache_netfs *netfs) +{ + if (fscache_available()) + return __fscache_register_netfs(netfs); + else + return 0; +} + +/** + * fscache_unregister_netfs - Indicate that a filesystem no longer desires + * caching services + * @netfs: The description of the filesystem + * + * Indicate that a filesystem no longer desires caching services for the + * moment. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_unregister_netfs(struct fscache_netfs *netfs) +{ + if (fscache_available()) + __fscache_unregister_netfs(netfs); +} + +/** + * fscache_lookup_cache_tag - Look up a cache tag + * @name: The name of the tag to search for + * + * Acquire a specific cache referral tag that can be used to select a specific + * cache in which to cache an index. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name) +{ + if (fscache_available()) + return __fscache_lookup_cache_tag(name); + else + return NULL; +} + +/** + * fscache_release_cache_tag - Release a cache tag + * @tag: The tag to release + * + * Release a reference to a cache referral tag previously looked up. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
+ */ +static inline +void fscache_release_cache_tag(struct fscache_cache_tag *tag) +{ + if (fscache_available()) + __fscache_release_cache_tag(tag); +} + +/** + * fscache_acquire_cookie - Acquire a cookie to represent a cache object + * @parent: The cookie that's to be the parent of this one + * @def: A description of the cache object, including callback operations + * @index_key: The index key for this cookie + * @index_key_len: Size of the index key + * @aux_data: The auxiliary data for the cookie (may be NULL) + * @aux_data_len: Size of the auxiliary data buffer + * @netfs_data: An arbitrary piece of data to be kept in the cookie to + * represent the cache object to the netfs + * @object_size: The initial size of object + * @enable: Whether or not to enable a data cookie immediately + * + * This function is used to inform FS-Cache about part of an index hierarchy + * that can be used to locate files. This is done by requesting a cookie for + * each index in the path to the file. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +struct fscache_cookie *fscache_acquire_cookie( + struct fscache_cookie *parent, + const struct fscache_cookie_def *def, + const void *index_key, + size_t index_key_len, + const void *aux_data, + size_t aux_data_len, + void *netfs_data, + loff_t object_size, + bool enable) +{ + if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent)) + return __fscache_acquire_cookie(parent, def, + index_key, index_key_len, + aux_data, aux_data_len, + netfs_data, object_size, enable); + else + return NULL; +} + +/** + * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding + * it + * @cookie: The cookie being returned + * @aux_data: The updated auxiliary data for the cookie (may be NULL) + * @retire: True if the cache object the cookie represents is to be discarded + * + * This function returns a cookie to the cache, forcibly discarding the + * associated cache object if retire is set to true. The opportunity is + * provided to update the auxiliary data in the cache before the object is + * disconnected. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_relinquish_cookie(struct fscache_cookie *cookie, + const void *aux_data, + bool retire) +{ + if (fscache_cookie_valid(cookie)) + __fscache_relinquish_cookie(cookie, aux_data, retire); +} + +/** + * fscache_check_consistency - Request validation of a cache's auxiliary data + * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) + * + * Request a consistency check from fscache, which passes the request to the + * backing cache. The auxiliary data on the cookie will be updated first if + * @aux_data is set. + * + * Returns 0 if consistent and -ESTALE if inconsistent. May also + * return -ENOMEM and -ERESTARTSYS. + */ +static inline +int fscache_check_consistency(struct fscache_cookie *cookie, + const void *aux_data) +{ + if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) + return __fscache_check_consistency(cookie, aux_data); + else + return 0; +} + +/** + * fscache_update_cookie - Request that a cache object be updated + * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) + * + * Request an update of the index data for the cache object associated with the + * cookie. 
The auxiliary data on the cookie will be updated first if @aux_data + * is set. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data) +{ + if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) + __fscache_update_cookie(cookie, aux_data); +} + +/** + * fscache_pin_cookie - Pin a data-storage cache object in its cache + * @cookie: The cookie representing the cache object + * + * Permit data-storage cache objects to be pinned in the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_pin_cookie(struct fscache_cookie *cookie) +{ + return -ENOBUFS; +} + +/** + * fscache_unpin_cookie - Unpin a data-storage cache object in its cache + * @cookie: The cookie representing the cache object + * + * Permit data-storage cache objects to be unpinned from the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_unpin_cookie(struct fscache_cookie *cookie) +{ +} + +/** + * fscache_attr_changed - Notify cache that an object's attributes changed + * @cookie: The cookie representing the cache object + * + * Send a notification to the cache indicating that an object's attributes have + * changed. This includes the data size. These attributes will be obtained + * through the get_attr() cookie definition op. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_attr_changed(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) + return __fscache_attr_changed(cookie); + else + return -ENOBUFS; +} + +/** + * fscache_invalidate - Notify cache that an object needs invalidation + * @cookie: The cookie representing the cache object + * + * Notify the cache that an object needs to be invalidated and that it + * should abort any retrievals or stores it is doing on the cache. The object + * is then marked non-caching until such time as the invalidation is complete. + * + * This can be called with spinlocks held. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_invalidate(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) + __fscache_invalidate(cookie); +} + +/** + * fscache_wait_on_invalidate - Wait for invalidation to complete + * @cookie: The cookie representing the cache object + * + * Wait for the invalidation of an object to complete. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_wait_on_invalidate(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie)) + __fscache_wait_on_invalidate(cookie); +} + +/** + * fscache_reserve_space - Reserve data space for a cached object + * @cookie: The cookie representing the cache object + * @size: The amount of space to be reserved + * + * Reserve an amount of space in the cache for the cache object attached to a + * cookie so that a write to that object within the space can always be + * honoured. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
+ */ +static inline +int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size) +{ + return -ENOBUFS; +} + +/** + * fscache_read_or_alloc_page - Read a page from the cache or allocate a block + * in which to store it + * @cookie: The cookie representing the cache object + * @page: The netfs page to fill if possible + * @end_io_func: The callback to invoke when and if the page is filled + * @context: An arbitrary piece of data to pass on to end_io_func() + * @gfp: The conditions under which memory allocation should be made + * + * Read a page from the cache, or if that's not possible make a potential + * one-block reservation in the cache into which the page may be stored once + * fetched from the server. + * + * If the page is not backed by the cache object, or if there's some reason + * it can't be, -ENOBUFS will be returned and nothing more will be done for + * that page. + * + * Else, if that page is backed by the cache, a read will be initiated directly + * to the netfs's page and 0 will be returned by this function. The + * end_io_func() callback will be invoked when the operation terminates on a + * completion or failure. Note that the callback may be invoked before the + * return. + * + * Else, if the page is unbacked, -ENODATA is returned and a block may have + * been allocated in the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_read_or_alloc_page(struct fscache_cookie *cookie, + struct page *page, + fscache_rw_complete_t end_io_func, + void *context, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) + return __fscache_read_or_alloc_page(cookie, page, end_io_func, + context, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate + * blocks in which to store them + * @cookie: The cookie representing the cache object + * @mapping: The netfs inode mapping to which the pages will be attached + * @pages: A list of potential netfs pages to be filled + * @nr_pages: Number of pages to be read and/or allocated + * @end_io_func: The callback to invoke when and if each page is filled + * @context: An arbitrary piece of data to pass on to end_io_func() + * @gfp: The conditions under which memory allocation should be made + * + * Read a set of pages from the cache, or if that's not possible, attempt to + * make a potential one-block reservation for each page in the cache into which + * that page may be stored once fetched from the server. + * + * If some pages are not backed by the cache object, or if there's some + * reason they can't be, -ENOBUFS will be returned and nothing more will be + * done for those pages. + * + * Else, if some of the pages are backed by the cache, a read will be initiated + * directly to the netfs's page and 0 will be returned by this function. The + * end_io_func() callback will be invoked when the operation terminates on a + * completion or failure. Note that the callback may be invoked before the + * return. + * + * Else, if a page is unbacked, -ENODATA is returned and a block may have + * been allocated in the cache. + * + * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in + * regard to different pages, the return values are prioritised in that order. + * Any pages submitted for reading are removed from the pages list. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
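+ *
+ * For the single-page variant above, a netfs read path commonly dispatches on
+ * the return value along these lines (a sketch only; example_end_io,
+ * example_read_from_server and ctx stand in for netfs-specific code):
+ *
+ *	ret = fscache_read_or_alloc_page(cookie, page, example_end_io,
+ *					 ctx, GFP_KERNEL);
+ *	switch (ret) {
+ *	case 0:
+ *		return 0;
+ *	case -ENODATA:
+ *	case -ENOBUFS:
+ *	default:
+ *		return example_read_from_server(page, ctx);
+ *	}
+ *
+ * Here 0 means the read was dispatched to the cache and example_end_io() will
+ * complete the page later, while -ENODATA and -ENOBUFS mean the page must be
+ * fetched from the server (with -ENODATA additionally indicating that a cache
+ * block may have been reserved for a later fscache_write_page()).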
+ */ +static inline +int fscache_read_or_alloc_pages(struct fscache_cookie *cookie, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages, + fscache_rw_complete_t end_io_func, + void *context, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) + return __fscache_read_or_alloc_pages(cookie, mapping, pages, + nr_pages, end_io_func, + context, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_alloc_page - Allocate a block in which to store a page + * @cookie: The cookie representing the cache object + * @page: The netfs page to allocate a page for + * @gfp: The conditions under which memory allocation should be made + * + * Request allocation of a block in the cache in which to store a netfs page + * without retrieving any contents from the cache. + * + * If the page is not backed by a file then -ENOBUFS will be returned and + * nothing more will be done, and no reservation will be made. + * + * Else, a block will be allocated if one wasn't already, and 0 will be + * returned. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_alloc_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) + return __fscache_alloc_page(cookie, page, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_readpages_cancel - Cancel read/alloc on pages + * @cookie: The cookie representing the inode's cache object. + * @pages: The netfs pages that we canceled write on in readpages() + * + * Uncache/unreserve the pages reserved earlier in readpages() via + * fscache_read_or_alloc_pages() and similar. In most successful cases in + * readpages() this doesn't do anything. In cases when the underlying netfs's + * readahead failed we need to clean up the pagelist (unmark and uncache). + * + * This function may sleep as it may have to clean up disk state. + */ +static inline +void fscache_readpages_cancel(struct fscache_cookie *cookie, + struct list_head *pages) +{ + if (fscache_cookie_valid(cookie)) + __fscache_readpages_cancel(cookie, pages); +} + +/** + * fscache_write_page - Request storage of a page in the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page to store + * @object_size: Updated size of object + * @gfp: The conditions under which memory allocation should be made + * + * Request the contents of the netfs page be written into the cache. This + * request may be ignored if no cache block is currently allocated, in which + * case it will return -ENOBUFS. + * + * If a cache block was already allocated, a write will be initiated and 0 will + * be returned. The PG_fscache_write page bit is set immediately and will then + * be cleared at the completion of the write to indicate the success or failure + * of the operation. Note that the completion may happen before the return. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_write_page(struct fscache_cookie *cookie, + struct page *page, + loff_t object_size, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) + return __fscache_write_page(cookie, page, object_size, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_uncache_page - Indicate that caching is no longer required on a page + * @cookie: The cookie representing the cache object + * @page: The netfs page that was being cached. 
+ * + * Tell the cache that we no longer want a page to be cached and that it should + * remove any knowledge of the netfs page it may have. + * + * Note that this cannot cancel any outstanding I/O operations between this + * page and the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_uncache_page(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + __fscache_uncache_page(cookie, page); +} + +/** + * fscache_check_page_write - Ask if a page is being writing to the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * + * Ask the cache if a page is being written to the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +bool fscache_check_page_write(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_check_page_write(cookie, page); + return false; +} + +/** + * fscache_wait_on_page_write - Wait for a page to complete writing to the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * + * Ask the cache to wake us up when a page is no longer being written to the + * cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_wait_on_page_write(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + __fscache_wait_on_page_write(cookie, page); +} + +/** + * fscache_maybe_release_page - Consider releasing a page, cancelling a store + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * @gfp: The gfp flags passed to releasepage() + * + * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's + * releasepage() call. A storage request on the page may cancelled if it is + * not currently being processed. + * + * The function returns true if the page no longer has a storage request on it, + * and false if a storage request is left in place. If true is returned, the + * page will have been passed to fscache_uncache_page(). If false is returned + * the page cannot be freed yet. + */ +static inline +bool fscache_maybe_release_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie) && PageFsCache(page)) + return __fscache_maybe_release_page(cookie, page, gfp); + return true; +} + +/** + * fscache_uncache_all_inode_pages - Uncache all an inode's pages + * @cookie: The cookie representing the inode's cache object. + * @inode: The inode to uncache pages from. + * + * Uncache all the pages in an inode that are marked PG_fscache, assuming them + * to be associated with the given cookie. + * + * This function may sleep. It will wait for pages that are being written out + * and will wait whilst the PG_fscache mark is removed by the cache. 
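+ *
+ * Illustrative sketch only (my_inode_cookie() is a hypothetical accessor for
+ * the netfs's per-inode cookie): a netfs might call this when it stops
+ * caching an inode, e.g. after truncating the pagecache and before
+ * relinquishing the cookie:
+ *
+ *	truncate_inode_pages_final(&inode->i_data);
+ *	fscache_uncache_all_inode_pages(my_inode_cookie(inode), inode);
+ *	fscache_relinquish_cookie(my_inode_cookie(inode), NULL, false);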
+ */ +static inline +void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, + struct inode *inode) +{ + if (fscache_cookie_valid(cookie)) + __fscache_uncache_all_inode_pages(cookie, inode); +} + +/** + * fscache_disable_cookie - Disable a cookie + * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) + * @invalidate: Invalidate the backing object + * + * Disable a cookie from accepting further alloc, read, write, invalidate, + * update or acquire operations. Outstanding operations can still be waited + * upon and pages can still be uncached and the cookie relinquished. + * + * This will not return until all outstanding operations have completed. + * + * If @invalidate is set, then the backing object will be invalidated and + * detached, otherwise it will just be detached. + * + * If @aux_data is set, then auxiliary data will be updated from that. + */ +static inline +void fscache_disable_cookie(struct fscache_cookie *cookie, + const void *aux_data, + bool invalidate) +{ + if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) + __fscache_disable_cookie(cookie, aux_data, invalidate); +} + +/** + * fscache_enable_cookie - Reenable a cookie + * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) + * @object_size: Current size of object + * @can_enable: A function to permit enablement once lock is held + * @data: Data for can_enable() + * + * Reenable a previously disabled cookie, allowing it to accept further alloc, + * read, write, invalidate, update or acquire operations. An attempt will be + * made to immediately reattach the cookie to a backing object. If @aux_data + * is set, the auxiliary data attached to the cookie will be updated. + * + * The can_enable() function is called (if not NULL) once the enablement lock + * is held to rule on whether enablement is still permitted to go ahead. + */ +static inline +void fscache_enable_cookie(struct fscache_cookie *cookie, + const void *aux_data, + loff_t object_size, + bool (*can_enable)(void *data), + void *data) +{ + if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie)) + __fscache_enable_cookie(cookie, aux_data, object_size, + can_enable, data); +} + +#endif /* _LINUX_FSCACHE_H */ diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h new file mode 100644 index 000000000..c1e4a615b --- /dev/null +++ b/include/linux/fscrypt.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * fscrypt.h: declarations for per-file encryption + * + * Filesystems that implement per-file encryption include this header + * file with the __FS_HAS_ENCRYPTION set according to whether that filesystem + * is being built with encryption support or not. + * + * Copyright (C) 2015, Google, Inc. + * + * Written by Michael Halcrow, 2015. + * Modified by Jaegeuk Kim, 2015. 
+ */ +#ifndef _LINUX_FSCRYPT_H +#define _LINUX_FSCRYPT_H + +#include + +#define FS_CRYPTO_BLOCK_SIZE 16 + +struct fscrypt_ctx; +struct fscrypt_info; + +struct fscrypt_str { + unsigned char *name; + u32 len; +}; + +struct fscrypt_name { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + u32 hash; + u32 minor_hash; + struct fscrypt_str crypto_buf; + bool is_ciphertext_name; +}; + +#define FSTR_INIT(n, l) { .name = n, .len = l } +#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len) +#define fname_name(p) ((p)->disk_name.name) +#define fname_len(p) ((p)->disk_name.len) + +/* Maximum value for the third parameter of fscrypt_operations.set_context(). */ +#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 + +#if __FS_HAS_ENCRYPTION +#include +#else +#include +#endif + +/** + * fscrypt_require_key - require an inode's encryption key + * @inode: the inode we need the key for + * + * If the inode is encrypted, set up its encryption key if not already done. + * Then require that the key be present and return -ENOKEY otherwise. + * + * No locks are needed, and the key will live as long as the struct inode --- so + * it won't go away from under you. + * + * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code + * if a problem occurred while setting up the encryption key. + */ +static inline int fscrypt_require_key(struct inode *inode) +{ + if (IS_ENCRYPTED(inode)) { + int err = fscrypt_get_encryption_info(inode); + + if (err) + return err; + if (!fscrypt_has_encryption_key(inode)) + return -ENOKEY; + } + return 0; +} + +/** + * fscrypt_prepare_link - prepare to link an inode into a possibly-encrypted directory + * @old_dentry: an existing dentry for the inode being linked + * @dir: the target directory + * @dentry: negative dentry for the target filename + * + * A new link can only be added to an encrypted directory if the directory's + * encryption key is available --- since otherwise we'd have no way to encrypt + * the filename. Therefore, we first set up the directory's encryption key (if + * not already done) and return an error if it's unavailable. + * + * We also verify that the link will not violate the constraint that all files + * in an encrypted directory tree use the same encryption policy. + * + * Return: 0 on success, -ENOKEY if the directory's encryption key is missing, + * -EXDEV if the link would result in an inconsistent encryption policy, or + * another -errno code. + */ +static inline int fscrypt_prepare_link(struct dentry *old_dentry, + struct inode *dir, + struct dentry *dentry) +{ + if (IS_ENCRYPTED(dir)) + return __fscrypt_prepare_link(d_inode(old_dentry), dir, dentry); + return 0; +} + +/** + * fscrypt_prepare_rename - prepare for a rename between possibly-encrypted directories + * @old_dir: source directory + * @old_dentry: dentry for source file + * @new_dir: target directory + * @new_dentry: dentry for target location (may be negative unless exchanging) + * @flags: rename flags (we care at least about %RENAME_EXCHANGE) + * + * Prepare for ->rename() where the source and/or target directories may be + * encrypted. A new link can only be added to an encrypted directory if the + * directory's encryption key is available --- since otherwise we'd have no way + * to encrypt the filename. A rename to an existing name, on the other hand, + * *is* cryptographically possible without the key. However, we take the more + * conservative approach and just forbid all no-key renames. 
+ * + * We also verify that the rename will not violate the constraint that all files + * in an encrypted directory tree use the same encryption policy. + * + * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the + * rename would cause inconsistent encryption policies, or another -errno code. + */ +static inline int fscrypt_prepare_rename(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry, + unsigned int flags) +{ + if (IS_ENCRYPTED(old_dir) || IS_ENCRYPTED(new_dir)) + return __fscrypt_prepare_rename(old_dir, old_dentry, + new_dir, new_dentry, flags); + return 0; +} + +/** + * fscrypt_prepare_lookup - prepare to lookup a name in a possibly-encrypted directory + * @dir: directory being searched + * @dentry: filename being looked up + * @fname: (output) the name to use to search the on-disk directory + * + * Prepare for ->lookup() in a directory which may be encrypted by determining + * the name that will actually be used to search the directory on-disk. Lookups + * can be done with or without the directory's encryption key; without the key, + * filenames are presented in encrypted form. Therefore, we'll try to set up + * the directory's encryption key, but even without it the lookup can continue. + * + * This also installs a custom ->d_revalidate() method which will invalidate the + * dentry if it was created without the key and the key is later added. + * + * Return: 0 on success; -ENOENT if key is unavailable but the filename isn't a + * correctly formed encoded ciphertext name, so a negative dentry should be + * created; or another -errno code. + */ +static inline int fscrypt_prepare_lookup(struct inode *dir, + struct dentry *dentry, + struct fscrypt_name *fname) +{ + if (IS_ENCRYPTED(dir)) + return __fscrypt_prepare_lookup(dir, dentry, fname); + + memset(fname, 0, sizeof(*fname)); + fname->usr_fname = &dentry->d_name; + fname->disk_name.name = (unsigned char *)dentry->d_name.name; + fname->disk_name.len = dentry->d_name.len; + return 0; +} + +/** + * fscrypt_prepare_setattr - prepare to change a possibly-encrypted inode's attributes + * @dentry: dentry through which the inode is being changed + * @attr: attributes to change + * + * Prepare for ->setattr() on a possibly-encrypted inode. On an encrypted file, + * most attribute changes are allowed even without the encryption key. However, + * without the encryption key we do have to forbid truncates. This is needed + * because the size being truncated to may not be a multiple of the filesystem + * block size, and in that case we'd have to decrypt the final block, zero the + * portion past i_size, and re-encrypt it. (We *could* allow truncating to a + * filesystem block boundary, but it's simpler to just forbid all truncates --- + * and we already forbid all other contents modifications without the key.) + * + * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code + * if a problem occurred while setting up the encryption key. 
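+ *
+ * Illustrative sketch (not part of the original text; my_setattr() and the
+ * elided rest of its body are hypothetical): a filesystem's ->setattr()
+ * would call this before making any changes:
+ *
+ *	static int my_setattr(struct dentry *dentry, struct iattr *attr)
+ *	{
+ *		int err = fscrypt_prepare_setattr(dentry, attr);
+ *
+ *		if (err)
+ *			return err;
+ *		...
+ *	}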
+ */ +static inline int fscrypt_prepare_setattr(struct dentry *dentry, + struct iattr *attr) +{ + if (attr->ia_valid & ATTR_SIZE) + return fscrypt_require_key(d_inode(dentry)); + return 0; +} + +/** + * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink + * @dir: directory in which the symlink is being created + * @target: plaintext symlink target + * @len: length of @target excluding null terminator + * @max_len: space the filesystem has available to store the symlink target + * @disk_link: (out) the on-disk symlink target being prepared + * + * This function computes the size the symlink target will require on-disk, + * stores it in @disk_link->len, and validates it against @max_len. An + * encrypted symlink may be longer than the original. + * + * Additionally, @disk_link->name is set to @target if the symlink will be + * unencrypted, but left NULL if the symlink will be encrypted. For encrypted + * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the + * on-disk target later. (The reason for the two-step process is that some + * filesystems need to know the size of the symlink target before creating the + * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.) + * + * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long, + * -ENOKEY if the encryption key is missing, or another -errno code if a problem + * occurred while setting up the encryption key. + */ +static inline int fscrypt_prepare_symlink(struct inode *dir, + const char *target, + unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link) +{ + if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir)) + return __fscrypt_prepare_symlink(dir, len, max_len, disk_link); + + disk_link->name = (unsigned char *)target; + disk_link->len = len + 1; + if (disk_link->len > max_len) + return -ENAMETOOLONG; + return 0; +} + +/** + * fscrypt_encrypt_symlink - encrypt the symlink target if needed + * @inode: symlink inode + * @target: plaintext symlink target + * @len: length of @target excluding null terminator + * @disk_link: (in/out) the on-disk symlink target being prepared + * + * If the symlink target needs to be encrypted, then this function encrypts it + * into @disk_link->name. fscrypt_prepare_symlink() must have been called + * previously to compute @disk_link->len. If the filesystem did not allocate a + * buffer for @disk_link->name after calling fscrypt_prepare_link(), then one + * will be kmalloc()'ed and the filesystem will be responsible for freeing it. + * + * Return: 0 on success, -errno on failure + */ +static inline int fscrypt_encrypt_symlink(struct inode *inode, + const char *target, + unsigned int len, + struct fscrypt_str *disk_link) +{ + if (IS_ENCRYPTED(inode)) + return __fscrypt_encrypt_symlink(inode, target, len, disk_link); + return 0; +} + +#endif /* _LINUX_FSCRYPT_H */ diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h new file mode 100644 index 000000000..6e95c5a9b --- /dev/null +++ b/include/linux/fscrypt_notsupp.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * fscrypt_notsupp.h + * + * This stubs out the fscrypt functions for filesystems configured without + * encryption support. + * + * Do not include this file directly. Use fscrypt.h instead! + */ +#ifndef _LINUX_FSCRYPT_H +#error "Incorrect include of linux/fscrypt_notsupp.h!" 
+#endif + +#ifndef _LINUX_FSCRYPT_NOTSUPP_H +#define _LINUX_FSCRYPT_NOTSUPP_H + +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return false; +} + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + return false; +} + +static inline bool fscrypt_is_nokey_name(const struct dentry *dentry) +{ + return false; +} + +/* crypto.c */ +static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work) +{ +} + +static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, + gfp_t gfp_flags) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx) +{ + return; +} + +static inline struct page *fscrypt_encrypt_page(const struct inode *inode, + struct page *page, + unsigned int len, + unsigned int offs, + u64 lblk_num, gfp_t gfp_flags) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int fscrypt_decrypt_page(const struct inode *inode, + struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num) +{ + return -EOPNOTSUPP; +} + +static inline struct page *fscrypt_control_page(struct page *page) +{ + WARN_ON_ONCE(1); + return ERR_PTR(-EINVAL); +} + +static inline void fscrypt_restore_control_page(struct page *page) +{ + return; +} + +/* policy.c */ +static inline int fscrypt_ioctl_set_policy(struct file *filp, + const void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_has_permitted_context(struct inode *parent, + struct inode *child) +{ + return 0; +} + +static inline int fscrypt_inherit_context(struct inode *parent, + struct inode *child, + void *fs_data, bool preload) +{ + return -EOPNOTSUPP; +} + +/* keyinfo.c */ +static inline int fscrypt_get_encryption_info(struct inode *inode) +{ + return -EOPNOTSUPP; +} + +static inline void fscrypt_put_encryption_info(struct inode *inode) +{ + return; +} + + /* fname.c */ +static inline int fscrypt_setup_filename(struct inode *dir, + const struct qstr *iname, + int lookup, struct fscrypt_name *fname) +{ + if (IS_ENCRYPTED(dir)) + return -EOPNOTSUPP; + + memset(fname, 0, sizeof(*fname)); + fname->usr_fname = iname; + fname->disk_name.name = (unsigned char *)iname->name; + fname->disk_name.len = iname->len; + return 0; +} + +static inline void fscrypt_free_filename(struct fscrypt_name *fname) +{ + return; +} + +static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, + u32 max_encrypted_len, + struct fscrypt_str *crypto_str) +{ + return -EOPNOTSUPP; +} + +static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) +{ + return; +} + +static inline int fscrypt_fname_disk_to_usr(struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) +{ + return -EOPNOTSUPP; +} + +static inline bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len) +{ + /* Encryption support disabled; use standard comparison */ + if (de_name_len != fname->disk_name.len) + return false; + return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); +} + +/* bio.c */ +static inline void fscrypt_decrypt_bio(struct bio *bio) +{ +} + +static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, + struct bio *bio) +{ +} + +static inline void fscrypt_pullback_bio_page(struct page **page, bool restore) +{ + return; +} + +static inline int fscrypt_zeroout_range(const struct inode *inode, 
pgoff_t lblk, + sector_t pblk, unsigned int len) +{ + return -EOPNOTSUPP; +} + +/* hooks.c */ + +static inline int fscrypt_file_open(struct inode *inode, struct file *filp) +{ + if (IS_ENCRYPTED(inode)) + return -EOPNOTSUPP; + return 0; +} + +static inline int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, + struct dentry *dentry) +{ + return -EOPNOTSUPP; +} + +static inline int __fscrypt_prepare_rename(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry, + unsigned int flags) +{ + return -EOPNOTSUPP; +} + +static inline int __fscrypt_prepare_lookup(struct inode *dir, + struct dentry *dentry, + struct fscrypt_name *fname) +{ + return -EOPNOTSUPP; +} + +static inline int __fscrypt_prepare_symlink(struct inode *dir, + unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link) +{ + return -EOPNOTSUPP; +} + +static inline int __fscrypt_encrypt_symlink(struct inode *inode, + const char *target, + unsigned int len, + struct fscrypt_str *disk_link) +{ + return -EOPNOTSUPP; +} + +static inline const char *fscrypt_get_symlink(struct inode *inode, + const void *caddr, + unsigned int max_size, + struct delayed_call *done) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int fscrypt_symlink_getattr(const struct path *path, + struct kstat *stat) +{ + return -EOPNOTSUPP; +} + +#endif /* _LINUX_FSCRYPT_NOTSUPP_H */ diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h new file mode 100644 index 000000000..f4cb4871a --- /dev/null +++ b/include/linux/fscrypt_supp.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * fscrypt_supp.h + * + * Do not include this file directly. Use fscrypt.h instead! + */ +#ifndef _LINUX_FSCRYPT_H +#error "Incorrect include of linux/fscrypt_supp.h!" +#endif + +#ifndef _LINUX_FSCRYPT_SUPP_H +#define _LINUX_FSCRYPT_SUPP_H + +#include +#include + +/* + * fscrypt superblock flags + */ +#define FS_CFLG_OWN_PAGES (1U << 1) + +/* + * crypto operations for filesystems + */ +struct fscrypt_operations { + unsigned int flags; + const char *key_prefix; + int (*get_context)(struct inode *, void *, size_t); + int (*set_context)(struct inode *, const void *, size_t, void *); + bool (*dummy_context)(struct inode *); + bool (*empty_dir)(struct inode *); + unsigned int max_namelen; +}; + +struct fscrypt_ctx { + union { + struct { + struct page *bounce_page; /* Ciphertext page */ + struct page *control_page; /* Original page */ + } w; + struct { + struct bio *bio; + struct work_struct work; + } r; + struct list_head free_list; /* Free list */ + }; + u8 flags; /* Flags */ +}; + +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return (inode->i_crypt_info != NULL); +} + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + return inode->i_sb->s_cop->dummy_context && + inode->i_sb->s_cop->dummy_context(inode); +} + +/** + * fscrypt_is_nokey_name() - test whether a dentry is a no-key name + * @dentry: the dentry to check + * + * This returns true if the dentry is a no-key dentry. A no-key dentry is a + * dentry that was created in an encrypted directory that hasn't had its + * encryption key added yet. Such dentries may be either positive or negative. + * + * When a filesystem is asked to create a new filename in an encrypted directory + * and the new filename's dentry is a no-key dentry, it must fail the operation + * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(), + * ->rename(), and ->link(). 
(However, ->rename() and ->link() are already + * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().) + * + * This is necessary because creating a filename requires the directory's + * encryption key, but just checking for the key on the directory inode during + * the final filesystem operation doesn't guarantee that the key was available + * during the preceding dentry lookup. And the key must have already been + * available during the dentry lookup in order for it to have been checked + * whether the filename already exists in the directory and for the new file's + * dentry not to be invalidated due to it incorrectly having the no-key flag. + * + * Return: %true if the dentry is a no-key name + */ +static inline bool fscrypt_is_nokey_name(const struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_ENCRYPTED_NAME; +} + +/* crypto.c */ +extern void fscrypt_enqueue_decrypt_work(struct work_struct *); +extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); +extern void fscrypt_release_ctx(struct fscrypt_ctx *); +extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, + unsigned int, unsigned int, + u64, gfp_t); +extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, + unsigned int, u64); + +static inline struct page *fscrypt_control_page(struct page *page) +{ + return ((struct fscrypt_ctx *)page_private(page))->w.control_page; +} + +extern void fscrypt_restore_control_page(struct page *); + +/* policy.c */ +extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); +extern int fscrypt_ioctl_get_policy(struct file *, void __user *); +extern int fscrypt_has_permitted_context(struct inode *, struct inode *); +extern int fscrypt_inherit_context(struct inode *, struct inode *, + void *, bool); +/* keyinfo.c */ +extern int fscrypt_get_encryption_info(struct inode *); +extern void fscrypt_put_encryption_info(struct inode *); + +/* fname.c */ +extern int fscrypt_setup_filename(struct inode *, const struct qstr *, + int lookup, struct fscrypt_name *); + +static inline void fscrypt_free_filename(struct fscrypt_name *fname) +{ + kfree(fname->crypto_buf.name); +} + +extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, + struct fscrypt_str *); +extern void fscrypt_fname_free_buffer(struct fscrypt_str *); +extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, + const struct fscrypt_str *, struct fscrypt_str *); + +#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 + +/* Extracts the second-to-last ciphertext block; see explanation below */ +#define FSCRYPT_FNAME_DIGEST(name, len) \ + ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \ + FS_CRYPTO_BLOCK_SIZE)) + +#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE + +/** + * fscrypt_digested_name - alternate identifier for an on-disk filename + * + * When userspace lists an encrypted directory without access to the key, + * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE + * bytes are shown in this abbreviated form (base64-encoded) rather than as the + * full ciphertext (base64-encoded). This is necessary to allow supporting + * filenames up to NAME_MAX bytes, since base64 encoding expands the length. 
+ * + * To make it possible for filesystems to still find the correct directory entry + * despite not knowing the full on-disk name, we encode any filesystem-specific + * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups, + * followed by the second-to-last ciphertext block of the filename. Due to the + * use of the CBC-CTS encryption mode, the second-to-last ciphertext block + * depends on the full plaintext. (Note that ciphertext stealing causes the + * last two blocks to appear "flipped".) This makes accidental collisions very + * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they + * share the same filesystem-specific hashes. + * + * However, this scheme isn't immune to intentional collisions, which can be + * created by anyone able to create arbitrary plaintext filenames and view them + * without the key. Making the "digest" be a real cryptographic hash like + * SHA-256 over the full ciphertext would prevent this, although it would be + * less efficient and harder to implement, especially since the filesystem would + * need to calculate it for each directory entry examined during a search. + */ +struct fscrypt_digested_name { + u32 hash; + u32 minor_hash; + u8 digest[FSCRYPT_FNAME_DIGEST_SIZE]; +}; + +/** + * fscrypt_match_name() - test whether the given name matches a directory entry + * @fname: the name being searched for + * @de_name: the name from the directory entry + * @de_name_len: the length of @de_name in bytes + * + * Normally @fname->disk_name will be set, and in that case we simply compare + * that to the name stored in the directory entry. The only exception is that + * if we don't have the key for an encrypted directory and a filename in it is + * very long, then we won't have the full disk_name and we'll instead need to + * match against the fscrypt_digested_name. + * + * Return: %true if the name matches, otherwise %false. 
+ */ +static inline bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len) +{ + if (unlikely(!fname->disk_name.name)) { + const struct fscrypt_digested_name *n = + (const void *)fname->crypto_buf.name; + if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_')) + return false; + if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) + return false; + return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len), + n->digest, FSCRYPT_FNAME_DIGEST_SIZE); + } + + if (de_name_len != fname->disk_name.len) + return false; + return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); +} + +/* bio.c */ +extern void fscrypt_decrypt_bio(struct bio *); +extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, + struct bio *bio); +extern void fscrypt_pullback_bio_page(struct page **, bool); +extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, + unsigned int); + +/* hooks.c */ +extern int fscrypt_file_open(struct inode *inode, struct file *filp); +extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, + struct dentry *dentry); +extern int __fscrypt_prepare_rename(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry, + unsigned int flags); +extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, + struct fscrypt_name *fname); +extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link); +extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, + unsigned int len, + struct fscrypt_str *disk_link); +extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, + unsigned int max_size, + struct delayed_call *done); +int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat); + +#endif /* _LINUX_FSCRYPT_SUPP_H */ diff --git a/include/linux/fsi-sbefifo.h b/include/linux/fsi-sbefifo.h new file mode 100644 index 000000000..13f9ebeaa --- /dev/null +++ b/include/linux/fsi-sbefifo.h @@ -0,0 +1,33 @@ +/* + * SBEFIFO FSI Client device driver + * + * Copyright (C) IBM Corporation 2017 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERGCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef LINUX_FSI_SBEFIFO_H +#define LINUX_FSI_SBEFIFO_H + +#define SBEFIFO_CMD_PUT_OCC_SRAM 0xa404 +#define SBEFIFO_CMD_GET_OCC_SRAM 0xa403 +#define SBEFIFO_CMD_GET_SBE_FFDC 0xa801 + +#define SBEFIFO_MAX_FFDC_SIZE 0x2000 + +struct device; + +int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len, + __be32 *response, size_t *resp_len); + +int sbefifo_parse_status(struct device *dev, u16 cmd, __be32 *response, + size_t resp_len, size_t *data_len); + +#endif /* LINUX_FSI_SBEFIFO_H */ diff --git a/include/linux/fsi.h b/include/linux/fsi.h new file mode 100644 index 000000000..ec3be0d5b --- /dev/null +++ b/include/linux/fsi.h @@ -0,0 +1,93 @@ +/* FSI device & driver interfaces + * + * Copyright (C) IBM Corporation 2016 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef LINUX_FSI_H +#define LINUX_FSI_H + +#include + +struct fsi_device { + struct device dev; + u8 engine_type; + u8 version; + u8 unit; + struct fsi_slave *slave; + uint32_t addr; + uint32_t size; +}; + +extern int fsi_device_read(struct fsi_device *dev, uint32_t addr, + void *val, size_t size); +extern int fsi_device_write(struct fsi_device *dev, uint32_t addr, + const void *val, size_t size); +extern int fsi_device_peek(struct fsi_device *dev, void *val); + +struct fsi_device_id { + u8 engine_type; + u8 version; +}; + +#define FSI_VERSION_ANY 0 + +#define FSI_DEVICE(t) \ + .engine_type = (t), .version = FSI_VERSION_ANY, + +#define FSI_DEVICE_VERSIONED(t, v) \ + .engine_type = (t), .version = (v), + +struct fsi_driver { + struct device_driver drv; + const struct fsi_device_id *id_table; +}; + +#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev) +#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv) + +extern int fsi_driver_register(struct fsi_driver *fsi_drv); +extern void fsi_driver_unregister(struct fsi_driver *fsi_drv); + +/* module_fsi_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. 
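+ * As an illustrative sketch (the engine type, callbacks and names below are
+ * hypothetical), a minimal client might look like:
+ *
+ *	static const struct fsi_device_id my_ids[] = {
+ *		{ FSI_DEVICE(0x22) },
+ *		{ }
+ *	};
+ *
+ *	static struct fsi_driver my_fsi_driver = {
+ *		.id_table = my_ids,
+ *		.drv = {
+ *			.name = "my-fsi-client",
+ *			.bus = &fsi_bus_type,
+ *			.probe = my_probe,
+ *			.remove = my_remove,
+ *		},
+ *	};
+ *
+ *	module_fsi_driver(my_fsi_driver);
+ *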
Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_fsi_driver(__fsi_driver) \ + module_driver(__fsi_driver, fsi_driver_register, \ + fsi_driver_unregister) + +/* direct slave API */ +extern int fsi_slave_claim_range(struct fsi_slave *slave, + uint32_t addr, uint32_t size); +extern void fsi_slave_release_range(struct fsi_slave *slave, + uint32_t addr, uint32_t size); +extern int fsi_slave_read(struct fsi_slave *slave, uint32_t addr, + void *val, size_t size); +extern int fsi_slave_write(struct fsi_slave *slave, uint32_t addr, + const void *val, size_t size); + +extern struct bus_type fsi_bus_type; +extern const struct device_type fsi_cdev_type; + +enum fsi_dev_type { + fsi_dev_cfam, + fsi_dev_sbefifo, + fsi_dev_scom, + fsi_dev_occ +}; + +extern int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type, + dev_t *out_dev, int *out_index); +extern void fsi_free_minor(dev_t dev); + +#endif /* LINUX_FSI_H */ diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h new file mode 100644 index 000000000..c46eab5bc --- /dev/null +++ b/include/linux/fsl-diu-fb.h @@ -0,0 +1,173 @@ +/* + * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. + * + * Freescale DIU Frame Buffer device driver + * + * Authors: Hongjun Chen + * Paul Widmer + * Srikanth Srinivasan + * York Sun + * + * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __FSL_DIU_FB_H__ +#define __FSL_DIU_FB_H__ + +#include + +struct mfb_chroma_key { + int enable; + __u8 red_max; + __u8 green_max; + __u8 blue_max; + __u8 red_min; + __u8 green_min; + __u8 blue_min; +}; + +struct aoi_display_offset { + __s32 x_aoi_d; + __s32 y_aoi_d; +}; + +#define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key) +#define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8) +#define MFB_SET_ALPHA _IOW('M', 0, __u8) +#define MFB_GET_ALPHA _IOR('M', 0, __u8) +#define MFB_SET_AOID _IOW('M', 4, struct aoi_display_offset) +#define MFB_GET_AOID _IOR('M', 4, struct aoi_display_offset) +#define MFB_SET_PIXFMT _IOW('M', 8, __u32) +#define MFB_GET_PIXFMT _IOR('M', 8, __u32) + +/* + * The MPC5121 BSP comes with a gamma_set utility that initializes the + * gamma table. Unfortunately, it uses bad values for the IOCTL commands, + * but there's nothing we can do about it now. These ioctls are only + * supported on the MPC5121. + */ +#define MFB_SET_GAMMA _IOW('M', 1, __u8) +#define MFB_GET_GAMMA _IOR('M', 1, __u8) + +/* + * The original definitions of MFB_SET_PIXFMT and MFB_GET_PIXFMT used the + * wrong value for 'size' field of the ioctl. The current macros above use the + * right size, but we still need to provide backwards compatibility, at least + * for a while. 
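+ *
+ * (Illustrative note only: a driver's ioctl handler can stay compatible by
+ * accepting both encodings, e.g.
+ *
+ *	case MFB_SET_PIXFMT_OLD:
+ *	case MFB_SET_PIXFMT:
+ *		...
+ *		break;
+ *
+ * and likewise for the GET variants.)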
+*/ +#define MFB_SET_PIXFMT_OLD 0x80014d08 +#define MFB_GET_PIXFMT_OLD 0x40014d08 + +#ifdef __KERNEL__ + +/* + * These are the fields of area descriptor(in DDR memory) for every plane + */ +struct diu_ad { + /* Word 0(32-bit) in DDR memory */ +/* __u16 comp; */ +/* __u16 pixel_s:2; */ +/* __u16 palette:1; */ +/* __u16 red_c:2; */ +/* __u16 green_c:2; */ +/* __u16 blue_c:2; */ +/* __u16 alpha_c:3; */ +/* __u16 byte_f:1; */ +/* __u16 res0:3; */ + + __be32 pix_fmt; /* hard coding pixel format */ + + /* Word 1(32-bit) in DDR memory */ + __le32 addr; + + /* Word 2(32-bit) in DDR memory */ +/* __u32 delta_xs:11; */ +/* __u32 res1:1; */ +/* __u32 delta_ys:11; */ +/* __u32 res2:1; */ +/* __u32 g_alpha:8; */ + __le32 src_size_g_alpha; + + /* Word 3(32-bit) in DDR memory */ +/* __u32 delta_xi:11; */ +/* __u32 res3:5; */ +/* __u32 delta_yi:11; */ +/* __u32 res4:3; */ +/* __u32 flip:2; */ + __le32 aoi_size; + + /* Word 4(32-bit) in DDR memory */ + /*__u32 offset_xi:11; + __u32 res5:5; + __u32 offset_yi:11; + __u32 res6:5; + */ + __le32 offset_xyi; + + /* Word 5(32-bit) in DDR memory */ + /*__u32 offset_xd:11; + __u32 res7:5; + __u32 offset_yd:11; + __u32 res8:5; */ + __le32 offset_xyd; + + + /* Word 6(32-bit) in DDR memory */ + __u8 ckmax_r; + __u8 ckmax_g; + __u8 ckmax_b; + __u8 res9; + + /* Word 7(32-bit) in DDR memory */ + __u8 ckmin_r; + __u8 ckmin_g; + __u8 ckmin_b; + __u8 res10; +/* __u32 res10:8; */ + + /* Word 8(32-bit) in DDR memory */ + __le32 next_ad; + + /* Word 9(32-bit) in DDR memory, just for 64-bit aligned */ + __u32 paddr; +} __attribute__ ((packed)); + +/* DIU register map */ +struct diu { + __be32 desc[3]; + __be32 gamma; + __be32 palette; + __be32 cursor; + __be32 curs_pos; + __be32 diu_mode; + __be32 bgnd; + __be32 bgnd_wb; + __be32 disp_size; + __be32 wb_size; + __be32 wb_mem_addr; + __be32 hsyn_para; + __be32 vsyn_para; + __be32 syn_pol; + __be32 thresholds; + __be32 int_status; + __be32 int_mask; + __be32 colorbar[8]; + __be32 filling; + __be32 plut; +} __attribute__ ((packed)); + +/* + * Modes of operation of DIU. The DIU supports five different modes, but + * the driver only supports modes 0 and 1. + */ +#define MFB_MODE0 0 /* DIU off */ +#define MFB_MODE1 1 /* All three planes output to display */ + +#endif /* __KERNEL__ */ +#endif /* __FSL_DIU_FB_H__ */ diff --git a/include/linux/fsl/bestcomm/ata.h b/include/linux/fsl/bestcomm/ata.h new file mode 100644 index 000000000..0b2371811 --- /dev/null +++ b/include/linux/fsl/bestcomm/ata.h @@ -0,0 +1,30 @@ +/* + * Header for Bestcomm ATA task driver + * + * + * Copyright (C) 2006 Freescale - John Rigby + * Copyright (C) 2006 Sylvain Munaut + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#ifndef __BESTCOMM_ATA_H__ +#define __BESTCOMM_ATA_H__ + + +struct bcom_ata_bd { + u32 status; + u32 src_pa; + u32 dst_pa; +}; + +extern struct bcom_task * bcom_ata_init(int queue_len, int maxbufsize); +extern void bcom_ata_rx_prepare(struct bcom_task *tsk); +extern void bcom_ata_tx_prepare(struct bcom_task *tsk); +extern void bcom_ata_reset_bd(struct bcom_task *tsk); +extern void bcom_ata_release(struct bcom_task *tsk); + +#endif /* __BESTCOMM_ATA_H__ */ + diff --git a/include/linux/fsl/bestcomm/bestcomm.h b/include/linux/fsl/bestcomm/bestcomm.h new file mode 100644 index 000000000..a0e2e6b19 --- /dev/null +++ b/include/linux/fsl/bestcomm/bestcomm.h @@ -0,0 +1,213 @@ +/* + * Public header for the MPC52xx processor BestComm driver + * + * + * Copyright (C) 2006 Sylvain Munaut + * Copyright (C) 2005 Varma Electronics Oy, + * ( by Andrey Volkov ) + * Copyright (C) 2003-2004 MontaVista, Software, Inc. + * ( by Dale Farnsworth ) + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __BESTCOMM_H__ +#define __BESTCOMM_H__ + +/** + * struct bcom_bd - Structure describing a generic BestComm buffer descriptor + * @status: The current status of this buffer. Exact meaning depends on the + * task type + * @data: An array of u32 extra data. Size of array is task dependent. + * + * Note: Don't dereference a bcom_bd pointer as an array. The size of the + * bcom_bd is variable. Use bcom_get_bd() instead. + */ +struct bcom_bd { + u32 status; + u32 data[0]; /* variable payload size */ +}; + +/* ======================================================================== */ +/* Generic task management */ +/* ======================================================================== */ + +/** + * struct bcom_task - Structure describing a loaded BestComm task + * + * This structure is never built by the driver it self. It's built and + * filled the intermediate layer of the BestComm API, the task dependent + * support code. + * + * Most likely you don't need to poke around inside this structure. The + * fields are exposed in the header just for the sake of inline functions + */ +struct bcom_task { + unsigned int tasknum; + unsigned int flags; + int irq; + + struct bcom_bd *bd; + phys_addr_t bd_pa; + void **cookie; + unsigned short index; + unsigned short outdex; + unsigned int num_bd; + unsigned int bd_size; + + void* priv; +}; + +#define BCOM_FLAGS_NONE 0x00000000ul +#define BCOM_FLAGS_ENABLE_TASK (1ul << 0) + +/** + * bcom_enable - Enable a BestComm task + * @tsk: The BestComm task structure + * + * This function makes sure the given task is enabled and can be run + * by the BestComm engine as needed + */ +extern void bcom_enable(struct bcom_task *tsk); + +/** + * bcom_disable - Disable a BestComm task + * @tsk: The BestComm task structure + * + * This function disable a given task, making sure it's not executed + * by the BestComm engine. 
+ */ +extern void bcom_disable(struct bcom_task *tsk); + + +/** + * bcom_get_task_irq - Returns the irq number of a BestComm task + * @tsk: The BestComm task structure + */ +static inline int +bcom_get_task_irq(struct bcom_task *tsk) { + return tsk->irq; +} + +/* ======================================================================== */ +/* BD based tasks helpers */ +/* ======================================================================== */ + +#define BCOM_BD_READY 0x40000000ul + +/** _bcom_next_index - Get next input index. + * @tsk: pointer to task structure + * + * Support function; Device drivers should not call this + */ +static inline int +_bcom_next_index(struct bcom_task *tsk) +{ + return ((tsk->index + 1) == tsk->num_bd) ? 0 : tsk->index + 1; +} + +/** _bcom_next_outdex - Get next output index. + * @tsk: pointer to task structure + * + * Support function; Device drivers should not call this + */ +static inline int +_bcom_next_outdex(struct bcom_task *tsk) +{ + return ((tsk->outdex + 1) == tsk->num_bd) ? 0 : tsk->outdex + 1; +} + +/** + * bcom_queue_empty - Checks if a BestComm task BD queue is empty + * @tsk: The BestComm task structure + */ +static inline int +bcom_queue_empty(struct bcom_task *tsk) +{ + return tsk->index == tsk->outdex; +} + +/** + * bcom_queue_full - Checks if a BestComm task BD queue is full + * @tsk: The BestComm task structure + */ +static inline int +bcom_queue_full(struct bcom_task *tsk) +{ + return tsk->outdex == _bcom_next_index(tsk); +} + +/** + * bcom_get_bd - Get a BD from the queue + * @tsk: The BestComm task structure + * index: Index of the BD to fetch + */ +static inline struct bcom_bd +*bcom_get_bd(struct bcom_task *tsk, unsigned int index) +{ + /* A cast to (void*) so the address can be incremented by the + * real size instead of by sizeof(struct bcom_bd) */ + return ((void *)tsk->bd) + (index * tsk->bd_size); +} + +/** + * bcom_buffer_done - Checks if a BestComm + * @tsk: The BestComm task structure + */ +static inline int +bcom_buffer_done(struct bcom_task *tsk) +{ + struct bcom_bd *bd; + if (bcom_queue_empty(tsk)) + return 0; + + bd = bcom_get_bd(tsk, tsk->outdex); + return !(bd->status & BCOM_BD_READY); +} + +/** + * bcom_prepare_next_buffer - clear status of next available buffer. 
+ * @tsk: The BestComm task structure + * + * Returns pointer to next buffer descriptor + */ +static inline struct bcom_bd * +bcom_prepare_next_buffer(struct bcom_task *tsk) +{ + struct bcom_bd *bd; + + bd = bcom_get_bd(tsk, tsk->index); + bd->status = 0; /* cleanup last status */ + return bd; +} + +static inline void +bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie) +{ + struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index); + + tsk->cookie[tsk->index] = cookie; + mb(); /* ensure the bd is really up-to-date */ + bd->status |= BCOM_BD_READY; + tsk->index = _bcom_next_index(tsk); + if (tsk->flags & BCOM_FLAGS_ENABLE_TASK) + bcom_enable(tsk); +} + +static inline void * +bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd) +{ + void *cookie = tsk->cookie[tsk->outdex]; + struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex); + + if (p_status) + *p_status = bd->status; + if (p_bd) + *p_bd = bd; + tsk->outdex = _bcom_next_outdex(tsk); + return cookie; +} + +#endif /* __BESTCOMM_H__ */ diff --git a/include/linux/fsl/bestcomm/bestcomm_priv.h b/include/linux/fsl/bestcomm/bestcomm_priv.h new file mode 100644 index 000000000..3b52f3ffb --- /dev/null +++ b/include/linux/fsl/bestcomm/bestcomm_priv.h @@ -0,0 +1,350 @@ +/* + * Private header for the MPC52xx processor BestComm driver + * + * By private, we mean that driver should not use it directly. It's meant + * to be used by the BestComm engine driver itself and by the intermediate + * layer between the core and the drivers. + * + * Copyright (C) 2006 Sylvain Munaut + * Copyright (C) 2005 Varma Electronics Oy, + * ( by Andrey Volkov ) + * Copyright (C) 2003-2004 MontaVista, Software, Inc. + * ( by Dale Farnsworth ) + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#ifndef __BESTCOMM_PRIV_H__ +#define __BESTCOMM_PRIV_H__ + +#include +#include +#include +#include + +#include "sram.h" + + +/* ======================================================================== */ +/* Engine related stuff */ +/* ======================================================================== */ + +/* Zones sizes and needed alignments */ +#define BCOM_MAX_TASKS 16 +#define BCOM_MAX_VAR 24 +#define BCOM_MAX_INC 8 +#define BCOM_MAX_FDT 64 +#define BCOM_MAX_CTX 20 +#define BCOM_CTX_SIZE (BCOM_MAX_CTX * sizeof(u32)) +#define BCOM_CTX_ALIGN 0x100 +#define BCOM_VAR_SIZE (BCOM_MAX_VAR * sizeof(u32)) +#define BCOM_INC_SIZE (BCOM_MAX_INC * sizeof(u32)) +#define BCOM_VAR_ALIGN 0x80 +#define BCOM_FDT_SIZE (BCOM_MAX_FDT * sizeof(u32)) +#define BCOM_FDT_ALIGN 0x100 + +/** + * struct bcom_tdt - Task Descriptor Table Entry + * + */ +struct bcom_tdt { + u32 start; + u32 stop; + u32 var; + u32 fdt; + u32 exec_status; /* used internally by BestComm engine */ + u32 mvtp; /* used internally by BestComm engine */ + u32 context; + u32 litbase; +}; + +/** + * struct bcom_engine + * + * This holds all info needed globaly to handle the engine + */ +struct bcom_engine { + struct device_node *ofnode; + struct mpc52xx_sdma __iomem *regs; + phys_addr_t regs_base; + + struct bcom_tdt *tdt; + u32 *ctx; + u32 *var; + u32 *fdt; + + spinlock_t lock; +}; + +extern struct bcom_engine *bcom_eng; + + +/* ======================================================================== */ +/* Tasks related stuff */ +/* ======================================================================== */ + +/* Tasks image header */ +#define BCOM_TASK_MAGIC 0x4243544B /* 'BCTK' */ + +struct bcom_task_header { + u32 magic; + u8 desc_size; /* the size fields */ + u8 var_size; /* are given in number */ + u8 inc_size; /* of 32-bits words */ + u8 first_var; + u8 reserved[8]; +}; + +/* Descriptors structure & co */ +#define BCOM_DESC_NOP 0x000001f8 +#define BCOM_LCD_MASK 0x80000000 +#define BCOM_DRD_EXTENDED 0x40000000 +#define BCOM_DRD_INITIATOR_SHIFT 21 + +/* Tasks pragma */ +#define BCOM_PRAGMA_BIT_RSV 7 /* reserved pragma bit */ +#define BCOM_PRAGMA_BIT_PRECISE_INC 6 /* increment 0=when possible, */ + /* 1=iter end */ +#define BCOM_PRAGMA_BIT_RST_ERROR_NO 5 /* don't reset errors on */ + /* task enable */ +#define BCOM_PRAGMA_BIT_PACK 4 /* pack data enable */ +#define BCOM_PRAGMA_BIT_INTEGER 3 /* data alignment */ + /* 0=frac(msb), 1=int(lsb) */ +#define BCOM_PRAGMA_BIT_SPECREAD 2 /* XLB speculative read */ +#define BCOM_PRAGMA_BIT_CW 1 /* write line buffer enable */ +#define BCOM_PRAGMA_BIT_RL 0 /* read line buffer enable */ + + /* Looks like XLB speculative read generates XLB errors when a buffer + * is at the end of the physical memory. i.e. when accessing the + * lasts words, the engine tries to prefetch the next but there is no + * next ... 
+ */ +#define BCOM_STD_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \ + (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \ + (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \ + (0 << BCOM_PRAGMA_BIT_PACK) | \ + (0 << BCOM_PRAGMA_BIT_INTEGER) | \ + (0 << BCOM_PRAGMA_BIT_SPECREAD) | \ + (1 << BCOM_PRAGMA_BIT_CW) | \ + (1 << BCOM_PRAGMA_BIT_RL)) + +#define BCOM_PCI_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \ + (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \ + (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \ + (0 << BCOM_PRAGMA_BIT_PACK) | \ + (1 << BCOM_PRAGMA_BIT_INTEGER) | \ + (0 << BCOM_PRAGMA_BIT_SPECREAD) | \ + (1 << BCOM_PRAGMA_BIT_CW) | \ + (1 << BCOM_PRAGMA_BIT_RL)) + +#define BCOM_ATA_PRAGMA BCOM_STD_PRAGMA +#define BCOM_CRC16_DP_0_PRAGMA BCOM_STD_PRAGMA +#define BCOM_CRC16_DP_1_PRAGMA BCOM_STD_PRAGMA +#define BCOM_FEC_RX_BD_PRAGMA BCOM_STD_PRAGMA +#define BCOM_FEC_TX_BD_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_0_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_1_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_2_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_3_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_BD_0_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_BD_1_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_RX_BD_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_TX_BD_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_LPC_PRAGMA BCOM_STD_PRAGMA +#define BCOM_PCI_RX_PRAGMA BCOM_PCI_PRAGMA +#define BCOM_PCI_TX_PRAGMA BCOM_PCI_PRAGMA + +/* Initiators number */ +#define BCOM_INITIATOR_ALWAYS 0 +#define BCOM_INITIATOR_SCTMR_0 1 +#define BCOM_INITIATOR_SCTMR_1 2 +#define BCOM_INITIATOR_FEC_RX 3 +#define BCOM_INITIATOR_FEC_TX 4 +#define BCOM_INITIATOR_ATA_RX 5 +#define BCOM_INITIATOR_ATA_TX 6 +#define BCOM_INITIATOR_SCPCI_RX 7 +#define BCOM_INITIATOR_SCPCI_TX 8 +#define BCOM_INITIATOR_PSC3_RX 9 +#define BCOM_INITIATOR_PSC3_TX 10 +#define BCOM_INITIATOR_PSC2_RX 11 +#define BCOM_INITIATOR_PSC2_TX 12 +#define BCOM_INITIATOR_PSC1_RX 13 +#define BCOM_INITIATOR_PSC1_TX 14 +#define BCOM_INITIATOR_SCTMR_2 15 +#define BCOM_INITIATOR_SCLPC 16 +#define BCOM_INITIATOR_PSC5_RX 17 +#define BCOM_INITIATOR_PSC5_TX 18 +#define BCOM_INITIATOR_PSC4_RX 19 +#define BCOM_INITIATOR_PSC4_TX 20 +#define BCOM_INITIATOR_I2C2_RX 21 +#define BCOM_INITIATOR_I2C2_TX 22 +#define BCOM_INITIATOR_I2C1_RX 23 +#define BCOM_INITIATOR_I2C1_TX 24 +#define BCOM_INITIATOR_PSC6_RX 25 +#define BCOM_INITIATOR_PSC6_TX 26 +#define BCOM_INITIATOR_IRDA_RX 25 +#define BCOM_INITIATOR_IRDA_TX 26 +#define BCOM_INITIATOR_SCTMR_3 27 +#define BCOM_INITIATOR_SCTMR_4 28 +#define BCOM_INITIATOR_SCTMR_5 29 +#define BCOM_INITIATOR_SCTMR_6 30 +#define BCOM_INITIATOR_SCTMR_7 31 + +/* Initiators priorities */ +#define BCOM_IPR_ALWAYS 7 +#define BCOM_IPR_SCTMR_0 2 +#define BCOM_IPR_SCTMR_1 2 +#define BCOM_IPR_FEC_RX 6 +#define BCOM_IPR_FEC_TX 5 +#define BCOM_IPR_ATA_RX 7 +#define BCOM_IPR_ATA_TX 7 +#define BCOM_IPR_SCPCI_RX 2 +#define BCOM_IPR_SCPCI_TX 2 +#define BCOM_IPR_PSC3_RX 2 +#define BCOM_IPR_PSC3_TX 2 +#define BCOM_IPR_PSC2_RX 2 +#define BCOM_IPR_PSC2_TX 2 +#define BCOM_IPR_PSC1_RX 2 +#define BCOM_IPR_PSC1_TX 2 +#define BCOM_IPR_SCTMR_2 2 +#define BCOM_IPR_SCLPC 2 +#define BCOM_IPR_PSC5_RX 2 +#define BCOM_IPR_PSC5_TX 2 +#define BCOM_IPR_PSC4_RX 2 +#define BCOM_IPR_PSC4_TX 2 +#define BCOM_IPR_I2C2_RX 2 +#define BCOM_IPR_I2C2_TX 2 +#define BCOM_IPR_I2C1_RX 2 +#define BCOM_IPR_I2C1_TX 2 +#define BCOM_IPR_PSC6_RX 2 +#define BCOM_IPR_PSC6_TX 2 +#define BCOM_IPR_IRDA_RX 2 +#define BCOM_IPR_IRDA_TX 2 +#define BCOM_IPR_SCTMR_3 2 +#define BCOM_IPR_SCTMR_4 2 +#define BCOM_IPR_SCTMR_5 2 +#define BCOM_IPR_SCTMR_6 2 +#define BCOM_IPR_SCTMR_7 2 + + 
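+/*
+ * Illustrative sketch, not part of the original header: task-specific
+ * support code typically combines the pragma, initiator and priority
+ * constants above with the API declared below when it loads a task.
+ * "bcom_fec_rx_task_image" is a placeholder name for the task microcode:
+ *
+ *	bcom_load_image(tsk->tasknum, bcom_fec_rx_task_image);
+ *	bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
+ *	bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_FEC_RX);
+ *	out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX);
+ */
+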
+/* ======================================================================== */ +/* API */ +/* ======================================================================== */ + +extern struct bcom_task *bcom_task_alloc(int bd_count, int bd_size, int priv_size); +extern void bcom_task_free(struct bcom_task *tsk); +extern int bcom_load_image(int task, u32 *task_image); +extern void bcom_set_initiator(int task, int initiator); + + +#define TASK_ENABLE 0x8000 + +/** + * bcom_disable_prefetch - Hook to disable bus prefetching + * + * ATA DMA and the original MPC5200 need this due to silicon bugs. At the + * moment disabling prefetch is a one-way street. There is no mechanism + * in place to turn prefetch back on after it has been disabled. There is + * no reason it couldn't be done, it would just be more complex to implement. + */ +static inline void bcom_disable_prefetch(void) +{ + u16 regval; + + regval = in_be16(&bcom_eng->regs->PtdCntrl); + out_be16(&bcom_eng->regs->PtdCntrl, regval | 1); +}; + +static inline void +bcom_enable_task(int task) +{ + u16 reg; + reg = in_be16(&bcom_eng->regs->tcr[task]); + out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE); +} + +static inline void +bcom_disable_task(int task) +{ + u16 reg = in_be16(&bcom_eng->regs->tcr[task]); + out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE); +} + + +static inline u32 * +bcom_task_desc(int task) +{ + return bcom_sram_pa2va(bcom_eng->tdt[task].start); +} + +static inline int +bcom_task_num_descs(int task) +{ + return (bcom_eng->tdt[task].stop - bcom_eng->tdt[task].start)/sizeof(u32) + 1; +} + +static inline u32 * +bcom_task_var(int task) +{ + return bcom_sram_pa2va(bcom_eng->tdt[task].var); +} + +static inline u32 * +bcom_task_inc(int task) +{ + return &bcom_task_var(task)[BCOM_MAX_VAR]; +} + + +static inline int +bcom_drd_is_extended(u32 desc) +{ + return (desc) & BCOM_DRD_EXTENDED; +} + +static inline int +bcom_desc_is_drd(u32 desc) +{ + return !(desc & BCOM_LCD_MASK) && desc != BCOM_DESC_NOP; +} + +static inline int +bcom_desc_initiator(u32 desc) +{ + return (desc >> BCOM_DRD_INITIATOR_SHIFT) & 0x1f; +} + +static inline void +bcom_set_desc_initiator(u32 *desc, int initiator) +{ + *desc = (*desc & ~(0x1f << BCOM_DRD_INITIATOR_SHIFT)) | + ((initiator & 0x1f) << BCOM_DRD_INITIATOR_SHIFT); +} + + +static inline void +bcom_set_task_pragma(int task, int pragma) +{ + u32 *fdt = &bcom_eng->tdt[task].fdt; + *fdt = (*fdt & ~0xff) | pragma; +} + +static inline void +bcom_set_task_auto_start(int task, int next_task) +{ + u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; + out_be16(tcr, (in_be16(tcr) & ~0xff) | 0x00c0 | next_task); +} + +static inline void +bcom_set_tcr_initiator(int task, int initiator) +{ + u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; + out_be16(tcr, (in_be16(tcr) & ~0x1f00) | ((initiator & 0x1f) << 8)); +} + + +#endif /* __BESTCOMM_PRIV_H__ */ + diff --git a/include/linux/fsl/bestcomm/fec.h b/include/linux/fsl/bestcomm/fec.h new file mode 100644 index 000000000..ee565d94d --- /dev/null +++ b/include/linux/fsl/bestcomm/fec.h @@ -0,0 +1,61 @@ +/* + * Header for Bestcomm FEC tasks driver + * + * + * Copyright (C) 2006-2007 Sylvain Munaut + * Copyright (C) 2003-2004 MontaVista, Software, Inc. + * ( by Dale Farnsworth ) + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#ifndef __BESTCOMM_FEC_H__ +#define __BESTCOMM_FEC_H__ + + +struct bcom_fec_bd { + u32 status; + u32 skb_pa; +}; + +#define BCOM_FEC_TX_BD_TFD 0x08000000ul /* transmit frame done */ +#define BCOM_FEC_TX_BD_TC 0x04000000ul /* transmit CRC */ +#define BCOM_FEC_TX_BD_ABC 0x02000000ul /* append bad CRC */ + +#define BCOM_FEC_RX_BD_L 0x08000000ul /* buffer is last in frame */ +#define BCOM_FEC_RX_BD_BC 0x00800000ul /* DA is broadcast */ +#define BCOM_FEC_RX_BD_MC 0x00400000ul /* DA is multicast and not broadcast */ +#define BCOM_FEC_RX_BD_LG 0x00200000ul /* Rx frame length violation */ +#define BCOM_FEC_RX_BD_NO 0x00100000ul /* Rx non-octet aligned frame */ +#define BCOM_FEC_RX_BD_CR 0x00040000ul /* Rx CRC error */ +#define BCOM_FEC_RX_BD_OV 0x00020000ul /* overrun */ +#define BCOM_FEC_RX_BD_TR 0x00010000ul /* Rx frame truncated */ +#define BCOM_FEC_RX_BD_LEN_MASK 0x000007fful /* mask for length of received frame */ +#define BCOM_FEC_RX_BD_ERRORS (BCOM_FEC_RX_BD_LG | BCOM_FEC_RX_BD_NO | \ + BCOM_FEC_RX_BD_CR | BCOM_FEC_RX_BD_OV | BCOM_FEC_RX_BD_TR) + + +extern struct bcom_task * +bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize); + +extern int +bcom_fec_rx_reset(struct bcom_task *tsk); + +extern void +bcom_fec_rx_release(struct bcom_task *tsk); + + +extern struct bcom_task * +bcom_fec_tx_init(int queue_len, phys_addr_t fifo); + +extern int +bcom_fec_tx_reset(struct bcom_task *tsk); + +extern void +bcom_fec_tx_release(struct bcom_task *tsk); + + +#endif /* __BESTCOMM_FEC_H__ */ + diff --git a/include/linux/fsl/bestcomm/gen_bd.h b/include/linux/fsl/bestcomm/gen_bd.h new file mode 100644 index 000000000..de47260e6 --- /dev/null +++ b/include/linux/fsl/bestcomm/gen_bd.h @@ -0,0 +1,53 @@ +/* + * Header for Bestcomm General Buffer Descriptor tasks driver + * + * + * Copyright (C) 2007 Sylvain Munaut + * Copyright (C) 2006 AppSpec Computer Technologies Corp. + * Jeff Gibbons + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * + */ + +#ifndef __BESTCOMM_GEN_BD_H__ +#define __BESTCOMM_GEN_BD_H__ + +struct bcom_gen_bd { + u32 status; + u32 buf_pa; +}; + + +extern struct bcom_task * +bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo, + int initiator, int ipr, int maxbufsize); + +extern int +bcom_gen_bd_rx_reset(struct bcom_task *tsk); + +extern void +bcom_gen_bd_rx_release(struct bcom_task *tsk); + + +extern struct bcom_task * +bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo, + int initiator, int ipr); + +extern int +bcom_gen_bd_tx_reset(struct bcom_task *tsk); + +extern void +bcom_gen_bd_tx_release(struct bcom_task *tsk); + + +/* PSC support utility wrappers */ +struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len, + phys_addr_t fifo, int maxbufsize); +struct bcom_task * bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, + phys_addr_t fifo); +#endif /* __BESTCOMM_GEN_BD_H__ */ + diff --git a/include/linux/fsl/bestcomm/sram.h b/include/linux/fsl/bestcomm/sram.h new file mode 100644 index 000000000..b6d668963 --- /dev/null +++ b/include/linux/fsl/bestcomm/sram.h @@ -0,0 +1,54 @@ +/* + * Handling of a sram zone for bestcomm + * + * + * Copyright (C) 2007 Sylvain Munaut + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#ifndef __BESTCOMM_SRAM_H__ +#define __BESTCOMM_SRAM_H__ + +#include +#include +#include + + +/* Structure used internally */ + /* The internals are here for the inline functions + * sake, certainly not for the user to mess with ! + */ +struct bcom_sram { + phys_addr_t base_phys; + void *base_virt; + unsigned int size; + rh_info_t *rh; + spinlock_t lock; +}; + +extern struct bcom_sram *bcom_sram; + + +/* Public API */ +extern int bcom_sram_init(struct device_node *sram_node, char *owner); +extern void bcom_sram_cleanup(void); + +extern void* bcom_sram_alloc(int size, int align, phys_addr_t *phys); +extern void bcom_sram_free(void *ptr); + +static inline phys_addr_t bcom_sram_va2pa(void *va) { + return bcom_sram->base_phys + + (unsigned long)(va - bcom_sram->base_virt); +} + +static inline void *bcom_sram_pa2va(phys_addr_t pa) { + return bcom_sram->base_virt + + (unsigned long)(pa - bcom_sram->base_phys); +} + + +#endif /* __BESTCOMM_SRAM_H__ */ + diff --git a/include/linux/fsl/edac.h b/include/linux/fsl/edac.h new file mode 100644 index 000000000..148a297d7 --- /dev/null +++ b/include/linux/fsl/edac.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef FSL_EDAC_H +#define FSL_EDAC_H + +struct mpc85xx_edac_pci_plat_data { + struct device_node *of_node; +}; + +#endif diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h new file mode 100644 index 000000000..941b11811 --- /dev/null +++ b/include/linux/fsl/guts.h @@ -0,0 +1,327 @@ +/** + * Freescale 85xx and 86xx Global Utilities register set + * + * Authors: Jeff Brown + * Timur Tabi + * + * Copyright 2004,2007,2012 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __FSL_GUTS_H__ +#define __FSL_GUTS_H__ + +#include +#include + +/** + * Global Utility Registers. + * + * Not all registers defined in this structure are available on all chips, so + * you are expected to know whether a given register actually exists on your + * chip before you access it. + * + * Also, some registers are similar on different chips but have slightly + * different names. In these cases, one name is chosen to avoid extraneous + * #ifdefs.
+ */ +struct ccsr_guts { + u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ + u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ + u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and + * Control Register + */ + u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ + u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ + u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ + u8 res018[0x20 - 0x18]; + u32 porcir; /* 0x.0020 - POR Configuration Information + * Register + */ + u8 res024[0x30 - 0x24]; + u32 gpiocr; /* 0x.0030 - GPIO Control Register */ + u8 res034[0x40 - 0x34]; + u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data + * Register + */ + u8 res044[0x50 - 0x44]; + u32 gpindr; /* 0x.0050 - General-Purpose Input Data + * Register + */ + u8 res054[0x60 - 0x54]; + u32 pmuxcr; /* 0x.0060 - Alternate Function Signal + * Multiplex Control + */ + u32 pmuxcr2; /* 0x.0064 - Alternate function signal + * multiplex control 2 + */ + u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ + u8 res06c[0x70 - 0x6c]; + u32 devdisr; /* 0x.0070 - Device Disable Control */ +#define CCSR_GUTS_DEVDISR_TB1 0x00001000 +#define CCSR_GUTS_DEVDISR_TB0 0x00004000 + u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ + u8 res078[0x7c - 0x78]; + u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control + * Register + */ + u32 powmgtcsr; /* 0x.0080 - Power Management Status and + * Control Register + */ + u32 pmrccr; /* 0x.0084 - Power Management Reset Counter + * Configuration Register + */ + u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter + * Configuration Register + */ + u32 pmcdr; /* 0x.008c - 4Power management clock disable + * register + */ + u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ + u32 rstrscr; /* 0x.0094 - Reset Request Status and + * Control Register + */ + u32 ectrstcr; /* 0x.0098 - Exception reset control register */ + u32 autorstsr; /* 0x.009c - Automatic reset status register */ + u32 pvr; /* 0x.00a0 - Processor Version Register */ + u32 svr; /* 0x.00a4 - System Version Register */ + u8 res0a8[0xb0 - 0xa8]; + u32 rstcr; /* 0x.00b0 - Reset Control Register */ + u8 res0b4[0xc0 - 0xb4]; + u32 iovselsr; /* 0x.00c0 - I/O voltage select status register + Called 'elbcvselcr' on 86xx SOCs */ + u8 res0c4[0x100 - 0xc4]; + u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers + There are 16 registers */ + u8 res140[0x224 - 0x140]; + u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ + u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ + u8 res22c[0x604 - 0x22c]; + u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ + u8 res608[0x800 - 0x608]; + u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ + u8 res804[0x900 - 0x804]; + u32 ircr; /* 0x.0900 - Infrared Control Register */ + u8 res904[0x908 - 0x904]; + u32 dmacr; /* 0x.0908 - DMA Control Register */ + u8 res90c[0x914 - 0x90c]; + u32 elbccr; /* 0x.0914 - eLBC Control Register */ + u8 res918[0xb20 - 0x918]; + u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ + u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ + u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ + u8 resb2c[0xe00 - 0xb2c]; + u32 clkocr; /* 0x.0e00 - Clock Out Select Register */ + u8 rese04[0xe10 - 0xe04]; + u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ + u8 rese14[0xe20 - 0xe14]; + u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ + u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override + * register + */ + u8 rese28[0xf04 - 0xe28]; + u32 srds1cr0; /* 
0x.0f04 - SerDes1 Control Register 0 */ + u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ + u8 resf0c[0xf2c - 0xf0c]; + u32 itcr; /* 0x.0f2c - Internal transaction control + * register + */ + u8 resf30[0xf40 - 0xf30]; + u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ + u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ +} __attribute__ ((packed)); + +u32 fsl_guts_get_svr(void); + +/* Alternate function signal multiplex control */ +#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) + +#ifdef CONFIG_PPC_86xx + +#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ +#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ + +/* + * Set the DMACR register in the GUTS + * + * The DMACR register determines the source of initiated transfers for each + * channel on each DMA controller. Rather than have a bunch of repetitive + * macros for the bit patterns, we just have a function that calculates + * them. + * + * guts: Pointer to GUTS structure + * co: The DMA controller (0 or 1) + * ch: The channel on the DMA controller (0, 1, 2, or 3) + * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) + */ +static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts, + unsigned int co, unsigned int ch, unsigned int device) +{ + unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); + + clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift); +} + +#define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000 +#define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */ +#define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */ +#define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */ +#define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */ +#define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */ +#define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */ +#define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */ +#define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */ +#define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */ +#define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */ +#define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */ +#define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008 +#define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004 +#define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002 +#define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001 + +/* + * Set the DMA external control bits in the GUTS + * + * The DMA external control bits in the PMUXCR are only meaningful for + * channels 0 and 3. Any other channels are ignored. 
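 * For reference, the shift computed below maps (co, ch) onto the
 * CCSR_GUTS_PMUXCR_DMA* bits defined above:
 *   co=0, ch=0 -> shift 1 -> CCSR_GUTS_PMUXCR_DMA1_0 (0x00000002)
 *   co=0, ch=3 -> shift 0 -> CCSR_GUTS_PMUXCR_DMA1_3 (0x00000001)
 *   co=1, ch=0 -> shift 3 -> CCSR_GUTS_PMUXCR_DMA2_0 (0x00000008)
 *   co=1, ch=3 -> shift 2 -> CCSR_GUTS_PMUXCR_DMA2_3 (0x00000004)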
+ * + * guts: Pointer to GUTS structure + * co: The DMA controller (0 or 1) + * ch: The channel on the DMA controller (0, 1, 2, or 3) + * value: the new value for the bit (0 or 1) + */ +static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts, + unsigned int co, unsigned int ch, unsigned int value) +{ + if ((ch == 0) || (ch == 3)) { + unsigned int shift = 2 * (co + 1) - (ch & 1) - 1; + + clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift); + } +} + +#define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000 +#define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000 +#define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000 +#define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25 +#define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000 +#define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \ + (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT) +#define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16 +#define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000 +#define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT) +#define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF +#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK) + +#endif + +struct ccsr_rcpm_v1 { + u8 res0000[4]; + __be32 cdozsr; /* 0x0004 Core Doze Status Register */ + u8 res0008[4]; + __be32 cdozcr; /* 0x000c Core Doze Control Register */ + u8 res0010[4]; + __be32 cnapsr; /* 0x0014 Core Nap Status Register */ + u8 res0018[4]; + __be32 cnapcr; /* 0x001c Core Nap Control Register */ + u8 res0020[4]; + __be32 cdozpsr; /* 0x0024 Core Doze Previous Status Register */ + u8 res0028[4]; + __be32 cnappsr; /* 0x002c Core Nap Previous Status Register */ + u8 res0030[4]; + __be32 cwaitsr; /* 0x0034 Core Wait Status Register */ + u8 res0038[4]; + __be32 cwdtdsr; /* 0x003c Core Watchdog Detect Status Register */ + __be32 powmgtcsr; /* 0x0040 PM Control&Status Register */ +#define RCPM_POWMGTCSR_SLP 0x00020000 + u8 res0044[12]; + __be32 ippdexpcr; /* 0x0050 IP Powerdown Exception Control Register */ + u8 res0054[16]; + __be32 cpmimr; /* 0x0064 Core PM IRQ Mask Register */ + u8 res0068[4]; + __be32 cpmcimr; /* 0x006c Core PM Critical IRQ Mask Register */ + u8 res0070[4]; + __be32 cpmmcmr; /* 0x0074 Core PM Machine Check Mask Register */ + u8 res0078[4]; + __be32 cpmnmimr; /* 0x007c Core PM NMI Mask Register */ + u8 res0080[4]; + __be32 ctbenr; /* 0x0084 Core Time Base Enable Register */ + u8 res0088[4]; + __be32 ctbckselr; /* 0x008c Core Time Base Clock Select Register */ + u8 res0090[4]; + __be32 ctbhltcr; /* 0x0094 Core Time Base Halt Control Register */ + u8 res0098[4]; + __be32 cmcpmaskcr; /* 0x00a4 Core Machine Check Mask Register */ +}; + +struct ccsr_rcpm_v2 { + u8 res_00[12]; + __be32 tph10sr0; /* Thread PH10 Status Register */ + u8 res_10[12]; + __be32 tph10setr0; /* Thread PH10 Set Control Register */ + u8 res_20[12]; + __be32 tph10clrr0; /* Thread PH10 Clear Control Register */ + u8 res_30[12]; + __be32 tph10psr0; /* Thread PH10 Previous Status Register */ + u8 res_40[12]; + __be32 twaitsr0; /* Thread Wait Status Register */ + u8 res_50[96]; + __be32 pcph15sr; /* Physical Core PH15 Status Register */ + __be32 pcph15setr; /* Physical Core PH15 Set Control Register */ + __be32 pcph15clrr; /* Physical Core PH15 Clear Control Register */ + __be32 pcph15psr; /* Physical Core PH15 Prev Status Register */ + u8 res_c0[16]; + __be32 pcph20sr; /* Physical Core PH20 Status Register */ + __be32 pcph20setr; /* Physical Core PH20 Set Control Register */ + __be32 pcph20clrr; /* Physical Core PH20 Clear Control Register */ + __be32 pcph20psr; /* Physical Core PH20 Prev Status Register */ + 
__be32 pcpw20sr; /* Physical Core PW20 Status Register */ + u8 res_e0[12]; + __be32 pcph30sr; /* Physical Core PH30 Status Register */ + __be32 pcph30setr; /* Physical Core PH30 Set Control Register */ + __be32 pcph30clrr; /* Physical Core PH30 Clear Control Register */ + __be32 pcph30psr; /* Physical Core PH30 Prev Status Register */ + u8 res_100[32]; + __be32 ippwrgatecr; /* IP Power Gating Control Register */ + u8 res_124[12]; + __be32 powmgtcsr; /* Power Management Control & Status Reg */ +#define RCPM_POWMGTCSR_LPM20_RQ 0x00100000 +#define RCPM_POWMGTCSR_LPM20_ST 0x00000200 +#define RCPM_POWMGTCSR_P_LPM20_ST 0x00000100 + u8 res_134[12]; + __be32 ippdexpcr[4]; /* IP Powerdown Exception Control Reg */ + u8 res_150[12]; + __be32 tpmimr0; /* Thread PM Interrupt Mask Reg */ + u8 res_160[12]; + __be32 tpmcimr0; /* Thread PM Crit Interrupt Mask Reg */ + u8 res_170[12]; + __be32 tpmmcmr0; /* Thread PM Machine Check Interrupt Mask Reg */ + u8 res_180[12]; + __be32 tpmnmimr0; /* Thread PM NMI Mask Reg */ + u8 res_190[12]; + __be32 tmcpmaskcr0; /* Thread Machine Check Mask Control Reg */ + __be32 pctbenr; /* Physical Core Time Base Enable Reg */ + __be32 pctbclkselr; /* Physical Core Time Base Clock Select */ + __be32 tbclkdivr; /* Time Base Clock Divider Register */ + u8 res_1ac[4]; + __be32 ttbhltcr[4]; /* Thread Time Base Halt Control Register */ + __be32 clpcl10sr; /* Cluster PCL10 Status Register */ + __be32 clpcl10setr; /* Cluster PCL30 Set Control Register */ + __be32 clpcl10clrr; /* Cluster PCL30 Clear Control Register */ + __be32 clpcl10psr; /* Cluster PCL30 Prev Status Register */ + __be32 cddslpsetr; /* Core Domain Deep Sleep Set Register */ + __be32 cddslpclrr; /* Core Domain Deep Sleep Clear Register */ + __be32 cdpwroksetr; /* Core Domain Power OK Set Register */ + __be32 cdpwrokclrr; /* Core Domain Power OK Clear Register */ + __be32 cdpwrensr; /* Core Domain Power Enable Status Register */ + __be32 cddslsr; /* Core Domain Deep Sleep Status Register */ + u8 res_1e8[8]; + __be32 dslpcntcr[8]; /* Deep Sleep Counter Cfg Register */ + u8 res_300[3568]; +}; + +#endif diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h new file mode 100644 index 000000000..f27cb1408 --- /dev/null +++ b/include/linux/fsl/mc.h @@ -0,0 +1,562 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Freescale Management Complex (MC) bus public interface + * + * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. + * Author: German Rivera + * + */ +#ifndef _FSL_MC_H_ +#define _FSL_MC_H_ + +#include +#include +#include + +#define FSL_MC_VENDOR_FREESCALE 0x1957 + +struct irq_domain; +struct msi_domain_info; + +struct fsl_mc_device; +struct fsl_mc_io; + +/** + * struct fsl_mc_driver - MC object device driver object + * @driver: Generic device driver + * @match_id_table: table of supported device matching Ids + * @probe: Function called when a device is added + * @remove: Function called when a device is removed + * @shutdown: Function called at shutdown time to quiesce the device + * @suspend: Function called when a device is stopped + * @resume: Function called when a device is resumed + * + * Generic DPAA device driver object for device drivers that are registered + * with a DPRC bus. This structure is to be embedded in each device-specific + * driver structure. 
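 * A minimal sketch of such an embedding (illustrative only: the my_dpni_*
 * names are placeholders, and "dpni" is just one example object type):
 *
 *	static const struct fsl_mc_device_id my_dpni_id_table[] = {
 *		{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpni" },
 *		{ }
 *	};
 *
 *	static struct fsl_mc_driver my_dpni_driver = {
 *		.driver		= { .name = "my_dpni" },
 *		.match_id_table	= my_dpni_id_table,
 *		.probe		= my_dpni_probe,
 *		.remove		= my_dpni_remove,
 *	};
 *	module_fsl_mc_driver(my_dpni_driver);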
+ */ +struct fsl_mc_driver { + struct device_driver driver; + const struct fsl_mc_device_id *match_id_table; + int (*probe)(struct fsl_mc_device *dev); + int (*remove)(struct fsl_mc_device *dev); + void (*shutdown)(struct fsl_mc_device *dev); + int (*suspend)(struct fsl_mc_device *dev, pm_message_t state); + int (*resume)(struct fsl_mc_device *dev); +}; + +#define to_fsl_mc_driver(_drv) \ + container_of(_drv, struct fsl_mc_driver, driver) + +/** + * enum fsl_mc_pool_type - Types of allocatable MC bus resources + * + * Entries in these enum are used as indices in the array of resource + * pools of an fsl_mc_bus object. + */ +enum fsl_mc_pool_type { + FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */ + FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */ + FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */ + FSL_MC_POOL_IRQ, + + /* + * NOTE: New resource pool types must be added before this entry + */ + FSL_MC_NUM_POOL_TYPES +}; + +/** + * struct fsl_mc_resource - MC generic resource + * @type: type of resource + * @id: unique MC resource Id within the resources of the same type + * @data: pointer to resource-specific data if the resource is currently + * allocated, or NULL if the resource is not currently allocated. + * @parent_pool: pointer to the parent resource pool from which this + * resource is allocated from. + * @node: Node in the free list of the corresponding resource pool + * + * NOTE: This structure is to be embedded as a field of specific + * MC resource structures. + */ +struct fsl_mc_resource { + enum fsl_mc_pool_type type; + s32 id; + void *data; + struct fsl_mc_resource_pool *parent_pool; + struct list_head node; +}; + +/** + * struct fsl_mc_device_irq - MC object device message-based interrupt + * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs() + * @mc_dev: MC object device that owns this interrupt + * @dev_irq_index: device-relative IRQ index + * @resource: MC generic resource associated with the interrupt + */ +struct fsl_mc_device_irq { + struct msi_desc *msi_desc; + struct fsl_mc_device *mc_dev; + u8 dev_irq_index; + struct fsl_mc_resource resource; +}; + +#define to_fsl_mc_irq(_mc_resource) \ + container_of(_mc_resource, struct fsl_mc_device_irq, resource) + +/* Opened state - Indicates that an object is open by at least one owner */ +#define FSL_MC_OBJ_STATE_OPEN 0x00000001 +/* Plugged state - Indicates that the object is plugged */ +#define FSL_MC_OBJ_STATE_PLUGGED 0x00000002 + +/** + * Shareability flag - Object flag indicating no memory shareability. + * the object generates memory accesses that are non coherent with other + * masters; + * user is responsible for proper memory handling through IOMMU configuration. 
+ */ +#define FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 + +/** + * struct fsl_mc_obj_desc - Object descriptor + * @type: Type of object: NULL terminated string + * @id: ID of logical object resource + * @vendor: Object vendor identifier + * @ver_major: Major version number + * @ver_minor: Minor version number + * @irq_count: Number of interrupts supported by the object + * @region_count: Number of mappable regions supported by the object + * @state: Object state: combination of FSL_MC_OBJ_STATE_ states + * @label: Object label: NULL terminated string + * @flags: Object's flags + */ +struct fsl_mc_obj_desc { + char type[16]; + int id; + u16 vendor; + u16 ver_major; + u16 ver_minor; + u8 irq_count; + u8 region_count; + u32 state; + char label[16]; + u16 flags; +}; + +/** + * Bit masks for a MC object device (struct fsl_mc_device) flags + */ +#define FSL_MC_IS_DPRC 0x0001 + +/** + * struct fsl_mc_device - MC object device object + * @dev: Linux driver model device object + * @dma_mask: Default DMA mask + * @flags: MC object device flags + * @icid: Isolation context ID for the device + * @mc_handle: MC handle for the corresponding MC object opened + * @mc_io: Pointer to MC IO object assigned to this device or + * NULL if none. + * @obj_desc: MC description of the DPAA device + * @regions: pointer to array of MMIO region entries + * @irqs: pointer to array of pointers to interrupts allocated to this device + * @resource: generic resource associated with this MC object device, if any. + * + * Generic device object for MC object devices that are "attached" to a + * MC bus. + * + * NOTES: + * - For a non-DPRC object its icid is the same as its parent DPRC's icid. + * - The SMMU notifier callback gets invoked after device_add() has been + * called for an MC object device, but before the device-specific probe + * callback gets called. + * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC + * portals. For all other MC objects, their device drivers are responsible for + * allocating MC portals for them by calling fsl_mc_portal_allocate(). + * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are + * treated as resources that can be allocated/deallocated from the + * corresponding resource pool in the object's parent DPRC, using the + * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects + * are known as "allocatable" objects. For them, the corresponding + * fsl_mc_device's 'resource' points to the associated resource object. + * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI), + * 'resource' is NULL. 
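 * - As a sketch of the two allocation calls mentioned above (error handling
 *   omitted; 'mc_io' and 'dpbp_dev' are local placeholders):
 *
 *	struct fsl_mc_io *mc_io;
 *	struct fsl_mc_device *dpbp_dev;
 *
 *	err = fsl_mc_portal_allocate(mc_dev, 0, &mc_io);
 *	err = fsl_mc_object_allocate(mc_dev, FSL_MC_POOL_DPBP, &dpbp_dev);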
+ */ +struct fsl_mc_device { + struct device dev; + u64 dma_mask; + u16 flags; + u16 icid; + u16 mc_handle; + struct fsl_mc_io *mc_io; + struct fsl_mc_obj_desc obj_desc; + struct resource *regions; + struct fsl_mc_device_irq **irqs; + struct fsl_mc_resource *resource; +}; + +#define to_fsl_mc_device(_dev) \ + container_of(_dev, struct fsl_mc_device, dev) + +#define MC_CMD_NUM_OF_PARAMS 7 + +struct mc_cmd_header { + u8 src_id; + u8 flags_hw; + u8 status; + u8 flags_sw; + __le16 token; + __le16 cmd_id; +}; + +struct fsl_mc_command { + u64 header; + u64 params[MC_CMD_NUM_OF_PARAMS]; +}; + +enum mc_cmd_status { + MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ + MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ + MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */ + MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */ + MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */ + MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */ + MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */ + MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */ + MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */ + MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */ + MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */ + MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */ +}; + +/* + * MC command flags + */ + +/* High priority flag */ +#define MC_CMD_FLAG_PRI 0x80 +/* Command completion flag */ +#define MC_CMD_FLAG_INTR_DIS 0x01 + +static inline u64 mc_encode_cmd_header(u16 cmd_id, + u32 cmd_flags, + u16 token) +{ + u64 header = 0; + struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header; + + hdr->cmd_id = cpu_to_le16(cmd_id); + hdr->token = cpu_to_le16(token); + hdr->status = MC_CMD_STATUS_READY; + if (cmd_flags & MC_CMD_FLAG_PRI) + hdr->flags_hw = MC_CMD_FLAG_PRI; + if (cmd_flags & MC_CMD_FLAG_INTR_DIS) + hdr->flags_sw = MC_CMD_FLAG_INTR_DIS; + + return header; +} + +static inline u16 mc_cmd_hdr_read_token(struct fsl_mc_command *cmd) +{ + struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header; + u16 token = le16_to_cpu(hdr->token); + + return token; +} + +struct mc_rsp_create { + __le32 object_id; +}; + +struct mc_rsp_api_ver { + __le16 major_ver; + __le16 minor_ver; +}; + +static inline u32 mc_cmd_read_object_id(struct fsl_mc_command *cmd) +{ + struct mc_rsp_create *rsp_params; + + rsp_params = (struct mc_rsp_create *)cmd->params; + return le32_to_cpu(rsp_params->object_id); +} + +static inline void mc_cmd_read_api_version(struct fsl_mc_command *cmd, + u16 *major_ver, + u16 *minor_ver) +{ + struct mc_rsp_api_ver *rsp_params; + + rsp_params = (struct mc_rsp_api_ver *)cmd->params; + *major_ver = le16_to_cpu(rsp_params->major_ver); + *minor_ver = le16_to_cpu(rsp_params->minor_ver); +} + +/** + * Bit masks for a MC I/O object (struct fsl_mc_io) flags + */ +#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001 + +/** + * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command() + * @dev: device associated with this Mc I/O object + * @flags: flags for mc_send_command() + * @portal_size: MC command portal size in bytes + * @portal_phys_addr: MC command portal physical address + * @portal_virt_addr: MC command portal virtual address + * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal. 
+ * + * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not + * set: + * @mutex: Mutex to serialize mc_send_command() calls that use the same MC + * portal, if the fsl_mc_io object was created with the + * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this + * fsl_mc_io object must be made only from non-atomic context. + * + * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is + * set: + * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC + * portal, if the fsl_mc_io object was created with the + * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this + * fsl_mc_io object can be made from atomic or non-atomic context. + */ +struct fsl_mc_io { + struct device *dev; + u16 flags; + u32 portal_size; + phys_addr_t portal_phys_addr; + void __iomem *portal_virt_addr; + struct fsl_mc_device *dpmcp_dev; + union { + /* + * This field is only meaningful if the + * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set + */ + struct mutex mutex; /* serializes mc_send_command() */ + + /* + * This field is only meaningful if the + * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set + */ + spinlock_t spinlock; /* serializes mc_send_command() */ + }; +}; + +int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd); + +#ifdef CONFIG_FSL_MC_BUS +#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type) +#else +/* If fsl-mc bus is not present device cannot belong to fsl-mc bus */ +#define dev_is_fsl_mc(_dev) (0) +#endif + +/* + * module_fsl_mc_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_fsl_mc_driver(__fsl_mc_driver) \ + module_driver(__fsl_mc_driver, fsl_mc_driver_register, \ + fsl_mc_driver_unregister) + +/* + * Macro to avoid include chaining to get THIS_MODULE + */ +#define fsl_mc_driver_register(drv) \ + __fsl_mc_driver_register(drv, THIS_MODULE) + +int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver, + struct module *owner); + +void fsl_mc_driver_unregister(struct fsl_mc_driver *driver); + +int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, + u16 mc_io_flags, + struct fsl_mc_io **new_mc_io); + +void fsl_mc_portal_free(struct fsl_mc_io *mc_io); + +int fsl_mc_portal_reset(struct fsl_mc_io *mc_io); + +int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, + enum fsl_mc_pool_type pool_type, + struct fsl_mc_device **new_mc_adev); + +void fsl_mc_object_free(struct fsl_mc_device *mc_adev); + +struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode, + struct msi_domain_info *info, + struct irq_domain *parent); + +int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); + +void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); + +extern struct bus_type fsl_mc_bus_type; + +extern struct device_type fsl_mc_bus_dprc_type; +extern struct device_type fsl_mc_bus_dpni_type; +extern struct device_type fsl_mc_bus_dpio_type; +extern struct device_type fsl_mc_bus_dpsw_type; +extern struct device_type fsl_mc_bus_dpbp_type; +extern struct device_type fsl_mc_bus_dpcon_type; +extern struct device_type fsl_mc_bus_dpmcp_type; +extern struct device_type fsl_mc_bus_dpmac_type; +extern struct device_type fsl_mc_bus_dprtc_type; + +static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev) +{ + 
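	/*
	 * Sketch of how a command built with mc_encode_cmd_header() is issued
	 * over a portal via mc_send_command() (illustrative only: 'cmd_id',
	 * 'token' and 'mc_io' are placeholders supplied by the object driver):
	 *
	 *	struct fsl_mc_command cmd = { 0 };
	 *	int err;
	 *
	 *	cmd.header = mc_encode_cmd_header(cmd_id, MC_CMD_FLAG_PRI, token);
	 *	err = mc_send_command(mc_io, &cmd);
	 */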
return mc_dev->dev.type == &fsl_mc_bus_dprc_type; +} + +static inline bool is_fsl_mc_bus_dpni(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpni_type; +} + +static inline bool is_fsl_mc_bus_dpio(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpio_type; +} + +static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpsw_type; +} + +static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpbp_type; +} + +static inline bool is_fsl_mc_bus_dpcon(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpcon_type; +} + +static inline bool is_fsl_mc_bus_dpmcp(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpmcp_type; +} + +static inline bool is_fsl_mc_bus_dpmac(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpmac_type; +} + +static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dprtc_type; +} + +/* + * Data Path Buffer Pool (DPBP) API + * Contains initialization APIs and runtime control APIs for DPBP + */ + +int dpbp_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpbp_id, + u16 *token); + +int dpbp_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpbp_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpbp_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpbp_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * struct dpbp_attr - Structure representing DPBP attributes + * @id: DPBP object ID + * @bpid: Hardware buffer pool ID; should be used as an argument in + * acquire/release operations on buffers + */ +struct dpbp_attr { + int id; + u16 bpid; +}; + +int dpbp_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpbp_attr *attr); + +/* Data Path Concentrator (DPCON) API + * Contains initialization APIs and runtime control APIs for DPCON + */ + +/** + * Use it to disable notifications; see dpcon_set_notification() + */ +#define DPCON_INVALID_DPIO_ID (int)(-1) + +int dpcon_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpcon_id, + u16 *token); + +int dpcon_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpcon_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpcon_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpcon_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * struct dpcon_attr - Structure representing DPCON attributes + * @id: DPCON object ID + * @qbman_ch_id: Channel ID to be used by dequeue operation + * @num_priorities: Number of priorities for the DPCON channel (1-8) + */ +struct dpcon_attr { + int id; + u16 qbman_ch_id; + u8 num_priorities; +}; + +int dpcon_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpcon_attr *attr); + +/** + * struct dpcon_notification_cfg - Structure representing notification params + * @dpio_id: DPIO object ID; must be configured with a notification channel; + * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; + * @priority: Priority selection within the DPIO channel; valid values + * are 0-7, depending on the number of priorities in that channel + * @user_ctx: User context value provided with each CDAN message + */ +struct dpcon_notification_cfg { + int dpio_id; + u8 priority; 
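	/*
	 * Typical population of this structure before calling
	 * dpcon_set_notification() below (illustrative only: 'dpio_dev',
	 * 'my_ctx', 'mc_io' and 'token' are placeholders):
	 *
	 *	struct dpcon_notification_cfg cfg = {
	 *		.dpio_id	= dpio_dev->obj_desc.id,
	 *		.priority	= 0,
	 *		.user_ctx	= (u64)(uintptr_t)my_ctx,
	 *	};
	 *
	 *	err = dpcon_set_notification(mc_io, 0, token, &cfg);
	 */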
+ u64 user_ctx; +}; + +int dpcon_set_notification(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpcon_notification_cfg *cfg); + +#endif /* _FSL_MC_H_ */ diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h new file mode 100644 index 000000000..c1f003aad --- /dev/null +++ b/include/linux/fsl/ptp_qoriq.h @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2010 OMICRON electronics GmbH + * Copyright 2018 NXP + */ +#ifndef __PTP_QORIQ_H__ +#define __PTP_QORIQ_H__ + +#include +#include + +/* + * qoriq ptp registers + */ +struct ctrl_regs { + u32 tmr_ctrl; /* Timer control register */ + u32 tmr_tevent; /* Timestamp event register */ + u32 tmr_temask; /* Timer event mask register */ + u32 tmr_pevent; /* Timestamp event register */ + u32 tmr_pemask; /* Timer event mask register */ + u32 tmr_stat; /* Timestamp status register */ + u32 tmr_cnt_h; /* Timer counter high register */ + u32 tmr_cnt_l; /* Timer counter low register */ + u32 tmr_add; /* Timer drift compensation addend register */ + u32 tmr_acc; /* Timer accumulator register */ + u32 tmr_prsc; /* Timer prescale */ + u8 res1[4]; + u32 tmroff_h; /* Timer offset high */ + u32 tmroff_l; /* Timer offset low */ +}; + +struct alarm_regs { + u32 tmr_alarm1_h; /* Timer alarm 1 high register */ + u32 tmr_alarm1_l; /* Timer alarm 1 high register */ + u32 tmr_alarm2_h; /* Timer alarm 2 high register */ + u32 tmr_alarm2_l; /* Timer alarm 2 high register */ +}; + +struct fiper_regs { + u32 tmr_fiper1; /* Timer fixed period interval */ + u32 tmr_fiper2; /* Timer fixed period interval */ + u32 tmr_fiper3; /* Timer fixed period interval */ +}; + +struct etts_regs { + u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ + u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ + u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ + u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ +}; + +struct qoriq_ptp_registers { + struct ctrl_regs __iomem *ctrl_regs; + struct alarm_regs __iomem *alarm_regs; + struct fiper_regs __iomem *fiper_regs; + struct etts_regs __iomem *etts_regs; +}; + +/* Offset definitions for the four register groups */ +#define CTRL_REGS_OFFSET 0x0 +#define ALARM_REGS_OFFSET 0x40 +#define FIPER_REGS_OFFSET 0x80 +#define ETTS_REGS_OFFSET 0xa0 + +#define FMAN_CTRL_REGS_OFFSET 0x80 +#define FMAN_ALARM_REGS_OFFSET 0xb8 +#define FMAN_FIPER_REGS_OFFSET 0xd0 +#define FMAN_ETTS_REGS_OFFSET 0xe0 + + +/* Bit definitions for the TMR_CTRL register */ +#define ALM1P (1<<31) /* Alarm1 output polarity */ +#define ALM2P (1<<30) /* Alarm2 output polarity */ +#define FIPERST (1<<28) /* FIPER start indication */ +#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */ +#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */ +#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */ +#define TCLK_PERIOD_MASK (0x3ff) +#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */ +#define FRD (1<<14) /* FIPER Realignment Disable */ +#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */ +#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */ +#define ETEP2 (1<<9) /* External trigger 2 edge polarity */ +#define ETEP1 (1<<8) /* External trigger 1 edge polarity */ +#define COPH (1<<7) /* Generated clock output phase. */ +#define CIPH (1<<6) /* External oscillator input clock phase */ +#define TMSR (1<<5) /* Timer soft reset. 
*/ +#define BYP (1<<3) /* Bypass drift compensated clock */ +#define TE (1<<2) /* 1588 timer enable. */ +#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */ +#define CKSEL_MASK (0x3) + +/* Bit definitions for the TMR_TEVENT register */ +#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */ +#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */ +#define ALM2 (1<<17) /* Current time = alarm time register 2 */ +#define ALM1 (1<<16) /* Current time = alarm time register 1 */ +#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */ +#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */ +#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */ + +/* Bit definitions for the TMR_TEMASK register */ +#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */ +#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */ +#define ALM2EN (1<<17) /* Timer ALM2 event enable */ +#define ALM1EN (1<<16) /* Timer ALM1 event enable */ +#define PP1EN (1<<7) /* Periodic pulse event 1 enable */ +#define PP2EN (1<<6) /* Periodic pulse event 2 enable */ + +/* Bit definitions for the TMR_PEVENT register */ +#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */ +#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */ +#define RXP (1<<0) /* PTP frame has been received */ + +/* Bit definitions for the TMR_PEMASK register */ +#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */ +#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */ +#define RXPEN (1<<0) /* Receive PTP packet event enable */ + +/* Bit definitions for the TMR_STAT register */ +#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ +#define STAT_VEC_MASK (0x3f) + +/* Bit definitions for the TMR_PRSC register */ +#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ +#define PRSC_OCK_MASK (0xffff) + + +#define DRIVER "ptp_qoriq" +#define N_EXT_TS 2 + +#define DEFAULT_CKSEL 1 +#define DEFAULT_TMR_PRSC 2 +#define DEFAULT_FIPER1_PERIOD 1000000000 +#define DEFAULT_FIPER2_PERIOD 100000 + +struct qoriq_ptp { + void __iomem *base; + struct qoriq_ptp_registers regs; + spinlock_t lock; /* protects regs */ + struct ptp_clock *clock; + struct ptp_clock_info caps; + struct resource *rsrc; + int irq; + int phc_index; + u64 alarm_interval; /* for periodic alarm */ + u64 alarm_value; + u32 tclk_period; /* nanoseconds */ + u32 tmr_prsc; + u32 tmr_add; + u32 cksel; + u32 tmr_fiper1; + u32 tmr_fiper2; +}; + +static inline u32 qoriq_read(unsigned __iomem *addr) +{ + u32 val; + + val = ioread32be(addr); + return val; +} + +static inline void qoriq_write(unsigned __iomem *addr, u32 val) +{ + iowrite32be(val, addr); +} + +#endif diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h new file mode 100644 index 000000000..60cef8227 --- /dev/null +++ b/include/linux/fsl_devices.h @@ -0,0 +1,157 @@ +/* + * include/linux/fsl_devices.h + * + * Definitions for any platform device related flags or structures for + * Freescale processor devices + * + * Maintainer: Kumar Gala + * + * Copyright 2004,2012 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef _FSL_DEVICE_H_ +#define _FSL_DEVICE_H_ + +#define FSL_UTMI_PHY_DLY 10 /*As per P1010RM, delay for UTMI + PHY CLK to become stable - 10ms*/ +#define FSL_USB_PHY_CLK_TIMEOUT 10000 /* uSec */ + +#include + +/* + * Some conventions on how we handle peripherals on Freescale chips + * + * unique device: a platform_device entry in fsl_plat_devs[] plus + * associated device information in its platform_data structure. + * + * A chip is described by a set of unique devices. + * + * Each sub-arch has its own master list of unique devices and + * enumerates them by enum fsl_devices in a sub-arch specific header + * + * The platform data structure is broken into two parts. The + * first is device specific information that help identify any + * unique features of a peripheral. The second is any + * information that may be defined by the board or how the device + * is connected externally of the chip. + * + * naming conventions: + * - platform data structures: _platform_data + * - platform data device flags: FSL__DEV_ + * - platform data board flags: FSL__BRD_ + * + */ + +enum fsl_usb2_controller_ver { + FSL_USB_VER_NONE = -1, + FSL_USB_VER_OLD = 0, + FSL_USB_VER_1_6 = 1, + FSL_USB_VER_2_2 = 2, + FSL_USB_VER_2_4 = 3, + FSL_USB_VER_2_5 = 4, +}; + +enum fsl_usb2_operating_modes { + FSL_USB2_MPH_HOST, + FSL_USB2_DR_HOST, + FSL_USB2_DR_DEVICE, + FSL_USB2_DR_OTG, +}; + +enum fsl_usb2_phy_modes { + FSL_USB2_PHY_NONE, + FSL_USB2_PHY_ULPI, + FSL_USB2_PHY_UTMI, + FSL_USB2_PHY_UTMI_WIDE, + FSL_USB2_PHY_SERIAL, + FSL_USB2_PHY_UTMI_DUAL, +}; + +struct clk; +struct platform_device; + +struct fsl_usb2_platform_data { + /* board specific information */ + enum fsl_usb2_controller_ver controller_ver; + enum fsl_usb2_operating_modes operating_mode; + enum fsl_usb2_phy_modes phy_mode; + unsigned int port_enables; + unsigned int workaround; + + int (*init)(struct platform_device *); + void (*exit)(struct platform_device *); + void __iomem *regs; /* ioremap'd register base */ + struct clk *clk; + unsigned power_budget; /* hcd->power_budget */ + unsigned big_endian_mmio:1; + unsigned big_endian_desc:1; + unsigned es:1; /* need USBMODE:ES */ + unsigned le_setup_buf:1; + unsigned have_sysif_regs:1; + unsigned invert_drvvbus:1; + unsigned invert_pwr_fault:1; + + unsigned suspended:1; + unsigned already_suspended:1; + unsigned has_fsl_erratum_a007792:1; + unsigned has_fsl_erratum_a005275:1; + unsigned has_fsl_erratum_a005697:1; + unsigned check_phy_clk_valid:1; + + /* register save area for suspend/resume */ + u32 pm_command; + u32 pm_status; + u32 pm_intr_enable; + u32 pm_frame_index; + u32 pm_segment; + u32 pm_frame_list; + u32 pm_async_next; + u32 pm_configured_flag; + u32 pm_portsc; + u32 pm_usbgenctrl; +}; + +/* Flags in fsl_usb2_mph_platform_data */ +#define FSL_USB2_PORT0_ENABLED 0x00000001 +#define FSL_USB2_PORT1_ENABLED 0x00000002 + +#define FLS_USB2_WORKAROUND_ENGCM09152 (1 << 0) + +struct spi_device; + +struct fsl_spi_platform_data { + u32 initial_spmode; /* initial SPMODE value */ + s16 bus_num; + unsigned int flags; +#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */ +#define SPI_CPM_MODE (1 << 1) /* CPM/QE ("DMA") mode */ +#define SPI_CPM1 (1 << 2) /* SPI unit is in CPM1 block */ +#define SPI_CPM2 (1 << 3) /* SPI unit is in CPM2 block */ +#define SPI_QE (1 << 4) /* SPI unit is in QE block */ + /* board specific information */ + u16 max_chipselect; + void (*cs_control)(struct spi_device *spi, bool on); + u32 sysclk; +}; + +struct mpc8xx_pcmcia_ops { + void(*hw_ctrl)(int slot, int enable); + 
int(*voltage_set)(int slot, int vcc, int vpp); +}; + +/* Returns non-zero if the current suspend operation would + * lead to a deep sleep (i.e. power removed from the core, + * instead of just the clock). + */ +#if defined(CONFIG_PPC_83xx) && defined(CONFIG_SUSPEND) +int fsl_deep_sleep(void); +#else +static inline int fsl_deep_sleep(void) { return 0; } +#endif + +#endif /* _FSL_DEVICE_H_ */ diff --git a/include/linux/fsl_hypervisor.h b/include/linux/fsl_hypervisor.h new file mode 100644 index 000000000..2a707d7fb --- /dev/null +++ b/include/linux/fsl_hypervisor.h @@ -0,0 +1,63 @@ +/* + * Freescale hypervisor ioctl and kernel interface + * + * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. + * Author: Timur Tabi + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * This software is provided by Freescale Semiconductor "as is" and any + * express or implied warranties, including, but not limited to, the implied + * warranties of merchantability and fitness for a particular purpose are + * disclaimed. In no event shall Freescale Semiconductor be liable for any + * direct, indirect, incidental, special, exemplary, or consequential damages + * (including, but not limited to, procurement of substitute goods or services; + * loss of use, data, or profits; or business interruption) however caused and + * on any theory of liability, whether in contract, strict liability, or tort + * (including negligence or otherwise) arising in any way out of the use of this + * software, even if advised of the possibility of such damage. + * + * This file is used by the Freescale hypervisor management driver. It can + * also be included by applications that need to communicate with the driver + * via the ioctl interface. + */ +#ifndef FSL_HYPERVISOR_H +#define FSL_HYPERVISOR_H + +#include + + +/** + * fsl_hv_event_register() - register a callback for failover events + * @nb: pointer to caller-supplied notifier_block structure + * + * This function is called by device drivers to register their callback + * functions for fail-over events. + * + * The caller should allocate a notifier_block object and initialize the + * 'priority' and 'notifier_call' fields. 
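 * For example (sketch only; the my_failover_* names are placeholders):
 *
 *	static int my_failover_cb(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_failover_nb = {
 *		.notifier_call	= my_failover_cb,
 *		.priority	= 0,
 *	};
 *
 *	ret = fsl_hv_failover_register(&my_failover_nb);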
+ */ +int fsl_hv_failover_register(struct notifier_block *nb); + +/** + * fsl_hv_event_unregister() - unregister a callback for failover events + * @nb: the same 'nb' used in previous fsl_hv_failover_register call + */ +int fsl_hv_failover_unregister(struct notifier_block *nb); + +#endif diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h new file mode 100644 index 000000000..5f343b796 --- /dev/null +++ b/include/linux/fsl_ifc.h @@ -0,0 +1,916 @@ +/* Freescale Integrated Flash Controller + * + * Copyright 2011 Freescale Semiconductor, Inc + * + * Author: Dipen Dudhat + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_FSL_IFC_H +#define __ASM_FSL_IFC_H + +#include +#include +#include + +#include +#include + +/* + * The actual number of banks implemented depends on the IFC version + * - IFC version 1.0 implements 4 banks. + * - IFC version 1.1 onward implements 8 banks. + */ +#define FSL_IFC_BANK_COUNT 8 + +#define FSL_IFC_VERSION_MASK 0x0F0F0000 +#define FSL_IFC_VERSION_1_0_0 0x01000000 +#define FSL_IFC_VERSION_1_1_0 0x01010000 +#define FSL_IFC_VERSION_2_0_0 0x02000000 + +#define PGOFFSET_64K (64*1024) +#define PGOFFSET_4K (4*1024) + +/* + * CSPR - Chip Select Property Register + */ +#define CSPR_BA 0xFFFF0000 +#define CSPR_BA_SHIFT 16 +#define CSPR_PORT_SIZE 0x00000180 +#define CSPR_PORT_SIZE_SHIFT 7 +/* Port Size 8 bit */ +#define CSPR_PORT_SIZE_8 0x00000080 +/* Port Size 16 bit */ +#define CSPR_PORT_SIZE_16 0x00000100 +/* Port Size 32 bit */ +#define CSPR_PORT_SIZE_32 0x00000180 +/* Write Protect */ +#define CSPR_WP 0x00000040 +#define CSPR_WP_SHIFT 6 +/* Machine Select */ +#define CSPR_MSEL 0x00000006 +#define CSPR_MSEL_SHIFT 1 +/* NOR */ +#define CSPR_MSEL_NOR 0x00000000 +/* NAND */ +#define CSPR_MSEL_NAND 0x00000002 +/* GPCM */ +#define CSPR_MSEL_GPCM 0x00000004 +/* Bank Valid */ +#define CSPR_V 0x00000001 +#define CSPR_V_SHIFT 0 + +/* + * Address Mask Register + */ +#define IFC_AMASK_MASK 0xFFFF0000 +#define IFC_AMASK_SHIFT 16 +#define IFC_AMASK(n) (IFC_AMASK_MASK << \ + (__ilog2(n) - IFC_AMASK_SHIFT)) + +/* + * Chip Select Option Register IFC_NAND Machine + */ +/* Enable ECC Encoder */ +#define CSOR_NAND_ECC_ENC_EN 0x80000000 +#define CSOR_NAND_ECC_MODE_MASK 0x30000000 +/* 4 bit correction per 520 Byte sector */ +#define CSOR_NAND_ECC_MODE_4 0x00000000 +/* 8 bit correction per 528 Byte sector */ +#define CSOR_NAND_ECC_MODE_8 0x10000000 +/* Enable ECC Decoder */ +#define CSOR_NAND_ECC_DEC_EN 0x04000000 +/* Row Address Length */ +#define CSOR_NAND_RAL_MASK 0x01800000 +#define CSOR_NAND_RAL_SHIFT 20 +#define CSOR_NAND_RAL_1 0x00000000 +#define CSOR_NAND_RAL_2 0x00800000 +#define CSOR_NAND_RAL_3 0x01000000 +#define CSOR_NAND_RAL_4 0x01800000 +/* Page Size 512b, 2k, 4k */ +#define CSOR_NAND_PGS_MASK 0x00180000 +#define CSOR_NAND_PGS_SHIFT 16 +#define CSOR_NAND_PGS_512 0x00000000 +#define 
CSOR_NAND_PGS_2K 0x00080000 +#define CSOR_NAND_PGS_4K 0x00100000 +#define CSOR_NAND_PGS_8K 0x00180000 +/* Spare region Size */ +#define CSOR_NAND_SPRZ_MASK 0x0000E000 +#define CSOR_NAND_SPRZ_SHIFT 13 +#define CSOR_NAND_SPRZ_16 0x00000000 +#define CSOR_NAND_SPRZ_64 0x00002000 +#define CSOR_NAND_SPRZ_128 0x00004000 +#define CSOR_NAND_SPRZ_210 0x00006000 +#define CSOR_NAND_SPRZ_218 0x00008000 +#define CSOR_NAND_SPRZ_224 0x0000A000 +#define CSOR_NAND_SPRZ_CSOR_EXT 0x0000C000 +/* Pages Per Block */ +#define CSOR_NAND_PB_MASK 0x00000700 +#define CSOR_NAND_PB_SHIFT 8 +#define CSOR_NAND_PB(n) ((__ilog2(n) - 5) << CSOR_NAND_PB_SHIFT) +/* Time for Read Enable High to Output High Impedance */ +#define CSOR_NAND_TRHZ_MASK 0x0000001C +#define CSOR_NAND_TRHZ_SHIFT 2 +#define CSOR_NAND_TRHZ_20 0x00000000 +#define CSOR_NAND_TRHZ_40 0x00000004 +#define CSOR_NAND_TRHZ_60 0x00000008 +#define CSOR_NAND_TRHZ_80 0x0000000C +#define CSOR_NAND_TRHZ_100 0x00000010 +/* Buffer control disable */ +#define CSOR_NAND_BCTLD 0x00000001 + +/* + * Chip Select Option Register - NOR Flash Mode + */ +/* Enable Address shift Mode */ +#define CSOR_NOR_ADM_SHFT_MODE_EN 0x80000000 +/* Page Read Enable from NOR device */ +#define CSOR_NOR_PGRD_EN 0x10000000 +/* AVD Toggle Enable during Burst Program */ +#define CSOR_NOR_AVD_TGL_PGM_EN 0x01000000 +/* Address Data Multiplexing Shift */ +#define CSOR_NOR_ADM_MASK 0x0003E000 +#define CSOR_NOR_ADM_SHIFT_SHIFT 13 +#define CSOR_NOR_ADM_SHIFT(n) ((n) << CSOR_NOR_ADM_SHIFT_SHIFT) +/* Type of the NOR device hooked */ +#define CSOR_NOR_NOR_MODE_AYSNC_NOR 0x00000000 +#define CSOR_NOR_NOR_MODE_AVD_NOR 0x00000020 +/* Time for Read Enable High to Output High Impedance */ +#define CSOR_NOR_TRHZ_MASK 0x0000001C +#define CSOR_NOR_TRHZ_SHIFT 2 +#define CSOR_NOR_TRHZ_20 0x00000000 +#define CSOR_NOR_TRHZ_40 0x00000004 +#define CSOR_NOR_TRHZ_60 0x00000008 +#define CSOR_NOR_TRHZ_80 0x0000000C +#define CSOR_NOR_TRHZ_100 0x00000010 +/* Buffer control disable */ +#define CSOR_NOR_BCTLD 0x00000001 + +/* + * Chip Select Option Register - GPCM Mode + */ +/* GPCM Mode - Normal */ +#define CSOR_GPCM_GPMODE_NORMAL 0x00000000 +/* GPCM Mode - GenericASIC */ +#define CSOR_GPCM_GPMODE_ASIC 0x80000000 +/* Parity Mode odd/even */ +#define CSOR_GPCM_PARITY_EVEN 0x40000000 +/* Parity Checking enable/disable */ +#define CSOR_GPCM_PAR_EN 0x20000000 +/* GPCM Timeout Count */ +#define CSOR_GPCM_GPTO_MASK 0x0F000000 +#define CSOR_GPCM_GPTO_SHIFT 24 +#define CSOR_GPCM_GPTO(n) ((__ilog2(n) - 8) << CSOR_GPCM_GPTO_SHIFT) +/* GPCM External Access Termination mode for read access */ +#define CSOR_GPCM_RGETA_EXT 0x00080000 +/* GPCM External Access Termination mode for write access */ +#define CSOR_GPCM_WGETA_EXT 0x00040000 +/* Address Data Multiplexing Shift */ +#define CSOR_GPCM_ADM_MASK 0x0003E000 +#define CSOR_GPCM_ADM_SHIFT_SHIFT 13 +#define CSOR_GPCM_ADM_SHIFT(n) ((n) << CSOR_GPCM_ADM_SHIFT_SHIFT) +/* Generic ASIC Parity error indication delay */ +#define CSOR_GPCM_GAPERRD_MASK 0x00000180 +#define CSOR_GPCM_GAPERRD_SHIFT 7 +#define CSOR_GPCM_GAPERRD(n) (((n) - 1) << CSOR_GPCM_GAPERRD_SHIFT) +/* Time for Read Enable High to Output High Impedance */ +#define CSOR_GPCM_TRHZ_MASK 0x0000001C +#define CSOR_GPCM_TRHZ_20 0x00000000 +#define CSOR_GPCM_TRHZ_40 0x00000004 +#define CSOR_GPCM_TRHZ_60 0x00000008 +#define CSOR_GPCM_TRHZ_80 0x0000000C +#define CSOR_GPCM_TRHZ_100 0x00000010 +/* Buffer control disable */ +#define CSOR_GPCM_BCTLD 0x00000001 + +/* + * Ready Busy Status Register (RB_STAT) + */ +/* CSn is READY */ +#define 
IFC_RB_STAT_READY_CS0 0x80000000 +#define IFC_RB_STAT_READY_CS1 0x40000000 +#define IFC_RB_STAT_READY_CS2 0x20000000 +#define IFC_RB_STAT_READY_CS3 0x10000000 + +/* + * General Control Register (GCR) + */ +#define IFC_GCR_MASK 0x8000F800 +/* reset all IFC hardware */ +#define IFC_GCR_SOFT_RST_ALL 0x80000000 +/* Turnaroud Time of external buffer */ +#define IFC_GCR_TBCTL_TRN_TIME 0x0000F800 +#define IFC_GCR_TBCTL_TRN_TIME_SHIFT 11 + +/* + * Common Event and Error Status Register (CM_EVTER_STAT) + */ +/* Chip select error */ +#define IFC_CM_EVTER_STAT_CSER 0x80000000 + +/* + * Common Event and Error Enable Register (CM_EVTER_EN) + */ +/* Chip select error checking enable */ +#define IFC_CM_EVTER_EN_CSEREN 0x80000000 + +/* + * Common Event and Error Interrupt Enable Register (CM_EVTER_INTR_EN) + */ +/* Chip select error interrupt enable */ +#define IFC_CM_EVTER_INTR_EN_CSERIREN 0x80000000 + +/* + * Common Transfer Error Attribute Register-0 (CM_ERATTR0) + */ +/* transaction type of error Read/Write */ +#define IFC_CM_ERATTR0_ERTYP_READ 0x80000000 +#define IFC_CM_ERATTR0_ERAID 0x0FF00000 +#define IFC_CM_ERATTR0_ERAID_SHIFT 20 +#define IFC_CM_ERATTR0_ESRCID 0x0000FF00 +#define IFC_CM_ERATTR0_ESRCID_SHIFT 8 + +/* + * Clock Control Register (CCR) + */ +#define IFC_CCR_MASK 0x0F0F8800 +/* Clock division ratio */ +#define IFC_CCR_CLK_DIV_MASK 0x0F000000 +#define IFC_CCR_CLK_DIV_SHIFT 24 +#define IFC_CCR_CLK_DIV(n) ((n-1) << IFC_CCR_CLK_DIV_SHIFT) +/* IFC Clock Delay */ +#define IFC_CCR_CLK_DLY_MASK 0x000F0000 +#define IFC_CCR_CLK_DLY_SHIFT 16 +#define IFC_CCR_CLK_DLY(n) ((n) << IFC_CCR_CLK_DLY_SHIFT) +/* Invert IFC clock before sending out */ +#define IFC_CCR_INV_CLK_EN 0x00008000 +/* Fedback IFC Clock */ +#define IFC_CCR_FB_IFC_CLK_SEL 0x00000800 + +/* + * Clock Status Register (CSR) + */ +/* Clk is stable */ +#define IFC_CSR_CLK_STAT_STABLE 0x80000000 + +/* + * IFC_NAND Machine Specific Registers + */ +/* + * NAND Configuration Register (NCFGR) + */ +/* Auto Boot Mode */ +#define IFC_NAND_NCFGR_BOOT 0x80000000 +/* SRAM Initialization */ +#define IFC_NAND_NCFGR_SRAM_INIT_EN 0x20000000 +/* Addressing Mode-ROW0+n/COL0 */ +#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000 +/* Addressing Mode-ROW0+n/COL0+n */ +#define IFC_NAND_NCFGR_ADDR_MODE_RC1 0x00400000 +/* Number of loop iterations of FIR sequences for multi page operations */ +#define IFC_NAND_NCFGR_NUM_LOOP_MASK 0x0000F000 +#define IFC_NAND_NCFGR_NUM_LOOP_SHIFT 12 +#define IFC_NAND_NCFGR_NUM_LOOP(n) ((n) << IFC_NAND_NCFGR_NUM_LOOP_SHIFT) +/* Number of wait cycles */ +#define IFC_NAND_NCFGR_NUM_WAIT_MASK 0x000000FF +#define IFC_NAND_NCFGR_NUM_WAIT_SHIFT 0 + +/* + * NAND Flash Command Registers (NAND_FCR0/NAND_FCR1) + */ +/* General purpose FCM flash command bytes CMD0-CMD7 */ +#define IFC_NAND_FCR0_CMD0 0xFF000000 +#define IFC_NAND_FCR0_CMD0_SHIFT 24 +#define IFC_NAND_FCR0_CMD1 0x00FF0000 +#define IFC_NAND_FCR0_CMD1_SHIFT 16 +#define IFC_NAND_FCR0_CMD2 0x0000FF00 +#define IFC_NAND_FCR0_CMD2_SHIFT 8 +#define IFC_NAND_FCR0_CMD3 0x000000FF +#define IFC_NAND_FCR0_CMD3_SHIFT 0 +#define IFC_NAND_FCR1_CMD4 0xFF000000 +#define IFC_NAND_FCR1_CMD4_SHIFT 24 +#define IFC_NAND_FCR1_CMD5 0x00FF0000 +#define IFC_NAND_FCR1_CMD5_SHIFT 16 +#define IFC_NAND_FCR1_CMD6 0x0000FF00 +#define IFC_NAND_FCR1_CMD6_SHIFT 8 +#define IFC_NAND_FCR1_CMD7 0x000000FF +#define IFC_NAND_FCR1_CMD7_SHIFT 0 + +/* + * Flash ROW and COL Address Register (ROWn, COLn) + */ +/* Main/spare region locator */ +#define IFC_NAND_COL_MS 0x80000000 +/* Column Address */ +#define 
IFC_NAND_COL_CA_MASK 0x00000FFF + +/* + * NAND Flash Byte Count Register (NAND_BC) + */ +/* Byte Count for read/Write */ +#define IFC_NAND_BC 0x000001FF + +/* + * NAND Flash Instruction Registers (NAND_FIR0/NAND_FIR1/NAND_FIR2) + */ +/* NAND Machine specific opcodes OP0-OP14*/ +#define IFC_NAND_FIR0_OP0 0xFC000000 +#define IFC_NAND_FIR0_OP0_SHIFT 26 +#define IFC_NAND_FIR0_OP1 0x03F00000 +#define IFC_NAND_FIR0_OP1_SHIFT 20 +#define IFC_NAND_FIR0_OP2 0x000FC000 +#define IFC_NAND_FIR0_OP2_SHIFT 14 +#define IFC_NAND_FIR0_OP3 0x00003F00 +#define IFC_NAND_FIR0_OP3_SHIFT 8 +#define IFC_NAND_FIR0_OP4 0x000000FC +#define IFC_NAND_FIR0_OP4_SHIFT 2 +#define IFC_NAND_FIR1_OP5 0xFC000000 +#define IFC_NAND_FIR1_OP5_SHIFT 26 +#define IFC_NAND_FIR1_OP6 0x03F00000 +#define IFC_NAND_FIR1_OP6_SHIFT 20 +#define IFC_NAND_FIR1_OP7 0x000FC000 +#define IFC_NAND_FIR1_OP7_SHIFT 14 +#define IFC_NAND_FIR1_OP8 0x00003F00 +#define IFC_NAND_FIR1_OP8_SHIFT 8 +#define IFC_NAND_FIR1_OP9 0x000000FC +#define IFC_NAND_FIR1_OP9_SHIFT 2 +#define IFC_NAND_FIR2_OP10 0xFC000000 +#define IFC_NAND_FIR2_OP10_SHIFT 26 +#define IFC_NAND_FIR2_OP11 0x03F00000 +#define IFC_NAND_FIR2_OP11_SHIFT 20 +#define IFC_NAND_FIR2_OP12 0x000FC000 +#define IFC_NAND_FIR2_OP12_SHIFT 14 +#define IFC_NAND_FIR2_OP13 0x00003F00 +#define IFC_NAND_FIR2_OP13_SHIFT 8 +#define IFC_NAND_FIR2_OP14 0x000000FC +#define IFC_NAND_FIR2_OP14_SHIFT 2 + +/* + * Instruction opcodes to be programmed + * in FIR registers- 6bits + */ +enum ifc_nand_fir_opcodes { + IFC_FIR_OP_NOP, + IFC_FIR_OP_CA0, + IFC_FIR_OP_CA1, + IFC_FIR_OP_CA2, + IFC_FIR_OP_CA3, + IFC_FIR_OP_RA0, + IFC_FIR_OP_RA1, + IFC_FIR_OP_RA2, + IFC_FIR_OP_RA3, + IFC_FIR_OP_CMD0, + IFC_FIR_OP_CMD1, + IFC_FIR_OP_CMD2, + IFC_FIR_OP_CMD3, + IFC_FIR_OP_CMD4, + IFC_FIR_OP_CMD5, + IFC_FIR_OP_CMD6, + IFC_FIR_OP_CMD7, + IFC_FIR_OP_CW0, + IFC_FIR_OP_CW1, + IFC_FIR_OP_CW2, + IFC_FIR_OP_CW3, + IFC_FIR_OP_CW4, + IFC_FIR_OP_CW5, + IFC_FIR_OP_CW6, + IFC_FIR_OP_CW7, + IFC_FIR_OP_WBCD, + IFC_FIR_OP_RBCD, + IFC_FIR_OP_BTRD, + IFC_FIR_OP_RDSTAT, + IFC_FIR_OP_NWAIT, + IFC_FIR_OP_WFR, + IFC_FIR_OP_SBRD, + IFC_FIR_OP_UA, + IFC_FIR_OP_RB, +}; + +/* + * NAND Chip Select Register (NAND_CSEL) + */ +#define IFC_NAND_CSEL 0x0C000000 +#define IFC_NAND_CSEL_SHIFT 26 +#define IFC_NAND_CSEL_CS0 0x00000000 +#define IFC_NAND_CSEL_CS1 0x04000000 +#define IFC_NAND_CSEL_CS2 0x08000000 +#define IFC_NAND_CSEL_CS3 0x0C000000 + +/* + * NAND Operation Sequence Start (NANDSEQ_STRT) + */ +/* NAND Flash Operation Start */ +#define IFC_NAND_SEQ_STRT_FIR_STRT 0x80000000 +/* Automatic Erase */ +#define IFC_NAND_SEQ_STRT_AUTO_ERS 0x00800000 +/* Automatic Program */ +#define IFC_NAND_SEQ_STRT_AUTO_PGM 0x00100000 +/* Automatic Copyback */ +#define IFC_NAND_SEQ_STRT_AUTO_CPB 0x00020000 +/* Automatic Read Operation */ +#define IFC_NAND_SEQ_STRT_AUTO_RD 0x00004000 +/* Automatic Status Read */ +#define IFC_NAND_SEQ_STRT_AUTO_STAT_RD 0x00000800 + +/* + * NAND Event and Error Status Register (NAND_EVTER_STAT) + */ +/* Operation Complete */ +#define IFC_NAND_EVTER_STAT_OPC 0x80000000 +/* Flash Timeout Error */ +#define IFC_NAND_EVTER_STAT_FTOER 0x08000000 +/* Write Protect Error */ +#define IFC_NAND_EVTER_STAT_WPER 0x04000000 +/* ECC Error */ +#define IFC_NAND_EVTER_STAT_ECCER 0x02000000 +/* RCW Load Done */ +#define IFC_NAND_EVTER_STAT_RCW_DN 0x00008000 +/* Boot Loadr Done */ +#define IFC_NAND_EVTER_STAT_BOOT_DN 0x00004000 +/* Bad Block Indicator search select */ +#define IFC_NAND_EVTER_STAT_BBI_SRCH_SE 0x00000800 + +/* + * NAND Flash Page Read Completion Event Status 
Register + * (PGRDCMPL_EVT_STAT) + */ +#define PGRDCMPL_EVT_STAT_MASK 0xFFFF0000 +/* Small Page 0-15 Done */ +#define PGRDCMPL_EVT_STAT_SECTION_SP(n) (1 << (31 - (n))) +/* Large Page(2K) 0-3 Done */ +#define PGRDCMPL_EVT_STAT_LP_2K(n) (0xF << (28 - (n)*4)) +/* Large Page(4K) 0-1 Done */ +#define PGRDCMPL_EVT_STAT_LP_4K(n) (0xFF << (24 - (n)*8)) + +/* + * NAND Event and Error Enable Register (NAND_EVTER_EN) + */ +/* Operation complete event enable */ +#define IFC_NAND_EVTER_EN_OPC_EN 0x80000000 +/* Page read complete event enable */ +#define IFC_NAND_EVTER_EN_PGRDCMPL_EN 0x20000000 +/* Flash Timeout error enable */ +#define IFC_NAND_EVTER_EN_FTOER_EN 0x08000000 +/* Write Protect error enable */ +#define IFC_NAND_EVTER_EN_WPER_EN 0x04000000 +/* ECC error logging enable */ +#define IFC_NAND_EVTER_EN_ECCER_EN 0x02000000 + +/* + * NAND Event and Error Interrupt Enable Register (NAND_EVTER_INTR_EN) + */ +/* Enable interrupt for operation complete */ +#define IFC_NAND_EVTER_INTR_OPCIR_EN 0x80000000 +/* Enable interrupt for Page read complete */ +#define IFC_NAND_EVTER_INTR_PGRDCMPLIR_EN 0x20000000 +/* Enable interrupt for Flash timeout error */ +#define IFC_NAND_EVTER_INTR_FTOERIR_EN 0x08000000 +/* Enable interrupt for Write protect error */ +#define IFC_NAND_EVTER_INTR_WPERIR_EN 0x04000000 +/* Enable interrupt for ECC error*/ +#define IFC_NAND_EVTER_INTR_ECCERIR_EN 0x02000000 + +/* + * NAND Transfer Error Attribute Register-0 (NAND_ERATTR0) + */ +#define IFC_NAND_ERATTR0_MASK 0x0C080000 +/* Error on CS0-3 for NAND */ +#define IFC_NAND_ERATTR0_ERCS_CS0 0x00000000 +#define IFC_NAND_ERATTR0_ERCS_CS1 0x04000000 +#define IFC_NAND_ERATTR0_ERCS_CS2 0x08000000 +#define IFC_NAND_ERATTR0_ERCS_CS3 0x0C000000 +/* Transaction type of error Read/Write */ +#define IFC_NAND_ERATTR0_ERTTYPE_READ 0x00080000 + +/* + * NAND Flash Status Register (NAND_FSR) + */ +/* First byte of data read from read status op */ +#define IFC_NAND_NFSR_RS0 0xFF000000 +/* Second byte of data read from read status op */ +#define IFC_NAND_NFSR_RS1 0x00FF0000 + +/* + * ECC Error Status Registers (ECCSTAT0-ECCSTAT3) + */ +/* Number of ECC errors on sector n (n = 0-15) */ +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_MASK 0x0F000000 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_SHIFT 24 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_MASK 0x000F0000 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_SHIFT 16 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_MASK 0x00000F00 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_SHIFT 8 +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_MASK 0x0000000F +#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_SHIFT 0 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_MASK 0x0F000000 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_SHIFT 24 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_MASK 0x000F0000 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_SHIFT 16 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_MASK 0x00000F00 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_SHIFT 8 +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_MASK 0x0000000F +#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_SHIFT 0 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_MASK 0x0F000000 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_SHIFT 24 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_MASK 0x000F0000 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_SHIFT 16 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_MASK 0x00000F00 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_SHIFT 8 +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_MASK 0x0000000F +#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_SHIFT 0 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_MASK 0x0F000000 +#define 
IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_SHIFT 24 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_MASK 0x000F0000 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_SHIFT 16 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_MASK 0x00000F00 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_SHIFT 8 +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_MASK 0x0000000F +#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_SHIFT 0 + +/* + * NAND Control Register (NANDCR) + */ +#define IFC_NAND_NCR_FTOCNT_MASK 0x1E000000 +#define IFC_NAND_NCR_FTOCNT_SHIFT 25 +#define IFC_NAND_NCR_FTOCNT(n) ((_ilog2(n) - 8) << IFC_NAND_NCR_FTOCNT_SHIFT) + +/* + * NAND_AUTOBOOT_TRGR + */ +/* Trigger RCW load */ +#define IFC_NAND_AUTOBOOT_TRGR_RCW_LD 0x80000000 +/* Trigget Auto Boot */ +#define IFC_NAND_AUTOBOOT_TRGR_BOOT_LD 0x20000000 + +/* + * NAND_MDR + */ +/* 1st read data byte when opcode SBRD */ +#define IFC_NAND_MDR_RDATA0 0xFF000000 +/* 2nd read data byte when opcode SBRD */ +#define IFC_NAND_MDR_RDATA1 0x00FF0000 + +/* + * NOR Machine Specific Registers + */ +/* + * NOR Event and Error Status Register (NOR_EVTER_STAT) + */ +/* NOR Command Sequence Operation Complete */ +#define IFC_NOR_EVTER_STAT_OPC_NOR 0x80000000 +/* Write Protect Error */ +#define IFC_NOR_EVTER_STAT_WPER 0x04000000 +/* Command Sequence Timeout Error */ +#define IFC_NOR_EVTER_STAT_STOER 0x01000000 + +/* + * NOR Event and Error Enable Register (NOR_EVTER_EN) + */ +/* NOR Command Seq complete event enable */ +#define IFC_NOR_EVTER_EN_OPCEN_NOR 0x80000000 +/* Write Protect Error Checking Enable */ +#define IFC_NOR_EVTER_EN_WPEREN 0x04000000 +/* Timeout Error Enable */ +#define IFC_NOR_EVTER_EN_STOEREN 0x01000000 + +/* + * NOR Event and Error Interrupt Enable Register (NOR_EVTER_INTR_EN) + */ +/* Enable interrupt for OPC complete */ +#define IFC_NOR_EVTER_INTR_OPCEN_NOR 0x80000000 +/* Enable interrupt for write protect error */ +#define IFC_NOR_EVTER_INTR_WPEREN 0x04000000 +/* Enable interrupt for timeout error */ +#define IFC_NOR_EVTER_INTR_STOEREN 0x01000000 + +/* + * NOR Transfer Error Attribute Register-0 (NOR_ERATTR0) + */ +/* Source ID for error transaction */ +#define IFC_NOR_ERATTR0_ERSRCID 0xFF000000 +/* AXI ID for error transation */ +#define IFC_NOR_ERATTR0_ERAID 0x000FF000 +/* Chip select corresponds to NOR error */ +#define IFC_NOR_ERATTR0_ERCS_CS0 0x00000000 +#define IFC_NOR_ERATTR0_ERCS_CS1 0x00000010 +#define IFC_NOR_ERATTR0_ERCS_CS2 0x00000020 +#define IFC_NOR_ERATTR0_ERCS_CS3 0x00000030 +/* Type of transaction read/write */ +#define IFC_NOR_ERATTR0_ERTYPE_READ 0x00000001 + +/* + * NOR Transfer Error Attribute Register-2 (NOR_ERATTR2) + */ +#define IFC_NOR_ERATTR2_ER_NUM_PHASE_EXP 0x000F0000 +#define IFC_NOR_ERATTR2_ER_NUM_PHASE_PER 0x00000F00 + +/* + * NOR Control Register (NORCR) + */ +#define IFC_NORCR_MASK 0x0F0F0000 +/* No. 
of Address/Data Phase */ +#define IFC_NORCR_NUM_PHASE_MASK 0x0F000000 +#define IFC_NORCR_NUM_PHASE_SHIFT 24 +#define IFC_NORCR_NUM_PHASE(n) ((n-1) << IFC_NORCR_NUM_PHASE_SHIFT) +/* Sequence Timeout Count */ +#define IFC_NORCR_STOCNT_MASK 0x000F0000 +#define IFC_NORCR_STOCNT_SHIFT 16 +#define IFC_NORCR_STOCNT(n) ((__ilog2(n) - 8) << IFC_NORCR_STOCNT_SHIFT) + +/* + * GPCM Machine specific registers + */ +/* + * GPCM Event and Error Status Register (GPCM_EVTER_STAT) + */ +/* Timeout error */ +#define IFC_GPCM_EVTER_STAT_TOER 0x04000000 +/* Parity error */ +#define IFC_GPCM_EVTER_STAT_PER 0x01000000 + +/* + * GPCM Event and Error Enable Register (GPCM_EVTER_EN) + */ +/* Timeout error enable */ +#define IFC_GPCM_EVTER_EN_TOER_EN 0x04000000 +/* Parity error enable */ +#define IFC_GPCM_EVTER_EN_PER_EN 0x01000000 + +/* + * GPCM Event and Error Interrupt Enable Register (GPCM_EVTER_INTR_EN) + */ +/* Enable Interrupt for timeout error */ +#define IFC_GPCM_EEIER_TOERIR_EN 0x04000000 +/* Enable Interrupt for Parity error */ +#define IFC_GPCM_EEIER_PERIR_EN 0x01000000 + +/* + * GPCM Transfer Error Attribute Register-0 (GPCM_ERATTR0) + */ +/* Source ID for error transaction */ +#define IFC_GPCM_ERATTR0_ERSRCID 0xFF000000 +/* AXI ID for error transaction */ +#define IFC_GPCM_ERATTR0_ERAID 0x000FF000 +/* Chip select corresponds to GPCM error */ +#define IFC_GPCM_ERATTR0_ERCS_CS0 0x00000000 +#define IFC_GPCM_ERATTR0_ERCS_CS1 0x00000040 +#define IFC_GPCM_ERATTR0_ERCS_CS2 0x00000080 +#define IFC_GPCM_ERATTR0_ERCS_CS3 0x000000C0 +/* Type of transaction read/Write */ +#define IFC_GPCM_ERATTR0_ERTYPE_READ 0x00000001 + +/* + * GPCM Transfer Error Attribute Register-2 (GPCM_ERATTR2) + */ +/* On which beat of address/data parity error is observed */ +#define IFC_GPCM_ERATTR2_PERR_BEAT 0x00000C00 +/* Parity Error on byte */ +#define IFC_GPCM_ERATTR2_PERR_BYTE 0x000000F0 +/* Parity Error reported in addr or data phase */ +#define IFC_GPCM_ERATTR2_PERR_DATA_PHASE 0x00000001 + +/* + * GPCM Status Register (GPCM_STAT) + */ +#define IFC_GPCM_STAT_BSY 0x80000000 /* GPCM is busy */ + +/* + * IFC Controller NAND Machine registers + */ +struct fsl_ifc_nand { + __be32 ncfgr; + u32 res1[0x4]; + __be32 nand_fcr0; + __be32 nand_fcr1; + u32 res2[0x8]; + __be32 row0; + u32 res3; + __be32 col0; + u32 res4; + __be32 row1; + u32 res5; + __be32 col1; + u32 res6; + __be32 row2; + u32 res7; + __be32 col2; + u32 res8; + __be32 row3; + u32 res9; + __be32 col3; + u32 res10[0x24]; + __be32 nand_fbcr; + u32 res11; + __be32 nand_fir0; + __be32 nand_fir1; + __be32 nand_fir2; + u32 res12[0x10]; + __be32 nand_csel; + u32 res13; + __be32 nandseq_strt; + u32 res14; + __be32 nand_evter_stat; + u32 res15; + __be32 pgrdcmpl_evt_stat; + u32 res16[0x2]; + __be32 nand_evter_en; + u32 res17[0x2]; + __be32 nand_evter_intr_en; + __be32 nand_vol_addr_stat; + u32 res18; + __be32 nand_erattr0; + __be32 nand_erattr1; + u32 res19[0x10]; + __be32 nand_fsr; + u32 res20; + __be32 nand_eccstat[8]; + u32 res21[0x1c]; + __be32 nanndcr; + u32 res22[0x2]; + __be32 nand_autoboot_trgr; + u32 res23; + __be32 nand_mdr; + u32 res24[0x1C]; + __be32 nand_dll_lowcfg0; + __be32 nand_dll_lowcfg1; + u32 res25; + __be32 nand_dll_lowstat; + u32 res26[0x3c]; +}; + +/* + * IFC controller NOR Machine registers + */ +struct fsl_ifc_nor { + __be32 nor_evter_stat; + u32 res1[0x2]; + __be32 nor_evter_en; + u32 res2[0x2]; + __be32 nor_evter_intr_en; + u32 res3[0x2]; + __be32 nor_erattr0; + __be32 nor_erattr1; + __be32 nor_erattr2; + u32 res4[0x4]; + __be32 norcr; + u32 res5[0xEF]; +}; + 
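The ECCSTATn and NORCR fields above follow this header's usual *_MASK/*_SHIFT convention: mask the raw register value, then shift the field down to bit 0. As a minimal, illustrative sketch (not part of the upstream header), a NAND driver could decode the sector-0 ECC error count as below; the helper name is hypothetical, and the raw value is assumed to have been read with the endian-aware ifc_in32() accessor defined further down in this file.

/* Hypothetical helper, shown only to illustrate the MASK/SHIFT usage;
 * eccstat0 is the raw ECCSTAT0 value, e.g. from ifc_in32(). */
static inline u32 ifc_nand_eccstat0_sector0_errcnt(u32 eccstat0)
{
	return (eccstat0 & IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_MASK) >>
		IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_SHIFT;
}

The same pattern applies in the write direction: compose a field with its parameterised macro (for example IFC_NORCR_NUM_PHASE(n)), mask the result against the register's *_MASK, and write it back through the matching ifc_out32() accessor.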
+/* + * IFC controller GPCM Machine registers + */ +struct fsl_ifc_gpcm { + __be32 gpcm_evter_stat; + u32 res1[0x2]; + __be32 gpcm_evter_en; + u32 res2[0x2]; + __be32 gpcm_evter_intr_en; + u32 res3[0x2]; + __be32 gpcm_erattr0; + __be32 gpcm_erattr1; + __be32 gpcm_erattr2; + __be32 gpcm_stat; +}; + +/* + * IFC Controller Registers + */ +struct fsl_ifc_global { + __be32 ifc_rev; + u32 res1[0x2]; + struct { + __be32 cspr_ext; + __be32 cspr; + u32 res2; + } cspr_cs[FSL_IFC_BANK_COUNT]; + u32 res3[0xd]; + struct { + __be32 amask; + u32 res4[0x2]; + } amask_cs[FSL_IFC_BANK_COUNT]; + u32 res5[0xc]; + struct { + __be32 csor; + __be32 csor_ext; + u32 res6; + } csor_cs[FSL_IFC_BANK_COUNT]; + u32 res7[0xc]; + struct { + __be32 ftim[4]; + u32 res8[0x8]; + } ftim_cs[FSL_IFC_BANK_COUNT]; + u32 res9[0x30]; + __be32 rb_stat; + __be32 rb_map; + __be32 wb_map; + __be32 ifc_gcr; + u32 res10[0x2]; + __be32 cm_evter_stat; + u32 res11[0x2]; + __be32 cm_evter_en; + u32 res12[0x2]; + __be32 cm_evter_intr_en; + u32 res13[0x2]; + __be32 cm_erattr0; + __be32 cm_erattr1; + u32 res14[0x2]; + __be32 ifc_ccr; + __be32 ifc_csr; + __be32 ddr_ccr_low; +}; + + +struct fsl_ifc_runtime { + struct fsl_ifc_nand ifc_nand; + struct fsl_ifc_nor ifc_nor; + struct fsl_ifc_gpcm ifc_gpcm; +}; + +extern unsigned int convert_ifc_address(phys_addr_t addr_base); +extern int fsl_ifc_find(phys_addr_t addr_base); + +/* overview of the fsl ifc controller */ + +struct fsl_ifc_ctrl { + /* device info */ + struct device *dev; + struct fsl_ifc_global __iomem *gregs; + struct fsl_ifc_runtime __iomem *rregs; + int irq; + int nand_irq; + spinlock_t lock; + void *nand; + int version; + int banks; + + u32 nand_stat; + wait_queue_head_t nand_wait; + bool little_endian; +}; + +extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; + +static inline u32 ifc_in32(void __iomem *addr) +{ + u32 val; + + if (fsl_ifc_ctrl_dev->little_endian) + val = ioread32(addr); + else + val = ioread32be(addr); + + return val; +} + +static inline u16 ifc_in16(void __iomem *addr) +{ + u16 val; + + if (fsl_ifc_ctrl_dev->little_endian) + val = ioread16(addr); + else + val = ioread16be(addr); + + return val; +} + +static inline u8 ifc_in8(void __iomem *addr) +{ + return ioread8(addr); +} + +static inline void ifc_out32(u32 val, void __iomem *addr) +{ + if (fsl_ifc_ctrl_dev->little_endian) + iowrite32(val, addr); + else + iowrite32be(val, addr); +} + +static inline void ifc_out16(u16 val, void __iomem *addr) +{ + if (fsl_ifc_ctrl_dev->little_endian) + iowrite16(val, addr); + else + iowrite16be(val, addr); +} + +static inline void ifc_out8(u8 val, void __iomem *addr) +{ + iowrite8(val, addr); +} + +#endif /* __ASM_FSL_IFC_H */ diff --git a/include/linux/fsldma.h b/include/linux/fsldma.h new file mode 100644 index 000000000..b213c0296 --- /dev/null +++ b/include/linux/fsldma.h @@ -0,0 +1,13 @@ +/* + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef FSL_DMA_H +#define FSL_DMA_H +/* fsl dma API for enxternal start */ +int fsl_dma_external_start(struct dma_chan *dchan, int enable); + +#endif diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h new file mode 100644 index 000000000..fd1ce1055 --- /dev/null +++ b/include/linux/fsnotify.h @@ -0,0 +1,293 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FS_NOTIFY_H +#define _LINUX_FS_NOTIFY_H + +/* + * include/linux/fsnotify.h - generic hooks for filesystem notification, to + * reduce in-source duplication from both dnotify and inotify. + * + * We don't compile any of this away in some complicated menagerie of ifdefs. + * Instead, we rely on the code inside to optimize away as needed. + * + * (C) Copyright 2005 Robert Love + */ + +#include +#include +#include +#include + +/* Notify this dentry's parent about a child's events. */ +static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) +{ + if (!dentry) + dentry = path->dentry; + + return __fsnotify_parent(path, dentry, mask); +} + +/* simple call site for access decisions */ +static inline int fsnotify_perm(struct file *file, int mask) +{ + const struct path *path = &file->f_path; + struct inode *inode = file_inode(file); + __u32 fsnotify_mask = 0; + int ret; + + if (file->f_mode & FMODE_NONOTIFY) + return 0; + if (!(mask & (MAY_READ | MAY_OPEN))) + return 0; + if (mask & MAY_OPEN) + fsnotify_mask = FS_OPEN_PERM; + else if (mask & MAY_READ) + fsnotify_mask = FS_ACCESS_PERM; + else + BUG(); + + ret = fsnotify_parent(path, NULL, fsnotify_mask); + if (ret) + return ret; + + return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); +} + +/* + * fsnotify_link_count - inode's link count changed + */ +static inline void fsnotify_link_count(struct inode *inode) +{ + fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0); +} + +/* + * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir + */ +static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, + const unsigned char *old_name, + int isdir, struct inode *target, struct dentry *moved) +{ + struct inode *source = moved->d_inode; + u32 fs_cookie = fsnotify_get_cookie(); + __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM); + __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO); + const unsigned char *new_name = moved->d_name.name; + + if (old_dir == new_dir) + old_dir_mask |= FS_DN_RENAME; + + if (isdir) { + old_dir_mask |= FS_ISDIR; + new_dir_mask |= FS_ISDIR; + } + + fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name, + fs_cookie); + fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name, + fs_cookie); + + if (target) + fsnotify_link_count(target); + + if (source) + fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0); + audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE); +} + +/* + * fsnotify_inode_delete - and inode is being evicted from cache, clean up is needed + */ +static inline void fsnotify_inode_delete(struct inode *inode) +{ + __fsnotify_inode_delete(inode); +} + +/* + * fsnotify_vfsmount_delete - a vfsmount is being destroyed, clean up is needed + */ +static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) +{ + __fsnotify_vfsmount_delete(mnt); +} + +/* + * fsnotify_nameremove - a filename was removed from a directory + */ +static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) +{ + __u32 mask = FS_DELETE; + + if (isdir) + mask |= FS_ISDIR; + 
+ fsnotify_parent(NULL, dentry, mask); +} + +/* + * fsnotify_inoderemove - an inode is going away + */ +static inline void fsnotify_inoderemove(struct inode *inode) +{ + fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + __fsnotify_inode_delete(inode); +} + +/* + * fsnotify_create - 'name' was linked in + */ +static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) +{ + audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); + + fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); +} + +/* + * fsnotify_link - new hardlink in 'inode' directory + * Note: We have to pass also the linked inode ptr as some filesystems leave + * new_dentry->d_inode NULL and instantiate inode pointer later + */ +static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) +{ + fsnotify_link_count(inode); + audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE); + + fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0); +} + +/* + * fsnotify_mkdir - directory 'name' was created + */ +static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) +{ + __u32 mask = (FS_CREATE | FS_ISDIR); + struct inode *d_inode = dentry->d_inode; + + audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); + + fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); +} + +/* + * fsnotify_access - file was read + */ +static inline void fsnotify_access(struct file *file) +{ + const struct path *path = &file->f_path; + struct inode *inode = file_inode(file); + __u32 mask = FS_ACCESS; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + if (!(file->f_mode & FMODE_NONOTIFY)) { + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + } +} + +/* + * fsnotify_modify - file was modified + */ +static inline void fsnotify_modify(struct file *file) +{ + const struct path *path = &file->f_path; + struct inode *inode = file_inode(file); + __u32 mask = FS_MODIFY; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + if (!(file->f_mode & FMODE_NONOTIFY)) { + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + } +} + +/* + * fsnotify_open - file was opened + */ +static inline void fsnotify_open(struct file *file) +{ + const struct path *path = &file->f_path; + struct inode *inode = file_inode(file); + __u32 mask = FS_OPEN; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); +} + +/* + * fsnotify_close - file was closed + */ +static inline void fsnotify_close(struct file *file) +{ + const struct path *path = &file->f_path; + struct inode *inode = file_inode(file); + fmode_t mode = file->f_mode; + __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + if (!(file->f_mode & FMODE_NONOTIFY)) { + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + } +} + +/* + * fsnotify_xattr - extended attributes were changed + */ +static inline void fsnotify_xattr(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + __u32 mask = FS_ATTRIB; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_parent(NULL, dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); +} + +/* + * fsnotify_change - notify_change event. 
file was modified and/or metadata + * was changed. + */ +static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) +{ + struct inode *inode = dentry->d_inode; + __u32 mask = 0; + + if (ia_valid & ATTR_UID) + mask |= FS_ATTRIB; + if (ia_valid & ATTR_GID) + mask |= FS_ATTRIB; + if (ia_valid & ATTR_SIZE) + mask |= FS_MODIFY; + + /* both times implies a utime(s) call */ + if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) + mask |= FS_ATTRIB; + else if (ia_valid & ATTR_ATIME) + mask |= FS_ACCESS; + else if (ia_valid & ATTR_MTIME) + mask |= FS_MODIFY; + + if (ia_valid & ATTR_MODE) + mask |= FS_ATTRIB; + + if (mask) { + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_parent(NULL, dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + } +} + +#endif /* _LINUX_FS_NOTIFY_H */ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h new file mode 100644 index 000000000..4599d1c95 --- /dev/null +++ b/include/linux/fsnotify_backend.h @@ -0,0 +1,507 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Filesystem access notification for Linux + * + * Copyright (C) 2008 Red Hat, Inc., Eric Paris + */ + +#ifndef __LINUX_FSNOTIFY_BACKEND_H +#define __LINUX_FSNOTIFY_BACKEND_H + +#ifdef __KERNEL__ + +#include /* inotify uses this */ +#include /* struct inode */ +#include +#include /* struct path */ +#include +#include +#include +#include +#include + +/* + * IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily + * convert between them. dnotify only needs conversion at watch creation + * so no perf loss there. fanotify isn't defined yet, so it can use the + * wholes if it needs more events. + */ +#define FS_ACCESS 0x00000001 /* File was accessed */ +#define FS_MODIFY 0x00000002 /* File was modified */ +#define FS_ATTRIB 0x00000004 /* Metadata changed */ +#define FS_CLOSE_WRITE 0x00000008 /* Writtable file was closed */ +#define FS_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */ +#define FS_OPEN 0x00000020 /* File was opened */ +#define FS_MOVED_FROM 0x00000040 /* File was moved from X */ +#define FS_MOVED_TO 0x00000080 /* File was moved to Y */ +#define FS_CREATE 0x00000100 /* Subfile was created */ +#define FS_DELETE 0x00000200 /* Subfile was deleted */ +#define FS_DELETE_SELF 0x00000400 /* Self was deleted */ +#define FS_MOVE_SELF 0x00000800 /* Self was moved */ + +#define FS_UNMOUNT 0x00002000 /* inode on umount fs */ +#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ +#define FS_IN_IGNORED 0x00008000 /* last inotify event here */ + +#define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ +#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ + +#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ +#define FS_ISDIR 0x40000000 /* event occurred against dir */ +#define FS_IN_ONESHOT 0x80000000 /* only send event once */ + +#define FS_DN_RENAME 0x10000000 /* file renamed */ +#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ + +/* This inode cares about things that happen to its children. Always set for + * dnotify and inotify. 
*/ +#define FS_EVENT_ON_CHILD 0x08000000 + +/* This is a list of all events that may get sent to a parernt based on fs event + * happening to inodes inside that directory */ +#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ + FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ + FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ + FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM) + +#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) + +#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM) + +/* Events that can be reported to backends */ +#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ + FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \ + FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ + FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ + FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ + FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME) + +/* Extra flags that may be reported with event or control handling of events */ +#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ + FS_DN_MULTISHOT | FS_EVENT_ON_CHILD) + +#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS) + +struct fsnotify_group; +struct fsnotify_event; +struct fsnotify_mark; +struct fsnotify_event_private_data; +struct fsnotify_fname; +struct fsnotify_iter_info; + +struct mem_cgroup; + +/* + * Each group much define these ops. The fsnotify infrastructure will call + * these operations for each relevant group. + * + * handle_event - main call for a group to handle an fs event + * free_group_priv - called when a group refcnt hits 0 to clean up the private union + * freeing_mark - called when a mark is being destroyed for some reason. The group + * MUST be holding a reference on each mark and that reference must be + * dropped in this function. inotify uses this function to send + * userspace messages that marks have been removed. + */ +struct fsnotify_ops { + int (*handle_event)(struct fsnotify_group *group, + struct inode *inode, + u32 mask, const void *data, int data_type, + const unsigned char *file_name, u32 cookie, + struct fsnotify_iter_info *iter_info); + void (*free_group_priv)(struct fsnotify_group *group); + void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); + void (*free_event)(struct fsnotify_event *event); + /* called on final put+free to free memory */ + void (*free_mark)(struct fsnotify_mark *mark); +}; + +/* + * all of the information about the original object we want to now send to + * a group. If you want to carry more info from the accessing task to the + * listener this structure is where you need to be adding fields. + */ +struct fsnotify_event { + struct list_head list; + /* inode may ONLY be dereferenced during handle_event(). */ + struct inode *inode; /* either the inode the event happened to or its parent */ + u32 mask; /* the type of access, bitwise OR for FS_* event types */ +}; + +/* + * A group is a "thing" that wants to receive notification about filesystem + * events. The mask holds the subset of event types this group cares about. + * refcnt on a group is up to the implementor and at any moment if it goes 0 + * everything will be cleaned up. + */ +struct fsnotify_group { + const struct fsnotify_ops *ops; /* how this group handles things */ + + /* + * How the refcnt is used is up to each group. When the refcnt hits 0 + * fsnotify will clean up all of the resources associated with this group. + * As an example, the dnotify group will always have a refcnt=1 and that + * will never change. 
Inotify, on the other hand, has a group per + * inotify_init() and the refcnt will hit 0 only when that fd has been + * closed. + */ + refcount_t refcnt; /* things with interest in this group */ + + /* needed to send notification to userspace */ + spinlock_t notification_lock; /* protect the notification_list */ + struct list_head notification_list; /* list of event_holder this group needs to send to userspace */ + wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */ + unsigned int q_len; /* events on the queue */ + unsigned int max_events; /* maximum events allowed on the list */ + /* + * Valid fsnotify group priorities. Events are send in order from highest + * priority to lowest priority. We default to the lowest priority. + */ + #define FS_PRIO_0 0 /* normal notifiers, no permissions */ + #define FS_PRIO_1 1 /* fanotify content based access control */ + #define FS_PRIO_2 2 /* fanotify pre-content access */ + unsigned int priority; + bool shutdown; /* group is being shut down, don't queue more events */ + + /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ + struct mutex mark_mutex; /* protect marks_list */ + atomic_t num_marks; /* 1 for each mark and 1 for not being + * past the point of no return when freeing + * a group */ + atomic_t user_waits; /* Number of tasks waiting for user + * response */ + struct list_head marks_list; /* all inode marks for this group */ + + struct fasync_struct *fsn_fa; /* async notification */ + + struct fsnotify_event *overflow_event; /* Event we queue when the + * notification list is too + * full */ + + struct mem_cgroup *memcg; /* memcg to charge allocations */ + + /* groups can define private fields here or use the void *private */ + union { + void *private; +#ifdef CONFIG_INOTIFY_USER + struct inotify_group_private_data { + spinlock_t idr_lock; + struct idr idr; + struct ucounts *ucounts; + } inotify_data; +#endif +#ifdef CONFIG_FANOTIFY + struct fanotify_group_private_data { + /* allows a group to block waiting for a userspace response */ + struct list_head access_list; + wait_queue_head_t access_waitq; + int f_flags; + unsigned int max_marks; + struct user_struct *user; + bool audit; + } fanotify_data; +#endif /* CONFIG_FANOTIFY */ + }; +}; + +/* when calling fsnotify tell it if the data is a path or inode */ +#define FSNOTIFY_EVENT_NONE 0 +#define FSNOTIFY_EVENT_PATH 1 +#define FSNOTIFY_EVENT_INODE 2 + +enum fsnotify_obj_type { + FSNOTIFY_OBJ_TYPE_INODE, + FSNOTIFY_OBJ_TYPE_VFSMOUNT, + FSNOTIFY_OBJ_TYPE_COUNT, + FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT +}; + +#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) +#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) +#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) + +static inline bool fsnotify_valid_obj_type(unsigned int type) +{ + return (type < FSNOTIFY_OBJ_TYPE_COUNT); +} + +struct fsnotify_iter_info { + struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT]; + unsigned int report_mask; + int srcu_idx; +}; + +static inline bool fsnotify_iter_should_report_type( + struct fsnotify_iter_info *iter_info, int type) +{ + return (iter_info->report_mask & (1U << type)); +} + +static inline void fsnotify_iter_set_report_type( + struct fsnotify_iter_info *iter_info, int type) +{ + iter_info->report_mask |= (1U << type); +} + +static inline void fsnotify_iter_set_report_type_mark( + struct fsnotify_iter_info *iter_info, int type, + struct fsnotify_mark 
*mark) +{ + iter_info->marks[type] = mark; + iter_info->report_mask |= (1U << type); +} + +#define FSNOTIFY_ITER_FUNCS(name, NAME) \ +static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ + struct fsnotify_iter_info *iter_info) \ +{ \ + return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \ + iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \ +} + +FSNOTIFY_ITER_FUNCS(inode, INODE) +FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) + +#define fsnotify_foreach_obj_type(type) \ + for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++) + +/* + * fsnotify_connp_t is what we embed in objects which connector can be attached + * to. fsnotify_connp_t * is how we refer from connector back to object. + */ +struct fsnotify_mark_connector; +typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; + +/* + * Inode / vfsmount point to this structure which tracks all marks attached to + * the inode / vfsmount. The reference to inode / vfsmount is held by this + * structure. We destroy this structure when there are no more marks attached + * to it. The structure is protected by fsnotify_mark_srcu. + */ +struct fsnotify_mark_connector { + spinlock_t lock; + unsigned int type; /* Type of object [lock] */ + union { + /* Object pointer [lock] */ + fsnotify_connp_t *obj; + /* Used listing heads to free after srcu period expires */ + struct fsnotify_mark_connector *destroy_next; + }; + struct hlist_head list; +}; + +/* + * A mark is simply an object attached to an in core inode which allows an + * fsnotify listener to indicate they are either no longer interested in events + * of a type matching mask or only interested in those events. + * + * These are flushed when an inode is evicted from core and may be flushed + * when the inode is modified (as seen by fsnotify_access). Some fsnotify + * users (such as dnotify) will flush these when the open fd is closed and not + * at inode eviction or modification. + * + * Text in brackets is showing the lock(s) protecting modifications of a + * particular entry. obj_lock means either inode->i_lock or + * mnt->mnt_root->d_lock depending on the mark type. + */ +struct fsnotify_mark { + /* Mask this mark is for [mark->lock, group->mark_mutex] */ + __u32 mask; + /* We hold one for presence in g_list. Also one ref for each 'thing' + * in kernel that found and may be using this mark. */ + refcount_t refcnt; + /* Group this mark is for. Set on mark creation, stable until last ref + * is dropped */ + struct fsnotify_group *group; + /* List of marks by group->marks_list. Also reused for queueing + * mark into destroy_list when it's waiting for the end of SRCU period + * before it can be freed. 
[group->mark_mutex] */ + struct list_head g_list; + /* Protects inode / mnt pointers, flags, masks */ + spinlock_t lock; + /* List of marks for inode / vfsmount [connector->lock, mark ref] */ + struct hlist_node obj_list; + /* Head of list of marks for an object [mark ref] */ + struct fsnotify_mark_connector *connector; + /* Events types to ignore [mark->lock, group->mark_mutex] */ + __u32 ignored_mask; +#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x01 +#define FSNOTIFY_MARK_FLAG_ALIVE 0x02 +#define FSNOTIFY_MARK_FLAG_ATTACHED 0x04 + unsigned int flags; /* flags [mark->lock] */ +}; + +#ifdef CONFIG_FSNOTIFY + +/* called from the vfs helpers */ + +/* main fsnotify call to send events */ +extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, + const unsigned char *name, u32 cookie); +extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask); +extern void __fsnotify_inode_delete(struct inode *inode); +extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); +extern u32 fsnotify_get_cookie(void); + +static inline int fsnotify_inode_watches_children(struct inode *inode) +{ + /* FS_EVENT_ON_CHILD is set if the inode may care */ + if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD)) + return 0; + /* this inode might care about child events, does it care about the + * specific set of events that can happen on a child? */ + return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD; +} + +/* + * Update the dentry with a flag indicating the interest of its parent to receive + * filesystem events when those events happens to this dentry->d_inode. + */ +static inline void fsnotify_update_flags(struct dentry *dentry) +{ + assert_spin_locked(&dentry->d_lock); + + /* + * Serialisation of setting PARENT_WATCHED on the dentries is provided + * by d_lock. If inotify_inode_watched changes after we have taken + * d_lock, the following __fsnotify_update_child_dentry_flags call will + * find our entry, so it will spin until we complete here, and update + * us with the new state. 
+ */ + if (fsnotify_inode_watches_children(dentry->d_parent->d_inode)) + dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; + else + dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; +} + +/* called from fsnotify listeners, such as fanotify or dnotify */ + +/* create a new group */ +extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops); +/* get reference to a group */ +extern void fsnotify_get_group(struct fsnotify_group *group); +/* drop reference on a group from fsnotify_alloc_group */ +extern void fsnotify_put_group(struct fsnotify_group *group); +/* group destruction begins, stop queuing new events */ +extern void fsnotify_group_stop_queueing(struct fsnotify_group *group); +/* destroy group */ +extern void fsnotify_destroy_group(struct fsnotify_group *group); +/* fasync handler function */ +extern int fsnotify_fasync(int fd, struct file *file, int on); +/* Free event from memory */ +extern void fsnotify_destroy_event(struct fsnotify_group *group, + struct fsnotify_event *event); +/* attach the event to the group notification queue */ +extern int fsnotify_add_event(struct fsnotify_group *group, + struct fsnotify_event *event, + int (*merge)(struct list_head *, + struct fsnotify_event *)); +/* Queue overflow event to a notification group */ +static inline void fsnotify_queue_overflow(struct fsnotify_group *group) +{ + fsnotify_add_event(group, group->overflow_event, NULL); +} + +/* true if the group notification queue is empty */ +extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); +/* return, but do not dequeue the first event on the notification queue */ +extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group); +/* return AND dequeue the first event on the notification queue */ +extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group); + +/* functions used to manipulate the marks attached to inodes */ + +/* Get mask of events for a list of marks */ +extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn); +/* Calculate mask of events for a list of marks */ +extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn); +extern void fsnotify_init_mark(struct fsnotify_mark *mark, + struct fsnotify_group *group); +/* Find mark belonging to given group in the list of marks */ +extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, + struct fsnotify_group *group); +/* attach the mark to the object */ +extern int fsnotify_add_mark(struct fsnotify_mark *mark, + fsnotify_connp_t *connp, unsigned int type, + int allow_dups); +extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, + fsnotify_connp_t *connp, unsigned int type, + int allow_dups); +/* attach the mark to the inode */ +static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, + struct inode *inode, + int allow_dups) +{ + return fsnotify_add_mark(mark, &inode->i_fsnotify_marks, + FSNOTIFY_OBJ_TYPE_INODE, allow_dups); +} +static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, + struct inode *inode, + int allow_dups) +{ + return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks, + FSNOTIFY_OBJ_TYPE_INODE, allow_dups); +} +/* given a group and a mark, flag mark to be freed when all references are dropped */ +extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, + struct fsnotify_group *group); +/* detach mark from inode / mount list, group list, drop inode reference */ +extern void fsnotify_detach_mark(struct fsnotify_mark 
*mark); +/* free mark */ +extern void fsnotify_free_mark(struct fsnotify_mark *mark); +/* run all the marks in a group, and clear all of the marks attached to given object type */ +extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type); +/* run all the marks in a group, and clear all of the vfsmount marks */ +static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) +{ + fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL); +} +/* run all the marks in a group, and clear all of the inode marks */ +static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group) +{ + fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL); +} +extern void fsnotify_get_mark(struct fsnotify_mark *mark); +extern void fsnotify_put_mark(struct fsnotify_mark *mark); +extern void fsnotify_unmount_inodes(struct super_block *sb); +extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); +extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); + +/* put here because inotify does some weird stuff when destroying watches */ +extern void fsnotify_init_event(struct fsnotify_event *event, + struct inode *to_tell, u32 mask); + +#else + +static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, + const unsigned char *name, u32 cookie) +{ + return 0; +} + +static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) +{ + return 0; +} + +static inline void __fsnotify_inode_delete(struct inode *inode) +{} + +static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) +{} + +static inline void fsnotify_update_flags(struct dentry *dentry) +{} + +static inline u32 fsnotify_get_cookie(void) +{ + return 0; +} + +static inline void fsnotify_unmount_inodes(struct super_block *sb) +{} + +#endif /* CONFIG_FSNOTIFY */ + +#endif /* __KERNEL __ */ + +#endif /* __LINUX_FSNOTIFY_BACKEND_H */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h new file mode 100644 index 000000000..dd16e8218 --- /dev/null +++ b/include/linux/ftrace.h @@ -0,0 +1,921 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Ftrace header. For implementation details beyond the random comments + * scattered below, see: Documentation/trace/ftrace-design.rst + */ + +#ifndef _LINUX_FTRACE_H +#define _LINUX_FTRACE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * If the arch supports passing the variable contents of + * function_trace_op as the third parameter back from the + * mcount call, then the arch should define this as 1. + */ +#ifndef ARCH_SUPPORTS_FTRACE_OPS +#define ARCH_SUPPORTS_FTRACE_OPS 0 +#endif + +/* + * If the arch's mcount caller does not support all of ftrace's + * features, then it must call an indirect function that + * does. Or at least does enough to prevent any unwelcomed side effects. 
+ */ +#if !ARCH_SUPPORTS_FTRACE_OPS +# define FTRACE_FORCE_LIST_FUNC 1 +#else +# define FTRACE_FORCE_LIST_FUNC 0 +#endif + +/* Main tracing buffer and events set up */ +#ifdef CONFIG_TRACING +void trace_init(void); +void early_trace_init(void); +#else +static inline void trace_init(void) { } +static inline void early_trace_init(void) { } +#endif + +struct module; +struct ftrace_hash; + +#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \ + defined(CONFIG_DYNAMIC_FTRACE) +const char * +ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym); +int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, + char *module_name, int *exported); +#else +static inline const char * +ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym) +{ + return NULL; +} +static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, + char *module_name, int *exported) +{ + return -1; +} +#endif + + +#ifdef CONFIG_FUNCTION_TRACER + +extern int ftrace_enabled; +extern int +ftrace_enable_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +struct ftrace_ops; + +typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *regs); + +ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); + +/* + * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are + * set in the flags member. + * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and + * IPMODIFY are a kind of attribute flags which can be set only before + * registering the ftrace_ops, and can not be modified while registered. + * Changing those attribute flags after registering ftrace_ops will + * cause unexpected results. + * + * ENABLED - set/unset when ftrace_ops is registered/unregistered + * DYNAMIC - set when ftrace_ops is registered to denote dynamically + * allocated ftrace_ops which need special care + * SAVE_REGS - The ftrace_ops wants regs saved at each function called + * and passed to the callback. If this flag is set, but the + * architecture does not support passing regs + * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the + * ftrace_ops will fail to register, unless the next flag + * is set. + * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the + * handler can handle an arch that does not save regs + * (the handler tests if regs == NULL), then it can set + * this flag instead. It will not fail registering the ftrace_ops + * but, the regs field will be NULL if the arch does not support + * passing regs to the handler. + * Note, if this flag is set, the SAVE_REGS flag will automatically + * get set upon registering the ftrace_ops, if the arch supports it. + * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure + * that the call back has its own recursion protection. If it does + * not set this, then the ftrace infrastructure will add recursion + * protection for the caller. + * STUB - The ftrace_ops is just a place holder. + * INITIALIZED - The ftrace_ops has already been initialized (first use time + * register_ftrace_function() is called, it will initialized the ops) + * DELETED - The ops are being deleted, do not let them be registered again. + * ADDING - The ops is in the process of being added. 
+ * REMOVING - The ops is in the process of being removed. + * MODIFYING - The ops is in the process of changing its filter functions. + * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code. + * The arch specific code sets this flag when it allocated a + * trampoline. This lets the arch know that it can update the + * trampoline in case the callback function changes. + * The ftrace_ops trampoline can be set by the ftrace users, and + * in such cases the arch must not modify it. Only the arch ftrace + * core code should set this flag. + * IPMODIFY - The ops can modify the IP register. This can only be set with + * SAVE_REGS. If another ops with this flag set is already registered + * for any of the functions that this ops will be registered for, then + * this ops will fail to register or set_filter_ip. + * PID - Is affected by set_ftrace_pid (allows filtering on those pids) + * RCU - Set when the ops can only be called when RCU is watching. + * TRACE_ARRAY - The ops->private points to a trace_array descriptor. + */ +enum { + FTRACE_OPS_FL_ENABLED = 1 << 0, + FTRACE_OPS_FL_DYNAMIC = 1 << 1, + FTRACE_OPS_FL_SAVE_REGS = 1 << 2, + FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 3, + FTRACE_OPS_FL_RECURSION_SAFE = 1 << 4, + FTRACE_OPS_FL_STUB = 1 << 5, + FTRACE_OPS_FL_INITIALIZED = 1 << 6, + FTRACE_OPS_FL_DELETED = 1 << 7, + FTRACE_OPS_FL_ADDING = 1 << 8, + FTRACE_OPS_FL_REMOVING = 1 << 9, + FTRACE_OPS_FL_MODIFYING = 1 << 10, + FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 11, + FTRACE_OPS_FL_IPMODIFY = 1 << 12, + FTRACE_OPS_FL_PID = 1 << 13, + FTRACE_OPS_FL_RCU = 1 << 14, + FTRACE_OPS_FL_TRACE_ARRAY = 1 << 15, +}; + +#ifdef CONFIG_DYNAMIC_FTRACE +/* The hash used to know what functions callbacks trace */ +struct ftrace_ops_hash { + struct ftrace_hash __rcu *notrace_hash; + struct ftrace_hash __rcu *filter_hash; + struct mutex regex_lock; +}; + +void ftrace_free_init_mem(void); +void ftrace_free_mem(struct module *mod, void *start, void *end); +#else +static inline void ftrace_free_init_mem(void) { } +static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } +#endif + +/* + * Note, ftrace_ops can be referenced outside of RCU protection, unless + * the RCU flag is set. If ftrace_ops is allocated and not part of kernel + * core data, the unregistering of it will perform a scheduling on all CPUs + * to make sure that there are no more users. Depending on the load of the + * system that may take a bit of time. + * + * Any private data added must also take care not to be freed and if private + * data is added to a ftrace_ops that is in core code, the user of the + * ftrace_ops must perform a schedule_on_each_cpu() before freeing it. + */ +struct ftrace_ops { + ftrace_func_t func; + struct ftrace_ops __rcu *next; + unsigned long flags; + void *private; + ftrace_func_t saved_func; +#ifdef CONFIG_DYNAMIC_FTRACE + struct ftrace_ops_hash local_hash; + struct ftrace_ops_hash *func_hash; + struct ftrace_ops_hash old_hash; + unsigned long trampoline; + unsigned long trampoline_size; +#endif +}; + +/* + * Type of the current tracing. + */ +enum ftrace_tracing_type_t { + FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ + FTRACE_TYPE_RETURN, /* Hook the return of the function */ +}; + +/* Current tracing type, default is FTRACE_TYPE_ENTER */ +extern enum ftrace_tracing_type_t ftrace_tracing_type; + +/* + * The ftrace_ops must be a static and should also + * be read_mostly. These functions do modify read_mostly variables + * so use them sparely. 
Never free an ftrace_op or modify the + * next pointer after it has been registered. Even after unregistering + * it, the next pointer may still be used internally. + */ +int register_ftrace_function(struct ftrace_ops *ops); +int unregister_ftrace_function(struct ftrace_ops *ops); + +extern void ftrace_stub(unsigned long a0, unsigned long a1, + struct ftrace_ops *op, struct pt_regs *regs); + +#else /* !CONFIG_FUNCTION_TRACER */ +/* + * (un)register_ftrace_function must be a macro since the ops parameter + * must not be evaluated. + */ +#define register_ftrace_function(ops) ({ 0; }) +#define unregister_ftrace_function(ops) ({ 0; }) +static inline void ftrace_kill(void) { } +static inline void ftrace_free_init_mem(void) { } +static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } +#endif /* CONFIG_FUNCTION_TRACER */ + +#ifdef CONFIG_STACK_TRACER + +#define STACK_TRACE_ENTRIES 500 + +struct stack_trace; + +extern unsigned stack_trace_index[]; +extern struct stack_trace stack_trace_max; +extern unsigned long stack_trace_max_size; +extern arch_spinlock_t stack_trace_max_lock; + +extern int stack_tracer_enabled; +void stack_trace_print(void); +int +stack_trace_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */ +DECLARE_PER_CPU(int, disable_stack_tracer); + +/** + * stack_tracer_disable - temporarily disable the stack tracer + * + * There's a few locations (namely in RCU) where stack tracing + * cannot be executed. This function is used to disable stack + * tracing during those critical sections. + * + * This function must be called with preemption or interrupts + * disabled and stack_tracer_enable() must be called shortly after + * while preemption or interrupts are still disabled. + */ +static inline void stack_tracer_disable(void) +{ + /* Preemption or interupts must be disabled */ + if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) + WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); + this_cpu_inc(disable_stack_tracer); +} + +/** + * stack_tracer_enable - re-enable the stack tracer + * + * After stack_tracer_disable() is called, stack_tracer_enable() + * must be called shortly afterward. + */ +static inline void stack_tracer_enable(void) +{ + if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) + WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); + this_cpu_dec(disable_stack_tracer); +} +#else +static inline void stack_tracer_disable(void) { } +static inline void stack_tracer_enable(void) { } +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE + +int ftrace_arch_code_modify_prepare(void); +int ftrace_arch_code_modify_post_process(void); + +struct dyn_ftrace; + +enum ftrace_bug_type { + FTRACE_BUG_UNKNOWN, + FTRACE_BUG_INIT, + FTRACE_BUG_NOP, + FTRACE_BUG_CALL, + FTRACE_BUG_UPDATE, +}; +extern enum ftrace_bug_type ftrace_bug_type; + +/* + * Archs can set this to point to a variable that holds the value that was + * expected at the call site before calling ftrace_bug(). + */ +extern const void *ftrace_expected; + +void ftrace_bug(int err, struct dyn_ftrace *rec); + +struct seq_file; + +extern int ftrace_text_reserved(const void *start, const void *end); + +struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr); + +bool is_ftrace_trampoline(unsigned long addr); + +/* + * The dyn_ftrace record's flags field is split into two parts. + * the first part which is '0-FTRACE_REF_MAX' is a counter of + * the number of callbacks that have registered the function that + * the dyn_ftrace descriptor represents. 
+ * + * The second part is a mask: + * ENABLED - the function is being traced + * REGS - the record wants the function to save regs + * REGS_EN - the function is set up to save regs. + * IPMODIFY - the record allows for the IP address to be changed. + * DISABLED - the record is not ready to be touched yet + * + * When a new ftrace_ops is registered and wants a function to save + * pt_regs, the rec->flag REGS is set. When the function has been + * set up to save regs, the REG_EN flag is set. Once a function + * starts saving regs it will do so until all ftrace_ops are removed + * from tracing that function. + */ +enum { + FTRACE_FL_ENABLED = (1UL << 31), + FTRACE_FL_REGS = (1UL << 30), + FTRACE_FL_REGS_EN = (1UL << 29), + FTRACE_FL_TRAMP = (1UL << 28), + FTRACE_FL_TRAMP_EN = (1UL << 27), + FTRACE_FL_IPMODIFY = (1UL << 26), + FTRACE_FL_DISABLED = (1UL << 25), +}; + +#define FTRACE_REF_MAX_SHIFT 25 +#define FTRACE_FL_BITS 7 +#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) +#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) +#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) + +#define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK) + +struct dyn_ftrace { + unsigned long ip; /* address of mcount call-site */ + unsigned long flags; + struct dyn_arch_ftrace arch; +}; + +int ftrace_force_update(void); +int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, + int remove, int reset); +int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +void ftrace_set_global_filter(unsigned char *buf, int len, int reset); +void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); +void ftrace_free_filter(struct ftrace_ops *ops); +void ftrace_ops_set_global_filter(struct ftrace_ops *ops); + +enum { + FTRACE_UPDATE_CALLS = (1 << 0), + FTRACE_DISABLE_CALLS = (1 << 1), + FTRACE_UPDATE_TRACE_FUNC = (1 << 2), + FTRACE_START_FUNC_RET = (1 << 3), + FTRACE_STOP_FUNC_RET = (1 << 4), +}; + +/* + * The FTRACE_UPDATE_* enum is used to pass information back + * from the ftrace_update_record() and ftrace_test_record() + * functions. These are called by the code update routines + * to find out what is to be done for a given function. 
+ * + * IGNORE - The function is already what we want it to be + * MAKE_CALL - Start tracing the function + * MODIFY_CALL - Stop saving regs for the function + * MAKE_NOP - Stop tracing the function + */ +enum { + FTRACE_UPDATE_IGNORE, + FTRACE_UPDATE_MAKE_CALL, + FTRACE_UPDATE_MODIFY_CALL, + FTRACE_UPDATE_MAKE_NOP, +}; + +enum { + FTRACE_ITER_FILTER = (1 << 0), + FTRACE_ITER_NOTRACE = (1 << 1), + FTRACE_ITER_PRINTALL = (1 << 2), + FTRACE_ITER_DO_PROBES = (1 << 3), + FTRACE_ITER_PROBE = (1 << 4), + FTRACE_ITER_MOD = (1 << 5), + FTRACE_ITER_ENABLED = (1 << 6), +}; + +void arch_ftrace_update_code(int command); + +struct ftrace_rec_iter; + +struct ftrace_rec_iter *ftrace_rec_iter_start(void); +struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter); +struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); + +#define for_ftrace_rec_iter(iter) \ + for (iter = ftrace_rec_iter_start(); \ + iter; \ + iter = ftrace_rec_iter_next(iter)) + + +int ftrace_update_record(struct dyn_ftrace *rec, int enable); +int ftrace_test_record(struct dyn_ftrace *rec, int enable); +void ftrace_run_stop_machine(int command); +unsigned long ftrace_location(unsigned long ip); +unsigned long ftrace_location_range(unsigned long start, unsigned long end); +unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec); +unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec); + +extern ftrace_func_t ftrace_trace_function; + +int ftrace_regex_open(struct ftrace_ops *ops, int flag, + struct inode *inode, struct file *file); +ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, + size_t cnt, loff_t *ppos); +ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, + size_t cnt, loff_t *ppos); +int ftrace_regex_release(struct inode *inode, struct file *file); + +void __init +ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable); + +/* defined in arch */ +extern int ftrace_ip_converted(unsigned long ip); +extern int ftrace_dyn_arch_init(void); +extern void ftrace_replace_code(int enable); +extern int ftrace_update_ftrace_func(ftrace_func_t func); +extern void ftrace_caller(void); +extern void ftrace_regs_caller(void); +extern void ftrace_call(void); +extern void ftrace_regs_call(void); +extern void mcount_call(void); + +void ftrace_modify_all_code(int command); + +#ifndef FTRACE_ADDR +#define FTRACE_ADDR ((unsigned long)ftrace_caller) +#endif + +#ifndef FTRACE_GRAPH_ADDR +#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller) +#endif + +#ifndef FTRACE_REGS_ADDR +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) +#else +# define FTRACE_REGS_ADDR FTRACE_ADDR +#endif +#endif + +/* + * If an arch would like functions that are only traced + * by the function graph tracer to jump directly to its own + * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR + * to be that address to jump to. 
+ */ +#ifndef FTRACE_GRAPH_TRAMP_ADDR +#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0) +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +extern void ftrace_graph_caller(void); +extern int ftrace_enable_ftrace_graph_caller(void); +extern int ftrace_disable_ftrace_graph_caller(void); +#else +static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; } +static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } +#endif + +/** + * ftrace_make_nop - convert code into nop + * @mod: module structure if called by module load initialization + * @rec: the mcount call site record + * @addr: the address that the call site should be calling + * + * This is a very sensitive operation and great care needs + * to be taken by the arch. The operation should carefully + * read the location, check to see if what is read is indeed + * what we expect it to be, and then on success of the compare, + * it should write to the location. + * + * The code segment at @rec->ip should be a caller to @addr + * + * Return must be: + * 0 on success + * -EFAULT on error reading the location + * -EINVAL on a failed compare of the contents + * -EPERM on error writing to the location + * Any other value will be considered a failure. + */ +extern int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr); + +/** + * ftrace_make_call - convert a nop call site into a call to addr + * @rec: the mcount call site record + * @addr: the address that the call site should call + * + * This is a very sensitive operation and great care needs + * to be taken by the arch. The operation should carefully + * read the location, check to see if what is read is indeed + * what we expect it to be, and then on success of the compare, + * it should write to the location. + * + * The code segment at @rec->ip should be a nop + * + * Return must be: + * 0 on success + * -EFAULT on error reading the location + * -EINVAL on a failed compare of the contents + * -EPERM on error writing to the location + * Any other value will be considered a failure. + */ +extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +/** + * ftrace_modify_call - convert from one addr to another (no nop) + * @rec: the mcount call site record + * @old_addr: the address expected to be currently called to + * @addr: the address to change to + * + * This is a very sensitive operation and great care needs + * to be taken by the arch. The operation should carefully + * read the location, check to see if what is read is indeed + * what we expect it to be, and then on success of the compare, + * it should write to the location. + * + * The code segment at @rec->ip should be a caller to @old_addr + * + * Return must be: + * 0 on success + * -EFAULT on error reading the location + * -EINVAL on a failed compare of the contents + * -EPERM on error writing to the location + * Any other value will be considered a failure. 
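/*
 * Editorial illustration, not part of the patched header: the verify-then-
 * write pattern the ftrace_make_nop()/ftrace_make_call() comments above ask
 * an arch to follow, mapped onto the documented error codes.
 * my_arch_text_poke() is a hypothetical stand-in for the arch's code-patching
 * primitive, and MCOUNT_INSN_SIZE is the usual per-arch constant.
 */
#include <linux/string.h>
#include <linux/uaccess.h>

static int my_arch_replace_insn(struct dyn_ftrace *rec,
				const unsigned char *expected,
				const unsigned char *replacement)
{
	unsigned char cur[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
		return -EFAULT;		/* could not read the call site */

	if (memcmp(cur, expected, MCOUNT_INSN_SIZE))
		return -EINVAL;		/* the site does not contain what we expect */

	if (my_arch_text_poke((void *)rec->ip, replacement, MCOUNT_INSN_SIZE))
		return -EPERM;		/* writing the new instruction failed */

	return 0;
}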
+ */ +extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr); +#else +/* Should never be called */ +static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + return -EINVAL; +} +#endif + +/* May be defined in arch */ +extern int ftrace_arch_read_dyn_info(char *buf, int size); + +extern int skip_trace(unsigned long ip); +extern void ftrace_module_init(struct module *mod); +extern void ftrace_module_enable(struct module *mod); +extern void ftrace_release_mod(struct module *mod); + +extern void ftrace_disable_daemon(void); +extern void ftrace_enable_daemon(void); +#else /* CONFIG_DYNAMIC_FTRACE */ +static inline int skip_trace(unsigned long ip) { return 0; } +static inline int ftrace_force_update(void) { return 0; } +static inline void ftrace_disable_daemon(void) { } +static inline void ftrace_enable_daemon(void) { } +static inline void ftrace_module_init(struct module *mod) { } +static inline void ftrace_module_enable(struct module *mod) { } +static inline void ftrace_release_mod(struct module *mod) { } +static inline int ftrace_text_reserved(const void *start, const void *end) +{ + return 0; +} +static inline unsigned long ftrace_location(unsigned long ip) +{ + return 0; +} + +/* + * Again users of functions that have ftrace_ops may not + * have them defined when ftrace is not enabled, but these + * functions may still be called. Use a macro instead of inline. + */ +#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) +#define ftrace_set_early_filter(ops, buf, enable) do { } while (0) +#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; }) +#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) +#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) +#define ftrace_free_filter(ops) do { } while (0) +#define ftrace_ops_set_global_filter(ops) do { } while (0) + +static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, + size_t cnt, loff_t *ppos) { return -ENODEV; } +static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, + size_t cnt, loff_t *ppos) { return -ENODEV; } +static inline int +ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } + +static inline bool is_ftrace_trampoline(unsigned long addr) +{ + return false; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* totally disable ftrace - can not re-enable after this */ +void ftrace_kill(void); + +static inline void tracer_disable(void) +{ +#ifdef CONFIG_FUNCTION_TRACER + ftrace_enabled = 0; +#endif +} + +/* + * Ftrace disable/restore without lock. Some synchronization mechanism + * must be used to prevent ftrace_enabled to be changed between + * disable/restore. 
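/*
 * Editorial illustration, not part of the patched header: using the
 * save/restore helpers defined just below around a region that must not be
 * traced. The mutex (my_trace_toggle_lock, hypothetical) provides the
 * synchronization the comment above requires so that ftrace_enabled cannot
 * change between the save and the restore.
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(my_trace_toggle_lock);

static void my_do_untraced_work(void)
{
	int saved;

	mutex_lock(&my_trace_toggle_lock);
	saved = __ftrace_enabled_save();
	/* ... work that must not fire the function tracer ... */
	__ftrace_enabled_restore(saved);
	mutex_unlock(&my_trace_toggle_lock);
}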
+ */ +static inline int __ftrace_enabled_save(void) +{ +#ifdef CONFIG_FUNCTION_TRACER + int saved_ftrace_enabled = ftrace_enabled; + ftrace_enabled = 0; + return saved_ftrace_enabled; +#else + return 0; +#endif +} + +static inline void __ftrace_enabled_restore(int enabled) +{ +#ifdef CONFIG_FUNCTION_TRACER + ftrace_enabled = enabled; +#endif +} + +/* All archs should have this, but we define it for consistency */ +#ifndef ftrace_return_address0 +# define ftrace_return_address0 __builtin_return_address(0) +#endif + +/* Archs may use other ways for ADDR1 and beyond */ +#ifndef ftrace_return_address +# ifdef CONFIG_FRAME_POINTER +# define ftrace_return_address(n) __builtin_return_address(n) +# else +# define ftrace_return_address(n) 0UL +# endif +#endif + +#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0) +#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1)) +#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2)) +#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3)) +#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4)) +#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5)) +#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6)) + +static inline unsigned long get_lock_parent_ip(void) +{ + unsigned long addr = CALLER_ADDR0; + + if (!in_lock_functions(addr)) + return addr; + addr = CALLER_ADDR1; + if (!in_lock_functions(addr)) + return addr; + return CALLER_ADDR2; +} + +#ifdef CONFIG_TRACE_PREEMPT_TOGGLE + extern void trace_preempt_on(unsigned long a0, unsigned long a1); + extern void trace_preempt_off(unsigned long a0, unsigned long a1); +#else +/* + * Use defines instead of static inlines because some arches will make code out + * of the CALLER_ADDR, when we really want these to be a real nop. + */ +# define trace_preempt_on(a0, a1) do { } while (0) +# define trace_preempt_off(a0, a1) do { } while (0) +#endif + +#ifdef CONFIG_FTRACE_MCOUNT_RECORD +extern void ftrace_init(void); +#else +static inline void ftrace_init(void) { } +#endif + +/* + * Structure that defines an entry function trace. + * It's already packed but the attribute "packed" is needed + * to remove extra padding at the end. + */ +struct ftrace_graph_ent { + unsigned long func; /* Current function */ + int depth; +} __packed; + +/* + * Structure that defines a return function trace. + * It's already packed but the attribute "packed" is needed + * to remove extra padding at the end. + */ +struct ftrace_graph_ret { + unsigned long func; /* Current function */ + /* Number of functions that overran the depth limit for current task */ + unsigned long overrun; + unsigned long long calltime; + unsigned long long rettime; + int depth; +} __packed; + +/* Type of the callback handlers for tracing function graph*/ +typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ +typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +/* + * Stack of return addresses for functions + * of a thread. + * Used in struct thread_info + */ +struct ftrace_ret_stack { + unsigned long ret; + unsigned long func; + unsigned long long calltime; +#ifdef CONFIG_FUNCTION_PROFILER + unsigned long long subtime; +#endif +#ifdef HAVE_FUNCTION_GRAPH_FP_TEST + unsigned long fp; +#endif +#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + unsigned long *retp; +#endif +}; + +/* + * Primary handler of a function return. + * It relays on ftrace_return_to_handler. 
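/*
 * Editorial illustration, not part of the patched header: a minimal
 * entry/return pair for register_ftrace_graph()/unregister_ftrace_graph(),
 * declared just below. The 1 ms threshold is purely illustrative.
 */
static int my_graph_entry(struct ftrace_graph_ent *ent)
{
	return 1;	/* non-zero: record this function and hook its return */
}

static void my_graph_return(struct ftrace_graph_ret *ret)
{
	unsigned long long delta = ret->rettime - ret->calltime;

	if (delta > 1000000ULL)
		pr_debug("%ps took %llu ns (depth %d)\n",
			 (void *)ret->func, delta, ret->depth);
}

/*
 * Attach with:  register_ftrace_graph(my_graph_return, my_graph_entry);
 * Detach with:  unregister_ftrace_graph();
 */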
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp);
+
+unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
+ unsigned long ret, unsigned long *retp);
+
+/*
+ * Sometimes we don't want to trace a function with the function
+ * graph tracer but we still want it to be traced by the usual function
+ * tracer if the function graph tracer is not configured.
+ */
+#define __notrace_funcgraph notrace
+
+#define FTRACE_NOTRACE_DEPTH 65536
+#define FTRACE_RETFUNC_DEPTH 50
+#define FTRACE_RETSTACK_ALLOC_SIZE 32
+extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc);
+
+extern bool ftrace_graph_is_dead(void);
+extern void ftrace_graph_stop(void);
+
+/* The current handlers in use */
+extern trace_func_graph_ret_t ftrace_graph_return;
+extern trace_func_graph_ent_t ftrace_graph_entry;
+
+extern void unregister_ftrace_graph(void);
+
+extern void ftrace_graph_init_task(struct task_struct *t);
+extern void ftrace_graph_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
+
+static inline int task_curr_ret_stack(struct task_struct *t)
+{
+ return t->curr_ret_stack;
+}
+
+static inline void pause_graph_tracing(void)
+{
+ atomic_inc(&current->tracing_graph_pause);
+}
+
+static inline void unpause_graph_tracing(void)
+{
+ atomic_dec(&current->tracing_graph_pause);
+}
+#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
+
+#define __notrace_funcgraph
+
+static inline void ftrace_graph_init_task(struct task_struct *t) { }
+static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
+
+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc)
+{
+ return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
+static inline int task_curr_ret_stack(struct task_struct *tsk)
+{
+ return -1;
+}
+
+static inline unsigned long
+ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
+ unsigned long *retp)
+{
+ return ret;
+}
+
+static inline void pause_graph_tracing(void) { }
+static inline void unpause_graph_tracing(void) { }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_TRACING
+
+/* flags for current->trace */
+enum {
+ TSK_TRACE_FL_TRACE_BIT = 0,
+ TSK_TRACE_FL_GRAPH_BIT = 1,
+};
+enum {
+ TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
+ TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
+};
+
+static inline void set_tsk_trace_trace(struct task_struct *tsk)
+{
+ set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
+}
+
+static inline void clear_tsk_trace_trace(struct task_struct *tsk)
+{
+ clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
+}
+
+static inline int test_tsk_trace_trace(struct task_struct *tsk)
+{
+ return tsk->trace & TSK_TRACE_FL_TRACE;
+}
+
+static inline void set_tsk_trace_graph(struct task_struct *tsk)
+{
+ set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
+}
+
+static inline void clear_tsk_trace_graph(struct task_struct *tsk)
+{
+ clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
+}
+
+static inline int test_tsk_trace_graph(struct task_struct *tsk)
+{
+ return tsk->trace & TSK_TRACE_FL_GRAPH;
+}
+
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
+extern int tracepoint_printk;
+
+extern void disable_trace_on_warning(void);
+extern int
__disable_trace_on_warning; + +int tracepoint_printk_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +#else /* CONFIG_TRACING */ +static inline void disable_trace_on_warning(void) { } +#endif /* CONFIG_TRACING */ + +#ifdef CONFIG_FTRACE_SYSCALLS + +unsigned long arch_syscall_addr(int nr); + +#endif /* CONFIG_FTRACE_SYSCALLS */ + +#endif /* _LINUX_FTRACE_H */ diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h new file mode 100644 index 000000000..ccda97dc7 --- /dev/null +++ b/include/linux/ftrace_irq.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FTRACE_IRQ_H +#define _LINUX_FTRACE_IRQ_H + + +#ifdef CONFIG_FTRACE_NMI_ENTER +extern void arch_ftrace_nmi_enter(void); +extern void arch_ftrace_nmi_exit(void); +#else +static inline void arch_ftrace_nmi_enter(void) { } +static inline void arch_ftrace_nmi_exit(void) { } +#endif + +#ifdef CONFIG_HWLAT_TRACER +extern bool trace_hwlat_callback_enabled; +extern void trace_hwlat_callback(bool enter); +#endif + +static inline void ftrace_nmi_enter(void) +{ +#ifdef CONFIG_HWLAT_TRACER + if (trace_hwlat_callback_enabled) + trace_hwlat_callback(true); +#endif + arch_ftrace_nmi_enter(); +} + +static inline void ftrace_nmi_exit(void) +{ + arch_ftrace_nmi_exit(); +#ifdef CONFIG_HWLAT_TRACER + if (trace_hwlat_callback_enabled) + trace_hwlat_callback(false); +#endif +} + +#endif /* _LINUX_FTRACE_IRQ_H */ diff --git a/include/linux/futex.h b/include/linux/futex.h new file mode 100644 index 000000000..b70df27d7 --- /dev/null +++ b/include/linux/futex.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FUTEX_H +#define _LINUX_FUTEX_H + +#include +#include + +#include + +struct inode; +struct mm_struct; +struct task_struct; + +/* + * Futexes are matched on equal values of this key. + * The key type depends on whether it's a shared or private mapping. + * Don't rearrange members without looking at hash_futex(). + * + * offset is aligned to a multiple of sizeof(u32) (== 4) by definition. 
+ * We use the two low order bits of offset to tell what is the kind of key : + * 00 : Private process futex (PTHREAD_PROCESS_PRIVATE) + * (no reference on an inode or mm) + * 01 : Shared futex (PTHREAD_PROCESS_SHARED) + * mapped on a file (reference on the underlying inode) + * 10 : Shared futex (PTHREAD_PROCESS_SHARED) + * (but private mapping on an mm, and reference taken on it) +*/ + +#define FUT_OFF_INODE 1 /* We set bit 0 if key has a reference on inode */ +#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */ + +union futex_key { + struct { + u64 i_seq; + unsigned long pgoff; + unsigned int offset; + } shared; + struct { + union { + struct mm_struct *mm; + u64 __tmp; + }; + unsigned long address; + unsigned int offset; + } private; + struct { + u64 ptr; + unsigned long word; + unsigned int offset; + } both; +}; + +#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } } + +#ifdef CONFIG_FUTEX +enum { + FUTEX_STATE_OK, + FUTEX_STATE_EXITING, + FUTEX_STATE_DEAD, +}; + +static inline void futex_init_task(struct task_struct *tsk) +{ + tsk->robust_list = NULL; +#ifdef CONFIG_COMPAT + tsk->compat_robust_list = NULL; +#endif + INIT_LIST_HEAD(&tsk->pi_state_list); + tsk->pi_state_cache = NULL; + tsk->futex_state = FUTEX_STATE_OK; + mutex_init(&tsk->futex_exit_mutex); +} + +void futex_exit_recursive(struct task_struct *tsk); +void futex_exit_release(struct task_struct *tsk); +void futex_exec_release(struct task_struct *tsk); + +long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, + u32 __user *uaddr2, u32 val2, u32 val3); +#else +static inline void futex_init_task(struct task_struct *tsk) { } +static inline void futex_exit_recursive(struct task_struct *tsk) { } +static inline void futex_exit_release(struct task_struct *tsk) { } +static inline void futex_exec_release(struct task_struct *tsk) { } +static inline long do_futex(u32 __user *uaddr, int op, u32 val, + ktime_t *timeout, u32 __user *uaddr2, + u32 val2, u32 val3) +{ + return -EINVAL; +} +#endif + +#endif diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h new file mode 100644 index 000000000..faebf0ca0 --- /dev/null +++ b/include/linux/fwnode.h @@ -0,0 +1,129 @@ +/* + * fwnode.h - Firmware device node object handle type definition. + * + * Copyright (C) 2015, Intel Corporation + * Author: Rafael J. Wysocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_FWNODE_H_ +#define _LINUX_FWNODE_H_ + +#include + +struct fwnode_operations; +struct device; + +struct fwnode_handle { + struct fwnode_handle *secondary; + const struct fwnode_operations *ops; +}; + +/** + * struct fwnode_endpoint - Fwnode graph endpoint + * @port: Port number + * @id: Endpoint id + * @local_fwnode: reference to the related fwnode + */ +struct fwnode_endpoint { + unsigned int port; + unsigned int id; + const struct fwnode_handle *local_fwnode; +}; + +#define NR_FWNODE_REFERENCE_ARGS 8 + +/** + * struct fwnode_reference_args - Fwnode reference with additional arguments + * @fwnode:- A reference to the base fwnode + * @nargs: Number of elements in @args array + * @args: Integer arguments on the fwnode + */ +struct fwnode_reference_args { + struct fwnode_handle *fwnode; + unsigned int nargs; + u64 args[NR_FWNODE_REFERENCE_ARGS]; +}; + +/** + * struct fwnode_operations - Operations for fwnode interface + * @get: Get a reference to an fwnode. 
+ * @put: Put a reference to an fwnode. + * @device_get_match_data: Return the device driver match data. + * @property_present: Return true if a property is present. + * @property_read_integer_array: Read an array of integer properties. Return + * zero on success, a negative error code + * otherwise. + * @property_read_string_array: Read an array of string properties. Return zero + * on success, a negative error code otherwise. + * @get_parent: Return the parent of an fwnode. + * @get_next_child_node: Return the next child node in an iteration. + * @get_named_child_node: Return a child node with a given name. + * @get_reference_args: Return a reference pointed to by a property, with args + * @graph_get_next_endpoint: Return an endpoint node in an iteration. + * @graph_get_remote_endpoint: Return the remote endpoint node of a local + * endpoint node. + * @graph_get_port_parent: Return the parent node of a port node. + * @graph_parse_endpoint: Parse endpoint for port and endpoint id. + */ +struct fwnode_operations { + struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); + void (*put)(struct fwnode_handle *fwnode); + bool (*device_is_available)(const struct fwnode_handle *fwnode); + const void *(*device_get_match_data)(const struct fwnode_handle *fwnode, + const struct device *dev); + bool (*property_present)(const struct fwnode_handle *fwnode, + const char *propname); + int (*property_read_int_array)(const struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, void *val, + size_t nval); + int + (*property_read_string_array)(const struct fwnode_handle *fwnode_handle, + const char *propname, const char **val, + size_t nval); + struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode); + struct fwnode_handle * + (*get_next_child_node)(const struct fwnode_handle *fwnode, + struct fwnode_handle *child); + struct fwnode_handle * + (*get_named_child_node)(const struct fwnode_handle *fwnode, + const char *name); + int (*get_reference_args)(const struct fwnode_handle *fwnode, + const char *prop, const char *nargs_prop, + unsigned int nargs, unsigned int index, + struct fwnode_reference_args *args); + struct fwnode_handle * + (*graph_get_next_endpoint)(const struct fwnode_handle *fwnode, + struct fwnode_handle *prev); + struct fwnode_handle * + (*graph_get_remote_endpoint)(const struct fwnode_handle *fwnode); + struct fwnode_handle * + (*graph_get_port_parent)(struct fwnode_handle *fwnode); + int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode, + struct fwnode_endpoint *endpoint); +}; + +#define fwnode_has_op(fwnode, op) \ + ((fwnode) && (fwnode)->ops && (fwnode)->ops->op) +#define fwnode_call_int_op(fwnode, op, ...) \ + (fwnode ? (fwnode_has_op(fwnode, op) ? \ + (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \ + -EINVAL) +#define fwnode_call_bool_op(fwnode, op, ...) \ + (fwnode ? (fwnode_has_op(fwnode, op) ? \ + (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) : \ + false) +#define fwnode_call_ptr_op(fwnode, op, ...) \ + (fwnode_has_op(fwnode, op) ? \ + (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : NULL) +#define fwnode_call_void_op(fwnode, op, ...) 
\ + do { \ + if (fwnode_has_op(fwnode, op)) \ + (fwnode)->ops->op(fwnode, ## __VA_ARGS__); \ + } while (false) + +#endif diff --git a/include/linux/gameport.h b/include/linux/gameport.h new file mode 100644 index 000000000..bb7de09e8 --- /dev/null +++ b/include/linux/gameport.h @@ -0,0 +1,219 @@ +/* + * Copyright (c) 1999-2002 Vojtech Pavlik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ +#ifndef _GAMEPORT_H +#define _GAMEPORT_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct gameport { + + void *port_data; /* Private pointer for gameport drivers */ + char name[32]; + char phys[32]; + + int io; + int speed; + int fuzz; + + void (*trigger)(struct gameport *); + unsigned char (*read)(struct gameport *); + int (*cooked_read)(struct gameport *, int *, int *); + int (*calibrate)(struct gameport *, int *, int *); + int (*open)(struct gameport *, int); + void (*close)(struct gameport *); + + struct timer_list poll_timer; + unsigned int poll_interval; /* in msecs */ + spinlock_t timer_lock; + unsigned int poll_cnt; + void (*poll_handler)(struct gameport *); + + struct gameport *parent, *child; + + struct gameport_driver *drv; + struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */ + + struct device dev; + + struct list_head node; +}; +#define to_gameport_port(d) container_of(d, struct gameport, dev) + +struct gameport_driver { + const char *description; + + int (*connect)(struct gameport *, struct gameport_driver *drv); + int (*reconnect)(struct gameport *); + void (*disconnect)(struct gameport *); + + struct device_driver driver; + + bool ignore; +}; +#define to_gameport_driver(d) container_of(d, struct gameport_driver, driver) + +int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode); +void gameport_close(struct gameport *gameport); + +#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) + +void __gameport_register_port(struct gameport *gameport, struct module *owner); +/* use a define to avoid include chaining to get THIS_MODULE */ +#define gameport_register_port(gameport) \ + __gameport_register_port(gameport, THIS_MODULE) + +void gameport_unregister_port(struct gameport *gameport); + +__printf(2, 3) +void gameport_set_phys(struct gameport *gameport, const char *fmt, ...); + +#else + +static inline void gameport_register_port(struct gameport *gameport) +{ + return; +} + +static inline void gameport_unregister_port(struct gameport *gameport) +{ + return; +} + +static inline __printf(2, 3) +void gameport_set_phys(struct gameport *gameport, const char *fmt, ...) +{ + return; +} + +#endif + +static inline struct gameport *gameport_allocate_port(void) +{ + struct gameport *gameport = kzalloc(sizeof(struct gameport), GFP_KERNEL); + + return gameport; +} + +static inline void gameport_free_port(struct gameport *gameport) +{ + kfree(gameport); +} + +static inline void gameport_set_name(struct gameport *gameport, const char *name) +{ + strlcpy(gameport->name, name, sizeof(gameport->name)); +} + +/* + * Use the following functions to manipulate gameport's per-port + * driver-specific data. 
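/*
 * Editorial illustration, not part of the patched header: how a port driver
 * would typically allocate, describe and register a port with the helpers
 * above. The legacy 0x201 I/O port is only an example value.
 */
static struct gameport *my_port;

static int my_port_probe(void)
{
	my_port = gameport_allocate_port();
	if (!my_port)
		return -ENOMEM;

	gameport_set_name(my_port, "Example gameport");
	gameport_set_phys(my_port, "isa%04x/gameport0", 0x201);
	my_port->io = 0x201;
	gameport_register_port(my_port);	/* now visible to gameport drivers */
	return 0;
}

static void my_port_remove(void)
{
	/* the core drops the final device reference and frees the port */
	gameport_unregister_port(my_port);
}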
+ */ +static inline void *gameport_get_drvdata(struct gameport *gameport) +{ + return dev_get_drvdata(&gameport->dev); +} + +static inline void gameport_set_drvdata(struct gameport *gameport, void *data) +{ + dev_set_drvdata(&gameport->dev, data); +} + +/* + * Use the following functions to pin gameport's driver in process context + */ +static inline int gameport_pin_driver(struct gameport *gameport) +{ + return mutex_lock_interruptible(&gameport->drv_mutex); +} + +static inline void gameport_unpin_driver(struct gameport *gameport) +{ + mutex_unlock(&gameport->drv_mutex); +} + +int __must_check __gameport_register_driver(struct gameport_driver *drv, + struct module *owner, const char *mod_name); + +/* use a define to avoid include chaining to get THIS_MODULE & friends */ +#define gameport_register_driver(drv) \ + __gameport_register_driver(drv, THIS_MODULE, KBUILD_MODNAME) + +void gameport_unregister_driver(struct gameport_driver *drv); + +/** + * module_gameport_driver() - Helper macro for registering a gameport driver + * @__gameport_driver: gameport_driver struct + * + * Helper macro for gameport drivers which do not do anything special in + * module init/exit. This eliminates a lot of boilerplate. Each module may + * only use this macro once, and calling it replaces module_init() and + * module_exit(). + */ +#define module_gameport_driver(__gameport_driver) \ + module_driver(__gameport_driver, gameport_register_driver, \ + gameport_unregister_driver) + + +static inline void gameport_trigger(struct gameport *gameport) +{ + if (gameport->trigger) + gameport->trigger(gameport); + else + outb(0xff, gameport->io); +} + +static inline unsigned char gameport_read(struct gameport *gameport) +{ + if (gameport->read) + return gameport->read(gameport); + else + return inb(gameport->io); +} + +static inline int gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons) +{ + if (gameport->cooked_read) + return gameport->cooked_read(gameport, axes, buttons); + else + return -1; +} + +static inline int gameport_calibrate(struct gameport *gameport, int *axes, int *max) +{ + if (gameport->calibrate) + return gameport->calibrate(gameport, axes, max); + else + return -1; +} + +static inline int gameport_time(struct gameport *gameport, int time) +{ + return (time * gameport->speed) / 1000; +} + +static inline void gameport_set_poll_handler(struct gameport *gameport, void (*handler)(struct gameport *)) +{ + gameport->poll_handler = handler; +} + +static inline void gameport_set_poll_interval(struct gameport *gameport, unsigned int msecs) +{ + gameport->poll_interval = msecs; +} + +void gameport_start_polling(struct gameport *gameport); +void gameport_stop_polling(struct gameport *gameport); + +#endif diff --git a/include/linux/gcd.h b/include/linux/gcd.h new file mode 100644 index 000000000..cb572677f --- /dev/null +++ b/include/linux/gcd.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _GCD_H +#define _GCD_H + +#include + +unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__; + +#endif /* _GCD_H */ diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h new file mode 100644 index 000000000..dd0a45237 --- /dev/null +++ b/include/linux/genalloc.h @@ -0,0 +1,172 @@ +/* + * Basic general purpose allocator for managing special purpose + * memory, for example, memory that is not managed by the regular + * kmalloc/kfree interface. Uses for this includes on-device special + * memory, uncached memory etc. 
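/*
 * Editorial illustration, not part of the patched header: minimal use of the
 * pool API declared below for a chunk of driver-private SRAM. sram_virt and
 * sram_size are hypothetical driver values; -1 means "any NUMA node".
 */
static struct gen_pool *my_sram_pool;

static int my_sram_pool_init(void *sram_virt, size_t sram_size)
{
	unsigned long block;

	my_sram_pool = gen_pool_create(5, -1);	/* 2^5 = 32-byte allocation granules */
	if (!my_sram_pool)
		return -ENOMEM;

	if (gen_pool_add(my_sram_pool, (unsigned long)sram_virt, sram_size, -1)) {
		gen_pool_destroy(my_sram_pool);
		return -ENOMEM;
	}

	block = gen_pool_alloc(my_sram_pool, 256);	/* 0 means the pool is exhausted */
	if (block)
		gen_pool_free(my_sram_pool, block, 256);
	return 0;
}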
+ * + * It is safe to use the allocator in NMI handlers and other special + * unblockable contexts that could otherwise deadlock on locks. This + * is implemented by using atomic operations and retries on any + * conflicts. The disadvantage is that there may be livelocks in + * extreme cases. For better scalability, one allocator can be used + * for each CPU. + * + * The lockless operation only works if there is enough memory + * available. If new memory is added to the pool a lock has to be + * still taken. So any user relying on locklessness has to ensure + * that sufficient memory is preallocated. + * + * The basic atomic operation of this allocator is cmpxchg on long. + * On architectures that don't have NMI-safe cmpxchg implementation, + * the allocator can NOT be used in NMI handler. So code uses the + * allocator in NMI handler should depend on + * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + + +#ifndef __GENALLOC_H__ +#define __GENALLOC_H__ + +#include +#include +#include + +struct device; +struct device_node; +struct gen_pool; + +/** + * typedef genpool_algo_t: Allocation callback function type definition + * @map: Pointer to bitmap + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @data: optional additional data used by the callback + * @pool: the pool being allocated from + */ +typedef unsigned long (*genpool_algo_t)(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + void *data, struct gen_pool *pool, + unsigned long start_addr); + +/* + * General purpose special memory pool descriptor. + */ +struct gen_pool { + spinlock_t lock; + struct list_head chunks; /* list of chunks in this pool */ + int min_alloc_order; /* minimum allocation order */ + + genpool_algo_t algo; /* allocation function */ + void *data; + + const char *name; +}; + +/* + * General purpose special memory pool chunk descriptor. + */ +struct gen_pool_chunk { + struct list_head next_chunk; /* next chunk in pool */ + atomic_long_t avail; + phys_addr_t phys_addr; /* physical starting address of memory chunk */ + unsigned long start_addr; /* start address of memory chunk */ + unsigned long end_addr; /* end address of memory chunk (inclusive) */ + unsigned long bits[0]; /* bitmap for allocating memory chunk */ +}; + +/* + * gen_pool data descriptor for gen_pool_first_fit_align. + */ +struct genpool_data_align { + int align; /* alignment by bytes for starting address */ +}; + +/* + * gen_pool data descriptor for gen_pool_fixed_alloc. + */ +struct genpool_data_fixed { + unsigned long offset; /* The offset of the specific region */ +}; + +extern struct gen_pool *gen_pool_create(int, int); +extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long); +extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t, + size_t, int); +/** + * gen_pool_add - add a new chunk of special memory to the pool + * @pool: pool to add new memory chunk to + * @addr: starting address of memory chunk to add to pool + * @size: size in bytes of the memory chunk to add to pool + * @nid: node id of the node the chunk structure and bitmap should be + * allocated on, or -1 + * + * Add a new chunk of special memory to the specified pool. + * + * Returns 0 on success or a -ve errno on failure. 
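/*
 * Editorial illustration, not part of the patched header: when the chunk's
 * physical address matters (device-visible SRAM, for instance), register it
 * with gen_pool_add_virt() and hand out bus addresses with
 * gen_pool_dma_alloc(), both declared nearby. vaddr, paddr and size are
 * hypothetical driver values.
 */
static void *my_carve_dma_buffer(struct gen_pool *pool, void *vaddr,
				 phys_addr_t paddr, size_t size)
{
	dma_addr_t dma;

	if (gen_pool_add_virt(pool, (unsigned long)vaddr, paddr, size, -1))
		return NULL;

	/* carve a 512-byte piece; dma is filled in via gen_pool_virt_to_phys() */
	return gen_pool_dma_alloc(pool, 512, &dma);
}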
+ */ +static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr, + size_t size, int nid) +{ + return gen_pool_add_virt(pool, addr, -1, size, nid); +} +extern void gen_pool_destroy(struct gen_pool *); +extern unsigned long gen_pool_alloc(struct gen_pool *, size_t); +extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t, + genpool_algo_t algo, void *data); +extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, + dma_addr_t *dma); +extern void gen_pool_free(struct gen_pool *, unsigned long, size_t); +extern void gen_pool_for_each_chunk(struct gen_pool *, + void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *); +extern size_t gen_pool_avail(struct gen_pool *); +extern size_t gen_pool_size(struct gen_pool *); + +extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, + void *data); + +extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, + unsigned long start, unsigned int nr, void *data, + struct gen_pool *pool, unsigned long start_addr); + +extern unsigned long gen_pool_fixed_alloc(unsigned long *map, + unsigned long size, unsigned long start, unsigned int nr, + void *data, struct gen_pool *pool, unsigned long start_addr); + +extern unsigned long gen_pool_first_fit_align(unsigned long *map, + unsigned long size, unsigned long start, unsigned int nr, + void *data, struct gen_pool *pool, unsigned long start_addr); + + +extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, + unsigned long size, unsigned long start, unsigned int nr, + void *data, struct gen_pool *pool, unsigned long start_addr); + +extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, + unsigned long start, unsigned int nr, void *data, + struct gen_pool *pool, unsigned long start_addr); + + +extern struct gen_pool *devm_gen_pool_create(struct device *dev, + int min_alloc_order, int nid, const char *name); +extern struct gen_pool *gen_pool_get(struct device *dev, const char *name); + +bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, + size_t size); + +#ifdef CONFIG_OF +extern struct gen_pool *of_gen_pool_get(struct device_node *np, + const char *propname, int index); +#else +static inline struct gen_pool *of_gen_pool_get(struct device_node *np, + const char *propname, int index) +{ + return NULL; +} +#endif +#endif /* __GENALLOC_H__ */ diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h new file mode 100644 index 000000000..bc738504a --- /dev/null +++ b/include/linux/genetlink.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_GENERIC_NETLINK_H +#define __LINUX_GENERIC_NETLINK_H + +#include + + +/* All generic netlink requests are serialized by a global lock. */ +extern void genl_lock(void); +extern void genl_unlock(void); +#ifdef CONFIG_LOCKDEP +extern bool lockdep_genl_is_held(void); +#endif + +/* for synchronisation between af_netlink and genetlink */ +extern atomic_t genl_sk_destructing_cnt; +extern wait_queue_head_t genl_sk_destructing_waitq; + +/** + * rcu_dereference_genl - rcu_dereference with debug checking + * @p: The pointer to read, prior to dereferencing + * + * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() + * or genl mutex. 
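/*
 * Editorial illustration, not part of the patched header: the update side
 * that rcu_dereference_genl()/genl_dereference() (just below) pair with.
 * my_conf is a hypothetical RCU-protected pointer owned by a generic
 * netlink family.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_conf { int value; };
static struct my_conf __rcu *my_conf;

static void my_conf_replace(struct my_conf *new_conf)
{
	struct my_conf *old;

	genl_lock();				/* all genl requests are serialized */
	old = genl_dereference(my_conf);	/* safe: genl mutex held */
	rcu_assign_pointer(my_conf, new_conf);
	genl_unlock();

	synchronize_rcu();			/* wait out rcu_dereference_genl() readers */
	kfree(old);
}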
Note : Please prefer genl_dereference() or rcu_dereference() + */ +#define rcu_dereference_genl(p) \ + rcu_dereference_check(p, lockdep_genl_is_held()) + +/** + * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex + * @p: The pointer to read, prior to dereferencing + * + * Return the value of the specified RCU-protected pointer, but omit + * the READ_ONCE(), because caller holds genl mutex. + */ +#define genl_dereference(p) \ + rcu_dereference_protected(p, lockdep_genl_is_held()) + +#define MODULE_ALIAS_GENL_FAMILY(family)\ + MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) + +#endif /* __LINUX_GENERIC_NETLINK_H */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h new file mode 100644 index 000000000..a488098f8 --- /dev/null +++ b/include/linux/genhd.h @@ -0,0 +1,766 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_GENHD_H +#define _LINUX_GENHD_H + +/* + * genhd.h Copyright (C) 1992 Drew Eckhardt + * Generic hard disk header file by + * Drew Eckhardt + * + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_BLOCK + +#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev) +#define dev_to_part(device) container_of((device), struct hd_struct, __dev) +#define disk_to_dev(disk) (&(disk)->part0.__dev) +#define part_to_dev(part) (&((part)->__dev)) + +extern struct device_type part_type; +extern struct kobject *block_depr; +extern struct class block_class; + +enum { +/* These three have identical behaviour; use the second one if DOS FDISK gets + confused about extended/logical partitions starting past cylinder 1023. */ + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 0x85, + WIN98_EXTENDED_PARTITION = 0x0f, + + SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION, + + LINUX_SWAP_PARTITION = 0x82, + LINUX_DATA_PARTITION = 0x83, + LINUX_LVM_PARTITION = 0x8e, + LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */ + + SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION, + NEW_SOLARIS_X86_PARTITION = 0xbf, + + DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */ + DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */ + DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */ + EZD_PARTITION = 0x55, /* EZ-DRIVE */ + + FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */ + OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */ + NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */ + BSDI_PARTITION = 0xb7, /* BSDI Partition ID */ + MINIX_PARTITION = 0x81, /* Minix Partition ID */ + UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ +}; + +#define DISK_MAX_PARTS 256 +#define DISK_NAME_LEN 32 + +#include +#include +#include +#include +#include +#include + +struct partition { + unsigned char boot_ind; /* 0x80 - active */ + unsigned char head; /* starting head */ + unsigned char sector; /* starting sector */ + unsigned char cyl; /* starting cylinder */ + unsigned char sys_ind; /* What partition type */ + unsigned char end_head; /* end head */ + unsigned char end_sector; /* end sector */ + unsigned char end_cyl; /* end cylinder */ + __le32 start_sect; /* starting sector counting from 0 */ + __le32 nr_sects; /* nr of sectors in partition */ +} __attribute__((packed)); + +struct disk_stats { + u64 nsecs[NR_STAT_GROUPS]; + unsigned long sectors[NR_STAT_GROUPS]; + unsigned long ios[NR_STAT_GROUPS]; + unsigned long merges[NR_STAT_GROUPS]; + unsigned long io_ticks; + unsigned long time_in_queue; +}; + +#define PARTITION_META_INFO_VOLNAMELTH 64 +/* + * Enough 
for the string representation of any kind of UUID plus NULL. + * EFI UUID is 36 characters. MSDOS UUID is 11 characters. + */ +#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1) + +struct partition_meta_info { + char uuid[PARTITION_META_INFO_UUIDLTH]; + u8 volname[PARTITION_META_INFO_VOLNAMELTH]; +}; + +struct hd_struct { + sector_t start_sect; + /* + * nr_sects is protected by sequence counter. One might extend a + * partition while IO is happening to it and update of nr_sects + * can be non-atomic on 32bit machines with 64bit sector_t. + */ + sector_t nr_sects; + seqcount_t nr_sects_seq; + sector_t alignment_offset; + unsigned int discard_alignment; + struct device __dev; + struct kobject *holder_dir; + int policy, partno; + struct partition_meta_info *info; +#ifdef CONFIG_FAIL_MAKE_REQUEST + int make_it_fail; +#endif + unsigned long stamp; + atomic_t in_flight[2]; +#ifdef CONFIG_SMP + struct disk_stats __percpu *dkstats; +#else + struct disk_stats dkstats; +#endif + struct percpu_ref ref; + struct rcu_work rcu_work; +}; + +#define GENHD_FL_REMOVABLE 1 +/* 2 is unused */ +#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4 +#define GENHD_FL_CD 8 +#define GENHD_FL_UP 16 +#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 +#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ +#define GENHD_FL_NATIVE_CAPACITY 128 +#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256 +#define GENHD_FL_NO_PART_SCAN 512 +#define GENHD_FL_HIDDEN 1024 + +enum { + DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ + DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */ +}; + +struct disk_part_tbl { + struct rcu_head rcu_head; + int len; + struct hd_struct __rcu *last_lookup; + struct hd_struct __rcu *part[]; +}; + +struct disk_events; +struct badblocks; + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +struct blk_integrity { + const struct blk_integrity_profile *profile; + unsigned char flags; + unsigned char tuple_size; + unsigned char interval_exp; + unsigned char tag_size; +}; + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +struct gendisk { + /* major, first_minor and minors are input parameters only, + * don't use directly. Use disk_devt() and disk_max_parts(). + */ + int major; /* major number of driver */ + int first_minor; + int minors; /* maximum number of minors, =1 for + * disks that can't be partitioned. */ + + char disk_name[DISK_NAME_LEN]; /* name of major driver */ + char *(*devnode)(struct gendisk *gd, umode_t *mode); + + unsigned int events; /* supported events */ + unsigned int async_events; /* async events, subset of all */ + + /* Array of pointers to partitions indexed by partno. + * Protected with matching bdev lock but stat and other + * non-critical accesses use RCU. Always access through + * helpers. 
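/*
 * Editorial illustration, not part of the patched header: looking a partition
 * up through the accessors declared just below instead of touching part_tbl
 * directly. disk_get_part() takes a device reference that disk_put_part()
 * must drop again.
 */
static void my_show_partition(struct gendisk *disk, int partno)
{
	struct hd_struct *part = disk_get_part(disk, partno);

	if (!part)
		return;

	pr_info("%s: partition %d starts at sector %llu\n",
		disk->disk_name, part->partno,
		(unsigned long long)part->start_sect);
	disk_put_part(part);
}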
+ */ + struct disk_part_tbl __rcu *part_tbl; + struct hd_struct part0; + + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + + int flags; + struct rw_semaphore lookup_sem; + struct kobject *slave_dir; + + struct timer_rand_state *random; + atomic_t sync_io; /* RAID */ + struct disk_events *ev; +#ifdef CONFIG_BLK_DEV_INTEGRITY + struct kobject integrity_kobj; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + int node_id; + struct badblocks *bb; + struct lockdep_map lockdep_map; +}; + +static inline struct gendisk *part_to_disk(struct hd_struct *part) +{ + if (likely(part)) { + if (part->partno) + return dev_to_disk(part_to_dev(part)->parent); + else + return dev_to_disk(part_to_dev(part)); + } + return NULL; +} + +static inline int disk_max_parts(struct gendisk *disk) +{ + if (disk->flags & GENHD_FL_EXT_DEVT) + return DISK_MAX_PARTS; + return disk->minors; +} + +static inline bool disk_part_scan_enabled(struct gendisk *disk) +{ + return disk_max_parts(disk) > 1 && + !(disk->flags & GENHD_FL_NO_PART_SCAN); +} + +static inline dev_t disk_devt(struct gendisk *disk) +{ + return MKDEV(disk->major, disk->first_minor); +} + +static inline dev_t part_devt(struct hd_struct *part) +{ + return part_to_dev(part)->devt; +} + +extern struct hd_struct *__disk_get_part(struct gendisk *disk, int partno); +extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno); + +static inline void disk_put_part(struct hd_struct *part) +{ + if (likely(part)) + put_device(part_to_dev(part)); +} + +/* + * Smarter partition iterator without context limits. + */ +#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ +#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ +#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ +#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */ + +struct disk_part_iter { + struct gendisk *disk; + struct hd_struct *part; + int idx; + unsigned int flags; +}; + +extern void disk_part_iter_init(struct disk_part_iter *piter, + struct gendisk *disk, unsigned int flags); +extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter); +extern void disk_part_iter_exit(struct disk_part_iter *piter); + +extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, + sector_t sector); + +/* + * Macros to operate on percpu disk statistics: + * + * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters + * and should be called between disk_stat_lock() and + * disk_stat_unlock(). + * + * part_stat_read() can be called at any time. + * + * part_stat_{add|set_all}() and {init|free}_part_stats are for + * internal use only. 
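/*
 * Editorial illustration, not part of the patched header: the
 * lock/modify/unlock pattern the comment above describes, as a block driver
 * would account one completed read of nr_sectors sectors (the function and
 * its arguments are hypothetical).
 */
static void my_account_read(struct hd_struct *part, unsigned int nr_sectors)
{
	int cpu = part_stat_lock();	/* rcu_read_lock() + get_cpu() on SMP */

	part_stat_inc(cpu, part, ios[STAT_READ]);
	part_stat_add(cpu, part, sectors[STAT_READ], nr_sectors);
	part_stat_unlock();
}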
+ */ +#ifdef CONFIG_SMP +#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) +#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) + +#define __part_stat_add(cpu, part, field, addnd) \ + (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd)) + +#define part_stat_read(part, field) \ +({ \ + typeof((part)->dkstats->field) res = 0; \ + unsigned int _cpu; \ + for_each_possible_cpu(_cpu) \ + res += per_cpu_ptr((part)->dkstats, _cpu)->field; \ + res; \ +}) + +static inline void part_stat_set_all(struct hd_struct *part, int value) +{ + int i; + + for_each_possible_cpu(i) + memset(per_cpu_ptr(part->dkstats, i), value, + sizeof(struct disk_stats)); +} + +static inline int init_part_stats(struct hd_struct *part) +{ + part->dkstats = alloc_percpu(struct disk_stats); + if (!part->dkstats) + return 0; + return 1; +} + +static inline void free_part_stats(struct hd_struct *part) +{ + free_percpu(part->dkstats); +} + +#else /* !CONFIG_SMP */ +#define part_stat_lock() ({ rcu_read_lock(); 0; }) +#define part_stat_unlock() rcu_read_unlock() + +#define __part_stat_add(cpu, part, field, addnd) \ + ((part)->dkstats.field += addnd) + +#define part_stat_read(part, field) ((part)->dkstats.field) + +static inline void part_stat_set_all(struct hd_struct *part, int value) +{ + memset(&part->dkstats, value, sizeof(struct disk_stats)); +} + +static inline int init_part_stats(struct hd_struct *part) +{ + return 1; +} + +static inline void free_part_stats(struct hd_struct *part) +{ +} + +#endif /* CONFIG_SMP */ + +#define part_stat_read_msecs(part, which) \ + div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC) + +#define part_stat_read_accum(part, field) \ + (part_stat_read(part, field[STAT_READ]) + \ + part_stat_read(part, field[STAT_WRITE]) + \ + part_stat_read(part, field[STAT_DISCARD])) + +#define part_stat_add(cpu, part, field, addnd) do { \ + __part_stat_add((cpu), (part), field, addnd); \ + if ((part)->partno) \ + __part_stat_add((cpu), &part_to_disk((part))->part0, \ + field, addnd); \ +} while (0) + +#define part_stat_dec(cpu, gendiskp, field) \ + part_stat_add(cpu, gendiskp, field, -1) +#define part_stat_inc(cpu, gendiskp, field) \ + part_stat_add(cpu, gendiskp, field, 1) +#define part_stat_sub(cpu, gendiskp, field, subnd) \ + part_stat_add(cpu, gendiskp, field, -subnd) + +void part_in_flight(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]); +void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]); +void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, + int rw); +void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, + int rw); + +static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk) +{ + if (disk) + return kzalloc_node(sizeof(struct partition_meta_info), + GFP_KERNEL, disk->node_id); + return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL); +} + +static inline void free_part_info(struct hd_struct *part) +{ + kfree(part->info); +} + +/* block/blk-core.c */ +extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part); + +/* block/genhd.c */ +extern void device_add_disk(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups); +static inline void add_disk(struct gendisk *disk) +{ + device_add_disk(NULL, disk, NULL); +} +extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk); +static inline void add_disk_no_queue_reg(struct gendisk *disk) +{ + 
device_add_disk_no_queue_reg(NULL, disk); +} + +extern void del_gendisk(struct gendisk *gp); +extern struct gendisk *get_gendisk(dev_t dev, int *partno); +extern struct block_device *bdget_disk(struct gendisk *disk, int partno); + +extern void set_device_ro(struct block_device *bdev, int flag); +extern void set_disk_ro(struct gendisk *disk, int flag); + +static inline int get_disk_ro(struct gendisk *disk) +{ + return disk->part0.policy; +} + +extern void disk_block_events(struct gendisk *disk); +extern void disk_unblock_events(struct gendisk *disk); +extern void disk_flush_events(struct gendisk *disk, unsigned int mask); +extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); + +/* drivers/char/random.c */ +extern void add_disk_randomness(struct gendisk *disk) __latent_entropy; +extern void rand_initialize_disk(struct gendisk *disk); + +static inline sector_t get_start_sect(struct block_device *bdev) +{ + return bdev->bd_part->start_sect; +} +static inline sector_t get_capacity(struct gendisk *disk) +{ + return disk->part0.nr_sects; +} +static inline void set_capacity(struct gendisk *disk, sector_t size) +{ + disk->part0.nr_sects = size; +} + +#ifdef CONFIG_SOLARIS_X86_PARTITION + +#define SOLARIS_X86_NUMSLICE 16 +#define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL) + +struct solaris_x86_slice { + __le16 s_tag; /* ID tag of partition */ + __le16 s_flag; /* permission flags */ + __le32 s_start; /* start sector no of partition */ + __le32 s_size; /* # of blocks in partition */ +}; + +struct solaris_x86_vtoc { + unsigned int v_bootinfo[3]; /* info needed by mboot (unsupported) */ + __le32 v_sanity; /* to verify vtoc sanity */ + __le32 v_version; /* layout version */ + char v_volume[8]; /* volume name */ + __le16 v_sectorsz; /* sector size in bytes */ + __le16 v_nparts; /* number of partitions */ + unsigned int v_reserved[10]; /* free space */ + struct solaris_x86_slice + v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */ + unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */ + char v_asciilabel[128]; /* for compatibility */ +}; + +#endif /* CONFIG_SOLARIS_X86_PARTITION */ + +#ifdef CONFIG_BSD_DISKLABEL +/* + * BSD disklabel support by Yossi Gottlieb + * updated by Marc Espie + */ + +/* check against BSD src/sys/sys/disklabel.h for consistency */ + +#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */ +#define BSD_MAXPARTITIONS 16 +#define OPENBSD_MAXPARTITIONS 16 +#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */ +struct bsd_disklabel { + __le32 d_magic; /* the magic number */ + __s16 d_type; /* drive type */ + __s16 d_subtype; /* controller/d_type specific */ + char d_typename[16]; /* type name, e.g. "eagle" */ + char d_packname[16]; /* pack identifier */ + __u32 d_secsize; /* # of bytes per sector */ + __u32 d_nsectors; /* # of data sectors per track */ + __u32 d_ntracks; /* # of tracks per cylinder */ + __u32 d_ncylinders; /* # of data cylinders per unit */ + __u32 d_secpercyl; /* # of data sectors per cylinder */ + __u32 d_secperunit; /* # of data sectors per unit */ + __u16 d_sparespertrack; /* # of spare sectors per track */ + __u16 d_sparespercyl; /* # of spare sectors per cylinder */ + __u32 d_acylinders; /* # of alt. 
cylinders per unit */ + __u16 d_rpm; /* rotational speed */ + __u16 d_interleave; /* hardware sector interleave */ + __u16 d_trackskew; /* sector 0 skew, per track */ + __u16 d_cylskew; /* sector 0 skew, per cylinder */ + __u32 d_headswitch; /* head switch time, usec */ + __u32 d_trkseek; /* track-to-track seek, usec */ + __u32 d_flags; /* generic flags */ +#define NDDATA 5 + __u32 d_drivedata[NDDATA]; /* drive-type specific information */ +#define NSPARE 5 + __u32 d_spare[NSPARE]; /* reserved for future use */ + __le32 d_magic2; /* the magic number (again) */ + __le16 d_checksum; /* xor of data incl. partitions */ + + /* filesystem and partition information: */ + __le16 d_npartitions; /* number of partitions in following */ + __le32 d_bbsize; /* size of boot area at sn0, bytes */ + __le32 d_sbsize; /* max size of fs superblock, bytes */ + struct bsd_partition { /* the partition table */ + __le32 p_size; /* number of sectors in partition */ + __le32 p_offset; /* starting sector */ + __le32 p_fsize; /* filesystem basic fragment size */ + __u8 p_fstype; /* filesystem type, see below */ + __u8 p_frag; /* filesystem fragments per block */ + __le16 p_cpg; /* filesystem cylinders per group */ + } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */ +}; + +#endif /* CONFIG_BSD_DISKLABEL */ + +#ifdef CONFIG_UNIXWARE_DISKLABEL +/* + * Unixware slices support by Andrzej Krzysztofowicz + * and Krzysztof G. Baranowski + */ + +#define UNIXWARE_DISKMAGIC (0xCA5E600DUL) /* The disk magic number */ +#define UNIXWARE_DISKMAGIC2 (0x600DDEEEUL) /* The slice table magic nr */ +#define UNIXWARE_NUMSLICE 16 +#define UNIXWARE_FS_UNUSED 0 /* Unused slice entry ID */ + +struct unixware_slice { + __le16 s_label; /* label */ + __le16 s_flags; /* permission flags */ + __le32 start_sect; /* starting sector */ + __le32 nr_sects; /* number of sectors in slice */ +}; + +struct unixware_disklabel { + __le32 d_type; /* drive type */ + __le32 d_magic; /* the magic number */ + __le32 d_version; /* version number */ + char d_serial[12]; /* serial number of the device */ + __le32 d_ncylinders; /* # of data cylinders per device */ + __le32 d_ntracks; /* # of tracks per cylinder */ + __le32 d_nsectors; /* # of data sectors per track */ + __le32 d_secsize; /* # of bytes per sector */ + __le32 d_part_start; /* # of first sector of this partition */ + __le32 d_unknown1[12]; /* ? */ + __le32 d_alt_tbl; /* byte offset of alternate table */ + __le32 d_alt_len; /* byte length of alternate table */ + __le32 d_phys_cyl; /* # of physical cylinders per device */ + __le32 d_phys_trk; /* # of physical tracks per cylinder */ + __le32 d_phys_sec; /* # of physical sectors per track */ + __le32 d_phys_bytes; /* # of physical bytes per sector */ + __le32 d_unknown2; /* ? */ + __le32 d_unknown3; /* ? */ + __le32 d_pad[8]; /* pad */ + + struct unixware_vtoc { + __le32 v_magic; /* the magic number */ + __le32 v_version; /* version number */ + char v_name[8]; /* volume name */ + __le16 v_nslices; /* # of slices */ + __le16 v_unknown1; /* ? 
*/ + __le32 v_reserved[10]; /* reserved */ + struct unixware_slice + v_slice[UNIXWARE_NUMSLICE]; /* slice headers */ + } vtoc; + +}; /* 408 */ + +#endif /* CONFIG_UNIXWARE_DISKLABEL */ + +#ifdef CONFIG_MINIX_SUBPARTITION +# define MINIX_NR_SUBPARTITIONS 4 +#endif /* CONFIG_MINIX_SUBPARTITION */ + +#define ADDPART_FLAG_NONE 0 +#define ADDPART_FLAG_RAID 1 +#define ADDPART_FLAG_WHOLEDISK 2 + +extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt); +extern void blk_free_devt(dev_t devt); +extern void blk_invalidate_devt(dev_t devt); +extern dev_t blk_lookup_devt(const char *name, int partno); +extern char *disk_name (struct gendisk *hd, int partno, char *buf); + +extern int disk_expand_part_tbl(struct gendisk *disk, int target); +extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); +extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev); +extern struct hd_struct * __must_check add_partition(struct gendisk *disk, + int partno, sector_t start, + sector_t len, int flags, + struct partition_meta_info + *info); +extern void __delete_partition(struct percpu_ref *); +extern void delete_partition(struct gendisk *, int); +extern void printk_all_partitions(void); + +extern struct gendisk *__alloc_disk_node(int minors, int node_id); +extern struct kobject *get_disk_and_module(struct gendisk *disk); +extern void put_disk(struct gendisk *disk); +extern void put_disk_and_module(struct gendisk *disk); +extern void blk_register_region(dev_t devt, unsigned long range, + struct module *module, + struct kobject *(*probe)(dev_t, int *, void *), + int (*lock)(dev_t, void *), + void *data); +extern void blk_unregister_region(dev_t devt, unsigned long range); + +extern ssize_t part_size_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_stat_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_inflight_show(struct device *dev, + struct device_attribute *attr, char *buf); +#ifdef CONFIG_FAIL_MAKE_REQUEST +extern ssize_t part_fail_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_fail_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +#endif /* CONFIG_FAIL_MAKE_REQUEST */ + +#define alloc_disk_node(minors, node_id) \ +({ \ + static struct lock_class_key __key; \ + const char *__name; \ + struct gendisk *__disk; \ + \ + __name = "(gendisk_completion)"#minors"("#node_id")"; \ + \ + __disk = __alloc_disk_node(minors, node_id); \ + \ + if (__disk) \ + lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \ + \ + __disk; \ +}) + +#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) + +static inline int hd_ref_init(struct hd_struct *part) +{ + if (percpu_ref_init(&part->ref, __delete_partition, 0, + GFP_KERNEL)) + return -ENOMEM; + return 0; +} + +static inline void hd_struct_get(struct hd_struct *part) +{ + percpu_ref_get(&part->ref); +} + +static inline int hd_struct_try_get(struct hd_struct *part) +{ + return percpu_ref_tryget_live(&part->ref); +} + +static inline void hd_struct_put(struct hd_struct *part) +{ + percpu_ref_put(&part->ref); +} + +static inline void hd_struct_kill(struct hd_struct *part) +{ + percpu_ref_kill(&part->ref); +} + +static inline void hd_free_part(struct hd_struct *part) +{ + free_part_stats(part); + free_part_info(part); + percpu_ref_exit(&part->ref); +} + +/* + * Any access of part->nr_sects which is not protected by partition + * bd_mutex or gendisk bdev 
bd_mutex, should be done using this + * accessor function. + * + * Code written along the lines of i_size_read() and i_size_write(). + * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption + * on. + */ +static inline sector_t part_nr_sects_read(struct hd_struct *part) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) + sector_t nr_sects; + unsigned seq; + do { + seq = read_seqcount_begin(&part->nr_sects_seq); + nr_sects = part->nr_sects; + } while (read_seqcount_retry(&part->nr_sects_seq, seq)); + return nr_sects; +#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) + sector_t nr_sects; + + preempt_disable(); + nr_sects = part->nr_sects; + preempt_enable(); + return nr_sects; +#else + return part->nr_sects; +#endif +} + +/* + * Should be called with mutex lock held (typically bd_mutex) of partition + * to provide mutual exclusion among writers, otherwise the seqcount might be + * left in a wrong state, leaving the readers spinning infinitely. + */ +static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) + preempt_disable(); + write_seqcount_begin(&part->nr_sects_seq); + part->nr_sects = size; + write_seqcount_end(&part->nr_sects_seq); + preempt_enable(); +#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) + preempt_disable(); + part->nr_sects = size; + preempt_enable(); +#else + part->nr_sects = size; +#endif +} + +#if defined(CONFIG_BLK_DEV_INTEGRITY) +extern void blk_integrity_add(struct gendisk *); +extern void blk_integrity_del(struct gendisk *); +#else /* CONFIG_BLK_DEV_INTEGRITY */ +static inline void blk_integrity_add(struct gendisk *disk) { } +static inline void blk_integrity_del(struct gendisk *disk) { } +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +#else /* CONFIG_BLOCK */ + +static inline void printk_all_partitions(void) { } + +static inline dev_t blk_lookup_devt(const char *name, int partno) +{ + dev_t devt = MKDEV(0, 0); + return devt; +} +#endif /* CONFIG_BLOCK */ + +#endif /* _LINUX_GENHD_H */ diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h new file mode 100644 index 000000000..83f81ac53 --- /dev/null +++ b/include/linux/genl_magic_func.h @@ -0,0 +1,407 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef GENL_MAGIC_FUNC_H +#define GENL_MAGIC_FUNC_H + +#include +#include + +/* + * Magic: declare tla policy {{{1 + * Magic: declare nested policies + * {{{2 + */ +#undef GENL_mc_group +#define GENL_mc_group(group) + +#undef GENL_notification +#define GENL_notification(op_name, op_num, mcast_group, tla_list) + +#undef GENL_op +#define GENL_op(op_name, op_num, handler, tla_list) + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ + [tag_name] = { .type = NLA_NESTED }, + +static struct nla_policy CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = { +#include GENL_MAGIC_INCLUDE_FILE +}; + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ +static struct nla_policy s_name ## _nl_policy[] __read_mostly = \ +{ s_fields }; + +#undef __field +#define __field(attr_nr, attr_flag, name, nla_type, _type, __get, \ + __put, __is_signed) \ + [attr_nr] = { .type = nla_type }, + +#undef __array +#define __array(attr_nr, attr_flag, name, nla_type, _type, maxlen, \ + __get, __put, __is_signed) \ + [attr_nr] = { .type = nla_type, \ + .len = maxlen - (nla_type == NLA_NUL_STRING) }, + +#include GENL_MAGIC_INCLUDE_FILE + +#ifndef __KERNEL__
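To make the two-pass expansion above concrete, here is a rough sketch of what it generates for a hypothetical descriptor file; the family name "example" and all tag/field names below are invented for illustration and are not part of this patch. The first pass emits one NLA_NESTED slot in the family-wide policy, the second pass emits a per-struct policy for the nested attributes (the __u32_field()/__str_field() helpers are defined in genl_magic_struct.h, shown further below):

	/* hypothetical GENL_MAGIC_INCLUDE_FILE entry */
	GENL_struct(EXAMPLE_NLA_CFG, 2, example_cfg,
		__u32_field(1, DRBD_F_REQUIRED, timeout)
		__str_field(2, 0, name, 32)
	)

	/* pass 1: top-level attribute policy */
	static struct nla_policy example_tla_nl_policy[] = {
		[EXAMPLE_NLA_CFG] = { .type = NLA_NESTED },
	};

	/* pass 2: nested policy for the fields of struct example_cfg */
	static struct nla_policy example_cfg_nl_policy[] __read_mostly = {
		[1] = { .type = NLA_U32 },
		[2] = { .type = NLA_NUL_STRING, .len = 32 - 1 },
	};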
+#ifndef pr_info +#define pr_info(args...) fprintf(stderr, args); +#endif +#endif + +#ifdef GENL_MAGIC_DEBUG +static void dprint_field(const char *dir, int nla_type, + const char *name, void *valp) +{ + __u64 val = valp ? *(__u32 *)valp : 1; + switch (nla_type) { + case NLA_U8: val = (__u8)val; + case NLA_U16: val = (__u16)val; + case NLA_U32: val = (__u32)val; + pr_info("%s attr %s: %d 0x%08x\n", dir, + name, (int)val, (unsigned)val); + break; + case NLA_U64: + val = *(__u64*)valp; + pr_info("%s attr %s: %lld 0x%08llx\n", dir, + name, (long long)val, (unsigned long long)val); + break; + case NLA_FLAG: + if (val) + pr_info("%s attr %s: set\n", dir, name); + break; + } +} + +static void dprint_array(const char *dir, int nla_type, + const char *name, const char *val, unsigned len) +{ + switch (nla_type) { + case NLA_NUL_STRING: + if (len && val[len-1] == '\0') + len--; + pr_info("%s attr %s: [len:%u] '%s'\n", dir, name, len, val); + break; + default: + /* we can always show 4 byte, + * thats what nlattr are aligned to. */ + pr_info("%s attr %s: [len:%u] %02x%02x%02x%02x ...\n", + dir, name, len, val[0], val[1], val[2], val[3]); + } +} + +#define DPRINT_TLA(a, op, b) pr_info("%s %s %s\n", a, op, b); + +/* Name is a member field name of the struct s. + * If s is NULL (only parsing, no copy requested in *_from_attrs()), + * nla is supposed to point to the attribute containing the information + * corresponding to that struct member. */ +#define DPRINT_FIELD(dir, nla_type, name, s, nla) \ + do { \ + if (s) \ + dprint_field(dir, nla_type, #name, &s->name); \ + else if (nla) \ + dprint_field(dir, nla_type, #name, \ + (nla_type == NLA_FLAG) ? NULL \ + : nla_data(nla)); \ + } while (0) + +#define DPRINT_ARRAY(dir, nla_type, name, s, nla) \ + do { \ + if (s) \ + dprint_array(dir, nla_type, #name, \ + s->name, s->name ## _len); \ + else if (nla) \ + dprint_array(dir, nla_type, #name, \ + nla_data(nla), nla_len(nla)); \ + } while (0) +#else +#define DPRINT_TLA(a, op, b) do {} while (0) +#define DPRINT_FIELD(dir, nla_type, name, s, nla) do {} while (0) +#define DPRINT_ARRAY(dir, nla_type, name, s, nla) do {} while (0) +#endif + +/* + * Magic: provide conversion functions {{{1 + * populate struct from attribute table: + * {{{2 + */ + +/* processing of generic netlink messages is serialized. 
+ * use one static buffer for parsing of nested attributes */ +static struct nlattr *nested_attr_tb[128]; + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ +/* *_from_attrs functions are static, but potentially unused */ \ +static int __ ## s_name ## _from_attrs(struct s_name *s, \ + struct genl_info *info, bool exclude_invariants) \ +{ \ + const int maxtype = ARRAY_SIZE(s_name ## _nl_policy)-1; \ + struct nlattr *tla = info->attrs[tag_number]; \ + struct nlattr **ntb = nested_attr_tb; \ + struct nlattr *nla; \ + int err; \ + BUILD_BUG_ON(ARRAY_SIZE(s_name ## _nl_policy) > ARRAY_SIZE(nested_attr_tb)); \ + if (!tla) \ + return -ENOMSG; \ + DPRINT_TLA(#s_name, "<=-", #tag_name); \ + err = drbd_nla_parse_nested(ntb, maxtype, tla, s_name ## _nl_policy); \ + if (err) \ + return err; \ + \ + s_fields \ + return 0; \ +} __attribute__((unused)) \ +static int s_name ## _from_attrs(struct s_name *s, \ + struct genl_info *info) \ +{ \ + return __ ## s_name ## _from_attrs(s, info, false); \ +} __attribute__((unused)) \ +static int s_name ## _from_attrs_for_change(struct s_name *s, \ + struct genl_info *info) \ +{ \ + return __ ## s_name ## _from_attrs(s, info, true); \ +} __attribute__((unused)) \ + +#define __assign(attr_nr, attr_flag, name, nla_type, type, assignment...) \ + nla = ntb[attr_nr]; \ + if (nla) { \ + if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \ + pr_info("<< must not change invariant attr: %s\n", #name); \ + return -EEXIST; \ + } \ + assignment; \ + } else if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \ + /* attribute missing from payload, */ \ + /* which was expected */ \ + } else if ((attr_flag) & DRBD_F_REQUIRED) { \ + pr_info("<< missing attr: %s\n", #name); \ + return -ENOMSG; \ + } + +#undef __field +#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \ + __is_signed) \ + __assign(attr_nr, attr_flag, name, nla_type, type, \ + if (s) \ + s->name = __get(nla); \ + DPRINT_FIELD("<<", nla_type, name, s, nla)) + +/* validate_nla() already checked nla_len <= maxlen appropriately. */ +#undef __array +#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \ + __get, __put, __is_signed) \ + __assign(attr_nr, attr_flag, name, nla_type, type, \ + if (s) \ + s->name ## _len = \ + __get(s->name, nla, maxlen); \ + DPRINT_ARRAY("<<", nla_type, name, s, nla)) + +#include GENL_MAGIC_INCLUDE_FILE + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) + +/* + * Magic: define op number to op name mapping {{{1 + * {{{2 + */ +const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd) +{ + switch (cmd) { +#undef GENL_op +#define GENL_op(op_name, op_num, handler, tla_list) \ + case op_num: return #op_name; +#include GENL_MAGIC_INCLUDE_FILE + default: + return "unknown"; + } +} + +#ifdef __KERNEL__ +#include +/* + * Magic: define genl_ops {{{1 + * {{{2 + */ + +#undef GENL_op +#define GENL_op(op_name, op_num, handler, tla_list) \ +{ \ + handler \ + .cmd = op_name, \ + .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy), \ +}, + +#define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops) +static struct genl_ops ZZZ_genl_ops[] __read_mostly = { +#include GENL_MAGIC_INCLUDE_FILE +}; + +#undef GENL_op +#define GENL_op(op_name, op_num, handler, tla_list) + +/* + * Define the genl_family, multicast groups, {{{1 + * and provide register/unregister functions. 
+ * {{{2 + */ +#define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family) +static struct genl_family ZZZ_genl_family; +/* + * Magic: define multicast groups + * Magic: define multicast group registration helper + */ +#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps) +static const struct genl_multicast_group ZZZ_genl_mcgrps[] = { +#undef GENL_mc_group +#define GENL_mc_group(group) { .name = #group, }, +#include GENL_MAGIC_INCLUDE_FILE +}; + +enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) { +#undef GENL_mc_group +#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group), +#include GENL_MAGIC_INCLUDE_FILE +}; + +#undef GENL_mc_group +#define GENL_mc_group(group) \ +static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \ + struct sk_buff *skb, gfp_t flags) \ +{ \ + unsigned int group_id = \ + CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \ + return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \ + group_id, flags); \ +} + +#include GENL_MAGIC_INCLUDE_FILE + +#undef GENL_mc_group +#define GENL_mc_group(group) + +static struct genl_family ZZZ_genl_family __ro_after_init = { + .name = __stringify(GENL_MAGIC_FAMILY), + .version = GENL_MAGIC_VERSION, +#ifdef GENL_MAGIC_FAMILY_HDRSZ + .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ), +#endif + .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1, + .ops = ZZZ_genl_ops, + .n_ops = ARRAY_SIZE(ZZZ_genl_ops), + .mcgrps = ZZZ_genl_mcgrps, + .n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps), + .module = THIS_MODULE, +}; + +int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void) +{ + return genl_register_family(&ZZZ_genl_family); +} + +void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void) +{ + genl_unregister_family(&ZZZ_genl_family); +} + +/* + * Magic: provide conversion functions {{{1 + * populate skb from struct. + * {{{2 + */ + +#undef GENL_op +#define GENL_op(op_name, op_num, handler, tla_list) + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ +static int s_name ## _to_skb(struct sk_buff *skb, struct s_name *s, \ + const bool exclude_sensitive) \ +{ \ + struct nlattr *tla = nla_nest_start(skb, tag_number); \ + if (!tla) \ + goto nla_put_failure; \ + DPRINT_TLA(#s_name, "-=>", #tag_name); \ + s_fields \ + nla_nest_end(skb, tla); \ + return 0; \ + \ +nla_put_failure: \ + if (tla) \ + nla_nest_cancel(skb, tla); \ + return -EMSGSIZE; \ +} \ +static inline int s_name ## _to_priv_skb(struct sk_buff *skb, \ + struct s_name *s) \ +{ \ + return s_name ## _to_skb(skb, s, 0); \ +} \ +static inline int s_name ## _to_unpriv_skb(struct sk_buff *skb, \ + struct s_name *s) \ +{ \ + return s_name ## _to_skb(skb, s, 1); \ +} + + +#undef __field +#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \ + __is_signed) \ + if (!exclude_sensitive || !((attr_flag) & DRBD_F_SENSITIVE)) { \ + DPRINT_FIELD(">>", nla_type, name, s, NULL); \ + if (__put(skb, attr_nr, s->name)) \ + goto nla_put_failure; \ + } + +#undef __array +#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \ + __get, __put, __is_signed) \ + if (!exclude_sensitive || !((attr_flag) & DRBD_F_SENSITIVE)) { \ + DPRINT_ARRAY(">>",nla_type, name, s, NULL); \ + if (__put(skb, attr_nr, min_t(int, maxlen, \ + s->name ## _len + (nla_type == NLA_NUL_STRING)),\ + s->name)) \ + goto nla_put_failure; \ + } + +#include GENL_MAGIC_INCLUDE_FILE + + +/* Functions for initializing structs to default values. 
*/ + +#undef __field +#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \ + __is_signed) +#undef __array +#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \ + __get, __put, __is_signed) +#undef __u32_field_def +#define __u32_field_def(attr_nr, attr_flag, name, default) \ + x->name = default; +#undef __s32_field_def +#define __s32_field_def(attr_nr, attr_flag, name, default) \ + x->name = default; +#undef __flg_field_def +#define __flg_field_def(attr_nr, attr_flag, name, default) \ + x->name = default; +#undef __str_field_def +#define __str_field_def(attr_nr, attr_flag, name, maxlen) \ + memset(x->name, 0, sizeof(x->name)); \ + x->name ## _len = 0; +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ +static void set_ ## s_name ## _defaults(struct s_name *x) __attribute__((unused)); \ +static void set_ ## s_name ## _defaults(struct s_name *x) { \ +s_fields \ +} + +#include GENL_MAGIC_INCLUDE_FILE + +#endif /* __KERNEL__ */ + +/* }}}1 */ +#endif /* GENL_MAGIC_FUNC_H */ +/* vim: set foldmethod=marker foldlevel=1 nofoldenable : */ diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h new file mode 100644 index 000000000..eeae59d3c --- /dev/null +++ b/include/linux/genl_magic_struct.h @@ -0,0 +1,286 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef GENL_MAGIC_STRUCT_H +#define GENL_MAGIC_STRUCT_H + +#ifndef GENL_MAGIC_FAMILY +# error "you need to define GENL_MAGIC_FAMILY before inclusion" +#endif + +#ifndef GENL_MAGIC_VERSION +# error "you need to define GENL_MAGIC_VERSION before inclusion" +#endif + +#ifndef GENL_MAGIC_INCLUDE_FILE +# error "you need to define GENL_MAGIC_INCLUDE_FILE before inclusion" +#endif + +#include +#include + +#define CONCAT__(a,b) a ## b +#define CONCAT_(a,b) CONCAT__(a,b) + +extern int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void); +extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void); + +/* + * Extension of genl attribute validation policies {{{2 + */ + +/* + * @DRBD_GENLA_F_MANDATORY: By default, netlink ignores attributes it does not + * know about. This flag can be set in nlattr->nla_type to indicate that this + * attribute must not be ignored. + * + * We check and remove this flag in drbd_nla_check_mandatory() before + * validating the attribute types and lengths via nla_parse_nested(). + */ +#define DRBD_GENLA_F_MANDATORY (1 << 14) + +/* + * Flags specific to drbd and not visible at the netlink layer, used in + * _from_attrs and _to_skb: + * + * @DRBD_F_REQUIRED: Attribute is required; a request without this attribute is + * invalid. + * + * @DRBD_F_SENSITIVE: Attribute includes sensitive information and must not be + * included in unprivileged get requests or broadcasts. + * + * @DRBD_F_INVARIANT: Attribute is set when an object is initially created, but + * cannot subsequently be changed.
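As an illustration of how these flags are meant to be combined, a hypothetical field list (attribute names invented; the field helpers are defined further down in this header) could look like:

	__u32_field(1, DRBD_F_REQUIRED | DRBD_F_INVARIANT, minor)
	__str_field(2, DRBD_F_SENSITIVE, shared_secret, 64)

With the code generated by genl_magic_func.h this means: *_from_attrs() fails with -ENOMSG when attribute 1 is missing, *_from_attrs_for_change() rejects an attempt to modify attribute 1 with -EEXIST, and *_to_unpriv_skb() silently omits attribute 2.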
+ */ +#define DRBD_F_REQUIRED (1 << 0) +#define DRBD_F_SENSITIVE (1 << 1) +#define DRBD_F_INVARIANT (1 << 2) + +#define __nla_type(x) ((__u16)((x) & NLA_TYPE_MASK & ~DRBD_GENLA_F_MANDATORY)) + +/* }}}1 + * MAGIC + * multi-include macro expansion magic starts here + */ + +/* MAGIC helpers {{{2 */ + +static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value) +{ + return nla_put_64bit(skb, attrtype, sizeof(u64), &value, 0); +} + +/* possible field types */ +#define __flg_field(attr_nr, attr_flag, name) \ + __field(attr_nr, attr_flag, name, NLA_U8, char, \ + nla_get_u8, nla_put_u8, false) +#define __u8_field(attr_nr, attr_flag, name) \ + __field(attr_nr, attr_flag, name, NLA_U8, unsigned char, \ + nla_get_u8, nla_put_u8, false) +#define __u16_field(attr_nr, attr_flag, name) \ + __field(attr_nr, attr_flag, name, NLA_U16, __u16, \ + nla_get_u16, nla_put_u16, false) +#define __u32_field(attr_nr, attr_flag, name) \ + __field(attr_nr, attr_flag, name, NLA_U32, __u32, \ + nla_get_u32, nla_put_u32, false) +#define __s32_field(attr_nr, attr_flag, name) \ + __field(attr_nr, attr_flag, name, NLA_U32, __s32, \ + nla_get_u32, nla_put_u32, true) +#define __u64_field(attr_nr, attr_flag, name) \ + __field(attr_nr, attr_flag, name, NLA_U64, __u64, \ + nla_get_u64, nla_put_u64_0pad, false) +#define __str_field(attr_nr, attr_flag, name, maxlen) \ + __array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \ + nla_strlcpy, nla_put, false) +#define __bin_field(attr_nr, attr_flag, name, maxlen) \ + __array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \ + nla_memcpy, nla_put, false) + +/* fields with default values */ +#define __flg_field_def(attr_nr, attr_flag, name, default) \ + __flg_field(attr_nr, attr_flag, name) +#define __u32_field_def(attr_nr, attr_flag, name, default) \ + __u32_field(attr_nr, attr_flag, name) +#define __s32_field_def(attr_nr, attr_flag, name, default) \ + __s32_field(attr_nr, attr_flag, name) +#define __str_field_def(attr_nr, attr_flag, name, maxlen) \ + __str_field(attr_nr, attr_flag, name, maxlen) + +#define GENL_op_init(args...) 
args +#define GENL_doit(handler) \ + .doit = handler, \ + .flags = GENL_ADMIN_PERM, +#define GENL_dumpit(handler) \ + .dumpit = handler, \ + .flags = GENL_ADMIN_PERM, + +/* }}}1 + * Magic: define the enum symbols for genl_ops + * Magic: define the enum symbols for top level attributes + * Magic: define the enum symbols for nested attributes + * {{{2 + */ + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) + +#undef GENL_mc_group +#define GENL_mc_group(group) + +#undef GENL_notification +#define GENL_notification(op_name, op_num, mcast_group, tla_list) \ + op_name = op_num, + +#undef GENL_op +#define GENL_op(op_name, op_num, handler, tla_list) \ + op_name = op_num, + +enum { +#include GENL_MAGIC_INCLUDE_FILE +}; + +#undef GENL_notification +#define GENL_notification(op_name, op_num, mcast_group, tla_list) + +#undef GENL_op +#define GENL_op(op_name, op_num, handler, attr_list) + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ + tag_name = tag_number, + +enum { +#include GENL_MAGIC_INCLUDE_FILE +}; + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ +enum { \ + s_fields \ +}; + +#undef __field +#define __field(attr_nr, attr_flag, name, nla_type, type, \ + __get, __put, __is_signed) \ + T_ ## name = (__u16)(attr_nr | ((attr_flag) & DRBD_GENLA_F_MANDATORY)), + +#undef __array +#define __array(attr_nr, attr_flag, name, nla_type, type, \ + maxlen, __get, __put, __is_signed) \ + T_ ## name = (__u16)(attr_nr | ((attr_flag) & DRBD_GENLA_F_MANDATORY)), + +#include GENL_MAGIC_INCLUDE_FILE + +/* }}}1 + * Magic: compile time assert unique numbers for operations + * Magic: -"- unique numbers for top level attributes + * Magic: -"- unique numbers for nested attributes + * {{{2 + */ + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) + +#undef GENL_op +#define GENL_op(op_name, op_num, handler, attr_list) \ + case op_name: + +#undef GENL_notification +#define GENL_notification(op_name, op_num, mcast_group, tla_list) \ + case op_name: + +static inline void ct_assert_unique_operations(void) +{ + switch (0) { +#include GENL_MAGIC_INCLUDE_FILE + case 0: + ; + } +} + +#undef GENL_op +#define GENL_op(op_name, op_num, handler, attr_list) + +#undef GENL_notification +#define GENL_notification(op_name, op_num, mcast_group, tla_list) + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ + case tag_number: + +static inline void ct_assert_unique_top_level_attributes(void) +{ + switch (0) { +#include GENL_MAGIC_INCLUDE_FILE + case 0: + ; + } +} + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ +static inline void ct_assert_unique_ ## s_name ## _attributes(void) \ +{ \ + switch (0) { \ + s_fields \ + case 0: \ + ; \ + } \ +} + +#undef __field +#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \ + __is_signed) \ + case attr_nr: + +#undef __array +#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \ + __get, __put, __is_signed) \ + case attr_nr: + +#include GENL_MAGIC_INCLUDE_FILE + +/* }}}1 + * Magic: declare structs + * struct { + * fields + * }; + * {{{2 + */ + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ +struct s_name { s_fields }; + +#undef __field +#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \ + __is_signed) \ + type name; + +#undef __array +#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \ + __get, 
__put, __is_signed) \ + type name[maxlen]; \ + __u32 name ## _len; + +#include GENL_MAGIC_INCLUDE_FILE + +#undef GENL_struct +#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ +enum { \ + s_fields \ +}; + +#undef __field +#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \ + is_signed) \ + F_ ## name ## _IS_SIGNED = is_signed, + +#undef __array +#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \ + __get, __put, is_signed) \ + F_ ## name ## _IS_SIGNED = is_signed, + +#include GENL_MAGIC_INCLUDE_FILE + +/* }}}1 */ +#endif /* GENL_MAGIC_STRUCT_H */ +/* vim: set foldmethod=marker nofoldenable : */ diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h new file mode 100644 index 000000000..c304dcdb4 --- /dev/null +++ b/include/linux/getcpu.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_GETCPU_H +#define _LINUX_GETCPU_H 1 + +/* Cache for getcpu() to speed it up. Results might be a short time + out of date, but will be faster. + + User programs should not refer to the contents of this structure. + I repeat they should not refer to it. If they do they will break + in future kernels. + + It is only a private cache for vgetcpu(). It will change in future kernels. + The user program must store this information per thread (__thread) + If you want 100% accurate information pass NULL instead. */ +struct getcpu_cache { + unsigned long blob[128 / sizeof(long)]; +}; + +#endif diff --git a/include/linux/gfp.h b/include/linux/gfp.h new file mode 100644 index 000000000..f78d1e895 --- /dev/null +++ b/include/linux/gfp.h @@ -0,0 +1,623 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_GFP_H +#define __LINUX_GFP_H + +#include +#include +#include +#include +#include + +struct vm_area_struct; + +/* + * In case of changes, please don't forget to update + * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c + */ + +/* Plain integer GFP bitmasks. Do not use this directly. */ +#define ___GFP_DMA 0x01u +#define ___GFP_HIGHMEM 0x02u +#define ___GFP_DMA32 0x04u +#define ___GFP_MOVABLE 0x08u +#define ___GFP_RECLAIMABLE 0x10u +#define ___GFP_HIGH 0x20u +#define ___GFP_IO 0x40u +#define ___GFP_FS 0x80u +#define ___GFP_WRITE 0x100u +#define ___GFP_NOWARN 0x200u +#define ___GFP_RETRY_MAYFAIL 0x400u +#define ___GFP_NOFAIL 0x800u +#define ___GFP_NORETRY 0x1000u +#define ___GFP_MEMALLOC 0x2000u +#define ___GFP_COMP 0x4000u +#define ___GFP_ZERO 0x8000u +#define ___GFP_NOMEMALLOC 0x10000u +#define ___GFP_HARDWALL 0x20000u +#define ___GFP_THISNODE 0x40000u +#define ___GFP_ATOMIC 0x80000u +#define ___GFP_ACCOUNT 0x100000u +#define ___GFP_DIRECT_RECLAIM 0x200000u +#define ___GFP_KSWAPD_RECLAIM 0x400000u +#ifdef CONFIG_LOCKDEP +#define ___GFP_NOLOCKDEP 0x800000u +#else +#define ___GFP_NOLOCKDEP 0 +#endif +/* If the above are modified, __GFP_BITS_SHIFT may need updating */ + +/* + * Physical address zone modifiers (see linux/mmzone.h - low four bits) + * + * Do not put any conditional on these. If necessary modify the definitions + * without the underscores and use them consistently. The definitions here may + * be used in bit comparisons. 
+ */ +#define __GFP_DMA ((__force gfp_t)___GFP_DMA) +#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) +#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) +#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ +#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) + +/** + * DOC: Page mobility and placement hints + * + * Page mobility and placement hints + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * These flags provide hints about how mobile the page is. Pages with similar + * mobility are placed within the same pageblocks to minimise problems due + * to external fragmentation. + * + * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be + * moved by page migration during memory compaction or can be reclaimed. + * + * %__GFP_RECLAIMABLE is used for slab allocations that specify + * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. + * + * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible, + * these pages will be spread between local zones to avoid all the dirty + * pages being in one zone (fair zone allocation policy). + * + * %__GFP_HARDWALL enforces the cpuset memory allocation policy. + * + * %__GFP_THISNODE forces the allocation to be satisfied from the requested + * node with no fallbacks or placement policy enforcements. + * + * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. + */ +#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) +#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) +#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) +#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) +#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) + +/** + * DOC: Watermark modifiers + * + * Watermark modifiers -- controls access to emergency reserves + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * %__GFP_HIGH indicates that the caller is high-priority and that granting + * the request is necessary before the system can make forward progress. + * For example, creating an IO context to clean pages. + * + * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is + * high priority. Users are typically interrupt handlers. This may be + * used in conjunction with %__GFP_HIGH. + * + * %__GFP_MEMALLOC allows access to all memory. This should only be used when + * the caller guarantees the allocation will allow more memory to be freed + * very shortly e.g. process exiting or swapping. Users either should + * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). + * + * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. + * This takes precedence over the %__GFP_MEMALLOC flag if both are set. + */ +#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) +#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) +#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) +#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) + +/** + * DOC: Reclaim modifiers + * + * Reclaim modifiers + * ~~~~~~~~~~~~~~~~~ + * + * %__GFP_IO can start physical IO. + * + * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the + * allocator recursing into the filesystem which might already be holding + * locks. + * + * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. + * This flag can be cleared to avoid unnecessary delays when a fallback + * option is available.
+ * + * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when + * the low watermark is reached and have it reclaim pages until the high + * watermark is reached. A caller may wish to clear this flag when fallback + * options are available and the reclaim is likely to disrupt the system. The + * canonical example is THP allocation where a fallback is cheap but + * reclaim/compaction may cause indirect stalls. + * + * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. + * + * The default allocator behavior depends on the request size. We have a concept + * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER). + * !costly allocations are too essential to fail so they are implicitly + * non-failing by default (with some exceptions, e.g. OOM victims might fail, so + * the caller still has to check for failures) while costly requests try not to + * be disruptive and back off even without invoking the OOM killer. + * The following three modifiers might be used to override some of these + * implicit rules: + * + * %__GFP_NORETRY: The VM implementation will try only very lightweight + * memory direct reclaim to get some memory under memory pressure (thus + * it can sleep). It will avoid disruptive actions like the OOM killer. The + * caller must handle the failure which is quite likely to happen under + * heavy memory pressure. The flag is suitable when failure can easily be + * handled at small cost, such as reduced throughput. + * + * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim + * procedures that have previously failed if there is some indication + * that progress has been made elsewhere. It can wait for other + * tasks to attempt high level approaches to freeing memory such as + * compaction (which removes fragmentation) and page-out. + * There is still a definite limit to the number of retries, but it is + * a larger limit than with %__GFP_NORETRY. + * Allocations with this flag may fail, but only when there is + * genuinely little unused memory. While these allocations do not + * directly trigger the OOM killer, their failure indicates that + * the system is likely to need to use the OOM killer soon. The + * caller must handle failure, but can reasonably do so by failing + * a higher-level request, or completing it only in a much less + * efficient manner. + * If the allocation does fail, and the caller is in a position to + * free some non-essential memory, doing so could benefit the system + * as a whole. + * + * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller + * cannot handle allocation failures. The allocation could block + * indefinitely but will never return with failure. Testing for + * failure is pointless. + * New users should be evaluated carefully (and the flag should be + * used only when there is no reasonable failure policy) but it is + * definitely preferable to use the flag rather than open-code an endless + * loop around the allocator. + * Using this flag for costly allocations is _highly_ discouraged.
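A minimal sketch of how these modifiers are typically combined with GFP_KERNEL; the buffer sizes, variables and fallback policy are invented for illustration:

	/* opportunistic: fail fast under pressure, fall back to a smaller buffer */
	buf = kmalloc(SZ_64K, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	if (!buf)
		buf = kmalloc(SZ_4K, GFP_KERNEL);

	/* large table: retry harder, but still accept failure when memory is truly short */
	table = kmalloc_array(nr_entries, sizeof(*table),
			      GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!table)
		return -ENOMEM;

A __GFP_NOFAIL caller, by contrast, performs no NULL check at all, which is exactly why that flag is reserved for code with no sane failure path.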
+ */ +#define __GFP_IO ((__force gfp_t)___GFP_IO) +#define __GFP_FS ((__force gfp_t)___GFP_FS) +#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ +#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ +#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) +#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL) +#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) +#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) + +/** + * DOC: Action modifiers + * + * Action modifiers + * ~~~~~~~~~~~~~~~~ + * + * %__GFP_NOWARN suppresses allocation failure reports. + * + * %__GFP_COMP addresses compound page metadata. + * + * %__GFP_ZERO returns a zeroed page on success. + */ +#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) +#define __GFP_COMP ((__force gfp_t)___GFP_COMP) +#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) + +/* Disable lockdep for GFP context tracking */ +#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) + +/* Room for N __GFP_FOO bits */ +#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) + +/** + * DOC: Useful GFP flag combinations + * + * Useful GFP flag combinations + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Useful GFP flag combinations that are commonly used. It is recommended + * that subsystems start with one of these combinations and then set/clear + * %__GFP_FOO flags as necessary. + * + * %GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower + * watermark is applied to allow access to "atomic reserves". + * + * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires + * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim. + * + * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is + * accounted to kmemcg. + * + * %GFP_NOWAIT is for kernel allocations that should not stall for direct + * reclaim, start physical IO or use any filesystem callback. + * + * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages + * that do not require the starting of any physical IO. + * Please try to avoid using this flag directly and instead use + * memalloc_noio_{save,restore} to mark the whole scope which cannot + * perform any IO with a short explanation why. All allocation requests + * will inherit GFP_NOIO implicitly. + * + * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. + * Please try to avoid using this flag directly and instead use + * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't + * recurse into the FS layer with a short explanation why. All allocation + * requests will inherit GFP_NOFS implicitly. + * + * %GFP_USER is for userspace allocations that also need to be directly + * accessible by the kernel or hardware. It is typically used by hardware + * for buffers that are mapped to userspace (e.g. graphics) that hardware + * still must DMA to. cpuset limits are enforced for these allocations. + * + * %GFP_DMA exists for historical reasons and should be avoided where possible. + * The flag indicates that the caller requires that the lowest zone be + * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but + * it would require careful auditing as some users really require it and + * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the + * lowest zone as a type of emergency reserve.
+ * + * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit + * address. + * + * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, + * do not need to be directly accessible by the kernel but that cannot + * move once in use. An example may be a hardware allocation that maps + * data directly into userspace but has no addressing limitations. + * + * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not + * need direct access to but can use kmap() when access is required. They + * are expected to be movable via page reclaim or page migration. Typically, + * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE. + * + * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They + * are compound allocations that will generally fail quickly if memory is not + * available and will not wake kswapd/kcompactd on failure. The _LIGHT + * version does not attempt reclaim/compaction at all and is by default used + * in page fault path, while the non-light is used by khugepaged. + */ +#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) +#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) +#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) +#define GFP_NOIO (__GFP_RECLAIM) +#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) +#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) +#define GFP_DMA __GFP_DMA +#define GFP_DMA32 __GFP_DMA32 +#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) +#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) +#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ + __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) +#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) + +/* Convert GFP flags to their corresponding migrate type */ +#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) +#define GFP_MOVABLE_SHIFT 3 + +static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) +{ + VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); + BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE); + BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE); + + if (unlikely(page_group_by_mobility_disabled)) + return MIGRATE_UNMOVABLE; + + /* Group based on mobility */ + return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT; +} +#undef GFP_MOVABLE_MASK +#undef GFP_MOVABLE_SHIFT + +static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) +{ + return !!(gfp_flags & __GFP_DIRECT_RECLAIM); +} + +/** + * gfpflags_normal_context - is gfp_flags a normal sleepable context? + * @gfp_flags: gfp_flags to test + * + * Test whether @gfp_flags indicates that the allocation is from the + * %current context and allowed to sleep. + * + * An allocation being allowed to block doesn't mean it owns the %current + * context. When direct reclaim path tries to allocate memory, the + * allocation context is nested inside whatever %current was doing at the + * time of the original allocation. The nested allocation may be allowed + * to block but modifying anything %current owns can corrupt the outer + * context's expectations. + * + * %true result from this function indicates that the allocation context + * can sleep and use anything that's associated with %current. 
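A small hypothetical helper (struct foo and foo_wq are invented) showing how code that is handed a gfp_t by its caller can use gfpflags_allow_blocking(), defined above, to gate its own sleeping operations; gfpflags_normal_context() below is the stricter variant for code that additionally touches state owned by current:

	static struct foo *foo_get(gfp_t gfp)
	{
		struct foo *f = kzalloc(sizeof(*f), gfp);

		/* only try to make room if the caller's context may sleep */
		if (!f && gfpflags_allow_blocking(gfp)) {
			flush_workqueue(foo_wq);
			f = kzalloc(sizeof(*f), gfp);
		}
		return f;
	}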
+ */ +static inline bool gfpflags_normal_context(const gfp_t gfp_flags) +{ + return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) == + __GFP_DIRECT_RECLAIM; +} + +#ifdef CONFIG_HIGHMEM +#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM +#else +#define OPT_ZONE_HIGHMEM ZONE_NORMAL +#endif + +#ifdef CONFIG_ZONE_DMA +#define OPT_ZONE_DMA ZONE_DMA +#else +#define OPT_ZONE_DMA ZONE_NORMAL +#endif + +#ifdef CONFIG_ZONE_DMA32 +#define OPT_ZONE_DMA32 ZONE_DMA32 +#else +#define OPT_ZONE_DMA32 ZONE_NORMAL +#endif + +/* + * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the + * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT + * bits long and there are 16 of them to cover all possible combinations of + * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM. + * + * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. + * But GFP_MOVABLE is not only a zone specifier but also an allocation + * policy. Therefore __GFP_MOVABLE plus another zone selector is valid. + * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1". + * + * bit result + * ================= + * 0x0 => NORMAL + * 0x1 => DMA or NORMAL + * 0x2 => HIGHMEM or NORMAL + * 0x3 => BAD (DMA+HIGHMEM) + * 0x4 => DMA32 or NORMAL + * 0x5 => BAD (DMA+DMA32) + * 0x6 => BAD (HIGHMEM+DMA32) + * 0x7 => BAD (HIGHMEM+DMA32+DMA) + * 0x8 => NORMAL (MOVABLE+0) + * 0x9 => DMA or NORMAL (MOVABLE+DMA) + * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) + * 0xb => BAD (MOVABLE+HIGHMEM+DMA) + * 0xc => DMA32 or NORMAL (MOVABLE+DMA32) + * 0xd => BAD (MOVABLE+DMA32+DMA) + * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) + * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) + * + * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms. + */ + +#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4 +/* ZONE_DEVICE is not a valid GFP zone specifier */ +#define GFP_ZONES_SHIFT 2 +#else +#define GFP_ZONES_SHIFT ZONES_SHIFT +#endif + +#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG +#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer +#endif + +#define GFP_ZONE_TABLE ( \ + (ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \ + | (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \ + | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \ + | (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \ + | (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \ + | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \ + | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\ + | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\ +) + +/* + * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32 + * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per + * entry starting with bit 0. Bit is set if the combination is not + * allowed. 
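As a worked example of the table above, using gfp_zone() as defined a little further down: GFP_HIGHUSER_MOVABLE sets __GFP_HIGHMEM and __GFP_MOVABLE, so its low four bits are 0x2 | 0x8 = 0xa and the lookup yields ZONE_MOVABLE, while GFP_KERNEL sets none of the zone bits (0x0) and maps to ZONE_NORMAL:

	enum zone_type z;

	z = gfp_zone(GFP_KERNEL);		/* 0x0 -> ZONE_NORMAL  */
	z = gfp_zone(GFP_HIGHUSER_MOVABLE);	/* 0xa -> ZONE_MOVABLE */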
+ */ +#define GFP_ZONE_BAD ( \ + 1 << (___GFP_DMA | ___GFP_HIGHMEM) \ + | 1 << (___GFP_DMA | ___GFP_DMA32) \ + | 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \ + | 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \ + | 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \ + | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \ + | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \ + | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \ +) + +static inline enum zone_type gfp_zone(gfp_t flags) +{ + enum zone_type z; + int bit = (__force int) (flags & GFP_ZONEMASK); + + z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) & + ((1 << GFP_ZONES_SHIFT) - 1); + VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); + return z; +} + +/* + * There is only one page-allocator function, and two main namespaces to + * it. The alloc_page*() variants return 'struct page *' and as such + * can allocate highmem pages, the *get*page*() variants return + * virtual kernel addresses to the allocated page(s). + */ + +static inline int gfp_zonelist(gfp_t flags) +{ +#ifdef CONFIG_NUMA + if (unlikely(flags & __GFP_THISNODE)) + return ZONELIST_NOFALLBACK; +#endif + return ZONELIST_FALLBACK; +} + +/* + * We get the zone list from the current node and the gfp_mask. + * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. + * There are two zonelists per node, one for all zones with memory and + * one containing just zones from the node the zonelist belongs to. + * + * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets + * optimized to &contig_page_data at compile-time. + */ +static inline struct zonelist *node_zonelist(int nid, gfp_t flags) +{ + return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags); +} + +#ifndef HAVE_ARCH_FREE_PAGE +static inline void arch_free_page(struct page *page, int order) { } +#endif +#ifndef HAVE_ARCH_ALLOC_PAGE +static inline void arch_alloc_page(struct page *page, int order) { } +#endif + +struct page * +__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, + nodemask_t *nodemask); + +static inline struct page * +__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid) +{ + return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL); +} + +/* + * Allocate pages, preferring the node given as nid. The node must be valid and + * online. For more general interface, see alloc_pages_node(). + */ +static inline struct page * +__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) +{ + VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); + VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); + + return __alloc_pages(gfp_mask, order, nid); +} + +/* + * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE, + * prefer the current CPU's closest node. Otherwise node must be valid and + * online. 
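A minimal usage sketch of the allocation entry points declared in this header (error handling trimmed; the order-2 size is arbitrary):

	/* four contiguous pages (order 2), preferring the local node */
	struct page *page = alloc_pages(GFP_KERNEL, 2);
	if (page) {
		void *va = page_address(page);	/* fine here: GFP_KERNEL pages are not highmem */
		/* ... use va ... */
		__free_pages(page, 2);
	}

	/* a single zeroed page returned as a kernel virtual address */
	unsigned long addr = get_zeroed_page(GFP_KERNEL);
	if (addr)
		free_page(addr);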
+ */ +static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, + unsigned int order) +{ + if (nid == NUMA_NO_NODE) + nid = numa_mem_id(); + + return __alloc_pages_node(nid, gfp_mask, order); +} + +#ifdef CONFIG_NUMA +extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); + +static inline struct page * +alloc_pages(gfp_t gfp_mask, unsigned int order) +{ + return alloc_pages_current(gfp_mask, order); +} +extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, + struct vm_area_struct *vma, unsigned long addr, + int node, bool hugepage); +#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ + alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) +#else +#define alloc_pages(gfp_mask, order) \ + alloc_pages_node(numa_node_id(), gfp_mask, order) +#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ + alloc_pages(gfp_mask, order) +#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ + alloc_pages(gfp_mask, order) +#endif +#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) +#define alloc_page_vma(gfp_mask, vma, addr) \ + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) +#define alloc_page_vma_node(gfp_mask, vma, addr, node) \ + alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) + +extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); +extern unsigned long get_zeroed_page(gfp_t gfp_mask); + +void *alloc_pages_exact(size_t size, gfp_t gfp_mask); +void free_pages_exact(void *virt, size_t size); +void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); + +#define __get_free_page(gfp_mask) \ + __get_free_pages((gfp_mask), 0) + +#define __get_dma_pages(gfp_mask, order) \ + __get_free_pages((gfp_mask) | GFP_DMA, (order)) + +extern void __free_pages(struct page *page, unsigned int order); +extern void free_pages(unsigned long addr, unsigned int order); +extern void free_unref_page(struct page *page); +extern void free_unref_page_list(struct list_head *list); + +struct page_frag_cache; +extern void __page_frag_cache_drain(struct page *page, unsigned int count); +extern void *page_frag_alloc(struct page_frag_cache *nc, + unsigned int fragsz, gfp_t gfp_mask); +extern void page_frag_free(void *addr); + +#define __free_page(page) __free_pages((page), 0) +#define free_page(addr) free_pages((addr), 0) + +void page_alloc_init(void); +void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); +void drain_all_pages(struct zone *zone); +void drain_local_pages(struct zone *zone); + +void page_alloc_init_late(void); + +/* + * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what + * GFP flags are used before interrupts are enabled. Once interrupts are + * enabled, it is set to __GFP_BITS_MASK while the system is running. During + * hibernation, it is used by PM to avoid I/O during memory allocation while + * devices are suspended. + */ +extern gfp_t gfp_allowed_mask; + +/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */ +bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); + +extern void pm_restrict_gfp_mask(void); +extern void pm_restore_gfp_mask(void); + +#ifdef CONFIG_PM_SLEEP +extern bool pm_suspended_storage(void); +#else +static inline bool pm_suspended_storage(void) +{ + return false; +} +#endif /* CONFIG_PM_SLEEP */ + +#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) +/* The below functions must be run on a range from a single zone. 
*/ +extern int alloc_contig_range(unsigned long start, unsigned long end, + unsigned migratetype, gfp_t gfp_mask); +extern void free_contig_range(unsigned long pfn, unsigned nr_pages); +#endif + +#ifdef CONFIG_CMA +/* CMA stuff */ +extern void init_cma_reserved_pageblock(struct page *page); +#endif + +#endif /* __LINUX_GFP_H */ diff --git a/include/linux/glob.h b/include/linux/glob.h new file mode 100644 index 000000000..861327b33 --- /dev/null +++ b/include/linux/glob.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_GLOB_H +#define _LINUX_GLOB_H + +#include /* For bool */ +#include /* For __pure */ + +bool __pure glob_match(char const *pat, char const *str); + +#endif /* _LINUX_GLOB_H */ diff --git a/include/linux/gnss.h b/include/linux/gnss.h new file mode 100644 index 000000000..435469770 --- /dev/null +++ b/include/linux/gnss.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * GNSS receiver support + * + * Copyright (C) 2018 Johan Hovold + */ + +#ifndef _LINUX_GNSS_H +#define _LINUX_GNSS_H + +#include +#include +#include +#include +#include +#include +#include + +struct gnss_device; + +enum gnss_type { + GNSS_TYPE_NMEA = 0, + GNSS_TYPE_SIRF, + GNSS_TYPE_UBX, + + GNSS_TYPE_COUNT +}; + +struct gnss_operations { + int (*open)(struct gnss_device *gdev); + void (*close)(struct gnss_device *gdev); + int (*write_raw)(struct gnss_device *gdev, const unsigned char *buf, + size_t count); +}; + +struct gnss_device { + struct device dev; + struct cdev cdev; + int id; + + enum gnss_type type; + unsigned long flags; + + struct rw_semaphore rwsem; + const struct gnss_operations *ops; + unsigned int count; + unsigned int disconnected:1; + + struct mutex read_mutex; + struct kfifo read_fifo; + wait_queue_head_t read_queue; + + struct mutex write_mutex; + char *write_buf; +}; + +struct gnss_device *gnss_allocate_device(struct device *parent); +void gnss_put_device(struct gnss_device *gdev); +int gnss_register_device(struct gnss_device *gdev); +void gnss_deregister_device(struct gnss_device *gdev); + +int gnss_insert_raw(struct gnss_device *gdev, const unsigned char *buf, + size_t count); + +static inline void gnss_set_drvdata(struct gnss_device *gdev, void *data) +{ + dev_set_drvdata(&gdev->dev, data); +} + +static inline void *gnss_get_drvdata(struct gnss_device *gdev) +{ + return dev_get_drvdata(&gdev->dev); +} + +#endif /* _LINUX_GNSS_H */ diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h new file mode 100644 index 000000000..265a099cd --- /dev/null +++ b/include/linux/goldfish.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_GOLDFISH_H +#define __LINUX_GOLDFISH_H + +#include +#include +#include + +/* Helpers for Goldfish virtual platform */ + +static inline void gf_write_ptr(const void *ptr, void __iomem *portl, + void __iomem *porth) +{ + const unsigned long addr = (unsigned long)ptr; + + writel(lower_32_bits(addr), portl); +#ifdef CONFIG_64BIT + writel(upper_32_bits(addr), porth); +#endif +} + +static inline void gf_write_dma_addr(const dma_addr_t addr, + void __iomem *portl, + void __iomem *porth) +{ + writel(lower_32_bits(addr), portl); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + writel(upper_32_bits(addr), porth); +#endif +} + + +#endif /* __LINUX_GOLDFISH_H */ diff --git a/include/linux/gpio-pxa.h b/include/linux/gpio-pxa.h new file mode 100644 index 000000000..1e1fa0160 --- /dev/null +++ b/include/linux/gpio-pxa.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __GPIO_PXA_H +#define __GPIO_PXA_H + 
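The two helpers defined just below split a global PXA GPIO number into a register bank and a bit mask within that bank, for example:

	int gpio = 37;
	int bank = gpio_to_bank(gpio);	/* 37 >> 5        == 1    */
	u32 mask = GPIO_bit(gpio);	/* 1 << (37 & 31) == 0x20 */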
+#define GPIO_bit(x) (1 << ((x) & 0x1f)) + +#define gpio_to_bank(gpio) ((gpio) >> 5) + +/* NOTE: some PXAs have fewer on-chip GPIOs (like PXA255, with 85). + * Those cases currently cause holes in the GPIO number space, the + * actual number of the last GPIO is recorded by 'pxa_last_gpio'. + */ +extern int pxa_last_gpio; + +extern int pxa_irq_to_gpio(int irq); + +struct pxa_gpio_platform_data { + int irq_base; + int (*gpio_set_wake)(unsigned int gpio, unsigned int on); +}; + +#endif /* __GPIO_PXA_H */ diff --git a/include/linux/gpio.h b/include/linux/gpio.h new file mode 100644 index 000000000..b3115d1a7 --- /dev/null +++ b/include/linux/gpio.h @@ -0,0 +1,264 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * + * This is the LEGACY GPIO bulk include file, including legacy APIs. It is + * used for GPIO drivers still referencing the global GPIO numberspace, + * and should not be included in new code. + * + * If you're implementing a GPIO driver, only include + * If you're implementing a GPIO consumer, only include + */ +#ifndef __LINUX_GPIO_H +#define __LINUX_GPIO_H + +#include + +/* see Documentation/driver-api/gpio/legacy.rst */ + +/* make these flag values available regardless of GPIO kconfig options */ +#define GPIOF_DIR_OUT (0 << 0) +#define GPIOF_DIR_IN (1 << 0) + +#define GPIOF_INIT_LOW (0 << 1) +#define GPIOF_INIT_HIGH (1 << 1) + +#define GPIOF_IN (GPIOF_DIR_IN) +#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW) +#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH) + +/* Gpio pin is active-low */ +#define GPIOF_ACTIVE_LOW (1 << 2) + +/* Gpio pin is open drain */ +#define GPIOF_OPEN_DRAIN (1 << 3) + +/* Gpio pin is open source */ +#define GPIOF_OPEN_SOURCE (1 << 4) + +#define GPIOF_EXPORT (1 << 5) +#define GPIOF_EXPORT_CHANGEABLE (1 << 6) +#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT) +#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE) + +/** + * struct gpio - a structure describing a GPIO with configuration + * @gpio: the GPIO number + * @flags: GPIO configuration as specified by GPIOF_* + * @label: a literal description string of this GPIO + */ +struct gpio { + unsigned gpio; + unsigned long flags; + const char *label; +}; + +#ifdef CONFIG_GPIOLIB + +#ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H +#include +#else + +#include + +static inline int gpio_get_value(unsigned int gpio) +{ + return __gpio_get_value(gpio); +} + +static inline void gpio_set_value(unsigned int gpio, int value) +{ + __gpio_set_value(gpio, value); +} + +static inline int gpio_cansleep(unsigned int gpio) +{ + return __gpio_cansleep(gpio); +} + +static inline int gpio_to_irq(unsigned int gpio) +{ + return __gpio_to_irq(gpio); +} + +static inline int irq_to_gpio(unsigned int irq) +{ + return -EINVAL; +} + +#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */ + +/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */ + +struct device; + +int devm_gpio_request(struct device *dev, unsigned gpio, const char *label); +int devm_gpio_request_one(struct device *dev, unsigned gpio, + unsigned long flags, const char *label); +void devm_gpio_free(struct device *dev, unsigned int gpio); + +#else /* ! 
CONFIG_GPIOLIB */ + +#include +#include +#include +#include + +struct device; +struct gpio_chip; + +static inline bool gpio_is_valid(int number) +{ + return false; +} + +static inline int gpio_request(unsigned gpio, const char *label) +{ + return -ENOSYS; +} + +static inline int gpio_request_one(unsigned gpio, + unsigned long flags, const char *label) +{ + return -ENOSYS; +} + +static inline int gpio_request_array(const struct gpio *array, size_t num) +{ + return -ENOSYS; +} + +static inline void gpio_free(unsigned gpio) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + +static inline void gpio_free_array(const struct gpio *array, size_t num) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + +static inline int gpio_direction_input(unsigned gpio) +{ + return -ENOSYS; +} + +static inline int gpio_direction_output(unsigned gpio, int value) +{ + return -ENOSYS; +} + +static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) +{ + return -ENOSYS; +} + +static inline int gpio_get_value(unsigned gpio) +{ + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); + return 0; +} + +static inline void gpio_set_value(unsigned gpio, int value) +{ + /* GPIO can never have been requested or set as output */ + WARN_ON(1); +} + +static inline int gpio_cansleep(unsigned gpio) +{ + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); + return 0; +} + +static inline int gpio_get_value_cansleep(unsigned gpio) +{ + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); + return 0; +} + +static inline void gpio_set_value_cansleep(unsigned gpio, int value) +{ + /* GPIO can never have been requested or set as output */ + WARN_ON(1); +} + +static inline int gpio_export(unsigned gpio, bool direction_may_change) +{ + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); + return -EINVAL; +} + +static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) +{ + /* GPIO can never have been exported */ + WARN_ON(1); + return -EINVAL; +} + +static inline void gpio_unexport(unsigned gpio) +{ + /* GPIO can never have been exported */ + WARN_ON(1); +} + +static inline int gpio_to_irq(unsigned gpio) +{ + /* GPIO can never have been requested or set as input */ + WARN_ON(1); + return -EINVAL; +} + +static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, + unsigned int offset) +{ + WARN_ON(1); + return -EINVAL; +} + +static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, + unsigned int offset) +{ + WARN_ON(1); +} + +static inline int irq_to_gpio(unsigned irq) +{ + /* irq can never have been returned from gpio_to_irq() */ + WARN_ON(1); + return -EINVAL; +} + +static inline int devm_gpio_request(struct device *dev, unsigned gpio, + const char *label) +{ + WARN_ON(1); + return -EINVAL; +} + +static inline int devm_gpio_request_one(struct device *dev, unsigned gpio, + unsigned long flags, const char *label) +{ + WARN_ON(1); + return -EINVAL; +} + +static inline void devm_gpio_free(struct device *dev, unsigned int gpio) +{ + WARN_ON(1); +} + +#endif /* ! 
CONFIG_GPIOLIB */ + +#endif /* __LINUX_GPIO_H */ diff --git a/include/linux/gpio/aspeed.h b/include/linux/gpio/aspeed.h new file mode 100644 index 000000000..1bfb3cdc8 --- /dev/null +++ b/include/linux/gpio/aspeed.h @@ -0,0 +1,15 @@ +#ifndef __GPIO_ASPEED_H +#define __GPIO_ASPEED_H + +struct aspeed_gpio_copro_ops { + int (*request_access)(void *data); + int (*release_access)(void *data); +}; + +int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc, + u16 *vreg_offset, u16 *dreg_offset, u8 *bit); +int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc); +int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data); + + +#endif /* __GPIO_ASPEED_H */ diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h new file mode 100644 index 000000000..8dfd8300d --- /dev/null +++ b/include/linux/gpio/consumer.h @@ -0,0 +1,561 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_GPIO_CONSUMER_H +#define __LINUX_GPIO_CONSUMER_H + +#include +#include +#include + +struct device; + +/** + * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are + * preferable to the old integer-based handles. + * + * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid + * until the GPIO is released. + */ +struct gpio_desc; + +/** + * Struct containing an array of descriptors that can be obtained using + * gpiod_get_array(). + */ +struct gpio_descs { + unsigned int ndescs; + struct gpio_desc *desc[]; +}; + +#define GPIOD_FLAGS_BIT_DIR_SET BIT(0) +#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1) +#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2) +#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3) + +/** + * Optional flags that can be passed to one of gpiod_* to configure direction + * and output value. These values cannot be OR'd. 
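+ *
+ * A minimal consumer sketch (illustrative only; the "reset" con_id and the
+ * surrounding driver code are placeholders, not part of this header):
+ *
+ *	struct gpio_desc *reset;
+ *
+ *	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ *	if (IS_ERR(reset))
+ *		return PTR_ERR(reset);
+ *	gpiod_set_value(reset, 1);	/* drive the line active */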
+ */ +enum gpiod_flags { + GPIOD_ASIS = 0, + GPIOD_IN = GPIOD_FLAGS_BIT_DIR_SET, + GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT, + GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT | + GPIOD_FLAGS_BIT_DIR_VAL, + GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_OPEN_DRAIN, + GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_OPEN_DRAIN, +}; + +#ifdef CONFIG_GPIOLIB + +/* Return the number of GPIOs associated with a device / function */ +int gpiod_count(struct device *dev, const char *con_id); + +/* Acquire and dispose GPIOs */ +struct gpio_desc *__must_check gpiod_get(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_desc *__must_check gpiod_get_index(struct device *dev, + const char *con_id, + unsigned int idx, + enum gpiod_flags flags); +struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, + const char *con_id, + unsigned int index, + enum gpiod_flags flags); +struct gpio_descs *__must_check gpiod_get_array(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +void gpiod_put(struct gpio_desc *desc); +void gpiod_put_array(struct gpio_descs *descs); + +struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, + const char *con_id, + unsigned int idx, + enum gpiod_flags flags); +struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_desc *__must_check +devm_gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index, enum gpiod_flags flags); +struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_descs *__must_check +devm_gpiod_get_array_optional(struct device *dev, const char *con_id, + enum gpiod_flags flags); +void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); +void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs); + +int gpiod_get_direction(struct gpio_desc *desc); +int gpiod_direction_input(struct gpio_desc *desc); +int gpiod_direction_output(struct gpio_desc *desc, int value); +int gpiod_direction_output_raw(struct gpio_desc *desc, int value); + +/* Value get/set from non-sleeping context */ +int gpiod_get_value(const struct gpio_desc *desc); +int gpiod_get_array_value(unsigned int array_size, + struct gpio_desc **desc_array, int *value_array); +void gpiod_set_value(struct gpio_desc *desc, int value); +void gpiod_set_array_value(unsigned int array_size, + struct gpio_desc **desc_array, int *value_array); +int gpiod_get_raw_value(const struct gpio_desc *desc); +int gpiod_get_raw_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); +void gpiod_set_raw_value(struct gpio_desc *desc, int value); +int gpiod_set_raw_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); + +/* Value get/set from sleeping context */ +int gpiod_get_value_cansleep(const struct gpio_desc *desc); +int gpiod_get_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); +void 
gpiod_set_value_cansleep(struct gpio_desc *desc, int value); +void gpiod_set_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); +int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); +int gpiod_get_raw_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); +void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); +int gpiod_set_raw_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); + +int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); +int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); + +int gpiod_is_active_low(const struct gpio_desc *desc); +int gpiod_cansleep(const struct gpio_desc *desc); + +int gpiod_to_irq(const struct gpio_desc *desc); +int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name); + +/* Convert between the old gpio_ and new gpiod_ interfaces */ +struct gpio_desc *gpio_to_desc(unsigned gpio); +int desc_to_gpio(const struct gpio_desc *desc); + +/* Child properties interface */ +struct device_node; +struct fwnode_handle; + +struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, + struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); +struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); +struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, + const char *con_id, int index, + struct fwnode_handle *child, + enum gpiod_flags flags, + const char *label); + +#else /* CONFIG_GPIOLIB */ + +static inline int gpiod_count(struct device *dev, const char *con_id) +{ + return 0; +} + +static inline struct gpio_desc *__must_check gpiod_get(struct device *dev, + const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} +static inline struct gpio_desc *__must_check +gpiod_get_index(struct device *dev, + const char *con_id, + unsigned int idx, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *__must_check +gpiod_get_optional(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return NULL; +} + +static inline struct gpio_desc *__must_check +gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index, enum gpiod_flags flags) +{ + return NULL; +} + +static inline struct gpio_descs *__must_check +gpiod_get_array(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_descs *__must_check +gpiod_get_array_optional(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return NULL; +} + +static inline void gpiod_put(struct gpio_desc *desc) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(desc); +} + +static inline void gpiod_put_array(struct gpio_descs *descs) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(descs); +} + +static inline struct gpio_desc *__must_check +devm_gpiod_get(struct device *dev, + const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} +static inline +struct gpio_desc *__must_check +devm_gpiod_get_index(struct device *dev, + const char *con_id, + unsigned int idx, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *__must_check +devm_gpiod_get_optional(struct device 
*dev, const char *con_id, + enum gpiod_flags flags) +{ + return NULL; +} + +static inline struct gpio_desc *__must_check +devm_gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index, enum gpiod_flags flags) +{ + return NULL; +} + +static inline struct gpio_descs *__must_check +devm_gpiod_get_array(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_descs *__must_check +devm_gpiod_get_array_optional(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return NULL; +} + +static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(desc); +} + +static inline void devm_gpiod_put_array(struct device *dev, + struct gpio_descs *descs) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(descs); +} + + +static inline int gpiod_get_direction(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -ENOSYS; +} +static inline int gpiod_direction_input(struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -ENOSYS; +} +static inline int gpiod_direction_output(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -ENOSYS; +} +static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -ENOSYS; +} + + +static inline int gpiod_get_value(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return 0; +} +static inline int gpiod_get_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(desc_array); + return 0; +} +static inline void gpiod_set_value(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); +} +static inline void gpiod_set_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(desc_array); +} +static inline int gpiod_get_raw_value(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return 0; +} +static inline int gpiod_get_raw_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(desc_array); + return 0; +} +static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); +} +static inline int gpiod_set_raw_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(desc_array); + return 0; +} + +static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return 0; +} +static inline int gpiod_get_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(desc_array); + return 0; +} +static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); +} +static inline void gpiod_set_array_value_cansleep(unsigned int array_size, + struct 
gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(desc_array); +} +static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return 0; +} +static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(desc_array); + return 0; +} +static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, + int value) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); +} +static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(desc_array); + return 0; +} + +static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -ENOSYS; +} + +static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -ENOSYS; +} + +static inline int gpiod_is_active_low(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return 0; +} +static inline int gpiod_cansleep(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return 0; +} + +static inline int gpiod_to_irq(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -EINVAL; +} + +static inline int gpiod_set_consumer_name(struct gpio_desc *desc, + const char *name) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -EINVAL; +} + +static inline struct gpio_desc *gpio_to_desc(unsigned gpio) +{ + return NULL; +} + +static inline int desc_to_gpio(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -EINVAL; +} + +/* Child properties interface */ +struct device_node; +struct fwnode_handle; + +static inline +struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, + struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +static inline +struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +static inline +struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, + const char *con_id, int index, + struct fwnode_handle *child, + enum gpiod_flags flags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +#endif /* CONFIG_GPIOLIB */ + +static inline +struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev, + const char *con_id, + struct fwnode_handle *child, + enum gpiod_flags flags, + const char *label) +{ + return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 0, child, + flags, label); +} + +#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) + +int gpiod_export(struct gpio_desc *desc, bool direction_may_change); +int gpiod_export_link(struct device *dev, const char *name, + struct gpio_desc *desc); +void gpiod_unexport(struct gpio_desc *desc); + +#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */ + +static inline int gpiod_export(struct gpio_desc *desc, + bool direction_may_change) +{ + return -ENOSYS; +} + +static inline 
int gpiod_export_link(struct device *dev, const char *name, + struct gpio_desc *desc) +{ + return -ENOSYS; +} + +static inline void gpiod_unexport(struct gpio_desc *desc) +{ +} + +#endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */ + +#endif diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h new file mode 100644 index 000000000..a4d5eb377 --- /dev/null +++ b/include/linux/gpio/driver.h @@ -0,0 +1,594 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_GPIO_DRIVER_H +#define __LINUX_GPIO_DRIVER_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct gpio_desc; +struct of_phandle_args; +struct device_node; +struct seq_file; +struct gpio_device; +struct module; + +#ifdef CONFIG_GPIOLIB + +#ifdef CONFIG_GPIOLIB_IRQCHIP +/** + * struct gpio_irq_chip - GPIO interrupt controller + */ +struct gpio_irq_chip { + /** + * @chip: + * + * GPIO IRQ chip implementation, provided by GPIO driver. + */ + struct irq_chip *chip; + + /** + * @domain: + * + * Interrupt translation domain; responsible for mapping between GPIO + * hwirq number and Linux IRQ number. + */ + struct irq_domain *domain; + + /** + * @domain_ops: + * + * Table of interrupt domain operations for this IRQ chip. + */ + const struct irq_domain_ops *domain_ops; + + /** + * @handler: + * + * The IRQ handler to use (often a predefined IRQ core function) for + * GPIO IRQs, provided by GPIO driver. + */ + irq_flow_handler_t handler; + + /** + * @default_type: + * + * Default IRQ triggering type applied during GPIO driver + * initialization, provided by GPIO driver. + */ + unsigned int default_type; + + /** + * @lock_key: + * + * Per GPIO IRQ chip lockdep classes. + */ + struct lock_class_key *lock_key; + struct lock_class_key *request_key; + + /** + * @parent_handler: + * + * The interrupt handler for the GPIO chip's parent interrupts, may be + * NULL if the parent interrupts are nested rather than cascaded. + */ + irq_flow_handler_t parent_handler; + + /** + * @parent_handler_data: + * + * Data associated, and passed to, the handler for the parent + * interrupt. + */ + void *parent_handler_data; + + /** + * @num_parents: + * + * The number of interrupt parents of a GPIO chip. + */ + unsigned int num_parents; + + /** + * @parent_irq: + * + * For use by gpiochip_set_cascaded_irqchip() + */ + unsigned int parent_irq; + + /** + * @parents: + * + * A list of interrupt parents of a GPIO chip. This is owned by the + * driver, so the core will only reference this list, not modify it. + */ + unsigned int *parents; + + /** + * @map: + * + * A list of interrupt parents for each line of a GPIO chip. + */ + unsigned int *map; + + /** + * @threaded: + * + * True if set the interrupt handling uses nested threads. + */ + bool threaded; + + /** + * @need_valid_mask: + * + * If set core allocates @valid_mask with all bits set to one. + */ + bool need_valid_mask; + + /** + * @valid_mask: + * + * If not %NULL holds bitmask of GPIOs which are valid to be included + * in IRQ domain of the chip. + */ + unsigned long *valid_mask; + + /** + * @first: + * + * Required for static IRQ allocation. If set, irq_domain_add_simple() + * will allocate and map all IRQs during initialization. 
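+ *
+ * Most drivers leave this at zero, in which case irq_domain_add_simple()
+ * falls back to a linear domain and IRQ mappings are created on demand.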
+ */ + unsigned int first; +}; + +static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip) +{ + return container_of(chip, struct gpio_irq_chip, chip); +} +#endif + +/** + * struct gpio_chip - abstract a GPIO controller + * @label: a functional name for the GPIO device, such as a part + * number or the name of the SoC IP-block implementing it. + * @gpiodev: the internal state holder, opaque struct + * @parent: optional parent device providing the GPIOs + * @owner: helps prevent removal of modules exporting active GPIOs + * @request: optional hook for chip-specific activation, such as + * enabling module power and clock; may sleep + * @free: optional hook for chip-specific deactivation, such as + * disabling module power and clock; may sleep + * @get_direction: returns direction for signal "offset", 0=out, 1=in, + * (same as GPIOF_DIR_XXX), or negative error + * @direction_input: configures signal "offset" as input, or returns error + * @direction_output: configures signal "offset" as output, or returns error + * @get: returns value for signal "offset", 0=low, 1=high, or negative error + * @get_multiple: reads values for multiple signals defined by "mask" and + * stores them in "bits", returns 0 on success or negative error + * @set: assigns output value for signal "offset" + * @set_multiple: assigns output values for multiple signals defined by "mask" + * @set_config: optional hook for all kinds of settings. Uses the same + * packed config format as generic pinconf. + * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; + * implementation may not sleep + * @dbg_show: optional routine to show contents in debugfs; default code + * will be used when this is omitted, but custom code can show extra + * state (such as pullup/pulldown configuration). + * @base: identifies the first GPIO number handled by this chip; + * or, if negative during registration, requests dynamic ID allocation. + * DEPRECATION: providing anything non-negative and nailing the base + * offset of GPIO chips is deprecated. Please pass -1 as base to + * let gpiolib select the chip base in all possible cases. We want to + * get rid of the static GPIO number space in the long run. + * @ngpio: the number of GPIOs handled by this controller; the last GPIO + * handled is (base + ngpio - 1). + * @names: if set, must be an array of strings to use as alternative + * names for the GPIOs in this chip. Any entry in the array + * may be NULL if there is no alias for the GPIO, however the + * array must be @ngpio entries long. A name can include a single printk + * format specifier for an unsigned int. It is substituted by the actual + * number of the gpio. + * @can_sleep: flag must be set iff get()/set() methods sleep, as they + * must while accessing GPIO expander chips over I2C or SPI. This + * implies that if the chip supports IRQs, these IRQs need to be threaded + * as the chip access may sleep when e.g. reading out the IRQ status + * registers. + * @read_reg: reader function for generic GPIO + * @write_reg: writer function for generic GPIO + * @be_bits: if the generic GPIO has big endian bit order (bit 31 is representing + * line 0, bit 30 is line 1 ... bit 0 is line 31) this is set to true by the + * generic GPIO core. It is for internal housekeeping only. 
+ * @reg_dat: data (in) register for generic GPIO + * @reg_set: output set register (out=high) for generic GPIO + * @reg_clr: output clear register (out=low) for generic GPIO + * @reg_dir: direction setting register for generic GPIO + * @bgpio_dir_inverted: indicates that the direction register is inverted + * (gpiolib private state variable) + * @bgpio_bits: number of register bits used for a generic GPIO i.e. + * * 8 + * @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep + * shadowed and real data registers writes together. + * @bgpio_data: shadowed data register for generic GPIO to clear/set bits + * safely. + * @bgpio_dir: shadowed direction register for generic GPIO to clear/set + * direction safely. + * + * A gpio_chip can help platforms abstract various sources of GPIOs so + * they can all be accessed through a common programing interface. + * Example sources would be SOC controllers, FPGAs, multifunction + * chips, dedicated GPIO expanders, and so on. + * + * Each chip controls a number of signals, identified in method calls + * by "offset" values in the range 0..(@ngpio - 1). When those signals + * are referenced through calls like gpio_get_value(gpio), the offset + * is calculated by subtracting @base from the gpio number. + */ +struct gpio_chip { + const char *label; + struct gpio_device *gpiodev; + struct device *parent; + struct module *owner; + + int (*request)(struct gpio_chip *chip, + unsigned offset); + void (*free)(struct gpio_chip *chip, + unsigned offset); + int (*get_direction)(struct gpio_chip *chip, + unsigned offset); + int (*direction_input)(struct gpio_chip *chip, + unsigned offset); + int (*direction_output)(struct gpio_chip *chip, + unsigned offset, int value); + int (*get)(struct gpio_chip *chip, + unsigned offset); + int (*get_multiple)(struct gpio_chip *chip, + unsigned long *mask, + unsigned long *bits); + void (*set)(struct gpio_chip *chip, + unsigned offset, int value); + void (*set_multiple)(struct gpio_chip *chip, + unsigned long *mask, + unsigned long *bits); + int (*set_config)(struct gpio_chip *chip, + unsigned offset, + unsigned long config); + int (*to_irq)(struct gpio_chip *chip, + unsigned offset); + + void (*dbg_show)(struct seq_file *s, + struct gpio_chip *chip); + int base; + u16 ngpio; + const char *const *names; + bool can_sleep; + +#if IS_ENABLED(CONFIG_GPIO_GENERIC) + unsigned long (*read_reg)(void __iomem *reg); + void (*write_reg)(void __iomem *reg, unsigned long data); + bool be_bits; + void __iomem *reg_dat; + void __iomem *reg_set; + void __iomem *reg_clr; + void __iomem *reg_dir; + bool bgpio_dir_inverted; + int bgpio_bits; + spinlock_t bgpio_lock; + unsigned long bgpio_data; + unsigned long bgpio_dir; +#endif + +#ifdef CONFIG_GPIOLIB_IRQCHIP + /* + * With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib + * to handle IRQs for most practical cases. + */ + + /** + * @irq: + * + * Integrates interrupt chip functionality with the GPIO chip. Can be + * used to handle IRQs for most practical cases. + */ + struct gpio_irq_chip irq; +#endif + + /** + * @need_valid_mask: + * + * If set core allocates @valid_mask with all bits set to one. + */ + bool need_valid_mask; + + /** + * @valid_mask: + * + * If not %NULL holds bitmask of GPIOs which are valid to be used + * from the chip. 
+ */ + unsigned long *valid_mask; + +#if defined(CONFIG_OF_GPIO) + /* + * If CONFIG_OF is enabled, then all GPIO controllers described in the + * device tree automatically may have an OF translation + */ + + /** + * @of_node: + * + * Pointer to a device tree node representing this GPIO controller. + */ + struct device_node *of_node; + + /** + * @of_gpio_n_cells: + * + * Number of cells used to form the GPIO specifier. + */ + unsigned int of_gpio_n_cells; + + /** + * @of_xlate: + * + * Callback to translate a device tree GPIO specifier into a chip- + * relative GPIO number and flags. + */ + int (*of_xlate)(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, u32 *flags); +#endif +}; + +extern const char *gpiochip_is_requested(struct gpio_chip *chip, + unsigned offset); + +/* add/remove chips */ +extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, + struct lock_class_key *lock_key, + struct lock_class_key *request_key); + +/** + * gpiochip_add_data() - register a gpio_chip + * @chip: the chip to register, with chip->base initialized + * @data: driver-private data associated with this chip + * + * Context: potentially before irqs will work + * + * When gpiochip_add_data() is called very early during boot, so that GPIOs + * can be freely used, the chip->parent device must be registered before + * the gpio framework's arch_initcall(). Otherwise sysfs initialization + * for GPIOs will fail rudely. + * + * gpiochip_add_data() must only be called after gpiolib initialization, + * ie after core_initcall(). + * + * If chip->base is negative, this requests dynamic assignment of + * a range of valid GPIOs. + * + * Returns: + * A negative errno if the chip can't be registered, such as because the + * chip->base is invalid or already associated with a different chip. + * Otherwise it returns zero as a success code. 
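+ *
+ * A registration sketch (illustrative; the "my_*" names stand for
+ * driver-provided callbacks and data, they are not part of this header):
+ *
+ *	static struct gpio_chip my_chip = {
+ *		.label			= "my-gpio",
+ *		.owner			= THIS_MODULE,
+ *		.base			= -1,	/* dynamic base assignment */
+ *		.ngpio			= 8,
+ *		.direction_input	= my_direction_input,
+ *		.direction_output	= my_direction_output,
+ *		.get			= my_get,
+ *		.set			= my_set,
+ *	};
+ *
+ *	ret = gpiochip_add_data(&my_chip, my_private_data);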
+ */ +#ifdef CONFIG_LOCKDEP +#define gpiochip_add_data(chip, data) ({ \ + static struct lock_class_key lock_key; \ + static struct lock_class_key request_key; \ + gpiochip_add_data_with_key(chip, data, &lock_key, \ + &request_key); \ + }) +#else +#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL) +#endif + +static inline int gpiochip_add(struct gpio_chip *chip) +{ + return gpiochip_add_data(chip, NULL); +} +extern void gpiochip_remove(struct gpio_chip *chip); +extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip, + void *data); +extern void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip); + +extern struct gpio_chip *gpiochip_find(void *data, + int (*match)(struct gpio_chip *chip, void *data)); + +/* lock/unlock as IRQ */ +int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); +void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset); + +/* Line status inquiry for drivers */ +bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset); + +/* Sleep persistence inquiry for drivers */ +bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset); + +/* get driver data */ +void *gpiochip_get_data(struct gpio_chip *chip); + +struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); + +struct bgpio_pdata { + const char *label; + int base; + int ngpio; +}; + +#if IS_ENABLED(CONFIG_GPIO_GENERIC) + +int bgpio_init(struct gpio_chip *gc, struct device *dev, + unsigned long sz, void __iomem *dat, void __iomem *set, + void __iomem *clr, void __iomem *dirout, void __iomem *dirin, + unsigned long flags); + +#define BGPIOF_BIG_ENDIAN BIT(0) +#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */ +#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */ +#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3) +#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ +#define BGPIOF_NO_OUTPUT BIT(5) /* only input */ + +#endif + +#ifdef CONFIG_GPIOLIB_IRQCHIP + +int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq); +void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq); + +void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int parent_irq, + irq_flow_handler_t parent_handler); + +void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int parent_irq); + +int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int first_irq, + irq_flow_handler_t handler, + unsigned int type, + bool threaded, + struct lock_class_key *lock_key, + struct lock_class_key *request_key); + +bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, + unsigned int offset); + +#ifdef CONFIG_LOCKDEP + +/* + * Lockdep requires that each irqchip instance be created with a + * unique key so as to avoid unnecessary warnings. This upfront + * boilerplate static inlines provides such a key for each + * unique instance. 
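+ *
+ * A usage sketch (illustrative; "my_chip", "my_irq_chip", "parent_irq" and
+ * "my_parent_handler" are placeholders supplied by the driver):
+ *
+ *	ret = gpiochip_irqchip_add(&my_chip, &my_irq_chip, 0,
+ *				   handle_level_irq, IRQ_TYPE_NONE);
+ *	if (ret)
+ *		return ret;
+ *	gpiochip_set_chained_irqchip(&my_chip, &my_irq_chip, parent_irq,
+ *				     my_parent_handler);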
+ */ +static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int first_irq, + irq_flow_handler_t handler, + unsigned int type) +{ + static struct lock_class_key lock_key; + static struct lock_class_key request_key; + + return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + handler, type, false, + &lock_key, &request_key); +} + +static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int first_irq, + irq_flow_handler_t handler, + unsigned int type) +{ + + static struct lock_class_key lock_key; + static struct lock_class_key request_key; + + return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + handler, type, true, + &lock_key, &request_key); +} +#else +static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int first_irq, + irq_flow_handler_t handler, + unsigned int type) +{ + return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + handler, type, false, NULL, NULL); +} + +static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int first_irq, + irq_flow_handler_t handler, + unsigned int type) +{ + return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + handler, type, true, NULL, NULL); +} +#endif /* CONFIG_LOCKDEP */ + +#endif /* CONFIG_GPIOLIB_IRQCHIP */ + +int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); +void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); +int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset, + unsigned long config); + +#ifdef CONFIG_PINCTRL + +/** + * struct gpio_pin_range - pin range controlled by a gpio chip + * @node: list for maintaining set of pin ranges, used internally + * @pctldev: pinctrl device which handles corresponding pins + * @range: actual range of pins controlled by a gpio controller + */ +struct gpio_pin_range { + struct list_head node; + struct pinctrl_dev *pctldev; + struct pinctrl_gpio_range range; +}; + +int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, + unsigned int gpio_offset, unsigned int pin_offset, + unsigned int npins); +int gpiochip_add_pingroup_range(struct gpio_chip *chip, + struct pinctrl_dev *pctldev, + unsigned int gpio_offset, const char *pin_group); +void gpiochip_remove_pin_ranges(struct gpio_chip *chip); + +#else + +static inline int +gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, + unsigned int gpio_offset, unsigned int pin_offset, + unsigned int npins) +{ + return 0; +} +static inline int +gpiochip_add_pingroup_range(struct gpio_chip *chip, + struct pinctrl_dev *pctldev, + unsigned int gpio_offset, const char *pin_group) +{ + return 0; +} + +static inline void +gpiochip_remove_pin_ranges(struct gpio_chip *chip) +{ +} + +#endif /* CONFIG_PINCTRL */ + +struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum, + const char *label); +void gpiochip_free_own_desc(struct gpio_desc *desc); + +#else /* CONFIG_GPIOLIB */ + +static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return ERR_PTR(-ENODEV); +} + +#endif /* CONFIG_GPIOLIB */ + +#endif diff --git a/include/linux/gpio/gpio-reg.h b/include/linux/gpio/gpio-reg.h new file mode 100644 index 000000000..5c6efd394 --- /dev/null +++ b/include/linux/gpio/gpio-reg.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef 
GPIO_REG_H +#define GPIO_REG_H + +struct device; +struct irq_domain; + +struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg, + int base, int num, const char *label, u32 direction, u32 def_out, + const char *const *names, struct irq_domain *irqdom, const int *irqs); + +int gpio_reg_resume(struct gpio_chip *gc); + +#endif diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h new file mode 100644 index 000000000..daa44eac9 --- /dev/null +++ b/include/linux/gpio/machine.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_GPIO_MACHINE_H +#define __LINUX_GPIO_MACHINE_H + +#include +#include + +enum gpio_lookup_flags { + GPIO_ACTIVE_HIGH = (0 << 0), + GPIO_ACTIVE_LOW = (1 << 0), + GPIO_OPEN_DRAIN = (1 << 1), + GPIO_OPEN_SOURCE = (1 << 2), + GPIO_PERSISTENT = (0 << 3), + GPIO_TRANSITORY = (1 << 3), +}; + +/** + * struct gpiod_lookup - lookup table + * @chip_label: name of the chip the GPIO belongs to + * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO + * @con_id: name of the GPIO from the device's point of view + * @idx: index of the GPIO in case several GPIOs share the same name + * @flags: mask of GPIO_* values + * + * gpiod_lookup is a lookup table for associating GPIOs to specific devices and + * functions using platform data. + */ +struct gpiod_lookup { + const char *chip_label; + u16 chip_hwnum; + const char *con_id; + unsigned int idx; + enum gpio_lookup_flags flags; +}; + +struct gpiod_lookup_table { + struct list_head list; + const char *dev_id; + struct gpiod_lookup table[]; +}; + +/** + * struct gpiod_hog - GPIO line hog table + * @chip_label: name of the chip the GPIO belongs to + * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO + * @line_name: consumer name for the hogged line + * @lflags: mask of GPIO lookup flags + * @dflags: GPIO flags used to specify the direction and value + */ +struct gpiod_hog { + struct list_head list; + const char *chip_label; + u16 chip_hwnum; + const char *line_name; + enum gpio_lookup_flags lflags; + int dflags; +}; + +/* + * Simple definition of a single GPIO under a con_id + */ +#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \ + GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags) + +/* + * Use this macro if you need to have several GPIOs under the same con_id. + * Each GPIO needs to use a different index and can be accessed using + * gpiod_get_index() + */ +#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \ +{ \ + .chip_label = _chip_label, \ + .chip_hwnum = _chip_hwnum, \ + .con_id = _con_id, \ + .idx = _idx, \ + .flags = _flags, \ +} + +/* + * Simple definition of a single GPIO hog in an array. 
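+ *
+ * Board-code sketch (illustrative; the chip label, line number and line
+ * name below are made up, and GPIOD_OUT_HIGH comes from the consumer
+ * header):
+ *
+ *	static struct gpiod_hog my_hogs[] = {
+ *		GPIO_HOG("gpio-bank0", 3, "power-enable",
+ *			 GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH),
+ *		{ },	/* sentinel */
+ *	};
+ *
+ *	gpiod_add_hogs(my_hogs);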
+ */ +#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \ +{ \ + .chip_label = _chip_label, \ + .chip_hwnum = _chip_hwnum, \ + .line_name = _line_name, \ + .lflags = _lflags, \ + .dflags = _dflags, \ +} + +#ifdef CONFIG_GPIOLIB +void gpiod_add_lookup_table(struct gpiod_lookup_table *table); +void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n); +void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); +void gpiod_add_hogs(struct gpiod_hog *hogs); +#else +static inline +void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} +static inline +void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {} +static inline +void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} +static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {} +#endif + +#endif /* __LINUX_GPIO_MACHINE_H */ diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h new file mode 100644 index 000000000..3f84aeb81 --- /dev/null +++ b/include/linux/gpio_keys.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _GPIO_KEYS_H +#define _GPIO_KEYS_H + +#include + +struct device; + +/** + * struct gpio_keys_button - configuration parameters + * @code: input event code (KEY_*, SW_*) + * @gpio: %-1 if this key does not support gpio + * @active_low: %true indicates that button is considered + * depressed when gpio is low + * @desc: label that will be attached to button's gpio + * @type: input event type (%EV_KEY, %EV_SW, %EV_ABS) + * @wakeup: configure the button as a wake-up source + * @wakeup_event_action: event action to trigger wakeup + * @debounce_interval: debounce ticks interval in msecs + * @can_disable: %true indicates that userspace is allowed to + * disable button via sysfs + * @value: axis value for %EV_ABS + * @irq: Irq number in case of interrupt keys + */ +struct gpio_keys_button { + unsigned int code; + int gpio; + int active_low; + const char *desc; + unsigned int type; + int wakeup; + int wakeup_event_action; + int debounce_interval; + bool can_disable; + int value; + unsigned int irq; +}; + +/** + * struct gpio_keys_platform_data - platform data for gpio_keys driver + * @buttons: pointer to array of &gpio_keys_button structures + * describing buttons attached to the device + * @nbuttons: number of elements in @buttons array + * @poll_interval: polling interval in msecs - for polling driver only + * @rep: enable input subsystem auto repeat + * @enable: platform hook for enabling the device + * @disable: platform hook for disabling the device + * @name: input device name + */ +struct gpio_keys_platform_data { + const struct gpio_keys_button *buttons; + int nbuttons; + unsigned int poll_interval; + unsigned int rep:1; + int (*enable)(struct device *dev); + void (*disable)(struct device *dev); + const char *name; +}; + +#endif diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h new file mode 100644 index 000000000..da0af631d --- /dev/null +++ b/include/linux/hardirq.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_HARDIRQ_H +#define LINUX_HARDIRQ_H + +#include +#include +#include +#include +#include + + +extern void synchronize_irq(unsigned int irq); +extern bool synchronize_hardirq(unsigned int irq); + +#if defined(CONFIG_TINY_RCU) + +static inline void rcu_nmi_enter(void) +{ +} + +static inline void rcu_nmi_exit(void) +{ +} + +#else +extern void rcu_nmi_enter(void); +extern void rcu_nmi_exit(void); +#endif + +/* + * It is safe to do non-atomic ops on 
->hardirq_context, + * because NMI handlers may not preempt and the ops are + * always balanced, so the interrupted value of ->hardirq_context + * will always be restored. + */ +#define __irq_enter() \ + do { \ + account_irq_enter_time(current); \ + preempt_count_add(HARDIRQ_OFFSET); \ + trace_hardirq_enter(); \ + } while (0) + +/* + * Enter irq context (on NO_HZ, update jiffies): + */ +extern void irq_enter(void); + +/* + * Exit irq context without processing softirqs: + */ +#define __irq_exit() \ + do { \ + trace_hardirq_exit(); \ + account_irq_exit_time(current); \ + preempt_count_sub(HARDIRQ_OFFSET); \ + } while (0) + +/* + * Exit irq context and process softirqs if needed: + */ +extern void irq_exit(void); + +#ifndef arch_nmi_enter +#define arch_nmi_enter() do { } while (0) +#define arch_nmi_exit() do { } while (0) +#endif + +#define nmi_enter() \ + do { \ + arch_nmi_enter(); \ + printk_nmi_enter(); \ + lockdep_off(); \ + ftrace_nmi_enter(); \ + BUG_ON(in_nmi()); \ + preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ + rcu_nmi_enter(); \ + trace_hardirq_enter(); \ + } while (0) + +#define nmi_exit() \ + do { \ + trace_hardirq_exit(); \ + rcu_nmi_exit(); \ + BUG_ON(!in_nmi()); \ + preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ + ftrace_nmi_exit(); \ + lockdep_on(); \ + printk_nmi_exit(); \ + arch_nmi_exit(); \ + } while (0) + +#endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/hash.h b/include/linux/hash.h new file mode 100644 index 000000000..ad6fa21d9 --- /dev/null +++ b/include/linux/hash.h @@ -0,0 +1,104 @@ +#ifndef _LINUX_HASH_H +#define _LINUX_HASH_H +/* Fast hashing routine for ints, longs and pointers. + (C) 2002 Nadia Yvette Chambers, IBM */ + +#include +#include + +/* + * The "GOLDEN_RATIO_PRIME" is used in ifs/btrfs/brtfs_inode.h and + * fs/inode.c. It's not actually prime any more (the previous primes + * were actively bad for hashing), but the name remains. + */ +#if BITS_PER_LONG == 32 +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32 +#define hash_long(val, bits) hash_32(val, bits) +#elif BITS_PER_LONG == 64 +#define hash_long(val, bits) hash_64(val, bits) +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64 +#else +#error Wordsize not 32 or 64 +#endif + +/* + * This hash multiplies the input by a large odd number and takes the + * high bits. Since multiplication propagates changes to the most + * significant end only, it is essential that the high bits of the + * product be used for the hash value. + * + * Chuck Lever verified the effectiveness of this technique: + * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf + * + * Although a random odd number will do, it turns out that the golden + * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice + * properties. (See Knuth vol 3, section 6.4, exercise 9.) + * + * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2, + * which is very slightly easier to multiply by and makes no + * difference to the hash distribution. + */ +#define GOLDEN_RATIO_32 0x61C88647 +#define GOLDEN_RATIO_64 0x61C8864680B583EBull + +#ifdef CONFIG_HAVE_ARCH_HASH +/* This header may use the GOLDEN_RATIO_xx constants */ +#include +#endif + +/* + * The _generic versions exist only so lib/test_hash.c can compare + * the arch-optimized versions with the generic. + * + * Note that if you change these, any that aren't updated + * to match need to have their HAVE_ARCH_* define values updated so the + * self-test will not false-positive. 
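+ *
+ * As a concrete illustration of the "multiply and take the high bits"
+ * scheme described above (matching the generic code below):
+ *
+ *	hash_32(v, bits) == (v * GOLDEN_RATIO_32) >> (32 - bits)
+ *	hash_64(v, bits) == (v * GOLDEN_RATIO_64) >> (64 - bits)   (on 64-bit)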
+ */ +#ifndef HAVE_ARCH__HASH_32 +#define __hash_32 __hash_32_generic +#endif +static inline u32 __hash_32_generic(u32 val) +{ + return val * GOLDEN_RATIO_32; +} + +#ifndef HAVE_ARCH_HASH_32 +#define hash_32 hash_32_generic +#endif +static inline u32 hash_32_generic(u32 val, unsigned int bits) +{ + /* High bits are more random, so use them. */ + return __hash_32(val) >> (32 - bits); +} + +#ifndef HAVE_ARCH_HASH_64 +#define hash_64 hash_64_generic +#endif +static __always_inline u32 hash_64_generic(u64 val, unsigned int bits) +{ +#if BITS_PER_LONG == 64 + /* 64x64-bit multiply is efficient on all 64-bit processors */ + return val * GOLDEN_RATIO_64 >> (64 - bits); +#else + /* Hash 64 bits using only 32x32-bit multiply. */ + return hash_32((u32)val ^ __hash_32(val >> 32), bits); +#endif +} + +static inline u32 hash_ptr(const void *ptr, unsigned int bits) +{ + return hash_long((unsigned long)ptr, bits); +} + +/* This really should be called fold32_ptr; it does no hashing to speak of. */ +static inline u32 hash32_ptr(const void *ptr) +{ + unsigned long val = (unsigned long)ptr; + +#if BITS_PER_LONG == 64 + val ^= (val >> 32); +#endif + return (u32)val; +} + +#endif /* _LINUX_HASH_H */ diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h new file mode 100644 index 000000000..417d2c4bc --- /dev/null +++ b/include/linux/hashtable.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Statically sized hash table implementation + * (C) 2012 Sasha Levin + */ + +#ifndef _LINUX_HASHTABLE_H +#define _LINUX_HASHTABLE_H + +#include +#include +#include +#include +#include + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +/** + * hash_init - initialize a hash table + * @hashtable: hashtable to be initialized + * + * Calculates the size of the hashtable from the given parameter, otherwise + * same as hash_init_size. + * + * This has to be a macro since HASH_BITS() will not work on pointers since + * it calculates the size during preprocessing. 
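+ *
+ * Usage sketch (illustrative; "struct my_ctx", "ctx", "obj" and its
+ * "node"/"key" fields are placeholders):
+ *
+ *	struct my_ctx {
+ *		DECLARE_HASHTABLE(table, 4);	/* 16 buckets */
+ *	};
+ *
+ *	hash_init(ctx->table);
+ *	hash_add(ctx->table, &obj->node, obj->key);
+ *	hash_for_each_possible(ctx->table, obj, node, key)
+ *		if (obj->key == key)
+ *			break;	/* found a match */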
+ */ +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +/** + * hash_add - add an object to a hashtable + * @hashtable: hashtable to add to + * @node: the &struct hlist_node of the object to be added + * @key: the key of the object to be added + */ +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +/** + * hash_add_rcu - add an object to a rcu enabled hashtable + * @hashtable: hashtable to add to + * @node: the &struct hlist_node of the object to be added + * @key: the key of the object to be added + */ +#define hash_add_rcu(hashtable, node, key) \ + hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +/** + * hash_hashed - check whether an object is in any hashtable + * @node: the &struct hlist_node of the object to be checked + */ +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +/** + * hash_empty - check whether a hashtable is empty + * @hashtable: hashtable to check + * + * This has to be a macro since HASH_BITS() will not work on pointers since + * it calculates the size during preprocessing. + */ +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +/** + * hash_del - remove an object from a hashtable + * @node: &struct hlist_node of the object to remove + */ +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} + +/** + * hash_del_rcu - remove an object from a rcu enabled hashtable + * @node: &struct hlist_node of the object to remove + */ +static inline void hash_del_rcu(struct hlist_node *node) +{ + hlist_del_init_rcu(node); +} + +/** + * hash_for_each - iterate over a hashtable + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +/** + * hash_for_each_rcu - iterate over a rcu enabled hashtable + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each_rcu(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_rcu(obj, &name[bkt], member) + +/** + * hash_for_each_safe - iterate over a hashtable safe against removal of + * hash entry + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @tmp: a &struct used for temporary storage + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +/** + * hash_for_each_possible - iterate over all possible objects hashing to the + * same bucket + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the 
struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +/** + * hash_for_each_possible_rcu - iterate over all possible objects hashing to the + * same bucket in an rcu enabled hashtable + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible_rcu(name, obj, member, key) \ + hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\ + member) + +/** + * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing + * to the same bucket in an rcu enabled hashtable in a rcu enabled hashtable + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + * + * This is the same as hash_for_each_possible_rcu() except that it does + * not do any RCU debugging or tracing. + */ +#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \ + hlist_for_each_entry_rcu_notrace(obj, \ + &name[hash_min(key, HASH_BITS(name))], member) + +/** + * hash_for_each_possible_safe - iterate over all possible objects hashing to the + * same bucket safe against removals + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @tmp: a &struct used for temporary storage + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + + +#endif diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h new file mode 100644 index 000000000..97585d967 --- /dev/null +++ b/include/linux/hdlc.h @@ -0,0 +1,118 @@ +/* + * Generic HDLC support routines for Linux + * + * Copyright (C) 1999-2005 Krzysztof Halasa + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ +#ifndef __HDLC_H +#define __HDLC_H + + +#include +#include +#include +#include + +/* This structure is a private property of HDLC protocols. 
+ Hardware drivers have no interest here */ + +struct hdlc_proto { + int (*open)(struct net_device *dev); + void (*close)(struct net_device *dev); + void (*start)(struct net_device *dev); /* if open & DCD */ + void (*stop)(struct net_device *dev); /* if open & !DCD */ + void (*detach)(struct net_device *dev); + int (*ioctl)(struct net_device *dev, struct ifreq *ifr); + __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); + int (*netif_rx)(struct sk_buff *skb); + netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); + struct module *module; + struct hdlc_proto *next; /* next protocol in the list */ +}; + + +/* Pointed to by netdev_priv(dev) */ +typedef struct hdlc_device { + /* used by HDLC layer to take control over HDLC device from hw driver*/ + int (*attach)(struct net_device *dev, + unsigned short encoding, unsigned short parity); + + /* hardware driver must handle this instead of dev->hard_start_xmit */ + netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); + + /* Things below are for HDLC layer internal use only */ + const struct hdlc_proto *proto; + int carrier; + int open; + spinlock_t state_lock; + void *state; + void *priv; +} hdlc_device; + + + +/* Exported from hdlc module */ + +/* Called by hardware driver when a user requests HDLC service */ +int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); + +/* Must be used by hardware driver on module startup/exit */ +#define register_hdlc_device(dev) register_netdev(dev) +void unregister_hdlc_device(struct net_device *dev); + + +void register_hdlc_protocol(struct hdlc_proto *proto); +void unregister_hdlc_protocol(struct hdlc_proto *proto); + +struct net_device *alloc_hdlcdev(void *priv); + +static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev) +{ + return netdev_priv(dev); +} + +static __inline__ void debug_frame(const struct sk_buff *skb) +{ + int i; + + for (i=0; i < skb->len; i++) { + if (i == 100) { + printk("...\n"); + return; + } + printk(" %02X", skb->data[i]); + } + printk("\n"); +} + + +/* Must be called by hardware driver when HDLC device is being opened */ +int hdlc_open(struct net_device *dev); +/* Must be called by hardware driver when HDLC device is being closed */ +void hdlc_close(struct net_device *dev); +/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ +netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); + +int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, + size_t size); +/* May be used by hardware driver to gain control over HDLC device */ +int detach_hdlc_protocol(struct net_device *dev); + +static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb, + struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + + skb->dev = dev; + skb_reset_mac_header(skb); + + if (hdlc->proto->type_trans) + return hdlc->proto->type_trans(skb, dev); + else + return htons(ETH_P_HDLC); +} + +#endif /* __HDLC_H */ diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h new file mode 100644 index 000000000..d4d633a49 --- /dev/null +++ b/include/linux/hdlcdrv.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * hdlcdrv.h -- HDLC packet radio network driver. 
+ * The Linux soundcard driver for 1200 baud and 9600 baud packet radio + * (C) 1996-1998 by Thomas Sailer, HB9JNX/AE4WA + */ +#ifndef _HDLCDRV_H +#define _HDLCDRV_H + + +#include +#include +#include +#include + +#define HDLCDRV_MAGIC 0x5ac6e778 +#define HDLCDRV_HDLCBUFFER 32 /* should be a power of 2 for speed reasons */ +#define HDLCDRV_BITBUFFER 256 /* should be a power of 2 for speed reasons */ +#undef HDLCDRV_LOOPBACK /* define for HDLC debugging purposes */ +#define HDLCDRV_DEBUG + +/* maximum packet length, excluding CRC */ +#define HDLCDRV_MAXFLEN 400 + + +struct hdlcdrv_hdlcbuffer { + spinlock_t lock; + unsigned rd, wr; + unsigned short buf[HDLCDRV_HDLCBUFFER]; +}; + +#ifdef HDLCDRV_DEBUG +struct hdlcdrv_bitbuffer { + unsigned int rd; + unsigned int wr; + unsigned int shreg; + unsigned char buffer[HDLCDRV_BITBUFFER]; +}; + +static inline void hdlcdrv_add_bitbuffer(struct hdlcdrv_bitbuffer *buf, + unsigned int bit) +{ + unsigned char new; + + new = buf->shreg & 1; + buf->shreg >>= 1; + buf->shreg |= (!!bit) << 7; + if (new) { + buf->buffer[buf->wr] = buf->shreg; + buf->wr = (buf->wr+1) % sizeof(buf->buffer); + buf->shreg = 0x80; + } +} + +static inline void hdlcdrv_add_bitbuffer_word(struct hdlcdrv_bitbuffer *buf, + unsigned int bits) +{ + buf->buffer[buf->wr] = bits & 0xff; + buf->wr = (buf->wr+1) % sizeof(buf->buffer); + buf->buffer[buf->wr] = (bits >> 8) & 0xff; + buf->wr = (buf->wr+1) % sizeof(buf->buffer); + +} +#endif /* HDLCDRV_DEBUG */ + +/* -------------------------------------------------------------------- */ +/* + * Information that need to be kept for each driver. + */ + +struct hdlcdrv_ops { + /* + * first some informations needed by the hdlcdrv routines + */ + const char *drvname; + const char *drvinfo; + /* + * the routines called by the hdlcdrv routines + */ + int (*open)(struct net_device *); + int (*close)(struct net_device *); + int (*ioctl)(struct net_device *, struct ifreq *, + struct hdlcdrv_ioctl *, int); +}; + +struct hdlcdrv_state { + int magic; + int opened; + + const struct hdlcdrv_ops *ops; + + struct { + int bitrate; + } par; + + struct hdlcdrv_pttoutput { + int dma2; + int seriobase; + int pariobase; + int midiiobase; + unsigned int flags; + } ptt_out; + + struct hdlcdrv_channel_params ch_params; + + struct hdlcdrv_hdlcrx { + struct hdlcdrv_hdlcbuffer hbuf; + unsigned long in_hdlc_rx; + /* 0 = sync hunt, != 0 receiving */ + int rx_state; + unsigned int bitstream; + unsigned int bitbuf; + int numbits; + unsigned char dcd; + + int len; + unsigned char *bp; + unsigned char buffer[HDLCDRV_MAXFLEN+2]; + } hdlcrx; + + struct hdlcdrv_hdlctx { + struct hdlcdrv_hdlcbuffer hbuf; + unsigned long in_hdlc_tx; + /* + * 0 = send flags + * 1 = send txtail (flags) + * 2 = send packet + */ + int tx_state; + int numflags; + unsigned int bitstream; + unsigned char ptt; + int calibrate; + int slotcnt; + + unsigned int bitbuf; + int numbits; + + int len; + unsigned char *bp; + unsigned char buffer[HDLCDRV_MAXFLEN+2]; + } hdlctx; + +#ifdef HDLCDRV_DEBUG + struct hdlcdrv_bitbuffer bitbuf_channel; + struct hdlcdrv_bitbuffer bitbuf_hdlc; +#endif /* HDLCDRV_DEBUG */ + + int ptt_keyed; + + /* queued skb for transmission */ + struct sk_buff *skb; +}; + + +/* -------------------------------------------------------------------- */ + +static inline int hdlcdrv_hbuf_full(struct hdlcdrv_hdlcbuffer *hb) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&hb->lock, flags); + ret = !((HDLCDRV_HDLCBUFFER - 1 + hb->rd - hb->wr) % HDLCDRV_HDLCBUFFER); + 
spin_unlock_irqrestore(&hb->lock, flags); + return ret; +} + +/* -------------------------------------------------------------------- */ + +static inline int hdlcdrv_hbuf_empty(struct hdlcdrv_hdlcbuffer *hb) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&hb->lock, flags); + ret = (hb->rd == hb->wr); + spin_unlock_irqrestore(&hb->lock, flags); + return ret; +} + +/* -------------------------------------------------------------------- */ + +static inline unsigned short hdlcdrv_hbuf_get(struct hdlcdrv_hdlcbuffer *hb) +{ + unsigned long flags; + unsigned short val; + unsigned newr; + + spin_lock_irqsave(&hb->lock, flags); + if (hb->rd == hb->wr) + val = 0; + else { + newr = (hb->rd+1) % HDLCDRV_HDLCBUFFER; + val = hb->buf[hb->rd]; + hb->rd = newr; + } + spin_unlock_irqrestore(&hb->lock, flags); + return val; +} + +/* -------------------------------------------------------------------- */ + +static inline void hdlcdrv_hbuf_put(struct hdlcdrv_hdlcbuffer *hb, + unsigned short val) +{ + unsigned newp; + unsigned long flags; + + spin_lock_irqsave(&hb->lock, flags); + newp = (hb->wr+1) % HDLCDRV_HDLCBUFFER; + if (newp != hb->rd) { + hb->buf[hb->wr] = val & 0xffff; + hb->wr = newp; + } + spin_unlock_irqrestore(&hb->lock, flags); +} + +/* -------------------------------------------------------------------- */ + +static inline void hdlcdrv_putbits(struct hdlcdrv_state *s, unsigned int bits) +{ + hdlcdrv_hbuf_put(&s->hdlcrx.hbuf, bits); +} + +static inline unsigned int hdlcdrv_getbits(struct hdlcdrv_state *s) +{ + unsigned int ret; + + if (hdlcdrv_hbuf_empty(&s->hdlctx.hbuf)) { + if (s->hdlctx.calibrate > 0) + s->hdlctx.calibrate--; + else + s->hdlctx.ptt = 0; + ret = 0; + } else + ret = hdlcdrv_hbuf_get(&s->hdlctx.hbuf); +#ifdef HDLCDRV_LOOPBACK + hdlcdrv_hbuf_put(&s->hdlcrx.hbuf, ret); +#endif /* HDLCDRV_LOOPBACK */ + return ret; +} + +static inline void hdlcdrv_channelbit(struct hdlcdrv_state *s, unsigned int bit) +{ +#ifdef HDLCDRV_DEBUG + hdlcdrv_add_bitbuffer(&s->bitbuf_channel, bit); +#endif /* HDLCDRV_DEBUG */ +} + +static inline void hdlcdrv_setdcd(struct hdlcdrv_state *s, int dcd) +{ + s->hdlcrx.dcd = !!dcd; +} + +static inline int hdlcdrv_ptt(struct hdlcdrv_state *s) +{ + return s->hdlctx.ptt || (s->hdlctx.calibrate > 0); +} + +/* -------------------------------------------------------------------- */ + +void hdlcdrv_receiver(struct net_device *, struct hdlcdrv_state *); +void hdlcdrv_transmitter(struct net_device *, struct hdlcdrv_state *); +void hdlcdrv_arbitrate(struct net_device *, struct hdlcdrv_state *); +struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops, + unsigned int privsize, const char *ifname, + unsigned int baseaddr, unsigned int irq, + unsigned int dma); +void hdlcdrv_unregister(struct net_device *dev); + +/* -------------------------------------------------------------------- */ + + + +#endif /* _HDLCDRV_H */ diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h new file mode 100644 index 000000000..4f3febc0f --- /dev/null +++ b/include/linux/hdmi.h @@ -0,0 +1,339 @@ +/* + * Copyright (C) 2012 Avionic Design GmbH + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the 
following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __LINUX_HDMI_H_ +#define __LINUX_HDMI_H_ + +#include +#include + +enum hdmi_infoframe_type { + HDMI_INFOFRAME_TYPE_VENDOR = 0x81, + HDMI_INFOFRAME_TYPE_AVI = 0x82, + HDMI_INFOFRAME_TYPE_SPD = 0x83, + HDMI_INFOFRAME_TYPE_AUDIO = 0x84, +}; + +#define HDMI_IEEE_OUI 0x000c03 +#define HDMI_FORUM_IEEE_OUI 0xc45dd8 +#define HDMI_INFOFRAME_HEADER_SIZE 4 +#define HDMI_AVI_INFOFRAME_SIZE 13 +#define HDMI_SPD_INFOFRAME_SIZE 25 +#define HDMI_AUDIO_INFOFRAME_SIZE 10 + +#define HDMI_INFOFRAME_SIZE(type) \ + (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE) + +struct hdmi_any_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; +}; + +enum hdmi_colorspace { + HDMI_COLORSPACE_RGB, + HDMI_COLORSPACE_YUV422, + HDMI_COLORSPACE_YUV444, + HDMI_COLORSPACE_YUV420, + HDMI_COLORSPACE_RESERVED4, + HDMI_COLORSPACE_RESERVED5, + HDMI_COLORSPACE_RESERVED6, + HDMI_COLORSPACE_IDO_DEFINED, +}; + +enum hdmi_scan_mode { + HDMI_SCAN_MODE_NONE, + HDMI_SCAN_MODE_OVERSCAN, + HDMI_SCAN_MODE_UNDERSCAN, + HDMI_SCAN_MODE_RESERVED, +}; + +enum hdmi_colorimetry { + HDMI_COLORIMETRY_NONE, + HDMI_COLORIMETRY_ITU_601, + HDMI_COLORIMETRY_ITU_709, + HDMI_COLORIMETRY_EXTENDED, +}; + +enum hdmi_picture_aspect { + HDMI_PICTURE_ASPECT_NONE, + HDMI_PICTURE_ASPECT_4_3, + HDMI_PICTURE_ASPECT_16_9, + HDMI_PICTURE_ASPECT_64_27, + HDMI_PICTURE_ASPECT_256_135, + HDMI_PICTURE_ASPECT_RESERVED, +}; + +enum hdmi_active_aspect { + HDMI_ACTIVE_ASPECT_16_9_TOP = 2, + HDMI_ACTIVE_ASPECT_14_9_TOP = 3, + HDMI_ACTIVE_ASPECT_16_9_CENTER = 4, + HDMI_ACTIVE_ASPECT_PICTURE = 8, + HDMI_ACTIVE_ASPECT_4_3 = 9, + HDMI_ACTIVE_ASPECT_16_9 = 10, + HDMI_ACTIVE_ASPECT_14_9 = 11, + HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13, + HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14, + HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15, +}; + +enum hdmi_extended_colorimetry { + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601, + HDMI_EXTENDED_COLORIMETRY_XV_YCC_709, + HDMI_EXTENDED_COLORIMETRY_S_YCC_601, + HDMI_EXTENDED_COLORIMETRY_OPYCC_601, + HDMI_EXTENDED_COLORIMETRY_OPRGB, + + /* The following EC values are only defined in CEA-861-F. 
*/ + HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM, + HDMI_EXTENDED_COLORIMETRY_BT2020, + HDMI_EXTENDED_COLORIMETRY_RESERVED, +}; + +enum hdmi_quantization_range { + HDMI_QUANTIZATION_RANGE_DEFAULT, + HDMI_QUANTIZATION_RANGE_LIMITED, + HDMI_QUANTIZATION_RANGE_FULL, + HDMI_QUANTIZATION_RANGE_RESERVED, +}; + +/* non-uniform picture scaling */ +enum hdmi_nups { + HDMI_NUPS_UNKNOWN, + HDMI_NUPS_HORIZONTAL, + HDMI_NUPS_VERTICAL, + HDMI_NUPS_BOTH, +}; + +enum hdmi_ycc_quantization_range { + HDMI_YCC_QUANTIZATION_RANGE_LIMITED, + HDMI_YCC_QUANTIZATION_RANGE_FULL, +}; + +enum hdmi_content_type { + HDMI_CONTENT_TYPE_GRAPHICS, + HDMI_CONTENT_TYPE_PHOTO, + HDMI_CONTENT_TYPE_CINEMA, + HDMI_CONTENT_TYPE_GAME, +}; + +struct hdmi_avi_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + enum hdmi_colorspace colorspace; + enum hdmi_scan_mode scan_mode; + enum hdmi_colorimetry colorimetry; + enum hdmi_picture_aspect picture_aspect; + enum hdmi_active_aspect active_aspect; + bool itc; + enum hdmi_extended_colorimetry extended_colorimetry; + enum hdmi_quantization_range quantization_range; + enum hdmi_nups nups; + unsigned char video_code; + enum hdmi_ycc_quantization_range ycc_quantization_range; + enum hdmi_content_type content_type; + unsigned char pixel_repeat; + unsigned short top_bar; + unsigned short bottom_bar; + unsigned short left_bar; + unsigned short right_bar; +}; + +int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame); +ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, + size_t size); + +enum hdmi_spd_sdi { + HDMI_SPD_SDI_UNKNOWN, + HDMI_SPD_SDI_DSTB, + HDMI_SPD_SDI_DVDP, + HDMI_SPD_SDI_DVHS, + HDMI_SPD_SDI_HDDVR, + HDMI_SPD_SDI_DVC, + HDMI_SPD_SDI_DSC, + HDMI_SPD_SDI_VCD, + HDMI_SPD_SDI_GAME, + HDMI_SPD_SDI_PC, + HDMI_SPD_SDI_BD, + HDMI_SPD_SDI_SACD, + HDMI_SPD_SDI_HDDVD, + HDMI_SPD_SDI_PMP, +}; + +struct hdmi_spd_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + char vendor[8]; + char product[16]; + enum hdmi_spd_sdi sdi; +}; + +int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame, + const char *vendor, const char *product); +ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer, + size_t size); + +enum hdmi_audio_coding_type { + HDMI_AUDIO_CODING_TYPE_STREAM, + HDMI_AUDIO_CODING_TYPE_PCM, + HDMI_AUDIO_CODING_TYPE_AC3, + HDMI_AUDIO_CODING_TYPE_MPEG1, + HDMI_AUDIO_CODING_TYPE_MP3, + HDMI_AUDIO_CODING_TYPE_MPEG2, + HDMI_AUDIO_CODING_TYPE_AAC_LC, + HDMI_AUDIO_CODING_TYPE_DTS, + HDMI_AUDIO_CODING_TYPE_ATRAC, + HDMI_AUDIO_CODING_TYPE_DSD, + HDMI_AUDIO_CODING_TYPE_EAC3, + HDMI_AUDIO_CODING_TYPE_DTS_HD, + HDMI_AUDIO_CODING_TYPE_MLP, + HDMI_AUDIO_CODING_TYPE_DST, + HDMI_AUDIO_CODING_TYPE_WMA_PRO, + HDMI_AUDIO_CODING_TYPE_CXT, +}; + +enum hdmi_audio_sample_size { + HDMI_AUDIO_SAMPLE_SIZE_STREAM, + HDMI_AUDIO_SAMPLE_SIZE_16, + HDMI_AUDIO_SAMPLE_SIZE_20, + HDMI_AUDIO_SAMPLE_SIZE_24, +}; + +enum hdmi_audio_sample_frequency { + HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM, + HDMI_AUDIO_SAMPLE_FREQUENCY_32000, + HDMI_AUDIO_SAMPLE_FREQUENCY_44100, + HDMI_AUDIO_SAMPLE_FREQUENCY_48000, + HDMI_AUDIO_SAMPLE_FREQUENCY_88200, + HDMI_AUDIO_SAMPLE_FREQUENCY_96000, + HDMI_AUDIO_SAMPLE_FREQUENCY_176400, + HDMI_AUDIO_SAMPLE_FREQUENCY_192000, +}; + +enum hdmi_audio_coding_type_ext { + /* Refer to Audio Coding Type (CT) field in Data Byte 1 */ + HDMI_AUDIO_CODING_TYPE_EXT_CT, + + /* + * The next three CXT values are defined in CEA-861-E only. 
+ * They do not exist in older versions, and in CEA-861-F they are + * defined as 'Not in use'. + */ + HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC, + HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND, + + /* The following CXT values are only defined in CEA-861-F. */ + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC, + HDMI_AUDIO_CODING_TYPE_EXT_DRA, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10, +}; + +struct hdmi_audio_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned char channels; + enum hdmi_audio_coding_type coding_type; + enum hdmi_audio_sample_size sample_size; + enum hdmi_audio_sample_frequency sample_frequency; + enum hdmi_audio_coding_type_ext coding_type_ext; + unsigned char channel_allocation; + unsigned char level_shift_value; + bool downmix_inhibit; + +}; + +int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame); +ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, + void *buffer, size_t size); + +enum hdmi_3d_structure { + HDMI_3D_STRUCTURE_INVALID = -1, + HDMI_3D_STRUCTURE_FRAME_PACKING = 0, + HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE, + HDMI_3D_STRUCTURE_LINE_ALTERNATIVE, + HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL, + HDMI_3D_STRUCTURE_L_DEPTH, + HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH, + HDMI_3D_STRUCTURE_TOP_AND_BOTTOM, + HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8, +}; + + +struct hdmi_vendor_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned int oui; + u8 vic; + enum hdmi_3d_structure s3d_struct; + unsigned int s3d_ext_data; +}; + +int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame); +ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, + void *buffer, size_t size); + +union hdmi_vendor_any_infoframe { + struct { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned int oui; + } any; + struct hdmi_vendor_infoframe hdmi; +}; + +/** + * union hdmi_infoframe - overall union of all abstract infoframe representations + * @any: generic infoframe + * @avi: avi infoframe + * @spd: spd infoframe + * @vendor: union of all vendor infoframes + * @audio: audio infoframe + * + * This is used by the generic pack function. This works since all infoframes + * have the same header which also indicates which type of infoframe should be + * packed. 
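 *
 * As a rough usage sketch (not taken from this header; the buffer size and
 * the choice of fields are illustrative assumptions), a driver can fill one
 * type-specific view and pack it:
 *
 *	struct hdmi_avi_infoframe avi;
 *	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];	// 4 byte header + 13 byte payload
 *
 *	hdmi_avi_infoframe_init(&avi);
 *	avi.colorspace = HDMI_COLORSPACE_RGB;
 *	avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
 *	hdmi_avi_infoframe_pack(&avi, buf, sizeof(buf));
 *
 * hdmi_infoframe_pack() does the same for a union hdmi_infoframe, switching
 * on the type stored in the common header.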
+ */ +union hdmi_infoframe { + struct hdmi_any_infoframe any; + struct hdmi_avi_infoframe avi; + struct hdmi_spd_infoframe spd; + union hdmi_vendor_any_infoframe vendor; + struct hdmi_audio_infoframe audio; +}; + +ssize_t +hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size); +int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer); +void hdmi_infoframe_log(const char *level, struct device *dev, + union hdmi_infoframe *frame); + +#endif /* _DRM_HDMI_H */ diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h new file mode 100644 index 000000000..2d6100edf --- /dev/null +++ b/include/linux/hid-debug.h @@ -0,0 +1,66 @@ +#ifndef __HID_DEBUG_H +#define __HID_DEBUG_H + +/* + * Copyright (c) 2007-2009 Jiri Kosina + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifdef CONFIG_DEBUG_FS + +#include + +#define HID_DEBUG_BUFSIZE 512 +#define HID_DEBUG_FIFOSIZE 512 + +void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); +void hid_dump_report(struct hid_device *, int , u8 *, int); +void hid_dump_device(struct hid_device *, struct seq_file *); +void hid_dump_field(struct hid_field *, int, struct seq_file *); +char *hid_resolv_usage(unsigned, struct seq_file *); +void hid_debug_register(struct hid_device *, const char *); +void hid_debug_unregister(struct hid_device *); +void hid_debug_init(void); +void hid_debug_exit(void); +void hid_debug_event(struct hid_device *, char *); + +struct hid_debug_list { + DECLARE_KFIFO_PTR(hid_debug_fifo, char); + struct fasync_struct *fasync; + struct hid_device *hdev; + struct list_head node; + struct mutex read_mutex; +}; + +#else + +#define hid_dump_input(a,b,c) do { } while (0) +#define hid_dump_report(a,b,c,d) do { } while (0) +#define hid_dump_device(a,b) do { } while (0) +#define hid_dump_field(a,b,c) do { } while (0) +#define hid_resolv_usage(a,b) do { } while (0) +#define hid_debug_register(a, b) do { } while (0) +#define hid_debug_unregister(a) do { } while (0) +#define hid_debug_init() do { } while (0) +#define hid_debug_exit() do { } while (0) +#define hid_debug_event(a,b) do { } while (0) + +#endif + +#endif diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h new file mode 100644 index 000000000..24e1ca01f --- /dev/null +++ b/include/linux/hid-roccat.h @@ -0,0 +1,29 @@ +#ifndef __HID_ROCCAT_H +#define __HID_ROCCAT_H + +/* + * Copyright (c) 2010 Stefan Achatz + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include +#include + +#define ROCCATIOCGREPSIZE _IOR('H', 0xf1, int) + +#ifdef __KERNEL__ + +int roccat_connect(struct class *klass, struct hid_device *hid, + int report_size); +void roccat_disconnect(int minor); +int roccat_report_event(int minor, u8 const *data); + +#endif + +#endif diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h new file mode 100644 index 000000000..dc12f5c4b --- /dev/null +++ b/include/linux/hid-sensor-hub.h @@ -0,0 +1,288 @@ +/* + * HID Sensors Driver + * Copyright (c) 2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ +#ifndef _HID_SENSORS_HUB_H +#define _HID_SENSORS_HUB_H + +#include +#include +#include +#include + +/** + * struct hid_sensor_hub_attribute_info - Attribute info + * @usage_id: Parent usage id of a physical device. + * @attrib_id: Attribute id for this attribute. + * @report_id: Report id in which this information resides. + * @index: Field index in the report. + * @units: Measurment unit for this attribute. + * @unit_expo: Exponent used in the data. + * @size: Size in bytes for data size. + * @logical_minimum: Logical minimum value for this attribute. + * @logical_maximum: Logical maximum value for this attribute. + */ +struct hid_sensor_hub_attribute_info { + u32 usage_id; + u32 attrib_id; + s32 report_id; + s32 index; + s32 units; + s32 unit_expo; + s32 size; + s32 logical_minimum; + s32 logical_maximum; +}; + +/** + * struct sensor_hub_pending - Synchronous read pending information + * @status: Pending status true/false. + * @ready: Completion synchronization data. + * @usage_id: Usage id for physical device, E.g. Gyro usage id. + * @attr_usage_id: Usage Id of a field, E.g. X-AXIS for a gyro. + * @raw_size: Response size for a read request. + * @raw_data: Place holder for received response. + */ +struct sensor_hub_pending { + bool status; + struct completion ready; + u32 usage_id; + u32 attr_usage_id; + int raw_size; + u8 *raw_data; +}; + +/** + * struct hid_sensor_hub_device - Stores the hub instance data + * @hdev: Stores the hid instance. + * @vendor_id: Vendor id of hub device. + * @product_id: Product id of hub device. + * @usage: Usage id for this hub device instance. + * @start_collection_index: Starting index for a phy type collection + * @end_collection_index: Last index for a phy type collection + * @mutex_ptr: synchronizing mutex pointer. + * @pending: Holds information of pending sync read request. + */ +struct hid_sensor_hub_device { + struct hid_device *hdev; + u32 vendor_id; + u32 product_id; + u32 usage; + int start_collection_index; + int end_collection_index; + struct mutex *mutex_ptr; + struct sensor_hub_pending pending; +}; + +/** + * struct hid_sensor_hub_callbacks - Client callback functions + * @pdev: Platform device instance of the client driver. + * @suspend: Suspend callback. + * @resume: Resume callback. 
+ * @capture_sample: Callback to get a sample. + * @send_event: Send notification to indicate all samples are + * captured, process and send event + */ +struct hid_sensor_hub_callbacks { + struct platform_device *pdev; + int (*suspend)(struct hid_sensor_hub_device *hsdev, void *priv); + int (*resume)(struct hid_sensor_hub_device *hsdev, void *priv); + int (*capture_sample)(struct hid_sensor_hub_device *hsdev, + u32 usage_id, size_t raw_len, char *raw_data, + void *priv); + int (*send_event)(struct hid_sensor_hub_device *hsdev, u32 usage_id, + void *priv); +}; + +/** +* sensor_hub_device_open() - Open hub device +* @hsdev: Hub device instance. +* +* Used to open hid device for sensor hub. +*/ +int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev); + +/** +* sensor_hub_device_clode() - Close hub device +* @hsdev: Hub device instance. +* +* Used to clode hid device for sensor hub. +*/ +void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev); + +/* Registration functions */ + +/** +* sensor_hub_register_callback() - Register client callbacks +* @hsdev: Hub device instance. +* @usage_id: Usage id of the client (E.g. 0x200076 for Gyro). +* @usage_callback: Callback function storage +* +* Used to register callbacks by client processing drivers. Sensor +* hub core driver will call these callbacks to offload processing +* of data streams and notifications. +*/ +int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev, + u32 usage_id, + struct hid_sensor_hub_callbacks *usage_callback); + +/** +* sensor_hub_remove_callback() - Remove client callbacks +* @hsdev: Hub device instance. +* @usage_id: Usage id of the client (E.g. 0x200076 for Gyro). +* +* If there is a callback registred, this call will remove that +* callbacks, so that it will stop data and event notifications. +*/ +int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev, + u32 usage_id); + + +/* Hid sensor hub core interfaces */ + +/** +* sensor_hub_input_get_attribute_info() - Get an attribute information +* @hsdev: Hub device instance. +* @type: Type of this attribute, input/output/feature +* @usage_id: Attribute usage id of parent physical device as per spec +* @attr_usage_id: Attribute usage id as per spec +* @info: return information about attribute after parsing report +* +* Parses report and returns the attribute information such as report id, +* field index, units and exponet etc. +*/ +int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, + u8 type, + u32 usage_id, u32 attr_usage_id, + struct hid_sensor_hub_attribute_info *info); + +/** +* sensor_hub_input_attr_get_raw_value() - Synchronous read request +* @hsdev: Hub device instance. +* @usage_id: Attribute usage id of parent physical device as per spec +* @attr_usage_id: Attribute usage id as per spec +* @report_id: Report id to look for +* @flag: Synchronous or asynchronous read +* @is_signed: If true then fields < 32 bits will be sign-extended +* +* Issues a synchronous or asynchronous read request for an input attribute. +* Returns data upto 32 bits. +*/ + +enum sensor_hub_read_flags { + SENSOR_HUB_SYNC, + SENSOR_HUB_ASYNC, +}; + +int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, + u32 usage_id, + u32 attr_usage_id, u32 report_id, + enum sensor_hub_read_flags flag, + bool is_signed +); + +/** +* sensor_hub_set_feature() - Feature set request +* @hsdev: Hub device instance. 
+* @report_id: Report id to look for +* @field_index: Field index inside a report +* @buffer_size: size of the buffer +* @buffer: buffer to use in the feature set +* +* Used to set a field in feature report. For example this can set polling +* interval, sensitivity, activate/deactivate state. +*/ +int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, + u32 field_index, int buffer_size, void *buffer); + +/** +* sensor_hub_get_feature() - Feature get request +* @hsdev: Hub device instance. +* @report_id: Report id to look for +* @field_index: Field index inside a report +* @buffer_size: size of the buffer +* @buffer: buffer to copy output +* +* Used to get a field in feature report. For example this can get polling +* interval, sensitivity, activate/deactivate state. On success it returns +* number of bytes copied to buffer. On failure, it returns value < 0. +*/ +int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, + u32 field_index, int buffer_size, void *buffer); + +/* hid-sensor-attributes */ + +/* Common hid sensor iio structure */ +struct hid_sensor_common { + struct hid_sensor_hub_device *hsdev; + struct platform_device *pdev; + unsigned usage_id; + atomic_t data_ready; + atomic_t user_requested_state; + atomic_t runtime_pm_enable; + int poll_interval; + int raw_hystersis; + int latency_ms; + struct iio_trigger *trigger; + int timestamp_ns_scale; + struct hid_sensor_hub_attribute_info poll; + struct hid_sensor_hub_attribute_info report_state; + struct hid_sensor_hub_attribute_info power_state; + struct hid_sensor_hub_attribute_info sensitivity; + struct hid_sensor_hub_attribute_info report_latency; + struct work_struct work; +}; + +/* Convert from hid unit expo to regular exponent */ +static inline int hid_sensor_convert_exponent(int unit_expo) +{ + if (unit_expo < 0x08) + return unit_expo; + else if (unit_expo <= 0x0f) + return -(0x0f-unit_expo+1); + else + return 0; +} + +int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, + u32 usage_id, + struct hid_sensor_common *st); +int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st, + int val1, int val2); +int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st, + int *val1, int *val2); +int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st, + int val1, int val2); +int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st, + int *val1, int *val2); + +int hid_sensor_get_usage_index(struct hid_sensor_hub_device *hsdev, + u32 report_id, int field_index, u32 usage_id); + +int hid_sensor_format_scale(u32 usage_id, + struct hid_sensor_hub_attribute_info *attr_info, + int *val0, int *val1); + +s32 hid_sensor_read_poll_value(struct hid_sensor_common *st); + +int64_t hid_sensor_convert_timestamp(struct hid_sensor_common *st, + int64_t raw_value); +bool hid_sensor_batch_mode_supported(struct hid_sensor_common *st); +int hid_sensor_set_report_latency(struct hid_sensor_common *st, int latency); +int hid_sensor_get_report_latency(struct hid_sensor_common *st); + +#endif diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h new file mode 100644 index 000000000..76033e042 --- /dev/null +++ b/include/linux/hid-sensor-ids.h @@ -0,0 +1,174 @@ +/* + * HID Sensors Driver + * Copyright (c) 2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
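As a loose illustration of the sensor hub client interfaces documented above (the mysens_* names are invented, the usage ids are the Accel 3D values from hid-sensor-ids.h below, the report id is left at 0 where a real driver would take it from sensor_hub_input_get_attribute_info(), and error handling is trimmed), a client driver might do:

#include <linux/hid-sensor-hub.h>
#include <linux/hid-sensor-ids.h>
#include <linux/platform_device.h>

static int mysens_capture_sample(struct hid_sensor_hub_device *hsdev,
                                 u32 usage_id, size_t raw_len,
                                 char *raw_data, void *priv)
{
        /* copy raw_data into the client's own buffer or IIO machinery here */
        return 0;
}

static struct hid_sensor_hub_callbacks mysens_callbacks = {
        .capture_sample = mysens_capture_sample,
};

static int mysens_start(struct platform_device *pdev,
                        struct hid_sensor_hub_device *hsdev)
{
        int ret, x;

        mysens_callbacks.pdev = pdev;
        ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D,
                                           &mysens_callbacks);
        if (ret)
                return ret;

        /* one-shot synchronous read of the X axis attribute; the return
         * value is the raw (sign-extended) sample */
        x = sensor_hub_input_attr_get_raw_value(hsdev,
                                                HID_USAGE_SENSOR_ACCEL_3D,
                                                HID_USAGE_SENSOR_ACCEL_X_AXIS,
                                                0 /* report id, see above */,
                                                SENSOR_HUB_SYNC, true);
        (void)x;
        return 0;
}

static void mysens_stop(struct hid_sensor_hub_device *hsdev)
{
        sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D);
}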
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ +#ifndef _HID_SENSORS_IDS_H +#define _HID_SENSORS_IDS_H + +#define HID_MAX_PHY_DEVICES 0xFF + +#define HID_USAGE_SENSOR_COLLECTION 0x200001 + +/* Accel 3D (200073) */ +#define HID_USAGE_SENSOR_ACCEL_3D 0x200073 +#define HID_USAGE_SENSOR_DATA_ACCELERATION 0x200452 +#define HID_USAGE_SENSOR_ACCEL_X_AXIS 0x200453 +#define HID_USAGE_SENSOR_ACCEL_Y_AXIS 0x200454 +#define HID_USAGE_SENSOR_ACCEL_Z_AXIS 0x200455 + +/* ALS (200041) */ +#define HID_USAGE_SENSOR_ALS 0x200041 +#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0 +#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1 + +/* PROX (200011) */ +#define HID_USAGE_SENSOR_PROX 0x200011 +#define HID_USAGE_SENSOR_DATA_PRESENCE 0x2004b0 +#define HID_USAGE_SENSOR_HUMAN_PRESENCE 0x2004b1 + +/* Pressure (200031) */ +#define HID_USAGE_SENSOR_PRESSURE 0x200031 +#define HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE 0x200430 +#define HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE 0x200431 + +/* Tempreture (200033) */ +#define HID_USAGE_SENSOR_TEMPERATURE 0x200033 +#define HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE 0x200434 + +/* humidity */ +#define HID_USAGE_SENSOR_HUMIDITY 0x200032 +#define HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY 0x200433 + +/* Gyro 3D: (200076) */ +#define HID_USAGE_SENSOR_GYRO_3D 0x200076 +#define HID_USAGE_SENSOR_DATA_ANGL_VELOCITY 0x200456 +#define HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS 0x200457 +#define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458 +#define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459 + +/* Gravity vector */ +#define HID_USAGE_SENSOR_GRAVITY_VECTOR 0x20007B + +/* ORIENTATION: Compass 3D: (200083) */ +#define HID_USAGE_SENSOR_COMPASS_3D 0x200083 +#define HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470 +#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING 0x200471 +#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_X 0x200472 +#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_Y 0x200473 +#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_Z 0x200474 + +#define HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH 0x200475 +#define HID_USAGE_SENSOR_ORIENT_COMP_TRUE_NORTH 0x200476 +#define HID_USAGE_SENSOR_ORIENT_MAGN_NORTH 0x200477 +#define HID_USAGE_SENSOR_ORIENT_TRUE_NORTH 0x200478 + +#define HID_USAGE_SENSOR_ORIENT_DISTANCE 0x200479 +#define HID_USAGE_SENSOR_ORIENT_DISTANCE_X 0x20047A +#define HID_USAGE_SENSOR_ORIENT_DISTANCE_Y 0x20047B +#define HID_USAGE_SENSOR_ORIENT_DISTANCE_Z 0x20047C +#define HID_USAGE_SENSOR_ORIENT_DISTANCE_OUT_OF_RANGE 0x20047D + +/* ORIENTATION: Inclinometer 3D: (200086) */ +#define HID_USAGE_SENSOR_INCLINOMETER_3D 0x200086 +#define HID_USAGE_SENSOR_ORIENT_TILT 0x20047E +#define HID_USAGE_SENSOR_ORIENT_TILT_X 0x20047F +#define HID_USAGE_SENSOR_ORIENT_TILT_Y 0x200480 +#define HID_USAGE_SENSOR_ORIENT_TILT_Z 0x200481 + +#define HID_USAGE_SENSOR_DEVICE_ORIENTATION 0x20008A +#define HID_USAGE_SENSOR_RELATIVE_ORIENTATION 0x20008E +#define HID_USAGE_SENSOR_GEOMAGNETIC_ORIENTATION 0x2000C1 +#define HID_USAGE_SENSOR_ORIENT_ROTATION_MATRIX 0x200482 +#define HID_USAGE_SENSOR_ORIENT_QUATERNION 0x200483 +#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX 0x200484 + +#define 
HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_X_AXIS 0x200485 +#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Y_AXIS 0x200486 +#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Z_AXIS 0x200487 + +/* Time (2000a0) */ +#define HID_USAGE_SENSOR_TIME 0x2000a0 +#define HID_USAGE_SENSOR_TIME_YEAR 0x200521 +#define HID_USAGE_SENSOR_TIME_MONTH 0x200522 +#define HID_USAGE_SENSOR_TIME_DAY 0x200523 +#define HID_USAGE_SENSOR_TIME_HOUR 0x200525 +#define HID_USAGE_SENSOR_TIME_MINUTE 0x200526 +#define HID_USAGE_SENSOR_TIME_SECOND 0x200527 +#define HID_USAGE_SENSOR_TIME_TIMESTAMP 0x200529 + +/* Units */ +#define HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED 0x00 +#define HID_USAGE_SENSOR_UNITS_LUX 0x01 +#define HID_USAGE_SENSOR_UNITS_KELVIN 0x01000100 +#define HID_USAGE_SENSOR_UNITS_FAHRENHEIT 0x03000100 +#define HID_USAGE_SENSOR_UNITS_PASCAL 0xF1E1 +#define HID_USAGE_SENSOR_UNITS_NEWTON 0x11E1 +#define HID_USAGE_SENSOR_UNITS_METERS_PER_SECOND 0x11F0 +#define HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD 0x11E0 +#define HID_USAGE_SENSOR_UNITS_FARAD 0xE14F2000 +#define HID_USAGE_SENSOR_UNITS_AMPERE 0x01001000 +#define HID_USAGE_SENSOR_UNITS_WATT 0x21d1 +#define HID_USAGE_SENSOR_UNITS_HENRY 0x21E1E000 +#define HID_USAGE_SENSOR_UNITS_OHM 0x21D1E000 +#define HID_USAGE_SENSOR_UNITS_VOLT 0x21D1F000 +#define HID_USAGE_SENSOR_UNITS_HERTZ 0x01F0 +#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SEC_SQRD 0x14E0 +#define HID_USAGE_SENSOR_UNITS_RADIANS 0x12 +#define HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND 0x12F0 +#define HID_USAGE_SENSOR_UNITS_RADIANS_PER_SEC_SQRD 0x12E0 +#define HID_USAGE_SENSOR_UNITS_SECOND 0x0110 +#define HID_USAGE_SENSOR_UNITS_GAUSS 0x01E1F000 +#define HID_USAGE_SENSOR_UNITS_GRAM 0x0101 +#define HID_USAGE_SENSOR_UNITS_CENTIMETER 0x11 +#define HID_USAGE_SENSOR_UNITS_G 0x1A +#define HID_USAGE_SENSOR_UNITS_MILLISECOND 0x19 +#define HID_USAGE_SENSOR_UNITS_PERCENT 0x17 +#define HID_USAGE_SENSOR_UNITS_DEGREES 0x14 +#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15 + +/* Common selectors */ +#define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E +#define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F +#define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310 +#define HID_USAGE_SENSOR_PROP_SENSITIVITY_REL_PCT 0x200311 +#define HID_USAGE_SENSOR_PROP_ACCURACY 0x200312 +#define HID_USAGE_SENSOR_PROP_RESOLUTION 0x200313 +#define HID_USAGE_SENSOR_PROP_RANGE_MAXIMUM 0x200314 +#define HID_USAGE_SENSOR_PROP_RANGE_MINIMUM 0x200315 +#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316 +#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319 + +/* Batch mode selectors */ +#define HID_USAGE_SENSOR_PROP_REPORT_LATENCY 0x20031B + +/* Per data field properties */ +#define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00 +#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000 + +/* Power state enumerations */ +#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850 +#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x200851 +#define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x200852 +#define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x200853 +#define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x200854 +#define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x200855 + +/* Report State enumerations */ +#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840 +#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841 + +#endif diff --git a/include/linux/hid.h b/include/linux/hid.h new file mode 100644 index 000000000..c51ebce21 --- /dev/null +++ b/include/linux/hid.h @@ 
-0,0 +1,1203 @@ +/* + * Copyright (c) 1999 Andreas Gal + * Copyright (c) 2000-2001 Vojtech Pavlik + * Copyright (c) 2006-2007 Jiri Kosina + */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Should you need to contact me, the author, you can do so either by + * e-mail - mail your message to , or by paper mail: + * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic + */ +#ifndef __HID_H +#define __HID_H + + +#include +#include +#include +#include +#include /* hid_device_id */ +#include +#include +#include +#include +#include +#include +#include + +/* + * We parse each description item into this structure. Short items data + * values are expanded to 32-bit signed int, long items contain a pointer + * into the data area. + */ + +struct hid_item { + unsigned format; + __u8 size; + __u8 type; + __u8 tag; + union { + __u8 u8; + __s8 s8; + __u16 u16; + __s16 s16; + __u32 u32; + __s32 s32; + __u8 *longdata; + } data; +}; + +/* + * HID report item format + */ + +#define HID_ITEM_FORMAT_SHORT 0 +#define HID_ITEM_FORMAT_LONG 1 + +/* + * Special tag indicating long items + */ + +#define HID_ITEM_TAG_LONG 15 + +/* + * HID report descriptor item type (prefix bit 2,3) + */ + +#define HID_ITEM_TYPE_MAIN 0 +#define HID_ITEM_TYPE_GLOBAL 1 +#define HID_ITEM_TYPE_LOCAL 2 +#define HID_ITEM_TYPE_RESERVED 3 + +/* + * HID report descriptor main item tags + */ + +#define HID_MAIN_ITEM_TAG_INPUT 8 +#define HID_MAIN_ITEM_TAG_OUTPUT 9 +#define HID_MAIN_ITEM_TAG_FEATURE 11 +#define HID_MAIN_ITEM_TAG_BEGIN_COLLECTION 10 +#define HID_MAIN_ITEM_TAG_END_COLLECTION 12 + +/* + * HID report descriptor main item contents + */ + +#define HID_MAIN_ITEM_CONSTANT 0x001 +#define HID_MAIN_ITEM_VARIABLE 0x002 +#define HID_MAIN_ITEM_RELATIVE 0x004 +#define HID_MAIN_ITEM_WRAP 0x008 +#define HID_MAIN_ITEM_NONLINEAR 0x010 +#define HID_MAIN_ITEM_NO_PREFERRED 0x020 +#define HID_MAIN_ITEM_NULL_STATE 0x040 +#define HID_MAIN_ITEM_VOLATILE 0x080 +#define HID_MAIN_ITEM_BUFFERED_BYTE 0x100 + +/* + * HID report descriptor collection item types + */ + +#define HID_COLLECTION_PHYSICAL 0 +#define HID_COLLECTION_APPLICATION 1 +#define HID_COLLECTION_LOGICAL 2 + +/* + * HID report descriptor global item tags + */ + +#define HID_GLOBAL_ITEM_TAG_USAGE_PAGE 0 +#define HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM 1 +#define HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM 2 +#define HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM 3 +#define HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM 4 +#define HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT 5 +#define HID_GLOBAL_ITEM_TAG_UNIT 6 +#define HID_GLOBAL_ITEM_TAG_REPORT_SIZE 7 +#define HID_GLOBAL_ITEM_TAG_REPORT_ID 8 +#define HID_GLOBAL_ITEM_TAG_REPORT_COUNT 9 +#define HID_GLOBAL_ITEM_TAG_PUSH 10 +#define HID_GLOBAL_ITEM_TAG_POP 11 + +/* + * HID report descriptor local item tags + */ + +#define HID_LOCAL_ITEM_TAG_USAGE 0 +#define HID_LOCAL_ITEM_TAG_USAGE_MINIMUM 1 +#define 
HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM 2 +#define HID_LOCAL_ITEM_TAG_DESIGNATOR_INDEX 3 +#define HID_LOCAL_ITEM_TAG_DESIGNATOR_MINIMUM 4 +#define HID_LOCAL_ITEM_TAG_DESIGNATOR_MAXIMUM 5 +#define HID_LOCAL_ITEM_TAG_STRING_INDEX 7 +#define HID_LOCAL_ITEM_TAG_STRING_MINIMUM 8 +#define HID_LOCAL_ITEM_TAG_STRING_MAXIMUM 9 +#define HID_LOCAL_ITEM_TAG_DELIMITER 10 + +/* + * HID usage tables + */ + +#define HID_USAGE_PAGE 0xffff0000 + +#define HID_UP_UNDEFINED 0x00000000 +#define HID_UP_GENDESK 0x00010000 +#define HID_UP_SIMULATION 0x00020000 +#define HID_UP_GENDEVCTRLS 0x00060000 +#define HID_UP_KEYBOARD 0x00070000 +#define HID_UP_LED 0x00080000 +#define HID_UP_BUTTON 0x00090000 +#define HID_UP_ORDINAL 0x000a0000 +#define HID_UP_TELEPHONY 0x000b0000 +#define HID_UP_CONSUMER 0x000c0000 +#define HID_UP_DIGITIZER 0x000d0000 +#define HID_UP_PID 0x000f0000 +#define HID_UP_HPVENDOR 0xff7f0000 +#define HID_UP_HPVENDOR2 0xff010000 +#define HID_UP_MSVENDOR 0xff000000 +#define HID_UP_CUSTOM 0x00ff0000 +#define HID_UP_LOGIVENDOR 0xffbc0000 +#define HID_UP_LOGIVENDOR2 0xff090000 +#define HID_UP_LOGIVENDOR3 0xff430000 +#define HID_UP_LNVENDOR 0xffa00000 +#define HID_UP_SENSOR 0x00200000 +#define HID_UP_ASUSVENDOR 0xff310000 + +#define HID_USAGE 0x0000ffff + +#define HID_GD_POINTER 0x00010001 +#define HID_GD_MOUSE 0x00010002 +#define HID_GD_JOYSTICK 0x00010004 +#define HID_GD_GAMEPAD 0x00010005 +#define HID_GD_KEYBOARD 0x00010006 +#define HID_GD_KEYPAD 0x00010007 +#define HID_GD_MULTIAXIS 0x00010008 +/* + * Microsoft Win8 Wireless Radio Controls extensions CA, see: + * http://www.usb.org/developers/hidpage/HUTRR40RadioHIDUsagesFinal.pdf + */ +#define HID_GD_WIRELESS_RADIO_CTLS 0x0001000c +/* + * System Multi-Axis, see: + * http://www.usb.org/developers/hidpage/HUTRR62_-_Generic_Desktop_CA_for_System_Multi-Axis_Controllers.txt + */ +#define HID_GD_SYSTEM_MULTIAXIS 0x0001000e + +#define HID_GD_X 0x00010030 +#define HID_GD_Y 0x00010031 +#define HID_GD_Z 0x00010032 +#define HID_GD_RX 0x00010033 +#define HID_GD_RY 0x00010034 +#define HID_GD_RZ 0x00010035 +#define HID_GD_SLIDER 0x00010036 +#define HID_GD_DIAL 0x00010037 +#define HID_GD_WHEEL 0x00010038 +#define HID_GD_HATSWITCH 0x00010039 +#define HID_GD_BUFFER 0x0001003a +#define HID_GD_BYTECOUNT 0x0001003b +#define HID_GD_MOTION 0x0001003c +#define HID_GD_START 0x0001003d +#define HID_GD_SELECT 0x0001003e +#define HID_GD_VX 0x00010040 +#define HID_GD_VY 0x00010041 +#define HID_GD_VZ 0x00010042 +#define HID_GD_VBRX 0x00010043 +#define HID_GD_VBRY 0x00010044 +#define HID_GD_VBRZ 0x00010045 +#define HID_GD_VNO 0x00010046 +#define HID_GD_FEATURE 0x00010047 +#define HID_GD_SYSTEM_CONTROL 0x00010080 +#define HID_GD_UP 0x00010090 +#define HID_GD_DOWN 0x00010091 +#define HID_GD_RIGHT 0x00010092 +#define HID_GD_LEFT 0x00010093 +/* Microsoft Win8 Wireless Radio Controls CA usage codes */ +#define HID_GD_RFKILL_BTN 0x000100c6 +#define HID_GD_RFKILL_LED 0x000100c7 +#define HID_GD_RFKILL_SWITCH 0x000100c8 + +#define HID_DC_BATTERYSTRENGTH 0x00060020 + +#define HID_CP_CONSUMER_CONTROL 0x000c0001 + +#define HID_DG_DIGITIZER 0x000d0001 +#define HID_DG_PEN 0x000d0002 +#define HID_DG_LIGHTPEN 0x000d0003 +#define HID_DG_TOUCHSCREEN 0x000d0004 +#define HID_DG_TOUCHPAD 0x000d0005 +#define HID_DG_STYLUS 0x000d0020 +#define HID_DG_PUCK 0x000d0021 +#define HID_DG_FINGER 0x000d0022 +#define HID_DG_TIPPRESSURE 0x000d0030 +#define HID_DG_BARRELPRESSURE 0x000d0031 +#define HID_DG_INRANGE 0x000d0032 +#define HID_DG_TOUCH 0x000d0033 +#define HID_DG_UNTOUCH 0x000d0034 +#define HID_DG_TAP 0x000d0035 
+#define HID_DG_TABLETFUNCTIONKEY 0x000d0039 +#define HID_DG_PROGRAMCHANGEKEY 0x000d003a +#define HID_DG_BATTERYSTRENGTH 0x000d003b +#define HID_DG_INVERT 0x000d003c +#define HID_DG_TILT_X 0x000d003d +#define HID_DG_TILT_Y 0x000d003e +#define HID_DG_TWIST 0x000d0041 +#define HID_DG_TIPSWITCH 0x000d0042 +#define HID_DG_TIPSWITCH2 0x000d0043 +#define HID_DG_BARRELSWITCH 0x000d0044 +#define HID_DG_ERASER 0x000d0045 +#define HID_DG_TABLETPICK 0x000d0046 + +#define HID_CP_CONSUMERCONTROL 0x000c0001 +#define HID_CP_NUMERICKEYPAD 0x000c0002 +#define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003 +#define HID_CP_MICROPHONE 0x000c0004 +#define HID_CP_HEADPHONE 0x000c0005 +#define HID_CP_GRAPHICEQUALIZER 0x000c0006 +#define HID_CP_FUNCTIONBUTTONS 0x000c0036 +#define HID_CP_SELECTION 0x000c0080 +#define HID_CP_MEDIASELECTION 0x000c0087 +#define HID_CP_SELECTDISC 0x000c00ba +#define HID_CP_VOLUMEUP 0x000c00e9 +#define HID_CP_VOLUMEDOWN 0x000c00ea +#define HID_CP_PLAYBACKSPEED 0x000c00f1 +#define HID_CP_PROXIMITY 0x000c0109 +#define HID_CP_SPEAKERSYSTEM 0x000c0160 +#define HID_CP_CHANNELLEFT 0x000c0161 +#define HID_CP_CHANNELRIGHT 0x000c0162 +#define HID_CP_CHANNELCENTER 0x000c0163 +#define HID_CP_CHANNELFRONT 0x000c0164 +#define HID_CP_CHANNELCENTERFRONT 0x000c0165 +#define HID_CP_CHANNELSIDE 0x000c0166 +#define HID_CP_CHANNELSURROUND 0x000c0167 +#define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168 +#define HID_CP_CHANNELTOP 0x000c0169 +#define HID_CP_CHANNELUNKNOWN 0x000c016a +#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180 +#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200 + +#define HID_DG_DEVICECONFIG 0x000d000e +#define HID_DG_DEVICESETTINGS 0x000d0023 +#define HID_DG_AZIMUTH 0x000d003f +#define HID_DG_CONFIDENCE 0x000d0047 +#define HID_DG_WIDTH 0x000d0048 +#define HID_DG_HEIGHT 0x000d0049 +#define HID_DG_CONTACTID 0x000d0051 +#define HID_DG_INPUTMODE 0x000d0052 +#define HID_DG_DEVICEINDEX 0x000d0053 +#define HID_DG_CONTACTCOUNT 0x000d0054 +#define HID_DG_CONTACTMAX 0x000d0055 +#define HID_DG_SCANTIME 0x000d0056 +#define HID_DG_SURFACESWITCH 0x000d0057 +#define HID_DG_BUTTONSWITCH 0x000d0058 +#define HID_DG_BUTTONTYPE 0x000d0059 +#define HID_DG_BARRELSWITCH2 0x000d005a +#define HID_DG_TOOLSERIALNUMBER 0x000d005b +#define HID_DG_LATENCYMODE 0x000d0060 + +#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076 +/* + * HID report types --- Ouch! HID spec says 1 2 3! + */ + +#define HID_INPUT_REPORT 0 +#define HID_OUTPUT_REPORT 1 +#define HID_FEATURE_REPORT 2 + +#define HID_REPORT_TYPES 3 + +/* + * HID connect requests + */ + +#define HID_CONNECT_HIDINPUT BIT(0) +#define HID_CONNECT_HIDINPUT_FORCE BIT(1) +#define HID_CONNECT_HIDRAW BIT(2) +#define HID_CONNECT_HIDDEV BIT(3) +#define HID_CONNECT_HIDDEV_FORCE BIT(4) +#define HID_CONNECT_FF BIT(5) +#define HID_CONNECT_DRIVER BIT(6) +#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ + HID_CONNECT_HIDDEV|HID_CONNECT_FF) + +/* + * HID device quirks. 
+ */ + +/* + * Increase this if you need to configure more HID quirks at module load time + */ +#define MAX_USBHID_BOOT_QUIRKS 4 + +#define HID_QUIRK_INVERT BIT(0) +#define HID_QUIRK_NOTOUCH BIT(1) +#define HID_QUIRK_IGNORE BIT(2) +#define HID_QUIRK_NOGET BIT(3) +#define HID_QUIRK_HIDDEV_FORCE BIT(4) +#define HID_QUIRK_BADPAD BIT(5) +#define HID_QUIRK_MULTI_INPUT BIT(6) +#define HID_QUIRK_HIDINPUT_FORCE BIT(7) +/* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */ +/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */ +#define HID_QUIRK_ALWAYS_POLL BIT(10) +#define HID_QUIRK_INPUT_PER_APP BIT(11) +#define HID_QUIRK_X_INVERT BIT(12) +#define HID_QUIRK_Y_INVERT BIT(13) +#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16) +#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17) +#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18) +#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19) +#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20) +#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28) +#define HID_QUIRK_NO_INIT_REPORTS BIT(29) +#define HID_QUIRK_NO_IGNORE BIT(30) +#define HID_QUIRK_NO_INPUT_SYNC BIT(31) + +/* + * HID device groups + * + * Note: HID_GROUP_ANY is declared in linux/mod_devicetable.h + * and has a value of 0x0000 + */ +#define HID_GROUP_GENERIC 0x0001 +#define HID_GROUP_MULTITOUCH 0x0002 +#define HID_GROUP_SENSOR_HUB 0x0003 +#define HID_GROUP_MULTITOUCH_WIN_8 0x0004 + +/* + * Vendor specific HID device groups + */ +#define HID_GROUP_RMI 0x0100 +#define HID_GROUP_WACOM 0x0101 +#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 +#define HID_GROUP_STEAM 0x0103 + +/* + * HID protocol status + */ +#define HID_REPORT_PROTOCOL 1 +#define HID_BOOT_PROTOCOL 0 + +/* + * This is the global environment of the parser. This information is + * persistent for main-items. The global environment can be saved and + * restored with PUSH/POP statements. + */ + +struct hid_global { + unsigned usage_page; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned unit; + unsigned report_id; + unsigned report_size; + unsigned report_count; +}; + +/* + * This is the local environment. It is persistent up the next main-item. + */ + +#define HID_MAX_USAGES 12288 +#define HID_DEFAULT_NUM_COLLECTIONS 16 + +struct hid_local { + unsigned usage[HID_MAX_USAGES]; /* usage array */ + u8 usage_size[HID_MAX_USAGES]; /* usage size array */ + unsigned collection_index[HID_MAX_USAGES]; /* collection index array */ + unsigned usage_index; + unsigned usage_minimum; + unsigned delimiter_depth; + unsigned delimiter_branch; +}; + +/* + * This is the collection stack. We climb up the stack to determine + * application and function of each field. 
+ */ + +struct hid_collection { + unsigned type; + unsigned usage; + unsigned level; +}; + +struct hid_usage { + unsigned hid; /* hid usage code */ + unsigned collection_index; /* index into collection array */ + unsigned usage_index; /* index into usage array */ + /* hidinput data */ + __u16 code; /* input driver code */ + __u8 type; /* input driver type */ + __s8 hat_min; /* hat switch fun */ + __s8 hat_max; /* ditto */ + __s8 hat_dir; /* ditto */ +}; + +struct hid_input; + +struct hid_field { + unsigned physical; /* physical usage for this field */ + unsigned logical; /* logical usage for this field */ + unsigned application; /* application usage for this field */ + struct hid_usage *usage; /* usage table for this function */ + unsigned maxusage; /* maximum usage index */ + unsigned flags; /* main-item flags (i.e. volatile,array,constant) */ + unsigned report_offset; /* bit offset in the report */ + unsigned report_size; /* size of this field in the report */ + unsigned report_count; /* number of this field in the report */ + unsigned report_type; /* (input,output,feature) */ + __s32 *value; /* last known value(s) */ + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned unit; + struct hid_report *report; /* associated report */ + unsigned index; /* index into report->field[] */ + /* hidinput data */ + struct hid_input *hidinput; /* associated input structure */ + __u16 dpad; /* dpad input code */ +}; + +#define HID_MAX_FIELDS 256 + +struct hid_report { + struct list_head list; + struct list_head hidinput_list; + unsigned int id; /* id of this report */ + unsigned int type; /* report type */ + unsigned int application; /* application usage for this report */ + struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */ + unsigned maxfield; /* maximum valid field index */ + unsigned size; /* size of the report (bits) */ + struct hid_device *device; /* associated device */ +}; + +#define HID_MAX_IDS 256 + +struct hid_report_enum { + unsigned numbered; + struct list_head report_list; + struct hid_report *report_id_hash[HID_MAX_IDS]; +}; + +#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */ +#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */ +#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */ +#define HID_OUTPUT_FIFO_SIZE 64 + +struct hid_control_fifo { + unsigned char dir; + struct hid_report *report; + char *raw_report; +}; + +struct hid_output_fifo { + struct hid_report *report; + char *raw_report; +}; + +#define HID_CLAIMED_INPUT BIT(0) +#define HID_CLAIMED_HIDDEV BIT(1) +#define HID_CLAIMED_HIDRAW BIT(2) +#define HID_CLAIMED_DRIVER BIT(3) + +#define HID_STAT_ADDED BIT(0) +#define HID_STAT_PARSED BIT(1) +#define HID_STAT_DUP_DETECTED BIT(2) +#define HID_STAT_REPROBED BIT(3) + +struct hid_input { + struct list_head list; + struct hid_report *report; + struct input_dev *input; + const char *name; + bool registered; + struct list_head reports; /* the list of reports */ + unsigned int application; /* application usage for this input */ +}; + +enum hid_type { + HID_TYPE_OTHER = 0, + HID_TYPE_USBMOUSE, + HID_TYPE_USBNONE +}; + +enum hid_battery_status { + HID_BATTERY_UNKNOWN = 0, + HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */ + HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */ +}; + +struct hid_driver; +struct hid_ll_driver; + +struct hid_device { /* device report descriptor */ + __u8 *dev_rdesc; + 
unsigned dev_rsize; + __u8 *rdesc; + unsigned rsize; + struct hid_collection *collection; /* List of HID collections */ + unsigned collection_size; /* Number of allocated hid_collections */ + unsigned maxcollection; /* Number of parsed collections */ + unsigned maxapplication; /* Number of applications */ + __u16 bus; /* BUS ID */ + __u16 group; /* Report group */ + __u32 vendor; /* Vendor ID */ + __u32 product; /* Product ID */ + __u32 version; /* HID version */ + enum hid_type type; /* device type (mouse, kbd, ...) */ + unsigned country; /* HID country */ + struct hid_report_enum report_enum[HID_REPORT_TYPES]; + struct work_struct led_work; /* delayed LED worker */ + + struct semaphore driver_input_lock; /* protects the current driver */ + struct device dev; /* device */ + struct hid_driver *driver; + + struct hid_ll_driver *ll_driver; + struct mutex ll_open_lock; + unsigned int ll_open_count; + +#ifdef CONFIG_HID_BATTERY_STRENGTH + /* + * Power supply information for HID devices which report + * battery strength. power_supply was successfully registered if + * battery is non-NULL. + */ + struct power_supply *battery; + __s32 battery_capacity; + __s32 battery_min; + __s32 battery_max; + __s32 battery_report_type; + __s32 battery_report_id; + enum hid_battery_status battery_status; + bool battery_avoid_query; +#endif + + unsigned long status; /* see STAT flags above */ + unsigned claimed; /* Claimed by hidinput, hiddev? */ + unsigned quirks; /* Various quirks the device can pull on us */ + bool io_started; /* If IO has started */ + + struct list_head inputs; /* The list of inputs */ + void *hiddev; /* The hiddev structure */ + void *hidraw; + + char name[128]; /* Device name */ + char phys[64]; /* Device physical location */ + char uniq[64]; /* Device unique identifier (serial #) */ + + void *driver_data; + + /* temporary hid_ff handling (until moved to the drivers) */ + int (*ff_init)(struct hid_device *); + + /* hiddev event handler */ + int (*hiddev_connect)(struct hid_device *, unsigned int); + void (*hiddev_disconnect)(struct hid_device *); + void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field, + struct hid_usage *, __s32); + void (*hiddev_report_event) (struct hid_device *, struct hid_report *); + + /* debugging support via debugfs */ + unsigned short debug; + struct dentry *debug_dir; + struct dentry *debug_rdesc; + struct dentry *debug_events; + struct list_head debug_list; + spinlock_t debug_list_lock; + wait_queue_head_t debug_wait; +}; + +#define to_hid_device(pdev) \ + container_of(pdev, struct hid_device, dev) + +static inline void *hid_get_drvdata(struct hid_device *hdev) +{ + return dev_get_drvdata(&hdev->dev); +} + +static inline void hid_set_drvdata(struct hid_device *hdev, void *data) +{ + dev_set_drvdata(&hdev->dev, data); +} + +#define HID_GLOBAL_STACK_SIZE 4 +#define HID_COLLECTION_STACK_SIZE 4 + +#define HID_SCAN_FLAG_MT_WIN_8 BIT(0) +#define HID_SCAN_FLAG_VENDOR_SPECIFIC BIT(1) +#define HID_SCAN_FLAG_GD_POINTER BIT(2) + +struct hid_parser { + struct hid_global global; + struct hid_global global_stack[HID_GLOBAL_STACK_SIZE]; + unsigned int global_stack_ptr; + struct hid_local local; + unsigned int *collection_stack; + unsigned int collection_stack_ptr; + unsigned int collection_stack_size; + struct hid_device *device; + unsigned int scan_flags; +}; + +struct hid_class_descriptor { + __u8 bDescriptorType; + __le16 wDescriptorLength; +} __attribute__ ((packed)); + +struct hid_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdHID; + 
__u8 bCountryCode; + __u8 bNumDescriptors; + + struct hid_class_descriptor desc[1]; +} __attribute__ ((packed)); + +#define HID_DEVICE(b, g, ven, prod) \ + .bus = (b), .group = (g), .vendor = (ven), .product = (prod) +#define HID_USB_DEVICE(ven, prod) \ + .bus = BUS_USB, .vendor = (ven), .product = (prod) +#define HID_BLUETOOTH_DEVICE(ven, prod) \ + .bus = BUS_BLUETOOTH, .vendor = (ven), .product = (prod) +#define HID_I2C_DEVICE(ven, prod) \ + .bus = BUS_I2C, .vendor = (ven), .product = (prod) + +#define HID_REPORT_ID(rep) \ + .report_type = (rep) +#define HID_USAGE_ID(uhid, utype, ucode) \ + .usage_hid = (uhid), .usage_type = (utype), .usage_code = (ucode) +/* we don't want to catch types and codes equal to 0 */ +#define HID_TERMINATOR (HID_ANY_ID - 1) + +struct hid_report_id { + __u32 report_type; +}; +struct hid_usage_id { + __u32 usage_hid; + __u32 usage_type; + __u32 usage_code; +}; + +/** + * struct hid_driver + * @name: driver name (e.g. "Footech_bar-wheel") + * @id_table: which devices is this driver for (must be non-NULL for probe + * to be called) + * @dyn_list: list of dynamically added device ids + * @dyn_lock: lock protecting @dyn_list + * @match: check if the given device is handled by this driver + * @probe: new device inserted + * @remove: device removed (NULL if not a hot-plug capable driver) + * @report_table: on which reports to call raw_event (NULL means all) + * @raw_event: if report in report_table, this hook is called (NULL means nop) + * @usage_table: on which events to call event (NULL means all) + * @event: if usage in usage_table, this hook is called (NULL means nop) + * @report: this hook is called after parsing a report (NULL means nop) + * @report_fixup: called before report descriptor parsing (NULL means nop) + * @input_mapping: invoked on input registering before mapping an usage + * @input_mapped: invoked on input registering after mapping an usage + * @input_configured: invoked just before the device is registered + * @feature_mapping: invoked on feature registering + * @suspend: invoked on suspend (NULL means nop) + * @resume: invoked on resume if device was not reset (NULL means nop) + * @reset_resume: invoked on resume if device was reset (NULL means nop) + * + * probe should return -errno on error, or 0 on success. During probe, + * input will not be passed to raw_event unless hid_device_io_start is + * called. + * + * raw_event and event should return 0 on no action performed, 1 when no + * further processing should be done and negative on error + * + * input_mapping shall return a negative value to completely ignore this usage + * (e.g. doubled or invalid usage), zero to continue with parsing of this + * usage by generic code (no special handling needed) or positive to skip + * generic parsing (needed special handling which was done in the hook already) + * input_mapped shall return negative to inform the layer that this usage + * should not be considered for further processing or zero to notify that + * no processing was performed and should be done in a generic manner + * Both these functions may be NULL which means the same behavior as returning + * zero from them. 
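 *
 * As a loose sketch only (the mydrv_* names are not part of this header),
 * hooks following these return conventions could look like:
 *
 *	static int mydrv_event(struct hid_device *hdev, struct hid_field *field,
 *			       struct hid_usage *usage, __s32 value)
 *	{
 *		if (usage->hid != HID_GD_WHEEL)
 *			return 0;	// not ours: let generic code process it
 *		// consume the wheel event here
 *		return 1;		// no further processing wanted
 *	}
 *
 *	static int mydrv_input_mapping(struct hid_device *hdev,
 *				       struct hid_input *hi,
 *				       struct hid_field *field,
 *				       struct hid_usage *usage,
 *				       unsigned long **bit, int *max)
 *	{
 *		if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR)
 *			return -1;	// ignore this usage completely
 *		return 0;		// fall back to generic mapping
 *	}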
+ */ +struct hid_driver { + char *name; + const struct hid_device_id *id_table; + + struct list_head dyn_list; + spinlock_t dyn_lock; + + bool (*match)(struct hid_device *dev, bool ignore_special_driver); + int (*probe)(struct hid_device *dev, const struct hid_device_id *id); + void (*remove)(struct hid_device *dev); + + const struct hid_report_id *report_table; + int (*raw_event)(struct hid_device *hdev, struct hid_report *report, + u8 *data, int size); + const struct hid_usage_id *usage_table; + int (*event)(struct hid_device *hdev, struct hid_field *field, + struct hid_usage *usage, __s32 value); + void (*report)(struct hid_device *hdev, struct hid_report *report); + + __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf, + unsigned int *size); + + int (*input_mapping)(struct hid_device *hdev, + struct hid_input *hidinput, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max); + int (*input_mapped)(struct hid_device *hdev, + struct hid_input *hidinput, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max); + int (*input_configured)(struct hid_device *hdev, + struct hid_input *hidinput); + void (*feature_mapping)(struct hid_device *hdev, + struct hid_field *field, + struct hid_usage *usage); +#ifdef CONFIG_PM + int (*suspend)(struct hid_device *hdev, pm_message_t message); + int (*resume)(struct hid_device *hdev); + int (*reset_resume)(struct hid_device *hdev); +#endif +/* private: */ + struct device_driver driver; +}; + +#define to_hid_driver(pdrv) \ + container_of(pdrv, struct hid_driver, driver) + +/** + * hid_ll_driver - low level driver callbacks + * @start: called on probe to start the device + * @stop: called on remove + * @open: called by input layer on open + * @close: called by input layer on close + * @power: request underlying hardware to enter requested power mode + * @parse: this method is called only once to parse the device data, + * shouldn't allocate anything to not leak memory + * @request: send report request to device (e.g. feature report) + * @wait: wait for buffered io to complete (send/recv reports) + * @raw_request: send raw report request to device (e.g. 
feature report) + * @output_report: send output report to device + * @idle: send idle request to device + */ +struct hid_ll_driver { + int (*start)(struct hid_device *hdev); + void (*stop)(struct hid_device *hdev); + + int (*open)(struct hid_device *hdev); + void (*close)(struct hid_device *hdev); + + int (*power)(struct hid_device *hdev, int level); + + int (*parse)(struct hid_device *hdev); + + void (*request)(struct hid_device *hdev, + struct hid_report *report, int reqtype); + + int (*wait)(struct hid_device *hdev); + + int (*raw_request) (struct hid_device *hdev, unsigned char reportnum, + __u8 *buf, size_t len, unsigned char rtype, + int reqtype); + + int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len); + + int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype); +}; + +extern struct hid_ll_driver i2c_hid_ll_driver; +extern struct hid_ll_driver hidp_hid_driver; +extern struct hid_ll_driver uhid_hid_driver; +extern struct hid_ll_driver usb_hid_driver; + +static inline bool hid_is_using_ll_driver(struct hid_device *hdev, + struct hid_ll_driver *driver) +{ + return hdev->ll_driver == driver; +} + +static inline bool hid_is_usb(struct hid_device *hdev) +{ + return hid_is_using_ll_driver(hdev, &usb_hid_driver); +} + +#define PM_HINT_FULLON 1<<5 +#define PM_HINT_NORMAL 1<<1 + +/* Applications from HID Usage Tables 4/8/99 Version 1.1 */ +/* We ignore a few input applications that are not widely used */ +#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006))) + +/* HID core API */ + +extern int hid_debug; + +extern bool hid_ignore(struct hid_device *); +extern int hid_add_device(struct hid_device *); +extern void hid_destroy_device(struct hid_device *); + +extern struct bus_type hid_bus_type; + +extern int __must_check __hid_register_driver(struct hid_driver *, + struct module *, const char *mod_name); + +/* use a define to avoid include chaining to get THIS_MODULE & friends */ +#define hid_register_driver(driver) \ + __hid_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) + +extern void hid_unregister_driver(struct hid_driver *); + +/** + * module_hid_driver() - Helper macro for registering a HID driver + * @__hid_driver: hid_driver struct + * + * Helper macro for HID drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. 
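A minimal registration skeleton under these conventions might look like the following; the vendor/product IDs and foo_ names are made up, HID_CONNECT_DEFAULT is defined earlier in this header, and hid_parse()/hid_hw_start() are the helpers declared further down:

static const struct hid_device_id foo_devices[] = {
        { HID_USB_DEVICE(0x1234, 0x5678) },     /* hypothetical VID/PID */
        { }
};
MODULE_DEVICE_TABLE(hid, foo_devices);

static int foo_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
        int ret;

        ret = hid_parse(hdev);          /* parse the report descriptor */
        if (ret)
                return ret;             /* -errno on failure */

        return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

static struct hid_driver foo_driver = {
        .name           = "foo",
        .id_table       = foo_devices,
        .probe          = foo_probe,
};
module_hid_driver(foo_driver);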
Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_hid_driver(__hid_driver) \ + module_driver(__hid_driver, hid_register_driver, \ + hid_unregister_driver) + +extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); +extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report); +extern int hidinput_connect(struct hid_device *hid, unsigned int force); +extern void hidinput_disconnect(struct hid_device *); + +int hid_set_field(struct hid_field *, unsigned, __s32); +int hid_input_report(struct hid_device *, int type, u8 *, u32, int); +int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field); +struct hid_field *hidinput_get_led_field(struct hid_device *hid); +unsigned int hidinput_count_leds(struct hid_device *hid); +__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code); +void hid_output_report(struct hid_report *report, __u8 *data); +void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype); +u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags); +struct hid_device *hid_allocate_device(void); +struct hid_report *hid_register_report(struct hid_device *device, + unsigned int type, unsigned int id, + unsigned int application); +int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); +struct hid_report *hid_validate_values(struct hid_device *hid, + unsigned int type, unsigned int id, + unsigned int field_index, + unsigned int report_counts); +int hid_open_report(struct hid_device *device); +int hid_check_keys_pressed(struct hid_device *hid); +int hid_connect(struct hid_device *hid, unsigned int connect_mask); +void hid_disconnect(struct hid_device *hid); +bool hid_match_one_id(const struct hid_device *hdev, + const struct hid_device_id *id); +const struct hid_device_id *hid_match_id(const struct hid_device *hdev, + const struct hid_device_id *id); +const struct hid_device_id *hid_match_device(struct hid_device *hdev, + struct hid_driver *hdrv); +bool hid_compare_device_paths(struct hid_device *hdev_a, + struct hid_device *hdev_b, char separator); +s32 hid_snto32(__u32 value, unsigned n); +__u32 hid_field_extract(const struct hid_device *hid, __u8 *report, + unsigned offset, unsigned n); + +/** + * hid_device_io_start - enable HID input during probe, remove + * + * @hid - the device + * + * This should only be called during probe or remove and only be + * called by the thread calling probe or remove. It will allow + * incoming packets to be delivered to the driver. + */ +static inline void hid_device_io_start(struct hid_device *hid) { + if (hid->io_started) { + dev_warn(&hid->dev, "io already started\n"); + return; + } + hid->io_started = true; + up(&hid->driver_input_lock); +} + +/** + * hid_device_io_stop - disable HID input during probe, remove + * + * @hid - the device + * + * Should only be called after hid_device_io_start. It will prevent + * incoming packets from going to the driver for the duration of + * probe, remove. If called during probe, packets will still go to the + * driver after probe is complete. This function should only be called + * by the thread calling probe or remove. 
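A hedged sketch of a probe routine that needs to exchange traffic with the device before returning, bracketing that exchange with hid_device_io_start()/hid_device_io_stop(); bar_handshake() is a hypothetical driver function:

static int bar_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
        int ret = hid_parse(hdev);

        if (ret)
                return ret;
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret)
                return ret;

        hid_device_io_start(hdev);      /* deliver input to us during probe */
        ret = bar_handshake(hdev);      /* hypothetical helper */
        hid_device_io_stop(hdev);       /* stop again until probe returns */

        if (ret)
                hid_hw_stop(hdev);
        return ret;
}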
+ */ +static inline void hid_device_io_stop(struct hid_device *hid) { + if (!hid->io_started) { + dev_warn(&hid->dev, "io already stopped\n"); + return; + } + hid->io_started = false; + down(&hid->driver_input_lock); +} + +/** + * hid_map_usage - map usage input bits + * + * @hidinput: hidinput which we are interested in + * @usage: usage to fill in + * @bit: pointer to input->{}bit (out parameter) + * @max: maximal valid usage->code to consider later (out parameter) + * @type: input event type (EV_KEY, EV_REL, ...) + * @c: code which corresponds to this usage and type + * + * The value pointed to by @bit will be set to NULL if either @type is + * an unhandled event type, or if @c is out of range for @type. This + * can be used as an error condition. + */ +static inline void hid_map_usage(struct hid_input *hidinput, + struct hid_usage *usage, unsigned long **bit, int *max, + __u8 type, unsigned int c) +{ + struct input_dev *input = hidinput->input; + unsigned long *bmap = NULL; + unsigned int limit = 0; + + switch (type) { + case EV_ABS: + bmap = input->absbit; + limit = ABS_MAX; + break; + case EV_REL: + bmap = input->relbit; + limit = REL_MAX; + break; + case EV_KEY: + bmap = input->keybit; + limit = KEY_MAX; + break; + case EV_LED: + bmap = input->ledbit; + limit = LED_MAX; + break; + } + + if (unlikely(c > limit || !bmap)) { + pr_warn_ratelimited("%s: Invalid code %d type %d\n", + input->name, c, type); + *bit = NULL; + return; + } + + usage->type = type; + usage->code = c; + *max = limit; + *bit = bmap; +} + +/** + * hid_map_usage_clear - map usage input bits and clear the input bit + * + * The same as hid_map_usage, except the @c bit is also cleared in supported + * bits (@bit). + */ +static inline void hid_map_usage_clear(struct hid_input *hidinput, + struct hid_usage *usage, unsigned long **bit, int *max, + __u8 type, __u16 c) +{ + hid_map_usage(hidinput, usage, bit, max, type, c); + if (*bit) + clear_bit(usage->code, *bit); +} + +/** + * hid_parse - parse HW reports + * + * @hdev: hid device + * + * Call this from probe after you set up the device (if needed). Your + * report_fixup will be called (if non-NULL) after reading raw report from + * device before passing it to hid layer for real parsing. + */ +static inline int __must_check hid_parse(struct hid_device *hdev) +{ + return hid_open_report(hdev); +} + +int __must_check hid_hw_start(struct hid_device *hdev, + unsigned int connect_mask); +void hid_hw_stop(struct hid_device *hdev); +int __must_check hid_hw_open(struct hid_device *hdev); +void hid_hw_close(struct hid_device *hdev); + +/** + * hid_hw_power - requests underlying HW to go into given power mode + * + * @hdev: hid device + * @level: requested power level (one of %PM_HINT_* defines) + * + * This function requests underlying hardware to enter requested power + * mode. + */ + +static inline int hid_hw_power(struct hid_device *hdev, int level) +{ + return hdev->ll_driver->power ? 
hdev->ll_driver->power(hdev, level) : 0; +} + + +/** + * hid_hw_request - send report request to device + * + * @hdev: hid device + * @report: report to send + * @reqtype: hid request type + */ +static inline void hid_hw_request(struct hid_device *hdev, + struct hid_report *report, int reqtype) +{ + if (hdev->ll_driver->request) + return hdev->ll_driver->request(hdev, report, reqtype); + + __hid_request(hdev, report, reqtype); +} + +/** + * hid_hw_raw_request - send report request to device + * + * @hdev: hid device + * @reportnum: report ID + * @buf: in/out data to transfer + * @len: length of buf + * @rtype: HID report type + * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT + * + * @return: count of data transfered, negative if error + * + * Same behavior as hid_hw_request, but with raw buffers instead. + */ +static inline int hid_hw_raw_request(struct hid_device *hdev, + unsigned char reportnum, __u8 *buf, + size_t len, unsigned char rtype, int reqtype) +{ + if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf) + return -EINVAL; + + return hdev->ll_driver->raw_request(hdev, reportnum, buf, len, + rtype, reqtype); +} + +/** + * hid_hw_output_report - send output report to device + * + * @hdev: hid device + * @buf: raw data to transfer + * @len: length of buf + * + * @return: count of data transfered, negative if error + */ +static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, + size_t len) +{ + if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf) + return -EINVAL; + + if (hdev->ll_driver->output_report) + return hdev->ll_driver->output_report(hdev, buf, len); + + return -ENOSYS; +} + +/** + * hid_hw_idle - send idle request to device + * + * @hdev: hid device + * @report: report to control + * @idle: idle state + * @reqtype: hid request type + */ +static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle, + int reqtype) +{ + if (hdev->ll_driver->idle) + return hdev->ll_driver->idle(hdev, report, idle, reqtype); + + return 0; +} + +/** + * hid_hw_wait - wait for buffered io to complete + * + * @hdev: hid device + */ +static inline void hid_hw_wait(struct hid_device *hdev) +{ + if (hdev->ll_driver->wait) + hdev->ll_driver->wait(hdev); +} + +/** + * hid_report_len - calculate the report length + * + * @report: the report we want to know the length + */ +static inline u32 hid_report_len(struct hid_report *report) +{ + return DIV_ROUND_UP(report->size, 8) + (report->id > 0); +} + +int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, + int interrupt); + +/* HID quirks API */ +unsigned long hid_lookup_quirk(const struct hid_device *hdev); +int hid_quirks_init(char **quirks_param, __u16 bus, int count); +void hid_quirks_exit(__u16 bus); + +#ifdef CONFIG_HID_PID +int hid_pidff_init(struct hid_device *hid); +#else +#define hid_pidff_init NULL +#endif + +#define dbg_hid(format, arg...) \ +do { \ + if (hid_debug) \ + printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \ +} while (0) + +#define hid_printk(level, hid, fmt, arg...) \ + dev_printk(level, &(hid)->dev, fmt, ##arg) +#define hid_emerg(hid, fmt, arg...) \ + dev_emerg(&(hid)->dev, fmt, ##arg) +#define hid_crit(hid, fmt, arg...) \ + dev_crit(&(hid)->dev, fmt, ##arg) +#define hid_alert(hid, fmt, arg...) \ + dev_alert(&(hid)->dev, fmt, ##arg) +#define hid_err(hid, fmt, arg...) \ + dev_err(&(hid)->dev, fmt, ##arg) +#define hid_notice(hid, fmt, arg...) \ + dev_notice(&(hid)->dev, fmt, ##arg) +#define hid_warn(hid, fmt, arg...) 
\ + dev_warn(&(hid)->dev, fmt, ##arg) +#define hid_info(hid, fmt, arg...) \ + dev_info(&(hid)->dev, fmt, ##arg) +#define hid_dbg(hid, fmt, arg...) \ + dev_dbg(&(hid)->dev, fmt, ##arg) + +#endif diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h new file mode 100644 index 000000000..921622222 --- /dev/null +++ b/include/linux/hiddev.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 1999-2000 Vojtech Pavlik + * + * Sponsored by SuSE + */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Should you need to contact me, the author, you can do so either by + * e-mail - mail your message to , or by paper mail: + * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic + */ +#ifndef _HIDDEV_H +#define _HIDDEV_H + +#include + + +/* + * In-kernel definitions. + */ + +struct hiddev { + int minor; + int exist; + int open; + struct mutex existancelock; + wait_queue_head_t wait; + struct hid_device *hid; + struct list_head list; + spinlock_t list_lock; + bool initialized; +}; + +struct hid_device; +struct hid_usage; +struct hid_field; +struct hid_report; + +#ifdef CONFIG_USB_HIDDEV +int hiddev_connect(struct hid_device *hid, unsigned int force); +void hiddev_disconnect(struct hid_device *); +void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, + struct hid_usage *usage, __s32 value); +void hiddev_report_event(struct hid_device *hid, struct hid_report *report); +#else +static inline int hiddev_connect(struct hid_device *hid, + unsigned int force) +{ return -1; } +static inline void hiddev_disconnect(struct hid_device *hid) { } +static inline void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, + struct hid_usage *usage, __s32 value) { } +static inline void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { } +#endif + +#endif diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h new file mode 100644 index 000000000..ddf52612e --- /dev/null +++ b/include/linux/hidraw.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2007 Jiri Kosina + */ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ +#ifndef _HIDRAW_H +#define _HIDRAW_H + +#include + + +struct hidraw { + unsigned int minor; + int exist; + int open; + wait_queue_head_t wait; + struct hid_device *hid; + struct device *dev; + spinlock_t list_lock; + struct list_head list; +}; + +struct hidraw_report { + __u8 *value; + int len; +}; + +struct hidraw_list { + struct hidraw_report buffer[HIDRAW_BUFFER_SIZE]; + int head; + int tail; + struct fasync_struct *fasync; + struct hidraw *hidraw; + struct list_head node; + struct mutex read_mutex; +}; + +#ifdef CONFIG_HIDRAW +int hidraw_init(void); +void hidraw_exit(void); +int hidraw_report_event(struct hid_device *, u8 *, int); +int hidraw_connect(struct hid_device *); +void hidraw_disconnect(struct hid_device *); +#else +static inline int hidraw_init(void) { return 0; } +static inline void hidraw_exit(void) { } +static inline int hidraw_report_event(struct hid_device *hid, u8 *data, int len) { return 0; } +static inline int hidraw_connect(struct hid_device *hid) { return -1; } +static inline void hidraw_disconnect(struct hid_device *hid) { } +#endif + +#endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h new file mode 100644 index 000000000..069067983 --- /dev/null +++ b/include/linux/highmem.h @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HIGHMEM_H +#define _LINUX_HIGHMEM_H + +#include +#include +#include +#include +#include +#include + +#include + +#ifndef ARCH_HAS_FLUSH_ANON_PAGE +static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) +{ +} +#endif + +#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE +static inline void flush_kernel_dcache_page(struct page *page) +{ +} +static inline void flush_kernel_vmap_range(void *vaddr, int size) +{ +} +static inline void invalidate_kernel_vmap_range(void *vaddr, int size) +{ +} +#endif + +#include + +#ifdef CONFIG_HIGHMEM +#include + +/* declarations for linux/mm/highmem.c */ +unsigned int nr_free_highpages(void); +extern unsigned long totalhigh_pages; + +void kmap_flush_unused(void); + +struct page *kmap_to_page(void *addr); + +#else /* CONFIG_HIGHMEM */ + +static inline unsigned int nr_free_highpages(void) { return 0; } + +static inline struct page *kmap_to_page(void *addr) +{ + return virt_to_page(addr); +} + +#define totalhigh_pages 0UL + +#ifndef ARCH_HAS_KMAP +static inline void *kmap(struct page *page) +{ + might_sleep(); + return page_address(page); +} + +static inline void kunmap(struct page *page) +{ +} + +static inline void *kmap_atomic(struct page *page) +{ + preempt_disable(); + pagefault_disable(); + return page_address(page); +} +#define kmap_atomic_prot(page, prot) kmap_atomic(page) + +static inline void __kunmap_atomic(void *addr) +{ + pagefault_enable(); + preempt_enable(); +} + +#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) + +#define kmap_flush_unused() do {} while(0) +#endif + +#endif /* CONFIG_HIGHMEM */ + +#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + +DECLARE_PER_CPU(int, __kmap_atomic_idx); + +static inline int kmap_atomic_idx_push(void) +{ + int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; + +#ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && !irqs_disabled()); + BUG_ON(idx >= KM_TYPE_NR); +#endif + return idx; +} + +static inline int kmap_atomic_idx(void) +{ + return __this_cpu_read(__kmap_atomic_idx) - 1; +} + +static inline void kmap_atomic_idx_pop(void) +{ +#ifdef CONFIG_DEBUG_HIGHMEM + int idx = __this_cpu_dec_return(__kmap_atomic_idx); + + BUG_ON(idx < 0); +#else + 
__this_cpu_dec(__kmap_atomic_idx); +#endif +} + +#endif + +/* + * Prevent people trying to call kunmap_atomic() as if it were kunmap() + * kunmap_atomic() should get the return value of kmap_atomic, not the page. + */ +#define kunmap_atomic(addr) \ +do { \ + BUILD_BUG_ON(__same_type((addr), struct page *)); \ + __kunmap_atomic(addr); \ +} while (0) + + +/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ +#ifndef clear_user_highpage +static inline void clear_user_highpage(struct page *page, unsigned long vaddr) +{ + void *addr = kmap_atomic(page); + clear_user_page(addr, vaddr, page); + kunmap_atomic(addr); +} +#endif + +#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE +/** + * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags + * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE + * @vma: The VMA the page is to be allocated for + * @vaddr: The virtual address the page will be inserted into + * + * This function will allocate a page for a VMA but the caller is expected + * to specify via movableflags whether the page will be movable in the + * future or not + * + * An architecture may override this function by defining + * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own + * implementation. + */ +static inline struct page * +__alloc_zeroed_user_highpage(gfp_t movableflags, + struct vm_area_struct *vma, + unsigned long vaddr) +{ + struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, + vma, vaddr); + + if (page) + clear_user_highpage(page, vaddr); + + return page; +} +#endif + +/** + * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move + * @vma: The VMA the page is to be allocated for + * @vaddr: The virtual address the page will be inserted into + * + * This function will allocate a page for a VMA that the caller knows will + * be able to migrate in the future using move_pages() or reclaimed + */ +static inline struct page * +alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, + unsigned long vaddr) +{ + return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); +} + +static inline void clear_highpage(struct page *page) +{ + void *kaddr = kmap_atomic(page); + clear_page(kaddr); + kunmap_atomic(kaddr); +} + +static inline void zero_user_segments(struct page *page, + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) +{ + void *kaddr = kmap_atomic(page); + + BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); + + if (end1 > start1) + memset(kaddr + start1, 0, end1 - start1); + + if (end2 > start2) + memset(kaddr + start2, 0, end2 - start2); + + kunmap_atomic(kaddr); + flush_dcache_page(page); +} + +static inline void zero_user_segment(struct page *page, + unsigned start, unsigned end) +{ + zero_user_segments(page, start, end, 0, 0); +} + +static inline void zero_user(struct page *page, + unsigned start, unsigned size) +{ + zero_user_segments(page, start, start + size, 0, 0); +} + +#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE + +static inline void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from); + vto = kmap_atomic(to); + copy_user_page(vto, vfrom, vaddr, to); + kunmap_atomic(vto); + kunmap_atomic(vfrom); +} + +#endif + +#ifndef __HAVE_ARCH_COPY_HIGHPAGE + +static inline void copy_highpage(struct page *to, struct page *from) +{ + char *vfrom, *vto; + + vfrom = 
kmap_atomic(from); + vto = kmap_atomic(to); + copy_page(vto, vfrom); + kunmap_atomic(vto); + kunmap_atomic(vfrom); +} + +#endif + +#endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/highuid.h b/include/linux/highuid.h new file mode 100644 index 000000000..50d383fd6 --- /dev/null +++ b/include/linux/highuid.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HIGHUID_H +#define _LINUX_HIGHUID_H + +#include + +/* + * general notes: + * + * CONFIG_UID16 is defined if the given architecture needs to + * support backwards compatibility for old system calls. + * + * kernel code should use uid_t and gid_t at all times when dealing with + * kernel-private data. + * + * old_uid_t and old_gid_t should only be different if CONFIG_UID16 is + * defined, else the platform should provide dummy typedefs for them + * such that they are equivalent to __kernel_{u,g}id_t. + * + * uid16_t and gid16_t are used on all architectures. (when dealing + * with structures hard coded to 16 bits, such as in filesystems) + */ + + +/* + * This is the "overflow" UID and GID. They are used to signify uid/gid + * overflow to old programs when they request uid/gid information but are + * using the old 16 bit interfaces. + * When you run a libc5 program, it will think that all highuid files or + * processes are owned by this uid/gid. + * The idea is that it's better to do so than possibly return 0 in lieu of + * 65536, etc. + */ + +extern int overflowuid; +extern int overflowgid; + +extern void __bad_uid(void); +extern void __bad_gid(void); + +#define DEFAULT_OVERFLOWUID 65534 +#define DEFAULT_OVERFLOWGID 65534 + +#ifdef CONFIG_UID16 + +/* prevent uid mod 65536 effect by returning a default value for high UIDs */ +#define high2lowuid(uid) ((uid) & ~0xFFFF ? (old_uid_t)overflowuid : (old_uid_t)(uid)) +#define high2lowgid(gid) ((gid) & ~0xFFFF ? (old_gid_t)overflowgid : (old_gid_t)(gid)) +/* + * -1 is different in 16 bits than it is in 32 bits + * these macros are used by chown(), setreuid(), ..., + */ +#define low2highuid(uid) ((uid) == (old_uid_t)-1 ? (uid_t)-1 : (uid_t)(uid)) +#define low2highgid(gid) ((gid) == (old_gid_t)-1 ? (gid_t)-1 : (gid_t)(gid)) + +#define __convert_uid(size, uid) \ + (size >= sizeof(uid) ? (uid) : high2lowuid(uid)) +#define __convert_gid(size, gid) \ + (size >= sizeof(gid) ? (gid) : high2lowgid(gid)) + + +#else + +#define __convert_uid(size, uid) (uid) +#define __convert_gid(size, gid) (gid) + +#endif /* !CONFIG_UID16 */ + +/* uid/gid input should be always 32bit uid_t */ +#define SET_UID(var, uid) do { (var) = __convert_uid(sizeof(var), (uid)); } while (0) +#define SET_GID(var, gid) do { (var) = __convert_gid(sizeof(var), (gid)); } while (0) + +/* + * Everything below this line is needed on all architectures, to deal with + * filesystems that only store 16 bits of the UID/GID, etc. + */ + +/* + * This is the UID and GID that will get written to disk if a filesystem + * only supports 16-bit UIDs and the kernel has a high UID/GID to write + */ +extern int fs_overflowuid; +extern int fs_overflowgid; + +#define DEFAULT_FS_OVERFLOWUID 65534 +#define DEFAULT_FS_OVERFLOWGID 65534 + +/* + * Since these macros are used in architectures that only need limited + * 16-bit UID back compatibility, we won't use old_uid_t and old_gid_t + */ +#define fs_high2lowuid(uid) ((uid) & ~0xFFFF ? (uid16_t)fs_overflowuid : (uid16_t)(uid)) +#define fs_high2lowgid(gid) ((gid) & ~0xFFFF ? 
(gid16_t)fs_overflowgid : (gid16_t)(gid)) + +#define low_16_bits(x) ((x) & 0xFFFF) +#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16) + +#endif /* _LINUX_HIGHUID_H */ diff --git a/include/linux/hil.h b/include/linux/hil.h new file mode 100644 index 000000000..523785a9d --- /dev/null +++ b/include/linux/hil.h @@ -0,0 +1,483 @@ +#ifndef _HIL_H_ +#define _HIL_H_ + +/* + * Hewlett Packard Human Interface Loop (HP-HIL) Protocol -- header. + * + * Copyright (c) 2001 Brian S. Julin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL"). + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * + * References: + * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A + * + * A note of thanks to HP for providing and shipping reference materials + * free of charge to help in the development of HIL support for Linux. + * + */ + +#include + +/* Physical constants relevant to raw loop/device timing. + */ + +#define HIL_CLOCK 8MHZ +#define HIL_EK1_CLOCK 30HZ +#define HIL_EK2_CLOCK 60HZ + +#define HIL_TIMEOUT_DEV 5 /* ms */ +#define HIL_TIMEOUT_DEVS 10 /* ms */ +#define HIL_TIMEOUT_NORESP 10 /* ms */ +#define HIL_TIMEOUT_DEVS_DATA 16 /* ms */ +#define HIL_TIMEOUT_SELFTEST 200 /* ms */ + + +/* Actual wire line coding. These will only be useful if someone is + * implementing a software MLC to run HIL devices on a non-parisc machine. + */ + +#define HIL_WIRE_PACKET_LEN 15 +enum hil_wire_bitpos { + HIL_WIRE_START = 0, + HIL_WIRE_ADDR2, + HIL_WIRE_ADDR1, + HIL_WIRE_ADDR0, + HIL_WIRE_COMMAND, + HIL_WIRE_DATA7, + HIL_WIRE_DATA6, + HIL_WIRE_DATA5, + HIL_WIRE_DATA4, + HIL_WIRE_DATA3, + HIL_WIRE_DATA2, + HIL_WIRE_DATA1, + HIL_WIRE_DATA0, + HIL_WIRE_PARITY, + HIL_WIRE_STOP +}; + +/* HP documentation uses these bit positions to refer to commands; + * we will call these "packets". 
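As a sketch of how such a packet is typically taken apart (the helper names are hypothetical; the hil_packet type and the HIL_PKT_* masks it uses are defined just below):

static inline unsigned int foo_hil_pkt_addr(hil_packet p)
{
        return (p & HIL_PKT_ADDR_MASK) >> HIL_PKT_ADDR_SHIFT;  /* 3-bit device address */
}

static inline unsigned int foo_hil_pkt_data(hil_packet p)
{
        return (p & HIL_PKT_DATA_MASK) >> HIL_PKT_DATA_SHIFT;  /* 8-bit data/command byte */
}

static inline int foo_hil_pkt_is_cmd(hil_packet p)
{
        return (p & HIL_PKT_CMD) != 0;          /* command vs. data packet */
}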
+ */ +enum hil_pkt_bitpos { + HIL_PKT_CMD = 0x00000800, + HIL_PKT_ADDR2 = 0x00000400, + HIL_PKT_ADDR1 = 0x00000200, + HIL_PKT_ADDR0 = 0x00000100, + HIL_PKT_ADDR_MASK = 0x00000700, + HIL_PKT_ADDR_SHIFT = 8, + HIL_PKT_DATA7 = 0x00000080, + HIL_PKT_DATA6 = 0x00000040, + HIL_PKT_DATA5 = 0x00000020, + HIL_PKT_DATA4 = 0x00000010, + HIL_PKT_DATA3 = 0x00000008, + HIL_PKT_DATA2 = 0x00000004, + HIL_PKT_DATA1 = 0x00000002, + HIL_PKT_DATA0 = 0x00000001, + HIL_PKT_DATA_MASK = 0x000000FF, + HIL_PKT_DATA_SHIFT = 0 +}; + +/* The HIL MLC also has several error/status/control bits. We extend the + * "packet" to include these when direct access to the MLC is available, + * or emulate them in cases where they are not available. + * + * This way the device driver knows that the underlying MLC driver + * has had to deal with loop errors. + */ +enum hil_error_bitpos { + HIL_ERR_OB = 0x00000800, /* MLC is busy sending an auto-poll, + or we have filled up the output + buffer and must wait. */ + HIL_ERR_INT = 0x00010000, /* A normal interrupt has occurred. */ + HIL_ERR_NMI = 0x00020000, /* An NMI has occurred. */ + HIL_ERR_LERR = 0x00040000, /* A poll didn't come back. */ + HIL_ERR_PERR = 0x01000000, /* There was a Parity Error. */ + HIL_ERR_FERR = 0x02000000, /* There was a Framing Error. */ + HIL_ERR_FOF = 0x04000000 /* Input FIFO Overflowed. */ +}; + +enum hil_control_bitpos { + HIL_CTRL_TEST = 0x00010000, + HIL_CTRL_IPF = 0x00040000, + HIL_CTRL_APE = 0x02000000 +}; + +/* Bits 30,31 are unused, we use them to control write behavior. */ +#define HIL_DO_ALTER_CTRL 0x40000000 /* Write MSW of packet to control + before writing LSW to loop */ +#define HIL_CTRL_ONLY 0xc0000000 /* *Only* alter the control registers */ + +/* This gives us a 32-bit "packet" + */ +typedef u32 hil_packet; + + +/* HIL Loop commands + */ +enum hil_command { + HIL_CMD_IFC = 0x00, /* Interface Clear */ + HIL_CMD_EPT = 0x01, /* Enter Pass-Thru Mode */ + HIL_CMD_ELB = 0x02, /* Enter Loop-Back Mode */ + HIL_CMD_IDD = 0x03, /* Identify and Describe */ + HIL_CMD_DSR = 0x04, /* Device Soft Reset */ + HIL_CMD_PST = 0x05, /* Perform Self Test */ + HIL_CMD_RRG = 0x06, /* Read Register */ + HIL_CMD_WRG = 0x07, /* Write Register */ + HIL_CMD_ACF = 0x08, /* Auto Configure */ + HIL_CMDID_ACF = 0x07, /* Auto Configure bits with incremented ID */ + HIL_CMD_POL = 0x10, /* Poll */ + HIL_CMDCT_POL = 0x0f, /* Poll command bits with item count */ + HIL_CMD_RPL = 0x20, /* RePoll */ + HIL_CMDCT_RPL = 0x0f, /* RePoll command bits with item count */ + HIL_CMD_RNM = 0x30, /* Report Name */ + HIL_CMD_RST = 0x31, /* Report Status */ + HIL_CMD_EXD = 0x32, /* Extended Describe */ + HIL_CMD_RSC = 0x33, /* Report Security Code */ + + /* 0x34 to 0x3c reserved for future use */ + + HIL_CMD_DKA = 0x3d, /* Disable Keyswitch Autorepeat */ + HIL_CMD_EK1 = 0x3e, /* Enable Keyswitch Autorepeat 1 */ + HIL_CMD_EK2 = 0x3f, /* Enable Keyswitch Autorepeat 2 */ + HIL_CMD_PR1 = 0x40, /* Prompt1 */ + HIL_CMD_PR2 = 0x41, /* Prompt2 */ + HIL_CMD_PR3 = 0x42, /* Prompt3 */ + HIL_CMD_PR4 = 0x43, /* Prompt4 */ + HIL_CMD_PR5 = 0x44, /* Prompt5 */ + HIL_CMD_PR6 = 0x45, /* Prompt6 */ + HIL_CMD_PR7 = 0x46, /* Prompt7 */ + HIL_CMD_PRM = 0x47, /* Prompt (General Purpose) */ + HIL_CMD_AK1 = 0x48, /* Acknowledge1 */ + HIL_CMD_AK2 = 0x49, /* Acknowledge2 */ + HIL_CMD_AK3 = 0x4a, /* Acknowledge3 */ + HIL_CMD_AK4 = 0x4b, /* Acknowledge4 */ + HIL_CMD_AK5 = 0x4c, /* Acknowledge5 */ + HIL_CMD_AK6 = 0x4d, /* Acknowledge6 */ + HIL_CMD_AK7 = 0x4e, /* Acknowledge7 */ + HIL_CMD_ACK = 0x4f, /* Acknowledge (General 
Purpose) */ + + /* 0x50 to 0x78 reserved for future use */ + /* 0x80 to 0xEF device-specific commands */ + /* 0xf0 to 0xf9 reserved for future use */ + + HIL_CMD_RIO = 0xfa, /* Register I/O Error */ + HIL_CMD_SHR = 0xfb, /* System Hard Reset */ + HIL_CMD_TER = 0xfc, /* Transmission Error */ + HIL_CMD_CAE = 0xfd, /* Configuration Address Error */ + HIL_CMD_DHR = 0xfe, /* Device Hard Reset */ + + /* 0xff is prohibited from use. */ +}; + + +/* + * Response "records" to HIL commands + */ + +/* Device ID byte + */ +#define HIL_IDD_DID_TYPE_MASK 0xe0 /* Primary type bits */ +#define HIL_IDD_DID_TYPE_KB_INTEGRAL 0xa0 /* Integral keyboard */ +#define HIL_IDD_DID_TYPE_KB_ITF 0xc0 /* ITD keyboard */ +#define HIL_IDD_DID_TYPE_KB_RSVD 0xe0 /* Reserved keyboard type */ +#define HIL_IDD_DID_TYPE_KB_LANG_MASK 0x1f /* Keyboard locale bits */ +#define HIL_IDD_DID_KBLANG_USE_ESD 0x00 /* Use ESD Locale instead */ +#define HIL_IDD_DID_TYPE_ABS 0x80 /* Absolute Positioners */ +#define HIL_IDD_DID_ABS_RSVD1_MASK 0xf8 /* Reserved */ +#define HIL_IDD_DID_ABS_RSVD1 0x98 +#define HIL_IDD_DID_ABS_TABLET_MASK 0xf8 /* Tablets and digitizers */ +#define HIL_IDD_DID_ABS_TABLET 0x90 +#define HIL_IDD_DID_ABS_TSCREEN_MASK 0xfc /* Touch screens */ +#define HIL_IDD_DID_ABS_TSCREEN 0x8c +#define HIL_IDD_DID_ABS_RSVD2_MASK 0xfc /* Reserved */ +#define HIL_IDD_DID_ABS_RSVD2 0x88 +#define HIL_IDD_DID_ABS_RSVD3_MASK 0xfc /* Reserved */ +#define HIL_IDD_DID_ABS_RSVD3 0x80 +#define HIL_IDD_DID_TYPE_REL 0x60 /* Relative Positioners */ +#define HIL_IDD_DID_REL_RSVD1_MASK 0xf0 /* Reserved */ +#define HIL_IDD_DID_REL_RSVD1 0x70 +#define HIL_IDD_DID_REL_RSVD2_MASK 0xfc /* Reserved */ +#define HIL_IDD_DID_REL_RSVD2 0x6c +#define HIL_IDD_DID_REL_MOUSE_MASK 0xfc /* Mouse */ +#define HIL_IDD_DID_REL_MOUSE 0x68 +#define HIL_IDD_DID_REL_QUAD_MASK 0xf8 /* Other Quadrature Devices */ +#define HIL_IDD_DID_REL_QUAD 0x60 +#define HIL_IDD_DID_TYPE_CHAR 0x40 /* Character Entry */ +#define HIL_IDD_DID_CHAR_BARCODE_MASK 0xfc /* Barcode Reader */ +#define HIL_IDD_DID_CHAR_BARCODE 0x5c +#define HIL_IDD_DID_CHAR_RSVD1_MASK 0xfc /* Reserved */ +#define HIL_IDD_DID_CHAR_RSVD1 0x58 +#define HIL_IDD_DID_CHAR_RSVD2_MASK 0xf8 /* Reserved */ +#define HIL_IDD_DID_CHAR_RSVD2 0x50 +#define HIL_IDD_DID_CHAR_RSVD3_MASK 0xf0 /* Reserved */ +#define HIL_IDD_DID_CHAR_RSVD3 0x40 +#define HIL_IDD_DID_TYPE_OTHER 0x20 /* Miscellaneous */ +#define HIL_IDD_DID_OTHER_RSVD1_MASK 0xf0 /* Reserved */ +#define HIL_IDD_DID_OTHER_RSVD1 0x30 +#define HIL_IDD_DID_OTHER_BARCODE_MASK 0xfc /* Tone Generator */ +#define HIL_IDD_DID_OTHER_BARCODE 0x2c +#define HIL_IDD_DID_OTHER_RSVD2_MASK 0xfc /* Reserved */ +#define HIL_IDD_DID_OTHER_RSVD2 0x28 +#define HIL_IDD_DID_OTHER_RSVD3_MASK 0xf8 /* Reserved */ +#define HIL_IDD_DID_OTHER_RSVD3 0x20 +#define HIL_IDD_DID_TYPE_KEYPAD 0x00 /* Vectra Keyboard */ + +/* IDD record header + */ +#define HIL_IDD_HEADER_AXSET_MASK 0x03 /* Number of axis in a set */ +#define HIL_IDD_HEADER_RSC 0x04 /* Supports RSC command */ +#define HIL_IDD_HEADER_EXD 0x08 /* Supports EXD command */ +#define HIL_IDD_HEADER_IOD 0x10 /* IOD byte to follow */ +#define HIL_IDD_HEADER_16BIT 0x20 /* 16 (vs. 
8) bit resolution */ +#define HIL_IDD_HEADER_ABS 0x40 /* Reports Absolute Position */ +#define HIL_IDD_HEADER_2X_AXIS 0x80 /* Two sets of 1-3 axis */ + +/* I/O Descriptor + */ +#define HIL_IDD_IOD_NBUTTON_MASK 0x07 /* Number of buttons */ +#define HIL_IDD_IOD_PROXIMITY 0x08 /* Proximity in/out events */ +#define HIL_IDD_IOD_PROMPT_MASK 0x70 /* Number of prompts/acks */ +#define HIL_IDD_IOD_PROMPT_SHIFT 4 +#define HIL_IDD_IOD_PROMPT 0x80 /* Generic prompt/ack */ + +#define HIL_IDD_NUM_AXES_PER_SET(header_packet) \ +((header_packet) & HIL_IDD_HEADER_AXSET_MASK) + +#define HIL_IDD_NUM_AXSETS(header_packet) \ +(2 - !((header_packet) & HIL_IDD_HEADER_2X_AXIS)) + +#define HIL_IDD_LEN(header_packet) \ +((4 - !(header_packet & HIL_IDD_HEADER_IOD) - \ + 2 * !(HIL_IDD_NUM_AXES_PER_SET(header_packet))) + \ + 2 * HIL_IDD_NUM_AXES_PER_SET(header_packet) * \ + !!((header_packet) & HIL_IDD_HEADER_ABS)) + +/* The following HIL_IDD_* macros assume you have an array of + * packets and/or unpacked 8-bit data in the order that they + * were received. + */ + +#define HIL_IDD_AXIS_COUNTS_PER_M(header_ptr) \ +(!(HIL_IDD_NUM_AXSETS(*(header_ptr))) ? -1 : \ +(((*(header_ptr + 1) & HIL_PKT_DATA_MASK) + \ + ((*(header_ptr + 2) & HIL_PKT_DATA_MASK)) << 8) \ +* ((*(header_ptr) & HIL_IDD_HEADER_16BIT) ? 100 : 1))) + +#define HIL_IDD_AXIS_MAX(header_ptr, __axnum) \ +((!(*(header_ptr) & HIL_IDD_HEADER_ABS) || \ + (HIL_IDD_NUM_AXES_PER_SET(*(header_ptr)) <= __axnum)) ? 0 : \ + ((HIL_PKT_DATA_MASK & *((header_ptr) + 3 + 2 * __axnum)) + \ + ((HIL_PKT_DATA_MASK & *((header_ptr) + 4 + 2 * __axnum)) << 8))) + +#define HIL_IDD_IOD(header_ptr) \ +(*(header_ptr + HIL_IDD_LEN((*header_ptr)) - 1)) + +#define HIL_IDD_HAS_GEN_PROMPT(header_ptr) \ +((*header_ptr & HIL_IDD_HEADER_IOD) && \ + (HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_PROMPT)) + +#define HIL_IDD_HAS_GEN_PROXIMITY(header_ptr) \ +((*header_ptr & HIL_IDD_HEADER_IOD) && \ + (HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_PROXIMITY)) + +#define HIL_IDD_NUM_BUTTONS(header_ptr) \ +((*header_ptr & HIL_IDD_HEADER_IOD) ? \ + (HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_NBUTTON_MASK) : 0) + +#define HIL_IDD_NUM_PROMPTS(header_ptr) \ +((*header_ptr & HIL_IDD_HEADER_IOD) ? \ + ((HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_NPROMPT_MASK) \ + >> HIL_IDD_IOD_PROMPT_SHIFT) : 0) + +/* The response to HIL EXD commands -- the "extended describe record" */ +#define HIL_EXD_HEADER_WRG 0x03 /* Supports type2 WRG */ +#define HIL_EXD_HEADER_WRG_TYPE1 0x01 /* Supports type1 WRG */ +#define HIL_EXD_HEADER_WRG_TYPE2 0x02 /* Supports type2 WRG */ +#define HIL_EXD_HEADER_RRG 0x04 /* Supports RRG command */ +#define HIL_EXD_HEADER_RNM 0x10 /* Supports RNM command */ +#define HIL_EXD_HEADER_RST 0x20 /* Supports RST command */ +#define HIL_EXD_HEADER_LOCALE 0x40 /* Contains locale code */ + +#define HIL_EXD_NUM_RRG(header_ptr) \ +((*header_ptr & HIL_EXD_HEADER_RRG) ? \ + (*(header_ptr + 1) & HIL_PKT_DATA_MASK) : 0) + +#define HIL_EXD_NUM_WWG(header_ptr) \ +((*header_ptr & HIL_EXD_HEADER_WRG) ? \ + (*(header_ptr + 2 - !(*header_ptr & HIL_EXD_HEADER_RRG)) & \ + HIL_PKT_DATA_MASK) : 0) + +#define HIL_EXD_LEN(header_ptr) \ +(!!(*header_ptr & HIL_EXD_HEADER_RRG) + \ + !!(*header_ptr & HIL_EXD_HEADER_WRG) + \ + !!(*header_ptr & HIL_EXD_HEADER_LOCALE) + \ + 2 * !!(*header_ptr & HIL_EXD_HEADER_WRG_TYPE2) + 1) + +#define HIL_EXD_LOCALE(header_ptr) \ +(!(*header_ptr & HIL_EXD_HEADER_LOCALE) ? 
-1 : \ + (*(header_ptr + HIL_EXD_LEN(header_ptr) - 1) & HIL_PKT_DATA_MASK)) + +#define HIL_EXD_WRG_TYPE2_LEN(header_ptr) \ +(!(*header_ptr & HIL_EXD_HEADER_WRG_TYPE2) ? -1 : \ + (*(header_ptr + HIL_EXD_LEN(header_ptr) - 2 - \ + !!(*header_ptr & HIL_EXD_HEADER_LOCALE)) & HIL_PKT_DATA_MASK) + \ + ((*(header_ptr + HIL_EXD_LEN(header_ptr) - 1 - \ + !!(*header_ptr & HIL_EXD_HEADER_LOCALE)) & HIL_PKT_DATA_MASK) << 8)) + +/* Device locale codes. */ + +/* Last defined locale code. Everything above this is "Reserved", + and note that this same table applies to the Device ID Byte where + keyboards may have a nationality code which is only 5 bits. */ +#define HIL_LOCALE_MAX 0x1f + +/* Map to hopefully useful strings. I was trying to make these look + like locale.aliases strings do; maybe that isn't the right table to + emulate. In either case, I didn't have much to work on. */ +#define HIL_LOCALE_MAP \ +"", /* 0x00 Reserved */ \ +"", /* 0x01 Reserved */ \ +"", /* 0x02 Reserved */ \ +"swiss.french", /* 0x03 Swiss/French */ \ +"portuguese", /* 0x04 Portuguese */ \ +"arabic", /* 0x05 Arabic */ \ +"hebrew", /* 0x06 Hebrew */ \ +"english.canadian", /* 0x07 Canadian English */ \ +"turkish", /* 0x08 Turkish */ \ +"greek", /* 0x09 Greek */ \ +"thai", /* 0x0a Thai (Thailand) */ \ +"italian", /* 0x0b Italian */ \ +"korean", /* 0x0c Hangul (Korea) */ \ +"dutch", /* 0x0d Dutch */ \ +"swedish", /* 0x0e Swedish */ \ +"german", /* 0x0f German */ \ +"chinese", /* 0x10 Chinese-PRC */ \ +"chinese", /* 0x11 Chinese-ROC */ \ +"swiss.french", /* 0x12 Swiss/French II */ \ +"spanish", /* 0x13 Spanish */ \ +"swiss.german", /* 0x14 Swiss/German II */ \ +"flemish", /* 0x15 Belgian (Flemish) */ \ +"finnish", /* 0x16 Finnish */ \ +"english.uk", /* 0x17 United Kingdom */ \ +"french.canadian", /* 0x18 French/Canadian */ \ +"swiss.german", /* 0x19 Swiss/German */ \ +"norwegian", /* 0x1a Norwegian */ \ +"french", /* 0x1b French */ \ +"danish", /* 0x1c Danish */ \ +"japanese", /* 0x1d Katakana */ \ +"spanish", /* 0x1e Latin American/Spanish*/\ +"english.us" /* 0x1f United States */ \ + + +/* HIL keycodes */ +#define HIL_KEYCODES_SET1_TBLSIZE 128 +#define HIL_KEYCODES_SET1 \ + KEY_5, KEY_RESERVED, KEY_RIGHTALT, KEY_LEFTALT, \ + KEY_RIGHTSHIFT, KEY_LEFTSHIFT, KEY_LEFTCTRL, KEY_SYSRQ, \ + KEY_KP4, KEY_KP8, KEY_KP5, KEY_KP9, \ + KEY_KP6, KEY_KP7, KEY_KPCOMMA, KEY_KPENTER, \ + KEY_KP1, KEY_KPSLASH, KEY_KP2, KEY_KPPLUS, \ + KEY_KP3, KEY_KPASTERISK, KEY_KP0, KEY_KPMINUS, \ + KEY_B, KEY_V, KEY_C, KEY_X, \ + KEY_Z, KEY_RESERVED, KEY_RESERVED, KEY_ESC, \ + KEY_6, KEY_F10, KEY_3, KEY_F11, \ + KEY_KPDOT, KEY_F9, KEY_TAB /*KP*/, KEY_F12, \ + KEY_H, KEY_G, KEY_F, KEY_D, \ + KEY_S, KEY_A, KEY_RESERVED, KEY_CAPSLOCK, \ + KEY_U, KEY_Y, KEY_T, KEY_R, \ + KEY_E, KEY_W, KEY_Q, KEY_TAB, \ + KEY_7, KEY_6, KEY_5, KEY_4, \ + KEY_3, KEY_2, KEY_1, KEY_GRAVE, \ + KEY_F13, KEY_F14, KEY_F15, KEY_F16, \ + KEY_F17, KEY_F18, KEY_F19, KEY_F20, \ + KEY_MENU, KEY_F4, KEY_F3, KEY_F2, \ + KEY_F1, KEY_VOLUMEUP, KEY_STOP, KEY_SENDFILE, \ + KEY_SYSRQ, KEY_F5, KEY_F6, KEY_F7, \ + KEY_F8, KEY_VOLUMEDOWN, KEY_DEL_EOL, KEY_DEL_EOS, \ + KEY_8, KEY_9, KEY_0, KEY_MINUS, \ + KEY_EQUAL, KEY_BACKSPACE, KEY_INS_LINE, KEY_DEL_LINE, \ + KEY_I, KEY_O, KEY_P, KEY_LEFTBRACE, \ + KEY_RIGHTBRACE, KEY_BACKSLASH, KEY_INSERT, KEY_DELETE, \ + KEY_J, KEY_K, KEY_L, KEY_SEMICOLON, \ + KEY_APOSTROPHE, KEY_ENTER, KEY_HOME, KEY_PAGEUP, \ + KEY_M, KEY_COMMA, KEY_DOT, KEY_SLASH, \ + KEY_BACKSLASH, KEY_SELECT, KEY_102ND, KEY_PAGEDOWN, \ + KEY_N, KEY_SPACE, KEY_NEXT, KEY_RESERVED, \ + KEY_LEFT, 
KEY_DOWN, KEY_UP, KEY_RIGHT + + +#define HIL_KEYCODES_SET3_TBLSIZE 128 +#define HIL_KEYCODES_SET3 \ + KEY_RESERVED, KEY_ESC, KEY_1, KEY_2, \ + KEY_3, KEY_4, KEY_5, KEY_6, \ + KEY_7, KEY_8, KEY_9, KEY_0, \ + KEY_MINUS, KEY_EQUAL, KEY_BACKSPACE, KEY_TAB, \ + KEY_Q, KEY_W, KEY_E, KEY_R, \ + KEY_T, KEY_Y, KEY_U, KEY_I, \ + KEY_O, KEY_P, KEY_LEFTBRACE, KEY_RIGHTBRACE, \ + KEY_ENTER, KEY_LEFTCTRL, KEY_A, KEY_S, \ + KEY_D, KEY_F, KEY_G, KEY_H, \ + KEY_J, KEY_K, KEY_L, KEY_SEMICOLON, \ + KEY_APOSTROPHE,KEY_GRAVE, KEY_LEFTSHIFT, KEY_BACKSLASH, \ + KEY_Z, KEY_X, KEY_C, KEY_V, \ + KEY_B, KEY_N, KEY_M, KEY_COMMA, \ + KEY_DOT, KEY_SLASH, KEY_RIGHTSHIFT, KEY_KPASTERISK, \ + KEY_LEFTALT, KEY_SPACE, KEY_CAPSLOCK, KEY_F1, \ + KEY_F2, KEY_F3, KEY_F4, KEY_F5, \ + KEY_F6, KEY_F7, KEY_F8, KEY_F9, \ + KEY_F10, KEY_NUMLOCK, KEY_SCROLLLOCK, KEY_KP7, \ + KEY_KP8, KEY_KP9, KEY_KPMINUS, KEY_KP4, \ + KEY_KP5, KEY_KP6, KEY_KPPLUS, KEY_KP1, \ + KEY_KP2, KEY_KP3, KEY_KP0, KEY_KPDOT, \ + KEY_SYSRQ, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \ + KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \ + KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \ + KEY_UP, KEY_LEFT, KEY_DOWN, KEY_RIGHT, \ + KEY_HOME, KEY_PAGEUP, KEY_END, KEY_PAGEDOWN, \ + KEY_INSERT, KEY_DELETE, KEY_102ND, KEY_RESERVED, \ + KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \ + KEY_F1, KEY_F2, KEY_F3, KEY_F4, \ + KEY_F5, KEY_F6, KEY_F7, KEY_F8, \ + KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \ + KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED + + +/* Response to POL command, the "poll record header" */ + +#define HIL_POL_NUM_AXES_MASK 0x03 /* Number of axis reported */ +#define HIL_POL_CTS 0x04 /* Device ready to receive data */ +#define HIL_POL_STATUS_PENDING 0x08 /* Device has status to report */ +#define HIL_POL_CHARTYPE_MASK 0x70 /* Type of character data to follow */ +#define HIL_POL_CHARTYPE_NONE 0x00 /* No character data to follow */ +#define HIL_POL_CHARTYPE_RSVD1 0x10 /* Reserved Set 1 */ +#define HIL_POL_CHARTYPE_ASCII 0x20 /* U.S. ASCII */ +#define HIL_POL_CHARTYPE_BINARY 0x30 /* Binary data */ +#define HIL_POL_CHARTYPE_SET1 0x40 /* Keycode Set 1 */ +#define HIL_POL_CHARTYPE_RSVD2 0x50 /* Reserved Set 2 */ +#define HIL_POL_CHARTYPE_SET2 0x60 /* Keycode Set 2 */ +#define HIL_POL_CHARTYPE_SET3 0x70 /* Keycode Set 3 */ +#define HIL_POL_AXIS_ALT 0x80 /* Data is from axis set 2 */ + + +#endif /* _HIL_H_ */ diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h new file mode 100644 index 000000000..369221fd5 --- /dev/null +++ b/include/linux/hil_mlc.h @@ -0,0 +1,168 @@ +/* + * HP Human Interface Loop Master Link Controller driver. + * + * Copyright (c) 2001 Brian S. Julin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL"). 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * + * References: + * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A + * + */ + +#include +#include +#include +#include +#include +#include + +typedef struct hil_mlc hil_mlc; + +/* The HIL has a complicated state engine. + * We define the structure of nodes in the state engine here. + */ +enum hilse_act { + /* HILSE_OUT prepares to receive input if the next node + * is an IN or EXPECT, and then sends the given packet. + */ + HILSE_OUT = 0, + + /* HILSE_CTS checks if the loop is busy. */ + HILSE_CTS, + + /* HILSE_OUT_LAST sends the given command packet to + * the last configured/running device on the loop. + */ + HILSE_OUT_LAST, + + /* HILSE_OUT_DISC sends the given command packet to + * the next device past the last configured/running one. + */ + HILSE_OUT_DISC, + + /* HILSE_FUNC runs a callback function with given arguments. + * a positive return value causes the "ugly" branch to be taken. + */ + HILSE_FUNC, + + /* HILSE_IN simply expects any non-errored packet to arrive + * within arg usecs. + */ + HILSE_IN = 0x100, + + /* HILSE_EXPECT expects a particular packet to arrive + * within arg usecs, any other packet is considered an error. + */ + HILSE_EXPECT, + + /* HILSE_EXPECT_LAST as above but dev field should be last + * discovered/operational device. + */ + HILSE_EXPECT_LAST, + + /* HILSE_EXPECT_LAST as above but dev field should be first + * undiscovered/inoperational device. + */ + HILSE_EXPECT_DISC +}; + +typedef int (hilse_func) (hil_mlc *mlc, int arg); +struct hilse_node { + enum hilse_act act; /* How to process this node */ + union { + hilse_func *func; /* Function to call if HILSE_FUNC */ + hil_packet packet; /* Packet to send or to compare */ + } object; + int arg; /* Timeout in usec or parm for func */ + int good; /* Node to jump to on success */ + int bad; /* Node to jump to on error */ + int ugly; /* Node to jump to on timeout */ +}; + +/* Methods for back-end drivers, e.g. 
hp_sdc_mlc */ +typedef int (hil_mlc_cts) (hil_mlc *mlc); +typedef int (hil_mlc_out) (hil_mlc *mlc); +typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout); + +struct hil_mlc_devinfo { + uint8_t idd[16]; /* Device ID Byte and Describe Record */ + uint8_t rsc[16]; /* Security Code Header and Record */ + uint8_t exd[16]; /* Extended Describe Record */ + uint8_t rnm[16]; /* Device name as returned by RNM command */ +}; + +struct hil_mlc_serio_map { + hil_mlc *mlc; + int di_revmap; + int didx; +}; + +/* How many (possibly old/detached) devices the we try to keep track of */ +#define HIL_MLC_DEVMEM 16 + +struct hil_mlc { + struct list_head list; /* hil_mlc is organized as linked list */ + + rwlock_t lock; + + void *priv; /* Data specific to a particular type of MLC */ + + int seidx; /* Current node in state engine */ + int istarted, ostarted; + + hil_mlc_cts *cts; + struct semaphore csem; /* Raised when loop idle */ + + hil_mlc_out *out; + struct semaphore osem; /* Raised when outpacket dispatched */ + hil_packet opacket; + + hil_mlc_in *in; + struct semaphore isem; /* Raised when a packet arrives */ + hil_packet ipacket[16]; + hil_packet imatch; + int icount; + unsigned long instart; + unsigned long intimeout; + + int ddi; /* Last operational device id */ + int lcv; /* LCV to throttle loops */ + time64_t lcv_time; /* Time loop was started */ + + int di_map[7]; /* Maps below items to live devs */ + struct hil_mlc_devinfo di[HIL_MLC_DEVMEM]; + struct serio *serio[HIL_MLC_DEVMEM]; + struct hil_mlc_serio_map serio_map[HIL_MLC_DEVMEM]; + hil_packet serio_opacket[HIL_MLC_DEVMEM]; + int serio_oidx[HIL_MLC_DEVMEM]; + struct hil_mlc_devinfo di_scratch; /* Temporary area */ + + int opercnt; + + struct tasklet_struct *tasklet; +}; + +int hil_mlc_register(hil_mlc *mlc); +int hil_mlc_unregister(hil_mlc *mlc); diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h new file mode 100644 index 000000000..402f99e32 --- /dev/null +++ b/include/linux/hippidevice.h @@ -0,0 +1,40 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the HIPPI handlers. + * + * Version: @(#)hippidevice.h 1.0.0 05/26/97 + * + * Author: Jes Sorensen, + * + * hippidevice.h is based on previous fddidevice.h work by + * Ross Biro + * Fred N. van Kempen, + * Alan Cox, + * Lawrence V. Stefani, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_HIPPIDEVICE_H +#define _LINUX_HIPPIDEVICE_H + +#include + +#ifdef __KERNEL__ + +struct hippi_cb { + __u32 ifield; +}; + +__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); +int hippi_mac_addr(struct net_device *dev, void *p); +int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p); +struct net_device *alloc_hippi_dev(int sizeof_priv); +#endif + +#endif /* _LINUX_HIPPIDEVICE_H */ diff --git a/include/linux/hmm.h b/include/linux/hmm.h new file mode 100644 index 000000000..5ec8635f6 --- /dev/null +++ b/include/linux/hmm.h @@ -0,0 +1,562 @@ +/* + * Copyright 2013 Red Hat Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Authors: Jérôme Glisse + */ +/* + * Heterogeneous Memory Management (HMM) + * + * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and it + * is for. Here we focus on the HMM API description, with some explanation of + * the underlying implementation. + * + * Short description: HMM provides a set of helpers to share a virtual address + * space between CPU and a device, so that the device can access any valid + * address of the process (while still obeying memory protection). HMM also + * provides helpers to migrate process memory to device memory, and back. Each + * set of functionality (address space mirroring, and migration to and from + * device memory) can be used independently of the other. + * + * + * HMM address space mirroring API: + * + * Use HMM address space mirroring if you want to mirror range of the CPU page + * table of a process into a device page table. Here, "mirror" means "keep + * synchronized". Prerequisites: the device must provide the ability to write- + * protect its page tables (at PAGE_SIZE granularity), and must be able to + * recover from the resulting potential page faults. + * + * HMM guarantees that at any point in time, a given virtual address points to + * either the same memory in both CPU and device page tables (that is: CPU and + * device page tables each point to the same pages), or that one page table (CPU + * or device) points to no entry, while the other still points to the old page + * for the address. The latter case happens when the CPU page table update + * happens first, and then the update is mirrored over to the device page table. + * This does not cause any issue, because the CPU page table cannot start + * pointing to a new page until the device page table is invalidated. + * + * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any + * updates to each device driver that has registered a mirror. It also provides + * some API calls to help with taking a snapshot of the CPU page table, and to + * synchronize with any updates that might happen concurrently. + * + * + * HMM migration to and from device memory: + * + * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with + * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page + * of the device memory, and allows the device driver to manage its memory + * using those struct pages. Having struct pages for device memory makes + * migration easier. Because that memory is not addressable by the CPU it must + * never be pinned to the device; in other words, any CPU page fault can always + * cause the device memory to be migrated (copied/moved) back to regular memory. + * + * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that + * allows use of a device DMA engine to perform the copy operation between + * regular system memory and device memory. 
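A hedged sketch of the mirroring side from a driver's point of view, using the hmm_mirror/hmm_mirror_ops types declared further down in this header; struct foo_dev and its helper are hypothetical:

struct foo_dev {                                /* hypothetical driver state */
        struct hmm_mirror       mirror;
        struct mutex            pt_lock;        /* serializes device page-table updates */
        /* device page table, command queues, ... */
};

static void foo_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
                                           enum hmm_update_type update_type,
                                           unsigned long start,
                                           unsigned long end)
{
        struct foo_dev *fdev = container_of(mirror, struct foo_dev, mirror);

        /*
         * Must not return until the device no longer references pages in
         * [start, end) and its TLBs are flushed -- this call is synchronous.
         */
        mutex_lock(&fdev->pt_lock);
        foo_invalidate_device_range(fdev, start, end);  /* hypothetical helper */
        mutex_unlock(&fdev->pt_lock);
}

static const struct hmm_mirror_ops foo_mirror_ops = {
        .sync_cpu_device_pagetables = foo_sync_cpu_device_pagetables,
};

/* Registration against a process address space, typically at bind time:
 *      fdev->mirror.ops = &foo_mirror_ops;
 *      ret = hmm_mirror_register(&fdev->mirror, mm);
 */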
+ */ +#ifndef LINUX_HMM_H +#define LINUX_HMM_H + +#include + +#if IS_ENABLED(CONFIG_HMM) + +#include +#include +#include +#include + +struct hmm; + +/* + * hmm_pfn_flag_e - HMM flag enums + * + * Flags: + * HMM_PFN_VALID: pfn is valid. It has, at least, read permission. + * HMM_PFN_WRITE: CPU page table has write permission set + * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE) + * + * The driver provide a flags array, if driver valid bit for an entry is bit + * 3 ie (entry & (1 << 3)) is true if entry is valid then driver must provide + * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3. + * Same logic apply to all flags. This is same idea as vm_page_prot in vma + * except that this is per device driver rather than per architecture. + */ +enum hmm_pfn_flag_e { + HMM_PFN_VALID = 0, + HMM_PFN_WRITE, + HMM_PFN_DEVICE_PRIVATE, + HMM_PFN_FLAG_MAX +}; + +/* + * hmm_pfn_value_e - HMM pfn special value + * + * Flags: + * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory + * HMM_PFN_NONE: corresponding CPU page table entry is pte_none() + * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the + * result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not + * be mirrored by a device, because the entry will never have HMM_PFN_VALID + * set and the pfn value is undefined. + * + * Driver provide entry value for none entry, error entry and special entry, + * driver can alias (ie use same value for error and special for instance). It + * should not alias none and error or special. + * + * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be: + * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous, + * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table + * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one + */ +enum hmm_pfn_value_e { + HMM_PFN_ERROR, + HMM_PFN_NONE, + HMM_PFN_SPECIAL, + HMM_PFN_VALUE_MAX +}; + +/* + * struct hmm_range - track invalidation lock on virtual address range + * + * @vma: the vm area struct for the range + * @list: all range lock are on a list + * @start: range virtual start address (inclusive) + * @end: range virtual end address (exclusive) + * @pfns: array of pfns (big enough for the range) + * @flags: pfn flags to match device driver page table + * @values: pfn value for some special case (none, special, error, ...) + * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT) + * @valid: pfns array did not change since it has been fill by an HMM function + */ +struct hmm_range { + struct vm_area_struct *vma; + struct list_head list; + unsigned long start; + unsigned long end; + uint64_t *pfns; + const uint64_t *flags; + const uint64_t *values; + uint8_t pfn_shift; + bool valid; +}; + +/* + * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn + * @range: range use to decode HMM pfn value + * @pfn: HMM pfn value to get corresponding struct page from + * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise + * + * If the HMM pfn is valid (ie valid flag set) then return the struct page + * matching the pfn value stored in the HMM pfn. Otherwise return NULL. 
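A sketch of the driver-supplied encoding described above, reusing the "valid bit is bit 3" example; the foo_ arrays and this particular bit layout are hypothetical:

static const uint64_t foo_range_flags[HMM_PFN_FLAG_MAX] = {
        [HMM_PFN_VALID]          = 1ULL << 3,   /* matches the "bit 3" example above */
        [HMM_PFN_WRITE]          = 1ULL << 4,
        [HMM_PFN_DEVICE_PRIVATE] = 1ULL << 5,
};

static const uint64_t foo_range_values[HMM_PFN_VALUE_MAX] = {
        [HMM_PFN_NONE]    = 0x0ULL,
        [HMM_PFN_ERROR]   = 0x1ULL,
        [HMM_PFN_SPECIAL] = 0x2ULL,             /* must not alias NONE or ERROR */
};

/* A range would then be set up along the lines of:
 *      range.flags = foo_range_flags;
 *      range.values = foo_range_values;
 *      range.pfn_shift = 6;    // pfn stored above the flag bits, <= PAGE_SHIFT
 */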
+ */ +static inline struct page *hmm_pfn_to_page(const struct hmm_range *range, + uint64_t pfn) +{ + if (pfn == range->values[HMM_PFN_NONE]) + return NULL; + if (pfn == range->values[HMM_PFN_ERROR]) + return NULL; + if (pfn == range->values[HMM_PFN_SPECIAL]) + return NULL; + if (!(pfn & range->flags[HMM_PFN_VALID])) + return NULL; + return pfn_to_page(pfn >> range->pfn_shift); +} + +/* + * hmm_pfn_to_pfn() - return pfn value store in a HMM pfn + * @range: range use to decode HMM pfn value + * @pfn: HMM pfn value to extract pfn from + * Returns: pfn value if HMM pfn is valid, -1UL otherwise + */ +static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range, + uint64_t pfn) +{ + if (pfn == range->values[HMM_PFN_NONE]) + return -1UL; + if (pfn == range->values[HMM_PFN_ERROR]) + return -1UL; + if (pfn == range->values[HMM_PFN_SPECIAL]) + return -1UL; + if (!(pfn & range->flags[HMM_PFN_VALID])) + return -1UL; + return (pfn >> range->pfn_shift); +} + +/* + * hmm_pfn_from_page() - create a valid HMM pfn value from struct page + * @range: range use to encode HMM pfn value + * @page: struct page pointer for which to create the HMM pfn + * Returns: valid HMM pfn for the page + */ +static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range, + struct page *page) +{ + return (page_to_pfn(page) << range->pfn_shift) | + range->flags[HMM_PFN_VALID]; +} + +/* + * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn + * @range: range use to encode HMM pfn value + * @pfn: pfn value for which to create the HMM pfn + * Returns: valid HMM pfn for the pfn + */ +static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, + unsigned long pfn) +{ + return (pfn << range->pfn_shift) | + range->flags[HMM_PFN_VALID]; +} + + +#if IS_ENABLED(CONFIG_HMM_MIRROR) +/* + * Mirroring: how to synchronize device page table with CPU page table. + * + * A device driver that is participating in HMM mirroring must always + * synchronize with CPU page table updates. For this, device drivers can either + * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device + * drivers can decide to register one mirror per device per process, or just + * one mirror per process for a group of devices. The pattern is: + * + * int device_bind_address_space(..., struct mm_struct *mm, ...) + * { + * struct device_address_space *das; + * + * // Device driver specific initialization, and allocation of das + * // which contains an hmm_mirror struct as one of its fields. + * ... + * + * ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops); + * if (ret) { + * // Cleanup on error + * return ret; + * } + * + * // Other device driver specific initialization + * ... + * } + * + * Once an hmm_mirror is registered for an address space, the device driver + * will get callbacks through sync_cpu_device_pagetables() operation (see + * hmm_mirror_ops struct). + * + * Device driver must not free the struct containing the hmm_mirror struct + * before calling hmm_mirror_unregister(). The expected usage is to do that when + * the device driver is unbinding from an address space. + * + * + * void device_unbind_address_space(struct device_address_space *das) + * { + * // Device driver specific cleanup + * ... + * + * hmm_mirror_unregister(&das->mirror); + * + * // Other device driver specific cleanup, and now das can be freed + * ... 
+ * } + */ + +struct hmm_mirror; + +/* + * enum hmm_update_type - type of update + * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why) + */ +enum hmm_update_type { + HMM_UPDATE_INVALIDATE, +}; + +/* + * struct hmm_mirror_ops - HMM mirror device operations callback + * + * @update: callback to update range on a device + */ +struct hmm_mirror_ops { + /* release() - release hmm_mirror + * + * @mirror: pointer to struct hmm_mirror + * + * This is called when the mm_struct is being released. + * The callback should make sure no references to the mirror occur + * after the callback returns. + */ + void (*release)(struct hmm_mirror *mirror); + + /* sync_cpu_device_pagetables() - synchronize page tables + * + * @mirror: pointer to struct hmm_mirror + * @update_type: type of update that occurred to the CPU page table + * @start: virtual start address of the range to update + * @end: virtual end address of the range to update + * + * This callback ultimately originates from mmu_notifiers when the CPU + * page table is updated. The device driver must update its page table + * in response to this callback. The update argument tells what action + * to perform. + * + * The device driver must not return from this callback until the device + * page tables are completely updated (TLBs flushed, etc); this is a + * synchronous call. + */ + void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror, + enum hmm_update_type update_type, + unsigned long start, + unsigned long end); +}; + +/* + * struct hmm_mirror - mirror struct for a device driver + * + * @hmm: pointer to struct hmm (which is unique per mm_struct) + * @ops: device driver callback for HMM mirror operations + * @list: for list of mirrors of a given mm + * + * Each address space (mm_struct) being mirrored by a device must register one + * instance of an hmm_mirror struct with HMM. HMM will track the list of all + * mirrors for each mm_struct. + */ +struct hmm_mirror { + struct hmm *hmm; + const struct hmm_mirror_ops *ops; + struct list_head list; +}; + +int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm); +void hmm_mirror_unregister(struct hmm_mirror *mirror); + + +/* + * To snapshot the CPU page table, call hmm_vma_get_pfns(), then take a device + * driver lock that serializes device page table updates, then call + * hmm_vma_range_done(), to check if the snapshot is still valid. The same + * device driver page table update lock must also be used in the + * hmm_mirror_ops.sync_cpu_device_pagetables() callback, so that CPU page + * table invalidation serializes on it. + * + * YOU MUST CALL hmm_vma_range_done() ONCE AND ONLY ONCE EACH TIME YOU CALL + * hmm_vma_get_pfns() WITHOUT ERROR ! + * + * IF YOU DO NOT FOLLOW THE ABOVE RULE THE SNAPSHOT CONTENT MIGHT BE INVALID ! + */ +int hmm_vma_get_pfns(struct hmm_range *range); +bool hmm_vma_range_done(struct hmm_range *range); + + +/* + * Fault memory on behalf of device driver. Unlike handle_mm_fault(), this will + * not migrate any device memory back to system memory. The HMM pfn array will + * be updated with the fault result and current snapshot of the CPU page table + * for the range. + * + * The mmap_sem must be taken in read mode before entering and it might be + * dropped by the function if the block argument is false. In that case, the + * function returns -EAGAIN. + * + * Return value does not reflect if the fault was successful for every single + * address or not. 
Therefore, the caller must to inspect the HMM pfn array to + * determine fault status for each address. + * + * Trying to fault inside an invalid vma will result in -EINVAL. + * + * See the function description in mm/hmm.c for further documentation. + */ +int hmm_vma_fault(struct hmm_range *range, bool block); + +/* Below are for HMM internal use only! Not to be used by device driver! */ +void hmm_mm_destroy(struct mm_struct *mm); + +static inline void hmm_mm_init(struct mm_struct *mm) +{ + mm->hmm = NULL; +} +#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */ +static inline void hmm_mm_destroy(struct mm_struct *mm) {} +static inline void hmm_mm_init(struct mm_struct *mm) {} +#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ + +#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC) +struct hmm_devmem; + +struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma, + unsigned long addr); + +/* + * struct hmm_devmem_ops - callback for ZONE_DEVICE memory events + * + * @free: call when refcount on page reach 1 and thus is no longer use + * @fault: call when there is a page fault to unaddressable memory + * + * Both callback happens from page_free() and page_fault() callback of struct + * dev_pagemap respectively. See include/linux/memremap.h for more details on + * those. + * + * The hmm_devmem_ops callback are just here to provide a coherent and + * uniq API to device driver and device driver should not register their + * own page_free() or page_fault() but rely on the hmm_devmem_ops call- + * back. + */ +struct hmm_devmem_ops { + /* + * free() - free a device page + * @devmem: device memory structure (see struct hmm_devmem) + * @page: pointer to struct page being freed + * + * Call back occurs whenever a device page refcount reach 1 which + * means that no one is holding any reference on the page anymore + * (ZONE_DEVICE page have an elevated refcount of 1 as default so + * that they are not release to the general page allocator). + * + * Note that callback has exclusive ownership of the page (as no + * one is holding any reference). + */ + void (*free)(struct hmm_devmem *devmem, struct page *page); + /* + * fault() - CPU page fault or get user page (GUP) + * @devmem: device memory structure (see struct hmm_devmem) + * @vma: virtual memory area containing the virtual address + * @addr: virtual address that faulted or for which there is a GUP + * @page: pointer to struct page backing virtual address (unreliable) + * @flags: FAULT_FLAG_* (see include/linux/mm.h) + * @pmdp: page middle directory + * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR + * on error + * + * The callback occurs whenever there is a CPU page fault or GUP on a + * virtual address. This means that the device driver must migrate the + * page back to regular memory (CPU accessible). + * + * The device driver is free to migrate more than one page from the + * fault() callback as an optimization. However if device decide to + * migrate more than one page it must always priotirize the faulting + * address over the others. + * + * The struct page pointer is only given as an hint to allow quick + * lookup of internal device driver data. A concurrent migration + * might have already free that page and the virtual address might + * not longer be back by it. So it should not be modified by the + * callback. + * + * Note that mmap semaphore is held in read mode at least when this + * callback occurs, hence the vma is valid upon callback entry. 
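+ *
+ * A skeleton implementation (illustrative only, with the body elided)
+ * therefore has the following shape:
+ *
+ *	static int my_devmem_fault(struct hmm_devmem *devmem,
+ *				   struct vm_area_struct *vma,
+ *				   unsigned long addr,
+ *				   const struct page *page,
+ *				   unsigned int flags,
+ *				   pmd_t *pmdp)
+ *	{
+ *		// Copy/migrate the page backing addr to system memory,
+ *		// typically by driving the device DMA engine through
+ *		// the migrate helpers.
+ *		...
+ *		return VM_FAULT_MAJOR;
+ *	}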
+ */ + int (*fault)(struct hmm_devmem *devmem, + struct vm_area_struct *vma, + unsigned long addr, + const struct page *page, + unsigned int flags, + pmd_t *pmdp); +}; + +/* + * struct hmm_devmem - track device memory + * + * @completion: completion object for device memory + * @pfn_first: first pfn for this resource (set by hmm_devmem_add()) + * @pfn_last: last pfn for this resource (set by hmm_devmem_add()) + * @resource: IO resource reserved for this chunk of memory + * @pagemap: device page map for that chunk + * @device: device to bind resource to + * @ops: memory operations callback + * @ref: per CPU refcount + * + * This an helper structure for device drivers that do not wish to implement + * the gory details related to hotplugging new memoy and allocating struct + * pages. + * + * Device drivers can directly use ZONE_DEVICE memory on their own if they + * wish to do so. + */ +struct hmm_devmem { + struct completion completion; + unsigned long pfn_first; + unsigned long pfn_last; + struct resource *resource; + struct device *device; + struct dev_pagemap pagemap; + const struct hmm_devmem_ops *ops; + struct percpu_ref ref; +}; + +/* + * To add (hotplug) device memory, HMM assumes that there is no real resource + * that reserves a range in the physical address space (this is intended to be + * use by unaddressable device memory). It will reserve a physical range big + * enough and allocate struct page for it. + * + * The device driver can wrap the hmm_devmem struct inside a private device + * driver struct. + */ +struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, + struct device *device, + unsigned long size); +struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, + struct device *device, + struct resource *res); + +/* + * hmm_devmem_page_set_drvdata - set per-page driver data field + * + * @page: pointer to struct page + * @data: driver data value to set + * + * Because page can not be on lru we have an unsigned long that driver can use + * to store a per page field. This just a simple helper to do that. + */ +static inline void hmm_devmem_page_set_drvdata(struct page *page, + unsigned long data) +{ + page->hmm_data = data; +} + +/* + * hmm_devmem_page_get_drvdata - get per page driver data field + * + * @page: pointer to struct page + * Return: driver data value + */ +static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page) +{ + return page->hmm_data; +} + + +/* + * struct hmm_device - fake device to hang device memory onto + * + * @device: device struct + * @minor: device minor number + */ +struct hmm_device { + struct device device; + unsigned int minor; +}; + +/* + * A device driver that wants to handle multiple devices memory through a + * single fake device can use hmm_device to do so. This is purely a helper and + * it is not strictly needed, in order to make use of any HMM functionality. + */ +struct hmm_device *hmm_device_new(void *drvdata); +void hmm_device_put(struct hmm_device *hmm_device); +#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ +#else /* IS_ENABLED(CONFIG_HMM) */ +static inline void hmm_mm_destroy(struct mm_struct *mm) {} +static inline void hmm_mm_init(struct mm_struct *mm) {} +#endif /* IS_ENABLED(CONFIG_HMM) */ + +#endif /* LINUX_HMM_H */ diff --git a/include/linux/host1x.h b/include/linux/host1x.h new file mode 100644 index 000000000..aef6e2f73 --- /dev/null +++ b/include/linux/host1x.h @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __LINUX_HOST1X_H +#define __LINUX_HOST1X_H + +#include +#include + +enum host1x_class { + HOST1X_CLASS_HOST1X = 0x1, + HOST1X_CLASS_GR2D = 0x51, + HOST1X_CLASS_GR2D_SB = 0x52, + HOST1X_CLASS_VIC = 0x5D, + HOST1X_CLASS_GR3D = 0x60, +}; + +struct host1x_client; + +/** + * struct host1x_client_ops - host1x client operations + * @init: host1x client initialization code + * @exit: host1x client tear down code + */ +struct host1x_client_ops { + int (*init)(struct host1x_client *client); + int (*exit)(struct host1x_client *client); +}; + +/** + * struct host1x_client - host1x client structure + * @list: list node for the host1x client + * @parent: pointer to struct device representing the host1x controller + * @dev: pointer to struct device backing this host1x client + * @ops: host1x client operations + * @class: host1x class represented by this client + * @channel: host1x channel associated with this client + * @syncpts: array of syncpoints requested for this client + * @num_syncpts: number of syncpoints requested for this client + */ +struct host1x_client { + struct list_head list; + struct device *parent; + struct device *dev; + + const struct host1x_client_ops *ops; + + enum host1x_class class; + struct host1x_channel *channel; + + struct host1x_syncpt **syncpts; + unsigned int num_syncpts; +}; + +/* + * host1x buffer objects + */ + +struct host1x_bo; +struct sg_table; + +struct host1x_bo_ops { + struct host1x_bo *(*get)(struct host1x_bo *bo); + void (*put)(struct host1x_bo *bo); + dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt); + void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt); + void *(*mmap)(struct host1x_bo *bo); + void (*munmap)(struct host1x_bo *bo, void *addr); + void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum); + void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr); +}; + +struct host1x_bo { + const struct host1x_bo_ops *ops; +}; + +static inline void host1x_bo_init(struct host1x_bo *bo, + const struct host1x_bo_ops *ops) +{ + bo->ops = ops; +} + +static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo) +{ + return bo->ops->get(bo); +} + +static inline void host1x_bo_put(struct host1x_bo *bo) +{ + bo->ops->put(bo); +} + +static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo, + struct sg_table **sgt) +{ + return bo->ops->pin(bo, sgt); +} + +static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) +{ + bo->ops->unpin(bo, sgt); +} + +static inline void *host1x_bo_mmap(struct host1x_bo *bo) +{ + return bo->ops->mmap(bo); +} + +static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr) +{ + bo->ops->munmap(bo, addr); +} + +static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum) +{ + return bo->ops->kmap(bo, pagenum); +} + +static 
inline void host1x_bo_kunmap(struct host1x_bo *bo, + unsigned int pagenum, void *addr) +{ + bo->ops->kunmap(bo, pagenum, addr); +} + +/* + * host1x syncpoints + */ + +#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0) +#define HOST1X_SYNCPT_HAS_BASE (1 << 1) + +struct host1x_syncpt_base; +struct host1x_syncpt; +struct host1x; + +struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id); +u32 host1x_syncpt_id(struct host1x_syncpt *sp); +u32 host1x_syncpt_read_min(struct host1x_syncpt *sp); +u32 host1x_syncpt_read_max(struct host1x_syncpt *sp); +u32 host1x_syncpt_read(struct host1x_syncpt *sp); +int host1x_syncpt_incr(struct host1x_syncpt *sp); +u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs); +int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, + u32 *value); +struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, + unsigned long flags); +void host1x_syncpt_free(struct host1x_syncpt *sp); + +struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp); +u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); + +/* + * host1x channel + */ + +struct host1x_channel; +struct host1x_job; + +struct host1x_channel *host1x_channel_request(struct device *dev); +struct host1x_channel *host1x_channel_get(struct host1x_channel *channel); +void host1x_channel_put(struct host1x_channel *channel); +int host1x_job_submit(struct host1x_job *job); + +/* + * host1x job + */ + +struct host1x_reloc { + struct { + struct host1x_bo *bo; + unsigned long offset; + } cmdbuf; + struct { + struct host1x_bo *bo; + unsigned long offset; + } target; + unsigned long shift; +}; + +struct host1x_job { + /* When refcount goes to zero, job can be freed */ + struct kref ref; + + /* List entry */ + struct list_head list; + + /* Channel where job is submitted to */ + struct host1x_channel *channel; + + /* client where the job originated */ + struct host1x_client *client; + + /* Gathers and their memory */ + struct host1x_job_gather *gathers; + unsigned int num_gathers; + + /* Array of handles to be pinned & unpinned */ + struct host1x_reloc *relocs; + unsigned int num_relocs; + struct host1x_job_unpin_data *unpins; + unsigned int num_unpins; + + dma_addr_t *addr_phys; + dma_addr_t *gather_addr_phys; + dma_addr_t *reloc_addr_phys; + + /* Sync point id, number of increments and end related to the submit */ + u32 syncpt_id; + u32 syncpt_incrs; + u32 syncpt_end; + + /* Maximum time to wait for this job */ + unsigned int timeout; + + /* Index and number of slots used in the push buffer */ + unsigned int first_get; + unsigned int num_slots; + + /* Copy of gathers */ + size_t gather_copy_size; + dma_addr_t gather_copy; + u8 *gather_copy_mapped; + + /* Check if register is marked as an address reg */ + int (*is_addr_reg)(struct device *dev, u32 class, u32 reg); + + /* Check if class belongs to the unit */ + int (*is_valid_class)(u32 class); + + /* Request a SETCLASS to this class */ + u32 class; + + /* Add a channel wait for previous ops to complete */ + bool serialize; +}; + +struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, + u32 num_cmdbufs, u32 num_relocs); +void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, + unsigned int words, unsigned int offset); +struct host1x_job *host1x_job_get(struct host1x_job *job); +void host1x_job_put(struct host1x_job *job); +int host1x_job_pin(struct host1x_job *job, struct device *dev); +void host1x_job_unpin(struct host1x_job *job); + +/* + * subdevice probe infrastructure + */ + 
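+/*
+ * A typical client (illustrative sketch; my_client, my_probe and the ops
+ * callbacks are placeholder names) embeds a struct host1x_client, fills in
+ * host1x_client_ops, and registers itself from its own probe routine:
+ *
+ *	static const struct host1x_client_ops my_client_ops = {
+ *		.init = my_client_init,
+ *		.exit = my_client_exit,
+ *	};
+ *
+ *	static int my_probe(struct platform_device *pdev)
+ *	{
+ *		struct my_client *my;
+ *
+ *		my = devm_kzalloc(&pdev->dev, sizeof(*my), GFP_KERNEL);
+ *		if (!my)
+ *			return -ENOMEM;
+ *
+ *		my->client.ops = &my_client_ops;
+ *		my->client.dev = &pdev->dev;
+ *		return host1x_client_register(&my->client);
+ *	}
+ *
+ * The matching logical driver is registered with host1x_driver_register()
+ * (declared below) and is bound once its subdevices have registered.
+ */
+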
+struct host1x_device; + +/** + * struct host1x_driver - host1x logical device driver + * @driver: core driver + * @subdevs: table of OF device IDs matching subdevices for this driver + * @list: list node for the driver + * @probe: called when the host1x logical device is probed + * @remove: called when the host1x logical device is removed + * @shutdown: called when the host1x logical device is shut down + */ +struct host1x_driver { + struct device_driver driver; + + const struct of_device_id *subdevs; + struct list_head list; + + int (*probe)(struct host1x_device *device); + int (*remove)(struct host1x_device *device); + void (*shutdown)(struct host1x_device *device); +}; + +static inline struct host1x_driver * +to_host1x_driver(struct device_driver *driver) +{ + return container_of(driver, struct host1x_driver, driver); +} + +int host1x_driver_register_full(struct host1x_driver *driver, + struct module *owner); +void host1x_driver_unregister(struct host1x_driver *driver); + +#define host1x_driver_register(driver) \ + host1x_driver_register_full(driver, THIS_MODULE) + +struct host1x_device { + struct host1x_driver *driver; + struct list_head list; + struct device dev; + + struct mutex subdevs_lock; + struct list_head subdevs; + struct list_head active; + + struct mutex clients_lock; + struct list_head clients; + + bool registered; + + struct device_dma_parameters dma_parms; +}; + +static inline struct host1x_device *to_host1x_device(struct device *dev) +{ + return container_of(dev, struct host1x_device, dev); +} + +int host1x_device_init(struct host1x_device *device); +int host1x_device_exit(struct host1x_device *device); + +int host1x_client_register(struct host1x_client *client); +int host1x_client_unregister(struct host1x_client *client); + +struct tegra_mipi_device; + +struct tegra_mipi_device *tegra_mipi_request(struct device *device); +void tegra_mipi_free(struct tegra_mipi_device *device); +int tegra_mipi_enable(struct tegra_mipi_device *device); +int tegra_mipi_disable(struct tegra_mipi_device *device); +int tegra_mipi_calibrate(struct tegra_mipi_device *device); + +#endif diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h new file mode 100644 index 000000000..6f1dee7e6 --- /dev/null +++ b/include/linux/hp_sdc.h @@ -0,0 +1,301 @@ +/* + * HP i8042 System Device Controller -- header + * + * Copyright (c) 2001 Brian S. Julin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL"). + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * + * References: + * + * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A + * + * System Device Controller Microprocessor Firmware Theory of Operation + * for Part Number 1820-4784 Revision B. Dwg No. A-1820-4784-2 + * + */ + +#ifndef _LINUX_HP_SDC_H +#define _LINUX_HP_SDC_H + +#include +#include +#include +#include +#if defined(__hppa__) +#include +#endif + + +/* No 4X status reads take longer than this (in usec). + */ +#define HP_SDC_MAX_REG_DELAY 20000 + +typedef void (hp_sdc_irqhook) (int irq, void *dev_id, + uint8_t status, uint8_t data); + +int hp_sdc_request_timer_irq(hp_sdc_irqhook *callback); +int hp_sdc_request_hil_irq(hp_sdc_irqhook *callback); +int hp_sdc_request_cooked_irq(hp_sdc_irqhook *callback); +int hp_sdc_release_timer_irq(hp_sdc_irqhook *callback); +int hp_sdc_release_hil_irq(hp_sdc_irqhook *callback); +int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback); + +typedef struct { + int actidx; /* Start of act. Acts are atomic WRT I/O to SDC */ + int idx; /* Index within the act */ + int endidx; /* transaction is over and done if idx == endidx */ + uint8_t *seq; /* commands/data for the transaction */ + union { + hp_sdc_irqhook *irqhook; /* Callback, isr or tasklet context */ + struct semaphore *semaphore; /* Semaphore to sleep on. */ + } act; +} hp_sdc_transaction; +int __hp_sdc_enqueue_transaction(hp_sdc_transaction *this); +int hp_sdc_enqueue_transaction(hp_sdc_transaction *this); +int hp_sdc_dequeue_transaction(hp_sdc_transaction *this); + +/* The HP_SDC_ACT* values are peculiar to this driver. + * Nuance: never HP_SDC_ACT_DATAIN | HP_SDC_ACT_DEALLOC, use another + * act to perform the dealloc. + */ +#define HP_SDC_ACT_PRECMD 0x01 /* Send a command first */ +#define HP_SDC_ACT_DATAREG 0x02 /* Set data registers */ +#define HP_SDC_ACT_DATAOUT 0x04 /* Send data bytes */ +#define HP_SDC_ACT_POSTCMD 0x08 /* Send command after */ +#define HP_SDC_ACT_DATAIN 0x10 /* Collect data after */ +#define HP_SDC_ACT_DURING 0x1f +#define HP_SDC_ACT_SEMAPHORE 0x20 /* Raise semaphore after */ +#define HP_SDC_ACT_CALLBACK 0x40 /* Pass data to IRQ handler */ +#define HP_SDC_ACT_DEALLOC 0x80 /* Destroy transaction after */ +#define HP_SDC_ACT_AFTER 0xe0 +#define HP_SDC_ACT_DEAD 0x60 /* Act timed out. 
*/ + +/* Rest of the flags are straightforward representation of the SDC interface */ +#define HP_SDC_STATUS_IBF 0x02 /* Input buffer full */ + +#define HP_SDC_STATUS_IRQMASK 0xf0 /* Bits containing "level 1" irq */ +#define HP_SDC_STATUS_PERIODIC 0x10 /* Periodic 10ms timer */ +#define HP_SDC_STATUS_USERTIMER 0x20 /* "Special purpose" timer */ +#define HP_SDC_STATUS_TIMER 0x30 /* Both PERIODIC and USERTIMER */ +#define HP_SDC_STATUS_REG 0x40 /* Data from an i8042 register */ +#define HP_SDC_STATUS_HILCMD 0x50 /* Command from HIL MLC */ +#define HP_SDC_STATUS_HILDATA 0x60 /* Data from HIL MLC */ +#define HP_SDC_STATUS_PUP 0x70 /* Successful power-up self test */ +#define HP_SDC_STATUS_KCOOKED 0x80 /* Key from cooked kbd */ +#define HP_SDC_STATUS_KRPG 0xc0 /* Key from Repeat Gen */ +#define HP_SDC_STATUS_KMOD_SUP 0x10 /* Shift key is up */ +#define HP_SDC_STATUS_KMOD_CUP 0x20 /* Control key is up */ + +#define HP_SDC_NMISTATUS_FHS 0x40 /* NMI is a fast handshake irq */ + +/* Internal i8042 registers (there are more, but they are not too useful). */ + +#define HP_SDC_USE 0x02 /* Resource usage (including OB bit) */ +#define HP_SDC_IM 0x04 /* Interrupt mask */ +#define HP_SDC_CFG 0x11 /* Configuration register */ +#define HP_SDC_KBLANGUAGE 0x12 /* Keyboard language */ + +#define HP_SDC_D0 0x70 /* General purpose data buffer 0 */ +#define HP_SDC_D1 0x71 /* General purpose data buffer 1 */ +#define HP_SDC_D2 0x72 /* General purpose data buffer 2 */ +#define HP_SDC_D3 0x73 /* General purpose data buffer 3 */ +#define HP_SDC_VT1 0x74 /* Timer for voice 1 */ +#define HP_SDC_VT2 0x75 /* Timer for voice 2 */ +#define HP_SDC_VT3 0x76 /* Timer for voice 3 */ +#define HP_SDC_VT4 0x77 /* Timer for voice 4 */ +#define HP_SDC_KBN 0x78 /* Which HIL devs are Nimitz */ +#define HP_SDC_KBC 0x79 /* Which HIL devs are cooked kbds */ +#define HP_SDC_LPS 0x7a /* i8042's view of HIL status */ +#define HP_SDC_LPC 0x7b /* i8042's view of HIL "control" */ +#define HP_SDC_RSV 0x7c /* Reserved "for testing" */ +#define HP_SDC_LPR 0x7d /* i8042 count of HIL reconfigs */ +#define HP_SDC_XTD 0x7e /* "Extended Configuration" register */ +#define HP_SDC_STR 0x7f /* i8042 self-test result */ + +/* Bitfields for above registers */ +#define HP_SDC_USE_LOOP 0x04 /* Command is currently on the loop. */ + +#define HP_SDC_IM_MASK 0x1f /* these bits not part of cmd/status */ +#define HP_SDC_IM_FH 0x10 /* Mask the fast handshake irq */ +#define HP_SDC_IM_PT 0x08 /* Mask the periodic timer irq */ +#define HP_SDC_IM_TIMERS 0x04 /* Mask the MT/DT/CT irq */ +#define HP_SDC_IM_RESET 0x02 /* Mask the reset key irq */ +#define HP_SDC_IM_HIL 0x01 /* Mask the HIL MLC irq */ + +#define HP_SDC_CFG_ROLLOVER 0x08 /* WTF is "N-key rollover"? 
*/ +#define HP_SDC_CFG_KBD 0x10 /* There is a keyboard */ +#define HP_SDC_CFG_NEW 0x20 /* Supports/uses HIL MLC */ +#define HP_SDC_CFG_KBD_OLD 0x03 /* keyboard code for non-HIL */ +#define HP_SDC_CFG_KBD_NEW 0x07 /* keyboard code from HIL autoconfig */ +#define HP_SDC_CFG_REV 0x40 /* Code revision bit */ +#define HP_SDC_CFG_IDPROM 0x80 /* IDPROM present in kbd (not HIL) */ + +#define HP_SDC_LPS_NDEV 0x07 /* # devices autoconfigured on HIL */ +#define HP_SDC_LPS_ACSUCC 0x08 /* loop autoconfigured successfully */ +#define HP_SDC_LPS_ACFAIL 0x80 /* last loop autoconfigure failed */ + +#define HP_SDC_LPC_APE_IPF 0x01 /* HIL MLC APE/IPF (autopoll) set */ +#define HP_SDC_LPC_ARCONERR 0x02 /* i8042 autoreconfigs loop on err */ +#define HP_SDC_LPC_ARCQUIET 0x03 /* i8042 doesn't report autoreconfigs*/ +#define HP_SDC_LPC_COOK 0x10 /* i8042 cooks devices in _KBN */ +#define HP_SDC_LPC_RC 0x80 /* causes autoreconfig */ + +#define HP_SDC_XTD_REV 0x07 /* contains revision code */ +#define HP_SDC_XTD_REV_STRINGS(val, str) \ +switch (val) { \ + case 0x1: str = "1820-3712"; break; \ + case 0x2: str = "1820-4379"; break; \ + case 0x3: str = "1820-4784"; break; \ + default: str = "unknown"; \ +}; +#define HP_SDC_XTD_BEEPER 0x08 /* TI SN76494 beeper available */ +#define HP_SDC_XTD_BBRTC 0x20 /* OKI MSM-58321 BBRTC present */ + +#define HP_SDC_CMD_LOAD_RT 0x31 /* Load real time (from 8042) */ +#define HP_SDC_CMD_LOAD_FHS 0x36 /* Load the fast handshake timer */ +#define HP_SDC_CMD_LOAD_MT 0x38 /* Load the match timer */ +#define HP_SDC_CMD_LOAD_DT 0x3B /* Load the delay timer */ +#define HP_SDC_CMD_LOAD_CT 0x3E /* Load the cycle timer */ + +#define HP_SDC_CMD_SET_IM 0x40 /* 010xxxxx == set irq mask */ + +/* The documents provided do not explicitly state that all registers betweem + * 0x01 and 0x1f inclusive can be read by sending their register index as a + * command, but this is implied and appears to be the case. + */ +#define HP_SDC_CMD_READ_RAM 0x00 /* Load from i8042 RAM (autoinc) */ +#define HP_SDC_CMD_READ_USE 0x02 /* Undocumented! 
Load from usage reg */ +#define HP_SDC_CMD_READ_IM 0x04 /* Load current interrupt mask */ +#define HP_SDC_CMD_READ_KCC 0x11 /* Load primary kbd config code */ +#define HP_SDC_CMD_READ_KLC 0x12 /* Load primary kbd language code */ +#define HP_SDC_CMD_READ_T1 0x13 /* Load timer output buffer byte 1 */ +#define HP_SDC_CMD_READ_T2 0x14 /* Load timer output buffer byte 1 */ +#define HP_SDC_CMD_READ_T3 0x15 /* Load timer output buffer byte 1 */ +#define HP_SDC_CMD_READ_T4 0x16 /* Load timer output buffer byte 1 */ +#define HP_SDC_CMD_READ_T5 0x17 /* Load timer output buffer byte 1 */ +#define HP_SDC_CMD_READ_D0 0xf0 /* Load from i8042 RAM location 0x70 */ +#define HP_SDC_CMD_READ_D1 0xf1 /* Load from i8042 RAM location 0x71 */ +#define HP_SDC_CMD_READ_D2 0xf2 /* Load from i8042 RAM location 0x72 */ +#define HP_SDC_CMD_READ_D3 0xf3 /* Load from i8042 RAM location 0x73 */ +#define HP_SDC_CMD_READ_VT1 0xf4 /* Load from i8042 RAM location 0x74 */ +#define HP_SDC_CMD_READ_VT2 0xf5 /* Load from i8042 RAM location 0x75 */ +#define HP_SDC_CMD_READ_VT3 0xf6 /* Load from i8042 RAM location 0x76 */ +#define HP_SDC_CMD_READ_VT4 0xf7 /* Load from i8042 RAM location 0x77 */ +#define HP_SDC_CMD_READ_KBN 0xf8 /* Load from i8042 RAM location 0x78 */ +#define HP_SDC_CMD_READ_KBC 0xf9 /* Load from i8042 RAM location 0x79 */ +#define HP_SDC_CMD_READ_LPS 0xfa /* Load from i8042 RAM location 0x7a */ +#define HP_SDC_CMD_READ_LPC 0xfb /* Load from i8042 RAM location 0x7b */ +#define HP_SDC_CMD_READ_RSV 0xfc /* Load from i8042 RAM location 0x7c */ +#define HP_SDC_CMD_READ_LPR 0xfd /* Load from i8042 RAM location 0x7d */ +#define HP_SDC_CMD_READ_XTD 0xfe /* Load from i8042 RAM location 0x7e */ +#define HP_SDC_CMD_READ_STR 0xff /* Load from i8042 RAM location 0x7f */ + +#define HP_SDC_CMD_SET_ARD 0xA0 /* Set emulated autorepeat delay */ +#define HP_SDC_CMD_SET_ARR 0xA2 /* Set emulated autorepeat rate */ +#define HP_SDC_CMD_SET_BELL 0xA3 /* Set voice 3 params for "beep" cmd */ +#define HP_SDC_CMD_SET_RPGR 0xA6 /* Set "RPG" irq rate (doesn't work) */ +#define HP_SDC_CMD_SET_RTMS 0xAD /* Set the RTC time (milliseconds) */ +#define HP_SDC_CMD_SET_RTD 0xAF /* Set the RTC time (days) */ +#define HP_SDC_CMD_SET_FHS 0xB2 /* Set fast handshake timer */ +#define HP_SDC_CMD_SET_MT 0xB4 /* Set match timer */ +#define HP_SDC_CMD_SET_DT 0xB7 /* Set delay timer */ +#define HP_SDC_CMD_SET_CT 0xBA /* Set cycle timer */ +#define HP_SDC_CMD_SET_RAMP 0xC1 /* Reset READ_RAM autoinc counter */ +#define HP_SDC_CMD_SET_D0 0xe0 /* Load to i8042 RAM location 0x70 */ +#define HP_SDC_CMD_SET_D1 0xe1 /* Load to i8042 RAM location 0x71 */ +#define HP_SDC_CMD_SET_D2 0xe2 /* Load to i8042 RAM location 0x72 */ +#define HP_SDC_CMD_SET_D3 0xe3 /* Load to i8042 RAM location 0x73 */ +#define HP_SDC_CMD_SET_VT1 0xe4 /* Load to i8042 RAM location 0x74 */ +#define HP_SDC_CMD_SET_VT2 0xe5 /* Load to i8042 RAM location 0x75 */ +#define HP_SDC_CMD_SET_VT3 0xe6 /* Load to i8042 RAM location 0x76 */ +#define HP_SDC_CMD_SET_VT4 0xe7 /* Load to i8042 RAM location 0x77 */ +#define HP_SDC_CMD_SET_KBN 0xe8 /* Load to i8042 RAM location 0x78 */ +#define HP_SDC_CMD_SET_KBC 0xe9 /* Load to i8042 RAM location 0x79 */ +#define HP_SDC_CMD_SET_LPS 0xea /* Load to i8042 RAM location 0x7a */ +#define HP_SDC_CMD_SET_LPC 0xeb /* Load to i8042 RAM location 0x7b */ +#define HP_SDC_CMD_SET_RSV 0xec /* Load to i8042 RAM location 0x7c */ +#define HP_SDC_CMD_SET_LPR 0xed /* Load to i8042 RAM location 0x7d */ +#define HP_SDC_CMD_SET_XTD 0xee /* Load to i8042 RAM location 0x7e */ +#define 
HP_SDC_CMD_SET_STR 0xef /* Load to i8042 RAM location 0x7f */ + +#define HP_SDC_CMD_DO_RTCW 0xc2 /* i8042 RAM 0x70 --> RTC */ +#define HP_SDC_CMD_DO_RTCR 0xc3 /* RTC[0x70 0:3] --> irq/status/data */ +#define HP_SDC_CMD_DO_BEEP 0xc4 /* i8042 RAM 0x70-74 --> beeper,VT3 */ +#define HP_SDC_CMD_DO_HIL 0xc5 /* i8042 RAM 0x70-73 --> + HIL MLC R0,R1 i8042 HIL watchdog */ + +/* Values used to (de)mangle input/output to/from the HIL MLC */ +#define HP_SDC_DATA 0x40 /* Data from an 8042 register */ +#define HP_SDC_HIL_CMD 0x50 /* Data from HIL MLC R1/8042 */ +#define HP_SDC_HIL_R1MASK 0x0f /* Contents of HIL MLC R1 0:3 */ +#define HP_SDC_HIL_AUTO 0x10 /* Set if POL results from i8042 */ +#define HP_SDC_HIL_ISERR 0x80 /* Has meaning as in next 4 values */ +#define HP_SDC_HIL_RC_DONE 0x80 /* i8042 auto-configured loop */ +#define HP_SDC_HIL_ERR 0x81 /* HIL MLC R2 had a bit set */ +#define HP_SDC_HIL_TO 0x82 /* i8042 HIL watchdog expired */ +#define HP_SDC_HIL_RC 0x84 /* i8042 is auto-configuring loop */ +#define HP_SDC_HIL_DAT 0x60 /* Data from HIL MLC R0 */ + + +typedef struct { + rwlock_t ibf_lock; + rwlock_t lock; /* user/tasklet lock */ + rwlock_t rtq_lock; /* isr/tasklet lock */ + rwlock_t hook_lock; /* isr/user lock for handler add/del */ + + unsigned int irq, nmi; /* Our IRQ lines */ + unsigned long base_io, status_io, data_io; /* Our IO ports */ + + uint8_t im; /* Interrupt mask */ + int set_im; /* Interrupt mask needs to be set. */ + + int ibf; /* Last known status of IBF flag */ + uint8_t wi; /* current i8042 write index */ + uint8_t r7[4]; /* current i8042[0x70 - 0x74] values */ + uint8_t r11, r7e; /* Values from version/revision regs */ + + hp_sdc_irqhook *timer, *reg, *hil, *pup, *cooked; + +#define HP_SDC_QUEUE_LEN 16 + hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */ + + int rcurr, rqty; /* Current read transact in process */ + ktime_t rtime; /* Time when current read started */ + int wcurr; /* Current write transact in process */ + + int dev_err; /* carries status from registration */ +#if defined(__hppa__) + struct parisc_device *dev; +#elif defined(__mc68000__) + void *dev; +#else +#error No support for device registration on this arch yet. 
+#endif + + struct timer_list kicker; /* Keeps below task alive */ + struct tasklet_struct task; + +} hp_i8042_sdc; + +#endif /* _LINUX_HP_SDC_H */ diff --git a/include/linux/hpet.h b/include/linux/hpet.h new file mode 100644 index 000000000..8604564b9 --- /dev/null +++ b/include/linux/hpet.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __HPET__ +#define __HPET__ 1 + +#include + + +/* + * Offsets into HPET Registers + */ + +struct hpet { + u64 hpet_cap; /* capabilities */ + u64 res0; /* reserved */ + u64 hpet_config; /* configuration */ + u64 res1; /* reserved */ + u64 hpet_isr; /* interrupt status reg */ + u64 res2[25]; /* reserved */ + union { /* main counter */ + u64 _hpet_mc64; + u32 _hpet_mc32; + unsigned long _hpet_mc; + } _u0; + u64 res3; /* reserved */ + struct hpet_timer { + u64 hpet_config; /* configuration/cap */ + union { /* timer compare register */ + u64 _hpet_hc64; + u32 _hpet_hc32; + unsigned long _hpet_compare; + } _u1; + u64 hpet_fsb[2]; /* FSB route */ + } hpet_timers[1]; +}; + +#define hpet_mc _u0._hpet_mc +#define hpet_compare _u1._hpet_compare + +#define HPET_MAX_TIMERS (32) +#define HPET_MAX_IRQ (32) + +/* + * HPET general capabilities register + */ + +#define HPET_COUNTER_CLK_PERIOD_MASK (0xffffffff00000000ULL) +#define HPET_COUNTER_CLK_PERIOD_SHIFT (32UL) +#define HPET_VENDOR_ID_MASK (0x00000000ffff0000ULL) +#define HPET_VENDOR_ID_SHIFT (16ULL) +#define HPET_LEG_RT_CAP_MASK (0x8000) +#define HPET_COUNTER_SIZE_MASK (0x2000) +#define HPET_NUM_TIM_CAP_MASK (0x1f00) +#define HPET_NUM_TIM_CAP_SHIFT (8ULL) + +/* + * HPET general configuration register + */ + +#define HPET_LEG_RT_CNF_MASK (2UL) +#define HPET_ENABLE_CNF_MASK (1UL) + + +/* + * Timer configuration register + */ + +#define Tn_INT_ROUTE_CAP_MASK (0xffffffff00000000ULL) +#define Tn_INT_ROUTE_CAP_SHIFT (32UL) +#define Tn_FSB_INT_DELCAP_MASK (0x8000UL) +#define Tn_FSB_INT_DELCAP_SHIFT (15) +#define Tn_FSB_EN_CNF_MASK (0x4000UL) +#define Tn_FSB_EN_CNF_SHIFT (14) +#define Tn_INT_ROUTE_CNF_MASK (0x3e00UL) +#define Tn_INT_ROUTE_CNF_SHIFT (9) +#define Tn_32MODE_CNF_MASK (0x0100UL) +#define Tn_VAL_SET_CNF_MASK (0x0040UL) +#define Tn_SIZE_CAP_MASK (0x0020UL) +#define Tn_PER_INT_CAP_MASK (0x0010UL) +#define Tn_TYPE_CNF_MASK (0x0008UL) +#define Tn_INT_ENB_CNF_MASK (0x0004UL) +#define Tn_INT_TYPE_CNF_MASK (0x0002UL) + +/* + * Timer FSB Interrupt Route Register + */ + +#define Tn_FSB_INT_ADDR_MASK (0xffffffff00000000ULL) +#define Tn_FSB_INT_ADDR_SHIFT (32UL) +#define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL) + +/* + * exported interfaces + */ + +struct hpet_data { + unsigned long hd_phys_address; + void __iomem *hd_address; + unsigned short hd_nirqs; + unsigned int hd_state; /* timer allocated */ + unsigned int hd_irq[HPET_MAX_TIMERS]; +}; + +static inline void hpet_reserve_timer(struct hpet_data *hd, int timer) +{ + hd->hd_state |= (1 << timer); + return; +} + +int hpet_alloc(struct hpet_data *); + +#endif /* !__HPET__ */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h new file mode 100644 index 000000000..542b4fa2c --- /dev/null +++ b/include/linux/hrtimer.h @@ -0,0 +1,516 @@ +/* + * include/linux/hrtimer.h + * + * hrtimers - High-resolution kernel timers + * + * Copyright(C) 2005, Thomas Gleixner + * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar + * + * data type definitions, declarations, prototypes + * + * Started by: Thomas Gleixner and Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_HRTIMER_H +#define _LINUX_HRTIMER_H + +#include +#include 
+#include +#include +#include +#include +#include + +struct hrtimer_clock_base; +struct hrtimer_cpu_base; + +/* + * Mode arguments of xxx_hrtimer functions: + * + * HRTIMER_MODE_ABS - Time value is absolute + * HRTIMER_MODE_REL - Time value is relative to now + * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered + * when starting the timer) + * HRTIMER_MODE_SOFT - Timer callback function will be executed in + * soft irq context + */ +enum hrtimer_mode { + HRTIMER_MODE_ABS = 0x00, + HRTIMER_MODE_REL = 0x01, + HRTIMER_MODE_PINNED = 0x02, + HRTIMER_MODE_SOFT = 0x04, + + HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED, + HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED, + + HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT, + HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT, + + HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT, + HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT, + +}; + +/* + * Return values for the callback function + */ +enum hrtimer_restart { + HRTIMER_NORESTART, /* Timer is not restarted */ + HRTIMER_RESTART, /* Timer must be restarted */ +}; + +/* + * Values to track state of the timer + * + * Possible states: + * + * 0x00 inactive + * 0x01 enqueued into rbtree + * + * The callback state is not part of the timer->state because clearing it would + * mean touching the timer after the callback, this makes it impossible to free + * the timer from the callback function. + * + * Therefore we track the callback state in: + * + * timer->base->cpu_base->running == timer + * + * On SMP it is possible to have a "callback function running and enqueued" + * status. It happens for example when a posix timer expired and the callback + * queued a signal. Between dropping the lock which protects the posix timer + * and reacquiring the base lock of the hrtimer, another CPU can deliver the + * signal and rearm the timer. + * + * All state transitions are protected by cpu_base->lock. + */ +#define HRTIMER_STATE_INACTIVE 0x00 +#define HRTIMER_STATE_ENQUEUED 0x01 + +/** + * struct hrtimer - the basic hrtimer structure + * @node: timerqueue node, which also manages node.expires, + * the absolute expiry time in the hrtimers internal + * representation. The time is related to the clock on + * which the timer is based. Is setup by adding + * slack to the _softexpires value. For non range timers + * identical to _softexpires. + * @_softexpires: the absolute earliest expiry time of the hrtimer. + * The time which was given as expiry time when the timer + * was armed. + * @function: timer expiry callback function + * @base: pointer to the timer base (per cpu and per clock) + * @state: state information (See bit values above) + * @is_rel: Set if the timer was armed relative + * @is_soft: Set if hrtimer will be expired in soft interrupt context. + * + * The hrtimer structure must be initialized by hrtimer_init() + */ +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; +}; + +/** + * struct hrtimer_sleeper - simple sleeper structure + * @timer: embedded timer structure + * @task: task to wake up + * + * task is set to NULL, when the timer expires. 
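+ *
+ * Illustrative use, loosely modelled on the nanosleep implementation (the
+ * timeout value is a placeholder ktime_t):
+ *
+ *	struct hrtimer_sleeper sl;
+ *
+ *	hrtimer_init(&sl.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ *	hrtimer_init_sleeper(&sl, current);
+ *	set_current_state(TASK_INTERRUPTIBLE);
+ *	hrtimer_start(&sl.timer, timeout, HRTIMER_MODE_REL);
+ *	if (sl.task)
+ *		schedule();
+ *	hrtimer_cancel(&sl.timer);
+ *	__set_current_state(TASK_RUNNING);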
+ */ +struct hrtimer_sleeper { + struct hrtimer timer; + struct task_struct *task; +}; + +#ifdef CONFIG_64BIT +# define __hrtimer_clock_base_align ____cacheline_aligned +#else +# define __hrtimer_clock_base_align +#endif + +/** + * struct hrtimer_clock_base - the timer base for a specific clock + * @cpu_base: per cpu clock base + * @index: clock type index for per_cpu support when moving a + * timer to a base on another cpu. + * @clockid: clock id for per_cpu support + * @seq: seqcount around __run_hrtimer + * @running: pointer to the currently running hrtimer + * @active: red black tree root node for the active timers + * @get_time: function to retrieve the current time of the clock + * @offset: offset of this clock to the monotonic base + */ +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + unsigned int index; + clockid_t clockid; + seqcount_t seq; + struct hrtimer *running; + struct timerqueue_head active; + ktime_t (*get_time)(void); + ktime_t offset; +} __hrtimer_clock_base_align; + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC, + HRTIMER_BASE_REALTIME, + HRTIMER_BASE_BOOTTIME, + HRTIMER_BASE_TAI, + HRTIMER_BASE_MONOTONIC_SOFT, + HRTIMER_BASE_REALTIME_SOFT, + HRTIMER_BASE_BOOTTIME_SOFT, + HRTIMER_BASE_TAI_SOFT, + HRTIMER_MAX_CLOCK_BASES, +}; + +/** + * struct hrtimer_cpu_base - the per cpu clock bases + * @lock: lock protecting the base and associated clock bases + * and timers + * @cpu: cpu number + * @active_bases: Bitfield to mark bases with active timers + * @clock_was_set_seq: Sequence counter of clock was set events + * @hres_active: State of high resolution mode + * @in_hrtirq: hrtimer_interrupt() is currently executing + * @hang_detected: The last hrtimer interrupt detected a hang + * @softirq_activated: displays, if the softirq is raised - update of softirq + * related settings is not required then. + * @nr_events: Total number of hrtimer interrupt events + * @nr_retries: Total number of hrtimer interrupt retries + * @nr_hangs: Total number of hrtimer interrupt hangs + * @max_hang_time: Maximum time spent in hrtimer_interrupt + * @expires_next: absolute time of the next event, is required for remote + * hrtimer enqueue; it is the total first expiry time (hard + * and soft hrtimer are taken into account) + * @next_timer: Pointer to the first expiring timer + * @softirq_expires_next: Time to check, if soft queues needs also to be expired + * @softirq_next_timer: Pointer to the first expiring softirq based timer + * @clock_base: array of clock bases for this cpu + * + * Note: next_timer is just an optimization for __remove_hrtimer(). + * Do not dereference the pointer because it is not reliable on + * cross cpu removals. 
+ */ +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; + unsigned int hres_active : 1, + in_hrtirq : 1, + hang_detected : 1, + softirq_activated : 1; +#ifdef CONFIG_HIGH_RES_TIMERS + unsigned int nr_events; + unsigned short nr_retries; + unsigned short nr_hangs; + unsigned int max_hang_time; +#endif + ktime_t expires_next; + struct hrtimer *next_timer; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; +} ____cacheline_aligned; + +static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) +{ + timer->node.expires = time; + timer->_softexpires = time; +} + +static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta) +{ + timer->_softexpires = time; + timer->node.expires = ktime_add_safe(time, delta); +} + +static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta) +{ + timer->_softexpires = time; + timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta)); +} + +static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) +{ + timer->node.expires = tv64; + timer->_softexpires = tv64; +} + +static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) +{ + timer->node.expires = ktime_add_safe(timer->node.expires, time); + timer->_softexpires = ktime_add_safe(timer->_softexpires, time); +} + +static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns) +{ + timer->node.expires = ktime_add_ns(timer->node.expires, ns); + timer->_softexpires = ktime_add_ns(timer->_softexpires, ns); +} + +static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer) +{ + return timer->node.expires; +} + +static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) +{ + return timer->_softexpires; +} + +static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) +{ + return timer->node.expires; +} +static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) +{ + return timer->_softexpires; +} + +static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) +{ + return ktime_to_ns(timer->node.expires); +} + +static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) +{ + return ktime_sub(timer->node.expires, timer->base->get_time()); +} + +static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) +{ + return timer->base->get_time(); +} + +static inline int hrtimer_is_hres_active(struct hrtimer *timer) +{ + return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ? + timer->base->cpu_base->hres_active : 0; +} + +#ifdef CONFIG_HIGH_RES_TIMERS +struct clock_event_device; + +extern void hrtimer_interrupt(struct clock_event_device *dev); + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. 
+ */ +# define HIGH_RES_NSEC 1 +# define KTIME_HIGH_RES (HIGH_RES_NSEC) +# define MONOTONIC_RES_NSEC HIGH_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_HIGH_RES + +extern void clock_was_set_delayed(void); + +extern unsigned int hrtimer_resolution; + +#else + +# define MONOTONIC_RES_NSEC LOW_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_LOW_RES + +#define hrtimer_resolution (unsigned int)LOW_RES_NSEC + +static inline void clock_was_set_delayed(void) { } + +#endif + +static inline ktime_t +__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now) +{ + ktime_t rem = ktime_sub(timer->node.expires, now); + + /* + * Adjust relative timers for the extra we added in + * hrtimer_start_range_ns() to prevent short timeouts. + */ + if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel) + rem -= hrtimer_resolution; + return rem; +} + +static inline ktime_t +hrtimer_expires_remaining_adjusted(const struct hrtimer *timer) +{ + return __hrtimer_expires_remaining_adjusted(timer, + timer->base->get_time()); +} + +extern void clock_was_set(void); +#ifdef CONFIG_TIMERFD +extern void timerfd_clock_was_set(void); +#else +static inline void timerfd_clock_was_set(void) { } +#endif +extern void hrtimers_resume(void); + +DECLARE_PER_CPU(struct tick_device, tick_cpu_device); + + +/* Exported timer functions: */ + +/* Initialize timers: */ +extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, + enum hrtimer_mode mode); + +#ifdef CONFIG_DEBUG_OBJECTS_TIMERS +extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, + enum hrtimer_mode mode); + +extern void destroy_hrtimer_on_stack(struct hrtimer *timer); +#else +static inline void hrtimer_init_on_stack(struct hrtimer *timer, + clockid_t which_clock, + enum hrtimer_mode mode) +{ + hrtimer_init(timer, which_clock, mode); +} +static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } +#endif + +/* Basic timer operations: */ +extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + u64 range_ns, const enum hrtimer_mode mode); + +/** + * hrtimer_start - (re)start an hrtimer + * @timer: the timer to be added + * @tim: expiry time + * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or + * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED); + * softirq based mode is considered for debug purpose only! 
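+ *
+ * Typical usage (illustrative; my_timer and my_timer_fn are placeholders,
+ * and my_timer_fn returns HRTIMER_NORESTART for a one-shot timer):
+ *
+ *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ *	my_timer.function = my_timer_fn;
+ *	hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);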
+ */ +static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, + const enum hrtimer_mode mode) +{ + hrtimer_start_range_ns(timer, tim, 0, mode); +} + +extern int hrtimer_cancel(struct hrtimer *timer); +extern int hrtimer_try_to_cancel(struct hrtimer *timer); + +static inline void hrtimer_start_expires(struct hrtimer *timer, + enum hrtimer_mode mode) +{ + u64 delta; + ktime_t soft, hard; + soft = hrtimer_get_softexpires(timer); + hard = hrtimer_get_expires(timer); + delta = ktime_to_ns(ktime_sub(hard, soft)); + hrtimer_start_range_ns(timer, soft, delta, mode); +} + +static inline void hrtimer_restart(struct hrtimer *timer) +{ + hrtimer_start_expires(timer, HRTIMER_MODE_ABS); +} + +/* Query timers: */ +extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); + +static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer) +{ + return __hrtimer_get_remaining(timer, false); +} + +extern u64 hrtimer_get_next_event(void); +extern u64 hrtimer_next_event_without(const struct hrtimer *exclude); + +extern bool hrtimer_active(const struct hrtimer *timer); + +/** + * hrtimer_is_queued = check, whether the timer is on one of the queues + * @timer: Timer to check + * + * Returns: True if the timer is queued, false otherwise + * + * The function can be used lockless, but it gives only a current snapshot. + */ +static inline bool hrtimer_is_queued(struct hrtimer *timer) +{ + /* The READ_ONCE pairs with the update functions of timer->state */ + return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED); +} + +/* + * Helper function to check, whether the timer is running the callback + * function + */ +static inline int hrtimer_callback_running(struct hrtimer *timer) +{ + return timer->base->running == timer; +} + +/* Forward a hrtimer so it expires after now: */ +extern u64 +hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); + +/** + * hrtimer_forward_now - forward the timer expiry so it expires after now + * @timer: hrtimer to forward + * @interval: the interval to forward + * + * Forward the timer expiry so it will expire after the current time + * of the hrtimer clock base. Returns the number of overruns. + * + * Can be safely called from the callback function of @timer. If + * called from other contexts @timer must neither be enqueued nor + * running the callback and the caller needs to take care of + * serialization. + * + * Note: This only updates the timer expiry value and does not requeue + * the timer. 
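+ *
+ * A periodic timer callback (illustrative; my_period is a placeholder
+ * ktime_t interval) would therefore requeue itself like this:
+ *
+ *	static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
+ *	{
+ *		hrtimer_forward_now(timer, my_period);
+ *		return HRTIMER_RESTART;
+ *	}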
+ */ +static inline u64 hrtimer_forward_now(struct hrtimer *timer, + ktime_t interval) +{ + return hrtimer_forward(timer, timer->base->get_time(), interval); +} + +/* Precise sleep: */ + +extern int nanosleep_copyout(struct restart_block *, struct timespec64 *); +extern long hrtimer_nanosleep(const struct timespec64 *rqtp, + const enum hrtimer_mode mode, + const clockid_t clockid); + +extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, + struct task_struct *tsk); + +extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, + const enum hrtimer_mode mode); +extern int schedule_hrtimeout_range_clock(ktime_t *expires, + u64 delta, + const enum hrtimer_mode mode, + clockid_t clock_id); +extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); + +/* Soft interrupt function to run the hrtimer queues: */ +extern void hrtimer_run_queues(void); + +/* Bootup initialization: */ +extern void __init hrtimers_init(void); + +/* Show pending timers: */ +extern void sysrq_timer_list_show(void); + +int hrtimers_prepare_cpu(unsigned int cpu); +#ifdef CONFIG_HOTPLUG_CPU +int hrtimers_dead_cpu(unsigned int cpu); +#else +#define hrtimers_dead_cpu NULL +#endif + +#endif diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h new file mode 100644 index 000000000..57402544b --- /dev/null +++ b/include/linux/hsi/hsi.h @@ -0,0 +1,441 @@ +/* + * HSI core header file. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_HSI_H__ +#define __LINUX_HSI_H__ + +#include +#include +#include +#include +#include +#include + +/* HSI message ttype */ +#define HSI_MSG_READ 0 +#define HSI_MSG_WRITE 1 + +/* HSI configuration values */ +enum { + HSI_MODE_STREAM = 1, + HSI_MODE_FRAME, +}; + +enum { + HSI_FLOW_SYNC, /* Synchronized flow */ + HSI_FLOW_PIPE, /* Pipelined flow */ +}; + +enum { + HSI_ARB_RR, /* Round-robin arbitration */ + HSI_ARB_PRIO, /* Channel priority arbitration */ +}; + +#define HSI_MAX_CHANNELS 16 + +/* HSI message status codes */ +enum { + HSI_STATUS_COMPLETED, /* Message transfer is completed */ + HSI_STATUS_PENDING, /* Message pending to be read/write (POLL) */ + HSI_STATUS_PROCEEDING, /* Message transfer is ongoing */ + HSI_STATUS_QUEUED, /* Message waiting to be served */ + HSI_STATUS_ERROR, /* Error when message transfer was ongoing */ +}; + +/* HSI port event codes */ +enum { + HSI_EVENT_START_RX, + HSI_EVENT_STOP_RX, +}; + +/** + * struct hsi_channel - channel resource used by the hsi clients + * @id: Channel number + * @name: Channel name + */ +struct hsi_channel { + unsigned int id; + const char *name; +}; + +/** + * struct hsi_config - Configuration for RX/TX HSI modules + * @mode: Bit transmission mode (STREAM or FRAME) + * @channels: Channel resources used by the client + * @num_channels: Number of channel resources + * @num_hw_channels: Number of channels the transceiver is configured for [1..16] + * @speed: Max bit transmission speed (Kbit/s) + * @flow: RX flow type (SYNCHRONIZED or PIPELINE) + * @arb_mode: Arbitration mode for TX frame (Round robin, priority) + */ +struct hsi_config { + unsigned int mode; + struct hsi_channel *channels; + unsigned int num_channels; + unsigned int num_hw_channels; + unsigned int speed; + union { + unsigned int flow; /* RX only */ + unsigned int arb_mode; /* TX only */ + }; +}; + +/** + * struct hsi_board_info - HSI client board info + * @name: Name for the HSI device + * @hsi_id: HSI controller id where the client sits + * @port: Port number in the controller where the client sits + * @tx_cfg: HSI TX configuration + * @rx_cfg: HSI RX configuration + * @platform_data: Platform related data + * @archdata: Architecture-dependent device data + */ +struct hsi_board_info { + const char *name; + unsigned int hsi_id; + unsigned int port; + struct hsi_config tx_cfg; + struct hsi_config rx_cfg; + void *platform_data; + struct dev_archdata *archdata; +}; + +#ifdef CONFIG_HSI_BOARDINFO +extern int hsi_register_board_info(struct hsi_board_info const *info, + unsigned int len); +#else +static inline int hsi_register_board_info(struct hsi_board_info const *info, + unsigned int len) +{ + return 0; +} +#endif /* CONFIG_HSI_BOARDINFO */ + +/** + * struct hsi_client - HSI client attached to an HSI port + * @device: Driver model representation of the device + * @tx_cfg: HSI TX configuration + * @rx_cfg: HSI RX configuration + */ +struct hsi_client { + struct device device; + struct hsi_config tx_cfg; + struct hsi_config rx_cfg; + /* private: */ + void (*ehandler)(struct hsi_client *, unsigned long); + unsigned int pclaimed:1; + struct notifier_block nb; +}; + +#define to_hsi_client(dev) container_of(dev, struct hsi_client, device) + +static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data) +{ + 
dev_set_drvdata(&cl->device, data); +} + +static inline void *hsi_client_drvdata(struct hsi_client *cl) +{ + return dev_get_drvdata(&cl->device); +} + +int hsi_register_port_event(struct hsi_client *cl, + void (*handler)(struct hsi_client *, unsigned long)); +int hsi_unregister_port_event(struct hsi_client *cl); + +/** + * struct hsi_client_driver - Driver associated with an HSI client + * @driver: Driver model representation of the driver + */ +struct hsi_client_driver { + struct device_driver driver; +}; + +#define to_hsi_client_driver(drv) container_of(drv, struct hsi_client_driver,\ + driver) + +int hsi_register_client_driver(struct hsi_client_driver *drv); + +static inline void hsi_unregister_client_driver(struct hsi_client_driver *drv) +{ + driver_unregister(&drv->driver); +} + +/** + * struct hsi_msg - HSI message descriptor + * @link: Free to use by the current descriptor owner + * @cl: HSI device client that issues the transfer + * @sgt: Head of the scatterlist array + * @context: Client context data associated with the transfer + * @complete: Transfer completion callback + * @destructor: Destructor to free resources when flushing + * @status: Status of the transfer when completed + * @actual_len: Actual length of data transferred on completion + * @channel: Channel where to TX/RX the message + * @ttype: Transfer type (TX if set, RX otherwise) + * @break_frame: if true, HSI will send/receive a break frame. Data buffers are + * ignored in the request. + */ +struct hsi_msg { + struct list_head link; + struct hsi_client *cl; + struct sg_table sgt; + void *context; + + void (*complete)(struct hsi_msg *msg); + void (*destructor)(struct hsi_msg *msg); + + int status; + unsigned int actual_len; + unsigned int channel; + unsigned int ttype:1; + unsigned int break_frame:1; +}; + +struct hsi_msg *hsi_alloc_msg(unsigned int n_frag, gfp_t flags); +void hsi_free_msg(struct hsi_msg *msg); + +/** + * struct hsi_port - HSI port device + * @device: Driver model representation of the device + * @tx_cfg: Current TX path configuration + * @rx_cfg: Current RX path configuration + * @num: Port number + * @shared: Set when port can be shared by different clients + * @claimed: Reference count of clients which claimed the port + * @lock: Serialize port claim + * @async: Asynchronous transfer callback + * @setup: Callback to set the HSI client configuration + * @flush: Callback to clean the HW state and destroy all pending transfers + * @start_tx: Callback to inform that a client wants to TX data + * @stop_tx: Callback to inform that a client no longer wishes to TX data + * @release: Callback to inform that a client no longer uses the port + * @n_head: Notifier chain for signaling port events to the clients.
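The hsi_msg descriptor and the hsi_alloc_msg()/hsi_free_msg() helpers above are what clients queue on a port; a minimal sketch of building a one-fragment message with a completion handler (my_complete, my_prepare_msg, buf and len are hypothetical):

static void my_complete(struct hsi_msg *msg)
{
	pr_info("transfer done: status %d, %u bytes\n",
		msg->status, msg->actual_len);
	hsi_free_msg(msg);
}

static struct hsi_msg *my_prepare_msg(void *buf, unsigned int len)
{
	struct hsi_msg *msg;

	msg = hsi_alloc_msg(1, GFP_KERNEL);	/* room for one sg entry */
	if (!msg)
		return NULL;

	sg_init_one(msg->sgt.sgl, buf, len);
	msg->channel = 0;
	msg->complete = my_complete;
	return msg;
}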
+ */ +struct hsi_port { + struct device device; + struct hsi_config tx_cfg; + struct hsi_config rx_cfg; + unsigned int num; + unsigned int shared:1; + int claimed; + struct mutex lock; + int (*async)(struct hsi_msg *msg); + int (*setup)(struct hsi_client *cl); + int (*flush)(struct hsi_client *cl); + int (*start_tx)(struct hsi_client *cl); + int (*stop_tx)(struct hsi_client *cl); + int (*release)(struct hsi_client *cl); + /* private */ + struct blocking_notifier_head n_head; +}; + +#define to_hsi_port(dev) container_of(dev, struct hsi_port, device) +#define hsi_get_port(cl) to_hsi_port((cl)->device.parent) + +int hsi_event(struct hsi_port *port, unsigned long event); +int hsi_claim_port(struct hsi_client *cl, unsigned int share); +void hsi_release_port(struct hsi_client *cl); + +static inline int hsi_port_claimed(struct hsi_client *cl) +{ + return cl->pclaimed; +} + +static inline void hsi_port_set_drvdata(struct hsi_port *port, void *data) +{ + dev_set_drvdata(&port->device, data); +} + +static inline void *hsi_port_drvdata(struct hsi_port *port) +{ + return dev_get_drvdata(&port->device); +} + +/** + * struct hsi_controller - HSI controller device + * @device: Driver model representation of the device + * @owner: Pointer to the module owning the controller + * @id: HSI controller ID + * @num_ports: Number of ports in the HSI controller + * @port: Array of HSI ports + */ +struct hsi_controller { + struct device device; + struct module *owner; + unsigned int id; + unsigned int num_ports; + struct hsi_port **port; +}; + +#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device) + +struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags); +void hsi_put_controller(struct hsi_controller *hsi); +int hsi_register_controller(struct hsi_controller *hsi); +void hsi_unregister_controller(struct hsi_controller *hsi); +struct hsi_client *hsi_new_client(struct hsi_port *port, + struct hsi_board_info *info); +int hsi_remove_client(struct device *dev, void *data); +void hsi_port_unregister_clients(struct hsi_port *port); + +#ifdef CONFIG_OF +void hsi_add_clients_from_dt(struct hsi_port *port, + struct device_node *clients); +#else +static inline void hsi_add_clients_from_dt(struct hsi_port *port, + struct device_node *clients) +{ + return; +} +#endif + +static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi, + void *data) +{ + dev_set_drvdata(&hsi->device, data); +} + +static inline void *hsi_controller_drvdata(struct hsi_controller *hsi) +{ + return dev_get_drvdata(&hsi->device); +} + +static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi, + unsigned int num) +{ + return (num < hsi->num_ports) ? 
hsi->port[num] : NULL; +} + +/* + * API for HSI clients + */ +int hsi_async(struct hsi_client *cl, struct hsi_msg *msg); + +int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name); + +/** + * hsi_id - Get HSI controller ID associated with a client + * @cl: Pointer to an HSI client + * + * Return the controller id the client is attached to + */ +static inline unsigned int hsi_id(struct hsi_client *cl) +{ + return to_hsi_controller(cl->device.parent->parent)->id; +} + +/** + * hsi_port_id - Gets the port number a client is attached to + * @cl: Pointer to HSI client + * + * Return the port number associated with the client + */ +static inline unsigned int hsi_port_id(struct hsi_client *cl) +{ + return to_hsi_port(cl->device.parent)->num; +} + +/** + * hsi_setup - Configure the client's port + * @cl: Pointer to the HSI client + * + * When sharing ports, clients should either rely on a single + * client setup or have the same setup for all of them. + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_setup(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->setup(cl); +} + +/** + * hsi_flush - Flush all pending transactions on the client's port + * @cl: Pointer to the HSI client + * + * This function will destroy all pending hsi_msg in the port and reset + * the HW port so it is ready to receive and transmit from a clean state. + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_flush(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->flush(cl); +} + +/** + * hsi_async_read - Submit a read transfer + * @cl: Pointer to the HSI client + * @msg: HSI message descriptor of the transfer + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg) +{ + msg->ttype = HSI_MSG_READ; + return hsi_async(cl, msg); +} + +/** + * hsi_async_write - Submit a write transfer + * @cl: Pointer to the HSI client + * @msg: HSI message descriptor of the transfer + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg) +{ + msg->ttype = HSI_MSG_WRITE; + return hsi_async(cl, msg); +} + +/** + * hsi_start_tx - Signal the port that the client wants to start a TX + * @cl: Pointer to the HSI client + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_start_tx(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->start_tx(cl); +} + +/** + * hsi_stop_tx - Signal the port that the client no longer wants to transmit + * @cl: Pointer to the HSI client + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_stop_tx(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->stop_tx(cl); +} +#endif /* __LINUX_HSI_H__ */ diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h new file mode 100644 index 000000000..1433651be --- /dev/null +++ b/include/linux/hsi/ssi_protocol.h @@ -0,0 +1,42 @@ +/* + * ssip_slave.h + * + * SSIP slave support header file + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation.
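Taken together, the client helpers declared in hsi.h above follow a claim/configure/submit pattern; a minimal sketch (my_start_rx is hypothetical, and the message is assumed to have been prepared as in the earlier hsi_msg example):

static int my_start_rx(struct hsi_client *cl, struct hsi_msg *msg)
{
	int err;

	err = hsi_claim_port(cl, 0);	/* 0: do not share the port */
	if (err < 0)
		return err;

	err = hsi_setup(cl);		/* program tx_cfg/rx_cfg into the HW */
	if (err)
		goto out_release;

	err = hsi_async_read(cl, msg);	/* completion arrives via msg->complete */
	if (err)
		goto out_release;

	return 0;

out_release:
	hsi_release_port(cl);
	return err;
}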
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_SSIP_SLAVE_H__ +#define __LINUX_SSIP_SLAVE_H__ + +#include + +static inline void ssip_slave_put_master(struct hsi_client *master) +{ +} + +struct hsi_client *ssip_slave_get_master(struct hsi_client *slave); +int ssip_slave_start_tx(struct hsi_client *master); +int ssip_slave_stop_tx(struct hsi_client *master); +void ssip_reset_event(struct hsi_client *master); + +int ssip_slave_running(struct hsi_client *master); + +#endif /* __LINUX_SSIP_SLAVE_H__ */ + diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h new file mode 100644 index 000000000..842fce69a --- /dev/null +++ b/include/linux/htcpld.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_HTCPLD_H +#define __LINUX_HTCPLD_H + +struct htcpld_chip_platform_data { + unsigned int addr; + unsigned int reset; + unsigned int num_gpios; + unsigned int gpio_out_base; + unsigned int gpio_in_base; + unsigned int irq_base; + unsigned int num_irqs; +}; + +struct htcpld_core_platform_data { + unsigned int int_reset_gpio_hi; + unsigned int int_reset_gpio_lo; + unsigned int i2c_adapter_id; + + struct htcpld_chip_platform_data *chip; + unsigned int num_chip; +}; + +#endif /* __LINUX_HTCPLD_H */ + diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h new file mode 100644 index 000000000..becf9b1ea --- /dev/null +++ b/include/linux/huge_mm.h @@ -0,0 +1,379 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HUGE_MM_H +#define _LINUX_HUGE_MM_H + +#include +#include + +#include /* only for vma_is_dax() */ + +extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); +extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, + struct vm_area_struct *vma); +extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); +extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pud_t *dst_pud, pud_t *src_pud, unsigned long addr, + struct vm_area_struct *vma); + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); +#else +static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) +{ +} +#endif + +extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); +extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, + unsigned long addr, + pmd_t *pmd, + unsigned int flags); +extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pmd_t *pmd, unsigned long addr, unsigned long next); +extern int zap_huge_pmd(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pmd_t *pmd, unsigned long addr); +extern int zap_huge_pud(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pud_t *pud, unsigned long addr); +extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, unsigned long end, + unsigned char *vec); +extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, + unsigned long new_addr, unsigned long old_end, + 
pmd_t *old_pmd, pmd_t *new_pmd); +extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, pgprot_t newprot, + int prot_numa); +vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); +vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); +enum transparent_hugepage_flag { + TRANSPARENT_HUGEPAGE_FLAG, + TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, + TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, +#ifdef CONFIG_DEBUG_VM + TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG, +#endif +}; + +struct kobject; +struct kobj_attribute; + +extern ssize_t single_hugepage_flag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count, + enum transparent_hugepage_flag flag); +extern ssize_t single_hugepage_flag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf, + enum transparent_hugepage_flag flag); +extern struct kobj_attribute shmem_enabled_attr; + +#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) +#define HPAGE_PMD_NR (1<vm_flags & VM_NOHUGEPAGE) + return false; + + if (is_vma_temporary_stack(vma)) + return false; + + if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) + return false; + + if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) + return true; + + if (vma_is_dax(vma)) + return true; + + if (transparent_hugepage_flags & + (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) + return !!(vma->vm_flags & VM_HUGEPAGE); + + return false; +} + +bool transparent_hugepage_enabled(struct vm_area_struct *vma); + +#define transparent_hugepage_use_zero_page() \ + (transparent_hugepage_flags & \ + (1<vm_mm->mmap_sem), vma); + if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) + return __pmd_trans_huge_lock(pmd, vma); + else + return NULL; +} +static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma) +{ + VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); + if (pud_trans_huge(*pud) || pud_devmap(*pud)) + return __pud_trans_huge_lock(pud, vma); + else + return NULL; +} +static inline int hpage_nr_pages(struct page *page) +{ + if (unlikely(PageTransHuge(page))) + return HPAGE_PMD_NR; + return 1; +} + +struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, int flags); +struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, + pud_t *pud, int flags); + +extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); + +extern struct page *huge_zero_page; +extern unsigned long huge_zero_pfn; + +static inline bool is_huge_zero_page(struct page *page) +{ + return READ_ONCE(huge_zero_page) == page; +} + +static inline bool is_huge_zero_pmd(pmd_t pmd) +{ + return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd); +} + +static inline bool is_huge_zero_pud(pud_t pud) +{ + return false; +} + +struct page *mm_get_huge_zero_page(struct mm_struct *mm); +void mm_put_huge_zero_page(struct mm_struct *mm); + +#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) + +static inline bool thp_migration_supported(void) +{ + return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION); +} + +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) +#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) +#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; 
}) + +#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) + +#define hpage_nr_pages(x) 1 + +static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) +{ + return false; +} + +static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) +{ + return false; +} + +static inline void prep_transhuge_page(struct page *page) {} + +#define transparent_hugepage_flags 0UL + +#define thp_get_unmapped_area NULL + +static inline bool +can_split_huge_page(struct page *page, int *pextra_pins) +{ + BUILD_BUG(); + return false; +} +static inline int +split_huge_page_to_list(struct page *page, struct list_head *list) +{ + return 0; +} +static inline int split_huge_page(struct page *page) +{ + return 0; +} +static inline void deferred_split_huge_page(struct page *page) {} +#define split_huge_pmd(__vma, __pmd, __address) \ + do { } while (0) + +static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long address, bool freeze, struct page *page) {} +static inline void split_huge_pmd_address(struct vm_area_struct *vma, + unsigned long address, bool freeze, struct page *page) {} + +#define split_huge_pud(__vma, __pmd, __address) \ + do { } while (0) + +static inline int hugepage_madvise(struct vm_area_struct *vma, + unsigned long *vm_flags, int advice) +{ + BUG(); + return 0; +} +static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + long adjust_next) +{ +} +static inline int is_swap_pmd(pmd_t pmd) +{ + return 0; +} +static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, + struct vm_area_struct *vma) +{ + return NULL; +} +static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma) +{ + return NULL; +} + +static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, + pmd_t orig_pmd) +{ + return 0; +} + +static inline bool is_huge_zero_page(struct page *page) +{ + return false; +} + +static inline bool is_huge_zero_pmd(pmd_t pmd) +{ + return false; +} + +static inline bool is_huge_zero_pud(pud_t pud) +{ + return false; +} + +static inline void mm_put_huge_zero_page(struct mm_struct *mm) +{ + return; +} + +static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmd, int flags) +{ + return NULL; +} + +static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, + unsigned long addr, pud_t *pud, int flags) +{ + return NULL; +} + +static inline bool thp_migration_supported(void) +{ + return false; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h new file mode 100644 index 000000000..cb7dc38e9 --- /dev/null +++ b/include/linux/hugetlb.h @@ -0,0 +1,629 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HUGETLB_H +#define _LINUX_HUGETLB_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct ctl_table; +struct user_struct; +struct mmu_gather; + +#ifndef is_hugepd +/* + * Some architectures requires a hugepage directory format that is + * required to support multiple hugepage sizes. For example + * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables" + * introduced the same on powerpc. This allows for a more flexible hugepage + * pagetable layout. 
+ */ +typedef struct { unsigned long pd; } hugepd_t; +#define is_hugepd(hugepd) (0) +#define __hugepd(x) ((hugepd_t) { (x) }) +static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, + unsigned pdshift, unsigned long end, + int write, struct page **pages, int *nr) +{ + return 0; +} +#else +extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr, + unsigned pdshift, unsigned long end, + int write, struct page **pages, int *nr); +#endif + + +#ifdef CONFIG_HUGETLB_PAGE + +#include +#include +#include + +struct hugepage_subpool { + spinlock_t lock; + long count; + long max_hpages; /* Maximum huge pages or -1 if no maximum. */ + long used_hpages; /* Used count against maximum, includes */ + /* both alloced and reserved pages. */ + struct hstate *hstate; + long min_hpages; /* Minimum huge pages or -1 if no minimum. */ + long rsv_hpages; /* Pages reserved against global pool to */ + /* sasitfy minimum size. */ +}; + +struct resv_map { + struct kref refs; + spinlock_t lock; + struct list_head regions; + long adds_in_progress; + struct list_head region_cache; + long region_cache_count; +}; +extern struct resv_map *resv_map_alloc(void); +void resv_map_release(struct kref *ref); + +extern spinlock_t hugetlb_lock; +extern int hugetlb_max_hstate __read_mostly; +#define for_each_hstate(h) \ + for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++) + +struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, + long min_hpages); +void hugepage_put_subpool(struct hugepage_subpool *spool); + +void reset_vma_resv_huge_pages(struct vm_area_struct *vma); +int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); + +#ifdef CONFIG_NUMA +int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +#endif + +int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); +long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, + struct page **, struct vm_area_struct **, + unsigned long *, unsigned long *, long, unsigned int, + int *); +void unmap_hugepage_range(struct vm_area_struct *, + unsigned long, unsigned long, struct page *); +void __unmap_hugepage_range_final(struct mmu_gather *tlb, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct page *ref_page); +void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct page *ref_page); +void hugetlb_report_meminfo(struct seq_file *); +int hugetlb_report_node_meminfo(int, char *); +void hugetlb_show_meminfo(void); +unsigned long hugetlb_total_pages(void); +vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, unsigned int flags); +int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, + struct vm_area_struct *dst_vma, + unsigned long dst_addr, + unsigned long src_addr, + struct page **pagep); +int hugetlb_reserve_pages(struct inode *inode, long from, long to, + struct vm_area_struct *vma, + vm_flags_t vm_flags); +long hugetlb_unreserve_pages(struct inode *inode, long start, long end, + long freed); +bool isolate_huge_page(struct page *page, struct list_head *list); +void putback_active_hugepage(struct page *page); +void move_hugetlb_state(struct page *oldpage, struct 
page *newpage, int reason); +void free_huge_page(struct page *page); +void hugetlb_fix_reserve_counts(struct inode *inode); +extern struct mutex *hugetlb_fault_mutex_table; +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, + pgoff_t idx); + +pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); + +extern int sysctl_hugetlb_shm_group; +extern struct list_head huge_boot_pages; + +/* arch callbacks */ + +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz); +pte_t *huge_pte_offset(struct mm_struct *mm, + unsigned long addr, unsigned long sz); +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); +void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, + unsigned long *start, unsigned long *end); +struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, + int write); +struct page *follow_huge_pd(struct vm_area_struct *vma, + unsigned long address, hugepd_t hpd, + int flags, int pdshift); +struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int flags); +struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, + pud_t *pud, int flags); +struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address, + pgd_t *pgd, int flags); + +int pmd_huge(pmd_t pmd); +int pud_huge(pud_t pud); +unsigned long hugetlb_change_protection(struct vm_area_struct *vma, + unsigned long address, unsigned long end, pgprot_t newprot); + +bool is_hugetlb_entry_migration(pte_t pte); + +#else /* !CONFIG_HUGETLB_PAGE */ + +static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) +{ +} + +static inline unsigned long hugetlb_total_pages(void) +{ + return 0; +} + +static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, + pte_t *ptep) +{ + return 0; +} + +static inline void adjust_range_if_pmd_sharing_possible( + struct vm_area_struct *vma, + unsigned long *start, unsigned long *end) +{ +} + +#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; }) +#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) +#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) +static inline void hugetlb_report_meminfo(struct seq_file *m) +{ +} +#define hugetlb_report_node_meminfo(n, buf) 0 +static inline void hugetlb_show_meminfo(void) +{ +} +#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL +#define follow_huge_pmd(mm, addr, pmd, flags) NULL +#define follow_huge_pud(mm, addr, pud, flags) NULL +#define follow_huge_pgd(mm, addr, pgd, flags) NULL +#define prepare_hugepage_range(file, addr, len) (-EINVAL) +#define pmd_huge(x) 0 +#define pud_huge(x) 0 +#define is_hugepage_only_range(mm, addr, len) 0 +#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) +#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) +#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ + src_addr, pagep) ({ BUG(); 0; }) +#define huge_pte_offset(mm, address, sz) 0 + +static inline bool isolate_huge_page(struct page *page, struct list_head *list) +{ + return false; +} +#define putback_active_hugepage(p) do {} while (0) +#define move_hugetlb_state(old, new, reason) do {} while (0) + +static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, + unsigned long address, unsigned long end, pgprot_t newprot) +{ + return 0; +} + +static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb, + struct vm_area_struct *vma, unsigned long start, + 
unsigned long end, struct page *ref_page) +{ + BUG(); +} + +static inline void __unmap_hugepage_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, unsigned long start, + unsigned long end, struct page *ref_page) +{ + BUG(); +} + +#endif /* !CONFIG_HUGETLB_PAGE */ +/* + * hugepages at page global directory. If arch support + * hugepages at pgd level, they need to define this. + */ +#ifndef pgd_huge +#define pgd_huge(x) 0 +#endif +#ifndef p4d_huge +#define p4d_huge(x) 0 +#endif + +#ifndef pgd_write +static inline int pgd_write(pgd_t pgd) +{ + BUG(); + return 0; +} +#endif + +#define HUGETLB_ANON_FILE "anon_hugepage" + +enum { + /* + * The file will be used as an shm file so shmfs accounting rules + * apply + */ + HUGETLB_SHMFS_INODE = 1, + /* + * The file is being created on the internal vfs mount and shmfs + * accounting rules do not apply + */ + HUGETLB_ANONHUGE_INODE = 2, +}; + +#ifdef CONFIG_HUGETLBFS +struct hugetlbfs_sb_info { + long max_inodes; /* inodes allowed */ + long free_inodes; /* inodes free */ + spinlock_t stat_lock; + struct hstate *hstate; + struct hugepage_subpool *spool; + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) +{ + return sb->s_fs_info; +} + +struct hugetlbfs_inode_info { + struct shared_policy policy; + struct inode vfs_inode; + unsigned int seals; +}; + +static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) +{ + return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); +} + +extern const struct file_operations hugetlbfs_file_operations; +extern const struct vm_operations_struct hugetlb_vm_ops; +struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, + struct user_struct **user, int creat_flags, + int page_size_log); + +static inline bool is_file_hugepages(struct file *file) +{ + if (file->f_op == &hugetlbfs_file_operations) + return true; + + return is_file_shm_hugepages(file); +} + + +#else /* !CONFIG_HUGETLBFS */ + +#define is_file_hugepages(file) false +static inline struct file * +hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, + struct user_struct **user, int creat_flags, + int page_size_log) +{ + return ERR_PTR(-ENOSYS); +} + +#endif /* !CONFIG_HUGETLBFS */ + +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA +unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ + +#ifdef CONFIG_HUGETLB_PAGE + +#define HSTATE_NAME_LEN 32 +/* Defines one hugetlb page size */ +struct hstate { + int next_nid_to_alloc; + int next_nid_to_free; + unsigned int order; + unsigned long mask; + unsigned long max_huge_pages; + unsigned long nr_huge_pages; + unsigned long free_huge_pages; + unsigned long resv_huge_pages; + unsigned long surplus_huge_pages; + unsigned long nr_overcommit_huge_pages; + struct list_head hugepage_activelist; + struct list_head hugepage_freelists[MAX_NUMNODES]; + unsigned int nr_huge_pages_node[MAX_NUMNODES]; + unsigned int free_huge_pages_node[MAX_NUMNODES]; + unsigned int surplus_huge_pages_node[MAX_NUMNODES]; +#ifdef CONFIG_CGROUP_HUGETLB + /* cgroup control files */ + struct cftype cgroup_files[5]; +#endif + char name[HSTATE_NAME_LEN]; +}; + +struct huge_bootmem_page { + struct list_head list; + struct hstate *hstate; +}; + +struct page *alloc_huge_page(struct vm_area_struct *vma, + unsigned long addr, int avoid_reserve); +struct page *alloc_huge_page_node(struct 
hstate *h, int nid); +struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, + nodemask_t *nmask); +struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, + unsigned long address); +int huge_add_to_page_cache(struct page *page, struct address_space *mapping, + pgoff_t idx); + +/* arch callback */ +int __init __alloc_bootmem_huge_page(struct hstate *h); +int __init alloc_bootmem_huge_page(struct hstate *h); + +void __init hugetlb_bad_size(void); +void __init hugetlb_add_hstate(unsigned order); +struct hstate *size_to_hstate(unsigned long size); + +#ifndef HUGE_MAX_HSTATE +#define HUGE_MAX_HSTATE 1 +#endif + +extern struct hstate hstates[HUGE_MAX_HSTATE]; +extern unsigned int default_hstate_idx; + +#define default_hstate (hstates[default_hstate_idx]) + +static inline struct hstate *hstate_inode(struct inode *i) +{ + return HUGETLBFS_SB(i->i_sb)->hstate; +} + +static inline struct hstate *hstate_file(struct file *f) +{ + return hstate_inode(file_inode(f)); +} + +static inline struct hstate *hstate_sizelog(int page_size_log) +{ + if (!page_size_log) + return &default_hstate; + + return size_to_hstate(1UL << page_size_log); +} + +static inline struct hstate *hstate_vma(struct vm_area_struct *vma) +{ + return hstate_file(vma->vm_file); +} + +static inline unsigned long huge_page_size(struct hstate *h) +{ + return (unsigned long)PAGE_SIZE << h->order; +} + +extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma); + +extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma); + +static inline unsigned long huge_page_mask(struct hstate *h) +{ + return h->mask; +} + +static inline unsigned int huge_page_order(struct hstate *h) +{ + return h->order; +} + +static inline unsigned huge_page_shift(struct hstate *h) +{ + return h->order + PAGE_SHIFT; +} + +static inline bool hstate_is_gigantic(struct hstate *h) +{ + return huge_page_order(h) >= MAX_ORDER; +} + +static inline unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1 << h->order; +} + +static inline unsigned int blocks_per_huge_page(struct hstate *h) +{ + return huge_page_size(h) / 512; +} + +#include + +#ifndef arch_make_huge_pte +static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, + struct page *page, int writable) +{ + return entry; +} +#endif + +static inline struct hstate *page_hstate(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHuge(page), page); + return size_to_hstate(PAGE_SIZE << compound_order(page)); +} + +static inline unsigned hstate_index_to_shift(unsigned index) +{ + return hstates[index].order + PAGE_SHIFT; +} + +static inline int hstate_index(struct hstate *h) +{ + return h - hstates; +} + +extern int dissolve_free_huge_page(struct page *page); +extern int dissolve_free_huge_pages(unsigned long start_pfn, + unsigned long end_pfn); +static inline bool hugepage_migration_supported(struct hstate *h) +{ +#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION + if ((huge_page_shift(h) == PMD_SHIFT) || + (huge_page_shift(h) == PGDIR_SHIFT)) + return true; + else + return false; +#else + return false; +#endif +} + +static inline spinlock_t *huge_pte_lockptr(struct hstate *h, + struct mm_struct *mm, pte_t *pte) +{ + if (huge_page_size(h) == PMD_SIZE) + return pmd_lockptr(mm, (pmd_t *) pte); + VM_BUG_ON(huge_page_size(h) == PAGE_SIZE); + return &mm->page_table_lock; +} + +#ifndef hugepages_supported +/* + * Some platform decide whether they support huge pages at boot + * time. 
Some of them, such as powerpc, set HPAGE_SHIFT to 0 + * when there is no such support + */ +#define hugepages_supported() (HPAGE_SHIFT != 0) +#endif + +void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); + +static inline void hugetlb_count_init(struct mm_struct *mm) +{ + atomic_long_set(&mm->hugetlb_usage, 0); +} + +static inline void hugetlb_count_add(long l, struct mm_struct *mm) +{ + atomic_long_add(l, &mm->hugetlb_usage); +} + +static inline void hugetlb_count_sub(long l, struct mm_struct *mm) +{ + atomic_long_sub(l, &mm->hugetlb_usage); +} + +#ifndef set_huge_swap_pte_at +static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + set_huge_pte_at(mm, addr, ptep, pte); +} +#endif + +void set_page_huge_active(struct page *page); + +#else /* CONFIG_HUGETLB_PAGE */ +struct hstate {}; +#define alloc_huge_page(v, a, r) NULL +#define alloc_huge_page_node(h, nid) NULL +#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL +#define alloc_huge_page_vma(h, vma, address) NULL +#define alloc_bootmem_huge_page(h) NULL +#define hstate_file(f) NULL +#define hstate_sizelog(s) NULL +#define hstate_vma(v) NULL +#define hstate_inode(i) NULL +#define page_hstate(page) NULL +#define huge_page_size(h) PAGE_SIZE +#define huge_page_mask(h) PAGE_MASK +#define vma_kernel_pagesize(v) PAGE_SIZE +#define vma_mmu_pagesize(v) PAGE_SIZE +#define huge_page_order(h) 0 +#define huge_page_shift(h) PAGE_SHIFT +static inline bool hstate_is_gigantic(struct hstate *h) +{ + return false; +} + +static inline unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1; +} + +static inline unsigned hstate_index_to_shift(unsigned index) +{ + return 0; +} + +static inline int hstate_index(struct hstate *h) +{ + return 0; +} + +static inline int dissolve_free_huge_page(struct page *page) +{ + return 0; +} + +static inline int dissolve_free_huge_pages(unsigned long start_pfn, + unsigned long end_pfn) +{ + return 0; +} + +static inline bool hugepage_migration_supported(struct hstate *h) +{ + return false; +} + +static inline spinlock_t *huge_pte_lockptr(struct hstate *h, + struct mm_struct *mm, pte_t *pte) +{ + return &mm->page_table_lock; +} + +static inline void hugetlb_count_init(struct mm_struct *mm) +{ +} + +static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) +{ +} + +static inline void hugetlb_count_sub(long l, struct mm_struct *mm) +{ +} + +static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ +} +#endif /* CONFIG_HUGETLB_PAGE */ + +static inline spinlock_t *huge_pte_lock(struct hstate *h, + struct mm_struct *mm, pte_t *pte) +{ + spinlock_t *ptl; + + ptl = huge_pte_lockptr(h, mm, pte); + spin_lock(ptl); + return ptl; +} + +#endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h new file mode 100644 index 000000000..063962f6d --- /dev/null +++ b/include/linux/hugetlb_cgroup.h @@ -0,0 +1,119 @@ +/* + * Copyright IBM Corporation, 2012 + * Author Aneesh Kumar K.V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
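As an illustration of how the hstate helpers and huge_pte_lock() above fit together, a minimal sketch of inspecting a hugetlb PTE under the appropriate page-table lock; my_huge_pte_present is hypothetical, huge_ptep_get() and pte_present() come from the arch page-table headers, and the caller is assumed to hold mmap_sem:

static bool my_huge_pte_present(struct vm_area_struct *vma, unsigned long addr)
{
	struct hstate *h = hstate_vma(vma);
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	bool present;

	ptep = huge_pte_offset(mm, addr & huge_page_mask(h), huge_page_size(h));
	if (!ptep)
		return false;

	/*
	 * huge_pte_lock() picks the PMD split lock for PMD-sized pages
	 * and falls back to mm->page_table_lock for larger page sizes.
	 */
	ptl = huge_pte_lock(h, mm, ptep);
	present = pte_present(huge_ptep_get(ptep));
	spin_unlock(ptl);

	return present;
}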
+ * + */ + +#ifndef _LINUX_HUGETLB_CGROUP_H +#define _LINUX_HUGETLB_CGROUP_H + +#include + +struct hugetlb_cgroup; +/* + * Minimum page order trackable by hugetlb cgroup. + * At least 3 pages are necessary for all the tracking information. + */ +#define HUGETLB_CGROUP_MIN_ORDER 2 + +#ifdef CONFIG_CGROUP_HUGETLB + +static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHuge(page), page); + + if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) + return NULL; + return (struct hugetlb_cgroup *)page[2].private; +} + +static inline +int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) +{ + VM_BUG_ON_PAGE(!PageHuge(page), page); + + if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) + return -1; + page[2].private = (unsigned long)h_cg; + return 0; +} + +static inline bool hugetlb_cgroup_disabled(void) +{ + return !cgroup_subsys_enabled(hugetlb_cgrp_subsys); +} + +extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, + struct hugetlb_cgroup **ptr); +extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg, + struct page *page); +extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, + struct page *page); +extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg); +extern void hugetlb_cgroup_file_init(void) __init; +extern void hugetlb_cgroup_migrate(struct page *oldhpage, + struct page *newhpage); + +#else +static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) +{ + return NULL; +} + +static inline +int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) +{ + return 0; +} + +static inline bool hugetlb_cgroup_disabled(void) +{ + return true; +} + +static inline int +hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, + struct hugetlb_cgroup **ptr) +{ + return 0; +} + +static inline void +hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg, + struct page *page) +{ +} + +static inline void +hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) +{ +} + +static inline void +hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg) +{ +} + +static inline void hugetlb_cgroup_file_init(void) +{ +} + +static inline void hugetlb_cgroup_migrate(struct page *oldhpage, + struct page *newhpage) +{ +} + +#endif /* CONFIG_MEM_RES_CTLR_HUGETLB */ +#endif diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h new file mode 100644 index 000000000..0660a03d3 --- /dev/null +++ b/include/linux/hugetlb_inline.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HUGETLB_INLINE_H +#define _LINUX_HUGETLB_INLINE_H + +#ifdef CONFIG_HUGETLB_PAGE + +#include + +static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) +{ + return !!(vma->vm_flags & VM_HUGETLB); +} + +#else + +static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) +{ + return false; +} + +#endif + +#endif diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h new file mode 100644 index 000000000..6058c3844 --- /dev/null +++ b/include/linux/hw_breakpoint.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HW_BREAKPOINT_H +#define _LINUX_HW_BREAKPOINT_H + +#include +#include + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + +extern int __init init_hw_breakpoint(void); + +static inline void hw_breakpoint_init(struct 
perf_event_attr *attr) +{ + memset(attr, 0, sizeof(*attr)); + + attr->type = PERF_TYPE_BREAKPOINT; + attr->size = sizeof(*attr); + /* + * As it's for in-kernel or ptrace use, we want it to be pinned + * and to call its callback every hits. + */ + attr->pinned = 1; + attr->sample_period = 1; +} + +static inline void ptrace_breakpoint_init(struct perf_event_attr *attr) +{ + hw_breakpoint_init(attr); + attr->exclude_kernel = 1; +} + +static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) +{ + return bp->attr.bp_addr; +} + +static inline int hw_breakpoint_type(struct perf_event *bp) +{ + return bp->attr.bp_type; +} + +static inline unsigned long hw_breakpoint_len(struct perf_event *bp) +{ + return bp->attr.bp_len; +} + +extern struct perf_event * +register_user_hw_breakpoint(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context, + struct task_struct *tsk); + +/* FIXME: only change from the attr, and don't unregister */ +extern int +modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr); +extern int +modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr, + bool check); + +/* + * Kernel breakpoints are not associated with any particular thread. + */ +extern struct perf_event * +register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context, + int cpu); + +extern struct perf_event * __percpu * +register_wide_hw_breakpoint(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context); + +extern int register_perf_hw_breakpoint(struct perf_event *bp); +extern int __register_perf_hw_breakpoint(struct perf_event *bp); +extern void unregister_hw_breakpoint(struct perf_event *bp); +extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events); + +extern int dbg_reserve_bp_slot(struct perf_event *bp); +extern int dbg_release_bp_slot(struct perf_event *bp); +extern int reserve_bp_slot(struct perf_event *bp); +extern void release_bp_slot(struct perf_event *bp); + +extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); + +static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) +{ + return &bp->hw.info; +} + +#else /* !CONFIG_HAVE_HW_BREAKPOINT */ + +static inline int __init init_hw_breakpoint(void) { return 0; } + +static inline struct perf_event * +register_user_hw_breakpoint(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context, + struct task_struct *tsk) { return NULL; } +static inline int +modify_user_hw_breakpoint(struct perf_event *bp, + struct perf_event_attr *attr) { return -ENOSYS; } +static inline int +modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr, + bool check) { return -ENOSYS; } + +static inline struct perf_event * +register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context, + int cpu) { return NULL; } +static inline struct perf_event * __percpu * +register_wide_hw_breakpoint(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context) { return NULL; } +static inline int +register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } +static inline int +__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } +static inline void unregister_hw_breakpoint(struct perf_event *bp) { } +static inline void +unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { } +static inline int 
+reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; } +static inline void release_bp_slot(struct perf_event *bp) { } + +static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { } + +static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) +{ + return NULL; +} + +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ +#endif /* _LINUX_HW_BREAKPOINT_H */ diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h new file mode 100644 index 000000000..31587f36c --- /dev/null +++ b/include/linux/hw_random.h @@ -0,0 +1,63 @@ +/* + Hardware Random Number Generator + + Please read Documentation/hw_random.txt for details on use. + + ---------------------------------------------------------- + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + */ + +#ifndef LINUX_HWRANDOM_H_ +#define LINUX_HWRANDOM_H_ + +#include +#include +#include +#include + +/** + * struct hwrng - Hardware Random Number Generator driver + * @name: Unique RNG name. + * @init: Initialization callback (can be NULL). + * @cleanup: Cleanup callback (can be NULL). + * @data_present: Callback to determine if data is available + * on the RNG. If NULL, it is assumed that + * there is always data available. *OBSOLETE* + * @data_read: Read data from the RNG device. + * Returns the number of lower random bytes in "data". + * Must not be NULL. *OBSOLETE* + * @read: New API. drivers can fill up to max bytes of data + * into the buffer. The buffer is aligned for any type + * and max is a multiple of 4 and >= 32 bytes. + * @priv: Private data, for use by the RNG driver. + * @quality: Estimation of true entropy in RNG's bitstream + * (per mill). + */ +struct hwrng { + const char *name; + int (*init)(struct hwrng *rng); + void (*cleanup)(struct hwrng *rng); + int (*data_present)(struct hwrng *rng, int wait); + int (*data_read)(struct hwrng *rng, u32 *data); + int (*read)(struct hwrng *rng, void *data, size_t max, bool wait); + unsigned long priv; + unsigned short quality; + + /* internal. */ + struct list_head list; + struct kref ref; + struct completion cleanup_done; +}; + +struct device; + +/** Register a new Hardware Random Number Generator driver. */ +extern int hwrng_register(struct hwrng *rng); +extern int devm_hwrng_register(struct device *dev, struct hwrng *rng); +/** Unregister a Hardware Random Number Generator driver. */ +extern void hwrng_unregister(struct hwrng *rng); +extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); + +#endif /* LINUX_HWRANDOM_H_ */ diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h new file mode 100644 index 000000000..1c7b89ae6 --- /dev/null +++ b/include/linux/hwmon-sysfs.h @@ -0,0 +1,57 @@ +/* + * hwmon-sysfs.h - hardware monitoring chip driver sysfs defines + * + * Copyright (C) 2005 Yani Ioannou + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
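The hwrng interface above needs little more than .name and .read to be useful; a minimal sketch of a provider registered through devm_hwrng_register() (all my_* names are hypothetical, and a real driver would read from its hardware entropy source instead of returning a constant):

static int my_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	u32 sample = 0x12345678;		/* stand-in for a HW register read */
	size_t len = min(max, sizeof(sample));

	memcpy(data, &sample, len);
	return len;				/* bytes placed into data */
}

static struct hwrng my_rng = {
	.name	 = "my_rng",
	.read	 = my_rng_read,
	.quality = 500,				/* estimated entropy, per mill */
};

/* typically called from the driver's probe() routine */
static int my_rng_probe(struct platform_device *pdev)
{
	return devm_hwrng_register(&pdev->dev, &my_rng);
}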
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#ifndef _LINUX_HWMON_SYSFS_H +#define _LINUX_HWMON_SYSFS_H + +#include + +struct sensor_device_attribute{ + struct device_attribute dev_attr; + int index; +}; +#define to_sensor_dev_attr(_dev_attr) \ + container_of(_dev_attr, struct sensor_device_attribute, dev_attr) + +#define SENSOR_ATTR(_name, _mode, _show, _store, _index) \ + { .dev_attr = __ATTR(_name, _mode, _show, _store), \ + .index = _index } + +#define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \ +struct sensor_device_attribute sensor_dev_attr_##_name \ + = SENSOR_ATTR(_name, _mode, _show, _store, _index) + +struct sensor_device_attribute_2 { + struct device_attribute dev_attr; + u8 index; + u8 nr; +}; +#define to_sensor_dev_attr_2(_dev_attr) \ + container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr) + +#define SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) \ + { .dev_attr = __ATTR(_name, _mode, _show, _store), \ + .index = _index, \ + .nr = _nr } + +#define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \ +struct sensor_device_attribute_2 sensor_dev_attr_##_name \ + = SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) + +#endif /* _LINUX_HWMON_SYSFS_H */ diff --git a/include/linux/hwmon-vid.h b/include/linux/hwmon-vid.h new file mode 100644 index 000000000..da0a680e2 --- /dev/null +++ b/include/linux/hwmon-vid.h @@ -0,0 +1,45 @@ +/* + hwmon-vid.h - VID/VRM/VRD voltage conversions + + Originally part of lm_sensors + Copyright (c) 2002 Mark D. Studebaker + With assistance from Trent Piepho + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef _LINUX_HWMON_VID_H +#define _LINUX_HWMON_VID_H + +int vid_from_reg(int val, u8 vrm); +u8 vid_which_vrm(void); + +/* vrm is the VRM/VRD document version multiplied by 10. + val is in mV to avoid floating point in the kernel. + Returned value is the 4-, 5- or 6-bit VID code. + Note that only VRM 9.x is supported for now. */ +static inline int vid_to_reg(int val, u8 vrm) +{ + switch (vrm) { + case 91: /* VRM 9.1 */ + case 90: /* VRM 9.0 */ + return ((val >= 1100) && (val <= 1850) ? + ((18499 - val * 10) / 25 + 5) / 10 : -1); + default: + return -EINVAL; + } +} + +#endif /* _LINUX_HWMON_VID_H */ diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h new file mode 100644 index 000000000..8fde789f2 --- /dev/null +++ b/include/linux/hwmon.h @@ -0,0 +1,424 @@ +/* + hwmon.h - part of lm_sensors, Linux kernel modules for hardware monitoring + + This file declares helper functions for the sysfs class "hwmon", + for use by sensors drivers. + + Copyright (C) 2005 Mark M. 
Hoffman + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. +*/ + +#ifndef _HWMON_H_ +#define _HWMON_H_ + +#include + +struct device; +struct attribute_group; + +enum hwmon_sensor_types { + hwmon_chip, + hwmon_temp, + hwmon_in, + hwmon_curr, + hwmon_power, + hwmon_energy, + hwmon_humidity, + hwmon_fan, + hwmon_pwm, + hwmon_max, +}; + +enum hwmon_chip_attributes { + hwmon_chip_temp_reset_history, + hwmon_chip_in_reset_history, + hwmon_chip_curr_reset_history, + hwmon_chip_power_reset_history, + hwmon_chip_register_tz, + hwmon_chip_update_interval, + hwmon_chip_alarms, +}; + +#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history) +#define HWMON_C_IN_RESET_HISTORY BIT(hwmon_chip_in_reset_history) +#define HWMON_C_CURR_RESET_HISTORY BIT(hwmon_chip_curr_reset_history) +#define HWMON_C_POWER_RESET_HISTORY BIT(hwmon_chip_power_reset_history) +#define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz) +#define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval) +#define HWMON_C_ALARMS BIT(hwmon_chip_alarms) + +enum hwmon_temp_attributes { + hwmon_temp_input = 0, + hwmon_temp_type, + hwmon_temp_lcrit, + hwmon_temp_lcrit_hyst, + hwmon_temp_min, + hwmon_temp_min_hyst, + hwmon_temp_max, + hwmon_temp_max_hyst, + hwmon_temp_crit, + hwmon_temp_crit_hyst, + hwmon_temp_emergency, + hwmon_temp_emergency_hyst, + hwmon_temp_alarm, + hwmon_temp_lcrit_alarm, + hwmon_temp_min_alarm, + hwmon_temp_max_alarm, + hwmon_temp_crit_alarm, + hwmon_temp_emergency_alarm, + hwmon_temp_fault, + hwmon_temp_offset, + hwmon_temp_label, + hwmon_temp_lowest, + hwmon_temp_highest, + hwmon_temp_reset_history, +}; + +#define HWMON_T_INPUT BIT(hwmon_temp_input) +#define HWMON_T_TYPE BIT(hwmon_temp_type) +#define HWMON_T_LCRIT BIT(hwmon_temp_lcrit) +#define HWMON_T_LCRIT_HYST BIT(hwmon_temp_lcrit_hyst) +#define HWMON_T_MIN BIT(hwmon_temp_min) +#define HWMON_T_MIN_HYST BIT(hwmon_temp_min_hyst) +#define HWMON_T_MAX BIT(hwmon_temp_max) +#define HWMON_T_MAX_HYST BIT(hwmon_temp_max_hyst) +#define HWMON_T_CRIT BIT(hwmon_temp_crit) +#define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst) +#define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency) +#define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst) +#define HWMON_T_ALARM BIT(hwmon_temp_alarm) +#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) +#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) +#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) +#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm) +#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm) +#define HWMON_T_FAULT BIT(hwmon_temp_fault) +#define HWMON_T_OFFSET BIT(hwmon_temp_offset) +#define HWMON_T_LABEL BIT(hwmon_temp_label) +#define HWMON_T_LOWEST BIT(hwmon_temp_lowest) +#define HWMON_T_HIGHEST BIT(hwmon_temp_highest) +#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history) + +enum hwmon_in_attributes { + hwmon_in_input, + hwmon_in_min, + hwmon_in_max, + hwmon_in_lcrit, + hwmon_in_crit, + hwmon_in_average, + hwmon_in_lowest, + hwmon_in_highest, + hwmon_in_reset_history, + hwmon_in_label, + hwmon_in_alarm, + hwmon_in_min_alarm, + hwmon_in_max_alarm, + hwmon_in_lcrit_alarm, + hwmon_in_crit_alarm, +}; + +#define HWMON_I_INPUT BIT(hwmon_in_input) +#define HWMON_I_MIN BIT(hwmon_in_min) +#define HWMON_I_MAX BIT(hwmon_in_max) +#define HWMON_I_LCRIT BIT(hwmon_in_lcrit) +#define HWMON_I_CRIT BIT(hwmon_in_crit) +#define HWMON_I_AVERAGE 
BIT(hwmon_in_average) +#define HWMON_I_LOWEST BIT(hwmon_in_lowest) +#define HWMON_I_HIGHEST BIT(hwmon_in_highest) +#define HWMON_I_RESET_HISTORY BIT(hwmon_in_reset_history) +#define HWMON_I_LABEL BIT(hwmon_in_label) +#define HWMON_I_ALARM BIT(hwmon_in_alarm) +#define HWMON_I_MIN_ALARM BIT(hwmon_in_min_alarm) +#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm) +#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm) +#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm) + +enum hwmon_curr_attributes { + hwmon_curr_input, + hwmon_curr_min, + hwmon_curr_max, + hwmon_curr_lcrit, + hwmon_curr_crit, + hwmon_curr_average, + hwmon_curr_lowest, + hwmon_curr_highest, + hwmon_curr_reset_history, + hwmon_curr_label, + hwmon_curr_alarm, + hwmon_curr_min_alarm, + hwmon_curr_max_alarm, + hwmon_curr_lcrit_alarm, + hwmon_curr_crit_alarm, +}; + +#define HWMON_C_INPUT BIT(hwmon_curr_input) +#define HWMON_C_MIN BIT(hwmon_curr_min) +#define HWMON_C_MAX BIT(hwmon_curr_max) +#define HWMON_C_LCRIT BIT(hwmon_curr_lcrit) +#define HWMON_C_CRIT BIT(hwmon_curr_crit) +#define HWMON_C_AVERAGE BIT(hwmon_curr_average) +#define HWMON_C_LOWEST BIT(hwmon_curr_lowest) +#define HWMON_C_HIGHEST BIT(hwmon_curr_highest) +#define HWMON_C_RESET_HISTORY BIT(hwmon_curr_reset_history) +#define HWMON_C_LABEL BIT(hwmon_curr_label) +#define HWMON_C_ALARM BIT(hwmon_curr_alarm) +#define HWMON_C_MIN_ALARM BIT(hwmon_curr_min_alarm) +#define HWMON_C_MAX_ALARM BIT(hwmon_curr_max_alarm) +#define HWMON_C_LCRIT_ALARM BIT(hwmon_curr_lcrit_alarm) +#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm) + +enum hwmon_power_attributes { + hwmon_power_average, + hwmon_power_average_interval, + hwmon_power_average_interval_max, + hwmon_power_average_interval_min, + hwmon_power_average_highest, + hwmon_power_average_lowest, + hwmon_power_average_max, + hwmon_power_average_min, + hwmon_power_input, + hwmon_power_input_highest, + hwmon_power_input_lowest, + hwmon_power_reset_history, + hwmon_power_accuracy, + hwmon_power_cap, + hwmon_power_cap_hyst, + hwmon_power_cap_max, + hwmon_power_cap_min, + hwmon_power_min, + hwmon_power_max, + hwmon_power_crit, + hwmon_power_lcrit, + hwmon_power_label, + hwmon_power_alarm, + hwmon_power_cap_alarm, + hwmon_power_min_alarm, + hwmon_power_max_alarm, + hwmon_power_lcrit_alarm, + hwmon_power_crit_alarm, +}; + +#define HWMON_P_AVERAGE BIT(hwmon_power_average) +#define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval) +#define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max) +#define HWMON_P_AVERAGE_INTERVAL_MIN BIT(hwmon_power_average_interval_min) +#define HWMON_P_AVERAGE_HIGHEST BIT(hwmon_power_average_highest) +#define HWMON_P_AVERAGE_LOWEST BIT(hwmon_power_average_lowest) +#define HWMON_P_AVERAGE_MAX BIT(hwmon_power_average_max) +#define HWMON_P_AVERAGE_MIN BIT(hwmon_power_average_min) +#define HWMON_P_INPUT BIT(hwmon_power_input) +#define HWMON_P_INPUT_HIGHEST BIT(hwmon_power_input_highest) +#define HWMON_P_INPUT_LOWEST BIT(hwmon_power_input_lowest) +#define HWMON_P_RESET_HISTORY BIT(hwmon_power_reset_history) +#define HWMON_P_ACCURACY BIT(hwmon_power_accuracy) +#define HWMON_P_CAP BIT(hwmon_power_cap) +#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst) +#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max) +#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min) +#define HWMON_P_MIN BIT(hwmon_power_min) +#define HWMON_P_MAX BIT(hwmon_power_max) +#define HWMON_P_LCRIT BIT(hwmon_power_lcrit) +#define HWMON_P_CRIT BIT(hwmon_power_crit) +#define HWMON_P_LABEL BIT(hwmon_power_label) +#define HWMON_P_ALARM 
BIT(hwmon_power_alarm) +#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm) +#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm) +#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm) +#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm) +#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm) + +enum hwmon_energy_attributes { + hwmon_energy_input, + hwmon_energy_label, +}; + +#define HWMON_E_INPUT BIT(hwmon_energy_input) +#define HWMON_E_LABEL BIT(hwmon_energy_label) + +enum hwmon_humidity_attributes { + hwmon_humidity_input, + hwmon_humidity_label, + hwmon_humidity_min, + hwmon_humidity_min_hyst, + hwmon_humidity_max, + hwmon_humidity_max_hyst, + hwmon_humidity_alarm, + hwmon_humidity_fault, +}; + +#define HWMON_H_INPUT BIT(hwmon_humidity_input) +#define HWMON_H_LABEL BIT(hwmon_humidity_label) +#define HWMON_H_MIN BIT(hwmon_humidity_min) +#define HWMON_H_MIN_HYST BIT(hwmon_humidity_min_hyst) +#define HWMON_H_MAX BIT(hwmon_humidity_max) +#define HWMON_H_MAX_HYST BIT(hwmon_humidity_max_hyst) +#define HWMON_H_ALARM BIT(hwmon_humidity_alarm) +#define HWMON_H_FAULT BIT(hwmon_humidity_fault) + +enum hwmon_fan_attributes { + hwmon_fan_input, + hwmon_fan_label, + hwmon_fan_min, + hwmon_fan_max, + hwmon_fan_div, + hwmon_fan_pulses, + hwmon_fan_target, + hwmon_fan_alarm, + hwmon_fan_min_alarm, + hwmon_fan_max_alarm, + hwmon_fan_fault, +}; + +#define HWMON_F_INPUT BIT(hwmon_fan_input) +#define HWMON_F_LABEL BIT(hwmon_fan_label) +#define HWMON_F_MIN BIT(hwmon_fan_min) +#define HWMON_F_MAX BIT(hwmon_fan_max) +#define HWMON_F_DIV BIT(hwmon_fan_div) +#define HWMON_F_PULSES BIT(hwmon_fan_pulses) +#define HWMON_F_TARGET BIT(hwmon_fan_target) +#define HWMON_F_ALARM BIT(hwmon_fan_alarm) +#define HWMON_F_MIN_ALARM BIT(hwmon_fan_min_alarm) +#define HWMON_F_MAX_ALARM BIT(hwmon_fan_max_alarm) +#define HWMON_F_FAULT BIT(hwmon_fan_fault) + +enum hwmon_pwm_attributes { + hwmon_pwm_input, + hwmon_pwm_enable, + hwmon_pwm_mode, + hwmon_pwm_freq, +}; + +#define HWMON_PWM_INPUT BIT(hwmon_pwm_input) +#define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable) +#define HWMON_PWM_MODE BIT(hwmon_pwm_mode) +#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq) + +/** + * struct hwmon_ops - hwmon device operations + * @is_visible: Callback to return attribute visibility. Mandatory. + * Parameters are: + * @const void *drvdata: + * Pointer to driver-private data structure passed + * as argument to hwmon_device_register_with_info(). + * @type: Sensor type + * @attr: Sensor attribute + * @channel: + * Channel number + * The function returns the file permissions. + * If the return value is 0, no attribute will be created. + * @read: Read callback for data attributes. Mandatory if readable + * data attributes are present. + * Parameters are: + * @dev: Pointer to hardware monitoring device + * @type: Sensor type + * @attr: Sensor attribute + * @channel: + * Channel number + * @val: Pointer to returned value + * The function returns 0 on success or a negative error number. + * @read_string: + * Read callback for string attributes. Mandatory if string + * attributes are present. + * Parameters are: + * @dev: Pointer to hardware monitoring device + * @type: Sensor type + * @attr: Sensor attribute + * @channel: + * Channel number + * @str: Pointer to returned string + * The function returns 0 on success or a negative error number. + * @write: Write callback for data attributes. Mandatory if writeable + * data attributes are present. 
+ * Parameters are: + * @dev: Pointer to hardware monitoring device + * @type: Sensor type + * @attr: Sensor attribute + * @channel: + * Channel number + * @val: Value to write + * The function returns 0 on success or a negative error number. + */ +struct hwmon_ops { + umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type, + u32 attr, int channel); + int (*read)(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val); + int (*read_string)(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, const char **str); + int (*write)(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val); +}; + +/** + * Channel information + * @type: Channel type. + * @config: Pointer to NULL-terminated list of channel parameters. + * Use for per-channel attributes. + */ +struct hwmon_channel_info { + enum hwmon_sensor_types type; + const u32 *config; +}; + +/** + * Chip configuration + * @ops: Pointer to hwmon operations. + * @info: Null-terminated list of channel information. + */ +struct hwmon_chip_info { + const struct hwmon_ops *ops; + const struct hwmon_channel_info **info; +}; + +/* hwmon_device_register() is deprecated */ +struct device *hwmon_device_register(struct device *dev); + +struct device * +hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups); +struct device * +devm_hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups); +struct device * +hwmon_device_register_with_info(struct device *dev, + const char *name, void *drvdata, + const struct hwmon_chip_info *info, + const struct attribute_group **extra_groups); +struct device * +devm_hwmon_device_register_with_info(struct device *dev, + const char *name, void *drvdata, + const struct hwmon_chip_info *info, + const struct attribute_group **extra_groups); + +void hwmon_device_unregister(struct device *dev); +void devm_hwmon_device_unregister(struct device *dev); + +/** + * hwmon_is_bad_char - Is the char invalid in a hwmon name + * @ch: the char to be considered + * + * hwmon_is_bad_char() can be used to determine if the given character + * may not be used in a hwmon name. + * + * Returns true if the char is invalid, false otherwise. 
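+ *
+ * Illustrative sketch only (not part of the upstream header): a driver
+ * deriving its hwmon name from a hypothetical "chip_label" buffer might
+ * sanitize the label before registering the device, e.g.
+ *
+ *	for (i = 0; chip_label[i]; i++)
+ *		if (hwmon_is_bad_char(chip_label[i]))
+ *			chip_label[i] = '_';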
+ */ +static inline bool hwmon_is_bad_char(const char ch) +{ + switch (ch) { + case '-': + case '*': + case ' ': + case '\t': + case '\n': + return true; + default: + return false; + } +} + +#endif diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h new file mode 100644 index 000000000..0afe693be --- /dev/null +++ b/include/linux/hwspinlock.h @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Hardware spinlock public header + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com + * + * Contact: Ohad Ben-Cohen + */ + +#ifndef __LINUX_HWSPINLOCK_H +#define __LINUX_HWSPINLOCK_H + +#include +#include + +/* hwspinlock mode argument */ +#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ +#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ +#define HWLOCK_RAW 0x03 + +struct device; +struct device_node; +struct hwspinlock; +struct hwspinlock_device; +struct hwspinlock_ops; + +/** + * struct hwspinlock_pdata - platform data for hwspinlock drivers + * @base_id: base id for this hwspinlock device + * + * hwspinlock devices provide system-wide hardware locks that are used + * by remote processors that have no other way to achieve synchronization. + * + * To achieve that, each physical lock must have a system-wide id number + * that is agreed upon, otherwise remote processors can't possibly assume + * they're using the same hardware lock. + * + * Usually boards have a single hwspinlock device, which provides several + * hwspinlocks, and in this case, they can be trivially numbered 0 to + * (num-of-locks - 1). + * + * In case boards have several hwspinlocks devices, a different base id + * should be used for each hwspinlock device (they can't all use 0 as + * a starting id!). + * + * This platform data structure should be used to provide the base id + * for each device (which is trivially 0 when only a single hwspinlock + * device exists). It can be shared between different platforms, hence + * its location. + */ +struct hwspinlock_pdata { + int base_id; +}; + +#ifdef CONFIG_HWSPINLOCK + +int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev, + const struct hwspinlock_ops *ops, int base_id, int num_locks); +int hwspin_lock_unregister(struct hwspinlock_device *bank); +struct hwspinlock *hwspin_lock_request(void); +struct hwspinlock *hwspin_lock_request_specific(unsigned int id); +int hwspin_lock_free(struct hwspinlock *hwlock); +int of_hwspin_lock_get_id(struct device_node *np, int index); +int hwspin_lock_get_id(struct hwspinlock *hwlock); +int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int, + unsigned long *); +int __hwspin_trylock(struct hwspinlock *, int, unsigned long *); +void __hwspin_unlock(struct hwspinlock *, int, unsigned long *); +int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name); +int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock); +struct hwspinlock *devm_hwspin_lock_request(struct device *dev); +struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev, + unsigned int id); +int devm_hwspin_lock_unregister(struct device *dev, + struct hwspinlock_device *bank); +int devm_hwspin_lock_register(struct device *dev, + struct hwspinlock_device *bank, + const struct hwspinlock_ops *ops, + int base_id, int num_locks); + +#else /* !CONFIG_HWSPINLOCK */ + +/* + * We don't want these functions to fail if CONFIG_HWSPINLOCK is not + * enabled. 
We prefer to silently succeed in this case, and let the + * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not + * required on a given setup, users will still work. + * + * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which + * we _do_ want users to fail (no point in registering hwspinlock instances if + * the framework is not available). + * + * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking + * users. Others, which care, can still check this with IS_ERR. + */ +static inline struct hwspinlock *hwspin_lock_request(void) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id) +{ + return ERR_PTR(-ENODEV); +} + +static inline int hwspin_lock_free(struct hwspinlock *hwlock) +{ + return 0; +} + +static inline +int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, + int mode, unsigned long *flags) +{ + return 0; +} + +static inline +int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags) +{ + return 0; +} + +static inline +void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) +{ +} + +static inline int of_hwspin_lock_get_id(struct device_node *np, int index) +{ + return 0; +} + +static inline int hwspin_lock_get_id(struct hwspinlock *hwlock) +{ + return 0; +} + +static inline +int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name) +{ + return 0; +} + +static inline +int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock) +{ + return 0; +} + +static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + +static inline +struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev, + unsigned int id) +{ + return ERR_PTR(-ENODEV); +} + +#endif /* !CONFIG_HWSPINLOCK */ + +/** + * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts + * @hwlock: an hwspinlock which we want to trylock + * @flags: a pointer to where the caller's interrupt state will be saved at + * + * This function attempts to lock the underlying hwspinlock, and will + * immediately fail if the hwspinlock is already locked. + * + * Upon a successful return from this function, preemption and local + * interrupts are disabled (previous interrupts state is saved at @flags), + * so the caller must not sleep, and is advised to release the hwspinlock + * as soon as possible. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. + */ +static inline +int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags) +{ + return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags); +} + +/** + * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts + * @hwlock: an hwspinlock which we want to trylock + * + * This function attempts to lock the underlying hwspinlock, and will + * immediately fail if the hwspinlock is already locked. + * + * Upon a successful return from this function, preemption and local + * interrupts are disabled, so the caller must not sleep, and is advised + * to release the hwspinlock as soon as possible. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. 
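+ *
+ * Minimal usage sketch (illustrative only; "hwlock" stands for a lock
+ * previously obtained via hwspin_lock_request() or a devm_* variant, and
+ * write_shared_scratchpad() is a hypothetical helper):
+ *
+ *	if (!hwspin_trylock_irq(hwlock)) {
+ *		write_shared_scratchpad();
+ *		hwspin_unlock_irq(hwlock);
+ *	}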
+ */ +static inline int hwspin_trylock_irq(struct hwspinlock *hwlock) +{ + return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL); +} + +/** + * hwspin_trylock_raw() - attempt to lock a specific hwspinlock + * @hwlock: an hwspinlock which we want to trylock + * + * This function attempts to lock an hwspinlock, and will immediately fail + * if the hwspinlock is already taken. + * + * Caution: User must protect the routine of getting hardware lock with mutex + * or spinlock to avoid dead-lock, that will let user can do some time-consuming + * or sleepable operations under the hardware lock. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. + */ +static inline int hwspin_trylock_raw(struct hwspinlock *hwlock) +{ + return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL); +} + +/** + * hwspin_trylock() - attempt to lock a specific hwspinlock + * @hwlock: an hwspinlock which we want to trylock + * + * This function attempts to lock an hwspinlock, and will immediately fail + * if the hwspinlock is already taken. + * + * Upon a successful return from this function, preemption is disabled, + * so the caller must not sleep, and is advised to release the hwspinlock + * as soon as possible. This is required in order to minimize remote cores + * polling on the hardware interconnect. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. + */ +static inline int hwspin_trylock(struct hwspinlock *hwlock) +{ + return __hwspin_trylock(hwlock, 0, NULL); +} + +/** + * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs + * @hwlock: the hwspinlock to be locked + * @to: timeout value in msecs + * @flags: a pointer to where the caller's interrupt state will be saved at + * + * This function locks the underlying @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up when @timeout msecs have elapsed. + * + * Upon a successful return from this function, preemption and local interrupts + * are disabled (plus previous interrupt state is saved), so the caller must + * not sleep, and is advised to release the hwspinlock as soon as possible. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). The function will never sleep. + */ +static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock, + unsigned int to, unsigned long *flags) +{ + return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags); +} + +/** + * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs + * @hwlock: the hwspinlock to be locked + * @to: timeout value in msecs + * + * This function locks the underlying @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up when @timeout msecs have elapsed. + * + * Upon a successful return from this function, preemption and local interrupts + * are disabled so the caller must not sleep, and is advised to release the + * hwspinlock as soon as possible. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). The function will never sleep. 
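+ *
+ * Usage sketch (illustrative only; the 100 msecs timeout and
+ * update_shared_state() are made up for the example):
+ *
+ *	ret = hwspin_lock_timeout_irq(hwlock, 100);
+ *	if (ret)
+ *		return ret;
+ *	update_shared_state();
+ *	hwspin_unlock_irq(hwlock);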
+ */ +static inline +int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to) +{ + return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL); +} + +/** + * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit + * @hwlock: the hwspinlock to be locked + * @to: timeout value in msecs + * + * This function locks the underlying @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up when @timeout msecs have elapsed. + * + * Caution: User must protect the routine of getting hardware lock with mutex + * or spinlock to avoid dead-lock, that will let user can do some time-consuming + * or sleepable operations under the hardware lock. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). The function will never sleep. + */ +static inline +int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to) +{ + return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL); +} + +/** + * hwspin_lock_timeout() - lock an hwspinlock with timeout limit + * @hwlock: the hwspinlock to be locked + * @to: timeout value in msecs + * + * This function locks the underlying @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up when @timeout msecs have elapsed. + * + * Upon a successful return from this function, preemption is disabled + * so the caller must not sleep, and is advised to release the hwspinlock + * as soon as possible. + * This is required in order to minimize remote cores polling on the + * hardware interconnect. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). The function will never sleep. + */ +static inline +int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to) +{ + return __hwspin_lock_timeout(hwlock, to, 0, NULL); +} + +/** + * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * @flags: previous caller's interrupt state to restore + * + * This function will unlock a specific hwspinlock, enable preemption and + * restore the previous state of the local interrupts. It should be used + * to undo, e.g., hwspin_trylock_irqsave(). + * + * @hwlock must be already locked before calling this function: it is a bug + * to call unlock on a @hwlock that is already unlocked. + */ +static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock, + unsigned long *flags) +{ + __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags); +} + +/** + * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * + * This function will unlock a specific hwspinlock, enable preemption and + * enable local interrupts. Should be used to undo hwspin_lock_irq(). + * + * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before + * calling this function: it is a bug to call unlock on a @hwlock that is + * already unlocked. + */ +static inline void hwspin_unlock_irq(struct hwspinlock *hwlock) +{ + __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL); +} + +/** + * hwspin_unlock_raw() - unlock hwspinlock + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * + * This function will unlock a specific hwspinlock. 
+ * + * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling + * this function: it is a bug to call unlock on a @hwlock that is already + * unlocked. + */ +static inline void hwspin_unlock_raw(struct hwspinlock *hwlock) +{ + __hwspin_unlock(hwlock, HWLOCK_RAW, NULL); +} + +/** + * hwspin_unlock() - unlock hwspinlock + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * + * This function will unlock a specific hwspinlock and enable preemption + * back. + * + * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling + * this function: it is a bug to call unlock on a @hwlock that is already + * unlocked. + */ +static inline void hwspin_unlock(struct hwspinlock *hwlock) +{ + __hwspin_unlock(hwlock, 0, NULL); +} + +#endif /* __LINUX_HWSPINLOCK_H */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h new file mode 100644 index 000000000..35461d49d --- /dev/null +++ b/include/linux/hyperv.h @@ -0,0 +1,1552 @@ +/* + * + * Copyright (c) 2011, Microsoft Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + * Authors: + * Haiyang Zhang + * Hank Janssen + * K. Y. Srinivasan + * + */ + +#ifndef _HYPERV_H +#define _HYPERV_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_PAGE_BUFFER_COUNT 32 +#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ + +#pragma pack(push, 1) + +/* Single-page buffer */ +struct hv_page_buffer { + u32 len; + u32 offset; + u64 pfn; +}; + +/* Multiple-page buffer */ +struct hv_multipage_buffer { + /* Length and Offset determines the # of pfns in the array */ + u32 len; + u32 offset; + u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT]; +}; + +/* + * Multiple-page buffer array; the pfn array is variable size: + * The number of entries in the PFN array is determined by + * "len" and "offset". + */ +struct hv_mpb_array { + /* Length and Offset determines the # of pfns in the array */ + u32 len; + u32 offset; + u64 pfn_array[]; +}; + +/* 0x18 includes the proprietary packet header */ +#define MAX_PAGE_BUFFER_PACKET (0x18 + \ + (sizeof(struct hv_page_buffer) * \ + MAX_PAGE_BUFFER_COUNT)) +#define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \ + sizeof(struct hv_multipage_buffer)) + + +#pragma pack(pop) + +struct hv_ring_buffer { + /* Offset in bytes from the start of ring data below */ + u32 write_index; + + /* Offset in bytes from the start of ring data below */ + u32 read_index; + + u32 interrupt_mask; + + /* + * WS2012/Win8 and later versions of Hyper-V implement interrupt + * driven flow management. The feature bit feat_pending_send_sz + * is set by the host on the host->guest ring buffer, and by the + * guest on the guest->host ring buffer. + * + * The meaning of the feature bit is a bit complex in that it has + * semantics that apply to both ring buffers. 
If the guest sets + * the feature bit in the guest->host ring buffer, the guest is + * telling the host that: + * 1) It will set the pending_send_sz field in the guest->host ring + * buffer when it is waiting for space to become available, and + * 2) It will read the pending_send_sz field in the host->guest + * ring buffer and interrupt the host when it frees enough space + * + * Similarly, if the host sets the feature bit in the host->guest + * ring buffer, the host is telling the guest that: + * 1) It will set the pending_send_sz field in the host->guest ring + * buffer when it is waiting for space to become available, and + * 2) It will read the pending_send_sz field in the guest->host + * ring buffer and interrupt the guest when it frees enough space + * + * If either the guest or host does not set the feature bit that it + * owns, that guest or host must do polling if it encounters a full + * ring buffer, and not signal the other end with an interrupt. + */ + u32 pending_send_sz; + u32 reserved1[12]; + union { + struct { + u32 feat_pending_send_sz:1; + }; + u32 value; + } feature_bits; + + /* Pad it to PAGE_SIZE so that data starts on page boundary */ + u8 reserved2[4028]; + + /* + * Ring data starts here + RingDataStartOffset + * !!! DO NOT place any fields below this !!! + */ + u8 buffer[0]; +} __packed; + +struct hv_ring_buffer_info { + struct hv_ring_buffer *ring_buffer; + u32 ring_size; /* Include the shared header */ + struct reciprocal_value ring_size_div10_reciprocal; + spinlock_t ring_lock; + + u32 ring_datasize; /* < ring_size */ + u32 priv_read_index; +}; + + +static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) +{ + u32 read_loc, write_loc, dsize, read; + + dsize = rbi->ring_datasize; + read_loc = rbi->ring_buffer->read_index; + write_loc = READ_ONCE(rbi->ring_buffer->write_index); + + read = write_loc >= read_loc ? (write_loc - read_loc) : + (dsize - read_loc) + write_loc; + + return read; +} + +static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi) +{ + u32 read_loc, write_loc, dsize, write; + + dsize = rbi->ring_datasize; + read_loc = READ_ONCE(rbi->ring_buffer->read_index); + write_loc = rbi->ring_buffer->write_index; + + write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : + read_loc - write_loc; + return write; +} + +static inline u32 hv_get_avail_to_write_percent( + const struct hv_ring_buffer_info *rbi) +{ + u32 avail_write = hv_get_bytes_to_write(rbi); + + return reciprocal_divide( + (avail_write << 3) + (avail_write << 1), + rbi->ring_size_div10_reciprocal); +} + +/* + * VMBUS version is 32 bit entity broken up into + * two 16 bit quantities: major_number. minor_number. + * + * 0 . 13 (Windows Server 2008) + * 1 . 1 (Windows 7) + * 2 . 4 (Windows 8) + * 3 . 0 (Windows 8 R2) + * 4 . 0 (Windows 10) + * 5 . 0 (Newer Windows 10) + */ + +#define VERSION_WS2008 ((0 << 16) | (13)) +#define VERSION_WIN7 ((1 << 16) | (1)) +#define VERSION_WIN8 ((2 << 16) | (4)) +#define VERSION_WIN8_1 ((3 << 16) | (0)) +#define VERSION_WIN10 ((4 << 16) | (0)) +#define VERSION_WIN10_V5 ((5 << 16) | (0)) + +#define VERSION_INVAL -1 + +#define VERSION_CURRENT VERSION_WIN10_V5 + +/* Make maximum size of pipe payload of 16K */ +#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) + +/* Define PipeMode values. */ +#define VMBUS_PIPE_TYPE_BYTE 0x00000000 +#define VMBUS_PIPE_TYPE_MESSAGE 0x00000004 + +/* The size of the user defined data buffer for non-pipe offers. 
*/ +#define MAX_USER_DEFINED_BYTES 120 + +/* The size of the user defined data buffer for pipe offers. */ +#define MAX_PIPE_USER_DEFINED_BYTES 116 + +/* + * At the center of the Channel Management library is the Channel Offer. This + * struct contains the fundamental information about an offer. + */ +struct vmbus_channel_offer { + uuid_le if_type; + uuid_le if_instance; + + /* + * These two fields are not currently used. + */ + u64 reserved1; + u64 reserved2; + + u16 chn_flags; + u16 mmio_megabytes; /* in bytes * 1024 * 1024 */ + + union { + /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */ + struct { + unsigned char user_def[MAX_USER_DEFINED_BYTES]; + } std; + + /* + * Pipes: + * The following sructure is an integrated pipe protocol, which + * is implemented on top of standard user-defined data. Pipe + * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own + * use. + */ + struct { + u32 pipe_mode; + unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES]; + } pipe; + } u; + /* + * The sub_channel_index is defined in win8. + */ + u16 sub_channel_index; + u16 reserved3; +} __packed; + +/* Server Flags */ +#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1 +#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2 +#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4 +#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10 +#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100 +#define VMBUS_CHANNEL_PARENT_OFFER 0x200 +#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400 +#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000 + +struct vmpacket_descriptor { + u16 type; + u16 offset8; + u16 len8; + u16 flags; + u64 trans_id; +} __packed; + +struct vmpacket_header { + u32 prev_pkt_start_offset; + struct vmpacket_descriptor descriptor; +} __packed; + +struct vmtransfer_page_range { + u32 byte_count; + u32 byte_offset; +} __packed; + +struct vmtransfer_page_packet_header { + struct vmpacket_descriptor d; + u16 xfer_pageset_id; + u8 sender_owns_set; + u8 reserved; + u32 range_cnt; + struct vmtransfer_page_range ranges[1]; +} __packed; + +struct vmgpadl_packet_header { + struct vmpacket_descriptor d; + u32 gpadl; + u32 reserved; +} __packed; + +struct vmadd_remove_transfer_page_set { + struct vmpacket_descriptor d; + u32 gpadl; + u16 xfer_pageset_id; + u16 reserved; +} __packed; + +/* + * This structure defines a range in guest physical space that can be made to + * look virtually contiguous. + */ +struct gpa_range { + u32 byte_count; + u32 byte_offset; + u64 pfn_array[0]; +}; + +/* + * This is the format for an Establish Gpadl packet, which contains a handle by + * which this GPADL will be known and a set of GPA ranges associated with it. + * This can be converted to a MDL by the guest OS. If there are multiple GPA + * ranges, then the resulting MDL will be "chained," representing multiple VA + * ranges. + */ +struct vmestablish_gpadl { + struct vmpacket_descriptor d; + u32 gpadl; + u32 range_cnt; + struct gpa_range range[1]; +} __packed; + +/* + * This is the format for a Teardown Gpadl packet, which indicates that the + * GPADL handle in the Establish Gpadl packet will never be referenced again. + */ +struct vmteardown_gpadl { + struct vmpacket_descriptor d; + u32 gpadl; + u32 reserved; /* for alignment to a 8-byte boundary */ +} __packed; + +/* + * This is the format for a GPA-Direct packet, which contains a set of GPA + * ranges, in addition to commands and/or data. 
+ */ +struct vmdata_gpa_direct { + struct vmpacket_descriptor d; + u32 reserved; + u32 range_cnt; + struct gpa_range range[1]; +} __packed; + +/* This is the format for a Additional Data Packet. */ +struct vmadditional_data { + struct vmpacket_descriptor d; + u64 total_bytes; + u32 offset; + u32 byte_cnt; + unsigned char data[1]; +} __packed; + +union vmpacket_largest_possible_header { + struct vmpacket_descriptor simple_hdr; + struct vmtransfer_page_packet_header xfer_page_hdr; + struct vmgpadl_packet_header gpadl_hdr; + struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr; + struct vmestablish_gpadl establish_gpadl_hdr; + struct vmteardown_gpadl teardown_gpadl_hdr; + struct vmdata_gpa_direct data_gpa_direct_hdr; +}; + +#define VMPACKET_DATA_START_ADDRESS(__packet) \ + (void *)(((unsigned char *)__packet) + \ + ((struct vmpacket_descriptor)__packet)->offset8 * 8) + +#define VMPACKET_DATA_LENGTH(__packet) \ + ((((struct vmpacket_descriptor)__packet)->len8 - \ + ((struct vmpacket_descriptor)__packet)->offset8) * 8) + +#define VMPACKET_TRANSFER_MODE(__packet) \ + (((struct IMPACT)__packet)->type) + +enum vmbus_packet_type { + VM_PKT_INVALID = 0x0, + VM_PKT_SYNCH = 0x1, + VM_PKT_ADD_XFER_PAGESET = 0x2, + VM_PKT_RM_XFER_PAGESET = 0x3, + VM_PKT_ESTABLISH_GPADL = 0x4, + VM_PKT_TEARDOWN_GPADL = 0x5, + VM_PKT_DATA_INBAND = 0x6, + VM_PKT_DATA_USING_XFER_PAGES = 0x7, + VM_PKT_DATA_USING_GPADL = 0x8, + VM_PKT_DATA_USING_GPA_DIRECT = 0x9, + VM_PKT_CANCEL_REQUEST = 0xa, + VM_PKT_COMP = 0xb, + VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc, + VM_PKT_ADDITIONAL_DATA = 0xd +}; + +#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1 + + +/* Version 1 messages */ +enum vmbus_channel_message_type { + CHANNELMSG_INVALID = 0, + CHANNELMSG_OFFERCHANNEL = 1, + CHANNELMSG_RESCIND_CHANNELOFFER = 2, + CHANNELMSG_REQUESTOFFERS = 3, + CHANNELMSG_ALLOFFERS_DELIVERED = 4, + CHANNELMSG_OPENCHANNEL = 5, + CHANNELMSG_OPENCHANNEL_RESULT = 6, + CHANNELMSG_CLOSECHANNEL = 7, + CHANNELMSG_GPADL_HEADER = 8, + CHANNELMSG_GPADL_BODY = 9, + CHANNELMSG_GPADL_CREATED = 10, + CHANNELMSG_GPADL_TEARDOWN = 11, + CHANNELMSG_GPADL_TORNDOWN = 12, + CHANNELMSG_RELID_RELEASED = 13, + CHANNELMSG_INITIATE_CONTACT = 14, + CHANNELMSG_VERSION_RESPONSE = 15, + CHANNELMSG_UNLOAD = 16, + CHANNELMSG_UNLOAD_RESPONSE = 17, + CHANNELMSG_18 = 18, + CHANNELMSG_19 = 19, + CHANNELMSG_20 = 20, + CHANNELMSG_TL_CONNECT_REQUEST = 21, + CHANNELMSG_22 = 22, + CHANNELMSG_TL_CONNECT_RESULT = 23, + CHANNELMSG_COUNT +}; + +struct vmbus_channel_message_header { + enum vmbus_channel_message_type msgtype; + u32 padding; +} __packed; + +/* Query VMBus Version parameters */ +struct vmbus_channel_query_vmbus_version { + struct vmbus_channel_message_header header; + u32 version; +} __packed; + +/* VMBus Version Supported parameters */ +struct vmbus_channel_version_supported { + struct vmbus_channel_message_header header; + u8 version_supported; +} __packed; + +/* Offer Channel parameters */ +struct vmbus_channel_offer_channel { + struct vmbus_channel_message_header header; + struct vmbus_channel_offer offer; + u32 child_relid; + u8 monitorid; + /* + * win7 and beyond splits this field into a bit field. + */ + u8 monitor_allocated:1; + u8 reserved:7; + /* + * These are new fields added in win7 and later. + * Do not access these fields without checking the + * negotiated protocol. + * + * If "is_dedicated_interrupt" is set, we must not set the + * associated bit in the channel bitmap while sending the + * interrupt to the host. 
+ * + * connection_id is to be used in signaling the host. + */ + u16 is_dedicated_interrupt:1; + u16 reserved1:15; + u32 connection_id; +} __packed; + +/* Rescind Offer parameters */ +struct vmbus_channel_rescind_offer { + struct vmbus_channel_message_header header; + u32 child_relid; +} __packed; + +static inline u32 +hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi) +{ + return rbi->ring_buffer->pending_send_sz; +} + +/* + * Request Offer -- no parameters, SynIC message contains the partition ID + * Set Snoop -- no parameters, SynIC message contains the partition ID + * Clear Snoop -- no parameters, SynIC message contains the partition ID + * All Offers Delivered -- no parameters, SynIC message contains the partition + * ID + * Flush Client -- no parameters, SynIC message contains the partition ID + */ + +/* Open Channel parameters */ +struct vmbus_channel_open_channel { + struct vmbus_channel_message_header header; + + /* Identifies the specific VMBus channel that is being opened. */ + u32 child_relid; + + /* ID making a particular open request at a channel offer unique. */ + u32 openid; + + /* GPADL for the channel's ring buffer. */ + u32 ringbuffer_gpadlhandle; + + /* + * Starting with win8, this field will be used to specify + * the target virtual processor on which to deliver the interrupt for + * the host to guest communication. + * Prior to win8, incoming channel interrupts would only + * be delivered on cpu 0. Setting this value to 0 would + * preserve the earlier behavior. + */ + u32 target_vp; + + /* + * The upstream ring buffer begins at offset zero in the memory + * described by RingBufferGpadlHandle. The downstream ring buffer + * follows it at this offset (in pages). + */ + u32 downstream_ringbuffer_pageoffset; + + /* User-specific data to be passed along to the server endpoint. */ + unsigned char userdata[MAX_USER_DEFINED_BYTES]; +} __packed; + +/* Open Channel Result parameters */ +struct vmbus_channel_open_result { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 openid; + u32 status; +} __packed; + +/* Close channel parameters; */ +struct vmbus_channel_close_channel { + struct vmbus_channel_message_header header; + u32 child_relid; +} __packed; + +/* Channel Message GPADL */ +#define GPADL_TYPE_RING_BUFFER 1 +#define GPADL_TYPE_SERVER_SAVE_AREA 2 +#define GPADL_TYPE_TRANSACTION 8 + +/* + * The number of PFNs in a GPADL message is defined by the number of + * pages that would be spanned by ByteCount and ByteOffset. If the + * implied number of PFNs won't fit in this packet, there will be a + * follow-up packet that contains more. + */ +struct vmbus_channel_gpadl_header { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 gpadl; + u16 range_buflen; + u16 rangecount; + struct gpa_range range[0]; +} __packed; + +/* This is the followup packet that contains more PFNs. 
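+ * For example (illustrative numbers only): a range with byte_offset 0x100
+ * and byte_count 0x3000 spans four pages of the buffer, so the header and
+ * any follow-up bodies together must carry 4 PFNs.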
*/ +struct vmbus_channel_gpadl_body { + struct vmbus_channel_message_header header; + u32 msgnumber; + u32 gpadl; + u64 pfn[0]; +} __packed; + +struct vmbus_channel_gpadl_created { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 gpadl; + u32 creation_status; +} __packed; + +struct vmbus_channel_gpadl_teardown { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 gpadl; +} __packed; + +struct vmbus_channel_gpadl_torndown { + struct vmbus_channel_message_header header; + u32 gpadl; +} __packed; + +struct vmbus_channel_relid_released { + struct vmbus_channel_message_header header; + u32 child_relid; +} __packed; + +struct vmbus_channel_initiate_contact { + struct vmbus_channel_message_header header; + u32 vmbus_version_requested; + u32 target_vcpu; /* The VCPU the host should respond to */ + union { + u64 interrupt_page; + struct { + u8 msg_sint; + u8 padding1[3]; + u32 padding2; + }; + }; + u64 monitor_page1; + u64 monitor_page2; +} __packed; + +/* Hyper-V socket: guest's connect()-ing to host */ +struct vmbus_channel_tl_connect_request { + struct vmbus_channel_message_header header; + uuid_le guest_endpoint_id; + uuid_le host_service_id; +} __packed; + +struct vmbus_channel_version_response { + struct vmbus_channel_message_header header; + u8 version_supported; + + u8 connection_state; + u16 padding; + + /* + * On new hosts that support VMBus protocol 5.0, we must use + * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message, + * and for subsequent messages, we must use the Message Connection ID + * field in the host-returned Version Response Message. + * + * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1). + */ + u32 msg_conn_id; +} __packed; + +enum vmbus_channel_state { + CHANNEL_OFFER_STATE, + CHANNEL_OPENING_STATE, + CHANNEL_OPEN_STATE, + CHANNEL_OPENED_STATE, +}; + +/* + * Represents each channel msg on the vmbus connection This is a + * variable-size data structure depending on the msg type itself + */ +struct vmbus_channel_msginfo { + /* Bookkeeping stuff */ + struct list_head msglistentry; + + /* So far, this is only used to handle gpadl body message */ + struct list_head submsglist; + + /* Synchronize the request/response if needed */ + struct completion waitevent; + struct vmbus_channel *waiting_channel; + union { + struct vmbus_channel_version_supported version_supported; + struct vmbus_channel_open_result open_result; + struct vmbus_channel_gpadl_torndown gpadl_torndown; + struct vmbus_channel_gpadl_created gpadl_created; + struct vmbus_channel_version_response version_response; + } response; + + u32 msgsize; + /* + * The channel message that goes out on the "wire". + * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header + */ + unsigned char msg[0]; +}; + +struct vmbus_close_msg { + struct vmbus_channel_msginfo info; + struct vmbus_channel_close_channel msg; +}; + +/* Define connection identifier type. 
*/ +union hv_connection_id { + u32 asu32; + struct { + u32 id:24; + u32 reserved:8; + } u; +}; + +enum hv_numa_policy { + HV_BALANCED = 0, + HV_LOCALIZED, +}; + +enum vmbus_device_type { + HV_IDE = 0, + HV_SCSI, + HV_FC, + HV_NIC, + HV_ND, + HV_PCIE, + HV_FB, + HV_KBD, + HV_MOUSE, + HV_KVP, + HV_TS, + HV_HB, + HV_SHUTDOWN, + HV_FCOPY, + HV_BACKUP, + HV_DM, + HV_UNKNOWN, +}; + +struct vmbus_device { + u16 dev_type; + uuid_le guid; + bool perf_device; +}; + +struct vmbus_channel { + struct list_head listentry; + + struct hv_device *device_obj; + + enum vmbus_channel_state state; + + struct vmbus_channel_offer_channel offermsg; + /* + * These are based on the OfferMsg.MonitorId. + * Save it here for easy access. + */ + u8 monitor_grp; + u8 monitor_bit; + + bool rescind; /* got rescind msg */ + struct completion rescind_event; + + u32 ringbuffer_gpadlhandle; + + /* Allocated memory for ring buffer */ + struct page *ringbuffer_page; + u32 ringbuffer_pagecount; + struct hv_ring_buffer_info outbound; /* send to parent */ + struct hv_ring_buffer_info inbound; /* receive from parent */ + + struct vmbus_close_msg close_msg; + + /* Statistics */ + u64 interrupts; /* Host to Guest interrupts */ + u64 sig_events; /* Guest to Host events */ + + /* Channel callback's invoked in softirq context */ + struct tasklet_struct callback_event; + void (*onchannel_callback)(void *context); + void *channel_callback_context; + + /* + * A channel can be marked for one of three modes of reading: + * BATCHED - callback called from taslket and should read + * channel until empty. Interrupts from the host + * are masked while read is in process (default). + * DIRECT - callback called from tasklet (softirq). + * ISR - callback called in interrupt context and must + * invoke its own deferred processing. + * Host interrupts are disabled and must be re-enabled + * when ring is empty. + */ + enum hv_callback_mode { + HV_CALL_BATCHED, + HV_CALL_DIRECT, + HV_CALL_ISR + } callback_mode; + + bool is_dedicated_interrupt; + u64 sig_event; + + /* + * Starting with win8, this field will be used to specify + * the target virtual processor on which to deliver the interrupt for + * the host to guest communication. + * Prior to win8, incoming channel interrupts would only + * be delivered on cpu 0. Setting this value to 0 would + * preserve the earlier behavior. + */ + u32 target_vp; + /* The corresponding CPUID in the guest */ + u32 target_cpu; + /* + * State to manage the CPU affiliation of channels. + */ + struct cpumask alloced_cpus_in_node; + int numa_node; + /* + * Support for sub-channels. For high performance devices, + * it will be useful to have multiple sub-channels to support + * a scalable communication infrastructure with the host. + * The support for sub-channels is implemented as an extention + * to the current infrastructure. + * The initial offer is considered the primary channel and this + * offer message will indicate if the host supports sub-channels. + * The guest is free to ask for sub-channels to be offerred and can + * open these sub-channels as a normal "primary" channel. However, + * all sub-channels will have the same type and instance guids as the + * primary channel. Requests sent on a given channel will result in a + * response on the same channel. + */ + + /* + * Sub-channel creation callback. This callback will be called in + * process context when a sub-channel offer is received from the host. + * The guest can open the sub-channel in the context of this callback. 
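+ *
+ * Illustrative sketch only (ring sizes and names are assumptions for the
+ * example); a driver might open each new sub-channel from this callback:
+ *
+ *	static void my_sc_create(struct vmbus_channel *new_sc)
+ *	{
+ *		vmbus_open(new_sc, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
+ *			   my_chan_callback, new_sc);
+ *	}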
+ */ + void (*sc_creation_callback)(struct vmbus_channel *new_sc); + + /* + * Channel rescind callback. Some channels (the hvsock ones), need to + * register a callback which is invoked in vmbus_onoffer_rescind(). + */ + void (*chn_rescind_callback)(struct vmbus_channel *channel); + + /* + * The spinlock to protect the structure. It is being used to protect + * test-and-set access to various attributes of the structure as well + * as all sc_list operations. + */ + spinlock_t lock; + /* + * All Sub-channels of a primary channel are linked here. + */ + struct list_head sc_list; + /* + * Current number of sub-channels. + */ + int num_sc; + /* + * Number of a sub-channel (position within sc_list) which is supposed + * to be used as the next outgoing channel. + */ + int next_oc; + /* + * The primary channel this sub-channel belongs to. + * This will be NULL for the primary channel. + */ + struct vmbus_channel *primary_channel; + /* + * Support per-channel state for use by vmbus drivers. + */ + void *per_channel_state; + /* + * To support per-cpu lookup mapping of relid to channel, + * link up channels based on their CPU affinity. + */ + struct list_head percpu_list; + + /* + * Defer freeing channel until after all cpu's have + * gone through grace period. + */ + struct rcu_head rcu; + + /* + * For sysfs per-channel properties. + */ + struct kobject kobj; + + /* + * For performance critical channels (storage, networking + * etc,), Hyper-V has a mechanism to enhance the throughput + * at the expense of latency: + * When the host is to be signaled, we just set a bit in a shared page + * and this bit will be inspected by the hypervisor within a certain + * window and if the bit is set, the host will be signaled. The window + * of time is the monitor latency - currently around 100 usecs. This + * mechanism improves throughput by: + * + * A) Making the host more efficient - each time it wakes up, + * potentially it will process morev number of packets. The + * monitor latency allows a batch to build up. + * B) By deferring the hypercall to signal, we will also minimize + * the interrupts. + * + * Clearly, these optimizations improve throughput at the expense of + * latency. Furthermore, since the channel is shared for both + * control and data messages, control messages currently suffer + * unnecessary latency adversley impacting performance and boot + * time. To fix this issue, permit tagging the channel as being + * in "low latency" mode. In this mode, we will bypass the monitor + * mechanism. + */ + bool low_latency; + + /* + * NUMA distribution policy: + * We support two policies: + * 1) Balanced: Here all performance critical channels are + * distributed evenly amongst all the NUMA nodes. + * This policy will be the default policy. + * 2) Localized: All channels of a given instance of a + * performance critical service will be assigned CPUs + * within a selected NUMA node. + */ + enum hv_numa_policy affinity_policy; + + bool probe_done; + + /* + * We must offload the handling of the primary/sub channels + * from the single-threaded vmbus_connection.work_queue to + * two different workqueue, otherwise we can block + * vmbus_connection.work_queue and hang: see vmbus_process_offer(). 
+ */ + struct work_struct add_channel_work; +}; + +static inline bool is_hvsock_channel(const struct vmbus_channel *c) +{ + return !!(c->offermsg.offer.chn_flags & + VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); +} + +static inline void set_channel_affinity_state(struct vmbus_channel *c, + enum hv_numa_policy policy) +{ + c->affinity_policy = policy; +} + +static inline void set_channel_read_mode(struct vmbus_channel *c, + enum hv_callback_mode mode) +{ + c->callback_mode = mode; +} + +static inline void set_per_channel_state(struct vmbus_channel *c, void *s) +{ + c->per_channel_state = s; +} + +static inline void *get_per_channel_state(struct vmbus_channel *c) +{ + return c->per_channel_state; +} + +static inline void set_channel_pending_send_size(struct vmbus_channel *c, + u32 size) +{ + c->outbound.ring_buffer->pending_send_sz = size; +} + +static inline void set_low_latency_mode(struct vmbus_channel *c) +{ + c->low_latency = true; +} + +static inline void clear_low_latency_mode(struct vmbus_channel *c) +{ + c->low_latency = false; +} + +void vmbus_onmessage(void *context); + +int vmbus_request_offers(void); + +/* + * APIs for managing sub-channels. + */ + +void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel, + void (*sc_cr_cb)(struct vmbus_channel *new_sc)); + +void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, + void (*chn_rescind_cb)(struct vmbus_channel *)); + +/* + * Retrieve the (sub) channel on which to send an outgoing request. + * When a primary channel has multiple sub-channels, we choose a + * channel whose VCPU binding is closest to the VCPU on which + * this call is being made. + */ +struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary); + +/* + * Check if sub-channels have already been offerred. This API will be useful + * when the driver is unloaded after establishing sub-channels. In this case, + * when the driver is re-loaded, the driver would have to check if the + * subchannels have already been established before attempting to request + * the creation of sub-channels. + * This function returns TRUE to indicate that subchannels have already been + * created. + * This function should be invoked after setting the callback function for + * sub-channel creation. 
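+ *
+ * Rough ordering sketch (illustrative only; "primary" and the
+ * request_subchannels() helper are assumptions, since the actual request
+ * message sent to the host is device specific):
+ *
+ *	vmbus_set_sc_create_callback(primary, my_sc_create);
+ *	if (!vmbus_are_subchannels_present(primary))
+ *		request_subchannels(primary);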
+ */ +bool vmbus_are_subchannels_present(struct vmbus_channel *primary); + +/* The format must be the same as struct vmdata_gpa_direct */ +struct vmbus_channel_packet_page_buffer { + u16 type; + u16 dataoffset8; + u16 length8; + u16 flags; + u64 transactionid; + u32 reserved; + u32 rangecount; + struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT]; +} __packed; + +/* The format must be the same as struct vmdata_gpa_direct */ +struct vmbus_channel_packet_multipage_buffer { + u16 type; + u16 dataoffset8; + u16 length8; + u16 flags; + u64 transactionid; + u32 reserved; + u32 rangecount; /* Always 1 in this case */ + struct hv_multipage_buffer range; +} __packed; + +/* The format must be the same as struct vmdata_gpa_direct */ +struct vmbus_packet_mpb_array { + u16 type; + u16 dataoffset8; + u16 length8; + u16 flags; + u64 transactionid; + u32 reserved; + u32 rangecount; /* Always 1 in this case */ + struct hv_mpb_array range; +} __packed; + + +extern int vmbus_open(struct vmbus_channel *channel, + u32 send_ringbuffersize, + u32 recv_ringbuffersize, + void *userdata, + u32 userdatalen, + void (*onchannel_callback)(void *context), + void *context); + +extern void vmbus_close(struct vmbus_channel *channel); + +extern int vmbus_sendpacket(struct vmbus_channel *channel, + void *buffer, + u32 bufferLen, + u64 requestid, + enum vmbus_packet_type type, + u32 flags); + +extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, + struct hv_page_buffer pagebuffers[], + u32 pagecount, + void *buffer, + u32 bufferlen, + u64 requestid); + +extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, + struct vmbus_packet_mpb_array *mpb, + u32 desc_size, + void *buffer, + u32 bufferlen, + u64 requestid); + +extern int vmbus_establish_gpadl(struct vmbus_channel *channel, + void *kbuffer, + u32 size, + u32 *gpadl_handle); + +extern int vmbus_teardown_gpadl(struct vmbus_channel *channel, + u32 gpadl_handle); + +void vmbus_reset_channel_cb(struct vmbus_channel *channel); + +extern int vmbus_recvpacket(struct vmbus_channel *channel, + void *buffer, + u32 bufferlen, + u32 *buffer_actual_len, + u64 *requestid); + +extern int vmbus_recvpacket_raw(struct vmbus_channel *channel, + void *buffer, + u32 bufferlen, + u32 *buffer_actual_len, + u64 *requestid); + + +extern void vmbus_ontimer(unsigned long data); + +/* Base driver object */ +struct hv_driver { + const char *name; + + /* + * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER + * channel flag, actually doesn't mean a synthetic device because the + * offer's if_type/if_instance can change for every new hvsock + * connection. + * + * However, to facilitate the notification of new-offer/rescind-offer + * from vmbus driver to hvsock driver, we can handle hvsock offer as + * a special vmbus device, and hence we need the below flag to + * indicate if the driver is the hvsock driver or not: we need to + * specially treat the hvosck offer & driver in vmbus_match(). 
+ */ + bool hvsock; + + /* the device type supported by this driver */ + uuid_le dev_type; + const struct hv_vmbus_device_id *id_table; + + struct device_driver driver; + + /* dynamic device GUID's */ + struct { + spinlock_t lock; + struct list_head list; + } dynids; + + int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); + int (*remove)(struct hv_device *); + void (*shutdown)(struct hv_device *); + +}; + +/* Base device object */ +struct hv_device { + /* the device type id of this device */ + uuid_le dev_type; + + /* the device instance id of this device */ + uuid_le dev_instance; + u16 vendor_id; + u16 device_id; + + struct device device; + + struct vmbus_channel *channel; + struct kset *channels_kset; +}; + + +static inline struct hv_device *device_to_hv_device(struct device *d) +{ + return container_of(d, struct hv_device, device); +} + +static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d) +{ + return container_of(d, struct hv_driver, driver); +} + +static inline void hv_set_drvdata(struct hv_device *dev, void *data) +{ + dev_set_drvdata(&dev->device, data); +} + +static inline void *hv_get_drvdata(struct hv_device *dev) +{ + return dev_get_drvdata(&dev->device); +} + +struct hv_ring_buffer_debug_info { + u32 current_interrupt_mask; + u32 current_read_index; + u32 current_write_index; + u32 bytes_avail_toread; + u32 bytes_avail_towrite; +}; + + +int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, + struct hv_ring_buffer_debug_info *debug_info); + +/* Vmbus interface */ +#define vmbus_driver_register(driver) \ + __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, + struct module *owner, + const char *mod_name); +void vmbus_driver_unregister(struct hv_driver *hv_driver); + +void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); + +int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, + resource_size_t min, resource_size_t max, + resource_size_t size, resource_size_t align, + bool fb_overlap_ok); +void vmbus_free_mmio(resource_size_t start, resource_size_t size); + +/* + * GUID definitions of various offer types - services offered to the guest. 
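+ *
+ * Illustrative sketch (not part of the upstream header): a driver ties one
+ * of the GUIDs below to its callbacks through an id table and hv_driver,
+ * e.g.
+ *
+ *	static const struct hv_vmbus_device_id my_id_table[] = {
+ *		{ HV_NIC_GUID, },
+ *		{ },
+ *	};
+ *
+ *	static struct hv_driver my_drv = {
+ *		.name		= "my_drv",
+ *		.id_table	= my_id_table,
+ *		.probe		= my_probe,
+ *		.remove		= my_remove,
+ *	};
+ *
+ * and registers it with vmbus_driver_register(&my_drv).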
+ */ + +/* + * Network GUID + * {f8615163-df3e-46c5-913f-f2d2f965ed0e} + */ +#define HV_NIC_GUID \ + .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ + 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) + +/* + * IDE GUID + * {32412632-86cb-44a2-9b5c-50d1417354f5} + */ +#define HV_IDE_GUID \ + .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ + 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) + +/* + * SCSI GUID + * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} + */ +#define HV_SCSI_GUID \ + .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ + 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) + +/* + * Shutdown GUID + * {0e0b6031-5213-4934-818b-38d90ced39db} + */ +#define HV_SHUTDOWN_GUID \ + .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ + 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) + +/* + * Time Synch GUID + * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} + */ +#define HV_TS_GUID \ + .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ + 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) + +/* + * Heartbeat GUID + * {57164f39-9115-4e78-ab55-382f3bd5422d} + */ +#define HV_HEART_BEAT_GUID \ + .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ + 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) + +/* + * KVP GUID + * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} + */ +#define HV_KVP_GUID \ + .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ + 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) + +/* + * Dynamic memory GUID + * {525074dc-8985-46e2-8057-a307dc18a502} + */ +#define HV_DM_GUID \ + .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ + 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) + +/* + * Mouse GUID + * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} + */ +#define HV_MOUSE_GUID \ + .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ + 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) + +/* + * Keyboard GUID + * {f912ad6d-2b17-48ea-bd65-f927a61c7684} + */ +#define HV_KBD_GUID \ + .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ + 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) + +/* + * VSS (Backup/Restore) GUID + */ +#define HV_VSS_GUID \ + .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ + 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) +/* + * Synthetic Video GUID + * {DA0A7802-E377-4aac-8E77-0558EB1073F8} + */ +#define HV_SYNTHVID_GUID \ + .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ + 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) + +/* + * Synthetic FC GUID + * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} + */ +#define HV_SYNTHFC_GUID \ + .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ + 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) + +/* + * Guest File Copy Service + * {34D14BE3-DEE4-41c8-9AE7-6B174977C192} + */ + +#define HV_FCOPY_GUID \ + .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ + 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) + +/* + * NetworkDirect. This is the guest RDMA service. + * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} + */ +#define HV_ND_GUID \ + .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ + 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) + +/* + * PCI Express Pass Through + * {44C4F61D-4444-4400-9D52-802E27EDE19F} + */ + +#define HV_PCIE_GUID \ + .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ + 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) + +/* + * Linux doesn't support the 3 devices: the first two are for + * Automatic Virtual Machine Activation, and the third is for + * Remote Desktop Virtualization. 
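 *
 * Each GUID macro above expands to a ".guid = UUID_LE(...)" initializer,
 * so it can be dropped straight into an hv_vmbus_device_id table
 * (illustrative sketch, not from the upstream header; the table name is
 * an assumed placeholder):
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(vmbus, id_table);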
+ * {f8e65716-3cb3-4a06-9a60-1889c5cccab5} + * {3375baf4-9e15-4b30-b765-67acb10d607b} + * {276aacf4-ac15-426c-98dd-7521ad3f01fe} + */ + +#define HV_AVMA1_GUID \ + .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \ + 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5) + +#define HV_AVMA2_GUID \ + .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \ + 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b) + +#define HV_RDV_GUID \ + .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \ + 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe) + +/* + * Common header for Hyper-V ICs + */ + +#define ICMSGTYPE_NEGOTIATE 0 +#define ICMSGTYPE_HEARTBEAT 1 +#define ICMSGTYPE_KVPEXCHANGE 2 +#define ICMSGTYPE_SHUTDOWN 3 +#define ICMSGTYPE_TIMESYNC 4 +#define ICMSGTYPE_VSS 5 + +#define ICMSGHDRFLAG_TRANSACTION 1 +#define ICMSGHDRFLAG_REQUEST 2 +#define ICMSGHDRFLAG_RESPONSE 4 + + +/* + * While we want to handle util services as regular devices, + * there is only one instance of each of these services; so + * we statically allocate the service specific state. + */ + +struct hv_util_service { + u8 *recv_buffer; + void *channel; + void (*util_cb)(void *); + int (*util_init)(struct hv_util_service *); + void (*util_deinit)(void); +}; + +struct vmbuspipe_hdr { + u32 flags; + u32 msgsize; +} __packed; + +struct ic_version { + u16 major; + u16 minor; +} __packed; + +struct icmsg_hdr { + struct ic_version icverframe; + u16 icmsgtype; + struct ic_version icvermsg; + u16 icmsgsize; + u32 status; + u8 ictransaction_id; + u8 icflags; + u8 reserved[2]; +} __packed; + +struct icmsg_negotiate { + u16 icframe_vercnt; + u16 icmsg_vercnt; + u32 reserved; + struct ic_version icversion_data[1]; /* any size array */ +} __packed; + +struct shutdown_msg_data { + u32 reason_code; + u32 timeout_seconds; + u32 flags; + u8 display_message[2048]; +} __packed; + +struct heartbeat_msg_data { + u64 seq_num; + u32 reserved[8]; +} __packed; + +/* Time Sync IC defs */ +#define ICTIMESYNCFLAG_PROBE 0 +#define ICTIMESYNCFLAG_SYNC 1 +#define ICTIMESYNCFLAG_SAMPLE 2 + +#ifdef __x86_64__ +#define WLTIMEDELTA 116444736000000000L /* in 100ns unit */ +#else +#define WLTIMEDELTA 116444736000000000LL +#endif + +struct ictimesync_data { + u64 parenttime; + u64 childtime; + u64 roundtriptime; + u8 flags; +} __packed; + +struct ictimesync_ref_data { + u64 parenttime; + u64 vmreferencetime; + u8 flags; + char leapflags; + char stratum; + u8 reserved[3]; +} __packed; + +struct hyperv_service_callback { + u8 msg_type; + char *log_msg; + uuid_le data; + struct vmbus_channel *channel; + void (*callback)(void *context); +}; + +#define MAX_SRV_VER 0x7ffffff +extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, + const int *fw_version, int fw_vercnt, + const int *srv_version, int srv_vercnt, + int *nego_fw_version, int *nego_srv_version); + +void hv_process_channel_removal(u32 relid); + +void vmbus_setevent(struct vmbus_channel *channel); +/* + * Negotiated version with the Host. + */ + +extern __u32 vmbus_proto_version; + +int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, + const uuid_le *shv_host_servie_id); +void vmbus_set_event(struct vmbus_channel *channel); + +/* Get the start of the ring buffer. 
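 *
 * (Illustrative sketch for the ring-buffer helpers and packet iterator
 * declared just below; not from the upstream header, and "handle_pkt"
 * is an assumed placeholder.)
 *
 *	struct vmpacket_descriptor *desc;
 *
 *	foreach_vmbus_pkt(desc, channel)
 *		handle_pkt(hv_pkt_data(desc), hv_pkt_datalen(desc));
 *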
*/ +static inline void * +hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info) +{ + return ring_info->ring_buffer->buffer; +} + +/* + * Mask off host interrupt callback notifications + */ +static inline void hv_begin_read(struct hv_ring_buffer_info *rbi) +{ + rbi->ring_buffer->interrupt_mask = 1; + + /* make sure mask update is not reordered */ + virt_mb(); +} + +/* + * Re-enable host callback and return number of outstanding bytes + */ +static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi) +{ + + rbi->ring_buffer->interrupt_mask = 0; + + /* make sure mask update is not reordered */ + virt_mb(); + + /* + * Now check to see if the ring buffer is still empty. + * If it is not, we raced and we need to process new + * incoming messages. + */ + return hv_get_bytes_to_read(rbi); +} + +/* + * An API to support in-place processing of incoming VMBUS packets. + */ + +/* Get data payload associated with descriptor */ +static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc) +{ + return (void *)((unsigned long)desc + (desc->offset8 << 3)); +} + +/* Get data size associated with descriptor */ +static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc) +{ + return (desc->len8 << 3) - (desc->offset8 << 3); +} + + +struct vmpacket_descriptor * +hv_pkt_iter_first(struct vmbus_channel *channel); + +struct vmpacket_descriptor * +__hv_pkt_iter_next(struct vmbus_channel *channel, + const struct vmpacket_descriptor *pkt); + +void hv_pkt_iter_close(struct vmbus_channel *channel); + +/* + * Get next packet descriptor from iterator + * If at end of list, return NULL and update host. + */ +static inline struct vmpacket_descriptor * +hv_pkt_iter_next(struct vmbus_channel *channel, + const struct vmpacket_descriptor *pkt) +{ + struct vmpacket_descriptor *nxt; + + nxt = __hv_pkt_iter_next(channel, pkt); + if (!nxt) + hv_pkt_iter_close(channel); + + return nxt; +} + +#define foreach_vmbus_pkt(pkt, channel) \ + for (pkt = hv_pkt_iter_first(channel); pkt; \ + pkt = hv_pkt_iter_next(channel, pkt)) + +#endif /* _HYPERV_H */ diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h new file mode 100644 index 000000000..fc08b433c --- /dev/null +++ b/include/linux/hypervisor.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_HYPEVISOR_H +#define __LINUX_HYPEVISOR_H + +/* + * Generic Hypervisor support + * Juergen Gross + */ + +#ifdef CONFIG_X86 + +#include +#include + +static inline void hypervisor_pin_vcpu(int cpu) +{ + x86_platform.hyper.pin_vcpu(cpu); +} + +#else /* !CONFIG_X86 */ + +#include + +static inline void hypervisor_pin_vcpu(int cpu) +{ +} + +static inline bool jailhouse_paravirt(void) +{ + return of_find_compatible_node(NULL, NULL, "jailhouse,cell"); +} + +#endif /* !CONFIG_X86 */ + +#endif /* __LINUX_HYPEVISOR_H */ diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h new file mode 100644 index 000000000..63904ba68 --- /dev/null +++ b/include/linux/i2c-algo-bit.h @@ -0,0 +1,55 @@ +/* ------------------------------------------------------------------------- */ +/* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */ +/* ------------------------------------------------------------------------- */ +/* Copyright (C) 1995-99 Simon G. Vogl + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. */ +/* ------------------------------------------------------------------------- */ + +/* With some changes from Kyösti Mälkki and even + Frodo Looijaard */ + +#ifndef _LINUX_I2C_ALGO_BIT_H +#define _LINUX_I2C_ALGO_BIT_H + +/* --- Defines for bit-adapters --------------------------------------- */ +/* + * This struct contains the hw-dependent functions of bit-style adapters to + * manipulate the line states, and to init any hw-specific features. This is + * only used if you have more than one hw-type of adapter running. + */ +struct i2c_algo_bit_data { + void *data; /* private data for lowlevel routines */ + void (*setsda) (void *data, int state); + void (*setscl) (void *data, int state); + int (*getsda) (void *data); + int (*getscl) (void *data); + int (*pre_xfer) (struct i2c_adapter *); + void (*post_xfer) (struct i2c_adapter *); + + /* local settings */ + int udelay; /* half clock cycle time in us, + minimum 2 us for fast-mode I2C, + minimum 5 us for standard-mode I2C and SMBus, + maximum 50 us for SMBus */ + int timeout; /* in jiffies */ +}; + +int i2c_bit_add_bus(struct i2c_adapter *); +int i2c_bit_add_numbered_bus(struct i2c_adapter *); +extern const struct i2c_algorithm i2c_bit_algo; + +#endif /* _LINUX_I2C_ALGO_BIT_H */ diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h new file mode 100644 index 000000000..7c522fdd9 --- /dev/null +++ b/include/linux/i2c-algo-pca.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_I2C_ALGO_PCA_H +#define _LINUX_I2C_ALGO_PCA_H + +/* Chips known to the pca algo */ +#define I2C_PCA_CHIP_9564 0x00 +#define I2C_PCA_CHIP_9665 0x01 + +/* Internal period for PCA9665 oscilator */ +#define I2C_PCA_OSC_PER 3 /* e10-8s */ + +/* Clock speeds for the bus for PCA9564*/ +#define I2C_PCA_CON_330kHz 0x00 +#define I2C_PCA_CON_288kHz 0x01 +#define I2C_PCA_CON_217kHz 0x02 +#define I2C_PCA_CON_146kHz 0x03 +#define I2C_PCA_CON_88kHz 0x04 +#define I2C_PCA_CON_59kHz 0x05 +#define I2C_PCA_CON_44kHz 0x06 +#define I2C_PCA_CON_36kHz 0x07 + +/* PCA9564 registers */ +#define I2C_PCA_STA 0x00 /* STATUS Read Only */ +#define I2C_PCA_TO 0x00 /* TIMEOUT Write Only */ +#define I2C_PCA_DAT 0x01 /* DATA Read/Write */ +#define I2C_PCA_ADR 0x02 /* OWN ADR Read/Write */ +#define I2C_PCA_CON 0x03 /* CONTROL Read/Write */ + +/* PCA9665 registers */ +#define I2C_PCA_INDPTR 0x00 /* INDIRECT Pointer Write Only */ +#define I2C_PCA_IND 0x02 /* INDIRECT Read/Write */ + +/* PCA9665 indirect registers */ +#define I2C_PCA_ICOUNT 0x00 /* Byte Count for buffered mode */ +#define I2C_PCA_IADR 0x01 /* OWN ADR */ +#define I2C_PCA_ISCLL 0x02 /* SCL LOW period */ +#define I2C_PCA_ISCLH 0x03 /* SCL HIGH period */ +#define I2C_PCA_ITO 0x04 /* TIMEOUT */ +#define I2C_PCA_IPRESET 0x05 /* Parallel bus reset */ +#define I2C_PCA_IMODE 0x06 /* I2C Bus mode */ + +/* PCA9665 I2C bus mode */ +#define I2C_PCA_MODE_STD 0x00 /* Standard mode */ +#define I2C_PCA_MODE_FAST 0x01 /* Fast mode */ +#define I2C_PCA_MODE_FASTP 0x02 /* Fast Plus mode */ +#define I2C_PCA_MODE_TURBO 0x03 /* Turbo mode */ + + +#define I2C_PCA_CON_AA 0x80 
/* Assert Acknowledge */ +#define I2C_PCA_CON_ENSIO 0x40 /* Enable */ +#define I2C_PCA_CON_STA 0x20 /* Start */ +#define I2C_PCA_CON_STO 0x10 /* Stop */ +#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ +#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ + +/** + * struct pca_i2c_bus_settings - The configured PCA i2c bus settings + * @mode: Configured i2c bus mode + * @tlow: Configured SCL LOW period + * @thi: Configured SCL HIGH period + * @clock_freq: The configured clock frequency + */ +struct pca_i2c_bus_settings { + int mode; + int tlow; + int thi; + int clock_freq; +}; + +struct i2c_algo_pca_data { + void *data; /* private low level data */ + void (*write_byte) (void *data, int reg, int val); + int (*read_byte) (void *data, int reg); + int (*wait_for_completion) (void *data); + void (*reset_chip) (void *data); + /* For PCA9564, use one of the predefined frequencies: + * 330000, 288000, 217000, 146000, 88000, 59000, 44000, 36000 + * For PCA9665, use the frequency you want here. */ + unsigned int i2c_clock; + unsigned int chip; + struct pca_i2c_bus_settings bus_settings; +}; + +int i2c_pca_add_bus(struct i2c_adapter *); +int i2c_pca_add_numbered_bus(struct i2c_adapter *); + +#endif /* _LINUX_I2C_ALGO_PCA_H */ diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h new file mode 100644 index 000000000..538e8f41a --- /dev/null +++ b/include/linux/i2c-algo-pcf.h @@ -0,0 +1,49 @@ +/* ------------------------------------------------------------------------- */ +/* adap-pcf.h i2c driver algorithms for PCF8584 adapters */ +/* ------------------------------------------------------------------------- */ +/* Copyright (C) 1995-97 Simon G. Vogl + 1998-99 Hans Berglund + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. */ +/* ------------------------------------------------------------------------- */ + +/* With some changes from Kyösti Mälkki and even + Frodo Looijaard */ + +#ifndef _LINUX_I2C_ALGO_PCF_H +#define _LINUX_I2C_ALGO_PCF_H + +struct i2c_algo_pcf_data { + void *data; /* private data for lolevel routines */ + void (*setpcf) (void *data, int ctl, int val); + int (*getpcf) (void *data, int ctl); + int (*getown) (void *data); + int (*getclock) (void *data); + void (*waitforpin) (void *data); + + void (*xfer_begin) (void *data); + void (*xfer_end) (void *data); + + /* Multi-master lost arbitration back-off delay (msecs) + * This should be set by the bus adapter or knowledgable client + * if bus is multi-mastered, else zero + */ + unsigned long lab_mdelay; +}; + +int i2c_pcf_add_bus(struct i2c_adapter *); + +#endif /* _LINUX_I2C_ALGO_PCF_H */ diff --git a/include/linux/i2c-dev.h b/include/linux/i2c-dev.h new file mode 100644 index 000000000..79727144c --- /dev/null +++ b/include/linux/i2c-dev.h @@ -0,0 +1,28 @@ +/* + i2c-dev.h - i2c-bus driver, char device interface + + Copyright (C) 1995-97 Simon G. 
Vogl + Copyright (C) 1998-99 Frodo Looijaard + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. +*/ +#ifndef _LINUX_I2C_DEV_H +#define _LINUX_I2C_DEV_H + +#include + +#define I2C_MAJOR 89 /* Device major number */ +#endif /* _LINUX_I2C_DEV_H */ diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h new file mode 100644 index 000000000..bd74d5706 --- /dev/null +++ b/include/linux/i2c-mux.h @@ -0,0 +1,80 @@ +/* + * + * i2c-mux.h - functions for the i2c-bus mux support + * + * Copyright (c) 2008-2009 Rodolfo Giometti + * Copyright (c) 2008-2009 Eurotech S.p.A. + * Michael Lawnick + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301 USA. + */ + +#ifndef _LINUX_I2C_MUX_H +#define _LINUX_I2C_MUX_H + +#ifdef __KERNEL__ + +#include + +struct i2c_mux_core { + struct i2c_adapter *parent; + struct device *dev; + unsigned int mux_locked:1; + unsigned int arbitrator:1; + unsigned int gate:1; + + void *priv; + + int (*select)(struct i2c_mux_core *, u32 chan_id); + int (*deselect)(struct i2c_mux_core *, u32 chan_id); + + int num_adapters; + int max_adapters; + struct i2c_adapter *adapter[0]; +}; + +struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent, + struct device *dev, int max_adapters, + int sizeof_priv, u32 flags, + int (*select)(struct i2c_mux_core *, u32), + int (*deselect)(struct i2c_mux_core *, u32)); + +/* flags for i2c_mux_alloc */ +#define I2C_MUX_LOCKED BIT(0) +#define I2C_MUX_ARBITRATOR BIT(1) +#define I2C_MUX_GATE BIT(2) + +static inline void *i2c_mux_priv(struct i2c_mux_core *muxc) +{ + return muxc->priv; +} + +struct i2c_adapter *i2c_root_adapter(struct device *dev); + +/* + * Called to create an i2c bus on a multiplexed bus segment. + * The chan_id parameter is passed to the select and deselect + * callback functions to perform hardware-specific mux control. 
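 *
 * Illustrative sketch of the usual sequence (not from the upstream header;
 * the channel count, private data type and "mydev_select" callback are
 * assumed placeholders):
 *
 *	muxc = i2c_mux_alloc(parent, dev, 4, sizeof(struct mydev), 0,
 *			     mydev_select, NULL);
 *	if (!muxc)
 *		return -ENOMEM;
 *
 *	for (chan = 0; chan < 4; chan++) {
 *		ret = i2c_mux_add_adapter(muxc, 0, chan, 0);
 *		if (ret) {
 *			i2c_mux_del_adapters(muxc);
 *			return ret;
 *		}
 *	}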
+ */ +int i2c_mux_add_adapter(struct i2c_mux_core *muxc, + u32 force_nr, u32 chan_id, + unsigned int class); + +void i2c_mux_del_adapters(struct i2c_mux_core *muxc); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_I2C_MUX_H */ diff --git a/include/linux/i2c-pxa.h b/include/linux/i2c-pxa.h new file mode 100644 index 000000000..a897e2b50 --- /dev/null +++ b/include/linux/i2c-pxa.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_I2C_ALGO_PXA_H +#define _LINUX_I2C_ALGO_PXA_H + +typedef enum i2c_slave_event_e { + I2C_SLAVE_EVENT_START_READ, + I2C_SLAVE_EVENT_START_WRITE, + I2C_SLAVE_EVENT_STOP +} i2c_slave_event_t; + +struct i2c_slave_client { + void *data; + void (*event)(void *ptr, i2c_slave_event_t event); + int (*read) (void *ptr); + void (*write)(void *ptr, unsigned int val); +}; + +#endif /* _LINUX_I2C_ALGO_PXA_H */ diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h new file mode 100644 index 000000000..fb0e040b1 --- /dev/null +++ b/include/linux/i2c-smbus.h @@ -0,0 +1,61 @@ +/* + * i2c-smbus.h - SMBus extensions to the I2C protocol + * + * Copyright (C) 2010 Jean Delvare + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301 USA. + */ + +#ifndef _LINUX_I2C_SMBUS_H +#define _LINUX_I2C_SMBUS_H + +#include +#include +#include + + +/** + * i2c_smbus_alert_setup - platform data for the smbus_alert i2c client + * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0) + * triggered + * @irq: IRQ number, if the smbus_alert driver should take care of interrupt + * handling + * + * If irq is not specified, the smbus_alert driver doesn't take care of + * interrupt handling. In that case it is up to the I2C bus driver to either + * handle the interrupts or to poll for alerts. + * + * If irq is specified then it it crucial that alert_edge_triggered is + * properly set. + */ +struct i2c_smbus_alert_setup { + int irq; +}; + +struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, + struct i2c_smbus_alert_setup *setup); +int i2c_handle_smbus_alert(struct i2c_client *ara); + +#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF) +int of_i2c_setup_smbus_alert(struct i2c_adapter *adap); +#else +static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap) +{ + return 0; +} +#endif + +#endif /* _LINUX_I2C_SMBUS_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h new file mode 100644 index 000000000..6fda04587 --- /dev/null +++ b/include/linux/i2c.h @@ -0,0 +1,955 @@ +/* ------------------------------------------------------------------------- */ +/* */ +/* i2c.h - definitions for the i2c-bus interface */ +/* */ +/* ------------------------------------------------------------------------- */ +/* Copyright (C) 1995-2000 Simon G. 
Vogl + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. */ +/* ------------------------------------------------------------------------- */ + +/* With some changes from Kyösti Mälkki and + Frodo Looijaard */ +#ifndef _LINUX_I2C_H +#define _LINUX_I2C_H + +#include +#include /* for struct device */ +#include /* for completion */ +#include +#include +#include /* for Host Notify IRQ */ +#include /* for struct device_node */ +#include /* for swab16 */ +#include + +extern struct bus_type i2c_bus_type; +extern struct device_type i2c_adapter_type; +extern struct device_type i2c_client_type; + +/* --- General options ------------------------------------------------ */ + +struct i2c_msg; +struct i2c_algorithm; +struct i2c_adapter; +struct i2c_client; +struct i2c_driver; +struct i2c_device_identity; +union i2c_smbus_data; +struct i2c_board_info; +enum i2c_slave_event; +typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); + +struct module; +struct property_entry; + +#if IS_ENABLED(CONFIG_I2C) +/* + * The master routines are the ones normally used to transmit data to devices + * on a bus (or read from them). Apart from two basic transfer functions to + * transmit one message at a time, a more complex version can be used to + * transmit an arbitrary number of messages without interruption. + * @count must be be less than 64k since msg.len is u16. + */ +extern int i2c_transfer_buffer_flags(const struct i2c_client *client, + char *buf, int count, u16 flags); + +/** + * i2c_master_recv - issue a single I2C message in master receive mode + * @client: Handle to slave device + * @buf: Where to store data read from slave + * @count: How many bytes to read, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes read. + */ +static inline int i2c_master_recv(const struct i2c_client *client, + char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, buf, count, I2C_M_RD); +}; + +/** + * i2c_master_recv_dmasafe - issue a single I2C message in master receive mode + * using a DMA safe buffer + * @client: Handle to slave device + * @buf: Where to store data read from slave, must be safe to use with DMA + * @count: How many bytes to read, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes read. + */ +static inline int i2c_master_recv_dmasafe(const struct i2c_client *client, + char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, buf, count, + I2C_M_RD | I2C_M_DMA_SAFE); +}; + +/** + * i2c_master_send - issue a single I2C message in master transmit mode + * @client: Handle to slave device + * @buf: Data that will be written to the slave + * @count: How many bytes to write, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes written. 
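 *
 * Illustrative usage sketch (not from the upstream header; "client" and
 * the two payload bytes are assumed placeholders):
 *
 *	u8 buf[2] = { 0x0f, 0x80 };
 *	int ret = i2c_master_send(client, buf, sizeof(buf));
 *
 *	if (ret != sizeof(buf))
 *		dev_err(&client->dev, "short write (%d)\n", ret);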
+ */ +static inline int i2c_master_send(const struct i2c_client *client, + const char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, (char *)buf, count, 0); +}; + +/** + * i2c_master_send_dmasafe - issue a single I2C message in master transmit mode + * using a DMA safe buffer + * @client: Handle to slave device + * @buf: Data that will be written to the slave, must be safe to use with DMA + * @count: How many bytes to write, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes written. + */ +static inline int i2c_master_send_dmasafe(const struct i2c_client *client, + const char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, (char *)buf, count, + I2C_M_DMA_SAFE); +}; + +/* Transfer num messages. + */ +extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num); +/* Unlocked flavor */ +extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num); + +/* This is the very generalized SMBus access routine. You probably do not + want to use this, though; one of the functions below may be much easier, + and probably just as fast. + Note that we use i2c_adapter here, because you do not need a specific + smbus adapter to call this function. */ +s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, + unsigned short flags, char read_write, u8 command, + int protocol, union i2c_smbus_data *data); + +/* Unlocked flavor */ +s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, + unsigned short flags, char read_write, u8 command, + int protocol, union i2c_smbus_data *data); + +/* Now follow the 'nice' access routines. These also document the calling + conventions of i2c_smbus_xfer. */ + +extern s32 i2c_smbus_read_byte(const struct i2c_client *client); +extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value); +extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client, + u8 command); +extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client, + u8 command, u8 value); +extern s32 i2c_smbus_read_word_data(const struct i2c_client *client, + u8 command); +extern s32 i2c_smbus_write_word_data(const struct i2c_client *client, + u8 command, u16 value); + +static inline s32 +i2c_smbus_read_word_swapped(const struct i2c_client *client, u8 command) +{ + s32 value = i2c_smbus_read_word_data(client, command); + + return (value < 0) ? 
value : swab16(value); +} + +static inline s32 +i2c_smbus_write_word_swapped(const struct i2c_client *client, + u8 command, u16 value) +{ + return i2c_smbus_write_word_data(client, command, swab16(value)); +} + +/* Returns the number of read bytes */ +extern s32 i2c_smbus_read_block_data(const struct i2c_client *client, + u8 command, u8 *values); +extern s32 i2c_smbus_write_block_data(const struct i2c_client *client, + u8 command, u8 length, const u8 *values); +/* Returns the number of read bytes */ +extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, + u8 command, u8 length, u8 *values); +extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, + u8 command, u8 length, + const u8 *values); +extern s32 +i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, + u8 command, u8 length, u8 *values); +int i2c_get_device_id(const struct i2c_client *client, + struct i2c_device_identity *id); +#endif /* I2C */ + +/** + * struct i2c_device_identity - i2c client device identification + * @manufacturer_id: 0 - 4095, database maintained by NXP + * @part_id: 0 - 511, according to manufacturer + * @die_revision: 0 - 7, according to manufacturer + */ +struct i2c_device_identity { + u16 manufacturer_id; +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS 0 +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_1 1 +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_2 2 +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_3 3 +#define I2C_DEVICE_ID_RAMTRON_INTERNATIONAL 4 +#define I2C_DEVICE_ID_ANALOG_DEVICES 5 +#define I2C_DEVICE_ID_STMICROELECTRONICS 6 +#define I2C_DEVICE_ID_ON_SEMICONDUCTOR 7 +#define I2C_DEVICE_ID_SPRINTEK_CORPORATION 8 +#define I2C_DEVICE_ID_ESPROS_PHOTONICS_AG 9 +#define I2C_DEVICE_ID_FUJITSU_SEMICONDUCTOR 10 +#define I2C_DEVICE_ID_FLIR 11 +#define I2C_DEVICE_ID_O2MICRO 12 +#define I2C_DEVICE_ID_ATMEL 13 +#define I2C_DEVICE_ID_NONE 0xffff + u16 part_id; + u8 die_revision; +}; + +enum i2c_alert_protocol { + I2C_PROTOCOL_SMBUS_ALERT, + I2C_PROTOCOL_SMBUS_HOST_NOTIFY, +}; + +/** + * struct i2c_driver - represent an I2C device driver + * @class: What kind of i2c device we instantiate (for detect) + * @probe: Callback for device binding - soon to be deprecated + * @probe_new: New callback for device binding + * @remove: Callback for device unbinding + * @shutdown: Callback for device shutdown + * @alert: Alert callback, for example for the SMBus alert protocol + * @command: Callback for bus-wide signaling (optional) + * @driver: Device driver model driver + * @id_table: List of I2C devices supported by this driver + * @detect: Callback for device detection + * @address_list: The I2C addresses to probe (for detect) + * @clients: List of detected clients we created (for i2c-core use only) + * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping + * + * The driver.owner field should be set to the module owner of this driver. + * The driver.name field should be set to the name of this driver. + * + * For automatic device detection, both @detect and @address_list must + * be defined. @class should also be set, otherwise only devices forced + * with module parameters will be created. The detect function must + * fill at least the name field of the i2c_board_info structure it is + * handed upon successful detection, and possibly also the flags field. + * + * If @detect is missing, the driver will still work fine for enumerated + * devices. Detected devices simply won't be supported. 
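 *
 * Illustrative sketch of a minimal driver built on this structure (not
 * from the upstream header; all "foo_*" names are assumed placeholders):
 *
 *	static const struct i2c_device_id foo_ids[] = {
 *		{ "foo", 0 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(i2c, foo_ids);
 *
 *	static struct i2c_driver foo_driver = {
 *		.driver		= { .name = "foo" },
 *		.probe_new	= foo_probe,
 *		.remove		= foo_remove,
 *		.id_table	= foo_ids,
 *	};
 *	module_i2c_driver(foo_driver);
 *
 * (module_i2c_driver() is defined further down in this header.)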
This is expected + * for the many I2C/SMBus devices which can't be detected reliably, and + * the ones which can always be enumerated in practice. + * + * The i2c_client structure which is handed to the @detect callback is + * not a real i2c_client. It is initialized just enough so that you can + * call i2c_smbus_read_byte_data and friends on it. Don't do anything + * else with it. In particular, calling dev_dbg and friends on it is + * not allowed. + */ +struct i2c_driver { + unsigned int class; + + /* Standard driver model interfaces */ + int (*probe)(struct i2c_client *, const struct i2c_device_id *); + int (*remove)(struct i2c_client *); + + /* New driver model interface to aid the seamless removal of the + * current probe()'s, more commonly unused than used second parameter. + */ + int (*probe_new)(struct i2c_client *); + + /* driver model interfaces that don't relate to enumeration */ + void (*shutdown)(struct i2c_client *); + + /* Alert callback, for example for the SMBus alert protocol. + * The format and meaning of the data value depends on the protocol. + * For the SMBus alert protocol, there is a single bit of data passed + * as the alert response's low bit ("event flag"). + * For the SMBus Host Notify protocol, the data corresponds to the + * 16-bit payload data reported by the slave device acting as master. + */ + void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol, + unsigned int data); + + /* a ioctl like command that can be used to perform specific functions + * with the device. + */ + int (*command)(struct i2c_client *client, unsigned int cmd, void *arg); + + struct device_driver driver; + const struct i2c_device_id *id_table; + + /* Device detection callback for automatic device creation */ + int (*detect)(struct i2c_client *, struct i2c_board_info *); + const unsigned short *address_list; + struct list_head clients; + + bool disable_i2c_core_irq_mapping; +}; +#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) + +/** + * struct i2c_client - represent an I2C slave device + * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address; + * I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking + * @addr: Address used on the I2C bus connected to the parent adapter. + * @name: Indicates the type of the device, usually a chip name that's + * generic enough to hide second-sourcing and compatible revisions. + * @adapter: manages the bus segment hosting this I2C device + * @dev: Driver model device node for the slave. + * @irq: indicates the IRQ generated by this device (if any) + * @detected: member of an i2c_driver.clients list or i2c-core's + * userspace_devices list + * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter + * calls it to pass on slave events to the slave driver. + * + * An i2c_client identifies a single device (i.e. chip) connected to an + * i2c bus. The behaviour exposed to Linux is defined by the driver + * managing the device. 
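 *
 * Illustrative sketch of the usual private-data pattern around this
 * structure (not from the upstream header; "struct foo_state" is an
 * assumed placeholder):
 *
 *	struct foo_state *st = devm_kzalloc(&client->dev, sizeof(*st),
 *					    GFP_KERNEL);
 *
 *	i2c_set_clientdata(client, st);
 *	st = i2c_get_clientdata(client);
 *
 * (The clientdata accessors are defined a little further down.)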
+ */ +struct i2c_client { + unsigned short flags; /* div., see below */ + unsigned short addr; /* chip address - NOTE: 7bit */ + /* addresses are stored in the */ + /* _LOWER_ 7 bits */ + char name[I2C_NAME_SIZE]; + struct i2c_adapter *adapter; /* the adapter we sit on */ + struct device dev; /* the device structure */ + int init_irq; /* irq set at initialization */ + int irq; /* irq issued by device */ + struct list_head detected; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + i2c_slave_cb_t slave_cb; /* callback for slave mode */ +#endif +}; +#define to_i2c_client(d) container_of(d, struct i2c_client, dev) + +extern struct i2c_client *i2c_verify_client(struct device *dev); +extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); +extern const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, + const struct i2c_client *client); + +static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) +{ + struct device * const dev = container_of(kobj, struct device, kobj); + return to_i2c_client(dev); +} + +static inline void *i2c_get_clientdata(const struct i2c_client *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +/* I2C slave support */ + +#if IS_ENABLED(CONFIG_I2C_SLAVE) +enum i2c_slave_event { + I2C_SLAVE_READ_REQUESTED, + I2C_SLAVE_WRITE_REQUESTED, + I2C_SLAVE_READ_PROCESSED, + I2C_SLAVE_WRITE_RECEIVED, + I2C_SLAVE_STOP, +}; + +extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); +extern int i2c_slave_unregister(struct i2c_client *client); +extern bool i2c_detect_slave_mode(struct device *dev); + +static inline int i2c_slave_event(struct i2c_client *client, + enum i2c_slave_event event, u8 *val) +{ + return client->slave_cb(client, event, val); +} +#else +static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } +#endif + +/** + * struct i2c_board_info - template for device creation + * @type: chip type, to initialize i2c_client.name + * @flags: to initialize i2c_client.flags + * @addr: stored in i2c_client.addr + * @dev_name: Overrides the default - dev_name if set + * @platform_data: stored in i2c_client.dev.platform_data + * @of_node: pointer to OpenFirmware device node + * @fwnode: device node supplied by the platform firmware + * @properties: additional device properties for the device + * @resources: resources associated with the device + * @num_resources: number of resources in the @resources array + * @irq: stored in i2c_client.irq + * + * I2C doesn't actually support hardware probing, although controllers and + * devices may be able to use I2C_SMBUS_QUICK to tell whether or not there's + * a device at a given address. Drivers commonly need more information than + * that, such as chip type, configuration, associated IRQ, and so on. + * + * i2c_board_info is used to build tables of information listing I2C devices + * that are present. This information is used to grow the driver model tree. + * For mainboards this is done statically using i2c_register_board_info(); + * bus numbers identify adapters that aren't yet available. For add-on boards, + * i2c_new_device() does this dynamically with the adapter already known. 
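 *
 * Illustrative sketch (not from the upstream header; the chip name and
 * address are placeholders only):
 *
 *	static struct i2c_board_info info = {
 *		I2C_BOARD_INFO("pcf8563", 0x51),
 *	};
 *
 *	client = i2c_new_device(adapter, &info);
 *
 * (I2C_BOARD_INFO() and i2c_new_device() are declared just below.)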
+ */ +struct i2c_board_info { + char type[I2C_NAME_SIZE]; + unsigned short flags; + unsigned short addr; + const char *dev_name; + void *platform_data; + struct device_node *of_node; + struct fwnode_handle *fwnode; + const struct property_entry *properties; + const struct resource *resources; + unsigned int num_resources; + int irq; +}; + +/** + * I2C_BOARD_INFO - macro used to list an i2c device and its address + * @dev_type: identifies the device type + * @dev_addr: the device's address on the bus. + * + * This macro initializes essential fields of a struct i2c_board_info, + * declaring what has been provided on a particular board. Optional + * fields (such as associated irq, or device-specific platform_data) + * are provided using conventional syntax. + */ +#define I2C_BOARD_INFO(dev_type, dev_addr) \ + .type = dev_type, .addr = (dev_addr) + + +#if IS_ENABLED(CONFIG_I2C) +/* Add-on boards should register/unregister their devices; e.g. a board + * with integrated I2C, a config eeprom, sensors, and a codec that's + * used in conjunction with the primary hardware. + */ +extern struct i2c_client * +i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); + +/* If you don't know the exact address of an I2C device, use this variant + * instead, which can probe for device presence in a list of possible + * addresses. The "probe" callback function is optional. If it is provided, + * it must return 1 on successful probe, 0 otherwise. If it is not provided, + * a default probing method is used. + */ +extern struct i2c_client * +i2c_new_probed_device(struct i2c_adapter *adap, + struct i2c_board_info *info, + unsigned short const *addr_list, + int (*probe)(struct i2c_adapter *, unsigned short addr)); + +/* Common custom probe functions */ +extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr); + +/* For devices that use several addresses, use i2c_new_dummy() to make + * client handles for the extra addresses. + */ +extern struct i2c_client * +i2c_new_dummy(struct i2c_adapter *adap, u16 address); + +extern struct i2c_client * +i2c_new_secondary_device(struct i2c_client *client, + const char *name, + u16 default_addr); + +extern void i2c_unregister_device(struct i2c_client *); +#endif /* I2C */ + +/* Mainboard arch_initcall() code should register all its I2C devices. + * This is done at arch_initcall time, before declaring any i2c adapters. + * Modules for add-on boards must use other calls. + */ +#ifdef CONFIG_I2C_BOARDINFO +extern int +i2c_register_board_info(int busnum, struct i2c_board_info const *info, + unsigned n); +#else +static inline int +i2c_register_board_info(int busnum, struct i2c_board_info const *info, + unsigned n) +{ + return 0; +} +#endif /* I2C_BOARDINFO */ + +/** + * struct i2c_algorithm - represent I2C transfer method + * @master_xfer: Issue a set of i2c transactions to the given I2C adapter + * defined by the msgs array, with num messages available to transfer via + * the adapter specified by adap. + * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this + * is not present, then the bus layer will try and convert the SMBus calls + * into I2C transfers instead. + * @functionality: Return the flags that this algorithm/adapter pair supports + * from the I2C_FUNC_* flags. 
+ * @reg_slave: Register given client to I2C slave mode of this adapter + * @unreg_slave: Unregister given client from I2C slave mode of this adapter + * + * The following structs are for those who like to implement new bus drivers: + * i2c_algorithm is the interface to a class of hardware solutions which can + * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 + * to name two of the most common. + * + * The return codes from the @master_xfer field should indicate the type of + * error code that occurred during the transfer, as documented in the kernel + * Documentation file Documentation/i2c/fault-codes. + */ +struct i2c_algorithm { + /* If an adapter algorithm can't do I2C-level access, set master_xfer + to NULL. If an adapter algorithm can do SMBus access, set + smbus_xfer. If set to NULL, the SMBus protocol is simulated + using common I2C messages */ + /* master_xfer should return the number of messages successfully + processed, or a negative value on error */ + int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num); + int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr, + unsigned short flags, char read_write, + u8 command, int size, union i2c_smbus_data *data); + + /* To determine what the adapter supports */ + u32 (*functionality) (struct i2c_adapter *); + +#if IS_ENABLED(CONFIG_I2C_SLAVE) + int (*reg_slave)(struct i2c_client *client); + int (*unreg_slave)(struct i2c_client *client); +#endif +}; + +/** + * struct i2c_lock_operations - represent I2C locking operations + * @lock_bus: Get exclusive access to an I2C bus segment + * @trylock_bus: Try to get exclusive access to an I2C bus segment + * @unlock_bus: Release exclusive access to an I2C bus segment + * + * The main operations are wrapped by i2c_lock_bus and i2c_unlock_bus. + */ +struct i2c_lock_operations { + void (*lock_bus)(struct i2c_adapter *, unsigned int flags); + int (*trylock_bus)(struct i2c_adapter *, unsigned int flags); + void (*unlock_bus)(struct i2c_adapter *, unsigned int flags); +}; + +/** + * struct i2c_timings - I2C timing information + * @bus_freq_hz: the bus frequency in Hz + * @scl_rise_ns: time SCL signal takes to rise in ns; t(r) in the I2C specification + * @scl_fall_ns: time SCL signal takes to fall in ns; t(f) in the I2C specification + * @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns + * @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification + * @sda_hold_ns: time IP core additionally needs to hold SDA in ns + */ +struct i2c_timings { + u32 bus_freq_hz; + u32 scl_rise_ns; + u32 scl_fall_ns; + u32 scl_int_delay_ns; + u32 sda_fall_ns; + u32 sda_hold_ns; +}; + +/** + * struct i2c_bus_recovery_info - I2C bus recovery information + * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or + * i2c_generic_scl_recovery(). + * @get_scl: This gets current value of SCL line. Mandatory for generic SCL + * recovery. Populated internally for generic GPIO recovery. + * @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery. + * Populated internally for generic GPIO recovery. + * @get_sda: This gets current value of SDA line. This or set_sda() is mandatory + * for generic SCL recovery. Populated internally, if sda_gpio is a valid + * GPIO, for generic GPIO recovery. + * @set_sda: This sets/clears the SDA line. This or get_sda() is mandatory for + * generic SCL recovery. Populated internally, if sda_gpio is a valid GPIO, + * for generic GPIO recovery. 
+ * @get_bus_free: Returns the bus free state as seen from the IP core in case it + * has a more complex internal logic than just reading SDA. Optional. + * @prepare_recovery: This will be called before starting recovery. Platform may + * configure padmux here for SDA/SCL line or something else they want. + * @unprepare_recovery: This will be called after completing recovery. Platform + * may configure padmux here for SDA/SCL line or something else they want. + * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery. + * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery. + */ +struct i2c_bus_recovery_info { + int (*recover_bus)(struct i2c_adapter *adap); + + int (*get_scl)(struct i2c_adapter *adap); + void (*set_scl)(struct i2c_adapter *adap, int val); + int (*get_sda)(struct i2c_adapter *adap); + void (*set_sda)(struct i2c_adapter *adap, int val); + int (*get_bus_free)(struct i2c_adapter *adap); + + void (*prepare_recovery)(struct i2c_adapter *adap); + void (*unprepare_recovery)(struct i2c_adapter *adap); + + /* gpio recovery */ + struct gpio_desc *scl_gpiod; + struct gpio_desc *sda_gpiod; +}; + +int i2c_recover_bus(struct i2c_adapter *adap); + +/* Generic recovery routines */ +int i2c_generic_scl_recovery(struct i2c_adapter *adap); + +/** + * struct i2c_adapter_quirks - describe flaws of an i2c adapter + * @flags: see I2C_AQ_* for possible flags and read below + * @max_num_msgs: maximum number of messages per transfer + * @max_write_len: maximum length of a write message + * @max_read_len: maximum length of a read message + * @max_comb_1st_msg_len: maximum length of the first msg in a combined message + * @max_comb_2nd_msg_len: maximum length of the second msg in a combined message + * + * Note about combined messages: Some I2C controllers can only send one message + * per transfer, plus something called combined message or write-then-read. + * This is (usually) a small write message followed by a read message and + * barely enough to access register based devices like EEPROMs. There is a flag + * to support this mode. It implies max_num_msg = 2 and does the length checks + * with max_comb_*_len because combined message mode usually has its own + * limitations. Because of HW implementations, some controllers can actually do + * write-then-anything or other variants. To support that, write-then-read has + * been broken out into smaller bits like write-first and read-second which can + * be combined as needed. 
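 *
 * Illustrative sketch for a controller limited to a short write-then-read
 * (not from the upstream header; the length limits shown are arbitrary):
 *
 *	static const struct i2c_adapter_quirks foo_quirks = {
 *		.flags = I2C_AQ_COMB_WRITE_THEN_READ,
 *		.max_comb_1st_msg_len = 2,
 *		.max_comb_2nd_msg_len = 32,
 *	};
 *
 * (The I2C_AQ_* flags are defined right after this structure.)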
+ */ + +struct i2c_adapter_quirks { + u64 flags; + int max_num_msgs; + u16 max_write_len; + u16 max_read_len; + u16 max_comb_1st_msg_len; + u16 max_comb_2nd_msg_len; +}; + +/* enforce max_num_msgs = 2 and use max_comb_*_len for length checks */ +#define I2C_AQ_COMB BIT(0) +/* first combined message must be write */ +#define I2C_AQ_COMB_WRITE_FIRST BIT(1) +/* second combined message must be read */ +#define I2C_AQ_COMB_READ_SECOND BIT(2) +/* both combined messages must have the same target address */ +#define I2C_AQ_COMB_SAME_ADDR BIT(3) +/* convenience macro for typical write-then read case */ +#define I2C_AQ_COMB_WRITE_THEN_READ (I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | \ + I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR) +/* clock stretching is not supported */ +#define I2C_AQ_NO_CLK_STRETCH BIT(4) +/* message cannot have length of 0 */ +#define I2C_AQ_NO_ZERO_LEN_READ BIT(5) +#define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6) +#define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE) +/* adapter cannot do repeated START */ +#define I2C_AQ_NO_REP_START BIT(7) + +/* + * i2c_adapter is the structure used to identify a physical i2c bus along + * with the access algorithms necessary to access it. + */ +struct i2c_adapter { + struct module *owner; + unsigned int class; /* classes to allow probing for */ + const struct i2c_algorithm *algo; /* the algorithm to access the bus */ + void *algo_data; + + /* data fields that are valid for all devices */ + const struct i2c_lock_operations *lock_ops; + struct rt_mutex bus_lock; + struct rt_mutex mux_lock; + + int timeout; /* in jiffies */ + int retries; + struct device dev; /* the adapter device */ + + int nr; + char name[48]; + struct completion dev_released; + + struct mutex userspace_clients_lock; + struct list_head userspace_clients; + + struct i2c_bus_recovery_info *bus_recovery_info; + const struct i2c_adapter_quirks *quirks; + + struct irq_domain *host_notify_domain; +}; +#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) + +static inline void *i2c_get_adapdata(const struct i2c_adapter *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +static inline struct i2c_adapter * +i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) +{ +#if IS_ENABLED(CONFIG_I2C_MUX) + struct device *parent = adapter->dev.parent; + + if (parent != NULL && parent->type == &i2c_adapter_type) + return to_i2c_adapter(parent); + else +#endif + return NULL; +} + +int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *)); + +/* Adapter locking functions, exported for shared pin cases */ +#define I2C_LOCK_ROOT_ADAPTER BIT(0) +#define I2C_LOCK_SEGMENT BIT(1) + +/** + * i2c_lock_bus - Get exclusive access to an I2C bus segment + * @adapter: Target I2C bus segment + * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT + * locks only this branch in the adapter tree + */ +static inline void +i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags) +{ + adapter->lock_ops->lock_bus(adapter, flags); +} + +/** + * i2c_trylock_bus - Try to get exclusive access to an I2C bus segment + * @adapter: Target I2C bus segment + * @flags: I2C_LOCK_ROOT_ADAPTER tries to locks the root i2c adapter, + * I2C_LOCK_SEGMENT tries to lock only this branch in the adapter tree + * + * Return: true if the I2C bus segment is locked, false otherwise + */ +static inline int +i2c_trylock_bus(struct i2c_adapter *adapter, 
unsigned int flags) +{ + return adapter->lock_ops->trylock_bus(adapter, flags); +} + +/** + * i2c_unlock_bus - Release exclusive access to an I2C bus segment + * @adapter: Target I2C bus segment + * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT + * unlocks only this branch in the adapter tree + */ +static inline void +i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) +{ + adapter->lock_ops->unlock_bus(adapter, flags); +} + +/*flags for the client struct: */ +#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ +#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ + /* Must equal I2C_M_TEN below */ +#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ +#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */ +#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ +#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ + /* Must match I2C_M_STOP|IGNORE_NAK */ + +/* i2c adapter classes (bitmask) */ +#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ +#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ +#define I2C_CLASS_SPD (1<<7) /* Memory modules */ +/* Warn users that the adapter doesn't support classes anymore */ +#define I2C_CLASS_DEPRECATED (1<<8) + +/* Internal numbers to terminate lists */ +#define I2C_CLIENT_END 0xfffeU + +/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ +#define I2C_ADDRS(addr, addrs...) \ + ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) + + +/* ----- functions exported by i2c.o */ + +/* administration... + */ +#if IS_ENABLED(CONFIG_I2C) +extern int i2c_add_adapter(struct i2c_adapter *); +extern void i2c_del_adapter(struct i2c_adapter *); +extern int i2c_add_numbered_adapter(struct i2c_adapter *); + +extern int i2c_register_driver(struct module *, struct i2c_driver *); +extern void i2c_del_driver(struct i2c_driver *); + +/* use a define to avoid include chaining to get THIS_MODULE */ +#define i2c_add_driver(driver) \ + i2c_register_driver(THIS_MODULE, driver) + +extern struct i2c_client *i2c_use_client(struct i2c_client *client); +extern void i2c_release_client(struct i2c_client *client); + +/* call the i2c_client->command() of all attached clients with + * the given arguments */ +extern void i2c_clients_command(struct i2c_adapter *adap, + unsigned int cmd, void *arg); + +extern struct i2c_adapter *i2c_get_adapter(int nr); +extern void i2c_put_adapter(struct i2c_adapter *adap); +extern unsigned int i2c_adapter_depth(struct i2c_adapter *adapter); + +void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults); + +/* Return the functionality mask */ +static inline u32 i2c_get_functionality(struct i2c_adapter *adap) +{ + return adap->algo->functionality(adap); +} + +/* Return 1 if adapter supports everything we need, 0 if not. 
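 *
 * Illustrative sketch (not from the upstream header), typically used at
 * probe time:
 *
 *	if (!i2c_check_functionality(client->adapter,
 *				     I2C_FUNC_SMBUS_BYTE_DATA))
 *		return -EIO;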
*/ +static inline int i2c_check_functionality(struct i2c_adapter *adap, u32 func) +{ + return (func & i2c_get_functionality(adap)) == func; +} + +/** + * i2c_check_quirks() - Function for checking the quirk flags in an i2c adapter + * @adap: i2c adapter + * @quirks: quirk flags + * + * Return: true if the adapter has all the specified quirk flags, false if not + */ +static inline bool i2c_check_quirks(struct i2c_adapter *adap, u64 quirks) +{ + if (!adap->quirks) + return false; + return (adap->quirks->flags & quirks) == quirks; +} + +/* Return the adapter number for a specific adapter */ +static inline int i2c_adapter_id(struct i2c_adapter *adap) +{ + return adap->nr; +} + +static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) +{ + return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); +} + +u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); +void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred); + +int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); +/** + * module_i2c_driver() - Helper macro for registering a modular I2C driver + * @__i2c_driver: i2c_driver struct + * + * Helper macro for I2C drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_i2c_driver(__i2c_driver) \ + module_driver(__i2c_driver, i2c_add_driver, \ + i2c_del_driver) + +/** + * builtin_i2c_driver() - Helper macro for registering a builtin I2C driver + * @__i2c_driver: i2c_driver struct + * + * Helper macro for I2C drivers which do not do anything special in their + * init. This eliminates a lot of boilerplate. Each driver may only + * use this macro once, and calling it replaces device_initcall(). 
+ */ +#define builtin_i2c_driver(__i2c_driver) \ + builtin_driver(__i2c_driver, i2c_add_driver) + +#endif /* I2C */ + +#if IS_ENABLED(CONFIG_OF) +/* must call put_device() when done with returned i2c_client device */ +extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node); + +/* must call put_device() when done with returned i2c_adapter device */ +extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node); + +/* must call i2c_put_adapter() when done with returned i2c_adapter device */ +struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node); + +extern const struct of_device_id +*i2c_of_match_device(const struct of_device_id *matches, + struct i2c_client *client); + +int of_i2c_get_board_info(struct device *dev, struct device_node *node, + struct i2c_board_info *info); + +#else + +static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) +{ + return NULL; +} + +static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) +{ + return NULL; +} + +static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node) +{ + return NULL; +} + +static inline const struct of_device_id +*i2c_of_match_device(const struct of_device_id *matches, + struct i2c_client *client) +{ + return NULL; +} + +static inline int of_i2c_get_board_info(struct device *dev, + struct device_node *node, + struct i2c_board_info *info) +{ + return -ENOTSUPP; +} + +#endif /* CONFIG_OF */ + +#if IS_ENABLED(CONFIG_ACPI) +u32 i2c_acpi_find_bus_speed(struct device *dev); +struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, + struct i2c_board_info *info); +#else +static inline u32 i2c_acpi_find_bus_speed(struct device *dev) +{ + return 0; +} +static inline struct i2c_client *i2c_acpi_new_device(struct device *dev, + int index, struct i2c_board_info *info) +{ + return NULL; +} +#endif /* CONFIG_ACPI */ + +#endif /* _LINUX_I2C_H */ diff --git a/include/linux/i8042.h b/include/linux/i8042.h new file mode 100644 index 000000000..d98780ca9 --- /dev/null +++ b/include/linux/i8042.h @@ -0,0 +1,99 @@ +#ifndef _LINUX_I8042_H +#define _LINUX_I8042_H + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include + +/* + * Standard commands. + */ + +#define I8042_CMD_CTL_RCTR 0x0120 +#define I8042_CMD_CTL_WCTR 0x1060 +#define I8042_CMD_CTL_TEST 0x01aa + +#define I8042_CMD_KBD_DISABLE 0x00ad +#define I8042_CMD_KBD_ENABLE 0x00ae +#define I8042_CMD_KBD_TEST 0x01ab +#define I8042_CMD_KBD_LOOP 0x11d2 + +#define I8042_CMD_AUX_DISABLE 0x00a7 +#define I8042_CMD_AUX_ENABLE 0x00a8 +#define I8042_CMD_AUX_TEST 0x01a9 +#define I8042_CMD_AUX_SEND 0x10d4 +#define I8042_CMD_AUX_LOOP 0x11d3 + +#define I8042_CMD_MUX_PFX 0x0090 +#define I8042_CMD_MUX_SEND 0x1090 + +/* + * Status register bits. + */ + +#define I8042_STR_PARITY 0x80 +#define I8042_STR_TIMEOUT 0x40 +#define I8042_STR_AUXDATA 0x20 +#define I8042_STR_KEYLOCK 0x10 +#define I8042_STR_CMDDAT 0x08 +#define I8042_STR_MUXERR 0x04 +#define I8042_STR_IBF 0x02 +#define I8042_STR_OBF 0x01 + +/* + * Control register bits. 
+ */ + +#define I8042_CTR_KBDINT 0x01 +#define I8042_CTR_AUXINT 0x02 +#define I8042_CTR_IGNKEYLOCK 0x08 +#define I8042_CTR_KBDDIS 0x10 +#define I8042_CTR_AUXDIS 0x20 +#define I8042_CTR_XLATE 0x40 + +struct serio; + +#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE) + +void i8042_lock_chip(void); +void i8042_unlock_chip(void); +int i8042_command(unsigned char *param, int command); +int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)); +int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)); + +#else + +static inline void i8042_lock_chip(void) +{ +} + +static inline void i8042_unlock_chip(void) +{ +} + +static inline int i8042_command(unsigned char *param, int command) +{ + return -ENODEV; +} + +static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)) +{ + return -ENODEV; +} + +static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)) +{ + return -ENODEV; +} + +#endif + +#endif diff --git a/include/linux/i8253.h b/include/linux/i8253.h new file mode 100644 index 000000000..8336b2f6f --- /dev/null +++ b/include/linux/i8253.h @@ -0,0 +1,30 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Machine specific IO port address definition for generic. + * Written by Osamu Tomita + */ +#ifndef __LINUX_I8253_H +#define __LINUX_I8253_H + +#include +#include +#include + +/* i8253A PIT registers */ +#define PIT_MODE 0x43 +#define PIT_CH0 0x40 +#define PIT_CH2 0x42 + +#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) + +extern raw_spinlock_t i8253_lock; +extern bool i8253_clear_counter_on_shutdown; +extern struct clock_event_device i8253_clockevent; +extern void clockevent_i8253_init(bool oneshot); + +extern void setup_pit_timer(void); + +#endif /* __LINUX_I8253_H */ diff --git a/include/linux/icmp.h b/include/linux/icmp.h new file mode 100644 index 000000000..efc184906 --- /dev/null +++ b/include/linux/icmp.h @@ -0,0 +1,27 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the ICMP protocol. + * + * Version: @(#)icmp.h 1.0.3 04/28/93 + * + * Author: Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
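For the i8042 filter hooks above, a rough sketch of a platform driver that snoops keyboard-controller bytes before serio processes them; the foo_* names and the choice of byte to swallow are purely illustrative.

#include <linux/i8042.h>

/* Return true to swallow a byte, false to pass it on to the serio layer. */
static bool foo_i8042_filter(unsigned char data, unsigned char str,
                             struct serio *serio)
{
        if (str & I8042_STR_AUXDATA)
                return false;           /* leave mouse traffic alone */
        return data == 0xe0;            /* e.g. eat an extended-scancode prefix */
}

static int foo_register(void)
{
        return i8042_install_filter(foo_i8042_filter);  /* fails if one is installed */
}

static void foo_unregister(void)
{
        i8042_remove_filter(foo_i8042_filter);
}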
+ */ +#ifndef _LINUX_ICMP_H +#define _LINUX_ICMP_H + +#include +#include + +static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb) +{ + return (struct icmphdr *)skb_transport_header(skb); +} +#endif /* _LINUX_ICMP_H */ diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h new file mode 100644 index 000000000..0be0d68fb --- /dev/null +++ b/include/linux/icmpv6.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ICMPV6_H +#define _LINUX_ICMPV6_H + +#include +#include +#include + +static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) +{ + return (struct icmp6hdr *)skb_transport_header(skb); +} + +#include + +#if IS_ENABLED(CONFIG_IPV6) + +typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm); +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm); +#if IS_BUILTIN(CONFIG_IPV6) +static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm) +{ + icmp6_send(skb, type, code, info, NULL, parm); +} +static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +#else +extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm); +extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); +extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); +#endif + +static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +{ + __icmpv6_send(skb, type, code, info, IP6CB(skb)); +} + +int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, + unsigned int data_len); + +#if IS_ENABLED(CONFIG_NF_NAT) +void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); +#else +static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + struct inet6_skb_parm parm = { 0 }; + __icmpv6_send(skb_in, type, code, info, &parm); +} +#endif + +#else + +static inline void icmpv6_send(struct sk_buff *skb, + u8 type, u8 code, __u32 info) +{ +} + +static inline void icmpv6_ndo_send(struct sk_buff *skb, + u8 type, u8 code, __u32 info) +{ +} +#endif + +extern int icmpv6_init(void); +extern int icmpv6_err_convert(u8 type, u8 code, + int *err); +extern void icmpv6_cleanup(void); +extern void icmpv6_param_prob(struct sk_buff *skb, + u8 code, int pos); + +struct flowi6; +struct in6_addr; +extern void icmpv6_flow_init(struct sock *sk, + struct flowi6 *fl6, + u8 type, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + int oif); +#endif diff --git a/include/linux/ide.h b/include/linux/ide.h new file mode 100644 index 000000000..c74b03219 --- /dev/null +++ b/include/linux/ide.h @@ -0,0 +1,1610 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _IDE_H +#define _IDE_H +/* + * linux/include/linux/ide.h + * + * Copyright (C) 1994-2002 Linus Torvalds & authors + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* for request_sense */ +#include +#include +#include +#include + +/* + * Probably not wise to fiddle with these + */ +#define SUPPORT_VLB_SYNC 1 +#define IDE_DEFAULT_MAX_FAILURES 1 +#define ERROR_MAX 8 /* Max read/write errors per sector 
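A brief, hedged sketch of the header accessors and the IPv6 send helper above; the foo_* function names are invented and the ICMPV6_* constants come from the UAPI header, not from these files.

#include <linux/icmpv6.h>
#include <linux/skbuff.h>

/* Tell the sender its packet exceeded our MTU (IPv6 path). */
static void foo_reject_too_big(struct sk_buff *skb, unsigned int mtu)
{
        if (skb->len > mtu)
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
}

static bool foo_is_echo_request(const struct sk_buff *skb)
{
        return icmp6_hdr(skb)->icmp6_type == ICMPV6_ECHO_REQUEST;
}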
*/ +#define ERROR_RESET 3 /* Reset controller every 4th retry */ +#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ + +struct device; + +/* values for ide_request.type */ +enum ata_priv_type { + ATA_PRIV_MISC, + ATA_PRIV_TASKFILE, + ATA_PRIV_PC, + ATA_PRIV_SENSE, /* sense request */ + ATA_PRIV_PM_SUSPEND, /* suspend request */ + ATA_PRIV_PM_RESUME, /* resume request */ +}; + +struct ide_request { + struct scsi_request sreq; + u8 sense[SCSI_SENSE_BUFFERSIZE]; + u8 type; +}; + +static inline struct ide_request *ide_req(struct request *rq) +{ + return blk_mq_rq_to_pdu(rq); +} + +static inline bool ata_misc_request(struct request *rq) +{ + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC; +} + +static inline bool ata_taskfile_request(struct request *rq) +{ + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE; +} + +static inline bool ata_pc_request(struct request *rq) +{ + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC; +} + +static inline bool ata_sense_request(struct request *rq) +{ + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE; +} + +static inline bool ata_pm_request(struct request *rq) +{ + return blk_rq_is_private(rq) && + (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND || + ide_req(rq)->type == ATA_PRIV_PM_RESUME); +} + +/* Error codes returned in result to the higher part of the driver. */ +enum { + IDE_DRV_ERROR_GENERAL = 101, + IDE_DRV_ERROR_FILEMARK = 102, + IDE_DRV_ERROR_EOD = 103, +}; + +/* + * Definitions for accessing IDE controller registers + */ +#define IDE_NR_PORTS (10) + +struct ide_io_ports { + unsigned long data_addr; + + union { + unsigned long error_addr; /* read: error */ + unsigned long feature_addr; /* write: feature */ + }; + + unsigned long nsect_addr; + unsigned long lbal_addr; + unsigned long lbam_addr; + unsigned long lbah_addr; + + unsigned long device_addr; + + union { + unsigned long status_addr; /*  read: status  */ + unsigned long command_addr; /* write: command */ + }; + + unsigned long ctl_addr; + + unsigned long irq_addr; +}; + +#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) + +#define BAD_R_STAT (ATA_BUSY | ATA_ERR) +#define BAD_W_STAT (BAD_R_STAT | ATA_DF) +#define BAD_STAT (BAD_R_STAT | ATA_DRQ) +#define DRIVE_READY (ATA_DRDY | ATA_DSC) + +#define BAD_CRC (ATA_ABORTED | ATA_ICRC) + +#define SATA_NR_PORTS (3) /* 16 possible ?? */ + +#define SATA_STATUS_OFFSET (0) +#define SATA_ERROR_OFFSET (1) +#define SATA_CONTROL_OFFSET (2) + +/* + * Our Physical Region Descriptor (PRD) table should be large enough + * to handle the biggest I/O request we are likely to see. Since requests + * can have no more than 256 sectors, and since the typical blocksize is + * two or more sectors, we could get by with a limit of 128 entries here for + * the usual worst case. Most requests seem to include some contiguous blocks, + * further reducing the number of table entries required. + * + * The driver reverts to PIO mode for individual requests that exceed + * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling + * 100% of all crazy scenarios here is not necessary. + * + * As it turns out though, we must allocate a full 4KB page for this, + * so the two PRD tables (ide0 & ide1) will each get half of that, + * allowing each to have about 256 entries (8 bytes each) from this. 
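A small sketch of how the request-type helpers and the status macros above tend to be combined; foo_classify() is invented, and the stat value is assumed to have been read via the port's read_status method.

#include <linux/ide.h>

static int foo_classify(struct request *rq, u8 stat)
{
        /* The drive must report ready with no error or fault bits set. */
        if (!OK_STAT(stat, ATA_DRDY, BAD_W_STAT))
                return -EBUSY;

        if (ata_pm_request(rq))
                return 1;       /* suspend/resume: handled by the PM state machine */
        if (ata_taskfile_request(rq))
                return 2;       /* raw taskfile from the ioctl path */
        return 0;               /* ordinary filesystem I/O */
}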
+ */ +#define PRD_BYTES 8 +#define PRD_ENTRIES 256 + +/* + * Some more useful definitions + */ +#define PARTN_BITS 6 /* number of minor dev bits for partitions */ +#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ + +/* + * Timeouts for various operations: + */ +enum { + /* spec allows up to 20ms, but CF cards and SSD drives need more */ + WAIT_DRQ = 1 * HZ, /* 1s */ + /* some laptops are very slow */ + WAIT_READY = 5 * HZ, /* 5s */ + /* should be less than 3ms (?), if all ATAPI CD is closed at boot */ + WAIT_PIDENTIFY = 10 * HZ, /* 10s */ + /* worst case when spinning up */ + WAIT_WORSTCASE = 30 * HZ, /* 30s */ + /* maximum wait for an IRQ to happen */ + WAIT_CMD = 10 * HZ, /* 10s */ + /* Some drives require a longer IRQ timeout. */ + WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */ + /* + * Some drives (for example, Seagate STT3401A Travan) require a very + * long timeout, because they don't return an interrupt or clear their + * BSY bit until after the command completes (even retension commands). + */ + WAIT_TAPE_CMD = 900 * HZ, /* 900s */ + /* minimum sleep time */ + WAIT_MIN_SLEEP = HZ / 50, /* 20ms */ +}; + +/* + * Op codes for special requests to be handled by ide_special_rq(). + * Values should be in the range of 0x20 to 0x3f. + */ +#define REQ_DRIVE_RESET 0x20 +#define REQ_DEVSET_EXEC 0x21 +#define REQ_PARK_HEADS 0x22 +#define REQ_UNPARK_HEADS 0x23 + +/* + * hwif_chipset_t is used to keep track of the specific hardware + * chipset used by each IDE interface, if known. + */ +enum { ide_unknown, ide_generic, ide_pci, + ide_cmd640, ide_dtc2278, ide_ali14xx, + ide_qd65xx, ide_umc8672, ide_ht6560b, + ide_4drives, ide_pmac, ide_acorn, + ide_au1xxx, ide_palm3710 +}; + +typedef u8 hwif_chipset_t; + +/* + * Structure to hold all information about the location of this port + */ +struct ide_hw { + union { + struct ide_io_ports io_ports; + unsigned long io_ports_array[IDE_NR_PORTS]; + }; + + int irq; /* our irq number */ + struct device *dev, *parent; + unsigned long config; +}; + +static inline void ide_std_init_ports(struct ide_hw *hw, + unsigned long io_addr, + unsigned long ctl_addr) +{ + unsigned int i; + + for (i = 0; i <= 7; i++) + hw->io_ports_array[i] = io_addr++; + + hw->io_ports.ctl_addr = ctl_addr; +} + +#define MAX_HWIFS 10 + +/* + * Now for the data we need to maintain per-drive: ide_drive_t + */ + +#define ide_scsi 0x21 +#define ide_disk 0x20 +#define ide_optical 0x7 +#define ide_cdrom 0x5 +#define ide_tape 0x1 +#define ide_floppy 0x0 + +/* + * Special Driver Flags + */ +enum { + IDE_SFLAG_SET_GEOMETRY = (1 << 0), + IDE_SFLAG_RECALIBRATE = (1 << 1), + IDE_SFLAG_SET_MULTMODE = (1 << 2), +}; + +/* + * Status returned from various ide_ functions + */ +typedef enum { + ide_stopped, /* no drive operation was started */ + ide_started, /* a drive operation was started, handler was set */ +} ide_startstop_t; + +enum { + IDE_VALID_ERROR = (1 << 1), + IDE_VALID_FEATURE = IDE_VALID_ERROR, + IDE_VALID_NSECT = (1 << 2), + IDE_VALID_LBAL = (1 << 3), + IDE_VALID_LBAM = (1 << 4), + IDE_VALID_LBAH = (1 << 5), + IDE_VALID_DEVICE = (1 << 6), + IDE_VALID_LBA = IDE_VALID_LBAL | + IDE_VALID_LBAM | + IDE_VALID_LBAH, + IDE_VALID_OUT_TF = IDE_VALID_FEATURE | + IDE_VALID_NSECT | + IDE_VALID_LBA, + IDE_VALID_IN_TF = IDE_VALID_NSECT | + IDE_VALID_LBA, + IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF, + IDE_VALID_IN_HOB = IDE_VALID_ERROR | + IDE_VALID_NSECT | + IDE_VALID_LBA, +}; + +enum { + IDE_TFLAG_LBA48 = (1 << 0), + IDE_TFLAG_WRITE = (1 << 1), + IDE_TFLAG_CUSTOM_HANDLER = (1 << 2), + 
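A hedged sketch of how a host driver fills struct ide_hw with ide_std_init_ports() and hands it to ide_host_add(), which is declared further down in this header. The foo_* names are placeholders, and 0x1f0/0x3f6/IRQ 14 are simply the classic primary-port defaults used as example values.

#include <linux/ide.h>
#include <linux/string.h>

static const struct ide_port_info foo_port_info;        /* fleshed out by a real driver */

static int foo_probe_port(struct device *dev)
{
        struct ide_hw hw, *hws[] = { &hw };
        struct ide_host *host;

        memset(&hw, 0, sizeof(hw));
        ide_std_init_ports(&hw, 0x1f0, 0x3f6);  /* data..status block plus ctl */
        hw.irq = 14;
        hw.dev = dev;

        return ide_host_add(&foo_port_info, hws, 1, &host);
}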
IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3), + /* force 16-bit I/O operations */ + IDE_TFLAG_IO_16BIT = (1 << 4), + /* struct ide_cmd was allocated using kmalloc() */ + IDE_TFLAG_DYN = (1 << 5), + IDE_TFLAG_FS = (1 << 6), + IDE_TFLAG_MULTI_PIO = (1 << 7), + IDE_TFLAG_SET_XFER = (1 << 8), +}; + +enum { + IDE_FTFLAG_FLAGGED = (1 << 0), + IDE_FTFLAG_SET_IN_FLAGS = (1 << 1), + IDE_FTFLAG_OUT_DATA = (1 << 2), + IDE_FTFLAG_IN_DATA = (1 << 3), +}; + +struct ide_taskfile { + u8 data; /* 0: data byte (for TASKFILE ioctl) */ + union { /* 1: */ + u8 error; /* read: error */ + u8 feature; /* write: feature */ + }; + u8 nsect; /* 2: number of sectors */ + u8 lbal; /* 3: LBA low */ + u8 lbam; /* 4: LBA mid */ + u8 lbah; /* 5: LBA high */ + u8 device; /* 6: device select */ + union { /* 7: */ + u8 status; /* read: status */ + u8 command; /* write: command */ + }; +}; + +struct ide_cmd { + struct ide_taskfile tf; + struct ide_taskfile hob; + struct { + struct { + u8 tf; + u8 hob; + } out, in; + } valid; + + u16 tf_flags; + u8 ftf_flags; /* for TASKFILE ioctl */ + int protocol; + + int sg_nents; /* number of sg entries */ + int orig_sg_nents; + int sg_dma_direction; /* DMA transfer direction */ + + unsigned int nbytes; + unsigned int nleft; + unsigned int last_xfer_len; + + struct scatterlist *cursg; + unsigned int cursg_ofs; + + struct request *rq; /* copy of request */ +}; + +/* ATAPI packet command flags */ +enum { + /* set when an error is considered normal - no retry (ide-tape) */ + PC_FLAG_ABORT = (1 << 0), + PC_FLAG_SUPPRESS_ERROR = (1 << 1), + PC_FLAG_WAIT_FOR_DSC = (1 << 2), + PC_FLAG_DMA_OK = (1 << 3), + PC_FLAG_DMA_IN_PROGRESS = (1 << 4), + PC_FLAG_DMA_ERROR = (1 << 5), + PC_FLAG_WRITING = (1 << 6), +}; + +#define ATAPI_WAIT_PC (60 * HZ) + +struct ide_atapi_pc { + /* actual packet bytes */ + u8 c[12]; + /* incremented on each retry */ + int retries; + int error; + + /* bytes to transfer */ + int req_xfer; + + /* the corresponding request */ + struct request *rq; + + unsigned long flags; + + /* + * those are more or less driver-specific and some of them are subject + * to change/removal later. + */ + unsigned long timeout; +}; + +struct ide_devset; +struct ide_driver; + +#ifdef CONFIG_BLK_DEV_IDEACPI +struct ide_acpi_drive_link; +struct ide_acpi_hwif_link; +#endif + +struct ide_drive_s; + +struct ide_disk_ops { + int (*check)(struct ide_drive_s *, const char *); + int (*get_capacity)(struct ide_drive_s *); + void (*unlock_native_capacity)(struct ide_drive_s *); + void (*setup)(struct ide_drive_s *); + void (*flush)(struct ide_drive_s *); + int (*init_media)(struct ide_drive_s *, struct gendisk *); + int (*set_doorlock)(struct ide_drive_s *, struct gendisk *, + int); + ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *, + sector_t); + int (*ioctl)(struct ide_drive_s *, struct block_device *, + fmode_t, unsigned int, unsigned long); +}; + +/* ATAPI device flags */ +enum { + IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), + + /* ide-cd */ + /* Drive cannot eject the disc. */ + IDE_AFLAG_NO_EJECT = (1 << 1), + /* Drive is a pre ATAPI 1.2 drive. */ + IDE_AFLAG_PRE_ATAPI12 = (1 << 2), + /* TOC addresses are in BCD. */ + IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), + /* TOC track numbers are in BCD. */ + IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), + /* Saved TOC information is current. */ + IDE_AFLAG_TOC_VALID = (1 << 6), + /* We think that the drive door is locked. */ + IDE_AFLAG_DOOR_LOCKED = (1 << 7), + /* SET_CD_SPEED command is unsupported. 
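A hedged sketch of filling the taskfile/ide_cmd structures above for a simple no-data command: foo_set_features() is invented, ATA_CMD_SET_FEATURES comes from <linux/ata.h>, and ide_no_data_taskfile() is declared later in this header.

#include <linux/ata.h>
#include <linux/ide.h>
#include <linux/string.h>

static int foo_set_features(ide_drive_t *drive, u8 feature, u8 nsect)
{
        struct ide_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.tf.feature = feature;               /* SET FEATURES subcommand */
        cmd.tf.nsect   = nsect;                 /* subcommand argument */
        cmd.tf.command = ATA_CMD_SET_FEATURES;
        cmd.valid.out.tf = IDE_VALID_OUT_TF;    /* feature/nsect/LBA go out */
        cmd.valid.in.tf  = IDE_VALID_IN_TF;     /* read nsect/LBA back */

        return ide_no_data_taskfile(drive, &cmd);
}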
*/ + IDE_AFLAG_NO_SPEED_SELECT = (1 << 8), + IDE_AFLAG_VERTOS_300_SSD = (1 << 9), + IDE_AFLAG_VERTOS_600_ESD = (1 << 10), + IDE_AFLAG_SANYO_3CD = (1 << 11), + IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12), + IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13), + IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14), + + /* ide-floppy */ + /* Avoid commands not supported in Clik drive */ + IDE_AFLAG_CLIK_DRIVE = (1 << 15), + /* Requires BH algorithm for packets */ + IDE_AFLAG_ZIP_DRIVE = (1 << 16), + /* Supports format progress report */ + IDE_AFLAG_SRFP = (1 << 17), + + /* ide-tape */ + IDE_AFLAG_IGNORE_DSC = (1 << 18), + /* 0 When the tape position is unknown */ + IDE_AFLAG_ADDRESS_VALID = (1 << 19), + /* Device already opened */ + IDE_AFLAG_BUSY = (1 << 20), + /* Attempt to auto-detect the current user block size */ + IDE_AFLAG_DETECT_BS = (1 << 21), + /* Currently on a filemark */ + IDE_AFLAG_FILEMARK = (1 << 22), + /* 0 = no tape is loaded, so we don't rewind after ejecting */ + IDE_AFLAG_MEDIUM_PRESENT = (1 << 23), + + IDE_AFLAG_NO_AUTOCLOSE = (1 << 24), +}; + +/* device flags */ +enum { + /* restore settings after device reset */ + IDE_DFLAG_KEEP_SETTINGS = (1 << 0), + /* device is using DMA for read/write */ + IDE_DFLAG_USING_DMA = (1 << 1), + /* okay to unmask other IRQs */ + IDE_DFLAG_UNMASK = (1 << 2), + /* don't attempt flushes */ + IDE_DFLAG_NOFLUSH = (1 << 3), + /* DSC overlap */ + IDE_DFLAG_DSC_OVERLAP = (1 << 4), + /* give potential excess bandwidth */ + IDE_DFLAG_NICE1 = (1 << 5), + /* device is physically present */ + IDE_DFLAG_PRESENT = (1 << 6), + /* disable Host Protected Area */ + IDE_DFLAG_NOHPA = (1 << 7), + /* id read from device (synthetic if not set) */ + IDE_DFLAG_ID_READ = (1 << 8), + IDE_DFLAG_NOPROBE = (1 << 9), + /* need to do check_media_change() */ + IDE_DFLAG_REMOVABLE = (1 << 10), + /* needed for removable devices */ + IDE_DFLAG_ATTACH = (1 << 11), + IDE_DFLAG_FORCED_GEOM = (1 << 12), + /* disallow setting unmask bit */ + IDE_DFLAG_NO_UNMASK = (1 << 13), + /* disallow enabling 32-bit I/O */ + IDE_DFLAG_NO_IO_32BIT = (1 << 14), + /* for removable only: door lock/unlock works */ + IDE_DFLAG_DOORLOCKING = (1 << 15), + /* disallow DMA */ + IDE_DFLAG_NODMA = (1 << 16), + /* powermanagement told us not to do anything, so sleep nicely */ + IDE_DFLAG_BLOCKED = (1 << 17), + /* sleeping & sleep field valid */ + IDE_DFLAG_SLEEPING = (1 << 18), + IDE_DFLAG_POST_RESET = (1 << 19), + IDE_DFLAG_UDMA33_WARNED = (1 << 20), + IDE_DFLAG_LBA48 = (1 << 21), + /* status of write cache */ + IDE_DFLAG_WCACHE = (1 << 22), + /* used for ignoring ATA_DF */ + IDE_DFLAG_NOWERR = (1 << 23), + /* retrying in PIO */ + IDE_DFLAG_DMA_PIO_RETRY = (1 << 24), + IDE_DFLAG_LBA = (1 << 25), + /* don't unload heads */ + IDE_DFLAG_NO_UNLOAD = (1 << 26), + /* heads unloaded, please don't reset port */ + IDE_DFLAG_PARKED = (1 << 27), + IDE_DFLAG_MEDIA_CHANGED = (1 << 28), + /* write protect */ + IDE_DFLAG_WP = (1 << 29), + IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), + IDE_DFLAG_NIEN_QUIRK = (1 << 31), +}; + +struct ide_drive_s { + char name[4]; /* drive name, such as "hda" */ + char driver_req[10]; /* requests specific driver */ + + struct request_queue *queue; /* request queue */ + + struct request *rq; /* current request */ + void *driver_data; /* extra driver data */ + u16 *id; /* identification info */ +#ifdef CONFIG_IDE_PROC_FS + struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ + const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */ +#endif + struct hwif_s *hwif; /* actually (ide_hwif_t *) */ + + const 
struct ide_disk_ops *disk_ops; + + unsigned long dev_flags; + + unsigned long sleep; /* sleep until this time */ + unsigned long timeout; /* max time to wait for irq */ + + u8 special_flags; /* special action flags */ + + u8 select; /* basic drive/head select reg value */ + u8 retry_pio; /* retrying dma capable host in pio */ + u8 waiting_for_dma; /* dma currently in progress */ + u8 dma; /* atapi dma flag */ + + u8 init_speed; /* transfer rate set at boot */ + u8 current_speed; /* current transfer rate set */ + u8 desired_speed; /* desired transfer rate set */ + u8 pio_mode; /* for ->set_pio_mode _only_ */ + u8 dma_mode; /* for ->set_dma_mode _only_ */ + u8 dn; /* now wide spread use */ + u8 acoustic; /* acoustic management */ + u8 media; /* disk, cdrom, tape, floppy, ... */ + u8 ready_stat; /* min status value for drive ready */ + u8 mult_count; /* current multiple sector setting */ + u8 mult_req; /* requested multiple sector setting */ + u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */ + u8 bad_wstat; /* used for ignoring ATA_DF */ + u8 head; /* "real" number of heads */ + u8 sect; /* "real" sectors per track */ + u8 bios_head; /* BIOS/fdisk/LILO number of heads */ + u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */ + + /* delay this long before sending packet command */ + u8 pc_delay; + + unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */ + unsigned int cyl; /* "real" number of cyls */ + void *drive_data; /* used by set_pio_mode/dev_select() */ + unsigned int failures; /* current failure count */ + unsigned int max_failures; /* maximum allowed failure count */ + u64 probed_capacity;/* initial/native media capacity */ + u64 capacity64; /* total number of sectors */ + + int lun; /* logical unit */ + int crc_count; /* crc counter to reduce drive speed */ + + unsigned long debug_mask; /* debugging levels switch */ + +#ifdef CONFIG_BLK_DEV_IDEACPI + struct ide_acpi_drive_link *acpidata; +#endif + struct list_head list; + struct device gendev; + struct completion gendev_rel_comp; /* to deal with device release() */ + + /* current packet command */ + struct ide_atapi_pc *pc; + + /* last failed packet command */ + struct ide_atapi_pc *failed_pc; + + /* callback for packet commands */ + int (*pc_callback)(struct ide_drive_s *, int); + + ide_startstop_t (*irq_handler)(struct ide_drive_s *); + + unsigned long atapi_flags; + + struct ide_atapi_pc request_sense_pc; + + /* current sense rq and buffer */ + bool sense_rq_armed; + struct request *sense_rq; + struct request_sense sense_data; +}; + +typedef struct ide_drive_s ide_drive_t; + +#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev) + +#define to_ide_drv(obj, cont_type) \ + container_of(obj, struct cont_type, dev) + +#define ide_drv_g(disk, cont_type) \ + container_of((disk)->private_data, struct cont_type, driver) + +struct ide_port_info; + +struct ide_tp_ops { + void (*exec_command)(struct hwif_s *, u8); + u8 (*read_status)(struct hwif_s *); + u8 (*read_altstatus)(struct hwif_s *); + void (*write_devctl)(struct hwif_s *, u8); + + void (*dev_select)(ide_drive_t *); + void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8); + void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8); + + void (*input_data)(ide_drive_t *, struct ide_cmd *, + void *, unsigned int); + void (*output_data)(ide_drive_t *, struct ide_cmd *, + void *, unsigned int); +}; + +extern const struct ide_tp_ops default_tp_ops; + +/** + * struct ide_port_ops - IDE port operations + * + * @init_dev: host specific initialization of a device + * 
@set_pio_mode: routine to program host for PIO mode + * @set_dma_mode: routine to program host for DMA mode + * @reset_poll: chipset polling based on hba specifics + * @pre_reset: chipset specific changes to default for device-hba resets + * @resetproc: routine to reset controller after a disk reset + * @maskproc: special host masking for drive selection + * @quirkproc: check host's drive quirk list + * @clear_irq: clear IRQ + * + * @mdma_filter: filter MDMA modes + * @udma_filter: filter UDMA modes + * + * @cable_detect: detect cable type + */ +struct ide_port_ops { + void (*init_dev)(ide_drive_t *); + void (*set_pio_mode)(struct hwif_s *, ide_drive_t *); + void (*set_dma_mode)(struct hwif_s *, ide_drive_t *); + blk_status_t (*reset_poll)(ide_drive_t *); + void (*pre_reset)(ide_drive_t *); + void (*resetproc)(ide_drive_t *); + void (*maskproc)(ide_drive_t *, int); + void (*quirkproc)(ide_drive_t *); + void (*clear_irq)(ide_drive_t *); + int (*test_irq)(struct hwif_s *); + + u8 (*mdma_filter)(ide_drive_t *); + u8 (*udma_filter)(ide_drive_t *); + + u8 (*cable_detect)(struct hwif_s *); +}; + +struct ide_dma_ops { + void (*dma_host_set)(struct ide_drive_s *, int); + int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *); + void (*dma_start)(struct ide_drive_s *); + int (*dma_end)(struct ide_drive_s *); + int (*dma_test_irq)(struct ide_drive_s *); + void (*dma_lost_irq)(struct ide_drive_s *); + /* below ones are optional */ + int (*dma_check)(struct ide_drive_s *, struct ide_cmd *); + int (*dma_timer_expiry)(struct ide_drive_s *); + void (*dma_clear)(struct ide_drive_s *); + /* + * The following method is optional and only required to be + * implemented for the SFF-8038i compatible controllers. + */ + u8 (*dma_sff_read_status)(struct hwif_s *); +}; + +enum { + IDE_PFLAG_PROBING = (1 << 0), +}; + +struct ide_host; + +typedef struct hwif_s { + struct hwif_s *mate; /* other hwif from same PCI chip */ + struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ + + struct ide_host *host; + + char name[6]; /* name of interface, eg. "ide0" */ + + struct ide_io_ports io_ports; + + unsigned long sata_scr[SATA_NR_PORTS]; + + ide_drive_t *devices[MAX_DRIVES + 1]; + + unsigned long port_flags; + + u8 major; /* our major number */ + u8 index; /* 0 for ide0; 1 for ide1; ... */ + u8 channel; /* for dual-port chips: 0=primary, 1=secondary */ + + u32 host_flags; + + u8 pio_mask; + + u8 ultra_mask; + u8 mwdma_mask; + u8 swdma_mask; + + u8 cbl; /* cable type */ + + hwif_chipset_t chipset; /* sub-module for tuning.. 
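A hedged sketch of a trivial implementation of two of the hooks described above; the foo_* names are invented and ATA_CBL_PATA40 comes from <linux/ata.h>. A real host driver would actually touch its timing registers here.

#include <linux/ata.h>
#include <linux/ide.h>

static void foo_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
        /* Program the controller's timing registers for drive->pio_mode here. */
}

static u8 foo_cable_detect(ide_hwif_t *hwif)
{
        return ATA_CBL_PATA40;          /* pretend a 40-wire cable is attached */
}

static const struct ide_port_ops foo_port_ops = {
        .set_pio_mode   = foo_set_pio_mode,
        .cable_detect   = foo_cable_detect,
};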
*/ + + struct device *dev; + + void (*rw_disk)(ide_drive_t *, struct request *); + + const struct ide_tp_ops *tp_ops; + const struct ide_port_ops *port_ops; + const struct ide_dma_ops *dma_ops; + + /* dma physical region descriptor table (cpu view) */ + unsigned int *dmatable_cpu; + /* dma physical region descriptor table (dma view) */ + dma_addr_t dmatable_dma; + + /* maximum number of PRD table entries */ + int prd_max_nents; + /* PRD entry size in bytes */ + int prd_ent_size; + + /* Scatter-gather list used to build the above */ + struct scatterlist *sg_table; + int sg_max_nents; /* Maximum number of entries in it */ + + struct ide_cmd cmd; /* current command */ + + int rqsize; /* max sectors per request */ + int irq; /* our irq number */ + + unsigned long dma_base; /* base addr for dma ports */ + + unsigned long config_data; /* for use by chipset-specific code */ + unsigned long select_data; /* for use by chipset-specific code */ + + unsigned long extra_base; /* extra addr for dma ports */ + unsigned extra_ports; /* number of extra dma ports */ + + unsigned present : 1; /* this interface exists */ + unsigned busy : 1; /* serializes devices on a port */ + + struct device gendev; + struct device *portdev; + + struct completion gendev_rel_comp; /* To deal with device release() */ + + void *hwif_data; /* extra hwif data */ + +#ifdef CONFIG_BLK_DEV_IDEACPI + struct ide_acpi_hwif_link *acpidata; +#endif + + /* IRQ handler, if active */ + ide_startstop_t (*handler)(ide_drive_t *); + + /* BOOL: polling active & poll_timeout field valid */ + unsigned int polling : 1; + + /* current drive */ + ide_drive_t *cur_dev; + + /* current request */ + struct request *rq; + + /* failsafe timer */ + struct timer_list timer; + /* timeout value during long polls */ + unsigned long poll_timeout; + /* queried upon timeouts */ + int (*expiry)(ide_drive_t *); + + int req_gen; + int req_gen_timer; + + spinlock_t lock; +} ____cacheline_internodealigned_in_smp ide_hwif_t; + +#define MAX_HOST_PORTS 4 + +struct ide_host { + ide_hwif_t *ports[MAX_HOST_PORTS + 1]; + unsigned int n_ports; + struct device *dev[2]; + + int (*init_chipset)(struct pci_dev *); + + void (*get_lock)(irq_handler_t, void *); + void (*release_lock)(void); + + irq_handler_t irq_handler; + + unsigned long host_flags; + + int irq_flags; + + void *host_priv; + ide_hwif_t *cur_port; /* for hosts requiring serialization */ + + /* used for hosts requiring serialization */ + volatile unsigned long host_busy; +}; + +#define IDE_HOST_BUSY 0 + +/* + * internal ide interrupt handler type + */ +typedef ide_startstop_t (ide_handler_t)(ide_drive_t *); +typedef int (ide_expiry_t)(ide_drive_t *); + +/* used by ide-cd, ide-floppy, etc. 
*/ +typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned); + +extern struct mutex ide_setting_mtx; + +/* + * configurable drive settings + */ + +#define DS_SYNC (1 << 0) + +struct ide_devset { + int (*get)(ide_drive_t *); + int (*set)(ide_drive_t *, int); + unsigned int flags; +}; + +#define __DEVSET(_flags, _get, _set) { \ + .flags = _flags, \ + .get = _get, \ + .set = _set, \ +} + +#define ide_devset_get(name, field) \ +static int get_##name(ide_drive_t *drive) \ +{ \ + return drive->field; \ +} + +#define ide_devset_set(name, field) \ +static int set_##name(ide_drive_t *drive, int arg) \ +{ \ + drive->field = arg; \ + return 0; \ +} + +#define ide_devset_get_flag(name, flag) \ +static int get_##name(ide_drive_t *drive) \ +{ \ + return !!(drive->dev_flags & flag); \ +} + +#define ide_devset_set_flag(name, flag) \ +static int set_##name(ide_drive_t *drive, int arg) \ +{ \ + if (arg) \ + drive->dev_flags |= flag; \ + else \ + drive->dev_flags &= ~flag; \ + return 0; \ +} + +#define __IDE_DEVSET(_name, _flags, _get, _set) \ +const struct ide_devset ide_devset_##_name = \ + __DEVSET(_flags, _get, _set) + +#define IDE_DEVSET(_name, _flags, _get, _set) \ +static __IDE_DEVSET(_name, _flags, _get, _set) + +#define ide_devset_rw(_name, _func) \ +IDE_DEVSET(_name, 0, get_##_func, set_##_func) + +#define ide_devset_w(_name, _func) \ +IDE_DEVSET(_name, 0, NULL, set_##_func) + +#define ide_ext_devset_rw(_name, _func) \ +__IDE_DEVSET(_name, 0, get_##_func, set_##_func) + +#define ide_ext_devset_rw_sync(_name, _func) \ +__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func) + +#define ide_decl_devset(_name) \ +extern const struct ide_devset ide_devset_##_name + +ide_decl_devset(io_32bit); +ide_decl_devset(keepsettings); +ide_decl_devset(pio_mode); +ide_decl_devset(unmaskirq); +ide_decl_devset(using_dma); + +#ifdef CONFIG_IDE_PROC_FS +/* + * /proc/ide interface + */ + +#define ide_devset_rw_field(_name, _field) \ +ide_devset_get(_name, _field); \ +ide_devset_set(_name, _field); \ +IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name) + +#define ide_devset_rw_flag(_name, _field) \ +ide_devset_get_flag(_name, _field); \ +ide_devset_set_flag(_name, _field); \ +IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name) + +struct ide_proc_devset { + const char *name; + const struct ide_devset *setting; + int min, max; + int (*mulf)(ide_drive_t *); + int (*divf)(ide_drive_t *); +}; + +#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \ + .name = __stringify(_name), \ + .setting = &ide_devset_##_name, \ + .min = _min, \ + .max = _max, \ + .mulf = _mulf, \ + .divf = _divf, \ +} + +#define IDE_PROC_DEVSET(_name, _min, _max) \ +__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL) + +typedef struct { + const char *name; + umode_t mode; + int (*show)(struct seq_file *, void *); +} ide_proc_entry_t; + +void proc_ide_create(void); +void proc_ide_destroy(void); +void ide_proc_register_port(ide_hwif_t *); +void ide_proc_port_register_devices(ide_hwif_t *); +void ide_proc_unregister_device(ide_drive_t *); +void ide_proc_unregister_port(ide_hwif_t *); +void ide_proc_register_driver(ide_drive_t *, struct ide_driver *); +void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *); + +int ide_capacity_proc_show(struct seq_file *m, void *v); +int ide_geometry_proc_show(struct seq_file *m, void *v); +#else +static inline void proc_ide_create(void) { ; } +static inline void proc_ide_destroy(void) { ; } +static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; } +static inline void 
ide_proc_port_register_devices(ide_hwif_t *hwif) { ; } +static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; } +static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; } +static inline void ide_proc_register_driver(ide_drive_t *drive, + struct ide_driver *driver) { ; } +static inline void ide_proc_unregister_driver(ide_drive_t *drive, + struct ide_driver *driver) { ; } +#endif + +enum { + /* enter/exit functions */ + IDE_DBG_FUNC = (1 << 0), + /* sense key/asc handling */ + IDE_DBG_SENSE = (1 << 1), + /* packet commands handling */ + IDE_DBG_PC = (1 << 2), + /* request handling */ + IDE_DBG_RQ = (1 << 3), + /* driver probing/setup */ + IDE_DBG_PROBE = (1 << 4), +}; + +/* DRV_NAME has to be defined in the driver before using the macro below */ +#define __ide_debug_log(lvl, fmt, args...) \ +{ \ + if (unlikely(drive->debug_mask & lvl)) \ + printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \ + __func__, ## args); \ +} + +/* + * Power Management state machine (rq->pm->pm_step). + * + * For each step, the core calls ide_start_power_step() first. + * This can return: + * - ide_stopped : In this case, the core calls us back again unless + * step have been set to ide_power_state_completed. + * - ide_started : In this case, the channel is left busy until an + * async event (interrupt) occurs. + * Typically, ide_start_power_step() will issue a taskfile request with + * do_rw_taskfile(). + * + * Upon reception of the interrupt, the core will call ide_complete_power_step() + * with the error code if any. This routine should update the step value + * and return. It should not start a new request. The core will call + * ide_start_power_step() for the new step value, unless step have been + * set to IDE_PM_COMPLETED. + */ +enum { + IDE_PM_START_SUSPEND, + IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND, + IDE_PM_STANDBY, + + IDE_PM_START_RESUME, + IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME, + IDE_PM_IDLE, + IDE_PM_RESTORE_DMA, + + IDE_PM_COMPLETED, +}; + +int generic_ide_suspend(struct device *, pm_message_t); +int generic_ide_resume(struct device *); + +void ide_complete_power_step(ide_drive_t *, struct request *); +ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *); +void ide_complete_pm_rq(ide_drive_t *, struct request *); +void ide_check_pm_state(ide_drive_t *, struct request *); + +/* + * Subdrivers support. + * + * The gendriver.owner field should be set to the module owner of this driver. 
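As a hedged aside, a sketch of how the devset helper macros defined a little earlier in this header compose in a subdriver: one device flag exposed as a synchronized get/set pair. The "nowerr" name is used only as an example setting.

#include <linux/ide.h>

/* Expose the IDE_DFLAG_NOWERR device flag as a "nowerr" setting. */
ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);
ide_devset_set_flag(nowerr, IDE_DFLAG_NOWERR);
IDE_DEVSET(nowerr, DS_SYNC, get_nowerr, set_nowerr);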
+ * The gendriver.name field should be set to the name of this driver + */ +struct ide_driver { + const char *version; + ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); + struct device_driver gen_driver; + int (*probe)(ide_drive_t *); + void (*remove)(ide_drive_t *); + void (*resume)(ide_drive_t *); + void (*shutdown)(ide_drive_t *); +#ifdef CONFIG_IDE_PROC_FS + ide_proc_entry_t * (*proc_entries)(ide_drive_t *); + const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *); +#endif +}; + +#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver) + +int ide_device_get(ide_drive_t *); +void ide_device_put(ide_drive_t *); + +struct ide_ioctl_devset { + unsigned int get_ioctl; + unsigned int set_ioctl; + const struct ide_devset *setting; +}; + +int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int, + unsigned long, const struct ide_ioctl_devset *); + +int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long); + +extern int ide_vlb_clk; +extern int ide_pci_clk; + +int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int); +void ide_kill_rq(ide_drive_t *, struct request *); + +void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); +void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); + +void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *, + unsigned int); + +void ide_pad_transfer(ide_drive_t *, int, int); + +ide_startstop_t ide_error(ide_drive_t *, const char *, u8); + +void ide_fix_driveid(u16 *); + +extern void ide_fixstring(u8 *, const int, const int); + +int ide_busy_sleep(ide_drive_t *, unsigned long, int); + +int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *); +int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long); + +ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *); +ide_startstop_t ide_do_devset(ide_drive_t *, struct request *); + +extern ide_startstop_t ide_do_reset (ide_drive_t *); + +extern int ide_devset_execute(ide_drive_t *drive, + const struct ide_devset *setting, int arg); + +void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8); +int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int); + +void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd); +void ide_tf_dump(const char *, struct ide_cmd *); + +void ide_exec_command(ide_hwif_t *, u8); +u8 ide_read_status(ide_hwif_t *); +u8 ide_read_altstatus(ide_hwif_t *); +void ide_write_devctl(ide_hwif_t *, u8); + +void ide_dev_select(ide_drive_t *); +void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8); +void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8); + +void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); +void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); + +void SELECT_MASK(ide_drive_t *, int); + +u8 ide_read_error(ide_drive_t *); +void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *); + +int ide_check_ireason(ide_drive_t *, struct request *, int, int, int); + +int ide_check_atapi_device(ide_drive_t *, const char *); + +void ide_init_pc(struct ide_atapi_pc *); + +/* Disk head parking */ +extern wait_queue_head_t ide_park_wq; +ssize_t ide_park_show(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t ide_park_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len); + +/* + * Special requests for ide-tape block device strategy routine. 
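A hedged sketch of the minimal registration glue the subdriver comment above describes: an ide_driver with owner and name filled in on gen_driver, registered on the IDE bus type declared elsewhere in this header. Every foo_* identifier is invented, and a real subdriver would do real work in do_request/probe.

#include <linux/device.h>
#include <linux/ide.h>
#include <linux/module.h>

static ide_startstop_t foo_do_request(ide_drive_t *drive, struct request *rq,
                                      sector_t block)
{
        return ide_stopped;             /* a real subdriver issues a taskfile here */
}

static int foo_probe(ide_drive_t *drive)
{
        return 0;                       /* a real probe checks drive->media etc. */
}

static void foo_remove(ide_drive_t *drive)
{
}

static struct ide_driver foo_driver = {
        .version        = "0.1",
        .do_request     = foo_do_request,
        .probe          = foo_probe,
        .remove         = foo_remove,
        .gen_driver = {
                .owner  = THIS_MODULE,
                .name   = "ide-foo",
                .bus    = &ide_bus_type,
        },
};

static int __init foo_init(void)
{
        return driver_register(&foo_driver.gen_driver);
}
module_init(foo_init);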
+ * + * In order to service a character device command, we add special requests to + * the tail of our block device request queue and wait for their completion. + */ +enum { + REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */ + REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */ + REQ_IDETAPE_READ = (1 << 2), + REQ_IDETAPE_WRITE = (1 << 3), +}; + +int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *, + void *, unsigned int); + +int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *); +int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); +int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); +void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); +void ide_retry_pc(ide_drive_t *drive); + +void ide_prep_sense(ide_drive_t *drive, struct request *rq); +int ide_queue_sense_rq(ide_drive_t *drive, void *special); + +int ide_cd_expiry(ide_drive_t *); + +int ide_cd_get_xferlen(struct request *); + +ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *); + +ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *); + +void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int); + +void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8); + +int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16); +int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *); + +int ide_taskfile_ioctl(ide_drive_t *, unsigned long); + +int ide_dev_read_id(ide_drive_t *, u8, u16 *, int); + +extern int ide_driveid_update(ide_drive_t *); +extern int ide_config_drive_speed(ide_drive_t *, u8); +extern u8 eighty_ninty_three (ide_drive_t *); +extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *); + +extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout); + +extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); + +extern void ide_timer_expiry(struct timer_list *t); +extern irqreturn_t ide_intr(int irq, void *dev_id); +extern void do_ide_request(struct request_queue *); +extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); + +void ide_init_disk(struct gendisk *, ide_drive_t *); + +#ifdef CONFIG_IDEPCI_PCIBUS_ORDER +extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name); +#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME) +#else +#define ide_pci_register_driver(d) pci_register_driver(d) +#endif + +static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev) +{ + if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5) + return 1; + return 0; +} + +void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, + struct ide_hw *, struct ide_hw **); +void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); + +#ifdef CONFIG_BLK_DEV_IDEDMA_PCI +int ide_pci_set_master(struct pci_dev *, const char *); +unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); +int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *); +int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); +#else +static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, + const struct ide_port_info *d) +{ + return -EINVAL; +} +#endif + +struct ide_pci_enablebit { + u8 reg; /* byte pci reg holding the enable-bit */ + u8 mask; /* mask to isolate the enable-bit */ + u8 val; /* value of masked reg when "enabled" */ +}; + +enum { + /* Uses ISA control ports not PCI ones. 
*/ + IDE_HFLAG_ISA_PORTS = (1 << 0), + /* single port device */ + IDE_HFLAG_SINGLE = (1 << 1), + /* don't use legacy PIO blacklist */ + IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2), + /* set for the second port of QD65xx */ + IDE_HFLAG_QD_2ND_PORT = (1 << 3), + /* use PIO8/9 for prefetch off/on */ + IDE_HFLAG_ABUSE_PREFETCH = (1 << 4), + /* use PIO6/7 for fast-devsel off/on */ + IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5), + /* use 100-102 and 200-202 PIO values to set DMA modes */ + IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6), + /* + * keep DMA setting when programming PIO mode, may be used only + * for hosts which have separate PIO and DMA timings (ie. PMAC) + */ + IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7), + /* program host for the transfer mode after programming device */ + IDE_HFLAG_POST_SET_MODE = (1 << 8), + /* don't program host/device for the transfer mode ("smart" hosts) */ + IDE_HFLAG_NO_SET_MODE = (1 << 9), + /* trust BIOS for programming chipset/device for DMA */ + IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10), + /* host is CS5510/CS5520 */ + IDE_HFLAG_CS5520 = (1 << 11), + /* ATAPI DMA is unsupported */ + IDE_HFLAG_NO_ATAPI_DMA = (1 << 12), + /* set if host is a "non-bootable" controller */ + IDE_HFLAG_NON_BOOTABLE = (1 << 13), + /* host doesn't support DMA */ + IDE_HFLAG_NO_DMA = (1 << 14), + /* check if host is PCI IDE device before allowing DMA */ + IDE_HFLAG_NO_AUTODMA = (1 << 15), + /* host uses MMIO */ + IDE_HFLAG_MMIO = (1 << 16), + /* no LBA48 */ + IDE_HFLAG_NO_LBA48 = (1 << 17), + /* no LBA48 DMA */ + IDE_HFLAG_NO_LBA48_DMA = (1 << 18), + /* data FIFO is cleared by an error */ + IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19), + /* serialize ports */ + IDE_HFLAG_SERIALIZE = (1 << 20), + /* host is DTC2278 */ + IDE_HFLAG_DTC2278 = (1 << 21), + /* 4 devices on a single set of I/O ports */ + IDE_HFLAG_4DRIVES = (1 << 22), + /* host is TRM290 */ + IDE_HFLAG_TRM290 = (1 << 23), + /* use 32-bit I/O ops */ + IDE_HFLAG_IO_32BIT = (1 << 24), + /* unmask IRQs */ + IDE_HFLAG_UNMASK_IRQS = (1 << 25), + IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26), + /* serialize ports if DMA is possible (for sl82c105) */ + IDE_HFLAG_SERIALIZE_DMA = (1 << 27), + /* force host out of "simplex" mode */ + IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28), + /* DSC overlap is unsupported */ + IDE_HFLAG_NO_DSC = (1 << 29), + /* never use 32-bit I/O ops */ + IDE_HFLAG_NO_IO_32BIT = (1 << 30), + /* never unmask IRQs */ + IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), +}; + +#ifdef CONFIG_BLK_DEV_OFFBOARD +# define IDE_HFLAG_OFF_BOARD 0 +#else +# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE +#endif + +struct ide_port_info { + char *name; + + int (*init_chipset)(struct pci_dev *); + + void (*get_lock)(irq_handler_t, void *); + void (*release_lock)(void); + + void (*init_iops)(ide_hwif_t *); + void (*init_hwif)(ide_hwif_t *); + int (*init_dma)(ide_hwif_t *, + const struct ide_port_info *); + + const struct ide_tp_ops *tp_ops; + const struct ide_port_ops *port_ops; + const struct ide_dma_ops *dma_ops; + + struct ide_pci_enablebit enablebits[2]; + + hwif_chipset_t chipset; + + u16 max_sectors; /* if < than the default one */ + + u32 host_flags; + + int irq_flags; + + u8 pio_mask; + u8 swdma_mask; + u8 mwdma_mask; + u8 udma_mask; +}; + +/* + * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME + * requests. + */ +struct ide_pm_state { + /* PM state machine step value, currently driver specific */ + int pm_step; + /* requested PM state value (S1, S2, S3, S4, ...) 
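A hedged sketch of struct ide_port_info as a PCI host driver might fill it in: the foo_* names are invented, the ATA_* mask macros come from <linux/ata.h>, and ide_pci_init_one() is declared just below. A real driver would also wire up port_ops/dma_ops and an enablebits table.

#include <linux/ata.h>
#include <linux/ide.h>
#include <linux/pci.h>

static const struct ide_port_info foo_chipset_port_info = {
        .name           = "foo-ide",
        .host_flags     = IDE_HFLAG_SINGLE | IDE_HFLAG_MMIO,
        .pio_mask       = ATA_PIO4,
        .mwdma_mask     = ATA_MWDMA2,
        .udma_mask      = ATA_UDMA5,
};

static int foo_chipset_init_one(struct pci_dev *dev,
                                const struct pci_device_id *id)
{
        return ide_pci_init_one(dev, &foo_chipset_port_info, NULL);
}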
*/ + u32 pm_state; + void* data; /* for driver use */ +}; + + +int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *); +int ide_pci_init_two(struct pci_dev *, struct pci_dev *, + const struct ide_port_info *, void *); +void ide_pci_remove(struct pci_dev *); + +#ifdef CONFIG_PM +int ide_pci_suspend(struct pci_dev *, pm_message_t); +int ide_pci_resume(struct pci_dev *); +#else +#define ide_pci_suspend NULL +#define ide_pci_resume NULL +#endif + +void ide_map_sg(ide_drive_t *, struct ide_cmd *); +void ide_init_sg_cmd(struct ide_cmd *, unsigned int); + +#define BAD_DMA_DRIVE 0 +#define GOOD_DMA_DRIVE 1 + +struct drive_list_entry { + const char *id_model; + const char *id_firmware; +}; + +int ide_in_drive_list(u16 *, const struct drive_list_entry *); + +#ifdef CONFIG_BLK_DEV_IDEDMA +int ide_dma_good_drive(ide_drive_t *); +int __ide_dma_bad_drive(ide_drive_t *); + +u8 ide_find_dma_mode(ide_drive_t *, u8); + +static inline u8 ide_max_dma_mode(ide_drive_t *drive) +{ + return ide_find_dma_mode(drive, XFER_UDMA_6); +} + +void ide_dma_off_quietly(ide_drive_t *); +void ide_dma_off(ide_drive_t *); +void ide_dma_on(ide_drive_t *); +int ide_set_dma(ide_drive_t *); +void ide_check_dma_crc(ide_drive_t *); +ide_startstop_t ide_dma_intr(ide_drive_t *); + +int ide_allocate_dma_engine(ide_hwif_t *); +void ide_release_dma_engine(ide_hwif_t *); + +int ide_dma_prepare(ide_drive_t *, struct ide_cmd *); +void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *); + +#ifdef CONFIG_BLK_DEV_IDEDMA_SFF +int config_drive_for_dma(ide_drive_t *); +int ide_build_dmatable(ide_drive_t *, struct ide_cmd *); +void ide_dma_host_set(ide_drive_t *, int); +int ide_dma_setup(ide_drive_t *, struct ide_cmd *); +extern void ide_dma_start(ide_drive_t *); +int ide_dma_end(ide_drive_t *); +int ide_dma_test_irq(ide_drive_t *); +int ide_dma_sff_timer_expiry(ide_drive_t *); +u8 ide_dma_sff_read_status(ide_hwif_t *); +extern const struct ide_dma_ops sff_dma_ops; +#else +static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; } +#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ + +void ide_dma_lost_irq(ide_drive_t *); +ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int); + +#else +static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; } +static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; } +static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; } +static inline void ide_dma_off(ide_drive_t *drive) { ; } +static inline void ide_dma_on(ide_drive_t *drive) { ; } +static inline void ide_dma_verbose(ide_drive_t *drive) { ; } +static inline int ide_set_dma(ide_drive_t *drive) { return 1; } +static inline void ide_check_dma_crc(ide_drive_t *drive) { ; } +static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; } +static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; } +static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; } +static inline int ide_dma_prepare(ide_drive_t *drive, + struct ide_cmd *cmd) { return 1; } +static inline void ide_dma_unmap_sg(ide_drive_t *drive, + struct ide_cmd *cmd) { ; } +#endif /* CONFIG_BLK_DEV_IDEDMA */ + +#ifdef CONFIG_BLK_DEV_IDEACPI +int ide_acpi_init(void); +bool ide_port_acpi(ide_hwif_t *hwif); +extern int ide_acpi_exec_tfs(ide_drive_t *drive); +extern void ide_acpi_get_timing(ide_hwif_t *hwif); +extern void ide_acpi_push_timing(ide_hwif_t *hwif); +void ide_acpi_init_port(ide_hwif_t *); +void ide_acpi_port_init_devices(ide_hwif_t *); +extern void 
ide_acpi_set_state(ide_hwif_t *hwif, int on); +#else +static inline int ide_acpi_init(void) { return 0; } +static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; } +static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; } +static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; } +static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; } +static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; } +static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; } +static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} +#endif + +void ide_register_region(struct gendisk *); +void ide_unregister_region(struct gendisk *); + +void ide_check_nien_quirk_list(ide_drive_t *); +void ide_undecoded_slave(ide_drive_t *); + +void ide_port_apply_params(ide_hwif_t *); +int ide_sysfs_register_port(ide_hwif_t *); + +struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **, + unsigned int); +void ide_host_free(struct ide_host *); +int ide_host_register(struct ide_host *, const struct ide_port_info *, + struct ide_hw **); +int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int, + struct ide_host **); +void ide_host_remove(struct ide_host *); +int ide_legacy_device_add(const struct ide_port_info *, unsigned long); +void ide_port_unregister_devices(ide_hwif_t *); +void ide_port_scan(ide_hwif_t *); + +static inline void *ide_get_hwifdata (ide_hwif_t * hwif) +{ + return hwif->hwif_data; +} + +static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data) +{ + hwif->hwif_data = data; +} + +u64 ide_get_lba_addr(struct ide_cmd *, int); +u8 ide_dump_status(ide_drive_t *, const char *, u8); + +struct ide_timing { + u8 mode; + u8 setup; /* t1 */ + u16 act8b; /* t2 for 8-bit io */ + u16 rec8b; /* t2i for 8-bit io */ + u16 cyc8b; /* t0 for 8-bit io */ + u16 active; /* t2 or tD */ + u16 recover; /* t2i or tK */ + u16 cycle; /* t0 */ + u16 udma; /* t2CYCTYP/2 */ +}; + +enum { + IDE_TIMING_SETUP = (1 << 0), + IDE_TIMING_ACT8B = (1 << 1), + IDE_TIMING_REC8B = (1 << 2), + IDE_TIMING_CYC8B = (1 << 3), + IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B | + IDE_TIMING_CYC8B, + IDE_TIMING_ACTIVE = (1 << 4), + IDE_TIMING_RECOVER = (1 << 5), + IDE_TIMING_CYCLE = (1 << 6), + IDE_TIMING_UDMA = (1 << 7), + IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT | + IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER | + IDE_TIMING_CYCLE | IDE_TIMING_UDMA, +}; + +struct ide_timing *ide_timing_find_mode(u8); +u16 ide_pio_cycle_time(ide_drive_t *, u8); +void ide_timing_merge(struct ide_timing *, struct ide_timing *, + struct ide_timing *, unsigned int); +int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int); + +#ifdef CONFIG_IDE_XFER_MODE +int ide_scan_pio_blacklist(char *); +const char *ide_xfer_verbose(u8); +int ide_pio_need_iordy(ide_drive_t *, const u8); +int ide_set_pio_mode(ide_drive_t *, u8); +int ide_set_dma_mode(ide_drive_t *, u8); +void ide_set_pio(ide_drive_t *, u8); +int ide_set_xfer_rate(ide_drive_t *, u8); +#else +static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; } +static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; } +#endif + +static inline void ide_set_max_pio(ide_drive_t *drive) +{ + ide_set_pio(drive, 255); +} + +char *ide_media_string(ide_drive_t *); + +extern const struct attribute_group *ide_dev_groups[]; +extern struct bus_type ide_bus_type; +extern struct class *ide_port_class; + +static inline void ide_dump_identify(u8 *id) +{ + print_hex_dump(KERN_INFO, "", 
DUMP_PREFIX_NONE, 16, 2, id, 512, 0); +} + +static inline int hwif_to_node(ide_hwif_t *hwif) +{ + return hwif->dev ? dev_to_node(hwif->dev) : -1; +} + +static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive) +{ + ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1]; + + return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL; +} + +static inline void *ide_get_drivedata(ide_drive_t *drive) +{ + return drive->drive_data; +} + +static inline void ide_set_drivedata(ide_drive_t *drive, void *data) +{ + drive->drive_data = data; +} + +#define ide_port_for_each_dev(i, dev, port) \ + for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) + +#define ide_port_for_each_present_dev(i, dev, port) \ + for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \ + if ((dev)->dev_flags & IDE_DFLAG_PRESENT) + +#define ide_host_for_each_port(i, port, host) \ + for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++) + + +#endif /* _IDE_H */ diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h new file mode 100644 index 000000000..bdc0293fb --- /dev/null +++ b/include/linux/idle_inject.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Linaro Ltd + * + * Author: Daniel Lezcano + * + */ +#ifndef __IDLE_INJECT_H__ +#define __IDLE_INJECT_H__ + +/* private idle injection device structure */ +struct idle_inject_device; + +struct idle_inject_device *idle_inject_register(struct cpumask *cpumask); + +void idle_inject_unregister(struct idle_inject_device *ii_dev); + +int idle_inject_start(struct idle_inject_device *ii_dev); + +void idle_inject_stop(struct idle_inject_device *ii_dev); + +void idle_inject_set_duration(struct idle_inject_device *ii_dev, + unsigned int run_duration_ms, + unsigned int idle_duration_ms); + +void idle_inject_get_duration(struct idle_inject_device *ii_dev, + unsigned int *run_duration_ms, + unsigned int *idle_duration_ms); +#endif /* __IDLE_INJECT_H__ */ diff --git a/include/linux/idr.h b/include/linux/idr.h new file mode 100644 index 000000000..b6c6151c7 --- /dev/null +++ b/include/linux/idr.h @@ -0,0 +1,309 @@ +/* + * include/linux/idr.h + * + * 2002-10-18 written by Jim Houston jim.houston@ccur.com + * Copyright (C) 2002 by Concurrent Computer Corporation + * Distributed under the GNU GPL license version 2. + * + * Small id to pointer translation service avoiding fixed sized + * tables. + */ + +#ifndef __IDR_H__ +#define __IDR_H__ + +#include +#include +#include + +struct idr { + struct radix_tree_root idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; + +/* + * The IDR API does not expose the tagging functionality of the radix tree + * to users. Use tag 0 to track whether a node has free space below it. + */ +#define IDR_FREE 0 + +/* Set the IDR flag and the IDR_FREE tag */ +#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \ + (1 << (ROOT_TAG_SHIFT + IDR_FREE))) + +#define IDR_INIT_BASE(name, base) { \ + .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \ + .idr_base = (base), \ + .idr_next = 0, \ +} + +/** + * IDR_INIT() - Initialise an IDR. + * @name: Name of IDR. + * + * A freshly-initialised IDR contains no IDs. + */ +#define IDR_INIT(name) IDR_INIT_BASE(name, 0) + +/** + * DEFINE_IDR() - Define a statically-allocated IDR. + * @name: Name of IDR. + * + * An IDR defined using this macro is ready for use with no additional + * initialisation required. It contains no IDs. 
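For the idle-injection API above, a hedged sketch (foo_start_injection() is invented): register a device for a cpumask, pick a 40 ms run / 10 ms idle pattern, then start the injection timer.

#include <linux/cpumask.h>
#include <linux/idle_inject.h>

static struct idle_inject_device *foo_start_injection(struct cpumask *cpus)
{
        struct idle_inject_device *ii_dev;

        ii_dev = idle_inject_register(cpus);
        if (!ii_dev)
                return NULL;

        /* Inject 10 ms of idle in every 50 ms window on these CPUs. */
        idle_inject_set_duration(ii_dev, 40, 10);       /* run_ms, idle_ms */
        if (idle_inject_start(ii_dev)) {
                idle_inject_unregister(ii_dev);
                return NULL;
        }
        return ii_dev;
}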
+ */ +#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) + +/** + * idr_get_cursor - Return the current position of the cyclic allocator + * @idr: idr handle + * + * The value returned is the value that will be next returned from + * idr_alloc_cyclic() if it is free (otherwise the search will start from + * this position). + */ +static inline unsigned int idr_get_cursor(const struct idr *idr) +{ + return READ_ONCE(idr->idr_next); +} + +/** + * idr_set_cursor - Set the current position of the cyclic allocator + * @idr: idr handle + * @val: new position + * + * The next call to idr_alloc_cyclic() will return @val if it is free + * (otherwise the search will start from this position). + */ +static inline void idr_set_cursor(struct idr *idr, unsigned int val) +{ + WRITE_ONCE(idr->idr_next, val); +} + +/** + * DOC: idr sync + * idr synchronization (stolen from radix-tree.h) + * + * idr_find() is able to be called locklessly, using RCU. The caller must + * ensure calls to this function are made within rcu_read_lock() regions. + * Other readers (lock-free or otherwise) and modifications may be running + * concurrently. + * + * It is still required that the caller manage the synchronization and + * lifetimes of the items. So if RCU lock-free lookups are used, typically + * this would mean that the items have their own locks, or are amenable to + * lock-free access; and that the items are freed by RCU (or only freed after + * having been deleted from the idr tree *and* a synchronize_rcu() grace + * period). + */ + +#define idr_lock(idr) xa_lock(&(idr)->idr_rt) +#define idr_unlock(idr) xa_unlock(&(idr)->idr_rt) +#define idr_lock_bh(idr) xa_lock_bh(&(idr)->idr_rt) +#define idr_unlock_bh(idr) xa_unlock_bh(&(idr)->idr_rt) +#define idr_lock_irq(idr) xa_lock_irq(&(idr)->idr_rt) +#define idr_unlock_irq(idr) xa_unlock_irq(&(idr)->idr_rt) +#define idr_lock_irqsave(idr, flags) \ + xa_lock_irqsave(&(idr)->idr_rt, flags) +#define idr_unlock_irqrestore(idr, flags) \ + xa_unlock_irqrestore(&(idr)->idr_rt, flags) + +void idr_preload(gfp_t gfp_mask); + +int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t); +int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id, + unsigned long max, gfp_t); +int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t); +void *idr_remove(struct idr *, unsigned long id); +void *idr_find(const struct idr *, unsigned long id); +int idr_for_each(const struct idr *, + int (*fn)(int id, void *p, void *data), void *data); +void *idr_get_next(struct idr *, int *nextid); +void *idr_get_next_ul(struct idr *, unsigned long *nextid); +void *idr_replace(struct idr *, void *, unsigned long id); +void idr_destroy(struct idr *); + +/** + * idr_init_base() - Initialise an IDR. + * @idr: IDR handle. + * @base: The base value for the IDR. + * + * This variation of idr_init() creates an IDR which will allocate IDs + * starting at %base. + */ +static inline void idr_init_base(struct idr *idr, int base) +{ + INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); + idr->idr_base = base; + idr->idr_next = 0; +} + +/** + * idr_init() - Initialise an IDR. + * @idr: IDR handle. + * + * Initialise a dynamically allocated IDR. To initialise a + * statically allocated IDR, use DEFINE_IDR(). + */ +static inline void idr_init(struct idr *idr) +{ + idr_init_base(idr, 0); +} + +/** + * idr_is_empty() - Are there any IDs allocated? + * @idr: IDR handle. + * + * Return: %true if any IDs have been allocated from this IDR. 
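+ *
+ * A minimal teardown sketch; my_idr and struct foo are illustrative,
+ * caller-defined names, and the entries are assumed to be kmalloc()ed:
+ *
+ *	struct foo *entry;
+ *	int id;
+ *
+ *	idr_for_each_entry(&my_idr, entry, id) {
+ *		idr_remove(&my_idr, id);
+ *		kfree(entry);
+ *	}
+ *	WARN_ON(!idr_is_empty(&my_idr));
+ *	idr_destroy(&my_idr);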
+ */ +static inline bool idr_is_empty(const struct idr *idr) +{ + return radix_tree_empty(&idr->idr_rt) && + radix_tree_tagged(&idr->idr_rt, IDR_FREE); +} + +/** + * idr_preload_end - end preload section started with idr_preload() + * + * Each idr_preload() should be matched with an invocation of this + * function. See idr_preload() for details. + */ +static inline void idr_preload_end(void) +{ + preempt_enable(); +} + +/** + * idr_for_each_entry() - Iterate over an IDR's elements of a given type. + * @idr: IDR handle. + * @entry: The type * to use as cursor + * @id: Entry ID. + * + * @entry and @id do not need to be initialized before the loop, and + * after normal termination @entry is left with the value NULL. This + * is convenient for a "not found" value. + */ +#define idr_for_each_entry(idr, entry, id) \ + for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U) + +/** + * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type. + * @idr: IDR handle. + * @entry: The type * to use as cursor. + * @id: Entry ID. + * + * @entry and @id do not need to be initialized before the loop, and + * after normal termination @entry is left with the value NULL. This + * is convenient for a "not found" value. + */ +#define idr_for_each_entry_ul(idr, entry, id) \ + for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id) + +/** + * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type + * @idr: IDR handle. + * @entry: The type * to use as a cursor. + * @id: Entry ID. + * + * Continue to iterate over entries, continuing after the current position. + */ +#define idr_for_each_entry_continue(idr, entry, id) \ + for ((entry) = idr_get_next((idr), &(id)); \ + entry; \ + ++id, (entry) = idr_get_next((idr), &(id))) + +/* + * IDA - IDR based id allocator, use when translation from id to + * pointer isn't necessary. + */ +#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ +#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long)) +#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) + +struct ida_bitmap { + unsigned long bitmap[IDA_BITMAP_LONGS]; +}; + +DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap); + +struct ida { + struct radix_tree_root ida_rt; +}; + +#define IDA_INIT(name) { \ + .ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT), \ +} +#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) + +int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t); +void ida_free(struct ida *, unsigned int id); +void ida_destroy(struct ida *ida); + +/** + * ida_alloc() - Allocate an unused ID. + * @ida: IDA handle. + * @gfp: Memory allocation flags. + * + * Allocate an ID between 0 and %INT_MAX, inclusive. + * + * Context: Any context. + * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, + * or %-ENOSPC if there are no free IDs. + */ +static inline int ida_alloc(struct ida *ida, gfp_t gfp) +{ + return ida_alloc_range(ida, 0, ~0, gfp); +} + +/** + * ida_alloc_min() - Allocate an unused ID. + * @ida: IDA handle. + * @min: Lowest ID to allocate. + * @gfp: Memory allocation flags. + * + * Allocate an ID between @min and %INT_MAX, inclusive. + * + * Context: Any context. + * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, + * or %-ENOSPC if there are no free IDs. + */ +static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) +{ + return ida_alloc_range(ida, min, ~0, gfp); +} + +/** + * ida_alloc_max() - Allocate an unused ID. 
+ * @ida: IDA handle. + * @max: Highest ID to allocate. + * @gfp: Memory allocation flags. + * + * Allocate an ID between 0 and @max, inclusive. + * + * Context: Any context. + * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, + * or %-ENOSPC if there are no free IDs. + */ +static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) +{ + return ida_alloc_range(ida, 0, max, gfp); +} + +static inline void ida_init(struct ida *ida) +{ + INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); +} + +#define ida_simple_get(ida, start, end, gfp) \ + ida_alloc_range(ida, start, (end) - 1, gfp) +#define ida_simple_remove(ida, id) ida_free(ida, id) + +static inline bool ida_is_empty(const struct ida *ida) +{ + return radix_tree_empty(&ida->ida_rt); +} + +/* in lib/radix-tree.c */ +int ida_pre_get(struct ida *ida, gfp_t gfp_mask); +#endif /* __IDR_H__ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h new file mode 100644 index 000000000..778d3ef93 --- /dev/null +++ b/include/linux/ieee80211.h @@ -0,0 +1,3250 @@ +/* + * IEEE 802.11 defines + * + * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen + * + * Copyright (c) 2002-2003, Jouni Malinen + * Copyright (c) 2005, Devicescape Software, Inc. + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef LINUX_IEEE80211_H +#define LINUX_IEEE80211_H + +#include +#include +#include +#include +#include + +/* + * DS bit usage + * + * TA = transmitter address + * RA = receiver address + * DA = destination address + * SA = source address + * + * ToDS FromDS A1(RA) A2(TA) A3 A4 Use + * ----------------------------------------------------------------- + * 0 0 DA SA BSSID - IBSS/DLS + * 0 1 DA BSSID SA - AP -> STA + * 1 0 BSSID SA DA - AP <- STA + * 1 1 RA TA DA SA unspecified (WDS) + */ + +#define FCS_LEN 4 + +#define IEEE80211_FCTL_VERS 0x0003 +#define IEEE80211_FCTL_FTYPE 0x000c +#define IEEE80211_FCTL_STYPE 0x00f0 +#define IEEE80211_FCTL_TODS 0x0100 +#define IEEE80211_FCTL_FROMDS 0x0200 +#define IEEE80211_FCTL_MOREFRAGS 0x0400 +#define IEEE80211_FCTL_RETRY 0x0800 +#define IEEE80211_FCTL_PM 0x1000 +#define IEEE80211_FCTL_MOREDATA 0x2000 +#define IEEE80211_FCTL_PROTECTED 0x4000 +#define IEEE80211_FCTL_ORDER 0x8000 +#define IEEE80211_FCTL_CTL_EXT 0x0f00 + +#define IEEE80211_SCTL_FRAG 0x000F +#define IEEE80211_SCTL_SEQ 0xFFF0 + +#define IEEE80211_FTYPE_MGMT 0x0000 +#define IEEE80211_FTYPE_CTL 0x0004 +#define IEEE80211_FTYPE_DATA 0x0008 +#define IEEE80211_FTYPE_EXT 0x000c + +/* management */ +#define IEEE80211_STYPE_ASSOC_REQ 0x0000 +#define IEEE80211_STYPE_ASSOC_RESP 0x0010 +#define IEEE80211_STYPE_REASSOC_REQ 0x0020 +#define IEEE80211_STYPE_REASSOC_RESP 0x0030 +#define IEEE80211_STYPE_PROBE_REQ 0x0040 +#define IEEE80211_STYPE_PROBE_RESP 0x0050 +#define IEEE80211_STYPE_BEACON 0x0080 +#define IEEE80211_STYPE_ATIM 0x0090 +#define IEEE80211_STYPE_DISASSOC 0x00A0 +#define IEEE80211_STYPE_AUTH 0x00B0 +#define IEEE80211_STYPE_DEAUTH 0x00C0 +#define IEEE80211_STYPE_ACTION 0x00D0 + +/* control */ +#define IEEE80211_STYPE_CTL_EXT 0x0060 +#define IEEE80211_STYPE_BACK_REQ 0x0080 +#define IEEE80211_STYPE_BACK 0x0090 +#define IEEE80211_STYPE_PSPOLL 
0x00A0 +#define IEEE80211_STYPE_RTS 0x00B0 +#define IEEE80211_STYPE_CTS 0x00C0 +#define IEEE80211_STYPE_ACK 0x00D0 +#define IEEE80211_STYPE_CFEND 0x00E0 +#define IEEE80211_STYPE_CFENDACK 0x00F0 + +/* data */ +#define IEEE80211_STYPE_DATA 0x0000 +#define IEEE80211_STYPE_DATA_CFACK 0x0010 +#define IEEE80211_STYPE_DATA_CFPOLL 0x0020 +#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030 +#define IEEE80211_STYPE_NULLFUNC 0x0040 +#define IEEE80211_STYPE_CFACK 0x0050 +#define IEEE80211_STYPE_CFPOLL 0x0060 +#define IEEE80211_STYPE_CFACKPOLL 0x0070 +#define IEEE80211_STYPE_QOS_DATA 0x0080 +#define IEEE80211_STYPE_QOS_DATA_CFACK 0x0090 +#define IEEE80211_STYPE_QOS_DATA_CFPOLL 0x00A0 +#define IEEE80211_STYPE_QOS_DATA_CFACKPOLL 0x00B0 +#define IEEE80211_STYPE_QOS_NULLFUNC 0x00C0 +#define IEEE80211_STYPE_QOS_CFACK 0x00D0 +#define IEEE80211_STYPE_QOS_CFPOLL 0x00E0 +#define IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0 + +/* extension, added by 802.11ad */ +#define IEEE80211_STYPE_DMG_BEACON 0x0000 + +/* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */ +#define IEEE80211_CTL_EXT_POLL 0x2000 +#define IEEE80211_CTL_EXT_SPR 0x3000 +#define IEEE80211_CTL_EXT_GRANT 0x4000 +#define IEEE80211_CTL_EXT_DMG_CTS 0x5000 +#define IEEE80211_CTL_EXT_DMG_DTS 0x6000 +#define IEEE80211_CTL_EXT_SSW 0x8000 +#define IEEE80211_CTL_EXT_SSW_FBACK 0x9000 +#define IEEE80211_CTL_EXT_SSW_ACK 0xa000 + + +#define IEEE80211_SN_MASK ((IEEE80211_SCTL_SEQ) >> 4) +#define IEEE80211_MAX_SN IEEE80211_SN_MASK +#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) + +static inline bool ieee80211_sn_less(u16 sn1, u16 sn2) +{ + return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1); +} + +static inline u16 ieee80211_sn_add(u16 sn1, u16 sn2) +{ + return (sn1 + sn2) & IEEE80211_SN_MASK; +} + +static inline u16 ieee80211_sn_inc(u16 sn) +{ + return ieee80211_sn_add(sn, 1); +} + +static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) +{ + return (sn1 - sn2) & IEEE80211_SN_MASK; +} + +#define IEEE80211_SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define IEEE80211_SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) + +/* miscellaneous IEEE 802.11 constants */ +#define IEEE80211_MAX_FRAG_THRESHOLD 2352 +#define IEEE80211_MAX_RTS_THRESHOLD 2353 +#define IEEE80211_MAX_AID 2007 +#define IEEE80211_MAX_TIM_LEN 251 +#define IEEE80211_MAX_MESH_PEERINGS 63 +/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section + 6.2.1.1.2. + + 802.11e clarifies the figure in section 7.1.2. The frame body is + up to 2304 octets long (maximum MSDU size) plus any crypt overhead. 
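+
+   A note on the sequence-number helpers defined above: sequence numbers
+   are 12 bits wide, so the helpers wrap modulo 4096; for example
+   ieee80211_sn_less(4095, 0) is true, ieee80211_sn_add(4095, 2) == 1 and
+   ieee80211_sn_sub(0, 4095) == 1.
+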
*/ +#define IEEE80211_MAX_DATA_LEN 2304 +/* 802.11ad extends maximum MSDU size for DMG (freq > 40Ghz) networks + * to 7920 bytes, see 8.2.3 General frame format + */ +#define IEEE80211_MAX_DATA_LEN_DMG 7920 +/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ +#define IEEE80211_MAX_FRAME_LEN 2352 + +/* Maximal size of an A-MSDU that can be transported in a HT BA session */ +#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095 + +/* Maximal size of an A-MSDU */ +#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839 +#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935 + +#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895 +#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991 +#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454 + +#define IEEE80211_MAX_SSID_LEN 32 + +#define IEEE80211_MAX_MESH_ID_LEN 32 + +#define IEEE80211_FIRST_TSPEC_TSID 8 +#define IEEE80211_NUM_TIDS 16 + +/* number of user priorities 802.11 uses */ +#define IEEE80211_NUM_UPS 8 +/* number of ACs */ +#define IEEE80211_NUM_ACS 4 + +#define IEEE80211_QOS_CTL_LEN 2 +/* 1d tag mask */ +#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 +/* TID mask */ +#define IEEE80211_QOS_CTL_TID_MASK 0x000f +/* EOSP */ +#define IEEE80211_QOS_CTL_EOSP 0x0010 +/* ACK policy */ +#define IEEE80211_QOS_CTL_ACK_POLICY_NORMAL 0x0000 +#define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020 +#define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040 +#define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060 +#define IEEE80211_QOS_CTL_ACK_POLICY_MASK 0x0060 +/* A-MSDU 802.11n */ +#define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080 +/* Mesh Control 802.11s */ +#define IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT 0x0100 + +/* Mesh Power Save Level */ +#define IEEE80211_QOS_CTL_MESH_PS_LEVEL 0x0200 +/* Mesh Receiver Service Period Initiated */ +#define IEEE80211_QOS_CTL_RSPI 0x0400 + +/* U-APSD queue for WMM IEs sent by AP */ +#define IEEE80211_WMM_IE_AP_QOSINFO_UAPSD (1<<7) +#define IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK 0x0f + +/* U-APSD queues for WMM IEs sent by STA */ +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VO (1<<0) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VI (1<<1) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BK (1<<2) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BE (1<<3) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK 0x0f + +/* U-APSD max SP length for WMM IEs sent by STA */ +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 0x00 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_2 0x01 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_4 0x02 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_6 0x03 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK 0x03 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT 5 + +#define IEEE80211_HT_CTL_LEN 4 + +struct ieee80211_hdr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctrl; + u8 addr4[ETH_ALEN]; +} __packed __aligned(2); + +struct ieee80211_hdr_3addr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctrl; +} __packed __aligned(2); + +struct ieee80211_qos_hdr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctrl; + __le16 qos_ctrl; +} __packed __aligned(2); + +/** + * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_has_tods(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0; +} + +/** + * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set + * @fc: 
frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_has_fromds(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0; +} + +/** + * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_has_a4(__le16 fc) +{ + __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); + return (fc & tmp) == tmp; +} + +/** + * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_has_morefrags(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0; +} + +/** + * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_has_retry(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0; +} + +/** + * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_has_pm(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0; +} + +/** + * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_has_moredata(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0; +} + +/** + * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_has_protected(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0; +} + +/** + * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_has_order(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0; +} + +/** + * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_mgmt(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT); +} + +/** + * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_ctl(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL); +} + +/** + * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_data(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA); +} + +/** + * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_data_qos(__le16 fc) +{ + /* + * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need + * to check the one bit + */ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_STYPE_QOS_DATA)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA); +} + +/** + * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_data_present(__le16 fc) +{ + /* + * mask with 0x40 and test that that bit is clear to only return 
true + * for the data-containing substypes. + */ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | 0x40)) == + cpu_to_le16(IEEE80211_FTYPE_DATA); +} + +/** + * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_assoc_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); +} + +/** + * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_assoc_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP); +} + +/** + * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_reassoc_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); +} + +/** + * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_reassoc_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP); +} + +/** + * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_probe_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); +} + +/** + * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_probe_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); +} + +/** + * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_beacon(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); +} + +/** + * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_atim(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM); +} + +/** + * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_disassoc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC); +} + +/** + * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_auth(__le16 fc) +{ + 
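+	/*
+	 * As with the other ieee80211_is_*() helpers, the frame type and
+	 * subtype bits are checked in a single masked comparison; callers
+	 * typically pass a received header's field directly, e.g.
+	 * ieee80211_is_auth(hdr->frame_control).
+	 */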
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); +} + +/** + * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_deauth(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); +} + +/** + * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_action(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); +} + +/** + * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_back_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); +} + +/** + * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_back(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK); +} + +/** + * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_pspoll(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); +} + +/** + * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_rts(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); +} + +/** + * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_cts(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); +} + +/** + * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_ack(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); +} + +/** + * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_cfend(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND); +} + +/** + * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_cfendack(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK); +} + +/** + * 
ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_nullfunc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC); +} + +/** + * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_qos_nullfunc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); +} + +/** + * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_any_nullfunc(__le16 fc) +{ + return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)); +} + +/** + * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU + * @fc: frame control field in little-endian byteorder + */ +static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc) +{ + /* IEEE 802.11-2012, definition of "bufferable management frame"; + * note that this ignores the IBSS special case. */ + return ieee80211_is_mgmt(fc) && + (ieee80211_is_action(fc) || + ieee80211_is_disassoc(fc) || + ieee80211_is_deauth(fc)); +} + +/** + * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set + * @seq_ctrl: frame sequence control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) +{ + return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; +} + +/** + * ieee80211_is_frag - check if a frame is a fragment + * @hdr: 802.11 header of the frame + */ +static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr) +{ + return ieee80211_has_morefrags(hdr->frame_control) || + hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG); +} + +struct ieee80211s_hdr { + u8 flags; + u8 ttl; + __le32 seqnum; + u8 eaddr1[ETH_ALEN]; + u8 eaddr2[ETH_ALEN]; +} __packed __aligned(2); + +/* Mesh flags */ +#define MESH_FLAGS_AE_A4 0x1 +#define MESH_FLAGS_AE_A5_A6 0x2 +#define MESH_FLAGS_AE 0x3 +#define MESH_FLAGS_PS_DEEP 0x4 + +/** + * enum ieee80211_preq_flags - mesh PREQ element flags + * + * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield + */ +enum ieee80211_preq_flags { + IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2, +}; + +/** + * enum ieee80211_preq_target_flags - mesh PREQ element per target flags + * + * @IEEE80211_PREQ_TO_FLAG: target only subfield + * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield + */ +enum ieee80211_preq_target_flags { + IEEE80211_PREQ_TO_FLAG = 1<<0, + IEEE80211_PREQ_USN_FLAG = 1<<2, +}; + +/** + * struct ieee80211_quiet_ie + * + * This structure refers to "Quiet information element" + */ +struct ieee80211_quiet_ie { + u8 count; + u8 period; + __le16 duration; + __le16 offset; +} __packed; + +/** + * struct ieee80211_msrment_ie + * + * This structure refers to "Measurement Request/Report information element" + */ +struct ieee80211_msrment_ie { + u8 token; + u8 mode; + u8 type; + u8 request[0]; +} __packed; + +/** + * struct ieee80211_channel_sw_ie + * + * This structure refers to "Channel Switch Announcement information element" + */ +struct ieee80211_channel_sw_ie { + u8 mode; + u8 new_ch_num; + u8 count; +} __packed; + +/** + * struct ieee80211_ext_chansw_ie + * + * This structure represents the "Extended Channel Switch 
Announcement element" + */ +struct ieee80211_ext_chansw_ie { + u8 mode; + u8 new_operating_class; + u8 new_ch_num; + u8 count; +} __packed; + +/** + * struct ieee80211_sec_chan_offs_ie - secondary channel offset IE + * @sec_chan_offs: secondary channel offset, uses IEEE80211_HT_PARAM_CHA_SEC_* + * values here + * This structure represents the "Secondary Channel Offset element" + */ +struct ieee80211_sec_chan_offs_ie { + u8 sec_chan_offs; +} __packed; + +/** + * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE + * + * This structure represents the "Mesh Channel Switch Paramters element" + */ +struct ieee80211_mesh_chansw_params_ie { + u8 mesh_ttl; + u8 mesh_flags; + __le16 mesh_reason; + __le16 mesh_pre_value; +} __packed; + +/** + * struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE + */ +struct ieee80211_wide_bw_chansw_ie { + u8 new_channel_width; + u8 new_center_freq_seg0, new_center_freq_seg1; +} __packed; + +/** + * struct ieee80211_tim + * + * This structure refers to "Traffic Indication Map information element" + */ +struct ieee80211_tim_ie { + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + /* variable size: 1 - 251 bytes */ + u8 virtual_map[1]; +} __packed; + +/** + * struct ieee80211_meshconf_ie + * + * This structure refers to "Mesh Configuration information element" + */ +struct ieee80211_meshconf_ie { + u8 meshconf_psel; + u8 meshconf_pmetric; + u8 meshconf_congest; + u8 meshconf_synch; + u8 meshconf_auth; + u8 meshconf_form; + u8 meshconf_cap; +} __packed; + +/** + * enum mesh_config_capab_flags - Mesh Configuration IE capability field flags + * + * @IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish + * additional mesh peerings with other mesh STAs + * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs + * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure + * is ongoing + * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has + * neighbors in deep sleep mode + */ +enum mesh_config_capab_flags { + IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01, + IEEE80211_MESHCONF_CAPAB_FORWARDING = 0x08, + IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING = 0x20, + IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40, +}; + +/** + * mesh channel switch parameters element's flag indicator + * + */ +#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0) +#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1) +#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2) + +/** + * struct ieee80211_rann_ie + * + * This structure refers to "Root Announcement information element" + */ +struct ieee80211_rann_ie { + u8 rann_flags; + u8 rann_hopcount; + u8 rann_ttl; + u8 rann_addr[ETH_ALEN]; + __le32 rann_seq; + __le32 rann_interval; + __le32 rann_metric; +} __packed; + +enum ieee80211_rann_flags { + RANN_FLAG_IS_GATE = 1 << 0, +}; + +enum ieee80211_ht_chanwidth_values { + IEEE80211_HT_CHANWIDTH_20MHZ = 0, + IEEE80211_HT_CHANWIDTH_ANY = 1, +}; + +/** + * enum ieee80211_opmode_bits - VHT operating mode field bits + * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask + * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width + * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width + * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width + * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width + * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask + * (the NSS value is the value of this field + 1) + * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: 
number of spatial streams shift + * @IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF: indicates streams in SU-MIMO PPDU + * using a beamforming steering matrix + */ +enum ieee80211_vht_opmode_bits { + IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 3, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3, + IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70, + IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4, + IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80, +}; + +#define WLAN_SA_QUERY_TR_ID_LEN 2 +#define WLAN_MEMBERSHIP_LEN 8 +#define WLAN_USER_POSITION_LEN 16 + +/** + * struct ieee80211_tpc_report_ie + * + * This structure refers to "TPC Report element" + */ +struct ieee80211_tpc_report_ie { + u8 tx_power; + u8 link_margin; +} __packed; + +struct ieee80211_mgmt { + __le16 frame_control; + __le16 duration; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + __le16 seq_ctrl; + union { + struct { + __le16 auth_alg; + __le16 auth_transaction; + __le16 status_code; + /* possibly followed by Challenge text */ + u8 variable[0]; + } __packed auth; + struct { + __le16 reason_code; + } __packed deauth; + struct { + __le16 capab_info; + __le16 listen_interval; + /* followed by SSID and Supported rates */ + u8 variable[0]; + } __packed assoc_req; + struct { + __le16 capab_info; + __le16 status_code; + __le16 aid; + /* followed by Supported rates */ + u8 variable[0]; + } __packed assoc_resp, reassoc_resp; + struct { + __le16 capab_info; + __le16 listen_interval; + u8 current_ap[ETH_ALEN]; + /* followed by SSID and Supported rates */ + u8 variable[0]; + } __packed reassoc_req; + struct { + __le16 reason_code; + } __packed disassoc; + struct { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + /* followed by some of SSID, Supported rates, + * FH Params, DS Params, CF Params, IBSS Params, TIM */ + u8 variable[0]; + } __packed beacon; + struct { + /* only variable items: SSID, Supported rates */ + u8 variable[0]; + } __packed probe_req; + struct { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + /* followed by some of SSID, Supported rates, + * FH Params, DS Params, CF Params, IBSS Params */ + u8 variable[0]; + } __packed probe_resp; + struct { + u8 category; + union { + struct { + u8 action_code; + u8 dialog_token; + u8 status_code; + u8 variable[0]; + } __packed wme_action; + struct{ + u8 action_code; + u8 variable[0]; + } __packed chan_switch; + struct{ + u8 action_code; + struct ieee80211_ext_chansw_ie data; + u8 variable[0]; + } __packed ext_chan_switch; + struct{ + u8 action_code; + u8 dialog_token; + u8 element_id; + u8 length; + struct ieee80211_msrment_ie msr_elem; + } __packed measurement; + struct{ + u8 action_code; + u8 dialog_token; + __le16 capab; + __le16 timeout; + __le16 start_seq_num; + } __packed addba_req; + struct{ + u8 action_code; + u8 dialog_token; + __le16 status; + __le16 capab; + __le16 timeout; + } __packed addba_resp; + struct{ + u8 action_code; + __le16 params; + __le16 reason_code; + } __packed delba; + struct { + u8 action_code; + u8 variable[0]; + } __packed self_prot; + struct{ + u8 action_code; + u8 variable[0]; + } __packed mesh_action; + struct { + u8 action; + u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN]; + } __packed sa_query; + struct { + u8 action; + u8 smps_control; + } __packed ht_smps; + struct { + u8 action_code; + u8 chanwidth; + } __packed ht_notify_cw; + struct { + u8 action_code; + u8 dialog_token; + __le16 capability; + u8 
variable[0]; + } __packed tdls_discover_resp; + struct { + u8 action_code; + u8 operating_mode; + } __packed vht_opmode_notif; + struct { + u8 action_code; + u8 membership[WLAN_MEMBERSHIP_LEN]; + u8 position[WLAN_USER_POSITION_LEN]; + } __packed vht_group_notif; + struct { + u8 action_code; + u8 dialog_token; + u8 tpc_elem_id; + u8 tpc_elem_length; + struct ieee80211_tpc_report_ie tpc; + } __packed tpc_report; + struct { + u8 action_code; + u8 dialog_token; + u8 follow_up; + u8 tod[6]; + u8 toa[6]; + __le16 tod_error; + __le16 toa_error; + u8 variable[0]; + } __packed ftm; + } u; + } __packed action; + } u; +} __packed __aligned(2); + +/* Supported rates membership selectors */ +#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 +#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 + +/* mgmt header + 1 byte category code */ +#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) + + +/* Management MIC information element (IEEE 802.11w) */ +struct ieee80211_mmie { + u8 element_id; + u8 length; + __le16 key_id; + u8 sequence_number[6]; + u8 mic[8]; +} __packed; + +/* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */ +struct ieee80211_mmie_16 { + u8 element_id; + u8 length; + __le16 key_id; + u8 sequence_number[6]; + u8 mic[16]; +} __packed; + +struct ieee80211_vendor_ie { + u8 element_id; + u8 len; + u8 oui[3]; + u8 oui_type; +} __packed; + +struct ieee80211_wmm_ac_param { + u8 aci_aifsn; /* AIFSN, ACM, ACI */ + u8 cw; /* ECWmin, ECWmax (CW = 2^ECW - 1) */ + __le16 txop_limit; +} __packed; + +struct ieee80211_wmm_param_ie { + u8 element_id; /* Element ID: 221 (0xdd); */ + u8 len; /* Length: 24 */ + /* required fields for WMM version 1 */ + u8 oui[3]; /* 00:50:f2 */ + u8 oui_type; /* 2 */ + u8 oui_subtype; /* 1 */ + u8 version; /* 1 for WMM version 1.0 */ + u8 qos_info; /* AP/STA specific QoS info */ + u8 reserved; /* 0 */ + /* AC_BE, AC_BK, AC_VI, AC_VO */ + struct ieee80211_wmm_ac_param ac[4]; +} __packed; + +/* Control frames */ +struct ieee80211_rts { + __le16 frame_control; + __le16 duration; + u8 ra[ETH_ALEN]; + u8 ta[ETH_ALEN]; +} __packed __aligned(2); + +struct ieee80211_cts { + __le16 frame_control; + __le16 duration; + u8 ra[ETH_ALEN]; +} __packed __aligned(2); + +struct ieee80211_pspoll { + __le16 frame_control; + __le16 aid; + u8 bssid[ETH_ALEN]; + u8 ta[ETH_ALEN]; +} __packed __aligned(2); + +/* TDLS */ + +/* Channel switch timing */ +struct ieee80211_ch_switch_timing { + __le16 switch_time; + __le16 switch_timeout; +} __packed; + +/* Link-id information element */ +struct ieee80211_tdls_lnkie { + u8 ie_type; /* Link Identifier IE */ + u8 ie_len; + u8 bssid[ETH_ALEN]; + u8 init_sta[ETH_ALEN]; + u8 resp_sta[ETH_ALEN]; +} __packed; + +struct ieee80211_tdls_data { + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + __be16 ether_type; + u8 payload_type; + u8 category; + u8 action_code; + union { + struct { + u8 dialog_token; + __le16 capability; + u8 variable[0]; + } __packed setup_req; + struct { + __le16 status_code; + u8 dialog_token; + __le16 capability; + u8 variable[0]; + } __packed setup_resp; + struct { + __le16 status_code; + u8 dialog_token; + u8 variable[0]; + } __packed setup_cfm; + struct { + __le16 reason_code; + u8 variable[0]; + } __packed teardown; + struct { + u8 dialog_token; + u8 variable[0]; + } __packed discover_req; + struct { + u8 target_channel; + u8 oper_class; + u8 variable[0]; + } __packed chan_switch_req; + struct { + __le16 status_code; + u8 variable[0]; + } __packed chan_switch_resp; + } u; +} __packed; + +/* + * Peer-to-Peer IE 
attribute related definitions. + */ +/** + * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute. + */ +enum ieee80211_p2p_attr_id { + IEEE80211_P2P_ATTR_STATUS = 0, + IEEE80211_P2P_ATTR_MINOR_REASON, + IEEE80211_P2P_ATTR_CAPABILITY, + IEEE80211_P2P_ATTR_DEVICE_ID, + IEEE80211_P2P_ATTR_GO_INTENT, + IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT, + IEEE80211_P2P_ATTR_LISTEN_CHANNEL, + IEEE80211_P2P_ATTR_GROUP_BSSID, + IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING, + IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR, + IEEE80211_P2P_ATTR_MANAGABILITY, + IEEE80211_P2P_ATTR_CHANNEL_LIST, + IEEE80211_P2P_ATTR_ABSENCE_NOTICE, + IEEE80211_P2P_ATTR_DEVICE_INFO, + IEEE80211_P2P_ATTR_GROUP_INFO, + IEEE80211_P2P_ATTR_GROUP_ID, + IEEE80211_P2P_ATTR_INTERFACE, + IEEE80211_P2P_ATTR_OPER_CHANNEL, + IEEE80211_P2P_ATTR_INVITE_FLAGS, + /* 19 - 220: Reserved */ + IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221, + + IEEE80211_P2P_ATTR_MAX +}; + +/* Notice of Absence attribute - described in P2P spec 4.1.14 */ +/* Typical max value used here */ +#define IEEE80211_P2P_NOA_DESC_MAX 4 + +struct ieee80211_p2p_noa_desc { + u8 count; + __le32 duration; + __le32 interval; + __le32 start_time; +} __packed; + +struct ieee80211_p2p_noa_attr { + u8 index; + u8 oppps_ctwindow; + struct ieee80211_p2p_noa_desc desc[IEEE80211_P2P_NOA_DESC_MAX]; +} __packed; + +#define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7) +#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F + +/** + * struct ieee80211_bar - HT Block Ack Request + * + * This structure refers to "HT BlockAckReq" as + * described in 802.11n draft section 7.2.1.7.1 + */ +struct ieee80211_bar { + __le16 frame_control; + __le16 duration; + __u8 ra[ETH_ALEN]; + __u8 ta[ETH_ALEN]; + __le16 control; + __le16 start_seq_num; +} __packed; + +/* 802.11 BAR control masks */ +#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000 +#define IEEE80211_BAR_CTRL_MULTI_TID 0x0002 +#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004 +#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000 +#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12 + +#define IEEE80211_HT_MCS_MASK_LEN 10 + +/** + * struct ieee80211_mcs_info - MCS information + * @rx_mask: RX mask + * @rx_highest: highest supported RX rate. If set represents + * the highest supported RX data rate in units of 1 Mbps. + * If this field is 0 this value should not be used to + * consider the highest RX data rate supported. 
+ * @tx_params: TX parameters + */ +struct ieee80211_mcs_info { + u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN]; + __le16 rx_highest; + u8 tx_params; + u8 reserved[3]; +} __packed; + +/* 802.11n HT capability MSC set */ +#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff +#define IEEE80211_HT_MCS_TX_DEFINED 0x01 +#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02 +/* value 0 == 1 stream etc */ +#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C +#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2 +#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4 +#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10 + +/* + * 802.11n D5.0 20.3.5 / 20.6 says: + * - indices 0 to 7 and 32 are single spatial stream + * - 8 to 31 are multiple spatial streams using equal modulation + * [8..15 for two streams, 16..23 for three and 24..31 for four] + * - remainder are multiple spatial streams using unequal modulation + */ +#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33 +#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \ + (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8) + +/** + * struct ieee80211_ht_cap - HT capabilities + * + * This structure is the "HT capabilities element" as + * described in 802.11n D5.0 7.3.2.57 + */ +struct ieee80211_ht_cap { + __le16 cap_info; + u8 ampdu_params_info; + + /* 16 bytes MCS information */ + struct ieee80211_mcs_info mcs; + + __le16 extended_ht_cap_info; + __le32 tx_BF_cap_info; + u8 antenna_selection_info; +} __packed; + +/* 802.11n HT capabilities masks (for cap_info) */ +#define IEEE80211_HT_CAP_LDPC_CODING 0x0001 +#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002 +#define IEEE80211_HT_CAP_SM_PS 0x000C +#define IEEE80211_HT_CAP_SM_PS_SHIFT 2 +#define IEEE80211_HT_CAP_GRN_FLD 0x0010 +#define IEEE80211_HT_CAP_SGI_20 0x0020 +#define IEEE80211_HT_CAP_SGI_40 0x0040 +#define IEEE80211_HT_CAP_TX_STBC 0x0080 +#define IEEE80211_HT_CAP_RX_STBC 0x0300 +#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8 +#define IEEE80211_HT_CAP_DELAY_BA 0x0400 +#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 +#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 +#define IEEE80211_HT_CAP_RESERVED 0x2000 +#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000 +#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000 + +/* 802.11n HT extended capabilities masks (for extended_ht_cap_info) */ +#define IEEE80211_HT_EXT_CAP_PCO 0x0001 +#define IEEE80211_HT_EXT_CAP_PCO_TIME 0x0006 +#define IEEE80211_HT_EXT_CAP_PCO_TIME_SHIFT 1 +#define IEEE80211_HT_EXT_CAP_MCS_FB 0x0300 +#define IEEE80211_HT_EXT_CAP_MCS_FB_SHIFT 8 +#define IEEE80211_HT_EXT_CAP_HTC_SUP 0x0400 +#define IEEE80211_HT_EXT_CAP_RD_RESPONDER 0x0800 + +/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */ +#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03 +#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C +#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 + +/* + * Maximum length of AMPDU that the STA can receive in high-throughput (HT). + * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) + */ +enum ieee80211_max_ampdu_length_exp { + IEEE80211_HT_MAX_AMPDU_8K = 0, + IEEE80211_HT_MAX_AMPDU_16K = 1, + IEEE80211_HT_MAX_AMPDU_32K = 2, + IEEE80211_HT_MAX_AMPDU_64K = 3 +}; + +/* + * Maximum length of AMPDU that the STA can receive in VHT. 
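+ * (Worked example: IEEE80211_VHT_MAX_AMPDU_1024K == 7 gives
+ * 2^(13 + 7) - 1 = 1048575 octets, versus 2^(13 + 3) - 1 = 65535 octets
+ * for IEEE80211_HT_MAX_AMPDU_64K above.)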
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) + */ +enum ieee80211_vht_max_ampdu_length_exp { + IEEE80211_VHT_MAX_AMPDU_8K = 0, + IEEE80211_VHT_MAX_AMPDU_16K = 1, + IEEE80211_VHT_MAX_AMPDU_32K = 2, + IEEE80211_VHT_MAX_AMPDU_64K = 3, + IEEE80211_VHT_MAX_AMPDU_128K = 4, + IEEE80211_VHT_MAX_AMPDU_256K = 5, + IEEE80211_VHT_MAX_AMPDU_512K = 6, + IEEE80211_VHT_MAX_AMPDU_1024K = 7 +}; + +#define IEEE80211_HT_MAX_AMPDU_FACTOR 13 + +/* Minimum MPDU start spacing */ +enum ieee80211_min_mpdu_spacing { + IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */ + IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */ + IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */ + IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */ + IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */ + IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */ + IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */ + IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */ +}; + +/** + * struct ieee80211_ht_operation - HT operation IE + * + * This structure is the "HT operation element" as + * described in 802.11n-2009 7.3.2.57 + */ +struct ieee80211_ht_operation { + u8 primary_chan; + u8 ht_param; + __le16 operation_mode; + __le16 stbc_param; + u8 basic_set[16]; +} __packed; + +/* for ht_param */ +#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03 +#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00 +#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01 +#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03 +#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04 +#define IEEE80211_HT_PARAM_RIFS_MODE 0x08 + +/* for operation_mode */ +#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1 +#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3 +#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004 +#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010 +#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5 +#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0 + +/* for stbc_param */ +#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040 +#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080 +#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100 +#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200 +#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400 +#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800 + + +/* block-ack parameters */ +#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001 +#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 +#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 +#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 +#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 + +/* + * A-MPDU buffer sizes + * According to HT size varies from 8 to 64 frames + * HE adds the ability to have up to 256 frames. + */ +#define IEEE80211_MIN_AMPDU_BUF 0x8 +#define IEEE80211_MAX_AMPDU_BUF_HT 0x40 +#define IEEE80211_MAX_AMPDU_BUF 0x100 + + +/* Spatial Multiplexing Power Save Modes (for capability) */ +#define WLAN_HT_CAP_SM_PS_STATIC 0 +#define WLAN_HT_CAP_SM_PS_DYNAMIC 1 +#define WLAN_HT_CAP_SM_PS_INVALID 2 +#define WLAN_HT_CAP_SM_PS_DISABLED 3 + +/* for SM power control field lower two bits */ +#define WLAN_HT_SMPS_CONTROL_DISABLED 0 +#define WLAN_HT_SMPS_CONTROL_STATIC 1 +#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3 + +/** + * struct ieee80211_vht_mcs_info - VHT MCS information + * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams + * @rx_highest: Indicates highest long GI VHT PPDU data rate + * STA can receive. 
Rate expressed in units of 1 Mbps. + * If this field is 0 this value should not be used to + * consider the highest RX data rate supported. + * The top 3 bits of this field are reserved. + * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams + * @tx_highest: Indicates highest long GI VHT PPDU data rate + * STA can transmit. Rate expressed in units of 1 Mbps. + * If this field is 0 this value should not be used to + * consider the highest TX data rate supported. + * The top 3 bits of this field are reserved. + */ +struct ieee80211_vht_mcs_info { + __le16 rx_mcs_map; + __le16 rx_highest; + __le16 tx_mcs_map; + __le16 tx_highest; +} __packed; + +/** + * enum ieee80211_vht_mcs_support - VHT MCS support definitions + * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the + * number of streams + * @IEEE80211_VHT_MCS_SUPPORT_0_8: MCSes 0-8 are supported + * @IEEE80211_VHT_MCS_SUPPORT_0_9: MCSes 0-9 are supported + * @IEEE80211_VHT_MCS_NOT_SUPPORTED: This number of streams isn't supported + * + * These definitions are used in each 2-bit subfield of the @rx_mcs_map + * and @tx_mcs_map fields of &struct ieee80211_vht_mcs_info, which are + * both split into 8 subfields by number of streams. These values indicate + * which MCSes are supported for the number of streams the value appears + * for. + */ +enum ieee80211_vht_mcs_support { + IEEE80211_VHT_MCS_SUPPORT_0_7 = 0, + IEEE80211_VHT_MCS_SUPPORT_0_8 = 1, + IEEE80211_VHT_MCS_SUPPORT_0_9 = 2, + IEEE80211_VHT_MCS_NOT_SUPPORTED = 3, +}; + +/** + * struct ieee80211_vht_cap - VHT capabilities + * + * This structure is the "VHT capabilities element" as + * described in 802.11ac D3.0 8.4.2.160 + * @vht_cap_info: VHT capability info + * @supp_mcs: VHT MCS supported rates + */ +struct ieee80211_vht_cap { + __le32 vht_cap_info; + struct ieee80211_vht_mcs_info supp_mcs; +} __packed; + +/** + * enum ieee80211_vht_chanwidth - VHT channel width + * @IEEE80211_VHT_CHANWIDTH_USE_HT: use the HT operation IE to + * determine the channel width (20 or 40 MHz) + * @IEEE80211_VHT_CHANWIDTH_80MHZ: 80 MHz bandwidth + * @IEEE80211_VHT_CHANWIDTH_160MHZ: 160 MHz bandwidth + * @IEEE80211_VHT_CHANWIDTH_80P80MHZ: 80+80 MHz bandwidth + */ +enum ieee80211_vht_chanwidth { + IEEE80211_VHT_CHANWIDTH_USE_HT = 0, + IEEE80211_VHT_CHANWIDTH_80MHZ = 1, + IEEE80211_VHT_CHANWIDTH_160MHZ = 2, + IEEE80211_VHT_CHANWIDTH_80P80MHZ = 3, +}; + +/** + * struct ieee80211_vht_operation - VHT operation IE + * + * This structure is the "VHT operation element" as + * described in 802.11ac D3.0 8.4.2.161 + * @chan_width: Operating channel width + * @center_freq_seg0_idx: center freq segment 0 index + * @center_freq_seg1_idx: center freq segment 1 index + * @basic_mcs_set: VHT Basic MCS rate set + */ +struct ieee80211_vht_operation { + u8 chan_width; + u8 center_freq_seg0_idx; + u8 center_freq_seg1_idx; + __le16 basic_mcs_set; +} __packed; + +/** + * struct ieee80211_he_cap_elem - HE capabilities element + * + * This structure is the "HE capabilities element" fixed fields as + * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3 + */ +struct ieee80211_he_cap_elem { + u8 mac_cap_info[5]; + u8 phy_cap_info[9]; +} __packed; + +#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5 + +/** + * enum ieee80211_he_mcs_support - HE MCS support definitions + * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the + * number of streams + * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported + * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported + * 
@IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported + * + * These definitions are used in each 2-bit subfield of the rx_mcs_* + * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are + * both split into 8 subfields by number of streams. These values indicate + * which MCSes are supported for the number of streams the value appears + * for. + */ +enum ieee80211_he_mcs_support { + IEEE80211_HE_MCS_SUPPORT_0_7 = 0, + IEEE80211_HE_MCS_SUPPORT_0_9 = 1, + IEEE80211_HE_MCS_SUPPORT_0_11 = 2, + IEEE80211_HE_MCS_NOT_SUPPORTED = 3, +}; + +/** + * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field + * + * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field + * described in P802.11ax_D2.0 section 9.4.2.237.4 + * + * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel + * widths less than 80MHz. + * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel + * widths less than 80MHz. + * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel + * width 160MHz. + * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel + * width 160MHz. + * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for + * channel width 80p80MHz. + * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for + * channel width 80p80MHz. + */ +struct ieee80211_he_mcs_nss_supp { + __le16 rx_mcs_80; + __le16 tx_mcs_80; + __le16 rx_mcs_160; + __le16 tx_mcs_160; + __le16 rx_mcs_80p80; + __le16 tx_mcs_80p80; +} __packed; + +/** + * struct ieee80211_he_operation - HE capabilities element + * + * This structure is the "HE operation element" fields as + * described in P802.11ax_D2.0 section 9.4.2.238 + */ +struct ieee80211_he_operation { + __le32 he_oper_params; + __le16 he_mcs_nss_set; + /* Optional 0,1,3 or 4 bytes: depends on @he_oper_params */ + u8 optional[0]; +} __packed; + +/** + * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field + * + * This structure is the "MU AC Parameter Record" fields as + * described in P802.11ax_D2.0 section 9.4.2.240 + */ +struct ieee80211_he_mu_edca_param_ac_rec { + u8 aifsn; + u8 ecw_min_max; + u8 mu_edca_timer; +} __packed; + +/** + * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element + * + * This structure is the "MU EDCA Parameter Set element" fields as + * described in P802.11ax_D2.0 section 9.4.2.240 + */ +struct ieee80211_mu_edca_param_set { + u8 mu_qos_info; + struct ieee80211_he_mu_edca_param_ac_rec ac_be; + struct ieee80211_he_mu_edca_param_ac_rec ac_bk; + struct ieee80211_he_mu_edca_param_ac_rec ac_vi; + struct ieee80211_he_mu_edca_param_ac_rec ac_vo; +} __packed; + +/* 802.11ac VHT Capabilities */ +#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 +#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 +#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002 +#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003 +#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004 +#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008 +#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C +#define IEEE80211_VHT_CAP_RXLDPC 0x00000010 +#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020 +#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040 +#define IEEE80211_VHT_CAP_TXSTBC 0x00000080 +#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100 +#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200 +#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300 +#define 
IEEE80211_VHT_CAP_RXSTBC_4 0x00000400 +#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700 +#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 +#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 +#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13 +#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK \ + (7 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT) +#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT 16 +#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK \ + (7 << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT) +#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000 +#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000 +#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000 +#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000 +#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23 +#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \ + (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT) +#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000 +#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000 +#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 +#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 + +/* 802.11ax HE MAC capabilities */ +#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 +#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02 +#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0 + +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70 + +/* Link adaptation is split between byte HE_MAC_CAP1 and + * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE + * in which case the following values apply: + * 0 = No feedback. + * 1 = reserved. + * 2 = Unsolicited feedback. 
+ * 3 = both + */ +#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80 + +#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01 +#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02 +#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04 +#define IEEE80211_HE_MAC_CAP2_BSR 0x08 +#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10 +#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20 +#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40 +#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80 + +#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01 +#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02 +#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04 + +/* The maximum length of an A-MDPU is defined by the combination of the Maximum + * A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the + * same field in the HE capabilities. + */ +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18 +#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20 +#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 +#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 + +#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 +#define IEEE80211_HE_MAC_CAP4_QTP 0x02 +#define IEEE80211_HE_MAC_CAP4_BQR 0x04 +#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08 +#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10 +#define IEEE80211_HE_MAC_CAP4_OPS 0x20 +#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40 + +/* 802.11ax HE PHY capabilities */ +#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe + +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f +#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10 +#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20 +#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40 +/* Midamble RX Max NSTS is split between byte #2 and byte #3 */ +#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80 + +#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01 +#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02 +#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04 +#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08 +#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10 +#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20 + +/* Note that the meaning of UL MU below is different between an AP and a non-AP + * sta, where in the AP case it indicates support for Rx and in the non-AP sta + * case it indicates support for Tx. 
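+ * For example (an illustrative reading of the two flags defined next): an AP
+ * that sets IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO advertises that it can
+ * receive full-bandwidth UL MU-MIMO transmissions, while a non-AP STA setting
+ * the same bit advertises that it can transmit them.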
+ */ +#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40 +#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80 + +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20 +#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40 +#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80 + +#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01 +#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02 + +/* Minimal allowed value of Max STS under 80MHz is 3 */ +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c + +/* Minimal allowed value of Max STS above 80MHz is 3 */ +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0 + +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07 + +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38 + +#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40 +#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80 + +#define 
IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01 +#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02 +#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04 +#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08 +#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10 +#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20 +#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40 +#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80 + +#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01 +#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02 +#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38 +#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40 +#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80 + +#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01 +#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02 +#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04 +#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08 +#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10 +#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20 + +/* 802.11ax HE TX/RX MCS NSS Support */ +#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) +#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6) +#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11) +#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0 +#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800 + +/* TX/RX HE MCS Support field Highest MCS subfield encoding */ +enum ieee80211_he_highest_mcs_supported_subfield_enc { + HIGHEST_MCS_SUPPORTED_MCS7 = 0, + HIGHEST_MCS_SUPPORTED_MCS8, + HIGHEST_MCS_SUPPORTED_MCS9, + HIGHEST_MCS_SUPPORTED_MCS10, + HIGHEST_MCS_SUPPORTED_MCS11, +}; + +/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */ +static inline u8 +ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap) +{ + u8 count = 4; + + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) + count += 4; + + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) + count += 4; + + return count; +} + +/* 802.11ax HE PPE Thresholds */ +#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1) +#define IEEE80211_PPE_THRES_NSS_POS (0) +#define IEEE80211_PPE_THRES_NSS_MASK (7) +#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \ + (BIT(5) | BIT(6)) +#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78 +#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3) +#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3) + +/* + * Calculate 802.11ax HE capabilities IE PPE field size + * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8* + */ +static inline u8 +ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) +{ + u8 n; + + if ((phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0) + return 0; + + n = hweight8(ppe_thres_hdr & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); + n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >> + IEEE80211_PPE_THRES_NSS_POS)); + + /* + * Each pair is 6 bits, and we need to add the 7 "header" bits to the + * total size. 
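+ * For example (illustrative numbers): with two RU index bits set in the
+ * header byte and an NSS subfield of 1 (i.e. two spatial streams), n becomes
+ * 2 * 2 = 4 PPET pairs, so the field is 4 * 6 + 7 = 31 bits, which
+ * DIV_ROUND_UP() below rounds up to 4 octets.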
+ */
+ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
+ n = DIV_ROUND_UP(n, 8);
+
+ return n;
+}
+
+/* HE Operation defines */
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000
+#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
+#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000
+
+/*
+ * ieee80211_he_oper_size - calculate 802.11ax HE Operation IE size
+ * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_oper_ie has at least
+ * sizeof(struct ieee80211_he_operation) bytes, checked already in
+ * ieee802_11_parse_elems_crc()
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_oper_size(const u8 *he_oper_ie)
+{
+ struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
+ u8 oper_len = sizeof(struct ieee80211_he_operation);
+ u32 he_oper_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_oper_ie)
+ return 0;
+
+ /* Calc required length */
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ oper_len += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP)
+ oper_len++;
+
+ /* Add the first byte (extension ID) to the total length */
+ oper_len++;
+
+ return oper_len;
+}
+
+/* Authentication algorithms */
+#define WLAN_AUTH_OPEN 0
+#define WLAN_AUTH_SHARED_KEY 1
+#define WLAN_AUTH_FT 2
+#define WLAN_AUTH_SAE 3
+#define WLAN_AUTH_FILS_SK 4
+#define WLAN_AUTH_FILS_SK_PFS 5
+#define WLAN_AUTH_FILS_PK 6
+#define WLAN_AUTH_LEAP 128
+
+#define WLAN_AUTH_CHALLENGE_LEN 128
+
+#define WLAN_CAPABILITY_ESS (1<<0)
+#define WLAN_CAPABILITY_IBSS (1<<1)
+
+/*
+ * A mesh STA sets the ESS and IBSS capability bits to zero.
+ * However, this holds true for p2p probe responses (in the p2p_find
+ * phase) as well.
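+ * The WLAN_CAPABILITY_IS_STA_BSS() helper below simply checks that both
+ * bits are clear, so (for example) it is true for a mesh beacon and false
+ * for an infrastructure (ESS) or IBSS beacon.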
+ */ +#define WLAN_CAPABILITY_IS_STA_BSS(cap) \ + (!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS))) + +#define WLAN_CAPABILITY_CF_POLLABLE (1<<2) +#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3) +#define WLAN_CAPABILITY_PRIVACY (1<<4) +#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5) +#define WLAN_CAPABILITY_PBCC (1<<6) +#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7) + +/* 802.11h */ +#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8) +#define WLAN_CAPABILITY_QOS (1<<9) +#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) +#define WLAN_CAPABILITY_APSD (1<<11) +#define WLAN_CAPABILITY_RADIO_MEASURE (1<<12) +#define WLAN_CAPABILITY_DSSS_OFDM (1<<13) +#define WLAN_CAPABILITY_DEL_BACK (1<<14) +#define WLAN_CAPABILITY_IMM_BACK (1<<15) + +/* DMG (60gHz) 802.11ad */ +/* type - bits 0..1 */ +#define WLAN_CAPABILITY_DMG_TYPE_MASK (3<<0) +#define WLAN_CAPABILITY_DMG_TYPE_IBSS (1<<0) /* Tx by: STA */ +#define WLAN_CAPABILITY_DMG_TYPE_PBSS (2<<0) /* Tx by: PCP */ +#define WLAN_CAPABILITY_DMG_TYPE_AP (3<<0) /* Tx by: AP */ + +#define WLAN_CAPABILITY_DMG_CBAP_ONLY (1<<2) +#define WLAN_CAPABILITY_DMG_CBAP_SOURCE (1<<3) +#define WLAN_CAPABILITY_DMG_PRIVACY (1<<4) +#define WLAN_CAPABILITY_DMG_ECPAC (1<<5) + +#define WLAN_CAPABILITY_DMG_SPECTRUM_MGMT (1<<8) +#define WLAN_CAPABILITY_DMG_RADIO_MEASURE (1<<12) + +/* measurement */ +#define IEEE80211_SPCT_MSR_RPRT_MODE_LATE (1<<0) +#define IEEE80211_SPCT_MSR_RPRT_MODE_INCAPABLE (1<<1) +#define IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED (1<<2) + +#define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0 +#define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1 +#define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2 + +/* 802.11g ERP information element */ +#define WLAN_ERP_NON_ERP_PRESENT (1<<0) +#define WLAN_ERP_USE_PROTECTION (1<<1) +#define WLAN_ERP_BARKER_PREAMBLE (1<<2) + +/* WLAN_ERP_BARKER_PREAMBLE values */ +enum { + WLAN_ERP_PREAMBLE_SHORT = 0, + WLAN_ERP_PREAMBLE_LONG = 1, +}; + +/* Band ID, 802.11ad #8.4.1.45 */ +enum { + IEEE80211_BANDID_TV_WS = 0, /* TV white spaces */ + IEEE80211_BANDID_SUB1 = 1, /* Sub-1 GHz (excluding TV white spaces) */ + IEEE80211_BANDID_2G = 2, /* 2.4 GHz */ + IEEE80211_BANDID_3G = 3, /* 3.6 GHz */ + IEEE80211_BANDID_5G = 4, /* 4.9 and 5 GHz */ + IEEE80211_BANDID_60G = 5, /* 60 GHz */ +}; + +/* Status codes */ +enum ieee80211_statuscode { + WLAN_STATUS_SUCCESS = 0, + WLAN_STATUS_UNSPECIFIED_FAILURE = 1, + WLAN_STATUS_CAPS_UNSUPPORTED = 10, + WLAN_STATUS_REASSOC_NO_ASSOC = 11, + WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12, + WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13, + WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14, + WLAN_STATUS_CHALLENGE_FAIL = 15, + WLAN_STATUS_AUTH_TIMEOUT = 16, + WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17, + WLAN_STATUS_ASSOC_DENIED_RATES = 18, + /* 802.11b */ + WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19, + WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20, + WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21, + /* 802.11h */ + WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22, + WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23, + WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24, + /* 802.11g */ + WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25, + WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26, + /* 802.11w */ + WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY = 30, + WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31, + /* 802.11i */ + WLAN_STATUS_INVALID_IE = 40, + WLAN_STATUS_INVALID_GROUP_CIPHER = 41, + WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42, + WLAN_STATUS_INVALID_AKMP = 43, + WLAN_STATUS_UNSUPP_RSN_VERSION = 44, + WLAN_STATUS_INVALID_RSN_IE_CAP = 45, + WLAN_STATUS_CIPHER_SUITE_REJECTED = 46, + /* 802.11e */ + 
WLAN_STATUS_UNSPECIFIED_QOS = 32, + WLAN_STATUS_ASSOC_DENIED_NOBANDWIDTH = 33, + WLAN_STATUS_ASSOC_DENIED_LOWACK = 34, + WLAN_STATUS_ASSOC_DENIED_UNSUPP_QOS = 35, + WLAN_STATUS_REQUEST_DECLINED = 37, + WLAN_STATUS_INVALID_QOS_PARAM = 38, + WLAN_STATUS_CHANGE_TSPEC = 39, + WLAN_STATUS_WAIT_TS_DELAY = 47, + WLAN_STATUS_NO_DIRECT_LINK = 48, + WLAN_STATUS_STA_NOT_PRESENT = 49, + WLAN_STATUS_STA_NOT_QSTA = 50, + /* 802.11s */ + WLAN_STATUS_ANTI_CLOG_REQUIRED = 76, + WLAN_STATUS_FCG_NOT_SUPP = 78, + WLAN_STATUS_STA_NO_TBTT = 78, + /* 802.11ad */ + WLAN_STATUS_REJECTED_WITH_SUGGESTED_CHANGES = 39, + WLAN_STATUS_REJECTED_FOR_DELAY_PERIOD = 47, + WLAN_STATUS_REJECT_WITH_SCHEDULE = 83, + WLAN_STATUS_PENDING_ADMITTING_FST_SESSION = 86, + WLAN_STATUS_PERFORMING_FST_NOW = 87, + WLAN_STATUS_PENDING_GAP_IN_BA_WINDOW = 88, + WLAN_STATUS_REJECT_U_PID_SETTING = 89, + WLAN_STATUS_REJECT_DSE_BAND = 96, + WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99, + WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103, + /* 802.11ai */ + WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108, + WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109, +}; + + +/* Reason codes */ +enum ieee80211_reasoncode { + WLAN_REASON_UNSPECIFIED = 1, + WLAN_REASON_PREV_AUTH_NOT_VALID = 2, + WLAN_REASON_DEAUTH_LEAVING = 3, + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4, + WLAN_REASON_DISASSOC_AP_BUSY = 5, + WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6, + WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7, + WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8, + WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9, + /* 802.11h */ + WLAN_REASON_DISASSOC_BAD_POWER = 10, + WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11, + /* 802.11i */ + WLAN_REASON_INVALID_IE = 13, + WLAN_REASON_MIC_FAILURE = 14, + WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15, + WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16, + WLAN_REASON_IE_DIFFERENT = 17, + WLAN_REASON_INVALID_GROUP_CIPHER = 18, + WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19, + WLAN_REASON_INVALID_AKMP = 20, + WLAN_REASON_UNSUPP_RSN_VERSION = 21, + WLAN_REASON_INVALID_RSN_IE_CAP = 22, + WLAN_REASON_IEEE8021X_FAILED = 23, + WLAN_REASON_CIPHER_SUITE_REJECTED = 24, + /* TDLS (802.11z) */ + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE = 25, + WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED = 26, + /* 802.11e */ + WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32, + WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33, + WLAN_REASON_DISASSOC_LOW_ACK = 34, + WLAN_REASON_DISASSOC_QAP_EXCEED_TXOP = 35, + WLAN_REASON_QSTA_LEAVE_QBSS = 36, + WLAN_REASON_QSTA_NOT_USE = 37, + WLAN_REASON_QSTA_REQUIRE_SETUP = 38, + WLAN_REASON_QSTA_TIMEOUT = 39, + WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45, + /* 802.11s */ + WLAN_REASON_MESH_PEER_CANCELED = 52, + WLAN_REASON_MESH_MAX_PEERS = 53, + WLAN_REASON_MESH_CONFIG = 54, + WLAN_REASON_MESH_CLOSE = 55, + WLAN_REASON_MESH_MAX_RETRIES = 56, + WLAN_REASON_MESH_CONFIRM_TIMEOUT = 57, + WLAN_REASON_MESH_INVALID_GTK = 58, + WLAN_REASON_MESH_INCONSISTENT_PARAM = 59, + WLAN_REASON_MESH_INVALID_SECURITY = 60, + WLAN_REASON_MESH_PATH_ERROR = 61, + WLAN_REASON_MESH_PATH_NOFORWARD = 62, + WLAN_REASON_MESH_PATH_DEST_UNREACHABLE = 63, + WLAN_REASON_MAC_EXISTS_IN_MBSS = 64, + WLAN_REASON_MESH_CHAN_REGULATORY = 65, + WLAN_REASON_MESH_CHAN = 66, +}; + + +/* Information Element IDs */ +enum ieee80211_eid { + WLAN_EID_SSID = 0, + WLAN_EID_SUPP_RATES = 1, + WLAN_EID_FH_PARAMS = 2, /* reserved now */ + WLAN_EID_DS_PARAMS = 3, + WLAN_EID_CF_PARAMS = 4, + WLAN_EID_TIM = 5, + WLAN_EID_IBSS_PARAMS = 6, + WLAN_EID_COUNTRY = 7, + /* 8, 9 reserved */ + WLAN_EID_REQUEST = 10, + WLAN_EID_QBSS_LOAD = 
11, + WLAN_EID_EDCA_PARAM_SET = 12, + WLAN_EID_TSPEC = 13, + WLAN_EID_TCLAS = 14, + WLAN_EID_SCHEDULE = 15, + WLAN_EID_CHALLENGE = 16, + /* 17-31 reserved for challenge text extension */ + WLAN_EID_PWR_CONSTRAINT = 32, + WLAN_EID_PWR_CAPABILITY = 33, + WLAN_EID_TPC_REQUEST = 34, + WLAN_EID_TPC_REPORT = 35, + WLAN_EID_SUPPORTED_CHANNELS = 36, + WLAN_EID_CHANNEL_SWITCH = 37, + WLAN_EID_MEASURE_REQUEST = 38, + WLAN_EID_MEASURE_REPORT = 39, + WLAN_EID_QUIET = 40, + WLAN_EID_IBSS_DFS = 41, + WLAN_EID_ERP_INFO = 42, + WLAN_EID_TS_DELAY = 43, + WLAN_EID_TCLAS_PROCESSING = 44, + WLAN_EID_HT_CAPABILITY = 45, + WLAN_EID_QOS_CAPA = 46, + /* 47 reserved for Broadcom */ + WLAN_EID_RSN = 48, + WLAN_EID_802_15_COEX = 49, + WLAN_EID_EXT_SUPP_RATES = 50, + WLAN_EID_AP_CHAN_REPORT = 51, + WLAN_EID_NEIGHBOR_REPORT = 52, + WLAN_EID_RCPI = 53, + WLAN_EID_MOBILITY_DOMAIN = 54, + WLAN_EID_FAST_BSS_TRANSITION = 55, + WLAN_EID_TIMEOUT_INTERVAL = 56, + WLAN_EID_RIC_DATA = 57, + WLAN_EID_DSE_REGISTERED_LOCATION = 58, + WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59, + WLAN_EID_EXT_CHANSWITCH_ANN = 60, + WLAN_EID_HT_OPERATION = 61, + WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62, + WLAN_EID_BSS_AVG_ACCESS_DELAY = 63, + WLAN_EID_ANTENNA_INFO = 64, + WLAN_EID_RSNI = 65, + WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66, + WLAN_EID_BSS_AVAILABLE_CAPACITY = 67, + WLAN_EID_BSS_AC_ACCESS_DELAY = 68, + WLAN_EID_TIME_ADVERTISEMENT = 69, + WLAN_EID_RRM_ENABLED_CAPABILITIES = 70, + WLAN_EID_MULTIPLE_BSSID = 71, + WLAN_EID_BSS_COEX_2040 = 72, + WLAN_EID_BSS_INTOLERANT_CHL_REPORT = 73, + WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74, + WLAN_EID_RIC_DESCRIPTOR = 75, + WLAN_EID_MMIE = 76, + WLAN_EID_ASSOC_COMEBACK_TIME = 77, + WLAN_EID_EVENT_REQUEST = 78, + WLAN_EID_EVENT_REPORT = 79, + WLAN_EID_DIAGNOSTIC_REQUEST = 80, + WLAN_EID_DIAGNOSTIC_REPORT = 81, + WLAN_EID_LOCATION_PARAMS = 82, + WLAN_EID_NON_TX_BSSID_CAP = 83, + WLAN_EID_SSID_LIST = 84, + WLAN_EID_MULTI_BSSID_IDX = 85, + WLAN_EID_FMS_DESCRIPTOR = 86, + WLAN_EID_FMS_REQUEST = 87, + WLAN_EID_FMS_RESPONSE = 88, + WLAN_EID_QOS_TRAFFIC_CAPA = 89, + WLAN_EID_BSS_MAX_IDLE_PERIOD = 90, + WLAN_EID_TSF_REQUEST = 91, + WLAN_EID_TSF_RESPOSNE = 92, + WLAN_EID_WNM_SLEEP_MODE = 93, + WLAN_EID_TIM_BCAST_REQ = 94, + WLAN_EID_TIM_BCAST_RESP = 95, + WLAN_EID_COLL_IF_REPORT = 96, + WLAN_EID_CHANNEL_USAGE = 97, + WLAN_EID_TIME_ZONE = 98, + WLAN_EID_DMS_REQUEST = 99, + WLAN_EID_DMS_RESPONSE = 100, + WLAN_EID_LINK_ID = 101, + WLAN_EID_WAKEUP_SCHEDUL = 102, + /* 103 reserved */ + WLAN_EID_CHAN_SWITCH_TIMING = 104, + WLAN_EID_PTI_CONTROL = 105, + WLAN_EID_PU_BUFFER_STATUS = 106, + WLAN_EID_INTERWORKING = 107, + WLAN_EID_ADVERTISEMENT_PROTOCOL = 108, + WLAN_EID_EXPEDITED_BW_REQ = 109, + WLAN_EID_QOS_MAP_SET = 110, + WLAN_EID_ROAMING_CONSORTIUM = 111, + WLAN_EID_EMERGENCY_ALERT = 112, + WLAN_EID_MESH_CONFIG = 113, + WLAN_EID_MESH_ID = 114, + WLAN_EID_LINK_METRIC_REPORT = 115, + WLAN_EID_CONGESTION_NOTIFICATION = 116, + WLAN_EID_PEER_MGMT = 117, + WLAN_EID_CHAN_SWITCH_PARAM = 118, + WLAN_EID_MESH_AWAKE_WINDOW = 119, + WLAN_EID_BEACON_TIMING = 120, + WLAN_EID_MCCAOP_SETUP_REQ = 121, + WLAN_EID_MCCAOP_SETUP_RESP = 122, + WLAN_EID_MCCAOP_ADVERT = 123, + WLAN_EID_MCCAOP_TEARDOWN = 124, + WLAN_EID_GANN = 125, + WLAN_EID_RANN = 126, + WLAN_EID_EXT_CAPABILITY = 127, + /* 128, 129 reserved for Agere */ + WLAN_EID_PREQ = 130, + WLAN_EID_PREP = 131, + WLAN_EID_PERR = 132, + /* 133-136 reserved for Cisco */ + WLAN_EID_PXU = 137, + WLAN_EID_PXUC = 138, + WLAN_EID_AUTH_MESH_PEER_EXCH = 139, + WLAN_EID_MIC = 140, + WLAN_EID_DESTINATION_URI 
= 141, + WLAN_EID_UAPSD_COEX = 142, + WLAN_EID_WAKEUP_SCHEDULE = 143, + WLAN_EID_EXT_SCHEDULE = 144, + WLAN_EID_STA_AVAILABILITY = 145, + WLAN_EID_DMG_TSPEC = 146, + WLAN_EID_DMG_AT = 147, + WLAN_EID_DMG_CAP = 148, + /* 149 reserved for Cisco */ + WLAN_EID_CISCO_VENDOR_SPECIFIC = 150, + WLAN_EID_DMG_OPERATION = 151, + WLAN_EID_DMG_BSS_PARAM_CHANGE = 152, + WLAN_EID_DMG_BEAM_REFINEMENT = 153, + WLAN_EID_CHANNEL_MEASURE_FEEDBACK = 154, + /* 155-156 reserved for Cisco */ + WLAN_EID_AWAKE_WINDOW = 157, + WLAN_EID_MULTI_BAND = 158, + WLAN_EID_ADDBA_EXT = 159, + WLAN_EID_NEXT_PCP_LIST = 160, + WLAN_EID_PCP_HANDOVER = 161, + WLAN_EID_DMG_LINK_MARGIN = 162, + WLAN_EID_SWITCHING_STREAM = 163, + WLAN_EID_SESSION_TRANSITION = 164, + WLAN_EID_DYN_TONE_PAIRING_REPORT = 165, + WLAN_EID_CLUSTER_REPORT = 166, + WLAN_EID_RELAY_CAP = 167, + WLAN_EID_RELAY_XFER_PARAM_SET = 168, + WLAN_EID_BEAM_LINK_MAINT = 169, + WLAN_EID_MULTIPLE_MAC_ADDR = 170, + WLAN_EID_U_PID = 171, + WLAN_EID_DMG_LINK_ADAPT_ACK = 172, + /* 173 reserved for Symbol */ + WLAN_EID_MCCAOP_ADV_OVERVIEW = 174, + WLAN_EID_QUIET_PERIOD_REQ = 175, + /* 176 reserved for Symbol */ + WLAN_EID_QUIET_PERIOD_RESP = 177, + /* 178-179 reserved for Symbol */ + /* 180 reserved for ISO/IEC 20011 */ + WLAN_EID_EPAC_POLICY = 182, + WLAN_EID_CLISTER_TIME_OFF = 183, + WLAN_EID_INTER_AC_PRIO = 184, + WLAN_EID_SCS_DESCRIPTOR = 185, + WLAN_EID_QLOAD_REPORT = 186, + WLAN_EID_HCCA_TXOP_UPDATE_COUNT = 187, + WLAN_EID_HL_STREAM_ID = 188, + WLAN_EID_GCR_GROUP_ADDR = 189, + WLAN_EID_ANTENNA_SECTOR_ID_PATTERN = 190, + WLAN_EID_VHT_CAPABILITY = 191, + WLAN_EID_VHT_OPERATION = 192, + WLAN_EID_EXTENDED_BSS_LOAD = 193, + WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, + WLAN_EID_VHT_TX_POWER_ENVELOPE = 195, + WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, + WLAN_EID_AID = 197, + WLAN_EID_QUIET_CHANNEL = 198, + WLAN_EID_OPMODE_NOTIF = 199, + + WLAN_EID_VENDOR_SPECIFIC = 221, + WLAN_EID_QOS_PARAMETER = 222, + WLAN_EID_CAG_NUMBER = 237, + WLAN_EID_AP_CSN = 239, + WLAN_EID_FILS_INDICATION = 240, + WLAN_EID_DILS = 241, + WLAN_EID_FRAGMENT = 242, + WLAN_EID_EXTENSION = 255 +}; + +/* Element ID Extensions for Element ID 255 */ +enum ieee80211_eid_ext { + WLAN_EID_EXT_ASSOC_DELAY_INFO = 1, + WLAN_EID_EXT_FILS_REQ_PARAMS = 2, + WLAN_EID_EXT_FILS_KEY_CONFIRM = 3, + WLAN_EID_EXT_FILS_SESSION = 4, + WLAN_EID_EXT_FILS_HLP_CONTAINER = 5, + WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6, + WLAN_EID_EXT_KEY_DELIVERY = 7, + WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, + WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, + WLAN_EID_EXT_FILS_NONCE = 13, + WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14, + WLAN_EID_EXT_HE_CAPABILITY = 35, + WLAN_EID_EXT_HE_OPERATION = 36, + WLAN_EID_EXT_UORA = 37, + WLAN_EID_EXT_HE_MU_EDCA = 38, +}; + +/* Action category code */ +enum ieee80211_category { + WLAN_CATEGORY_SPECTRUM_MGMT = 0, + WLAN_CATEGORY_QOS = 1, + WLAN_CATEGORY_DLS = 2, + WLAN_CATEGORY_BACK = 3, + WLAN_CATEGORY_PUBLIC = 4, + WLAN_CATEGORY_RADIO_MEASUREMENT = 5, + WLAN_CATEGORY_HT = 7, + WLAN_CATEGORY_SA_QUERY = 8, + WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, + WLAN_CATEGORY_WNM = 10, + WLAN_CATEGORY_WNM_UNPROTECTED = 11, + WLAN_CATEGORY_TDLS = 12, + WLAN_CATEGORY_MESH_ACTION = 13, + WLAN_CATEGORY_MULTIHOP_ACTION = 14, + WLAN_CATEGORY_SELF_PROTECTED = 15, + WLAN_CATEGORY_DMG = 16, + WLAN_CATEGORY_WMM = 17, + WLAN_CATEGORY_FST = 18, + WLAN_CATEGORY_UNPROT_DMG = 20, + WLAN_CATEGORY_VHT = 21, + WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, + WLAN_CATEGORY_VENDOR_SPECIFIC = 127, +}; + +/* SPECTRUM_MGMT action code */ +enum 
ieee80211_spectrum_mgmt_actioncode { + WLAN_ACTION_SPCT_MSR_REQ = 0, + WLAN_ACTION_SPCT_MSR_RPRT = 1, + WLAN_ACTION_SPCT_TPC_REQ = 2, + WLAN_ACTION_SPCT_TPC_RPRT = 3, + WLAN_ACTION_SPCT_CHL_SWITCH = 4, +}; + +/* HT action codes */ +enum ieee80211_ht_actioncode { + WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0, + WLAN_HT_ACTION_SMPS = 1, + WLAN_HT_ACTION_PSMP = 2, + WLAN_HT_ACTION_PCO_PHASE = 3, + WLAN_HT_ACTION_CSI = 4, + WLAN_HT_ACTION_NONCOMPRESSED_BF = 5, + WLAN_HT_ACTION_COMPRESSED_BF = 6, + WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7, +}; + +/* VHT action codes */ +enum ieee80211_vht_actioncode { + WLAN_VHT_ACTION_COMPRESSED_BF = 0, + WLAN_VHT_ACTION_GROUPID_MGMT = 1, + WLAN_VHT_ACTION_OPMODE_NOTIF = 2, +}; + +/* Self Protected Action codes */ +enum ieee80211_self_protected_actioncode { + WLAN_SP_RESERVED = 0, + WLAN_SP_MESH_PEERING_OPEN = 1, + WLAN_SP_MESH_PEERING_CONFIRM = 2, + WLAN_SP_MESH_PEERING_CLOSE = 3, + WLAN_SP_MGK_INFORM = 4, + WLAN_SP_MGK_ACK = 5, +}; + +/* Mesh action codes */ +enum ieee80211_mesh_actioncode { + WLAN_MESH_ACTION_LINK_METRIC_REPORT, + WLAN_MESH_ACTION_HWMP_PATH_SELECTION, + WLAN_MESH_ACTION_GATE_ANNOUNCEMENT, + WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION, + WLAN_MESH_ACTION_MCCA_SETUP_REQUEST, + WLAN_MESH_ACTION_MCCA_SETUP_REPLY, + WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST, + WLAN_MESH_ACTION_MCCA_ADVERTISEMENT, + WLAN_MESH_ACTION_MCCA_TEARDOWN, + WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST, + WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE, +}; + +/* Security key length */ +enum ieee80211_key_len { + WLAN_KEY_LEN_WEP40 = 5, + WLAN_KEY_LEN_WEP104 = 13, + WLAN_KEY_LEN_CCMP = 16, + WLAN_KEY_LEN_CCMP_256 = 32, + WLAN_KEY_LEN_TKIP = 32, + WLAN_KEY_LEN_AES_CMAC = 16, + WLAN_KEY_LEN_SMS4 = 32, + WLAN_KEY_LEN_GCMP = 16, + WLAN_KEY_LEN_GCMP_256 = 32, + WLAN_KEY_LEN_BIP_CMAC_256 = 32, + WLAN_KEY_LEN_BIP_GMAC_128 = 16, + WLAN_KEY_LEN_BIP_GMAC_256 = 32, +}; + +#define IEEE80211_WEP_IV_LEN 4 +#define IEEE80211_WEP_ICV_LEN 4 +#define IEEE80211_CCMP_HDR_LEN 8 +#define IEEE80211_CCMP_MIC_LEN 8 +#define IEEE80211_CCMP_PN_LEN 6 +#define IEEE80211_CCMP_256_HDR_LEN 8 +#define IEEE80211_CCMP_256_MIC_LEN 16 +#define IEEE80211_CCMP_256_PN_LEN 6 +#define IEEE80211_TKIP_IV_LEN 8 +#define IEEE80211_TKIP_ICV_LEN 4 +#define IEEE80211_CMAC_PN_LEN 6 +#define IEEE80211_GMAC_PN_LEN 6 +#define IEEE80211_GCMP_HDR_LEN 8 +#define IEEE80211_GCMP_MIC_LEN 16 +#define IEEE80211_GCMP_PN_LEN 6 + +#define FILS_NONCE_LEN 16 +#define FILS_MAX_KEK_LEN 64 + +#define FILS_ERP_MAX_USERNAME_LEN 16 +#define FILS_ERP_MAX_REALM_LEN 253 +#define FILS_ERP_MAX_RRK_LEN 64 + +#define PMK_MAX_LEN 64 + +/* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */ +enum ieee80211_pub_actioncode { + WLAN_PUB_ACTION_20_40_BSS_COEX = 0, + WLAN_PUB_ACTION_DSE_ENABLEMENT = 1, + WLAN_PUB_ACTION_DSE_DEENABLEMENT = 2, + WLAN_PUB_ACTION_DSE_REG_LOC_ANN = 3, + WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, + WLAN_PUB_ACTION_DSE_MSMT_REQ = 5, + WLAN_PUB_ACTION_DSE_MSMT_RESP = 6, + WLAN_PUB_ACTION_MSMT_PILOT = 7, + WLAN_PUB_ACTION_DSE_PC = 8, + WLAN_PUB_ACTION_VENDOR_SPECIFIC = 9, + WLAN_PUB_ACTION_GAS_INITIAL_REQ = 10, + WLAN_PUB_ACTION_GAS_INITIAL_RESP = 11, + WLAN_PUB_ACTION_GAS_COMEBACK_REQ = 12, + WLAN_PUB_ACTION_GAS_COMEBACK_RESP = 13, + WLAN_PUB_ACTION_TDLS_DISCOVER_RES = 14, + WLAN_PUB_ACTION_LOC_TRACK_NOTI = 15, + WLAN_PUB_ACTION_QAB_REQUEST_FRAME = 16, + WLAN_PUB_ACTION_QAB_RESPONSE_FRAME = 17, + WLAN_PUB_ACTION_QMF_POLICY = 18, + WLAN_PUB_ACTION_QMF_POLICY_CHANGE = 19, + WLAN_PUB_ACTION_QLOAD_REQUEST = 20, + 
WLAN_PUB_ACTION_QLOAD_REPORT = 21,
+ WLAN_PUB_ACTION_HCCA_TXOP_ADVERT = 22,
+ WLAN_PUB_ACTION_HCCA_TXOP_RESPONSE = 23,
+ WLAN_PUB_ACTION_PUBLIC_KEY = 24,
+ WLAN_PUB_ACTION_CHANNEL_AVAIL_QUERY = 25,
+ WLAN_PUB_ACTION_CHANNEL_SCHEDULE_MGMT = 26,
+ WLAN_PUB_ACTION_CONTACT_VERI_SIGNAL = 27,
+ WLAN_PUB_ACTION_GDD_ENABLEMENT_REQ = 28,
+ WLAN_PUB_ACTION_GDD_ENABLEMENT_RESP = 29,
+ WLAN_PUB_ACTION_NETWORK_CHANNEL_CONTROL = 30,
+ WLAN_PUB_ACTION_WHITE_SPACE_MAP_ANN = 31,
+ WLAN_PUB_ACTION_FTM_REQUEST = 32,
+ WLAN_PUB_ACTION_FTM = 33,
+ WLAN_PUB_ACTION_FILS_DISCOVERY = 34,
+};
+
+/* TDLS action codes */
+enum ieee80211_tdls_actioncode {
+ WLAN_TDLS_SETUP_REQUEST = 0,
+ WLAN_TDLS_SETUP_RESPONSE = 1,
+ WLAN_TDLS_SETUP_CONFIRM = 2,
+ WLAN_TDLS_TEARDOWN = 3,
+ WLAN_TDLS_PEER_TRAFFIC_INDICATION = 4,
+ WLAN_TDLS_CHANNEL_SWITCH_REQUEST = 5,
+ WLAN_TDLS_CHANNEL_SWITCH_RESPONSE = 6,
+ WLAN_TDLS_PEER_PSM_REQUEST = 7,
+ WLAN_TDLS_PEER_PSM_RESPONSE = 8,
+ WLAN_TDLS_PEER_TRAFFIC_RESPONSE = 9,
+ WLAN_TDLS_DISCOVERY_REQUEST = 10,
+};
+
+/* Extended Channel Switching capability to be set in the 1st byte of
+ * the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2)
+
+/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
+#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
+#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
+#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
+
+/* Interworking capabilities are set in 7th bit of 4th byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA4_INTERWORKING_ENABLED BIT(7)
+
+/*
+ * TDLS capabilities to be enabled in the 5th byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5)
+#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
+#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
+
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
+#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
+
+/* Defines the maximum number of MSDUs in an A-MSDU.
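+ * As an illustration of the two defines that follow, the 2-bit value is
+ * split across an octet boundary: its LSB is bit 7 of the 8th extended
+ * capabilities octet and its MSB is bit 0 of the 9th octet.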
*/ +#define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB BIT(7) +#define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB BIT(0) + +/* + * Fine Timing Measurement Initiator - bit 71 of @WLAN_EID_EXT_CAPABILITY + * information element + */ +#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7) + +/* TDLS specific payload type in the LLC/SNAP header */ +#define WLAN_TDLS_SNAP_RFTYPE 0x2 + +/* BSS Coex IE information field bits */ +#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) + +/** + * enum ieee80211_mesh_sync_method - mesh synchronization method identifier + * + * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method + * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method + * that will be specified in a vendor specific information element + */ +enum ieee80211_mesh_sync_method { + IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1, + IEEE80211_SYNC_METHOD_VENDOR = 255, +}; + +/** + * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier + * + * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol + * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will + * be specified in a vendor specific information element + */ +enum ieee80211_mesh_path_protocol { + IEEE80211_PATH_PROTOCOL_HWMP = 1, + IEEE80211_PATH_PROTOCOL_VENDOR = 255, +}; + +/** + * enum ieee80211_mesh_path_metric - mesh path selection metric identifier + * + * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric + * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be + * specified in a vendor specific information element + */ +enum ieee80211_mesh_path_metric { + IEEE80211_PATH_METRIC_AIRTIME = 1, + IEEE80211_PATH_METRIC_VENDOR = 255, +}; + +/** + * enum ieee80211_root_mode_identifier - root mesh STA mode identifier + * + * These attribute are used by dot11MeshHWMPRootMode to set root mesh STA mode + * + * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default) + * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA if greater than + * this value + * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA supports + * the proactive PREQ with proactive PREP subfield set to 0 + * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA + * supports the proactive PREQ with proactive PREP subfield set to 1 + * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA supports + * the proactive RANN + */ +enum ieee80211_root_mode_identifier { + IEEE80211_ROOTMODE_NO_ROOT = 0, + IEEE80211_ROOTMODE_ROOT = 1, + IEEE80211_PROACTIVE_PREQ_NO_PREP = 2, + IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3, + IEEE80211_PROACTIVE_RANN = 4, +}; + +/* + * IEEE 802.11-2007 7.3.2.9 Country information element + * + * Minimum length is 8 octets, ie len must be evenly + * divisible by 2 + */ + +/* Although the spec says 8 I'm seeing 6 in practice */ +#define IEEE80211_COUNTRY_IE_MIN_LEN 6 + +/* The Country String field of the element shall be 3 octets in length */ +#define IEEE80211_COUNTRY_STRING_LEN 3 + +/* + * For regulatory extension stuff see IEEE 802.11-2007 + * Annex I (page 1141) and Annex J (page 1147). Also + * review 7.3.2.9. 
+ *
+ * When dot11RegulatoryClassesRequired is true and the
+ * first_channel/reg_extension_id is >= 201 then the IE
+ * comprises the 'ext' struct represented below:
+ *
+ * - Regulatory extension ID - when generating IE this just needs
+ * to be monotonically increasing for each triplet passed in
+ * the IE
+ * - Regulatory class - index into set of rules
+ * - Coverage class - index into air propagation time (Table 7-27),
+ * in microseconds, you can compute the air propagation time from
+ * the index by multiplying by 3, so index 10 yields a propagation
+ * of 30 us. Valid values are 0-31, values 32-255 are not defined
+ * yet. A value of 0 indicates air propagation of <= 1 us.
+ *
+ * See also Table I.2 for Emission limit sets and Table
+ * I.3 for Behavior limit sets. Table J.1 indicates how to map
+ * a reg_class to an emission limit set and behavior limit set.
+ */
+#define IEEE80211_COUNTRY_EXTENSION_ID 201
+
+/*
+ * Channel numbers in the IE must be monotonically increasing
+ * if dot11RegulatoryClassesRequired is not true.
+ *
+ * If dot11RegulatoryClassesRequired is true consecutive
+ * subband triplets following a regulatory triplet shall
+ * have monotonically increasing first_channel number fields.
+ *
+ * Channel numbers shall not overlap.
+ *
+ * Note that max_power is signed.
+ */
+struct ieee80211_country_ie_triplet {
+ union {
+ struct {
+ u8 first_channel;
+ u8 num_channels;
+ s8 max_power;
+ } __packed chans;
+ struct {
+ u8 reg_extension_id;
+ u8 reg_class;
+ u8 coverage_class;
+ } __packed ext;
+ };
+} __packed;
+
+enum ieee80211_timeout_interval_type {
+ WLAN_TIMEOUT_REASSOC_DEADLINE = 1 /* 802.11r */,
+ WLAN_TIMEOUT_KEY_LIFETIME = 2 /* 802.11r */,
+ WLAN_TIMEOUT_ASSOC_COMEBACK = 3 /* 802.11w */,
+};
+
+/**
+ * struct ieee80211_timeout_interval_ie - Timeout Interval element
+ * @type: type, see &enum ieee80211_timeout_interval_type
+ * @value: timeout interval value
+ */
+struct ieee80211_timeout_interval_ie {
+ u8 type;
+ __le32 value;
+} __packed;
+
+/**
+ * enum ieee80211_idle_options - BSS idle options
+ * @WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE: the station should send an RSN
+ * protected frame to the AP to reset the idle timer at the AP for
+ * the station.
+ */
+enum ieee80211_idle_options {
+ WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE = BIT(0),
+};
+
+/**
+ * struct ieee80211_bss_max_idle_period_ie
+ *
+ * This structure refers to "BSS Max idle period element"
+ *
+ * @max_idle_period: indicates the time period during which a station can
+ * refrain from transmitting frames to its associated AP without being
+ * disassociated. In units of 1000 TUs.
+ * @idle_options: indicates the options associated with the BSS idle capability
+ * as specified in &enum ieee80211_idle_options.
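+ *
+ * For example, a max_idle_period of 10 means 10 * 1000 TUs; at 1 TU =
+ * 1024 usec that is roughly 10.24 seconds (illustrative value only).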
+ */ +struct ieee80211_bss_max_idle_period_ie { + __le16 max_idle_period; + u8 idle_options; +} __packed; + +/* BACK action code */ +enum ieee80211_back_actioncode { + WLAN_ACTION_ADDBA_REQ = 0, + WLAN_ACTION_ADDBA_RESP = 1, + WLAN_ACTION_DELBA = 2, +}; + +/* BACK (block-ack) parties */ +enum ieee80211_back_parties { + WLAN_BACK_RECIPIENT = 0, + WLAN_BACK_INITIATOR = 1, +}; + +/* SA Query action */ +enum ieee80211_sa_query_action { + WLAN_ACTION_SA_QUERY_REQUEST = 0, + WLAN_ACTION_SA_QUERY_RESPONSE = 1, +}; + + +#define SUITE(oui, id) (((oui) << 8) | (id)) + +/* cipher suite selectors */ +#define WLAN_CIPHER_SUITE_USE_GROUP SUITE(0x000FAC, 0) +#define WLAN_CIPHER_SUITE_WEP40 SUITE(0x000FAC, 1) +#define WLAN_CIPHER_SUITE_TKIP SUITE(0x000FAC, 2) +/* reserved: SUITE(0x000FAC, 3) */ +#define WLAN_CIPHER_SUITE_CCMP SUITE(0x000FAC, 4) +#define WLAN_CIPHER_SUITE_WEP104 SUITE(0x000FAC, 5) +#define WLAN_CIPHER_SUITE_AES_CMAC SUITE(0x000FAC, 6) +#define WLAN_CIPHER_SUITE_GCMP SUITE(0x000FAC, 8) +#define WLAN_CIPHER_SUITE_GCMP_256 SUITE(0x000FAC, 9) +#define WLAN_CIPHER_SUITE_CCMP_256 SUITE(0x000FAC, 10) +#define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11) +#define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12) +#define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13) + +#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1) + +/* AKM suite selectors */ +#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1) +#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2) +#define WLAN_AKM_SUITE_FT_8021X SUITE(0x000FAC, 3) +#define WLAN_AKM_SUITE_FT_PSK SUITE(0x000FAC, 4) +#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5) +#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6) +#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) +#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) +#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) +#define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11) +#define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12) +#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14) +#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15) +#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16) +#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17) + +#define WLAN_MAX_KEY_LEN 32 + +#define WLAN_PMK_NAME_LEN 16 +#define WLAN_PMKID_LEN 16 +#define WLAN_PMK_LEN_EAP_LEAP 16 +#define WLAN_PMK_LEN 32 +#define WLAN_PMK_LEN_SUITE_B_192 48 + +#define WLAN_OUI_WFA 0x506f9a +#define WLAN_OUI_TYPE_WFA_P2P 9 +#define WLAN_OUI_MICROSOFT 0x0050f2 +#define WLAN_OUI_TYPE_MICROSOFT_WPA 1 +#define WLAN_OUI_TYPE_MICROSOFT_WMM 2 +#define WLAN_OUI_TYPE_MICROSOFT_WPS 4 +#define WLAN_OUI_TYPE_MICROSOFT_TPC 8 + +/* + * WMM/802.11e Tspec Element + */ +#define IEEE80211_WMM_IE_TSPEC_TID_MASK 0x0F +#define IEEE80211_WMM_IE_TSPEC_TID_SHIFT 1 + +enum ieee80211_tspec_status_code { + IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED = 0, + IEEE80211_TSPEC_STATUS_ADDTS_INVAL_PARAMS = 0x1, +}; + +struct ieee80211_tspec_ie { + u8 element_id; + u8 len; + u8 oui[3]; + u8 oui_type; + u8 oui_subtype; + u8 version; + __le16 tsinfo; + u8 tsinfo_resvd; + __le16 nominal_msdu; + __le16 max_msdu; + __le32 min_service_int; + __le32 max_service_int; + __le32 inactivity_int; + __le32 suspension_int; + __le32 service_start_time; + __le32 min_data_rate; + __le32 mean_data_rate; + __le32 peak_data_rate; + __le32 max_burst_size; + __le32 delay_bound; + __le32 min_phy_rate; + __le16 sba; + __le16 medium_time; +} __packed; + +/** + * ieee80211_get_qos_ctl - get pointer to qos control bytes + * @hdr: the frame + * + * The qos ctrl bytes come after the 
frame_control, duration, seq_num + * and 3 or 4 addresses of length ETH_ALEN. + * 3 addr: 2 + 2 + 2 + 3*6 = 24 + * 4 addr: 2 + 2 + 2 + 4*6 = 30 + */ +static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) +{ + if (ieee80211_has_a4(hdr->frame_control)) + return (u8 *)hdr + 30; + else + return (u8 *)hdr + 24; +} + +/** + * ieee80211_get_tid - get qos TID + * @hdr: the frame + */ +static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr) +{ + u8 *qc = ieee80211_get_qos_ctl(hdr); + + return qc[0] & IEEE80211_QOS_CTL_TID_MASK; +} + +/** + * ieee80211_get_SA - get pointer to SA + * @hdr: the frame + * + * Given an 802.11 frame, this function returns the offset + * to the source address (SA). It does not verify that the + * header is long enough to contain the address, and the + * header must be long enough to contain the frame control + * field. + */ +static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr) +{ + if (ieee80211_has_a4(hdr->frame_control)) + return hdr->addr4; + if (ieee80211_has_fromds(hdr->frame_control)) + return hdr->addr3; + return hdr->addr2; +} + +/** + * ieee80211_get_DA - get pointer to DA + * @hdr: the frame + * + * Given an 802.11 frame, this function returns the offset + * to the destination address (DA). It does not verify that + * the header is long enough to contain the address, and the + * header must be long enough to contain the frame control + * field. + */ +static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) +{ + if (ieee80211_has_tods(hdr->frame_control)) + return hdr->addr3; + else + return hdr->addr1; +} + +/** + * _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame + * @hdr: the frame (buffer must include at least the first octet of payload) + */ +static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) +{ + if (ieee80211_is_disassoc(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control)) + return true; + + if (ieee80211_is_action(hdr->frame_control)) { + u8 *category; + + /* + * Action frames, excluding Public Action frames, are Robust + * Management Frames. However, if we are looking at a Protected + * frame, skip the check since the data may be encrypted and + * the frame has already been found to be a Robust Management + * Frame (by the other end). 
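+ * (For reference, the category octet checked below sits immediately after
+ * the fixed 24-byte three-address management header, hence the
+ * ((u8 *)hdr) + 24 offset.)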
+ */ + if (ieee80211_has_protected(hdr->frame_control)) + return true; + category = ((u8 *) hdr) + 24; + return *category != WLAN_CATEGORY_PUBLIC && + *category != WLAN_CATEGORY_HT && + *category != WLAN_CATEGORY_WNM_UNPROTECTED && + *category != WLAN_CATEGORY_SELF_PROTECTED && + *category != WLAN_CATEGORY_UNPROT_DMG && + *category != WLAN_CATEGORY_VHT && + *category != WLAN_CATEGORY_VENDOR_SPECIFIC; + } + + return false; +} + +/** + * ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame + * @skb: the skb containing the frame, length will be checked + */ +static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb) +{ + if (skb->len < IEEE80211_MIN_ACTION_SIZE) + return false; + return _ieee80211_is_robust_mgmt_frame((void *)skb->data); +} + +/** + * ieee80211_is_public_action - check if frame is a public action frame + * @hdr: the frame + * @len: length of the frame + */ +static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, + size_t len) +{ + struct ieee80211_mgmt *mgmt = (void *)hdr; + + if (len < IEEE80211_MIN_ACTION_SIZE) + return false; + if (!ieee80211_is_action(hdr->frame_control)) + return false; + return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC; +} + +/** + * _ieee80211_is_group_privacy_action - check if frame is a group addressed + * privacy action frame + * @hdr: the frame + */ +static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr) +{ + struct ieee80211_mgmt *mgmt = (void *)hdr; + + if (!ieee80211_is_action(hdr->frame_control) || + !is_multicast_ether_addr(hdr->addr1)) + return false; + + return mgmt->u.action.category == WLAN_CATEGORY_MESH_ACTION || + mgmt->u.action.category == WLAN_CATEGORY_MULTIHOP_ACTION; +} + +/** + * ieee80211_is_group_privacy_action - check if frame is a group addressed + * privacy action frame + * @skb: the skb containing the frame, length will be checked + */ +static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb) +{ + if (skb->len < IEEE80211_MIN_ACTION_SIZE) + return false; + return _ieee80211_is_group_privacy_action((void *)skb->data); +} + +/** + * ieee80211_tu_to_usec - convert time units (TU) to microseconds + * @tu: the TUs + */ +static inline unsigned long ieee80211_tu_to_usec(unsigned long tu) +{ + return 1024 * tu; +} + +/** + * ieee80211_check_tim - check if AID bit is set in TIM + * @tim: the TIM IE + * @tim_len: length of the TIM IE + * @aid: the AID to look for + */ +static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, + u8 tim_len, u16 aid) +{ + u8 mask; + u8 index, indexn1, indexn2; + + if (unlikely(!tim || tim_len < sizeof(*tim))) + return false; + + aid &= 0x3fff; + index = aid / 8; + mask = 1 << (aid & 7); + + indexn1 = tim->bitmap_ctrl & 0xfe; + indexn2 = tim_len + indexn1 - 4; + + if (index < indexn1 || index > indexn2) + return false; + + index -= indexn1; + + return !!(tim->virtual_map[index] & mask); +} + +/** + * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) + * @skb: the skb containing the frame, length will not be checked + * @hdr_size: the size of the ieee80211_hdr that starts at skb->data + * + * This function assumes the frame is a data frame, and that the network header + * is in the correct place. 
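+ * Relative to skb_network_header(skb) - 2, the bytes examined below are:
+ * bytes 0-1 the ETH_P_TDLS ethertype, byte 2 the TDLS payload type
+ * (WLAN_TDLS_SNAP_RFTYPE), byte 3 the category (WLAN_CATEGORY_TDLS), and
+ * byte 4 the TDLS action code that gets returned.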
+ */ +static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) +{ + if (!skb_is_nonlinear(skb) && + skb->len > (skb_network_offset(skb) + 2)) { + /* Point to where the indication of TDLS should start */ + const u8 *tdls_data = skb_network_header(skb) - 2; + + if (get_unaligned_be16(tdls_data) == ETH_P_TDLS && + tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE && + tdls_data[3] == WLAN_CATEGORY_TDLS) + return tdls_data[4]; + } + + return -1; +} + +/* convert time units */ +#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) +#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) + +/** + * ieee80211_action_contains_tpc - checks if the frame contains TPC element + * @skb: the skb containing the frame, length will be checked + * + * This function checks if it's either TPC report action frame or Link + * Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5 + * and 8.5.7.5 accordingly. + */ +static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (!ieee80211_is_action(mgmt->frame_control)) + return false; + + if (skb->len < IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.tpc_report)) + return false; + + /* + * TPC report - check that: + * category = 0 (Spectrum Management) or 5 (Radio Measurement) + * spectrum management action = 3 (TPC/Link Measurement report) + * TPC report EID = 35 + * TPC report element length = 2 + * + * The spectrum management's tpc_report struct is used here both for + * parsing tpc_report and radio measurement's link measurement report + * frame, since the relevant part is identical in both frames. + */ + if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT && + mgmt->u.action.category != WLAN_CATEGORY_RADIO_MEASUREMENT) + return false; + + /* both spectrum mgmt and link measurement have same action code */ + if (mgmt->u.action.u.tpc_report.action_code != + WLAN_ACTION_SPCT_TPC_RPRT) + return false; + + if (mgmt->u.action.u.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT || + mgmt->u.action.u.tpc_report.tpc_elem_length != + sizeof(struct ieee80211_tpc_report_ie)) + return false; + + return true; +} + +struct element { + u8 id; + u8 datalen; + u8 data[]; +} __packed; + +/* element iteration helpers */ +#define for_each_element(_elem, _data, _datalen) \ + for (_elem = (const struct element *)(_data); \ + (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \ + (int)sizeof(*_elem) && \ + (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \ + (int)sizeof(*_elem) + _elem->datalen; \ + _elem = (const struct element *)(_elem->data + _elem->datalen)) + +#define for_each_element_id(element, _id, data, datalen) \ + for_each_element(element, data, datalen) \ + if (element->id == (_id)) + +#define for_each_element_extid(element, extid, data, datalen) \ + for_each_element(element, data, datalen) \ + if (element->id == WLAN_EID_EXTENSION && \ + element->datalen > 0 && \ + element->data[0] == (extid)) + +#define for_each_subelement(sub, element) \ + for_each_element(sub, (element)->data, (element)->datalen) + +#define for_each_subelement_id(sub, id, element) \ + for_each_element_id(sub, id, (element)->data, (element)->datalen) + +#define for_each_subelement_extid(sub, extid, element) \ + for_each_element_extid(sub, extid, (element)->data, (element)->datalen) + +/** + * for_each_element_completed - determine if element parsing consumed all data + * @element: element pointer after for_each_element() or friends + * @data: same data pointer as passed to 
for_each_element() or friends + * @datalen: same data length as passed to for_each_element() or friends + * + * This function returns %true if all the data was parsed or considered + * while walking the elements. Only use this if your for_each_element() + * loop cannot be broken out of, otherwise it always returns %false. + * + * If some data was malformed, this returns %false since the last parsed + * element will not fill the whole remaining data. + */ +static inline bool for_each_element_completed(const struct element *element, + const void *data, size_t datalen) +{ + return (const u8 *)element == (const u8 *)data + datalen; +} + +#endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h new file mode 100644 index 000000000..ddb890174 --- /dev/null +++ b/include/linux/ieee802154.h @@ -0,0 +1,363 @@ +/* + * IEEE802.15.4-2003 specification + * + * Copyright (C) 2007, 2008 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Written by: + * Pavel Smolenskiy + * Maxim Gorbachyov + * Maxim Osipov + * Dmitry Eremin-Solenikov + * Alexander Smirnov + */ + +#ifndef LINUX_IEEE802154_H +#define LINUX_IEEE802154_H + +#include +#include + +#define IEEE802154_MTU 127 +#define IEEE802154_ACK_PSDU_LEN 5 +#define IEEE802154_MIN_PSDU_LEN 9 +#define IEEE802154_FCS_LEN 2 +#define IEEE802154_MAX_AUTH_TAG_LEN 16 +#define IEEE802154_FC_LEN 2 +#define IEEE802154_SEQ_LEN 1 + +/* General MAC frame format: + * 2 bytes: Frame Control + * 1 byte: Sequence Number + * 20 bytes: Addressing fields + * 14 bytes: Auxiliary Security Header + */ +#define IEEE802154_MAX_HEADER_LEN (2 + 1 + 20 + 14) +#define IEEE802154_MIN_HEADER_LEN (IEEE802154_ACK_PSDU_LEN - \ + IEEE802154_FCS_LEN) + +#define IEEE802154_PAN_ID_BROADCAST 0xffff +#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff +#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe + +#define IEEE802154_EXTENDED_ADDR_LEN 8 +#define IEEE802154_SHORT_ADDR_LEN 2 +#define IEEE802154_PAN_ID_LEN 2 + +#define IEEE802154_LIFS_PERIOD 40 +#define IEEE802154_SIFS_PERIOD 12 +#define IEEE802154_MAX_SIFS_FRAME_SIZE 18 + +#define IEEE802154_MAX_CHANNEL 26 +#define IEEE802154_MAX_PAGE 31 + +#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ +#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ +#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */ +#define IEEE802154_FC_TYPE_MAC_CMD 0x3 /* Frame is MAC command */ + +#define IEEE802154_FC_TYPE_SHIFT 0 +#define IEEE802154_FC_TYPE_MASK ((1 << 3) - 1) +#define IEEE802154_FC_TYPE(x) ((x & IEEE802154_FC_TYPE_MASK) >> IEEE802154_FC_TYPE_SHIFT) +#define IEEE802154_FC_SET_TYPE(v, x) do { \ + v = (((v) & ~IEEE802154_FC_TYPE_MASK) | \ + (((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \ + } while (0) + +#define IEEE802154_FC_SECEN_SHIFT 3 +#define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT) +#define IEEE802154_FC_FRPEND_SHIFT 4 +#define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT) +#define IEEE802154_FC_ACK_REQ_SHIFT 5 +#define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT) +#define IEEE802154_FC_INTRA_PAN_SHIFT 6 +#define IEEE802154_FC_INTRA_PAN (1 
<< IEEE802154_FC_INTRA_PAN_SHIFT) + +#define IEEE802154_FC_SAMODE_SHIFT 14 +#define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT) +#define IEEE802154_FC_DAMODE_SHIFT 10 +#define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT) + +#define IEEE802154_FC_VERSION_SHIFT 12 +#define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT) +#define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT) + +#define IEEE802154_FC_SAMODE(x) \ + (((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT) + +#define IEEE802154_FC_DAMODE(x) \ + (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT) + +#define IEEE802154_SCF_SECLEVEL_MASK 7 +#define IEEE802154_SCF_SECLEVEL_SHIFT 0 +#define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK) +#define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3 +#define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT) +#define IEEE802154_SCF_KEY_ID_MODE(x) \ + ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT) + +#define IEEE802154_SCF_KEY_IMPLICIT 0 +#define IEEE802154_SCF_KEY_INDEX 1 +#define IEEE802154_SCF_KEY_SHORT_INDEX 2 +#define IEEE802154_SCF_KEY_HW_INDEX 3 + +#define IEEE802154_SCF_SECLEVEL_NONE 0 +#define IEEE802154_SCF_SECLEVEL_MIC32 1 +#define IEEE802154_SCF_SECLEVEL_MIC64 2 +#define IEEE802154_SCF_SECLEVEL_MIC128 3 +#define IEEE802154_SCF_SECLEVEL_ENC 4 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7 + +/* MAC footer size */ +#define IEEE802154_MFR_SIZE 2 /* 2 octets */ + +/* MAC's Command Frames Identifiers */ +#define IEEE802154_CMD_ASSOCIATION_REQ 0x01 +#define IEEE802154_CMD_ASSOCIATION_RESP 0x02 +#define IEEE802154_CMD_DISASSOCIATION_NOTIFY 0x03 +#define IEEE802154_CMD_DATA_REQ 0x04 +#define IEEE802154_CMD_PANID_CONFLICT_NOTIFY 0x05 +#define IEEE802154_CMD_ORPHAN_NOTIFY 0x06 +#define IEEE802154_CMD_BEACON_REQ 0x07 +#define IEEE802154_CMD_COORD_REALIGN_NOTIFY 0x08 +#define IEEE802154_CMD_GTS_REQ 0x09 + +/* + * The return values of MAC operations + */ +enum { + /* + * The requested operation was completed successfully. + * For a transmission request, this value indicates + * a successful transmission. + */ + IEEE802154_SUCCESS = 0x0, + + /* The beacon was lost following a synchronization request. */ + IEEE802154_BEACON_LOSS = 0xe0, + /* + * A transmission could not take place due to activity on the + * channel, i.e., the CSMA-CA mechanism has failed. + */ + IEEE802154_CHNL_ACCESS_FAIL = 0xe1, + /* The GTS request has been denied by the PAN coordinator. */ + IEEE802154_DENINED = 0xe2, + /* The attempt to disable the transceiver has failed. */ + IEEE802154_DISABLE_TRX_FAIL = 0xe3, + /* + * The received frame induces a failed security check according to + * the security suite. + */ + IEEE802154_FAILED_SECURITY_CHECK = 0xe4, + /* + * The frame resulting from secure processing has a length that is + * greater than aMACMaxFrameSize. + */ + IEEE802154_FRAME_TOO_LONG = 0xe5, + /* + * The requested GTS transmission failed because the specified GTS + * either did not have a transmit GTS direction or was not defined. + */ + IEEE802154_INVALID_GTS = 0xe6, + /* + * A request to purge an MSDU from the transaction queue was made using + * an MSDU handle that was not found in the transaction table. 
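The IEEE802154_FC_* accessors above take the 16-bit frame control word directly; the caller is assumed to have converted it from its little-endian wire format (the later IEEE802154_FCTL_* masks, by contrast, are applied to a __le16). A minimal sketch of using them to decide whether a received frame should be acknowledged; the function name and the pre-converted fc value are assumptions.

/*
 * Illustrative sketch (not part of the patch): classify a frame from its
 * frame-control field with the IEEE802154_FC_* accessors above.
 */
static bool example_frame_wants_ack(u16 fc)
{
	switch (IEEE802154_FC_TYPE(fc)) {
	case IEEE802154_FC_TYPE_DATA:
	case IEEE802154_FC_TYPE_MAC_CMD:
		/* Only data and MAC command frames may request an ACK. */
		return fc & IEEE802154_FC_ACK_REQ;
	case IEEE802154_FC_TYPE_BEACON:
	case IEEE802154_FC_TYPE_ACK:
	default:
		return false;
	}
}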
+ */ + IEEE802154_INVALID_HANDLE = 0xe7, + /* A parameter in the primitive is out of the valid range.*/ + IEEE802154_INVALID_PARAMETER = 0xe8, + /* No acknowledgment was received after aMaxFrameRetries. */ + IEEE802154_NO_ACK = 0xe9, + /* A scan operation failed to find any network beacons.*/ + IEEE802154_NO_BEACON = 0xea, + /* No response data were available following a request. */ + IEEE802154_NO_DATA = 0xeb, + /* The operation failed because a short address was not allocated. */ + IEEE802154_NO_SHORT_ADDRESS = 0xec, + /* + * A receiver enable request was unsuccessful because it could not be + * completed within the CAP. + */ + IEEE802154_OUT_OF_CAP = 0xed, + /* + * A PAN identifier conflict has been detected and communicated to the + * PAN coordinator. + */ + IEEE802154_PANID_CONFLICT = 0xee, + /* A coordinator realignment command has been received. */ + IEEE802154_REALIGMENT = 0xef, + /* The transaction has expired and its information discarded. */ + IEEE802154_TRANSACTION_EXPIRED = 0xf0, + /* There is no capacity to store the transaction. */ + IEEE802154_TRANSACTION_OVERFLOW = 0xf1, + /* + * The transceiver was in the transmitter enabled state when the + * receiver was requested to be enabled. + */ + IEEE802154_TX_ACTIVE = 0xf2, + /* The appropriate key is not available in the ACL. */ + IEEE802154_UNAVAILABLE_KEY = 0xf3, + /* + * A SET/GET request was issued with the identifier of a PIB attribute + * that is not supported. + */ + IEEE802154_UNSUPPORTED_ATTR = 0xf4, + /* + * A request to perform a scan operation failed because the MLME was + * in the process of performing a previously initiated scan operation. + */ + IEEE802154_SCAN_IN_PROGRESS = 0xfc, +}; + +/* frame control handling */ +#define IEEE802154_FCTL_FTYPE 0x0003 +#define IEEE802154_FCTL_ACKREQ 0x0020 +#define IEEE802154_FCTL_SECEN 0x0004 +#define IEEE802154_FCTL_INTRA_PAN 0x0040 +#define IEEE802154_FCTL_DADDR 0x0c00 +#define IEEE802154_FCTL_SADDR 0xc000 + +#define IEEE802154_FTYPE_DATA 0x0001 + +#define IEEE802154_FCTL_ADDR_NONE 0x0000 +#define IEEE802154_FCTL_DADDR_SHORT 0x0800 +#define IEEE802154_FCTL_DADDR_EXTENDED 0x0c00 +#define IEEE802154_FCTL_SADDR_SHORT 0x8000 +#define IEEE802154_FCTL_SADDR_EXTENDED 0xc000 + +/* + * ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee802154_is_data(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE802154_FCTL_FTYPE)) == + cpu_to_le16(IEEE802154_FTYPE_DATA); +} + +/** + * ieee802154_is_secen - check if Security bit is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee802154_is_secen(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_SECEN); +} + +/** + * ieee802154_is_ackreq - check if acknowledgment request bit is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee802154_is_ackreq(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_ACKREQ); +} + +/** + * ieee802154_is_intra_pan - check if intra pan id communication + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee802154_is_intra_pan(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN); +} + +/* + * ieee802154_daddr_mode - get daddr mode from fc + * @fc: frame control bytes in little-endian byteorder + */ +static inline __le16 ieee802154_daddr_mode(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_DADDR); +} + +/* + * ieee802154_saddr_mode - get saddr mode from fc + * @fc: frame control bytes in 
little-endian byteorder + */ +static inline __le16 ieee802154_saddr_mode(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_SADDR); +} + +/** + * ieee802154_is_valid_psdu_len - check if psdu len is valid + * available lengths: + * 0-4 Reserved + * 5 MPDU (Acknowledgment) + * 6-8 Reserved + * 9-127 MPDU + * + * @len: psdu len with (MHR + payload + MFR) + */ +static inline bool ieee802154_is_valid_psdu_len(u8 len) +{ + return (len == IEEE802154_ACK_PSDU_LEN || + (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU)); +} + +/** + * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid + * @addr: extended addr to check + */ +static inline bool ieee802154_is_valid_extended_unicast_addr(__le64 addr) +{ + /* Bail out if the address is all zero, or if the group + * address bit is set. + */ + return ((addr != cpu_to_le64(0x0000000000000000ULL)) && + !(addr & cpu_to_le64(0x0100000000000000ULL))); +} + +/** + * ieee802154_is_broadcast_short_addr - check if short addr is broadcast + * @addr: short addr to check + */ +static inline bool ieee802154_is_broadcast_short_addr(__le16 addr) +{ + return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST)); +} + +/** + * ieee802154_is_unspec_short_addr - check if short addr is unspecified + * @addr: short addr to check + */ +static inline bool ieee802154_is_unspec_short_addr(__le16 addr) +{ + return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC)); +} + +/** + * ieee802154_is_valid_src_short_addr - check if source short address is valid + * @addr: short addr to check + */ +static inline bool ieee802154_is_valid_src_short_addr(__le16 addr) +{ + return !(ieee802154_is_broadcast_short_addr(addr) || + ieee802154_is_unspec_short_addr(addr)); +} + +/** + * ieee802154_random_extended_addr - generates a random extended address + * @addr: extended addr pointer to place the random address + */ +static inline void ieee802154_random_extended_addr(__le64 *addr) +{ + get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); + + /* clear the group bit, and set the locally administered bit */ + ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01; + ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02; +} + +#endif /* LINUX_IEEE802154_H */ diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h new file mode 100644 index 000000000..c697a0524 --- /dev/null +++ b/include/linux/if_arp.h @@ -0,0 +1,65 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the ARP (RFC 826) protocol. + * + * Version: @(#)if_arp.h 1.0.1 04/16/93 + * + * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988 + * Portions taken from the KA9Q/NOS (v2.00m PA0GRI) source. + * Ross Biro + * Fred N. van Kempen, + * Florian La Roche, + * Jonathan Layes + * Arnaldo Carvalho de Melo ARPHRD_HWX25 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
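A short sketch of how the extended-address helpers above combine when a driver has no valid EUI-64 burned into its hardware; the wrapper name is an assumption, the two called helpers are the ones defined in this header.

/*
 * Illustrative sketch (not part of the patch): keep a vendor-provided
 * extended address if it already looks usable, otherwise fall back to a
 * random, locally administered one.
 */
static void example_pick_extended_addr(__le64 *hwaddr)
{
	if (ieee802154_is_valid_extended_unicast_addr(*hwaddr))
		return;

	ieee802154_random_extended_addr(hwaddr);
}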
+ */ +#ifndef _LINUX_IF_ARP_H +#define _LINUX_IF_ARP_H + +#include +#include + +static inline struct arphdr *arp_hdr(const struct sk_buff *skb) +{ + return (struct arphdr *)skb_network_header(skb); +} + +static inline unsigned int arp_hdr_len(const struct net_device *dev) +{ + switch (dev->type) { +#if IS_ENABLED(CONFIG_FIREWIRE_NET) + case ARPHRD_IEEE1394: + /* ARP header, device address and 2 IP addresses */ + return sizeof(struct arphdr) + dev->addr_len + sizeof(u32) * 2; +#endif + default: + /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ + return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; + } +} + +static inline bool dev_is_mac_header_xmit(const struct net_device *dev) +{ + switch (dev->type) { + case ARPHRD_TUNNEL: + case ARPHRD_TUNNEL6: + case ARPHRD_SIT: + case ARPHRD_IPGRE: + case ARPHRD_VOID: + case ARPHRD_NONE: + case ARPHRD_RAWIP: + case ARPHRD_PIMREG: + return false; + default: + return true; + } +} + +#endif /* _LINUX_IF_ARP_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h new file mode 100644 index 000000000..c20c7e197 --- /dev/null +++ b/include/linux/if_bridge.h @@ -0,0 +1,132 @@ +/* + * Linux ethernet bridge + * + * Authors: + * Lennert Buytenhek + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IF_BRIDGE_H +#define _LINUX_IF_BRIDGE_H + + +#include +#include +#include + +struct br_ip { + union { + __be32 ip4; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr ip6; +#endif + } u; + __be16 proto; + __u16 vid; +}; + +struct br_ip_list { + struct list_head list; + struct br_ip addr; +}; + +#define BR_HAIRPIN_MODE BIT(0) +#define BR_BPDU_GUARD BIT(1) +#define BR_ROOT_BLOCK BIT(2) +#define BR_MULTICAST_FAST_LEAVE BIT(3) +#define BR_ADMIN_COST BIT(4) +#define BR_LEARNING BIT(5) +#define BR_FLOOD BIT(6) +#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) +#define BR_PROMISC BIT(7) +#define BR_PROXYARP BIT(8) +#define BR_LEARNING_SYNC BIT(9) +#define BR_PROXYARP_WIFI BIT(10) +#define BR_MCAST_FLOOD BIT(11) +#define BR_MULTICAST_TO_UNICAST BIT(12) +#define BR_VLAN_TUNNEL BIT(13) +#define BR_BCAST_FLOOD BIT(14) +#define BR_NEIGH_SUPPRESS BIT(15) +#define BR_ISOLATED BIT(16) + +#define BR_DEFAULT_AGEING_TIME (300 * HZ) + +extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); + +typedef int br_should_route_hook_t(struct sk_buff *skb); +extern br_should_route_hook_t __rcu *br_should_route_hook; + +#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) +int br_multicast_list_adjacent(struct net_device *dev, + struct list_head *br_ip_list); +bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); +bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); +bool br_multicast_enabled(const struct net_device *dev); +bool br_multicast_router(const struct net_device *dev); +#else +static inline int br_multicast_list_adjacent(struct net_device *dev, + struct list_head *br_ip_list) +{ + return 0; +} +static inline bool br_multicast_has_querier_anywhere(struct net_device *dev, + int proto) +{ + return false; +} +static inline bool br_multicast_has_querier_adjacent(struct net_device *dev, + int proto) +{ + return false; +} +static inline bool br_multicast_enabled(const struct net_device *dev) +{ + return false; +} +static inline bool 
br_multicast_router(const struct net_device *dev) +{ + return false; +} +#endif + +#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) +bool br_vlan_enabled(const struct net_device *dev); +int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid); +int br_vlan_get_info(const struct net_device *dev, u16 vid, + struct bridge_vlan_info *p_vinfo); +#else +static inline bool br_vlan_enabled(const struct net_device *dev) +{ + return false; +} + +static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) +{ + return -EINVAL; +} + +static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, + struct bridge_vlan_info *p_vinfo) +{ + return -EINVAL; +} +#endif + +#if IS_ENABLED(CONFIG_BRIDGE) +struct net_device *br_fdb_find_port(const struct net_device *br_dev, + const unsigned char *addr, + __u16 vid); +#else +static inline struct net_device * +br_fdb_find_port(const struct net_device *br_dev, + const unsigned char *addr, + __u16 vid) +{ + return NULL; +} +#endif + +#endif diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h new file mode 100644 index 000000000..d984694c3 --- /dev/null +++ b/include/linux/if_eql.h @@ -0,0 +1,49 @@ +/* + * Equalizer Load-balancer for serial network interfaces. + * + * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes + * NCM: Network and Communications Management, Inc. + * + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * The author may be reached as simon@ncm.com, or C/O + * NCM + * Attn: Simon Janes + * 6803 Whittier Ave + * McLean VA 22101 + * Phone: 1-703-847-0040 ext 103 + */ +#ifndef _LINUX_IF_EQL_H +#define _LINUX_IF_EQL_H + + +#include +#include +#include + +typedef struct slave { + struct list_head list; + struct net_device *dev; + long priority; + long priority_bps; + long priority_Bps; + long bytes_queued; +} slave_t; + +typedef struct slave_queue { + spinlock_t lock; + struct list_head all_slaves; + int num_slaves; + struct net_device *master_dev; +} slave_queue_t; + +typedef struct equalizer { + slave_queue_t queue; + int min_slaves; + int max_slaves; + struct timer_list timer; +} equalizer_t; + +#endif /* _LINUX_EQL_H */ diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h new file mode 100644 index 000000000..d433f5e29 --- /dev/null +++ b/include/linux/if_ether.h @@ -0,0 +1,48 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the Ethernet IEEE 802.3 interface. + * + * Version: @(#)if_ether.h 1.0.1a 02/08/94 + * + * Author: Fred N. van Kempen, + * Donald Becker, + * Alan Cox, + * Steve Whitehouse, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
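As a usage sketch for arp_hdr_len() and arp_hdr() from if_arp.h above: a receive-side check in the style of the core ARP input path, which makes the header area linear before reading it. The function name is an assumption; pskb_may_pull() and the arphdr field names come from elsewhere in the kernel, and the network header is assumed to already point at the ARP header as it does for a protocol handler.

/*
 * Illustrative sketch (not part of the patch): validate a received ARP
 * packet before touching its header fields.
 */
static int example_parse_arp(struct sk_buff *skb, struct net_device *dev)
{
	const struct arphdr *arp;

	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
		return -EINVAL;	/* runt packet, header not complete */

	arp = arp_hdr(skb);
	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;	/* not IPv4-over-Ethernet ARP */

	return 0;
}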
+ */ +#ifndef _LINUX_IF_ETHER_H +#define _LINUX_IF_ETHER_H + +#include +#include + +static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb_mac_header(skb); +} + +/* Prefer this version in TX path, instead of + * skb_reset_mac_header() + eth_hdr() + */ +static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb->data; +} + +static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb_inner_mac_header(skb); +} + +int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); + +extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); + +#endif /* _LINUX_IF_ETHER_H */ diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h new file mode 100644 index 000000000..f5550b3ee --- /dev/null +++ b/include/linux/if_fddi.h @@ -0,0 +1,121 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the ANSI FDDI interface. + * + * Version: @(#)if_fddi.h 1.0.2 Sep 29 2004 + * + * Author: Lawrence V. Stefani, + * + * if_fddi.h is based on previous if_ether.h and if_tr.h work by + * Fred N. van Kempen, + * Donald Becker, + * Alan Cox, + * Steve Whitehouse, + * Peter De Schrijver, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IF_FDDI_H +#define _LINUX_IF_FDDI_H + +#include +#include + +/* Define FDDI statistics structure */ +struct fddi_statistics { + + /* Generic statistics. */ + + struct net_device_stats gen; + + /* Detailed FDDI statistics. 
Adopted from RFC 1512 */ + + __u8 smt_station_id[8]; + __u32 smt_op_version_id; + __u32 smt_hi_version_id; + __u32 smt_lo_version_id; + __u8 smt_user_data[32]; + __u32 smt_mib_version_id; + __u32 smt_mac_cts; + __u32 smt_non_master_cts; + __u32 smt_master_cts; + __u32 smt_available_paths; + __u32 smt_config_capabilities; + __u32 smt_config_policy; + __u32 smt_connection_policy; + __u32 smt_t_notify; + __u32 smt_stat_rpt_policy; + __u32 smt_trace_max_expiration; + __u32 smt_bypass_present; + __u32 smt_ecm_state; + __u32 smt_cf_state; + __u32 smt_remote_disconnect_flag; + __u32 smt_station_status; + __u32 smt_peer_wrap_flag; + __u32 smt_time_stamp; + __u32 smt_transition_time_stamp; + __u32 mac_frame_status_functions; + __u32 mac_t_max_capability; + __u32 mac_tvx_capability; + __u32 mac_available_paths; + __u32 mac_current_path; + __u8 mac_upstream_nbr[FDDI_K_ALEN]; + __u8 mac_downstream_nbr[FDDI_K_ALEN]; + __u8 mac_old_upstream_nbr[FDDI_K_ALEN]; + __u8 mac_old_downstream_nbr[FDDI_K_ALEN]; + __u32 mac_dup_address_test; + __u32 mac_requested_paths; + __u32 mac_downstream_port_type; + __u8 mac_smt_address[FDDI_K_ALEN]; + __u32 mac_t_req; + __u32 mac_t_neg; + __u32 mac_t_max; + __u32 mac_tvx_value; + __u32 mac_frame_cts; + __u32 mac_copied_cts; + __u32 mac_transmit_cts; + __u32 mac_error_cts; + __u32 mac_lost_cts; + __u32 mac_frame_error_threshold; + __u32 mac_frame_error_ratio; + __u32 mac_rmt_state; + __u32 mac_da_flag; + __u32 mac_una_da_flag; + __u32 mac_frame_error_flag; + __u32 mac_ma_unitdata_available; + __u32 mac_hardware_present; + __u32 mac_ma_unitdata_enable; + __u32 path_tvx_lower_bound; + __u32 path_t_max_lower_bound; + __u32 path_max_t_req; + __u32 path_configuration[8]; + __u32 port_my_type[2]; + __u32 port_neighbor_type[2]; + __u32 port_connection_policies[2]; + __u32 port_mac_indicated[2]; + __u32 port_current_path[2]; + __u8 port_requested_paths[3*2]; + __u32 port_mac_placement[2]; + __u32 port_available_paths[2]; + __u32 port_pmd_class[2]; + __u32 port_connection_capabilities[2]; + __u32 port_bs_flag[2]; + __u32 port_lct_fail_cts[2]; + __u32 port_ler_estimate[2]; + __u32 port_lem_reject_cts[2]; + __u32 port_lem_cts[2]; + __u32 port_ler_cutoff[2]; + __u32 port_ler_alarm[2]; + __u32 port_connect_state[2]; + __u32 port_pcm_state[2]; + __u32 port_pc_withhold[2]; + __u32 port_ler_flag[2]; + __u32 port_hardware_present[2]; +}; +#endif /* _LINUX_IF_FDDI_H */ diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h new file mode 100644 index 000000000..82a1b4e93 --- /dev/null +++ b/include/linux/if_frad.h @@ -0,0 +1,96 @@ +/* + * DLCI/FRAD Definitions for Frame Relay Access Devices. DLCI devices are + * created for each DLCI associated with a FRAD. The FRAD driver + * is not truly a network device, but the lower level device + * handler. This allows other FRAD manufacturers to use the DLCI + * code, including its RFC1490 encapsulation alongside the current + * implementation for the Sangoma cards. + * + * Version: @(#)if_ifrad.h 0.15 31 Mar 96 + * + * Author: Mike McLagan + * + * Changes: + * 0.15 Mike McLagan changed structure defs (packed) + * re-arranged flags + * added DLCI_RET vars + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
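The comment on skb_eth_hdr() in if_ether.h above distinguishes the receive and transmit cases; a small sketch of the two accessors side by side. The function names are assumptions, and ether_addr_equal() is the usual address comparison helper.

/*
 * Illustrative sketch (not part of the patch): on receive the mac header
 * offset has typically been set by eth_type_trans(), so eth_hdr() is the
 * right accessor; on transmit the Ethernet header still sits at skb->data,
 * which is what skb_eth_hdr() returns.
 */
static bool example_rx_is_for_addr(const struct sk_buff *skb, const u8 *addr)
{
	return ether_addr_equal(eth_hdr(skb)->h_dest, addr);
}

static bool example_tx_is_for_addr(const struct sk_buff *skb, const u8 *addr)
{
	return ether_addr_equal(skb_eth_hdr(skb)->h_dest, addr);
}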
+ */ +#ifndef _FRAD_H_ +#define _FRAD_H_ + +#include + + +#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE) + +/* these are the fields of an RFC 1490 header */ +struct frhdr +{ + unsigned char control; + + /* for IP packets, this can be the NLPID */ + unsigned char pad; + + unsigned char NLPID; + unsigned char OUI[3]; + __be16 PID; + +#define IP_NLPID pad +} __packed; + +/* see RFC 1490 for the definition of the following */ +#define FRAD_I_UI 0x03 + +#define FRAD_P_PADDING 0x00 +#define FRAD_P_Q933 0x08 +#define FRAD_P_SNAP 0x80 +#define FRAD_P_CLNP 0x81 +#define FRAD_P_IP 0xCC + +struct dlci_local +{ + struct net_device *master; + struct net_device *slave; + struct dlci_conf config; + int configured; + struct list_head list; + + /* callback function */ + void (*receive)(struct sk_buff *skb, struct net_device *); +}; + +struct frad_local +{ + /* devices which this FRAD is slaved to */ + struct net_device *master[CONFIG_DLCI_MAX]; + short dlci[CONFIG_DLCI_MAX]; + + struct frad_conf config; + int configured; /* has this device been configured */ + int initialized; /* mem_start, port, irq set ? */ + + /* callback functions */ + int (*activate)(struct net_device *, struct net_device *); + int (*deactivate)(struct net_device *, struct net_device *); + int (*assoc)(struct net_device *, struct net_device *); + int (*deassoc)(struct net_device *, struct net_device *); + int (*dlci_conf)(struct net_device *, struct net_device *, int get); + + /* fields that are used by the Sangoma SDLA cards */ + struct timer_list timer; + struct net_device *dev; + int type; /* adapter type */ + int state; /* state of the S502/8 control latch */ + int buffer; /* current buffer for S508 firmware */ +}; + +#endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */ + +extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *)); + +#endif diff --git a/include/linux/if_link.h b/include/linux/if_link.h new file mode 100644 index 000000000..622658dfb --- /dev/null +++ b/include/linux/if_link.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IF_LINK_H +#define _LINUX_IF_LINK_H + +#include + + +/* We don't want this structure exposed to user space */ +struct ifla_vf_stats { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 broadcast; + __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; +}; + +struct ifla_vf_info { + __u32 vf; + __u8 mac[32]; + __u32 vlan; + __u32 qos; + __u32 spoofchk; + __u32 linkstate; + __u32 min_tx_rate; + __u32 max_tx_rate; + __u32 rss_query_en; + __u32 trusted; + __be16 vlan_proto; +}; +#endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h new file mode 100644 index 000000000..4cc1c0b77 --- /dev/null +++ b/include/linux/if_ltalk.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_LTALK_H +#define __LINUX_LTALK_H + +#include + +extern struct net_device *alloc_ltalkdev(int sizeof_priv); +#endif diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h new file mode 100644 index 000000000..1261559d7 --- /dev/null +++ b/include/linux/if_macvlan.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IF_MACVLAN_H +#define _LINUX_IF_MACVLAN_H + +#include +#include +#include +#include +#include +#include +#include + +struct macvlan_port; + +#define MACVLAN_MC_FILTER_BITS 8 +#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) + +struct macvlan_dev { + struct net_device *dev; + struct list_head list; + struct hlist_node hlist; + struct 
macvlan_port *port; + struct net_device *lowerdev; + void *accel_priv; + struct vlan_pcpu_stats __percpu *pcpu_stats; + + DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); + + netdev_features_t set_features; + enum macvlan_mode mode; + u16 flags; + int nest_level; + unsigned int macaddr_count; +#ifdef CONFIG_NET_POLL_CONTROLLER + struct netpoll *netpoll; +#endif +}; + +static inline void macvlan_count_rx(const struct macvlan_dev *vlan, + unsigned int len, bool success, + bool multicast) +{ + if (likely(success)) { + struct vlan_pcpu_stats *pcpu_stats; + + pcpu_stats = get_cpu_ptr(vlan->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += len; + if (multicast) + pcpu_stats->rx_multicast++; + u64_stats_update_end(&pcpu_stats->syncp); + put_cpu_ptr(vlan->pcpu_stats); + } else { + this_cpu_inc(vlan->pcpu_stats->rx_errors); + } +} + +extern void macvlan_common_setup(struct net_device *dev); + +extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack); + +extern void macvlan_dellink(struct net_device *dev, struct list_head *head); + +extern int macvlan_link_register(struct rtnl_link_ops *ops); + +#if IS_ENABLED(CONFIG_MACVLAN) +static inline struct net_device * +macvlan_dev_real_dev(const struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->lowerdev; +} +#else +static inline struct net_device * +macvlan_dev_real_dev(const struct net_device *dev) +{ + BUG(); + return NULL; +} +#endif + +static inline void *macvlan_accel_priv(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->accel_priv; +} + +static inline bool macvlan_supports_dest_filter(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PRIVATE || + macvlan->mode == MACVLAN_MODE_VEPA || + macvlan->mode == MACVLAN_MODE_BRIDGE; +} + +static inline int macvlan_release_l2fw_offload(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + macvlan->accel_priv = NULL; + return dev_uc_add(macvlan->lowerdev, dev->dev_addr); +} +#endif /* _LINUX_IF_MACVLAN_H */ diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h new file mode 100644 index 000000000..2d8486168 --- /dev/null +++ b/include/linux/if_phonet.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * File: if_phonet.h + * + * Phonet interface kernel definitions + * + * Copyright (C) 2008 Nokia Corporation. All rights reserved. + */ +#ifndef LINUX_IF_PHONET_H +#define LINUX_IF_PHONET_H + +#include + +extern const struct header_ops phonet_header_ops; +#endif diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h new file mode 100644 index 000000000..0fb71e532 --- /dev/null +++ b/include/linux/if_pppol2tp.h @@ -0,0 +1,21 @@ +/*************************************************************************** + * Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661) + * + * This file supplies definitions required by the PPP over L2TP driver + * (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c + * + * License: + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
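A sketch of how a receive path would feed macvlan_count_rx() defined above; the function name and the assumption that dev is a macvlan netdevice are illustrative.

/*
 * Illustrative sketch (not part of the patch): account one received frame
 * against a macvlan device's per-cpu statistics.
 */
static void example_macvlan_rx_account(struct net_device *dev,
				       struct sk_buff *skb, bool delivered)
{
	const struct macvlan_dev *vlan = netdev_priv(dev);

	/* The multicast flag also covers broadcast destinations. */
	macvlan_count_rx(vlan, skb->len, delivered,
			 is_multicast_ether_addr(eth_hdr(skb)->h_dest));
}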
+ * + */ +#ifndef __LINUX_IF_PPPOL2TP_H +#define __LINUX_IF_PPPOL2TP_H + +#include +#include +#include + +#endif diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h new file mode 100644 index 000000000..24e9b360d --- /dev/null +++ b/include/linux/if_pppox.h @@ -0,0 +1,100 @@ +/*************************************************************************** + * Linux PPP over X - Generic PPP transport layer sockets + * Linux PPP over Ethernet (PPPoE) Socket Implementation (RFC 2516) + * + * This file supplies definitions required by the PPP over Ethernet driver + * (pppox.c). All version information wrt this file is located in pppox.c + * + * License: + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ +#ifndef __LINUX_IF_PPPOX_H +#define __LINUX_IF_PPPOX_H + +#include +#include +#include +#include +#include +#include + +static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb) +{ + return (struct pppoe_hdr *)skb_network_header(skb); +} + +struct pppoe_opt { + struct net_device *dev; /* device associated with socket*/ + int ifindex; /* ifindex of device associated with socket */ + struct pppoe_addr pa; /* what this socket is bound to*/ + struct sockaddr_pppox relay; /* what socket data will be + relayed to (PPPoE relaying) */ + struct work_struct padt_work;/* Work item for handling PADT */ +}; + +struct pptp_opt { + struct pptp_addr src_addr; + struct pptp_addr dst_addr; + u32 ack_sent, ack_recv; + u32 seq_sent, seq_recv; + int ppp_flags; +}; +#include + +struct pppox_sock { + /* struct sock must be the first member of pppox_sock */ + struct sock sk; + struct ppp_channel chan; + struct pppox_sock *next; /* for hash table */ + union { + struct pppoe_opt pppoe; + struct pptp_opt pptp; + } proto; + __be16 num; +}; +#define pppoe_dev proto.pppoe.dev +#define pppoe_ifindex proto.pppoe.ifindex +#define pppoe_pa proto.pppoe.pa +#define pppoe_relay proto.pppoe.relay + +static inline struct pppox_sock *pppox_sk(struct sock *sk) +{ + return (struct pppox_sock *)sk; +} + +static inline struct sock *sk_pppox(struct pppox_sock *po) +{ + return (struct sock *)po; +} + +struct module; + +struct pppox_proto { + int (*create)(struct net *net, struct socket *sock, int kern); + int (*ioctl)(struct socket *sock, unsigned int cmd, + unsigned long arg); + struct module *owner; +}; + +extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp); +extern void unregister_pppox_proto(int proto_num); +extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ +extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); +extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); + +#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t) + +/* PPPoX socket states */ +enum { + PPPOX_NONE = 0, /* initial state */ + PPPOX_CONNECTED = 1, /* connection established ==TCP_ESTABLISHED */ + PPPOX_BOUND = 2, /* bound to ppp device */ + PPPOX_RELAY = 4, /* forwarding is enabled */ + PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/ +}; + +#endif /* !(__LINUX_IF_PPPOX_H) */ diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h new file mode 100644 index 000000000..8e66866c1 --- /dev/null +++ b/include/linux/if_tap.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IF_TAP_H_ +#define 
_LINUX_IF_TAP_H_ + +#if IS_ENABLED(CONFIG_TAP) +struct socket *tap_get_socket(struct file *); +struct ptr_ring *tap_get_ptr_ring(struct file *file); +#else +#include +#include +struct file; +struct socket; +static inline struct socket *tap_get_socket(struct file *f) +{ + return ERR_PTR(-EINVAL); +} +static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_TAP */ + +#include +#include + +/* + * Maximum times a tap device can be opened. This can be used to + * configure the number of receive queue, e.g. for multiqueue virtio. + */ +#define MAX_TAP_QUEUES 256 + +struct tap_queue; + +struct tap_dev { + struct net_device *dev; + u16 flags; + /* This array tracks active taps. */ + struct tap_queue __rcu *taps[MAX_TAP_QUEUES]; + /* This list tracks all taps (both enabled and disabled) */ + struct list_head queue_list; + int numvtaps; + int numqueues; + netdev_features_t tap_features; + int minor; + + void (*update_features)(struct tap_dev *tap, netdev_features_t features); + void (*count_tx_dropped)(struct tap_dev *tap); + void (*count_rx_dropped)(struct tap_dev *tap); +}; + +/* + * A tap queue is the central object of tap module, it connects + * an open character device to virtual interface. There can be + * multiple queues on one interface, which map back to queues + * implemented in hardware on the underlying device. + * + * tap_proto is used to allocate queues through the sock allocation + * mechanism. + * + */ + +struct tap_queue { + struct sock sk; + struct socket sock; + struct socket_wq wq; + int vnet_hdr_sz; + struct tap_dev __rcu *tap; + struct file *file; + unsigned int flags; + u16 queue_index; + bool enabled; + struct list_head next; + struct ptr_ring ring; +}; + +rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); +void tap_del_queues(struct tap_dev *tap); +int tap_get_minor(dev_t major, struct tap_dev *tap); +void tap_free_minor(dev_t major, struct tap_dev *tap); +int tap_queue_resize(struct tap_dev *tap); +int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major, + const char *device_name, struct module *module); +void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev); + +#endif /*_LINUX_IF_TAP_H_*/ diff --git a/include/linux/if_team.h b/include/linux/if_team.h new file mode 100644 index 000000000..ac42da56f --- /dev/null +++ b/include/linux/if_team.h @@ -0,0 +1,321 @@ +/* + * include/linux/if_team.h - Network team device driver header + * Copyright (c) 2011 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef _LINUX_IF_TEAM_H_ +#define _LINUX_IF_TEAM_H_ + +#include +#include +#include +#include + +struct team_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_dropped; + u32 tx_dropped; + u32 rx_nohandler; +}; + +struct team; + +struct team_port { + struct net_device *dev; + struct hlist_node hlist; /* node in enabled ports hash list */ + struct list_head list; /* node in ordinary list */ + struct team *team; + int index; /* index of enabled port. 
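Because the !CONFIG_TAP stub above returns ERR_PTR(-EINVAL), callers of tap_get_socket() must use error-pointer checks rather than NULL tests. A minimal sketch; the wrapper name is an assumption.

/*
 * Illustrative sketch (not part of the patch): resolve a struct file to
 * the underlying tap socket, treating the result as an error pointer.
 */
static struct socket *example_file_to_tap_socket(struct file *file)
{
	struct socket *sock = tap_get_socket(file);

	if (IS_ERR(sock))
		return NULL;	/* not a tap file, or tap is not built in */

	return sock;
}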
If disabled, it's set to -1 */ + + bool linkup; /* either state.linkup or user.linkup */ + + struct { + bool linkup; + u32 speed; + u8 duplex; + } state; + + /* Values set by userspace */ + struct { + bool linkup; + bool linkup_enabled; + } user; + + /* Custom gennetlink interface related flags */ + bool changed; + bool removed; + + /* + * A place for storing original values of the device before it + * become a port. + */ + struct { + unsigned char dev_addr[MAX_ADDR_LEN]; + unsigned int mtu; + } orig; + +#ifdef CONFIG_NET_POLL_CONTROLLER + struct netpoll *np; +#endif + + s32 priority; /* lower number ~ higher priority */ + u16 queue_id; + struct list_head qom_list; /* node in queue override mapping list */ + struct rcu_head rcu; + long mode_priv[0]; +}; + +static inline struct team_port *team_port_get_rcu(const struct net_device *dev) +{ + return rcu_dereference(dev->rx_handler_data); +} + +static inline bool team_port_enabled(struct team_port *port) +{ + return port->index != -1; +} + +static inline bool team_port_txable(struct team_port *port) +{ + return port->linkup && team_port_enabled(port); +} + +static inline bool team_port_dev_txable(const struct net_device *port_dev) +{ + struct team_port *port; + bool txable; + + rcu_read_lock(); + port = team_port_get_rcu(port_dev); + txable = port ? team_port_txable(port) : false; + rcu_read_unlock(); + + return txable; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static inline void team_netpoll_send_skb(struct team_port *port, + struct sk_buff *skb) +{ + struct netpoll *np = port->np; + + if (np) + netpoll_send_skb(np, skb); +} +#else +static inline void team_netpoll_send_skb(struct team_port *port, + struct sk_buff *skb) +{ +} +#endif + +struct team_mode_ops { + int (*init)(struct team *team); + void (*exit)(struct team *team); + rx_handler_result_t (*receive)(struct team *team, + struct team_port *port, + struct sk_buff *skb); + bool (*transmit)(struct team *team, struct sk_buff *skb); + int (*port_enter)(struct team *team, struct team_port *port); + void (*port_leave)(struct team *team, struct team_port *port); + void (*port_change_dev_addr)(struct team *team, struct team_port *port); + void (*port_enabled)(struct team *team, struct team_port *port); + void (*port_disabled)(struct team *team, struct team_port *port); +}; + +extern int team_modeop_port_enter(struct team *team, struct team_port *port); +extern void team_modeop_port_change_dev_addr(struct team *team, + struct team_port *port); + +enum team_option_type { + TEAM_OPTION_TYPE_U32, + TEAM_OPTION_TYPE_STRING, + TEAM_OPTION_TYPE_BINARY, + TEAM_OPTION_TYPE_BOOL, + TEAM_OPTION_TYPE_S32, +}; + +struct team_option_inst_info { + u32 array_index; + struct team_port *port; /* != NULL if per-port */ +}; + +struct team_gsetter_ctx { + union { + u32 u32_val; + const char *str_val; + struct { + const void *ptr; + u32 len; + } bin_val; + bool bool_val; + s32 s32_val; + } data; + struct team_option_inst_info *info; +}; + +struct team_option { + struct list_head list; + const char *name; + bool per_port; + unsigned int array_size; /* != 0 means the option is array */ + enum team_option_type type; + int (*init)(struct team *team, struct team_option_inst_info *info); + int (*getter)(struct team *team, struct team_gsetter_ctx *ctx); + int (*setter)(struct team *team, struct team_gsetter_ctx *ctx); +}; + +extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info); +extern void team_options_change_check(struct team *team); + +struct team_mode { + const char *kind; + struct module 
*owner; + size_t priv_size; + size_t port_priv_size; + const struct team_mode_ops *ops; + enum netdev_lag_tx_type lag_tx_type; +}; + +#define TEAM_PORT_HASHBITS 4 +#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS) + +#define TEAM_MODE_PRIV_LONGS 4 +#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS) + +struct team { + struct net_device *dev; /* associated netdevice */ + struct team_pcpu_stats __percpu *pcpu_stats; + + struct mutex lock; /* used for overall locking, e.g. port lists write */ + + /* + * List of enabled ports and their count + */ + int en_port_count; + struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES]; + + struct list_head port_list; /* list of all ports */ + + struct list_head option_list; + struct list_head option_inst_list; /* list of option instances */ + + const struct team_mode *mode; + struct team_mode_ops ops; + bool user_carrier_enabled; + bool queue_override_enabled; + struct list_head *qom_lists; /* array of queue override mapping lists */ + bool port_mtu_change_allowed; + struct { + unsigned int count; + unsigned int interval; /* in ms */ + atomic_t count_pending; + struct delayed_work dw; + } notify_peers; + struct { + unsigned int count; + unsigned int interval; /* in ms */ + atomic_t count_pending; + struct delayed_work dw; + } mcast_rejoin; + long mode_priv[TEAM_MODE_PRIV_LONGS]; +}; + +static inline int team_dev_queue_xmit(struct team *team, struct team_port *port, + struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(skb->queue_mapping) != + sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); + skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); + + skb->dev = port->dev; + if (unlikely(netpoll_tx_running(team->dev))) { + team_netpoll_send_skb(port, skb); + return 0; + } + return dev_queue_xmit(skb); +} + +static inline struct hlist_head *team_port_index_hash(struct team *team, + int port_index) +{ + return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)]; +} + +static inline struct team_port *team_get_port_by_index(struct team *team, + int port_index) +{ + struct team_port *port; + struct hlist_head *head = team_port_index_hash(team, port_index); + + hlist_for_each_entry(port, head, hlist) + if (port->index == port_index) + return port; + return NULL; +} + +static inline int team_num_to_port_index(struct team *team, unsigned int num) +{ + int en_port_count = READ_ONCE(team->en_port_count); + + if (unlikely(!en_port_count)) + return 0; + return num % en_port_count; +} + +static inline struct team_port *team_get_port_by_index_rcu(struct team *team, + int port_index) +{ + struct team_port *port; + struct hlist_head *head = team_port_index_hash(team, port_index); + + hlist_for_each_entry_rcu(port, head, hlist) + if (port->index == port_index) + return port; + return NULL; +} + +static inline struct team_port * +team_get_first_port_txable_rcu(struct team *team, struct team_port *port) +{ + struct team_port *cur; + + if (likely(team_port_txable(port))) + return port; + cur = port; + list_for_each_entry_continue_rcu(cur, &team->port_list, list) + if (team_port_txable(cur)) + return cur; + list_for_each_entry_rcu(cur, &team->port_list, list) { + if (cur == port) + break; + if (team_port_txable(cur)) + return cur; + } + return NULL; +} + +extern int team_options_register(struct team *team, + const struct team_option *option, + size_t option_count); +extern void team_options_unregister(struct team *team, + const struct team_option *option, + size_t option_count); +extern int team_mode_register(const struct team_mode 
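A sketch of a transmit hook in the style of a team mode, built only from the helpers defined above: pick a port index from a packet counter, skip to the next port that is enabled and link-up, then queue the skb. The names are assumptions; note that team_dev_queue_xmit() consumes the skb even on failure, so only the "no usable port" path frees it.

/*
 * Illustrative sketch (not part of the patch): round-robin style transmit
 * using the port lookup helpers above. Runs under rcu_read_lock(), as
 * team mode transmit hooks do.
 */
static bool example_team_transmit(struct team *team, struct sk_buff *skb,
				  unsigned int txc)
{
	struct team_port *port;

	port = team_get_port_by_index_rcu(team,
					  team_num_to_port_index(team, txc));
	if (unlikely(!port))
		goto drop;
	port = team_get_first_port_txable_rcu(team, port);
	if (unlikely(!port))
		goto drop;
	return team_dev_queue_xmit(team, port, skb) == 0;

drop:
	dev_kfree_skb_any(skb);
	return false;
}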
*mode); +extern void team_mode_unregister(const struct team_mode *mode); + +#define TEAM_DEFAULT_NUM_TX_QUEUES 16 +#define TEAM_DEFAULT_NUM_RX_QUEUES 16 + +#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind) + +#endif /* _LINUX_IF_TEAM_H_ */ diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h new file mode 100644 index 000000000..3d2996dc7 --- /dev/null +++ b/include/linux/if_tun.h @@ -0,0 +1,58 @@ +/* + * Universal TUN/TAP device driver. + * Copyright (C) 1999-2000 Maxim Krasnyansky + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __IF_TUN_H +#define __IF_TUN_H + +#include + +#define TUN_XDP_FLAG 0x1UL + +#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) +struct socket *tun_get_socket(struct file *); +struct ptr_ring *tun_get_tx_ring(struct file *file); +bool tun_is_xdp_frame(void *ptr); +void *tun_xdp_to_ptr(void *ptr); +void *tun_ptr_to_xdp(void *ptr); +void tun_ptr_free(void *ptr); +#else +#include +#include +struct file; +struct socket; +static inline struct socket *tun_get_socket(struct file *f) +{ + return ERR_PTR(-EINVAL); +} +static inline struct ptr_ring *tun_get_tx_ring(struct file *f) +{ + return ERR_PTR(-EINVAL); +} +static inline bool tun_is_xdp_frame(void *ptr) +{ + return false; +} +static inline void *tun_xdp_to_ptr(void *ptr) +{ + return NULL; +} +static inline void *tun_ptr_to_xdp(void *ptr) +{ + return NULL; +} +static inline void tun_ptr_free(void *ptr) +{ +} +#endif /* CONFIG_TUN */ +#endif /* __IF_TUN_H */ diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h new file mode 100644 index 000000000..26606523e --- /dev/null +++ b/include/linux/if_tunnel.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _IF_TUNNEL_H_ +#define _IF_TUNNEL_H_ + +#include +#include +#include +#include + +/* + * Locking : hash tables are protected by RCU and RTNL + */ + +#define for_each_ip_tunnel_rcu(pos, start) \ + for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next)) + +#endif /* _IF_TUNNEL_H_ */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h new file mode 100644 index 000000000..60afff19d --- /dev/null +++ b/include/linux/if_vlan.h @@ -0,0 +1,748 @@ +/* + * VLAN An implementation of 802.1Q VLAN tagging. + * + * Authors: Ben Greear + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ +#ifndef _LINUX_IF_VLAN_H_ +#define _LINUX_IF_VLAN_H_ + +#include +#include +#include +#include +#include + +#define VLAN_HLEN 4 /* The additional bytes required by VLAN + * (in addition to the Ethernet header) + */ +#define VLAN_ETH_HLEN 18 /* Total octets in header. */ +#define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */ + +/* + * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan + */ +#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ +#define VLAN_ETH_FRAME_LEN 1518 /* Max. 
octets in frame sans FCS */ + +#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */ + +/* + * struct vlan_hdr - vlan header + * @h_vlan_TCI: priority and VLAN ID + * @h_vlan_encapsulated_proto: packet type ID or len + */ +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +/** + * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr) + * @h_dest: destination ethernet address + * @h_source: source ethernet address + * @h_vlan_proto: ethernet protocol + * @h_vlan_TCI: priority and VLAN ID + * @h_vlan_encapsulated_proto: packet type ID or len + */ +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +#include + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb_mac_header(skb); +} + +#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ +#define VLAN_PRIO_SHIFT 13 +#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ +#define VLAN_TAG_PRESENT VLAN_CFI_MASK +#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ +#define VLAN_N_VID 4096 + +/* found in socket.c */ +extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); + +static inline bool is_vlan_dev(const struct net_device *dev) +{ + return dev->priv_flags & IFF_802_1Q_VLAN; +} + +#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) +#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) +#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) +#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK) + +static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev)); +} + +static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev); +} + +static inline int vlan_get_rx_stag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev)); +} + +static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev); +} + +/** + * struct vlan_pcpu_stats - VLAN percpu rx/tx stats + * @rx_packets: number of received packets + * @rx_bytes: number of received bytes + * @rx_multicast: number of received multicast packets + * @tx_packets: number of transmitted packets + * @tx_bytes: number of transmitted bytes + * @syncp: synchronization point for 64bit counters + * @rx_errors: number of rx errors + * @tx_dropped: number of tx drops + */ +struct vlan_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_errors; + u32 tx_dropped; +}; + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) + +extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, + __be16 vlan_proto, u16 vlan_id); +extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); +extern u16 vlan_dev_vlan_id(const struct net_device *dev); +extern __be16 vlan_dev_vlan_proto(const struct net_device *dev); + +/** + * struct vlan_priority_tci_mapping - vlan egress priority mappings + * @priority: skb priority + * @vlan_qos: vlan priority: (skb->priority << 
13) & 0xE000 + * @next: pointer to next struct + */ +struct vlan_priority_tci_mapping { + u32 priority; + u16 vlan_qos; + struct vlan_priority_tci_mapping *next; +}; + +struct proc_dir_entry; +struct netpoll; + +/** + * struct vlan_dev_priv - VLAN private device data + * @nr_ingress_mappings: number of ingress priority mappings + * @ingress_priority_map: ingress priority mappings + * @nr_egress_mappings: number of egress priority mappings + * @egress_priority_map: hash of egress priority mappings + * @vlan_proto: VLAN encapsulation protocol + * @vlan_id: VLAN identifier + * @flags: device flags + * @real_dev: underlying netdevice + * @real_dev_addr: address of underlying netdevice + * @dent: proc dir entry + * @vlan_pcpu_stats: ptr to percpu rx stats + */ +struct vlan_dev_priv { + unsigned int nr_ingress_mappings; + u32 ingress_priority_map[8]; + unsigned int nr_egress_mappings; + struct vlan_priority_tci_mapping *egress_priority_map[16]; + + __be16 vlan_proto; + u16 vlan_id; + u16 flags; + + struct net_device *real_dev; + unsigned char real_dev_addr[ETH_ALEN]; + + struct proc_dir_entry *dent; + struct vlan_pcpu_stats __percpu *vlan_pcpu_stats; +#ifdef CONFIG_NET_POLL_CONTROLLER + struct netpoll *netpoll; +#endif + unsigned int nest_level; +}; + +static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +static inline u16 +vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio) +{ + struct vlan_priority_tci_mapping *mp; + + smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */ + + mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)]; + while (mp) { + if (mp->priority == skprio) { + return mp->vlan_qos; /* This should already be shifted + * to mask correctly with the + * VLAN's TCI */ + } + mp = mp->next; + } + return 0; +} + +extern bool vlan_do_receive(struct sk_buff **skb); + +extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid); +extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid); + +extern int vlan_vids_add_by_dev(struct net_device *dev, + const struct net_device *by_dev); +extern void vlan_vids_del_by_dev(struct net_device *dev, + const struct net_device *by_dev); + +extern bool vlan_uses_dev(const struct net_device *dev); + +static inline int vlan_get_encap_level(struct net_device *dev) +{ + BUG_ON(!is_vlan_dev(dev)); + return vlan_dev_priv(dev)->nest_level; +} +#else +static inline struct net_device * +__vlan_find_dev_deep_rcu(struct net_device *real_dev, + __be16 vlan_proto, u16 vlan_id) +{ + return NULL; +} + +static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) +{ + BUG(); + return NULL; +} + +static inline u16 vlan_dev_vlan_id(const struct net_device *dev) +{ + BUG(); + return 0; +} + +static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev) +{ + BUG(); + return 0; +} + +static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev, + u32 skprio) +{ + return 0; +} + +static inline bool vlan_do_receive(struct sk_buff **skb) +{ + return false; +} + +static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid) +{ + return 0; +} + +static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid) +{ +} + +static inline int vlan_vids_add_by_dev(struct net_device *dev, + const struct net_device *by_dev) +{ + return 0; +} + +static inline void vlan_vids_del_by_dev(struct net_device *dev, + const struct net_device *by_dev) +{ +} + +static inline bool vlan_uses_dev(const struct 
net_device *dev) +{ + return false; +} +static inline int vlan_get_encap_level(struct net_device *dev) +{ + BUG(); + return 0; +} +#endif + +/** + * eth_type_vlan - check for valid vlan ether type. + * @ethertype: ether type to check + * + * Returns true if the ether type is a vlan ether type. + */ +static inline bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case htons(ETH_P_8021Q): + case htons(ETH_P_8021AD): + return true; + default: + return false; + } +} + +static inline bool vlan_hw_offload_capable(netdev_features_t features, + __be16 proto) +{ + if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX) + return true; + if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX) + return true; + return false; +} + +/** + * __vlan_insert_inner_tag - inner VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * @mac_len: MAC header length including outer vlan headers + * + * Inserts the VLAN tag into @skb as part of the payload at offset mac_len + * Returns error if skb_cow_head fails. + * + * Does not change skb->protocol so this function can be used during receive. + */ +static inline int __vlan_insert_inner_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci, + unsigned int mac_len) +{ + struct vlan_ethhdr *veth; + + if (skb_cow_head(skb, VLAN_HLEN) < 0) + return -ENOMEM; + + skb_push(skb, VLAN_HLEN); + + /* Move the mac header sans proto to the beginning of the new header. */ + if (likely(mac_len > ETH_TLEN)) + memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); + skb->mac_header -= VLAN_HLEN; + + veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN); + + /* first, the ethernet type */ + if (likely(mac_len >= ETH_TLEN)) { + /* h_vlan_encapsulated_proto should already be populated, and + * skb->data has space for h_vlan_proto + */ + veth->h_vlan_proto = vlan_proto; + } else { + /* h_vlan_encapsulated_proto should not be populated, and + * skb->data has no space for h_vlan_proto + */ + veth->h_vlan_encapsulated_proto = skb->protocol; + } + + /* now, the TCI */ + veth->h_vlan_TCI = htons(vlan_tci); + + return 0; +} + +/** + * __vlan_insert_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns error if skb_cow_head fails. + * + * Does not change skb->protocol so this function can be used during receive. + */ +static inline int __vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); +} + +/** + * vlan_insert_inner_tag - inner VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * @mac_len: MAC header length including outer vlan headers + * + * Inserts the VLAN tag into @skb as part of the payload at offset mac_len + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + * + * Does not change skb->protocol so this function can be used during receive. 
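The skb_vlan_tag_*() accessors above hand back raw TCI bits; a sketch of extracting the VLAN ID and the priority code point, including the explicit shift that skb_vlan_tag_get_prio() leaves to the caller. The function name is an assumption.

/*
 * Illustrative sketch (not part of the patch): read the accelerated VLAN
 * tag carried in skb->vlan_tci, if any.
 */
static int example_get_vid_prio(const struct sk_buff *skb, u16 *vid, u8 *prio)
{
	if (!skb_vlan_tag_present(skb))
		return -EINVAL;	/* no accelerated tag on this skb */

	*vid = skb_vlan_tag_get_id(skb);
	/* skb_vlan_tag_get_prio() returns the unshifted priority bits. */
	*prio = skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT;
	return 0;
}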
+ */ +static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, + __be16 vlan_proto, + u16 vlan_tci, + unsigned int mac_len) +{ + int err; + + err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len); + if (err) { + dev_kfree_skb_any(skb); + return NULL; + } + return skb; +} + +/** + * vlan_insert_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + * + * Does not change skb->protocol so this function can be used during receive. + */ +static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); +} + +/** + * vlan_insert_tag_set_proto - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + */ +static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, + __be16 vlan_proto, + u16 vlan_tci) +{ + skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); + if (skb) + skb->protocol = vlan_proto; + return skb; +} + +/* + * __vlan_hwaccel_push_inside - pushes vlan tag to the payload + * @skb: skbuff to tag + * + * Pushes the VLAN tag from @skb->vlan_tci inside to the payload. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. 
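 *
 * An illustrative pattern from a driver transmit path whose hardware cannot
 * offload the tag for this protocol (dev is the hypothetical transmitting
 * net_device); on error the skb has already been freed, so the sketch simply
 * reports the packet as consumed:
 *
 *      if (skb_vlan_tag_present(skb) &&
 *          !vlan_hw_offload_capable(dev->features, skb->vlan_proto)) {
 *              skb = __vlan_hwaccel_push_inside(skb);
 *              if (!skb)
 *                      return NETDEV_TX_OK;
 *      }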
+ */ +static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) +{ + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, + skb_vlan_tag_get(skb)); + if (likely(skb)) + skb->vlan_tci = 0; + return skb; +} + +/** + * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest + */ +static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + skb->vlan_proto = vlan_proto; + skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; +} + +/** + * __vlan_get_tag - get the VLAN ID that is part of the payload + * @skb: skbuff to query + * @vlan_tci: buffer to store value + * + * Returns error if the skb is not of VLAN type + */ +static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) +{ + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; + + if (!eth_type_vlan(veth->h_vlan_proto)) + return -EINVAL; + + *vlan_tci = ntohs(veth->h_vlan_TCI); + return 0; +} + +/** + * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] + * @skb: skbuff to query + * @vlan_tci: buffer to store value + * + * Returns error if @skb->vlan_tci is not set correctly + */ +static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, + u16 *vlan_tci) +{ + if (skb_vlan_tag_present(skb)) { + *vlan_tci = skb_vlan_tag_get(skb); + return 0; + } else { + *vlan_tci = 0; + return -EINVAL; + } +} + +#define HAVE_VLAN_GET_TAG + +/** + * vlan_get_tag - get the VLAN ID from the skb + * @skb: skbuff to query + * @vlan_tci: buffer to store value + * + * Returns error if the skb is not VLAN tagged + */ +static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) +{ + if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) { + return __vlan_hwaccel_get_tag(skb, vlan_tci); + } else { + return __vlan_get_tag(skb, vlan_tci); + } +} + +/** + * vlan_get_protocol - get protocol EtherType. + * @skb: skbuff to query + * @type: first vlan protocol + * @depth: buffer to store length of eth and vlan tags in bytes + * + * Returns the EtherType of the packet, regardless of whether it is + * vlan encapsulated (normal or hardware accelerated) or not. + */ +static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, + int *depth) +{ + unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; + + /* if type is 802.1Q/AD then the header should already be + * present at mac_len - VLAN_HLEN (if mac_len > 0), or at + * ETH_HLEN otherwise + */ + if (eth_type_vlan(type)) { + if (vlan_depth) { + if (WARN_ON(vlan_depth < VLAN_HLEN)) + return 0; + vlan_depth -= VLAN_HLEN; + } else { + vlan_depth = ETH_HLEN; + } + do { + struct vlan_hdr vhdr, *vh; + + vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); + if (unlikely(!vh || !--parse_depth)) + return 0; + + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + } while (eth_type_vlan(type)); + } + + if (depth) + *depth = vlan_depth; + + return type; +} + +/** + * vlan_get_protocol - get protocol EtherType. + * @skb: skbuff to query + * + * Returns the EtherType of the packet, regardless of whether it is + * vlan encapsulated (normal or hardware accelerated) or not. 
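 *
 * An illustrative use, where the caller needs the inner EtherType no matter
 * how many tags precede it (handle_ipv4() is a hypothetical helper):
 *
 *      __be16 proto = vlan_get_protocol(skb);
 *
 *      if (proto == htons(ETH_P_IP))
 *              handle_ipv4(skb);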
+ */ +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + return __vlan_get_protocol(skb, skb->protocol, NULL); +} + +/* A getter for the SKB protocol field which will handle VLAN tags consistently + * whether VLAN acceleration is enabled or not. + */ +static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) +{ + if (!skip_vlan) + /* VLAN acceleration strips the VLAN header from the skb and + * moves it to skb->vlan_proto + */ + return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol; + + return vlan_get_protocol(skb); +} + +static inline void vlan_set_encap_proto(struct sk_buff *skb, + struct vlan_hdr *vhdr) +{ + __be16 proto; + unsigned short *rawp; + + /* + * Was a VLAN packet, grab the encapsulated protocol, which the layer + * three protocols care about. + */ + + proto = vhdr->h_vlan_encapsulated_proto; + if (eth_proto_is_802_3(proto)) { + skb->protocol = proto; + return; + } + + rawp = (unsigned short *)(vhdr + 1); + if (*rawp == 0xFFFF) + /* + * This is a magic hack to spot IPX packets. Older Novell + * breaks the protocol design and runs IPX over 802.3 without + * an 802.2 LLC layer. We look for FFFF which isn't a used + * 802.2 SSAP/DSAP. This won't work for fault tolerant netware + * but does for the rest. + */ + skb->protocol = htons(ETH_P_802_3); + else + /* + * Real 802.2 LLC + */ + skb->protocol = htons(ETH_P_802_2); +} + +/** + * skb_vlan_tagged - check if skb is vlan tagged. + * @skb: skbuff to query + * + * Returns true if the skb is tagged, regardless of whether it is hardware + * accelerated or not. + */ +static inline bool skb_vlan_tagged(const struct sk_buff *skb) +{ + if (!skb_vlan_tag_present(skb) && + likely(!eth_type_vlan(skb->protocol))) + return false; + + return true; +} + +/** + * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers. + * @skb: skbuff to query + * + * Returns true if the skb is tagged with multiple vlan headers, regardless + * of whether it is hardware accelerated or not. + */ +static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) +{ + __be16 protocol = skb->protocol; + + if (!skb_vlan_tag_present(skb)) { + struct vlan_ethhdr *veh; + + if (likely(!eth_type_vlan(protocol))) + return false; + + if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) + return false; + + veh = (struct vlan_ethhdr *)skb->data; + protocol = veh->h_vlan_encapsulated_proto; + } + + if (!eth_type_vlan(protocol)) + return false; + + return true; +} + +/** + * vlan_features_check - drop unsafe features for skb with multiple tags. + * @skb: skbuff to query + * @features: features to be checked + * + * Returns features without unsafe ones if the skb has multiple tags. + */ +static inline netdev_features_t vlan_features_check(struct sk_buff *skb, + netdev_features_t features) +{ + if (skb_vlan_tagged_multi(skb)) { + /* In the case of multi-tagged packets, use a direct mask + * instead of using netdev_interesect_features(), to make + * sure that only devices supporting NETIF_F_HW_CSUM will + * have checksum offloading support. + */ + features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | + NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; + } + + return features; +} + +/** + * compare_vlan_header - Compare two vlan headers + * @h1: Pointer to vlan header + * @h2: Pointer to vlan header + * + * Compare two vlan headers, returns 0 if equal. + * + * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits. 
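 *
 * Illustrative use, with skb1 and skb2 as hypothetical frames and ETH_HLEN as
 * the tag offset of a single-tagged frame:
 *
 *      struct vlan_hdr vhdr1, vhdr2;
 *      const struct vlan_hdr *vh1, *vh2;
 *      bool same_tag;
 *
 *      vh1 = skb_header_pointer(skb1, ETH_HLEN, sizeof(vhdr1), &vhdr1);
 *      vh2 = skb_header_pointer(skb2, ETH_HLEN, sizeof(vhdr2), &vhdr2);
 *      same_tag = vh1 && vh2 && !compare_vlan_header(vh1, vh2);
 *
 * A zero return covers both the TCI and the encapsulated protocol.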
+ */ +static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1, + const struct vlan_hdr *h2) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return *(u32 *)h1 ^ *(u32 *)h2; +#else + return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) | + ((__force u32)h1->h_vlan_encapsulated_proto ^ + (__force u32)h2->h_vlan_encapsulated_proto); +#endif +} +#endif /* !(_LINUX_IF_VLAN_H_) */ diff --git a/include/linux/igmp.h b/include/linux/igmp.h new file mode 100644 index 000000000..119f53941 --- /dev/null +++ b/include/linux/igmp.h @@ -0,0 +1,135 @@ +/* + * Linux NET3: Internet Group Management Protocol [IGMP] + * + * Authors: + * Alan Cox + * + * Extended to talk the BSD extended IGMP protocol of mrouted 3.6 + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IGMP_H +#define _LINUX_IGMP_H + +#include +#include +#include +#include +#include + +static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb) +{ + return (struct igmphdr *)skb_transport_header(skb); +} + +static inline struct igmpv3_report * + igmpv3_report_hdr(const struct sk_buff *skb) +{ + return (struct igmpv3_report *)skb_transport_header(skb); +} + +static inline struct igmpv3_query * + igmpv3_query_hdr(const struct sk_buff *skb) +{ + return (struct igmpv3_query *)skb_transport_header(skb); +} + +struct ip_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct rcu_head rcu; + __be32 sl_addr[0]; +}; + +#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \ + (count) * sizeof(__be32)) + +#define IP_SFBLOCK 10 /* allocate this many at once */ + +/* ip_mc_socklist is real list now. Speed is not argument; + this list never used in fast path code + */ + +struct ip_mc_socklist { + struct ip_mc_socklist __rcu *next_rcu; + struct ip_mreqn multi; + unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ + struct ip_sf_socklist __rcu *sflist; + struct rcu_head rcu; +}; + +struct ip_sf_list { + struct ip_sf_list *sf_next; + __be32 sf_inaddr; + unsigned long sf_count[2]; /* include/exclude counts */ + unsigned char sf_gsresp; /* include in g & s response? */ + unsigned char sf_oldin; /* change state */ + unsigned char sf_crcount; /* retrans. left to send */ +}; + +struct ip_mc_list { + struct in_device *interface; + __be32 multiaddr; + unsigned int sfmode; + struct ip_sf_list *sources; + struct ip_sf_list *tomb; + unsigned long sfcount[2]; + union { + struct ip_mc_list *next; + struct ip_mc_list __rcu *next_rcu; + }; + struct ip_mc_list __rcu *next_hash; + struct timer_list timer; + int users; + refcount_t refcnt; + spinlock_t lock; + char tm_running; + char reporter; + char unsolicit_count; + char loaded; + unsigned char gsquery; /* check source marks? */ + unsigned char crcount; + struct rcu_head rcu; +}; + +/* V3 exponential field decoding */ +#define IGMPV3_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value)) +#define IGMPV3_EXP(thresh, nbmant, nbexp, value) \ + ((value) < (thresh) ? 
(value) : \ + ((IGMPV3_MASK(value, nbmant) | (1<<(nbmant))) << \ + (IGMPV3_MASK((value) >> (nbmant), nbexp) + (nbexp)))) + +#define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) +#define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) + +extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); +extern int igmp_rcv(struct sk_buff *); +extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); +extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr, + unsigned int mode); +extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); +extern void ip_mc_drop_socket(struct sock *sk); +extern int ip_mc_source(int add, int omode, struct sock *sk, + struct ip_mreq_source *mreqs, int ifindex); +extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex); +extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, + struct ip_msfilter __user *optval, int __user *optlen); +extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, + struct group_filter __user *optval, int __user *optlen); +extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, + int dif, int sdif); +extern void ip_mc_init_dev(struct in_device *); +extern void ip_mc_destroy_dev(struct in_device *); +extern void ip_mc_up(struct in_device *); +extern void ip_mc_down(struct in_device *); +extern void ip_mc_unmap(struct in_device *); +extern void ip_mc_remap(struct in_device *); +extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); +extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); +int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed); + +#endif diff --git a/include/linux/ihex.h b/include/linux/ihex.h new file mode 100644 index 000000000..75c194391 --- /dev/null +++ b/include/linux/ihex.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Compact binary representation of ihex records. Some devices need their + * firmware loaded in strange orders rather than a single big blob, but + * actually parsing ihex-as-text within the kernel seems silly. Thus,... + */ + +#ifndef __LINUX_IHEX_H__ +#define __LINUX_IHEX_H__ + +#include +#include +#include + +/* Intel HEX files actually limit the length to 256 bytes, but we have + drivers which would benefit from using separate records which are + longer than that, so we extend to 16 bits of length */ +struct ihex_binrec { + __be32 addr; + __be16 len; + uint8_t data[0]; +} __attribute__((packed)); + +/* Find the next record, taking into account the 4-byte alignment */ +static inline const struct ihex_binrec * +ihex_next_binrec(const struct ihex_binrec *rec) +{ + int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2; + rec = (void *)&rec->data[next]; + + return be16_to_cpu(rec->len) ? rec : NULL; +} + +/* Check that ihex_next_binrec() won't take us off the end of the image... */ +static inline int ihex_validate_fw(const struct firmware *fw) +{ + const struct ihex_binrec *rec; + size_t ofs = 0; + + while (ofs <= fw->size - sizeof(*rec)) { + rec = (void *)&fw->data[ofs]; + + /* Zero length marks end of records */ + if (!be16_to_cpu(rec->len)) + return 0; + + /* Point to next record... */ + ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3; + } + return -EINVAL; +} + +/* Request firmware and validate it so that we can trust we won't + * run off the end while reading records... 
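 *
 * An illustrative probe-time sequence; the firmware name and the
 * example_load_chunk() helper are hypothetical:
 *
 *      const struct firmware *fw;
 *      const struct ihex_binrec *rec;
 *      int err = request_ihex_firmware(&fw, "example-device.fw", dev);
 *
 *      if (err)
 *              return err;
 *      for (rec = (const void *)fw->data; rec; rec = ihex_next_binrec(rec))
 *              example_load_chunk(dev, be32_to_cpu(rec->addr),
 *                                 rec->data, be16_to_cpu(rec->len));
 *      release_firmware(fw);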
*/ +static inline int request_ihex_firmware(const struct firmware **fw, + const char *fw_name, + struct device *dev) +{ + const struct firmware *lfw; + int ret; + + ret = request_firmware(&lfw, fw_name, dev); + if (ret) + return ret; + ret = ihex_validate_fw(lfw); + if (ret) { + dev_err(dev, "Firmware \"%s\" not valid IHEX records\n", + fw_name); + release_firmware(lfw); + return ret; + } + *fw = lfw; + return 0; +} +#endif /* __LINUX_IHEX_H__ */ diff --git a/include/linux/iio/accel/kxcjk_1013.h b/include/linux/iio/accel/kxcjk_1013.h new file mode 100644 index 000000000..fd1d540ea --- /dev/null +++ b/include/linux/iio/accel/kxcjk_1013.h @@ -0,0 +1,22 @@ +/* + * KXCJK-1013 3-axis accelerometer Interface + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IIO_KXCJK_1013_H__ +#define __IIO_KXCJK_1013_H__ + +struct kxcjk_1013_platform_data { + bool active_high_intr; +}; + +#endif diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h new file mode 100644 index 000000000..57c122ae5 --- /dev/null +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -0,0 +1,188 @@ +/* + * Support code for Analog Devices Sigma-Delta ADCs + * + * Copyright 2012 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. + */ +#ifndef __AD_SIGMA_DELTA_H__ +#define __AD_SIGMA_DELTA_H__ + +enum ad_sigma_delta_mode { + AD_SD_MODE_CONTINUOUS = 0, + AD_SD_MODE_SINGLE = 1, + AD_SD_MODE_IDLE = 2, + AD_SD_MODE_POWERDOWN = 3, +}; + +/** + * struct ad_sigma_delta_calib_data - Calibration data for Sigma Delta devices + * @mode: Calibration mode. + * @channel: Calibration channel. + */ +struct ad_sd_calib_data { + unsigned int mode; + unsigned int channel; +}; + +struct ad_sigma_delta; +struct iio_dev; + +/** + * struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options + * @set_channel: Will be called to select the current channel, may be NULL. + * @set_mode: Will be called to select the current mode, may be NULL. + * @postprocess_sample: Is called for each sampled data word, can be used to + * modify or drop the sample data, it, may be NULL. + * @has_registers: true if the device has writable and readable registers, false + * if there is just one read-only sample data shift register. + * @addr_shift: Shift of the register address in the communications register. + * @read_mask: Mask for the communications register having the read bit set. + */ +struct ad_sigma_delta_info { + int (*set_channel)(struct ad_sigma_delta *, unsigned int channel); + int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode); + int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample); + bool has_registers; + unsigned int addr_shift; + unsigned int read_mask; +}; + +/** + * struct ad_sigma_delta - Sigma Delta device struct + * @spi: The spi device associated with the Sigma Delta device. + * @trig: The IIO trigger associated with the Sigma Delta device. + * + * Most of the fields are private to the sigma delta library code and should not + * be accessed by individual drivers. 
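 *
 * A driver typically embeds this struct in its own state and initialises it
 * from its SPI probe callback, roughly as follows (the state and info names
 * are hypothetical):
 *
 *      struct example_adc_state {
 *              struct ad_sigma_delta sd;
 *      };
 *
 *      ret = ad_sd_init(&st->sd, indio_dev, spi, &example_sigma_delta_info);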
+ */ +struct ad_sigma_delta { + struct spi_device *spi; + struct iio_trigger *trig; + +/* private: */ + struct completion completion; + bool irq_dis; + + bool bus_locked; + bool keep_cs_asserted; + + uint8_t comm; + + const struct ad_sigma_delta_info *info; + + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + uint8_t data[4] ____cacheline_aligned; +}; + +static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd, + unsigned int channel) +{ + if (sd->info->set_channel) + return sd->info->set_channel(sd, channel); + + return 0; +} + +static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd, + unsigned int mode) +{ + if (sd->info->set_mode) + return sd->info->set_mode(sd, mode); + + return 0; +} + +static inline int ad_sigma_delta_postprocess_sample(struct ad_sigma_delta *sd, + unsigned int raw_sample) +{ + if (sd->info->postprocess_sample) + return sd->info->postprocess_sample(sd, raw_sample); + + return 0; +} + +void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, uint8_t comm); +int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, + unsigned int size, unsigned int val); +int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, + unsigned int size, unsigned int *val); + +int ad_sd_reset(struct ad_sigma_delta *sigma_delta, + unsigned int reset_length); + +int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, int *val); +int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta, + const struct ad_sd_calib_data *cd, unsigned int n); +int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev, + struct spi_device *spi, const struct ad_sigma_delta_info *info); + +int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev); +void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev); + +int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig); + +#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ + _storagebits, _shift, _extend_name, _type, _mask_all) \ + { \ + .type = (_type), \ + .differential = (_channel2 == -1 ? 
0 : 1), \ + .indexed = 1, \ + .channel = (_channel1), \ + .channel2 = (_channel2), \ + .address = (_address), \ + .extend_name = (_extend_name), \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = _mask_all, \ + .scan_index = (_si), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (_bits), \ + .storagebits = (_storagebits), \ + .shift = (_shift), \ + .endianness = IIO_BE, \ + }, \ + } + +#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ + _storagebits, _shift) \ + __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ + _storagebits, _shift, NULL, IIO_VOLTAGE, \ + BIT(IIO_CHAN_INFO_SAMP_FREQ)) + +#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \ + _storagebits, _shift) \ + __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \ + _storagebits, _shift, "shorted", IIO_VOLTAGE, \ + BIT(IIO_CHAN_INFO_SAMP_FREQ)) + +#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \ + _storagebits, _shift) \ + __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ + _storagebits, _shift, NULL, IIO_VOLTAGE, \ + BIT(IIO_CHAN_INFO_SAMP_FREQ)) + +#define AD_SD_CHANNEL_NO_SAMP_FREQ(_si, _channel, _address, _bits, \ + _storagebits, _shift) \ + __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ + _storagebits, _shift, NULL, IIO_VOLTAGE, 0) + +#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \ + __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \ + _storagebits, _shift, NULL, IIO_TEMP, \ + BIT(IIO_CHAN_INFO_SAMP_FREQ)) + +#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \ + _shift) \ + __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ + _storagebits, _shift, "supply", IIO_VOLTAGE, \ + BIT(IIO_CHAN_INFO_SAMP_FREQ)) + +#endif diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h new file mode 100644 index 000000000..0da298b41 --- /dev/null +++ b/include/linux/iio/adc/stm32-dfsdm-adc.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file discribe the STM32 DFSDM IIO driver API for audio part + * + * Copyright (C) 2017, STMicroelectronics - All Rights Reserved + * Author(s): Arnaud Pouliquen . + */ + +#ifndef STM32_DFSDM_ADC_H +#define STM32_DFSDM_ADC_H + +#include + +int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev, + int (*cb)(const void *data, size_t size, + void *private), + void *private); +int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev); + +#endif diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h new file mode 100644 index 000000000..67c75372b --- /dev/null +++ b/include/linux/iio/buffer-dma.h @@ -0,0 +1,152 @@ +/* + * Copyright 2013-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. 
+ */ + +#ifndef __INDUSTRIALIO_DMA_BUFFER_H__ +#define __INDUSTRIALIO_DMA_BUFFER_H__ + +#include +#include +#include +#include +#include + +struct iio_dma_buffer_queue; +struct iio_dma_buffer_ops; +struct device; + +struct iio_buffer_block { + u32 size; + u32 bytes_used; +}; + +/** + * enum iio_block_state - State of a struct iio_dma_buffer_block + * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued + * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue + * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA + * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue + * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed + */ +enum iio_block_state { + IIO_BLOCK_STATE_DEQUEUED, + IIO_BLOCK_STATE_QUEUED, + IIO_BLOCK_STATE_ACTIVE, + IIO_BLOCK_STATE_DONE, + IIO_BLOCK_STATE_DEAD, +}; + +/** + * struct iio_dma_buffer_block - IIO buffer block + * @head: List head + * @size: Total size of the block in bytes + * @bytes_used: Number of bytes that contain valid data + * @vaddr: Virtual address of the block's memory + * @phys_addr: Physical address of the block's memory + * @queue: Parent DMA buffer queue + * @kref: kref used to manage the lifetime of the block + * @state: Current state of the block + */ +struct iio_dma_buffer_block { + /* May only be accessed by the owner of the block */ + struct list_head head; + size_t bytes_used; + + /* + * Set during allocation, constant thereafter. May be accessed read-only + * by anybody holding a reference to the block. + */ + void *vaddr; + dma_addr_t phys_addr; + size_t size; + struct iio_dma_buffer_queue *queue; + + /* Must not be accessed outside the core. */ + struct kref kref; + /* + * Must not be accessed outside the core. Access needs to hold + * queue->list_lock if the block is not owned by the core. + */ + enum iio_block_state state; +}; + +/** + * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer + * @blocks: Buffer blocks used for fileio + * @active_block: Block being used in read() + * @pos: Read offset in the active block + * @block_size: Size of each block + */ +struct iio_dma_buffer_queue_fileio { + struct iio_dma_buffer_block *blocks[2]; + struct iio_dma_buffer_block *active_block; + size_t pos; + size_t block_size; +}; + +/** + * struct iio_dma_buffer_queue - DMA buffer base structure + * @buffer: IIO buffer base structure + * @dev: Parent device + * @ops: DMA buffer callbacks + * @lock: Protects the incoming list, active and the fields in the fileio + * substruct + * @list_lock: Protects lists that contain blocks which can be modified in + * atomic context as well as blocks on those lists.
This is the outgoing queue + * list and typically also a list of active blocks in the part that handles + * the DMA controller + * @incoming: List of buffers on the incoming queue + * @outgoing: List of buffers on the outgoing queue + * @active: Whether the buffer is currently active + * @fileio: FileIO state + */ +struct iio_dma_buffer_queue { + struct iio_buffer buffer; + struct device *dev; + const struct iio_dma_buffer_ops *ops; + + struct mutex lock; + spinlock_t list_lock; + struct list_head incoming; + struct list_head outgoing; + + bool active; + + struct iio_dma_buffer_queue_fileio fileio; +}; + +/** + * struct iio_dma_buffer_ops - DMA buffer callback operations + * @submit: Called when a block is submitted to the DMA controller + * @abort: Should abort all pending transfers + */ +struct iio_dma_buffer_ops { + int (*submit)(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block); + void (*abort)(struct iio_dma_buffer_queue *queue); +}; + +void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block); +void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, + struct list_head *list); + +int iio_dma_buffer_enable(struct iio_buffer *buffer, + struct iio_dev *indio_dev); +int iio_dma_buffer_disable(struct iio_buffer *buffer, + struct iio_dev *indio_dev); +int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, + char __user *user_buffer); +size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); +int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); +int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length); +int iio_dma_buffer_request_update(struct iio_buffer *buffer); + +int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, + struct device *dma_dev, const struct iio_dma_buffer_ops *ops); +void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue); +void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue); + +#endif diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h new file mode 100644 index 000000000..5dcddf427 --- /dev/null +++ b/include/linux/iio/buffer-dmaengine.h @@ -0,0 +1,18 @@ +/* + * Copyright 2014-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __IIO_DMAENGINE_H__ +#define __IIO_DMAENGINE_H__ + +struct iio_buffer; +struct device; + +struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, + const char *channel); +void iio_dmaengine_buffer_free(struct iio_buffer *buffer); + +#endif diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h new file mode 100644 index 000000000..48767c776 --- /dev/null +++ b/include/linux/iio/buffer.h @@ -0,0 +1,53 @@ +/* The industrial I/O core - generic buffer interfaces. + * + * Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef _IIO_BUFFER_GENERIC_H_ +#define _IIO_BUFFER_GENERIC_H_ +#include +#include + +struct iio_buffer; + +void iio_buffer_set_attrs(struct iio_buffer *buffer, + const struct attribute **attrs); + +int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data); + +/** + * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers + * @indio_dev: iio_dev structure for device. 
+ * @data: sample data + * @timestamp: timestamp for the sample data + * + * Pushes data to the IIO device's buffers. If timestamps are enabled for the + * device the function will store the supplied timestamp as the last element in + * the sample data buffer before pushing it to the device buffers. The sample + * data buffer needs to be large enough to hold the additional timestamp + * (usually the buffer should be indio_dev->scan_bytes bytes large). + * + * Returns 0 on success, a negative error code otherwise. + */ +static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev, + void *data, int64_t timestamp) +{ + if (indio_dev->scan_timestamp) { + size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1; + ((int64_t *)data)[ts_offset] = timestamp; + } + + return iio_push_to_buffers(indio_dev, data); +} + +bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, + const unsigned long *mask); + +void iio_device_attach_buffer(struct iio_dev *indio_dev, + struct iio_buffer *buffer); + +#endif /* _IIO_BUFFER_GENERIC_H_ */ diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h new file mode 100644 index 000000000..d1171db23 --- /dev/null +++ b/include/linux/iio/buffer_impl.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _IIO_BUFFER_GENERIC_IMPL_H_ +#define _IIO_BUFFER_GENERIC_IMPL_H_ +#include +#include + +#ifdef CONFIG_IIO_BUFFER + +struct iio_dev; +struct iio_buffer; + +/** + * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be + * configured. It has a fixed value which will be buffer specific. + */ +#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0) + +/** + * struct iio_buffer_access_funcs - access functions for buffers. + * @store_to: actually store stuff to the buffer + * @read_first_n: try to get a specified number of bytes (must exist) + * @data_available: indicates how much data is available for reading from + * the buffer. + * @request_update: if a parameter change has been marked, update underlying + * storage. + * @set_bytes_per_datum: set number of bytes per datum + * @set_length: set number of datums in buffer + * @enable: called if the buffer is attached to a device and the + * device starts sampling. Calls are balanced with + * @disable. + * @disable: called if the buffer is attached to a device and the + * device stops sampling. Calls are balanced with @enable. + * @release: called when the last reference to the buffer is dropped, + * should free all resources allocated by the buffer. + * @modes: Supported operating modes by this buffer type + * @flags: A bitmask combination of INDIO_BUFFER_FLAG_* + * + * The purpose of this structure is to make the buffer element + * modular because, even for a given driver, different use cases may require + * different buffer designs (space efficiency vs speed for example). + * + * It is worth noting that a given buffer implementation may only support a + * small proportion of these functions. The core code 'should' cope fine with + * any of them not existing.
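 *
 * A buffer implementation typically wires these up in a static table, for
 * example (all callback names are hypothetical):
 *
 *      static const struct iio_buffer_access_funcs example_access_funcs = {
 *              .store_to = example_store_to,
 *              .read_first_n = example_read_first_n,
 *              .data_available = example_data_available,
 *              .request_update = example_request_update,
 *              .release = example_release,
 *              .modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
 *      };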
+ **/ +struct iio_buffer_access_funcs { + int (*store_to)(struct iio_buffer *buffer, const void *data); + int (*read_first_n)(struct iio_buffer *buffer, + size_t n, + char __user *buf); + size_t (*data_available)(struct iio_buffer *buffer); + + int (*request_update)(struct iio_buffer *buffer); + + int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); + int (*set_length)(struct iio_buffer *buffer, unsigned int length); + + int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + + void (*release)(struct iio_buffer *buffer); + + unsigned int modes; + unsigned int flags; +}; + +/** + * struct iio_buffer - general buffer structure + * + * Note that the internals of this structure should only be of interest to + * those writing new buffer implementations. + */ +struct iio_buffer { + /** @length: Number of datums in buffer. */ + unsigned int length; + + /** @bytes_per_datum: Size of individual datum including timestamp. */ + size_t bytes_per_datum; + + /** + * @access: Buffer access functions associated with the + * implementation. + */ + const struct iio_buffer_access_funcs *access; + + /** @scan_mask: Bitmask used in masking scan mode elements. */ + long *scan_mask; + + /** @demux_list: List of operations required to demux the scan. */ + struct list_head demux_list; + + /** @pollq: Wait queue to allow for polling on the buffer. */ + wait_queue_head_t pollq; + + /** @watermark: Number of datums to wait for poll/read. */ + unsigned int watermark; + + /* private: */ + /* + * @scan_el_attrs: Control of scan elements if that scan mode + * control method is used. + */ + struct attribute_group *scan_el_attrs; + + /* @scan_timestamp: Does the scan mode include a timestamp. */ + bool scan_timestamp; + + /* @scan_el_dev_attr_list: List of scan element related attributes. */ + struct list_head scan_el_dev_attr_list; + + /* @buffer_group: Attributes of the buffer group. */ + struct attribute_group buffer_group; + + /* + * @scan_el_group: Attribute group for those attributes not + * created from the iio_chan_info array. + */ + struct attribute_group scan_el_group; + + /* @stufftoread: Flag to indicate new data. */ + bool stufftoread; + + /* @attrs: Standard attributes of the buffer. */ + const struct attribute **attrs; + + /* @demux_bounce: Buffer for doing gather from incoming scan. */ + void *demux_bounce; + + /* @buffer_list: Entry in the devices list of current buffers. */ + struct list_head buffer_list; + + /* @ref: Reference count of the buffer. 
*/ + struct kref ref; +}; + +/** + * iio_update_buffers() - add or remove buffer from active list + * @indio_dev: device to add buffer to + * @insert_buffer: buffer to insert + * @remove_buffer: buffer_to_remove + * + * Note this will tear down the all buffering and build it up again + */ +int iio_update_buffers(struct iio_dev *indio_dev, + struct iio_buffer *insert_buffer, + struct iio_buffer *remove_buffer); + +/** + * iio_buffer_init() - Initialize the buffer structure + * @buffer: buffer to be initialized + **/ +void iio_buffer_init(struct iio_buffer *buffer); + +struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer); +void iio_buffer_put(struct iio_buffer *buffer); + +#else /* CONFIG_IIO_BUFFER */ + +static inline void iio_buffer_get(struct iio_buffer *buffer) {} +static inline void iio_buffer_put(struct iio_buffer *buffer) {} + +#endif /* CONFIG_IIO_BUFFER */ +#endif /* _IIO_BUFFER_GENERIC_IMPL_H_ */ diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h new file mode 100644 index 000000000..ce1644541 --- /dev/null +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -0,0 +1,180 @@ +/* + * ChromeOS EC sensor hub + * + * Copyright (C) 2016 Google, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __CROS_EC_SENSORS_CORE_H +#define __CROS_EC_SENSORS_CORE_H + +#include +#include +#include + +enum { + CROS_EC_SENSOR_X, + CROS_EC_SENSOR_Y, + CROS_EC_SENSOR_Z, + CROS_EC_SENSOR_MAX_AXIS, +}; + +/* EC returns sensor values using signed 16 bit registers */ +#define CROS_EC_SENSOR_BITS 16 + +/* + * 4 16 bit channels are allowed. + * Good enough for current sensors, they use up to 3 16 bit vectors. + */ +#define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2) + +/* Minimum sampling period to use when device is suspending */ +#define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000 /* 1 second */ + +/** + * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver + * @ec: cros EC device structure + * @cmd_lock: lock used to prevent simultaneous access to the + * commands. + * @msg: cros EC command structure + * @param: motion sensor parameters structure + * @resp: motion sensor response structure + * @type: type of motion sensor + * @loc: location where the motion sensor is placed + * @calib: calibration parameters. Note that trigger + * captured data will always provide the calibrated + * data + * @samples: static array to hold data from a single capture. + * For each channel we need 2 bytes, except for + * the timestamp. The timestamp is always last and + * is always 8-byte aligned. 
+ * @read_ec_sensors_data: function used for accessing sensor values + * @curr_sampl_freq: current sampling period + */ +struct cros_ec_sensors_core_state { + struct cros_ec_device *ec; + struct mutex cmd_lock; + + struct cros_ec_command *msg; + struct ec_params_motion_sense param; + struct ec_response_motion_sense *resp; + + enum motionsensor_type type; + enum motionsensor_location loc; + + s16 calib[CROS_EC_SENSOR_MAX_AXIS]; + + u8 samples[CROS_EC_SAMPLE_SIZE]; + + int (*read_ec_sensors_data)(struct iio_dev *indio_dev, + unsigned long scan_mask, s16 *data); + + int curr_sampl_freq; +}; + +/** + * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory + * @indio_dev: pointer to IIO device + * @scan_mask: bitmap of the sensor indices to scan + * @data: location to store data + * + * This is the safe function for reading the EC data. It guarantees that the + * data sampled was not modified by the EC while being read. + * + * Return: 0 on success, -errno on failure. + */ +int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask, + s16 *data); + +/** + * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol + * @indio_dev: pointer to IIO device + * @scan_mask: bitmap of the sensor indices to scan + * @data: location to store data + * + * Return: 0 on success, -errno on failure. + */ +int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask, + s16 *data); + +struct platform_device; +/** + * cros_ec_sensors_core_init() - basic initialization of the core structure + * @pdev: platform device created for the sensors + * @indio_dev: iio device structure of the device + * @physical_device: true if the device refers to a physical device + * + * Return: 0 on success, -errno on failure. + */ +int cros_ec_sensors_core_init(struct platform_device *pdev, + struct iio_dev *indio_dev, bool physical_device); + +/** + * cros_ec_sensors_capture() - the trigger handler function + * @irq: the interrupt number. + * @p: a pointer to the poll function. + * + * On a trigger event occurring, if the pollfunc is attached then this + * handler is called as a threaded interrupt (and hence may sleep). It + * is responsible for grabbing data from the device and pushing it into + * the associated buffer. + * + * Return: IRQ_HANDLED + */ +irqreturn_t cros_ec_sensors_capture(int irq, void *p); + +/** + * cros_ec_motion_send_host_cmd() - send motion sense host command + * @st: pointer to state information for device + * @opt_length: optional length to reduce the response size, useful on the data + * path. Otherwise, the maximal allowed response size is used + * + * When called, the sub-command is assumed to be set in param->cmd. + * + * Return: 0 on success, -errno on failure.
+ */ +int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st, + u16 opt_length); + +/** + * cros_ec_sensors_core_read() - function to request a value from the sensor + * @st: pointer to state information for device + * @chan: channel specification structure table + * @val: will contain one element making up the returned value + * @val2: will contain another element making up the returned value + * @mask: specifies which values to be requested + * + * Return: the type of value returned by the device + */ +int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask); + +/** + * cros_ec_sensors_core_write() - function to write a value to the sensor + * @st: pointer to state information for device + * @chan: channel specification structure table + * @val: first part of value to write + * @val2: second part of value to write + * @mask: specifies which values to write + * + * Return: the type of value returned by the device + */ +int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, + struct iio_chan_spec const *chan, + int val, int val2, long mask); + +extern const struct dev_pm_ops cros_ec_sensors_pm_ops; + +/* List of extended channel specification for all sensors */ +extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[]; + +#endif /* __CROS_EC_SENSORS_CORE_H */ diff --git a/include/linux/iio/common/ssp_sensors.h b/include/linux/iio/common/ssp_sensors.h new file mode 100644 index 000000000..f4d1b0edb --- /dev/null +++ b/include/linux/iio/common/ssp_sensors.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef _SSP_SENSORS_H_ +#define _SSP_SENSORS_H_ + +#include + +#define SSP_TIME_SIZE 4 +#define SSP_ACCELEROMETER_SIZE 6 +#define SSP_GYROSCOPE_SIZE 6 +#define SSP_BIO_HRM_RAW_SIZE 8 +#define SSP_BIO_HRM_RAW_FAC_SIZE 36 +#define SSP_BIO_HRM_LIB_SIZE 8 + +/** + * enum ssp_sensor_type - SSP sensor type + */ +enum ssp_sensor_type { + SSP_ACCELEROMETER_SENSOR = 0, + SSP_GYROSCOPE_SENSOR, + SSP_GEOMAGNETIC_UNCALIB_SENSOR, + SSP_GEOMAGNETIC_RAW, + SSP_GEOMAGNETIC_SENSOR, + SSP_PRESSURE_SENSOR, + SSP_GESTURE_SENSOR, + SSP_PROXIMITY_SENSOR, + SSP_TEMPERATURE_HUMIDITY_SENSOR, + SSP_LIGHT_SENSOR, + SSP_PROXIMITY_RAW, + SSP_ORIENTATION_SENSOR, + SSP_STEP_DETECTOR, + SSP_SIG_MOTION_SENSOR, + SSP_GYRO_UNCALIB_SENSOR, + SSP_GAME_ROTATION_VECTOR, + SSP_ROTATION_VECTOR, + SSP_STEP_COUNTER, + SSP_BIO_HRM_RAW, + SSP_BIO_HRM_RAW_FAC, + SSP_BIO_HRM_LIB, + SSP_SENSOR_MAX, +}; + +struct ssp_data; + +/** + * struct ssp_sensor_data - Sensor object + * @process_data: Callback to feed sensor data. + * @type: Used sensor type. + * @buffer: Received data buffer. 
+ */ +struct ssp_sensor_data { + int (*process_data)(struct iio_dev *indio_dev, void *buf, + int64_t timestamp); + enum ssp_sensor_type type; + u8 *buffer; +}; + +void ssp_register_consumer(struct iio_dev *indio_dev, + enum ssp_sensor_type type); + +int ssp_enable_sensor(struct ssp_data *data, enum ssp_sensor_type type, + u32 delay); + +int ssp_disable_sensor(struct ssp_data *data, enum ssp_sensor_type type); + +u32 ssp_get_sensor_delay(struct ssp_data *data, enum ssp_sensor_type); + +int ssp_change_delay(struct ssp_data *data, enum ssp_sensor_type type, + u32 delay); +#endif /* _SSP_SENSORS_H_ */ diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h new file mode 100644 index 000000000..f9bd6e8ab --- /dev/null +++ b/include/linux/iio/common/st_sensors.h @@ -0,0 +1,358 @@ +/* + * STMicroelectronics sensors library driver + * + * Copyright 2012-2013 STMicroelectronics Inc. + * + * Denis Ciocca + * + * Licensed under the GPL-2. + */ + +#ifndef ST_SENSORS_H +#define ST_SENSORS_H + +#include +#include +#include +#include +#include +#include + +#include + +#define ST_SENSORS_TX_MAX_LENGTH 2 +#define ST_SENSORS_RX_MAX_LENGTH 6 + +#define ST_SENSORS_ODR_LIST_MAX 10 +#define ST_SENSORS_FULLSCALE_AVL_MAX 10 + +#define ST_SENSORS_NUMBER_ALL_CHANNELS 4 +#define ST_SENSORS_ENABLE_ALL_AXIS 0x07 +#define ST_SENSORS_SCAN_X 0 +#define ST_SENSORS_SCAN_Y 1 +#define ST_SENSORS_SCAN_Z 2 +#define ST_SENSORS_DEFAULT_POWER_ON_VALUE 0x01 +#define ST_SENSORS_DEFAULT_POWER_OFF_VALUE 0x00 +#define ST_SENSORS_DEFAULT_WAI_ADDRESS 0x0f +#define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20 +#define ST_SENSORS_DEFAULT_AXIS_MASK 0x07 +#define ST_SENSORS_DEFAULT_AXIS_N_BIT 3 +#define ST_SENSORS_DEFAULT_STAT_ADDR 0x27 + +#define ST_SENSORS_MAX_NAME 17 +#define ST_SENSORS_MAX_4WAI 7 + +#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \ + ch2, s, endian, rbits, sbits, addr) \ +{ \ + .type = device_type, \ + .modified = mod, \ + .info_mask_separate = mask, \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .scan_index = index, \ + .channel2 = ch2, \ + .address = addr, \ + .scan_type = { \ + .sign = s, \ + .realbits = rbits, \ + .shift = sbits - rbits, \ + .storagebits = sbits, \ + .endianness = endian, \ + }, \ +} + +#define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \ + IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \ + st_sensors_sysfs_sampling_frequency_avail) + +#define ST_SENSORS_DEV_ATTR_SCALE_AVAIL(name) \ + IIO_DEVICE_ATTR(name, S_IRUGO, \ + st_sensors_sysfs_scale_avail, NULL , 0); + +struct st_sensor_odr_avl { + unsigned int hz; + u8 value; +}; + +struct st_sensor_odr { + u8 addr; + u8 mask; + struct st_sensor_odr_avl odr_avl[ST_SENSORS_ODR_LIST_MAX]; +}; + +struct st_sensor_power { + u8 addr; + u8 mask; + u8 value_off; + u8 value_on; +}; + +struct st_sensor_axis { + u8 addr; + u8 mask; +}; + +struct st_sensor_fullscale_avl { + unsigned int num; + u8 value; + unsigned int gain; + unsigned int gain2; +}; + +struct st_sensor_fullscale { + u8 addr; + u8 mask; + struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX]; +}; + +struct st_sensor_sim { + u8 addr; + u8 value; +}; + +/** + * struct st_sensor_bdu - ST sensor device block data update + * @addr: address of the register. + * @mask: mask to write the block data update flag. + */ +struct st_sensor_bdu { + u8 addr; + u8 mask; +}; + +/** + * struct st_sensor_das - ST sensor device data alignment selection + * @addr: address of the register. + * @mask: mask to write the das flag for left alignment. 
+ */ +struct st_sensor_das { + u8 addr; + u8 mask; +}; + +/** + * struct st_sensor_int_drdy - ST sensor device drdy line parameters + * @addr: address of INT drdy register. + * @mask: mask to enable drdy line. + * @addr_od: address to enable/disable Open Drain on the INT line. + * @mask_od: mask to enable/disable Open Drain on the INT line. + */ +struct st_sensor_int_drdy { + u8 addr; + u8 mask; + u8 addr_od; + u8 mask_od; +}; + +/** + * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt + * struct int1 - data-ready configuration register for INT1 pin. + * struct int2 - data-ready configuration register for INT2 pin. + * @addr_ihl: address to enable/disable active low on the INT lines. + * @mask_ihl: mask to enable/disable active low on the INT lines. + * struct stat_drdy - status register of DRDY (data ready) interrupt. + * struct ig1 - represents the Interrupt Generator 1 of sensors. + * @en_addr: address of the enable ig1 register. + * @en_mask: mask to write the on/off value for enable. + */ +struct st_sensor_data_ready_irq { + struct st_sensor_int_drdy int1; + struct st_sensor_int_drdy int2; + u8 addr_ihl; + u8 mask_ihl; + struct { + u8 addr; + u8 mask; + } stat_drdy; + struct { + u8 en_addr; + u8 en_mask; + } ig1; +}; + +/** + * struct st_sensor_transfer_buffer - ST sensor device I/O buffer + * @buf_lock: Mutex to protect rx and tx buffers. + * @tx_buf: Buffer used by SPI transfer function to send data to the sensors. + * This buffer is used to avoid DMA not-aligned issue. + * @rx_buf: Buffer used by SPI transfer to receive data from sensors. + * This buffer is used to avoid DMA not-aligned issue. + */ +struct st_sensor_transfer_buffer { + struct mutex buf_lock; + u8 rx_buf[ST_SENSORS_RX_MAX_LENGTH]; + u8 tx_buf[ST_SENSORS_TX_MAX_LENGTH] ____cacheline_aligned; +}; + +/** + * struct st_sensor_transfer_function - ST sensor device I/O function + * @read_byte: Function used to read one byte. + * @write_byte: Function used to write one byte. + * @read_multiple_byte: Function used to read multiple byte. + */ +struct st_sensor_transfer_function { + int (*read_byte) (struct st_sensor_transfer_buffer *tb, + struct device *dev, u8 reg_addr, u8 *res_byte); + int (*write_byte) (struct st_sensor_transfer_buffer *tb, + struct device *dev, u8 reg_addr, u8 data); + int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb, + struct device *dev, u8 reg_addr, int len, u8 *data, + bool multiread_bit); +}; + +/** + * struct st_sensor_settings - ST specific sensor settings + * @wai: Contents of WhoAmI register. + * @wai_addr: The address of WhoAmI register. + * @sensors_supported: List of supported sensors by struct itself. + * @ch: IIO channels for the sensor. + * @odr: Output data rate register and ODR list available. + * @pw: Power register of the sensor. + * @enable_axis: Enable one or more axis of the sensor. + * @fs: Full scale register and full scale list available. + * @bdu: Block data update register. + * @das: Data Alignment Selection register. + * @drdy_irq: Data ready register of the sensor. + * @sim: SPI serial interface mode register of the sensor. + * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. + * @bootime: samples to discard when sensor passing from power-down to power-up. 
+ */ +struct st_sensor_settings { + u8 wai; + u8 wai_addr; + char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; + struct iio_chan_spec *ch; + int num_ch; + struct st_sensor_odr odr; + struct st_sensor_power pw; + struct st_sensor_axis enable_axis; + struct st_sensor_fullscale fs; + struct st_sensor_bdu bdu; + struct st_sensor_das das; + struct st_sensor_data_ready_irq drdy_irq; + struct st_sensor_sim sim; + bool multi_read_bit; + unsigned int bootime; +}; + +/** + * struct st_sensor_data - ST sensor device status + * @dev: Pointer to instance of struct device (I2C or SPI). + * @trig: The trigger in use by the core driver. + * @sensor_settings: Pointer to the specific sensor settings in use. + * @current_fullscale: Maximum range of measure by the sensor. + * @vdd: Pointer to sensor's Vdd power supply + * @vdd_io: Pointer to sensor's Vdd-IO power supply + * @enabled: Status of the sensor (false->off, true->on). + * @multiread_bit: Use or not particular bit for [I2C/SPI] multiread. + * @buffer_data: Data used by buffer part. + * @odr: Output data rate of the sensor [Hz]. + * num_data_channels: Number of data channels used in buffer. + * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). + * @int_pin_open_drain: Set the interrupt/DRDY to open drain. + * @get_irq_data_ready: Function to get the IRQ used for data ready signal. + * @tf: Transfer function structure used by I/O operations. + * @tb: Transfer buffers and mutex used by I/O operations. + * @edge_irq: the IRQ triggers on edges and need special handling. + * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. + * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. + */ +struct st_sensor_data { + struct device *dev; + struct iio_trigger *trig; + struct st_sensor_settings *sensor_settings; + struct st_sensor_fullscale_avl *current_fullscale; + struct regulator *vdd; + struct regulator *vdd_io; + + bool enabled; + bool multiread_bit; + + char *buffer_data; + + unsigned int odr; + unsigned int num_data_channels; + + u8 drdy_int_pin; + bool int_pin_open_drain; + + unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev); + + const struct st_sensor_transfer_function *tf; + struct st_sensor_transfer_buffer tb; + + bool edge_irq; + bool hw_irq_trigger; + s64 hw_timestamp; +}; + +#ifdef CONFIG_IIO_BUFFER +irqreturn_t st_sensors_trigger_handler(int irq, void *p); +#endif + +#ifdef CONFIG_IIO_TRIGGER +int st_sensors_allocate_trigger(struct iio_dev *indio_dev, + const struct iio_trigger_ops *trigger_ops); + +void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); +int st_sensors_validate_device(struct iio_trigger *trig, + struct iio_dev *indio_dev); +#else +static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, + const struct iio_trigger_ops *trigger_ops) +{ + return 0; +} +static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev) +{ + return; +} +#define st_sensors_validate_device NULL +#endif + +int st_sensors_init_sensor(struct iio_dev *indio_dev, + struct st_sensors_platform_data *pdata); + +int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable); + +int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable); + +int st_sensors_power_enable(struct iio_dev *indio_dev); + +void st_sensors_power_disable(struct iio_dev *indio_dev); + +int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev, + unsigned reg, unsigned writeval, + unsigned *readval); + +int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int 
odr); + +int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable); + +int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale); + +int st_sensors_read_info_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *ch, int *val); + +int st_sensors_check_device_support(struct iio_dev *indio_dev, + int num_sensors_list, const struct st_sensor_settings *sensor_settings); + +ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t st_sensors_sysfs_scale_avail(struct device *dev, + struct device_attribute *attr, char *buf); + +#ifdef CONFIG_OF +void st_sensors_of_name_probe(struct device *dev, + const struct of_device_id *match, + char *name, int len); +#else +static inline void st_sensors_of_name_probe(struct device *dev, + const struct of_device_id *match, + char *name, int len) +{ +} +#endif + +#endif /* ST_SENSORS_H */ diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h new file mode 100644 index 000000000..0a2c25e06 --- /dev/null +++ b/include/linux/iio/common/st_sensors_i2c.h @@ -0,0 +1,30 @@ +/* + * STMicroelectronics sensors i2c library driver + * + * Copyright 2012-2013 STMicroelectronics Inc. + * + * Denis Ciocca + * + * Licensed under the GPL-2. + */ + +#ifndef ST_SENSORS_I2C_H +#define ST_SENSORS_I2C_H + +#include +#include +#include + +void st_sensors_i2c_configure(struct iio_dev *indio_dev, + struct i2c_client *client, struct st_sensor_data *sdata); + +#ifdef CONFIG_ACPI +int st_sensors_match_acpi_device(struct device *dev); +#else +static inline int st_sensors_match_acpi_device(struct device *dev) +{ + return -ENODEV; +} +#endif + +#endif /* ST_SENSORS_I2C_H */ diff --git a/include/linux/iio/common/st_sensors_spi.h b/include/linux/iio/common/st_sensors_spi.h new file mode 100644 index 000000000..d964a3563 --- /dev/null +++ b/include/linux/iio/common/st_sensors_spi.h @@ -0,0 +1,20 @@ +/* + * STMicroelectronics sensors spi library driver + * + * Copyright 2012-2013 STMicroelectronics Inc. + * + * Denis Ciocca + * + * Licensed under the GPL-2. + */ + +#ifndef ST_SENSORS_SPI_H +#define ST_SENSORS_SPI_H + +#include +#include + +void st_sensors_spi_configure(struct iio_dev *indio_dev, + struct spi_device *spi, struct st_sensor_data *sdata); + +#endif /* ST_SENSORS_SPI_H */ diff --git a/include/linux/iio/configfs.h b/include/linux/iio/configfs.h new file mode 100644 index 000000000..93befd67c --- /dev/null +++ b/include/linux/iio/configfs.h @@ -0,0 +1,15 @@ +/* + * Industrial I/O configfs support + * + * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ +#ifndef __IIO_CONFIGFS +#define __IIO_CONFIGFS + +extern struct configfs_subsystem iio_configfs_subsys; + +#endif /* __IIO_CONFIGFS */ diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h new file mode 100644 index 000000000..9887f4f8e --- /dev/null +++ b/include/linux/iio/consumer.h @@ -0,0 +1,389 @@ +/* + * Industrial I/O in kernel consumer interface + * + * Copyright (c) 2011 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ +#ifndef _IIO_INKERN_CONSUMER_H_ +#define _IIO_INKERN_CONSUMER_H_ + +#include +#include + +struct iio_dev; +struct iio_chan_spec; +struct device; + +/** + * struct iio_channel - everything needed for a consumer to use a channel + * @indio_dev: Device on which the channel exists. + * @channel: Full description of the channel. + * @data: Data about the channel used by consumer. + */ +struct iio_channel { + struct iio_dev *indio_dev; + const struct iio_chan_spec *channel; + void *data; +}; + +/** + * iio_channel_get() - get description of all that is needed to access channel. + * @dev: Pointer to consumer device. Device name must match + * the name of the device as provided in the iio_map + * with which the desired provider to consumer mapping + * was registered. + * @consumer_channel: Unique name to identify the channel on the consumer + * side. This typically describes the channels use within + * the consumer. E.g. 'battery_voltage' + */ +struct iio_channel *iio_channel_get(struct device *dev, + const char *consumer_channel); + +/** + * iio_channel_release() - release channels obtained via iio_channel_get + * @chan: The channel to be released. + */ +void iio_channel_release(struct iio_channel *chan); + +/** + * devm_iio_channel_get() - Resource managed version of iio_channel_get(). + * @dev: Pointer to consumer device. Device name must match + * the name of the device as provided in the iio_map + * with which the desired provider to consumer mapping + * was registered. + * @consumer_channel: Unique name to identify the channel on the consumer + * side. This typically describes the channels use within + * the consumer. E.g. 'battery_voltage' + * + * Returns a pointer to negative errno if it is not able to get the iio channel + * otherwise returns valid pointer for iio channel. + * + * The allocated iio channel is automatically released when the device is + * unbound. + */ +struct iio_channel *devm_iio_channel_get(struct device *dev, + const char *consumer_channel); +/** + * devm_iio_channel_release() - Resource managed version of + * iio_channel_release(). + * @dev: Pointer to consumer device for which resource + * is allocared. + * @chan: The channel to be released. + */ +void devm_iio_channel_release(struct device *dev, struct iio_channel *chan); + +/** + * iio_channel_get_all() - get all channels associated with a client + * @dev: Pointer to consumer device. + * + * Returns an array of iio_channel structures terminated with one with + * null iio_dev pointer. + * This function is used by fairly generic consumers to get all the + * channels registered as having this consumer. + */ +struct iio_channel *iio_channel_get_all(struct device *dev); + +/** + * iio_channel_release_all() - reverse iio_channel_get_all + * @chan: Array of channels to be released. + */ +void iio_channel_release_all(struct iio_channel *chan); + +/** + * devm_iio_channel_get_all() - Resource managed version of + * iio_channel_get_all(). + * @dev: Pointer to consumer device. + * + * Returns a pointer to negative errno if it is not able to get the iio channel + * otherwise returns an array of iio_channel structures terminated with one with + * null iio_dev pointer. + * + * This function is used by fairly generic consumers to get all the + * channels registered as having this consumer. + * + * The allocated iio channels are automatically released when the device is + * unbounded. 
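
A short sketch of the consumer side of the interface above, assuming a hypothetical battery driver whose iio_map names a "battery_voltage" channel (the channel name mirrors the example given in the kerneldoc); my_battery_probe is not a real function.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/iio/consumer.h>

static int my_battery_probe(struct platform_device *pdev)
{
        struct iio_channel *chan;

        /* Resolved through the iio_map registered by the providing ADC driver. */
        chan = devm_iio_channel_get(&pdev->dev, "battery_voltage");
        if (IS_ERR(chan))
                return PTR_ERR(chan);   /* may be -EPROBE_DEFER */

        /* ... store chan and read it later from the consumer's own ops ... */
        return 0;
}
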
+ */ +struct iio_channel *devm_iio_channel_get_all(struct device *dev); + +/** + * devm_iio_channel_release_all() - Resource managed version of + * iio_channel_release_all(). + * @dev: Pointer to consumer device for which resource + * is allocared. + * @chan: Array channel to be released. + */ +void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan); + +struct iio_cb_buffer; +/** + * iio_channel_get_all_cb() - register callback for triggered capture + * @dev: Pointer to client device. + * @cb: Callback function. + * @private: Private data passed to callback. + * + * NB right now we have no ability to mux data from multiple devices. + * So if the channels requested come from different devices this will + * fail. + */ +struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, + int (*cb)(const void *data, + void *private), + void *private); +/** + * iio_channel_cb_set_buffer_watermark() - set the buffer watermark. + * @cb_buffer: The callback buffer from whom we want the channel + * information. + * @watermark: buffer watermark in bytes. + * + * This function allows to configure the buffer watermark. + */ +int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer, + size_t watermark); + +/** + * iio_channel_release_all_cb() - release and unregister the callback. + * @cb_buffer: The callback buffer that was allocated. + */ +void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buffer); + +/** + * iio_channel_start_all_cb() - start the flow of data through callback. + * @cb_buff: The callback buffer we are starting. + */ +int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff); + +/** + * iio_channel_stop_all_cb() - stop the flow of data through the callback. + * @cb_buff: The callback buffer we are stopping. + */ +void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff); + +/** + * iio_channel_cb_get_channels() - get access to the underlying channels. + * @cb_buffer: The callback buffer from whom we want the channel + * information. + * + * This function allows one to obtain information about the channels. + * Whilst this may allow direct reading if all buffers are disabled, the + * primary aim is to allow drivers that are consuming a channel to query + * things like scaling of the channel. + */ +struct iio_channel +*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer); + +/** + * iio_channel_cb_get_iio_dev() - get access to the underlying device. + * @cb_buffer: The callback buffer from whom we want the device + * information. + * + * This function allows one to obtain information about the device. + * The primary aim is to allow drivers that are consuming a device to query + * things like current trigger. + */ +struct iio_dev +*iio_channel_cb_get_iio_dev(const struct iio_cb_buffer *cb_buffer); + +/** + * iio_read_channel_raw() - read from a given channel + * @chan: The channel being queried. + * @val: Value read back. + * + * Note raw reads from iio channels are in adc counts and hence + * scale will need to be applied if standard units required. + */ +int iio_read_channel_raw(struct iio_channel *chan, + int *val); + +/** + * iio_read_channel_average_raw() - read from a given channel + * @chan: The channel being queried. + * @val: Value read back. + * + * Note raw reads from iio channels are in adc counts and hence + * scale will need to be applied if standard units required. + * + * In opposit to the normal iio_read_channel_raw this function + * returns the average of multiple reads. 
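
A sketch of the triggered-capture callback flow declared above, assuming a hypothetical consumer that only counts scans; my_scan_cb and my_cb_setup are illustrative names, not part of this API.

#include <linux/err.h>
#include <linux/iio/consumer.h>

static int my_scan_cb(const void *data, void *private)
{
        unsigned int *count = private;

        (*count)++;     /* 'data' points at one scan worth of samples */
        return 0;
}

static struct iio_cb_buffer *my_cb_setup(struct device *dev,
                                         unsigned int *count)
{
        struct iio_cb_buffer *cb_buff;
        int ret;

        cb_buff = iio_channel_get_all_cb(dev, my_scan_cb, count);
        if (IS_ERR(cb_buff))
                return cb_buff;

        ret = iio_channel_start_all_cb(cb_buff);
        if (ret) {
                iio_channel_release_all_cb(cb_buff);
                return ERR_PTR(ret);
        }

        /* Tear down later with iio_channel_stop_all_cb() then
         * iio_channel_release_all_cb(). */
        return cb_buff;
}
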
+ */ +int iio_read_channel_average_raw(struct iio_channel *chan, int *val); + +/** + * iio_read_channel_processed() - read processed value from a given channel + * @chan: The channel being queried. + * @val: Value read back. + * + * Returns an error code or 0. + * + * This function will read a processed value from a channel. A processed value + * means that this value will have the correct unit and not some device internal + * representation. If the device does not support reporting a processed value + * the function will query the raw value and the channels scale and offset and + * do the appropriate transformation. + */ +int iio_read_channel_processed(struct iio_channel *chan, int *val); + +/** + * iio_write_channel_attribute() - Write values to the device attribute. + * @chan: The channel being queried. + * @val: Value being written. + * @val2: Value being written.val2 use depends on attribute type. + * @attribute: info attribute to be read. + * + * Returns an error code or 0. + */ +int iio_write_channel_attribute(struct iio_channel *chan, int val, + int val2, enum iio_chan_info_enum attribute); + +/** + * iio_read_channel_attribute() - Read values from the device attribute. + * @chan: The channel being queried. + * @val: Value being written. + * @val2: Value being written.Val2 use depends on attribute type. + * @attribute: info attribute to be written. + * + * Returns an error code if failed. Else returns a description of what is in val + * and val2, such as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val + * + val2/1e6 + */ +int iio_read_channel_attribute(struct iio_channel *chan, int *val, + int *val2, enum iio_chan_info_enum attribute); + +/** + * iio_write_channel_raw() - write to a given channel + * @chan: The channel being queried. + * @val: Value being written. + * + * Note raw writes to iio channels are in dac counts and hence + * scale will need to be applied if standard units required. + */ +int iio_write_channel_raw(struct iio_channel *chan, int val); + +/** + * iio_read_max_channel_raw() - read maximum available raw value from a given + * channel, i.e. the maximum possible value. + * @chan: The channel being queried. + * @val: Value read back. + * + * Note raw reads from iio channels are in adc counts and hence + * scale will need to be applied if standard units are required. + */ +int iio_read_max_channel_raw(struct iio_channel *chan, int *val); + +/** + * iio_read_avail_channel_raw() - read available raw values from a given channel + * @chan: The channel being queried. + * @vals: Available values read back. + * @length: Number of entries in vals. + * + * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST. + * + * For ranges, three vals are always returned; min, step and max. + * For lists, all the possible values are enumerated. + * + * Note raw available values from iio channels are in adc counts and + * hence scale will need to be applied if standard units are required. + */ +int iio_read_avail_channel_raw(struct iio_channel *chan, + const int **vals, int *length); + +/** + * iio_get_channel_type() - get the type of a channel + * @channel: The channel being queried. + * @type: The type of the channel. + * + * returns the enum iio_chan_type of the channel + */ +int iio_get_channel_type(struct iio_channel *channel, + enum iio_chan_type *type); + +/** + * iio_read_channel_offset() - read the offset value for a channel + * @chan: The channel being queried. + * @val: First part of value read back. + * @val2: Second part of value read back. 
+ * + * Note returns a description of what is in val and val2, such + * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val + * + val2/1e6 + */ +int iio_read_channel_offset(struct iio_channel *chan, int *val, + int *val2); + +/** + * iio_read_channel_scale() - read the scale value for a channel + * @chan: The channel being queried. + * @val: First part of value read back. + * @val2: Second part of value read back. + * + * Note returns a description of what is in val and val2, such + * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val + * + val2/1e6 + */ +int iio_read_channel_scale(struct iio_channel *chan, int *val, + int *val2); + +/** + * iio_convert_raw_to_processed() - Converts a raw value to a processed value + * @chan: The channel being queried + * @raw: The raw IIO to convert + * @processed: The result of the conversion + * @scale: Scale factor to apply during the conversion + * + * Returns an error code or 0. + * + * This function converts a raw value to processed value for a specific channel. + * A raw value is the device internal representation of a sample and the value + * returned by iio_read_channel_raw, so the unit of that value is device + * depended. A processed value on the other hand is value has a normed unit + * according with the IIO specification. + * + * The scale factor allows to increase the precession of the returned value. For + * a scale factor of 1 the function will return the result in the normal IIO + * unit for the channel type. E.g. millivolt for voltage channels, if you want + * nanovolts instead pass 1000000 as the scale factor. + */ +int iio_convert_raw_to_processed(struct iio_channel *chan, int raw, + int *processed, unsigned int scale); + +/** + * iio_get_channel_ext_info_count() - get number of ext_info attributes + * connected to the channel. + * @chan: The channel being queried + * + * Returns the number of ext_info attributes + */ +unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan); + +/** + * iio_read_channel_ext_info() - read ext_info attribute from a given channel + * @chan: The channel being queried. + * @attr: The ext_info attribute to read. + * @buf: Where to store the attribute value. Assumed to hold + * at least PAGE_SIZE bytes. + * + * Returns the number of bytes written to buf (perhaps w/o zero termination; + * it need not even be a string), or an error code. + */ +ssize_t iio_read_channel_ext_info(struct iio_channel *chan, + const char *attr, char *buf); + +/** + * iio_write_channel_ext_info() - write ext_info attribute from a given channel + * @chan: The channel being queried. + * @attr: The ext_info attribute to read. + * @buf: The new attribute value. Strings needs to be zero- + * terminated, but the terminator should not be included + * in the below len. + * @len: The size of the new attribute value. + * + * Returns the number of accepted bytes, which should be the same as len. + * An error code can also be returned. + */ +ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr, + const char *buf, size_t len); + +#endif diff --git a/include/linux/iio/dac/ad5421.h b/include/linux/iio/dac/ad5421.h new file mode 100644 index 000000000..d8ee9a7f8 --- /dev/null +++ b/include/linux/iio/dac/ad5421.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __IIO_DAC_AD5421_H__ +#define __IIO_DAC_AD5421_H__ + +/** + * enum ad5421_current_range - Current range the AD5421 is configured for. 
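
A small sketch tying the raw and conversion helpers above together, assuming 'chan' is a voltage channel obtained as in the earlier consumer example. Per the iio_convert_raw_to_processed() kerneldoc, a scale factor of 1000 turns the default millivolt result into microvolts; my_read_microvolts is a hypothetical helper.

static int my_read_microvolts(struct iio_channel *chan, int *uv)
{
        int raw, ret;

        ret = iio_read_channel_raw(chan, &raw);
        if (ret < 0)
                return ret;

        /* Apply the channel's scale and offset; 1000 * millivolt = microvolt. */
        return iio_convert_raw_to_processed(chan, raw, uv, 1000);
}
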
+ * @AD5421_CURRENT_RANGE_4mA_20mA: 4 mA to 20 mA (RANGE1,0 pins = 00) + * @AD5421_CURRENT_RANGE_3mA8_21mA: 3.8 mA to 21 mA (RANGE1,0 pins = x1) + * @AD5421_CURRENT_RANGE_3mA2_24mA: 3.2 mA to 24 mA (RANGE1,0 pins = 10) + */ + +enum ad5421_current_range { + AD5421_CURRENT_RANGE_4mA_20mA, + AD5421_CURRENT_RANGE_3mA8_21mA, + AD5421_CURRENT_RANGE_3mA2_24mA, +}; + +/** + * struct ad5421_platform_data - AD5421 DAC driver platform data + * @external_vref: whether an external reference voltage is used or not + * @current_range: Current range the AD5421 is configured for + */ + +struct ad5421_platform_data { + bool external_vref; + enum ad5421_current_range current_range; +}; + +#endif diff --git a/include/linux/iio/dac/ad5504.h b/include/linux/iio/dac/ad5504.h new file mode 100644 index 000000000..43895376a --- /dev/null +++ b/include/linux/iio/dac/ad5504.h @@ -0,0 +1,16 @@ +/* + * AD5504 SPI DAC driver + * + * Copyright 2011 Analog Devices Inc. + * + * Licensed under the GPL-2. + */ + +#ifndef SPI_AD5504_H_ +#define SPI_AD5504_H_ + +struct ad5504_platform_data { + u16 vref_mv; +}; + +#endif /* SPI_AD5504_H_ */ diff --git a/include/linux/iio/dac/ad5791.h b/include/linux/iio/dac/ad5791.h new file mode 100644 index 000000000..45ee281c6 --- /dev/null +++ b/include/linux/iio/dac/ad5791.h @@ -0,0 +1,25 @@ +/* + * AD5791 SPI DAC driver + * + * Copyright 2011 Analog Devices Inc. + * + * Licensed under the GPL-2. + */ + +#ifndef SPI_AD5791_H_ +#define SPI_AD5791_H_ + +/** + * struct ad5791_platform_data - platform specific information + * @vref_pos_mv: Vdd Positive Analog Supply Volatge (mV) + * @vref_neg_mv: Vdd Negative Analog Supply Volatge (mV) + * @use_rbuf_gain2: ext. amplifier connected in gain of two configuration + */ + +struct ad5791_platform_data { + u16 vref_pos_mv; + u16 vref_neg_mv; + bool use_rbuf_gain2; +}; + +#endif /* SPI_AD5791_H_ */ diff --git a/include/linux/iio/dac/max517.h b/include/linux/iio/dac/max517.h new file mode 100644 index 000000000..7668716cd --- /dev/null +++ b/include/linux/iio/dac/max517.h @@ -0,0 +1,15 @@ +/* + * MAX517 DAC driver + * + * Copyright 2011 Roland Stigge + * + * Licensed under the GPL-2 or later. + */ +#ifndef IIO_DAC_MAX517_H_ +#define IIO_DAC_MAX517_H_ + +struct max517_platform_data { + u16 vref_mv[8]; +}; + +#endif /* IIO_DAC_MAX517_H_ */ diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h new file mode 100644 index 000000000..628b2cf54 --- /dev/null +++ b/include/linux/iio/dac/mcp4725.h @@ -0,0 +1,26 @@ +/* + * MCP4725 DAC driver + * + * Copyright (C) 2012 Peter Meerwald + * + * Licensed under the GPL-2 or later. + */ + +#ifndef IIO_DAC_MCP4725_H_ +#define IIO_DAC_MCP4725_H_ + +/** + * struct mcp4725_platform_data - MCP4725/6 DAC specific data. + * @use_vref: Whether an external reference voltage on Vref pin should be used. + * Additional vref-supply must be specified when used. + * @vref_buffered: Controls buffering of the external reference voltage. + * + * Vref related settings are available only on MCP4756. See + * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information. + */ +struct mcp4725_platform_data { + bool use_vref; + bool vref_buffered; +}; + +#endif /* IIO_DAC_MCP4725_H_ */ diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h new file mode 100644 index 000000000..7dfb10ee2 --- /dev/null +++ b/include/linux/iio/driver.h @@ -0,0 +1,31 @@ +/* + * Industrial I/O in kernel access map interface. 
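
A board-file style sketch of how DAC platform data such as mcp4725_platform_data reaches a driver, assuming a hypothetical machine with an MCP4725 on I2C bus 0 at address 0x60; the bus number, address and "mcp4725" id string are illustrative assumptions.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/iio/dac/mcp4725.h>

static struct mcp4725_platform_data my_dac_pdata = {
        .use_vref      = false,         /* use VDD as the reference */
        .vref_buffered = false,
};

static struct i2c_board_info my_board_i2c_devs[] __initdata = {
        {
                I2C_BOARD_INFO("mcp4725", 0x60),
                .platform_data = &my_dac_pdata,
        },
};

/* In the machine init code:
 *      i2c_register_board_info(0, my_board_i2c_devs,
 *                              ARRAY_SIZE(my_board_i2c_devs));
 */
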
+ * + * Copyright (c) 2011 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef _IIO_INKERN_H_ +#define _IIO_INKERN_H_ + +struct iio_map; + +/** + * iio_map_array_register() - tell the core about inkernel consumers + * @indio_dev: provider device + * @map: array of mappings specifying association of channel with client + */ +int iio_map_array_register(struct iio_dev *indio_dev, + struct iio_map *map); + +/** + * iio_map_array_unregister() - tell the core to remove consumer mappings for + * the given provider device + * @indio_dev: provider device + */ +int iio_map_array_unregister(struct iio_dev *indio_dev); + +#endif diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h new file mode 100644 index 000000000..8ad87d1c5 --- /dev/null +++ b/include/linux/iio/events.h @@ -0,0 +1,59 @@ +/* The industrial I/O - event passing to userspace + * + * Copyright (c) 2008-2011 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ +#ifndef _IIO_EVENTS_H_ +#define _IIO_EVENTS_H_ + +#include +#include + +/** + * IIO_EVENT_CODE() - create event identifier + * @chan_type: Type of the channel. Should be one of enum iio_chan_type. + * @diff: Whether the event is for an differential channel or not. + * @modifier: Modifier for the channel. Should be one of enum iio_modifier. + * @direction: Direction of the event. One of enum iio_event_direction. + * @type: Type of the event. Should be one of enum iio_event_type. + * @chan: Channel number for non-differential channels. + * @chan1: First channel number for differential channels. + * @chan2: Second channel number for differential channels. + */ + +#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \ + type, chan, chan1, chan2) \ + (((u64)type << 56) | ((u64)diff << 55) | \ + ((u64)direction << 48) | ((u64)modifier << 40) | \ + ((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \ + ((u16)chan)) + + +/** + * IIO_MOD_EVENT_CODE() - create event identifier for modified channels + * @chan_type: Type of the channel. Should be one of enum iio_chan_type. + * @number: Channel number. + * @modifier: Modifier for the channel. Should be one of enum iio_modifier. + * @type: Type of the event. Should be one of enum iio_event_type. + * @direction: Direction of the event. One of enum iio_event_direction. + */ + +#define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \ + type, direction) \ + IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0) + +/** + * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels + * @chan_type: Type of the channel. Should be one of enum iio_chan_type. + * @number: Channel number. + * @type: Type of the event. Should be one of enum iio_event_type. + * @direction: Direction of the event. One of enum iio_event_direction. 
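
A sketch of the provider side that would make the earlier "battery_voltage" lookup work through iio_map_array_register(), assuming the struct iio_map fields from linux/iio/machine.h (adc_channel_label, consumer_dev_name, consumer_channel) and a made-up consumer device name.

#include <linux/iio/driver.h>
#include <linux/iio/machine.h>

static struct iio_map my_adc_maps[] = {
        {
                .adc_channel_label = "channel_0",      /* datasheet_name of the ADC channel */
                .consumer_dev_name = "my-battery.0",   /* dev_name() of the consumer */
                .consumer_channel  = "battery_voltage",
        },
        { /* sentinel */ },
};

/* In the ADC driver's probe, before registering the IIO device:
 *      err = iio_map_array_register(indio_dev, my_adc_maps);
 * and in remove():
 *      iio_map_array_unregister(indio_dev);
 */
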
+ */ + +#define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \ + IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0) + +#endif diff --git a/include/linux/iio/frequency/ad9523.h b/include/linux/iio/frequency/ad9523.h new file mode 100644 index 000000000..12ce3ee42 --- /dev/null +++ b/include/linux/iio/frequency/ad9523.h @@ -0,0 +1,195 @@ +/* + * AD9523 SPI Low Jitter Clock Generator + * + * Copyright 2012 Analog Devices Inc. + * + * Licensed under the GPL-2. + */ + +#ifndef IIO_FREQUENCY_AD9523_H_ +#define IIO_FREQUENCY_AD9523_H_ + +enum outp_drv_mode { + TRISTATE, + LVPECL_8mA, + LVDS_4mA, + LVDS_7mA, + HSTL0_16mA, + HSTL1_8mA, + CMOS_CONF1, + CMOS_CONF2, + CMOS_CONF3, + CMOS_CONF4, + CMOS_CONF5, + CMOS_CONF6, + CMOS_CONF7, + CMOS_CONF8, + CMOS_CONF9 +}; + +enum ref_sel_mode { + NONEREVERTIVE_STAY_ON_REFB, + REVERT_TO_REFA, + SELECT_REFA, + SELECT_REFB, + EXT_REF_SEL +}; + +/** + * struct ad9523_channel_spec - Output channel configuration + * + * @channel_num: Output channel number. + * @divider_output_invert_en: Invert the polarity of the output clock. + * @sync_ignore_en: Ignore chip-level SYNC signal. + * @low_power_mode_en: Reduce power used in the differential output modes. + * @use_alt_clock_src: Channel divider uses alternative clk source. + * @output_dis: Disables, powers down the entire channel. + * @driver_mode: Output driver mode (logic level family). + * @divider_phase: Divider initial phase after a SYNC. Range 0..63 + LSB = 1/2 of a period of the divider input clock. + * @channel_divider: 10-bit channel divider. + * @extended_name: Optional descriptive channel name. + */ + +struct ad9523_channel_spec { + unsigned channel_num; + bool divider_output_invert_en; + bool sync_ignore_en; + bool low_power_mode_en; + /* CH0..CH3 VCXO, CH4..CH9 VCO2 */ + bool use_alt_clock_src; + bool output_dis; + enum outp_drv_mode driver_mode; + unsigned char divider_phase; + unsigned short channel_divider; + char extended_name[16]; +}; + +enum pll1_rzero_resistor { + RZERO_883_OHM, + RZERO_677_OHM, + RZERO_341_OHM, + RZERO_135_OHM, + RZERO_10_OHM, + RZERO_USE_EXT_RES = 8, +}; + +enum rpole2_resistor { + RPOLE2_900_OHM, + RPOLE2_450_OHM, + RPOLE2_300_OHM, + RPOLE2_225_OHM, +}; + +enum rzero_resistor { + RZERO_3250_OHM, + RZERO_2750_OHM, + RZERO_2250_OHM, + RZERO_2100_OHM, + RZERO_3000_OHM, + RZERO_2500_OHM, + RZERO_2000_OHM, + RZERO_1850_OHM, +}; + +enum cpole1_capacitor { + CPOLE1_0_PF, + CPOLE1_8_PF, + CPOLE1_16_PF, + CPOLE1_24_PF, + _CPOLE1_24_PF, /* place holder */ + CPOLE1_32_PF, + CPOLE1_40_PF, + CPOLE1_48_PF, +}; + +/** + * struct ad9523_platform_data - platform specific information + * + * @vcxo_freq: External VCXO frequency in Hz + * @refa_diff_rcv_en: REFA differential/single-ended input selection. + * @refb_diff_rcv_en: REFB differential/single-ended input selection. + * @zd_in_diff_en: Zero Delay differential/single-ended input selection. + * @osc_in_diff_en: OSC differential/ single-ended input selection. + * @refa_cmos_neg_inp_en: REFA single-ended neg./pos. input enable. + * @refb_cmos_neg_inp_en: REFB single-ended neg./pos. input enable. + * @zd_in_cmos_neg_inp_en: Zero Delay single-ended neg./pos. input enable. + * @osc_in_cmos_neg_inp_en: OSC single-ended neg./pos. input enable. + * @refa_r_div: PLL1 10-bit REFA R divider. + * @refb_r_div: PLL1 10-bit REFB R divider. + * @pll1_feedback_div: PLL1 10-bit Feedback N divider. + * @pll1_charge_pump_current_nA: Magnitude of PLL1 charge pump current (nA). 
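
A sketch of how a driver typically consumes these event-code macros from a threshold interrupt handler, assuming a hypothetical voltage channel 0 and the generic IIO_VOLTAGE / IIO_EV_TYPE_THRESH / IIO_EV_DIR_RISING identifiers from the common IIO type headers; my_thresh_irq is illustrative.

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/events.h>

static irqreturn_t my_thresh_irq(int irq, void *private)
{
        struct iio_dev *indio_dev = private;

        /* Channel 0 crossed its rising threshold: tell userspace. */
        iio_push_event(indio_dev,
                       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
                                            IIO_EV_TYPE_THRESH,
                                            IIO_EV_DIR_RISING),
                       iio_get_time_ns(indio_dev));

        return IRQ_HANDLED;
}
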
+ * @zero_delay_mode_internal_en: Internal, external Zero Delay mode selection. + * @osc_in_feedback_en: PLL1 feedback path, local feedback from + * the OSC_IN receiver or zero delay mode + * @pll1_loop_filter_rzero: PLL1 Loop Filter Zero Resistor selection. + * @ref_mode: Reference selection mode. + * @pll2_charge_pump_current_nA: Magnitude of PLL2 charge pump current (nA). + * @pll2_ndiv_a_cnt: PLL2 Feedback N-divider, A Counter, range 0..4. + * @pll2_ndiv_b_cnt: PLL2 Feedback N-divider, B Counter, range 0..63. + * @pll2_freq_doubler_en: PLL2 frequency doubler enable. + * @pll2_r2_div: PLL2 R2 divider, range 0..31. + * @pll2_vco_diff_m1: VCO1 divider, range 3..5. + * @pll2_vco_diff_m2: VCO2 divider, range 3..5. + * @rpole2: PLL2 loop filter Rpole resistor value. + * @rzero: PLL2 loop filter Rzero resistor value. + * @cpole1: PLL2 loop filter Cpole capacitor value. + * @rzero_bypass_en: PLL2 loop filter Rzero bypass enable. + * @num_channels: Array size of struct ad9523_channel_spec. + * @channels: Pointer to channel array. + * @name: Optional alternative iio device name. + */ + +struct ad9523_platform_data { + unsigned long vcxo_freq; + + /* Differential/ Single-Ended Input Configuration */ + bool refa_diff_rcv_en; + bool refb_diff_rcv_en; + bool zd_in_diff_en; + bool osc_in_diff_en; + + /* + * Valid if differential input disabled + * if false defaults to pos input + */ + bool refa_cmos_neg_inp_en; + bool refb_cmos_neg_inp_en; + bool zd_in_cmos_neg_inp_en; + bool osc_in_cmos_neg_inp_en; + + /* PLL1 Setting */ + unsigned short refa_r_div; + unsigned short refb_r_div; + unsigned short pll1_feedback_div; + unsigned short pll1_charge_pump_current_nA; + bool zero_delay_mode_internal_en; + bool osc_in_feedback_en; + enum pll1_rzero_resistor pll1_loop_filter_rzero; + + /* Reference */ + enum ref_sel_mode ref_mode; + + /* PLL2 Setting */ + unsigned int pll2_charge_pump_current_nA; + unsigned char pll2_ndiv_a_cnt; + unsigned char pll2_ndiv_b_cnt; + bool pll2_freq_doubler_en; + unsigned char pll2_r2_div; + unsigned char pll2_vco_diff_m1; /* 3..5 */ + unsigned char pll2_vco_diff_m2; /* 3..5 */ + + /* Loop Filter PLL2 */ + enum rpole2_resistor rpole2; + enum rzero_resistor rzero; + enum cpole1_capacitor cpole1; + bool rzero_bypass_en; + + /* Output Channel Configuration */ + int num_channels; + struct ad9523_channel_spec *channels; + + char name[SPI_NAME_SIZE]; +}; + +#endif /* IIO_FREQUENCY_AD9523_H_ */ diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h new file mode 100644 index 000000000..ffd8c8f90 --- /dev/null +++ b/include/linux/iio/frequency/adf4350.h @@ -0,0 +1,128 @@ +/* + * ADF4350/ADF4351 SPI PLL driver + * + * Copyright 2012-2013 Analog Devices Inc. + * + * Licensed under the GPL-2. 
+ */ + +#ifndef IIO_PLL_ADF4350_H_ +#define IIO_PLL_ADF4350_H_ + +/* Registers */ +#define ADF4350_REG0 0 +#define ADF4350_REG1 1 +#define ADF4350_REG2 2 +#define ADF4350_REG3 3 +#define ADF4350_REG4 4 +#define ADF4350_REG5 5 + +/* REG0 Bit Definitions */ +#define ADF4350_REG0_FRACT(x) (((x) & 0xFFF) << 3) +#define ADF4350_REG0_INT(x) (((x) & 0xFFFF) << 15) + +/* REG1 Bit Definitions */ +#define ADF4350_REG1_MOD(x) (((x) & 0xFFF) << 3) +#define ADF4350_REG1_PHASE(x) (((x) & 0xFFF) << 15) +#define ADF4350_REG1_PRESCALER (1 << 27) + +/* REG2 Bit Definitions */ +#define ADF4350_REG2_COUNTER_RESET_EN (1 << 3) +#define ADF4350_REG2_CP_THREESTATE_EN (1 << 4) +#define ADF4350_REG2_POWER_DOWN_EN (1 << 5) +#define ADF4350_REG2_PD_POLARITY_POS (1 << 6) +#define ADF4350_REG2_LDP_6ns (1 << 7) +#define ADF4350_REG2_LDP_10ns (0 << 7) +#define ADF4350_REG2_LDF_FRACT_N (0 << 8) +#define ADF4350_REG2_LDF_INT_N (1 << 8) +#define ADF4350_REG2_CHARGE_PUMP_CURR_uA(x) (((((x)-312) / 312) & 0xF) << 9) +#define ADF4350_REG2_DOUBLE_BUFF_EN (1 << 13) +#define ADF4350_REG2_10BIT_R_CNT(x) ((x) << 14) +#define ADF4350_REG2_RDIV2_EN (1 << 24) +#define ADF4350_REG2_RMULT2_EN (1 << 25) +#define ADF4350_REG2_MUXOUT(x) ((x) << 26) +#define ADF4350_REG2_NOISE_MODE(x) (((unsigned)(x)) << 29) +#define ADF4350_MUXOUT_THREESTATE 0 +#define ADF4350_MUXOUT_DVDD 1 +#define ADF4350_MUXOUT_GND 2 +#define ADF4350_MUXOUT_R_DIV_OUT 3 +#define ADF4350_MUXOUT_N_DIV_OUT 4 +#define ADF4350_MUXOUT_ANALOG_LOCK_DETECT 5 +#define ADF4350_MUXOUT_DIGITAL_LOCK_DETECT 6 + +/* REG3 Bit Definitions */ +#define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3) +#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16) +#define ADF4350_REG3_12BIT_CSR_EN (1 << 18) +#define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21) +#define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22) +#define ADF4351_REG3_BAND_SEL_CLOCK_MODE_HIGH (1 << 23) + +/* REG4 Bit Definitions */ +#define ADF4350_REG4_OUTPUT_PWR(x) ((x) << 3) +#define ADF4350_REG4_RF_OUT_EN (1 << 5) +#define ADF4350_REG4_AUX_OUTPUT_PWR(x) ((x) << 6) +#define ADF4350_REG4_AUX_OUTPUT_EN (1 << 8) +#define ADF4350_REG4_AUX_OUTPUT_FUND (1 << 9) +#define ADF4350_REG4_AUX_OUTPUT_DIV (0 << 9) +#define ADF4350_REG4_MUTE_TILL_LOCK_EN (1 << 10) +#define ADF4350_REG4_VCO_PWRDOWN_EN (1 << 11) +#define ADF4350_REG4_8BIT_BAND_SEL_CLKDIV(x) ((x) << 12) +#define ADF4350_REG4_RF_DIV_SEL(x) ((x) << 20) +#define ADF4350_REG4_FEEDBACK_DIVIDED (0 << 23) +#define ADF4350_REG4_FEEDBACK_FUND (1 << 23) + +/* REG5 Bit Definitions */ +#define ADF4350_REG5_LD_PIN_MODE_LOW (0 << 22) +#define ADF4350_REG5_LD_PIN_MODE_DIGITAL (1 << 22) +#define ADF4350_REG5_LD_PIN_MODE_HIGH (3 << 22) + +/* Specifications */ +#define ADF4350_MAX_OUT_FREQ 4400000000ULL /* Hz */ +#define ADF4350_MIN_OUT_FREQ 137500000 /* Hz */ +#define ADF4351_MIN_OUT_FREQ 34375000 /* Hz */ +#define ADF4350_MIN_VCO_FREQ 2200000000ULL /* Hz */ +#define ADF4350_MAX_FREQ_45_PRESC 3000000000ULL /* Hz */ +#define ADF4350_MAX_FREQ_PFD 32000000 /* Hz */ +#define ADF4350_MAX_BANDSEL_CLK 125000 /* Hz */ +#define ADF4350_MAX_FREQ_REFIN 250000000 /* Hz */ +#define ADF4350_MAX_MODULUS 4095 +#define ADF4350_MAX_R_CNT 1023 + + +/** + * struct adf4350_platform_data - platform specific information + * @name: Optional device name. + * @clkin: REFin frequency in Hz. + * @channel_spacing: Channel spacing in Hz (influences MODULUS). + * @power_up_frequency: Optional, If set in Hz the PLL tunes to the desired + * frequency on probe. 
+ * @ref_div_factor: Optional, if set the driver skips dynamic calculation + * and uses this default value instead. + * @ref_doubler_en: Enables reference doubler. + * @ref_div2_en: Enables reference divider. + * @r2_user_settings: User defined settings for ADF4350/1 REGISTER_2. + * @r3_user_settings: User defined settings for ADF4350/1 REGISTER_3. + * @r4_user_settings: User defined settings for ADF4350/1 REGISTER_4. + * @gpio_lock_detect: Optional, if set with a valid GPIO number, + * pll lock state is tested upon read. + * If not used - set to -1. + */ + +struct adf4350_platform_data { + char name[32]; + unsigned long clkin; + unsigned long channel_spacing; + unsigned long long power_up_frequency; + + unsigned short ref_div_factor; /* 10-bit R counter */ + bool ref_doubler_en; + bool ref_div2_en; + + unsigned r2_user_settings; + unsigned r3_user_settings; + unsigned r4_user_settings; + int gpio_lock_detect; +}; + +#endif /* IIO_PLL_ADF4350_H_ */ diff --git a/include/linux/iio/gyro/itg3200.h b/include/linux/iio/gyro/itg3200.h new file mode 100644 index 000000000..2a820850f --- /dev/null +++ b/include/linux/iio/gyro/itg3200.h @@ -0,0 +1,154 @@ +/* + * itg3200.h -- support InvenSense ITG3200 + * Digital 3-Axis Gyroscope driver + * + * Copyright (c) 2011 Christian Strobel + * Copyright (c) 2011 Manuel Stahl + * Copyright (c) 2012 Thorsten Nowak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef I2C_ITG3200_H_ +#define I2C_ITG3200_H_ + +#include + +/* Register with I2C address (34h) */ +#define ITG3200_REG_ADDRESS 0x00 + +/* Sample rate divider + * Range: 0 to 255 + * Default value: 0x00 */ +#define ITG3200_REG_SAMPLE_RATE_DIV 0x15 + +/* Digital low pass filter settings */ +#define ITG3200_REG_DLPF 0x16 +/* DLPF full scale range */ +#define ITG3200_DLPF_FS_SEL_2000 0x18 +/* Bandwidth (Hz) and internal sample rate + * (kHz) of DLPF */ +#define ITG3200_DLPF_256_8 0x00 +#define ITG3200_DLPF_188_1 0x01 +#define ITG3200_DLPF_98_1 0x02 +#define ITG3200_DLPF_42_1 0x03 +#define ITG3200_DLPF_20_1 0x04 +#define ITG3200_DLPF_10_1 0x05 +#define ITG3200_DLPF_5_1 0x06 + +#define ITG3200_DLPF_CFG_MASK 0x07 + +/* Configuration for interrupt operations */ +#define ITG3200_REG_IRQ_CONFIG 0x17 +/* Logic level */ +#define ITG3200_IRQ_ACTIVE_LOW 0x80 +#define ITG3200_IRQ_ACTIVE_HIGH 0x00 +/* Drive type */ +#define ITG3200_IRQ_OPEN_DRAIN 0x40 +#define ITG3200_IRQ_PUSH_PULL 0x00 +/* Latch mode */ +#define ITG3200_IRQ_LATCH_UNTIL_CLEARED 0x20 +#define ITG3200_IRQ_LATCH_50US_PULSE 0x00 +/* Latch clear method */ +#define ITG3200_IRQ_LATCH_CLEAR_ANY 0x10 +#define ITG3200_IRQ_LATCH_CLEAR_STATUS 0x00 +/* Enable interrupt when device is ready */ +#define ITG3200_IRQ_DEVICE_RDY_ENABLE 0x04 +/* Enable interrupt when data is available */ +#define ITG3200_IRQ_DATA_RDY_ENABLE 0x01 + +/* Determine the status of ITG-3200 interrupts */ +#define ITG3200_REG_IRQ_STATUS 0x1A +/* Status of 'device is ready'-interrupt */ +#define ITG3200_IRQ_DEVICE_RDY_STATUS 0x04 +/* Status of 'data is available'-interrupt */ +#define ITG3200_IRQ_DATA_RDY_STATUS 0x01 + +/* Sensor registers */ +#define ITG3200_REG_TEMP_OUT_H 0x1B +#define ITG3200_REG_TEMP_OUT_L 0x1C +#define ITG3200_REG_GYRO_XOUT_H 0x1D +#define ITG3200_REG_GYRO_XOUT_L 0x1E +#define ITG3200_REG_GYRO_YOUT_H 0x1F +#define ITG3200_REG_GYRO_YOUT_L 0x20 +#define ITG3200_REG_GYRO_ZOUT_H 0x21 +#define ITG3200_REG_GYRO_ZOUT_L 0x22 + 
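
A sketch of a filled-in adf4350_platform_data block composed from the REG2..REG4 bit macros above, assuming a hypothetical board with a 25 MHz reference; the register values and name are illustrative, not board-validated.

#include <linux/iio/frequency/adf4350.h>

static struct adf4350_platform_data my_adf4350_pdata = {
        .name                = "adf4351-rf1",
        .clkin               = 25000000,            /* 25 MHz REFin */
        .channel_spacing     = 10000,               /* 10 kHz */
        .power_up_frequency  = 2400000000ULL,       /* tune to 2.4 GHz on probe */
        .gpio_lock_detect    = -1,                  /* lock detect GPIO not wired */

        .r2_user_settings    = ADF4350_REG2_PD_POLARITY_POS |
                               ADF4350_REG2_CHARGE_PUMP_CURR_uA(2500) |
                               ADF4350_REG2_MUXOUT(ADF4350_MUXOUT_DIGITAL_LOCK_DETECT),
        .r3_user_settings    = ADF4350_REG3_12BIT_CSR_EN,
        .r4_user_settings    = ADF4350_REG4_OUTPUT_PWR(3) |
                               ADF4350_REG4_RF_OUT_EN |
                               ADF4350_REG4_MUTE_TILL_LOCK_EN,
};
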
+/* Power management */ +#define ITG3200_REG_POWER_MANAGEMENT 0x3E +/* Reset device and internal registers to the + * power-up-default settings */ +#define ITG3200_RESET 0x80 +/* Enable low power sleep mode */ +#define ITG3200_SLEEP 0x40 +/* Put according gyroscope in standby mode */ +#define ITG3200_STANDBY_GYRO_X 0x20 +#define ITG3200_STANDBY_GYRO_Y 0x10 +#define ITG3200_STANDBY_GYRO_Z 0x08 +/* Determine the device clock source */ +#define ITG3200_CLK_INTERNAL 0x00 +#define ITG3200_CLK_GYRO_X 0x01 +#define ITG3200_CLK_GYRO_Y 0x02 +#define ITG3200_CLK_GYRO_Z 0x03 +#define ITG3200_CLK_EXT_32K 0x04 +#define ITG3200_CLK_EXT_19M 0x05 + + +/** + * struct itg3200 - device instance specific data + * @i2c: actual i2c_client + * @trig: data ready trigger from itg3200 pin + **/ +struct itg3200 { + struct i2c_client *i2c; + struct iio_trigger *trig; +}; + +enum ITG3200_SCAN_INDEX { + ITG3200_SCAN_TEMP, + ITG3200_SCAN_GYRO_X, + ITG3200_SCAN_GYRO_Y, + ITG3200_SCAN_GYRO_Z, + ITG3200_SCAN_ELEMENTS, +}; + +int itg3200_write_reg_8(struct iio_dev *indio_dev, + u8 reg_address, u8 val); + +int itg3200_read_reg_8(struct iio_dev *indio_dev, + u8 reg_address, u8 *val); + + +#ifdef CONFIG_IIO_BUFFER + +void itg3200_remove_trigger(struct iio_dev *indio_dev); +int itg3200_probe_trigger(struct iio_dev *indio_dev); + +int itg3200_buffer_configure(struct iio_dev *indio_dev); +void itg3200_buffer_unconfigure(struct iio_dev *indio_dev); + +#else /* CONFIG_IIO_BUFFER */ + +static inline void itg3200_remove_trigger(struct iio_dev *indio_dev) +{ +} + +static inline int itg3200_probe_trigger(struct iio_dev *indio_dev) +{ + return 0; +} + +static inline int itg3200_buffer_configure(struct iio_dev *indio_dev) +{ + return 0; +} + +static inline void itg3200_buffer_unconfigure(struct iio_dev *indio_dev) +{ +} + +#endif /* CONFIG_IIO_BUFFER */ + +#endif /* ITG3200_H_ */ diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h new file mode 100644 index 000000000..44d48bb1d --- /dev/null +++ b/include/linux/iio/hw-consumer.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Industrial I/O in kernel hardware consumer interface + * + * Copyright 2017 Analog Devices Inc. + * Author: Lars-Peter Clausen + */ + +#ifndef LINUX_IIO_HW_CONSUMER_H +#define LINUX_IIO_HW_CONSUMER_H + +struct iio_hw_consumer; + +struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev); +void iio_hw_consumer_free(struct iio_hw_consumer *hwc); +struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev); +void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc); +int iio_hw_consumer_enable(struct iio_hw_consumer *hwc); +void iio_hw_consumer_disable(struct iio_hw_consumer *hwc); + +#endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h new file mode 100644 index 000000000..136ce5154 --- /dev/null +++ b/include/linux/iio/iio.h @@ -0,0 +1,766 @@ + +/* The industrial I/O core + * + * Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ +#ifndef _INDUSTRIAL_IO_H_ +#define _INDUSTRIAL_IO_H_ + +#include +#include +#include +#include +/* IIO TODO LIST */ +/* + * Provide means of adjusting timer accuracy. + * Currently assumes nano seconds. 
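
A brief sketch of the hardware-consumer interface declared above, assuming a hypothetical front-end driver whose channels are wired in hardware to another IIO device; my_frontend_start is an illustrative name.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/hw-consumer.h>

static int my_frontend_start(struct device *dev, struct iio_hw_consumer **out)
{
        struct iio_hw_consumer *hwc;
        int ret;

        hwc = devm_iio_hw_consumer_alloc(dev);
        if (IS_ERR(hwc))
                return PTR_ERR(hwc);

        /* Enables the providing device for hardware consumption. */
        ret = iio_hw_consumer_enable(hwc);
        if (ret)
                return ret;

        *out = hwc;
        return 0;       /* balance with iio_hw_consumer_disable(hwc) on stop */
}
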
+ */ + +enum iio_shared_by { + IIO_SEPARATE, + IIO_SHARED_BY_TYPE, + IIO_SHARED_BY_DIR, + IIO_SHARED_BY_ALL +}; + +enum iio_endian { + IIO_CPU, + IIO_BE, + IIO_LE, +}; + +struct iio_chan_spec; +struct iio_dev; + +/** + * struct iio_chan_spec_ext_info - Extended channel info attribute + * @name: Info attribute name + * @shared: Whether this attribute is shared between all channels. + * @read: Read callback for this info attribute, may be NULL. + * @write: Write callback for this info attribute, may be NULL. + * @private: Data private to the driver. + */ +struct iio_chan_spec_ext_info { + const char *name; + enum iio_shared_by shared; + ssize_t (*read)(struct iio_dev *, uintptr_t private, + struct iio_chan_spec const *, char *buf); + ssize_t (*write)(struct iio_dev *, uintptr_t private, + struct iio_chan_spec const *, const char *buf, + size_t len); + uintptr_t private; +}; + +/** + * struct iio_enum - Enum channel info attribute + * @items: An array of strings. + * @num_items: Length of the item array. + * @set: Set callback function, may be NULL. + * @get: Get callback function, may be NULL. + * + * The iio_enum struct can be used to implement enum style channel attributes. + * Enum style attributes are those which have a set of strings which map to + * unsigned integer values. The IIO enum helper code takes care of mapping + * between value and string as well as generating a "_available" file which + * contains a list of all available items. The set callback will be called when + * the attribute is updated. The last parameter is the index to the newly + * activated item. The get callback will be used to query the currently active + * item and is supposed to return the index for it. + */ +struct iio_enum { + const char * const *items; + unsigned int num_items; + int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int); + int (*get)(struct iio_dev *, const struct iio_chan_spec *); +}; + +ssize_t iio_enum_available_read(struct iio_dev *indio_dev, + uintptr_t priv, const struct iio_chan_spec *chan, char *buf); +ssize_t iio_enum_read(struct iio_dev *indio_dev, + uintptr_t priv, const struct iio_chan_spec *chan, char *buf); +ssize_t iio_enum_write(struct iio_dev *indio_dev, + uintptr_t priv, const struct iio_chan_spec *chan, const char *buf, + size_t len); + +/** + * IIO_ENUM() - Initialize enum extended channel attribute + * @_name: Attribute name + * @_shared: Whether the attribute is shared between all channels + * @_e: Pointer to an iio_enum struct + * + * This should usually be used together with IIO_ENUM_AVAILABLE() + */ +#define IIO_ENUM(_name, _shared, _e) \ +{ \ + .name = (_name), \ + .shared = (_shared), \ + .read = iio_enum_read, \ + .write = iio_enum_write, \ + .private = (uintptr_t)(_e), \ +} + +/** + * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute + * @_name: Attribute name ("_available" will be appended to the name) + * @_e: Pointer to an iio_enum struct + * + * Creates a read only attribute which lists all the available enum items in a + * space separated list. 
This should usually be used together with IIO_ENUM() + */ +#define IIO_ENUM_AVAILABLE(_name, _e) \ +{ \ + .name = (_name "_available"), \ + .shared = IIO_SHARED_BY_TYPE, \ + .read = iio_enum_available_read, \ + .private = (uintptr_t)(_e), \ +} + +/** + * struct iio_mount_matrix - iio mounting matrix + * @rotation: 3 dimensional space rotation matrix defining sensor alignment with + * main hardware + */ +struct iio_mount_matrix { + const char *rotation[9]; +}; + +ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, + const struct iio_chan_spec *chan, char *buf); +int of_iio_read_mount_matrix(const struct device *dev, const char *propname, + struct iio_mount_matrix *matrix); + +typedef const struct iio_mount_matrix * + (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev, + const struct iio_chan_spec *chan); + +/** + * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute + * @_shared: Whether the attribute is shared between all channels + * @_get: Pointer to an iio_get_mount_matrix_t accessor + */ +#define IIO_MOUNT_MATRIX(_shared, _get) \ +{ \ + .name = "mount_matrix", \ + .shared = (_shared), \ + .read = iio_show_mount_matrix, \ + .private = (uintptr_t)(_get), \ +} + +/** + * struct iio_event_spec - specification for a channel event + * @type: Type of the event + * @dir: Direction of the event + * @mask_separate: Bit mask of enum iio_event_info values. Attributes + * set in this mask will be registered per channel. + * @mask_shared_by_type: Bit mask of enum iio_event_info values. Attributes + * set in this mask will be shared by channel type. + * @mask_shared_by_dir: Bit mask of enum iio_event_info values. Attributes + * set in this mask will be shared by channel type and + * direction. + * @mask_shared_by_all: Bit mask of enum iio_event_info values. Attributes + * set in this mask will be shared by all channels. + */ +struct iio_event_spec { + enum iio_event_type type; + enum iio_event_direction dir; + unsigned long mask_separate; + unsigned long mask_shared_by_type; + unsigned long mask_shared_by_dir; + unsigned long mask_shared_by_all; +}; + +/** + * struct iio_chan_spec - specification of a single channel + * @type: What type of measurement is the channel making. + * @channel: What number do we wish to assign the channel. + * @channel2: If there is a second number for a differential + * channel then this is it. If modified is set then the + * value here specifies the modifier. + * @address: Driver specific identifier. + * @scan_index: Monotonic index to give ordering in scans when read + * from a buffer. + * @scan_type: struct describing the scan type + * @scan_type.sign: 's' or 'u' to specify signed or unsigned + * @scan_type.realbits: Number of valid bits of data + * @scan_type.storagebits: Realbits + padding + * @scan_type.shift: Shift right by this before masking out + * realbits. + * @scan_type.repeat: Number of times real/storage bits repeats. + * When the repeat element is more than 1, then + * the type element in sysfs will show a repeat + * value. Otherwise, the number of repetitions + * is omitted. + * @scan_type.endianness: little or big endian + * @info_mask_separate: What information is to be exported that is specific to + * this channel. + * @info_mask_separate_available: What availability information is to be + * exported that is specific to this channel. + * @info_mask_shared_by_type: What information is to be exported that is shared + * by all channels of the same type. 
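
A sketch of the enum helpers above in use, assuming a hypothetical driver exposing a "filter_mode" attribute; my_get_mode() and my_set_mode() stand in for real register accessors.

static const char * const my_modes[] = { "low_pass", "high_pass", "band_pass" };

static int my_get_mode(struct iio_dev *indio_dev,
                       const struct iio_chan_spec *chan)
{
        return 0;       /* index into my_modes[], read back from hardware */
}

static int my_set_mode(struct iio_dev *indio_dev,
                       const struct iio_chan_spec *chan, unsigned int mode)
{
        return 0;       /* program my_modes[mode] into hardware */
}

static const struct iio_enum my_mode_enum = {
        .items     = my_modes,
        .num_items = ARRAY_SIZE(my_modes),
        .get       = my_get_mode,
        .set       = my_set_mode,
};

static const struct iio_chan_spec_ext_info my_ext_info[] = {
        IIO_ENUM("filter_mode", IIO_SEPARATE, &my_mode_enum),
        IIO_ENUM_AVAILABLE("filter_mode", &my_mode_enum),
        { },    /* the ext_info array is NULL terminated */
};
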
+ * @info_mask_shared_by_type_available: What availability information is to be + * exported that is shared by all channels of the same + * type. + * @info_mask_shared_by_dir: What information is to be exported that is shared + * by all channels of the same direction. + * @info_mask_shared_by_dir_available: What availability information is to be + * exported that is shared by all channels of the same + * direction. + * @info_mask_shared_by_all: What information is to be exported that is shared + * by all channels. + * @info_mask_shared_by_all_available: What availability information is to be + * exported that is shared by all channels. + * @event_spec: Array of events which should be registered for this + * channel. + * @num_event_specs: Size of the event_spec array. + * @ext_info: Array of extended info attributes for this channel. + * The array is NULL terminated, the last element should + * have its name field set to NULL. + * @extend_name: Allows labeling of channel attributes with an + * informative name. Note this has no effect codes etc, + * unlike modifiers. + * @datasheet_name: A name used in in-kernel mapping of channels. It should + * correspond to the first name that the channel is referred + * to by in the datasheet (e.g. IND), or the nearest + * possible compound name (e.g. IND-INC). + * @modified: Does a modifier apply to this channel. What these are + * depends on the channel type. Modifier is set in + * channel2. Examples are IIO_MOD_X for axial sensors about + * the 'x' axis. + * @indexed: Specify the channel has a numerical index. If not, + * the channel index number will be suppressed for sysfs + * attributes but not for event codes. + * @output: Channel is output. + * @differential: Channel is differential. + */ +struct iio_chan_spec { + enum iio_chan_type type; + int channel; + int channel2; + unsigned long address; + int scan_index; + struct { + char sign; + u8 realbits; + u8 storagebits; + u8 shift; + u8 repeat; + enum iio_endian endianness; + } scan_type; + long info_mask_separate; + long info_mask_separate_available; + long info_mask_shared_by_type; + long info_mask_shared_by_type_available; + long info_mask_shared_by_dir; + long info_mask_shared_by_dir_available; + long info_mask_shared_by_all; + long info_mask_shared_by_all_available; + const struct iio_event_spec *event_spec; + unsigned int num_event_specs; + const struct iio_chan_spec_ext_info *ext_info; + const char *extend_name; + const char *datasheet_name; + unsigned modified:1; + unsigned indexed:1; + unsigned output:1; + unsigned differential:1; +}; + + +/** + * iio_channel_has_info() - Checks whether a channel supports a info attribute + * @chan: The channel to be queried + * @type: Type of the info attribute to be checked + * + * Returns true if the channels supports reporting values for the given info + * attribute type, false otherwise. + */ +static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, + enum iio_chan_info_enum type) +{ + return (chan->info_mask_separate & BIT(type)) | + (chan->info_mask_shared_by_type & BIT(type)) | + (chan->info_mask_shared_by_dir & BIT(type)) | + (chan->info_mask_shared_by_all & BIT(type)); +} + +/** + * iio_channel_has_available() - Checks if a channel has an available attribute + * @chan: The channel to be queried + * @type: Type of the available attribute to be checked + * + * Returns true if the channel supports reporting available values for the + * given attribute type, false otherwise. 
+ */ +static inline bool iio_channel_has_available(const struct iio_chan_spec *chan, + enum iio_chan_info_enum type) +{ + return (chan->info_mask_separate_available & BIT(type)) | + (chan->info_mask_shared_by_type_available & BIT(type)) | + (chan->info_mask_shared_by_dir_available & BIT(type)) | + (chan->info_mask_shared_by_all_available & BIT(type)); +} + +#define IIO_CHAN_SOFT_TIMESTAMP(_si) { \ + .type = IIO_TIMESTAMP, \ + .channel = -1, \ + .scan_index = _si, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 64, \ + .storagebits = 64, \ + }, \ +} + +s64 iio_get_time_ns(const struct iio_dev *indio_dev); +unsigned int iio_get_time_res(const struct iio_dev *indio_dev); + +/* Device operating modes */ +#define INDIO_DIRECT_MODE 0x01 +#define INDIO_BUFFER_TRIGGERED 0x02 +#define INDIO_BUFFER_SOFTWARE 0x04 +#define INDIO_BUFFER_HARDWARE 0x08 +#define INDIO_EVENT_TRIGGERED 0x10 +#define INDIO_HARDWARE_TRIGGERED 0x20 + +#define INDIO_ALL_BUFFER_MODES \ + (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE) + +#define INDIO_ALL_TRIGGERED_MODES \ + (INDIO_BUFFER_TRIGGERED \ + | INDIO_EVENT_TRIGGERED \ + | INDIO_HARDWARE_TRIGGERED) + +#define INDIO_MAX_RAW_ELEMENTS 4 + +struct iio_trigger; /* forward declaration */ + +/** + * struct iio_info - constant information about device + * @event_attrs: event control attributes + * @attrs: general purpose device attributes + * @read_raw: function to request a value from the device. + * mask specifies which value. Note 0 means a reading of + * the channel in question. Return value will specify the + * type of value returned by the device. val and val2 will + * contain the elements making up the returned value. + * @read_raw_multi: function to return values from the device. + * mask specifies which value. Note 0 means a reading of + * the channel in question. Return value will specify the + * type of value returned by the device. vals pointer + * contain the elements making up the returned value. + * max_len specifies maximum number of elements + * vals pointer can contain. val_len is used to return + * length of valid elements in vals. + * @read_avail: function to return the available values from the device. + * mask specifies which value. Note 0 means the available + * values for the channel in question. Return value + * specifies if a IIO_AVAIL_LIST or a IIO_AVAIL_RANGE is + * returned in vals. The type of the vals are returned in + * type and the number of vals is returned in length. For + * ranges, there are always three vals returned; min, step + * and max. For lists, all possible values are enumerated. + * @write_raw: function to write a value to the device. + * Parameters are the same as for read_raw. + * @write_raw_get_fmt: callback function to query the expected + * format/precision. If not set by the driver, write_raw + * returns IIO_VAL_INT_PLUS_MICRO. + * @read_event_config: find out if the event is enabled. + * @write_event_config: set if the event is enabled. + * @read_event_value: read a configuration value associated with the event. + * @write_event_value: write a configuration value for the event. + * @validate_trigger: function to validate the trigger when the + * current trigger gets changed. + * @update_scan_mode: function to configure device and scan buffer when + * channels have changed + * @debugfs_reg_access: function to read or write register value of device + * @of_xlate: function pointer to obtain channel specifier index. 
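
A sketch of a channel table built from struct iio_chan_spec and IIO_CHAN_SOFT_TIMESTAMP() above, assuming a hypothetical two-channel 12-bit ADC; IIO_CHAN_INFO_RAW and IIO_CHAN_INFO_SCALE come from the shared IIO types header.

#include <linux/iio/iio.h>

#define MY_ADC_CHANNEL(idx)                                             \
        {                                                               \
                .type = IIO_VOLTAGE,                                    \
                .indexed = 1,                                           \
                .channel = (idx),                                       \
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),           \
                .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),   \
                .scan_index = (idx),                                    \
                .scan_type = {                                          \
                        .sign = 'u',                                    \
                        .realbits = 12,                                 \
                        .storagebits = 16,                              \
                        .endianness = IIO_CPU,                          \
                },                                                      \
        }

static const struct iio_chan_spec my_adc_channels[] = {
        MY_ADC_CHANNEL(0),
        MY_ADC_CHANNEL(1),
        IIO_CHAN_SOFT_TIMESTAMP(2),     /* timestamp takes the last scan slot */
};
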
+ * When #iio-cells is greater than '0', the driver could + * provide a custom of_xlate function that reads the + * *args* and returns the appropriate index in registered + * IIO channels array. + * @hwfifo_set_watermark: function pointer to set the current hardware + * fifo watermark level; see hwfifo_* entries in + * Documentation/ABI/testing/sysfs-bus-iio for details on + * how the hardware fifo operates + * @hwfifo_flush_to_buffer: function pointer to flush the samples stored + * in the hardware fifo to the device buffer. The driver + * should not flush more than count samples. The function + * must return the number of samples flushed, 0 if no + * samples were flushed or a negative integer if no samples + * were flushed and there was an error. + **/ +struct iio_info { + const struct attribute_group *event_attrs; + const struct attribute_group *attrs; + + int (*read_raw)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, + int *val2, + long mask); + + int (*read_raw_multi)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int max_len, + int *vals, + int *val_len, + long mask); + + int (*read_avail)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, + int *type, + int *length, + long mask); + + int (*write_raw)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, + int val2, + long mask); + + int (*write_raw_get_fmt)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long mask); + + int (*read_event_config)(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir); + + int (*write_event_config)(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + int state); + + int (*read_event_value)(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int *val, int *val2); + + int (*write_event_value)(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int val, int val2); + + int (*validate_trigger)(struct iio_dev *indio_dev, + struct iio_trigger *trig); + int (*update_scan_mode)(struct iio_dev *indio_dev, + const unsigned long *scan_mask); + int (*debugfs_reg_access)(struct iio_dev *indio_dev, + unsigned reg, unsigned writeval, + unsigned *readval); + int (*of_xlate)(struct iio_dev *indio_dev, + const struct of_phandle_args *iiospec); + int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val); + int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev, + unsigned count); +}; + +/** + * struct iio_buffer_setup_ops - buffer setup related callbacks + * @preenable: [DRIVER] function to run prior to marking buffer enabled + * @postenable: [DRIVER] function to run after marking buffer enabled + * @predisable: [DRIVER] function to run prior to marking buffer + * disabled + * @postdisable: [DRIVER] function to run after marking buffer disabled + * @validate_scan_mask: [DRIVER] function callback to check whether a given + * scan mask is valid for the device. 
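
A sketch of how a driver might fill the read_raw() hook of struct iio_info for the hypothetical ADC channels above; struct my_adc_state, my_adc_read_conversion() and the 2.5 V reference are assumptions made for illustration.

struct my_adc_state {
        void __iomem *regs;
};

static int my_adc_read_conversion(struct my_adc_state *st, int channel)
{
        return 0;       /* read the latest conversion for 'channel' from hardware */
}

static int my_adc_read_raw(struct iio_dev *indio_dev,
                           struct iio_chan_spec const *chan,
                           int *val, int *val2, long mask)
{
        struct my_adc_state *st = iio_priv(indio_dev);

        switch (mask) {
        case IIO_CHAN_INFO_RAW:
                *val = my_adc_read_conversion(st, chan->channel);
                return IIO_VAL_INT;             /* result is in *val */
        case IIO_CHAN_INFO_SCALE:
                *val = 2500;                    /* 2.5 V reference in mV */
                *val2 = chan->scan_type.realbits;
                return IIO_VAL_FRACTIONAL_LOG2; /* mV = raw * 2500 / 2^12 */
        default:
                return -EINVAL;
        }
}

static const struct iio_info my_adc_info = {
        .read_raw = my_adc_read_raw,
};
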
+ */ +struct iio_buffer_setup_ops { + int (*preenable)(struct iio_dev *); + int (*postenable)(struct iio_dev *); + int (*predisable)(struct iio_dev *); + int (*postdisable)(struct iio_dev *); + bool (*validate_scan_mask)(struct iio_dev *indio_dev, + const unsigned long *scan_mask); +}; + +/** + * struct iio_dev - industrial I/O device + * @id: [INTERN] used to identify device internally + * @driver_module: [INTERN] used to make it harder to undercut users + * @modes: [DRIVER] operating modes supported by device + * @currentmode: [DRIVER] current operating mode + * @dev: [DRIVER] device structure, should be assigned a parent + * and owner + * @event_interface: [INTERN] event chrdevs associated with interrupt lines + * @buffer: [DRIVER] any buffer present + * @buffer_list: [INTERN] list of all buffers currently attached + * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux + * @mlock: [DRIVER] lock used to prevent simultaneous device state + * changes + * @available_scan_masks: [DRIVER] optional array of allowed bitmasks + * @masklength: [INTERN] the length of the mask established from + * channels + * @active_scan_mask: [INTERN] union of all scan masks requested by buffers + * @scan_timestamp: [INTERN] set if any buffers have requested timestamp + * @scan_index_timestamp:[INTERN] cache of the index to the timestamp + * @trig: [INTERN] current device trigger (buffer modes) + * @trig_readonly: [INTERN] mark the current trigger immutable + * @pollfunc: [DRIVER] function run on trigger being received + * @pollfunc_event: [DRIVER] function run on events trigger being received + * @channels: [DRIVER] channel specification structure table + * @num_channels: [DRIVER] number of channels specified in @channels. + * @channel_attr_list: [INTERN] keep track of automatically created channel + * attributes + * @chan_attr_group: [INTERN] group for all attrs in base directory + * @name: [DRIVER] name of the device. + * @info: [DRIVER] callbacks and constant info from driver + * @clock_id: [INTERN] timestamping clock posix identifier + * @info_exist_lock: [INTERN] lock to prevent use during removal + * @setup_ops: [DRIVER] callbacks to call before and after buffer + * enable/disable + * @chrdev: [INTERN] associated character device + * @groups: [INTERN] attribute groups + * @groupcounter: [INTERN] index of next attribute group + * @flags: [INTERN] file ops related flags including busy flag. + * @debugfs_dentry: [INTERN] device specific debugfs dentry. + * @cached_reg_addr: [INTERN] cached register address for debugfs reads. 
+ */ +struct iio_dev { + int id; + struct module *driver_module; + + int modes; + int currentmode; + struct device dev; + + struct iio_event_interface *event_interface; + + struct iio_buffer *buffer; + struct list_head buffer_list; + int scan_bytes; + struct mutex mlock; + + const unsigned long *available_scan_masks; + unsigned masklength; + const unsigned long *active_scan_mask; + bool scan_timestamp; + unsigned scan_index_timestamp; + struct iio_trigger *trig; + bool trig_readonly; + struct iio_poll_func *pollfunc; + struct iio_poll_func *pollfunc_event; + + struct iio_chan_spec const *channels; + int num_channels; + + struct list_head channel_attr_list; + struct attribute_group chan_attr_group; + const char *name; + const struct iio_info *info; + clockid_t clock_id; + struct mutex info_exist_lock; + const struct iio_buffer_setup_ops *setup_ops; + struct cdev chrdev; +#define IIO_MAX_GROUPS 6 + const struct attribute_group *groups[IIO_MAX_GROUPS + 1]; + int groupcounter; + + unsigned long flags; +#if defined(CONFIG_DEBUG_FS) + struct dentry *debugfs_dentry; + unsigned cached_reg_addr; +#endif +}; + +const struct iio_chan_spec +*iio_find_channel_from_si(struct iio_dev *indio_dev, int si); +/** + * iio_device_register() - register a device with the IIO subsystem + * @indio_dev: Device structure filled by the device driver + **/ +#define iio_device_register(indio_dev) \ + __iio_device_register((indio_dev), THIS_MODULE) +int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod); +void iio_device_unregister(struct iio_dev *indio_dev); +/** + * devm_iio_device_register - Resource-managed iio_device_register() + * @dev: Device to allocate iio_dev for + * @indio_dev: Device structure filled by the device driver + * + * Managed iio_device_register. The IIO device registered with this + * function is automatically unregistered on driver detach. This function + * calls iio_device_register() internally. Refer to that function for more + * information. + * + * If an iio_dev registered with this function needs to be unregistered + * separately, devm_iio_device_unregister() must be used. + * + * RETURNS: + * 0 on success, negative error number on failure. + */ +#define devm_iio_device_register(dev, indio_dev) \ + __devm_iio_device_register((dev), (indio_dev), THIS_MODULE) +int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, + struct module *this_mod); +void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); +int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); +int iio_device_claim_direct_mode(struct iio_dev *indio_dev); +void iio_device_release_direct_mode(struct iio_dev *indio_dev); + +extern struct bus_type iio_bus_type; + +/** + * iio_device_put() - reference counted deallocation of struct device + * @indio_dev: IIO device structure containing the device + **/ +static inline void iio_device_put(struct iio_dev *indio_dev) +{ + if (indio_dev) + put_device(&indio_dev->dev); +} + +/** + * iio_device_get_clock() - Retrieve current timestamping clock for the device + * @indio_dev: IIO device structure containing the device + */ +static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) +{ + return indio_dev->clock_id; +} + +/** + * dev_to_iio_dev() - Get IIO device struct from a device struct + * @dev: The device embedded in the IIO device + * + * Note: The device must be a IIO device, otherwise the result is undefined. 
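
A sketch pulling the registration pieces together, assuming the hypothetical ADC channel table and iio_info from the previous examples and a platform device binding; my_adc_probe and the "my-adc" name are illustrative.

#include <linux/platform_device.h>
#include <linux/iio/iio.h>

static int my_adc_probe(struct platform_device *pdev)
{
        struct my_adc_state *st;
        struct iio_dev *indio_dev;

        indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
        if (!indio_dev)
                return -ENOMEM;

        st = iio_priv(indio_dev);
        /* ... map registers, acquire clocks/regulators into 'st' ... */

        indio_dev->dev.parent = &pdev->dev;
        indio_dev->name = "my-adc";
        indio_dev->info = &my_adc_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = my_adc_channels;
        indio_dev->num_channels = ARRAY_SIZE(my_adc_channels);

        /* Unregistered automatically on driver detach. */
        return devm_iio_device_register(&pdev->dev, indio_dev);
}
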
+ */ +static inline struct iio_dev *dev_to_iio_dev(struct device *dev) +{ + return container_of(dev, struct iio_dev, dev); +} + +/** + * iio_device_get() - increment reference count for the device + * @indio_dev: IIO device structure + * + * Returns: The passed IIO device + **/ +static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev) +{ + return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL; +} + + +/** + * iio_device_set_drvdata() - Set device driver data + * @indio_dev: IIO device structure + * @data: Driver specific data + * + * Allows to attach an arbitrary pointer to an IIO device, which can later be + * retrieved by iio_device_get_drvdata(). + */ +static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data) +{ + dev_set_drvdata(&indio_dev->dev, data); +} + +/** + * iio_device_get_drvdata() - Get device driver data + * @indio_dev: IIO device structure + * + * Returns the data previously set with iio_device_set_drvdata() + */ +static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev) +{ + return dev_get_drvdata(&indio_dev->dev); +} + +/* Can we make this smaller? */ +#define IIO_ALIGN L1_CACHE_BYTES +struct iio_dev *iio_device_alloc(int sizeof_priv); + +static inline void *iio_priv(const struct iio_dev *indio_dev) +{ + return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN); +} + +static inline struct iio_dev *iio_priv_to_dev(void *priv) +{ + return (struct iio_dev *)((char *)priv - + ALIGN(sizeof(struct iio_dev), IIO_ALIGN)); +} + +void iio_device_free(struct iio_dev *indio_dev); +int devm_iio_device_match(struct device *dev, void *res, void *data); +struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv); +void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev); +struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, + const char *fmt, ...); +void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig); + +/** + * iio_buffer_enabled() - helper function to test if the buffer is enabled + * @indio_dev: IIO device structure for device + **/ +static inline bool iio_buffer_enabled(struct iio_dev *indio_dev) +{ + return indio_dev->currentmode + & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | + INDIO_BUFFER_SOFTWARE); +} + +/** + * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry + * @indio_dev: IIO device structure for device + **/ +#if defined(CONFIG_DEBUG_FS) +static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) +{ + return indio_dev->debugfs_dentry; +} +#else +static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) +{ + return NULL; +} +#endif + +ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals); + +int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, + int *fract); + +/** + * IIO_DEGREE_TO_RAD() - Convert degree to rad + * @deg: A value in degree + * + * Returns the given value converted from degree to rad + */ +#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL) + +/** + * IIO_RAD_TO_DEGREE() - Convert rad to degree + * @rad: A value in rad + * + * Returns the given value converted from rad to degree + */ +#define IIO_RAD_TO_DEGREE(rad) \ + (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL) + +/** + * IIO_G_TO_M_S_2() - Convert g to meter / second**2 + * @g: A value in g + * + * Returns the given value converted from g to meter / second**2 + */ +#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL) + +/** 
+ * IIO_M_S_2_TO_G() - Convert meter / second**2 to g + * @ms2: A value in meter / second**2 + * + * Returns the given value converted from meter / second**2 to g + */ +#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL) + +#endif /* _INDUSTRIAL_IO_H_ */ diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h new file mode 100644 index 000000000..360da7d18 --- /dev/null +++ b/include/linux/iio/imu/adis.h @@ -0,0 +1,284 @@ +/* + * Common library for ADIS16XXX devices + * + * Copyright 2012 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __IIO_ADIS_H__ +#define __IIO_ADIS_H__ + +#include +#include +#include + +#define ADIS_WRITE_REG(reg) ((0x80 | (reg))) +#define ADIS_READ_REG(reg) ((reg) & 0x7f) + +#define ADIS_PAGE_SIZE 0x80 +#define ADIS_REG_PAGE_ID 0x00 + +struct adis; + +/** + * struct adis_data - ADIS chip variant specific data + * @read_delay: SPI delay for read operations in us + * @write_delay: SPI delay for write operations in us + * @glob_cmd_reg: Register address of the GLOB_CMD register + * @msc_ctrl_reg: Register address of the MSC_CTRL register + * @diag_stat_reg: Register address of the DIAG_STAT register + * @status_error_msgs: Array of error messgaes + * @status_error_mask: + */ +struct adis_data { + unsigned int read_delay; + unsigned int write_delay; + + unsigned int glob_cmd_reg; + unsigned int msc_ctrl_reg; + unsigned int diag_stat_reg; + + unsigned int self_test_mask; + bool self_test_no_autoclear; + unsigned int startup_delay; + + const char * const *status_error_msgs; + unsigned int status_error_mask; + + int (*enable_irq)(struct adis *adis, bool enable); + + bool has_paging; +}; + +struct adis { + struct spi_device *spi; + struct iio_trigger *trig; + + const struct adis_data *data; + + struct mutex txrx_lock; + struct spi_message msg; + struct spi_transfer *xfer; + unsigned int current_page; + void *buffer; + + uint8_t tx[10] ____cacheline_aligned; + uint8_t rx[4]; +}; + +int adis_init(struct adis *adis, struct iio_dev *indio_dev, + struct spi_device *spi, const struct adis_data *data); +int adis_reset(struct adis *adis); + +int adis_write_reg(struct adis *adis, unsigned int reg, + unsigned int val, unsigned int size); +int adis_read_reg(struct adis *adis, unsigned int reg, + unsigned int *val, unsigned int size); + +/** + * adis_write_reg_8() - Write single byte to a register + * @adis: The adis device + * @reg: The address of the register to be written + * @value: The value to write + */ +static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, + uint8_t val) +{ + return adis_write_reg(adis, reg, val, 1); +} + +/** + * adis_write_reg_16() - Write 2 bytes to a pair of registers + * @adis: The adis device + * @reg: The address of the lower of the two registers + * @value: Value to be written + */ +static inline int adis_write_reg_16(struct adis *adis, unsigned int reg, + uint16_t val) +{ + return adis_write_reg(adis, reg, val, 2); +} + +/** + * adis_write_reg_32() - write 4 bytes to four registers + * @adis: The adis device + * @reg: The address of the lower of the four register + * @value: Value to be written + */ +static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, + uint32_t val) +{ + return adis_write_reg(adis, reg, val, 4); +} + +/** + * adis_read_reg_16() - read 2 bytes from a 16-bit register + * @adis: The adis device + * @reg: The address of the lower of the two registers + * @val: The value read back from the device + 
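+ *
+ * Editorial usage sketch, not part of the upstream header (the register
+ * macro ADIS16XXX_REG_SMPL_PRD is made up for illustration):
+ *
+ *    uint16_t smpl_prd;
+ *    int ret;
+ *
+ *    ret = adis_read_reg_16(adis, ADIS16XXX_REG_SMPL_PRD, &smpl_prd);
+ *    if (ret)
+ *            return ret;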
*/ +static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, + uint16_t *val) +{ + unsigned int tmp; + int ret; + + ret = adis_read_reg(adis, reg, &tmp, 2); + *val = tmp; + + return ret; +} + +/** + * adis_read_reg_32() - read 4 bytes from a 32-bit register + * @adis: The adis device + * @reg: The address of the lower of the two registers + * @val: The value read back from the device + */ +static inline int adis_read_reg_32(struct adis *adis, unsigned int reg, + uint32_t *val) +{ + unsigned int tmp; + int ret; + + ret = adis_read_reg(adis, reg, &tmp, 4); + *val = tmp; + + return ret; +} + +int adis_enable_irq(struct adis *adis, bool enable); +int adis_check_status(struct adis *adis); + +int adis_initial_startup(struct adis *adis); + +int adis_single_conversion(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, unsigned int error_mask, + int *val); + +#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .channel = (chan), \ + .extend_name = name, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = info_all, \ + .address = (addr), \ + .scan_index = (si), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (bits), \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ +} + +#define ADIS_SUPPLY_CHAN(addr, si, info_all, bits) \ + ADIS_VOLTAGE_CHAN(addr, si, 0, "supply", info_all, bits) + +#define ADIS_AUX_ADC_CHAN(addr, si, info_all, bits) \ + ADIS_VOLTAGE_CHAN(addr, si, 1, NULL, info_all, bits) + +#define ADIS_TEMP_CHAN(addr, si, info_all, bits) { \ + .type = IIO_TEMP, \ + .indexed = 1, \ + .channel = 0, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .info_mask_shared_by_all = info_all, \ + .address = (addr), \ + .scan_index = (si), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (bits), \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ +} + +#define ADIS_MOD_CHAN(_type, mod, addr, si, info_sep, info_all, bits) { \ + .type = (_type), \ + .modified = 1, \ + .channel2 = IIO_MOD_ ## mod, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + info_sep, \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = info_all, \ + .address = (addr), \ + .scan_index = (si), \ + .scan_type = { \ + .sign = 's', \ + .realbits = (bits), \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ +} + +#define ADIS_ACCEL_CHAN(mod, addr, si, info_sep, info_all, bits) \ + ADIS_MOD_CHAN(IIO_ACCEL, mod, addr, si, info_sep, info_all, bits) + +#define ADIS_GYRO_CHAN(mod, addr, si, info_sep, info_all, bits) \ + ADIS_MOD_CHAN(IIO_ANGL_VEL, mod, addr, si, info_sep, info_all, bits) + +#define ADIS_INCLI_CHAN(mod, addr, si, info_sep, info_all, bits) \ + ADIS_MOD_CHAN(IIO_INCLI, mod, addr, si, info_sep, info_all, bits) + +#define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \ + ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits) + +#ifdef CONFIG_IIO_ADIS_LIB_BUFFER + +int adis_setup_buffer_and_trigger(struct adis *adis, + struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *)); +void adis_cleanup_buffer_and_trigger(struct adis *adis, + struct iio_dev *indio_dev); + +int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); +void adis_remove_trigger(struct adis *adis); + +int adis_update_scan_mode(struct iio_dev *indio_dev, + const unsigned long *scan_mask); + +#else /* CONFIG_IIO_BUFFER */ + +static inline int 
adis_setup_buffer_and_trigger(struct adis *adis, + struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *)) +{ + return 0; +} + +static inline void adis_cleanup_buffer_and_trigger(struct adis *adis, + struct iio_dev *indio_dev) +{ +} + +static inline int adis_probe_trigger(struct adis *adis, + struct iio_dev *indio_dev) +{ + return 0; +} + +static inline void adis_remove_trigger(struct adis *adis) +{ +} + +#define adis_update_scan_mode NULL + +#endif /* CONFIG_IIO_BUFFER */ + +#ifdef CONFIG_DEBUG_FS + +int adis_debugfs_reg_access(struct iio_dev *indio_dev, + unsigned int reg, unsigned int writeval, unsigned int *readval); + +#else + +#define adis_debugfs_reg_access NULL + +#endif + +#endif diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h new file mode 100644 index 000000000..764659e01 --- /dev/null +++ b/include/linux/iio/kfifo_buf.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_IIO_KFIFO_BUF_H__ +#define __LINUX_IIO_KFIFO_BUF_H__ + +struct iio_buffer; +struct device; + +struct iio_buffer *iio_kfifo_allocate(void); +void iio_kfifo_free(struct iio_buffer *r); + +struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev); +void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r); + +#endif diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h new file mode 100644 index 000000000..5e1cfa75f --- /dev/null +++ b/include/linux/iio/machine.h @@ -0,0 +1,38 @@ +/* + * Industrial I/O in kernel access map definitions for board files. + * + * Copyright (c) 2011 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef __LINUX_IIO_MACHINE_H__ +#define __LINUX_IIO_MACHINE_H__ + +/** + * struct iio_map - description of link between consumer and device channels + * @adc_channel_label: Label used to identify the channel on the provider. + * This is matched against the datasheet_name element + * of struct iio_chan_spec. + * @consumer_dev_name: Name to uniquely identify the consumer device. + * @consumer_channel: Unique name used to identify the channel on the + * consumer side. + * @consumer_data: Data about the channel for use by the consumer driver. 
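+ *
+ * Editorial usage sketch, not part of the upstream header; channel and
+ * consumer names are made up. A board file typically provides a table
+ * terminated by an empty entry and hands it to iio_map_array_register():
+ *
+ *    static struct iio_map board_adc_maps[] = {
+ *            IIO_MAP("channel_0", "charger.0", "vbat"),
+ *            IIO_MAP("channel_1", "charger.0", "ichg"),
+ *            { },
+ *    };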
+ */ +struct iio_map { + const char *adc_channel_label; + const char *consumer_dev_name; + const char *consumer_channel; + void *consumer_data; +}; + +#define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \ +{ \ + .adc_channel_label = _provider_channel, \ + .consumer_dev_name = _consumer_dev_name, \ + .consumer_channel = _consumer_channel, \ +} + +#endif diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h new file mode 100644 index 000000000..ac9366f80 --- /dev/null +++ b/include/linux/iio/magnetometer/ak8975.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __IIO_MAGNETOMETER_AK8975_H__ +#define __IIO_MAGNETOMETER_AK8975_H__ + +#include + +/** + * struct ak8975_platform_data - AK8975 magnetometer driver platform data + * @eoc_gpio: data ready event gpio + * @orientation: mounting matrix relative to main hardware + */ +struct ak8975_platform_data { + int eoc_gpio; + struct iio_mount_matrix orientation; +}; + +#endif diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h new file mode 100644 index 000000000..8642b91a7 --- /dev/null +++ b/include/linux/iio/sw_device.h @@ -0,0 +1,70 @@ +/* + * Industrial I/O software device interface + * + * Copyright (c) 2016 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef __IIO_SW_DEVICE +#define __IIO_SW_DEVICE + +#include +#include +#include +#include + +#define module_iio_sw_device_driver(__iio_sw_device_type) \ + module_driver(__iio_sw_device_type, iio_register_sw_device_type, \ + iio_unregister_sw_device_type) + +struct iio_sw_device_ops; + +struct iio_sw_device_type { + const char *name; + struct module *owner; + const struct iio_sw_device_ops *ops; + struct list_head list; + struct config_group *group; +}; + +struct iio_sw_device { + struct iio_dev *device; + struct iio_sw_device_type *device_type; + struct config_group group; +}; + +struct iio_sw_device_ops { + struct iio_sw_device* (*probe)(const char *); + int (*remove)(struct iio_sw_device *); +}; + +static inline +struct iio_sw_device *to_iio_sw_device(struct config_item *item) +{ + return container_of(to_config_group(item), struct iio_sw_device, + group); +} + +int iio_register_sw_device_type(struct iio_sw_device_type *dt); +void iio_unregister_sw_device_type(struct iio_sw_device_type *dt); + +struct iio_sw_device *iio_sw_device_create(const char *, const char *); +void iio_sw_device_destroy(struct iio_sw_device *); + +int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt); +void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt); + +static inline +void iio_swd_group_init_type_name(struct iio_sw_device *d, + const char *name, + const struct config_item_type *type) +{ +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + config_group_init_type_name(&d->group, name, type); +#endif +} + +#endif /* __IIO_SW_DEVICE */ diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h new file mode 100644 index 000000000..0c43738a9 --- /dev/null +++ b/include/linux/iio/sw_trigger.h @@ -0,0 +1,70 @@ +/* + * Industrial I/O software trigger interface + * + * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#ifndef __IIO_SW_TRIGGER +#define __IIO_SW_TRIGGER + +#include +#include +#include +#include + +#define module_iio_sw_trigger_driver(__iio_sw_trigger_type) \ + module_driver(__iio_sw_trigger_type, iio_register_sw_trigger_type, \ + iio_unregister_sw_trigger_type) + +struct iio_sw_trigger_ops; + +struct iio_sw_trigger_type { + const char *name; + struct module *owner; + const struct iio_sw_trigger_ops *ops; + struct list_head list; + struct config_group *group; +}; + +struct iio_sw_trigger { + struct iio_trigger *trigger; + struct iio_sw_trigger_type *trigger_type; + struct config_group group; +}; + +struct iio_sw_trigger_ops { + struct iio_sw_trigger* (*probe)(const char *); + int (*remove)(struct iio_sw_trigger *); +}; + +static inline +struct iio_sw_trigger *to_iio_sw_trigger(struct config_item *item) +{ + return container_of(to_config_group(item), struct iio_sw_trigger, + group); +} + +int iio_register_sw_trigger_type(struct iio_sw_trigger_type *tt); +void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt); + +struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *); +void iio_sw_trigger_destroy(struct iio_sw_trigger *); + +int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt); +void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt); + +static inline +void iio_swt_group_init_type_name(struct iio_sw_trigger *t, + const char *name, + const struct config_item_type *type) +{ +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + config_group_init_type_name(&t->group, name, type); +#endif +} + +#endif /* __IIO_SW_TRIGGER */ diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h new file mode 100644 index 000000000..ce9426c50 --- /dev/null +++ b/include/linux/iio/sysfs.h @@ -0,0 +1,152 @@ +/* The industrial I/O core + * + *Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * General attributes + */ + +#ifndef _INDUSTRIAL_IO_SYSFS_H_ +#define _INDUSTRIAL_IO_SYSFS_H_ + +struct iio_chan_spec; + +/** + * struct iio_dev_attr - iio specific device attribute + * @dev_attr: underlying device attribute + * @address: associated register address + * @l: list head for maintaining list of dynamically created attrs + * @c: specification for the underlying channel + */ +struct iio_dev_attr { + struct device_attribute dev_attr; + u64 address; + struct list_head l; + struct iio_chan_spec const *c; +}; + +#define to_iio_dev_attr(_dev_attr) \ + container_of(_dev_attr, struct iio_dev_attr, dev_attr) + +ssize_t iio_read_const_attr(struct device *dev, + struct device_attribute *attr, + char *len); + +/** + * struct iio_const_attr - constant device specific attribute + * often used for things like available modes + * @string: attribute string + * @dev_attr: underlying device attribute + */ +struct iio_const_attr { + const char *string; + struct device_attribute dev_attr; +}; + +#define to_iio_const_attr(_dev_attr) \ + container_of(_dev_attr, struct iio_const_attr, dev_attr) + +/* Some attributes will be hard coded (device dependent) and not require an + address, in these cases pass a negative */ +#define IIO_ATTR(_name, _mode, _show, _store, _addr) \ + { .dev_attr = __ATTR(_name, _mode, _show, _store), \ + .address = _addr } + +#define IIO_ATTR_RO(_name, _addr) \ + { .dev_attr = __ATTR_RO(_name), \ + .address = _addr } + +#define IIO_ATTR_WO(_name, _addr) \ + { .dev_attr = __ATTR_WO(_name), \ + .address = _addr } + +#define IIO_ATTR_RW(_name, _addr) \ + { .dev_attr = __ATTR_RW(_name), \ + .address = _addr } + +#define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \ + struct iio_dev_attr iio_dev_attr_##_name \ + = IIO_ATTR(_name, _mode, _show, _store, _addr) + +#define IIO_DEVICE_ATTR_RO(_name, _addr) \ + struct iio_dev_attr iio_dev_attr_##_name \ + = IIO_ATTR_RO(_name, _addr) + +#define IIO_DEVICE_ATTR_WO(_name, _addr) \ + struct iio_dev_attr iio_dev_attr_##_name \ + = IIO_ATTR_WO(_name, _addr) + +#define IIO_DEVICE_ATTR_RW(_name, _addr) \ + struct iio_dev_attr iio_dev_attr_##_name \ + = IIO_ATTR_RW(_name, _addr) + +#define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \ + struct iio_dev_attr iio_dev_attr_##_vname \ + = IIO_ATTR(_name, _mode, _show, _store, _addr) + +#define IIO_CONST_ATTR(_name, _string) \ + struct iio_const_attr iio_const_attr_##_name \ + = { .string = _string, \ + .dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)} + +#define IIO_CONST_ATTR_NAMED(_vname, _name, _string) \ + struct iio_const_attr iio_const_attr_##_vname \ + = { .string = _string, \ + .dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)} + +/* Generic attributes of onetype or another */ + +/** + * IIO_DEV_ATTR_SAMP_FREQ - sets any internal clock frequency + * @_mode: sysfs file mode/permissions + * @_show: output method for the attribute + * @_store: input method for the attribute + **/ +#define IIO_DEV_ATTR_SAMP_FREQ(_mode, _show, _store) \ + IIO_DEVICE_ATTR(sampling_frequency, _mode, _show, _store, 0) + +/** + * IIO_DEV_ATTR_SAMP_FREQ_AVAIL - list available sampling frequencies + * @_show: output method for the attribute + * + * May be mode dependent on some devices + **/ +#define IIO_DEV_ATTR_SAMP_FREQ_AVAIL(_show) \ + IIO_DEVICE_ATTR(sampling_frequency_available, S_IRUGO, _show, NULL, 0) +/** + * IIO_CONST_ATTR_SAMP_FREQ_AVAIL - list available sampling frequencies + * @_string: frequency string for the attribute + * + * Constant version + 
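+ *
+ * Editorial usage sketch, not part of the upstream header; the frequency
+ * list is arbitrary:
+ *
+ *    static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 50 100 500");
+ *
+ * The resulting attribute is then exposed through
+ * &iio_const_attr_sampling_frequency_available.dev_attr.attr in the
+ * driver's attribute group.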
**/ +#define IIO_CONST_ATTR_SAMP_FREQ_AVAIL(_string) \ + IIO_CONST_ATTR(sampling_frequency_available, _string) + +/** + * IIO_DEV_ATTR_INT_TIME_AVAIL - list available integration times + * @_show: output method for the attribute + **/ +#define IIO_DEV_ATTR_INT_TIME_AVAIL(_show) \ + IIO_DEVICE_ATTR(integration_time_available, S_IRUGO, _show, NULL, 0) +/** + * IIO_CONST_ATTR_INT_TIME_AVAIL - list available integration times + * @_string: frequency string for the attribute + * + * Constant version + **/ +#define IIO_CONST_ATTR_INT_TIME_AVAIL(_string) \ + IIO_CONST_ATTR(integration_time_available, _string) + +#define IIO_DEV_ATTR_TEMP_RAW(_show) \ + IIO_DEVICE_ATTR(in_temp_raw, S_IRUGO, _show, NULL, 0) + +#define IIO_CONST_ATTR_TEMP_OFFSET(_string) \ + IIO_CONST_ATTR(in_temp_offset, _string) + +#define IIO_CONST_ATTR_TEMP_SCALE(_string) \ + IIO_CONST_ATTR(in_temp_scale, _string) + +#endif /* _INDUSTRIAL_IO_SYSFS_H_ */ diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h new file mode 100644 index 000000000..464458d20 --- /dev/null +++ b/include/linux/iio/timer/stm32-lptim-trigger.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) STMicroelectronics 2017 + * + * Author: Fabrice Gasnier + * + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STM32_LPTIM_TRIGGER_H_ +#define _STM32_LPTIM_TRIGGER_H_ + +#include +#include + +#define LPTIM1_OUT "lptim1_out" +#define LPTIM2_OUT "lptim2_out" +#define LPTIM3_OUT "lptim3_out" + +#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER) +bool is_stm32_lptim_trigger(struct iio_trigger *trig); +#else +static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig) +{ +#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER) + pr_warn_once("stm32 lptim_trigger not linked in\n"); +#endif + return false; +} +#endif +#endif diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h new file mode 100644 index 000000000..d68add80a --- /dev/null +++ b/include/linux/iio/timer/stm32-timer-trigger.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) STMicroelectronics 2016 + * + * Author: Benjamin Gaignard + * + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STM32_TIMER_TRIGGER_H_ +#define _STM32_TIMER_TRIGGER_H_ + +#define TIM1_TRGO "tim1_trgo" +#define TIM1_TRGO2 "tim1_trgo2" +#define TIM1_CH1 "tim1_ch1" +#define TIM1_CH2 "tim1_ch2" +#define TIM1_CH3 "tim1_ch3" +#define TIM1_CH4 "tim1_ch4" + +#define TIM2_TRGO "tim2_trgo" +#define TIM2_CH1 "tim2_ch1" +#define TIM2_CH2 "tim2_ch2" +#define TIM2_CH3 "tim2_ch3" +#define TIM2_CH4 "tim2_ch4" + +#define TIM3_TRGO "tim3_trgo" +#define TIM3_CH1 "tim3_ch1" +#define TIM3_CH2 "tim3_ch2" +#define TIM3_CH3 "tim3_ch3" +#define TIM3_CH4 "tim3_ch4" + +#define TIM4_TRGO "tim4_trgo" +#define TIM4_CH1 "tim4_ch1" +#define TIM4_CH2 "tim4_ch2" +#define TIM4_CH3 "tim4_ch3" +#define TIM4_CH4 "tim4_ch4" + +#define TIM5_TRGO "tim5_trgo" +#define TIM5_CH1 "tim5_ch1" +#define TIM5_CH2 "tim5_ch2" +#define TIM5_CH3 "tim5_ch3" +#define TIM5_CH4 "tim5_ch4" + +#define TIM6_TRGO "tim6_trgo" + +#define TIM7_TRGO "tim7_trgo" + +#define TIM8_TRGO "tim8_trgo" +#define TIM8_TRGO2 "tim8_trgo2" +#define TIM8_CH1 "tim8_ch1" +#define TIM8_CH2 "tim8_ch2" +#define TIM8_CH3 "tim8_ch3" +#define TIM8_CH4 "tim8_ch4" + +#define TIM9_TRGO "tim9_trgo" +#define TIM9_CH1 "tim9_ch1" +#define TIM9_CH2 "tim9_ch2" + +#define TIM10_OC1 "tim10_oc1" + +#define TIM11_OC1 "tim11_oc1" + +#define TIM12_TRGO "tim12_trgo" +#define TIM12_CH1 "tim12_ch1" 
+#define TIM12_CH2 "tim12_ch2" + +#define TIM13_OC1 "tim13_oc1" + +#define TIM14_OC1 "tim14_oc1" + +#define TIM15_TRGO "tim15_trgo" + +#define TIM16_OC1 "tim16_oc1" + +#define TIM17_OC1 "tim17_oc1" + +bool is_stm32_timer_trigger(struct iio_trigger *trig); + +#endif diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h new file mode 100644 index 000000000..b19b7204e --- /dev/null +++ b/include/linux/iio/trigger.h @@ -0,0 +1,186 @@ +/* The industrial I/O core, trigger handling functions + * + * Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ +#include +#include +#include + +#ifndef _IIO_TRIGGER_H_ +#define _IIO_TRIGGER_H_ + +#ifdef CONFIG_IIO_TRIGGER +struct iio_subirq { + bool enabled; +}; + +struct iio_dev; +struct iio_trigger; + +/** + * struct iio_trigger_ops - operations structure for an iio_trigger. + * @set_trigger_state: switch on/off the trigger on demand + * @try_reenable: function to reenable the trigger when the + * use count is zero (may be NULL) + * @validate_device: function to validate the device when the + * current trigger gets changed. + * + * This is typically static const within a driver and shared by + * instances of a given device. + **/ +struct iio_trigger_ops { + int (*set_trigger_state)(struct iio_trigger *trig, bool state); + int (*try_reenable)(struct iio_trigger *trig); + int (*validate_device)(struct iio_trigger *trig, + struct iio_dev *indio_dev); +}; + + +/** + * struct iio_trigger - industrial I/O trigger device + * @ops: [DRIVER] operations structure + * @owner: [INTERN] owner of this driver module + * @id: [INTERN] unique id number + * @name: [DRIVER] unique name + * @dev: [DRIVER] associated device (if relevant) + * @list: [INTERN] used in maintenance of global trigger list + * @alloc_list: [DRIVER] used for driver specific trigger list + * @use_count: [INTERN] use count for the trigger. + * @subirq_chip: [INTERN] associate 'virtual' irq chip. + * @subirq_base: [INTERN] base number for irqs provided by trigger. + * @subirqs: [INTERN] information about the 'child' irqs. + * @pool: [INTERN] bitmap of irqs currently in use. + * @pool_lock: [INTERN] protection of the irq pool. + * @attached_own_device:[INTERN] if we are using our own device as trigger, + * i.e. if we registered a poll function to the same + * device as the one providing the trigger. 
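+ *
+ * Editorial usage sketch, not part of the upstream header (my_trigger_ops
+ * and the driver state pointer st are hypothetical):
+ *
+ *    trig = devm_iio_trigger_alloc(dev, "%s-dev%d", name, indio_dev->id);
+ *    if (!trig)
+ *            return -ENOMEM;
+ *    trig->ops = &my_trigger_ops;
+ *    iio_trigger_set_drvdata(trig, st);
+ *    ret = devm_iio_trigger_register(dev, trig);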
+ **/ +struct iio_trigger { + const struct iio_trigger_ops *ops; + struct module *owner; + int id; + const char *name; + struct device dev; + + struct list_head list; + struct list_head alloc_list; + atomic_t use_count; + + struct irq_chip subirq_chip; + int subirq_base; + + struct iio_subirq subirqs[CONFIG_IIO_CONSUMERS_PER_TRIGGER]; + unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)]; + struct mutex pool_lock; + bool attached_own_device; +}; + + +static inline struct iio_trigger *to_iio_trigger(struct device *d) +{ + return container_of(d, struct iio_trigger, dev); +} + +static inline void iio_trigger_put(struct iio_trigger *trig) +{ + module_put(trig->owner); + put_device(&trig->dev); +} + +static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig) +{ + get_device(&trig->dev); + __module_get(trig->owner); + + return trig; +} + +/** + * iio_device_set_drvdata() - Set trigger driver data + * @trig: IIO trigger structure + * @data: Driver specific data + * + * Allows to attach an arbitrary pointer to an IIO trigger, which can later be + * retrieved by iio_trigger_get_drvdata(). + */ +static inline void iio_trigger_set_drvdata(struct iio_trigger *trig, void *data) +{ + dev_set_drvdata(&trig->dev, data); +} + +/** + * iio_trigger_get_drvdata() - Get trigger driver data + * @trig: IIO trigger structure + * + * Returns the data previously set with iio_trigger_set_drvdata() + */ +static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig) +{ + return dev_get_drvdata(&trig->dev); +} + +/** + * iio_trigger_register() - register a trigger with the IIO core + * @trig_info: trigger to be registered + **/ +#define iio_trigger_register(trig_info) \ + __iio_trigger_register((trig_info), THIS_MODULE) +int __iio_trigger_register(struct iio_trigger *trig_info, + struct module *this_mod); + +#define devm_iio_trigger_register(dev, trig_info) \ + __devm_iio_trigger_register((dev), (trig_info), THIS_MODULE) +int __devm_iio_trigger_register(struct device *dev, + struct iio_trigger *trig_info, + struct module *this_mod); + +/** + * iio_trigger_unregister() - unregister a trigger from the core + * @trig_info: trigger to be unregistered + **/ +void iio_trigger_unregister(struct iio_trigger *trig_info); + +void devm_iio_trigger_unregister(struct device *dev, + struct iio_trigger *trig_info); + +/** + * iio_trigger_set_immutable() - set an immutable trigger on destination + * + * @indio_dev: IIO device structure containing the device + * @trig: trigger to assign to device + * + **/ +int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig); + +/** + * iio_trigger_poll() - called on a trigger occurring + * @trig: trigger which occurred + * + * Typically called in relevant hardware interrupt handler. 
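+ *
+ * Editorial usage sketch, not part of the upstream header (my_drdy_irq is
+ * hypothetical; the trigger is assumed to have been passed as dev_id when
+ * the interrupt was requested):
+ *
+ *    static irqreturn_t my_drdy_irq(int irq, void *private)
+ *    {
+ *            iio_trigger_poll(private);
+ *            return IRQ_HANDLED;
+ *    }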
+ **/ +void iio_trigger_poll(struct iio_trigger *trig); +void iio_trigger_poll_chained(struct iio_trigger *trig); + +irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private); + +__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...); +void iio_trigger_free(struct iio_trigger *trig); + +/** + * iio_trigger_using_own() - tells us if we use our own HW trigger ourselves + * @indio_dev: device to check + */ +bool iio_trigger_using_own(struct iio_dev *indio_dev); + +int iio_trigger_validate_own_device(struct iio_trigger *trig, + struct iio_dev *indio_dev); + +#else +struct iio_trigger; +struct iio_trigger_ops; +#endif +#endif /* _IIO_TRIGGER_H_ */ diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h new file mode 100644 index 000000000..c4f8c7409 --- /dev/null +++ b/include/linux/iio/trigger_consumer.h @@ -0,0 +1,63 @@ +/* The industrial I/O core, trigger consumer functions + * + * Copyright (c) 2008-2011 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef __LINUX_IIO_TRIGGER_CONSUMER_H__ +#define __LINUX_IIO_TRIGGER_CONSUMER_H__ + +#include +#include + +struct iio_dev; +struct iio_trigger; + +/** + * struct iio_poll_func - poll function pair + * + * @indio_dev: data specific to device (passed into poll func) + * @h: the function that is actually run on trigger + * @thread: threaded interrupt part + * @type: the type of interrupt (basically if oneshot) + * @name: name used to identify the trigger consumer. + * @irq: the corresponding irq as allocated from the + * trigger pool + * @timestamp: some devices need a timestamp grabbed as soon + * as possible after the trigger - hence handler + * passes it via here. 
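+ *
+ * Editorial note, not part of the upstream header: most drivers never fill
+ * this structure by hand; it is created by iio_alloc_pollfunc() or,
+ * indirectly, by iio_triggered_buffer_setup(), e.g. (my_trigger_handler is
+ * hypothetical):
+ *
+ *    ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+ *                                     my_trigger_handler, NULL);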
+ **/ +struct iio_poll_func { + struct iio_dev *indio_dev; + irqreturn_t (*h)(int irq, void *p); + irqreturn_t (*thread)(int irq, void *p); + int type; + char *name; + int irq; + s64 timestamp; +}; + + +struct iio_poll_func +*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p), + irqreturn_t (*thread)(int irq, void *p), + int type, + struct iio_dev *indio_dev, + const char *fmt, + ...); +void iio_dealloc_pollfunc(struct iio_poll_func *pf); +irqreturn_t iio_pollfunc_store_time(int irq, void *p); + +void iio_trigger_notify_done(struct iio_trigger *trig); + +/* + * Two functions for common case where all that happens is a pollfunc + * is attached and detached from a trigger + */ +int iio_triggered_buffer_postenable(struct iio_dev *indio_dev); +int iio_triggered_buffer_predisable(struct iio_dev *indio_dev); + +#endif diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h new file mode 100644 index 000000000..238ad30ce --- /dev/null +++ b/include/linux/iio/triggered_buffer.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_ +#define _LINUX_IIO_TRIGGERED_BUFFER_H_ + +#include + +struct iio_dev; +struct iio_buffer_setup_ops; + +int iio_triggered_buffer_setup(struct iio_dev *indio_dev, + irqreturn_t (*h)(int irq, void *p), + irqreturn_t (*thread)(int irq, void *p), + const struct iio_buffer_setup_ops *setup_ops); +void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev); + +int devm_iio_triggered_buffer_setup(struct device *dev, + struct iio_dev *indio_dev, + irqreturn_t (*h)(int irq, void *p), + irqreturn_t (*thread)(int irq, void *p), + const struct iio_buffer_setup_ops *ops); +void devm_iio_triggered_buffer_cleanup(struct device *dev, + struct iio_dev *indio_dev); + +#endif diff --git a/include/linux/iio/triggered_event.h b/include/linux/iio/triggered_event.h new file mode 100644 index 000000000..13250fd99 --- /dev/null +++ b/include/linux/iio/triggered_event.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IIO_TRIGGERED_EVENT_H_ +#define _LINUX_IIO_TRIGGERED_EVENT_H_ + +#include + +int iio_triggered_event_setup(struct iio_dev *indio_dev, + irqreturn_t (*h)(int irq, void *p), + irqreturn_t (*thread)(int irq, void *p)); +void iio_triggered_event_cleanup(struct iio_dev *indio_dev); + +#endif diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h new file mode 100644 index 000000000..6eb3d683e --- /dev/null +++ b/include/linux/iio/types.h @@ -0,0 +1,65 @@ +/* industrial I/O data types needed both in and out of kernel + * + * Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#ifndef _IIO_TYPES_H_ +#define _IIO_TYPES_H_ + +#include + +enum iio_event_info { + IIO_EV_INFO_ENABLE, + IIO_EV_INFO_VALUE, + IIO_EV_INFO_HYSTERESIS, + IIO_EV_INFO_PERIOD, + IIO_EV_INFO_HIGH_PASS_FILTER_3DB, + IIO_EV_INFO_LOW_PASS_FILTER_3DB, +}; + +#define IIO_VAL_INT 1 +#define IIO_VAL_INT_PLUS_MICRO 2 +#define IIO_VAL_INT_PLUS_NANO 3 +#define IIO_VAL_INT_PLUS_MICRO_DB 4 +#define IIO_VAL_INT_MULTIPLE 5 +#define IIO_VAL_FRACTIONAL 10 +#define IIO_VAL_FRACTIONAL_LOG2 11 + +enum iio_available_type { + IIO_AVAIL_LIST, + IIO_AVAIL_RANGE, +}; + +enum iio_chan_info_enum { + IIO_CHAN_INFO_RAW = 0, + IIO_CHAN_INFO_PROCESSED, + IIO_CHAN_INFO_SCALE, + IIO_CHAN_INFO_OFFSET, + IIO_CHAN_INFO_CALIBSCALE, + IIO_CHAN_INFO_CALIBBIAS, + IIO_CHAN_INFO_PEAK, + IIO_CHAN_INFO_PEAK_SCALE, + IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, + IIO_CHAN_INFO_AVERAGE_RAW, + IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, + IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY, + IIO_CHAN_INFO_SAMP_FREQ, + IIO_CHAN_INFO_FREQUENCY, + IIO_CHAN_INFO_PHASE, + IIO_CHAN_INFO_HARDWAREGAIN, + IIO_CHAN_INFO_HYSTERESIS, + IIO_CHAN_INFO_INT_TIME, + IIO_CHAN_INFO_ENABLE, + IIO_CHAN_INFO_CALIBHEIGHT, + IIO_CHAN_INFO_CALIBWEIGHT, + IIO_CHAN_INFO_DEBOUNCE_COUNT, + IIO_CHAN_INFO_DEBOUNCE_TIME, + IIO_CHAN_INFO_CALIBEMISSIVITY, + IIO_CHAN_INFO_OVERSAMPLING_RATIO, +}; + +#endif /* _IIO_TYPES_H_ */ diff --git a/include/linux/ima.h b/include/linux/ima.h new file mode 100644 index 000000000..97914a283 --- /dev/null +++ b/include/linux/ima.h @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2008 IBM Corporation + * Author: Mimi Zohar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
+ */ + +#ifndef _LINUX_IMA_H +#define _LINUX_IMA_H + +#include +#include +#include +struct linux_binprm; + +#ifdef CONFIG_IMA +extern int ima_bprm_check(struct linux_binprm *bprm); +extern int ima_file_check(struct file *file, int mask); +extern void ima_file_free(struct file *file); +extern int ima_file_mmap(struct file *file, unsigned long prot); +extern int ima_load_data(enum kernel_load_data_id id); +extern int ima_read_file(struct file *file, enum kernel_read_file_id id); +extern int ima_post_read_file(struct file *file, void *buf, loff_t size, + enum kernel_read_file_id id); +extern void ima_post_path_mknod(struct dentry *dentry); + +#ifdef CONFIG_IMA_KEXEC +extern void ima_add_kexec_buffer(struct kimage *image); +#endif + +#else +static inline int ima_bprm_check(struct linux_binprm *bprm) +{ + return 0; +} + +static inline int ima_file_check(struct file *file, int mask) +{ + return 0; +} + +static inline void ima_file_free(struct file *file) +{ + return; +} + +static inline int ima_file_mmap(struct file *file, unsigned long prot) +{ + return 0; +} + +static inline int ima_load_data(enum kernel_load_data_id id) +{ + return 0; +} + +static inline int ima_read_file(struct file *file, enum kernel_read_file_id id) +{ + return 0; +} + +static inline int ima_post_read_file(struct file *file, void *buf, loff_t size, + enum kernel_read_file_id id) +{ + return 0; +} + +static inline void ima_post_path_mknod(struct dentry *dentry) +{ + return; +} + +#endif /* CONFIG_IMA */ + +#ifndef CONFIG_IMA_KEXEC +struct kimage; + +static inline void ima_add_kexec_buffer(struct kimage *image) +{} +#endif + +#ifdef CONFIG_IMA_APPRAISE +extern bool is_ima_appraise_enabled(void); +extern void ima_inode_post_setattr(struct dentry *dentry); +extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, + const void *xattr_value, size_t xattr_value_len); +extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name); +#else +static inline bool is_ima_appraise_enabled(void) +{ + return 0; +} + +static inline void ima_inode_post_setattr(struct dentry *dentry) +{ + return; +} + +static inline int ima_inode_setxattr(struct dentry *dentry, + const char *xattr_name, + const void *xattr_value, + size_t xattr_value_len) +{ + return 0; +} + +static inline int ima_inode_removexattr(struct dentry *dentry, + const char *xattr_name) +{ + return 0; +} +#endif /* CONFIG_IMA_APPRAISE */ +#endif /* _LINUX_IMA_H */ diff --git a/include/linux/imx-media.h b/include/linux/imx-media.h new file mode 100644 index 000000000..77221ecad --- /dev/null +++ b/include/linux/imx-media.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2014-2017 Mentor Graphics Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the + * License, or (at your option) any later version + */ + +#ifndef __LINUX_IMX_MEDIA_H__ +#define __LINUX_IMX_MEDIA_H__ + +/* + * events from the subdevs + */ +#define V4L2_EVENT_IMX_CLASS V4L2_EVENT_PRIVATE_START +#define V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR (V4L2_EVENT_IMX_CLASS + 1) + +enum imx_ctrl_id { + V4L2_CID_IMX_FIM_ENABLE = (V4L2_CID_USER_IMX_BASE + 0), + V4L2_CID_IMX_FIM_NUM, + V4L2_CID_IMX_FIM_TOLERANCE_MIN, + V4L2_CID_IMX_FIM_TOLERANCE_MAX, + V4L2_CID_IMX_FIM_NUM_SKIP, + V4L2_CID_IMX_FIM_ICAP_EDGE, + V4L2_CID_IMX_FIM_ICAP_CHANNEL, +}; + +#endif diff --git a/include/linux/in.h b/include/linux/in.h new file mode 100644 index 000000000..31b493734 --- /dev/null +++ b/include/linux/in.h @@ -0,0 +1,104 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions of the Internet Protocol. + * + * Version: @(#)in.h 1.0.1 04/21/93 + * + * Authors: Original taken from the GNU Project file. + * Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IN_H +#define _LINUX_IN_H + + +#include +#include + +static inline int proto_ports_offset(int proto) +{ + switch (proto) { + case IPPROTO_TCP: + case IPPROTO_UDP: + case IPPROTO_DCCP: + case IPPROTO_ESP: /* SPI */ + case IPPROTO_SCTP: + case IPPROTO_UDPLITE: + return 0; + case IPPROTO_AH: /* SPI */ + return 4; + default: + return -EINVAL; + } +} + +static inline bool ipv4_is_loopback(__be32 addr) +{ + return (addr & htonl(0xff000000)) == htonl(0x7f000000); +} + +static inline bool ipv4_is_multicast(__be32 addr) +{ + return (addr & htonl(0xf0000000)) == htonl(0xe0000000); +} + +static inline bool ipv4_is_local_multicast(__be32 addr) +{ + return (addr & htonl(0xffffff00)) == htonl(0xe0000000); +} + +static inline bool ipv4_is_lbcast(__be32 addr) +{ + /* limited broadcast */ + return addr == htonl(INADDR_BROADCAST); +} + +static inline bool ipv4_is_zeronet(__be32 addr) +{ + return (addr & htonl(0xff000000)) == htonl(0x00000000); +} + +/* Special-Use IPv4 Addresses (RFC3330) */ + +static inline bool ipv4_is_private_10(__be32 addr) +{ + return (addr & htonl(0xff000000)) == htonl(0x0a000000); +} + +static inline bool ipv4_is_private_172(__be32 addr) +{ + return (addr & htonl(0xfff00000)) == htonl(0xac100000); +} + +static inline bool ipv4_is_private_192(__be32 addr) +{ + return (addr & htonl(0xffff0000)) == htonl(0xc0a80000); +} + +static inline bool ipv4_is_linklocal_169(__be32 addr) +{ + return (addr & htonl(0xffff0000)) == htonl(0xa9fe0000); +} + +static inline bool ipv4_is_anycast_6to4(__be32 addr) +{ + return (addr & htonl(0xffffff00)) == htonl(0xc0586300); +} + +static inline bool ipv4_is_test_192(__be32 addr) +{ + return (addr & htonl(0xffffff00)) == htonl(0xc0000200); +} + +static inline bool ipv4_is_test_198(__be32 addr) +{ + return (addr & htonl(0xfffe0000)) == htonl(0xc6120000); +} +#endif /* _LINUX_IN_H */ diff --git a/include/linux/in6.h b/include/linux/in6.h new file mode 100644 index 000000000..34edf1f6c --- /dev/null +++ 
b/include/linux/in6.h @@ -0,0 +1,48 @@ +/* + * Types and definitions for AF_INET6 + * Linux INET6 implementation + * + * Authors: + * Pedro Roque + * + * Sources: + * IPv6 Program Interfaces for BSD Systems + * + * + * Advanced Sockets API for IPv6 + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IN6_H +#define _LINUX_IN6_H + +#include + +/* IPv6 Wildcard Address (::) and Loopback Address (::1) defined in RFC2553 + * NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined + * in network byte order, not in host byte order as are the IPv4 equivalents + */ +extern const struct in6_addr in6addr_any; +#define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } } +extern const struct in6_addr in6addr_loopback; +#define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } +extern const struct in6_addr in6addr_linklocal_allnodes; +#define IN6ADDR_LINKLOCAL_ALLNODES_INIT \ + { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } +extern const struct in6_addr in6addr_linklocal_allrouters; +#define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \ + { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } } +extern const struct in6_addr in6addr_interfacelocal_allnodes; +#define IN6ADDR_INTERFACELOCAL_ALLNODES_INIT \ + { { { 0xff,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } +extern const struct in6_addr in6addr_interfacelocal_allrouters; +#define IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT \ + { { { 0xff,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } } +extern const struct in6_addr in6addr_sitelocal_allrouters; +#define IN6ADDR_SITELOCAL_ALLROUTERS_INIT \ + { { { 0xff,5,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } } +#endif diff --git a/include/linux/inet.h b/include/linux/inet.h new file mode 100644 index 000000000..97defc113 --- /dev/null +++ b/include/linux/inet.h @@ -0,0 +1,64 @@ +/* + * Swansea University Computer Society NET3 + * + * This work is derived from NET2Debugged, which is in turn derived + * from NET2D which was written by: + * Fred N. van Kempen, + * + * This work was derived from Ross Biro's inspirational work + * for the LINUX operating system. 
His version numbers were: + * + * $Id: Space.c,v 0.8.4.5 1992/12/12 19:25:04 bir7 Exp $ + * $Id: arp.c,v 0.8.4.6 1993/01/28 22:30:00 bir7 Exp $ + * $Id: arp.h,v 0.8.4.6 1993/01/28 22:30:00 bir7 Exp $ + * $Id: dev.c,v 0.8.4.13 1993/01/23 18:00:11 bir7 Exp $ + * $Id: dev.h,v 0.8.4.7 1993/01/23 18:00:11 bir7 Exp $ + * $Id: eth.c,v 0.8.4.4 1993/01/22 23:21:38 bir7 Exp $ + * $Id: eth.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $ + * $Id: icmp.c,v 0.8.4.9 1993/01/23 18:00:11 bir7 Exp $ + * $Id: icmp.h,v 0.8.4.2 1992/11/15 14:55:30 bir7 Exp $ + * $Id: ip.c,v 0.8.4.8 1992/12/12 19:25:04 bir7 Exp $ + * $Id: ip.h,v 0.8.4.2 1993/01/23 18:00:11 bir7 Exp $ + * $Id: loopback.c,v 0.8.4.8 1993/01/23 18:00:11 bir7 Exp $ + * $Id: packet.c,v 0.8.4.7 1993/01/26 22:04:00 bir7 Exp $ + * $Id: protocols.c,v 0.8.4.3 1992/11/15 14:55:30 bir7 Exp $ + * $Id: raw.c,v 0.8.4.12 1993/01/26 22:04:00 bir7 Exp $ + * $Id: sock.c,v 0.8.4.6 1993/01/28 22:30:00 bir7 Exp $ + * $Id: sock.h,v 0.8.4.7 1993/01/26 22:04:00 bir7 Exp $ + * $Id: tcp.c,v 0.8.4.16 1993/01/26 22:04:00 bir7 Exp $ + * $Id: tcp.h,v 0.8.4.7 1993/01/22 22:58:08 bir7 Exp $ + * $Id: timer.c,v 0.8.4.8 1993/01/23 18:00:11 bir7 Exp $ + * $Id: timer.h,v 0.8.4.2 1993/01/23 18:00:11 bir7 Exp $ + * $Id: udp.c,v 0.8.4.12 1993/01/26 22:04:00 bir7 Exp $ + * $Id: udp.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $ + * $Id: we.c,v 0.8.4.10 1993/01/23 18:00:11 bir7 Exp $ + * $Id: wereg.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_INET_H +#define _LINUX_INET_H + +#include +#include +#include + +/* + * These mimic similar macros defined in user-space for inet_ntop(3). + * See /usr/include/netinet/in.h . 
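+ *
+ * Editorial usage sketch, not part of the upstream header: the macros below
+ * are mainly used to size buffers for printing addresses, e.g.
+ *
+ *    char buf[INET6_ADDRSTRLEN];
+ *
+ *    snprintf(buf, sizeof(buf), "%pI6c", &addr);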
+ */ +#define INET_ADDRSTRLEN (16) +#define INET6_ADDRSTRLEN (48) + +extern __be32 in_aton(const char *str); +extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end); +extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end); + +extern int inet_pton_with_scope(struct net *net, unsigned short af, + const char *src, const char *port, struct sockaddr_storage *addr); +extern bool inet_addr_is_any(struct sockaddr *addr); + +#endif /* _LINUX_INET_H */ diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h new file mode 100644 index 000000000..c91cf2dee --- /dev/null +++ b/include/linux/inet_diag.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _INET_DIAG_H_ +#define _INET_DIAG_H_ 1 + +#include +#include + +struct inet_hashinfo; + +struct inet_diag_handler { + void (*dump)(struct sk_buff *skb, + struct netlink_callback *cb, + const struct inet_diag_req_v2 *r, + struct nlattr *bc); + + int (*dump_one)(struct sk_buff *in_skb, + const struct nlmsghdr *nlh, + const struct inet_diag_req_v2 *req); + + void (*idiag_get_info)(struct sock *sk, + struct inet_diag_msg *r, + void *info); + + int (*idiag_get_aux)(struct sock *sk, + bool net_admin, + struct sk_buff *skb); + + size_t (*idiag_get_aux_size)(struct sock *sk, + bool net_admin); + + int (*destroy)(struct sk_buff *in_skb, + const struct inet_diag_req_v2 *req); + + __u16 idiag_type; + __u16 idiag_info_size; +}; + +struct inet_connection_sock; +int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, + struct sk_buff *skb, const struct inet_diag_req_v2 *req, + struct user_namespace *user_ns, + u32 pid, u32 seq, u16 nlmsg_flags, + const struct nlmsghdr *unlh, bool net_admin); +void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, + struct netlink_callback *cb, + const struct inet_diag_req_v2 *r, + struct nlattr *bc); +int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, + struct sk_buff *in_skb, const struct nlmsghdr *nlh, + const struct inet_diag_req_v2 *req); + +struct sock *inet_diag_find_one_icsk(struct net *net, + struct inet_hashinfo *hashinfo, + const struct inet_diag_req_v2 *req); + +int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); + +void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk); + +static inline size_t inet_diag_msg_attrs_size(void) +{ + return nla_total_size(1) /* INET_DIAG_SHUTDOWN */ + + nla_total_size(1) /* INET_DIAG_TOS */ +#if IS_ENABLED(CONFIG_IPV6) + + nla_total_size(1) /* INET_DIAG_TCLASS */ + + nla_total_size(1) /* INET_DIAG_SKV6ONLY */ +#endif + + nla_total_size(4) /* INET_DIAG_MARK */ + + nla_total_size(4); /* INET_DIAG_CLASS_ID */ +} +int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, + struct inet_diag_msg *r, int ext, + struct user_namespace *user_ns, bool net_admin); + +extern int inet_diag_register(const struct inet_diag_handler *handler); +extern void inet_diag_unregister(const struct inet_diag_handler *handler); +#endif /* _INET_DIAG_H_ */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h new file mode 100644 index 000000000..131f93f8d --- /dev/null +++ b/include/linux/inetdevice.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_INETDEVICE_H +#define _LINUX_INETDEVICE_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct ipv4_devconf { + void *sysctl; + int data[IPV4_DEVCONF_MAX]; + DECLARE_BITMAP(state, IPV4_DEVCONF_MAX); 
+}; + +#define MC_HASH_SZ_LOG 9 + +struct in_device { + struct net_device *dev; + refcount_t refcnt; + int dead; + struct in_ifaddr *ifa_list; /* IP ifaddr chain */ + + struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */ + struct ip_mc_list __rcu * __rcu *mc_hash; + + int mc_count; /* Number of installed mcasts */ + spinlock_t mc_tomb_lock; + struct ip_mc_list *mc_tomb; + unsigned long mr_v1_seen; + unsigned long mr_v2_seen; + unsigned long mr_maxdelay; + unsigned long mr_qi; /* Query Interval */ + unsigned long mr_qri; /* Query Response Interval */ + unsigned char mr_qrv; /* Query Robustness Variable */ + unsigned char mr_gq_running; + u32 mr_ifc_count; + struct timer_list mr_gq_timer; /* general query timer */ + struct timer_list mr_ifc_timer; /* interface change timer */ + + struct neigh_parms *arp_parms; + struct ipv4_devconf cnf; + struct rcu_head rcu_head; +}; + +#define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1]) +#define IPV4_DEVCONF_ALL(net, attr) \ + IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr) + +static inline int ipv4_devconf_get(struct in_device *in_dev, int index) +{ + index--; + return in_dev->cnf.data[index]; +} + +static inline void ipv4_devconf_set(struct in_device *in_dev, int index, + int val) +{ + index--; + set_bit(index, in_dev->cnf.state); + in_dev->cnf.data[index] = val; +} + +static inline void ipv4_devconf_setall(struct in_device *in_dev) +{ + bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX); +} + +#define IN_DEV_CONF_GET(in_dev, attr) \ + ipv4_devconf_get((in_dev), IPV4_DEVCONF_ ## attr) +#define IN_DEV_CONF_SET(in_dev, attr, val) \ + ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val)) + +#define IN_DEV_ANDCONF(in_dev, attr) \ + (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \ + IN_DEV_CONF_GET((in_dev), attr)) + +#define IN_DEV_NET_ORCONF(in_dev, net, attr) \ + (IPV4_DEVCONF_ALL(net, attr) || \ + IN_DEV_CONF_GET((in_dev), attr)) + +#define IN_DEV_ORCONF(in_dev, attr) \ + IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr) + +#define IN_DEV_MAXCONF(in_dev, attr) \ + (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \ + IN_DEV_CONF_GET((in_dev), attr))) + +#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) +#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) +#define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), BC_FORWARDING) +#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) +#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) +#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ + ACCEPT_SOURCE_ROUTE) +#define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL) +#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) + +#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS) +#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP) +#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN) +#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA) +#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS) +#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \ + SECURE_REDIRECTS) +#define IN_DEV_IDTAG(in_dev) IN_DEV_CONF_GET(in_dev, TAG) +#define IN_DEV_MEDIUM_ID(in_dev) IN_DEV_CONF_GET(in_dev, MEDIUM_ID) +#define IN_DEV_PROMOTE_SECONDARIES(in_dev) \ + IN_DEV_ORCONF((in_dev), \ + PROMOTE_SECONDARIES) +#define IN_DEV_ROUTE_LOCALNET(in_dev) IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET) +#define 
IN_DEV_NET_ROUTE_LOCALNET(in_dev, net) \ + IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET) + +#define IN_DEV_RX_REDIRECTS(in_dev) \ + ((IN_DEV_FORWARD(in_dev) && \ + IN_DEV_ANDCONF((in_dev), ACCEPT_REDIRECTS)) \ + || (!IN_DEV_FORWARD(in_dev) && \ + IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) + +#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \ + IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN) + +#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) +#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT) +#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) +#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) +#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) + +struct in_ifaddr { + struct hlist_node hash; + struct in_ifaddr *ifa_next; + struct in_device *ifa_dev; + struct rcu_head rcu_head; + __be32 ifa_local; + __be32 ifa_address; + __be32 ifa_mask; + __u32 ifa_rt_priority; + __be32 ifa_broadcast; + unsigned char ifa_scope; + unsigned char ifa_prefixlen; + __u32 ifa_flags; + char ifa_label[IFNAMSIZ]; + + /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */ + __u32 ifa_valid_lft; + __u32 ifa_preferred_lft; + unsigned long ifa_cstamp; /* created timestamp */ + unsigned long ifa_tstamp; /* updated timestamp */ +}; + +struct in_validator_info { + __be32 ivi_addr; + struct in_device *ivi_dev; + struct netlink_ext_ack *extack; +}; + +int register_inetaddr_notifier(struct notifier_block *nb); +int unregister_inetaddr_notifier(struct notifier_block *nb); +int register_inetaddr_validator_notifier(struct notifier_block *nb); +int unregister_inetaddr_validator_notifier(struct notifier_block *nb); + +void inet_netconf_notify_devconf(struct net *net, int event, int type, + int ifindex, struct ipv4_devconf *devconf); + +struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref); +static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) +{ + return __ip_dev_find(net, addr, true); +} + +int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); +int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); +void devinet_init(void); +struct in_device *inetdev_by_index(struct net *, int); +__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); +__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, + __be32 local, int scope); +struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, + __be32 mask); +struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr); +static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) +{ + return !((addr^ifa->ifa_address)&ifa->ifa_mask); +} + +/* + * Check if a mask is acceptable. 
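+ *
+ * Editorial examples, not part of the upstream header: a usable netmask must
+ * have all of its one bits contiguous at the top, and the address must not
+ * have bits set outside the mask, e.g. with inet_make_mask() from further
+ * down in this header:
+ *
+ *    inet_make_mask(24) == htonl(0xffffff00)
+ *    bad_mask(htonl(0xffffff00), htonl(0xc0a80100)) == false  (192.168.1.0/24)
+ *    bad_mask(htonl(0xff00ff00), 0) == true   (mask bits not contiguous)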
+ */ + +static __inline__ bool bad_mask(__be32 mask, __be32 addr) +{ + __u32 hmask; + if (addr & (mask = ~mask)) + return true; + hmask = ntohl(mask); + if (hmask & (hmask+1)) + return true; + return false; +} + +#define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \ + for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next) + +#define for_ifa(in_dev) { struct in_ifaddr *ifa; \ + for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next) + + +#define endfor_ifa(in_dev) } + +static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) +{ + return rcu_dereference(dev->ip_ptr); +} + +static inline struct in_device *in_dev_get(const struct net_device *dev) +{ + struct in_device *in_dev; + + rcu_read_lock(); + in_dev = __in_dev_get_rcu(dev); + if (in_dev) + refcount_inc(&in_dev->refcnt); + rcu_read_unlock(); + return in_dev; +} + +static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) +{ + return rtnl_dereference(dev->ip_ptr); +} + +static inline struct neigh_parms *__in_dev_arp_parms_get_rcu(const struct net_device *dev) +{ + struct in_device *in_dev = __in_dev_get_rcu(dev); + + return in_dev ? in_dev->arp_parms : NULL; +} + +void in_dev_finish_destroy(struct in_device *idev); + +static inline void in_dev_put(struct in_device *idev) +{ + if (refcount_dec_and_test(&idev->refcnt)) + in_dev_finish_destroy(idev); +} + +#define __in_dev_put(idev) refcount_dec(&(idev)->refcnt) +#define in_dev_hold(idev) refcount_inc(&(idev)->refcnt) + +#endif /* __KERNEL__ */ + +static __inline__ __be32 inet_make_mask(int logmask) +{ + if (logmask) + return htonl(~((1U<<(32-logmask))-1)); + return 0; +} + +static __inline__ int inet_mask_len(__be32 mask) +{ + __u32 hmask = ntohl(mask); + if (!hmask) + return 0; + return 32 - ffz(~hmask); +} + + +#endif /* _LINUX_INETDEVICE_H */ diff --git a/include/linux/init.h b/include/linux/init.h new file mode 100644 index 000000000..2538d176d --- /dev/null +++ b/include/linux/init.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_INIT_H +#define _LINUX_INIT_H + +#include +#include + +/* Built-in __init functions needn't be compiled with retpoline */ +#if defined(__noretpoline) && !defined(MODULE) +#define __noinitretpoline __noretpoline +#else +#define __noinitretpoline +#endif + +/* These macros are used to mark some functions or + * initialized data (doesn't apply to uninitialized data) + * as `initialization' functions. The kernel can take this + * as hint that the function is used only during the initialization + * phase and free up used memory resources after + * + * Usage: + * For functions: + * + * You should add __init immediately before the function name, like: + * + * static void __init initme(int x, int y) + * { + * extern int z; z = x * y; + * } + * + * If the function has a prototype somewhere, you can also add + * __init between closing brace of the prototype and semicolon: + * + * extern int initialize_foobar_device(int, int, int) __init; + * + * For initialized data: + * You should insert __initdata or __initconst between the variable name + * and equal sign followed by value, e.g.: + * + * static int init_variable __initdata = 0; + * static const char linux_logo[] __initconst = { 0x32, 0x36, ... }; + * + * Don't forget to initialize data not at file scope, i.e. within a function, + * as gcc otherwise puts the data into the bss section and not into the init + * section. 
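Tying the annotations above together: a short, hypothetical example in which both the data and the function live in init sections and are discarded once boot-time initialization is over (the initcall macro used for registration is defined later in this header):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Only needed while booting; freed together with the other init sections. */
static const int example_coeffs[] __initconst = { 1, 2, 4, 8 };
static int example_sum __initdata;

static int __init example_compute_sum(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_coeffs); i++)
		example_sum += example_coeffs[i];

	pr_info("example: coefficient sum %d\n", example_sum);
	return 0;
}
device_initcall(example_compute_sum);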
+ */ + +/* These are for everybody (although not all archs will actually + discard it in modules) */ +#define __init __section(.init.text) __cold __latent_entropy __noinitretpoline +#define __initdata __section(.init.data) +#define __initconst __section(.init.rodata) +#define __exitdata __section(.exit.data) +#define __exit_call __used __section(.exitcall.exit) + +/* + * modpost check for section mismatches during the kernel build. + * A section mismatch happens when there are references from a + * code or data section to an init section (both code or data). + * The init sections are (for most archs) discarded by the kernel + * when early init has completed so all such references are potential bugs. + * For exit sections the same issue exists. + * + * The following markers are used for the cases where the reference to + * the *init / *exit section (code or data) is valid and will teach + * modpost not to issue a warning. Intended semantics is that a code or + * data tagged __ref* can reference code or data from init section without + * producing a warning (of course, no warning does not mean code is + * correct, so optimally document why the __ref is needed and why it's OK). + * + * The markers follow same syntax rules as __init / __initdata. + */ +#define __ref __section(.ref.text) noinline +#define __refdata __section(.ref.data) +#define __refconst __section(.ref.rodata) + +#ifdef MODULE +#define __exitused +#else +#define __exitused __used +#endif + +#define __exit __section(.exit.text) __exitused __cold notrace + +/* Used for MEMORY_HOTPLUG */ +#define __meminit __section(.meminit.text) __cold notrace \ + __latent_entropy +#define __meminitdata __section(.meminit.data) +#define __meminitconst __section(.meminit.rodata) +#define __memexit __section(.memexit.text) __exitused __cold notrace +#define __memexitdata __section(.memexit.data) +#define __memexitconst __section(.memexit.rodata) + +/* For assembly routines */ +#define __HEAD .section ".head.text","ax" +#define __INIT .section ".init.text","ax" +#define __FINIT .previous + +#define __INITDATA .section ".init.data","aw",%progbits +#define __INITRODATA .section ".init.rodata","a",%progbits +#define __FINITDATA .previous + +#define __MEMINIT .section ".meminit.text", "ax" +#define __MEMINITDATA .section ".meminit.data", "aw" +#define __MEMINITRODATA .section ".meminit.rodata", "a" + +/* silence warnings when references are OK */ +#define __REF .section ".ref.text", "ax" +#define __REFDATA .section ".ref.data", "aw" +#define __REFCONST .section ".ref.rodata", "a" + +#ifndef __ASSEMBLY__ +/* + * Used for initialization calls.. + */ +typedef int (*initcall_t)(void); +typedef void (*exitcall_t)(void); + +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +typedef int initcall_entry_t; + +static inline initcall_t initcall_from_entry(initcall_entry_t *entry) +{ + return offset_to_ptr(entry); +} +#else +typedef initcall_t initcall_entry_t; + +static inline initcall_t initcall_from_entry(initcall_entry_t *entry) +{ + return *entry; +} +#endif + +extern initcall_entry_t __con_initcall_start[], __con_initcall_end[]; +extern initcall_entry_t __security_initcall_start[], __security_initcall_end[]; + +/* Used for contructor calls. 
*/ +typedef void (*ctor_fn_t)(void); + +/* Defined in init/main.c */ +extern int do_one_initcall(initcall_t fn); +extern char __initdata boot_command_line[]; +extern char *saved_command_line; +extern unsigned int reset_devices; + +/* used by init/main.c */ +void setup_arch(char **); +void prepare_namespace(void); +void __init load_default_modules(void); +int __init init_rootfs(void); + +#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX) +extern bool rodata_enabled; +#endif +#ifdef CONFIG_STRICT_KERNEL_RWX +void mark_rodata_ro(void); +#endif + +extern void (*late_time_init)(void); + +extern bool initcall_debug; + +#endif + +#ifndef MODULE + +#ifndef __ASSEMBLY__ + +/* + * initcalls are now grouped by functionality into separate + * subsections. Ordering inside the subsections is determined + * by link order. + * For backwards compatibility, initcall() puts the call in + * the device init subsection. + * + * The `id' arg to __define_initcall() is needed so that multiple initcalls + * can point at the same handler without causing duplicate-symbol build errors. + * + * Initcalls are run by placing pointers in initcall sections that the + * kernel iterates at runtime. The linker can do dead code / data elimination + * and remove that completely, so the initcall sections have to be marked + * as KEEP() in the linker script. + */ + +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +#define ___define_initcall(fn, id, __sec) \ + __ADDRESSABLE(fn) \ + asm(".section \"" #__sec ".init\", \"a\" \n" \ + "__initcall_" #fn #id ": \n" \ + ".long " #fn " - . \n" \ + ".previous \n"); +#else +#define ___define_initcall(fn, id, __sec) \ + static initcall_t __initcall_##fn##id __used \ + __attribute__((__section__(#__sec ".init"))) = fn; +#endif + +#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id) + +/* + * Early initcalls run before initializing SMP. + * + * Only for built-in code, not modules. + */ +#define early_initcall(fn) __define_initcall(fn, early) + +/* + * A "pure" initcall has no dependencies on anything else, and purely + * initializes variables that couldn't be statically initialized. + * + * This only exists for built-in code, not for modules. + * Keep main.c:initcall_level_names[] in sync. 
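As the comment above notes, an initcall is simply an entry in a dedicated section that the kernel walks at boot; a hypothetical built-in user only has to name the function in one of the macros. early_initcall() is used here because it is defined immediately above; the level-numbered variants that follow work the same way:

#include <linux/init.h>
#include <linux/printk.h>

static int __init example_early_init(void)
{
	pr_info("example: runs before SMP bring-up\n");
	return 0;
}
/*
 * Expands to an entry in the early initcall section: a function pointer,
 * or with CONFIG_HAVE_ARCH_PREL32_RELOCATIONS a 32-bit offset that
 * initcall_from_entry() converts back into a pointer at boot.
 */
early_initcall(example_early_init);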
+ */ +#define pure_initcall(fn) __define_initcall(fn, 0) + +#define core_initcall(fn) __define_initcall(fn, 1) +#define core_initcall_sync(fn) __define_initcall(fn, 1s) +#define postcore_initcall(fn) __define_initcall(fn, 2) +#define postcore_initcall_sync(fn) __define_initcall(fn, 2s) +#define arch_initcall(fn) __define_initcall(fn, 3) +#define arch_initcall_sync(fn) __define_initcall(fn, 3s) +#define subsys_initcall(fn) __define_initcall(fn, 4) +#define subsys_initcall_sync(fn) __define_initcall(fn, 4s) +#define fs_initcall(fn) __define_initcall(fn, 5) +#define fs_initcall_sync(fn) __define_initcall(fn, 5s) +#define rootfs_initcall(fn) __define_initcall(fn, rootfs) +#define device_initcall(fn) __define_initcall(fn, 6) +#define device_initcall_sync(fn) __define_initcall(fn, 6s) +#define late_initcall(fn) __define_initcall(fn, 7) +#define late_initcall_sync(fn) __define_initcall(fn, 7s) + +#define __initcall(fn) device_initcall(fn) + +#define __exitcall(fn) \ + static exitcall_t __exitcall_##fn __exit_call = fn + +#define console_initcall(fn) ___define_initcall(fn,, .con_initcall) +#define security_initcall(fn) ___define_initcall(fn,, .security_initcall) + +struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; + +/* + * Only for really core code. See moduleparam.h for the normal way. + * + * Force the alignment so the compiler doesn't space elements of the + * obs_kernel_param "array" too far apart in .init.setup. + */ +#define __setup_param(str, unique_id, fn, early) \ + static const char __setup_str_##unique_id[] __initconst \ + __aligned(1) = str; \ + static struct obs_kernel_param __setup_##unique_id \ + __used __section(.init.setup) \ + __attribute__((aligned((sizeof(long))))) \ + = { __setup_str_##unique_id, fn, early } + +#define __setup(str, fn) \ + __setup_param(str, fn, fn, 0) + +/* + * NOTE: fn is as per module_param, not __setup! + * Emits warning if fn returns non-zero. 
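The return-value conventions differ between the two boot-parameter hooks defined here, which the note above hints at: __setup() handlers return 1 once they have consumed the option, while early_param() handlers follow the module_param convention of returning 0 on success. A hypothetical pair:

#include <linux/init.h>
#include <linux/kernel.h>

static bool example_verbose;

static int __init example_verbose_setup(char *str)
{
	example_verbose = true;
	return 1;			/* option handled */
}
__setup("example_verbose", example_verbose_setup);

static int __init example_debug_setup(char *arg)
{
	pr_info("example_debug=%s\n", arg ? arg : "");
	return 0;			/* non-zero would emit a warning */
}
early_param("example_debug", example_debug_setup);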
+ */ +#define early_param(str, fn) \ + __setup_param(str, fn, fn, 1) + +#define early_param_on_off(str_on, str_off, var, config) \ + \ + int var = IS_ENABLED(config); \ + \ + static int __init parse_##var##_on(char *arg) \ + { \ + var = 1; \ + return 0; \ + } \ + __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \ + \ + static int __init parse_##var##_off(char *arg) \ + { \ + var = 0; \ + return 0; \ + } \ + __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1) + +/* Relies on boot_command_line being set */ +void __init parse_early_param(void); +void __init parse_early_options(char *cmdline); +#endif /* __ASSEMBLY__ */ + +#else /* MODULE */ + +#define __setup_param(str, unique_id, fn) /* nothing */ +#define __setup(str, func) /* nothing */ +#endif + +/* Data marked not to be saved by software suspend */ +#define __nosavedata __section(.data..nosave) + +#ifdef MODULE +#define __exit_p(x) x +#else +#define __exit_p(x) NULL +#endif + +#endif /* _LINUX_INIT_H */ diff --git a/include/linux/init_ohci1394_dma.h b/include/linux/init_ohci1394_dma.h new file mode 100644 index 000000000..228afca43 --- /dev/null +++ b/include/linux/init_ohci1394_dma.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT +extern int __initdata init_ohci1394_dma_early; +extern void __init init_ohci1394_dma_on_all_controllers(void); +#endif diff --git a/include/linux/init_task.h b/include/linux/init_task.h new file mode 100644 index 000000000..a7083a45a --- /dev/null +++ b/include/linux/init_task.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX__INIT_TASK_H +#define _LINUX__INIT_TASK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +extern struct files_struct init_files; +extern struct fs_struct init_fs; +extern struct nsproxy init_nsproxy; +extern struct group_info init_groups; +extern struct cred init_cred; + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +#define INIT_PREV_CPUTIME(x) .prev_cputime = { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock), \ +}, +#else +#define INIT_PREV_CPUTIME(x) +#endif + +#ifdef CONFIG_POSIX_TIMERS +#define INIT_CPU_TIMERS(s) \ + .cpu_timers = { \ + LIST_HEAD_INIT(s.cpu_timers[0]), \ + LIST_HEAD_INIT(s.cpu_timers[1]), \ + LIST_HEAD_INIT(s.cpu_timers[2]), \ + }, +#else +#define INIT_CPU_TIMERS(s) +#endif + +#define INIT_TASK_COMM "swapper" + +/* Attach to the init_task data structure for proper alignment */ +#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK +#define __init_task_data __attribute__((__section__(".data..init_task"))) +#else +#define __init_task_data /**/ +#endif + +/* Attach to the thread_info data structure for proper alignment */ +#define __init_thread_info __attribute__((__section__(".data..init_thread_info"))) + +#endif diff --git a/include/linux/initrd.h b/include/linux/initrd.h new file mode 100644 index 000000000..84b423044 --- /dev/null +++ b/include/linux/initrd.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... 
*/ + +/* 1 = load ramdisk, 0 = don't load */ +extern int rd_doload; + +/* 1 = prompt for ramdisk, 0 = don't prompt */ +extern int rd_prompt; + +/* starting block # of image */ +extern int rd_image_start; + +/* size of a single RAM disk */ +extern unsigned long rd_size; + +/* 1 if it is not an error if initrd_start < memory_start */ +extern int initrd_below_start_ok; + +/* free_initrd_mem always gets called with the next two as arguments.. */ +extern unsigned long initrd_start, initrd_end; +extern void free_initrd_mem(unsigned long, unsigned long); + +extern unsigned int real_root_dev; diff --git a/include/linux/inotify.h b/include/linux/inotify.h new file mode 100644 index 000000000..6a24905f6 --- /dev/null +++ b/include/linux/inotify.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Inode based directory notification for Linux + * + * Copyright (C) 2005 John McCutchan + */ +#ifndef _LINUX_INOTIFY_H +#define _LINUX_INOTIFY_H + +#include +#include + +extern struct ctl_table inotify_table[]; /* for sysctl */ + +#define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \ + IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \ + IN_MOVED_TO | IN_CREATE | IN_DELETE | \ + IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | \ + IN_Q_OVERFLOW | IN_IGNORED | IN_ONLYDIR | \ + IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD | \ + IN_MASK_CREATE | IN_ISDIR | IN_ONESHOT) + +#endif /* _LINUX_INOTIFY_H */ diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h new file mode 100644 index 000000000..246518267 --- /dev/null +++ b/include/linux/input-polldev.h @@ -0,0 +1,61 @@ +#ifndef _INPUT_POLLDEV_H +#define _INPUT_POLLDEV_H + +/* + * Copyright (c) 2007 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include +#include + +/** + * struct input_polled_dev - simple polled input device + * @private: private driver data. + * @open: driver-supplied method that prepares device for polling + * (enabled the device and maybe flushes device state). + * @close: driver-supplied method that is called when device is no + * longer being polled. Used to put device into low power mode. + * @poll: driver-supplied method that polls the device and posts + * input events (mandatory). + * @poll_interval: specifies how often the poll() method should be called. + * Defaults to 500 msec unless overridden when registering the device. + * @poll_interval_max: specifies upper bound for the poll interval. + * Defaults to the initial value of @poll_interval. + * @poll_interval_min: specifies lower bound for the poll interval. + * Defaults to 0. + * @input: input device structure associated with the polled device. + * Must be properly initialized by the driver (id, name, phys, bits). + * + * Polled input device provides a skeleton for supporting simple input + * devices that do not raise interrupts but have to be periodically + * scanned or polled to detect changes in their state. 
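A minimal sketch of the polled-device skeleton described above, assuming a hypothetical GPIO-connected button; the allocation and registration helpers used are the ones declared just below:

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/input-polldev.h>

#define EXAMPLE_BTN_GPIO	42	/* illustrative pin number */

static void example_poll(struct input_polled_dev *poll_dev)
{
	struct input_dev *input = poll_dev->input;

	input_report_key(input, KEY_ENTER, gpio_get_value(EXAMPLE_BTN_GPIO));
	input_sync(input);
}

static int example_register(struct device *parent)
{
	struct input_polled_dev *poll_dev;

	poll_dev = devm_input_allocate_polled_device(parent);
	if (!poll_dev)
		return -ENOMEM;

	poll_dev->poll = example_poll;
	poll_dev->poll_interval = 50;		/* msec */

	poll_dev->input->name = "Example Polled Button";
	input_set_capability(poll_dev->input, EV_KEY, KEY_ENTER);

	return input_register_polled_device(poll_dev);
}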
+ */ +struct input_polled_dev { + void *private; + + void (*open)(struct input_polled_dev *dev); + void (*close)(struct input_polled_dev *dev); + void (*poll)(struct input_polled_dev *dev); + unsigned int poll_interval; /* msec */ + unsigned int poll_interval_max; /* msec */ + unsigned int poll_interval_min; /* msec */ + + struct input_dev *input; + +/* private: */ + struct delayed_work work; + + bool devres_managed; +}; + +struct input_polled_dev *input_allocate_polled_device(void); +struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev); +void input_free_polled_device(struct input_polled_dev *dev); +int input_register_polled_device(struct input_polled_dev *dev); +void input_unregister_polled_device(struct input_polled_dev *dev); + +#endif diff --git a/include/linux/input.h b/include/linux/input.h new file mode 100644 index 000000000..7c7516eb7 --- /dev/null +++ b/include/linux/input.h @@ -0,0 +1,544 @@ +/* + * Copyright (c) 1999-2002 Vojtech Pavlik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ +#ifndef _INPUT_H +#define _INPUT_H + +#include +#include +#include +/* Implementation details, userspace should not care about these */ +#define ABS_MT_FIRST ABS_MT_TOUCH_MAJOR +#define ABS_MT_LAST ABS_MT_TOOL_Y + +/* + * In-kernel definitions. + */ + +#include +#include +#include +#include + +/** + * struct input_value - input value representation + * @type: type of value (EV_KEY, EV_ABS, etc) + * @code: the value code + * @value: the value + */ +struct input_value { + __u16 type; + __u16 code; + __s32 value; +}; + +/** + * struct input_dev - represents an input device + * @name: name of the device + * @phys: physical path to the device in the system hierarchy + * @uniq: unique identification code for the device (if device has it) + * @id: id of the device (struct input_id) + * @propbit: bitmap of device properties and quirks + * @evbit: bitmap of types of events supported by the device (EV_KEY, + * EV_REL, etc.) + * @keybit: bitmap of keys/buttons this device has + * @relbit: bitmap of relative axes for the device + * @absbit: bitmap of absolute axes for the device + * @mscbit: bitmap of miscellaneous events supported by the device + * @ledbit: bitmap of leds present on the device + * @sndbit: bitmap of sound effects supported by the device + * @ffbit: bitmap of force feedback effects supported by the device + * @swbit: bitmap of switches present on the device + * @hint_events_per_packet: average number of events generated by the + * device in a packet (between EV_SYN/SYN_REPORT events). Used by + * event handlers to estimate size of the buffer needed to hold + * events. + * @keycodemax: size of keycode table + * @keycodesize: size of elements in keycode table + * @keycode: map of scancodes to keycodes for this device + * @getkeycode: optional legacy method to retrieve current keymap. + * @setkeycode: optional method to alter current keymap, used to implement + * sparse keymaps. If not supplied default mechanism will be used. 
+ * The method is being called while holding event_lock and thus must + * not sleep + * @ff: force feedback structure associated with the device if device + * supports force feedback effects + * @repeat_key: stores key code of the last key pressed; used to implement + * software autorepeat + * @timer: timer for software autorepeat + * @rep: current values for autorepeat parameters (delay, rate) + * @mt: pointer to multitouch state + * @absinfo: array of &struct input_absinfo elements holding information + * about absolute axes (current value, min, max, flat, fuzz, + * resolution) + * @key: reflects current state of device's keys/buttons + * @led: reflects current state of device's LEDs + * @snd: reflects current state of sound effects + * @sw: reflects current state of device's switches + * @open: this method is called when the very first user calls + * input_open_device(). The driver must prepare the device + * to start generating events (start polling thread, + * request an IRQ, submit URB, etc.) + * @close: this method is called when the very last user calls + * input_close_device(). + * @flush: purges the device. Most commonly used to get rid of force + * feedback effects loaded into the device when disconnecting + * from it + * @event: event handler for events sent _to_ the device, like EV_LED + * or EV_SND. The device is expected to carry out the requested + * action (turn on a LED, play sound, etc.) The call is protected + * by @event_lock and must not sleep + * @grab: input handle that currently has the device grabbed (via + * EVIOCGRAB ioctl). When a handle grabs a device it becomes sole + * recipient for all input events coming from the device + * @event_lock: this spinlock is taken when input core receives + * and processes a new event for the device (in input_event()). + * Code that accesses and/or modifies parameters of a device + * (such as keymap or absmin, absmax, absfuzz, etc.) after device + * has been registered with input core must take this lock. + * @mutex: serializes calls to open(), close() and flush() methods + * @users: stores number of users (input handlers) that opened this + * device. It is used by input_open_device() and input_close_device() + * to make sure that dev->open() is only called when the first + * user opens device and dev->close() is called when the very + * last user closes the device + * @going_away: marks devices that are in a middle of unregistering and + * causes input_open_device*() fail with -ENODEV. + * @dev: driver model's view of this device + * @h_list: list of input handles associated with the device. When + * accessing the list dev->mutex must be held + * @node: used to place the device onto input_dev_list + * @num_vals: number of values queued in the current frame + * @max_vals: maximum number of values queued in a frame + * @vals: array of values queued in the current frame + * @devres_managed: indicates that devices is managed with devres framework + * and needs not be explicitly unregistered or freed. 
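A minimal, hypothetical open-coded registration that exercises the life cycle described above: allocate, describe capabilities, register, and free on failure (devm_input_allocate_device() would remove the explicit error handling):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/input.h>

static struct input_dev *example_button_dev;

static int __init example_input_init(void)
{
	int error;

	example_button_dev = input_allocate_device();
	if (!example_button_dev)
		return -ENOMEM;

	example_button_dev->name = "Example Button";
	example_button_dev->id.bustype = BUS_HOST;

	/* The device emits EV_KEY events for BTN_0. */
	input_set_capability(example_button_dev, EV_KEY, BTN_0);

	error = input_register_device(example_button_dev);
	if (error) {
		input_free_device(example_button_dev);
		return error;
	}
	return 0;
}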
+ */ +struct input_dev { + const char *name; + const char *phys; + const char *uniq; + struct input_id id; + + unsigned long propbit[BITS_TO_LONGS(INPUT_PROP_CNT)]; + + unsigned long evbit[BITS_TO_LONGS(EV_CNT)]; + unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; + unsigned long relbit[BITS_TO_LONGS(REL_CNT)]; + unsigned long absbit[BITS_TO_LONGS(ABS_CNT)]; + unsigned long mscbit[BITS_TO_LONGS(MSC_CNT)]; + unsigned long ledbit[BITS_TO_LONGS(LED_CNT)]; + unsigned long sndbit[BITS_TO_LONGS(SND_CNT)]; + unsigned long ffbit[BITS_TO_LONGS(FF_CNT)]; + unsigned long swbit[BITS_TO_LONGS(SW_CNT)]; + + unsigned int hint_events_per_packet; + + unsigned int keycodemax; + unsigned int keycodesize; + void *keycode; + + int (*setkeycode)(struct input_dev *dev, + const struct input_keymap_entry *ke, + unsigned int *old_keycode); + int (*getkeycode)(struct input_dev *dev, + struct input_keymap_entry *ke); + + struct ff_device *ff; + + unsigned int repeat_key; + struct timer_list timer; + + int rep[REP_CNT]; + + struct input_mt *mt; + + struct input_absinfo *absinfo; + + unsigned long key[BITS_TO_LONGS(KEY_CNT)]; + unsigned long led[BITS_TO_LONGS(LED_CNT)]; + unsigned long snd[BITS_TO_LONGS(SND_CNT)]; + unsigned long sw[BITS_TO_LONGS(SW_CNT)]; + + int (*open)(struct input_dev *dev); + void (*close)(struct input_dev *dev); + int (*flush)(struct input_dev *dev, struct file *file); + int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); + + struct input_handle __rcu *grab; + + spinlock_t event_lock; + struct mutex mutex; + + unsigned int users; + bool going_away; + + struct device dev; + + struct list_head h_list; + struct list_head node; + + unsigned int num_vals; + unsigned int max_vals; + struct input_value *vals; + + bool devres_managed; +}; +#define to_input_dev(d) container_of(d, struct input_dev, dev) + +/* + * Verify that we are in sync with input_device_id mod_devicetable.h #defines + */ + +#if EV_MAX != INPUT_DEVICE_ID_EV_MAX +#error "EV_MAX and INPUT_DEVICE_ID_EV_MAX do not match" +#endif + +#if KEY_MIN_INTERESTING != INPUT_DEVICE_ID_KEY_MIN_INTERESTING +#error "KEY_MIN_INTERESTING and INPUT_DEVICE_ID_KEY_MIN_INTERESTING do not match" +#endif + +#if KEY_MAX != INPUT_DEVICE_ID_KEY_MAX +#error "KEY_MAX and INPUT_DEVICE_ID_KEY_MAX do not match" +#endif + +#if REL_MAX != INPUT_DEVICE_ID_REL_MAX +#error "REL_MAX and INPUT_DEVICE_ID_REL_MAX do not match" +#endif + +#if ABS_MAX != INPUT_DEVICE_ID_ABS_MAX +#error "ABS_MAX and INPUT_DEVICE_ID_ABS_MAX do not match" +#endif + +#if MSC_MAX != INPUT_DEVICE_ID_MSC_MAX +#error "MSC_MAX and INPUT_DEVICE_ID_MSC_MAX do not match" +#endif + +#if LED_MAX != INPUT_DEVICE_ID_LED_MAX +#error "LED_MAX and INPUT_DEVICE_ID_LED_MAX do not match" +#endif + +#if SND_MAX != INPUT_DEVICE_ID_SND_MAX +#error "SND_MAX and INPUT_DEVICE_ID_SND_MAX do not match" +#endif + +#if FF_MAX != INPUT_DEVICE_ID_FF_MAX +#error "FF_MAX and INPUT_DEVICE_ID_FF_MAX do not match" +#endif + +#if SW_MAX != INPUT_DEVICE_ID_SW_MAX +#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match" +#endif + +#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX +#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match" +#endif + +#define INPUT_DEVICE_ID_MATCH_DEVICE \ + (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT) +#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ + (INPUT_DEVICE_ID_MATCH_DEVICE | INPUT_DEVICE_ID_MATCH_VERSION) + +struct input_handle; + +/** + * struct input_handler - implements one of interfaces for input devices + * 
@private: driver-specific data + * @event: event handler. This method is being called by input core with + * interrupts disabled and dev->event_lock spinlock held and so + * it may not sleep + * @events: event sequence handler. This method is being called by + * input core with interrupts disabled and dev->event_lock + * spinlock held and so it may not sleep + * @filter: similar to @event; separates normal event handlers from + * "filters". + * @match: called after comparing device's id with handler's id_table + * to perform fine-grained matching between device and handler + * @connect: called when attaching a handler to an input device + * @disconnect: disconnects a handler from input device + * @start: starts handler for given handle. This function is called by + * input core right after connect() method and also when a process + * that "grabbed" a device releases it + * @legacy_minors: set to %true by drivers using legacy minor ranges + * @minor: beginning of range of 32 legacy minors for devices this driver + * can provide + * @name: name of the handler, to be shown in /proc/bus/input/handlers + * @id_table: pointer to a table of input_device_ids this driver can + * handle + * @h_list: list of input handles associated with the handler + * @node: for placing the driver onto input_handler_list + * + * Input handlers attach to input devices and create input handles. There + * are likely several handlers attached to any given input device at the + * same time. All of them will get their copy of input event generated by + * the device. + * + * The very same structure is used to implement input filters. Input core + * allows filters to run first and will not pass event to regular handlers + * if any of the filters indicate that the event should be filtered (by + * returning %true from their filter() method). + * + * Note that input core serializes calls to connect() and disconnect() + * methods. + */ +struct input_handler { + + void *private; + + void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value); + void (*events)(struct input_handle *handle, + const struct input_value *vals, unsigned int count); + bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value); + bool (*match)(struct input_handler *handler, struct input_dev *dev); + int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id); + void (*disconnect)(struct input_handle *handle); + void (*start)(struct input_handle *handle); + + bool legacy_minors; + int minor; + const char *name; + + const struct input_device_id *id_table; + + struct list_head h_list; + struct list_head node; +}; + +/** + * struct input_handle - links input device with an input handler + * @private: handler-specific data + * @open: counter showing whether the handle is 'open', i.e. 
should deliver + * events from its device + * @name: name given to the handle by handler that created it + * @dev: input device the handle is attached to + * @handler: handler that works with the device through this handle + * @d_node: used to put the handle on device's list of attached handles + * @h_node: used to put the handle on handler's list of handles from which + * it gets events + */ +struct input_handle { + + void *private; + + int open; + const char *name; + + struct input_dev *dev; + struct input_handler *handler; + + struct list_head d_node; + struct list_head h_node; +}; + +struct input_dev __must_check *input_allocate_device(void); +struct input_dev __must_check *devm_input_allocate_device(struct device *); +void input_free_device(struct input_dev *dev); + +static inline struct input_dev *input_get_device(struct input_dev *dev) +{ + return dev ? to_input_dev(get_device(&dev->dev)) : NULL; +} + +static inline void input_put_device(struct input_dev *dev) +{ + if (dev) + put_device(&dev->dev); +} + +static inline void *input_get_drvdata(struct input_dev *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void input_set_drvdata(struct input_dev *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +int __must_check input_register_device(struct input_dev *); +void input_unregister_device(struct input_dev *); + +void input_reset_device(struct input_dev *); + +int __must_check input_register_handler(struct input_handler *); +void input_unregister_handler(struct input_handler *); + +int __must_check input_get_new_minor(int legacy_base, unsigned int legacy_num, + bool allow_dynamic); +void input_free_minor(unsigned int minor); + +int input_handler_for_each_handle(struct input_handler *, void *data, + int (*fn)(struct input_handle *, void *)); + +int input_register_handle(struct input_handle *); +void input_unregister_handle(struct input_handle *); + +int input_grab_device(struct input_handle *); +void input_release_device(struct input_handle *); + +int input_open_device(struct input_handle *); +void input_close_device(struct input_handle *); + +int input_flush_device(struct input_handle *handle, struct file *file); + +void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); +void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); + +static inline void input_report_key(struct input_dev *dev, unsigned int code, int value) +{ + input_event(dev, EV_KEY, code, !!value); +} + +static inline void input_report_rel(struct input_dev *dev, unsigned int code, int value) +{ + input_event(dev, EV_REL, code, value); +} + +static inline void input_report_abs(struct input_dev *dev, unsigned int code, int value) +{ + input_event(dev, EV_ABS, code, value); +} + +static inline void input_report_ff_status(struct input_dev *dev, unsigned int code, int value) +{ + input_event(dev, EV_FF_STATUS, code, value); +} + +static inline void input_report_switch(struct input_dev *dev, unsigned int code, int value) +{ + input_event(dev, EV_SW, code, !!value); +} + +static inline void input_sync(struct input_dev *dev) +{ + input_event(dev, EV_SYN, SYN_REPORT, 0); +} + +static inline void input_mt_sync(struct input_dev *dev) +{ + input_event(dev, EV_SYN, SYN_MT_REPORT, 0); +} + +void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code); + +/** + * input_set_events_per_packet - tell handlers about the driver event rate + * @dev: the input device used by the driver + * @n_events: the average 
number of events between calls to input_sync() + * + * If the event rate sent from a device is unusually large, use this + * function to set the expected event rate. This will allow handlers + * to set up an appropriate buffer size for the event stream, in order + * to minimize information loss. + */ +static inline void input_set_events_per_packet(struct input_dev *dev, int n_events) +{ + dev->hint_events_per_packet = n_events; +} + +void input_alloc_absinfo(struct input_dev *dev); +void input_set_abs_params(struct input_dev *dev, unsigned int axis, + int min, int max, int fuzz, int flat); + +#define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \ +static inline int input_abs_get_##_suffix(struct input_dev *dev, \ + unsigned int axis) \ +{ \ + return dev->absinfo ? dev->absinfo[axis]._item : 0; \ +} \ + \ +static inline void input_abs_set_##_suffix(struct input_dev *dev, \ + unsigned int axis, int val) \ +{ \ + input_alloc_absinfo(dev); \ + if (dev->absinfo) \ + dev->absinfo[axis]._item = val; \ +} + +INPUT_GENERATE_ABS_ACCESSORS(val, value) +INPUT_GENERATE_ABS_ACCESSORS(min, minimum) +INPUT_GENERATE_ABS_ACCESSORS(max, maximum) +INPUT_GENERATE_ABS_ACCESSORS(fuzz, fuzz) +INPUT_GENERATE_ABS_ACCESSORS(flat, flat) +INPUT_GENERATE_ABS_ACCESSORS(res, resolution) + +int input_scancode_to_scalar(const struct input_keymap_entry *ke, + unsigned int *scancode); + +int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke); +int input_set_keycode(struct input_dev *dev, + const struct input_keymap_entry *ke); + +bool input_match_device_id(const struct input_dev *dev, + const struct input_device_id *id); + +void input_enable_softrepeat(struct input_dev *dev, int delay, int period); + +extern struct class input_class; + +/** + * struct ff_device - force-feedback part of an input device + * @upload: Called to upload an new effect into device + * @erase: Called to erase an effect from device + * @playback: Called to request device to start playing specified effect + * @set_gain: Called to set specified gain + * @set_autocenter: Called to auto-center device + * @destroy: called by input core when parent input device is being + * destroyed + * @private: driver-specific data, will be freed automatically + * @ffbit: bitmap of force feedback capabilities truly supported by + * device (not emulated like ones in input_dev->ffbit) + * @mutex: mutex for serializing access to the device + * @max_effects: maximum number of effects supported by device + * @effects: pointer to an array of effects currently loaded into device + * @effect_owners: array of effect owners; when file handle owning + * an effect gets closed the effect is automatically erased + * + * Every force-feedback device must implement upload() and playback() + * methods; erase() is optional. set_gain() and set_autocenter() need + * only be implemented if driver sets up FF_GAIN and FF_AUTOCENTER + * bits. + * + * Note that playback(), set_gain() and set_autocenter() are called with + * dev->event_lock spinlock held and interrupts off and thus may not + * sleep. 
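The absolute-axis helpers above (input_set_abs_params() and the generated input_abs_set_*() accessors) are typically paired with the input_report_*()/input_sync() calls declared earlier; a hypothetical 1024x768 touch surface might be set up and reported like this:

#include <linux/input.h>

static void example_setup_axes(struct input_dev *dev)
{
	input_set_capability(dev, EV_KEY, BTN_TOUCH);

	/* axis, min, max, fuzz, flat */
	input_set_abs_params(dev, ABS_X, 0, 1023, 4, 8);
	input_set_abs_params(dev, ABS_Y, 0, 767, 4, 8);
	input_abs_set_res(dev, ABS_X, 10);	/* units per mm, illustrative */
	input_abs_set_res(dev, ABS_Y, 10);
}

static void example_report_point(struct input_dev *dev, int x, int y)
{
	input_report_key(dev, BTN_TOUCH, 1);
	input_report_abs(dev, ABS_X, x);
	input_report_abs(dev, ABS_Y, y);
	input_sync(dev);			/* closes this event packet */
}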
+ */ +struct ff_device { + int (*upload)(struct input_dev *dev, struct ff_effect *effect, + struct ff_effect *old); + int (*erase)(struct input_dev *dev, int effect_id); + + int (*playback)(struct input_dev *dev, int effect_id, int value); + void (*set_gain)(struct input_dev *dev, u16 gain); + void (*set_autocenter)(struct input_dev *dev, u16 magnitude); + + void (*destroy)(struct ff_device *); + + void *private; + + unsigned long ffbit[BITS_TO_LONGS(FF_CNT)]; + + struct mutex mutex; + + int max_effects; + struct ff_effect *effects; + struct file *effect_owners[]; +}; + +int input_ff_create(struct input_dev *dev, unsigned int max_effects); +void input_ff_destroy(struct input_dev *dev); + +int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); + +int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file); +int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file); +int input_ff_flush(struct input_dev *dev, struct file *file); + +int input_ff_create_memless(struct input_dev *dev, void *data, + int (*play_effect)(struct input_dev *, void *, struct ff_effect *)); + +#endif diff --git a/include/linux/input/ad714x.h b/include/linux/input/ad714x.h new file mode 100644 index 000000000..d388d857b --- /dev/null +++ b/include/linux/input/ad714x.h @@ -0,0 +1,64 @@ +/* + * include/linux/input/ad714x.h + * + * AD714x is very flexible, it can be used as buttons, scrollwheel, + * slider, touchpad at the same time. That depends on the boards. + * The platform_data for the device's "struct device" holds this + * information. + * + * Copyright 2009-2011 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __LINUX_INPUT_AD714X_H__ +#define __LINUX_INPUT_AD714X_H__ + +#define STAGE_NUM 12 +#define STAGE_CFGREG_NUM 8 +#define SYS_CFGREG_NUM 8 + +/* board information which need be initialized in arch/mach... */ +struct ad714x_slider_plat { + int start_stage; + int end_stage; + int max_coord; +}; + +struct ad714x_wheel_plat { + int start_stage; + int end_stage; + int max_coord; +}; + +struct ad714x_touchpad_plat { + int x_start_stage; + int x_end_stage; + int x_max_coord; + + int y_start_stage; + int y_end_stage; + int y_max_coord; +}; + +struct ad714x_button_plat { + int keycode; + unsigned short l_mask; + unsigned short h_mask; +}; + +struct ad714x_platform_data { + int slider_num; + int wheel_num; + int touchpad_num; + int button_num; + struct ad714x_slider_plat *slider; + struct ad714x_wheel_plat *wheel; + struct ad714x_touchpad_plat *touchpad; + struct ad714x_button_plat *button; + unsigned short stage_cfg_reg[STAGE_NUM][STAGE_CFGREG_NUM]; + unsigned short sys_cfg_reg[SYS_CFGREG_NUM]; + unsigned long irqflags; +}; + +#endif diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h new file mode 100644 index 000000000..1a05eee15 --- /dev/null +++ b/include/linux/input/adp5589.h @@ -0,0 +1,188 @@ +/* + * Analog Devices ADP5589/ADP5585 I/O Expander and QWERTY Keypad Controller + * + * Copyright 2010-2011 Analog Devices Inc. + * + * Licensed under the GPL-2. 
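For hardware without its own effect memory, input_ff_create_memless() above lets a driver supply only a playback callback; everything around the callback below is hypothetical glue:

#include <linux/input.h>
#include <linux/printk.h>

static int example_play_effect(struct input_dev *dev, void *data,
			       struct ff_effect *effect)
{
	if (effect->type == FF_RUMBLE) {
		u16 strong = effect->u.rumble.strong_magnitude;
		u16 weak = effect->u.rumble.weak_magnitude;

		/* Device-specific motor programming would go here. */
		pr_debug("example rumble: strong %u weak %u\n", strong, weak);
	}
	return 0;
}

static int example_enable_ff(struct input_dev *dev, void *drvdata)
{
	input_set_capability(dev, EV_FF, FF_RUMBLE);
	return input_ff_create_memless(dev, drvdata, example_play_effect);
}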
+ */ + +#ifndef _ADP5589_H +#define _ADP5589_H + +/* + * ADP5589 specific GPI and Keymap defines + */ + +#define ADP5589_KEYMAPSIZE 88 + +#define ADP5589_GPI_PIN_ROW0 97 +#define ADP5589_GPI_PIN_ROW1 98 +#define ADP5589_GPI_PIN_ROW2 99 +#define ADP5589_GPI_PIN_ROW3 100 +#define ADP5589_GPI_PIN_ROW4 101 +#define ADP5589_GPI_PIN_ROW5 102 +#define ADP5589_GPI_PIN_ROW6 103 +#define ADP5589_GPI_PIN_ROW7 104 +#define ADP5589_GPI_PIN_COL0 105 +#define ADP5589_GPI_PIN_COL1 106 +#define ADP5589_GPI_PIN_COL2 107 +#define ADP5589_GPI_PIN_COL3 108 +#define ADP5589_GPI_PIN_COL4 109 +#define ADP5589_GPI_PIN_COL5 110 +#define ADP5589_GPI_PIN_COL6 111 +#define ADP5589_GPI_PIN_COL7 112 +#define ADP5589_GPI_PIN_COL8 113 +#define ADP5589_GPI_PIN_COL9 114 +#define ADP5589_GPI_PIN_COL10 115 +#define GPI_LOGIC1 116 +#define GPI_LOGIC2 117 + +#define ADP5589_GPI_PIN_ROW_BASE ADP5589_GPI_PIN_ROW0 +#define ADP5589_GPI_PIN_ROW_END ADP5589_GPI_PIN_ROW7 +#define ADP5589_GPI_PIN_COL_BASE ADP5589_GPI_PIN_COL0 +#define ADP5589_GPI_PIN_COL_END ADP5589_GPI_PIN_COL10 + +#define ADP5589_GPI_PIN_BASE ADP5589_GPI_PIN_ROW_BASE +#define ADP5589_GPI_PIN_END ADP5589_GPI_PIN_COL_END + +#define ADP5589_GPIMAPSIZE_MAX (ADP5589_GPI_PIN_END - ADP5589_GPI_PIN_BASE + 1) + +/* + * ADP5585 specific GPI and Keymap defines + */ + +#define ADP5585_KEYMAPSIZE 30 + +#define ADP5585_GPI_PIN_ROW0 37 +#define ADP5585_GPI_PIN_ROW1 38 +#define ADP5585_GPI_PIN_ROW2 39 +#define ADP5585_GPI_PIN_ROW3 40 +#define ADP5585_GPI_PIN_ROW4 41 +#define ADP5585_GPI_PIN_ROW5 42 +#define ADP5585_GPI_PIN_COL0 43 +#define ADP5585_GPI_PIN_COL1 44 +#define ADP5585_GPI_PIN_COL2 45 +#define ADP5585_GPI_PIN_COL3 46 +#define ADP5585_GPI_PIN_COL4 47 +#define GPI_LOGIC 48 + +#define ADP5585_GPI_PIN_ROW_BASE ADP5585_GPI_PIN_ROW0 +#define ADP5585_GPI_PIN_ROW_END ADP5585_GPI_PIN_ROW5 +#define ADP5585_GPI_PIN_COL_BASE ADP5585_GPI_PIN_COL0 +#define ADP5585_GPI_PIN_COL_END ADP5585_GPI_PIN_COL4 + +#define ADP5585_GPI_PIN_BASE ADP5585_GPI_PIN_ROW_BASE +#define ADP5585_GPI_PIN_END ADP5585_GPI_PIN_COL_END + +#define ADP5585_GPIMAPSIZE_MAX (ADP5585_GPI_PIN_END - ADP5585_GPI_PIN_BASE + 1) + +struct adp5589_gpi_map { + unsigned short pin; + unsigned short sw_evt; +}; + +/* scan_cycle_time */ +#define ADP5589_SCAN_CYCLE_10ms 0 +#define ADP5589_SCAN_CYCLE_20ms 1 +#define ADP5589_SCAN_CYCLE_30ms 2 +#define ADP5589_SCAN_CYCLE_40ms 3 + +/* RESET_CFG */ +#define RESET_PULSE_WIDTH_500us 0 +#define RESET_PULSE_WIDTH_1ms 1 +#define RESET_PULSE_WIDTH_2ms 2 +#define RESET_PULSE_WIDTH_10ms 3 + +#define RESET_TRIG_TIME_0ms (0 << 2) +#define RESET_TRIG_TIME_1000ms (1 << 2) +#define RESET_TRIG_TIME_1500ms (2 << 2) +#define RESET_TRIG_TIME_2000ms (3 << 2) +#define RESET_TRIG_TIME_2500ms (4 << 2) +#define RESET_TRIG_TIME_3000ms (5 << 2) +#define RESET_TRIG_TIME_3500ms (6 << 2) +#define RESET_TRIG_TIME_4000ms (7 << 2) + +#define RESET_PASSTHRU_EN (1 << 5) +#define RESET1_POL_HIGH (1 << 6) +#define RESET1_POL_LOW (0 << 6) +#define RESET2_POL_HIGH (1 << 7) +#define RESET2_POL_LOW (0 << 7) + +/* ADP5589 Mask Bits: + * C C C C C C C C C C C | R R R R R R R R + * 1 9 8 7 6 5 4 3 2 1 0 | 7 6 5 4 3 2 1 0 + * 0 + * ---------------- BIT ------------------ + * 1 1 1 1 1 1 1 1 1 0 0 | 0 0 0 0 0 0 0 0 + * 8 7 6 5 4 3 2 1 0 9 8 | 7 6 5 4 3 2 1 0 + */ + +#define ADP_ROW(x) (1 << (x)) +#define ADP_COL(x) (1 << (x + 8)) +#define ADP5589_ROW_MASK 0xFF +#define ADP5589_COL_MASK 0xFF +#define ADP5589_COL_SHIFT 8 +#define ADP5589_MAX_ROW_NUM 7 +#define ADP5589_MAX_COL_NUM 10 + +/* ADP5585 Mask Bits: + * C C C C C | R R R R 
R R + * 4 3 2 1 0 | 5 4 3 2 1 0 + * + * ---- BIT -- ----------- + * 1 0 0 0 0 | 0 0 0 0 0 0 + * 0 9 8 7 6 | 5 4 3 2 1 0 + */ + +#define ADP5585_ROW_MASK 0x3F +#define ADP5585_COL_MASK 0x1F +#define ADP5585_ROW_SHIFT 0 +#define ADP5585_COL_SHIFT 6 +#define ADP5585_MAX_ROW_NUM 5 +#define ADP5585_MAX_COL_NUM 4 + +#define ADP5585_ROW(x) (1 << ((x) & ADP5585_ROW_MASK)) +#define ADP5585_COL(x) (1 << (((x) & ADP5585_COL_MASK) + ADP5585_COL_SHIFT)) + +/* Put one of these structures in i2c_board_info platform_data */ + +struct adp5589_kpad_platform_data { + unsigned keypad_en_mask; /* Keypad (Rows/Columns) enable mask */ + const unsigned short *keymap; /* Pointer to keymap */ + unsigned short keymapsize; /* Keymap size */ + bool repeat; /* Enable key repeat */ + bool en_keylock; /* Enable key lock feature (ADP5589 only)*/ + unsigned char unlock_key1; /* Unlock Key 1 (ADP5589 only) */ + unsigned char unlock_key2; /* Unlock Key 2 (ADP5589 only) */ + unsigned char unlock_timer; /* Time in seconds [0..7] between the two unlock keys 0=disable (ADP5589 only) */ + unsigned char scan_cycle_time; /* Time between consecutive scan cycles */ + unsigned char reset_cfg; /* Reset config */ + unsigned short reset1_key_1; /* Reset Key 1 */ + unsigned short reset1_key_2; /* Reset Key 2 */ + unsigned short reset1_key_3; /* Reset Key 3 */ + unsigned short reset2_key_1; /* Reset Key 1 */ + unsigned short reset2_key_2; /* Reset Key 2 */ + unsigned debounce_dis_mask; /* Disable debounce mask */ + unsigned pull_dis_mask; /* Disable all pull resistors mask */ + unsigned pullup_en_100k; /* Pull-Up 100k Enable Mask */ + unsigned pullup_en_300k; /* Pull-Up 300k Enable Mask */ + unsigned pulldown_en_300k; /* Pull-Down 300k Enable Mask */ + const struct adp5589_gpi_map *gpimap; + unsigned short gpimapsize; + const struct adp5589_gpio_platform_data *gpio_data; +}; + +struct i2c_client; /* forward declaration */ + +struct adp5589_gpio_platform_data { + int gpio_start; /* GPIO Chip base # */ + int (*setup)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + void *context; +}; + +#endif diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h new file mode 100644 index 000000000..010d98175 --- /dev/null +++ b/include/linux/input/adxl34x.h @@ -0,0 +1,358 @@ +/* + * include/linux/input/adxl34x.h + * + * Digital Accelerometer characteristics are highly application specific + * and may vary between boards and models. The platform_data for the + * device's "struct device" holds this information. + * + * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __LINUX_INPUT_ADXL34X_H__ +#define __LINUX_INPUT_ADXL34X_H__ + +#include + +struct adxl34x_platform_data { + + /* + * X,Y,Z Axis Offset: + * offer user offset adjustments in twoscompliment + * form with a scale factor of 15.6 mg/LSB (i.e. 0x7F = +2 g) + */ + + s8 x_axis_offset; + s8 y_axis_offset; + s8 z_axis_offset; + + /* + * TAP_X/Y/Z Enable: Setting TAP_X, Y, or Z Enable enables X, + * Y, or Z participation in Tap detection. A '0' excludes the + * selected axis from participation in Tap detection. + * Setting the SUPPRESS bit suppresses Double Tap detection if + * acceleration greater than tap_threshold is present during the + * tap_latency period, i.e. after the first tap but before the + * opening of the second tap window. 
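Putting the adp5589_kpad_platform_data fields above together, a hypothetical board file might describe a four-key pad as follows (the header's comment says the structure is passed through i2c_board_info platform_data; how keymap indices map to row/column positions follows the controller's scan order and is not spelled out here):

#include <linux/kernel.h>
#include <linux/input.h>		/* KEY_* codes */
#include <linux/input/adp5589.h>

static const unsigned short example_keymap[ADP5589_KEYMAPSIZE] = {
	KEY_A, KEY_B, KEY_C, KEY_D,	/* unset entries stay 0 (no key) */
};

static struct adp5589_kpad_platform_data example_kpad_pdata = {
	.keypad_en_mask	 = ADP_ROW(0) | ADP_COL(0) | ADP_COL(1) |
			   ADP_COL(2) | ADP_COL(3),
	.keymap		 = example_keymap,
	.keymapsize	 = ARRAY_SIZE(example_keymap),
	.repeat		 = true,
	.scan_cycle_time = ADP5589_SCAN_CYCLE_10ms,
};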
+ */ + +#define ADXL_SUPPRESS (1 << 3) +#define ADXL_TAP_X_EN (1 << 2) +#define ADXL_TAP_Y_EN (1 << 1) +#define ADXL_TAP_Z_EN (1 << 0) + + u8 tap_axis_control; + + /* + * tap_threshold: + * holds the threshold value for tap detection/interrupts. + * The data format is unsigned. The scale factor is 62.5 mg/LSB + * (i.e. 0xFF = +16 g). A zero value may result in undesirable + * behavior if Tap/Double Tap is enabled. + */ + + u8 tap_threshold; + + /* + * tap_duration: + * is an unsigned time value representing the maximum + * time that an event must be above the tap_threshold threshold + * to qualify as a tap event. The scale factor is 625 us/LSB. A zero + * value will prevent Tap/Double Tap functions from working. + */ + + u8 tap_duration; + + /* + * tap_latency: + * is an unsigned time value representing the wait time + * from the detection of a tap event to the opening of the time + * window tap_window for a possible second tap event. The scale + * factor is 1.25 ms/LSB. A zero value will disable the Double Tap + * function. + */ + + u8 tap_latency; + + /* + * tap_window: + * is an unsigned time value representing the amount + * of time after the expiration of tap_latency during which a second + * tap can begin. The scale factor is 1.25 ms/LSB. A zero value will + * disable the Double Tap function. + */ + + u8 tap_window; + + /* + * act_axis_control: + * X/Y/Z Enable: A '1' enables X, Y, or Z participation in activity + * or inactivity detection. A '0' excludes the selected axis from + * participation. If all of the axes are excluded, the function is + * disabled. + * AC/DC: A '0' = DC coupled operation and a '1' = AC coupled + * operation. In DC coupled operation, the current acceleration is + * compared with activity_threshold and inactivity_threshold directly + * to determine whether activity or inactivity is detected. In AC + * coupled operation for activity detection, the acceleration value + * at the start of activity detection is taken as a reference value. + * New samples of acceleration are then compared to this + * reference value and if the magnitude of the difference exceeds + * activity_threshold the device will trigger an activity interrupt. In + * AC coupled operation for inactivity detection, a reference value + * is used again for comparison and is updated whenever the + * device exceeds the inactivity threshold. Once the reference + * value is selected, the device compares the magnitude of the + * difference between the reference value and the current + * acceleration with inactivity_threshold. If the difference is below + * inactivity_threshold for a total of inactivity_time, the device is + * considered inactive and the inactivity interrupt is triggered. + */ + +#define ADXL_ACT_ACDC (1 << 7) +#define ADXL_ACT_X_EN (1 << 6) +#define ADXL_ACT_Y_EN (1 << 5) +#define ADXL_ACT_Z_EN (1 << 4) +#define ADXL_INACT_ACDC (1 << 3) +#define ADXL_INACT_X_EN (1 << 2) +#define ADXL_INACT_Y_EN (1 << 1) +#define ADXL_INACT_Z_EN (1 << 0) + + u8 act_axis_control; + + /* + * activity_threshold: + * holds the threshold value for activity detection. + * The data format is unsigned. The scale factor is + * 62.5 mg/LSB. A zero value may result in undesirable behavior if + * Activity interrupt is enabled. + */ + + u8 activity_threshold; + + /* + * inactivity_threshold: + * holds the threshold value for inactivity + * detection. The data format is unsigned. The scale + * factor is 62.5 mg/LSB. A zero value may result in undesirable + * behavior if Inactivity interrupt is enabled. 
+ */ + + u8 inactivity_threshold; + + /* + * inactivity_time: + * is an unsigned time value representing the + * amount of time that acceleration must be below the value in + * inactivity_threshold for inactivity to be declared. The scale factor + * is 1 second/LSB. Unlike the other interrupt functions, which + * operate on unfiltered data, the inactivity function operates on the + * filtered output data. At least one output sample must be + * generated for the inactivity interrupt to be triggered. This will + * result in the function appearing un-responsive if the + * inactivity_time register is set with a value less than the time + * constant of the Output Data Rate. A zero value will result in an + * interrupt when the output data is below inactivity_threshold. + */ + + u8 inactivity_time; + + /* + * free_fall_threshold: + * holds the threshold value for Free-Fall detection. + * The data format is unsigned. The root-sum-square(RSS) value + * of all axes is calculated and compared to the value in + * free_fall_threshold to determine if a free fall event may be + * occurring. The scale factor is 62.5 mg/LSB. A zero value may + * result in undesirable behavior if Free-Fall interrupt is + * enabled. Values between 300 and 600 mg (0x05 to 0x09) are + * recommended. + */ + + u8 free_fall_threshold; + + /* + * free_fall_time: + * is an unsigned time value representing the minimum + * time that the RSS value of all axes must be less than + * free_fall_threshold to generate a Free-Fall interrupt. The + * scale factor is 5 ms/LSB. A zero value may result in + * undesirable behavior if Free-Fall interrupt is enabled. + * Values between 100 to 350 ms (0x14 to 0x46) are recommended. + */ + + u8 free_fall_time; + + /* + * data_rate: + * Selects device bandwidth and output data rate. + * RATE = 3200 Hz / (2^(15 - x)). Default value is 0x0A, or 100 Hz + * Output Data Rate. An Output Data Rate should be selected that + * is appropriate for the communication protocol and frequency + * selected. Selecting too high of an Output Data Rate with a low + * communication speed will result in samples being discarded. + */ + + u8 data_rate; + + /* + * data_range: + * FULL_RES: When this bit is set with the device is + * in Full-Resolution Mode, where the output resolution increases + * with RANGE to maintain a 4 mg/LSB scale factor. When this + * bit is cleared the device is in 10-bit Mode and RANGE determine the + * maximum g-Range and scale factor. + */ + +#define ADXL_FULL_RES (1 << 3) +#define ADXL_RANGE_PM_2g 0 +#define ADXL_RANGE_PM_4g 1 +#define ADXL_RANGE_PM_8g 2 +#define ADXL_RANGE_PM_16g 3 + + u8 data_range; + + /* + * low_power_mode: + * A '0' = Normal operation and a '1' = Reduced + * power operation with somewhat higher noise. + */ + + u8 low_power_mode; + + /* + * power_mode: + * LINK: A '1' with both the activity and inactivity functions + * enabled will delay the start of the activity function until + * inactivity is detected. Once activity is detected, inactivity + * detection will begin and prevent the detection of activity. This + * bit serially links the activity and inactivity functions. When '0' + * the inactivity and activity functions are concurrent. Additional + * information can be found in the ADXL34x datasheet's Application + * section under Link Mode. + * AUTO_SLEEP: A '1' sets the ADXL34x to switch to Sleep Mode + * when inactivity (acceleration has been below inactivity_threshold + * for at least inactivity_time) is detected and the LINK bit is set. 
+ * A '0' disables automatic switching to Sleep Mode. See the + * Sleep Bit section of the ADXL34x datasheet for more information. + */ + +#define ADXL_LINK (1 << 5) +#define ADXL_AUTO_SLEEP (1 << 4) + + u8 power_mode; + + /* + * fifo_mode: + * BYPASS The FIFO is bypassed + * FIFO FIFO collects up to 32 values then stops collecting data + * STREAM FIFO holds the last 32 data values. Once full, the FIFO's + * oldest data is lost as it is replaced with newer data + * + * DEFAULT should be ADXL_FIFO_STREAM + */ + +#define ADXL_FIFO_BYPASS 0 +#define ADXL_FIFO_FIFO 1 +#define ADXL_FIFO_STREAM 2 + + u8 fifo_mode; + + /* + * watermark: + * The Watermark feature can be used to reduce the interrupt load + * of the system. The FIFO fills up to the value stored in watermark + * [1..32] and then generates an interrupt. + * A '0' disables the watermark feature. + */ + + u8 watermark; + + /* + * When acceleration measurements are received from the ADXL34x + * events are sent to the event subsystem. The following settings + * select the event type and event code for new x, y and z axis data + * respectively. + */ + u32 ev_type; /* EV_ABS or EV_REL */ + + u32 ev_code_x; /* ABS_X,Y,Z or REL_X,Y,Z */ + u32 ev_code_y; /* ABS_X,Y,Z or REL_X,Y,Z */ + u32 ev_code_z; /* ABS_X,Y,Z or REL_X,Y,Z */ + + /* + * A valid BTN or KEY Code; use tap_axis_control to disable + * event reporting + */ + + u32 ev_code_tap[3]; /* EV_KEY {X-Axis, Y-Axis, Z-Axis} */ + + /* + * A valid BTN or KEY Code for Free-Fall or Activity enables + * input event reporting. A '0' disables the Free-Fall or + * Activity reporting. + */ + + u32 ev_code_ff; /* EV_KEY */ + u32 ev_code_act_inactivity; /* EV_KEY */ + + /* + * Use ADXL34x INT2 pin instead of INT1 pin for interrupt output + */ + u8 use_int2; + + /* + * ADXL346 only ORIENTATION SENSING feature + * The orientation function of the ADXL346 reports both 2-D and + * 3-D orientation concurrently. + */ + +#define ADXL_EN_ORIENTATION_2D 1 +#define ADXL_EN_ORIENTATION_3D 2 +#define ADXL_EN_ORIENTATION_2D_3D 3 + + u8 orientation_enable; + + /* + * The width of the deadzone region between two or more + * orientation positions is determined by setting the Deadzone + * value. The deadzone region size can be specified with a + * resolution of 3.6deg. The deadzone angle represents the total + * angle where the orientation is considered invalid. + */ + +#define ADXL_DEADZONE_ANGLE_0p0 0 /* !!!0.0 [deg] */ +#define ADXL_DEADZONE_ANGLE_3p6 1 /* 3.6 [deg] */ +#define ADXL_DEADZONE_ANGLE_7p2 2 /* 7.2 [deg] */ +#define ADXL_DEADZONE_ANGLE_10p8 3 /* 10.8 [deg] */ +#define ADXL_DEADZONE_ANGLE_14p4 4 /* 14.4 [deg] */ +#define ADXL_DEADZONE_ANGLE_18p0 5 /* 18.0 [deg] */ +#define ADXL_DEADZONE_ANGLE_21p6 6 /* 21.6 [deg] */ +#define ADXL_DEADZONE_ANGLE_25p2 7 /* 25.2 [deg] */ + + u8 deadzone_angle; + + /* + * To eliminate most human motion such as walking or shaking, + * a Divisor value should be selected to effectively limit the + * orientation bandwidth. 
Set the depth of the filter used to + * low-pass filter the measured acceleration for stable + * orientation sensing + */ + +#define ADXL_LP_FILTER_DIVISOR_2 0 +#define ADXL_LP_FILTER_DIVISOR_4 1 +#define ADXL_LP_FILTER_DIVISOR_8 2 +#define ADXL_LP_FILTER_DIVISOR_16 3 +#define ADXL_LP_FILTER_DIVISOR_32 4 +#define ADXL_LP_FILTER_DIVISOR_64 5 +#define ADXL_LP_FILTER_DIVISOR_128 6 +#define ADXL_LP_FILTER_DIVISOR_256 7 + + u8 divisor_length; + + u32 ev_codes_orient_2d[4]; /* EV_KEY {+X, -X, +Y, -Y} */ + u32 ev_codes_orient_3d[6]; /* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */ +}; +#endif diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h new file mode 100644 index 000000000..1affd0ddf --- /dev/null +++ b/include/linux/input/as5011.h @@ -0,0 +1,20 @@ +#ifndef _AS5011_H +#define _AS5011_H + +/* + * Copyright (c) 2010, 2011 Fabien Marteau + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +struct as5011_platform_data { + unsigned int button_gpio; + unsigned int axis_irq; /* irq number */ + unsigned long axis_irqflags; + char xp, xn; /* threshold for x axis */ + char yp, yn; /* threshold for y axis */ +}; + +#endif /* _AS5011_H */ diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h new file mode 100644 index 000000000..5049f2192 --- /dev/null +++ b/include/linux/input/auo-pixcir-ts.h @@ -0,0 +1,54 @@ +/* + * Driver for AUO in-cell touchscreens + * + * Copyright (c) 2011 Heiko Stuebner + * + * based on auo_touch.h from Dell Streak kernel + * + * Copyright (c) 2008 QUALCOMM Incorporated. + * Copyright (c) 2008 QUALCOMM USA, INC. + * + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
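Drawing on the field descriptions above, a hypothetical adxl34x_platform_data for simple tap reporting could look as follows; all register values are illustrative and scaled per the comments above (62.5 mg/LSB thresholds, 625 us/LSB tap duration):

#include <linux/input.h>		/* EV_*, ABS_*, BTN_* codes */
#include <linux/input/adxl34x.h>

static const struct adxl34x_platform_data example_adxl34x_pdata = {
	.tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN,
	.tap_threshold	  = 0x30,	/* ~3 g */
	.tap_duration	  = 0x10,	/* ~10 ms */

	.data_rate	  = 0x0A,	/* 100 Hz output data rate */
	.data_range	  = ADXL_FULL_RES | ADXL_RANGE_PM_4g,
	.fifo_mode	  = ADXL_FIFO_STREAM,

	.ev_type	  = EV_ABS,
	.ev_code_x	  = ABS_X,
	.ev_code_y	  = ABS_Y,
	.ev_code_z	  = ABS_Z,
	.ev_code_tap	  = { BTN_TOUCH, BTN_TOUCH, BTN_TOUCH },
};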
+ * + */ + +#ifndef __AUO_PIXCIR_TS_H__ +#define __AUO_PIXCIR_TS_H__ + +/* + * Interrupt modes: + * periodical: interrupt is asserted periodicaly + * compare coordinates: interrupt is asserted when coordinates change + * indicate touch: interrupt is asserted during touch + */ +#define AUO_PIXCIR_INT_PERIODICAL 0x00 +#define AUO_PIXCIR_INT_COMP_COORD 0x01 +#define AUO_PIXCIR_INT_TOUCH_IND 0x02 + +/* + * @gpio_int interrupt gpio + * @int_setting one of AUO_PIXCIR_INT_* + * @init_hw hardwarespecific init + * @exit_hw hardwarespecific shutdown + * @x_max x-resolution + * @y_max y-resolution + */ +struct auo_pixcir_ts_platdata { + int gpio_int; + int gpio_rst; + + int int_setting; + + unsigned int x_max; + unsigned int y_max; +}; + +#endif diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h new file mode 100644 index 000000000..6230d76bd --- /dev/null +++ b/include/linux/input/bu21013.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Naveen Kumar G for ST-Ericsson + * License terms:GNU General Public License (GPL) version 2 + */ + +#ifndef _BU21013_H +#define _BU21013_H + +/** + * struct bu21013_platform_device - Handle the platform data + * @touch_x_max: touch x max + * @touch_y_max: touch y max + * @cs_pin: chip select pin + * @touch_pin: touch gpio pin + * @ext_clk: external clock flag + * @x_flip: x flip flag + * @y_flip: y flip flag + * @wakeup: wakeup flag + * + * This is used to handle the platform data + */ +struct bu21013_platform_device { + int touch_x_max; + int touch_y_max; + unsigned int cs_pin; + unsigned int touch_pin; + bool ext_clk; + bool x_flip; + bool y_flip; + bool wakeup; +}; + +#endif diff --git a/include/linux/input/cma3000.h b/include/linux/input/cma3000.h new file mode 100644 index 000000000..cbbaac27d --- /dev/null +++ b/include/linux/input/cma3000.h @@ -0,0 +1,59 @@ +/* + * VTI CMA3000_Dxx Accelerometer driver + * + * Copyright (C) 2010 Texas Instruments + * Author: Hemanth V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
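/*
 * Illustrative sketch, not part of the upstream header: populating the
 * AUO PIXCIR platform data defined above. The GPIO numbers and panel
 * resolution are board-specific placeholders.
 */
#include <linux/input/auo-pixcir-ts.h>

static struct auo_pixcir_ts_platdata example_auo_pixcir_pdata = {
	.gpio_int    = 18,			/* interrupt GPIO (placeholder) */
	.gpio_rst    = 16,			/* reset GPIO (placeholder) */
	.int_setting = AUO_PIXCIR_INT_COMP_COORD, /* assert only when coordinates change */
	.x_max	     = 480,
	.y_max	     = 800,
};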
+ */ + +#ifndef _LINUX_CMA3000_H +#define _LINUX_CMA3000_H + +#define CMAMODE_DEFAULT 0 +#define CMAMODE_MEAS100 1 +#define CMAMODE_MEAS400 2 +#define CMAMODE_MEAS40 3 +#define CMAMODE_MOTDET 4 +#define CMAMODE_FF100 5 +#define CMAMODE_FF400 6 +#define CMAMODE_POFF 7 + +#define CMARANGE_2G 2000 +#define CMARANGE_8G 8000 + +/** + * struct cma3000_i2c_platform_data - CMA3000 Platform data + * @fuzz_x: Noise on X Axis + * @fuzz_y: Noise on Y Axis + * @fuzz_z: Noise on Z Axis + * @g_range: G range in milli g i.e 2000 or 8000 + * @mode: Operating mode + * @mdthr: Motion detect threshold value + * @mdfftmr: Motion detect and free fall time value + * @ffthr: Free fall threshold value + */ + +struct cma3000_platform_data { + int fuzz_x; + int fuzz_y; + int fuzz_z; + int g_range; + uint8_t mode; + uint8_t mdthr; + uint8_t mdfftmr; + uint8_t ffthr; + unsigned long irqflags; +}; + +#endif diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h new file mode 100644 index 000000000..77582ae17 --- /dev/null +++ b/include/linux/input/cy8ctmg110_pdata.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CY8CTMG110_PDATA_H +#define _LINUX_CY8CTMG110_PDATA_H + +struct cy8ctmg110_pdata +{ + int reset_pin; /* Reset pin is wired to this GPIO (optional) */ + int irq_pin; /* IRQ pin is wired to this GPIO */ +}; + +#endif diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h new file mode 100644 index 000000000..586c8c95d --- /dev/null +++ b/include/linux/input/cyttsp.h @@ -0,0 +1,43 @@ +/* + * Header file for: + * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. + * For use with Cypress Txx3xx parts. + * Supported parts include: + * CY8CTST341 + * CY8CTMA340 + * + * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. + * Copyright (C) 2012 Javier Martinez Canillas + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, and only version 2, as published by the + * Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
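/*
 * Illustrative sketch, not part of the upstream header: filling in the
 * CMA3000 platform data above. The fuzz, threshold and timer values are
 * placeholders that would normally come from board characterisation.
 */
#include <linux/interrupt.h>
#include <linux/input/cma3000.h>

static struct cma3000_platform_data example_cma3000_pdata = {
	.fuzz_x	  = 25,
	.fuzz_y	  = 25,
	.fuzz_z	  = 25,
	.g_range  = CMARANGE_2G,	/* 2000 mg full scale */
	.mode	  = CMAMODE_MEAS400,	/* 400 Hz measurement mode */
	.mdthr	  = 0x8,		/* motion detect threshold */
	.mdfftmr  = 0x33,		/* motion detect / free-fall timer */
	.ffthr	  = 0x8,		/* free-fall threshold */
	.irqflags = IRQF_TRIGGER_RISING,
};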
+ * + * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com) + * + */ +#ifndef _CYTTSP_H_ +#define _CYTTSP_H_ + +#define CY_SPI_NAME "cyttsp-spi" +#define CY_I2C_NAME "cyttsp-i2c" +/* Active Power state scanning/processing refresh interval */ +#define CY_ACT_INTRVL_DFLT 0x00 /* ms */ +/* touch timeout for the Active power */ +#define CY_TCH_TMOUT_DFLT 0xFF /* ms */ +/* Low Power state scanning/processing refresh interval */ +#define CY_LP_INTRVL_DFLT 0x0A /* ms */ +/* Active distance in pixels for a gesture to be reported */ +#define CY_ACT_DIST_DFLT 0xF8 /* pixels */ + +#endif /* _CYTTSP_H_ */ diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h new file mode 100644 index 000000000..3614a13a8 --- /dev/null +++ b/include/linux/input/gp2ap002a00f.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _GP2AP002A00F_H_ +#define _GP2AP002A00F_H_ + +#include + +#define GP2A_I2C_NAME "gp2ap002a00f" + +/** + * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data + * @vout_gpio: The gpio connected to the object detected pin (VOUT) + * @wakeup: Set to true if the proximity can wake the device from suspend + * @hw_setup: Callback for setting up hardware such as gpios and vregs + * @hw_shutdown: Callback for properly shutting down hardware + */ +struct gp2a_platform_data { + int vout_gpio; + bool wakeup; + int (*hw_setup)(struct i2c_client *client); + int (*hw_shutdown)(struct i2c_client *client); +}; + +#endif diff --git a/include/linux/input/ili210x.h b/include/linux/input/ili210x.h new file mode 100644 index 000000000..b76e7c140 --- /dev/null +++ b/include/linux/input/ili210x.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ILI210X_H +#define _ILI210X_H + +struct ili210x_platform_data { + unsigned long irq_flags; + unsigned int poll_period; + bool (*get_pendown_state)(void); +}; + +#endif diff --git a/include/linux/input/kxtj9.h b/include/linux/input/kxtj9.h new file mode 100644 index 000000000..d415579b5 --- /dev/null +++ b/include/linux/input/kxtj9.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2011 Kionix, Inc. + * Written by Chris Hudson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + * 02111-1307, USA + */ + +#ifndef __KXTJ9_H__ +#define __KXTJ9_H__ + +#define KXTJ9_I2C_ADDR 0x0F + +struct kxtj9_platform_data { + unsigned int min_interval; /* minimum poll interval (in milli-seconds) */ + unsigned int init_interval; /* initial poll interval (in milli-seconds) */ + + /* + * By default, x is axis 0, y is axis 1, z is axis 2; these can be + * changed to account for sensor orientation within the host device. + */ + u8 axis_map_x; + u8 axis_map_y; + u8 axis_map_z; + + /* + * Each axis can be negated to account for sensor orientation within + * the host device. 
+ */ + bool negate_x; + bool negate_y; + bool negate_z; + + /* CTRL_REG1: set resolution, g-range, data ready enable */ + /* Output resolution: 8-bit valid or 12-bit valid */ + #define RES_8BIT 0 + #define RES_12BIT (1 << 6) + u8 res_12bit; + /* Output g-range: +/-2g, 4g, or 8g */ + #define KXTJ9_G_2G 0 + #define KXTJ9_G_4G (1 << 3) + #define KXTJ9_G_8G (1 << 4) + u8 g_range; + + int (*init)(void); + void (*exit)(void); + int (*power_on)(void); + int (*power_off)(void); +}; +#endif /* __KXTJ9_H__ */ diff --git a/include/linux/input/lm8333.h b/include/linux/input/lm8333.h new file mode 100644 index 000000000..79f918c6e --- /dev/null +++ b/include/linux/input/lm8333.h @@ -0,0 +1,24 @@ +/* + * public include for LM8333 keypad driver - same license as driver + * Copyright (C) 2012 Wolfram Sang, Pengutronix + */ + +#ifndef _LM8333_H +#define _LM8333_H + +struct lm8333; + +struct lm8333_platform_data { + /* Keymap data */ + const struct matrix_keymap_data *matrix_data; + /* Active timeout before enter HALT mode in microseconds */ + unsigned active_time; + /* Debounce interval in microseconds */ + unsigned debounce_time; +}; + +extern int lm8333_read8(struct lm8333 *lm8333, u8 cmd); +extern int lm8333_write8(struct lm8333 *lm8333, u8 cmd, u8 val); +extern int lm8333_read_block(struct lm8333 *lm8333, u8 cmd, u8 len, u8 *buf); + +#endif /* _LM8333_H */ diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h new file mode 100644 index 000000000..9476768c3 --- /dev/null +++ b/include/linux/input/matrix_keypad.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _MATRIX_KEYPAD_H +#define _MATRIX_KEYPAD_H + +#include +#include +#include + +#define MATRIX_MAX_ROWS 32 +#define MATRIX_MAX_COLS 32 + +#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\ + (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\ + ((val) & 0xffff)) + +#define KEY_ROW(k) (((k) >> 24) & 0xff) +#define KEY_COL(k) (((k) >> 16) & 0xff) +#define KEY_VAL(k) ((k) & 0xffff) + +#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col)) + +/** + * struct matrix_keymap_data - keymap for matrix keyboards + * @keymap: pointer to array of uint32 values encoded with KEY() macro + * representing keymap + * @keymap_size: number of entries (initialized) in this keymap + * + * This structure is supposed to be used by platform code to supply + * keymaps to drivers that implement matrix-like keypads/keyboards. + */ +struct matrix_keymap_data { + const uint32_t *keymap; + unsigned int keymap_size; +}; + +/** + * struct matrix_keypad_platform_data - platform-dependent keypad data + * @keymap_data: pointer to &matrix_keymap_data + * @row_gpios: pointer to array of gpio numbers representing rows + * @col_gpios: pointer to array of gpio numbers reporesenting colums + * @num_row_gpios: actual number of row gpios used by device + * @num_col_gpios: actual number of col gpios used by device + * @col_scan_delay_us: delay, measured in microseconds, that is + * needed before we can keypad after activating column gpio + * @debounce_ms: debounce interval in milliseconds + * @clustered_irq: may be specified if interrupts of all row/column GPIOs + * are bundled to one single irq + * @clustered_irq_flags: flags that are needed for the clustered irq + * @active_low: gpio polarity + * @wakeup: controls whether the device should be set up as wakeup + * source + * @no_autorepeat: disable key autorepeat + * @drive_inactive_cols: drive inactive columns during scan, rather than + * making them inputs. 
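/*
 * Illustrative sketch, not part of the upstream header: a board-file
 * keymap built with the KEY() macro, wrapped in struct matrix_keymap_data
 * and hooked up through the platform data structure defined immediately
 * below. GPIO numbers, timings and key choices are placeholders.
 */
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

static const unsigned int example_row_gpios[] = { 64, 65, 66 };
static const unsigned int example_col_gpios[] = { 70, 71 };

static const uint32_t example_keymap[] = {
	KEY(0, 0, KEY_A),
	KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_ENTER),
	KEY(1, 1, KEY_ESC),
	KEY(2, 0, KEY_LEFT),
	KEY(2, 1, KEY_RIGHT),
};

static const struct matrix_keymap_data example_keymap_data = {
	.keymap		= example_keymap,
	.keymap_size	= ARRAY_SIZE(example_keymap),
};

static struct matrix_keypad_platform_data example_keypad_pdata = {
	.keymap_data	   = &example_keymap_data,
	.row_gpios	   = example_row_gpios,
	.col_gpios	   = example_col_gpios,
	.num_row_gpios	   = ARRAY_SIZE(example_row_gpios),
	.num_col_gpios	   = ARRAY_SIZE(example_col_gpios),
	.col_scan_delay_us = 10,
	.debounce_ms	   = 20,
	.active_low	   = true,
	.wakeup		   = true,
};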
+ * + * This structure represents platform-specific data that use used by + * matrix_keypad driver to perform proper initialization. + */ +struct matrix_keypad_platform_data { + const struct matrix_keymap_data *keymap_data; + + const unsigned int *row_gpios; + const unsigned int *col_gpios; + + unsigned int num_row_gpios; + unsigned int num_col_gpios; + + unsigned int col_scan_delay_us; + + /* key debounce interval in milli-second */ + unsigned int debounce_ms; + + unsigned int clustered_irq; + unsigned int clustered_irq_flags; + + bool active_low; + bool wakeup; + bool no_autorepeat; + bool drive_inactive_cols; +}; + +int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data, + const char *keymap_name, + unsigned int rows, unsigned int cols, + unsigned short *keymap, + struct input_dev *input_dev); +int matrix_keypad_parse_properties(struct device *dev, + unsigned int *rows, unsigned int *cols); + +#define matrix_keypad_parse_of_params matrix_keypad_parse_properties + +#endif /* _MATRIX_KEYPAD_H */ diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h new file mode 100644 index 000000000..3f4bf60b0 --- /dev/null +++ b/include/linux/input/mt.h @@ -0,0 +1,127 @@ +#ifndef _INPUT_MT_H +#define _INPUT_MT_H + +/* + * Input Multitouch Library + * + * Copyright (c) 2010 Henrik Rydberg + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include + +#define TRKID_MAX 0xffff + +#define INPUT_MT_POINTER 0x0001 /* pointer device, e.g. trackpad */ +#define INPUT_MT_DIRECT 0x0002 /* direct device, e.g. touchscreen */ +#define INPUT_MT_DROP_UNUSED 0x0004 /* drop contacts not seen in frame */ +#define INPUT_MT_TRACK 0x0008 /* use in-kernel tracking */ +#define INPUT_MT_SEMI_MT 0x0010 /* semi-mt device, finger count handled manually */ + +/** + * struct input_mt_slot - represents the state of an input MT slot + * @abs: holds current values of ABS_MT axes for this slot + * @frame: last frame at which input_mt_report_slot_state() was called + * @key: optional driver designation of this slot + */ +struct input_mt_slot { + int abs[ABS_MT_LAST - ABS_MT_FIRST + 1]; + unsigned int frame; + unsigned int key; +}; + +/** + * struct input_mt - state of tracked contacts + * @trkid: stores MT tracking ID for the next contact + * @num_slots: number of MT slots the device uses + * @slot: MT slot currently being transmitted + * @flags: input_mt operation flags + * @frame: increases every time input_mt_sync_frame() is called + * @red: reduced cost matrix for in-kernel tracking + * @slots: array of slots holding current values of tracked contacts + */ +struct input_mt { + int trkid; + int num_slots; + int slot; + unsigned int flags; + unsigned int frame; + int *red; + struct input_mt_slot slots[]; +}; + +static inline void input_mt_set_value(struct input_mt_slot *slot, + unsigned code, int value) +{ + slot->abs[code - ABS_MT_FIRST] = value; +} + +static inline int input_mt_get_value(const struct input_mt_slot *slot, + unsigned code) +{ + return slot->abs[code - ABS_MT_FIRST]; +} + +static inline bool input_mt_is_active(const struct input_mt_slot *slot) +{ + return input_mt_get_value(slot, ABS_MT_TRACKING_ID) >= 0; +} + +static inline bool input_mt_is_used(const struct input_mt *mt, + const struct input_mt_slot *slot) +{ + return slot->frame == mt->frame; +} + +int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots, + unsigned int 
flags); +void input_mt_destroy_slots(struct input_dev *dev); + +static inline int input_mt_new_trkid(struct input_mt *mt) +{ + return mt->trkid++ & TRKID_MAX; +} + +static inline void input_mt_slot(struct input_dev *dev, int slot) +{ + input_event(dev, EV_ABS, ABS_MT_SLOT, slot); +} + +static inline bool input_is_mt_value(int axis) +{ + return axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST; +} + +static inline bool input_is_mt_axis(int axis) +{ + return axis == ABS_MT_SLOT || input_is_mt_value(axis); +} + +bool input_mt_report_slot_state(struct input_dev *dev, + unsigned int tool_type, bool active); + +void input_mt_report_finger_count(struct input_dev *dev, int count); +void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count); +void input_mt_drop_unused(struct input_dev *dev); + +void input_mt_sync_frame(struct input_dev *dev); + +/** + * struct input_mt_pos - contact position + * @x: horizontal coordinate + * @y: vertical coordinate + */ +struct input_mt_pos { + s16 x, y; +}; + +int input_mt_assign_slots(struct input_dev *dev, int *slots, + const struct input_mt_pos *pos, int num_pos, + int dmax); + +int input_mt_get_slot_by_key(struct input_dev *dev, int key); + +#endif diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h new file mode 100644 index 000000000..45050eb34 --- /dev/null +++ b/include/linux/input/navpoint.h @@ -0,0 +1,12 @@ +/* + * Copyright (C) 2012 Paul Parsons + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +struct navpoint_platform_data { + int port; /* PXA SSP port for pxa_ssp_request() */ + int gpio; /* GPIO for power on/off */ +}; diff --git a/include/linux/input/samsung-keypad.h b/include/linux/input/samsung-keypad.h new file mode 100644 index 000000000..f25619bfd --- /dev/null +++ b/include/linux/input/samsung-keypad.h @@ -0,0 +1,43 @@ +/* + * Samsung Keypad platform data definitions + * + * Copyright (C) 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __SAMSUNG_KEYPAD_H +#define __SAMSUNG_KEYPAD_H + +#include + +#define SAMSUNG_MAX_ROWS 8 +#define SAMSUNG_MAX_COLS 8 + +/** + * struct samsung_keypad_platdata - Platform device data for Samsung Keypad. + * @keymap_data: pointer to &matrix_keymap_data. + * @rows: number of keypad row supported. + * @cols: number of keypad col supported. + * @no_autorepeat: disable key autorepeat. + * @wakeup: controls whether the device should be set up as wakeup source. + * @cfg_gpio: configure the GPIO. + * + * Initialisation data specific to either the machine or the platform + * for the device driver to use or call-back when configuring gpio. 
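/*
 * Illustrative sketch, not part of the upstream header: how a touch
 * driver typically uses the slotted MT protocol declared above. The
 * contact limit, axis ranges and function names are assumptions; the
 * general flow is: initialise the slots once, then for every frame
 * report each contact's slot and axes and close the frame with
 * input_mt_sync_frame().
 */
#include <linux/input.h>
#include <linux/input/mt.h>

#define EXAMPLE_MAX_CONTACTS	5	/* placeholder contact limit */

static int example_mt_setup(struct input_dev *dev)
{
	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 1023, 0, 0);
	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 1023, 0, 0);

	return input_mt_init_slots(dev, EXAMPLE_MAX_CONTACTS,
				   INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
}

static void example_mt_report(struct input_dev *dev,
			      const struct input_mt_pos *pos, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		input_mt_slot(dev, i);
		input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
		input_report_abs(dev, ABS_MT_POSITION_X, pos[i].x);
		input_report_abs(dev, ABS_MT_POSITION_Y, pos[i].y);
	}

	/* drops contacts not seen this frame and handles pointer emulation */
	input_mt_sync_frame(dev);
	input_sync(dev);
}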
+ */ +struct samsung_keypad_platdata { + const struct matrix_keymap_data *keymap_data; + unsigned int rows; + unsigned int cols; + bool no_autorepeat; + bool wakeup; + + void (*cfg_gpio)(unsigned int rows, unsigned int cols); +}; + +#endif /* __SAMSUNG_KEYPAD_H */ diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h new file mode 100644 index 000000000..b3c4f3b66 --- /dev/null +++ b/include/linux/input/sh_keysc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SH_KEYSC_H__ +#define __SH_KEYSC_H__ + +#define SH_KEYSC_MAXKEYS 64 + +struct sh_keysc_info { + enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3, + SH_KEYSC_MODE_4, SH_KEYSC_MODE_5, SH_KEYSC_MODE_6 } mode; + int scan_timing; /* 0 -> 7, see KYCR1, SCN[2:0] */ + int delay; + int kycr2_delay; + int keycodes[SH_KEYSC_MAXKEYS]; /* KEYIN * KEYOUT */ +}; + +#endif /* __SH_KEYSC_H__ */ diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h new file mode 100644 index 000000000..c7346e33d --- /dev/null +++ b/include/linux/input/sparse-keymap.h @@ -0,0 +1,61 @@ +#ifndef _SPARSE_KEYMAP_H +#define _SPARSE_KEYMAP_H + +/* + * Copyright (c) 2009 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#define KE_END 0 /* Indicates end of keymap */ +#define KE_KEY 1 /* Ordinary key/button */ +#define KE_SW 2 /* Switch (predetermined value) */ +#define KE_VSW 3 /* Switch (value supplied at runtime) */ +#define KE_IGNORE 4 /* Known entry that should be ignored */ +#define KE_LAST KE_IGNORE + +/** + * struct key_entry - keymap entry for use in sparse keymap + * @type: Type of the key entry (KE_KEY, KE_SW, KE_VSW, KE_END); + * drivers are allowed to extend the list with their own + * private definitions. + * @code: Device-specific data identifying the button/switch + * @keycode: KEY_* code assigned to a key/button + * @sw.code: SW_* code assigned to a switch + * @sw.value: Value that should be sent in an input even when KE_SW + * switch is toggled. KE_VSW switches ignore this field and + * expect driver to supply value for the event. + * + * This structure defines an entry in a sparse keymap used by some + * input devices for which traditional table-based approach is not + * suitable. 
+ */ +struct key_entry { + int type; /* See KE_* above */ + u32 code; + union { + u16 keycode; /* For KE_KEY */ + struct { /* For KE_SW, KE_VSW */ + u8 code; + u8 value; /* For KE_SW, ignored by KE_VSW */ + } sw; + }; +}; + +struct key_entry *sparse_keymap_entry_from_scancode(struct input_dev *dev, + unsigned int code); +struct key_entry *sparse_keymap_entry_from_keycode(struct input_dev *dev, + unsigned int code); +int sparse_keymap_setup(struct input_dev *dev, + const struct key_entry *keymap, + int (*setup)(struct input_dev *, struct key_entry *)); + +void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *ke, + unsigned int value, bool autorelease); + +bool sparse_keymap_report_event(struct input_dev *dev, unsigned int code, + unsigned int value, bool autorelease); + +#endif /* _SPARSE_KEYMAP_H */ diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h new file mode 100644 index 000000000..09d22ccb9 --- /dev/null +++ b/include/linux/input/touchscreen.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2014 Sebastian Reichel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef _TOUCHSCREEN_H +#define _TOUCHSCREEN_H + +struct input_dev; +struct input_mt_pos; + +struct touchscreen_properties { + unsigned int max_x; + unsigned int max_y; + bool invert_x; + bool invert_y; + bool swap_x_y; +}; + +void touchscreen_parse_properties(struct input_dev *input, bool multitouch, + struct touchscreen_properties *prop); + +void touchscreen_set_mt_pos(struct input_mt_pos *pos, + const struct touchscreen_properties *prop, + unsigned int x, unsigned int y); + +void touchscreen_report_pos(struct input_dev *input, + const struct touchscreen_properties *prop, + unsigned int x, unsigned int y, + bool multitouch); + +#endif diff --git a/include/linux/input/tps6507x-ts.h b/include/linux/input/tps6507x-ts.h new file mode 100644 index 000000000..b433df801 --- /dev/null +++ b/include/linux/input/tps6507x-ts.h @@ -0,0 +1,23 @@ +/* linux/i2c/tps6507x-ts.h + * + * Functions to access TPS65070 touch screen chip. + * + * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com) + * + * + * For licencing details see kernel-base/COPYING + */ + +#ifndef __LINUX_I2C_TPS6507X_TS_H +#define __LINUX_I2C_TPS6507X_TS_H + +/* Board specific touch screen initial values */ +struct touchscreen_init_data { + int poll_period; /* ms */ + __u16 min_pressure; /* min reading to be treated as a touch */ + __u16 vendor; + __u16 product; + __u16 version; +}; + +#endif /* __LINUX_I2C_TPS6507X_TS_H */ diff --git a/include/linux/integrity.h b/include/linux/integrity.h new file mode 100644 index 000000000..54c853ec2 --- /dev/null +++ b/include/linux/integrity.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2009 IBM Corporation + * Author: Mimi Zohar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
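/*
 * Illustrative sketch, not part of the upstream header: a small sparse
 * keymap and its use from a driver's event path. The scancodes, key
 * codes and function names are placeholders.
 */
#include <linux/printk.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

static const struct key_entry example_keymap[] = {
	{ KE_KEY,    0x01, { KEY_WLAN } },
	{ KE_KEY,    0x02, { KEY_BRIGHTNESSUP } },
	{ KE_IGNORE, 0x03 },			/* known event, deliberately dropped */
	{ KE_END,    0 }
};

static int example_keymap_init(struct input_dev *input)
{
	/* no per-entry fixup callback needed for this static table */
	return sparse_keymap_setup(input, example_keymap, NULL);
}

static void example_handle_scancode(struct input_dev *input, unsigned int code)
{
	/* emits press + release (autorelease) for KE_KEY entries */
	if (!sparse_keymap_report_event(input, code, 1, true))
		pr_debug("unhandled scancode 0x%x\n", code);
}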
+ */ + +#ifndef _LINUX_INTEGRITY_H +#define _LINUX_INTEGRITY_H + +#include + +enum integrity_status { + INTEGRITY_PASS = 0, + INTEGRITY_PASS_IMMUTABLE, + INTEGRITY_FAIL, + INTEGRITY_NOLABEL, + INTEGRITY_NOXATTRS, + INTEGRITY_UNKNOWN, +}; + +/* List of EVM protected security xattrs */ +#ifdef CONFIG_INTEGRITY +extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode); +extern void integrity_inode_free(struct inode *inode); +extern void __init integrity_load_keys(void); + +#else +static inline struct integrity_iint_cache * + integrity_inode_get(struct inode *inode) +{ + return NULL; +} + +static inline void integrity_inode_free(struct inode *inode) +{ + return; +} + +static inline void integrity_load_keys(void) +{ +} +#endif /* CONFIG_INTEGRITY */ + +#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS + +extern int integrity_kernel_module_request(char *kmod_name); + +#else + +static inline int integrity_kernel_module_request(char *kmod_name) +{ + return 0; +} + +#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */ + +#endif /* _LINUX_INTEGRITY_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h new file mode 100644 index 000000000..786df33c0 --- /dev/null +++ b/include/linux/intel-iommu.h @@ -0,0 +1,567 @@ +/* + * Copyright © 2006-2015, Intel Corporation. + * + * Authors: Ashok Raj + * Anil S Keshavamurthy + * David Woodhouse + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef _INTEL_IOMMU_H_ +#define _INTEL_IOMMU_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * Intel IOMMU register specification per version 1.0 public spec. 
+ */ + +#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ +#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ +#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ +#define DMAR_GCMD_REG 0x18 /* Global command register */ +#define DMAR_GSTS_REG 0x1c /* Global status register */ +#define DMAR_RTADDR_REG 0x20 /* Root entry table */ +#define DMAR_CCMD_REG 0x28 /* Context command reg */ +#define DMAR_FSTS_REG 0x34 /* Fault Status register */ +#define DMAR_FECTL_REG 0x38 /* Fault control register */ +#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */ +#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */ +#define DMAR_FEUADDR_REG 0x44 /* Upper address register */ +#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */ +#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */ +#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */ +#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ +#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */ +#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ +#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */ +#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */ +#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ +#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ +#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ +#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ +#define DMAR_PQH_REG 0xc0 /* Page request queue head register */ +#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ +#define DMAR_PQA_REG 0xd0 /* Page request queue address register */ +#define DMAR_PRS_REG 0xdc /* Page request status register */ +#define DMAR_PECTL_REG 0xe0 /* Page request event control register */ +#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ +#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ +#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ + +#define OFFSET_STRIDE (9) + +#define dmar_readq(a) readq(a) +#define dmar_writeq(a,v) writeq(v,a) + +#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) +#define DMAR_VER_MINOR(v) ((v) & 0x0f) + +/* + * Decoding Capability Register + */ +#define cap_5lp_support(c) (((c) >> 60) & 1) +#define cap_pi_support(c) (((c) >> 59) & 1) +#define cap_fl1gp_support(c) (((c) >> 56) & 1) +#define cap_read_drain(c) (((c) >> 55) & 1) +#define cap_write_drain(c) (((c) >> 54) & 1) +#define cap_max_amask_val(c) (((c) >> 48) & 0x3f) +#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1) +#define cap_pgsel_inv(c) (((c) >> 39) & 1) + +#define cap_super_page_val(c) (((c) >> 34) & 0xf) +#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \ + * OFFSET_STRIDE) + 21) + +#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16) +#define cap_max_fault_reg_offset(c) \ + (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16) + +#define cap_zlr(c) (((c) >> 22) & 1) +#define cap_isoch(c) (((c) >> 23) & 1) +#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1) +#define cap_sagaw(c) (((c) >> 8) & 0x1f) +#define cap_caching_mode(c) (((c) >> 7) & 1) +#define cap_phmr(c) (((c) >> 6) & 1) +#define cap_plmr(c) (((c) >> 5) & 1) +#define cap_rwbf(c) (((c) >> 4) & 1) +#define cap_afl(c) (((c) >> 3) & 1) +#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7))) +/* + * Extended Capability Register + */ + +#define ecap_dit(e) ((e >> 41) & 0x1) +#define ecap_pasid(e) ((e >> 40) & 0x1) 
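/*
 * Illustrative sketch, not from the upstream header: reading and decoding
 * the version and capability registers with the accessors above. The
 * function is a debugging aid invented for this example; 'iommu' is a
 * struct intel_iommu (defined later in this header) whose register set
 * has already been mapped.
 */
static void example_dump_dmar_caps(struct intel_iommu *iommu)
{
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	u64 cap = dmar_readq(iommu->reg + DMAR_CAP_REG);

	pr_info("DMAR %d.%d: mgaw %d, sagaw 0x%x, %d fault regs, caching mode %d\n",
		(int)DMAR_VER_MAJOR(ver), (int)DMAR_VER_MINOR(ver),
		(int)cap_mgaw(cap), (int)cap_sagaw(cap),
		(int)cap_num_fault_regs(cap), (int)cap_caching_mode(cap));
}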
+#define ecap_pss(e) ((e >> 35) & 0x1f) +#define ecap_eafs(e) ((e >> 34) & 0x1) +#define ecap_nwfs(e) ((e >> 33) & 0x1) +#define ecap_srs(e) ((e >> 31) & 0x1) +#define ecap_ers(e) ((e >> 30) & 0x1) +#define ecap_prs(e) ((e >> 29) & 0x1) +#define ecap_broken_pasid(e) ((e >> 28) & 0x1) +#define ecap_dis(e) ((e >> 27) & 0x1) +#define ecap_nest(e) ((e >> 26) & 0x1) +#define ecap_mts(e) ((e >> 25) & 0x1) +#define ecap_ecs(e) ((e >> 24) & 0x1) +#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) +#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16) +#define ecap_coherent(e) ((e) & 0x1) +#define ecap_qis(e) ((e) & 0x2) +#define ecap_pass_through(e) ((e >> 6) & 0x1) +#define ecap_eim_support(e) ((e >> 4) & 0x1) +#define ecap_ir_support(e) ((e >> 3) & 0x1) +#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1) +#define ecap_max_handle_mask(e) ((e >> 20) & 0xf) +#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */ + +/* IOTLB_REG */ +#define DMA_TLB_FLUSH_GRANU_OFFSET 60 +#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) +#define DMA_TLB_DSI_FLUSH (((u64)2) << 60) +#define DMA_TLB_PSI_FLUSH (((u64)3) << 60) +#define DMA_TLB_IIRG(type) ((type >> 60) & 3) +#define DMA_TLB_IAIG(val) (((val) >> 57) & 3) +#define DMA_TLB_READ_DRAIN (((u64)1) << 49) +#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) +#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) +#define DMA_TLB_IVT (((u64)1) << 63) +#define DMA_TLB_IH_NONLEAF (((u64)1) << 6) +#define DMA_TLB_MAX_SIZE (0x3f) + +/* INVALID_DESC */ +#define DMA_CCMD_INVL_GRANU_OFFSET 61 +#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4) +#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4) +#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4) +#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) +#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) +#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) +#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6) +#define DMA_ID_TLB_ADDR(addr) (addr) +#define DMA_ID_TLB_ADDR_MASK(mask) (mask) + +/* PMEN_REG */ +#define DMA_PMEN_EPM (((u32)1)<<31) +#define DMA_PMEN_PRS (((u32)1)<<0) + +/* GCMD_REG */ +#define DMA_GCMD_TE (((u32)1) << 31) +#define DMA_GCMD_SRTP (((u32)1) << 30) +#define DMA_GCMD_SFL (((u32)1) << 29) +#define DMA_GCMD_EAFL (((u32)1) << 28) +#define DMA_GCMD_WBF (((u32)1) << 27) +#define DMA_GCMD_QIE (((u32)1) << 26) +#define DMA_GCMD_SIRTP (((u32)1) << 24) +#define DMA_GCMD_IRE (((u32) 1) << 25) +#define DMA_GCMD_CFI (((u32) 1) << 23) + +/* GSTS_REG */ +#define DMA_GSTS_TES (((u32)1) << 31) +#define DMA_GSTS_RTPS (((u32)1) << 30) +#define DMA_GSTS_FLS (((u32)1) << 29) +#define DMA_GSTS_AFLS (((u32)1) << 28) +#define DMA_GSTS_WBFS (((u32)1) << 27) +#define DMA_GSTS_QIES (((u32)1) << 26) +#define DMA_GSTS_IRTPS (((u32)1) << 24) +#define DMA_GSTS_IRES (((u32)1) << 25) +#define DMA_GSTS_CFIS (((u32)1) << 23) + +/* DMA_RTADDR_REG */ +#define DMA_RTADDR_RTT (((u64)1) << 11) + +/* CCMD_REG */ +#define DMA_CCMD_ICC (((u64)1) << 63) +#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61) +#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61) +#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61) +#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32) +#define DMA_CCMD_MASK_NOBIT 0 +#define DMA_CCMD_MASK_1BIT 1 +#define DMA_CCMD_MASK_2BIT 2 +#define DMA_CCMD_MASK_3BIT 3 +#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16) +#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff)) + +/* FECTL_REG */ +#define DMA_FECTL_IM (((u32)1) << 31) + +/* FSTS_REG */ +#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */ +#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */ 
+#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */ +#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */ +#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */ +#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */ +#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) + +/* FRCD_REG, 32 bits access */ +#define DMA_FRCD_F (((u32)1) << 31) +#define dma_frcd_type(d) ((d >> 30) & 1) +#define dma_frcd_fault_reason(c) (c & 0xff) +#define dma_frcd_source_id(c) (c & 0xffff) +/* low 64 bit */ +#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) + +/* PRS_REG */ +#define DMA_PRS_PPR ((u32)1) + +#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ +do { \ + cycles_t start_time = get_cycles(); \ + while (1) { \ + sts = op(iommu->reg + offset); \ + if (cond) \ + break; \ + if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ + panic("DMAR hardware is malfunctioning\n"); \ + cpu_relax(); \ + } \ +} while (0) + +#define QI_LENGTH 256 /* queue length */ + +enum { + QI_FREE, + QI_IN_USE, + QI_DONE, + QI_ABORT +}; + +#define QI_CC_TYPE 0x1 +#define QI_IOTLB_TYPE 0x2 +#define QI_DIOTLB_TYPE 0x3 +#define QI_IEC_TYPE 0x4 +#define QI_IWD_TYPE 0x5 +#define QI_EIOTLB_TYPE 0x6 +#define QI_PC_TYPE 0x7 +#define QI_DEIOTLB_TYPE 0x8 +#define QI_PGRP_RESP_TYPE 0x9 +#define QI_PSTRM_RESP_TYPE 0xa + +#define QI_IEC_SELECTIVE (((u64)1) << 4) +#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) +#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27)) + +#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) +#define QI_IWD_STATUS_WRITE (((u64)1) << 5) + +#define QI_IOTLB_DID(did) (((u64)did) << 16) +#define QI_IOTLB_DR(dr) (((u64)dr) << 7) +#define QI_IOTLB_DW(dw) (((u64)dw) << 6) +#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) +#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK) +#define QI_IOTLB_IH(ih) (((u64)ih) << 6) +#define QI_IOTLB_AM(am) (((u8)am)) + +#define QI_CC_FM(fm) (((u64)fm) << 48) +#define QI_CC_SID(sid) (((u64)sid) << 32) +#define QI_CC_DID(did) (((u64)did) << 16) +#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4)) + +#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) +#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) +#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ + ((u64)((pfsid >> 4) & 0xfff) << 52)) +#define QI_DEV_IOTLB_SIZE 1 +#define QI_DEV_IOTLB_MAX_INVS 32 + +#define QI_PC_PASID(pasid) (((u64)pasid) << 32) +#define QI_PC_DID(did) (((u64)did) << 16) +#define QI_PC_GRAN(gran) (((u64)gran) << 4) + +#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0)) +#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) + +#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +#define QI_EIOTLB_GL(gl) (((u64)gl) << 7) +#define QI_EIOTLB_IH(ih) (((u64)ih) << 6) +#define QI_EIOTLB_AM(am) (((u64)am)) +#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) +#define QI_EIOTLB_DID(did) (((u64)did) << 16) +#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) + +#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) +#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) +#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) +#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) +#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) +#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ + ((u64)((pfsid >> 4) & 0xfff) << 52)) +#define QI_DEV_EIOTLB_MAX_INVS 32 + 
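/*
 * Illustrative sketch, not from the upstream header: composing a
 * domain-selective IOTLB invalidation descriptor from the QI_IOTLB_*
 * helpers above, roughly mirroring how the VT-d driver builds one. The
 * qi_desc structure and qi_submit_sync() are declared further down in
 * this header; the function name is an assumption for this example.
 */
static int example_flush_domain_iotlb(struct intel_iommu *iommu, u16 did)
{
	struct qi_desc desc;
	u8 dr = cap_read_drain(iommu->cap);
	u8 dw = cap_write_drain(iommu->cap);

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) |
		   QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) | QI_IOTLB_TYPE;
	/* address, invalidation hint and mask are unused for DSI flushes */
	desc.high = QI_IOTLB_ADDR(0) | QI_IOTLB_IH(0) | QI_IOTLB_AM(0);

	return qi_submit_sync(&desc, iommu);
}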
+#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) +#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32) +#define QI_PGRP_RESP_CODE(res) ((u64)(res)) +#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) +#define QI_PGRP_DID(did) (((u64)(did)) << 16) +#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) + +#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK) +#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4) +#define QI_PSTRM_RESP_CODE(res) ((u64)(res)) +#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55) +#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32) +#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24) +#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4) + +#define QI_RESP_SUCCESS 0x0 +#define QI_RESP_INVALID 0x1 +#define QI_RESP_FAILURE 0xf + +#define QI_GRAN_ALL_ALL 0 +#define QI_GRAN_NONG_ALL 1 +#define QI_GRAN_NONG_PASID 2 +#define QI_GRAN_PSI_PASID 3 + +struct qi_desc { + u64 low, high; +}; + +struct q_inval { + raw_spinlock_t q_lock; + struct qi_desc *desc; /* invalidation queue */ + int *desc_status; /* desc status */ + int free_head; /* first free entry */ + int free_tail; /* last free entry */ + int free_cnt; +}; + +#ifdef CONFIG_IRQ_REMAP +/* 1MB - maximum possible interrupt remapping table size */ +#define INTR_REMAP_PAGE_ORDER 8 +#define INTR_REMAP_TABLE_REG_SIZE 0xf +#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf + +#define INTR_REMAP_TABLE_ENTRIES 65536 + +struct irq_domain; + +struct ir_table { + struct irte *base; + unsigned long *bitmap; +}; +#endif + +struct iommu_flush { + void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, + u8 fm, u64 type); + void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type); +}; + +enum { + SR_DMAR_FECTL_REG, + SR_DMAR_FEDATA_REG, + SR_DMAR_FEADDR_REG, + SR_DMAR_FEUADDR_REG, + MAX_SR_DMAR_REGS +}; + +#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) +#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) + +struct pasid_entry; +struct pasid_state_entry; +struct page_req_dsc; + +struct dmar_domain { + int nid; /* node id */ + + unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED]; + /* Refcount of devices per iommu */ + + + u16 iommu_did[DMAR_UNITS_SUPPORTED]; + /* Domain ids per IOMMU. Use u16 since + * domain ids are 16 bit wide according + * to VT-d spec, section 9.3 */ + + bool has_iotlb_device; + struct list_head devices; /* all devices' list */ + struct iova_domain iovad; /* iova's that belong to this domain */ + + struct dma_pte *pgd; /* virtual address */ + int gaw; /* max guest address width */ + + /* adjusted guest address width, 0 is level 2 30-bit */ + int agaw; + + int flags; /* flags to find out type of domain */ + + int iommu_coherency;/* indicate coherency of iommu access */ + int iommu_snooping; /* indicate snooping control feature*/ + int iommu_count; /* reference count of iommu */ + int iommu_superpage;/* Level of superpages supported: + 0 == 4KiB (no superpages), 1 == 2MiB, + 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ + u64 max_addr; /* maximum mapped address */ + + struct iommu_domain domain; /* generic domain data structure for + iommu core */ +}; + +struct intel_iommu { + void __iomem *reg; /* Pointer to hardware regs, virtual addr */ + u64 reg_phys; /* physical address of hw register set */ + u64 reg_size; /* size of hw register set */ + u64 cap; + u64 ecap; + u32 gcmd; /* Holds TE, EAFL. 
Don't need SRTP, SFL, WBF */ + raw_spinlock_t register_lock; /* protect register handling */ + int seq_id; /* sequence id of the iommu */ + int agaw; /* agaw of this iommu */ + int msagaw; /* max sagaw of this iommu */ + unsigned int irq, pr_irq; + u16 segment; /* PCI segment# */ + unsigned char name[13]; /* Device Name */ + +#ifdef CONFIG_INTEL_IOMMU + unsigned long *domain_ids; /* bitmap of domains */ + struct dmar_domain ***domains; /* ptr to domains */ + spinlock_t lock; /* protect context, domain ids */ + struct root_entry *root_entry; /* virtual address */ + + struct iommu_flush flush; +#endif +#ifdef CONFIG_INTEL_IOMMU_SVM + /* These are large and need to be contiguous, so we allocate just + * one for now. We'll maybe want to rethink that if we truly give + * devices away to userspace processes (e.g. for DPDK) and don't + * want to trust that userspace will use *only* the PASID it was + * told to. But while it's all driver-arbitrated, we're fine. */ + struct pasid_state_entry *pasid_state_table; + struct page_req_dsc *prq; + unsigned char prq_name[16]; /* Name for PRQ interrupt */ + u32 pasid_max; +#endif + struct q_inval *qi; /* Queued invalidation info */ + u32 *iommu_state; /* Store iommu states between suspend and resume.*/ + +#ifdef CONFIG_IRQ_REMAP + struct ir_table *ir_table; /* Interrupt remapping info */ + struct irq_domain *ir_domain; + struct irq_domain *ir_msi_domain; +#endif + struct iommu_device iommu; /* IOMMU core code handle */ + int node; + u32 flags; /* Software defined flags */ + + struct dmar_drhd_unit *drhd; +}; + +/* PCI domain-device relationship */ +struct device_domain_info { + struct list_head link; /* link to domain siblings */ + struct list_head global; /* link to global list */ + struct list_head table; /* link to pasid table */ + u8 bus; /* PCI bus number */ + u8 devfn; /* PCI devfn number */ + u16 pfsid; /* SRIOV physical function source ID */ + u8 pasid_supported:3; + u8 pasid_enabled:1; + u8 pri_supported:1; + u8 pri_enabled:1; + u8 ats_supported:1; + u8 ats_enabled:1; + u8 ats_qdep; + struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ + struct intel_iommu *iommu; /* IOMMU used by this device */ + struct dmar_domain *domain; /* pointer to domain */ + struct pasid_table *pasid_table; /* pasid table */ +}; + +static inline void __iommu_flush_cache( + struct intel_iommu *iommu, void *addr, int size) +{ + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(addr, size); +} + +extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); +extern int dmar_find_matched_atsr_unit(struct pci_dev *dev); + +extern int dmar_enable_qi(struct intel_iommu *iommu); +extern void dmar_disable_qi(struct intel_iommu *iommu); +extern int dmar_reenable_qi(struct intel_iommu *iommu); +extern void qi_global_iec(struct intel_iommu *iommu); + +extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, + u8 fm, u64 type); +extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type); +extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, + u16 qdep, u64 addr, unsigned mask); +extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); + +extern int dmar_ir_support(void); + +struct dmar_domain *get_valid_domain_for_dev(struct device *dev); +void *alloc_pgtable_page(int node); +void free_pgtable_page(void *vaddr); +struct intel_iommu *domain_get_iommu(struct dmar_domain *domain); +int for_each_device_domain(int (*fn)(struct device_domain_info 
*info, + void *data), void *data); + +#ifdef CONFIG_INTEL_IOMMU_SVM +int intel_svm_init(struct intel_iommu *iommu); +int intel_svm_exit(struct intel_iommu *iommu); +extern int intel_svm_enable_prq(struct intel_iommu *iommu); +extern int intel_svm_finish_prq(struct intel_iommu *iommu); + +struct svm_dev_ops; + +struct intel_svm_dev { + struct list_head list; + struct rcu_head rcu; + struct device *dev; + struct svm_dev_ops *ops; + int users; + u16 did; + u16 dev_iotlb:1; + u16 sid, qdep; +}; + +struct intel_svm { + struct mmu_notifier notifier; + struct mm_struct *mm; + struct intel_iommu *iommu; + int flags; + int pasid; + struct list_head devs; + struct list_head list; +}; + +extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev); +extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); +#endif + +extern const struct attribute_group *intel_iommu_groups[]; + +#endif diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h new file mode 100644 index 000000000..2710d72de --- /dev/null +++ b/include/linux/intel-pti.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) Intel 2011 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * The PTI (Parallel Trace Interface) driver directs trace data routed from + * various parts in the system out through the Intel Penwell PTI port and + * out of the mobile device for analysis with a debugging tool + * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, + * compact JTAG, standard. + * + * This header file will allow other parts of the OS to use the + * interface to write out it's contents for debugging a mobile system. + */ + +#ifndef LINUX_INTEL_PTI_H_ +#define LINUX_INTEL_PTI_H_ + +/* offset for last dword of any PTI message. Part of MIPI P1149.7 */ +#define PTI_LASTDWORD_DTS 0x30 + +/* basic structure used as a write address to the PTI HW */ +struct pti_masterchannel { + u8 master; + u8 channel; +}; + +/* the following functions are defined in misc/pti.c */ +void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count); +struct pti_masterchannel *pti_request_masterchannel(u8 type, + const char *thread_name); +void pti_release_masterchannel(struct pti_masterchannel *mc); + +#endif /* LINUX_INTEL_PTI_H_ */ diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h new file mode 100644 index 000000000..733eaf95e --- /dev/null +++ b/include/linux/intel-svm.h @@ -0,0 +1,141 @@ +/* + * Copyright © 2015 Intel Corporation. + * + * Authors: David Woodhouse + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
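/*
 * Illustrative sketch, not from the upstream header: pushing a debug
 * buffer out through the PTI port with the API declared above. The
 * channel type value and thread name are placeholders.
 */
#include <linux/types.h>
#include <linux/intel-pti.h>

static void example_pti_trace(u8 *buf, int len)
{
	struct pti_masterchannel *mc;

	mc = pti_request_masterchannel(0, "example-trace");
	if (!mc)
		return;

	pti_writedata(mc, buf, len);
	pti_release_masterchannel(mc);
}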
+ */ + +#ifndef __INTEL_SVM_H__ +#define __INTEL_SVM_H__ + +struct device; + +struct svm_dev_ops { + void (*fault_cb)(struct device *dev, int pasid, u64 address, + u32 private, int rwxp, int response); +}; + +/* Values for rxwp in fault_cb callback */ +#define SVM_REQ_READ (1<<3) +#define SVM_REQ_WRITE (1<<2) +#define SVM_REQ_EXEC (1<<1) +#define SVM_REQ_PRIV (1<<0) + + +/* + * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" + * PASID for the current process. Even if a PASID already exists, a new one + * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID + * will not be given to subsequent callers. This facility allows a driver to + * disambiguate between multiple device contexts which access the same MM, + * if there is no other way to do so. It should be used sparingly, if at all. + */ +#define SVM_FLAG_PRIVATE_PASID (1<<0) + +/* + * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only + * for access to kernel addresses. No IOTLB flushes are automatically done + * for kernel mappings; it is valid only for access to the kernel's static + * 1:1 mapping of physical memory — not to vmalloc or even module mappings. + * A future API addition may permit the use of such ranges, by means of an + * explicit IOTLB flush call (akin to the DMA API's unmap method). + * + * It is unlikely that we will ever hook into flush_tlb_kernel_range() to + * do such IOTLB flushes automatically. + */ +#define SVM_FLAG_SUPERVISOR_MODE (1<<1) + +#ifdef CONFIG_INTEL_IOMMU_SVM + +/** + * intel_svm_bind_mm() - Bind the current process to a PASID + * @dev: Device to be granted acccess + * @pasid: Address for allocated PASID + * @flags: Flags. Later for requesting supervisor mode, etc. + * @ops: Callbacks to device driver + * + * This function attempts to enable PASID support for the given device. + * If the @pasid argument is non-%NULL, a PASID is allocated for access + * to the MM of the current process. + * + * By using a %NULL value for the @pasid argument, this function can + * be used to simply validate that PASID support is available for the + * given device — i.e. that it is behind an IOMMU which has the + * requisite support, and is enabled. + * + * Page faults are handled transparently by the IOMMU code, and there + * should be no need for the device driver to be involved. If a page + * fault cannot be handled (i.e. is an invalid address rather than + * just needs paging in), then the page request will be completed by + * the core IOMMU code with appropriate status, and the device itself + * can then report the resulting fault to its driver via whatever + * mechanism is appropriate. + * + * Multiple calls from the same process may result in the same PASID + * being re-used. A reference count is kept. + */ +extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, + struct svm_dev_ops *ops); + +/** + * intel_svm_unbind_mm() - Unbind a specified PASID + * @dev: Device for which PASID was allocated + * @pasid: PASID value to be unbound + * + * This function allows a PASID to be retired when the device no + * longer requires access to the address space of a given process. + * + * If the use count for the PASID in question reaches zero, the + * PASID is revoked and may no longer be used by hardware. + * + * Device drivers are required to ensure that no access (including + * page requests) is currently outstanding for the PASID in question, + * before calling this function. 
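/*
 * Illustrative sketch, not from the upstream header: binding the current
 * process's address space to a PASID and releasing it again.
 * intel_svm_unbind_mm() and the intel_svm_available() helper are declared
 * just below; the device-programming step is only hinted at in a comment.
 */
#include <linux/errno.h>
#include <linux/intel-svm.h>

static int example_use_svm(struct device *dev)
{
	int pasid, ret;

	if (!intel_svm_available(dev))
		return -ENODEV;

	ret = intel_svm_bind_mm(dev, &pasid, 0, NULL);
	if (ret)
		return ret;

	/* ... program the device to tag its DMA with 'pasid' ... */

	return intel_svm_unbind_mm(dev, pasid);
}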
+ */ +extern int intel_svm_unbind_mm(struct device *dev, int pasid); + +/** + * intel_svm_is_pasid_valid() - check if pasid is valid + * @dev: Device for which PASID was allocated + * @pasid: PASID value to be checked + * + * This function checks if the specified pasid is still valid. A + * valid pasid means the backing mm is still having a valid user. + * For kernel callers init_mm is always valid. for other mm, if mm->mm_users + * is non-zero, it is valid. + * + * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid + * 1 if pasid is valid. + */ +extern int intel_svm_is_pasid_valid(struct device *dev, int pasid); + +#else /* CONFIG_INTEL_IOMMU_SVM */ + +static inline int intel_svm_bind_mm(struct device *dev, int *pasid, + int flags, struct svm_dev_ops *ops) +{ + return -ENOSYS; +} + +static inline int intel_svm_unbind_mm(struct device *dev, int pasid) +{ + BUG(); +} + +static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid) +{ + return -EINVAL; +} +#endif /* CONFIG_INTEL_IOMMU_SVM */ + +#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL)) + +#endif /* __INTEL_SVM_H__ */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h new file mode 100644 index 000000000..eeceac337 --- /dev/null +++ b/include/linux/interrupt.h @@ -0,0 +1,728 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* interrupt.h */ +#ifndef _LINUX_INTERRUPT_H +#define _LINUX_INTERRUPT_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * These correspond to the IORESOURCE_IRQ_* defines in + * linux/ioport.h to select the interrupt line behaviour. When + * requesting an interrupt without specifying a IRQF_TRIGGER, the + * setting should be assumed to be "as already configured", which + * may be as per machine or firmware initialisation. + */ +#define IRQF_TRIGGER_NONE 0x00000000 +#define IRQF_TRIGGER_RISING 0x00000001 +#define IRQF_TRIGGER_FALLING 0x00000002 +#define IRQF_TRIGGER_HIGH 0x00000004 +#define IRQF_TRIGGER_LOW 0x00000008 +#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) +#define IRQF_TRIGGER_PROBE 0x00000010 + +/* + * These flags used only by the kernel as part of the + * irq handling routines. + * + * IRQF_SHARED - allow sharing the irq among several devices + * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur + * IRQF_TIMER - Flag to mark this interrupt as timer interrupt + * IRQF_PERCPU - Interrupt is per cpu + * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing + * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is + * registered first in an shared interrupt is considered for + * performance reasons) + * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. + * Used by threaded interrupts which need to keep the + * irq line disabled until the threaded handler has been run. + * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee + * that this interrupt will wake the system from a suspended + * state. See Documentation/power/suspend-and-interrupts.txt + * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set + * IRQF_NO_THREAD - Interrupt cannot be threaded + * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device + * resume time. 
+ * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this + * interrupt handler after suspending interrupts. For system + * wakeup devices users need to implement wakeup detection in + * their interrupt handlers. + */ +#define IRQF_SHARED 0x00000080 +#define IRQF_PROBE_SHARED 0x00000100 +#define __IRQF_TIMER 0x00000200 +#define IRQF_PERCPU 0x00000400 +#define IRQF_NOBALANCING 0x00000800 +#define IRQF_IRQPOLL 0x00001000 +#define IRQF_ONESHOT 0x00002000 +#define IRQF_NO_SUSPEND 0x00004000 +#define IRQF_FORCE_RESUME 0x00008000 +#define IRQF_NO_THREAD 0x00010000 +#define IRQF_EARLY_RESUME 0x00020000 +#define IRQF_COND_SUSPEND 0x00040000 + +#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) + +/* + * These values can be returned by request_any_context_irq() and + * describe the context the interrupt will be run in. + * + * IRQC_IS_HARDIRQ - interrupt runs in hardirq context + * IRQC_IS_NESTED - interrupt runs in a nested threaded context + */ +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED, +}; + +typedef irqreturn_t (*irq_handler_t)(int, void *); + +/** + * struct irqaction - per interrupt action descriptor + * @handler: interrupt handler function + * @name: name of the device + * @dev_id: cookie to identify the device + * @percpu_dev_id: cookie to identify the device + * @next: pointer to the next irqaction for shared interrupts + * @irq: interrupt number + * @flags: flags (see IRQF_* above) + * @thread_fn: interrupt handler function for threaded interrupts + * @thread: thread pointer for threaded interrupts + * @secondary: pointer to secondary irqaction (force threading) + * @thread_flags: flags related to @thread + * @thread_mask: bitmask for keeping track of @thread activity + * @dir: pointer to the proc/irq/NN/name entry + */ +struct irqaction { + irq_handler_t handler; + void *dev_id; + void __percpu *percpu_dev_id; + struct irqaction *next; + irq_handler_t thread_fn; + struct task_struct *thread; + struct irqaction *secondary; + unsigned int irq; + unsigned int flags; + unsigned long thread_flags; + unsigned long thread_mask; + const char *name; + struct proc_dir_entry *dir; +} ____cacheline_internodealigned_in_smp; + +extern irqreturn_t no_action(int cpl, void *dev_id); + +/* + * If a (PCI) device interrupt is not connected we set dev->irq to + * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we + * can distingiush that case from other error returns. + * + * 0x80000000 is guaranteed to be outside the available range of interrupts + * and easy to distinguish from other possible incorrect values. 
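/*
 * Illustrative sketch, not from the upstream header: requesting a
 * threaded interrupt with the flags documented above. The handlers,
 * device cookie and trigger type are placeholders; IRQF_ONESHOT keeps
 * the line masked until the thread function has run, which is what
 * slow (e.g. I2C-connected) devices need. request_threaded_irq()
 * itself is declared a little further down in this header.
 */
static irqreturn_t example_hard_handler(int irq, void *dev_id)
{
	/* fast path: just confirm our device raised the line */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* sleepable work: read the device over I2C/SPI, report events */
	return IRQ_HANDLED;
}

static int example_setup_irq(unsigned int irq, void *dev_cookie)
{
	return request_threaded_irq(irq, example_hard_handler,
				    example_thread_fn,
				    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				    "example-dev", dev_cookie);
}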
+ */ +#define IRQ_NOTCONNECTED (1U << 31) + +extern int __must_check +request_threaded_irq(unsigned int irq, irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long flags, const char *name, void *dev); + +static inline int __must_check +request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev) +{ + return request_threaded_irq(irq, handler, NULL, flags, name, dev); +} + +extern int __must_check +request_any_context_irq(unsigned int irq, irq_handler_t handler, + unsigned long flags, const char *name, void *dev_id); + +extern int __must_check +__request_percpu_irq(unsigned int irq, irq_handler_t handler, + unsigned long flags, const char *devname, + void __percpu *percpu_dev_id); + +static inline int __must_check +request_percpu_irq(unsigned int irq, irq_handler_t handler, + const char *devname, void __percpu *percpu_dev_id) +{ + return __request_percpu_irq(irq, handler, 0, + devname, percpu_dev_id); +} + +extern const void *free_irq(unsigned int, void *); +extern void free_percpu_irq(unsigned int, void __percpu *); + +struct device; + +extern int __must_check +devm_request_threaded_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, irq_handler_t thread_fn, + unsigned long irqflags, const char *devname, + void *dev_id); + +static inline int __must_check +devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, + unsigned long irqflags, const char *devname, void *dev_id) +{ + return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, + devname, dev_id); +} + +extern int __must_check +devm_request_any_context_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id); + +extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); + +/* + * On lockdep we dont want to enable hardirqs in hardirq + * context. Use local_irq_enable_in_hardirq() to annotate + * kernel code that has to do this nevertheless (pretty much + * the only valid case is for old/broken hardware that is + * insanely slow). + * + * NOTE: in theory this might break fragile code that relies + * on hardirq delivery - in practice we dont seem to have such + * places left. So the only effect should be slightly increased + * irqs-off latencies. + */ +#ifdef CONFIG_LOCKDEP +# define local_irq_enable_in_hardirq() do { } while (0) +#else +# define local_irq_enable_in_hardirq() local_irq_enable() +#endif + +extern void disable_irq_nosync(unsigned int irq); +extern bool disable_hardirq(unsigned int irq); +extern void disable_irq(unsigned int irq); +extern void disable_percpu_irq(unsigned int irq); +extern void enable_irq(unsigned int irq); +extern void enable_percpu_irq(unsigned int irq, unsigned int type); +extern bool irq_percpu_is_enabled(unsigned int irq); +extern void irq_wake_thread(unsigned int irq, void *dev_id); + +/* The following three functions are for the core kernel use only. */ +extern void suspend_device_irqs(void); +extern void resume_device_irqs(void); + +/** + * struct irq_affinity_notify - context for notification of IRQ affinity changes + * @irq: Interrupt to which notification applies + * @kref: Reference count, for internal use + * @work: Work item, for internal use + * @notify: Function to be called on change. This will be + * called in process context. + * @release: Function to be called on release. This will be + * called in process context. 
Once registered, the + * structure must only be freed when this function is + * called or later. + */ +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); + void (*release)(struct kref *ref); +}; + +/** + * struct irq_affinity - Description for automatic irq affinity assignements + * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of + * the MSI(-X) vector space + * @post_vectors: Don't apply affinity to @post_vectors at end of + * the MSI(-X) vector space + */ +struct irq_affinity { + int pre_vectors; + int post_vectors; +}; + +#if defined(CONFIG_SMP) + +extern cpumask_var_t irq_default_affinity; + +/* Internal implementation. Use the helpers below */ +extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, + bool force); + +/** + * irq_set_affinity - Set the irq affinity of a given irq + * @irq: Interrupt to set affinity + * @cpumask: cpumask + * + * Fails if cpumask does not contain an online CPU + */ +static inline int +irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return __irq_set_affinity(irq, cpumask, false); +} + +/** + * irq_force_affinity - Force the irq affinity of a given irq + * @irq: Interrupt to set affinity + * @cpumask: cpumask + * + * Same as irq_set_affinity, but without checking the mask against + * online cpus. + * + * Solely for low level cpu hotplug code, where we need to make per + * cpu interrupts affine before the cpu becomes online. + */ +static inline int +irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return __irq_set_affinity(irq, cpumask, true); +} + +extern int irq_can_set_affinity(unsigned int irq); +extern int irq_select_affinity(unsigned int irq); + +extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); + +extern int +irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); + +struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); +int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd); + +#else /* CONFIG_SMP */ + +static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) +{ + return -EINVAL; +} + +static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return 0; +} + +static inline int irq_can_set_affinity(unsigned int irq) +{ + return 0; +} + +static inline int irq_select_affinity(unsigned int irq) { return 0; } + +static inline int irq_set_affinity_hint(unsigned int irq, + const struct cpumask *m) +{ + return -EINVAL; +} + +static inline int +irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) +{ + return 0; +} + +static inline struct cpumask * +irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) +{ + return NULL; +} + +static inline int +irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) +{ + return maxvec; +} + +#endif /* CONFIG_SMP */ + +/* + * Special lockdep variants of irq disabling/enabling. + * These should be used for locking constructs that + * know that a particular irq context which is disabled, + * and which is the only irq-context user of a lock, + * that it's safe to take the lock in the irq-disabled + * section without disabling hardirqs. + * + * On !CONFIG_LOCKDEP they are equivalent to the normal + * irq disable/enable methods. 
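+ *
+ * Illustrative pattern (sketch only; "dev", "dev->irq" and "dev->lock"
+ * are placeholders for a driver whose lock is otherwise taken only from
+ * this interrupt):
+ *
+ *   disable_irq_nosync_lockdep(dev->irq);
+ *   spin_lock(&dev->lock);
+ *   ... critical section ...
+ *   spin_unlock(&dev->lock);
+ *   enable_irq_lockdep(dev->irq);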
+ */ +static inline void disable_irq_nosync_lockdep(unsigned int irq) +{ + disable_irq_nosync(irq); +#ifdef CONFIG_LOCKDEP + local_irq_disable(); +#endif +} + +static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) +{ + disable_irq_nosync(irq); +#ifdef CONFIG_LOCKDEP + local_irq_save(*flags); +#endif +} + +static inline void disable_irq_lockdep(unsigned int irq) +{ + disable_irq(irq); +#ifdef CONFIG_LOCKDEP + local_irq_disable(); +#endif +} + +static inline void enable_irq_lockdep(unsigned int irq) +{ +#ifdef CONFIG_LOCKDEP + local_irq_enable(); +#endif + enable_irq(irq); +} + +static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) +{ +#ifdef CONFIG_LOCKDEP + local_irq_restore(*flags); +#endif + enable_irq(irq); +} + +/* IRQ wakeup (PM) control: */ +extern int irq_set_irq_wake(unsigned int irq, unsigned int on); + +static inline int enable_irq_wake(unsigned int irq) +{ + return irq_set_irq_wake(irq, 1); +} + +static inline int disable_irq_wake(unsigned int irq) +{ + return irq_set_irq_wake(irq, 0); +} + +/* + * irq_get_irqchip_state/irq_set_irqchip_state specific flags + */ +enum irqchip_irq_state { + IRQCHIP_STATE_PENDING, /* Is interrupt pending? */ + IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */ + IRQCHIP_STATE_MASKED, /* Is interrupt masked? */ + IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? */ +}; + +extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, + bool *state); +extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, + bool state); + +#ifdef CONFIG_IRQ_FORCED_THREADING +extern bool force_irqthreads; +#else +#define force_irqthreads (0) +#endif + +#ifndef local_softirq_pending + +#ifndef local_softirq_pending_ref +#define local_softirq_pending_ref irq_stat.__softirq_pending +#endif + +#define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref)) +#define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x))) +#define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x))) + +#endif /* local_softirq_pending */ + +/* Some architectures might implement lazy enabling/disabling of + * interrupts. In some cases, such as stop_machine, we might want + * to ensure that after a local_irq_disable(), interrupts have + * really been disabled in hardware. Such architectures need to + * implement the following hook. + */ +#ifndef hard_irq_disable +#define hard_irq_disable() do { } while(0) +#endif + +/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high + frequency threaded job scheduling. For almost all the purposes + tasklets are more than enough. F.e. all serial device BHs et + al. should be converted to tasklets, not to softirqs. + */ + +enum +{ + HI_SOFTIRQ=0, + TIMER_SOFTIRQ, + NET_TX_SOFTIRQ, + NET_RX_SOFTIRQ, + BLOCK_SOFTIRQ, + IRQ_POLL_SOFTIRQ, + TASKLET_SOFTIRQ, + SCHED_SOFTIRQ, + HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the + numbering. Sigh! */ + RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ + + NR_SOFTIRQS +}; + +#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) + +/* map softirq index to softirq name. update 'softirq_to_name' in + * kernel/softirq.c when adding a new softirq. + */ +extern const char * const softirq_to_name[NR_SOFTIRQS]; + +/* softirq mask and active fields moved to irq_cpustat_t in + * asm/hardirq.h to get better cache usage. 
KAO + */ + +struct softirq_action +{ + void (*action)(struct softirq_action *); +}; + +asmlinkage void do_softirq(void); +asmlinkage void __do_softirq(void); + +#ifdef __ARCH_HAS_DO_SOFTIRQ +void do_softirq_own_stack(void); +#else +static inline void do_softirq_own_stack(void) +{ + __do_softirq(); +} +#endif + +extern void open_softirq(int nr, void (*action)(struct softirq_action *)); +extern void softirq_init(void); +extern void __raise_softirq_irqoff(unsigned int nr); + +extern void raise_softirq_irqoff(unsigned int nr); +extern void raise_softirq(unsigned int nr); + +DECLARE_PER_CPU(struct task_struct *, ksoftirqd); + +static inline struct task_struct *this_cpu_ksoftirqd(void) +{ + return this_cpu_read(ksoftirqd); +} + +/* Tasklets --- multithreaded analogue of BHs. + + Main feature differing them of generic softirqs: tasklet + is running only on one CPU simultaneously. + + Main feature differing them of BHs: different tasklets + may be run simultaneously on different CPUs. + + Properties: + * If tasklet_schedule() is called, then tasklet is guaranteed + to be executed on some cpu at least once after this. + * If the tasklet is already scheduled, but its execution is still not + started, it will be executed only once. + * If this tasklet is already running on another CPU (or schedule is called + from tasklet itself), it is rescheduled for later. + * Tasklet is strictly serialized wrt itself, but not + wrt another tasklets. If client needs some intertask synchronization, + he makes it with spinlocks. + */ + +struct tasklet_struct +{ + struct tasklet_struct *next; + unsigned long state; + atomic_t count; + void (*func)(unsigned long); + unsigned long data; +}; + +#define DECLARE_TASKLET(name, func, data) \ +struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } + +#define DECLARE_TASKLET_DISABLED(name, func, data) \ +struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } + + +enum +{ + TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ + TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ +}; + +#ifdef CONFIG_SMP +static inline int tasklet_trylock(struct tasklet_struct *t) +{ + return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); +} + +static inline void tasklet_unlock(struct tasklet_struct *t) +{ + smp_mb__before_atomic(); + clear_bit(TASKLET_STATE_RUN, &(t)->state); +} + +static inline void tasklet_unlock_wait(struct tasklet_struct *t) +{ + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } +} +#else +#define tasklet_trylock(t) 1 +#define tasklet_unlock_wait(t) do { } while (0) +#define tasklet_unlock(t) do { } while (0) +#endif + +extern void __tasklet_schedule(struct tasklet_struct *t); + +static inline void tasklet_schedule(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_schedule(t); +} + +extern void __tasklet_hi_schedule(struct tasklet_struct *t); + +static inline void tasklet_hi_schedule(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_hi_schedule(t); +} + +static inline void tasklet_disable_nosync(struct tasklet_struct *t) +{ + atomic_inc(&t->count); + smp_mb__after_atomic(); +} + +static inline void tasklet_disable(struct tasklet_struct *t) +{ + tasklet_disable_nosync(t); + tasklet_unlock_wait(t); + smp_mb(); +} + +static inline void tasklet_enable(struct tasklet_struct *t) +{ + smp_mb__before_atomic(); + atomic_dec(&t->count); +} + +extern void tasklet_kill(struct tasklet_struct *t); +extern void 
tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); +extern void tasklet_init(struct tasklet_struct *t, + void (*func)(unsigned long), unsigned long data); + +struct tasklet_hrtimer { + struct hrtimer timer; + struct tasklet_struct tasklet; + enum hrtimer_restart (*function)(struct hrtimer *); +}; + +extern void +tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, + enum hrtimer_restart (*function)(struct hrtimer *), + clockid_t which_clock, enum hrtimer_mode mode); + +static inline +void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, + const enum hrtimer_mode mode) +{ + hrtimer_start(&ttimer->timer, time, mode); +} + +static inline +void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) +{ + hrtimer_cancel(&ttimer->timer); + tasklet_kill(&ttimer->tasklet); +} + +/* + * Autoprobing for irqs: + * + * probe_irq_on() and probe_irq_off() provide robust primitives + * for accurate IRQ probing during kernel initialization. They are + * reasonably simple to use, are not "fooled" by spurious interrupts, + * and, unlike other attempts at IRQ probing, they do not get hung on + * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards). + * + * For reasonably foolproof probing, use them as follows: + * + * 1. clear and/or mask the device's internal interrupt. + * 2. sti(); + * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs + * 4. enable the device and cause it to trigger an interrupt. + * 5. wait for the device to interrupt, using non-intrusive polling or a delay. + * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple + * 7. service the device to clear its pending interrupt. + * 8. loop again if paranoia is required. + * + * probe_irq_on() returns a mask of allocated irq's. + * + * probe_irq_off() takes the mask as a parameter, + * and returns the irq number which occurred, + * or zero if none occurred, or a negative irq number + * if more than one irq occurred. + */ + +#if !defined(CONFIG_GENERIC_IRQ_PROBE) +static inline unsigned long probe_irq_on(void) +{ + return 0; +} +static inline int probe_irq_off(unsigned long val) +{ + return 0; +} +static inline unsigned int probe_irq_mask(unsigned long val) +{ + return 0; +} +#else +extern unsigned long probe_irq_on(void); /* returns 0 on failure */ +extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */ +extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */ +#endif + +#ifdef CONFIG_PROC_FS +/* Initialize /proc/irq/ */ +extern void init_irq_proc(void); +#else +static inline void init_irq_proc(void) +{ +} +#endif + +#ifdef CONFIG_IRQ_TIMINGS +void irq_timings_enable(void); +void irq_timings_disable(void); +u64 irq_timings_next_event(u64 now); +#endif + +struct seq_file; +int show_interrupts(struct seq_file *p, void *v); +int arch_show_interrupts(struct seq_file *p, int prec); + +extern int early_irq_init(void); +extern int arch_probe_nr_irqs(void); +extern int arch_early_irq_init(void); + +/* + * We want to know which function is an entrypoint of a hardirq or a softirq. 
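+ *
+ * Illustrative use of the annotations defined below (sketch only;
+ * arch_handle_irq is a placeholder name for an architecture's top-level
+ * entry function):
+ *
+ *   void __irq_entry arch_handle_irq(struct pt_regs *regs);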
+ */ +#define __irq_entry __attribute__((__section__(".irqentry.text"))) +#define __softirq_entry \ + __attribute__((__section__(".softirqentry.text"))) + +#endif diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h new file mode 100644 index 000000000..288c26f50 --- /dev/null +++ b/include/linux/interval_tree.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_INTERVAL_TREE_H +#define _LINUX_INTERVAL_TREE_H + +#include + +struct interval_tree_node { + struct rb_node rb; + unsigned long start; /* Start of interval */ + unsigned long last; /* Last location _in_ interval */ + unsigned long __subtree_last; +}; + +extern void +interval_tree_insert(struct interval_tree_node *node, + struct rb_root_cached *root); + +extern void +interval_tree_remove(struct interval_tree_node *node, + struct rb_root_cached *root); + +extern struct interval_tree_node * +interval_tree_iter_first(struct rb_root_cached *root, + unsigned long start, unsigned long last); + +extern struct interval_tree_node * +interval_tree_iter_next(struct interval_tree_node *node, + unsigned long start, unsigned long last); + +#endif /* _LINUX_INTERVAL_TREE_H */ diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h new file mode 100644 index 000000000..1f97ce26c --- /dev/null +++ b/include/linux/interval_tree_generic.h @@ -0,0 +1,217 @@ +/* + Interval Trees + (C) 2012 Michel Lespinasse + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + include/linux/interval_tree_generic.h +*/ + +#include + +/* + * Template for implementing interval trees + * + * ITSTRUCT: struct type of the interval tree nodes + * ITRB: name of struct rb_node field within ITSTRUCT + * ITTYPE: type of the interval endpoints + * ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree + * ITSTART(n): start endpoint of ITSTRUCT node n + * ITLAST(n): last endpoint of ITSTRUCT node n + * ITSTATIC: 'static' or empty + * ITPREFIX: prefix to use for the inline tree definitions + * + * Note - before using this, please consider if generic version + * (interval_tree.h) would work for you... 
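+ *
+ * Illustrative instantiation (sketch, using the generic
+ * interval_tree_node from interval_tree.h; START, LAST and the
+ * "my_interval_tree" prefix are local placeholder names):
+ *
+ *   #define START(node) ((node)->start)
+ *   #define LAST(node)  ((node)->last)
+ *
+ *   INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
+ *                        unsigned long, __subtree_last,
+ *                        START, LAST, static, my_interval_tree)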
+ */ + +#define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, \ + ITSTART, ITLAST, ITSTATIC, ITPREFIX) \ + \ +/* Callbacks for augmented rbtree insert and remove */ \ + \ +static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \ +{ \ + ITTYPE max = ITLAST(node), subtree_last; \ + if (node->ITRB.rb_left) { \ + subtree_last = rb_entry(node->ITRB.rb_left, \ + ITSTRUCT, ITRB)->ITSUBTREE; \ + if (max < subtree_last) \ + max = subtree_last; \ + } \ + if (node->ITRB.rb_right) { \ + subtree_last = rb_entry(node->ITRB.rb_right, \ + ITSTRUCT, ITRB)->ITSUBTREE; \ + if (max < subtree_last) \ + max = subtree_last; \ + } \ + return max; \ +} \ + \ +RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \ + ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \ + \ +/* Insert / remove interval nodes from the tree */ \ + \ +ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \ + struct rb_root_cached *root) \ +{ \ + struct rb_node **link = &root->rb_root.rb_node, *rb_parent = NULL; \ + ITTYPE start = ITSTART(node), last = ITLAST(node); \ + ITSTRUCT *parent; \ + bool leftmost = true; \ + \ + while (*link) { \ + rb_parent = *link; \ + parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \ + if (parent->ITSUBTREE < last) \ + parent->ITSUBTREE = last; \ + if (start < ITSTART(parent)) \ + link = &parent->ITRB.rb_left; \ + else { \ + link = &parent->ITRB.rb_right; \ + leftmost = false; \ + } \ + } \ + \ + node->ITSUBTREE = last; \ + rb_link_node(&node->ITRB, rb_parent, link); \ + rb_insert_augmented_cached(&node->ITRB, root, \ + leftmost, &ITPREFIX ## _augment); \ +} \ + \ +ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \ + struct rb_root_cached *root) \ +{ \ + rb_erase_augmented_cached(&node->ITRB, root, &ITPREFIX ## _augment); \ +} \ + \ +/* \ + * Iterate over intervals intersecting [start;last] \ + * \ + * Note that a node's interval intersects [start;last] iff: \ + * Cond1: ITSTART(node) <= last \ + * and \ + * Cond2: start <= ITLAST(node) \ + */ \ + \ +static ITSTRUCT * \ +ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ +{ \ + while (true) { \ + /* \ + * Loop invariant: start <= node->ITSUBTREE \ + * (Cond2 is satisfied by one of the subtree nodes) \ + */ \ + if (node->ITRB.rb_left) { \ + ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \ + ITSTRUCT, ITRB); \ + if (start <= left->ITSUBTREE) { \ + /* \ + * Some nodes in left subtree satisfy Cond2. \ + * Iterate to find the leftmost such node N. \ + * If it also satisfies Cond1, that's the \ + * match we are looking for. Otherwise, there \ + * is no matching interval as nodes to the \ + * right of N can't satisfy Cond1 either. \ + */ \ + node = left; \ + continue; \ + } \ + } \ + if (ITSTART(node) <= last) { /* Cond1 */ \ + if (start <= ITLAST(node)) /* Cond2 */ \ + return node; /* node is leftmost match */ \ + if (node->ITRB.rb_right) { \ + node = rb_entry(node->ITRB.rb_right, \ + ITSTRUCT, ITRB); \ + if (start <= node->ITSUBTREE) \ + continue; \ + } \ + } \ + return NULL; /* No match */ \ + } \ +} \ + \ +ITSTATIC ITSTRUCT * \ +ITPREFIX ## _iter_first(struct rb_root_cached *root, \ + ITTYPE start, ITTYPE last) \ +{ \ + ITSTRUCT *node, *leftmost; \ + \ + if (!root->rb_root.rb_node) \ + return NULL; \ + \ + /* \ + * Fastpath range intersection/overlap between A: [a0, a1] and \ + * B: [b0, b1] is given by: \ + * \ + * a0 <= b1 && b0 <= a1 \ + * \ + * ... where A holds the lock range and B holds the smallest \ + * 'start' and largest 'last' in the tree. 
For the later, we \ + * rely on the root node, which by augmented interval tree \ + * property, holds the largest value in its last-in-subtree. \ + * This allows mitigating some of the tree walk overhead for \ + * for non-intersecting ranges, maintained and consulted in O(1). \ + */ \ + node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \ + if (node->ITSUBTREE < start) \ + return NULL; \ + \ + leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \ + if (ITSTART(leftmost) > last) \ + return NULL; \ + \ + return ITPREFIX ## _subtree_search(node, start, last); \ +} \ + \ +ITSTATIC ITSTRUCT * \ +ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ +{ \ + struct rb_node *rb = node->ITRB.rb_right, *prev; \ + \ + while (true) { \ + /* \ + * Loop invariants: \ + * Cond1: ITSTART(node) <= last \ + * rb == node->ITRB.rb_right \ + * \ + * First, search right subtree if suitable \ + */ \ + if (rb) { \ + ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \ + if (start <= right->ITSUBTREE) \ + return ITPREFIX ## _subtree_search(right, \ + start, last); \ + } \ + \ + /* Move up the tree until we come from a node's left child */ \ + do { \ + rb = rb_parent(&node->ITRB); \ + if (!rb) \ + return NULL; \ + prev = &node->ITRB; \ + node = rb_entry(rb, ITSTRUCT, ITRB); \ + rb = node->ITRB.rb_right; \ + } while (prev == rb); \ + \ + /* Check if the node intersects [start;last] */ \ + if (last < ITSTART(node)) /* !Cond1 */ \ + return NULL; \ + else if (start <= ITLAST(node)) /* Cond2 */ \ + return node; \ + } \ +} diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h new file mode 100644 index 000000000..862d786a9 --- /dev/null +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IO_64_NONATOMIC_HI_LO_H_ +#define _LINUX_IO_64_NONATOMIC_HI_LO_H_ + +#include +#include + +static inline __u64 hi_lo_readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + high = readl(p + 1); + low = readl(p); + + return low + ((u64)high << 32); +} + +static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val >> 32, addr + 4); + writel(val, addr); +} + +static inline __u64 hi_lo_readq_relaxed(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + high = readl_relaxed(p + 1); + low = readl_relaxed(p); + + return low + ((u64)high << 32); +} + +static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) +{ + writel_relaxed(val >> 32, addr + 4); + writel_relaxed(val, addr); +} + +#ifndef readq +#define readq hi_lo_readq +#endif + +#ifndef writeq +#define writeq hi_lo_writeq +#endif + +#ifndef readq_relaxed +#define readq_relaxed hi_lo_readq_relaxed +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed hi_lo_writeq_relaxed +#endif + +#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h new file mode 100644 index 000000000..d042e7bb5 --- /dev/null +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IO_64_NONATOMIC_LO_HI_H_ +#define _LINUX_IO_64_NONATOMIC_LO_HI_H_ + +#include +#include + +static inline __u64 lo_hi_readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} + +static inline void lo_hi_writeq(__u64 
val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} + +static inline __u64 lo_hi_readq_relaxed(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl_relaxed(p); + high = readl_relaxed(p + 1); + + return low + ((u64)high << 32); +} + +static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) +{ + writel_relaxed(val, addr); + writel_relaxed(val >> 32, addr + 4); +} + +#ifndef readq +#define readq lo_hi_readq +#endif + +#ifndef writeq +#define writeq lo_hi_writeq +#endif + +#ifndef readq_relaxed +#define readq_relaxed lo_hi_readq_relaxed +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed lo_hi_writeq_relaxed +#endif + +#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h new file mode 100644 index 000000000..fa46183b1 --- /dev/null +++ b/include/linux/io-mapping.h @@ -0,0 +1,205 @@ +/* + * Copyright © 2008 Keith Packard + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef _LINUX_IO_MAPPING_H +#define _LINUX_IO_MAPPING_H + +#include +#include +#include +#include +#include + +/* + * The io_mapping mechanism provides an abstraction for mapping + * individual pages from an io device to the CPU in an efficient fashion. + * + * See Documentation/io-mapping.txt + */ + +struct io_mapping { + resource_size_t base; + unsigned long size; + pgprot_t prot; + void __iomem *iomem; +}; + +#ifdef CONFIG_HAVE_ATOMIC_IOMAP + +#include +/* + * For small address space machines, mapping large objects + * into the kernel virtual space isn't practical. Where + * available, use fixmap support to dynamically map pages + * of the object at run time. 
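+ *
+ * Typical use of the interface declared below is the same on either
+ * implementation (illustrative sketch; "base", "size" and "offset" are
+ * placeholders):
+ *
+ *   iomap = io_mapping_create_wc(base, size);
+ *   vaddr = io_mapping_map_atomic_wc(iomap, offset);
+ *   ... access the page ...
+ *   io_mapping_unmap_atomic(vaddr);
+ *   io_mapping_free(iomap);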
+ */ + +static inline struct io_mapping * +io_mapping_init_wc(struct io_mapping *iomap, + resource_size_t base, + unsigned long size) +{ + pgprot_t prot; + + if (iomap_create_wc(base, size, &prot)) + return NULL; + + iomap->base = base; + iomap->size = size; + iomap->prot = prot; + return iomap; +} + +static inline void +io_mapping_fini(struct io_mapping *mapping) +{ + iomap_free(mapping->base, mapping->size); +} + +/* Atomic map/unmap */ +static inline void __iomem * +io_mapping_map_atomic_wc(struct io_mapping *mapping, + unsigned long offset) +{ + resource_size_t phys_addr; + unsigned long pfn; + + BUG_ON(offset >= mapping->size); + phys_addr = mapping->base + offset; + pfn = (unsigned long) (phys_addr >> PAGE_SHIFT); + return iomap_atomic_prot_pfn(pfn, mapping->prot); +} + +static inline void +io_mapping_unmap_atomic(void __iomem *vaddr) +{ + iounmap_atomic(vaddr); +} + +static inline void __iomem * +io_mapping_map_wc(struct io_mapping *mapping, + unsigned long offset, + unsigned long size) +{ + resource_size_t phys_addr; + + BUG_ON(offset >= mapping->size); + phys_addr = mapping->base + offset; + + return ioremap_wc(phys_addr, size); +} + +static inline void +io_mapping_unmap(void __iomem *vaddr) +{ + iounmap(vaddr); +} + +#else + +#include +#include + +/* Create the io_mapping object*/ +static inline struct io_mapping * +io_mapping_init_wc(struct io_mapping *iomap, + resource_size_t base, + unsigned long size) +{ + iomap->iomem = ioremap_wc(base, size); + if (!iomap->iomem) + return NULL; + + iomap->base = base; + iomap->size = size; +#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */ + iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); +#elif defined(pgprot_writecombine) + iomap->prot = pgprot_writecombine(PAGE_KERNEL); +#else + iomap->prot = pgprot_noncached(PAGE_KERNEL); +#endif + + return iomap; +} + +static inline void +io_mapping_fini(struct io_mapping *mapping) +{ + iounmap(mapping->iomem); +} + +/* Non-atomic map/unmap */ +static inline void __iomem * +io_mapping_map_wc(struct io_mapping *mapping, + unsigned long offset, + unsigned long size) +{ + return mapping->iomem + offset; +} + +static inline void +io_mapping_unmap(void __iomem *vaddr) +{ +} + +/* Atomic map/unmap */ +static inline void __iomem * +io_mapping_map_atomic_wc(struct io_mapping *mapping, + unsigned long offset) +{ + preempt_disable(); + pagefault_disable(); + return io_mapping_map_wc(mapping, offset, PAGE_SIZE); +} + +static inline void +io_mapping_unmap_atomic(void __iomem *vaddr) +{ + io_mapping_unmap(vaddr); + pagefault_enable(); + preempt_enable(); +} + +#endif /* HAVE_ATOMIC_IOMAP */ + +static inline struct io_mapping * +io_mapping_create_wc(resource_size_t base, + unsigned long size) +{ + struct io_mapping *iomap; + + iomap = kmalloc(sizeof(*iomap), GFP_KERNEL); + if (!iomap) + return NULL; + + if (!io_mapping_init_wc(iomap, base, size)) { + kfree(iomap); + return NULL; + } + + return iomap; +} + +static inline void +io_mapping_free(struct io_mapping *iomap) +{ + io_mapping_fini(iomap); + kfree(iomap); +} + +#endif /* _LINUX_IO_MAPPING_H */ diff --git a/include/linux/io.h b/include/linux/io.h new file mode 100644 index 000000000..da39ff89d --- /dev/null +++ b/include/linux/io.h @@ -0,0 +1,191 @@ +/* + * Copyright 2006 PathScale, Inc. All Rights Reserved. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef _LINUX_IO_H +#define _LINUX_IO_H + +#include +#include +#include +#include +#include +#include + +struct device; +struct resource; + +__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); +void __ioread32_copy(void *to, const void __iomem *from, size_t count); +void __iowrite64_copy(void __iomem *to, const void *from, size_t count); + +#ifdef CONFIG_MMU +int ioremap_page_range(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot); +#else +static inline int ioremap_page_range(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot) +{ + return 0; +} +#endif + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +void __init ioremap_huge_init(void); +int arch_ioremap_pud_supported(void); +int arch_ioremap_pmd_supported(void); +#else +static inline void ioremap_huge_init(void) { } +#endif + +/* + * Managed iomap interface + */ +#ifdef CONFIG_HAS_IOPORT_MAP +void __iomem * devm_ioport_map(struct device *dev, unsigned long port, + unsigned int nr); +void devm_ioport_unmap(struct device *dev, void __iomem *addr); +#else +static inline void __iomem *devm_ioport_map(struct device *dev, + unsigned long port, + unsigned int nr) +{ + return NULL; +} + +static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) +{ +} +#endif + +#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) + +void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, + resource_size_t size); +void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset, + resource_size_t size); +void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, + resource_size_t size); +void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset, + resource_size_t size); +void devm_iounmap(struct device *dev, void __iomem *addr); +int check_signature(const volatile void __iomem *io_addr, + const unsigned char *signature, int length); +void devm_ioremap_release(struct device *dev, void *res); + +void *devm_memremap(struct device *dev, resource_size_t offset, + size_t size, unsigned long flags); +void devm_memunmap(struct device *dev, void *addr); + +void *__devm_memremap_pages(struct device *dev, struct resource *res); + +#ifdef CONFIG_PCI +/* + * The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and + * Posting") mandate non-posted configuration transactions. There is + * no ioremap API in the kernel that can guarantee non-posted write + * semantics across arches so provide a default implementation for + * mapping PCI config space that defaults to ioremap_nocache(); arches + * should override it if they have memory mapping implementations that + * guarantee non-posted writes semantics to make the memory mapping + * compliant with the PCI specification. 
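+ *
+ * An architecture can override the default below, e.g. (illustrative
+ * sketch; arch_ioremap_nonposted() is a placeholder for an arch helper
+ * that guarantees non-posted write semantics):
+ *
+ *   #define pci_remap_cfgspace(offset, size) \
+ *           arch_ioremap_nonposted(offset, size)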
+ */ +#ifndef pci_remap_cfgspace +#define pci_remap_cfgspace pci_remap_cfgspace +static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset, + size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif +#endif + +/* + * Some systems do not have legacy ISA devices. + * /dev/port is not a valid interface on these systems. + * So for those archs, should define the following symbol. + */ +#ifndef arch_has_dev_port +#define arch_has_dev_port() (1) +#endif + +/* + * Some systems (x86 without PAT) have a somewhat reliable way to mark a + * physical address range such that uncached mappings will actually + * end up write-combining. This facility should be used in conjunction + * with pgprot_writecombine, ioremap-wc, or set_memory_wc, since it has + * no effect if the per-page mechanisms are functional. + * (On x86 without PAT, these functions manipulate MTRRs.) + * + * arch_phys_del_wc(0) or arch_phys_del_wc(any error code) is guaranteed + * to have no effect. + */ +#ifndef arch_phys_wc_add +static inline int __must_check arch_phys_wc_add(unsigned long base, + unsigned long size) +{ + return 0; /* It worked (i.e. did nothing). */ +} + +static inline void arch_phys_wc_del(int handle) +{ +} + +#define arch_phys_wc_add arch_phys_wc_add +#ifndef arch_phys_wc_index +static inline int arch_phys_wc_index(int handle) +{ + return -1; +} +#define arch_phys_wc_index arch_phys_wc_index +#endif +#endif + +enum { + /* See memremap() kernel-doc for usage description... */ + MEMREMAP_WB = 1 << 0, + MEMREMAP_WT = 1 << 1, + MEMREMAP_WC = 1 << 2, + MEMREMAP_ENC = 1 << 3, + MEMREMAP_DEC = 1 << 4, +}; + +void *memremap(resource_size_t offset, size_t size, unsigned long flags); +void memunmap(void *addr); + +/* + * On x86 PAT systems we have memory tracking that keeps track of + * the allowed mappings on memory ranges. This tracking works for + * all the in-kernel mapping APIs (ioremap*), but where the user + * wishes to map a range from a physical device into user memory + * the tracking won't be updated. This API is to be used by + * drivers which remap physical device pages into userspace, + * and wants to make sure they are mapped WC and not UC. + */ +#ifndef arch_io_reserve_memtype_wc +static inline int arch_io_reserve_memtype_wc(resource_size_t base, + resource_size_t size) +{ + return 0; +} + +static inline void arch_io_free_memtype_wc(resource_size_t base, + resource_size_t size) +{ +} +#endif + +#endif /* _LINUX_IO_H */ diff --git a/include/linux/ioc3.h b/include/linux/ioc3.h new file mode 100644 index 000000000..38b286e9a --- /dev/null +++ b/include/linux/ioc3.h @@ -0,0 +1,93 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (c) 2005 Stanislaw Skowronek + */ + +#ifndef _LINUX_IOC3_H +#define _LINUX_IOC3_H + +#include + +#define IOC3_MAX_SUBMODULES 32 + +#define IOC3_CLASS_NONE 0 +#define IOC3_CLASS_BASE_IP27 1 +#define IOC3_CLASS_BASE_IP30 2 +#define IOC3_CLASS_MENET_123 3 +#define IOC3_CLASS_MENET_4 4 +#define IOC3_CLASS_CADDUO 5 +#define IOC3_CLASS_SERIAL 6 + +/* One of these per IOC3 */ +struct ioc3_driver_data { + struct list_head list; + int id; /* IOC3 sequence number */ + /* PCI mapping */ + unsigned long pma; /* physical address */ + struct ioc3 __iomem *vma; /* pointer to registers */ + struct pci_dev *pdev; /* PCI device */ + /* IRQ stuff */ + int dual_irq; /* set if separate IRQs are used */ + int irq_io, irq_eth; /* IRQ numbers */ + /* GPIO magic */ + spinlock_t gpio_lock; + unsigned int gpdr_shadow; + /* NIC identifiers */ + char nic_part[32]; + char nic_serial[16]; + char nic_mac[6]; + /* submodule set */ + int class; + void *data[IOC3_MAX_SUBMODULES]; /* for submodule use */ + int active[IOC3_MAX_SUBMODULES]; /* set if probe succeeds */ + /* is_ir_lock must be held while + * modifying sio_ie values, so + * we can be sure that sio_ie is + * not changing when we read it + * along with sio_ir. + */ + spinlock_t ir_lock; /* SIO_IE[SC] mod lock */ +}; + +/* One per submodule */ +struct ioc3_submodule { + char *name; /* descriptive submodule name */ + struct module *owner; /* owning kernel module */ + int ethernet; /* set for ethernet drivers */ + int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *); + int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *); + int id; /* assigned by IOC3, index for the "data" array */ + /* IRQ stuff */ + unsigned int irq_mask; /* IOC3 IRQ mask, leave clear for Ethernet */ + int reset_mask; /* non-zero if you want the ioc3.c module to reset interrupts */ + int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); + /* private submodule data */ + void *data; /* assigned by submodule */ +}; + +/********************************** + * Functions needed by submodules * + **********************************/ + +#define IOC3_W_IES 0 +#define IOC3_W_IEC 1 + +/* registers a submodule for all existing and future IOC3 chips */ +extern int ioc3_register_submodule(struct ioc3_submodule *); +/* unregisters a submodule */ +extern void ioc3_unregister_submodule(struct ioc3_submodule *); +/* enables IRQs indicated by irq_mask for a specified IOC3 chip */ +extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); +/* ackowledges specified IRQs */ +extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); +/* disables IRQs indicated by irq_mask for a specified IOC3 chip */ +extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); +/* atomically sets GPCR bits */ +extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int); +/* general ireg writer */ +extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int reg); + +#endif diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h new file mode 100644 index 000000000..51e2b9fb6 --- /dev/null +++ b/include/linux/ioc4.h @@ -0,0 +1,184 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. 
+ */ + +#ifndef _LINUX_IOC4_H +#define _LINUX_IOC4_H + +#include + +/*************** + * Definitions * + ***************/ + +/* Miscellaneous values inherent to hardware */ + +#define IOC4_EXTINT_COUNT_DIVISOR 520 /* PCI clocks per COUNT tick */ + +/*********************************** + * Structures needed by subdrivers * + ***********************************/ + +/* This structure fully describes the IOC4 miscellaneous registers which + * appear at bar[0]+0x00000 through bar[0]+0x0005c. The corresponding + * PCI resource is managed by the main IOC4 driver because it contains + * registers of interest to many different IOC4 subdrivers. + */ +struct ioc4_misc_regs { + /* Miscellaneous IOC4 registers */ + union ioc4_pci_err_addr_l { + uint32_t raw; + struct { + uint32_t valid:1; /* Address captured */ + uint32_t master_id:4; /* Unit causing error + * 0/1: Serial port 0 TX/RX + * 2/3: Serial port 1 TX/RX + * 4/5: Serial port 2 TX/RX + * 6/7: Serial port 3 TX/RX + * 8: ATA/ATAPI + * 9-15: Undefined + */ + uint32_t mul_err:1; /* Multiple errors occurred */ + uint32_t addr:26; /* Bits 31-6 of error addr */ + } fields; + } pci_err_addr_l; + uint32_t pci_err_addr_h; /* Bits 63-32 of error addr */ + union ioc4_sio_int { + uint32_t raw; + struct { + uint8_t tx_mt:1; /* TX ring buffer empty */ + uint8_t rx_full:1; /* RX ring buffer full */ + uint8_t rx_high:1; /* RX high-water exceeded */ + uint8_t rx_timer:1; /* RX timer has triggered */ + uint8_t delta_dcd:1; /* DELTA_DCD seen */ + uint8_t delta_cts:1; /* DELTA_CTS seen */ + uint8_t intr_pass:1; /* Interrupt pass-through */ + uint8_t tx_explicit:1; /* TX, MCW, or delay complete */ + } fields[4]; + } sio_ir; /* Serial interrupt state */ + union ioc4_other_int { + uint32_t raw; + struct { + uint32_t ata_int:1; /* ATA port passthru */ + uint32_t ata_memerr:1; /* ATA halted by mem error */ + uint32_t memerr:4; /* Serial halted by mem err */ + uint32_t kbd_int:1; /* kbd/mouse intr asserted */ + uint32_t reserved:16; /* zero */ + uint32_t rt_int:1; /* INT_OUT section latch */ + uint32_t gen_int:8; /* Intr. from generic pins */ + } fields; + } other_ir; /* Other interrupt state */ + union ioc4_sio_int sio_ies; /* Serial interrupt enable set */ + union ioc4_other_int other_ies; /* Other interrupt enable set */ + union ioc4_sio_int sio_iec; /* Serial interrupt enable clear */ + union ioc4_other_int other_iec; /* Other interrupt enable clear */ + union ioc4_sio_cr { + uint32_t raw; + struct { + uint32_t cmd_pulse:4; /* Bytebus strobe width */ + uint32_t arb_diag:3; /* PCI bus requester */ + uint32_t sio_diag_idle:1; /* Active ser req? */ + uint32_t ata_diag_idle:1; /* Active ATA req? 
*/ + uint32_t ata_diag_active:1; /* ATA req is winner */ + uint32_t reserved:22; /* zero */ + } fields; + } sio_cr; + uint32_t unused1; + union ioc4_int_out { + uint32_t raw; + struct { + uint32_t count:16; /* Period control */ + uint32_t mode:3; /* Output signal shape */ + uint32_t reserved:11; /* zero */ + uint32_t diag:1; /* Timebase control */ + uint32_t int_out:1; /* Current value */ + } fields; + } int_out; /* External interrupt output control */ + uint32_t unused2; + union ioc4_gpcr { + uint32_t raw; + struct { + uint32_t dir:8; /* Pin direction */ + uint32_t edge:8; /* Edge/level mode */ + uint32_t reserved1:4; /* zero */ + uint32_t int_out_en:1; /* INT_OUT enable */ + uint32_t reserved2:11; /* zero */ + } fields; + } gpcr_s; /* Generic PIO control set */ + union ioc4_gpcr gpcr_c; /* Generic PIO control clear */ + union ioc4_gpdr { + uint32_t raw; + struct { + uint32_t gen_pin:8; /* State of pins */ + uint32_t reserved:24; + } fields; + } gpdr; /* Generic PIO data */ + uint32_t unused3; + union ioc4_gppr { + uint32_t raw; + struct { + uint32_t gen_pin:1; /* Single pin state */ + uint32_t reserved:31; + } fields; + } gppr[8]; /* Generic PIO pins */ +}; + +/* Masks for GPCR DIR pins */ +#define IOC4_GPCR_DIR_0 0x01 /* External interrupt output */ +#define IOC4_GPCR_DIR_1 0x02 /* External interrupt input */ +#define IOC4_GPCR_DIR_2 0x04 +#define IOC4_GPCR_DIR_3 0x08 /* Keyboard/mouse presence */ +#define IOC4_GPCR_DIR_4 0x10 /* Ser. port 0 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_5 0x20 /* Ser. port 1 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_6 0x40 /* Ser. port 2 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_7 0x80 /* Ser. port 3 xcvr select (0=232, 1=422) */ + +/* Masks for GPCR EDGE pins */ +#define IOC4_GPCR_EDGE_0 0x01 +#define IOC4_GPCR_EDGE_1 0x02 /* External interrupt input */ +#define IOC4_GPCR_EDGE_2 0x04 +#define IOC4_GPCR_EDGE_3 0x08 +#define IOC4_GPCR_EDGE_4 0x10 +#define IOC4_GPCR_EDGE_5 0x20 +#define IOC4_GPCR_EDGE_6 0x40 +#define IOC4_GPCR_EDGE_7 0x80 + +#define IOC4_VARIANT_IO9 0x0900 +#define IOC4_VARIANT_PCI_RT 0x0901 +#define IOC4_VARIANT_IO10 0x1000 + +/* One of these per IOC4 */ +struct ioc4_driver_data { + struct list_head idd_list; + unsigned long idd_bar0; + struct pci_dev *idd_pdev; + const struct pci_device_id *idd_pci_id; + struct ioc4_misc_regs __iomem *idd_misc_regs; + unsigned long count_period; + void *idd_serial_data; + unsigned int idd_variant; +}; + +/* One per submodule */ +struct ioc4_submodule { + struct list_head is_list; + char *is_name; + struct module *is_owner; + int (*is_probe) (struct ioc4_driver_data *); + int (*is_remove) (struct ioc4_driver_data *); +}; + +#define IOC4_NUM_CARDS 8 /* max cards per partition */ + +/********************************** + * Functions needed by submodules * + **********************************/ + +extern int ioc4_register_submodule(struct ioc4_submodule *); +extern void ioc4_unregister_submodule(struct ioc4_submodule *); + +#endif /* _LINUX_IOC4_H */ diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h new file mode 100644 index 000000000..1dcd9198b --- /dev/null +++ b/include/linux/iocontext.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef IOCONTEXT_H +#define IOCONTEXT_H + +#include +#include +#include + +enum { + ICQ_EXITED = 1 << 2, + ICQ_DESTROYED = 1 << 3, +}; + +/* + * An io_cq (icq) is association between an io_context (ioc) and a + * request_queue (q). This is used by elevators which need to track + * information per ioc - q pair. 
+ * + * Elevator can request use of icq by setting elevator_type->icq_size and + * ->icq_align. Both size and align must be larger than that of struct + * io_cq and elevator can use the tail area for private information. The + * recommended way to do this is defining a struct which contains io_cq as + * the first member followed by private members and using its size and + * align. For example, + * + * struct snail_io_cq { + * struct io_cq icq; + * int poke_snail; + * int feed_snail; + * }; + * + * struct elevator_type snail_elv_type { + * .ops = { ... }, + * .icq_size = sizeof(struct snail_io_cq), + * .icq_align = __alignof__(struct snail_io_cq), + * ... + * }; + * + * If icq_size is set, block core will manage icq's. All requests will + * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn() + * is called and be holding a reference to the associated io_context. + * + * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is + * called and, on destruction, ->elevator_exit_icq_fn(). Both functions + * are called with both the associated io_context and queue locks held. + * + * Elevator is allowed to lookup icq using ioc_lookup_icq() while holding + * queue lock but the returned icq is valid only until the queue lock is + * released. Elevators can not and should not try to create or destroy + * icq's. + * + * As icq's are linked from both ioc and q, the locking rules are a bit + * complex. + * + * - ioc lock nests inside q lock. + * + * - ioc->icq_list and icq->ioc_node are protected by ioc lock. + * q->icq_list and icq->q_node by q lock. + * + * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq + * itself is protected by q lock. However, both the indexes and icq + * itself are also RCU managed and lookup can be performed holding only + * the q lock. + * + * - icq's are not reference counted. They are destroyed when either the + * ioc or q goes away. Each request with icq set holds an extra + * reference to ioc to ensure it stays until the request is completed. + * + * - Linking and unlinking icq's are performed while holding both ioc and q + * locks. Due to the lock ordering, q exit is simple but ioc exit + * requires reverse-order double lock dance. + */ +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + + /* + * q_node and ioc_node link io_cq through icq_list of q and ioc + * respectively. Both fields are unused once ioc_exit_icq() is + * called and shared with __rcu_icq_cache and __rcu_head which are + * used for RCU free of io_cq. + */ + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct rcu_head __rcu_head; + }; + + unsigned int flags; +}; + +/* + * I/O subsystem state of the associated processes. It is refcounted + * and kmalloc'ed. These could be shared between processes. + */ +struct io_context { + atomic_long_t refcount; + atomic_t active_ref; + atomic_t nr_tasks; + + /* all the fields below are protected by this lock */ + spinlock_t lock; + + unsigned short ioprio; + + /* + * For request batching + */ + int nr_batch_requests; /* Number of requests left in the batch */ + unsigned long last_waited; /* Time last woken after wait for request */ + + struct radix_tree_root icq_tree; + struct io_cq __rcu *icq_hint; + struct hlist_head icq_list; + + struct work_struct release_work; +}; + +/** + * get_io_context_active - get active reference on ioc + * @ioc: ioc of interest + * + * Only iocs with active reference can issue new IOs. 
This function + * acquires an active reference on @ioc. The caller must already have an + * active reference on @ioc. + */ +static inline void get_io_context_active(struct io_context *ioc) +{ + WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0); + WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0); + atomic_long_inc(&ioc->refcount); + atomic_inc(&ioc->active_ref); +} + +static inline void ioc_task_link(struct io_context *ioc) +{ + get_io_context_active(ioc); + + WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0); + atomic_inc(&ioc->nr_tasks); +} + +struct task_struct; +#ifdef CONFIG_BLOCK +void put_io_context(struct io_context *ioc); +void put_io_context_active(struct io_context *ioc); +void exit_io_context(struct task_struct *task); +struct io_context *get_task_io_context(struct task_struct *task, + gfp_t gfp_flags, int node); +#else +struct io_context; +static inline void put_io_context(struct io_context *ioc) { } +static inline void exit_io_context(struct task_struct *task) { } +#endif + +#endif diff --git a/include/linux/iomap.h b/include/linux/iomap.h new file mode 100644 index 000000000..e93ecacb5 --- /dev/null +++ b/include/linux/iomap.h @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_IOMAP_H +#define LINUX_IOMAP_H 1 + +#include +#include +#include +#include + +struct address_space; +struct fiemap_extent_info; +struct inode; +struct iov_iter; +struct kiocb; +struct page; +struct vm_area_struct; +struct vm_fault; + +/* + * Types of block ranges for iomap mappings: + */ +#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */ +#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */ +#define IOMAP_MAPPED 0x03 /* blocks allocated at @addr */ +#define IOMAP_UNWRITTEN 0x04 /* blocks allocated at @addr in unwritten state */ +#define IOMAP_INLINE 0x05 /* data inline in the inode */ + +/* + * Flags for all iomap mappings: + * + * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access + * written data and requires fdatasync to commit them to persistent storage. + */ +#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */ +#define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */ +#define IOMAP_F_BUFFER_HEAD 0x04 /* file system requires buffer heads */ + +/* + * Flags that only need to be reported for IOMAP_REPORT requests: + */ +#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */ +#define IOMAP_F_SHARED 0x20 /* block shared with another file */ + +/* + * Flags from 0x1000 up are for file system specific usage: + */ +#define IOMAP_F_PRIVATE 0x1000 + + +/* + * Magic value for addr: + */ +#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */ + +struct iomap { + u64 addr; /* disk offset of mapping, bytes */ + loff_t offset; /* file offset of mapping, bytes */ + u64 length; /* length of mapping, bytes */ + u16 type; /* type of mapping */ + u16 flags; /* flags for mapping */ + struct block_device *bdev; /* block device for I/O */ + struct dax_device *dax_dev; /* dax_dev for dax operations */ + void *inline_data; + void *private; /* filesystem private */ + + /* + * Called when finished processing a page in the mapping returned in + * this iomap. At least for now this is only supported in the buffered + * write path. + */ + void (*page_done)(struct inode *inode, loff_t pos, unsigned copied, + struct page *page, struct iomap *iomap); +}; + +/* + * Flags for iomap_begin / iomap_end. No flag implies a read. 
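+ *
+ * For example (illustrative): a buffered write maps with IOMAP_WRITE,
+ * a FIEMAP request with IOMAP_REPORT, and a non-blocking direct I/O
+ * request with IOMAP_DIRECT | IOMAP_NOWAIT.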
+ */ +#define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */ +#define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */ +#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */ +#define IOMAP_FAULT (1 << 3) /* mapping for page fault */ +#define IOMAP_DIRECT (1 << 4) /* direct I/O */ +#define IOMAP_NOWAIT (1 << 5) /* do not block */ + +struct iomap_ops { + /* + * Return the existing mapping at pos, or reserve space starting at + * pos for up to length, as long as we can do it as a single mapping. + * The actual length is returned in iomap->length. + */ + int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length, + unsigned flags, struct iomap *iomap); + + /* + * Commit and/or unreserve space previous allocated using iomap_begin. + * Written indicates the length of the successful write operation which + * needs to be commited, while the rest needs to be unreserved. + * Written might be zero if no data was written. + */ + int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length, + ssize_t written, unsigned flags, struct iomap *iomap); +}; + +/* + * Structure allocate for each page when block size < PAGE_SIZE to track + * sub-page uptodate status and I/O completions. + */ +struct iomap_page { + atomic_t read_count; + atomic_t write_count; + spinlock_t uptodate_lock; + DECLARE_BITMAP(uptodate, PAGE_SIZE / 512); +}; + +static inline struct iomap_page *to_iomap_page(struct page *page) +{ + if (page_has_private(page)) + return (struct iomap_page *)page_private(page); + return NULL; +} + +ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, + const struct iomap_ops *ops); +int iomap_readpage(struct page *page, const struct iomap_ops *ops); +int iomap_readpages(struct address_space *mapping, struct list_head *pages, + unsigned nr_pages, const struct iomap_ops *ops); +int iomap_set_page_dirty(struct page *page); +int iomap_is_partially_uptodate(struct page *page, unsigned long from, + unsigned long count); +int iomap_releasepage(struct page *page, gfp_t gfp_mask); +void iomap_invalidatepage(struct page *page, unsigned int offset, + unsigned int len); +#ifdef CONFIG_MIGRATION +int iomap_migrate_page(struct address_space *mapping, struct page *newpage, + struct page *page, enum migrate_mode mode); +#else +#define iomap_migrate_page NULL +#endif +int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len, + const struct iomap_ops *ops); +int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, + bool *did_zero, const struct iomap_ops *ops); +int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, + const struct iomap_ops *ops); +int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); +int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + loff_t start, loff_t len, const struct iomap_ops *ops); +loff_t iomap_seek_hole(struct inode *inode, loff_t offset, + const struct iomap_ops *ops); +loff_t iomap_seek_data(struct inode *inode, loff_t offset, + const struct iomap_ops *ops); +sector_t iomap_bmap(struct address_space *mapping, sector_t bno, + const struct iomap_ops *ops); + +/* + * Flags for direct I/O ->end_io: + */ +#define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */ +#define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */ +typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret, + unsigned flags); +ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, + const struct iomap_ops *ops, iomap_dio_end_io_t end_io); + +#ifdef 
CONFIG_SWAP +struct file; +struct swap_info_struct; + +int iomap_swapfile_activate(struct swap_info_struct *sis, + struct file *swap_file, sector_t *pagespan, + const struct iomap_ops *ops); +#else +# define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO) +#endif /* CONFIG_SWAP */ + +#endif /* LINUX_IOMAP_H */ diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h new file mode 100644 index 000000000..70d01edcb --- /dev/null +++ b/include/linux/iommu-helper.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IOMMU_HELPER_H +#define _LINUX_IOMMU_HELPER_H + +#include +#include + +static inline unsigned long iommu_device_max_index(unsigned long size, + unsigned long offset, + u64 dma_mask) +{ + if (size + offset > dma_mask) + return dma_mask - offset + 1; + else + return size; +} + +static inline int iommu_is_span_boundary(unsigned int index, unsigned int nr, + unsigned long shift, unsigned long boundary_size) +{ + BUG_ON(!is_power_of_2(boundary_size)); + + shift = (shift + index) & (boundary_size - 1); + return shift + nr > boundary_size; +} + +extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, + unsigned long start, unsigned int nr, + unsigned long shift, + unsigned long boundary_size, + unsigned long align_mask); + +static inline unsigned long iommu_num_pages(unsigned long addr, + unsigned long len, + unsigned long io_page_size) +{ + unsigned long size = (addr & (io_page_size - 1)) + len; + + return DIV_ROUND_UP(size, io_page_size); +} + +#endif diff --git a/include/linux/iommu.h b/include/linux/iommu.h new file mode 100644 index 000000000..87994c265 --- /dev/null +++ b/include/linux/iommu.h @@ -0,0 +1,696 @@ +/* + * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. + * Author: Joerg Roedel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_IOMMU_H +#define __LINUX_IOMMU_H + +#include +#include +#include +#include +#include +#include + +#define IOMMU_READ (1 << 0) +#define IOMMU_WRITE (1 << 1) +#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ +#define IOMMU_NOEXEC (1 << 3) +#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ +/* + * Where the bus hardware includes a privilege level as part of its access type + * markings, and certain devices are capable of issuing transactions marked as + * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other + * given permission flags only apply to accesses at the higher privilege level, + * and that unprivileged transactions should have as little access as possible. + * This would usually imply the same permissions as kernel mappings on the CPU, + * if the IOMMU page table format is equivalent. 
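+ *
+ * Illustrative note (editorial): callers OR these bits together as the
+ * @prot argument of iommu_map(), declared further down in this header;
+ * the domain, iova, paddr and ret names below are placeholders:
+ *
+ *	ret = iommu_map(domain, iova, paddr, SZ_4K,
+ *			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);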
+ */ +#define IOMMU_PRIV (1 << 5) + +struct iommu_ops; +struct iommu_group; +struct bus_type; +struct device; +struct iommu_domain; +struct notifier_block; + +/* iommu fault flags */ +#define IOMMU_FAULT_READ 0x0 +#define IOMMU_FAULT_WRITE 0x1 + +typedef int (*iommu_fault_handler_t)(struct iommu_domain *, + struct device *, unsigned long, int, void *); + +struct iommu_domain_geometry { + dma_addr_t aperture_start; /* First address that can be mapped */ + dma_addr_t aperture_end; /* Last address that can be mapped */ + bool force_aperture; /* DMA only allowed in mappable range? */ +}; + +/* Domain feature flags */ +#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */ +#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API + implementation */ +#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */ + +/* + * This are the possible domain-types + * + * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate + * devices + * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses + * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used + * for VMs + * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations. + * This flag allows IOMMU drivers to implement + * certain optimizations for these domains + */ +#define IOMMU_DOMAIN_BLOCKED (0U) +#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) +#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING) +#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ + __IOMMU_DOMAIN_DMA_API) + +struct iommu_domain { + unsigned type; + const struct iommu_ops *ops; + unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ + iommu_fault_handler_t handler; + void *handler_token; + struct iommu_domain_geometry geometry; + void *iova_cookie; +}; + +enum iommu_cap { + IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA + transactions */ + IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ + IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ +}; + +/* + * Following constraints are specifc to FSL_PAMUV1: + * -aperture must be power of 2, and naturally aligned + * -number of windows must be power of 2, and address space size + * of each window is determined by aperture size / # of windows + * -the actual size of the mapped region of a window must be power + * of 2 starting with 4KB and physical address must be naturally + * aligned. + * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints. + * The caller can invoke iommu_domain_get_attr to check if the underlying + * iommu implementation supports these constraints. + */ + +enum iommu_attr { + DOMAIN_ATTR_GEOMETRY, + DOMAIN_ATTR_PAGING, + DOMAIN_ATTR_WINDOWS, + DOMAIN_ATTR_FSL_PAMU_STASH, + DOMAIN_ATTR_FSL_PAMU_ENABLE, + DOMAIN_ATTR_FSL_PAMUV1, + DOMAIN_ATTR_NESTING, /* two stages of translation */ + DOMAIN_ATTR_MAX, +}; + +/* These are the possible reserved region types */ +enum iommu_resv_type { + /* Memory regions which must be mapped 1:1 at all times */ + IOMMU_RESV_DIRECT, + /* Arbitrary "never map this or give it to a device" address ranges */ + IOMMU_RESV_RESERVED, + /* Hardware MSI region (untranslated) */ + IOMMU_RESV_MSI, + /* Software-managed MSI translation window */ + IOMMU_RESV_SW_MSI, +}; + +/** + * struct iommu_resv_region - descriptor for a reserved memory region + * @list: Linked list pointers + * @start: System physical start address of the region + * @length: Length of the region in bytes + * @prot: IOMMU Protection flags (READ/WRITE/...) 
+ * @type: Type of the reserved region + */ +struct iommu_resv_region { + struct list_head list; + phys_addr_t start; + size_t length; + int prot; + enum iommu_resv_type type; +}; + +#ifdef CONFIG_IOMMU_API + +/** + * struct iommu_ops - iommu ops and capabilities + * @capable: check capability + * @domain_alloc: allocate iommu domain + * @domain_free: free iommu domain + * @attach_dev: attach device to an iommu domain + * @detach_dev: detach device from an iommu domain + * @map: map a physically contiguous memory region to an iommu domain + * @unmap: unmap a physically contiguous memory region from an iommu domain + * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain + * @tlb_range_add: Add a given iova range to the flush queue for this domain + * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush + * queue + * @iova_to_phys: translate iova to physical address + * @add_device: add device to iommu grouping + * @remove_device: remove device from iommu grouping + * @device_group: find iommu group for a particular device + * @domain_get_attr: Query domain attributes + * @domain_set_attr: Change domain attributes + * @get_resv_regions: Request list of reserved regions for a device + * @put_resv_regions: Free list of reserved regions for a device + * @apply_resv_region: Temporary helper call-back for iova reserved ranges + * @domain_window_enable: Configure and enable a particular window for a domain + * @domain_window_disable: Disable a particular window for a domain + * @domain_set_windows: Set the number of windows for a domain + * @domain_get_windows: Return the number of windows for a domain + * @of_xlate: add OF master IDs to iommu grouping + * @pgsize_bitmap: bitmap of all possible supported page sizes + */ +struct iommu_ops { + bool (*capable)(enum iommu_cap); + + /* Domain allocation and freeing by the iommu driver */ + struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); + void (*domain_free)(struct iommu_domain *); + + int (*attach_dev)(struct iommu_domain *domain, struct device *dev); + void (*detach_dev)(struct iommu_domain *domain, struct device *dev); + int (*map)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, + size_t size); + void (*flush_iotlb_all)(struct iommu_domain *domain); + void (*iotlb_range_add)(struct iommu_domain *domain, + unsigned long iova, size_t size); + void (*iotlb_sync)(struct iommu_domain *domain); + phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); + int (*add_device)(struct device *dev); + void (*remove_device)(struct device *dev); + struct iommu_group *(*device_group)(struct device *dev); + int (*domain_get_attr)(struct iommu_domain *domain, + enum iommu_attr attr, void *data); + int (*domain_set_attr)(struct iommu_domain *domain, + enum iommu_attr attr, void *data); + + /* Request/Free a list of reserved regions for a device */ + void (*get_resv_regions)(struct device *dev, struct list_head *list); + void (*put_resv_regions)(struct device *dev, struct list_head *list); + void (*apply_resv_region)(struct device *dev, + struct iommu_domain *domain, + struct iommu_resv_region *region); + + /* Window handling functions */ + int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, + phys_addr_t paddr, u64 size, int prot); + void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); + /* Set the number of windows per domain */ + int 
(*domain_set_windows)(struct iommu_domain *domain, u32 w_count); + /* Get the number of windows per domain */ + u32 (*domain_get_windows)(struct iommu_domain *domain); + + int (*of_xlate)(struct device *dev, struct of_phandle_args *args); + bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); + + unsigned long pgsize_bitmap; +}; + +/** + * struct iommu_device - IOMMU core representation of one IOMMU hardware + * instance + * @list: Used by the iommu-core to keep a list of registered iommus + * @ops: iommu-ops for talking to this iommu + * @dev: struct device for sysfs handling + */ +struct iommu_device { + struct list_head list; + const struct iommu_ops *ops; + struct fwnode_handle *fwnode; + struct device *dev; +}; + +int iommu_device_register(struct iommu_device *iommu); +void iommu_device_unregister(struct iommu_device *iommu); +int iommu_device_sysfs_add(struct iommu_device *iommu, + struct device *parent, + const struct attribute_group **groups, + const char *fmt, ...) __printf(4, 5); +void iommu_device_sysfs_remove(struct iommu_device *iommu); +int iommu_device_link(struct iommu_device *iommu, struct device *link); +void iommu_device_unlink(struct iommu_device *iommu, struct device *link); + +static inline void iommu_device_set_ops(struct iommu_device *iommu, + const struct iommu_ops *ops) +{ + iommu->ops = ops; +} + +static inline void iommu_device_set_fwnode(struct iommu_device *iommu, + struct fwnode_handle *fwnode) +{ + iommu->fwnode = fwnode; +} + +static inline struct iommu_device *dev_to_iommu_device(struct device *dev) +{ + return (struct iommu_device *)dev_get_drvdata(dev); +} + +#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ +#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ +#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ +#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */ +#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */ +#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */ + +extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); +extern bool iommu_present(struct bus_type *bus); +extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap); +extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); +extern struct iommu_group *iommu_group_get_by_id(int id); +extern void iommu_domain_free(struct iommu_domain *domain); +extern int iommu_attach_device(struct iommu_domain *domain, + struct device *dev); +extern void iommu_detach_device(struct iommu_domain *domain, + struct device *dev); +extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); +extern int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); +extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size); +extern size_t iommu_unmap_fast(struct iommu_domain *domain, + unsigned long iova, size_t size); +extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg,unsigned int nents, int prot); +extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); +extern void iommu_set_fault_handler(struct iommu_domain *domain, + iommu_fault_handler_t handler, void *token); + +extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); +extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); +extern int iommu_request_dm_for_dev(struct device *dev); +extern struct 
iommu_resv_region * +iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, + enum iommu_resv_type type); +extern int iommu_get_group_resv_regions(struct iommu_group *group, + struct list_head *head); + +extern int iommu_attach_group(struct iommu_domain *domain, + struct iommu_group *group); +extern void iommu_detach_group(struct iommu_domain *domain, + struct iommu_group *group); +extern struct iommu_group *iommu_group_alloc(void); +extern void *iommu_group_get_iommudata(struct iommu_group *group); +extern void iommu_group_set_iommudata(struct iommu_group *group, + void *iommu_data, + void (*release)(void *iommu_data)); +extern int iommu_group_set_name(struct iommu_group *group, const char *name); +extern int iommu_group_add_device(struct iommu_group *group, + struct device *dev); +extern void iommu_group_remove_device(struct device *dev); +extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, + int (*fn)(struct device *, void *)); +extern struct iommu_group *iommu_group_get(struct device *dev); +extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group); +extern void iommu_group_put(struct iommu_group *group); +extern int iommu_group_register_notifier(struct iommu_group *group, + struct notifier_block *nb); +extern int iommu_group_unregister_notifier(struct iommu_group *group, + struct notifier_block *nb); +extern int iommu_group_id(struct iommu_group *group); +extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); +extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); + +extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, + void *data); +extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, + void *data); + +/* Window handling function prototypes */ +extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, + phys_addr_t offset, u64 size, + int prot); +extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); + +extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, + unsigned long iova, int flags); + +static inline void iommu_flush_tlb_all(struct iommu_domain *domain) +{ + if (domain->ops->flush_iotlb_all) + domain->ops->flush_iotlb_all(domain); +} + +static inline void iommu_tlb_range_add(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + if (domain->ops->iotlb_range_add) + domain->ops->iotlb_range_add(domain, iova, size); +} + +static inline void iommu_tlb_sync(struct iommu_domain *domain) +{ + if (domain->ops->iotlb_sync) + domain->ops->iotlb_sync(domain); +} + +/* PCI device grouping function */ +extern struct iommu_group *pci_device_group(struct device *dev); +/* Generic device grouping function */ +extern struct iommu_group *generic_device_group(struct device *dev); + +/** + * struct iommu_fwspec - per-device IOMMU instance data + * @ops: ops for this device's IOMMU + * @iommu_fwnode: firmware handle for this device's IOMMU + * @iommu_priv: IOMMU driver private data for this device + * @num_ids: number of associated device IDs + * @ids: IDs which this device may present to the IOMMU + */ +struct iommu_fwspec { + const struct iommu_ops *ops; + struct fwnode_handle *iommu_fwnode; + void *iommu_priv; + unsigned int num_ids; + u32 ids[1]; +}; + +int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, + const struct iommu_ops *ops); +void iommu_fwspec_free(struct device *dev); +int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int 
num_ids); +const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); + +#else /* CONFIG_IOMMU_API */ + +struct iommu_ops {}; +struct iommu_group {}; +struct iommu_fwspec {}; +struct iommu_device {}; + +static inline bool iommu_present(struct bus_type *bus) +{ + return false; +} + +static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) +{ + return false; +} + +static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) +{ + return NULL; +} + +static inline struct iommu_group *iommu_group_get_by_id(int id) +{ + return NULL; +} + +static inline void iommu_domain_free(struct iommu_domain *domain) +{ +} + +static inline int iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + return -ENODEV; +} + +static inline void iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ +} + +static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) +{ + return NULL; +} + +static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + return -ENODEV; +} + +static inline size_t iommu_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + return 0; +} + +static inline size_t iommu_unmap_fast(struct iommu_domain *domain, + unsigned long iova, int gfp_order) +{ + return 0; +} + +static inline size_t iommu_map_sg(struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot) +{ + return 0; +} + +static inline void iommu_flush_tlb_all(struct iommu_domain *domain) +{ +} + +static inline void iommu_tlb_range_add(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ +} + +static inline void iommu_tlb_sync(struct iommu_domain *domain) +{ +} + +static inline int iommu_domain_window_enable(struct iommu_domain *domain, + u32 wnd_nr, phys_addr_t paddr, + u64 size, int prot) +{ + return -ENODEV; +} + +static inline void iommu_domain_window_disable(struct iommu_domain *domain, + u32 wnd_nr) +{ +} + +static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) +{ + return 0; +} + +static inline void iommu_set_fault_handler(struct iommu_domain *domain, + iommu_fault_handler_t handler, void *token) +{ +} + +static inline void iommu_get_resv_regions(struct device *dev, + struct list_head *list) +{ +} + +static inline void iommu_put_resv_regions(struct device *dev, + struct list_head *list) +{ +} + +static inline int iommu_get_group_resv_regions(struct iommu_group *group, + struct list_head *head) +{ + return -ENODEV; +} + +static inline int iommu_request_dm_for_dev(struct device *dev) +{ + return -ENODEV; +} + +static inline int iommu_attach_group(struct iommu_domain *domain, + struct iommu_group *group) +{ + return -ENODEV; +} + +static inline void iommu_detach_group(struct iommu_domain *domain, + struct iommu_group *group) +{ +} + +static inline struct iommu_group *iommu_group_alloc(void) +{ + return ERR_PTR(-ENODEV); +} + +static inline void *iommu_group_get_iommudata(struct iommu_group *group) +{ + return NULL; +} + +static inline void iommu_group_set_iommudata(struct iommu_group *group, + void *iommu_data, + void (*release)(void *iommu_data)) +{ +} + +static inline int iommu_group_set_name(struct iommu_group *group, + const char *name) +{ + return -ENODEV; +} + +static inline int iommu_group_add_device(struct iommu_group *group, + struct device *dev) +{ + return -ENODEV; +} + +static inline void iommu_group_remove_device(struct device *dev) 
+{ +} + +static inline int iommu_group_for_each_dev(struct iommu_group *group, + void *data, + int (*fn)(struct device *, void *)) +{ + return -ENODEV; +} + +static inline struct iommu_group *iommu_group_get(struct device *dev) +{ + return NULL; +} + +static inline void iommu_group_put(struct iommu_group *group) +{ +} + +static inline int iommu_group_register_notifier(struct iommu_group *group, + struct notifier_block *nb) +{ + return -ENODEV; +} + +static inline int iommu_group_unregister_notifier(struct iommu_group *group, + struct notifier_block *nb) +{ + return 0; +} + +static inline int iommu_group_id(struct iommu_group *group) +{ + return -ENODEV; +} + +static inline int iommu_domain_get_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + return -EINVAL; +} + +static inline int iommu_domain_set_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + return -EINVAL; +} + +static inline int iommu_device_register(struct iommu_device *iommu) +{ + return -ENODEV; +} + +static inline void iommu_device_set_ops(struct iommu_device *iommu, + const struct iommu_ops *ops) +{ +} + +static inline void iommu_device_set_fwnode(struct iommu_device *iommu, + struct fwnode_handle *fwnode) +{ +} + +static inline struct iommu_device *dev_to_iommu_device(struct device *dev) +{ + return NULL; +} + +static inline void iommu_device_unregister(struct iommu_device *iommu) +{ +} + +static inline int iommu_device_sysfs_add(struct iommu_device *iommu, + struct device *parent, + const struct attribute_group **groups, + const char *fmt, ...) +{ + return -ENODEV; +} + +static inline void iommu_device_sysfs_remove(struct iommu_device *iommu) +{ +} + +static inline int iommu_device_link(struct device *dev, struct device *link) +{ + return -EINVAL; +} + +static inline void iommu_device_unlink(struct device *dev, struct device *link) +{ +} + +static inline int iommu_fwspec_init(struct device *dev, + struct fwnode_handle *iommu_fwnode, + const struct iommu_ops *ops) +{ + return -ENODEV; +} + +static inline void iommu_fwspec_free(struct device *dev) +{ +} + +static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, + int num_ids) +{ + return -ENODEV; +} + +static inline +const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) +{ + return NULL; +} + +#endif /* CONFIG_IOMMU_API */ + +#ifdef CONFIG_IOMMU_DEBUGFS +extern struct dentry *iommu_debugfs_dir; +void iommu_debugfs_setup(void); +#else +static inline void iommu_debugfs_setup(void) {} +#endif + +#endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h new file mode 100644 index 000000000..b1d861cac --- /dev/null +++ b/include/linux/iopoll.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_IOPOLL_H +#define _LINUX_IOPOLL_H + +#include +#include +#include +#include +#include +#include + +/** + * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs + * @op: accessor function (takes @addr as its only argument) + * @addr: Address to poll + * @val: Variable to read the value into + * @cond: Break condition (usually involving @val) + * @sleep_us: Maximum time to sleep between reads in us (0 + * tight-loops). Should be less than ~20ms since usleep_range + * is used (see Documentation/timers/timers-howto.txt). + * @timeout_us: Timeout in us, 0 means never timeout + * + * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @addr is stored in @val. Must not + * be called from atomic context if sleep_us or timeout_us are used. + * + * When available, you'll probably want to use one of the specialized + * macros defined below rather than this macro directly. + */ +#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + might_sleep_if((__sleep_us) != 0); \ + for (;;) { \ + (val) = op(addr); \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = op(addr); \ + break; \ + } \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) + +/** + * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs + * @op: accessor function (takes @addr as its only argument) + * @addr: Address to poll + * @val: Variable to read the value into + * @cond: Break condition (usually involving @val) + * @delay_us: Time to udelay between reads in us (0 tight-loops). Should + * be less than ~10us since udelay is used (see + * Documentation/timers/timers-howto.txt). + * @timeout_us: Timeout in us, 0 means never timeout + * + * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @addr is stored in @val. + * + * When available, you'll probably want to use one of the specialized + * macros defined below rather than this macro directly. + */ +#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __delay_us = (delay_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + for (;;) { \ + (val) = op(addr); \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = op(addr); \ + break; \ + } \ + if (__delay_us) \ + udelay(__delay_us); \ + } \ + (cond) ? 
0 : -ETIMEDOUT; \ +}) + + +#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us) + +#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us) + +#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us) + +#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us) + +#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us) + +#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us) + +#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us) + +#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us) + +#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us) + +#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us) + +#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us) + +#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us) + +#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us) + +#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us) + +#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us) + +#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us) + +#endif /* _LINUX_IOPOLL_H */ diff --git a/include/linux/ioport.h b/include/linux/ioport.h new file mode 100644 index 000000000..da0ebaec2 --- /dev/null +++ b/include/linux/ioport.h @@ -0,0 +1,291 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ioport.h Definitions of routines for detecting, reserving and + * allocating system resources. + * + * Authors: Linus Torvalds + */ + +#ifndef _LINUX_IOPORT_H +#define _LINUX_IOPORT_H + +#ifndef __ASSEMBLY__ +#include +#include +/* + * Resources are tree-like, allowing + * nesting etc.. + */ +struct resource { + resource_size_t start; + resource_size_t end; + const char *name; + unsigned long flags; + unsigned long desc; + struct resource *parent, *sibling, *child; +}; + +/* + * IO resources have these defined flags. + * + * PCI devices expose these flags to userspace in the "resource" sysfs file, + * so don't move them. 
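+ *
+ * Illustrative sketch (editorial): a statically declared driver region built
+ * with the DEFINE_RES_* helpers further down in this header ends up carrying
+ * exactly these flags; the base address, size and name are made up:
+ *
+ *	static struct resource uart_res =
+ *		DEFINE_RES_MEM_NAMED(0x10000000, 0x1000, "uart");
+ *
+ * which yields flags == IORESOURCE_MEM and desc == IORES_DESC_NONE.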
+ */ +#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ + +#define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */ +#define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */ +#define IORESOURCE_MEM 0x00000200 +#define IORESOURCE_REG 0x00000300 /* Register offsets */ +#define IORESOURCE_IRQ 0x00000400 +#define IORESOURCE_DMA 0x00000800 +#define IORESOURCE_BUS 0x00001000 + +#define IORESOURCE_PREFETCH 0x00002000 /* No side effects */ +#define IORESOURCE_READONLY 0x00004000 +#define IORESOURCE_CACHEABLE 0x00008000 +#define IORESOURCE_RANGELENGTH 0x00010000 +#define IORESOURCE_SHADOWABLE 0x00020000 + +#define IORESOURCE_SIZEALIGN 0x00040000 /* size indicates alignment */ +#define IORESOURCE_STARTALIGN 0x00080000 /* start field is alignment */ + +#define IORESOURCE_MEM_64 0x00100000 +#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */ +#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */ + +#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */ +#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */ + +#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ + +#define IORESOURCE_DISABLED 0x10000000 +#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */ +#define IORESOURCE_AUTO 0x40000000 +#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ + +/* I/O resource extended types */ +#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM) + +/* PnP IRQ specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_IRQ_HIGHEDGE (1<<0) +#define IORESOURCE_IRQ_LOWEDGE (1<<1) +#define IORESOURCE_IRQ_HIGHLEVEL (1<<2) +#define IORESOURCE_IRQ_LOWLEVEL (1<<3) +#define IORESOURCE_IRQ_SHAREABLE (1<<4) +#define IORESOURCE_IRQ_OPTIONAL (1<<5) + +/* PnP DMA specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_DMA_TYPE_MASK (3<<0) +#define IORESOURCE_DMA_8BIT (0<<0) +#define IORESOURCE_DMA_8AND16BIT (1<<0) +#define IORESOURCE_DMA_16BIT (2<<0) + +#define IORESOURCE_DMA_MASTER (1<<2) +#define IORESOURCE_DMA_BYTE (1<<3) +#define IORESOURCE_DMA_WORD (1<<4) + +#define IORESOURCE_DMA_SPEED_MASK (3<<6) +#define IORESOURCE_DMA_COMPATIBLE (0<<6) +#define IORESOURCE_DMA_TYPEA (1<<6) +#define IORESOURCE_DMA_TYPEB (2<<6) +#define IORESOURCE_DMA_TYPEF (3<<6) + +/* PnP memory I/O specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ +#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ +#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ +#define IORESOURCE_MEM_TYPE_MASK (3<<3) +#define IORESOURCE_MEM_8BIT (0<<3) +#define IORESOURCE_MEM_16BIT (1<<3) +#define IORESOURCE_MEM_8AND16BIT (2<<3) +#define IORESOURCE_MEM_32BIT (3<<3) +#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ +#define IORESOURCE_MEM_EXPANSIONROM (1<<6) + +/* PnP I/O specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_IO_16BIT_ADDR (1<<0) +#define IORESOURCE_IO_FIXED (1<<1) +#define IORESOURCE_IO_SPARSE (1<<2) + +/* PCI ROM control bits (IORESOURCE_BITS) */ +#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ +#define IORESOURCE_ROM_SHADOW (1<<1) /* Use RAM image, not ROM BAR */ + +/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. 
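+ *
+ * Illustrative note (editorial): these bits are only meaningful on a resource
+ * of the matching type; for example, a BAR whose address must not be
+ * reassigned is marked roughly as
+ *
+ *	res->flags |= IORESOURCE_PCI_FIXED;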
*/ +#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ +#define IORESOURCE_PCI_EA_BEI (1<<5) /* BAR Equivalent Indicator */ + +/* + * I/O Resource Descriptors + * + * Descriptors are used by walk_iomem_res_desc() and region_intersects() + * for searching a specific resource range in the iomem table. Assign + * a new descriptor when a resource range supports the search interfaces. + * Otherwise, resource.desc must be set to IORES_DESC_NONE (0). + */ +enum { + IORES_DESC_NONE = 0, + IORES_DESC_CRASH_KERNEL = 1, + IORES_DESC_ACPI_TABLES = 2, + IORES_DESC_ACPI_NV_STORAGE = 3, + IORES_DESC_PERSISTENT_MEMORY = 4, + IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, + IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, + IORES_DESC_DEVICE_PUBLIC_MEMORY = 7, +}; + +/* helpers to define resources */ +#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \ + { \ + .start = (_start), \ + .end = (_start) + (_size) - 1, \ + .name = (_name), \ + .flags = (_flags), \ + .desc = IORES_DESC_NONE, \ + } + +#define DEFINE_RES_IO_NAMED(_start, _size, _name) \ + DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO) +#define DEFINE_RES_IO(_start, _size) \ + DEFINE_RES_IO_NAMED((_start), (_size), NULL) + +#define DEFINE_RES_MEM_NAMED(_start, _size, _name) \ + DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM) +#define DEFINE_RES_MEM(_start, _size) \ + DEFINE_RES_MEM_NAMED((_start), (_size), NULL) + +#define DEFINE_RES_IRQ_NAMED(_irq, _name) \ + DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ) +#define DEFINE_RES_IRQ(_irq) \ + DEFINE_RES_IRQ_NAMED((_irq), NULL) + +#define DEFINE_RES_DMA_NAMED(_dma, _name) \ + DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA) +#define DEFINE_RES_DMA(_dma) \ + DEFINE_RES_DMA_NAMED((_dma), NULL) + +/* PC/ISA/whatever - the normal PC address spaces: IO and memory */ +extern struct resource ioport_resource; +extern struct resource iomem_resource; + +extern struct resource *request_resource_conflict(struct resource *root, struct resource *new); +extern int request_resource(struct resource *root, struct resource *new); +extern int release_resource(struct resource *new); +void release_child_resources(struct resource *new); +extern void reserve_region_with_split(struct resource *root, + resource_size_t start, resource_size_t end, + const char *name); +extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); +extern int insert_resource(struct resource *parent, struct resource *new); +extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); +extern int remove_resource(struct resource *old); +extern void arch_remove_reservations(struct resource *avail); +extern int allocate_resource(struct resource *root, struct resource *new, + resource_size_t size, resource_size_t min, + resource_size_t max, resource_size_t align, + resource_size_t (*alignf)(void *, + const struct resource *, + resource_size_t, + resource_size_t), + void *alignf_data); +struct resource *lookup_resource(struct resource *root, resource_size_t start); +int adjust_resource(struct resource *res, resource_size_t start, + resource_size_t size); +resource_size_t resource_alignment(struct resource *res); +static inline resource_size_t resource_size(const struct resource *res) +{ + return res->end - res->start + 1; +} +static inline unsigned long resource_type(const struct resource *res) +{ + return res->flags & IORESOURCE_TYPE_BITS; +} +static inline unsigned long resource_ext_type(const struct resource *res) +{ + return res->flags & 
IORESOURCE_EXT_TYPE_BITS; +} +/* True iff r1 completely contains r2 */ +static inline bool resource_contains(struct resource *r1, struct resource *r2) +{ + if (resource_type(r1) != resource_type(r2)) + return false; + if (r1->flags & IORESOURCE_UNSET || r2->flags & IORESOURCE_UNSET) + return false; + return r1->start <= r2->start && r1->end >= r2->end; +} + + +/* Convenience shorthand with allocation */ +#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) +#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED) +#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) +#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) +#define request_mem_region_exclusive(start,n,name) \ + __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) +#define rename_region(region, newname) do { (region)->name = (newname); } while (0) + +extern struct resource * __request_region(struct resource *, + resource_size_t start, + resource_size_t n, + const char *name, int flags); + +/* Compatibility cruft */ +#define release_region(start,n) __release_region(&ioport_resource, (start), (n)) +#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) + +extern void __release_region(struct resource *, resource_size_t, + resource_size_t); +#ifdef CONFIG_MEMORY_HOTREMOVE +extern int release_mem_region_adjustable(struct resource *, resource_size_t, + resource_size_t); +#endif + +/* Wrappers for managed devices */ +struct device; + +extern int devm_request_resource(struct device *dev, struct resource *root, + struct resource *new); +extern void devm_release_resource(struct device *dev, struct resource *new); + +#define devm_request_region(dev,start,n,name) \ + __devm_request_region(dev, &ioport_resource, (start), (n), (name)) +#define devm_request_mem_region(dev,start,n,name) \ + __devm_request_region(dev, &iomem_resource, (start), (n), (name)) + +extern struct resource * __devm_request_region(struct device *dev, + struct resource *parent, resource_size_t start, + resource_size_t n, const char *name); + +#define devm_release_region(dev, start, n) \ + __devm_release_region(dev, &ioport_resource, (start), (n)) +#define devm_release_mem_region(dev, start, n) \ + __devm_release_region(dev, &iomem_resource, (start), (n)) + +extern void __devm_release_region(struct device *dev, struct resource *parent, + resource_size_t start, resource_size_t n); +extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); +extern bool iomem_is_exclusive(u64 addr); + +extern int +walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, + void *arg, int (*func)(unsigned long, unsigned long, void *)); +extern int +walk_mem_res(u64 start, u64 end, void *arg, + int (*func)(struct resource *, void *)); +extern int +walk_system_ram_res(u64 start, u64 end, void *arg, + int (*func)(struct resource *, void *)); +extern int +walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end, + void *arg, int (*func)(struct resource *, void *)); + +/* True if any part of r1 overlaps r2 */ +static inline bool resource_overlaps(struct resource *r1, struct resource *r2) +{ + return (r1->start <= r2->end && r1->end >= r2->start); +} + + +#endif /* __ASSEMBLY__ */ +#endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h new file mode 
100644 index 000000000..9e30ed644 --- /dev/null +++ b/include/linux/ioprio.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef IOPRIO_H +#define IOPRIO_H + +#include +#include +#include + +/* + * Gives us 8 prio classes with 13-bits of data for each class + */ +#define IOPRIO_CLASS_SHIFT (13) +#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) + +#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT) +#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK) +#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data) + +#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE) + +/* + * These are the io priority groups as implemented by CFQ. RT is the realtime + * class, it always gets premium service. BE is the best-effort scheduling + * class, the default for any process. IDLE is the idle scheduling class, it + * is only served when no one else is using the disk. + */ +enum { + IOPRIO_CLASS_NONE, + IOPRIO_CLASS_RT, + IOPRIO_CLASS_BE, + IOPRIO_CLASS_IDLE, +}; + +/* + * 8 best effort priority levels are supported + */ +#define IOPRIO_BE_NR (8) + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP, + IOPRIO_WHO_USER, +}; + +/* + * Fallback BE priority + */ +#define IOPRIO_NORM (4) + +/* + * if process has set io priority explicitly, use that. if not, convert + * the cpu scheduler nice value to an io priority + */ +static inline int task_nice_ioprio(struct task_struct *task) +{ + return (task_nice(task) + 20) / 5; +} + +/* + * This is for the case where the task hasn't asked for a specific IO class. + * Check for idle and rt task process, and return appropriate IO class. + */ +static inline int task_nice_ioclass(struct task_struct *task) +{ + if (task->policy == SCHED_IDLE) + return IOPRIO_CLASS_IDLE; + else if (task_is_realtime(task)) + return IOPRIO_CLASS_RT; + else + return IOPRIO_CLASS_BE; +} + +/* + * For inheritance, return the highest of the two given priorities + */ +extern int ioprio_best(unsigned short aprio, unsigned short bprio); + +extern int set_task_ioprio(struct task_struct *task, int ioprio); + +#ifdef CONFIG_BLOCK +extern int ioprio_check_cap(int ioprio); +#else +static inline int ioprio_check_cap(int ioprio) +{ + return -ENOTBLK; +} +#endif /* CONFIG_BLOCK */ + +#endif diff --git a/include/linux/iova.h b/include/linux/iova.h new file mode 100644 index 000000000..84fbe73d2 --- /dev/null +++ b/include/linux/iova.h @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2006, Intel Corporation. + * + * This file is released under the GPLv2. 
+ * + * Copyright (C) 2006-2008 Intel Corporation + * Author: Anil S Keshavamurthy + * + */ + +#ifndef _IOVA_H_ +#define _IOVA_H_ + +#include +#include +#include +#include +#include + +/* iova structure */ +struct iova { + struct rb_node node; + unsigned long pfn_hi; /* Highest allocated pfn */ + unsigned long pfn_lo; /* Lowest allocated pfn */ +}; + +struct iova_magazine; +struct iova_cpu_rcache; + +#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */ +#define MAX_GLOBAL_MAGS 32 /* magazines per bin */ + +struct iova_rcache { + spinlock_t lock; + unsigned long depot_size; + struct iova_magazine *depot[MAX_GLOBAL_MAGS]; + struct iova_cpu_rcache __percpu *cpu_rcaches; +}; + +struct iova_domain; + +/* Call-Back from IOVA code into IOMMU drivers */ +typedef void (* iova_flush_cb)(struct iova_domain *domain); + +/* Destructor for per-entry data */ +typedef void (* iova_entry_dtor)(unsigned long data); + +/* Number of entries per Flush Queue */ +#define IOVA_FQ_SIZE 256 + +/* Timeout (in ms) after which entries are flushed from the Flush-Queue */ +#define IOVA_FQ_TIMEOUT 10 + +/* Flush Queue entry for defered flushing */ +struct iova_fq_entry { + unsigned long iova_pfn; + unsigned long pages; + unsigned long data; + u64 counter; /* Flush counter when this entrie was added */ +}; + +/* Per-CPU Flush Queue structure */ +struct iova_fq { + struct iova_fq_entry entries[IOVA_FQ_SIZE]; + unsigned head, tail; + spinlock_t lock; +}; + +/* holds all the iova translations for a domain */ +struct iova_domain { + spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ + struct rb_root rbroot; /* iova domain rbtree root */ + struct rb_node *cached_node; /* Save last alloced node */ + struct rb_node *cached32_node; /* Save last 32-bit alloced node */ + unsigned long granule; /* pfn granularity for this domain */ + unsigned long start_pfn; /* Lower limit for this domain */ + unsigned long dma_32bit_pfn; + struct iova anchor; /* rbtree lookup anchor */ + struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ + + iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU + TLBs */ + + iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for + iova entry */ + + struct iova_fq __percpu *fq; /* Flush Queue */ + + atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that + have been started */ + + atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that + have been finished */ + + struct timer_list fq_timer; /* Timer to regularily empty the + flush-queues */ + atomic_t fq_timer_on; /* 1 when timer is active, 0 + when not */ +}; + +static inline unsigned long iova_size(struct iova *iova) +{ + return iova->pfn_hi - iova->pfn_lo + 1; +} + +static inline unsigned long iova_shift(struct iova_domain *iovad) +{ + return __ffs(iovad->granule); +} + +static inline unsigned long iova_mask(struct iova_domain *iovad) +{ + return iovad->granule - 1; +} + +static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) +{ + return iova & iova_mask(iovad); +} + +static inline size_t iova_align(struct iova_domain *iovad, size_t size) +{ + return ALIGN(size, iovad->granule); +} + +static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) +{ + return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); +} + +static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) +{ + return iova >> iova_shift(iovad); +} + +#if IS_ENABLED(CONFIG_IOMMU_IOVA) +int iova_cache_get(void); +void 
iova_cache_put(void); + +struct iova *alloc_iova_mem(void); +void free_iova_mem(struct iova *iova); +void free_iova(struct iova_domain *iovad, unsigned long pfn); +void __free_iova(struct iova_domain *iovad, struct iova *iova); +struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, + unsigned long limit_pfn, + bool size_aligned); +void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, + unsigned long size); +void queue_iova(struct iova_domain *iovad, + unsigned long pfn, unsigned long pages, + unsigned long data); +unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, + unsigned long limit_pfn, bool flush_rcache); +struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, + unsigned long pfn_hi); +void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); +void init_iova_domain(struct iova_domain *iovad, unsigned long granule, + unsigned long start_pfn); +bool has_iova_flush_queue(struct iova_domain *iovad); +int init_iova_flush_queue(struct iova_domain *iovad, + iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); +struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); +void put_iova_domain(struct iova_domain *iovad); +struct iova *split_and_remove_iova(struct iova_domain *iovad, + struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); +void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); +#else +static inline int iova_cache_get(void) +{ + return -ENOTSUPP; +} + +static inline void iova_cache_put(void) +{ +} + +static inline struct iova *alloc_iova_mem(void) +{ + return NULL; +} + +static inline void free_iova_mem(struct iova *iova) +{ +} + +static inline void free_iova(struct iova_domain *iovad, unsigned long pfn) +{ +} + +static inline void __free_iova(struct iova_domain *iovad, struct iova *iova) +{ +} + +static inline struct iova *alloc_iova(struct iova_domain *iovad, + unsigned long size, + unsigned long limit_pfn, + bool size_aligned) +{ + return NULL; +} + +static inline void free_iova_fast(struct iova_domain *iovad, + unsigned long pfn, + unsigned long size) +{ +} + +static inline void queue_iova(struct iova_domain *iovad, + unsigned long pfn, unsigned long pages, + unsigned long data) +{ +} + +static inline unsigned long alloc_iova_fast(struct iova_domain *iovad, + unsigned long size, + unsigned long limit_pfn, + bool flush_rcache) +{ + return 0; +} + +static inline struct iova *reserve_iova(struct iova_domain *iovad, + unsigned long pfn_lo, + unsigned long pfn_hi) +{ + return NULL; +} + +static inline void copy_reserved_iova(struct iova_domain *from, + struct iova_domain *to) +{ +} + +static inline void init_iova_domain(struct iova_domain *iovad, + unsigned long granule, + unsigned long start_pfn) +{ +} + +static inline bool has_iova_flush_queue(struct iova_domain *iovad) +{ + return false; +} + +static inline int init_iova_flush_queue(struct iova_domain *iovad, + iova_flush_cb flush_cb, + iova_entry_dtor entry_dtor) +{ + return -ENODEV; +} + +static inline struct iova *find_iova(struct iova_domain *iovad, + unsigned long pfn) +{ + return NULL; +} + +static inline void put_iova_domain(struct iova_domain *iovad) +{ +} + +static inline struct iova *split_and_remove_iova(struct iova_domain *iovad, + struct iova *iova, + unsigned long pfn_lo, + unsigned long pfn_hi) +{ + return NULL; +} + +static inline void free_cpu_cached_iovas(unsigned int cpu, + struct iova_domain *iovad) +{ +} +#endif + +#endif diff --git a/include/linux/ip.h b/include/linux/ip.h new 
file mode 100644 index 000000000..492bc6513 --- /dev/null +++ b/include/linux/ip.h @@ -0,0 +1,37 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the IP protocol. + * + * Version: @(#)ip.h 1.0.2 04/28/93 + * + * Authors: Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IP_H +#define _LINUX_IP_H + +#include +#include + +static inline struct iphdr *ip_hdr(const struct sk_buff *skb) +{ + return (struct iphdr *)skb_network_header(skb); +} + +static inline struct iphdr *inner_ip_hdr(const struct sk_buff *skb) +{ + return (struct iphdr *)skb_inner_network_header(skb); +} + +static inline struct iphdr *ipip_hdr(const struct sk_buff *skb) +{ + return (struct iphdr *)skb_transport_header(skb); +} +#endif /* _LINUX_IP_H */ diff --git a/include/linux/ipack.h b/include/linux/ipack.h new file mode 100644 index 000000000..8bddc3fbd --- /dev/null +++ b/include/linux/ipack.h @@ -0,0 +1,289 @@ +/* + * Industry-pack bus. + * + * Copyright (C) 2011-2012 CERN (www.cern.ch) + * Author: Samuel Iglesias Gonsalvez + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; version 2 of the License. + */ + +#include +#include +#include + +#define IPACK_IDPROM_OFFSET_I 0x01 +#define IPACK_IDPROM_OFFSET_P 0x03 +#define IPACK_IDPROM_OFFSET_A 0x05 +#define IPACK_IDPROM_OFFSET_C 0x07 +#define IPACK_IDPROM_OFFSET_MANUFACTURER_ID 0x09 +#define IPACK_IDPROM_OFFSET_MODEL 0x0B +#define IPACK_IDPROM_OFFSET_REVISION 0x0D +#define IPACK_IDPROM_OFFSET_RESERVED 0x0F +#define IPACK_IDPROM_OFFSET_DRIVER_ID_L 0x11 +#define IPACK_IDPROM_OFFSET_DRIVER_ID_H 0x13 +#define IPACK_IDPROM_OFFSET_NUM_BYTES 0x15 +#define IPACK_IDPROM_OFFSET_CRC 0x17 + +/* + * IndustryPack Fromat, Vendor and Device IDs. + */ + +/* ID section format versions */ +#define IPACK_ID_VERSION_INVALID 0x00 +#define IPACK_ID_VERSION_1 0x01 +#define IPACK_ID_VERSION_2 0x02 + +/* Vendors and devices. Sort key: vendor first, device next. 
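+ *
+ * Illustrative note (editorial): a module is identified by the
+ * (format, vendor, device) triple, so the SBS octal RS-232 module below is
+ * matched as (IPACK_ID_VERSION_1, IPACK1_VENDOR_ID_SBS,
+ * IPACK1_DEVICE_ID_SBS_OCTAL_232); see the IPACK_DEVICE() helper at the end
+ * of this header.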
*/ +#define IPACK1_VENDOR_ID_RESERVED1 0x00 +#define IPACK1_VENDOR_ID_RESERVED2 0xFF +#define IPACK1_VENDOR_ID_UNREGISTRED01 0x01 +#define IPACK1_VENDOR_ID_UNREGISTRED02 0x02 +#define IPACK1_VENDOR_ID_UNREGISTRED03 0x03 +#define IPACK1_VENDOR_ID_UNREGISTRED04 0x04 +#define IPACK1_VENDOR_ID_UNREGISTRED05 0x05 +#define IPACK1_VENDOR_ID_UNREGISTRED06 0x06 +#define IPACK1_VENDOR_ID_UNREGISTRED07 0x07 +#define IPACK1_VENDOR_ID_UNREGISTRED08 0x08 +#define IPACK1_VENDOR_ID_UNREGISTRED09 0x09 +#define IPACK1_VENDOR_ID_UNREGISTRED10 0x0A +#define IPACK1_VENDOR_ID_UNREGISTRED11 0x0B +#define IPACK1_VENDOR_ID_UNREGISTRED12 0x0C +#define IPACK1_VENDOR_ID_UNREGISTRED13 0x0D +#define IPACK1_VENDOR_ID_UNREGISTRED14 0x0E +#define IPACK1_VENDOR_ID_UNREGISTRED15 0x0F + +#define IPACK1_VENDOR_ID_SBS 0xF0 +#define IPACK1_DEVICE_ID_SBS_OCTAL_232 0x22 +#define IPACK1_DEVICE_ID_SBS_OCTAL_422 0x2A +#define IPACK1_DEVICE_ID_SBS_OCTAL_485 0x48 + +struct ipack_bus_ops; +struct ipack_driver; + +enum ipack_space { + IPACK_IO_SPACE = 0, + IPACK_ID_SPACE, + IPACK_INT_SPACE, + IPACK_MEM8_SPACE, + IPACK_MEM16_SPACE, + /* Dummy for counting the number of entries. Must remain the last + * entry */ + IPACK_SPACE_COUNT, +}; + +/** + */ +struct ipack_region { + phys_addr_t start; + size_t size; +}; + +/** + * struct ipack_device + * + * @slot: Slot where the device is plugged in the carrier board + * @bus: ipack_bus_device where the device is plugged to. + * @id_space: Virtual address to ID space. + * @io_space: Virtual address to IO space. + * @mem_space: Virtual address to MEM space. + * @dev: device in kernel representation. + * + * Warning: Direct access to mapped memory is possible but the endianness + * is not the same with PCI carrier or VME carrier. The endianness is managed + * by the carrier board throught bus->ops. + */ +struct ipack_device { + unsigned int slot; + struct ipack_bus_device *bus; + struct device dev; + void (*release) (struct ipack_device *dev); + struct ipack_region region[IPACK_SPACE_COUNT]; + u8 *id; + size_t id_avail; + u32 id_vendor; + u32 id_device; + u8 id_format; + unsigned int id_crc_correct:1; + unsigned int speed_8mhz:1; + unsigned int speed_32mhz:1; +}; + +/** + * struct ipack_driver_ops -- Callbacks to IPack device driver + * + * @probe: Probe function + * @remove: Prepare imminent removal of the device. Services provided by the + * device should be revoked. + */ + +struct ipack_driver_ops { + int (*probe) (struct ipack_device *dev); + void (*remove) (struct ipack_device *dev); +}; + +/** + * struct ipack_driver -- Specific data to each ipack device driver + * + * @driver: Device driver kernel representation + * @ops: Callbacks provided by the IPack device driver + */ +struct ipack_driver { + struct device_driver driver; + const struct ipack_device_id *id_table; + const struct ipack_driver_ops *ops; +}; + +/** + * struct ipack_bus_ops - available operations on a bridge module + * + * @map_space: map IP address space + * @unmap_space: unmap IP address space + * @request_irq: request IRQ + * @free_irq: free IRQ + * @get_clockrate: Returns the clockrate the carrier is currently + * communicating with the device at. + * @set_clockrate: Sets the clock-rate for carrier / module communication. + * Should return -EINVAL if the requested speed is not supported. + * @get_error: Returns the error state for the slot the device is attached + * to. + * @get_timeout: Returns 1 if the communication with the device has + * previously timed out. + * @reset_timeout: Resets the state returned by get_timeout. 
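+ *
+ * Illustrative sketch (editorial; the my_* callbacks are hypothetical
+ * carrier-driver functions, not part of this interface):
+ *
+ *	static const struct ipack_bus_ops my_carrier_ops = {
+ *		.request_irq   = my_request_irq,
+ *		.free_irq      = my_free_irq,
+ *		.get_clockrate = my_get_clockrate,
+ *		.set_clockrate = my_set_clockrate,
+ *	};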
+ */ +struct ipack_bus_ops { + int (*request_irq) (struct ipack_device *dev, + irqreturn_t (*handler)(void *), void *arg); + int (*free_irq) (struct ipack_device *dev); + int (*get_clockrate) (struct ipack_device *dev); + int (*set_clockrate) (struct ipack_device *dev, int mherz); + int (*get_error) (struct ipack_device *dev); + int (*get_timeout) (struct ipack_device *dev); + int (*reset_timeout) (struct ipack_device *dev); +}; + +/** + * struct ipack_bus_device + * + * @dev: pointer to carrier device + * @slots: number of slots available + * @bus_nr: ipack bus number + * @ops: bus operations for the mezzanine drivers + */ +struct ipack_bus_device { + struct module *owner; + struct device *parent; + int slots; + int bus_nr; + const struct ipack_bus_ops *ops; +}; + +/** + * ipack_bus_register -- register a new ipack bus + * + * @parent: pointer to the parent device, if any. + * @slots: number of slots available in the bus device. + * @ops: bus operations for the mezzanine drivers. + * + * The carrier board device should call this function to register itself as + * available bus device in ipack. + */ +struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, + const struct ipack_bus_ops *ops, + struct module *owner); + +/** + * ipack_bus_unregister -- unregister an ipack bus + */ +int ipack_bus_unregister(struct ipack_bus_device *bus); + +/** + * ipack_driver_register -- Register a new ipack device driver + * + * Called by a ipack driver to register itself as a driver + * that can manage ipack devices. + */ +int ipack_driver_register(struct ipack_driver *edrv, struct module *owner, + const char *name); +void ipack_driver_unregister(struct ipack_driver *edrv); + +/** + * ipack_device_init -- initialize an IPack device + * @dev: the new device to initialize. + * + * Initialize a new IPack device ("module" in IndustryPack jargon). The call + * is done by the carrier driver. The carrier should populate the fields + * bus and slot as well as the region array of @dev prior to calling this + * function. The rest of the fields will be allocated and populated + * during initalization. + * + * Return zero on success or error code on failure. + * + * NOTE: _Never_ directly free @dev after calling this function, even + * if it returned an error! Always use ipack_put_device() to give up the + * reference initialized in this function instead. + */ +int ipack_device_init(struct ipack_device *dev); + +/** + * ipack_device_add -- Add an IPack device + * @dev: the new device to add. + * + * Add a new IPack device. The call is done by the carrier driver + * after calling ipack_device_init(). + * + * Return zero on success or error code on failure. + * + * NOTE: _Never_ directly free @dev after calling this function, even + * if it returned an error! Always use ipack_put_device() to give up the + * reference initialized in this function instead. + */ +int ipack_device_add(struct ipack_device *dev); +void ipack_device_del(struct ipack_device *dev); + +void ipack_get_device(struct ipack_device *dev); +void ipack_put_device(struct ipack_device *dev); + +/** + * DEFINE_IPACK_DEVICE_TABLE - macro used to describe a IndustryPack table + * @_table: device table name + * + * This macro is used to create a struct ipack_device_id array (a device table) + * in a generic manner. 
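+ *
+ * Illustrative sketch (editorial; the table name my_ipack_ids is
+ * hypothetical):
+ *
+ *	static DEFINE_IPACK_DEVICE_TABLE(my_ipack_ids) = {
+ *		{ IPACK_DEVICE(IPACK_ID_VERSION_1,
+ *			       IPACK1_VENDOR_ID_SBS,
+ *			       IPACK1_DEVICE_ID_SBS_OCTAL_232) },
+ *		{ 0, },
+ *	};
+ *	MODULE_DEVICE_TABLE(ipack, my_ipack_ids);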
+ */ +#define DEFINE_IPACK_DEVICE_TABLE(_table) \ + const struct ipack_device_id _table[] +/** + * IPACK_DEVICE - macro used to describe a specific IndustryPack device + * @_format: the format version (currently either 1 or 2, 8 bit value) + * @vend: the 8 or 24 bit IndustryPack Vendor ID + * @dev: the 8 or 16 bit IndustryPack Device ID + * + * This macro is used to create a struct ipack_device_id that matches a specific + * device. + */ +#define IPACK_DEVICE(_format, vend, dev) \ + .format = (_format), \ + .vendor = (vend), \ + .device = (dev) + +/** + * ipack_get_carrier - it increase the carrier ref. counter of + * the carrier module + * @dev: mezzanine device which wants to get the carrier + */ +static inline int ipack_get_carrier(struct ipack_device *dev) +{ + return try_module_get(dev->bus->owner); +} + +/** + * ipack_get_carrier - it decrease the carrier ref. counter of + * the carrier module + * @dev: mezzanine device which wants to get the carrier + */ +static inline void ipack_put_carrier(struct ipack_device *dev) +{ + module_put(dev->bus->owner); +} diff --git a/include/linux/ipc.h b/include/linux/ipc.h new file mode 100644 index 000000000..e1c9eea60 --- /dev/null +++ b/include/linux/ipc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IPC_H +#define _LINUX_IPC_H + +#include +#include +#include +#include +#include + +/* used by in-kernel data structures */ +struct kern_ipc_perm { + spinlock_t lock; + bool deleted; + int id; + key_t key; + kuid_t uid; + kgid_t gid; + kuid_t cuid; + kgid_t cgid; + umode_t mode; + unsigned long seq; + void *security; + + struct rhash_head khtnode; + + struct rcu_head rcu; + refcount_t refcount; +} ____cacheline_aligned_in_smp __randomize_layout; + +#endif /* _LINUX_IPC_H */ diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h new file mode 100644 index 000000000..36ffbe330 --- /dev/null +++ b/include/linux/ipc_namespace.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __IPC_NAMESPACE_H__ +#define __IPC_NAMESPACE_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct user_namespace; + +struct ipc_ids { + int in_use; + unsigned short seq; + struct rw_semaphore rwsem; + struct idr ipcs_idr; + int max_idx; +#ifdef CONFIG_CHECKPOINT_RESTORE + int next_id; +#endif + struct rhashtable key_ht; +}; + +struct ipc_namespace { + refcount_t count; + struct ipc_ids ids[3]; + + int sem_ctls[4]; + int used_sems; + + unsigned int msg_ctlmax; + unsigned int msg_ctlmnb; + unsigned int msg_ctlmni; + atomic_t msg_bytes; + atomic_t msg_hdrs; + + size_t shm_ctlmax; + size_t shm_ctlall; + unsigned long shm_tot; + int shm_ctlmni; + /* + * Defines whether IPC_RMID is forced for _all_ shm segments regardless + * of shmctl() + */ + int shm_rmid_forced; + + struct notifier_block ipcns_nb; + + /* The kern_mount of the mqueuefs sb. 
We take a ref on it */ + struct vfsmount *mq_mnt; + + /* # queues in this ns, protected by mq_lock */ + unsigned int mq_queues_count; + + /* next fields are set through sysctl */ + unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */ + unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */ + unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */ + unsigned int mq_msg_default; + unsigned int mq_msgsize_default; + + /* user_ns which owns the ipc ns */ + struct user_namespace *user_ns; + struct ucounts *ucounts; + + struct ns_common ns; +} __randomize_layout; + +extern struct ipc_namespace init_ipc_ns; +extern spinlock_t mq_lock; + +#ifdef CONFIG_SYSVIPC +extern void shm_destroy_orphaned(struct ipc_namespace *ns); +#else /* CONFIG_SYSVIPC */ +static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {} +#endif /* CONFIG_SYSVIPC */ + +#ifdef CONFIG_POSIX_MQUEUE +extern int mq_init_ns(struct ipc_namespace *ns); +/* + * POSIX Message Queue default values: + * + * MIN_*: Lowest value an admin can set the maximum unprivileged limit to + * DFLT_*MAX: Default values for the maximum unprivileged limits + * DFLT_{MSG,MSGSIZE}: Default values used when the user doesn't supply + * an attribute to the open call and the queue must be created + * HARD_*: Highest value the maximums can be set to. These are enforced + * on CAP_SYS_RESOURCE apps as well making them inviolate (so make them + * suitably high) + * + * POSIX Requirements: + * Per app minimum openable message queues - 8. This does not map well + * to the fact that we limit the number of queues on a per namespace + * basis instead of a per app basis. So, make the default high enough + * that no given app should have a hard time opening 8 queues. + * Minimum maximum for HARD_MSGMAX - 32767. I bumped this to 65536. + * Minimum maximum for HARD_MSGSIZEMAX - POSIX is silent on this. However, + * we have run into a situation where running applications in the wild + * require this to be at least 5MB, and preferably 10MB, so I set the + * value to 16MB in hopes that this user is the worst of the bunch and + * the new maximum will handle anyone else. I may have to revisit this + * in the future. 
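+ *
+ * As a rough illustration of what the defaults below add up to: a
+ * namespace may hold up to 256 queues, and a queue created without an
+ * attribute takes at most 10 messages of up to 8192 bytes each, so a
+ * namespace full of default queues carries about 256 * 10 * 8192 bytes,
+ * i.e. roughly 20 MB of queued payload.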
+ */ +#define DFLT_QUEUESMAX 256 +#define MIN_MSGMAX 1 +#define DFLT_MSG 10U +#define DFLT_MSGMAX 10 +#define HARD_MSGMAX 65536 +#define MIN_MSGSIZEMAX 128 +#define DFLT_MSGSIZE 8192U +#define DFLT_MSGSIZEMAX 8192 +#define HARD_MSGSIZEMAX (16*1024*1024) +#else +static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; } +#endif + +#if defined(CONFIG_IPC_NS) +extern struct ipc_namespace *copy_ipcs(unsigned long flags, + struct user_namespace *user_ns, struct ipc_namespace *ns); + +static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) +{ + if (ns) + refcount_inc(&ns->count); + return ns; +} + +static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns) +{ + if (ns) { + if (refcount_inc_not_zero(&ns->count)) + return ns; + } + + return NULL; +} + +extern void put_ipc_ns(struct ipc_namespace *ns); +#else +static inline struct ipc_namespace *copy_ipcs(unsigned long flags, + struct user_namespace *user_ns, struct ipc_namespace *ns) +{ + if (flags & CLONE_NEWIPC) + return ERR_PTR(-EINVAL); + + return ns; +} + +static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) +{ + return ns; +} + +static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns) +{ + return ns; +} + +static inline void put_ipc_ns(struct ipc_namespace *ns) +{ +} +#endif + +#ifdef CONFIG_POSIX_MQUEUE_SYSCTL + +struct ctl_table_header; +extern struct ctl_table_header *mq_register_sysctl_table(void); + +#else /* CONFIG_POSIX_MQUEUE_SYSCTL */ + +static inline struct ctl_table_header *mq_register_sysctl_table(void) +{ + return NULL; +} + +#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */ +#endif diff --git a/include/linux/ipmi-fru.h b/include/linux/ipmi-fru.h new file mode 100644 index 000000000..05c942262 --- /dev/null +++ b/include/linux/ipmi-fru.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2012 CERN (www.cern.ch) + * Author: Alessandro Rubini + * + * This work is part of the White Rabbit project, a research effort led + * by CERN, the European Institute for Nuclear Research. 
+ */ +#ifndef __LINUX_IPMI_FRU_H__ +#define __LINUX_IPMI_FRU_H__ +#ifdef __KERNEL__ +# include +# include +#else +# include +# include +#endif + +/* + * These structures match the unaligned crap we have in FRU1011.pdf + * (http://download.intel.com/design/servers/ipmi/FRU1011.pdf) + */ + +/* chapter 8, page 5 */ +struct fru_common_header { + uint8_t format; /* 0x01 */ + uint8_t internal_use_off; /* multiple of 8 bytes */ + uint8_t chassis_info_off; /* multiple of 8 bytes */ + uint8_t board_area_off; /* multiple of 8 bytes */ + uint8_t product_area_off; /* multiple of 8 bytes */ + uint8_t multirecord_off; /* multiple of 8 bytes */ + uint8_t pad; /* must be 0 */ + uint8_t checksum; /* sum modulo 256 must be 0 */ +}; + +/* chapter 9, page 5 -- internal_use: not used by us */ + +/* chapter 10, page 6 -- chassis info: not used by us */ + +/* chapter 13, page 9 -- used by board_info_area below */ +struct fru_type_length { + uint8_t type_length; + uint8_t data[0]; +}; + +/* chapter 11, page 7 */ +struct fru_board_info_area { + uint8_t format; /* 0x01 */ + uint8_t area_len; /* multiple of 8 bytes */ + uint8_t language; /* I hope it's 0 */ + uint8_t mfg_date[3]; /* LSB, minutes since 1996-01-01 */ + struct fru_type_length tl[0]; /* type-length stuff follows */ + + /* + * the TL there are in order: + * Board Manufacturer + * Board Product Name + * Board Serial Number + * Board Part Number + * FRU File ID (may be null) + * more manufacturer-specific stuff + * 0xc1 as a terminator + * 0x00 pad to a multiple of 8 bytes - 1 + * checksum (sum of all stuff module 256 must be zero) + */ +}; + +enum fru_type { + FRU_TYPE_BINARY = 0x00, + FRU_TYPE_BCDPLUS = 0x40, + FRU_TYPE_ASCII6 = 0x80, + FRU_TYPE_ASCII = 0xc0, /* not ascii: depends on language */ +}; + +/* + * some helpers + */ +static inline struct fru_board_info_area *fru_get_board_area( + const struct fru_common_header *header) +{ + /* we know for sure that the header is 8 bytes in size */ + return (struct fru_board_info_area *)(header + header->board_area_off); +} + +static inline int fru_type(struct fru_type_length *tl) +{ + return tl->type_length & 0xc0; +} + +static inline int fru_length(struct fru_type_length *tl) +{ + return (tl->type_length & 0x3f) + 1; /* len of whole record */ +} + +/* assume ascii-latin1 encoding */ +static inline int fru_strlen(struct fru_type_length *tl) +{ + return fru_length(tl) - 1; +} + +static inline char *fru_strcpy(char *dest, struct fru_type_length *tl) +{ + int len = fru_strlen(tl); + memcpy(dest, tl->data, len); + dest[len] = '\0'; + return dest; +} + +static inline struct fru_type_length *fru_next_tl(struct fru_type_length *tl) +{ + return tl + fru_length(tl); +} + +static inline int fru_is_eof(struct fru_type_length *tl) +{ + return tl->type_length == 0xc1; +} + +/* + * External functions defined in fru-parse.c. 
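+ *
+ * They complement the inline helpers above; for instance, the board-area
+ * records can be walked along these lines (a sketch only, assuming a
+ * struct fru_common_header *header and that every record fits in buf):
+ *
+ *	struct fru_board_info_area *bia = fru_get_board_area(header);
+ *	struct fru_type_length *tl = bia->tl;
+ *	char buf[64];
+ *
+ *	while (!fru_is_eof(tl)) {
+ *		fru_strcpy(buf, tl);
+ *		tl = fru_next_tl(tl);
+ *	}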
+ */ +extern int fru_header_cksum_ok(struct fru_common_header *header); +extern int fru_bia_cksum_ok(struct fru_board_info_area *bia); + +/* All these 4 return allocated strings by calling fru_alloc() */ +extern char *fru_get_board_manufacturer(struct fru_common_header *header); +extern char *fru_get_product_name(struct fru_common_header *header); +extern char *fru_get_serial_number(struct fru_common_header *header); +extern char *fru_get_part_number(struct fru_common_header *header); + +/* This must be defined by the caller of the above functions */ +extern void *fru_alloc(size_t size); + +#endif /* __LINUX_IMPI_FRU_H__ */ diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h new file mode 100644 index 000000000..41f5c086f --- /dev/null +++ b/include/linux/ipmi.h @@ -0,0 +1,336 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ipmi.h + * + * MontaVista IPMI interface + * + * Author: MontaVista Software, Inc. + * Corey Minyard + * source@mvista.com + * + * Copyright 2002 MontaVista Software Inc. + * + */ +#ifndef __LINUX_IPMI_H +#define __LINUX_IPMI_H + +#include + +#include +#include +#include /* For acpi_handle */ + +struct module; +struct device; + +/* + * Opaque type for a IPMI message user. One of these is needed to + * send and receive messages. + */ +typedef struct ipmi_user *ipmi_user_t; + +/* + * Stuff coming from the receive interface comes as one of these. + * They are allocated, the receiver must free them with + * ipmi_free_recv_msg() when done with the message. The link is not + * used after the message is delivered, so the upper layer may use the + * link to build a linked list, if it likes. + */ +struct ipmi_recv_msg { + struct list_head link; + + /* + * The type of message as defined in the "Receive Types" + * defines above. + */ + int recv_type; + + struct ipmi_user *user; + struct ipmi_addr addr; + long msgid; + struct kernel_ipmi_msg msg; + + /* + * The user_msg_data is the data supplied when a message was + * sent, if this is a response to a sent message. If this is + * not a response to a sent message, then user_msg_data will + * be NULL. If the user above is NULL, then this will be the + * intf. + */ + void *user_msg_data; + + /* + * Call this when done with the message. It will presumably free + * the message and do any other necessary cleanup. + */ + void (*done)(struct ipmi_recv_msg *msg); + + /* + * Place-holder for the data, don't make any assumptions about + * the size or existence of this, since it may change. + */ + unsigned char msg_data[IPMI_MAX_MSG_LENGTH]; +}; + +/* Allocate and free the receive message. */ +void ipmi_free_recv_msg(struct ipmi_recv_msg *msg); + +struct ipmi_user_hndl { + /* + * Routine type to call when a message needs to be routed to + * the upper layer. This will be called with some locks held, + * the only IPMI routines that can be called are ipmi_request + * and the alloc/free operations. The handler_data is the + * variable supplied when the receive handler was registered. + */ + void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg, + void *user_msg_data); + + /* + * Called when the interface detects a watchdog pre-timeout. If + * this is NULL, it will be ignored for the user. + */ + void (*ipmi_watchdog_pretimeout)(void *handler_data); + + /* + * If not NULL, called at panic time after the interface has + * been set up to handle run to completion. + */ + void (*ipmi_panic_handler)(void *handler_data); + + /* + * Called when the interface has been removed. After this returns + * the user handle will be invalid. 
The interface may or may + * not be usable when this is called, but it will return errors + * if it is not usable. + */ + void (*shutdown)(void *handler_data); +}; + +/* Create a new user of the IPMI layer on the given interface number. */ +int ipmi_create_user(unsigned int if_num, + const struct ipmi_user_hndl *handler, + void *handler_data, + struct ipmi_user **user); + +/* + * Destroy the given user of the IPMI layer. Note that after this + * function returns, the system is guaranteed to not call any + * callbacks for the user. Thus as long as you destroy all the users + * before you unload a module, you will be safe. And if you destroy + * the users before you destroy the callback structures, it should be + * safe, too. + */ +int ipmi_destroy_user(struct ipmi_user *user); + +/* Get the IPMI version of the BMC we are talking to. */ +int ipmi_get_version(struct ipmi_user *user, + unsigned char *major, + unsigned char *minor); + +/* + * Set and get the slave address and LUN that we will use for our + * source messages. Note that this affects the interface, not just + * this user, so it will affect all users of this interface. This is + * so some initialization code can come in and do the OEM-specific + * things it takes to determine your address (if not the BMC) and set + * it for everyone else. Note that each channel can have its own + * address. + */ +int ipmi_set_my_address(struct ipmi_user *user, + unsigned int channel, + unsigned char address); +int ipmi_get_my_address(struct ipmi_user *user, + unsigned int channel, + unsigned char *address); +int ipmi_set_my_LUN(struct ipmi_user *user, + unsigned int channel, + unsigned char LUN); +int ipmi_get_my_LUN(struct ipmi_user *user, + unsigned int channel, + unsigned char *LUN); + +/* + * Like ipmi_request, but lets you specify the number of retries and + * the retry time. The retries is the number of times the message + * will be resent if no reply is received. If set to -1, the default + * value will be used. The retry time is the time in milliseconds + * between retries. If set to zero, the default value will be + * used. + * + * Don't use this unless you *really* have to. It's primarily for the + * IPMI over LAN converter; since the LAN stuff does its own retries, + * it makes no sense to do it here. However, this can be used if you + * have unusual requirements. + */ +int ipmi_request_settime(struct ipmi_user *user, + struct ipmi_addr *addr, + long msgid, + struct kernel_ipmi_msg *msg, + void *user_msg_data, + int priority, + int max_retries, + unsigned int retry_time_ms); + +/* + * Like ipmi_request, but with messages supplied. This will not + * allocate any memory, and the messages may be statically allocated + * (just make sure to do the "done" handling on them). Note that this + * is primarily for the watchdog timer, since it should be able to + * send messages even if no memory is available. This is subject to + * change as the system changes, so don't use it unless you REALLY + * have to. + */ +int ipmi_request_supply_msgs(struct ipmi_user *user, + struct ipmi_addr *addr, + long msgid, + struct kernel_ipmi_msg *msg, + void *user_msg_data, + void *supplied_smi, + struct ipmi_recv_msg *supplied_recv, + int priority); + +/* + * Poll the IPMI interface for the user. This causes the IPMI code to + * do an immediate check for information from the driver and handle + * anything that is immediately pending. This will not block in any + * way. This is useful if you need to spin waiting for something to + * happen in the IPMI driver. 
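+ *
+ * Purely as an illustration (the "done" flag is hypothetical), a caller
+ * that has to busy-wait for a reply could do:
+ *
+ *	while (!READ_ONCE(done))
+ *		ipmi_poll_interface(user);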
+ */ +void ipmi_poll_interface(struct ipmi_user *user); + +/* + * When commands come in to the SMS, the user can register to receive + * them. Only one user can be listening on a specific netfn/cmd/chan tuple + * at a time, you will get an EBUSY error if the command is already + * registered. If a command is received that does not have a user + * registered, the driver will automatically return the proper + * error. Channels are specified as a bitfield, use IPMI_CHAN_ALL to + * mean all channels. + */ +int ipmi_register_for_cmd(struct ipmi_user *user, + unsigned char netfn, + unsigned char cmd, + unsigned int chans); +int ipmi_unregister_for_cmd(struct ipmi_user *user, + unsigned char netfn, + unsigned char cmd, + unsigned int chans); + +/* + * Go into a mode where the driver will not autonomously attempt to do + * things with the interface. It will still respond to attentions and + * interrupts, and it will expect that commands will complete. It + * will not automatcially check for flags, events, or things of that + * nature. + * + * This is primarily used for firmware upgrades. The idea is that + * when you go into firmware upgrade mode, you do this operation + * and the driver will not attempt to do anything but what you tell + * it or what the BMC asks for. + * + * Note that if you send a command that resets the BMC, the driver + * will still expect a response from that command. So the BMC should + * reset itself *after* the response is sent. Resetting before the + * response is just silly. + * + * If in auto maintenance mode, the driver will automatically go into + * maintenance mode for 30 seconds if it sees a cold reset, a warm + * reset, or a firmware NetFN. This means that code that uses only + * firmware NetFN commands to do upgrades will work automatically + * without change, assuming it sends a message every 30 seconds or + * less. + * + * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means. + */ +int ipmi_get_maintenance_mode(struct ipmi_user *user); +int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode); + +/* + * When the user is created, it will not receive IPMI events by + * default. The user must set this to TRUE to get incoming events. + * The first user that sets this to TRUE will receive all events that + * have been queued while no one was waiting for events. + */ +int ipmi_set_gets_events(struct ipmi_user *user, bool val); + +/* + * Called when a new SMI is registered. This will also be called on + * every existing interface when a new watcher is registered with + * ipmi_smi_watcher_register(). + */ +struct ipmi_smi_watcher { + struct list_head link; + + /* + * You must set the owner to the current module, if you are in + * a module (generally just set it to "THIS_MODULE"). + */ + struct module *owner; + + /* + * These two are called with read locks held for the interface + * the watcher list. So you can add and remove users from the + * IPMI interface, send messages, etc., but you cannot add + * or remove SMI watchers or SMI interfaces. + */ + void (*new_smi)(int if_num, struct device *dev); + void (*smi_gone)(int if_num); +}; + +int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher); +int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher); + +/* + * The following are various helper functions for dealing with IPMI + * addresses. + */ + +/* Return the maximum length of an IPMI address given it's type. */ +unsigned int ipmi_addr_length(int addr_type); + +/* Validate that the given IPMI address is valid. 
*/ +int ipmi_validate_addr(struct ipmi_addr *addr, int len); + +/* + * How did the IPMI driver find out about the device? + */ +enum ipmi_addr_src { + SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, + SI_PCI, SI_DEVICETREE, SI_PLATFORM, SI_LAST +}; +const char *ipmi_addr_src_to_str(enum ipmi_addr_src src); + +union ipmi_smi_info_union { +#ifdef CONFIG_ACPI + /* + * the acpi_info element is defined for the SI_ACPI + * address type + */ + struct { + acpi_handle acpi_handle; + } acpi_info; +#endif +}; + +struct ipmi_smi_info { + enum ipmi_addr_src addr_src; + + /* + * Base device for the interface. Don't forget to put this when + * you are done. + */ + struct device *dev; + + /* + * The addr_info provides more detailed info for some IPMI + * devices, depending on the addr_src. Currently only SI_ACPI + * info is provided. + */ + union ipmi_smi_info_union addr_info; +}; + +/* This is to get the private info of struct ipmi_smi */ +extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data); + +#endif /* __LINUX_IPMI_H */ diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h new file mode 100644 index 000000000..1995ce146 --- /dev/null +++ b/include/linux/ipmi_smi.h @@ -0,0 +1,247 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ipmi_smi.h + * + * MontaVista IPMI system management interface + * + * Author: MontaVista Software, Inc. + * Corey Minyard + * source@mvista.com + * + * Copyright 2002 MontaVista Software Inc. + * + */ + +#ifndef __LINUX_IPMI_SMI_H +#define __LINUX_IPMI_SMI_H + +#include +#include +#include +#include + +struct device; + +/* + * This files describes the interface for IPMI system management interface + * drivers to bind into the IPMI message handler. + */ + +/* Structure for the low-level drivers. */ +typedef struct ipmi_smi *ipmi_smi_t; + +/* + * Messages to/from the lower layer. The smi interface will take one + * of these to send. After the send has occurred and a response has + * been received, it will report this same data structure back up to + * the upper layer. If an error occurs, it should fill in the + * response with an error code in the completion code location. When + * asynchronous data is received, one of these is allocated, the + * data_size is set to zero and the response holds the data from the + * get message or get event command that the interface initiated. + * Note that it is the interfaces responsibility to detect + * asynchronous data and messages and request them from the + * interface. + */ +struct ipmi_smi_msg { + struct list_head link; + + long msgid; + void *user_data; + + int data_size; + unsigned char data[IPMI_MAX_MSG_LENGTH]; + + int rsp_size; + unsigned char rsp[IPMI_MAX_MSG_LENGTH]; + + /* Will be called when the system is done with the message + (presumably to free it). */ + void (*done)(struct ipmi_smi_msg *msg); +}; + +struct ipmi_smi_handlers { + struct module *owner; + + /* + * The low-level interface cannot start sending messages to + * the upper layer until this function is called. This may + * not be NULL, the lower layer must take the interface from + * this call. + */ + int (*start_processing)(void *send_info, + struct ipmi_smi *new_intf); + + /* + * When called, the low-level interface should disable all + * processing, it should be complete shut down when it returns. + */ + void (*shutdown)(void *send_info); + + /* + * Get the detailed private info of the low level interface and store + * it into the structure of ipmi_smi_data. 
For example: the + * ACPI device handle will be returned for the pnp_acpi IPMI device. + */ + int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data); + + /* + * Called to enqueue an SMI message to be sent. This + * operation is not allowed to fail. If an error occurs, it + * should report back the error in a received message. It may + * do this in the current call context, since no write locks + * are held when this is run. Message are delivered one at + * a time by the message handler, a new message will not be + * delivered until the previous message is returned. + */ + void (*sender)(void *send_info, + struct ipmi_smi_msg *msg); + + /* + * Called by the upper layer to request that we try to get + * events from the BMC we are attached to. + */ + void (*request_events)(void *send_info); + + /* + * Called by the upper layer when some user requires that the + * interface watch for events, received messages, watchdog + * pretimeouts, or not. Used by the SMI to know if it should + * watch for these. This may be NULL if the SMI does not + * implement it. + */ + void (*set_need_watch)(void *send_info, bool enable); + + /* + * Called when flushing all pending messages. + */ + void (*flush_messages)(void *send_info); + + /* + * Called when the interface should go into "run to + * completion" mode. If this call sets the value to true, the + * interface should make sure that all messages are flushed + * out and that none are pending, and any new requests are run + * to completion immediately. + */ + void (*set_run_to_completion)(void *send_info, bool run_to_completion); + + /* + * Called to poll for work to do. This is so upper layers can + * poll for operations during things like crash dumps. + */ + void (*poll)(void *send_info); + + /* + * Enable/disable firmware maintenance mode. Note that this + * is *not* the modes defined, this is simply an on/off + * setting. The message handler does the mode handling. Note + * that this is called from interrupt context, so it cannot + * block. + */ + void (*set_maintenance_mode)(void *send_info, bool enable); +}; + +struct ipmi_device_id { + unsigned char device_id; + unsigned char device_revision; + unsigned char firmware_revision_1; + unsigned char firmware_revision_2; + unsigned char ipmi_version; + unsigned char additional_device_support; + unsigned int manufacturer_id; + unsigned int product_id; + unsigned char aux_firmware_revision[4]; + unsigned int aux_firmware_revision_set : 1; +}; + +#define ipmi_version_major(v) ((v)->ipmi_version & 0xf) +#define ipmi_version_minor(v) ((v)->ipmi_version >> 4) + +/* + * Take a pointer to an IPMI response and extract device id information from + * it. @netfn is in the IPMI_NETFN_ format, so may need to be shifted from + * a SI response. + */ +static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd, + const unsigned char *data, + unsigned int data_len, + struct ipmi_device_id *id) +{ + if (data_len < 7) + return -EINVAL; + if (netfn != IPMI_NETFN_APP_RESPONSE || cmd != IPMI_GET_DEVICE_ID_CMD) + /* Strange, didn't get the response we expected. */ + return -EINVAL; + if (data[0] != 0) + /* That's odd, it shouldn't be able to fail. 
*/ + return -EINVAL; + + data++; + data_len--; + + id->device_id = data[0]; + id->device_revision = data[1]; + id->firmware_revision_1 = data[2]; + id->firmware_revision_2 = data[3]; + id->ipmi_version = data[4]; + id->additional_device_support = data[5]; + if (data_len >= 11) { + id->manufacturer_id = (data[6] | (data[7] << 8) | + (data[8] << 16)); + id->product_id = data[9] | (data[10] << 8); + } else { + id->manufacturer_id = 0; + id->product_id = 0; + } + if (data_len >= 15) { + memcpy(id->aux_firmware_revision, data+11, 4); + id->aux_firmware_revision_set = 1; + } else + id->aux_firmware_revision_set = 0; + + return 0; +} + +/* + * Add a low-level interface to the IPMI driver. Note that if the + * interface doesn't know its slave address, it should pass in zero. + * The low-level interface should not deliver any messages to the + * upper layer until the start_processing() function in the handlers + * is called, and the lower layer must get the interface from that + * call. + */ +int ipmi_add_smi(struct module *owner, + const struct ipmi_smi_handlers *handlers, + void *send_info, + struct device *dev, + unsigned char slave_addr); + +#define ipmi_register_smi(handlers, send_info, dev, slave_addr) \ + ipmi_add_smi(THIS_MODULE, handlers, send_info, dev, slave_addr) + +/* + * Remove a low-level interface from the IPMI driver. This will + * return an error if the interface is still in use by a user. + */ +void ipmi_unregister_smi(struct ipmi_smi *intf); + +/* + * The lower layer reports received messages through this interface. + * The data_size should be zero if this is an asynchronous message. If + * the lower layer gets an error sending a message, it should format + * an error response in the message response. + */ +void ipmi_smi_msg_received(struct ipmi_smi *intf, + struct ipmi_smi_msg *msg); + +/* The lower layer received a watchdog pre-timeout on interface. */ +void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf); + +struct ipmi_smi_msg *ipmi_alloc_smi_msg(void); +static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) +{ + msg->done(msg); +} + +#endif /* __LINUX_IPMI_SMI_H */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h new file mode 100644 index 000000000..0ebd180e7 --- /dev/null +++ b/include/linux/ipv6.h @@ -0,0 +1,396 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _IPV6_H +#define _IPV6_H + +#include +#include + +#define ipv6_optlen(p) (((p)->hdrlen+1) << 3) +#define ipv6_authlen(p) (((p)->hdrlen+2) << 2) +/* + * This structure contains configuration options per IPv6 link. 
+ */ +struct ipv6_devconf { + __s32 forwarding; + __s32 hop_limit; + __s32 mtu6; + __s32 accept_ra; + __s32 accept_redirects; + __s32 autoconf; + __s32 dad_transmits; + __s32 rtr_solicits; + __s32 rtr_solicit_interval; + __s32 rtr_solicit_max_interval; + __s32 rtr_solicit_delay; + __s32 force_mld_version; + __s32 mldv1_unsolicited_report_interval; + __s32 mldv2_unsolicited_report_interval; + __s32 use_tempaddr; + __s32 temp_valid_lft; + __s32 temp_prefered_lft; + __s32 regen_max_retry; + __s32 max_desync_factor; + __s32 max_addresses; + __s32 accept_ra_defrtr; + __s32 accept_ra_min_hop_limit; + __s32 accept_ra_pinfo; + __s32 ignore_routes_with_linkdown; +#ifdef CONFIG_IPV6_ROUTER_PREF + __s32 accept_ra_rtr_pref; + __s32 rtr_probe_interval; +#ifdef CONFIG_IPV6_ROUTE_INFO + __s32 accept_ra_rt_info_min_plen; + __s32 accept_ra_rt_info_max_plen; +#endif +#endif + __s32 proxy_ndp; + __s32 accept_source_route; + __s32 accept_ra_from_local; +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + __s32 optimistic_dad; + __s32 use_optimistic; +#endif +#ifdef CONFIG_IPV6_MROUTE + __s32 mc_forwarding; +#endif + __s32 disable_ipv6; + __s32 drop_unicast_in_l2_multicast; + __s32 accept_dad; + __s32 force_tllao; + __s32 ndisc_notify; + __s32 suppress_frag_ndisc; + __s32 accept_ra_mtu; + __s32 drop_unsolicited_na; + struct ipv6_stable_secret { + bool initialized; + struct in6_addr secret; + } stable_secret; + __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; + __s32 seg6_enabled; +#ifdef CONFIG_IPV6_SEG6_HMAC + __s32 seg6_require_hmac; +#endif + __u32 enhanced_dad; + __u32 addr_gen_mode; + __s32 disable_policy; + __s32 ndisc_tclass; + + struct ctl_table_header *sysctl_header; +}; + +struct ipv6_params { + __s32 disable_ipv6; + __s32 autoconf; +}; +extern struct ipv6_params ipv6_defaults; +#include +#include + +#include + +static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb) +{ + return (struct ipv6hdr *)skb_network_header(skb); +} + +static inline struct ipv6hdr *inner_ipv6_hdr(const struct sk_buff *skb) +{ + return (struct ipv6hdr *)skb_inner_network_header(skb); +} + +static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb) +{ + return (struct ipv6hdr *)skb_transport_header(skb); +} + +/* + This structure contains results of exthdrs parsing + as offsets from skb->nh. + */ + +struct inet6_skb_parm { + int iif; + __be16 ra; + __u16 dst0; + __u16 srcrt; + __u16 dst1; + __u16 lastopt; + __u16 nhoff; + __u16 flags; +#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) + __u16 dsthao; +#endif + __u16 frag_max_size; + +#define IP6SKB_XFRM_TRANSFORMED 1 +#define IP6SKB_FORWARDED 2 +#define IP6SKB_REROUTED 4 +#define IP6SKB_ROUTERALERT 8 +#define IP6SKB_FRAGMENTED 16 +#define IP6SKB_HOPBYHOP 32 +#define IP6SKB_L3SLAVE 64 +#define IP6SKB_JUMBOGRAM 128 +}; + +#if defined(CONFIG_NET_L3_MASTER_DEV) +static inline bool ipv6_l3mdev_skb(__u16 flags) +{ + return flags & IP6SKB_L3SLAVE; +} +#else +static inline bool ipv6_l3mdev_skb(__u16 flags) +{ + return false; +} +#endif + +#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) +#define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb)) + +static inline int inet6_iif(const struct sk_buff *skb) +{ + bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags); + + return l3_slave ? 
skb->skb_iif : IP6CB(skb)->iif; +} + +static inline bool inet6_is_jumbogram(const struct sk_buff *skb) +{ + return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM); +} + +/* can not be used in TCP layer after tcp_v6_fill_cb */ +static inline int inet6_sdif(const struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) + return IP6CB(skb)->iif; +#endif + return 0; +} + +/* can not be used in TCP layer after tcp_v6_fill_cb */ +static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb) +{ +#if defined(CONFIG_NET_L3_MASTER_DEV) + if (!net->ipv4.sysctl_tcp_l3mdev_accept && + skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) + return true; +#endif + return false; +} + +struct tcp6_request_sock { + struct tcp_request_sock tcp6rsk_tcp; +}; + +struct ipv6_mc_socklist; +struct ipv6_ac_socklist; +struct ipv6_fl_socklist; + +struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +}; + +/** + * struct ipv6_pinfo - ipv6 private area + * + * In the struct sock hierarchy (tcp6_sock, upd6_sock, etc) + * this _must_ be the last member, so that inet6_sk_generic + * is able to calculate its offset from the base struct sock + * by using the struct proto->slab_obj_size member. -acme + */ +struct ipv6_pinfo { + struct in6_addr saddr; + struct in6_pktinfo sticky_pktinfo; + const struct in6_addr *daddr_cache; +#ifdef CONFIG_IPV6_SUBTREES + const struct in6_addr *saddr_cache; +#endif + + __be32 flow_label; + __u32 frag_size; + + /* + * Packed in 16bits. + * Omit one shift by by putting the signed field at MSB. + */ +#if defined(__BIG_ENDIAN_BITFIELD) + __s16 hop_limit:9; + __u16 __unused_1:7; +#else + __u16 __unused_1:7; + __s16 hop_limit:9; +#endif + +#if defined(__BIG_ENDIAN_BITFIELD) + /* Packed in 16bits. */ + __s16 mcast_hops:9; + __u16 __unused_2:6, + mc_loop:1; +#else + __u16 mc_loop:1, + __unused_2:6; + __s16 mcast_hops:9; +#endif + int ucast_oif; + int mcast_oif; + + /* pktoption flags */ + union { + struct { + __u16 srcrt:1, + osrcrt:1, + rxinfo:1, + rxoinfo:1, + rxhlim:1, + rxohlim:1, + hopopts:1, + ohopopts:1, + dstopts:1, + odstopts:1, + rxflow:1, + rxtclass:1, + rxpmtu:1, + rxorigdstaddr:1, + recvfragsize:1; + /* 1 bits hole */ + } bits; + __u16 all; + } rxopt; + + /* sockopt flags */ + __u16 recverr:1, + sndflow:1, + repflow:1, + pmtudisc:3, + padding:1, /* 1 bit hole */ + srcprefs:3, /* 001: prefer temporary address + * 010: prefer public address + * 100: prefer care-of address + */ + dontfrag:1, + autoflowlabel:1, + autoflowlabel_set:1; + __u8 min_hopcount; + __u8 tclass; + __be32 rcv_flowinfo; + + __u32 dst_cookie; + __u32 rx_dst_cookie; + + struct ipv6_mc_socklist __rcu *ipv6_mc_list; + struct ipv6_ac_socklist *ipv6_ac_list; + struct ipv6_fl_socklist __rcu *ipv6_fl_list; + + struct ipv6_txoptions __rcu *opt; + struct sk_buff *pktoptions; + struct sk_buff *rxpmtu; + struct inet6_cork cork; +}; + +/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! 
*/ +struct raw6_sock { + /* inet_sock has to be the first member of raw6_sock */ + struct inet_sock inet; + __u32 checksum; /* perform checksum */ + __u32 offset; /* checksum offset */ + struct icmp6_filter filter; + __u32 ip6mr_table; + /* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */ + struct ipv6_pinfo inet6; +}; + +struct udp6_sock { + struct udp_sock udp; + /* ipv6_pinfo has to be the last member of udp6_sock, see inet6_sk_generic */ + struct ipv6_pinfo inet6; +}; + +struct tcp6_sock { + struct tcp_sock tcp; + /* ipv6_pinfo has to be the last member of tcp6_sock, see inet6_sk_generic */ + struct ipv6_pinfo inet6; +}; + +extern int inet6_sk_rebuild_header(struct sock *sk); + +struct tcp6_timewait_sock { + struct tcp_timewait_sock tcp6tw_tcp; +}; + +#if IS_ENABLED(CONFIG_IPV6) +bool ipv6_mod_enabled(void); + +static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk) +{ + return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL; +} + +static inline struct raw6_sock *raw6_sk(const struct sock *sk) +{ + return (struct raw6_sock *)sk; +} + +static inline void inet_sk_copy_descendant(struct sock *sk_to, + const struct sock *sk_from) +{ + int ancestor_size = sizeof(struct inet_sock); + + if (sk_from->sk_family == PF_INET6) + ancestor_size += sizeof(struct ipv6_pinfo); + + __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); +} + +#define __ipv6_only_sock(sk) (sk->sk_ipv6only) +#define ipv6_only_sock(sk) (__ipv6_only_sock(sk)) +#define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \ + inet6_sk(sk)->rxopt.bits.rxinfo) + +static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk) +{ + if (sk->sk_family == AF_INET6) + return &sk->sk_v6_rcv_saddr; + return NULL; +} + +static inline int inet_v6_ipv6only(const struct sock *sk) +{ + /* ipv6only field is at same position for timewait and other sockets */ + return ipv6_only_sock(sk); +} +#else +#define __ipv6_only_sock(sk) 0 +#define ipv6_only_sock(sk) 0 +#define ipv6_sk_rxinfo(sk) 0 + +static inline bool ipv6_mod_enabled(void) +{ + return false; +} + +static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) +{ + return NULL; +} + +static inline struct inet6_request_sock * + inet6_rsk(const struct request_sock *rsk) +{ + return NULL; +} + +static inline struct raw6_sock *raw6_sk(const struct sock *sk) +{ + return NULL; +} + +#define inet6_rcv_saddr(__sk) NULL +#define tcp_twsk_ipv6only(__sk) 0 +#define inet_v6_ipv6only(__sk) 0 +#endif /* IS_ENABLED(CONFIG_IPV6) */ +#endif /* _IPV6_H */ diff --git a/include/linux/ipv6_route.h b/include/linux/ipv6_route.h new file mode 100644 index 000000000..25b5f1f5e --- /dev/null +++ b/include/linux/ipv6_route.h @@ -0,0 +1,19 @@ +/* + * Linux INET6 implementation + * + * Authors: + * Pedro Roque + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#ifndef _LINUX_IPV6_ROUTE_H +#define _LINUX_IPV6_ROUTE_H + +#include + +#define IPV6_EXTRACT_PREF(flag) (((flag) & RTF_PREF_MASK) >> 27) +#define IPV6_DECODE_PREF(pref) ((pref) ^ 2) /* 1:low,2:med,3:high */ +#endif diff --git a/include/linux/irq.h b/include/linux/irq.h new file mode 100644 index 000000000..950426741 --- /dev/null +++ b/include/linux/irq.h @@ -0,0 +1,1227 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IRQ_H +#define _LINUX_IRQ_H + +/* + * Please do not include this file in generic code. There is currently + * no requirement for any architecture to implement anything held + * within this file. + * + * Thanks. --rmk + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +struct seq_file; +struct module; +struct msi_msg; +enum irqchip_irq_state; + +/* + * IRQ line status. + * + * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h + * + * IRQ_TYPE_NONE - default, unspecified type + * IRQ_TYPE_EDGE_RISING - rising edge triggered + * IRQ_TYPE_EDGE_FALLING - falling edge triggered + * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered + * IRQ_TYPE_LEVEL_HIGH - high level triggered + * IRQ_TYPE_LEVEL_LOW - low level triggered + * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits + * IRQ_TYPE_SENSE_MASK - Mask for all the above bits + * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type + * to setup the HW to a sane default (used + * by irqdomain map() callbacks to synchronize + * the HW state and SW flags for a newly + * allocated descriptor). + * + * IRQ_TYPE_PROBE - Special flag for probing in progress + * + * Bits which can be modified via irq_set/clear/modify_status_flags() + * IRQ_LEVEL - Interrupt is level type. Will be also + * updated in the code when the above trigger + * bits are modified via irq_set_irq_type() + * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect + * it from affinity setting + * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing + * IRQ_NOREQUEST - Interrupt cannot be requested via + * request_irq() + * IRQ_NOTHREAD - Interrupt cannot be threaded + * IRQ_NOAUTOEN - Interrupt is not automatically enabled in + * request/setup_irq() + * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) + * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context + * IRQ_NESTED_THREAD - Interrupt nests into another thread + * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude + * it from the spurious interrupt detection + * mechanism and from core side polling. 
+ * IRQ_DISABLE_UNLAZY - Disable lazy irq disable + */ +enum { + IRQ_TYPE_NONE = 0x00000000, + IRQ_TYPE_EDGE_RISING = 0x00000001, + IRQ_TYPE_EDGE_FALLING = 0x00000002, + IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), + IRQ_TYPE_LEVEL_HIGH = 0x00000004, + IRQ_TYPE_LEVEL_LOW = 0x00000008, + IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), + IRQ_TYPE_SENSE_MASK = 0x0000000f, + IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK, + + IRQ_TYPE_PROBE = 0x00000010, + + IRQ_LEVEL = (1 << 8), + IRQ_PER_CPU = (1 << 9), + IRQ_NOPROBE = (1 << 10), + IRQ_NOREQUEST = (1 << 11), + IRQ_NOAUTOEN = (1 << 12), + IRQ_NO_BALANCING = (1 << 13), + IRQ_MOVE_PCNTXT = (1 << 14), + IRQ_NESTED_THREAD = (1 << 15), + IRQ_NOTHREAD = (1 << 16), + IRQ_PER_CPU_DEVID = (1 << 17), + IRQ_IS_POLLED = (1 << 18), + IRQ_DISABLE_UNLAZY = (1 << 19), +}; + +#define IRQF_MODIFY_MASK \ + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ + IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) + +#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) + +/* + * Return value for chip->irq_set_affinity() + * + * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity + * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity + * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to + * support stacked irqchips, which indicates skipping + * all descendent irqchips. + */ +enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY, + IRQ_SET_MASK_OK_DONE, +}; + +struct msi_desc; +struct irq_domain; + +/** + * struct irq_common_data - per irq data shared by all irqchips + * @state_use_accessors: status information for irq chip functions. + * Use accessor functions to deal with it + * @node: node index useful for balancing + * @handler_data: per-IRQ data for the irq_chip methods + * @affinity: IRQ affinity on SMP. If this is an IPI + * related irq, then this is the mask of the + * CPUs to which an IPI can be sent. + * @effective_affinity: The effective IRQ affinity on SMP as some irq + * chips do not allow multi CPU destinations. + * A subset of @affinity. + * @msi_desc: MSI descriptor + * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional. + */ +struct irq_common_data { + unsigned int __private state_use_accessors; +#ifdef CONFIG_NUMA + unsigned int node; +#endif + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; +#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK + cpumask_var_t effective_affinity; +#endif +#ifdef CONFIG_GENERIC_IRQ_IPI + unsigned int ipi_offset; +#endif +}; + +/** + * struct irq_data - per irq chip data passed down to chip functions + * @mask: precomputed bitmask for accessing the chip registers + * @irq: interrupt number + * @hwirq: hardware interrupt number, local to the interrupt domain + * @common: point to data shared by all irqchips + * @chip: low level interrupt hardware access + * @domain: Interrupt translation domain; responsible for mapping + * between hwirq number and linux irq number. 
+ * @parent_data: pointer to parent struct irq_data to support hierarchy + * irq_domain + * @chip_data: platform-specific per-chip private data for the chip + * methods, to allow shared chip implementations + */ +struct irq_data { + u32 mask; + unsigned int irq; + unsigned long hwirq; + struct irq_common_data *common; + struct irq_chip *chip; + struct irq_domain *domain; +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + struct irq_data *parent_data; +#endif + void *chip_data; +}; + +/* + * Bit masks for irq_common_data.state_use_accessors + * + * IRQD_TRIGGER_MASK - Mask for the trigger type bits + * IRQD_SETAFFINITY_PENDING - Affinity setting is pending + * IRQD_ACTIVATED - Interrupt has already been activated + * IRQD_NO_BALANCING - Balancing disabled for this IRQ + * IRQD_PER_CPU - Interrupt is per cpu + * IRQD_AFFINITY_SET - Interrupt affinity was set + * IRQD_LEVEL - Interrupt is level triggered + * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup + * from suspend + * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process + * context + * IRQD_IRQ_DISABLED - Disabled state of the interrupt + * IRQD_IRQ_MASKED - Masked state of the interrupt + * IRQD_IRQ_INPROGRESS - In progress state of the interrupt + * IRQD_WAKEUP_ARMED - Wakeup mode armed + * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU + * IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel + * IRQD_IRQ_STARTED - Startup state of the interrupt + * IRQD_MANAGED_SHUTDOWN - Interrupt was shutdown due to empty affinity + * mask. Applies only to affinity managed irqs. + * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target + * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set + * IRQD_CAN_RESERVE - Can use reservation mode + * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change + * required + * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call + * irq_chip::irq_set_affinity() when deactivated. 
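+ *
+ * These bits are meant to be tested through the irqd_*() accessors
+ * declared below, for example (sketch):
+ *
+ *	if (irqd_irq_disabled(d) || irqd_irq_masked(d))
+ *		return;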
+ */ +enum { + IRQD_TRIGGER_MASK = 0xf, + IRQD_SETAFFINITY_PENDING = (1 << 8), + IRQD_ACTIVATED = (1 << 9), + IRQD_NO_BALANCING = (1 << 10), + IRQD_PER_CPU = (1 << 11), + IRQD_AFFINITY_SET = (1 << 12), + IRQD_LEVEL = (1 << 13), + IRQD_WAKEUP_STATE = (1 << 14), + IRQD_MOVE_PCNTXT = (1 << 15), + IRQD_IRQ_DISABLED = (1 << 16), + IRQD_IRQ_MASKED = (1 << 17), + IRQD_IRQ_INPROGRESS = (1 << 18), + IRQD_WAKEUP_ARMED = (1 << 19), + IRQD_FORWARDED_TO_VCPU = (1 << 20), + IRQD_AFFINITY_MANAGED = (1 << 21), + IRQD_IRQ_STARTED = (1 << 22), + IRQD_MANAGED_SHUTDOWN = (1 << 23), + IRQD_SINGLE_TARGET = (1 << 24), + IRQD_DEFAULT_TRIGGER_SET = (1 << 25), + IRQD_CAN_RESERVE = (1 << 26), + IRQD_MSI_NOMASK_QUIRK = (1 << 27), + IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), +}; + +#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) + +static inline bool irqd_is_setaffinity_pending(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING; +} + +static inline bool irqd_is_per_cpu(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_PER_CPU; +} + +static inline bool irqd_can_balance(struct irq_data *d) +{ + return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING)); +} + +static inline bool irqd_affinity_was_set(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_SET; +} + +static inline void irqd_mark_affinity_was_set(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_AFFINITY_SET; +} + +static inline bool irqd_trigger_type_was_set(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET; +} + +static inline u32 irqd_get_trigger_type(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_TRIGGER_MASK; +} + +/* + * Must only be called inside irq_chip.irq_set_type() functions or + * from the DT/ACPI setup code. + */ +static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) +{ + __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK; + __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK; + __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET; +} + +static inline bool irqd_is_level_type(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_LEVEL; +} + +/* + * Must only be called of irqchip.irq_set_affinity() or low level + * hieararchy domain allocation functions. 
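+ *
+ * A typical call site (sketch; "domain" and "virq" stand for the usual
+ * parameters of a hierarchy domain's allocation callback) would be:
+ *
+ *	irqd_set_single_target(irq_domain_get_irq_data(domain, virq));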
+ */ +static inline void irqd_set_single_target(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_SINGLE_TARGET; +} + +static inline bool irqd_is_single_target(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_SINGLE_TARGET; +} + +static inline bool irqd_is_wakeup_set(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_WAKEUP_STATE; +} + +static inline bool irqd_can_move_in_process_context(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_MOVE_PCNTXT; +} + +static inline bool irqd_irq_disabled(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_IRQ_DISABLED; +} + +static inline bool irqd_irq_masked(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_IRQ_MASKED; +} + +static inline bool irqd_irq_inprogress(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS; +} + +static inline bool irqd_is_wakeup_armed(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; +} + +static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU; +} + +static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU; +} + +static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) +{ + __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; +} + +static inline bool irqd_affinity_is_managed(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; +} + +static inline bool irqd_is_activated(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_ACTIVATED; +} + +static inline void irqd_set_activated(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_ACTIVATED; +} + +static inline void irqd_clr_activated(struct irq_data *d) +{ + __irqd_to_state(d) &= ~IRQD_ACTIVATED; +} + +static inline bool irqd_is_started(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_IRQ_STARTED; +} + +static inline bool irqd_is_managed_and_shutdown(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; +} + +static inline void irqd_set_can_reserve(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_CAN_RESERVE; +} + +static inline void irqd_clr_can_reserve(struct irq_data *d) +{ + __irqd_to_state(d) &= ~IRQD_CAN_RESERVE; +} + +static inline bool irqd_can_reserve(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_CAN_RESERVE; +} + +static inline void irqd_set_msi_nomask_quirk(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK; +} + +static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d) +{ + __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK; +} + +static inline bool irqd_msi_nomask_quirk(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; +} + +static inline void irqd_set_affinity_on_activate(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; +} + +static inline bool irqd_affinity_on_activate(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; +} + +#undef __irqd_to_state + +static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) +{ + return d->hwirq; +} + +/** + * struct irq_chip - hardware interrupt chip descriptor + * + * @parent_device: pointer to parent device for irqchip + * @name: name for /proc/interrupts + * @irq_startup: start up the interrupt (defaults to ->enable if NULL) + * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) + * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) + * @irq_disable: disable the interrupt + * @irq_ack: start of a new 
interrupt + * @irq_mask: mask an interrupt source + * @irq_mask_ack: ack and mask an interrupt source + * @irq_unmask: unmask an interrupt source + * @irq_eoi: end of interrupt + * @irq_set_affinity: Set the CPU affinity on SMP machines. If the force + * argument is true, it tells the driver to + * unconditionally apply the affinity setting. Sanity + * checks against the supplied affinity mask are not + * required. This is used for CPU hotplug where the + * target CPU is not yet set in the cpu_online_mask. + * @irq_retrigger: resend an IRQ to the CPU + * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ + * @irq_set_wake: enable/disable power-management wake-on of an IRQ + * @irq_bus_lock: function to lock access to slow bus (i2c) chips + * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips + * @irq_cpu_online: configure an interrupt source for a secondary CPU + * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU + * @irq_suspend: function called from core code on suspend once per + * chip, when one or more interrupts are installed + * @irq_resume: function called from core code on resume once per chip, + * when one ore more interrupts are installed + * @irq_pm_shutdown: function called from core code on shutdown once per chip + * @irq_calc_mask: Optional function to set irq_data.mask for special cases + * @irq_print_chip: optional to print special chip info in show_interrupts + * @irq_request_resources: optional to request resources before calling + * any other callback related to this irq + * @irq_release_resources: optional to release resources acquired with + * irq_request_resources + * @irq_compose_msi_msg: optional to compose message content for MSI + * @irq_write_msi_msg: optional to write message content for MSI + * @irq_get_irqchip_state: return the internal state of an interrupt + * @irq_set_irqchip_state: set the internal state of a interrupt + * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine + * @ipi_send_single: send a single IPI to destination cpus + * @ipi_send_mask: send an IPI to destination cpus in cpumask + * @flags: chip specific flags + */ +struct irq_chip { + struct device *parent_device; + const char *name; + unsigned int (*irq_startup)(struct irq_data *data); + void (*irq_shutdown)(struct irq_data *data); + void (*irq_enable)(struct irq_data *data); + void (*irq_disable)(struct irq_data *data); + + void (*irq_ack)(struct irq_data *data); + void (*irq_mask)(struct irq_data *data); + void (*irq_mask_ack)(struct irq_data *data); + void (*irq_unmask)(struct irq_data *data); + void (*irq_eoi)(struct irq_data *data); + + int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); + int (*irq_retrigger)(struct irq_data *data); + int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); + int (*irq_set_wake)(struct irq_data *data, unsigned int on); + + void (*irq_bus_lock)(struct irq_data *data); + void (*irq_bus_sync_unlock)(struct irq_data *data); + + void (*irq_cpu_online)(struct irq_data *data); + void (*irq_cpu_offline)(struct irq_data *data); + + void (*irq_suspend)(struct irq_data *data); + void (*irq_resume)(struct irq_data *data); + void (*irq_pm_shutdown)(struct irq_data *data); + + void (*irq_calc_mask)(struct irq_data *data); + + void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); + int (*irq_request_resources)(struct irq_data *data); + void (*irq_release_resources)(struct irq_data *data); + + void (*irq_compose_msi_msg)(struct 
irq_data *data, struct msi_msg *msg); + void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); + + int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state); + int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state); + + int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info); + + void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); + void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); + + unsigned long flags; +}; + +/* + * irq_chip specific flags + * + * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() + * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled + * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path + * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks + * when irq enabled + * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip + * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask + * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode + * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs + * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup + */ +enum { + IRQCHIP_SET_TYPE_MASKED = (1 << 0), + IRQCHIP_EOI_IF_HANDLED = (1 << 1), + IRQCHIP_MASK_ON_SUSPEND = (1 << 2), + IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), + IRQCHIP_SKIP_SET_WAKE = (1 << 4), + IRQCHIP_ONESHOT_SAFE = (1 << 5), + IRQCHIP_EOI_THREADED = (1 << 6), + IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), + IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), +}; + +#include + +/* + * Pick up the arch-dependent methods: + */ +#include + +#ifndef NR_IRQS_LEGACY +# define NR_IRQS_LEGACY 0 +#endif + +#ifndef ARCH_IRQ_INIT_FLAGS +# define ARCH_IRQ_INIT_FLAGS 0 +#endif + +#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS + +struct irqaction; +extern int setup_irq(unsigned int irq, struct irqaction *new); +extern void remove_irq(unsigned int irq, struct irqaction *act); +extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); +extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); + +extern void irq_cpu_online(void); +extern void irq_cpu_offline(void); +extern int irq_set_affinity_locked(struct irq_data *data, + const struct cpumask *cpumask, bool force); +extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); + +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION) +extern void irq_migrate_all_off_this_cpu(void); +extern int irq_affinity_online_cpu(unsigned int cpu); +#else +# define irq_affinity_online_cpu NULL +#endif + +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) +void __irq_move_irq(struct irq_data *data); +static inline void irq_move_irq(struct irq_data *data) +{ + if (unlikely(irqd_is_setaffinity_pending(data))) + __irq_move_irq(data); +} +void irq_move_masked_irq(struct irq_data *data); +void irq_force_complete_move(struct irq_desc *desc); +#else +static inline void irq_move_irq(struct irq_data *data) { } +static inline void irq_move_masked_irq(struct irq_data *data) { } +static inline void irq_force_complete_move(struct irq_desc *desc) { } +#endif + +extern int no_irq_affinity; + +#ifdef CONFIG_HARDIRQS_SW_RESEND +int irq_set_parent(int irq, int parent_irq); +#else +static inline int irq_set_parent(int irq, int parent_irq) +{ + return 0; +} +#endif + +/* + * Built-in IRQ handlers for various IRQ types, + * callable via desc->handle_irq() + */ +extern void handle_level_irq(struct irq_desc 
*desc); +extern void handle_fasteoi_irq(struct irq_desc *desc); +extern void handle_edge_irq(struct irq_desc *desc); +extern void handle_edge_eoi_irq(struct irq_desc *desc); +extern void handle_simple_irq(struct irq_desc *desc); +extern void handle_untracked_irq(struct irq_desc *desc); +extern void handle_percpu_irq(struct irq_desc *desc); +extern void handle_percpu_devid_irq(struct irq_desc *desc); +extern void handle_bad_irq(struct irq_desc *desc); +extern void handle_nested_irq(unsigned int irq); + +extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); +extern int irq_chip_pm_get(struct irq_data *data); +extern int irq_chip_pm_put(struct irq_data *data); +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY +extern void handle_fasteoi_ack_irq(struct irq_desc *desc); +extern void handle_fasteoi_mask_irq(struct irq_desc *desc); +extern void irq_chip_enable_parent(struct irq_data *data); +extern void irq_chip_disable_parent(struct irq_data *data); +extern void irq_chip_ack_parent(struct irq_data *data); +extern int irq_chip_retrigger_hierarchy(struct irq_data *data); +extern void irq_chip_mask_parent(struct irq_data *data); +extern void irq_chip_unmask_parent(struct irq_data *data); +extern void irq_chip_eoi_parent(struct irq_data *data); +extern int irq_chip_set_affinity_parent(struct irq_data *data, + const struct cpumask *dest, + bool force); +extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); +extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, + void *vcpu_info); +extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); +#endif + +/* Handling of unhandled and spurious interrupts: */ +extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret); + + +/* Enable/disable irq debugging output: */ +extern int noirqdebug_setup(char *str); + +/* Checks whether the interrupt can be requested by request_irq(): */ +extern int can_request_irq(unsigned int irq, unsigned long irqflags); + +/* Dummy irq-chip implementations: */ +extern struct irq_chip no_irq_chip; +extern struct irq_chip dummy_irq_chip; + +extern void +irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle, const char *name); + +static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle) +{ + irq_set_chip_and_handler_name(irq, chip, handle, NULL); +} + +extern int irq_set_percpu_devid(unsigned int irq); +extern int irq_set_percpu_devid_partition(unsigned int irq, + const struct cpumask *affinity); +extern int irq_get_percpu_devid_partition(unsigned int irq, + struct cpumask *affinity); + +extern void +__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, + const char *name); + +static inline void +irq_set_handler(unsigned int irq, irq_flow_handler_t handle) +{ + __irq_set_handler(irq, handle, 0, NULL); +} + +/* + * Set a highlevel chained flow handler for a given IRQ. + * (a chained handler is automatically enabled and set to + * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) + */ +static inline void +irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) +{ + __irq_set_handler(irq, handle, 1, NULL); +} + +/* + * Set a highlevel chained flow handler and its data for a given IRQ. 
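+ *
+ * A minimal usage sketch, e.g. for a GPIO bank demultiplexer; the handler
+ * and data names below are illustrative only, not part of this API:
+ *
+ *	irq_set_chained_handler_and_data(parent_irq,
+ *					 my_gpio_bank_demux_handler,
+ *					 my_gpio_chip);
+ *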
+ * (a chained handler is automatically enabled and set to + * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) + */ +void +irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, + void *data); + +void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); + +static inline void irq_set_status_flags(unsigned int irq, unsigned long set) +{ + irq_modify_status(irq, 0, set); +} + +static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) +{ + irq_modify_status(irq, clr, 0); +} + +static inline void irq_set_noprobe(unsigned int irq) +{ + irq_modify_status(irq, 0, IRQ_NOPROBE); +} + +static inline void irq_set_probe(unsigned int irq) +{ + irq_modify_status(irq, IRQ_NOPROBE, 0); +} + +static inline void irq_set_nothread(unsigned int irq) +{ + irq_modify_status(irq, 0, IRQ_NOTHREAD); +} + +static inline void irq_set_thread(unsigned int irq) +{ + irq_modify_status(irq, IRQ_NOTHREAD, 0); +} + +static inline void irq_set_nested_thread(unsigned int irq, bool nest) +{ + if (nest) + irq_set_status_flags(irq, IRQ_NESTED_THREAD); + else + irq_clear_status_flags(irq, IRQ_NESTED_THREAD); +} + +static inline void irq_set_percpu_devid_flags(unsigned int irq) +{ + irq_set_status_flags(irq, + IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD | + IRQ_NOPROBE | IRQ_PER_CPU_DEVID); +} + +/* Set/get chip/data for an IRQ: */ +extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); +extern int irq_set_handler_data(unsigned int irq, void *data); +extern int irq_set_chip_data(unsigned int irq, void *data); +extern int irq_set_irq_type(unsigned int irq, unsigned int type); +extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); +extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, + struct msi_desc *entry); +extern struct irq_data *irq_get_irq_data(unsigned int irq); + +static inline struct irq_chip *irq_get_chip(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->chip : NULL; +} + +static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) +{ + return d->chip; +} + +static inline void *irq_get_chip_data(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->chip_data : NULL; +} + +static inline void *irq_data_get_irq_chip_data(struct irq_data *d) +{ + return d->chip_data; +} + +static inline void *irq_get_handler_data(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->common->handler_data : NULL; +} + +static inline void *irq_data_get_irq_handler_data(struct irq_data *d) +{ + return d->common->handler_data; +} + +static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->common->msi_desc : NULL; +} + +static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) +{ + return d->common->msi_desc; +} + +static inline u32 irq_get_trigger_type(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? irqd_get_trigger_type(d) : 0; +} + +static inline int irq_common_data_get_node(struct irq_common_data *d) +{ +#ifdef CONFIG_NUMA + return d->node; +#else + return 0; +#endif +} + +static inline int irq_data_get_node(struct irq_data *d) +{ + return irq_common_data_get_node(d->common); +} + +static inline struct cpumask *irq_get_affinity_mask(int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + + return d ? 
d->common->affinity : NULL; +} + +static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) +{ + return d->common->affinity; +} + +#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK +static inline +struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) +{ + return d->common->effective_affinity; +} +static inline void irq_data_update_effective_affinity(struct irq_data *d, + const struct cpumask *m) +{ + cpumask_copy(d->common->effective_affinity, m); +} +#else +static inline void irq_data_update_effective_affinity(struct irq_data *d, + const struct cpumask *m) +{ +} +static inline +struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) +{ + return d->common->affinity; +} +#endif + +unsigned int arch_dynirq_lower_bound(unsigned int from); + +int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, + struct module *owner, const struct cpumask *affinity); + +int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, + unsigned int cnt, int node, struct module *owner, + const struct cpumask *affinity); + +/* use macros to avoid needing export.h for THIS_MODULE */ +#define irq_alloc_descs(irq, from, cnt, node) \ + __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) + +#define irq_alloc_desc(node) \ + irq_alloc_descs(-1, 0, 1, node) + +#define irq_alloc_desc_at(at, node) \ + irq_alloc_descs(at, at, 1, node) + +#define irq_alloc_desc_from(from, node) \ + irq_alloc_descs(-1, from, 1, node) + +#define irq_alloc_descs_from(from, cnt, node) \ + irq_alloc_descs(-1, from, cnt, node) + +#define devm_irq_alloc_descs(dev, irq, from, cnt, node) \ + __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL) + +#define devm_irq_alloc_desc(dev, node) \ + devm_irq_alloc_descs(dev, -1, 0, 1, node) + +#define devm_irq_alloc_desc_at(dev, at, node) \ + devm_irq_alloc_descs(dev, at, at, 1, node) + +#define devm_irq_alloc_desc_from(dev, from, node) \ + devm_irq_alloc_descs(dev, -1, from, 1, node) + +#define devm_irq_alloc_descs_from(dev, from, cnt, node) \ + devm_irq_alloc_descs(dev, -1, from, cnt, node) + +void irq_free_descs(unsigned int irq, unsigned int cnt); +static inline void irq_free_desc(unsigned int irq) +{ + irq_free_descs(irq, 1); +} + +#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ +unsigned int irq_alloc_hwirqs(int cnt, int node); +static inline unsigned int irq_alloc_hwirq(int node) +{ + return irq_alloc_hwirqs(1, node); +} +void irq_free_hwirqs(unsigned int from, int cnt); +static inline void irq_free_hwirq(unsigned int irq) +{ + return irq_free_hwirqs(irq, 1); +} +int arch_setup_hwirq(unsigned int irq, int node); +void arch_teardown_hwirq(unsigned int irq); +#endif + +#ifdef CONFIG_GENERIC_IRQ_LEGACY +void irq_init_desc(unsigned int irq); +#endif + +/** + * struct irq_chip_regs - register offsets for struct irq_gci + * @enable: Enable register offset to reg_base + * @disable: Disable register offset to reg_base + * @mask: Mask register offset to reg_base + * @ack: Ack register offset to reg_base + * @eoi: Eoi register offset to reg_base + * @type: Type configuration register offset to reg_base + * @polarity: Polarity configuration register offset to reg_base + */ +struct irq_chip_regs { + unsigned long enable; + unsigned long disable; + unsigned long mask; + unsigned long ack; + unsigned long eoi; + unsigned long type; + unsigned long polarity; +}; + +/** + * struct irq_chip_type - Generic interrupt chip instance for a flow type + * @chip: The real interrupt chip which provides the callbacks + * 
@regs: Register offsets for this chip + * @handler: Flow handler associated with this chip + * @type: Chip can handle these flow types + * @mask_cache_priv: Cached mask register private to the chip type + * @mask_cache: Pointer to cached mask register + * + * A irq_generic_chip can have several instances of irq_chip_type when + * it requires different functions and register offsets for different + * flow types. + */ +struct irq_chip_type { + struct irq_chip chip; + struct irq_chip_regs regs; + irq_flow_handler_t handler; + u32 type; + u32 mask_cache_priv; + u32 *mask_cache; +}; + +/** + * struct irq_chip_generic - Generic irq chip data structure + * @lock: Lock to protect register and cache data access + * @reg_base: Register base address (virtual) + * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) + * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) + * @suspend: Function called from core code on suspend once per + * chip; can be useful instead of irq_chip::suspend to + * handle chip details even when no interrupts are in use + * @resume: Function called from core code on resume once per chip; + * can be useful instead of irq_chip::suspend to handle + * chip details even when no interrupts are in use + * @irq_base: Interrupt base nr for this chip + * @irq_cnt: Number of interrupts handled by this chip + * @mask_cache: Cached mask register shared between all chip types + * @type_cache: Cached type register + * @polarity_cache: Cached polarity register + * @wake_enabled: Interrupt can wakeup from suspend + * @wake_active: Interrupt is marked as an wakeup from suspend source + * @num_ct: Number of available irq_chip_type instances (usually 1) + * @private: Private data for non generic chip callbacks + * @installed: bitfield to denote installed interrupts + * @unused: bitfield to denote unused interrupts + * @domain: irq domain pointer + * @list: List head for keeping track of instances + * @chip_types: Array of interrupt irq_chip_types + * + * Note, that irq_chip_generic can have multiple irq_chip_type + * implementations which can be associated to a particular irq line of + * an irq_chip_generic instance. That allows to share and protect + * state in an irq_chip_generic instance when we need to implement + * different flow mechanisms (level/edge) for it. + */ +struct irq_chip_generic { + raw_spinlock_t lock; + void __iomem *reg_base; + u32 (*reg_readl)(void __iomem *addr); + void (*reg_writel)(u32 val, void __iomem *addr); + void (*suspend)(struct irq_chip_generic *gc); + void (*resume)(struct irq_chip_generic *gc); + unsigned int irq_base; + unsigned int irq_cnt; + u32 mask_cache; + u32 type_cache; + u32 polarity_cache; + u32 wake_enabled; + u32 wake_active; + unsigned int num_ct; + void *private; + unsigned long installed; + unsigned long unused; + struct irq_domain *domain; + struct list_head list; + struct irq_chip_type chip_types[0]; +}; + +/** + * enum irq_gc_flags - Initialization flags for generic irq chips + * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg + * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for + * irq chips which need to call irq_set_wake() on + * the parent irq. 
Usually GPIO implementations + * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private + * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask + * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE) + */ +enum irq_gc_flags { + IRQ_GC_INIT_MASK_CACHE = 1 << 0, + IRQ_GC_INIT_NESTED_LOCK = 1 << 1, + IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, + IRQ_GC_NO_MASK = 1 << 3, + IRQ_GC_BE_IO = 1 << 4, +}; + +/* + * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains + * @irqs_per_chip: Number of interrupts per chip + * @num_chips: Number of chips + * @irq_flags_to_set: IRQ* flags to set on irq setup + * @irq_flags_to_clear: IRQ* flags to clear on irq setup + * @gc_flags: Generic chip specific setup flags + * @gc: Array of pointers to generic interrupt chips + */ +struct irq_domain_chip_generic { + unsigned int irqs_per_chip; + unsigned int num_chips; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + struct irq_chip_generic *gc[0]; +}; + +/* Generic chip callback functions */ +void irq_gc_noop(struct irq_data *d); +void irq_gc_mask_disable_reg(struct irq_data *d); +void irq_gc_mask_set_bit(struct irq_data *d); +void irq_gc_mask_clr_bit(struct irq_data *d); +void irq_gc_unmask_enable_reg(struct irq_data *d); +void irq_gc_ack_set_bit(struct irq_data *d); +void irq_gc_ack_clr_bit(struct irq_data *d); +void irq_gc_mask_disable_and_ack_set(struct irq_data *d); +void irq_gc_eoi(struct irq_data *d); +int irq_gc_set_wake(struct irq_data *d, unsigned int on); + +/* Setup functions for irq_chip_generic */ +int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw_irq); +struct irq_chip_generic * +irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, + void __iomem *reg_base, irq_flow_handler_t handler); +void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, + enum irq_gc_flags flags, unsigned int clr, + unsigned int set); +int irq_setup_alt_chip(struct irq_data *d, unsigned int type); +void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, + unsigned int clr, unsigned int set); + +struct irq_chip_generic * +devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct, + unsigned int irq_base, void __iomem *reg_base, + irq_flow_handler_t handler); +int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc, + u32 msk, enum irq_gc_flags flags, + unsigned int clr, unsigned int set); + +struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); + +int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, + int num_ct, const char *name, + irq_flow_handler_t handler, + unsigned int clr, unsigned int set, + enum irq_gc_flags flags); + +#define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name, \ + handler, clr, set, flags) \ +({ \ + MAYBE_BUILD_BUG_ON(irqs_per_chip > 32); \ + __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\ + handler, clr, set, flags); \ +}) + +static inline void irq_free_generic_chip(struct irq_chip_generic *gc) +{ + kfree(gc); +} + +static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc, + u32 msk, unsigned int clr, + unsigned int set) +{ + irq_remove_generic_chip(gc, msk, clr, set); + irq_free_generic_chip(gc); +} + +static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) +{ + return container_of(d->chip, struct irq_chip_type, chip); +} + +#define IRQ_MSK(n) (u32)((n) < 32 ? 
((1 << (n)) - 1) : UINT_MAX) + +#ifdef CONFIG_SMP +static inline void irq_gc_lock(struct irq_chip_generic *gc) +{ + raw_spin_lock(&gc->lock); +} + +static inline void irq_gc_unlock(struct irq_chip_generic *gc) +{ + raw_spin_unlock(&gc->lock); +} +#else +static inline void irq_gc_lock(struct irq_chip_generic *gc) { } +static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } +#endif + +/* + * The irqsave variants are for usage in non interrupt code. Do not use + * them in irq_chip callbacks. Use irq_gc_lock() instead. + */ +#define irq_gc_lock_irqsave(gc, flags) \ + raw_spin_lock_irqsave(&(gc)->lock, flags) + +#define irq_gc_unlock_irqrestore(gc, flags) \ + raw_spin_unlock_irqrestore(&(gc)->lock, flags) + +static inline void irq_reg_writel(struct irq_chip_generic *gc, + u32 val, int reg_offset) +{ + if (gc->reg_writel) + gc->reg_writel(val, gc->reg_base + reg_offset); + else + writel(val, gc->reg_base + reg_offset); +} + +static inline u32 irq_reg_readl(struct irq_chip_generic *gc, + int reg_offset) +{ + if (gc->reg_readl) + return gc->reg_readl(gc->reg_base + reg_offset); + else + return readl(gc->reg_base + reg_offset); +} + +struct irq_matrix; +struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits, + unsigned int alloc_start, + unsigned int alloc_end); +void irq_matrix_online(struct irq_matrix *m); +void irq_matrix_offline(struct irq_matrix *m); +void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); +int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); +void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); +int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, + unsigned int *mapped_cpu); +void irq_matrix_reserve(struct irq_matrix *m); +void irq_matrix_remove_reserved(struct irq_matrix *m); +int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, + bool reserved, unsigned int *mapped_cpu); +void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, + unsigned int bit, bool managed); +void irq_matrix_assign(struct irq_matrix *m, unsigned int bit); +unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown); +unsigned int irq_matrix_allocated(struct irq_matrix *m); +unsigned int irq_matrix_reserved(struct irq_matrix *m); +void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind); + +/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */ +#define INVALID_HWIRQ (~0UL) +irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu); +int __ipi_send_single(struct irq_desc *desc, unsigned int cpu); +int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest); +int ipi_send_single(unsigned int virq, unsigned int cpu); +int ipi_send_mask(unsigned int virq, const struct cpumask *dest); + +#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER +/* + * Registers a generic IRQ handling function as the top-level IRQ handler in + * the system, which is generally the first C code called from an assembly + * architecture-specific interrupt handler. + * + * Returns 0 on success, or -EBUSY if an IRQ handler has already been + * registered. + */ +int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)); + +/* + * Allows interrupt handlers to find the irqchip that's been registered as the + * top-level IRQ handler. 
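+ *
+ * A minimal registration sketch for a hypothetical root irqchip driver;
+ * the my_* names and the pending-register read are illustrative only:
+ *
+ *	static void my_handle_irq(struct pt_regs *regs)
+ *	{
+ *		u32 hwirq = readl_relaxed(my_base + MY_PENDING);
+ *
+ *		handle_domain_irq(my_domain, hwirq, regs);
+ *	}
+ *
+ *	static int __init my_irqchip_init(struct device_node *np,
+ *					  struct device_node *parent)
+ *	{
+ *		...
+ *		return set_handle_irq(my_handle_irq);
+ *	}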
+ */ +extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init; +#endif + +#endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h new file mode 100644 index 000000000..6e8895cd4 --- /dev/null +++ b/include/linux/irq_cpustat.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __irq_cpustat_h +#define __irq_cpustat_h + +/* + * Contains default mappings for irq_cpustat_t, used by almost every + * architecture. Some arch (like s390) have per cpu hardware pages and + * they define their own mappings for irq_stat. + * + * Keith Owens July 2000. + */ + + +/* + * Simple wrappers reducing source bloat. Define all irq_stat fields + * here, even ones that are arch dependent. That way we get common + * definitions instead of differing sets for each arch. + */ + +#ifndef __ARCH_IRQ_STAT +DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); /* defined in asm/hardirq.h */ +#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat.member, cpu)) +#endif + +/* arch dependent irq_stat fields */ +#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */ + +#endif /* __irq_cpustat_h */ diff --git a/include/linux/irq_poll.h b/include/linux/irq_poll.h new file mode 100644 index 000000000..16aaeccb6 --- /dev/null +++ b/include/linux/irq_poll.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef IRQ_POLL_H +#define IRQ_POLL_H + +struct irq_poll; +typedef int (irq_poll_fn)(struct irq_poll *, int); + +struct irq_poll { + struct list_head list; + unsigned long state; + int weight; + irq_poll_fn *poll; +}; + +enum { + IRQ_POLL_F_SCHED = 0, + IRQ_POLL_F_DISABLE = 1, +}; + +extern void irq_poll_sched(struct irq_poll *); +extern void irq_poll_init(struct irq_poll *, int, irq_poll_fn *); +extern void irq_poll_complete(struct irq_poll *); +extern void irq_poll_enable(struct irq_poll *); +extern void irq_poll_disable(struct irq_poll *); + +#endif diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h new file mode 100644 index 000000000..630a57e55 --- /dev/null +++ b/include/linux/irq_sim.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2017-2018 Bartosz Golaszewski + */ + +#ifndef _LINUX_IRQ_SIM_H +#define _LINUX_IRQ_SIM_H + +#include +#include + +/* + * Provides a framework for allocating simulated interrupts which can be + * requested like normal irqs and enqueued from process context. 
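+ *
+ * A minimal usage sketch; error handling is omitted and the handler and
+ * name below are illustrative only:
+ *
+ *	struct irq_sim sim;
+ *	int irq;
+ *
+ *	irq_sim_init(&sim, 1);
+ *	irq = irq_sim_irqnum(&sim, 0);
+ *	request_irq(irq, my_handler, 0, "my-sim", NULL);
+ *
+ *	irq_sim_fire(&sim, 0);
+ *
+ *	free_irq(irq, NULL);
+ *	irq_sim_fini(&sim);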
+ */ + +struct irq_sim_work_ctx { + struct irq_work work; + int irq; +}; + +struct irq_sim_irq_ctx { + int irqnum; + bool enabled; +}; + +struct irq_sim { + struct irq_sim_work_ctx work_ctx; + int irq_base; + unsigned int irq_count; + struct irq_sim_irq_ctx *irqs; +}; + +int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs); +int devm_irq_sim_init(struct device *dev, struct irq_sim *sim, + unsigned int num_irqs); +void irq_sim_fini(struct irq_sim *sim); +void irq_sim_fire(struct irq_sim *sim, unsigned int offset); +int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset); + +#endif /* _LINUX_IRQ_SIM_H */ diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h new file mode 100644 index 000000000..b11fcdfd0 --- /dev/null +++ b/include/linux/irq_work.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IRQ_WORK_H +#define _LINUX_IRQ_WORK_H + +#include + +/* + * An entry can be in one of four states: + * + * free NULL, 0 -> {claimed} : free to be used + * claimed NULL, 3 -> {pending} : claimed to be enqueued + * pending next, 3 -> {busy} : queued, pending callback + * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed + */ + +#define IRQ_WORK_PENDING BIT(0) +#define IRQ_WORK_BUSY BIT(1) + +/* Doesn't want IPI, wait for tick: */ +#define IRQ_WORK_LAZY BIT(2) + +#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) + +struct irq_work { + unsigned long flags; + struct llist_node llnode; + void (*func)(struct irq_work *); +}; + +static inline +void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) +{ + work->flags = 0; + work->func = func; +} + +#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), } + +bool irq_work_queue(struct irq_work *work); +bool irq_work_queue_on(struct irq_work *work, int cpu); + +void irq_work_tick(void); +void irq_work_sync(struct irq_work *work); + +#ifdef CONFIG_IRQ_WORK +#include + +void irq_work_run(void); +bool irq_work_needs_cpu(void); +#else +static inline bool irq_work_needs_cpu(void) { return false; } +static inline void irq_work_run(void) { } +#endif + +#endif /* _LINUX_IRQ_WORK_H */ diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h new file mode 100644 index 000000000..f0f5d2671 --- /dev/null +++ b/include/linux/irqbypass.h @@ -0,0 +1,90 @@ +/* + * IRQ offload/bypass manager + * + * Copyright (C) 2015 Red Hat, Inc. + * Copyright (c) 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef IRQBYPASS_H +#define IRQBYPASS_H + +#include + +struct irq_bypass_consumer; + +/* + * Theory of operation + * + * The IRQ bypass manager is a simple set of lists and callbacks that allows + * IRQ producers (ex. physical interrupt sources) to be matched to IRQ + * consumers (ex. virtualization hardware that allows IRQ bypass or offload) + * via a shared token (ex. eventfd_ctx). Producers and consumers register + * independently. When a token match is found, the optional @stop callback + * will be called for each participant. The pair will then be connected via + * the @add_* callbacks, and finally the optional @start callback will allow + * any final coordination. When either participant is unregistered, the + * process is repeated using the @del_* callbacks in place of the @add_* + * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings + * are not supported. 
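+ *
+ * A minimal producer-side sketch; the token and irq values are
+ * illustrative only (in practice the token is typically an eventfd_ctx
+ * shared with the matching consumer):
+ *
+ *	static struct irq_bypass_producer prod;
+ *
+ *	prod.token = my_shared_token;
+ *	prod.irq = my_host_irq;
+ *	ret = irq_bypass_register_producer(&prod);
+ *	...
+ *	irq_bypass_unregister_producer(&prod);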
+ */ + +/** + * struct irq_bypass_producer - IRQ bypass producer definition + * @node: IRQ bypass manager private list management + * @token: opaque token to match between producer and consumer (non-NULL) + * @irq: Linux IRQ number for the producer device + * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) + * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) + * @stop: Perform any quiesce operations necessary prior to add/del (optional) + * @start: Perform any startup operations necessary after add/del (optional) + * + * The IRQ bypass producer structure represents an interrupt source for + * participation in possible host bypass, for instance an interrupt vector + * for a physical device assigned to a VM. + */ +struct irq_bypass_producer { + struct list_head node; + void *token; + int irq; + int (*add_consumer)(struct irq_bypass_producer *, + struct irq_bypass_consumer *); + void (*del_consumer)(struct irq_bypass_producer *, + struct irq_bypass_consumer *); + void (*stop)(struct irq_bypass_producer *); + void (*start)(struct irq_bypass_producer *); +}; + +/** + * struct irq_bypass_consumer - IRQ bypass consumer definition + * @node: IRQ bypass manager private list management + * @token: opaque token to match between producer and consumer (non-NULL) + * @add_producer: Connect the IRQ consumer to an IRQ producer + * @del_producer: Disconnect the IRQ consumer from an IRQ producer + * @stop: Perform any quiesce operations necessary prior to add/del (optional) + * @start: Perform any startup operations necessary after add/del (optional) + * + * The IRQ bypass consumer structure represents an interrupt sink for + * participation in possible host bypass, for instance a hypervisor may + * support offloads to allow bypassing the host entirely or offload + * portions of the interrupt handling to the VM. + */ +struct irq_bypass_consumer { + struct list_head node; + void *token; + int (*add_producer)(struct irq_bypass_consumer *, + struct irq_bypass_producer *); + void (*del_producer)(struct irq_bypass_consumer *, + struct irq_bypass_producer *); + void (*stop)(struct irq_bypass_consumer *); + void (*start)(struct irq_bypass_consumer *); +}; + +int irq_bypass_register_producer(struct irq_bypass_producer *); +void irq_bypass_unregister_producer(struct irq_bypass_producer *); +int irq_bypass_register_consumer(struct irq_bypass_consumer *); +void irq_bypass_unregister_consumer(struct irq_bypass_consumer *); + +#endif /* IRQBYPASS_H */ diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h new file mode 100644 index 000000000..89c34b200 --- /dev/null +++ b/include/linux/irqchip.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2012 Thomas Petazzoni + * + * Thomas Petazzoni + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _LINUX_IRQCHIP_H +#define _LINUX_IRQCHIP_H + +#include +#include + +/* + * This macro must be used by the different irqchip drivers to declare + * the association between their DT compatible string and their + * initialization function. + * + * @name: name that must be unique accross all IRQCHIP_DECLARE of the + * same file. 
+ * @compstr: compatible string of the irqchip driver + * @fn: initialization function + */ +#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) + +/* + * This macro must be used by the different irqchip drivers to declare + * the association between their version and their initialization function. + * + * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the + * same file. + * @subtable: Subtable to be identified in MADT + * @validate: Function to be called on that subtable to check its validity. + * Can be NULL. + * @data: data to be checked by the validate function. + * @fn: initialization function + */ +#define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \ + ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \ + subtable, validate, data, fn) + +#ifdef CONFIG_IRQCHIP +void irqchip_init(void); +#else +static inline void irqchip_init(void) {} +#endif + +#endif diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h new file mode 100644 index 000000000..0a83b4379 --- /dev/null +++ b/include/linux/irqchip/arm-gic-common.h @@ -0,0 +1,36 @@ +/* + * include/linux/irqchip/arm-gic-common.h + * + * Copyright (C) 2016 ARM Limited, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H +#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H + +#include +#include + +enum gic_type { + GIC_V2, + GIC_V3, +}; + +struct gic_kvm_info { + /* GIC type */ + enum gic_type type; + /* Virtual CPU interface */ + struct resource vcpu; + /* Interrupt number */ + unsigned int maint_irq; + /* Virtual control interface */ + struct resource vctrl; + /* vlpi support */ + bool has_v4; +}; + +const struct gic_kvm_info *gic_get_kvm_info(void); + +#endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h new file mode 100644 index 000000000..1d21e98d6 --- /dev/null +++ b/include/linux/irqchip/arm-gic-v3.h @@ -0,0 +1,620 @@ +/* + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H +#define __LINUX_IRQCHIP_ARM_GIC_V3_H + +/* + * Distributor registers. We assume we're running non-secure, with ARE + * being set. Secure-only and non-ARE registers are not described. 
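+ *
+ * A typical access pattern, e.g. reading how many interrupt IDs the
+ * distributor implements; dist_base below stands for the mapped
+ * distributor base and is illustrative only:
+ *
+ *	u32 typer = readl_relaxed(dist_base + GICD_TYPER);
+ *	unsigned int nr_irqs = GICD_TYPER_IRQS(typer);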
+ */ +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IIDR 0x0008 +#define GICD_STATUSR 0x0010 +#define GICD_SETSPI_NSR 0x0040 +#define GICD_CLRSPI_NSR 0x0048 +#define GICD_SETSPI_SR 0x0050 +#define GICD_CLRSPI_SR 0x0058 +#define GICD_SEIR 0x0068 +#define GICD_IGROUPR 0x0080 +#define GICD_ISENABLER 0x0100 +#define GICD_ICENABLER 0x0180 +#define GICD_ISPENDR 0x0200 +#define GICD_ICPENDR 0x0280 +#define GICD_ISACTIVER 0x0300 +#define GICD_ICACTIVER 0x0380 +#define GICD_IPRIORITYR 0x0400 +#define GICD_ICFGR 0x0C00 +#define GICD_IGRPMODR 0x0D00 +#define GICD_NSACR 0x0E00 +#define GICD_IROUTER 0x6000 +#define GICD_IDREGS 0xFFD0 +#define GICD_PIDR2 0xFFE8 + +/* + * Those registers are actually from GICv2, but the spec demands that they + * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). + */ +#define GICD_ITARGETSR 0x0800 +#define GICD_SGIR 0x0F00 +#define GICD_CPENDSGIR 0x0F10 +#define GICD_SPENDSGIR 0x0F20 + +#define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_DS (1U << 6) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) + +#define GICD_IIDR_IMPLEMENTER_SHIFT 0 +#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) +#define GICD_IIDR_REVISION_SHIFT 12 +#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) +#define GICD_IIDR_VARIANT_SHIFT 16 +#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) +#define GICD_IIDR_PRODUCT_ID_SHIFT 24 +#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) + + +/* + * In systems with a single security state (what we emulate in KVM) + * the meaning of the interrupt group enable bits is slightly different + */ +#define GICD_CTLR_ENABLE_SS_G1 (1U << 1) +#define GICD_CTLR_ENABLE_SS_G0 (1U << 0) + +#define GICD_TYPER_RSS (1U << 26) +#define GICD_TYPER_LPIS (1U << 17) +#define GICD_TYPER_MBIS (1U << 16) + +#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) +#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) + +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) + +#define GIC_PIDR2_ARCH_MASK 0xf0 +#define GIC_PIDR2_ARCH_GICv3 0x30 +#define GIC_PIDR2_ARCH_GICv4 0x40 + +#define GIC_V3_DIST_SIZE 0x10000 + +/* + * Re-Distributor registers, offsets from RD_base + */ +#define GICR_CTLR GICD_CTLR +#define GICR_IIDR 0x0004 +#define GICR_TYPER 0x0008 +#define GICR_STATUSR GICD_STATUSR +#define GICR_WAKER 0x0014 +#define GICR_SETLPIR 0x0040 +#define GICR_CLRLPIR 0x0048 +#define GICR_SEIR GICD_SEIR +#define GICR_PROPBASER 0x0070 +#define GICR_PENDBASER 0x0078 +#define GICR_INVLPIR 0x00A0 +#define GICR_INVALLR 0x00B0 +#define GICR_SYNCR 0x00C0 +#define GICR_MOVLPIR 0x0100 +#define GICR_MOVALLR 0x0110 +#define GICR_IDREGS GICD_IDREGS +#define GICR_PIDR2 GICD_PIDR2 + +#define GICR_CTLR_ENABLE_LPIS (1UL << 0) +#define GICR_CTLR_RWP (1UL << 3) + +#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +#define GIC_BASER_CACHE_nCnB 0ULL +#define GIC_BASER_CACHE_SameAsInner 0ULL +#define GIC_BASER_CACHE_nC 1ULL +#define GIC_BASER_CACHE_RaWt 2ULL +#define GIC_BASER_CACHE_RaWb 3ULL +#define GIC_BASER_CACHE_WaWt 4ULL +#define GIC_BASER_CACHE_WaWb 5ULL +#define GIC_BASER_CACHE_RaWaWt 6ULL +#define GIC_BASER_CACHE_RaWaWb 7ULL +#define GIC_BASER_CACHE_MASK 7ULL +#define GIC_BASER_NonShareable 0ULL +#define 
GIC_BASER_InnerShareable 1ULL +#define GIC_BASER_OuterShareable 2ULL +#define GIC_BASER_SHAREABILITY_MASK 3ULL + +#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ + (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) + +#define GIC_BASER_SHAREABILITY(reg, type) \ + (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) + +/* encode a size field of width @w containing @n - 1 units */ +#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) + +#define GICR_PROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) +#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) +#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) +#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_PROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) + +#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) +#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) +#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) +#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) +#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) +#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) +#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) +#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) + +#define GICR_PROPBASER_IDBITS_MASK (0x1f) +#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) +#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) + +#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) +#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) +#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) +#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_PENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) + +#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) +#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) +#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) +#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) +#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) +#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) +#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) +#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) + +#define GICR_PENDBASER_PTZ BIT_ULL(62) + +/* + * Re-Distributor registers, offsets from SGI_base + */ +#define GICR_IGROUPR0 GICD_IGROUPR +#define GICR_ISENABLER0 GICD_ISENABLER +#define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ISPENDR0 GICD_ISPENDR +#define GICR_ICPENDR0 
GICD_ICPENDR +#define GICR_ISACTIVER0 GICD_ISACTIVER +#define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_IPRIORITYR0 GICD_IPRIORITYR +#define GICR_ICFGR0 GICD_ICFGR +#define GICR_IGRPMODR0 GICD_IGRPMODR +#define GICR_NSACR GICD_NSACR + +#define GICR_TYPER_PLPIS (1U << 0) +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_DirectLPIS (1U << 3) +#define GICR_TYPER_LAST (1U << 4) + +#define GIC_V3_REDIST_SIZE 0x20000 + +#define LPI_PROP_GROUP1 (1 << 1) +#define LPI_PROP_ENABLED (1 << 0) + +/* + * Re-Distributor registers, offsets from VLPI_base + */ +#define GICR_VPROPBASER 0x0070 + +#define GICR_VPROPBASER_IDBITS_MASK 0x1f + +#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) + +#define GICR_VPROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) +#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) +#define GICR_VPROPBASER_CACHEABILITY_MASK \ + GICR_VPROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) + +#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) +#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) +#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) +#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb) +#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) +#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) +#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) +#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER 0x0078 + +#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_VPENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) +#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) +#define GICR_VPENDBASER_CACHEABILITY_MASK \ + GICR_VPENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPENDBASER_NonShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) + +#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) +#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) +#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) +#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb) +#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) +#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) +#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) +#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER_Dirty (1ULL << 60) +#define GICR_VPENDBASER_PendingLast (1ULL << 61) +#define GICR_VPENDBASER_IDAI (1ULL << 62) +#define GICR_VPENDBASER_Valid (1ULL << 63) + +/* + * ITS registers, 
offsets from ITS_base + */ +#define GITS_CTLR 0x0000 +#define GITS_IIDR 0x0004 +#define GITS_TYPER 0x0008 +#define GITS_CBASER 0x0080 +#define GITS_CWRITER 0x0088 +#define GITS_CREADR 0x0090 +#define GITS_BASER 0x0100 +#define GITS_IDREGS_BASE 0xffd0 +#define GITS_PIDR0 0xffe0 +#define GITS_PIDR1 0xffe4 +#define GITS_PIDR2 GICR_PIDR2 +#define GITS_PIDR4 0xffd0 +#define GITS_CIDR0 0xfff0 +#define GITS_CIDR1 0xfff4 +#define GITS_CIDR2 0xfff8 +#define GITS_CIDR3 0xfffc + +#define GITS_TRANSLATER 0x10040 + +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_ImDe (1U << 1) +#define GITS_CTLR_ITS_NUMBER_SHIFT 4 +#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) +#define GITS_CTLR_QUIESCENT (1U << 31) + +#define GITS_TYPER_PLPIS (1UL << 0) +#define GITS_TYPER_VLPIS (1UL << 1) +#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 +#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1) +#define GITS_TYPER_IDBITS_SHIFT 8 +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) +#define GITS_TYPER_PTA (1UL << 19) +#define GITS_TYPER_HCC_SHIFT 24 +#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) +#define GITS_TYPER_VMOVP (1ULL << 37) + +#define GITS_IIDR_REV_SHIFT 12 +#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) +#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) +#define GITS_IIDR_PRODUCTID_SHIFT 24 + +#define GITS_CBASER_VALID (1ULL << 63) +#define GITS_CBASER_SHAREABILITY_SHIFT (10) +#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_CBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) +#define GITS_CBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) +#define GITS_CBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) +#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK + +#define GITS_CBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) + +#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) +#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) +#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) +#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) +#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) +#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) +#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) +#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) + +#define GITS_BASER_NR_REGS 8 + +#define GITS_BASER_VALID (1ULL << 63) +#define GITS_BASER_INDIRECT (1ULL << 62) + +#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_BASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_BASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK) +#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK +#define GITS_BASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK) +#define GITS_BASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK) + +#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) +#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) +#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) +#define GITS_BASER_RaWb 
GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) +#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) +#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) +#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) +#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb) + +#define GITS_BASER_TYPE_SHIFT (56) +#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) +#define GITS_BASER_ENTRY_SIZE_SHIFT (48) +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) +#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) +#define GITS_BASER_PHYS_52_to_48(phys) \ + (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) +#define GITS_BASER_SHAREABILITY_SHIFT (10) +#define GITS_BASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) +#define GITS_BASER_PAGE_SIZE_SHIFT (8) +#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGES_MAX 256 +#define GITS_BASER_PAGES_SHIFT (0) +#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) + +#define GITS_BASER_TYPE_NONE 0 +#define GITS_BASER_TYPE_DEVICE 1 +#define GITS_BASER_TYPE_VCPU 2 +#define GITS_BASER_TYPE_RESERVED3 3 +#define GITS_BASER_TYPE_COLLECTION 4 +#define GITS_BASER_TYPE_RESERVED5 5 +#define GITS_BASER_TYPE_RESERVED6 6 +#define GITS_BASER_TYPE_RESERVED7 7 + +#define GITS_LVL1_ENTRY_SIZE (8UL) + +/* + * ITS commands + */ +#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPTI 0x0a +#define GITS_CMD_MAPI 0x0b +#define GITS_CMD_MOVI 0x01 +#define GITS_CMD_DISCARD 0x0f +#define GITS_CMD_INV 0x0c +#define GITS_CMD_MOVALL 0x0e +#define GITS_CMD_INVALL 0x0d +#define GITS_CMD_INT 0x03 +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_SYNC 0x05 + +/* + * GICv4 ITS specific commands + */ +#define GITS_CMD_GICv4(x) ((x) | 0x20) +#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL) +#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC) +#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) +#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) +#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) +/* VMOVP is the odd one, as it doesn't have a physical counterpart */ +#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) + +/* + * ITS error numbers + */ +#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 +#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 +#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 +#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 +#define E_ITS_MAPD_DEVICE_OOR 0x010801 +#define E_ITS_MAPD_ITTSIZE_OOR 0x010802 +#define E_ITS_MAPC_PROCNUM_OOR 0x010902 +#define E_ITS_MAPC_COLLECTION_OOR 0x010903 +#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 +#define E_ITS_MAPTI_ID_OOR 0x010a05 +#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 +#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 +#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 +#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01 +#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07 + +/* + * CPU interface registers + */ +#define ICC_CTLR_EL1_EOImode_SHIFT (1) +#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_CBPR_SHIFT 0 +#define 
ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT) +#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8 +#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT) +#define ICC_CTLR_EL1_ID_BITS_SHIFT 11 +#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT) +#define ICC_CTLR_EL1_SEIS_SHIFT 14 +#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) +#define ICC_CTLR_EL1_A3V_SHIFT 15 +#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) +#define ICC_CTLR_EL1_RSS (0x1 << 18) +#define ICC_PMR_EL1_SHIFT 0 +#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) +#define ICC_BPR0_EL1_SHIFT 0 +#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT) +#define ICC_BPR1_EL1_SHIFT 0 +#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT) +#define ICC_IGRPEN0_EL1_SHIFT 0 +#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) +#define ICC_IGRPEN1_EL1_SHIFT 0 +#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) +#define ICC_SRE_EL1_DIB (1U << 2) +#define ICC_SRE_EL1_DFB (1U << 1) +#define ICC_SRE_EL1_SRE (1U << 0) + +/* + * Hypervisor interface registers (SRE only) + */ +#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) + +#define ICH_LR_EOI (1ULL << 41) +#define ICH_LR_GROUP (1ULL << 60) +#define ICH_LR_HW (1ULL << 61) +#define ICH_LR_STATE (3ULL << 62) +#define ICH_LR_PENDING_BIT (1ULL << 62) +#define ICH_LR_ACTIVE_BIT (1ULL << 63) +#define ICH_LR_PHYS_ID_SHIFT 32 +#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PRIORITY_SHIFT 48 +#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT) + +/* These are for GICv2 emulation only */ +#define GICH_LR_VIRTUALID (0x3ffUL << 0) +#define GICH_LR_PHYSID_CPUID_SHIFT (10) +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) + +#define ICH_MISR_EOI (1 << 0) +#define ICH_MISR_U (1 << 1) + +#define ICH_HCR_EN (1 << 0) +#define ICH_HCR_UIE (1 << 1) +#define ICH_HCR_NPIE (1 << 3) +#define ICH_HCR_TC (1 << 10) +#define ICH_HCR_TALL0 (1 << 11) +#define ICH_HCR_TALL1 (1 << 12) +#define ICH_HCR_EOIcount_SHIFT 27 +#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) + +#define ICH_VMCR_ACK_CTL_SHIFT 2 +#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT) +#define ICH_VMCR_FIQ_EN_SHIFT 3 +#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) +#define ICH_VMCR_CBPR_SHIFT 4 +#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) +#define ICH_VMCR_EOIM_SHIFT 9 +#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT) +#define ICH_VMCR_BPR1_SHIFT 18 +#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) +#define ICH_VMCR_BPR0_SHIFT 21 +#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) +#define ICH_VMCR_PMR_SHIFT 24 +#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) +#define ICH_VMCR_ENG0_SHIFT 0 +#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT) +#define ICH_VMCR_ENG1_SHIFT 1 +#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT) + +#define ICH_VTR_PRI_BITS_SHIFT 29 +#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT) +#define ICH_VTR_ID_BITS_SHIFT 23 +#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT) +#define ICH_VTR_SEIS_SHIFT 22 +#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) +#define ICH_VTR_A3V_SHIFT 21 +#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) + +#define ICC_IAR1_EL1_SPURIOUS 0x3ff + +#define ICC_SRE_EL2_SRE (1 << 0) +#define ICC_SRE_EL2_ENABLE (1 << 3) + +#define ICC_SGI1R_TARGET_LIST_SHIFT 0 +#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) +#define ICC_SGI1R_AFFINITY_1_SHIFT 16 +#define 
ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_SGI_ID_SHIFT 24 +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) +#define ICC_SGI1R_AFFINITY_2_SHIFT 32 +#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) +#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 +#define ICC_SGI1R_RS_SHIFT 44 +#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) +#define ICC_SGI1R_AFFINITY_3_SHIFT 48 +#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) + +#include + +#ifndef __ASSEMBLY__ + +/* + * We need a value to serve as a irq-type for LPIs. Choose one that will + * hopefully pique the interest of the reviewer. + */ +#define GIC_IRQ_TYPE_LPI 0xa110c8ed + +struct rdists { + struct { + void __iomem *rd_base; + struct page *pend_page; + phys_addr_t phys_base; + } __percpu *rdist; + struct page *prop_page; + u64 flags; + u32 gicd_typer; + bool has_vlpis; + bool has_direct_lpi; +}; + +struct irq_domain; +struct fwnode_handle; +int its_cpu_init(void); +int its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *domain); +int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent); + +static inline bool gic_enable_sre(void) +{ + u32 val; + + val = gic_read_sre(); + if (val & ICC_SRE_EL1_SRE) + return true; + + val |= ICC_SRE_EL1_SRE; + gic_write_sre(val); + val = gic_read_sre(); + + return !!(val & ICC_SRE_EL1_SRE); +} + +#endif + +#endif diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h new file mode 100644 index 000000000..fa683ea5c --- /dev/null +++ b/include/linux/irqchip/arm-gic-v4.h @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H +#define __LINUX_IRQCHIP_ARM_GIC_V4_H + +struct its_vpe; + +/* + * Maximum number of ITTs when GITS_TYPER.VMOVP == 0, using the + * ITSList mechanism to perform inter-ITS synchronization. + */ +#define GICv4_ITS_LIST_MAX 16 + +/* Embedded in kvm.arch */ +struct its_vm { + struct fwnode_handle *fwnode; + struct irq_domain *domain; + struct page *vprop_page; + struct its_vpe **vpes; + int nr_vpes; + irq_hw_number_t db_lpi_base; + unsigned long *db_bitmap; + int nr_db_lpis; + u32 vlpi_count[GICv4_ITS_LIST_MAX]; +}; + +/* Embedded in kvm_vcpu.arch */ +struct its_vpe { + struct page *vpt_page; + struct its_vm *its_vm; + /* Doorbell interrupt */ + int irq; + irq_hw_number_t vpe_db_lpi; + /* VPE proxy mapping */ + int vpe_proxy_event; + /* + * This collection ID is used to indirect the target + * redistributor for this VPE. The ID itself isn't involved in + * programming of the ITS. + */ + u16 col_idx; + /* Unique (system-wide) VPE identifier */ + u16 vpe_id; + /* Implementation Defined Area Invalid */ + bool idai; + /* Pending VLPIs on schedule out? */ + bool pending_last; +}; + +/* + * struct its_vlpi_map: structure describing the mapping of a + * VLPI. 
Only to be interpreted in the context of a physical interrupt + * it complements. To be used as the vcpu_info passed to + * irq_set_vcpu_affinity(). + * + * @vm: Pointer to the GICv4 notion of a VM + * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE) + * @vintid: Virtual LPI number + * @properties: Priority and enable bits (as written in the prop table) + * @db_enabled: Is the VPE doorbell to be generated? + */ +struct its_vlpi_map { + struct its_vm *vm; + struct its_vpe *vpe; + u32 vintid; + u8 properties; + bool db_enabled; +}; + +enum its_vcpu_info_cmd_type { + MAP_VLPI, + GET_VLPI, + PROP_UPDATE_VLPI, + PROP_UPDATE_AND_INV_VLPI, + SCHEDULE_VPE, + DESCHEDULE_VPE, + INVALL_VPE, +}; + +struct its_cmd_info { + enum its_vcpu_info_cmd_type cmd_type; + union { + struct its_vlpi_map *map; + u8 config; + }; +}; + +int its_alloc_vcpu_irqs(struct its_vm *vm); +void its_free_vcpu_irqs(struct its_vm *vm); +int its_schedule_vpe(struct its_vpe *vpe, bool on); +int its_invall_vpe(struct its_vpe *vpe); +int its_map_vlpi(int irq, struct its_vlpi_map *map); +int its_get_vlpi(int irq, struct its_vlpi_map *map); +int its_unmap_vlpi(int irq); +int its_prop_update_vlpi(int irq, u8 config, bool inv); + +struct irq_domain_ops; +int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops); + +#endif diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h new file mode 100644 index 000000000..6c4aaf040 --- /dev/null +++ b/include/linux/irqchip/arm-gic.h @@ -0,0 +1,178 @@ +/* + * include/linux/irqchip/arm-gic.h + * + * Copyright (C) 2002 ARM Limited, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
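The its_vlpi_map structure and the its_map_vlpi() prototype above are the hooks a hypervisor uses to have the ITS deliver a physical LPI directly to a guest as a virtual LPI. A minimal sketch of such a call follows; the forward_lpi_to_guest() wrapper, its parameters and the zeroed property byte are illustrative assumptions, not part of this patch.

#include <linux/types.h>
#include <linux/irqchip/arm-gic-v4.h>

/*
 * Illustrative sketch only: forward the physical LPI behind 'host_irq'
 * to the guest as virtual LPI 'vintid' on the given VPE.  Error handling
 * and locking are omitted.
 */
static int forward_lpi_to_guest(int host_irq, struct its_vm *vm,
				struct its_vpe *vpe, u32 vintid)
{
	struct its_vlpi_map map = {
		.vm		= vm,
		.vpe		= vpe,
		.vintid		= vintid,
		.properties	= 0,	/* priority/enable bits for the prop table */
		.db_enabled	= true,	/* raise the doorbell when the VPE is not resident */
	};

	return its_map_vlpi(host_irq, &map);
}

The reverse operation is its_unmap_vlpi(host_irq), and its_prop_update_vlpi() changes the priority/enable byte after the fact.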
+ */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_H +#define __LINUX_IRQCHIP_ARM_GIC_H + +#define GIC_CPU_CTRL 0x00 +#define GIC_CPU_PRIMASK 0x04 +#define GIC_CPU_BINPOINT 0x08 +#define GIC_CPU_INTACK 0x0c +#define GIC_CPU_EOI 0x10 +#define GIC_CPU_RUNNINGPRI 0x14 +#define GIC_CPU_HIGHPRI 0x18 +#define GIC_CPU_ALIAS_BINPOINT 0x1c +#define GIC_CPU_ACTIVEPRIO 0xd0 +#define GIC_CPU_IDENT 0xfc +#define GIC_CPU_DEACTIVATE 0x1000 + +#define GICC_ENABLE 0x1 +#define GICC_INT_PRI_THRESHOLD 0xf0 + +#define GIC_CPU_CTRL_EnableGrp0_SHIFT 0 +#define GIC_CPU_CTRL_EnableGrp0 (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT) +#define GIC_CPU_CTRL_EnableGrp1_SHIFT 1 +#define GIC_CPU_CTRL_EnableGrp1 (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT) +#define GIC_CPU_CTRL_AckCtl_SHIFT 2 +#define GIC_CPU_CTRL_AckCtl (1 << GIC_CPU_CTRL_AckCtl_SHIFT) +#define GIC_CPU_CTRL_FIQEn_SHIFT 3 +#define GIC_CPU_CTRL_FIQEn (1 << GIC_CPU_CTRL_FIQEn_SHIFT) +#define GIC_CPU_CTRL_CBPR_SHIFT 4 +#define GIC_CPU_CTRL_CBPR (1 << GIC_CPU_CTRL_CBPR_SHIFT) +#define GIC_CPU_CTRL_EOImodeNS_SHIFT 9 +#define GIC_CPU_CTRL_EOImodeNS (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT) + +#define GICC_IAR_INT_ID_MASK 0x3ff +#define GICC_INT_SPURIOUS 1023 +#define GICC_DIS_BYPASS_MASK 0x1e0 + +#define GIC_DIST_CTRL 0x000 +#define GIC_DIST_CTR 0x004 +#define GIC_DIST_IIDR 0x008 +#define GIC_DIST_IGROUP 0x080 +#define GIC_DIST_ENABLE_SET 0x100 +#define GIC_DIST_ENABLE_CLEAR 0x180 +#define GIC_DIST_PENDING_SET 0x200 +#define GIC_DIST_PENDING_CLEAR 0x280 +#define GIC_DIST_ACTIVE_SET 0x300 +#define GIC_DIST_ACTIVE_CLEAR 0x380 +#define GIC_DIST_PRI 0x400 +#define GIC_DIST_TARGET 0x800 +#define GIC_DIST_CONFIG 0xc00 +#define GIC_DIST_SOFTINT 0xf00 +#define GIC_DIST_SGI_PENDING_CLEAR 0xf10 +#define GIC_DIST_SGI_PENDING_SET 0xf20 + +#define GICD_ENABLE 0x1 +#define GICD_DISABLE 0x0 +#define GICD_INT_ACTLOW_LVLTRIG 0x0 +#define GICD_INT_EN_CLR_X32 0xffffffff +#define GICD_INT_EN_SET_SGI 0x0000ffff +#define GICD_INT_EN_CLR_PPI 0xffff0000 +#define GICD_INT_DEF_PRI 0xa0 +#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ + (GICD_INT_DEF_PRI << 16) |\ + (GICD_INT_DEF_PRI << 8) |\ + GICD_INT_DEF_PRI) + +#define GICD_IIDR_IMPLEMENTER_SHIFT 0 +#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) +#define GICD_IIDR_REVISION_SHIFT 12 +#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) +#define GICD_IIDR_VARIANT_SHIFT 16 +#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) +#define GICD_IIDR_PRODUCT_ID_SHIFT 24 +#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) + + +#define GICH_HCR 0x0 +#define GICH_VTR 0x4 +#define GICH_VMCR 0x8 +#define GICH_MISR 0x10 +#define GICH_EISR0 0x20 +#define GICH_EISR1 0x24 +#define GICH_ELRSR0 0x30 +#define GICH_ELRSR1 0x34 +#define GICH_APR 0xf0 +#define GICH_LR0 0x100 + +#define GICH_HCR_EN (1 << 0) +#define GICH_HCR_UIE (1 << 1) +#define GICH_HCR_NPIE (1 << 3) + +#define GICH_LR_VIRTUALID (0x3ff << 0) +#define GICH_LR_PHYSID_CPUID_SHIFT (10) +#define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT) +#define GICH_LR_PRIORITY_SHIFT 23 +#define GICH_LR_STATE (3 << 28) +#define GICH_LR_PENDING_BIT (1 << 28) +#define GICH_LR_ACTIVE_BIT (1 << 29) +#define GICH_LR_EOI (1 << 19) +#define GICH_LR_GROUP1 (1 << 30) +#define GICH_LR_HW (1 << 31) + +#define GICH_VMCR_ENABLE_GRP0_SHIFT 0 +#define GICH_VMCR_ENABLE_GRP0_MASK (1 << GICH_VMCR_ENABLE_GRP0_SHIFT) +#define GICH_VMCR_ENABLE_GRP1_SHIFT 1 +#define GICH_VMCR_ENABLE_GRP1_MASK (1 << GICH_VMCR_ENABLE_GRP1_SHIFT) +#define GICH_VMCR_ACK_CTL_SHIFT 2 +#define 
GICH_VMCR_ACK_CTL_MASK (1 << GICH_VMCR_ACK_CTL_SHIFT) +#define GICH_VMCR_FIQ_EN_SHIFT 3 +#define GICH_VMCR_FIQ_EN_MASK (1 << GICH_VMCR_FIQ_EN_SHIFT) +#define GICH_VMCR_CBPR_SHIFT 4 +#define GICH_VMCR_CBPR_MASK (1 << GICH_VMCR_CBPR_SHIFT) +#define GICH_VMCR_EOI_MODE_SHIFT 9 +#define GICH_VMCR_EOI_MODE_MASK (1 << GICH_VMCR_EOI_MODE_SHIFT) + +#define GICH_VMCR_PRIMASK_SHIFT 27 +#define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT) +#define GICH_VMCR_BINPOINT_SHIFT 21 +#define GICH_VMCR_BINPOINT_MASK (0x7 << GICH_VMCR_BINPOINT_SHIFT) +#define GICH_VMCR_ALIAS_BINPOINT_SHIFT 18 +#define GICH_VMCR_ALIAS_BINPOINT_MASK (0x7 << GICH_VMCR_ALIAS_BINPOINT_SHIFT) + +#define GICH_MISR_EOI (1 << 0) +#define GICH_MISR_U (1 << 1) + +#define GICV_PMR_PRIORITY_SHIFT 3 +#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT) + +#ifndef __ASSEMBLY__ + +#include + +struct device_node; +struct gic_chip_data; + +void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); +int gic_cpu_if_down(unsigned int gic_nr); +void gic_cpu_save(struct gic_chip_data *gic); +void gic_cpu_restore(struct gic_chip_data *gic); +void gic_dist_save(struct gic_chip_data *gic); +void gic_dist_restore(struct gic_chip_data *gic); + +/* + * Subdrivers that need some preparatory work can initialize their + * chips and call this to register their GICs. + */ +int gic_of_init(struct device_node *node, struct device_node *parent); + +/* + * Initialises and registers a non-root or child GIC chip. Memory for + * the gic_chip_data structure is dynamically allocated. + */ +int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq); + +/* + * Legacy platforms not converted to DT yet must use this to init + * their GIC + */ +void gic_init(unsigned int nr, int start, + void __iomem *dist , void __iomem *cpu); + +int gicv2m_init(struct fwnode_handle *parent_handle, + struct irq_domain *parent); + +void gic_send_sgi(unsigned int cpu_id, unsigned int irq); +int gic_get_cpu_id(unsigned int cpu); +void gic_migrate_target(unsigned int new_cpu_id); +unsigned long gic_get_sgir_physaddr(void); + +#endif /* __ASSEMBLY */ +#endif diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h new file mode 100644 index 000000000..ba46c794b --- /dev/null +++ b/include/linux/irqchip/arm-vic.h @@ -0,0 +1,38 @@ +/* + * arch/arm/include/asm/hardware/vic.h + * + * Copyright (c) ARM Limited 2003. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
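As the comment above notes, platforms not yet converted to DT initialise their GIC by hand with gic_init(). A hypothetical board file might look like the sketch below; the physical addresses and the starting IRQ number are made-up examples, not taken from this patch.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/irqchip/arm-gic.h>

static void __init board_init_irq(void)
{
	/* Assumed base addresses of the distributor and CPU interface */
	void __iomem *dist = ioremap(0x1e001000, SZ_4K);
	void __iomem *cpu  = ioremap(0x1e000100, SZ_256);

	/* Primary GIC (nr 0), first Linux irq number 29 on this board */
	gic_init(0, 29, dist, cpu);
}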
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef __ASM_ARM_HARDWARE_VIC_H +#define __ASM_ARM_HARDWARE_VIC_H + +#include + +#define VIC_RAW_STATUS 0x08 +#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */ +#define VIC_INT_ENABLE_CLEAR 0x14 + +struct device_node; +struct pt_regs; + +void __vic_init(void __iomem *base, int parent_irq, int irq_start, + u32 vic_sources, u32 resume_sources, struct device_node *node); +void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); +int vic_init_cascaded(void __iomem *base, unsigned int parent_irq, + u32 vic_sources, u32 resume_sources); + +#endif diff --git a/include/linux/irqchip/chained_irq.h b/include/linux/irqchip/chained_irq.h new file mode 100644 index 000000000..adf4c30f3 --- /dev/null +++ b/include/linux/irqchip/chained_irq.h @@ -0,0 +1,52 @@ +/* + * Chained IRQ handlers support. + * + * Copyright (C) 2011 ARM Ltd. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __IRQCHIP_CHAINED_IRQ_H +#define __IRQCHIP_CHAINED_IRQ_H + +#include + +/* + * Entry/exit functions for chained handlers where the primary IRQ chip + * may implement either fasteoi or level-trigger flow control. + */ +static inline void chained_irq_enter(struct irq_chip *chip, + struct irq_desc *desc) +{ + /* FastEOI controllers require no action on entry. */ + if (chip->irq_eoi) + return; + + if (chip->irq_mask_ack) { + chip->irq_mask_ack(&desc->irq_data); + } else { + chip->irq_mask(&desc->irq_data); + if (chip->irq_ack) + chip->irq_ack(&desc->irq_data); + } +} + +static inline void chained_irq_exit(struct irq_chip *chip, + struct irq_desc *desc) +{ + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); + else + chip->irq_unmask(&desc->irq_data); +} + +#endif /* __IRQCHIP_CHAINED_IRQ_H */ diff --git a/include/linux/irqchip/ingenic.h b/include/linux/irqchip/ingenic.h new file mode 100644 index 000000000..0ee319a40 --- /dev/null +++ b/include/linux/irqchip/ingenic.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2010, Lars-Peter Clausen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
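chained_irq_enter()/chained_irq_exit() above are meant to bracket the demultiplexing loop in a secondary interrupt controller's flow handler. A sketch of that pattern follows; the my_muxchip structure, the MY_PENDING register offset and the 32-input width are assumptions, only the helper calls themselves come from these headers.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

#define MY_PENDING	0x10		/* assumed pending-status register */

struct my_muxchip {
	void __iomem		*base;
	struct irq_domain	*domain;
};

static void my_mux_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct my_muxchip *mux = irq_desc_get_handler_data(desc);
	unsigned long pending;
	int hwirq;

	chained_irq_enter(chip, desc);		/* mask/ack the parent, or nothing for fasteoi */

	pending = readl_relaxed(mux->base + MY_PENDING);
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_irq(irq_find_mapping(mux->domain, hwirq));

	chained_irq_exit(chip, desc);		/* eoi or unmask the parent */
}

The handler would typically be installed on the parent interrupt with irq_set_chained_handler_and_data(parent_irq, my_mux_handler, mux).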
+ * + */ + +#ifndef __LINUX_IRQCHIP_INGENIC_H__ +#define __LINUX_IRQCHIP_INGENIC_H__ + +#include + +extern void ingenic_intc_irq_suspend(struct irq_data *data); +extern void ingenic_intc_irq_resume(struct irq_data *data); + +#endif diff --git a/include/linux/irqchip/irq-bcm2836.h b/include/linux/irqchip/irq-bcm2836.h new file mode 100644 index 000000000..218a6e1b1 --- /dev/null +++ b/include/linux/irqchip/irq-bcm2836.h @@ -0,0 +1,70 @@ +/* + * Root interrupt controller for the BCM2836 (Raspberry Pi 2). + * + * Copyright 2015 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define LOCAL_CONTROL 0x000 +#define LOCAL_PRESCALER 0x008 + +/* + * The low 2 bits identify the CPU that the GPU IRQ goes to, and the + * next 2 bits identify the CPU that the GPU FIQ goes to. + */ +#define LOCAL_GPU_ROUTING 0x00c +/* When setting bits 0-3, enables PMU interrupts on that CPU. */ +#define LOCAL_PM_ROUTING_SET 0x010 +/* When setting bits 0-3, disables PMU interrupts on that CPU. */ +#define LOCAL_PM_ROUTING_CLR 0x014 +/* + * The low 4 bits of this are the CPU's timer IRQ enables, and the + * next 4 bits are the CPU's timer FIQ enables (which override the IRQ + * bits). + */ +#define LOCAL_TIMER_INT_CONTROL0 0x040 +/* + * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and + * the next 4 bits are the CPU's per-mailbox FIQ enables (which + * override the IRQ bits). + */ +#define LOCAL_MAILBOX_INT_CONTROL0 0x050 +/* + * The CPU's interrupt status register. Bits are defined by the the + * LOCAL_IRQ_* bits below. + */ +#define LOCAL_IRQ_PENDING0 0x060 +/* Same status bits as above, but for FIQ. */ +#define LOCAL_FIQ_PENDING0 0x070 +/* + * Mailbox write-to-set bits. There are 16 mailboxes, 4 per CPU, and + * these bits are organized by mailbox number and then CPU number. We + * use mailbox 0 for IPIs. The mailbox's interrupt is raised while + * any bit is set. + */ +#define LOCAL_MAILBOX0_SET0 0x080 +#define LOCAL_MAILBOX3_SET0 0x08c +/* Mailbox write-to-clear bits. */ +#define LOCAL_MAILBOX0_CLR0 0x0c0 +#define LOCAL_MAILBOX3_CLR0 0x0cc + +#define LOCAL_IRQ_CNTPSIRQ 0 +#define LOCAL_IRQ_CNTPNSIRQ 1 +#define LOCAL_IRQ_CNTHPIRQ 2 +#define LOCAL_IRQ_CNTVIRQ 3 +#define LOCAL_IRQ_MAILBOX0 4 +#define LOCAL_IRQ_MAILBOX1 5 +#define LOCAL_IRQ_MAILBOX2 6 +#define LOCAL_IRQ_MAILBOX3 7 +#define LOCAL_IRQ_GPU_FAST 8 +#define LOCAL_IRQ_PMU_FAST 9 +#define LAST_IRQ LOCAL_IRQ_PMU_FAST diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h new file mode 100644 index 000000000..f19ccee77 --- /dev/null +++ b/include/linux/irqchip/irq-omap-intc.h @@ -0,0 +1,28 @@ +/** + * irq-omap-intc.h - INTC Idle Functions + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * + * Author: Felipe Balbi + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. 
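The BCM2836 mailbox comments above imply a simple way to raise an IPI: set the target CPU's mailbox-0 bit through the write-to-set register. The sketch below assumes the 16-byte per-CPU stride that follows from the four 32-bit mailboxes per CPU described above; intc_base is a hypothetical mapping of the local interrupt controller.

#include <linux/io.h>
#include <linux/irqchip/irq-bcm2836.h>

static void bcm2836_raise_ipi(void __iomem *intc_base, unsigned int cpu,
			      unsigned int ipi)
{
	/* Mailbox 0 of 'cpu'; each CPU owns four consecutive 32-bit mailboxes */
	writel(1 << ipi, intc_base + LOCAL_MAILBOX0_SET0 + 16 * cpu);
}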
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H +#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H + +int omap_irq_pending(void); +void omap_intc_save_context(void); +void omap_intc_restore_context(void); +void omap3_intc_suspend(void); +void omap3_intc_prepare_idle(void); +void omap3_intc_resume_idle(void); + +#endif /* __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H */ diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h new file mode 100644 index 000000000..87433a5d1 --- /dev/null +++ b/include/linux/irqchip/irq-partition-percpu.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2016 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +struct partition_affinity { + cpumask_t mask; + void *partition_id; +}; + +struct partition_desc; + +#ifdef CONFIG_PARTITION_PERCPU +int partition_translate_id(struct partition_desc *desc, void *partition_id); +struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode, + struct partition_affinity *parts, + int nr_parts, + int chained_irq, + const struct irq_domain_ops *ops); +struct irq_domain *partition_get_domain(struct partition_desc *dsc); +#else +static inline int partition_translate_id(struct partition_desc *desc, + void *partition_id) +{ + return -EINVAL; +} + +static inline +struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode, + struct partition_affinity *parts, + int nr_parts, + int chained_irq, + const struct irq_domain_ops *ops) +{ + return NULL; +} + +static inline +struct irq_domain *partition_get_domain(struct partition_desc *dsc) +{ + return NULL; +} +#endif diff --git a/include/linux/irqchip/irq-sa11x0.h b/include/linux/irqchip/irq-sa11x0.h new file mode 100644 index 000000000..15db6829c --- /dev/null +++ b/include/linux/irqchip/irq-sa11x0.h @@ -0,0 +1,17 @@ +/* + * Generic IRQ handling for the SA11x0. + * + * Copyright (C) 2015 Dmitry Eremin-Solenikov + * Copyright (C) 1999-2001 Nicolas Pitre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
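The partition_* API above splits a single per-cpu interrupt into several logical interrupts, each tied to a subset of CPUs. A hedged sketch of how a caller might build two partitions is shown below; the fwnode arguments, the CPU assignment and my_ops are assumptions for illustration only.

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/irq-partition-percpu.h>

static struct irq_domain *make_two_partitions(struct fwnode_handle *fwnode,
					      void *id_a, void *id_b,
					      int ppi_irq,
					      const struct irq_domain_ops *my_ops)
{
	static struct partition_affinity parts[2];
	struct partition_desc *desc;

	parts[0].partition_id = id_a;
	cpumask_copy(&parts[0].mask, cpumask_of(0));	/* partition A: CPU0 */
	parts[1].partition_id = id_b;
	cpumask_copy(&parts[1].mask, cpumask_of(1));	/* partition B: CPU1 */

	desc = partition_create_desc(fwnode, parts, ARRAY_SIZE(parts),
				     ppi_irq, my_ops);

	return desc ? partition_get_domain(desc) : NULL;
}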
+ */ + +#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H +#define __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H + +void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start); + +#endif diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h new file mode 100644 index 000000000..cb8455c87 --- /dev/null +++ b/include/linux/irqchip/mmp.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __IRQCHIP_MMP_H +#define __IRQCHIP_MMP_H + +extern struct irq_chip icu_irq_chip; + +#endif /* __IRQCHIP_MMP_H */ diff --git a/include/linux/irqchip/mxs.h b/include/linux/irqchip/mxs.h new file mode 100644 index 000000000..9039a538a --- /dev/null +++ b/include/linux/irqchip/mxs.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_IRQCHIP_MXS_H +#define __LINUX_IRQCHIP_MXS_H + +extern void icoll_handle_irq(struct pt_regs *); + +#endif diff --git a/include/linux/irqchip/versatile-fpga.h b/include/linux/irqchip/versatile-fpga.h new file mode 100644 index 000000000..a978fc8c7 --- /dev/null +++ b/include/linux/irqchip/versatile-fpga.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef PLAT_FPGA_IRQ_H +#define PLAT_FPGA_IRQ_H + +struct device_node; +struct pt_regs; + +void fpga_handle_irq(struct pt_regs *regs); +void fpga_irq_init(void __iomem *, const char *, int, int, u32, + struct device_node *node); +int fpga_irq_of_init(struct device_node *node, + struct device_node *parent); + +#endif diff --git a/include/linux/irqchip/xtensa-mx.h b/include/linux/irqchip/xtensa-mx.h new file mode 100644 index 000000000..9c3b6ecc8 --- /dev/null +++ b/include/linux/irqchip/xtensa-mx.h @@ -0,0 +1,17 @@ +/* + * Xtensa MX interrupt distributor + * + * Copyright (C) 2002 - 2013 Tensilica, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef __LINUX_IRQCHIP_XTENSA_MX_H +#define __LINUX_IRQCHIP_XTENSA_MX_H + +struct device_node; +int xtensa_mx_init_legacy(struct device_node *interrupt_parent); + +#endif /* __LINUX_IRQCHIP_XTENSA_MX_H */ diff --git a/include/linux/irqchip/xtensa-pic.h b/include/linux/irqchip/xtensa-pic.h new file mode 100644 index 000000000..48718ae5a --- /dev/null +++ b/include/linux/irqchip/xtensa-pic.h @@ -0,0 +1,18 @@ +/* + * Xtensa built-in interrupt controller + * + * Copyright (C) 2002 - 2013 Tensilica, Inc. + * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef __LINUX_IRQCHIP_XTENSA_PIC_H +#define __LINUX_IRQCHIP_XTENSA_PIC_H + +struct device_node; +int xtensa_pic_init_legacy(struct device_node *interrupt_parent); + +#endif /* __LINUX_IRQCHIP_XTENSA_PIC_H */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h new file mode 100644 index 000000000..875c41b23 --- /dev/null +++ b/include/linux/irqdesc.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IRQDESC_H +#define _LINUX_IRQDESC_H + +#include +#include +#include + +/* + * Core internal functions to deal with irq descriptors + */ + +struct irq_affinity_notify; +struct proc_dir_entry; +struct module; +struct irq_desc; +struct irq_domain; +struct pt_regs; + +/** + * struct irq_desc - interrupt descriptor + * @irq_common_data: per irq and chip data passed down to chip functions + * @kstat_irqs: irq stats per cpu + * @handle_irq: highlevel irq-events handler + * @preflow_handler: handler called before the flow handler (currently used by sparc) + * @action: the irq action chain + * @status: status information + * @core_internal_state__do_not_mess_with_it: core internal status information + * @depth: disable-depth, for nested irq_disable() calls + * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers + * @irq_count: stats field to detect stalled irqs + * @last_unhandled: aging timer for unhandled count + * @irqs_unhandled: stats field for spurious unhandled interrupts + * @threads_handled: stats field for deferred spurious detection of threaded handlers + * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers + * @lock: locking for SMP + * @affinity_hint: hint to user space for preferred irq affinity + * @affinity_notify: context for notification of affinity changes + * @pending_mask: pending rebalanced interrupts + * @threads_oneshot: bitfield to handle shared oneshot threads + * @threads_active: number of irqaction threads currently running + * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers + * @nr_actions: number of installed actions on this descriptor + * @no_suspend_depth: number of irqactions on a irq descriptor with + * IRQF_NO_SUSPEND set + * @force_resume_depth: number of irqactions on a irq descriptor with + * IRQF_FORCE_RESUME set + * @rcu: rcu head for delayed free + * @kobj: kobject used to represent this struct in sysfs + * @request_mutex: mutex to protect request/free before locking desc->lock + * @dir: /proc/irq/ procfs entry + * @debugfs_file: dentry for the debugfs file + * @name: flow handler name for /proc/interrupts output + */ +struct irq_desc { + struct irq_common_data irq_common_data; + struct irq_data irq_data; + unsigned int __percpu *kstat_irqs; + irq_flow_handler_t handle_irq; +#ifdef CONFIG_IRQ_PREFLOW_FASTEOI + irq_preflow_handler_t preflow_handler; +#endif + struct irqaction *action; /* IRQ action list */ + unsigned int status_use_accessors; + unsigned int core_internal_state__do_not_mess_with_it; + unsigned int depth; /* nested irq disables */ + unsigned int wake_depth; /* nested wake enables */ + unsigned int tot_count; + unsigned int irq_count; /* For detecting broken IRQs */ + unsigned long last_unhandled; /* Aging timer for unhandled count */ + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; +#ifdef CONFIG_SMP + const struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; +#ifdef 
CONFIG_GENERIC_PENDING_IRQ + cpumask_var_t pending_mask; +#endif +#endif + unsigned long threads_oneshot; + atomic_t threads_active; + wait_queue_head_t wait_for_threads; +#ifdef CONFIG_PM_SLEEP + unsigned int nr_actions; + unsigned int no_suspend_depth; + unsigned int cond_suspend_depth; + unsigned int force_resume_depth; +#endif +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *dir; +#endif +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS + struct dentry *debugfs_file; + const char *dev_name; +#endif +#ifdef CONFIG_SPARSE_IRQ + struct rcu_head rcu; + struct kobject kobj; +#endif + struct mutex request_mutex; + int parent_irq; + struct module *owner; + const char *name; +} ____cacheline_internodealigned_in_smp; + +#ifdef CONFIG_SPARSE_IRQ +extern void irq_lock_sparse(void); +extern void irq_unlock_sparse(void); +#else +static inline void irq_lock_sparse(void) { } +static inline void irq_unlock_sparse(void) { } +extern struct irq_desc irq_desc[NR_IRQS]; +#endif + +static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) +{ + return container_of(data->common, struct irq_desc, irq_common_data); +} + +static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) +{ + return desc->irq_data.irq; +} + +static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) +{ + return &desc->irq_data; +} + +static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) +{ + return desc->irq_data.chip; +} + +static inline void *irq_desc_get_chip_data(struct irq_desc *desc) +{ + return desc->irq_data.chip_data; +} + +static inline void *irq_desc_get_handler_data(struct irq_desc *desc) +{ + return desc->irq_common_data.handler_data; +} + +/* + * Architectures call this to let the generic IRQ layer + * handle an interrupt. + */ +static inline void generic_handle_irq_desc(struct irq_desc *desc) +{ + desc->handle_irq(desc); +} + +int generic_handle_irq(unsigned int irq); + +#ifdef CONFIG_HANDLE_DOMAIN_IRQ +/* + * Convert a HW interrupt number to a logical one using a IRQ domain, + * and handle the result interrupt number. Return -EINVAL if + * conversion failed. Providing a NULL domain indicates that the + * conversion has already been done. + */ +int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, + bool lookup, struct pt_regs *regs); + +static inline int handle_domain_irq(struct irq_domain *domain, + unsigned int hwirq, struct pt_regs *regs) +{ + return __handle_domain_irq(domain, hwirq, true, regs); +} +#endif + +/* Test to see if a driver has successfully requested an irq */ +static inline int irq_desc_has_action(struct irq_desc *desc) +{ + return desc->action != NULL; +} + +static inline int irq_has_action(unsigned int irq) +{ + return irq_desc_has_action(irq_to_desc(irq)); +} + +/** + * irq_set_handler_locked - Set irq handler from a locked region + * @data: Pointer to the irq_data structure which identifies the irq + * @handler: Flow control handler function for this interrupt + * + * Sets the handler in the irq descriptor associated to @data. + * + * Must be called with irq_desc locked and valid parameters. Typical + * call site is the irq_set_type() callback. 
+ */ +static inline void irq_set_handler_locked(struct irq_data *data, + irq_flow_handler_t handler) +{ + struct irq_desc *desc = irq_data_to_desc(data); + + desc->handle_irq = handler; +} + +/** + * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region + * @data: Pointer to the irq_data structure for which the chip is set + * @chip: Pointer to the new irq chip + * @handler: Flow control handler function for this interrupt + * @name: Name of the interrupt + * + * Replace the irq chip at the proper hierarchy level in @data and + * sets the handler and name in the associated irq descriptor. + * + * Must be called with irq_desc locked and valid parameters. + */ +static inline void +irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip, + irq_flow_handler_t handler, const char *name) +{ + struct irq_desc *desc = irq_data_to_desc(data); + + desc->handle_irq = handler; + desc->name = name; + data->chip = chip; +} + +static inline bool irq_balancing_disabled(unsigned int irq) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; +} + +static inline bool irq_is_percpu(unsigned int irq) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + return desc->status_use_accessors & IRQ_PER_CPU; +} + +static inline bool irq_is_percpu_devid(unsigned int irq) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + return desc->status_use_accessors & IRQ_PER_CPU_DEVID; +} + +static inline void +irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, + struct lock_class_key *request_class) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (desc) { + lockdep_set_class(&desc->lock, lock_class); + lockdep_set_class(&desc->request_mutex, request_class); + } +} + +#ifdef CONFIG_IRQ_PREFLOW_FASTEOI +static inline void +__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + desc->preflow_handler = handler; +} +#endif + +#endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h new file mode 100644 index 000000000..092445543 --- /dev/null +++ b/include/linux/irqdomain.h @@ -0,0 +1,589 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * irq_domain - IRQ translation domains + * + * Translation infrastructure between hw and linux irq numbers. This is + * helpful for interrupt controllers to implement mapping between hardware + * irq numbers and the Linux irq number space. + * + * irq_domains also have hooks for translating device tree or other + * firmware interrupt representations into a hardware irq number that + * can be mapped back to a Linux irq number without any extra platform + * support code. + * + * Interrupt controller "domain" data structure. This could be defined as a + * irq domain controller. That is, it handles the mapping between hardware + * and virtual interrupt numbers for a given interrupt domain. The domain + * structure is generally created by the PIC code for a given PIC instance + * (though a domain can cover more than one PIC if they have a flat number + * model). It's the domain callbacks that are responsible for setting the + * irq_chip on a given irq_desc after it's been mapped. + * + * The host code and data structures use a fwnode_handle pointer to + * identify the domain. In some cases, and in order to preserve source + * code compatibility, this fwnode pointer is "upgraded" to a DT + * device_node. 
For those firmware infrastructures that do not provide + * a unique identifier for an interrupt controller, the irq_domain + * code offers a fwnode allocator. + */ + +#ifndef _LINUX_IRQDOMAIN_H +#define _LINUX_IRQDOMAIN_H + +#include +#include +#include +#include +#include + +struct device_node; +struct irq_domain; +struct of_device_id; +struct irq_chip; +struct irq_data; +struct cpumask; +struct seq_file; + +/* Number of irqs reserved for a legacy isa controller */ +#define NUM_ISA_INTERRUPTS 16 + +#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 + +/** + * struct irq_fwspec - generic IRQ specifier structure + * + * @fwnode: Pointer to a firmware-specific descriptor + * @param_count: Number of device-specific parameters + * @param: Device-specific parameters + * + * This structure, directly modeled after of_phandle_args, is used to + * pass a device-specific description of an interrupt. + */ +struct irq_fwspec { + struct fwnode_handle *fwnode; + int param_count; + u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS]; +}; + +/* + * Should several domains have the same device node, but serve + * different purposes (for example one domain is for PCI/MSI, and the + * other for wired IRQs), they can be distinguished using a + * bus-specific token. Most domains are expected to only carry + * DOMAIN_BUS_ANY. + */ +enum irq_domain_bus_token { + DOMAIN_BUS_ANY = 0, + DOMAIN_BUS_WIRED, + DOMAIN_BUS_PCI_MSI, + DOMAIN_BUS_PLATFORM_MSI, + DOMAIN_BUS_NEXUS, + DOMAIN_BUS_IPI, + DOMAIN_BUS_FSL_MC_MSI, +}; + +/** + * struct irq_domain_ops - Methods for irq_domain objects + * @match: Match an interrupt controller device node to a host, returns + * 1 on a match + * @map: Create or update a mapping between a virtual irq number and a hw + * irq number. This is called only once for a given mapping. + * @unmap: Dispose of such a mapping + * @xlate: Given a device tree node and interrupt specifier, decode + * the hardware irq number and linux irq type value. + * + * Functions below are provided by the driver and called whenever a new mapping + * is created or an old mapping is disposed. The driver can then proceed to + * whatever internal data structures management is required. It also needs + * to setup the irq_desc when returning from map(). 
+ */ +struct irq_domain_ops { + int (*match)(struct irq_domain *d, struct device_node *node, + enum irq_domain_bus_token bus_token); + int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token); + int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); + void (*unmap)(struct irq_domain *d, unsigned int virq); + int (*xlate)(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type); +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + /* extended V2 interfaces to support hierarchy irq_domains */ + int (*alloc)(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg); + void (*free)(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs); + int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve); + void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); + int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *out_hwirq, unsigned int *out_type); +#endif +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS + void (*debug_show)(struct seq_file *m, struct irq_domain *d, + struct irq_data *irqd, int ind); +#endif +}; + +extern struct irq_domain_ops irq_generic_chip_ops; + +struct irq_domain_chip_generic; + +/** + * struct irq_domain - Hardware interrupt number translation object + * @link: Element in global irq_domain list. + * @name: Name of interrupt domain + * @ops: pointer to irq_domain methods + * @host_data: private data pointer for use by owner. Not touched by irq_domain + * core code. + * @flags: host per irq_domain flags + * @mapcount: The number of mapped interrupts + * + * Optional elements + * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy + * to swap it for the of_node via the irq_domain_get_of_node accessor + * @gc: Pointer to a list of generic chips. There is a helper function for + * setting up one or more generic chips for interrupt controllers + * drivers using the generic chip library which uses this pointer. + * @parent: Pointer to parent irq_domain to support hierarchy irq_domains + * @debugfs_file: dentry for the domain debugfs file + * + * Revmap data, used internally by irq_domain + * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that + * support direct mapping + * @revmap_size: Size of the linear map table @linear_revmap[] + * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map + * @linear_revmap: Linear table of hwirq->virq reverse mappings + */ +struct irq_domain { + struct list_head link; + const char *name; + const struct irq_domain_ops *ops; + void *host_data; + unsigned int flags; + unsigned int mapcount; + + /* Optional data */ + struct fwnode_handle *fwnode; + enum irq_domain_bus_token bus_token; + struct irq_domain_chip_generic *gc; +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + struct irq_domain *parent; +#endif +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS + struct dentry *debugfs_file; +#endif + + /* reverse map data. 
The linear map gets appended to the irq_domain */ + irq_hw_number_t hwirq_max; + unsigned int revmap_direct_max_irq; + unsigned int revmap_size; + struct radix_tree_root revmap_tree; + struct mutex revmap_tree_mutex; + unsigned int linear_revmap[]; +}; + +/* Irq domain flags */ +enum { + /* Irq domain is hierarchical */ + IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), + + /* Irq domain name was allocated in __irq_domain_add() */ + IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1), + + /* Irq domain is an IPI domain with virq per cpu */ + IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2), + + /* Irq domain is an IPI domain with single virq */ + IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), + + /* Irq domain implements MSIs */ + IRQ_DOMAIN_FLAG_MSI = (1 << 4), + + /* Irq domain implements MSI remapping */ + IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5), + + /* + * Quirk to handle MSI implementations which do not provide + * masking. Currently known to affect x86, but partially + * handled in core code. + */ + IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6), + + /* + * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved + * for implementation specific purposes and ignored by the + * core code. + */ + IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), +}; + +static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) +{ + return to_of_node(d->fwnode); +} + +#ifdef CONFIG_IRQ_DOMAIN +struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, + const char *name, void *data); + +enum { + IRQCHIP_FWNODE_REAL, + IRQCHIP_FWNODE_NAMED, + IRQCHIP_FWNODE_NAMED_ID, +}; + +static inline +struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name) +{ + return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL); +} + +static inline +struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id) +{ + return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name, + NULL); +} + +static inline struct fwnode_handle *irq_domain_alloc_fwnode(void *data) +{ + return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, data); +} + +void irq_domain_free_fwnode(struct fwnode_handle *fwnode); +struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, + irq_hw_number_t hwirq_max, int direct_max, + const struct irq_domain_ops *ops, + void *host_data); +struct irq_domain *irq_domain_add_simple(struct device_node *of_node, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data); +struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, + unsigned int size, + unsigned int first_irq, + irq_hw_number_t first_hwirq, + const struct irq_domain_ops *ops, + void *host_data); +extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token); +extern bool irq_domain_check_msi_remap(void); +extern void irq_set_default_host(struct irq_domain *host); +extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, + irq_hw_number_t hwirq, int node, + const struct cpumask *affinity); + +static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) +{ + return node ? 
&node->fwnode : NULL; +} + +extern const struct fwnode_operations irqchip_fwnode_ops; + +static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode) +{ + return fwnode && fwnode->ops == &irqchip_fwnode_ops; +} + +extern void irq_domain_update_bus_token(struct irq_domain *domain, + enum irq_domain_bus_token bus_token); + +static inline +struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, + enum irq_domain_bus_token bus_token) +{ + struct irq_fwspec fwspec = { + .fwnode = fwnode, + }; + + return irq_find_matching_fwspec(&fwspec, bus_token); +} + +static inline struct irq_domain *irq_find_matching_host(struct device_node *node, + enum irq_domain_bus_token bus_token) +{ + return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token); +} + +static inline struct irq_domain *irq_find_host(struct device_node *node) +{ + struct irq_domain *d; + + d = irq_find_matching_host(node, DOMAIN_BUS_WIRED); + if (!d) + d = irq_find_matching_host(node, DOMAIN_BUS_ANY); + + return d; +} + +/** + * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. + * @of_node: pointer to interrupt controller's device tree node. + * @size: Number of interrupts in the domain. + * @ops: map/unmap domain callbacks + * @host_data: Controller private data pointer + */ +static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node, + unsigned int size, + const struct irq_domain_ops *ops, + void *host_data) +{ + return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); +} +static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, + unsigned int max_irq, + const struct irq_domain_ops *ops, + void *host_data) +{ + return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); +} +static inline struct irq_domain *irq_domain_add_legacy_isa( + struct device_node *of_node, + const struct irq_domain_ops *ops, + void *host_data) +{ + return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops, + host_data); +} +static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, + const struct irq_domain_ops *ops, + void *host_data) +{ + return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data); +} + +static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode, + unsigned int size, + const struct irq_domain_ops *ops, + void *host_data) +{ + return __irq_domain_add(fwnode, size, size, 0, ops, host_data); +} + +static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data) +{ + return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data); +} + +extern void irq_domain_remove(struct irq_domain *host); + +extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq); +extern void irq_domain_associate_many(struct irq_domain *domain, + unsigned int irq_base, + irq_hw_number_t hwirq_base, int count); +extern void irq_domain_disassociate(struct irq_domain *domain, + unsigned int irq); + +extern unsigned int irq_create_mapping(struct irq_domain *host, + irq_hw_number_t hwirq); +extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); +extern void irq_dispose_mapping(unsigned int virq); + +/** + * irq_linear_revmap() - Find a linux irq from a hw irq number. 
+ * @domain: domain owning this hardware interrupt + * @hwirq: hardware irq number in that domain space + * + * This is a fast path alternative to irq_find_mapping() that can be + * called directly by irq controller code to save a handful of + * instructions. It is always safe to call, but won't find irqs mapped + * using the radix tree. + */ +static inline unsigned int irq_linear_revmap(struct irq_domain *domain, + irq_hw_number_t hwirq) +{ + return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0; +} +extern unsigned int irq_find_mapping(struct irq_domain *host, + irq_hw_number_t hwirq); +extern unsigned int irq_create_direct_mapping(struct irq_domain *host); +extern int irq_create_strict_mappings(struct irq_domain *domain, + unsigned int irq_base, + irq_hw_number_t hwirq_base, int count); + +static inline int irq_create_identity_mapping(struct irq_domain *host, + irq_hw_number_t hwirq) +{ + return irq_create_strict_mappings(host, hwirq, hwirq, 1); +} + +extern const struct irq_domain_ops irq_domain_simple_ops; + +/* stock xlate functions */ +int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); +int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); +int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); + +/* IPI functions */ +int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest); +int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); + +/* V2 interfaces to support hierarchy IRQ domains. 
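Putting the declarations above together, a simple interrupt controller typically registers a linear domain and supplies .map/.xlate callbacks. The following sketch shows that pattern; the chip name, the 32-interrupt size and the registration helper are assumptions, while the irq_domain calls are the ones declared in this header.

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_chip my_chip;		/* assumed to be set up elsewhere */

static int my_domain_map(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &my_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static const struct irq_domain_ops my_domain_ops = {
	.map	= my_domain_map,
	.xlate	= irq_domain_xlate_onecell,	/* one-cell DT interrupt specifier */
};

/* Typically called from the controller's init/probe path */
static struct irq_domain *my_register_domain(struct device_node *np, void *priv)
{
	return irq_domain_add_linear(np, 32, &my_domain_ops, priv);
}

Consumers then obtain Linux irq numbers with irq_create_mapping(domain, hwirq), and the controller's own fast path can use irq_linear_revmap() as described above.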
*/ +extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, + unsigned int virq); +extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq, struct irq_chip *chip, + void *chip_data, irq_flow_handler_t handler, + void *handler_data, const char *handler_name); +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY +extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, + unsigned int flags, unsigned int size, + struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, void *host_data); + +static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, + unsigned int flags, + unsigned int size, + struct device_node *node, + const struct irq_domain_ops *ops, + void *host_data) +{ + return irq_domain_create_hierarchy(parent, flags, size, + of_node_to_fwnode(node), + ops, host_data); +} + +extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, + unsigned int nr_irqs, int node, void *arg, + bool realloc, const struct cpumask *affinity); +extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); +extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early); +extern void irq_domain_deactivate_irq(struct irq_data *irq_data); + +static inline int irq_domain_alloc_irqs(struct irq_domain *domain, + unsigned int nr_irqs, int node, void *arg) +{ + return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, + NULL); +} + +extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs, void *arg); +extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq, + struct irq_chip *chip, + void *chip_data); +extern void irq_domain_reset_irq_data(struct irq_data *irq_data); +extern void irq_domain_free_irqs_common(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs); +extern void irq_domain_free_irqs_top(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs); + +extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg); +extern int irq_domain_pop_irq(struct irq_domain *domain, int virq); + +extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs, void *arg); + +extern void irq_domain_free_irqs_parent(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs); + +static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; +} + +static inline bool irq_domain_is_ipi(struct irq_domain *domain) +{ + return domain->flags & + (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE); +} + +static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_IPI_PER_CPU; +} + +static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; +} + +static inline bool irq_domain_is_msi(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_MSI; +} + +static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP; +} + +extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain); + +#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ +static inline int irq_domain_alloc_irqs(struct irq_domain *domain, + unsigned int nr_irqs, int node, void *arg) +{ + return -1; +} + +static inline 
void irq_domain_free_irqs(unsigned int virq, + unsigned int nr_irqs) { } + +static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) +{ + return false; +} + +static inline bool irq_domain_is_ipi(struct irq_domain *domain) +{ + return false; +} + +static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) +{ + return false; +} + +static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) +{ + return false; +} + +static inline bool irq_domain_is_msi(struct irq_domain *domain) +{ + return false; +} + +static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) +{ + return false; +} + +static inline bool +irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain) +{ + return false; +} +#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ + +#else /* CONFIG_IRQ_DOMAIN */ +static inline void irq_dispose_mapping(unsigned int virq) { } +static inline struct irq_domain *irq_find_matching_fwnode( + struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) +{ + return NULL; +} +static inline bool irq_domain_check_msi_remap(void) +{ + return false; +} +#endif /* !CONFIG_IRQ_DOMAIN */ + +#endif /* _LINUX_IRQDOMAIN_H */ diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h new file mode 100644 index 000000000..21619c92c --- /dev/null +++ b/include/linux/irqflags.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/irqflags.h + * + * IRQ flags tracing: follow the state of the hardirq and softirq flags and + * provide callbacks for transitions between ON and OFF states. + * + * This file gets included from lowlevel asm headers too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() macros from the lowlevel headers. + */ +#ifndef _LINUX_TRACE_IRQFLAGS_H +#define _LINUX_TRACE_IRQFLAGS_H + +#include +#include + +/* Currently trace_softirqs_on/off is used only by lockdep */ +#ifdef CONFIG_PROVE_LOCKING + extern void trace_softirqs_on(unsigned long ip); + extern void trace_softirqs_off(unsigned long ip); + extern void lockdep_hardirqs_on(unsigned long ip); + extern void lockdep_hardirqs_off(unsigned long ip); +#else + static inline void trace_softirqs_on(unsigned long ip) { } + static inline void trace_softirqs_off(unsigned long ip) { } + static inline void lockdep_hardirqs_on(unsigned long ip) { } + static inline void lockdep_hardirqs_off(unsigned long ip) { } +#endif + +#ifdef CONFIG_TRACE_IRQFLAGS + extern void trace_hardirqs_on(void); + extern void trace_hardirqs_off(void); +# define trace_hardirq_context(p) ((p)->hardirq_context) +# define trace_softirq_context(p) ((p)->softirq_context) +# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) +# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) +# define trace_hardirq_enter() \ +do { \ + current->hardirq_context++; \ +} while (0) +# define trace_hardirq_exit() \ +do { \ + current->hardirq_context--; \ +} while (0) +# define lockdep_softirq_enter() \ +do { \ + current->softirq_context++; \ +} while (0) +# define lockdep_softirq_exit() \ +do { \ + current->softirq_context--; \ +} while (0) +#else +# define trace_hardirqs_on() do { } while (0) +# define trace_hardirqs_off() do { } while (0) +# define trace_hardirq_context(p) 0 +# define trace_softirq_context(p) 0 +# define trace_hardirqs_enabled(p) 0 +# define trace_softirqs_enabled(p) 0 +# define trace_hardirq_enter() do { } while (0) +# define trace_hardirq_exit() do { } while (0) +# define lockdep_softirq_enter() do { } while (0) +# define lockdep_softirq_exit() do 
{ } while (0) +#endif + +#if defined(CONFIG_IRQSOFF_TRACER) || \ + defined(CONFIG_PREEMPT_TRACER) + extern void stop_critical_timings(void); + extern void start_critical_timings(void); +#else +# define stop_critical_timings() do { } while (0) +# define start_critical_timings() do { } while (0) +#endif + +/* + * Wrap the arch provided IRQ routines to provide appropriate checks. + */ +#define raw_local_irq_disable() arch_local_irq_disable() +#define raw_local_irq_enable() arch_local_irq_enable() +#define raw_local_irq_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = arch_local_irq_save(); \ + } while (0) +#define raw_local_irq_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + arch_local_irq_restore(flags); \ + } while (0) +#define raw_local_save_flags(flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = arch_local_save_flags(); \ + } while (0) +#define raw_irqs_disabled_flags(flags) \ + ({ \ + typecheck(unsigned long, flags); \ + arch_irqs_disabled_flags(flags); \ + }) +#define raw_irqs_disabled() (arch_irqs_disabled()) +#define raw_safe_halt() arch_safe_halt() + +/* + * The local_irq_*() APIs are equal to the raw_local_irq*() + * if !TRACE_IRQFLAGS. + */ +#ifdef CONFIG_TRACE_IRQFLAGS +#define local_irq_enable() \ + do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) +#define local_irq_disable() \ + do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) +#define local_irq_save(flags) \ + do { \ + raw_local_irq_save(flags); \ + trace_hardirqs_off(); \ + } while (0) + + +#define local_irq_restore(flags) \ + do { \ + if (raw_irqs_disabled_flags(flags)) { \ + raw_local_irq_restore(flags); \ + trace_hardirqs_off(); \ + } else { \ + trace_hardirqs_on(); \ + raw_local_irq_restore(flags); \ + } \ + } while (0) + +#define safe_halt() \ + do { \ + trace_hardirqs_on(); \ + raw_safe_halt(); \ + } while (0) + + +#else /* !CONFIG_TRACE_IRQFLAGS */ + +#define local_irq_enable() do { raw_local_irq_enable(); } while (0) +#define local_irq_disable() do { raw_local_irq_disable(); } while (0) +#define local_irq_save(flags) \ + do { \ + raw_local_irq_save(flags); \ + } while (0) +#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) +#define safe_halt() do { raw_safe_halt(); } while (0) + +#endif /* CONFIG_TRACE_IRQFLAGS */ + +#define local_save_flags(flags) raw_local_save_flags(flags) + +/* + * Some architectures don't define arch_irqs_disabled(), so even if either + * definition would be fine we need to use different ones for the time being + * to avoid build issues. + */ +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +#define irqs_disabled() \ + ({ \ + unsigned long _flags; \ + raw_local_save_flags(_flags); \ + raw_irqs_disabled_flags(_flags); \ + }) +#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ +#define irqs_disabled() raw_irqs_disabled() +#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ + +#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) + +#endif diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h new file mode 100644 index 000000000..1e6f4e712 --- /dev/null +++ b/include/linux/irqhandler.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IRQHANDLER_H +#define _LINUX_IRQHANDLER_H + +/* + * Interrupt flow handler typedefs are defined here to avoid circular + * include dependencies. 
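The local_irq_*() wrappers above are what drivers actually use; the save/restore pair is preferred over a bare disable/enable because it restores the previous state instead of unconditionally re-enabling interrupts. A minimal illustration (the function and its body are placeholders):

#include <linux/irqflags.h>

static void touch_percpu_state(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* records the current state, then disables */
	/* ... manipulate per-CPU data that must not race with local interrupts ... */
	local_irq_restore(flags);	/* re-enables only if interrupts were enabled before */
}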
+ */ + +struct irq_desc; +struct irq_data; +typedef void (*irq_flow_handler_t)(struct irq_desc *desc); +typedef void (*irq_preflow_handler_t)(struct irq_data *data); + +#endif diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h new file mode 100644 index 000000000..3496baa0b --- /dev/null +++ b/include/linux/irqnr.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IRQNR_H +#define _LINUX_IRQNR_H + +#include + + +extern int nr_irqs; +extern struct irq_desc *irq_to_desc(unsigned int irq); +unsigned int irq_get_next_irq(unsigned int offset); + +# define for_each_irq_desc(irq, desc) \ + for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ + irq++, desc = irq_to_desc(irq)) \ + if (!desc) \ + ; \ + else + + +# define for_each_irq_desc_reverse(irq, desc) \ + for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \ + irq--, desc = irq_to_desc(irq)) \ + if (!desc) \ + ; \ + else + +# define for_each_active_irq(irq) \ + for (irq = irq_get_next_irq(0); irq < nr_irqs; \ + irq = irq_get_next_irq(irq + 1)) + +#define for_each_irq_nr(irq) \ + for (irq = 0; irq < nr_irqs; irq++) + +#endif diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h new file mode 100644 index 000000000..bd4c066ad --- /dev/null +++ b/include/linux/irqreturn.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IRQRETURN_H +#define _LINUX_IRQRETURN_H + +/** + * enum irqreturn + * @IRQ_NONE interrupt was not from this device or was not handled + * @IRQ_HANDLED interrupt was handled by this device + * @IRQ_WAKE_THREAD handler requests to wake the handler thread + */ +enum irqreturn { + IRQ_NONE = (0 << 0), + IRQ_HANDLED = (1 << 0), + IRQ_WAKE_THREAD = (1 << 1), +}; + +typedef enum irqreturn irqreturn_t; +#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE) + +#endif diff --git a/include/linux/isa.h b/include/linux/isa.h new file mode 100644 index 000000000..41336da0f --- /dev/null +++ b/include/linux/isa.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ISA bus. + */ + +#ifndef __LINUX_ISA_H +#define __LINUX_ISA_H + +#include +#include +#include + +struct isa_driver { + int (*match)(struct device *, unsigned int); + int (*probe)(struct device *, unsigned int); + int (*remove)(struct device *, unsigned int); + void (*shutdown)(struct device *, unsigned int); + int (*suspend)(struct device *, unsigned int, pm_message_t); + int (*resume)(struct device *, unsigned int); + + struct device_driver driver; + struct device *devices; +}; + +#define to_isa_driver(x) container_of((x), struct isa_driver, driver) + +#ifdef CONFIG_ISA_BUS_API +int isa_register_driver(struct isa_driver *, unsigned int); +void isa_unregister_driver(struct isa_driver *); +#else +static inline int isa_register_driver(struct isa_driver *d, unsigned int i) +{ + return -ENODEV; +} + +static inline void isa_unregister_driver(struct isa_driver *d) +{ +} +#endif + +/** + * module_isa_driver() - Helper macro for registering a ISA driver + * @__isa_driver: isa_driver struct + * @__num_isa_dev: number of devices to register + * + * Helper macro for ISA drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate code. Each module may only + * use this macro once, and calling it replaces module_init and module_exit. 
+ */ +#define module_isa_driver(__isa_driver, __num_isa_dev) \ +static int __init __isa_driver##_init(void) \ +{ \ + return isa_register_driver(&(__isa_driver), __num_isa_dev); \ +} \ +module_init(__isa_driver##_init); \ +static void __exit __isa_driver##_exit(void) \ +{ \ + isa_unregister_driver(&(__isa_driver)); \ +} \ +module_exit(__isa_driver##_exit); + +/** + * max_num_isa_dev() - Maximum possible number registered of an ISA device + * @__ida_dev_ext: ISA device address extent + * + * The highest base address possible for an ISA device is 0x3FF; this results in + * 1024 possible base addresses. Dividing the number of possible base addresses + * by the address extent taken by each device results in the maximum number of + * devices on a system. + */ +#define max_num_isa_dev(__isa_dev_ext) (1024 / __isa_dev_ext) + +#endif /* __LINUX_ISA_H */ diff --git a/include/linux/isapnp.h b/include/linux/isapnp.h new file mode 100644 index 000000000..3c77bf9b1 --- /dev/null +++ b/include/linux/isapnp.h @@ -0,0 +1,121 @@ +/* + * ISA Plug & Play support + * Copyright (c) by Jaroslav Kysela + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#ifndef LINUX_ISAPNP_H +#define LINUX_ISAPNP_H + +#include +#include + +/* + * + */ + +#define ISAPNP_VENDOR(a,b,c) (((((a)-'A'+1)&0x3f)<<2)|\ + ((((b)-'A'+1)&0x18)>>3)|((((b)-'A'+1)&7)<<13)|\ + ((((c)-'A'+1)&0x1f)<<8)) +#define ISAPNP_DEVICE(x) ((((x)&0xf000)>>8)|\ + (((x)&0x0f00)>>8)|\ + (((x)&0x00f0)<<8)|\ + (((x)&0x000f)<<8)) +#define ISAPNP_FUNCTION(x) ISAPNP_DEVICE(x) + +/* + * + */ + +#ifdef __KERNEL__ +#include + +#define DEVICE_COUNT_COMPATIBLE 4 + +#define ISAPNP_CARD_DEVS 8 + +#define ISAPNP_CARD_ID(_va, _vb, _vc, _device) \ + .card_vendor = ISAPNP_VENDOR(_va, _vb, _vc), .card_device = ISAPNP_DEVICE(_device) +#define ISAPNP_CARD_END \ + .card_vendor = 0, .card_device = 0 +#define ISAPNP_DEVICE_ID(_va, _vb, _vc, _function) \ + { .vendor = ISAPNP_VENDOR(_va, _vb, _vc), .function = ISAPNP_FUNCTION(_function) } + +struct isapnp_card_id { + unsigned long driver_data; /* data private to the driver */ + unsigned short card_vendor, card_device; + struct { + unsigned short vendor, function; + } devs[ISAPNP_CARD_DEVS]; /* logical devices */ +}; + +#define ISAPNP_DEVICE_SINGLE(_cva, _cvb, _cvc, _cdevice, _dva, _dvb, _dvc, _dfunction) \ + .card_vendor = ISAPNP_VENDOR(_cva, _cvb, _cvc), .card_device = ISAPNP_DEVICE(_cdevice), \ + .vendor = ISAPNP_VENDOR(_dva, _dvb, _dvc), .function = ISAPNP_FUNCTION(_dfunction) +#define ISAPNP_DEVICE_SINGLE_END \ + .card_vendor = 0, .card_device = 0 + +#if defined(CONFIG_ISAPNP) || (defined(CONFIG_ISAPNP_MODULE) && defined(MODULE)) + +#define __ISAPNP__ + +/* lowlevel configuration */ +int isapnp_present(void); +int isapnp_cfg_begin(int csn, int device); +int isapnp_cfg_end(void); +unsigned char isapnp_read_byte(unsigned char idx); +void isapnp_write_byte(unsigned char idx, unsigned char val); + +#ifdef CONFIG_PROC_FS +int isapnp_proc_init(void); +int isapnp_proc_done(void); +#else +static inline int isapnp_proc_init(void) { return 0; } +static inline int isapnp_proc_done(void) { return 0; } +#endif + +/* compat */ +struct pnp_card *pnp_find_card(unsigned short vendor, + unsigned short device, + struct pnp_card *from); +struct pnp_dev *pnp_find_dev(struct pnp_card *card, + unsigned short vendor, + unsigned short function, + struct pnp_dev *from); + +#else /* !CONFIG_ISAPNP */ + +/* lowlevel configuration */ +static inline int isapnp_present(void) { return 0; } +static inline int isapnp_cfg_begin(int csn, int device) { return -ENODEV; } +static inline int isapnp_cfg_end(void) { return -ENODEV; } +static inline unsigned char isapnp_read_byte(unsigned char idx) { return 0xff; } +static inline void isapnp_write_byte(unsigned char idx, unsigned char val) { ; } + +static inline struct pnp_card *pnp_find_card(unsigned short vendor, + unsigned short device, + struct pnp_card *from) { return NULL; } +static inline struct pnp_dev *pnp_find_dev(struct pnp_card *card, + unsigned short vendor, + unsigned short function, + struct pnp_dev *from) { return NULL; } + +#endif /* CONFIG_ISAPNP */ + +#endif /* __KERNEL__ */ +#endif /* LINUX_ISAPNP_H */ diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h new file mode 100644 index 000000000..10923d730 --- /dev/null +++ b/include/linux/iscsi_boot_sysfs.h @@ -0,0 +1,147 @@ +/* + * Export the iSCSI boot info to userland via sysfs. + * + * Copyright (C) 2010 Red Hat, Inc. All rights reserved. 
+ * Copyright (C) 2010 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _ISCSI_BOOT_SYSFS_ +#define _ISCSI_BOOT_SYSFS_ + +/* + * The text attributes names for each of the kobjects. +*/ +enum iscsi_boot_eth_properties_enum { + ISCSI_BOOT_ETH_INDEX, + ISCSI_BOOT_ETH_FLAGS, + ISCSI_BOOT_ETH_IP_ADDR, + ISCSI_BOOT_ETH_PREFIX_LEN, + ISCSI_BOOT_ETH_SUBNET_MASK, + ISCSI_BOOT_ETH_ORIGIN, + ISCSI_BOOT_ETH_GATEWAY, + ISCSI_BOOT_ETH_PRIMARY_DNS, + ISCSI_BOOT_ETH_SECONDARY_DNS, + ISCSI_BOOT_ETH_DHCP, + ISCSI_BOOT_ETH_VLAN, + ISCSI_BOOT_ETH_MAC, + /* eth_pci_bdf - this is replaced by link to the device itself. */ + ISCSI_BOOT_ETH_HOSTNAME, + ISCSI_BOOT_ETH_END_MARKER, +}; + +enum iscsi_boot_tgt_properties_enum { + ISCSI_BOOT_TGT_INDEX, + ISCSI_BOOT_TGT_FLAGS, + ISCSI_BOOT_TGT_IP_ADDR, + ISCSI_BOOT_TGT_PORT, + ISCSI_BOOT_TGT_LUN, + ISCSI_BOOT_TGT_CHAP_TYPE, + ISCSI_BOOT_TGT_NIC_ASSOC, + ISCSI_BOOT_TGT_NAME, + ISCSI_BOOT_TGT_CHAP_NAME, + ISCSI_BOOT_TGT_CHAP_SECRET, + ISCSI_BOOT_TGT_REV_CHAP_NAME, + ISCSI_BOOT_TGT_REV_CHAP_SECRET, + ISCSI_BOOT_TGT_END_MARKER, +}; + +enum iscsi_boot_initiator_properties_enum { + ISCSI_BOOT_INI_INDEX, + ISCSI_BOOT_INI_FLAGS, + ISCSI_BOOT_INI_ISNS_SERVER, + ISCSI_BOOT_INI_SLP_SERVER, + ISCSI_BOOT_INI_PRI_RADIUS_SERVER, + ISCSI_BOOT_INI_SEC_RADIUS_SERVER, + ISCSI_BOOT_INI_INITIATOR_NAME, + ISCSI_BOOT_INI_END_MARKER, +}; + +enum iscsi_boot_acpitbl_properties_enum { + ISCSI_BOOT_ACPITBL_SIGNATURE, + ISCSI_BOOT_ACPITBL_OEM_ID, + ISCSI_BOOT_ACPITBL_OEM_TABLE_ID, +}; + +struct attribute_group; + +struct iscsi_boot_kobj { + struct kobject kobj; + struct attribute_group *attr_group; + struct list_head list; + + /* + * Pointer to store driver specific info. If set this will + * be freed for the LLD when the kobj release function is called. + */ + void *data; + /* + * Driver specific show function. + * + * The enum of the type. This can be any value of the above + * properties. + */ + ssize_t (*show) (void *data, int type, char *buf); + + /* + * Drivers specific visibility function. + * The function should return if they the attr should be readable + * writable or should not be shown. + * + * The enum of the type. This can be any value of the above + * properties. + */ + umode_t (*is_visible) (void *data, int type); + + /* + * Driver specific release function. + * + * The function should free the data passed in. 
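+	 *
+	 * Minimal sketch of such a callback (illustrative only, not part
+	 * of the original header):
+	 *
+	 *	static void demo_release(void *data)
+	 *	{
+	 *		kfree(data);
+	 *	}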
+ */ + void (*release) (void *data); +}; + +struct iscsi_boot_kset { + struct list_head kobj_list; + struct kset *kset; +}; + +struct iscsi_boot_kobj * +iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + umode_t (*is_visible) (void *data, int type), + void (*release) (void *data)); + +struct iscsi_boot_kobj * +iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + umode_t (*is_visible) (void *data, int type), + void (*release) (void *data)); +struct iscsi_boot_kobj * +iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + umode_t (*is_visible) (void *data, int type), + void (*release) (void *data)); + +struct iscsi_boot_kobj * +iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show)(void *data, int type, char *buf), + umode_t (*is_visible)(void *data, int type), + void (*release)(void *data)); + +struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); +struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno); +void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset); + +#endif diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h new file mode 100644 index 000000000..605cc5c33 --- /dev/null +++ b/include/linux/iscsi_ibft.h @@ -0,0 +1,46 @@ +/* + * Copyright 2007 Red Hat, Inc. + * by Peter Jones + * Copyright 2007 IBM, Inc. + * by Konrad Rzeszutek + * Copyright 2008 + * by Konrad Rzeszutek + * + * This code exposes the iSCSI Boot Format Table to userland via sysfs. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef ISCSI_IBFT_H +#define ISCSI_IBFT_H + +#include + +/* + * Logical location of iSCSI Boot Format Table. + * If the value is NULL there is no iBFT on the machine. + */ +extern struct acpi_table_ibft *ibft_addr; + +/* + * Routine used to find and reserve the iSCSI Boot Format Table. The + * mapped address is set in the ibft_addr variable. + */ +#ifdef CONFIG_ISCSI_IBFT_FIND +unsigned long find_ibft_region(unsigned long *sizep); +#else +static inline unsigned long find_ibft_region(unsigned long *sizep) +{ + *sizep = 0; + return 0; +} +#endif + +#endif /* ISCSI_IBFT_H */ diff --git a/include/linux/isdn.h b/include/linux/isdn.h new file mode 100644 index 000000000..df97c8444 --- /dev/null +++ b/include/linux/isdn.h @@ -0,0 +1,473 @@ +/* $Id: isdn.h,v 1.125.2.3 2004/02/10 01:07:14 keil Exp $ + * + * Main header for the Linux ISDN subsystem (linklevel). + * + * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) + * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg + * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + */ +#ifndef __ISDN_H__ +#define __ISDN_H__ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ISDN_TTY_MAJOR 43 +#define ISDN_TTYAUX_MAJOR 44 +#define ISDN_MAJOR 45 + +/* The minor-devicenumbers for Channel 0 and 1 are used as arguments for + * physical Channel-Mapping, so they MUST NOT be changed without changing + * the correspondent code in isdn.c + */ + +#define ISDN_MINOR_B 0 +#define ISDN_MINOR_BMAX (ISDN_MAX_CHANNELS-1) +#define ISDN_MINOR_CTRL 64 +#define ISDN_MINOR_CTRLMAX (64 + (ISDN_MAX_CHANNELS-1)) +#define ISDN_MINOR_PPP 128 +#define ISDN_MINOR_PPPMAX (128 + (ISDN_MAX_CHANNELS-1)) +#define ISDN_MINOR_STATUS 255 + +#ifdef CONFIG_ISDN_PPP + +#ifdef CONFIG_ISDN_PPP_VJ +# include +#endif + +#include +#include + +#include +#endif + +#ifdef CONFIG_ISDN_X25 +# include +#endif + +#include + +#define ISDN_DRVIOCTL_MASK 0x7f /* Mask for Device-ioctl */ + +/* Until now unused */ +#define ISDN_SERVICE_VOICE 1 +#define ISDN_SERVICE_AB 1<<1 +#define ISDN_SERVICE_X21 1<<2 +#define ISDN_SERVICE_G4 1<<3 +#define ISDN_SERVICE_BTX 1<<4 +#define ISDN_SERVICE_DFUE 1<<5 +#define ISDN_SERVICE_X25 1<<6 +#define ISDN_SERVICE_TTX 1<<7 +#define ISDN_SERVICE_MIXED 1<<8 +#define ISDN_SERVICE_FW 1<<9 +#define ISDN_SERVICE_GTEL 1<<10 +#define ISDN_SERVICE_BTXN 1<<11 +#define ISDN_SERVICE_BTEL 1<<12 + +/* Macros checking plain usage */ +#define USG_NONE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NONE) +#define USG_RAW(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_RAW) +#define USG_MODEM(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) +#define USG_VOICE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) +#define USG_NET(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NET) +#define USG_FAX(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_FAX) +#define USG_OUTGOING(x) ((x & ISDN_USAGE_OUTGOING)==ISDN_USAGE_OUTGOING) +#define USG_MODEMORVOICE(x) (((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) || \ + ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) ) + +/* Timer-delays and scheduling-flags */ +#define ISDN_TIMER_RES 4 /* Main Timer-Resolution */ +#define ISDN_TIMER_02SEC (HZ/ISDN_TIMER_RES/5) /* Slow-Timer1 .2 sec */ +#define ISDN_TIMER_1SEC (HZ/ISDN_TIMER_RES) /* Slow-Timer2 1 sec */ +#define ISDN_TIMER_RINGING 5 /* tty RINGs = ISDN_TIMER_1SEC * this factor */ +#define ISDN_TIMER_KEEPINT 10 /* Cisco-Keepalive = ISDN_TIMER_1SEC * this factor */ +#define ISDN_TIMER_MODEMREAD 1 +#define ISDN_TIMER_MODEMPLUS 2 +#define ISDN_TIMER_MODEMRING 4 +#define ISDN_TIMER_MODEMXMIT 8 +#define ISDN_TIMER_NETDIAL 16 +#define ISDN_TIMER_NETHANGUP 32 +#define ISDN_TIMER_CARRIER 256 /* Wait for Carrier */ +#define ISDN_TIMER_FAST (ISDN_TIMER_MODEMREAD | ISDN_TIMER_MODEMPLUS | \ + ISDN_TIMER_MODEMXMIT) +#define ISDN_TIMER_SLOW (ISDN_TIMER_MODEMRING | ISDN_TIMER_NETHANGUP | \ + ISDN_TIMER_NETDIAL | ISDN_TIMER_CARRIER) + +/* Timeout-Values for isdn_net_dial() */ +#define ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) +#define ISDN_TIMER_DTIMEOUT15 (15*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) +#define ISDN_TIMER_DTIMEOUT60 (60*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) + +/* GLOBAL_FLAGS */ +#define ISDN_GLOBAL_STOPPED 1 + +/*=================== Start of ip-over-ISDN stuff =========================*/ + +/* Feature- and status-flags for a net-interface */ +#define ISDN_NET_CONNECTED 0x01 /* Bound to ISDN-Channel */ +#define ISDN_NET_SECURE 0x02 /* Accept calls from 
phonelist only */ +#define ISDN_NET_CALLBACK 0x04 /* activate callback */ +#define ISDN_NET_CBHUP 0x08 /* hangup before callback */ +#define ISDN_NET_CBOUT 0x10 /* remote machine does callback */ + +#define ISDN_NET_MAGIC 0x49344C02 /* for paranoia-checking */ + +/* Phone-list-element */ +typedef struct { + void *next; + char num[ISDN_MSNLEN]; +} isdn_net_phone; + +/* + Principles when extending structures for generic encapsulation protocol + ("concap") support: + - Stuff which is hardware specific (here i4l-specific) goes in + the netdev -> local structure (here: isdn_net_local) + - Stuff which is encapsulation protocol specific goes in the structure + which holds the linux device structure (here: isdn_net_device) +*/ + +/* Local interface-data */ +typedef struct isdn_net_local_s { + ulong magic; + struct net_device_stats stats; /* Ethernet Statistics */ + int isdn_device; /* Index to isdn-device */ + int isdn_channel; /* Index to isdn-channel */ + int ppp_slot; /* PPPD device slot number */ + int pre_device; /* Preselected isdn-device */ + int pre_channel; /* Preselected isdn-channel */ + int exclusive; /* If non-zero idx to reserved chan.*/ + int flags; /* Connection-flags */ + int dialretry; /* Counter for Dialout-retries */ + int dialmax; /* Max. Number of Dial-retries */ + int cbdelay; /* Delay before Callback starts */ + int dtimer; /* Timeout-counter for dialing */ + char msn[ISDN_MSNLEN]; /* MSNs/EAZs for this interface */ + u_char cbhup; /* Flag: Reject Call before Callback*/ + u_char dialstate; /* State for dialing */ + u_char p_encap; /* Packet encapsulation */ + /* 0 = Ethernet over ISDN */ + /* 1 = RAW-IP */ + /* 2 = IP with type field */ + u_char l2_proto; /* Layer-2-protocol */ + /* See ISDN_PROTO_L2..-constants in */ + /* isdnif.h */ + /* 0 = X75/LAPB with I-Frames */ + /* 1 = X75/LAPB with UI-Frames */ + /* 2 = X75/LAPB with BUI-Frames */ + /* 3 = HDLC */ + u_char l3_proto; /* Layer-3-protocol */ + /* See ISDN_PROTO_L3..-constants in */ + /* isdnif.h */ + /* 0 = Transparent */ + int huptimer; /* Timeout-counter for auto-hangup */ + int charge; /* Counter for charging units */ + ulong chargetime; /* Timer for Charging info */ + int hupflags; /* Flags for charge-unit-hangup: */ + /* bit0: chargeint is invalid */ + /* bit1: Getting charge-interval */ + /* bit2: Do charge-unit-hangup */ + /* bit3: Do hangup even on incoming */ + int outgoing; /* Flag: outgoing call */ + int onhtime; /* Time to keep link up */ + int chargeint; /* Interval between charge-infos */ + int onum; /* Flag: at least 1 outgoing number */ + int cps; /* current speed of this interface */ + int transcount; /* byte-counter for cps-calculation */ + int sqfull; /* Flag: netdev-queue overloaded */ + ulong sqfull_stamp; /* Start-Time of overload */ + ulong slavedelay; /* Dynamic bundling delaytime */ + int triggercps; /* BogoCPS needed for trigger slave */ + isdn_net_phone *phone[2]; /* List of remote-phonenumbers */ + /* phone[0] = Incoming Numbers */ + /* phone[1] = Outgoing Numbers */ + isdn_net_phone *dial; /* Pointer to dialed number */ + struct net_device *master; /* Ptr to Master device for slaves */ + struct net_device *slave; /* Ptr to Slave device for masters */ + struct isdn_net_local_s *next; /* Ptr to next link in bundle */ + struct isdn_net_local_s *last; /* Ptr to last link in bundle */ + struct isdn_net_dev_s *netdev; /* Ptr to netdev */ + struct sk_buff_head super_tx_queue; /* List of supervisory frames to */ + /* be transmitted asap */ + atomic_t frame_cnt; /* number of frames currently */ + 
/* queued in HL driver */ + /* Ptr to orig. hard_header_cache */ + spinlock_t xmit_lock; /* used to protect the xmit path of */ + /* a particular channel (including */ + /* the frame_cnt */ + + int pppbind; /* ippp device for bindings */ + int dialtimeout; /* How long shall we try on dialing? (jiffies) */ + int dialwait; /* How long shall we wait after failed attempt? (jiffies) */ + ulong dialstarted; /* jiffies of first dialing-attempt */ + ulong dialwait_timer; /* jiffies of earliest next dialing-attempt */ + int huptimeout; /* How long will the connection be up? (seconds) */ +#ifdef CONFIG_ISDN_X25 + struct concap_device_ops *dops; /* callbacks used by encapsulator */ +#endif + /* use an own struct for that in later versions */ + ulong cisco_myseq; /* Local keepalive seq. for Cisco */ + ulong cisco_mineseen; /* returned keepalive seq. from remote */ + ulong cisco_yourseq; /* Remote keepalive seq. for Cisco */ + int cisco_keepalive_period; /* keepalive period */ + ulong cisco_last_slarp_in; /* jiffie of last keepalive packet we received */ + char cisco_line_state; /* state of line according to keepalive packets */ + char cisco_debserint; /* debugging flag of cisco hdlc with slarp */ + struct timer_list cisco_timer; + struct work_struct tqueue; +} isdn_net_local; + +/* the interface itself */ +typedef struct isdn_net_dev_s { + isdn_net_local *local; + isdn_net_local *queue; /* circular list of all bundled + channels, which are currently + online */ + spinlock_t queue_lock; /* lock to protect queue */ + void *next; /* Pointer to next isdn-interface */ + struct net_device *dev; /* interface to upper levels */ +#ifdef CONFIG_ISDN_PPP + ippp_bundle * pb; /* pointer to the common bundle structure + * with the per-bundle data */ +#endif +#ifdef CONFIG_ISDN_X25 + struct concap_proto *cprot; /* connection oriented encapsulation protocol */ +#endif + +} isdn_net_dev; + +/*===================== End of ip-over-ISDN stuff ===========================*/ + +/*======================= Start of ISDN-tty stuff ===========================*/ + +#define ISDN_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */ +#define ISDN_SERIAL_XMIT_SIZE 1024 /* Default bufsize for write */ +#define ISDN_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */ + +#ifdef CONFIG_ISDN_AUDIO +/* For using sk_buffs with audio we need some private variables + * within each sk_buff. For this purpose, we declare a struct here, + * and put it always at the private skb->cb data array. A few macros help + * accessing the variables. + */ +typedef struct _isdn_audio_data { + unsigned short dle_count; + unsigned char lock; +} isdn_audio_data_t; + +#define ISDN_AUDIO_SKB_DLECOUNT(skb) (((isdn_audio_data_t *)&skb->cb[0])->dle_count) +#define ISDN_AUDIO_SKB_LOCK(skb) (((isdn_audio_data_t *)&skb->cb[0])->lock) +#endif + +/* Private data of AT-command-interpreter */ +typedef struct atemu { + u_char profile[ISDN_MODEM_NUMREG]; /* Modem-Regs. 
Profile 0 */ + u_char mdmreg[ISDN_MODEM_NUMREG]; /* Modem-Registers */ + char pmsn[ISDN_MSNLEN]; /* EAZ/MSNs Profile 0 */ + char msn[ISDN_MSNLEN]; /* EAZ/MSN */ + char plmsn[ISDN_LMSNLEN]; /* Listening MSNs Profile 0 */ + char lmsn[ISDN_LMSNLEN]; /* Listening MSNs */ + char cpn[ISDN_MSNLEN]; /* CalledPartyNumber on incoming call */ + char connmsg[ISDN_CMSGLEN]; /* CONNECT-Msg from HL-Driver */ +#ifdef CONFIG_ISDN_AUDIO + u_char vpar[10]; /* Voice-parameters */ + int lastDLE; /* Flag for voice-coding: DLE seen */ +#endif + int mdmcmdl; /* Length of Modem-Commandbuffer */ + int pluscount; /* Counter for +++ sequence */ + u_long lastplus; /* Timestamp of last + */ + int carrierwait; /* Seconds of carrier waiting */ + char mdmcmd[255]; /* Modem-Commandbuffer */ + unsigned int charge; /* Charge units of current connection */ +} atemu; + +/* Private data (similar to async_struct in ) */ +typedef struct modem_info { + int magic; + struct tty_port port; + int x_char; /* xon/xoff character */ + int mcr; /* Modem control register */ + int msr; /* Modem status register */ + int lsr; /* Line status register */ + int line; + int online; /* 1 = B-Channel is up, drop data */ + /* 2 = B-Channel is up, deliver d.*/ + int dialing; /* Dial in progress or ATA */ + int closing; + int rcvsched; /* Receive needs schedule */ + int isdn_driver; /* Index to isdn-driver */ + int isdn_channel; /* Index to isdn-channel */ + int drv_index; /* Index to dev->usage */ + int ncarrier; /* Flag: schedule NO CARRIER */ + unsigned char last_cause[8]; /* Last cause message */ + unsigned char last_num[ISDN_MSNLEN]; + /* Last phone-number */ + unsigned char last_l2; /* Last layer-2 protocol */ + unsigned char last_si; /* Last service */ + unsigned char last_lhup; /* Last hangup local? */ + unsigned char last_dir; /* Last direction (in or out) */ + struct timer_list nc_timer; /* Timer for delayed NO CARRIER */ + int send_outstanding;/* # of outstanding send-requests */ + int xmit_size; /* max. # of chars in xmit_buf */ + int xmit_count; /* # of chars in xmit_buf */ + struct sk_buff_head xmit_queue; /* transmit queue */ + atomic_t xmit_lock; /* Semaphore for isdn_tty_write */ +#ifdef CONFIG_ISDN_AUDIO + int vonline; /* Voice-channel status */ + /* Bit 0 = recording */ + /* Bit 1 = playback */ + /* Bit 2 = playback, DLE-ETX seen */ + struct sk_buff_head dtmf_queue; /* queue for dtmf results */ + void *adpcms; /* state for adpcm decompression */ + void *adpcmr; /* state for adpcm compression */ + void *dtmf_state; /* state for dtmf decoder */ + void *silence_state; /* state for silence detection */ +#endif +#ifdef CONFIG_ISDN_TTY_FAX + struct T30_s *fax; /* T30 Fax Group 3 data/interface */ + int faxonline; /* Fax-channel status */ +#endif + atemu emu; /* AT-emulator data */ + spinlock_t readlock; +} modem_info; + +#define ISDN_MODEM_WINSIZE 8 + +/* Description of one ISDN-tty */ +typedef struct _isdn_modem { + int refcount; /* Number of opens */ + struct tty_driver *tty_modem; /* tty-device */ + struct tty_struct *modem_table[ISDN_MAX_CHANNELS]; /* ?? 
copied from Orig */ + struct ktermios *modem_termios[ISDN_MAX_CHANNELS]; + struct ktermios *modem_termios_locked[ISDN_MAX_CHANNELS]; + modem_info info[ISDN_MAX_CHANNELS]; /* Private data */ +} isdn_modem_t; + +/*======================= End of ISDN-tty stuff ============================*/ + +/*======================== Start of V.110 stuff ============================*/ +#define V110_BUFSIZE 1024 + +typedef struct { + int nbytes; /* 1 Matrixbyte -> nbytes in stream */ + int nbits; /* Number of used bits in streambyte */ + unsigned char key; /* Bitmask in stream eg. 11 (nbits=2) */ + int decodelen; /* Amount of data in decodebuf */ + int SyncInit; /* Number of sync frames to send */ + unsigned char *OnlineFrame; /* Precalculated V110 idle frame */ + unsigned char *OfflineFrame; /* Precalculated V110 sync Frame */ + int framelen; /* Length of frames */ + int skbuser; /* Number of unacked userdata skbs */ + int skbidle; /* Number of unacked idle/sync skbs */ + int introducer; /* Local vars for decoder */ + int dbit; + unsigned char b; + int skbres; /* space to reserve in outgoing skb */ + int maxsize; /* maxbufsize of lowlevel driver */ + unsigned char *encodebuf; /* temporary buffer for encoding */ + unsigned char decodebuf[V110_BUFSIZE]; /* incomplete V110 matrices */ +} isdn_v110_stream; + +/*========================= End of V.110 stuff =============================*/ + +/*======================= Start of general stuff ===========================*/ + +typedef struct { + char *next; + char *private; +} infostruct; + +#define DRV_FLAG_RUNNING 1 +#define DRV_FLAG_REJBUS 2 +#define DRV_FLAG_LOADED 4 + +/* Description of hardware-level-driver */ +typedef struct _isdn_driver { + ulong online; /* Channel-Online flags */ + ulong flags; /* Misc driver Flags */ + int locks; /* Number of locks for this driver */ + int channels; /* Number of channels */ + wait_queue_head_t st_waitq; /* Wait-Queue for status-read's */ + int maxbufsize; /* Maximum Buffersize supported */ + unsigned long pktcount; /* Until now: unused */ + int stavail; /* Chars avail on Status-device */ + isdn_if *interface; /* Interface to driver */ + int *rcverr; /* Error-counters for B-Ch.-receive */ + int *rcvcount; /* Byte-counters for B-Ch.-receive */ +#ifdef CONFIG_ISDN_AUDIO + unsigned long DLEflag; /* Flags: Insert DLE at next read */ +#endif + struct sk_buff_head *rpqueue; /* Pointers to start of Rcv-Queue */ + wait_queue_head_t *rcv_waitq; /* Wait-Queues for B-Channel-Reads */ + wait_queue_head_t *snd_waitq; /* Wait-Queue for B-Channel-Send's */ + char msn2eaz[10][ISDN_MSNLEN]; /* Mapping-Table MSN->EAZ */ +} isdn_driver_t; + +/* Main driver-data */ +typedef struct isdn_devt { + struct module *owner; + spinlock_t lock; + unsigned short flags; /* Bitmapped Flags: */ + int drivers; /* Current number of drivers */ + int channels; /* Current number of channels */ + int net_verbose; /* Verbose-Flag */ + int modempoll; /* Flag: tty-read active */ + spinlock_t timerlock; + int tflags; /* Timer-Flags: */ + /* see ISDN_TIMER_..defines */ + int global_flags; + infostruct *infochain; /* List of open info-devs. 
*/ + wait_queue_head_t info_waitq; /* Wait-Queue for isdninfo */ + struct timer_list timer; /* Misc.-function Timer */ + int chanmap[ISDN_MAX_CHANNELS]; /* Map minor->device-channel */ + int drvmap[ISDN_MAX_CHANNELS]; /* Map minor->driver-index */ + int usage[ISDN_MAX_CHANNELS]; /* Used by tty/ip/voice */ + char num[ISDN_MAX_CHANNELS][ISDN_MSNLEN]; + /* Remote number of active ch.*/ + int m_idx[ISDN_MAX_CHANNELS]; /* Index for mdm.... */ + isdn_driver_t *drv[ISDN_MAX_DRIVERS]; /* Array of drivers */ + isdn_net_dev *netdev; /* Linked list of net-if's */ + char drvid[ISDN_MAX_DRIVERS][20];/* Driver-ID */ + struct task_struct *profd; /* For iprofd */ + isdn_modem_t mdm; /* tty-driver-data */ + isdn_net_dev *rx_netdev[ISDN_MAX_CHANNELS]; /* rx netdev-pointers */ + isdn_net_dev *st_netdev[ISDN_MAX_CHANNELS]; /* stat netdev-pointers */ + ulong ibytes[ISDN_MAX_CHANNELS]; /* Statistics incoming bytes */ + ulong obytes[ISDN_MAX_CHANNELS]; /* Statistics outgoing bytes */ + int v110emu[ISDN_MAX_CHANNELS]; /* V.110 emulator-mode 0=none */ + atomic_t v110use[ISDN_MAX_CHANNELS]; /* Usage-Semaphore for stream */ + isdn_v110_stream *v110[ISDN_MAX_CHANNELS]; /* V.110 private data */ + struct mutex mtx; /* serialize list access*/ + unsigned long global_features; +} isdn_dev; + +extern isdn_dev *dev; + + +#endif /* __ISDN_H__ */ diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h new file mode 100644 index 000000000..d75e1ad72 --- /dev/null +++ b/include/linux/isdn/capilli.h @@ -0,0 +1,113 @@ +/* $Id: capilli.h,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $ + * + * Kernel CAPI 2.0 Driver Interface for Linux + * + * Copyright 1999 by Carsten Paeth + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ + +#ifndef __CAPILLI_H__ +#define __CAPILLI_H__ + +#include +#include +#include +#include + +typedef struct capiloaddatapart { + int user; /* data in userspace ? 
*/ + int len; + unsigned char *data; +} capiloaddatapart; + +typedef struct capiloaddata { + capiloaddatapart firmware; + capiloaddatapart configuration; +} capiloaddata; + +typedef struct capicardparams { + unsigned int port; + unsigned irq; + int cardtype; + int cardnr; + unsigned int membase; +} capicardparams; + +struct capi_ctr { + /* filled in before calling attach_capi_ctr */ + struct module *owner; + void *driverdata; /* driver specific */ + char name[32]; /* name of controller */ + char *driver_name; /* name of driver */ + int (*load_firmware)(struct capi_ctr *, capiloaddata *); + void (*reset_ctr)(struct capi_ctr *); + void (*register_appl)(struct capi_ctr *, u16 appl, + capi_register_params *); + void (*release_appl)(struct capi_ctr *, u16 appl); + u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb); + + char *(*procinfo)(struct capi_ctr *); + int (*proc_show)(struct seq_file *, void *); + + /* filled in before calling ready callback */ + u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */ + capi_version version; /* CAPI_GET_VERSION */ + capi_profile profile; /* CAPI_GET_PROFILE */ + u8 serial[CAPI_SERIAL_LEN]; /* CAPI_GET_SERIAL */ + + /* management information for kcapi */ + + unsigned long nrecvctlpkt; + unsigned long nrecvdatapkt; + unsigned long nsentctlpkt; + unsigned long nsentdatapkt; + + int cnr; /* controller number */ + unsigned short state; /* controller state */ + int blocked; /* output blocked */ + int traceflag; /* capi trace */ + wait_queue_head_t state_wait_queue; + + struct proc_dir_entry *procent; + char procfn[128]; +}; + +int attach_capi_ctr(struct capi_ctr *); +int detach_capi_ctr(struct capi_ctr *); + +void capi_ctr_ready(struct capi_ctr * card); +void capi_ctr_down(struct capi_ctr * card); +void capi_ctr_suspend_output(struct capi_ctr * card); +void capi_ctr_resume_output(struct capi_ctr * card); +void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb); + +// --------------------------------------------------------------------------- +// needed for AVM capi drivers + +struct capi_driver { + char name[32]; /* driver name */ + char revision[32]; + + int (*add_card)(struct capi_driver *driver, capicardparams *data); + + /* management information for kcapi */ + struct list_head list; +}; + +void register_capi_driver(struct capi_driver *driver); +void unregister_capi_driver(struct capi_driver *driver); + +// --------------------------------------------------------------------------- +// library functions for use by hardware controller drivers + +void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize); +void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci); +void capilib_release_appl(struct list_head *head, u16 applid); +void capilib_release(struct list_head *head); +void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid); +u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid); + +#endif /* __CAPILLI_H__ */ diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h new file mode 100644 index 000000000..44bd6046e --- /dev/null +++ b/include/linux/isdn/capiutil.h @@ -0,0 +1,516 @@ +/* $Id: capiutil.h,v 1.5.6.2 2001/09/23 22:24:33 kai Exp $ + * + * CAPI 2.0 defines & types + * + * From CAPI 2.0 Development Kit AVM 1995 (msg.c) + * Rewritten for Linux 1996 by Carsten Paeth + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by 
reference. + * + */ + +#ifndef __CAPIUTIL_H__ +#define __CAPIUTIL_H__ + +#include + +#define CAPIMSG_BASELEN 8 +#define CAPIMSG_U8(m, off) (m[off]) +#define CAPIMSG_U16(m, off) (m[off]|(m[(off)+1]<<8)) +#define CAPIMSG_U32(m, off) (m[off]|(m[(off)+1]<<8)|(m[(off)+2]<<16)|(m[(off)+3]<<24)) +#define CAPIMSG_LEN(m) CAPIMSG_U16(m,0) +#define CAPIMSG_APPID(m) CAPIMSG_U16(m,2) +#define CAPIMSG_COMMAND(m) CAPIMSG_U8(m,4) +#define CAPIMSG_SUBCOMMAND(m) CAPIMSG_U8(m,5) +#define CAPIMSG_CMD(m) (((m[4])<<8)|(m[5])) +#define CAPIMSG_MSGID(m) CAPIMSG_U16(m,6) +#define CAPIMSG_CONTROLLER(m) (m[8] & 0x7f) +#define CAPIMSG_CONTROL(m) CAPIMSG_U32(m, 8) +#define CAPIMSG_NCCI(m) CAPIMSG_CONTROL(m) +#define CAPIMSG_DATALEN(m) CAPIMSG_U16(m,16) /* DATA_B3_REQ */ + +static inline void capimsg_setu8(void *m, int off, __u8 val) +{ + ((__u8 *)m)[off] = val; +} + +static inline void capimsg_setu16(void *m, int off, __u16 val) +{ + ((__u8 *)m)[off] = val & 0xff; + ((__u8 *)m)[off+1] = (val >> 8) & 0xff; +} + +static inline void capimsg_setu32(void *m, int off, __u32 val) +{ + ((__u8 *)m)[off] = val & 0xff; + ((__u8 *)m)[off+1] = (val >> 8) & 0xff; + ((__u8 *)m)[off+2] = (val >> 16) & 0xff; + ((__u8 *)m)[off+3] = (val >> 24) & 0xff; +} + +#define CAPIMSG_SETLEN(m, len) capimsg_setu16(m, 0, len) +#define CAPIMSG_SETAPPID(m, applid) capimsg_setu16(m, 2, applid) +#define CAPIMSG_SETCOMMAND(m,cmd) capimsg_setu8(m, 4, cmd) +#define CAPIMSG_SETSUBCOMMAND(m, cmd) capimsg_setu8(m, 5, cmd) +#define CAPIMSG_SETMSGID(m, msgid) capimsg_setu16(m, 6, msgid) +#define CAPIMSG_SETCONTROL(m, contr) capimsg_setu32(m, 8, contr) +#define CAPIMSG_SETDATALEN(m, len) capimsg_setu16(m, 16, len) + +/*----- basic-type definitions -----*/ + +typedef __u8 *_cstruct; + +typedef enum { + CAPI_COMPOSE, + CAPI_DEFAULT +} _cmstruct; + +/* + The _cmsg structure contains all possible CAPI 2.0 parameter. + All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE + assembles the parameter and builds CAPI2.0 conform messages. 
+ CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the + parameter in the _cmsg structure + */ + +typedef struct { + /* Header */ + __u16 ApplId; + __u8 Command; + __u8 Subcommand; + __u16 Messagenumber; + + /* Parameter */ + union { + __u32 adrController; + __u32 adrPLCI; + __u32 adrNCCI; + } adr; + + _cmstruct AdditionalInfo; + _cstruct B1configuration; + __u16 B1protocol; + _cstruct B2configuration; + __u16 B2protocol; + _cstruct B3configuration; + __u16 B3protocol; + _cstruct BC; + _cstruct BChannelinformation; + _cmstruct BProtocol; + _cstruct CalledPartyNumber; + _cstruct CalledPartySubaddress; + _cstruct CallingPartyNumber; + _cstruct CallingPartySubaddress; + __u32 CIPmask; + __u32 CIPmask2; + __u16 CIPValue; + __u32 Class; + _cstruct ConnectedNumber; + _cstruct ConnectedSubaddress; + __u32 Data; + __u16 DataHandle; + __u16 DataLength; + _cstruct FacilityConfirmationParameter; + _cstruct Facilitydataarray; + _cstruct FacilityIndicationParameter; + _cstruct FacilityRequestParameter; + __u16 FacilitySelector; + __u16 Flags; + __u32 Function; + _cstruct HLC; + __u16 Info; + _cstruct InfoElement; + __u32 InfoMask; + __u16 InfoNumber; + _cstruct Keypadfacility; + _cstruct LLC; + _cstruct ManuData; + __u32 ManuID; + _cstruct NCPI; + __u16 Reason; + __u16 Reason_B3; + __u16 Reject; + _cstruct Useruserdata; + + /* intern */ + unsigned l, p; + unsigned char *par; + __u8 *m; + + /* buffer to construct message */ + __u8 buf[180]; + +} _cmsg; + +/* + * capi_cmsg2message() assembles the parameter from _cmsg to a CAPI 2.0 + * conform message + */ +unsigned capi_cmsg2message(_cmsg * cmsg, __u8 * msg); + +/* + * capi_message2cmsg disassembles a CAPI message an writes the parameter + * into _cmsg for easy access + */ +unsigned capi_message2cmsg(_cmsg * cmsg, __u8 * msg); + +/* + * capi_cmsg_header() fills the _cmsg structure with default values, so only + * parameter with non default values must be changed before sending the + * message. 
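+ *
+ * Illustrative sequence (a sketch added for clarity, not original text;
+ * applid, msgnr, plci and msgbuf are invented names):
+ *
+ *	_cmsg cmsg;
+ *	capi_cmsg_header(&cmsg, applid, 0x01, 0x80, msgnr, plci);	(an ALERT_REQ)
+ *	capi_cmsg2message(&cmsg, msgbuf);
+ *
+ * The capi_fill_*_REQ() helpers further down follow the same pattern:
+ * capi_cmsg_header() first, then only the non-default parameters.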
+ */ +unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId, + __u8 _Command, __u8 _Subcommand, + __u16 _Messagenumber, __u32 _Controller); + +/*-----------------------------------------------------------------------*/ + +/* + * Debugging / Tracing functions + */ + +char *capi_cmd2str(__u8 cmd, __u8 subcmd); + +typedef struct { + u_char *buf; + u_char *p; + size_t size; + size_t pos; +} _cdebbuf; + +#define CDEBUG_SIZE 1024 +#define CDEBUG_GSIZE 4096 + +void cdebbuf_free(_cdebbuf *cdb); +int cdebug_init(void); +void cdebug_exit(void); + +_cdebbuf *capi_cmsg2str(_cmsg *cmsg); +_cdebbuf *capi_message2str(__u8 *msg); + +/*-----------------------------------------------------------------------*/ + +static inline void capi_cmsg_answer(_cmsg * cmsg) +{ + cmsg->Subcommand |= 0x01; +} + +/*-----------------------------------------------------------------------*/ + +static inline void capi_fill_CONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct NCPI) +{ + capi_cmsg_header(cmsg, ApplId, 0x82, 0x80, Messagenumber, adr); + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_FACILITY_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 FacilitySelector, + _cstruct FacilityRequestParameter) +{ + capi_cmsg_header(cmsg, ApplId, 0x80, 0x80, Messagenumber, adr); + cmsg->FacilitySelector = FacilitySelector; + cmsg->FacilityRequestParameter = FacilityRequestParameter; +} + +static inline void capi_fill_INFO_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct CalledPartyNumber, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + capi_cmsg_header(cmsg, ApplId, 0x08, 0x80, Messagenumber, adr); + cmsg->CalledPartyNumber = CalledPartyNumber; + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_LISTEN_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 InfoMask, + __u32 CIPmask, + __u32 CIPmask2, + _cstruct CallingPartyNumber, + _cstruct CallingPartySubaddress) +{ + capi_cmsg_header(cmsg, ApplId, 0x05, 0x80, Messagenumber, adr); + cmsg->InfoMask = InfoMask; + cmsg->CIPmask = CIPmask; + cmsg->CIPmask2 = CIPmask2; + cmsg->CallingPartyNumber = CallingPartyNumber; + cmsg->CallingPartySubaddress = CallingPartySubaddress; +} + +static inline void capi_fill_ALERT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + capi_cmsg_header(cmsg, ApplId, 0x01, 0x80, Messagenumber, adr); + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_CONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 CIPValue, + _cstruct CalledPartyNumber, + _cstruct CallingPartyNumber, + _cstruct CalledPartySubaddress, + _cstruct CallingPartySubaddress, + __u16 B1protocol, + __u16 B2protocol, + __u16 B3protocol, + _cstruct B1configuration, + _cstruct B2configuration, + _cstruct B3configuration, + _cstruct BC, + _cstruct LLC, + _cstruct HLC, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + + capi_cmsg_header(cmsg, ApplId, 0x02, 0x80, Messagenumber, 
adr); + cmsg->CIPValue = CIPValue; + cmsg->CalledPartyNumber = CalledPartyNumber; + cmsg->CallingPartyNumber = CallingPartyNumber; + cmsg->CalledPartySubaddress = CalledPartySubaddress; + cmsg->CallingPartySubaddress = CallingPartySubaddress; + cmsg->B1protocol = B1protocol; + cmsg->B2protocol = B2protocol; + cmsg->B3protocol = B3protocol; + cmsg->B1configuration = B1configuration; + cmsg->B2configuration = B2configuration; + cmsg->B3configuration = B3configuration; + cmsg->BC = BC; + cmsg->LLC = LLC; + cmsg->HLC = HLC; + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_DATA_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 Data, + __u16 DataLength, + __u16 DataHandle, + __u16 Flags) +{ + + capi_cmsg_header(cmsg, ApplId, 0x86, 0x80, Messagenumber, adr); + cmsg->Data = Data; + cmsg->DataLength = DataLength; + cmsg->DataHandle = DataHandle; + cmsg->Flags = Flags; +} + +static inline void capi_fill_DISCONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + + capi_cmsg_header(cmsg, ApplId, 0x04, 0x80, Messagenumber, adr); + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_DISCONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct NCPI) +{ + + capi_cmsg_header(cmsg, ApplId, 0x84, 0x80, Messagenumber, adr); + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_MANUFACTURER_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 ManuID, + __u32 Class, + __u32 Function, + _cstruct ManuData) +{ + + capi_cmsg_header(cmsg, ApplId, 0xff, 0x80, Messagenumber, adr); + cmsg->ManuID = ManuID; + cmsg->Class = Class; + cmsg->Function = Function; + cmsg->ManuData = ManuData; +} + +static inline void capi_fill_RESET_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct NCPI) +{ + + capi_cmsg_header(cmsg, ApplId, 0x87, 0x80, Messagenumber, adr); + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_SELECT_B_PROTOCOL_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 B1protocol, + __u16 B2protocol, + __u16 B3protocol, + _cstruct B1configuration, + _cstruct B2configuration, + _cstruct B3configuration) +{ + + capi_cmsg_header(cmsg, ApplId, 0x41, 0x80, Messagenumber, adr); + cmsg->B1protocol = B1protocol; + cmsg->B2protocol = B2protocol; + cmsg->B3protocol = B3protocol; + cmsg->B1configuration = B1configuration; + cmsg->B2configuration = B2configuration; + cmsg->B3configuration = B3configuration; +} + +static inline void capi_fill_CONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 Reject, + __u16 B1protocol, + __u16 B2protocol, + __u16 B3protocol, + _cstruct B1configuration, + _cstruct B2configuration, + _cstruct B3configuration, + _cstruct ConnectedNumber, + _cstruct ConnectedSubaddress, + _cstruct LLC, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + capi_cmsg_header(cmsg, ApplId, 0x02, 0x83, Messagenumber, adr); + cmsg->Reject = Reject; + cmsg->B1protocol = B1protocol; + cmsg->B2protocol = B2protocol; + cmsg->B3protocol = B3protocol; + 
cmsg->B1configuration = B1configuration; + cmsg->B2configuration = B2configuration; + cmsg->B3configuration = B3configuration; + cmsg->ConnectedNumber = ConnectedNumber; + cmsg->ConnectedSubaddress = ConnectedSubaddress; + cmsg->LLC = LLC; + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_CONNECT_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x03, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_CONNECT_B3_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x83, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_CONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 Reject, + _cstruct NCPI) +{ + capi_cmsg_header(cmsg, ApplId, 0x82, 0x83, Messagenumber, adr); + cmsg->Reject = Reject; + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_CONNECT_B3_T90_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x88, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_DATA_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 DataHandle) +{ + + capi_cmsg_header(cmsg, ApplId, 0x86, 0x83, Messagenumber, adr); + cmsg->DataHandle = DataHandle; +} + +static inline void capi_fill_DISCONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x84, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_DISCONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x04, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_FACILITY_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 FacilitySelector) +{ + + capi_cmsg_header(cmsg, ApplId, 0x80, 0x83, Messagenumber, adr); + cmsg->FacilitySelector = FacilitySelector; +} + +static inline void capi_fill_INFO_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x08, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_MANUFACTURER_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 ManuID, + __u32 Class, + __u32 Function, + _cstruct ManuData) +{ + + capi_cmsg_header(cmsg, ApplId, 0xff, 0x83, Messagenumber, adr); + cmsg->ManuID = ManuID; + cmsg->Class = Class; + cmsg->Function = Function; + cmsg->ManuData = ManuData; +} + +static inline void capi_fill_RESET_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x87, 0x83, Messagenumber, adr); +} + +#endif /* __CAPIUTIL_H__ */ diff --git a/include/linux/isdn/hdlc.h b/include/linux/isdn/hdlc.h new file mode 100644 index 000000000..96521370c --- /dev/null +++ b/include/linux/isdn/hdlc.h @@ -0,0 +1,82 @@ +/* + * hdlc.h -- General purpose ISDN HDLC decoder. + * + * Implementation of a HDLC decoder/encoder in software. + * Necessary because some ISDN devices don't have HDLC + * controllers. 
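+ *
+ * Illustrative receive path (a sketch added for clarity, not upstream
+ * text; src, slen, dst and deliver_frame() are placeholder names):
+ *
+ *	struct isdnhdlc_vars hdlc;
+ *	int count, len;
+ *
+ *	isdnhdlc_rcv_init(&hdlc, 0);
+ *	len = isdnhdlc_decode(&hdlc, src, slen, &count, dst, sizeof(dst));
+ *	if (len > 0)
+ *		deliver_frame(dst, len);	(a complete frame was decoded)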
+ * + * Copyright (C) + * 2009 Karsten Keil + * 2002 Wolfgang Mües + * 2001 Frode Isaksen + * 2001 Kai Germaschewski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __ISDNHDLC_H__ +#define __ISDNHDLC_H__ + +struct isdnhdlc_vars { + int bit_shift; + int hdlc_bits1; + int data_bits; + int ffbit_shift; /* encoding only */ + int state; + int dstpos; + + u16 crc; + + u8 cbin; + u8 shift_reg; + u8 ffvalue; + + /* set if transferring data */ + u32 data_received:1; + /* set if D channel (send idle instead of flags) */ + u32 dchannel:1; + /* set if 56K adaptation */ + u32 do_adapt56:1; + /* set if in closing phase (need to send CRC + flag) */ + u32 do_closing:1; + /* set if data is bitreverse */ + u32 do_bitreverse:1; +}; + +/* Feature Flags */ +#define HDLC_56KBIT 0x01 +#define HDLC_DCHANNEL 0x02 +#define HDLC_BITREVERSE 0x04 + +/* + The return value from isdnhdlc_decode is + the frame length, 0 if no complete frame was decoded, + or a negative error number +*/ +#define HDLC_FRAMING_ERROR 1 +#define HDLC_CRC_ERROR 2 +#define HDLC_LENGTH_ERROR 3 + +extern void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features); + +extern int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src, + int slen, int *count, u8 *dst, int dsize); + +extern void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features); + +extern int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, + u16 slen, int *count, u8 *dst, int dsize); + +#endif /* __ISDNHDLC_H__ */ diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h new file mode 100644 index 000000000..19ab361f9 --- /dev/null +++ b/include/linux/isdn_divertif.h @@ -0,0 +1,35 @@ +/* $Id: isdn_divertif.h,v 1.4.6.1 2001/09/23 22:25:05 kai Exp $ + * + * Header for the diversion supplementary interface for i4l. + * + * Author Werner Cornelius (werner@titro.de) + * Copyright by Werner Cornelius (werner@titro.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + */ +#ifndef _LINUX_ISDN_DIVERTIF_H +#define _LINUX_ISDN_DIVERTIF_H + +#include +#include +#include + +/***************************************************************/ +/* structure exchanging data between isdn hl and divert module */ +/***************************************************************/ +typedef struct + { ulong if_magic; /* magic info and version */ + int cmd; /* command */ + int (*stat_callback)(isdn_ctrl *); /* supplied by divert module when calling */ + int (*ll_cmd)(isdn_ctrl *); /* supplied by hl on return */ + char * (*drv_to_name)(int); /* map a driver id to name, supplied by hl */ + int (*name_to_drv)(char *); /* map a driver id to name, supplied by hl */ + } isdn_divert_if; + +/*********************/ +/* function register */ +/*********************/ +extern int DIVERT_REG_NAME(isdn_divert_if *); +#endif /* _LINUX_ISDN_DIVERTIF_H */ diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h new file mode 100644 index 000000000..a0070c6df --- /dev/null +++ b/include/linux/isdn_ppp.h @@ -0,0 +1,194 @@ +/* Linux ISDN subsystem, sync PPP, interface to ipppd + * + * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de) + * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg + * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) + * Copyright 2000-2002 by Kai Germaschewski (kai@germaschewski.name) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ +#ifndef _LINUX_ISDN_PPP_H +#define _LINUX_ISDN_PPP_H + + + + +#ifdef CONFIG_IPPP_FILTER +#include +#endif +#include + +#define DECOMP_ERR_NOMEM (-10) + +#define MP_END_FRAG 0x40 +#define MP_BEGIN_FRAG 0x80 + +#define MP_MAX_QUEUE_LEN 16 + +/* + * We need a way for the decompressor to influence the generation of CCP + * Reset-Requests in a variety of ways. The decompressor is already returning + * a lot of information (generated skb length, error conditions) so we use + * another parameter. This parameter is a pointer to a structure which is + * to be marked valid by the decompressor and only in this case is ever used. + * Furthermore, the only case where this data is used is when the decom- + * pressor returns DECOMP_ERROR. + * + * We use this same struct for the reset entry of the compressor to commu- + * nicate to its caller how to deal with sending of a Reset Ack. In this + * case, expra is not used, but other options still apply (suppressing + * sending with rsend, appending arbitrary data, etc). + */ + +#define IPPP_RESET_MAXDATABYTES 32 + +struct isdn_ppp_resetparams { + unsigned char valid:1; /* rw Is this structure filled at all ? */ + unsigned char rsend:1; /* rw Should we send one at all ? */ + unsigned char idval:1; /* rw Is the id field valid ? */ + unsigned char dtval:1; /* rw Is the data field valid ? */ + unsigned char expra:1; /* rw Is an Ack expected for this Req ? 
*/ + unsigned char id; /* wo Send CCP ResetReq with this id */ + unsigned short maxdlen; /* ro Max bytes to be stored in data field */ + unsigned short dlen; /* rw Bytes stored in data field */ + unsigned char *data; /* wo Data for ResetReq info field */ +}; + +/* + * this is an 'old friend' from ppp-comp.h under a new name + * check the original include for more information + */ +struct isdn_ppp_compressor { + struct isdn_ppp_compressor *next, *prev; + struct module *owner; + int num; /* CCP compression protocol number */ + + void *(*alloc) (struct isdn_ppp_comp_data *); + void (*free) (void *state); + int (*init) (void *state, struct isdn_ppp_comp_data *, + int unit,int debug); + + /* The reset entry needs to get more exact information about the + ResetReq or ResetAck it was called with. The parameters are + obvious. If reset is called without a Req or Ack frame which + could be handed into it, code MUST be set to 0. Using rsparm, + the reset entry can control if and how a ResetAck is returned. */ + + void (*reset) (void *state, unsigned char code, unsigned char id, + unsigned char *data, unsigned len, + struct isdn_ppp_resetparams *rsparm); + + int (*compress) (void *state, struct sk_buff *in, + struct sk_buff *skb_out, int proto); + + int (*decompress) (void *state,struct sk_buff *in, + struct sk_buff *skb_out, + struct isdn_ppp_resetparams *rsparm); + + void (*incomp) (void *state, struct sk_buff *in,int proto); + void (*stat) (void *state, struct compstat *stats); +}; + +extern int isdn_ppp_register_compressor(struct isdn_ppp_compressor *); +extern int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *); +extern int isdn_ppp_dial_slave(char *); +extern int isdn_ppp_hangup_slave(char *); + +typedef struct { + unsigned long seqerrs; + unsigned long frame_drops; + unsigned long overflows; + unsigned long max_queue_len; +} isdn_mppp_stats; + +typedef struct { + int mp_mrru; /* unused */ + struct sk_buff * frags; /* fragments sl list -- use skb->next */ + long frames; /* number of frames in the frame list */ + unsigned int seq; /* last processed packet seq #: any packets + * with smaller seq # will be dropped + * unconditionally */ + spinlock_t lock; + int ref_ct; + /* statistics */ + isdn_mppp_stats stats; +} ippp_bundle; + +#define NUM_RCV_BUFFS 64 + +struct ippp_buf_queue { + struct ippp_buf_queue *next; + struct ippp_buf_queue *last; + char *buf; /* NULL here indicates end of queue */ + int len; +}; + +/* The data structure for one CCP reset transaction */ +enum ippp_ccp_reset_states { + CCPResetIdle, + CCPResetSentReq, + CCPResetRcvdReq, + CCPResetSentAck, + CCPResetRcvdAck +}; + +struct ippp_ccp_reset_state { + enum ippp_ccp_reset_states state; /* State of this transaction */ + struct ippp_struct *is; /* Backlink to device stuff */ + unsigned char id; /* Backlink id index */ + unsigned char ta:1; /* The timer is active (flag) */ + unsigned char expra:1; /* We expect a ResetAck at all */ + int dlen; /* Databytes stored in data */ + struct timer_list timer; /* For timeouts/retries */ + /* This is a hack but seems sufficient for the moment. We do not want + to have this be yet another allocation for some bytes, it is more + memory management overhead than the whole mess is worth. */ + unsigned char data[IPPP_RESET_MAXDATABYTES]; +}; + +/* The data structure keeping track of the currently outstanding CCP Reset + transactions. 
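+   (Added note, inferred from the fields below rather than original text:
+   rs[] is indexed by the 8-bit CCP Reset id, i.e. rs[id] is the pending
+   transaction using that id, and lastid remembers the id most recently
+   handed out by the engine.)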
*/ +struct ippp_ccp_reset { + struct ippp_ccp_reset_state *rs[256]; /* One per possible id */ + unsigned char lastid; /* Last id allocated by the engine */ +}; + +struct ippp_struct { + struct ippp_struct *next_link; + int state; + spinlock_t buflock; + struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */ + struct ippp_buf_queue *first; /* pointer to (current) first packet */ + struct ippp_buf_queue *last; /* pointer to (current) last used packet in queue */ + wait_queue_head_t wq; + struct task_struct *tk; + unsigned int mpppcfg; + unsigned int pppcfg; + unsigned int mru; + unsigned int mpmru; + unsigned int mpmtu; + unsigned int maxcid; + struct isdn_net_local_s *lp; + int unit; + int minor; + unsigned int last_link_seqno; + long mp_seqno; +#ifdef CONFIG_ISDN_PPP_VJ + unsigned char *cbuf; + struct slcompress *slcomp; +#endif +#ifdef CONFIG_IPPP_FILTER + struct bpf_prog *pass_filter; /* filter for packets to pass */ + struct bpf_prog *active_filter; /* filter for pkts to reset idle */ +#endif + unsigned long debug; + struct isdn_ppp_compressor *compressor,*decompressor; + struct isdn_ppp_compressor *link_compressor,*link_decompressor; + void *decomp_stat,*comp_stat,*link_decomp_stat,*link_comp_stat; + struct ippp_ccp_reset *reset; /* Allocated on demand, may never be needed */ + unsigned long compflags; +}; + +#endif /* _LINUX_ISDN_PPP_H */ diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h new file mode 100644 index 000000000..8d80fdc68 --- /dev/null +++ b/include/linux/isdnif.h @@ -0,0 +1,505 @@ +/* $Id: isdnif.h,v 1.43.2.2 2004/01/12 23:08:35 keil Exp $ + * + * Linux ISDN subsystem + * Definition of the interface between the subsystem and its low-level drivers. + * + * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) + * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ +#ifndef __ISDNIF_H__ +#define __ISDNIF_H__ + + +#include +#include + +/***************************************************************************/ +/* Extensions made by Werner Cornelius (werner@ikt.de) */ +/* */ +/* The proceed command holds a incoming call in a state to leave processes */ +/* enough time to check whether ist should be accepted. */ +/* The PROT_IO Command extends the interface to make protocol dependent */ +/* features available (call diversion, call waiting...). */ +/* */ +/* The PROT_IO Command is executed with the desired driver id and the arg */ +/* parameter coded as follows: */ +/* The lower 8 bits of arg contain the desired protocol from ISDN_PTYPE */ +/* definitions. The upper 24 bits represent the protocol specific cmd/stat.*/ +/* Any additional data is protocol and command specific. */ +/* This mechanism also applies to the statcallb callback STAT_PROT. */ +/* */ +/* This suggested extension permits an easy expansion of protocol specific */ +/* handling. Extensions may be added at any time without changing the HL */ +/* driver code and not getting conflicts without certifications. */ +/* The well known CAPI 2.0 interface handles such extensions in a similar */ +/* way. Perhaps a protocol specific module may be added and separately */ +/* loaded and linked to the basic isdn module for handling. 
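+   Added illustration (not part of the original header): with this encoding
+   a DSS1 supplementary-service invocation is requested by passing
+	arg = (0x00 << 8) | ISDN_PTYPE_EURO
+   which is exactly the DSS1_CMD_INVOKE value defined below; the low byte
+   still names the protocol, the upper bits the protocol specific command.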
*/ +/***************************************************************************/ + +/*****************/ +/* DSS1 commands */ +/*****************/ +#define DSS1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_EURO) /* invoke a supplementary service */ +#define DSS1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_EURO) /* abort a invoke cmd */ + +/*******************************/ +/* DSS1 Status callback values */ +/*******************************/ +#define DSS1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_EURO) /* Result for invocation */ +#define DSS1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_EURO) /* Error Return for invocation */ +#define DSS1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_EURO) /* Deliver invoke broadcast info */ + + +/*********************************************************************/ +/* structures for DSS1 commands and callback */ +/* */ +/* An action is invoked by sending a DSS1_CMD_INVOKE. The ll_id, proc*/ +/* timeout, datalen and data fields must be set before calling. */ +/* */ +/* The return value is a positive hl_id value also delivered in the */ +/* hl_id field. A value of zero signals no more left hl_id capacitys.*/ +/* A negative return value signals errors in LL. So if the return */ +/* value is <= 0 no action in LL will be taken -> request ignored */ +/* */ +/* The timeout field must be filled with a positive value specifying */ +/* the amount of time the INVOKED process waits for a reaction from */ +/* the network. */ +/* If a response (either error or result) is received during this */ +/* intervall, a reporting callback is initiated and the process will */ +/* be deleted, the hl identifier will be freed. */ +/* If no response is received during the specified intervall, a error*/ +/* callback is initiated with timeout set to -1 and a datalen set */ +/* to 0. */ +/* If timeout is set to a value <= 0 during INVOCATION the process is*/ +/* immediately deleted after sending the data. No callback occurs ! */ +/* */ +/* A currently waiting process may be aborted with INVOKE_ABORT. No */ +/* callback will occur when a process has been aborted. */ +/* */ +/* Broadcast invoke frames from the network are reported via the */ +/* STAT_INVOKE_BRD callback. The ll_id is set to 0, the other fields */ +/* are supplied by the network and not by the HL. 
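The arg layout described above (protocol type in the low byte, protocol specific cmd/stat in the remaining bits) can be checked directly against the DSS1 constants. A small sketch; the helper name is invented:

#include <linux/isdnif.h>

/* decode the arg word carried with a protocol specific command or status */
static int example_is_dss1_invoke_result(unsigned long arg)
{
	int proto  = arg & 0xff;	/* ISDN_PTYPE_* value, low 8 bits        */
	int opcode = arg >> 8;		/* protocol specific cmd/stat, upper bits */

	/* equivalent to: arg == DSS1_STAT_INVOKE_RES */
	return proto == ISDN_PTYPE_EURO && opcode == 0x80;
}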
*/ +/*********************************************************************/ + +/*****************/ +/* NI1 commands */ +/*****************/ +#define NI1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_NI1) /* invoke a supplementary service */ +#define NI1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_NI1) /* abort a invoke cmd */ + +/*******************************/ +/* NI1 Status callback values */ +/*******************************/ +#define NI1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_NI1) /* Result for invocation */ +#define NI1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_NI1) /* Error Return for invocation */ +#define NI1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_NI1) /* Deliver invoke broadcast info */ + +typedef struct + { ulong ll_id; /* ID supplied by LL when executing */ + /* a command and returned by HL for */ + /* INVOKE_RES and INVOKE_ERR */ + int hl_id; /* ID supplied by HL when called */ + /* for executing a cmd and delivered */ + /* for results and errors */ + /* must be supplied by LL when aborting*/ + int proc; /* invoke procedure used by CMD_INVOKE */ + /* returned by callback and broadcast */ + int timeout; /* timeout for INVOKE CMD in ms */ + /* -1 in stat callback when timed out */ + /* error value when error callback */ + int datalen; /* length of cmd or stat data */ + u_char *data;/* pointer to data delivered or send */ + } isdn_cmd_stat; + +/* + * Commands from linklevel to lowlevel + * + */ +#define ISDN_CMD_IOCTL 0 /* Perform ioctl */ +#define ISDN_CMD_DIAL 1 /* Dial out */ +#define ISDN_CMD_ACCEPTD 2 /* Accept an incoming call on D-Chan. */ +#define ISDN_CMD_ACCEPTB 3 /* Request B-Channel connect. */ +#define ISDN_CMD_HANGUP 4 /* Hangup */ +#define ISDN_CMD_CLREAZ 5 /* Clear EAZ(s) of channel */ +#define ISDN_CMD_SETEAZ 6 /* Set EAZ(s) of channel */ +#define ISDN_CMD_GETEAZ 7 /* Get EAZ(s) of channel */ +#define ISDN_CMD_SETSIL 8 /* Set Service-Indicator-List of channel */ +#define ISDN_CMD_GETSIL 9 /* Get Service-Indicator-List of channel */ +#define ISDN_CMD_SETL2 10 /* Set B-Chan. Layer2-Parameter */ +#define ISDN_CMD_GETL2 11 /* Get B-Chan. Layer2-Parameter */ +#define ISDN_CMD_SETL3 12 /* Set B-Chan. Layer3-Parameter */ +#define ISDN_CMD_GETL3 13 /* Get B-Chan. Layer3-Parameter */ +// #define ISDN_CMD_LOCK 14 /* Signal usage by upper levels */ +// #define ISDN_CMD_UNLOCK 15 /* Release usage-lock */ +#define ISDN_CMD_SUSPEND 16 /* Suspend connection */ +#define ISDN_CMD_RESUME 17 /* Resume connection */ +#define ISDN_CMD_PROCEED 18 /* Proceed with call establishment */ +#define ISDN_CMD_ALERT 19 /* Alert after Proceeding */ +#define ISDN_CMD_REDIR 20 /* Redir a incoming call */ +#define ISDN_CMD_PROT_IO 21 /* Protocol specific commands */ +#define CAPI_PUT_MESSAGE 22 /* CAPI message send down or up */ +#define ISDN_CMD_FAXCMD 23 /* FAX commands to HL-driver */ +#define ISDN_CMD_AUDIO 24 /* DSP, DTMF, ... settings */ + +/* + * Status-Values delivered from lowlevel to linklevel via + * statcallb(). 
+ * + */ +#define ISDN_STAT_STAVAIL 256 /* Raw status-data available */ +#define ISDN_STAT_ICALL 257 /* Incoming call detected */ +#define ISDN_STAT_RUN 258 /* Signal protocol-code is running */ +#define ISDN_STAT_STOP 259 /* Signal halt of protocol-code */ +#define ISDN_STAT_DCONN 260 /* Signal D-Channel connect */ +#define ISDN_STAT_BCONN 261 /* Signal B-Channel connect */ +#define ISDN_STAT_DHUP 262 /* Signal D-Channel disconnect */ +#define ISDN_STAT_BHUP 263 /* Signal B-Channel disconnect */ +#define ISDN_STAT_CINF 264 /* Charge-Info */ +#define ISDN_STAT_LOAD 265 /* Signal new lowlevel-driver is loaded */ +#define ISDN_STAT_UNLOAD 266 /* Signal unload of lowlevel-driver */ +#define ISDN_STAT_BSENT 267 /* Signal packet sent */ +#define ISDN_STAT_NODCH 268 /* Signal no D-Channel */ +#define ISDN_STAT_ADDCH 269 /* Add more Channels */ +#define ISDN_STAT_CAUSE 270 /* Cause-Message */ +#define ISDN_STAT_ICALLW 271 /* Incoming call without B-chan waiting */ +#define ISDN_STAT_REDIR 272 /* Redir result */ +#define ISDN_STAT_PROT 273 /* protocol IO specific callback */ +#define ISDN_STAT_DISPLAY 274 /* deliver a received display message */ +#define ISDN_STAT_L1ERR 275 /* Signal Layer-1 Error */ +#define ISDN_STAT_FAXIND 276 /* FAX indications from HL-driver */ +#define ISDN_STAT_AUDIO 277 /* DTMF, DSP indications */ +#define ISDN_STAT_DISCH 278 /* Disable/Enable channel usage */ + +/* + * Audio commands + */ +#define ISDN_AUDIO_SETDD 0 /* Set DTMF detection */ +#define ISDN_AUDIO_DTMF 1 /* Rx/Tx DTMF */ + +/* + * Values for errcode field + */ +#define ISDN_STAT_L1ERR_SEND 1 +#define ISDN_STAT_L1ERR_RECV 2 + +/* + * Values for feature-field of interface-struct. + */ +/* Layer 2 */ +#define ISDN_FEATURE_L2_X75I (0x0001 << ISDN_PROTO_L2_X75I) +#define ISDN_FEATURE_L2_X75UI (0x0001 << ISDN_PROTO_L2_X75UI) +#define ISDN_FEATURE_L2_X75BUI (0x0001 << ISDN_PROTO_L2_X75BUI) +#define ISDN_FEATURE_L2_HDLC (0x0001 << ISDN_PROTO_L2_HDLC) +#define ISDN_FEATURE_L2_TRANS (0x0001 << ISDN_PROTO_L2_TRANS) +#define ISDN_FEATURE_L2_X25DTE (0x0001 << ISDN_PROTO_L2_X25DTE) +#define ISDN_FEATURE_L2_X25DCE (0x0001 << ISDN_PROTO_L2_X25DCE) +#define ISDN_FEATURE_L2_V11096 (0x0001 << ISDN_PROTO_L2_V11096) +#define ISDN_FEATURE_L2_V11019 (0x0001 << ISDN_PROTO_L2_V11019) +#define ISDN_FEATURE_L2_V11038 (0x0001 << ISDN_PROTO_L2_V11038) +#define ISDN_FEATURE_L2_MODEM (0x0001 << ISDN_PROTO_L2_MODEM) +#define ISDN_FEATURE_L2_FAX (0x0001 << ISDN_PROTO_L2_FAX) +#define ISDN_FEATURE_L2_HDLC_56K (0x0001 << ISDN_PROTO_L2_HDLC_56K) + +#define ISDN_FEATURE_L2_MASK (0x0FFFF) /* Max. 16 protocols */ +#define ISDN_FEATURE_L2_SHIFT (0) + +/* Layer 3 */ +#define ISDN_FEATURE_L3_TRANS (0x10000 << ISDN_PROTO_L3_TRANS) +#define ISDN_FEATURE_L3_TRANSDSP (0x10000 << ISDN_PROTO_L3_TRANSDSP) +#define ISDN_FEATURE_L3_FCLASS2 (0x10000 << ISDN_PROTO_L3_FCLASS2) +#define ISDN_FEATURE_L3_FCLASS1 (0x10000 << ISDN_PROTO_L3_FCLASS1) + +#define ISDN_FEATURE_L3_MASK (0x0FF0000) /* Max. 8 Protocols */ +#define ISDN_FEATURE_L3_SHIFT (16) + +/* Signaling */ +#define ISDN_FEATURE_P_UNKNOWN (0x1000000 << ISDN_PTYPE_UNKNOWN) +#define ISDN_FEATURE_P_1TR6 (0x1000000 << ISDN_PTYPE_1TR6) +#define ISDN_FEATURE_P_EURO (0x1000000 << ISDN_PTYPE_EURO) +#define ISDN_FEATURE_P_NI1 (0x1000000 << ISDN_PTYPE_NI1) + +#define ISDN_FEATURE_P_MASK (0x0FF000000) /* Max. 
8 Protocols */ +#define ISDN_FEATURE_P_SHIFT (24) + +typedef struct setup_parm { + unsigned char phone[32]; /* Remote Phone-Number */ + unsigned char eazmsn[32]; /* Local EAZ or MSN */ + unsigned char si1; /* Service Indicator 1 */ + unsigned char si2; /* Service Indicator 2 */ + unsigned char plan; /* Numbering plan */ + unsigned char screen; /* Screening info */ +} setup_parm; + + +#ifdef CONFIG_ISDN_TTY_FAX +/* T.30 Fax G3 */ + +#define FAXIDLEN 21 + +typedef struct T30_s { + /* session parameters */ + __u8 resolution; + __u8 rate; + __u8 width; + __u8 length; + __u8 compression; + __u8 ecm; + __u8 binary; + __u8 scantime; + __u8 id[FAXIDLEN]; + /* additional parameters */ + __u8 phase; + __u8 direction; + __u8 code; + __u8 badlin; + __u8 badmul; + __u8 bor; + __u8 fet; + __u8 pollid[FAXIDLEN]; + __u8 cq; + __u8 cr; + __u8 ctcrty; + __u8 minsp; + __u8 phcto; + __u8 rel; + __u8 nbc; + /* remote station parameters */ + __u8 r_resolution; + __u8 r_rate; + __u8 r_width; + __u8 r_length; + __u8 r_compression; + __u8 r_ecm; + __u8 r_binary; + __u8 r_scantime; + __u8 r_id[FAXIDLEN]; + __u8 r_code; +} __packed T30_s; + +#define ISDN_TTY_FAX_CONN_IN 0 +#define ISDN_TTY_FAX_CONN_OUT 1 + +#define ISDN_TTY_FAX_FCON 0 +#define ISDN_TTY_FAX_DIS 1 +#define ISDN_TTY_FAX_FTT 2 +#define ISDN_TTY_FAX_MCF 3 +#define ISDN_TTY_FAX_DCS 4 +#define ISDN_TTY_FAX_TRAIN_OK 5 +#define ISDN_TTY_FAX_EOP 6 +#define ISDN_TTY_FAX_EOM 7 +#define ISDN_TTY_FAX_MPS 8 +#define ISDN_TTY_FAX_DTC 9 +#define ISDN_TTY_FAX_RID 10 +#define ISDN_TTY_FAX_HNG 11 +#define ISDN_TTY_FAX_DT 12 +#define ISDN_TTY_FAX_FCON_I 13 +#define ISDN_TTY_FAX_DR 14 +#define ISDN_TTY_FAX_ET 15 +#define ISDN_TTY_FAX_CFR 16 +#define ISDN_TTY_FAX_PTS 17 +#define ISDN_TTY_FAX_SENT 18 + +#define ISDN_FAX_PHASE_IDLE 0 +#define ISDN_FAX_PHASE_A 1 +#define ISDN_FAX_PHASE_B 2 +#define ISDN_FAX_PHASE_C 3 +#define ISDN_FAX_PHASE_D 4 +#define ISDN_FAX_PHASE_E 5 + +#endif /* TTY_FAX */ + +#define ISDN_FAX_CLASS1_FAE 0 +#define ISDN_FAX_CLASS1_FTS 1 +#define ISDN_FAX_CLASS1_FRS 2 +#define ISDN_FAX_CLASS1_FTM 3 +#define ISDN_FAX_CLASS1_FRM 4 +#define ISDN_FAX_CLASS1_FTH 5 +#define ISDN_FAX_CLASS1_FRH 6 +#define ISDN_FAX_CLASS1_CTRL 7 + +#define ISDN_FAX_CLASS1_OK 0 +#define ISDN_FAX_CLASS1_CONNECT 1 +#define ISDN_FAX_CLASS1_NOCARR 2 +#define ISDN_FAX_CLASS1_ERROR 3 +#define ISDN_FAX_CLASS1_FCERROR 4 +#define ISDN_FAX_CLASS1_QUERY 5 + +typedef struct { + __u8 cmd; + __u8 subcmd; + __u8 para[50]; +} aux_s; + +#define AT_COMMAND 0 +#define AT_EQ_VALUE 1 +#define AT_QUERY 2 +#define AT_EQ_QUERY 3 + +/* CAPI structs */ + +/* this is compatible to the old union size */ +#define MAX_CAPI_PARA_LEN 50 + +typedef struct { + /* Header */ + __u16 Length; + __u16 ApplId; + __u8 Command; + __u8 Subcommand; + __u16 Messagenumber; + + /* Parameter */ + union { + __u32 Controller; + __u32 PLCI; + __u32 NCCI; + } adr; + __u8 para[MAX_CAPI_PARA_LEN]; +} capi_msg; + +/* + * Structure for exchanging above infos + * + */ +typedef struct { + int driver; /* Lowlevel-Driver-ID */ + int command; /* Command or Status (see above) */ + ulong arg; /* Additional Data */ + union { + ulong errcode; /* Type of error with STAT_L1ERR */ + int length; /* Amount of bytes sent with STAT_BSENT */ + u_char num[50]; /* Additional Data */ + setup_parm setup;/* For SETUP msg */ + capi_msg cmsg; /* For CAPI like messages */ + char display[85];/* display message data */ + isdn_cmd_stat isdn_io; /* ISDN IO-parameter/result */ + aux_s aux; /* for modem commands/indications */ +#ifdef CONFIG_ISDN_TTY_FAX + T30_s 
*fax; /* Pointer to ttys fax struct */ +#endif + ulong userdata; /* User Data */ + } parm; +} isdn_ctrl; + +#define dss1_io isdn_io +#define ni1_io isdn_io + +/* + * The interface-struct itself (initialized at load-time of lowlevel-driver) + * + * See Documentation/isdn/INTERFACE for a description, how the communication + * between the ISDN subsystem and its drivers is done. + * + */ +typedef struct { + struct module *owner; + + /* Number of channels supported by this driver + */ + int channels; + + /* + * Maximum Size of transmit/receive-buffer this driver supports. + */ + int maxbufsize; + + /* Feature-Flags for this driver. + * See defines ISDN_FEATURE_... for Values + */ + unsigned long features; + + /* + * Needed for calculating + * dev->hard_header_len = linklayer header + hl_hdrlen; + * Drivers, not supporting sk_buff's should set this to 0. + */ + unsigned short hl_hdrlen; + + /* + * Receive-Callback using sk_buff's + * Parameters: + * int Driver-ID + * int local channel-number (0 ...) + * struct sk_buff *skb received Data + */ + void (*rcvcallb_skb)(int, int, struct sk_buff *); + + /* Status-Callback + * Parameters: + * isdn_ctrl* + * driver = Driver ID. + * command = One of above ISDN_STAT_... constants. + * arg = depending on status-type. + * num = depending on status-type. + */ + int (*statcallb)(isdn_ctrl*); + + /* Send command + * Parameters: + * isdn_ctrl* + * driver = Driver ID. + * command = One of above ISDN_CMD_... constants. + * arg = depending on command. + * num = depending on command. + */ + int (*command)(isdn_ctrl*); + + /* + * Send data using sk_buff's + * Parameters: + * int driverId + * int local channel-number (0...) + * int Flag: Need ACK for this packet. + * struct sk_buff *skb Data to send + */ + int (*writebuf_skb) (int, int, int, struct sk_buff *); + + /* Send raw D-Channel-Commands + * Parameters: + * u_char pointer data + * int length of data + * int driverId + * int local channel-number (0 ...) + */ + int (*writecmd)(const u_char __user *, int, int, int); + + /* Read raw Status replies + * u_char pointer data (volatile) + * int length of buffer + * int driverId + * int local channel-number (0 ...) + */ + int (*readstat)(u_char __user *, int, int, int); + + char id[20]; +} isdn_if; + +/* + * Function which must be called by lowlevel-driver at loadtime with + * the following fields of above struct set: + * + * channels Number of channels that will be supported. + * hl_hdrlen Space to preserve in sk_buff's when sending. Drivers, not + * supporting sk_buff's should set this to 0. + * command Address of Command-Handler. + * features Bitwise coded Features of this driver. (use ISDN_FEATURE_...) + * writebuf_skb Address of Skbuff-Send-Handler. + * writecmd " " D-Channel " which accepts raw D-Ch-Commands. + * readstat " " D-Channel " which delivers raw Status-Data. + * + * The linklevel-driver fills the following fields: + * + * channels Driver-ID assigned to this driver. (Must be used on all + * subsequent callbacks. + * rcvcallb_skb Address of handler for received Skbuff's. + * statcallb " " " for status-changes. + * + */ +extern int register_isdn(isdn_if*); +#include + +#endif /* __ISDNIF_H__ */ diff --git a/include/linux/isicom.h b/include/linux/isicom.h new file mode 100644 index 000000000..7de6822d7 --- /dev/null +++ b/include/linux/isicom.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ISICOM_H +#define _LINUX_ISICOM_H + +#define YES 1 +#define NO 0 + +/* + * ISICOM Driver definitions ... 
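Putting the isdn_if contract above together: a low-level driver fills in the fields listed next to register_isdn() and keeps the driver id that the link level writes back into .channels (as the comment above states). A compressed sketch; the mycard structure, the callback bodies and the feature mix are invented for illustration:

#include <linux/module.h>
#include <linux/isdnif.h>

struct mycard {				/* invented per-card state */
	int myid;			/* driver id returned by the link level */
	isdn_if interface;
};

static int mycard_command(isdn_ctrl *c)
{
	switch (c->command) {
	case ISDN_CMD_DIAL:		/* c->parm.setup holds the numbers */
	case ISDN_CMD_ACCEPTD:
	case ISDN_CMD_HANGUP:
	default:
		return 0;		/* stub: accept everything */
	}
}

static int mycard_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb)
{
	return skb->len;		/* stub: pretend the frame was queued */
}

static int mycard_writecmd(const u_char __user *buf, int len, int id, int chan)
{
	return len;
}

static int mycard_readstat(u_char __user *buf, int len, int id, int chan)
{
	return 0;
}

static int mycard_register(struct mycard *card)
{
	isdn_if *iif = &card->interface;

	iif->owner        = THIS_MODULE;
	iif->channels     = 2;			/* e.g. one BRI */
	iif->maxbufsize   = 4000;
	iif->hl_hdrlen    = 0;			/* no HL header in front of skbs */
	iif->features     = ISDN_FEATURE_L2_HDLC |
			    ISDN_FEATURE_L2_TRANS |
			    ISDN_FEATURE_L3_TRANS |
			    ISDN_FEATURE_P_EURO;
	iif->command      = mycard_command;
	iif->writebuf_skb = mycard_writebuf_skb;
	iif->writecmd     = mycard_writecmd;
	iif->readstat     = mycard_readstat;
	strcpy(iif->id, "mycard0");

	if (!register_isdn(iif))
		return -EIO;
	card->myid = iif->channels;	/* link level stored the driver id here */
	return 0;
}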
+ * + */ + +#define ISICOM_NAME "ISICom" + +/* + * PCI definitions + */ + +#define DEVID_COUNT 9 +#define VENDOR_ID 0x10b5 + +/* + * These are now officially allocated numbers + */ + +#define ISICOM_NMAJOR 112 /* normal */ +#define ISICOM_CMAJOR 113 /* callout */ +#define ISICOM_MAGIC (('M' << 8) | 'T') + +#define WAKEUP_CHARS 256 /* hard coded for now */ +#define TX_SIZE 254 + +#define BOARD_COUNT 4 +#define PORT_COUNT (BOARD_COUNT*16) + +/* character sizes */ + +#define ISICOM_CS5 0x0000 +#define ISICOM_CS6 0x0001 +#define ISICOM_CS7 0x0002 +#define ISICOM_CS8 0x0003 + +/* stop bits */ + +#define ISICOM_1SB 0x0000 +#define ISICOM_2SB 0x0004 + +/* parity */ + +#define ISICOM_NOPAR 0x0000 +#define ISICOM_ODPAR 0x0008 +#define ISICOM_EVPAR 0x0018 + +/* flow control */ + +#define ISICOM_CTSRTS 0x03 +#define ISICOM_INITIATE_XONXOFF 0x04 +#define ISICOM_RESPOND_XONXOFF 0x08 + +#define BOARD(line) (((line) >> 4) & 0x3) + + /* isi kill queue bitmap */ + +#define ISICOM_KILLTX 0x01 +#define ISICOM_KILLRX 0x02 + + /* isi_board status bitmap */ + +#define FIRMWARE_LOADED 0x0001 +#define BOARD_ACTIVE 0x0002 +#define BOARD_INIT 0x0004 + + /* isi_port status bitmap */ + +#define ISI_CTS 0x1000 +#define ISI_DSR 0x2000 +#define ISI_RI 0x4000 +#define ISI_DCD 0x8000 +#define ISI_DTR 0x0100 +#define ISI_RTS 0x0200 + + +#define ISI_TXOK 0x0001 + +#endif /* ISICOM_H */ diff --git a/include/linux/iversion.h b/include/linux/iversion.h new file mode 100644 index 000000000..be50ef7ce --- /dev/null +++ b/include/linux/iversion.h @@ -0,0 +1,337 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IVERSION_H +#define _LINUX_IVERSION_H + +#include + +/* + * The inode->i_version field: + * --------------------------- + * The change attribute (i_version) is mandated by NFSv4 and is mostly for + * knfsd, but is also used for other purposes (e.g. IMA). The i_version must + * appear different to observers if there was a change to the inode's data or + * metadata since it was last queried. + * + * Observers see the i_version as a 64-bit number that never decreases. If it + * remains the same since it was last checked, then nothing has changed in the + * inode. If it's different then something has changed. Observers cannot infer + * anything about the nature or magnitude of the changes from the value, only + * that the inode has changed in some fashion. + * + * Not all filesystems properly implement the i_version counter. Subsystems that + * want to use i_version field on an inode should first check whether the + * filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro). + * + * Those that set SB_I_VERSION will automatically have their i_version counter + * incremented on writes to normal files. If the SB_I_VERSION is not set, then + * the VFS will not touch it on writes, and the filesystem can use it how it + * wishes. Note that the filesystem is always responsible for updating the + * i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.). + * We consider these sorts of filesystems to have a kernel-managed i_version. + * + * It may be impractical for filesystems to keep i_version updates atomic with + * respect to the changes that cause them. They should, however, guarantee + * that i_version updates are never visible before the changes that caused + * them. Also, i_version updates should never be delayed longer than it takes + * the original change to reach disk. 
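A consumer-side sketch of the pattern described above: gate on IS_I_VERSION() (from linux/fs.h), remember the value with inode_query_iversion(), and compare later with inode_eq_iversion(). The two helper names are invented:

#include <linux/fs.h>
#include <linux/iversion.h>

/* remember the change attribute now, so a later change can be noticed */
static u64 remember_change_attr(struct inode *inode)
{
	if (!IS_I_VERSION(inode))
		return 0;	/* filesystem does not maintain i_version */
	return inode_query_iversion(inode);
}

static bool inode_changed_since(struct inode *inode, u64 cookie)
{
	return IS_I_VERSION(inode) && !inode_eq_iversion(inode, cookie);
}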
+ * + * This implementation uses the low bit in the i_version field as a flag to + * track when the value has been queried. If it has not been queried since it + * was last incremented, we can skip the increment in most cases. + * + * In the event that we're updating the ctime, we will usually go ahead and + * bump the i_version anyway. Since that has to go to stable storage in some + * fashion, we might as well increment it as well. + * + * With this implementation, the value should always appear to observers to + * increase over time if the file has changed. It's recommended to use + * inode_eq_iversion() helper to compare values. + * + * Note that some filesystems (e.g. NFS and AFS) just use the field to store + * a server-provided value (for the most part). For that reason, those + * filesystems do not set SB_I_VERSION. These filesystems are considered to + * have a self-managed i_version. + * + * Persistently storing the i_version + * ---------------------------------- + * Queries of the i_version field are not gated on them hitting the backing + * store. It's always possible that the host could crash after allowing + * a query of the value but before it has made it to disk. + * + * To mitigate this problem, filesystems should always use + * inode_set_iversion_queried when loading an existing inode from disk. This + * ensures that the next attempted inode increment will result in the value + * changing. + * + * Storing the value to disk therefore does not count as a query, so those + * filesystems should use inode_peek_iversion to grab the value to be stored. + * There is no need to flag the value as having been queried in that case. + */ + +/* + * We borrow the lowest bit in the i_version to use as a flag to tell whether + * it has been queried since we last incremented it. If it has, then we must + * increment it on the next change. After that, we can clear the flag and + * avoid incrementing it again until it has again been queried. + */ +#define I_VERSION_QUERIED_SHIFT (1) +#define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1)) +#define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT) + +/** + * inode_set_iversion_raw - set i_version to the specified raw value + * @inode: inode to set + * @val: new i_version value to set + * + * Set @inode's i_version field to @val. This function is for use by + * filesystems that self-manage the i_version. + * + * For example, the NFS client stores its NFSv4 change attribute in this way, + * and the AFS client stores the data_version from the server here. + */ +static inline void +inode_set_iversion_raw(struct inode *inode, u64 val) +{ + atomic64_set(&inode->i_version, val); +} + +/** + * inode_peek_iversion_raw - grab a "raw" iversion value + * @inode: inode from which i_version should be read + * + * Grab a "raw" inode->i_version value and return it. The i_version is not + * flagged or converted in any way. This is mostly used to access a self-managed + * i_version. + * + * With those filesystems, we want to treat the i_version as an entirely + * opaque value. + */ +static inline u64 +inode_peek_iversion_raw(const struct inode *inode) +{ + return atomic64_read(&inode->i_version); +} + +/** + * inode_set_iversion - set i_version to a particular value + * @inode: inode to set + * @val: new i_version value to set + * + * Set @inode's i_version field to @val. This function is for filesystems with + * a kernel-managed i_version, for initializing a newly-created inode from + * scratch. 
+ * + * In this case, we do not set the QUERIED flag since we know that this value + * has never been queried. + */ +static inline void +inode_set_iversion(struct inode *inode, u64 val) +{ + inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT); +} + +/** + * inode_set_iversion_queried - set i_version to a particular value as quereied + * @inode: inode to set + * @val: new i_version value to set + * + * Set @inode's i_version field to @val, and flag it for increment on the next + * change. + * + * Filesystems that persistently store the i_version on disk should use this + * when loading an existing inode from disk. + * + * When loading in an i_version value from a backing store, we can't be certain + * that it wasn't previously viewed before being stored. Thus, we must assume + * that it was, to ensure that we don't end up handing out the same value for + * different versions of the same inode. + */ +static inline void +inode_set_iversion_queried(struct inode *inode, u64 val) +{ + inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) | + I_VERSION_QUERIED); +} + +/** + * inode_maybe_inc_iversion - increments i_version + * @inode: inode with the i_version that should be updated + * @force: increment the counter even if it's not necessary? + * + * Every time the inode is modified, the i_version field must be seen to have + * changed by any observer. + * + * If "force" is set or the QUERIED flag is set, then ensure that we increment + * the value, and clear the queried flag. + * + * In the common case where neither is set, then we can return "false" without + * updating i_version. + * + * If this function returns false, and no other metadata has changed, then we + * can avoid logging the metadata. + */ +static inline bool +inode_maybe_inc_iversion(struct inode *inode, bool force) +{ + u64 cur, old, new; + + /* + * The i_version field is not strictly ordered with any other inode + * information, but the legacy inode_inc_iversion code used a spinlock + * to serialize increments. + * + * Here, we add full memory barriers to ensure that any de-facto + * ordering with other info is preserved. + * + * This barrier pairs with the barrier in inode_query_iversion() + */ + smp_mb(); + cur = inode_peek_iversion_raw(inode); + for (;;) { + /* If flag is clear then we needn't do anything */ + if (!force && !(cur & I_VERSION_QUERIED)) + return false; + + /* Since lowest bit is flag, add 2 to avoid it */ + new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT; + + old = atomic64_cmpxchg(&inode->i_version, cur, new); + if (likely(old == cur)) + break; + cur = old; + } + return true; +} + + +/** + * inode_inc_iversion - forcibly increment i_version + * @inode: inode that needs to be updated + * + * Forcbily increment the i_version field. This always results in a change to + * the observable value. + */ +static inline void +inode_inc_iversion(struct inode *inode) +{ + inode_maybe_inc_iversion(inode, true); +} + +/** + * inode_iversion_need_inc - is the i_version in need of being incremented? + * @inode: inode to check + * + * Returns whether the inode->i_version counter needs incrementing on the next + * change. Just fetch the value and check the QUERIED flag. + */ +static inline bool +inode_iversion_need_inc(struct inode *inode) +{ + return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED; +} + +/** + * inode_inc_iversion_raw - forcibly increment raw i_version + * @inode: inode that needs to be updated + * + * Forcbily increment the raw i_version field. 
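For a filesystem that persists the counter, the helpers combine roughly as follows. The example_* wrappers and the disk_val parameter are stand-ins for the filesystem's own on-disk inode handling; only the i_version calls come from this header:

#include <linux/iversion.h>

/* loading an inode: assume the stored value may already have been seen */
static void example_read_iversion(struct inode *inode, __le64 disk_val)
{
	inode_set_iversion_queried(inode, le64_to_cpu(disk_val));
}

/* any data or metadata change: only log i_version when it really moved */
static bool example_update_iversion(struct inode *inode)
{
	return inode_maybe_inc_iversion(inode, false);
}

/* writing the inode back: storing is not a query, so peek */
static __le64 example_write_iversion(struct inode *inode)
{
	return cpu_to_le64(inode_peek_iversion(inode));
}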
This always results in a change + * to the raw value. + * + * NFS will use the i_version field to store the value from the server. It + * mostly treats it as opaque, but in the case where it holds a write + * delegation, it must increment the value itself. This function does that. + */ +static inline void +inode_inc_iversion_raw(struct inode *inode) +{ + atomic64_inc(&inode->i_version); +} + +/** + * inode_peek_iversion - read i_version without flagging it to be incremented + * @inode: inode from which i_version should be read + * + * Read the inode i_version counter for an inode without registering it as a + * query. + * + * This is typically used by local filesystems that need to store an i_version + * on disk. In that situation, it's not necessary to flag it as having been + * viewed, as the result won't be used to gauge changes from that point. + */ +static inline u64 +inode_peek_iversion(const struct inode *inode) +{ + return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT; +} + +/** + * inode_query_iversion - read i_version for later use + * @inode: inode from which i_version should be read + * + * Read the inode i_version counter. This should be used by callers that wish + * to store the returned i_version for later comparison. This will guarantee + * that a later query of the i_version will result in a different value if + * anything has changed. + * + * In this implementation, we fetch the current value, set the QUERIED flag and + * then try to swap it into place with a cmpxchg, if it wasn't already set. If + * that fails, we try again with the newly fetched value from the cmpxchg. + */ +static inline u64 +inode_query_iversion(struct inode *inode) +{ + u64 cur, old, new; + + cur = inode_peek_iversion_raw(inode); + for (;;) { + /* If flag is already set, then no need to swap */ + if (cur & I_VERSION_QUERIED) { + /* + * This barrier (and the implicit barrier in the + * cmpxchg below) pairs with the barrier in + * inode_maybe_inc_iversion(). + */ + smp_mb(); + break; + } + + new = cur | I_VERSION_QUERIED; + old = atomic64_cmpxchg(&inode->i_version, cur, new); + if (likely(old == cur)) + break; + cur = old; + } + return cur >> I_VERSION_QUERIED_SHIFT; +} + +/** + * inode_eq_iversion_raw - check whether the raw i_version counter has changed + * @inode: inode to check + * @old: old value to check against its i_version + * + * Compare the current raw i_version counter with a previous one. Returns true + * if they are the same or false if they are different. + */ +static inline bool +inode_eq_iversion_raw(const struct inode *inode, u64 old) +{ + return inode_peek_iversion_raw(inode) == old; +} + +/** + * inode_eq_iversion - check whether the i_version counter has changed + * @inode: inode to check + * @old: old value to check against its i_version + * + * Compare an i_version counter with a previous one. Returns true if they are + * the same, and false if they are different. + * + * Note that we don't need to set the QUERIED flag in this case, as the value + * in the inode is not being recorded for later use. + */ +static inline bool +inode_eq_iversion(const struct inode *inode, u64 old) +{ + return inode_peek_iversion(inode) == old; +} +#endif diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h new file mode 100644 index 000000000..268f3000d --- /dev/null +++ b/include/linux/jbd2.h @@ -0,0 +1,1668 @@ +/* + * linux/include/linux/jbd2.h + * + * Written by Stephen C. 
Tweedie + * + * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. + * + * Definitions for transaction data structures for the buffer cache + * filesystem journaling support. + */ + +#ifndef _LINUX_JBD2_H +#define _LINUX_JBD2_H + +/* Allow this file to be included directly into e2fsprogs */ +#ifndef __KERNEL__ +#include "jfs_compat.h" +#define JBD2_DEBUG +#else + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif + +#define journal_oom_retry 1 + +/* + * Define JBD2_PARANIOD_IOFAIL to cause a kernel BUG() if ext4 finds + * certain classes of error which can occur due to failed IOs. Under + * normal use we want ext4 to continue after such errors, because + * hardware _can_ fail, but for debugging purposes when running tests on + * known-good hardware we may want to trap these errors. + */ +#undef JBD2_PARANOID_IOFAIL + +/* + * The default maximum commit age, in seconds. + */ +#define JBD2_DEFAULT_MAX_COMMIT_AGE 5 + +#ifdef CONFIG_JBD2_DEBUG +/* + * Define JBD2_EXPENSIVE_CHECKING to enable more expensive internal + * consistency checks. By default we don't do this unless + * CONFIG_JBD2_DEBUG is on. + */ +#define JBD2_EXPENSIVE_CHECKING +extern ushort jbd2_journal_enable_debug; +void __jbd2_debug(int level, const char *file, const char *func, + unsigned int line, const char *fmt, ...); + +#define jbd_debug(n, fmt, a...) \ + __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a) +#else +#define jbd_debug(n, fmt, a...) /**/ +#endif + +extern void *jbd2_alloc(size_t size, gfp_t flags); +extern void jbd2_free(void *ptr, size_t size); + +#define JBD2_MIN_JOURNAL_BLOCKS 1024 + +#ifdef __KERNEL__ + +/** + * typedef handle_t - The handle_t type represents a single atomic update being performed by some process. + * + * All filesystem modifications made by the process go + * through this handle. Recursive operations (such as quota operations) + * are gathered into a single update. + * + * The buffer credits field is used to account for journaled buffers + * being modified by the running process. To ensure that there is + * enough log space for all outstanding operations, we need to limit the + * number of outstanding buffers possible at any time. When the + * operation completes, any buffer credits not used are credited back to + * the transaction, so that at all times we know how many buffers the + * outstanding updates on a transaction might possibly touch. + * + * This is an opaque datatype. + **/ +typedef struct jbd2_journal_handle handle_t; /* Atomic operation type */ + + +/** + * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem. + * + * journal_t is linked to from the fs superblock structure. + * + * We use the journal_t to keep track of all outstanding transaction + * activity on the filesystem, and to manage the state of the log + * writing process. + * + * This is an opaque datatype. + **/ +typedef struct journal_s journal_t; /* Journal control structure */ +#endif + +/* + * Internal structures used by the logging mechanism: + */ + +#define JBD2_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! 
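The handle_t description above corresponds to the usual calling sequence, sketched here with the transaction calls declared later in this header (jbd2_journal_start, jbd2_journal_get_write_access, jbd2_journal_dirty_metadata, jbd2_journal_stop). The wrapper name and the single-credit reservation are illustrative only:

#include <linux/jbd2.h>

static int example_update_block(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle;
	int err;

	/* reserve credit for one metadata buffer in the running transaction */
	handle = jbd2_journal_start(journal, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* tell jbd2 we intend to dirty this buffer under the handle */
	err = jbd2_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data here ... */
		err = jbd2_journal_dirty_metadata(handle, bh);
	}

	/* drop the handle; the commit itself happens asynchronously */
	jbd2_journal_stop(handle);
	return err;
}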
*/ + +/* + * On-disk structures + */ + +/* + * Descriptor block types: + */ + +#define JBD2_DESCRIPTOR_BLOCK 1 +#define JBD2_COMMIT_BLOCK 2 +#define JBD2_SUPERBLOCK_V1 3 +#define JBD2_SUPERBLOCK_V2 4 +#define JBD2_REVOKE_BLOCK 5 + +/* + * Standard header for all descriptor blocks: + */ +typedef struct journal_header_s +{ + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; +} journal_header_t; + +/* + * Checksum types. + */ +#define JBD2_CRC32_CHKSUM 1 +#define JBD2_MD5_CHKSUM 2 +#define JBD2_SHA1_CHKSUM 3 +#define JBD2_CRC32C_CHKSUM 4 + +#define JBD2_CRC32_CHKSUM_SIZE 4 + +#define JBD2_CHECKSUM_BYTES (32 / sizeof(u32)) +/* + * Commit block header for storing transactional checksums: + * + * NOTE: If FEATURE_COMPAT_CHECKSUM (checksum v1) is set, the h_chksum* + * fields are used to store a checksum of the descriptor and data blocks. + * + * If FEATURE_INCOMPAT_CSUM_V2 (checksum v2) is set, then the h_chksum + * field is used to store crc32c(uuid+commit_block). Each journal metadata + * block gets its own checksum, and data block checksums are stored in + * journal_block_tag (in the descriptor). The other h_chksum* fields are + * not used. + * + * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses + * journal_block_tag3_t to store a full 32-bit checksum. Everything else + * is the same as v2. + * + * Checksum v1, v2, and v3 are mutually exclusive features. + */ +struct commit_header { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; + unsigned char h_chksum_type; + unsigned char h_chksum_size; + unsigned char h_padding[2]; + __be32 h_chksum[JBD2_CHECKSUM_BYTES]; + __be64 h_commit_sec; + __be32 h_commit_nsec; +}; + +/* + * The block tag: used to describe a single buffer in the journal. + * t_blocknr_high is only used if INCOMPAT_64BIT is set, so this + * raw struct shouldn't be used for pointer math or sizeof() - use + * journal_tag_bytes(journal) instead to compute this. + */ +typedef struct journal_block_tag3_s +{ + __be32 t_blocknr; /* The on-disk block number */ + __be32 t_flags; /* See below */ + __be32 t_blocknr_high; /* most-significant high 32bits. */ + __be32 t_checksum; /* crc32c(uuid+seq+block) */ +} journal_block_tag3_t; + +typedef struct journal_block_tag_s +{ + __be32 t_blocknr; /* The on-disk block number */ + __be16 t_checksum; /* truncated crc32c(uuid+seq+block) */ + __be16 t_flags; /* See below */ + __be32 t_blocknr_high; /* most-significant high 32bits. */ +} journal_block_tag_t; + +/* Tail of descriptor or revoke block, for checksumming */ +struct jbd2_journal_block_tail { + __be32 t_checksum; /* crc32c(uuid+descr_block) */ +}; + +/* + * The revoke descriptor: used on disk to describe a series of blocks to + * be revoked from the log + */ +typedef struct jbd2_journal_revoke_header_s +{ + journal_header_t r_header; + __be32 r_count; /* Count of bytes used in the block */ +} jbd2_journal_revoke_header_t; + +/* Definitions for the journal tag flags word: */ +#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */ +#define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */ +#define JBD2_FLAG_DELETED 4 /* block deleted by this transaction */ +#define JBD2_FLAG_LAST_TAG 8 /* last tag in this descriptor block */ + + +/* + * The journal superblock. All fields are in big-endian byte order. 
+ */ +typedef struct journal_superblock_s +{ +/* 0x0000 */ + journal_header_t s_header; + +/* 0x000C */ + /* Static information describing the journal */ + __be32 s_blocksize; /* journal device blocksize */ + __be32 s_maxlen; /* total blocks in journal file */ + __be32 s_first; /* first block of log information */ + +/* 0x0018 */ + /* Dynamic information describing the current state of the log */ + __be32 s_sequence; /* first commit ID expected in log */ + __be32 s_start; /* blocknr of start of log */ + +/* 0x0020 */ + /* Error value, as set by jbd2_journal_abort(). */ + __be32 s_errno; + +/* 0x0024 */ + /* Remaining fields are only valid in a version-2 superblock */ + __be32 s_feature_compat; /* compatible feature set */ + __be32 s_feature_incompat; /* incompatible feature set */ + __be32 s_feature_ro_compat; /* readonly-compatible feature set */ +/* 0x0030 */ + __u8 s_uuid[16]; /* 128-bit uuid for journal */ + +/* 0x0040 */ + __be32 s_nr_users; /* Nr of filesystems sharing log */ + + __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/ + +/* 0x0048 */ + __be32 s_max_transaction; /* Limit of journal blocks per trans.*/ + __be32 s_max_trans_data; /* Limit of data blocks per trans. */ + +/* 0x0050 */ + __u8 s_checksum_type; /* checksum type */ + __u8 s_padding2[3]; + __u32 s_padding[42]; + __be32 s_checksum; /* crc32c(superblock) */ + +/* 0x0100 */ + __u8 s_users[16*48]; /* ids of all fs'es sharing the log */ +/* 0x0400 */ +} journal_superblock_t; + +/* Use the jbd2_{has,set,clear}_feature_* helpers; these will be removed */ +#define JBD2_HAS_COMPAT_FEATURE(j,mask) \ + ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask)))) +#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \ + ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask)))) +#define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \ + ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask)))) + +#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 + +#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 +#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 +#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 +#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 +#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 + +/* See "journal feature predicate functions" below */ + +/* Features known to this kernel version: */ +#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM +#define JBD2_KNOWN_ROCOMPAT_FEATURES 0 +#define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \ + JBD2_FEATURE_INCOMPAT_64BIT | \ + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \ + JBD2_FEATURE_INCOMPAT_CSUM_V2 | \ + JBD2_FEATURE_INCOMPAT_CSUM_V3) + +#ifdef __KERNEL__ + +#include +#include + +enum jbd_state_bits { + BH_JBD /* Has an attached ext3 journal_head */ + = BH_PrivateStart, + BH_JWrite, /* Being written to log (@@@ DEBUGGING) */ + BH_Freed, /* Has been freed (truncated) */ + BH_Revoked, /* Has been revoked from the log */ + BH_RevokeValid, /* Revoked flag is valid */ + BH_JBDDirty, /* Is dirty but journaled */ + BH_State, /* Pins most journal_head state */ + BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ + BH_Shadow, /* IO on shadow buffer is running */ + BH_Verified, /* Metadata block has been verified ok */ + BH_JBDPrivateStart, /* First bit available for private use by FS */ +}; + +BUFFER_FNS(JBD, jbd) +BUFFER_FNS(JWrite, jwrite) +BUFFER_FNS(JBDDirty, jbddirty) +TAS_BUFFER_FNS(JBDDirty, jbddirty) +BUFFER_FNS(Revoked, revoked) 
+TAS_BUFFER_FNS(Revoked, revoked) +BUFFER_FNS(RevokeValid, revokevalid) +TAS_BUFFER_FNS(RevokeValid, revokevalid) +BUFFER_FNS(Freed, freed) +BUFFER_FNS(Shadow, shadow) +BUFFER_FNS(Verified, verified) + +static inline struct buffer_head *jh2bh(struct journal_head *jh) +{ + return jh->b_bh; +} + +static inline struct journal_head *bh2jh(struct buffer_head *bh) +{ + return bh->b_private; +} + +static inline void jbd_lock_bh_state(struct buffer_head *bh) +{ + bit_spin_lock(BH_State, &bh->b_state); +} + +static inline int jbd_trylock_bh_state(struct buffer_head *bh) +{ + return bit_spin_trylock(BH_State, &bh->b_state); +} + +static inline int jbd_is_locked_bh_state(struct buffer_head *bh) +{ + return bit_spin_is_locked(BH_State, &bh->b_state); +} + +static inline void jbd_unlock_bh_state(struct buffer_head *bh) +{ + bit_spin_unlock(BH_State, &bh->b_state); +} + +static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) +{ + bit_spin_lock(BH_JournalHead, &bh->b_state); +} + +static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) +{ + bit_spin_unlock(BH_JournalHead, &bh->b_state); +} + +#define J_ASSERT(assert) BUG_ON(!(assert)) + +#define J_ASSERT_BH(bh, expr) J_ASSERT(expr) +#define J_ASSERT_JH(jh, expr) J_ASSERT(expr) + +#if defined(JBD2_PARANOID_IOFAIL) +#define J_EXPECT(expr, why...) J_ASSERT(expr) +#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr) +#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr) +#else +#define __journal_expect(expr, why...) \ + ({ \ + int val = (expr); \ + if (!val) { \ + printk(KERN_ERR \ + "JBD2 unexpected failure: %s: %s;\n", \ + __func__, #expr); \ + printk(KERN_ERR why "\n"); \ + } \ + val; \ + }) +#define J_EXPECT(expr, why...) __journal_expect(expr, ## why) +#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why) +#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why) +#endif + +/* Flags in jbd_inode->i_flags */ +#define __JI_COMMIT_RUNNING 0 +#define __JI_WRITE_DATA 1 +#define __JI_WAIT_DATA 2 + +/* + * Commit of the inode data in progress. We use this flag to protect us from + * concurrent deletion of inode. We cannot use reference to inode for this + * since we cannot afford doing last iput() on behalf of kjournald + */ +#define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING) +/* Write allocated dirty buffers in this inode before commit */ +#define JI_WRITE_DATA (1 << __JI_WRITE_DATA) +/* Wait for outstanding data writes for this inode before commit */ +#define JI_WAIT_DATA (1 << __JI_WAIT_DATA) + +/** + * struct jbd_inode - The jbd_inode type is the structure linking inodes in + * ordered mode present in a transaction so that we can sync them during commit. + */ +struct jbd2_inode { + /** + * @i_transaction: + * + * Which transaction does this inode belong to? Either the running + * transaction or the committing one. [j_list_lock] + */ + transaction_t *i_transaction; + + /** + * @i_next_transaction: + * + * Pointer to the running transaction modifying inode's data in case + * there is already a committing transaction touching it. 
[j_list_lock] + */ + transaction_t *i_next_transaction; + + /** + * @i_list: List of inodes in the i_transaction [j_list_lock] + */ + struct list_head i_list; + + /** + * @i_vfs_inode: + * + * VFS inode this inode belongs to [constant for lifetime of structure] + */ + struct inode *i_vfs_inode; + + /** + * @i_flags: Flags of inode [j_list_lock] + */ + unsigned long i_flags; + + /** + * @i_dirty_start: + * + * Offset in bytes where the dirty range for this inode starts. + * [j_list_lock] + */ + loff_t i_dirty_start; + + /** + * @i_dirty_end: + * + * Inclusive offset in bytes where the dirty range for this inode + * ends. [j_list_lock] + */ + loff_t i_dirty_end; +}; + +struct jbd2_revoke_table_s; + +/** + * struct handle_s - The handle_s type is the concrete type associated with + * handle_t. + * @h_transaction: Which compound transaction is this update a part of? + * @h_journal: Which journal handle belongs to - used iff h_reserved set. + * @h_rsv_handle: Handle reserved for finishing the logical operation. + * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. + * @h_ref: Reference count on this handle. + * @h_err: Field for caller's use to track errors through large fs operations. + * @h_sync: Flag for sync-on-close. + * @h_jdata: Flag to force data journaling. + * @h_reserved: Flag for handle for reserved credits. + * @h_aborted: Flag indicating fatal error on handle. + * @h_type: For handle statistics. + * @h_line_no: For handle statistics. + * @h_start_jiffies: Handle Start time. + * @h_requested_credits: Holds @h_buffer_credits after handle is started. + * @saved_alloc_context: Saved context while transaction is open. + **/ + +/* Docbook can't yet cope with the bit fields, but will leave the documentation + * in so it can be fixed later. + */ + +struct jbd2_journal_handle +{ + union { + transaction_t *h_transaction; + /* Which journal handle belongs to - used iff h_reserved set */ + journal_t *h_journal; + }; + + handle_t *h_rsv_handle; + int h_buffer_credits; + int h_ref; + int h_err; + + /* Flags [no locking] */ + unsigned int h_sync: 1; + unsigned int h_jdata: 1; + unsigned int h_reserved: 1; + unsigned int h_aborted: 1; + unsigned int h_type: 8; + unsigned int h_line_no: 16; + + unsigned long h_start_jiffies; + unsigned int h_requested_credits; + + unsigned int saved_alloc_context; +}; + + +/* + * Some stats for checkpoint phase + */ +struct transaction_chp_stats_s { + unsigned long cs_chp_time; + __u32 cs_forced_to_close; + __u32 cs_written; + __u32 cs_dropped; +}; + +/* The transaction_t type is the guts of the journaling mechanism. It + * tracks a compound transaction through its various states: + * + * RUNNING: accepting new updates + * LOCKED: Updates still running but we don't accept new ones + * RUNDOWN: Updates are tidying up but have finished requesting + * new buffers to modify (state not used for now) + * FLUSH: All updates complete, but we are still writing to disk + * COMMIT: All data on disk, writing commit record + * FINISHED: We still have to keep the transaction for checkpointing. + * + * The transaction keeps track of all of the buffers modified by a + * running transaction, and all of the buffers committed but not yet + * flushed to home for finished transactions. 
+ */ + +/* + * Lock ranking: + * + * j_list_lock + * ->jbd_lock_bh_journal_head() (This is "innermost") + * + * j_state_lock + * ->jbd_lock_bh_state() + * + * jbd_lock_bh_state() + * ->j_list_lock + * + * j_state_lock + * ->t_handle_lock + * + * j_state_lock + * ->j_list_lock (journal_unmap_buffer) + * + */ + +struct transaction_s +{ + /* Pointer to the journal for this transaction. [no locking] */ + journal_t *t_journal; + + /* Sequence number for this transaction [no locking] */ + tid_t t_tid; + + /* + * Transaction's current state + * [no locking - only kjournald2 alters this] + * [j_list_lock] guards transition of a transaction into T_FINISHED + * state and subsequent call of __jbd2_journal_drop_transaction() + * FIXME: needs barriers + * KLUDGE: [use j_state_lock] + */ + enum { + T_RUNNING, + T_LOCKED, + T_FLUSH, + T_COMMIT, + T_COMMIT_DFLUSH, + T_COMMIT_JFLUSH, + T_COMMIT_CALLBACK, + T_FINISHED + } t_state; + + /* + * Where in the log does this transaction's commit start? [no locking] + */ + unsigned long t_log_start; + + /* Number of buffers on the t_buffers list [j_list_lock] */ + int t_nr_buffers; + + /* + * Doubly-linked circular list of all buffers reserved but not yet + * modified by this transaction [j_list_lock] + */ + struct journal_head *t_reserved_list; + + /* + * Doubly-linked circular list of all metadata buffers owned by this + * transaction [j_list_lock] + */ + struct journal_head *t_buffers; + + /* + * Doubly-linked circular list of all forget buffers (superseded + * buffers which we can un-checkpoint once this transaction commits) + * [j_list_lock] + */ + struct journal_head *t_forget; + + /* + * Doubly-linked circular list of all buffers still to be flushed before + * this transaction can be checkpointed. [j_list_lock] + */ + struct journal_head *t_checkpoint_list; + + /* + * Doubly-linked circular list of all buffers submitted for IO while + * checkpointing. [j_list_lock] + */ + struct journal_head *t_checkpoint_io_list; + + /* + * Doubly-linked circular list of metadata buffers being shadowed by log + * IO. The IO buffers on the iobuf list and the shadow buffers on this + * list match each other one for one at all times. [j_list_lock] + */ + struct journal_head *t_shadow_list; + + /* + * List of inodes whose data we've modified in data=ordered mode. + * [j_list_lock] + */ + struct list_head t_inode_list; + + /* + * Protects info related to handles + */ + spinlock_t t_handle_lock; + + /* + * Longest time some handle had to wait for running transaction + */ + unsigned long t_max_wait; + + /* + * When transaction started + */ + unsigned long t_start; + + /* + * When commit was requested + */ + unsigned long t_requested; + + /* + * Checkpointing stats [j_checkpoint_sem] + */ + struct transaction_chp_stats_s t_chp_stats; + + /* + * Number of outstanding updates running on this transaction + * [t_handle_lock] + */ + atomic_t t_updates; + + /* + * Number of buffers reserved for use by all handles in this transaction + * handle but not yet modified. [t_handle_lock] + */ + atomic_t t_outstanding_credits; + + /* + * Forward and backward links for the circular list of all transactions + * awaiting checkpoint. [j_list_lock] + */ + transaction_t *t_cpnext, *t_cpprev; + + /* + * When will the transaction expire (become due for commit), in jiffies? + * [no locking] + */ + unsigned long t_expires; + + /* + * When this transaction started, in nanoseconds [no locking] + */ + ktime_t t_start_time; + + /* + * How many handles used this transaction? 
[t_handle_lock] + */ + atomic_t t_handle_count; + + /* + * This transaction is being forced and some process is + * waiting for it to finish. + */ + unsigned int t_synchronous_commit:1; + + /* Disk flush needs to be sent to fs partition [no locking] */ + int t_need_data_flush; + + /* + * For use by the filesystem to store fs-specific data + * structures associated with the transaction + */ + struct list_head t_private_list; +}; + +struct transaction_run_stats_s { + unsigned long rs_wait; + unsigned long rs_request_delay; + unsigned long rs_running; + unsigned long rs_locked; + unsigned long rs_flushing; + unsigned long rs_logging; + + __u32 rs_handle_count; + __u32 rs_blocks; + __u32 rs_blocks_logged; +}; + +struct transaction_stats_s { + unsigned long ts_tid; + unsigned long ts_requested; + struct transaction_run_stats_s run; +}; + +static inline unsigned long +jbd2_time_diff(unsigned long start, unsigned long end) +{ + if (end >= start) + return end - start; + + return end + (MAX_JIFFY_OFFSET - start); +} + +#define JBD2_NR_BATCH 64 + +/** + * struct journal_s - The journal_s type is the concrete type associated with + * journal_t. + */ +struct journal_s +{ + /** + * @j_flags: General journaling state flags [j_state_lock] + */ + unsigned long j_flags; + + /** + * @j_errno: + * + * Is there an outstanding uncleared error on the journal (from a prior + * abort)? [j_state_lock] + */ + int j_errno; + + /** + * @j_sb_buffer: The first part of the superblock buffer. + */ + struct buffer_head *j_sb_buffer; + + /** + * @j_superblock: The second part of the superblock buffer. + */ + journal_superblock_t *j_superblock; + + /** + * @j_format_version: Version of the superblock format. + */ + int j_format_version; + + /** + * @j_state_lock: Protect the various scalars in the journal. + */ + rwlock_t j_state_lock; + + /** + * @j_barrier_count: + * + * Number of processes waiting to create a barrier lock [j_state_lock] + */ + int j_barrier_count; + + /** + * @j_barrier: The barrier lock itself. + */ + struct mutex j_barrier; + + /** + * @j_running_transaction: + * + * Transactions: The current running transaction... + * [j_state_lock] [caller holding open handle] + */ + transaction_t *j_running_transaction; + + /** + * @j_committing_transaction: + * + * the transaction we are pushing to disk + * [j_state_lock] [caller holding open handle] + */ + transaction_t *j_committing_transaction; + + /** + * @j_checkpoint_transactions: + * + * ... and a linked circular list of all transactions waiting for + * checkpointing. [j_list_lock] + */ + transaction_t *j_checkpoint_transactions; + + /** + * @j_wait_transaction_locked: + * + * Wait queue for waiting for a locked transaction to start committing, + * or for a barrier lock to be released. + */ + wait_queue_head_t j_wait_transaction_locked; + + /** + * @j_wait_done_commit: Wait queue for waiting for commit to complete. + */ + wait_queue_head_t j_wait_done_commit; + + /** + * @j_wait_commit: Wait queue to trigger commit. + */ + wait_queue_head_t j_wait_commit; + + /** + * @j_wait_updates: Wait queue to wait for updates to complete. + */ + wait_queue_head_t j_wait_updates; + + /** + * @j_wait_reserved: + * + * Wait queue to wait for reserved buffer credits to drop. + */ + wait_queue_head_t j_wait_reserved; + + /** + * @j_checkpoint_mutex: + * + * Semaphore for locking against concurrent checkpoints. + */ + struct mutex j_checkpoint_mutex; + + /** + * @j_chkpt_bhs: + * + * List of buffer heads used by the checkpoint routine. 
This + * was moved from jbd2_log_do_checkpoint() to reduce stack + * usage. Access to this array is controlled by the + * @j_checkpoint_mutex. [j_checkpoint_mutex] + */ + struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; + + /** + * @j_head: + * + * Journal head: identifies the first unused block in the journal. + * [j_state_lock] + */ + unsigned long j_head; + + /** + * @j_tail: + * + * Journal tail: identifies the oldest still-used block in the journal. + * [j_state_lock] + */ + unsigned long j_tail; + + /** + * @j_free: + * + * Journal free: how many free blocks are there in the journal? + * [j_state_lock] + */ + unsigned long j_free; + + /** + * @j_first: + * + * The block number of the first usable block in the journal + * [j_state_lock]. + */ + unsigned long j_first; + + /** + * @j_last: + * + * The block number one beyond the last usable block in the journal + * [j_state_lock]. + */ + unsigned long j_last; + + /** + * @j_dev: Device where we store the journal. + */ + struct block_device *j_dev; + + /** + * @j_blocksize: Block size for the location where we store the journal. + */ + int j_blocksize; + + /** + * @j_blk_offset: + * + * Starting block offset into the device where we store the journal. + */ + unsigned long long j_blk_offset; + + /** + * @j_devname: Journal device name. + */ + char j_devname[BDEVNAME_SIZE+24]; + + /** + * @j_fs_dev: + * + * Device which holds the client fs. For internal journal this will be + * equal to j_dev. + */ + struct block_device *j_fs_dev; + + /** + * @j_maxlen: Total maximum capacity of the journal region on disk. + */ + unsigned int j_maxlen; + + /** + * @j_reserved_credits: + * + * Number of buffers reserved from the running transaction. + */ + atomic_t j_reserved_credits; + + /** + * @j_list_lock: Protects the buffer lists and internal buffer state. + */ + spinlock_t j_list_lock; + + /** + * @j_inode: + * + * Optional inode where we store the journal. If present, all + * journal block numbers are mapped into this inode via bmap(). + */ + struct inode *j_inode; + + /** + * @j_tail_sequence: + * + * Sequence number of the oldest transaction in the log [j_state_lock] + */ + tid_t j_tail_sequence; + + /** + * @j_transaction_sequence: + * + * Sequence number of the next transaction to grant [j_state_lock] + */ + tid_t j_transaction_sequence; + + /** + * @j_commit_sequence: + * + * Sequence number of the most recently committed transaction + * [j_state_lock]. + */ + tid_t j_commit_sequence; + + /** + * @j_commit_request: + * + * Sequence number of the most recent transaction wanting commit + * [j_state_lock] + */ + tid_t j_commit_request; + + /** + * @j_uuid: + * + * Journal uuid: identifies the object (filesystem, LVM volume etc) + * backed by this journal. This will eventually be replaced by an array + * of uuids, allowing us to index multiple devices within a single + * journal and to perform atomic updates across them. + */ + __u8 j_uuid[16]; + + /** + * @j_task: Pointer to the current commit thread for this journal. + */ + struct task_struct *j_task; + + /** + * @j_max_transaction_buffers: + * + * Maximum number of metadata buffers to allow in a single compound + * commit transaction. + */ + int j_max_transaction_buffers; + + /** + * @j_commit_interval: + * + * What is the maximum transaction lifetime before we begin a commit? + */ + unsigned long j_commit_interval; + + /** + * @j_commit_timer: The timer used to wakeup the commit thread. + */ + struct timer_list j_commit_timer; + + /** + * @j_revoke_lock: Protect the revoke table. 
+ */ + spinlock_t j_revoke_lock; + + /** + * @j_revoke: + * + * The revoke table - maintains the list of revoked blocks in the + * current transaction. + */ + struct jbd2_revoke_table_s *j_revoke; + + /** + * @j_revoke_table: Alternate revoke tables for j_revoke. + */ + struct jbd2_revoke_table_s *j_revoke_table[2]; + + /** + * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction. + */ + struct buffer_head **j_wbuf; + + /** + * @j_wbufsize: + * + * Size of @j_wbuf array. + */ + int j_wbufsize; + + /** + * @j_last_sync_writer: + * + * The pid of the last person to run a synchronous operation + * through the journal. + */ + pid_t j_last_sync_writer; + + /** + * @j_average_commit_time: + * + * The average amount of time in nanoseconds it takes to commit a + * transaction to disk. [j_state_lock] + */ + u64 j_average_commit_time; + + /** + * @j_min_batch_time: + * + * Minimum time that we should wait for additional filesystem operations + * to get batched into a synchronous handle in microseconds. + */ + u32 j_min_batch_time; + + /** + * @j_max_batch_time: + * + * Maximum time that we should wait for additional filesystem operations + * to get batched into a synchronous handle in microseconds. + */ + u32 j_max_batch_time; + + /** + * @j_commit_callback: + * + * This function is called when a transaction is closed. + */ + void (*j_commit_callback)(journal_t *, + transaction_t *); + + /* + * Journal statistics + */ + + /** + * @j_history_lock: Protect the transactions statistics history. + */ + spinlock_t j_history_lock; + + /** + * @j_proc_entry: procfs entry for the jbd statistics directory. + */ + struct proc_dir_entry *j_proc_entry; + + /** + * @j_stats: Overall statistics. + */ + struct transaction_stats_s j_stats; + + /** + * @j_failed_commit: Failed journal commit ID. + */ + unsigned int j_failed_commit; + + /** + * @j_private: + * + * An opaque pointer to fs-private information. ext3 puts its + * superblock pointer here. + */ + void *j_private; + + /** + * @j_chksum_driver: + * + * Reference to checksum algorithm driver via cryptoapi. + */ + struct crypto_shash *j_chksum_driver; + + /** + * @j_csum_seed: + * + * Precomputed journal UUID checksum for seeding other checksums. + */ + __u32 j_csum_seed; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /** + * @j_trans_commit_map: + * + * Lockdep entity to track transaction commit dependencies. Handles + * hold this "lock" for read, when we wait for commit, we acquire the + * "lock" for writing. This matches the properties of jbd2 journalling + * where the running transaction has to wait for all handles to be + * dropped to commit that transaction and also acquiring a handle may + * require transaction commit to finish. 
+ */ + struct lockdep_map j_trans_commit_map; +#endif +}; + +#define jbd2_might_wait_for_commit(j) \ + do { \ + rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \ + rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \ + } while (0) + +/* journal feature predicate functions */ +#define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \ +static inline bool jbd2_has_feature_##name(journal_t *j) \ +{ \ + return ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_compat & \ + cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname)) != 0); \ +} \ +static inline void jbd2_set_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_compat |= \ + cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \ +} \ +static inline void jbd2_clear_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_compat &= \ + ~cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \ +} + +#define JBD2_FEATURE_RO_COMPAT_FUNCS(name, flagname) \ +static inline bool jbd2_has_feature_##name(journal_t *j) \ +{ \ + return ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_ro_compat & \ + cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname)) != 0); \ +} \ +static inline void jbd2_set_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_ro_compat |= \ + cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \ +} \ +static inline void jbd2_clear_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_ro_compat &= \ + ~cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \ +} + +#define JBD2_FEATURE_INCOMPAT_FUNCS(name, flagname) \ +static inline bool jbd2_has_feature_##name(journal_t *j) \ +{ \ + return ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_incompat & \ + cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname)) != 0); \ +} \ +static inline void jbd2_set_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_incompat |= \ + cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \ +} \ +static inline void jbd2_clear_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_incompat &= \ + ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \ +} + +JBD2_FEATURE_COMPAT_FUNCS(checksum, CHECKSUM) + +JBD2_FEATURE_INCOMPAT_FUNCS(revoke, REVOKE) +JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT) +JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT) +JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2) +JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) + +/* + * Journal flag definitions + */ +#define JBD2_UNMOUNT 0x001 /* Journal thread is being destroyed */ +#define JBD2_ABORT 0x002 /* Journaling has been aborted for errors. 
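/*
 * Editor's note -- illustrative sketch only, not part of the upstream
 * header.  The JBD2_FEATURE_*_FUNCS() macros above generate
 * jbd2_has_feature_xxx()/jbd2_set_feature_xxx()/jbd2_clear_feature_xxx()
 * helpers for each instantiated feature; a client might use the generated
 * 64bit predicate like this (the wrapper function name is hypothetical):
 */
static inline size_t example_block_tag_size(journal_t *journal)
{
	/* 64-bit journals record 8-byte block numbers in descriptor tags */
	if (jbd2_has_feature_64bit(journal))
		return 8;
	return 4;
}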
*/ +#define JBD2_ACK_ERR 0x004 /* The errno in the sb has been acked */ +#define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */ +#define JBD2_LOADED 0x010 /* The journal superblock has been loaded */ +#define JBD2_BARRIER 0x020 /* Use IDE barriers */ +#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file + * data write error in ordered + * mode */ +#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */ + +/* + * Function declarations for the journaling transaction and buffer + * management + */ + +/* Filing buffers */ +extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *); +extern void __jbd2_journal_refile_buffer(struct journal_head *); +extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *); +extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); +extern void __journal_free_buffer(struct journal_head *bh); +extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); +extern void __journal_clean_data_list(transaction_t *transaction); +static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh) +{ + list_add_tail(&bh->b_assoc_buffers, head); +} +static inline void jbd2_unfile_log_bh(struct buffer_head *bh) +{ + list_del_init(&bh->b_assoc_buffers); +} + +/* Log buffer allocation */ +struct buffer_head *jbd2_journal_get_descriptor_buffer(transaction_t *, int); +void jbd2_descriptor_block_csum_set(journal_t *, struct buffer_head *); +int jbd2_journal_next_log_block(journal_t *, unsigned long long *); +int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, + unsigned long *block); +int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); +void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); + +/* Commit management */ +extern void jbd2_journal_commit_transaction(journal_t *); + +/* Checkpoint list management */ +void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); +int __jbd2_journal_remove_checkpoint(struct journal_head *); +void jbd2_journal_destroy_checkpoint(journal_t *journal); +void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); + + +/* + * Triggers + */ + +struct jbd2_buffer_trigger_type { + /* + * Fired a the moment data to write to the journal are known to be + * stable - so either at the moment b_frozen_data is created or just + * before a buffer is written to the journal. mapped_data is a mapped + * buffer that is the frozen data for commit. + */ + void (*t_frozen)(struct jbd2_buffer_trigger_type *type, + struct buffer_head *bh, void *mapped_data, + size_t size); + + /* + * Fired during journal abort for dirty buffers that will not be + * committed. 
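/*
 * Editor's note -- illustrative sketch only, not upstream code.  A client
 * file system that keeps per-block checksums can use the trigger above to
 * recompute them over the frozen copy that actually hits the log; the
 * checksum helper below is hypothetical.  Such a trigger is attached with
 * jbd2_journal_set_triggers(), declared further down in this header.
 */
static void example_frozen_trigger(struct jbd2_buffer_trigger_type *type,
				   struct buffer_head *bh, void *mapped_data,
				   size_t size)
{
	example_block_set_csum(mapped_data, size);	/* hypothetical */
}

static struct jbd2_buffer_trigger_type example_triggers = {
	.t_frozen = example_frozen_trigger,
	/* .t_abort could clear private dirty state on journal abort */
};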
+ */ + void (*t_abort)(struct jbd2_buffer_trigger_type *type, + struct buffer_head *bh); +}; + +extern void jbd2_buffer_frozen_trigger(struct journal_head *jh, + void *mapped_data, + struct jbd2_buffer_trigger_type *triggers); +extern void jbd2_buffer_abort_trigger(struct journal_head *jh, + struct jbd2_buffer_trigger_type *triggers); + +/* Buffer IO */ +extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction, + struct journal_head *jh_in, + struct buffer_head **bh_out, + sector_t blocknr); + +/* Transaction locking */ +extern void __wait_on_journal (journal_t *); + +/* Transaction cache support */ +extern void jbd2_journal_destroy_transaction_cache(void); +extern int __init jbd2_journal_init_transaction_cache(void); +extern void jbd2_journal_free_transaction(transaction_t *); + +/* + * Journal locking. + * + * We need to lock the journal during transaction state changes so that nobody + * ever tries to take a handle on the running transaction while we are in the + * middle of moving it to the commit phase. j_state_lock does this. + * + * Note that the locking is completely interrupt unsafe. We never touch + * journal structures from interrupts. + */ + +static inline handle_t *journal_current_handle(void) +{ + return current->journal_info; +} + +/* The journaling code user interface: + * + * Create and destroy handles + * Register buffer modifications against the current transaction. + */ + +extern handle_t *jbd2_journal_start(journal_t *, int nblocks); +extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks, + gfp_t gfp_mask, unsigned int type, + unsigned int line_no); +extern int jbd2_journal_restart(handle_t *, int nblocks); +extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask); +extern int jbd2_journal_start_reserved(handle_t *handle, + unsigned int type, unsigned int line_no); +extern void jbd2_journal_free_reserved(handle_t *handle); +extern int jbd2_journal_extend (handle_t *, int nblocks); +extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); +extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); +extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *); +void jbd2_journal_set_triggers(struct buffer_head *, + struct jbd2_buffer_trigger_type *type); +extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); +extern int jbd2_journal_forget (handle_t *, struct buffer_head *); +extern void journal_sync_buffer (struct buffer_head *); +extern int jbd2_journal_invalidatepage(journal_t *, + struct page *, unsigned int, unsigned int); +extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); +extern int jbd2_journal_stop(handle_t *); +extern int jbd2_journal_flush (journal_t *); +extern void jbd2_journal_lock_updates (journal_t *); +extern void jbd2_journal_unlock_updates (journal_t *); + +extern journal_t * jbd2_journal_init_dev(struct block_device *bdev, + struct block_device *fs_dev, + unsigned long long start, int len, int bsize); +extern journal_t * jbd2_journal_init_inode (struct inode *); +extern int jbd2_journal_update_format (journal_t *); +extern int jbd2_journal_check_used_features + (journal_t *, unsigned long, unsigned long, unsigned long); +extern int jbd2_journal_check_available_features + (journal_t *, unsigned long, unsigned long, unsigned long); +extern int jbd2_journal_set_features + (journal_t *, unsigned long, unsigned long, unsigned long); +extern void jbd2_journal_clear_features + (journal_t *, unsigned 
long, unsigned long, unsigned long); +extern int jbd2_journal_load (journal_t *journal); +extern int jbd2_journal_destroy (journal_t *); +extern int jbd2_journal_recover (journal_t *journal); +extern int jbd2_journal_wipe (journal_t *, int); +extern int jbd2_journal_skip_recovery (journal_t *); +extern void jbd2_journal_update_sb_errno(journal_t *); +extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t, + unsigned long, int); +extern void __jbd2_journal_abort_hard (journal_t *); +extern void jbd2_journal_abort (journal_t *, int); +extern int jbd2_journal_errno (journal_t *); +extern void jbd2_journal_ack_err (journal_t *); +extern int jbd2_journal_clear_err (journal_t *); +extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); +extern int jbd2_journal_force_commit(journal_t *); +extern int jbd2_journal_force_commit_nested(journal_t *); +extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_ranged_write(handle_t *handle, + struct jbd2_inode *inode, loff_t start_byte, + loff_t length); +extern int jbd2_journal_inode_ranged_wait(handle_t *handle, + struct jbd2_inode *inode, loff_t start_byte, + loff_t length); +extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, + struct jbd2_inode *inode, loff_t new_size); +extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); +extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode); + +/* + * journal_head management + */ +struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh); +struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh); +void jbd2_journal_put_journal_head(struct journal_head *jh); + +/* + * handle management + */ +extern struct kmem_cache *jbd2_handle_cache; + +static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags) +{ + return kmem_cache_zalloc(jbd2_handle_cache, gfp_flags); +} + +static inline void jbd2_free_handle(handle_t *handle) +{ + kmem_cache_free(jbd2_handle_cache, handle); +} + +/* + * jbd2_inode management (optional, for those file systems that want to use + * dynamically allocated jbd2_inode structures) + */ +extern struct kmem_cache *jbd2_inode_cache; + +static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags) +{ + return kmem_cache_alloc(jbd2_inode_cache, gfp_flags); +} + +static inline void jbd2_free_inode(struct jbd2_inode *jinode) +{ + kmem_cache_free(jbd2_inode_cache, jinode); +} + +/* Primary revoke support */ +#define JOURNAL_REVOKE_DEFAULT_HASH 256 +extern int jbd2_journal_init_revoke(journal_t *, int); +extern void jbd2_journal_destroy_revoke_record_cache(void); +extern void jbd2_journal_destroy_revoke_table_cache(void); +extern int __init jbd2_journal_init_revoke_record_cache(void); +extern int __init jbd2_journal_init_revoke_table_cache(void); + +extern void jbd2_journal_destroy_revoke(journal_t *); +extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *); +extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *); +extern void jbd2_journal_write_revoke_records(transaction_t *transaction, + struct list_head *log_bufs); + +/* Recovery revoke support */ +extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t); +extern int jbd2_journal_test_revoke(journal_t *, unsigned long long, tid_t); +extern void jbd2_journal_clear_revoke(journal_t *); +extern 
void jbd2_journal_switch_revoke_table(journal_t *journal); +extern void jbd2_clear_buffer_revoked_flags(journal_t *journal); + +/* + * The log thread user interface: + * + * Request space in the current transaction, and force transaction commit + * transitions on demand. + */ + +int jbd2_log_start_commit(journal_t *journal, tid_t tid); +int __jbd2_log_start_commit(journal_t *journal, tid_t tid); +int jbd2_journal_start_commit(journal_t *journal, tid_t *tid); +int jbd2_log_wait_commit(journal_t *journal, tid_t tid); +int jbd2_transaction_committed(journal_t *journal, tid_t tid); +int jbd2_complete_transaction(journal_t *journal, tid_t tid); +int jbd2_log_do_checkpoint(journal_t *journal); +int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid); + +void __jbd2_log_wait_for_space(journal_t *journal); +extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); +extern int jbd2_cleanup_journal_tail(journal_t *); + +/* + * is_journal_abort + * + * Simple test wrapper function to test the JBD2_ABORT state flag. This + * bit, when set, indicates that we have had a fatal error somewhere, + * either inside the journaling layer or indicated to us by the client + * (eg. ext3), and that we and should not commit any further + * transactions. + */ + +static inline int is_journal_aborted(journal_t *journal) +{ + return journal->j_flags & JBD2_ABORT; +} + +static inline int is_handle_aborted(handle_t *handle) +{ + if (handle->h_aborted || !handle->h_transaction) + return 1; + return is_journal_aborted(handle->h_transaction->t_journal); +} + +static inline void jbd2_journal_abort_handle(handle_t *handle) +{ + handle->h_aborted = 1; +} + +#endif /* __KERNEL__ */ + +/* Comparison functions for transaction IDs: perform comparisons using + * modulo arithmetic so that they work over sequence number wraps. */ + +static inline int tid_gt(tid_t x, tid_t y) +{ + int difference = (x - y); + return (difference > 0); +} + +static inline int tid_geq(tid_t x, tid_t y) +{ + int difference = (x - y); + return (difference >= 0); +} + +extern int jbd2_journal_blocks_per_page(struct inode *inode); +extern size_t journal_tag_bytes(journal_t *journal); + +static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j) +{ + return jbd2_has_feature_csum2(j) || jbd2_has_feature_csum3(j); +} + +static inline int jbd2_journal_has_csum_v2or3(journal_t *journal) +{ + WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) && + journal->j_chksum_driver == NULL); + + return journal->j_chksum_driver != NULL; +} + +/* + * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for + * transaction control blocks. + */ +#define JBD2_CONTROL_BLOCKS_SHIFT 5 + +/* + * Return the minimum number of blocks which must be free in the journal + * before a new transaction may be started. Must be called under j_state_lock. + */ +static inline int jbd2_space_needed(journal_t *journal) +{ + int nblocks = journal->j_max_transaction_buffers; + return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT); +} + +/* + * Return number of free blocks in the log. Must be called under j_state_lock. 
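/*
 * Editor's note -- illustrative sketch only, not upstream code.  The
 * transaction handle API declared earlier in this header is normally used
 * as a start/modify/stop sequence; error handling is trimmed and the
 * buffer_head is assumed to have been read by the caller:
 */
static int example_update_metadata(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle;
	int err;

	handle = jbd2_journal_start(journal, 1);	/* reserve one block */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = jbd2_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data under the handle ... */
		err = jbd2_journal_dirty_metadata(handle, bh);
	}

	jbd2_journal_stop(handle);
	return err;
}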
+ */ +static inline unsigned long jbd2_log_space_left(journal_t *journal) +{ + /* Allow for rounding errors */ + long free = journal->j_free - 32; + + if (journal->j_committing_transaction) { + unsigned long committing = atomic_read(&journal-> + j_committing_transaction->t_outstanding_credits); + + /* Transaction + control blocks */ + free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT); + } + return max_t(long, free, 0); +} + +/* + * Definitions which augment the buffer_head layer + */ + +/* journaling buffer types */ +#define BJ_None 0 /* Not journaled */ +#define BJ_Metadata 1 /* Normal journaled metadata */ +#define BJ_Forget 2 /* Buffer superseded by this transaction */ +#define BJ_Shadow 3 /* Buffer contents being shadowed to the log */ +#define BJ_Reserved 4 /* Buffer is reserved for access by journal */ +#define BJ_Types 5 + +extern int jbd_blocks_per_page(struct inode *inode); + +/* JBD uses a CRC32 checksum */ +#define JBD_MAX_CHECKSUM_SIZE 4 + +static inline u32 jbd2_chksum(journal_t *journal, u32 crc, + const void *address, unsigned int length) +{ + struct { + struct shash_desc shash; + char ctx[JBD_MAX_CHECKSUM_SIZE]; + } desc; + int err; + + BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) > + JBD_MAX_CHECKSUM_SIZE); + + desc.shash.tfm = journal->j_chksum_driver; + desc.shash.flags = 0; + *(u32 *)desc.ctx = crc; + + err = crypto_shash_update(&desc.shash, address, length); + BUG_ON(err); + + return *(u32 *)desc.ctx; +} + +/* Return most recent uncommitted transaction */ +static inline tid_t jbd2_get_latest_transaction(journal_t *journal) +{ + tid_t tid; + + read_lock(&journal->j_state_lock); + tid = journal->j_commit_request; + if (journal->j_running_transaction) + tid = journal->j_running_transaction->t_tid; + read_unlock(&journal->j_state_lock); + return tid; +} + +#ifdef __KERNEL__ + +#define buffer_trace_init(bh) do {} while (0) +#define print_buffer_fields(bh) do {} while (0) +#define print_buffer_trace(bh) do {} while (0) +#define BUFFER_TRACE(bh, info) do {} while (0) +#define BUFFER_TRACE2(bh, bh2, info) do {} while (0) +#define JBUFFER_TRACE(jh, info) do {} while (0) + +#endif /* __KERNEL__ */ + +#define EFSBADCRC EBADMSG /* Bad CRC detected */ +#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ + +#endif /* _LINUX_JBD2_H */ diff --git a/include/linux/jhash.h b/include/linux/jhash.h new file mode 100644 index 000000000..8037850f3 --- /dev/null +++ b/include/linux/jhash.h @@ -0,0 +1,174 @@ +#ifndef _LINUX_JHASH_H +#define _LINUX_JHASH_H + +/* jhash.h: Jenkins hash support. + * + * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net) + * + * http://burtleburtle.net/bob/hash/ + * + * These are the credits from Bob's sources: + * + * lookup3.c, by Bob Jenkins, May 2006, Public Domain. + * + * These are functions for producing 32-bit hashes for hash table lookup. + * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() + * are externally useful functions. Routines to test the hash are included + * if SELF_TEST is defined. You can use this free for any purpose. It's in + * the public domain. It has no warranty. + * + * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu) + * + * I've modified Bob's hash to be useful in the Linux kernel, and + * any bugs present are my fault. 
+ * Jozsef + */ +#include +#include + +/* Best hash sizes are of power of two */ +#define jhash_size(n) ((u32)1<<(n)) +/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */ +#define jhash_mask(n) (jhash_size(n)-1) + +/* __jhash_mix -- mix 3 32-bit values reversibly. */ +#define __jhash_mix(a, b, c) \ +{ \ + a -= c; a ^= rol32(c, 4); c += b; \ + b -= a; b ^= rol32(a, 6); a += c; \ + c -= b; c ^= rol32(b, 8); b += a; \ + a -= c; a ^= rol32(c, 16); c += b; \ + b -= a; b ^= rol32(a, 19); a += c; \ + c -= b; c ^= rol32(b, 4); b += a; \ +} + +/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */ +#define __jhash_final(a, b, c) \ +{ \ + c ^= b; c -= rol32(b, 14); \ + a ^= c; a -= rol32(c, 11); \ + b ^= a; b -= rol32(a, 25); \ + c ^= b; c -= rol32(b, 16); \ + a ^= c; a -= rol32(c, 4); \ + b ^= a; b -= rol32(a, 14); \ + c ^= b; c -= rol32(b, 24); \ +} + +/* An arbitrary initial parameter */ +#define JHASH_INITVAL 0xdeadbeef + +/* jhash - hash an arbitrary key + * @k: sequence of bytes as key + * @length: the length of the key + * @initval: the previous hash, or an arbitray value + * + * The generic version, hashes an arbitrary sequence of bytes. + * No alignment or length assumptions are made about the input key. + * + * Returns the hash value of the key. The result depends on endianness. + */ +static inline u32 jhash(const void *key, u32 length, u32 initval) +{ + u32 a, b, c; + const u8 *k = key; + + /* Set up the internal state */ + a = b = c = JHASH_INITVAL + length + initval; + + /* All but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) { + a += __get_unaligned_cpu32(k); + b += __get_unaligned_cpu32(k + 4); + c += __get_unaligned_cpu32(k + 8); + __jhash_mix(a, b, c); + length -= 12; + k += 12; + } + /* Last block: affect all 32 bits of (c) */ + switch (length) { + case 12: c += (u32)k[11]<<24; /* fall through */ + case 11: c += (u32)k[10]<<16; /* fall through */ + case 10: c += (u32)k[9]<<8; /* fall through */ + case 9: c += k[8]; /* fall through */ + case 8: b += (u32)k[7]<<24; /* fall through */ + case 7: b += (u32)k[6]<<16; /* fall through */ + case 6: b += (u32)k[5]<<8; /* fall through */ + case 5: b += k[4]; /* fall through */ + case 4: a += (u32)k[3]<<24; /* fall through */ + case 3: a += (u32)k[2]<<16; /* fall through */ + case 2: a += (u32)k[1]<<8; /* fall through */ + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} + +/* jhash2 - hash an array of u32's + * @k: the key which must be an array of u32's + * @length: the number of u32's in the key + * @initval: the previous hash, or an arbitray value + * + * Returns the hash value of the key. 
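/*
 * Editor's note -- illustrative sketch only, not upstream code.  A typical
 * use of jhash() above is bucketing a fixed-size key into a power-of-two
 * hash table, masking with jhash_mask() instead of taking a modulo.  The
 * flow structure and table size below are hypothetical:
 */
struct example_flow_key {
	u32 saddr, daddr;
	u16 sport, dport;
};

#define EXAMPLE_HASH_BITS	10	/* 1024 buckets */

static inline u32 example_flow_bucket(const struct example_flow_key *key,
				      u32 seed)
{
	return jhash(key, sizeof(*key), seed) & jhash_mask(EXAMPLE_HASH_BITS);
}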
+ */ +static inline u32 jhash2(const u32 *k, u32 length, u32 initval) +{ + u32 a, b, c; + + /* Set up the internal state */ + a = b = c = JHASH_INITVAL + (length<<2) + initval; + + /* Handle most of the key */ + while (length > 3) { + a += k[0]; + b += k[1]; + c += k[2]; + __jhash_mix(a, b, c); + length -= 3; + k += 3; + } + + /* Handle the last 3 u32's */ + switch (length) { + case 3: c += k[2]; /* fall through */ + case 2: b += k[1]; /* fall through */ + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} + + +/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */ +static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) +{ + a += initval; + b += initval; + c += initval; + + __jhash_final(a, b, c); + + return c; +} + +static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) +{ + return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2)); +} + +static inline u32 jhash_2words(u32 a, u32 b, u32 initval) +{ + return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); +} + +static inline u32 jhash_1word(u32 a, u32 initval) +{ + return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2)); +} + +#endif /* _LINUX_JHASH_H */ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h new file mode 100644 index 000000000..fa9282425 --- /dev/null +++ b/include/linux/jiffies.h @@ -0,0 +1,463 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_JIFFIES_H +#define _LINUX_JIFFIES_H + +#include +#include +#include +#include +#include +#include +#include /* for HZ */ +#include + +/* + * The following defines establish the engineering parameters of the PLL + * model. The HZ variable establishes the timer interrupt frequency, 100 Hz + * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the + * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the + * nearest power of two in order to avoid hardware multiply operations. + */ +#if HZ >= 12 && HZ < 24 +# define SHIFT_HZ 4 +#elif HZ >= 24 && HZ < 48 +# define SHIFT_HZ 5 +#elif HZ >= 48 && HZ < 96 +# define SHIFT_HZ 6 +#elif HZ >= 96 && HZ < 192 +# define SHIFT_HZ 7 +#elif HZ >= 192 && HZ < 384 +# define SHIFT_HZ 8 +#elif HZ >= 384 && HZ < 768 +# define SHIFT_HZ 9 +#elif HZ >= 768 && HZ < 1536 +# define SHIFT_HZ 10 +#elif HZ >= 1536 && HZ < 3072 +# define SHIFT_HZ 11 +#elif HZ >= 3072 && HZ < 6144 +# define SHIFT_HZ 12 +#elif HZ >= 6144 && HZ < 12288 +# define SHIFT_HZ 13 +#else +# error Invalid value of HZ. +#endif + +/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can + * improve accuracy by shifting LSH bits, hence calculating: + * (NOM << LSH) / DEN + * This however means trouble for large NOM, because (NOM << LSH) may no + * longer fit in 32 bits. The following way of calculating this gives us + * some slack, under the following conditions: + * - (NOM / DEN) fits in (32 - LSH) bits. + * - (NOM % DEN) fits in (32 - LSH) bits. + */ +#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \ + + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN)) + +/* LATCH is used in the interval timer and ftape setup. 
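/*
 * Editor's note -- worked example, not upstream code.  SH_DIV() above
 * computes a rounded (NOM << LSH) / DEN without ever forming the shifted
 * numerator.  With small numbers:
 *
 *	SH_DIV(78, 10, 4)
 *	  = ((78 / 10) << 4) + (((78 % 10) << 4) + 10 / 2) / 10
 *	  = (7 << 4)         + ((8 << 4) + 5) / 10
 *	  = 112              + 133 / 10
 *	  = 112 + 13 = 125
 *
 * i.e. 78/10 scaled by 16 is 124.8, rounded to 125, while the naive
 * (78 << 4) / 10 truncates to 124.  For a large NOM the naive shift can
 * also overflow 32 bits, which SH_DIV avoids provided NOM / DEN and
 * NOM % DEN both fit in (32 - LSH) bits, as stated above.
 */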
*/ +#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */ + +extern int register_refined_jiffies(long clock_tick_rate); + +/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ +#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) + +/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */ +#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ) + +/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ +#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) + +#ifndef __jiffy_arch_data +#define __jiffy_arch_data +#endif + +/* + * The 64-bit value is not atomic - you MUST NOT read it + * without sampling the sequence number in jiffies_lock. + * get_jiffies_64() will do this for you as appropriate. + */ +extern u64 __cacheline_aligned_in_smp jiffies_64; +extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies; + +#if (BITS_PER_LONG < 64) +u64 get_jiffies_64(void); +#else +static inline u64 get_jiffies_64(void) +{ + return (u64)jiffies; +} +#endif + +/* + * These inlines deal with timer wrapping correctly. You are + * strongly encouraged to use them + * 1. Because people otherwise forget + * 2. Because if the timer wrap changes in future you won't have to + * alter your driver code. + * + * time_after(a,b) returns true if the time a is after time b. + * + * Do this with "<0" and ">=0" to only test the sign of the result. A + * good compiler would generate better code (and a really good compiler + * wouldn't care). Gcc is currently neither. + */ +#define time_after(a,b) \ + (typecheck(unsigned long, a) && \ + typecheck(unsigned long, b) && \ + ((long)((b) - (a)) < 0)) +#define time_before(a,b) time_after(b,a) + +#define time_after_eq(a,b) \ + (typecheck(unsigned long, a) && \ + typecheck(unsigned long, b) && \ + ((long)((a) - (b)) >= 0)) +#define time_before_eq(a,b) time_after_eq(b,a) + +/* + * Calculate whether a is in the range of [b, c]. + */ +#define time_in_range(a,b,c) \ + (time_after_eq(a,b) && \ + time_before_eq(a,c)) + +/* + * Calculate whether a is in the range of [b, c). + */ +#define time_in_range_open(a,b,c) \ + (time_after_eq(a,b) && \ + time_before(a,c)) + +/* Same as above, but does so with platform independent 64bit types. + * These must be used when utilizing jiffies_64 (i.e. return value of + * get_jiffies_64() */ +#define time_after64(a,b) \ + (typecheck(__u64, a) && \ + typecheck(__u64, b) && \ + ((__s64)((b) - (a)) < 0)) +#define time_before64(a,b) time_after64(b,a) + +#define time_after_eq64(a,b) \ + (typecheck(__u64, a) && \ + typecheck(__u64, b) && \ + ((__s64)((a) - (b)) >= 0)) +#define time_before_eq64(a,b) time_after_eq64(b,a) + +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && \ + time_before_eq64(a, c)) + +/* + * These four macros compare jiffies and 'a' for convenience. 
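/*
 * Editor's note -- illustrative sketch only, not upstream code.  The usual
 * pattern for the wrap-safe comparisons above is an absolute deadline in
 * jiffies; the device type and ready test are hypothetical, and
 * msecs_to_jiffies() is defined later in this header:
 */
static int example_wait_until_ready(struct example_device *dev)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(100);

	while (!example_device_ready(dev)) {	/* hypothetical poll */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		cpu_relax();
	}
	return 0;
}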
+ */ + +/* time_is_before_jiffies(a) return true if a is before jiffies */ +#define time_is_before_jiffies(a) time_after(jiffies, a) +#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) + +/* time_is_after_jiffies(a) return true if a is after jiffies */ +#define time_is_after_jiffies(a) time_before(jiffies, a) +#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a) + +/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/ +#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a) +#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a) + +/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/ +#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a) +#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a) + +/* + * Have the 32 bit jiffies value wrap 5 minutes after boot + * so jiffies wrap bugs show up earlier. + */ +#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) + +/* + * Change timeval to jiffies, trying to avoid the + * most obvious overflows.. + * + * And some not so obvious. + * + * Note that we don't want to return LONG_MAX, because + * for various timeout reasons we often end up having + * to wait "jiffies+1" in order to guarantee that we wait + * at _least_ "jiffies" - so "jiffies+1" had better still + * be positive. + */ +#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1) + +extern unsigned long preset_lpj; + +/* + * We want to do realistic conversions of time so we need to use the same + * values the update wall clock code uses as the jiffies size. This value + * is: TICK_NSEC (which is defined in timex.h). This + * is a constant and is in nanoseconds. We will use scaled math + * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and + * NSEC_JIFFIE_SC. Note that these defines contain nothing but + * constants and so are computed at compile time. SHIFT_HZ (computed in + * timex.h) adjusts the scaling for different HZ values. + + * Scaled math??? What is that? + * + * Scaled math is a way to do integer math on values that would, + * otherwise, either overflow, underflow, or cause undesired div + * instructions to appear in the execution path. In short, we "scale" + * up the operands so they take more bits (more precision, less + * underflow), do the desired operation and then "scale" the result back + * by the same amount. If we do the scaling by shifting we avoid the + * costly mpy and the dastardly div instructions. + + * Suppose, for example, we want to convert from seconds to jiffies + * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The + * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We + * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we + * might calculate at compile time, however, the result will only have + * about 3-4 bits of precision (less for smaller values of HZ). + * + * So, we scale as follows: + * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE); + * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE; + * Then we make SCALE a power of two so: + * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE; + * Now we define: + * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) + * jiff = (sec * SEC_CONV) >> SCALE; + * + * Often the math we use will expand beyond 32-bits so we tell C how to + * do this and pass the 64-bit result of the mpy through the ">> SCALE" + * which should take the result back to 32-bits. 
We want this expansion + * to capture as much precision as possible. At the same time we don't + * want to overflow so we pick the SCALE to avoid this. In this file, + * that means using a different scale for each range of HZ values (as + * defined in timex.h). + * + * For those who want to know, gcc will give a 64-bit result from a "*" + * operator if the result is a long long AND at least one of the + * operands is cast to long long (usually just prior to the "*" so as + * not to confuse it into thinking it really has a 64-bit operand, + * which, buy the way, it can do, but it takes more code and at least 2 + * mpys). + + * We also need to be aware that one second in nanoseconds is only a + * couple of bits away from overflowing a 32-bit word, so we MUST use + * 64-bits to get the full range time in nanoseconds. + + */ + +/* + * Here are the scales we will use. One for seconds, nanoseconds and + * microseconds. + * + * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and + * check if the sign bit is set. If not, we bump the shift count by 1. + * (Gets an extra bit of precision where we can use it.) + * We know it is set for HZ = 1024 and HZ = 100 not for 1000. + * Haven't tested others. + + * Limits of cpp (for #if expressions) only long (no long long), but + * then we only need the most signicant bit. + */ + +#define SEC_JIFFIE_SC (31 - SHIFT_HZ) +#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000) +#undef SEC_JIFFIE_SC +#define SEC_JIFFIE_SC (32 - SHIFT_HZ) +#endif +#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) +#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ + TICK_NSEC -1) / (u64)TICK_NSEC)) + +#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ + TICK_NSEC -1) / (u64)TICK_NSEC)) +/* + * The maximum jiffie value is (MAX_INT >> 1). Here we translate that + * into seconds. The 64-bit case will overflow if we are not careful, + * so use the messy SH_DIV macro to do it. Still all constants. + */ +#if BITS_PER_LONG < 64 +# define MAX_SEC_IN_JIFFIES \ + (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC) +#else /* take care of overflow on 64 bits machines */ +# define MAX_SEC_IN_JIFFIES \ + (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1) + +#endif + +/* + * Convert various time units to each other: + */ +extern unsigned int jiffies_to_msecs(const unsigned long j); +extern unsigned int jiffies_to_usecs(const unsigned long j); + +static inline u64 jiffies_to_nsecs(const unsigned long j) +{ + return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; +} + +extern u64 jiffies64_to_nsecs(u64 j); + +extern unsigned long __msecs_to_jiffies(const unsigned int m); +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) +/* + * HZ is equal to or smaller than 1000, and 1000 is a nice round + * multiple of HZ, divide with the factor between them, but round + * upwards: + */ +static inline unsigned long _msecs_to_jiffies(const unsigned int m) +{ + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +} +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) +/* + * HZ is larger than 1000, and HZ is a nice round multiple of 1000 - + * simply multiply with the factor between them. 
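/*
 * Editor's note -- worked example for the HZ <= 1000 helper above, not
 * upstream code.  Assuming HZ = 250, MSEC_PER_SEC / HZ is 4 ms per jiffy:
 *
 *	_msecs_to_jiffies(10) = (10 + 4 - 1) / 4 = 3 jiffies  (12 ms)
 *	_msecs_to_jiffies(8)  = (8  + 4 - 1) / 4 = 2 jiffies  ( 8 ms)
 *
 * i.e. the rounding up guarantees the caller never waits for less than
 * the requested time.
 */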
+ * + * But first make sure the multiplication result cannot overflow: + */ +static inline unsigned long _msecs_to_jiffies(const unsigned int m) +{ + if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; + return m * (HZ / MSEC_PER_SEC); +} +#else +/* + * Generic case - multiply, round and divide. But first check that if + * we are doing a net multiplication, that we wouldn't overflow: + */ +static inline unsigned long _msecs_to_jiffies(const unsigned int m) +{ + if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; + + return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32; +} +#endif +/** + * msecs_to_jiffies: - convert milliseconds to jiffies + * @m: time in milliseconds + * + * conversion is done as follows: + * + * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET) + * + * - 'too large' values [that would result in larger than + * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. + * + * - all other values are converted to jiffies by either multiplying + * the input value by a factor or dividing it with a factor and + * handling any 32-bit overflows. + * for the details see __msecs_to_jiffies() + * + * msecs_to_jiffies() checks for the passed in value being a constant + * via __builtin_constant_p() allowing gcc to eliminate most of the + * code, __msecs_to_jiffies() is called if the value passed does not + * allow constant folding and the actual conversion must be done at + * runtime. + * the HZ range specific helpers _msecs_to_jiffies() are called both + * directly here and from __msecs_to_jiffies() in the case where + * constant folding is not possible. + */ +static __always_inline unsigned long msecs_to_jiffies(const unsigned int m) +{ + if (__builtin_constant_p(m)) { + if ((int)m < 0) + return MAX_JIFFY_OFFSET; + return _msecs_to_jiffies(m); + } else { + return __msecs_to_jiffies(m); + } +} + +extern unsigned long __usecs_to_jiffies(const unsigned int u); +#if !(USEC_PER_SEC % HZ) +static inline unsigned long _usecs_to_jiffies(const unsigned int u) +{ + return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); +} +#else +static inline unsigned long _usecs_to_jiffies(const unsigned int u) +{ + return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32) + >> USEC_TO_HZ_SHR32; +} +#endif + +/** + * usecs_to_jiffies: - convert microseconds to jiffies + * @u: time in microseconds + * + * conversion is done as follows: + * + * - 'too large' values [that would result in larger than + * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. + * + * - all other values are converted to jiffies by either multiplying + * the input value by a factor or dividing it with a factor and + * handling any 32-bit overflows as for msecs_to_jiffies. + * + * usecs_to_jiffies() checks for the passed in value being a constant + * via __builtin_constant_p() allowing gcc to eliminate most of the + * code, __usecs_to_jiffies() is called if the value passed does not + * allow constant folding and the actual conversion must be done at + * runtime. + * the HZ range specific helpers _usecs_to_jiffies() are called both + * directly here and from __msecs_to_jiffies() in the case where + * constant folding is not possible. 
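/*
 * Editor's note -- illustrative sketch only, not upstream code.  With a
 * constant argument msecs_to_jiffies() above folds at compile time; a
 * common pattern is building a relative timeout for the scheduler
 * (schedule_timeout_uninterruptible() is declared in the scheduler
 * headers, not here):
 */
static void example_short_delay(void)
{
	/* sleep for at least 20 ms, whatever HZ is configured to */
	schedule_timeout_uninterruptible(msecs_to_jiffies(20));
}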
+ */ +static __always_inline unsigned long usecs_to_jiffies(const unsigned int u) +{ + if (__builtin_constant_p(u)) { + if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; + return _usecs_to_jiffies(u); + } else { + return __usecs_to_jiffies(u); + } +} + +extern unsigned long timespec64_to_jiffies(const struct timespec64 *value); +extern void jiffies_to_timespec64(const unsigned long jiffies, + struct timespec64 *value); +static inline unsigned long timespec_to_jiffies(const struct timespec *value) +{ + struct timespec64 ts = timespec_to_timespec64(*value); + + return timespec64_to_jiffies(&ts); +} + +static inline void jiffies_to_timespec(const unsigned long jiffies, + struct timespec *value) +{ + struct timespec64 ts; + + jiffies_to_timespec64(jiffies, &ts); + *value = timespec64_to_timespec(ts); +} + +extern unsigned long timeval_to_jiffies(const struct timeval *value); +extern void jiffies_to_timeval(const unsigned long jiffies, + struct timeval *value); + +extern clock_t jiffies_to_clock_t(unsigned long x); +static inline clock_t jiffies_delta_to_clock_t(long delta) +{ + return jiffies_to_clock_t(max(0L, delta)); +} + +static inline unsigned int jiffies_delta_to_msecs(long delta) +{ + return jiffies_to_msecs(max(0L, delta)); +} + +extern unsigned long clock_t_to_jiffies(unsigned long x); +extern u64 jiffies_64_to_clock_t(u64 x); +extern u64 nsec_to_clock_t(u64 x); +extern u64 nsecs_to_jiffies64(u64 n); +extern unsigned long nsecs_to_jiffies(u64 n); + +#define TIMESTAMP_SIZE 30 + +#endif diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h new file mode 100644 index 000000000..9fb870524 --- /dev/null +++ b/include/linux/journal-head.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/journal-head.h + * + * buffer_head fields for JBD + * + * 27 May 2001 Andrew Morton + * Created - pulled out of fs.h + */ + +#ifndef JOURNAL_HEAD_H_INCLUDED +#define JOURNAL_HEAD_H_INCLUDED + +typedef unsigned int tid_t; /* Unique transaction ID */ +typedef struct transaction_s transaction_t; /* Compound transaction type */ + + +struct buffer_head; + +struct journal_head { + /* + * Points back to our buffer_head. [jbd_lock_bh_journal_head()] + */ + struct buffer_head *b_bh; + + /* + * Reference count - see description in journal.c + * [jbd_lock_bh_journal_head()] + */ + int b_jcount; + + /* + * Journalling list for this buffer [jbd_lock_bh_state()] + * NOTE: We *cannot* combine this with b_modified into a bitfield + * as gcc would then (which the C standard allows but which is + * very unuseful) make 64-bit accesses to the bitfield and clobber + * b_jcount if its update races with bitfield modification. + */ + unsigned b_jlist; + + /* + * This flag signals the buffer has been modified by + * the currently running transaction + * [jbd_lock_bh_state()] + */ + unsigned b_modified; + + /* + * Copy of the buffer data frozen for writing to the log. + * [jbd_lock_bh_state()] + */ + char *b_frozen_data; + + /* + * Pointer to a saved copy of the buffer containing no uncommitted + * deallocation references, so that allocations can avoid overwriting + * uncommitted deletes. [jbd_lock_bh_state()] + */ + char *b_committed_data; + + /* + * Pointer to the compound transaction which owns this buffer's + * metadata: either the running transaction or the committing + * transaction (if there is one). Only applies to buffers on a + * transaction's data or metadata journaling list. 
+ * [j_list_lock] [jbd_lock_bh_state()] + * Either of these locks is enough for reading, both are needed for + * changes. + */ + transaction_t *b_transaction; + + /* + * Pointer to the running compound transaction which is currently + * modifying the buffer's metadata, if there was already a transaction + * committing it when the new transaction touched it. + * [t_list_lock] [jbd_lock_bh_state()] + */ + transaction_t *b_next_transaction; + + /* + * Doubly-linked list of buffers on a transaction's data, metadata or + * forget queue. [t_list_lock] [jbd_lock_bh_state()] + */ + struct journal_head *b_tnext, *b_tprev; + + /* + * Pointer to the compound transaction against which this buffer + * is checkpointed. Only dirty buffers can be checkpointed. + * [j_list_lock] + */ + transaction_t *b_cp_transaction; + + /* + * Doubly-linked list of buffers still remaining to be flushed + * before an old transaction can be checkpointed. + * [j_list_lock] + */ + struct journal_head *b_cpnext, *b_cpprev; + + /* Trigger type */ + struct jbd2_buffer_trigger_type *b_triggers; + + /* Trigger type for the committing transaction's frozen data */ + struct jbd2_buffer_trigger_type *b_frozen_triggers; +}; + +#endif /* JOURNAL_HEAD_H_INCLUDED */ diff --git a/include/linux/joystick.h b/include/linux/joystick.h new file mode 100644 index 000000000..5153f5b92 --- /dev/null +++ b/include/linux/joystick.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 1996-2000 Vojtech Pavlik + * + * Sponsored by SuSE + */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _LINUX_JOYSTICK_H +#define _LINUX_JOYSTICK_H + +#include + +#if BITS_PER_LONG == 64 +#define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_64 +#elif BITS_PER_LONG == 32 +#define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_32 +#else +#error Unexpected BITS_PER_LONG +#endif +#endif /* _LINUX_JOYSTICK_H */ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h new file mode 100644 index 000000000..4c3e77687 --- /dev/null +++ b/include/linux/jump_label.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_JUMP_LABEL_H +#define _LINUX_JUMP_LABEL_H + +/* + * Jump label support + * + * Copyright (C) 2009-2012 Jason Baron + * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra + * + * DEPRECATED API: + * + * The use of 'struct static_key' directly, is now DEPRECATED. In addition + * static_key_{true,false}() is also DEPRECATED. 
IE DO NOT use the following: + * + * struct static_key false = STATIC_KEY_INIT_FALSE; + * struct static_key true = STATIC_KEY_INIT_TRUE; + * static_key_true() + * static_key_false() + * + * The updated API replacements are: + * + * DEFINE_STATIC_KEY_TRUE(key); + * DEFINE_STATIC_KEY_FALSE(key); + * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); + * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count); + * static_branch_likely() + * static_branch_unlikely() + * + * Jump labels provide an interface to generate dynamic branches using + * self-modifying code. Assuming toolchain and architecture support, if we + * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)", + * an "if (static_branch_unlikely(&key))" statement is an unconditional branch + * (which defaults to false - and the true block is placed out of line). + * Similarly, we can define an initially true key via + * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same + * "if (static_branch_unlikely(&key))", in which case we will generate an + * unconditional branch to the out-of-line true branch. Keys that are + * initially true or false can be using in both static_branch_unlikely() + * and static_branch_likely() statements. + * + * At runtime we can change the branch target by setting the key + * to true via a call to static_branch_enable(), or false using + * static_branch_disable(). If the direction of the branch is switched by + * these calls then we run-time modify the branch target via a + * no-op -> jump or jump -> no-op conversion. For example, for an + * initially false key that is used in an "if (static_branch_unlikely(&key))" + * statement, setting the key to true requires us to patch in a jump + * to the out-of-line of true branch. + * + * In addition to static_branch_{enable,disable}, we can also reference count + * the key or branch direction via static_branch_{inc,dec}. Thus, + * static_branch_inc() can be thought of as a 'make more true' and + * static_branch_dec() as a 'make more false'. + * + * Since this relies on modifying code, the branch modifying functions + * must be considered absolute slow paths (machine wide synchronization etc.). + * OTOH, since the affected branches are unconditional, their runtime overhead + * will be absolutely minimal, esp. in the default (off) case where the total + * effect is a single NOP of appropriate size. The on case will patch in a jump + * to the out-of-line block. + * + * When the control is directly exposed to userspace, it is prudent to delay the + * decrement to avoid high frequency code modifications which can (and do) + * cause significant performance degradation. Struct static_key_deferred and + * static_key_slow_dec_deferred() provide for this. + * + * Lacking toolchain and or architecture support, static keys fall back to a + * simple conditional branch. + * + * Additional babbling in: Documentation/static-keys.txt + */ + +#ifndef __ASSEMBLY__ + +#include +#include + +extern bool static_key_initialized; + +#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \ + "%s(): static key '%pS' used before call to jump_label_init()", \ + __func__, (key)) + +#ifdef CONFIG_JUMP_LABEL + +struct static_key { + atomic_t enabled; +/* + * Note: + * To make anonymous unions work with old compilers, the static + * initialization of them requires brackets. This creates a dependency + * on the order of the struct with the initializers. If any fields + * are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need + * to be modified. 
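/*
 * Editor's note -- illustrative sketch of the non-deprecated API described
 * above, not upstream code.  The key name and the traced function are
 * hypothetical:
 */
DEFINE_STATIC_KEY_FALSE(example_trace_key);

static void example_hot_path(void)
{
	/* compiles to a NOP at this site until the key is enabled */
	if (static_branch_unlikely(&example_trace_key))
		example_emit_trace();		/* hypothetical, out of line */
}

static void example_enable_tracing(void)
{
	/* slow path: patches every branch site to jump to the true block */
	static_branch_enable(&example_trace_key);
}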
+ * + * bit 0 => 1 if key is initially true + * 0 if initially false + * bit 1 => 1 if points to struct static_key_mod + * 0 if points to struct jump_entry + */ + union { + unsigned long type; + struct jump_entry *entries; + struct static_key_mod *next; + }; +}; + +#else +struct static_key { + atomic_t enabled; +}; +#endif /* CONFIG_JUMP_LABEL */ +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_JUMP_LABEL +#include +#endif + +#ifndef __ASSEMBLY__ + +enum jump_label_type { + JUMP_LABEL_NOP = 0, + JUMP_LABEL_JMP, +}; + +struct module; + +#ifdef CONFIG_JUMP_LABEL + +#define JUMP_TYPE_FALSE 0UL +#define JUMP_TYPE_TRUE 1UL +#define JUMP_TYPE_LINKED 2UL +#define JUMP_TYPE_MASK 3UL + +static __always_inline bool static_key_false(struct static_key *key) +{ + return arch_static_branch(key, false); +} + +static __always_inline bool static_key_true(struct static_key *key) +{ + return !arch_static_branch(key, true); +} + +extern struct jump_entry __start___jump_table[]; +extern struct jump_entry __stop___jump_table[]; + +extern void jump_label_init(void); +extern void jump_label_invalidate_initmem(void); +extern void jump_label_lock(void); +extern void jump_label_unlock(void); +extern void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type); +extern void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type); +extern int jump_label_text_reserved(void *start, void *end); +extern void static_key_slow_inc(struct static_key *key); +extern void static_key_slow_dec(struct static_key *key); +extern void static_key_slow_inc_cpuslocked(struct static_key *key); +extern void static_key_slow_dec_cpuslocked(struct static_key *key); +extern void jump_label_apply_nops(struct module *mod); +extern int static_key_count(struct static_key *key); +extern void static_key_enable(struct static_key *key); +extern void static_key_disable(struct static_key *key); +extern void static_key_enable_cpuslocked(struct static_key *key); +extern void static_key_disable_cpuslocked(struct static_key *key); + +/* + * We should be using ATOMIC_INIT() for initializing .enabled, but + * the inclusion of atomic.h is problematic for inclusion of jump_label.h + * in 'low-level' headers. Thus, we are initializing .enabled with a + * raw value, but have added a BUILD_BUG_ON() to catch any issues in + * jump_label_init() see: kernel/jump_label.c. 
+ */ +#define STATIC_KEY_INIT_TRUE \ + { .enabled = { 1 }, \ + { .entries = (void *)JUMP_TYPE_TRUE } } +#define STATIC_KEY_INIT_FALSE \ + { .enabled = { 0 }, \ + { .entries = (void *)JUMP_TYPE_FALSE } } + +#else /* !CONFIG_JUMP_LABEL */ + +#include +#include + +static inline int static_key_count(struct static_key *key) +{ + return atomic_read(&key->enabled); +} + +static __always_inline void jump_label_init(void) +{ + static_key_initialized = true; +} + +static inline void jump_label_invalidate_initmem(void) {} + +static __always_inline bool static_key_false(struct static_key *key) +{ + if (unlikely(static_key_count(key) > 0)) + return true; + return false; +} + +static __always_inline bool static_key_true(struct static_key *key) +{ + if (likely(static_key_count(key) > 0)) + return true; + return false; +} + +static inline void static_key_slow_inc(struct static_key *key) +{ + STATIC_KEY_CHECK_USE(key); + atomic_inc(&key->enabled); +} + +static inline void static_key_slow_dec(struct static_key *key) +{ + STATIC_KEY_CHECK_USE(key); + atomic_dec(&key->enabled); +} + +#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key) +#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key) + +static inline int jump_label_text_reserved(void *start, void *end) +{ + return 0; +} + +static inline void jump_label_lock(void) {} +static inline void jump_label_unlock(void) {} + +static inline int jump_label_apply_nops(struct module *mod) +{ + return 0; +} + +static inline void static_key_enable(struct static_key *key) +{ + STATIC_KEY_CHECK_USE(key); + + if (atomic_read(&key->enabled) != 0) { + WARN_ON_ONCE(atomic_read(&key->enabled) != 1); + return; + } + atomic_set(&key->enabled, 1); +} + +static inline void static_key_disable(struct static_key *key) +{ + STATIC_KEY_CHECK_USE(key); + + if (atomic_read(&key->enabled) != 1) { + WARN_ON_ONCE(atomic_read(&key->enabled) != 0); + return; + } + atomic_set(&key->enabled, 0); +} + +#define static_key_enable_cpuslocked(k) static_key_enable((k)) +#define static_key_disable_cpuslocked(k) static_key_disable((k)) + +#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } +#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } + +#endif /* CONFIG_JUMP_LABEL */ + +#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE +#define jump_label_enabled static_key_enabled + +/* -------------------------------------------------------------------------- */ + +/* + * Two type wrappers around static_key, such that we can use compile time + * type differentiation to emit the right code. + * + * All the below code is macros in order to play type games. 
+ */ + +struct static_key_true { + struct static_key key; +}; + +struct static_key_false { + struct static_key key; +}; + +#define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, } +#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, } + +#define DEFINE_STATIC_KEY_TRUE(name) \ + struct static_key_true name = STATIC_KEY_TRUE_INIT + +#define DEFINE_STATIC_KEY_TRUE_RO(name) \ + struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT + +#define DECLARE_STATIC_KEY_TRUE(name) \ + extern struct static_key_true name + +#define DEFINE_STATIC_KEY_FALSE(name) \ + struct static_key_false name = STATIC_KEY_FALSE_INIT + +#define DEFINE_STATIC_KEY_FALSE_RO(name) \ + struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT + +#define DECLARE_STATIC_KEY_FALSE(name) \ + extern struct static_key_false name + +#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count) \ + struct static_key_true name[count] = { \ + [0 ... (count) - 1] = STATIC_KEY_TRUE_INIT, \ + } + +#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count) \ + struct static_key_false name[count] = { \ + [0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \ + } + +extern bool ____wrong_branch_error(void); + +#define static_key_enabled(x) \ +({ \ + if (!__builtin_types_compatible_p(typeof(*x), struct static_key) && \ + !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\ + !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ + ____wrong_branch_error(); \ + static_key_count((struct static_key *)x) > 0; \ +}) + +#ifdef CONFIG_JUMP_LABEL + +/* + * Combine the right initial value (type) with the right branch order + * to generate the desired result. + * + * + * type\branch| likely (1) | unlikely (0) + * -----------+-----------------------+------------------ + * | | + * true (1) | ... | ... + * | NOP | JMP L + * | | 1: ... + * | L: ... | + * | | + * | | L: + * | | jmp 1b + * | | + * -----------+-----------------------+------------------ + * | | + * false (0) | ... | ... + * | JMP L | NOP + * | | 1: ... + * | L: ... | + * | | + * | | L: + * | | jmp 1b + * | | + * -----------+-----------------------+------------------ + * + * The initial value is encoded in the LSB of static_key::entries, + * type: 0 = false, 1 = true. + * + * The branch type is encoded in the LSB of jump_entry::key, + * branch: 0 = unlikely, 1 = likely. + * + * This gives the following logic table: + * + * enabled type branch instuction + * -----------------------------+----------- + * 0 0 0 | NOP + * 0 0 1 | JMP + * 0 1 0 | NOP + * 0 1 1 | JMP + * + * 1 0 0 | JMP + * 1 0 1 | NOP + * 1 1 0 | JMP + * 1 1 1 | NOP + * + * Which gives the following functions: + * + * dynamic: instruction = enabled ^ branch + * static: instruction = type ^ branch + * + * See jump_label_type() / jump_label_init_type(). 
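/*
 * Editor's note -- illustrative check of the "instruction = enabled ^
 * branch" rule above, not upstream code.  Evaluating the XOR reproduces
 * the table: enabled=0 with a likely branch (1) and enabled=1 with an
 * unlikely branch (0) need a JMP, the other two combinations a NOP.
 */
static inline bool example_site_needs_jump(bool enabled, bool branch_likely)
{
	return enabled ^ branch_likely;	/* true => JMP, false => NOP */
}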
+ */ + +#define static_branch_likely(x) \ +({ \ + bool branch; \ + if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \ + branch = !arch_static_branch(&(x)->key, true); \ + else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ + branch = !arch_static_branch_jump(&(x)->key, true); \ + else \ + branch = ____wrong_branch_error(); \ + likely(branch); \ +}) + +#define static_branch_unlikely(x) \ +({ \ + bool branch; \ + if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \ + branch = arch_static_branch_jump(&(x)->key, false); \ + else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ + branch = arch_static_branch(&(x)->key, false); \ + else \ + branch = ____wrong_branch_error(); \ + unlikely(branch); \ +}) + +#else /* !CONFIG_JUMP_LABEL */ + +#define static_branch_likely(x) likely(static_key_enabled(&(x)->key)) +#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key)) + +#endif /* CONFIG_JUMP_LABEL */ + +/* + * Advanced usage; refcount, branch is enabled when: count != 0 + */ + +#define static_branch_inc(x) static_key_slow_inc(&(x)->key) +#define static_branch_dec(x) static_key_slow_dec(&(x)->key) +#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key) +#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key) + +/* + * Normal usage; boolean enable/disable. + */ + +#define static_branch_enable(x) static_key_enable(&(x)->key) +#define static_branch_disable(x) static_key_disable(&(x)->key) +#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key) +#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key) + +#endif /* __ASSEMBLY__ */ + +#endif /* _LINUX_JUMP_LABEL_H */ diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h new file mode 100644 index 000000000..a49f2b45b --- /dev/null +++ b/include/linux/jump_label_ratelimit.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_JUMP_LABEL_RATELIMIT_H +#define _LINUX_JUMP_LABEL_RATELIMIT_H + +#include +#include + +#if defined(CONFIG_JUMP_LABEL) +struct static_key_deferred { + struct static_key key; + unsigned long timeout; + struct delayed_work work; +}; + +extern void static_key_slow_dec_deferred(struct static_key_deferred *key); +extern void static_key_deferred_flush(struct static_key_deferred *key); +extern void +jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); + +#else /* !CONFIG_JUMP_LABEL */ +struct static_key_deferred { + struct static_key key; +}; +static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) +{ + STATIC_KEY_CHECK_USE(key); + static_key_slow_dec(&key->key); +} +static inline void static_key_deferred_flush(struct static_key_deferred *key) +{ + STATIC_KEY_CHECK_USE(key); +} +static inline void +jump_label_rate_limit(struct static_key_deferred *key, + unsigned long rl) +{ + STATIC_KEY_CHECK_USE(key); +} +#endif /* CONFIG_JUMP_LABEL */ +#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ diff --git a/include/linux/jz4740-adc.h b/include/linux/jz4740-adc.h new file mode 100644 index 000000000..19d995c8b --- /dev/null +++ b/include/linux/jz4740-adc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_JZ4740_ADC +#define __LINUX_JZ4740_ADC + +struct device; + +/* + * jz4740_adc_set_config - Configure a JZ4740 adc device + * @dev: Pointer to a jz4740-adc device + * @mask: Mask for the config value to be set + * @val: 
Value to be set + * + * This function can be used by the JZ4740 ADC mfd cells to configure their + * options in the shared config register. +*/ +int jz4740_adc_set_config(struct device *dev, uint32_t mask, uint32_t val); + +#define JZ_ADC_CONFIG_SPZZ BIT(31) +#define JZ_ADC_CONFIG_EX_IN BIT(30) +#define JZ_ADC_CONFIG_DNUM_MASK (0x7 << 16) +#define JZ_ADC_CONFIG_DMA_ENABLE BIT(15) +#define JZ_ADC_CONFIG_XYZ_MASK (0x2 << 13) +#define JZ_ADC_CONFIG_SAMPLE_NUM_MASK (0x7 << 10) +#define JZ_ADC_CONFIG_CLKDIV_MASK (0xf << 5) +#define JZ_ADC_CONFIG_BAT_MB BIT(4) + +#define JZ_ADC_CONFIG_DNUM(dnum) ((dnum) << 16) +#define JZ_ADC_CONFIG_XYZ_OFFSET(dnum) ((xyz) << 13) +#define JZ_ADC_CONFIG_SAMPLE_NUM(x) ((x) << 10) +#define JZ_ADC_CONFIG_CLKDIV(div) ((div) << 5) + +#endif diff --git a/include/linux/jz4780-nemc.h b/include/linux/jz4780-nemc.h new file mode 100644 index 000000000..e7f1cc7a2 --- /dev/null +++ b/include/linux/jz4780-nemc.h @@ -0,0 +1,43 @@ +/* + * JZ4780 NAND/external memory controller (NEMC) + * + * Copyright (c) 2015 Imagination Technologies + * Author: Alex Smith + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_JZ4780_NEMC_H__ +#define __LINUX_JZ4780_NEMC_H__ + +#include + +struct device; + +/* + * Number of NEMC banks. Note that there are actually 6, but they are numbered + * from 1. + */ +#define JZ4780_NEMC_NUM_BANKS 7 + +/** + * enum jz4780_nemc_bank_type - device types which can be connected to a bank + * @JZ4780_NEMC_BANK_SRAM: SRAM + * @JZ4780_NEMC_BANK_NAND: NAND + */ +enum jz4780_nemc_bank_type { + JZ4780_NEMC_BANK_SRAM, + JZ4780_NEMC_BANK_NAND, +}; + +extern unsigned int jz4780_nemc_num_banks(struct device *dev); + +extern void jz4780_nemc_set_type(struct device *dev, unsigned int bank, + enum jz4780_nemc_bank_type type); +extern void jz4780_nemc_assert(struct device *dev, unsigned int bank, + bool assert); + +#endif /* __LINUX_JZ4780_NEMC_H__ */ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h new file mode 100644 index 000000000..1f96ce2b4 --- /dev/null +++ b/include/linux/kallsyms.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Rewritten and vastly simplified by Rusty Russell for in-kernel + * module loader: + * Copyright 2002 Rusty Russell IBM Corporation + */ +#ifndef _LINUX_KALLSYMS_H +#define _LINUX_KALLSYMS_H + +#include +#include +#include +#include +#include + +#include + +#define KSYM_NAME_LEN 128 +#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ + 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) + +struct cred; +struct module; + +static inline int is_kernel_inittext(unsigned long addr) +{ + if (addr >= (unsigned long)_sinittext + && addr <= (unsigned long)_einittext) + return 1; + return 0; +} + +static inline int is_kernel_text(unsigned long addr) +{ + if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || + arch_is_kernel_text(addr)) + return 1; + return in_gate_area_no_mm(addr); +} + +static inline int is_kernel(unsigned long addr) +{ + if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) + return 1; + return in_gate_area_no_mm(addr); +} + +static inline int is_ksym_addr(unsigned long addr) +{ + if (IS_ENABLED(CONFIG_KALLSYMS_ALL)) + return is_kernel(addr); + + return is_kernel_text(addr) || is_kernel_inittext(addr); +} + +static 
inline void *dereference_symbol_descriptor(void *ptr) +{ +#ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR + struct module *mod; + + ptr = dereference_kernel_function_descriptor(ptr); + if (is_ksym_addr((unsigned long)ptr)) + return ptr; + + preempt_disable(); + mod = __module_address((unsigned long)ptr); + preempt_enable(); + + if (mod) + ptr = dereference_module_function_descriptor(mod, ptr); +#endif + return ptr; +} + +#ifdef CONFIG_KALLSYMS +/* Lookup the address for a symbol. Returns 0 if not found. */ +unsigned long kallsyms_lookup_name(const char *name); + +/* Call a function on each kallsyms symbol in the core kernel */ +int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, + unsigned long), + void *data); + +extern int kallsyms_lookup_size_offset(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset); + +/* Lookup an address. modname is set to NULL if it's in the kernel. */ +const char *kallsyms_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, char *namebuf); + +/* Look up a kernel symbol and return it in a text buffer. */ +extern int sprint_symbol(char *buffer, unsigned long address); +extern int sprint_symbol_no_offset(char *buffer, unsigned long address); +extern int sprint_backtrace(char *buffer, unsigned long address); + +int lookup_symbol_name(unsigned long addr, char *symname); +int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); + +/* How and when do we show kallsyms values? */ +extern bool kallsyms_show_value(const struct cred *cred); + +#else /* !CONFIG_KALLSYMS */ + +static inline unsigned long kallsyms_lookup_name(const char *name) +{ + return 0; +} + +static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, + unsigned long), + void *data) +{ + return 0; +} + +static inline int kallsyms_lookup_size_offset(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset) +{ + return 0; +} + +static inline const char *kallsyms_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, char *namebuf) +{ + return NULL; +} + +static inline int sprint_symbol(char *buffer, unsigned long addr) +{ + *buffer = '\0'; + return 0; +} + +static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr) +{ + *buffer = '\0'; + return 0; +} + +static inline int sprint_backtrace(char *buffer, unsigned long addr) +{ + *buffer = '\0'; + return 0; +} + +static inline int lookup_symbol_name(unsigned long addr, char *symname) +{ + return -ERANGE; +} + +static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name) +{ + return -ERANGE; +} + +static inline bool kallsyms_show_value(const struct cred *cred) +{ + return false; +} + +#endif /*CONFIG_KALLSYMS*/ + +static inline void print_ip_sym(unsigned long ip) +{ + printk("[<%px>] %pS\n", (void *) ip, (void *) ip); +} + +#endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h new file mode 100644 index 000000000..d31415065 --- /dev/null +++ b/include/linux/kasan-checks.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KASAN_CHECKS_H +#define _LINUX_KASAN_CHECKS_H + +#ifdef CONFIG_KASAN +void kasan_check_read(const volatile void *p, unsigned int size); +void kasan_check_write(const volatile void *p, unsigned int size); +#else +static inline void kasan_check_read(const 
volatile void *p, unsigned int size) +{ } +static inline void kasan_check_write(const volatile void *p, unsigned int size) +{ } +#endif + +#endif diff --git a/include/linux/kasan.h b/include/linux/kasan.h new file mode 100644 index 000000000..46aae1299 --- /dev/null +++ b/include/linux/kasan.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KASAN_H +#define _LINUX_KASAN_H + +#include + +struct kmem_cache; +struct page; +struct vm_struct; +struct task_struct; + +#ifdef CONFIG_KASAN + +#include +#include + +extern unsigned char kasan_zero_page[PAGE_SIZE]; +extern pte_t kasan_zero_pte[PTRS_PER_PTE]; +extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; +extern pud_t kasan_zero_pud[PTRS_PER_PUD]; +extern p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D]; + +int kasan_populate_zero_shadow(const void *shadow_start, + const void *shadow_end); + +static inline void *kasan_mem_to_shadow(const void *addr) +{ + return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT) + + KASAN_SHADOW_OFFSET; +} + +/* Enable reporting bugs after kasan_disable_current() */ +extern void kasan_enable_current(void); + +/* Disable reporting bugs for current task */ +extern void kasan_disable_current(void); + +void kasan_unpoison_shadow(const void *address, size_t size); + +void kasan_unpoison_task_stack(struct task_struct *task); +void kasan_unpoison_stack_above_sp_to(const void *watermark); + +void kasan_alloc_pages(struct page *page, unsigned int order); +void kasan_free_pages(struct page *page, unsigned int order); + +void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, + slab_flags_t *flags); +void kasan_cache_shrink(struct kmem_cache *cache); +void kasan_cache_shutdown(struct kmem_cache *cache); + +void kasan_poison_slab(struct page *page); +void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); +void kasan_poison_object_data(struct kmem_cache *cache, void *object); +void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); + +void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); +void kasan_kfree_large(void *ptr, unsigned long ip); +void kasan_poison_kfree(void *ptr, unsigned long ip); +void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, + gfp_t flags); +void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); + +void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); +bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); + +struct kasan_cache { + int alloc_meta_offset; + int free_meta_offset; +}; + +int kasan_module_alloc(void *addr, size_t size); +void kasan_free_shadow(const struct vm_struct *vm); + +int kasan_add_zero_shadow(void *start, unsigned long size); +void kasan_remove_zero_shadow(void *start, unsigned long size); + +size_t ksize(const void *); +static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } +size_t kasan_metadata_size(struct kmem_cache *cache); + +bool kasan_save_enable_multi_shot(void); +void kasan_restore_multi_shot(bool enabled); + +#else /* CONFIG_KASAN */ + +static inline void kasan_unpoison_shadow(const void *address, size_t size) {} + +static inline void kasan_unpoison_task_stack(struct task_struct *task) {} +static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {} + +static inline void kasan_enable_current(void) {} +static inline void kasan_disable_current(void) {} + +static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} +static inline void kasan_free_pages(struct page *page, 
unsigned int order) {} + +static inline void kasan_cache_create(struct kmem_cache *cache, + unsigned int *size, + slab_flags_t *flags) {} +static inline void kasan_cache_shrink(struct kmem_cache *cache) {} +static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} + +static inline void kasan_poison_slab(struct page *page) {} +static inline void kasan_unpoison_object_data(struct kmem_cache *cache, + void *object) {} +static inline void kasan_poison_object_data(struct kmem_cache *cache, + void *object) {} +static inline void kasan_init_slab_obj(struct kmem_cache *cache, + const void *object) {} + +static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} +static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} +static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} +static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, + size_t size, gfp_t flags) {} +static inline void kasan_krealloc(const void *object, size_t new_size, + gfp_t flags) {} + +static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, + gfp_t flags) {} +static inline bool kasan_slab_free(struct kmem_cache *s, void *object, + unsigned long ip) +{ + return false; +} + +static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } +static inline void kasan_free_shadow(const struct vm_struct *vm) {} + +static inline int kasan_add_zero_shadow(void *start, unsigned long size) +{ + return 0; +} +static inline void kasan_remove_zero_shadow(void *start, + unsigned long size) +{} + +static inline void kasan_unpoison_slab(const void *ptr) { } +static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } + +#endif /* CONFIG_KASAN */ + +#endif /* LINUX_KASAN_H */ diff --git a/include/linux/kbd_diacr.h b/include/linux/kbd_diacr.h new file mode 100644 index 000000000..738c7340c --- /dev/null +++ b/include/linux/kbd_diacr.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DIACR_H +#define _DIACR_H +#include + +extern struct kbdiacruc accent_table[]; +extern unsigned int accent_table_size; + +#endif /* _DIACR_H */ diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h new file mode 100644 index 000000000..bb2246c8e --- /dev/null +++ b/include/linux/kbd_kern.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _KBD_KERN_H +#define _KBD_KERN_H + +#include +#include +#include + +extern struct tasklet_struct keyboard_tasklet; + +extern char *func_table[MAX_NR_FUNC]; +extern char func_buf[]; +extern char *funcbufptr; +extern int funcbufsize, funcbufleft; + +/* + * kbd->xxx contains the VC-local things (flag settings etc..) + * + * Note: externally visible are LED_SCR, LED_NUM, LED_CAP defined in kd.h + * The code in KDGETLED / KDSETLED depends on the internal and + * external order being the same. + * + * Note: lockstate is used as index in the array key_map. 
+ */ +struct kbd_struct { + + unsigned char lockstate; +/* 8 modifiers - the names do not have any meaning at all; + they can be associated to arbitrarily chosen keys */ +#define VC_SHIFTLOCK KG_SHIFT /* shift lock mode */ +#define VC_ALTGRLOCK KG_ALTGR /* altgr lock mode */ +#define VC_CTRLLOCK KG_CTRL /* control lock mode */ +#define VC_ALTLOCK KG_ALT /* alt lock mode */ +#define VC_SHIFTLLOCK KG_SHIFTL /* shiftl lock mode */ +#define VC_SHIFTRLOCK KG_SHIFTR /* shiftr lock mode */ +#define VC_CTRLLLOCK KG_CTRLL /* ctrll lock mode */ +#define VC_CTRLRLOCK KG_CTRLR /* ctrlr lock mode */ + unsigned char slockstate; /* for `sticky' Shift, Ctrl, etc. */ + + unsigned char ledmode:1; +#define LED_SHOW_FLAGS 0 /* traditional state */ +#define LED_SHOW_IOCTL 1 /* only change leds upon ioctl */ + + unsigned char ledflagstate:4; /* flags, not lights */ + unsigned char default_ledflagstate:4; +#define VC_SCROLLOCK 0 /* scroll-lock mode */ +#define VC_NUMLOCK 1 /* numeric lock mode */ +#define VC_CAPSLOCK 2 /* capslock mode */ +#define VC_KANALOCK 3 /* kanalock mode */ + + unsigned char kbdmode:3; /* one 3-bit value */ +#define VC_XLATE 0 /* translate keycodes using keymap */ +#define VC_MEDIUMRAW 1 /* medium raw (keycode) mode */ +#define VC_RAW 2 /* raw (scancode) mode */ +#define VC_UNICODE 3 /* Unicode mode */ +#define VC_OFF 4 /* disabled mode */ + + unsigned char modeflags:5; +#define VC_APPLIC 0 /* application key mode */ +#define VC_CKMODE 1 /* cursor key mode */ +#define VC_REPEAT 2 /* keyboard repeat */ +#define VC_CRLF 3 /* 0 - enter sends CR, 1 - enter sends CRLF */ +#define VC_META 4 /* 0 - meta, 1 - meta=prefix with ESC */ +}; + +extern int kbd_init(void); + +extern void setledstate(struct kbd_struct *kbd, unsigned int led); + +extern int do_poke_blanked_console; + +extern void (*kbd_ledfunc)(unsigned int led); + +extern int set_console(int nr); +extern void schedule_console_callback(void); + +/* FIXME: review locking for vt.c callers */ +static inline void set_leds(void) +{ + tasklet_schedule(&keyboard_tasklet); +} + +static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag) +{ + return ((kbd->modeflags >> flag) & 1); +} + +static inline int vc_kbd_led(struct kbd_struct * kbd, int flag) +{ + return ((kbd->ledflagstate >> flag) & 1); +} + +static inline void set_vc_kbd_mode(struct kbd_struct * kbd, int flag) +{ + kbd->modeflags |= 1 << flag; +} + +static inline void set_vc_kbd_led(struct kbd_struct * kbd, int flag) +{ + kbd->ledflagstate |= 1 << flag; +} + +static inline void clr_vc_kbd_mode(struct kbd_struct * kbd, int flag) +{ + kbd->modeflags &= ~(1 << flag); +} + +static inline void clr_vc_kbd_led(struct kbd_struct * kbd, int flag) +{ + kbd->ledflagstate &= ~(1 << flag); +} + +static inline void chg_vc_kbd_lock(struct kbd_struct * kbd, int flag) +{ + kbd->lockstate ^= 1 << flag; +} + +static inline void chg_vc_kbd_slock(struct kbd_struct * kbd, int flag) +{ + kbd->slockstate ^= 1 << flag; +} + +static inline void chg_vc_kbd_mode(struct kbd_struct * kbd, int flag) +{ + kbd->modeflags ^= 1 << flag; +} + +static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag) +{ + kbd->ledflagstate ^= 1 << flag; +} + +#define U(x) ((x) ^ 0xf000) + +#define BRL_UC_ROW 0x2800 + +/* keyboard.c */ + +struct console; + +void compute_shiftstate(void); + +/* defkeymap.c */ + +extern unsigned int keymap_count; + +#endif diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h new file mode 100644 index 000000000..e7be517aa --- /dev/null +++ b/include/linux/kbuild.h @@ -0,0 +1,16 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_KBUILD_H +#define __LINUX_KBUILD_H + +#define DEFINE(sym, val) \ + asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val)) + +#define BLANK() asm volatile("\n.ascii \"->\"" : : ) + +#define OFFSET(sym, str, mem) \ + DEFINE(sym, offsetof(struct str, mem)) + +#define COMMENT(x) \ + asm volatile("\n.ascii \"->#" x "\"") + +#endif diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h new file mode 100644 index 000000000..cc8fa109c --- /dev/null +++ b/include/linux/kconfig.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_KCONFIG_H +#define __LINUX_KCONFIG_H + +#include + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define __BIG_ENDIAN 4321 +#else +#define __LITTLE_ENDIAN 1234 +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define __take_second_arg(__ignored, val, ...) val + +/* + * The use of "&&" / "||" is limited in certain expressions. + * The following enable to calculate "and" / "or" with macro expansion only. + */ +#define __and(x, y) ___and(x, y) +#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y) +#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0) + +#define __or(x, y) ___or(x, y) +#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y) +#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y) + +/* + * Helper macros to use CONFIG_ options in C/CPP expressions. Note that + * these only work with boolean and tristate options. + */ + +/* + * Getting something that works in C and CPP for an arg that may or may + * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1" + * we match on the placeholder define, insert the "0," for arg1 and generate + * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one). + * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when + * the last step cherry picks the 2nd arg, we get a zero. + */ +#define __is_defined(x) ___is_defined(x) +#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val) +#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0) + +/* + * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 + * otherwise. For boolean options, this is equivalent to + * IS_ENABLED(CONFIG_FOO). + */ +#define IS_BUILTIN(option) __is_defined(option) + +/* + * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 + * otherwise. + */ +#define IS_MODULE(option) __is_defined(option##_MODULE) + +/* + * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled + * code can call a function defined in code compiled based on CONFIG_FOO. + * This is similar to IS_ENABLED(), but returns false when invoked from + * built-in code when CONFIG_FOO is set to 'm'. + */ +#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \ + __and(IS_MODULE(option), __is_defined(MODULE))) + +/* + * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', + * 0 otherwise. 
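As a worked illustration (CONFIG_FOO is a stand-in, not a real option), the placeholder trick above expands as sketched in the comment below, and the result is an ordinary integer constant expression usable in both C and preprocessor conditionals:

#include <linux/kconfig.h>
#include <linux/types.h>

/*
 * With "#define CONFIG_FOO 1" on the command line:
 *   IS_BUILTIN(CONFIG_FOO)
 *     -> __is_defined(CONFIG_FOO) -> ___is_defined(1)
 *     -> ____is_defined(__ARG_PLACEHOLDER_1)   (pasted token expands to "0,")
 *     -> __take_second_arg(0, 1, 0)            -> 1
 *
 * With CONFIG_FOO completely undefined:
 *     -> ____is_defined(__ARG_PLACEHOLDER_CONFIG_FOO)
 *     -> __take_second_arg(__ARG_PLACEHOLDER_CONFIG_FOO 1, 0) -> 0
 *
 * IS_ENABLED() additionally ORs in IS_MODULE(), i.e. CONFIG_FOO_MODULE.
 */
static inline bool example_have_foo(void)
{
        /* Constant-folds to 0 or 1; the dead branch in callers is removed. */
        return IS_ENABLED(CONFIG_FOO);
}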
+ */ +#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) + +#endif /* __LINUX_KCONFIG_H */ diff --git a/include/linux/kcore.h b/include/linux/kcore.h new file mode 100644 index 000000000..c843f4a9c --- /dev/null +++ b/include/linux/kcore.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * /proc/kcore definitions + */ +#ifndef _LINUX_KCORE_H +#define _LINUX_KCORE_H + +enum kcore_type { + KCORE_TEXT, + KCORE_VMALLOC, + KCORE_RAM, + KCORE_VMEMMAP, + KCORE_USER, + KCORE_OTHER, + KCORE_REMAP, +}; + +struct kcore_list { + struct list_head list; + unsigned long addr; + unsigned long vaddr; + size_t size; + int type; +}; + +struct vmcore { + struct list_head list; + unsigned long long paddr; + unsigned long long size; + loff_t offset; +}; + +struct vmcoredd_node { + struct list_head list; /* List of dumps */ + void *buf; /* Buffer containing device's dump */ + unsigned int size; /* Size of the buffer */ +}; + +#ifdef CONFIG_PROC_KCORE +void __init kclist_add(struct kcore_list *, void *, size_t, int type); +static inline +void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz) +{ + m->vaddr = (unsigned long)vaddr; + kclist_add(m, addr, sz, KCORE_REMAP); +} + +extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)); +#else +static inline +void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) +{ +} + +static inline +void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz) +{ +} +#endif + +#endif /* _LINUX_KCORE_H */ diff --git a/include/linux/kcov.h b/include/linux/kcov.h new file mode 100644 index 000000000..b76a18070 --- /dev/null +++ b/include/linux/kcov.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KCOV_H +#define _LINUX_KCOV_H + +#include + +struct task_struct; + +#ifdef CONFIG_KCOV + +enum kcov_mode { + /* Coverage collection is not enabled yet. */ + KCOV_MODE_DISABLED = 0, + /* KCOV was initialized, but tracing mode hasn't been chosen yet. */ + KCOV_MODE_INIT = 1, + /* + * Tracing coverage collection mode. + * Covered PCs are collected in a per-task buffer. + */ + KCOV_MODE_TRACE_PC = 2, + /* Collecting comparison operands mode. 
*/ + KCOV_MODE_TRACE_CMP = 3, +}; + +#define KCOV_IN_CTXSW (1 << 30) + +void kcov_task_init(struct task_struct *t); +void kcov_task_exit(struct task_struct *t); + +#define kcov_prepare_switch(t) \ +do { \ + (t)->kcov_mode |= KCOV_IN_CTXSW; \ +} while (0) + +#define kcov_finish_switch(t) \ +do { \ + (t)->kcov_mode &= ~KCOV_IN_CTXSW; \ +} while (0) + +#else + +static inline void kcov_task_init(struct task_struct *t) {} +static inline void kcov_task_exit(struct task_struct *t) {} +static inline void kcov_prepare_switch(struct task_struct *t) {} +static inline void kcov_finish_switch(struct task_struct *t) {} + +#endif /* CONFIG_KCOV */ +#endif /* _LINUX_KCOV_H */ diff --git a/include/linux/kd.h b/include/linux/kd.h new file mode 100644 index 000000000..b130a18f8 --- /dev/null +++ b/include/linux/kd.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KD_H +#define _LINUX_KD_H + +#include + +#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */ +#endif /* _LINUX_KD_H */ diff --git a/include/linux/kdb.h b/include/linux/kdb.h new file mode 100644 index 000000000..68bd88223 --- /dev/null +++ b/include/linux/kdb.h @@ -0,0 +1,221 @@ +#ifndef _KDB_H +#define _KDB_H + +/* + * Kernel Debugger Architecture Independent Global Headers + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (C) 2000 Stephane Eranian + * Copyright (C) 2009 Jason Wessel + */ + +/* Shifted versions of the command enable bits are be used if the command + * has no arguments (see kdb_check_flags). This allows commands, such as + * go, to have different permissions depending upon whether it is called + * with an argument. + */ +#define KDB_ENABLE_NO_ARGS_SHIFT 10 + +typedef enum { + KDB_ENABLE_ALL = (1 << 0), /* Enable everything */ + KDB_ENABLE_MEM_READ = (1 << 1), + KDB_ENABLE_MEM_WRITE = (1 << 2), + KDB_ENABLE_REG_READ = (1 << 3), + KDB_ENABLE_REG_WRITE = (1 << 4), + KDB_ENABLE_INSPECT = (1 << 5), + KDB_ENABLE_FLOW_CTRL = (1 << 6), + KDB_ENABLE_SIGNAL = (1 << 7), + KDB_ENABLE_REBOOT = (1 << 8), + /* User exposed values stop here, all remaining flags are + * exclusively used to describe a commands behaviour. 
+ */ + + KDB_ENABLE_ALWAYS_SAFE = (1 << 9), + KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1, + + KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT, + + KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */ + KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */ +} kdb_cmdflags_t; + +typedef int (*kdb_func_t)(int, const char **); + +#ifdef CONFIG_KGDB_KDB +#include +#include +#include + +#define KDB_POLL_FUNC_MAX 5 +extern int kdb_poll_idx; + +/* + * kdb_initial_cpu is initialized to -1, and is set to the cpu + * number whenever the kernel debugger is entered. + */ +extern int kdb_initial_cpu; + +/* Types and messages used for dynamically added kdb shell commands */ + +#define KDB_MAXARGS 16 /* Maximum number of arguments to a function */ + +/* KDB return codes from a command or internal kdb function */ +#define KDB_NOTFOUND (-1) +#define KDB_ARGCOUNT (-2) +#define KDB_BADWIDTH (-3) +#define KDB_BADRADIX (-4) +#define KDB_NOTENV (-5) +#define KDB_NOENVVALUE (-6) +#define KDB_NOTIMP (-7) +#define KDB_ENVFULL (-8) +#define KDB_ENVBUFFULL (-9) +#define KDB_TOOMANYBPT (-10) +#define KDB_TOOMANYDBREGS (-11) +#define KDB_DUPBPT (-12) +#define KDB_BPTNOTFOUND (-13) +#define KDB_BADMODE (-14) +#define KDB_BADINT (-15) +#define KDB_INVADDRFMT (-16) +#define KDB_BADREG (-17) +#define KDB_BADCPUNUM (-18) +#define KDB_BADLENGTH (-19) +#define KDB_NOBP (-20) +#define KDB_BADADDR (-21) +#define KDB_NOPERM (-22) + +/* + * kdb_diemsg + * + * Contains a pointer to the last string supplied to the + * kernel 'die' panic function. + */ +extern const char *kdb_diemsg; + +#define KDB_FLAG_EARLYKDB (1 << 0) /* set from boot parameter kdb=early */ +#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */ +#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */ +#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */ +#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available, + * kdb is disabled */ +#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do + * not use keyboard */ +#define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do + * not use keyboard */ + +extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */ + +extern void kdb_save_flags(void); +extern void kdb_restore_flags(void); + +#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag) +#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag)) +#define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag)) + +/* + * External entry point for the kernel debugger. The pt_regs + * at the time of entry are supplied along with the reason for + * entry to the kernel debugger. 
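Purely as an illustration (assuming CONFIG_KGDB_KDB is set; the function names are hypothetical), the global-flag accessors declared above are used like this:

#include <linux/kdb.h>

/*
 * Hypothetical: record that a catastrophic event was seen and stop
 * sending IPIs to the other CPUs while kdb is in control.
 */
static void example_note_catastrophe(void)
{
        if (!KDB_FLAG(CATASTROPHIC)) {
                KDB_FLAG_SET(CATASTROPHIC);
                KDB_FLAG_SET(NOIPI);
        }
}

static void example_recovered(void)
{
        KDB_FLAG_CLEAR(NOIPI);
        KDB_FLAG_CLEAR(CATASTROPHIC);
}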
+ */ + +typedef enum { + KDB_REASON_ENTER = 1, /* KDB_ENTER() trap/fault - regs valid */ + KDB_REASON_ENTER_SLAVE, /* KDB_ENTER_SLAVE() trap/fault - regs valid */ + KDB_REASON_BREAK, /* Breakpoint inst. - regs valid */ + KDB_REASON_DEBUG, /* Debug Fault - regs valid */ + KDB_REASON_OOPS, /* Kernel Oops - regs valid */ + KDB_REASON_SWITCH, /* CPU switch - regs valid*/ + KDB_REASON_KEYBOARD, /* Keyboard entry - regs valid */ + KDB_REASON_NMI, /* Non-maskable interrupt; regs valid */ + KDB_REASON_RECURSE, /* Recursive entry to kdb; + * regs probably valid */ + KDB_REASON_SSTEP, /* Single Step trap. - regs valid */ + KDB_REASON_SYSTEM_NMI, /* In NMI due to SYSTEM cmd; regs valid */ +} kdb_reason_t; + +enum kdb_msgsrc { + KDB_MSGSRC_INTERNAL, /* direct call to kdb_printf() */ + KDB_MSGSRC_PRINTK, /* trapped from printk() */ +}; + +extern int kdb_trap_printk; +extern int kdb_printf_cpu; +extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, + va_list args); +extern __printf(1, 2) int kdb_printf(const char *, ...); +typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); + +extern void kdb_init(int level); + +/* Access to kdb specific polling devices */ +typedef int (*get_char_func)(void); +extern get_char_func kdb_poll_funcs[]; +extern int kdb_get_kbd_char(void); + +static inline +int kdb_process_cpu(const struct task_struct *p) +{ + unsigned int cpu = task_cpu(p); + if (cpu > num_possible_cpus()) + cpu = 0; + return cpu; +} + +/* kdb access to register set for stack dumping */ +extern struct pt_regs *kdb_current_regs; +#ifdef CONFIG_KALLSYMS +extern const char *kdb_walk_kallsyms(loff_t *pos); +#else /* ! CONFIG_KALLSYMS */ +static inline const char *kdb_walk_kallsyms(loff_t *pos) +{ + return NULL; +} +#endif /* ! CONFIG_KALLSYMS */ + +/* Dynamic kdb shell command registration */ +extern int kdb_register(char *, kdb_func_t, char *, char *, short); +extern int kdb_register_flags(char *, kdb_func_t, char *, char *, + short, kdb_cmdflags_t); +extern int kdb_unregister(char *); +#else /* ! CONFIG_KGDB_KDB */ +static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } +static inline void kdb_init(int level) {} +static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen) { return 0; } +static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen, + kdb_cmdflags_t flags) { return 0; } +static inline int kdb_unregister(char *cmd) { return 0; } +#endif /* CONFIG_KGDB_KDB */ +enum { + KDB_NOT_INITIALIZED, + KDB_INIT_EARLY, + KDB_INIT_FULL, +}; + +extern int kdbgetintenv(const char *, int *); +extern int kdb_set(int, const char **); + +#endif /* !_KDB_H */ diff --git a/include/linux/kdebug.h b/include/linux/kdebug.h new file mode 100644 index 000000000..fd311565f --- /dev/null +++ b/include/linux/kdebug.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KDEBUG_H +#define _LINUX_KDEBUG_H + +#include + +struct notifier_block; + +struct die_args { + struct pt_regs *regs; + const char *str; + long err; + int trapnr; + int signr; +}; + +int register_die_notifier(struct notifier_block *nb); +int unregister_die_notifier(struct notifier_block *nb); + +int notify_die(enum die_val val, const char *str, + struct pt_regs *regs, long err, int trap, int sig); + +#endif /* _LINUX_KDEBUG_H */ diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h new file mode 100644 index 000000000..4856706fb --- /dev/null +++ b/include/linux/kdev_t.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KDEV_T_H +#define _LINUX_KDEV_T_H + +#include + +#define MINORBITS 20 +#define MINORMASK ((1U << MINORBITS) - 1) + +#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) +#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) +#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi)) + +#define print_dev_t(buffer, dev) \ + sprintf((buffer), "%u:%u\n", MAJOR(dev), MINOR(dev)) + +#define format_dev_t(buffer, dev) \ + ({ \ + sprintf(buffer, "%u:%u", MAJOR(dev), MINOR(dev)); \ + buffer; \ + }) + +/* acceptable for old filesystems */ +static __always_inline bool old_valid_dev(dev_t dev) +{ + return MAJOR(dev) < 256 && MINOR(dev) < 256; +} + +static __always_inline u16 old_encode_dev(dev_t dev) +{ + return (MAJOR(dev) << 8) | MINOR(dev); +} + +static __always_inline dev_t old_decode_dev(u16 val) +{ + return MKDEV((val >> 8) & 255, val & 255); +} + +static __always_inline u32 new_encode_dev(dev_t dev) +{ + unsigned major = MAJOR(dev); + unsigned minor = MINOR(dev); + return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); +} + +static __always_inline dev_t new_decode_dev(u32 dev) +{ + unsigned major = (dev & 0xfff00) >> 8; + unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); + return MKDEV(major, minor); +} + +static __always_inline u64 huge_encode_dev(dev_t dev) +{ + return new_encode_dev(dev); +} + +static __always_inline dev_t huge_decode_dev(u64 dev) +{ + return new_decode_dev(dev); +} + +static __always_inline int sysv_valid_dev(dev_t dev) +{ + return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18); +} + +static __always_inline u32 sysv_encode_dev(dev_t dev) +{ + return MINOR(dev) | (MAJOR(dev) << 18); +} + +static __always_inline unsigned sysv_major(u32 dev) +{ + return (dev >> 18) & 0x3fff; +} + +static __always_inline unsigned sysv_minor(u32 dev) +{ + return dev & 0x3ffff; +} + +#endif diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h new file mode 100644 index 000000000..d237fe854 --- /dev/null +++ b/include/linux/kern_levels.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KERN_LEVELS_H__ 
+#define __KERN_LEVELS_H__ + +#define KERN_SOH "\001" /* ASCII Start Of Header */ +#define KERN_SOH_ASCII '\001' + +#define KERN_EMERG KERN_SOH "0" /* system is unusable */ +#define KERN_ALERT KERN_SOH "1" /* action must be taken immediately */ +#define KERN_CRIT KERN_SOH "2" /* critical conditions */ +#define KERN_ERR KERN_SOH "3" /* error conditions */ +#define KERN_WARNING KERN_SOH "4" /* warning conditions */ +#define KERN_NOTICE KERN_SOH "5" /* normal but significant condition */ +#define KERN_INFO KERN_SOH "6" /* informational */ +#define KERN_DEBUG KERN_SOH "7" /* debug-level messages */ + +#define KERN_DEFAULT KERN_SOH "d" /* the default kernel loglevel */ + +/* + * Annotation for a "continued" line of log printout (only done after a + * line that had no enclosing \n). Only to be used by core/arch code + * during early bootup (a continued line is not SMP-safe otherwise). + */ +#define KERN_CONT KERN_SOH "c" + +/* integer equivalents of KERN_ */ +#define LOGLEVEL_SCHED -2 /* Deferred messages from sched code + * are set to this special level */ +#define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */ +#define LOGLEVEL_EMERG 0 /* system is unusable */ +#define LOGLEVEL_ALERT 1 /* action must be taken immediately */ +#define LOGLEVEL_CRIT 2 /* critical conditions */ +#define LOGLEVEL_ERR 3 /* error conditions */ +#define LOGLEVEL_WARNING 4 /* warning conditions */ +#define LOGLEVEL_NOTICE 5 /* normal but significant condition */ +#define LOGLEVEL_INFO 6 /* informational */ +#define LOGLEVEL_DEBUG 7 /* debug-level messages */ + +#endif diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h new file mode 100644 index 000000000..abd20ef93 --- /dev/null +++ b/include/linux/kernel-page-flags.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_KERNEL_PAGE_FLAGS_H +#define LINUX_KERNEL_PAGE_FLAGS_H + +#include + + +/* kernel hacking assistances + * WARNING: subject to change, never rely on them! 
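For illustration (hypothetical device name and messages): the level markers are plain string literals, so they concatenate with the format string at compile time; KERN_ERR "..." simply prepends the two bytes 0x01 '3', which the printk core strips and maps to LOGLEVEL_ERR.

#include <linux/printk.h>

static void example_report(void)
{
        printk(KERN_ERR "widget0: reset failed\n");

        /* KERN_CONT continues a line that deliberately had no trailing \n. */
        printk(KERN_INFO "widget0: probing");
        printk(KERN_CONT " done\n");
}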
+ */ +#define KPF_RESERVED 32 +#define KPF_MLOCKED 33 +#define KPF_MAPPEDTODISK 34 +#define KPF_PRIVATE 35 +#define KPF_PRIVATE_2 36 +#define KPF_OWNER_PRIVATE 37 +#define KPF_ARCH 38 +#define KPF_UNCACHED 39 +#define KPF_SOFTDIRTY 40 + +#endif /* LINUX_KERNEL_PAGE_FLAGS_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h new file mode 100644 index 000000000..50733abbe --- /dev/null +++ b/include/linux/kernel.h @@ -0,0 +1,1036 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KERNEL_H +#define _LINUX_KERNEL_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define USHRT_MAX ((u16)(~0U)) +#define SHRT_MAX ((s16)(USHRT_MAX>>1)) +#define SHRT_MIN ((s16)(-SHRT_MAX - 1)) +#define INT_MAX ((int)(~0U>>1)) +#define INT_MIN (-INT_MAX - 1) +#define UINT_MAX (~0U) +#define LONG_MAX ((long)(~0UL>>1)) +#define LONG_MIN (-LONG_MAX - 1) +#define ULONG_MAX (~0UL) +#define LLONG_MAX ((long long)(~0ULL>>1)) +#define LLONG_MIN (-LLONG_MAX - 1) +#define ULLONG_MAX (~0ULL) +#define SIZE_MAX (~(size_t)0) +#define PHYS_ADDR_MAX (~(phys_addr_t)0) + +#define U8_MAX ((u8)~0U) +#define S8_MAX ((s8)(U8_MAX>>1)) +#define S8_MIN ((s8)(-S8_MAX - 1)) +#define U16_MAX ((u16)~0U) +#define S16_MAX ((s16)(U16_MAX>>1)) +#define S16_MIN ((s16)(-S16_MAX - 1)) +#define U32_MAX ((u32)~0U) +#define S32_MAX ((s32)(U32_MAX>>1)) +#define S32_MIN ((s32)(-S32_MAX - 1)) +#define U64_MAX ((u64)~0ULL) +#define S64_MAX ((s64)(U64_MAX>>1)) +#define S64_MIN ((s64)(-S64_MAX - 1)) + +#define STACK_MAGIC 0xdeadbeef + +/** + * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value + * @x: value to repeat + * + * NOTE: @x is not checked for > 0xff; larger values produce odd results. + */ +#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) + +/* @a is a power of 2 value */ +#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) + +/* generic data direction definitions */ +#define READ 0 +#define WRITE 1 + +/** + * ARRAY_SIZE - get the number of elements in array @arr + * @arr: array to be sized + */ +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) + +#define u64_to_user_ptr(x) ( \ +{ \ + typecheck(u64, (x)); \ + (void __user *)(uintptr_t)(x); \ +} \ +) + +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. + */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +/** + * round_up - round up to next specified power of 2 + * @x: the value to round + * @y: multiple to round up to (must be a power of 2) + * + * Rounds @x up to next multiple of @y (which must be a power of 2). + * To perform arbitrary rounding up, use roundup() below. + */ +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +/** + * round_down - round down to next specified power of 2 + * @x: the value to round + * @y: multiple to round down to (must be a power of 2) + * + * Rounds @x down to next multiple of @y (which must be a power of 2). + * To perform arbitrary rounding down, use rounddown() below. 
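A few worked values for the alignment helpers above (illustrative only; the function and array names are made up). All of these are compile-time constants, so BUILD_BUG_ON() can check them:

#include <linux/build_bug.h>
#include <linux/kernel.h>

static int example_table[5];

static void example_check_rounding(void)
{
        BUILD_BUG_ON(round_up(13, 8)         != 16);    /* ((13 - 1) | 7) + 1 */
        BUILD_BUG_ON(round_down(13, 8)       != 8);     /* 13 & ~7 */
        BUILD_BUG_ON(ALIGN(0x123, 0x10)      != 0x130);
        BUILD_BUG_ON(ALIGN_DOWN(0x123, 0x10) != 0x120);
        BUILD_BUG_ON(!IS_ALIGNED(0x400, 0x100));
        BUILD_BUG_ON(ARRAY_SIZE(example_table) != 5);
}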
+ */ +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +/** + * FIELD_SIZEOF - get the size of a struct's field + * @t: the target struct + * @f: the target struct's field + * Return: the size of @f in the struct definition without having a + * declared instance of @t. + */ +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) + +#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP + +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) + +#define DIV_ROUND_UP_ULL(ll, d) \ + DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d)) + +#if BITS_PER_LONG == 32 +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) +#else +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) +#endif + +/** + * roundup - round up to the next specified multiple + * @x: the value to up + * @y: multiple to round up to + * + * Rounds @x up to next multiple of @y. If @y will always be a power + * of 2, consider using the faster round_up(). + * + * The `const' here prevents gcc-3.3 from calling __divdi3 + */ +#define roundup(x, y) ( \ +{ \ + const typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ +) +/** + * rounddown - round down to next specified multiple + * @x: the value to round + * @y: multiple to round down to + * + * Rounds @x down to next multiple of @y. If @y will always be a power + * of 2, consider using the faster round_down(). + */ +#define rounddown(x, y) ( \ +{ \ + typeof(x) __x = (x); \ + __x - (__x % (y)); \ +} \ +) + +/* + * Divide positive or negative dividend by positive or negative divisor + * and round to closest integer. Result is undefined for negative + * divisors if the dividend variable type is unsigned and for negative + * dividends if the divisor variable type is unsigned. + */ +#define DIV_ROUND_CLOSEST(x, divisor)( \ +{ \ + typeof(x) __x = x; \ + typeof(divisor) __d = divisor; \ + (((typeof(x))-1) > 0 || \ + ((typeof(divisor))-1) > 0 || \ + (((__x) > 0) == ((__d) > 0))) ? \ + (((__x) + ((__d) / 2)) / (__d)) : \ + (((__x) - ((__d) / 2)) / (__d)); \ +} \ +) +/* + * Same as above but for u64 dividends. divisor must be a 32-bit + * number. + */ +#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ +{ \ + typeof(divisor) __d = divisor; \ + unsigned long long _tmp = (x) + (__d) / 2; \ + do_div(_tmp, __d); \ + _tmp; \ +} \ +) + +/* + * Multiplies an integer by a fraction, while avoiding unnecessary + * overflow or loss of precision. + */ +#define mult_frac(x, numer, denom)( \ +{ \ + typeof(x) quot = (x) / (denom); \ + typeof(x) rem = (x) % (denom); \ + (quot * (numer)) + ((rem * (numer)) / (denom)); \ +} \ +) + + +#define _RET_IP_ (unsigned long)__builtin_return_address(0) +#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) + +#ifdef CONFIG_LBDAF +# include +# define sector_div(a, b) do_div(a, b) +#else +# define sector_div(n, b)( \ +{ \ + int _res; \ + _res = (n) % (b); \ + (n) /= (b); \ + _res; \ +} \ +) +#endif + +/** + * upper_32_bits - return bits 32-63 of a number + * @n: the number we're accessing + * + * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress + * the "right shift count >= width of type" warning when that quantity is + * 32-bits. 
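Worked values for the division helpers above (illustrative; the quantities and names are invented):

#include <linux/kernel.h>

static void example_div_helpers(void)
{
        unsigned int bytes = 1000, chunk = 96;

        unsigned int chunks  = DIV_ROUND_UP(bytes, chunk);      /* 11: always enough room */
        unsigned int usable  = rounddown(bytes, chunk);         /* 960: whole chunks only */
        unsigned int nearest = DIV_ROUND_CLOSEST(bytes, chunk); /* 10: 10.41 rounds down */

        /*
         * mult_frac(x, 2, 3) computes x * 2 / 3 from quotient and remainder
         * parts, so the intermediate product cannot overflow the way a
         * naive (x * 2) can when x is close to the type's maximum.
         */
        unsigned int two_thirds = mult_frac(bytes, 2, 3);       /* 666 */

        (void)chunks; (void)usable; (void)nearest; (void)two_thirds;
}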
+ */ +#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) + +/** + * lower_32_bits - return bits 0-31 of a number + * @n: the number we're accessing + */ +#define lower_32_bits(n) ((u32)(n)) + +struct completion; +struct pt_regs; +struct user; + +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int _cond_resched(void); +# define might_resched() _cond_resched() +#else +# define might_resched() do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + void ___might_sleep(const char *file, int line, int preempt_offset); + void __might_sleep(const char *file, int line, int preempt_offset); +/** + * might_sleep - annotation for functions that can sleep + * + * this macro will print a stack trace if it is executed in an atomic + * context (spinlock, irq-handler, ...). + * + * This is a useful debugging help to be able to catch problems early and not + * be bitten later when the calling function happens to sleep when it is not + * supposed to. + */ +# define might_sleep() \ + do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) +# define sched_annotate_sleep() (current->task_state_change = 0) +#else + static inline void ___might_sleep(const char *file, int line, + int preempt_offset) { } + static inline void __might_sleep(const char *file, int line, + int preempt_offset) { } +# define might_sleep() do { might_resched(); } while (0) +# define sched_annotate_sleep() do { } while (0) +#endif + +#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) + +/** + * abs - return absolute value of an argument + * @x: the value. If it is unsigned type, it is converted to signed type first. + * char is treated as if it was signed (regardless of whether it really is) + * but the macro's return type is preserved as char. + * + * Return: an absolute value of x. + */ +#define abs(x) __abs_choose_expr(x, long long, \ + __abs_choose_expr(x, long, \ + __abs_choose_expr(x, int, \ + __abs_choose_expr(x, short, \ + __abs_choose_expr(x, char, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), char), \ + (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ + ((void)0))))))) + +#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), signed type) || \ + __builtin_types_compatible_p(typeof(x), unsigned type), \ + ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) + +/** + * reciprocal_scale - "scale" a value into range [0, ep_ro) + * @val: value + * @ep_ro: right open interval endpoint + * + * Perform a "reciprocal multiplication" in order to "scale" a value into + * range [0, @ep_ro), where the upper interval endpoint is right-open. + * This is useful, e.g. for accessing a index of an array containing + * @ep_ro elements, for example. Think of it as sort of modulus, only that + * the result isn't that of modulo. ;) Note that if initial input is a + * small value, then result will return 0. + * + * Return: a result based on @val in interval [0, @ep_ro). + */ +static inline u32 reciprocal_scale(u32 val, u32 ep_ro) +{ + return (u32)(((u64) val * ep_ro) >> 32); +} + +#if defined(CONFIG_MMU) && \ + (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) +#define might_fault() __might_fault(__FILE__, __LINE__) +void __might_fault(const char *file, int line); +#else +static inline void might_fault(void) { } +#endif + +extern struct atomic_notifier_head panic_notifier_list; +extern long (*panic_blink)(int state); +__printf(1, 2) +void panic(const char *fmt, ...) 
__noreturn __cold; +void nmi_panic(struct pt_regs *regs, const char *msg); +extern void oops_enter(void); +extern void oops_exit(void); +void print_oops_end_marker(void); +extern int oops_may_print(void); +void do_exit(long error_code) __noreturn; +void complete_and_exit(struct completion *, long) __noreturn; + +#ifdef CONFIG_ARCH_HAS_REFCOUNT +void refcount_error_report(struct pt_regs *regs, const char *err); +#else +static inline void refcount_error_report(struct pt_regs *regs, const char *err) +{ } +#endif + +/* Internal, do not use. */ +int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res); +int __must_check _kstrtol(const char *s, unsigned int base, long *res); + +int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res); +int __must_check kstrtoll(const char *s, unsigned int base, long long *res); + +/** + * kstrtoul - convert a string to an unsigned long + * @s: The start of the string. The string must be null-terminated, and may also + * include a single newline before its terminating null. The first character + * may also be a plus sign, but not a minus sign. + * @base: The number base to use. The maximum supported base is 16. If base is + * given as 0, then the base of the string is automatically detected with the + * conventional semantics - If it begins with 0x the number will be parsed as a + * hexadecimal (case insensitive), if it otherwise begins with 0, it will be + * parsed as an octal number. Otherwise it will be parsed as a decimal. + * @res: Where to write the result of the conversion on success. + * + * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. + * Used as a replacement for the obsolete simple_strtoull. Return code must + * be checked. +*/ +static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0. + */ + if (sizeof(unsigned long) == sizeof(unsigned long long) && + __alignof__(unsigned long) == __alignof__(unsigned long long)) + return kstrtoull(s, base, (unsigned long long *)res); + else + return _kstrtoul(s, base, res); +} + +/** + * kstrtol - convert a string to a long + * @s: The start of the string. The string must be null-terminated, and may also + * include a single newline before its terminating null. The first character + * may also be a plus sign or a minus sign. + * @base: The number base to use. The maximum supported base is 16. If base is + * given as 0, then the base of the string is automatically detected with the + * conventional semantics - If it begins with 0x the number will be parsed as a + * hexadecimal (case insensitive), if it otherwise begins with 0, it will be + * parsed as an octal number. Otherwise it will be parsed as a decimal. + * @res: Where to write the result of the conversion on success. + * + * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. + * Used as a replacement for the obsolete simple_strtoull. Return code must + * be checked. + */ +static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(long, long long) = 0. 
+ */ + if (sizeof(long) == sizeof(long long) && + __alignof__(long) == __alignof__(long long)) + return kstrtoll(s, base, (long long *)res); + else + return _kstrtol(s, base, res); +} + +int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res); +int __must_check kstrtoint(const char *s, unsigned int base, int *res); + +static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res) +{ + return kstrtoull(s, base, res); +} + +static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res) +{ + return kstrtoll(s, base, res); +} + +static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res) +{ + return kstrtouint(s, base, res); +} + +static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res) +{ + return kstrtoint(s, base, res); +} + +int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); +int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); +int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); +int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); +int __must_check kstrtobool(const char *s, bool *res); + +int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); +int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); +int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res); +int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res); +int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res); +int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res); +int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res); +int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); +int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); +int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); +int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); + +static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) +{ + return kstrtoull_from_user(s, count, base, res); +} + +static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res) +{ + return kstrtoll_from_user(s, count, base, res); +} + +static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res) +{ + return kstrtouint_from_user(s, count, base, res); +} + +static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res) +{ + return kstrtoint_from_user(s, count, base, res); +} + +/* Obsolete, do not use. 
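For illustration, a hypothetical sysfs store method built on the helpers above: kstrtouint() accepts the optional trailing newline that sysfs input usually carries and reports bad input via its return code, which callers must check:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_threshold_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
        unsigned int val;
        int ret;

        ret = kstrtouint(buf, 10, &val);        /* 0, -EINVAL or -ERANGE */
        if (ret)
                return ret;

        /* ... apply 'val' to the (hypothetical) device here ... */
        return count;
}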
Use kstrto instead */ + +extern unsigned long simple_strtoul(const char *,char **,unsigned int); +extern long simple_strtol(const char *,char **,unsigned int); +extern unsigned long long simple_strtoull(const char *,char **,unsigned int); +extern long long simple_strtoll(const char *,char **,unsigned int); + +extern int num_to_str(char *buf, int size, + unsigned long long num, unsigned int width); + +/* lib/printf utilities */ + +extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); +extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list); +extern __printf(3, 4) +int snprintf(char *buf, size_t size, const char *fmt, ...); +extern __printf(3, 0) +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +extern __printf(3, 4) +int scnprintf(char *buf, size_t size, const char *fmt, ...); +extern __printf(3, 0) +int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); +extern __printf(2, 3) __malloc +char *kasprintf(gfp_t gfp, const char *fmt, ...); +extern __printf(2, 0) __malloc +char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); +extern __printf(2, 0) +const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); + +extern __scanf(2, 3) +int sscanf(const char *, const char *, ...); +extern __scanf(2, 0) +int vsscanf(const char *, const char *, va_list); + +extern int get_option(char **str, int *pint); +extern char *get_options(const char *str, int nints, int *ints); +extern unsigned long long memparse(const char *ptr, char **retptr); +extern bool parse_option_str(const char *str, const char *option); +extern char *next_arg(char *args, char **param, char **val); + +extern int core_kernel_text(unsigned long addr); +extern int init_kernel_text(unsigned long addr); +extern int core_kernel_data(unsigned long addr); +extern int __kernel_text_address(unsigned long addr); +extern int kernel_text_address(unsigned long addr); +extern int func_ptr_is_kernel_text(void *ptr); + +unsigned long int_sqrt(unsigned long); + +#if BITS_PER_LONG < 64 +u32 int_sqrt64(u64 x); +#else +static inline u32 int_sqrt64(u64 x) +{ + return (u32)int_sqrt(x); +} +#endif + +extern void bust_spinlocks(int yes); +extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ +extern int panic_timeout; +extern int panic_on_oops; +extern int panic_on_unrecovered_nmi; +extern int panic_on_io_nmi; +extern int panic_on_warn; +extern int sysctl_panic_on_rcu_stall; +extern int sysctl_panic_on_stackoverflow; + +extern bool crash_kexec_post_notifiers; + +/* + * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It + * holds a CPU number which is executing panic() currently. A value of + * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). + */ +extern atomic_t panic_cpu; +#define PANIC_CPU_INVALID -1 + +/* + * Only to be used by arch init code. If the user over-wrote the default + * CONFIG_PANIC_TIMEOUT, honor it. + */ +static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) +{ + if (panic_timeout == arch_default_timeout) + panic_timeout = timeout; +} +extern const char *print_tainted(void); +enum lockdep_ok { + LOCKDEP_STILL_OK, + LOCKDEP_NOW_UNRELIABLE +}; +extern void add_taint(unsigned flag, enum lockdep_ok); +extern int test_taint(unsigned flag); +extern unsigned long get_taint(void); +extern int root_mountflags; + +extern bool early_boot_irqs_disabled; + +/* + * Values used for system_state. 
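As a sketch (names invented) of the practical difference between the snprintf() and scnprintf() declarations above: scnprintf() returns the number of characters actually stored, excluding the terminating NUL and never more than size - 1, so lengths can be accumulated safely; snprintf() instead returns the length that would have been written had the buffer been large enough.

#include <linux/kernel.h>

static int example_format_status(char *buf, size_t size)
{
        int len = 0;

        len += scnprintf(buf + len, size - len, "state: %s\n", "running");
        len += scnprintf(buf + len, size - len, "users: %d\n", 3);

        return len;     /* bytes actually placed in buf */
}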
Ordering of the states must not be changed + * as code checks for <, <=, >, >= STATE. + */ +extern enum system_states { + SYSTEM_BOOTING, + SYSTEM_SCHEDULING, + SYSTEM_RUNNING, + SYSTEM_HALT, + SYSTEM_POWER_OFF, + SYSTEM_RESTART, + SYSTEM_SUSPEND, +} system_state; + +/* This cannot be an enum because some may be used in assembly source. */ +#define TAINT_PROPRIETARY_MODULE 0 +#define TAINT_FORCED_MODULE 1 +#define TAINT_CPU_OUT_OF_SPEC 2 +#define TAINT_FORCED_RMMOD 3 +#define TAINT_MACHINE_CHECK 4 +#define TAINT_BAD_PAGE 5 +#define TAINT_USER 6 +#define TAINT_DIE 7 +#define TAINT_OVERRIDDEN_ACPI_TABLE 8 +#define TAINT_WARN 9 +#define TAINT_CRAP 10 +#define TAINT_FIRMWARE_WORKAROUND 11 +#define TAINT_OOT_MODULE 12 +#define TAINT_UNSIGNED_MODULE 13 +#define TAINT_SOFTLOCKUP 14 +#define TAINT_LIVEPATCH 15 +#define TAINT_AUX 16 +#define TAINT_RANDSTRUCT 17 +#define TAINT_FLAGS_COUNT 18 + +struct taint_flag { + char c_true; /* character printed when tainted */ + char c_false; /* character printed when not tainted */ + bool module; /* also show as a per-module taint flag */ +}; + +extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT]; + +extern const char hex_asc[]; +#define hex_asc_lo(x) hex_asc[((x) & 0x0f)] +#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] + +static inline char *hex_byte_pack(char *buf, u8 byte) +{ + *buf++ = hex_asc_hi(byte); + *buf++ = hex_asc_lo(byte); + return buf; +} + +extern const char hex_asc_upper[]; +#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)] +#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4] + +static inline char *hex_byte_pack_upper(char *buf, u8 byte) +{ + *buf++ = hex_asc_upper_hi(byte); + *buf++ = hex_asc_upper_lo(byte); + return buf; +} + +extern int hex_to_bin(unsigned char ch); +extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); +extern char *bin2hex(char *dst, const void *src, size_t count); + +bool mac_pton(const char *s, u8 *mac); + +/* + * General tracing related utility functions - trace_printk(), + * tracing_on/tracing_off and tracing_start()/tracing_stop + * + * Use tracing_on/tracing_off when you want to quickly turn on or off + * tracing. It simply enables or disables the recording of the trace events. + * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on + * file, which gives a means for the kernel and userspace to interact. + * Place a tracing_off() in the kernel where you want tracing to end. + * From user space, examine the trace, and then echo 1 > tracing_on + * to continue tracing. + * + * tracing_stop/tracing_start has slightly more overhead. It is used + * by things like suspend to ram where disabling the recording of the + * trace is not enough, but tracing must actually stop because things + * like calling smp_processor_id() may crash the system. + * + * Most likely, you want to use tracing_on/tracing_off. + */ + +enum ftrace_dump_mode { + DUMP_NONE, + DUMP_ALL, + DUMP_ORIG, +}; + +#ifdef CONFIG_TRACING +void tracing_on(void); +void tracing_off(void); +int tracing_is_on(void); +void tracing_snapshot(void); +void tracing_snapshot_alloc(void); + +extern void tracing_start(void); +extern void tracing_stop(void); + +static inline __printf(1, 2) +void ____trace_printk_check_format(const char *fmt, ...) +{ +} +#define __trace_printk_check_format(fmt, args...) 
\ +do { \ + if (0) \ + ____trace_printk_check_format(fmt, ##args); \ +} while (0) + +/** + * trace_printk - printf formatting in the ftrace buffer + * @fmt: the printf format for printing + * + * Note: __trace_printk is an internal function for trace_printk() and + * the @ip is passed in via the trace_printk() macro. + * + * This function allows a kernel developer to debug fast path sections + * that printk is not appropriate for. By scattering in various + * printk like tracing in the code, a developer can quickly see + * where problems are occurring. + * + * This is intended as a debugging tool for the developer only. + * Please refrain from leaving trace_printks scattered around in + * your code. (Extra memory is used for special buffers that are + * allocated when trace_printk() is used.) + * + * A little optimization trick is done here. If there's only one + * argument, there's no need to scan the string for printf formats. + * The trace_puts() will suffice. But how can we take advantage of + * using trace_puts() when trace_printk() has only one argument? + * By stringifying the args and checking the size we can tell + * whether or not there are args. __stringify((__VA_ARGS__)) will + * turn into "()\0" with a size of 3 when there are no args, anything + * else will be bigger. All we need to do is define a string to this, + * and then take its size and compare to 3. If it's bigger, use + * do_trace_printk() otherwise, optimize it to trace_puts(). Then just + * let gcc optimize the rest. + */ + +#define trace_printk(fmt, ...) \ +do { \ + char _______STR[] = __stringify((__VA_ARGS__)); \ + if (sizeof(_______STR) > 3) \ + do_trace_printk(fmt, ##__VA_ARGS__); \ + else \ + trace_puts(fmt); \ +} while (0) + +#define do_trace_printk(fmt, args...) \ +do { \ + static const char *trace_printk_fmt __used \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(fmt) ? fmt : NULL; \ + \ + __trace_printk_check_format(fmt, ##args); \ + \ + if (__builtin_constant_p(fmt)) \ + __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ + else \ + __trace_printk(_THIS_IP_, fmt, ##args); \ +} while (0) + +extern __printf(2, 3) +int __trace_bprintk(unsigned long ip, const char *fmt, ...); + +extern __printf(2, 3) +int __trace_printk(unsigned long ip, const char *fmt, ...); + +/** + * trace_puts - write a string into the ftrace buffer + * @str: the string to record + * + * Note: __trace_bputs is an internal function for trace_puts and + * the @ip is passed in via the trace_puts macro. + * + * This is similar to trace_printk() but is made for those really fast + * paths that a developer wants the least amount of "Heisenbug" effects, + * where the processing of the print format is still too much. + * + * This function allows a kernel developer to debug fast path sections + * that printk is not appropriate for. By scattering in various + * printk like tracing in the code, a developer can quickly see + * where problems are occurring. + * + * This is intended as a debugging tool for the developer only. + * Please refrain from leaving trace_puts scattered around in + * your code. (Extra memory is used for special buffers that are + * allocated when trace_puts() is used.) + * + * Returns: 0 if nothing was written, positive # if string was. + * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) + */ + +#define trace_puts(str) ({ \ + static const char *trace_printk_fmt __used \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(str) ? 
str : NULL; \ + \ + if (__builtin_constant_p(str)) \ + __trace_bputs(_THIS_IP_, trace_printk_fmt); \ + else \ + __trace_puts(_THIS_IP_, str, strlen(str)); \ +}) +extern int __trace_bputs(unsigned long ip, const char *str); +extern int __trace_puts(unsigned long ip, const char *str, int size); + +extern void trace_dump_stack(int skip); + +/* + * The double __builtin_constant_p is because gcc will give us an error + * if we try to allocate the static variable to fmt if it is not a + * constant. Even with the outer if statement. + */ +#define ftrace_vprintk(fmt, vargs) \ +do { \ + if (__builtin_constant_p(fmt)) { \ + static const char *trace_printk_fmt __used \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(fmt) ? fmt : NULL; \ + \ + __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ + } else \ + __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ +} while (0) + +extern __printf(2, 0) int +__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); + +extern __printf(2, 0) int +__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); + +extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); +#else +static inline void tracing_start(void) { } +static inline void tracing_stop(void) { } +static inline void trace_dump_stack(int skip) { } + +static inline void tracing_on(void) { } +static inline void tracing_off(void) { } +static inline int tracing_is_on(void) { return 0; } +static inline void tracing_snapshot(void) { } +static inline void tracing_snapshot_alloc(void) { } + +static inline __printf(1, 2) +int trace_printk(const char *fmt, ...) +{ + return 0; +} +static __printf(1, 0) inline int +ftrace_vprintk(const char *fmt, va_list ap) +{ + return 0; +} +static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } +#endif /* CONFIG_TRACING */ + +/* + * min()/max()/clamp() macros must accomplish three things: + * + * - avoid multiple evaluations of the arguments (so side-effects like + * "x++" happen only once) when non-constant. + * - perform strict type-checking (to generate warnings instead of + * nasty runtime surprises). See the "unnecessary" pointer comparison + * in __typecheck(). + * - retain result as a constant expressions when called with only + * constant expressions (to avoid tripping VLA warnings in stack + * allocation usage). + */ +#define __typecheck(x, y) \ + (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) + +/* + * This returns a constant expression while determining if an argument is + * a constant expression, most importantly without evaluating the argument. + * Glory to Martin Uecker + */ +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) + +#define __no_side_effects(x, y) \ + (__is_constexpr(x) && __is_constexpr(y)) + +#define __safe_cmp(x, y) \ + (__typecheck(x, y) && __no_side_effects(x, y)) + +#define __cmp(x, y, op) ((x) op (y) ? 
(x) : (y)) + +#define __cmp_once(x, y, unique_x, unique_y, op) ({ \ + typeof(x) unique_x = (x); \ + typeof(y) unique_y = (y); \ + __cmp(unique_x, unique_y, op); }) + +#define __careful_cmp(x, y, op) \ + __builtin_choose_expr(__safe_cmp(x, y), \ + __cmp(x, y, op), \ + __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op)) + +/** + * min - return minimum of two values of the same or compatible types + * @x: first value + * @y: second value + */ +#define min(x, y) __careful_cmp(x, y, <) + +/** + * max - return maximum of two values of the same or compatible types + * @x: first value + * @y: second value + */ +#define max(x, y) __careful_cmp(x, y, >) + +/** + * min3 - return minimum of three values + * @x: first value + * @y: second value + * @z: third value + */ +#define min3(x, y, z) min((typeof(x))min(x, y), z) + +/** + * max3 - return maximum of three values + * @x: first value + * @y: second value + * @z: third value + */ +#define max3(x, y, z) max((typeof(x))max(x, y), z) + +/** + * min_not_zero - return the minimum that is _not_ zero, unless both are zero + * @x: value1 + * @y: value2 + */ +#define min_not_zero(x, y) ({ \ + typeof(x) __x = (x); \ + typeof(y) __y = (y); \ + __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) + +/** + * clamp - return a value clamped to a given range with strict typechecking + * @val: current value + * @lo: lowest allowable value + * @hi: highest allowable value + * + * This macro does strict typechecking of @lo/@hi to make sure they are of the + * same type as @val. See the unnecessary pointer comparisons. + */ +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) + +/* + * ..and if you can't take the strict + * types, you can specify one yourself. + * + * Or not use min/max/clamp at all, of course. + */ + +/** + * min_t - return minimum of two values, using the specified type + * @type: data type to use + * @x: first value + * @y: second value + */ +#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <) + +/** + * max_t - return maximum of two values, using the specified type + * @type: data type to use + * @x: first value + * @y: second value + */ +#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >) + +/** + * clamp_t - return a value clamped to a given range using a given type + * @type: the type of variable to use + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of type + * @type to make all the comparisons. + */ +#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) + +/** + * clamp_val - return a value clamped to a given range using val's type + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of whatever + * type the input argument @val is. This is useful when @val is an unsigned + * type and @lo and @hi are literals that will otherwise be assigned a signed + * integer type. + */ +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) + + +/** + * swap - swap values of @a and @b + * @a: first value + * @b: second value + */ +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) + +/* This counts to 12. Any more, it will return 13th argument. */ +#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n +#define COUNT_ARGS(X...) 
__COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) + +#define __CONCAT(a, b) a ## b +#define CONCATENATE(a, b) __CONCAT(a, b) + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + */ +#define container_of(ptr, type, member) ({ \ + void *__mptr = (void *)(ptr); \ + BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ + !__same_type(*(ptr), void), \ + "pointer type mismatch in container_of()"); \ + ((type *)(__mptr - offsetof(type, member))); }) + +/** + * container_of_safe - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged. + */ +#define container_of_safe(ptr, type, member) ({ \ + void *__mptr = (void *)(ptr); \ + BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ + !__same_type(*(ptr), void), \ + "pointer type mismatch in container_of()"); \ + IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \ + ((type *)(__mptr - offsetof(type, member))); }) + +/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ +#ifdef CONFIG_FTRACE_MCOUNT_RECORD +# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD +#endif + +/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */ +#define VERIFY_OCTAL_PERMISSIONS(perms) \ + (BUILD_BUG_ON_ZERO((perms) < 0) + \ + BUILD_BUG_ON_ZERO((perms) > 0777) + \ + /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \ + BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \ + BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \ + /* USER_WRITABLE >= GROUP_WRITABLE */ \ + BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \ + /* OTHER_WRITABLE? Generally considered a bad idea. */ \ + BUILD_BUG_ON_ZERO((perms) & 2) + \ + (perms)) +#endif diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h new file mode 100644 index 000000000..7ee2bb43b --- /dev/null +++ b/include/linux/kernel_stat.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KERNEL_STAT_H +#define _LINUX_KERNEL_STAT_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * 'kernel_stat.h' contains the definitions needed for doing + * some kernel statistics (CPU usage, context switches ...), + * used by rstatd/perfmeter + */ + +enum cpu_usage_stat { + CPUTIME_USER, + CPUTIME_NICE, + CPUTIME_SYSTEM, + CPUTIME_SOFTIRQ, + CPUTIME_IRQ, + CPUTIME_IDLE, + CPUTIME_IOWAIT, + CPUTIME_STEAL, + CPUTIME_GUEST, + CPUTIME_GUEST_NICE, + NR_STATS, +}; + +struct kernel_cpustat { + u64 cpustat[NR_STATS]; +}; + +struct kernel_stat { + unsigned long irqs_sum; + unsigned int softirqs[NR_SOFTIRQS]; +}; + +DECLARE_PER_CPU(struct kernel_stat, kstat); +DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); + +/* Must have preemption disabled for this to be meaningful. 
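// Illustrative sketch, not part of the original header: typical use of the
// container_of() macro defined above to map a pointer to an embedded member
// back to its enclosing structure. "struct example_device" and the helper
// below are hypothetical.
struct example_device {
	int id;
	struct list_head node;	// linked into some driver-private list
};

static struct example_device *example_device_from_node(struct list_head *node)
{
	// The BUILD_BUG_ON_MSG() inside container_of() rejects a pointer
	// whose type does not match the named member, so passing anything
	// other than the embedded list_head fails at compile time.
	return container_of(node, struct example_device, node);
}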
*/ +#define kstat_this_cpu this_cpu_ptr(&kstat) +#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat) +#define kstat_cpu(cpu) per_cpu(kstat, cpu) +#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) + +extern unsigned long long nr_context_switches(void); + +extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); +extern void kstat_incr_irq_this_cpu(unsigned int irq); + +static inline void kstat_incr_softirqs_this_cpu(unsigned int irq) +{ + __this_cpu_inc(kstat.softirqs[irq]); +} + +static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) +{ + return kstat_cpu(cpu).softirqs[irq]; +} + +/* + * Number of interrupts per specific IRQ source, since bootup + */ +extern unsigned int kstat_irqs(unsigned int irq); +extern unsigned int kstat_irqs_usr(unsigned int irq); + +/* + * Number of interrupts per cpu, since bootup + */ +static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) +{ + return kstat_cpu(cpu).irqs_sum; +} + +extern void account_user_time(struct task_struct *, u64); +extern void account_guest_time(struct task_struct *, u64); +extern void account_system_time(struct task_struct *, int, u64); +extern void account_system_index_time(struct task_struct *, u64, + enum cpu_usage_stat); +extern void account_steal_time(u64); +extern void account_idle_time(u64); + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +static inline void account_process_tick(struct task_struct *tsk, int user) +{ + vtime_flush(tsk); +} +#else +extern void account_process_tick(struct task_struct *, int user); +#endif + +extern void account_idle_ticks(unsigned long ticks); + +#endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h new file mode 100644 index 000000000..075fab5f9 --- /dev/null +++ b/include/linux/kernelcapi.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * $Id: kernelcapi.h,v 1.8.6.2 2001/02/07 11:31:31 kai Exp $ + * + * Kernel CAPI 2.0 Interface for Linux + * + * (c) Copyright 1997 by Carsten Paeth (calle@calle.in-berlin.de) + * + */ +#ifndef __KERNELCAPI_H__ +#define __KERNELCAPI_H__ + + +#include +#include +#include +#include +#include + +struct capi20_appl { + u16 applid; + capi_register_params rparam; + void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb); + void *private; + + /* internal to kernelcapi.o */ + unsigned long nrecvctlpkt; + unsigned long nrecvdatapkt; + unsigned long nsentctlpkt; + unsigned long nsentdatapkt; + struct mutex recv_mtx; + struct sk_buff_head recv_queue; + struct work_struct recv_work; + int release_in_progress; +}; + +u16 capi20_isinstalled(void); +u16 capi20_register(struct capi20_appl *ap); +u16 capi20_release(struct capi20_appl *ap); +u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb); +u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]); +u16 capi20_get_version(u32 contr, struct capi_version *verp); +u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]); +u16 capi20_get_profile(u32 contr, struct capi_profile *profp); +int capi20_manufacturer(unsigned long cmd, void __user *data); + +#define CAPICTR_UP 0 +#define CAPICTR_DOWN 1 + +int register_capictr_notifier(struct notifier_block *nb); +int unregister_capictr_notifier(struct notifier_block *nb); + +#define CAPI_NOERROR 0x0000 + +#define CAPI_TOOMANYAPPLS 0x1001 +#define CAPI_LOGBLKSIZETOSMALL 0x1002 +#define CAPI_BUFFEXECEEDS64K 0x1003 +#define CAPI_MSGBUFSIZETOOSMALL 0x1004 +#define CAPI_ANZLOGCONNNOTSUPPORTED 0x1005 +#define CAPI_REGRESERVED 0x1006 +#define CAPI_REGBUSY 
0x1007 +#define CAPI_REGOSRESOURCEERR 0x1008 +#define CAPI_REGNOTINSTALLED 0x1009 +#define CAPI_REGCTRLERNOTSUPPORTEXTEQUIP 0x100a +#define CAPI_REGCTRLERONLYSUPPORTEXTEQUIP 0x100b + +#define CAPI_ILLAPPNR 0x1101 +#define CAPI_ILLCMDORSUBCMDORMSGTOSMALL 0x1102 +#define CAPI_SENDQUEUEFULL 0x1103 +#define CAPI_RECEIVEQUEUEEMPTY 0x1104 +#define CAPI_RECEIVEOVERFLOW 0x1105 +#define CAPI_UNKNOWNNOTPAR 0x1106 +#define CAPI_MSGBUSY 0x1107 +#define CAPI_MSGOSRESOURCEERR 0x1108 +#define CAPI_MSGNOTINSTALLED 0x1109 +#define CAPI_MSGCTRLERNOTSUPPORTEXTEQUIP 0x110a +#define CAPI_MSGCTRLERONLYSUPPORTEXTEQUIP 0x110b + +typedef enum { + CapiMessageNotSupportedInCurrentState = 0x2001, + CapiIllContrPlciNcci = 0x2002, + CapiNoPlciAvailable = 0x2003, + CapiNoNcciAvailable = 0x2004, + CapiNoListenResourcesAvailable = 0x2005, + CapiNoFaxResourcesAvailable = 0x2006, + CapiIllMessageParmCoding = 0x2007, +} RESOURCE_CODING_PROBLEM; + +typedef enum { + CapiB1ProtocolNotSupported = 0x3001, + CapiB2ProtocolNotSupported = 0x3002, + CapiB3ProtocolNotSupported = 0x3003, + CapiB1ProtocolParameterNotSupported = 0x3004, + CapiB2ProtocolParameterNotSupported = 0x3005, + CapiB3ProtocolParameterNotSupported = 0x3006, + CapiBProtocolCombinationNotSupported = 0x3007, + CapiNcpiNotSupported = 0x3008, + CapiCipValueUnknown = 0x3009, + CapiFlagsNotSupported = 0x300a, + CapiFacilityNotSupported = 0x300b, + CapiDataLengthNotSupportedByCurrentProtocol = 0x300c, + CapiResetProcedureNotSupportedByCurrentProtocol = 0x300d, + CapiTeiAssignmentFailed = 0x300e, +} REQUESTED_SERVICES_PROBLEM; + +typedef enum { + CapiSuccess = 0x0000, + CapiSupplementaryServiceNotSupported = 0x300e, + CapiRequestNotAllowedInThisState = 0x3010, +} SUPPLEMENTARY_SERVICE_INFO; + +typedef enum { + CapiProtocolErrorLayer1 = 0x3301, + CapiProtocolErrorLayer2 = 0x3302, + CapiProtocolErrorLayer3 = 0x3303, + CapiTimeOut = 0x3303, // SuppServiceReason + CapiCallGivenToOtherApplication = 0x3304, +} CAPI_REASON; + +#endif /* __KERNELCAPI_H__ */ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h new file mode 100644 index 000000000..84e9358d6 --- /dev/null +++ b/include/linux/kernfs.h @@ -0,0 +1,558 @@ +/* + * kernfs.h - pseudo filesystem decoupled from vfs locking + * + * This file is released under the GPLv2. + */ + +#ifndef __LINUX_KERNFS_H +#define __LINUX_KERNFS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct file; +struct dentry; +struct iattr; +struct seq_file; +struct vm_area_struct; +struct super_block; +struct file_system_type; + +struct kernfs_open_node; +struct kernfs_iattrs; + +enum kernfs_node_type { + KERNFS_DIR = 0x0001, + KERNFS_FILE = 0x0002, + KERNFS_LINK = 0x0004, +}; + +#define KERNFS_TYPE_MASK 0x000f +#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK + +enum kernfs_node_flag { + KERNFS_ACTIVATED = 0x0010, + KERNFS_NS = 0x0020, + KERNFS_HAS_SEQ_SHOW = 0x0040, + KERNFS_HAS_MMAP = 0x0080, + KERNFS_LOCKDEP = 0x0100, + KERNFS_SUICIDAL = 0x0400, + KERNFS_SUICIDED = 0x0800, + KERNFS_EMPTY_DIR = 0x1000, + KERNFS_HAS_RELEASE = 0x2000, +}; + +/* @flags for kernfs_create_root() */ +enum kernfs_root_flag { + /* + * kernfs_nodes are created in the deactivated state and invisible. + * They require explicit kernfs_activate() to become visible. This + * can be used to make related nodes become visible atomically + * after all nodes are created successfully. 
+ */ + KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001, + + /* + * For regular flies, if the opener has CAP_DAC_OVERRIDE, open(2) + * succeeds regardless of the RW permissions. sysfs had an extra + * layer of enforcement where open(2) fails with -EACCES regardless + * of CAP_DAC_OVERRIDE if the permission doesn't have the + * respective read or write access at all (none of S_IRUGO or + * S_IWUGO) or the respective operation isn't implemented. The + * following flag enables that behavior. + */ + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002, + + /* + * The filesystem supports exportfs operation, so userspace can use + * fhandle to access nodes of the fs. + */ + KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004, +}; + +/* type-specific structures for kernfs_node union members */ +struct kernfs_elem_dir { + unsigned long subdirs; + /* children rbtree starts here and goes through kn->rb */ + struct rb_root children; + + /* + * The kernfs hierarchy this directory belongs to. This fits + * better directly in kernfs_node but is here to save space. + */ + struct kernfs_root *root; +}; + +struct kernfs_elem_symlink { + struct kernfs_node *target_kn; +}; + +struct kernfs_elem_attr { + const struct kernfs_ops *ops; + struct kernfs_open_node *open; + loff_t size; + struct kernfs_node *notify_next; /* for kernfs_notify() */ +}; + +/* represent a kernfs node */ +union kernfs_node_id { + struct { + /* + * blktrace will export this struct as a simplified 'struct + * fid' (which is a big data struction), so userspace can use + * it to find kernfs node. The layout must match the first two + * fields of 'struct fid' exactly. + */ + u32 ino; + u32 generation; + }; + u64 id; +}; + +/* + * kernfs_node - the building block of kernfs hierarchy. Each and every + * kernfs node is represented by single kernfs_node. Most fields are + * private to kernfs and shouldn't be accessed directly by kernfs users. + * + * As long as s_count reference is held, the kernfs_node itself is + * accessible. Dereferencing elem or any other outer entity requires + * active reference. + */ +struct kernfs_node { + atomic_t count; + atomic_t active; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif + /* + * Use kernfs_get_parent() and kernfs_name/path() instead of + * accessing the following two fields directly. If the node is + * never moved to a different parent, it is safe to access the + * parent directly. + */ + struct kernfs_node *parent; + const char *name; + + struct rb_node rb; + + const void *ns; /* namespace tag */ + unsigned int hash; /* ns + name hash */ + union { + struct kernfs_elem_dir dir; + struct kernfs_elem_symlink symlink; + struct kernfs_elem_attr attr; + }; + + void *priv; + + union kernfs_node_id id; + unsigned short flags; + umode_t mode; + struct kernfs_iattrs *iattr; +}; + +/* + * kernfs_syscall_ops may be specified on kernfs_create_root() to support + * syscalls. These optional callbacks are invoked on the matching syscalls + * and can perform any kernfs operations which don't necessarily have to be + * the exact operation requested. An active reference is held for each + * kernfs_node parameter. 
+ */ +struct kernfs_syscall_ops { + int (*remount_fs)(struct kernfs_root *root, int *flags, char *data); + int (*show_options)(struct seq_file *sf, struct kernfs_root *root); + + int (*mkdir)(struct kernfs_node *parent, const char *name, + umode_t mode); + int (*rmdir)(struct kernfs_node *kn); + int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent, + const char *new_name); + int (*show_path)(struct seq_file *sf, struct kernfs_node *kn, + struct kernfs_root *root); +}; + +struct kernfs_root { + /* published fields */ + struct kernfs_node *kn; + unsigned int flags; /* KERNFS_ROOT_* flags */ + + /* private fields, do not use outside kernfs proper */ + struct idr ino_idr; + u32 last_ino; + u32 next_generation; + struct kernfs_syscall_ops *syscall_ops; + + /* list of kernfs_super_info of this root, protected by kernfs_mutex */ + struct list_head supers; + + wait_queue_head_t deactivate_waitq; +}; + +struct kernfs_open_file { + /* published fields */ + struct kernfs_node *kn; + struct file *file; + struct seq_file *seq_file; + void *priv; + + /* private fields, do not use outside kernfs proper */ + struct mutex mutex; + struct mutex prealloc_mutex; + int event; + struct list_head list; + char *prealloc_buf; + + size_t atomic_write_len; + bool mmapped:1; + bool released:1; + const struct vm_operations_struct *vm_ops; +}; + +struct kernfs_ops { + /* + * Optional open/release methods. Both are called with + * @of->seq_file populated. + */ + int (*open)(struct kernfs_open_file *of); + void (*release)(struct kernfs_open_file *of); + + /* + * Read is handled by either seq_file or raw_read(). + * + * If seq_show() is present, seq_file path is active. Other seq + * operations are optional and if not implemented, the behavior is + * equivalent to single_open(). @sf->private points to the + * associated kernfs_open_file. + * + * read() is bounced through kernel buffer and a read larger than + * PAGE_SIZE results in partial operation of PAGE_SIZE. + */ + int (*seq_show)(struct seq_file *sf, void *v); + + void *(*seq_start)(struct seq_file *sf, loff_t *ppos); + void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos); + void (*seq_stop)(struct seq_file *sf, void *v); + + ssize_t (*read)(struct kernfs_open_file *of, char *buf, size_t bytes, + loff_t off); + + /* + * write() is bounced through kernel buffer. If atomic_write_len + * is not set, a write larger than PAGE_SIZE results in partial + * operations of PAGE_SIZE chunks. If atomic_write_len is set, + * writes upto the specified size are executed atomically but + * larger ones are rejected with -E2BIG. + */ + size_t atomic_write_len; + /* + * "prealloc" causes a buffer to be allocated at open for + * all read/write requests. As ->seq_show uses seq_read() + * which does its own allocation, it is incompatible with + * ->prealloc. Provide ->read and ->write with ->prealloc. + */ + bool prealloc; + ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, + loff_t off); + + int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lock_class_key lockdep_key; +#endif +}; + +#ifdef CONFIG_KERNFS + +static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn) +{ + return kn->flags & KERNFS_TYPE_MASK; +} + +/** + * kernfs_enable_ns - enable namespace under a directory + * @kn: directory of interest, should be empty + * + * This is to be called right after @kn is created to enable namespace + * under it. 
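// Illustrative sketch, not part of the original header: a minimal read-only
// attribute using the seq_file path of struct kernfs_ops above. Only
// ->seq_show is supplied, so reads behave like single_open(); as noted in the
// kernfs_ops comment, sf->private points at the kernfs_open_file, and
// ->kn->priv carries whatever was passed at creation time. All "example_*"
// names are hypothetical.
static int example_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	seq_printf(sf, "%s\n", (const char *)of->kn->priv);
	return 0;
}

static const struct kernfs_ops example_ro_ops = {
	.seq_show = example_seq_show,
};

// A node using these ops could then be created with the kernfs_create_file()
// wrapper declared later in this header, e.g.:
//	kernfs_create_file(parent, "example", 0444, 0, &example_ro_ops,
//			   (void *)"hello");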
All children of @kn must have non-NULL namespace tags and + * only the ones which match the super_block's tag will be visible. + */ +static inline void kernfs_enable_ns(struct kernfs_node *kn) +{ + WARN_ON_ONCE(kernfs_type(kn) != KERNFS_DIR); + WARN_ON_ONCE(!RB_EMPTY_ROOT(&kn->dir.children)); + kn->flags |= KERNFS_NS; +} + +/** + * kernfs_ns_enabled - test whether namespace is enabled + * @kn: the node to test + * + * Test whether namespace filtering is enabled for the children of @ns. + */ +static inline bool kernfs_ns_enabled(struct kernfs_node *kn) +{ + return kn->flags & KERNFS_NS; +} + +int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen); +int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn, + char *buf, size_t buflen); +void pr_cont_kernfs_name(struct kernfs_node *kn); +void pr_cont_kernfs_path(struct kernfs_node *kn); +struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn); +struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent, + const char *name, const void *ns); +struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent, + const char *path, const void *ns); +void kernfs_get(struct kernfs_node *kn); +void kernfs_put(struct kernfs_node *kn); + +struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry); +struct kernfs_root *kernfs_root_from_sb(struct super_block *sb); +struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn); + +struct dentry *kernfs_node_dentry(struct kernfs_node *kn, + struct super_block *sb); +struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, + unsigned int flags, void *priv); +void kernfs_destroy_root(struct kernfs_root *root); + +struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, + const char *name, umode_t mode, + kuid_t uid, kgid_t gid, + void *priv, const void *ns); +struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, + const char *name); +struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, + const char *name, umode_t mode, + kuid_t uid, kgid_t gid, + loff_t size, + const struct kernfs_ops *ops, + void *priv, const void *ns, + struct lock_class_key *key); +struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, + const char *name, + struct kernfs_node *target); +void kernfs_activate(struct kernfs_node *kn); +void kernfs_remove(struct kernfs_node *kn); +void kernfs_break_active_protection(struct kernfs_node *kn); +void kernfs_unbreak_active_protection(struct kernfs_node *kn); +bool kernfs_remove_self(struct kernfs_node *kn); +int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, + const void *ns); +int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, + const char *new_name, const void *new_ns); +int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr); +void kernfs_notify(struct kernfs_node *kn); + +const void *kernfs_super_ns(struct super_block *sb); +struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, + struct kernfs_root *root, unsigned long magic, + bool *new_sb_created, const void *ns); +void kernfs_kill_sb(struct super_block *sb); +struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns); + +void kernfs_init(void); + +struct kernfs_node *kernfs_get_node_by_id(struct kernfs_root *root, + const union kernfs_node_id *id); +#else /* CONFIG_KERNFS */ + +static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn) +{ return 0; } /* whatever */ + +static 
inline void kernfs_enable_ns(struct kernfs_node *kn) { } + +static inline bool kernfs_ns_enabled(struct kernfs_node *kn) +{ return false; } + +static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) +{ return -ENOSYS; } + +static inline int kernfs_path_from_node(struct kernfs_node *root_kn, + struct kernfs_node *kn, + char *buf, size_t buflen) +{ return -ENOSYS; } + +static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { } +static inline void pr_cont_kernfs_path(struct kernfs_node *kn) { } + +static inline struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn) +{ return NULL; } + +static inline struct kernfs_node * +kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name, + const void *ns) +{ return NULL; } +static inline struct kernfs_node * +kernfs_walk_and_get_ns(struct kernfs_node *parent, const char *path, + const void *ns) +{ return NULL; } + +static inline void kernfs_get(struct kernfs_node *kn) { } +static inline void kernfs_put(struct kernfs_node *kn) { } + +static inline struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry) +{ return NULL; } + +static inline struct kernfs_root *kernfs_root_from_sb(struct super_block *sb) +{ return NULL; } + +static inline struct inode * +kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn) +{ return NULL; } + +static inline struct kernfs_root * +kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags, + void *priv) +{ return ERR_PTR(-ENOSYS); } + +static inline void kernfs_destroy_root(struct kernfs_root *root) { } + +static inline struct kernfs_node * +kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, + umode_t mode, kuid_t uid, kgid_t gid, + void *priv, const void *ns) +{ return ERR_PTR(-ENOSYS); } + +static inline struct kernfs_node * +__kernfs_create_file(struct kernfs_node *parent, const char *name, + umode_t mode, kuid_t uid, kgid_t gid, + loff_t size, const struct kernfs_ops *ops, + void *priv, const void *ns, struct lock_class_key *key) +{ return ERR_PTR(-ENOSYS); } + +static inline struct kernfs_node * +kernfs_create_link(struct kernfs_node *parent, const char *name, + struct kernfs_node *target) +{ return ERR_PTR(-ENOSYS); } + +static inline void kernfs_activate(struct kernfs_node *kn) { } + +static inline void kernfs_remove(struct kernfs_node *kn) { } + +static inline bool kernfs_remove_self(struct kernfs_node *kn) +{ return false; } + +static inline int kernfs_remove_by_name_ns(struct kernfs_node *kn, + const char *name, const void *ns) +{ return -ENOSYS; } + +static inline int kernfs_rename_ns(struct kernfs_node *kn, + struct kernfs_node *new_parent, + const char *new_name, const void *new_ns) +{ return -ENOSYS; } + +static inline int kernfs_setattr(struct kernfs_node *kn, + const struct iattr *iattr) +{ return -ENOSYS; } + +static inline void kernfs_notify(struct kernfs_node *kn) { } + +static inline const void *kernfs_super_ns(struct super_block *sb) +{ return NULL; } + +static inline struct dentry * +kernfs_mount_ns(struct file_system_type *fs_type, int flags, + struct kernfs_root *root, unsigned long magic, + bool *new_sb_created, const void *ns) +{ return ERR_PTR(-ENOSYS); } + +static inline void kernfs_kill_sb(struct super_block *sb) { } + +static inline void kernfs_init(void) { } + +#endif /* CONFIG_KERNFS */ + +/** + * kernfs_path - build full path of a given node + * @kn: kernfs_node of interest + * @buf: buffer to copy @kn's name into + * @buflen: size of @buf + * + * Builds and returns the full path of @kn in @buf 
of @buflen bytes. The + * path is built from the end of @buf so the returned pointer usually + * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated + * and %NULL is returned. + */ +static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen) +{ + return kernfs_path_from_node(kn, NULL, buf, buflen); +} + +static inline struct kernfs_node * +kernfs_find_and_get(struct kernfs_node *kn, const char *name) +{ + return kernfs_find_and_get_ns(kn, name, NULL); +} + +static inline struct kernfs_node * +kernfs_walk_and_get(struct kernfs_node *kn, const char *path) +{ + return kernfs_walk_and_get_ns(kn, path, NULL); +} + +static inline struct kernfs_node * +kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode, + void *priv) +{ + return kernfs_create_dir_ns(parent, name, mode, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + priv, NULL); +} + +static inline struct kernfs_node * +kernfs_create_file_ns(struct kernfs_node *parent, const char *name, + umode_t mode, kuid_t uid, kgid_t gid, + loff_t size, const struct kernfs_ops *ops, + void *priv, const void *ns) +{ + struct lock_class_key *key = NULL; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + key = (struct lock_class_key *)&ops->lockdep_key; +#endif + return __kernfs_create_file(parent, name, mode, uid, gid, + size, ops, priv, ns, key); +} + +static inline struct kernfs_node * +kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode, + loff_t size, const struct kernfs_ops *ops, void *priv) +{ + return kernfs_create_file_ns(parent, name, mode, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + size, ops, priv, NULL); +} + +static inline int kernfs_remove_by_name(struct kernfs_node *parent, + const char *name) +{ + return kernfs_remove_by_name_ns(parent, name, NULL); +} + +static inline int kernfs_rename(struct kernfs_node *kn, + struct kernfs_node *new_parent, + const char *new_name) +{ + return kernfs_rename_ns(kn, new_parent, new_name, NULL); +} + +static inline struct dentry * +kernfs_mount(struct file_system_type *fs_type, int flags, + struct kernfs_root *root, unsigned long magic, + bool *new_sb_created) +{ + return kernfs_mount_ns(fs_type, flags, root, + magic, new_sb_created, NULL); +} + +#endif /* __LINUX_KERNFS_H */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h new file mode 100644 index 000000000..fe9f6f2dd --- /dev/null +++ b/include/linux/kexec.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_KEXEC_H +#define LINUX_KEXEC_H + +#define IND_DESTINATION_BIT 0 +#define IND_INDIRECTION_BIT 1 +#define IND_DONE_BIT 2 +#define IND_SOURCE_BIT 3 + +#define IND_DESTINATION (1 << IND_DESTINATION_BIT) +#define IND_INDIRECTION (1 << IND_INDIRECTION_BIT) +#define IND_DONE (1 << IND_DONE_BIT) +#define IND_SOURCE (1 << IND_SOURCE_BIT) +#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE) + +#if !defined(__ASSEMBLY__) + +#include +#include + +#include + +#ifdef CONFIG_KEXEC_CORE +#include +#include +#include +#include +#include + +/* Verify architecture specific macros are defined */ + +#ifndef KEXEC_SOURCE_MEMORY_LIMIT +#error KEXEC_SOURCE_MEMORY_LIMIT not defined +#endif + +#ifndef KEXEC_DESTINATION_MEMORY_LIMIT +#error KEXEC_DESTINATION_MEMORY_LIMIT not defined +#endif + +#ifndef KEXEC_CONTROL_MEMORY_LIMIT +#error KEXEC_CONTROL_MEMORY_LIMIT not defined +#endif + +#ifndef KEXEC_CONTROL_MEMORY_GFP +#define KEXEC_CONTROL_MEMORY_GFP (GFP_KERNEL | __GFP_NORETRY) +#endif + +#ifndef KEXEC_CONTROL_PAGE_SIZE +#error KEXEC_CONTROL_PAGE_SIZE not defined 
+#endif + +#ifndef KEXEC_ARCH +#error KEXEC_ARCH not defined +#endif + +#ifndef KEXEC_CRASH_CONTROL_MEMORY_LIMIT +#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT KEXEC_CONTROL_MEMORY_LIMIT +#endif + +#ifndef KEXEC_CRASH_MEM_ALIGN +#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE +#endif + +#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME + +/* + * This structure is used to hold the arguments that are used when loading + * kernel binaries. + */ + +typedef unsigned long kimage_entry_t; + +struct kexec_segment { + /* + * This pointer can point to user memory if kexec_load() system + * call is used or will point to kernel memory if + * kexec_file_load() system call is used. + * + * Use ->buf when expecting to deal with user memory and use ->kbuf + * when expecting to deal with kernel memory. + */ + union { + void __user *buf; + void *kbuf; + }; + size_t bufsz; + unsigned long mem; + size_t memsz; +}; + +#ifdef CONFIG_COMPAT +struct compat_kexec_segment { + compat_uptr_t buf; + compat_size_t bufsz; + compat_ulong_t mem; /* User space sees this as a (void *) ... */ + compat_size_t memsz; +}; +#endif + +#ifdef CONFIG_KEXEC_FILE +struct purgatory_info { + /* + * Pointer to elf header at the beginning of kexec_purgatory. + * Note: kexec_purgatory is read only + */ + const Elf_Ehdr *ehdr; + /* + * Temporary, modifiable buffer for sechdrs used for relocation. + * This memory can be freed post image load. + */ + Elf_Shdr *sechdrs; + /* + * Temporary, modifiable buffer for stripped purgatory used for + * relocation. This memory can be freed post image load. + */ + void *purgatory_buf; +}; + +struct kimage; + +typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size); +typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf, + unsigned long kernel_len, char *initrd, + unsigned long initrd_len, char *cmdline, + unsigned long cmdline_len); +typedef int (kexec_cleanup_t)(void *loader_data); + +#ifdef CONFIG_KEXEC_VERIFY_SIG +typedef int (kexec_verify_sig_t)(const char *kernel_buf, + unsigned long kernel_len); +#endif + +struct kexec_file_ops { + kexec_probe_t *probe; + kexec_load_t *load; + kexec_cleanup_t *cleanup; +#ifdef CONFIG_KEXEC_VERIFY_SIG + kexec_verify_sig_t *verify_sig; +#endif +}; + +extern const struct kexec_file_ops * const kexec_file_loaders[]; + +int kexec_image_probe_default(struct kimage *image, void *buf, + unsigned long buf_len); + +/** + * struct kexec_buf - parameters for finding a place for a buffer in memory + * @image: kexec image in which memory to search. + * @buffer: Contents which will be copied to the allocated memory. + * @bufsz: Size of @buffer. + * @mem: On return will have address of the buffer in memory. + * @memsz: Size for the buffer in memory. + * @buf_align: Minimum alignment needed. + * @buf_min: The buffer can't be placed below this address. + * @buf_max: The buffer can't be placed above this address. + * @top_down: Allocate from top of memory. 
+ */ +struct kexec_buf { + struct kimage *image; + void *buffer; + unsigned long bufsz; + unsigned long mem; + unsigned long memsz; + unsigned long buf_align; + unsigned long buf_min; + unsigned long buf_max; + bool top_down; +}; + +int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf); +int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, + void *buf, unsigned int size, + bool get_value); +void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); + +int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +int __weak arch_kexec_apply_relocations(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); + +int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, + int (*func)(struct resource *, void *)); +extern int kexec_add_buffer(struct kexec_buf *kbuf); +int kexec_locate_mem_hole(struct kexec_buf *kbuf); + +/* Alignment required for elf header segment */ +#define ELF_CORE_HEADER_ALIGN 4096 + +struct crash_mem_range { + u64 start, end; +}; + +struct crash_mem { + unsigned int max_nr_ranges; + unsigned int nr_ranges; + struct crash_mem_range ranges[0]; +}; + +extern int crash_exclude_mem_range(struct crash_mem *mem, + unsigned long long mstart, + unsigned long long mend); +extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map, + void **addr, unsigned long *sz); +#endif /* CONFIG_KEXEC_FILE */ + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + + unsigned long start; + struct page *control_code_page; + struct page *swap_page; + void *vmcoreinfo_data_copy; /* locates in the crash memory */ + + unsigned long nr_segments; + struct kexec_segment segment[KEXEC_SEGMENT_MAX]; + + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unusable_pages; + + /* Address of next control page to allocate for crash kernels. 
*/ + unsigned long control_page; + + /* Flags to indicate special processing */ + unsigned int type : 1; +#define KEXEC_TYPE_DEFAULT 0 +#define KEXEC_TYPE_CRASH 1 + unsigned int preserve_context : 1; + /* If set, we are using file mode kexec syscall */ + unsigned int file_mode:1; + +#ifdef ARCH_HAS_KIMAGE_ARCH + struct kimage_arch arch; +#endif + +#ifdef CONFIG_KEXEC_FILE + /* Additional fields for file based kexec syscall */ + void *kernel_buf; + unsigned long kernel_buf_len; + + void *initrd_buf; + unsigned long initrd_buf_len; + + char *cmdline_buf; + unsigned long cmdline_buf_len; + + /* File operations provided by image loader */ + const struct kexec_file_ops *fops; + + /* Image loader handling the kernel can store a pointer here */ + void *image_loader_data; + + /* Information for loading purgatory */ + struct purgatory_info purgatory_info; +#endif + +#ifdef CONFIG_IMA_KEXEC + /* Virtual address of IMA measurement buffer for kexec syscall */ + void *ima_buffer; +#endif +}; + +/* kexec interface functions */ +extern void machine_kexec(struct kimage *image); +extern int machine_kexec_prepare(struct kimage *image); +extern void machine_kexec_cleanup(struct kimage *image); +extern int kernel_kexec(void); +extern struct page *kimage_alloc_control_pages(struct kimage *image, + unsigned int order); +extern void __crash_kexec(struct pt_regs *); +extern void crash_kexec(struct pt_regs *); +int kexec_should_crash(struct task_struct *); +int kexec_crash_loaded(void); +void crash_save_cpu(struct pt_regs *regs, int cpu); +extern int kimage_crash_copy_vmcoreinfo(struct kimage *image); + +extern struct kimage *kexec_image; +extern struct kimage *kexec_crash_image; +extern int kexec_load_disabled; + +#ifndef kexec_flush_icache_page +#define kexec_flush_icache_page(page) +#endif + +/* List of defined/legal kexec flags */ +#ifndef CONFIG_KEXEC_JUMP +#define KEXEC_FLAGS KEXEC_ON_CRASH +#else +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) +#endif + +/* List of defined/legal kexec file flags */ +#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \ + KEXEC_FILE_NO_INITRAMFS) + +/* Location of a reserved region to hold the crash kernel. 
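// Illustrative sketch, not part of the original header: how a
// kexec_file_load() image loader might place a blob using struct kexec_buf
// and kexec_add_buffer() declared above. On success the chosen load address
// is returned in kbuf.mem. "image", "blob" and "len" are hypothetical
// arguments.
static int example_place_blob(struct kimage *image, void *blob,
			      unsigned long len)
{
	struct kexec_buf kbuf = {
		.image     = image,
		.buffer    = blob,
		.bufsz     = len,
		.memsz     = len,
		.buf_align = PAGE_SIZE,
		.buf_min   = 0,
		.buf_max   = ULONG_MAX,
		.top_down  = false,	// search for a hole from the bottom up
	};
	int ret;

	ret = kexec_add_buffer(&kbuf);	// also records the segment in *image
	if (ret)
		return ret;

	// kbuf.mem now holds the physical address picked for the blob.
	return 0;
}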
+ */ +extern struct resource crashk_res; +extern struct resource crashk_low_res; +extern note_buf_t __percpu *crash_notes; + +/* flag to track if kexec reboot is in progress */ +extern bool kexec_in_progress; + +int crash_shrink_memory(unsigned long new_size); +size_t crash_get_memory_size(void); +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); + +void arch_kexec_protect_crashkres(void); +void arch_kexec_unprotect_crashkres(void); + +#ifndef page_to_boot_pfn +static inline unsigned long page_to_boot_pfn(struct page *page) +{ + return page_to_pfn(page); +} +#endif + +#ifndef boot_pfn_to_page +static inline struct page *boot_pfn_to_page(unsigned long boot_pfn) +{ + return pfn_to_page(boot_pfn); +} +#endif + +#ifndef phys_to_boot_phys +static inline unsigned long phys_to_boot_phys(phys_addr_t phys) +{ + return phys; +} +#endif + +#ifndef boot_phys_to_phys +static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys) +{ + return boot_phys; +} +#endif + +static inline unsigned long virt_to_boot_phys(void *addr) +{ + return phys_to_boot_phys(__pa((unsigned long)addr)); +} + +static inline void *boot_phys_to_virt(unsigned long entry) +{ + return phys_to_virt(boot_phys_to_phys(entry)); +} + +#ifndef arch_kexec_post_alloc_pages +static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { return 0; } +#endif + +#ifndef arch_kexec_pre_free_pages +static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { } +#endif + +#else /* !CONFIG_KEXEC_CORE */ +struct pt_regs; +struct task_struct; +static inline void __crash_kexec(struct pt_regs *regs) { } +static inline void crash_kexec(struct pt_regs *regs) { } +static inline int kexec_should_crash(struct task_struct *p) { return 0; } +static inline int kexec_crash_loaded(void) { return 0; } +#define kexec_in_progress false +#endif /* CONFIG_KEXEC_CORE */ + +#endif /* !defined(__ASSEBMLY__) */ + +#endif /* LINUX_KEXEC_H */ diff --git a/include/linux/key-type.h b/include/linux/key-type.h new file mode 100644 index 000000000..3341ddac2 --- /dev/null +++ b/include/linux/key-type.h @@ -0,0 +1,182 @@ +/* Definitions for key type implementations + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_KEY_TYPE_H +#define _LINUX_KEY_TYPE_H + +#include +#include + +#ifdef CONFIG_KEYS + +/* + * Pre-parsed payload, used by key add, update and instantiate. + * + * This struct will be cleared and data and datalen will be set with the data + * and length parameters from the caller and quotalen will be set from + * def_datalen from the key type. Then if the preparse() op is provided by the + * key type, that will be called. Then the struct will be passed to the + * instantiate() or the update() op. + * + * If the preparse() op is given, the free_preparse() op will be called to + * clear the contents. 
+ */ +struct key_preparsed_payload { + char *description; /* Proposed key description (or NULL) */ + union key_payload payload; /* Proposed payload */ + const void *data; /* Raw data */ + size_t datalen; /* Raw datalen */ + size_t quotalen; /* Quota length for proposed payload */ + time64_t expiry; /* Expiry time of key */ +} __randomize_layout; + +typedef int (*request_key_actor_t)(struct key *auth_key, void *aux); + +/* + * Preparsed matching criterion. + */ +struct key_match_data { + /* Comparison function, defaults to exact description match, but can be + * overridden by type->match_preparse(). Should return true if a match + * is found and false if not. + */ + bool (*cmp)(const struct key *key, + const struct key_match_data *match_data); + + const void *raw_data; /* Raw match data */ + void *preparsed; /* For ->match_preparse() to stash stuff */ + unsigned lookup_type; /* Type of lookup for this search. */ +#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */ +#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */ +}; + +/* + * kernel managed key type definition + */ +struct key_type { + /* name of the type */ + const char *name; + + /* default payload length for quota precalculation (optional) + * - this can be used instead of calling key_payload_reserve(), that + * function only needs to be called if the real datalen is different + */ + size_t def_datalen; + + /* vet a description */ + int (*vet_description)(const char *description); + + /* Preparse the data blob from userspace that is to be the payload, + * generating a proposed description and payload that will be handed to + * the instantiate() and update() ops. + */ + int (*preparse)(struct key_preparsed_payload *prep); + + /* Free a preparse data structure. + */ + void (*free_preparse)(struct key_preparsed_payload *prep); + + /* instantiate a key of this type + * - this method should call key_payload_reserve() to determine if the + * user's quota will hold the payload + */ + int (*instantiate)(struct key *key, struct key_preparsed_payload *prep); + + /* update a key of this type (optional) + * - this method should call key_payload_reserve() to recalculate the + * quota consumption + * - the key must be locked against read when modifying + */ + int (*update)(struct key *key, struct key_preparsed_payload *prep); + + /* Preparse the data supplied to ->match() (optional). The + * data to be preparsed can be found in match_data->raw_data. + * The lookup type can also be set by this function. + */ + int (*match_preparse)(struct key_match_data *match_data); + + /* Free preparsed match data (optional). This should be supplied it + * ->match_preparse() is supplied. 
*/ + void (*match_free)(struct key_match_data *match_data); + + /* clear some of the data from a key on revokation (optional) + * - the key's semaphore will be write-locked by the caller + */ + void (*revoke)(struct key *key); + + /* clear the data from a key (optional) */ + void (*destroy)(struct key *key); + + /* describe a key */ + void (*describe)(const struct key *key, struct seq_file *p); + + /* read a key's data (optional) + * - permission checks will be done by the caller + * - the key's semaphore will be readlocked by the caller + * - should return the amount of data that could be read, no matter how + * much is copied into the buffer + * - shouldn't do the copy if the buffer is NULL + */ + long (*read)(const struct key *key, char *buffer, size_t buflen); + + /* handle request_key() for this type instead of invoking + * /sbin/request-key (optional) + * - key is the key to instantiate + * - authkey is the authority to assume when instantiating this key + * - op is the operation to be done, usually "create" + * - the call must not return until the instantiation process has run + * its course + */ + request_key_actor_t request_key; + + /* Look up a keyring access restriction (optional) + * + * - NULL is a valid return value (meaning the requested restriction + * is known but will never block addition of a key) + * - should return -EINVAL if the restriction is unknown + */ + struct key_restriction *(*lookup_restriction)(const char *params); + + /* internal fields */ + struct list_head link; /* link in types list */ + struct lock_class_key lock_class; /* key->sem lock class */ +} __randomize_layout; + +extern struct key_type key_type_keyring; + +extern int register_key_type(struct key_type *ktype); +extern void unregister_key_type(struct key_type *ktype); + +extern int key_payload_reserve(struct key *key, size_t datalen); +extern int key_instantiate_and_link(struct key *key, + const void *data, + size_t datalen, + struct key *keyring, + struct key *authkey); +extern int key_reject_and_link(struct key *key, + unsigned timeout, + unsigned error, + struct key *keyring, + struct key *authkey); +extern void complete_request_key(struct key *authkey, int error); + +static inline int key_negate_and_link(struct key *key, + unsigned timeout, + struct key *keyring, + struct key *authkey) +{ + return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey); +} + +extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); + +#endif /* CONFIG_KEYS */ +#endif /* _LINUX_KEY_TYPE_H */ diff --git a/include/linux/key.h b/include/linux/key.h new file mode 100644 index 000000000..3683c6a6f --- /dev/null +++ b/include/linux/key.h @@ -0,0 +1,425 @@ +/* Authentication token and access key management + * + * Copyright (C) 2004, 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * + * See Documentation/security/keys/core.rst for information on keys/keyrings. 
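// Illustrative sketch, not part of the original header: a minimal key type
// built on the declarations above. generic_key_instantiate() moves the
// preparsed payload into the key and charges the quota, so only
// preparse/free_preparse/destroy carry real work here. All "example_*" names
// and the PAGE_SIZE payload limit are hypothetical.
static int example_preparse(struct key_preparsed_payload *prep)
{
	if (prep->datalen == 0 || prep->datalen > PAGE_SIZE)
		return -EINVAL;

	prep->payload.data[0] = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
	if (!prep->payload.data[0])
		return -ENOMEM;

	prep->quotalen = prep->datalen;	// charged against the owner's quota
	return 0;
}

static void example_free_preparse(struct key_preparsed_payload *prep)
{
	kfree(prep->payload.data[0]);	// NULL by now if instantiate took it
}

static void example_destroy(struct key *key)
{
	kfree(key->payload.data[0]);
}

static struct key_type key_type_example = {
	.name		= "example",
	.preparse	= example_preparse,
	.free_preparse	= example_free_preparse,
	.instantiate	= generic_key_instantiate,
	.destroy	= example_destroy,
};

// The type would be made available with register_key_type(&key_type_example)
// and removed again with unregister_key_type(&key_type_example).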
+ */ + +#ifndef _LINUX_KEY_H +#define _LINUX_KEY_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __KERNEL__ +#include + +/* key handle serial number */ +typedef int32_t key_serial_t; + +/* key handle permissions mask */ +typedef uint32_t key_perm_t; + +struct key; + +#ifdef CONFIG_KEYS + +#undef KEY_DEBUGGING + +#define KEY_POS_VIEW 0x01000000 /* possessor can view a key's attributes */ +#define KEY_POS_READ 0x02000000 /* possessor can read key payload / view keyring */ +#define KEY_POS_WRITE 0x04000000 /* possessor can update key payload / add link to keyring */ +#define KEY_POS_SEARCH 0x08000000 /* possessor can find a key in search / search a keyring */ +#define KEY_POS_LINK 0x10000000 /* possessor can create a link to a key/keyring */ +#define KEY_POS_SETATTR 0x20000000 /* possessor can set key attributes */ +#define KEY_POS_ALL 0x3f000000 + +#define KEY_USR_VIEW 0x00010000 /* user permissions... */ +#define KEY_USR_READ 0x00020000 +#define KEY_USR_WRITE 0x00040000 +#define KEY_USR_SEARCH 0x00080000 +#define KEY_USR_LINK 0x00100000 +#define KEY_USR_SETATTR 0x00200000 +#define KEY_USR_ALL 0x003f0000 + +#define KEY_GRP_VIEW 0x00000100 /* group permissions... */ +#define KEY_GRP_READ 0x00000200 +#define KEY_GRP_WRITE 0x00000400 +#define KEY_GRP_SEARCH 0x00000800 +#define KEY_GRP_LINK 0x00001000 +#define KEY_GRP_SETATTR 0x00002000 +#define KEY_GRP_ALL 0x00003f00 + +#define KEY_OTH_VIEW 0x00000001 /* third party permissions... */ +#define KEY_OTH_READ 0x00000002 +#define KEY_OTH_WRITE 0x00000004 +#define KEY_OTH_SEARCH 0x00000008 +#define KEY_OTH_LINK 0x00000010 +#define KEY_OTH_SETATTR 0x00000020 +#define KEY_OTH_ALL 0x0000003f + +#define KEY_PERM_UNDEF 0xffffffff + +struct seq_file; +struct user_struct; +struct signal_struct; +struct cred; + +struct key_type; +struct key_owner; +struct keyring_list; +struct keyring_name; + +struct keyring_index_key { + struct key_type *type; + const char *description; + size_t desc_len; +}; + +union key_payload { + void __rcu *rcu_data0; + void *data[4]; +}; + +/*****************************************************************************/ +/* + * key reference with possession attribute handling + * + * NOTE! key_ref_t is a typedef'd pointer to a type that is not actually + * defined. This is because we abuse the bottom bit of the reference to carry a + * flag to indicate whether the calling process possesses that key in one of + * its keyrings. + * + * the key_ref_t has been made a separate type so that the compiler can reject + * attempts to dereference it without proper conversion. 
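// Illustrative sketch, not part of the original header: the possession flag
// rides in the bottom bit of a key_ref_t and is split out again by the
// helpers defined just below; the struct key pointer itself is recovered
// unchanged.
static void example_key_ref_roundtrip(struct key *key)
{
	key_ref_t ref = make_key_ref(key, true);	// reference marked "possessed"

	WARN_ON(key_ref_to_ptr(ref) != key);	// low bit stripped again
	WARN_ON(!is_key_possessed(ref));	// possession bit still visible
}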
+ * + * the three functions are used to assemble and disassemble references + */ +typedef struct __key_reference_with_attributes *key_ref_t; + +static inline key_ref_t make_key_ref(const struct key *key, + bool possession) +{ + return (key_ref_t) ((unsigned long) key | possession); +} + +static inline struct key *key_ref_to_ptr(const key_ref_t key_ref) +{ + return (struct key *) ((unsigned long) key_ref & ~1UL); +} + +static inline bool is_key_possessed(const key_ref_t key_ref) +{ + return (unsigned long) key_ref & 1UL; +} + +typedef int (*key_restrict_link_func_t)(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *restriction_key); + +struct key_restriction { + key_restrict_link_func_t check; + struct key *key; + struct key_type *keytype; +}; + +enum key_state { + KEY_IS_UNINSTANTIATED, + KEY_IS_POSITIVE, /* Positively instantiated */ +}; + +/*****************************************************************************/ +/* + * authentication token / access credential / keyring + * - types of key include: + * - keyrings + * - disk encryption IDs + * - Kerberos TGTs and tickets + */ +struct key { + refcount_t usage; /* number of references */ + key_serial_t serial; /* key serial number */ + union { + struct list_head graveyard_link; + struct rb_node serial_node; + }; + struct rw_semaphore sem; /* change vs change sem */ + struct key_user *user; /* owner of this key */ + void *security; /* security data for this key */ + union { + time64_t expiry; /* time at which key expires (or 0) */ + time64_t revoked_at; /* time at which key was revoked */ + }; + time64_t last_used_at; /* last time used for LRU keyring discard */ + kuid_t uid; + kgid_t gid; + key_perm_t perm; /* access permissions */ + unsigned short quotalen; /* length added to quota */ + unsigned short datalen; /* payload data length + * - may not match RCU dereferenced payload + * - payload should contain own length + */ + short state; /* Key state (+) or rejection error (-) */ + +#ifdef KEY_DEBUGGING + unsigned magic; +#define KEY_DEBUG_MAGIC 0x18273645u +#endif + + unsigned long flags; /* status flags (change with bitops) */ +#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */ +#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */ +#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */ +#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */ +#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */ +#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */ +#define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */ +#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */ +#define KEY_FLAG_KEEP 8 /* set if key should not be removed */ +#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */ + + /* the key type and key description string + * - the desc is used to match a key against search criteria + * - it should be a printable string + * - eg: for krb5 AFS, this might be "afs@REDHAT.COM" + */ + union { + struct keyring_index_key index_key; + struct { + struct key_type *type; /* type of key */ + char *description; + }; + }; + + /* key data + * - this is used to hold the data actually used in cryptography or + * whatever + */ + union { + union key_payload payload; + struct { + /* Keyring bits */ + struct list_head name_link; + struct assoc_array keys; + }; + }; + + /* This is set on a keyring to restrict 
the addition of a link to a key + * to it. If this structure isn't provided then it is assumed that the + * keyring is open to any addition. It is ignored for non-keyring + * keys. Only set this value using keyring_restrict(), keyring_alloc(), + * or key_alloc(). + * + * This is intended for use with rings of trusted keys whereby addition + * to the keyring needs to be controlled. KEY_ALLOC_BYPASS_RESTRICTION + * overrides this, allowing the kernel to add extra keys without + * restriction. + */ + struct key_restriction *restrict_link; +}; + +extern struct key *key_alloc(struct key_type *type, + const char *desc, + kuid_t uid, kgid_t gid, + const struct cred *cred, + key_perm_t perm, + unsigned long flags, + struct key_restriction *restrict_link); + + +#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ +#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ +#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ +#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ +#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ +#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */ +#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */ + +extern void key_revoke(struct key *key); +extern void key_invalidate(struct key *key); +extern void key_put(struct key *key); + +static inline struct key *__key_get(struct key *key) +{ + refcount_inc(&key->usage); + return key; +} + +static inline struct key *key_get(struct key *key) +{ + return key ? __key_get(key) : key; +} + +static inline void key_ref_put(key_ref_t key_ref) +{ + key_put(key_ref_to_ptr(key_ref)); +} + +extern struct key *request_key(struct key_type *type, + const char *description, + const char *callout_info); + +extern struct key *request_key_with_auxdata(struct key_type *type, + const char *description, + const void *callout_info, + size_t callout_len, + void *aux); + +extern struct key *request_key_async(struct key_type *type, + const char *description, + const void *callout_info, + size_t callout_len); + +extern struct key *request_key_async_with_auxdata(struct key_type *type, + const char *description, + const void *callout_info, + size_t callout_len, + void *aux); + +extern int wait_for_key_construction(struct key *key, bool intr); + +extern int key_validate(const struct key *key); + +extern key_ref_t key_create_or_update(key_ref_t keyring, + const char *type, + const char *description, + const void *payload, + size_t plen, + key_perm_t perm, + unsigned long flags); + +extern int key_update(key_ref_t key, + const void *payload, + size_t plen); + +extern int key_link(struct key *keyring, + struct key *key); + +extern int key_unlink(struct key *keyring, + struct key *key); + +extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, + const struct cred *cred, + key_perm_t perm, + unsigned long flags, + struct key_restriction *restrict_link, + struct key *dest); + +extern int restrict_link_reject(struct key *keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *restriction_key); + +extern int keyring_clear(struct key *keyring); + +extern key_ref_t keyring_search(key_ref_t keyring, + struct key_type *type, + const char *description); + +extern int keyring_add_key(struct key *keyring, + struct key *key); + +extern int keyring_restrict(key_ref_t keyring, const char *type, + const char *restriction); + +extern struct key 
*key_lookup(key_serial_t id); + +static inline key_serial_t key_serial(const struct key *key) +{ + return key ? key->serial : 0; +} + +extern void key_set_timeout(struct key *, unsigned); + +/* + * The permissions required on a key that we're looking up. + */ +#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */ +#define KEY_NEED_READ 0x02 /* Require permission to read content */ +#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */ +#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */ +#define KEY_NEED_LINK 0x10 /* Require permission to link */ +#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ +#define KEY_NEED_ALL 0x3f /* All the above permissions */ + +static inline short key_read_state(const struct key *key) +{ + /* Barrier versus mark_key_instantiated(). */ + return smp_load_acquire(&key->state); +} + +/** + * key_is_positive - Determine if a key has been positively instantiated + * @key: The key to check. + * + * Return true if the specified key has been positively instantiated, false + * otherwise. + */ +static inline bool key_is_positive(const struct key *key) +{ + return key_read_state(key) == KEY_IS_POSITIVE; +} + +static inline bool key_is_negative(const struct key *key) +{ + return key_read_state(key) < 0; +} + +#define dereference_key_rcu(KEY) \ + (rcu_dereference((KEY)->payload.rcu_data0)) + +#define dereference_key_locked(KEY) \ + (rcu_dereference_protected((KEY)->payload.rcu_data0, \ + rwsem_is_locked(&((struct key *)(KEY))->sem))) + +#define rcu_assign_keypointer(KEY, PAYLOAD) \ +do { \ + rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \ +} while (0) + +#ifdef CONFIG_SYSCTL +extern struct ctl_table key_sysctls[]; +#endif +/* + * the userspace interface + */ +extern int install_thread_keyring_to_cred(struct cred *cred); +extern void key_fsuid_changed(struct task_struct *tsk); +extern void key_fsgid_changed(struct task_struct *tsk); +extern void key_init(void); + +#else /* CONFIG_KEYS */ + +#define key_validate(k) 0 +#define key_serial(k) 0 +#define key_get(k) ({ NULL; }) +#define key_revoke(k) do { } while(0) +#define key_invalidate(k) do { } while(0) +#define key_put(k) do { } while(0) +#define key_ref_put(k) do { } while(0) +#define make_key_ref(k, p) NULL +#define key_ref_to_ptr(k) NULL +#define is_key_possessed(k) 0 +#define key_fsuid_changed(t) do { } while(0) +#define key_fsgid_changed(t) do { } while(0) +#define key_init() do { } while(0) + +#endif /* CONFIG_KEYS */ +#endif /* __KERNEL__ */ +#endif /* _LINUX_KEY_H */ diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h new file mode 100644 index 000000000..73d11e409 --- /dev/null +++ b/include/linux/keyboard.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_KEYBOARD_H +#define __LINUX_KEYBOARD_H + +#include + +struct notifier_block; +extern unsigned short *key_maps[MAX_NR_KEYMAPS]; +extern unsigned short plain_map[NR_KEYS]; + +struct keyboard_notifier_param { + struct vc_data *vc; /* VC on which the keyboard press was done */ + int down; /* Pressure of the key? 
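A minimal sketch of how a kernel-side consumer typically uses the lookup interface above: request_key() upcalls to userspace if no matching key is present, and the reference it returns must be dropped with key_put(). The description string "example:token" and the use of key_type_user (declared in <keys/user-type.h>) are illustrative assumptions only, not taken from this header.

#include <linux/key.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <keys/user-type.h>	/* key_type_user; assumed for the example */

static int example_lookup_token(void)
{
	struct key *key;

	/* Find (or have /sbin/request-key construct) a user-type key. */
	key = request_key(&key_type_user, "example:token", NULL);
	if (IS_ERR(key))
		return PTR_ERR(key);

	pr_info("key %d, positively instantiated: %d\n",
		key_serial(key), key_is_positive(key));

	key_put(key);	/* drop the reference request_key() returned */
	return 0;
}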
*/ + int shift; /* Current shift mask */ + int ledstate; /* Current led state */ + unsigned int value; /* keycode, unicode value or keysym */ +}; + +extern int register_keyboard_notifier(struct notifier_block *nb); +extern int unregister_keyboard_notifier(struct notifier_block *nb); +#endif diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h new file mode 100644 index 000000000..89fc8dc7b --- /dev/null +++ b/include/linux/kfifo.h @@ -0,0 +1,834 @@ +/* + * A generic kernel FIFO implementation + * + * Copyright (C) 2013 Stefani Seibold + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef _LINUX_KFIFO_H +#define _LINUX_KFIFO_H + +/* + * How to porting drivers to the new generic FIFO API: + * + * - Modify the declaration of the "struct kfifo *" object into a + * in-place "struct kfifo" object + * - Init the in-place object with kfifo_alloc() or kfifo_init() + * Note: The address of the in-place "struct kfifo" object must be + * passed as the first argument to this functions + * - Replace the use of __kfifo_put into kfifo_in and __kfifo_get + * into kfifo_out + * - Replace the use of kfifo_put into kfifo_in_spinlocked and kfifo_get + * into kfifo_out_spinlocked + * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc + * must be passed now to the kfifo_in_spinlocked and kfifo_out_spinlocked + * as the last parameter + * - The formerly __kfifo_* functions are renamed into kfifo_* + */ + +/* + * Note about locking: There is no locking required until only one reader + * and one writer is using the fifo and no kfifo_reset() will be called. + * kfifo_reset_out() can be safely used, until it will be only called + * in the reader thread. + * For multiple writer and one reader there is only a need to lock the writer. + * And vice versa for only one writer and multiple reader there is only a need + * to lock the reader. + */ + +#include +#include +#include +#include + +struct __kfifo { + unsigned int in; + unsigned int out; + unsigned int mask; + unsigned int esize; + void *data; +}; + +#define __STRUCT_KFIFO_COMMON(datatype, recsize, ptrtype) \ + union { \ + struct __kfifo kfifo; \ + datatype *type; \ + const datatype *const_type; \ + char (*rectype)[recsize]; \ + ptrtype *ptr; \ + ptrtype const *ptr_const; \ + } + +#define __STRUCT_KFIFO(type, size, recsize, ptrtype) \ +{ \ + __STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \ + type buf[((size < 2) || (size & (size - 1))) ? 
-1 : size]; \ +} + +#define STRUCT_KFIFO(type, size) \ + struct __STRUCT_KFIFO(type, size, 0, type) + +#define __STRUCT_KFIFO_PTR(type, recsize, ptrtype) \ +{ \ + __STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \ + type buf[0]; \ +} + +#define STRUCT_KFIFO_PTR(type) \ + struct __STRUCT_KFIFO_PTR(type, 0, type) + +/* + * define compatibility "struct kfifo" for dynamic allocated fifos + */ +struct kfifo __STRUCT_KFIFO_PTR(unsigned char, 0, void); + +#define STRUCT_KFIFO_REC_1(size) \ + struct __STRUCT_KFIFO(unsigned char, size, 1, void) + +#define STRUCT_KFIFO_REC_2(size) \ + struct __STRUCT_KFIFO(unsigned char, size, 2, void) + +/* + * define kfifo_rec types + */ +struct kfifo_rec_ptr_1 __STRUCT_KFIFO_PTR(unsigned char, 1, void); +struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void); + +/* + * helper macro to distinguish between real in place fifo where the fifo + * array is a part of the structure and the fifo type where the array is + * outside of the fifo structure. + */ +#define __is_kfifo_ptr(fifo) \ + (sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type)))) + +/** + * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object + * @fifo: name of the declared fifo + * @type: type of the fifo elements + */ +#define DECLARE_KFIFO_PTR(fifo, type) STRUCT_KFIFO_PTR(type) fifo + +/** + * DECLARE_KFIFO - macro to declare a fifo object + * @fifo: name of the declared fifo + * @type: type of the fifo elements + * @size: the number of elements in the fifo, this must be a power of 2 + */ +#define DECLARE_KFIFO(fifo, type, size) STRUCT_KFIFO(type, size) fifo + +/** + * INIT_KFIFO - Initialize a fifo declared by DECLARE_KFIFO + * @fifo: name of the declared fifo datatype + */ +#define INIT_KFIFO(fifo) \ +(void)({ \ + typeof(&(fifo)) __tmp = &(fifo); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + __kfifo->in = 0; \ + __kfifo->out = 0; \ + __kfifo->mask = __is_kfifo_ptr(__tmp) ? 0 : ARRAY_SIZE(__tmp->buf) - 1;\ + __kfifo->esize = sizeof(*__tmp->buf); \ + __kfifo->data = __is_kfifo_ptr(__tmp) ? NULL : __tmp->buf; \ +}) + +/** + * DEFINE_KFIFO - macro to define and initialize a fifo + * @fifo: name of the declared fifo datatype + * @type: type of the fifo elements + * @size: the number of elements in the fifo, this must be a power of 2 + * + * Note: the macro can be used for global and local fifo data type variables. + */ +#define DEFINE_KFIFO(fifo, type, size) \ + DECLARE_KFIFO(fifo, type, size) = \ + (typeof(fifo)) { \ + { \ + { \ + .in = 0, \ + .out = 0, \ + .mask = __is_kfifo_ptr(&(fifo)) ? \ + 0 : \ + ARRAY_SIZE((fifo).buf) - 1, \ + .esize = sizeof(*(fifo).buf), \ + .data = __is_kfifo_ptr(&(fifo)) ? \ + NULL : \ + (fifo).buf, \ + } \ + } \ + } + + +static inline unsigned int __must_check +__kfifo_uint_must_check_helper(unsigned int val) +{ + return val; +} + +static inline int __must_check +__kfifo_int_must_check_helper(int val) +{ + return val; +} + +/** + * kfifo_initialized - Check if the fifo is initialized + * @fifo: address of the fifo to check + * + * Return %true if fifo is initialized, otherwise %false. + * Assumes the fifo was 0 before. 
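As a usage sketch of the declaration macros above (structure and variable names are illustrative): a fifo can either be embedded in a driver structure and set up at runtime with INIT_KFIFO(), or defined and initialized statically with DEFINE_KFIFO().

#include <linux/kfifo.h>

struct example_dev {
	/* 16-element fifo of ints, storage embedded in the structure */
	DECLARE_KFIFO(events, int, 16);
};

static void example_dev_setup(struct example_dev *dev)
{
	INIT_KFIFO(dev->events);
}

/* file-scope fifo, defined and initialized in one step */
static DEFINE_KFIFO(global_events, int, 16);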
+ */ +#define kfifo_initialized(fifo) ((fifo)->kfifo.mask) + +/** + * kfifo_esize - returns the size of the element managed by the fifo + * @fifo: address of the fifo to be used + */ +#define kfifo_esize(fifo) ((fifo)->kfifo.esize) + +/** + * kfifo_recsize - returns the size of the record length field + * @fifo: address of the fifo to be used + */ +#define kfifo_recsize(fifo) (sizeof(*(fifo)->rectype)) + +/** + * kfifo_size - returns the size of the fifo in elements + * @fifo: address of the fifo to be used + */ +#define kfifo_size(fifo) ((fifo)->kfifo.mask + 1) + +/** + * kfifo_reset - removes the entire fifo content + * @fifo: address of the fifo to be used + * + * Note: usage of kfifo_reset() is dangerous. It should be only called when the + * fifo is exclusived locked or when it is secured that no other thread is + * accessing the fifo. + */ +#define kfifo_reset(fifo) \ +(void)({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + __tmp->kfifo.in = __tmp->kfifo.out = 0; \ +}) + +/** + * kfifo_reset_out - skip fifo content + * @fifo: address of the fifo to be used + * + * Note: The usage of kfifo_reset_out() is safe until it will be only called + * from the reader thread and there is only one concurrent reader. Otherwise + * it is dangerous and must be handled in the same way as kfifo_reset(). + */ +#define kfifo_reset_out(fifo) \ +(void)({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + __tmp->kfifo.out = __tmp->kfifo.in; \ +}) + +/** + * kfifo_len - returns the number of used elements in the fifo + * @fifo: address of the fifo to be used + */ +#define kfifo_len(fifo) \ +({ \ + typeof((fifo) + 1) __tmpl = (fifo); \ + __tmpl->kfifo.in - __tmpl->kfifo.out; \ +}) + +/** + * kfifo_is_empty - returns true if the fifo is empty + * @fifo: address of the fifo to be used + */ +#define kfifo_is_empty(fifo) \ +({ \ + typeof((fifo) + 1) __tmpq = (fifo); \ + __tmpq->kfifo.in == __tmpq->kfifo.out; \ +}) + +/** + * kfifo_is_full - returns true if the fifo is full + * @fifo: address of the fifo to be used + */ +#define kfifo_is_full(fifo) \ +({ \ + typeof((fifo) + 1) __tmpq = (fifo); \ + kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ +}) + +/** + * kfifo_avail - returns the number of unused elements in the fifo + * @fifo: address of the fifo to be used + */ +#define kfifo_avail(fifo) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmpq = (fifo); \ + const size_t __recsize = sizeof(*__tmpq->rectype); \ + unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ + (__recsize) ? ((__avail <= __recsize) ? 0 : \ + __kfifo_max_r(__avail - __recsize, __recsize)) : \ + __avail; \ +}) \ +) + +/** + * kfifo_skip - skip output data + * @fifo: address of the fifo to be used + */ +#define kfifo_skip(fifo) \ +(void)({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__recsize) \ + __kfifo_skip_r(__kfifo, __recsize); \ + else \ + __kfifo->out++; \ +}) + +/** + * kfifo_peek_len - gets the size of the next fifo record + * @fifo: address of the fifo to be used + * + * This function returns the size of the next fifo record in number of bytes. + */ +#define kfifo_peek_len(fifo) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (!__recsize) ? 
kfifo_len(__tmp) * sizeof(*__tmp->type) : \ + __kfifo_len_r(__kfifo, __recsize); \ +}) \ +) + +/** + * kfifo_alloc - dynamically allocates a new fifo buffer + * @fifo: pointer to the fifo + * @size: the number of elements in the fifo, this must be a power of 2 + * @gfp_mask: get_free_pages mask, passed to kmalloc() + * + * This macro dynamically allocates a new fifo buffer. + * + * The number of elements will be rounded-up to a power of 2. + * The fifo will be release with kfifo_free(). + * Return 0 if no error, otherwise an error code. + */ +#define kfifo_alloc(fifo, size, gfp_mask) \ +__kfifo_int_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + __is_kfifo_ptr(__tmp) ? \ + __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ + -EINVAL; \ +}) \ +) + +/** + * kfifo_free - frees the fifo + * @fifo: the fifo to be freed + */ +#define kfifo_free(fifo) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__is_kfifo_ptr(__tmp)) \ + __kfifo_free(__kfifo); \ +}) + +/** + * kfifo_init - initialize a fifo using a preallocated buffer + * @fifo: the fifo to assign the buffer + * @buffer: the preallocated buffer to be used + * @size: the size of the internal buffer, this have to be a power of 2 + * + * This macro initializes a fifo using a preallocated buffer. + * + * The number of elements will be rounded-up to a power of 2. + * Return 0 if no error, otherwise an error code. + */ +#define kfifo_init(fifo, buffer, size) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + __is_kfifo_ptr(__tmp) ? \ + __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ + -EINVAL; \ +}) + +/** + * kfifo_put - put data into the fifo + * @fifo: address of the fifo to be used + * @val: the data to be added + * + * This macro copies the given value into the fifo. + * It returns 0 if the fifo was full. Otherwise it returns the number + * processed elements. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_put(fifo, val) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof(*__tmp->const_type) __val = (val); \ + unsigned int __ret; \ + size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__recsize) \ + __ret = __kfifo_in_r(__kfifo, &__val, sizeof(__val), \ + __recsize); \ + else { \ + __ret = !kfifo_is_full(__tmp); \ + if (__ret) { \ + (__is_kfifo_ptr(__tmp) ? \ + ((typeof(__tmp->type))__kfifo->data) : \ + (__tmp->buf) \ + )[__kfifo->in & __tmp->kfifo.mask] = \ + *(typeof(__tmp->type))&__val; \ + smp_wmb(); \ + __kfifo->in++; \ + } \ + } \ + __ret; \ +}) + +/** + * kfifo_get - get data from the fifo + * @fifo: address of the fifo to be used + * @val: address where to store the data + * + * This macro reads the data from the fifo. + * It returns 0 if the fifo was empty. Otherwise it returns the number + * processed elements. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. 
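A small sketch of the element-wise kfifo_put()/kfifo_get() pair documented here, relying on the stated rule that a single producer and a single consumer need no extra locking (function and fifo names are illustrative):

#include <linux/kfifo.h>
#include <linux/printk.h>

static DEFINE_KFIFO(byte_fifo, unsigned char, 32);

static void example_producer_consumer(void)
{
	unsigned char c;

	if (!kfifo_put(&byte_fifo, 0x42))	/* returns 0 when full */
		pr_warn("fifo full, byte dropped\n");

	if (kfifo_get(&byte_fifo, &c))		/* returns 0 when empty */
		pr_info("got 0x%02x, %u element(s) left\n",
			c, kfifo_len(&byte_fifo));
}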
+ */ +#define kfifo_get(fifo, val) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof(__tmp->ptr) __val = (val); \ + unsigned int __ret; \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__recsize) \ + __ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \ + __recsize); \ + else { \ + __ret = !kfifo_is_empty(__tmp); \ + if (__ret) { \ + *(typeof(__tmp->type))__val = \ + (__is_kfifo_ptr(__tmp) ? \ + ((typeof(__tmp->type))__kfifo->data) : \ + (__tmp->buf) \ + )[__kfifo->out & __tmp->kfifo.mask]; \ + smp_wmb(); \ + __kfifo->out++; \ + } \ + } \ + __ret; \ +}) \ +) + +/** + * kfifo_peek - get data from the fifo without removing + * @fifo: address of the fifo to be used + * @val: address where to store the data + * + * This reads the data from the fifo without removing it from the fifo. + * It returns 0 if the fifo was empty. Otherwise it returns the number + * processed elements. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_peek(fifo, val) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof(__tmp->ptr) __val = (val); \ + unsigned int __ret; \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__recsize) \ + __ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \ + __recsize); \ + else { \ + __ret = !kfifo_is_empty(__tmp); \ + if (__ret) { \ + *(typeof(__tmp->type))__val = \ + (__is_kfifo_ptr(__tmp) ? \ + ((typeof(__tmp->type))__kfifo->data) : \ + (__tmp->buf) \ + )[__kfifo->out & __tmp->kfifo.mask]; \ + smp_wmb(); \ + } \ + } \ + __ret; \ +}) \ +) + +/** + * kfifo_in - put data into the fifo + * @fifo: address of the fifo to be used + * @buf: the data to be added + * @n: number of elements to be added + * + * This macro copies the given buffer into the fifo and returns the + * number of copied elements. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_in(fifo, buf, n) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof(__tmp->ptr_const) __buf = (buf); \ + unsigned long __n = (n); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ?\ + __kfifo_in_r(__kfifo, __buf, __n, __recsize) : \ + __kfifo_in(__kfifo, __buf, __n); \ +}) + +/** + * kfifo_in_spinlocked - put data into the fifo using a spinlock for locking + * @fifo: address of the fifo to be used + * @buf: the data to be added + * @n: number of elements to be added + * @lock: pointer to the spinlock to use for locking + * + * This macro copies the given values buffer into the fifo and returns the + * number of copied elements. + */ +#define kfifo_in_spinlocked(fifo, buf, n, lock) \ +({ \ + unsigned long __flags; \ + unsigned int __ret; \ + spin_lock_irqsave(lock, __flags); \ + __ret = kfifo_in(fifo, buf, n); \ + spin_unlock_irqrestore(lock, __flags); \ + __ret; \ +}) + +/* alias for kfifo_in_spinlocked, will be removed in a future release */ +#define kfifo_in_locked(fifo, buf, n, lock) \ + kfifo_in_spinlocked(fifo, buf, n, lock) + +/** + * kfifo_out - get data from the fifo + * @fifo: address of the fifo to be used + * @buf: pointer to the storage buffer + * @n: max. 
number of elements to get + * + * This macro get some data from the fifo and return the numbers of elements + * copied. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_out(fifo, buf, n) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof(__tmp->ptr) __buf = (buf); \ + unsigned long __n = (n); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ?\ + __kfifo_out_r(__kfifo, __buf, __n, __recsize) : \ + __kfifo_out(__kfifo, __buf, __n); \ +}) \ +) + +/** + * kfifo_out_spinlocked - get data from the fifo using a spinlock for locking + * @fifo: address of the fifo to be used + * @buf: pointer to the storage buffer + * @n: max. number of elements to get + * @lock: pointer to the spinlock to use for locking + * + * This macro get the data from the fifo and return the numbers of elements + * copied. + */ +#define kfifo_out_spinlocked(fifo, buf, n, lock) \ +__kfifo_uint_must_check_helper( \ +({ \ + unsigned long __flags; \ + unsigned int __ret; \ + spin_lock_irqsave(lock, __flags); \ + __ret = kfifo_out(fifo, buf, n); \ + spin_unlock_irqrestore(lock, __flags); \ + __ret; \ +}) \ +) + +/* alias for kfifo_out_spinlocked, will be removed in a future release */ +#define kfifo_out_locked(fifo, buf, n, lock) \ + kfifo_out_spinlocked(fifo, buf, n, lock) + +/** + * kfifo_from_user - puts some data from user space into the fifo + * @fifo: address of the fifo to be used + * @from: pointer to the data to be added + * @len: the length of the data to be added + * @copied: pointer to output variable to store the number of copied bytes + * + * This macro copies at most @len bytes from the @from into the + * fifo, depending of the available space and returns -EFAULT/0. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_from_user(fifo, from, len, copied) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + const void __user *__from = (from); \ + unsigned int __len = (len); \ + unsigned int *__copied = (copied); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ? \ + __kfifo_from_user_r(__kfifo, __from, __len, __copied, __recsize) : \ + __kfifo_from_user(__kfifo, __from, __len, __copied); \ +}) \ +) + +/** + * kfifo_to_user - copies data from the fifo into user space + * @fifo: address of the fifo to be used + * @to: where the data must be copied + * @len: the size of the destination buffer + * @copied: pointer to output variable to store the number of copied bytes + * + * This macro copies at most @len bytes from the fifo into the + * @to buffer and returns -EFAULT/0. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_to_user(fifo, to, len, copied) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + void __user *__to = (to); \ + unsigned int __len = (len); \ + unsigned int *__copied = (copied); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ? 
\ + __kfifo_to_user_r(__kfifo, __to, __len, __copied, __recsize) : \ + __kfifo_to_user(__kfifo, __to, __len, __copied); \ +}) \ +) + +/** + * kfifo_dma_in_prepare - setup a scatterlist for DMA input + * @fifo: address of the fifo to be used + * @sgl: pointer to the scatterlist array + * @nents: number of entries in the scatterlist array + * @len: number of elements to transfer + * + * This macro fills a scatterlist for DMA input. + * It returns the number entries in the scatterlist array. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macros. + */ +#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + struct scatterlist *__sgl = (sgl); \ + int __nents = (nents); \ + unsigned int __len = (len); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ? \ + __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ + __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len); \ +}) + +/** + * kfifo_dma_in_finish - finish a DMA IN operation + * @fifo: address of the fifo to be used + * @len: number of bytes to received + * + * This macro finish a DMA IN operation. The in counter will be updated by + * the len parameter. No error checking will be done. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macros. + */ +#define kfifo_dma_in_finish(fifo, len) \ +(void)({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + unsigned int __len = (len); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__recsize) \ + __kfifo_dma_in_finish_r(__kfifo, __len, __recsize); \ + else \ + __kfifo->in += __len / sizeof(*__tmp->type); \ +}) + +/** + * kfifo_dma_out_prepare - setup a scatterlist for DMA output + * @fifo: address of the fifo to be used + * @sgl: pointer to the scatterlist array + * @nents: number of entries in the scatterlist array + * @len: number of elements to transfer + * + * This macro fills a scatterlist for DMA output which at most @len bytes + * to transfer. + * It returns the number entries in the scatterlist array. + * A zero means there is no space available and the scatterlist is not filled. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macros. + */ +#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + struct scatterlist *__sgl = (sgl); \ + int __nents = (nents); \ + unsigned int __len = (len); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ? \ + __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ + __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len); \ +}) + +/** + * kfifo_dma_out_finish - finish a DMA OUT operation + * @fifo: address of the fifo to be used + * @len: number of bytes transferred + * + * This macro finish a DMA OUT operation. The out counter will be updated by + * the len parameter. No error checking will be done. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macros. 
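The kfifo_from_user()/kfifo_to_user() helpers above map naturally onto a character device's write and read methods; both return 0 or -EFAULT and report the transferred byte count through @copied. A hedged sketch, with device wiring and locking against concurrent readers/writers left out and all names assumed for illustration:

#include <linux/fs.h>
#include <linux/kfifo.h>

static DEFINE_KFIFO(chardev_fifo, unsigned char, 128);

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret = kfifo_to_user(&chardev_fifo, buf, count, &copied);

	return ret ? ret : copied;
}

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret = kfifo_from_user(&chardev_fifo, buf, count, &copied);

	return ret ? ret : copied;
}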
+ */ +#define kfifo_dma_out_finish(fifo, len) \ +(void)({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + unsigned int __len = (len); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__recsize) \ + __kfifo_dma_out_finish_r(__kfifo, __recsize); \ + else \ + __kfifo->out += __len / sizeof(*__tmp->type); \ +}) + +/** + * kfifo_out_peek - gets some data from the fifo + * @fifo: address of the fifo to be used + * @buf: pointer to the storage buffer + * @n: max. number of elements to get + * + * This macro get the data from the fifo and return the numbers of elements + * copied. The data is not removed from the fifo. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_out_peek(fifo, buf, n) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof(__tmp->ptr) __buf = (buf); \ + unsigned long __n = (n); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ? \ + __kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \ + __kfifo_out_peek(__kfifo, __buf, __n); \ +}) \ +) + +extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, + size_t esize, gfp_t gfp_mask); + +extern void __kfifo_free(struct __kfifo *fifo); + +extern int __kfifo_init(struct __kfifo *fifo, void *buffer, + unsigned int size, size_t esize); + +extern unsigned int __kfifo_in(struct __kfifo *fifo, + const void *buf, unsigned int len); + +extern unsigned int __kfifo_out(struct __kfifo *fifo, + void *buf, unsigned int len); + +extern int __kfifo_from_user(struct __kfifo *fifo, + const void __user *from, unsigned long len, unsigned int *copied); + +extern int __kfifo_to_user(struct __kfifo *fifo, + void __user *to, unsigned long len, unsigned int *copied); + +extern unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len); + +extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len); + +extern unsigned int __kfifo_out_peek(struct __kfifo *fifo, + void *buf, unsigned int len); + +extern unsigned int __kfifo_in_r(struct __kfifo *fifo, + const void *buf, unsigned int len, size_t recsize); + +extern unsigned int __kfifo_out_r(struct __kfifo *fifo, + void *buf, unsigned int len, size_t recsize); + +extern int __kfifo_from_user_r(struct __kfifo *fifo, + const void __user *from, unsigned long len, unsigned int *copied, + size_t recsize); + +extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, + unsigned long len, unsigned int *copied, size_t recsize); + +extern unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); + +extern void __kfifo_dma_in_finish_r(struct __kfifo *fifo, + unsigned int len, size_t recsize); + +extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); + +extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize); + +extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); + +extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize); + +extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, + void *buf, unsigned int len, size_t recsize); + +extern unsigned int __kfifo_max_r(unsigned int len, size_t recsize); + +#endif diff --git 
a/include/linux/kgdb.h b/include/linux/kgdb.h new file mode 100644 index 000000000..6be5545d3 --- /dev/null +++ b/include/linux/kgdb.h @@ -0,0 +1,327 @@ +/* + * This provides the callbacks and functions that KGDB needs to share between + * the core, I/O and arch-specific portions. + * + * Author: Amit Kale and + * Tom Rini + * + * 2001-2004 (c) Amit S. Kale and 2003-2005 (c) MontaVista Software, Inc. + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ +#ifndef _KGDB_H_ +#define _KGDB_H_ + +#include +#include +#include +#ifdef CONFIG_HAVE_ARCH_KGDB +#include +#endif + +#ifdef CONFIG_KGDB +struct pt_regs; + +/** + * kgdb_skipexception - (optional) exit kgdb_handle_exception early + * @exception: Exception vector number + * @regs: Current &struct pt_regs. + * + * On some architectures it is required to skip a breakpoint + * exception when it occurs after a breakpoint has been removed. + * This can be implemented in the architecture specific portion of kgdb. + */ +extern int kgdb_skipexception(int exception, struct pt_regs *regs); + +struct tasklet_struct; +struct task_struct; +struct uart_port; + +/** + * kgdb_breakpoint - compiled in breakpoint + * + * This will be implemented as a static inline per architecture. This + * function is called by the kgdb core to execute an architecture + * specific trap to cause kgdb to enter the exception processing. + * + */ +void kgdb_breakpoint(void); + +extern int kgdb_connected; +extern int kgdb_io_module_registered; + +extern atomic_t kgdb_setting_breakpoint; +extern atomic_t kgdb_cpu_doing_single_step; + +extern struct task_struct *kgdb_usethread; +extern struct task_struct *kgdb_contthread; + +enum kgdb_bptype { + BP_BREAKPOINT = 0, + BP_HARDWARE_BREAKPOINT, + BP_WRITE_WATCHPOINT, + BP_READ_WATCHPOINT, + BP_ACCESS_WATCHPOINT, + BP_POKE_BREAKPOINT, +}; + +enum kgdb_bpstate { + BP_UNDEFINED = 0, + BP_REMOVED, + BP_SET, + BP_ACTIVE +}; + +struct kgdb_bkpt { + unsigned long bpt_addr; + unsigned char saved_instr[BREAK_INSTR_SIZE]; + enum kgdb_bptype type; + enum kgdb_bpstate state; +}; + +struct dbg_reg_def_t { + char *name; + int size; + int offset; +}; + +#ifndef DBG_MAX_REG_NUM +#define DBG_MAX_REG_NUM 0 +#else +extern struct dbg_reg_def_t dbg_reg_def[]; +extern char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs); +extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs); +#endif +#ifndef KGDB_MAX_BREAKPOINTS +# define KGDB_MAX_BREAKPOINTS 1000 +#endif + +#define KGDB_HW_BREAKPOINT 1 + +/* + * Functions each KGDB-supporting architecture must provide: + */ + +/** + * kgdb_arch_init - Perform any architecture specific initalization. + * + * This function will handle the initalization of any architecture + * specific callbacks. + */ +extern int kgdb_arch_init(void); + +/** + * kgdb_arch_exit - Perform any architecture specific uninitalization. + * + * This function will handle the uninitalization of any architecture + * specific callbacks, for dynamic registration and unregistration. + */ +extern void kgdb_arch_exit(void); + +/** + * pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs + * @gdb_regs: A pointer to hold the registers in the order GDB wants. + * @regs: The &struct pt_regs of the current process. + * + * Convert the pt_regs in @regs into the format for registers that + * GDB expects, stored in @gdb_regs. 
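A minimal illustration of the compiled-in breakpoint declared above: code that wants to hand control to an attached debugger at a known point checks kgdb_connected and then calls kgdb_breakpoint(), which expands to the architecture's trap instruction (the wrapper function name is illustrative).

#include <linux/kgdb.h>

static void example_debug_hook(void)
{
	/* Enter the debugger only if a gdb front end is attached. */
	if (kgdb_connected)
		kgdb_breakpoint();
}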
+ */ +extern void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs); + +/** + * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs + * @gdb_regs: A pointer to hold the registers in the order GDB wants. + * @p: The &struct task_struct of the desired process. + * + * Convert the register values of the sleeping process in @p to + * the format that GDB expects. + * This function is called when kgdb does not have access to the + * &struct pt_regs and therefore it should fill the gdb registers + * @gdb_regs with what has been saved in &struct thread_struct + * thread field during switch_to. + */ +extern void +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p); + +/** + * gdb_regs_to_pt_regs - Convert GDB regs to ptrace regs. + * @gdb_regs: A pointer to hold the registers we've received from GDB. + * @regs: A pointer to a &struct pt_regs to hold these values in. + * + * Convert the GDB regs in @gdb_regs into the pt_regs, and store them + * in @regs. + */ +extern void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs); + +/** + * kgdb_arch_handle_exception - Handle architecture specific GDB packets. + * @vector: The error vector of the exception that happened. + * @signo: The signal number of the exception that happened. + * @err_code: The error code of the exception that happened. + * @remcom_in_buffer: The buffer of the packet we have read. + * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into. + * @regs: The &struct pt_regs of the current process. + * + * This function MUST handle the 'c' and 's' command packets, + * as well packets to set / remove a hardware breakpoint, if used. + * If there are additional packets which the hardware needs to handle, + * they are handled here. The code should return -1 if it wants to + * process more packets, and a %0 or %1 if it wants to exit from the + * kgdb callback. + */ +extern int +kgdb_arch_handle_exception(int vector, int signo, int err_code, + char *remcom_in_buffer, + char *remcom_out_buffer, + struct pt_regs *regs); + +/** + * kgdb_roundup_cpus - Get other CPUs into a holding pattern + * @flags: Current IRQ state + * + * On SMP systems, we need to get the attention of the other CPUs + * and get them into a known state. This should do what is needed + * to get the other CPUs to call kgdb_wait(). Note that on some arches, + * the NMI approach is not used for rounding up all the CPUs. For example, + * in case of MIPS, smp_call_function() is used to roundup CPUs. In + * this case, we have to make sure that interrupts are enabled before + * calling smp_call_function(). The argument to this function is + * the flags that will be used when restoring the interrupts. There is + * local_irq_save() call before kgdb_roundup_cpus(). + * + * On non-SMP systems, this is not called. + */ +extern void kgdb_roundup_cpus(unsigned long flags); + +/** + * kgdb_arch_set_pc - Generic call back to the program counter + * @regs: Current &struct pt_regs. + * @pc: The new value for the program counter + * + * This function handles updating the program counter and requires an + * architecture specific implementation. + */ +extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc); + + +/* Optional functions. */ +extern int kgdb_validate_break_address(unsigned long addr); +extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt); +extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt); + +/** + * kgdb_arch_late - Perform any architecture specific initalization. 
+ * + * This function will handle the late initalization of any + * architecture specific callbacks. This is an optional function for + * handling things like late initialization of hw breakpoints. The + * default implementation does nothing. + */ +extern void kgdb_arch_late(void); + + +/** + * struct kgdb_arch - Describe architecture specific values. + * @gdb_bpt_instr: The instruction to trigger a breakpoint. + * @flags: Flags for the breakpoint, currently just %KGDB_HW_BREAKPOINT. + * @set_breakpoint: Allow an architecture to specify how to set a software + * breakpoint. + * @remove_breakpoint: Allow an architecture to specify how to remove a + * software breakpoint. + * @set_hw_breakpoint: Allow an architecture to specify how to set a hardware + * breakpoint. + * @remove_hw_breakpoint: Allow an architecture to specify how to remove a + * hardware breakpoint. + * @disable_hw_break: Allow an architecture to specify how to disable + * hardware breakpoints for a single cpu. + * @remove_all_hw_break: Allow an architecture to specify how to remove all + * hardware breakpoints. + * @correct_hw_break: Allow an architecture to specify how to correct the + * hardware debug registers. + * @enable_nmi: Manage NMI-triggered entry to KGDB + */ +struct kgdb_arch { + unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE]; + unsigned long flags; + + int (*set_breakpoint)(unsigned long, char *); + int (*remove_breakpoint)(unsigned long, char *); + int (*set_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); + int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); + void (*disable_hw_break)(struct pt_regs *regs); + void (*remove_all_hw_break)(void); + void (*correct_hw_break)(void); + + void (*enable_nmi)(bool on); +}; + +/** + * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. + * @name: Name of the I/O driver. + * @read_char: Pointer to a function that will return one char. + * @write_char: Pointer to a function that will write one char. + * @flush: Pointer to a function that will flush any pending writes. + * @init: Pointer to a function that will initialize the device. + * @pre_exception: Pointer to a function that will do any prep work for + * the I/O driver. + * @post_exception: Pointer to a function that will do any cleanup work + * for the I/O driver. 
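For illustration only, a hypothetical architecture describes itself to the core by filling in the structure above; the 0xcc byte shown is the x86 int3 opcode used purely as an example, and most callbacks are optional per the kernel-doc. The real table is provided by each architecture under arch/.../kgdb.c.

#include <linux/kgdb.h>

/* sketch of a per-architecture description (values are examples only) */
struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr	= { 0xcc },	/* software breakpoint instruction */
	.flags		= 0,		/* no hardware breakpoint support */
};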
+ * @is_console: 1 if the end device is a console 0 if the I/O device is + * not a console + */ +struct kgdb_io { + const char *name; + int (*read_char) (void); + void (*write_char) (u8); + void (*flush) (void); + int (*init) (void); + void (*pre_exception) (void); + void (*post_exception) (void); + int is_console; +}; + +extern struct kgdb_arch arch_kgdb_ops; + +extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs); + +#ifdef CONFIG_SERIAL_KGDB_NMI +extern int kgdb_register_nmi_console(void); +extern int kgdb_unregister_nmi_console(void); +extern bool kgdb_nmi_poll_knock(void); +#else +static inline int kgdb_register_nmi_console(void) { return 0; } +static inline int kgdb_unregister_nmi_console(void) { return 0; } +static inline bool kgdb_nmi_poll_knock(void) { return 1; } +#endif + +extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); +extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops); +extern struct kgdb_io *dbg_io_ops; + +extern int kgdb_hex2long(char **ptr, unsigned long *long_val); +extern char *kgdb_mem2hex(char *mem, char *buf, int count); +extern int kgdb_hex2mem(char *buf, char *mem, int count); + +extern int kgdb_isremovedbreak(unsigned long addr); +extern void kgdb_schedule_breakpoint(void); + +extern int +kgdb_handle_exception(int ex_vector, int signo, int err_code, + struct pt_regs *regs); +extern int kgdb_nmicallback(int cpu, void *regs); +extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code, + atomic_t *snd_rdy); +extern void gdbstub_exit(int status); + +extern int kgdb_single_step; +extern atomic_t kgdb_active; +#define in_dbg_master() \ + (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) +extern bool dbg_is_early; +extern void __init dbg_late_init(void); +#else /* ! CONFIG_KGDB */ +#define in_dbg_master() (0) +#define dbg_late_init() +#endif /* ! 
CONFIG_KGDB */ +#endif /* _KGDB_H_ */ diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h new file mode 100644 index 000000000..dc9a2eecc --- /dev/null +++ b/include/linux/khugepaged.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KHUGEPAGED_H +#define _LINUX_KHUGEPAGED_H + +#include /* MMF_VM_HUGEPAGE */ + + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern struct attribute_group khugepaged_attr_group; + +extern int khugepaged_init(void); +extern void khugepaged_destroy(void); +extern int start_stop_khugepaged(void); +extern int __khugepaged_enter(struct mm_struct *mm); +extern void __khugepaged_exit(struct mm_struct *mm); +extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, + unsigned long vm_flags); +extern void khugepaged_min_free_kbytes_update(void); + +#define khugepaged_enabled() \ + (transparent_hugepage_flags & \ + ((1<flags)) + return __khugepaged_enter(mm); + return 0; +} + +static inline void khugepaged_exit(struct mm_struct *mm) +{ + if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) + __khugepaged_exit(mm); +} + +static inline int khugepaged_enter(struct vm_area_struct *vma, + unsigned long vm_flags) +{ + if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) + if ((khugepaged_always() || + (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) && + !(vm_flags & VM_NOHUGEPAGE) && + !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) + if (__khugepaged_enter(vma->vm_mm)) + return -ENOMEM; + return 0; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + return 0; +} +static inline void khugepaged_exit(struct mm_struct *mm) +{ +} +static inline int khugepaged_enter(struct vm_area_struct *vma, + unsigned long vm_flags) +{ + return 0; +} +static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, + unsigned long vm_flags) +{ + return 0; +} + +static inline void khugepaged_min_free_kbytes_update(void) +{ +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#endif /* _LINUX_KHUGEPAGED_H */ diff --git a/include/linux/klist.h b/include/linux/klist.h new file mode 100644 index 000000000..953f283f8 --- /dev/null +++ b/include/linux/klist.h @@ -0,0 +1,69 @@ +/* + * klist.h - Some generic list helpers, extending struct list_head a bit. + * + * Implementations are found in lib/klist.c + * + * + * Copyright (C) 2005 Patrick Mochel + * + * This file is rleased under the GPL v2. 
+ */ + +#ifndef _LINUX_KLIST_H +#define _LINUX_KLIST_H + +#include +#include +#include + +struct klist_node; +struct klist { + spinlock_t k_lock; + struct list_head k_list; + void (*get)(struct klist_node *); + void (*put)(struct klist_node *); +} __attribute__ ((aligned (sizeof(void *)))); + +#define KLIST_INIT(_name, _get, _put) \ + { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \ + .k_list = LIST_HEAD_INIT(_name.k_list), \ + .get = _get, \ + .put = _put, } + +#define DEFINE_KLIST(_name, _get, _put) \ + struct klist _name = KLIST_INIT(_name, _get, _put) + +extern void klist_init(struct klist *k, void (*get)(struct klist_node *), + void (*put)(struct klist_node *)); + +struct klist_node { + void *n_klist; /* never access directly */ + struct list_head n_node; + struct kref n_ref; +}; + +extern void klist_add_tail(struct klist_node *n, struct klist *k); +extern void klist_add_head(struct klist_node *n, struct klist *k); +extern void klist_add_behind(struct klist_node *n, struct klist_node *pos); +extern void klist_add_before(struct klist_node *n, struct klist_node *pos); + +extern void klist_del(struct klist_node *n); +extern void klist_remove(struct klist_node *n); + +extern int klist_node_attached(struct klist_node *n); + + +struct klist_iter { + struct klist *i_klist; + struct klist_node *i_cur; +}; + + +extern void klist_iter_init(struct klist *k, struct klist_iter *i); +extern void klist_iter_init_node(struct klist *k, struct klist_iter *i, + struct klist_node *n); +extern void klist_iter_exit(struct klist_iter *i); +extern struct klist_node *klist_prev(struct klist_iter *i); +extern struct klist_node *klist_next(struct klist_iter *i); + +#endif diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h new file mode 100644 index 000000000..5ac416e2d --- /dev/null +++ b/include/linux/kmemleak.h @@ -0,0 +1,137 @@ +/* + * include/linux/kmemleak.h + * + * Copyright (C) 2008 ARM Limited + * Written by Catalin Marinas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
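A short sketch of the klist interface above: embed a klist_node in the owning object, add it with klist_add_tail(), and walk the list through a klist_iter so the iterator keeps its position safely (structure and function names are illustrative; object lifetime is left to the caller since no get/put hooks are installed).

#include <linux/kernel.h>
#include <linux/klist.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct example_item {
	struct klist_node node;
	int value;
};

/* no get/put hooks: the caller manages object lifetimes itself */
static DEFINE_KLIST(example_list, NULL, NULL);

static void example_klist_usage(void)
{
	struct example_item *item = kzalloc(sizeof(*item), GFP_KERNEL);
	struct klist_node *n;
	struct klist_iter iter;

	if (!item)
		return;
	item->value = 42;
	klist_add_tail(&item->node, &example_list);

	klist_iter_init(&example_list, &iter);
	while ((n = klist_next(&iter)))
		pr_info("value %d\n",
			container_of(n, struct example_item, node)->value);
	klist_iter_exit(&iter);
}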
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __KMEMLEAK_H +#define __KMEMLEAK_H + +#include +#include + +#ifdef CONFIG_DEBUG_KMEMLEAK + +extern void kmemleak_init(void) __init; +extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, + gfp_t gfp) __ref; +extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, + gfp_t gfp) __ref; +extern void kmemleak_vmalloc(const struct vm_struct *area, size_t size, + gfp_t gfp) __ref; +extern void kmemleak_free(const void *ptr) __ref; +extern void kmemleak_free_part(const void *ptr, size_t size) __ref; +extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; +extern void kmemleak_update_trace(const void *ptr) __ref; +extern void kmemleak_not_leak(const void *ptr) __ref; +extern void kmemleak_ignore(const void *ptr) __ref; +extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; +extern void kmemleak_no_scan(const void *ptr) __ref; +extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, + gfp_t gfp) __ref; +extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref; +extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref; +extern void kmemleak_ignore_phys(phys_addr_t phys) __ref; + +static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, + int min_count, slab_flags_t flags, + gfp_t gfp) +{ + if (!(flags & SLAB_NOLEAKTRACE)) + kmemleak_alloc(ptr, size, min_count, gfp); +} + +static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags) +{ + if (!(flags & SLAB_NOLEAKTRACE)) + kmemleak_free(ptr); +} + +static inline void kmemleak_erase(void **ptr) +{ + *ptr = NULL; +} + +#else + +static inline void kmemleak_init(void) +{ +} +static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count, + gfp_t gfp) +{ +} +static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, + int min_count, slab_flags_t flags, + gfp_t gfp) +{ +} +static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, + gfp_t gfp) +{ +} +static inline void kmemleak_vmalloc(const struct vm_struct *area, size_t size, + gfp_t gfp) +{ +} +static inline void kmemleak_free(const void *ptr) +{ +} +static inline void kmemleak_free_part(const void *ptr, size_t size) +{ +} +static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags) +{ +} +static inline void kmemleak_free_percpu(const void __percpu *ptr) +{ +} +static inline void kmemleak_update_trace(const void *ptr) +{ +} +static inline void kmemleak_not_leak(const void *ptr) +{ +} +static inline void kmemleak_ignore(const void *ptr) +{ +} +static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) +{ +} +static inline void kmemleak_erase(void **ptr) +{ +} +static inline void kmemleak_no_scan(const void *ptr) +{ +} +static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size, + int min_count, gfp_t gfp) +{ +} +static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size) +{ +} +static inline void kmemleak_not_leak_phys(phys_addr_t phys) +{ +} +static inline void kmemleak_ignore_phys(phys_addr_t phys) +{ +} + +#endif /* CONFIG_DEBUG_KMEMLEAK */ + +#endif /* __KMEMLEAK_H */ diff --git a/include/linux/kmod.h b/include/linux/kmod.h new file mode 100644 index 000000000..40c89ad4b --- /dev/null +++ b/include/linux/kmod.h @@ -0,0 
+1,48 @@ +#ifndef __LINUX_KMOD_H__ +#define __LINUX_KMOD_H__ + +/* + * include/linux/kmod.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include + +#define KMOD_PATH_LEN 256 + +#ifdef CONFIG_MODULES +extern char modprobe_path[]; /* for sysctl */ +/* modprobe exit status on success, -ve on error. Return value + * usually useless though. */ +extern __printf(2, 3) +int __request_module(bool wait, const char *name, ...); +#define request_module(mod...) __request_module(true, mod) +#define request_module_nowait(mod...) __request_module(false, mod) +#define try_then_request_module(x, mod...) \ + ((x) ?: (__request_module(true, mod), (x))) +#else +static inline int request_module(const char *name, ...) { return -ENOSYS; } +static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; } +#define try_then_request_module(x, mod...) (x) +#endif + +#endif /* __LINUX_KMOD_H__ */ diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h new file mode 100644 index 000000000..2e7a1e032 --- /dev/null +++ b/include/linux/kmsg_dump.h @@ -0,0 +1,117 @@ +/* + * linux/include/kmsg_dump.h + * + * Copyright (C) 2009 Net Insight AB + * + * Author: Simon Kagstrom + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ +#ifndef _LINUX_KMSG_DUMP_H +#define _LINUX_KMSG_DUMP_H + +#include +#include + +/* + * Keep this list arranged in rough order of priority. Anything listed after + * KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump + * is passed to the kernel. 
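As the comment above notes, __request_module() runs the usermode helper at modprobe_path and its return value mostly reflects modprobe's exit status, so callers should verify that the wanted facility actually appeared rather than trust a zero return. A minimal sketch (the module name is a common real-world example; the wrapper name is illustrative):

#include <linux/kmod.h>
#include <linux/printk.h>

static int example_load_crc32c(void)
{
	int ret = request_module("crc32c");	/* waits for modprobe to finish */

	if (ret)
		pr_warn("modprobe crc32c failed: %d\n", ret);
	/* caller should still check that a crc32c provider registered */
	return ret;
}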
+ */ +enum kmsg_dump_reason { + KMSG_DUMP_UNDEF, + KMSG_DUMP_PANIC, + KMSG_DUMP_OOPS, + KMSG_DUMP_EMERG, + KMSG_DUMP_RESTART, + KMSG_DUMP_HALT, + KMSG_DUMP_POWEROFF, +}; + +/** + * struct kmsg_dumper - kernel crash message dumper structure + * @list: Entry in the dumper list (private) + * @dump: Call into dumping code which will retrieve the data with + * through the record iterator + * @max_reason: filter for highest reason number that should be dumped + * @registered: Flag that specifies if this is already registered + */ +struct kmsg_dumper { + struct list_head list; + void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason); + enum kmsg_dump_reason max_reason; + bool active; + bool registered; + + /* private state of the kmsg iterator */ + u32 cur_idx; + u32 next_idx; + u64 cur_seq; + u64 next_seq; +}; + +#ifdef CONFIG_PRINTK +void kmsg_dump(enum kmsg_dump_reason reason); + +bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, + char *line, size_t size, size_t *len); + +bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, + char *line, size_t size, size_t *len); + +bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, + char *buf, size_t size, size_t *len); + +void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper); + +void kmsg_dump_rewind(struct kmsg_dumper *dumper); + +int kmsg_dump_register(struct kmsg_dumper *dumper); + +int kmsg_dump_unregister(struct kmsg_dumper *dumper); +#else +static inline void kmsg_dump(enum kmsg_dump_reason reason) +{ +} + +static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, + bool syslog, const char *line, + size_t size, size_t *len) +{ + return false; +} + +static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, + const char *line, size_t size, size_t *len) +{ + return false; +} + +static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, + char *buf, size_t size, size_t *len) +{ + return false; +} + +static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper) +{ +} + +static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper) +{ +} + +static inline int kmsg_dump_register(struct kmsg_dumper *dumper) +{ + return -EINVAL; +} + +static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper) +{ + return -EINVAL; +} +#endif + +#endif /* _LINUX_KMSG_DUMP_H */ diff --git a/include/linux/kobj_map.h b/include/linux/kobj_map.h new file mode 100644 index 000000000..c9919f8b2 --- /dev/null +++ b/include/linux/kobj_map.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * kobj_map.h + */ + +#ifndef _KOBJ_MAP_H_ +#define _KOBJ_MAP_H_ + +#include + +typedef struct kobject *kobj_probe_t(dev_t, int *, void *); +struct kobj_map; + +int kobj_map(struct kobj_map *, dev_t, unsigned long, struct module *, + kobj_probe_t *, int (*)(dev_t, void *), void *); +void kobj_unmap(struct kobj_map *, dev_t, unsigned long); +struct kobject *kobj_lookup(struct kobj_map *, dev_t, int *); +struct kobj_map *kobj_map_init(kobj_probe_t *, struct mutex *); + +#endif /* _KOBJ_MAP_H_ */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h new file mode 100644 index 000000000..1ab0d624f --- /dev/null +++ b/include/linux/kobject.h @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * kobject.h - generic kernel object infrastructure. + * + * Copyright (c) 2002-2003 Patrick Mochel + * Copyright (c) 2002-2003 Open Source Development Labs + * Copyright (c) 2006-2008 Greg Kroah-Hartman + * Copyright (c) 2006-2008 Novell Inc. 
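A hedged sketch of a kmsg dumper built on the structure above: the registered ->dump() callback drains the recorded log with kmsg_dump_get_line() and pushes it to some persistent store. The storage step and all names here are assumptions for illustration.

#include <linux/init.h>
#include <linux/kmsg_dump.h>

static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	static char line[256];
	size_t len;

	/* Drain the recorded kernel log one line at a time. */
	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
		;	/* e.g. write 'len' bytes of 'line' to persistent storage */
}

static struct kmsg_dumper example_dumper = {
	.dump		= example_dump,
	.max_reason	= KMSG_DUMP_OOPS,	/* dump on oops and panic */
};

static int __init example_dumper_init(void)
{
	return kmsg_dump_register(&example_dumper);
}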
+ * + * Please read Documentation/kobject.txt before using the kobject + * interface, ESPECIALLY the parts about reference counts and object + * destructors. + */ + +#ifndef _KOBJECT_H_ +#define _KOBJECT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define UEVENT_HELPER_PATH_LEN 256 +#define UEVENT_NUM_ENVP 32 /* number of env pointers */ +#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ + +#ifdef CONFIG_UEVENT_HELPER +/* path to the userspace helper executed on an event */ +extern char uevent_helper[]; +#endif + +/* counter to tag the uevent, read only except for the kobject core */ +extern u64 uevent_seqnum; + +/* + * The actions here must match the index to the string array + * in lib/kobject_uevent.c + * + * Do not add new actions here without checking with the driver-core + * maintainers. Action strings are not meant to express subsystem + * or device specific properties. In most cases you want to send a + * kobject_uevent_env(kobj, KOBJ_CHANGE, env) with additional event + * specific variables added to the event environment. + */ +enum kobject_action { + KOBJ_ADD, + KOBJ_REMOVE, + KOBJ_CHANGE, + KOBJ_MOVE, + KOBJ_ONLINE, + KOBJ_OFFLINE, + KOBJ_BIND, + KOBJ_UNBIND, + KOBJ_MAX +}; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + struct kobj_type *ktype; + struct kernfs_node *sd; /* sysfs directory entry */ + struct kref kref; +#ifdef CONFIG_DEBUG_KOBJECT_RELEASE + struct delayed_work release; +#endif + unsigned int state_initialized:1; + unsigned int state_in_sysfs:1; + unsigned int state_add_uevent_sent:1; + unsigned int state_remove_uevent_sent:1; + unsigned int uevent_suppress:1; +}; + +extern __printf(2, 3) +int kobject_set_name(struct kobject *kobj, const char *name, ...); +extern __printf(2, 0) +int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, + va_list vargs); + +static inline const char *kobject_name(const struct kobject *kobj) +{ + return kobj->name; +} + +extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype); +extern __printf(3, 4) __must_check +int kobject_add(struct kobject *kobj, struct kobject *parent, + const char *fmt, ...); +extern __printf(4, 5) __must_check +int kobject_init_and_add(struct kobject *kobj, + struct kobj_type *ktype, struct kobject *parent, + const char *fmt, ...); + +extern void kobject_del(struct kobject *kobj); + +extern struct kobject * __must_check kobject_create(void); +extern struct kobject * __must_check kobject_create_and_add(const char *name, + struct kobject *parent); + +extern int __must_check kobject_rename(struct kobject *, const char *new_name); +extern int __must_check kobject_move(struct kobject *, struct kobject *); + +extern struct kobject *kobject_get(struct kobject *kobj); +extern struct kobject * __must_check kobject_get_unless_zero( + struct kobject *kobj); +extern void kobject_put(struct kobject *kobj); + +extern const void *kobject_namespace(struct kobject *kobj); +extern void kobject_get_ownership(struct kobject *kobj, + kuid_t *uid, kgid_t *gid); +extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); + +/** + * kobject_has_children - Returns whether a kobject has children. + * @kobj: the object to test + * + * This will return whether a kobject has other kobjects as children. + * + * It does NOT account for the presence of attribute files, only sub + * directories. 
It also assumes there is no concurrent addition or + * removal of such children, and thus relies on external locking. + */ +static inline bool kobject_has_children(struct kobject *kobj) +{ + WARN_ON_ONCE(kref_read(&kobj->kref) == 0); + + return kobj->sd && kobj->sd->dir.subdirs; +} + +struct kobj_type { + void (*release)(struct kobject *kobj); + const struct sysfs_ops *sysfs_ops; + struct attribute **default_attrs; + const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); + const void *(*namespace)(struct kobject *kobj); + void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid); +}; + +struct kobj_uevent_env { + char *argv[3]; + char *envp[UEVENT_NUM_ENVP]; + int envp_idx; + char buf[UEVENT_BUFFER_SIZE]; + int buflen; +}; + +struct kset_uevent_ops { + int (* const filter)(struct kset *kset, struct kobject *kobj); + const char *(* const name)(struct kset *kset, struct kobject *kobj); + int (* const uevent)(struct kset *kset, struct kobject *kobj, + struct kobj_uevent_env *env); +}; + +struct kobj_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, + char *buf); + ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count); +}; + +extern const struct sysfs_ops kobj_sysfs_ops; + +struct sock; + +/** + * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem. + * + * A kset defines a group of kobjects. They can be individually + * different "types" but overall these kobjects all want to be grouped + * together and operated on in the same manner. ksets are used to + * define the attribute callbacks and other common events that happen to + * a kobject. + * + * @list: the list of all kobjects for this kset + * @list_lock: a lock for iterating over the kobjects + * @kobj: the embedded kobject for this kset (recursion, isn't it fun...) + * @uevent_ops: the set of uevent operations for this kset. These are + * called whenever a kobject has something happen to it so that the kset + * can add new environment variables, or filter out the uevents if so + * desired. + */ +struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +} __randomize_layout; + +extern void kset_init(struct kset *kset); +extern int __must_check kset_register(struct kset *kset); +extern void kset_unregister(struct kset *kset); +extern struct kset * __must_check kset_create_and_add(const char *name, + const struct kset_uevent_ops *u, + struct kobject *parent_kobj); + +static inline struct kset *to_kset(struct kobject *kobj) +{ + return kobj ? container_of(kobj, struct kset, kobj) : NULL; +} + +static inline struct kset *kset_get(struct kset *k) +{ + return k ? 
to_kset(kobject_get(&k->kobj)) : NULL; +} + +static inline void kset_put(struct kset *k) +{ + kobject_put(&k->kobj); +} + +static inline struct kobj_type *get_ktype(struct kobject *kobj) +{ + return kobj->ktype; +} + +extern struct kobject *kset_find_obj(struct kset *, const char *); + +/* The global /sys/kernel/ kobject for people to chain off of */ +extern struct kobject *kernel_kobj; +/* The global /sys/kernel/mm/ kobject for people to chain off of */ +extern struct kobject *mm_kobj; +/* The global /sys/hypervisor/ kobject for people to chain off of */ +extern struct kobject *hypervisor_kobj; +/* The global /sys/power/ kobject for people to chain off of */ +extern struct kobject *power_kobj; +/* The global /sys/firmware/ kobject for people to chain off of */ +extern struct kobject *firmware_kobj; + +int kobject_uevent(struct kobject *kobj, enum kobject_action action); +int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, + char *envp[]); +int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count); + +__printf(2, 3) +int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...); + +#endif /* _KOBJECT_H_ */ diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h new file mode 100644 index 000000000..069aa2ebe --- /dev/null +++ b/include/linux/kobject_ns.h @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Kernel object name space definitions + * + * Copyright (c) 2002-2003 Patrick Mochel + * Copyright (c) 2002-2003 Open Source Development Labs + * Copyright (c) 2006-2008 Greg Kroah-Hartman + * Copyright (c) 2006-2008 Novell Inc. + * + * Split from kobject.h by David Howells (dhowells@redhat.com) + * + * Please read Documentation/kobject.txt before using the kobject + * interface, ESPECIALLY the parts about reference counts and object + * destructors. + */ + +#ifndef _LINUX_KOBJECT_NS_H +#define _LINUX_KOBJECT_NS_H + +struct sock; +struct kobject; + +/* + * Namespace types which are used to tag kobjects and sysfs entries. + * Network namespace will likely be the first. + */ +enum kobj_ns_type { + KOBJ_NS_TYPE_NONE = 0, + KOBJ_NS_TYPE_NET, + KOBJ_NS_TYPES +}; + +/* + * Callbacks so sysfs can determine namespaces + * @grab_current_ns: return a new reference to calling task's namespace + * @netlink_ns: return namespace to which a sock belongs (right?) + * @initial_ns: return the initial namespace (i.e. 
init_net_ns) + * @drop_ns: drops a reference to namespace + */ +struct kobj_ns_type_operations { + enum kobj_ns_type type; + bool (*current_may_mount)(void); + void *(*grab_current_ns)(void); + const void *(*netlink_ns)(struct sock *sk); + const void *(*initial_ns)(void); + void (*drop_ns)(void *); +}; + +int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); +int kobj_ns_type_registered(enum kobj_ns_type type); +const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); +const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); + +bool kobj_ns_current_may_mount(enum kobj_ns_type type); +void *kobj_ns_grab_current(enum kobj_ns_type type); +const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); +const void *kobj_ns_initial(enum kobj_ns_type type); +void kobj_ns_drop(enum kobj_ns_type type, void *ns); + +#endif /* _LINUX_KOBJECT_NS_H */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h new file mode 100644 index 000000000..304e7a0f6 --- /dev/null +++ b/include/linux/kprobes.h @@ -0,0 +1,472 @@ +#ifndef _LINUX_KPROBES_H +#define _LINUX_KPROBES_H +/* + * Kernel Probes (KProbes) + * include/linux/kprobes.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2002, 2004 + * + * 2002-Oct Created by Vamsi Krishna S Kernel + * Probes initial implementation ( includes suggestions from + * Rusty Russell). + * 2004-July Suparna Bhattacharya added jumper probes + * interface to access function arguments. + * 2005-May Hien Nguyen and Jim Keniston + * and Prasanna S Panchamukhi + * added function-return probes. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_KPROBES + +/* kprobe_status settings */ +#define KPROBE_HIT_ACTIVE 0x00000001 +#define KPROBE_HIT_SS 0x00000002 +#define KPROBE_REENTER 0x00000004 +#define KPROBE_HIT_SSDONE 0x00000008 + +#else /* CONFIG_KPROBES */ +#include +typedef int kprobe_opcode_t; +struct arch_specific_insn { + int dummy; +}; +#endif /* CONFIG_KPROBES */ + +struct kprobe; +struct pt_regs; +struct kretprobe; +struct kretprobe_instance; +typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *); +typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, + unsigned long flags); +typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, + int trapnr); +typedef int (*kretprobe_handler_t) (struct kretprobe_instance *, + struct pt_regs *); + +struct kprobe { + struct hlist_node hlist; + + /* list of kprobes for multi-handler support */ + struct list_head list; + + /*count the number of times this probe was temporarily disarmed */ + unsigned long nmissed; + + /* location of the probe point */ + kprobe_opcode_t *addr; + + /* Allow user to indicate symbol name of the probe point */ + const char *symbol_name; + + /* Offset into the symbol */ + unsigned int offset; + + /* Called before addr is executed. */ + kprobe_pre_handler_t pre_handler; + + /* Called after addr is executed, unless... */ + kprobe_post_handler_t post_handler; + + /* + * ... called if executing addr causes a fault (eg. page fault). + * Return 1 if it handled fault, otherwise kernel will see it. + */ + kprobe_fault_handler_t fault_handler; + + /* Saved opcode (which has been replaced with breakpoint) */ + kprobe_opcode_t opcode; + + /* copy of the original instruction */ + struct arch_specific_insn ainsn; + + /* + * Indicates various status flags. + * Protected by kprobe_mutex after this kprobe is registered. + */ + u32 flags; +}; + +/* Kprobe status flags */ +#define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ +#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */ +#define KPROBE_FLAG_OPTIMIZED 4 /* + * probe is really optimized. + * NOTE: + * this flag is only for optimized_kprobe. + */ +#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */ + +/* Has this kprobe gone ? */ +static inline int kprobe_gone(struct kprobe *p) +{ + return p->flags & KPROBE_FLAG_GONE; +} + +/* Is this kprobe disabled ? */ +static inline int kprobe_disabled(struct kprobe *p) +{ + return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE); +} + +/* Is this kprobe really running optimized path ? */ +static inline int kprobe_optimized(struct kprobe *p) +{ + return p->flags & KPROBE_FLAG_OPTIMIZED; +} + +/* Is this kprobe uses ftrace ? */ +static inline int kprobe_ftrace(struct kprobe *p) +{ + return p->flags & KPROBE_FLAG_FTRACE; +} + +/* + * Function-return probe - + * Note: + * User needs to provide a handler function, and initialize maxactive. + * maxactive - The maximum number of instances of the probed function that + * can be active concurrently. + * nmissed - tracks the number of times the probed function's return was + * ignored, due to maxactive being too low. 
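+ *
+ * A minimal sketch of the setup (illustrative only; my_ret_handler is a
+ * made-up name and do_sys_open is just an example target), using the
+ * struct kretprobe declared below:
+ *
+ *	static int my_ret_handler(struct kretprobe_instance *ri,
+ *				  struct pt_regs *regs)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct kretprobe my_rp = {
+ *		.kp.symbol_name	= "do_sys_open",
+ *		.handler	= my_ret_handler,
+ *		.maxactive	= 20,
+ *	};
+ *
+ * register_kretprobe(&my_rp) arms the probe; a maxactive that is too
+ * small shows up afterwards as a growing my_rp.nmissed count.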
+ * + */ +struct kretprobe { + struct kprobe kp; + kretprobe_handler_t handler; + kretprobe_handler_t entry_handler; + int maxactive; + int nmissed; + size_t data_size; + struct hlist_head free_instances; + raw_spinlock_t lock; +}; + +#define KRETPROBE_MAX_DATA_SIZE 4096 + +struct kretprobe_instance { + struct hlist_node hlist; + struct kretprobe *rp; + kprobe_opcode_t *ret_addr; + struct task_struct *task; + void *fp; + char data[0]; +}; + +struct kretprobe_blackpoint { + const char *name; + void *addr; +}; + +struct kprobe_blacklist_entry { + struct list_head list; + unsigned long start_addr; + unsigned long end_addr; +}; + +#ifdef CONFIG_KPROBES +DECLARE_PER_CPU(struct kprobe *, current_kprobe); +DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +/* + * For #ifdef avoidance: + */ +static inline int kprobes_built_in(void) +{ + return 1; +} + +#ifdef CONFIG_KRETPROBES +extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs); +extern int arch_trampoline_kprobe(struct kprobe *p); +#else /* CONFIG_KRETPROBES */ +static inline void arch_prepare_kretprobe(struct kretprobe *rp, + struct pt_regs *regs) +{ +} +static inline int arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} +#endif /* CONFIG_KRETPROBES */ + +extern struct kretprobe_blackpoint kretprobe_blacklist[]; + +static inline void kretprobe_assert(struct kretprobe_instance *ri, + unsigned long orig_ret_address, unsigned long trampoline_address) +{ + if (!orig_ret_address || (orig_ret_address == trampoline_address)) { + printk("kretprobe BUG!: Processing kretprobe %p @ %p\n", + ri->rp, ri->rp->kp.addr); + BUG(); + } +} + +#ifdef CONFIG_KPROBES_SANITY_TEST +extern int init_test_probes(void); +#else +static inline int init_test_probes(void) +{ + return 0; +} +#endif /* CONFIG_KPROBES_SANITY_TEST */ + +extern int arch_prepare_kprobe(struct kprobe *p); +extern void arch_arm_kprobe(struct kprobe *p); +extern void arch_disarm_kprobe(struct kprobe *p); +extern int arch_init_kprobes(void); +extern void show_registers(struct pt_regs *regs); +extern void kprobes_inc_nmissed_count(struct kprobe *p); +extern bool arch_within_kprobe_blacklist(unsigned long addr); +extern int arch_populate_kprobe_blacklist(void); +extern bool arch_kprobe_on_func_entry(unsigned long offset); +extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); + +extern bool within_kprobe_blacklist(unsigned long addr); +extern int kprobe_add_ksym_blacklist(unsigned long entry); +extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end); + +struct kprobe_insn_cache { + struct mutex mutex; + void *(*alloc)(void); /* allocate insn page */ + void (*free)(void *); /* free insn page */ + struct list_head pages; /* list of kprobe_insn_page */ + size_t insn_size; /* size of instruction slot */ + int nr_garbage; +}; + +#ifdef __ARCH_WANT_KPROBES_INSN_SLOT +extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c); +extern void __free_insn_slot(struct kprobe_insn_cache *c, + kprobe_opcode_t *slot, int dirty); +/* sleep-less address checking routine */ +extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c, + unsigned long addr); + +#define DEFINE_INSN_CACHE_OPS(__name) \ +extern struct kprobe_insn_cache kprobe_##__name##_slots; \ + \ +static inline kprobe_opcode_t *get_##__name##_slot(void) \ +{ \ + return __get_insn_slot(&kprobe_##__name##_slots); \ +} \ + \ +static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\ +{ \ + 
__free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \ +} \ + \ +static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ +{ \ + return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \ +} +#else /* __ARCH_WANT_KPROBES_INSN_SLOT */ +#define DEFINE_INSN_CACHE_OPS(__name) \ +static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ +{ \ + return 0; \ +} +#endif + +DEFINE_INSN_CACHE_OPS(insn); + +#ifdef CONFIG_OPTPROBES +/* + * Internal structure for direct jump optimized probe + */ +struct optimized_kprobe { + struct kprobe kp; + struct list_head list; /* list for optimizing queue */ + struct arch_optimized_insn optinsn; +}; + +/* Architecture dependent functions for direct jump optimization */ +extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn); +extern int arch_check_optimized_kprobe(struct optimized_kprobe *op); +extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, + struct kprobe *orig); +extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op); +extern void arch_optimize_kprobes(struct list_head *oplist); +extern void arch_unoptimize_kprobes(struct list_head *oplist, + struct list_head *done_list); +extern void arch_unoptimize_kprobe(struct optimized_kprobe *op); +extern int arch_within_optimized_kprobe(struct optimized_kprobe *op, + unsigned long addr); + +extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs); + +DEFINE_INSN_CACHE_OPS(optinsn); + +#ifdef CONFIG_SYSCTL +extern int sysctl_kprobes_optimization; +extern int proc_kprobes_optimization_handler(struct ctl_table *table, + int write, void __user *buffer, + size_t *length, loff_t *ppos); +#endif +extern void wait_for_kprobe_optimizer(void); +#else +static inline void wait_for_kprobe_optimizer(void) { } +#endif /* CONFIG_OPTPROBES */ +#ifdef CONFIG_KPROBES_ON_FTRACE +extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs); +extern int arch_prepare_kprobe_ftrace(struct kprobe *p); +#endif + +int arch_check_ftrace_location(struct kprobe *p); + +/* Get the kprobe at this addr (if any) - called with preemption disabled */ +struct kprobe *get_kprobe(void *addr); +void kretprobe_hash_lock(struct task_struct *tsk, + struct hlist_head **head, unsigned long *flags); +void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags); +struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); + +/* kprobe_running() will just return the current_kprobe on this CPU */ +static inline struct kprobe *kprobe_running(void) +{ + return (__this_cpu_read(current_kprobe)); +} + +static inline void reset_current_kprobe(void) +{ + __this_cpu_write(current_kprobe, NULL); +} + +static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) +{ + return this_cpu_ptr(&kprobe_ctlblk); +} + +extern struct kprobe kprobe_busy; +void kprobe_busy_begin(void); +void kprobe_busy_end(void); + +kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); +int register_kprobe(struct kprobe *p); +void unregister_kprobe(struct kprobe *p); +int register_kprobes(struct kprobe **kps, int num); +void unregister_kprobes(struct kprobe **kps, int num); +unsigned long arch_deref_entry_point(void *); + +int register_kretprobe(struct kretprobe *rp); +void unregister_kretprobe(struct kretprobe *rp); +int register_kretprobes(struct kretprobe **rps, int num); +void unregister_kretprobes(struct kretprobe **rps, int num); + +void kprobe_flush_task(struct task_struct *tk); +void 
recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); + +int disable_kprobe(struct kprobe *kp); +int enable_kprobe(struct kprobe *kp); + +void dump_kprobe(struct kprobe *kp); + +#else /* !CONFIG_KPROBES: */ + +static inline int kprobes_built_in(void) +{ + return 0; +} +static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + return 0; +} +static inline struct kprobe *get_kprobe(void *addr) +{ + return NULL; +} +static inline struct kprobe *kprobe_running(void) +{ + return NULL; +} +static inline int register_kprobe(struct kprobe *p) +{ + return -ENOSYS; +} +static inline int register_kprobes(struct kprobe **kps, int num) +{ + return -ENOSYS; +} +static inline void unregister_kprobe(struct kprobe *p) +{ +} +static inline void unregister_kprobes(struct kprobe **kps, int num) +{ +} +static inline int register_kretprobe(struct kretprobe *rp) +{ + return -ENOSYS; +} +static inline int register_kretprobes(struct kretprobe **rps, int num) +{ + return -ENOSYS; +} +static inline void unregister_kretprobe(struct kretprobe *rp) +{ +} +static inline void unregister_kretprobes(struct kretprobe **rps, int num) +{ +} +static inline void kprobe_flush_task(struct task_struct *tk) +{ +} +static inline int disable_kprobe(struct kprobe *kp) +{ + return -ENOSYS; +} +static inline int enable_kprobe(struct kprobe *kp) +{ + return -ENOSYS; +} +#endif /* CONFIG_KPROBES */ +static inline int disable_kretprobe(struct kretprobe *rp) +{ + return disable_kprobe(&rp->kp); +} +static inline int enable_kretprobe(struct kretprobe *rp) +{ + return enable_kprobe(&rp->kp); +} + +#ifndef CONFIG_KPROBES +static inline bool is_kprobe_insn_slot(unsigned long addr) +{ + return false; +} +#endif +#ifndef CONFIG_OPTPROBES +static inline bool is_kprobe_optinsn_slot(unsigned long addr) +{ + return false; +} +#endif + +#endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/kref.h b/include/linux/kref.h new file mode 100644 index 000000000..29220724b --- /dev/null +++ b/include/linux/kref.h @@ -0,0 +1,118 @@ +/* + * kref.h - library routines for handling generic reference counted objects + * + * Copyright (C) 2004 Greg Kroah-Hartman + * Copyright (C) 2004 IBM Corp. + * + * based on kobject.h which was: + * Copyright (C) 2002-2003 Patrick Mochel + * Copyright (C) 2002-2003 Open Source Development Labs + * + * This file is released under the GPLv2. + * + */ + +#ifndef _KREF_H_ +#define _KREF_H_ + +#include +#include + +struct kref { + refcount_t refcount; +}; + +#define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), } + +/** + * kref_init - initialize object. + * @kref: object in question. + */ +static inline void kref_init(struct kref *kref) +{ + refcount_set(&kref->refcount, 1); +} + +static inline unsigned int kref_read(const struct kref *kref) +{ + return refcount_read(&kref->refcount); +} + +/** + * kref_get - increment refcount for object. + * @kref: object. + */ +static inline void kref_get(struct kref *kref) +{ + refcount_inc(&kref->refcount); +} + +/** + * kref_put - decrement refcount for object. + * @kref: object. + * @release: pointer to the function that will clean up the object when the + * last reference to the object is released. + * This pointer is required, and it is not acceptable to pass kfree + * in as this function. If the caller does pass kfree to this + * function, you will be publicly mocked mercilessly by the kref + * maintainer, and anyone else who happens to notice it. You have + * been warned. + * + * Decrement the refcount, and if 0, call release(). 
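+ *
+ * The usual pattern (sketched here for illustration; struct foo and
+ * foo_release() are invented names) embeds the kref in the refcounted
+ * object and frees the object from the release callback:
+ *
+ *	struct foo {
+ *		struct kref refcount;
+ *		int payload;
+ *	};
+ *
+ *	static void foo_release(struct kref *kref)
+ *	{
+ *		struct foo *f = container_of(kref, struct foo, refcount);
+ *
+ *		kfree(f);
+ *	}
+ *
+ *	kref_put(&f->refcount, foo_release);
+ *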
+ * Return 1 if the object was removed, otherwise return 0. Beware, if this + * function returns 0, you still can not count on the kref from remaining in + * memory. Only use the return value if you want to see if the kref is now + * gone, not present. + */ +static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)) +{ + if (refcount_dec_and_test(&kref->refcount)) { + release(kref); + return 1; + } + return 0; +} + +static inline int kref_put_mutex(struct kref *kref, + void (*release)(struct kref *kref), + struct mutex *lock) +{ + if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) { + release(kref); + return 1; + } + return 0; +} + +static inline int kref_put_lock(struct kref *kref, + void (*release)(struct kref *kref), + spinlock_t *lock) +{ + if (refcount_dec_and_lock(&kref->refcount, lock)) { + release(kref); + return 1; + } + return 0; +} + +/** + * kref_get_unless_zero - Increment refcount for object unless it is zero. + * @kref: object. + * + * Return non-zero if the increment succeeded. Otherwise return 0. + * + * This function is intended to simplify locking around refcounting for + * objects that can be looked up from a lookup structure, and which are + * removed from that lookup structure in the object destructor. + * Operations on such objects require at least a read lock around + * lookup + kref_get, and a write lock around kref_put + remove from lookup + * structure. Furthermore, RCU implementations become extremely tricky. + * With a lookup followed by a kref_get_unless_zero *with return value check* + * locking in the kref_put path can be deferred to the actual removal from + * the lookup structure and RCU lookups become trivial. + */ +static inline int __must_check kref_get_unless_zero(struct kref *kref) +{ + return refcount_inc_not_zero(&kref->refcount); +} +#endif /* _KREF_H_ */ diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h new file mode 100644 index 000000000..0738389b4 --- /dev/null +++ b/include/linux/ks0108.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Filename: ks0108.h + * Version: 0.1.0 + * Description: ks0108 LCD Controller driver header + * + * Author: Copyright (C) Miguel Ojeda Sandonis + * Date: 2006-10-31 + */ + +#ifndef _KS0108_H_ +#define _KS0108_H_ + +/* Write a byte to the data port */ +extern void ks0108_writedata(unsigned char byte); + +/* Write a byte to the control port */ +extern void ks0108_writecontrol(unsigned char byte); + +/* Set the controller's current display state (0..1) */ +extern void ks0108_displaystate(unsigned char state); + +/* Set the controller's current startline (0..63) */ +extern void ks0108_startline(unsigned char startline); + +/* Set the controller's current address (0..63) */ +extern void ks0108_address(unsigned char address); + +/* Set the controller's current page (0..7) */ +extern void ks0108_page(unsigned char page); + +/* Is the module inited? */ +extern unsigned char ks0108_isinited(void); + +#endif /* _KS0108_H_ */ diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h new file mode 100644 index 000000000..14ba44522 --- /dev/null +++ b/include/linux/ks8842.h @@ -0,0 +1,38 @@ +/* + * ks8842.h KS8842 platform data struct definition + * Copyright (c) 2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _LINUX_KS8842_H +#define _LINUX_KS8842_H + +#include + +/** + * struct ks8842_platform_data - Platform data of the KS8842 network driver + * @macaddr: The MAC address of the device, set to all 0:s to use the on in + * the chip. + * @rx_dma_channel: The DMA channel to use for RX, -1 for none. + * @tx_dma_channel: The DMA channel to use for TX, -1 for none. + * + */ +struct ks8842_platform_data { + u8 macaddr[ETH_ALEN]; + int rx_dma_channel; + int tx_dma_channel; +}; + +#endif diff --git a/include/linux/ks8851_mll.h b/include/linux/ks8851_mll.h new file mode 100644 index 000000000..e9ccfb59e --- /dev/null +++ b/include/linux/ks8851_mll.h @@ -0,0 +1,33 @@ +/* + * ks8861_mll platform data struct definition + * Copyright (c) 2012 BTicino S.p.A. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _LINUX_KS8851_MLL_H +#define _LINUX_KS8851_MLL_H + +#include + +/** + * struct ks8851_mll_platform_data - Platform data of the KS8851_MLL network driver + * @macaddr: The MAC address of the device, set to all 0:s to use the on in + * the chip. + */ +struct ks8851_mll_platform_data { + u8 mac_addr[ETH_ALEN]; +}; + +#endif diff --git a/include/linux/ksm.h b/include/linux/ksm.h new file mode 100644 index 000000000..161e8164a --- /dev/null +++ b/include/linux/ksm.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_KSM_H +#define __LINUX_KSM_H +/* + * Memory merging support. + * + * This code enables dynamic sharing of identical pages found in different + * memory areas, even if they are not shared by fork(). + */ + +#include +#include +#include +#include +#include +#include + +struct stable_node; +struct mem_cgroup; + +#ifdef CONFIG_KSM +int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags); +int __ksm_enter(struct mm_struct *mm); +void __ksm_exit(struct mm_struct *mm); + +static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) + return __ksm_enter(mm); + return 0; +} + +static inline void ksm_exit(struct mm_struct *mm) +{ + if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) + __ksm_exit(mm); +} + +/* + * When do_swap_page() first faults in from swap what used to be a KSM page, + * no problem, it will be assigned to this vma's anon_vma; but thereafter, + * it might be faulted into a different anon_vma (or perhaps to a different + * offset in the same anon_vma). 
do_swap_page() cannot do all the locking + * needed to reconstitute a cross-anon_vma KSM page: for now it has to make + * a copy, and leave remerging the pages to a later pass of ksmd. + * + * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, + * but what if the vma was unmerged while the page was swapped out? + */ +struct page *ksm_might_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address); + +void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); +void ksm_migrate_page(struct page *newpage, struct page *oldpage); + +#else /* !CONFIG_KSM */ + +static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + return 0; +} + +static inline void ksm_exit(struct mm_struct *mm) +{ +} + +#ifdef CONFIG_MMU +static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags) +{ + return 0; +} + +static inline struct page *ksm_might_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + return page; +} + +static inline void rmap_walk_ksm(struct page *page, + struct rmap_walk_control *rwc) +{ +} + +static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) +{ +} +#endif /* CONFIG_MMU */ +#endif /* !CONFIG_KSM */ + +#endif /* __LINUX_KSM_H */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h new file mode 100644 index 000000000..72308c38e --- /dev/null +++ b/include/linux/kthread.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KTHREAD_H +#define _LINUX_KTHREAD_H +/* Simple interface for creating and stopping kernel threads without mess. */ +#include +#include +#include + +__printf(4, 5) +struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), + void *data, + int node, + const char namefmt[], ...); + +/** + * kthread_create - create a kthread on the current node + * @threadfn: the function to run in the thread + * @data: data pointer for @threadfn() + * @namefmt: printf-style format string for the thread name + * @arg...: arguments for @namefmt. + * + * This macro will create a kthread on the current node, leaving it in + * the stopped state. This is just a helper for kthread_create_on_node(); + * see the documentation there for more details. + */ +#define kthread_create(threadfn, data, namefmt, arg...) \ + kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg) + + +struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), + void *data, + unsigned int cpu, + const char *namefmt); + +void kthread_set_per_cpu(struct task_struct *k, int cpu); +bool kthread_is_per_cpu(struct task_struct *k); + +/** + * kthread_run - create and wake a thread. + * @threadfn: the function to run until signal_pending(current). + * @data: data ptr for @threadfn. + * @namefmt: printf-style name for the thread. + * + * Description: Convenient wrapper for kthread_create() followed by + * wake_up_process(). Returns the kthread or ERR_PTR(-ENOMEM). + */ +#define kthread_run(threadfn, data, namefmt, ...) 
\ +({ \ + struct task_struct *__k \ + = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \ + if (!IS_ERR(__k)) \ + wake_up_process(__k); \ + __k; \ +}) + +void free_kthread_struct(struct task_struct *k); +void kthread_bind(struct task_struct *k, unsigned int cpu); +void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); +int kthread_stop(struct task_struct *k); +bool kthread_should_stop(void); +bool kthread_should_park(void); +bool kthread_freezable_should_stop(bool *was_frozen); +void *kthread_data(struct task_struct *k); +void *kthread_probe_data(struct task_struct *k); +int kthread_park(struct task_struct *k); +void kthread_unpark(struct task_struct *k); +void kthread_parkme(void); + +int kthreadd(void *unused); +extern struct task_struct *kthreadd_task; +extern int tsk_fork_get_node(struct task_struct *tsk); + +/* + * Simple work processor based on kthread. + * + * This provides easier way to make use of kthreads. A kthread_work + * can be queued and flushed using queue/kthread_flush_work() + * respectively. Queued kthread_works are processed by a kthread + * running kthread_worker_fn(). + */ +struct kthread_work; +typedef void (*kthread_work_func_t)(struct kthread_work *work); +void kthread_delayed_work_timer_fn(struct timer_list *t); + +enum { + KTW_FREEZABLE = 1 << 0, /* freeze during suspend */ +}; + +struct kthread_worker { + unsigned int flags; + spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + struct task_struct *task; + struct kthread_work *current_work; +}; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + struct kthread_worker *worker; + /* Number of canceling calls that are running at the moment. */ + int canceling; +}; + +struct kthread_delayed_work { + struct kthread_work work; + struct timer_list timer; +}; + +#define KTHREAD_WORKER_INIT(worker) { \ + .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ + .work_list = LIST_HEAD_INIT((worker).work_list), \ + .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ + } + +#define KTHREAD_WORK_INIT(work, fn) { \ + .node = LIST_HEAD_INIT((work).node), \ + .func = (fn), \ + } + +#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \ + .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \ + .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\ + TIMER_IRQSAFE), \ + } + +#define DEFINE_KTHREAD_WORKER(worker) \ + struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) + +#define DEFINE_KTHREAD_WORK(work, fn) \ + struct kthread_work work = KTHREAD_WORK_INIT(work, fn) + +#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn) \ + struct kthread_delayed_work dwork = \ + KTHREAD_DELAYED_WORK_INIT(dwork, fn) + +/* + * kthread_worker.lock needs its own lockdep class key when defined on + * stack with lockdep enabled. Use the following macros in such cases. 
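+ *
+ * For instance (an illustrative sketch, not taken from this header), a
+ * thread function can keep its worker on the stack like this, letting
+ * other threads queue kthread_work items on &worker:
+ *
+ *	static int my_thread_fn(void *arg)
+ *	{
+ *		DEFINE_KTHREAD_WORKER_ONSTACK(worker);
+ *
+ *		return kthread_worker_fn(&worker);
+ *	}
+ *
+ * With CONFIG_LOCKDEP enabled this initializes the worker through
+ * kthread_init_worker(), so the on-stack lock gets its own class key.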
+ */ +#ifdef CONFIG_LOCKDEP +# define KTHREAD_WORKER_INIT_ONSTACK(worker) \ + ({ kthread_init_worker(&worker); worker; }) +# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ + struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) +#else +# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) +#endif + +extern void __kthread_init_worker(struct kthread_worker *worker, + const char *name, struct lock_class_key *key); + +#define kthread_init_worker(worker) \ + do { \ + static struct lock_class_key __key; \ + __kthread_init_worker((worker), "("#worker")->lock", &__key); \ + } while (0) + +#define kthread_init_work(work, fn) \ + do { \ + memset((work), 0, sizeof(struct kthread_work)); \ + INIT_LIST_HEAD(&(work)->node); \ + (work)->func = (fn); \ + } while (0) + +#define kthread_init_delayed_work(dwork, fn) \ + do { \ + kthread_init_work(&(dwork)->work, (fn)); \ + __init_timer(&(dwork)->timer, \ + kthread_delayed_work_timer_fn, \ + TIMER_IRQSAFE); \ + } while (0) + +int kthread_worker_fn(void *worker_ptr); + +__printf(2, 3) +struct kthread_worker * +kthread_create_worker(unsigned int flags, const char namefmt[], ...); + +__printf(3, 4) struct kthread_worker * +kthread_create_worker_on_cpu(int cpu, unsigned int flags, + const char namefmt[], ...); + +bool kthread_queue_work(struct kthread_worker *worker, + struct kthread_work *work); + +bool kthread_queue_delayed_work(struct kthread_worker *worker, + struct kthread_delayed_work *dwork, + unsigned long delay); + +bool kthread_mod_delayed_work(struct kthread_worker *worker, + struct kthread_delayed_work *dwork, + unsigned long delay); + +void kthread_flush_work(struct kthread_work *work); +void kthread_flush_worker(struct kthread_worker *worker); + +bool kthread_cancel_work_sync(struct kthread_work *work); +bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); + +void kthread_destroy_worker(struct kthread_worker *worker); + +#ifdef CONFIG_BLK_CGROUP +void kthread_associate_blkcg(struct cgroup_subsys_state *css); +struct cgroup_subsys_state *kthread_blkcg(void); +#else +static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { } +static inline struct cgroup_subsys_state *kthread_blkcg(void) +{ + return NULL; +} +#endif +#endif /* _LINUX_KTHREAD_H */ diff --git a/include/linux/ktime.h b/include/linux/ktime.h new file mode 100644 index 000000000..b2bb44f87 --- /dev/null +++ b/include/linux/ktime.h @@ -0,0 +1,278 @@ +/* + * include/linux/ktime.h + * + * ktime_t - nanosecond-resolution time format. + * + * Copyright(C) 2005, Thomas Gleixner + * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar + * + * data type definitions, declarations, prototypes and macros. + * + * Started by: Thomas Gleixner and Ingo Molnar + * + * Credits: + * + * Roman Zippel provided the ideas and primary code snippets of + * the ktime_t union and further simplifications of the original + * code. + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_KTIME_H +#define _LINUX_KTIME_H + +#include +#include + +/* Nanosecond scalar representation for kernel time values */ +typedef s64 ktime_t; + +/** + * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value + * @secs: seconds to set + * @nsecs: nanoseconds to set + * + * Return: The ktime_t representation of the value. 
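+ *
+ * For example (illustrative only), ktime_set(1, 500 * NSEC_PER_USEC)
+ * yields a ktime_t of 1000500000 ns, i.e. 1.0005 seconds.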
+ */ +static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) +{ + if (unlikely(secs >= KTIME_SEC_MAX)) + return KTIME_MAX; + + return secs * NSEC_PER_SEC + (s64)nsecs; +} + +/* Subtract two ktime_t variables. rem = lhs -rhs: */ +#define ktime_sub(lhs, rhs) ((lhs) - (rhs)) + +/* Add two ktime_t variables. res = lhs + rhs: */ +#define ktime_add(lhs, rhs) ((lhs) + (rhs)) + +/* + * Same as ktime_add(), but avoids undefined behaviour on overflow; however, + * this means that you must check the result for overflow yourself. + */ +#define ktime_add_unsafe(lhs, rhs) ((u64) (lhs) + (rhs)) + +/* + * Add a ktime_t variable and a scalar nanosecond value. + * res = kt + nsval: + */ +#define ktime_add_ns(kt, nsval) ((kt) + (nsval)) + +/* + * Subtract a scalar nanosecod from a ktime_t variable + * res = kt - nsval: + */ +#define ktime_sub_ns(kt, nsval) ((kt) - (nsval)) + +/* convert a timespec to ktime_t format: */ +static inline ktime_t timespec_to_ktime(struct timespec ts) +{ + return ktime_set(ts.tv_sec, ts.tv_nsec); +} + +/* convert a timespec64 to ktime_t format: */ +static inline ktime_t timespec64_to_ktime(struct timespec64 ts) +{ + return ktime_set(ts.tv_sec, ts.tv_nsec); +} + +/* convert a timeval to ktime_t format: */ +static inline ktime_t timeval_to_ktime(struct timeval tv) +{ + return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); +} + +/* Map the ktime_t to timespec conversion to ns_to_timespec function */ +#define ktime_to_timespec(kt) ns_to_timespec((kt)) + +/* Map the ktime_t to timespec conversion to ns_to_timespec function */ +#define ktime_to_timespec64(kt) ns_to_timespec64((kt)) + +/* Map the ktime_t to timeval conversion to ns_to_timeval function */ +#define ktime_to_timeval(kt) ns_to_timeval((kt)) + +/* Convert ktime_t to nanoseconds */ +static inline s64 ktime_to_ns(const ktime_t kt) +{ + return kt; +} + +/** + * ktime_compare - Compares two ktime_t variables for less, greater or equal + * @cmp1: comparable1 + * @cmp2: comparable2 + * + * Return: ... + * cmp1 < cmp2: return <0 + * cmp1 == cmp2: return 0 + * cmp1 > cmp2: return >0 + */ +static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) +{ + if (cmp1 < cmp2) + return -1; + if (cmp1 > cmp2) + return 1; + return 0; +} + +/** + * ktime_after - Compare if a ktime_t value is bigger than another one. + * @cmp1: comparable1 + * @cmp2: comparable2 + * + * Return: true if cmp1 happened after cmp2. + */ +static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2) +{ + return ktime_compare(cmp1, cmp2) > 0; +} + +/** + * ktime_before - Compare if a ktime_t value is smaller than another one. + * @cmp1: comparable1 + * @cmp2: comparable2 + * + * Return: true if cmp1 happened before cmp2. + */ +static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) +{ + return ktime_compare(cmp1, cmp2) < 0; +} + +#if BITS_PER_LONG < 64 +extern s64 __ktime_divns(const ktime_t kt, s64 div); +static inline s64 ktime_divns(const ktime_t kt, s64 div) +{ + /* + * Negative divisors could cause an inf loop, + * so bug out here. + */ + BUG_ON(div < 0); + if (__builtin_constant_p(div) && !(div >> 32)) { + s64 ns = kt; + u64 tmp = ns < 0 ? -ns : ns; + + do_div(tmp, div); + return ns < 0 ? -tmp : tmp; + } else { + return __ktime_divns(kt, div); + } +} +#else /* BITS_PER_LONG < 64 */ +static inline s64 ktime_divns(const ktime_t kt, s64 div) +{ + /* + * 32-bit implementation cannot handle negative divisors, + * so catch them on 64bit as well. 
+ */ + WARN_ON(div < 0); + return kt / div; +} +#endif + +static inline s64 ktime_to_us(const ktime_t kt) +{ + return ktime_divns(kt, NSEC_PER_USEC); +} + +static inline s64 ktime_to_ms(const ktime_t kt) +{ + return ktime_divns(kt, NSEC_PER_MSEC); +} + +static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) +{ + return ktime_to_us(ktime_sub(later, earlier)); +} + +static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier) +{ + return ktime_to_ms(ktime_sub(later, earlier)); +} + +static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) +{ + return ktime_add_ns(kt, usec * NSEC_PER_USEC); +} + +static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec) +{ + return ktime_add_ns(kt, msec * NSEC_PER_MSEC); +} + +static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) +{ + return ktime_sub_ns(kt, usec * NSEC_PER_USEC); +} + +static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec) +{ + return ktime_sub_ns(kt, msec * NSEC_PER_MSEC); +} + +extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); + +/** + * ktime_to_timespec_cond - convert a ktime_t variable to timespec + * format only if the variable contains data + * @kt: the ktime_t variable to convert + * @ts: the timespec variable to store the result in + * + * Return: %true if there was a successful conversion, %false if kt was 0. + */ +static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, + struct timespec *ts) +{ + if (kt) { + *ts = ktime_to_timespec(kt); + return true; + } else { + return false; + } +} + +/** + * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64 + * format only if the variable contains data + * @kt: the ktime_t variable to convert + * @ts: the timespec variable to store the result in + * + * Return: %true if there was a successful conversion, %false if kt was 0. + */ +static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, + struct timespec64 *ts) +{ + if (kt) { + *ts = ktime_to_timespec64(kt); + return true; + } else { + return false; + } +} + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. + */ +#define LOW_RES_NSEC TICK_NSEC +#define KTIME_LOW_RES (LOW_RES_NSEC) + +static inline ktime_t ns_to_ktime(u64 ns) +{ + return ns; +} + +static inline ktime_t ms_to_ktime(u64 ms) +{ + return ms * NSEC_PER_MSEC; +} + +# include +# include + +#endif diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h new file mode 100644 index 000000000..827f70ce0 --- /dev/null +++ b/include/linux/kvm_host.h @@ -0,0 +1,1351 @@ +#ifndef __KVM_HOST_H +#define __KVM_HOST_H + +/* + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include + +#ifndef KVM_MAX_VCPU_ID +#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS +#endif + +/* + * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used + * in kvm, other bits are visible for userspace which are defined in + * include/linux/kvm_h. 
+ */ +#define KVM_MEMSLOT_INVALID (1UL << 16) + +/* Two fragments for cross MMIO pages. */ +#define KVM_MAX_MMIO_FRAGMENTS 2 + +#ifndef KVM_ADDRESS_SPACE_NUM +#define KVM_ADDRESS_SPACE_NUM 1 +#endif + +/* + * For the normal pfn, the highest 12 bits should be zero, + * so we can mask bit 62 ~ bit 52 to indicate the error pfn, + * mask bit 63 to indicate the noslot pfn. + */ +#define KVM_PFN_ERR_MASK (0x7ffULL << 52) +#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52) +#define KVM_PFN_NOSLOT (0x1ULL << 63) + +#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK) +#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) +#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) + +/* + * error pfns indicate that the gfn is in slot but faild to + * translate it to pfn on host. + */ +static inline bool is_error_pfn(kvm_pfn_t pfn) +{ + return !!(pfn & KVM_PFN_ERR_MASK); +} + +/* + * error_noslot pfns indicate that the gfn can not be + * translated to pfn - it is not in slot or failed to + * translate it to pfn. + */ +static inline bool is_error_noslot_pfn(kvm_pfn_t pfn) +{ + return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK); +} + +/* noslot pfn indicates that the gfn is not in slot. */ +static inline bool is_noslot_pfn(kvm_pfn_t pfn) +{ + return pfn == KVM_PFN_NOSLOT; +} + +/* + * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390) + * provide own defines and kvm_is_error_hva + */ +#ifndef KVM_HVA_ERR_BAD + +#define KVM_HVA_ERR_BAD (PAGE_OFFSET) +#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE) + +static inline bool kvm_is_error_hva(unsigned long addr) +{ + return addr >= PAGE_OFFSET; +} + +#endif + +#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT)) + +static inline bool is_error_page(struct page *page) +{ + return IS_ERR(page); +} + +#define KVM_REQUEST_MASK GENMASK(7,0) +#define KVM_REQUEST_NO_WAKEUP BIT(8) +#define KVM_REQUEST_WAIT BIT(9) +/* + * Architecture-independent vcpu->requests bit members + * Bits 4-7 are reserved for more arch-independent bits. 
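+ *
+ * Architecture code builds its request numbers on top of
+ * KVM_REQUEST_ARCH_BASE with the helpers below, for example (FOO and
+ * BAR are made-up names, not requests defined anywhere in KVM):
+ *
+ *	#define KVM_REQ_FOO	KVM_ARCH_REQ(0)
+ *	#define KVM_REQ_BAR	KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT)
+ *
+ * which name arch requests 0 and 1 (bits 8 and 9 of vcpu->requests),
+ * the second one additionally carrying the KVM_REQUEST_WAIT flag.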
+ */ +#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_PENDING_TIMER 2 +#define KVM_REQ_UNHALT 3 +#define KVM_REQUEST_ARCH_BASE 8 + +#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ + BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \ + (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \ +}) +#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) + +#define KVM_USERSPACE_IRQ_SOURCE_ID 0 +#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 + +extern struct kmem_cache *kvm_vcpu_cache; + +extern struct mutex kvm_lock; +extern struct list_head vm_list; + +struct kvm_io_range { + gpa_t addr; + int len; + struct kvm_io_device *dev; +}; + +#define NR_IOBUS_DEVS 1000 + +struct kvm_io_bus { + int dev_count; + int ioeventfd_count; + struct kvm_io_range range[]; +}; + +enum kvm_bus { + KVM_MMIO_BUS, + KVM_PIO_BUS, + KVM_VIRTIO_CCW_NOTIFY_BUS, + KVM_FAST_MMIO_BUS, + KVM_NR_BUSES +}; + +int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, + int len, const void *val); +int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, + gpa_t addr, int len, const void *val, long cookie); +int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, + int len, void *val); +int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + int len, struct kvm_io_device *dev); +void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev); +struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, + gpa_t addr); + +#ifdef CONFIG_KVM_ASYNC_PF +struct kvm_async_pf { + struct work_struct work; + struct list_head link; + struct list_head queue; + struct kvm_vcpu *vcpu; + struct mm_struct *mm; + gpa_t cr2_or_gpa; + unsigned long addr; + struct kvm_arch_async_pf arch; + bool wakeup_all; +}; + +void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); +void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); +int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + unsigned long hva, struct kvm_arch_async_pf *arch); +int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); +#endif + +enum { + OUTSIDE_GUEST_MODE, + IN_GUEST_MODE, + EXITING_GUEST_MODE, + READING_SHADOW_PAGE_TABLES, +}; + +#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA) + +struct kvm_host_map { + /* + * Only valid if the 'pfn' is managed by the host kernel (i.e. There is + * a 'struct page' for it. When using mem= kernel parameter some memory + * can be used as guest memory but they are not managed by host + * kernel). + * If 'pfn' is not managed by the host kernel, this field is + * initialized to KVM_UNMAPPED_PAGE. + */ + struct page *page; + void *hva; + kvm_pfn_t pfn; + kvm_pfn_t gfn; +}; + +/* + * Used to check if the mapping is valid or not. Never use 'kvm_host_map' + * directly to check for that. + */ +static inline bool kvm_vcpu_mapped(struct kvm_host_map *map) +{ + return !!map->hva; +} + +/* + * Sometimes a large or cross-page mmio needs to be broken up into separate + * exits for userspace servicing. 
+ */ +struct kvm_mmio_fragment { + gpa_t gpa; + void *data; + unsigned len; +}; + +struct kvm_vcpu { + struct kvm *kvm; +#ifdef CONFIG_PREEMPT_NOTIFIERS + struct preempt_notifier preempt_notifier; +#endif + int cpu; + int vcpu_id; /* id given by userspace at creation */ + int vcpu_idx; /* index in kvm->vcpus array */ + int srcu_idx; + int mode; + u64 requests; + unsigned long guest_debug; + + int pre_pcpu; + struct list_head blocked_vcpu_list; + + struct mutex mutex; + struct kvm_run *run; + + int guest_xcr0_loaded; + struct swait_queue_head wq; + struct pid __rcu *pid; + int sigset_active; + sigset_t sigset; + struct kvm_vcpu_stat stat; + unsigned int halt_poll_ns; + bool valid_wakeup; + +#ifdef CONFIG_HAS_IOMEM + int mmio_needed; + int mmio_read_completed; + int mmio_is_write; + int mmio_cur_fragment; + int mmio_nr_fragments; + struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS]; +#endif + +#ifdef CONFIG_KVM_ASYNC_PF + struct { + u32 queued; + struct list_head queue; + struct list_head done; + spinlock_t lock; + } async_pf; +#endif + +#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT + /* + * Cpu relax intercept or pause loop exit optimization + * in_spin_loop: set when a vcpu does a pause loop exit + * or cpu relax intercepted. + * dy_eligible: indicates whether vcpu is eligible for directed yield. + */ + struct { + bool in_spin_loop; + bool dy_eligible; + } spin_loop; +#endif + bool preempted; + struct kvm_vcpu_arch arch; + struct dentry *debugfs_dentry; +}; + +static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) +{ + /* + * The memory barrier ensures a previous write to vcpu->requests cannot + * be reordered with the read of vcpu->mode. It pairs with the general + * memory barrier following the write of vcpu->mode in VCPU RUN. + */ + smp_mb__before_atomic(); + return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); +} + +/* + * Some of the bitops functions do not support too long bitmaps. + * This number must be determined not to exceed such limits. + */ +#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) + +struct kvm_memory_slot { + gfn_t base_gfn; + unsigned long npages; + unsigned long *dirty_bitmap; + struct kvm_arch_memory_slot arch; + unsigned long userspace_addr; + u32 flags; + short id; +}; + +static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) +{ + return ALIGN(memslot->npages, BITS_PER_LONG) / 8; +} + +static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot) +{ + unsigned long len = kvm_dirty_bitmap_bytes(memslot); + + return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap); +} + +struct kvm_s390_adapter_int { + u64 ind_addr; + u64 summary_addr; + u64 ind_offset; + u32 summary_offset; + u32 adapter_id; +}; + +struct kvm_hv_sint { + u32 vcpu; + u32 sint; +}; + +struct kvm_kernel_irq_routing_entry { + u32 gsi; + u32 type; + int (*set)(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level, + bool line_status); + union { + struct { + unsigned irqchip; + unsigned pin; + } irqchip; + struct { + u32 address_lo; + u32 address_hi; + u32 data; + u32 flags; + u32 devid; + } msi; + struct kvm_s390_adapter_int adapter; + struct kvm_hv_sint hv_sint; + }; + struct hlist_node link; +}; + +#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING +struct kvm_irq_routing_table { + int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; + u32 nr_rt_entries; + /* + * Array indexed by gsi. Each entry contains list of irq chips + * the gsi is connected to. 
+ */ + struct hlist_head map[0]; +}; +#endif + +#ifndef KVM_PRIVATE_MEM_SLOTS +#define KVM_PRIVATE_MEM_SLOTS 0 +#endif + +#ifndef KVM_MEM_SLOTS_NUM +#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) +#endif + +#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE +static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) +{ + return 0; +} +#endif + +/* + * Note: + * memslots are not sorted by id anymore, please use id_to_memslot() + * to get the memslot by its id. + */ +struct kvm_memslots { + u64 generation; + struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; + /* The mapping table from slot id to the index in memslots[]. */ + short id_to_index[KVM_MEM_SLOTS_NUM]; + atomic_t lru_slot; + int used_slots; +}; + +struct kvm { + spinlock_t mmu_lock; + struct mutex slots_lock; + struct mm_struct *mm; /* userspace tied to this vm */ + struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; + struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; + + /* + * created_vcpus is protected by kvm->lock, and is incremented + * at the beginning of KVM_CREATE_VCPU. online_vcpus is only + * incremented after storing the kvm_vcpu pointer in vcpus, + * and is accessed atomically. + */ + atomic_t online_vcpus; + int created_vcpus; + int last_boosted_vcpu; + struct list_head vm_list; + struct mutex lock; + struct kvm_io_bus __rcu *buses[KVM_NR_BUSES]; +#ifdef CONFIG_HAVE_KVM_EVENTFD + struct { + spinlock_t lock; + struct list_head items; + struct list_head resampler_list; + struct mutex resampler_lock; + } irqfds; + struct list_head ioeventfds; +#endif + struct kvm_vm_stat stat; + struct kvm_arch arch; + refcount_t users_count; +#ifdef CONFIG_KVM_MMIO + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + spinlock_t ring_lock; + struct list_head coalesced_zones; +#endif + + struct mutex irq_lock; +#ifdef CONFIG_HAVE_KVM_IRQCHIP + /* + * Update side is protected by irq_lock. + */ + struct kvm_irq_routing_table __rcu *irq_routing; +#endif +#ifdef CONFIG_HAVE_KVM_IRQFD + struct hlist_head irq_ack_notifier_list; +#endif + +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) + struct mmu_notifier mmu_notifier; + unsigned long mmu_notifier_seq; + long mmu_notifier_count; +#endif + long tlbs_dirty; + struct list_head devices; + struct dentry *debugfs_dentry; + struct kvm_stat_data **debugfs_stat_data; + struct srcu_struct srcu; + struct srcu_struct irq_srcu; + pid_t userspace_pid; +}; + +#define kvm_err(fmt, ...) \ + pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) +#define kvm_info(fmt, ...) \ + pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) +#define kvm_debug(fmt, ...) \ + pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) +#define kvm_debug_ratelimited(fmt, ...) \ + pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \ + ## __VA_ARGS__) +#define kvm_pr_unimpl(fmt, ...) \ + pr_err_ratelimited("kvm [%i]: " fmt, \ + task_tgid_nr(current), ## __VA_ARGS__) + +/* The guest did something we don't support. */ +#define vcpu_unimpl(vcpu, fmt, ...) \ + kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \ + (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__) + +#define vcpu_debug(vcpu, fmt, ...) \ + kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) +#define vcpu_debug_ratelimited(vcpu, fmt, ...) \ + kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \ + ## __VA_ARGS__) +#define vcpu_err(vcpu, fmt, ...) 
\ + kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) + +static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) +{ + return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, + lockdep_is_held(&kvm->slots_lock) || + !refcount_read(&kvm->users_count)); +} + +static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) +{ + int num_vcpus = atomic_read(&kvm->online_vcpus); + i = array_index_nospec(i, num_vcpus); + + /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */ + smp_rmb(); + return kvm->vcpus[i]; +} + +#define kvm_for_each_vcpu(idx, vcpup, kvm) \ + for (idx = 0; \ + idx < atomic_read(&kvm->online_vcpus) && \ + (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ + idx++) + +static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) +{ + struct kvm_vcpu *vcpu = NULL; + int i; + + if (id < 0) + return NULL; + if (id < KVM_MAX_VCPUS) + vcpu = kvm_get_vcpu(kvm, id); + if (vcpu && vcpu->vcpu_id == id) + return vcpu; + kvm_for_each_vcpu(i, vcpu, kvm) + if (vcpu->vcpu_id == id) + return vcpu; + return NULL; +} + +static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) +{ + return vcpu->vcpu_idx; +} + +#define kvm_for_each_memslot(memslot, slots) \ + for (memslot = &slots->memslots[0]; \ + memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ + memslot++) + +int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); +void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); + +void vcpu_load(struct kvm_vcpu *vcpu); +void vcpu_put(struct kvm_vcpu *vcpu); + +#ifdef __KVM_HAVE_IOAPIC +void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm); +void kvm_arch_post_irq_routing_update(struct kvm *kvm); +#else +static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) +{ +} +static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) +{ +} +#endif + +#ifdef CONFIG_HAVE_KVM_IRQFD +int kvm_irqfd_init(void); +void kvm_irqfd_exit(void); +#else +static inline int kvm_irqfd_init(void) +{ + return 0; +} + +static inline void kvm_irqfd_exit(void) +{ +} +#endif +int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, + struct module *module); +void kvm_exit(void); + +void kvm_get_kvm(struct kvm *kvm); +void kvm_put_kvm(struct kvm *kvm); + +static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) +{ + as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM); + return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, + lockdep_is_held(&kvm->slots_lock) || + !refcount_read(&kvm->users_count)); +} + +static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) +{ + return __kvm_memslots(kvm, 0); +} + +static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) +{ + int as_id = kvm_arch_vcpu_memslots_id(vcpu); + + return __kvm_memslots(vcpu->kvm, as_id); +} + +static inline struct kvm_memory_slot * +id_to_memslot(struct kvm_memslots *slots, int id) +{ + int index = slots->id_to_index[id]; + struct kvm_memory_slot *slot; + + slot = &slots->memslots[index]; + + WARN_ON(slot->id != id); + return slot; +} + +/* + * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: + * - create a new memory slot + * - delete an existing memory slot + * - modify an existing memory slot + * -- move it in the guest physical memory space + * -- just change its flags + * + * Since flags can be changed by some of these operations, the following + * differentiation is the best we can do for __kvm_set_memory_region(): + */ +enum kvm_mr_change { + KVM_MR_CREATE, + 
KVM_MR_DELETE, + KVM_MR_MOVE, + KVM_MR_FLAGS_ONLY, +}; + +int kvm_set_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem); +int __kvm_set_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem); +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, + struct kvm_memory_slot *dont); +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages); +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + const struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change); +void kvm_arch_commit_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change); +bool kvm_largepages_enabled(void); +void kvm_disable_largepages(void); +/* flush all memory translations */ +void kvm_arch_flush_shadow_all(struct kvm *kvm); +/* flush memory translations pointing to 'slot' */ +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot); + +int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, + struct page **pages, int nr_pages); + +struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); +unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); +unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); +unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); +unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, + bool *writable); +void kvm_release_page_clean(struct page *page); +void kvm_release_page_dirty(struct page *page); +void kvm_set_page_accessed(struct page *page); + +kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); +kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); +kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, + bool *writable); +kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); +kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); +kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, + bool atomic, bool *async, bool write_fault, + bool *writable); + +void kvm_release_pfn_clean(kvm_pfn_t pfn); +void kvm_release_pfn_dirty(kvm_pfn_t pfn); +void kvm_set_pfn_dirty(kvm_pfn_t pfn); +void kvm_set_pfn_accessed(kvm_pfn_t pfn); +void kvm_get_pfn(kvm_pfn_t pfn); + +void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache); +int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, + int len); +int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, + unsigned long len); +int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); +int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len); +int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, + int offset, int len); +int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, + unsigned long len); +int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len); +int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned int offset, + unsigned long len); +int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + gpa_t gpa, unsigned long len); +int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int 
offset, int len); +int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); +struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); +bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); +unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); +void mark_page_dirty(struct kvm *kvm, gfn_t gfn); + +struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); +struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); +kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); +kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); +int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); +int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, + struct gfn_to_pfn_cache *cache, bool atomic); +struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); +void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); +int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, + struct gfn_to_pfn_cache *cache, bool dirty, bool atomic); +unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); +unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); +int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, + int len); +int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, + unsigned long len); +int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, + unsigned long len); +int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, + int offset, int len); +int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, + unsigned long len); +void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); + +void kvm_sigset_activate(struct kvm_vcpu *vcpu); +void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); + +void kvm_vcpu_block(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); +bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); +void kvm_vcpu_kick(struct kvm_vcpu *vcpu); +int kvm_vcpu_yield_to(struct kvm_vcpu *target); +void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); + +void kvm_flush_remote_tlbs(struct kvm *kvm); +void kvm_reload_remote_mmus(struct kvm *kvm); + +bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, + unsigned long *vcpu_bitmap, cpumask_var_t tmp); +bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); + +long kvm_arch_dev_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); + +int kvm_get_dirty_log(struct kvm *kvm, + struct kvm_dirty_log *log, int *is_dirty); + +int kvm_get_dirty_log_protect(struct kvm *kvm, + struct kvm_dirty_log *log, bool *is_dirty); + +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, + gfn_t gfn_offset, + unsigned long mask); + +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, + struct kvm_dirty_log *log); + +int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, + bool line_status); +long kvm_arch_vm_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); + +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu 
*vcpu, struct kvm_fpu *fpu); +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr); + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs); +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs); +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state); +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state); +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg); +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); + +int kvm_arch_init(void *opaque); +void kvm_arch_exit(void); + +int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); + +void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); +struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); + +bool kvm_arch_has_vcpu_debugfs(void); +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); + +int kvm_arch_hardware_enable(void); +void kvm_arch_hardware_disable(void); +int kvm_arch_hardware_setup(void); +void kvm_arch_hardware_unsetup(void); +void kvm_arch_check_processor_compat(void *rtn); +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); +bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); + +#ifndef __KVM_HAVE_ARCH_VM_ALLOC +/* + * All architectures that want to use vzalloc currently also + * need their own kvm_arch_alloc_vm implementation. 
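The guest-memory accessors declared a little earlier (kvm_read_guest(), kvm_write_guest() and friends) copy between a guest physical address range and a kernel buffer, walking the memslots internally. A minimal sketch, with a hypothetical example_* helper and the usual 0-on-success convention assumed:

#include <linux/kvm_host.h>

/* Read a 32-bit counter out of guest memory, bump it, and write it back.
 * 'gpa' is a guest physical address supplied by the caller. */
static int example_bump_guest_counter(struct kvm *kvm, gpa_t gpa)
{
        u32 val;
        int ret;

        ret = kvm_read_guest(kvm, gpa, &val, sizeof(val));
        if (ret)
                return ret;

        val++;
        return kvm_write_guest(kvm, gpa, &val, sizeof(val));
}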
+ */ +static inline struct kvm *kvm_arch_alloc_vm(void) +{ + return kzalloc(sizeof(struct kvm), GFP_KERNEL); +} + +static inline void kvm_arch_free_vm(struct kvm *kvm) +{ + kfree(kvm); +} +#endif + +#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB +static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) +{ + return -ENOTSUPP; +} +#endif + +#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA +void kvm_arch_register_noncoherent_dma(struct kvm *kvm); +void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm); +bool kvm_arch_has_noncoherent_dma(struct kvm *kvm); +#else +static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) +{ +} + +static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) +{ +} + +static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) +{ + return false; +} +#endif +#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE +void kvm_arch_start_assignment(struct kvm *kvm); +void kvm_arch_end_assignment(struct kvm *kvm); +bool kvm_arch_has_assigned_device(struct kvm *kvm); +#else +static inline void kvm_arch_start_assignment(struct kvm *kvm) +{ +} + +static inline void kvm_arch_end_assignment(struct kvm *kvm) +{ +} + +static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) +{ + return false; +} +#endif + +static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) +{ +#ifdef __KVM_HAVE_ARCH_WQP + return vcpu->arch.wqp; +#else + return &vcpu->wq; +#endif +} + +#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED +/* + * returns true if the virtual interrupt controller is initialized and + * ready to accept virtual IRQ. On some architectures the virtual interrupt + * controller is dynamically instantiated and this is not always true. + */ +bool kvm_arch_intc_initialized(struct kvm *kvm); +#else +static inline bool kvm_arch_intc_initialized(struct kvm *kvm) +{ + return true; +} +#endif + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); +void kvm_arch_destroy_vm(struct kvm *kvm); +void kvm_arch_sync_events(struct kvm *kvm); + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); +void kvm_vcpu_kick(struct kvm_vcpu *vcpu); + +bool kvm_is_reserved_pfn(kvm_pfn_t pfn); +bool kvm_is_zone_device_pfn(kvm_pfn_t pfn); + +struct kvm_irq_ack_notifier { + struct hlist_node link; + unsigned gsi; + void (*irq_acked)(struct kvm_irq_ack_notifier *kian); +}; + +int kvm_irq_map_gsi(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *entries, int gsi); +int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); + +int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, + bool line_status); +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, + int irq_source_id, int level, bool line_status); +int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status); +bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); +void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); +void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); +void kvm_register_irq_ack_notifier(struct kvm *kvm, + struct kvm_irq_ack_notifier *kian); +void kvm_unregister_irq_ack_notifier(struct kvm *kvm, + struct kvm_irq_ack_notifier *kian); +int kvm_request_irq_source_id(struct kvm *kvm); +void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); + +/* + * search_memslots() and __gfn_to_memslot() are here because they are + * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. 
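kvm_register_irq_ack_notifier(), declared above, lets in-kernel users hear about guest acknowledgements of a particular GSI. A minimal sketch, assuming a hypothetical GSI number and example_* names:

#include <linux/kvm_host.h>

static void example_irq_acked(struct kvm_irq_ack_notifier *kian)
{
        /* the guest just acknowledged the interrupt behind kian->gsi */
}

static struct kvm_irq_ack_notifier example_ack_notifier = {
        .gsi       = 10,                /* hypothetical GSI */
        .irq_acked = example_irq_acked,
};

static void example_track_eoi(struct kvm *kvm)
{
        kvm_register_irq_ack_notifier(kvm, &example_ack_notifier);
        /* ... later, before the notifier goes away ... */
        kvm_unregister_irq_ack_notifier(kvm, &example_ack_notifier);
}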
+ * gfn_to_memslot() itself isn't here as an inline because that would + * bloat other code too much. + */ +static inline struct kvm_memory_slot * +search_memslots(struct kvm_memslots *slots, gfn_t gfn) +{ + int start = 0, end = slots->used_slots; + int slot = atomic_read(&slots->lru_slot); + struct kvm_memory_slot *memslots = slots->memslots; + + if (gfn >= memslots[slot].base_gfn && + gfn < memslots[slot].base_gfn + memslots[slot].npages) + return &memslots[slot]; + + while (start < end) { + slot = start + (end - start) / 2; + + if (gfn >= memslots[slot].base_gfn) + end = slot; + else + start = slot + 1; + } + + if (start < slots->used_slots && gfn >= memslots[start].base_gfn && + gfn < memslots[start].base_gfn + memslots[start].npages) { + atomic_set(&slots->lru_slot, start); + return &memslots[start]; + } + + return NULL; +} + +static inline struct kvm_memory_slot * +__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) +{ + return search_memslots(slots, gfn); +} + +static inline unsigned long +__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) +{ + /* + * The index was checked originally in search_memslots. To avoid + * that a malicious guest builds a Spectre gadget out of e.g. page + * table walks, do not let the processor speculate loads outside + * the guest's registered memslots. + */ + unsigned long offset = gfn - slot->base_gfn; + offset = array_index_nospec(offset, slot->npages); + return slot->userspace_addr + offset * PAGE_SIZE; +} + +static inline int memslot_id(struct kvm *kvm, gfn_t gfn) +{ + return gfn_to_memslot(kvm, gfn)->id; +} + +static inline gfn_t +hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) +{ + gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT; + + return slot->base_gfn + gfn_offset; +} + +static inline gpa_t gfn_to_gpa(gfn_t gfn) +{ + return (gpa_t)gfn << PAGE_SHIFT; +} + +static inline gfn_t gpa_to_gfn(gpa_t gpa) +{ + return (gfn_t)(gpa >> PAGE_SHIFT); +} + +static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn) +{ + return (hpa_t)pfn << PAGE_SHIFT; +} + +static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu, + gpa_t gpa) +{ + return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa)); +} + +static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) +{ + unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); + + return kvm_is_error_hva(hva); +} + +enum kvm_stat_kind { + KVM_STAT_VM, + KVM_STAT_VCPU, +}; + +struct kvm_stat_data { + int offset; + int mode; + struct kvm *kvm; +}; + +struct kvm_stats_debugfs_item { + const char *name; + int offset; + enum kvm_stat_kind kind; + int mode; +}; +extern struct kvm_stats_debugfs_item debugfs_entries[]; +extern struct dentry *kvm_debugfs_dir; + +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) +static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) +{ + if (unlikely(kvm->mmu_notifier_count)) + return 1; + /* + * Ensure the read of mmu_notifier_count happens before the read + * of mmu_notifier_seq. This interacts with the smp_wmb() in + * mmu_notifier_invalidate_range_end to make sure that the caller + * either sees the old (non-zero) value of mmu_notifier_count or + * the new (incremented) value of mmu_notifier_seq. + * PowerPC Book3s HV KVM calls this under a per-page lock + * rather than under kvm->mmu_lock, for scalability, so + * can't rely on kvm->mmu_lock to keep things ordered. 
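The translation helpers above reduce to shift-and-add arithmetic once a memslot is in hand. The sketch below (hypothetical example_* name) mirrors __gfn_to_hva_memslot() for a caller that already holds a slot, minus the speculation clamp shown above:

#include <linux/kvm_host.h>

static unsigned long example_gpa_to_hva(struct kvm_memory_slot *slot, gpa_t gpa)
{
        gfn_t gfn = gpa_to_gfn(gpa);            /* gpa >> PAGE_SHIFT */

        /* same arithmetic as __gfn_to_hva_memslot(), without the
         * array_index_nospec() hardening */
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}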
+ */ + smp_rmb(); + if (kvm->mmu_notifier_seq != mmu_seq) + return 1; + return 0; +} +#endif + +#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING + +#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */ + +bool kvm_arch_can_set_irq_routing(struct kvm *kvm); +int kvm_set_irq_routing(struct kvm *kvm, + const struct kvm_irq_routing_entry *entries, + unsigned nr, + unsigned flags); +int kvm_set_routing_entry(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue); +void kvm_free_irq_routing(struct kvm *kvm); + +#else + +static inline void kvm_free_irq_routing(struct kvm *kvm) {} + +#endif + +int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); + +#ifdef CONFIG_HAVE_KVM_EVENTFD + +void kvm_eventfd_init(struct kvm *kvm); +int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); + +#ifdef CONFIG_HAVE_KVM_IRQFD +int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args); +void kvm_irqfd_release(struct kvm *kvm); +void kvm_irq_routing_update(struct kvm *); +#else +static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) +{ + return -EINVAL; +} + +static inline void kvm_irqfd_release(struct kvm *kvm) {} +#endif + +#else + +static inline void kvm_eventfd_init(struct kvm *kvm) {} + +static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) +{ + return -EINVAL; +} + +static inline void kvm_irqfd_release(struct kvm *kvm) {} + +#ifdef CONFIG_HAVE_KVM_IRQCHIP +static inline void kvm_irq_routing_update(struct kvm *kvm) +{ +} +#endif + +static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) +{ + return -ENOSYS; +} + +#endif /* CONFIG_HAVE_KVM_EVENTFD */ + +void kvm_arch_irq_routing_update(struct kvm *kvm); + +static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) +{ + /* + * Ensure the rest of the request is published to kvm_check_request's + * caller. Paired with the smp_mb__after_atomic in kvm_check_request. + */ + smp_wmb(); + set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); +} + +static inline bool kvm_request_pending(struct kvm_vcpu *vcpu) +{ + return READ_ONCE(vcpu->requests); +} + +static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu) +{ + return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); +} + +static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu) +{ + clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); +} + +static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) +{ + if (kvm_test_request(req, vcpu)) { + kvm_clear_request(req, vcpu); + + /* + * Ensure the rest of the request is visible to kvm_check_request's + * caller. Paired with the smp_wmb in kvm_make_request. + */ + smp_mb__after_atomic(); + return true; + } else { + return false; + } +} + +extern bool kvm_rebooting; + +extern unsigned int halt_poll_ns; +extern unsigned int halt_poll_ns_grow; +extern unsigned int halt_poll_ns_shrink; + +struct kvm_device { + struct kvm_device_ops *ops; + struct kvm *kvm; + void *private; + struct list_head vm_node; +}; + +/* create, destroy, and name are mandatory */ +struct kvm_device_ops { + const char *name; + + /* + * create is called holding kvm->lock and any operations not suitable + * to do while holding the lock should be deferred to init (see + * below). + */ + int (*create)(struct kvm_device *dev, u32 type); + + /* + * init is called after create if create is successful and is called + * outside of holding kvm->lock. 
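kvm_make_request() and kvm_check_request() above form a producer/consumer pair whose barriers pair with each other, as their comments note. A sketch of the usual usage pattern; the example_* functions are hypothetical, and real request numbers are the KVM_REQ_* constants defined elsewhere in this header:

#include <linux/kvm_host.h>

/* Producer side: queue a request and kick the vcpu so it notices the
 * request even if it is currently running guest code. */
static void example_post_request(struct kvm_vcpu *vcpu, int req)
{
        kvm_make_request(req, vcpu);
        kvm_vcpu_kick(vcpu);
}

/* Consumer side (vcpu run loop): kvm_check_request() clears the bit and
 * orders the read of any payload published before kvm_make_request(). */
static void example_handle_requests(struct kvm_vcpu *vcpu, int req)
{
        if (!kvm_request_pending(vcpu))
                return;

        if (kvm_check_request(req, vcpu)) {
                /* ... service the request ... */
        }
}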
+ */ + void (*init)(struct kvm_device *dev); + + /* + * Destroy is responsible for freeing dev. + * + * Destroy may be called before or after destructors are called + * on emulated I/O regions, depending on whether a reference is + * held by a vcpu or other kvm component that gets destroyed + * after the emulated I/O. + */ + void (*destroy)(struct kvm_device *dev); + + int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); + int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); + int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); + long (*ioctl)(struct kvm_device *dev, unsigned int ioctl, + unsigned long arg); +}; + +void kvm_device_get(struct kvm_device *dev); +void kvm_device_put(struct kvm_device *dev); +struct kvm_device *kvm_device_from_filp(struct file *filp); +int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); +void kvm_unregister_device_ops(u32 type); + +extern struct kvm_device_ops kvm_mpic_ops; +extern struct kvm_device_ops kvm_arm_vgic_v2_ops; +extern struct kvm_device_ops kvm_arm_vgic_v3_ops; + +#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT + +static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) +{ + vcpu->spin_loop.in_spin_loop = val; +} +static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) +{ + vcpu->spin_loop.dy_eligible = val; +} + +#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ + +static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) +{ +} + +static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) +{ +} +#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ + +#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +bool kvm_arch_has_irq_bypass(void); +int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, + struct irq_bypass_producer *); +void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, + struct irq_bypass_producer *); +void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); +void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); +int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, + uint32_t guest_irq, bool set); +#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ + +#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS +/* If we wakeup during the poll time, was it a sucessful poll? 
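struct kvm_device_ops above is how in-kernel devices plug into the KVM_CREATE_DEVICE path; per the comments, create, destroy and name are mandatory and destroy() must free the device. A minimal sketch with hypothetical example_* names and a placeholder device type (real types are KVM_DEV_TYPE_* values from the KVM uapi):

#include <linux/kvm_host.h>
#include <linux/slab.h>

static int example_dev_create(struct kvm_device *dev, u32 type)
{
        /* runs under kvm->lock; defer heavy work to init() */
        return 0;
}

static void example_dev_init(struct kvm_device *dev)
{
        /* runs after a successful create(), outside kvm->lock */
}

static void example_dev_destroy(struct kvm_device *dev)
{
        kfree(dev);             /* destroy() is responsible for freeing dev */
}

static struct kvm_device_ops example_device_ops = {
        .name    = "example-device",
        .create  = example_dev_create,
        .init    = example_dev_init,
        .destroy = example_dev_destroy,
};

/* #define EXAMPLE_KVM_DEV_TYPE ...  placeholder for a KVM_DEV_TYPE_* value */
/* kvm_register_device_ops(&example_device_ops, EXAMPLE_KVM_DEV_TYPE); */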
*/ +static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) +{ + return vcpu->valid_wakeup; +} + +#else +static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) +{ + return true; +} +#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */ + +#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL +long kvm_arch_vcpu_async_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); +#else +static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, + unsigned int ioctl, + unsigned long arg) +{ + return -ENOIOCTLCMD; +} +#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ + +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end); + +#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE +int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); +#else +static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) +{ + return 0; +} +#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */ + +typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data); + +int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, + uintptr_t data, const char *name, + struct task_struct **thread_ptr); + +#endif diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h new file mode 100644 index 000000000..76c2fbc59 --- /dev/null +++ b/include/linux/kvm_irqfd.h @@ -0,0 +1,71 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * irqfd: Allows an fd to be used to inject an interrupt to the guest + * Credit goes to Avi Kivity for the original idea. + */ + +#ifndef __LINUX_KVM_IRQFD_H +#define __LINUX_KVM_IRQFD_H + +#include +#include + +/* + * Resampling irqfds are a special variety of irqfds used to emulate + * level triggered interrupts. The interrupt is asserted on eventfd + * trigger. On acknowledgment through the irq ack notifier, the + * interrupt is de-asserted and userspace is notified through the + * resamplefd. All resamplers on the same gsi are de-asserted + * together, so we don't need to track the state of each individual + * user. We can also therefore share the same irq source ID. + */ +struct kvm_kernel_irqfd_resampler { + struct kvm *kvm; + /* + * List of resampling struct _irqfd objects sharing this gsi. + * RCU list modified under kvm->irqfds.resampler_lock + */ + struct list_head list; + struct kvm_irq_ack_notifier notifier; + /* + * Entry in list of kvm->irqfd.resampler_list. Use for sharing + * resamplers among irqfds on the same gsi. 
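kvm_vm_create_worker_thread() just above spawns a kernel thread tied to a VM and hands it an opaque cookie. A thin sketch with hypothetical example_* names; the header does not show how such a worker is asked to stop, so that part is left out:

#include <linux/kvm_host.h>

static int example_vm_worker(struct kvm *kvm, uintptr_t data)
{
        /* per-VM background work; 'data' is the cookie passed below */
        return 0;
}

static int example_start_worker(struct kvm *kvm, struct task_struct **thread)
{
        return kvm_vm_create_worker_thread(kvm, example_vm_worker, 0,
                                           "example-worker", thread);
}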
+ * Accessed and modified under kvm->irqfds.resampler_lock + */ + struct list_head link; +}; + +struct kvm_kernel_irqfd { + /* Used for MSI fast-path */ + struct kvm *kvm; + wait_queue_entry_t wait; + /* Update side is protected by irqfds.lock */ + struct kvm_kernel_irq_routing_entry irq_entry; + seqcount_t irq_entry_sc; + /* Used for level IRQ fast-path */ + int gsi; + struct work_struct inject; + /* The resampler used by this irqfd (resampler-only) */ + struct kvm_kernel_irqfd_resampler *resampler; + /* Eventfd notified on resample (resampler-only) */ + struct eventfd_ctx *resamplefd; + /* Entry in list of irqfds for a resampler (resampler-only) */ + struct list_head resampler_link; + /* Used for setup/shutdown */ + struct eventfd_ctx *eventfd; + struct list_head list; + poll_table pt; + struct work_struct shutdown; + struct irq_bypass_consumer consumer; + struct irq_bypass_producer *producer; +}; + +#endif /* __LINUX_KVM_IRQFD_H */ diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h new file mode 100644 index 000000000..f23b90b02 --- /dev/null +++ b/include/linux/kvm_para.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_KVM_PARA_H +#define __LINUX_KVM_PARA_H + +#include + + +static inline bool kvm_para_has_feature(unsigned int feature) +{ + return !!(kvm_arch_para_features() & (1UL << feature)); +} + +static inline bool kvm_para_has_hint(unsigned int feature) +{ + return !!(kvm_arch_para_hints() & (1UL << feature)); +} +#endif /* __LINUX_KVM_PARA_H */ diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h new file mode 100644 index 000000000..a38729c82 --- /dev/null +++ b/include/linux/kvm_types.h @@ -0,0 +1,73 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
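kvm_para_has_feature() above is the guest-side probe for paravirtual features advertised by the hypervisor. A tiny sketch; the feature number here is only a placeholder, real values being the architecture's KVM_FEATURE_* uapi constants:

#include <linux/kvm_para.h>

#define EXAMPLE_KVM_FEATURE     0       /* placeholder bit number */

static bool example_have_pv_feature(void)
{
        return kvm_para_has_feature(EXAMPLE_KVM_FEATURE);
}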
+ * + */ + +#ifndef __KVM_TYPES_H__ +#define __KVM_TYPES_H__ + +struct kvm; +struct kvm_async_pf; +struct kvm_device_ops; +struct kvm_interrupt; +struct kvm_irq_routing_table; +struct kvm_memory_slot; +struct kvm_one_reg; +struct kvm_run; +struct kvm_userspace_memory_region; +struct kvm_vcpu; +struct kvm_vcpu_init; +struct kvm_memslots; + +enum kvm_mr_change; + +#include + +/* + * Address types: + * + * gva - guest virtual address + * gpa - guest physical address + * gfn - guest frame number + * hva - host virtual address + * hpa - host physical address + * hfn - host frame number + */ + +typedef unsigned long gva_t; +typedef u64 gpa_t; +typedef u64 gfn_t; + +typedef unsigned long hva_t; +typedef u64 hpa_t; +typedef u64 hfn_t; + +typedef hfn_t kvm_pfn_t; + +struct gfn_to_hva_cache { + u64 generation; + gpa_t gpa; + unsigned long hva; + unsigned long len; + struct kvm_memory_slot *memslot; +}; + +struct gfn_to_pfn_cache { + u64 generation; + gfn_t gfn; + kvm_pfn_t pfn; + bool dirty; +}; + +#endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h new file mode 100644 index 000000000..0402eda1a --- /dev/null +++ b/include/linux/l2tp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * L2TP-over-IP socket for L2TPv3. + * + * Author: James Chapman + */ +#ifndef _LINUX_L2TP_H_ +#define _LINUX_L2TP_H_ + +#include +#include +#include + +#endif diff --git a/include/linux/lapb.h b/include/linux/lapb.h new file mode 100644 index 000000000..eb56472f2 --- /dev/null +++ b/include/linux/lapb.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * These are the public elements of the Linux LAPB module. + */ + +#ifndef LAPB_KERNEL_H +#define LAPB_KERNEL_H + +#define LAPB_OK 0 +#define LAPB_BADTOKEN 1 +#define LAPB_INVALUE 2 +#define LAPB_CONNECTED 3 +#define LAPB_NOTCONNECTED 4 +#define LAPB_REFUSED 5 +#define LAPB_TIMEDOUT 6 +#define LAPB_NOMEM 7 + +#define LAPB_STANDARD 0x00 +#define LAPB_EXTENDED 0x01 + +#define LAPB_SLP 0x00 +#define LAPB_MLP 0x02 + +#define LAPB_DTE 0x00 +#define LAPB_DCE 0x04 + +struct lapb_register_struct { + void (*connect_confirmation)(struct net_device *dev, int reason); + void (*connect_indication)(struct net_device *dev, int reason); + void (*disconnect_confirmation)(struct net_device *dev, int reason); + void (*disconnect_indication)(struct net_device *dev, int reason); + int (*data_indication)(struct net_device *dev, struct sk_buff *skb); + void (*data_transmit)(struct net_device *dev, struct sk_buff *skb); +}; + +struct lapb_parms_struct { + unsigned int t1; + unsigned int t1timer; + unsigned int t2; + unsigned int t2timer; + unsigned int n2; + unsigned int n2count; + unsigned int window; + unsigned int state; + unsigned int mode; +}; + +extern int lapb_register(struct net_device *dev, + const struct lapb_register_struct *callbacks); +extern int lapb_unregister(struct net_device *dev); +extern int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms); +extern int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms); +extern int lapb_connect_request(struct net_device *dev); +extern int lapb_disconnect_request(struct net_device *dev); +extern int lapb_data_request(struct net_device *dev, struct sk_buff *skb); +extern int lapb_data_received(struct net_device *dev, struct sk_buff *skb); + +#endif diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h new file mode 100644 index 000000000..7c560e0dc --- /dev/null +++ b/include/linux/latencytop.h @@ -0,0 +1,57 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* + * latencytop.h: Infrastructure for displaying latency + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven + * + */ + +#ifndef _INCLUDE_GUARD_LATENCYTOP_H_ +#define _INCLUDE_GUARD_LATENCYTOP_H_ + +#include +struct task_struct; + +#ifdef CONFIG_LATENCYTOP + +#define LT_SAVECOUNT 32 +#define LT_BACKTRACEDEPTH 12 + +struct latency_record { + unsigned long backtrace[LT_BACKTRACEDEPTH]; + unsigned int count; + unsigned long time; + unsigned long max; +}; + + + +extern int latencytop_enabled; +void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); +static inline void +account_scheduler_latency(struct task_struct *task, int usecs, int inter) +{ + if (unlikely(latencytop_enabled)) + __account_scheduler_latency(task, usecs, inter); +} + +void clear_all_latency_tracing(struct task_struct *p); + +extern int sysctl_latencytop(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + +#else + +static inline void +account_scheduler_latency(struct task_struct *task, int usecs, int inter) +{ +} + +static inline void clear_all_latency_tracing(struct task_struct *p) +{ +} + +#endif + +#endif diff --git a/include/linux/lcd.h b/include/linux/lcd.h new file mode 100644 index 000000000..851eee8ff --- /dev/null +++ b/include/linux/lcd.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * LCD Lowlevel Control Abstraction + * + * Copyright (C) 2003,2004 Hewlett-Packard Company + * + */ + +#ifndef _LINUX_LCD_H +#define _LINUX_LCD_H + +#include +#include +#include +#include + +/* Notes on locking: + * + * lcd_device->ops_lock is an internal backlight lock protecting the ops + * field and no code outside the core should need to touch it. + * + * Access to set_power() is serialised by the update_lock mutex since + * most drivers seem to need this and historically get it wrong. + * + * Most drivers don't need locking on their get_power() method. + * If yours does, you need to implement it in the driver. You can use the + * update_lock mutex if appropriate. + * + * Any other use of the locks below is probably wrong. + */ + +struct lcd_device; +struct fb_info; + +struct lcd_properties { + /* The maximum value for contrast (read-only) */ + int max_contrast; +}; + +struct lcd_ops { + /* Get the LCD panel power status (0: full on, 1..3: controller + power on, flat panel power off, 4: full off), see FB_BLANK_XXX */ + int (*get_power)(struct lcd_device *); + /* + * Enable or disable power to the LCD(0: on; 4: off, see FB_BLANK_XXX) + * and this callback would be called proir to fb driver's callback. + * + * P.S. note that if early_set_power is not NULL then early fb notifier + * would be registered. + */ + int (*early_set_power)(struct lcd_device *, int power); + /* revert the effects of the early blank event. */ + int (*r_early_set_power)(struct lcd_device *, int power); + /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */ + int (*set_power)(struct lcd_device *, int power); + /* Get the current contrast setting (0-max_contrast) */ + int (*get_contrast)(struct lcd_device *); + /* Set LCD panel contrast */ + int (*set_contrast)(struct lcd_device *, int contrast); + /* Set LCD panel mode (resolutions ...) */ + int (*set_mode)(struct lcd_device *, struct fb_videomode *); + /* Check if given framebuffer device is the one LCD is bound to; + return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. 
*/ + int (*check_fb)(struct lcd_device *, struct fb_info *); +}; + +struct lcd_device { + struct lcd_properties props; + /* This protects the 'ops' field. If 'ops' is NULL, the driver that + registered this device has been unloaded, and if class_get_devdata() + points to something in the body of that driver, it is also invalid. */ + struct mutex ops_lock; + /* If this is NULL, the backing module is unloaded */ + struct lcd_ops *ops; + /* Serialise access to set_power method */ + struct mutex update_lock; + /* The framebuffer notifier block */ + struct notifier_block fb_notif; + + struct device dev; +}; + +struct lcd_platform_data { + /* reset lcd panel device. */ + int (*reset)(struct lcd_device *ld); + /* on or off to lcd panel. if 'enable' is 0 then + lcd power off and 1, lcd power on. */ + int (*power_on)(struct lcd_device *ld, int enable); + + /* it indicates whether lcd panel was enabled + from bootloader or not. */ + int lcd_enabled; + /* it means delay for stable time when it becomes low to high + or high to low that is dependent on whether reset gpio is + low active or high active. */ + unsigned int reset_delay; + /* stable time needing to become lcd power on. */ + unsigned int power_on_delay; + /* stable time needing to become lcd power off. */ + unsigned int power_off_delay; + + /* it could be used for any purpose. */ + void *pdata; +}; + +static inline void lcd_set_power(struct lcd_device *ld, int power) +{ + mutex_lock(&ld->update_lock); + if (ld->ops && ld->ops->set_power) + ld->ops->set_power(ld, power); + mutex_unlock(&ld->update_lock); +} + +extern struct lcd_device *lcd_device_register(const char *name, + struct device *parent, void *devdata, struct lcd_ops *ops); +extern struct lcd_device *devm_lcd_device_register(struct device *dev, + const char *name, struct device *parent, + void *devdata, struct lcd_ops *ops); +extern void lcd_device_unregister(struct lcd_device *ld); +extern void devm_lcd_device_unregister(struct device *dev, + struct lcd_device *ld); + +#define to_lcd_device(obj) container_of(obj, struct lcd_device, dev) + +static inline void * lcd_get_data(struct lcd_device *ld_dev) +{ + return dev_get_drvdata(&ld_dev->dev); +} + + +#endif diff --git a/include/linux/lcm.h b/include/linux/lcm.h new file mode 100644 index 000000000..0db3efd56 --- /dev/null +++ b/include/linux/lcm.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LCM_H +#define _LCM_H + +#include + +unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; +unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__; + +#endif /* _LCM_H */ diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h new file mode 100644 index 000000000..700efaa9e --- /dev/null +++ b/include/linux/led-class-flash.h @@ -0,0 +1,196 @@ +/* + * LED Flash class interface + * + * Copyright (C) 2015 Samsung Electronics Co., Ltd. + * Author: Jacek Anaszewski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef __LINUX_FLASH_LEDS_H_INCLUDED +#define __LINUX_FLASH_LEDS_H_INCLUDED + +#include + +struct device_node; +struct led_classdev_flash; + +/* + * Supported led fault bits - must be kept in synch + * with V4L2_FLASH_FAULT bits. 
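A panel driver fills struct lcd_ops and hands it to lcd_device_register(); consumers then go through lcd_set_power(), which serialises on update_lock as described above. A minimal sketch with hypothetical example_* names:

#include <linux/lcd.h>
#include <linux/fb.h>

static int example_panel_power;         /* FB_BLANK_* value, 0 = full on */

static int example_get_power(struct lcd_device *ld)
{
        return example_panel_power;
}

static int example_set_power(struct lcd_device *ld, int power)
{
        example_panel_power = power;    /* program the controller here */
        return 0;
}

static struct lcd_ops example_lcd_ops = {
        .get_power = example_get_power,
        .set_power = example_set_power,
};

/* In probe(), with 'parent' the owning device and 'priv' the driver data:
 *      struct lcd_device *ld =
 *              lcd_device_register("example-lcd", parent, priv,
 *                                  &example_lcd_ops);
 * A consumer can then call lcd_set_power(ld, FB_BLANK_POWERDOWN). */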
+ */ +#define LED_FAULT_OVER_VOLTAGE (1 << 0) +#define LED_FAULT_TIMEOUT (1 << 1) +#define LED_FAULT_OVER_TEMPERATURE (1 << 2) +#define LED_FAULT_SHORT_CIRCUIT (1 << 3) +#define LED_FAULT_OVER_CURRENT (1 << 4) +#define LED_FAULT_INDICATOR (1 << 5) +#define LED_FAULT_UNDER_VOLTAGE (1 << 6) +#define LED_FAULT_INPUT_VOLTAGE (1 << 7) +#define LED_FAULT_LED_OVER_TEMPERATURE (1 << 8) +#define LED_NUM_FLASH_FAULTS 9 + +#define LED_FLASH_SYSFS_GROUPS_SIZE 5 + +struct led_flash_ops { + /* set flash brightness */ + int (*flash_brightness_set)(struct led_classdev_flash *fled_cdev, + u32 brightness); + /* get flash brightness */ + int (*flash_brightness_get)(struct led_classdev_flash *fled_cdev, + u32 *brightness); + /* set flash strobe state */ + int (*strobe_set)(struct led_classdev_flash *fled_cdev, bool state); + /* get flash strobe state */ + int (*strobe_get)(struct led_classdev_flash *fled_cdev, bool *state); + /* set flash timeout */ + int (*timeout_set)(struct led_classdev_flash *fled_cdev, u32 timeout); + /* get the flash LED fault */ + int (*fault_get)(struct led_classdev_flash *fled_cdev, u32 *fault); +}; + +/* + * Current value of a flash setting along + * with its constraints. + */ +struct led_flash_setting { + /* maximum allowed value */ + u32 min; + /* maximum allowed value */ + u32 max; + /* step value */ + u32 step; + /* current value */ + u32 val; +}; + +struct led_classdev_flash { + /* led class device */ + struct led_classdev led_cdev; + + /* flash led specific ops */ + const struct led_flash_ops *ops; + + /* flash brightness value in microamperes along with its constraints */ + struct led_flash_setting brightness; + + /* flash timeout value in microseconds along with its constraints */ + struct led_flash_setting timeout; + + /* LED Flash class sysfs groups */ + const struct attribute_group *sysfs_groups[LED_FLASH_SYSFS_GROUPS_SIZE]; +}; + +static inline struct led_classdev_flash *lcdev_to_flcdev( + struct led_classdev *lcdev) +{ + return container_of(lcdev, struct led_classdev_flash, led_cdev); +} + +/** + * led_classdev_flash_register - register a new object of led_classdev class + * with support for flash LEDs + * @parent: the flash LED to register + * @fled_cdev: the led_classdev_flash structure for this device + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_classdev_flash_register(struct device *parent, + struct led_classdev_flash *fled_cdev); + +/** + * led_classdev_flash_unregister - unregisters an object of led_classdev class + * with support for flash LEDs + * @fled_cdev: the flash LED to unregister + * + * Unregister a previously registered via led_classdev_flash_register object + */ +extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev); + +/** + * led_set_flash_strobe - setup flash strobe + * @fled_cdev: the flash LED to set strobe on + * @state: 1 - strobe flash, 0 - stop flash strobe + * + * Strobe the flash LED. + * + * Returns: 0 on success or negative error value on failure + */ +static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev, + bool state) +{ + if (!fled_cdev) + return -EINVAL; + return fled_cdev->ops->strobe_set(fled_cdev, state); +} + +/** + * led_get_flash_strobe - get flash strobe status + * @fled_cdev: the flash LED to query + * @state: 1 - flash is strobing, 0 - flash is off + * + * Check whether the flash is strobing at the moment. 
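A flash controller driver wires its callbacks into struct led_flash_ops, embeds a led_classdev in led_classdev_flash and registers the result with led_classdev_flash_register(). A minimal sketch; the example_* names and the timeout constraints are hypothetical, and a real driver would also fill the brightness setting and the ordinary led_classdev callbacks:

#include <linux/leds.h>
#include <linux/led-class-flash.h>

static int example_strobe_set(struct led_classdev_flash *fled_cdev, bool state)
{
        /* toggle the controller's strobe enable bit */
        return 0;
}

static int example_timeout_set(struct led_classdev_flash *fled_cdev, u32 timeout)
{
        /* 'timeout' is in microseconds, within the constraints below */
        return 0;
}

static const struct led_flash_ops example_flash_ops = {
        .strobe_set  = example_strobe_set,
        .timeout_set = example_timeout_set,
};

static struct led_classdev_flash example_fled = {
        .led_cdev = {
                .name  = "example:flash",
                .flags = LED_DEV_CAP_FLASH,
        },
        .ops     = &example_flash_ops,
        .timeout = { .min = 1000, .max = 1000000, .step = 1000, .val = 100000 },
};

/* In probe(): led_classdev_flash_register(parent_dev, &example_fled); */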
+ * + * Returns: 0 on success or negative error value on failure + */ +static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev, + bool *state) +{ + if (!fled_cdev) + return -EINVAL; + if (fled_cdev->ops->strobe_get) + return fled_cdev->ops->strobe_get(fled_cdev, state); + + return -EINVAL; +} + +/** + * led_set_flash_brightness - set flash LED brightness + * @fled_cdev: the flash LED to set + * @brightness: the brightness to set it to + * + * Set a flash LED's brightness. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev, + u32 brightness); + +/** + * led_update_flash_brightness - update flash LED brightness + * @fled_cdev: the flash LED to query + * + * Get a flash LED's current brightness and update led_flash->brightness + * member with the obtained value. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev); + +/** + * led_set_flash_timeout - set flash LED timeout + * @fled_cdev: the flash LED to set + * @timeout: the flash timeout to set it to + * + * Set the flash strobe duration. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, + u32 timeout); + +/** + * led_get_flash_fault - get the flash LED fault + * @fled_cdev: the flash LED to query + * @fault: bitmask containing flash faults + * + * Get the flash LED fault. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_get_flash_fault(struct led_classdev_flash *fled_cdev, + u32 *fault); + +#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */ diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h new file mode 100644 index 000000000..4b133479d --- /dev/null +++ b/include/linux/led-lm3530.h @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2011 ST-Ericsson SA. + * Copyright (C) 2009 Motorola, Inc. 
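From the consumer side (a camera driver, for instance) the same object is driven through the wrappers above. A short sketch, assuming the usual 0-on-success convention and a hypothetical example_* helper:

#include <linux/led-class-flash.h>

/* Arm a single flash pulse of 'duration_us' microseconds. */
static int example_fire_flash(struct led_classdev_flash *fled_cdev,
                              u32 duration_us)
{
        int ret;

        ret = led_set_flash_timeout(fled_cdev, duration_us);
        if (ret)
                return ret;

        return led_set_flash_strobe(fled_cdev, true);
}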
+ * + * License Terms: GNU General Public License v2 + * + * Simple driver for National Semiconductor LM35330 Backlight driver chip + * + * Author: Shreshtha Kumar SAHU + * based on leds-lm3530.c by Dan Murphy + */ + +#ifndef _LINUX_LED_LM3530_H__ +#define _LINUX_LED_LM3530_H__ + +#define LM3530_FS_CURR_5mA (0) /* Full Scale Current */ +#define LM3530_FS_CURR_8mA (1) +#define LM3530_FS_CURR_12mA (2) +#define LM3530_FS_CURR_15mA (3) +#define LM3530_FS_CURR_19mA (4) +#define LM3530_FS_CURR_22mA (5) +#define LM3530_FS_CURR_26mA (6) +#define LM3530_FS_CURR_29mA (7) + +#define LM3530_ALS_AVRG_TIME_32ms (0) /* ALS Averaging Time */ +#define LM3530_ALS_AVRG_TIME_64ms (1) +#define LM3530_ALS_AVRG_TIME_128ms (2) +#define LM3530_ALS_AVRG_TIME_256ms (3) +#define LM3530_ALS_AVRG_TIME_512ms (4) +#define LM3530_ALS_AVRG_TIME_1024ms (5) +#define LM3530_ALS_AVRG_TIME_2048ms (6) +#define LM3530_ALS_AVRG_TIME_4096ms (7) + +#define LM3530_RAMP_TIME_1ms (0) /* Brigtness Ramp Time */ +#define LM3530_RAMP_TIME_130ms (1) /* Max to 0 and vice versa */ +#define LM3530_RAMP_TIME_260ms (2) +#define LM3530_RAMP_TIME_520ms (3) +#define LM3530_RAMP_TIME_1s (4) +#define LM3530_RAMP_TIME_2s (5) +#define LM3530_RAMP_TIME_4s (6) +#define LM3530_RAMP_TIME_8s (7) + +/* ALS Resistor Select */ +#define LM3530_ALS_IMPD_Z (0x00) /* ALS Impedance */ +#define LM3530_ALS_IMPD_13_53kOhm (0x01) +#define LM3530_ALS_IMPD_9_01kOhm (0x02) +#define LM3530_ALS_IMPD_5_41kOhm (0x03) +#define LM3530_ALS_IMPD_2_27kOhm (0x04) +#define LM3530_ALS_IMPD_1_94kOhm (0x05) +#define LM3530_ALS_IMPD_1_81kOhm (0x06) +#define LM3530_ALS_IMPD_1_6kOhm (0x07) +#define LM3530_ALS_IMPD_1_138kOhm (0x08) +#define LM3530_ALS_IMPD_1_05kOhm (0x09) +#define LM3530_ALS_IMPD_1_011kOhm (0x0A) +#define LM3530_ALS_IMPD_941Ohm (0x0B) +#define LM3530_ALS_IMPD_759Ohm (0x0C) +#define LM3530_ALS_IMPD_719Ohm (0x0D) +#define LM3530_ALS_IMPD_700Ohm (0x0E) +#define LM3530_ALS_IMPD_667Ohm (0x0F) + +enum lm3530_mode { + LM3530_BL_MODE_MANUAL = 0, /* "man" */ + LM3530_BL_MODE_ALS, /* "als" */ + LM3530_BL_MODE_PWM, /* "pwm" */ +}; + +/* ALS input select */ +enum lm3530_als_mode { + LM3530_INPUT_AVRG = 0, /* ALS1 and ALS2 input average */ + LM3530_INPUT_ALS1, /* ALS1 Input */ + LM3530_INPUT_ALS2, /* ALS2 Input */ + LM3530_INPUT_CEIL, /* Max of ALS1 and ALS2 */ +}; + +/* PWM Platform Specific Data */ +struct lm3530_pwm_data { + void (*pwm_set_intensity) (int brightness, int max_brightness); + int (*pwm_get_intensity) (int max_brightness); +}; + +/** + * struct lm3530_platform_data + * @mode: mode of operation i.e. 
Manual, ALS or PWM + * @als_input_mode: select source of ALS input - ALS1/2 or average + * @max_current: full scale LED current + * @pwm_pol_hi: PWM input polarity - active high/active low + * @als_avrg_time: ALS input averaging time + * @brt_ramp_law: brightness mapping mode - exponential/linear + * @brt_ramp_fall: rate of fall of led current + * @brt_ramp_rise: rate of rise of led current + * @als1_resistor_sel: internal resistance from ALS1 input to ground + * @als2_resistor_sel: internal resistance from ALS2 input to ground + * @als_vmin: als input voltage calibrated for max brightness in mV + * @als_vmax: als input voltage calibrated for min brightness in mV + * @brt_val: brightness value (0-127) + * @pwm_data: PWM control functions (only valid when the mode is PWM) + */ +struct lm3530_platform_data { + enum lm3530_mode mode; + enum lm3530_als_mode als_input_mode; + + u8 max_current; + bool pwm_pol_hi; + u8 als_avrg_time; + + bool brt_ramp_law; + u8 brt_ramp_fall; + u8 brt_ramp_rise; + + u8 als1_resistor_sel; + u8 als2_resistor_sel; + + u32 als_vmin; + u32 als_vmax; + + u8 brt_val; + + struct lm3530_pwm_data pwm_data; +}; + +#endif /* _LINUX_LED_LM3530_H__ */ diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h new file mode 100644 index 000000000..42f854a1a --- /dev/null +++ b/include/linux/leds-bd2802.h @@ -0,0 +1,26 @@ +/* + * leds-bd2802.h - RGB LED Driver + * + * Copyright (C) 2009 Samsung Electronics + * Kim Kyuwon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf + * + */ +#ifndef _LEDS_BD2802_H_ +#define _LEDS_BD2802_H_ + +struct bd2802_led_platform_data{ + int reset_gpio; + u8 rgb_time; +}; + +#define RGB_TIME(slopedown, slopeup, waveform) \ + ((slopedown) << 6 | (slopeup) << 4 | (waveform)) + +#endif /* _LEDS_BD2802_H_ */ + diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h new file mode 100644 index 000000000..2618aa906 --- /dev/null +++ b/include/linux/leds-lp3944.h @@ -0,0 +1,50 @@ +/* + * leds-lp3944.h - platform data structure for lp3944 led controller + * + * Copyright (C) 2009 Antonio Ospite + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
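Boards without devicetree describe the LM3530 through this platform data. A hypothetical board-file sketch using the constants defined above; every value is illustrative only, not a recommendation:

#include <linux/led-lm3530.h>

static struct lm3530_platform_data example_lm3530_pdata = {
        .mode              = LM3530_BL_MODE_MANUAL,
        .als_input_mode    = LM3530_INPUT_AVRG,
        .max_current       = LM3530_FS_CURR_26mA,
        .pwm_pol_hi        = false,
        .als_avrg_time     = LM3530_ALS_AVRG_TIME_512ms,
        .brt_ramp_law      = 1,         /* brightness mapping mode, see datasheet */
        .brt_ramp_fall     = LM3530_RAMP_TIME_260ms,
        .brt_ramp_rise     = LM3530_RAMP_TIME_260ms,
        .als1_resistor_sel = LM3530_ALS_IMPD_13_53kOhm,
        .als2_resistor_sel = LM3530_ALS_IMPD_13_53kOhm,
        .als_vmin          = 730,       /* mV */
        .als_vmax          = 1020,      /* mV */
        .brt_val           = 0x7F,      /* 0-127, start at maximum */
};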
+ * + */ + +#ifndef __LINUX_LEDS_LP3944_H +#define __LINUX_LEDS_LP3944_H + +#define LP3944_LED0 0 +#define LP3944_LED1 1 +#define LP3944_LED2 2 +#define LP3944_LED3 3 +#define LP3944_LED4 4 +#define LP3944_LED5 5 +#define LP3944_LED6 6 +#define LP3944_LED7 7 +#define LP3944_LEDS_MAX 8 + +#define LP3944_LED_STATUS_MASK 0x03 +enum lp3944_status { + LP3944_LED_STATUS_OFF = 0x0, + LP3944_LED_STATUS_ON = 0x1, + LP3944_LED_STATUS_DIM0 = 0x2, + LP3944_LED_STATUS_DIM1 = 0x3 +}; + +enum lp3944_type { + LP3944_LED_TYPE_NONE, + LP3944_LED_TYPE_LED, + LP3944_LED_TYPE_LED_INVERTED, +}; + +struct lp3944_led { + char *name; + enum lp3944_type type; + enum lp3944_status status; +}; + +struct lp3944_platform_data { + struct lp3944_led leds[LP3944_LEDS_MAX]; + u8 leds_size; +}; + +#endif /* __LINUX_LEDS_LP3944_H */ diff --git a/include/linux/leds-lp3952.h b/include/linux/leds-lp3952.h new file mode 100644 index 000000000..49b37ed8d --- /dev/null +++ b/include/linux/leds-lp3952.h @@ -0,0 +1,125 @@ +/* + * LED driver for TI lp3952 controller + * + * Copyright (C) 2016, DAQRI, LLC. + * Author: Tony Makkiel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef LEDS_LP3952_H_ +#define LEDS_LP3952_H_ + +#define LP3952_NAME "lp3952" +#define LP3952_CMD_REG_COUNT 8 +#define LP3952_BRIGHT_MAX 4 +#define LP3952_LABEL_MAX_LEN 15 + +#define LP3952_REG_LED_CTRL 0x00 +#define LP3952_REG_R1_BLNK_TIME_CTRL 0x01 +#define LP3952_REG_R1_BLNK_CYCLE_CTRL 0x02 +#define LP3952_REG_G1_BLNK_TIME_CTRL 0x03 +#define LP3952_REG_G1_BLNK_CYCLE_CTRL 0x04 +#define LP3952_REG_B1_BLNK_TIME_CTRL 0x05 +#define LP3952_REG_B1_BLNK_CYCLE_CTRL 0x06 +#define LP3952_REG_ENABLES 0x0B +#define LP3952_REG_PAT_GEN_CTRL 0x11 +#define LP3952_REG_RGB1_MAX_I_CTRL 0x12 +#define LP3952_REG_RGB2_MAX_I_CTRL 0x13 +#define LP3952_REG_CMD_0 0x50 +#define LP3952_REG_RESET 0x60 +#define REG_MAX LP3952_REG_RESET + +#define LP3952_PATRN_LOOP BIT(1) +#define LP3952_PATRN_GEN_EN BIT(2) +#define LP3952_INT_B00ST_LDR BIT(2) +#define LP3952_ACTIVE_MODE BIT(6) +#define LP3952_LED_MASK_ALL 0x3f + +/* Transition Time in ms */ +enum lp3952_tt { + TT0, + TT55, + TT110, + TT221, + TT422, + TT885, + TT1770, + TT3539 +}; + +/* Command Execution Time in ms */ +enum lp3952_cet { + CET197, + CET393, + CET590, + CET786, + CET1180, + CET1376, + CET1573, + CET1769, + CET1966, + CET2163, + CET2359, + CET2556, + CET2763, + CET2949, + CET3146 +}; + +/* Max Current in % */ +enum lp3952_colour_I_log_0 { + I0, + I7, + I14, + I21, + I32, + I46, + I71, + I100 +}; + +enum lp3952_leds { + LP3952_BLUE_2, + LP3952_GREEN_2, + LP3952_RED_2, + LP3952_BLUE_1, + LP3952_GREEN_1, + LP3952_RED_1, + LP3952_LED_ALL +}; + +struct lp3952_ctrl_hdl { + struct led_classdev cdev; + char name[LP3952_LABEL_MAX_LEN]; + enum lp3952_leds channel; + void *priv; +}; + +struct ptrn_gen_cmd { + union { + struct { + u16 tt:3; + u16 b:3; + u16 cet:4; + u16 g:3; + u16 r:3; + }; + struct { + u8 lsb; + u8 msb; + } bytes; + }; +} __packed; + +struct lp3952_led_array { + struct regmap *regmap; + struct i2c_client *client; + struct gpio_desc *enable_gpio; + struct lp3952_ctrl_hdl leds[LP3952_LED_ALL]; +}; + +#endif /* LEDS_LP3952_H_ */ diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h new file mode 100644 index 000000000..5e240b2b4 --- /dev/null +++ b/include/linux/leds-pca9532.h @@ -0,0 +1,47 @@ +/* + * pca9532.h - platform data structure for 
pca9532 led controller + * + * Copyright (C) 2008 Riku Voipio + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf + * + */ + +#ifndef __LINUX_PCA9532_H +#define __LINUX_PCA9532_H + +#include +#include +#include + +enum pca9532_state { + PCA9532_OFF = 0x0, + PCA9532_ON = 0x1, + PCA9532_PWM0 = 0x2, + PCA9532_PWM1 = 0x3, + PCA9532_KEEP = 0xff, +}; + +struct pca9532_led { + u8 id; + struct i2c_client *client; + const char *name; + const char *default_trigger; + struct led_classdev ldev; + struct work_struct work; + u32 type; + enum pca9532_state state; +}; + +struct pca9532_platform_data { + struct pca9532_led leds[16]; + u8 pwm[2]; + u8 psc[2]; + int gpio_base; +}; + +#endif /* __LINUX_PCA9532_H */ diff --git a/include/linux/leds-regulator.h b/include/linux/leds-regulator.h new file mode 100644 index 000000000..e2337a8c9 --- /dev/null +++ b/include/linux/leds-regulator.h @@ -0,0 +1,46 @@ +/* + * leds-regulator.h - platform data structure for regulator driven LEDs. + * + * Copyright (C) 2009 Antonio Ospite + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __LINUX_LEDS_REGULATOR_H +#define __LINUX_LEDS_REGULATOR_H + +/* + * Use "vled" as supply id when declaring the regulator consumer: + * + * static struct regulator_consumer_supply pcap_regulator_VVIB_consumers [] = { + * { .dev_name = "leds-regulator.0", .supply = "vled" }, + * }; + * + * If you have several regulator driven LEDs, you can append a numerical id to + * .dev_name as done above, and use the same id when declaring the platform + * device: + * + * static struct led_regulator_platform_data a780_vibrator_data = { + * .name = "a780::vibrator", + * }; + * + * static struct platform_device a780_vibrator = { + * .name = "leds-regulator", + * .id = 0, + * .dev = { + * .platform_data = &a780_vibrator_data, + * }, + * }; + */ + +#include + +struct led_regulator_platform_data { + char *name; /* LED name as expected by LED class */ + enum led_brightness brightness; /* initial brightness value */ +}; + +#endif /* __LINUX_LEDS_REGULATOR_H */ diff --git a/include/linux/leds-tca6507.h b/include/linux/leds-tca6507.h new file mode 100644 index 000000000..dcabf4fa2 --- /dev/null +++ b/include/linux/leds-tca6507.h @@ -0,0 +1,34 @@ +/* + * TCA6507 LED chip driver. + * + * Copyright (C) 2011 Neil Brown + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
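The pca9532 platform data is likewise board-file material: up to 16 pins, each optionally named and given an initial state, plus the two PWM engines. A hypothetical sketch; names, triggers and register values are illustrative only:

#include <linux/leds-pca9532.h>

static struct pca9532_platform_data example_pca9532_pdata = {
        .leds = {
                [0] = {
                        .name            = "board:green:status",
                        .state           = PCA9532_OFF,
                        .default_trigger = "heartbeat",
                },
                [1] = {
                        .name  = "board:red:fault",
                        .state = PCA9532_OFF,
                },
                /* remaining pins stay zero-initialised (PCA9532_OFF) */
        },
        .pwm = { 0, 0 },        /* PWM0/PWM1 duty-cycle registers */
        .psc = { 0, 0 },        /* PWM0/PWM1 prescalers */
};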
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_TCA6507_H +#define __LINUX_TCA6507_H +#include + +struct tca6507_platform_data { + struct led_platform_data leds; +#ifdef CONFIG_GPIOLIB + int gpio_base; + void (*setup)(unsigned gpio_base, unsigned ngpio); +#endif +}; + +#define TCA6507_MAKE_GPIO 1 +#endif /* __LINUX_TCA6507_H*/ diff --git a/include/linux/leds.h b/include/linux/leds.h new file mode 100644 index 000000000..834683d60 --- /dev/null +++ b/include/linux/leds.h @@ -0,0 +1,475 @@ +/* + * Driver model for leds and led triggers + * + * Copyright (C) 2005 John Lenz + * Copyright (C) 2005 Richard Purdie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef __LINUX_LEDS_H_INCLUDED +#define __LINUX_LEDS_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include +#include + +struct device; +/* + * LED Core + */ + +enum led_brightness { + LED_OFF = 0, + LED_ON = 1, + LED_HALF = 127, + LED_FULL = 255, +}; + +struct led_classdev { + const char *name; + enum led_brightness brightness; + enum led_brightness max_brightness; + int flags; + + /* Lower 16 bits reflect status */ +#define LED_SUSPENDED BIT(0) +#define LED_UNREGISTERING BIT(1) + /* Upper 16 bits reflect control information */ +#define LED_CORE_SUSPENDRESUME BIT(16) +#define LED_SYSFS_DISABLE BIT(17) +#define LED_DEV_CAP_FLASH BIT(18) +#define LED_HW_PLUGGABLE BIT(19) +#define LED_PANIC_INDICATOR BIT(20) +#define LED_BRIGHT_HW_CHANGED BIT(21) +#define LED_RETAIN_AT_SHUTDOWN BIT(22) + + /* set_brightness_work / blink_timer flags, atomic, private. */ + unsigned long work_flags; + +#define LED_BLINK_SW 0 +#define LED_BLINK_ONESHOT 1 +#define LED_BLINK_ONESHOT_STOP 2 +#define LED_BLINK_INVERT 3 +#define LED_BLINK_BRIGHTNESS_CHANGE 4 +#define LED_BLINK_DISABLE 5 + + /* Set LED brightness level + * Must not sleep. Use brightness_set_blocking for drivers + * that can sleep while setting brightness. + */ + void (*brightness_set)(struct led_classdev *led_cdev, + enum led_brightness brightness); + /* + * Set LED brightness level immediately - it can block the caller for + * the time required for accessing a LED device register. + */ + int (*brightness_set_blocking)(struct led_classdev *led_cdev, + enum led_brightness brightness); + /* Get LED brightness level */ + enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); + + /* + * Activate hardware accelerated blink, delays are in milliseconds + * and if both are zero then a sensible default should be chosen. + * The call should adjust the timings in that case and if it can't + * match the values specified exactly. + * Deactivate blinking again when the brightness is set to LED_OFF + * via the brightness_set() callback. 
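 * For instance (an illustrative sketch only, with made-up names), a
 * driver might fall back to a 500 ms / 500 ms pattern when both delays
 * are zero and report the timing it actually programmed:
 *
 *	static int my_blink_set(struct led_classdev *cdev,
 *				unsigned long *delay_on,
 *				unsigned long *delay_off)
 *	{
 *		if (!*delay_on && !*delay_off)
 *			*delay_on = *delay_off = 500;
 *		return my_hw_program_blink(cdev, *delay_on, *delay_off);
 *	}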
+ */ + int (*blink_set)(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off); + + struct device *dev; + const struct attribute_group **groups; + + struct list_head node; /* LED Device list */ + const char *default_trigger; /* Trigger to use */ + + unsigned long blink_delay_on, blink_delay_off; + struct timer_list blink_timer; + int blink_brightness; + int new_blink_brightness; + void (*flash_resume)(struct led_classdev *led_cdev); + + struct work_struct set_brightness_work; + int delayed_set_value; + +#ifdef CONFIG_LEDS_TRIGGERS + /* Protects the trigger data below */ + struct rw_semaphore trigger_lock; + + struct led_trigger *trigger; + struct list_head trig_list; + void *trigger_data; + /* true if activated - deactivate routine uses it to do cleanup */ + bool activated; +#endif + +#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED + int brightness_hw_changed; + struct kernfs_node *brightness_hw_changed_kn; +#endif + + /* Ensures consistent access to the LED Flash Class device */ + struct mutex led_access; +}; + +extern int of_led_classdev_register(struct device *parent, + struct device_node *np, + struct led_classdev *led_cdev); +#define led_classdev_register(parent, led_cdev) \ + of_led_classdev_register(parent, NULL, led_cdev) +extern int devm_of_led_classdev_register(struct device *parent, + struct device_node *np, + struct led_classdev *led_cdev); +#define devm_led_classdev_register(parent, led_cdev) \ + devm_of_led_classdev_register(parent, NULL, led_cdev) +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern void devm_led_classdev_unregister(struct device *parent, + struct led_classdev *led_cdev); +extern void led_classdev_suspend(struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); + +/** + * led_blink_set - set blinking with software fallback + * @led_cdev: the LED to start blinking + * @delay_on: the time it should be on (in ms) + * @delay_off: the time it should be off (in ms) + * + * This function makes the LED blink, attempting to use the + * hardware acceleration if possible, but falling back to + * software blinking if there is no hardware blinking or if + * the LED refuses the passed values. + * + * Note that if software blinking is active, simply calling + * led_cdev->brightness_set() will not stop the blinking, + * use led_classdev_brightness_set() instead. + */ +extern void led_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off); +/** + * led_blink_set_oneshot - do a oneshot software blink + * @led_cdev: the LED to start blinking + * @delay_on: the time it should be on (in ms) + * @delay_off: the time it should be off (in ms) + * @invert: blink off, then on, leaving the led on + * + * This function makes the LED blink one time for delay_on + + * delay_off time, ignoring the request if another one-shot + * blink is already in progress. + * + * If invert is set, led blinks for delay_off first, then for + * delay_on and leaves the led on after the on-off cycle. + */ +extern void led_blink_set_oneshot(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off, + int invert); +/** + * led_set_brightness - set LED brightness + * @led_cdev: the LED to set + * @brightness: the brightness to set it to + * + * Set an LED's brightness, and, if necessary, cancel the + * software blink timer that implements blinking when the + * hardware doesn't. This function is guaranteed not to sleep.
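 *
 * Because it does not sleep it may be called from atomic context, for
 * example from a timer callback (an illustrative sketch; "my_led" and
 * "my_timer_fn" are made-up names):
 *
 *	static void my_timer_fn(struct timer_list *t)
 *	{
 *		led_set_brightness(&my_led, LED_FULL);
 *	}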
+ */ +extern void led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness); + +/** + * led_set_brightness_sync - set LED brightness synchronously + * @led_cdev: the LED to set + * @brightness: the brightness to set it to + * + * Set an LED's brightness immediately. This function will block + * the caller for the time required for accessing device registers, + * and it can sleep. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_set_brightness_sync(struct led_classdev *led_cdev, + enum led_brightness value); + +/** + * led_update_brightness - update LED brightness + * @led_cdev: the LED to query + * + * Get an LED's current brightness and update led_cdev->brightness + * member with the obtained value. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_update_brightness(struct led_classdev *led_cdev); + +/** + * led_sysfs_disable - disable LED sysfs interface + * @led_cdev: the LED to set + * + * Disable the led_cdev's sysfs interface. + */ +extern void led_sysfs_disable(struct led_classdev *led_cdev); + +/** + * led_sysfs_enable - enable LED sysfs interface + * @led_cdev: the LED to set + * + * Enable the led_cdev's sysfs interface. + */ +extern void led_sysfs_enable(struct led_classdev *led_cdev); + +/** + * led_sysfs_is_disabled - check if LED sysfs interface is disabled + * @led_cdev: the LED to query + * + * Returns: true if the led_cdev's sysfs interface is disabled. + */ +static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev) +{ + return led_cdev->flags & LED_SYSFS_DISABLE; +} + +/* + * LED Triggers + */ +/* Registration functions for simple triggers */ +#define DEFINE_LED_TRIGGER(x) static struct led_trigger *x; +#define DEFINE_LED_TRIGGER_GLOBAL(x) struct led_trigger *x; + +#ifdef CONFIG_LEDS_TRIGGERS + +#define TRIG_NAME_MAX 50 + +struct led_trigger { + /* Trigger Properties */ + const char *name; + int (*activate)(struct led_classdev *led_cdev); + void (*deactivate)(struct led_classdev *led_cdev); + + /* LEDs under control by this trigger (for simple triggers) */ + rwlock_t leddev_list_lock; + struct list_head led_cdevs; + + /* Link to next registered trigger */ + struct list_head next_trig; + + const struct attribute_group **groups; +}; + +/* + * Currently the attributes in struct led_trigger::groups are added directly to + * the LED device. As this might change in the future, the following + * macros abstract getting the LED device and its trigger_data from the dev + * parameter passed to the attribute accessor functions. 
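 *
 * For example, a trigger's attribute accessor could look like the
 * following sketch (hypothetical "struct my_trig_data" with an
 * unsigned int "interval" member):
 *
 *	static ssize_t interval_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		struct my_trig_data *data = led_trigger_get_drvdata(dev);
 *
 *		return sprintf(buf, "%u\n", data->interval);
 *	}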
+ */ +#define led_trigger_get_led(dev) ((struct led_classdev *)dev_get_drvdata((dev))) +#define led_trigger_get_drvdata(dev) (led_get_trigger_data(led_trigger_get_led(dev))) + +ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, + char *buf); + +/* Registration functions for complex triggers */ +extern int led_trigger_register(struct led_trigger *trigger); +extern void led_trigger_unregister(struct led_trigger *trigger); +extern int devm_led_trigger_register(struct device *dev, + struct led_trigger *trigger); + +extern void led_trigger_register_simple(const char *name, + struct led_trigger **trigger); +extern void led_trigger_unregister_simple(struct led_trigger *trigger); +extern void led_trigger_event(struct led_trigger *trigger, + enum led_brightness event); +extern void led_trigger_blink(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off); +extern void led_trigger_blink_oneshot(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off, + int invert); +extern void led_trigger_set_default(struct led_classdev *led_cdev); +extern int led_trigger_set(struct led_classdev *led_cdev, + struct led_trigger *trigger); +extern void led_trigger_remove(struct led_classdev *led_cdev); + +static inline void led_set_trigger_data(struct led_classdev *led_cdev, + void *trigger_data) +{ + led_cdev->trigger_data = trigger_data; +} + +static inline void *led_get_trigger_data(struct led_classdev *led_cdev) +{ + return led_cdev->trigger_data; +} + +/** + * led_trigger_rename_static - rename a trigger + * @name: the new trigger name + * @trig: the LED trigger to rename + * + * Change a LED trigger name by copying the string passed in + * name into current trigger name, which MUST be large + * enough for the new string. + * + * Note that name must NOT point to the same string used + * during LED registration, as that could lead to races. + * + * This is meant to be used on triggers with statically + * allocated name. 
+ */ +extern void led_trigger_rename_static(const char *name, + struct led_trigger *trig); + +#define module_led_trigger(__led_trigger) \ + module_driver(__led_trigger, led_trigger_register, \ + led_trigger_unregister) + +#else + +/* Trigger has no members */ +struct led_trigger {}; + +/* Trigger inline empty functions */ +static inline void led_trigger_register_simple(const char *name, + struct led_trigger **trigger) {} +static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} +static inline void led_trigger_event(struct led_trigger *trigger, + enum led_brightness event) {} +static inline void led_trigger_blink(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off) {} +static inline void led_trigger_blink_oneshot(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off, + int invert) {} +static inline void led_trigger_set_default(struct led_classdev *led_cdev) {} +static inline int led_trigger_set(struct led_classdev *led_cdev, + struct led_trigger *trigger) +{ + return 0; +} + +static inline void led_trigger_remove(struct led_classdev *led_cdev) {} +static inline void led_set_trigger_data(struct led_classdev *led_cdev) {} +static inline void *led_get_trigger_data(struct led_classdev *led_cdev) +{ + return NULL; +} + +#endif /* CONFIG_LEDS_TRIGGERS */ + +/* Trigger specific functions */ +#ifdef CONFIG_LEDS_TRIGGER_DISK +extern void ledtrig_disk_activity(bool write); +#else +static inline void ledtrig_disk_activity(bool write) {} +#endif + +#ifdef CONFIG_LEDS_TRIGGER_MTD +extern void ledtrig_mtd_activity(void); +#else +static inline void ledtrig_mtd_activity(void) {} +#endif + +#if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE) +extern void ledtrig_flash_ctrl(bool on); +extern void ledtrig_torch_ctrl(bool on); +#else +static inline void ledtrig_flash_ctrl(bool on) {} +static inline void ledtrig_torch_ctrl(bool on) {} +#endif + +/* + * Generic LED platform data for describing LED names and default triggers. 
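 *
 * A board file might describe its LEDs along these lines (an
 * illustrative sketch; the names and the "heartbeat" trigger are just
 * examples):
 *
 *	static struct led_info my_board_leds[] = {
 *		{ .name = "status:green", .default_trigger = "heartbeat" },
 *		{ .name = "status:red" },
 *	};
 *
 *	static struct led_platform_data my_board_led_pdata = {
 *		.num_leds	= ARRAY_SIZE(my_board_leds),
 *		.leds		= my_board_leds,
 *	};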
+ */ +struct led_info { + const char *name; + const char *default_trigger; + int flags; +}; + +struct led_platform_data { + int num_leds; + struct led_info *leds; +}; + +struct gpio_desc; +typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state, + unsigned long *delay_on, + unsigned long *delay_off); + +/* For the leds-gpio driver */ +struct gpio_led { + const char *name; + const char *default_trigger; + unsigned gpio; + unsigned active_low : 1; + unsigned retain_state_suspended : 1; + unsigned panic_indicator : 1; + unsigned default_state : 2; + unsigned retain_state_shutdown : 1; + /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ + struct gpio_desc *gpiod; +}; +#define LEDS_GPIO_DEFSTATE_OFF 0 +#define LEDS_GPIO_DEFSTATE_ON 1 +#define LEDS_GPIO_DEFSTATE_KEEP 2 + +struct gpio_led_platform_data { + int num_leds; + const struct gpio_led *leds; + +#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ +#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ +#define GPIO_LED_BLINK 2 /* Please, blink */ + gpio_blink_set_t gpio_blink_set; +}; + +#ifdef CONFIG_NEW_LEDS +struct platform_device *gpio_led_register_device( + int id, const struct gpio_led_platform_data *pdata); +#else +static inline struct platform_device *gpio_led_register_device( + int id, const struct gpio_led_platform_data *pdata) +{ + return 0; +} +#endif + +enum cpu_led_event { + CPU_LED_IDLE_START, /* CPU enters idle */ + CPU_LED_IDLE_END, /* CPU idle ends */ + CPU_LED_START, /* Machine starts, especially resume */ + CPU_LED_STOP, /* Machine stops, especially suspend */ + CPU_LED_HALTED, /* Machine shutdown */ +}; +#ifdef CONFIG_LEDS_TRIGGER_CPU +extern void ledtrig_cpu(enum cpu_led_event evt); +#else +static inline void ledtrig_cpu(enum cpu_led_event evt) +{ + return; +} +#endif + +#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED +extern void led_classdev_notify_brightness_hw_changed( + struct led_classdev *led_cdev, enum led_brightness brightness); +#else +static inline void led_classdev_notify_brightness_hw_changed( + struct led_classdev *led_cdev, enum led_brightness brightness) { } +#endif + +#endif /* __LINUX_LEDS_H_INCLUDED */ diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h new file mode 100644 index 000000000..93d101d28 --- /dev/null +++ b/include/linux/leds_pwm.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PWM LED driver data - see drivers/leds/leds-pwm.c + */ +#ifndef __LINUX_LEDS_PWM_H +#define __LINUX_LEDS_PWM_H + +struct led_pwm { + const char *name; + const char *default_trigger; + unsigned pwm_id __deprecated; + u8 active_low; + unsigned max_brightness; + unsigned pwm_period_ns; +}; + +struct led_pwm_platform_data { + int num_leds; + struct led_pwm *leds; +}; + +#endif diff --git a/include/linux/libata.h b/include/linux/libata.h new file mode 100644 index 000000000..73cd01824 --- /dev/null +++ b/include/linux/libata.h @@ -0,0 +1,2001 @@ +/* + * Copyright 2003-2005 Red Hat, Inc. All rights reserved. + * Copyright 2003-2005 Jeff Garzik + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/driver-api/libata.rst + * + */ + +#ifndef __LINUX_LIBATA_H__ +#define __LINUX_LIBATA_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Define if arch has non-standard setup. This is a _PCI_ standard + * not a legacy or ISA standard. + */ +#ifdef CONFIG_ATA_NONSTANDARD +#include +#else +#define ATA_PRIMARY_IRQ(dev) 14 +#define ATA_SECONDARY_IRQ(dev) 15 +#endif + +/* + * compile-time options: to be removed as soon as all the drivers are + * converted to the new debugging mechanism + */ +#undef ATA_DEBUG /* debugging output */ +#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ +#undef ATA_IRQ_TRAP /* define to ack screaming irqs */ +#undef ATA_NDEBUG /* define to disable quick runtime checks */ + + +/* note: prints function name for you */ +#ifdef ATA_DEBUG +#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) +#ifdef ATA_VERBOSE_DEBUG +#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) +#else +#define VPRINTK(fmt, args...) +#endif /* ATA_VERBOSE_DEBUG */ +#else +#define DPRINTK(fmt, args...) +#define VPRINTK(fmt, args...) +#endif /* ATA_DEBUG */ + +#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) + +#define ata_print_version_once(dev, version) \ +({ \ + static bool __print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + ata_print_version(dev, version); \ + } \ +}) + +/* NEW: debug levels */ +#define HAVE_LIBATA_MSG 1 + +enum { + ATA_MSG_DRV = 0x0001, + ATA_MSG_INFO = 0x0002, + ATA_MSG_PROBE = 0x0004, + ATA_MSG_WARN = 0x0008, + ATA_MSG_MALLOC = 0x0010, + ATA_MSG_CTL = 0x0020, + ATA_MSG_INTR = 0x0040, + ATA_MSG_ERR = 0x0080, +}; + +#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV) +#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO) +#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE) +#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN) +#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC) +#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL) +#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR) +#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR) + +static inline u32 ata_msg_init(int dval, int default_msg_enable_bits) +{ + if (dval < 0 || dval >= (sizeof(u32) * 8)) + return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */ + if (!dval) + return 0; + return (1 << dval) - 1; +} + +/* defines only for the constants which don't work well as enums */ +#define ATA_TAG_POISON 0xfafbfcfdU + +enum { + /* various global constants */ + LIBATA_MAX_PRD = ATA_MAX_PRD / 2, + LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */ + ATA_DEF_QUEUE = 1, + ATA_MAX_QUEUE = 32, + ATA_TAG_INTERNAL = ATA_MAX_QUEUE, + ATA_SHORT_PAUSE = 16, + + ATAPI_MAX_DRAIN = 16 << 10, + + ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, + + ATA_SHT_EMULATED = 1, + ATA_SHT_THIS_ID = -1, + ATA_SHT_USE_CLUSTERING = 1, + + /* struct ata_taskfile flags */ + ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ + ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ + ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ + 
ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ + ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ + ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ + ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */ + + /* struct ata_device stuff */ + ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ + ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ + ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */ + ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */ + ATA_DFLAG_FLUSH_EXT = (1 << 4), /* do FLUSH_EXT instead of FLUSH */ + ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */ + ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */ + ATA_DFLAG_AN = (1 << 7), /* AN configured */ + ATA_DFLAG_TRUSTED = (1 << 8), /* device supports trusted send/recv */ + ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */ + ATA_DFLAG_CFG_MASK = (1 << 12) - 1, + + ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */ + ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */ + ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ + ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ + ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ + ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ + ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */ + ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */ + ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */ + ATA_DFLAG_INIT_MASK = (1 << 24) - 1, + + ATA_DFLAG_DETACH = (1 << 24), + ATA_DFLAG_DETACHED = (1 << 25), + + ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */ + ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */ + ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */ + ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */ + ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */ + + ATA_DEV_UNKNOWN = 0, /* unknown device */ + ATA_DEV_ATA = 1, /* ATA device */ + ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ + ATA_DEV_ATAPI = 3, /* ATAPI device */ + ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */ + ATA_DEV_PMP = 5, /* SATA port multiplier */ + ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ + ATA_DEV_SEMB = 7, /* SEMB */ + ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ + ATA_DEV_ZAC = 9, /* ZAC device */ + ATA_DEV_ZAC_UNSUP = 10, /* ZAC device (unsupported) */ + ATA_DEV_NONE = 11, /* no device */ + + /* struct ata_link flags */ + ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ + ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ + ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ + ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ + ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB, + ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */ + ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */ + ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ + ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ + ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */ + ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */ + ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */ + + /* struct ata_port flags */ + ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ + /* (doesn't imply presence) */ + ATA_FLAG_SATA = (1 << 1), + ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */ + 
ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ + ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ + ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ + ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ + ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD + * doesn't handle PIO interrupts */ + ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */ + ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */ + ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */ + ATA_FLAG_DEBUGMSG = (1 << 13), + ATA_FLAG_FPDMA_AA = (1 << 14), /* driver supports Auto-Activate */ + ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */ + ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */ + ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ + ATA_FLAG_AN = (1 << 18), /* controller supports AN */ + ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ + ATA_FLAG_FPDMA_AUX = (1 << 20), /* controller supports H2DFIS aux field */ + ATA_FLAG_EM = (1 << 21), /* driver supports enclosure + * management */ + ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity + * led */ + ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ + ATA_FLAG_SAS_HOST = (1 << 24), /* SAS host */ + + /* bits 24:31 of ap->flags are reserved for LLD specific flags */ + + + /* struct ata_port pflags */ + ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ + ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ + ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ + ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ + ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ + ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ + ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */ + ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */ + ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */ + ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */ + + ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ + ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ + ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */ + + ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */ + ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ + ATA_PFLAG_EXTERNAL = (1 << 22), /* eSATA/external port */ + + /* struct ata_queued_cmd flags */ + ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ + ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */ + ATA_QCFLAG_IO = (1 << 3), /* standard IO command */ + ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ + ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ + ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ + ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */ + + ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ + ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ + ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */ + + /* host set flags */ + ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */ + ATA_HOST_STARTED = (1 << 1), /* Host started */ + ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */ + ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. 
*/ + + /* bits 24:31 of host->flags are reserved for LLD specific flags */ + + /* various lengths of time */ + ATA_TMOUT_BOOT = 30000, /* heuristic */ + ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ + ATA_TMOUT_INTERNAL_QUICK = 5000, + ATA_TMOUT_MAX_PARK = 30000, + + /* + * GoVault needs 2s and iVDR disk HHD424020F7SV00 800ms. 2s + * is too much without parallel probing. Use 2s if parallel + * probing is available, 800ms otherwise. + */ + ATA_TMOUT_FF_WAIT_LONG = 2000, + ATA_TMOUT_FF_WAIT = 800, + + /* Spec mandates to wait for ">= 2ms" before checking status + * after reset. We wait 150ms, because that was the magic + * delay used for ATAPI devices in Hale Landis's ATADRVR, for + * the period of time between when the ATA command register is + * written, and then status is checked. Because waiting for + * "a while" before checking status is fine, post SRST, we + * perform this magic delay here as well. + * + * Old drivers/ide uses the 2mS rule and then waits for ready. + */ + ATA_WAIT_AFTER_RESET = 150, + + /* If PMP is supported, we have to do follow-up SRST. As some + * PMPs don't send D2H Reg FIS after hardreset, LLDs are + * advised to wait only for the following duration before + * doing SRST. + */ + ATA_TMOUT_PMP_SRST_WAIT = 5000, + + /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might + * be a spurious PHY event, so ignore the first PHY event that + * occurs within 10s after the policy change. + */ + ATA_TMOUT_SPURIOUS_PHY = 10000, + + /* ATA bus states */ + BUS_UNKNOWN = 0, + BUS_DMA = 1, + BUS_IDLE = 2, + BUS_NOINTR = 3, + BUS_NODATA = 4, + BUS_TIMER = 5, + BUS_PIO = 6, + BUS_EDD = 7, + BUS_IDENTIFY = 8, + BUS_PACKET = 9, + + /* SATA port states */ + PORT_UNKNOWN = 0, + PORT_ENABLED = 1, + PORT_DISABLED = 2, + + /* encoding various smaller bitmaps into a single + * unsigned long bitmap + */ + ATA_NR_PIO_MODES = 7, + ATA_NR_MWDMA_MODES = 5, + ATA_NR_UDMA_MODES = 8, + + ATA_SHIFT_PIO = 0, + ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES, + ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES, + ATA_SHIFT_PRIO = 6, + + ATA_PRIO_HIGH = 2, + /* size of buffer to pad xfers ending on unaligned boundaries */ + ATA_DMA_PAD_SZ = 4, + + /* ering size */ + ATA_ERING_SIZE = 32, + + /* return values for ->qc_defer */ + ATA_DEFER_LINK = 1, + ATA_DEFER_PORT = 2, + + /* desc_len for ata_eh_info and context */ + ATA_EH_DESC_LEN = 80, + + /* reset / recovery action types */ + ATA_EH_REVALIDATE = (1 << 0), + ATA_EH_SOFTRESET = (1 << 1), /* meaningful only in ->prereset */ + ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ + ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, + ATA_EH_ENABLE_LINK = (1 << 3), + ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ + + ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, + ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | + ATA_EH_ENABLE_LINK, + + /* ata_eh_info->flags */ + ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ + ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ + ATA_EHI_QUIET = (1 << 3), /* be quiet */ + ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */ + + ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ + ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ + ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */ + ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */ + ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */ + + ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, + + /* mask of flags to 
transfer *to* the slave link */ + ATA_EHI_TO_SLAVE_MASK = ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, + + /* max tries if error condition is still set after ->error_handler */ + ATA_EH_MAX_TRIES = 5, + + /* sometimes resuming a link requires several retries */ + ATA_LINK_RESUME_TRIES = 5, + + /* how hard are we gonna try to probe/recover devices */ + ATA_PROBE_MAX_TRIES = 3, + ATA_EH_DEV_TRIES = 3, + ATA_EH_PMP_TRIES = 5, + ATA_EH_PMP_LINK_TRIES = 3, + + SATA_PMP_RW_TIMEOUT = 3000, /* PMP read/write timeout */ + + /* This should match the actual table size of + * ata_eh_cmd_timeout_table in libata-eh.c. + */ + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7, + + /* Horkage types. May be set by libata or controller on drives + (some horkage may be drive/controller pair dependent */ + + ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */ + ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */ + ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */ + ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */ + ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ + ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */ + ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ + ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ + ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ + ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ + ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands + not multiple of 16 bytes */ + ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ + ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ + ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ + ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ + ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ + ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ + ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ + ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ + ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ + ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ + ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ + ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ + ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ + ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ + ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ + ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */ + + /* DMA mask for user DMA control: User visible values; DO NOT + renumber */ + ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ + ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ + ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */ + + /* ATAPI command types */ + ATAPI_READ = 0, /* READs */ + ATAPI_WRITE = 1, /* WRITEs */ + ATAPI_READ_CD = 2, /* READ CD [MSF] */ + ATAPI_PASS_THRU = 3, /* SAT pass-thru */ + ATAPI_MISC = 4, /* the rest */ + + /* Timing constants */ + ATA_TIMING_SETUP = (1 << 0), + ATA_TIMING_ACT8B = (1 << 1), + ATA_TIMING_REC8B = (1 << 2), + ATA_TIMING_CYC8B = (1 << 3), + ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | + ATA_TIMING_CYC8B, + ATA_TIMING_ACTIVE = (1 << 4), + ATA_TIMING_RECOVER = (1 << 5), + ATA_TIMING_DMACK_HOLD = (1 << 6), + ATA_TIMING_CYCLE = (1 << 7), + ATA_TIMING_UDMA = (1 << 8), + ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | + ATA_TIMING_REC8B | ATA_TIMING_CYC8B | + ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | + ATA_TIMING_DMACK_HOLD | ATA_TIMING_CYCLE | + 
ATA_TIMING_UDMA, + + /* ACPI constants */ + ATA_ACPI_FILTER_SETXFER = 1 << 0, + ATA_ACPI_FILTER_LOCK = 1 << 1, + ATA_ACPI_FILTER_DIPM = 1 << 2, + ATA_ACPI_FILTER_FPDMA_OFFSET = 1 << 3, /* FPDMA non-zero offset */ + ATA_ACPI_FILTER_FPDMA_AA = 1 << 4, /* FPDMA auto activate */ + + ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER | + ATA_ACPI_FILTER_LOCK | + ATA_ACPI_FILTER_DIPM, +}; + +enum ata_xfer_mask { + ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1) + << ATA_SHIFT_PIO, + ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1) + << ATA_SHIFT_MWDMA, + ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1) + << ATA_SHIFT_UDMA, +}; + +enum hsm_task_states { + HSM_ST_IDLE, /* no command on going */ + HSM_ST_FIRST, /* (waiting the device to) + write CDB or first data block */ + HSM_ST, /* (waiting the device to) transfer data */ + HSM_ST_LAST, /* (waiting the device to) complete command */ + HSM_ST_ERR, /* error */ +}; + +enum ata_completion_errors { + AC_ERR_OK = 0, /* no error */ + AC_ERR_DEV = (1 << 0), /* device reported error */ + AC_ERR_HSM = (1 << 1), /* host state machine violation */ + AC_ERR_TIMEOUT = (1 << 2), /* timeout */ + AC_ERR_MEDIA = (1 << 3), /* media error */ + AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */ + AC_ERR_HOST_BUS = (1 << 5), /* host bus error */ + AC_ERR_SYSTEM = (1 << 6), /* system error */ + AC_ERR_INVALID = (1 << 7), /* invalid argument */ + AC_ERR_OTHER = (1 << 8), /* unknown */ + AC_ERR_NODEV_HINT = (1 << 9), /* polling device detection hint */ + AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */ +}; + +/* + * Link power management policy: If you alter this, you also need to + * alter libata-scsi.c (for the ascii descriptions) + */ +enum ata_lpm_policy { + ATA_LPM_UNKNOWN, + ATA_LPM_MAX_POWER, + ATA_LPM_MED_POWER, + ATA_LPM_MED_POWER_WITH_DIPM, /* Med power + DIPM as win IRST does */ + ATA_LPM_MIN_POWER_WITH_PARTIAL, /* Min Power + partial and slumber */ + ATA_LPM_MIN_POWER, /* Min power + no partial (slumber only) */ +}; + +enum ata_lpm_hints { + ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */ + ATA_LPM_HIPM = (1 << 1), /* may use HIPM */ + ATA_LPM_WAKE_ONLY = (1 << 2), /* only wake up link */ +}; + +/* forward declarations */ +struct scsi_device; +struct ata_port_operations; +struct ata_port; +struct ata_link; +struct ata_queued_cmd; + +/* typedefs */ +typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); +typedef int (*ata_prereset_fn_t)(struct ata_link *link, unsigned long deadline); +typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes, + unsigned long deadline); +typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); + +extern struct device_attribute dev_attr_link_power_management_policy; +extern struct device_attribute dev_attr_unload_heads; +extern struct device_attribute dev_attr_ncq_prio_enable; +extern struct device_attribute dev_attr_em_message_type; +extern struct device_attribute dev_attr_em_message; +extern struct device_attribute dev_attr_sw_activity; + +enum sw_activity { + OFF, + BLINK_ON, + BLINK_OFF, +}; + +struct ata_taskfile { + unsigned long flags; /* ATA_TFLAG_xxx */ + u8 protocol; /* ATA_PROT_xxx */ + + u8 ctl; /* control reg */ + + u8 hob_feature; /* additional data */ + u8 hob_nsect; /* to support LBA48 */ + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + + u8 feature; + u8 nsect; + u8 lbal; + u8 lbam; + u8 lbah; + + u8 device; + + u8 command; /* IO operation */ + + u32 auxiliary; /* auxiliary field */ + /* from SATA 3.1 and */ + /* ATA-8 ACS-3 */ +}; + +#ifdef CONFIG_ATA_SFF 
+struct ata_ioports { + void __iomem *cmd_addr; + void __iomem *data_addr; + void __iomem *error_addr; + void __iomem *feature_addr; + void __iomem *nsect_addr; + void __iomem *lbal_addr; + void __iomem *lbam_addr; + void __iomem *lbah_addr; + void __iomem *device_addr; + void __iomem *status_addr; + void __iomem *command_addr; + void __iomem *altstatus_addr; + void __iomem *ctl_addr; +#ifdef CONFIG_ATA_BMDMA + void __iomem *bmdma_addr; +#endif /* CONFIG_ATA_BMDMA */ + void __iomem *scr_addr; +}; +#endif /* CONFIG_ATA_SFF */ + +struct ata_host { + spinlock_t lock; + struct device *dev; + void __iomem * const *iomap; + unsigned int n_ports; + unsigned int n_tags; /* nr of NCQ tags */ + void *private_data; + struct ata_port_operations *ops; + unsigned long flags; + struct kref kref; + + struct mutex eh_mutex; + struct task_struct *eh_owner; + + struct ata_port *simplex_claimed; /* channel owning the DMA */ + struct ata_port *ports[0]; +}; + +struct ata_queued_cmd { + struct ata_port *ap; + struct ata_device *dev; + + struct scsi_cmnd *scsicmd; + void (*scsidone)(struct scsi_cmnd *); + + struct ata_taskfile tf; + u8 cdb[ATAPI_CDB_LEN]; + + unsigned long flags; /* ATA_QCFLAG_xxx */ + unsigned int tag; /* libata core tag */ + unsigned int hw_tag; /* driver tag */ + unsigned int n_elem; + unsigned int orig_n_elem; + + int dma_dir; + + unsigned int sect_size; + + unsigned int nbytes; + unsigned int extrabytes; + unsigned int curbytes; + + struct scatterlist sgent; + + struct scatterlist *sg; + + struct scatterlist *cursg; + unsigned int cursg_ofs; + + unsigned int err_mask; + struct ata_taskfile result_tf; + ata_qc_cb_t complete_fn; + + void *private_data; + void *lldd_task; +}; + +struct ata_port_stats { + unsigned long unhandled_irq; + unsigned long idle_irq; + unsigned long rw_reqbuf; +}; + +struct ata_ering_entry { + unsigned int eflags; + unsigned int err_mask; + u64 timestamp; +}; + +struct ata_ering { + int cursor; + struct ata_ering_entry ring[ATA_ERING_SIZE]; +}; + +struct ata_device { + struct ata_link *link; + unsigned int devno; /* 0 or 1 */ + unsigned int horkage; /* List of broken features */ + unsigned long flags; /* ATA_DFLAG_xxx */ + struct scsi_device *sdev; /* attached SCSI device */ + void *private_data; +#ifdef CONFIG_ATA_ACPI + union acpi_object *gtf_cache; + unsigned int gtf_filter; +#endif +#ifdef CONFIG_SATA_ZPODD + void *zpodd; +#endif + struct device tdev; + /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ + u64 n_sectors; /* size of device, if ATA */ + u64 n_native_sectors; /* native size, if ATA */ + unsigned int class; /* ATA_DEV_xxx */ + unsigned long unpark_deadline; + + u8 pio_mode; + u8 dma_mode; + u8 xfer_mode; + unsigned int xfer_shift; /* ATA_SHIFT_xxx */ + + unsigned int multi_count; /* sectors count for + READ/WRITE MULTIPLE */ + unsigned int max_sectors; /* per-device max sectors */ + unsigned int cdb_len; + + /* per-dev xfer mask */ + unsigned long pio_mask; + unsigned long mwdma_mask; + unsigned long udma_mask; + + /* for CHS addressing */ + u16 cylinders; /* Number of cylinders */ + u16 heads; /* Number of heads */ + u16 sectors; /* Number of sectors per track */ + + union { + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ + } ____cacheline_aligned; + + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; + + /* NCQ send and receive log subcommand support */ + u8 ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_SIZE]; + u8 
ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_SIZE]; + + /* ZAC zone configuration */ + u32 zac_zoned_cap; + u32 zac_zones_optimal_open; + u32 zac_zones_optimal_nonseq; + u32 zac_zones_max_open; + + /* error history */ + int spdn_cnt; + /* ering is CLEAR_END, read comment above CLEAR_END */ + struct ata_ering ering; +}; + +/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are + * cleared to zero on ata_dev_init(). + */ +#define ATA_DEVICE_CLEAR_BEGIN offsetof(struct ata_device, n_sectors) +#define ATA_DEVICE_CLEAR_END offsetof(struct ata_device, ering) + +struct ata_eh_info { + struct ata_device *dev; /* offending device */ + u32 serror; /* SError from LLDD */ + unsigned int err_mask; /* port-wide err_mask */ + unsigned int action; /* ATA_EH_* action mask */ + unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */ + unsigned int flags; /* ATA_EHI_* flags */ + + unsigned int probe_mask; + + char desc[ATA_EH_DESC_LEN]; + int desc_len; +}; + +struct ata_eh_context { + struct ata_eh_info i; + int tries[ATA_MAX_DEVICES]; + int cmd_timeout_idx[ATA_MAX_DEVICES] + [ATA_EH_CMD_TIMEOUT_TABLE_SIZE]; + unsigned int classes[ATA_MAX_DEVICES]; + unsigned int did_probe_mask; + unsigned int unloaded_mask; + unsigned int saved_ncq_enabled; + u8 saved_xfer_mode[ATA_MAX_DEVICES]; + /* timestamp for the last reset attempt or success */ + unsigned long last_reset; +}; + +struct ata_acpi_drive +{ + u32 pio; + u32 dma; +} __packed; + +struct ata_acpi_gtm { + struct ata_acpi_drive drive[2]; + u32 flags; +} __packed; + +struct ata_link { + struct ata_port *ap; + int pmp; /* port multiplier port # */ + + struct device tdev; + unsigned int active_tag; /* active tag on this link */ + u32 sactive; /* active NCQ commands */ + + unsigned int flags; /* ATA_LFLAG_xxx */ + + u32 saved_scontrol; /* SControl on probe */ + unsigned int hw_sata_spd_limit; + unsigned int sata_spd_limit; + unsigned int sata_spd; /* current SATA PHY speed */ + enum ata_lpm_policy lpm_policy; + + /* record runtime error info, protected by host_set lock */ + struct ata_eh_info eh_info; + /* EH context */ + struct ata_eh_context eh_context; + + struct ata_device device[ATA_MAX_DEVICES]; + + unsigned long last_lpm_change; /* when last LPM change happened */ +}; +#define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag) +#define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0]) + +struct ata_port { + struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ + struct ata_port_operations *ops; + spinlock_t *lock; + /* Flags owned by the EH context. Only EH should touch these once the + port is active */ + unsigned long flags; /* ATA_FLAG_xxx */ + /* Flags that change dynamically, protected by ap->lock */ + unsigned int pflags; /* ATA_PFLAG_xxx */ + unsigned int print_id; /* user visible unique port ID */ + unsigned int local_port_no; /* host local port num */ + unsigned int port_no; /* 0 based port no. 
inside the host */ + +#ifdef CONFIG_ATA_SFF + struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ + u8 ctl; /* cache of ATA control register */ + u8 last_ctl; /* Cache last written value */ + struct ata_link* sff_pio_task_link; /* link currently used */ + struct delayed_work sff_pio_task; +#ifdef CONFIG_ATA_BMDMA + struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ + dma_addr_t bmdma_prd_dma; /* and its DMA mapping */ +#endif /* CONFIG_ATA_BMDMA */ +#endif /* CONFIG_ATA_SFF */ + + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + unsigned int cbl; /* cable type; ATA_CBL_xxx */ + + struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1]; + unsigned long sas_tag_allocated; /* for sas tag allocation only */ + u64 qc_active; + int nr_active_links; /* #links with active qcs */ + unsigned int sas_last_tag; /* track next tag hw expects */ + + struct ata_link link; /* host default link */ + struct ata_link *slave_link; /* see ata_slave_link_init() */ + + int nr_pmp_links; /* nr of available PMP links */ + struct ata_link *pmp_link; /* array of PMP links */ + struct ata_link *excl_link; /* for PMP qc exclusion */ + + struct ata_port_stats stats; + struct ata_host *host; + struct device *dev; + struct device tdev; + + struct mutex scsi_scan_mutex; + struct delayed_work hotplug_task; + struct work_struct scsi_rescan_task; + + unsigned int hsm_task_state; + + u32 msg_enable; + struct list_head eh_done_q; + wait_queue_head_t eh_wait_q; + int eh_tries; + struct completion park_req_pending; + + pm_message_t pm_mesg; + enum ata_lpm_policy target_lpm_policy; + + struct timer_list fastdrain_timer; + unsigned long fastdrain_cnt; + + async_cookie_t cookie; + + int em_message_type; + void *private_data; + +#ifdef CONFIG_ATA_ACPI + struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */ +#endif + /* owned by EH */ + u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; +}; + +/* The following initializer overrides a method to NULL whether one of + * its parent has the method defined or not. This is equivalent to + * ERR_PTR(-ENOENT). Unfortunately, ERR_PTR doesn't render a constant + * expression and thus can't be used as an initializer. 
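 *
 * A low-level driver can use it to explicitly disable a method it
 * would otherwise inherit, e.g. (an illustrative sketch; "my_port_ops"
 * is a made-up name):
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= ATA_OP_NULL,
 *	};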
+ */ +#define ATA_OP_NULL (void *)(unsigned long)(-ENOENT) + +struct ata_port_operations { + /* + * Command execution + */ + int (*qc_defer)(struct ata_queued_cmd *qc); + int (*check_atapi_dma)(struct ata_queued_cmd *qc); + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc); + unsigned int (*qc_issue)(struct ata_queued_cmd *qc); + bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); + + /* + * Configuration and exception handling + */ + int (*cable_detect)(struct ata_port *ap); + unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask); + void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); + void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); + int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); + unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); + + void (*dev_config)(struct ata_device *dev); + + void (*freeze)(struct ata_port *ap); + void (*thaw)(struct ata_port *ap); + ata_prereset_fn_t prereset; + ata_reset_fn_t softreset; + ata_reset_fn_t hardreset; + ata_postreset_fn_t postreset; + ata_prereset_fn_t pmp_prereset; + ata_reset_fn_t pmp_softreset; + ata_reset_fn_t pmp_hardreset; + ata_postreset_fn_t pmp_postreset; + void (*error_handler)(struct ata_port *ap); + void (*lost_interrupt)(struct ata_port *ap); + void (*post_internal_cmd)(struct ata_queued_cmd *qc); + void (*sched_eh)(struct ata_port *ap); + void (*end_eh)(struct ata_port *ap); + + /* + * Optional features + */ + int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val); + int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val); + void (*pmp_attach)(struct ata_port *ap); + void (*pmp_detach)(struct ata_port *ap); + int (*set_lpm)(struct ata_link *link, enum ata_lpm_policy policy, + unsigned hints); + + /* + * Start, stop, suspend and resume + */ + int (*port_suspend)(struct ata_port *ap, pm_message_t mesg); + int (*port_resume)(struct ata_port *ap); + int (*port_start)(struct ata_port *ap); + void (*port_stop)(struct ata_port *ap); + void (*host_stop)(struct ata_host *host); + +#ifdef CONFIG_ATA_SFF + /* + * SFF / taskfile oriented ops + */ + void (*sff_dev_select)(struct ata_port *ap, unsigned int device); + void (*sff_set_devctl)(struct ata_port *ap, u8 ctl); + u8 (*sff_check_status)(struct ata_port *ap); + u8 (*sff_check_altstatus)(struct ata_port *ap); + void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf); + void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf); + void (*sff_exec_command)(struct ata_port *ap, + const struct ata_taskfile *tf); + unsigned int (*sff_data_xfer)(struct ata_queued_cmd *qc, + unsigned char *buf, unsigned int buflen, int rw); + void (*sff_irq_on)(struct ata_port *); + bool (*sff_irq_check)(struct ata_port *); + void (*sff_irq_clear)(struct ata_port *); + void (*sff_drain_fifo)(struct ata_queued_cmd *qc); + +#ifdef CONFIG_ATA_BMDMA + void (*bmdma_setup)(struct ata_queued_cmd *qc); + void (*bmdma_start)(struct ata_queued_cmd *qc); + void (*bmdma_stop)(struct ata_queued_cmd *qc); + u8 (*bmdma_status)(struct ata_port *ap); +#endif /* CONFIG_ATA_BMDMA */ +#endif /* CONFIG_ATA_SFF */ + + ssize_t (*em_show)(struct ata_port *ap, char *buf); + ssize_t (*em_store)(struct ata_port *ap, const char *message, + size_t size); + ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf); + ssize_t (*sw_activity_store)(struct ata_device *dev, + enum sw_activity val); + ssize_t (*transmit_led_message)(struct ata_port *ap, u32 state, + ssize_t 
size); + + /* + * Obsolete + */ + void (*phy_reset)(struct ata_port *ap); + void (*eng_timeout)(struct ata_port *ap); + + /* + * ->inherits must be the last field and all the preceding + * fields must be pointers. + */ + const struct ata_port_operations *inherits; +}; + +struct ata_port_info { + unsigned long flags; + unsigned long link_flags; + unsigned long pio_mask; + unsigned long mwdma_mask; + unsigned long udma_mask; + struct ata_port_operations *port_ops; + void *private_data; +}; + +struct ata_timing { + unsigned short mode; /* ATA mode */ + unsigned short setup; /* t1 */ + unsigned short act8b; /* t2 for 8-bit I/O */ + unsigned short rec8b; /* t2i for 8-bit I/O */ + unsigned short cyc8b; /* t0 for 8-bit I/O */ + unsigned short active; /* t2 or tD */ + unsigned short recover; /* t2i or tK */ + unsigned short dmack_hold; /* tj */ + unsigned short cycle; /* t0 */ + unsigned short udma; /* t2CYCTYP/2 */ +}; + +/* + * Core layer - drivers/ata/libata-core.c + */ +extern const unsigned long sata_deb_timing_normal[]; +extern const unsigned long sata_deb_timing_hotplug[]; +extern const unsigned long sata_deb_timing_long[]; + +extern struct ata_port_operations ata_dummy_port_ops; +extern const struct ata_port_info ata_dummy_port_info; + +static inline bool ata_is_atapi(u8 prot) +{ + return prot & ATA_PROT_FLAG_ATAPI; +} + +static inline bool ata_is_pio(u8 prot) +{ + return prot & ATA_PROT_FLAG_PIO; +} + +static inline bool ata_is_dma(u8 prot) +{ + return prot & ATA_PROT_FLAG_DMA; +} + +static inline bool ata_is_ncq(u8 prot) +{ + return prot & ATA_PROT_FLAG_NCQ; +} + +static inline bool ata_is_data(u8 prot) +{ + return prot & (ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA); +} + +static inline int is_multi_taskfile(struct ata_taskfile *tf) +{ + return (tf->command == ATA_CMD_READ_MULTI) || + (tf->command == ATA_CMD_WRITE_MULTI) || + (tf->command == ATA_CMD_READ_MULTI_EXT) || + (tf->command == ATA_CMD_WRITE_MULTI_EXT) || + (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT); +} + +static inline const unsigned long * +sata_ehc_deb_timing(struct ata_eh_context *ehc) +{ + if (ehc->i.flags & ATA_EHI_HOTPLUGGED) + return sata_deb_timing_hotplug; + else + return sata_deb_timing_normal; +} + +static inline int ata_port_is_dummy(struct ata_port *ap) +{ + return ap->ops == &ata_dummy_port_ops; +} + +extern int sata_set_spd(struct ata_link *link); +extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); +extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, + int (*check_ready)(struct ata_link *link)); +extern int sata_link_debounce(struct ata_link *link, + const unsigned long *params, unsigned long deadline); +extern int sata_link_resume(struct ata_link *link, const unsigned long *params, + unsigned long deadline); +extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, + bool spm_wakeup); +extern int sata_link_hardreset(struct ata_link *link, + const unsigned long *timing, unsigned long deadline, + bool *online, int (*check_ready)(struct ata_link *)); +extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); + +extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); +extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, + const struct ata_port_info * const * ppi, int n_ports); +extern int ata_slave_link_init(struct ata_port *ap); +extern void ata_host_get(struct ata_host *host); +extern void 
ata_host_put(struct ata_host *host); +extern int ata_host_start(struct ata_host *host); +extern int ata_host_register(struct ata_host *host, + struct scsi_host_template *sht); +extern int ata_host_activate(struct ata_host *host, int irq, + irq_handler_t irq_handler, unsigned long irq_flags, + struct scsi_host_template *sht); +extern void ata_host_detach(struct ata_host *host); +extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *); +extern int ata_scsi_detect(struct scsi_host_template *sht); +extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); +extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); +extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, + int cmd, void __user *arg); +extern void ata_sas_port_destroy(struct ata_port *); +extern struct ata_port *ata_sas_port_alloc(struct ata_host *, + struct ata_port_info *, struct Scsi_Host *); +extern void ata_sas_async_probe(struct ata_port *ap); +extern int ata_sas_sync_probe(struct ata_port *ap); +extern int ata_sas_port_init(struct ata_port *); +extern int ata_sas_port_start(struct ata_port *ap); +extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap); +extern void ata_sas_tport_delete(struct ata_port *ap); +extern void ata_sas_port_stop(struct ata_port *ap); +extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); +extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); +extern int sata_scr_valid(struct ata_link *link); +extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); +extern int sata_scr_write(struct ata_link *link, int reg, u32 val); +extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); +extern bool ata_link_online(struct ata_link *link); +extern bool ata_link_offline(struct ata_link *link); +#ifdef CONFIG_PM +extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); +extern void ata_host_resume(struct ata_host *host); +extern void ata_sas_port_suspend(struct ata_port *ap); +extern void ata_sas_port_resume(struct ata_port *ap); +#else +static inline void ata_sas_port_suspend(struct ata_port *ap) +{ +} +static inline void ata_sas_port_resume(struct ata_port *ap) +{ +} +#endif +extern int ata_ratelimit(void); +extern void ata_msleep(struct ata_port *ap, unsigned int msecs); +extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, + u32 val, unsigned long interval, unsigned long timeout); +extern int atapi_cmd_type(u8 opcode); +extern void ata_tf_to_fis(const struct ata_taskfile *tf, + u8 pmp, int is_cmd, u8 *fis); +extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); +extern unsigned long ata_pack_xfermask(unsigned long pio_mask, + unsigned long mwdma_mask, unsigned long udma_mask); +extern void ata_unpack_xfermask(unsigned long xfer_mask, + unsigned long *pio_mask, unsigned long *mwdma_mask, + unsigned long *udma_mask); +extern u8 ata_xfer_mask2mode(unsigned long xfer_mask); +extern unsigned long ata_xfer_mode2mask(u8 xfer_mode); +extern int ata_xfer_mode2shift(unsigned long xfer_mode); +extern const char *ata_mode_string(unsigned long xfer_mask); +extern unsigned long ata_id_xfermask(const u16 *id); +extern int ata_std_qc_defer(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); +extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, + unsigned int n_elem); +extern unsigned int ata_dev_classify(const struct 
ata_taskfile *tf); +extern void ata_dev_disable(struct ata_device *adev); +extern void ata_id_string(const u16 *id, unsigned char *s, + unsigned int ofs, unsigned int len); +extern void ata_id_c_string(const u16 *id, unsigned char *s, + unsigned int ofs, unsigned int len); +extern unsigned int ata_do_dev_read_id(struct ata_device *dev, + struct ata_taskfile *tf, u16 *id); +extern void ata_qc_complete(struct ata_queued_cmd *qc); +extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active); +extern u64 ata_qc_get_active(struct ata_port *ap); +extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd); +extern int ata_std_bios_param(struct scsi_device *sdev, + struct block_device *bdev, + sector_t capacity, int geom[]); +extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); +extern int ata_scsi_slave_config(struct scsi_device *sdev); +extern void ata_scsi_slave_destroy(struct scsi_device *sdev); +extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, + int queue_depth); +extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, + int queue_depth); +extern struct ata_device *ata_dev_pair(struct ata_device *adev); +extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); +extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); +extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q); +extern bool sata_lpm_ignore_phy_events(struct ata_link *link); + +extern int ata_cable_40wire(struct ata_port *ap); +extern int ata_cable_80wire(struct ata_port *ap); +extern int ata_cable_sata(struct ata_port *ap); +extern int ata_cable_ignore(struct ata_port *ap); +extern int ata_cable_unknown(struct ata_port *ap); + +/* Timing helpers */ +extern unsigned int ata_pio_need_iordy(const struct ata_device *); +extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); +extern int ata_timing_compute(struct ata_device *, unsigned short, + struct ata_timing *, int, int); +extern void ata_timing_merge(const struct ata_timing *, + const struct ata_timing *, struct ata_timing *, + unsigned int); +extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); + +/* PCI */ +#ifdef CONFIG_PCI +struct pci_dev; + +struct pci_bits { + unsigned int reg; /* PCI config register to read */ + unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ + unsigned long mask; + unsigned long val; +}; + +extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); +extern void ata_pci_shutdown_one(struct pci_dev *pdev); +extern void ata_pci_remove_one(struct pci_dev *pdev); + +#ifdef CONFIG_PM +extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); +extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev); +extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); +extern int ata_pci_device_resume(struct pci_dev *pdev); +#endif /* CONFIG_PM */ +#endif /* CONFIG_PCI */ + +struct platform_device; + +extern int ata_platform_remove_one(struct platform_device *pdev); + +/* + * ACPI - drivers/ata/libata-acpi.c + */ +#ifdef CONFIG_ATA_ACPI +static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) +{ + if (ap->pflags & ATA_PFLAG_INIT_GTM_VALID) + return &ap->__acpi_init_gtm; + return NULL; +} +int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm); +int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm); +unsigned 
long ata_acpi_gtm_xfermask(struct ata_device *dev, + const struct ata_acpi_gtm *gtm); +int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm); +#else +static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) +{ + return NULL; +} + +static inline int ata_acpi_stm(const struct ata_port *ap, + struct ata_acpi_gtm *stm) +{ + return -ENOSYS; +} + +static inline int ata_acpi_gtm(const struct ata_port *ap, + struct ata_acpi_gtm *stm) +{ + return -ENOSYS; +} + +static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, + const struct ata_acpi_gtm *gtm) +{ + return 0; +} + +static inline int ata_acpi_cbl_80wire(struct ata_port *ap, + const struct ata_acpi_gtm *gtm) +{ + return 0; +} +#endif + +/* + * EH - drivers/ata/libata-eh.c + */ +extern void ata_port_schedule_eh(struct ata_port *ap); +extern void ata_port_wait_eh(struct ata_port *ap); +extern int ata_link_abort(struct ata_link *link); +extern int ata_port_abort(struct ata_port *ap); +extern int ata_port_freeze(struct ata_port *ap); +extern int sata_async_notification(struct ata_port *ap); + +extern void ata_eh_freeze_port(struct ata_port *ap); +extern void ata_eh_thaw_port(struct ata_port *ap); + +extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); +extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); +extern void ata_eh_analyze_ncq_error(struct ata_link *link); + +extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, + ata_reset_fn_t softreset, ata_reset_fn_t hardreset, + ata_postreset_fn_t postreset); +extern void ata_std_error_handler(struct ata_port *ap); +extern void ata_std_sched_eh(struct ata_port *ap); +extern void ata_std_end_eh(struct ata_port *ap); +extern int ata_link_nr_enabled(struct ata_link *link); + +/* + * Base operations to inherit from and initializers for sht + * + * Operations + * + * base : Common to all libata drivers. + * sata : SATA controllers w/ native interface. + * pmp : SATA controllers w/ PMP support. + * sff : SFF ATA controllers w/o BMDMA support. + * bmdma : SFF ATA controllers w/ BMDMA support. + * + * sht initializers + * + * BASE : Common to all libata drivers. The user must set + * sg_tablesize and dma_boundary. + * PIO : SFF ATA controllers w/ only PIO support. + * BMDMA : SFF ATA controllers w/ BMDMA support. sg_tablesize and + * dma_boundary are set to BMDMA limits. + * NCQ : SATA controllers supporting NCQ. The user must set + * sg_tablesize, dma_boundary and can_queue. + */ +extern const struct ata_port_operations ata_base_port_ops; +extern const struct ata_port_operations sata_port_ops; +extern struct device_attribute *ata_common_sdev_attrs[]; + +/* + * All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated + * by the edge drivers. Because the 'module' field of sht must be the + * edge driver's module reference, otherwise the driver can be unloaded + * even if the scsi_device is being accessed. 
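To illustrate the convention spelled out in the comment above, here is a minimal, hypothetical sketch (the names "my_drv", my_sht and my_port_ops are placeholders, not part of this patch) of how an edge driver is expected to instantiate its sht with these macros and inherit the base port operations:

	static struct scsi_host_template my_sht = {
		ATA_NCQ_SHT("my_drv"),		/* sets .module = THIS_MODULE etc. */
		.can_queue	= ATA_DEF_QUEUE,	/* NCQ users must set these three */
		.sg_tablesize	= LIBATA_MAX_PRD,
		.dma_boundary	= ATA_DMA_BOUNDARY,
	};

	static struct ata_port_operations my_port_ops = {
		.inherits	= &sata_port_ops,	/* take the SATA defaults */
	};

Because the macro carries the edge driver's THIS_MODULE reference, module refcounting works as the comment requires.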
+ */ +#define ATA_BASE_SHT(drv_name) \ + .module = THIS_MODULE, \ + .name = drv_name, \ + .ioctl = ata_scsi_ioctl, \ + .queuecommand = ata_scsi_queuecmd, \ + .can_queue = ATA_DEF_QUEUE, \ + .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ + .this_id = ATA_SHT_THIS_ID, \ + .emulated = ATA_SHT_EMULATED, \ + .use_clustering = ATA_SHT_USE_CLUSTERING, \ + .proc_name = drv_name, \ + .slave_configure = ata_scsi_slave_config, \ + .slave_destroy = ata_scsi_slave_destroy, \ + .bios_param = ata_std_bios_param, \ + .unlock_native_capacity = ata_scsi_unlock_native_capacity, \ + .sdev_attrs = ata_common_sdev_attrs + +#define ATA_NCQ_SHT(drv_name) \ + ATA_BASE_SHT(drv_name), \ + .change_queue_depth = ata_scsi_change_queue_depth + +/* + * PMP helpers + */ +#ifdef CONFIG_SATA_PMP +static inline bool sata_pmp_supported(struct ata_port *ap) +{ + return ap->flags & ATA_FLAG_PMP; +} + +static inline bool sata_pmp_attached(struct ata_port *ap) +{ + return ap->nr_pmp_links != 0; +} + +static inline bool ata_is_host_link(const struct ata_link *link) +{ + return link == &link->ap->link || link == link->ap->slave_link; +} +#else /* CONFIG_SATA_PMP */ +static inline bool sata_pmp_supported(struct ata_port *ap) +{ + return false; +} + +static inline bool sata_pmp_attached(struct ata_port *ap) +{ + return false; +} + +static inline bool ata_is_host_link(const struct ata_link *link) +{ + return 1; +} +#endif /* CONFIG_SATA_PMP */ + +static inline int sata_srst_pmp(struct ata_link *link) +{ + if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) + return SATA_PMP_CTRL_PORT; + return link->pmp; +} + +/* + * printk helpers + */ +__printf(3, 4) +void ata_port_printk(const struct ata_port *ap, const char *level, + const char *fmt, ...); +__printf(3, 4) +void ata_link_printk(const struct ata_link *link, const char *level, + const char *fmt, ...); +__printf(3, 4) +void ata_dev_printk(const struct ata_device *dev, const char *level, + const char *fmt, ...); + +#define ata_port_err(ap, fmt, ...) \ + ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) +#define ata_port_warn(ap, fmt, ...) \ + ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__) +#define ata_port_notice(ap, fmt, ...) \ + ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__) +#define ata_port_info(ap, fmt, ...) \ + ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__) +#define ata_port_dbg(ap, fmt, ...) \ + ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__) + +#define ata_link_err(link, fmt, ...) \ + ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__) +#define ata_link_warn(link, fmt, ...) \ + ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__) +#define ata_link_notice(link, fmt, ...) \ + ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__) +#define ata_link_info(link, fmt, ...) \ + ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__) +#define ata_link_dbg(link, fmt, ...) \ + ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__) + +#define ata_dev_err(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__) +#define ata_dev_warn(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__) +#define ata_dev_notice(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__) +#define ata_dev_info(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) +#define ata_dev_dbg(dev, fmt, ...) 
\ + ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__) + +void ata_print_version(const struct device *dev, const char *version); + +/* + * ata_eh_info helpers + */ +extern __printf(2, 3) +void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...); +extern __printf(2, 3) +void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...); +extern void ata_ehi_clear_desc(struct ata_eh_info *ehi); + +static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) +{ + ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; + ehi->flags |= ATA_EHI_HOTPLUGGED; + ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK; + ehi->err_mask |= AC_ERR_ATA_BUS; +} + +/* + * port description helpers + */ +extern __printf(2, 3) +void ata_port_desc(struct ata_port *ap, const char *fmt, ...); +#ifdef CONFIG_PCI +extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, + const char *name); +#endif + +static inline bool ata_tag_internal(unsigned int tag) +{ + return tag == ATA_TAG_INTERNAL; +} + +static inline bool ata_tag_valid(unsigned int tag) +{ + return tag < ATA_MAX_QUEUE || ata_tag_internal(tag); +} + +#define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \ + for ((tag) = 0; (tag) < (max_tag) && \ + ({ qc = fn((ap), (tag)); 1; }); (tag)++) \ + +/* + * Internal use only, iterate commands ignoring error handling and + * status of 'qc'. + */ +#define ata_qc_for_each_raw(ap, qc, tag) \ + __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag) + +/* + * Iterate all potential commands that can be queued + */ +#define ata_qc_for_each(ap, qc, tag) \ + __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag) + +/* + * Like ata_qc_for_each, but with the internal tag included + */ +#define ata_qc_for_each_with_internal(ap, qc, tag) \ + __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag) + +/* + * device helpers + */ +static inline unsigned int ata_class_enabled(unsigned int class) +{ + return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || + class == ATA_DEV_PMP || class == ATA_DEV_SEMB || + class == ATA_DEV_ZAC; +} + +static inline unsigned int ata_class_disabled(unsigned int class) +{ + return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP || + class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP || + class == ATA_DEV_ZAC_UNSUP; +} + +static inline unsigned int ata_class_absent(unsigned int class) +{ + return !ata_class_enabled(class) && !ata_class_disabled(class); +} + +static inline unsigned int ata_dev_enabled(const struct ata_device *dev) +{ + return ata_class_enabled(dev->class); +} + +static inline unsigned int ata_dev_disabled(const struct ata_device *dev) +{ + return ata_class_disabled(dev->class); +} + +static inline unsigned int ata_dev_absent(const struct ata_device *dev) +{ + return ata_class_absent(dev->class); +} + +/* + * link helpers + */ +static inline int ata_link_max_devices(const struct ata_link *link) +{ + if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) + return 2; + return 1; +} + +static inline int ata_link_active(struct ata_link *link) +{ + return ata_tag_valid(link->active_tag) || link->sactive; +} + +/* + * Iterators + * + * ATA_LITER_* constants are used to select link iteration mode and + * ATA_DITER_* device iteration mode. + * + * For a custom iteration directly using ata_{link|dev}_next(), if + * @link or @dev, respectively, is NULL, the first element is + * returned. @dev and @link can be any valid device or link and the + * next element according to the iteration mode will be returned. 
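Referring back to the ata_qc_for_each() helpers defined above, a hedged sketch (my_count_active is a hypothetical example function) of walking a port's queued commands:

	static unsigned int my_count_active(struct ata_port *ap)
	{
		struct ata_queued_cmd *qc;
		unsigned int tag, nr_active = 0;

		/* qc may be NULL for tags with no active command */
		ata_qc_for_each(ap, qc, tag)
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE))
				nr_active++;

		return nr_active;
	}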
+ * After the last element, NULL is returned. + */ +enum ata_link_iter_mode { + ATA_LITER_EDGE, /* if present, PMP links only; otherwise, + * host link. no slave link */ + ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */ + ATA_LITER_PMP_FIRST, /* PMP links followed by host link, + * slave link still comes after host link */ +}; + +enum ata_dev_iter_mode { + ATA_DITER_ENABLED, + ATA_DITER_ENABLED_REVERSE, + ATA_DITER_ALL, + ATA_DITER_ALL_REVERSE, +}; + +extern struct ata_link *ata_link_next(struct ata_link *link, + struct ata_port *ap, + enum ata_link_iter_mode mode); + +extern struct ata_device *ata_dev_next(struct ata_device *dev, + struct ata_link *link, + enum ata_dev_iter_mode mode); + +/* + * Shortcut notation for iterations + * + * ata_for_each_link() iterates over each link of @ap according to + * @mode. @link points to the current link in the loop. @link is + * NULL after loop termination. ata_for_each_dev() works the same way + * except that it iterates over each device of @link. + * + * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be + * specified when using the following shorthand notations. Only the + * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be + * specified. This not only increases brevity but also makes it + * impossible to use ATA_LITER_* for device iteration or vice-versa. + */ +#define ata_for_each_link(link, ap, mode) \ + for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \ + (link) = ata_link_next((link), (ap), ATA_LITER_##mode)) + +#define ata_for_each_dev(dev, link, mode) \ + for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \ + (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode)) + +/** + * ata_ncq_enabled - Test whether NCQ is enabled + * @dev: ATA device to test for + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * 1 if NCQ is enabled for @dev, 0 otherwise. 
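As a usage sketch of the shorthand iterators documented above (my_disable_all is a hypothetical example, not part of this patch):

	static void my_disable_all(struct ata_port *ap)
	{
		struct ata_link *link;
		struct ata_device *dev;

		/* only the mode suffix is given; ATA_LITER_/ATA_DITER_ are implied */
		ata_for_each_link(link, ap, EDGE)
			ata_for_each_dev(dev, link, ENABLED)
				ata_dev_disable(dev);
	}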
+ */ +static inline int ata_ncq_enabled(struct ata_device *dev) +{ + return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF | + ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; +} + +static inline bool ata_fpdma_dsm_supported(struct ata_device *dev) +{ + return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && + (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] & + ATA_LOG_NCQ_SEND_RECV_DSM_TRIM); +} + +static inline bool ata_fpdma_read_log_supported(struct ata_device *dev) +{ + return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && + (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET] & + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED); +} + +static inline bool ata_fpdma_zac_mgmt_in_supported(struct ata_device *dev) +{ + return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && + (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET] & + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED); +} + +static inline bool ata_fpdma_zac_mgmt_out_supported(struct ata_device *dev) +{ + return (dev->ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET] & + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT); +} + +static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) +{ + qc->tf.ctl |= ATA_NIEN; +} + +static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap, + unsigned int tag) +{ + if (ata_tag_valid(tag)) + return &ap->qcmd[tag]; + return NULL; +} + +static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap, + unsigned int tag) +{ + struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); + + if (unlikely(!qc) || !ap->ops->error_handler) + return qc; + + if ((qc->flags & (ATA_QCFLAG_ACTIVE | + ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE) + return qc; + + return NULL; +} + +static inline unsigned int ata_qc_raw_nbytes(struct ata_queued_cmd *qc) +{ + return qc->nbytes - min(qc->extrabytes, qc->nbytes); +} + +static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf) +{ + memset(tf, 0, sizeof(*tf)); + +#ifdef CONFIG_ATA_SFF + tf->ctl = dev->link->ap->ctl; +#else + tf->ctl = ATA_DEVCTL_OBS; +#endif + if (dev->devno == 0) + tf->device = ATA_DEVICE_OBS; + else + tf->device = ATA_DEVICE_OBS | ATA_DEV1; +} + +static inline void ata_qc_reinit(struct ata_queued_cmd *qc) +{ + qc->dma_dir = DMA_NONE; + qc->sg = NULL; + qc->flags = 0; + qc->cursg = NULL; + qc->cursg_ofs = 0; + qc->nbytes = qc->extrabytes = qc->curbytes = 0; + qc->n_elem = 0; + qc->err_mask = 0; + qc->sect_size = ATA_SECT_SIZE; + + ata_tf_init(qc->dev, &qc->tf); + + /* init result_tf such that it indicates normal completion */ + qc->result_tf.command = ATA_DRDY; + qc->result_tf.feature = 0; +} + +static inline int ata_try_flush_cache(const struct ata_device *dev) +{ + return ata_id_wcache_enabled(dev->id) || + ata_id_has_flush(dev->id) || + ata_id_has_flush_ext(dev->id); +} + +static inline unsigned int ac_err_mask(u8 status) +{ + if (status & (ATA_BUSY | ATA_DRQ)) + return AC_ERR_HSM; + if (status & (ATA_ERR | ATA_DF)) + return AC_ERR_DEV; + return 0; +} + +static inline unsigned int __ac_err_mask(u8 status) +{ + unsigned int mask = ac_err_mask(status); + if (mask == 0) + return AC_ERR_OTHER; + return mask; +} + +static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host) +{ + return *(struct ata_port **)&host->hostdata[0]; +} + +static inline int ata_check_ready(u8 status) +{ + if (!(status & ATA_BUSY)) + return 1; + + /* 0xff indicates either no device or device not ready */ + if (status == 0xff) + return -ENODEV; + + return 0; +} + +static inline unsigned long ata_deadline(unsigned long from_jiffies, + 
unsigned long timeout_msecs) +{ + return from_jiffies + msecs_to_jiffies(timeout_msecs); +} + +/* Don't open code these in drivers as there are traps. Firstly the range may + change in future hardware and specs, secondly 0xFF means 'no DMA' but is + > UDMA_0. Dyma ddreigiau */ + +static inline int ata_using_mwdma(struct ata_device *adev) +{ + if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4) + return 1; + return 0; +} + +static inline int ata_using_udma(struct ata_device *adev) +{ + if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7) + return 1; + return 0; +} + +static inline int ata_dma_enabled(struct ata_device *adev) +{ + return (adev->dma_mode == 0xFF ? 0 : 1); +} + +/************************************************************************** + * PMP - drivers/ata/libata-pmp.c + */ +#ifdef CONFIG_SATA_PMP + +extern const struct ata_port_operations sata_pmp_port_ops; + +extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); +extern void sata_pmp_error_handler(struct ata_port *ap); + +#else /* CONFIG_SATA_PMP */ + +#define sata_pmp_port_ops sata_port_ops +#define sata_pmp_qc_defer_cmd_switch ata_std_qc_defer +#define sata_pmp_error_handler ata_std_error_handler + +#endif /* CONFIG_SATA_PMP */ + + +/************************************************************************** + * SFF - drivers/ata/libata-sff.c + */ +#ifdef CONFIG_ATA_SFF + +extern const struct ata_port_operations ata_sff_port_ops; +extern const struct ata_port_operations ata_bmdma32_port_ops; + +/* PIO only, sg_tablesize and dma_boundary limits can be removed */ +#define ATA_PIO_SHT(drv_name) \ + ATA_BASE_SHT(drv_name), \ + .sg_tablesize = LIBATA_MAX_PRD, \ + .dma_boundary = ATA_DMA_BOUNDARY + +extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device); +extern u8 ata_sff_check_status(struct ata_port *ap); +extern void ata_sff_pause(struct ata_port *ap); +extern void ata_sff_dma_pause(struct ata_port *ap); +extern int ata_sff_busy_sleep(struct ata_port *ap, + unsigned long timeout_pat, unsigned long timeout); +extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline); +extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); +extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_sff_exec_command(struct ata_port *ap, + const struct ata_taskfile *tf); +extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, + unsigned char *buf, unsigned int buflen, int rw); +extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, + unsigned char *buf, unsigned int buflen, int rw); +extern void ata_sff_irq_on(struct ata_port *ap); +extern void ata_sff_irq_clear(struct ata_port *ap); +extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, + u8 status, int in_wq); +extern void ata_sff_queue_work(struct work_struct *work); +extern void ata_sff_queue_delayed_work(struct delayed_work *dwork, + unsigned long delay); +extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay); +extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); +extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); +extern unsigned int ata_sff_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc); +extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance); +extern void ata_sff_lost_interrupt(struct ata_port *ap); +extern void ata_sff_freeze(struct ata_port *ap); +extern void ata_sff_thaw(struct ata_port *ap); +extern int ata_sff_prereset(struct 
ata_link *link, unsigned long deadline); +extern unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, + u8 *r_err); +extern int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, + unsigned long deadline); +extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline); +extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes); +extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc); +extern void ata_sff_error_handler(struct ata_port *ap); +extern void ata_sff_std_ports(struct ata_ioports *ioaddr); +#ifdef CONFIG_PCI +extern int ata_pci_sff_init_host(struct ata_host *host); +extern int ata_pci_sff_prepare_host(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct ata_host **r_host); +extern int ata_pci_sff_activate_host(struct ata_host *host, + irq_handler_t irq_handler, + struct scsi_host_template *sht); +extern int ata_pci_sff_init_one(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct scsi_host_template *sht, void *host_priv, int hflags); +#endif /* CONFIG_PCI */ + +#ifdef CONFIG_ATA_BMDMA + +extern const struct ata_port_operations ata_bmdma_port_ops; + +#define ATA_BMDMA_SHT(drv_name) \ + ATA_BASE_SHT(drv_name), \ + .sg_tablesize = LIBATA_MAX_PRD, \ + .dma_boundary = ATA_DMA_BOUNDARY + +extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc); +extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); +extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc); +extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); +extern void ata_bmdma_error_handler(struct ata_port *ap); +extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc); +extern void ata_bmdma_irq_clear(struct ata_port *ap); +extern void ata_bmdma_setup(struct ata_queued_cmd *qc); +extern void ata_bmdma_start(struct ata_queued_cmd *qc); +extern void ata_bmdma_stop(struct ata_queued_cmd *qc); +extern u8 ata_bmdma_status(struct ata_port *ap); +extern int ata_bmdma_port_start(struct ata_port *ap); +extern int ata_bmdma_port_start32(struct ata_port *ap); + +#ifdef CONFIG_PCI +extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev); +extern void ata_pci_bmdma_init(struct ata_host *host); +extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct ata_host **r_host); +extern int ata_pci_bmdma_init_one(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct scsi_host_template *sht, + void *host_priv, int hflags); +#endif /* CONFIG_PCI */ +#endif /* CONFIG_ATA_BMDMA */ + +/** + * ata_sff_busy_wait - Wait for a port status register + * @ap: Port to wait for. + * @bits: bits that must be clear + * @max: number of 10uS waits to perform + * + * Waits up to max*10 microseconds for the selected bits in the port's + * status register to be cleared. + * Returns final value of status register. + * + * LOCKING: + * Inherited from caller. 
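For the SFF layer above, a hedged sketch (hypothetical names, illustrative only) of how a PIO-only driver would typically combine the ATA_PIO_SHT initializer with the inherited SFF operations:

	static struct scsi_host_template my_pio_sht = {
		ATA_PIO_SHT("my_pio_drv"),
	};

	static struct ata_port_operations my_pio_ops = {
		.inherits	= &ata_sff_port_ops,	/* SFF taskfile/HSM defaults */
		.cable_detect	= ata_cable_40wire,	/* report a 40-wire cable */
	};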
+ */ +static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits, + unsigned int max) +{ + u8 status; + + do { + udelay(10); + status = ap->ops->sff_check_status(ap); + max--; + } while (status != 0xff && (status & bits) && (max > 0)); + + return status; +} + +/** + * ata_wait_idle - Wait for a port to be idle. + * @ap: Port to wait for. + * + * Waits up to 10ms for port's BUSY and DRQ signals to clear. + * Returns final value of status register. + * + * LOCKING: + * Inherited from caller. + */ +static inline u8 ata_wait_idle(struct ata_port *ap) +{ + u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + +#ifdef ATA_DEBUG + if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) + ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", + status); +#endif + + return status; +} +#endif /* CONFIG_ATA_SFF */ + +#endif /* __LINUX_LIBATA_H__ */ diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h new file mode 100644 index 000000000..90ed4ebfa --- /dev/null +++ b/include/linux/libfdt.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _INCLUDE_LIBFDT_H_ +#define _INCLUDE_LIBFDT_H_ + +#include +#include "../../scripts/dtc/libfdt/libfdt.h" + +#endif /* _INCLUDE_LIBFDT_H_ */ diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h new file mode 100644 index 000000000..1adf54aad --- /dev/null +++ b/include/linux/libfdt_env.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LIBFDT_ENV_H +#define LIBFDT_ENV_H + +#include /* For INT_MAX */ +#include + +#include + +#define INT32_MAX S32_MAX +#define UINT32_MAX U32_MAX + +typedef __be16 fdt16_t; +typedef __be32 fdt32_t; +typedef __be64 fdt64_t; + +#define fdt32_to_cpu(x) be32_to_cpu(x) +#define cpu_to_fdt32(x) cpu_to_be32(x) +#define fdt64_to_cpu(x) be64_to_cpu(x) +#define cpu_to_fdt64(x) cpu_to_be64(x) + +#endif /* LIBFDT_ENV_H */ diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h new file mode 100644 index 000000000..32e1e0f4b --- /dev/null +++ b/include/linux/libgcc.h @@ -0,0 +1,43 @@ +/* + * include/lib/libgcc.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc. + */ + +#ifndef __LIB_LIBGCC_H +#define __LIB_LIBGCC_H + +#include + +typedef int word_type __attribute__ ((mode (__word__))); + +#ifdef __BIG_ENDIAN +struct DWstruct { + int high, low; +}; +#elif defined(__LITTLE_ENDIAN) +struct DWstruct { + int low, high; +}; +#else +#error I feel sick. +#endif + +typedef union { + struct DWstruct s; + long long ll; +} DWunion; + +#endif /* __ASM_LIBGCC_H */ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h new file mode 100644 index 000000000..097072c5a --- /dev/null +++ b/include/linux/libnvdimm.h @@ -0,0 +1,222 @@ +/* + * libnvdimm - Non-volatile-memory Devices Subsystem + * + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 
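Referring back to the DWstruct/DWunion types in libgcc.h above, a small illustrative sketch of their intent (values assume a 32-bit word size):

	DWunion u;

	u.ll = 0x0123456789abcdefLL;
	/* on a little-endian build the low machine word holds 0x89abcdef
	 * and the high word holds 0x01234567; big-endian swaps the members */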
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef __LIBNVDIMM_H__ +#define __LIBNVDIMM_H__ +#include +#include +#include +#include +#include + +struct badrange_entry { + u64 start; + u64 length; + struct list_head list; +}; + +struct badrange { + struct list_head list; + spinlock_t lock; +}; + +enum { + /* when a dimm supports both PMEM and BLK access a label is required */ + NDD_ALIASING = 0, + /* unarmed memory devices may not persist writes */ + NDD_UNARMED = 1, + /* locked memory devices should not be accessed */ + NDD_LOCKED = 2, + + /* need to set a limit somewhere, but yes, this is likely overkill */ + ND_IOCTL_MAX_BUFLEN = SZ_4M, + ND_CMD_MAX_ELEM = 5, + ND_CMD_MAX_ENVELOPE = 256, + ND_MAX_MAPPINGS = 32, + + /* region flag indicating to direct-map persistent memory by default */ + ND_REGION_PAGEMAP = 0, + /* + * Platform ensures entire CPU store data path is flushed to pmem on + * system power loss. + */ + ND_REGION_PERSIST_CACHE = 1, + /* + * Platform provides mechanisms to automatically flush outstanding + * write data from memory controler to pmem on system power loss. + * (ADR) + */ + ND_REGION_PERSIST_MEMCTRL = 2, + + /* mark newly adjusted resources as requiring a label update */ + DPA_RESOURCE_ADJUSTED = 1 << 0, +}; + +extern struct attribute_group nvdimm_bus_attribute_group; +extern struct attribute_group nvdimm_attribute_group; +extern struct attribute_group nd_device_attribute_group; +extern struct attribute_group nd_numa_attribute_group; +extern struct attribute_group nd_region_attribute_group; +extern struct attribute_group nd_mapping_attribute_group; + +struct nvdimm; +struct nvdimm_bus_descriptor; +typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, + struct nvdimm *nvdimm, unsigned int cmd, void *buf, + unsigned int buf_len, int *cmd_rc); + +struct device_node; +struct nvdimm_bus_descriptor { + const struct attribute_group **attr_groups; + unsigned long bus_dsm_mask; + unsigned long cmd_mask; + struct module *module; + char *provider_name; + struct device_node *of_node; + ndctl_fn ndctl; + int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); + int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, + struct nvdimm *nvdimm, unsigned int cmd); +}; + +struct nd_cmd_desc { + int in_num; + int out_num; + u32 in_sizes[ND_CMD_MAX_ELEM]; + int out_sizes[ND_CMD_MAX_ELEM]; +}; + +struct nd_interleave_set { + /* v1.1 definition of the interleave-set-cookie algorithm */ + u64 cookie1; + /* v1.2 definition of the interleave-set-cookie algorithm */ + u64 cookie2; + /* compatibility with initial buggy Linux implementation */ + u64 altcookie; + + guid_t type_guid; +}; + +struct nd_mapping_desc { + struct nvdimm *nvdimm; + u64 start; + u64 size; + int position; +}; + +struct nd_region_desc { + struct resource *res; + struct nd_mapping_desc *mapping; + u16 num_mappings; + const struct attribute_group **attr_groups; + struct nd_interleave_set *nd_set; + void *provider_data; + int num_lanes; + int numa_node; + unsigned long flags; + struct device_node *of_node; +}; + +struct device; +void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset, + 
size_t size, unsigned long flags); +static inline void __iomem *devm_nvdimm_ioremap(struct device *dev, + resource_size_t offset, size_t size) +{ + return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0); +} + +struct nvdimm_bus; +struct module; +struct device; +struct nd_blk_region; +struct nd_blk_region_desc { + int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); + int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, + void *iobuf, u64 len, int rw); + struct nd_region_desc ndr_desc; +}; + +static inline struct nd_blk_region_desc *to_blk_region_desc( + struct nd_region_desc *ndr_desc) +{ + return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc); + +} + +void badrange_init(struct badrange *badrange); +int badrange_add(struct badrange *badrange, u64 addr, u64 length); +void badrange_forget(struct badrange *badrange, phys_addr_t start, + unsigned int len); +int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, + u64 length); +struct nvdimm_bus *nvdimm_bus_register(struct device *parent, + struct nvdimm_bus_descriptor *nfit_desc); +void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); +struct nvdimm_bus *to_nvdimm_bus(struct device *dev); +struct nvdimm *to_nvdimm(struct device *dev); +struct nd_region *to_nd_region(struct device *dev); +struct device *nd_region_dev(struct nd_region *nd_region); +struct nd_blk_region *to_nd_blk_region(struct device *dev); +struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); +struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); +const char *nvdimm_name(struct nvdimm *nvdimm); +struct kobject *nvdimm_kobj(struct nvdimm *nvdimm); +unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm); +void *nvdimm_provider_data(struct nvdimm *nvdimm); +struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, + const struct attribute_group **groups, unsigned long flags, + unsigned long cmd_mask, int num_flush, + struct resource *flush_wpq); +const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); +const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); +u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, + const struct nd_cmd_desc *desc, int idx, void *buf); +u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd, + const struct nd_cmd_desc *desc, int idx, const u32 *in_field, + const u32 *out_field, unsigned long remainder); +int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count); +struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus, + struct nd_region_desc *ndr_desc); +struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus, + struct nd_region_desc *ndr_desc); +struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, + struct nd_region_desc *ndr_desc); +void *nd_region_provider_data(struct nd_region *nd_region); +void *nd_blk_region_provider_data(struct nd_blk_region *ndbr); +void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data); +struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr); +unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr); +unsigned int nd_region_acquire_lane(struct nd_region *nd_region); +void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); +u64 nd_fletcher64(void *addr, size_t len, bool le); +void nvdimm_flush(struct nd_region *nd_region); +int nvdimm_has_flush(struct nd_region *nd_region); +int nvdimm_has_cache(struct nd_region *nd_region); + +#ifdef CONFIG_ARCH_HAS_PMEM_API +#define 
ARCH_MEMREMAP_PMEM MEMREMAP_WB +void arch_wb_cache_pmem(void *addr, size_t size); +void arch_invalidate_pmem(void *addr, size_t size); +#else +#define ARCH_MEMREMAP_PMEM MEMREMAP_WT +static inline void arch_wb_cache_pmem(void *addr, size_t size) +{ +} +static inline void arch_invalidate_pmem(void *addr, size_t size) +{ +} +#endif + +#endif /* __LIBNVDIMM_H__ */ diff --git a/include/linux/libps2.h b/include/linux/libps2.h new file mode 100644 index 000000000..5f18fe02a --- /dev/null +++ b/include/linux/libps2.h @@ -0,0 +1,64 @@ +#ifndef _LIBPS2_H +#define _LIBPS2_H + +/* + * Copyright (C) 1999-2002 Vojtech Pavlik + * Copyright (C) 2004 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include + +#define PS2_CMD_SETSCALE11 0x00e6 +#define PS2_CMD_SETRES 0x10e8 +#define PS2_CMD_GETID 0x02f2 +#define PS2_CMD_RESET_BAT 0x02ff + +#define PS2_RET_BAT 0xaa +#define PS2_RET_ID 0x00 +#define PS2_RET_ACK 0xfa +#define PS2_RET_NAK 0xfe +#define PS2_RET_ERR 0xfc + +#define PS2_FLAG_ACK BIT(0) /* Waiting for ACK/NAK */ +#define PS2_FLAG_CMD BIT(1) /* Waiting for a command to finish */ +#define PS2_FLAG_CMD1 BIT(2) /* Waiting for the first byte of command response */ +#define PS2_FLAG_WAITID BIT(3) /* Command executing is GET ID */ +#define PS2_FLAG_NAK BIT(4) /* Last transmission was NAKed */ +#define PS2_FLAG_ACK_CMD BIT(5) /* Waiting to ACK the command (first) byte */ + +struct ps2dev { + struct serio *serio; + + /* Ensures that only one command is executing at a time */ + struct mutex cmd_mutex; + + /* Used to signal completion from interrupt handler */ + wait_queue_head_t wait; + + unsigned long flags; + u8 cmdbuf[8]; + u8 cmdcnt; + u8 nak; +}; + +void ps2_init(struct ps2dev *ps2dev, struct serio *serio); +int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout); +void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout); +void ps2_begin_command(struct ps2dev *ps2dev); +void ps2_end_command(struct ps2dev *ps2dev); +int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command); +int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command); +int ps2_sliced_command(struct ps2dev *ps2dev, u8 command); +bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data); +bool ps2_handle_response(struct ps2dev *ps2dev, u8 data); +void ps2_cmd_aborted(struct ps2dev *ps2dev); +bool ps2_is_keyboard_id(u8 id); + +#endif /* _LIBPS2_H */ diff --git a/include/linux/license.h b/include/linux/license.h new file mode 100644 index 000000000..decdbf43c --- /dev/null +++ b/include/linux/license.h @@ -0,0 +1,14 @@ +#ifndef __LICENSE_H +#define __LICENSE_H + +static inline int license_is_gpl_compatible(const char *license) +{ + return (strcmp(license, "GPL") == 0 + || strcmp(license, "GPL v2") == 0 + || strcmp(license, "GPL and additional rights") == 0 + || strcmp(license, "Dual BSD/GPL") == 0 + || strcmp(license, "Dual MIT/GPL") == 0 + || strcmp(license, "Dual MPL/GPL") == 0); +} + +#endif diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h new file mode 100644 index 000000000..e9e0d1c7e --- /dev/null +++ b/include/linux/lightnvm.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef NVM_H +#define NVM_H + +#include +#include +#include + +enum { + NVM_IO_OK = 0, + NVM_IO_REQUEUE = 1, + NVM_IO_DONE = 2, + NVM_IO_ERR = 3, + + NVM_IOTYPE_NONE = 0, + 
NVM_IOTYPE_GC = 1, +}; + +/* common format */ +#define NVM_GEN_CH_BITS (8) +#define NVM_GEN_LUN_BITS (8) +#define NVM_GEN_BLK_BITS (16) +#define NVM_GEN_RESERVED (32) + +/* 1.2 format */ +#define NVM_12_PG_BITS (16) +#define NVM_12_PL_BITS (4) +#define NVM_12_SEC_BITS (4) +#define NVM_12_RESERVED (8) + +/* 2.0 format */ +#define NVM_20_SEC_BITS (24) +#define NVM_20_RESERVED (8) + +enum { + NVM_OCSSD_SPEC_12 = 12, + NVM_OCSSD_SPEC_20 = 20, +}; + +struct ppa_addr { + /* Generic structure for all addresses */ + union { + /* generic device format */ + struct { + u64 ch : NVM_GEN_CH_BITS; + u64 lun : NVM_GEN_LUN_BITS; + u64 blk : NVM_GEN_BLK_BITS; + u64 reserved : NVM_GEN_RESERVED; + } a; + + /* 1.2 device format */ + struct { + u64 ch : NVM_GEN_CH_BITS; + u64 lun : NVM_GEN_LUN_BITS; + u64 blk : NVM_GEN_BLK_BITS; + u64 pg : NVM_12_PG_BITS; + u64 pl : NVM_12_PL_BITS; + u64 sec : NVM_12_SEC_BITS; + u64 reserved : NVM_12_RESERVED; + } g; + + /* 2.0 device format */ + struct { + u64 grp : NVM_GEN_CH_BITS; + u64 pu : NVM_GEN_LUN_BITS; + u64 chk : NVM_GEN_BLK_BITS; + u64 sec : NVM_20_SEC_BITS; + u64 reserved : NVM_20_RESERVED; + } m; + + struct { + u64 line : 63; + u64 is_cached : 1; + } c; + + u64 ppa; + }; +}; + +struct nvm_rq; +struct nvm_id; +struct nvm_dev; +struct nvm_tgt_dev; +struct nvm_chk_meta; + +typedef int (nvm_id_fn)(struct nvm_dev *); +typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); +typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); +typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *, + sector_t, int); +typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); +typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *); +typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); +typedef void (nvm_destroy_dma_pool_fn)(void *); +typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, + dma_addr_t *); +typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); + +struct nvm_dev_ops { + nvm_id_fn *identity; + nvm_op_bb_tbl_fn *get_bb_tbl; + nvm_op_set_bb_fn *set_bb_tbl; + + nvm_get_chk_meta_fn *get_chk_meta; + + nvm_submit_io_fn *submit_io; + nvm_submit_io_sync_fn *submit_io_sync; + + nvm_create_dma_pool_fn *create_dma_pool; + nvm_destroy_dma_pool_fn *destroy_dma_pool; + nvm_dev_dma_alloc_fn *dev_dma_alloc; + nvm_dev_dma_free_fn *dev_dma_free; +}; + +#ifdef CONFIG_NVM + +#include +#include +#include +#include + +enum { + /* HW Responsibilities */ + NVM_RSP_L2P = 1 << 0, + NVM_RSP_ECC = 1 << 1, + + /* Physical Adressing Mode */ + NVM_ADDRMODE_LINEAR = 0, + NVM_ADDRMODE_CHANNEL = 1, + + /* Plane programming mode for LUN */ + NVM_PLANE_SINGLE = 1, + NVM_PLANE_DOUBLE = 2, + NVM_PLANE_QUAD = 4, + + /* Status codes */ + NVM_RSP_SUCCESS = 0x0, + NVM_RSP_NOT_CHANGEABLE = 0x1, + NVM_RSP_ERR_FAILWRITE = 0x40ff, + NVM_RSP_ERR_EMPTYPAGE = 0x42ff, + NVM_RSP_ERR_FAILECC = 0x4281, + NVM_RSP_ERR_FAILCRC = 0x4004, + NVM_RSP_WARN_HIGHECC = 0x4700, + + /* Device opcodes */ + NVM_OP_PWRITE = 0x91, + NVM_OP_PREAD = 0x92, + NVM_OP_ERASE = 0x90, + + /* PPA Command Flags */ + NVM_IO_SNGL_ACCESS = 0x0, + NVM_IO_DUAL_ACCESS = 0x1, + NVM_IO_QUAD_ACCESS = 0x2, + + /* NAND Access Modes */ + NVM_IO_SUSPEND = 0x80, + NVM_IO_SLC_MODE = 0x100, + NVM_IO_SCRAMBLE_ENABLE = 0x200, + + /* Block Types */ + NVM_BLK_T_FREE = 0x0, + NVM_BLK_T_BAD = 0x1, + NVM_BLK_T_GRWN_BAD = 0x2, + NVM_BLK_T_DEV = 0x4, + NVM_BLK_T_HOST = 0x8, + + /* Memory capabilities */ + NVM_ID_CAP_SLC = 0x1, + NVM_ID_CAP_CMD_SUSPEND = 
0x2, + NVM_ID_CAP_SCRAMBLE = 0x4, + NVM_ID_CAP_ENCRYPT = 0x8, + + /* Memory types */ + NVM_ID_FMTYPE_SLC = 0, + NVM_ID_FMTYPE_MLC = 1, + + /* Device capabilities */ + NVM_ID_DCAP_BBLKMGMT = 0x1, + NVM_UD_DCAP_ECC = 0x2, +}; + +struct nvm_id_lp_mlc { + u16 num_pairs; + u8 pairs[886]; +}; + +struct nvm_id_lp_tbl { + __u8 id[8]; + struct nvm_id_lp_mlc mlc; +}; + +struct nvm_addrf_12 { + u8 ch_len; + u8 lun_len; + u8 blk_len; + u8 pg_len; + u8 pln_len; + u8 sec_len; + + u8 ch_offset; + u8 lun_offset; + u8 blk_offset; + u8 pg_offset; + u8 pln_offset; + u8 sec_offset; + + u64 ch_mask; + u64 lun_mask; + u64 blk_mask; + u64 pg_mask; + u64 pln_mask; + u64 sec_mask; +}; + +struct nvm_addrf { + u8 ch_len; + u8 lun_len; + u8 chk_len; + u8 sec_len; + u8 rsv_len[2]; + + u8 ch_offset; + u8 lun_offset; + u8 chk_offset; + u8 sec_offset; + u8 rsv_off[2]; + + u64 ch_mask; + u64 lun_mask; + u64 chk_mask; + u64 sec_mask; + u64 rsv_mask[2]; +}; + +enum { + /* Chunk states */ + NVM_CHK_ST_FREE = 1 << 0, + NVM_CHK_ST_CLOSED = 1 << 1, + NVM_CHK_ST_OPEN = 1 << 2, + NVM_CHK_ST_OFFLINE = 1 << 3, + + /* Chunk types */ + NVM_CHK_TP_W_SEQ = 1 << 0, + NVM_CHK_TP_W_RAN = 1 << 1, + NVM_CHK_TP_SZ_SPEC = 1 << 4, +}; + +/* + * Note: The structure size is linked to nvme_nvm_chk_meta such that the same + * buffer can be used when converting from little endian to cpu addressing. + */ +struct nvm_chk_meta { + u8 state; + u8 type; + u8 wi; + u8 rsvd[5]; + u64 slba; + u64 cnlb; + u64 wp; +}; + +struct nvm_target { + struct list_head list; + struct nvm_tgt_dev *dev; + struct nvm_tgt_type *type; + struct gendisk *disk; +}; + +#define ADDR_EMPTY (~0ULL) + +#define NVM_TARGET_DEFAULT_OP (101) +#define NVM_TARGET_MIN_OP (3) +#define NVM_TARGET_MAX_OP (80) + +#define NVM_VERSION_MAJOR 1 +#define NVM_VERSION_MINOR 0 +#define NVM_VERSION_PATCH 0 + +#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */ + +struct nvm_rq; +typedef void (nvm_end_io_fn)(struct nvm_rq *); + +struct nvm_rq { + struct nvm_tgt_dev *dev; + + struct bio *bio; + + union { + struct ppa_addr ppa_addr; + dma_addr_t dma_ppa_list; + }; + + struct ppa_addr *ppa_list; + + void *meta_list; + dma_addr_t dma_meta_list; + + nvm_end_io_fn *end_io; + + uint8_t opcode; + uint16_t nr_ppas; + uint16_t flags; + + u64 ppa_status; /* ppa media status */ + int error; + + void *private; +}; + +static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) +{ + return pdu - sizeof(struct nvm_rq); +} + +static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) +{ + return rqdata + 1; +} + +enum { + NVM_BLK_ST_FREE = 0x1, /* Free block */ + NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ + NVM_BLK_ST_BAD = 0x8, /* Bad block */ +}; + +/* Instance geometry */ +struct nvm_geo { + /* device reported version */ + u8 major_ver_id; + u8 minor_ver_id; + + /* kernel short version */ + u8 version; + + /* instance specific geometry */ + int num_ch; + int num_lun; /* per channel */ + + /* calculated values */ + int all_luns; /* across channels */ + int all_chunks; /* across channels */ + + int op; /* over-provision in instance */ + + sector_t total_secs; /* across channels */ + + /* chunk geometry */ + u32 num_chk; /* chunks per lun */ + u32 clba; /* sectors per chunk */ + u16 csecs; /* sector size */ + u16 sos; /* out-of-band area size */ + + /* device write constrains */ + u32 ws_min; /* minimum write size */ + u32 ws_opt; /* optimal write size */ + u32 mw_cunits; /* distance required for successful read */ + u32 maxoc; /* maximum open chunks */ + u32 maxocpu; /* maximum open chunks per 
parallel unit */ + + /* device capabilities */ + u32 mccap; + + /* device timings */ + u32 trdt; /* Avg. Tread (ns) */ + u32 trdm; /* Max Tread (ns) */ + u32 tprt; /* Avg. Tprog (ns) */ + u32 tprm; /* Max Tprog (ns) */ + u32 tbet; /* Avg. Terase (ns) */ + u32 tbem; /* Max Terase (ns) */ + + /* generic address format */ + struct nvm_addrf addrf; + + /* 1.2 compatibility */ + u8 vmnt; + u32 cap; + u32 dom; + + u8 mtype; + u8 fmtype; + + u16 cpar; + u32 mpos; + + u8 num_pln; + u8 pln_mode; + u16 num_pg; + u16 fpg_sz; +}; + +/* sub-device structure */ +struct nvm_tgt_dev { + /* Device information */ + struct nvm_geo geo; + + /* Base ppas for target LUNs */ + struct ppa_addr *luns; + + struct request_queue *q; + + struct nvm_dev *parent; + void *map; +}; + +struct nvm_dev { + struct nvm_dev_ops *ops; + + struct list_head devices; + + /* Device information */ + struct nvm_geo geo; + + unsigned long *lun_map; + void *dma_pool; + + /* Backend device */ + struct request_queue *q; + char name[DISK_NAME_LEN]; + void *private_data; + + void *rmap; + + struct mutex mlock; + spinlock_t lock; + + /* target management */ + struct list_head area_list; + struct list_head targets; +}; + +static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, + struct ppa_addr r) +{ + struct nvm_geo *geo = &dev->geo; + struct ppa_addr l; + + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; + + l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; + l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; + l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; + l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; + l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; + l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset; + } else { + struct nvm_addrf *lbaf = &geo->addrf; + + l.ppa = ((u64)r.m.grp) << lbaf->ch_offset; + l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset; + l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset; + l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset; + } + + return l; +} + +static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, + struct ppa_addr r) +{ + struct nvm_geo *geo = &dev->geo; + struct ppa_addr l; + + l.ppa = 0; + + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; + + l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; + l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; + l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; + l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; + l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; + l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset; + } else { + struct nvm_addrf *lbaf = &geo->addrf; + + l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset; + l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset; + l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset; + l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset; + } + + return l; +} + +typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); +typedef sector_t (nvm_tgt_capacity_fn)(void *); +typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, + int flags); +typedef void (nvm_tgt_exit_fn)(void *, bool); +typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *); +typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *); + +struct nvm_tgt_type { + const char *name; + unsigned int version[3]; + + /* target entry points */ + nvm_tgt_make_rq_fn *make_rq; + nvm_tgt_capacity_fn *capacity; + + /* module-specific init/teardown */ + nvm_tgt_init_fn *init; + nvm_tgt_exit_fn *exit; + + /* 
sysfs */ + nvm_tgt_sysfs_init_fn *sysfs_init; + nvm_tgt_sysfs_exit_fn *sysfs_exit; + + /* For internal use */ + struct list_head list; + struct module *owner; +}; + +extern int nvm_register_tgt_type(struct nvm_tgt_type *); +extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); + +extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); +extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); + +extern struct nvm_dev *nvm_alloc_dev(int); +extern int nvm_register(struct nvm_dev *); +extern void nvm_unregister(struct nvm_dev *); + + +extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, + struct nvm_chk_meta *meta, struct ppa_addr ppa, + int nchks); + +extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, + int, int); +extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); +extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); +extern void nvm_end_io(struct nvm_rq *); +extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); +extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); + +#else /* CONFIG_NVM */ +struct nvm_dev_ops; + +static inline struct nvm_dev *nvm_alloc_dev(int node) +{ + return ERR_PTR(-EINVAL); +} +static inline int nvm_register(struct nvm_dev *dev) +{ + return -EINVAL; +} +static inline void nvm_unregister(struct nvm_dev *dev) {} +#endif /* CONFIG_NVM */ +#endif /* LIGHTNVM.H */ diff --git a/include/linux/linkage.h b/include/linux/linkage.h new file mode 100644 index 000000000..d7618c41f --- /dev/null +++ b/include/linux/linkage.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_LINKAGE_H +#define _LINUX_LINKAGE_H + +#include +#include +#include +#include + +/* Some toolchains use other characters (e.g. '`') to mark new line in macro */ +#ifndef ASM_NL +#define ASM_NL ; +#endif + +#ifdef __cplusplus +#define CPP_ASMLINKAGE extern "C" +#else +#define CPP_ASMLINKAGE +#endif + +#ifndef asmlinkage +#define asmlinkage CPP_ASMLINKAGE +#endif + +#ifndef cond_syscall +#define cond_syscall(x) asm( \ + ".weak " __stringify(x) "\n\t" \ + ".set " __stringify(x) "," \ + __stringify(sys_ni_syscall)) +#endif + +#ifndef SYSCALL_ALIAS +#define SYSCALL_ALIAS(alias, name) asm( \ + ".globl " __stringify(alias) "\n\t" \ + ".set " __stringify(alias) "," \ + __stringify(name)) +#endif + +#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) +#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) + +/* + * For assembly routines. + * + * Note when using these that you must specify the appropriate + * alignment directives yourself + */ +#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw" +#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw" + +/* + * This is used by architectures to keep arguments on the stack + * untouched by the compiler by keeping them live until the end. + * The argument stack may be owned by the assembly-language + * caller, not the callee, and gcc doesn't always understand + * that. + * + * We have the return value, and a maximum of six arguments. + * + * This should always be followed by a "return ret" for the + * protection to work (ie no more work that the compiler might + * end up needing stack temporaries for). + */ +/* Assembly files may be compiled with -traditional .. */ +#ifndef __ASSEMBLY__ +#ifndef asmlinkage_protect +# define asmlinkage_protect(n, ret, args...) 
do { } while (0) +#endif +#endif + +#ifndef __ALIGN +#define __ALIGN .align 4,0x90 +#define __ALIGN_STR ".align 4,0x90" +#endif + +#ifdef __ASSEMBLY__ + +#ifndef LINKER_SCRIPT +#define ALIGN __ALIGN +#define ALIGN_STR __ALIGN_STR + +#ifndef ENTRY +#define ENTRY(name) \ + .globl name ASM_NL \ + ALIGN ASM_NL \ + name: +#endif +#endif /* LINKER_SCRIPT */ + +#ifndef WEAK +#define WEAK(name) \ + .weak name ASM_NL \ + name: +#endif + +#ifndef END +#define END(name) \ + .size name, .-name +#endif + +/* If symbol 'name' is treated as a subroutine (gets called, and returns) + * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of + * static analysis tools such as stack depth analyzer. + */ +#ifndef ENDPROC +#define ENDPROC(name) \ + .type name, @function ASM_NL \ + END(name) +#endif + +#endif + +#endif diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h new file mode 100644 index 000000000..d4d5b93ef --- /dev/null +++ b/include/linux/linux_logo.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_LINUX_LOGO_H +#define _LINUX_LINUX_LOGO_H + +/* + * Linux logo to be displayed on boot + * + * Copyright (C) 1996 Larry Ewing (lewing@isc.tamu.edu) + * Copyright (C) 1996,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 2001 Greg Banks + * Copyright (C) 2001 Jan-Benedict Glaw + * Copyright (C) 2003 Geert Uytterhoeven + * + * Serial_console ascii image can be any size, + * but should contain %s to display the version + */ + +#include + + +#define LINUX_LOGO_MONO 1 /* monochrome black/white */ +#define LINUX_LOGO_VGA16 2 /* 16 colors VGA text palette */ +#define LINUX_LOGO_CLUT224 3 /* 224 colors */ +#define LINUX_LOGO_GRAY256 4 /* 256 levels grayscale */ + + +struct linux_logo { + int type; /* one of LINUX_LOGO_* */ + unsigned int width; + unsigned int height; + unsigned int clutsize; /* LINUX_LOGO_CLUT224 only */ + const unsigned char *clut; /* LINUX_LOGO_CLUT224 only */ + const unsigned char *data; +}; + +extern const struct linux_logo logo_linux_mono; +extern const struct linux_logo logo_linux_vga16; +extern const struct linux_logo logo_linux_clut224; +extern const struct linux_logo logo_dec_clut224; +extern const struct linux_logo logo_mac_clut224; +extern const struct linux_logo logo_parisc_clut224; +extern const struct linux_logo logo_sgi_clut224; +extern const struct linux_logo logo_sun_clut224; +extern const struct linux_logo logo_superh_mono; +extern const struct linux_logo logo_superh_vga16; +extern const struct linux_logo logo_superh_clut224; +extern const struct linux_logo logo_spe_clut224; + +extern const struct linux_logo *fb_find_logo(int depth); +#ifdef CONFIG_FB_LOGO_EXTRA +extern void fb_append_extra_logo(const struct linux_logo *logo, + unsigned int n); +#else +static inline void fb_append_extra_logo(const struct linux_logo *logo, + unsigned int n) +{} +#endif + +#endif /* _LINUX_LINUX_LOGO_H */ diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h new file mode 100644 index 000000000..b72b8cdba --- /dev/null +++ b/include/linux/lis3lv02d.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LIS3LV02D_H_ +#define __LIS3LV02D_H_ + +/** + * struct lis3lv02d_platform_data - lis3 chip family platform data + * @click_flags: Click detection unit configuration + * @click_thresh_x: Click detection unit x axis threshold + * @click_thresh_y: Click detection unit y axis threshold + * @click_thresh_z: Click detection unit z axis threshold + * @click_time_limit: Click detection unit time parameter + * 
@click_latency: Click detection unit latency parameter + * @click_window: Click detection unit window parameter + * @irq_cfg: On chip irq source and type configuration (click / + * data available / wake up, open drain, polarity) + * @irq_flags1: Additional irq triggering flags for irq channel 0 + * @irq_flags2: Additional irq triggering flags for irq channel 1 + * @duration1: Wake up unit 1 duration parameter + * @duration2: Wake up unit 2 duration parameter + * @wakeup_flags: Wake up unit 1 flags + * @wakeup_thresh: Wake up unit 1 threshold value + * @wakeup_flags2: Wake up unit 2 flags + * @wakeup_thresh2: Wake up unit 2 threshold value + * @hipass_ctrl: High pass filter control (enable / disable, cut off + * frequency) + * @axis_x: Sensor orientation remapping for x-axis + * @axis_y: Sensor orientation remapping for y-axis + * @axis_z: Sensor orientation remapping for z-axis + * @driver_features: Enable bits for different features. Disabled by default + * @default_rate: Default sampling rate. 0 means reset default + * @setup_resources: Interrupt line setup call back function + * @release_resources: Interrupt line release call back function + * @st_min_limits[3]: Selftest acceptance minimum values + * @st_max_limits[3]: Selftest acceptance maximum values + * @irq2: Irq line 2 number + * + * Platform data is used to setup the sensor chip. Meaning of the different + * chip features can be found from the data sheet. It is publicly available + * at www.st.com web pages. Currently the platform data is used + * only for the 8 bit device. The 8 bit device has two wake up / free fall + * detection units and click detection unit. There are plenty of ways to + * configure the chip which makes is quite hard to explain deeper meaning of + * the fields here. Behaviour of the detection blocks varies heavily depending + * on the configuration. For example, interrupt detection block can use high + * pass filtered data which makes it react to the changes in the acceleration. + * Irq_flags can be used to enable interrupt detection on the both edges. + * With proper chip configuration this produces interrupt when some trigger + * starts and when it goes away. 
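+ *
+ * A minimal board-file sketch (illustrative only; the "board_lis3_pdata"
+ * name and the values are made up, and only a few representative fields
+ * are filled in, the rest default to zero):
+ *
+ *	static struct lis3lv02d_platform_data board_lis3_pdata = {
+ *		.click_flags	= LIS3_CLICK_SINGLE_X | LIS3_CLICK_SINGLE_Y,
+ *		.click_thresh_x	= 10,
+ *		.click_thresh_y	= 10,
+ *		.irq_cfg	= LIS3_IRQ1_CLICK | LIS3_IRQ_ACTIVE_LOW,
+ *		.wakeup_flags	= LIS3_WAKEUP_X_HI | LIS3_WAKEUP_Y_HI,
+ *		.wakeup_thresh	= 8,
+ *		.axis_x		= LIS3_DEV_X,
+ *		.axis_y		= LIS3_INV_DEV_Y,
+ *		.axis_z		= LIS3_DEV_Z,
+ *	};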
+ */ + +struct lis3lv02d_platform_data { + /* please note: the 'click' feature is only supported for + * LIS[32]02DL variants of the chip and will be ignored for + * others */ +#define LIS3_CLICK_SINGLE_X (1 << 0) +#define LIS3_CLICK_DOUBLE_X (1 << 1) +#define LIS3_CLICK_SINGLE_Y (1 << 2) +#define LIS3_CLICK_DOUBLE_Y (1 << 3) +#define LIS3_CLICK_SINGLE_Z (1 << 4) +#define LIS3_CLICK_DOUBLE_Z (1 << 5) + unsigned char click_flags; + unsigned char click_thresh_x; + unsigned char click_thresh_y; + unsigned char click_thresh_z; + unsigned char click_time_limit; + unsigned char click_latency; + unsigned char click_window; + +#define LIS3_IRQ1_DISABLE (0 << 0) +#define LIS3_IRQ1_FF_WU_1 (1 << 0) +#define LIS3_IRQ1_FF_WU_2 (2 << 0) +#define LIS3_IRQ1_FF_WU_12 (3 << 0) +#define LIS3_IRQ1_DATA_READY (4 << 0) +#define LIS3_IRQ1_CLICK (7 << 0) +#define LIS3_IRQ1_MASK (7 << 0) +#define LIS3_IRQ2_DISABLE (0 << 3) +#define LIS3_IRQ2_FF_WU_1 (1 << 3) +#define LIS3_IRQ2_FF_WU_2 (2 << 3) +#define LIS3_IRQ2_FF_WU_12 (3 << 3) +#define LIS3_IRQ2_DATA_READY (4 << 3) +#define LIS3_IRQ2_CLICK (7 << 3) +#define LIS3_IRQ2_MASK (7 << 3) +#define LIS3_IRQ_OPEN_DRAIN (1 << 6) +#define LIS3_IRQ_ACTIVE_LOW (1 << 7) + unsigned char irq_cfg; + unsigned char irq_flags1; /* Additional irq edge / level flags */ + unsigned char irq_flags2; /* Additional irq edge / level flags */ + unsigned char duration1; + unsigned char duration2; +#define LIS3_WAKEUP_X_LO (1 << 0) +#define LIS3_WAKEUP_X_HI (1 << 1) +#define LIS3_WAKEUP_Y_LO (1 << 2) +#define LIS3_WAKEUP_Y_HI (1 << 3) +#define LIS3_WAKEUP_Z_LO (1 << 4) +#define LIS3_WAKEUP_Z_HI (1 << 5) + unsigned char wakeup_flags; + unsigned char wakeup_thresh; + unsigned char wakeup_flags2; + unsigned char wakeup_thresh2; +#define LIS3_HIPASS_CUTFF_8HZ 0 +#define LIS3_HIPASS_CUTFF_4HZ 1 +#define LIS3_HIPASS_CUTFF_2HZ 2 +#define LIS3_HIPASS_CUTFF_1HZ 3 +#define LIS3_HIPASS1_DISABLE (1 << 2) +#define LIS3_HIPASS2_DISABLE (1 << 3) + unsigned char hipass_ctrl; +#define LIS3_NO_MAP 0 +#define LIS3_DEV_X 1 +#define LIS3_DEV_Y 2 +#define LIS3_DEV_Z 3 +#define LIS3_INV_DEV_X -1 +#define LIS3_INV_DEV_Y -2 +#define LIS3_INV_DEV_Z -3 + s8 axis_x; + s8 axis_y; + s8 axis_z; +#define LIS3_USE_BLOCK_READ 0x02 + u16 driver_features; + int default_rate; + int (*setup_resources)(void); + int (*release_resources)(void); + /* Limits for selftest are specified in chip data sheet */ + s16 st_min_limits[3]; /* min pass limit x, y, z */ + s16 st_max_limits[3]; /* max pass limit x, y, z */ + int irq2; +}; + +#endif /* __LIS3LV02D_H_ */ diff --git a/include/linux/list.h b/include/linux/list.h new file mode 100644 index 000000000..d2c12ef7a --- /dev/null +++ b/include/linux/list.h @@ -0,0 +1,829 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_LIST_H +#define _LINUX_LIST_H + +#include +#include +#include +#include +#include + +/* + * Simple doubly linked list implementation. + * + * Some of the internal functions ("__xxx") are useful when + * manipulating whole lists rather than single entries, as + * sometimes we already know the next/prev entries and we can + * generate better code by using them directly rather than + * using the generic single-entry routines. 
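+ *
+ * A quick usage sketch, assuming a caller-defined type ("struct foo" and
+ * the "foo_list" head are hypothetical names used only for illustration):
+ *
+ *	struct foo {
+ *		int value;
+ *		struct list_head node;
+ *	};
+ *
+ *	static LIST_HEAD(foo_list);
+ *
+ *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
+ *	struct foo *cur;
+ *
+ *	if (f) {
+ *		f->value = 1;
+ *		list_add_tail(&f->node, &foo_list);
+ *	}
+ *
+ *	list_for_each_entry(cur, &foo_list, node)
+ *		pr_info("value %d\n", cur->value);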
+ */ + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +static inline void INIT_LIST_HEAD(struct list_head *list) +{ + WRITE_ONCE(list->next, list); + list->prev = list; +} + +#ifdef CONFIG_DEBUG_LIST +extern bool __list_add_valid(struct list_head *new, + struct list_head *prev, + struct list_head *next); +extern bool __list_del_entry_valid(struct list_head *entry); +#else +static inline bool __list_add_valid(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + return true; +} +static inline bool __list_del_entry_valid(struct list_head *entry) +{ + return true; +} +#endif + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + if (!__list_add_valid(new, prev, next)) + return; + + next->prev = new; + new->next = next; + new->prev = prev; + WRITE_ONCE(prev->next, new); +} + +/** + * list_add - add a new entry + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + */ +static inline void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} + + +/** + * list_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +static inline void list_add_tail(struct list_head *new, struct list_head *head) +{ + __list_add(new, head->prev, head); +} + +/* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_del(struct list_head * prev, struct list_head * next) +{ + next->prev = prev; + WRITE_ONCE(prev->next, next); +} + +/** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty() on entry does not return true after this, the entry is + * in an undefined state. + */ +static inline void __list_del_entry(struct list_head *entry) +{ + if (!__list_del_entry_valid(entry)) + return; + + __list_del(entry->prev, entry->next); +} + +static inline void list_del(struct list_head *entry) +{ + __list_del_entry(entry); + entry->next = LIST_POISON1; + entry->prev = LIST_POISON2; +} + +/** + * list_replace - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * If @old was empty, it will be overwritten. + */ +static inline void list_replace(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->next->prev = new; + new->prev = old->prev; + new->prev->next = new; +} + +static inline void list_replace_init(struct list_head *old, + struct list_head *new) +{ + list_replace(old, new); + INIT_LIST_HEAD(old); +} + +/** + * list_del_init - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. 
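+ *
+ * Unlike list_del(), the entry is left in a valid, empty-list state
+ * (list_empty() on it returns true) and may be re-added later, for
+ * example (sketch; "f" is an entry embedded in a hypothetical object and
+ * "other_list" another list head):
+ *
+ *	list_del_init(&f->node);
+ *	list_add(&f->node, &other_list);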
+ */ +static inline void list_del_init(struct list_head *entry) +{ + __list_del_entry(entry); + INIT_LIST_HEAD(entry); +} + +/** + * list_move - delete from one list and add as another's head + * @list: the entry to move + * @head: the head that will precede our entry + */ +static inline void list_move(struct list_head *list, struct list_head *head) +{ + __list_del_entry(list); + list_add(list, head); +} + +/** + * list_move_tail - delete from one list and add as another's tail + * @list: the entry to move + * @head: the head that will follow our entry + */ +static inline void list_move_tail(struct list_head *list, + struct list_head *head) +{ + __list_del_entry(list); + list_add_tail(list, head); +} + +/** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int list_empty(const struct list_head *head) +{ + return READ_ONCE(head->next) == head; +} + +/** + * list_empty_careful - tests whether a list is empty and not being modified + * @head: the list to test + * + * Description: + * tests whether a list is empty _and_ checks that no other CPU might be + * in the process of modifying either member (next or prev) + * + * NOTE: using list_empty_careful() without synchronization + * can only be safe if the only activity that can happen + * to the list entry is list_del_init(). Eg. it cannot be used + * if another CPU could re-list_add() it. + */ +static inline int list_empty_careful(const struct list_head *head) +{ + struct list_head *next = head->next; + return (next == head) && (next == head->prev); +} + +/** + * list_rotate_left - rotate the list to the left + * @head: the head of the list + */ +static inline void list_rotate_left(struct list_head *head) +{ + struct list_head *first; + + if (!list_empty(head)) { + first = head->next; + list_move_tail(first, head); + } +} + +/** + * list_is_singular - tests whether a list has just one entry. + * @head: the list to test. + */ +static inline int list_is_singular(const struct list_head *head) +{ + return !list_empty(head) && (head->next == head->prev); +} + +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. 
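+ *
+ * For example (sketch; "front" is a scratch head and "cut" is assumed to
+ * be an entry known to be on the hypothetical "foo_list"):
+ *
+ *	LIST_HEAD(front);
+ *	struct list_head *cut = foo_list.next;
+ *
+ *	list_cut_position(&front, &foo_list, cut);
+ *
+ * after which "front" holds the entries up to and including "cut".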
+ * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + +/** + * list_cut_before - cut a list into two, before given entry + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * + * This helper moves the initial part of @head, up to but + * excluding @entry, from @head to @list. You should pass + * in @entry an element you know is on @head. @list should + * be an empty list or a list you do not care about losing + * its data. + * If @entry == @head, all entries on @head are moved to + * @list. + */ +static inline void list_cut_before(struct list_head *list, + struct list_head *head, + struct list_head *entry) +{ + if (head->next == entry) { + INIT_LIST_HEAD(list); + return; + } + list->next = head->next; + list->next->prev = list; + list->prev = entry->prev; + list->prev->next = list; + head->next = entry; + entry->prev = head; +} + +static inline void __list_splice(const struct list_head *list, + struct list_head *prev, + struct list_head *next) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + +/** + * list_splice - join two lists, this is designed for stacks + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice(const struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head, head->next); +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); +} + +/** + * list_splice_init - join two lists and reinitialise the emptied list. + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * The list at @list is reinitialised + */ +static inline void list_splice_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} + +/** + * list_splice_tail_init - join two lists and reinitialise the emptied list + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); + INIT_LIST_HEAD(list); + } +} + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_first_entry - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * Note, that list is expected to be not empty. 
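+ *
+ * A typical pairing with list_empty() (sketch, reusing the hypothetical
+ * "struct foo" / "foo_list" names from the example above):
+ *
+ *	if (!list_empty(&foo_list)) {
+ *		struct foo *first = list_first_entry(&foo_list,
+ *						     struct foo, node);
+ *		...
+ *	}
+ *
+ * When the list may be empty, list_first_entry_or_null() below folds the
+ * emptiness check into the lookup.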
+ */ +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +/** + * list_last_entry - get the last element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_last_entry(ptr, type, member) \ + list_entry((ptr)->prev, type, member) + +/** + * list_first_entry_or_null - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * Note that if the list is empty, it returns NULL. + */ +#define list_first_entry_or_null(ptr, type, member) ({ \ + struct list_head *head__ = (ptr); \ + struct list_head *pos__ = READ_ONCE(head__->next); \ + pos__ != head__ ? list_entry(pos__, type, member) : NULL; \ +}) + +/** + * list_next_entry - get the next element in list + * @pos: the type * to cursor + * @member: the name of the list_head within the struct. + */ +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) + +/** + * list_prev_entry - get the prev element in list + * @pos: the type * to cursor + * @member: the name of the list_head within the struct. + */ +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) + +/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) + +/** + * list_for_each_prev - iterate over a list backwards + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each_prev(pos, head) \ + for (pos = (head)->prev; pos != (head); pos = pos->prev) + +/** + * list_for_each_safe - iterate over a list safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) + +/** + * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_prev_safe(pos, n, head) \ + for (pos = (head)->prev, n = pos->prev; \ + pos != (head); \ + pos = n, n = pos->prev) + +/** + * list_entry_is_head - test if the entry points to the head of the list + * @pos: the type * to cursor + * @head: the head for your list. + * @member: the name of the list_head within the struct. + */ +#define list_entry_is_head(pos, head, member) \ + (&pos->member == (head)) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_first_entry(head, typeof(*pos), member); \ + !list_entry_is_head(pos, head, member); \ + pos = list_next_entry(pos, member)) + +/** + * list_for_each_entry_reverse - iterate backwards over list of given type. 
+ * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + */ +#define list_for_each_entry_reverse(pos, head, member) \ + for (pos = list_last_entry(head, typeof(*pos), member); \ + !list_entry_is_head(pos, head, member); \ + pos = list_prev_entry(pos, member)) + +/** + * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() + * @pos: the type * to use as a start point + * @head: the head of the list + * @member: the name of the list_head within the struct. + * + * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). + */ +#define list_prepare_entry(pos, head, member) \ + ((pos) ? : list_entry(head, typeof(*pos), member)) + +/** + * list_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = list_next_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = list_next_entry(pos, member)) + +/** + * list_for_each_entry_continue_reverse - iterate backwards from the given point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Start to iterate over list of given type backwards, continuing after + * the current position. + */ +#define list_for_each_entry_continue_reverse(pos, head, member) \ + for (pos = list_prev_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = list_prev_entry(pos, member)) + +/** + * list_for_each_entry_from - iterate over list of given type from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate over list of given type, continuing from current position. + */ +#define list_for_each_entry_from(pos, head, member) \ + for (; !list_entry_is_head(pos, head, member); \ + pos = list_next_entry(pos, member)) + +/** + * list_for_each_entry_from_reverse - iterate backwards over list of given type + * from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate backwards over list of given type, continuing from current position. + */ +#define list_for_each_entry_from_reverse(pos, head, member) \ + for (; !list_entry_is_head(pos, head, member); \ + pos = list_prev_entry(pos, member)) + +/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_head within the struct. + */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_first_entry(head, typeof(*pos), member), \ + n = list_next_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = n, n = list_next_entry(n, member)) + +/** + * list_for_each_entry_safe_continue - continue list iteration safe against removal + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. 
+ * @member: the name of the list_head within the struct. + * + * Iterate over list of given type, continuing after current point, + * safe against removal of list entry. + */ +#define list_for_each_entry_safe_continue(pos, n, head, member) \ + for (pos = list_next_entry(pos, member), \ + n = list_next_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = n, n = list_next_entry(n, member)) + +/** + * list_for_each_entry_safe_from - iterate over list from current point safe against removal + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate over list of given type from current point, safe against + * removal of list entry. + */ +#define list_for_each_entry_safe_from(pos, n, head, member) \ + for (n = list_next_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = n, n = list_next_entry(n, member)) + +/** + * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate backwards over list of given type, safe against removal + * of list entry. + */ +#define list_for_each_entry_safe_reverse(pos, n, head, member) \ + for (pos = list_last_entry(head, typeof(*pos), member), \ + n = list_prev_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = n, n = list_prev_entry(n, member)) + +/** + * list_safe_reset_next - reset a stale list_for_each_entry_safe loop + * @pos: the loop cursor used in the list_for_each_entry_safe loop + * @n: temporary storage used in list_for_each_entry_safe + * @member: the name of the list_head within the struct. + * + * list_safe_reset_next is not safe to use in general if the list may be + * modified concurrently (eg. the lock is dropped in the loop body). An + * exception to this is if the cursor element (pos) is pinned in the list, + * and list_safe_reset_next is called after re-taking the lock and before + * completing the current iteration of the loop body. + */ +#define list_safe_reset_next(pos, n, member) \ + n = list_next_entry(pos, member) + +/* + * Double linked lists with a single pointer list head. + * Mostly useful for hash tables where the two pointer list head is + * too wasteful. + * You lose the ability to access the tail in O(1). 
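+ *
+ * A common pattern is an array of buckets (sketch; "struct bar", "key",
+ * "b" and the bucket count are hypothetical):
+ *
+ *	struct bar {
+ *		unsigned long key;
+ *		struct hlist_node hash_node;
+ *	};
+ *
+ *	static struct hlist_head buckets[64];
+ *
+ *	hlist_add_head(&b->hash_node, &buckets[b->key % 64]);
+ *
+ *	struct bar *cur;
+ *	hlist_for_each_entry(cur, &buckets[key % 64], hash_node)
+ *		if (cur->key == key)
+ *			break;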
+ */ + +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +static inline int hlist_unhashed(const struct hlist_node *h) +{ + return !h->pprev; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !READ_ONCE(h->first); +} + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + + WRITE_ONCE(*pprev, next); + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = LIST_POISON1; + n->pprev = LIST_POISON2; +} + +static inline void hlist_del_init(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + INIT_HLIST_NODE(n); + } +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + WRITE_ONCE(h->first, n); + n->pprev = &h->first; +} + +/* next must be != NULL */ +static inline void hlist_add_before(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + next->pprev = &n->next; + WRITE_ONCE(*(n->pprev), n); +} + +static inline void hlist_add_behind(struct hlist_node *n, + struct hlist_node *prev) +{ + n->next = prev->next; + WRITE_ONCE(prev->next, n); + n->pprev = &prev->next; + + if (n->next) + n->next->pprev = &n->next; +} + +/* after that we'll appear to be on some hlist and hlist_del will work */ +static inline void hlist_add_fake(struct hlist_node *n) +{ + n->pprev = &n->next; +} + +static inline bool hlist_fake(struct hlist_node *h) +{ + return h->pprev == &h->next; +} + +/* + * Check whether the node is the only node of the head without + * accessing head: + */ +static inline bool +hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) +{ + return !n->next && n->pprev == &h->first; +} + +/* + * Move a list from one list head to another. Fixup the pprev + * reference of the first entry if it exists. + */ +static inline void hlist_move_list(struct hlist_head *old, + struct hlist_head *new) +{ + new->first = old->first; + if (new->first) + new->first->pprev = &new->first; + old->first = NULL; +} + +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_for_each(pos, head) \ + for (pos = (head)->first; pos ; pos = pos->next) + +#define hlist_for_each_safe(pos, n, head) \ + for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ + pos = n) + +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ + }) + +/** + * hlist_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +/** + * hlist_for_each_entry_continue - iterate over a hlist continuing after current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. 
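+ *
+ * @pos must already point at a valid entry, for instance one located by a
+ * preceding hlist_for_each_entry(); iteration then resumes at the node
+ * after it (sketch, reusing the hypothetical "cur"/"hash_node" names):
+ *
+ *	hlist_for_each_entry_continue(cur, hash_node)
+ *		pr_debug("key %lu\n", cur->key);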
+ */ +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +/** + * hlist_for_each_entry_from - iterate over a hlist continuing from current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +/** + * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another &struct hlist_node to use as temporary storage + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#endif diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h new file mode 100644 index 000000000..3fc2cc57b --- /dev/null +++ b/include/linux/list_bl.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_LIST_BL_H +#define _LINUX_LIST_BL_H + +#include +#include + +/* + * Special version of lists, where head of the list has a lock in the lowest + * bit. This is useful for scalable hash tables without increasing memory + * footprint overhead. + * + * For modification operations, the 0 bit of hlist_bl_head->first + * pointer must be set. + * + * With some small modifications, this can easily be adapted to store several + * arbitrary bits (not just a single lock bit), if the need arises to store + * some fast and compact auxiliary data. 
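+ *
+ * Modifications are typically done under the built-in bit lock (sketch;
+ * "n" is a hypothetical hlist_bl_node pointer and "head" a hlist_bl_head
+ * pointer):
+ *
+ *	hlist_bl_lock(head);
+ *	hlist_bl_add_head(n, head);
+ *	hlist_bl_unlock(head);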
+ */ + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +#define LIST_BL_LOCKMASK 1UL +#else +#define LIST_BL_LOCKMASK 0UL +#endif + +#ifdef CONFIG_DEBUG_LIST +#define LIST_BL_BUG_ON(x) BUG_ON(x) +#else +#define LIST_BL_BUG_ON(x) +#endif + + +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next, **pprev; +}; +#define INIT_HLIST_BL_HEAD(ptr) \ + ((ptr)->first = NULL) + +static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member) + +static inline bool hlist_bl_unhashed(const struct hlist_bl_node *h) +{ + return !h->pprev; +} + +static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h) +{ + return (struct hlist_bl_node *) + ((unsigned long)h->first & ~LIST_BL_LOCKMASK); +} + +static inline void hlist_bl_set_first(struct hlist_bl_head *h, + struct hlist_bl_node *n) +{ + LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); + LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != + LIST_BL_LOCKMASK); + h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); +} + +static inline bool hlist_bl_empty(const struct hlist_bl_head *h) +{ + return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK); +} + +static inline void hlist_bl_add_head(struct hlist_bl_node *n, + struct hlist_bl_head *h) +{ + struct hlist_bl_node *first = hlist_bl_first(h); + + n->next = first; + if (first) + first->pprev = &n->next; + n->pprev = &h->first; + hlist_bl_set_first(h, n); +} + +static inline void __hlist_bl_del(struct hlist_bl_node *n) +{ + struct hlist_bl_node *next = n->next; + struct hlist_bl_node **pprev = n->pprev; + + LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); + + /* pprev may be `first`, so be careful not to lose the lock bit */ + WRITE_ONCE(*pprev, + (struct hlist_bl_node *) + ((unsigned long)next | + ((unsigned long)*pprev & LIST_BL_LOCKMASK))); + if (next) + next->pprev = pprev; +} + +static inline void hlist_bl_del(struct hlist_bl_node *n) +{ + __hlist_bl_del(n); + n->next = LIST_POISON1; + n->pprev = LIST_POISON2; +} + +static inline void hlist_bl_del_init(struct hlist_bl_node *n) +{ + if (!hlist_bl_unhashed(n)) { + __hlist_bl_del(n); + INIT_HLIST_BL_NODE(n); + } +} + +static inline void hlist_bl_lock(struct hlist_bl_head *b) +{ + bit_spin_lock(0, (unsigned long *)b); +} + +static inline void hlist_bl_unlock(struct hlist_bl_head *b) +{ + __bit_spin_unlock(0, (unsigned long *)b); +} + +static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) +{ + return bit_spin_is_locked(0, (unsigned long *)b); +} + +/** + * hlist_bl_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + */ +#define hlist_bl_for_each_entry(tpos, pos, head, member) \ + for (pos = hlist_bl_first(head); \ + pos && \ + ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @n: another &struct hlist_node to use as temporary storage + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. 
+ */ +#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \ + for (pos = hlist_bl_first(head); \ + pos && ({ n = pos->next; 1; }) && \ + ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \ + pos = n) + +#endif diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h new file mode 100644 index 000000000..d5ceb2839 --- /dev/null +++ b/include/linux/list_lru.h @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved. + * Authors: David Chinner and Glauber Costa + * + * Generic LRU infrastructure + */ +#ifndef _LRU_LIST_H +#define _LRU_LIST_H + +#include +#include +#include + +struct mem_cgroup; + +/* list_lru_walk_cb has to always return one of those */ +enum lru_status { + LRU_REMOVED, /* item removed from list */ + LRU_REMOVED_RETRY, /* item removed, but lock has been + dropped and reacquired */ + LRU_ROTATE, /* item referenced, give another pass */ + LRU_SKIP, /* item cannot be locked, skip */ + LRU_RETRY, /* item not freeable. May drop the lock + internally, but has to return locked. */ +}; + +struct list_lru_one { + struct list_head list; + /* may become negative during memcg reparenting */ + long nr_items; +}; + +struct list_lru_memcg { + struct rcu_head rcu; + /* array of per cgroup lists, indexed by memcg_cache_id */ + struct list_lru_one *lru[0]; +}; + +struct list_lru_node { + /* protects all lists on the node, including per cgroup */ + spinlock_t lock; + /* global list, used for the root cgroup in cgroup aware lrus */ + struct list_lru_one lru; +#ifdef CONFIG_MEMCG_KMEM + /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ + struct list_lru_memcg __rcu *memcg_lrus; +#endif + long nr_items; +} ____cacheline_aligned_in_smp; + +struct list_lru { + struct list_lru_node *node; +#ifdef CONFIG_MEMCG_KMEM + struct list_head list; + int shrinker_id; + bool memcg_aware; +#endif +}; + +void list_lru_destroy(struct list_lru *lru); +int __list_lru_init(struct list_lru *lru, bool memcg_aware, + struct lock_class_key *key, struct shrinker *shrinker); + +#define list_lru_init(lru) \ + __list_lru_init((lru), false, NULL, NULL) +#define list_lru_init_key(lru, key) \ + __list_lru_init((lru), false, (key), NULL) +#define list_lru_init_memcg(lru, shrinker) \ + __list_lru_init((lru), true, NULL, shrinker) + +int memcg_update_all_list_lrus(int num_memcgs); +void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg); + +/** + * list_lru_add: add an element to the lru list's tail + * @list_lru: the lru pointer + * @item: the item to be added. + * + * If the element is already part of a list, this function returns doing + * nothing. Therefore the caller does not need to keep state about whether or + * not the element already belongs in the list and is allowed to lazy update + * it. Note however that this is valid for *a* list, not *this* list. If + * the caller organize itself in a way that elements can be in more than + * one type of list, it is up to the caller to fully remove the item from + * the previous list (with list_lru_del() for instance) before moving it + * to @list_lru + * + * Return value: true if the list was updated, false otherwise + */ +bool list_lru_add(struct list_lru *lru, struct list_head *item); + +/** + * list_lru_del: delete an element to the lru list + * @list_lru: the lru pointer + * @item: the item to be deleted. + * + * This function works analogously as list_lru_add in terms of list + * manipulation. 
The comments about an element already pertaining to + * a list are also valid for list_lru_del. + * + * Return value: true if the list was updated, false otherwise + */ +bool list_lru_del(struct list_lru *lru, struct list_head *item); + +/** + * list_lru_count_one: return the number of objects currently held by @lru + * @lru: the lru pointer. + * @nid: the node id to count from. + * @memcg: the cgroup to count from. + * + * Always return a non-negative number, 0 for empty lists. There is no + * guarantee that the list is not updated while the count is being computed. + * Callers that want such a guarantee need to provide an outer lock. + */ +unsigned long list_lru_count_one(struct list_lru *lru, + int nid, struct mem_cgroup *memcg); +unsigned long list_lru_count_node(struct list_lru *lru, int nid); + +static inline unsigned long list_lru_shrink_count(struct list_lru *lru, + struct shrink_control *sc) +{ + return list_lru_count_one(lru, sc->nid, sc->memcg); +} + +static inline unsigned long list_lru_count(struct list_lru *lru) +{ + long count = 0; + int nid; + + for_each_node_state(nid, N_NORMAL_MEMORY) + count += list_lru_count_node(lru, nid); + + return count; +} + +void list_lru_isolate(struct list_lru_one *list, struct list_head *item); +void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, + struct list_head *head); + +typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, + struct list_lru_one *list, spinlock_t *lock, void *cb_arg); + +/** + * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items. + * @lru: the lru pointer. + * @nid: the node id to scan from. + * @memcg: the cgroup to scan from. + * @isolate: callback function that is resposible for deciding what to do with + * the item currently being scanned + * @cb_arg: opaque type that will be passed to @isolate + * @nr_to_walk: how many items to scan. + * + * This function will scan all elements in a particular list_lru, calling the + * @isolate callback for each of those items, along with the current list + * spinlock and a caller-provided opaque. The @isolate callback can choose to + * drop the lock internally, but *must* return with the lock held. The callback + * will return an enum lru_status telling the list_lru infrastructure what to + * do with the object being scanned. + * + * Please note that nr_to_walk does not mean how many objects will be freed, + * just how many objects will be scanned. + * + * Return value: the number of objects effectively removed from the LRU. + */ +unsigned long list_lru_walk_one(struct list_lru *lru, + int nid, struct mem_cgroup *memcg, + list_lru_walk_cb isolate, void *cb_arg, + unsigned long *nr_to_walk); +/** + * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items. + * @lru: the lru pointer. + * @nid: the node id to scan from. + * @memcg: the cgroup to scan from. + * @isolate: callback function that is resposible for deciding what to do with + * the item currently being scanned + * @cb_arg: opaque type that will be passed to @isolate + * @nr_to_walk: how many items to scan. + * + * Same as @list_lru_walk_one except that the spinlock is acquired with + * spin_lock_irq(). 
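+ *
+ * A minimal @isolate callback might look like the following sketch (the
+ * "demo_isolate" name is hypothetical and the disposal of the containing
+ * object is left out):
+ *
+ *	static enum lru_status demo_isolate(struct list_head *item,
+ *					    struct list_lru_one *list,
+ *					    spinlock_t *lock, void *cb_arg)
+ *	{
+ *		list_lru_isolate(list, item);
+ *		return LRU_REMOVED;
+ *	}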
+ */ +unsigned long list_lru_walk_one_irq(struct list_lru *lru, + int nid, struct mem_cgroup *memcg, + list_lru_walk_cb isolate, void *cb_arg, + unsigned long *nr_to_walk); +unsigned long list_lru_walk_node(struct list_lru *lru, int nid, + list_lru_walk_cb isolate, void *cb_arg, + unsigned long *nr_to_walk); + +static inline unsigned long +list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc, + list_lru_walk_cb isolate, void *cb_arg) +{ + return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg, + &sc->nr_to_scan); +} + +static inline unsigned long +list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc, + list_lru_walk_cb isolate, void *cb_arg) +{ + return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg, + &sc->nr_to_scan); +} + +static inline unsigned long +list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate, + void *cb_arg, unsigned long nr_to_walk) +{ + long isolated = 0; + int nid; + + for_each_node_state(nid, N_NORMAL_MEMORY) { + isolated += list_lru_walk_node(lru, nid, isolate, + cb_arg, &nr_to_walk); + if (nr_to_walk <= 0) + break; + } + return isolated; +} +#endif /* _LRU_LIST_H */ diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h new file mode 100644 index 000000000..1ecd35664 --- /dev/null +++ b/include/linux/list_nulls.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_LIST_NULLS_H +#define _LINUX_LIST_NULLS_H + +#include +#include + +/* + * Special version of lists, where end of list is not a NULL pointer, + * but a 'nulls' marker, which can have many different values. + * (up to 2^31 different values guaranteed on all platforms) + * + * In the standard hlist, termination of a list is the NULL pointer. + * In this special 'nulls' variant, we use the fact that objects stored in + * a list are aligned on a word (4 or 8 bytes alignment). + * We therefore use the last significant bit of 'ptr' : + * Set to 1 : This is a 'nulls' end-of-list marker (ptr >> 1) + * Set to 0 : This is a pointer to some object (ptr) + */ + +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next, **pprev; +}; +#define NULLS_MARKER(value) (1UL | (((long)value) << 1)) +#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \ + ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls)) + +#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_nulls_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + !is_a_nulls(____ptr) ? 
hlist_nulls_entry(____ptr, type, member) : NULL; \ + }) +/** + * ptr_is_a_nulls - Test if a ptr is a nulls + * @ptr: ptr to be tested + * + */ +static inline int is_a_nulls(const struct hlist_nulls_node *ptr) +{ + return ((unsigned long)ptr & 1); +} + +/** + * get_nulls_value - Get the 'nulls' value of the end of chain + * @ptr: end of chain + * + * Should be called only if is_a_nulls(ptr); + */ +static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr) +{ + return ((unsigned long)ptr) >> 1; +} + +static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h) +{ + return !h->pprev; +} + +static inline int hlist_nulls_empty(const struct hlist_nulls_head *h) +{ + return is_a_nulls(READ_ONCE(h->first)); +} + +static inline void hlist_nulls_add_head(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *first = h->first; + + n->next = first; + WRITE_ONCE(n->pprev, &h->first); + h->first = n; + if (!is_a_nulls(first)) + WRITE_ONCE(first->pprev, &n->next); +} + +static inline void __hlist_nulls_del(struct hlist_nulls_node *n) +{ + struct hlist_nulls_node *next = n->next; + struct hlist_nulls_node **pprev = n->pprev; + + WRITE_ONCE(*pprev, next); + if (!is_a_nulls(next)) + WRITE_ONCE(next->pprev, pprev); +} + +static inline void hlist_nulls_del(struct hlist_nulls_node *n) +{ + __hlist_nulls_del(n); + WRITE_ONCE(n->pprev, LIST_POISON2); +} + +/** + * hlist_nulls_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + */ +#define hlist_nulls_for_each_entry(tpos, pos, head, member) \ + for (pos = (head)->first; \ + (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_nulls_for_each_entry_from - iterate over a hlist continuing from current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + * + */ +#define hlist_nulls_for_each_entry_from(tpos, pos, member) \ + for (; (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +#endif diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h new file mode 100644 index 000000000..ba79956e8 --- /dev/null +++ b/include/linux/list_sort.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_LIST_SORT_H +#define _LINUX_LIST_SORT_H + +#include + +struct list_head; + +void list_sort(void *priv, struct list_head *head, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b)); +#endif diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h new file mode 100644 index 000000000..aec44b1d9 --- /dev/null +++ b/include/linux/livepatch.h @@ -0,0 +1,214 @@ +/* + * livepatch.h - Kernel Live Patching Core + * + * Copyright (C) 2014 Seth Jennings + * Copyright (C) 2014 SUSE + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef _LINUX_LIVEPATCH_H_ +#define _LINUX_LIVEPATCH_H_ + +#include +#include +#include + +#if IS_ENABLED(CONFIG_LIVEPATCH) + +#include + +/* task patch states */ +#define KLP_UNDEFINED -1 +#define KLP_UNPATCHED 0 +#define KLP_PATCHED 1 + +/** + * struct klp_func - function structure for live patching + * @old_name: name of the function to be patched + * @new_func: pointer to the patched function code + * @old_sympos: a hint indicating which symbol position the old function + * can be found (optional) + * @old_addr: the address of the function being patched + * @kobj: kobject for sysfs resources + * @stack_node: list node for klp_ops func_stack list + * @old_size: size of the old function + * @new_size: size of the new function + * @patched: the func has been added to the klp_ops list + * @transition: the func is currently being applied or reverted + * + * The patched and transition variables define the func's patching state. When + * patching, a func is always in one of the following states: + * + * patched=0 transition=0: unpatched + * patched=0 transition=1: unpatched, temporary starting state + * patched=1 transition=1: patched, may be visible to some tasks + * patched=1 transition=0: patched, visible to all tasks + * + * And when unpatching, it goes in the reverse order: + * + * patched=1 transition=0: patched, visible to all tasks + * patched=1 transition=1: patched, may be visible to some tasks + * patched=0 transition=1: unpatched, temporary ending state + * patched=0 transition=0: unpatched + */ +struct klp_func { + /* external */ + const char *old_name; + void *new_func; + /* + * The old_sympos field is optional and can be used to resolve + * duplicate symbol names in livepatch objects. If this field is zero, + * it is expected the symbol is unique, otherwise patching fails. If + * this value is greater than zero then that occurrence of the symbol + * in kallsyms for the given object is used. + */ + unsigned long old_sympos; + + /* internal */ + unsigned long old_addr; + struct kobject kobj; + struct list_head stack_node; + unsigned long old_size, new_size; + bool patched; + bool transition; +}; + +struct klp_object; + +/** + * struct klp_callbacks - pre/post live-(un)patch callback structure + * @pre_patch: executed before code patching + * @post_patch: executed after code patching + * @pre_unpatch: executed before code unpatching + * @post_unpatch: executed after code unpatching + * @post_unpatch_enabled: flag indicating if post-unpatch callback + * should run + * + * All callbacks are optional. Only the pre-patch callback, if provided, + * will be unconditionally executed. If the parent klp_object fails to + * patch for any reason, including a non-zero error status returned from + * the pre-patch callback, no further callbacks will be executed. 
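+ *
+ * A livepatch module might wire the callbacks up as in this sketch (the
+ * "demo_*" names are hypothetical; returning 0 from the pre-patch
+ * callback allows patching of the object to proceed):
+ *
+ *	static int demo_pre_patch(struct klp_object *obj)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static void demo_post_patch(struct klp_object *obj)
+ *	{
+ *	}
+ *
+ *	static struct klp_callbacks demo_callbacks = {
+ *		.pre_patch	= demo_pre_patch,
+ *		.post_patch	= demo_post_patch,
+ *	};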
+ */ +struct klp_callbacks { + int (*pre_patch)(struct klp_object *obj); + void (*post_patch)(struct klp_object *obj); + void (*pre_unpatch)(struct klp_object *obj); + void (*post_unpatch)(struct klp_object *obj); + bool post_unpatch_enabled; +}; + +/** + * struct klp_object - kernel object structure for live patching + * @name: module name (or NULL for vmlinux) + * @funcs: function entries for functions to be patched in the object + * @callbacks: functions to be executed pre/post (un)patching + * @kobj: kobject for sysfs resources + * @mod: kernel module associated with the patched object + * (NULL for vmlinux) + * @patched: the object's funcs have been added to the klp_ops list + */ +struct klp_object { + /* external */ + const char *name; + struct klp_func *funcs; + struct klp_callbacks callbacks; + + /* internal */ + struct kobject kobj; + struct module *mod; + bool patched; +}; + +/** + * struct klp_patch - patch structure for live patching + * @mod: reference to the live patch module + * @objs: object entries for kernel objects to be patched + * @list: list node for global list of registered patches + * @kobj: kobject for sysfs resources + * @enabled: the patch is enabled (but operation may be incomplete) + * @finish: for waiting till it is safe to remove the patch module + */ +struct klp_patch { + /* external */ + struct module *mod; + struct klp_object *objs; + + /* internal */ + struct list_head list; + struct kobject kobj; + bool enabled; + struct completion finish; +}; + +#define klp_for_each_object(patch, obj) \ + for (obj = patch->objs; obj->funcs || obj->name; obj++) + +#define klp_for_each_func(obj, func) \ + for (func = obj->funcs; \ + func->old_name || func->new_func || func->old_sympos; \ + func++) + +int klp_register_patch(struct klp_patch *); +int klp_unregister_patch(struct klp_patch *); +int klp_enable_patch(struct klp_patch *); +int klp_disable_patch(struct klp_patch *); + +void arch_klp_init_object_loaded(struct klp_patch *patch, + struct klp_object *obj); + +/* Called from the module loader during module coming/going states */ +int klp_module_coming(struct module *mod); +void klp_module_going(struct module *mod); + +void klp_copy_process(struct task_struct *child); +void klp_update_patch_state(struct task_struct *task); + +static inline bool klp_patch_pending(struct task_struct *task) +{ + return test_tsk_thread_flag(task, TIF_PATCH_PENDING); +} + +static inline bool klp_have_reliable_stack(void) +{ + return IS_ENABLED(CONFIG_STACKTRACE) && + IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); +} + +typedef int (*klp_shadow_ctor_t)(void *obj, + void *shadow_data, + void *ctor_data); +typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data); + +void *klp_shadow_get(void *obj, unsigned long id); +void *klp_shadow_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data); +void *klp_shadow_get_or_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data); +void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor); +void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); + +#else /* !CONFIG_LIVEPATCH */ + +static inline int klp_module_coming(struct module *mod) { return 0; } +static inline void klp_module_going(struct module *mod) {} +static inline bool klp_patch_pending(struct task_struct *task) { return false; } +static inline void klp_update_patch_state(struct task_struct *task) {} +static inline void klp_copy_process(struct task_struct 
*child) {} + +#endif /* CONFIG_LIVEPATCH */ + +#endif /* _LINUX_LIVEPATCH_H_ */ diff --git a/include/linux/llc.h b/include/linux/llc.h new file mode 100644 index 000000000..b965314d0 --- /dev/null +++ b/include/linux/llc.h @@ -0,0 +1,23 @@ +/* + * IEEE 802.2 User Interface SAPs for Linux, data structures and indicators. + * + * Copyright (c) 2001 by Jay Schulist + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ +#ifndef __LINUX_LLC_H +#define __LINUX_LLC_H + +#include + +#define LLC_SAP_DYN_START 0xC0 +#define LLC_SAP_DYN_STOP 0xDE +#define LLC_SAP_DYN_TRIES 4 + +#define llc_ui_skb_cb(__skb) ((struct sockaddr_llc *)&((__skb)->cb[0])) +#endif /* __LINUX_LLC_H */ diff --git a/include/linux/llist.h b/include/linux/llist.h new file mode 100644 index 000000000..85abc2915 --- /dev/null +++ b/include/linux/llist.h @@ -0,0 +1,241 @@ +#ifndef LLIST_H +#define LLIST_H +/* + * Lock-less NULL terminated single linked list + * + * Cases where locking is not needed: + * If there are multiple producers and multiple consumers, llist_add can be + * used in producers and llist_del_all can be used in consumers simultaneously + * without locking. Also a single consumer can use llist_del_first while + * multiple producers simultaneously use llist_add, without any locking. + * + * Cases where locking is needed: + * If we have multiple consumers with llist_del_first used in one consumer, and + * llist_del_first or llist_del_all used in other consumers, then a lock is + * needed. This is because llist_del_first depends on list->first->next not + * changing, but without lock protection, there's no way to be sure about that + * if a preemption happens in the middle of the delete operation and on being + * preempted back, the list->first is the same as before causing the cmpxchg in + * llist_del_first to succeed. For example, while a llist_del_first operation + * is in progress in one consumer, then a llist_del_first, llist_add, + * llist_add (or llist_del_all, llist_add, llist_add) sequence in another + * consumer may cause violations. + * + * This can be summarized as follows: + * + * | add | del_first | del_all + * add | - | - | - + * del_first | | L | L + * del_all | | | - + * + * Where, a particular row's operation can happen concurrently with a column's + * operation, with "-" being no lock needed, while "L" being lock is needed. + * + * The list entries deleted via llist_del_all can be traversed with + * traversing function such as llist_for_each etc. But the list + * entries can not be traversed safely before deleted from the list. + * The order of deleted entries is from the newest to the oldest added + * one. If you want to traverse from the oldest to the newest, you + * must reverse the order by yourself before traversing. + * + * The basic atomic operation of this list is cmpxchg on long. On + * architectures that don't have NMI-safe cmpxchg implementation, the + * list can NOT be used in NMI handlers. So code that uses the list in + * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. + * + * Copyright 2010,2011 Intel Corp. 
+ * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include + +struct llist_head { + struct llist_node *first; +}; + +struct llist_node { + struct llist_node *next; +}; + +#define LLIST_HEAD_INIT(name) { NULL } +#define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name) + +/** + * init_llist_head - initialize lock-less list head + * @head: the head for your lock-less list + */ +static inline void init_llist_head(struct llist_head *list) +{ + list->first = NULL; +} + +/** + * llist_entry - get the struct of this entry + * @ptr: the &struct llist_node pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the llist_node within the struct. + */ +#define llist_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * member_address_is_nonnull - check whether the member address is not NULL + * @ptr: the object pointer (struct type * that contains the llist_node) + * @member: the name of the llist_node within the struct. + * + * This macro is conceptually the same as + * &ptr->member != NULL + * but it works around the fact that compilers can decide that taking a member + * address is never a NULL pointer. + * + * Real objects that start at a high address and have a member at NULL are + * unlikely to exist, but such pointers may be returned e.g. by the + * container_of() macro. + */ +#define member_address_is_nonnull(ptr, member) \ + ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0) + +/** + * llist_for_each - iterate over some deleted entries of a lock-less list + * @pos: the &struct llist_node to use as a loop cursor + * @node: the first entry of deleted list entries + * + * In general, some entries of the lock-less list can be traversed + * safely only after being deleted from list, so start with an entry + * instead of list head. + * + * If being used on entries deleted from lock-less list directly, the + * traverse order is from the newest to the oldest added entry. If + * you want to traverse from the oldest to the newest, you must + * reverse the order by yourself before traversing. + */ +#define llist_for_each(pos, node) \ + for ((pos) = (node); pos; (pos) = (pos)->next) + +/** + * llist_for_each_safe - iterate over some deleted entries of a lock-less list + * safe against removal of list entry + * @pos: the &struct llist_node to use as a loop cursor + * @n: another &struct llist_node to use as temporary storage + * @node: the first entry of deleted list entries + * + * In general, some entries of the lock-less list can be traversed + * safely only after being deleted from list, so start with an entry + * instead of list head. + * + * If being used on entries deleted from lock-less list directly, the + * traverse order is from the newest to the oldest added entry. 
If + * you want to traverse from the oldest to the newest, you must + * reverse the order by yourself before traversing. + */ +#define llist_for_each_safe(pos, n, node) \ + for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n)) + +/** + * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type + * @pos: the type * to use as a loop cursor. + * @node: the fist entry of deleted list entries. + * @member: the name of the llist_node with the struct. + * + * In general, some entries of the lock-less list can be traversed + * safely only after being removed from list, so start with an entry + * instead of list head. + * + * If being used on entries deleted from lock-less list directly, the + * traverse order is from the newest to the oldest added entry. If + * you want to traverse from the oldest to the newest, you must + * reverse the order by yourself before traversing. + */ +#define llist_for_each_entry(pos, node, member) \ + for ((pos) = llist_entry((node), typeof(*(pos)), member); \ + member_address_is_nonnull(pos, member); \ + (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) + +/** + * llist_for_each_entry_safe - iterate over some deleted entries of lock-less list of given type + * safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @node: the first entry of deleted list entries. + * @member: the name of the llist_node with the struct. + * + * In general, some entries of the lock-less list can be traversed + * safely only after being removed from list, so start with an entry + * instead of list head. + * + * If being used on entries deleted from lock-less list directly, the + * traverse order is from the newest to the oldest added entry. If + * you want to traverse from the oldest to the newest, you must + * reverse the order by yourself before traversing. + */ +#define llist_for_each_entry_safe(pos, n, node, member) \ + for (pos = llist_entry((node), typeof(*pos), member); \ + member_address_is_nonnull(pos, member) && \ + (n = llist_entry(pos->member.next, typeof(*n), member), true); \ + pos = n) + +/** + * llist_empty - tests whether a lock-less list is empty + * @head: the list to test + * + * Not guaranteed to be accurate or up to date. Just a quick way to + * test whether the list is empty without deleting something from the + * list. + */ +static inline bool llist_empty(const struct llist_head *head) +{ + return READ_ONCE(head->first) == NULL; +} + +static inline struct llist_node *llist_next(struct llist_node *node) +{ + return node->next; +} + +extern bool llist_add_batch(struct llist_node *new_first, + struct llist_node *new_last, + struct llist_head *head); +/** + * llist_add - add a new entry + * @new: new entry to be added + * @head: the head for your lock-less list + * + * Returns true if the list was empty prior to adding this entry. + */ +static inline bool llist_add(struct llist_node *new, struct llist_head *head) +{ + return llist_add_batch(new, new, head); +} + +/** + * llist_del_all - delete all entries from lock-less list + * @head: the head of lock-less list to delete all entries + * + * If list is empty, return NULL, otherwise, delete all entries and + * return the pointer to the first entry. The order of entries + * deleted is from the newest to the oldest added one. 
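As a minimal usage sketch of the pattern the llist comments above describe -- any number of producers call llist_add() without locking while a single consumer detaches the whole list with llist_del_all() and walks it newest-first -- the following is illustrative and not part of this header; struct item, queue_item(), drain_items() and process() are made-up names:

struct item {
        struct llist_node node;
        int payload;
};

static LLIST_HEAD(pending);                     /* shared lock-less list */

/* producer side: safe from any context, concurrently, without a lock */
static void queue_item(struct item *it)
{
        llist_add(&it->node, &pending);
}

/* single consumer: detach everything at once, then walk newest-first */
static void drain_items(void)
{
        struct llist_node *first = llist_del_all(&pending);
        struct item *it;

        llist_for_each_entry(it, first, node)
                process(it);                    /* process() is hypothetical */
}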
+ */ +static inline struct llist_node *llist_del_all(struct llist_head *head) +{ + return xchg(&head->first, NULL); +} + +extern struct llist_node *llist_del_first(struct llist_head *head); + +struct llist_node *llist_reverse_order(struct llist_node *head); + +#endif /* LLIST_H */ diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h new file mode 100644 index 000000000..053a4ef3d --- /dev/null +++ b/include/linux/lockd/bind.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/lockd/bind.h + * + * This is the part of lockd visible to nfsd and the nfs client. + * + * Copyright (C) 1996, Olaf Kirch + */ + +#ifndef LINUX_LOCKD_BIND_H +#define LINUX_LOCKD_BIND_H + +#include +/* need xdr-encoded error codes too, so... */ +#include +#ifdef CONFIG_LOCKD_V4 +#include +#endif + +/* Dummy declarations */ +struct svc_rqst; +struct rpc_task; + +/* + * This is the set of functions for lockd->nfsd communication + */ +struct nlmsvc_binding { + __be32 (*fopen)(struct svc_rqst *, + struct nfs_fh *, + struct file **); + void (*fclose)(struct file *); +}; + +extern const struct nlmsvc_binding *nlmsvc_ops; + +/* + * Similar to nfs_client_initdata, but without the NFS-specific + * rpc_ops field. + */ +struct nlmclnt_initdata { + const char *hostname; + const struct sockaddr *address; + size_t addrlen; + unsigned short protocol; + u32 nfs_version; + int noresvport; + struct net *net; + const struct nlmclnt_operations *nlmclnt_ops; +}; + +/* + * Functions exported by the lockd module + */ + +extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init); +extern void nlmclnt_done(struct nlm_host *host); + +/* + * NLM client operations provide a means to modify RPC processing of NLM + * requests. Callbacks receive a pointer to data passed into the call to + * nlmclnt_proc(). + */ +struct nlmclnt_operations { + /* Called on successful allocation of nlm_rqst, use for allocation or + * reference counting. */ + void (*nlmclnt_alloc_call)(void *); + + /* Called in rpc_task_prepare for unlock. A return value of true + * indicates the callback has put the task to sleep on a waitqueue + * and NLM should not call rpc_call_start(). */ + bool (*nlmclnt_unlock_prepare)(struct rpc_task*, void *); + + /* Called when the nlm_rqst is freed, callbacks should clean up here */ + void (*nlmclnt_release_call)(void *); +}; + +extern int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data); +extern int lockd_up(struct net *net); +extern void lockd_down(struct net *net); + +#endif /* LINUX_LOCKD_BIND_H */ diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h new file mode 100644 index 000000000..e536c5798 --- /dev/null +++ b/include/linux/lockd/debug.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/lockd/debug.h + * + * Debugging stuff. + * + * Copyright (C) 1996 Olaf Kirch + */ + +#ifndef LINUX_LOCKD_DEBUG_H +#define LINUX_LOCKD_DEBUG_H + +#ifdef __KERNEL__ + +#include + +/* + * Enable lockd debugging. + * Requires RPC_DEBUG. 
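For illustration of the lockd/bind.h interface shown above: an NFS-client-style caller fills in an nlmclnt_initdata and hands it to nlmclnt_init(), later releasing the handle with nlmclnt_done(). This is only a sketch with placeholder values, assuming the usual ERR_PTR() error convention; bind_lockd() is not a real kernel function:

static struct nlm_host *bind_lockd(struct net *net, const struct sockaddr *addr,
                                   size_t addrlen, const char *hostname)
{
        struct nlmclnt_initdata nlm_init = {
                .hostname       = hostname,
                .address        = addr,
                .addrlen        = addrlen,
                .protocol       = IPPROTO_TCP,
                .nfs_version    = 3,            /* NFSv3 pairs with NLM version 4 */
                .noresvport     = 0,
                .net            = net,
                .nlmclnt_ops    = NULL,         /* no callbacks in this sketch */
        };
        struct nlm_host *host = nlmclnt_init(&nlm_init);

        if (IS_ERR(host))
                return host;
        /* ... lock requests go through nlmclnt_proc(host, cmd, fl, data) ... */
        return host;                            /* eventually released via nlmclnt_done(host) */
}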
+ */ +#undef ifdebug +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +# define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) +#else +# define ifdebug(flag) if (0) +#endif + +#endif /* __KERNEL__ */ + +/* + * Debug flags + */ +#define NLMDBG_SVC 0x0001 +#define NLMDBG_CLIENT 0x0002 +#define NLMDBG_CLNTLOCK 0x0004 +#define NLMDBG_SVCLOCK 0x0008 +#define NLMDBG_MONITOR 0x0010 +#define NLMDBG_CLNTSUBS 0x0020 +#define NLMDBG_SVCSUBS 0x0040 +#define NLMDBG_HOSTCACHE 0x0080 +#define NLMDBG_XDR 0x0100 +#define NLMDBG_ALL 0x7fff + +#endif /* LINUX_LOCKD_DEBUG_H */ diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h new file mode 100644 index 000000000..b065ef406 --- /dev/null +++ b/include/linux/lockd/lockd.h @@ -0,0 +1,374 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/lockd/lockd.h + * + * General-purpose lockd include file. + * + * Copyright (C) 1996 Olaf Kirch + */ + +#ifndef LINUX_LOCKD_LOCKD_H +#define LINUX_LOCKD_LOCKD_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_LOCKD_V4 +#include +#endif +#include +#include + +/* + * Version string + */ +#define LOCKD_VERSION "0.5" + +/* + * Default timeout for RPC calls (seconds) + */ +#define LOCKD_DFLT_TIMEO 10 + +/* + * Lockd host handle (used both by the client and server personality). + */ +struct nlm_host { + struct hlist_node h_hash; /* doubly linked list */ + struct sockaddr_storage h_addr; /* peer address */ + size_t h_addrlen; + struct sockaddr_storage h_srcaddr; /* our address (optional) */ + size_t h_srcaddrlen; + struct rpc_clnt *h_rpcclnt; /* RPC client to talk to peer */ + char *h_name; /* remote hostname */ + u32 h_version; /* interface version */ + unsigned short h_proto; /* transport proto */ + unsigned short h_reclaiming : 1, + h_server : 1, /* server side, not client side */ + h_noresvport : 1, + h_inuse : 1; + wait_queue_head_t h_gracewait; /* wait while reclaiming */ + struct rw_semaphore h_rwsem; /* Reboot recovery lock */ + u32 h_state; /* pseudo-state counter */ + u32 h_nsmstate; /* true remote NSM state */ + u32 h_pidcount; /* Pseudopids */ + refcount_t h_count; /* reference count */ + struct mutex h_mutex; /* mutex for pmap binding */ + unsigned long h_nextrebind; /* next portmap call */ + unsigned long h_expires; /* eligible for GC */ + struct list_head h_lockowners; /* Lockowners for the client */ + spinlock_t h_lock; + struct list_head h_granted; /* Locks in GRANTED state */ + struct list_head h_reclaim; /* Locks in RECLAIM state */ + struct nsm_handle *h_nsmhandle; /* NSM status handle */ + char *h_addrbuf; /* address eyecatcher */ + struct net *net; /* host net */ + char nodename[UNX_MAXNODENAME + 1]; + const struct nlmclnt_operations *h_nlmclnt_ops; /* Callback ops for NLM users */ +}; + +/* + * The largest string sm_addrbuf should hold is a full-size IPv6 address + * (no "::" anywhere) with a scope ID. The buffer size is computed to + * hold eight groups of colon-separated four-hex-digit numbers, a + * percent sign, a scope id (at most 32 bits, in decimal), and NUL. 
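A short usage sketch for the ifdebug() helper and the NLMDBG_* flags above; nlm_debug is lockd's runtime debug mask defined by the lockd/sunrpc code, and the function and message here are illustrative only:

static void trace_grant(const struct nlm_host *host)
{
        ifdebug(SVCLOCK)
                printk(KERN_DEBUG "lockd: granting blocked lock to %s\n",
                       host->h_name);
}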
+ */ +#define NSM_ADDRBUF ((8 * 4 + 7) + (1 + 10) + 1) + +struct nsm_handle { + struct list_head sm_link; + refcount_t sm_count; + char *sm_mon_name; + char *sm_name; + struct sockaddr_storage sm_addr; + size_t sm_addrlen; + unsigned int sm_monitored : 1, + sm_sticky : 1; /* don't unmonitor */ + struct nsm_private sm_priv; + char sm_addrbuf[NSM_ADDRBUF]; +}; + +/* + * Rigorous type checking on sockaddr type conversions + */ +static inline struct sockaddr_in *nlm_addr_in(const struct nlm_host *host) +{ + return (struct sockaddr_in *)&host->h_addr; +} + +static inline struct sockaddr *nlm_addr(const struct nlm_host *host) +{ + return (struct sockaddr *)&host->h_addr; +} + +static inline struct sockaddr_in *nlm_srcaddr_in(const struct nlm_host *host) +{ + return (struct sockaddr_in *)&host->h_srcaddr; +} + +static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host) +{ + return (struct sockaddr *)&host->h_srcaddr; +} + +/* + * Map an fl_owner_t into a unique 32-bit "pid" + */ +struct nlm_lockowner { + struct list_head list; + refcount_t count; + + struct nlm_host *host; + fl_owner_t owner; + uint32_t pid; +}; + +struct nlm_wait; + +/* + * Memory chunk for NLM client RPC request. + */ +#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u) +struct nlm_rqst { + refcount_t a_count; + unsigned int a_flags; /* initial RPC task flags */ + struct nlm_host * a_host; /* host handle */ + struct nlm_args a_args; /* arguments */ + struct nlm_res a_res; /* result */ + struct nlm_block * a_block; + unsigned int a_retries; /* Retry count */ + u8 a_owner[NLMCLNT_OHSIZE]; + void * a_callback_data; /* sent to nlmclnt_operations callbacks */ +}; + +/* + * This struct describes a file held open by lockd on behalf of + * an NFS client. + */ +struct nlm_file { + struct hlist_node f_list; /* linked list */ + struct nfs_fh f_handle; /* NFS file handle */ + struct file * f_file; /* VFS file pointer */ + struct nlm_share * f_shares; /* DOS shares */ + struct list_head f_blocks; /* blocked locks */ + unsigned int f_locks; /* guesstimate # of locks */ + unsigned int f_count; /* reference count */ + struct mutex f_mutex; /* avoid concurrent access */ +}; + +/* + * This is a server block (i.e. a lock requested by some client which + * couldn't be granted because of a conflicting lock). 
+ */ +#define NLM_NEVER (~(unsigned long) 0) +/* timeout on non-blocking call: */ +#define NLM_TIMEOUT (7 * HZ) + +struct nlm_block { + struct kref b_count; /* Reference count */ + struct list_head b_list; /* linked list of all blocks */ + struct list_head b_flist; /* linked list (per file) */ + struct nlm_rqst * b_call; /* RPC args & callback info */ + struct svc_serv * b_daemon; /* NLM service */ + struct nlm_host * b_host; /* host handle for RPC clnt */ + unsigned long b_when; /* next re-xmit */ + unsigned int b_id; /* block id */ + unsigned char b_granted; /* VFS granted lock */ + struct nlm_file * b_file; /* file in question */ + struct cache_req * b_cache_req; /* deferred request handling */ + struct cache_deferred_req * b_deferred_req; + unsigned int b_flags; /* block flags */ +#define B_QUEUED 1 /* lock queued */ +#define B_GOT_CALLBACK 2 /* got lock or conflicting lock */ +#define B_TIMED_OUT 4 /* filesystem too slow to respond */ +}; + +/* + * Global variables + */ +extern const struct rpc_program nlm_program; +extern const struct svc_procedure nlmsvc_procedures[]; +#ifdef CONFIG_LOCKD_V4 +extern const struct svc_procedure nlmsvc_procedures4[]; +#endif +extern int nlmsvc_grace_period; +extern unsigned long nlmsvc_timeout; +extern bool nsm_use_hostnames; +extern u32 nsm_local_state; + +/* + * Lockd client functions + */ +struct nlm_rqst * nlm_alloc_call(struct nlm_host *host); +int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *); +int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *); +void nlmclnt_release_call(struct nlm_rqst *); +struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl); +void nlmclnt_finish_block(struct nlm_wait *block); +int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout); +__be32 nlmclnt_grant(const struct sockaddr *addr, + const struct nlm_lock *lock); +void nlmclnt_recovery(struct nlm_host *); +int nlmclnt_reclaim(struct nlm_host *, struct file_lock *, + struct nlm_rqst *); +void nlmclnt_next_cookie(struct nlm_cookie *); + +/* + * Host cache + */ +struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, + const size_t salen, + const unsigned short protocol, + const u32 version, + const char *hostname, + int noresvport, + struct net *net); +void nlmclnt_release_host(struct nlm_host *); +struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, + const char *hostname, + const size_t hostname_len); +void nlmsvc_release_host(struct nlm_host *); +struct rpc_clnt * nlm_bind_host(struct nlm_host *); +void nlm_rebind_host(struct nlm_host *); +struct nlm_host * nlm_get_host(struct nlm_host *); +void nlm_shutdown_hosts(void); +void nlm_shutdown_hosts_net(struct net *net); +void nlm_host_rebooted(const struct net *net, + const struct nlm_reboot *); + +/* + * Host monitoring + */ +int nsm_monitor(const struct nlm_host *host); +void nsm_unmonitor(const struct nlm_host *host); + +struct nsm_handle *nsm_get_handle(const struct net *net, + const struct sockaddr *sap, + const size_t salen, + const char *hostname, + const size_t hostname_len); +struct nsm_handle *nsm_reboot_lookup(const struct net *net, + const struct nlm_reboot *info); +void nsm_release(struct nsm_handle *nsm); + +/* + * This is used in garbage collection and resource reclaim + * A return value != 0 means destroy the lock/block/share + */ +typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref); + +/* + * Server-side lock handling + */ +__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file 
*, + struct nlm_host *, struct nlm_lock *, int, + struct nlm_cookie *, int); +__be32 nlmsvc_unlock(struct net *net, struct nlm_file *, struct nlm_lock *); +__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, + struct nlm_host *, struct nlm_lock *, + struct nlm_lock *, struct nlm_cookie *); +__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *); +unsigned long nlmsvc_retry_blocked(void); +void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, + nlm_host_match_fn_t match); +void nlmsvc_grant_reply(struct nlm_cookie *, __be32); +void nlmsvc_release_call(struct nlm_rqst *); + +/* + * File handling for the server personality + */ +__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, + struct nfs_fh *); +void nlm_release_file(struct nlm_file *); +void nlmsvc_mark_resources(struct net *); +void nlmsvc_free_host_resources(struct nlm_host *); +void nlmsvc_invalidate_all(void); + +/* + * Cluster failover support + */ +int nlmsvc_unlock_all_by_sb(struct super_block *sb); +int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); + +static inline struct inode *nlmsvc_file_inode(struct nlm_file *file) +{ + return locks_inode(file->f_file); +} + +static inline int __nlm_privileged_request4(const struct sockaddr *sap) +{ + const struct sockaddr_in *sin = (struct sockaddr_in *)sap; + + if (ntohs(sin->sin_port) > 1023) + return 0; + + return ipv4_is_loopback(sin->sin_addr.s_addr); +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline int __nlm_privileged_request6(const struct sockaddr *sap) +{ + const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; + + if (ntohs(sin6->sin6_port) > 1023) + return 0; + + if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) + return ipv4_is_loopback(sin6->sin6_addr.s6_addr32[3]); + + return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK; +} +#else /* IS_ENABLED(CONFIG_IPV6) */ +static inline int __nlm_privileged_request6(const struct sockaddr *sap) +{ + return 0; +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + +/* + * Ensure incoming requests are from local privileged callers. + * + * Return TRUE if sender is local and is connecting via a privileged port; + * otherwise return FALSE. + */ +static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) +{ + const struct sockaddr *sap = svc_addr(rqstp); + + switch (sap->sa_family) { + case AF_INET: + return __nlm_privileged_request4(sap); + case AF_INET6: + return __nlm_privileged_request6(sap); + default: + return 0; + } +} + +/* + * Compare two NLM locks. + * When the second lock is of type F_UNLCK, this acts like a wildcard. + */ +static inline int nlm_compare_locks(const struct file_lock *fl1, + const struct file_lock *fl2) +{ + return locks_inode(fl1->fl_file) == locks_inode(fl2->fl_file) + && fl1->fl_pid == fl2->fl_pid + && fl1->fl_owner == fl2->fl_owner + && fl1->fl_start == fl2->fl_start + && fl1->fl_end == fl2->fl_end + &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK); +} + +extern const struct lock_manager_operations nlmsvc_lock_operations; + +#endif /* __KERNEL__ */ + +#endif /* LINUX_LOCKD_LOCKD_H */ diff --git a/include/linux/lockd/nlm.h b/include/linux/lockd/nlm.h new file mode 100644 index 000000000..6e343ef76 --- /dev/null +++ b/include/linux/lockd/nlm.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/lockd/nlm.h + * + * Declarations for the Network Lock Manager protocol. 
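The nlm_privileged_requester() helper defined above in lockd.h is how lockd restricts sensitive operations (such as statd reboot notifications) to local callers on privileged ports. A simplified sketch of such a gate -- the procedure body and its status handling are reduced to comments, and handle_sm_notify() is an illustrative name:

static __be32 handle_sm_notify(struct svc_rqst *rqstp)
{
        if (!nlm_privileged_requester(rqstp)) {
                /* not a local, privileged-port caller: refuse the notification */
                return rpc_system_err;
        }
        /* ... decode the struct nlm_reboot and call nlm_host_rebooted() ... */
        return rpc_success;
}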
+ * + * Copyright (C) 1996, Olaf Kirch + */ + +#ifndef LINUX_LOCKD_NLM_H +#define LINUX_LOCKD_NLM_H + + +/* Maximum file offset in file_lock.fl_end */ +# define NLM_OFFSET_MAX ((s32) 0x7fffffff) +# define NLM4_OFFSET_MAX ((s64) ((~(u64)0) >> 1)) + +/* Return states for NLM */ +enum { + NLM_LCK_GRANTED = 0, + NLM_LCK_DENIED = 1, + NLM_LCK_DENIED_NOLOCKS = 2, + NLM_LCK_BLOCKED = 3, + NLM_LCK_DENIED_GRACE_PERIOD = 4, +#ifdef CONFIG_LOCKD_V4 + NLM_DEADLCK = 5, + NLM_ROFS = 6, + NLM_STALE_FH = 7, + NLM_FBIG = 8, + NLM_FAILED = 9, +#endif +}; + +#define NLM_PROGRAM 100021 + +#define NLMPROC_NULL 0 +#define NLMPROC_TEST 1 +#define NLMPROC_LOCK 2 +#define NLMPROC_CANCEL 3 +#define NLMPROC_UNLOCK 4 +#define NLMPROC_GRANTED 5 +#define NLMPROC_TEST_MSG 6 +#define NLMPROC_LOCK_MSG 7 +#define NLMPROC_CANCEL_MSG 8 +#define NLMPROC_UNLOCK_MSG 9 +#define NLMPROC_GRANTED_MSG 10 +#define NLMPROC_TEST_RES 11 +#define NLMPROC_LOCK_RES 12 +#define NLMPROC_CANCEL_RES 13 +#define NLMPROC_UNLOCK_RES 14 +#define NLMPROC_GRANTED_RES 15 +#define NLMPROC_NSM_NOTIFY 16 /* statd callback */ +#define NLMPROC_SHARE 20 +#define NLMPROC_UNSHARE 21 +#define NLMPROC_NM_LOCK 22 +#define NLMPROC_FREE_ALL 23 + +#endif /* LINUX_LOCKD_NLM_H */ diff --git a/include/linux/lockd/share.h b/include/linux/lockd/share.h new file mode 100644 index 000000000..1f18a9faf --- /dev/null +++ b/include/linux/lockd/share.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/lockd/share.h + * + * DOS share management for lockd. + * + * Copyright (C) 1996, Olaf Kirch + */ + +#ifndef LINUX_LOCKD_SHARE_H +#define LINUX_LOCKD_SHARE_H + +/* + * DOS share for a specific file + */ +struct nlm_share { + struct nlm_share * s_next; /* linked list */ + struct nlm_host * s_host; /* client host */ + struct nlm_file * s_file; /* shared file */ + struct xdr_netobj s_owner; /* owner handle */ + u32 s_access; /* access mode */ + u32 s_mode; /* deny mode */ +}; + +__be32 nlmsvc_share_file(struct nlm_host *, struct nlm_file *, + struct nlm_args *); +__be32 nlmsvc_unshare_file(struct nlm_host *, struct nlm_file *, + struct nlm_args *); +void nlmsvc_traverse_shares(struct nlm_host *, struct nlm_file *, + nlm_host_match_fn_t); + +#endif /* LINUX_LOCKD_SHARE_H */ diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h new file mode 100644 index 000000000..7ab9f2643 --- /dev/null +++ b/include/linux/lockd/xdr.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/lockd/xdr.h + * + * XDR types for the NLM protocol + * + * Copyright (C) 1996 Olaf Kirch + */ + +#ifndef LOCKD_XDR_H +#define LOCKD_XDR_H + +#include +#include +#include + +#define SM_MAXSTRLEN 1024 +#define SM_PRIV_SIZE 16 + +struct nsm_private { + unsigned char data[SM_PRIV_SIZE]; +}; + +struct svc_rqst; + +#define NLM_MAXCOOKIELEN 32 +#define NLM_MAXSTRLEN 1024 + +#define nlm_granted cpu_to_be32(NLM_LCK_GRANTED) +#define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED) +#define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS) +#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED) +#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD) + +#define nlm_drop_reply cpu_to_be32(30000) + +/* Lock info passed via NLM */ +struct nlm_lock { + char * caller; + unsigned int len; /* length of "caller" */ + struct nfs_fh fh; + struct xdr_netobj oh; + u32 svid; + struct file_lock fl; +}; + +/* + * NLM cookies. Technically they can be 1K, but Linux only uses 8 bytes. + * FreeBSD uses 16, Apple Mac OS X 10.3 uses 20. 
Therefore we set it to + * 32 bytes. + */ + +struct nlm_cookie +{ + unsigned char data[NLM_MAXCOOKIELEN]; + unsigned int len; +}; + +/* + * Generic lockd arguments for all but sm_notify + */ +struct nlm_args { + struct nlm_cookie cookie; + struct nlm_lock lock; + u32 block; + u32 reclaim; + u32 state; + u32 monitor; + u32 fsm_access; + u32 fsm_mode; +}; + +typedef struct nlm_args nlm_args; + +/* + * Generic lockd result + */ +struct nlm_res { + struct nlm_cookie cookie; + __be32 status; + struct nlm_lock lock; +}; + +/* + * statd callback when client has rebooted + */ +struct nlm_reboot { + char *mon; + unsigned int len; + u32 state; + struct nsm_private priv; +}; + +/* + * Contents of statd callback when monitored host rebooted + */ +#define NLMSVC_XDRSIZE sizeof(struct nlm_args) + +int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *); +int nlmsvc_encode_testres(struct svc_rqst *, __be32 *); +int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *); +int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *); +int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *); +int nlmsvc_encode_res(struct svc_rqst *, __be32 *); +int nlmsvc_decode_res(struct svc_rqst *, __be32 *); +int nlmsvc_encode_void(struct svc_rqst *, __be32 *); +int nlmsvc_decode_void(struct svc_rqst *, __be32 *); +int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *); +int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *); +int nlmsvc_decode_notify(struct svc_rqst *, __be32 *); +int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *); +/* +int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); + */ + +#endif /* LOCKD_XDR_H */ diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h new file mode 100644 index 000000000..e709fe592 --- /dev/null +++ b/include/linux/lockd/xdr4.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/lockd/xdr4.h + * + * XDR types for the NLM protocol + * + * Copyright (C) 1996 Olaf Kirch + */ + +#ifndef LOCKD_XDR4_H +#define LOCKD_XDR4_H + +#include +#include +#include +#include + +/* error codes new to NLMv4 */ +#define nlm4_deadlock cpu_to_be32(NLM_DEADLCK) +#define nlm4_rofs cpu_to_be32(NLM_ROFS) +#define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH) +#define nlm4_fbig cpu_to_be32(NLM_FBIG) +#define nlm4_failed cpu_to_be32(NLM_FAILED) + + + +int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *); +int nlm4svc_encode_testres(struct svc_rqst *, __be32 *); +int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *); +int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *); +int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *); +int nlm4svc_encode_res(struct svc_rqst *, __be32 *); +int nlm4svc_decode_res(struct svc_rqst *, __be32 *); +int nlm4svc_encode_void(struct svc_rqst *, __be32 *); +int nlm4svc_decode_void(struct svc_rqst *, __be32 *); +int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *); +int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *); +int nlm4svc_decode_notify(struct svc_rqst *, __be32 *); +int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *); +/* +int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int 
nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); + */ +extern const struct rpc_version nlm_version4; + +#endif /* LOCKD_XDR4_H */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h new file mode 100644 index 000000000..b0d0b51c4 --- /dev/null +++ b/include/linux/lockdep.h @@ -0,0 +1,627 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Runtime locking correctness validator + * + * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * + * see Documentation/locking/lockdep-design.txt for more details. + */ +#ifndef __LINUX_LOCKDEP_H +#define __LINUX_LOCKDEP_H + +struct task_struct; +struct lockdep_map; + +/* for sysctl */ +extern int prove_locking; +extern int lock_stat; + +#define MAX_LOCKDEP_SUBCLASSES 8UL + +#include + +#ifdef CONFIG_LOCKDEP + +#include +#include +#include +#include + +/* + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need + * the total number of states... :-( + */ +#define XXX_LOCK_USAGE_STATES (1+2*4) + +/* + * NR_LOCKDEP_CACHING_CLASSES ... Number of classes + * cached in the instance of lockdep_map + * + * Currently main class (subclass == 0) and signle depth subclass + * are cached in lockdep_map. This optimization is mainly targeting + * on rq->lock. double_rq_lock() acquires this highly competitive with + * single depth. + */ +#define NR_LOCKDEP_CACHING_CLASSES 2 + +/* + * Lock-classes are keyed via unique addresses, by embedding the + * lockclass-key into the kernel (or module) .data section. (For + * static locks we use the lock address itself as the key.) + */ +struct lockdep_subclass_key { + char __one_byte; +} __attribute__ ((__packed__)); + +struct lock_class_key { + struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; +}; + +extern struct lock_class_key __lockdep_no_validate__; + +#define LOCKSTAT_POINTS 4 + +/* + * The lock-class itself: + */ +struct lock_class { + /* + * class-hash: + */ + struct hlist_node hash_entry; + + /* + * global list of all lock-classes: + */ + struct list_head lock_entry; + + struct lockdep_subclass_key *key; + unsigned int subclass; + unsigned int dep_gen_id; + + /* + * IRQ/softirq usage tracking bits: + */ + unsigned long usage_mask; + struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; + + /* + * These fields represent a directed graph of lock dependencies, + * to every node we attach a list of "forward" and a list of + * "backward" graph nodes. 
+ */ + struct list_head locks_after, locks_before; + + /* + * Generation counter, when doing certain classes of graph walking, + * to ensure that we check one node only once: + */ + unsigned int version; + + /* + * Statistics counter: + */ + unsigned long ops; + + const char *name; + int name_version; + +#ifdef CONFIG_LOCK_STAT + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; +#endif +}; + +#ifdef CONFIG_LOCK_STAT +struct lock_time { + s64 min; + s64 max; + s64 total; + unsigned long nr; +}; + +enum bounce_type { + bounce_acquired_write, + bounce_acquired_read, + bounce_contended_write, + bounce_contended_read, + nr_bounce_types, + + bounce_acquired = bounce_acquired_write, + bounce_contended = bounce_contended_write, +}; + +struct lock_class_stats { + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; + struct lock_time read_waittime; + struct lock_time write_waittime; + struct lock_time read_holdtime; + struct lock_time write_holdtime; + unsigned long bounces[nr_bounce_types]; +}; + +struct lock_class_stats lock_stats(struct lock_class *class); +void clear_lock_stats(struct lock_class *class); +#endif + +/* + * Map the lock object (the lock instance) to the lock-class object. + * This is embedded into specific lock instances: + */ +struct lockdep_map { + struct lock_class_key *key; + struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; + const char *name; +#ifdef CONFIG_LOCK_STAT + int cpu; + unsigned long ip; +#endif +}; + +static inline void lockdep_copy_map(struct lockdep_map *to, + struct lockdep_map *from) +{ + int i; + + *to = *from; + /* + * Since the class cache can be modified concurrently we could observe + * half pointers (64bit arch using 32bit copy insns). Therefore clear + * the caches and take the performance hit. + * + * XXX it doesn't work well with lockdep_set_class_and_subclass(), since + * that relies on cache abuse. + */ + for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) + to->class_cache[i] = NULL; +} + +/* + * Every lock has a list of other locks that were taken after it. + * We only grow the list, never remove from it: + */ +struct lock_list { + struct list_head entry; + struct lock_class *class; + struct stack_trace trace; + int distance; + + /* + * The parent field is used to implement breadth-first search, and the + * bit 0 is reused to indicate if the lock has been accessed in BFS. + */ + struct lock_list *parent; +}; + +/* + * We record lock dependency chains, so that we can cache them: + */ +struct lock_chain { + /* see BUILD_BUG_ON()s in lookup_chain_cache() */ + unsigned int irq_context : 2, + depth : 6, + base : 24; + /* 4 byte hole */ + struct hlist_node entry; + u64 chain_key; +}; + +#define MAX_LOCKDEP_KEYS_BITS 13 +/* + * Subtract one because we offset hlock->class_idx by 1 in order + * to make 0 mean no class. This avoids overflowing the class_idx + * bitfield and hitting the BUG in hlock_class(). + */ +#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) + +struct held_lock { + /* + * One-way hash of the dependency chain up to this point. We + * hash the hashes step by step as the dependency chain grows. 
+ * + * We use it for dependency-caching and we skip detection + * passes and dependency-updates if there is a cache-hit, so + * it is absolutely critical for 100% coverage of the validator + * to have a unique key value for every unique dependency path + * that can occur in the system, to make a unique hash value + * as likely as possible - hence the 64-bit width. + * + * The task struct holds the current hash value (initialized + * with zero), here we store the previous hash value: + */ + u64 prev_chain_key; + unsigned long acquire_ip; + struct lockdep_map *instance; + struct lockdep_map *nest_lock; +#ifdef CONFIG_LOCK_STAT + u64 waittime_stamp; + u64 holdtime_stamp; +#endif + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; + /* + * The lock-stack is unified in that the lock chains of interrupt + * contexts nest ontop of process context chains, but we 'separate' + * the hashes by starting with 0 if we cross into an interrupt + * context, and we also keep do not add cross-context lock + * dependencies - the lock usage graph walking covers that area + * anyway, and we'd just unnecessarily increase the number of + * dependencies otherwise. [Note: hardirq and softirq contexts + * are separated from each other too.] + * + * The following field is used to detect when we cross into an + * interrupt context: + */ + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ + unsigned int trylock:1; /* 16 bits */ + + unsigned int read:2; /* see lock_acquire() comment */ + unsigned int check:1; /* see lock_acquire() comment */ + unsigned int hardirqs_off:1; + unsigned int references:12; /* 32 bits */ + unsigned int pin_count; +}; + +/* + * Initialization, self-test and debugging-output methods: + */ +extern void lockdep_init(void); +extern void lockdep_reset(void); +extern void lockdep_reset_lock(struct lockdep_map *lock); +extern void lockdep_free_key_range(void *start, unsigned long size); +extern asmlinkage void lockdep_sys_exit(void); + +extern void lockdep_off(void); +extern void lockdep_on(void); + +/* + * These methods are used by specific locking variants (spinlocks, + * rwlocks, mutexes and rwsems) to pass init/acquire/release events + * to lockdep: + */ + +extern void lockdep_init_map(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass); + +/* + * Reinitialize a lock key - for cases where there is special locking or + * special initialization of locks so that the validator gets the scope + * of dependencies wrong: they are either too broad (they need a class-split) + * or they are too narrow (they suffer from a false class-split): + */ +#define lockdep_set_class(lock, key) \ + lockdep_init_map(&(lock)->dep_map, #key, key, 0) +#define lockdep_set_class_and_name(lock, key, name) \ + lockdep_init_map(&(lock)->dep_map, name, key, 0) +#define lockdep_set_class_and_subclass(lock, key, sub) \ + lockdep_init_map(&(lock)->dep_map, #key, key, sub) +#define lockdep_set_subclass(lock, sub) \ + lockdep_init_map(&(lock)->dep_map, #lock, \ + (lock)->dep_map.key, sub) + +#define lockdep_set_novalidate_class(lock) \ + lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) +/* + * Compare locking classes + */ +#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key) + +static inline int lockdep_match_key(struct lockdep_map *lock, + struct lock_class_key *key) +{ + return lock->key == key; +} + +/* + * Acquire a lock. 
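As a sketch of the class-key machinery above: code that needs a lock in its own lockdep class pairs a static struct lock_class_key with lockdep_set_class() right after initializing the lock. struct foo, its fields and foo_init() are illustrative names, reused by the later sketches:

struct foo {
        spinlock_t      lock;
        int             value;          /* protected by ->lock */
};

static struct lock_class_key foo_lock_key;      /* one key == one lock class */

static void foo_init(struct foo *f)
{
        spin_lock_init(&f->lock);
        lockdep_set_class(&f->lock, &foo_lock_key);
}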
+ * + * Values for "read": + * + * 0: exclusive (write) acquire + * 1: read-acquire (no recursion allowed) + * 2: read-acquire with same-instance recursion allowed + * + * Values for check: + * + * 0: simple checks (freeing, held-at-exit-time, etc.) + * 1: full validation + */ +extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, + struct lockdep_map *nest_lock, unsigned long ip); + +extern void lock_release(struct lockdep_map *lock, int nested, + unsigned long ip); + +/* + * Same "read" as for lock_acquire(), except -1 means any. + */ +extern int lock_is_held_type(const struct lockdep_map *lock, int read); + +static inline int lock_is_held(const struct lockdep_map *lock) +{ + return lock_is_held_type(lock, -1); +} + +#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) +#define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r)) + +extern void lock_set_class(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, unsigned int subclass, + unsigned long ip); + +static inline void lock_set_subclass(struct lockdep_map *lock, + unsigned int subclass, unsigned long ip) +{ + lock_set_class(lock, lock->name, lock->key, subclass, ip); +} + +extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip); + +struct pin_cookie { unsigned int val; }; + +#define NIL_COOKIE (struct pin_cookie){ .val = 0U, } + +extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock); +extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie); +extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); + +#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) + +#define lockdep_assert_held(l) do { \ + WARN_ON(debug_locks && !lockdep_is_held(l)); \ + } while (0) + +#define lockdep_assert_held_exclusive(l) do { \ + WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ + } while (0) + +#define lockdep_assert_held_read(l) do { \ + WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \ + } while (0) + +#define lockdep_assert_held_once(l) do { \ + WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ + } while (0) + +#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) + +#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) +#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c)) +#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c)) + +#else /* !CONFIG_LOCKDEP */ + +static inline void lockdep_off(void) +{ +} + +static inline void lockdep_on(void) +{ +} + +# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) +# define lock_release(l, n, i) do { } while (0) +# define lock_downgrade(l, i) do { } while (0) +# define lock_set_class(l, n, k, s, i) do { } while (0) +# define lock_set_subclass(l, s, i) do { } while (0) +# define lockdep_init() do { } while (0) +# define lockdep_init_map(lock, name, key, sub) \ + do { (void)(name); (void)(key); } while (0) +# define lockdep_set_class(lock, key) do { (void)(key); } while (0) +# define lockdep_set_class_and_name(lock, key, name) \ + do { (void)(key); (void)(name); } while (0) +#define lockdep_set_class_and_subclass(lock, key, sub) \ + do { (void)(key); } while (0) +#define lockdep_set_subclass(lock, sub) do { } while (0) + +#define lockdep_set_novalidate_class(lock) do { } while (0) + +/* + * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP + * case since the result is not well defined and the caller should rather + * #ifdef the call himself. 
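The assertion helpers above let locking rules live in code rather than in comments; lockdep warns if the rule is violated while debug_locks is on. A minimal sketch reusing the illustrative struct foo from the previous sketch:

/* caller must hold f->lock */
static void foo_update(struct foo *f, int value)
{
        lockdep_assert_held(&f->lock);
        f->value = value;
}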
+ */ + +# define lockdep_reset() do { debug_locks = 1; } while (0) +# define lockdep_free_key_range(start, size) do { } while (0) +# define lockdep_sys_exit() do { } while (0) +/* + * The class key takes no space if lockdep is disabled: + */ +struct lock_class_key { }; + +/* + * The lockdep_map takes no space if lockdep is disabled: + */ +struct lockdep_map { }; + +#define lockdep_depth(tsk) (0) + +#define lockdep_is_held_type(l, r) (1) + +#define lockdep_assert_held(l) do { (void)(l); } while (0) +#define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0) +#define lockdep_assert_held_read(l) do { (void)(l); } while (0) +#define lockdep_assert_held_once(l) do { (void)(l); } while (0) + +#define lockdep_recursing(tsk) (0) + +struct pin_cookie { }; + +#define NIL_COOKIE (struct pin_cookie){ } + +#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; }) +#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) +#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) + +#endif /* !LOCKDEP */ + +enum xhlock_context_t { + XHLOCK_HARD, + XHLOCK_SOFT, + XHLOCK_CTX_NR, +}; + +#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) +/* + * To initialize a lockdep_map statically use this macro. + * Note that _name must not be NULL. + */ +#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ + { .name = (_name), .key = (void *)(_key), } + +static inline void lockdep_invariant_state(bool force) {} +static inline void lockdep_init_task(struct task_struct *task) {} +static inline void lockdep_free_task(struct task_struct *task) {} + +#ifdef CONFIG_LOCK_STAT + +extern void lock_contended(struct lockdep_map *lock, unsigned long ip); +extern void lock_acquired(struct lockdep_map *lock, unsigned long ip); + +#define LOCK_CONTENDED(_lock, try, lock) \ +do { \ + if (!try(_lock)) { \ + lock_contended(&(_lock)->dep_map, _RET_IP_); \ + lock(_lock); \ + } \ + lock_acquired(&(_lock)->dep_map, _RET_IP_); \ +} while (0) + +#define LOCK_CONTENDED_RETURN(_lock, try, lock) \ +({ \ + int ____err = 0; \ + if (!try(_lock)) { \ + lock_contended(&(_lock)->dep_map, _RET_IP_); \ + ____err = lock(_lock); \ + } \ + if (!____err) \ + lock_acquired(&(_lock)->dep_map, _RET_IP_); \ + ____err; \ +}) + +#else /* CONFIG_LOCK_STAT */ + +#define lock_contended(lockdep_map, ip) do {} while (0) +#define lock_acquired(lockdep_map, ip) do {} while (0) + +#define LOCK_CONTENDED(_lock, try, lock) \ + lock(_lock) + +#define LOCK_CONTENDED_RETURN(_lock, try, lock) \ + lock(_lock) + +#endif /* CONFIG_LOCK_STAT */ + +#ifdef CONFIG_LOCKDEP + +/* + * On lockdep we dont want the hand-coded irq-enable of + * _raw_*_lock_flags() code, because lockdep assumes + * that interrupts are not re-enabled during lock-acquire: + */ +#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ + LOCK_CONTENDED((_lock), (try), (lock)) + +#else /* CONFIG_LOCKDEP */ + +#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ + lockfl((_lock), (flags)) + +#endif /* CONFIG_LOCKDEP */ + +#ifdef CONFIG_PROVE_LOCKING +extern void print_irqtrace_events(struct task_struct *curr); +#else +static inline void print_irqtrace_events(struct task_struct *curr) +{ +} +#endif + +/* + * For trivial one-depth nesting of a lock-class, the following + * global define can be used. (Subsystems with multiple levels + * of nesting should define their own lock-nesting subclasses.) 
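LOCK_CONTENDED() above is meant for lock implementations rather than lock users: the fast path is a trylock, and only a failed trylock is reported as contention before the slow path runs. A sketch with hypothetical primitives (struct my_lock, __my_trylock() and __my_lock_slow() do not exist in the kernel):

struct my_lock {
        /* ... the real lock word ... */
        struct lockdep_map      dep_map;
};

static bool __my_trylock(struct my_lock *lock);         /* hypothetical fast path */
static void __my_lock_slow(struct my_lock *lock);       /* hypothetical slow path */

static void my_lock_acquire(struct my_lock *lock)
{
        /* a real implementation would also call lock_acquire() for the graph */
        LOCK_CONTENDED(lock, __my_trylock, __my_lock_slow);
}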
+ */ +#define SINGLE_DEPTH_NESTING 1 + +/* + * Map the dependency ops to NOP or to real lockdep ops, depending + * on the per lock-class debug mode: + */ + +#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) +#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i) +#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i) + +#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) +#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) +#define spin_release(l, n, i) lock_release(l, n, i) + +#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) +#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) +#define rwlock_release(l, n, i) lock_release(l, n, i) + +#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) +#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) +#define seqcount_release(l, n, i) lock_release(l, n, i) + +#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) +#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) +#define mutex_release(l, n, i) lock_release(l, n, i) + +#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) +#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) +#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) +#define rwsem_release(l, n, i) lock_release(l, n, i) + +#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) +#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) +#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) +#define lock_map_release(l) lock_release(l, 1, _THIS_IP_) + +#ifdef CONFIG_PROVE_LOCKING +# define might_lock(lock) \ +do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \ + lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ +} while (0) +# define might_lock_read(lock) \ +do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ + lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ +} while (0) + +#define lockdep_assert_irqs_enabled() do { \ + WARN_ONCE(debug_locks && !current->lockdep_recursion && \ + !current->hardirqs_enabled, \ + "IRQs not enabled as expected\n"); \ + } while (0) + +#define lockdep_assert_irqs_disabled() do { \ + WARN_ONCE(debug_locks && !current->lockdep_recursion && \ + current->hardirqs_enabled, \ + "IRQs not disabled as expected\n"); \ + } while (0) + +#else +# define might_lock(lock) do { } while (0) +# define might_lock_read(lock) do { } while (0) +# define lockdep_assert_irqs_enabled() do { } while (0) +# define lockdep_assert_irqs_disabled() do { } while (0) +#endif + +#ifdef CONFIG_LOCKDEP +void lockdep_rcu_suspicious(const char *file, const int line, const char *s); +#else +static inline void +lockdep_rcu_suspicious(const char *file, const int line, const char *s) +{ +} +#endif + +#endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/lockref.h b/include/linux/lockref.h new file mode 100644 index 000000000..99f17cc8e --- /dev/null +++ b/include/linux/lockref.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_LOCKREF_H +#define __LINUX_LOCKREF_H + +/* + * Locked reference counts. 
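A sketch of the single-level nesting that SINGLE_DEPTH_NESTING above is meant for: taking two locks of the same class in a fixed order and telling lockdep that the second acquisition is deliberate. It reuses the illustrative struct foo from the lockdep sketches; spin_lock_nested() is the standard spinlock wrapper that passes the subclass through:

static void double_lock_foo(struct foo *a, struct foo *b)
{
        /* impose a stable order (here: by address) to avoid ABBA deadlocks */
        if (a > b)
                swap(a, b);
        spin_lock(&a->lock);
        spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
}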
+ * + * These are different from just plain atomic refcounts in that they + * are atomic with respect to the spinlock that goes with them. In + * particular, there can be implementations that don't actually get + * the spinlock for the common decrement/increment operations, but they + * still have to check that the operation is done semantically as if + * the spinlock had been taken (using a cmpxchg operation that covers + * both the lock and the count word, or using memory transactions, for + * example). + */ + +#include +#include + +#define USE_CMPXCHG_LOCKREF \ + (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ + IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4) + +struct lockref { + union { +#if USE_CMPXCHG_LOCKREF + aligned_u64 lock_count; +#endif + struct { + spinlock_t lock; + int count; + }; + }; +}; + +extern void lockref_get(struct lockref *); +extern int lockref_put_return(struct lockref *); +extern int lockref_get_not_zero(struct lockref *); +extern int lockref_put_not_zero(struct lockref *); +extern int lockref_get_or_lock(struct lockref *); +extern int lockref_put_or_lock(struct lockref *); + +extern void lockref_mark_dead(struct lockref *); +extern int lockref_get_not_dead(struct lockref *); + +/* Must be called under spinlock for reliable results */ +static inline bool __lockref_is_dead(const struct lockref *l) +{ + return ((int)l->count < 0); +} + +#endif /* __LINUX_LOCKREF_H */ diff --git a/include/linux/log2.h b/include/linux/log2.h new file mode 100644 index 000000000..78496801c --- /dev/null +++ b/include/linux/log2.h @@ -0,0 +1,227 @@ +/* Integer base 2 logarithm calculation + * + * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_LOG2_H +#define _LINUX_LOG2_H + +#include +#include + +/* + * non-constant log of base 2 calculators + * - the arch may override these in asm/bitops.h if they can be implemented + * more efficiently than using fls() and fls64() + * - the arch is not required to handle n==0 if implementing the fallback + */ +#ifndef CONFIG_ARCH_HAS_ILOG2_U32 +static inline __attribute__((const)) +int __ilog2_u32(u32 n) +{ + return fls(n) - 1; +} +#endif + +#ifndef CONFIG_ARCH_HAS_ILOG2_U64 +static inline __attribute__((const)) +int __ilog2_u64(u64 n) +{ + return fls64(n) - 1; +} +#endif + +/** + * is_power_of_2() - check if a value is a power of two + * @n: the value to check + * + * Determine whether some value is a power of two, where zero is + * *not* considered a power of two. + * Return: true if @n is a power of 2, otherwise false. 
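A sketch of the intended lockref pattern described above (compare the dcache usage): the count moves without the spinlock on the fast path, and the lock is only taken when the count is about to reach zero. struct obj, obj_get() and obj_put() are illustrative:

struct obj {
        struct lockref  ref;            /* ref.lock also protects the payload */
        /* ... */
};

static bool obj_get(struct obj *o)
{
        /* fails only after the object has been lockref_mark_dead()ed */
        return lockref_get_not_dead(&o->ref);
}

static void obj_put(struct obj *o)
{
        if (lockref_put_or_lock(&o->ref))
                return;                 /* count was > 1, dropped without the lock */
        /* count would hit zero: ->ref.lock is now held, count untouched */
        lockref_mark_dead(&o->ref);
        spin_unlock(&o->ref.lock);
        kfree(o);                       /* or an RCU-deferred free in real code */
}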
+ */ +static inline __attribute__((const)) +bool is_power_of_2(unsigned long n) +{ + return (n != 0 && ((n & (n - 1)) == 0)); +} + +/** + * __roundup_pow_of_two() - round up to nearest power of two + * @n: value to round up + */ +static inline __attribute__((const)) +unsigned long __roundup_pow_of_two(unsigned long n) +{ + return 1UL << fls_long(n - 1); +} + +/** + * __rounddown_pow_of_two() - round down to nearest power of two + * @n: value to round down + */ +static inline __attribute__((const)) +unsigned long __rounddown_pow_of_two(unsigned long n) +{ + return 1UL << (fls_long(n) - 1); +} + +/** + * const_ilog2 - log base 2 of 32-bit or a 64-bit constant unsigned value + * @n: parameter + * + * Use this where sparse expects a true constant expression, e.g. for array + * indices. + */ +#define const_ilog2(n) \ +( \ + __builtin_constant_p(n) ? ( \ + (n) < 2 ? 0 : \ + (n) & (1ULL << 63) ? 63 : \ + (n) & (1ULL << 62) ? 62 : \ + (n) & (1ULL << 61) ? 61 : \ + (n) & (1ULL << 60) ? 60 : \ + (n) & (1ULL << 59) ? 59 : \ + (n) & (1ULL << 58) ? 58 : \ + (n) & (1ULL << 57) ? 57 : \ + (n) & (1ULL << 56) ? 56 : \ + (n) & (1ULL << 55) ? 55 : \ + (n) & (1ULL << 54) ? 54 : \ + (n) & (1ULL << 53) ? 53 : \ + (n) & (1ULL << 52) ? 52 : \ + (n) & (1ULL << 51) ? 51 : \ + (n) & (1ULL << 50) ? 50 : \ + (n) & (1ULL << 49) ? 49 : \ + (n) & (1ULL << 48) ? 48 : \ + (n) & (1ULL << 47) ? 47 : \ + (n) & (1ULL << 46) ? 46 : \ + (n) & (1ULL << 45) ? 45 : \ + (n) & (1ULL << 44) ? 44 : \ + (n) & (1ULL << 43) ? 43 : \ + (n) & (1ULL << 42) ? 42 : \ + (n) & (1ULL << 41) ? 41 : \ + (n) & (1ULL << 40) ? 40 : \ + (n) & (1ULL << 39) ? 39 : \ + (n) & (1ULL << 38) ? 38 : \ + (n) & (1ULL << 37) ? 37 : \ + (n) & (1ULL << 36) ? 36 : \ + (n) & (1ULL << 35) ? 35 : \ + (n) & (1ULL << 34) ? 34 : \ + (n) & (1ULL << 33) ? 33 : \ + (n) & (1ULL << 32) ? 32 : \ + (n) & (1ULL << 31) ? 31 : \ + (n) & (1ULL << 30) ? 30 : \ + (n) & (1ULL << 29) ? 29 : \ + (n) & (1ULL << 28) ? 28 : \ + (n) & (1ULL << 27) ? 27 : \ + (n) & (1ULL << 26) ? 26 : \ + (n) & (1ULL << 25) ? 25 : \ + (n) & (1ULL << 24) ? 24 : \ + (n) & (1ULL << 23) ? 23 : \ + (n) & (1ULL << 22) ? 22 : \ + (n) & (1ULL << 21) ? 21 : \ + (n) & (1ULL << 20) ? 20 : \ + (n) & (1ULL << 19) ? 19 : \ + (n) & (1ULL << 18) ? 18 : \ + (n) & (1ULL << 17) ? 17 : \ + (n) & (1ULL << 16) ? 16 : \ + (n) & (1ULL << 15) ? 15 : \ + (n) & (1ULL << 14) ? 14 : \ + (n) & (1ULL << 13) ? 13 : \ + (n) & (1ULL << 12) ? 12 : \ + (n) & (1ULL << 11) ? 11 : \ + (n) & (1ULL << 10) ? 10 : \ + (n) & (1ULL << 9) ? 9 : \ + (n) & (1ULL << 8) ? 8 : \ + (n) & (1ULL << 7) ? 7 : \ + (n) & (1ULL << 6) ? 6 : \ + (n) & (1ULL << 5) ? 5 : \ + (n) & (1ULL << 4) ? 4 : \ + (n) & (1ULL << 3) ? 3 : \ + (n) & (1ULL << 2) ? 2 : \ + 1) : \ + -1) + +/** + * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value + * @n: parameter + * + * constant-capable log of base 2 calculation + * - this can be used to initialise global variables from constant data, hence + * the massive ternary operator construction + * + * selects the appropriately-sized optimised version depending on sizeof(n) + */ +#define ilog2(n) \ +( \ + __builtin_constant_p(n) ? \ + const_ilog2(n) : \ + (sizeof(n) <= 4) ? 
\ + __ilog2_u32(n) : \ + __ilog2_u64(n) \ + ) + +/** + * roundup_pow_of_two - round the given value up to nearest power of two + * @n: parameter + * + * round the given value up to the nearest power of two + * - the result is undefined when n == 0 + * - this can be used to initialise global variables from constant data + */ +#define roundup_pow_of_two(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 1) ? 1 : \ + (1UL << (ilog2((n) - 1) + 1)) \ + ) : \ + __roundup_pow_of_two(n) \ + ) + +/** + * rounddown_pow_of_two - round the given value down to nearest power of two + * @n: parameter + * + * round the given value down to the nearest power of two + * - the result is undefined when n == 0 + * - this can be used to initialise global variables from constant data + */ +#define rounddown_pow_of_two(n) \ +( \ + __builtin_constant_p(n) ? ( \ + (1UL << ilog2(n))) : \ + __rounddown_pow_of_two(n) \ + ) + +static inline __attribute_const__ +int __order_base_2(unsigned long n) +{ + return n > 1 ? ilog2(n - 1) + 1 : 0; +} + +/** + * order_base_2 - calculate the (rounded up) base 2 order of the argument + * @n: parameter + * + * The first few values calculated by this routine: + * ob2(0) = 0 + * ob2(1) = 0 + * ob2(2) = 1 + * ob2(3) = 2 + * ob2(4) = 2 + * ob2(5) = 3 + * ... and so on. + */ +#define order_base_2(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0 || (n) == 1) ? 0 : \ + ilog2((n) - 1) + 1) : \ + __order_base_2(n) \ +) +#endif /* _LINUX_LOG2_H */ diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h new file mode 100644 index 000000000..88e1e6304 --- /dev/null +++ b/include/linux/logic_pio.h @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved. + * Author: Gabriele Paoloni + * Author: Zhichang Yuan + */ + +#ifndef __LINUX_LOGIC_PIO_H +#define __LINUX_LOGIC_PIO_H + +#include + +enum { + LOGIC_PIO_INDIRECT, /* Indirect IO flag */ + LOGIC_PIO_CPU_MMIO, /* Memory-mapped IO flag */ +}; + +struct logic_pio_hwaddr { + struct list_head list; + struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; /* range size populated */ + unsigned long flags; + + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *hostdata, unsigned long addr, size_t dwidth); + void (*out)(void *hostdata, unsigned long addr, u32 val, + size_t dwidth); + u32 (*ins)(void *hostdata, unsigned long addr, void *buffer, + size_t dwidth, unsigned int count); + void (*outs)(void *hostdata, unsigned long addr, const void *buffer, + size_t dwidth, unsigned int count); +}; + +#ifdef CONFIG_INDIRECT_PIO +u8 logic_inb(unsigned long addr); +void logic_outb(u8 value, unsigned long addr); +void logic_outw(u16 value, unsigned long addr); +void logic_outl(u32 value, unsigned long addr); +u16 logic_inw(unsigned long addr); +u32 logic_inl(unsigned long addr); +void logic_outb(u8 value, unsigned long addr); +void logic_outw(u16 value, unsigned long addr); +void logic_outl(u32 value, unsigned long addr); +void logic_insb(unsigned long addr, void *buffer, unsigned int count); +void logic_insl(unsigned long addr, void *buffer, unsigned int count); +void logic_insw(unsigned long addr, void *buffer, unsigned int count); +void logic_outsb(unsigned long addr, const void *buffer, unsigned int count); +void logic_outsw(unsigned long addr, const void *buffer, unsigned int count); +void logic_outsl(unsigned long addr, const void *buffer, unsigned int count); + 
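A few worked values for the log2.h helpers above, written as compile-time checks; the function exists only to host the BUILD_BUG_ON()s, and since every argument is a constant it is the __builtin_constant_p() branches of the macros that get exercised:

static inline void log2_examples(void)
{
        BUILD_BUG_ON(ilog2(1) != 0);
        BUILD_BUG_ON(ilog2(4096) != 12);                /* 2^12 */
        BUILD_BUG_ON(roundup_pow_of_two(100) != 128);
        BUILD_BUG_ON(rounddown_pow_of_two(100) != 64);
        BUILD_BUG_ON(order_base_2(5) != 3);             /* matches the ob2() table above */
}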
+#ifndef inb +#define inb logic_inb +#endif + +#ifndef inw +#define inw logic_inw +#endif + +#ifndef inl +#define inl logic_inl +#endif + +#ifndef outb +#define outb logic_outb +#endif + +#ifndef outw +#define outw logic_outw +#endif + +#ifndef outl +#define outl logic_outl +#endif + +#ifndef insb +#define insb logic_insb +#endif + +#ifndef insw +#define insw logic_insw +#endif + +#ifndef insl +#define insl logic_insl +#endif + +#ifndef outsb +#define outsb logic_outsb +#endif + +#ifndef outsw +#define outsw logic_outsw +#endif + +#ifndef outsl +#define outsl logic_outsl +#endif + +/* + * We reserve 0x4000 bytes for Indirect IO as so far this library is only + * used by the HiSilicon LPC Host. If needed, we can reserve a wider IO + * area by redefining the macro below. + */ +#define PIO_INDIRECT_SIZE 0x4000 +#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE) +#else +#define MMIO_UPPER_LIMIT IO_SPACE_LIMIT +#endif /* CONFIG_INDIRECT_PIO */ + +struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode); +unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, + resource_size_t hw_addr, resource_size_t size); +int logic_pio_register_range(struct logic_pio_hwaddr *newrange); +void logic_pio_unregister_range(struct logic_pio_hwaddr *range); +resource_size_t logic_pio_to_hwaddr(unsigned long pio); +unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr); + +#endif /* __LINUX_LOGIC_PIO_H */ diff --git a/include/linux/lp.h b/include/linux/lp.h new file mode 100644 index 000000000..be8a07eb2 --- /dev/null +++ b/include/linux/lp.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * usr/include/linux/lp.h c.1991-1992 James Wiegand + * many modifications copyright (C) 1992 Michael K. Johnson + * Interrupt support added 1993 Nigel Gamble + * Removed 8255 status defines from inside __KERNEL__ Marcelo Tosatti + */ +#ifndef _LINUX_LP_H +#define _LINUX_LP_H + + +#include +#include +#include + +/* Magic numbers for defining port-device mappings */ +#define LP_PARPORT_UNSPEC -4 +#define LP_PARPORT_AUTO -3 +#define LP_PARPORT_OFF -2 +#define LP_PARPORT_NONE -1 + +#define LP_F(minor) lp_table[(minor)].flags /* flags for busy, etc. */ +#define LP_CHAR(minor) lp_table[(minor)].chars /* busy timeout */ +#define LP_TIME(minor) lp_table[(minor)].time /* wait time */ +#define LP_WAIT(minor) lp_table[(minor)].wait /* strobe wait */ +#define LP_IRQ(minor) lp_table[(minor)].dev->port->irq /* interrupt # */ + /* PARPORT_IRQ_NONE means polled */ +#ifdef LP_STATS +#define LP_STAT(minor) lp_table[(minor)].stats /* statistics area */ +#endif +#define LP_BUFFER_SIZE PAGE_SIZE + +#define LP_BASE(x) lp_table[(x)].dev->port->base + +#ifdef LP_STATS +struct lp_stats { + unsigned long chars; + unsigned long sleeps; + unsigned int maxrun; + unsigned int maxwait; + unsigned int meanwait; + unsigned int mdev; +}; +#endif + +struct lp_struct { + struct pardevice *dev; + unsigned long flags; + unsigned int chars; + unsigned int time; + unsigned int wait; + char *lp_buffer; +#ifdef LP_STATS + unsigned int lastcall; + unsigned int runchars; + struct lp_stats stats; +#endif + wait_queue_head_t waitq; + unsigned int last_error; + struct mutex port_mutex; + wait_queue_head_t dataq; + long timeout; + unsigned int best_mode; + unsigned int current_mode; + unsigned long bits; +}; + +/* + * The following constants describe the various signals of the printer port + * hardware. Note that the hardware inverts some signals and that some + * signals are active low. 
An example is LP_STROBE, which must be programmed + * with 1 for being active and 0 for being inactive, because the strobe signal + * gets inverted, but it is also active low. + */ + + +/* + * defines for 8255 control port + * base + 2 + * accessed with LP_C(minor) + */ +#define LP_PINTEN 0x10 /* high to read data in or-ed with data out */ +#define LP_PSELECP 0x08 /* inverted output, active low */ +#define LP_PINITP 0x04 /* unchanged output, active low */ +#define LP_PAUTOLF 0x02 /* inverted output, active low */ +#define LP_PSTROBE 0x01 /* short high output on raising edge */ + +/* + * the value written to ports to test existence. PC-style ports will + * return the value written. AT-style ports will return 0. so why not + * make them the same ? + */ +#define LP_DUMMY 0x00 + +/* + * This is the port delay time, in microseconds. + * It is used only in the lp_init() and lp_reset() routine. + */ +#define LP_DELAY 50 + +#endif diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h new file mode 100644 index 000000000..04fc6e6c7 --- /dev/null +++ b/include/linux/lru_cache.h @@ -0,0 +1,314 @@ +/* + lru_cache.c + + This file is part of DRBD by Philipp Reisner and Lars Ellenberg. + + Copyright (C) 2003-2008, LINBIT Information Technologies GmbH. + Copyright (C) 2003-2008, Philipp Reisner . + Copyright (C) 2003-2008, Lars Ellenberg . + + drbd is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + drbd is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with drbd; see the file COPYING. If not, write to + the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + + */ + +#ifndef LRU_CACHE_H +#define LRU_CACHE_H + +#include +#include +#include +#include /* for memset */ +#include + +/* +This header file (and its .c file; kernel-doc of functions see there) + define a helper framework to easily keep track of index:label associations, + and changes to an "active set" of objects, as well as pending transactions, + to persistently record those changes. + + We use an LRU policy if it is necessary to "cool down" a region currently in + the active set before we can "heat" a previously unused region. + + Because of this later property, it is called "lru_cache". + As it actually Tracks Objects in an Active SeT, we could also call it + toast (incidentally that is what may happen to the data on the + backend storage uppon next resync, if we don't get it right). + +What for? + +We replicate IO (more or less synchronously) to local and remote disk. + +For crash recovery after replication node failure, + we need to resync all regions that have been target of in-flight WRITE IO + (in use, or "hot", regions), as we don't know whether or not those WRITEs + have made it to stable storage. + + To avoid a "full resync", we need to persistently track these regions. + + This is known as "write intent log", and can be implemented as on-disk + (coarse or fine grained) bitmap, or other meta data. + + To avoid the overhead of frequent extra writes to this meta data area, + usually the condition is softened to regions that _may_ have been target of + in-flight WRITE IO, e.g. 
by only lazily clearing the on-disk write-intent + bitmap, trading frequency of meta data transactions against amount of + (possibly unnecessary) resync traffic. + + If we set a hard limit on the area that may be "hot" at any given time, we + limit the amount of resync traffic needed for crash recovery. + +For recovery after replication link failure, + we need to resync all blocks that have been changed on the other replica + in the mean time, or, if both replica have been changed independently [*], + all blocks that have been changed on either replica in the mean time. + [*] usually as a result of a cluster split-brain and insufficient protection. + but there are valid use cases to do this on purpose. + + Tracking those blocks can be implemented as "dirty bitmap". + Having it fine-grained reduces the amount of resync traffic. + It should also be persistent, to allow for reboots (or crashes) + while the replication link is down. + +There are various possible implementations for persistently storing +write intent log information, three of which are mentioned here. + +"Chunk dirtying" + The on-disk "dirty bitmap" may be re-used as "write-intent" bitmap as well. + To reduce the frequency of bitmap updates for write-intent log purposes, + one could dirty "chunks" (of some size) at a time of the (fine grained) + on-disk bitmap, while keeping the in-memory "dirty" bitmap as clean as + possible, flushing it to disk again when a previously "hot" (and on-disk + dirtied as full chunk) area "cools down" again (no IO in flight anymore, + and none expected in the near future either). + +"Explicit (coarse) write intent bitmap" + An other implementation could chose a (probably coarse) explicit bitmap, + for write-intent log purposes, additionally to the fine grained dirty bitmap. + +"Activity log" + Yet an other implementation may keep track of the hot regions, by starting + with an empty set, and writing down a journal of region numbers that have + become "hot", or have "cooled down" again. + + To be able to use a ring buffer for this journal of changes to the active + set, we not only record the actual changes to that set, but also record the + not changing members of the set in a round robin fashion. To do so, we use a + fixed (but configurable) number of slots which we can identify by index, and + associate region numbers (labels) with these indices. + For each transaction recording a change to the active set, we record the + change itself (index: -old_label, +new_label), and which index is associated + with which label (index: current_label) within a certain sliding window that + is moved further over the available indices with each such transaction. + + Thus, for crash recovery, if the ringbuffer is sufficiently large, we can + accurately reconstruct the active set. + + Sufficiently large depends only on maximum number of active objects, and the + size of the sliding window recording "index: current_label" associations within + each transaction. + + This is what we call the "activity log". + + Currently we need one activity log transaction per single label change, which + does not give much benefit over the "dirty chunks of bitmap" approach, other + than potentially less seeks. + + We plan to change the transaction format to support multiple changes per + transaction, which then would reduce several (disjoint, "random") updates to + the bitmap into one transaction to the activity log ring buffer. +*/ + +/* this defines an element in a tracked set + * .colision is for hash table lookup. 
+ * When we process a new IO request, we know its sector, thus can deduce the + * region number (label) easily. To do the label -> object lookup without a + * full list walk, we use a simple hash table. + * + * .list is on one of three lists: + * in_use: currently in use (refcnt > 0, lc_number != LC_FREE) + * lru: unused but ready to be reused or recycled + * (lc_refcnt == 0, lc_number != LC_FREE), + * free: unused but ready to be recycled + * (lc_refcnt == 0, lc_number == LC_FREE), + * + * an element is said to be "in the active set", + * if either on "in_use" or "lru", i.e. lc_number != LC_FREE. + * + * DRBD currently (May 2009) only uses 61 elements on the resync lru_cache + * (total memory usage 2 pages), and up to 3833 elements on the act_log + * lru_cache, totalling ~215 kB for 64bit architecture, ~53 pages. + * + * We usually do not actually free these objects again, but only "recycle" + * them, as the change "index: -old_label, +LC_FREE" would need a transaction + * as well. Which also means that using a kmem_cache to allocate the objects + * from wastes some resources. + * But it avoids high order page allocations in kmalloc. + */ +struct lc_element { + struct hlist_node colision; + struct list_head list; /* LRU list or free list */ + unsigned refcnt; + /* back "pointer" into lc_cache->element[index], + * for paranoia, and for "lc_element_to_index" */ + unsigned lc_index; + /* if we want to track a larger set of objects, + * it needs to become arch independend u64 */ + unsigned lc_number; + /* special label when on free list */ +#define LC_FREE (~0U) + + /* for pending changes */ + unsigned lc_new_number; +}; + +struct lru_cache { + /* the least recently used item is kept at lru->prev */ + struct list_head lru; + struct list_head free; + struct list_head in_use; + struct list_head to_be_changed; + + /* the pre-created kmem cache to allocate the objects from */ + struct kmem_cache *lc_cache; + + /* size of tracked objects, used to memset(,0,) them in lc_reset */ + size_t element_size; + /* offset of struct lc_element member in the tracked object */ + size_t element_off; + + /* number of elements (indices) */ + unsigned int nr_elements; + /* Arbitrary limit on maximum tracked objects. Practical limit is much + * lower due to allocation failures, probably. For typical use cases, + * nr_elements should be a few thousand at most. + * This also limits the maximum value of lc_element.lc_index, allowing the + * 8 high bits of .lc_index to be overloaded with flags in the future. */ +#define LC_MAX_ACTIVE (1<<24) + + /* allow to accumulate a few (index:label) changes, + * but no more than max_pending_changes */ + unsigned int max_pending_changes; + /* number of elements currently on to_be_changed list */ + unsigned int pending_changes; + + /* statistics */ + unsigned used; /* number of elements currently on in_use list */ + unsigned long hits, misses, starving, locked, changed; + + /* see below: flag-bits for lru_cache */ + unsigned long flags; + + + void *lc_private; + const char *name; + + /* nr_elements there */ + struct hlist_head *lc_slot; + struct lc_element **lc_element; +}; + + +/* flag-bits for lru_cache */ +enum { + /* debugging aid, to catch concurrent access early. + * user needs to guarantee exclusive access by proper locking! */ + __LC_PARANOIA, + + /* annotate that the set is "dirty", possibly accumulating further + * changes, until a transaction is finally triggered */ + __LC_DIRTY, + + /* Locked, no further changes allowed. + * Also used to serialize changing transactions. 
*/ + __LC_LOCKED, + + /* if we need to change the set, but currently there is no free nor + * unused element available, we are "starving", and must not give out + * further references, to guarantee that eventually some refcnt will + * drop to zero and we will be able to make progress again, changing + * the set, writing the transaction. + * if the statistics say we are frequently starving, + * nr_elements is too small. */ + __LC_STARVING, +}; +#define LC_PARANOIA (1<<__LC_PARANOIA) +#define LC_DIRTY (1<<__LC_DIRTY) +#define LC_LOCKED (1<<__LC_LOCKED) +#define LC_STARVING (1<<__LC_STARVING) + +extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache, + unsigned max_pending_changes, + unsigned e_count, size_t e_size, size_t e_off); +extern void lc_reset(struct lru_cache *lc); +extern void lc_destroy(struct lru_cache *lc); +extern void lc_set(struct lru_cache *lc, unsigned int enr, int index); +extern void lc_del(struct lru_cache *lc, struct lc_element *element); + +extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr); +extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr); +extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr); +extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr); +extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e); +extern void lc_committed(struct lru_cache *lc); + +struct seq_file; +extern void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc); + +extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext, + void (*detail) (struct seq_file *, struct lc_element *)); + +/** + * lc_try_lock_for_transaction - can be used to stop lc_get() from changing the tracked set + * @lc: the lru cache to operate on + * + * Allows (expects) the set to be "dirty". Note that the reference counts and + * order on the active and lru lists may still change. Used to serialize + * changing transactions. Returns true if we aquired the lock. + */ +static inline int lc_try_lock_for_transaction(struct lru_cache *lc) +{ + return !test_and_set_bit(__LC_LOCKED, &lc->flags); +} + +/** + * lc_try_lock - variant to stop lc_get() from changing the tracked set + * @lc: the lru cache to operate on + * + * Note that the reference counts and order on the active and lru lists may + * still change. Only works on a "clean" set. Returns true if we aquired the + * lock, which means there are no pending changes, and any further attempt to + * change the set will not succeed until the next lc_unlock(). 
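+ *
+ * A simplified calling pattern for the transaction machinery (illustrative
+ * only; "enr" is the caller's label, write_transaction() stands for whatever
+ * persistently records the pending changes, and error handling as well as
+ * locking against concurrent users of the cache is omitted):
+ *
+ *	e = lc_get(lc, enr);
+ *	if (e && lc->pending_changes && lc_try_lock_for_transaction(lc)) {
+ *		write_transaction(lc);
+ *		lc_committed(lc);
+ *		lc_unlock(lc);
+ *	}
+ *	... do the IO covered by this element ...
+ *	lc_put(lc, e);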
+ */ +extern int lc_try_lock(struct lru_cache *lc); + +/** + * lc_unlock - unlock @lc, allow lc_get() to change the set again + * @lc: the lru cache to operate on + */ +static inline void lc_unlock(struct lru_cache *lc) +{ + clear_bit(__LC_DIRTY, &lc->flags); + clear_bit_unlock(__LC_LOCKED, &lc->flags); +} + +extern bool lc_is_used(struct lru_cache *lc, unsigned int enr); + +#define lc_entry(ptr, type, member) \ + container_of(ptr, type, member) + +extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i); +extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e); + +#endif diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h new file mode 100644 index 000000000..915330abf --- /dev/null +++ b/include/linux/lsm_audit.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common LSM logging functions + * Heavily borrowed from selinux/avc.h + * + * Author : Etienne BASSET + * + * All credits to : Stephen Smalley, + * All BUGS to : Etienne BASSET + */ +#ifndef _LSM_COMMON_LOGGING_ +#define _LSM_COMMON_LOGGING_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct lsm_network_audit { + int netif; + struct sock *sk; + u16 family; + __be16 dport; + __be16 sport; + union { + struct { + __be32 daddr; + __be32 saddr; + } v4; + struct { + struct in6_addr daddr; + struct in6_addr saddr; + } v6; + } fam; +}; + +struct lsm_ioctlop_audit { + struct path path; + u16 cmd; +}; + +struct lsm_ibpkey_audit { + u64 subnet_prefix; + u16 pkey; +}; + +struct lsm_ibendport_audit { + char dev_name[IB_DEVICE_NAME_MAX]; + u8 port; +}; + +/* Auxiliary data to use in generating the audit record. */ +struct common_audit_data { + char type; +#define LSM_AUDIT_DATA_PATH 1 +#define LSM_AUDIT_DATA_NET 2 +#define LSM_AUDIT_DATA_CAP 3 +#define LSM_AUDIT_DATA_IPC 4 +#define LSM_AUDIT_DATA_TASK 5 +#define LSM_AUDIT_DATA_KEY 6 +#define LSM_AUDIT_DATA_NONE 7 +#define LSM_AUDIT_DATA_KMOD 8 +#define LSM_AUDIT_DATA_INODE 9 +#define LSM_AUDIT_DATA_DENTRY 10 +#define LSM_AUDIT_DATA_IOCTL_OP 11 +#define LSM_AUDIT_DATA_FILE 12 +#define LSM_AUDIT_DATA_IBPKEY 13 +#define LSM_AUDIT_DATA_IBENDPORT 14 + union { + struct path path; + struct dentry *dentry; + struct inode *inode; + struct lsm_network_audit *net; + int cap; + int ipc_id; + struct task_struct *tsk; +#ifdef CONFIG_KEYS + struct { + key_serial_t key; + char *key_desc; + } key_struct; +#endif + char *kmod_name; + struct lsm_ioctlop_audit *op; + struct file *file; + struct lsm_ibpkey_audit *ibpkey; + struct lsm_ibendport_audit *ibendport; + } u; + /* this union contains LSM specific data */ + union { +#ifdef CONFIG_SECURITY_SMACK + struct smack_audit_data *smack_audit_data; +#endif +#ifdef CONFIG_SECURITY_SELINUX + struct selinux_audit_data *selinux_audit_data; +#endif +#ifdef CONFIG_SECURITY_APPARMOR + struct apparmor_audit_data *apparmor_audit_data; +#endif + }; /* per LSM data pointer union */ +}; + +#define v4info fam.v4 +#define v6info fam.v6 + +int ipv4_skb_to_auditdata(struct sk_buff *skb, + struct common_audit_data *ad, u8 *proto); + +int ipv6_skb_to_auditdata(struct sk_buff *skb, + struct common_audit_data *ad, u8 *proto); + +void common_lsm_audit(struct common_audit_data *a, + void (*pre_audit)(struct audit_buffer *, void *), + void (*post_audit)(struct audit_buffer *, void *)); + +#endif diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h new file mode 100644 index 000000000..e65dace66 --- /dev/null +++ 
b/include/linux/lsm_hooks.h @@ -0,0 +1,2087 @@ +/* + * Linux Security Module interfaces + * + * Copyright (C) 2001 WireX Communications, Inc + * Copyright (C) 2001 Greg Kroah-Hartman + * Copyright (C) 2001 Networks Associates Technology, Inc + * Copyright (C) 2001 James Morris + * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) + * Copyright (C) 2015 Intel Corporation. + * Copyright (C) 2015 Casey Schaufler + * Copyright (C) 2016 Mellanox Techonologies + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Due to this file being licensed under the GPL there is controversy over + * whether this permits you to write a module that #includes this file + * without placing your module under the GPL. Please consult a lawyer for + * advice before doing this. + * + */ + +#ifndef __LINUX_LSM_HOOKS_H +#define __LINUX_LSM_HOOKS_H + +#include +#include +#include + +/** + * union security_list_options - Linux Security Module hook function list + * + * Security hooks for program execution operations. + * + * @bprm_set_creds: + * Save security information in the bprm->security field, typically based + * on information about the bprm->file, for later use by the apply_creds + * hook. This hook may also optionally check permissions (e.g. for + * transitions between security domains). + * This hook may be called multiple times during a single execve, e.g. for + * interpreters. The hook can tell whether it has already been called by + * checking to see if @bprm->security is non-NULL. If so, then the hook + * may decide either to retain the security information saved earlier or + * to replace it. The hook must set @bprm->secureexec to 1 if a "secure + * exec" has happened as a result of this hook call. The flag is used to + * indicate the need for a sanitized execution environment, and is also + * passed in the ELF auxiliary table on the initial stack to indicate + * whether libc should enable secure mode. + * @bprm contains the linux_binprm structure. + * Return 0 if the hook is successful and permission is granted. + * @bprm_check_security: + * This hook mediates the point when a search for a binary handler will + * begin. It allows a check the @bprm->security value which is set in the + * preceding set_creds call. The primary difference from set_creds is + * that the argv list and envp list are reliably available in @bprm. This + * hook may be called multiple times during a single execve; and in each + * pass set_creds is called first. + * @bprm contains the linux_binprm structure. + * Return 0 if the hook is successful and permission is granted. + * @bprm_committing_creds: + * Prepare to install the new security attributes of a process being + * transformed by an execve operation, based on the old credentials + * pointed to by @current->cred and the information set in @bprm->cred by + * the bprm_set_creds hook. @bprm points to the linux_binprm structure. + * This hook is a good place to perform state changes on the process such + * as closing open file descriptors to which access will no longer be + * granted when the attributes are changed. This is called immediately + * before commit_creds(). + * @bprm_committed_creds: + * Tidy up after the installation of the new security attributes of a + * process being transformed by an execve operation. 
The new credentials + * have, by this point, been set to @current->cred. @bprm points to the + * linux_binprm structure. This hook is a good place to perform state + * changes on the process such as clearing out non-inheritable signal + * state. This is called immediately after commit_creds(). + * + * Security hooks for filesystem operations. + * + * @sb_alloc_security: + * Allocate and attach a security structure to the sb->s_security field. + * The s_security field is initialized to NULL when the structure is + * allocated. + * @sb contains the super_block structure to be modified. + * Return 0 if operation was successful. + * @sb_free_security: + * Deallocate and clear the sb->s_security field. + * @sb contains the super_block structure to be modified. + * @sb_statfs: + * Check permission before obtaining filesystem statistics for the @mnt + * mountpoint. + * @dentry is a handle on the superblock for the filesystem. + * Return 0 if permission is granted. + * @sb_mount: + * Check permission before an object specified by @dev_name is mounted on + * the mount point named by @nd. For an ordinary mount, @dev_name + * identifies a device if the file system type requires a device. For a + * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a + * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the + * pathname of the object being mounted. + * @dev_name contains the name for object being mounted. + * @path contains the path for mount point object. + * @type contains the filesystem type. + * @flags contains the mount flags. + * @data contains the filesystem-specific data. + * Return 0 if permission is granted. + * @sb_copy_data: + * Allow mount option data to be copied prior to parsing by the filesystem, + * so that the security module can extract security-specific mount + * options cleanly (a filesystem may modify the data e.g. with strsep()). + * This also allows the original mount data to be stripped of security- + * specific options to avoid having to make filesystems aware of them. + * @type the type of filesystem being mounted. + * @orig the original mount data copied from userspace. + * @copy copied data which will be passed to the security module. + * Returns 0 if the copy was successful. + * @sb_remount: + * Extracts security system specific mount options and verifies no changes + * are being made to those options. + * @sb superblock being remounted + * @data contains the filesystem-specific data. + * Return 0 if permission is granted. + * @sb_umount: + * Check permission before the @mnt file system is unmounted. + * @mnt contains the mounted file system. + * @flags contains the unmount flags, e.g. MNT_FORCE. + * Return 0 if permission is granted. + * @sb_pivotroot: + * Check permission before pivoting the root filesystem. + * @old_path contains the path for the new location of the + * current root (put_old). + * @new_path contains the path for the new root (new_root). + * Return 0 if permission is granted. 
+ * @sb_set_mnt_opts: + * Set the security relevant mount options used for a superblock + * @sb the superblock to set security mount options for + * @opts binary data structure containing all lsm mount data + * @sb_clone_mnt_opts: + * Copy all security options from a given superblock to another + * @oldsb old superblock which contain information to clone + * @newsb new superblock which needs filled in + * @sb_parse_opts_str: + * Parse a string of security data filling in the opts structure + * @options string containing all mount options known by the LSM + * @opts binary data structure usable by the LSM + * @dentry_init_security: + * Compute a context for a dentry as the inode is not yet available + * since NFSv4 has no label backed by an EA anyway. + * @dentry dentry to use in calculating the context. + * @mode mode used to determine resource type. + * @name name of the last path component used to create file + * @ctx pointer to place the pointer to the resulting context in. + * @ctxlen point to place the length of the resulting context. + * @dentry_create_files_as: + * Compute a context for a dentry as the inode is not yet available + * and set that context in passed in creds so that new files are + * created using that context. Context is calculated using the + * passed in creds and not the creds of the caller. + * @dentry dentry to use in calculating the context. + * @mode mode used to determine resource type. + * @name name of the last path component used to create file + * @old creds which should be used for context calculation + * @new creds to modify + * + * + * Security hooks for inode operations. + * + * @inode_alloc_security: + * Allocate and attach a security structure to @inode->i_security. The + * i_security field is initialized to NULL when the inode structure is + * allocated. + * @inode contains the inode structure. + * Return 0 if operation was successful. + * @inode_free_security: + * @inode contains the inode structure. + * Deallocate the inode security structure and set @inode->i_security to + * NULL. + * @inode_init_security: + * Obtain the security attribute name suffix and value to set on a newly + * created inode and set up the incore security field for the new inode. + * This hook is called by the fs code as part of the inode creation + * transaction and provides for atomic labeling of the inode, unlike + * the post_create/mkdir/... hooks called by the VFS. The hook function + * is expected to allocate the name and value via kmalloc, with the caller + * being responsible for calling kfree after using them. + * If the security module does not use security attributes or does + * not wish to put a security attribute on this particular inode, + * then it should return -EOPNOTSUPP to skip this processing. + * @inode contains the inode structure of the newly created inode. + * @dir contains the inode structure of the parent directory. + * @qstr contains the last path component of the new object + * @name will be set to the allocated name suffix (e.g. selinux). + * @value will be set to the allocated attribute value. + * @len will be set to the length of the value. + * Returns 0 if @name and @value have been successfully set, + * -EOPNOTSUPP if no security attribute is needed, or + * -ENOMEM on memory allocation failure. + * @inode_create: + * Check permission to create a regular file. + * @dir contains inode structure of the parent of the new file. + * @dentry contains the dentry structure for the file to be created. 
+ * @mode contains the file mode of the file to be created. + * Return 0 if permission is granted. + * @inode_link: + * Check permission before creating a new hard link to a file. + * @old_dentry contains the dentry structure for an existing + * link to the file. + * @dir contains the inode structure of the parent directory + * of the new link. + * @new_dentry contains the dentry structure for the new link. + * Return 0 if permission is granted. + * @path_link: + * Check permission before creating a new hard link to a file. + * @old_dentry contains the dentry structure for an existing link + * to the file. + * @new_dir contains the path structure of the parent directory of + * the new link. + * @new_dentry contains the dentry structure for the new link. + * Return 0 if permission is granted. + * @inode_unlink: + * Check the permission to remove a hard link to a file. + * @dir contains the inode structure of parent directory of the file. + * @dentry contains the dentry structure for file to be unlinked. + * Return 0 if permission is granted. + * @path_unlink: + * Check the permission to remove a hard link to a file. + * @dir contains the path structure of parent directory of the file. + * @dentry contains the dentry structure for file to be unlinked. + * Return 0 if permission is granted. + * @inode_symlink: + * Check the permission to create a symbolic link to a file. + * @dir contains the inode structure of parent directory of + * the symbolic link. + * @dentry contains the dentry structure of the symbolic link. + * @old_name contains the pathname of file. + * Return 0 if permission is granted. + * @path_symlink: + * Check the permission to create a symbolic link to a file. + * @dir contains the path structure of parent directory of + * the symbolic link. + * @dentry contains the dentry structure of the symbolic link. + * @old_name contains the pathname of file. + * Return 0 if permission is granted. + * @inode_mkdir: + * Check permissions to create a new directory in the existing directory + * associated with inode structure @dir. + * @dir contains the inode structure of parent of the directory + * to be created. + * @dentry contains the dentry structure of new directory. + * @mode contains the mode of new directory. + * Return 0 if permission is granted. + * @path_mkdir: + * Check permissions to create a new directory in the existing directory + * associated with path structure @path. + * @dir contains the path structure of parent of the directory + * to be created. + * @dentry contains the dentry structure of new directory. + * @mode contains the mode of new directory. + * Return 0 if permission is granted. + * @inode_rmdir: + * Check the permission to remove a directory. + * @dir contains the inode structure of parent of the directory + * to be removed. + * @dentry contains the dentry structure of directory to be removed. + * Return 0 if permission is granted. + * @path_rmdir: + * Check the permission to remove a directory. + * @dir contains the path structure of parent of the directory to be + * removed. + * @dentry contains the dentry structure of directory to be removed. + * Return 0 if permission is granted. + * @inode_mknod: + * Check permissions when creating a special file (or a socket or a fifo + * file created via the mknod system call). Note that if mknod operation + * is being done for a regular file, then the create hook will be called + * and not this hook. + * @dir contains the inode structure of parent of the new file. 
+ * @dentry contains the dentry structure of the new file. + * @mode contains the mode of the new file. + * @dev contains the device number. + * Return 0 if permission is granted. + * @path_mknod: + * Check permissions when creating a file. Note that this hook is called + * even if mknod operation is being done for a regular file. + * @dir contains the path structure of parent of the new file. + * @dentry contains the dentry structure of the new file. + * @mode contains the mode of the new file. + * @dev contains the undecoded device number. Use new_decode_dev() to get + * the decoded device number. + * Return 0 if permission is granted. + * @inode_rename: + * Check for permission to rename a file or directory. + * @old_dir contains the inode structure for parent of the old link. + * @old_dentry contains the dentry structure of the old link. + * @new_dir contains the inode structure for parent of the new link. + * @new_dentry contains the dentry structure of the new link. + * Return 0 if permission is granted. + * @path_rename: + * Check for permission to rename a file or directory. + * @old_dir contains the path structure for parent of the old link. + * @old_dentry contains the dentry structure of the old link. + * @new_dir contains the path structure for parent of the new link. + * @new_dentry contains the dentry structure of the new link. + * Return 0 if permission is granted. + * @path_chmod: + * Check for permission to change DAC's permission of a file or directory. + * @dentry contains the dentry structure. + * @mnt contains the vfsmnt structure. + * @mode contains DAC's mode. + * Return 0 if permission is granted. + * @path_chown: + * Check for permission to change owner/group of a file or directory. + * @path contains the path structure. + * @uid contains new owner's ID. + * @gid contains new group's ID. + * Return 0 if permission is granted. + * @path_chroot: + * Check for permission to change root directory. + * @path contains the path structure. + * Return 0 if permission is granted. + * @inode_readlink: + * Check the permission to read the symbolic link. + * @dentry contains the dentry structure for the file link. + * Return 0 if permission is granted. + * @inode_follow_link: + * Check permission to follow a symbolic link when looking up a pathname. + * @dentry contains the dentry structure for the link. + * @inode contains the inode, which itself is not stable in RCU-walk + * @rcu indicates whether we are in RCU-walk mode. + * Return 0 if permission is granted. + * @inode_permission: + * Check permission before accessing an inode. This hook is called by the + * existing Linux permission function, so a security module can use it to + * provide additional checking for existing Linux permission checks. + * Notice that this hook is called when a file is opened (as well as many + * other operations), whereas the file_security_ops permission hook is + * called when the actual read/write operations are performed. + * @inode contains the inode structure to check. + * @mask contains the permission mask. + * Return 0 if permission is granted. + * @inode_setattr: + * Check permission before setting file attributes. Note that the kernel + * call to notify_change is performed from several locations, whenever + * file attributes change (such as when a file is truncated, chown/chmod + * operations, transferring disk quotas, etc). + * @dentry contains the dentry structure for the file. + * @attr is the iattr structure containing the new file attributes. + * Return 0 if permission is granted. 
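+ *
+ * For instance, a minimal @inode_permission implementation (illustrative
+ * only; example_inode_is_tagged() is a placeholder for however the module
+ * tracks its per-inode state) only inspects @mask and either grants access
+ * with 0 or denies it with an error such as -EACCES:
+ *
+ *	static int example_inode_permission(struct inode *inode, int mask)
+ *	{
+ *		if ((mask & MAY_WRITE) && example_inode_is_tagged(inode))
+ *			return -EACCES;
+ *		return 0;
+ *	}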
+ * @path_truncate: + * Check permission before truncating a file. + * @path contains the path structure for the file. + * Return 0 if permission is granted. + * @inode_getattr: + * Check permission before obtaining file attributes. + * @path contains the path structure for the file. + * Return 0 if permission is granted. + * @inode_setxattr: + * Check permission before setting the extended attributes + * @value identified by @name for @dentry. + * Return 0 if permission is granted. + * @inode_post_setxattr: + * Update inode security field after successful setxattr operation. + * @value identified by @name for @dentry. + * @inode_getxattr: + * Check permission before obtaining the extended attributes + * identified by @name for @dentry. + * Return 0 if permission is granted. + * @inode_listxattr: + * Check permission before obtaining the list of extended attribute + * names for @dentry. + * Return 0 if permission is granted. + * @inode_removexattr: + * Check permission before removing the extended attribute + * identified by @name for @dentry. + * Return 0 if permission is granted. + * @inode_getsecurity: + * Retrieve a copy of the extended attribute representation of the + * security label associated with @name for @inode via @buffer. Note that + * @name is the remainder of the attribute name after the security prefix + * has been removed. @alloc is used to specify of the call should return a + * value via the buffer or just the value length Return size of buffer on + * success. + * @inode_setsecurity: + * Set the security label associated with @name for @inode from the + * extended attribute value @value. @size indicates the size of the + * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. + * Note that @name is the remainder of the attribute name after the + * security. prefix has been removed. + * Return 0 on success. + * @inode_listsecurity: + * Copy the extended attribute names for the security labels + * associated with @inode into @buffer. The maximum size of @buffer + * is specified by @buffer_size. @buffer may be NULL to request + * the size of the buffer required. + * Returns number of bytes used/required on success. + * @inode_need_killpriv: + * Called when an inode has been changed. + * @dentry is the dentry being changed. + * Return <0 on error to abort the inode change operation. + * Return 0 if inode_killpriv does not need to be called. + * Return >0 if inode_killpriv does need to be called. + * @inode_killpriv: + * The setuid bit is being removed. Remove similar security labels. + * Called with the dentry->d_inode->i_mutex held. + * @dentry is the dentry being changed. + * Return 0 on success. If error is returned, then the operation + * causing setuid bit removal is failed. + * @inode_getsecid: + * Get the secid associated with the node. + * @inode contains a pointer to the inode. + * @secid contains a pointer to the location where result will be saved. + * In case of failure, @secid will be set to zero. + * @inode_copy_up: + * A file is about to be copied up from lower layer to upper layer of + * overlay filesystem. Security module can prepare a set of new creds + * and modify as need be and return new creds. Caller will switch to + * new creds temporarily to create new file and release newly allocated + * creds. + * @src indicates the union dentry of file that is being copied up. + * @new pointer to pointer to return newly allocated creds. + * Returns 0 on success or a negative error code on error. 
+ * @inode_copy_up_xattr: + * Filter the xattrs being copied up when a unioned file is copied + * up from a lower layer to the union/overlay layer. + * @name indicates the name of the xattr. + * Returns 0 to accept the xattr, 1 to discard the xattr, -EOPNOTSUPP if + * security module does not know about attribute or a negative error code + * to abort the copy up. Note that the caller is responsible for reading + * and writing the xattrs as this hook is merely a filter. + * + * Security hooks for file operations + * + * @file_permission: + * Check file permissions before accessing an open file. This hook is + * called by various operations that read or write files. A security + * module can use this hook to perform additional checking on these + * operations, e.g. to revalidate permissions on use to support privilege + * bracketing or policy changes. Notice that this hook is used when the + * actual read/write operations are performed, whereas the + * inode_security_ops hook is called when a file is opened (as well as + * many other operations). + * Caveat: Although this hook can be used to revalidate permissions for + * various system call operations that read or write files, it does not + * address the revalidation of permissions for memory-mapped files. + * Security modules must handle this separately if they need such + * revalidation. + * @file contains the file structure being accessed. + * @mask contains the requested permissions. + * Return 0 if permission is granted. + * @file_alloc_security: + * Allocate and attach a security structure to the file->f_security field. + * The security field is initialized to NULL when the structure is first + * created. + * @file contains the file structure to secure. + * Return 0 if the hook is successful and permission is granted. + * @file_free_security: + * Deallocate and free any security structures stored in file->f_security. + * @file contains the file structure being modified. + * @file_ioctl: + * @file contains the file structure. + * @cmd contains the operation to perform. + * @arg contains the operational arguments. + * Check permission for an ioctl operation on @file. Note that @arg + * sometimes represents a user space pointer; in other cases, it may be a + * simple integer value. When @arg represents a user space pointer, it + * should never be used by the security module. + * Return 0 if permission is granted. + * @mmap_addr : + * Check permissions for a mmap operation at @addr. + * @addr contains virtual address that will be used for the operation. + * Return 0 if permission is granted. + * @mmap_file : + * Check permissions for a mmap operation. The @file may be NULL, e.g. + * if mapping anonymous memory. + * @file contains the file structure for file to map (may be NULL). + * @reqprot contains the protection requested by the application. + * @prot contains the protection that will be applied by the kernel. + * @flags contains the operational flags. + * Return 0 if permission is granted. + * @file_mprotect: + * Check permissions before changing memory access permissions. + * @vma contains the memory region to modify. + * @reqprot contains the protection requested by the application. + * @prot contains the protection that will be applied by the kernel. + * Return 0 if permission is granted. + * @file_lock: + * Check permission before performing file locking operations. + * Note: this hook mediates both flock and fcntl style locks. + * @file contains the file structure. 
+ * @cmd contains the posix-translated lock operation to perform + * (e.g. F_RDLCK, F_WRLCK). + * Return 0 if permission is granted. + * @file_fcntl: + * Check permission before allowing the file operation specified by @cmd + * from being performed on the file @file. Note that @arg sometimes + * represents a user space pointer; in other cases, it may be a simple + * integer value. When @arg represents a user space pointer, it should + * never be used by the security module. + * @file contains the file structure. + * @cmd contains the operation to be performed. + * @arg contains the operational arguments. + * Return 0 if permission is granted. + * @file_set_fowner: + * Save owner security information (typically from current->security) in + * file->f_security for later use by the send_sigiotask hook. + * @file contains the file structure to update. + * Return 0 on success. + * @file_send_sigiotask: + * Check permission for the file owner @fown to send SIGIO or SIGURG to the + * process @tsk. Note that this hook is sometimes called from interrupt. + * Note that the fown_struct, @fown, is never outside the context of a + * struct file, so the file structure (and associated security information) + * can always be obtained: container_of(fown, struct file, f_owner) + * @tsk contains the structure of task receiving signal. + * @fown contains the file owner information. + * @sig is the signal that will be sent. When 0, kernel sends SIGIO. + * Return 0 if permission is granted. + * @file_receive: + * This hook allows security modules to control the ability of a process + * to receive an open file descriptor via socket IPC. + * @file contains the file structure being received. + * Return 0 if permission is granted. + * @file_open: + * Save open-time permission checking state for later use upon + * file_permission, and recheck access if anything has changed + * since inode_permission. + * + * Security hooks for task operations. + * + * @task_alloc: + * @task task being allocated. + * @clone_flags contains the flags indicating what should be shared. + * Handle allocation of task-related resources. + * Returns a zero on success, negative values on failure. + * @task_free: + * @task task about to be freed. + * Handle release of task-related resources. (Note that this can be called + * from interrupt context.) + * @cred_alloc_blank: + * @cred points to the credentials. + * @gfp indicates the atomicity of any memory allocations. + * Only allocate sufficient memory and attach to @cred such that + * cred_transfer() will not get ENOMEM. + * @cred_free: + * @cred points to the credentials. + * Deallocate and clear the cred->security field in a set of credentials. + * @cred_prepare: + * @new points to the new credentials. + * @old points to the original credentials. + * @gfp indicates the atomicity of any memory allocations. + * Prepare a new set of credentials by copying the data from the old set. + * @cred_transfer: + * @new points to the new credentials. + * @old points to the original credentials. + * Transfer data from original creds to new creds + * @cred_getsecid: + * Retrieve the security identifier of the cred structure @c + * @c contains the credentials, secid will be placed into @secid. + * In case of failure, @secid will be set to zero. + * @kernel_act_as: + * Set the credentials for a kernel service to act as (subjective context). + * @new points to the credentials to be modified. + * @secid specifies the security ID to be set + * The current task must be the one that nominated @secid. 
+ * Return 0 if successful. + * @kernel_create_files_as: + * Set the file creation context in a set of credentials to be the same as + * the objective context of the specified inode. + * @new points to the credentials to be modified. + * @inode points to the inode to use as a reference. + * The current task must be the one that nominated @inode. + * Return 0 if successful. + * @kernel_module_request: + * Ability to trigger the kernel to automatically upcall to userspace for + * userspace to load a kernel module with the given name. + * @kmod_name name of the module requested by the kernel + * Return 0 if successful. + * @kernel_load_data: + * Load data provided by userspace. + * @id kernel load data identifier + * Return 0 if permission is granted. + * @kernel_read_file: + * Read a file specified by userspace. + * @file contains the file structure pointing to the file being read + * by the kernel. + * @id kernel read file identifier + * Return 0 if permission is granted. + * @kernel_post_read_file: + * Read a file specified by userspace. + * @file contains the file structure pointing to the file being read + * by the kernel. + * @buf pointer to buffer containing the file contents. + * @size length of the file contents. + * @id kernel read file identifier + * Return 0 if permission is granted. + * @task_fix_setuid: + * Update the module's state after setting one or more of the user + * identity attributes of the current process. The @flags parameter + * indicates which of the set*uid system calls invoked this hook. If + * @new is the set of credentials that will be installed. Modifications + * should be made to this rather than to @current->cred. + * @old is the set of credentials that are being replaces + * @flags contains one of the LSM_SETID_* values. + * Return 0 on success. + * @task_setpgid: + * Check permission before setting the process group identifier of the + * process @p to @pgid. + * @p contains the task_struct for process being modified. + * @pgid contains the new pgid. + * Return 0 if permission is granted. + * @task_getpgid: + * Check permission before getting the process group identifier of the + * process @p. + * @p contains the task_struct for the process. + * Return 0 if permission is granted. + * @task_getsid: + * Check permission before getting the session identifier of the process + * @p. + * @p contains the task_struct for the process. + * Return 0 if permission is granted. + * @task_getsecid: + * Retrieve the security identifier of the process @p. + * @p contains the task_struct for the process and place is into @secid. + * In case of failure, @secid will be set to zero. + * + * @task_setnice: + * Check permission before setting the nice value of @p to @nice. + * @p contains the task_struct of process. + * @nice contains the new nice value. + * Return 0 if permission is granted. + * @task_setioprio + * Check permission before setting the ioprio value of @p to @ioprio. + * @p contains the task_struct of process. + * @ioprio contains the new ioprio value + * Return 0 if permission is granted. + * @task_getioprio + * Check permission before getting the ioprio value of @p. + * @p contains the task_struct of process. + * Return 0 if permission is granted. + * @task_prlimit: + * Check permission before getting and/or setting the resource limits of + * another task. + * @cred points to the cred structure for the current task. + * @tcred points to the cred structure for the target task. 
+ * @flags contains the LSM_PRLIMIT_* flag bits indicating whether the + * resource limits are being read, modified, or both. + * Return 0 if permission is granted. + * @task_setrlimit: + * Check permission before setting the resource limits of process @p + * for @resource to @new_rlim. The old resource limit values can + * be examined by dereferencing (p->signal->rlim + resource). + * @p points to the task_struct for the target task's group leader. + * @resource contains the resource whose limit is being set. + * @new_rlim contains the new limits for @resource. + * Return 0 if permission is granted. + * @task_setscheduler: + * Check permission before setting scheduling policy and/or parameters of + * process @p based on @policy and @lp. + * @p contains the task_struct for process. + * @policy contains the scheduling policy. + * @lp contains the scheduling parameters. + * Return 0 if permission is granted. + * @task_getscheduler: + * Check permission before obtaining scheduling information for process + * @p. + * @p contains the task_struct for process. + * Return 0 if permission is granted. + * @task_movememory + * Check permission before moving memory owned by process @p. + * @p contains the task_struct for process. + * Return 0 if permission is granted. + * @task_kill: + * Check permission before sending signal @sig to @p. @info can be NULL, + * the constant 1, or a pointer to a siginfo structure. If @info is 1 or + * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming + * from the kernel and should typically be permitted. + * SIGIO signals are handled separately by the send_sigiotask hook in + * file_security_ops. + * @p contains the task_struct for process. + * @info contains the signal information. + * @sig contains the signal value. + * @cred contains the cred of the process where the signal originated, or + * NULL if the current task is the originator. + * Return 0 if permission is granted. + * @task_prctl: + * Check permission before performing a process control operation on the + * current process. + * @option contains the operation. + * @arg2 contains a argument. + * @arg3 contains a argument. + * @arg4 contains a argument. + * @arg5 contains a argument. + * Return -ENOSYS if no-one wanted to handle this op, any other value to + * cause prctl() to return immediately with that value. + * @task_to_inode: + * Set the security attributes for an inode based on an associated task's + * security attributes, e.g. for /proc/pid inodes. + * @p contains the task_struct for the task. + * @inode contains the inode structure for the inode. + * + * Security hooks for Netlink messaging. + * + * @netlink_send: + * Save security information for a netlink message so that permission + * checking can be performed when the message is processed. The security + * information can be saved using the eff_cap field of the + * netlink_skb_parms structure. Also may be used to provide fine + * grained control over message transmission. + * @sk associated sock of task sending the message. + * @skb contains the sk_buff structure for the netlink message. + * Return 0 if the information was successfully saved and message + * is allowed to be transmitted. + * + * Security hooks for Unix domain networking. + * + * @unix_stream_connect: + * Check permissions before establishing a Unix domain stream connection + * between @sock and @other. + * @sock contains the sock structure. + * @other contains the peer sock structure. + * @newsk contains the new sock structure. 
+ * Return 0 if permission is granted. + * @unix_may_send: + * Check permissions before connecting or sending datagrams from @sock to + * @other. + * @sock contains the socket structure. + * @other contains the peer socket structure. + * Return 0 if permission is granted. + * + * The @unix_stream_connect and @unix_may_send hooks were necessary because + * Linux provides an alternative to the conventional file name space for Unix + * domain sockets. Whereas binding and connecting to sockets in the file name + * space is mediated by the typical file permissions (and caught by the mknod + * and permission hooks in inode_security_ops), binding and connecting to + * sockets in the abstract name space is completely unmediated. Sufficient + * control of Unix domain sockets in the abstract name space isn't possible + * using only the socket layer hooks, since we need to know the actual target + * socket, which is not looked up until we are inside the af_unix code. + * + * Security hooks for socket operations. + * + * @socket_create: + * Check permissions prior to creating a new socket. + * @family contains the requested protocol family. + * @type contains the requested communications type. + * @protocol contains the requested protocol. + * @kern set to 1 if a kernel socket. + * Return 0 if permission is granted. + * @socket_post_create: + * This hook allows a module to update or allocate a per-socket security + * structure. Note that the security field was not added directly to the + * socket structure, but rather, the socket security information is stored + * in the associated inode. Typically, the inode alloc_security hook will + * allocate and and attach security information to + * sock->inode->i_security. This hook may be used to update the + * sock->inode->i_security field with additional information that wasn't + * available when the inode was allocated. + * @sock contains the newly created socket structure. + * @family contains the requested protocol family. + * @type contains the requested communications type. + * @protocol contains the requested protocol. + * @kern set to 1 if a kernel socket. + * @socket_socketpair: + * Check permissions before creating a fresh pair of sockets. + * @socka contains the first socket structure. + * @sockb contains the second socket structure. + * Return 0 if permission is granted and the connection was established. + * @socket_bind: + * Check permission before socket protocol layer bind operation is + * performed and the socket @sock is bound to the address specified in the + * @address parameter. + * @sock contains the socket structure. + * @address contains the address to bind to. + * @addrlen contains the length of address. + * Return 0 if permission is granted. + * @socket_connect: + * Check permission before socket protocol layer connect operation + * attempts to connect socket @sock to a remote address, @address. + * @sock contains the socket structure. + * @address contains the address of remote endpoint. + * @addrlen contains the length of address. + * Return 0 if permission is granted. + * @socket_listen: + * Check permission before socket protocol layer listen operation. + * @sock contains the socket structure. + * @backlog contains the maximum length for the pending connection queue. + * Return 0 if permission is granted. + * @socket_accept: + * Check permission before accepting a new connection. 
Note that the new + * socket, @newsock, has been created and some information copied to it, + * but the accept operation has not actually been performed. + * @sock contains the listening socket structure. + * @newsock contains the newly created server socket for connection. + * Return 0 if permission is granted. + * @socket_sendmsg: + * Check permission before transmitting a message to another socket. + * @sock contains the socket structure. + * @msg contains the message to be transmitted. + * @size contains the size of message. + * Return 0 if permission is granted. + * @socket_recvmsg: + * Check permission before receiving a message from a socket. + * @sock contains the socket structure. + * @msg contains the message structure. + * @size contains the size of message structure. + * @flags contains the operational flags. + * Return 0 if permission is granted. + * @socket_getsockname: + * Check permission before the local address (name) of the socket object + * @sock is retrieved. + * @sock contains the socket structure. + * Return 0 if permission is granted. + * @socket_getpeername: + * Check permission before the remote address (name) of a socket object + * @sock is retrieved. + * @sock contains the socket structure. + * Return 0 if permission is granted. + * @socket_getsockopt: + * Check permissions before retrieving the options associated with socket + * @sock. + * @sock contains the socket structure. + * @level contains the protocol level to retrieve option from. + * @optname contains the name of option to retrieve. + * Return 0 if permission is granted. + * @socket_setsockopt: + * Check permissions before setting the options associated with socket + * @sock. + * @sock contains the socket structure. + * @level contains the protocol level to set options for. + * @optname contains the name of the option to set. + * Return 0 if permission is granted. + * @socket_shutdown: + * Checks permission before all or part of a connection on the socket + * @sock is shut down. + * @sock contains the socket structure. + * @how contains the flag indicating how future sends and receives + * are handled. + * Return 0 if permission is granted. + * @socket_sock_rcv_skb: + * Check permissions on incoming network packets. This hook is distinct + * from Netfilter's IP input hooks since it is the first time that the + * incoming sk_buff @skb has been associated with a particular socket, @sk. + * Must not sleep inside this hook because some callers hold spinlocks. + * @sk contains the sock (not socket) associated with the incoming sk_buff. + * @skb contains the incoming network data. + * @socket_getpeersec_stream: + * This hook allows the security module to provide peer socket security + * state for unix or connected tcp sockets to userspace via getsockopt + * SO_GETPEERSEC. For tcp sockets this can be meaningful if the + * socket is associated with an ipsec SA. + * @sock is the local socket. + * @optval userspace memory where the security state is to be copied. + * @optlen userspace int where the module should copy the actual length + * of the security state. + * @len as input is the maximum length to copy to userspace provided + * by the caller. + * Return 0 if all is well, otherwise, typical getsockopt return + * values. + * @socket_getpeersec_dgram: + * This hook allows the security module to provide peer socket security + * state for udp sockets on a per-packet basis to userspace via + * getsockopt SO_GETPEERSEC. The application must first have indicated + * the IP_PASSSEC option via getsockopt. 
It can then retrieve the + * security state returned by this hook for a packet via the SCM_SECURITY + * ancillary message type. + * @skb is the skbuff for the packet being queried + * @secdata is a pointer to a buffer in which to copy the security data + * @seclen is the maximum length for @secdata + * Return 0 on success, error on failure. + * @sk_alloc_security: + * Allocate and attach a security structure to the sk->sk_security field, + * which is used to copy security attributes between local stream sockets. + * @sk_free_security: + * Deallocate security structure. + * @sk_clone_security: + * Clone/copy security structure. + * @sk_getsecid: + * Retrieve the LSM-specific secid for the sock to enable caching + * of network authorizations. + * @sock_graft: + * Sets the socket's isec sid to the sock's sid. + * @inet_conn_request: + * Sets the openreq's sid to socket's sid with MLS portion taken + * from peer sid. + * @inet_csk_clone: + * Sets the new child socket's sid to the openreq sid. + * @inet_conn_established: + * Sets the connection's peersid to the secmark on skb. + * @secmark_relabel_packet: + * Check if the process should be allowed to relabel packets to + * the given secid. + * @secmark_refcount_inc: + * Tells the LSM to increment the number of secmark labeling rules loaded. + * @secmark_refcount_dec: + * Tells the LSM to decrement the number of secmark labeling rules loaded. + * @req_classify_flow: + * Sets the flow's sid to the openreq sid. + * @tun_dev_alloc_security: + * This hook allows a module to allocate a security structure for a TUN + * device. + * @security pointer to a security structure pointer. + * Returns a zero on success, negative values on failure. + * @tun_dev_free_security: + * This hook allows a module to free the security structure for a TUN + * device. + * @security pointer to the TUN device's security structure + * @tun_dev_create: + * Check permissions prior to creating a new TUN device. + * @tun_dev_attach_queue: + * Check permissions prior to attaching to a TUN device queue. + * @security pointer to the TUN device's security structure. + * @tun_dev_attach: + * This hook can be used by the module to update any security state + * associated with the TUN device's sock structure. + * @sk contains the existing sock structure. + * @security pointer to the TUN device's security structure. + * @tun_dev_open: + * This hook can be used by the module to update any security state + * associated with the TUN device's security structure. + * @security pointer to the TUN device's security structure. + * + * Security hooks for SCTP + * + * @sctp_assoc_request: + * Passes the @ep and @chunk->skb of the association INIT packet to + * the security module. + * @ep pointer to sctp endpoint structure. + * @skb pointer to skbuff of association packet. + * Return 0 on success, error on failure. + * @sctp_bind_connect: + * Validate permissions required for each address associated with sock + * @sk. Depending on @optname, the addresses will be treated as either + * for a connect or bind service. The @addrlen is calculated on each + * ipv4 and ipv6 address using sizeof(struct sockaddr_in) or + * sizeof(struct sockaddr_in6). + * @sk pointer to sock structure. + * @optname name of the option to validate. + * @address list containing one or more ipv4/ipv6 addresses. + * @addrlen total length of address(es). + * Return 0 on success, error on failure. + * @sctp_sk_clone: + * Called whenever a new socket is created by accept(2) (i.e.
a TCP + * style socket) or when a socket is 'peeled off' e.g. userspace + * calls sctp_peeloff(3). + * @ep pointer to current sctp endpoint structure. + * @sk pointer to current sock structure. + * @newsk pointer to new sock structure. + * + * Security hooks for Infiniband + * + * @ib_pkey_access: + * Check permission to access a pkey when modifying a QP. + * @subnet_prefix the subnet prefix of the port being used. + * @pkey the pkey to be accessed. + * @sec pointer to a security structure. + * @ib_endport_manage_subnet: + * Check permissions to send and receive SMPs on an end port. + * @dev_name the IB device name (e.g. mlx4_0). + * @port_num the port number. + * @sec pointer to a security structure. + * @ib_alloc_security: + * Allocate a security structure for Infiniband objects. + * @sec pointer to a security structure pointer. + * Returns 0 on success, non-zero on failure. + * @ib_free_security: + * Deallocate an Infiniband security structure. + * @sec contains the security structure to be freed. + * + * Security hooks for XFRM operations. + * + * @xfrm_policy_alloc_security: + * @ctxp is a pointer to the xfrm_sec_ctx being added to the Security Policy + * Database used by the XFRM system. + * @sec_ctx contains the security context information being provided by + * the user-level policy update program (e.g., setkey). + * @gfp is to specify the context for the allocation. + * Allocate a security structure to the xp->security field; the security + * field is initialized to NULL when the xfrm_policy is allocated. + * Return 0 if operation was successful (memory to allocate, legal context). + * @xfrm_policy_clone_security: + * @old_ctx contains an existing xfrm_sec_ctx. + * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. + * Allocate a security structure in new_ctxp that contains the + * information from the old_ctx structure. + * Return 0 if operation was successful (memory to allocate). + * @xfrm_policy_free_security: + * @ctx contains the xfrm_sec_ctx. + * Deallocate xp->security. + * @xfrm_policy_delete_security: + * @ctx contains the xfrm_sec_ctx. + * Authorize deletion of xp->security. + * @xfrm_state_alloc: + * @x contains the xfrm_state being added to the Security Association + * Database by the XFRM system. + * @sec_ctx contains the security context information being provided by + * the user-level SA generation program (e.g., setkey or racoon). + * Allocate a security structure to the x->security field; the security + * field is initialized to NULL when the xfrm_state is allocated. Set the + * context to correspond to sec_ctx. Return 0 if operation was successful + * (memory to allocate, legal context). + * @xfrm_state_alloc_acquire: + * @x contains the xfrm_state being added to the Security Association + * Database by the XFRM system. + * @polsec contains the policy's security context. + * @secid contains the secid from which to take the mls portion of the + * context. + * Allocate a security structure to the x->security field; the security + * field is initialized to NULL when the xfrm_state is allocated. Set the + * context to correspond to secid. Return 0 if operation was successful + * (memory to allocate, legal context). + * @xfrm_state_free_security: + * @x contains the xfrm_state. + * Deallocate x->security. + * @xfrm_state_delete_security: + * @x contains the xfrm_state. + * Authorize deletion of x->security. + * @xfrm_policy_lookup: + * @ctx contains the xfrm_sec_ctx for which the access control is being + * checked.
+ * @fl_secid contains the flow security label that is used to authorize + * access to the policy xp. + * @dir contains the direction of the flow (input or output). + * Check permission when a flow selects a xfrm_policy for processing + * XFRMs on a packet. The hook is called when selecting either a + * per-socket policy or a generic xfrm policy. + * Return 0 if permission is granted, -ESRCH otherwise, or -errno + * on other errors. + * @xfrm_state_pol_flow_match: + * @x contains the state to match. + * @xp contains the policy to check for a match. + * @fl contains the flow to check for a match. + * Return 1 if there is a match. + * @xfrm_decode_session: + * @skb points to skb to decode. + * @secid points to the flow key secid to set. + * @ckall says if all xfrms used should be checked for same secid. + * Return 0 if ckall is zero or all xfrms used have the same secid. + * + * Security hooks affecting all Key Management operations + * + * @key_alloc: + * Permit allocation of a key and assign security data. Note that key does + * not have a serial number assigned at this point. + * @key points to the key. + * @flags is the allocation flags + * Return 0 if permission is granted, -ve error otherwise. + * @key_free: + * Notification of destruction; free security data. + * @key points to the key. + * No return value. + * @key_permission: + * See whether a specific operational right is granted to a process on a + * key. + * @key_ref refers to the key (key pointer + possession attribute bit). + * @cred points to the credentials to provide the context against which to + * evaluate the security data on the key. + * @perm describes the combination of permissions required of this key. + * Return 0 if permission is granted, -ve error otherwise. + * @key_getsecurity: + * Get a textual representation of the security context attached to a key + * for the purposes of honouring KEYCTL_GETSECURITY. This function + * allocates the storage for the NUL-terminated string and the caller + * should free it. + * @key points to the key to be queried. + * @_buffer points to a pointer that should be set to point to the + * resulting string (if no label or an error occurs). + * Return the length of the string (including terminating NUL) or -ve if + * an error. + * May also return 0 (and a NULL buffer pointer) if there is no label. + * + * Security hooks affecting all System V IPC operations. + * + * @ipc_permission: + * Check permissions for access to IPC + * @ipcp contains the kernel IPC permission structure + * @flag contains the desired (requested) permission set + * Return 0 if permission is granted. + * @ipc_getsecid: + * Get the secid associated with the ipc object. + * @ipcp contains the kernel IPC permission structure. + * @secid contains a pointer to the location where result will be saved. + * In case of failure, @secid will be set to zero. + * + * Security hooks for individual messages held in System V IPC message queues + * @msg_msg_alloc_security: + * Allocate and attach a security structure to the msg->security field. + * The security field is initialized to NULL when the structure is first + * created. + * @msg contains the message structure to be modified. + * Return 0 if operation was successful and permission is granted. + * @msg_msg_free_security: + * Deallocate the security structure for this message. + * @msg contains the message structure to be modified. 
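To make the msg_msg_alloc_security/msg_msg_free_security contract above concrete, here is a minimal illustrative sketch (not taken from the original header); the per-message blob struct demo_msg_sec and all demo_* names are hypothetical, and a real module would store whatever state its policy needs.

#include <linux/msg.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical per-message security blob. */
struct demo_msg_sec {
	u32 sid;
};

static int demo_msg_msg_alloc_security(struct msg_msg *msg)
{
	struct demo_msg_sec *sec;

	sec = kzalloc(sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;
	msg->security = sec;	/* the field starts out NULL, as documented above */
	return 0;
}

static void demo_msg_msg_free_security(struct msg_msg *msg)
{
	kfree(msg->security);
	msg->security = NULL;
}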
+ * + * Security hooks for System V IPC Message Queues + * + * @msg_queue_alloc_security: + * Allocate and attach a security structure to the + * msq->q_perm.security field. The security field is initialized to + * NULL when the structure is first created. + * @msq contains the message queue structure to be modified. + * Return 0 if operation was successful and permission is granted. + * @msg_queue_free_security: + * Deallocate security structure for this message queue. + * @msq contains the message queue structure to be modified. + * @msg_queue_associate: + * Check permission when a message queue is requested through the + * msgget system call. This hook is only called when returning the + * message queue identifier for an existing message queue, not when a + * new message queue is created. + * @msq contains the message queue to act upon. + * @msqflg contains the operation control flags. + * Return 0 if permission is granted. + * @msg_queue_msgctl: + * Check permission when a message control operation specified by @cmd + * is to be performed on the message queue @msq. + * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO. + * @msq contains the message queue to act upon. May be NULL. + * @cmd contains the operation to be performed. + * Return 0 if permission is granted. + * @msg_queue_msgsnd: + * Check permission before a message, @msg, is enqueued on the message + * queue, @msq. + * @msq contains the message queue to send message to. + * @msg contains the message to be enqueued. + * @msqflg contains operational flags. + * Return 0 if permission is granted. + * @msg_queue_msgrcv: + * Check permission before a message, @msg, is removed from the message + * queue, @msq. The @target task structure contains a pointer to the + * process that will be receiving the message (not equal to the current + * process when inline receives are being performed). + * @msq contains the message queue to retrieve message from. + * @msg contains the message destination. + * @target contains the task structure for recipient process. + * @type contains the type of message requested. + * @mode contains the operational flags. + * Return 0 if permission is granted. + * + * Security hooks for System V Shared Memory Segments + * + * @shm_alloc_security: + * Allocate and attach a security structure to the shp->shm_perm.security + * field. The security field is initialized to NULL when the structure is + * first created. + * @shp contains the shared memory structure to be modified. + * Return 0 if operation was successful and permission is granted. + * @shm_free_security: + * Deallocate the security struct for this memory segment. + * @shp contains the shared memory structure to be modified. + * @shm_associate: + * Check permission when a shared memory region is requested through the + * shmget system call. This hook is only called when returning the shared + * memory region identifier for an existing region, not when a new shared + * memory region is created. + * @shp contains the shared memory structure to be modified. + * @shmflg contains the operation control flags. + * Return 0 if permission is granted. + * @shm_shmctl: + * Check permission when a shared memory control operation specified by + * @cmd is to be performed on the shared memory region @shp. + * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO. + * @shp contains shared memory structure to be modified. + * @cmd contains the operation to be performed. + * Return 0 if permission is granted. 
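As an illustrative sketch of the shm_shmctl contract just described, including the NULL @shp case for IPC_INFO and SHM_INFO, something like the following would satisfy it; the demo_* name and the owner-only IPC_RMID rule are hypothetical policy, not anything mandated by the hook.

#include <linux/ipc.h>
#include <linux/shm.h>
#include <linux/cred.h>
#include <linux/errno.h>

static int demo_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
{
	/* IPC_INFO and SHM_INFO query global state; there is no object to check. */
	if (cmd == IPC_INFO || cmd == SHM_INFO)
		return 0;
	/* Hypothetical policy: only the segment owner may destroy it. */
	if (cmd == IPC_RMID && !uid_eq(shp->uid, current_euid()))
		return -EPERM;
	return 0;
}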
+ * @shm_shmat: + * Check permissions prior to allowing the shmat system call to attach the + * shared memory segment @shp to the data segment of the calling process. + * The attaching address is specified by @shmaddr. + * @shp contains the shared memory structure to be modified. + * @shmaddr contains the address to attach memory region to. + * @shmflg contains the operational flags. + * Return 0 if permission is granted. + * + * Security hooks for System V Semaphores + * + * @sem_alloc_security: + * Allocate and attach a security structure to the sma->sem_perm.security + * field. The security field is initialized to NULL when the structure is + * first created. + * @sma contains the semaphore structure. + * Return 0 if operation was successful and permission is granted. + * @sem_free_security: + * Deallocate the security structure for this semaphore. + * @sma contains the semaphore structure. + * @sem_associate: + * Check permission when a semaphore is requested through the semget + * system call. This hook is only called when returning the semaphore + * identifier for an existing semaphore, not when a new one must be + * created. + * @sma contains the semaphore structure. + * @semflg contains the operation control flags. + * Return 0 if permission is granted. + * @sem_semctl: + * Check permission when a semaphore operation specified by @cmd is to be + * performed on the semaphore @sma. The @sma may be NULL, e.g. for + * IPC_INFO or SEM_INFO. + * @sma contains the semaphore structure. May be NULL. + * @cmd contains the operation to be performed. + * Return 0 if permission is granted. + * @sem_semop: + * Check permissions before performing operations on members of the + * semaphore set @sma. If the @alter flag is nonzero, the semaphore set + * may be modified. + * @sma contains the semaphore structure. + * @sops contains the operations to perform. + * @nsops contains the number of operations to perform. + * @alter contains the flag indicating whether changes are to be made. + * Return 0 if permission is granted. + * + * @binder_set_context_mgr: + * Check whether @mgr is allowed to be the binder context manager. + * @mgr contains the struct cred for the current binder process. + * Return 0 if permission is granted. + * @binder_transaction: + * Check whether @from is allowed to invoke a binder transaction call + * to @to. + * @from contains the struct cred for the sending process. + * @to contains the struct cred for the receiving process. + * @binder_transfer_binder: + * Check whether @from is allowed to transfer a binder reference to @to. + * @from contains the struct cred for the sending process. + * @to contains the struct cred for the receiving process. + * @binder_transfer_file: + * Check whether @from is allowed to transfer @file to @to. + * @from contains the struct cred for the sending process. + * @file contains the struct file being transferred. + * @to contains the struct cred for the receiving process. + * + * @ptrace_access_check: + * Check permission before allowing the current process to trace the + * @child process. + * Security modules may also want to perform a process tracing check + * during an execve in the bprm_set_creds hook of binprm_security_ops + * if the process is being traced and its security attributes would be + * changed by the execve. + * @child contains the task_struct structure for the target process. + * @mode contains the PTRACE_MODE flags indicating the form of access.
+ * Return 0 if permission is granted. + * @ptrace_traceme: + * Check that the @parent process has sufficient permission to trace the + * current process before allowing the current process to present itself + * to the @parent process for tracing. + * @parent contains the task_struct structure for debugger process. + * Return 0 if permission is granted. + * @capget: + * Get the @effective, @inheritable, and @permitted capability sets for + * the @target process. The hook may also perform permission checking to + * determine if the current process is allowed to see the capability sets + * of the @target process. + * @target contains the task_struct structure for target process. + * @effective contains the effective capability set. + * @inheritable contains the inheritable capability set. + * @permitted contains the permitted capability set. + * Return 0 if the capability sets were successfully obtained. + * @capset: + * Set the @effective, @inheritable, and @permitted capability sets for + * the current process. + * @new contains the new credentials structure for target process. + * @old contains the current credentials structure for target process. + * @effective contains the effective capability set. + * @inheritable contains the inheritable capability set. + * @permitted contains the permitted capability set. + * Return 0 and update @new if permission is granted. + * @capable: + * Check whether the @cred credentials have the @cap capability in the + * user namespace @ns. + * @cred contains the credentials to use. + * @ns contains the user namespace we want the capability in. + * @cap contains the capability to be checked. + * @opts contains options for the capable check. + * Return 0 if the capability is granted. + * @syslog: + * Check permission before accessing the kernel message ring or changing + * logging to the console. + * See the syslog(2) manual page for an explanation of the @type values. + * @type contains the type of action. + * @from_file indicates the context of action (if it came from /proc). + * Return 0 if permission is granted. + * @settime: + * Check permission to change the system time. + * struct timespec64 is defined in include/linux/time64.h and timezone + * is defined in include/linux/time.h. + * @ts contains the new time. + * @tz contains the new timezone. + * Return 0 if permission is granted. + * @vm_enough_memory: + * Check permissions for allocating a new virtual mapping. + * @mm contains the mm struct it is being added to. + * @pages contains the number of pages. + * Return 0 if permission is granted. + * + * @ismaclabel: + * Check if the extended attribute specified by @name + * represents a MAC label. Returns 1 if name is a MAC + * attribute, otherwise returns 0. + * @name full extended attribute name to check against + * LSM as a MAC label. + * + * @secid_to_secctx: + * Convert secid to security context. If secdata is NULL the length of + * the result will be returned in seclen, but no secdata will be returned. + * This does mean that the length could change between calls to check the + * length and the next call which actually allocates and returns the + * secdata. + * @secid contains the security ID. + * @secdata contains the pointer that stores the converted security + * context. + * @seclen pointer which contains the length of the data. + * @secctx_to_secid: + * Convert security context to secid. + * @secid contains the pointer to the generated security ID. + * @secdata contains the security context. + * + * @release_secctx: + * Release the security context.
+ * @secdata contains the security context. + * @seclen contains the length of the security context. + * + * Security hooks for Audit + * + * @audit_rule_init: + * Allocate and initialize an LSM audit rule structure. + * @field contains the required Audit action. + * Field flags are defined in include/linux/audit.h + * @op contains the operator the rule uses. + * @rulestr contains the context where the rule will be applied to. + * @lsmrule contains a pointer to receive the result. + * Return 0 if @lsmrule has been successfully set, + * -EINVAL in case of an invalid rule. + * + * @audit_rule_known: + * Specifies whether given @rule contains any fields related to + * current LSM. + * @rule contains the audit rule of interest. + * Return 1 in case of relation found, 0 otherwise. + * + * @audit_rule_match: + * Determine if given @secid matches a rule previously approved + * by @audit_rule_known. + * @secid contains the security id in question. + * @field contains the field which relates to current LSM. + * @op contains the operator that will be used for matching. + * @rule points to the audit rule that will be checked against. + * @actx points to the audit context associated with the check. + * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure. + * + * @audit_rule_free: + * Deallocate the LSM audit rule structure previously allocated by + * audit_rule_init. + * @rule contains the allocated rule. + * + * @inode_invalidate_secctx: + * Notify the security module that it must revalidate the security context + * of an inode. + * + * @inode_notifysecctx: + * Notify the security module of what the security context of an inode + * should be. Initializes the incore security context managed by the + * security module for this inode. Example usage: NFS client invokes + * this hook to initialize the security context in its incore inode to the + * value provided by the server for the file when the server returned the + * file's attributes to the client. + * + * Must be called with inode->i_mutex locked. + * + * @inode we wish to set the security context of. + * @ctx contains the string which we wish to set in the inode. + * @ctxlen contains the length of @ctx. + * + * @inode_setsecctx: + * Change the security context of an inode. Updates the + * incore security context managed by the security module and invokes the + * fs code as needed (via __vfs_setxattr_noperm) to update any backing + * xattrs that represent the context. Example usage: NFS server invokes + * this hook to change the security context in its incore inode and on the + * backing filesystem to a value provided by the client on a SETATTR + * operation. + * + * Must be called with inode->i_mutex locked. + * + * @dentry contains the inode we wish to set the security context of. + * @ctx contains the string which we wish to set in the inode. + * @ctxlen contains the length of @ctx. + * + * @inode_getsecctx: + * On success, returns 0 and fills out @ctx and @ctxlen with the security + * context for the given @inode. + * + * @inode we wish to get the security context of. + * @ctx is a pointer in which to place the allocated security context. + * @ctxlen points to the place to put the length of @ctx. + * + * Security hooks for using the eBPF maps and programs functionalities through + * eBPF syscalls. + * + * @bpf: + * Do an initial check for all bpf syscalls after the attribute is copied + * into the kernel. The security module can implement its own + * rules to check the specific cmd it needs.
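As an illustrative sketch of such a cmd-based rule, and of how a module would wire a hook into the lists declared later in this header with LSM_HOOK_INIT() and security_add_hooks(), consider the following; the "demo" module name, the CAP_SYS_ADMIN-for-BPF_PROG_LOAD rule, and all demo_* identifiers are hypothetical, and the bpf hook itself is only present under CONFIG_BPF_SYSCALL.

#include <linux/lsm_hooks.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/init.h>

/* Hypothetical rule: only CAP_SYS_ADMIN may load eBPF programs. */
static int demo_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	if (cmd == BPF_PROG_LOAD && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

static struct security_hook_list demo_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(bpf, demo_bpf),
};

static int __init demo_lsm_init(void)
{
	/* security_module_enable()/security_add_hooks() are declared below. */
	if (!security_module_enable("demo"))
		return 0;
	security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks), "demo");
	return 0;
}
security_initcall(demo_lsm_init);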
+ * + * @bpf_map: + * Do a check when the kernel generates and returns a file descriptor for + * eBPF maps. + * + * @map: bpf map that we want to access + * @mask: the access flags + * + * @bpf_prog: + * Do a check when the kernel generates and returns a file descriptor for + * eBPF programs. + * + * @prog: bpf prog that userspace wants to use. + * + * @bpf_map_alloc_security: + * Initialize the security field inside bpf map. + * + * @bpf_map_free_security: + * Clean up the security information stored inside bpf map. + * + * @bpf_prog_alloc_security: + * Initialize the security field inside bpf program. + * + * @bpf_prog_free_security: + * Clean up the security information stored inside bpf prog. + * + */ +union security_list_options { + int (*binder_set_context_mgr)(const struct cred *mgr); + int (*binder_transaction)(const struct cred *from, + const struct cred *to); + int (*binder_transfer_binder)(const struct cred *from, + const struct cred *to); + int (*binder_transfer_file)(const struct cred *from, + const struct cred *to, + struct file *file); + + int (*ptrace_access_check)(struct task_struct *child, + unsigned int mode); + int (*ptrace_traceme)(struct task_struct *parent); + int (*capget)(struct task_struct *target, kernel_cap_t *effective, + kernel_cap_t *inheritable, kernel_cap_t *permitted); + int (*capset)(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); + int (*capable)(const struct cred *cred, + struct user_namespace *ns, + int cap, + unsigned int opts); + int (*quotactl)(int cmds, int type, int id, struct super_block *sb); + int (*quota_on)(struct dentry *dentry); + int (*syslog)(int type); + int (*settime)(const struct timespec64 *ts, const struct timezone *tz); + int (*vm_enough_memory)(struct mm_struct *mm, long pages); + + int (*bprm_set_creds)(struct linux_binprm *bprm); + int (*bprm_check_security)(struct linux_binprm *bprm); + void (*bprm_committing_creds)(struct linux_binprm *bprm); + void (*bprm_committed_creds)(struct linux_binprm *bprm); + + int (*sb_alloc_security)(struct super_block *sb); + void (*sb_free_security)(struct super_block *sb); + int (*sb_copy_data)(char *orig, char *copy); + int (*sb_remount)(struct super_block *sb, void *data); + int (*sb_kern_mount)(struct super_block *sb, int flags, void *data); + int (*sb_show_options)(struct seq_file *m, struct super_block *sb); + int (*sb_statfs)(struct dentry *dentry); + int (*sb_mount)(const char *dev_name, const struct path *path, + const char *type, unsigned long flags, void *data); + int (*sb_umount)(struct vfsmount *mnt, int flags); + int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path); + int (*sb_set_mnt_opts)(struct super_block *sb, + struct security_mnt_opts *opts, + unsigned long kern_flags, + unsigned long *set_kern_flags); + int (*sb_clone_mnt_opts)(const struct super_block *oldsb, + struct super_block *newsb, + unsigned long kern_flags, + unsigned long *set_kern_flags); + int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts); + int (*dentry_init_security)(struct dentry *dentry, int mode, + const struct qstr *name, void **ctx, + u32 *ctxlen); + int (*dentry_create_files_as)(struct dentry *dentry, int mode, + struct qstr *name, + const struct cred *old, + struct cred *new); + + +#ifdef CONFIG_SECURITY_PATH + int (*path_unlink)(const struct path *dir, struct dentry *dentry); + int (*path_mkdir)(const struct path *dir, struct dentry *dentry, + umode_t mode); + int
(*path_rmdir)(const struct path *dir, struct dentry *dentry); + int (*path_mknod)(const struct path *dir, struct dentry *dentry, + umode_t mode, unsigned int dev); + int (*path_truncate)(const struct path *path); + int (*path_symlink)(const struct path *dir, struct dentry *dentry, + const char *old_name); + int (*path_link)(struct dentry *old_dentry, const struct path *new_dir, + struct dentry *new_dentry); + int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry, + const struct path *new_dir, + struct dentry *new_dentry); + int (*path_chmod)(const struct path *path, umode_t mode); + int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid); + int (*path_chroot)(const struct path *path); +#endif + + int (*inode_alloc_security)(struct inode *inode); + void (*inode_free_security)(struct inode *inode); + int (*inode_init_security)(struct inode *inode, struct inode *dir, + const struct qstr *qstr, + const char **name, void **value, + size_t *len); + int (*inode_create)(struct inode *dir, struct dentry *dentry, + umode_t mode); + int (*inode_link)(struct dentry *old_dentry, struct inode *dir, + struct dentry *new_dentry); + int (*inode_unlink)(struct inode *dir, struct dentry *dentry); + int (*inode_symlink)(struct inode *dir, struct dentry *dentry, + const char *old_name); + int (*inode_mkdir)(struct inode *dir, struct dentry *dentry, + umode_t mode); + int (*inode_rmdir)(struct inode *dir, struct dentry *dentry); + int (*inode_mknod)(struct inode *dir, struct dentry *dentry, + umode_t mode, dev_t dev); + int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry); + int (*inode_readlink)(struct dentry *dentry); + int (*inode_follow_link)(struct dentry *dentry, struct inode *inode, + bool rcu); + int (*inode_permission)(struct inode *inode, int mask); + int (*inode_setattr)(struct dentry *dentry, struct iattr *attr); + int (*inode_getattr)(const struct path *path); + int (*inode_setxattr)(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); + void (*inode_post_setxattr)(struct dentry *dentry, const char *name, + const void *value, size_t size, + int flags); + int (*inode_getxattr)(struct dentry *dentry, const char *name); + int (*inode_listxattr)(struct dentry *dentry); + int (*inode_removexattr)(struct dentry *dentry, const char *name); + int (*inode_need_killpriv)(struct dentry *dentry); + int (*inode_killpriv)(struct dentry *dentry); + int (*inode_getsecurity)(struct inode *inode, const char *name, + void **buffer, bool alloc); + int (*inode_setsecurity)(struct inode *inode, const char *name, + const void *value, size_t size, + int flags); + int (*inode_listsecurity)(struct inode *inode, char *buffer, + size_t buffer_size); + void (*inode_getsecid)(struct inode *inode, u32 *secid); + int (*inode_copy_up)(struct dentry *src, struct cred **new); + int (*inode_copy_up_xattr)(const char *name); + + int (*file_permission)(struct file *file, int mask); + int (*file_alloc_security)(struct file *file); + void (*file_free_security)(struct file *file); + int (*file_ioctl)(struct file *file, unsigned int cmd, + unsigned long arg); + int (*mmap_addr)(unsigned long addr); + int (*mmap_file)(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags); + int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot, + unsigned long prot); + int (*file_lock)(struct file *file, unsigned int cmd); + int (*file_fcntl)(struct file *file, unsigned int 
cmd, + unsigned long arg); + void (*file_set_fowner)(struct file *file); + int (*file_send_sigiotask)(struct task_struct *tsk, + struct fown_struct *fown, int sig); + int (*file_receive)(struct file *file); + int (*file_open)(struct file *file); + + int (*task_alloc)(struct task_struct *task, unsigned long clone_flags); + void (*task_free)(struct task_struct *task); + int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp); + void (*cred_free)(struct cred *cred); + int (*cred_prepare)(struct cred *new, const struct cred *old, + gfp_t gfp); + void (*cred_transfer)(struct cred *new, const struct cred *old); + void (*cred_getsecid)(const struct cred *c, u32 *secid); + int (*kernel_act_as)(struct cred *new, u32 secid); + int (*kernel_create_files_as)(struct cred *new, struct inode *inode); + int (*kernel_module_request)(char *kmod_name); + int (*kernel_load_data)(enum kernel_load_data_id id); + int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id); + int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size, + enum kernel_read_file_id id); + int (*task_fix_setuid)(struct cred *new, const struct cred *old, + int flags); + int (*task_setpgid)(struct task_struct *p, pid_t pgid); + int (*task_getpgid)(struct task_struct *p); + int (*task_getsid)(struct task_struct *p); + void (*task_getsecid)(struct task_struct *p, u32 *secid); + int (*task_setnice)(struct task_struct *p, int nice); + int (*task_setioprio)(struct task_struct *p, int ioprio); + int (*task_getioprio)(struct task_struct *p); + int (*task_prlimit)(const struct cred *cred, const struct cred *tcred, + unsigned int flags); + int (*task_setrlimit)(struct task_struct *p, unsigned int resource, + struct rlimit *new_rlim); + int (*task_setscheduler)(struct task_struct *p); + int (*task_getscheduler)(struct task_struct *p); + int (*task_movememory)(struct task_struct *p); + int (*task_kill)(struct task_struct *p, struct siginfo *info, + int sig, const struct cred *cred); + int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); + void (*task_to_inode)(struct task_struct *p, struct inode *inode); + + int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag); + void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid); + + int (*msg_msg_alloc_security)(struct msg_msg *msg); + void (*msg_msg_free_security)(struct msg_msg *msg); + + int (*msg_queue_alloc_security)(struct kern_ipc_perm *msq); + void (*msg_queue_free_security)(struct kern_ipc_perm *msq); + int (*msg_queue_associate)(struct kern_ipc_perm *msq, int msqflg); + int (*msg_queue_msgctl)(struct kern_ipc_perm *msq, int cmd); + int (*msg_queue_msgsnd)(struct kern_ipc_perm *msq, struct msg_msg *msg, + int msqflg); + int (*msg_queue_msgrcv)(struct kern_ipc_perm *msq, struct msg_msg *msg, + struct task_struct *target, long type, + int mode); + + int (*shm_alloc_security)(struct kern_ipc_perm *shp); + void (*shm_free_security)(struct kern_ipc_perm *shp); + int (*shm_associate)(struct kern_ipc_perm *shp, int shmflg); + int (*shm_shmctl)(struct kern_ipc_perm *shp, int cmd); + int (*shm_shmat)(struct kern_ipc_perm *shp, char __user *shmaddr, + int shmflg); + + int (*sem_alloc_security)(struct kern_ipc_perm *sma); + void (*sem_free_security)(struct kern_ipc_perm *sma); + int (*sem_associate)(struct kern_ipc_perm *sma, int semflg); + int (*sem_semctl)(struct kern_ipc_perm *sma, int cmd); + int (*sem_semop)(struct kern_ipc_perm *sma, struct sembuf *sops, + unsigned nsops, int alter); + + int 
(*netlink_send)(struct sock *sk, struct sk_buff *skb); + + void (*d_instantiate)(struct dentry *dentry, struct inode *inode); + + int (*getprocattr)(struct task_struct *p, char *name, char **value); + int (*setprocattr)(const char *name, void *value, size_t size); + int (*ismaclabel)(const char *name); + int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); + int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid); + void (*release_secctx)(char *secdata, u32 seclen); + + void (*inode_invalidate_secctx)(struct inode *inode); + int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); + int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); + int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); + +#ifdef CONFIG_SECURITY_NETWORK + int (*unix_stream_connect)(struct sock *sock, struct sock *other, + struct sock *newsk); + int (*unix_may_send)(struct socket *sock, struct socket *other); + + int (*socket_create)(int family, int type, int protocol, int kern); + int (*socket_post_create)(struct socket *sock, int family, int type, + int protocol, int kern); + int (*socket_socketpair)(struct socket *socka, struct socket *sockb); + int (*socket_bind)(struct socket *sock, struct sockaddr *address, + int addrlen); + int (*socket_connect)(struct socket *sock, struct sockaddr *address, + int addrlen); + int (*socket_listen)(struct socket *sock, int backlog); + int (*socket_accept)(struct socket *sock, struct socket *newsock); + int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg, + int size); + int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg, + int size, int flags); + int (*socket_getsockname)(struct socket *sock); + int (*socket_getpeername)(struct socket *sock); + int (*socket_getsockopt)(struct socket *sock, int level, int optname); + int (*socket_setsockopt)(struct socket *sock, int level, int optname); + int (*socket_shutdown)(struct socket *sock, int how); + int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb); + int (*socket_getpeersec_stream)(struct socket *sock, + char __user *optval, + int __user *optlen, unsigned len); + int (*socket_getpeersec_dgram)(struct socket *sock, + struct sk_buff *skb, u32 *secid); + int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority); + void (*sk_free_security)(struct sock *sk); + void (*sk_clone_security)(const struct sock *sk, struct sock *newsk); + void (*sk_getsecid)(struct sock *sk, u32 *secid); + void (*sock_graft)(struct sock *sk, struct socket *parent); + int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb, + struct request_sock *req); + void (*inet_csk_clone)(struct sock *newsk, + const struct request_sock *req); + void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb); + int (*secmark_relabel_packet)(u32 secid); + void (*secmark_refcount_inc)(void); + void (*secmark_refcount_dec)(void); + void (*req_classify_flow)(const struct request_sock *req, + struct flowi *fl); + int (*tun_dev_alloc_security)(void **security); + void (*tun_dev_free_security)(void *security); + int (*tun_dev_create)(void); + int (*tun_dev_attach_queue)(void *security); + int (*tun_dev_attach)(struct sock *sk, void *security); + int (*tun_dev_open)(void *security); + int (*sctp_assoc_request)(struct sctp_endpoint *ep, + struct sk_buff *skb); + int (*sctp_bind_connect)(struct sock *sk, int optname, + struct sockaddr *address, int addrlen); + void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk, + struct sock *newsk); +#endif /* 
CONFIG_SECURITY_NETWORK */ + +#ifdef CONFIG_SECURITY_INFINIBAND + int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey); + int (*ib_endport_manage_subnet)(void *sec, const char *dev_name, + u8 port_num); + int (*ib_alloc_security)(void **sec); + void (*ib_free_security)(void *sec); +#endif /* CONFIG_SECURITY_INFINIBAND */ + +#ifdef CONFIG_SECURITY_NETWORK_XFRM + int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx, + gfp_t gfp); + int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx, + struct xfrm_sec_ctx **new_ctx); + void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx); + int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx); + int (*xfrm_state_alloc)(struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx); + int (*xfrm_state_alloc_acquire)(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, + u32 secid); + void (*xfrm_state_free_security)(struct xfrm_state *x); + int (*xfrm_state_delete_security)(struct xfrm_state *x); + int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid, + u8 dir); + int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, + struct xfrm_policy *xp, + const struct flowi *fl); + int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); +#endif /* CONFIG_SECURITY_NETWORK_XFRM */ + + /* key management security hooks */ +#ifdef CONFIG_KEYS + int (*key_alloc)(struct key *key, const struct cred *cred, + unsigned long flags); + void (*key_free)(struct key *key); + int (*key_permission)(key_ref_t key_ref, const struct cred *cred, + unsigned perm); + int (*key_getsecurity)(struct key *key, char **_buffer); +#endif /* CONFIG_KEYS */ + +#ifdef CONFIG_AUDIT + int (*audit_rule_init)(u32 field, u32 op, char *rulestr, + void **lsmrule); + int (*audit_rule_known)(struct audit_krule *krule); + int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule, + struct audit_context *actx); + void (*audit_rule_free)(void *lsmrule); +#endif /* CONFIG_AUDIT */ + +#ifdef CONFIG_BPF_SYSCALL + int (*bpf)(int cmd, union bpf_attr *attr, + unsigned int size); + int (*bpf_map)(struct bpf_map *map, fmode_t fmode); + int (*bpf_prog)(struct bpf_prog *prog); + int (*bpf_map_alloc_security)(struct bpf_map *map); + void (*bpf_map_free_security)(struct bpf_map *map); + int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux); + void (*bpf_prog_free_security)(struct bpf_prog_aux *aux); +#endif /* CONFIG_BPF_SYSCALL */ +}; + +struct security_hook_heads { + struct hlist_head binder_set_context_mgr; + struct hlist_head binder_transaction; + struct hlist_head binder_transfer_binder; + struct hlist_head binder_transfer_file; + struct hlist_head ptrace_access_check; + struct hlist_head ptrace_traceme; + struct hlist_head capget; + struct hlist_head capset; + struct hlist_head capable; + struct hlist_head quotactl; + struct hlist_head quota_on; + struct hlist_head syslog; + struct hlist_head settime; + struct hlist_head vm_enough_memory; + struct hlist_head bprm_set_creds; + struct hlist_head bprm_check_security; + struct hlist_head bprm_committing_creds; + struct hlist_head bprm_committed_creds; + struct hlist_head sb_alloc_security; + struct hlist_head sb_free_security; + struct hlist_head sb_copy_data; + struct hlist_head sb_remount; + struct hlist_head sb_kern_mount; + struct hlist_head sb_show_options; + struct hlist_head sb_statfs; + struct hlist_head sb_mount; + struct hlist_head sb_umount; + struct hlist_head sb_pivotroot; + struct hlist_head sb_set_mnt_opts; + struct hlist_head 
sb_clone_mnt_opts; + struct hlist_head sb_parse_opts_str; + struct hlist_head dentry_init_security; + struct hlist_head dentry_create_files_as; +#ifdef CONFIG_SECURITY_PATH + struct hlist_head path_unlink; + struct hlist_head path_mkdir; + struct hlist_head path_rmdir; + struct hlist_head path_mknod; + struct hlist_head path_truncate; + struct hlist_head path_symlink; + struct hlist_head path_link; + struct hlist_head path_rename; + struct hlist_head path_chmod; + struct hlist_head path_chown; + struct hlist_head path_chroot; +#endif + struct hlist_head inode_alloc_security; + struct hlist_head inode_free_security; + struct hlist_head inode_init_security; + struct hlist_head inode_create; + struct hlist_head inode_link; + struct hlist_head inode_unlink; + struct hlist_head inode_symlink; + struct hlist_head inode_mkdir; + struct hlist_head inode_rmdir; + struct hlist_head inode_mknod; + struct hlist_head inode_rename; + struct hlist_head inode_readlink; + struct hlist_head inode_follow_link; + struct hlist_head inode_permission; + struct hlist_head inode_setattr; + struct hlist_head inode_getattr; + struct hlist_head inode_setxattr; + struct hlist_head inode_post_setxattr; + struct hlist_head inode_getxattr; + struct hlist_head inode_listxattr; + struct hlist_head inode_removexattr; + struct hlist_head inode_need_killpriv; + struct hlist_head inode_killpriv; + struct hlist_head inode_getsecurity; + struct hlist_head inode_setsecurity; + struct hlist_head inode_listsecurity; + struct hlist_head inode_getsecid; + struct hlist_head inode_copy_up; + struct hlist_head inode_copy_up_xattr; + struct hlist_head file_permission; + struct hlist_head file_alloc_security; + struct hlist_head file_free_security; + struct hlist_head file_ioctl; + struct hlist_head mmap_addr; + struct hlist_head mmap_file; + struct hlist_head file_mprotect; + struct hlist_head file_lock; + struct hlist_head file_fcntl; + struct hlist_head file_set_fowner; + struct hlist_head file_send_sigiotask; + struct hlist_head file_receive; + struct hlist_head file_open; + struct hlist_head task_alloc; + struct hlist_head task_free; + struct hlist_head cred_alloc_blank; + struct hlist_head cred_free; + struct hlist_head cred_prepare; + struct hlist_head cred_transfer; + struct hlist_head cred_getsecid; + struct hlist_head kernel_act_as; + struct hlist_head kernel_create_files_as; + struct hlist_head kernel_load_data; + struct hlist_head kernel_read_file; + struct hlist_head kernel_post_read_file; + struct hlist_head kernel_module_request; + struct hlist_head task_fix_setuid; + struct hlist_head task_setpgid; + struct hlist_head task_getpgid; + struct hlist_head task_getsid; + struct hlist_head task_getsecid; + struct hlist_head task_setnice; + struct hlist_head task_setioprio; + struct hlist_head task_getioprio; + struct hlist_head task_prlimit; + struct hlist_head task_setrlimit; + struct hlist_head task_setscheduler; + struct hlist_head task_getscheduler; + struct hlist_head task_movememory; + struct hlist_head task_kill; + struct hlist_head task_prctl; + struct hlist_head task_to_inode; + struct hlist_head ipc_permission; + struct hlist_head ipc_getsecid; + struct hlist_head msg_msg_alloc_security; + struct hlist_head msg_msg_free_security; + struct hlist_head msg_queue_alloc_security; + struct hlist_head msg_queue_free_security; + struct hlist_head msg_queue_associate; + struct hlist_head msg_queue_msgctl; + struct hlist_head msg_queue_msgsnd; + struct hlist_head msg_queue_msgrcv; + struct hlist_head shm_alloc_security; + struct 
hlist_head shm_free_security; + struct hlist_head shm_associate; + struct hlist_head shm_shmctl; + struct hlist_head shm_shmat; + struct hlist_head sem_alloc_security; + struct hlist_head sem_free_security; + struct hlist_head sem_associate; + struct hlist_head sem_semctl; + struct hlist_head sem_semop; + struct hlist_head netlink_send; + struct hlist_head d_instantiate; + struct hlist_head getprocattr; + struct hlist_head setprocattr; + struct hlist_head ismaclabel; + struct hlist_head secid_to_secctx; + struct hlist_head secctx_to_secid; + struct hlist_head release_secctx; + struct hlist_head inode_invalidate_secctx; + struct hlist_head inode_notifysecctx; + struct hlist_head inode_setsecctx; + struct hlist_head inode_getsecctx; +#ifdef CONFIG_SECURITY_NETWORK + struct hlist_head unix_stream_connect; + struct hlist_head unix_may_send; + struct hlist_head socket_create; + struct hlist_head socket_post_create; + struct hlist_head socket_socketpair; + struct hlist_head socket_bind; + struct hlist_head socket_connect; + struct hlist_head socket_listen; + struct hlist_head socket_accept; + struct hlist_head socket_sendmsg; + struct hlist_head socket_recvmsg; + struct hlist_head socket_getsockname; + struct hlist_head socket_getpeername; + struct hlist_head socket_getsockopt; + struct hlist_head socket_setsockopt; + struct hlist_head socket_shutdown; + struct hlist_head socket_sock_rcv_skb; + struct hlist_head socket_getpeersec_stream; + struct hlist_head socket_getpeersec_dgram; + struct hlist_head sk_alloc_security; + struct hlist_head sk_free_security; + struct hlist_head sk_clone_security; + struct hlist_head sk_getsecid; + struct hlist_head sock_graft; + struct hlist_head inet_conn_request; + struct hlist_head inet_csk_clone; + struct hlist_head inet_conn_established; + struct hlist_head secmark_relabel_packet; + struct hlist_head secmark_refcount_inc; + struct hlist_head secmark_refcount_dec; + struct hlist_head req_classify_flow; + struct hlist_head tun_dev_alloc_security; + struct hlist_head tun_dev_free_security; + struct hlist_head tun_dev_create; + struct hlist_head tun_dev_attach_queue; + struct hlist_head tun_dev_attach; + struct hlist_head tun_dev_open; + struct hlist_head sctp_assoc_request; + struct hlist_head sctp_bind_connect; + struct hlist_head sctp_sk_clone; +#endif /* CONFIG_SECURITY_NETWORK */ +#ifdef CONFIG_SECURITY_INFINIBAND + struct hlist_head ib_pkey_access; + struct hlist_head ib_endport_manage_subnet; + struct hlist_head ib_alloc_security; + struct hlist_head ib_free_security; +#endif /* CONFIG_SECURITY_INFINIBAND */ +#ifdef CONFIG_SECURITY_NETWORK_XFRM + struct hlist_head xfrm_policy_alloc_security; + struct hlist_head xfrm_policy_clone_security; + struct hlist_head xfrm_policy_free_security; + struct hlist_head xfrm_policy_delete_security; + struct hlist_head xfrm_state_alloc; + struct hlist_head xfrm_state_alloc_acquire; + struct hlist_head xfrm_state_free_security; + struct hlist_head xfrm_state_delete_security; + struct hlist_head xfrm_policy_lookup; + struct hlist_head xfrm_state_pol_flow_match; + struct hlist_head xfrm_decode_session; +#endif /* CONFIG_SECURITY_NETWORK_XFRM */ +#ifdef CONFIG_KEYS + struct hlist_head key_alloc; + struct hlist_head key_free; + struct hlist_head key_permission; + struct hlist_head key_getsecurity; +#endif /* CONFIG_KEYS */ +#ifdef CONFIG_AUDIT + struct hlist_head audit_rule_init; + struct hlist_head audit_rule_known; + struct hlist_head audit_rule_match; + struct hlist_head audit_rule_free; +#endif /* CONFIG_AUDIT */ +#ifdef 
CONFIG_BPF_SYSCALL + struct hlist_head bpf; + struct hlist_head bpf_map; + struct hlist_head bpf_prog; + struct hlist_head bpf_map_alloc_security; + struct hlist_head bpf_map_free_security; + struct hlist_head bpf_prog_alloc_security; + struct hlist_head bpf_prog_free_security; +#endif /* CONFIG_BPF_SYSCALL */ +} __randomize_layout; + +/* + * Security module hook list structure. + * For use with generic list macros for common operations. + */ +struct security_hook_list { + struct hlist_node list; + struct hlist_head *head; + union security_list_options hook; + char *lsm; +} __randomize_layout; + +/* + * Initializing a security_hook_list structure takes + * up a lot of space in a source file. This macro takes + * care of the common case and reduces the amount of + * text involved. + */ +#define LSM_HOOK_INIT(HEAD, HOOK) \ + { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } } + +extern struct security_hook_heads security_hook_heads; +extern char *lsm_names; + +extern void security_add_hooks(struct security_hook_list *hooks, int count, + char *lsm); + +#ifdef CONFIG_SECURITY_SELINUX_DISABLE +/* + * Assuring the safety of deleting a security module is up to + * the security module involved. This may entail ordering the + * module's hook list in a particular way, refusing to disable + * the module once a policy is loaded or any number of other + * actions better imagined than described. + * + * The name of the configuration option reflects the only module + * that currently uses the mechanism. Any developer who thinks + * disabling their module is a good idea needs to be at least as + * careful as the SELinux team. + */ +static inline void security_delete_hooks(struct security_hook_list *hooks, + int count) +{ + int i; + + for (i = 0; i < count; i++) + hlist_del_rcu(&hooks[i].list); +} +#endif /* CONFIG_SECURITY_SELINUX_DISABLE */ + +/* Currently required to handle SELinux runtime hook disable. */ +#ifdef CONFIG_SECURITY_WRITABLE_HOOKS +#define __lsm_ro_after_init +#else +#define __lsm_ro_after_init __ro_after_init +#endif /* CONFIG_SECURITY_WRITABLE_HOOKS */ + +extern int __init security_module_enable(const char *module); +extern void __init capability_add_hooks(void); +#ifdef CONFIG_SECURITY_YAMA +extern void __init yama_add_hooks(void); +#else +static inline void __init yama_add_hooks(void) { } +#endif +#ifdef CONFIG_SECURITY_LOADPIN +void __init loadpin_add_hooks(void); +#else +static inline void loadpin_add_hooks(void) { }; +#endif + +#endif /* ! __LINUX_LSM_HOOKS_H */ diff --git a/include/linux/lz4.h b/include/linux/lz4.h new file mode 100644 index 000000000..394e3d921 --- /dev/null +++ b/include/linux/lz4.h @@ -0,0 +1,648 @@ +/* LZ4 Kernel Interface + * + * Copyright (C) 2013, LG Electronics, Kyungsik Lee + * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file is based on the original header file + * for LZ4 - Fast LZ compression algorithm. + * + * LZ4 - Fast LZ compression algorithm + * Copyright (C) 2011-2016, Yann Collet. 
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * You can contact the author at : + * - LZ4 homepage : http://www.lz4.org + * - LZ4 source repository : https://github.com/lz4/lz4 + */ + +#ifndef __LZ4_H__ +#define __LZ4_H__ + +#include <linux/types.h> +#include <linux/string.h> /* memset, memcpy */ + +/*-************************************************************************ + * CONSTANTS + **************************************************************************/ +/* + * LZ4_MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes + * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache + */ +#define LZ4_MEMORY_USAGE 14 + +#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ +#define LZ4_COMPRESSBOUND(isize) (\ + (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \ + ? 0 \ + : (isize) + ((isize)/255) + 16) + +#define LZ4_ACCELERATION_DEFAULT 1 +#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) +#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) +#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) + +#define LZ4HC_MIN_CLEVEL 3 +#define LZ4HC_DEFAULT_CLEVEL 9 +#define LZ4HC_MAX_CLEVEL 16 + +#define LZ4HC_DICTIONARY_LOGSIZE 16 +#define LZ4HC_MAXD (1 << LZ4HC_DICTIONARY_LOGSIZE) +#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1) +#define LZ4HC_HASH_LOG (LZ4HC_DICTIONARY_LOGSIZE - 1) +#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG) +#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1) + +/*-************************************************************************ + * Compression Functions + **************************************************************************/ + +/** + * LZ4_compress_default() - Compress data from source to dest + * @source: source address of the original data + * @dest: output buffer address of the compressed data + * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxOutputSize: full or partial size of buffer 'dest' + * which must be already allocated + * @wrkmem: address of the working memory. + * This requires 'workmem' of LZ4_MEM_COMPRESS. + * + * Compresses 'inputSize' bytes from buffer 'source' + * into already allocated 'dest' buffer of size 'maxOutputSize'. + * Compression is guaranteed to succeed if + * 'maxOutputSize' >= LZ4_compressBound(inputSize). + * It also runs faster, so it's a recommended setting. + * If the function cannot compress 'source' into a more limited 'dest' budget, + * compression stops *immediately*, and the function result is zero. + * As a consequence, 'dest' content is not valid. + * + * Return: Number of bytes written into buffer 'dest' + * (necessarily <= maxOutputSize) or 0 if compression fails + */ +int LZ4_compress_default(const char *source, char *dest, int inputSize, + int maxOutputSize, void *wrkmem); + +/** + * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param + * @source: source address of the original data + * @dest: output buffer address of the compressed data + * @inputSize: size of the input data.
Max supported value is LZ4_MAX_INPUT_SIZE + * @maxOutputSize: full or partial size of buffer 'dest' + * which must be already allocated + * @acceleration: acceleration factor + * @wrkmem: address of the working memory. + * This requires 'workmem' of LZ4_MEM_COMPRESS. + * + * Same as LZ4_compress_default(), but allows selecting an "acceleration" + * factor. The larger the acceleration value, the faster the algorithm, + * but also the lesser the compression. It's a trade-off. It can be fine tuned, + * with each successive value providing roughly +~3% to speed. + * An acceleration value of "1" is the same as regular LZ4_compress_default() + * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1. + * + * Return: Number of bytes written into buffer 'dest' + * (necessarily <= maxOutputSize) or 0 if compression fails + */ +int LZ4_compress_fast(const char *source, char *dest, int inputSize, + int maxOutputSize, int acceleration, void *wrkmem); + +/** + * LZ4_compress_destSize() - Compress as much data as possible + * from source to dest + * @source: source address of the original data + * @dest: output buffer address of the compressed data + * @sourceSizePtr: will be modified to indicate how many bytes were read + * from 'source' to fill 'dest'. New value is necessarily <= old value. + * @targetDestSize: Size of buffer 'dest' which must be already allocated + * @wrkmem: address of the working memory. + * This requires 'workmem' of LZ4_MEM_COMPRESS. + * + * Reverse the logic, by compressing as much data as possible + * from 'source' buffer into already allocated buffer 'dest' + * of size 'targetDestSize'. + * This function either compresses the entire 'source' content into 'dest' + * if it's large enough, or fills 'dest' buffer completely with as much data as + * possible from 'source'. + * + * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize) + * or 0 if compression fails + */ +int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr, + int targetDestSize, void *wrkmem); + +/*-************************************************************************ + * Decompression Functions + **************************************************************************/ + +/** + * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest' + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated with 'originalSize' bytes + * @originalSize: is the original and therefore uncompressed size + * + * Decompresses data from 'source' into 'dest'. + * This function fully respects memory boundaries for properly formed + * compressed data. + * It is a bit faster than LZ4_decompress_safe(). + * However, it does not provide any protection against intentionally + * modified data stream (malicious input). + * Use this function in trusted environment only + * (data to decode comes from a trusted source). + * + * Return: number of bytes read from the source buffer + * or a negative result if decompression fails.
+ */ +int LZ4_decompress_fast(const char *source, char *dest, int originalSize); + +/** + * LZ4_decompress_safe() - Decompression protected against buffer overflow + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated + * @compressedSize: is the precise full size of the compressed block + * @maxDecompressedSize: is the size of 'dest' buffer + * + * Decompresses data fom 'source' into 'dest'. + * If the source stream is detected malformed, the function will + * stop decoding and return a negative result. + * This function is protected against buffer overflow exploits, + * including malicious data packets. It never writes outside output buffer, + * nor reads outside input buffer. + * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, + int maxDecompressedSize); + +/** + * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize' + * at position 'source' into buffer 'dest' + * @source: source address of the compressed data + * @dest: output buffer address of the decompressed data which must be + * already allocated + * @compressedSize: is the precise full size of the compressed block. + * @targetOutputSize: the decompression operation will try + * to stop as soon as 'targetOutputSize' has been reached + * @maxDecompressedSize: is the size of destination buffer + * + * This function decompresses a compressed block of size 'compressedSize' + * at position 'source' into destination buffer 'dest' + * of size 'maxDecompressedSize'. + * The function tries to stop decompressing operation as soon as + * 'targetOutputSize' has been reached, reducing decompression time. + * This function never writes outside of output buffer, + * and never reads outside of input buffer. + * It is therefore protected against malicious data packets. + * + * Return: the number of bytes decoded in the destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + * + */ +int LZ4_decompress_safe_partial(const char *source, char *dest, + int compressedSize, int targetOutputSize, int maxDecompressedSize); + +/*-************************************************************************ + * LZ4 HC Compression + **************************************************************************/ + +/** + * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm + * @src: source address of the original data + * @dst: output buffer address of the compressed data + * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @dstCapacity: full or partial size of buffer 'dst', + * which must be already allocated + * @compressionLevel: Recommended values are between 4 and 9, although any + * value between 1 and LZ4HC_MAX_CLEVEL will work. + * Values >LZ4HC_MAX_CLEVEL behave the same as 16. + * @wrkmem: address of the working memory. + * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS. + * + * Compress data from 'src' into 'dst', using the more powerful + * but slower "HC" algorithm. Compression is guaranteed to succeed if + * `dstCapacity >= LZ4_compressBound(srcSize) + * + * Return : the number of bytes written into 'dst' or 0 if compression fails. 
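For illustration only (not part of this header): a sketch of the common one-shot pattern built from the functions above, compressing into a worst-case sized temporary buffer and decompressing with the bounds-checked variant. Function and buffer names are invented for the example; error handling is reduced to the essentials.

#include <linux/lz4.h>
#include <linux/mm.h>		/* kvmalloc()/kvfree() */

static int example_lz4_round_trip(const char *src, int src_len,
				  char *out, int out_cap)
{
	int comp_len, decomp_len;
	void *wrkmem;
	char *comp;
	int ret = 0;

	wrkmem = kvmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
	comp = kvmalloc(LZ4_COMPRESSBOUND(src_len), GFP_KERNEL);
	if (!wrkmem || !comp) {
		ret = -ENOMEM;
		goto out;
	}

	/* 'comp' is worst-case sized, so this only fails on bad arguments */
	comp_len = LZ4_compress_default(src, comp, src_len,
					LZ4_COMPRESSBOUND(src_len), wrkmem);
	if (!comp_len) {
		ret = -EINVAL;
		goto out;
	}

	/* safe variant: never reads or writes outside the given bounds */
	decomp_len = LZ4_decompress_safe(comp, out, comp_len, out_cap);
	if (decomp_len < 0 || decomp_len != src_len)
		ret = -EIO;
out:
	kvfree(comp);
	kvfree(wrkmem);
	return ret;
}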
+ */ +int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity, + int compressionLevel, void *wrkmem); + +/** + * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure + * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure + * @compressionLevel: Recommended values are between 4 and 9, although any + * value between 1 and LZ4HC_MAX_CLEVEL will work. + * Values >LZ4HC_MAX_CLEVEL behave the same as 16. + * + * An LZ4_streamHC_t structure can be allocated once + * and re-used multiple times. + * Use this function to init an allocated `LZ4_streamHC_t` structure + * and start a new compression. + */ +void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel); + +/** + * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC + * @streamHCPtr: pointer to the LZ4HC_stream_t + * @dictionary: dictionary to load + * @dictSize: size of dictionary + * + * Use this function to load a static dictionary into LZ4HC_stream. + * Any previous data will be forgotten, only 'dictionary' + * will remain in memory. + * Loading a size of 0 is allowed. + * + * Return : dictionary size, in bytes (necessarily <= 64 KB) + */ +int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary, + int dictSize); + +/** + * LZ4_compress_HC_continue() - Compress 'src' using data from previously + * compressed blocks as a dictionary using the HC algorithm + * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure + * @src: source address of the original data + * @dst: output buffer address of the compressed data, + * which must be already allocated + * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxDstSize: full or partial size of buffer 'dest' + * which must be already allocated + * + * These functions compress data in successive blocks of any size, using + * previous blocks as dictionary. One key assumption is that previous + * blocks (up to 64 KB) remain read-accessible while + * compressing next blocks. There is an exception for ring buffers, + * which can be smaller than 64 KB. + * Ring buffers scenario is automatically detected and handled by + * LZ4_compress_HC_continue(). + * Before starting compression, state must be properly initialized, + * using LZ4_resetStreamHC(). + * A first "fictional block" can then be designated as + * initial dictionary, using LZ4_loadDictHC() (Optional). + * Then, use LZ4_compress_HC_continue() + * to compress each successive block. Previous memory blocks + * (including initial dictionary when present) must remain accessible + * and unmodified during compression. + * 'dst' buffer should be sized to handle worst case scenarios, using + * LZ4_compressBound(), to ensure operation success. + * If, for any reason, previous data blocks can't be preserved unmodified + * in memory during next compression block, + * you must save it to a safer memory space, using LZ4_saveDictHC(). + * Return value of LZ4_saveDictHC() is the size of dictionary + * effectively saved into 'safeBuffer'. 
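For illustration only (not part of this header): a sketch of a one-shot HC compression call using the constants declared earlier in this header (LZ4HC_DEFAULT_CLEVEL, and the LZ4HC_MEM_COMPRESS working-memory size named in the kerneldoc above). example_lz4hc_pack() is an invented wrapper, not part of the API.

#include <linux/lz4.h>
#include <linux/mm.h>		/* kvmalloc()/kvfree() */

static int example_lz4hc_pack(const char *src, int src_len,
			      char *dst, int dst_cap)
{
	void *wrkmem = kvmalloc(LZ4HC_MEM_COMPRESS, GFP_KERNEL);
	int n;

	if (!wrkmem)
		return -ENOMEM;

	/* trades CPU time for ratio; level 9 is the documented default */
	n = LZ4_compress_HC(src, dst, src_len, dst_cap,
			    LZ4HC_DEFAULT_CLEVEL, wrkmem);

	kvfree(wrkmem);
	return n ? n : -EINVAL;		/* 0 means 'dst_cap' was too small */
}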
+ * + * Return: Number of bytes written into buffer 'dst' or 0 if compression fails + */ +int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src, + char *dst, int srcSize, int maxDstSize); + +/** + * LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream + * @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure + * @safeBuffer: buffer to save dictionary to, must be already allocated + * @maxDictSize: size of 'safeBuffer' + * + * If previously compressed data block is not guaranteed + * to remain available at its memory location, + * save it into a safer place (char *safeBuffer). + * Note : you don't need to call LZ4_loadDictHC() afterwards, + * dictionary is immediately usable, you can therefore call + * LZ4_compress_HC_continue(). + * + * Return : saved dictionary size in bytes (necessarily <= maxDictSize), + * or 0 if error. + */ +int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer, + int maxDictSize); + +/*-********************************************* + * Streaming Compression Functions + ***********************************************/ + +/** + * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure + * @LZ4_stream: pointer to the 'LZ4_stream_t' structure + * + * An LZ4_stream_t structure can be allocated once + * and re-used multiple times. + * Use this function to init an allocated `LZ4_stream_t` structure + * and start a new compression. + */ +void LZ4_resetStream(LZ4_stream_t *LZ4_stream); + +/** + * LZ4_loadDict() - Load a static dictionary into LZ4_stream + * @streamPtr: pointer to the LZ4_stream_t + * @dictionary: dictionary to load + * @dictSize: size of dictionary + * + * Use this function to load a static dictionary into LZ4_stream. + * Any previous data will be forgotten, only 'dictionary' + * will remain in memory. + * Loading a size of 0 is allowed. + * + * Return : dictionary size, in bytes (necessarily <= 64 KB) + */ +int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary, + int dictSize); + +/** + * LZ4_saveDict() - Save static dictionary from LZ4_stream + * @streamPtr: pointer to the 'LZ4_stream_t' structure + * @safeBuffer: buffer to save dictionary to, must be already allocated + * @dictSize: size of 'safeBuffer' + * + * If previously compressed data block is not guaranteed + * to remain available at its memory location, + * save it into a safer place (char *safeBuffer). + * Note : you don't need to call LZ4_loadDict() afterwards, + * dictionary is immediately usable, you can therefore call + * LZ4_compress_fast_continue(). + * + * Return : saved dictionary size in bytes (necessarily <= dictSize), + * or 0 if error. + */ +int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize); + +/** + * LZ4_compress_fast_continue() - Compress 'src' using data from previously + * compressed blocks as a dictionary + * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure + * @src: source address of the original data + * @dst: output buffer address of the compressed data, + * which must be already allocated + * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxDstSize: full or partial size of buffer 'dest' + * which must be already allocated + * @acceleration: acceleration factor + * + * Compress buffer content 'src', using data from previously compressed blocks + * as dictionary to improve compression ratio. + * Important : Previous data blocks are assumed to still + * be present and unmodified ! 
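For illustration only (not part of this header): a sketch of the block-streaming pattern that LZ4_compress_fast_continue() is documented for here: reset the state once, then feed consecutive chunks while keeping the already-compressed source data readable. The emit() callback and the flat chunk layout are assumptions of the example.

#include <linux/lz4.h>
#include <linux/mm.h>		/* kvmalloc()/kvfree() */

static int example_stream_compress(const char *chunks, int nr_chunks,
				   int chunk_len,
				   void (*emit)(const char *blk, int len))
{
	LZ4_stream_t *stream;
	char *blk;
	int i, n, ret = 0;

	stream = kvmalloc(sizeof(*stream), GFP_KERNEL);
	blk = kvmalloc(LZ4_COMPRESSBOUND(chunk_len), GFP_KERNEL);
	if (!stream || !blk) {
		ret = -ENOMEM;
		goto out;
	}

	LZ4_resetStream(stream);	/* mandatory before the first block */

	for (i = 0; i < nr_chunks; i++) {
		/* earlier chunks must still be readable at this point */
		n = LZ4_compress_fast_continue(stream,
					       chunks + i * chunk_len, blk,
					       chunk_len,
					       LZ4_COMPRESSBOUND(chunk_len),
					       LZ4_ACCELERATION_DEFAULT);
		if (!n) {
			ret = -EINVAL;
			goto out;
		}
		emit(blk, n);	/* hypothetical sink for the compressed block */
	}
out:
	kvfree(blk);
	kvfree(stream);
	return ret;
}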
+ * If maxDstSize >= LZ4_compressBound(srcSize),
+ * compression is guaranteed to succeed, and runs faster.
+ *
+ * Return: Number of bytes written into buffer 'dst' or 0 if compression fails
+ */
+int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src,
+	char *dst, int srcSize, int maxDstSize, int acceleration);
+
+/**
+ * LZ4_setStreamDecode() - Instruct where to find dictionary
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @dictionary: dictionary to use
+ * @dictSize: size of dictionary
+ *
+ * Use this function to instruct where to find the dictionary.
+ * Setting a size of 0 is allowed (same effect as reset).
+ *
+ * Return: 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+	const char *dictionary, int dictSize);
+
+/**
+ * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ *	which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ *
+ * This decoding function allows decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks *must* remain available at the memory position
+ * where they were decoded (up to 64 KB).
+ * In the case of a ring buffer, the decoding buffer must be either:
+ *   - Exactly the same size as the encoding buffer, with the same update
+ *     rule (block boundaries at the same positions). In this case,
+ *     the decoding & encoding ring buffer can have any size,
+ *     including very small ones (< 64 KB).
+ *   - Larger than the encoding buffer, by a minimum of maxBlockSize more
+ *     bytes. maxBlockSize is implementation dependent.
+ *     It's the maximum size you intend to compress into a single block.
+ *     In this case, encoding and decoding buffers do not need
+ *     to be synchronized, and the encoding ring buffer can have any size,
+ *     including small ones (< 64 KB).
+ *   - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ *     In this case, encoding and decoding buffers do not need to be
+ *     synchronized, and the encoding ring buffer can have any size,
+ *     including larger than the decoding buffer.
+ * Whenever these conditions are not possible, save the last 64 KB of decoded
+ * data into a safe buffer, and indicate where it is saved
+ * using LZ4_setStreamDecode().
+ *
+ * Return: number of bytes decompressed into destination buffer
+ *	(necessarily <= maxDecompressedSize)
+ *	or a negative result in case of error
+ */
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+	const char *source, char *dest, int compressedSize,
+	int maxDecompressedSize);
+
+/**
+ * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ *	which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ *
+ * This decoding function allows decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks *must* remain available at the memory position
+ * where they were decoded (up to 64 KB).
+ * In the case of a ring buffer, the decoding buffer must be either:
+ *   - Exactly the same size as the encoding buffer, with the same update
+ *     rule (block boundaries at the same positions). In this case,
+ *     the decoding & encoding ring buffer can have any size,
+ *     including very small ones (< 64 KB).
+ *   - Larger than the encoding buffer, by a minimum of maxBlockSize more
+ *     bytes. maxBlockSize is implementation dependent.
+ *     It's the maximum size you intend to compress into a single block.
+ *     In this case, encoding and decoding buffers do not need
+ *     to be synchronized, and the encoding ring buffer can have any size,
+ *     including small ones (< 64 KB).
+ *   - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ *     In this case, encoding and decoding buffers do not need to be
+ *     synchronized, and the encoding ring buffer can have any size,
+ *     including larger than the decoding buffer.
+ * Whenever these conditions are not possible, save the last 64 KB of decoded
+ * data into a safe buffer, and indicate where it is saved
+ * using LZ4_setStreamDecode().
+ *
+ * Return: number of bytes decompressed into destination buffer
+ *	(necessarily <= maxDecompressedSize)
+ *	or a negative result in case of error
+ */
+int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+	const char *source, char *dest, int originalSize);
+
+/**
+ * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode()
+ *	followed by LZ4_decompress_safe_continue()
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ *	which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ * @dictStart: pointer to the start of the dictionary in memory
+ * @dictSize: size of dictionary
+ *
+ * This decoding function works the same as
+ * a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_safe_continue().
+ * It is stand-alone, and does not need an LZ4_streamDecode_t structure.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ *	(necessarily <= maxDecompressedSize)
+ *	or a negative result in case of error
+ */
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+	int compressedSize, int maxDecompressedSize, const char *dictStart,
+	int dictSize);
+
+/**
+ * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode()
+ *	followed by LZ4_decompress_fast_continue()
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ *	which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ * @dictStart: pointer to the start of the dictionary in memory
+ * @dictSize: size of dictionary
+ *
+ * This decoding function works the same as
+ * a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_fast_continue().
+ * It is stand-alone, and does not need an LZ4_streamDecode_t structure.
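For illustration only (not part of this header): a sketch of the matching streaming decode side, pairing LZ4_setStreamDecode() (a dictSize of 0 acts as a reset) with LZ4_decompress_safe_continue(), decoding every block directly behind the previous one so the "previously decoded data stays in place" rule is met by construction. Helper names are invented for the example.

#include <linux/lz4.h>

/* Call once before the first block; a dictSize of 0 is a plain reset. */
static int example_stream_decode_init(LZ4_streamDecode_t *sd)
{
	return LZ4_setStreamDecode(sd, NULL, 0) ? 0 : -EINVAL;
}

/*
 * Decode one compressed block at offset 'out_pos' of a large contiguous
 * output buffer, directly after the data decoded so far.
 */
static int example_stream_decode_block(LZ4_streamDecode_t *sd,
				       const char *blk, int blk_len,
				       char *out, int out_pos, int out_cap)
{
	/* returns bytes produced, or a negative value on malformed input */
	return LZ4_decompress_safe_continue(sd, blk, out + out_pos,
					    blk_len, out_cap - out_pos);
}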
+ * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_fast_usingDict(const char *source, char *dest, + int originalSize, const char *dictStart, int dictSize); + +#endif diff --git a/include/linux/lzo.h b/include/linux/lzo.h new file mode 100644 index 000000000..2ae27cb89 --- /dev/null +++ b/include/linux/lzo.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LZO_H__ +#define __LZO_H__ +/* + * LZO Public Kernel Interface + * A mini subset of the LZO real-time data compression library + * + * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for Linux kernel use by: + * Nitin Gupta + * Richard Purdie + */ + +#define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short)) +#define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS + +#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3) + +/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ +int lzo1x_1_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + +/* safe decompression with overrun testing */ +int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len); + +/* + * Return values (< 0 = Error) + */ +#define LZO_E_OK 0 +#define LZO_E_ERROR (-1) +#define LZO_E_OUT_OF_MEMORY (-2) +#define LZO_E_NOT_COMPRESSIBLE (-3) +#define LZO_E_INPUT_OVERRUN (-4) +#define LZO_E_OUTPUT_OVERRUN (-5) +#define LZO_E_LOOKBEHIND_OVERRUN (-6) +#define LZO_E_EOF_NOT_FOUND (-7) +#define LZO_E_INPUT_NOT_CONSUMED (-8) +#define LZO_E_NOT_YET_IMPLEMENTED (-9) +#define LZO_E_INVALID_ARGUMENT (-10) + +#endif diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h new file mode 100644 index 000000000..00758f45f --- /dev/null +++ b/include/linux/mISDNdsp.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __mISDNdsp_H__ +#define __mISDNdsp_H__ + +struct mISDN_dsp_element_arg { + char *name; + char *def; + char *desc; +}; + +struct mISDN_dsp_element { + char *name; + void *(*new)(const char *arg); + void (*free)(void *p); + void (*process_tx)(void *p, unsigned char *data, int len); + void (*process_rx)(void *p, unsigned char *data, int len, + unsigned int txlen); + int num_args; + struct mISDN_dsp_element_arg + *args; +}; + +extern int mISDN_dsp_element_register(struct mISDN_dsp_element *elem); +extern void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem); + +struct dsp_features { + int hfc_id; /* unique id to identify the chip (or -1) */ + int hfc_dtmf; /* set if HFCmulti card supports dtmf */ + int hfc_conf; /* set if HFCmulti card supports conferences */ + int hfc_loops; /* set if card supports tone loops */ + int hfc_echocanhw; /* set if card supports echocancelation*/ + int pcm_id; /* unique id to identify the pcm bus (or -1) */ + int pcm_slots; /* number of slots on the pcm bus */ + int pcm_banks; /* number of IO banks of pcm bus */ + int unclocked; /* data is not clocked (has jitter/loss) */ + int unordered; /* data is unordered (packets have index) */ +}; + +#endif + diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h new file mode 100644 index 000000000..9d96d5d4d --- /dev/null +++ b/include/linux/mISDNhw.h @@ -0,0 +1,201 @@ +/* + * + * Author Karsten Keil + * + * Basic declarations for the mISDN HW channels + * + * Copyright 2008 by Karsten Keil + * + * This program is free 
software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef MISDNHW_H +#define MISDNHW_H +#include +#include + +/* + * HW DEBUG 0xHHHHGGGG + * H - hardware driver specific bits + * G - for all drivers + */ + +#define DEBUG_HW 0x00000001 +#define DEBUG_HW_OPEN 0x00000002 +#define DEBUG_HW_DCHANNEL 0x00000100 +#define DEBUG_HW_DFIFO 0x00000200 +#define DEBUG_HW_BCHANNEL 0x00001000 +#define DEBUG_HW_BFIFO 0x00002000 + +#define MAX_DFRAME_LEN_L1 300 +#define MAX_MON_FRAME 32 +#define MAX_LOG_SPACE 2048 +#define MISDN_COPY_SIZE 32 + +/* channel->Flags bit field */ +#define FLG_TX_BUSY 0 /* tx_buf in use */ +#define FLG_TX_NEXT 1 /* next_skb in use */ +#define FLG_L1_BUSY 2 /* L1 is permanent busy */ +#define FLG_L2_ACTIVATED 3 /* activated from L2 */ +#define FLG_OPEN 5 /* channel is in use */ +#define FLG_ACTIVE 6 /* channel is activated */ +#define FLG_BUSY_TIMER 7 +/* channel type */ +#define FLG_DCHANNEL 8 /* channel is D-channel */ +#define FLG_BCHANNEL 9 /* channel is B-channel */ +#define FLG_ECHANNEL 10 /* channel is E-channel */ +#define FLG_TRANSPARENT 12 /* channel use transparent data */ +#define FLG_HDLC 13 /* channel use hdlc data */ +#define FLG_L2DATA 14 /* channel use L2 DATA primitivs */ +#define FLG_ORIGIN 15 /* channel is on origin site */ +/* channel specific stuff */ +#define FLG_FILLEMPTY 16 /* fill fifo on first frame (empty) */ +/* arcofi specific */ +#define FLG_ARCOFI_TIMER 17 +#define FLG_ARCOFI_ERROR 18 +/* isar specific */ +#define FLG_INITIALIZED 17 +#define FLG_DLEETX 18 +#define FLG_LASTDLE 19 +#define FLG_FIRST 20 +#define FLG_LASTDATA 21 +#define FLG_NMD_DATA 22 +#define FLG_FTI_RUN 23 +#define FLG_LL_OK 24 +#define FLG_LL_CONN 25 +#define FLG_DTMFSEND 26 +#define FLG_TX_EMPTY 27 +/* stop sending received data upstream */ +#define FLG_RX_OFF 28 +/* workq events */ +#define FLG_RECVQUEUE 30 +#define FLG_PHCHANGE 31 + +#define schedule_event(s, ev) do { \ + test_and_set_bit(ev, &((s)->Flags)); \ + schedule_work(&((s)->workq)); \ + } while (0) + +struct dchannel { + struct mISDNdevice dev; + u_long Flags; + struct work_struct workq; + void (*phfunc) (struct dchannel *); + u_int state; + void *l1; + void *hw; + int slot; /* multiport card channel slot */ + struct timer_list timer; + /* receive data */ + struct sk_buff *rx_skb; + int maxlen; + /* send data */ + struct sk_buff_head squeue; + struct sk_buff_head rqueue; + struct sk_buff *tx_skb; + int tx_idx; + int debug; + /* statistics */ + int err_crc; + int err_tx; + int err_rx; +}; + +typedef int (dchannel_l1callback)(struct dchannel *, u_int); +extern int create_l1(struct dchannel *, dchannel_l1callback *); + +/* private L1 commands */ +#define INFO0 0x8002 +#define INFO1 0x8102 +#define INFO2 0x8202 +#define INFO3_P8 0x8302 +#define INFO3_P10 0x8402 +#define INFO4_P8 0x8502 +#define INFO4_P10 0x8602 +#define LOSTFRAMING 0x8702 +#define ANYSIGNAL 0x8802 +#define HW_POWERDOWN 0x8902 +#define HW_RESET_REQ 0x8a02 +#define HW_POWERUP_REQ 0x8b02 +#define HW_DEACT_REQ 0x8c02 +#define HW_ACTIVATE_REQ 0x8e02 +#define HW_D_NOBLOCKED 0x8f02 +#define HW_RESET_IND 0x9002 +#define HW_POWERUP_IND 0x9102 +#define HW_DEACT_IND 0x9202 +#define 
HW_ACTIVATE_IND 0x9302 +#define HW_DEACT_CNF 0x9402 +#define HW_TESTLOOP 0x9502 +#define HW_TESTRX_RAW 0x9602 +#define HW_TESTRX_HDLC 0x9702 +#define HW_TESTRX_OFF 0x9802 +#define HW_TIMER3_IND 0x9902 +#define HW_TIMER3_VALUE 0x9a00 +#define HW_TIMER3_VMASK 0x00FF + +struct layer1; +extern int l1_event(struct layer1 *, u_int); + +#define MISDN_BCH_FILL_SIZE 4 + +struct bchannel { + struct mISDNchannel ch; + int nr; + u_long Flags; + struct work_struct workq; + u_int state; + void *hw; + int slot; /* multiport card channel slot */ + struct timer_list timer; + /* receive data */ + u8 fill[MISDN_BCH_FILL_SIZE]; + struct sk_buff *rx_skb; + unsigned short maxlen; + unsigned short init_maxlen; /* initial value */ + unsigned short next_maxlen; /* pending value */ + unsigned short minlen; /* for transparent data */ + unsigned short init_minlen; /* initial value */ + unsigned short next_minlen; /* pending value */ + /* send data */ + struct sk_buff *next_skb; + struct sk_buff *tx_skb; + struct sk_buff_head rqueue; + int rcount; + int tx_idx; + int debug; + /* statistics */ + int err_crc; + int err_tx; + int err_rx; + int dropcnt; +}; + +extern int mISDN_initdchannel(struct dchannel *, int, void *); +extern int mISDN_initbchannel(struct bchannel *, unsigned short, + unsigned short); +extern int mISDN_freedchannel(struct dchannel *); +extern void mISDN_clear_bchannel(struct bchannel *); +extern void mISDN_freebchannel(struct bchannel *); +extern int mISDN_ctrl_bchannel(struct bchannel *, struct mISDN_ctrl_req *); +extern void queue_ch_frame(struct mISDNchannel *, u_int, + int, struct sk_buff *); +extern int dchannel_senddata(struct dchannel *, struct sk_buff *); +extern int bchannel_senddata(struct bchannel *, struct sk_buff *); +extern int bchannel_get_rxbuf(struct bchannel *, int); +extern void recv_Dchannel(struct dchannel *); +extern void recv_Echannel(struct dchannel *, struct dchannel *); +extern void recv_Bchannel(struct bchannel *, unsigned int, bool); +extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *); +extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *); +extern int get_next_bframe(struct bchannel *); +extern int get_next_dframe(struct dchannel *); + +#endif diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h new file mode 100644 index 000000000..a7330eb3e --- /dev/null +++ b/include/linux/mISDNif.h @@ -0,0 +1,604 @@ +/* + * + * Author Karsten Keil + * + * Copyright 2008 by Karsten Keil + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE + * version 2.1 as published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU LESSER GENERAL PUBLIC LICENSE for more details. 
+ * + */ + +#ifndef mISDNIF_H +#define mISDNIF_H + +#include +#include +#include +#include + +/* + * ABI Version 32 bit + * + * <8 bit> Major version + * - changed if any interface become backwards incompatible + * + * <8 bit> Minor version + * - changed if any interface is extended but backwards compatible + * + * <16 bit> Release number + * - should be incremented on every checkin + */ +#define MISDN_MAJOR_VERSION 1 +#define MISDN_MINOR_VERSION 1 +#define MISDN_RELEASE 29 + +/* primitives for information exchange + * generell format + * <16 bit 0 > + * <8 bit command> + * BIT 8 = 1 LAYER private + * BIT 7 = 1 answer + * BIT 6 = 1 DATA + * <8 bit target layer mask> + * + * Layer = 00 is reserved for general commands + Layer = 01 L2 -> HW + Layer = 02 HW -> L2 + Layer = 04 L3 -> L2 + Layer = 08 L2 -> L3 + * Layer = FF is reserved for broadcast commands + */ + +#define MISDN_CMDMASK 0xff00 +#define MISDN_LAYERMASK 0x00ff + +/* generell commands */ +#define OPEN_CHANNEL 0x0100 +#define CLOSE_CHANNEL 0x0200 +#define CONTROL_CHANNEL 0x0300 +#define CHECK_DATA 0x0400 + +/* layer 2 -> layer 1 */ +#define PH_ACTIVATE_REQ 0x0101 +#define PH_DEACTIVATE_REQ 0x0201 +#define PH_DATA_REQ 0x2001 +#define MPH_ACTIVATE_REQ 0x0501 +#define MPH_DEACTIVATE_REQ 0x0601 +#define MPH_INFORMATION_REQ 0x0701 +#define PH_CONTROL_REQ 0x0801 + +/* layer 1 -> layer 2 */ +#define PH_ACTIVATE_IND 0x0102 +#define PH_ACTIVATE_CNF 0x4102 +#define PH_DEACTIVATE_IND 0x0202 +#define PH_DEACTIVATE_CNF 0x4202 +#define PH_DATA_IND 0x2002 +#define PH_DATA_E_IND 0x3002 +#define MPH_ACTIVATE_IND 0x0502 +#define MPH_DEACTIVATE_IND 0x0602 +#define MPH_INFORMATION_IND 0x0702 +#define PH_DATA_CNF 0x6002 +#define PH_CONTROL_IND 0x0802 +#define PH_CONTROL_CNF 0x4802 + +/* layer 3 -> layer 2 */ +#define DL_ESTABLISH_REQ 0x1004 +#define DL_RELEASE_REQ 0x1104 +#define DL_DATA_REQ 0x3004 +#define DL_UNITDATA_REQ 0x3104 +#define DL_INFORMATION_REQ 0x0004 + +/* layer 2 -> layer 3 */ +#define DL_ESTABLISH_IND 0x1008 +#define DL_ESTABLISH_CNF 0x5008 +#define DL_RELEASE_IND 0x1108 +#define DL_RELEASE_CNF 0x5108 +#define DL_DATA_IND 0x3008 +#define DL_UNITDATA_IND 0x3108 +#define DL_INFORMATION_IND 0x0008 + +/* intern layer 2 management */ +#define MDL_ASSIGN_REQ 0x1804 +#define MDL_ASSIGN_IND 0x1904 +#define MDL_REMOVE_REQ 0x1A04 +#define MDL_REMOVE_IND 0x1B04 +#define MDL_STATUS_UP_IND 0x1C04 +#define MDL_STATUS_DOWN_IND 0x1D04 +#define MDL_STATUS_UI_IND 0x1E04 +#define MDL_ERROR_IND 0x1F04 +#define MDL_ERROR_RSP 0x5F04 + +/* intern layer 2 */ +#define DL_TIMER200_IND 0x7004 +#define DL_TIMER203_IND 0x7304 +#define DL_INTERN_MSG 0x7804 + +/* DL_INFORMATION_IND types */ +#define DL_INFO_L2_CONNECT 0x0001 +#define DL_INFO_L2_REMOVED 0x0002 + +/* PH_CONTROL types */ +/* TOUCH TONE IS 0x20XX XX "0"..."9", "A","B","C","D","*","#" */ +#define DTMF_TONE_VAL 0x2000 +#define DTMF_TONE_MASK 0x007F +#define DTMF_TONE_START 0x2100 +#define DTMF_TONE_STOP 0x2200 +#define DTMF_HFC_COEF 0x4000 +#define DSP_CONF_JOIN 0x2403 +#define DSP_CONF_SPLIT 0x2404 +#define DSP_RECEIVE_OFF 0x2405 +#define DSP_RECEIVE_ON 0x2406 +#define DSP_ECHO_ON 0x2407 +#define DSP_ECHO_OFF 0x2408 +#define DSP_MIX_ON 0x2409 +#define DSP_MIX_OFF 0x240a +#define DSP_DELAY 0x240b +#define DSP_JITTER 0x240c +#define DSP_TXDATA_ON 0x240d +#define DSP_TXDATA_OFF 0x240e +#define DSP_TX_DEJITTER 0x240f +#define DSP_TX_DEJ_OFF 0x2410 +#define DSP_TONE_PATT_ON 0x2411 +#define DSP_TONE_PATT_OFF 0x2412 +#define DSP_VOL_CHANGE_TX 0x2413 +#define DSP_VOL_CHANGE_RX 0x2414 +#define DSP_BF_ENABLE_KEY 
0x2415 +#define DSP_BF_DISABLE 0x2416 +#define DSP_BF_ACCEPT 0x2416 +#define DSP_BF_REJECT 0x2417 +#define DSP_PIPELINE_CFG 0x2418 +#define HFC_VOL_CHANGE_TX 0x2601 +#define HFC_VOL_CHANGE_RX 0x2602 +#define HFC_SPL_LOOP_ON 0x2603 +#define HFC_SPL_LOOP_OFF 0x2604 +/* for T30 FAX and analog modem */ +#define HW_MOD_FRM 0x4000 +#define HW_MOD_FRH 0x4001 +#define HW_MOD_FTM 0x4002 +#define HW_MOD_FTH 0x4003 +#define HW_MOD_FTS 0x4004 +#define HW_MOD_CONNECT 0x4010 +#define HW_MOD_OK 0x4011 +#define HW_MOD_NOCARR 0x4012 +#define HW_MOD_FCERROR 0x4013 +#define HW_MOD_READY 0x4014 +#define HW_MOD_LASTDATA 0x4015 + +/* DSP_TONE_PATT_ON parameter */ +#define TONE_OFF 0x0000 +#define TONE_GERMAN_DIALTONE 0x0001 +#define TONE_GERMAN_OLDDIALTONE 0x0002 +#define TONE_AMERICAN_DIALTONE 0x0003 +#define TONE_GERMAN_DIALPBX 0x0004 +#define TONE_GERMAN_OLDDIALPBX 0x0005 +#define TONE_AMERICAN_DIALPBX 0x0006 +#define TONE_GERMAN_RINGING 0x0007 +#define TONE_GERMAN_OLDRINGING 0x0008 +#define TONE_AMERICAN_RINGPBX 0x000b +#define TONE_GERMAN_RINGPBX 0x000c +#define TONE_GERMAN_OLDRINGPBX 0x000d +#define TONE_AMERICAN_RINGING 0x000e +#define TONE_GERMAN_BUSY 0x000f +#define TONE_GERMAN_OLDBUSY 0x0010 +#define TONE_AMERICAN_BUSY 0x0011 +#define TONE_GERMAN_HANGUP 0x0012 +#define TONE_GERMAN_OLDHANGUP 0x0013 +#define TONE_AMERICAN_HANGUP 0x0014 +#define TONE_SPECIAL_INFO 0x0015 +#define TONE_GERMAN_GASSENBESETZT 0x0016 +#define TONE_GERMAN_AUFSCHALTTON 0x0016 + +/* MPH_INFORMATION_IND */ +#define L1_SIGNAL_LOS_OFF 0x0010 +#define L1_SIGNAL_LOS_ON 0x0011 +#define L1_SIGNAL_AIS_OFF 0x0012 +#define L1_SIGNAL_AIS_ON 0x0013 +#define L1_SIGNAL_RDI_OFF 0x0014 +#define L1_SIGNAL_RDI_ON 0x0015 +#define L1_SIGNAL_SLIP_RX 0x0020 +#define L1_SIGNAL_SLIP_TX 0x0021 + +/* + * protocol ids + * D channel 1-31 + * B channel 33 - 63 + */ + +#define ISDN_P_NONE 0 +#define ISDN_P_BASE 0 +#define ISDN_P_TE_S0 0x01 +#define ISDN_P_NT_S0 0x02 +#define ISDN_P_TE_E1 0x03 +#define ISDN_P_NT_E1 0x04 +#define ISDN_P_TE_UP0 0x05 +#define ISDN_P_NT_UP0 0x06 + +#define IS_ISDN_P_TE(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_TE_E1) || \ + (p == ISDN_P_TE_UP0) || (p == ISDN_P_LAPD_TE)) +#define IS_ISDN_P_NT(p) ((p == ISDN_P_NT_S0) || (p == ISDN_P_NT_E1) || \ + (p == ISDN_P_NT_UP0) || (p == ISDN_P_LAPD_NT)) +#define IS_ISDN_P_S0(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_NT_S0)) +#define IS_ISDN_P_E1(p) ((p == ISDN_P_TE_E1) || (p == ISDN_P_NT_E1)) +#define IS_ISDN_P_UP0(p) ((p == ISDN_P_TE_UP0) || (p == ISDN_P_NT_UP0)) + + +#define ISDN_P_LAPD_TE 0x10 +#define ISDN_P_LAPD_NT 0x11 + +#define ISDN_P_B_MASK 0x1f +#define ISDN_P_B_START 0x20 + +#define ISDN_P_B_RAW 0x21 +#define ISDN_P_B_HDLC 0x22 +#define ISDN_P_B_X75SLP 0x23 +#define ISDN_P_B_L2DTMF 0x24 +#define ISDN_P_B_L2DSP 0x25 +#define ISDN_P_B_L2DSPHDLC 0x26 +#define ISDN_P_B_T30_FAX 0x27 +#define ISDN_P_B_MODEM_ASYNC 0x28 + +#define OPTION_L2_PMX 1 +#define OPTION_L2_PTP 2 +#define OPTION_L2_FIXEDTEI 3 +#define OPTION_L2_CLEANUP 4 +#define OPTION_L1_HOLD 5 + +/* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */ +#define MISDN_MAX_IDLEN 20 + +struct mISDNhead { + unsigned int prim; + unsigned int id; +} __packed; + +#define MISDN_HEADER_LEN sizeof(struct mISDNhead) +#define MAX_DATA_SIZE 2048 +#define MAX_DATA_MEM (MAX_DATA_SIZE + MISDN_HEADER_LEN) +#define MAX_DFRAME_LEN 260 + +#define MISDN_ID_ADDR_MASK 0xFFFF +#define MISDN_ID_TEI_MASK 0xFF00 +#define MISDN_ID_SAPI_MASK 0x00FF +#define MISDN_ID_TEI_ANY 0x7F00 + +#define MISDN_ID_ANY 0xFFFF +#define MISDN_ID_NONE 0xFFFE + +#define 
GROUP_TEI 127 +#define TEI_SAPI 63 +#define CTRL_SAPI 0 + +#define MISDN_MAX_CHANNEL 127 +#define MISDN_CHMAP_SIZE ((MISDN_MAX_CHANNEL + 1) >> 3) + +#define SOL_MISDN 0 + +struct sockaddr_mISDN { + sa_family_t family; + unsigned char dev; + unsigned char channel; + unsigned char sapi; + unsigned char tei; +}; + +struct mISDNversion { + unsigned char major; + unsigned char minor; + unsigned short release; +}; + +struct mISDN_devinfo { + u_int id; + u_int Dprotocols; + u_int Bprotocols; + u_int protocol; + u_char channelmap[MISDN_CHMAP_SIZE]; + u_int nrbchan; + char name[MISDN_MAX_IDLEN]; +}; + +struct mISDN_devrename { + u_int id; + char name[MISDN_MAX_IDLEN]; /* new name */ +}; + +/* MPH_INFORMATION_REQ payload */ +struct ph_info_ch { + __u32 protocol; + __u64 Flags; +}; + +struct ph_info_dch { + struct ph_info_ch ch; + __u16 state; + __u16 num_bch; +}; + +struct ph_info { + struct ph_info_dch dch; + struct ph_info_ch bch[]; +}; + +/* timer device ioctl */ +#define IMADDTIMER _IOR('I', 64, int) +#define IMDELTIMER _IOR('I', 65, int) + +/* socket ioctls */ +#define IMGETVERSION _IOR('I', 66, int) +#define IMGETCOUNT _IOR('I', 67, int) +#define IMGETDEVINFO _IOR('I', 68, int) +#define IMCTRLREQ _IOR('I', 69, int) +#define IMCLEAR_L2 _IOR('I', 70, int) +#define IMSETDEVNAME _IOR('I', 71, struct mISDN_devrename) +#define IMHOLD_L1 _IOR('I', 72, int) + +static inline int +test_channelmap(u_int nr, u_char *map) +{ + if (nr <= MISDN_MAX_CHANNEL) + return map[nr >> 3] & (1 << (nr & 7)); + else + return 0; +} + +static inline void +set_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] |= (1 << (nr & 7)); +} + +static inline void +clear_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] &= ~(1 << (nr & 7)); +} + +/* CONTROL_CHANNEL parameters */ +#define MISDN_CTRL_GETOP 0x0000 +#define MISDN_CTRL_LOOP 0x0001 +#define MISDN_CTRL_CONNECT 0x0002 +#define MISDN_CTRL_DISCONNECT 0x0004 +#define MISDN_CTRL_RX_BUFFER 0x0008 +#define MISDN_CTRL_PCMCONNECT 0x0010 +#define MISDN_CTRL_PCMDISCONNECT 0x0020 +#define MISDN_CTRL_SETPEER 0x0040 +#define MISDN_CTRL_UNSETPEER 0x0080 +#define MISDN_CTRL_RX_OFF 0x0100 +#define MISDN_CTRL_FILL_EMPTY 0x0200 +#define MISDN_CTRL_GETPEER 0x0400 +#define MISDN_CTRL_L1_TIMER3 0x0800 +#define MISDN_CTRL_HW_FEATURES_OP 0x2000 +#define MISDN_CTRL_HW_FEATURES 0x2001 +#define MISDN_CTRL_HFC_OP 0x4000 +#define MISDN_CTRL_HFC_PCM_CONN 0x4001 +#define MISDN_CTRL_HFC_PCM_DISC 0x4002 +#define MISDN_CTRL_HFC_CONF_JOIN 0x4003 +#define MISDN_CTRL_HFC_CONF_SPLIT 0x4004 +#define MISDN_CTRL_HFC_RECEIVE_OFF 0x4005 +#define MISDN_CTRL_HFC_RECEIVE_ON 0x4006 +#define MISDN_CTRL_HFC_ECHOCAN_ON 0x4007 +#define MISDN_CTRL_HFC_ECHOCAN_OFF 0x4008 +#define MISDN_CTRL_HFC_WD_INIT 0x4009 +#define MISDN_CTRL_HFC_WD_RESET 0x400A + +/* special RX buffer value for MISDN_CTRL_RX_BUFFER request.p1 is the minimum + * buffer size request.p2 the maximum. Using MISDN_CTRL_RX_SIZE_IGNORE will + * not change the value, but still read back the actual stetting. 
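For illustration only (not part of this header): a sketch of how a kernel-side user might read back the current RX buffer limits described in the comment above, filling a struct mISDN_ctrl_req (declared just below) with MISDN_CTRL_RX_SIZE_IGNORE so that nothing is changed. It assumes an already opened channel whose ->ctrl handler implements MISDN_CTRL_RX_BUFFER (for example via mISDN_ctrl_bchannel()) and that the handler writes the active values back into p1/p2, as the comment suggests.

#include <linux/mISDNif.h>

static int example_query_rx_buffer(struct mISDNchannel *ch,
				   int *min_sz, int *max_sz)
{
	struct mISDN_ctrl_req cq = {
		.op = MISDN_CTRL_RX_BUFFER,
		.p1 = MISDN_CTRL_RX_SIZE_IGNORE,	/* keep minimum as-is */
		.p2 = MISDN_CTRL_RX_SIZE_IGNORE,	/* keep maximum as-is */
	};
	int ret;

	ret = ch->ctrl(ch, CONTROL_CHANNEL, &cq);
	if (ret)
		return ret;

	/* assumption: the handler reports the active values back here */
	*min_sz = cq.p1;
	*max_sz = cq.p2;
	return 0;
}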
+ */ +#define MISDN_CTRL_RX_SIZE_IGNORE -1 + +/* socket options */ +#define MISDN_TIME_STAMP 0x0001 + +struct mISDN_ctrl_req { + int op; + int channel; + int p1; + int p2; +}; + +/* muxer options */ +#define MISDN_OPT_ALL 1 +#define MISDN_OPT_TEIMGR 2 + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include + +#define DEBUG_CORE 0x000000ff +#define DEBUG_CORE_FUNC 0x00000002 +#define DEBUG_SOCKET 0x00000004 +#define DEBUG_MANAGER 0x00000008 +#define DEBUG_SEND_ERR 0x00000010 +#define DEBUG_MSG_THREAD 0x00000020 +#define DEBUG_QUEUE_FUNC 0x00000040 +#define DEBUG_L1 0x0000ff00 +#define DEBUG_L1_FSM 0x00000200 +#define DEBUG_L2 0x00ff0000 +#define DEBUG_L2_FSM 0x00020000 +#define DEBUG_L2_CTRL 0x00040000 +#define DEBUG_L2_RECV 0x00080000 +#define DEBUG_L2_TEI 0x00100000 +#define DEBUG_L2_TEIFSM 0x00200000 +#define DEBUG_TIMER 0x01000000 +#define DEBUG_CLOCK 0x02000000 + +#define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0]) +#define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim) +#define mISDN_HEAD_ID(s) (((struct mISDNhead *)&s->cb[0])->id) + +/* socket states */ +#define MISDN_OPEN 1 +#define MISDN_BOUND 2 +#define MISDN_CLOSED 3 + +struct mISDNchannel; +struct mISDNdevice; +struct mISDNstack; +struct mISDNclock; + +struct channel_req { + u_int protocol; + struct sockaddr_mISDN adr; + struct mISDNchannel *ch; +}; + +typedef int (ctrl_func_t)(struct mISDNchannel *, u_int, void *); +typedef int (send_func_t)(struct mISDNchannel *, struct sk_buff *); +typedef int (create_func_t)(struct channel_req *); + +struct Bprotocol { + struct list_head list; + char *name; + u_int Bprotocols; + create_func_t *create; +}; + +struct mISDNchannel { + struct list_head list; + u_int protocol; + u_int nr; + u_long opt; + u_int addr; + struct mISDNstack *st; + struct mISDNchannel *peer; + send_func_t *send; + send_func_t *recv; + ctrl_func_t *ctrl; +}; + +struct mISDN_sock_list { + struct hlist_head head; + rwlock_t lock; +}; + +struct mISDN_sock { + struct sock sk; + struct mISDNchannel ch; + u_int cmask; + struct mISDNdevice *dev; +}; + + + +struct mISDNdevice { + struct mISDNchannel D; + u_int id; + u_int Dprotocols; + u_int Bprotocols; + u_int nrbchan; + u_char channelmap[MISDN_CHMAP_SIZE]; + struct list_head bchannels; + struct mISDNchannel *teimgr; + struct device dev; +}; + +struct mISDNstack { + u_long status; + struct mISDNdevice *dev; + struct task_struct *thread; + struct completion *notify; + wait_queue_head_t workq; + struct sk_buff_head msgq; + struct list_head layer2; + struct mISDNchannel *layer1; + struct mISDNchannel own; + struct mutex lmutex; /* protect lists */ + struct mISDN_sock_list l1sock; +#ifdef MISDN_MSG_STATS + u_int msg_cnt; + u_int sleep_cnt; + u_int stopped_cnt; +#endif +}; + +typedef int (clockctl_func_t)(void *, int); + +struct mISDNclock { + struct list_head list; + char name[64]; + int pri; + clockctl_func_t *ctl; + void *priv; +}; + +/* global alloc/queue functions */ + +static inline struct sk_buff * +mI_alloc_skb(unsigned int len, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask); + if (likely(skb)) + skb_reserve(skb, MISDN_HEADER_LEN); + return skb; +} + +static inline struct sk_buff * +_alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) +{ + struct sk_buff *skb = mI_alloc_skb(len, gfp_mask); + struct mISDNhead *hh; + + if (!skb) + return NULL; + if (len) + skb_put_data(skb, dp, len); + hh = mISDN_HEAD_P(skb); + hh->prim = prim; + hh->id = id; + return skb; +} + +static 
inline void +_queue_data(struct mISDNchannel *ch, u_int prim, + u_int id, u_int len, void *dp, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + if (!ch->peer) + return; + skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask); + if (!skb) + return; + if (ch->recv(ch->peer, skb)) + dev_kfree_skb(skb); +} + +/* global register/unregister functions */ + +extern int mISDN_register_device(struct mISDNdevice *, + struct device *parent, char *name); +extern void mISDN_unregister_device(struct mISDNdevice *); +extern int mISDN_register_Bprotocol(struct Bprotocol *); +extern void mISDN_unregister_Bprotocol(struct Bprotocol *); +extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *, + void *); +extern void mISDN_unregister_clock(struct mISDNclock *); + +static inline struct mISDNdevice *dev_to_mISDN(struct device *dev) +{ + if (dev) + return dev_get_drvdata(dev); + else + return NULL; +} + +extern void set_channel_address(struct mISDNchannel *, u_int, u_int); +extern void mISDN_clock_update(struct mISDNclock *, int, ktime_t *); +extern unsigned short mISDN_clock_get(void); +extern const char *mISDNDevName4ch(struct mISDNchannel *); + +#endif /* __KERNEL__ */ +#endif /* mISDNIF_H */ diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h new file mode 100644 index 000000000..c20b4843f --- /dev/null +++ b/include/linux/mailbox/brcm-message.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2016 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Common header for Broadcom mailbox messages which is shared across + * Broadcom SoCs and Broadcom mailbox client drivers. + */ + +#ifndef _LINUX_BRCM_MESSAGE_H_ +#define _LINUX_BRCM_MESSAGE_H_ + +#include + +enum brcm_message_type { + BRCM_MESSAGE_UNKNOWN = 0, + BRCM_MESSAGE_BATCH, + BRCM_MESSAGE_SPU, + BRCM_MESSAGE_SBA, + BRCM_MESSAGE_MAX, +}; + +struct brcm_sba_command { + u64 cmd; + u64 *cmd_dma; + dma_addr_t cmd_dma_addr; +#define BRCM_SBA_CMD_TYPE_A BIT(0) +#define BRCM_SBA_CMD_TYPE_B BIT(1) +#define BRCM_SBA_CMD_TYPE_C BIT(2) +#define BRCM_SBA_CMD_HAS_RESP BIT(3) +#define BRCM_SBA_CMD_HAS_OUTPUT BIT(4) + u64 flags; + dma_addr_t resp; + size_t resp_len; + dma_addr_t data; + size_t data_len; +}; + +struct brcm_message { + enum brcm_message_type type; + union { + struct { + struct brcm_message *msgs; + unsigned int msgs_queued; + unsigned int msgs_count; + } batch; + struct { + struct scatterlist *src; + struct scatterlist *dst; + } spu; + struct { + struct brcm_sba_command *cmds; + unsigned int cmds_count; + } sba; + }; + void *ctx; + int error; +}; + +#endif /* _LINUX_BRCM_MESSAGE_H_ */ diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h new file mode 100644 index 000000000..ccb73422c --- /dev/null +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * + */ + +#ifndef __MTK_CMDQ_MAILBOX_H__ +#define __MTK_CMDQ_MAILBOX_H__ + +#include +#include +#include + +#define CMDQ_INST_SIZE 8 /* instruction is 64-bit */ +#define CMDQ_SUBSYS_SHIFT 16 +#define CMDQ_OP_CODE_SHIFT 24 +#define CMDQ_JUMP_PASS CMDQ_INST_SIZE + +#define CMDQ_WFE_UPDATE BIT(31) +#define CMDQ_WFE_WAIT BIT(15) +#define CMDQ_WFE_WAIT_VALUE 0x1 + +/* + * CMDQ_CODE_MASK: + * set write mask + * format: op mask + * CMDQ_CODE_WRITE: + * write value into target register + * format: op subsys address value + * CMDQ_CODE_JUMP: + * jump by offset + * format: op offset + * CMDQ_CODE_WFE: + * wait for event and clear + * it is just clear if no wait + * format: [wait] op event update:1 to_wait:1 wait:1 + * [clear] op event update:1 to_wait:0 wait:0 + * CMDQ_CODE_EOC: + * end of command + * format: op irq_flag + */ +enum cmdq_code { + CMDQ_CODE_MASK = 0x02, + CMDQ_CODE_WRITE = 0x04, + CMDQ_CODE_JUMP = 0x10, + CMDQ_CODE_WFE = 0x20, + CMDQ_CODE_EOC = 0x40, +}; + +enum cmdq_cb_status { + CMDQ_CB_NORMAL = 0, + CMDQ_CB_ERROR +}; + +struct cmdq_cb_data { + enum cmdq_cb_status sta; + void *data; +}; + +typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data); + +struct cmdq_task_cb { + cmdq_async_flush_cb cb; + void *data; +}; + +struct cmdq_pkt { + void *va_base; + dma_addr_t pa_base; + size_t cmd_buf_size; /* command occupied size */ + size_t buf_size; /* real buffer size */ + struct cmdq_task_cb cb; + struct cmdq_task_cb async_cb; + void *cl; +}; + +#endif /* __MTK_CMDQ_MAILBOX_H__ */ diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h new file mode 100644 index 000000000..443487109 --- /dev/null +++ b/include/linux/mailbox_client.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2013-2014 Linaro Ltd. + * Author: Jassi Brar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MAILBOX_CLIENT_H +#define __MAILBOX_CLIENT_H + +#include +#include + +struct mbox_chan; + +/** + * struct mbox_client - User of a mailbox + * @dev: The client device + * @tx_block: If the mbox_send_message should block until data is + * transmitted. + * @tx_tout: Max block period in ms before TX is assumed failure + * @knows_txdone: If the client could run the TX state machine. Usually + * if the client receives some ACK packet for transmission. + * Unused if the controller already has TX_Done/RTR IRQ. + * @rx_callback: Atomic callback to provide client the data received + * @tx_prepare: Atomic callback to ask client to prepare the payload + * before initiating the transmission if required. 
+ * @tx_done: Atomic callback to tell client of data transmission + */ +struct mbox_client { + struct device *dev; + bool tx_block; + unsigned long tx_tout; + bool knows_txdone; + + void (*rx_callback)(struct mbox_client *cl, void *mssg); + void (*tx_prepare)(struct mbox_client *cl, void *mssg); + void (*tx_done)(struct mbox_client *cl, void *mssg, int r); +}; + +struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl, + const char *name); +struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index); +int mbox_send_message(struct mbox_chan *chan, void *mssg); +void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ +bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */ +void mbox_free_channel(struct mbox_chan *chan); /* may sleep */ + +#endif /* __MAILBOX_CLIENT_H */ diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h new file mode 100644 index 000000000..5a4524f66 --- /dev/null +++ b/include/linux/mailbox_controller.h @@ -0,0 +1,135 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MAILBOX_CONTROLLER_H +#define __MAILBOX_CONTROLLER_H + +#include +#include +#include +#include +#include + +struct mbox_chan; + +/** + * struct mbox_chan_ops - methods to control mailbox channels + * @send_data: The API asks the MBOX controller driver, in atomic + * context try to transmit a message on the bus. Returns 0 if + * data is accepted for transmission, -EBUSY while rejecting + * if the remote hasn't yet read the last data sent. Actual + * transmission of data is reported by the controller via + * mbox_chan_txdone (if it has some TX ACK irq). It must not + * sleep. + * @startup: Called when a client requests the chan. The controller + * could ask clients for additional parameters of communication + * to be provided via client's chan_data. This call may + * block. After this call the Controller must forward any + * data received on the chan by calling mbox_chan_received_data. + * The controller may do stuff that need to sleep. + * @shutdown: Called when a client relinquishes control of a chan. + * This call may block too. The controller must not forward + * any received data anymore. + * The controller may do stuff that need to sleep. + * @last_tx_done: If the controller sets 'txdone_poll', the API calls + * this to poll status of last TX. The controller must + * give priority to IRQ method over polling and never + * set both txdone_poll and txdone_irq. Only in polling + * mode 'send_data' is expected to return -EBUSY. + * The controller may do stuff that need to sleep/block. + * Used only if txdone_poll:=true && txdone_irq:=false + * @peek_data: Atomic check for any received data. Return true if controller + * has some data to push to the client. False otherwise. + */ +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *chan, void *data); + int (*startup)(struct mbox_chan *chan); + void (*shutdown)(struct mbox_chan *chan); + bool (*last_tx_done)(struct mbox_chan *chan); + bool (*peek_data)(struct mbox_chan *chan); +}; + +/** + * struct mbox_controller - Controller of a class of communication channels + * @dev: Device backing this controller + * @ops: Operators that work on each communication chan + * @chans: Array of channels + * @num_chans: Number of channels in the 'chans' array. 
+ * @txdone_irq: Indicates if the controller can report to API when + * the last transmitted data was read by the remote. + * Eg, if it has some TX ACK irq. + * @txdone_poll: If the controller can read but not report the TX + * done. Ex, some register shows the TX status but + * no interrupt rises. Ignored if 'txdone_irq' is set. + * @txpoll_period: If 'txdone_poll' is in effect, the API polls for + * last TX's status after these many millisecs + * @of_xlate: Controller driver specific mapping of channel via DT + * @poll_hrt: API private. hrtimer used to poll for TXDONE on all + * channels. + * @node: API private. To hook into list of controllers. + */ +struct mbox_controller { + struct device *dev; + const struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned txpoll_period; + struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox, + const struct of_phandle_args *sp); + /* Internal to API */ + struct hrtimer poll_hrt; + spinlock_t poll_hrt_lock; + struct list_head node; +}; + +/* + * The length of circular buffer for queuing messages from a client. + * 'msg_count' tracks the number of buffered messages while 'msg_free' + * is the index where the next message would be buffered. + * We shouldn't need it too big because every transfer is interrupt + * triggered and if we have lots of data to transfer, the interrupt + * latencies are going to be the bottleneck, not the buffer length. + * Besides, mbox_send_message could be called from atomic context and + * the client could also queue another message from the notifier 'tx_done' + * of the last transfer done. + * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN" + * print, it needs to be taken from config option or somesuch. + */ +#define MBOX_TX_QUEUE_LEN 20 + +/** + * struct mbox_chan - s/w representation of a communication chan + * @mbox: Pointer to the parent/provider of this channel + * @txdone_method: Way to detect TXDone chosen by the API + * @cl: Pointer to the current owner of this channel + * @tx_complete: Transmission completion + * @active_req: Currently active request hook + * @msg_count: No. 
of mssg currently queued + * @msg_free: Index of next available mssg slot + * @msg_data: Hook for data packet + * @lock: Serialise access to the channel + * @con_priv: Hook for controller driver to attach private data + */ +struct mbox_chan { + struct mbox_controller *mbox; + unsigned txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned msg_count, msg_free; + void *msg_data[MBOX_TX_QUEUE_LEN]; + spinlock_t lock; /* Serialise access to the channel */ + void *con_priv; +}; + +int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */ +void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */ +void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */ +void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */ + +#endif /* __MAILBOX_CONTROLLER_H */ diff --git a/include/linux/maple.h b/include/linux/maple.h new file mode 100644 index 000000000..9b140272e --- /dev/null +++ b/include/linux/maple.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MAPLE_H +#define __LINUX_MAPLE_H + +#include + +struct device; +extern struct bus_type maple_bus_type; + +/* Maple Bus command and response codes */ +enum maple_code { + MAPLE_RESPONSE_FILEERR = -5, + MAPLE_RESPONSE_AGAIN, /* retransmit */ + MAPLE_RESPONSE_BADCMD, + MAPLE_RESPONSE_BADFUNC, + MAPLE_RESPONSE_NONE, /* unit didn't respond*/ + MAPLE_COMMAND_DEVINFO = 1, + MAPLE_COMMAND_ALLINFO, + MAPLE_COMMAND_RESET, + MAPLE_COMMAND_KILL, + MAPLE_RESPONSE_DEVINFO, + MAPLE_RESPONSE_ALLINFO, + MAPLE_RESPONSE_OK, + MAPLE_RESPONSE_DATATRF, + MAPLE_COMMAND_GETCOND, + MAPLE_COMMAND_GETMINFO, + MAPLE_COMMAND_BREAD, + MAPLE_COMMAND_BWRITE, + MAPLE_COMMAND_BSYNC, + MAPLE_COMMAND_SETCOND, + MAPLE_COMMAND_MICCONTROL +}; + +enum maple_file_errors { + MAPLE_FILEERR_INVALID_PARTITION = 0x01000000, + MAPLE_FILEERR_PHASE_ERROR = 0x02000000, + MAPLE_FILEERR_INVALID_BLOCK = 0x04000000, + MAPLE_FILEERR_WRITE_ERROR = 0x08000000, + MAPLE_FILEERR_INVALID_WRITE_LENGTH = 0x10000000, + MAPLE_FILEERR_BAD_CRC = 0x20000000 +}; + +struct maple_buffer { + char bufx[0x400]; + void *buf; +}; + +struct mapleq { + struct list_head list; + struct maple_device *dev; + struct maple_buffer *recvbuf; + void *sendbuf, *recvbuf_p2; + unsigned char length; + enum maple_code command; +}; + +struct maple_devinfo { + unsigned long function; + unsigned long function_data[3]; + unsigned char area_code; + unsigned char connector_direction; + char product_name[31]; + char product_licence[61]; + unsigned short standby_power; + unsigned short max_power; +}; + +struct maple_device { + struct maple_driver *driver; + struct mapleq *mq; + void (*callback) (struct mapleq * mq); + void (*fileerr_handler)(struct maple_device *mdev, void *recvbuf); + int (*can_unload)(struct maple_device *mdev); + unsigned long when, interval, function; + struct maple_devinfo devinfo; + unsigned char port, unit; + char product_name[32]; + char product_licence[64]; + atomic_t busy; + wait_queue_head_t maple_wait; + struct device dev; +}; + +struct maple_driver { + unsigned long function; + struct device_driver drv; +}; + +void maple_getcond_callback(struct maple_device *dev, + void (*callback) (struct mapleq * mq), + unsigned long interval, + unsigned long function); +int maple_driver_register(struct maple_driver *); +void maple_driver_unregister(struct maple_driver *); + +int maple_add_packet(struct maple_device *mdev, u32 function, + u32 command, u32 length, void *data); +void 
maple_clear_dev(struct maple_device *mdev); + +#define to_maple_dev(n) container_of(n, struct maple_device, dev) +#define to_maple_driver(n) container_of(n, struct maple_driver, drv) + +#define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev) +#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) + +#endif /* __LINUX_MAPLE_H */ diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h new file mode 100644 index 000000000..9a488497e --- /dev/null +++ b/include/linux/marvell_phy.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _MARVELL_PHY_H +#define _MARVELL_PHY_H + +/* Mask used for ID comparisons */ +#define MARVELL_PHY_ID_MASK 0xfffffff0 + +/* Known PHY IDs */ +#define MARVELL_PHY_ID_88E1101 0x01410c60 +#define MARVELL_PHY_ID_88E1112 0x01410c90 +#define MARVELL_PHY_ID_88E1111 0x01410cc0 +#define MARVELL_PHY_ID_88E1118 0x01410e10 +#define MARVELL_PHY_ID_88E1121R 0x01410cb0 +#define MARVELL_PHY_ID_88E1145 0x01410cd0 +#define MARVELL_PHY_ID_88E1149R 0x01410e50 +#define MARVELL_PHY_ID_88E1240 0x01410e30 +#define MARVELL_PHY_ID_88E1318S 0x01410e90 +#define MARVELL_PHY_ID_88E1116R 0x01410e40 +#define MARVELL_PHY_ID_88E1510 0x01410dd0 +#define MARVELL_PHY_ID_88E1540 0x01410eb0 +#define MARVELL_PHY_ID_88E1545 0x01410ea0 +#define MARVELL_PHY_ID_88E3016 0x01410e60 + +/* These Ethernet switch families contain embedded PHYs, but they do + * not have a model ID. So the switch driver traps reads to the ID2 + * register and returns the switch family ID + */ +#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41 +#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90 + +#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4) + +/* struct phy_device dev_flags definitions */ +#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 +#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 + +#endif /* _MARVELL_PHY_H */ diff --git a/include/linux/math64.h b/include/linux/math64.h new file mode 100644 index 000000000..bb2c84afb --- /dev/null +++ b/include/linux/math64.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MATH64_H +#define _LINUX_MATH64_H + +#include +#include + +#if BITS_PER_LONG == 64 + +#define div64_long(x, y) div64_s64((x), (y)) +#define div64_ul(x, y) div64_u64((x), (y)) + +/** + * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder + * @dividend: unsigned 64bit dividend + * @divisor: unsigned 32bit divisor + * @remainder: pointer to unsigned 32bit remainder + * + * Return: sets ``*remainder``, then returns dividend / divisor + * + * This is commonly provided by 32bit archs to provide an optimized 64bit + * divide. 
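For illustration only (not part of this header): two tiny helpers showing the intended use of the 64-bit division helpers documented here, in place of open-coded '/' or '%' on u64 values (which 32-bit architectures cannot compile directly). The helper names are invented for the example.

#include <linux/math64.h>
#include <linux/time64.h>	/* NSEC_PER_SEC */

/* Split a nanosecond count into whole seconds plus the leftover ns. */
static inline u64 example_ns_to_secs(u64 ns, u32 *rem_ns)
{
	return div_u64_rem(ns, NSEC_PER_SEC, rem_ns);
}

/* Average of a 64-bit total over a 64-bit sample count. */
static inline u64 example_average(u64 total, u64 nr_samples)
{
	return nr_samples ? div64_u64(total, nr_samples) : 0;
}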
+ */ +static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} + +/** + * div_s64_rem - signed 64bit divide with 32bit divisor with remainder + * @dividend: signed 64bit dividend + * @divisor: signed 32bit divisor + * @remainder: pointer to signed 32bit remainder + * + * Return: sets ``*remainder``, then returns dividend / divisor + */ +static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} + +/** + * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder + * @dividend: unsigned 64bit dividend + * @divisor: unsigned 64bit divisor + * @remainder: pointer to unsigned 64bit remainder + * + * Return: sets ``*remainder``, then returns dividend / divisor + */ +static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} + +/** + * div64_u64 - unsigned 64bit divide with 64bit divisor + * @dividend: unsigned 64bit dividend + * @divisor: unsigned 64bit divisor + * + * Return: dividend / divisor + */ +static inline u64 div64_u64(u64 dividend, u64 divisor) +{ + return dividend / divisor; +} + +/** + * div64_s64 - signed 64bit divide with 64bit divisor + * @dividend: signed 64bit dividend + * @divisor: signed 64bit divisor + * + * Return: dividend / divisor + */ +static inline s64 div64_s64(s64 dividend, s64 divisor) +{ + return dividend / divisor; +} + +#elif BITS_PER_LONG == 32 + +#define div64_long(x, y) div_s64((x), (y)) +#define div64_ul(x, y) div_u64((x), (y)) + +#ifndef div_u64_rem +static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +{ + *remainder = do_div(dividend, divisor); + return dividend; +} +#endif + +#ifndef div_s64_rem +extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder); +#endif + +#ifndef div64_u64_rem +extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder); +#endif + +#ifndef div64_u64 +extern u64 div64_u64(u64 dividend, u64 divisor); +#endif + +#ifndef div64_s64 +extern s64 div64_s64(s64 dividend, s64 divisor); +#endif + +#endif /* BITS_PER_LONG */ + +/** + * div_u64 - unsigned 64bit divide with 32bit divisor + * @dividend: unsigned 64bit dividend + * @divisor: unsigned 32bit divisor + * + * This is the most common 64bit divide and should be used if possible, + * as many 32bit archs can optimize this variant better than a full 64bit + * divide. + */ +#ifndef div_u64 +static inline u64 div_u64(u64 dividend, u32 divisor) +{ + u32 remainder; + return div_u64_rem(dividend, divisor, &remainder); +} +#endif + +/** + * div_s64 - signed 64bit divide with 32bit divisor + * @dividend: signed 64bit dividend + * @divisor: signed 32bit divisor + */ +#ifndef div_s64 +static inline s64 div_s64(s64 dividend, s32 divisor) +{ + s32 remainder; + return div_s64_rem(dividend, divisor, &remainder); +} +#endif + +u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); + +static __always_inline u32 +__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) +{ + u32 ret = 0; + + while (dividend >= divisor) { + /* The following asm() prevents the compiler from + optimising this loop into a modulo operation. 
*/ + asm("" : "+rm"(dividend)); + + dividend -= divisor; + ret++; + } + + *remainder = dividend; + + return ret; +} + +#ifndef mul_u32_u32 +/* + * Many a GCC version messes this up and generates a 64x64 mult :-( + */ +static inline u64 mul_u32_u32(u32 a, u32 b) +{ + return (u64)a * b; +} +#endif + +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} +#endif /* mul_u64_u32_shr */ + +#ifndef mul_u64_u64_shr +static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} +#endif /* mul_u64_u64_shr */ + +#else + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + u32 ah, al; + u64 ret; + + al = a; + ah = a >> 32; + + ret = mul_u32_u32(al, mul) >> shift; + if (ah) + ret += mul_u32_u32(ah, mul) << (32 - shift); + + return ret; +} +#endif /* mul_u64_u32_shr */ + +#ifndef mul_u64_u64_shr +static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift) +{ + union { + u64 ll; + struct { +#ifdef __BIG_ENDIAN + u32 high, low; +#else + u32 low, high; +#endif + } l; + } rl, rm, rn, rh, a0, b0; + u64 c; + + a0.ll = a; + b0.ll = b; + + rl.ll = mul_u32_u32(a0.l.low, b0.l.low); + rm.ll = mul_u32_u32(a0.l.low, b0.l.high); + rn.ll = mul_u32_u32(a0.l.high, b0.l.low); + rh.ll = mul_u32_u32(a0.l.high, b0.l.high); + + /* + * Each of these lines computes a 64-bit intermediate result into "c", + * starting at bits 32-95. The low 32-bits go into the result of the + * multiplication, the high 32-bits are carried into the next step. + */ + rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low; + rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low; + rh.l.high = (c >> 32) + rh.l.high; + + /* + * The 128-bit result of the multiplication is in rl.ll and rh.ll, + * shift it right and throw away the high part of the result. + */ + if (shift == 0) + return rl.ll; + if (shift < 64) + return (rl.ll >> shift) | (rh.ll << (64 - shift)); + return rh.ll >> (shift & 63); +} +#endif /* mul_u64_u64_shr */ + +#endif + +#ifndef mul_u64_u32_div +static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) +{ + union { + u64 ll; + struct { +#ifdef __BIG_ENDIAN + u32 high, low; +#else + u32 low, high; +#endif + } l; + } u, rl, rh; + + u.ll = a; + rl.ll = mul_u32_u32(u.l.low, mul); + rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high; + + /* Bits 32-63 of the result will be in rh.l.low. */ + rl.l.high = do_div(rh.ll, divisor); + + /* Bits 0-31 of the result will be in rl.l.low. */ + do_div(rl.ll, divisor); + + rl.l.high = rh.l.low; + return rl.ll; +} +#endif /* mul_u64_u32_div */ + +#define DIV64_U64_ROUND_UP(ll, d) \ + ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); }) + +#endif /* _LINUX_MATH64_H */ diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h new file mode 100644 index 000000000..ad97b06cf --- /dev/null +++ b/include/linux/max17040_battery.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2009 Samsung Electronics + * Minkyu Kang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __MAX17040_BATTERY_H_ +#define __MAX17040_BATTERY_H_ + +struct max17040_platform_data { + int (*battery_online)(void); + int (*charger_online)(void); + int (*charger_enable)(void); +}; + +#endif diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h new file mode 100644 index 000000000..20f1e3ff6 --- /dev/null +++ b/include/linux/mbcache.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MBCACHE_H +#define _LINUX_MBCACHE_H + +#include +#include +#include +#include +#include + +struct mb_cache; + +struct mb_cache_entry { + /* List of entries in cache - protected by cache->c_list_lock */ + struct list_head e_list; + /* Hash table list - protected by hash chain bitlock */ + struct hlist_bl_node e_hash_list; + atomic_t e_refcnt; + /* Key in hash - stable during lifetime of the entry */ + u32 e_key; + u32 e_referenced:1; + u32 e_reusable:1; + /* User provided value - stable during lifetime of the entry */ + u64 e_value; +}; + +struct mb_cache *mb_cache_create(int bucket_bits); +void mb_cache_destroy(struct mb_cache *cache); + +int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, + u64 value, bool reusable); +void __mb_cache_entry_free(struct mb_cache_entry *entry); +static inline int mb_cache_entry_put(struct mb_cache *cache, + struct mb_cache_entry *entry) +{ + if (!atomic_dec_and_test(&entry->e_refcnt)) + return 0; + __mb_cache_entry_free(entry); + return 1; +} + +void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value); +struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, + u64 value); +struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, + u32 key); +struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache, + struct mb_cache_entry *entry); +void mb_cache_entry_touch(struct mb_cache *cache, + struct mb_cache_entry *entry); + +#endif /* _LINUX_MBCACHE_H */ diff --git a/include/linux/mbus.h b/include/linux/mbus.h new file mode 100644 index 000000000..477314524 --- /dev/null +++ b/include/linux/mbus.h @@ -0,0 +1,109 @@ +/* + * Marvell MBUS common definitions. + * + * Copyright (C) 2008 Marvell Semiconductor + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __LINUX_MBUS_H +#define __LINUX_MBUS_H + +#include + +struct resource; + +struct mbus_dram_target_info +{ + /* + * The 4-bit MBUS target ID of the DRAM controller. + */ + u8 mbus_dram_target_id; + + /* + * The base address, size, and MBUS attribute ID for each + * of the possible DRAM chip selects. Peripherals are + * required to support at least 4 decode windows. + */ + int num_cs; + struct mbus_dram_window { + u8 cs_index; + u8 mbus_attr; + u64 base; + u64 size; + } cs[4]; +}; + +/* Flags for PCI/PCIe address decoding regions */ +#define MVEBU_MBUS_PCI_IO 0x1 +#define MVEBU_MBUS_PCI_MEM 0x2 +#define MVEBU_MBUS_PCI_WA 0x3 + +/* + * Magic value that explicits that we don't need a remapping-capable + * address decoding window. + */ +#define MVEBU_MBUS_NO_REMAP (0xffffffff) + +/* Maximum size of a mbus window name */ +#define MVEBU_MBUS_MAX_WINNAME_SZ 32 + +/* + * The Marvell mbus is to be found only on SOCs from the Orion family + * at the moment. Provide a dummy stub for other architectures. 
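+ *
+ * A typical consumer walks the DRAM windows reported here when setting
+ * up its own address decoding; a rough sketch (setup_window() is a
+ * hypothetical driver helper, not part of this header):
+ *
+ *	const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+ *	int i;
+ *
+ *	for (i = 0; dram && i < dram->num_cs; i++)
+ *		setup_window(dram->mbus_dram_target_id, &dram->cs[i]);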
+ */ +#ifdef CONFIG_PLAT_ORION +extern const struct mbus_dram_target_info *mv_mbus_dram_info(void); +extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void); +int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target, + u8 *attr); +#else +static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) +{ + return NULL; +} +static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void) +{ + return NULL; +} +static inline int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, + u8 *target, u8 *attr) +{ + /* + * On all ARM32 MVEBU platforms with MBus support, this stub + * function will not get called. The real function from the + * MBus driver is called instead. ARM64 MVEBU platforms like + * the Armada 3700 could use the mv_xor device driver which calls + * into this function + */ + return -EINVAL; +} +#endif + +#ifdef CONFIG_MVEBU_MBUS +int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr); +void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); +void mvebu_mbus_get_pcie_io_aperture(struct resource *res); +int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr); +int mvebu_mbus_add_window_remap_by_id(unsigned int target, + unsigned int attribute, + phys_addr_t base, size_t size, + phys_addr_t remap); +int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute, + phys_addr_t base, size_t size); +int mvebu_mbus_del_window(phys_addr_t base, size_t size); +int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base, + size_t mbus_size, phys_addr_t sdram_phys_base, + size_t sdram_size); +int mvebu_mbus_dt_init(bool is_coherent); +#else +static inline int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, + u8 *attr) +{ + return -EINVAL; +} +#endif /* CONFIG_MVEBU_MBUS */ + +#endif /* __LINUX_MBUS_H */ diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h new file mode 100644 index 000000000..0661af17a --- /dev/null +++ b/include/linux/mc146818rtc.h @@ -0,0 +1,129 @@ +/* mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM + * Copyright Torsten Duwe 1993 + * derived from Data Sheet, Copyright Motorola 1984 (!). + * It was written to be part of the Linux operating system. + */ +/* permission is hereby granted to copy, modify and redistribute this code + * in terms of the GNU Library General Public License, Version 2 or later, + * at your option. + */ + +#ifndef _MC146818RTC_H +#define _MC146818RTC_H + +#include +#include /* get the user-level API */ +#include /* register access macros */ +#include +#include +#include + +#ifdef __KERNEL__ +#include /* spinlock_t */ +extern spinlock_t rtc_lock; /* serialize CMOS RAM access */ + +/* Some RTCs extend the mc146818 register set to support alarms of more + * than 24 hours in the future; or dates that include a century code. + * This platform_data structure can pass this information to the driver. + * + * Also, some platforms need suspend()/resume() hooks to kick in special + * handling of wake alarms, e.g. activating ACPI BIOS hooks or setting up + * a separate wakeup alarm used by some almost-clone chips. 
+ */ +struct cmos_rtc_board_info { + void (*wake_on)(struct device *dev); + void (*wake_off)(struct device *dev); + + u32 flags; +#define CMOS_RTC_FLAGS_NOFREQ (1 << 0) + int address_space; + + u8 rtc_day_alarm; /* zero, or register index */ + u8 rtc_mon_alarm; /* zero, or register index */ + u8 rtc_century; /* zero, or register index */ +}; +#endif + +/********************************************************************** + * register summary + **********************************************************************/ +#define RTC_SECONDS 0 +#define RTC_SECONDS_ALARM 1 +#define RTC_MINUTES 2 +#define RTC_MINUTES_ALARM 3 +#define RTC_HOURS 4 +#define RTC_HOURS_ALARM 5 +/* RTC_*_alarm is always true if 2 MSBs are set */ +# define RTC_ALARM_DONT_CARE 0xC0 + +#define RTC_DAY_OF_WEEK 6 +#define RTC_DAY_OF_MONTH 7 +#define RTC_MONTH 8 +#define RTC_YEAR 9 + +/* control registers - Moto names + */ +#define RTC_REG_A 10 +#define RTC_REG_B 11 +#define RTC_REG_C 12 +#define RTC_REG_D 13 + +/********************************************************************** + * register details + **********************************************************************/ +#define RTC_FREQ_SELECT RTC_REG_A + +/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus, + * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete, + * totalling to a max high interval of 2.228 ms. + */ +# define RTC_UIP 0x80 +# define RTC_DIV_CTL 0x70 + /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */ +# define RTC_REF_CLCK_4MHZ 0x00 +# define RTC_REF_CLCK_1MHZ 0x10 +# define RTC_REF_CLCK_32KHZ 0x20 + /* 2 values for divider stage reset, others for "testing purposes only" */ +# define RTC_DIV_RESET1 0x60 +# define RTC_DIV_RESET2 0x70 + /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */ +# define RTC_RATE_SELECT 0x0F + +/**********************************************************************/ +#define RTC_CONTROL RTC_REG_B +# define RTC_SET 0x80 /* disable updates for clock setting */ +# define RTC_PIE 0x40 /* periodic interrupt enable */ +# define RTC_AIE 0x20 /* alarm interrupt enable */ +# define RTC_UIE 0x10 /* update-finished interrupt enable */ +# define RTC_SQWE 0x08 /* enable square-wave output */ +# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ +# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ +# define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */ + +/**********************************************************************/ +#define RTC_INTR_FLAGS RTC_REG_C +/* caution - cleared by read */ +# define RTC_IRQF 0x80 /* any of the following 3 is active */ +# define RTC_PF 0x40 +# define RTC_AF 0x20 +# define RTC_UF 0x10 + +/**********************************************************************/ +#define RTC_VALID RTC_REG_D +# define RTC_VRT 0x80 /* valid RAM and time */ +/**********************************************************************/ + +#ifndef ARCH_RTC_LOCATION /* Override by ? */ + +#define RTC_IO_EXTENT 0x8 +#define RTC_IO_EXTENT_USED 0x2 +#define RTC_IOMAPPED 1 /* Default to I/O mapping. 
*/ + +#else +#define RTC_IO_EXTENT_USED RTC_IO_EXTENT +#endif /* ARCH_RTC_LOCATION */ + +unsigned int mc146818_get_time(struct rtc_time *time); +int mc146818_set_time(struct rtc_time *time); + +#endif /* _MC146818RTC_H */ diff --git a/include/linux/mc6821.h b/include/linux/mc6821.h new file mode 100644 index 000000000..8dffab19b --- /dev/null +++ b/include/linux/mc6821.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _MC6821_H_ +#define _MC6821_H_ + +/* + * This file describes the memery mapping of the MC6821 PIA. + * The unions describe overlayed registers. Which of them is used is + * determined by bit 2 of the corresponding control register. + * this files expects the PIA_REG_PADWIDTH to be defined the numeric + * value of the register spacing. + * + * Data came from MFC-31-Developer Kit (from Ralph Seidel, + * zodiac@darkness.gun.de) and Motorola Data Sheet (from + * Richard Hirst, srh@gpt.co.uk) + * + * 6.11.95 copyright Joerg Dorchain (dorchain@mpi-sb.mpg.de) + * + */ + +#ifndef PIA_REG_PADWIDTH +#define PIA_REG_PADWIDTH 255 +#endif + +struct pia { + union { + volatile u_char pra; + volatile u_char ddra; + } ua; + u_char pad1[PIA_REG_PADWIDTH]; + volatile u_char cra; + u_char pad2[PIA_REG_PADWIDTH]; + union { + volatile u_char prb; + volatile u_char ddrb; + } ub; + u_char pad3[PIA_REG_PADWIDTH]; + volatile u_char crb; + u_char pad4[PIA_REG_PADWIDTH]; +}; + +#define ppra ua.pra +#define pddra ua.ddra +#define pprb ub.prb +#define pddrb ub.ddrb + +#define PIA_C1_ENABLE_IRQ (1<<0) +#define PIA_C1_LOW_TO_HIGH (1<<1) +#define PIA_DDR (1<<2) +#define PIA_IRQ2 (1<<6) +#define PIA_IRQ1 (1<<7) + +#endif diff --git a/include/linux/mcb.h b/include/linux/mcb.h new file mode 100644 index 000000000..b1a0ad9d2 --- /dev/null +++ b/include/linux/mcb.h @@ -0,0 +1,142 @@ +/* + * MEN Chameleon Bus. + * + * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de) + * Author: Johannes Thumshirn + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; version 2 of the License. 
+ */ +#ifndef _LINUX_MCB_H +#define _LINUX_MCB_H + +#include +#include +#include + +#define CHAMELEON_FILENAME_LEN 12 + +struct mcb_driver; +struct mcb_device; + +/** + * struct mcb_bus - MEN Chameleon Bus + * + * @dev: bus device + * @carrier: pointer to carrier device + * @bus_nr: mcb bus number + * @get_irq: callback to get IRQ number + * @revision: the FPGA's revision number + * @model: the FPGA's model number + * @filename: the FPGA's name + */ +struct mcb_bus { + struct device dev; + struct device *carrier; + int bus_nr; + u8 revision; + char model; + u8 minor; + char name[CHAMELEON_FILENAME_LEN + 1]; + int (*get_irq)(struct mcb_device *dev); +}; + +static inline struct mcb_bus *to_mcb_bus(struct device *dev) +{ + return container_of(dev, struct mcb_bus, dev); +} + +/** + * struct mcb_device - MEN Chameleon Bus device + * + * @dev: device in kernel representation + * @bus: mcb bus the device is plugged to + * @is_added: flag to check if device is added to bus + * @driver: associated mcb_driver + * @id: mcb device id + * @inst: instance in Chameleon table + * @group: group in Chameleon table + * @var: variant in Chameleon table + * @bar: BAR in Chameleon table + * @rev: revision in Chameleon table + * @irq: IRQ resource + * @memory: memory resource + */ +struct mcb_device { + struct device dev; + struct mcb_bus *bus; + bool is_added; + struct mcb_driver *driver; + u16 id; + int inst; + int group; + int var; + int bar; + int rev; + struct resource irq; + struct resource mem; + struct device *dma_dev; +}; + +static inline struct mcb_device *to_mcb_device(struct device *dev) +{ + return container_of(dev, struct mcb_device, dev); +} + +/** + * struct mcb_driver - MEN Chameleon Bus device driver + * + * @driver: device_driver + * @id_table: mcb id table + * @probe: probe callback + * @remove: remove callback + * @shutdown: shutdown callback + */ +struct mcb_driver { + struct device_driver driver; + const struct mcb_device_id *id_table; + int (*probe)(struct mcb_device *mdev, const struct mcb_device_id *id); + void (*remove)(struct mcb_device *mdev); + void (*shutdown)(struct mcb_device *mdev); +}; + +static inline struct mcb_driver *to_mcb_driver(struct device_driver *drv) +{ + return container_of(drv, struct mcb_driver, driver); +} + +static inline void *mcb_get_drvdata(struct mcb_device *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void mcb_set_drvdata(struct mcb_device *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +extern int __must_check __mcb_register_driver(struct mcb_driver *drv, + struct module *owner, + const char *mod_name); +#define mcb_register_driver(driver) \ + __mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) +extern void mcb_unregister_driver(struct mcb_driver *driver); +#define module_mcb_driver(__mcb_driver) \ + module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver); +extern void mcb_bus_add_devices(const struct mcb_bus *bus); +extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev); +extern struct mcb_bus *mcb_alloc_bus(struct device *carrier); +extern struct mcb_bus *mcb_bus_get(struct mcb_bus *bus); +extern void mcb_bus_put(struct mcb_bus *bus); +extern struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus); +extern void mcb_free_dev(struct mcb_device *dev); +extern void mcb_release_bus(struct mcb_bus *bus); +extern struct resource *mcb_request_mem(struct mcb_device *dev, + const char *name); +extern void mcb_release_mem(struct resource *mem); +extern int mcb_get_irq(struct mcb_device 
*dev); +extern struct resource *mcb_get_resource(struct mcb_device *dev, + unsigned int type); + +#endif /* _LINUX_MCB_H */ diff --git a/include/linux/mdev.h b/include/linux/mdev.h new file mode 100644 index 000000000..b6e048e10 --- /dev/null +++ b/include/linux/mdev.h @@ -0,0 +1,138 @@ +/* + * Mediated device definition + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Author: Neo Jia + * Kirti Wankhede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MDEV_H +#define MDEV_H + +struct mdev_device; + +/** + * struct mdev_parent_ops - Structure to be registered for each parent device to + * register the device to mdev module. + * + * @owner: The module owner. + * @dev_attr_groups: Attributes of the parent device. + * @mdev_attr_groups: Attributes of the mediated device. + * @supported_type_groups: Attributes to define supported types. It is mandatory + * to provide supported types. + * @create: Called to allocate basic resources in parent device's + * driver for a particular mediated device. It is + * mandatory to provide create ops. + * @kobj: kobject of type for which 'create' is called. + * @mdev: mdev_device structure on of mediated device + * that is being created + * Returns integer: success (0) or error (< 0) + * @remove: Called to free resources in parent device's driver for a + * a mediated device. It is mandatory to provide 'remove' + * ops. + * @mdev: mdev_device device structure which is being + * destroyed + * Returns integer: success (0) or error (< 0) + * @open: Open mediated device. + * @mdev: mediated device. + * Returns integer: success (0) or error (< 0) + * @release: release mediated device + * @mdev: mediated device. + * @read: Read emulation callback + * @mdev: mediated device structure + * @buf: read buffer + * @count: number of bytes to read + * @ppos: address. + * Retuns number on bytes read on success or error. + * @write: Write emulation callback + * @mdev: mediated device structure + * @buf: write buffer + * @count: number of bytes to be written + * @ppos: address. + * Retuns number on bytes written on success or error. + * @ioctl: IOCTL callback + * @mdev: mediated device structure + * @cmd: ioctl command + * @arg: arguments to ioctl + * @mmap: mmap callback + * @mdev: mediated device structure + * @vma: vma structure + * Parent device that support mediated device should be registered with mdev + * module with mdev_parent_ops structure. 
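+ *
+ * A minimal registration sketch (every my_* name and parent_dev below is
+ * illustrative; only mdev_register_device() comes from this header):
+ *
+ *	static const struct mdev_parent_ops my_ops = {
+ *		.owner			= THIS_MODULE,
+ *		.supported_type_groups	= my_type_groups,
+ *		.create			= my_create,
+ *		.remove			= my_remove,
+ *	};
+ *
+ *	ret = mdev_register_device(parent_dev, &my_ops);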
+ **/ +struct mdev_parent_ops { + struct module *owner; + const struct attribute_group **dev_attr_groups; + const struct attribute_group **mdev_attr_groups; + struct attribute_group **supported_type_groups; + + int (*create)(struct kobject *kobj, struct mdev_device *mdev); + int (*remove)(struct mdev_device *mdev); + int (*open)(struct mdev_device *mdev); + void (*release)(struct mdev_device *mdev); + ssize_t (*read)(struct mdev_device *mdev, char __user *buf, + size_t count, loff_t *ppos); + ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, + size_t count, loff_t *ppos); + long (*ioctl)(struct mdev_device *mdev, unsigned int cmd, + unsigned long arg); + int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); +}; + +/* interface for exporting mdev supported type attributes */ +struct mdev_type_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf); + ssize_t (*store)(struct kobject *kobj, struct device *dev, + const char *buf, size_t count); +}; + +#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \ +struct mdev_type_attribute mdev_type_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) +#define MDEV_TYPE_ATTR_RW(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name) +#define MDEV_TYPE_ATTR_RO(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name) +#define MDEV_TYPE_ATTR_WO(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name) + +/** + * struct mdev_driver - Mediated device driver + * @name: driver name + * @probe: called when new device created + * @remove: called when device removed + * @driver: device driver structure + * + **/ +struct mdev_driver { + const char *name; + int (*probe)(struct device *dev); + void (*remove)(struct device *dev); + struct device_driver driver; +}; + +#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver) + +extern void *mdev_get_drvdata(struct mdev_device *mdev); +extern void mdev_set_drvdata(struct mdev_device *mdev, void *data); +extern uuid_le mdev_uuid(struct mdev_device *mdev); + +extern struct bus_type mdev_bus_type; + +extern int mdev_register_device(struct device *dev, + const struct mdev_parent_ops *ops); +extern void mdev_unregister_device(struct device *dev); + +extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner); +extern void mdev_unregister_driver(struct mdev_driver *drv); + +extern struct device *mdev_parent_dev(struct mdev_device *mdev); +extern struct device *mdev_dev(struct mdev_device *mdev); +extern struct mdev_device *mdev_from_dev(struct device *dev); + +#endif /* MDEV_H */ diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h new file mode 100644 index 000000000..5d71e8a85 --- /dev/null +++ b/include/linux/mdio-bitbang.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MDIO_BITBANG_H +#define __LINUX_MDIO_BITBANG_H + +#include + +struct module; + +struct mdiobb_ctrl; + +struct mdiobb_ops { + struct module *owner; + + /* Set the Management Data Clock high if level is one, + * low if level is zero. + */ + void (*set_mdc)(struct mdiobb_ctrl *ctrl, int level); + + /* Configure the Management Data I/O pin as an input if + * "output" is zero, or an output if "output" is one. + */ + void (*set_mdio_dir)(struct mdiobb_ctrl *ctrl, int output); + + /* Set the Management Data I/O pin high if value is one, + * low if "value" is zero. 
This may only be called + * when the MDIO pin is configured as an output. + */ + void (*set_mdio_data)(struct mdiobb_ctrl *ctrl, int value); + + /* Retrieve the state Management Data I/O pin. */ + int (*get_mdio_data)(struct mdiobb_ctrl *ctrl); +}; + +struct mdiobb_ctrl { + const struct mdiobb_ops *ops; +}; + +/* The returned bus is not yet registered with the phy layer. */ +struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl); + +/* The bus must already have been unregistered. */ +void free_mdio_bitbang(struct mii_bus *bus); + +#endif diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h new file mode 100644 index 000000000..cea443a67 --- /dev/null +++ b/include/linux/mdio-gpio.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MDIO_GPIO_H +#define __LINUX_MDIO_GPIO_H + +#define MDIO_GPIO_MDC 0 +#define MDIO_GPIO_MDIO 1 +#define MDIO_GPIO_MDO 2 + +#endif diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h new file mode 100644 index 000000000..a5d58f221 --- /dev/null +++ b/include/linux/mdio-mux.h @@ -0,0 +1,32 @@ +/* + * MDIO bus multiplexer framwork. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2011, 2012 Cavium, Inc. + */ +#ifndef __LINUX_MDIO_MUX_H +#define __LINUX_MDIO_MUX_H +#include +#include + +/* mdio_mux_init() - Initialize a MDIO mux + * @dev The device owning the MDIO mux + * @mux_node The device node of the MDIO mux + * @switch_fn The function called for switching target MDIO child + * mux_handle A pointer to a (void *) used internaly by mdio-mux + * @data Private data used by switch_fn() + * @mux_bus An optional parent bus (Other case are to use parent_bus property) + */ +int mdio_mux_init(struct device *dev, + struct device_node *mux_node, + int (*switch_fn) (int cur, int desired, void *data), + void **mux_handle, + void *data, + struct mii_bus *mux_bus); + +void mdio_mux_uninit(void *mux_handle); + +#endif /* __LINUX_MDIO_MUX_H */ diff --git a/include/linux/mdio.h b/include/linux/mdio.h new file mode 100644 index 000000000..85325e110 --- /dev/null +++ b/include/linux/mdio.h @@ -0,0 +1,299 @@ +/* + * linux/mdio.h: definitions for MDIO (clause 45) transceivers + * Copyright 2006-2009 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#ifndef __LINUX_MDIO_H__ +#define __LINUX_MDIO_H__ + +#include +#include + +struct gpio_desc; +struct mii_bus; + +/* Multiple levels of nesting are possible. However typically this is + * limited to nested DSA like layer, a MUX layer, and the normal + * user. Instead of trying to handle the general case, just define + * these cases. 
+ */ +enum mdio_mutex_lock_class { + MDIO_MUTEX_NORMAL, + MDIO_MUTEX_MUX, + MDIO_MUTEX_NESTED, +}; + +struct mdio_device { + struct device dev; + + struct mii_bus *bus; + char modalias[MDIO_NAME_SIZE]; + + int (*bus_match)(struct device *dev, struct device_driver *drv); + void (*device_free)(struct mdio_device *mdiodev); + void (*device_remove)(struct mdio_device *mdiodev); + + /* Bus address of the MDIO device (0-31) */ + int addr; + int flags; + struct gpio_desc *reset; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; +}; +#define to_mdio_device(d) container_of(d, struct mdio_device, dev) + +/* struct mdio_driver_common: Common to all MDIO drivers */ +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; +#define MDIO_DEVICE_FLAG_PHY 1 +#define to_mdio_common_driver(d) \ + container_of(d, struct mdio_driver_common, driver) + +/* struct mdio_driver: Generic MDIO driver */ +struct mdio_driver { + struct mdio_driver_common mdiodrv; + + /* + * Called during discovery. Used to set + * up device-specific structures, if any + */ + int (*probe)(struct mdio_device *mdiodev); + + /* Clears up any memory if needed */ + void (*remove)(struct mdio_device *mdiodev); + + /* Quiesces the device on system shutdown, turns off interrupts etc */ + void (*shutdown)(struct mdio_device *mdiodev); +}; +#define to_mdio_driver(d) \ + container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv) + +void mdio_device_free(struct mdio_device *mdiodev); +struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); +int mdio_device_register(struct mdio_device *mdiodev); +void mdio_device_remove(struct mdio_device *mdiodev); +void mdio_device_reset(struct mdio_device *mdiodev, int value); +int mdio_driver_register(struct mdio_driver *drv); +void mdio_driver_unregister(struct mdio_driver *drv); +int mdio_device_bus_match(struct device *dev, struct device_driver *drv); + +static inline bool mdio_phy_id_is_c45(int phy_id) +{ + return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK); +} + +static inline __u16 mdio_phy_id_prtad(int phy_id) +{ + return (phy_id & MDIO_PHY_ID_PRTAD) >> 5; +} + +static inline __u16 mdio_phy_id_devad(int phy_id) +{ + return phy_id & MDIO_PHY_ID_DEVAD; +} + +/** + * struct mdio_if_info - Ethernet controller MDIO interface + * @prtad: PRTAD of the PHY (%MDIO_PRTAD_NONE if not present/unknown) + * @mmds: Mask of MMDs expected to be present in the PHY. This must be + * non-zero unless @prtad = %MDIO_PRTAD_NONE. + * @mode_support: MDIO modes supported. If %MDIO_SUPPORTS_C22 is set then + * MII register access will be passed through with @devad = + * %MDIO_DEVAD_NONE. If %MDIO_EMULATE_C22 is set then access to + * commonly used clause 22 registers will be translated into + * clause 45 registers. 
+ * @dev: Net device structure + * @mdio_read: Register read function; returns value or negative error code + * @mdio_write: Register write function; returns 0 or negative error code + */ +struct mdio_if_info { + int prtad; + u32 mmds; + unsigned mode_support; + + struct net_device *dev; + int (*mdio_read)(struct net_device *dev, int prtad, int devad, + u16 addr); + int (*mdio_write)(struct net_device *dev, int prtad, int devad, + u16 addr, u16 val); +}; + +#define MDIO_PRTAD_NONE (-1) +#define MDIO_DEVAD_NONE (-1) +#define MDIO_SUPPORTS_C22 1 +#define MDIO_SUPPORTS_C45 2 +#define MDIO_EMULATE_C22 4 + +struct ethtool_cmd; +struct ethtool_pauseparam; +extern int mdio45_probe(struct mdio_if_info *mdio, int prtad); +extern int mdio_set_flag(const struct mdio_if_info *mdio, + int prtad, int devad, u16 addr, int mask, + bool sense); +extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds); +extern int mdio45_nway_restart(const struct mdio_if_info *mdio); +extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, + struct ethtool_cmd *ecmd, + u32 npage_adv, u32 npage_lpa); +extern void +mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio, + struct ethtool_link_ksettings *cmd, + u32 npage_adv, u32 npage_lpa); + +/** + * mdio45_ethtool_gset - get settings for ETHTOOL_GSET + * @mdio: MDIO interface + * @ecmd: Ethtool request structure + * + * Since the CSRs for auto-negotiation using next pages are not fully + * standardised, this function does not attempt to decode them. Use + * mdio45_ethtool_gset_npage() to specify advertisement bits from next + * pages. + */ +static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio, + struct ethtool_cmd *ecmd) +{ + mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0); +} + +/** + * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS + * @mdio: MDIO interface + * @cmd: Ethtool request structure + * + * Since the CSRs for auto-negotiation using next pages are not fully + * standardised, this function does not attempt to decode them. Use + * mdio45_ethtool_ksettings_get_npage() to specify advertisement bits + * from next pages. + */ +static inline void +mdio45_ethtool_ksettings_get(const struct mdio_if_info *mdio, + struct ethtool_link_ksettings *cmd) +{ + mdio45_ethtool_ksettings_get_npage(mdio, cmd, 0, 0); +} + +extern int mdio_mii_ioctl(const struct mdio_if_info *mdio, + struct mii_ioctl_data *mii_data, int cmd); + +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. + */ +static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} + +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisment (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. 
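+ *
+ * For example, an @eee_adv value with MDIO_EEE_100TX and MDIO_EEE_1000T
+ * set is translated to ADVERTISED_100baseT_Full and
+ * ADVERTISED_1000baseT_Full in the returned mask.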
+ */ +static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. + */ +static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} + +int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); + +int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); +int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); +int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); + +int mdiobus_register_device(struct mdio_device *mdiodev); +int mdiobus_unregister_device(struct mdio_device *mdiodev); +bool mdiobus_is_registered_device(struct mii_bus *bus, int addr); +struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr); + +/** + * mdio_module_driver() - Helper macro for registering mdio drivers + * + * Helper macro for MDIO drivers which do not do anything special in module + * init/exit. Each module may only use this macro once, and calling it + * replaces module_init() and module_exit(). + */ +#define mdio_module_driver(_mdio_driver) \ +static int __init mdio_module_init(void) \ +{ \ + return mdio_driver_register(&_mdio_driver); \ +} \ +module_init(mdio_module_init); \ +static void __exit mdio_module_exit(void) \ +{ \ + mdio_driver_unregister(&_mdio_driver); \ +} \ +module_exit(mdio_module_exit) + +#endif /* __LINUX_MDIO_H__ */ diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h new file mode 100644 index 000000000..7fde40e17 --- /dev/null +++ b/include/linux/mei_cl_bus.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MEI_CL_BUS_H +#define _LINUX_MEI_CL_BUS_H + +#include +#include +#include + +struct mei_cl_device; +struct mei_device; + +typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); + +/** + * struct mei_cl_device - MEI device handle + * An mei_cl_device pointer is returned from mei_add_device() + * and links MEI bus clients to their actual ME host client pointer. + * Drivers for MEI devices will get an mei_cl_device pointer + * when being probed and shall use it for doing ME bus I/O. 
+ * + * @bus_list: device on the bus list + * @bus: parent mei device + * @dev: linux driver model device pointer + * @me_cl: me client + * @cl: mei client + * @name: device name + * @rx_work: async work to execute Rx event callback + * @rx_cb: Drivers register this callback to get asynchronous ME + * Rx buffer pending notifications. + * @notif_work: async work to execute FW notif event callback + * @notif_cb: Drivers register this callback to get asynchronous ME + * FW notification pending notifications. + * + * @do_match: wheather device can be matched with a driver + * @is_added: device is already scanned + * @priv_data: client private data + */ +struct mei_cl_device { + struct list_head bus_list; + struct mei_device *bus; + struct device dev; + + struct mei_me_client *me_cl; + struct mei_cl *cl; + char name[MEI_CL_NAME_SIZE]; + + struct work_struct rx_work; + mei_cldev_cb_t rx_cb; + struct work_struct notif_work; + mei_cldev_cb_t notif_cb; + + unsigned int do_match:1; + unsigned int is_added:1; + + void *priv_data; +}; + +struct mei_cl_driver { + struct device_driver driver; + const char *name; + + const struct mei_cl_device_id *id_table; + + int (*probe)(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id); + int (*remove)(struct mei_cl_device *cldev); +}; + +int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, + struct module *owner); +#define mei_cldev_driver_register(cldrv) \ + __mei_cldev_driver_register(cldrv, THIS_MODULE) + +void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); + +/** + * module_mei_cl_driver - Helper macro for registering mei cl driver + * + * @__mei_cldrv: mei_cl_driver structure + * + * Helper macro for mei cl drivers which do not do anything special in module + * init/exit, for eliminating a boilerplate code. + */ +#define module_mei_cl_driver(__mei_cldrv) \ + module_driver(__mei_cldrv, \ + mei_cldev_driver_register,\ + mei_cldev_driver_unregister) + +ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, + size_t length); + +int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); +int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, + mei_cldev_cb_t notif_cb); + +const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); +u8 mei_cldev_ver(const struct mei_cl_device *cldev); + +void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev); +void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); + +int mei_cldev_enable(struct mei_cl_device *cldev); +int mei_cldev_disable(struct mei_cl_device *cldev); +bool mei_cldev_enabled(struct mei_cl_device *cldev); + +#endif /* _LINUX_MEI_CL_BUS_H */ diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h new file mode 100644 index 000000000..b310a9c18 --- /dev/null +++ b/include/linux/mem_encrypt.h @@ -0,0 +1,56 @@ +/* + * AMD Memory Encryption Support + * + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __MEM_ENCRYPT_H__ +#define __MEM_ENCRYPT_H__ + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_ARCH_HAS_MEM_ENCRYPT + +#include + +#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */ + +#define sme_me_mask 0ULL + +static inline bool sme_active(void) { return false; } +static inline bool sev_active(void) { return false; } + +#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ + +static inline bool mem_encrypt_active(void) +{ + return sme_me_mask; +} + +static inline u64 sme_get_me_mask(void) +{ + return sme_me_mask; +} + +#ifdef CONFIG_AMD_MEM_ENCRYPT +/* + * The __sme_set() and __sme_clr() macros are useful for adding or removing + * the encryption mask from a value (e.g. when dealing with pagetable + * entries). + */ +#define __sme_set(x) ((x) | sme_me_mask) +#define __sme_clr(x) ((x) & ~sme_me_mask) +#else +#define __sme_set(x) (x) +#define __sme_clr(x) (x) +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* __MEM_ENCRYPT_H__ */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h new file mode 100644 index 000000000..2acdd046d --- /dev/null +++ b/include/linux/memblock.h @@ -0,0 +1,452 @@ +#ifndef _LINUX_MEMBLOCK_H +#define _LINUX_MEMBLOCK_H +#ifdef __KERNEL__ + +#ifdef CONFIG_HAVE_MEMBLOCK +/* + * Logical memory blocks. + * + * Copyright (C) 2001 Peter Bergner, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include + +#define INIT_MEMBLOCK_REGIONS 128 +#define INIT_PHYSMEM_REGIONS 4 + +/** + * enum memblock_flags - definition of memory region attributes + * @MEMBLOCK_NONE: no special request + * @MEMBLOCK_HOTPLUG: hotpluggable region + * @MEMBLOCK_MIRROR: mirrored region + * @MEMBLOCK_NOMAP: don't add to kernel direct mapping + */ +enum memblock_flags { + MEMBLOCK_NONE = 0x0, /* No special request */ + MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */ + MEMBLOCK_MIRROR = 0x2, /* mirrored region */ + MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ +}; + +/** + * struct memblock_region - represents a memory region + * @base: physical address of the region + * @size: size of the region + * @flags: memory region attributes + * @nid: NUMA node id + */ +struct memblock_region { + phys_addr_t base; + phys_addr_t size; + enum memblock_flags flags; +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP + int nid; +#endif +}; + +/** + * struct memblock_type - collection of memory regions of certain type + * @cnt: number of regions + * @max: size of the allocated array + * @total_size: size of all regions + * @regions: array of regions + * @name: the memory type symbolic name + */ +struct memblock_type { + unsigned long cnt; + unsigned long max; + phys_addr_t total_size; + struct memblock_region *regions; + char *name; +}; + +/** + * struct memblock - memblock allocator metadata + * @bottom_up: is bottom up direction? + * @current_limit: physical address of the current allocation limit + * @memory: usabe memory regions + * @reserved: reserved memory regions + * @physmem: all physical memory + */ +struct memblock { + bool bottom_up; /* is bottom up direction? 
*/ + phys_addr_t current_limit; + struct memblock_type memory; + struct memblock_type reserved; +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP + struct memblock_type physmem; +#endif +}; + +extern struct memblock memblock; +extern int memblock_debug; + +#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK +#define __init_memblock __meminit +#define __initdata_memblock __meminitdata +void memblock_discard(void); +#else +#define __init_memblock +#define __initdata_memblock +#endif + +#define memblock_dbg(fmt, ...) \ + if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) + +phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, + phys_addr_t start, phys_addr_t end, + int nid, enum memblock_flags flags); +phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, + phys_addr_t size, phys_addr_t align); +void memblock_allow_resize(void); +int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); +int memblock_add(phys_addr_t base, phys_addr_t size); +int memblock_remove(phys_addr_t base, phys_addr_t size); +int memblock_free(phys_addr_t base, phys_addr_t size); +int memblock_reserve(phys_addr_t base, phys_addr_t size); +void memblock_trim_memory(phys_addr_t align); +bool memblock_overlaps_region(struct memblock_type *type, + phys_addr_t base, phys_addr_t size); +int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); +int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); +int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); +int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); +int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); +enum memblock_flags choose_memblock_flags(void); + +/* Low level functions */ +int memblock_add_range(struct memblock_type *type, + phys_addr_t base, phys_addr_t size, + int nid, enum memblock_flags flags); + +void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, + struct memblock_type *type_a, + struct memblock_type *type_b, phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid); + +void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags, + struct memblock_type *type_a, + struct memblock_type *type_b, phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid); + +void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, + phys_addr_t *out_end); + +void __memblock_free_early(phys_addr_t base, phys_addr_t size); +void __memblock_free_late(phys_addr_t base, phys_addr_t size); + +/** + * for_each_mem_range - iterate through memblock areas from type_a and not + * included in type_b. Or just type_a if type_b is NULL. + * @i: u64 used as loop variable + * @type_a: ptr to memblock_type to iterate + * @type_b: ptr to memblock_type which excludes from the iteration + * @nid: node selector, %NUMA_NO_NODE for all nodes + * @flags: pick from blocks based on memory attributes + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + */ +#define for_each_mem_range(i, type_a, type_b, nid, flags, \ + p_start, p_end, p_nid) \ + for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \ + p_start, p_end, p_nid); \ + i != (u64)ULLONG_MAX; \ + __next_mem_range(&i, nid, flags, type_a, type_b, \ + p_start, p_end, p_nid)) + +/** + * for_each_mem_range_rev - reverse iterate through memblock areas from + * type_a and not included in type_b. Or just type_a if type_b is NULL. 
+ * @i: u64 used as loop variable + * @type_a: ptr to memblock_type to iterate + * @type_b: ptr to memblock_type which excludes from the iteration + * @nid: node selector, %NUMA_NO_NODE for all nodes + * @flags: pick from blocks based on memory attributes + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + */ +#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \ + p_start, p_end, p_nid) \ + for (i = (u64)ULLONG_MAX, \ + __next_mem_range_rev(&i, nid, flags, type_a, type_b,\ + p_start, p_end, p_nid); \ + i != (u64)ULLONG_MAX; \ + __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ + p_start, p_end, p_nid)) + +/** + * for_each_reserved_mem_region - iterate over all reserved memblock areas + * @i: u64 used as loop variable + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * + * Walks over reserved areas of memblock. Available as soon as memblock + * is initialized. + */ +#define for_each_reserved_mem_region(i, p_start, p_end) \ + for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \ + i != (u64)ULLONG_MAX; \ + __next_reserved_mem_region(&i, p_start, p_end)) + +static inline bool memblock_is_hotpluggable(struct memblock_region *m) +{ + return m->flags & MEMBLOCK_HOTPLUG; +} + +static inline bool memblock_is_mirror(struct memblock_region *m) +{ + return m->flags & MEMBLOCK_MIRROR; +} + +static inline bool memblock_is_nomap(struct memblock_region *m) +{ + return m->flags & MEMBLOCK_NOMAP; +} + +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, + unsigned long *end_pfn); +void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, + unsigned long *out_end_pfn, int *out_nid); + +/** + * for_each_mem_pfn_range - early memory pfn range iterator + * @i: an integer used as loop variable + * @nid: node selector, %MAX_NUMNODES for all nodes + * @p_start: ptr to ulong for start pfn of the range, can be %NULL + * @p_end: ptr to ulong for end pfn of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + * + * Walks over configured memory ranges. + */ +#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ + for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ + i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +/** + * for_each_free_mem_range - iterate through free memblock areas + * @i: u64 used as loop variable + * @nid: node selector, %NUMA_NO_NODE for all nodes + * @flags: pick from blocks based on memory attributes + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + * + * Walks over free (memory && !reserved) areas of memblock. Available as + * soon as memblock is initialized. 
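+ *
+ * Typical use, walking every free range on every node (all names below
+ * come from this header or core kernel code):
+ *
+ *	phys_addr_t start, end;
+ *	u64 i;
+ *
+ *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ *				&start, &end, NULL)
+ *		pr_info("free: %pa..%pa\n", &start, &end);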
+ */ +#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \ + for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ + nid, flags, p_start, p_end, p_nid) + +/** + * for_each_free_mem_range_reverse - rev-iterate through free memblock areas + * @i: u64 used as loop variable + * @nid: node selector, %NUMA_NO_NODE for all nodes + * @flags: pick from blocks based on memory attributes + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + * + * Walks over free (memory && !reserved) areas of memblock in reverse + * order. Available as soon as memblock is initialized. + */ +#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \ + p_nid) \ + for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ + nid, flags, p_start, p_end, p_nid) + +static inline void memblock_set_region_flags(struct memblock_region *r, + enum memblock_flags flags) +{ + r->flags |= flags; +} + +static inline void memblock_clear_region_flags(struct memblock_region *r, + enum memblock_flags flags) +{ + r->flags &= ~flags; +} + +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +int memblock_set_node(phys_addr_t base, phys_addr_t size, + struct memblock_type *type, int nid); + +static inline void memblock_set_region_node(struct memblock_region *r, int nid) +{ + r->nid = nid; +} + +static inline int memblock_get_region_node(const struct memblock_region *r) +{ + return r->nid; +} +#else +static inline void memblock_set_region_node(struct memblock_region *r, int nid) +{ +} + +static inline int memblock_get_region_node(const struct memblock_region *r) +{ + return 0; +} +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); +phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); + +phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); + +/* + * Set the allocation direction to bottom-up or top-down. + */ +static inline void __init memblock_set_bottom_up(bool enable) +{ + memblock.bottom_up = enable; +} + +/* + * Check if the allocation direction is bottom-up or not. + * if this is true, that said, memblock will allocate memory + * in bottom-up direction. 
+ */ +static inline bool memblock_bottom_up(void) +{ + return memblock.bottom_up; +} + +/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */ +#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) +#define MEMBLOCK_ALLOC_ACCESSIBLE 0 + +phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, + phys_addr_t start, phys_addr_t end, + enum memblock_flags flags); +phys_addr_t memblock_alloc_base_nid(phys_addr_t size, + phys_addr_t align, phys_addr_t max_addr, + int nid, enum memblock_flags flags); +phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, + phys_addr_t max_addr); +phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, + phys_addr_t max_addr); +phys_addr_t memblock_phys_mem_size(void); +phys_addr_t memblock_reserved_size(void); +phys_addr_t memblock_mem_size(unsigned long limit_pfn); +phys_addr_t memblock_start_of_DRAM(void); +phys_addr_t memblock_end_of_DRAM(void); +void memblock_enforce_memory_limit(phys_addr_t memory_limit); +void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); +void memblock_mem_limit_remove_map(phys_addr_t limit); +bool memblock_is_memory(phys_addr_t addr); +bool memblock_is_map_memory(phys_addr_t addr); +bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size); +bool memblock_is_reserved(phys_addr_t addr); +bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); + +extern void __memblock_dump_all(void); + +static inline void memblock_dump_all(void) +{ + if (memblock_debug) + __memblock_dump_all(); +} + +/** + * memblock_set_current_limit - Set the current allocation limit to allow + * limiting allocations to what is currently + * accessible during boot + * @limit: New limit value (physical address) + */ +void memblock_set_current_limit(phys_addr_t limit); + + +phys_addr_t memblock_get_current_limit(void); + +/* + * pfn conversion functions + * + * While the memory MEMBLOCKs should always be page aligned, the reserved + * MEMBLOCKs may not be. This accessor attempt to provide a very clear + * idea of what they return for such non aligned MEMBLOCKs. 
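+ *
+ * For example, with 4 KiB pages an unaligned reserved region spanning
+ * [0x1800, 0x2800) reports base_pfn 1 and end_pfn 3 (every page it
+ * touches), while a memory region with the same bounds would report
+ * base_pfn 2 and end_pfn 2 (only pages it covers completely).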
+ */ + +/** + * memblock_region_memory_base_pfn - get the lowest pfn of the memory region + * @reg: memblock_region structure + * + * Return: the lowest pfn intersecting with the memory region + */ +static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg) +{ + return PFN_UP(reg->base); +} + +/** + * memblock_region_memory_end_pfn - get the end pfn of the memory region + * @reg: memblock_region structure + * + * Return: the end_pfn of the reserved region + */ +static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg) +{ + return PFN_DOWN(reg->base + reg->size); +} + +/** + * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region + * @reg: memblock_region structure + * + * Return: the lowest pfn intersecting with the reserved region + */ +static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg) +{ + return PFN_DOWN(reg->base); +} + +/** + * memblock_region_reserved_end_pfn - get the end pfn of the reserved region + * @reg: memblock_region structure + * + * Return: the end_pfn of the reserved region + */ +static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg) +{ + return PFN_UP(reg->base + reg->size); +} + +#define for_each_memblock(memblock_type, region) \ + for (region = memblock.memblock_type.regions; \ + region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ + region++) + +#define for_each_memblock_type(i, memblock_type, rgn) \ + for (i = 0, rgn = &memblock_type->regions[0]; \ + i < memblock_type->cnt; \ + i++, rgn = &memblock_type->regions[i]) + +#ifdef CONFIG_MEMTEST +extern void early_memtest(phys_addr_t start, phys_addr_t end); +#else +static inline void early_memtest(phys_addr_t start, phys_addr_t end) +{ +} +#endif +#else +static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) +{ + return 0; +} +#endif /* CONFIG_HAVE_MEMBLOCK */ + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_MEMBLOCK_H */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h new file mode 100644 index 000000000..cc6b6532e --- /dev/null +++ b/include/linux/memcontrol.h @@ -0,0 +1,1338 @@ +/* memcontrol.h - Memory Controller + * + * Copyright IBM Corporation, 2007 + * Author Balbir Singh + * + * Copyright 2007 OpenVZ SWsoft Inc + * Author: Pavel Emelianov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_MEMCONTROL_H +#define _LINUX_MEMCONTROL_H +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct mem_cgroup; +struct page; +struct mm_struct; +struct kmem_cache; + +/* Cgroup-specific page state, on top of universal node page state */ +enum memcg_stat_item { + MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS, + MEMCG_RSS, + MEMCG_RSS_HUGE, + MEMCG_SWAP, + MEMCG_SOCK, + /* XXX: why are these zone and not node counters? 
*/ + MEMCG_KERNEL_STACK_KB, + MEMCG_NR_STAT, +}; + +enum memcg_memory_event { + MEMCG_LOW, + MEMCG_HIGH, + MEMCG_MAX, + MEMCG_OOM, + MEMCG_OOM_KILL, + MEMCG_SWAP_MAX, + MEMCG_SWAP_FAIL, + MEMCG_NR_MEMORY_EVENTS, +}; + +enum mem_cgroup_protection { + MEMCG_PROT_NONE, + MEMCG_PROT_LOW, + MEMCG_PROT_MIN, +}; + +struct mem_cgroup_reclaim_cookie { + pg_data_t *pgdat; + int priority; + unsigned int generation; +}; + +#ifdef CONFIG_MEMCG + +#define MEM_CGROUP_ID_SHIFT 16 +#define MEM_CGROUP_ID_MAX USHRT_MAX + +struct mem_cgroup_id { + int id; + atomic_t ref; +}; + +/* + * Per memcg event counter is incremented at every pagein/pageout. With THP, + * it will be incremated by the number of pages. This counter is used for + * for trigger some periodic events. This is straightforward and better + * than using jiffies etc. to handle periodic memcg event. + */ +enum mem_cgroup_events_target { + MEM_CGROUP_TARGET_THRESH, + MEM_CGROUP_TARGET_SOFTLIMIT, + MEM_CGROUP_TARGET_NUMAINFO, + MEM_CGROUP_NTARGETS, +}; + +struct mem_cgroup_stat_cpu { + long count[MEMCG_NR_STAT]; + unsigned long events[NR_VM_EVENT_ITEMS]; + unsigned long nr_page_events; + unsigned long targets[MEM_CGROUP_NTARGETS]; +}; + +struct mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + /* scan generation, increased every round-trip */ + unsigned int generation; +}; + +struct lruvec_stat { + long count[NR_VM_NODE_STAT_ITEMS]; +}; + +/* + * Bitmap of shrinker::id corresponding to memcg-aware shrinkers, + * which have elements charged to this memcg. + */ +struct memcg_shrinker_map { + struct rcu_head rcu; + unsigned long map[0]; +}; + +/* + * per-zone information in memory controller. + */ +struct mem_cgroup_per_node { + struct lruvec lruvec; + + struct lruvec_stat __percpu *lruvec_stat_cpu; + atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS]; + + unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; + + struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; + +#ifdef CONFIG_MEMCG_KMEM + struct memcg_shrinker_map __rcu *shrinker_map; +#endif + struct rb_node tree_node; /* RB tree node */ + unsigned long usage_in_excess;/* Set to the value by which */ + /* the soft limit is exceeded*/ + bool on_tree; + bool congested; /* memcg has many dirty pages */ + /* backed by a congested BDI */ + + struct mem_cgroup *memcg; /* Back pointer, we cannot */ + /* use container_of */ +}; + +struct mem_cgroup_threshold { + struct eventfd_ctx *eventfd; + unsigned long threshold; +}; + +/* For threshold */ +struct mem_cgroup_threshold_ary { + /* An array index points to threshold just below or equal to usage. */ + int current_threshold; + /* Size of entries[] */ + unsigned int size; + /* Array of thresholds */ + struct mem_cgroup_threshold entries[0]; +}; + +struct mem_cgroup_thresholds { + /* Primary thresholds array */ + struct mem_cgroup_threshold_ary *primary; + /* + * Spare threshold array. + * This is needed to make mem_cgroup_unregister_event() "never fail". + * It must be able to store at least primary->size - 1 entries. + */ + struct mem_cgroup_threshold_ary *spare; +}; + +enum memcg_kmem_state { + KMEM_NONE, + KMEM_ALLOCATED, + KMEM_ONLINE, +}; + +#if defined(CONFIG_SMP) +struct memcg_padding { + char x[0]; +} ____cacheline_internodealigned_in_smp; +#define MEMCG_PADDING(name) struct memcg_padding name; +#else +#define MEMCG_PADDING(name) +#endif + +/* + * The memory controller data structure. The memory controller controls both + * page cache and RSS per cgroup. 
We would eventually like to provide + * statistics based on the statistics developed by Rik Van Riel for clock-pro, + * to help the administrator determine what knobs to tune. + */ +struct mem_cgroup { + struct cgroup_subsys_state css; + + /* Private memcg ID. Used to ID objects that outlive the cgroup */ + struct mem_cgroup_id id; + + /* Accounted resources */ + struct page_counter memory; + struct page_counter swap; + + /* Legacy consumer-oriented counters */ + struct page_counter memsw; + struct page_counter kmem; + struct page_counter tcpmem; + + /* Upper bound of normal memory consumption range */ + unsigned long high; + + /* Range enforcement for interrupt charges */ + struct work_struct high_work; + + unsigned long soft_limit; + + /* vmpressure notifications */ + struct vmpressure vmpressure; + + /* + * Should the accounting and control be hierarchical, per subtree? + */ + bool use_hierarchy; + + /* + * Should the OOM killer kill all belonging tasks, had it kill one? + */ + bool oom_group; + + /* protected by memcg_oom_lock */ + bool oom_lock; + int under_oom; + + int swappiness; + /* OOM-Killer disable */ + int oom_kill_disable; + + /* memory.events */ + struct cgroup_file events_file; + + /* handle for "memory.swap.events" */ + struct cgroup_file swap_events_file; + + /* protect arrays of thresholds */ + struct mutex thresholds_lock; + + /* thresholds for memory usage. RCU-protected */ + struct mem_cgroup_thresholds thresholds; + + /* thresholds for mem+swap usage. RCU-protected */ + struct mem_cgroup_thresholds memsw_thresholds; + + /* For oom notifier event fd */ + struct list_head oom_notify; + + /* + * Should we move charges of a task when a task is moved into this + * mem_cgroup ? And what type of charges should we move ? + */ + unsigned long move_charge_at_immigrate; + /* taken only while moving_account > 0 */ + spinlock_t move_lock; + unsigned long move_lock_flags; + + MEMCG_PADDING(_pad1_); + + /* + * set > 0 if pages under this cgroup are moving to other cgroup. + */ + atomic_t moving_account; + struct task_struct *move_lock_task; + + /* memory.stat */ + struct mem_cgroup_stat_cpu __percpu *stat_cpu; + + MEMCG_PADDING(_pad2_); + + atomic_long_t stat[MEMCG_NR_STAT]; + atomic_long_t events[NR_VM_EVENT_ITEMS]; + atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; + + unsigned long socket_pressure; + + /* Legacy tcp memory accounting */ + bool tcpmem_active; + int tcpmem_pressure; + +#ifdef CONFIG_MEMCG_KMEM + /* Index in the kmem_cache->memcg_params.memcg_caches array */ + int kmemcg_id; + enum memcg_kmem_state kmem_state; + struct list_head kmem_caches; +#endif + + int last_scanned_node; +#if MAX_NUMNODES > 1 + nodemask_t scan_nodes; + atomic_t numainfo_events; + atomic_t numainfo_updating; +#endif + +#ifdef CONFIG_CGROUP_WRITEBACK + struct list_head cgwb_list; + struct wb_domain cgwb_domain; +#endif + + /* List of events which userspace want to receive */ + struct list_head event_list; + spinlock_t event_list_lock; + + struct mem_cgroup_per_node *nodeinfo[0]; + /* WARNING: nodeinfo must be the last member here */ +}; + +/* + * size of first charge trial. "32" comes from vmscan.c's magic value. + * TODO: maybe necessary to use big numbers in big irons. 
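As an illustrative aside (function name invented, assumes a populated online node): the per-node part of the structure above hangs off nodeinfo[], and its LRU bookkeeping is reached the same way the real helpers further down (mem_cgroup_get_lru_size() and friends) reach it.

static unsigned long memcg_node_inactive_file(struct mem_cgroup *memcg, int nid)
{
	struct mem_cgroup_per_node *mz = memcg->nodeinfo[nid];
	unsigned long nr = 0;
	int zid;

	/* sum the per-zone LRU sizes kept in mem_cgroup_per_node */
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr += mz->lru_zone_size[zid][LRU_INACTIVE_FILE];
	return nr;
}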
+ */ +#define MEMCG_CHARGE_BATCH 32U + +extern struct mem_cgroup *root_mem_cgroup; + +static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) +{ + return (memcg == root_mem_cgroup); +} + +static inline bool mem_cgroup_disabled(void) +{ + return !cgroup_subsys_enabled(memory_cgrp_subsys); +} + +enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, + struct mem_cgroup *memcg); + +int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask, struct mem_cgroup **memcgp, + bool compound); +int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask, struct mem_cgroup **memcgp, + bool compound); +void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, + bool lrucare, bool compound); +void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, + bool compound); +void mem_cgroup_uncharge(struct page *page); +void mem_cgroup_uncharge_list(struct list_head *page_list); + +void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); + +static struct mem_cgroup_per_node * +mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid) +{ + return memcg->nodeinfo[nid]; +} + +/** + * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone + * @node: node of the wanted lruvec + * @memcg: memcg of the wanted lruvec + * + * Returns the lru list vector holding pages for a given @node or a given + * @memcg and @zone. This can be the node lruvec, if the memory controller + * is disabled. + */ +static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat, + struct mem_cgroup *memcg) +{ + struct mem_cgroup_per_node *mz; + struct lruvec *lruvec; + + if (mem_cgroup_disabled()) { + lruvec = node_lruvec(pgdat); + goto out; + } + + mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); + lruvec = &mz->lruvec; +out: + /* + * Since a node can be onlined after the mem_cgroup was created, + * we have to be prepared to initialize lruvec->pgdat here; + * and if offlined then reonlined, we need to reinitialize it. + */ + if (unlikely(lruvec->pgdat != pgdat)) + lruvec->pgdat = pgdat; + return lruvec; +} + +struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *); + +bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); +struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); + +struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); + +struct mem_cgroup *get_mem_cgroup_from_page(struct page *page); + +static inline +struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ + return css ? 
container_of(css, struct mem_cgroup, css) : NULL; +} + +static inline void mem_cgroup_put(struct mem_cgroup *memcg) +{ + if (memcg) + css_put(&memcg->css); +} + +#define mem_cgroup_from_counter(counter, member) \ + container_of(counter, struct mem_cgroup, member) + +struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, + struct mem_cgroup *, + struct mem_cgroup_reclaim_cookie *); +void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); +int mem_cgroup_scan_tasks(struct mem_cgroup *, + int (*)(struct task_struct *, void *), void *); + +static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) +{ + if (mem_cgroup_disabled()) + return 0; + + return memcg->id.id; +} +struct mem_cgroup *mem_cgroup_from_id(unsigned short id); + +static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) +{ + struct mem_cgroup_per_node *mz; + + if (mem_cgroup_disabled()) + return NULL; + + mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + return mz->memcg; +} + +/** + * parent_mem_cgroup - find the accounting parent of a memcg + * @memcg: memcg whose parent to find + * + * Returns the parent memcg, or NULL if this is the root or the memory + * controller is in legacy no-hierarchy mode. + */ +static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) +{ + if (!memcg->memory.parent) + return NULL; + return mem_cgroup_from_counter(memcg->memory.parent, memory); +} + +static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, + struct mem_cgroup *root) +{ + if (root == memcg) + return true; + if (!root->use_hierarchy) + return false; + return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); +} + +static inline bool mm_match_cgroup(struct mm_struct *mm, + struct mem_cgroup *memcg) +{ + struct mem_cgroup *task_memcg; + bool match = false; + + rcu_read_lock(); + task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (task_memcg) + match = mem_cgroup_is_descendant(task_memcg, memcg); + rcu_read_unlock(); + return match; +} + +struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); +ino_t page_cgroup_ino(struct page *page); + +static inline bool mem_cgroup_online(struct mem_cgroup *memcg) +{ + if (mem_cgroup_disabled()) + return true; + return !!(memcg->css.flags & CSS_ONLINE); +} + +/* + * For memory reclaim. 
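A minimal sketch of the charge protocol behind the try/commit/cancel declarations above; charge_new_page() and add_to_my_index() are invented names, and the compound argument is false for a base (order-0) page.

static int charge_new_page(struct page *page, struct mm_struct *mm,
			   gfp_t gfp, bool lrucare)
{
	struct mem_cgroup *memcg;
	int ret;

	ret = mem_cgroup_try_charge(page, mm, gfp, &memcg, false);
	if (ret)
		return ret;

	ret = add_to_my_index(page);	/* hypothetical insertion step */
	if (ret) {
		mem_cgroup_cancel_charge(page, memcg, false);
		return ret;
	}

	mem_cgroup_commit_charge(page, memcg, lrucare, false);
	return 0;
}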
+ */ +int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); + +void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, + int zid, int nr_pages); + +unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, + int nid, unsigned int lru_mask); + +static inline +unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) +{ + struct mem_cgroup_per_node *mz; + unsigned long nr_pages = 0; + int zid; + + mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + for (zid = 0; zid < MAX_NR_ZONES; zid++) + nr_pages += mz->lru_zone_size[zid][lru]; + return nr_pages; +} + +static inline +unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, + enum lru_list lru, int zone_idx) +{ + struct mem_cgroup_per_node *mz; + + mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + return mz->lru_zone_size[zone_idx][lru]; +} + +void mem_cgroup_handle_over_high(void); + +unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); + +void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, + struct task_struct *p); + +static inline void mem_cgroup_enter_user_fault(void) +{ + WARN_ON(current->in_user_fault); + current->in_user_fault = 1; +} + +static inline void mem_cgroup_exit_user_fault(void) +{ + WARN_ON(!current->in_user_fault); + current->in_user_fault = 0; +} + +static inline bool task_in_memcg_oom(struct task_struct *p) +{ + return p->memcg_in_oom; +} + +bool mem_cgroup_oom_synchronize(bool wait); +struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, + struct mem_cgroup *oom_domain); +void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); + +#ifdef CONFIG_MEMCG_SWAP +extern int do_swap_account; +#endif + +struct mem_cgroup *lock_page_memcg(struct page *page); +void __unlock_page_memcg(struct mem_cgroup *memcg); +void unlock_page_memcg(struct page *page); + +/* + * idx can be of type enum memcg_stat_item or node_stat_item. + * Keep in sync with memcg_exact_page_state(). + */ +static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, + int idx) +{ + long x = atomic_long_read(&memcg->stat[idx]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; +} + +/* idx can be of type enum memcg_stat_item or node_stat_item */ +static inline void __mod_memcg_state(struct mem_cgroup *memcg, + int idx, int val) +{ + long x; + + if (mem_cgroup_disabled()) + return; + + x = val + __this_cpu_read(memcg->stat_cpu->count[idx]); + if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { + atomic_long_add(x, &memcg->stat[idx]); + x = 0; + } + __this_cpu_write(memcg->stat_cpu->count[idx], x); +} + +/* idx can be of type enum memcg_stat_item or node_stat_item */ +static inline void mod_memcg_state(struct mem_cgroup *memcg, + int idx, int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_memcg_state(memcg, idx, val); + local_irq_restore(flags); +} + +/** + * mod_memcg_page_state - update page state statistics + * @page: the page + * @idx: page state item to account + * @val: number of pages (positive or negative) + * + * The @page must be locked or the caller must use lock_page_memcg() + * to prevent double accounting when the page is concurrently being + * moved to another memcg: + * + * lock_page(page) or lock_page_memcg(page) + * if (TestClearPageState(page)) + * mod_memcg_page_state(page, state, -1); + * unlock_page(page) or unlock_page_memcg(page) + * + * Kernel pages are an exception to this, since they'll never move. 
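Making the locking rule above concrete: note_page_cleaned() is an invented example, and real dirty accounting also updates node and writeback counters; only the memcg side is shown here.

static void note_page_cleaned(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = lock_page_memcg(page);	/* pins page->mem_cgroup */
	if (TestClearPageDirty(page))
		mod_memcg_page_state(page, NR_FILE_DIRTY, -1);
	__unlock_page_memcg(memcg);
}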
+ */ +static inline void __mod_memcg_page_state(struct page *page, + int idx, int val) +{ + if (page->mem_cgroup) + __mod_memcg_state(page->mem_cgroup, idx, val); +} + +static inline void mod_memcg_page_state(struct page *page, + int idx, int val) +{ + if (page->mem_cgroup) + mod_memcg_state(page->mem_cgroup, idx, val); +} + +static inline unsigned long lruvec_page_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + struct mem_cgroup_per_node *pn; + long x; + + if (mem_cgroup_disabled()) + return node_page_state(lruvec_pgdat(lruvec), idx); + + pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + x = atomic_long_read(&pn->lruvec_stat[idx]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; +} + +static inline void __mod_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + struct mem_cgroup_per_node *pn; + long x; + + /* Update node */ + __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); + + if (mem_cgroup_disabled()) + return; + + pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + + /* Update memcg */ + __mod_memcg_state(pn->memcg, idx, val); + + /* Update lruvec */ + x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); + if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { + atomic_long_add(x, &pn->lruvec_stat[idx]); + x = 0; + } + __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); +} + +static inline void mod_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_lruvec_state(lruvec, idx, val); + local_irq_restore(flags); +} + +static inline void __mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) +{ + pg_data_t *pgdat = page_pgdat(page); + struct lruvec *lruvec; + + /* Untracked pages have no memcg, no lruvec. 
Update only the node */ + if (!page->mem_cgroup) { + __mod_node_page_state(pgdat, idx, val); + return; + } + + lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup); + __mod_lruvec_state(lruvec, idx, val); +} + +static inline void mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_lruvec_page_state(page, idx, val); + local_irq_restore(flags); +} + +unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, + gfp_t gfp_mask, + unsigned long *total_scanned); + +static inline void __count_memcg_events(struct mem_cgroup *memcg, + enum vm_event_item idx, + unsigned long count) +{ + unsigned long x; + + if (mem_cgroup_disabled()) + return; + + x = count + __this_cpu_read(memcg->stat_cpu->events[idx]); + if (unlikely(x > MEMCG_CHARGE_BATCH)) { + atomic_long_add(x, &memcg->events[idx]); + x = 0; + } + __this_cpu_write(memcg->stat_cpu->events[idx], x); +} + +static inline void count_memcg_events(struct mem_cgroup *memcg, + enum vm_event_item idx, + unsigned long count) +{ + unsigned long flags; + + local_irq_save(flags); + __count_memcg_events(memcg, idx, count); + local_irq_restore(flags); +} + +static inline void count_memcg_page_event(struct page *page, + enum vm_event_item idx) +{ + if (page->mem_cgroup) + count_memcg_events(page->mem_cgroup, idx, 1); +} + +static inline void count_memcg_event_mm(struct mm_struct *mm, + enum vm_event_item idx) +{ + struct mem_cgroup *memcg; + + if (mem_cgroup_disabled()) + return; + + rcu_read_lock(); + memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (likely(memcg)) + count_memcg_events(memcg, idx, 1); + rcu_read_unlock(); +} + +static inline void memcg_memory_event(struct mem_cgroup *memcg, + enum memcg_memory_event event) +{ + atomic_long_inc(&memcg->memory_events[event]); + cgroup_file_notify(&memcg->events_file); +} + +static inline void memcg_memory_event_mm(struct mm_struct *mm, + enum memcg_memory_event event) +{ + struct mem_cgroup *memcg; + + if (mem_cgroup_disabled()) + return; + + rcu_read_lock(); + memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (likely(memcg)) + memcg_memory_event(memcg, event); + rcu_read_unlock(); +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +void mem_cgroup_split_huge_fixup(struct page *head); +#endif + +#else /* CONFIG_MEMCG */ + +#define MEM_CGROUP_ID_SHIFT 0 +#define MEM_CGROUP_ID_MAX 0 + +struct mem_cgroup; + +static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) +{ + return true; +} + +static inline bool mem_cgroup_disabled(void) +{ + return true; +} + +static inline void memcg_memory_event(struct mem_cgroup *memcg, + enum memcg_memory_event event) +{ +} + +static inline void memcg_memory_event_mm(struct mm_struct *mm, + enum memcg_memory_event event) +{ +} + +static inline enum mem_cgroup_protection mem_cgroup_protected( + struct mem_cgroup *root, struct mem_cgroup *memcg) +{ + return MEMCG_PROT_NONE; +} + +static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask, + struct mem_cgroup **memcgp, + bool compound) +{ + *memcgp = NULL; + return 0; +} + +static inline int mem_cgroup_try_charge_delay(struct page *page, + struct mm_struct *mm, + gfp_t gfp_mask, + struct mem_cgroup **memcgp, + bool compound) +{ + *memcgp = NULL; + return 0; +} + +static inline void mem_cgroup_commit_charge(struct page *page, + struct mem_cgroup *memcg, + bool lrucare, bool compound) +{ +} + +static inline void mem_cgroup_cancel_charge(struct page *page, + struct mem_cgroup 
*memcg, + bool compound) +{ +} + +static inline void mem_cgroup_uncharge(struct page *page) +{ +} + +static inline void mem_cgroup_uncharge_list(struct list_head *page_list) +{ +} + +static inline void mem_cgroup_migrate(struct page *old, struct page *new) +{ +} + +static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat, + struct mem_cgroup *memcg) +{ + return node_lruvec(pgdat); +} + +static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, + struct pglist_data *pgdat) +{ + return &pgdat->lruvec; +} + +static inline bool mm_match_cgroup(struct mm_struct *mm, + struct mem_cgroup *memcg) +{ + return true; +} + +static inline bool task_in_mem_cgroup(struct task_struct *task, + const struct mem_cgroup *memcg) +{ + return true; +} + +static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) +{ + return NULL; +} + +static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page) +{ + return NULL; +} + +static inline void mem_cgroup_put(struct mem_cgroup *memcg) +{ +} + +static inline struct mem_cgroup * +mem_cgroup_iter(struct mem_cgroup *root, + struct mem_cgroup *prev, + struct mem_cgroup_reclaim_cookie *reclaim) +{ + return NULL; +} + +static inline void mem_cgroup_iter_break(struct mem_cgroup *root, + struct mem_cgroup *prev) +{ +} + +static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, + int (*fn)(struct task_struct *, void *), void *arg) +{ + return 0; +} + +static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) +{ + return 0; +} + +static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) +{ + WARN_ON_ONCE(id); + /* XXX: This should always return root_mem_cgroup */ + return NULL; +} + +static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) +{ + return NULL; +} + +static inline bool mem_cgroup_online(struct mem_cgroup *memcg) +{ + return true; +} + +static inline unsigned long +mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) +{ + return 0; +} +static inline +unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, + enum lru_list lru, int zone_idx) +{ + return 0; +} + +static inline unsigned long +mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, + int nid, unsigned int lru_mask) +{ + return 0; +} + +static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) +{ + return 0; +} + +static inline void +mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) +{ +} + +static inline struct mem_cgroup *lock_page_memcg(struct page *page) +{ + return NULL; +} + +static inline void __unlock_page_memcg(struct mem_cgroup *memcg) +{ +} + +static inline void unlock_page_memcg(struct page *page) +{ +} + +static inline void mem_cgroup_handle_over_high(void) +{ +} + +static inline void mem_cgroup_enter_user_fault(void) +{ +} + +static inline void mem_cgroup_exit_user_fault(void) +{ +} + +static inline bool task_in_memcg_oom(struct task_struct *p) +{ + return false; +} + +static inline bool mem_cgroup_oom_synchronize(bool wait) +{ + return false; +} + +static inline struct mem_cgroup *mem_cgroup_get_oom_group( + struct task_struct *victim, struct mem_cgroup *oom_domain) +{ + return NULL; +} + +static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) +{ +} + +static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, + int idx) +{ + return 0; +} + +static inline void __mod_memcg_state(struct mem_cgroup *memcg, + int idx, + int nr) +{ +} + +static inline void mod_memcg_state(struct mem_cgroup *memcg, + 
int idx, + int nr) +{ +} + +static inline void __mod_memcg_page_state(struct page *page, + int idx, + int nr) +{ +} + +static inline void mod_memcg_page_state(struct page *page, + int idx, + int nr) +{ +} + +static inline unsigned long lruvec_page_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + return node_page_state(lruvec_pgdat(lruvec), idx); +} + +static inline void __mod_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); +} + +static inline void mod_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + mod_node_page_state(lruvec_pgdat(lruvec), idx, val); +} + +static inline void __mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) +{ + __mod_node_page_state(page_pgdat(page), idx, val); +} + +static inline void mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) +{ + mod_node_page_state(page_pgdat(page), idx, val); +} + +static inline +unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, + gfp_t gfp_mask, + unsigned long *total_scanned) +{ + return 0; +} + +static inline void mem_cgroup_split_huge_fixup(struct page *head) +{ +} + +static inline void count_memcg_events(struct mem_cgroup *memcg, + enum vm_event_item idx, + unsigned long count) +{ +} + +static inline void count_memcg_page_event(struct page *page, + int idx) +{ +} + +static inline +void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) +{ +} +#endif /* CONFIG_MEMCG */ + +/* idx can be of type enum memcg_stat_item or node_stat_item */ +static inline void __inc_memcg_state(struct mem_cgroup *memcg, + int idx) +{ + __mod_memcg_state(memcg, idx, 1); +} + +/* idx can be of type enum memcg_stat_item or node_stat_item */ +static inline void __dec_memcg_state(struct mem_cgroup *memcg, + int idx) +{ + __mod_memcg_state(memcg, idx, -1); +} + +/* idx can be of type enum memcg_stat_item or node_stat_item */ +static inline void __inc_memcg_page_state(struct page *page, + int idx) +{ + __mod_memcg_page_state(page, idx, 1); +} + +/* idx can be of type enum memcg_stat_item or node_stat_item */ +static inline void __dec_memcg_page_state(struct page *page, + int idx) +{ + __mod_memcg_page_state(page, idx, -1); +} + +static inline void __inc_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + __mod_lruvec_state(lruvec, idx, 1); +} + +static inline void __dec_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + __mod_lruvec_state(lruvec, idx, -1); +} + +static inline void __inc_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + __mod_lruvec_page_state(page, idx, 1); +} + +static inline void __dec_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + __mod_lruvec_page_state(page, idx, -1); +} + +/* idx can be of type enum memcg_stat_item or node_stat_item */ +static inline void inc_memcg_state(struct mem_cgroup *memcg, + int idx) +{ + mod_memcg_state(memcg, idx, 1); +} + +/* idx can be of type enum memcg_stat_item or node_stat_item */ +static inline void dec_memcg_state(struct mem_cgroup *memcg, + int idx) +{ + mod_memcg_state(memcg, idx, -1); +} + +/* idx can be of type enum memcg_stat_item or node_stat_item */ +static inline void inc_memcg_page_state(struct page *page, + int idx) +{ + mod_memcg_page_state(page, idx, 1); +} + +/* idx can be of type enum memcg_stat_item or node_stat_item */ +static inline void dec_memcg_page_state(struct page *page, + int idx) +{ + 
mod_memcg_page_state(page, idx, -1); +} + +static inline void inc_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + mod_lruvec_state(lruvec, idx, 1); +} + +static inline void dec_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + mod_lruvec_state(lruvec, idx, -1); +} + +static inline void inc_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + mod_lruvec_page_state(page, idx, 1); +} + +static inline void dec_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + mod_lruvec_page_state(page, idx, -1); +} + +#ifdef CONFIG_CGROUP_WRITEBACK + +struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); +void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, + unsigned long *pwriteback); + +#else /* CONFIG_CGROUP_WRITEBACK */ + +static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) +{ + return NULL; +} + +static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, + unsigned long *pfilepages, + unsigned long *pheadroom, + unsigned long *pdirty, + unsigned long *pwriteback) +{ +} + +#endif /* CONFIG_CGROUP_WRITEBACK */ + +struct sock; +bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +#ifdef CONFIG_MEMCG +extern struct static_key_false memcg_sockets_enabled_key; +#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) +void mem_cgroup_sk_alloc(struct sock *sk); +void mem_cgroup_sk_free(struct sock *sk); +static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) +{ + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure) + return true; + do { + if (time_before(jiffies, memcg->socket_pressure)) + return true; + } while ((memcg = parent_mem_cgroup(memcg))); + return false; +} +#else +#define mem_cgroup_sockets_enabled 0 +static inline void mem_cgroup_sk_alloc(struct sock *sk) { }; +static inline void mem_cgroup_sk_free(struct sock *sk) { }; +static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) +{ + return false; +} +#endif + +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); +void memcg_kmem_put_cache(struct kmem_cache *cachep); +int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, + struct mem_cgroup *memcg); +int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); +void memcg_kmem_uncharge(struct page *page, int order); + +#ifdef CONFIG_MEMCG_KMEM +extern struct static_key_false memcg_kmem_enabled_key; +extern struct workqueue_struct *memcg_kmem_cache_wq; + +extern int memcg_nr_cache_ids; +void memcg_get_cache_ids(void); +void memcg_put_cache_ids(void); + +/* + * Helper macro to loop through all memcg-specific caches. Callers must still + * check if the cache is valid (it is either valid or NULL). + * the slab_mutex must be held when looping through those caches + */ +#define for_each_memcg_cache_index(_idx) \ + for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++) + +static inline bool memcg_kmem_enabled(void) +{ + return static_branch_unlikely(&memcg_kmem_enabled_key); +} + +/* + * helper for accessing a memcg's index. It will be used as an index in the + * child cache array in kmem_cache, and also to derive its name. This function + * will return -1 when this is not a kmem-limited memcg. + */ +static inline int memcg_cache_id(struct mem_cgroup *memcg) +{ + return memcg ? 
memcg->kmemcg_id : -1; +} + +extern int memcg_expand_shrinker_maps(int new_id); + +extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, + int nid, int shrinker_id); +#else +#define for_each_memcg_cache_index(_idx) \ + for (; NULL; ) + +static inline bool memcg_kmem_enabled(void) +{ + return false; +} + +static inline int memcg_cache_id(struct mem_cgroup *memcg) +{ + return -1; +} + +static inline void memcg_get_cache_ids(void) +{ +} + +static inline void memcg_put_cache_ids(void) +{ +} + +static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, + int nid, int shrinker_id) { } +#endif /* CONFIG_MEMCG_KMEM */ + +#endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memfd.h b/include/linux/memfd.h new file mode 100644 index 000000000..4f1600413 --- /dev/null +++ b/include/linux/memfd.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MEMFD_H +#define __LINUX_MEMFD_H + +#include + +#ifdef CONFIG_MEMFD_CREATE +extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg); +#else +static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a) +{ + return -EINVAL; +} +#endif + +#endif /* __LINUX_MEMFD_H */ diff --git a/include/linux/memory.h b/include/linux/memory.h new file mode 100644 index 000000000..5c411365c --- /dev/null +++ b/include/linux/memory.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/memory.h - generic memory definition + * + * This is mainly for topological representation. We define the + * basic "struct memory_block" here, which can be embedded in per-arch + * definitions or NUMA information. + * + * Basic handling of the devices is done in drivers/base/memory.c + * and system devices are handled in drivers/base/sys.c. + * + * Memory block are exported via sysfs in the class/memory/devices/ + * directory. + * + */ +#ifndef _LINUX_MEMORY_H_ +#define _LINUX_MEMORY_H_ + +#include +#include +#include +#include + +#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS) + +struct memory_block { + unsigned long start_section_nr; + unsigned long end_section_nr; + unsigned long state; /* serialized by the dev->lock */ + int section_count; /* serialized by mem_sysfs_mutex */ + int online_type; /* for passing data to online routine */ + int phys_device; /* to which fru does this belong? */ + void *hw; /* optional pointer to fw/hw data */ + int (*phys_callback)(struct memory_block *); + struct device dev; + int nid; /* NID for this memory block */ +}; + +int arch_get_memory_phys_device(unsigned long start_pfn); +unsigned long memory_block_size_bytes(void); +int set_memory_block_size_order(unsigned int order); + +/* These states are exposed to userspace as text strings in sysfs */ +#define MEM_ONLINE (1<<0) /* exposed to userspace */ +#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */ +#define MEM_OFFLINE (1<<2) /* exposed to userspace */ +#define MEM_GOING_ONLINE (1<<3) +#define MEM_CANCEL_ONLINE (1<<4) +#define MEM_CANCEL_OFFLINE (1<<5) + +struct memory_notify { + unsigned long start_pfn; + unsigned long nr_pages; + int status_change_nid_normal; + int status_change_nid_high; + int status_change_nid; +}; + +/* + * During pageblock isolation, count the number of pages within the + * range [start_pfn, start_pfn + nr_pages) which are owned by code + * in the notifier chain. 
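A sketch of how a driver might consume the MEM_* events and struct memory_notify defined above; the callback and notifier_block names are invented, and register_memory_notifier() is declared just below.

static int my_mem_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct memory_notify *mn = data;

	switch (action) {
	case MEM_GOING_OFFLINE:
		pr_info("pfns [%lx, +%lx) going offline\n",
			mn->start_pfn, mn->nr_pages);
		break;
	case MEM_CANCEL_OFFLINE:
	case MEM_ONLINE:
		/* undo or redo any per-range bookkeeping here */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_mem_nb = {
	.notifier_call	= my_mem_notifier,
};

/* in driver init code: register_memory_notifier(&my_mem_nb); */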
+ */ +#define MEM_ISOLATE_COUNT (1<<0) + +struct memory_isolate_notify { + unsigned long start_pfn; /* Start of range to check */ + unsigned int nr_pages; /* # pages in range to check */ + unsigned int pages_found; /* # pages owned found by callbacks */ +}; + +struct notifier_block; +struct mem_section; + +/* + * Priorities for the hotplug memory callback routines (stored in decreasing + * order in the callback chain) + */ +#define SLAB_CALLBACK_PRI 1 +#define IPC_CALLBACK_PRI 10 + +#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE +static inline int memory_dev_init(void) +{ + return 0; +} +static inline int register_memory_notifier(struct notifier_block *nb) +{ + return 0; +} +static inline void unregister_memory_notifier(struct notifier_block *nb) +{ +} +static inline int memory_notify(unsigned long val, void *v) +{ + return 0; +} +static inline int register_memory_isolate_notifier(struct notifier_block *nb) +{ + return 0; +} +static inline void unregister_memory_isolate_notifier(struct notifier_block *nb) +{ +} +static inline int memory_isolate_notify(unsigned long val, void *v) +{ + return 0; +} +#else +extern int register_memory_notifier(struct notifier_block *nb); +extern void unregister_memory_notifier(struct notifier_block *nb); +extern int register_memory_isolate_notifier(struct notifier_block *nb); +extern void unregister_memory_isolate_notifier(struct notifier_block *nb); +int create_memory_block_devices(unsigned long start, unsigned long size); +void remove_memory_block_devices(unsigned long start, unsigned long size); +extern int memory_dev_init(void); +extern int memory_notify(unsigned long val, void *v); +extern int memory_isolate_notify(unsigned long val, void *v); +extern struct memory_block *find_memory_block_hinted(struct mem_section *, + struct memory_block *); +extern struct memory_block *find_memory_block(struct mem_section *); +typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); +extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func); +#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION< +#include +#include +#include + +struct page; +struct zone; +struct pglist_data; +struct mem_section; +struct memory_block; +struct resource; +struct vmem_altmap; + +#ifdef CONFIG_MEMORY_HOTPLUG +/* + * Return page for the valid pfn only if the page is online. All pfn + * walkers which rely on the fully initialized page->flags and others + * should use this rather than pfn_valid && pfn_to_page + */ +#define pfn_to_online_page(pfn) \ +({ \ + struct page *___page = NULL; \ + unsigned long ___pfn = pfn; \ + unsigned long ___nr = pfn_to_section_nr(___pfn); \ + \ + if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \ + pfn_valid_within(___pfn)) \ + ___page = pfn_to_page(___pfn); \ + ___page; \ +}) + +/* + * Types for free bootmem stored in page->lru.next. These have to be in + * some random range in unsigned long space for debugging purposes. + */ +enum { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, + SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, + MIX_SECTION_INFO, + NODE_INFO, + MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO, +}; + +/* Types for control the zone type of onlined and offlined memory */ +enum { + MMOP_OFFLINE = -1, + MMOP_ONLINE_KEEP, + MMOP_ONLINE_KERNEL, + MMOP_ONLINE_MOVABLE, +}; + +/* + * Zone resizing functions + * + * Note: any attempt to resize a zone should has pgdat_resize_lock() + * zone_span_writelock() both held. This ensure the size of a zone + * can't be changed while pgdat_resize_lock() held. 
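The read side of the zone-span seqlock described above is a retry loop around the helpers defined just below; snapshot_zone_span() is an invented consumer.

static void snapshot_zone_span(struct zone *zone,
			       unsigned long *start_pfn,
			       unsigned long *nr_pages)
{
	unsigned seq;

	do {
		seq = zone_span_seqbegin(zone);
		*start_pfn = zone->zone_start_pfn;
		*nr_pages  = zone->spanned_pages;
	} while (zone_span_seqretry(zone, seq));
}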
+ */ +static inline unsigned zone_span_seqbegin(struct zone *zone) +{ + return read_seqbegin(&zone->span_seqlock); +} +static inline int zone_span_seqretry(struct zone *zone, unsigned iv) +{ + return read_seqretry(&zone->span_seqlock, iv); +} +static inline void zone_span_writelock(struct zone *zone) +{ + write_seqlock(&zone->span_seqlock); +} +static inline void zone_span_writeunlock(struct zone *zone) +{ + write_sequnlock(&zone->span_seqlock); +} +static inline void zone_seqlock_init(struct zone *zone) +{ + seqlock_init(&zone->span_seqlock); +} +extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); +extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); +extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); +/* VM interface that may be used by firmware interface */ +extern int online_pages(unsigned long, unsigned long, int); +extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, + unsigned long *valid_start, unsigned long *valid_end); +extern void __offline_isolated_pages(unsigned long, unsigned long); + +typedef void (*online_page_callback_t)(struct page *page); + +extern int set_online_page_callback(online_page_callback_t callback); +extern int restore_online_page_callback(online_page_callback_t callback); + +extern void __online_page_set_limits(struct page *page); +extern void __online_page_increment_counters(struct page *page); +extern void __online_page_free(struct page *page); + +extern int try_online_node(int nid); + +extern bool memhp_auto_online; +/* If movable_node boot option specified */ +extern bool movable_node_enabled; +static inline bool movable_node_is_enabled(void) +{ + return movable_node_enabled; +} + +extern void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap); +extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, + struct vmem_altmap *altmap); + +/* reasonably generic interface to expand the physical pages */ +extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, + struct vmem_altmap *altmap, bool want_memblock); + +#ifndef CONFIG_ARCH_HAS_ADD_PAGES +static inline int add_pages(int nid, unsigned long start_pfn, + unsigned long nr_pages, struct vmem_altmap *altmap, + bool want_memblock) +{ + return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); +} +#else /* ARCH_HAS_ADD_PAGES */ +int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, + struct vmem_altmap *altmap, bool want_memblock); +#endif /* ARCH_HAS_ADD_PAGES */ + +#ifdef CONFIG_NUMA +extern int memory_add_physaddr_to_nid(u64 start); +#else +static inline int memory_add_physaddr_to_nid(u64 start) +{ + return 0; +} +#endif + +#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION +/* + * For supporting node-hotadd, we have to allocate a new pgdat. + * + * If an arch has generic style NODE_DATA(), + * node_data[nid] = kzalloc() works well. But it depends on the architecture. + * + * In general, generic_alloc_nodedata() is used. + * Now, arch_free_nodedata() is just defined for error path of node_hot_add. 
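Going back to set_online_page_callback() above: a balloon-style driver can intercept freshly onlined pages, roughly as sketched here. The names and keep-the-page policy are invented; the limits, counters, then free sequence mirrors what the default online path does.

static bool my_driver_keeps_page(struct page *page)
{
	return false;			/* placeholder policy */
}

static void my_online_page(struct page *page)
{
	__online_page_set_limits(page);

	if (my_driver_keeps_page(page))
		return;			/* withhold it from the buddy allocator */

	__online_page_increment_counters(page);
	__online_page_free(page);
}

/* driver init: set_online_page_callback(&my_online_page); */
/* driver exit: restore_online_page_callback(&my_online_page); */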
+ * + */ +extern pg_data_t *arch_alloc_nodedata(int nid); +extern void arch_free_nodedata(pg_data_t *pgdat); +extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); + +#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + +#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) +#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat) + +#ifdef CONFIG_NUMA +/* + * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat. + * XXX: kmalloc_node() can't work well to get new node's memory at this time. + * Because, pgdat for the new node is not allocated/initialized yet itself. + * To use new node's memory, more consideration will be necessary. + */ +#define generic_alloc_nodedata(nid) \ +({ \ + kzalloc(sizeof(pg_data_t), GFP_KERNEL); \ +}) +/* + * This definition is just for error path in node hotadd. + * For node hotremove, we have to replace this. + */ +#define generic_free_nodedata(pgdat) kfree(pgdat) + +extern pg_data_t *node_data[]; +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ + node_data[nid] = pgdat; +} + +#else /* !CONFIG_NUMA */ + +/* never called */ +static inline pg_data_t *generic_alloc_nodedata(int nid) +{ + BUG(); + return NULL; +} +static inline void generic_free_nodedata(pg_data_t *pgdat) +{ +} +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ +} +#endif /* CONFIG_NUMA */ +#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + +#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE +extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat); +#else +static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) +{ +} +#endif +extern void put_page_bootmem(struct page *page); +extern void get_page_bootmem(unsigned long ingo, struct page *page, + unsigned long type); + +void get_online_mems(void); +void put_online_mems(void); + +void mem_hotplug_begin(void); +void mem_hotplug_done(void); + +extern void set_zone_contiguous(struct zone *zone); +extern void clear_zone_contiguous(struct zone *zone); + +#else /* ! CONFIG_MEMORY_HOTPLUG */ +#define pfn_to_online_page(pfn) \ +({ \ + struct page *___page = NULL; \ + if (pfn_valid(pfn)) \ + ___page = pfn_to_page(pfn); \ + ___page; \ + }) + +static inline unsigned zone_span_seqbegin(struct zone *zone) +{ + return 0; +} +static inline int zone_span_seqretry(struct zone *zone, unsigned iv) +{ + return 0; +} +static inline void zone_span_writelock(struct zone *zone) {} +static inline void zone_span_writeunlock(struct zone *zone) {} +static inline void zone_seqlock_init(struct zone *zone) {} + +static inline int mhp_notimplemented(const char *func) +{ + printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func); + dump_stack(); + return -ENOSYS; +} + +static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) +{ +} + +static inline int try_online_node(int nid) +{ + return 0; +} + +static inline void get_online_mems(void) {} +static inline void put_online_mems(void) {} + +static inline void mem_hotplug_begin(void) {} +static inline void mem_hotplug_done(void) {} + +static inline bool movable_node_is_enabled(void) +{ + return false; +} +#endif /* ! 
CONFIG_MEMORY_HOTPLUG */ + +#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) +/* + * pgdat resizing functions + */ +static inline +void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_lock_irqsave(&pgdat->node_size_lock, *flags); +} +static inline +void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); +} +static inline +void pgdat_resize_init(struct pglist_data *pgdat) +{ + spin_lock_init(&pgdat->node_size_lock); +} +#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ +/* + * Stub functions for when hotplug is off + */ +static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} +static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} +static inline void pgdat_resize_init(struct pglist_data *pgdat) {} +#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ + +#ifdef CONFIG_MEMORY_HOTREMOVE + +extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); +extern void try_offline_node(int nid); +extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); +extern void remove_memory(int nid, u64 start, u64 size); +extern void __remove_memory(int nid, u64 start, u64 size); + +#else +static inline bool is_mem_section_removable(unsigned long pfn, + unsigned long nr_pages) +{ + return false; +} + +static inline void try_offline_node(int nid) {} + +static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) +{ + return -EINVAL; +} + +static inline void remove_memory(int nid, u64 start, u64 size) {} +static inline void __remove_memory(int nid, u64 start, u64 size) {} +#endif /* CONFIG_MEMORY_HOTREMOVE */ + +extern void __ref free_area_init_core_hotplug(int nid); +extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, + void *arg, int (*func)(struct memory_block *, void *)); +extern int __add_memory(int nid, u64 start, u64 size); +extern int add_memory(int nid, u64 start, u64 size); +extern int add_memory_resource(int nid, struct resource *resource, bool online); +extern int arch_add_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap, bool want_memblock); +extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, + unsigned long nr_pages, struct vmem_altmap *altmap); +extern void remove_pfn_range_from_zone(struct zone *zone, + unsigned long start_pfn, + unsigned long nr_pages); +extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); +extern bool is_memblock_offlined(struct memory_block *mem); +extern int sparse_add_one_section(int nid, unsigned long start_pfn, + struct vmem_altmap *altmap); +extern void sparse_remove_one_section(struct mem_section *ms, + unsigned long map_offset, struct vmem_altmap *altmap); +extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, + unsigned long pnum); +extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, + int online_type); +extern struct zone *zone_for_pfn_range(int online_type, int nid, + unsigned long start_pfn, unsigned long nr_pages); +#endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h new file mode 100644 index 000000000..5228c62af --- /dev/null +++ b/include/linux/mempolicy.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NUMA memory policies for Linux. 
+ * Copyright 2003,2004 Andi Kleen SuSE Labs + */ +#ifndef _LINUX_MEMPOLICY_H +#define _LINUX_MEMPOLICY_H 1 + + +#include +#include +#include +#include +#include +#include +#include +#include + +struct mm_struct; + +#ifdef CONFIG_NUMA + +/* + * Describe a memory policy. + * + * A mempolicy can be either associated with a process or with a VMA. + * For VMA related allocations the VMA policy is preferred, otherwise + * the process policy is used. Interrupts ignore the memory policy + * of the current process. + * + * Locking policy for interlave: + * In process context there is no locking because only the process accesses + * its own state. All vma manipulation is somewhat protected by a down_read on + * mmap_sem. + * + * Freeing policy: + * Mempolicy objects are reference counted. A mempolicy will be freed when + * mpol_put() decrements the reference count to zero. + * + * Duplicating policy objects: + * mpol_dup() allocates a new mempolicy and copies the specified mempolicy + * to the new storage. The reference count of the new object is initialized + * to 1, representing the caller of mpol_dup(). + */ +struct mempolicy { + atomic_t refcnt; + unsigned short mode; /* See MPOL_* above */ + unsigned short flags; /* See set_mempolicy() MPOL_F_* above */ + union { + short preferred_node; /* preferred */ + nodemask_t nodes; /* interleave/bind */ + /* undefined for default */ + } v; + union { + nodemask_t cpuset_mems_allowed; /* relative to these nodes */ + nodemask_t user_nodemask; /* nodemask passed by user */ + } w; +}; + +/* + * Support for managing mempolicy data objects (clone, copy, destroy) + * The default fast path of a NULL MPOL_DEFAULT policy is always inlined. + */ + +extern void __mpol_put(struct mempolicy *pol); +static inline void mpol_put(struct mempolicy *pol) +{ + if (pol) + __mpol_put(pol); +} + +/* + * Does mempolicy pol need explicit unref after use? + * Currently only needed for shared policies. + */ +static inline int mpol_needs_cond_ref(struct mempolicy *pol) +{ + return (pol && (pol->flags & MPOL_F_SHARED)); +} + +static inline void mpol_cond_put(struct mempolicy *pol) +{ + if (mpol_needs_cond_ref(pol)) + __mpol_put(pol); +} + +extern struct mempolicy *__mpol_dup(struct mempolicy *pol); +static inline struct mempolicy *mpol_dup(struct mempolicy *pol) +{ + if (pol) + pol = __mpol_dup(pol); + return pol; +} + +#define vma_policy(vma) ((vma)->vm_policy) + +static inline void mpol_get(struct mempolicy *pol) +{ + if (pol) + atomic_inc(&pol->refcnt); +} + +extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b); +static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) +{ + if (a == b) + return true; + return __mpol_equal(a, b); +} + +/* + * Tree of shared policies for a shared memory region. + * Maintain the policies in a pseudo mm that contains vmas. The vmas + * carry the policy. As a special twist the pseudo mm is indexed in pages, not + * bytes, so that we can work with shared memory segments bigger than + * unsigned long. 
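A sketch of the reference-counting conventions above; policy_matches() is an invented helper and assumes the caller already holds a reference on src.

static bool policy_matches(struct mempolicy *src, struct mempolicy *other)
{
	struct mempolicy *copy = mpol_dup(src);	/* private copy, refcount 1 */
	bool same;

	if (IS_ERR(copy))
		return false;

	same = mpol_equal(copy, other);
	mpol_put(copy);				/* drop our reference, may free */
	return same;
}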
+ */ + +struct sp_node { + struct rb_node nd; + unsigned long start, end; + struct mempolicy *policy; +}; + +struct shared_policy { + struct rb_root root; + rwlock_t lock; +}; + +int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst); +void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol); +int mpol_set_shared_policy(struct shared_policy *info, + struct vm_area_struct *vma, + struct mempolicy *new); +void mpol_free_shared_policy(struct shared_policy *p); +struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, + unsigned long idx); + +struct mempolicy *get_task_policy(struct task_struct *p); +struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, + unsigned long addr); +bool vma_policy_mof(struct vm_area_struct *vma); + +extern void numa_default_policy(void); +extern void numa_policy_init(void); +extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new); +extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new); + +extern int huge_node(struct vm_area_struct *vma, + unsigned long addr, gfp_t gfp_flags, + struct mempolicy **mpol, nodemask_t **nodemask); +extern bool init_nodemask_of_mempolicy(nodemask_t *mask); +extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, + const nodemask_t *mask); +extern unsigned int mempolicy_slab_node(void); + +extern enum zone_type policy_zone; + +static inline void check_highest_zone(enum zone_type k) +{ + if (k > policy_zone && k != ZONE_MOVABLE) + policy_zone = k; +} + +int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, + const nodemask_t *to, int flags); + + +#ifdef CONFIG_TMPFS +extern int mpol_parse_str(char *str, struct mempolicy **mpol); +#endif + +extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); + +/* Check if a vma is migratable */ +static inline bool vma_migratable(struct vm_area_struct *vma) +{ + if (vma->vm_flags & (VM_IO | VM_PFNMAP)) + return false; + + /* + * DAX device mappings require predictable access latency, so avoid + * incurring periodic faults. + */ + if (vma_is_dax(vma)) + return false; + +#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION + if (vma->vm_flags & VM_HUGETLB) + return false; +#endif + + /* + * Migration allocates pages in the highest zone. If we cannot + * do so then migration (at least from node to node) is not + * possible. 
+ */ + if (vma->vm_file && + gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) + < policy_zone) + return false; + return true; +} + +extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); +extern void mpol_put_task_policy(struct task_struct *); + +#else + +struct mempolicy {}; + +static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) +{ + return true; +} + +static inline void mpol_put(struct mempolicy *p) +{ +} + +static inline void mpol_cond_put(struct mempolicy *pol) +{ +} + +static inline void mpol_get(struct mempolicy *pol) +{ +} + +struct shared_policy {}; + +static inline void mpol_shared_policy_init(struct shared_policy *sp, + struct mempolicy *mpol) +{ +} + +static inline void mpol_free_shared_policy(struct shared_policy *p) +{ +} + +static inline struct mempolicy * +mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) +{ + return NULL; +} + +#define vma_policy(vma) NULL + +static inline int +vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) +{ + return 0; +} + +static inline void numa_policy_init(void) +{ +} + +static inline void numa_default_policy(void) +{ +} + +static inline void mpol_rebind_task(struct task_struct *tsk, + const nodemask_t *new) +{ +} + +static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) +{ +} + +static inline int huge_node(struct vm_area_struct *vma, + unsigned long addr, gfp_t gfp_flags, + struct mempolicy **mpol, nodemask_t **nodemask) +{ + *mpol = NULL; + *nodemask = NULL; + return 0; +} + +static inline bool init_nodemask_of_mempolicy(nodemask_t *m) +{ + return false; +} + +static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, + const nodemask_t *to, int flags) +{ + return 0; +} + +static inline void check_highest_zone(int k) +{ +} + +#ifdef CONFIG_TMPFS +static inline int mpol_parse_str(char *str, struct mempolicy **mpol) +{ + return 1; /* error */ +} +#endif + +static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, + unsigned long address) +{ + return -1; /* no node preference */ +} + +static inline void mpol_put_task_policy(struct task_struct *task) +{ +} +#endif /* CONFIG_NUMA */ +#endif diff --git a/include/linux/mempool.h b/include/linux/mempool.h new file mode 100644 index 000000000..0c964ac10 --- /dev/null +++ b/include/linux/mempool.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * memory buffer pool support + */ +#ifndef _LINUX_MEMPOOL_H +#define _LINUX_MEMPOOL_H + +#include +#include + +struct kmem_cache; + +typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); +typedef void (mempool_free_t)(void *element, void *pool_data); + +typedef struct mempool_s { + spinlock_t lock; + int min_nr; /* nr of elements at *elements */ + int curr_nr; /* Current nr of elements at *elements */ + void **elements; + + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +} mempool_t; + +static inline bool mempool_initialized(mempool_t *pool) +{ + return pool->elements != NULL; +} + +void mempool_exit(mempool_t *pool); +int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int node_id); +int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); + +extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); +extern mempool_t 
*mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int nid); + +extern int mempool_resize(mempool_t *pool, int new_min_nr); +extern void mempool_destroy(mempool_t *pool); +extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc; +extern void mempool_free(void *element, mempool_t *pool); + +/* + * A mempool_alloc_t and mempool_free_t that get the memory from + * a slab cache that is passed in through pool_data. + * Note: the slab cache may not have a ctor function. + */ +void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); +void mempool_free_slab(void *element, void *pool_data); + +static inline int +mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc) +{ + return mempool_init(pool, min_nr, mempool_alloc_slab, + mempool_free_slab, (void *) kc); +} + +static inline mempool_t * +mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) +{ + return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, + (void *) kc); +} + +/* + * a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the + * amount of memory specified by pool_data + */ +void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); +void mempool_kfree(void *element, void *pool_data); + +static inline int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size) +{ + return mempool_init(pool, min_nr, mempool_kmalloc, + mempool_kfree, (void *) size); +} + +static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) +{ + return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, + (void *) size); +} + +/* + * A mempool_alloc_t and mempool_free_t for a simple page allocator that + * allocates pages of the order specified by pool_data + */ +void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); +void mempool_free_pages(void *element, void *pool_data); + +static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order) +{ + return mempool_init(pool, min_nr, mempool_alloc_pages, + mempool_free_pages, (void *)(long)order); +} + +static inline mempool_t *mempool_create_page_pool(int min_nr, int order) +{ + return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, + (void *)(long)order); +} + +#endif /* _LINUX_MEMPOOL_H */ diff --git a/include/linux/memremap.h b/include/linux/memremap.h new file mode 100644 index 000000000..a84572cdc --- /dev/null +++ b/include/linux/memremap.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MEMREMAP_H_ +#define _LINUX_MEMREMAP_H_ +#include +#include + +#include + +struct resource; +struct device; + +/** + * struct vmem_altmap - pre-allocated storage for vmemmap_populate + * @base_pfn: base of the entire dev_pagemap mapping + * @reserve: pages mapped, but reserved for driver use (relative to @base) + * @free: free pages set aside in the mapping for memmap storage + * @align: pages reserved to meet allocation alignments + * @alloc: track pages consumed, private to vmemmap_populate() + */ +struct vmem_altmap { + const unsigned long base_pfn; + const unsigned long reserve; + unsigned long free; + unsigned long align; + unsigned long alloc; +}; + +/* + * Specialize ZONE_DEVICE memory into multiple types each having differents + * usage. + * + * MEMORY_DEVICE_PRIVATE: + * Device memory that is not directly addressable by the CPU: CPU can neither + * read nor write private memory. In this case, we do still have struct pages + * backing the device memory. 
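+ * (The range is hot-added as ZONE_DEVICE, so each device PFN is still
+ * described by a struct page even though the CPU cannot touch the data.)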
Doing so simplifies the implementation, but it is + * important to remember that there are certain points at which the struct page + * must be treated as an opaque object, rather than a "normal" struct page. + * + * A more complete discussion of unaddressable memory may be found in + * include/linux/hmm.h and Documentation/vm/hmm.rst. + * + * MEMORY_DEVICE_PUBLIC: + * Device memory that is cache coherent from device and CPU point of view. This + * is use on platform that have an advance system bus (like CAPI or CCIX). A + * driver can hotplug the device memory using ZONE_DEVICE and with that memory + * type. Any page of a process can be migrated to such memory. However no one + * should be allow to pin such memory so that it can always be evicted. + * + * MEMORY_DEVICE_FS_DAX: + * Host memory that has similar access semantics as System RAM i.e. DMA + * coherent and supports page pinning. In support of coordinating page + * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a + * wakeup event whenever a page is unpinned and becomes idle. This + * wakeup is used to coordinate physical address space management (ex: + * fs truncate/hole punch) vs pinned pages (ex: device dma). + */ +enum memory_type { + MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_PUBLIC, + MEMORY_DEVICE_FS_DAX, +}; + +/* + * For MEMORY_DEVICE_PRIVATE we use ZONE_DEVICE and extend it with two + * callbacks: + * page_fault() + * page_free() + * + * Additional notes about MEMORY_DEVICE_PRIVATE may be found in + * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief + * explanation in include/linux/memory_hotplug.h. + * + * The page_fault() callback must migrate page back, from device memory to + * system memory, so that the CPU can access it. This might fail for various + * reasons (device issues, device have been unplugged, ...). When such error + * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and + * set the CPU page table entry to "poisoned". + * + * Note that because memory cgroup charges are transferred to the device memory, + * this should never fail due to memory restrictions. However, allocation + * of a regular system page might still fail because we are out of memory. If + * that happens, the page_fault() callback must return VM_FAULT_OOM. + * + * The page_fault() callback can also try to migrate back multiple pages in one + * chunk, as an optimization. It must, however, prioritize the faulting address + * over all the others. + * + * + * The page_free() callback is called once the page refcount reaches 1 + * (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug. + * This allows the device driver to implement its own memory management.) + * + * For MEMORY_DEVICE_PUBLIC only the page_free() callback matter. 
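+ *
+ * As a rough sketch (the devmem_* names below are hypothetical; only the
+ * struct dev_pagemap fields and devm_memremap_pages() come from this
+ * header), a MEMORY_DEVICE_PRIVATE driver wires the callbacks up roughly
+ * like this:
+ *
+ *	pgmap->res = *devmem_res;
+ *	pgmap->ref = &devmem->ref;
+ *	pgmap->kill = devmem_kill_ref;
+ *	pgmap->type = MEMORY_DEVICE_PRIVATE;
+ *	pgmap->page_fault = devmem_fault;
+ *	pgmap->page_free = devmem_free;
+ *	base = devm_memremap_pages(dev, pgmap);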
+ */ +typedef int (*dev_page_fault_t)(struct vm_area_struct *vma, + unsigned long addr, + const struct page *page, + unsigned int flags, + pmd_t *pmdp); +typedef void (*dev_page_free_t)(struct page *page, void *data); + +/** + * struct dev_pagemap - metadata for ZONE_DEVICE mappings + * @page_fault: callback when CPU fault on an unaddressable device page + * @page_free: free page callback when page refcount reaches 1 + * @altmap: pre-allocated/reserved memory for vmemmap allocations + * @res: physical address range covered by @ref + * @ref: reference count that pins the devm_memremap_pages() mapping + * @kill: callback to transition @ref to the dead state + * @dev: host device of the mapping for debug + * @data: private data pointer for page_free() + * @type: memory type: see MEMORY_* in memory_hotplug.h + */ +struct dev_pagemap { + dev_page_fault_t page_fault; + dev_page_free_t page_free; + struct vmem_altmap altmap; + bool altmap_valid; + struct resource res; + struct percpu_ref *ref; + void (*kill)(struct percpu_ref *ref); + struct device *dev; + void *data; + enum memory_type type; +}; + +#ifdef CONFIG_ZONE_DEVICE +void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); +struct dev_pagemap *get_dev_pagemap(unsigned long pfn, + struct dev_pagemap *pgmap); + +unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); +void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); +#else +static inline void *devm_memremap_pages(struct device *dev, + struct dev_pagemap *pgmap) +{ + /* + * Fail attempts to call devm_memremap_pages() without + * ZONE_DEVICE support enabled, this requires callers to fall + * back to plain devm_memremap() based on config + */ + WARN_ON_ONCE(1); + return ERR_PTR(-ENXIO); +} + +static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, + struct dev_pagemap *pgmap) +{ + return NULL; +} + +static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) +{ + return 0; +} + +static inline void vmem_altmap_free(struct vmem_altmap *altmap, + unsigned long nr_pfns) +{ +} +#endif /* CONFIG_ZONE_DEVICE */ + +static inline void put_dev_pagemap(struct dev_pagemap *pgmap) +{ + if (pgmap) + percpu_ref_put(pgmap->ref); +} +#endif /* _LINUX_MEMREMAP_H_ */ diff --git a/include/linux/memstick.h b/include/linux/memstick.h new file mode 100644 index 000000000..690c35a9d --- /dev/null +++ b/include/linux/memstick.h @@ -0,0 +1,347 @@ +/* + * Sony MemoryStick support + * + * Copyright (C) 2007 Alex Dubov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef _MEMSTICK_H +#define _MEMSTICK_H + +#include +#include +#include + +/*** Hardware based structures ***/ + +struct ms_status_register { + unsigned char reserved; + unsigned char interrupt; +#define MEMSTICK_INT_CMDNAK 0x01 +#define MEMSTICK_INT_IOREQ 0x08 +#define MEMSTICK_INT_IOBREQ 0x10 +#define MEMSTICK_INT_BREQ 0x20 +#define MEMSTICK_INT_ERR 0x40 +#define MEMSTICK_INT_CED 0x80 + + unsigned char status0; +#define MEMSTICK_STATUS0_WP 0x01 +#define MEMSTICK_STATUS0_SL 0x02 +#define MEMSTICK_STATUS0_BF 0x10 +#define MEMSTICK_STATUS0_BE 0x20 +#define MEMSTICK_STATUS0_FB0 0x40 +#define MEMSTICK_STATUS0_MB 0x80 + + unsigned char status1; +#define MEMSTICK_STATUS1_UCFG 0x01 +#define MEMSTICK_STATUS1_FGER 0x02 +#define MEMSTICK_STATUS1_UCEX 0x04 +#define MEMSTICK_STATUS1_EXER 0x08 +#define MEMSTICK_STATUS1_UCDT 0x10 +#define MEMSTICK_STATUS1_DTER 0x20 +#define MEMSTICK_STATUS1_FB1 0x40 +#define MEMSTICK_STATUS1_MB 0x80 +} __attribute__((packed)); + +struct ms_id_register { + unsigned char type; + unsigned char if_mode; + unsigned char category; + unsigned char class; +} __attribute__((packed)); + +struct ms_param_register { + unsigned char system; +#define MEMSTICK_SYS_PAM 0x08 +#define MEMSTICK_SYS_BAMD 0x80 + + unsigned char block_address_msb; + unsigned short block_address; + unsigned char cp; +#define MEMSTICK_CP_BLOCK 0x00 +#define MEMSTICK_CP_PAGE 0x20 +#define MEMSTICK_CP_EXTRA 0x40 +#define MEMSTICK_CP_OVERWRITE 0x80 + + unsigned char page_address; +} __attribute__((packed)); + +struct ms_extra_data_register { + unsigned char overwrite_flag; +#define MEMSTICK_OVERWRITE_UDST 0x10 +#define MEMSTICK_OVERWRITE_PGST1 0x20 +#define MEMSTICK_OVERWRITE_PGST0 0x40 +#define MEMSTICK_OVERWRITE_BKST 0x80 + + unsigned char management_flag; +#define MEMSTICK_MANAGEMENT_SYSFLG 0x04 +#define MEMSTICK_MANAGEMENT_ATFLG 0x08 +#define MEMSTICK_MANAGEMENT_SCMS1 0x10 +#define MEMSTICK_MANAGEMENT_SCMS0 0x20 + + unsigned short logical_address; +} __attribute__((packed)); + +struct ms_register { + struct ms_status_register status; + struct ms_id_register id; + unsigned char reserved[8]; + struct ms_param_register param; + struct ms_extra_data_register extra_data; +} __attribute__((packed)); + +struct mspro_param_register { + unsigned char system; +#define MEMSTICK_SYS_PAR4 0x00 +#define MEMSTICK_SYS_PAR8 0x40 +#define MEMSTICK_SYS_SERIAL 0x80 + + __be16 data_count; + __be32 data_address; + unsigned char tpc_param; +} __attribute__((packed)); + +struct mspro_io_info_register { + unsigned char version; + unsigned char io_category; + unsigned char current_req; + unsigned char card_opt_info; + unsigned char rdy_wait_time; +} __attribute__((packed)); + +struct mspro_io_func_register { + unsigned char func_enable; + unsigned char func_select; + unsigned char func_intmask; + unsigned char transfer_mode; +} __attribute__((packed)); + +struct mspro_io_cmd_register { + unsigned short tpc_param; + unsigned short data_count; + unsigned int data_address; +} __attribute__((packed)); + +struct mspro_register { + struct ms_status_register status; + struct ms_id_register id; + unsigned char reserved0[8]; + struct mspro_param_register param; + unsigned char reserved1[8]; + struct mspro_io_info_register io_info; + struct mspro_io_func_register io_func; + unsigned char reserved2[7]; + struct mspro_io_cmd_register io_cmd; + unsigned char io_int; + unsigned char io_int_func; +} __attribute__((packed)); + +struct ms_register_addr { + unsigned char r_offset; + unsigned char r_length; + unsigned char w_offset; + 
unsigned char w_length; +} __attribute__((packed)); + +enum memstick_tpc { + MS_TPC_READ_MG_STATUS = 0x01, + MS_TPC_READ_LONG_DATA = 0x02, + MS_TPC_READ_SHORT_DATA = 0x03, + MS_TPC_READ_MG_DATA = 0x03, + MS_TPC_READ_REG = 0x04, + MS_TPC_READ_QUAD_DATA = 0x05, + MS_TPC_READ_IO_DATA = 0x05, + MS_TPC_GET_INT = 0x07, + MS_TPC_SET_RW_REG_ADRS = 0x08, + MS_TPC_EX_SET_CMD = 0x09, + MS_TPC_WRITE_QUAD_DATA = 0x0a, + MS_TPC_WRITE_IO_DATA = 0x0a, + MS_TPC_WRITE_REG = 0x0b, + MS_TPC_WRITE_SHORT_DATA = 0x0c, + MS_TPC_WRITE_MG_DATA = 0x0c, + MS_TPC_WRITE_LONG_DATA = 0x0d, + MS_TPC_SET_CMD = 0x0e +}; + +enum memstick_command { + MS_CMD_BLOCK_END = 0x33, + MS_CMD_RESET = 0x3c, + MS_CMD_BLOCK_WRITE = 0x55, + MS_CMD_SLEEP = 0x5a, + MS_CMD_BLOCK_ERASE = 0x99, + MS_CMD_BLOCK_READ = 0xaa, + MS_CMD_CLEAR_BUF = 0xc3, + MS_CMD_FLASH_STOP = 0xcc, + MS_CMD_LOAD_ID = 0x60, + MS_CMD_CMP_ICV = 0x7f, + MSPRO_CMD_FORMAT = 0x10, + MSPRO_CMD_SLEEP = 0x11, + MSPRO_CMD_WAKEUP = 0x12, + MSPRO_CMD_READ_DATA = 0x20, + MSPRO_CMD_WRITE_DATA = 0x21, + MSPRO_CMD_READ_ATRB = 0x24, + MSPRO_CMD_STOP = 0x25, + MSPRO_CMD_ERASE = 0x26, + MSPRO_CMD_READ_QUAD = 0x27, + MSPRO_CMD_WRITE_QUAD = 0x28, + MSPRO_CMD_SET_IBD = 0x46, + MSPRO_CMD_GET_IBD = 0x47, + MSPRO_CMD_IN_IO_DATA = 0xb0, + MSPRO_CMD_OUT_IO_DATA = 0xb1, + MSPRO_CMD_READ_IO_ATRB = 0xb2, + MSPRO_CMD_IN_IO_FIFO = 0xb3, + MSPRO_CMD_OUT_IO_FIFO = 0xb4, + MSPRO_CMD_IN_IOM = 0xb5, + MSPRO_CMD_OUT_IOM = 0xb6, +}; + +/*** Driver structures and functions ***/ + +enum memstick_param { MEMSTICK_POWER = 1, MEMSTICK_INTERFACE }; + +#define MEMSTICK_POWER_OFF 0 +#define MEMSTICK_POWER_ON 1 + +#define MEMSTICK_SERIAL 0 +#define MEMSTICK_PAR4 1 +#define MEMSTICK_PAR8 2 + +struct memstick_host; +struct memstick_driver; + +struct memstick_device_id { + unsigned char match_flags; +#define MEMSTICK_MATCH_ALL 0x01 + + unsigned char type; +#define MEMSTICK_TYPE_LEGACY 0xff +#define MEMSTICK_TYPE_DUO 0x00 +#define MEMSTICK_TYPE_PRO 0x01 + + unsigned char category; +#define MEMSTICK_CATEGORY_STORAGE 0xff +#define MEMSTICK_CATEGORY_STORAGE_DUO 0x00 +#define MEMSTICK_CATEGORY_IO 0x01 +#define MEMSTICK_CATEGORY_IO_PRO 0x10 + + unsigned char class; +#define MEMSTICK_CLASS_FLASH 0xff +#define MEMSTICK_CLASS_DUO 0x00 +#define MEMSTICK_CLASS_ROM 0x01 +#define MEMSTICK_CLASS_RO 0x02 +#define MEMSTICK_CLASS_WP 0x03 +}; + +struct memstick_request { + unsigned char tpc; + unsigned char data_dir:1, + need_card_int:1, + long_data:1; + unsigned char int_reg; + int error; + union { + struct scatterlist sg; + struct { + unsigned char data_len; + unsigned char data[15]; + }; + }; +}; + +struct memstick_dev { + struct memstick_device_id id; + struct memstick_host *host; + struct ms_register_addr reg_addr; + struct completion mrq_complete; + struct memstick_request current_mrq; + + /* Check that media driver is still willing to operate the device. */ + int (*check)(struct memstick_dev *card); + /* Get next request from the media driver. 
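+	 * Called by the host through memstick_next_req(): the media driver
+	 * normally stores the next TPC request in *mrq and returns 0, or
+	 * returns a negative error code once no more requests are pending.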
*/ + int (*next_request)(struct memstick_dev *card, + struct memstick_request **mrq); + /* Tell the media driver to stop doing things */ + void (*stop)(struct memstick_dev *card); + /* Allow the media driver to continue */ + void (*start)(struct memstick_dev *card); + + struct device dev; +}; + +struct memstick_host { + struct mutex lock; + unsigned int id; + unsigned int caps; +#define MEMSTICK_CAP_AUTO_GET_INT 1 +#define MEMSTICK_CAP_PAR4 2 +#define MEMSTICK_CAP_PAR8 4 + + struct work_struct media_checker; + struct device dev; + + struct memstick_dev *card; + unsigned int retries; + + /* Notify the host that some requests are pending. */ + void (*request)(struct memstick_host *host); + /* Set host IO parameters (power, clock, etc). */ + int (*set_param)(struct memstick_host *host, + enum memstick_param param, + int value); + unsigned long private[0] ____cacheline_aligned; +}; + +struct memstick_driver { + struct memstick_device_id *id_table; + int (*probe)(struct memstick_dev *card); + void (*remove)(struct memstick_dev *card); + int (*suspend)(struct memstick_dev *card, + pm_message_t state); + int (*resume)(struct memstick_dev *card); + + struct device_driver driver; +}; + +int memstick_register_driver(struct memstick_driver *drv); +void memstick_unregister_driver(struct memstick_driver *drv); + +struct memstick_host *memstick_alloc_host(unsigned int extra, + struct device *dev); + +int memstick_add_host(struct memstick_host *host); +void memstick_remove_host(struct memstick_host *host); +void memstick_free_host(struct memstick_host *host); +void memstick_detect_change(struct memstick_host *host); +void memstick_suspend_host(struct memstick_host *host); +void memstick_resume_host(struct memstick_host *host); + +void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc, + const struct scatterlist *sg); +void memstick_init_req(struct memstick_request *mrq, unsigned char tpc, + const void *buf, size_t length); +int memstick_next_req(struct memstick_host *host, + struct memstick_request **mrq); +void memstick_new_req(struct memstick_host *host); + +int memstick_set_rw_addr(struct memstick_dev *card); + +static inline void *memstick_priv(struct memstick_host *host) +{ + return (void *)host->private; +} + +static inline void *memstick_get_drvdata(struct memstick_dev *card) +{ + return dev_get_drvdata(&card->dev); +} + +static inline void memstick_set_drvdata(struct memstick_dev *card, void *data) +{ + dev_set_drvdata(&card->dev, data); +} + +#endif diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h new file mode 100644 index 000000000..c118a7ec9 --- /dev/null +++ b/include/linux/mfd/88pm80x.h @@ -0,0 +1,373 @@ +/* + * Marvell 88PM80x Interface + * + * Copyright (C) 2012 Marvell International Ltd. + * Qiao Zhou + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_MFD_88PM80X_H +#define __LINUX_MFD_88PM80X_H + +#include +#include +#include +#include + +enum { + CHIP_INVALID = 0, + CHIP_PM800, + CHIP_PM805, + CHIP_PM860, + CHIP_MAX, +}; + +enum { + PM800_ID_BUCK1 = 0, + PM800_ID_BUCK2, + PM800_ID_BUCK3, + PM800_ID_BUCK4, + PM800_ID_BUCK5, + + PM800_ID_LDO1, + PM800_ID_LDO2, + PM800_ID_LDO3, + PM800_ID_LDO4, + PM800_ID_LDO5, + PM800_ID_LDO6, + PM800_ID_LDO7, + PM800_ID_LDO8, + PM800_ID_LDO9, + PM800_ID_LDO10, + PM800_ID_LDO11, + PM800_ID_LDO12, + PM800_ID_LDO13, + PM800_ID_LDO14, + PM800_ID_LDO15, + PM800_ID_LDO16, + PM800_ID_LDO17, + PM800_ID_LDO18, + PM800_ID_LDO19, + + PM800_ID_RG_MAX, +}; +#define PM800_MAX_REGULATOR PM800_ID_RG_MAX /* 5 Bucks, 19 LDOs */ +#define PM800_NUM_BUCK (5) /*5 Bucks */ +#define PM800_NUM_LDO (19) /*19 Bucks */ + +/* page 0 basic: slave adder 0x60 */ + +#define PM800_STATUS_1 (0x01) +#define PM800_ONKEY_STS1 BIT(0) +#define PM800_EXTON_STS1 BIT(1) +#define PM800_CHG_STS1 BIT(2) +#define PM800_BAT_STS1 BIT(3) +#define PM800_VBUS_STS1 BIT(4) +#define PM800_LDO_PGOOD_STS1 BIT(5) +#define PM800_BUCK_PGOOD_STS1 BIT(6) + +#define PM800_STATUS_2 (0x02) +#define PM800_RTC_ALARM_STS2 BIT(0) + +/* Wakeup Registers */ +#define PM800_WAKEUP1 (0x0D) + +#define PM800_WAKEUP2 (0x0E) +#define PM800_WAKEUP2_INV_INT BIT(0) +#define PM800_WAKEUP2_INT_CLEAR BIT(1) +#define PM800_WAKEUP2_INT_MASK BIT(2) + +#define PM800_POWER_UP_LOG (0x10) + +/* Referance and low power registers */ +#define PM800_LOW_POWER1 (0x20) +#define PM800_LOW_POWER2 (0x21) +#define PM800_LOW_POWER_CONFIG3 (0x22) +#define PM800_LOW_POWER_CONFIG4 (0x23) + +/* GPIO register */ +#define PM800_GPIO_0_1_CNTRL (0x30) +#define PM800_GPIO0_VAL BIT(0) +#define PM800_GPIO0_GPIO_MODE(x) (x << 1) +#define PM800_GPIO1_VAL BIT(4) +#define PM800_GPIO1_GPIO_MODE(x) (x << 5) + +#define PM800_GPIO_2_3_CNTRL (0x31) +#define PM800_GPIO2_VAL BIT(0) +#define PM800_GPIO2_GPIO_MODE(x) (x << 1) +#define PM800_GPIO3_VAL BIT(4) +#define PM800_GPIO3_GPIO_MODE(x) (x << 5) +#define PM800_GPIO3_MODE_MASK 0x1F +#define PM800_GPIO3_HEADSET_MODE PM800_GPIO3_GPIO_MODE(6) + +#define PM800_GPIO_4_CNTRL (0x32) +#define PM800_GPIO4_VAL BIT(0) +#define PM800_GPIO4_GPIO_MODE(x) (x << 1) + +#define PM800_HEADSET_CNTRL (0x38) +#define PM800_HEADSET_DET_EN BIT(7) +#define PM800_HSDET_SLP BIT(1) +/* PWM register */ +#define PM800_PWM1 (0x40) +#define PM800_PWM2 (0x41) +#define PM800_PWM3 (0x42) +#define PM800_PWM4 (0x43) + +/* RTC Registers */ +#define PM800_RTC_CONTROL (0xD0) +#define PM800_RTC_MISC1 (0xE1) +#define PM800_RTC_MISC2 (0xE2) +#define PM800_RTC_MISC3 (0xE3) +#define PM800_RTC_MISC4 (0xE4) +#define PM800_RTC_MISC5 (0xE7) +/* bit definitions of RTC Register 1 (0xD0) */ +#define PM800_ALARM1_EN BIT(0) +#define PM800_ALARM_WAKEUP BIT(4) +#define PM800_ALARM BIT(5) +#define PM800_RTC1_USE_XO BIT(7) + +/* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */ + +/* buck registers */ +#define PM800_SLEEP_BUCK1 (0x30) + +/* BUCK Sleep Mode Register 1: BUCK[1..4] */ +#define PM800_BUCK_SLP1 (0x5A) +#define PM800_BUCK1_SLP1_SHIFT 0 +#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT) + +/* page 2 GPADC: slave adder 0x02 */ +#define PM800_GPADC_MEAS_EN1 (0x01) +#define PM800_MEAS_EN1_VBAT BIT(2) +#define PM800_GPADC_MEAS_EN2 (0x02) +#define PM800_MEAS_EN2_RFTMP BIT(0) +#define PM800_MEAS_GP0_EN BIT(2) +#define PM800_MEAS_GP1_EN BIT(3) +#define PM800_MEAS_GP2_EN BIT(4) +#define PM800_MEAS_GP3_EN BIT(5) +#define PM800_MEAS_GP4_EN BIT(6) + +#define PM800_GPADC_MISC_CONFIG1 
(0x05) +#define PM800_GPADC_MISC_CONFIG2 (0x06) +#define PM800_GPADC_MISC_GPFSM_EN BIT(0) +#define PM800_GPADC_SLOW_MODE(x) (x << 3) + +#define PM800_GPADC_MISC_CONFIG3 (0x09) +#define PM800_GPADC_MISC_CONFIG4 (0x0A) + +#define PM800_GPADC_PREBIAS1 (0x0F) +#define PM800_GPADC0_GP_PREBIAS_TIME(x) (x << 0) +#define PM800_GPADC_PREBIAS2 (0x10) + +#define PM800_GP_BIAS_ENA1 (0x14) +#define PM800_GPADC_GP_BIAS_EN0 BIT(0) +#define PM800_GPADC_GP_BIAS_EN1 BIT(1) +#define PM800_GPADC_GP_BIAS_EN2 BIT(2) +#define PM800_GPADC_GP_BIAS_EN3 BIT(3) + +#define PM800_GP_BIAS_OUT1 (0x15) +#define PM800_BIAS_OUT_GP0 BIT(0) +#define PM800_BIAS_OUT_GP1 BIT(1) +#define PM800_BIAS_OUT_GP2 BIT(2) +#define PM800_BIAS_OUT_GP3 BIT(3) + +#define PM800_GPADC0_LOW_TH 0x20 +#define PM800_GPADC1_LOW_TH 0x21 +#define PM800_GPADC2_LOW_TH 0x22 +#define PM800_GPADC3_LOW_TH 0x23 +#define PM800_GPADC4_LOW_TH 0x24 + +#define PM800_GPADC0_UPP_TH 0x30 +#define PM800_GPADC1_UPP_TH 0x31 +#define PM800_GPADC2_UPP_TH 0x32 +#define PM800_GPADC3_UPP_TH 0x33 +#define PM800_GPADC4_UPP_TH 0x34 + +#define PM800_VBBAT_MEAS1 0x40 +#define PM800_VBBAT_MEAS2 0x41 +#define PM800_VBAT_MEAS1 0x42 +#define PM800_VBAT_MEAS2 0x43 +#define PM800_VSYS_MEAS1 0x44 +#define PM800_VSYS_MEAS2 0x45 +#define PM800_VCHG_MEAS1 0x46 +#define PM800_VCHG_MEAS2 0x47 +#define PM800_TINT_MEAS1 0x50 +#define PM800_TINT_MEAS2 0x51 +#define PM800_PMOD_MEAS1 0x52 +#define PM800_PMOD_MEAS2 0x53 + +#define PM800_GPADC0_MEAS1 0x54 +#define PM800_GPADC0_MEAS2 0x55 +#define PM800_GPADC1_MEAS1 0x56 +#define PM800_GPADC1_MEAS2 0x57 +#define PM800_GPADC2_MEAS1 0x58 +#define PM800_GPADC2_MEAS2 0x59 +#define PM800_GPADC3_MEAS1 0x5A +#define PM800_GPADC3_MEAS2 0x5B +#define PM800_GPADC4_MEAS1 0x5C +#define PM800_GPADC4_MEAS2 0x5D + +#define PM800_GPADC4_AVG1 0xA8 +#define PM800_GPADC4_AVG2 0xA9 + +/* 88PM805 Registers */ +#define PM805_MAIN_POWERUP (0x01) +#define PM805_INT_STATUS0 (0x02) /* for ena/dis all interrupts */ + +#define PM805_STATUS0_INT_CLEAR (1 << 0) +#define PM805_STATUS0_INV_INT (1 << 1) +#define PM800_STATUS0_INT_MASK (1 << 2) + +#define PM805_INT_STATUS1 (0x03) + +#define PM805_INT1_HP1_SHRT BIT(0) +#define PM805_INT1_HP2_SHRT BIT(1) +#define PM805_INT1_MIC_CONFLICT BIT(2) +#define PM805_INT1_CLIP_FAULT BIT(3) +#define PM805_INT1_LDO_OFF BIT(4) +#define PM805_INT1_SRC_DPLL_LOCK BIT(5) + +#define PM805_INT_STATUS2 (0x04) + +#define PM805_INT2_MIC_DET BIT(0) +#define PM805_INT2_SHRT_BTN_DET BIT(1) +#define PM805_INT2_VOLM_BTN_DET BIT(2) +#define PM805_INT2_VOLP_BTN_DET BIT(3) +#define PM805_INT2_RAW_PLL_FAULT BIT(4) +#define PM805_INT2_FINE_PLL_FAULT BIT(5) + +#define PM805_INT_MASK1 (0x05) +#define PM805_INT_MASK2 (0x06) +#define PM805_SHRT_BTN_DET BIT(1) + +/* number of status and int reg in a row */ +#define PM805_INT_REG_NUM (2) + +#define PM805_MIC_DET1 (0x07) +#define PM805_MIC_DET_EN_MIC_DET BIT(0) +#define PM805_MIC_DET2 (0x08) +#define PM805_MIC_DET_STATUS1 (0x09) + +#define PM805_MIC_DET_STATUS3 (0x0A) +#define PM805_AUTO_SEQ_STATUS1 (0x0B) +#define PM805_AUTO_SEQ_STATUS2 (0x0C) + +#define PM805_ADC_SETTING1 (0x10) +#define PM805_ADC_SETTING2 (0x11) +#define PM805_ADC_SETTING3 (0x11) +#define PM805_ADC_GAIN1 (0x12) +#define PM805_ADC_GAIN2 (0x13) +#define PM805_DMIC_SETTING (0x15) +#define PM805_DWS_SETTING (0x16) +#define PM805_MIC_CONFLICT_STS (0x17) + +#define PM805_PDM_SETTING1 (0x20) +#define PM805_PDM_SETTING2 (0x21) +#define PM805_PDM_SETTING3 (0x22) +#define PM805_PDM_CONTROL1 (0x23) +#define PM805_PDM_CONTROL2 (0x24) +#define PM805_PDM_CONTROL3 
(0x25) + +#define PM805_HEADPHONE_SETTING (0x26) +#define PM805_HEADPHONE_GAIN_A2A (0x27) +#define PM805_HEADPHONE_SHORT_STATE (0x28) +#define PM805_EARPHONE_SETTING (0x29) +#define PM805_AUTO_SEQ_SETTING (0x2A) + +struct pm80x_rtc_pdata { + int vrtc; + int rtc_wakeup; +}; + +struct pm80x_subchip { + struct i2c_client *power_page; /* chip client for power page */ + struct i2c_client *gpadc_page; /* chip client for gpadc page */ + struct regmap *regmap_power; + struct regmap *regmap_gpadc; + unsigned short power_page_addr; /* power page I2C address */ + unsigned short gpadc_page_addr; /* gpadc page I2C address */ +}; + +struct pm80x_chip { + struct pm80x_subchip *subchip; + struct device *dev; + struct i2c_client *client; + struct i2c_client *companion; + struct regmap *regmap; + struct regmap_irq_chip *regmap_irq_chip; + struct regmap_irq_chip_data *irq_data; + int type; + int irq; + int irq_mode; + unsigned long wu_flag; + spinlock_t lock; +}; + +struct pm80x_platform_data { + struct pm80x_rtc_pdata *rtc; + /* + * For the regulator not defined, set regulators[not_defined] to be + * NULL. num_regulators are the number of regulators supposed to be + * initialized. If all regulators are not defined, set num_regulators + * to be 0. + */ + struct regulator_init_data *regulators[PM800_ID_RG_MAX]; + unsigned int num_regulators; + int irq_mode; /* Clear interrupt by read/write(0/1) */ + int batt_det; /* enable/disable */ + int (*plat_config)(struct pm80x_chip *chip, + struct pm80x_platform_data *pdata); +}; + +extern const struct dev_pm_ops pm80x_pm_ops; +extern const struct regmap_config pm80x_regmap_config; + +static inline int pm80x_request_irq(struct pm80x_chip *pm80x, int irq, + irq_handler_t handler, unsigned long flags, + const char *name, void *data) +{ + if (!pm80x->irq_data) + return -EINVAL; + return request_threaded_irq(regmap_irq_get_virq(pm80x->irq_data, irq), + NULL, handler, flags, name, data); +} + +static inline void pm80x_free_irq(struct pm80x_chip *pm80x, int irq, void *data) +{ + if (!pm80x->irq_data) + return; + free_irq(regmap_irq_get_virq(pm80x->irq_data, irq), data); +} + +#ifdef CONFIG_PM +static inline int pm80x_dev_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent); + int irq = platform_get_irq(pdev, 0); + + if (device_may_wakeup(dev)) + set_bit(irq, &chip->wu_flag); + + return 0; +} + +static inline int pm80x_dev_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent); + int irq = platform_get_irq(pdev, 0); + + if (device_may_wakeup(dev)) + clear_bit(irq, &chip->wu_flag); + + return 0; +} +#endif + +extern int pm80x_init(struct i2c_client *client); +extern int pm80x_deinit(void); +#endif /* __LINUX_MFD_88PM80X_H */ diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h new file mode 100644 index 000000000..cd9753020 --- /dev/null +++ b/include/linux/mfd/88pm860x.h @@ -0,0 +1,487 @@ +/* + * Marvell 88PM860x Interface + * + * Copyright (C) 2009 Marvell International Ltd. + * Haojian Zhuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_MFD_88PM860X_H +#define __LINUX_MFD_88PM860X_H + +#include + +#define MFD_NAME_SIZE (40) + +enum { + CHIP_INVALID = 0, + CHIP_PM8606, + CHIP_PM8607, + CHIP_MAX, +}; + +enum { + PM8606_ID_INVALID, + PM8606_ID_BACKLIGHT, + PM8606_ID_LED, + PM8606_ID_VIBRATOR, + PM8606_ID_TOUCH, + PM8606_ID_SOUND, + PM8606_ID_CHARGER, + PM8606_ID_MAX, +}; + + +/* 8606 Registers */ +#define PM8606_DCM_BOOST (0x00) +#define PM8606_PWM (0x01) + +#define PM8607_MISC2 (0x42) + +/* Power Up Log Register */ +#define PM8607_POWER_UP_LOG (0x3F) + +/* Charger Control Registers */ +#define PM8607_CCNT (0x47) +#define PM8607_CHG_CTRL1 (0x48) +#define PM8607_CHG_CTRL2 (0x49) +#define PM8607_CHG_CTRL3 (0x4A) +#define PM8607_CHG_CTRL4 (0x4B) +#define PM8607_CHG_CTRL5 (0x4C) +#define PM8607_CHG_CTRL6 (0x4D) +#define PM8607_CHG_CTRL7 (0x4E) + +/* Backlight Registers */ +#define PM8606_WLED1A (0x02) +#define PM8606_WLED1B (0x03) +#define PM8606_WLED2A (0x04) +#define PM8606_WLED2B (0x05) +#define PM8606_WLED3A (0x06) +#define PM8606_WLED3B (0x07) + +/* LED Registers */ +#define PM8606_RGB2A (0x08) +#define PM8606_RGB2B (0x09) +#define PM8606_RGB2C (0x0A) +#define PM8606_RGB2D (0x0B) +#define PM8606_RGB1A (0x0C) +#define PM8606_RGB1B (0x0D) +#define PM8606_RGB1C (0x0E) +#define PM8606_RGB1D (0x0F) + +#define PM8606_PREREGULATORA (0x10) +#define PM8606_PREREGULATORB (0x11) +#define PM8606_VIBRATORA (0x12) +#define PM8606_VIBRATORB (0x13) +#define PM8606_VCHG (0x14) +#define PM8606_VSYS (0x15) +#define PM8606_MISC (0x16) +#define PM8606_CHIP_ID (0x17) +#define PM8606_STATUS (0x18) +#define PM8606_FLAGS (0x19) +#define PM8606_PROTECTA (0x1A) +#define PM8606_PROTECTB (0x1B) +#define PM8606_PROTECTC (0x1C) + +/* Bit definitions of PM8606 registers */ +#define PM8606_DCM_500MA (0x0) /* current limit */ +#define PM8606_DCM_750MA (0x1) +#define PM8606_DCM_1000MA (0x2) +#define PM8606_DCM_1250MA (0x3) +#define PM8606_DCM_250MV (0x0 << 2) +#define PM8606_DCM_300MV (0x1 << 2) +#define PM8606_DCM_350MV (0x2 << 2) +#define PM8606_DCM_400MV (0x3 << 2) + +#define PM8606_PWM_31200HZ (0x0) +#define PM8606_PWM_15600HZ (0x1) +#define PM8606_PWM_7800HZ (0x2) +#define PM8606_PWM_3900HZ (0x3) +#define PM8606_PWM_1950HZ (0x4) +#define PM8606_PWM_976HZ (0x5) +#define PM8606_PWM_488HZ (0x6) +#define PM8606_PWM_244HZ (0x7) +#define PM8606_PWM_FREQ_MASK (0x7) + +#define PM8606_WLED_ON (1 << 0) +#define PM8606_WLED_CURRENT(x) ((x & 0x1F) << 1) + +#define PM8606_LED_CURRENT(x) (((x >> 2) & 0x07) << 5) + +#define PM8606_VSYS_EN (1 << 1) + +#define PM8606_MISC_OSC_EN (1 << 4) + +enum { + PM8607_ID_BUCK1 = 0, + PM8607_ID_BUCK2, + PM8607_ID_BUCK3, + + PM8607_ID_LDO1, + PM8607_ID_LDO2, + PM8607_ID_LDO3, + PM8607_ID_LDO4, + PM8607_ID_LDO5, + PM8607_ID_LDO6, + PM8607_ID_LDO7, + PM8607_ID_LDO8, + PM8607_ID_LDO9, + PM8607_ID_LDO10, + PM8607_ID_LDO11, + PM8607_ID_LDO12, + PM8607_ID_LDO13, + PM8607_ID_LDO14, + PM8607_ID_LDO15, + PM8606_ID_PREG, + + PM8607_ID_RG_MAX, +}; + +/* 8607 chip ID is 0x40 or 0x50 */ +#define PM8607_VERSION_MASK (0xF0) /* 8607 chip ID mask */ + +/* Interrupt Registers */ +#define PM8607_STATUS_1 (0x01) +#define PM8607_STATUS_2 (0x02) +#define PM8607_INT_STATUS1 (0x03) +#define PM8607_INT_STATUS2 (0x04) +#define PM8607_INT_STATUS3 (0x05) +#define PM8607_INT_MASK_1 (0x06) +#define PM8607_INT_MASK_2 (0x07) +#define PM8607_INT_MASK_3 (0x08) + +/* Regulator Control Registers */ +#define PM8607_LDO1 (0x10) +#define PM8607_LDO2 (0x11) +#define PM8607_LDO3 (0x12) +#define PM8607_LDO4 (0x13) +#define PM8607_LDO5 (0x14) +#define 
PM8607_LDO6 (0x15) +#define PM8607_LDO7 (0x16) +#define PM8607_LDO8 (0x17) +#define PM8607_LDO9 (0x18) +#define PM8607_LDO10 (0x19) +#define PM8607_LDO12 (0x1A) +#define PM8607_LDO14 (0x1B) +#define PM8607_SLEEP_MODE1 (0x1C) +#define PM8607_SLEEP_MODE2 (0x1D) +#define PM8607_SLEEP_MODE3 (0x1E) +#define PM8607_SLEEP_MODE4 (0x1F) +#define PM8607_GO (0x20) +#define PM8607_SLEEP_BUCK1 (0x21) +#define PM8607_SLEEP_BUCK2 (0x22) +#define PM8607_SLEEP_BUCK3 (0x23) +#define PM8607_BUCK1 (0x24) +#define PM8607_BUCK2 (0x25) +#define PM8607_BUCK3 (0x26) +#define PM8607_BUCK_CONTROLS (0x27) +#define PM8607_SUPPLIES_EN11 (0x2B) +#define PM8607_SUPPLIES_EN12 (0x2C) +#define PM8607_GROUP1 (0x2D) +#define PM8607_GROUP2 (0x2E) +#define PM8607_GROUP3 (0x2F) +#define PM8607_GROUP4 (0x30) +#define PM8607_GROUP5 (0x31) +#define PM8607_GROUP6 (0x32) +#define PM8607_SUPPLIES_EN21 (0x33) +#define PM8607_SUPPLIES_EN22 (0x34) + +/* Vibrator Control Registers */ +#define PM8607_VIBRATOR_SET (0x28) +#define PM8607_VIBRATOR_PWM (0x29) + +/* GPADC Registers */ +#define PM8607_GP_BIAS1 (0x4F) +#define PM8607_MEAS_EN1 (0x50) +#define PM8607_MEAS_EN2 (0x51) +#define PM8607_MEAS_EN3 (0x52) +#define PM8607_MEAS_OFF_TIME1 (0x53) +#define PM8607_MEAS_OFF_TIME2 (0x54) +#define PM8607_TSI_PREBIAS (0x55) /* prebias time */ +#define PM8607_PD_PREBIAS (0x56) /* prebias time */ +#define PM8607_GPADC_MISC1 (0x57) + +/* bit definitions of MEAS_EN1*/ +#define PM8607_MEAS_EN1_VBAT (1 << 0) +#define PM8607_MEAS_EN1_VCHG (1 << 1) +#define PM8607_MEAS_EN1_VSYS (1 << 2) +#define PM8607_MEAS_EN1_TINT (1 << 3) +#define PM8607_MEAS_EN1_RFTMP (1 << 4) +#define PM8607_MEAS_EN1_TBAT (1 << 5) +#define PM8607_MEAS_EN1_GPADC2 (1 << 6) +#define PM8607_MEAS_EN1_GPADC3 (1 << 7) + +/* Battery Monitor Registers */ +#define PM8607_GP_BIAS2 (0x5A) +#define PM8607_VBAT_LOWTH (0x5B) +#define PM8607_VCHG_LOWTH (0x5C) +#define PM8607_VSYS_LOWTH (0x5D) +#define PM8607_TINT_LOWTH (0x5E) +#define PM8607_GPADC0_LOWTH (0x5F) +#define PM8607_GPADC1_LOWTH (0x60) +#define PM8607_GPADC2_LOWTH (0x61) +#define PM8607_GPADC3_LOWTH (0x62) +#define PM8607_VBAT_HIGHTH (0x63) +#define PM8607_VCHG_HIGHTH (0x64) +#define PM8607_VSYS_HIGHTH (0x65) +#define PM8607_TINT_HIGHTH (0x66) +#define PM8607_GPADC0_HIGHTH (0x67) +#define PM8607_GPADC1_HIGHTH (0x68) +#define PM8607_GPADC2_HIGHTH (0x69) +#define PM8607_GPADC3_HIGHTH (0x6A) +#define PM8607_IBAT_MEAS1 (0x6B) +#define PM8607_IBAT_MEAS2 (0x6C) +#define PM8607_VBAT_MEAS1 (0x6D) +#define PM8607_VBAT_MEAS2 (0x6E) +#define PM8607_VCHG_MEAS1 (0x6F) +#define PM8607_VCHG_MEAS2 (0x70) +#define PM8607_VSYS_MEAS1 (0x71) +#define PM8607_VSYS_MEAS2 (0x72) +#define PM8607_TINT_MEAS1 (0x73) +#define PM8607_TINT_MEAS2 (0x74) +#define PM8607_GPADC0_MEAS1 (0x75) +#define PM8607_GPADC0_MEAS2 (0x76) +#define PM8607_GPADC1_MEAS1 (0x77) +#define PM8607_GPADC1_MEAS2 (0x78) +#define PM8607_GPADC2_MEAS1 (0x79) +#define PM8607_GPADC2_MEAS2 (0x7A) +#define PM8607_GPADC3_MEAS1 (0x7B) +#define PM8607_GPADC3_MEAS2 (0x7C) +#define PM8607_CCNT_MEAS1 (0x95) +#define PM8607_CCNT_MEAS2 (0x96) +#define PM8607_VBAT_AVG (0x97) +#define PM8607_VCHG_AVG (0x98) +#define PM8607_VSYS_AVG (0x99) +#define PM8607_VBAT_MIN (0x9A) +#define PM8607_VCHG_MIN (0x9B) +#define PM8607_VSYS_MIN (0x9C) +#define PM8607_VBAT_MAX (0x9D) +#define PM8607_VCHG_MAX (0x9E) +#define PM8607_VSYS_MAX (0x9F) + +#define PM8607_GPADC_MISC2 (0x59) +#define PM8607_GPADC0_GP_BIAS_A0 (1 << 0) +#define PM8607_GPADC1_GP_BIAS_A1 (1 << 1) +#define PM8607_GPADC2_GP_BIAS_A2 (1 << 2) +#define 
PM8607_GPADC3_GP_BIAS_A3 (1 << 3) +#define PM8607_GPADC2_GP_BIAS_OUT2 (1 << 6) + +/* RTC Control Registers */ +#define PM8607_RTC1 (0xA0) +#define PM8607_RTC_COUNTER1 (0xA1) +#define PM8607_RTC_COUNTER2 (0xA2) +#define PM8607_RTC_COUNTER3 (0xA3) +#define PM8607_RTC_COUNTER4 (0xA4) +#define PM8607_RTC_EXPIRE1 (0xA5) +#define PM8607_RTC_EXPIRE2 (0xA6) +#define PM8607_RTC_EXPIRE3 (0xA7) +#define PM8607_RTC_EXPIRE4 (0xA8) +#define PM8607_RTC_TRIM1 (0xA9) +#define PM8607_RTC_TRIM2 (0xAA) +#define PM8607_RTC_TRIM3 (0xAB) +#define PM8607_RTC_TRIM4 (0xAC) +#define PM8607_RTC_MISC1 (0xAD) +#define PM8607_RTC_MISC2 (0xAE) +#define PM8607_RTC_MISC3 (0xAF) + +/* Misc Registers */ +#define PM8607_CHIP_ID (0x00) +#define PM8607_B0_MISC1 (0x0C) +#define PM8607_LDO1 (0x10) +#define PM8607_DVC3 (0x26) +#define PM8607_A1_MISC1 (0x40) + +/* bit definitions of Status Query Interface */ +#define PM8607_STATUS_CC (1 << 3) +#define PM8607_STATUS_PEN (1 << 4) +#define PM8607_STATUS_HEADSET (1 << 5) +#define PM8607_STATUS_HOOK (1 << 6) +#define PM8607_STATUS_MICIN (1 << 7) +#define PM8607_STATUS_ONKEY (1 << 8) +#define PM8607_STATUS_EXTON (1 << 9) +#define PM8607_STATUS_CHG (1 << 10) +#define PM8607_STATUS_BAT (1 << 11) +#define PM8607_STATUS_VBUS (1 << 12) +#define PM8607_STATUS_OV (1 << 13) + +/* bit definitions of BUCK3 */ +#define PM8607_BUCK3_DOUBLE (1 << 6) + +/* bit definitions of Misc1 */ +#define PM8607_A1_MISC1_PI2C (1 << 0) +#define PM8607_B0_MISC1_INV_INT (1 << 0) +#define PM8607_B0_MISC1_INT_CLEAR (1 << 1) +#define PM8607_B0_MISC1_INT_MASK (1 << 2) +#define PM8607_B0_MISC1_PI2C (1 << 3) +#define PM8607_B0_MISC1_RESET (1 << 6) + +/* bits definitions of GPADC */ +#define PM8607_GPADC_EN (1 << 0) +#define PM8607_GPADC_PREBIAS_MASK (3 << 1) +#define PM8607_GPADC_SLOT_CYCLE_MASK (3 << 3) /* slow mode */ +#define PM8607_GPADC_OFF_SCALE_MASK (3 << 5) /* GP sleep mode */ +#define PM8607_GPADC_SW_CAL_MASK (1 << 7) + +#define PM8607_PD_PREBIAS_MASK (0x1F << 0) +#define PM8607_PD_PRECHG_MASK (7 << 5) + +#define PM8606_REF_GP_OSC_OFF 0 +#define PM8606_REF_GP_OSC_ON 1 +#define PM8606_REF_GP_OSC_UNKNOWN 2 + +/* Clients of reference group and 8MHz oscillator in 88PM8606 */ +enum pm8606_ref_gp_and_osc_clients { + REF_GP_NO_CLIENTS = 0, + WLED1_DUTY = (1<<0), /*PF 0x02.7:0*/ + WLED2_DUTY = (1<<1), /*PF 0x04.7:0*/ + WLED3_DUTY = (1<<2), /*PF 0x06.7:0*/ + RGB1_ENABLE = (1<<3), /*PF 0x07.1*/ + RGB2_ENABLE = (1<<4), /*PF 0x07.2*/ + LDO_VBR_EN = (1<<5), /*PF 0x12.0*/ + REF_GP_MAX_CLIENT = 0xFFFF +}; + +/* Interrupt Number in 88PM8607 */ +enum { + PM8607_IRQ_ONKEY, + PM8607_IRQ_EXTON, + PM8607_IRQ_CHG, + PM8607_IRQ_BAT, + PM8607_IRQ_RTC, + PM8607_IRQ_CC, + PM8607_IRQ_VBAT, + PM8607_IRQ_VCHG, + PM8607_IRQ_VSYS, + PM8607_IRQ_TINT, + PM8607_IRQ_GPADC0, + PM8607_IRQ_GPADC1, + PM8607_IRQ_GPADC2, + PM8607_IRQ_GPADC3, + PM8607_IRQ_AUDIO_SHORT, + PM8607_IRQ_PEN, + PM8607_IRQ_HEADSET, + PM8607_IRQ_HOOK, + PM8607_IRQ_MICIN, + PM8607_IRQ_CHG_FAIL, + PM8607_IRQ_CHG_DONE, + PM8607_IRQ_CHG_FAULT, +}; + +enum { + PM8607_CHIP_A0 = 0x40, + PM8607_CHIP_A1 = 0x41, + PM8607_CHIP_B0 = 0x48, +}; + +struct pm860x_chip { + struct device *dev; + struct mutex irq_lock; + struct mutex osc_lock; + struct i2c_client *client; + struct i2c_client *companion; /* companion chip client */ + struct regmap *regmap; + struct regmap *regmap_companion; + + int buck3_double; /* DVC ramp slope double */ + int companion_addr; + unsigned short osc_vote; + int id; + int irq_mode; + int irq_base; + int core_irq; + unsigned char chip_version; + unsigned char 
osc_status; + + unsigned int wakeup_flag; +}; + +enum { + GI2C_PORT = 0, + PI2C_PORT, +}; + +struct pm860x_backlight_pdata { + int pwm; + int iset; +}; + +struct pm860x_led_pdata { + int iset; +}; + +struct pm860x_rtc_pdata { + int (*sync)(unsigned int ticks); + int vrtc; +}; + +struct pm860x_touch_pdata { + int gpadc_prebias; + int slot_cycle; + int off_scale; + int sw_cal; + int tsi_prebias; /* time, slot */ + int pen_prebias; /* time, slot */ + int pen_prechg; /* time, slot */ + int res_x; /* resistor of Xplate */ + unsigned long flags; +}; + +struct pm860x_power_pdata { + int max_capacity; + int resistor; +}; + +struct pm860x_platform_data { + struct pm860x_backlight_pdata *backlight; + struct pm860x_led_pdata *led; + struct pm860x_rtc_pdata *rtc; + struct pm860x_touch_pdata *touch; + struct pm860x_power_pdata *power; + struct regulator_init_data *buck1; + struct regulator_init_data *buck2; + struct regulator_init_data *buck3; + struct regulator_init_data *ldo1; + struct regulator_init_data *ldo2; + struct regulator_init_data *ldo3; + struct regulator_init_data *ldo4; + struct regulator_init_data *ldo5; + struct regulator_init_data *ldo6; + struct regulator_init_data *ldo7; + struct regulator_init_data *ldo8; + struct regulator_init_data *ldo9; + struct regulator_init_data *ldo10; + struct regulator_init_data *ldo12; + struct regulator_init_data *ldo_vibrator; + struct regulator_init_data *ldo14; + struct charger_desc *chg_desc; + + int companion_addr; /* I2C address of companion chip */ + int i2c_port; /* Controlled by GI2C or PI2C */ + int irq_mode; /* Clear interrupt by read/write(0/1) */ + int irq_base; /* IRQ base number of 88pm860x */ + int num_leds; + int num_backlights; +}; + +extern int pm8606_osc_enable(struct pm860x_chip *, unsigned short); +extern int pm8606_osc_disable(struct pm860x_chip *, unsigned short); + +extern int pm860x_reg_read(struct i2c_client *, int); +extern int pm860x_reg_write(struct i2c_client *, int, unsigned char); +extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *); +extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *); +extern int pm860x_set_bits(struct i2c_client *, int, unsigned char, + unsigned char); +extern int pm860x_page_reg_read(struct i2c_client *, int); +extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char); +extern int pm860x_page_bulk_read(struct i2c_client *, int, int, + unsigned char *); +extern int pm860x_page_bulk_write(struct i2c_client *, int, int, + unsigned char *); +extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char, + unsigned char); + +#endif /* __LINUX_MFD_88PM860X_H */ diff --git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h new file mode 100644 index 000000000..f7316c29b --- /dev/null +++ b/include/linux/mfd/aat2870.h @@ -0,0 +1,181 @@ +/* + * linux/include/linux/mfd/aat2870.h + * + * Copyright (c) 2011, NVIDIA Corporation. + * Author: Jin Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_MFD_AAT2870_H +#define __LINUX_MFD_AAT2870_H + +#include +#include + +/* Register offsets */ +#define AAT2870_BL_CH_EN 0x00 +#define AAT2870_BLM 0x01 +#define AAT2870_BLS 0x02 +#define AAT2870_BL1 0x03 +#define AAT2870_BL2 0x04 +#define AAT2870_BL3 0x05 +#define AAT2870_BL4 0x06 +#define AAT2870_BL5 0x07 +#define AAT2870_BL6 0x08 +#define AAT2870_BL7 0x09 +#define AAT2870_BL8 0x0A +#define AAT2870_FLR 0x0B +#define AAT2870_FM 0x0C +#define AAT2870_FS 0x0D +#define AAT2870_ALS_CFG0 0x0E +#define AAT2870_ALS_CFG1 0x0F +#define AAT2870_ALS_CFG2 0x10 +#define AAT2870_AMB 0x11 +#define AAT2870_ALS0 0x12 +#define AAT2870_ALS1 0x13 +#define AAT2870_ALS2 0x14 +#define AAT2870_ALS3 0x15 +#define AAT2870_ALS4 0x16 +#define AAT2870_ALS5 0x17 +#define AAT2870_ALS6 0x18 +#define AAT2870_ALS7 0x19 +#define AAT2870_ALS8 0x1A +#define AAT2870_ALS9 0x1B +#define AAT2870_ALSA 0x1C +#define AAT2870_ALSB 0x1D +#define AAT2870_ALSC 0x1E +#define AAT2870_ALSD 0x1F +#define AAT2870_ALSE 0x20 +#define AAT2870_ALSF 0x21 +#define AAT2870_SUB_SET 0x22 +#define AAT2870_SUB_CTRL 0x23 +#define AAT2870_LDO_AB 0x24 +#define AAT2870_LDO_CD 0x25 +#define AAT2870_LDO_EN 0x26 +#define AAT2870_REG_NUM 0x27 + +/* Device IDs */ +enum aat2870_id { + AAT2870_ID_BL, + AAT2870_ID_LDOA, + AAT2870_ID_LDOB, + AAT2870_ID_LDOC, + AAT2870_ID_LDOD +}; + +/* Backlight channels */ +#define AAT2870_BL_CH1 0x01 +#define AAT2870_BL_CH2 0x02 +#define AAT2870_BL_CH3 0x04 +#define AAT2870_BL_CH4 0x08 +#define AAT2870_BL_CH5 0x10 +#define AAT2870_BL_CH6 0x20 +#define AAT2870_BL_CH7 0x40 +#define AAT2870_BL_CH8 0x80 +#define AAT2870_BL_CH_ALL 0xFF + +/* Backlight current magnitude (mA) */ +enum aat2870_current { + AAT2870_CURRENT_0_45 = 1, + AAT2870_CURRENT_0_90, + AAT2870_CURRENT_1_80, + AAT2870_CURRENT_2_70, + AAT2870_CURRENT_3_60, + AAT2870_CURRENT_4_50, + AAT2870_CURRENT_5_40, + AAT2870_CURRENT_6_30, + AAT2870_CURRENT_7_20, + AAT2870_CURRENT_8_10, + AAT2870_CURRENT_9_00, + AAT2870_CURRENT_9_90, + AAT2870_CURRENT_10_8, + AAT2870_CURRENT_11_7, + AAT2870_CURRENT_12_6, + AAT2870_CURRENT_13_5, + AAT2870_CURRENT_14_4, + AAT2870_CURRENT_15_3, + AAT2870_CURRENT_16_2, + AAT2870_CURRENT_17_1, + AAT2870_CURRENT_18_0, + AAT2870_CURRENT_18_9, + AAT2870_CURRENT_19_8, + AAT2870_CURRENT_20_7, + AAT2870_CURRENT_21_6, + AAT2870_CURRENT_22_5, + AAT2870_CURRENT_23_4, + AAT2870_CURRENT_24_3, + AAT2870_CURRENT_25_2, + AAT2870_CURRENT_26_1, + AAT2870_CURRENT_27_0, + AAT2870_CURRENT_27_9 +}; + +struct aat2870_register { + bool readable; + bool writeable; + u8 value; +}; + +struct aat2870_data { + struct device *dev; + struct i2c_client *client; + + struct mutex io_lock; + struct aat2870_register *reg_cache; /* register cache */ + int en_pin; /* enable GPIO pin (if < 0, ignore this value) */ + bool is_enable; + + /* init and uninit for platform specified */ + int (*init)(struct aat2870_data *aat2870); + void (*uninit)(struct aat2870_data *aat2870); + + /* i2c io funcntions */ + int (*read)(struct aat2870_data *aat2870, u8 addr, u8 *val); + int (*write)(struct aat2870_data *aat2870, u8 addr, u8 val); + int (*update)(struct aat2870_data *aat2870, u8 addr, u8 mask, u8 val); + + /* for debugfs */ + struct dentry *dentry_root; + struct dentry *dentry_reg; +}; + +struct aat2870_subdev_info { + int id; + const char *name; + void 
*platform_data; +}; + +struct aat2870_platform_data { + int en_pin; /* enable GPIO pin (if < 0, ignore this value) */ + + struct aat2870_subdev_info *subdevs; + int num_subdevs; + + /* init and uninit for platform specified */ + int (*init)(struct aat2870_data *aat2870); + void (*uninit)(struct aat2870_data *aat2870); +}; + +struct aat2870_bl_platform_data { + /* backlight channels, default is AAT2870_BL_CH_ALL */ + int channels; + /* backlight current magnitude, default is AAT2870_CURRENT_27_9 */ + int max_current; + /* maximum brightness, default is 255 */ + int max_brightness; +}; + +#endif /* __LINUX_MFD_AAT2870_H */ diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h new file mode 100644 index 000000000..afd3080bd --- /dev/null +++ b/include/linux/mfd/ab3100.h @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2007-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * AB3100 core access functions + * Author: Linus Walleij + * + */ + +#include + +struct device; + +#ifndef MFD_AB3100_H +#define MFD_AB3100_H + + +#define AB3100_P1A 0xc0 +#define AB3100_P1B 0xc1 +#define AB3100_P1C 0xc2 +#define AB3100_P1D 0xc3 +#define AB3100_P1E 0xc4 +#define AB3100_P1F 0xc5 +#define AB3100_P1G 0xc6 +#define AB3100_R2A 0xc7 +#define AB3100_R2B 0xc8 + +/* + * AB3100, EVENTA1, A2 and A3 event register flags + * these are catenated into a single 32-bit flag in the code + * for event notification broadcasts. + */ +#define AB3100_EVENTA1_ONSWA (0x01<<16) +#define AB3100_EVENTA1_ONSWB (0x02<<16) +#define AB3100_EVENTA1_ONSWC (0x04<<16) +#define AB3100_EVENTA1_DCIO (0x08<<16) +#define AB3100_EVENTA1_OVER_TEMP (0x10<<16) +#define AB3100_EVENTA1_SIM_OFF (0x20<<16) +#define AB3100_EVENTA1_VBUS (0x40<<16) +#define AB3100_EVENTA1_VSET_USB (0x80<<16) + +#define AB3100_EVENTA2_READY_TX (0x01<<8) +#define AB3100_EVENTA2_READY_RX (0x02<<8) +#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8) +#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8) +#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8) +#define AB3100_EVENTA2_MIDR (0x20<<8) +#define AB3100_EVENTA2_BATTERY_REM (0x40<<8) +#define AB3100_EVENTA2_ALARM (0x80<<8) + +#define AB3100_EVENTA3_ADC_TRIG5 (0x01) +#define AB3100_EVENTA3_ADC_TRIG4 (0x02) +#define AB3100_EVENTA3_ADC_TRIG3 (0x04) +#define AB3100_EVENTA3_ADC_TRIG2 (0x08) +#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10) +#define AB3100_EVENTA3_ADC_TRIGVTX (0x20) +#define AB3100_EVENTA3_ADC_TRIG1 (0x40) +#define AB3100_EVENTA3_ADC_TRIG0 (0x80) + +/* AB3100, STR register flags */ +#define AB3100_STR_ONSWA (0x01) +#define AB3100_STR_ONSWB (0x02) +#define AB3100_STR_ONSWC (0x04) +#define AB3100_STR_DCIO (0x08) +#define AB3100_STR_BOOT_MODE (0x10) +#define AB3100_STR_SIM_OFF (0x20) +#define AB3100_STR_BATT_REMOVAL (0x40) +#define AB3100_STR_VBUS (0x80) + +/* + * AB3100 contains 8 regulators, one external regulator controller + * and a buck converter, further the LDO E and buck converter can + * have separate settings if they are in sleep mode, this is + * modeled as a separate regulator. 
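+ *
+ * In the order used by the platform data constraints below, these ten
+ * regulators are: LDO A, C, D, E, F, G, H, K, EXT and BUCK.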
+ */ +#define AB3100_NUM_REGULATORS 10 + +/** + * struct ab3100 + * @access_mutex: lock out concurrent accesses to the AB3100 registers + * @dev: pointer to the containing device + * @i2c_client: I2C client for this chip + * @testreg_client: secondary client for test registers + * @chip_name: name of this chip variant + * @chip_id: 8 bit chip ID for this chip variant + * @event_subscribers: event subscribers are listed here + * @startup_events: a copy of the first reading of the event registers + * @startup_events_read: whether the first events have been read + * + * This struct is PRIVATE and devices using it should NOT + * access ANY fields. It is used as a token for calling the + * AB3100 functions. + */ +struct ab3100 { + struct mutex access_mutex; + struct device *dev; + struct i2c_client *i2c_client; + struct i2c_client *testreg_client; + char chip_name[32]; + u8 chip_id; + struct blocking_notifier_head event_subscribers; + u8 startup_events[3]; + bool startup_events_read; +}; + +/** + * struct ab3100_platform_data + * Data supplied to initialize board connections to the AB3100 + * @reg_constraints: regulator constraints for target board + * the order of these constraints are: LDO A, C, D, E, + * F, G, H, K, EXT and BUCK. + * @reg_initvals: initial values for the regulator registers + * plus two sleep settings for LDO E and the BUCK converter. + * exactly AB3100_NUM_REGULATORS+2 values must be sent in. + * Order: LDO A, C, E, E sleep, F, G, H, K, EXT, BUCK, + * BUCK sleep, LDO D. (LDO D need to be initialized last.) + * @external_voltage: voltage level of the external regulator. + */ +struct ab3100_platform_data { + struct regulator_init_data reg_constraints[AB3100_NUM_REGULATORS]; + u8 reg_initvals[AB3100_NUM_REGULATORS+2]; + int external_voltage; +}; + +int ab3100_event_register(struct ab3100 *ab3100, + struct notifier_block *nb); +int ab3100_event_unregister(struct ab3100 *ab3100, + struct notifier_block *nb); + +#endif /* MFD_AB3100_H */ diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h new file mode 100644 index 000000000..aa0941475 --- /dev/null +++ b/include/linux/mfd/abx500.h @@ -0,0 +1,347 @@ +/* + * Copyright (C) 2007-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * + * ABX500 core access functions. + * The abx500 interface is used for the Analog Baseband chips. + * + * Author: Mattias Wallin + * Author: Mattias Nilsson + * Author: Bengt Jonsson + * Author: Rickard Andersson + */ + +#include + +struct device; + +#ifndef MFD_ABX500_H +#define MFD_ABX500_H + +/** + * struct abx500_init_setting + * Initial value of the registers for driver to use during setup. + */ +struct abx500_init_settings { + u8 bank; + u8 reg; + u8 setting; +}; + +/* Battery driver related data */ +/* + * ADC for the battery thermistor. + * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined + * with a NTC resistor to both identify the battery and to measure its + * temperature. Different phone manufactures uses different techniques to both + * identify the battery and to read its temperature. + */ +enum abx500_adc_therm { + ABx500_ADC_THERM_BATCTRL, + ABx500_ADC_THERM_BATTEMP, +}; + +/** + * struct abx500_res_to_temp - defines one point in a temp to res curve. To + * be used in battery packs that combines the identification resistor with a + * NTC resistor. 
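+ * A board typically supplies an array of such points, for instance
+ * (illustrative values only):
+ *
+ *	{ .temp = -20, .resist = 67400 },
+ *	{ .temp =  25, .resist = 10000 },
+ *	{ .temp =  60, .resist =  2480 },
+ *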
+ * @temp: battery pack temperature in Celsius + * @resist: NTC resistor net total resistance + */ +struct abx500_res_to_temp { + int temp; + int resist; +}; + +/** + * struct abx500_v_to_cap - Table for translating voltage to capacity + * @voltage: Voltage in mV + * @capacity: Capacity in percent + */ +struct abx500_v_to_cap { + int voltage; + int capacity; +}; + +/* Forward declaration */ +struct abx500_fg; + +/** + * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds + * if not specified + * @recovery_sleep_timer: Time between measurements while recovering + * @recovery_total_time: Total recovery time + * @init_timer: Measurement interval during startup + * @init_discard_time: Time we discard voltage measurement at startup + * @init_total_time: Total init time during startup + * @high_curr_time: Time current has to be high to go to recovery + * @accu_charging: FG accumulation time while charging + * @accu_high_curr: FG accumulation time in high current mode + * @high_curr_threshold: High current threshold, in mA + * @lowbat_threshold: Low battery threshold, in mV + * @overbat_threshold: Over battery threshold, in mV + * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0 + * Resolution in 50 mV step. + * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1 + * Resolution in 50 mV step. + * @user_cap_limit Capacity reported from user must be within this + * limit to be considered as sane, in percentage + * points. + * @maint_thres This is the threshold where we stop reporting + * battery full while in maintenance, in per cent + * @pcut_enable: Enable power cut feature in ab8505 + * @pcut_max_time: Max time threshold + * @pcut_flag_time: Flagtime threshold + * @pcut_max_restart: Max number of restarts + * @pcut_debounce_time: Sets battery debounce time + */ +struct abx500_fg_parameters { + int recovery_sleep_timer; + int recovery_total_time; + int init_timer; + int init_discard_time; + int init_total_time; + int high_curr_time; + int accu_charging; + int accu_high_curr; + int high_curr_threshold; + int lowbat_threshold; + int overbat_threshold; + int battok_falling_th_sel0; + int battok_raising_th_sel1; + int user_cap_limit; + int maint_thres; + bool pcut_enable; + u8 pcut_max_time; + u8 pcut_flag_time; + u8 pcut_max_restart; + u8 pcut_debounce_time; +}; + +/** + * struct abx500_charger_maximization - struct used by the board config. 
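+ * (The matching fields of struct abx500_maxim_parameters below are named
+ * ena_maxi, chg_curr, wait_cycles and charger_curr_step.)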
+ * @use_maxi: Enable maximization for this battery type + * @maxi_chg_curr: Maximum charger current allowed + * @maxi_wait_cycles: cycles to wait before setting charger current + * @charger_curr_step delta between two charger current settings (mA) + */ +struct abx500_maxim_parameters { + bool ena_maxi; + int chg_curr; + int wait_cycles; + int charger_curr_step; +}; + +/** + * struct abx500_battery_type - different batteries supported + * @name: battery technology + * @resis_high: battery upper resistance limit + * @resis_low: battery lower resistance limit + * @charge_full_design: Maximum battery capacity in mAh + * @nominal_voltage: Nominal voltage of the battery in mV + * @termination_vol: max voltage upto which battery can be charged + * @termination_curr battery charging termination current in mA + * @recharge_cap battery capacity limit that will trigger a new + * full charging cycle in the case where maintenan- + * -ce charging has been disabled + * @normal_cur_lvl: charger current in normal state in mA + * @normal_vol_lvl: charger voltage in normal state in mV + * @maint_a_cur_lvl: charger current in maintenance A state in mA + * @maint_a_vol_lvl: charger voltage in maintenance A state in mV + * @maint_a_chg_timer_h: charge time in maintenance A state + * @maint_b_cur_lvl: charger current in maintenance B state in mA + * @maint_b_vol_lvl: charger voltage in maintenance B state in mV + * @maint_b_chg_timer_h: charge time in maintenance B state + * @low_high_cur_lvl: charger current in temp low/high state in mA + * @low_high_vol_lvl: charger voltage in temp low/high state in mV' + * @battery_resistance: battery inner resistance in mOhm. + * @n_r_t_tbl_elements: number of elements in r_to_t_tbl + * @r_to_t_tbl: table containing resistance to temp points + * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl + * @v_to_cap_tbl: Voltage to capacity (in %) table + * @n_batres_tbl_elements number of elements in the batres_tbl + * @batres_tbl battery internal resistance vs temperature table + */ +struct abx500_battery_type { + int name; + int resis_high; + int resis_low; + int charge_full_design; + int nominal_voltage; + int termination_vol; + int termination_curr; + int recharge_cap; + int normal_cur_lvl; + int normal_vol_lvl; + int maint_a_cur_lvl; + int maint_a_vol_lvl; + int maint_a_chg_timer_h; + int maint_b_cur_lvl; + int maint_b_vol_lvl; + int maint_b_chg_timer_h; + int low_high_cur_lvl; + int low_high_vol_lvl; + int battery_resistance; + int n_temp_tbl_elements; + const struct abx500_res_to_temp *r_to_t_tbl; + int n_v_cap_tbl_elements; + const struct abx500_v_to_cap *v_to_cap_tbl; + int n_batres_tbl_elements; + const struct batres_vs_temp *batres_tbl; +}; + +/** + * struct abx500_bm_capacity_levels - abx500 capacity level data + * @critical: critical capacity level in percent + * @low: low capacity level in percent + * @normal: normal capacity level in percent + * @high: high capacity level in percent + * @full: full capacity level in percent + */ +struct abx500_bm_capacity_levels { + int critical; + int low; + int normal; + int high; + int full; +}; + +/** + * struct abx500_bm_charger_parameters - Charger specific parameters + * @usb_volt_max: maximum allowed USB charger voltage in mV + * @usb_curr_max: maximum allowed USB charger current in mA + * @ac_volt_max: maximum allowed AC charger voltage in mV + * @ac_curr_max: maximum allowed AC charger current in mA + */ +struct abx500_bm_charger_parameters { + int usb_volt_max; + int usb_curr_max; + int ac_volt_max; + int 
ac_curr_max; +}; + +/** + * struct abx500_bm_data - abx500 battery management data + * @temp_under under this temp, charging is stopped + * @temp_low between this temp and temp_under charging is reduced + * @temp_high between this temp and temp_over charging is reduced + * @temp_over over this temp, charging is stopped + * @temp_now present battery temperature + * @temp_interval_chg temperature measurement interval in s when charging + * @temp_interval_nochg temperature measurement interval in s when not charging + * @main_safety_tmr_h safety timer for main charger + * @usb_safety_tmr_h safety timer for usb charger + * @bkup_bat_v voltage which we charge the backup battery with + * @bkup_bat_i current which we charge the backup battery with + * @no_maintenance indicates that maintenance charging is disabled + * @capacity_scaling indicates whether capacity scaling is to be used + * @abx500_adc_therm placement of thermistor, batctrl or battemp adc + * @chg_unknown_bat flag to enable charging of unknown batteries + * @enable_overshoot flag to enable VBAT overshoot control + * @auto_trig flag to enable auto adc trigger + * @fg_res resistance of FG resistor in 0.1mOhm + * @n_btypes number of elements in array bat_type + * @batt_id index of the identified battery in array bat_type + * @interval_charging charge alg cycle period time when charging (sec) + * @interval_not_charging charge alg cycle period time when not charging (sec) + * @temp_hysteresis temperature hysteresis + * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm) + * @n_chg_out_curr number of elements in array chg_output_curr + * @n_chg_in_curr number of elements in array chg_input_curr + * @chg_output_curr charger output current level map + * @chg_input_curr charger input current level map + * @maxi maximization parameters + * @cap_levels capacity in percent for the different capacity levels + * @bat_type table of supported battery types + * @chg_params charger parameters + * @fg_params fuel gauge parameters + */ +struct abx500_bm_data { + int temp_under; + int temp_low; + int temp_high; + int temp_over; + int temp_now; + int temp_interval_chg; + int temp_interval_nochg; + int main_safety_tmr_h; + int usb_safety_tmr_h; + int bkup_bat_v; + int bkup_bat_i; + bool autopower_cfg; + bool ac_enabled; + bool usb_enabled; + bool no_maintenance; + bool capacity_scaling; + bool chg_unknown_bat; + bool enable_overshoot; + bool auto_trig; + enum abx500_adc_therm adc_therm; + int fg_res; + int n_btypes; + int batt_id; + int interval_charging; + int interval_not_charging; + int temp_hysteresis; + int gnd_lift_resistance; + int n_chg_out_curr; + int n_chg_in_curr; + int *chg_output_curr; + int *chg_input_curr; + const struct abx500_maxim_parameters *maxi; + const struct abx500_bm_capacity_levels *cap_levels; + struct abx500_battery_type *bat_type; + const struct abx500_bm_charger_parameters *chg_params; + const struct abx500_fg_parameters *fg_params; +}; + +enum { + NTC_EXTERNAL = 0, + NTC_INTERNAL, +}; + +int ab8500_bm_of_probe(struct device *dev, + struct device_node *np, + struct abx500_bm_data *bm); + +int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 value); +int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 *value); +int abx500_get_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs); +int abx500_set_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs); +/** + * 
abx500_mask_and_set_register_inerruptible() - Modifies selected bits of a + * target register + * + * @dev: The AB sub device. + * @bank: The i2c bank number. + * @bitmask: The bit mask to use. + * @bitvalues: The new bit values. + * + * Updates the value of an AB register: + * value -> ((value & ~bitmask) | (bitvalues & bitmask)) + */ +int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues); +int abx500_get_chip_id(struct device *dev); +int abx500_event_registers_startup_state_get(struct device *dev, u8 *event); +int abx500_startup_irq_enabled(struct device *dev, unsigned int irq); + +struct abx500_ops { + int (*get_chip_id) (struct device *); + int (*get_register) (struct device *, u8, u8, u8 *); + int (*set_register) (struct device *, u8, u8, u8); + int (*get_register_page) (struct device *, u8, u8, u8 *, u8); + int (*set_register_page) (struct device *, u8, u8, u8 *, u8); + int (*mask_and_set_register) (struct device *, u8, u8, u8, u8); + int (*event_registers_startup_state_get) (struct device *, u8 *); + int (*startup_irq_enabled) (struct device *, unsigned int); + void (*dump_all_banks) (struct device *); +}; + +int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops); +void abx500_remove_ops(struct device *dev); +#endif diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h new file mode 100644 index 000000000..c06daf3d4 --- /dev/null +++ b/include/linux/mfd/abx500/ab8500-bm.h @@ -0,0 +1,476 @@ +/* + * Copyright ST-Ericsson 2012. + * + * Author: Arun Murthy + * Licensed under GPLv2. + */ + +#ifndef _AB8500_BM_H +#define _AB8500_BM_H + +#include +#include + +/* + * System control 2 register offsets. + * bank = 0x02 + */ +#define AB8500_MAIN_WDOG_CTRL_REG 0x01 +#define AB8500_LOW_BAT_REG 0x03 +#define AB8500_BATT_OK_REG 0x04 +/* + * USB/ULPI register offsets + * Bank : 0x5 + */ +#define AB8500_USB_LINE_STAT_REG 0x80 +#define AB8500_USB_LINE_CTRL2_REG 0x82 +#define AB8500_USB_LINK1_STAT_REG 0x94 + +/* + * Charger / status register offfsets + * Bank : 0x0B + */ +#define AB8500_CH_STATUS1_REG 0x00 +#define AB8500_CH_STATUS2_REG 0x01 +#define AB8500_CH_USBCH_STAT1_REG 0x02 +#define AB8500_CH_USBCH_STAT2_REG 0x03 +#define AB8540_CH_USBCH_STAT3_REG 0x04 +#define AB8500_CH_STAT_REG 0x05 + +/* + * Charger / control register offfsets + * Bank : 0x0B + */ +#define AB8500_CH_VOLT_LVL_REG 0x40 +#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/ +#define AB8500_CH_OPT_CRNTLVL_REG 0x42 +#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/ +#define AB8500_CH_WD_TIMER_REG 0x50 +#define AB8500_CHARG_WD_CTRL 0x51 +#define AB8500_BTEMP_HIGH_TH 0x52 +#define AB8500_LED_INDICATOR_PWM_CTRL 0x53 +#define AB8500_LED_INDICATOR_PWM_DUTY 0x54 +#define AB8500_BATT_OVV 0x55 +#define AB8500_CHARGER_CTRL 0x56 +#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/ + +/* + * Charger / main control register offsets + * Bank : 0x0B + */ +#define AB8500_MCH_CTRL1 0x80 +#define AB8500_MCH_CTRL2 0x81 +#define AB8500_MCH_IPT_CURLVL_REG 0x82 +#define AB8500_CH_WD_REG 0x83 + +/* + * Charger / USB control register offsets + * Bank : 0x0B + */ +#define AB8500_USBCH_CTRL1_REG 0xC0 +#define AB8500_USBCH_CTRL2_REG 0xC1 +#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2 +#define AB8540_USB_PP_MODE_REG 0xC5 +#define AB8540_USB_PP_CHR_REG 0xC6 + +/* + * Gas Gauge register offsets + * Bank : 0x0C + */ +#define AB8500_GASG_CC_CTRL_REG 0x00 +#define AB8500_GASG_CC_ACCU1_REG 0x01 +#define AB8500_GASG_CC_ACCU2_REG 
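Editor's note: the update rule documented above, value -> ((value & ~bitmask) | (bitvalues & bitmask)), means only the bits selected by the mask change; everything else in the register is preserved across the I2C read-modify-write. A minimal caller sketch follows; the bank and register numbers are placeholders, not values taken from this header.

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mfd/abx500.h>

/*
 * Illustrative only: set bit 0 and clear bit 1 of a register in a single
 * masked write, leaving all other bits untouched. The bank/register
 * numbers are placeholders for whatever a real driver targets.
 */
static int example_update_bits(struct device *dev)
{
        u8 bank = 0x02;     /* hypothetical bank */
        u8 reg  = 0x01;     /* hypothetical register */

        /* new value = (old & ~0x03) | (0x01 & 0x03) */
        return abx500_mask_and_set_register_interruptible(dev, bank, reg,
                                                           0x03, 0x01);
}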
0x02 +#define AB8500_GASG_CC_ACCU3_REG 0x03 +#define AB8500_GASG_CC_ACCU4_REG 0x04 +#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05 +#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06 +#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07 +#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08 +#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09 +#define AB8500_GASG_CC_OFFSET_REG 0x0A +#define AB8500_GASG_CC_NCOV_ACCU 0x10 +#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11 +#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12 +#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13 +#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14 + +/* + * Interrupt register offsets + * Bank : 0x0E + */ +#define AB8500_IT_SOURCE2_REG 0x01 +#define AB8500_IT_SOURCE21_REG 0x14 + +/* + * RTC register offsets + * Bank: 0x0F + */ +#define AB8500_RTC_BACKUP_CHG_REG 0x0C +#define AB8500_RTC_CC_CONF_REG 0x01 +#define AB8500_RTC_CTRL_REG 0x0B +#define AB8500_RTC_CTRL1_REG 0x11 + +/* + * OTP register offsets + * Bank : 0x15 + */ +#define AB8500_OTP_CONF_15 0x0E + +/* GPADC constants from AB8500 spec, UM0836 */ +#define ADC_RESOLUTION 1024 +#define ADC_CH_MAIN_MIN 0 +#define ADC_CH_MAIN_MAX 20030 +#define ADC_CH_VBUS_MIN 0 +#define ADC_CH_VBUS_MAX 20030 +#define ADC_CH_VBAT_MIN 2300 +#define ADC_CH_VBAT_MAX 4800 +#define ADC_CH_BKBAT_MIN 0 +#define ADC_CH_BKBAT_MAX 3200 + +/* Main charge i/p current */ +#define MAIN_CH_IP_CUR_0P9A 0x80 +#define MAIN_CH_IP_CUR_1P0A 0x90 +#define MAIN_CH_IP_CUR_1P1A 0xA0 +#define MAIN_CH_IP_CUR_1P2A 0xB0 +#define MAIN_CH_IP_CUR_1P3A 0xC0 +#define MAIN_CH_IP_CUR_1P4A 0xD0 +#define MAIN_CH_IP_CUR_1P5A 0xE0 + +/* ChVoltLevel */ +#define CH_VOL_LVL_3P5 0x00 +#define CH_VOL_LVL_4P0 0x14 +#define CH_VOL_LVL_4P05 0x16 +#define CH_VOL_LVL_4P1 0x1B +#define CH_VOL_LVL_4P15 0x20 +#define CH_VOL_LVL_4P2 0x25 +#define CH_VOL_LVL_4P6 0x4D + +/* ChOutputCurrentLevel */ +#define CH_OP_CUR_LVL_0P1 0x00 +#define CH_OP_CUR_LVL_0P2 0x01 +#define CH_OP_CUR_LVL_0P3 0x02 +#define CH_OP_CUR_LVL_0P4 0x03 +#define CH_OP_CUR_LVL_0P5 0x04 +#define CH_OP_CUR_LVL_0P6 0x05 +#define CH_OP_CUR_LVL_0P7 0x06 +#define CH_OP_CUR_LVL_0P8 0x07 +#define CH_OP_CUR_LVL_0P9 0x08 +#define CH_OP_CUR_LVL_1P4 0x0D +#define CH_OP_CUR_LVL_1P5 0x0E +#define CH_OP_CUR_LVL_1P6 0x0F +#define CH_OP_CUR_LVL_2P 0x3F + +/* BTEMP High thermal limits */ +#define BTEMP_HIGH_TH_57_0 0x00 +#define BTEMP_HIGH_TH_52 0x01 +#define BTEMP_HIGH_TH_57_1 0x02 +#define BTEMP_HIGH_TH_62 0x03 + +/* current is mA */ +#define USB_0P1A 100 +#define USB_0P2A 200 +#define USB_0P3A 300 +#define USB_0P4A 400 +#define USB_0P5A 500 + +#define LOW_BAT_3P1V 0x20 +#define LOW_BAT_2P3V 0x00 +#define LOW_BAT_RESET 0x01 +#define LOW_BAT_ENABLE 0x01 + +/* Backup battery constants */ +#define BUP_ICH_SEL_50UA 0x00 +#define BUP_ICH_SEL_150UA 0x04 +#define BUP_ICH_SEL_300UA 0x08 +#define BUP_ICH_SEL_700UA 0x0C + +enum bup_vch_sel { + BUP_VCH_SEL_2P5V, + BUP_VCH_SEL_2P6V, + BUP_VCH_SEL_2P8V, + BUP_VCH_SEL_3P1V, + /* + * Note that the following 5 values 2.7v, 2.9v, 3.0v, 3.2v, 3.3v + * are only available on ab8540. You can't choose these 5 + * voltage on ab8500/ab8505/ab9540. 
+ */ + BUP_VCH_SEL_2P7V, + BUP_VCH_SEL_2P9V, + BUP_VCH_SEL_3P0V, + BUP_VCH_SEL_3P2V, + BUP_VCH_SEL_3P3V, +}; + +#define BUP_VCH_RANGE 0x02 +#define VBUP33_VRTCN 0x01 + +/* Battery OVV constants */ +#define BATT_OVV_ENA 0x02 +#define BATT_OVV_TH_3P7 0x00 +#define BATT_OVV_TH_4P75 0x01 + +/* A value to indicate over voltage */ +#define BATT_OVV_VALUE 4750 + +/* VBUS OVV constants */ +#define VBUS_OVV_SELECT_MASK 0x78 +#define VBUS_OVV_SELECT_5P6V 0x00 +#define VBUS_OVV_SELECT_5P7V 0x08 +#define VBUS_OVV_SELECT_5P8V 0x10 +#define VBUS_OVV_SELECT_5P9V 0x18 +#define VBUS_OVV_SELECT_6P0V 0x20 +#define VBUS_OVV_SELECT_6P1V 0x28 +#define VBUS_OVV_SELECT_6P2V 0x30 +#define VBUS_OVV_SELECT_6P3V 0x38 + +#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04 + +/* Fuel Gauge constants */ +#define RESET_ACCU 0x02 +#define READ_REQ 0x01 +#define CC_DEEP_SLEEP_ENA 0x02 +#define CC_PWR_UP_ENA 0x01 +#define CC_SAMPLES_40 0x28 +#define RD_NCONV_ACCU_REQ 0x01 +#define CC_CALIB 0x08 +#define CC_INTAVGOFFSET_ENA 0x10 +#define CC_MUXOFFSET 0x80 +#define CC_INT_CAL_N_AVG_MASK 0x60 +#define CC_INT_CAL_SAMPLES_16 0x40 +#define CC_INT_CAL_SAMPLES_8 0x20 +#define CC_INT_CAL_SAMPLES_4 0x00 + +/* RTC constants */ +#define RTC_BUP_CH_ENA 0x10 + +/* BatCtrl Current Source Constants */ +#define BAT_CTRL_7U_ENA 0x01 +#define BAT_CTRL_20U_ENA 0x02 +#define BAT_CTRL_18U_ENA 0x01 +#define BAT_CTRL_16U_ENA 0x02 +#define BAT_CTRL_CMP_ENA 0x04 +#define FORCE_BAT_CTRL_CMP_HIGH 0x08 +#define BAT_CTRL_PULL_UP_ENA 0x10 + +/* Battery type */ +#define BATTERY_UNKNOWN 00 + +/* Registers for pcut feature in ab8505 and ab9540 */ +#define AB8505_RTC_PCUT_CTL_STATUS_REG 0x12 +#define AB8505_RTC_PCUT_TIME_REG 0x13 +#define AB8505_RTC_PCUT_MAX_TIME_REG 0x14 +#define AB8505_RTC_PCUT_FLAG_TIME_REG 0x15 +#define AB8505_RTC_PCUT_RESTART_REG 0x16 +#define AB8505_RTC_PCUT_DEBOUNCE_REG 0x17 + +/* USB Power Path constants for ab8540 */ +#define BUS_VSYS_VOL_SELECT_MASK 0x06 +#define BUS_VSYS_VOL_SELECT_3P6V 0x00 +#define BUS_VSYS_VOL_SELECT_3P325V 0x02 +#define BUS_VSYS_VOL_SELECT_3P9V 0x04 +#define BUS_VSYS_VOL_SELECT_4P3V 0x06 +#define BUS_POWER_PATH_MODE_ENA 0x01 +#define BUS_PP_PRECHG_CURRENT_MASK 0x0E +#define BUS_POWER_PATH_PRECHG_ENA 0x01 + +/** + * struct res_to_temp - defines one point in a temp to res curve. To + * be used in battery packs that combines the identification resistor with a + * NTC resistor. + * @temp: battery pack temperature in Celsius + * @resist: NTC resistor net total resistance + */ +struct res_to_temp { + int temp; + int resist; +}; + +/** + * struct batres_vs_temp - defines one point in a temp vs battery internal + * resistance curve. 
+ * @temp: battery pack temperature in Celsius + * @resist: battery internal reistance in mOhm + */ +struct batres_vs_temp { + int temp; + int resist; +}; + +/* Forward declaration */ +struct ab8500_fg; + +/** + * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds + * if not specified + * @recovery_sleep_timer: Time between measurements while recovering + * @recovery_total_time: Total recovery time + * @init_timer: Measurement interval during startup + * @init_discard_time: Time we discard voltage measurement at startup + * @init_total_time: Total init time during startup + * @high_curr_time: Time current has to be high to go to recovery + * @accu_charging: FG accumulation time while charging + * @accu_high_curr: FG accumulation time in high current mode + * @high_curr_threshold: High current threshold, in mA + * @lowbat_threshold: Low battery threshold, in mV + * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0 + * Resolution in 50 mV step. + * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1 + * Resolution in 50 mV step. + * @user_cap_limit Capacity reported from user must be within this + * limit to be considered as sane, in percentage + * points. + * @maint_thres This is the threshold where we stop reporting + * battery full while in maintenance, in per cent + * @pcut_enable: Enable power cut feature in ab8505 + * @pcut_max_time: Max time threshold + * @pcut_flag_time: Flagtime threshold + * @pcut_max_restart: Max number of restarts + * @pcut_debunce_time: Sets battery debounce time + */ +struct ab8500_fg_parameters { + int recovery_sleep_timer; + int recovery_total_time; + int init_timer; + int init_discard_time; + int init_total_time; + int high_curr_time; + int accu_charging; + int accu_high_curr; + int high_curr_threshold; + int lowbat_threshold; + int battok_falling_th_sel0; + int battok_raising_th_sel1; + int user_cap_limit; + int maint_thres; + bool pcut_enable; + u8 pcut_max_time; + u8 pcut_flag_time; + u8 pcut_max_restart; + u8 pcut_debunce_time; +}; + +/** + * struct ab8500_charger_maximization - struct used by the board config. 
+ * @use_maxi: Enable maximization for this battery type + * @maxi_chg_curr: Maximum charger current allowed + * @maxi_wait_cycles: cycles to wait before setting charger current + * @charger_curr_step delta between two charger current settings (mA) + */ +struct ab8500_maxim_parameters { + bool ena_maxi; + int chg_curr; + int wait_cycles; + int charger_curr_step; +}; + +/** + * struct ab8500_bm_capacity_levels - ab8500 capacity level data + * @critical: critical capacity level in percent + * @low: low capacity level in percent + * @normal: normal capacity level in percent + * @high: high capacity level in percent + * @full: full capacity level in percent + */ +struct ab8500_bm_capacity_levels { + int critical; + int low; + int normal; + int high; + int full; +}; + +/** + * struct ab8500_bm_charger_parameters - Charger specific parameters + * @usb_volt_max: maximum allowed USB charger voltage in mV + * @usb_curr_max: maximum allowed USB charger current in mA + * @ac_volt_max: maximum allowed AC charger voltage in mV + * @ac_curr_max: maximum allowed AC charger current in mA + */ +struct ab8500_bm_charger_parameters { + int usb_volt_max; + int usb_curr_max; + int ac_volt_max; + int ac_curr_max; +}; + +/** + * struct ab8500_bm_data - ab8500 battery management data + * @temp_under under this temp, charging is stopped + * @temp_low between this temp and temp_under charging is reduced + * @temp_high between this temp and temp_over charging is reduced + * @temp_over over this temp, charging is stopped + * @temp_interval_chg temperature measurement interval in s when charging + * @temp_interval_nochg temperature measurement interval in s when not charging + * @main_safety_tmr_h safety timer for main charger + * @usb_safety_tmr_h safety timer for usb charger + * @bkup_bat_v voltage which we charge the backup battery with + * @bkup_bat_i current which we charge the backup battery with + * @no_maintenance indicates that maintenance charging is disabled + * @capacity_scaling indicates whether capacity scaling is to be used + * @adc_therm placement of thermistor, batctrl or battemp adc + * @chg_unknown_bat flag to enable charging of unknown batteries + * @enable_overshoot flag to enable VBAT overshoot control + * @fg_res resistance of FG resistor in 0.1mOhm + * @n_btypes number of elements in array bat_type + * @batt_id index of the identified battery in array bat_type + * @interval_charging charge alg cycle period time when charging (sec) + * @interval_not_charging charge alg cycle period time when not charging (sec) + * @temp_hysteresis temperature hysteresis + * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm) + * @maxi: maximization parameters + * @cap_levels capacity in percent for the different capacity levels + * @bat_type table of supported battery types + * @chg_params charger parameters + * @fg_params fuel gauge parameters + */ +struct ab8500_bm_data { + int temp_under; + int temp_low; + int temp_high; + int temp_over; + int temp_interval_chg; + int temp_interval_nochg; + int main_safety_tmr_h; + int usb_safety_tmr_h; + int bkup_bat_v; + int bkup_bat_i; + bool no_maintenance; + bool capacity_scaling; + bool chg_unknown_bat; + bool enable_overshoot; + enum abx500_adc_therm adc_therm; + int fg_res; + int n_btypes; + int batt_id; + int interval_charging; + int interval_not_charging; + int temp_hysteresis; + int gnd_lift_resistance; + const struct ab8500_maxim_parameters *maxi; + const struct ab8500_bm_capacity_levels *cap_levels; + const struct ab8500_bm_charger_parameters 
*chg_params; + const struct ab8500_fg_parameters *fg_params; +}; + +struct ab8500_btemp; +struct ab8500_gpadc; +struct ab8500_fg; + +#ifdef CONFIG_AB8500_BM +extern struct abx500_bm_data ab8500_bm_data; + +void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA); +struct ab8500_btemp *ab8500_btemp_get(void); +int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp); +int ab8500_btemp_get_temp(struct ab8500_btemp *btemp); +struct ab8500_fg *ab8500_fg_get(void); +int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev); +int ab8500_fg_inst_curr_start(struct ab8500_fg *di); +int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res); +int ab8500_fg_inst_curr_started(struct ab8500_fg *di); +int ab8500_fg_inst_curr_done(struct ab8500_fg *di); + +#else +static struct abx500_bm_data ab8500_bm_data; +#endif +#endif /* _AB8500_BM_H */ diff --git a/include/linux/mfd/abx500/ab8500-codec.h b/include/linux/mfd/abx500/ab8500-codec.h new file mode 100644 index 000000000..d7079413d --- /dev/null +++ b/include/linux/mfd/abx500/ab8500-codec.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) ST-Ericsson SA 2012 + * + * Author: Ola Lilja + * for ST-Ericsson. + * + * License terms: + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#ifndef AB8500_CORE_CODEC_H +#define AB8500_CORE_CODEC_H + +/* Mic-types */ +enum amic_type { + AMIC_TYPE_SINGLE_ENDED, + AMIC_TYPE_DIFFERENTIAL +}; + +/* Mic-biases */ +enum amic_micbias { + AMIC_MICBIAS_VAMIC1, + AMIC_MICBIAS_VAMIC2, + AMIC_MICBIAS_UNKNOWN +}; + +/* Bias-voltage */ +enum ear_cm_voltage { + EAR_CMV_0_95V, + EAR_CMV_1_10V, + EAR_CMV_1_27V, + EAR_CMV_1_58V, + EAR_CMV_UNKNOWN +}; + +/* Analog microphone settings */ +struct amic_settings { + enum amic_type mic1_type; + enum amic_type mic2_type; + enum amic_micbias mic1a_micbias; + enum amic_micbias mic1b_micbias; + enum amic_micbias mic2_micbias; +}; + +/* Platform data structure for the audio-parts of the AB8500 */ +struct ab8500_codec_platform_data { + struct amic_settings amics; + enum ear_cm_voltage ear_cmv; +}; + +#endif diff --git a/include/linux/mfd/abx500/ab8500-gpadc.h b/include/linux/mfd/abx500/ab8500-gpadc.h new file mode 100644 index 000000000..49ded0010 --- /dev/null +++ b/include/linux/mfd/abx500/ab8500-gpadc.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2010 ST-Ericsson SA + * Licensed under GPLv2. 
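Editor's note: the fuel-gauge declarations above expose both a blocking instantaneous-current read and a split start/finalize pair. The sketch below shows one way a consumer might drive the non-blocking pair; it assumes CONFIG_AB8500_BM is enabled, that ab8500_fg_get() returns NULL until the fuel-gauge driver has probed, and that ab8500_fg_inst_curr_done() reports non-zero once the sample is ready. The function name and polling interval are illustrative.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mfd/abx500/ab8500-bm.h>

/*
 * Illustrative only: take one instantaneous current sample using the
 * start/poll/finalize pattern. The finalized result is written to *curr.
 */
static int example_read_inst_current(int *curr)
{
        struct ab8500_fg *fg = ab8500_fg_get();
        int timeout = 50;
        int ret;

        if (!fg)
                return -ENODEV;

        ret = ab8500_fg_inst_curr_start(fg);
        if (ret)
                return ret;

        /* Poll until the coulomb-counter sample is reported as done */
        while (!ab8500_fg_inst_curr_done(fg) && timeout--)
                msleep(20);
        if (timeout < 0)
                return -ETIMEDOUT;

        return ab8500_fg_inst_curr_finalize(fg, curr);
}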
+ * + * Author: Arun R Murthy + * Author: Daniel Willerud + * Author: M'boumba Cedric Madianga + */ + +#ifndef _AB8500_GPADC_H +#define _AB8500_GPADC_H + +/* GPADC source: From datasheet(ADCSwSel[4:0] in GPADCCtrl2 + * and ADCHwSel[4:0] in GPADCCtrl3 ) */ +#define BAT_CTRL 0x01 +#define BTEMP_BALL 0x02 +#define MAIN_CHARGER_V 0x03 +#define ACC_DETECT1 0x04 +#define ACC_DETECT2 0x05 +#define ADC_AUX1 0x06 +#define ADC_AUX2 0x07 +#define MAIN_BAT_V 0x08 +#define VBUS_V 0x09 +#define MAIN_CHARGER_C 0x0A +#define USB_CHARGER_C 0x0B +#define BK_BAT_V 0x0C +#define DIE_TEMP 0x0D +#define USB_ID 0x0E +#define XTAL_TEMP 0x12 +#define VBAT_TRUE_MEAS 0x13 +#define BAT_CTRL_AND_IBAT 0x1C +#define VBAT_MEAS_AND_IBAT 0x1D +#define VBAT_TRUE_MEAS_AND_IBAT 0x1E +#define BAT_TEMP_AND_IBAT 0x1F + +/* Virtual channel used only for ibat convertion to ampere + * Battery current conversion (ibat) cannot be requested as a single conversion + * but it is always in combination with other input requests + */ +#define IBAT_VIRTUAL_CHANNEL 0xFF + +#define SAMPLE_1 1 +#define SAMPLE_4 4 +#define SAMPLE_8 8 +#define SAMPLE_16 16 +#define RISING_EDGE 0 +#define FALLING_EDGE 1 + +/* Arbitrary ADC conversion type constants */ +#define ADC_SW 0 +#define ADC_HW 1 + +struct ab8500_gpadc; + +struct ab8500_gpadc *ab8500_gpadc_get(char *name); +int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel, + u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type); +static inline int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel) +{ + return ab8500_gpadc_sw_hw_convert(gpadc, channel, + SAMPLE_16, 0, 0, ADC_SW); +} + +int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel, + u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type); +int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel, + u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type, + int *ibat); +int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, + u8 channel, int ad_value); +void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc, + u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h, + u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h); + +#endif /* _AB8500_GPADC_H */ diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h new file mode 100644 index 000000000..01024d1ae --- /dev/null +++ b/include/linux/mfd/abx500/ab8500-sysctrl.h @@ -0,0 +1,301 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Mattias Nilsson for ST Ericsson. 
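Editor's note: ab8500_gpadc_convert() above is a convenience wrapper that requests a 16-sample, software-triggered conversion. The sketch below reads the main battery voltage channel; the device name string and the error-pointer return convention of ab8500_gpadc_get() are assumptions based on how the in-tree consumers use this API, not guarantees made by this header.

#include <linux/err.h>
#include <linux/mfd/abx500/ab8500-gpadc.h>

/*
 * Illustrative only: software-triggered read of the MAIN_BAT_V channel.
 * The gpadc device name is assumed to be "ab8500-gpadc.0".
 */
static int example_read_vbat(void)
{
        struct ab8500_gpadc *gpadc = ab8500_gpadc_get("ab8500-gpadc.0");

        if (IS_ERR(gpadc))
                return PTR_ERR(gpadc);

        /* SAMPLE_16 averaging, ADC_SW trigger (see ab8500_gpadc_convert) */
        return ab8500_gpadc_convert(gpadc, MAIN_BAT_V);
}

The returned value is channel-scaled; for MAIN_BAT_V it falls within the ADC_CH_VBAT_MIN/ADC_CH_VBAT_MAX millivolt range defined above.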
+ * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __AB8500_SYSCTRL_H +#define __AB8500_SYSCTRL_H + +#include + +#ifdef CONFIG_AB8500_CORE + +int ab8500_sysctrl_read(u16 reg, u8 *value); +int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value); + +#else + +static inline int ab8500_sysctrl_read(u16 reg, u8 *value) +{ + return 0; +} + +static inline int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value) +{ + return 0; +} + +#endif /* CONFIG_AB8500_CORE */ + +static inline int ab8500_sysctrl_set(u16 reg, u8 bits) +{ + return ab8500_sysctrl_write(reg, bits, bits); +} + +static inline int ab8500_sysctrl_clear(u16 reg, u8 bits) +{ + return ab8500_sysctrl_write(reg, bits, 0); +} + +/* Registers */ +#define AB8500_TURNONSTATUS 0x100 +#define AB8500_RESETSTATUS 0x101 +#define AB8500_PONKEY1PRESSSTATUS 0x102 +#define AB8500_SYSCLKREQSTATUS 0x142 +#define AB8500_STW4500CTRL1 0x180 +#define AB8500_STW4500CTRL2 0x181 +#define AB8500_STW4500CTRL3 0x200 +#define AB8500_MAINWDOGCTRL 0x201 +#define AB8500_MAINWDOGTIMER 0x202 +#define AB8500_LOWBAT 0x203 +#define AB8500_BATTOK 0x204 +#define AB8500_SYSCLKTIMER 0x205 +#define AB8500_SMPSCLKCTRL 0x206 +#define AB8500_SMPSCLKSEL1 0x207 +#define AB8500_SMPSCLKSEL2 0x208 +#define AB8500_SMPSCLKSEL3 0x209 +#define AB8500_SYSULPCLKCONF 0x20A +#define AB8500_SYSULPCLKCTRL1 0x20B +#define AB8500_SYSCLKCTRL 0x20C +#define AB8500_SYSCLKREQ1VALID 0x20D +#define AB8500_SYSTEMCTRLSUP 0x20F +#define AB8500_SYSCLKREQ1RFCLKBUF 0x210 +#define AB8500_SYSCLKREQ2RFCLKBUF 0x211 +#define AB8500_SYSCLKREQ3RFCLKBUF 0x212 +#define AB8500_SYSCLKREQ4RFCLKBUF 0x213 +#define AB8500_SYSCLKREQ5RFCLKBUF 0x214 +#define AB8500_SYSCLKREQ6RFCLKBUF 0x215 +#define AB8500_SYSCLKREQ7RFCLKBUF 0x216 +#define AB8500_SYSCLKREQ8RFCLKBUF 0x217 +#define AB8500_DITHERCLKCTRL 0x220 +#define AB8500_SWATCTRL 0x230 +#define AB8500_HIQCLKCTRL 0x232 +#define AB8500_VSIMSYSCLKCTRL 0x233 +#define AB9540_SYSCLK12BUFCTRL 0x234 +#define AB9540_SYSCLK12CONFCTRL 0x235 +#define AB9540_SYSCLK12BUFCTRL2 0x236 +#define AB9540_SYSCLK12BUF1VALID 0x237 +#define AB9540_SYSCLK12BUF2VALID 0x238 +#define AB9540_SYSCLK12BUF3VALID 0x239 +#define AB9540_SYSCLK12BUF4VALID 0x23A + +/* Bits */ +#define AB8500_TURNONSTATUS_PORNVBAT BIT(0) +#define AB8500_TURNONSTATUS_PONKEY1DBF BIT(1) +#define AB8500_TURNONSTATUS_PONKEY2DBF BIT(2) +#define AB8500_TURNONSTATUS_RTCALARM BIT(3) +#define AB8500_TURNONSTATUS_MAINCHDET BIT(4) +#define AB8500_TURNONSTATUS_VBUSDET BIT(5) +#define AB8500_TURNONSTATUS_USBIDDETECT BIT(6) + +#define AB8500_RESETSTATUS_RESETN4500NSTATUS BIT(0) +#define AB8500_RESETSTATUS_SWRESETN4500NSTATUS BIT(2) + +#define AB8500_PONKEY1PRESSSTATUS_PONKEY1PRESSTIME_MASK 0x7F +#define AB8500_PONKEY1PRESSSTATUS_PONKEY1PRESSTIME_SHIFT 0 + +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ1STATUS BIT(0) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ2STATUS BIT(1) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ3STATUS BIT(2) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ4STATUS BIT(3) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ5STATUS BIT(4) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ6STATUS BIT(5) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ7STATUS BIT(6) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ8STATUS BIT(7) + +#define AB8500_STW4500CTRL1_SWOFF BIT(0) +#define AB8500_STW4500CTRL1_SWRESET4500N BIT(1) +#define AB8500_STW4500CTRL1_THDB8500SWOFF BIT(2) + +#define AB8500_STW4500CTRL2_RESETNVAUX1VALID BIT(0) +#define AB8500_STW4500CTRL2_RESETNVAUX2VALID BIT(1) +#define AB8500_STW4500CTRL2_RESETNVAUX3VALID BIT(2) +#define 
AB8500_STW4500CTRL2_RESETNVMODVALID BIT(3) +#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY1VALID BIT(4) +#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY2VALID BIT(5) +#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY3VALID BIT(6) +#define AB8500_STW4500CTRL2_RESETNVSMPS1VALID BIT(7) + +#define AB8500_STW4500CTRL3_CLK32KOUT2DIS BIT(0) +#define AB8500_STW4500CTRL3_RESETAUDN BIT(1) +#define AB8500_STW4500CTRL3_RESETDENCN BIT(2) +#define AB8500_STW4500CTRL3_THSDENA BIT(3) + +#define AB8500_MAINWDOGCTRL_MAINWDOGENA BIT(0) +#define AB8500_MAINWDOGCTRL_MAINWDOGKICK BIT(1) +#define AB8500_MAINWDOGCTRL_WDEXPTURNONVALID BIT(4) + +#define AB8500_MAINWDOGTIMER_MAINWDOGTIMER_MASK 0x7F +#define AB8500_MAINWDOGTIMER_MAINWDOGTIMER_SHIFT 0 + +#define AB8500_LOWBAT_LOWBATENA BIT(0) +#define AB8500_LOWBAT_LOWBAT_MASK 0x7E +#define AB8500_LOWBAT_LOWBAT_SHIFT 1 + +#define AB8500_BATTOK_BATTOKSEL0THF_MASK 0x0F +#define AB8500_BATTOK_BATTOKSEL0THF_SHIFT 0 +#define AB8500_BATTOK_BATTOKSEL1THF_MASK 0xF0 +#define AB8500_BATTOK_BATTOKSEL1THF_SHIFT 4 + +#define AB8500_SYSCLKTIMER_SYSCLKTIMER_MASK 0x0F +#define AB8500_SYSCLKTIMER_SYSCLKTIMER_SHIFT 0 +#define AB8500_SYSCLKTIMER_SYSCLKTIMERADJ_MASK 0xF0 +#define AB8500_SYSCLKTIMER_SYSCLKTIMERADJ_SHIFT 4 + +#define AB8500_SMPSCLKCTRL_SMPSCLKINTSEL_MASK 0x03 +#define AB8500_SMPSCLKCTRL_SMPSCLKINTSEL_SHIFT 0 +#define AB8500_SMPSCLKCTRL_3M2CLKINTENA BIT(2) + +#define AB8500_SMPSCLKSEL1_VARMCLKSEL_MASK 0x07 +#define AB8500_SMPSCLKSEL1_VARMCLKSEL_SHIFT 0 +#define AB8500_SMPSCLKSEL1_VAPECLKSEL_MASK 0x38 +#define AB8500_SMPSCLKSEL1_VAPECLKSEL_SHIFT 3 + +#define AB8500_SMPSCLKSEL2_VMODCLKSEL_MASK 0x07 +#define AB8500_SMPSCLKSEL2_VMODCLKSEL_SHIFT 0 +#define AB8500_SMPSCLKSEL2_VSMPS1CLKSEL_MASK 0x38 +#define AB8500_SMPSCLKSEL2_VSMPS1CLKSEL_SHIFT 3 + +#define AB8500_SMPSCLKSEL3_VSMPS2CLKSEL_MASK 0x07 +#define AB8500_SMPSCLKSEL3_VSMPS2CLKSEL_SHIFT 0 +#define AB8500_SMPSCLKSEL3_VSMPS3CLKSEL_MASK 0x38 +#define AB8500_SMPSCLKSEL3_VSMPS3CLKSEL_SHIFT 3 + +#define AB8500_SYSULPCLKCONF_ULPCLKCONF_MASK 0x03 +#define AB8500_SYSULPCLKCONF_ULPCLKCONF_SHIFT 0 +#define AB8500_SYSULPCLKCONF_CLK27MHZSTRE BIT(2) +#define AB8500_SYSULPCLKCONF_TVOUTCLKDELN BIT(3) +#define AB8500_SYSULPCLKCONF_TVOUTCLKINV BIT(4) +#define AB8500_SYSULPCLKCONF_ULPCLKSTRE BIT(5) +#define AB8500_SYSULPCLKCONF_CLK27MHZBUFENA BIT(6) +#define AB8500_SYSULPCLKCONF_CLK27MHZPDENA BIT(7) + +#define AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK 0x03 +#define AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT 0 +#define AB8500_SYSULPCLKCTRL1_ULPCLKREQ BIT(2) +#define AB8500_SYSULPCLKCTRL1_4500SYSCLKREQ BIT(3) +#define AB8500_SYSULPCLKCTRL1_AUDIOCLKENA BIT(4) +#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF2REQ BIT(5) +#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF3REQ BIT(6) +#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF4REQ BIT(7) + +#define AB8500_SYSCLKCTRL_TVOUTPLLENA BIT(0) +#define AB8500_SYSCLKCTRL_TVOUTCLKENA BIT(1) +#define AB8500_SYSCLKCTRL_USBCLKENA BIT(2) + +#define AB8500_SYSCLKREQ1VALID_SYSCLKREQ1VALID BIT(0) +#define AB8500_SYSCLKREQ1VALID_ULPCLKREQ1VALID BIT(1) +#define AB8500_SYSCLKREQ1VALID_USBSYSCLKREQ1VALID BIT(2) + +#define AB8500_SYSTEMCTRLSUP_EXTSUP12LPNCLKSEL_MASK 0x03 +#define AB8500_SYSTEMCTRLSUP_EXTSUP12LPNCLKSEL_SHIFT 0 +#define AB8500_SYSTEMCTRLSUP_EXTSUP3LPNCLKSEL_MASK 0x0C +#define AB8500_SYSTEMCTRLSUP_EXTSUP3LPNCLKSEL_SHIFT 2 +#define AB8500_SYSTEMCTRLSUP_INTDB8500NOD BIT(4) + +#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF3 BIT(3) +#define 
AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF4 BIT(4) + +#define AB8500_DITHERCLKCTRL_VARMDITHERENA BIT(0) +#define AB8500_DITHERCLKCTRL_VSMPS3DITHERENA BIT(1) +#define AB8500_DITHERCLKCTRL_VSMPS1DITHERENA BIT(2) +#define AB8500_DITHERCLKCTRL_VSMPS2DITHERENA BIT(3) +#define AB8500_DITHERCLKCTRL_VMODDITHERENA BIT(4) +#define AB8500_DITHERCLKCTRL_VAPEDITHERENA BIT(5) +#define AB8500_DITHERCLKCTRL_DITHERDEL_MASK 0xC0 +#define AB8500_DITHERCLKCTRL_DITHERDEL_SHIFT 6 + +#define AB8500_SWATCTRL_UPDATERF BIT(0) +#define AB8500_SWATCTRL_SWATENABLE BIT(1) +#define AB8500_SWATCTRL_RFOFFTIMER_MASK 0x1C +#define AB8500_SWATCTRL_RFOFFTIMER_SHIFT 2 +#define AB8500_SWATCTRL_SWATBIT5 BIT(6) + +#define AB8500_HIQCLKCTRL_SYSCLKREQ1HIQENAVALID BIT(0) +#define AB8500_HIQCLKCTRL_SYSCLKREQ2HIQENAVALID BIT(1) +#define AB8500_HIQCLKCTRL_SYSCLKREQ3HIQENAVALID BIT(2) +#define AB8500_HIQCLKCTRL_SYSCLKREQ4HIQENAVALID BIT(3) +#define AB8500_HIQCLKCTRL_SYSCLKREQ5HIQENAVALID BIT(4) +#define AB8500_HIQCLKCTRL_SYSCLKREQ6HIQENAVALID BIT(5) +#define AB8500_HIQCLKCTRL_SYSCLKREQ7HIQENAVALID BIT(6) +#define AB8500_HIQCLKCTRL_SYSCLKREQ8HIQENAVALID BIT(7) + +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ1VALID BIT(0) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ2VALID BIT(1) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ3VALID BIT(2) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ4VALID BIT(3) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ5VALID BIT(4) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ6VALID BIT(5) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ7VALID BIT(6) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ8VALID BIT(7) + +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF1ENA BIT(0) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF2ENA BIT(1) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF3ENA BIT(2) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF4ENA BIT(3) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUFENA_MASK 0x0F +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF1STRE BIT(4) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF2STRE BIT(5) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF3STRE BIT(6) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF4STRE BIT(7) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUFSTRE_MASK 0xF0 + +#define AB9540_SYSCLK12CONFCTRL_PLL26TO38ENA BIT(0) +#define 
AB9540_SYSCLK12CONFCTRL_SYSCLK12USBMUXSEL BIT(1) +#define AB9540_SYSCLK12CONFCTRL_INT384MHZMUXSEL0 BIT(2) +#define AB9540_SYSCLK12CONFCTRL_INT384MHZMUXSEL1 BIT(3) +#define AB9540_SYSCLK12CONFCTRL_SYSCLK12BUFMUX BIT(4) +#define AB9540_SYSCLK12CONFCTRL_SYSCLK12PLLMUX BIT(5) +#define AB9540_SYSCLK12CONFCTRL_SYSCLK2MUXVALID BIT(6) + +#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF1PDENA BIT(0) +#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF2PDENA BIT(1) +#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF3PDENA BIT(2) +#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF4PDENA BIT(3) + +#define AB9540_SYSCLK12BUF1VALID_SYSCLK12BUF1VALID_MASK 0xFF +#define AB9540_SYSCLK12BUF1VALID_SYSCLK12BUF1VALID_SHIFT 0 + +#define AB9540_SYSCLK12BUF2VALID_SYSCLK12BUF2VALID_MASK 0xFF +#define AB9540_SYSCLK12BUF2VALID_SYSCLK12BUF2VALID_SHIFT 0 + +#define AB9540_SYSCLK12BUF3VALID_SYSCLK12BUF3VALID_MASK 0xFF +#define AB9540_SYSCLK12BUF3VALID_SYSCLK12BUF3VALID_SHIFT 0 + +#define AB9540_SYSCLK12BUF4VALID_SYSCLK12BUF4VALID_MASK 0xFF +#define AB9540_SYSCLK12BUF4VALID_SYSCLK12BUF4VALID_SHIFT 0 + +#define AB8500_ENABLE_WD 0x1 +#define AB8500_KICK_WD 0x2 +#define AB8500_WD_RESTART_ON_EXPIRE 0x10 + +#endif /* __AB8500_SYSCTRL_H */ diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h new file mode 100644 index 000000000..d33c245e7 --- /dev/null +++ b/include/linux/mfd/abx500/ab8500.h @@ -0,0 +1,518 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Srinidhi Kasagar + */ +#ifndef MFD_AB8500_H +#define MFD_AB8500_H + +#include +#include +#include + +struct device; + +/* + * AB IC versions + * + * AB8500_VERSION_AB8500 should be 0xFF but will never be read as need a + * non-supported multi-byte I2C access via PRCMU. Set to 0x00 to ease the + * print of version string. 
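Editor's note: ab8500_sysctrl_set() and ab8500_sysctrl_clear() in the sysctrl header above pass the same bit pattern as both mask and value (or a zero value), so they touch only the named bits of a system-control register. The sketch below is a minimal, purely illustrative use of those helpers with the main-watchdog register and bit definitions from that header; whether a platform should drive the watchdog this way is outside the header's scope.

#include <linux/mfd/abx500/ab8500-sysctrl.h>

/*
 * Illustrative only: enable the AB8500 main watchdog, then write the
 * kick bit, using the masked sysctrl helpers so other bits in
 * AB8500_MAINWDOGCTRL are left untouched.
 */
static int example_kick_main_watchdog(void)
{
        int ret;

        ret = ab8500_sysctrl_set(AB8500_MAINWDOGCTRL,
                                 AB8500_MAINWDOGCTRL_MAINWDOGENA);
        if (ret)
                return ret;

        return ab8500_sysctrl_set(AB8500_MAINWDOGCTRL,
                                  AB8500_MAINWDOGCTRL_MAINWDOGKICK);
}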
+ */ +enum ab8500_version { + AB8500_VERSION_AB8500 = 0x0, + AB8500_VERSION_AB8505 = 0x1, + AB8500_VERSION_AB9540 = 0x2, + AB8500_VERSION_AB8540 = 0x4, + AB8500_VERSION_UNDEFINED, +}; + +/* AB8500 CIDs*/ +#define AB8500_CUTEARLY 0x00 +#define AB8500_CUT1P0 0x10 +#define AB8500_CUT1P1 0x11 +#define AB8500_CUT1P2 0x12 /* Only valid for AB8540 */ +#define AB8500_CUT2P0 0x20 +#define AB8500_CUT3P0 0x30 +#define AB8500_CUT3P3 0x33 + +/* + * AB8500 bank addresses + */ +#define AB8500_M_FSM_RANK 0x0 +#define AB8500_SYS_CTRL1_BLOCK 0x1 +#define AB8500_SYS_CTRL2_BLOCK 0x2 +#define AB8500_REGU_CTRL1 0x3 +#define AB8500_REGU_CTRL2 0x4 +#define AB8500_USB 0x5 +#define AB8500_TVOUT 0x6 +#define AB8500_DBI 0x7 +#define AB8500_ECI_AV_ACC 0x8 +#define AB8500_RESERVED 0x9 +#define AB8500_GPADC 0xA +#define AB8500_CHARGER 0xB +#define AB8500_GAS_GAUGE 0xC +#define AB8500_AUDIO 0xD +#define AB8500_INTERRUPT 0xE +#define AB8500_RTC 0xF +#define AB8500_MISC 0x10 +#define AB8500_DEVELOPMENT 0x11 +#define AB8500_DEBUG 0x12 +#define AB8500_PROD_TEST 0x13 +#define AB8500_STE_TEST 0x14 +#define AB8500_OTP_EMUL 0x15 + +#define AB8500_DEBUG_FIELD_LAST 0x16 + +/* + * Interrupts + * Values used to index into array ab8500_irq_regoffset[] defined in + * drivers/mdf/ab8500-core.c + */ +/* Definitions for AB8500, AB9540 and AB8540 */ +/* ab8500_irq_regoffset[0] -> IT[Source|Latch|Mask]1 */ +#define AB8500_INT_MAIN_EXT_CH_NOT_OK 0 /* not 8505/9540 */ +#define AB8500_INT_UN_PLUG_TV_DET 1 /* not 8505/9540/8540 */ +#define AB8500_INT_PLUG_TV_DET 2 /* not 8505/9540/8540 */ +#define AB8500_INT_TEMP_WARM 3 +#define AB8500_INT_PON_KEY2DB_F 4 +#define AB8500_INT_PON_KEY2DB_R 5 +#define AB8500_INT_PON_KEY1DB_F 6 +#define AB8500_INT_PON_KEY1DB_R 7 +/* ab8500_irq_regoffset[1] -> IT[Source|Latch|Mask]2 */ +#define AB8500_INT_BATT_OVV 8 +#define AB8500_INT_MAIN_CH_UNPLUG_DET 10 /* not 8505/8540 */ +#define AB8500_INT_MAIN_CH_PLUG_DET 11 /* not 8505/8540 */ +#define AB8500_INT_VBUS_DET_F 14 +#define AB8500_INT_VBUS_DET_R 15 +/* ab8500_irq_regoffset[2] -> IT[Source|Latch|Mask]3 */ +#define AB8500_INT_VBUS_CH_DROP_END 16 +#define AB8500_INT_RTC_60S 17 +#define AB8500_INT_RTC_ALARM 18 +#define AB8540_INT_BIF_INT 19 +#define AB8500_INT_BAT_CTRL_INDB 20 +#define AB8500_INT_CH_WD_EXP 21 +#define AB8500_INT_VBUS_OVV 22 +#define AB8500_INT_MAIN_CH_DROP_END 23 /* not 8505/9540/8540 */ +/* ab8500_irq_regoffset[3] -> IT[Source|Latch|Mask]4 */ +#define AB8500_INT_CCN_CONV_ACC 24 +#define AB8500_INT_INT_AUD 25 +#define AB8500_INT_CCEOC 26 +#define AB8500_INT_CC_INT_CALIB 27 +#define AB8500_INT_LOW_BAT_F 28 +#define AB8500_INT_LOW_BAT_R 29 +#define AB8500_INT_BUP_CHG_NOT_OK 30 +#define AB8500_INT_BUP_CHG_OK 31 +/* ab8500_irq_regoffset[4] -> IT[Source|Latch|Mask]5 */ +#define AB8500_INT_GP_HW_ADC_CONV_END 32 /* not 8505/8540 */ +#define AB8500_INT_ACC_DETECT_1DB_F 33 +#define AB8500_INT_ACC_DETECT_1DB_R 34 +#define AB8500_INT_ACC_DETECT_22DB_F 35 +#define AB8500_INT_ACC_DETECT_22DB_R 36 +#define AB8500_INT_ACC_DETECT_21DB_F 37 +#define AB8500_INT_ACC_DETECT_21DB_R 38 +#define AB8500_INT_GP_SW_ADC_CONV_END 39 +/* ab8500_irq_regoffset[5] -> IT[Source|Latch|Mask]7 */ +#define AB8500_INT_GPIO6R 40 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO7R 41 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO8R 42 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO9R 43 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO10R 44 /* not 8540 */ +#define AB8500_INT_GPIO11R 45 /* not 8540 */ +#define AB8500_INT_GPIO12R 46 /* not 8505/8540 */ +#define AB8500_INT_GPIO13R 47 
/* not 8540 */ +/* ab8500_irq_regoffset[6] -> IT[Source|Latch|Mask]8 */ +#define AB8500_INT_GPIO24R 48 /* not 8505/8540 */ +#define AB8500_INT_GPIO25R 49 /* not 8505/8540 */ +#define AB8500_INT_GPIO36R 50 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO37R 51 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO38R 52 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO39R 53 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO40R 54 /* not 8540 */ +#define AB8500_INT_GPIO41R 55 /* not 8540 */ +/* ab8500_irq_regoffset[7] -> IT[Source|Latch|Mask]9 */ +#define AB8500_INT_GPIO6F 56 /* not 8505/9540 */ +#define AB8500_INT_GPIO7F 57 /* not 8505/9540 */ +#define AB8500_INT_GPIO8F 58 /* not 8505/9540 */ +#define AB8500_INT_GPIO9F 59 /* not 8505/9540 */ +#define AB8500_INT_GPIO10F 60 +#define AB8500_INT_GPIO11F 61 +#define AB8500_INT_GPIO12F 62 /* not 8505 */ +#define AB8500_INT_GPIO13F 63 +/* ab8500_irq_regoffset[8] -> IT[Source|Latch|Mask]10 */ +#define AB8500_INT_GPIO24F 64 /* not 8505/8540 */ +#define AB8500_INT_GPIO25F 65 /* not 8505/8540 */ +#define AB8500_INT_GPIO36F 66 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO37F 67 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO38F 68 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO39F 69 /* not 8505/9540/8540 */ +#define AB8500_INT_GPIO40F 70 /* not 8540 */ +#define AB8500_INT_GPIO41F 71 /* not 8540 */ +/* ab8500_irq_regoffset[9] -> IT[Source|Latch|Mask]12 */ +#define AB8500_INT_ADP_SOURCE_ERROR 72 +#define AB8500_INT_ADP_SINK_ERROR 73 +#define AB8500_INT_ADP_PROBE_PLUG 74 +#define AB8500_INT_ADP_PROBE_UNPLUG 75 +#define AB8500_INT_ADP_SENSE_OFF 76 +#define AB8500_INT_USB_PHY_POWER_ERR 78 +#define AB8500_INT_USB_LINK_STATUS 79 +/* ab8500_irq_regoffset[10] -> IT[Source|Latch|Mask]19 */ +#define AB8500_INT_BTEMP_LOW 80 +#define AB8500_INT_BTEMP_LOW_MEDIUM 81 +#define AB8500_INT_BTEMP_MEDIUM_HIGH 82 +#define AB8500_INT_BTEMP_HIGH 83 +/* ab8500_irq_regoffset[11] -> IT[Source|Latch|Mask]20 */ +#define AB8500_INT_SRP_DETECT 88 +#define AB8500_INT_USB_CHARGER_NOT_OKR 89 +#define AB8500_INT_ID_WAKEUP_R 90 +#define AB8500_INT_ID_DET_PLUGR 91 /* 8505/9540 cut2.0 */ +#define AB8500_INT_ID_DET_R1R 92 +#define AB8500_INT_ID_DET_R2R 93 +#define AB8500_INT_ID_DET_R3R 94 +#define AB8500_INT_ID_DET_R4R 95 +/* ab8500_irq_regoffset[12] -> IT[Source|Latch|Mask]21 */ +#define AB8500_INT_ID_WAKEUP_F 96 /* not 8505/9540 */ +#define AB8500_INT_ID_DET_PLUGF 97 /* 8505/9540 cut2.0 */ +#define AB8500_INT_ID_DET_R1F 98 /* not 8505/9540 */ +#define AB8500_INT_ID_DET_R2F 99 /* not 8505/9540 */ +#define AB8500_INT_ID_DET_R3F 100 /* not 8505/9540 */ +#define AB8500_INT_ID_DET_R4F 101 /* not 8505/9540 */ +#define AB8500_INT_CHAUTORESTARTAFTSEC 102 /* not 8505/9540 */ +#define AB8500_INT_CHSTOPBYSEC 103 +/* ab8500_irq_regoffset[13] -> IT[Source|Latch|Mask]22 */ +#define AB8500_INT_USB_CH_TH_PROT_F 104 +#define AB8500_INT_USB_CH_TH_PROT_R 105 +#define AB8500_INT_MAIN_CH_TH_PROT_F 106 /* not 8505/9540 */ +#define AB8500_INT_MAIN_CH_TH_PROT_R 107 /* not 8505/9540 */ +#define AB8500_INT_CHCURLIMNOHSCHIRP 109 +#define AB8500_INT_CHCURLIMHSCHIRP 110 +#define AB8500_INT_XTAL32K_KO 111 + +/* Definitions for AB9540 / AB8505 */ +/* ab8500_irq_regoffset[14] -> IT[Source|Latch|Mask]13 */ +#define AB9540_INT_GPIO50R 113 /* not 8540 */ +#define AB9540_INT_GPIO51R 114 /* not 8505/8540 */ +#define AB9540_INT_GPIO52R 115 /* not 8540 */ +#define AB9540_INT_GPIO53R 116 /* not 8540 */ +#define AB9540_INT_GPIO54R 117 /* not 8505/8540 */ +#define AB9540_INT_IEXT_CH_RF_BFN_R 118 +/* ab8500_irq_regoffset[15] 
-> IT[Source|Latch|Mask]14 */ +#define AB9540_INT_GPIO50F 121 /* not 8540 */ +#define AB9540_INT_GPIO51F 122 /* not 8505/8540 */ +#define AB9540_INT_GPIO52F 123 /* not 8540 */ +#define AB9540_INT_GPIO53F 124 /* not 8540 */ +#define AB9540_INT_GPIO54F 125 /* not 8505/8540 */ +#define AB9540_INT_IEXT_CH_RF_BFN_F 126 +/* ab8500_irq_regoffset[16] -> IT[Source|Latch|Mask]25 */ +#define AB8505_INT_KEYSTUCK 128 +#define AB8505_INT_IKR 129 +#define AB8505_INT_IKP 130 +#define AB8505_INT_KP 131 +#define AB8505_INT_KEYDEGLITCH 132 +#define AB8505_INT_MODPWRSTATUSF 134 +#define AB8505_INT_MODPWRSTATUSR 135 +/* ab8500_irq_regoffset[17] -> IT[Source|Latch|Mask]6 */ +#define AB8500_INT_HOOK_DET_NEG_F 138 +#define AB8500_INT_HOOK_DET_NEG_R 139 +#define AB8500_INT_HOOK_DET_POS_F 140 +#define AB8500_INT_HOOK_DET_POS_R 141 +#define AB8500_INT_PLUG_DET_COMP_F 142 +#define AB8500_INT_PLUG_DET_COMP_R 143 +/* ab8500_irq_regoffset[18] -> IT[Source|Latch|Mask]23 */ +#define AB8505_INT_COLL 144 +#define AB8505_INT_RESERR 145 +#define AB8505_INT_FRAERR 146 +#define AB8505_INT_COMERR 147 +#define AB8505_INT_SPDSET 148 +#define AB8505_INT_DSENT 149 +#define AB8505_INT_DREC 150 +#define AB8505_INT_ACC_INT 151 +/* ab8500_irq_regoffset[19] -> IT[Source|Latch|Mask]24 */ +#define AB8505_INT_NOPINT 152 +/* ab8540_irq_regoffset[20] -> IT[Source|Latch|Mask]26 */ +#define AB8540_INT_IDPLUGDETCOMPF 160 +#define AB8540_INT_IDPLUGDETCOMPR 161 +#define AB8540_INT_FMDETCOMPLOF 162 +#define AB8540_INT_FMDETCOMPLOR 163 +#define AB8540_INT_FMDETCOMPHIF 164 +#define AB8540_INT_FMDETCOMPHIR 165 +#define AB8540_INT_ID5VDETCOMPF 166 +#define AB8540_INT_ID5VDETCOMPR 167 +/* ab8540_irq_regoffset[21] -> IT[Source|Latch|Mask]27 */ +#define AB8540_INT_GPIO43F 168 +#define AB8540_INT_GPIO43R 169 +#define AB8540_INT_GPIO44F 170 +#define AB8540_INT_GPIO44R 171 +#define AB8540_INT_KEYPOSDETCOMPF 172 +#define AB8540_INT_KEYPOSDETCOMPR 173 +#define AB8540_INT_KEYNEGDETCOMPF 174 +#define AB8540_INT_KEYNEGDETCOMPR 175 +/* ab8540_irq_regoffset[22] -> IT[Source|Latch|Mask]28 */ +#define AB8540_INT_GPIO1VBATF 176 +#define AB8540_INT_GPIO1VBATR 177 +#define AB8540_INT_GPIO2VBATF 178 +#define AB8540_INT_GPIO2VBATR 179 +#define AB8540_INT_GPIO3VBATF 180 +#define AB8540_INT_GPIO3VBATR 181 +#define AB8540_INT_GPIO4VBATF 182 +#define AB8540_INT_GPIO4VBATR 183 +/* ab8540_irq_regoffset[23] -> IT[Source|Latch|Mask]29 */ +#define AB8540_INT_SYSCLKREQ2F 184 +#define AB8540_INT_SYSCLKREQ2R 185 +#define AB8540_INT_SYSCLKREQ3F 186 +#define AB8540_INT_SYSCLKREQ3R 187 +#define AB8540_INT_SYSCLKREQ4F 188 +#define AB8540_INT_SYSCLKREQ4R 189 +#define AB8540_INT_SYSCLKREQ5F 190 +#define AB8540_INT_SYSCLKREQ5R 191 +/* ab8540_irq_regoffset[24] -> IT[Source|Latch|Mask]30 */ +#define AB8540_INT_PWMOUT1F 192 +#define AB8540_INT_PWMOUT1R 193 +#define AB8540_INT_PWMCTRL0F 194 +#define AB8540_INT_PWMCTRL0R 195 +#define AB8540_INT_PWMCTRL1F 196 +#define AB8540_INT_PWMCTRL1R 197 +#define AB8540_INT_SYSCLKREQ6F 198 +#define AB8540_INT_SYSCLKREQ6R 199 +/* ab8540_irq_regoffset[25] -> IT[Source|Latch|Mask]31 */ +#define AB8540_INT_PWMEXTVIBRA1F 200 +#define AB8540_INT_PWMEXTVIBRA1R 201 +#define AB8540_INT_PWMEXTVIBRA2F 202 +#define AB8540_INT_PWMEXTVIBRA2R 203 +#define AB8540_INT_PWMOUT2F 204 +#define AB8540_INT_PWMOUT2R 205 +#define AB8540_INT_PWMOUT3F 206 +#define AB8540_INT_PWMOUT3R 207 +/* ab8540_irq_regoffset[26] -> IT[Source|Latch|Mask]32 */ +#define AB8540_INT_ADDATA2F 208 +#define AB8540_INT_ADDATA2R 209 +#define AB8540_INT_DADATA2F 210 +#define AB8540_INT_DADATA2R 211 +#define 
AB8540_INT_FSYNC2F 212 +#define AB8540_INT_FSYNC2R 213 +#define AB8540_INT_BITCLK2F 214 +#define AB8540_INT_BITCLK2R 215 +/* ab8540_irq_regoffset[27] -> IT[Source|Latch|Mask]33 */ +#define AB8540_INT_RTC_1S 216 + +/* + * AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the + * entire platform. This is a "compile time" constant so this must be set to + * the largest possible value that may be encountered with different AB SOCs. + * Of the currently supported AB devices, AB8500 and AB9540, it is the AB9540 + * which is larger. + */ +#define AB8500_NR_IRQS 112 +#define AB8505_NR_IRQS 153 +#define AB9540_NR_IRQS 153 +#define AB8540_NR_IRQS 216 +/* This is set to the roof of any AB8500 chip variant IRQ counts */ +#define AB8500_MAX_NR_IRQS AB8540_NR_IRQS + +#define AB8500_NUM_IRQ_REGS 14 +#define AB9540_NUM_IRQ_REGS 20 +#define AB8540_NUM_IRQ_REGS 27 + +/* Turn On Status Event */ +#define AB8500_POR_ON_VBAT 0x01 +#define AB8500_POW_KEY_1_ON 0x02 +#define AB8500_POW_KEY_2_ON 0x04 +#define AB8500_RTC_ALARM 0x08 +#define AB8500_MAIN_CH_DET 0x10 +#define AB8500_VBUS_DET 0x20 +#define AB8500_USB_ID_DET 0x40 + +/** + * struct ab8500 - ab8500 internal structure + * @dev: parent device + * @lock: read/write operations lock + * @irq_lock: genirq bus lock + * @transfer_ongoing: 0 if no transfer ongoing + * @irq: irq line + * @irq_domain: irq domain + * @version: chip version id (e.g. ab8500 or ab9540) + * @chip_id: chip revision id + * @write: register write + * @write_masked: masked register write + * @read: register read + * @rx_buf: rx buf for SPI + * @tx_buf: tx buf for SPI + * @mask: cache of IRQ regs for bus lock + * @oldmask: cache of previous IRQ regs for bus lock + * @mask_size: Actual number of valid entries in mask[], oldmask[] and + * irq_reg_offset + * @irq_reg_offset: Array of offsets into IRQ registers + */ +struct ab8500 { + struct device *dev; + struct mutex lock; + struct mutex irq_lock; + atomic_t transfer_ongoing; + int irq; + struct irq_domain *domain; + enum ab8500_version version; + u8 chip_id; + + int (*write)(struct ab8500 *ab8500, u16 addr, u8 data); + int (*write_masked)(struct ab8500 *ab8500, u16 addr, u8 mask, u8 data); + int (*read)(struct ab8500 *ab8500, u16 addr); + + unsigned long tx_buf[4]; + unsigned long rx_buf[4]; + + u8 *mask; + u8 *oldmask; + int mask_size; + const int *irq_reg_offset; + int it_latchhier_num; +}; + +struct ab8500_regulator_platform_data; +struct ab8500_codec_platform_data; +struct ab8500_sysctrl_platform_data; + +/** + * struct ab8500_platform_data - AB8500 platform data + * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used + * @init: board-specific initialization after detection of ab8500 + * @regulator: machine-specific constraints for regulators + */ +struct ab8500_platform_data { + void (*init) (struct ab8500 *); + struct ab8500_regulator_platform_data *regulator; + struct ab8500_codec_platform_data *codec; + struct ab8500_sysctrl_platform_data *sysctrl; +}; + +extern int ab8500_init(struct ab8500 *ab8500, + enum ab8500_version version); +extern int ab8500_exit(struct ab8500 *ab8500); + +extern int ab8500_suspend(struct ab8500 *ab8500); + +static inline int is_ab8500(struct ab8500 *ab) +{ + return ab->version == AB8500_VERSION_AB8500; +} + +static inline int is_ab8505(struct ab8500 *ab) +{ + return ab->version == AB8500_VERSION_AB8505; +} + +static inline int is_ab9540(struct ab8500 *ab) +{ + return ab->version == AB8500_VERSION_AB9540; +} + +static inline int is_ab8540(struct ab8500 *ab) +{ + return ab->version == 
AB8500_VERSION_AB8540; +} + +/* exclude also ab8505, ab9540... */ +static inline int is_ab8500_1p0_or_earlier(struct ab8500 *ab) +{ + return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT1P0)); +} + +/* exclude also ab8505, ab9540... */ +static inline int is_ab8500_1p1_or_earlier(struct ab8500 *ab) +{ + return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT1P1)); +} + +/* exclude also ab8505, ab9540... */ +static inline int is_ab8500_2p0_or_earlier(struct ab8500 *ab) +{ + return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT2P0)); +} + +static inline int is_ab8500_3p3_or_earlier(struct ab8500 *ab) +{ + return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT3P3)); +} + +/* exclude also ab8505, ab9540... */ +static inline int is_ab8500_2p0(struct ab8500 *ab) +{ + return (is_ab8500(ab) && (ab->chip_id == AB8500_CUT2P0)); +} + +static inline int is_ab8505_1p0_or_earlier(struct ab8500 *ab) +{ + return (is_ab8505(ab) && (ab->chip_id <= AB8500_CUT1P0)); +} + +static inline int is_ab8505_2p0(struct ab8500 *ab) +{ + return (is_ab8505(ab) && (ab->chip_id == AB8500_CUT2P0)); +} + +static inline int is_ab9540_1p0_or_earlier(struct ab8500 *ab) +{ + return (is_ab9540(ab) && (ab->chip_id <= AB8500_CUT1P0)); +} + +static inline int is_ab9540_2p0(struct ab8500 *ab) +{ + return (is_ab9540(ab) && (ab->chip_id == AB8500_CUT2P0)); +} + +/* + * Be careful, the marketing name for this chip is 2.1 + * but the value read from the chip is 3.0 (0x30) + */ +static inline int is_ab9540_3p0(struct ab8500 *ab) +{ + return (is_ab9540(ab) && (ab->chip_id == AB8500_CUT3P0)); +} + +static inline int is_ab8540_1p0_or_earlier(struct ab8500 *ab) +{ + return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P0); +} + +static inline int is_ab8540_1p1_or_earlier(struct ab8500 *ab) +{ + return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P1); +} + +static inline int is_ab8540_1p2_or_earlier(struct ab8500 *ab) +{ + return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P2); +} + +static inline int is_ab8540_2p0_or_earlier(struct ab8500 *ab) +{ + return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT2P0); +} + +static inline int is_ab8540_2p0(struct ab8500 *ab) +{ + return is_ab8540(ab) && (ab->chip_id == AB8500_CUT2P0); +} + +static inline int is_ab8505_2p0_earlier(struct ab8500 *ab) +{ + return (is_ab8505(ab) && (ab->chip_id < AB8500_CUT2P0)); +} + +static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab) +{ + return (is_ab9540(ab) && (ab->chip_id < AB8500_CUT2P0)); +} + +void ab8500_override_turn_on_stat(u8 mask, u8 set); + +#ifdef CONFIG_AB8500_DEBUG +extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); +void ab8500_dump_all_banks(struct device *dev); +void ab8500_debug_register_interrupt(int line); +#else +static inline void ab8500_dump_all_banks(struct device *dev) {} +static inline void ab8500_debug_register_interrupt(int line) {} +#endif + +#endif /* MFD_AB8500_H */ diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h new file mode 100644 index 000000000..12c38054f --- /dev/null +++ b/include/linux/mfd/abx500/ux500_chargalg.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) ST-Ericsson SA 2012 + * Author: Johan Gardsmark for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _UX500_CHARGALG_H +#define _UX500_CHARGALG_H + +#include + +/* + * Valid only for supplies of type: + * - POWER_SUPPLY_TYPE_MAINS, + * - POWER_SUPPLY_TYPE_USB, + * because only them store as drv_data pointer to struct ux500_charger. 
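Editor's note: the is_ab85xx()/cut helpers defined in ab8500.h above let drivers branch on silicon variant and revision without open-coding chip_id comparisons. The sketch below is purely illustrative and only logs which part was detected; note the AB9540 caveat above that the part marketed as 2.1 reads back chip_id 0x30.

#include <linux/device.h>
#include <linux/mfd/abx500/ab8500.h>

/* Illustrative only: report the detected AB variant/revision. */
static void example_report_chip(struct ab8500 *ab)
{
        if (is_ab8500(ab) && is_ab8500_2p0(ab))
                dev_info(ab->dev, "AB8500 cut 2.0\n");
        else if (is_ab9540_3p0(ab))
                /* marketing name 2.1, chip_id reads 0x30 (see above) */
                dev_info(ab->dev, "AB9540 v2.1 (chip_id 0x30)\n");
        else
                dev_info(ab->dev, "AB chip, id 0x%02x\n", ab->chip_id);
}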
+ */ +#define psy_to_ux500_charger(x) power_supply_get_drvdata(x) + +/* Forward declaration */ +struct ux500_charger; + +struct ux500_charger_ops { + int (*enable) (struct ux500_charger *, int, int, int); + int (*check_enable) (struct ux500_charger *, int, int); + int (*kick_wd) (struct ux500_charger *); + int (*update_curr) (struct ux500_charger *, int); +}; + +/** + * struct ux500_charger - power supply ux500 charger sub class + * @psy power supply base class + * @ops ux500 charger operations + * @max_out_volt maximum output charger voltage in mV + * @max_out_curr maximum output charger current in mA + * @enabled indicates if this charger is used or not + * @external external charger unit (pm2xxx) + */ +struct ux500_charger { + struct power_supply *psy; + struct ux500_charger_ops ops; + int max_out_volt; + int max_out_curr; + int wdt_refresh; + bool enabled; + bool external; +}; + +extern struct blocking_notifier_head charger_notifier_list; + +#endif diff --git a/include/linux/mfd/ac100.h b/include/linux/mfd/ac100.h new file mode 100644 index 000000000..3c148f196 --- /dev/null +++ b/include/linux/mfd/ac100.h @@ -0,0 +1,178 @@ +/* + * Functions and registers to access AC100 codec / RTC combo IC. + * + * Copyright (C) 2016 Chen-Yu Tsai + * + * Chen-Yu Tsai + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MFD_AC100_H +#define __LINUX_MFD_AC100_H + +#include + +struct ac100_dev { + struct device *dev; + struct regmap *regmap; +}; + +/* Audio codec related registers */ +#define AC100_CHIP_AUDIO_RST 0x00 +#define AC100_PLL_CTRL1 0x01 +#define AC100_PLL_CTRL2 0x02 +#define AC100_SYSCLK_CTRL 0x03 +#define AC100_MOD_CLK_ENA 0x04 +#define AC100_MOD_RST_CTRL 0x05 +#define AC100_I2S_SR_CTRL 0x06 + +/* I2S1 interface */ +#define AC100_I2S1_CLK_CTRL 0x10 +#define AC100_I2S1_SND_OUT_CTRL 0x11 +#define AC100_I2S1_SND_IN_CTRL 0x12 +#define AC100_I2S1_MXR_SRC 0x13 +#define AC100_I2S1_VOL_CTRL1 0x14 +#define AC100_I2S1_VOL_CTRL2 0x15 +#define AC100_I2S1_VOL_CTRL3 0x16 +#define AC100_I2S1_VOL_CTRL4 0x17 +#define AC100_I2S1_MXR_GAIN 0x18 + +/* I2S2 interface */ +#define AC100_I2S2_CLK_CTRL 0x20 +#define AC100_I2S2_SND_OUT_CTRL 0x21 +#define AC100_I2S2_SND_IN_CTRL 0x22 +#define AC100_I2S2_MXR_SRC 0x23 +#define AC100_I2S2_VOL_CTRL1 0x24 +#define AC100_I2S2_VOL_CTRL2 0x25 +#define AC100_I2S2_VOL_CTRL3 0x26 +#define AC100_I2S2_VOL_CTRL4 0x27 +#define AC100_I2S2_MXR_GAIN 0x28 + +/* I2S3 interface */ +#define AC100_I2S3_CLK_CTRL 0x30 +#define AC100_I2S3_SND_OUT_CTRL 0x31 +#define AC100_I2S3_SND_IN_CTRL 0x32 +#define AC100_I2S3_SIG_PATH_CTRL 0x33 + +/* ADC digital controls */ +#define AC100_ADC_DIG_CTRL 0x40 +#define AC100_ADC_VOL_CTRL 0x41 + +/* HMIC plug sensing / key detection */ +#define AC100_HMIC_CTRL1 0x44 +#define AC100_HMIC_CTRL2 0x45 +#define AC100_HMIC_STATUS 0x46 + +/* DAC digital controls */ +#define AC100_DAC_DIG_CTRL 0x48 +#define AC100_DAC_VOL_CTRL 0x49 +#define AC100_DAC_MXR_SRC 0x4c +#define AC100_DAC_MXR_GAIN 0x4d + +/* Analog controls */ +#define AC100_ADC_APC_CTRL 0x50 +#define AC100_ADC_SRC 0x51 +#define AC100_ADC_SRC_BST_CTRL 0x52 +#define AC100_OUT_MXR_DAC_A_CTRL 0x53 +#define AC100_OUT_MXR_SRC 0x54 +#define AC100_OUT_MXR_SRC_BST 0x55 +#define AC100_HPOUT_CTRL 0x56 +#define AC100_ERPOUT_CTRL 0x57 +#define AC100_SPKOUT_CTRL 0x58 +#define AC100_LINEOUT_CTRL 0x59 + +/* ADC digital audio processing (high pass filter & auto gain 
control */ +#define AC100_ADC_DAP_L_STA 0x80 +#define AC100_ADC_DAP_R_STA 0x81 +#define AC100_ADC_DAP_L_CTRL 0x82 +#define AC100_ADC_DAP_R_CTRL 0x83 +#define AC100_ADC_DAP_L_T_L 0x84 /* Left Target Level */ +#define AC100_ADC_DAP_R_T_L 0x85 /* Right Target Level */ +#define AC100_ADC_DAP_L_H_A_C 0x86 /* Left High Avg. Coef */ +#define AC100_ADC_DAP_L_L_A_C 0x87 /* Left Low Avg. Coef */ +#define AC100_ADC_DAP_R_H_A_C 0x88 /* Right High Avg. Coef */ +#define AC100_ADC_DAP_R_L_A_C 0x89 /* Right Low Avg. Coef */ +#define AC100_ADC_DAP_L_D_T 0x8a /* Left Decay Time */ +#define AC100_ADC_DAP_L_A_T 0x8b /* Left Attack Time */ +#define AC100_ADC_DAP_R_D_T 0x8c /* Right Decay Time */ +#define AC100_ADC_DAP_R_A_T 0x8d /* Right Attack Time */ +#define AC100_ADC_DAP_N_TH 0x8e /* Noise Threshold */ +#define AC100_ADC_DAP_L_H_N_A_C 0x8f /* Left High Noise Avg. Coef */ +#define AC100_ADC_DAP_L_L_N_A_C 0x90 /* Left Low Noise Avg. Coef */ +#define AC100_ADC_DAP_R_H_N_A_C 0x91 /* Right High Noise Avg. Coef */ +#define AC100_ADC_DAP_R_L_N_A_C 0x92 /* Right Low Noise Avg. Coef */ +#define AC100_ADC_DAP_H_HPF_C 0x93 /* High High-Pass-Filter Coef */ +#define AC100_ADC_DAP_L_HPF_C 0x94 /* Low High-Pass-Filter Coef */ +#define AC100_ADC_DAP_OPT 0x95 /* AGC Optimum */ + +/* DAC digital audio processing (high pass filter & dynamic range control) */ +#define AC100_DAC_DAP_CTRL 0xa0 +#define AC100_DAC_DAP_H_HPF_C 0xa1 /* High High-Pass-Filter Coef */ +#define AC100_DAC_DAP_L_HPF_C 0xa2 /* Low High-Pass-Filter Coef */ +#define AC100_DAC_DAP_L_H_E_A_C 0xa3 /* Left High Energy Avg Coef */ +#define AC100_DAC_DAP_L_L_E_A_C 0xa4 /* Left Low Energy Avg Coef */ +#define AC100_DAC_DAP_R_H_E_A_C 0xa5 /* Right High Energy Avg Coef */ +#define AC100_DAC_DAP_R_L_E_A_C 0xa6 /* Right Low Energy Avg Coef */ +#define AC100_DAC_DAP_H_G_D_T_C 0xa7 /* High Gain Delay Time Coef */ +#define AC100_DAC_DAP_L_G_D_T_C 0xa8 /* Low Gain Delay Time Coef */ +#define AC100_DAC_DAP_H_G_A_T_C 0xa9 /* High Gain Attack Time Coef */ +#define AC100_DAC_DAP_L_G_A_T_C 0xaa /* Low Gain Attack Time Coef */ +#define AC100_DAC_DAP_H_E_TH 0xab /* High Energy Threshold */ +#define AC100_DAC_DAP_L_E_TH 0xac /* Low Energy Threshold */ +#define AC100_DAC_DAP_H_G_K 0xad /* High Gain K parameter */ +#define AC100_DAC_DAP_L_G_K 0xae /* Low Gain K parameter */ +#define AC100_DAC_DAP_H_G_OFF 0xaf /* High Gain offset */ +#define AC100_DAC_DAP_L_G_OFF 0xb0 /* Low Gain offset */ +#define AC100_DAC_DAP_OPT 0xb1 /* DRC optimum */ + +/* Digital audio processing enable */ +#define AC100_ADC_DAP_ENA 0xb4 +#define AC100_DAC_DAP_ENA 0xb5 + +/* SRC control */ +#define AC100_SRC1_CTRL1 0xb8 +#define AC100_SRC1_CTRL2 0xb9 +#define AC100_SRC1_CTRL3 0xba +#define AC100_SRC1_CTRL4 0xbb +#define AC100_SRC2_CTRL1 0xbc +#define AC100_SRC2_CTRL2 0xbd +#define AC100_SRC2_CTRL3 0xbe +#define AC100_SRC2_CTRL4 0xbf + +/* RTC clk control */ +#define AC100_CLK32K_ANALOG_CTRL 0xc0 +#define AC100_CLKOUT_CTRL1 0xc1 +#define AC100_CLKOUT_CTRL2 0xc2 +#define AC100_CLKOUT_CTRL3 0xc3 + +/* RTC module */ +#define AC100_RTC_RST 0xc6 +#define AC100_RTC_CTRL 0xc7 +#define AC100_RTC_SEC 0xc8 /* second */ +#define AC100_RTC_MIN 0xc9 /* minute */ +#define AC100_RTC_HOU 0xca /* hour */ +#define AC100_RTC_WEE 0xcb /* weekday */ +#define AC100_RTC_DAY 0xcc /* day */ +#define AC100_RTC_MON 0xcd /* month */ +#define AC100_RTC_YEA 0xce /* year */ +#define AC100_RTC_UPD 0xcf /* update trigger */ + +/* RTC alarm */ +#define AC100_ALM_INT_ENA 0xd0 +#define AC100_ALM_INT_STA 0xd1 +#define AC100_ALM_SEC 0xd8 +#define 
AC100_ALM_MIN 0xd9 +#define AC100_ALM_HOU 0xda +#define AC100_ALM_WEE 0xdb +#define AC100_ALM_DAY 0xdc +#define AC100_ALM_MON 0xdd +#define AC100_ALM_YEA 0xde +#define AC100_ALM_UPD 0xdf + +/* RTC general purpose register 0 ~ 15 */ +#define AC100_RTC_GP(x) (0xe0 + (x)) + +#endif /* __LINUX_MFD_AC100_H */ diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h new file mode 100644 index 000000000..ac37558a4 --- /dev/null +++ b/include/linux/mfd/adp5520.h @@ -0,0 +1,299 @@ +/* + * Definitions and platform data for Analog Devices + * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys) + * + * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + + +#ifndef __LINUX_MFD_ADP5520_H +#define __LINUX_MFD_ADP5520_H + +#define ID_ADP5520 5520 +#define ID_ADP5501 5501 + +/* + * ADP5520/ADP5501 Register Map + */ + +#define ADP5520_MODE_STATUS 0x00 +#define ADP5520_INTERRUPT_ENABLE 0x01 +#define ADP5520_BL_CONTROL 0x02 +#define ADP5520_BL_TIME 0x03 +#define ADP5520_BL_FADE 0x04 +#define ADP5520_DAYLIGHT_MAX 0x05 +#define ADP5520_DAYLIGHT_DIM 0x06 +#define ADP5520_OFFICE_MAX 0x07 +#define ADP5520_OFFICE_DIM 0x08 +#define ADP5520_DARK_MAX 0x09 +#define ADP5520_DARK_DIM 0x0A +#define ADP5520_BL_VALUE 0x0B +#define ADP5520_ALS_CMPR_CFG 0x0C +#define ADP5520_L2_TRIP 0x0D +#define ADP5520_L2_HYS 0x0E +#define ADP5520_L3_TRIP 0x0F +#define ADP5520_L3_HYS 0x10 +#define ADP5520_LED_CONTROL 0x11 +#define ADP5520_LED_TIME 0x12 +#define ADP5520_LED_FADE 0x13 +#define ADP5520_LED1_CURRENT 0x14 +#define ADP5520_LED2_CURRENT 0x15 +#define ADP5520_LED3_CURRENT 0x16 + +/* + * ADP5520 Register Map + */ + +#define ADP5520_GPIO_CFG_1 0x17 +#define ADP5520_GPIO_CFG_2 0x18 +#define ADP5520_GPIO_IN 0x19 +#define ADP5520_GPIO_OUT 0x1A +#define ADP5520_GPIO_INT_EN 0x1B +#define ADP5520_GPIO_INT_STAT 0x1C +#define ADP5520_GPIO_INT_LVL 0x1D +#define ADP5520_GPIO_DEBOUNCE 0x1E +#define ADP5520_GPIO_PULLUP 0x1F +#define ADP5520_KP_INT_STAT_1 0x20 +#define ADP5520_KP_INT_STAT_2 0x21 +#define ADP5520_KR_INT_STAT_1 0x22 +#define ADP5520_KR_INT_STAT_2 0x23 +#define ADP5520_KEY_STAT_1 0x24 +#define ADP5520_KEY_STAT_2 0x25 + +/* + * MODE_STATUS bits + */ + +#define ADP5520_nSTNBY (1 << 7) +#define ADP5520_BL_EN (1 << 6) +#define ADP5520_DIM_EN (1 << 5) +#define ADP5520_OVP_INT (1 << 4) +#define ADP5520_CMPR_INT (1 << 3) +#define ADP5520_GPI_INT (1 << 2) +#define ADP5520_KR_INT (1 << 1) +#define ADP5520_KP_INT (1 << 0) + +/* + * INTERRUPT_ENABLE bits + */ + +#define ADP5520_AUTO_LD_EN (1 << 4) +#define ADP5520_CMPR_IEN (1 << 3) +#define ADP5520_OVP_IEN (1 << 2) +#define ADP5520_KR_IEN (1 << 1) +#define ADP5520_KP_IEN (1 << 0) + +/* + * BL_CONTROL bits + */ + +#define ADP5520_BL_LVL ((x) << 5) +#define ADP5520_BL_LAW ((x) << 4) +#define ADP5520_BL_AUTO_ADJ (1 << 3) +#define ADP5520_OVP_EN (1 << 2) +#define ADP5520_FOVR (1 << 1) +#define ADP5520_KP_BL_EN (1 << 0) + +/* + * ALS_CMPR_CFG bits + */ + +#define ADP5520_L3_OUT (1 << 3) +#define ADP5520_L2_OUT (1 << 2) +#define ADP5520_L3_EN (1 << 1) + +#define ADP5020_MAX_BRIGHTNESS 0x7F + +#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4)) +#define BL_CTRL_VAL(law, auto) (((1 & (auto)) << 3) | ((0x3 & (law)) << 4)) +#define ALS_CMPR_CFG_VAL(filt, l3_en) (((0x7 & filt) << 5) | l3_en) + +/* + * LEDs subdevice bits and masks + */ + +#define ADP5520_01_MAXLEDS 3 + +#define ADP5520_FLAG_LED_MASK 0x3 +#define ADP5520_FLAG_OFFT_SHIFT 8 +#define ADP5520_FLAG_OFFT_MASK 0x3 + +#define ADP5520_R3_MODE (1 << 5) +#define ADP5520_C3_MODE (1 << 
4) +#define ADP5520_LED_LAW (1 << 3) +#define ADP5520_LED3_EN (1 << 2) +#define ADP5520_LED2_EN (1 << 1) +#define ADP5520_LED1_EN (1 << 0) + +/* + * GPIO subdevice bits and masks + */ + +#define ADP5520_MAXGPIOS 8 + +#define ADP5520_GPIO_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */ +#define ADP5520_GPIO_C2 (1 << 6) +#define ADP5520_GPIO_C1 (1 << 5) +#define ADP5520_GPIO_C0 (1 << 4) +#define ADP5520_GPIO_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */ +#define ADP5520_GPIO_R2 (1 << 2) +#define ADP5520_GPIO_R1 (1 << 1) +#define ADP5520_GPIO_R0 (1 << 0) + +struct adp5520_gpio_platform_data { + unsigned gpio_start; + u8 gpio_en_mask; + u8 gpio_pullup_mask; +}; + +/* + * Keypad subdevice bits and masks + */ + +#define ADP5520_MAXKEYS 16 + +#define ADP5520_COL_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */ +#define ADP5520_COL_C2 (1 << 6) +#define ADP5520_COL_C1 (1 << 5) +#define ADP5520_COL_C0 (1 << 4) +#define ADP5520_ROW_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */ +#define ADP5520_ROW_R2 (1 << 2) +#define ADP5520_ROW_R1 (1 << 1) +#define ADP5520_ROW_R0 (1 << 0) + +#define ADP5520_KEY(row, col) (col + row * 4) +#define ADP5520_KEYMAPSIZE ADP5520_MAXKEYS + +struct adp5520_keys_platform_data { + int rows_en_mask; /* Number of rows */ + int cols_en_mask; /* Number of columns */ + const unsigned short *keymap; /* Pointer to keymap */ + unsigned short keymapsize; /* Keymap size */ + unsigned repeat:1; /* Enable key repeat */ +}; + + +/* + * LEDs subdevice platform data + */ + +#define FLAG_ID_ADP5520_LED1_ADP5501_LED0 1 /* ADP5520 PIN ILED */ +#define FLAG_ID_ADP5520_LED2_ADP5501_LED1 2 /* ADP5520 PIN C3 */ +#define FLAG_ID_ADP5520_LED3_ADP5501_LED2 3 /* ADP5520 PIN R3 */ + +#define ADP5520_LED_DIS_BLINK (0 << ADP5520_FLAG_OFFT_SHIFT) +#define ADP5520_LED_OFFT_600ms (1 << ADP5520_FLAG_OFFT_SHIFT) +#define ADP5520_LED_OFFT_800ms (2 << ADP5520_FLAG_OFFT_SHIFT) +#define ADP5520_LED_OFFT_1200ms (3 << ADP5520_FLAG_OFFT_SHIFT) + +#define ADP5520_LED_ONT_200ms 0 +#define ADP5520_LED_ONT_600ms 1 +#define ADP5520_LED_ONT_800ms 2 +#define ADP5520_LED_ONT_1200ms 3 + +struct adp5520_leds_platform_data { + int num_leds; + struct led_info *leds; + u8 fade_in; /* Backlight Fade-In Timer */ + u8 fade_out; /* Backlight Fade-Out Timer */ + u8 led_on_time; +}; + +/* + * Backlight subdevice platform data + */ + +#define ADP5520_FADE_T_DIS 0 /* Fade Timer Disabled */ +#define ADP5520_FADE_T_300ms 1 /* 0.3 Sec */ +#define ADP5520_FADE_T_600ms 2 +#define ADP5520_FADE_T_900ms 3 +#define ADP5520_FADE_T_1200ms 4 +#define ADP5520_FADE_T_1500ms 5 +#define ADP5520_FADE_T_1800ms 6 +#define ADP5520_FADE_T_2100ms 7 +#define ADP5520_FADE_T_2400ms 8 +#define ADP5520_FADE_T_2700ms 9 +#define ADP5520_FADE_T_3000ms 10 +#define ADP5520_FADE_T_3500ms 11 +#define ADP5520_FADE_T_4000ms 12 +#define ADP5520_FADE_T_4500ms 13 +#define ADP5520_FADE_T_5000ms 14 +#define ADP5520_FADE_T_5500ms 15 /* 5.5 Sec */ + +#define ADP5520_BL_LAW_LINEAR 0 +#define ADP5520_BL_LAW_SQUARE 1 +#define ADP5520_BL_LAW_CUBIC1 2 +#define ADP5520_BL_LAW_CUBIC2 3 + +#define ADP5520_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ +#define ADP5520_BL_AMBL_FILT_160ms 1 +#define ADP5520_BL_AMBL_FILT_320ms 2 +#define ADP5520_BL_AMBL_FILT_640ms 3 +#define ADP5520_BL_AMBL_FILT_1280ms 4 +#define ADP5520_BL_AMBL_FILT_2560ms 5 +#define ADP5520_BL_AMBL_FILT_5120ms 6 +#define ADP5520_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ + + /* + * Blacklight current 0..30mA + */ +#define ADP5520_BL_CUR_mA(I) ((I * 127) / 30) + + /* + * L2 comparator current 0..1000uA + */ +#define ADP5520_L2_COMP_CURR_uA(I) ((I * 255) 
/ 1000) + + /* + * L3 comparator current 0..127uA + */ +#define ADP5520_L3_COMP_CURR_uA(I) ((I * 255) / 127) + +struct adp5520_backlight_platform_data { + u8 fade_in; /* Backlight Fade-In Timer */ + u8 fade_out; /* Backlight Fade-Out Timer */ + u8 fade_led_law; /* fade-on/fade-off transfer characteristic */ + + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ + u8 abml_filt; /* Light sensor filter time */ + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */ + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */ + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */ + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */ +}; + +/* + * MFD chip platform data + */ + +struct adp5520_platform_data { + struct adp5520_keys_platform_data *keys; + struct adp5520_gpio_platform_data *gpio; + struct adp5520_leds_platform_data *leds; + struct adp5520_backlight_platform_data *backlight; +}; + +/* + * MFD chip functions + */ + +extern int adp5520_read(struct device *dev, int reg, uint8_t *val); +extern int adp5520_write(struct device *dev, int reg, u8 val); +extern int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask); +extern int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask); + +extern int adp5520_register_notifier(struct device *dev, + struct notifier_block *nb, unsigned int events); + +extern int adp5520_unregister_notifier(struct device *dev, + struct notifier_block *nb, unsigned int events); + +#endif /* __LINUX_MFD_ADP5520_H */ diff --git a/include/linux/mfd/altera-a10sr.h b/include/linux/mfd/altera-a10sr.h new file mode 100644 index 000000000..45a5e6e7d --- /dev/null +++ b/include/linux/mfd/altera-a10sr.h @@ -0,0 +1,85 @@ +/* + * Copyright Intel Corporation (C) 2014-2016. All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + * Declarations for Altera Arria10 MAX5 System Resource Chip + * + * Adapted from DA9052 + */ + +#ifndef __MFD_ALTERA_A10SR_H +#define __MFD_ALTERA_A10SR_H + +#include +#include +#include +#include +#include + +/* Write registers are always on even addresses */ +#define WRITE_REG_MASK 0xFE +/* Odd registers are always on odd addresses */ +#define READ_REG_MASK 0x01 + +#define ALTR_A10SR_BITS_PER_REGISTER 8 +/* + * To find the correct register, we divide the input GPIO by + * the number of GPIO in each register. We then need to multiply + * by 2 because the reads are at odd addresses. 
+ */ +#define ALTR_A10SR_REG_OFFSET(X) (((X) / ALTR_A10SR_BITS_PER_REGISTER) << 1) +#define ALTR_A10SR_REG_BIT(X) ((X) % ALTR_A10SR_BITS_PER_REGISTER) +#define ALTR_A10SR_REG_BIT_CHG(X, Y) ((X) << ALTR_A10SR_REG_BIT(Y)) +#define ALTR_A10SR_REG_BIT_MASK(X) (1 << ALTR_A10SR_REG_BIT(X)) + +/* Arria10 System Controller Register Defines */ +#define ALTR_A10SR_NOP 0x00 /* No Change */ +#define ALTR_A10SR_VERSION_READ 0x00 /* MAX5 Version Read */ + +#define ALTR_A10SR_LED_REG 0x02 /* LED - Upper 4 bits */ +/* LED register Bit Definitions */ +#define ALTR_A10SR_LED_VALID_SHIFT 4 /* LED - Upper 4 bits valid */ +#define ALTR_A10SR_OUT_VALID_RANGE_LO ALTR_A10SR_LED_VALID_SHIFT +#define ALTR_A10SR_OUT_VALID_RANGE_HI 7 + +#define ALTR_A10SR_PBDSW_REG 0x04 /* PB & DIP SW - Input only */ +#define ALTR_A10SR_PBDSW_IRQ_REG 0x06 /* PB & DIP SW Flag Clear */ +/* Pushbutton & DIP Switch Bit Definitions */ +#define ALTR_A10SR_IN_VALID_RANGE_LO 8 +#define ALTR_A10SR_IN_VALID_RANGE_HI 15 + +#define ALTR_A10SR_PWR_GOOD1_REG 0x08 /* Power Good1 Read */ +#define ALTR_A10SR_PWR_GOOD2_REG 0x0A /* Power Good2 Read */ +#define ALTR_A10SR_PWR_GOOD3_REG 0x0C /* Power Good3 Read */ +#define ALTR_A10SR_FMCAB_REG 0x0E /* FMCA/B & PCIe Pwr Enable */ +#define ALTR_A10SR_HPS_RST_REG 0x10 /* HPS Reset */ +#define ALTR_A10SR_USB_QSPI_REG 0x12 /* USB, BQSPI, FILE Reset */ +#define ALTR_A10SR_SFPA_REG 0x14 /* SFPA Control Reg */ +#define ALTR_A10SR_SFPB_REG 0x16 /* SFPB Control Reg */ +#define ALTR_A10SR_I2C_M_REG 0x18 /* I2C Master Select */ +#define ALTR_A10SR_WARM_RST_REG 0x1A /* HPS Warm Reset */ +#define ALTR_A10SR_WR_KEY_REG 0x1C /* HPS Warm Reset Key */ +#define ALTR_A10SR_PMBUS_REG 0x1E /* HPS PM Bus */ + +/** + * struct altr_a10sr - Altera Max5 MFD device private data structure + * @dev: : this device + * @regmap: the regmap assigned to the parent device. + */ +struct altr_a10sr { + struct device *dev; + struct regmap *regmap; +}; + +#endif /* __MFD_ALTERA_A10SR_H */ diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h new file mode 100644 index 000000000..b31b3be7f --- /dev/null +++ b/include/linux/mfd/arizona/core.h @@ -0,0 +1,194 @@ +/* + * Arizona MFD internals + * + * Copyright 2012 Wolfson Microelectronics plc + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _WM_ARIZONA_CORE_H +#define _WM_ARIZONA_CORE_H + +#include +#include +#include +#include +#include +#include + +#define ARIZONA_MAX_CORE_SUPPLIES 2 + +enum { + ARIZONA_MCLK1, + ARIZONA_MCLK2, + ARIZONA_NUM_MCLK +}; + +enum arizona_type { + WM5102 = 1, + WM5110 = 2, + WM8997 = 3, + WM8280 = 4, + WM8998 = 5, + WM1814 = 6, + WM1831 = 7, + CS47L24 = 8, +}; + +#define ARIZONA_IRQ_GP1 0 +#define ARIZONA_IRQ_GP2 1 +#define ARIZONA_IRQ_GP3 2 +#define ARIZONA_IRQ_GP4 3 +#define ARIZONA_IRQ_GP5_FALL 4 +#define ARIZONA_IRQ_GP5_RISE 5 +#define ARIZONA_IRQ_JD_FALL 6 +#define ARIZONA_IRQ_JD_RISE 7 +#define ARIZONA_IRQ_DSP1_RAM_RDY 8 +#define ARIZONA_IRQ_DSP2_RAM_RDY 9 +#define ARIZONA_IRQ_DSP3_RAM_RDY 10 +#define ARIZONA_IRQ_DSP4_RAM_RDY 11 +#define ARIZONA_IRQ_DSP_IRQ1 12 +#define ARIZONA_IRQ_DSP_IRQ2 13 +#define ARIZONA_IRQ_DSP_IRQ3 14 +#define ARIZONA_IRQ_DSP_IRQ4 15 +#define ARIZONA_IRQ_DSP_IRQ5 16 +#define ARIZONA_IRQ_DSP_IRQ6 17 +#define ARIZONA_IRQ_DSP_IRQ7 18 +#define ARIZONA_IRQ_DSP_IRQ8 19 +#define ARIZONA_IRQ_SPK_OVERHEAT_WARN 20 +#define ARIZONA_IRQ_SPK_OVERHEAT 21 +#define ARIZONA_IRQ_MICDET 22 +#define ARIZONA_IRQ_HPDET 23 +#define ARIZONA_IRQ_WSEQ_DONE 24 +#define ARIZONA_IRQ_DRC2_SIG_DET 25 +#define ARIZONA_IRQ_DRC1_SIG_DET 26 +#define ARIZONA_IRQ_ASRC2_LOCK 27 +#define ARIZONA_IRQ_ASRC1_LOCK 28 +#define ARIZONA_IRQ_UNDERCLOCKED 29 +#define ARIZONA_IRQ_OVERCLOCKED 30 +#define ARIZONA_IRQ_FLL2_LOCK 31 +#define ARIZONA_IRQ_FLL1_LOCK 32 +#define ARIZONA_IRQ_CLKGEN_ERR 33 +#define ARIZONA_IRQ_CLKGEN_ERR_ASYNC 34 +#define ARIZONA_IRQ_ASRC_CFG_ERR 35 +#define ARIZONA_IRQ_AIF3_ERR 36 +#define ARIZONA_IRQ_AIF2_ERR 37 +#define ARIZONA_IRQ_AIF1_ERR 38 +#define ARIZONA_IRQ_CTRLIF_ERR 39 +#define ARIZONA_IRQ_MIXER_DROPPED_SAMPLES 40 +#define ARIZONA_IRQ_ASYNC_CLK_ENA_LOW 41 +#define ARIZONA_IRQ_SYSCLK_ENA_LOW 42 +#define ARIZONA_IRQ_ISRC1_CFG_ERR 43 +#define ARIZONA_IRQ_ISRC2_CFG_ERR 44 +#define ARIZONA_IRQ_BOOT_DONE 45 +#define ARIZONA_IRQ_DCS_DAC_DONE 46 +#define ARIZONA_IRQ_DCS_HP_DONE 47 +#define ARIZONA_IRQ_FLL2_CLOCK_OK 48 +#define ARIZONA_IRQ_FLL1_CLOCK_OK 49 +#define ARIZONA_IRQ_MICD_CLAMP_RISE 50 +#define ARIZONA_IRQ_MICD_CLAMP_FALL 51 +#define ARIZONA_IRQ_HP3R_DONE 52 +#define ARIZONA_IRQ_HP3L_DONE 53 +#define ARIZONA_IRQ_HP2R_DONE 54 +#define ARIZONA_IRQ_HP2L_DONE 55 +#define ARIZONA_IRQ_HP1R_DONE 56 +#define ARIZONA_IRQ_HP1L_DONE 57 +#define ARIZONA_IRQ_ISRC3_CFG_ERR 58 +#define ARIZONA_IRQ_DSP_SHARED_WR_COLL 59 +#define ARIZONA_IRQ_SPK_SHUTDOWN 60 +#define ARIZONA_IRQ_SPK1R_SHORT 61 +#define ARIZONA_IRQ_SPK1L_SHORT 62 +#define ARIZONA_IRQ_HP3R_SC_NEG 63 +#define ARIZONA_IRQ_HP3R_SC_POS 64 +#define ARIZONA_IRQ_HP3L_SC_NEG 65 +#define ARIZONA_IRQ_HP3L_SC_POS 66 +#define ARIZONA_IRQ_HP2R_SC_NEG 67 +#define ARIZONA_IRQ_HP2R_SC_POS 68 +#define ARIZONA_IRQ_HP2L_SC_NEG 69 +#define ARIZONA_IRQ_HP2L_SC_POS 70 +#define ARIZONA_IRQ_HP1R_SC_NEG 71 +#define ARIZONA_IRQ_HP1R_SC_POS 72 +#define ARIZONA_IRQ_HP1L_SC_NEG 73 +#define ARIZONA_IRQ_HP1L_SC_POS 74 + +#define ARIZONA_NUM_IRQ 75 + +struct snd_soc_dapm_context; + +struct arizona { + struct regmap *regmap; + struct device *dev; + + enum arizona_type type; + unsigned int rev; + + int num_core_supplies; + struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES]; + struct regulator *dcvdd; + bool has_fully_powered_off; + + struct arizona_pdata pdata; + + unsigned int external_dcvdd:1; + + int irq; + struct irq_domain *virq; + struct regmap_irq_chip_data *aod_irq_chip; + struct regmap_irq_chip_data *irq_chip; + + bool 
hpdet_clamp; + unsigned int hp_ena; + + struct mutex clk_lock; + int clk32k_ref; + + struct clk *mclk[ARIZONA_NUM_MCLK]; + + bool ctrlif_error; + + struct snd_soc_dapm_context *dapm; + + int tdm_width[ARIZONA_MAX_AIF]; + int tdm_slots[ARIZONA_MAX_AIF]; + + uint16_t dac_comp_coeff; + uint8_t dac_comp_enabled; + struct mutex dac_comp_lock; + + struct blocking_notifier_head notifier; +}; + +static inline int arizona_call_notifiers(struct arizona *arizona, + unsigned long event, + void *data) +{ + return blocking_notifier_call_chain(&arizona->notifier, event, data); +} + +int arizona_clk32k_enable(struct arizona *arizona); +int arizona_clk32k_disable(struct arizona *arizona); + +int arizona_request_irq(struct arizona *arizona, int irq, char *name, + irq_handler_t handler, void *data); +void arizona_free_irq(struct arizona *arizona, int irq, void *data); +int arizona_set_irq_wake(struct arizona *arizona, int irq, int on); + +#ifdef CONFIG_MFD_WM5102 +int wm5102_patch(struct arizona *arizona); +#else +static inline int wm5102_patch(struct arizona *arizona) +{ + return 0; +} +#endif + +int wm5110_patch(struct arizona *arizona); +int cs47l24_patch(struct arizona *arizona); +int wm8997_patch(struct arizona *arizona); +int wm8998_patch(struct arizona *arizona); + +#endif diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h new file mode 100644 index 000000000..0013075d4 --- /dev/null +++ b/include/linux/mfd/arizona/pdata.h @@ -0,0 +1,197 @@ +/* + * Platform data for Arizona devices + * + * Copyright 2012 Wolfson Microelectronics. PLC. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _ARIZONA_PDATA_H +#define _ARIZONA_PDATA_H + +#include +#include +#include + +#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */ +#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */ +#define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */ +#define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */ +#define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */ +#define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */ +#define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */ +#define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */ +#define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */ +#define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */ +#define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */ +#define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */ +#define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */ +#define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */ +#define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */ +#define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */ +#define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */ +#define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */ +#define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */ +#define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */ +#define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */ +#define ARIZONA_GPN_FN_MASK 0x007F /* GPN_FN - [6:0] */ +#define ARIZONA_GPN_FN_SHIFT 0 /* GPN_FN - [6:0] */ +#define ARIZONA_GPN_FN_WIDTH 7 /* GPN_FN - [6:0] */ + +#define ARIZONA_MAX_GPIO 5 + +#define ARIZONA_MAX_INPUT 4 + +#define ARIZONA_MAX_MICBIAS 3 + +#define ARIZONA_MAX_OUTPUT 6 + +#define ARIZONA_MAX_AIF 3 + +#define ARIZONA_HAP_ACT_ERM 0 +#define ARIZONA_HAP_ACT_LRA 2 + +#define ARIZONA_MAX_PDM_SPK 2 + +struct regulator_init_data; +struct gpio_desc; + +struct arizona_micbias { + int mV; /** Regulated voltage */ + unsigned int ext_cap:1; /** External capacitor fitted */ + unsigned int discharge:1; /** Actively discharge */ + unsigned int 
soft_start:1; /** Disable aggressive startup ramp rate */ + unsigned int bypass:1; /** Use bypass mode */ +}; + +struct arizona_micd_config { + unsigned int src; + unsigned int bias; + bool gpio; +}; + +struct arizona_micd_range { + int max; /** Ohms */ + int key; /** Key to report to input layer */ +}; + +struct arizona_pdata { + struct gpio_desc *reset; /** GPIO controlling /RESET, if any */ + + /** Regulator configuration for MICVDD */ + struct arizona_micsupp_pdata micvdd; + + /** Regulator configuration for LDO1 */ + struct arizona_ldo1_pdata ldo1; + + /** If a direct 32kHz clock is provided on an MCLK specify it here */ + int clk32k_src; + + /** Mode for primary IRQ (defaults to active low) */ + unsigned int irq_flags; + + /* Base GPIO */ + int gpio_base; + + /** Pin state for GPIO pins */ + unsigned int gpio_defaults[ARIZONA_MAX_GPIO]; + + /** + * Maximum number of channels clocks will be generated for, + * useful for systems where and I2S bus with multiple data + * lines is mastered. + */ + unsigned int max_channels_clocked[ARIZONA_MAX_AIF]; + + /** GPIO5 is used for jack detection */ + bool jd_gpio5; + + /** Internal pull on GPIO5 is disabled when used for jack detection */ + bool jd_gpio5_nopull; + + /** set to true if jackdet contact opens on insert */ + bool jd_invert; + + /** Use the headphone detect circuit to identify the accessory */ + bool hpdet_acc_id; + + /** Check for line output with HPDET method */ + bool hpdet_acc_id_line; + + /** GPIO used for mic isolation with HPDET */ + int hpdet_id_gpio; + + /** Channel to use for headphone detection */ + unsigned int hpdet_channel; + + /** Use software comparison to determine mic presence */ + bool micd_software_compare; + + /** Extra debounce timeout used during initial mic detection (ms) */ + unsigned int micd_detect_debounce; + + /** GPIO for mic detection polarity */ + int micd_pol_gpio; + + /** Mic detect ramp rate */ + unsigned int micd_bias_start_time; + + /** Mic detect sample rate */ + unsigned int micd_rate; + + /** Mic detect debounce level */ + unsigned int micd_dbtime; + + /** Mic detect timeout (ms) */ + unsigned int micd_timeout; + + /** Force MICBIAS on for mic detect */ + bool micd_force_micbias; + + /** Mic detect level parameters */ + const struct arizona_micd_range *micd_ranges; + int num_micd_ranges; + + /** Headset polarity configurations */ + struct arizona_micd_config *micd_configs; + int num_micd_configs; + + /** Reference voltage for DMIC inputs */ + int dmic_ref[ARIZONA_MAX_INPUT]; + + /** MICBIAS configurations */ + struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS]; + + /** + * Mode of input structures + * One of the ARIZONA_INMODE_xxx values + * wm5102/wm5110/wm8280/wm8997: [0]=IN1 [1]=IN2 [2]=IN3 [3]=IN4 + * wm8998: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B + */ + int inmode[ARIZONA_MAX_INPUT]; + + /** Mode for outputs */ + int out_mono[ARIZONA_MAX_OUTPUT]; + + /** Limit output volumes */ + unsigned int out_vol_limit[2 * ARIZONA_MAX_OUTPUT]; + + /** PDM speaker mute setting */ + unsigned int spk_mute[ARIZONA_MAX_PDM_SPK]; + + /** PDM speaker format */ + unsigned int spk_fmt[ARIZONA_MAX_PDM_SPK]; + + /** Haptic actuator type */ + unsigned int hap_act; + + /** GPIO for primary IRQ (used for edge triggered emulation) */ + int irq_gpio; + + /** General purpose switch control */ + unsigned int gpsw; +}; + +#endif diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h new file mode 100644 index 000000000..0d06c5d0a --- /dev/null +++ b/include/linux/mfd/arizona/registers.h 
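
Editor's note: the pdata.h header added above only declares the board-configuration structures; to make their intent concrete, here is a minimal, purely illustrative sketch of how a board file might populate struct arizona_pdata. All values and the example_* names are hypothetical and are not part of this patch; only the field names come from the header above.

/* Illustrative use of struct arizona_pdata (hypothetical values). */
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/arizona/pdata.h>

/* Example headset-button resistance ranges mapped to input key codes. */
static const struct arizona_micd_range example_micd_ranges[] = {
	{ .max = 11,  .key = KEY_MEDIA },	/* lowest-resistance button */
	{ .max = 430, .key = KEY_VOLUMEUP },
};

static const struct arizona_pdata example_arizona_pdata = {
	.irq_flags = IRQF_TRIGGER_LOW,		/* primary IRQ is active low */
	.micbias[0] = {
		.mV = 2800,			/* regulated MICBIAS1 voltage */
		.discharge = 1,			/* actively discharge when disabled */
	},
	.micd_ranges = example_micd_ranges,
	.num_micd_ranges = ARRAY_SIZE(example_micd_ranges),
	.micd_timeout = 100,			/* mic detect timeout in ms */
};

The structure is normally handed to the MFD core through the parent device's platform data; the sketch stops short of that wiring since it is board specific.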
@@ -0,0 +1,8170 @@ +/* + * ARIZONA register definitions + * + * Copyright 2012 Wolfson Microelectronics plc + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _ARIZONA_REGISTERS_H +#define _ARIZONA_REGISTERS_H + +/* + * Register values. + */ +#define ARIZONA_SOFTWARE_RESET 0x00 +#define ARIZONA_DEVICE_REVISION 0x01 +#define ARIZONA_CTRL_IF_SPI_CFG_1 0x08 +#define ARIZONA_CTRL_IF_I2C1_CFG_1 0x09 +#define ARIZONA_CTRL_IF_I2C2_CFG_1 0x0A +#define ARIZONA_CTRL_IF_I2C1_CFG_2 0x0B +#define ARIZONA_CTRL_IF_I2C2_CFG_2 0x0C +#define ARIZONA_CTRL_IF_STATUS_1 0x0D +#define ARIZONA_WRITE_SEQUENCER_CTRL_0 0x16 +#define ARIZONA_WRITE_SEQUENCER_CTRL_1 0x17 +#define ARIZONA_WRITE_SEQUENCER_CTRL_2 0x18 +#define ARIZONA_WRITE_SEQUENCER_CTRL_3 0x19 +#define ARIZONA_WRITE_SEQUENCER_PROM 0x1A +#define ARIZONA_TONE_GENERATOR_1 0x20 +#define ARIZONA_TONE_GENERATOR_2 0x21 +#define ARIZONA_TONE_GENERATOR_3 0x22 +#define ARIZONA_TONE_GENERATOR_4 0x23 +#define ARIZONA_TONE_GENERATOR_5 0x24 +#define ARIZONA_PWM_DRIVE_1 0x30 +#define ARIZONA_PWM_DRIVE_2 0x31 +#define ARIZONA_PWM_DRIVE_3 0x32 +#define ARIZONA_WAKE_CONTROL 0x40 +#define ARIZONA_SEQUENCE_CONTROL 0x41 +#define ARIZONA_SPARE_TRIGGERS 0x42 +#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61 +#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62 +#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63 +#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4 0x64 +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x66 +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x67 +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3 0x68 +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4 0x69 +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5 0x6A +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6 0x6B +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_7 0x6C +#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_8 0x6D +#define ARIZONA_COMFORT_NOISE_GENERATOR 0x70 +#define ARIZONA_HAPTICS_CONTROL_1 0x90 +#define ARIZONA_HAPTICS_CONTROL_2 0x91 +#define ARIZONA_HAPTICS_PHASE_1_INTENSITY 0x92 +#define ARIZONA_HAPTICS_PHASE_1_DURATION 0x93 +#define ARIZONA_HAPTICS_PHASE_2_INTENSITY 0x94 +#define ARIZONA_HAPTICS_PHASE_2_DURATION 0x95 +#define ARIZONA_HAPTICS_PHASE_3_INTENSITY 0x96 +#define ARIZONA_HAPTICS_PHASE_3_DURATION 0x97 +#define ARIZONA_HAPTICS_STATUS 0x98 +#define ARIZONA_CLOCK_32K_1 0x100 +#define ARIZONA_SYSTEM_CLOCK_1 0x101 +#define ARIZONA_SAMPLE_RATE_1 0x102 +#define ARIZONA_SAMPLE_RATE_2 0x103 +#define ARIZONA_SAMPLE_RATE_3 0x104 +#define ARIZONA_SAMPLE_RATE_1_STATUS 0x10A +#define ARIZONA_SAMPLE_RATE_2_STATUS 0x10B +#define ARIZONA_SAMPLE_RATE_3_STATUS 0x10C +#define ARIZONA_ASYNC_CLOCK_1 0x112 +#define ARIZONA_ASYNC_SAMPLE_RATE_1 0x113 +#define ARIZONA_ASYNC_SAMPLE_RATE_2 0x114 +#define ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B +#define ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C +#define ARIZONA_OUTPUT_SYSTEM_CLOCK 0x149 +#define ARIZONA_OUTPUT_ASYNC_CLOCK 0x14A +#define ARIZONA_RATE_ESTIMATOR_1 0x152 +#define ARIZONA_RATE_ESTIMATOR_2 0x153 +#define ARIZONA_RATE_ESTIMATOR_3 0x154 +#define ARIZONA_RATE_ESTIMATOR_4 0x155 +#define ARIZONA_RATE_ESTIMATOR_5 0x156 +#define ARIZONA_DYNAMIC_FREQUENCY_SCALING_1 0x161 +#define ARIZONA_FLL1_CONTROL_1 0x171 +#define ARIZONA_FLL1_CONTROL_2 0x172 +#define ARIZONA_FLL1_CONTROL_3 0x173 +#define ARIZONA_FLL1_CONTROL_4 0x174 +#define 
ARIZONA_FLL1_CONTROL_5 0x175 +#define ARIZONA_FLL1_CONTROL_6 0x176 +#define ARIZONA_FLL1_LOOP_FILTER_TEST_1 0x177 +#define ARIZONA_FLL1_NCO_TEST_0 0x178 +#define ARIZONA_FLL1_CONTROL_7 0x179 +#define ARIZONA_FLL1_SYNCHRONISER_1 0x181 +#define ARIZONA_FLL1_SYNCHRONISER_2 0x182 +#define ARIZONA_FLL1_SYNCHRONISER_3 0x183 +#define ARIZONA_FLL1_SYNCHRONISER_4 0x184 +#define ARIZONA_FLL1_SYNCHRONISER_5 0x185 +#define ARIZONA_FLL1_SYNCHRONISER_6 0x186 +#define ARIZONA_FLL1_SYNCHRONISER_7 0x187 +#define ARIZONA_FLL1_SPREAD_SPECTRUM 0x189 +#define ARIZONA_FLL1_GPIO_CLOCK 0x18A +#define ARIZONA_FLL2_CONTROL_1 0x191 +#define ARIZONA_FLL2_CONTROL_2 0x192 +#define ARIZONA_FLL2_CONTROL_3 0x193 +#define ARIZONA_FLL2_CONTROL_4 0x194 +#define ARIZONA_FLL2_CONTROL_5 0x195 +#define ARIZONA_FLL2_CONTROL_6 0x196 +#define ARIZONA_FLL2_LOOP_FILTER_TEST_1 0x197 +#define ARIZONA_FLL2_NCO_TEST_0 0x198 +#define ARIZONA_FLL2_CONTROL_7 0x199 +#define ARIZONA_FLL2_SYNCHRONISER_1 0x1A1 +#define ARIZONA_FLL2_SYNCHRONISER_2 0x1A2 +#define ARIZONA_FLL2_SYNCHRONISER_3 0x1A3 +#define ARIZONA_FLL2_SYNCHRONISER_4 0x1A4 +#define ARIZONA_FLL2_SYNCHRONISER_5 0x1A5 +#define ARIZONA_FLL2_SYNCHRONISER_6 0x1A6 +#define ARIZONA_FLL2_SYNCHRONISER_7 0x1A7 +#define ARIZONA_FLL2_SPREAD_SPECTRUM 0x1A9 +#define ARIZONA_FLL2_GPIO_CLOCK 0x1AA +#define ARIZONA_MIC_CHARGE_PUMP_1 0x200 +#define ARIZONA_LDO1_CONTROL_1 0x210 +#define ARIZONA_LDO1_CONTROL_2 0x212 +#define ARIZONA_LDO2_CONTROL_1 0x213 +#define ARIZONA_MIC_BIAS_CTRL_1 0x218 +#define ARIZONA_MIC_BIAS_CTRL_2 0x219 +#define ARIZONA_MIC_BIAS_CTRL_3 0x21A +#define ARIZONA_HP_CTRL_1L 0x225 +#define ARIZONA_HP_CTRL_1R 0x226 +#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293 +#define ARIZONA_HEADPHONE_DETECT_1 0x29B +#define ARIZONA_HEADPHONE_DETECT_2 0x29C +#define ARIZONA_HP_DACVAL 0x29F +#define ARIZONA_MICD_CLAMP_CONTROL 0x2A2 +#define ARIZONA_MIC_DETECT_1 0x2A3 +#define ARIZONA_MIC_DETECT_2 0x2A4 +#define ARIZONA_MIC_DETECT_3 0x2A5 +#define ARIZONA_MIC_DETECT_LEVEL_1 0x2A6 +#define ARIZONA_MIC_DETECT_LEVEL_2 0x2A7 +#define ARIZONA_MIC_DETECT_LEVEL_3 0x2A8 +#define ARIZONA_MIC_DETECT_LEVEL_4 0x2A9 +#define ARIZONA_MIC_DETECT_4 0x2AB +#define ARIZONA_MIC_NOISE_MIX_CONTROL_1 0x2C3 +#define ARIZONA_ISOLATION_CONTROL 0x2CB +#define ARIZONA_JACK_DETECT_ANALOGUE 0x2D3 +#define ARIZONA_INPUT_ENABLES 0x300 +#define ARIZONA_INPUT_ENABLES_STATUS 0x301 +#define ARIZONA_INPUT_RATE 0x308 +#define ARIZONA_INPUT_VOLUME_RAMP 0x309 +#define ARIZONA_HPF_CONTROL 0x30C +#define ARIZONA_IN1L_CONTROL 0x310 +#define ARIZONA_ADC_DIGITAL_VOLUME_1L 0x311 +#define ARIZONA_DMIC1L_CONTROL 0x312 +#define ARIZONA_IN1R_CONTROL 0x314 +#define ARIZONA_ADC_DIGITAL_VOLUME_1R 0x315 +#define ARIZONA_DMIC1R_CONTROL 0x316 +#define ARIZONA_IN2L_CONTROL 0x318 +#define ARIZONA_ADC_DIGITAL_VOLUME_2L 0x319 +#define ARIZONA_DMIC2L_CONTROL 0x31A +#define ARIZONA_IN2R_CONTROL 0x31C +#define ARIZONA_ADC_DIGITAL_VOLUME_2R 0x31D +#define ARIZONA_DMIC2R_CONTROL 0x31E +#define ARIZONA_IN3L_CONTROL 0x320 +#define ARIZONA_ADC_DIGITAL_VOLUME_3L 0x321 +#define ARIZONA_DMIC3L_CONTROL 0x322 +#define ARIZONA_IN3R_CONTROL 0x324 +#define ARIZONA_ADC_DIGITAL_VOLUME_3R 0x325 +#define ARIZONA_DMIC3R_CONTROL 0x326 +#define ARIZONA_IN4L_CONTROL 0x328 +#define ARIZONA_ADC_DIGITAL_VOLUME_4L 0x329 +#define ARIZONA_DMIC4L_CONTROL 0x32A +#define ARIZONA_IN4R_CONTROL 0x32C +#define ARIZONA_ADC_DIGITAL_VOLUME_4R 0x32D +#define ARIZONA_DMIC4R_CONTROL 0x32E +#define ARIZONA_OUTPUT_ENABLES_1 0x400 +#define ARIZONA_OUTPUT_STATUS_1 0x401 +#define 
ARIZONA_RAW_OUTPUT_STATUS_1 0x406 +#define ARIZONA_OUTPUT_RATE_1 0x408 +#define ARIZONA_OUTPUT_VOLUME_RAMP 0x409 +#define ARIZONA_OUTPUT_PATH_CONFIG_1L 0x410 +#define ARIZONA_DAC_DIGITAL_VOLUME_1L 0x411 +#define ARIZONA_DAC_VOLUME_LIMIT_1L 0x412 +#define ARIZONA_NOISE_GATE_SELECT_1L 0x413 +#define ARIZONA_OUTPUT_PATH_CONFIG_1R 0x414 +#define ARIZONA_DAC_DIGITAL_VOLUME_1R 0x415 +#define ARIZONA_DAC_VOLUME_LIMIT_1R 0x416 +#define ARIZONA_NOISE_GATE_SELECT_1R 0x417 +#define ARIZONA_OUTPUT_PATH_CONFIG_2L 0x418 +#define ARIZONA_DAC_DIGITAL_VOLUME_2L 0x419 +#define ARIZONA_DAC_VOLUME_LIMIT_2L 0x41A +#define ARIZONA_NOISE_GATE_SELECT_2L 0x41B +#define ARIZONA_OUTPUT_PATH_CONFIG_2R 0x41C +#define ARIZONA_DAC_DIGITAL_VOLUME_2R 0x41D +#define ARIZONA_DAC_VOLUME_LIMIT_2R 0x41E +#define ARIZONA_NOISE_GATE_SELECT_2R 0x41F +#define ARIZONA_OUTPUT_PATH_CONFIG_3L 0x420 +#define ARIZONA_DAC_DIGITAL_VOLUME_3L 0x421 +#define ARIZONA_DAC_VOLUME_LIMIT_3L 0x422 +#define ARIZONA_NOISE_GATE_SELECT_3L 0x423 +#define ARIZONA_OUTPUT_PATH_CONFIG_3R 0x424 +#define ARIZONA_DAC_DIGITAL_VOLUME_3R 0x425 +#define ARIZONA_DAC_VOLUME_LIMIT_3R 0x426 +#define ARIZONA_NOISE_GATE_SELECT_3R 0x427 +#define ARIZONA_OUTPUT_PATH_CONFIG_4L 0x428 +#define ARIZONA_DAC_DIGITAL_VOLUME_4L 0x429 +#define ARIZONA_OUT_VOLUME_4L 0x42A +#define ARIZONA_NOISE_GATE_SELECT_4L 0x42B +#define ARIZONA_OUTPUT_PATH_CONFIG_4R 0x42C +#define ARIZONA_DAC_DIGITAL_VOLUME_4R 0x42D +#define ARIZONA_OUT_VOLUME_4R 0x42E +#define ARIZONA_NOISE_GATE_SELECT_4R 0x42F +#define ARIZONA_OUTPUT_PATH_CONFIG_5L 0x430 +#define ARIZONA_DAC_DIGITAL_VOLUME_5L 0x431 +#define ARIZONA_DAC_VOLUME_LIMIT_5L 0x432 +#define ARIZONA_NOISE_GATE_SELECT_5L 0x433 +#define ARIZONA_OUTPUT_PATH_CONFIG_5R 0x434 +#define ARIZONA_DAC_DIGITAL_VOLUME_5R 0x435 +#define ARIZONA_DAC_VOLUME_LIMIT_5R 0x436 +#define ARIZONA_NOISE_GATE_SELECT_5R 0x437 +#define ARIZONA_OUTPUT_PATH_CONFIG_6L 0x438 +#define ARIZONA_DAC_DIGITAL_VOLUME_6L 0x439 +#define ARIZONA_DAC_VOLUME_LIMIT_6L 0x43A +#define ARIZONA_NOISE_GATE_SELECT_6L 0x43B +#define ARIZONA_OUTPUT_PATH_CONFIG_6R 0x43C +#define ARIZONA_DAC_DIGITAL_VOLUME_6R 0x43D +#define ARIZONA_DAC_VOLUME_LIMIT_6R 0x43E +#define ARIZONA_NOISE_GATE_SELECT_6R 0x43F +#define ARIZONA_DRE_ENABLE 0x440 +#define ARIZONA_DRE_CONTROL_1 0x441 +#define ARIZONA_DRE_CONTROL_2 0x442 +#define ARIZONA_DRE_CONTROL_3 0x443 +#define ARIZONA_EDRE_ENABLE 0x448 +#define ARIZONA_DAC_AEC_CONTROL_1 0x450 +#define ARIZONA_DAC_AEC_CONTROL_2 0x451 +#define ARIZONA_NOISE_GATE_CONTROL 0x458 +#define ARIZONA_PDM_SPK1_CTRL_1 0x490 +#define ARIZONA_PDM_SPK1_CTRL_2 0x491 +#define ARIZONA_PDM_SPK2_CTRL_1 0x492 +#define ARIZONA_PDM_SPK2_CTRL_2 0x493 +#define ARIZONA_HP_TEST_CTRL_13 0x49A +#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0 +#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1 +#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2 +#define ARIZONA_HP_TEST_CTRL_1 0x4A4 +#define ARIZONA_SPK_CTRL_2 0x4B5 +#define ARIZONA_SPK_CTRL_3 0x4B6 +#define ARIZONA_DAC_COMP_1 0x4DC +#define ARIZONA_DAC_COMP_2 0x4DD +#define ARIZONA_DAC_COMP_3 0x4DE +#define ARIZONA_DAC_COMP_4 0x4DF +#define ARIZONA_AIF1_BCLK_CTRL 0x500 +#define ARIZONA_AIF1_TX_PIN_CTRL 0x501 +#define ARIZONA_AIF1_RX_PIN_CTRL 0x502 +#define ARIZONA_AIF1_RATE_CTRL 0x503 +#define ARIZONA_AIF1_FORMAT 0x504 +#define ARIZONA_AIF1_TX_BCLK_RATE 0x505 +#define ARIZONA_AIF1_RX_BCLK_RATE 0x506 +#define ARIZONA_AIF1_FRAME_CTRL_1 0x507 +#define ARIZONA_AIF1_FRAME_CTRL_2 0x508 +#define ARIZONA_AIF1_FRAME_CTRL_3 0x509 +#define ARIZONA_AIF1_FRAME_CTRL_4 0x50A +#define 
ARIZONA_AIF1_FRAME_CTRL_5 0x50B +#define ARIZONA_AIF1_FRAME_CTRL_6 0x50C +#define ARIZONA_AIF1_FRAME_CTRL_7 0x50D +#define ARIZONA_AIF1_FRAME_CTRL_8 0x50E +#define ARIZONA_AIF1_FRAME_CTRL_9 0x50F +#define ARIZONA_AIF1_FRAME_CTRL_10 0x510 +#define ARIZONA_AIF1_FRAME_CTRL_11 0x511 +#define ARIZONA_AIF1_FRAME_CTRL_12 0x512 +#define ARIZONA_AIF1_FRAME_CTRL_13 0x513 +#define ARIZONA_AIF1_FRAME_CTRL_14 0x514 +#define ARIZONA_AIF1_FRAME_CTRL_15 0x515 +#define ARIZONA_AIF1_FRAME_CTRL_16 0x516 +#define ARIZONA_AIF1_FRAME_CTRL_17 0x517 +#define ARIZONA_AIF1_FRAME_CTRL_18 0x518 +#define ARIZONA_AIF1_TX_ENABLES 0x519 +#define ARIZONA_AIF1_RX_ENABLES 0x51A +#define ARIZONA_AIF1_FORCE_WRITE 0x51B +#define ARIZONA_AIF2_BCLK_CTRL 0x540 +#define ARIZONA_AIF2_TX_PIN_CTRL 0x541 +#define ARIZONA_AIF2_RX_PIN_CTRL 0x542 +#define ARIZONA_AIF2_RATE_CTRL 0x543 +#define ARIZONA_AIF2_FORMAT 0x544 +#define ARIZONA_AIF2_TX_BCLK_RATE 0x545 +#define ARIZONA_AIF2_RX_BCLK_RATE 0x546 +#define ARIZONA_AIF2_FRAME_CTRL_1 0x547 +#define ARIZONA_AIF2_FRAME_CTRL_2 0x548 +#define ARIZONA_AIF2_FRAME_CTRL_3 0x549 +#define ARIZONA_AIF2_FRAME_CTRL_4 0x54A +#define ARIZONA_AIF2_FRAME_CTRL_5 0x54B +#define ARIZONA_AIF2_FRAME_CTRL_6 0x54C +#define ARIZONA_AIF2_FRAME_CTRL_7 0x54D +#define ARIZONA_AIF2_FRAME_CTRL_8 0x54E +#define ARIZONA_AIF2_FRAME_CTRL_11 0x551 +#define ARIZONA_AIF2_FRAME_CTRL_12 0x552 +#define ARIZONA_AIF2_FRAME_CTRL_13 0x553 +#define ARIZONA_AIF2_FRAME_CTRL_14 0x554 +#define ARIZONA_AIF2_FRAME_CTRL_15 0x555 +#define ARIZONA_AIF2_FRAME_CTRL_16 0x556 +#define ARIZONA_AIF2_TX_ENABLES 0x559 +#define ARIZONA_AIF2_RX_ENABLES 0x55A +#define ARIZONA_AIF2_FORCE_WRITE 0x55B +#define ARIZONA_AIF3_BCLK_CTRL 0x580 +#define ARIZONA_AIF3_TX_PIN_CTRL 0x581 +#define ARIZONA_AIF3_RX_PIN_CTRL 0x582 +#define ARIZONA_AIF3_RATE_CTRL 0x583 +#define ARIZONA_AIF3_FORMAT 0x584 +#define ARIZONA_AIF3_TX_BCLK_RATE 0x585 +#define ARIZONA_AIF3_RX_BCLK_RATE 0x586 +#define ARIZONA_AIF3_FRAME_CTRL_1 0x587 +#define ARIZONA_AIF3_FRAME_CTRL_2 0x588 +#define ARIZONA_AIF3_FRAME_CTRL_3 0x589 +#define ARIZONA_AIF3_FRAME_CTRL_4 0x58A +#define ARIZONA_AIF3_FRAME_CTRL_11 0x591 +#define ARIZONA_AIF3_FRAME_CTRL_12 0x592 +#define ARIZONA_AIF3_TX_ENABLES 0x599 +#define ARIZONA_AIF3_RX_ENABLES 0x59A +#define ARIZONA_AIF3_FORCE_WRITE 0x59B +#define ARIZONA_SPD1_TX_CONTROL 0x5C2 +#define ARIZONA_SPD1_TX_CHANNEL_STATUS_1 0x5C3 +#define ARIZONA_SPD1_TX_CHANNEL_STATUS_2 0x5C4 +#define ARIZONA_SPD1_TX_CHANNEL_STATUS_3 0x5C5 +#define ARIZONA_SLIMBUS_FRAMER_REF_GEAR 0x5E3 +#define ARIZONA_SLIMBUS_RATES_1 0x5E5 +#define ARIZONA_SLIMBUS_RATES_2 0x5E6 +#define ARIZONA_SLIMBUS_RATES_3 0x5E7 +#define ARIZONA_SLIMBUS_RATES_4 0x5E8 +#define ARIZONA_SLIMBUS_RATES_5 0x5E9 +#define ARIZONA_SLIMBUS_RATES_6 0x5EA +#define ARIZONA_SLIMBUS_RATES_7 0x5EB +#define ARIZONA_SLIMBUS_RATES_8 0x5EC +#define ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE 0x5F5 +#define ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE 0x5F6 +#define ARIZONA_SLIMBUS_RX_PORT_STATUS 0x5F7 +#define ARIZONA_SLIMBUS_TX_PORT_STATUS 0x5F8 +#define ARIZONA_PWM1MIX_INPUT_1_SOURCE 0x640 +#define ARIZONA_PWM1MIX_INPUT_1_VOLUME 0x641 +#define ARIZONA_PWM1MIX_INPUT_2_SOURCE 0x642 +#define ARIZONA_PWM1MIX_INPUT_2_VOLUME 0x643 +#define ARIZONA_PWM1MIX_INPUT_3_SOURCE 0x644 +#define ARIZONA_PWM1MIX_INPUT_3_VOLUME 0x645 +#define ARIZONA_PWM1MIX_INPUT_4_SOURCE 0x646 +#define ARIZONA_PWM1MIX_INPUT_4_VOLUME 0x647 +#define ARIZONA_PWM2MIX_INPUT_1_SOURCE 0x648 +#define ARIZONA_PWM2MIX_INPUT_1_VOLUME 0x649 +#define ARIZONA_PWM2MIX_INPUT_2_SOURCE 0x64A +#define 
ARIZONA_PWM2MIX_INPUT_2_VOLUME 0x64B +#define ARIZONA_PWM2MIX_INPUT_3_SOURCE 0x64C +#define ARIZONA_PWM2MIX_INPUT_3_VOLUME 0x64D +#define ARIZONA_PWM2MIX_INPUT_4_SOURCE 0x64E +#define ARIZONA_PWM2MIX_INPUT_4_VOLUME 0x64F +#define ARIZONA_MICMIX_INPUT_1_SOURCE 0x660 +#define ARIZONA_MICMIX_INPUT_1_VOLUME 0x661 +#define ARIZONA_MICMIX_INPUT_2_SOURCE 0x662 +#define ARIZONA_MICMIX_INPUT_2_VOLUME 0x663 +#define ARIZONA_MICMIX_INPUT_3_SOURCE 0x664 +#define ARIZONA_MICMIX_INPUT_3_VOLUME 0x665 +#define ARIZONA_MICMIX_INPUT_4_SOURCE 0x666 +#define ARIZONA_MICMIX_INPUT_4_VOLUME 0x667 +#define ARIZONA_NOISEMIX_INPUT_1_SOURCE 0x668 +#define ARIZONA_NOISEMIX_INPUT_1_VOLUME 0x669 +#define ARIZONA_NOISEMIX_INPUT_2_SOURCE 0x66A +#define ARIZONA_NOISEMIX_INPUT_2_VOLUME 0x66B +#define ARIZONA_NOISEMIX_INPUT_3_SOURCE 0x66C +#define ARIZONA_NOISEMIX_INPUT_3_VOLUME 0x66D +#define ARIZONA_NOISEMIX_INPUT_4_SOURCE 0x66E +#define ARIZONA_NOISEMIX_INPUT_4_VOLUME 0x66F +#define ARIZONA_OUT1LMIX_INPUT_1_SOURCE 0x680 +#define ARIZONA_OUT1LMIX_INPUT_1_VOLUME 0x681 +#define ARIZONA_OUT1LMIX_INPUT_2_SOURCE 0x682 +#define ARIZONA_OUT1LMIX_INPUT_2_VOLUME 0x683 +#define ARIZONA_OUT1LMIX_INPUT_3_SOURCE 0x684 +#define ARIZONA_OUT1LMIX_INPUT_3_VOLUME 0x685 +#define ARIZONA_OUT1LMIX_INPUT_4_SOURCE 0x686 +#define ARIZONA_OUT1LMIX_INPUT_4_VOLUME 0x687 +#define ARIZONA_OUT1RMIX_INPUT_1_SOURCE 0x688 +#define ARIZONA_OUT1RMIX_INPUT_1_VOLUME 0x689 +#define ARIZONA_OUT1RMIX_INPUT_2_SOURCE 0x68A +#define ARIZONA_OUT1RMIX_INPUT_2_VOLUME 0x68B +#define ARIZONA_OUT1RMIX_INPUT_3_SOURCE 0x68C +#define ARIZONA_OUT1RMIX_INPUT_3_VOLUME 0x68D +#define ARIZONA_OUT1RMIX_INPUT_4_SOURCE 0x68E +#define ARIZONA_OUT1RMIX_INPUT_4_VOLUME 0x68F +#define ARIZONA_OUT2LMIX_INPUT_1_SOURCE 0x690 +#define ARIZONA_OUT2LMIX_INPUT_1_VOLUME 0x691 +#define ARIZONA_OUT2LMIX_INPUT_2_SOURCE 0x692 +#define ARIZONA_OUT2LMIX_INPUT_2_VOLUME 0x693 +#define ARIZONA_OUT2LMIX_INPUT_3_SOURCE 0x694 +#define ARIZONA_OUT2LMIX_INPUT_3_VOLUME 0x695 +#define ARIZONA_OUT2LMIX_INPUT_4_SOURCE 0x696 +#define ARIZONA_OUT2LMIX_INPUT_4_VOLUME 0x697 +#define ARIZONA_OUT2RMIX_INPUT_1_SOURCE 0x698 +#define ARIZONA_OUT2RMIX_INPUT_1_VOLUME 0x699 +#define ARIZONA_OUT2RMIX_INPUT_2_SOURCE 0x69A +#define ARIZONA_OUT2RMIX_INPUT_2_VOLUME 0x69B +#define ARIZONA_OUT2RMIX_INPUT_3_SOURCE 0x69C +#define ARIZONA_OUT2RMIX_INPUT_3_VOLUME 0x69D +#define ARIZONA_OUT2RMIX_INPUT_4_SOURCE 0x69E +#define ARIZONA_OUT2RMIX_INPUT_4_VOLUME 0x69F +#define ARIZONA_OUT3LMIX_INPUT_1_SOURCE 0x6A0 +#define ARIZONA_OUT3LMIX_INPUT_1_VOLUME 0x6A1 +#define ARIZONA_OUT3LMIX_INPUT_2_SOURCE 0x6A2 +#define ARIZONA_OUT3LMIX_INPUT_2_VOLUME 0x6A3 +#define ARIZONA_OUT3LMIX_INPUT_3_SOURCE 0x6A4 +#define ARIZONA_OUT3LMIX_INPUT_3_VOLUME 0x6A5 +#define ARIZONA_OUT3LMIX_INPUT_4_SOURCE 0x6A6 +#define ARIZONA_OUT3LMIX_INPUT_4_VOLUME 0x6A7 +#define ARIZONA_OUT3RMIX_INPUT_1_SOURCE 0x6A8 +#define ARIZONA_OUT3RMIX_INPUT_1_VOLUME 0x6A9 +#define ARIZONA_OUT3RMIX_INPUT_2_SOURCE 0x6AA +#define ARIZONA_OUT3RMIX_INPUT_2_VOLUME 0x6AB +#define ARIZONA_OUT3RMIX_INPUT_3_SOURCE 0x6AC +#define ARIZONA_OUT3RMIX_INPUT_3_VOLUME 0x6AD +#define ARIZONA_OUT3RMIX_INPUT_4_SOURCE 0x6AE +#define ARIZONA_OUT3RMIX_INPUT_4_VOLUME 0x6AF +#define ARIZONA_OUT4LMIX_INPUT_1_SOURCE 0x6B0 +#define ARIZONA_OUT4LMIX_INPUT_1_VOLUME 0x6B1 +#define ARIZONA_OUT4LMIX_INPUT_2_SOURCE 0x6B2 +#define ARIZONA_OUT4LMIX_INPUT_2_VOLUME 0x6B3 +#define ARIZONA_OUT4LMIX_INPUT_3_SOURCE 0x6B4 +#define ARIZONA_OUT4LMIX_INPUT_3_VOLUME 0x6B5 +#define ARIZONA_OUT4LMIX_INPUT_4_SOURCE 0x6B6 +#define 
ARIZONA_OUT4LMIX_INPUT_4_VOLUME 0x6B7 +#define ARIZONA_OUT4RMIX_INPUT_1_SOURCE 0x6B8 +#define ARIZONA_OUT4RMIX_INPUT_1_VOLUME 0x6B9 +#define ARIZONA_OUT4RMIX_INPUT_2_SOURCE 0x6BA +#define ARIZONA_OUT4RMIX_INPUT_2_VOLUME 0x6BB +#define ARIZONA_OUT4RMIX_INPUT_3_SOURCE 0x6BC +#define ARIZONA_OUT4RMIX_INPUT_3_VOLUME 0x6BD +#define ARIZONA_OUT4RMIX_INPUT_4_SOURCE 0x6BE +#define ARIZONA_OUT4RMIX_INPUT_4_VOLUME 0x6BF +#define ARIZONA_OUT5LMIX_INPUT_1_SOURCE 0x6C0 +#define ARIZONA_OUT5LMIX_INPUT_1_VOLUME 0x6C1 +#define ARIZONA_OUT5LMIX_INPUT_2_SOURCE 0x6C2 +#define ARIZONA_OUT5LMIX_INPUT_2_VOLUME 0x6C3 +#define ARIZONA_OUT5LMIX_INPUT_3_SOURCE 0x6C4 +#define ARIZONA_OUT5LMIX_INPUT_3_VOLUME 0x6C5 +#define ARIZONA_OUT5LMIX_INPUT_4_SOURCE 0x6C6 +#define ARIZONA_OUT5LMIX_INPUT_4_VOLUME 0x6C7 +#define ARIZONA_OUT5RMIX_INPUT_1_SOURCE 0x6C8 +#define ARIZONA_OUT5RMIX_INPUT_1_VOLUME 0x6C9 +#define ARIZONA_OUT5RMIX_INPUT_2_SOURCE 0x6CA +#define ARIZONA_OUT5RMIX_INPUT_2_VOLUME 0x6CB +#define ARIZONA_OUT5RMIX_INPUT_3_SOURCE 0x6CC +#define ARIZONA_OUT5RMIX_INPUT_3_VOLUME 0x6CD +#define ARIZONA_OUT5RMIX_INPUT_4_SOURCE 0x6CE +#define ARIZONA_OUT5RMIX_INPUT_4_VOLUME 0x6CF +#define ARIZONA_OUT6LMIX_INPUT_1_SOURCE 0x6D0 +#define ARIZONA_OUT6LMIX_INPUT_1_VOLUME 0x6D1 +#define ARIZONA_OUT6LMIX_INPUT_2_SOURCE 0x6D2 +#define ARIZONA_OUT6LMIX_INPUT_2_VOLUME 0x6D3 +#define ARIZONA_OUT6LMIX_INPUT_3_SOURCE 0x6D4 +#define ARIZONA_OUT6LMIX_INPUT_3_VOLUME 0x6D5 +#define ARIZONA_OUT6LMIX_INPUT_4_SOURCE 0x6D6 +#define ARIZONA_OUT6LMIX_INPUT_4_VOLUME 0x6D7 +#define ARIZONA_OUT6RMIX_INPUT_1_SOURCE 0x6D8 +#define ARIZONA_OUT6RMIX_INPUT_1_VOLUME 0x6D9 +#define ARIZONA_OUT6RMIX_INPUT_2_SOURCE 0x6DA +#define ARIZONA_OUT6RMIX_INPUT_2_VOLUME 0x6DB +#define ARIZONA_OUT6RMIX_INPUT_3_SOURCE 0x6DC +#define ARIZONA_OUT6RMIX_INPUT_3_VOLUME 0x6DD +#define ARIZONA_OUT6RMIX_INPUT_4_SOURCE 0x6DE +#define ARIZONA_OUT6RMIX_INPUT_4_VOLUME 0x6DF +#define ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE 0x700 +#define ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME 0x701 +#define ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE 0x702 +#define ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME 0x703 +#define ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE 0x704 +#define ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME 0x705 +#define ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE 0x706 +#define ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME 0x707 +#define ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE 0x708 +#define ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME 0x709 +#define ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE 0x70A +#define ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME 0x70B +#define ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE 0x70C +#define ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME 0x70D +#define ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE 0x70E +#define ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME 0x70F +#define ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE 0x710 +#define ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME 0x711 +#define ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE 0x712 +#define ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME 0x713 +#define ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE 0x714 +#define ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME 0x715 +#define ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE 0x716 +#define ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME 0x717 +#define ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE 0x718 +#define ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME 0x719 +#define ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE 0x71A +#define ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME 0x71B +#define ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE 0x71C +#define ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME 0x71D +#define ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE 0x71E +#define ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME 0x71F +#define ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE 0x720 +#define 
ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME 0x721 +#define ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE 0x722 +#define ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME 0x723 +#define ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE 0x724 +#define ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME 0x725 +#define ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE 0x726 +#define ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME 0x727 +#define ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE 0x728 +#define ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME 0x729 +#define ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE 0x72A +#define ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME 0x72B +#define ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE 0x72C +#define ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME 0x72D +#define ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE 0x72E +#define ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME 0x72F +#define ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE 0x730 +#define ARIZONA_AIF1TX7MIX_INPUT_1_VOLUME 0x731 +#define ARIZONA_AIF1TX7MIX_INPUT_2_SOURCE 0x732 +#define ARIZONA_AIF1TX7MIX_INPUT_2_VOLUME 0x733 +#define ARIZONA_AIF1TX7MIX_INPUT_3_SOURCE 0x734 +#define ARIZONA_AIF1TX7MIX_INPUT_3_VOLUME 0x735 +#define ARIZONA_AIF1TX7MIX_INPUT_4_SOURCE 0x736 +#define ARIZONA_AIF1TX7MIX_INPUT_4_VOLUME 0x737 +#define ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE 0x738 +#define ARIZONA_AIF1TX8MIX_INPUT_1_VOLUME 0x739 +#define ARIZONA_AIF1TX8MIX_INPUT_2_SOURCE 0x73A +#define ARIZONA_AIF1TX8MIX_INPUT_2_VOLUME 0x73B +#define ARIZONA_AIF1TX8MIX_INPUT_3_SOURCE 0x73C +#define ARIZONA_AIF1TX8MIX_INPUT_3_VOLUME 0x73D +#define ARIZONA_AIF1TX8MIX_INPUT_4_SOURCE 0x73E +#define ARIZONA_AIF1TX8MIX_INPUT_4_VOLUME 0x73F +#define ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE 0x740 +#define ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME 0x741 +#define ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE 0x742 +#define ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME 0x743 +#define ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE 0x744 +#define ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME 0x745 +#define ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE 0x746 +#define ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME 0x747 +#define ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE 0x748 +#define ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME 0x749 +#define ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE 0x74A +#define ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME 0x74B +#define ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE 0x74C +#define ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME 0x74D +#define ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE 0x74E +#define ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME 0x74F +#define ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE 0x750 +#define ARIZONA_AIF2TX3MIX_INPUT_1_VOLUME 0x751 +#define ARIZONA_AIF2TX3MIX_INPUT_2_SOURCE 0x752 +#define ARIZONA_AIF2TX3MIX_INPUT_2_VOLUME 0x753 +#define ARIZONA_AIF2TX3MIX_INPUT_3_SOURCE 0x754 +#define ARIZONA_AIF2TX3MIX_INPUT_3_VOLUME 0x755 +#define ARIZONA_AIF2TX3MIX_INPUT_4_SOURCE 0x756 +#define ARIZONA_AIF2TX3MIX_INPUT_4_VOLUME 0x757 +#define ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE 0x758 +#define ARIZONA_AIF2TX4MIX_INPUT_1_VOLUME 0x759 +#define ARIZONA_AIF2TX4MIX_INPUT_2_SOURCE 0x75A +#define ARIZONA_AIF2TX4MIX_INPUT_2_VOLUME 0x75B +#define ARIZONA_AIF2TX4MIX_INPUT_3_SOURCE 0x75C +#define ARIZONA_AIF2TX4MIX_INPUT_3_VOLUME 0x75D +#define ARIZONA_AIF2TX4MIX_INPUT_4_SOURCE 0x75E +#define ARIZONA_AIF2TX4MIX_INPUT_4_VOLUME 0x75F +#define ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE 0x760 +#define ARIZONA_AIF2TX5MIX_INPUT_1_VOLUME 0x761 +#define ARIZONA_AIF2TX5MIX_INPUT_2_SOURCE 0x762 +#define ARIZONA_AIF2TX5MIX_INPUT_2_VOLUME 0x763 +#define ARIZONA_AIF2TX5MIX_INPUT_3_SOURCE 0x764 +#define ARIZONA_AIF2TX5MIX_INPUT_3_VOLUME 0x765 +#define ARIZONA_AIF2TX5MIX_INPUT_4_SOURCE 0x766 +#define ARIZONA_AIF2TX5MIX_INPUT_4_VOLUME 0x767 +#define ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE 0x768 +#define 
ARIZONA_AIF2TX6MIX_INPUT_1_VOLUME 0x769 +#define ARIZONA_AIF2TX6MIX_INPUT_2_SOURCE 0x76A +#define ARIZONA_AIF2TX6MIX_INPUT_2_VOLUME 0x76B +#define ARIZONA_AIF2TX6MIX_INPUT_3_SOURCE 0x76C +#define ARIZONA_AIF2TX6MIX_INPUT_3_VOLUME 0x76D +#define ARIZONA_AIF2TX6MIX_INPUT_4_SOURCE 0x76E +#define ARIZONA_AIF2TX6MIX_INPUT_4_VOLUME 0x76F +#define ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE 0x780 +#define ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME 0x781 +#define ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE 0x782 +#define ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME 0x783 +#define ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE 0x784 +#define ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME 0x785 +#define ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE 0x786 +#define ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME 0x787 +#define ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE 0x788 +#define ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME 0x789 +#define ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE 0x78A +#define ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME 0x78B +#define ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE 0x78C +#define ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D +#define ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E +#define ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F +#define ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE 0x7C0 +#define ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME 0x7C1 +#define ARIZONA_SLIMTX1MIX_INPUT_2_SOURCE 0x7C2 +#define ARIZONA_SLIMTX1MIX_INPUT_2_VOLUME 0x7C3 +#define ARIZONA_SLIMTX1MIX_INPUT_3_SOURCE 0x7C4 +#define ARIZONA_SLIMTX1MIX_INPUT_3_VOLUME 0x7C5 +#define ARIZONA_SLIMTX1MIX_INPUT_4_SOURCE 0x7C6 +#define ARIZONA_SLIMTX1MIX_INPUT_4_VOLUME 0x7C7 +#define ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE 0x7C8 +#define ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME 0x7C9 +#define ARIZONA_SLIMTX2MIX_INPUT_2_SOURCE 0x7CA +#define ARIZONA_SLIMTX2MIX_INPUT_2_VOLUME 0x7CB +#define ARIZONA_SLIMTX2MIX_INPUT_3_SOURCE 0x7CC +#define ARIZONA_SLIMTX2MIX_INPUT_3_VOLUME 0x7CD +#define ARIZONA_SLIMTX2MIX_INPUT_4_SOURCE 0x7CE +#define ARIZONA_SLIMTX2MIX_INPUT_4_VOLUME 0x7CF +#define ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE 0x7D0 +#define ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME 0x7D1 +#define ARIZONA_SLIMTX3MIX_INPUT_2_SOURCE 0x7D2 +#define ARIZONA_SLIMTX3MIX_INPUT_2_VOLUME 0x7D3 +#define ARIZONA_SLIMTX3MIX_INPUT_3_SOURCE 0x7D4 +#define ARIZONA_SLIMTX3MIX_INPUT_3_VOLUME 0x7D5 +#define ARIZONA_SLIMTX3MIX_INPUT_4_SOURCE 0x7D6 +#define ARIZONA_SLIMTX3MIX_INPUT_4_VOLUME 0x7D7 +#define ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE 0x7D8 +#define ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME 0x7D9 +#define ARIZONA_SLIMTX4MIX_INPUT_2_SOURCE 0x7DA +#define ARIZONA_SLIMTX4MIX_INPUT_2_VOLUME 0x7DB +#define ARIZONA_SLIMTX4MIX_INPUT_3_SOURCE 0x7DC +#define ARIZONA_SLIMTX4MIX_INPUT_3_VOLUME 0x7DD +#define ARIZONA_SLIMTX4MIX_INPUT_4_SOURCE 0x7DE +#define ARIZONA_SLIMTX4MIX_INPUT_4_VOLUME 0x7DF +#define ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE 0x7E0 +#define ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME 0x7E1 +#define ARIZONA_SLIMTX5MIX_INPUT_2_SOURCE 0x7E2 +#define ARIZONA_SLIMTX5MIX_INPUT_2_VOLUME 0x7E3 +#define ARIZONA_SLIMTX5MIX_INPUT_3_SOURCE 0x7E4 +#define ARIZONA_SLIMTX5MIX_INPUT_3_VOLUME 0x7E5 +#define ARIZONA_SLIMTX5MIX_INPUT_4_SOURCE 0x7E6 +#define ARIZONA_SLIMTX5MIX_INPUT_4_VOLUME 0x7E7 +#define ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE 0x7E8 +#define ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME 0x7E9 +#define ARIZONA_SLIMTX6MIX_INPUT_2_SOURCE 0x7EA +#define ARIZONA_SLIMTX6MIX_INPUT_2_VOLUME 0x7EB +#define ARIZONA_SLIMTX6MIX_INPUT_3_SOURCE 0x7EC +#define ARIZONA_SLIMTX6MIX_INPUT_3_VOLUME 0x7ED +#define ARIZONA_SLIMTX6MIX_INPUT_4_SOURCE 0x7EE +#define ARIZONA_SLIMTX6MIX_INPUT_4_VOLUME 0x7EF +#define ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE 0x7F0 +#define 
ARIZONA_SLIMTX7MIX_INPUT_1_VOLUME 0x7F1 +#define ARIZONA_SLIMTX7MIX_INPUT_2_SOURCE 0x7F2 +#define ARIZONA_SLIMTX7MIX_INPUT_2_VOLUME 0x7F3 +#define ARIZONA_SLIMTX7MIX_INPUT_3_SOURCE 0x7F4 +#define ARIZONA_SLIMTX7MIX_INPUT_3_VOLUME 0x7F5 +#define ARIZONA_SLIMTX7MIX_INPUT_4_SOURCE 0x7F6 +#define ARIZONA_SLIMTX7MIX_INPUT_4_VOLUME 0x7F7 +#define ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE 0x7F8 +#define ARIZONA_SLIMTX8MIX_INPUT_1_VOLUME 0x7F9 +#define ARIZONA_SLIMTX8MIX_INPUT_2_SOURCE 0x7FA +#define ARIZONA_SLIMTX8MIX_INPUT_2_VOLUME 0x7FB +#define ARIZONA_SLIMTX8MIX_INPUT_3_SOURCE 0x7FC +#define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD +#define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE +#define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF +#define ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE 0x800 +#define ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME 0x801 +#define ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE 0x808 +#define ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME 0x809 +#define ARIZONA_EQ1MIX_INPUT_1_SOURCE 0x880 +#define ARIZONA_EQ1MIX_INPUT_1_VOLUME 0x881 +#define ARIZONA_EQ1MIX_INPUT_2_SOURCE 0x882 +#define ARIZONA_EQ1MIX_INPUT_2_VOLUME 0x883 +#define ARIZONA_EQ1MIX_INPUT_3_SOURCE 0x884 +#define ARIZONA_EQ1MIX_INPUT_3_VOLUME 0x885 +#define ARIZONA_EQ1MIX_INPUT_4_SOURCE 0x886 +#define ARIZONA_EQ1MIX_INPUT_4_VOLUME 0x887 +#define ARIZONA_EQ2MIX_INPUT_1_SOURCE 0x888 +#define ARIZONA_EQ2MIX_INPUT_1_VOLUME 0x889 +#define ARIZONA_EQ2MIX_INPUT_2_SOURCE 0x88A +#define ARIZONA_EQ2MIX_INPUT_2_VOLUME 0x88B +#define ARIZONA_EQ2MIX_INPUT_3_SOURCE 0x88C +#define ARIZONA_EQ2MIX_INPUT_3_VOLUME 0x88D +#define ARIZONA_EQ2MIX_INPUT_4_SOURCE 0x88E +#define ARIZONA_EQ2MIX_INPUT_4_VOLUME 0x88F +#define ARIZONA_EQ3MIX_INPUT_1_SOURCE 0x890 +#define ARIZONA_EQ3MIX_INPUT_1_VOLUME 0x891 +#define ARIZONA_EQ3MIX_INPUT_2_SOURCE 0x892 +#define ARIZONA_EQ3MIX_INPUT_2_VOLUME 0x893 +#define ARIZONA_EQ3MIX_INPUT_3_SOURCE 0x894 +#define ARIZONA_EQ3MIX_INPUT_3_VOLUME 0x895 +#define ARIZONA_EQ3MIX_INPUT_4_SOURCE 0x896 +#define ARIZONA_EQ3MIX_INPUT_4_VOLUME 0x897 +#define ARIZONA_EQ4MIX_INPUT_1_SOURCE 0x898 +#define ARIZONA_EQ4MIX_INPUT_1_VOLUME 0x899 +#define ARIZONA_EQ4MIX_INPUT_2_SOURCE 0x89A +#define ARIZONA_EQ4MIX_INPUT_2_VOLUME 0x89B +#define ARIZONA_EQ4MIX_INPUT_3_SOURCE 0x89C +#define ARIZONA_EQ4MIX_INPUT_3_VOLUME 0x89D +#define ARIZONA_EQ4MIX_INPUT_4_SOURCE 0x89E +#define ARIZONA_EQ4MIX_INPUT_4_VOLUME 0x89F +#define ARIZONA_DRC1LMIX_INPUT_1_SOURCE 0x8C0 +#define ARIZONA_DRC1LMIX_INPUT_1_VOLUME 0x8C1 +#define ARIZONA_DRC1LMIX_INPUT_2_SOURCE 0x8C2 +#define ARIZONA_DRC1LMIX_INPUT_2_VOLUME 0x8C3 +#define ARIZONA_DRC1LMIX_INPUT_3_SOURCE 0x8C4 +#define ARIZONA_DRC1LMIX_INPUT_3_VOLUME 0x8C5 +#define ARIZONA_DRC1LMIX_INPUT_4_SOURCE 0x8C6 +#define ARIZONA_DRC1LMIX_INPUT_4_VOLUME 0x8C7 +#define ARIZONA_DRC1RMIX_INPUT_1_SOURCE 0x8C8 +#define ARIZONA_DRC1RMIX_INPUT_1_VOLUME 0x8C9 +#define ARIZONA_DRC1RMIX_INPUT_2_SOURCE 0x8CA +#define ARIZONA_DRC1RMIX_INPUT_2_VOLUME 0x8CB +#define ARIZONA_DRC1RMIX_INPUT_3_SOURCE 0x8CC +#define ARIZONA_DRC1RMIX_INPUT_3_VOLUME 0x8CD +#define ARIZONA_DRC1RMIX_INPUT_4_SOURCE 0x8CE +#define ARIZONA_DRC1RMIX_INPUT_4_VOLUME 0x8CF +#define ARIZONA_DRC2LMIX_INPUT_1_SOURCE 0x8D0 +#define ARIZONA_DRC2LMIX_INPUT_1_VOLUME 0x8D1 +#define ARIZONA_DRC2LMIX_INPUT_2_SOURCE 0x8D2 +#define ARIZONA_DRC2LMIX_INPUT_2_VOLUME 0x8D3 +#define ARIZONA_DRC2LMIX_INPUT_3_SOURCE 0x8D4 +#define ARIZONA_DRC2LMIX_INPUT_3_VOLUME 0x8D5 +#define ARIZONA_DRC2LMIX_INPUT_4_SOURCE 0x8D6 +#define ARIZONA_DRC2LMIX_INPUT_4_VOLUME 0x8D7 +#define ARIZONA_DRC2RMIX_INPUT_1_SOURCE 0x8D8 +#define 
ARIZONA_DRC2RMIX_INPUT_1_VOLUME 0x8D9 +#define ARIZONA_DRC2RMIX_INPUT_2_SOURCE 0x8DA +#define ARIZONA_DRC2RMIX_INPUT_2_VOLUME 0x8DB +#define ARIZONA_DRC2RMIX_INPUT_3_SOURCE 0x8DC +#define ARIZONA_DRC2RMIX_INPUT_3_VOLUME 0x8DD +#define ARIZONA_DRC2RMIX_INPUT_4_SOURCE 0x8DE +#define ARIZONA_DRC2RMIX_INPUT_4_VOLUME 0x8DF +#define ARIZONA_HPLP1MIX_INPUT_1_SOURCE 0x900 +#define ARIZONA_HPLP1MIX_INPUT_1_VOLUME 0x901 +#define ARIZONA_HPLP1MIX_INPUT_2_SOURCE 0x902 +#define ARIZONA_HPLP1MIX_INPUT_2_VOLUME 0x903 +#define ARIZONA_HPLP1MIX_INPUT_3_SOURCE 0x904 +#define ARIZONA_HPLP1MIX_INPUT_3_VOLUME 0x905 +#define ARIZONA_HPLP1MIX_INPUT_4_SOURCE 0x906 +#define ARIZONA_HPLP1MIX_INPUT_4_VOLUME 0x907 +#define ARIZONA_HPLP2MIX_INPUT_1_SOURCE 0x908 +#define ARIZONA_HPLP2MIX_INPUT_1_VOLUME 0x909 +#define ARIZONA_HPLP2MIX_INPUT_2_SOURCE 0x90A +#define ARIZONA_HPLP2MIX_INPUT_2_VOLUME 0x90B +#define ARIZONA_HPLP2MIX_INPUT_3_SOURCE 0x90C +#define ARIZONA_HPLP2MIX_INPUT_3_VOLUME 0x90D +#define ARIZONA_HPLP2MIX_INPUT_4_SOURCE 0x90E +#define ARIZONA_HPLP2MIX_INPUT_4_VOLUME 0x90F +#define ARIZONA_HPLP3MIX_INPUT_1_SOURCE 0x910 +#define ARIZONA_HPLP3MIX_INPUT_1_VOLUME 0x911 +#define ARIZONA_HPLP3MIX_INPUT_2_SOURCE 0x912 +#define ARIZONA_HPLP3MIX_INPUT_2_VOLUME 0x913 +#define ARIZONA_HPLP3MIX_INPUT_3_SOURCE 0x914 +#define ARIZONA_HPLP3MIX_INPUT_3_VOLUME 0x915 +#define ARIZONA_HPLP3MIX_INPUT_4_SOURCE 0x916 +#define ARIZONA_HPLP3MIX_INPUT_4_VOLUME 0x917 +#define ARIZONA_HPLP4MIX_INPUT_1_SOURCE 0x918 +#define ARIZONA_HPLP4MIX_INPUT_1_VOLUME 0x919 +#define ARIZONA_HPLP4MIX_INPUT_2_SOURCE 0x91A +#define ARIZONA_HPLP4MIX_INPUT_2_VOLUME 0x91B +#define ARIZONA_HPLP4MIX_INPUT_3_SOURCE 0x91C +#define ARIZONA_HPLP4MIX_INPUT_3_VOLUME 0x91D +#define ARIZONA_HPLP4MIX_INPUT_4_SOURCE 0x91E +#define ARIZONA_HPLP4MIX_INPUT_4_VOLUME 0x91F +#define ARIZONA_DSP1LMIX_INPUT_1_SOURCE 0x940 +#define ARIZONA_DSP1LMIX_INPUT_1_VOLUME 0x941 +#define ARIZONA_DSP1LMIX_INPUT_2_SOURCE 0x942 +#define ARIZONA_DSP1LMIX_INPUT_2_VOLUME 0x943 +#define ARIZONA_DSP1LMIX_INPUT_3_SOURCE 0x944 +#define ARIZONA_DSP1LMIX_INPUT_3_VOLUME 0x945 +#define ARIZONA_DSP1LMIX_INPUT_4_SOURCE 0x946 +#define ARIZONA_DSP1LMIX_INPUT_4_VOLUME 0x947 +#define ARIZONA_DSP1RMIX_INPUT_1_SOURCE 0x948 +#define ARIZONA_DSP1RMIX_INPUT_1_VOLUME 0x949 +#define ARIZONA_DSP1RMIX_INPUT_2_SOURCE 0x94A +#define ARIZONA_DSP1RMIX_INPUT_2_VOLUME 0x94B +#define ARIZONA_DSP1RMIX_INPUT_3_SOURCE 0x94C +#define ARIZONA_DSP1RMIX_INPUT_3_VOLUME 0x94D +#define ARIZONA_DSP1RMIX_INPUT_4_SOURCE 0x94E +#define ARIZONA_DSP1RMIX_INPUT_4_VOLUME 0x94F +#define ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE 0x950 +#define ARIZONA_DSP1AUX2MIX_INPUT_1_SOURCE 0x958 +#define ARIZONA_DSP1AUX3MIX_INPUT_1_SOURCE 0x960 +#define ARIZONA_DSP1AUX4MIX_INPUT_1_SOURCE 0x968 +#define ARIZONA_DSP1AUX5MIX_INPUT_1_SOURCE 0x970 +#define ARIZONA_DSP1AUX6MIX_INPUT_1_SOURCE 0x978 +#define ARIZONA_DSP2LMIX_INPUT_1_SOURCE 0x980 +#define ARIZONA_DSP2LMIX_INPUT_1_VOLUME 0x981 +#define ARIZONA_DSP2LMIX_INPUT_2_SOURCE 0x982 +#define ARIZONA_DSP2LMIX_INPUT_2_VOLUME 0x983 +#define ARIZONA_DSP2LMIX_INPUT_3_SOURCE 0x984 +#define ARIZONA_DSP2LMIX_INPUT_3_VOLUME 0x985 +#define ARIZONA_DSP2LMIX_INPUT_4_SOURCE 0x986 +#define ARIZONA_DSP2LMIX_INPUT_4_VOLUME 0x987 +#define ARIZONA_DSP2RMIX_INPUT_1_SOURCE 0x988 +#define ARIZONA_DSP2RMIX_INPUT_1_VOLUME 0x989 +#define ARIZONA_DSP2RMIX_INPUT_2_SOURCE 0x98A +#define ARIZONA_DSP2RMIX_INPUT_2_VOLUME 0x98B +#define ARIZONA_DSP2RMIX_INPUT_3_SOURCE 0x98C +#define ARIZONA_DSP2RMIX_INPUT_3_VOLUME 0x98D +#define 
ARIZONA_DSP2RMIX_INPUT_4_SOURCE 0x98E +#define ARIZONA_DSP2RMIX_INPUT_4_VOLUME 0x98F +#define ARIZONA_DSP2AUX1MIX_INPUT_1_SOURCE 0x990 +#define ARIZONA_DSP2AUX2MIX_INPUT_1_SOURCE 0x998 +#define ARIZONA_DSP2AUX3MIX_INPUT_1_SOURCE 0x9A0 +#define ARIZONA_DSP2AUX4MIX_INPUT_1_SOURCE 0x9A8 +#define ARIZONA_DSP2AUX5MIX_INPUT_1_SOURCE 0x9B0 +#define ARIZONA_DSP2AUX6MIX_INPUT_1_SOURCE 0x9B8 +#define ARIZONA_DSP3LMIX_INPUT_1_SOURCE 0x9C0 +#define ARIZONA_DSP3LMIX_INPUT_1_VOLUME 0x9C1 +#define ARIZONA_DSP3LMIX_INPUT_2_SOURCE 0x9C2 +#define ARIZONA_DSP3LMIX_INPUT_2_VOLUME 0x9C3 +#define ARIZONA_DSP3LMIX_INPUT_3_SOURCE 0x9C4 +#define ARIZONA_DSP3LMIX_INPUT_3_VOLUME 0x9C5 +#define ARIZONA_DSP3LMIX_INPUT_4_SOURCE 0x9C6 +#define ARIZONA_DSP3LMIX_INPUT_4_VOLUME 0x9C7 +#define ARIZONA_DSP3RMIX_INPUT_1_SOURCE 0x9C8 +#define ARIZONA_DSP3RMIX_INPUT_1_VOLUME 0x9C9 +#define ARIZONA_DSP3RMIX_INPUT_2_SOURCE 0x9CA +#define ARIZONA_DSP3RMIX_INPUT_2_VOLUME 0x9CB +#define ARIZONA_DSP3RMIX_INPUT_3_SOURCE 0x9CC +#define ARIZONA_DSP3RMIX_INPUT_3_VOLUME 0x9CD +#define ARIZONA_DSP3RMIX_INPUT_4_SOURCE 0x9CE +#define ARIZONA_DSP3RMIX_INPUT_4_VOLUME 0x9CF +#define ARIZONA_DSP3AUX1MIX_INPUT_1_SOURCE 0x9D0 +#define ARIZONA_DSP3AUX2MIX_INPUT_1_SOURCE 0x9D8 +#define ARIZONA_DSP3AUX3MIX_INPUT_1_SOURCE 0x9E0 +#define ARIZONA_DSP3AUX4MIX_INPUT_1_SOURCE 0x9E8 +#define ARIZONA_DSP3AUX5MIX_INPUT_1_SOURCE 0x9F0 +#define ARIZONA_DSP3AUX6MIX_INPUT_1_SOURCE 0x9F8 +#define ARIZONA_DSP4LMIX_INPUT_1_SOURCE 0xA00 +#define ARIZONA_DSP4LMIX_INPUT_1_VOLUME 0xA01 +#define ARIZONA_DSP4LMIX_INPUT_2_SOURCE 0xA02 +#define ARIZONA_DSP4LMIX_INPUT_2_VOLUME 0xA03 +#define ARIZONA_DSP4LMIX_INPUT_3_SOURCE 0xA04 +#define ARIZONA_DSP4LMIX_INPUT_3_VOLUME 0xA05 +#define ARIZONA_DSP4LMIX_INPUT_4_SOURCE 0xA06 +#define ARIZONA_DSP4LMIX_INPUT_4_VOLUME 0xA07 +#define ARIZONA_DSP4RMIX_INPUT_1_SOURCE 0xA08 +#define ARIZONA_DSP4RMIX_INPUT_1_VOLUME 0xA09 +#define ARIZONA_DSP4RMIX_INPUT_2_SOURCE 0xA0A +#define ARIZONA_DSP4RMIX_INPUT_2_VOLUME 0xA0B +#define ARIZONA_DSP4RMIX_INPUT_3_SOURCE 0xA0C +#define ARIZONA_DSP4RMIX_INPUT_3_VOLUME 0xA0D +#define ARIZONA_DSP4RMIX_INPUT_4_SOURCE 0xA0E +#define ARIZONA_DSP4RMIX_INPUT_4_VOLUME 0xA0F +#define ARIZONA_DSP4AUX1MIX_INPUT_1_SOURCE 0xA10 +#define ARIZONA_DSP4AUX2MIX_INPUT_1_SOURCE 0xA18 +#define ARIZONA_DSP4AUX3MIX_INPUT_1_SOURCE 0xA20 +#define ARIZONA_DSP4AUX4MIX_INPUT_1_SOURCE 0xA28 +#define ARIZONA_DSP4AUX5MIX_INPUT_1_SOURCE 0xA30 +#define ARIZONA_DSP4AUX6MIX_INPUT_1_SOURCE 0xA38 +#define ARIZONA_ASRC1LMIX_INPUT_1_SOURCE 0xA80 +#define ARIZONA_ASRC1RMIX_INPUT_1_SOURCE 0xA88 +#define ARIZONA_ASRC2LMIX_INPUT_1_SOURCE 0xA90 +#define ARIZONA_ASRC2RMIX_INPUT_1_SOURCE 0xA98 +#define ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE 0xB00 +#define ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE 0xB08 +#define ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE 0xB10 +#define ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE 0xB18 +#define ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE 0xB20 +#define ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE 0xB28 +#define ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30 +#define ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38 +#define ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40 +#define ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48 +#define ARIZONA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50 +#define ARIZONA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58 +#define ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60 +#define ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68 +#define ARIZONA_ISRC2INT3MIX_INPUT_1_SOURCE 0xB70 +#define ARIZONA_ISRC2INT4MIX_INPUT_1_SOURCE 0xB78 +#define ARIZONA_ISRC3DEC1MIX_INPUT_1_SOURCE 0xB80 
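The *MIX_INPUT_n_SOURCE / *MIX_INPUT_n_VOLUME registers above make up the routing matrix: each mixer input has one register that selects its source and one that holds that input's gain. A minimal sketch of how a regmap-based driver might wire up one such input follows; it is an illustration, not code from this patch. The regmap handle, source ID and gain value are placeholders, and the header is assumed to be reachable as <linux/mfd/arizona/registers.h>.

/* Sketch only: route an assumed source into DRC1L mixer input 1 and
 * set that input's gain, using the register macros defined above.
 */
#include <linux/regmap.h>
#include <linux/mfd/arizona/registers.h>	/* assumed install path */

static int route_drc1l_input1(struct regmap *map, unsigned int src,
			      unsigned int gain)
{
	int ret;

	/* Select which signal feeds DRC1L mixer input 1. */
	ret = regmap_write(map, ARIZONA_DRC1LMIX_INPUT_1_SOURCE, src);
	if (ret)
		return ret;

	/* Apply the per-input gain for that mixer input. */
	return regmap_write(map, ARIZONA_DRC1LMIX_INPUT_1_VOLUME, gain);
}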
+#define ARIZONA_ISRC3DEC2MIX_INPUT_1_SOURCE 0xB88 +#define ARIZONA_ISRC3DEC3MIX_INPUT_1_SOURCE 0xB90 +#define ARIZONA_ISRC3DEC4MIX_INPUT_1_SOURCE 0xB98 +#define ARIZONA_ISRC3INT1MIX_INPUT_1_SOURCE 0xBA0 +#define ARIZONA_ISRC3INT2MIX_INPUT_1_SOURCE 0xBA8 +#define ARIZONA_ISRC3INT3MIX_INPUT_1_SOURCE 0xBB0 +#define ARIZONA_ISRC3INT4MIX_INPUT_1_SOURCE 0xBB8 +#define ARIZONA_GPIO1_CTRL 0xC00 +#define ARIZONA_GPIO2_CTRL 0xC01 +#define ARIZONA_GPIO3_CTRL 0xC02 +#define ARIZONA_GPIO4_CTRL 0xC03 +#define ARIZONA_GPIO5_CTRL 0xC04 +#define ARIZONA_IRQ_CTRL_1 0xC0F +#define ARIZONA_GPIO_DEBOUNCE_CONFIG 0xC10 +#define ARIZONA_GP_SWITCH_1 0xC18 +#define ARIZONA_MISC_PAD_CTRL_1 0xC20 +#define ARIZONA_MISC_PAD_CTRL_2 0xC21 +#define ARIZONA_MISC_PAD_CTRL_3 0xC22 +#define ARIZONA_MISC_PAD_CTRL_4 0xC23 +#define ARIZONA_MISC_PAD_CTRL_5 0xC24 +#define ARIZONA_MISC_PAD_CTRL_6 0xC25 +#define ARIZONA_MISC_PAD_CTRL_7 0xC30 +#define ARIZONA_MISC_PAD_CTRL_8 0xC31 +#define ARIZONA_MISC_PAD_CTRL_9 0xC32 +#define ARIZONA_MISC_PAD_CTRL_10 0xC33 +#define ARIZONA_MISC_PAD_CTRL_11 0xC34 +#define ARIZONA_MISC_PAD_CTRL_12 0xC35 +#define ARIZONA_MISC_PAD_CTRL_13 0xC36 +#define ARIZONA_MISC_PAD_CTRL_14 0xC37 +#define ARIZONA_MISC_PAD_CTRL_15 0xC38 +#define ARIZONA_MISC_PAD_CTRL_16 0xC39 +#define ARIZONA_MISC_PAD_CTRL_17 0xC3A +#define ARIZONA_MISC_PAD_CTRL_18 0xC3B +#define ARIZONA_INTERRUPT_STATUS_1 0xD00 +#define ARIZONA_INTERRUPT_STATUS_2 0xD01 +#define ARIZONA_INTERRUPT_STATUS_3 0xD02 +#define ARIZONA_INTERRUPT_STATUS_4 0xD03 +#define ARIZONA_INTERRUPT_STATUS_5 0xD04 +#define ARIZONA_INTERRUPT_STATUS_6 0xD05 +#define ARIZONA_INTERRUPT_STATUS_1_MASK 0xD08 +#define ARIZONA_INTERRUPT_STATUS_2_MASK 0xD09 +#define ARIZONA_INTERRUPT_STATUS_3_MASK 0xD0A +#define ARIZONA_INTERRUPT_STATUS_4_MASK 0xD0B +#define ARIZONA_INTERRUPT_STATUS_5_MASK 0xD0C +#define ARIZONA_INTERRUPT_STATUS_6_MASK 0xD0D +#define ARIZONA_INTERRUPT_CONTROL 0xD0F +#define ARIZONA_IRQ2_STATUS_1 0xD10 +#define ARIZONA_IRQ2_STATUS_2 0xD11 +#define ARIZONA_IRQ2_STATUS_3 0xD12 +#define ARIZONA_IRQ2_STATUS_4 0xD13 +#define ARIZONA_IRQ2_STATUS_5 0xD14 +#define ARIZONA_IRQ2_STATUS_6 0xD15 +#define ARIZONA_IRQ2_STATUS_1_MASK 0xD18 +#define ARIZONA_IRQ2_STATUS_2_MASK 0xD19 +#define ARIZONA_IRQ2_STATUS_3_MASK 0xD1A +#define ARIZONA_IRQ2_STATUS_4_MASK 0xD1B +#define ARIZONA_IRQ2_STATUS_5_MASK 0xD1C +#define ARIZONA_IRQ2_STATUS_6_MASK 0xD1D +#define ARIZONA_IRQ2_CONTROL 0xD1F +#define ARIZONA_INTERRUPT_RAW_STATUS_2 0xD20 +#define ARIZONA_INTERRUPT_RAW_STATUS_3 0xD21 +#define ARIZONA_INTERRUPT_RAW_STATUS_4 0xD22 +#define ARIZONA_INTERRUPT_RAW_STATUS_5 0xD23 +#define ARIZONA_INTERRUPT_RAW_STATUS_6 0xD24 +#define ARIZONA_INTERRUPT_RAW_STATUS_7 0xD25 +#define ARIZONA_INTERRUPT_RAW_STATUS_8 0xD26 +#define ARIZONA_INTERRUPT_RAW_STATUS_9 0xD28 +#define ARIZONA_IRQ_PIN_STATUS 0xD40 +#define ARIZONA_ADSP2_IRQ0 0xD41 +#define ARIZONA_AOD_WKUP_AND_TRIG 0xD50 +#define ARIZONA_AOD_IRQ1 0xD51 +#define ARIZONA_AOD_IRQ2 0xD52 +#define ARIZONA_AOD_IRQ_MASK_IRQ1 0xD53 +#define ARIZONA_AOD_IRQ_MASK_IRQ2 0xD54 +#define ARIZONA_AOD_IRQ_RAW_STATUS 0xD55 +#define ARIZONA_JACK_DETECT_DEBOUNCE 0xD56 +#define ARIZONA_FX_CTRL1 0xE00 +#define ARIZONA_FX_CTRL2 0xE01 +#define ARIZONA_EQ1_1 0xE10 +#define ARIZONA_EQ1_2 0xE11 +#define ARIZONA_EQ1_3 0xE12 +#define ARIZONA_EQ1_4 0xE13 +#define ARIZONA_EQ1_5 0xE14 +#define ARIZONA_EQ1_6 0xE15 +#define ARIZONA_EQ1_7 0xE16 +#define ARIZONA_EQ1_8 0xE17 +#define ARIZONA_EQ1_9 0xE18 +#define ARIZONA_EQ1_10 0xE19 +#define ARIZONA_EQ1_11 0xE1A +#define 
ARIZONA_EQ1_12 0xE1B +#define ARIZONA_EQ1_13 0xE1C +#define ARIZONA_EQ1_14 0xE1D +#define ARIZONA_EQ1_15 0xE1E +#define ARIZONA_EQ1_16 0xE1F +#define ARIZONA_EQ1_17 0xE20 +#define ARIZONA_EQ1_18 0xE21 +#define ARIZONA_EQ1_19 0xE22 +#define ARIZONA_EQ1_20 0xE23 +#define ARIZONA_EQ1_21 0xE24 +#define ARIZONA_EQ2_1 0xE26 +#define ARIZONA_EQ2_2 0xE27 +#define ARIZONA_EQ2_3 0xE28 +#define ARIZONA_EQ2_4 0xE29 +#define ARIZONA_EQ2_5 0xE2A +#define ARIZONA_EQ2_6 0xE2B +#define ARIZONA_EQ2_7 0xE2C +#define ARIZONA_EQ2_8 0xE2D +#define ARIZONA_EQ2_9 0xE2E +#define ARIZONA_EQ2_10 0xE2F +#define ARIZONA_EQ2_11 0xE30 +#define ARIZONA_EQ2_12 0xE31 +#define ARIZONA_EQ2_13 0xE32 +#define ARIZONA_EQ2_14 0xE33 +#define ARIZONA_EQ2_15 0xE34 +#define ARIZONA_EQ2_16 0xE35 +#define ARIZONA_EQ2_17 0xE36 +#define ARIZONA_EQ2_18 0xE37 +#define ARIZONA_EQ2_19 0xE38 +#define ARIZONA_EQ2_20 0xE39 +#define ARIZONA_EQ2_21 0xE3A +#define ARIZONA_EQ3_1 0xE3C +#define ARIZONA_EQ3_2 0xE3D +#define ARIZONA_EQ3_3 0xE3E +#define ARIZONA_EQ3_4 0xE3F +#define ARIZONA_EQ3_5 0xE40 +#define ARIZONA_EQ3_6 0xE41 +#define ARIZONA_EQ3_7 0xE42 +#define ARIZONA_EQ3_8 0xE43 +#define ARIZONA_EQ3_9 0xE44 +#define ARIZONA_EQ3_10 0xE45 +#define ARIZONA_EQ3_11 0xE46 +#define ARIZONA_EQ3_12 0xE47 +#define ARIZONA_EQ3_13 0xE48 +#define ARIZONA_EQ3_14 0xE49 +#define ARIZONA_EQ3_15 0xE4A +#define ARIZONA_EQ3_16 0xE4B +#define ARIZONA_EQ3_17 0xE4C +#define ARIZONA_EQ3_18 0xE4D +#define ARIZONA_EQ3_19 0xE4E +#define ARIZONA_EQ3_20 0xE4F +#define ARIZONA_EQ3_21 0xE50 +#define ARIZONA_EQ4_1 0xE52 +#define ARIZONA_EQ4_2 0xE53 +#define ARIZONA_EQ4_3 0xE54 +#define ARIZONA_EQ4_4 0xE55 +#define ARIZONA_EQ4_5 0xE56 +#define ARIZONA_EQ4_6 0xE57 +#define ARIZONA_EQ4_7 0xE58 +#define ARIZONA_EQ4_8 0xE59 +#define ARIZONA_EQ4_9 0xE5A +#define ARIZONA_EQ4_10 0xE5B +#define ARIZONA_EQ4_11 0xE5C +#define ARIZONA_EQ4_12 0xE5D +#define ARIZONA_EQ4_13 0xE5E +#define ARIZONA_EQ4_14 0xE5F +#define ARIZONA_EQ4_15 0xE60 +#define ARIZONA_EQ4_16 0xE61 +#define ARIZONA_EQ4_17 0xE62 +#define ARIZONA_EQ4_18 0xE63 +#define ARIZONA_EQ4_19 0xE64 +#define ARIZONA_EQ4_20 0xE65 +#define ARIZONA_EQ4_21 0xE66 +#define ARIZONA_DRC1_CTRL1 0xE80 +#define ARIZONA_DRC1_CTRL2 0xE81 +#define ARIZONA_DRC1_CTRL3 0xE82 +#define ARIZONA_DRC1_CTRL4 0xE83 +#define ARIZONA_DRC1_CTRL5 0xE84 +#define ARIZONA_DRC2_CTRL1 0xE89 +#define ARIZONA_DRC2_CTRL2 0xE8A +#define ARIZONA_DRC2_CTRL3 0xE8B +#define ARIZONA_DRC2_CTRL4 0xE8C +#define ARIZONA_DRC2_CTRL5 0xE8D +#define ARIZONA_HPLPF1_1 0xEC0 +#define ARIZONA_HPLPF1_2 0xEC1 +#define ARIZONA_HPLPF2_1 0xEC4 +#define ARIZONA_HPLPF2_2 0xEC5 +#define ARIZONA_HPLPF3_1 0xEC8 +#define ARIZONA_HPLPF3_2 0xEC9 +#define ARIZONA_HPLPF4_1 0xECC +#define ARIZONA_HPLPF4_2 0xECD +#define ARIZONA_ASRC_ENABLE 0xEE0 +#define ARIZONA_ASRC_STATUS 0xEE1 +#define ARIZONA_ASRC_RATE1 0xEE2 +#define ARIZONA_ASRC_RATE2 0xEE3 +#define ARIZONA_ISRC_1_CTRL_1 0xEF0 +#define ARIZONA_ISRC_1_CTRL_2 0xEF1 +#define ARIZONA_ISRC_1_CTRL_3 0xEF2 +#define ARIZONA_ISRC_2_CTRL_1 0xEF3 +#define ARIZONA_ISRC_2_CTRL_2 0xEF4 +#define ARIZONA_ISRC_2_CTRL_3 0xEF5 +#define ARIZONA_ISRC_3_CTRL_1 0xEF6 +#define ARIZONA_ISRC_3_CTRL_2 0xEF7 +#define ARIZONA_ISRC_3_CTRL_3 0xEF8 +#define ARIZONA_CLOCK_CONTROL 0xF00 +#define ARIZONA_ANC_SRC 0xF01 +#define ARIZONA_DSP_STATUS 0xF02 +#define ARIZONA_ANC_COEFF_START 0xF08 +#define ARIZONA_ANC_COEFF_END 0xF12 +#define ARIZONA_FCL_FILTER_CONTROL 0xF15 +#define ARIZONA_FCL_ADC_REFORMATTER_CONTROL 0xF17 +#define ARIZONA_FCL_COEFF_START 0xF18 +#define 
ARIZONA_FCL_COEFF_END 0xF69 +#define ARIZONA_FCR_FILTER_CONTROL 0xF70 +#define ARIZONA_FCR_ADC_REFORMATTER_CONTROL 0xF72 +#define ARIZONA_FCR_COEFF_START 0xF73 +#define ARIZONA_FCR_COEFF_END 0xFC4 +#define ARIZONA_DSP1_CONTROL_1 0x1100 +#define ARIZONA_DSP1_CLOCKING_1 0x1101 +#define ARIZONA_DSP1_STATUS_1 0x1104 +#define ARIZONA_DSP1_STATUS_2 0x1105 +#define ARIZONA_DSP1_STATUS_3 0x1106 +#define ARIZONA_DSP1_STATUS_4 0x1107 +#define ARIZONA_DSP1_WDMA_BUFFER_1 0x1110 +#define ARIZONA_DSP1_WDMA_BUFFER_2 0x1111 +#define ARIZONA_DSP1_WDMA_BUFFER_3 0x1112 +#define ARIZONA_DSP1_WDMA_BUFFER_4 0x1113 +#define ARIZONA_DSP1_WDMA_BUFFER_5 0x1114 +#define ARIZONA_DSP1_WDMA_BUFFER_6 0x1115 +#define ARIZONA_DSP1_WDMA_BUFFER_7 0x1116 +#define ARIZONA_DSP1_WDMA_BUFFER_8 0x1117 +#define ARIZONA_DSP1_RDMA_BUFFER_1 0x1120 +#define ARIZONA_DSP1_RDMA_BUFFER_2 0x1121 +#define ARIZONA_DSP1_RDMA_BUFFER_3 0x1122 +#define ARIZONA_DSP1_RDMA_BUFFER_4 0x1123 +#define ARIZONA_DSP1_RDMA_BUFFER_5 0x1124 +#define ARIZONA_DSP1_RDMA_BUFFER_6 0x1125 +#define ARIZONA_DSP1_WDMA_CONFIG_1 0x1130 +#define ARIZONA_DSP1_WDMA_CONFIG_2 0x1131 +#define ARIZONA_DSP1_WDMA_OFFSET_1 0x1132 +#define ARIZONA_DSP1_RDMA_CONFIG_1 0x1134 +#define ARIZONA_DSP1_RDMA_OFFSET_1 0x1135 +#define ARIZONA_DSP1_EXTERNAL_START_SELECT_1 0x1138 +#define ARIZONA_DSP1_SCRATCH_0 0x1140 +#define ARIZONA_DSP1_SCRATCH_1 0x1141 +#define ARIZONA_DSP1_SCRATCH_2 0x1142 +#define ARIZONA_DSP1_SCRATCH_3 0x1143 +#define ARIZONA_DSP2_CONTROL_1 0x1200 +#define ARIZONA_DSP2_CLOCKING_1 0x1201 +#define ARIZONA_DSP2_STATUS_1 0x1204 +#define ARIZONA_DSP2_STATUS_2 0x1205 +#define ARIZONA_DSP2_STATUS_3 0x1206 +#define ARIZONA_DSP2_STATUS_4 0x1207 +#define ARIZONA_DSP2_WDMA_BUFFER_1 0x1210 +#define ARIZONA_DSP2_WDMA_BUFFER_2 0x1211 +#define ARIZONA_DSP2_WDMA_BUFFER_3 0x1212 +#define ARIZONA_DSP2_WDMA_BUFFER_4 0x1213 +#define ARIZONA_DSP2_WDMA_BUFFER_5 0x1214 +#define ARIZONA_DSP2_WDMA_BUFFER_6 0x1215 +#define ARIZONA_DSP2_WDMA_BUFFER_7 0x1216 +#define ARIZONA_DSP2_WDMA_BUFFER_8 0x1217 +#define ARIZONA_DSP2_RDMA_BUFFER_1 0x1220 +#define ARIZONA_DSP2_RDMA_BUFFER_2 0x1221 +#define ARIZONA_DSP2_RDMA_BUFFER_3 0x1222 +#define ARIZONA_DSP2_RDMA_BUFFER_4 0x1223 +#define ARIZONA_DSP2_RDMA_BUFFER_5 0x1224 +#define ARIZONA_DSP2_RDMA_BUFFER_6 0x1225 +#define ARIZONA_DSP2_WDMA_CONFIG_1 0x1230 +#define ARIZONA_DSP2_WDMA_CONFIG_2 0x1231 +#define ARIZONA_DSP2_WDMA_OFFSET_1 0x1232 +#define ARIZONA_DSP2_RDMA_CONFIG_1 0x1234 +#define ARIZONA_DSP2_RDMA_OFFSET_1 0x1235 +#define ARIZONA_DSP2_EXTERNAL_START_SELECT_1 0x1238 +#define ARIZONA_DSP2_SCRATCH_0 0x1240 +#define ARIZONA_DSP2_SCRATCH_1 0x1241 +#define ARIZONA_DSP2_SCRATCH_2 0x1242 +#define ARIZONA_DSP2_SCRATCH_3 0x1243 +#define ARIZONA_DSP3_CONTROL_1 0x1300 +#define ARIZONA_DSP3_CLOCKING_1 0x1301 +#define ARIZONA_DSP3_STATUS_1 0x1304 +#define ARIZONA_DSP3_STATUS_2 0x1305 +#define ARIZONA_DSP3_STATUS_3 0x1306 +#define ARIZONA_DSP3_STATUS_4 0x1307 +#define ARIZONA_DSP3_WDMA_BUFFER_1 0x1310 +#define ARIZONA_DSP3_WDMA_BUFFER_2 0x1311 +#define ARIZONA_DSP3_WDMA_BUFFER_3 0x1312 +#define ARIZONA_DSP3_WDMA_BUFFER_4 0x1313 +#define ARIZONA_DSP3_WDMA_BUFFER_5 0x1314 +#define ARIZONA_DSP3_WDMA_BUFFER_6 0x1315 +#define ARIZONA_DSP3_WDMA_BUFFER_7 0x1316 +#define ARIZONA_DSP3_WDMA_BUFFER_8 0x1317 +#define ARIZONA_DSP3_RDMA_BUFFER_1 0x1320 +#define ARIZONA_DSP3_RDMA_BUFFER_2 0x1321 +#define ARIZONA_DSP3_RDMA_BUFFER_3 0x1322 +#define ARIZONA_DSP3_RDMA_BUFFER_4 0x1323 +#define ARIZONA_DSP3_RDMA_BUFFER_5 0x1324 +#define ARIZONA_DSP3_RDMA_BUFFER_6 0x1325 +#define 
ARIZONA_DSP3_WDMA_CONFIG_1 0x1330 +#define ARIZONA_DSP3_WDMA_CONFIG_2 0x1331 +#define ARIZONA_DSP3_WDMA_OFFSET_1 0x1332 +#define ARIZONA_DSP3_RDMA_CONFIG_1 0x1334 +#define ARIZONA_DSP3_RDMA_OFFSET_1 0x1335 +#define ARIZONA_DSP3_EXTERNAL_START_SELECT_1 0x1338 +#define ARIZONA_DSP3_SCRATCH_0 0x1340 +#define ARIZONA_DSP3_SCRATCH_1 0x1341 +#define ARIZONA_DSP3_SCRATCH_2 0x1342 +#define ARIZONA_DSP3_SCRATCH_3 0x1343 +#define ARIZONA_DSP4_CONTROL_1 0x1400 +#define ARIZONA_DSP4_CLOCKING_1 0x1401 +#define ARIZONA_DSP4_STATUS_1 0x1404 +#define ARIZONA_DSP4_STATUS_2 0x1405 +#define ARIZONA_DSP4_STATUS_3 0x1406 +#define ARIZONA_DSP4_STATUS_4 0x1407 +#define ARIZONA_DSP4_WDMA_BUFFER_1 0x1410 +#define ARIZONA_DSP4_WDMA_BUFFER_2 0x1411 +#define ARIZONA_DSP4_WDMA_BUFFER_3 0x1412 +#define ARIZONA_DSP4_WDMA_BUFFER_4 0x1413 +#define ARIZONA_DSP4_WDMA_BUFFER_5 0x1414 +#define ARIZONA_DSP4_WDMA_BUFFER_6 0x1415 +#define ARIZONA_DSP4_WDMA_BUFFER_7 0x1416 +#define ARIZONA_DSP4_WDMA_BUFFER_8 0x1417 +#define ARIZONA_DSP4_RDMA_BUFFER_1 0x1420 +#define ARIZONA_DSP4_RDMA_BUFFER_2 0x1421 +#define ARIZONA_DSP4_RDMA_BUFFER_3 0x1422 +#define ARIZONA_DSP4_RDMA_BUFFER_4 0x1423 +#define ARIZONA_DSP4_RDMA_BUFFER_5 0x1424 +#define ARIZONA_DSP4_RDMA_BUFFER_6 0x1425 +#define ARIZONA_DSP4_WDMA_CONFIG_1 0x1430 +#define ARIZONA_DSP4_WDMA_CONFIG_2 0x1431 +#define ARIZONA_DSP4_WDMA_OFFSET_1 0x1432 +#define ARIZONA_DSP4_RDMA_CONFIG_1 0x1434 +#define ARIZONA_DSP4_RDMA_OFFSET_1 0x1435 +#define ARIZONA_DSP4_EXTERNAL_START_SELECT_1 0x1438 +#define ARIZONA_DSP4_SCRATCH_0 0x1440 +#define ARIZONA_DSP4_SCRATCH_1 0x1441 +#define ARIZONA_DSP4_SCRATCH_2 0x1442 +#define ARIZONA_DSP4_SCRATCH_3 0x1443 +#define ARIZONA_FRF_COEFF_1 0x1700 +#define ARIZONA_FRF_COEFF_2 0x1701 +#define ARIZONA_FRF_COEFF_3 0x1702 +#define ARIZONA_FRF_COEFF_4 0x1703 +#define ARIZONA_V2_DAC_COMP_1 0x1704 +#define ARIZONA_V2_DAC_COMP_2 0x1705 + + +/* + * Field Definitions. 
+ */ + +/* + * R0 (0x00) - software reset + */ +#define ARIZONA_SW_RST_DEV_ID1_MASK 0xFFFF /* SW_RST_DEV_ID1 - [15:0] */ +#define ARIZONA_SW_RST_DEV_ID1_SHIFT 0 /* SW_RST_DEV_ID1 - [15:0] */ +#define ARIZONA_SW_RST_DEV_ID1_WIDTH 16 /* SW_RST_DEV_ID1 - [15:0] */ + +/* + * R1 (0x01) - Device Revision + */ +#define ARIZONA_DEVICE_REVISION_MASK 0x00FF /* DEVICE_REVISION - [7:0] */ +#define ARIZONA_DEVICE_REVISION_SHIFT 0 /* DEVICE_REVISION - [7:0] */ +#define ARIZONA_DEVICE_REVISION_WIDTH 8 /* DEVICE_REVISION - [7:0] */ + +/* + * R8 (0x08) - Ctrl IF SPI CFG 1 + */ +#define ARIZONA_SPI_CFG 0x0010 /* SPI_CFG */ +#define ARIZONA_SPI_CFG_MASK 0x0010 /* SPI_CFG */ +#define ARIZONA_SPI_CFG_SHIFT 4 /* SPI_CFG */ +#define ARIZONA_SPI_CFG_WIDTH 1 /* SPI_CFG */ +#define ARIZONA_SPI_4WIRE 0x0008 /* SPI_4WIRE */ +#define ARIZONA_SPI_4WIRE_MASK 0x0008 /* SPI_4WIRE */ +#define ARIZONA_SPI_4WIRE_SHIFT 3 /* SPI_4WIRE */ +#define ARIZONA_SPI_4WIRE_WIDTH 1 /* SPI_4WIRE */ +#define ARIZONA_SPI_AUTO_INC_MASK 0x0003 /* SPI_AUTO_INC - [1:0] */ +#define ARIZONA_SPI_AUTO_INC_SHIFT 0 /* SPI_AUTO_INC - [1:0] */ +#define ARIZONA_SPI_AUTO_INC_WIDTH 2 /* SPI_AUTO_INC - [1:0] */ + +/* + * R9 (0x09) - Ctrl IF I2C1 CFG 1 + */ +#define ARIZONA_I2C1_AUTO_INC_MASK 0x0003 /* I2C1_AUTO_INC - [1:0] */ +#define ARIZONA_I2C1_AUTO_INC_SHIFT 0 /* I2C1_AUTO_INC - [1:0] */ +#define ARIZONA_I2C1_AUTO_INC_WIDTH 2 /* I2C1_AUTO_INC - [1:0] */ + +/* + * R13 (0x0D) - Ctrl IF Status 1 + */ +#define ARIZONA_I2C1_BUSY 0x0020 /* I2C1_BUSY */ +#define ARIZONA_I2C1_BUSY_MASK 0x0020 /* I2C1_BUSY */ +#define ARIZONA_I2C1_BUSY_SHIFT 5 /* I2C1_BUSY */ +#define ARIZONA_I2C1_BUSY_WIDTH 1 /* I2C1_BUSY */ +#define ARIZONA_SPI_BUSY 0x0010 /* SPI_BUSY */ +#define ARIZONA_SPI_BUSY_MASK 0x0010 /* SPI_BUSY */ +#define ARIZONA_SPI_BUSY_SHIFT 4 /* SPI_BUSY */ +#define ARIZONA_SPI_BUSY_WIDTH 1 /* SPI_BUSY */ + +/* + * R22 (0x16) - Write Sequencer Ctrl 0 + */ +#define ARIZONA_WSEQ_ABORT 0x0800 /* WSEQ_ABORT */ +#define ARIZONA_WSEQ_ABORT_MASK 0x0800 /* WSEQ_ABORT */ +#define ARIZONA_WSEQ_ABORT_SHIFT 11 /* WSEQ_ABORT */ +#define ARIZONA_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */ +#define ARIZONA_WSEQ_START 0x0400 /* WSEQ_START */ +#define ARIZONA_WSEQ_START_MASK 0x0400 /* WSEQ_START */ +#define ARIZONA_WSEQ_START_SHIFT 10 /* WSEQ_START */ +#define ARIZONA_WSEQ_START_WIDTH 1 /* WSEQ_START */ +#define ARIZONA_WSEQ_ENA 0x0200 /* WSEQ_ENA */ +#define ARIZONA_WSEQ_ENA_MASK 0x0200 /* WSEQ_ENA */ +#define ARIZONA_WSEQ_ENA_SHIFT 9 /* WSEQ_ENA */ +#define ARIZONA_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */ +#define ARIZONA_WSEQ_START_INDEX_MASK 0x01FF /* WSEQ_START_INDEX - [8:0] */ +#define ARIZONA_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [8:0] */ +#define ARIZONA_WSEQ_START_INDEX_WIDTH 9 /* WSEQ_START_INDEX - [8:0] */ + +/* + * R23 (0x17) - Write Sequencer Ctrl 1 + */ +#define ARIZONA_WSEQ_BUSY 0x0200 /* WSEQ_BUSY */ +#define ARIZONA_WSEQ_BUSY_MASK 0x0200 /* WSEQ_BUSY */ +#define ARIZONA_WSEQ_BUSY_SHIFT 9 /* WSEQ_BUSY */ +#define ARIZONA_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */ +#define ARIZONA_WSEQ_CURRENT_INDEX_MASK 0x01FF /* WSEQ_CURRENT_INDEX - [8:0] */ +#define ARIZONA_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [8:0] */ +#define ARIZONA_WSEQ_CURRENT_INDEX_WIDTH 9 /* WSEQ_CURRENT_INDEX - [8:0] */ + +/* + * R24 (0x18) - Write Sequencer Ctrl 2 + */ +#define ARIZONA_LOAD_DEFAULTS 0x0002 /* LOAD_DEFAULTS */ +#define ARIZONA_LOAD_DEFAULTS_MASK 0x0002 /* LOAD_DEFAULTS */ +#define ARIZONA_LOAD_DEFAULTS_SHIFT 1 /* LOAD_DEFAULTS */ +#define ARIZONA_LOAD_DEFAULTS_WIDTH 1 /* LOAD_DEFAULTS */ 
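Each bitfield in the definitions above and below follows the same convention: a value macro for single-bit flags plus _MASK, _SHIFT and _WIDTH macros describing the field's position. As a minimal sketch of how these are consumed (an illustration, not part of the patch), the function below reads Write Sequencer Ctrl 1 (R23, address 0x17 per the comment above; the register-name macro itself is defined earlier in this header) and decodes two of its fields. The regmap handle and the include path are assumptions.

#include <linux/regmap.h>
#include <linux/types.h>
#include <linux/mfd/arizona/registers.h>	/* assumed install path */

/* Sketch only: decode WSEQ_BUSY and WSEQ_CURRENT_INDEX from R23 (0x17). */
static int read_wseq_state(struct regmap *map, bool *busy, unsigned int *index)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, 0x17, &val);	/* Write Sequencer Ctrl 1 */
	if (ret)
		return ret;

	/* Single-bit flag: test it with the value macro. */
	*busy = val & ARIZONA_WSEQ_BUSY;

	/* Multi-bit field: mask, then shift down to bit 0. */
	*index = (val & ARIZONA_WSEQ_CURRENT_INDEX_MASK) >>
		 ARIZONA_WSEQ_CURRENT_INDEX_SHIFT;

	return 0;
}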
+#define ARIZONA_WSEQ_LOAD_MEM 0x0001 /* WSEQ_LOAD_MEM */ +#define ARIZONA_WSEQ_LOAD_MEM_MASK 0x0001 /* WSEQ_LOAD_MEM */ +#define ARIZONA_WSEQ_LOAD_MEM_SHIFT 0 /* WSEQ_LOAD_MEM */ +#define ARIZONA_WSEQ_LOAD_MEM_WIDTH 1 /* WSEQ_LOAD_MEM */ + +/* + * R26 (0x1A) - Write Sequencer PROM + */ +#define ARIZONA_WSEQ_OTP_WRITE 0x0001 /* WSEQ_OTP_WRITE */ +#define ARIZONA_WSEQ_OTP_WRITE_MASK 0x0001 /* WSEQ_OTP_WRITE */ +#define ARIZONA_WSEQ_OTP_WRITE_SHIFT 0 /* WSEQ_OTP_WRITE */ +#define ARIZONA_WSEQ_OTP_WRITE_WIDTH 1 /* WSEQ_OTP_WRITE */ + +/* + * R32 (0x20) - Tone Generator 1 + */ +#define ARIZONA_TONE_RATE_MASK 0x7800 /* TONE_RATE - [14:11] */ +#define ARIZONA_TONE_RATE_SHIFT 11 /* TONE_RATE - [14:11] */ +#define ARIZONA_TONE_RATE_WIDTH 4 /* TONE_RATE - [14:11] */ +#define ARIZONA_TONE_OFFSET_MASK 0x0300 /* TONE_OFFSET - [9:8] */ +#define ARIZONA_TONE_OFFSET_SHIFT 8 /* TONE_OFFSET - [9:8] */ +#define ARIZONA_TONE_OFFSET_WIDTH 2 /* TONE_OFFSET - [9:8] */ +#define ARIZONA_TONE2_OVD 0x0020 /* TONE2_OVD */ +#define ARIZONA_TONE2_OVD_MASK 0x0020 /* TONE2_OVD */ +#define ARIZONA_TONE2_OVD_SHIFT 5 /* TONE2_OVD */ +#define ARIZONA_TONE2_OVD_WIDTH 1 /* TONE2_OVD */ +#define ARIZONA_TONE1_OVD 0x0010 /* TONE1_OVD */ +#define ARIZONA_TONE1_OVD_MASK 0x0010 /* TONE1_OVD */ +#define ARIZONA_TONE1_OVD_SHIFT 4 /* TONE1_OVD */ +#define ARIZONA_TONE1_OVD_WIDTH 1 /* TONE1_OVD */ +#define ARIZONA_TONE2_ENA 0x0002 /* TONE2_ENA */ +#define ARIZONA_TONE2_ENA_MASK 0x0002 /* TONE2_ENA */ +#define ARIZONA_TONE2_ENA_SHIFT 1 /* TONE2_ENA */ +#define ARIZONA_TONE2_ENA_WIDTH 1 /* TONE2_ENA */ +#define ARIZONA_TONE1_ENA 0x0001 /* TONE1_ENA */ +#define ARIZONA_TONE1_ENA_MASK 0x0001 /* TONE1_ENA */ +#define ARIZONA_TONE1_ENA_SHIFT 0 /* TONE1_ENA */ +#define ARIZONA_TONE1_ENA_WIDTH 1 /* TONE1_ENA */ + +/* + * R33 (0x21) - Tone Generator 2 + */ +#define ARIZONA_TONE1_LVL_0_MASK 0xFFFF /* TONE1_LVL - [15:0] */ +#define ARIZONA_TONE1_LVL_0_SHIFT 0 /* TONE1_LVL - [15:0] */ +#define ARIZONA_TONE1_LVL_0_WIDTH 16 /* TONE1_LVL - [15:0] */ + +/* + * R34 (0x22) - Tone Generator 3 + */ +#define ARIZONA_TONE1_LVL_MASK 0x00FF /* TONE1_LVL - [7:0] */ +#define ARIZONA_TONE1_LVL_SHIFT 0 /* TONE1_LVL - [7:0] */ +#define ARIZONA_TONE1_LVL_WIDTH 8 /* TONE1_LVL - [7:0] */ + +/* + * R35 (0x23) - Tone Generator 4 + */ +#define ARIZONA_TONE2_LVL_0_MASK 0xFFFF /* TONE2_LVL - [15:0] */ +#define ARIZONA_TONE2_LVL_0_SHIFT 0 /* TONE2_LVL - [15:0] */ +#define ARIZONA_TONE2_LVL_0_WIDTH 16 /* TONE2_LVL - [15:0] */ + +/* + * R36 (0x24) - Tone Generator 5 + */ +#define ARIZONA_TONE2_LVL_MASK 0x00FF /* TONE2_LVL - [7:0] */ +#define ARIZONA_TONE2_LVL_SHIFT 0 /* TONE2_LVL - [7:0] */ +#define ARIZONA_TONE2_LVL_WIDTH 8 /* TONE2_LVL - [7:0] */ + +/* + * R48 (0x30) - PWM Drive 1 + */ +#define ARIZONA_PWM_RATE_MASK 0x7800 /* PWM_RATE - [14:11] */ +#define ARIZONA_PWM_RATE_SHIFT 11 /* PWM_RATE - [14:11] */ +#define ARIZONA_PWM_RATE_WIDTH 4 /* PWM_RATE - [14:11] */ +#define ARIZONA_PWM_CLK_SEL_MASK 0x0700 /* PWM_CLK_SEL - [10:8] */ +#define ARIZONA_PWM_CLK_SEL_SHIFT 8 /* PWM_CLK_SEL - [10:8] */ +#define ARIZONA_PWM_CLK_SEL_WIDTH 3 /* PWM_CLK_SEL - [10:8] */ +#define ARIZONA_PWM2_OVD 0x0020 /* PWM2_OVD */ +#define ARIZONA_PWM2_OVD_MASK 0x0020 /* PWM2_OVD */ +#define ARIZONA_PWM2_OVD_SHIFT 5 /* PWM2_OVD */ +#define ARIZONA_PWM2_OVD_WIDTH 1 /* PWM2_OVD */ +#define ARIZONA_PWM1_OVD 0x0010 /* PWM1_OVD */ +#define ARIZONA_PWM1_OVD_MASK 0x0010 /* PWM1_OVD */ +#define ARIZONA_PWM1_OVD_SHIFT 4 /* PWM1_OVD */ +#define ARIZONA_PWM1_OVD_WIDTH 1 /* PWM1_OVD */ +#define ARIZONA_PWM2_ENA 
0x0002 /* PWM2_ENA */ +#define ARIZONA_PWM2_ENA_MASK 0x0002 /* PWM2_ENA */ +#define ARIZONA_PWM2_ENA_SHIFT 1 /* PWM2_ENA */ +#define ARIZONA_PWM2_ENA_WIDTH 1 /* PWM2_ENA */ +#define ARIZONA_PWM1_ENA 0x0001 /* PWM1_ENA */ +#define ARIZONA_PWM1_ENA_MASK 0x0001 /* PWM1_ENA */ +#define ARIZONA_PWM1_ENA_SHIFT 0 /* PWM1_ENA */ +#define ARIZONA_PWM1_ENA_WIDTH 1 /* PWM1_ENA */ + +/* + * R49 (0x31) - PWM Drive 2 + */ +#define ARIZONA_PWM1_LVL_MASK 0x03FF /* PWM1_LVL - [9:0] */ +#define ARIZONA_PWM1_LVL_SHIFT 0 /* PWM1_LVL - [9:0] */ +#define ARIZONA_PWM1_LVL_WIDTH 10 /* PWM1_LVL - [9:0] */ + +/* + * R50 (0x32) - PWM Drive 3 + */ +#define ARIZONA_PWM2_LVL_MASK 0x03FF /* PWM2_LVL - [9:0] */ +#define ARIZONA_PWM2_LVL_SHIFT 0 /* PWM2_LVL - [9:0] */ +#define ARIZONA_PWM2_LVL_WIDTH 10 /* PWM2_LVL - [9:0] */ + +/* + * R64 (0x40) - Wake control + */ +#define ARIZONA_WKUP_MICD_CLAMP_FALL 0x0080 /* WKUP_MICD_CLAMP_FALL */ +#define ARIZONA_WKUP_MICD_CLAMP_FALL_MASK 0x0080 /* WKUP_MICD_CLAMP_FALL */ +#define ARIZONA_WKUP_MICD_CLAMP_FALL_SHIFT 7 /* WKUP_MICD_CLAMP_FALL */ +#define ARIZONA_WKUP_MICD_CLAMP_FALL_WIDTH 1 /* WKUP_MICD_CLAMP_FALL */ +#define ARIZONA_WKUP_MICD_CLAMP_RISE 0x0040 /* WKUP_MICD_CLAMP_RISE */ +#define ARIZONA_WKUP_MICD_CLAMP_RISE_MASK 0x0040 /* WKUP_MICD_CLAMP_RISE */ +#define ARIZONA_WKUP_MICD_CLAMP_RISE_SHIFT 6 /* WKUP_MICD_CLAMP_RISE */ +#define ARIZONA_WKUP_MICD_CLAMP_RISE_WIDTH 1 /* WKUP_MICD_CLAMP_RISE */ +#define ARIZONA_WKUP_GP5_FALL 0x0020 /* WKUP_GP5_FALL */ +#define ARIZONA_WKUP_GP5_FALL_MASK 0x0020 /* WKUP_GP5_FALL */ +#define ARIZONA_WKUP_GP5_FALL_SHIFT 5 /* WKUP_GP5_FALL */ +#define ARIZONA_WKUP_GP5_FALL_WIDTH 1 /* WKUP_GP5_FALL */ +#define ARIZONA_WKUP_GP5_RISE 0x0010 /* WKUP_GP5_RISE */ +#define ARIZONA_WKUP_GP5_RISE_MASK 0x0010 /* WKUP_GP5_RISE */ +#define ARIZONA_WKUP_GP5_RISE_SHIFT 4 /* WKUP_GP5_RISE */ +#define ARIZONA_WKUP_GP5_RISE_WIDTH 1 /* WKUP_GP5_RISE */ +#define ARIZONA_WKUP_JD1_FALL 0x0008 /* WKUP_JD1_FALL */ +#define ARIZONA_WKUP_JD1_FALL_MASK 0x0008 /* WKUP_JD1_FALL */ +#define ARIZONA_WKUP_JD1_FALL_SHIFT 3 /* WKUP_JD1_FALL */ +#define ARIZONA_WKUP_JD1_FALL_WIDTH 1 /* WKUP_JD1_FALL */ +#define ARIZONA_WKUP_JD1_RISE 0x0004 /* WKUP_JD1_RISE */ +#define ARIZONA_WKUP_JD1_RISE_MASK 0x0004 /* WKUP_JD1_RISE */ +#define ARIZONA_WKUP_JD1_RISE_SHIFT 2 /* WKUP_JD1_RISE */ +#define ARIZONA_WKUP_JD1_RISE_WIDTH 1 /* WKUP_JD1_RISE */ +#define ARIZONA_WKUP_JD2_FALL 0x0002 /* WKUP_JD2_FALL */ +#define ARIZONA_WKUP_JD2_FALL_MASK 0x0002 /* WKUP_JD2_FALL */ +#define ARIZONA_WKUP_JD2_FALL_SHIFT 1 /* WKUP_JD2_FALL */ +#define ARIZONA_WKUP_JD2_FALL_WIDTH 1 /* WKUP_JD2_FALL */ +#define ARIZONA_WKUP_JD2_RISE 0x0001 /* WKUP_JD2_RISE */ +#define ARIZONA_WKUP_JD2_RISE_MASK 0x0001 /* WKUP_JD2_RISE */ +#define ARIZONA_WKUP_JD2_RISE_SHIFT 0 /* WKUP_JD2_RISE */ +#define ARIZONA_WKUP_JD2_RISE_WIDTH 1 /* WKUP_JD2_RISE */ + +/* + * R65 (0x41) - Sequence control + */ +#define ARIZONA_WSEQ_ENA_GP5_FALL 0x0020 /* WSEQ_ENA_GP5_FALL */ +#define ARIZONA_WSEQ_ENA_GP5_FALL_MASK 0x0020 /* WSEQ_ENA_GP5_FALL */ +#define ARIZONA_WSEQ_ENA_GP5_FALL_SHIFT 5 /* WSEQ_ENA_GP5_FALL */ +#define ARIZONA_WSEQ_ENA_GP5_FALL_WIDTH 1 /* WSEQ_ENA_GP5_FALL */ +#define ARIZONA_WSEQ_ENA_GP5_RISE 0x0010 /* WSEQ_ENA_GP5_RISE */ +#define ARIZONA_WSEQ_ENA_GP5_RISE_MASK 0x0010 /* WSEQ_ENA_GP5_RISE */ +#define ARIZONA_WSEQ_ENA_GP5_RISE_SHIFT 4 /* WSEQ_ENA_GP5_RISE */ +#define ARIZONA_WSEQ_ENA_GP5_RISE_WIDTH 1 /* WSEQ_ENA_GP5_RISE */ +#define ARIZONA_WSEQ_ENA_JD1_FALL 0x0008 /* WSEQ_ENA_JD1_FALL */ +#define 
ARIZONA_WSEQ_ENA_JD1_FALL_MASK 0x0008 /* WSEQ_ENA_JD1_FALL */ +#define ARIZONA_WSEQ_ENA_JD1_FALL_SHIFT 3 /* WSEQ_ENA_JD1_FALL */ +#define ARIZONA_WSEQ_ENA_JD1_FALL_WIDTH 1 /* WSEQ_ENA_JD1_FALL */ +#define ARIZONA_WSEQ_ENA_JD1_RISE 0x0004 /* WSEQ_ENA_JD1_RISE */ +#define ARIZONA_WSEQ_ENA_JD1_RISE_MASK 0x0004 /* WSEQ_ENA_JD1_RISE */ +#define ARIZONA_WSEQ_ENA_JD1_RISE_SHIFT 2 /* WSEQ_ENA_JD1_RISE */ +#define ARIZONA_WSEQ_ENA_JD1_RISE_WIDTH 1 /* WSEQ_ENA_JD1_RISE */ +#define ARIZONA_WSEQ_ENA_JD2_FALL 0x0002 /* WSEQ_ENA_JD2_FALL */ +#define ARIZONA_WSEQ_ENA_JD2_FALL_MASK 0x0002 /* WSEQ_ENA_JD2_FALL */ +#define ARIZONA_WSEQ_ENA_JD2_FALL_SHIFT 1 /* WSEQ_ENA_JD2_FALL */ +#define ARIZONA_WSEQ_ENA_JD2_FALL_WIDTH 1 /* WSEQ_ENA_JD2_FALL */ +#define ARIZONA_WSEQ_ENA_JD2_RISE 0x0001 /* WSEQ_ENA_JD2_RISE */ +#define ARIZONA_WSEQ_ENA_JD2_RISE_MASK 0x0001 /* WSEQ_ENA_JD2_RISE */ +#define ARIZONA_WSEQ_ENA_JD2_RISE_SHIFT 0 /* WSEQ_ENA_JD2_RISE */ +#define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH 1 /* WSEQ_ENA_JD2_RISE */ + +/* + * R66 (0x42) - Spare Triggers + */ +#define ARIZONA_WS_TRG8 0x0080 /* WS_TRG8 */ +#define ARIZONA_WS_TRG8_MASK 0x0080 /* WS_TRG8 */ +#define ARIZONA_WS_TRG8_SHIFT 7 /* WS_TRG8 */ +#define ARIZONA_WS_TRG8_WIDTH 1 /* WS_TRG8 */ +#define ARIZONA_WS_TRG7 0x0040 /* WS_TRG7 */ +#define ARIZONA_WS_TRG7_MASK 0x0040 /* WS_TRG7 */ +#define ARIZONA_WS_TRG7_SHIFT 6 /* WS_TRG7 */ +#define ARIZONA_WS_TRG7_WIDTH 1 /* WS_TRG7 */ +#define ARIZONA_WS_TRG6 0x0020 /* WS_TRG6 */ +#define ARIZONA_WS_TRG6_MASK 0x0020 /* WS_TRG6 */ +#define ARIZONA_WS_TRG6_SHIFT 5 /* WS_TRG6 */ +#define ARIZONA_WS_TRG6_WIDTH 1 /* WS_TRG6 */ +#define ARIZONA_WS_TRG5 0x0010 /* WS_TRG5 */ +#define ARIZONA_WS_TRG5_MASK 0x0010 /* WS_TRG5 */ +#define ARIZONA_WS_TRG5_SHIFT 4 /* WS_TRG5 */ +#define ARIZONA_WS_TRG5_WIDTH 1 /* WS_TRG5 */ +#define ARIZONA_WS_TRG4 0x0008 /* WS_TRG4 */ +#define ARIZONA_WS_TRG4_MASK 0x0008 /* WS_TRG4 */ +#define ARIZONA_WS_TRG4_SHIFT 3 /* WS_TRG4 */ +#define ARIZONA_WS_TRG4_WIDTH 1 /* WS_TRG4 */ +#define ARIZONA_WS_TRG3 0x0004 /* WS_TRG3 */ +#define ARIZONA_WS_TRG3_MASK 0x0004 /* WS_TRG3 */ +#define ARIZONA_WS_TRG3_SHIFT 2 /* WS_TRG3 */ +#define ARIZONA_WS_TRG3_WIDTH 1 /* WS_TRG3 */ +#define ARIZONA_WS_TRG2 0x0002 /* WS_TRG2 */ +#define ARIZONA_WS_TRG2_MASK 0x0002 /* WS_TRG2 */ +#define ARIZONA_WS_TRG2_SHIFT 1 /* WS_TRG2 */ +#define ARIZONA_WS_TRG2_WIDTH 1 /* WS_TRG2 */ +#define ARIZONA_WS_TRG1 0x0001 /* WS_TRG1 */ +#define ARIZONA_WS_TRG1_MASK 0x0001 /* WS_TRG1 */ +#define ARIZONA_WS_TRG1_SHIFT 0 /* WS_TRG1 */ +#define ARIZONA_WS_TRG1_WIDTH 1 /* WS_TRG1 */ + +/* + * R97 (0x61) - Sample Rate Sequence Select 1 + */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */ + +/* + * R98 (0x62) - Sample Rate Sequence Select 2 + */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR - [8:0] */ + +/* + * R99 (0x63) - Sample Rate Sequence Select 3 + */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR - [8:0] */ +#define 
ARIZONA_WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR - [8:0] */ + +/* + * R100 (0x64) - Sample Rate Sequence Select 4 + */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR - [8:0] */ + +/* + * R104 (0x68) - Always On Triggers Sequence Select 1 + */ +#define ARIZONA_WSEQ_GP5_RISE_SEQ_ADDR_MASK 0x01FF /* WSEQ_GP5_RISE_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_GP5_RISE_SEQ_ADDR_SHIFT 0 /* WSEQ_GP5_RISE_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_GP5_RISE_SEQ_ADDR_WIDTH 9 /* WSEQ_GP5_RISE_SEQ_ADDR - [8:0] */ + +/* + * R105 (0x69) - Always On Triggers Sequence Select 2 + */ +#define ARIZONA_WSEQ_GP5_FALL_SEQ_ADDR_MASK 0x01FF /* WSEQ_GP5_FALL_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_GP5_FALL_SEQ_ADDR_SHIFT 0 /* WSEQ_GP5_FALL_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_GP5_FALL_SEQ_ADDR_WIDTH 9 /* WSEQ_GP5_FALL_SEQ_ADDR - [8:0] */ + +/* + * R106 (0x6A) - Always On Triggers Sequence Select 3 + */ +#define ARIZONA_WSEQ_JD1_RISE_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD1_RISE_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_JD1_RISE_SEQ_ADDR_SHIFT 0 /* WSEQ_JD1_RISE_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_JD1_RISE_SEQ_ADDR_WIDTH 9 /* WSEQ_JD1_RISE_SEQ_ADDR - [8:0] */ + +/* + * R107 (0x6B) - Always On Triggers Sequence Select 4 + */ +#define ARIZONA_WSEQ_JD1_FALL_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD1_FALL_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_JD1_FALL_SEQ_ADDR_SHIFT 0 /* WSEQ_JD1_FALL_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_JD1_FALL_SEQ_ADDR_WIDTH 9 /* WSEQ_JD1_FALL_SEQ_ADDR - [8:0] */ + +/* + * R108 (0x6C) - Always On Triggers Sequence Select 5 + */ +#define ARIZONA_WSEQ_JD2_RISE_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD2_RISE_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_JD2_RISE_SEQ_ADDR_SHIFT 0 /* WSEQ_JD2_RISE_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_JD2_RISE_SEQ_ADDR_WIDTH 9 /* WSEQ_JD2_RISE_SEQ_ADDR - [8:0] */ + +/* + * R109 (0x6D) - Always On Triggers Sequence Select 6 + */ +#define ARIZONA_WSEQ_JD2_FALL_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD2_FALL_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_JD2_FALL_SEQ_ADDR_SHIFT 0 /* WSEQ_JD2_FALL_SEQ_ADDR - [8:0] */ +#define ARIZONA_WSEQ_JD2_FALL_SEQ_ADDR_WIDTH 9 /* WSEQ_JD2_FALL_SEQ_ADDR - [8:0] */ + +/* + * R112 (0x70) - Comfort Noise Generator + */ +#define ARIZONA_NOISE_GEN_RATE_MASK 0x7800 /* NOISE_GEN_RATE - [14:11] */ +#define ARIZONA_NOISE_GEN_RATE_SHIFT 11 /* NOISE_GEN_RATE - [14:11] */ +#define ARIZONA_NOISE_GEN_RATE_WIDTH 4 /* NOISE_GEN_RATE - [14:11] */ +#define ARIZONA_NOISE_GEN_ENA 0x0020 /* NOISE_GEN_ENA */ +#define ARIZONA_NOISE_GEN_ENA_MASK 0x0020 /* NOISE_GEN_ENA */ +#define ARIZONA_NOISE_GEN_ENA_SHIFT 5 /* NOISE_GEN_ENA */ +#define ARIZONA_NOISE_GEN_ENA_WIDTH 1 /* NOISE_GEN_ENA */ +#define ARIZONA_NOISE_GEN_GAIN_MASK 0x001F /* NOISE_GEN_GAIN - [4:0] */ +#define ARIZONA_NOISE_GEN_GAIN_SHIFT 0 /* NOISE_GEN_GAIN - [4:0] */ +#define ARIZONA_NOISE_GEN_GAIN_WIDTH 5 /* NOISE_GEN_GAIN - [4:0] */ + +/* + * R144 (0x90) - Haptics Control 1 + */ +#define ARIZONA_HAP_RATE_MASK 0x7800 /* HAP_RATE - [14:11] */ +#define ARIZONA_HAP_RATE_SHIFT 11 /* HAP_RATE - [14:11] */ +#define ARIZONA_HAP_RATE_WIDTH 4 /* HAP_RATE - [14:11] */ +#define ARIZONA_ONESHOT_TRIG 0x0010 /* ONESHOT_TRIG */ 
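For writes, the _MASK macros pair naturally with regmap_update_bits(), which performs the read-modify-write. The small sketch below (again an illustration under assumptions, not code from this patch) uses the Comfort Noise Generator fields defined above; the gain value and regmap handle are placeholders, and R112's raw address (0x70, per the comment) stands in for the register-name macro defined earlier in the header.

#include <linux/regmap.h>
#include <linux/mfd/arizona/registers.h>	/* assumed install path */

/* Sketch only: program NOISE_GEN_GAIN and set NOISE_GEN_ENA in one update. */
static int enable_noise_gen(struct regmap *map, unsigned int gain)
{
	unsigned int mask = ARIZONA_NOISE_GEN_GAIN_MASK |
			    ARIZONA_NOISE_GEN_ENA_MASK;
	unsigned int val = (gain << ARIZONA_NOISE_GEN_GAIN_SHIFT) |
			   ARIZONA_NOISE_GEN_ENA;

	/* Only the 5-bit gain field and the enable bit are modified. */
	return regmap_update_bits(map, 0x70, mask, val);
}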
+#define ARIZONA_ONESHOT_TRIG_MASK 0x0010 /* ONESHOT_TRIG */ +#define ARIZONA_ONESHOT_TRIG_SHIFT 4 /* ONESHOT_TRIG */ +#define ARIZONA_ONESHOT_TRIG_WIDTH 1 /* ONESHOT_TRIG */ +#define ARIZONA_HAP_CTRL_MASK 0x000C /* HAP_CTRL - [3:2] */ +#define ARIZONA_HAP_CTRL_SHIFT 2 /* HAP_CTRL - [3:2] */ +#define ARIZONA_HAP_CTRL_WIDTH 2 /* HAP_CTRL - [3:2] */ +#define ARIZONA_HAP_ACT 0x0002 /* HAP_ACT */ +#define ARIZONA_HAP_ACT_MASK 0x0002 /* HAP_ACT */ +#define ARIZONA_HAP_ACT_SHIFT 1 /* HAP_ACT */ +#define ARIZONA_HAP_ACT_WIDTH 1 /* HAP_ACT */ + +/* + * R145 (0x91) - Haptics Control 2 + */ +#define ARIZONA_LRA_FREQ_MASK 0x7FFF /* LRA_FREQ - [14:0] */ +#define ARIZONA_LRA_FREQ_SHIFT 0 /* LRA_FREQ - [14:0] */ +#define ARIZONA_LRA_FREQ_WIDTH 15 /* LRA_FREQ - [14:0] */ + +/* + * R146 (0x92) - Haptics phase 1 intensity + */ +#define ARIZONA_PHASE1_INTENSITY_MASK 0x00FF /* PHASE1_INTENSITY - [7:0] */ +#define ARIZONA_PHASE1_INTENSITY_SHIFT 0 /* PHASE1_INTENSITY - [7:0] */ +#define ARIZONA_PHASE1_INTENSITY_WIDTH 8 /* PHASE1_INTENSITY - [7:0] */ + +/* + * R147 (0x93) - Haptics phase 1 duration + */ +#define ARIZONA_PHASE1_DURATION_MASK 0x01FF /* PHASE1_DURATION - [8:0] */ +#define ARIZONA_PHASE1_DURATION_SHIFT 0 /* PHASE1_DURATION - [8:0] */ +#define ARIZONA_PHASE1_DURATION_WIDTH 9 /* PHASE1_DURATION - [8:0] */ + +/* + * R148 (0x94) - Haptics phase 2 intensity + */ +#define ARIZONA_PHASE2_INTENSITY_MASK 0x00FF /* PHASE2_INTENSITY - [7:0] */ +#define ARIZONA_PHASE2_INTENSITY_SHIFT 0 /* PHASE2_INTENSITY - [7:0] */ +#define ARIZONA_PHASE2_INTENSITY_WIDTH 8 /* PHASE2_INTENSITY - [7:0] */ + +/* + * R149 (0x95) - Haptics phase 2 duration + */ +#define ARIZONA_PHASE2_DURATION_MASK 0x07FF /* PHASE2_DURATION - [10:0] */ +#define ARIZONA_PHASE2_DURATION_SHIFT 0 /* PHASE2_DURATION - [10:0] */ +#define ARIZONA_PHASE2_DURATION_WIDTH 11 /* PHASE2_DURATION - [10:0] */ + +/* + * R150 (0x96) - Haptics phase 3 intensity + */ +#define ARIZONA_PHASE3_INTENSITY_MASK 0x00FF /* PHASE3_INTENSITY - [7:0] */ +#define ARIZONA_PHASE3_INTENSITY_SHIFT 0 /* PHASE3_INTENSITY - [7:0] */ +#define ARIZONA_PHASE3_INTENSITY_WIDTH 8 /* PHASE3_INTENSITY - [7:0] */ + +/* + * R151 (0x97) - Haptics phase 3 duration + */ +#define ARIZONA_PHASE3_DURATION_MASK 0x01FF /* PHASE3_DURATION - [8:0] */ +#define ARIZONA_PHASE3_DURATION_SHIFT 0 /* PHASE3_DURATION - [8:0] */ +#define ARIZONA_PHASE3_DURATION_WIDTH 9 /* PHASE3_DURATION - [8:0] */ + +/* + * R152 (0x98) - Haptics Status + */ +#define ARIZONA_ONESHOT_STS 0x0001 /* ONESHOT_STS */ +#define ARIZONA_ONESHOT_STS_MASK 0x0001 /* ONESHOT_STS */ +#define ARIZONA_ONESHOT_STS_SHIFT 0 /* ONESHOT_STS */ +#define ARIZONA_ONESHOT_STS_WIDTH 1 /* ONESHOT_STS */ + +/* + * R256 (0x100) - Clock 32k 1 + */ +#define ARIZONA_CLK_32K_ENA 0x0040 /* CLK_32K_ENA */ +#define ARIZONA_CLK_32K_ENA_MASK 0x0040 /* CLK_32K_ENA */ +#define ARIZONA_CLK_32K_ENA_SHIFT 6 /* CLK_32K_ENA */ +#define ARIZONA_CLK_32K_ENA_WIDTH 1 /* CLK_32K_ENA */ +#define ARIZONA_CLK_32K_SRC_MASK 0x0003 /* CLK_32K_SRC - [1:0] */ +#define ARIZONA_CLK_32K_SRC_SHIFT 0 /* CLK_32K_SRC - [1:0] */ +#define ARIZONA_CLK_32K_SRC_WIDTH 2 /* CLK_32K_SRC - [1:0] */ + +/* + * R257 (0x101) - System Clock 1 + */ +#define ARIZONA_SYSCLK_FRAC 0x8000 /* SYSCLK_FRAC */ +#define ARIZONA_SYSCLK_FRAC_MASK 0x8000 /* SYSCLK_FRAC */ +#define ARIZONA_SYSCLK_FRAC_SHIFT 15 /* SYSCLK_FRAC */ +#define ARIZONA_SYSCLK_FRAC_WIDTH 1 /* SYSCLK_FRAC */ +#define ARIZONA_SYSCLK_FREQ_MASK 0x0700 /* SYSCLK_FREQ - [10:8] */ +#define ARIZONA_SYSCLK_FREQ_SHIFT 8 /* SYSCLK_FREQ - [10:8] */ +#define 
ARIZONA_SYSCLK_FREQ_WIDTH 3 /* SYSCLK_FREQ - [10:8] */ +#define ARIZONA_SYSCLK_ENA 0x0040 /* SYSCLK_ENA */ +#define ARIZONA_SYSCLK_ENA_MASK 0x0040 /* SYSCLK_ENA */ +#define ARIZONA_SYSCLK_ENA_SHIFT 6 /* SYSCLK_ENA */ +#define ARIZONA_SYSCLK_ENA_WIDTH 1 /* SYSCLK_ENA */ +#define ARIZONA_SYSCLK_SRC_MASK 0x000F /* SYSCLK_SRC - [3:0] */ +#define ARIZONA_SYSCLK_SRC_SHIFT 0 /* SYSCLK_SRC - [3:0] */ +#define ARIZONA_SYSCLK_SRC_WIDTH 4 /* SYSCLK_SRC - [3:0] */ + +/* + * R258 (0x102) - Sample rate 1 + */ +#define ARIZONA_SAMPLE_RATE_1_MASK 0x001F /* SAMPLE_RATE_1 - [4:0] */ +#define ARIZONA_SAMPLE_RATE_1_SHIFT 0 /* SAMPLE_RATE_1 - [4:0] */ +#define ARIZONA_SAMPLE_RATE_1_WIDTH 5 /* SAMPLE_RATE_1 - [4:0] */ + +/* + * R259 (0x103) - Sample rate 2 + */ +#define ARIZONA_SAMPLE_RATE_2_MASK 0x001F /* SAMPLE_RATE_2 - [4:0] */ +#define ARIZONA_SAMPLE_RATE_2_SHIFT 0 /* SAMPLE_RATE_2 - [4:0] */ +#define ARIZONA_SAMPLE_RATE_2_WIDTH 5 /* SAMPLE_RATE_2 - [4:0] */ + +/* + * R260 (0x104) - Sample rate 3 + */ +#define ARIZONA_SAMPLE_RATE_3_MASK 0x001F /* SAMPLE_RATE_3 - [4:0] */ +#define ARIZONA_SAMPLE_RATE_3_SHIFT 0 /* SAMPLE_RATE_3 - [4:0] */ +#define ARIZONA_SAMPLE_RATE_3_WIDTH 5 /* SAMPLE_RATE_3 - [4:0] */ + +/* + * R266 (0x10A) - Sample rate 1 status + */ +#define ARIZONA_SAMPLE_RATE_1_STS_MASK 0x001F /* SAMPLE_RATE_1_STS - [4:0] */ +#define ARIZONA_SAMPLE_RATE_1_STS_SHIFT 0 /* SAMPLE_RATE_1_STS - [4:0] */ +#define ARIZONA_SAMPLE_RATE_1_STS_WIDTH 5 /* SAMPLE_RATE_1_STS - [4:0] */ + +/* + * R267 (0x10B) - Sample rate 2 status + */ +#define ARIZONA_SAMPLE_RATE_2_STS_MASK 0x001F /* SAMPLE_RATE_2_STS - [4:0] */ +#define ARIZONA_SAMPLE_RATE_2_STS_SHIFT 0 /* SAMPLE_RATE_2_STS - [4:0] */ +#define ARIZONA_SAMPLE_RATE_2_STS_WIDTH 5 /* SAMPLE_RATE_2_STS - [4:0] */ + +/* + * R268 (0x10C) - Sample rate 3 status + */ +#define ARIZONA_SAMPLE_RATE_3_STS_MASK 0x001F /* SAMPLE_RATE_3_STS - [4:0] */ +#define ARIZONA_SAMPLE_RATE_3_STS_SHIFT 0 /* SAMPLE_RATE_3_STS - [4:0] */ +#define ARIZONA_SAMPLE_RATE_3_STS_WIDTH 5 /* SAMPLE_RATE_3_STS - [4:0] */ + +/* + * R274 (0x112) - Async clock 1 + */ +#define ARIZONA_ASYNC_CLK_FREQ_MASK 0x0700 /* ASYNC_CLK_FREQ - [10:8] */ +#define ARIZONA_ASYNC_CLK_FREQ_SHIFT 8 /* ASYNC_CLK_FREQ - [10:8] */ +#define ARIZONA_ASYNC_CLK_FREQ_WIDTH 3 /* ASYNC_CLK_FREQ - [10:8] */ +#define ARIZONA_ASYNC_CLK_ENA 0x0040 /* ASYNC_CLK_ENA */ +#define ARIZONA_ASYNC_CLK_ENA_MASK 0x0040 /* ASYNC_CLK_ENA */ +#define ARIZONA_ASYNC_CLK_ENA_SHIFT 6 /* ASYNC_CLK_ENA */ +#define ARIZONA_ASYNC_CLK_ENA_WIDTH 1 /* ASYNC_CLK_ENA */ +#define ARIZONA_ASYNC_CLK_SRC_MASK 0x000F /* ASYNC_CLK_SRC - [3:0] */ +#define ARIZONA_ASYNC_CLK_SRC_SHIFT 0 /* ASYNC_CLK_SRC - [3:0] */ +#define ARIZONA_ASYNC_CLK_SRC_WIDTH 4 /* ASYNC_CLK_SRC - [3:0] */ + +/* + * R275 (0x113) - Async sample rate 1 + */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_MASK 0x001F /* ASYNC_SAMPLE_RATE_1 - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_SHIFT 0 /* ASYNC_SAMPLE_RATE_1 - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_WIDTH 5 /* ASYNC_SAMPLE_RATE_1 - [4:0] */ + +/* + * R276 (0x114) - Async sample rate 2 + */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_MASK 0x001F /* ASYNC_SAMPLE_RATE_2 - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_SHIFT 0 /* ASYNC_SAMPLE_RATE_2 - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_WIDTH 5 /* ASYNC_SAMPLE_RATE_2 - [4:0] */ + +/* + * R283 (0x11B) - Async sample rate 1 status + */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_1_STS - 
[4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */ + +/* + * R284 (0x11C) - Async sample rate 2 status + */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */ + +/* + * R329 (0x149) - Output system clock + */ +#define ARIZONA_OPCLK_ENA 0x8000 /* OPCLK_ENA */ +#define ARIZONA_OPCLK_ENA_MASK 0x8000 /* OPCLK_ENA */ +#define ARIZONA_OPCLK_ENA_SHIFT 15 /* OPCLK_ENA */ +#define ARIZONA_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */ +#define ARIZONA_OPCLK_DIV_MASK 0x00F8 /* OPCLK_DIV - [7:3] */ +#define ARIZONA_OPCLK_DIV_SHIFT 3 /* OPCLK_DIV - [7:3] */ +#define ARIZONA_OPCLK_DIV_WIDTH 5 /* OPCLK_DIV - [7:3] */ +#define ARIZONA_OPCLK_SEL_MASK 0x0007 /* OPCLK_SEL - [2:0] */ +#define ARIZONA_OPCLK_SEL_SHIFT 0 /* OPCLK_SEL - [2:0] */ +#define ARIZONA_OPCLK_SEL_WIDTH 3 /* OPCLK_SEL - [2:0] */ + +/* + * R330 (0x14A) - Output async clock + */ +#define ARIZONA_OPCLK_ASYNC_ENA 0x8000 /* OPCLK_ASYNC_ENA */ +#define ARIZONA_OPCLK_ASYNC_ENA_MASK 0x8000 /* OPCLK_ASYNC_ENA */ +#define ARIZONA_OPCLK_ASYNC_ENA_SHIFT 15 /* OPCLK_ASYNC_ENA */ +#define ARIZONA_OPCLK_ASYNC_ENA_WIDTH 1 /* OPCLK_ASYNC_ENA */ +#define ARIZONA_OPCLK_ASYNC_DIV_MASK 0x00F8 /* OPCLK_ASYNC_DIV - [7:3] */ +#define ARIZONA_OPCLK_ASYNC_DIV_SHIFT 3 /* OPCLK_ASYNC_DIV - [7:3] */ +#define ARIZONA_OPCLK_ASYNC_DIV_WIDTH 5 /* OPCLK_ASYNC_DIV - [7:3] */ +#define ARIZONA_OPCLK_ASYNC_SEL_MASK 0x0007 /* OPCLK_ASYNC_SEL - [2:0] */ +#define ARIZONA_OPCLK_ASYNC_SEL_SHIFT 0 /* OPCLK_ASYNC_SEL - [2:0] */ +#define ARIZONA_OPCLK_ASYNC_SEL_WIDTH 3 /* OPCLK_ASYNC_SEL - [2:0] */ + +/* + * R338 (0x152) - Rate Estimator 1 + */ +#define ARIZONA_TRIG_ON_STARTUP 0x0010 /* TRIG_ON_STARTUP */ +#define ARIZONA_TRIG_ON_STARTUP_MASK 0x0010 /* TRIG_ON_STARTUP */ +#define ARIZONA_TRIG_ON_STARTUP_SHIFT 4 /* TRIG_ON_STARTUP */ +#define ARIZONA_TRIG_ON_STARTUP_WIDTH 1 /* TRIG_ON_STARTUP */ +#define ARIZONA_LRCLK_SRC_MASK 0x000E /* LRCLK_SRC - [3:1] */ +#define ARIZONA_LRCLK_SRC_SHIFT 1 /* LRCLK_SRC - [3:1] */ +#define ARIZONA_LRCLK_SRC_WIDTH 3 /* LRCLK_SRC - [3:1] */ +#define ARIZONA_RATE_EST_ENA 0x0001 /* RATE_EST_ENA */ +#define ARIZONA_RATE_EST_ENA_MASK 0x0001 /* RATE_EST_ENA */ +#define ARIZONA_RATE_EST_ENA_SHIFT 0 /* RATE_EST_ENA */ +#define ARIZONA_RATE_EST_ENA_WIDTH 1 /* RATE_EST_ENA */ + +/* + * R339 (0x153) - Rate Estimator 2 + */ +#define ARIZONA_SAMPLE_RATE_DETECT_A_MASK 0x001F /* SAMPLE_RATE_DETECT_A - [4:0] */ +#define ARIZONA_SAMPLE_RATE_DETECT_A_SHIFT 0 /* SAMPLE_RATE_DETECT_A - [4:0] */ +#define ARIZONA_SAMPLE_RATE_DETECT_A_WIDTH 5 /* SAMPLE_RATE_DETECT_A - [4:0] */ + +/* + * R340 (0x154) - Rate Estimator 3 + */ +#define ARIZONA_SAMPLE_RATE_DETECT_B_MASK 0x001F /* SAMPLE_RATE_DETECT_B - [4:0] */ +#define ARIZONA_SAMPLE_RATE_DETECT_B_SHIFT 0 /* SAMPLE_RATE_DETECT_B - [4:0] */ +#define ARIZONA_SAMPLE_RATE_DETECT_B_WIDTH 5 /* SAMPLE_RATE_DETECT_B - [4:0] */ + +/* + * R341 (0x155) - Rate Estimator 4 + */ +#define ARIZONA_SAMPLE_RATE_DETECT_C_MASK 0x001F /* SAMPLE_RATE_DETECT_C - [4:0] */ +#define ARIZONA_SAMPLE_RATE_DETECT_C_SHIFT 0 /* SAMPLE_RATE_DETECT_C - [4:0] */ +#define ARIZONA_SAMPLE_RATE_DETECT_C_WIDTH 5 /* SAMPLE_RATE_DETECT_C - [4:0] */ + +/* + * R342 (0x156) - Rate Estimator 5 + */ +#define ARIZONA_SAMPLE_RATE_DETECT_D_MASK 0x001F /* SAMPLE_RATE_DETECT_D - [4:0] */ +#define ARIZONA_SAMPLE_RATE_DETECT_D_SHIFT 0 /* 
SAMPLE_RATE_DETECT_D - [4:0] */ +#define ARIZONA_SAMPLE_RATE_DETECT_D_WIDTH 5 /* SAMPLE_RATE_DETECT_D - [4:0] */ + +/* + * R353 (0x161) - Dynamic Frequency Scaling 1 + */ +#define ARIZONA_SUBSYS_MAX_FREQ 0x0001 /* SUBSYS_MAX_FREQ */ +#define ARIZONA_SUBSYS_MAX_FREQ_SHIFT 0 /* SUBSYS_MAX_FREQ */ +#define ARIZONA_SUBSYS_MAX_FREQ_WIDTH 1 /* SUBSYS_MAX_FREQ */ + +/* + * R369 (0x171) - FLL1 Control 1 + */ +#define ARIZONA_FLL1_FREERUN 0x0002 /* FLL1_FREERUN */ +#define ARIZONA_FLL1_FREERUN_MASK 0x0002 /* FLL1_FREERUN */ +#define ARIZONA_FLL1_FREERUN_SHIFT 1 /* FLL1_FREERUN */ +#define ARIZONA_FLL1_FREERUN_WIDTH 1 /* FLL1_FREERUN */ +#define ARIZONA_FLL1_ENA 0x0001 /* FLL1_ENA */ +#define ARIZONA_FLL1_ENA_MASK 0x0001 /* FLL1_ENA */ +#define ARIZONA_FLL1_ENA_SHIFT 0 /* FLL1_ENA */ +#define ARIZONA_FLL1_ENA_WIDTH 1 /* FLL1_ENA */ + +/* + * R370 (0x172) - FLL1 Control 2 + */ +#define ARIZONA_FLL1_CTRL_UPD 0x8000 /* FLL1_CTRL_UPD */ +#define ARIZONA_FLL1_CTRL_UPD_MASK 0x8000 /* FLL1_CTRL_UPD */ +#define ARIZONA_FLL1_CTRL_UPD_SHIFT 15 /* FLL1_CTRL_UPD */ +#define ARIZONA_FLL1_CTRL_UPD_WIDTH 1 /* FLL1_CTRL_UPD */ +#define ARIZONA_FLL1_N_MASK 0x03FF /* FLL1_N - [9:0] */ +#define ARIZONA_FLL1_N_SHIFT 0 /* FLL1_N - [9:0] */ +#define ARIZONA_FLL1_N_WIDTH 10 /* FLL1_N - [9:0] */ + +/* + * R371 (0x173) - FLL1 Control 3 + */ +#define ARIZONA_FLL1_THETA_MASK 0xFFFF /* FLL1_THETA - [15:0] */ +#define ARIZONA_FLL1_THETA_SHIFT 0 /* FLL1_THETA - [15:0] */ +#define ARIZONA_FLL1_THETA_WIDTH 16 /* FLL1_THETA - [15:0] */ + +/* + * R372 (0x174) - FLL1 Control 4 + */ +#define ARIZONA_FLL1_LAMBDA_MASK 0xFFFF /* FLL1_LAMBDA - [15:0] */ +#define ARIZONA_FLL1_LAMBDA_SHIFT 0 /* FLL1_LAMBDA - [15:0] */ +#define ARIZONA_FLL1_LAMBDA_WIDTH 16 /* FLL1_LAMBDA - [15:0] */ + +/* + * R373 (0x175) - FLL1 Control 5 + */ +#define ARIZONA_FLL1_FRATIO_MASK 0x0F00 /* FLL1_FRATIO - [11:8] */ +#define ARIZONA_FLL1_FRATIO_SHIFT 8 /* FLL1_FRATIO - [11:8] */ +#define ARIZONA_FLL1_FRATIO_WIDTH 4 /* FLL1_FRATIO - [11:8] */ +#define ARIZONA_FLL1_OUTDIV_MASK 0x000E /* FLL1_OUTDIV - [3:1] */ +#define ARIZONA_FLL1_OUTDIV_SHIFT 1 /* FLL1_OUTDIV - [3:1] */ +#define ARIZONA_FLL1_OUTDIV_WIDTH 3 /* FLL1_OUTDIV - [3:1] */ + +/* + * R374 (0x176) - FLL1 Control 6 + */ +#define ARIZONA_FLL1_CLK_REF_DIV_MASK 0x00C0 /* FLL1_CLK_REF_DIV - [7:6] */ +#define ARIZONA_FLL1_CLK_REF_DIV_SHIFT 6 /* FLL1_CLK_REF_DIV - [7:6] */ +#define ARIZONA_FLL1_CLK_REF_DIV_WIDTH 2 /* FLL1_CLK_REF_DIV - [7:6] */ +#define ARIZONA_FLL1_CLK_REF_SRC_MASK 0x000F /* FLL1_CLK_REF_SRC - [3:0] */ +#define ARIZONA_FLL1_CLK_REF_SRC_SHIFT 0 /* FLL1_CLK_REF_SRC - [3:0] */ +#define ARIZONA_FLL1_CLK_REF_SRC_WIDTH 4 /* FLL1_CLK_REF_SRC - [3:0] */ + +/* + * R375 (0x177) - FLL1 Loop Filter Test 1 + */ +#define ARIZONA_FLL1_FRC_INTEG_UPD 0x8000 /* FLL1_FRC_INTEG_UPD */ +#define ARIZONA_FLL1_FRC_INTEG_UPD_MASK 0x8000 /* FLL1_FRC_INTEG_UPD */ +#define ARIZONA_FLL1_FRC_INTEG_UPD_SHIFT 15 /* FLL1_FRC_INTEG_UPD */ +#define ARIZONA_FLL1_FRC_INTEG_UPD_WIDTH 1 /* FLL1_FRC_INTEG_UPD */ +#define ARIZONA_FLL1_FRC_INTEG_VAL_MASK 0x0FFF /* FLL1_FRC_INTEG_VAL - [11:0] */ +#define ARIZONA_FLL1_FRC_INTEG_VAL_SHIFT 0 /* FLL1_FRC_INTEG_VAL - [11:0] */ +#define ARIZONA_FLL1_FRC_INTEG_VAL_WIDTH 12 /* FLL1_FRC_INTEG_VAL - [11:0] */ + +/* + * R377 (0x179) - FLL1 Control 7 + */ +#define ARIZONA_FLL1_GAIN_MASK 0x003c /* FLL1_GAIN */ +#define ARIZONA_FLL1_GAIN_SHIFT 2 /* FLL1_GAIN */ +#define ARIZONA_FLL1_GAIN_WIDTH 4 /* FLL1_GAIN */ + +/* + * R385 (0x181) - FLL1 Synchroniser 1 + */ +#define ARIZONA_FLL1_SYNC_ENA 0x0001 /* 
FLL1_SYNC_ENA */ +#define ARIZONA_FLL1_SYNC_ENA_MASK 0x0001 /* FLL1_SYNC_ENA */ +#define ARIZONA_FLL1_SYNC_ENA_SHIFT 0 /* FLL1_SYNC_ENA */ +#define ARIZONA_FLL1_SYNC_ENA_WIDTH 1 /* FLL1_SYNC_ENA */ + +/* + * R386 (0x182) - FLL1 Synchroniser 2 + */ +#define ARIZONA_FLL1_SYNC_N_MASK 0x03FF /* FLL1_SYNC_N - [9:0] */ +#define ARIZONA_FLL1_SYNC_N_SHIFT 0 /* FLL1_SYNC_N - [9:0] */ +#define ARIZONA_FLL1_SYNC_N_WIDTH 10 /* FLL1_SYNC_N - [9:0] */ + +/* + * R387 (0x183) - FLL1 Synchroniser 3 + */ +#define ARIZONA_FLL1_SYNC_THETA_MASK 0xFFFF /* FLL1_SYNC_THETA - [15:0] */ +#define ARIZONA_FLL1_SYNC_THETA_SHIFT 0 /* FLL1_SYNC_THETA - [15:0] */ +#define ARIZONA_FLL1_SYNC_THETA_WIDTH 16 /* FLL1_SYNC_THETA - [15:0] */ + +/* + * R388 (0x184) - FLL1 Synchroniser 4 + */ +#define ARIZONA_FLL1_SYNC_LAMBDA_MASK 0xFFFF /* FLL1_SYNC_LAMBDA - [15:0] */ +#define ARIZONA_FLL1_SYNC_LAMBDA_SHIFT 0 /* FLL1_SYNC_LAMBDA - [15:0] */ +#define ARIZONA_FLL1_SYNC_LAMBDA_WIDTH 16 /* FLL1_SYNC_LAMBDA - [15:0] */ + +/* + * R389 (0x185) - FLL1 Synchroniser 5 + */ +#define ARIZONA_FLL1_SYNC_FRATIO_MASK 0x0700 /* FLL1_SYNC_FRATIO - [10:8] */ +#define ARIZONA_FLL1_SYNC_FRATIO_SHIFT 8 /* FLL1_SYNC_FRATIO - [10:8] */ +#define ARIZONA_FLL1_SYNC_FRATIO_WIDTH 3 /* FLL1_SYNC_FRATIO - [10:8] */ + +/* + * R390 (0x186) - FLL1 Synchroniser 6 + */ +#define ARIZONA_FLL1_CLK_SYNC_DIV_MASK 0x00C0 /* FLL1_CLK_SYNC_DIV - [7:6] */ +#define ARIZONA_FLL1_CLK_SYNC_DIV_SHIFT 6 /* FLL1_CLK_SYNC_DIV - [7:6] */ +#define ARIZONA_FLL1_CLK_SYNC_DIV_WIDTH 2 /* FLL1_CLK_SYNC_DIV - [7:6] */ +#define ARIZONA_FLL1_CLK_SYNC_SRC_MASK 0x000F /* FLL1_CLK_SYNC_SRC - [3:0] */ +#define ARIZONA_FLL1_CLK_SYNC_SRC_SHIFT 0 /* FLL1_CLK_SYNC_SRC - [3:0] */ +#define ARIZONA_FLL1_CLK_SYNC_SRC_WIDTH 4 /* FLL1_CLK_SYNC_SRC - [3:0] */ + +/* + * R391 (0x187) - FLL1 Synchroniser 7 + */ +#define ARIZONA_FLL1_SYNC_GAIN_MASK 0x003c /* FLL1_SYNC_GAIN */ +#define ARIZONA_FLL1_SYNC_GAIN_SHIFT 2 /* FLL1_SYNC_GAIN */ +#define ARIZONA_FLL1_SYNC_GAIN_WIDTH 4 /* FLL1_SYNC_GAIN */ +#define ARIZONA_FLL1_SYNC_BW 0x0001 /* FLL1_SYNC_BW */ +#define ARIZONA_FLL1_SYNC_BW_MASK 0x0001 /* FLL1_SYNC_BW */ +#define ARIZONA_FLL1_SYNC_BW_SHIFT 0 /* FLL1_SYNC_BW */ +#define ARIZONA_FLL1_SYNC_BW_WIDTH 1 /* FLL1_SYNC_BW */ + +/* + * R393 (0x189) - FLL1 Spread Spectrum + */ +#define ARIZONA_FLL1_SS_AMPL_MASK 0x0030 /* FLL1_SS_AMPL - [5:4] */ +#define ARIZONA_FLL1_SS_AMPL_SHIFT 4 /* FLL1_SS_AMPL - [5:4] */ +#define ARIZONA_FLL1_SS_AMPL_WIDTH 2 /* FLL1_SS_AMPL - [5:4] */ +#define ARIZONA_FLL1_SS_FREQ_MASK 0x000C /* FLL1_SS_FREQ - [3:2] */ +#define ARIZONA_FLL1_SS_FREQ_SHIFT 2 /* FLL1_SS_FREQ - [3:2] */ +#define ARIZONA_FLL1_SS_FREQ_WIDTH 2 /* FLL1_SS_FREQ - [3:2] */ +#define ARIZONA_FLL1_SS_SEL_MASK 0x0003 /* FLL1_SS_SEL - [1:0] */ +#define ARIZONA_FLL1_SS_SEL_SHIFT 0 /* FLL1_SS_SEL - [1:0] */ +#define ARIZONA_FLL1_SS_SEL_WIDTH 2 /* FLL1_SS_SEL - [1:0] */ + +/* + * R394 (0x18A) - FLL1 GPIO Clock + */ +#define ARIZONA_FLL1_GPDIV_MASK 0x00FE /* FLL1_GPDIV - [7:1] */ +#define ARIZONA_FLL1_GPDIV_SHIFT 1 /* FLL1_GPDIV - [7:1] */ +#define ARIZONA_FLL1_GPDIV_WIDTH 7 /* FLL1_GPDIV - [7:1] */ +#define ARIZONA_FLL1_GPDIV_ENA 0x0001 /* FLL1_GPDIV_ENA */ +#define ARIZONA_FLL1_GPDIV_ENA_MASK 0x0001 /* FLL1_GPDIV_ENA */ +#define ARIZONA_FLL1_GPDIV_ENA_SHIFT 0 /* FLL1_GPDIV_ENA */ +#define ARIZONA_FLL1_GPDIV_ENA_WIDTH 1 /* FLL1_GPDIV_ENA */ + +/* + * R401 (0x191) - FLL2 Control 1 + */ +#define ARIZONA_FLL2_FREERUN 0x0002 /* FLL2_FREERUN */ +#define ARIZONA_FLL2_FREERUN_MASK 0x0002 /* FLL2_FREERUN */ +#define 
ARIZONA_FLL2_FREERUN_SHIFT 1 /* FLL2_FREERUN */ +#define ARIZONA_FLL2_FREERUN_WIDTH 1 /* FLL2_FREERUN */ +#define ARIZONA_FLL2_ENA 0x0001 /* FLL2_ENA */ +#define ARIZONA_FLL2_ENA_MASK 0x0001 /* FLL2_ENA */ +#define ARIZONA_FLL2_ENA_SHIFT 0 /* FLL2_ENA */ +#define ARIZONA_FLL2_ENA_WIDTH 1 /* FLL2_ENA */ + +/* + * R402 (0x192) - FLL2 Control 2 + */ +#define ARIZONA_FLL2_CTRL_UPD 0x8000 /* FLL2_CTRL_UPD */ +#define ARIZONA_FLL2_CTRL_UPD_MASK 0x8000 /* FLL2_CTRL_UPD */ +#define ARIZONA_FLL2_CTRL_UPD_SHIFT 15 /* FLL2_CTRL_UPD */ +#define ARIZONA_FLL2_CTRL_UPD_WIDTH 1 /* FLL2_CTRL_UPD */ +#define ARIZONA_FLL2_N_MASK 0x03FF /* FLL2_N - [9:0] */ +#define ARIZONA_FLL2_N_SHIFT 0 /* FLL2_N - [9:0] */ +#define ARIZONA_FLL2_N_WIDTH 10 /* FLL2_N - [9:0] */ + +/* + * R403 (0x193) - FLL2 Control 3 + */ +#define ARIZONA_FLL2_THETA_MASK 0xFFFF /* FLL2_THETA - [15:0] */ +#define ARIZONA_FLL2_THETA_SHIFT 0 /* FLL2_THETA - [15:0] */ +#define ARIZONA_FLL2_THETA_WIDTH 16 /* FLL2_THETA - [15:0] */ + +/* + * R404 (0x194) - FLL2 Control 4 + */ +#define ARIZONA_FLL2_LAMBDA_MASK 0xFFFF /* FLL2_LAMBDA - [15:0] */ +#define ARIZONA_FLL2_LAMBDA_SHIFT 0 /* FLL2_LAMBDA - [15:0] */ +#define ARIZONA_FLL2_LAMBDA_WIDTH 16 /* FLL2_LAMBDA - [15:0] */ + +/* + * R405 (0x195) - FLL2 Control 5 + */ +#define ARIZONA_FLL2_FRATIO_MASK 0x0700 /* FLL2_FRATIO - [10:8] */ +#define ARIZONA_FLL2_FRATIO_SHIFT 8 /* FLL2_FRATIO - [10:8] */ +#define ARIZONA_FLL2_FRATIO_WIDTH 3 /* FLL2_FRATIO - [10:8] */ +#define ARIZONA_FLL2_OUTDIV_MASK 0x000E /* FLL2_OUTDIV - [3:1] */ +#define ARIZONA_FLL2_OUTDIV_SHIFT 1 /* FLL2_OUTDIV - [3:1] */ +#define ARIZONA_FLL2_OUTDIV_WIDTH 3 /* FLL2_OUTDIV - [3:1] */ + +/* + * R406 (0x196) - FLL2 Control 6 + */ +#define ARIZONA_FLL2_CLK_REF_DIV_MASK 0x00C0 /* FLL2_CLK_REF_DIV - [7:6] */ +#define ARIZONA_FLL2_CLK_REF_DIV_SHIFT 6 /* FLL2_CLK_REF_DIV - [7:6] */ +#define ARIZONA_FLL2_CLK_REF_DIV_WIDTH 2 /* FLL2_CLK_REF_DIV - [7:6] */ +#define ARIZONA_FLL2_CLK_REF_SRC_MASK 0x000F /* FLL2_CLK_REF_SRC - [3:0] */ +#define ARIZONA_FLL2_CLK_REF_SRC_SHIFT 0 /* FLL2_CLK_REF_SRC - [3:0] */ +#define ARIZONA_FLL2_CLK_REF_SRC_WIDTH 4 /* FLL2_CLK_REF_SRC - [3:0] */ + +/* + * R407 (0x197) - FLL2 Loop Filter Test 1 + */ +#define ARIZONA_FLL2_FRC_INTEG_UPD 0x8000 /* FLL2_FRC_INTEG_UPD */ +#define ARIZONA_FLL2_FRC_INTEG_UPD_MASK 0x8000 /* FLL2_FRC_INTEG_UPD */ +#define ARIZONA_FLL2_FRC_INTEG_UPD_SHIFT 15 /* FLL2_FRC_INTEG_UPD */ +#define ARIZONA_FLL2_FRC_INTEG_UPD_WIDTH 1 /* FLL2_FRC_INTEG_UPD */ +#define ARIZONA_FLL2_FRC_INTEG_VAL_MASK 0x0FFF /* FLL2_FRC_INTEG_VAL - [11:0] */ +#define ARIZONA_FLL2_FRC_INTEG_VAL_SHIFT 0 /* FLL2_FRC_INTEG_VAL - [11:0] */ +#define ARIZONA_FLL2_FRC_INTEG_VAL_WIDTH 12 /* FLL2_FRC_INTEG_VAL - [11:0] */ + +/* + * R409 (0x199) - FLL2 Control 7 + */ +#define ARIZONA_FLL2_GAIN_MASK 0x003c /* FLL2_GAIN */ +#define ARIZONA_FLL2_GAIN_SHIFT 2 /* FLL2_GAIN */ +#define ARIZONA_FLL2_GAIN_WIDTH 4 /* FLL2_GAIN */ + +/* + * R417 (0x1A1) - FLL2 Synchroniser 1 + */ +#define ARIZONA_FLL2_SYNC_ENA 0x0001 /* FLL2_SYNC_ENA */ +#define ARIZONA_FLL2_SYNC_ENA_MASK 0x0001 /* FLL2_SYNC_ENA */ +#define ARIZONA_FLL2_SYNC_ENA_SHIFT 0 /* FLL2_SYNC_ENA */ +#define ARIZONA_FLL2_SYNC_ENA_WIDTH 1 /* FLL2_SYNC_ENA */ + +/* + * R418 (0x1A2) - FLL2 Synchroniser 2 + */ +#define ARIZONA_FLL2_SYNC_N_MASK 0x03FF /* FLL2_SYNC_N - [9:0] */ +#define ARIZONA_FLL2_SYNC_N_SHIFT 0 /* FLL2_SYNC_N - [9:0] */ +#define ARIZONA_FLL2_SYNC_N_WIDTH 10 /* FLL2_SYNC_N - [9:0] */ + +/* + * R419 (0x1A3) - FLL2 Synchroniser 3 + */ +#define ARIZONA_FLL2_SYNC_THETA_MASK 
0xFFFF /* FLL2_SYNC_THETA - [15:0] */ +#define ARIZONA_FLL2_SYNC_THETA_SHIFT 0 /* FLL2_SYNC_THETA - [15:0] */ +#define ARIZONA_FLL2_SYNC_THETA_WIDTH 16 /* FLL2_SYNC_THETA - [15:0] */ + +/* + * R420 (0x1A4) - FLL2 Synchroniser 4 + */ +#define ARIZONA_FLL2_SYNC_LAMBDA_MASK 0xFFFF /* FLL2_SYNC_LAMBDA - [15:0] */ +#define ARIZONA_FLL2_SYNC_LAMBDA_SHIFT 0 /* FLL2_SYNC_LAMBDA - [15:0] */ +#define ARIZONA_FLL2_SYNC_LAMBDA_WIDTH 16 /* FLL2_SYNC_LAMBDA - [15:0] */ + +/* + * R421 (0x1A5) - FLL2 Synchroniser 5 + */ +#define ARIZONA_FLL2_SYNC_FRATIO_MASK 0x0700 /* FLL2_SYNC_FRATIO - [10:8] */ +#define ARIZONA_FLL2_SYNC_FRATIO_SHIFT 8 /* FLL2_SYNC_FRATIO - [10:8] */ +#define ARIZONA_FLL2_SYNC_FRATIO_WIDTH 3 /* FLL2_SYNC_FRATIO - [10:8] */ + +/* + * R422 (0x1A6) - FLL2 Synchroniser 6 + */ +#define ARIZONA_FLL2_CLK_SYNC_DIV_MASK 0x00C0 /* FLL2_CLK_SYNC_DIV - [7:6] */ +#define ARIZONA_FLL2_CLK_SYNC_DIV_SHIFT 6 /* FLL2_CLK_SYNC_DIV - [7:6] */ +#define ARIZONA_FLL2_CLK_SYNC_DIV_WIDTH 2 /* FLL2_CLK_SYNC_DIV - [7:6] */ +#define ARIZONA_FLL2_CLK_SYNC_SRC_MASK 0x000F /* FLL2_CLK_SYNC_SRC - [3:0] */ +#define ARIZONA_FLL2_CLK_SYNC_SRC_SHIFT 0 /* FLL2_CLK_SYNC_SRC - [3:0] */ +#define ARIZONA_FLL2_CLK_SYNC_SRC_WIDTH 4 /* FLL2_CLK_SYNC_SRC - [3:0] */ + +/* + * R423 (0x1A7) - FLL2 Synchroniser 7 + */ +#define ARIZONA_FLL2_SYNC_GAIN_MASK 0x003c /* FLL2_SYNC_GAIN */ +#define ARIZONA_FLL2_SYNC_GAIN_SHIFT 2 /* FLL2_SYNC_GAIN */ +#define ARIZONA_FLL2_SYNC_GAIN_WIDTH 4 /* FLL2_SYNC_GAIN */ +#define ARIZONA_FLL2_SYNC_BW 0x0001 /* FLL2_SYNC_BW */ +#define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */ +#define ARIZONA_FLL2_SYNC_BW_SHIFT 0 /* FLL2_SYNC_BW */ +#define ARIZONA_FLL2_SYNC_BW_WIDTH 1 /* FLL2_SYNC_BW */ + +/* + * R425 (0x1A9) - FLL2 Spread Spectrum + */ +#define ARIZONA_FLL2_SS_AMPL_MASK 0x0030 /* FLL2_SS_AMPL - [5:4] */ +#define ARIZONA_FLL2_SS_AMPL_SHIFT 4 /* FLL2_SS_AMPL - [5:4] */ +#define ARIZONA_FLL2_SS_AMPL_WIDTH 2 /* FLL2_SS_AMPL - [5:4] */ +#define ARIZONA_FLL2_SS_FREQ_MASK 0x000C /* FLL2_SS_FREQ - [3:2] */ +#define ARIZONA_FLL2_SS_FREQ_SHIFT 2 /* FLL2_SS_FREQ - [3:2] */ +#define ARIZONA_FLL2_SS_FREQ_WIDTH 2 /* FLL2_SS_FREQ - [3:2] */ +#define ARIZONA_FLL2_SS_SEL_MASK 0x0003 /* FLL2_SS_SEL - [1:0] */ +#define ARIZONA_FLL2_SS_SEL_SHIFT 0 /* FLL2_SS_SEL - [1:0] */ +#define ARIZONA_FLL2_SS_SEL_WIDTH 2 /* FLL2_SS_SEL - [1:0] */ + +/* + * R426 (0x1AA) - FLL2 GPIO Clock + */ +#define ARIZONA_FLL2_GPDIV_MASK 0x00FE /* FLL2_GPDIV - [7:1] */ +#define ARIZONA_FLL2_GPDIV_SHIFT 1 /* FLL2_GPDIV - [7:1] */ +#define ARIZONA_FLL2_GPDIV_WIDTH 7 /* FLL2_GPDIV - [7:1] */ +#define ARIZONA_FLL2_GPDIV_ENA 0x0001 /* FLL2_GPDIV_ENA */ +#define ARIZONA_FLL2_GPDIV_ENA_MASK 0x0001 /* FLL2_GPDIV_ENA */ +#define ARIZONA_FLL2_GPDIV_ENA_SHIFT 0 /* FLL2_GPDIV_ENA */ +#define ARIZONA_FLL2_GPDIV_ENA_WIDTH 1 /* FLL2_GPDIV_ENA */ + +/* + * R512 (0x200) - Mic Charge Pump 1 + */ +#define ARIZONA_CPMIC_DISCH 0x0004 /* CPMIC_DISCH */ +#define ARIZONA_CPMIC_DISCH_MASK 0x0004 /* CPMIC_DISCH */ +#define ARIZONA_CPMIC_DISCH_SHIFT 2 /* CPMIC_DISCH */ +#define ARIZONA_CPMIC_DISCH_WIDTH 1 /* CPMIC_DISCH */ +#define ARIZONA_CPMIC_BYPASS 0x0002 /* CPMIC_BYPASS */ +#define ARIZONA_CPMIC_BYPASS_MASK 0x0002 /* CPMIC_BYPASS */ +#define ARIZONA_CPMIC_BYPASS_SHIFT 1 /* CPMIC_BYPASS */ +#define ARIZONA_CPMIC_BYPASS_WIDTH 1 /* CPMIC_BYPASS */ +#define ARIZONA_CPMIC_ENA 0x0001 /* CPMIC_ENA */ +#define ARIZONA_CPMIC_ENA_MASK 0x0001 /* CPMIC_ENA */ +#define ARIZONA_CPMIC_ENA_SHIFT 0 /* CPMIC_ENA */ +#define ARIZONA_CPMIC_ENA_WIDTH 1 /* CPMIC_ENA */ + +/* + * 
R528 (0x210) - LDO1 Control 1 + */ +#define ARIZONA_LDO1_VSEL_MASK 0x07E0 /* LDO1_VSEL - [10:5] */ +#define ARIZONA_LDO1_VSEL_SHIFT 5 /* LDO1_VSEL - [10:5] */ +#define ARIZONA_LDO1_VSEL_WIDTH 6 /* LDO1_VSEL - [10:5] */ +#define ARIZONA_LDO1_FAST 0x0010 /* LDO1_FAST */ +#define ARIZONA_LDO1_FAST_MASK 0x0010 /* LDO1_FAST */ +#define ARIZONA_LDO1_FAST_SHIFT 4 /* LDO1_FAST */ +#define ARIZONA_LDO1_FAST_WIDTH 1 /* LDO1_FAST */ +#define ARIZONA_LDO1_DISCH 0x0004 /* LDO1_DISCH */ +#define ARIZONA_LDO1_DISCH_MASK 0x0004 /* LDO1_DISCH */ +#define ARIZONA_LDO1_DISCH_SHIFT 2 /* LDO1_DISCH */ +#define ARIZONA_LDO1_DISCH_WIDTH 1 /* LDO1_DISCH */ +#define ARIZONA_LDO1_BYPASS 0x0002 /* LDO1_BYPASS */ +#define ARIZONA_LDO1_BYPASS_MASK 0x0002 /* LDO1_BYPASS */ +#define ARIZONA_LDO1_BYPASS_SHIFT 1 /* LDO1_BYPASS */ +#define ARIZONA_LDO1_BYPASS_WIDTH 1 /* LDO1_BYPASS */ +#define ARIZONA_LDO1_ENA 0x0001 /* LDO1_ENA */ +#define ARIZONA_LDO1_ENA_MASK 0x0001 /* LDO1_ENA */ +#define ARIZONA_LDO1_ENA_SHIFT 0 /* LDO1_ENA */ +#define ARIZONA_LDO1_ENA_WIDTH 1 /* LDO1_ENA */ + +/* + * R530 (0x212) - LDO1 Control 2 + */ +#define ARIZONA_LDO1_HI_PWR 0x0001 /* LDO1_HI_PWR */ +#define ARIZONA_LDO1_HI_PWR_SHIFT 0 /* LDO1_HI_PWR */ +#define ARIZONA_LDO1_HI_PWR_WIDTH 1 /* LDO1_HI_PWR */ + +/* + * R531 (0x213) - LDO2 Control 1 + */ +#define ARIZONA_LDO2_VSEL_MASK 0x07E0 /* LDO2_VSEL - [10:5] */ +#define ARIZONA_LDO2_VSEL_SHIFT 5 /* LDO2_VSEL - [10:5] */ +#define ARIZONA_LDO2_VSEL_WIDTH 6 /* LDO2_VSEL - [10:5] */ +#define ARIZONA_LDO2_FAST 0x0010 /* LDO2_FAST */ +#define ARIZONA_LDO2_FAST_MASK 0x0010 /* LDO2_FAST */ +#define ARIZONA_LDO2_FAST_SHIFT 4 /* LDO2_FAST */ +#define ARIZONA_LDO2_FAST_WIDTH 1 /* LDO2_FAST */ +#define ARIZONA_LDO2_DISCH 0x0004 /* LDO2_DISCH */ +#define ARIZONA_LDO2_DISCH_MASK 0x0004 /* LDO2_DISCH */ +#define ARIZONA_LDO2_DISCH_SHIFT 2 /* LDO2_DISCH */ +#define ARIZONA_LDO2_DISCH_WIDTH 1 /* LDO2_DISCH */ +#define ARIZONA_LDO2_BYPASS 0x0002 /* LDO2_BYPASS */ +#define ARIZONA_LDO2_BYPASS_MASK 0x0002 /* LDO2_BYPASS */ +#define ARIZONA_LDO2_BYPASS_SHIFT 1 /* LDO2_BYPASS */ +#define ARIZONA_LDO2_BYPASS_WIDTH 1 /* LDO2_BYPASS */ +#define ARIZONA_LDO2_ENA 0x0001 /* LDO2_ENA */ +#define ARIZONA_LDO2_ENA_MASK 0x0001 /* LDO2_ENA */ +#define ARIZONA_LDO2_ENA_SHIFT 0 /* LDO2_ENA */ +#define ARIZONA_LDO2_ENA_WIDTH 1 /* LDO2_ENA */ + +/* + * R536 (0x218) - Mic Bias Ctrl 1 + */ +#define ARIZONA_MICB1_EXT_CAP 0x8000 /* MICB1_EXT_CAP */ +#define ARIZONA_MICB1_EXT_CAP_MASK 0x8000 /* MICB1_EXT_CAP */ +#define ARIZONA_MICB1_EXT_CAP_SHIFT 15 /* MICB1_EXT_CAP */ +#define ARIZONA_MICB1_EXT_CAP_WIDTH 1 /* MICB1_EXT_CAP */ +#define ARIZONA_MICB1_LVL_MASK 0x01E0 /* MICB1_LVL - [8:5] */ +#define ARIZONA_MICB1_LVL_SHIFT 5 /* MICB1_LVL - [8:5] */ +#define ARIZONA_MICB1_LVL_WIDTH 4 /* MICB1_LVL - [8:5] */ +#define ARIZONA_MICB1_FAST 0x0010 /* MICB1_FAST */ +#define ARIZONA_MICB1_FAST_MASK 0x0010 /* MICB1_FAST */ +#define ARIZONA_MICB1_FAST_SHIFT 4 /* MICB1_FAST */ +#define ARIZONA_MICB1_FAST_WIDTH 1 /* MICB1_FAST */ +#define ARIZONA_MICB1_RATE 0x0008 /* MICB1_RATE */ +#define ARIZONA_MICB1_RATE_MASK 0x0008 /* MICB1_RATE */ +#define ARIZONA_MICB1_RATE_SHIFT 3 /* MICB1_RATE */ +#define ARIZONA_MICB1_RATE_WIDTH 1 /* MICB1_RATE */ +#define ARIZONA_MICB1_DISCH 0x0004 /* MICB1_DISCH */ +#define ARIZONA_MICB1_DISCH_MASK 0x0004 /* MICB1_DISCH */ +#define ARIZONA_MICB1_DISCH_SHIFT 2 /* MICB1_DISCH */ +#define ARIZONA_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */ +#define ARIZONA_MICB1_BYPASS 0x0002 /* MICB1_BYPASS */ +#define 
ARIZONA_MICB1_BYPASS_MASK 0x0002 /* MICB1_BYPASS */ +#define ARIZONA_MICB1_BYPASS_SHIFT 1 /* MICB1_BYPASS */ +#define ARIZONA_MICB1_BYPASS_WIDTH 1 /* MICB1_BYPASS */ +#define ARIZONA_MICB1_ENA 0x0001 /* MICB1_ENA */ +#define ARIZONA_MICB1_ENA_MASK 0x0001 /* MICB1_ENA */ +#define ARIZONA_MICB1_ENA_SHIFT 0 /* MICB1_ENA */ +#define ARIZONA_MICB1_ENA_WIDTH 1 /* MICB1_ENA */ + +/* + * R537 (0x219) - Mic Bias Ctrl 2 + */ +#define ARIZONA_MICB2_EXT_CAP 0x8000 /* MICB2_EXT_CAP */ +#define ARIZONA_MICB2_EXT_CAP_MASK 0x8000 /* MICB2_EXT_CAP */ +#define ARIZONA_MICB2_EXT_CAP_SHIFT 15 /* MICB2_EXT_CAP */ +#define ARIZONA_MICB2_EXT_CAP_WIDTH 1 /* MICB2_EXT_CAP */ +#define ARIZONA_MICB2_LVL_MASK 0x01E0 /* MICB2_LVL - [8:5] */ +#define ARIZONA_MICB2_LVL_SHIFT 5 /* MICB2_LVL - [8:5] */ +#define ARIZONA_MICB2_LVL_WIDTH 4 /* MICB2_LVL - [8:5] */ +#define ARIZONA_MICB2_FAST 0x0010 /* MICB2_FAST */ +#define ARIZONA_MICB2_FAST_MASK 0x0010 /* MICB2_FAST */ +#define ARIZONA_MICB2_FAST_SHIFT 4 /* MICB2_FAST */ +#define ARIZONA_MICB2_FAST_WIDTH 1 /* MICB2_FAST */ +#define ARIZONA_MICB2_RATE 0x0008 /* MICB2_RATE */ +#define ARIZONA_MICB2_RATE_MASK 0x0008 /* MICB2_RATE */ +#define ARIZONA_MICB2_RATE_SHIFT 3 /* MICB2_RATE */ +#define ARIZONA_MICB2_RATE_WIDTH 1 /* MICB2_RATE */ +#define ARIZONA_MICB2_DISCH 0x0004 /* MICB2_DISCH */ +#define ARIZONA_MICB2_DISCH_MASK 0x0004 /* MICB2_DISCH */ +#define ARIZONA_MICB2_DISCH_SHIFT 2 /* MICB2_DISCH */ +#define ARIZONA_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */ +#define ARIZONA_MICB2_BYPASS 0x0002 /* MICB2_BYPASS */ +#define ARIZONA_MICB2_BYPASS_MASK 0x0002 /* MICB2_BYPASS */ +#define ARIZONA_MICB2_BYPASS_SHIFT 1 /* MICB2_BYPASS */ +#define ARIZONA_MICB2_BYPASS_WIDTH 1 /* MICB2_BYPASS */ +#define ARIZONA_MICB2_ENA 0x0001 /* MICB2_ENA */ +#define ARIZONA_MICB2_ENA_MASK 0x0001 /* MICB2_ENA */ +#define ARIZONA_MICB2_ENA_SHIFT 0 /* MICB2_ENA */ +#define ARIZONA_MICB2_ENA_WIDTH 1 /* MICB2_ENA */ + +/* + * R538 (0x21A) - Mic Bias Ctrl 3 + */ +#define ARIZONA_MICB3_EXT_CAP 0x8000 /* MICB3_EXT_CAP */ +#define ARIZONA_MICB3_EXT_CAP_MASK 0x8000 /* MICB3_EXT_CAP */ +#define ARIZONA_MICB3_EXT_CAP_SHIFT 15 /* MICB3_EXT_CAP */ +#define ARIZONA_MICB3_EXT_CAP_WIDTH 1 /* MICB3_EXT_CAP */ +#define ARIZONA_MICB3_LVL_MASK 0x01E0 /* MICB3_LVL - [8:5] */ +#define ARIZONA_MICB3_LVL_SHIFT 5 /* MICB3_LVL - [8:5] */ +#define ARIZONA_MICB3_LVL_WIDTH 4 /* MICB3_LVL - [8:5] */ +#define ARIZONA_MICB3_FAST 0x0010 /* MICB3_FAST */ +#define ARIZONA_MICB3_FAST_MASK 0x0010 /* MICB3_FAST */ +#define ARIZONA_MICB3_FAST_SHIFT 4 /* MICB3_FAST */ +#define ARIZONA_MICB3_FAST_WIDTH 1 /* MICB3_FAST */ +#define ARIZONA_MICB3_RATE 0x0008 /* MICB3_RATE */ +#define ARIZONA_MICB3_RATE_MASK 0x0008 /* MICB3_RATE */ +#define ARIZONA_MICB3_RATE_SHIFT 3 /* MICB3_RATE */ +#define ARIZONA_MICB3_RATE_WIDTH 1 /* MICB3_RATE */ +#define ARIZONA_MICB3_DISCH 0x0004 /* MICB3_DISCH */ +#define ARIZONA_MICB3_DISCH_MASK 0x0004 /* MICB3_DISCH */ +#define ARIZONA_MICB3_DISCH_SHIFT 2 /* MICB3_DISCH */ +#define ARIZONA_MICB3_DISCH_WIDTH 1 /* MICB3_DISCH */ +#define ARIZONA_MICB3_BYPASS 0x0002 /* MICB3_BYPASS */ +#define ARIZONA_MICB3_BYPASS_MASK 0x0002 /* MICB3_BYPASS */ +#define ARIZONA_MICB3_BYPASS_SHIFT 1 /* MICB3_BYPASS */ +#define ARIZONA_MICB3_BYPASS_WIDTH 1 /* MICB3_BYPASS */ +#define ARIZONA_MICB3_ENA 0x0001 /* MICB3_ENA */ +#define ARIZONA_MICB3_ENA_MASK 0x0001 /* MICB3_ENA */ +#define ARIZONA_MICB3_ENA_SHIFT 0 /* MICB3_ENA */ +#define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */ + +/* + * R549 (0x225) - HP Ctrl 1L + */ +#define 
ARIZONA_RMV_SHRT_HP1L 0x4000 /* RMV_SHRT_HP1L */ +#define ARIZONA_RMV_SHRT_HP1L_MASK 0x4000 /* RMV_SHRT_HP1L */ +#define ARIZONA_RMV_SHRT_HP1L_SHIFT 14 /* RMV_SHRT_HP1L */ +#define ARIZONA_RMV_SHRT_HP1L_WIDTH 1 /* RMV_SHRT_HP1L */ +#define ARIZONA_HP1L_FLWR 0x0004 /* HP1L_FLWR */ +#define ARIZONA_HP1L_FLWR_MASK 0x0004 /* HP1L_FLWR */ +#define ARIZONA_HP1L_FLWR_SHIFT 2 /* HP1L_FLWR */ +#define ARIZONA_HP1L_FLWR_WIDTH 1 /* HP1L_FLWR */ +#define ARIZONA_HP1L_SHRTI 0x0002 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTI_MASK 0x0002 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTI_SHIFT 1 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTI_WIDTH 1 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTO 0x0001 /* HP1L_SHRTO */ +#define ARIZONA_HP1L_SHRTO_MASK 0x0001 /* HP1L_SHRTO */ +#define ARIZONA_HP1L_SHRTO_SHIFT 0 /* HP1L_SHRTO */ +#define ARIZONA_HP1L_SHRTO_WIDTH 1 /* HP1L_SHRTO */ + +/* + * R550 (0x226) - HP Ctrl 1R + */ +#define ARIZONA_RMV_SHRT_HP1R 0x4000 /* RMV_SHRT_HP1R */ +#define ARIZONA_RMV_SHRT_HP1R_MASK 0x4000 /* RMV_SHRT_HP1R */ +#define ARIZONA_RMV_SHRT_HP1R_SHIFT 14 /* RMV_SHRT_HP1R */ +#define ARIZONA_RMV_SHRT_HP1R_WIDTH 1 /* RMV_SHRT_HP1R */ +#define ARIZONA_HP1R_FLWR 0x0004 /* HP1R_FLWR */ +#define ARIZONA_HP1R_FLWR_MASK 0x0004 /* HP1R_FLWR */ +#define ARIZONA_HP1R_FLWR_SHIFT 2 /* HP1R_FLWR */ +#define ARIZONA_HP1R_FLWR_WIDTH 1 /* HP1R_FLWR */ +#define ARIZONA_HP1R_SHRTI 0x0002 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTI_MASK 0x0002 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTI_SHIFT 1 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTI_WIDTH 1 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTO 0x0001 /* HP1R_SHRTO */ +#define ARIZONA_HP1R_SHRTO_MASK 0x0001 /* HP1R_SHRTO */ +#define ARIZONA_HP1R_SHRTO_SHIFT 0 /* HP1R_SHRTO */ +#define ARIZONA_HP1R_SHRTO_WIDTH 1 /* HP1R_SHRTO */ + +/* + * R659 (0x293) - Accessory Detect Mode 1 + */ +#define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */ +#define ARIZONA_ACCDET_SRC_MASK 0x2000 /* ACCDET_SRC */ +#define ARIZONA_ACCDET_SRC_SHIFT 13 /* ACCDET_SRC */ +#define ARIZONA_ACCDET_SRC_WIDTH 1 /* ACCDET_SRC */ +#define ARIZONA_ACCDET_MODE_MASK 0x0007 /* ACCDET_MODE - [2:0] */ +#define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [2:0] */ +#define ARIZONA_ACCDET_MODE_WIDTH 3 /* ACCDET_MODE - [2:0] */ + +/* + * R667 (0x29B) - Headphone Detect 1 + */ +#define ARIZONA_HP_IMPEDANCE_RANGE_MASK 0x0600 /* HP_IMPEDANCE_RANGE - [10:9] */ +#define ARIZONA_HP_IMPEDANCE_RANGE_SHIFT 9 /* HP_IMPEDANCE_RANGE - [10:9] */ +#define ARIZONA_HP_IMPEDANCE_RANGE_WIDTH 2 /* HP_IMPEDANCE_RANGE - [10:9] */ +#define ARIZONA_HP_STEP_SIZE 0x0100 /* HP_STEP_SIZE */ +#define ARIZONA_HP_STEP_SIZE_MASK 0x0100 /* HP_STEP_SIZE */ +#define ARIZONA_HP_STEP_SIZE_SHIFT 8 /* HP_STEP_SIZE */ +#define ARIZONA_HP_STEP_SIZE_WIDTH 1 /* HP_STEP_SIZE */ +#define ARIZONA_HP_HOLDTIME_MASK 0x00E0 /* HP_HOLDTIME - [7:5] */ +#define ARIZONA_HP_HOLDTIME_SHIFT 5 /* HP_HOLDTIME - [7:5] */ +#define ARIZONA_HP_HOLDTIME_WIDTH 3 /* HP_HOLDTIME - [7:5] */ +#define ARIZONA_HP_CLK_DIV_MASK 0x0018 /* HP_CLK_DIV - [4:3] */ +#define ARIZONA_HP_CLK_DIV_SHIFT 3 /* HP_CLK_DIV - [4:3] */ +#define ARIZONA_HP_CLK_DIV_WIDTH 2 /* HP_CLK_DIV - [4:3] */ +#define ARIZONA_HP_IDAC_STEER 0x0004 /* HP_IDAC_STEER */ +#define ARIZONA_HP_IDAC_STEER_MASK 0x0004 /* HP_IDAC_STEER */ +#define ARIZONA_HP_IDAC_STEER_SHIFT 2 /* HP_IDAC_STEER */ +#define ARIZONA_HP_IDAC_STEER_WIDTH 1 /* HP_IDAC_STEER */ +#define WM8998_HP_RATE_MASK 0x0006 /* HP_RATE - [2:1] */ +#define WM8998_HP_RATE_SHIFT 1 /* HP_RATE - [2:1] */ +#define WM8998_HP_RATE_WIDTH 2 /* HP_RATE - [2:1] */ 
+#define ARIZONA_HP_RATE 0x0002 /* HP_RATE */ +#define ARIZONA_HP_RATE_MASK 0x0002 /* HP_RATE */ +#define ARIZONA_HP_RATE_SHIFT 1 /* HP_RATE */ +#define ARIZONA_HP_RATE_WIDTH 1 /* HP_RATE */ +#define ARIZONA_HP_POLL 0x0001 /* HP_POLL */ +#define ARIZONA_HP_POLL_MASK 0x0001 /* HP_POLL */ +#define ARIZONA_HP_POLL_SHIFT 0 /* HP_POLL */ +#define ARIZONA_HP_POLL_WIDTH 1 /* HP_POLL */ + +/* + * R668 (0x29C) - Headphone Detect 2 + */ +#define ARIZONA_HP_DONE 0x0080 /* HP_DONE */ +#define ARIZONA_HP_DONE_MASK 0x0080 /* HP_DONE */ +#define ARIZONA_HP_DONE_SHIFT 7 /* HP_DONE */ +#define ARIZONA_HP_DONE_WIDTH 1 /* HP_DONE */ +#define ARIZONA_HP_LVL_MASK 0x007F /* HP_LVL - [6:0] */ +#define ARIZONA_HP_LVL_SHIFT 0 /* HP_LVL - [6:0] */ +#define ARIZONA_HP_LVL_WIDTH 7 /* HP_LVL - [6:0] */ + +#define ARIZONA_HP_DONE_B 0x8000 /* HP_DONE */ +#define ARIZONA_HP_DONE_B_MASK 0x8000 /* HP_DONE */ +#define ARIZONA_HP_DONE_B_SHIFT 15 /* HP_DONE */ +#define ARIZONA_HP_DONE_B_WIDTH 1 /* HP_DONE */ +#define ARIZONA_HP_LVL_B_MASK 0x7FFF /* HP_LVL - [14:0] */ +#define ARIZONA_HP_LVL_B_SHIFT 0 /* HP_LVL - [14:0] */ +#define ARIZONA_HP_LVL_B_WIDTH 15 /* HP_LVL - [14:0] */ + +/* + * R674 (0x2A2) - MICD clamp control + */ +#define ARIZONA_MICD_CLAMP_MODE_MASK 0x000F /* MICD_CLAMP_MODE - [3:0] */ +#define ARIZONA_MICD_CLAMP_MODE_SHIFT 0 /* MICD_CLAMP_MODE - [3:0] */ +#define ARIZONA_MICD_CLAMP_MODE_WIDTH 4 /* MICD_CLAMP_MODE - [3:0] */ + +/* + * R675 (0x2A3) - Mic Detect 1 + */ +#define ARIZONA_MICD_BIAS_STARTTIME_MASK 0xF000 /* MICD_BIAS_STARTTIME - [15:12] */ +#define ARIZONA_MICD_BIAS_STARTTIME_SHIFT 12 /* MICD_BIAS_STARTTIME - [15:12] */ +#define ARIZONA_MICD_BIAS_STARTTIME_WIDTH 4 /* MICD_BIAS_STARTTIME - [15:12] */ +#define ARIZONA_MICD_RATE_MASK 0x0F00 /* MICD_RATE - [11:8] */ +#define ARIZONA_MICD_RATE_SHIFT 8 /* MICD_RATE - [11:8] */ +#define ARIZONA_MICD_RATE_WIDTH 4 /* MICD_RATE - [11:8] */ +#define ARIZONA_MICD_BIAS_SRC_MASK 0x0030 /* MICD_BIAS_SRC - [5:4] */ +#define ARIZONA_MICD_BIAS_SRC_SHIFT 4 /* MICD_BIAS_SRC - [5:4] */ +#define ARIZONA_MICD_BIAS_SRC_WIDTH 2 /* MICD_BIAS_SRC - [5:4] */ +#define ARIZONA_MICD_DBTIME 0x0002 /* MICD_DBTIME */ +#define ARIZONA_MICD_DBTIME_MASK 0x0002 /* MICD_DBTIME */ +#define ARIZONA_MICD_DBTIME_SHIFT 1 /* MICD_DBTIME */ +#define ARIZONA_MICD_DBTIME_WIDTH 1 /* MICD_DBTIME */ +#define ARIZONA_MICD_ENA 0x0001 /* MICD_ENA */ +#define ARIZONA_MICD_ENA_MASK 0x0001 /* MICD_ENA */ +#define ARIZONA_MICD_ENA_SHIFT 0 /* MICD_ENA */ +#define ARIZONA_MICD_ENA_WIDTH 1 /* MICD_ENA */ + +/* + * R676 (0x2A4) - Mic Detect 2 + */ +#define ARIZONA_MICD_LVL_SEL_MASK 0x00FF /* MICD_LVL_SEL - [7:0] */ +#define ARIZONA_MICD_LVL_SEL_SHIFT 0 /* MICD_LVL_SEL - [7:0] */ +#define ARIZONA_MICD_LVL_SEL_WIDTH 8 /* MICD_LVL_SEL - [7:0] */ + +/* + * R677 (0x2A5) - Mic Detect 3 + */ +#define ARIZONA_MICD_LVL_0 0x0004 /* MICD_LVL - [2] */ +#define ARIZONA_MICD_LVL_1 0x0008 /* MICD_LVL - [3] */ +#define ARIZONA_MICD_LVL_2 0x0010 /* MICD_LVL - [4] */ +#define ARIZONA_MICD_LVL_3 0x0020 /* MICD_LVL - [5] */ +#define ARIZONA_MICD_LVL_4 0x0040 /* MICD_LVL - [6] */ +#define ARIZONA_MICD_LVL_5 0x0080 /* MICD_LVL - [7] */ +#define ARIZONA_MICD_LVL_6 0x0100 /* MICD_LVL - [8] */ +#define ARIZONA_MICD_LVL_7 0x0200 /* MICD_LVL - [9] */ +#define ARIZONA_MICD_LVL_8 0x0400 /* MICD_LVL - [10] */ +#define ARIZONA_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */ +#define ARIZONA_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */ +#define ARIZONA_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */ +#define ARIZONA_MICD_VALID 0x0002 /* MICD_VALID */ 
+#define ARIZONA_MICD_VALID_MASK 0x0002 /* MICD_VALID */
+#define ARIZONA_MICD_VALID_SHIFT 1 /* MICD_VALID */
+#define ARIZONA_MICD_VALID_WIDTH 1 /* MICD_VALID */
+#define ARIZONA_MICD_STS 0x0001 /* MICD_STS */
+#define ARIZONA_MICD_STS_MASK 0x0001 /* MICD_STS */
+#define ARIZONA_MICD_STS_SHIFT 0 /* MICD_STS */
+#define ARIZONA_MICD_STS_WIDTH 1 /* MICD_STS */
+
+/*
+ * R683 (0x2AB) - Mic Detect 4
+ */
+#define ARIZONA_MICDET_ADCVAL_DIFF_MASK 0xFF00 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_SHIFT 8 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_WIDTH 8 /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_MASK 0x007F /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_SHIFT 0 /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_WIDTH 7 /* MICDET_ADCVAL - [6:0] */
+
+/*
+ * R707 (0x2C3) - Mic noise mix control 1
+ */
+#define ARIZONA_MICMUTE_RATE_MASK 0x7800 /* MICMUTE_RATE - [14:11] */
+#define ARIZONA_MICMUTE_RATE_SHIFT 11 /* MICMUTE_RATE - [14:11] */
+#define ARIZONA_MICMUTE_RATE_WIDTH 4 /* MICMUTE_RATE - [14:11] */
+#define ARIZONA_MICMUTE_MIX_ENA 0x0040 /* MICMUTE_MIX_ENA */
+#define ARIZONA_MICMUTE_MIX_ENA_MASK 0x0040 /* MICMUTE_MIX_ENA */
+#define ARIZONA_MICMUTE_MIX_ENA_SHIFT 6 /* MICMUTE_MIX_ENA */
+#define ARIZONA_MICMUTE_MIX_ENA_WIDTH 1 /* MICMUTE_MIX_ENA */
+
+/*
+ * R715 (0x2CB) - Isolation control
+ */
+#define ARIZONA_ISOLATE_DCVDD1 0x0001 /* ISOLATE_DCVDD1 */
+#define ARIZONA_ISOLATE_DCVDD1_MASK 0x0001 /* ISOLATE_DCVDD1 */
+#define ARIZONA_ISOLATE_DCVDD1_SHIFT 0 /* ISOLATE_DCVDD1 */
+#define ARIZONA_ISOLATE_DCVDD1_WIDTH 1 /* ISOLATE_DCVDD1 */
+
+/*
+ * R723 (0x2D3) - Jack detect analogue
+ */
+#define ARIZONA_JD2_ENA 0x0002 /* JD2_ENA */
+#define ARIZONA_JD2_ENA_MASK 0x0002 /* JD2_ENA */
+#define ARIZONA_JD2_ENA_SHIFT 1 /* JD2_ENA */
+#define ARIZONA_JD2_ENA_WIDTH 1 /* JD2_ENA */
+#define ARIZONA_JD1_ENA 0x0001 /* JD1_ENA */
+#define ARIZONA_JD1_ENA_MASK 0x0001 /* JD1_ENA */
+#define ARIZONA_JD1_ENA_SHIFT 0 /* JD1_ENA */
+#define ARIZONA_JD1_ENA_WIDTH 1 /* JD1_ENA */
+
+/*
+ * R768 (0x300) - Input Enables
+ */
+#define ARIZONA_IN4L_ENA 0x0080 /* IN4L_ENA */
+#define ARIZONA_IN4L_ENA_MASK 0x0080 /* IN4L_ENA */
+#define ARIZONA_IN4L_ENA_SHIFT 7 /* IN4L_ENA */
+#define ARIZONA_IN4L_ENA_WIDTH 1 /* IN4L_ENA */
+#define ARIZONA_IN4R_ENA 0x0040 /* IN4R_ENA */
+#define ARIZONA_IN4R_ENA_MASK 0x0040 /* IN4R_ENA */
+#define ARIZONA_IN4R_ENA_SHIFT 6 /* IN4R_ENA */
+#define ARIZONA_IN4R_ENA_WIDTH 1 /* IN4R_ENA */
+#define ARIZONA_IN3L_ENA 0x0020 /* IN3L_ENA */
+#define ARIZONA_IN3L_ENA_MASK 0x0020 /* IN3L_ENA */
+#define ARIZONA_IN3L_ENA_SHIFT 5 /* IN3L_ENA */
+#define ARIZONA_IN3L_ENA_WIDTH 1 /* IN3L_ENA */
+#define ARIZONA_IN3R_ENA 0x0010 /* IN3R_ENA */
+#define ARIZONA_IN3R_ENA_MASK 0x0010 /* IN3R_ENA */
+#define ARIZONA_IN3R_ENA_SHIFT 4 /* IN3R_ENA */
+#define ARIZONA_IN3R_ENA_WIDTH 1 /* IN3R_ENA */
+#define ARIZONA_IN2L_ENA 0x0008 /* IN2L_ENA */
+#define ARIZONA_IN2L_ENA_MASK 0x0008 /* IN2L_ENA */
+#define ARIZONA_IN2L_ENA_SHIFT 3 /* IN2L_ENA */
+#define ARIZONA_IN2L_ENA_WIDTH 1 /* IN2L_ENA */
+#define ARIZONA_IN2R_ENA 0x0004 /* IN2R_ENA */
+#define ARIZONA_IN2R_ENA_MASK 0x0004 /* IN2R_ENA */
+#define ARIZONA_IN2R_ENA_SHIFT 2 /* IN2R_ENA */
+#define ARIZONA_IN2R_ENA_WIDTH 1 /* IN2R_ENA */
+#define ARIZONA_IN1L_ENA 0x0002 /* IN1L_ENA */
+#define ARIZONA_IN1L_ENA_MASK 0x0002 /* IN1L_ENA */
+#define ARIZONA_IN1L_ENA_SHIFT 1 /* IN1L_ENA */
+#define ARIZONA_IN1L_ENA_WIDTH 1 /* IN1L_ENA */
+#define
ARIZONA_IN1R_ENA 0x0001 /* IN1R_ENA */ +#define ARIZONA_IN1R_ENA_MASK 0x0001 /* IN1R_ENA */ +#define ARIZONA_IN1R_ENA_SHIFT 0 /* IN1R_ENA */ +#define ARIZONA_IN1R_ENA_WIDTH 1 /* IN1R_ENA */ + +/* + * R776 (0x308) - Input Rate + */ +#define ARIZONA_IN_RATE_MASK 0x7800 /* IN_RATE - [14:11] */ +#define ARIZONA_IN_RATE_SHIFT 11 /* IN_RATE - [14:11] */ +#define ARIZONA_IN_RATE_WIDTH 4 /* IN_RATE - [14:11] */ + +/* + * R777 (0x309) - Input Volume Ramp + */ +#define ARIZONA_IN_VD_RAMP_MASK 0x0070 /* IN_VD_RAMP - [6:4] */ +#define ARIZONA_IN_VD_RAMP_SHIFT 4 /* IN_VD_RAMP - [6:4] */ +#define ARIZONA_IN_VD_RAMP_WIDTH 3 /* IN_VD_RAMP - [6:4] */ +#define ARIZONA_IN_VI_RAMP_MASK 0x0007 /* IN_VI_RAMP - [2:0] */ +#define ARIZONA_IN_VI_RAMP_SHIFT 0 /* IN_VI_RAMP - [2:0] */ +#define ARIZONA_IN_VI_RAMP_WIDTH 3 /* IN_VI_RAMP - [2:0] */ + +/* + * R780 (0x30C) - HPF Control + */ +#define ARIZONA_IN_HPF_CUT_MASK 0x0007 /* IN_HPF_CUT [2:0] */ +#define ARIZONA_IN_HPF_CUT_SHIFT 0 /* IN_HPF_CUT [2:0] */ +#define ARIZONA_IN_HPF_CUT_WIDTH 3 /* IN_HPF_CUT [2:0] */ + +/* + * R784 (0x310) - IN1L Control + */ +#define ARIZONA_IN1L_HPF_MASK 0x8000 /* IN1L_HPF - [15] */ +#define ARIZONA_IN1L_HPF_SHIFT 15 /* IN1L_HPF - [15] */ +#define ARIZONA_IN1L_HPF_WIDTH 1 /* IN1L_HPF - [15] */ +#define ARIZONA_IN1_OSR_MASK 0x6000 /* IN1_OSR - [14:13] */ +#define ARIZONA_IN1_OSR_SHIFT 13 /* IN1_OSR - [14:13] */ +#define ARIZONA_IN1_OSR_WIDTH 2 /* IN1_OSR - [14:13] */ +#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */ +#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */ +#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */ +#define ARIZONA_IN1_MODE_MASK 0x0400 /* IN1_MODE - [10] */ +#define ARIZONA_IN1_MODE_SHIFT 10 /* IN1_MODE - [10] */ +#define ARIZONA_IN1_MODE_WIDTH 1 /* IN1_MODE - [10] */ +#define ARIZONA_IN1_SINGLE_ENDED_MASK 0x0200 /* IN1_MODE - [9] */ +#define ARIZONA_IN1_SINGLE_ENDED_SHIFT 9 /* IN1_MODE - [9] */ +#define ARIZONA_IN1_SINGLE_ENDED_WIDTH 1 /* IN1_MODE - [9] */ +#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */ +#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */ +#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */ + +/* + * R785 (0x311) - ADC Digital Volume 1L + */ +#define ARIZONA_IN1L_SRC_MASK 0x4000 /* IN1L_SRC - [14] */ +#define ARIZONA_IN1L_SRC_SHIFT 14 /* IN1L_SRC - [14] */ +#define ARIZONA_IN1L_SRC_WIDTH 1 /* IN1L_SRC - [14] */ +#define ARIZONA_IN1L_SRC_SE_MASK 0x2000 /* IN1L_SRC - [13] */ +#define ARIZONA_IN1L_SRC_SE_SHIFT 13 /* IN1L_SRC - [13] */ +#define ARIZONA_IN1L_SRC_SE_WIDTH 1 /* IN1L_SRC - [13] */ +#define ARIZONA_IN_VU 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ +#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */ +#define ARIZONA_IN1L_MUTE 0x0100 /* IN1L_MUTE */ +#define ARIZONA_IN1L_MUTE_MASK 0x0100 /* IN1L_MUTE */ +#define ARIZONA_IN1L_MUTE_SHIFT 8 /* IN1L_MUTE */ +#define ARIZONA_IN1L_MUTE_WIDTH 1 /* IN1L_MUTE */ +#define ARIZONA_IN1L_DIG_VOL_MASK 0x00FF /* IN1L_DIG_VOL - [7:0] */ +#define ARIZONA_IN1L_DIG_VOL_SHIFT 0 /* IN1L_DIG_VOL - [7:0] */ +#define ARIZONA_IN1L_DIG_VOL_WIDTH 8 /* IN1L_DIG_VOL - [7:0] */ + +/* + * R786 (0x312) - DMIC1L Control + */ +#define ARIZONA_IN1_DMICL_DLY_MASK 0x003F /* IN1_DMICL_DLY - [5:0] */ +#define ARIZONA_IN1_DMICL_DLY_SHIFT 0 /* IN1_DMICL_DLY - [5:0] */ +#define ARIZONA_IN1_DMICL_DLY_WIDTH 6 /* IN1_DMICL_DLY - [5:0] */ + +/* + * R788 (0x314) - IN1R Control + */ +#define ARIZONA_IN1R_HPF_MASK 0x8000 /* IN1R_HPF - [15] 
*/ +#define ARIZONA_IN1R_HPF_SHIFT 15 /* IN1R_HPF - [15] */ +#define ARIZONA_IN1R_HPF_WIDTH 1 /* IN1R_HPF - [15] */ +#define ARIZONA_IN1R_PGA_VOL_MASK 0x00FE /* IN1R_PGA_VOL - [7:1] */ +#define ARIZONA_IN1R_PGA_VOL_SHIFT 1 /* IN1R_PGA_VOL - [7:1] */ +#define ARIZONA_IN1R_PGA_VOL_WIDTH 7 /* IN1R_PGA_VOL - [7:1] */ + +/* + * R789 (0x315) - ADC Digital Volume 1R + */ +#define ARIZONA_IN1R_SRC_MASK 0x4000 /* IN1R_SRC - [14] */ +#define ARIZONA_IN1R_SRC_SHIFT 14 /* IN1R_SRC - [14] */ +#define ARIZONA_IN1R_SRC_WIDTH 1 /* IN1R_SRC - [14] */ +#define ARIZONA_IN1R_SRC_SE_MASK 0x2000 /* IN1R_SRC - [13] */ +#define ARIZONA_IN1R_SRC_SE_SHIFT 13 /* IN1R_SRC - [13] */ +#define ARIZONA_IN1R_SRC_SE_WIDTH 1 /* IN1R_SRC - [13] */ +#define ARIZONA_IN_VU 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ +#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */ +#define ARIZONA_IN1R_MUTE 0x0100 /* IN1R_MUTE */ +#define ARIZONA_IN1R_MUTE_MASK 0x0100 /* IN1R_MUTE */ +#define ARIZONA_IN1R_MUTE_SHIFT 8 /* IN1R_MUTE */ +#define ARIZONA_IN1R_MUTE_WIDTH 1 /* IN1R_MUTE */ +#define ARIZONA_IN1R_DIG_VOL_MASK 0x00FF /* IN1R_DIG_VOL - [7:0] */ +#define ARIZONA_IN1R_DIG_VOL_SHIFT 0 /* IN1R_DIG_VOL - [7:0] */ +#define ARIZONA_IN1R_DIG_VOL_WIDTH 8 /* IN1R_DIG_VOL - [7:0] */ + +/* + * R790 (0x316) - DMIC1R Control + */ +#define ARIZONA_IN1_DMICR_DLY_MASK 0x003F /* IN1_DMICR_DLY - [5:0] */ +#define ARIZONA_IN1_DMICR_DLY_SHIFT 0 /* IN1_DMICR_DLY - [5:0] */ +#define ARIZONA_IN1_DMICR_DLY_WIDTH 6 /* IN1_DMICR_DLY - [5:0] */ + +/* + * R792 (0x318) - IN2L Control + */ +#define ARIZONA_IN2L_HPF_MASK 0x8000 /* IN2L_HPF - [15] */ +#define ARIZONA_IN2L_HPF_SHIFT 15 /* IN2L_HPF - [15] */ +#define ARIZONA_IN2L_HPF_WIDTH 1 /* IN2L_HPF - [15] */ +#define ARIZONA_IN2_OSR_MASK 0x6000 /* IN2_OSR - [14:13] */ +#define ARIZONA_IN2_OSR_SHIFT 13 /* IN2_OSR - [14:13] */ +#define ARIZONA_IN2_OSR_WIDTH 2 /* IN2_OSR - [14:13] */ +#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */ +#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */ +#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */ +#define ARIZONA_IN2_MODE_MASK 0x0400 /* IN2_MODE - [10] */ +#define ARIZONA_IN2_MODE_SHIFT 10 /* IN2_MODE - [10] */ +#define ARIZONA_IN2_MODE_WIDTH 1 /* IN2_MODE - [10] */ +#define ARIZONA_IN2_SINGLE_ENDED_MASK 0x0200 /* IN2_MODE - [9] */ +#define ARIZONA_IN2_SINGLE_ENDED_SHIFT 9 /* IN2_MODE - [9] */ +#define ARIZONA_IN2_SINGLE_ENDED_WIDTH 1 /* IN2_MODE - [9] */ +#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */ +#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */ +#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */ + +/* + * R793 (0x319) - ADC Digital Volume 2L + */ +#define ARIZONA_IN2L_SRC_MASK 0x4000 /* IN2L_SRC - [14] */ +#define ARIZONA_IN2L_SRC_SHIFT 14 /* IN2L_SRC - [14] */ +#define ARIZONA_IN2L_SRC_WIDTH 1 /* IN2L_SRC - [14] */ +#define ARIZONA_IN2L_SRC_SE_MASK 0x2000 /* IN2L_SRC - [13] */ +#define ARIZONA_IN2L_SRC_SE_SHIFT 13 /* IN2L_SRC - [13] */ +#define ARIZONA_IN2L_SRC_SE_WIDTH 1 /* IN2L_SRC - [13] */ +#define ARIZONA_IN_VU 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ +#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */ +#define ARIZONA_IN2L_MUTE 0x0100 /* IN2L_MUTE */ +#define ARIZONA_IN2L_MUTE_MASK 0x0100 /* IN2L_MUTE */ +#define ARIZONA_IN2L_MUTE_SHIFT 8 /* IN2L_MUTE */ +#define ARIZONA_IN2L_MUTE_WIDTH 1 /* IN2L_MUTE */ +#define ARIZONA_IN2L_DIG_VOL_MASK 0x00FF /* 
IN2L_DIG_VOL - [7:0] */ +#define ARIZONA_IN2L_DIG_VOL_SHIFT 0 /* IN2L_DIG_VOL - [7:0] */ +#define ARIZONA_IN2L_DIG_VOL_WIDTH 8 /* IN2L_DIG_VOL - [7:0] */ + +/* + * R794 (0x31A) - DMIC2L Control + */ +#define ARIZONA_IN2_DMICL_DLY_MASK 0x003F /* IN2_DMICL_DLY - [5:0] */ +#define ARIZONA_IN2_DMICL_DLY_SHIFT 0 /* IN2_DMICL_DLY - [5:0] */ +#define ARIZONA_IN2_DMICL_DLY_WIDTH 6 /* IN2_DMICL_DLY - [5:0] */ + +/* + * R796 (0x31C) - IN2R Control + */ +#define ARIZONA_IN2R_HPF_MASK 0x8000 /* IN2R_HPF - [15] */ +#define ARIZONA_IN2R_HPF_SHIFT 15 /* IN2R_HPF - [15] */ +#define ARIZONA_IN2R_HPF_WIDTH 1 /* IN2R_HPF - [15] */ +#define ARIZONA_IN2R_PGA_VOL_MASK 0x00FE /* IN2R_PGA_VOL - [7:1] */ +#define ARIZONA_IN2R_PGA_VOL_SHIFT 1 /* IN2R_PGA_VOL - [7:1] */ +#define ARIZONA_IN2R_PGA_VOL_WIDTH 7 /* IN2R_PGA_VOL - [7:1] */ + +/* + * R797 (0x31D) - ADC Digital Volume 2R + */ +#define ARIZONA_IN_VU 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ +#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */ +#define ARIZONA_IN2R_MUTE 0x0100 /* IN2R_MUTE */ +#define ARIZONA_IN2R_MUTE_MASK 0x0100 /* IN2R_MUTE */ +#define ARIZONA_IN2R_MUTE_SHIFT 8 /* IN2R_MUTE */ +#define ARIZONA_IN2R_MUTE_WIDTH 1 /* IN2R_MUTE */ +#define ARIZONA_IN2R_DIG_VOL_MASK 0x00FF /* IN2R_DIG_VOL - [7:0] */ +#define ARIZONA_IN2R_DIG_VOL_SHIFT 0 /* IN2R_DIG_VOL - [7:0] */ +#define ARIZONA_IN2R_DIG_VOL_WIDTH 8 /* IN2R_DIG_VOL - [7:0] */ + +/* + * R798 (0x31E) - DMIC2R Control + */ +#define ARIZONA_IN2_DMICR_DLY_MASK 0x003F /* IN2_DMICR_DLY - [5:0] */ +#define ARIZONA_IN2_DMICR_DLY_SHIFT 0 /* IN2_DMICR_DLY - [5:0] */ +#define ARIZONA_IN2_DMICR_DLY_WIDTH 6 /* IN2_DMICR_DLY - [5:0] */ + +/* + * R800 (0x320) - IN3L Control + */ +#define ARIZONA_IN3L_HPF_MASK 0x8000 /* IN3L_HPF - [15] */ +#define ARIZONA_IN3L_HPF_SHIFT 15 /* IN3L_HPF - [15] */ +#define ARIZONA_IN3L_HPF_WIDTH 1 /* IN3L_HPF - [15] */ +#define ARIZONA_IN3_OSR_MASK 0x6000 /* IN3_OSR - [14:13] */ +#define ARIZONA_IN3_OSR_SHIFT 13 /* IN3_OSR - [14:13] */ +#define ARIZONA_IN3_OSR_WIDTH 2 /* IN3_OSR - [14:13] */ +#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */ +#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */ +#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */ +#define ARIZONA_IN3_MODE_MASK 0x0400 /* IN3_MODE - [10] */ +#define ARIZONA_IN3_MODE_SHIFT 10 /* IN3_MODE - [10] */ +#define ARIZONA_IN3_MODE_WIDTH 1 /* IN3_MODE - [10] */ +#define ARIZONA_IN3_SINGLE_ENDED_MASK 0x0200 /* IN3_MODE - [9] */ +#define ARIZONA_IN3_SINGLE_ENDED_SHIFT 9 /* IN3_MODE - [9] */ +#define ARIZONA_IN3_SINGLE_ENDED_WIDTH 1 /* IN3_MODE - [9] */ +#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */ +#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */ +#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */ + +/* + * R801 (0x321) - ADC Digital Volume 3L + */ +#define ARIZONA_IN_VU 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ +#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */ +#define ARIZONA_IN3L_MUTE 0x0100 /* IN3L_MUTE */ +#define ARIZONA_IN3L_MUTE_MASK 0x0100 /* IN3L_MUTE */ +#define ARIZONA_IN3L_MUTE_SHIFT 8 /* IN3L_MUTE */ +#define ARIZONA_IN3L_MUTE_WIDTH 1 /* IN3L_MUTE */ +#define ARIZONA_IN3L_DIG_VOL_MASK 0x00FF /* IN3L_DIG_VOL - [7:0] */ +#define ARIZONA_IN3L_DIG_VOL_SHIFT 0 /* IN3L_DIG_VOL - [7:0] */ +#define ARIZONA_IN3L_DIG_VOL_WIDTH 8 /* IN3L_DIG_VOL - [7:0] */ + +/* + * R802 (0x322) - DMIC3L Control + */ +#define 
ARIZONA_IN3_DMICL_DLY_MASK 0x003F /* IN3_DMICL_DLY - [5:0] */ +#define ARIZONA_IN3_DMICL_DLY_SHIFT 0 /* IN3_DMICL_DLY - [5:0] */ +#define ARIZONA_IN3_DMICL_DLY_WIDTH 6 /* IN3_DMICL_DLY - [5:0] */ + +/* + * R804 (0x324) - IN3R Control + */ +#define ARIZONA_IN3R_HPF_MASK 0x8000 /* IN3R_HPF - [15] */ +#define ARIZONA_IN3R_HPF_SHIFT 15 /* IN3R_HPF - [15] */ +#define ARIZONA_IN3R_HPF_WIDTH 1 /* IN3R_HPF - [15] */ +#define ARIZONA_IN3R_PGA_VOL_MASK 0x00FE /* IN3R_PGA_VOL - [7:1] */ +#define ARIZONA_IN3R_PGA_VOL_SHIFT 1 /* IN3R_PGA_VOL - [7:1] */ +#define ARIZONA_IN3R_PGA_VOL_WIDTH 7 /* IN3R_PGA_VOL - [7:1] */ + +/* + * R805 (0x325) - ADC Digital Volume 3R + */ +#define ARIZONA_IN_VU 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ +#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */ +#define ARIZONA_IN3R_MUTE 0x0100 /* IN3R_MUTE */ +#define ARIZONA_IN3R_MUTE_MASK 0x0100 /* IN3R_MUTE */ +#define ARIZONA_IN3R_MUTE_SHIFT 8 /* IN3R_MUTE */ +#define ARIZONA_IN3R_MUTE_WIDTH 1 /* IN3R_MUTE */ +#define ARIZONA_IN3R_DIG_VOL_MASK 0x00FF /* IN3R_DIG_VOL - [7:0] */ +#define ARIZONA_IN3R_DIG_VOL_SHIFT 0 /* IN3R_DIG_VOL - [7:0] */ +#define ARIZONA_IN3R_DIG_VOL_WIDTH 8 /* IN3R_DIG_VOL - [7:0] */ + +/* + * R806 (0x326) - DMIC3R Control + */ +#define ARIZONA_IN3_DMICR_DLY_MASK 0x003F /* IN3_DMICR_DLY - [5:0] */ +#define ARIZONA_IN3_DMICR_DLY_SHIFT 0 /* IN3_DMICR_DLY - [5:0] */ +#define ARIZONA_IN3_DMICR_DLY_WIDTH 6 /* IN3_DMICR_DLY - [5:0] */ + +/* + * R808 (0x328) - IN4 Control + */ +#define ARIZONA_IN4L_HPF_MASK 0x8000 /* IN4L_HPF - [15] */ +#define ARIZONA_IN4L_HPF_SHIFT 15 /* IN4L_HPF - [15] */ +#define ARIZONA_IN4L_HPF_WIDTH 1 /* IN4L_HPF - [15] */ +#define ARIZONA_IN4_OSR_MASK 0x6000 /* IN4_OSR - [14:13] */ +#define ARIZONA_IN4_OSR_SHIFT 13 /* IN4_OSR - [14:13] */ +#define ARIZONA_IN4_OSR_WIDTH 2 /* IN4_OSR - [14:13] */ +#define ARIZONA_IN4_DMIC_SUP_MASK 0x1800 /* IN4_DMIC_SUP - [12:11] */ +#define ARIZONA_IN4_DMIC_SUP_SHIFT 11 /* IN4_DMIC_SUP - [12:11] */ +#define ARIZONA_IN4_DMIC_SUP_WIDTH 2 /* IN4_DMIC_SUP - [12:11] */ + +/* + * R809 (0x329) - ADC Digital Volume 4L + */ +#define ARIZONA_IN_VU 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ +#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */ +#define ARIZONA_IN4L_MUTE 0x0100 /* IN4L_MUTE */ +#define ARIZONA_IN4L_MUTE_MASK 0x0100 /* IN4L_MUTE */ +#define ARIZONA_IN4L_MUTE_SHIFT 8 /* IN4L_MUTE */ +#define ARIZONA_IN4L_MUTE_WIDTH 1 /* IN4L_MUTE */ +#define ARIZONA_IN4L_DIG_VOL_MASK 0x00FF /* IN4L_DIG_VOL - [7:0] */ +#define ARIZONA_IN4L_DIG_VOL_SHIFT 0 /* IN4L_DIG_VOL - [7:0] */ +#define ARIZONA_IN4L_DIG_VOL_WIDTH 8 /* IN4L_DIG_VOL - [7:0] */ + +/* + * R810 (0x32A) - DMIC4L Control + */ +#define ARIZONA_IN4L_DMIC_DLY_MASK 0x003F /* IN4L_DMIC_DLY - [5:0] */ +#define ARIZONA_IN4L_DMIC_DLY_SHIFT 0 /* IN4L_DMIC_DLY - [5:0] */ +#define ARIZONA_IN4L_DMIC_DLY_WIDTH 6 /* IN4L_DMIC_DLY - [5:0] */ + +/* + * R812 (0x32C) - IN4R Control + */ +#define ARIZONA_IN4R_HPF_MASK 0x8000 /* IN4R_HPF - [15] */ +#define ARIZONA_IN4R_HPF_SHIFT 15 /* IN4R_HPF - [15] */ +#define ARIZONA_IN4R_HPF_WIDTH 1 /* IN4R_HPF - [15] */ + +/* + * R813 (0x32D) - ADC Digital Volume 4R + */ +#define ARIZONA_IN_VU 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ +#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ +#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */ +#define ARIZONA_IN4R_MUTE 0x0100 /* IN4R_MUTE */ +#define ARIZONA_IN4R_MUTE_MASK 0x0100 /* IN4R_MUTE */ +#define ARIZONA_IN4R_MUTE_SHIFT 
8 /* IN4R_MUTE */ +#define ARIZONA_IN4R_MUTE_WIDTH 1 /* IN4R_MUTE */ +#define ARIZONA_IN4R_DIG_VOL_MASK 0x00FF /* IN4R_DIG_VOL - [7:0] */ +#define ARIZONA_IN4R_DIG_VOL_SHIFT 0 /* IN4R_DIG_VOL - [7:0] */ +#define ARIZONA_IN4R_DIG_VOL_WIDTH 8 /* IN4R_DIG_VOL - [7:0] */ + +/* + * R814 (0x32E) - DMIC4R Control + */ +#define ARIZONA_IN4R_DMIC_DLY_MASK 0x003F /* IN4R_DMIC_DLY - [5:0] */ +#define ARIZONA_IN4R_DMIC_DLY_SHIFT 0 /* IN4R_DMIC_DLY - [5:0] */ +#define ARIZONA_IN4R_DMIC_DLY_WIDTH 6 /* IN4R_DMIC_DLY - [5:0] */ + +/* + * R1024 (0x400) - Output Enables 1 + */ +#define ARIZONA_OUT6L_ENA 0x0800 /* OUT6L_ENA */ +#define ARIZONA_OUT6L_ENA_MASK 0x0800 /* OUT6L_ENA */ +#define ARIZONA_OUT6L_ENA_SHIFT 11 /* OUT6L_ENA */ +#define ARIZONA_OUT6L_ENA_WIDTH 1 /* OUT6L_ENA */ +#define ARIZONA_OUT6R_ENA 0x0400 /* OUT6R_ENA */ +#define ARIZONA_OUT6R_ENA_MASK 0x0400 /* OUT6R_ENA */ +#define ARIZONA_OUT6R_ENA_SHIFT 10 /* OUT6R_ENA */ +#define ARIZONA_OUT6R_ENA_WIDTH 1 /* OUT6R_ENA */ +#define ARIZONA_OUT5L_ENA 0x0200 /* OUT5L_ENA */ +#define ARIZONA_OUT5L_ENA_MASK 0x0200 /* OUT5L_ENA */ +#define ARIZONA_OUT5L_ENA_SHIFT 9 /* OUT5L_ENA */ +#define ARIZONA_OUT5L_ENA_WIDTH 1 /* OUT5L_ENA */ +#define ARIZONA_OUT5R_ENA 0x0100 /* OUT5R_ENA */ +#define ARIZONA_OUT5R_ENA_MASK 0x0100 /* OUT5R_ENA */ +#define ARIZONA_OUT5R_ENA_SHIFT 8 /* OUT5R_ENA */ +#define ARIZONA_OUT5R_ENA_WIDTH 1 /* OUT5R_ENA */ +#define ARIZONA_OUT4L_ENA 0x0080 /* OUT4L_ENA */ +#define ARIZONA_OUT4L_ENA_MASK 0x0080 /* OUT4L_ENA */ +#define ARIZONA_OUT4L_ENA_SHIFT 7 /* OUT4L_ENA */ +#define ARIZONA_OUT4L_ENA_WIDTH 1 /* OUT4L_ENA */ +#define ARIZONA_OUT4R_ENA 0x0040 /* OUT4R_ENA */ +#define ARIZONA_OUT4R_ENA_MASK 0x0040 /* OUT4R_ENA */ +#define ARIZONA_OUT4R_ENA_SHIFT 6 /* OUT4R_ENA */ +#define ARIZONA_OUT4R_ENA_WIDTH 1 /* OUT4R_ENA */ +#define ARIZONA_OUT3L_ENA 0x0020 /* OUT3L_ENA */ +#define ARIZONA_OUT3L_ENA_MASK 0x0020 /* OUT3L_ENA */ +#define ARIZONA_OUT3L_ENA_SHIFT 5 /* OUT3L_ENA */ +#define ARIZONA_OUT3L_ENA_WIDTH 1 /* OUT3L_ENA */ +#define ARIZONA_OUT3R_ENA 0x0010 /* OUT3R_ENA */ +#define ARIZONA_OUT3R_ENA_MASK 0x0010 /* OUT3R_ENA */ +#define ARIZONA_OUT3R_ENA_SHIFT 4 /* OUT3R_ENA */ +#define ARIZONA_OUT3R_ENA_WIDTH 1 /* OUT3R_ENA */ +#define ARIZONA_OUT2L_ENA 0x0008 /* OUT2L_ENA */ +#define ARIZONA_OUT2L_ENA_MASK 0x0008 /* OUT2L_ENA */ +#define ARIZONA_OUT2L_ENA_SHIFT 3 /* OUT2L_ENA */ +#define ARIZONA_OUT2L_ENA_WIDTH 1 /* OUT2L_ENA */ +#define ARIZONA_OUT2R_ENA 0x0004 /* OUT2R_ENA */ +#define ARIZONA_OUT2R_ENA_MASK 0x0004 /* OUT2R_ENA */ +#define ARIZONA_OUT2R_ENA_SHIFT 2 /* OUT2R_ENA */ +#define ARIZONA_OUT2R_ENA_WIDTH 1 /* OUT2R_ENA */ +#define ARIZONA_OUT1L_ENA 0x0002 /* OUT1L_ENA */ +#define ARIZONA_OUT1L_ENA_MASK 0x0002 /* OUT1L_ENA */ +#define ARIZONA_OUT1L_ENA_SHIFT 1 /* OUT1L_ENA */ +#define ARIZONA_OUT1L_ENA_WIDTH 1 /* OUT1L_ENA */ +#define ARIZONA_OUT1R_ENA 0x0001 /* OUT1R_ENA */ +#define ARIZONA_OUT1R_ENA_MASK 0x0001 /* OUT1R_ENA */ +#define ARIZONA_OUT1R_ENA_SHIFT 0 /* OUT1R_ENA */ +#define ARIZONA_OUT1R_ENA_WIDTH 1 /* OUT1R_ENA */ + +/* + * R1025 (0x401) - Output Status 1 + */ +#define ARIZONA_OUT6L_ENA_STS 0x0800 /* OUT6L_ENA_STS */ +#define ARIZONA_OUT6L_ENA_STS_MASK 0x0800 /* OUT6L_ENA_STS */ +#define ARIZONA_OUT6L_ENA_STS_SHIFT 11 /* OUT6L_ENA_STS */ +#define ARIZONA_OUT6L_ENA_STS_WIDTH 1 /* OUT6L_ENA_STS */ +#define ARIZONA_OUT6R_ENA_STS 0x0400 /* OUT6R_ENA_STS */ +#define ARIZONA_OUT6R_ENA_STS_MASK 0x0400 /* OUT6R_ENA_STS */ +#define ARIZONA_OUT6R_ENA_STS_SHIFT 10 /* OUT6R_ENA_STS */ +#define 
ARIZONA_OUT6R_ENA_STS_WIDTH 1 /* OUT6R_ENA_STS */ +#define ARIZONA_OUT5L_ENA_STS 0x0200 /* OUT5L_ENA_STS */ +#define ARIZONA_OUT5L_ENA_STS_MASK 0x0200 /* OUT5L_ENA_STS */ +#define ARIZONA_OUT5L_ENA_STS_SHIFT 9 /* OUT5L_ENA_STS */ +#define ARIZONA_OUT5L_ENA_STS_WIDTH 1 /* OUT5L_ENA_STS */ +#define ARIZONA_OUT5R_ENA_STS 0x0100 /* OUT5R_ENA_STS */ +#define ARIZONA_OUT5R_ENA_STS_MASK 0x0100 /* OUT5R_ENA_STS */ +#define ARIZONA_OUT5R_ENA_STS_SHIFT 8 /* OUT5R_ENA_STS */ +#define ARIZONA_OUT5R_ENA_STS_WIDTH 1 /* OUT5R_ENA_STS */ +#define ARIZONA_OUT4L_ENA_STS 0x0080 /* OUT4L_ENA_STS */ +#define ARIZONA_OUT4L_ENA_STS_MASK 0x0080 /* OUT4L_ENA_STS */ +#define ARIZONA_OUT4L_ENA_STS_SHIFT 7 /* OUT4L_ENA_STS */ +#define ARIZONA_OUT4L_ENA_STS_WIDTH 1 /* OUT4L_ENA_STS */ +#define ARIZONA_OUT4R_ENA_STS 0x0040 /* OUT4R_ENA_STS */ +#define ARIZONA_OUT4R_ENA_STS_MASK 0x0040 /* OUT4R_ENA_STS */ +#define ARIZONA_OUT4R_ENA_STS_SHIFT 6 /* OUT4R_ENA_STS */ +#define ARIZONA_OUT4R_ENA_STS_WIDTH 1 /* OUT4R_ENA_STS */ + +/* + * R1032 (0x408) - Output Rate 1 + */ +#define ARIZONA_OUT_RATE_MASK 0x7800 /* OUT_RATE - [14:11] */ +#define ARIZONA_OUT_RATE_SHIFT 11 /* OUT_RATE - [14:11] */ +#define ARIZONA_OUT_RATE_WIDTH 4 /* OUT_RATE - [14:11] */ + +/* + * R1033 (0x409) - Output Volume Ramp + */ +#define ARIZONA_OUT_VD_RAMP_MASK 0x0070 /* OUT_VD_RAMP - [6:4] */ +#define ARIZONA_OUT_VD_RAMP_SHIFT 4 /* OUT_VD_RAMP - [6:4] */ +#define ARIZONA_OUT_VD_RAMP_WIDTH 3 /* OUT_VD_RAMP - [6:4] */ +#define ARIZONA_OUT_VI_RAMP_MASK 0x0007 /* OUT_VI_RAMP - [2:0] */ +#define ARIZONA_OUT_VI_RAMP_SHIFT 0 /* OUT_VI_RAMP - [2:0] */ +#define ARIZONA_OUT_VI_RAMP_WIDTH 3 /* OUT_VI_RAMP - [2:0] */ + +/* + * R1040 (0x410) - Output Path Config 1L + */ +#define ARIZONA_OUT1_LP_MODE 0x8000 /* OUT1_LP_MODE */ +#define ARIZONA_OUT1_LP_MODE_MASK 0x8000 /* OUT1_LP_MODE */ +#define ARIZONA_OUT1_LP_MODE_SHIFT 15 /* OUT1_LP_MODE */ +#define ARIZONA_OUT1_LP_MODE_WIDTH 1 /* OUT1_LP_MODE */ +#define ARIZONA_OUT1_OSR 0x2000 /* OUT1_OSR */ +#define ARIZONA_OUT1_OSR_MASK 0x2000 /* OUT1_OSR */ +#define ARIZONA_OUT1_OSR_SHIFT 13 /* OUT1_OSR */ +#define ARIZONA_OUT1_OSR_WIDTH 1 /* OUT1_OSR */ +#define ARIZONA_OUT1_MONO 0x1000 /* OUT1_MONO */ +#define ARIZONA_OUT1_MONO_MASK 0x1000 /* OUT1_MONO */ +#define ARIZONA_OUT1_MONO_SHIFT 12 /* OUT1_MONO */ +#define ARIZONA_OUT1_MONO_WIDTH 1 /* OUT1_MONO */ +#define ARIZONA_OUT1L_ANC_SRC_MASK 0x0C00 /* OUT1L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT1L_ANC_SRC_SHIFT 10 /* OUT1L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT1L_ANC_SRC_WIDTH 2 /* OUT1L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT1L_PGA_VOL_MASK 0x00FE /* OUT1L_PGA_VOL - [7:1] */ +#define ARIZONA_OUT1L_PGA_VOL_SHIFT 1 /* OUT1L_PGA_VOL - [7:1] */ +#define ARIZONA_OUT1L_PGA_VOL_WIDTH 7 /* OUT1L_PGA_VOL - [7:1] */ + +/* + * R1041 (0x411) - DAC Digital Volume 1L + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT1L_MUTE 0x0100 /* OUT1L_MUTE */ +#define ARIZONA_OUT1L_MUTE_MASK 0x0100 /* OUT1L_MUTE */ +#define ARIZONA_OUT1L_MUTE_SHIFT 8 /* OUT1L_MUTE */ +#define ARIZONA_OUT1L_MUTE_WIDTH 1 /* OUT1L_MUTE */ +#define ARIZONA_OUT1L_VOL_MASK 0x00FF /* OUT1L_VOL - [7:0] */ +#define ARIZONA_OUT1L_VOL_SHIFT 0 /* OUT1L_VOL - [7:0] */ +#define ARIZONA_OUT1L_VOL_WIDTH 8 /* OUT1L_VOL - [7:0] */ + +/* + * R1042 (0x412) - DAC Volume Limit 1L + */ +#define ARIZONA_OUT1L_VOL_LIM_MASK 0x00FF /* OUT1L_VOL_LIM - [7:0] */ +#define 
ARIZONA_OUT1L_VOL_LIM_SHIFT 0 /* OUT1L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT1L_VOL_LIM_WIDTH 8 /* OUT1L_VOL_LIM - [7:0] */ + +/* + * R1043 (0x413) - Noise Gate Select 1L + */ +#define ARIZONA_OUT1L_NGATE_SRC_MASK 0x0FFF /* OUT1L_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT1L_NGATE_SRC_SHIFT 0 /* OUT1L_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT1L_NGATE_SRC_WIDTH 12 /* OUT1L_NGATE_SRC - [11:0] */ + +/* + * R1044 (0x414) - Output Path Config 1R + */ +#define ARIZONA_OUT1R_ANC_SRC_MASK 0x0C00 /* OUT1R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT1R_ANC_SRC_SHIFT 10 /* OUT1R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT1R_ANC_SRC_WIDTH 2 /* OUT1R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT1R_PGA_VOL_MASK 0x00FE /* OUT1R_PGA_VOL - [7:1] */ +#define ARIZONA_OUT1R_PGA_VOL_SHIFT 1 /* OUT1R_PGA_VOL - [7:1] */ +#define ARIZONA_OUT1R_PGA_VOL_WIDTH 7 /* OUT1R_PGA_VOL - [7:1] */ + +/* + * R1045 (0x415) - DAC Digital Volume 1R + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT1R_MUTE 0x0100 /* OUT1R_MUTE */ +#define ARIZONA_OUT1R_MUTE_MASK 0x0100 /* OUT1R_MUTE */ +#define ARIZONA_OUT1R_MUTE_SHIFT 8 /* OUT1R_MUTE */ +#define ARIZONA_OUT1R_MUTE_WIDTH 1 /* OUT1R_MUTE */ +#define ARIZONA_OUT1R_VOL_MASK 0x00FF /* OUT1R_VOL - [7:0] */ +#define ARIZONA_OUT1R_VOL_SHIFT 0 /* OUT1R_VOL - [7:0] */ +#define ARIZONA_OUT1R_VOL_WIDTH 8 /* OUT1R_VOL - [7:0] */ + +/* + * R1046 (0x416) - DAC Volume Limit 1R + */ +#define ARIZONA_OUT1R_VOL_LIM_MASK 0x00FF /* OUT1R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT1R_VOL_LIM_SHIFT 0 /* OUT1R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT1R_VOL_LIM_WIDTH 8 /* OUT1R_VOL_LIM - [7:0] */ + +/* + * R1047 (0x417) - Noise Gate Select 1R + */ +#define ARIZONA_OUT1R_NGATE_SRC_MASK 0x0FFF /* OUT1R_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT1R_NGATE_SRC_SHIFT 0 /* OUT1R_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT1R_NGATE_SRC_WIDTH 12 /* OUT1R_NGATE_SRC - [11:0] */ + +/* + * R1048 (0x418) - Output Path Config 2L + */ +#define ARIZONA_OUT2_LP_MODE 0x8000 /* OUT2_LP_MODE */ +#define ARIZONA_OUT2_LP_MODE_MASK 0x8000 /* OUT2_LP_MODE */ +#define ARIZONA_OUT2_LP_MODE_SHIFT 15 /* OUT2_LP_MODE */ +#define ARIZONA_OUT2_LP_MODE_WIDTH 1 /* OUT2_LP_MODE */ +#define ARIZONA_OUT2_OSR 0x2000 /* OUT2_OSR */ +#define ARIZONA_OUT2_OSR_MASK 0x2000 /* OUT2_OSR */ +#define ARIZONA_OUT2_OSR_SHIFT 13 /* OUT2_OSR */ +#define ARIZONA_OUT2_OSR_WIDTH 1 /* OUT2_OSR */ +#define ARIZONA_OUT2_MONO 0x1000 /* OUT2_MONO */ +#define ARIZONA_OUT2_MONO_MASK 0x1000 /* OUT2_MONO */ +#define ARIZONA_OUT2_MONO_SHIFT 12 /* OUT2_MONO */ +#define ARIZONA_OUT2_MONO_WIDTH 1 /* OUT2_MONO */ +#define ARIZONA_OUT2L_ANC_SRC_MASK 0x0C00 /* OUT2L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT2L_ANC_SRC_SHIFT 10 /* OUT2L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT2L_ANC_SRC_WIDTH 2 /* OUT2L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT2L_PGA_VOL_MASK 0x00FE /* OUT2L_PGA_VOL - [7:1] */ +#define ARIZONA_OUT2L_PGA_VOL_SHIFT 1 /* OUT2L_PGA_VOL - [7:1] */ +#define ARIZONA_OUT2L_PGA_VOL_WIDTH 7 /* OUT2L_PGA_VOL - [7:1] */ + +/* + * R1049 (0x419) - DAC Digital Volume 2L + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT2L_MUTE 0x0100 /* OUT2L_MUTE */ +#define ARIZONA_OUT2L_MUTE_MASK 0x0100 /* OUT2L_MUTE */ +#define ARIZONA_OUT2L_MUTE_SHIFT 8 /* OUT2L_MUTE */ +#define 
ARIZONA_OUT2L_MUTE_WIDTH 1 /* OUT2L_MUTE */ +#define ARIZONA_OUT2L_VOL_MASK 0x00FF /* OUT2L_VOL - [7:0] */ +#define ARIZONA_OUT2L_VOL_SHIFT 0 /* OUT2L_VOL - [7:0] */ +#define ARIZONA_OUT2L_VOL_WIDTH 8 /* OUT2L_VOL - [7:0] */ + +/* + * R1050 (0x41A) - DAC Volume Limit 2L + */ +#define ARIZONA_OUT2L_VOL_LIM_MASK 0x00FF /* OUT2L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT2L_VOL_LIM_SHIFT 0 /* OUT2L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT2L_VOL_LIM_WIDTH 8 /* OUT2L_VOL_LIM - [7:0] */ + +/* + * R1051 (0x41B) - Noise Gate Select 2L + */ +#define ARIZONA_OUT2L_NGATE_SRC_MASK 0x0FFF /* OUT2L_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT2L_NGATE_SRC_SHIFT 0 /* OUT2L_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT2L_NGATE_SRC_WIDTH 12 /* OUT2L_NGATE_SRC - [11:0] */ + +/* + * R1052 (0x41C) - Output Path Config 2R + */ +#define ARIZONA_OUT2R_ANC_SRC_MASK 0x0C00 /* OUT2R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT2R_ANC_SRC_SHIFT 10 /* OUT2R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT2R_ANC_SRC_WIDTH 2 /* OUT2R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT2R_PGA_VOL_MASK 0x00FE /* OUT2R_PGA_VOL - [7:1] */ +#define ARIZONA_OUT2R_PGA_VOL_SHIFT 1 /* OUT2R_PGA_VOL - [7:1] */ +#define ARIZONA_OUT2R_PGA_VOL_WIDTH 7 /* OUT2R_PGA_VOL - [7:1] */ + +/* + * R1053 (0x41D) - DAC Digital Volume 2R + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT2R_MUTE 0x0100 /* OUT2R_MUTE */ +#define ARIZONA_OUT2R_MUTE_MASK 0x0100 /* OUT2R_MUTE */ +#define ARIZONA_OUT2R_MUTE_SHIFT 8 /* OUT2R_MUTE */ +#define ARIZONA_OUT2R_MUTE_WIDTH 1 /* OUT2R_MUTE */ +#define ARIZONA_OUT2R_VOL_MASK 0x00FF /* OUT2R_VOL - [7:0] */ +#define ARIZONA_OUT2R_VOL_SHIFT 0 /* OUT2R_VOL - [7:0] */ +#define ARIZONA_OUT2R_VOL_WIDTH 8 /* OUT2R_VOL - [7:0] */ + +/* + * R1054 (0x41E) - DAC Volume Limit 2R + */ +#define ARIZONA_OUT2R_VOL_LIM_MASK 0x00FF /* OUT2R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT2R_VOL_LIM_SHIFT 0 /* OUT2R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT2R_VOL_LIM_WIDTH 8 /* OUT2R_VOL_LIM - [7:0] */ + +/* + * R1055 (0x41F) - Noise Gate Select 2R + */ +#define ARIZONA_OUT2R_NGATE_SRC_MASK 0x0FFF /* OUT2R_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT2R_NGATE_SRC_SHIFT 0 /* OUT2R_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT2R_NGATE_SRC_WIDTH 12 /* OUT2R_NGATE_SRC - [11:0] */ + +/* + * R1056 (0x420) - Output Path Config 3L + */ +#define ARIZONA_OUT3_LP_MODE 0x8000 /* OUT3_LP_MODE */ +#define ARIZONA_OUT3_LP_MODE_MASK 0x8000 /* OUT3_LP_MODE */ +#define ARIZONA_OUT3_LP_MODE_SHIFT 15 /* OUT3_LP_MODE */ +#define ARIZONA_OUT3_LP_MODE_WIDTH 1 /* OUT3_LP_MODE */ +#define ARIZONA_OUT3_OSR 0x2000 /* OUT3_OSR */ +#define ARIZONA_OUT3_OSR_MASK 0x2000 /* OUT3_OSR */ +#define ARIZONA_OUT3_OSR_SHIFT 13 /* OUT3_OSR */ +#define ARIZONA_OUT3_OSR_WIDTH 1 /* OUT3_OSR */ +#define ARIZONA_OUT3_MONO 0x1000 /* OUT3_MONO */ +#define ARIZONA_OUT3_MONO_MASK 0x1000 /* OUT3_MONO */ +#define ARIZONA_OUT3_MONO_SHIFT 12 /* OUT3_MONO */ +#define ARIZONA_OUT3_MONO_WIDTH 1 /* OUT3_MONO */ +#define ARIZONA_OUT3L_ANC_SRC_MASK 0x0C00 /* OUT3L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT3L_ANC_SRC_SHIFT 10 /* OUT3L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT3L_ANC_SRC_WIDTH 2 /* OUT3L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT3L_PGA_VOL_MASK 0x00FE /* OUT3L_PGA_VOL - [7:1] */ +#define ARIZONA_OUT3L_PGA_VOL_SHIFT 1 /* OUT3L_PGA_VOL - [7:1] */ +#define ARIZONA_OUT3L_PGA_VOL_WIDTH 7 /* OUT3L_PGA_VOL - [7:1] */ + +/* + * R1057 (0x421) - DAC Digital Volume 3L + */ 
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT3L_MUTE 0x0100 /* OUT3L_MUTE */ +#define ARIZONA_OUT3L_MUTE_MASK 0x0100 /* OUT3L_MUTE */ +#define ARIZONA_OUT3L_MUTE_SHIFT 8 /* OUT3L_MUTE */ +#define ARIZONA_OUT3L_MUTE_WIDTH 1 /* OUT3L_MUTE */ +#define ARIZONA_OUT3L_VOL_MASK 0x00FF /* OUT3L_VOL - [7:0] */ +#define ARIZONA_OUT3L_VOL_SHIFT 0 /* OUT3L_VOL - [7:0] */ +#define ARIZONA_OUT3L_VOL_WIDTH 8 /* OUT3L_VOL - [7:0] */ + +/* + * R1058 (0x422) - DAC Volume Limit 3L + */ +#define ARIZONA_OUT3L_VOL_LIM_MASK 0x00FF /* OUT3L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT3L_VOL_LIM_SHIFT 0 /* OUT3L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT3L_VOL_LIM_WIDTH 8 /* OUT3L_VOL_LIM - [7:0] */ + +/* + * R1059 (0x423) - Noise Gate Select 3L + */ +#define ARIZONA_OUT3_NGATE_SRC_MASK 0x0FFF /* OUT3_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT3_NGATE_SRC_SHIFT 0 /* OUT3_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT3_NGATE_SRC_WIDTH 12 /* OUT3_NGATE_SRC - [11:0] */ + +/* + * R1060 (0x424) - Output Path Config 3R + */ +#define ARIZONA_OUT3R_PGA_VOL_MASK 0x00FE /* OUT3R_PGA_VOL - [7:1] */ +#define ARIZONA_OUT3R_PGA_VOL_SHIFT 1 /* OUT3R_PGA_VOL - [7:1] */ +#define ARIZONA_OUT3R_PGA_VOL_WIDTH 7 /* OUT3R_PGA_VOL - [7:1] */ + +/* + * R1061 (0x425) - DAC Digital Volume 3R + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT3R_MUTE 0x0100 /* OUT3R_MUTE */ +#define ARIZONA_OUT3R_MUTE_MASK 0x0100 /* OUT3R_MUTE */ +#define ARIZONA_OUT3R_MUTE_SHIFT 8 /* OUT3R_MUTE */ +#define ARIZONA_OUT3R_MUTE_WIDTH 1 /* OUT3R_MUTE */ +#define ARIZONA_OUT3R_VOL_MASK 0x00FF /* OUT3R_VOL - [7:0] */ +#define ARIZONA_OUT3R_VOL_SHIFT 0 /* OUT3R_VOL - [7:0] */ +#define ARIZONA_OUT3R_VOL_WIDTH 8 /* OUT3R_VOL - [7:0] */ + +/* + * R1062 (0x426) - DAC Volume Limit 3R + */ +#define ARIZONA_OUT3R_ANC_SRC_MASK 0x0C00 /* OUT3R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT3R_ANC_SRC_SHIFT 10 /* OUT3R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT3R_ANC_SRC_WIDTH 2 /* OUT3R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT3R_VOL_LIM_MASK 0x00FF /* OUT3R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT3R_VOL_LIM_SHIFT 0 /* OUT3R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT3R_VOL_LIM_WIDTH 8 /* OUT3R_VOL_LIM - [7:0] */ + +/* + * R1064 (0x428) - Output Path Config 4L + */ +#define ARIZONA_OUT4_OSR 0x2000 /* OUT4_OSR */ +#define ARIZONA_OUT4_OSR_MASK 0x2000 /* OUT4_OSR */ +#define ARIZONA_OUT4_OSR_SHIFT 13 /* OUT4_OSR */ +#define ARIZONA_OUT4_OSR_WIDTH 1 /* OUT4_OSR */ +#define ARIZONA_OUT4L_ANC_SRC_MASK 0x0C00 /* OUT4L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT4L_ANC_SRC_SHIFT 10 /* OUT4L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT4L_ANC_SRC_WIDTH 2 /* OUT4L_ANC_SRC - [11:10] */ + +/* + * R1065 (0x429) - DAC Digital Volume 4L + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT4L_MUTE 0x0100 /* OUT4L_MUTE */ +#define ARIZONA_OUT4L_MUTE_MASK 0x0100 /* OUT4L_MUTE */ +#define ARIZONA_OUT4L_MUTE_SHIFT 8 /* OUT4L_MUTE */ +#define ARIZONA_OUT4L_MUTE_WIDTH 1 /* OUT4L_MUTE */ +#define ARIZONA_OUT4L_VOL_MASK 0x00FF /* OUT4L_VOL - [7:0] */ +#define ARIZONA_OUT4L_VOL_SHIFT 0 /* OUT4L_VOL - [7:0] */ +#define ARIZONA_OUT4L_VOL_WIDTH 8 /* OUT4L_VOL - [7:0] 
*/ + +/* + * R1066 (0x42A) - Out Volume 4L + */ +#define ARIZONA_OUT4L_VOL_LIM_MASK 0x00FF /* OUT4L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT4L_VOL_LIM_SHIFT 0 /* OUT4L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT4L_VOL_LIM_WIDTH 8 /* OUT4L_VOL_LIM - [7:0] */ + +/* + * R1067 (0x42B) - Noise Gate Select 4L + */ +#define ARIZONA_OUT4L_NGATE_SRC_MASK 0x0FFF /* OUT4L_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT4L_NGATE_SRC_SHIFT 0 /* OUT4L_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT4L_NGATE_SRC_WIDTH 12 /* OUT4L_NGATE_SRC - [11:0] */ + +/* + * R1068 (0x42C) - Output Path Config 4R + */ +#define ARIZONA_OUT4R_ANC_SRC_MASK 0x0C00 /* OUT4R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT4R_ANC_SRC_SHIFT 10 /* OUT4R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT4R_ANC_SRC_WIDTH 2 /* OUT4R_ANC_SRC - [11:10] */ + +/* + * R1069 (0x42D) - DAC Digital Volume 4R + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT4R_MUTE 0x0100 /* OUT4R_MUTE */ +#define ARIZONA_OUT4R_MUTE_MASK 0x0100 /* OUT4R_MUTE */ +#define ARIZONA_OUT4R_MUTE_SHIFT 8 /* OUT4R_MUTE */ +#define ARIZONA_OUT4R_MUTE_WIDTH 1 /* OUT4R_MUTE */ +#define ARIZONA_OUT4R_VOL_MASK 0x00FF /* OUT4R_VOL - [7:0] */ +#define ARIZONA_OUT4R_VOL_SHIFT 0 /* OUT4R_VOL - [7:0] */ +#define ARIZONA_OUT4R_VOL_WIDTH 8 /* OUT4R_VOL - [7:0] */ + +/* + * R1070 (0x42E) - Out Volume 4R + */ +#define ARIZONA_OUT4R_VOL_LIM_MASK 0x00FF /* OUT4R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT4R_VOL_LIM_SHIFT 0 /* OUT4R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT4R_VOL_LIM_WIDTH 8 /* OUT4R_VOL_LIM - [7:0] */ + +/* + * R1071 (0x42F) - Noise Gate Select 4R + */ +#define ARIZONA_OUT4R_NGATE_SRC_MASK 0x0FFF /* OUT4R_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT4R_NGATE_SRC_SHIFT 0 /* OUT4R_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT4R_NGATE_SRC_WIDTH 12 /* OUT4R_NGATE_SRC - [11:0] */ + +/* + * R1072 (0x430) - Output Path Config 5L + */ +#define ARIZONA_OUT5_OSR 0x2000 /* OUT5_OSR */ +#define ARIZONA_OUT5_OSR_MASK 0x2000 /* OUT5_OSR */ +#define ARIZONA_OUT5_OSR_SHIFT 13 /* OUT5_OSR */ +#define ARIZONA_OUT5_OSR_WIDTH 1 /* OUT5_OSR */ +#define ARIZONA_OUT5L_ANC_SRC_MASK 0x0C00 /* OUT5L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT5L_ANC_SRC_SHIFT 10 /* OUT5L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT5L_ANC_SRC_WIDTH 2 /* OUT5L_ANC_SRC - [11:10] */ + +/* + * R1073 (0x431) - DAC Digital Volume 5L + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT5L_MUTE 0x0100 /* OUT5L_MUTE */ +#define ARIZONA_OUT5L_MUTE_MASK 0x0100 /* OUT5L_MUTE */ +#define ARIZONA_OUT5L_MUTE_SHIFT 8 /* OUT5L_MUTE */ +#define ARIZONA_OUT5L_MUTE_WIDTH 1 /* OUT5L_MUTE */ +#define ARIZONA_OUT5L_VOL_MASK 0x00FF /* OUT5L_VOL - [7:0] */ +#define ARIZONA_OUT5L_VOL_SHIFT 0 /* OUT5L_VOL - [7:0] */ +#define ARIZONA_OUT5L_VOL_WIDTH 8 /* OUT5L_VOL - [7:0] */ + +/* + * R1074 (0x432) - DAC Volume Limit 5L + */ +#define ARIZONA_OUT5L_VOL_LIM_MASK 0x00FF /* OUT5L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT5L_VOL_LIM_SHIFT 0 /* OUT5L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT5L_VOL_LIM_WIDTH 8 /* OUT5L_VOL_LIM - [7:0] */ + +/* + * R1075 (0x433) - Noise Gate Select 5L + */ +#define ARIZONA_OUT5L_NGATE_SRC_MASK 0x0FFF /* OUT5L_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT5L_NGATE_SRC_SHIFT 0 /* OUT5L_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT5L_NGATE_SRC_WIDTH 12 /* 
OUT5L_NGATE_SRC - [11:0] */ + +/* + * R1076 (0x434) - Output Path Config 5R + */ +#define ARIZONA_OUT5R_ANC_SRC_MASK 0x0C00 /* OUT5R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT5R_ANC_SRC_SHIFT 10 /* OUT5R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT5R_ANC_SRC_WIDTH 2 /* OUT5R_ANC_SRC - [11:10] */ + +/* + * R1077 (0x435) - DAC Digital Volume 5R + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT5R_MUTE 0x0100 /* OUT5R_MUTE */ +#define ARIZONA_OUT5R_MUTE_MASK 0x0100 /* OUT5R_MUTE */ +#define ARIZONA_OUT5R_MUTE_SHIFT 8 /* OUT5R_MUTE */ +#define ARIZONA_OUT5R_MUTE_WIDTH 1 /* OUT5R_MUTE */ +#define ARIZONA_OUT5R_VOL_MASK 0x00FF /* OUT5R_VOL - [7:0] */ +#define ARIZONA_OUT5R_VOL_SHIFT 0 /* OUT5R_VOL - [7:0] */ +#define ARIZONA_OUT5R_VOL_WIDTH 8 /* OUT5R_VOL - [7:0] */ + +/* + * R1078 (0x436) - DAC Volume Limit 5R + */ +#define ARIZONA_OUT5R_VOL_LIM_MASK 0x00FF /* OUT5R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT5R_VOL_LIM_SHIFT 0 /* OUT5R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT5R_VOL_LIM_WIDTH 8 /* OUT5R_VOL_LIM - [7:0] */ + +/* + * R1079 (0x437) - Noise Gate Select 5R + */ +#define ARIZONA_OUT5R_NGATE_SRC_MASK 0x0FFF /* OUT5R_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT5R_NGATE_SRC_SHIFT 0 /* OUT5R_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT5R_NGATE_SRC_WIDTH 12 /* OUT5R_NGATE_SRC - [11:0] */ + +/* + * R1080 (0x438) - Output Path Config 6L + */ +#define ARIZONA_OUT6_OSR 0x2000 /* OUT6_OSR */ +#define ARIZONA_OUT6_OSR_MASK 0x2000 /* OUT6_OSR */ +#define ARIZONA_OUT6_OSR_SHIFT 13 /* OUT6_OSR */ +#define ARIZONA_OUT6_OSR_WIDTH 1 /* OUT6_OSR */ +#define ARIZONA_OUT6L_ANC_SRC_MASK 0x0C00 /* OUT6L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT6L_ANC_SRC_SHIFT 10 /* OUT6L_ANC_SRC - [11:10] */ +#define ARIZONA_OUT6L_ANC_SRC_WIDTH 2 /* OUT6L_ANC_SRC - [11:10] */ + +/* + * R1081 (0x439) - DAC Digital Volume 6L + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT6L_MUTE 0x0100 /* OUT6L_MUTE */ +#define ARIZONA_OUT6L_MUTE_MASK 0x0100 /* OUT6L_MUTE */ +#define ARIZONA_OUT6L_MUTE_SHIFT 8 /* OUT6L_MUTE */ +#define ARIZONA_OUT6L_MUTE_WIDTH 1 /* OUT6L_MUTE */ +#define ARIZONA_OUT6L_VOL_MASK 0x00FF /* OUT6L_VOL - [7:0] */ +#define ARIZONA_OUT6L_VOL_SHIFT 0 /* OUT6L_VOL - [7:0] */ +#define ARIZONA_OUT6L_VOL_WIDTH 8 /* OUT6L_VOL - [7:0] */ + +/* + * R1082 (0x43A) - DAC Volume Limit 6L + */ +#define ARIZONA_OUT6L_VOL_LIM_MASK 0x00FF /* OUT6L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT6L_VOL_LIM_SHIFT 0 /* OUT6L_VOL_LIM - [7:0] */ +#define ARIZONA_OUT6L_VOL_LIM_WIDTH 8 /* OUT6L_VOL_LIM - [7:0] */ + +/* + * R1083 (0x43B) - Noise Gate Select 6L + */ +#define ARIZONA_OUT6L_NGATE_SRC_MASK 0x0FFF /* OUT6L_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT6L_NGATE_SRC_SHIFT 0 /* OUT6L_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT6L_NGATE_SRC_WIDTH 12 /* OUT6L_NGATE_SRC - [11:0] */ + +/* + * R1084 (0x43C) - Output Path Config 6R + */ +#define ARIZONA_OUT6R_ANC_SRC_MASK 0x0C00 /* OUT6R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT6R_ANC_SRC_SHIFT 10 /* OUT6R_ANC_SRC - [11:10] */ +#define ARIZONA_OUT6R_ANC_SRC_WIDTH 2 /* OUT6R_ANC_SRC - [11:10] */ + +/* + * R1085 (0x43D) - DAC Digital Volume 6R + */ +#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */ +#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */ +#define 
ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */ +#define ARIZONA_OUT6R_MUTE 0x0100 /* OUT6R_MUTE */ +#define ARIZONA_OUT6R_MUTE_MASK 0x0100 /* OUT6R_MUTE */ +#define ARIZONA_OUT6R_MUTE_SHIFT 8 /* OUT6R_MUTE */ +#define ARIZONA_OUT6R_MUTE_WIDTH 1 /* OUT6R_MUTE */ +#define ARIZONA_OUT6R_VOL_MASK 0x00FF /* OUT6R_VOL - [7:0] */ +#define ARIZONA_OUT6R_VOL_SHIFT 0 /* OUT6R_VOL - [7:0] */ +#define ARIZONA_OUT6R_VOL_WIDTH 8 /* OUT6R_VOL - [7:0] */ + +/* + * R1086 (0x43E) - DAC Volume Limit 6R + */ +#define ARIZONA_OUT6R_VOL_LIM_MASK 0x00FF /* OUT6R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT6R_VOL_LIM_SHIFT 0 /* OUT6R_VOL_LIM - [7:0] */ +#define ARIZONA_OUT6R_VOL_LIM_WIDTH 8 /* OUT6R_VOL_LIM - [7:0] */ + +/* + * R1087 (0x43F) - Noise Gate Select 6R + */ +#define ARIZONA_OUT6R_NGATE_SRC_MASK 0x0FFF /* OUT6R_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT6R_NGATE_SRC_SHIFT 0 /* OUT6R_NGATE_SRC - [11:0] */ +#define ARIZONA_OUT6R_NGATE_SRC_WIDTH 12 /* OUT6R_NGATE_SRC - [11:0] */ + +/* + * R1088 (0x440) - DRE Enable + */ +#define ARIZONA_DRE3R_ENA 0x0020 /* DRE3R_ENA */ +#define ARIZONA_DRE3R_ENA_MASK 0x0020 /* DRE3R_ENA */ +#define ARIZONA_DRE3R_ENA_SHIFT 5 /* DRE3R_ENA */ +#define ARIZONA_DRE3R_ENA_WIDTH 1 /* DRE3R_ENA */ +#define ARIZONA_DRE3L_ENA 0x0010 /* DRE3L_ENA */ +#define ARIZONA_DRE3L_ENA_MASK 0x0010 /* DRE3L_ENA */ +#define ARIZONA_DRE3L_ENA_SHIFT 4 /* DRE3L_ENA */ +#define ARIZONA_DRE3L_ENA_WIDTH 1 /* DRE3L_ENA */ +#define ARIZONA_DRE2R_ENA 0x0008 /* DRE2R_ENA */ +#define ARIZONA_DRE2R_ENA_MASK 0x0008 /* DRE2R_ENA */ +#define ARIZONA_DRE2R_ENA_SHIFT 3 /* DRE2R_ENA */ +#define ARIZONA_DRE2R_ENA_WIDTH 1 /* DRE2R_ENA */ +#define ARIZONA_DRE2L_ENA 0x0004 /* DRE2L_ENA */ +#define ARIZONA_DRE2L_ENA_MASK 0x0004 /* DRE2L_ENA */ +#define ARIZONA_DRE2L_ENA_SHIFT 2 /* DRE2L_ENA */ +#define ARIZONA_DRE2L_ENA_WIDTH 1 /* DRE2L_ENA */ +#define ARIZONA_DRE1R_ENA 0x0002 /* DRE1R_ENA */ +#define ARIZONA_DRE1R_ENA_MASK 0x0002 /* DRE1R_ENA */ +#define ARIZONA_DRE1R_ENA_SHIFT 1 /* DRE1R_ENA */ +#define ARIZONA_DRE1R_ENA_WIDTH 1 /* DRE1R_ENA */ +#define ARIZONA_DRE1L_ENA 0x0001 /* DRE1L_ENA */ +#define ARIZONA_DRE1L_ENA_MASK 0x0001 /* DRE1L_ENA */ +#define ARIZONA_DRE1L_ENA_SHIFT 0 /* DRE1L_ENA */ +#define ARIZONA_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */ + +/* + * R1088 (0x440) - DRE Enable (WM8998) + */ +#define WM8998_DRE3L_ENA 0x0020 /* DRE3L_ENA */ +#define WM8998_DRE3L_ENA_MASK 0x0020 /* DRE3L_ENA */ +#define WM8998_DRE3L_ENA_SHIFT 5 /* DRE3L_ENA */ +#define WM8998_DRE3L_ENA_WIDTH 1 /* DRE3L_ENA */ +#define WM8998_DRE2L_ENA 0x0008 /* DRE2L_ENA */ +#define WM8998_DRE2L_ENA_MASK 0x0008 /* DRE2L_ENA */ +#define WM8998_DRE2L_ENA_SHIFT 3 /* DRE2L_ENA */ +#define WM8998_DRE2L_ENA_WIDTH 1 /* DRE2L_ENA */ +#define WM8998_DRE2R_ENA 0x0004 /* DRE2R_ENA */ +#define WM8998_DRE2R_ENA_MASK 0x0004 /* DRE2R_ENA */ +#define WM8998_DRE2R_ENA_SHIFT 2 /* DRE2R_ENA */ +#define WM8998_DRE2R_ENA_WIDTH 1 /* DRE2R_ENA */ +#define WM8998_DRE1L_ENA 0x0002 /* DRE1L_ENA */ +#define WM8998_DRE1L_ENA_MASK 0x0002 /* DRE1L_ENA */ +#define WM8998_DRE1L_ENA_SHIFT 1 /* DRE1L_ENA */ +#define WM8998_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */ +#define WM8998_DRE1R_ENA 0x0001 /* DRE1R_ENA */ +#define WM8998_DRE1R_ENA_MASK 0x0001 /* DRE1R_ENA */ +#define WM8998_DRE1R_ENA_SHIFT 0 /* DRE1R_ENA */ +#define WM8998_DRE1R_ENA_WIDTH 1 /* DRE1R_ENA */ + +/* + * R1089 (0x441) - DRE Control 1 + */ +#define ARIZONA_DRE_ENV_TC_FAST_MASK 0x0F00 /* DRE_ENV_TC_FAST - [11:8] */ +#define ARIZONA_DRE_ENV_TC_FAST_SHIFT 8 /* DRE_ENV_TC_FAST - [11:8] */ +#define ARIZONA_DRE_ENV_TC_FAST_WIDTH 4 /* 
DRE_ENV_TC_FAST - [11:8] */ + +/* + * R1090 (0x442) - DRE Control 2 + */ +#define ARIZONA_DRE_T_LOW_MASK 0x3F00 /* DRE_T_LOW - [13:8] */ +#define ARIZONA_DRE_T_LOW_SHIFT 8 /* DRE_T_LOW - [13:8] */ +#define ARIZONA_DRE_T_LOW_WIDTH 6 /* DRE_T_LOW - [13:8] */ +#define ARIZONA_DRE_ALOG_VOL_DELAY_MASK 0x000F /* DRE_ALOG_VOL_DELAY - [3:0] */ +#define ARIZONA_DRE_ALOG_VOL_DELAY_SHIFT 0 /* DRE_ALOG_VOL_DELAY - [3:0] */ +#define ARIZONA_DRE_ALOG_VOL_DELAY_WIDTH 4 /* DRE_ALOG_VOL_DELAY - [3:0] */ + +/* + * R1091 (0x443) - DRE Control 3 + */ +#define ARIZONA_DRE_GAIN_SHIFT_MASK 0xC000 /* DRE_GAIN_SHIFT - [15:14] */ +#define ARIZONA_DRE_GAIN_SHIFT_SHIFT 14 /* DRE_GAIN_SHIFT - [15:14] */ +#define ARIZONA_DRE_GAIN_SHIFT_WIDTH 2 /* DRE_GAIN_SHIFT - [15:14] */ +#define ARIZONA_DRE_LOW_LEVEL_ABS_MASK 0x000F /* LOW_LEVEL_ABS - [3:0] */ +#define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT 0 /* LOW_LEVEL_ABS - [3:0] */ +#define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH 4 /* LOW_LEVEL_ABS - [3:0] */ + +/* + * R1096 (0x448) - EDRE Enable + */ +#define ARIZONA_EDRE_OUT4L_THR2_ENA 0x0200 /* EDRE_OUT4L_THR2_ENA */ +#define ARIZONA_EDRE_OUT4L_THR2_ENA_MASK 0x0200 /* EDRE_OUT4L_THR2_ENA */ +#define ARIZONA_EDRE_OUT4L_THR2_ENA_SHIFT 9 /* EDRE_OUT4L_THR2_ENA */ +#define ARIZONA_EDRE_OUT4L_THR2_ENA_WIDTH 1 /* EDRE_OUT4L_THR2_ENA */ +#define ARIZONA_EDRE_OUT4R_THR2_ENA 0x0100 /* EDRE_OUT4R_THR2_ENA */ +#define ARIZONA_EDRE_OUT4R_THR2_ENA_MASK 0x0100 /* EDRE_OUT4R_THR2_ENA */ +#define ARIZONA_EDRE_OUT4R_THR2_ENA_SHIFT 8 /* EDRE_OUT4R_THR2_ENA */ +#define ARIZONA_EDRE_OUT4R_THR2_ENA_WIDTH 1 /* EDRE_OUT4R_THR2_ENA */ +#define ARIZONA_EDRE_OUT4L_THR1_ENA 0x0080 /* EDRE_OUT4L_THR1_ENA */ +#define ARIZONA_EDRE_OUT4L_THR1_ENA_MASK 0x0080 /* EDRE_OUT4L_THR1_ENA */ +#define ARIZONA_EDRE_OUT4L_THR1_ENA_SHIFT 7 /* EDRE_OUT4L_THR1_ENA */ +#define ARIZONA_EDRE_OUT4L_THR1_ENA_WIDTH 1 /* EDRE_OUT4L_THR1_ENA */ +#define ARIZONA_EDRE_OUT4R_THR1_ENA 0x0040 /* EDRE_OUT4R_THR1_ENA */ +#define ARIZONA_EDRE_OUT4R_THR1_ENA_MASK 0x0040 /* EDRE_OUT4R_THR1_ENA */ +#define ARIZONA_EDRE_OUT4R_THR1_ENA_SHIFT 6 /* EDRE_OUT4R_THR1_ENA */ +#define ARIZONA_EDRE_OUT4R_THR1_ENA_WIDTH 1 /* EDRE_OUT4R_THR1_ENA */ +#define ARIZONA_EDRE_OUT3L_THR1_ENA 0x0020 /* EDRE_OUT3L_THR1_ENA */ +#define ARIZONA_EDRE_OUT3L_THR1_ENA_MASK 0x0020 /* EDRE_OUT3L_THR1_ENA */ +#define ARIZONA_EDRE_OUT3L_THR1_ENA_SHIFT 5 /* EDRE_OUT3L_THR1_ENA */ +#define ARIZONA_EDRE_OUT3L_THR1_ENA_WIDTH 1 /* EDRE_OUT3L_THR1_ENA */ +#define ARIZONA_EDRE_OUT3R_THR1_ENA 0x0010 /* EDRE_OUT3R_THR1_ENA */ +#define ARIZONA_EDRE_OUT3R_THR1_ENA_MASK 0x0010 /* EDRE_OUT3R_THR1_ENA */ +#define ARIZONA_EDRE_OUT3R_THR1_ENA_SHIFT 4 /* EDRE_OUT3R_THR1_ENA */ +#define ARIZONA_EDRE_OUT3R_THR1_ENA_WIDTH 1 /* EDRE_OUT3R_THR1_ENA */ +#define ARIZONA_EDRE_OUT2L_THR1_ENA 0x0008 /* EDRE_OUT2L_THR1_ENA */ +#define ARIZONA_EDRE_OUT2L_THR1_ENA_MASK 0x0008 /* EDRE_OUT2L_THR1_ENA */ +#define ARIZONA_EDRE_OUT2L_THR1_ENA_SHIFT 3 /* EDRE_OUT2L_THR1_ENA */ +#define ARIZONA_EDRE_OUT2L_THR1_ENA_WIDTH 1 /* EDRE_OUT2L_THR1_ENA */ +#define ARIZONA_EDRE_OUT2R_THR1_ENA 0x0004 /* EDRE_OUT2R_THR1_ENA */ +#define ARIZONA_EDRE_OUT2R_THR1_ENA_MASK 0x0004 /* EDRE_OUT2R_THR1_ENA */ +#define ARIZONA_EDRE_OUT2R_THR1_ENA_SHIFT 2 /* EDRE_OUT2R_THR1_ENA */ +#define ARIZONA_EDRE_OUT2R_THR1_ENA_WIDTH 1 /* EDRE_OUT2R_THR1_ENA */ +#define ARIZONA_EDRE_OUT1L_THR1_ENA 0x0002 /* EDRE_OUT1L_THR1_ENA */ +#define ARIZONA_EDRE_OUT1L_THR1_ENA_MASK 0x0002 /* EDRE_OUT1L_THR1_ENA */ +#define ARIZONA_EDRE_OUT1L_THR1_ENA_SHIFT 1 /* EDRE_OUT1L_THR1_ENA */ +#define 
ARIZONA_EDRE_OUT1L_THR1_ENA_WIDTH 1 /* EDRE_OUT1L_THR1_ENA */ +#define ARIZONA_EDRE_OUT1R_THR1_ENA 0x0001 /* EDRE_OUT1R_THR1_ENA */ +#define ARIZONA_EDRE_OUT1R_THR1_ENA_MASK 0x0001 /* EDRE_OUT1R_THR1_ENA */ +#define ARIZONA_EDRE_OUT1R_THR1_ENA_SHIFT 0 /* EDRE_OUT1R_THR1_ENA */ +#define ARIZONA_EDRE_OUT1R_THR1_ENA_WIDTH 1 /* EDRE_OUT1R_THR1_ENA */ + +/* + * R1104 (0x450) - DAC AEC Control 1 + */ +#define ARIZONA_AEC_LOOPBACK_SRC_MASK 0x003C /* AEC_LOOPBACK_SRC - [5:2] */ +#define ARIZONA_AEC_LOOPBACK_SRC_SHIFT 2 /* AEC_LOOPBACK_SRC - [5:2] */ +#define ARIZONA_AEC_LOOPBACK_SRC_WIDTH 4 /* AEC_LOOPBACK_SRC - [5:2] */ +#define ARIZONA_AEC_ENA_STS 0x0002 /* AEC_ENA_STS */ +#define ARIZONA_AEC_ENA_STS_MASK 0x0002 /* AEC_ENA_STS */ +#define ARIZONA_AEC_ENA_STS_SHIFT 1 /* AEC_ENA_STS */ +#define ARIZONA_AEC_ENA_STS_WIDTH 1 /* AEC_ENA_STS */ +#define ARIZONA_AEC_LOOPBACK_ENA 0x0001 /* AEC_LOOPBACK_ENA */ +#define ARIZONA_AEC_LOOPBACK_ENA_MASK 0x0001 /* AEC_LOOPBACK_ENA */ +#define ARIZONA_AEC_LOOPBACK_ENA_SHIFT 0 /* AEC_LOOPBACK_ENA */ +#define ARIZONA_AEC_LOOPBACK_ENA_WIDTH 1 /* AEC_LOOPBACK_ENA */ + +/* + * R1112 (0x458) - Noise Gate Control + */ +#define ARIZONA_NGATE_HOLD_MASK 0x0030 /* NGATE_HOLD - [5:4] */ +#define ARIZONA_NGATE_HOLD_SHIFT 4 /* NGATE_HOLD - [5:4] */ +#define ARIZONA_NGATE_HOLD_WIDTH 2 /* NGATE_HOLD - [5:4] */ +#define ARIZONA_NGATE_THR_MASK 0x000E /* NGATE_THR - [3:1] */ +#define ARIZONA_NGATE_THR_SHIFT 1 /* NGATE_THR - [3:1] */ +#define ARIZONA_NGATE_THR_WIDTH 3 /* NGATE_THR - [3:1] */ +#define ARIZONA_NGATE_ENA 0x0001 /* NGATE_ENA */ +#define ARIZONA_NGATE_ENA_MASK 0x0001 /* NGATE_ENA */ +#define ARIZONA_NGATE_ENA_SHIFT 0 /* NGATE_ENA */ +#define ARIZONA_NGATE_ENA_WIDTH 1 /* NGATE_ENA */ + +/* + * R1168 (0x490) - PDM SPK1 CTRL 1 + */ +#define ARIZONA_SPK1R_MUTE 0x2000 /* SPK1R_MUTE */ +#define ARIZONA_SPK1R_MUTE_MASK 0x2000 /* SPK1R_MUTE */ +#define ARIZONA_SPK1R_MUTE_SHIFT 13 /* SPK1R_MUTE */ +#define ARIZONA_SPK1R_MUTE_WIDTH 1 /* SPK1R_MUTE */ +#define ARIZONA_SPK1L_MUTE 0x1000 /* SPK1L_MUTE */ +#define ARIZONA_SPK1L_MUTE_MASK 0x1000 /* SPK1L_MUTE */ +#define ARIZONA_SPK1L_MUTE_SHIFT 12 /* SPK1L_MUTE */ +#define ARIZONA_SPK1L_MUTE_WIDTH 1 /* SPK1L_MUTE */ +#define ARIZONA_SPK1_MUTE_ENDIAN 0x0100 /* SPK1_MUTE_ENDIAN */ +#define ARIZONA_SPK1_MUTE_ENDIAN_MASK 0x0100 /* SPK1_MUTE_ENDIAN */ +#define ARIZONA_SPK1_MUTE_ENDIAN_SHIFT 8 /* SPK1_MUTE_ENDIAN */ +#define ARIZONA_SPK1_MUTE_ENDIAN_WIDTH 1 /* SPK1_MUTE_ENDIAN */ +#define ARIZONA_SPK1_MUTE_SEQ1_MASK 0x00FF /* SPK1_MUTE_SEQ1 - [7:0] */ +#define ARIZONA_SPK1_MUTE_SEQ1_SHIFT 0 /* SPK1_MUTE_SEQ1 - [7:0] */ +#define ARIZONA_SPK1_MUTE_SEQ1_WIDTH 8 /* SPK1_MUTE_SEQ1 - [7:0] */ + +/* + * R1169 (0x491) - PDM SPK1 CTRL 2 + */ +#define ARIZONA_SPK1_FMT 0x0001 /* SPK1_FMT */ +#define ARIZONA_SPK1_FMT_MASK 0x0001 /* SPK1_FMT */ +#define ARIZONA_SPK1_FMT_SHIFT 0 /* SPK1_FMT */ +#define ARIZONA_SPK1_FMT_WIDTH 1 /* SPK1_FMT */ + +/* + * R1170 (0x492) - PDM SPK2 CTRL 1 + */ +#define ARIZONA_SPK2R_MUTE 0x2000 /* SPK2R_MUTE */ +#define ARIZONA_SPK2R_MUTE_MASK 0x2000 /* SPK2R_MUTE */ +#define ARIZONA_SPK2R_MUTE_SHIFT 13 /* SPK2R_MUTE */ +#define ARIZONA_SPK2R_MUTE_WIDTH 1 /* SPK2R_MUTE */ +#define ARIZONA_SPK2L_MUTE 0x1000 /* SPK2L_MUTE */ +#define ARIZONA_SPK2L_MUTE_MASK 0x1000 /* SPK2L_MUTE */ +#define ARIZONA_SPK2L_MUTE_SHIFT 12 /* SPK2L_MUTE */ +#define ARIZONA_SPK2L_MUTE_WIDTH 1 /* SPK2L_MUTE */ +#define ARIZONA_SPK2_MUTE_ENDIAN 0x0100 /* SPK2_MUTE_ENDIAN */ +#define ARIZONA_SPK2_MUTE_ENDIAN_MASK 0x0100 /* SPK2_MUTE_ENDIAN */ +#define 
ARIZONA_SPK2_MUTE_ENDIAN_SHIFT 8 /* SPK2_MUTE_ENDIAN */ +#define ARIZONA_SPK2_MUTE_ENDIAN_WIDTH 1 /* SPK2_MUTE_ENDIAN */ +#define ARIZONA_SPK2_MUTE_SEQ_MASK 0x00FF /* SPK2_MUTE_SEQ - [7:0] */ +#define ARIZONA_SPK2_MUTE_SEQ_SHIFT 0 /* SPK2_MUTE_SEQ - [7:0] */ +#define ARIZONA_SPK2_MUTE_SEQ_WIDTH 8 /* SPK2_MUTE_SEQ - [7:0] */ + +/* + * R1171 (0x493) - PDM SPK2 CTRL 2 + */ +#define ARIZONA_SPK2_FMT 0x0001 /* SPK2_FMT */ +#define ARIZONA_SPK2_FMT_MASK 0x0001 /* SPK2_FMT */ +#define ARIZONA_SPK2_FMT_SHIFT 0 /* SPK2_FMT */ +#define ARIZONA_SPK2_FMT_WIDTH 1 /* SPK2_FMT */ + +/* + * R1184 (0x4A0) - HP1 Short Circuit Ctrl + */ +#define ARIZONA_HP1_SC_ENA 0x1000 /* HP1_SC_ENA */ +#define ARIZONA_HP1_SC_ENA_MASK 0x1000 /* HP1_SC_ENA */ +#define ARIZONA_HP1_SC_ENA_SHIFT 12 /* HP1_SC_ENA */ +#define ARIZONA_HP1_SC_ENA_WIDTH 1 /* HP1_SC_ENA */ + +/* + * R1185 (0x4A1) - HP2 Short Circuit Ctrl + */ +#define ARIZONA_HP2_SC_ENA 0x1000 /* HP2_SC_ENA */ +#define ARIZONA_HP2_SC_ENA_MASK 0x1000 /* HP2_SC_ENA */ +#define ARIZONA_HP2_SC_ENA_SHIFT 12 /* HP2_SC_ENA */ +#define ARIZONA_HP2_SC_ENA_WIDTH 1 /* HP2_SC_ENA */ + +/* + * R1186 (0x4A2) - HP3 Short Circuit Ctrl + */ +#define ARIZONA_HP3_SC_ENA 0x1000 /* HP3_SC_ENA */ +#define ARIZONA_HP3_SC_ENA_MASK 0x1000 /* HP3_SC_ENA */ +#define ARIZONA_HP3_SC_ENA_SHIFT 12 /* HP3_SC_ENA */ +#define ARIZONA_HP3_SC_ENA_WIDTH 1 /* HP3_SC_ENA */ + +/* + * R1188 (0x4A4) HP Test Ctrl 1 + */ +#define ARIZONA_HP1_TST_CAP_SEL_MASK 0x0003 /* HP1_TST_CAP_SEL - [1:0] */ +#define ARIZONA_HP1_TST_CAP_SEL_SHIFT 0 /* HP1_TST_CAP_SEL - [1:0] */ +#define ARIZONA_HP1_TST_CAP_SEL_WIDTH 2 /* HP1_TST_CAP_SEL - [1:0] */ + +/* + * R1244 (0x4DC) - DAC comp 1 + */ +#define ARIZONA_OUT_COMP_COEFF_MASK 0xFFFF /* OUT_COMP_COEFF - [15:0] */ +#define ARIZONA_OUT_COMP_COEFF_SHIFT 0 /* OUT_COMP_COEFF - [15:0] */ +#define ARIZONA_OUT_COMP_COEFF_WIDTH 16 /* OUT_COMP_COEFF - [15:0] */ + +/* + * R1245 (0x4DD) - DAC comp 2 + */ +#define ARIZONA_OUT_COMP_COEFF_1 0x0002 /* OUT_COMP_COEFF */ +#define ARIZONA_OUT_COMP_COEFF_1_MASK 0x0002 /* OUT_COMP_COEFF */ +#define ARIZONA_OUT_COMP_COEFF_1_SHIFT 1 /* OUT_COMP_COEFF */ +#define ARIZONA_OUT_COMP_COEFF_1_WIDTH 1 /* OUT_COMP_COEFF */ +#define ARIZONA_OUT_COMP_COEFF_SEL 0x0001 /* OUT_COMP_COEFF_SEL */ +#define ARIZONA_OUT_COMP_COEFF_SEL_MASK 0x0001 /* OUT_COMP_COEFF_SEL */ +#define ARIZONA_OUT_COMP_COEFF_SEL_SHIFT 0 /* OUT_COMP_COEFF_SEL */ +#define ARIZONA_OUT_COMP_COEFF_SEL_WIDTH 1 /* OUT_COMP_COEFF_SEL */ + +/* + * R1246 (0x4DE) - DAC comp 3 + */ +#define ARIZONA_AEC_COMP_COEFF_MASK 0xFFFF /* AEC_COMP_COEFF - [15:0] */ +#define ARIZONA_AEC_COMP_COEFF_SHIFT 0 /* AEC_COMP_COEFF - [15:0] */ +#define ARIZONA_AEC_COMP_COEFF_WIDTH 16 /* AEC_COMP_COEFF - [15:0] */ + +/* + * R1247 (0x4DF) - DAC comp 4 + */ +#define ARIZONA_AEC_COMP_COEFF_1 0x0002 /* AEC_COMP_COEFF */ +#define ARIZONA_AEC_COMP_COEFF_1_MASK 0x0002 /* AEC_COMP_COEFF */ +#define ARIZONA_AEC_COMP_COEFF_1_SHIFT 1 /* AEC_COMP_COEFF */ +#define ARIZONA_AEC_COMP_COEFF_1_WIDTH 1 /* AEC_COMP_COEFF */ +#define ARIZONA_AEC_COMP_COEFF_SEL 0x0001 /* AEC_COMP_COEFF_SEL */ +#define ARIZONA_AEC_COMP_COEFF_SEL_MASK 0x0001 /* AEC_COMP_COEFF_SEL */ +#define ARIZONA_AEC_COMP_COEFF_SEL_SHIFT 0 /* AEC_COMP_COEFF_SEL */ +#define ARIZONA_AEC_COMP_COEFF_SEL_WIDTH 1 /* AEC_COMP_COEFF_SEL */ + +/* + * R1280 (0x500) - AIF1 BCLK Ctrl + */ +#define ARIZONA_AIF1_BCLK_INV 0x0080 /* AIF1_BCLK_INV */ +#define ARIZONA_AIF1_BCLK_INV_MASK 0x0080 /* AIF1_BCLK_INV */ +#define ARIZONA_AIF1_BCLK_INV_SHIFT 7 /* AIF1_BCLK_INV */ +#define 
ARIZONA_AIF1_BCLK_INV_WIDTH 1 /* AIF1_BCLK_INV */ +#define ARIZONA_AIF1_BCLK_FRC 0x0040 /* AIF1_BCLK_FRC */ +#define ARIZONA_AIF1_BCLK_FRC_MASK 0x0040 /* AIF1_BCLK_FRC */ +#define ARIZONA_AIF1_BCLK_FRC_SHIFT 6 /* AIF1_BCLK_FRC */ +#define ARIZONA_AIF1_BCLK_FRC_WIDTH 1 /* AIF1_BCLK_FRC */ +#define ARIZONA_AIF1_BCLK_MSTR 0x0020 /* AIF1_BCLK_MSTR */ +#define ARIZONA_AIF1_BCLK_MSTR_MASK 0x0020 /* AIF1_BCLK_MSTR */ +#define ARIZONA_AIF1_BCLK_MSTR_SHIFT 5 /* AIF1_BCLK_MSTR */ +#define ARIZONA_AIF1_BCLK_MSTR_WIDTH 1 /* AIF1_BCLK_MSTR */ +#define ARIZONA_AIF1_BCLK_FREQ_MASK 0x001F /* AIF1_BCLK_FREQ - [4:0] */ +#define ARIZONA_AIF1_BCLK_FREQ_SHIFT 0 /* AIF1_BCLK_FREQ - [4:0] */ +#define ARIZONA_AIF1_BCLK_FREQ_WIDTH 5 /* AIF1_BCLK_FREQ - [4:0] */ + +/* + * R1281 (0x501) - AIF1 Tx Pin Ctrl + */ +#define ARIZONA_AIF1TX_DAT_TRI 0x0020 /* AIF1TX_DAT_TRI */ +#define ARIZONA_AIF1TX_DAT_TRI_MASK 0x0020 /* AIF1TX_DAT_TRI */ +#define ARIZONA_AIF1TX_DAT_TRI_SHIFT 5 /* AIF1TX_DAT_TRI */ +#define ARIZONA_AIF1TX_DAT_TRI_WIDTH 1 /* AIF1TX_DAT_TRI */ +#define ARIZONA_AIF1TX_LRCLK_SRC 0x0008 /* AIF1TX_LRCLK_SRC */ +#define ARIZONA_AIF1TX_LRCLK_SRC_MASK 0x0008 /* AIF1TX_LRCLK_SRC */ +#define ARIZONA_AIF1TX_LRCLK_SRC_SHIFT 3 /* AIF1TX_LRCLK_SRC */ +#define ARIZONA_AIF1TX_LRCLK_SRC_WIDTH 1 /* AIF1TX_LRCLK_SRC */ +#define ARIZONA_AIF1TX_LRCLK_INV 0x0004 /* AIF1TX_LRCLK_INV */ +#define ARIZONA_AIF1TX_LRCLK_INV_MASK 0x0004 /* AIF1TX_LRCLK_INV */ +#define ARIZONA_AIF1TX_LRCLK_INV_SHIFT 2 /* AIF1TX_LRCLK_INV */ +#define ARIZONA_AIF1TX_LRCLK_INV_WIDTH 1 /* AIF1TX_LRCLK_INV */ +#define ARIZONA_AIF1TX_LRCLK_FRC 0x0002 /* AIF1TX_LRCLK_FRC */ +#define ARIZONA_AIF1TX_LRCLK_FRC_MASK 0x0002 /* AIF1TX_LRCLK_FRC */ +#define ARIZONA_AIF1TX_LRCLK_FRC_SHIFT 1 /* AIF1TX_LRCLK_FRC */ +#define ARIZONA_AIF1TX_LRCLK_FRC_WIDTH 1 /* AIF1TX_LRCLK_FRC */ +#define ARIZONA_AIF1TX_LRCLK_MSTR 0x0001 /* AIF1TX_LRCLK_MSTR */ +#define ARIZONA_AIF1TX_LRCLK_MSTR_MASK 0x0001 /* AIF1TX_LRCLK_MSTR */ +#define ARIZONA_AIF1TX_LRCLK_MSTR_SHIFT 0 /* AIF1TX_LRCLK_MSTR */ +#define ARIZONA_AIF1TX_LRCLK_MSTR_WIDTH 1 /* AIF1TX_LRCLK_MSTR */ + +/* + * R1282 (0x502) - AIF1 Rx Pin Ctrl + */ +#define ARIZONA_AIF1RX_LRCLK_INV 0x0004 /* AIF1RX_LRCLK_INV */ +#define ARIZONA_AIF1RX_LRCLK_INV_MASK 0x0004 /* AIF1RX_LRCLK_INV */ +#define ARIZONA_AIF1RX_LRCLK_INV_SHIFT 2 /* AIF1RX_LRCLK_INV */ +#define ARIZONA_AIF1RX_LRCLK_INV_WIDTH 1 /* AIF1RX_LRCLK_INV */ +#define ARIZONA_AIF1RX_LRCLK_FRC 0x0002 /* AIF1RX_LRCLK_FRC */ +#define ARIZONA_AIF1RX_LRCLK_FRC_MASK 0x0002 /* AIF1RX_LRCLK_FRC */ +#define ARIZONA_AIF1RX_LRCLK_FRC_SHIFT 1 /* AIF1RX_LRCLK_FRC */ +#define ARIZONA_AIF1RX_LRCLK_FRC_WIDTH 1 /* AIF1RX_LRCLK_FRC */ +#define ARIZONA_AIF1RX_LRCLK_MSTR 0x0001 /* AIF1RX_LRCLK_MSTR */ +#define ARIZONA_AIF1RX_LRCLK_MSTR_MASK 0x0001 /* AIF1RX_LRCLK_MSTR */ +#define ARIZONA_AIF1RX_LRCLK_MSTR_SHIFT 0 /* AIF1RX_LRCLK_MSTR */ +#define ARIZONA_AIF1RX_LRCLK_MSTR_WIDTH 1 /* AIF1RX_LRCLK_MSTR */ + +/* + * R1283 (0x503) - AIF1 Rate Ctrl + */ +#define ARIZONA_AIF1_RATE_MASK 0x7800 /* AIF1_RATE - [14:11] */ +#define ARIZONA_AIF1_RATE_SHIFT 11 /* AIF1_RATE - [14:11] */ +#define ARIZONA_AIF1_RATE_WIDTH 4 /* AIF1_RATE - [14:11] */ +#define ARIZONA_AIF1_TRI 0x0040 /* AIF1_TRI */ +#define ARIZONA_AIF1_TRI_MASK 0x0040 /* AIF1_TRI */ +#define ARIZONA_AIF1_TRI_SHIFT 6 /* AIF1_TRI */ +#define ARIZONA_AIF1_TRI_WIDTH 1 /* AIF1_TRI */ + +/* + * R1284 (0x504) - AIF1 Format + */ +#define ARIZONA_AIF1_FMT_MASK 0x0007 /* AIF1_FMT - [2:0] */ +#define ARIZONA_AIF1_FMT_SHIFT 0 /* AIF1_FMT - [2:0] */ +#define 
ARIZONA_AIF1_FMT_WIDTH 3 /* AIF1_FMT - [2:0] */ + +/* + * R1285 (0x505) - AIF1 Tx BCLK Rate + */ +#define ARIZONA_AIF1TX_BCPF_MASK 0x1FFF /* AIF1TX_BCPF - [12:0] */ +#define ARIZONA_AIF1TX_BCPF_SHIFT 0 /* AIF1TX_BCPF - [12:0] */ +#define ARIZONA_AIF1TX_BCPF_WIDTH 13 /* AIF1TX_BCPF - [12:0] */ + +/* + * R1286 (0x506) - AIF1 Rx BCLK Rate + */ +#define ARIZONA_AIF1RX_BCPF_MASK 0x1FFF /* AIF1RX_BCPF - [12:0] */ +#define ARIZONA_AIF1RX_BCPF_SHIFT 0 /* AIF1RX_BCPF - [12:0] */ +#define ARIZONA_AIF1RX_BCPF_WIDTH 13 /* AIF1RX_BCPF - [12:0] */ + +/* + * R1287 (0x507) - AIF1 Frame Ctrl 1 + */ +#define ARIZONA_AIF1TX_WL_MASK 0x3F00 /* AIF1TX_WL - [13:8] */ +#define ARIZONA_AIF1TX_WL_SHIFT 8 /* AIF1TX_WL - [13:8] */ +#define ARIZONA_AIF1TX_WL_WIDTH 6 /* AIF1TX_WL - [13:8] */ +#define ARIZONA_AIF1TX_SLOT_LEN_MASK 0x00FF /* AIF1TX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF1TX_SLOT_LEN_SHIFT 0 /* AIF1TX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF1TX_SLOT_LEN_WIDTH 8 /* AIF1TX_SLOT_LEN - [7:0] */ + +/* + * R1288 (0x508) - AIF1 Frame Ctrl 2 + */ +#define ARIZONA_AIF1RX_WL_MASK 0x3F00 /* AIF1RX_WL - [13:8] */ +#define ARIZONA_AIF1RX_WL_SHIFT 8 /* AIF1RX_WL - [13:8] */ +#define ARIZONA_AIF1RX_WL_WIDTH 6 /* AIF1RX_WL - [13:8] */ +#define ARIZONA_AIF1RX_SLOT_LEN_MASK 0x00FF /* AIF1RX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF1RX_SLOT_LEN_SHIFT 0 /* AIF1RX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF1RX_SLOT_LEN_WIDTH 8 /* AIF1RX_SLOT_LEN - [7:0] */ + +/* + * R1289 (0x509) - AIF1 Frame Ctrl 3 + */ +#define ARIZONA_AIF1TX1_SLOT_MASK 0x003F /* AIF1TX1_SLOT - [5:0] */ +#define ARIZONA_AIF1TX1_SLOT_SHIFT 0 /* AIF1TX1_SLOT - [5:0] */ +#define ARIZONA_AIF1TX1_SLOT_WIDTH 6 /* AIF1TX1_SLOT - [5:0] */ + +/* + * R1290 (0x50A) - AIF1 Frame Ctrl 4 + */ +#define ARIZONA_AIF1TX2_SLOT_MASK 0x003F /* AIF1TX2_SLOT - [5:0] */ +#define ARIZONA_AIF1TX2_SLOT_SHIFT 0 /* AIF1TX2_SLOT - [5:0] */ +#define ARIZONA_AIF1TX2_SLOT_WIDTH 6 /* AIF1TX2_SLOT - [5:0] */ + +/* + * R1291 (0x50B) - AIF1 Frame Ctrl 5 + */ +#define ARIZONA_AIF1TX3_SLOT_MASK 0x003F /* AIF1TX3_SLOT - [5:0] */ +#define ARIZONA_AIF1TX3_SLOT_SHIFT 0 /* AIF1TX3_SLOT - [5:0] */ +#define ARIZONA_AIF1TX3_SLOT_WIDTH 6 /* AIF1TX3_SLOT - [5:0] */ + +/* + * R1292 (0x50C) - AIF1 Frame Ctrl 6 + */ +#define ARIZONA_AIF1TX4_SLOT_MASK 0x003F /* AIF1TX4_SLOT - [5:0] */ +#define ARIZONA_AIF1TX4_SLOT_SHIFT 0 /* AIF1TX4_SLOT - [5:0] */ +#define ARIZONA_AIF1TX4_SLOT_WIDTH 6 /* AIF1TX4_SLOT - [5:0] */ + +/* + * R1293 (0x50D) - AIF1 Frame Ctrl 7 + */ +#define ARIZONA_AIF1TX5_SLOT_MASK 0x003F /* AIF1TX5_SLOT - [5:0] */ +#define ARIZONA_AIF1TX5_SLOT_SHIFT 0 /* AIF1TX5_SLOT - [5:0] */ +#define ARIZONA_AIF1TX5_SLOT_WIDTH 6 /* AIF1TX5_SLOT - [5:0] */ + +/* + * R1294 (0x50E) - AIF1 Frame Ctrl 8 + */ +#define ARIZONA_AIF1TX6_SLOT_MASK 0x003F /* AIF1TX6_SLOT - [5:0] */ +#define ARIZONA_AIF1TX6_SLOT_SHIFT 0 /* AIF1TX6_SLOT - [5:0] */ +#define ARIZONA_AIF1TX6_SLOT_WIDTH 6 /* AIF1TX6_SLOT - [5:0] */ + +/* + * R1295 (0x50F) - AIF1 Frame Ctrl 9 + */ +#define ARIZONA_AIF1TX7_SLOT_MASK 0x003F /* AIF1TX7_SLOT - [5:0] */ +#define ARIZONA_AIF1TX7_SLOT_SHIFT 0 /* AIF1TX7_SLOT - [5:0] */ +#define ARIZONA_AIF1TX7_SLOT_WIDTH 6 /* AIF1TX7_SLOT - [5:0] */ + +/* + * R1296 (0x510) - AIF1 Frame Ctrl 10 + */ +#define ARIZONA_AIF1TX8_SLOT_MASK 0x003F /* AIF1TX8_SLOT - [5:0] */ +#define ARIZONA_AIF1TX8_SLOT_SHIFT 0 /* AIF1TX8_SLOT - [5:0] */ +#define ARIZONA_AIF1TX8_SLOT_WIDTH 6 /* AIF1TX8_SLOT - [5:0] */ + +/* + * R1297 (0x511) - AIF1 Frame Ctrl 11 + */ +#define ARIZONA_AIF1RX1_SLOT_MASK 0x003F /* AIF1RX1_SLOT - [5:0] */ +#define 
ARIZONA_AIF1RX1_SLOT_SHIFT 0 /* AIF1RX1_SLOT - [5:0] */ +#define ARIZONA_AIF1RX1_SLOT_WIDTH 6 /* AIF1RX1_SLOT - [5:0] */ + +/* + * R1298 (0x512) - AIF1 Frame Ctrl 12 + */ +#define ARIZONA_AIF1RX2_SLOT_MASK 0x003F /* AIF1RX2_SLOT - [5:0] */ +#define ARIZONA_AIF1RX2_SLOT_SHIFT 0 /* AIF1RX2_SLOT - [5:0] */ +#define ARIZONA_AIF1RX2_SLOT_WIDTH 6 /* AIF1RX2_SLOT - [5:0] */ + +/* + * R1299 (0x513) - AIF1 Frame Ctrl 13 + */ +#define ARIZONA_AIF1RX3_SLOT_MASK 0x003F /* AIF1RX3_SLOT - [5:0] */ +#define ARIZONA_AIF1RX3_SLOT_SHIFT 0 /* AIF1RX3_SLOT - [5:0] */ +#define ARIZONA_AIF1RX3_SLOT_WIDTH 6 /* AIF1RX3_SLOT - [5:0] */ + +/* + * R1300 (0x514) - AIF1 Frame Ctrl 14 + */ +#define ARIZONA_AIF1RX4_SLOT_MASK 0x003F /* AIF1RX4_SLOT - [5:0] */ +#define ARIZONA_AIF1RX4_SLOT_SHIFT 0 /* AIF1RX4_SLOT - [5:0] */ +#define ARIZONA_AIF1RX4_SLOT_WIDTH 6 /* AIF1RX4_SLOT - [5:0] */ + +/* + * R1301 (0x515) - AIF1 Frame Ctrl 15 + */ +#define ARIZONA_AIF1RX5_SLOT_MASK 0x003F /* AIF1RX5_SLOT - [5:0] */ +#define ARIZONA_AIF1RX5_SLOT_SHIFT 0 /* AIF1RX5_SLOT - [5:0] */ +#define ARIZONA_AIF1RX5_SLOT_WIDTH 6 /* AIF1RX5_SLOT - [5:0] */ + +/* + * R1302 (0x516) - AIF1 Frame Ctrl 16 + */ +#define ARIZONA_AIF1RX6_SLOT_MASK 0x003F /* AIF1RX6_SLOT - [5:0] */ +#define ARIZONA_AIF1RX6_SLOT_SHIFT 0 /* AIF1RX6_SLOT - [5:0] */ +#define ARIZONA_AIF1RX6_SLOT_WIDTH 6 /* AIF1RX6_SLOT - [5:0] */ + +/* + * R1303 (0x517) - AIF1 Frame Ctrl 17 + */ +#define ARIZONA_AIF1RX7_SLOT_MASK 0x003F /* AIF1RX7_SLOT - [5:0] */ +#define ARIZONA_AIF1RX7_SLOT_SHIFT 0 /* AIF1RX7_SLOT - [5:0] */ +#define ARIZONA_AIF1RX7_SLOT_WIDTH 6 /* AIF1RX7_SLOT - [5:0] */ + +/* + * R1304 (0x518) - AIF1 Frame Ctrl 18 + */ +#define ARIZONA_AIF1RX8_SLOT_MASK 0x003F /* AIF1RX8_SLOT - [5:0] */ +#define ARIZONA_AIF1RX8_SLOT_SHIFT 0 /* AIF1RX8_SLOT - [5:0] */ +#define ARIZONA_AIF1RX8_SLOT_WIDTH 6 /* AIF1RX8_SLOT - [5:0] */ + +/* + * R1305 (0x519) - AIF1 Tx Enables + */ +#define ARIZONA_AIF1TX8_ENA 0x0080 /* AIF1TX8_ENA */ +#define ARIZONA_AIF1TX8_ENA_MASK 0x0080 /* AIF1TX8_ENA */ +#define ARIZONA_AIF1TX8_ENA_SHIFT 7 /* AIF1TX8_ENA */ +#define ARIZONA_AIF1TX8_ENA_WIDTH 1 /* AIF1TX8_ENA */ +#define ARIZONA_AIF1TX7_ENA 0x0040 /* AIF1TX7_ENA */ +#define ARIZONA_AIF1TX7_ENA_MASK 0x0040 /* AIF1TX7_ENA */ +#define ARIZONA_AIF1TX7_ENA_SHIFT 6 /* AIF1TX7_ENA */ +#define ARIZONA_AIF1TX7_ENA_WIDTH 1 /* AIF1TX7_ENA */ +#define ARIZONA_AIF1TX6_ENA 0x0020 /* AIF1TX6_ENA */ +#define ARIZONA_AIF1TX6_ENA_MASK 0x0020 /* AIF1TX6_ENA */ +#define ARIZONA_AIF1TX6_ENA_SHIFT 5 /* AIF1TX6_ENA */ +#define ARIZONA_AIF1TX6_ENA_WIDTH 1 /* AIF1TX6_ENA */ +#define ARIZONA_AIF1TX5_ENA 0x0010 /* AIF1TX5_ENA */ +#define ARIZONA_AIF1TX5_ENA_MASK 0x0010 /* AIF1TX5_ENA */ +#define ARIZONA_AIF1TX5_ENA_SHIFT 4 /* AIF1TX5_ENA */ +#define ARIZONA_AIF1TX5_ENA_WIDTH 1 /* AIF1TX5_ENA */ +#define ARIZONA_AIF1TX4_ENA 0x0008 /* AIF1TX4_ENA */ +#define ARIZONA_AIF1TX4_ENA_MASK 0x0008 /* AIF1TX4_ENA */ +#define ARIZONA_AIF1TX4_ENA_SHIFT 3 /* AIF1TX4_ENA */ +#define ARIZONA_AIF1TX4_ENA_WIDTH 1 /* AIF1TX4_ENA */ +#define ARIZONA_AIF1TX3_ENA 0x0004 /* AIF1TX3_ENA */ +#define ARIZONA_AIF1TX3_ENA_MASK 0x0004 /* AIF1TX3_ENA */ +#define ARIZONA_AIF1TX3_ENA_SHIFT 2 /* AIF1TX3_ENA */ +#define ARIZONA_AIF1TX3_ENA_WIDTH 1 /* AIF1TX3_ENA */ +#define ARIZONA_AIF1TX2_ENA 0x0002 /* AIF1TX2_ENA */ +#define ARIZONA_AIF1TX2_ENA_MASK 0x0002 /* AIF1TX2_ENA */ +#define ARIZONA_AIF1TX2_ENA_SHIFT 1 /* AIF1TX2_ENA */ +#define ARIZONA_AIF1TX2_ENA_WIDTH 1 /* AIF1TX2_ENA */ +#define ARIZONA_AIF1TX1_ENA 0x0001 /* AIF1TX1_ENA */ +#define 
ARIZONA_AIF1TX1_ENA_MASK 0x0001 /* AIF1TX1_ENA */ +#define ARIZONA_AIF1TX1_ENA_SHIFT 0 /* AIF1TX1_ENA */ +#define ARIZONA_AIF1TX1_ENA_WIDTH 1 /* AIF1TX1_ENA */ + +/* + * R1306 (0x51A) - AIF1 Rx Enables + */ +#define ARIZONA_AIF1RX8_ENA 0x0080 /* AIF1RX8_ENA */ +#define ARIZONA_AIF1RX8_ENA_MASK 0x0080 /* AIF1RX8_ENA */ +#define ARIZONA_AIF1RX8_ENA_SHIFT 7 /* AIF1RX8_ENA */ +#define ARIZONA_AIF1RX8_ENA_WIDTH 1 /* AIF1RX8_ENA */ +#define ARIZONA_AIF1RX7_ENA 0x0040 /* AIF1RX7_ENA */ +#define ARIZONA_AIF1RX7_ENA_MASK 0x0040 /* AIF1RX7_ENA */ +#define ARIZONA_AIF1RX7_ENA_SHIFT 6 /* AIF1RX7_ENA */ +#define ARIZONA_AIF1RX7_ENA_WIDTH 1 /* AIF1RX7_ENA */ +#define ARIZONA_AIF1RX6_ENA 0x0020 /* AIF1RX6_ENA */ +#define ARIZONA_AIF1RX6_ENA_MASK 0x0020 /* AIF1RX6_ENA */ +#define ARIZONA_AIF1RX6_ENA_SHIFT 5 /* AIF1RX6_ENA */ +#define ARIZONA_AIF1RX6_ENA_WIDTH 1 /* AIF1RX6_ENA */ +#define ARIZONA_AIF1RX5_ENA 0x0010 /* AIF1RX5_ENA */ +#define ARIZONA_AIF1RX5_ENA_MASK 0x0010 /* AIF1RX5_ENA */ +#define ARIZONA_AIF1RX5_ENA_SHIFT 4 /* AIF1RX5_ENA */ +#define ARIZONA_AIF1RX5_ENA_WIDTH 1 /* AIF1RX5_ENA */ +#define ARIZONA_AIF1RX4_ENA 0x0008 /* AIF1RX4_ENA */ +#define ARIZONA_AIF1RX4_ENA_MASK 0x0008 /* AIF1RX4_ENA */ +#define ARIZONA_AIF1RX4_ENA_SHIFT 3 /* AIF1RX4_ENA */ +#define ARIZONA_AIF1RX4_ENA_WIDTH 1 /* AIF1RX4_ENA */ +#define ARIZONA_AIF1RX3_ENA 0x0004 /* AIF1RX3_ENA */ +#define ARIZONA_AIF1RX3_ENA_MASK 0x0004 /* AIF1RX3_ENA */ +#define ARIZONA_AIF1RX3_ENA_SHIFT 2 /* AIF1RX3_ENA */ +#define ARIZONA_AIF1RX3_ENA_WIDTH 1 /* AIF1RX3_ENA */ +#define ARIZONA_AIF1RX2_ENA 0x0002 /* AIF1RX2_ENA */ +#define ARIZONA_AIF1RX2_ENA_MASK 0x0002 /* AIF1RX2_ENA */ +#define ARIZONA_AIF1RX2_ENA_SHIFT 1 /* AIF1RX2_ENA */ +#define ARIZONA_AIF1RX2_ENA_WIDTH 1 /* AIF1RX2_ENA */ +#define ARIZONA_AIF1RX1_ENA 0x0001 /* AIF1RX1_ENA */ +#define ARIZONA_AIF1RX1_ENA_MASK 0x0001 /* AIF1RX1_ENA */ +#define ARIZONA_AIF1RX1_ENA_SHIFT 0 /* AIF1RX1_ENA */ +#define ARIZONA_AIF1RX1_ENA_WIDTH 1 /* AIF1RX1_ENA */ + +/* + * R1307 (0x51B) - AIF1 Force Write + */ +#define ARIZONA_AIF1_FRC_WR 0x0001 /* AIF1_FRC_WR */ +#define ARIZONA_AIF1_FRC_WR_MASK 0x0001 /* AIF1_FRC_WR */ +#define ARIZONA_AIF1_FRC_WR_SHIFT 0 /* AIF1_FRC_WR */ +#define ARIZONA_AIF1_FRC_WR_WIDTH 1 /* AIF1_FRC_WR */ + +/* + * R1344 (0x540) - AIF2 BCLK Ctrl + */ +#define ARIZONA_AIF2_BCLK_INV 0x0080 /* AIF2_BCLK_INV */ +#define ARIZONA_AIF2_BCLK_INV_MASK 0x0080 /* AIF2_BCLK_INV */ +#define ARIZONA_AIF2_BCLK_INV_SHIFT 7 /* AIF2_BCLK_INV */ +#define ARIZONA_AIF2_BCLK_INV_WIDTH 1 /* AIF2_BCLK_INV */ +#define ARIZONA_AIF2_BCLK_FRC 0x0040 /* AIF2_BCLK_FRC */ +#define ARIZONA_AIF2_BCLK_FRC_MASK 0x0040 /* AIF2_BCLK_FRC */ +#define ARIZONA_AIF2_BCLK_FRC_SHIFT 6 /* AIF2_BCLK_FRC */ +#define ARIZONA_AIF2_BCLK_FRC_WIDTH 1 /* AIF2_BCLK_FRC */ +#define ARIZONA_AIF2_BCLK_MSTR 0x0020 /* AIF2_BCLK_MSTR */ +#define ARIZONA_AIF2_BCLK_MSTR_MASK 0x0020 /* AIF2_BCLK_MSTR */ +#define ARIZONA_AIF2_BCLK_MSTR_SHIFT 5 /* AIF2_BCLK_MSTR */ +#define ARIZONA_AIF2_BCLK_MSTR_WIDTH 1 /* AIF2_BCLK_MSTR */ +#define ARIZONA_AIF2_BCLK_FREQ_MASK 0x001F /* AIF2_BCLK_FREQ - [4:0] */ +#define ARIZONA_AIF2_BCLK_FREQ_SHIFT 0 /* AIF2_BCLK_FREQ - [4:0] */ +#define ARIZONA_AIF2_BCLK_FREQ_WIDTH 5 /* AIF2_BCLK_FREQ - [4:0] */ + +/* + * R1345 (0x541) - AIF2 Tx Pin Ctrl + */ +#define ARIZONA_AIF2TX_DAT_TRI 0x0020 /* AIF2TX_DAT_TRI */ +#define ARIZONA_AIF2TX_DAT_TRI_MASK 0x0020 /* AIF2TX_DAT_TRI */ +#define ARIZONA_AIF2TX_DAT_TRI_SHIFT 5 /* AIF2TX_DAT_TRI */ +#define ARIZONA_AIF2TX_DAT_TRI_WIDTH 1 /* AIF2TX_DAT_TRI */ +#define 
ARIZONA_AIF2TX_LRCLK_SRC 0x0008 /* AIF2TX_LRCLK_SRC */ +#define ARIZONA_AIF2TX_LRCLK_SRC_MASK 0x0008 /* AIF2TX_LRCLK_SRC */ +#define ARIZONA_AIF2TX_LRCLK_SRC_SHIFT 3 /* AIF2TX_LRCLK_SRC */ +#define ARIZONA_AIF2TX_LRCLK_SRC_WIDTH 1 /* AIF2TX_LRCLK_SRC */ +#define ARIZONA_AIF2TX_LRCLK_INV 0x0004 /* AIF2TX_LRCLK_INV */ +#define ARIZONA_AIF2TX_LRCLK_INV_MASK 0x0004 /* AIF2TX_LRCLK_INV */ +#define ARIZONA_AIF2TX_LRCLK_INV_SHIFT 2 /* AIF2TX_LRCLK_INV */ +#define ARIZONA_AIF2TX_LRCLK_INV_WIDTH 1 /* AIF2TX_LRCLK_INV */ +#define ARIZONA_AIF2TX_LRCLK_FRC 0x0002 /* AIF2TX_LRCLK_FRC */ +#define ARIZONA_AIF2TX_LRCLK_FRC_MASK 0x0002 /* AIF2TX_LRCLK_FRC */ +#define ARIZONA_AIF2TX_LRCLK_FRC_SHIFT 1 /* AIF2TX_LRCLK_FRC */ +#define ARIZONA_AIF2TX_LRCLK_FRC_WIDTH 1 /* AIF2TX_LRCLK_FRC */ +#define ARIZONA_AIF2TX_LRCLK_MSTR 0x0001 /* AIF2TX_LRCLK_MSTR */ +#define ARIZONA_AIF2TX_LRCLK_MSTR_MASK 0x0001 /* AIF2TX_LRCLK_MSTR */ +#define ARIZONA_AIF2TX_LRCLK_MSTR_SHIFT 0 /* AIF2TX_LRCLK_MSTR */ +#define ARIZONA_AIF2TX_LRCLK_MSTR_WIDTH 1 /* AIF2TX_LRCLK_MSTR */ + +/* + * R1346 (0x542) - AIF2 Rx Pin Ctrl + */ +#define ARIZONA_AIF2RX_LRCLK_INV 0x0004 /* AIF2RX_LRCLK_INV */ +#define ARIZONA_AIF2RX_LRCLK_INV_MASK 0x0004 /* AIF2RX_LRCLK_INV */ +#define ARIZONA_AIF2RX_LRCLK_INV_SHIFT 2 /* AIF2RX_LRCLK_INV */ +#define ARIZONA_AIF2RX_LRCLK_INV_WIDTH 1 /* AIF2RX_LRCLK_INV */ +#define ARIZONA_AIF2RX_LRCLK_FRC 0x0002 /* AIF2RX_LRCLK_FRC */ +#define ARIZONA_AIF2RX_LRCLK_FRC_MASK 0x0002 /* AIF2RX_LRCLK_FRC */ +#define ARIZONA_AIF2RX_LRCLK_FRC_SHIFT 1 /* AIF2RX_LRCLK_FRC */ +#define ARIZONA_AIF2RX_LRCLK_FRC_WIDTH 1 /* AIF2RX_LRCLK_FRC */ +#define ARIZONA_AIF2RX_LRCLK_MSTR 0x0001 /* AIF2RX_LRCLK_MSTR */ +#define ARIZONA_AIF2RX_LRCLK_MSTR_MASK 0x0001 /* AIF2RX_LRCLK_MSTR */ +#define ARIZONA_AIF2RX_LRCLK_MSTR_SHIFT 0 /* AIF2RX_LRCLK_MSTR */ +#define ARIZONA_AIF2RX_LRCLK_MSTR_WIDTH 1 /* AIF2RX_LRCLK_MSTR */ + +/* + * R1347 (0x543) - AIF2 Rate Ctrl + */ +#define ARIZONA_AIF2_RATE_MASK 0x7800 /* AIF2_RATE - [14:11] */ +#define ARIZONA_AIF2_RATE_SHIFT 11 /* AIF2_RATE - [14:11] */ +#define ARIZONA_AIF2_RATE_WIDTH 4 /* AIF2_RATE - [14:11] */ +#define ARIZONA_AIF2_TRI 0x0040 /* AIF2_TRI */ +#define ARIZONA_AIF2_TRI_MASK 0x0040 /* AIF2_TRI */ +#define ARIZONA_AIF2_TRI_SHIFT 6 /* AIF2_TRI */ +#define ARIZONA_AIF2_TRI_WIDTH 1 /* AIF2_TRI */ + +/* + * R1348 (0x544) - AIF2 Format + */ +#define ARIZONA_AIF2_FMT_MASK 0x0007 /* AIF2_FMT - [2:0] */ +#define ARIZONA_AIF2_FMT_SHIFT 0 /* AIF2_FMT - [2:0] */ +#define ARIZONA_AIF2_FMT_WIDTH 3 /* AIF2_FMT - [2:0] */ + +/* + * R1349 (0x545) - AIF2 Tx BCLK Rate + */ +#define ARIZONA_AIF2TX_BCPF_MASK 0x1FFF /* AIF2TX_BCPF - [12:0] */ +#define ARIZONA_AIF2TX_BCPF_SHIFT 0 /* AIF2TX_BCPF - [12:0] */ +#define ARIZONA_AIF2TX_BCPF_WIDTH 13 /* AIF2TX_BCPF - [12:0] */ + +/* + * R1350 (0x546) - AIF2 Rx BCLK Rate + */ +#define ARIZONA_AIF2RX_BCPF_MASK 0x1FFF /* AIF2RX_BCPF - [12:0] */ +#define ARIZONA_AIF2RX_BCPF_SHIFT 0 /* AIF2RX_BCPF - [12:0] */ +#define ARIZONA_AIF2RX_BCPF_WIDTH 13 /* AIF2RX_BCPF - [12:0] */ + +/* + * R1351 (0x547) - AIF2 Frame Ctrl 1 + */ +#define ARIZONA_AIF2TX_WL_MASK 0x3F00 /* AIF2TX_WL - [13:8] */ +#define ARIZONA_AIF2TX_WL_SHIFT 8 /* AIF2TX_WL - [13:8] */ +#define ARIZONA_AIF2TX_WL_WIDTH 6 /* AIF2TX_WL - [13:8] */ +#define ARIZONA_AIF2TX_SLOT_LEN_MASK 0x00FF /* AIF2TX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF2TX_SLOT_LEN_SHIFT 0 /* AIF2TX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF2TX_SLOT_LEN_WIDTH 8 /* AIF2TX_SLOT_LEN - [7:0] */ + +/* + * R1352 (0x548) - AIF2 Frame Ctrl 2 + */ +#define 
ARIZONA_AIF2RX_WL_MASK 0x3F00 /* AIF2RX_WL - [13:8] */ +#define ARIZONA_AIF2RX_WL_SHIFT 8 /* AIF2RX_WL - [13:8] */ +#define ARIZONA_AIF2RX_WL_WIDTH 6 /* AIF2RX_WL - [13:8] */ +#define ARIZONA_AIF2RX_SLOT_LEN_MASK 0x00FF /* AIF2RX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF2RX_SLOT_LEN_SHIFT 0 /* AIF2RX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF2RX_SLOT_LEN_WIDTH 8 /* AIF2RX_SLOT_LEN - [7:0] */ + +/* + * R1353 (0x549) - AIF2 Frame Ctrl 3 + */ +#define ARIZONA_AIF2TX1_SLOT_MASK 0x003F /* AIF2TX1_SLOT - [5:0] */ +#define ARIZONA_AIF2TX1_SLOT_SHIFT 0 /* AIF2TX1_SLOT - [5:0] */ +#define ARIZONA_AIF2TX1_SLOT_WIDTH 6 /* AIF2TX1_SLOT - [5:0] */ + +/* + * R1354 (0x54A) - AIF2 Frame Ctrl 4 + */ +#define ARIZONA_AIF2TX2_SLOT_MASK 0x003F /* AIF2TX2_SLOT - [5:0] */ +#define ARIZONA_AIF2TX2_SLOT_SHIFT 0 /* AIF2TX2_SLOT - [5:0] */ +#define ARIZONA_AIF2TX2_SLOT_WIDTH 6 /* AIF2TX2_SLOT - [5:0] */ + +/* + * R1355 (0x54B) - AIF2 Frame Ctrl 5 + */ +#define ARIZONA_AIF2TX3_SLOT_MASK 0x003F /* AIF2TX3_SLOT - [5:0] */ +#define ARIZONA_AIF2TX3_SLOT_SHIFT 0 /* AIF2TX3_SLOT - [5:0] */ +#define ARIZONA_AIF2TX3_SLOT_WIDTH 6 /* AIF2TX3_SLOT - [5:0] */ + +/* + * R1356 (0x54C) - AIF2 Frame Ctrl 6 + */ +#define ARIZONA_AIF2TX4_SLOT_MASK 0x003F /* AIF2TX4_SLOT - [5:0] */ +#define ARIZONA_AIF2TX4_SLOT_SHIFT 0 /* AIF2TX4_SLOT - [5:0] */ +#define ARIZONA_AIF2TX4_SLOT_WIDTH 6 /* AIF2TX4_SLOT - [5:0] */ + + +/* + * R1357 (0x54D) - AIF2 Frame Ctrl 7 + */ +#define ARIZONA_AIF2TX5_SLOT_MASK 0x003F /* AIF2TX5_SLOT - [5:0] */ +#define ARIZONA_AIF2TX5_SLOT_SHIFT 0 /* AIF2TX5_SLOT - [5:0] */ +#define ARIZONA_AIF2TX5_SLOT_WIDTH 6 /* AIF2TX5_SLOT - [5:0] */ + +/* + * R1358 (0x54E) - AIF2 Frame Ctrl 8 + */ +#define ARIZONA_AIF2TX6_SLOT_MASK 0x003F /* AIF2TX6_SLOT - [5:0] */ +#define ARIZONA_AIF2TX6_SLOT_SHIFT 0 /* AIF2TX6_SLOT - [5:0] */ +#define ARIZONA_AIF2TX6_SLOT_WIDTH 6 /* AIF2TX6_SLOT - [5:0] */ + +/* + * R1361 (0x551) - AIF2 Frame Ctrl 11 + */ +#define ARIZONA_AIF2RX1_SLOT_MASK 0x003F /* AIF2RX1_SLOT - [5:0] */ +#define ARIZONA_AIF2RX1_SLOT_SHIFT 0 /* AIF2RX1_SLOT - [5:0] */ +#define ARIZONA_AIF2RX1_SLOT_WIDTH 6 /* AIF2RX1_SLOT - [5:0] */ + +/* + * R1362 (0x552) - AIF2 Frame Ctrl 12 + */ +#define ARIZONA_AIF2RX2_SLOT_MASK 0x003F /* AIF2RX2_SLOT - [5:0] */ +#define ARIZONA_AIF2RX2_SLOT_SHIFT 0 /* AIF2RX2_SLOT - [5:0] */ +#define ARIZONA_AIF2RX2_SLOT_WIDTH 6 /* AIF2RX2_SLOT - [5:0] */ + +/* + * R1363 (0x553) - AIF2 Frame Ctrl 13 + */ +#define ARIZONA_AIF2RX3_SLOT_MASK 0x003F /* AIF2RX3_SLOT - [5:0] */ +#define ARIZONA_AIF2RX3_SLOT_SHIFT 0 /* AIF2RX3_SLOT - [5:0] */ +#define ARIZONA_AIF2RX3_SLOT_WIDTH 6 /* AIF2RX3_SLOT - [5:0] */ + +/* + * R1364 (0x554) - AIF2 Frame Ctrl 14 + */ +#define ARIZONA_AIF2RX4_SLOT_MASK 0x003F /* AIF2RX4_SLOT - [5:0] */ +#define ARIZONA_AIF2RX4_SLOT_SHIFT 0 /* AIF2RX4_SLOT - [5:0] */ +#define ARIZONA_AIF2RX4_SLOT_WIDTH 6 /* AIF2RX4_SLOT - [5:0] */ + +/* + * R1365 (0x555) - AIF2 Frame Ctrl 15 + */ +#define ARIZONA_AIF2RX5_SLOT_MASK 0x003F /* AIF2RX5_SLOT - [5:0] */ +#define ARIZONA_AIF2RX5_SLOT_SHIFT 0 /* AIF2RX5_SLOT - [5:0] */ +#define ARIZONA_AIF2RX5_SLOT_WIDTH 6 /* AIF2RX5_SLOT - [5:0] */ + +/* + * R1366 (0x556) - AIF2 Frame Ctrl 16 + */ +#define ARIZONA_AIF2RX6_SLOT_MASK 0x003F /* AIF2RX6_SLOT - [5:0] */ +#define ARIZONA_AIF2RX6_SLOT_SHIFT 0 /* AIF2RX6_SLOT - [5:0] */ +#define ARIZONA_AIF2RX6_SLOT_WIDTH 6 /* AIF2RX6_SLOT - [5:0] */ + +/* + * R1369 (0x559) - AIF2 Tx Enables + */ +#define ARIZONA_AIF2TX6_ENA 0x0020 /* AIF2TX6_ENA */ +#define ARIZONA_AIF2TX6_ENA_MASK 0x0020 /* AIF2TX6_ENA */ +#define 
ARIZONA_AIF2TX6_ENA_SHIFT 5 /* AIF2TX6_ENA */ +#define ARIZONA_AIF2TX6_ENA_WIDTH 1 /* AIF2TX6_ENA */ +#define ARIZONA_AIF2TX5_ENA 0x0010 /* AIF2TX5_ENA */ +#define ARIZONA_AIF2TX5_ENA_MASK 0x0010 /* AIF2TX5_ENA */ +#define ARIZONA_AIF2TX5_ENA_SHIFT 4 /* AIF2TX5_ENA */ +#define ARIZONA_AIF2TX5_ENA_WIDTH 1 /* AIF2TX5_ENA */ +#define ARIZONA_AIF2TX4_ENA 0x0008 /* AIF2TX4_ENA */ +#define ARIZONA_AIF2TX4_ENA_MASK 0x0008 /* AIF2TX4_ENA */ +#define ARIZONA_AIF2TX4_ENA_SHIFT 3 /* AIF2TX4_ENA */ +#define ARIZONA_AIF2TX4_ENA_WIDTH 1 /* AIF2TX4_ENA */ +#define ARIZONA_AIF2TX3_ENA 0x0004 /* AIF2TX3_ENA */ +#define ARIZONA_AIF2TX3_ENA_MASK 0x0004 /* AIF2TX3_ENA */ +#define ARIZONA_AIF2TX3_ENA_SHIFT 2 /* AIF2TX3_ENA */ +#define ARIZONA_AIF2TX3_ENA_WIDTH 1 /* AIF2TX3_ENA */ +#define ARIZONA_AIF2TX2_ENA 0x0002 /* AIF2TX2_ENA */ +#define ARIZONA_AIF2TX2_ENA_MASK 0x0002 /* AIF2TX2_ENA */ +#define ARIZONA_AIF2TX2_ENA_SHIFT 1 /* AIF2TX2_ENA */ +#define ARIZONA_AIF2TX2_ENA_WIDTH 1 /* AIF2TX2_ENA */ +#define ARIZONA_AIF2TX1_ENA 0x0001 /* AIF2TX1_ENA */ +#define ARIZONA_AIF2TX1_ENA_MASK 0x0001 /* AIF2TX1_ENA */ +#define ARIZONA_AIF2TX1_ENA_SHIFT 0 /* AIF2TX1_ENA */ +#define ARIZONA_AIF2TX1_ENA_WIDTH 1 /* AIF2TX1_ENA */ + +/* + * R1370 (0x55A) - AIF2 Rx Enables + */ +#define ARIZONA_AIF2RX6_ENA 0x0020 /* AIF2RX6_ENA */ +#define ARIZONA_AIF2RX6_ENA_MASK 0x0020 /* AIF2RX6_ENA */ +#define ARIZONA_AIF2RX6_ENA_SHIFT 5 /* AIF2RX6_ENA */ +#define ARIZONA_AIF2RX6_ENA_WIDTH 1 /* AIF2RX6_ENA */ +#define ARIZONA_AIF2RX5_ENA 0x0010 /* AIF2RX5_ENA */ +#define ARIZONA_AIF2RX5_ENA_MASK 0x0010 /* AIF2RX5_ENA */ +#define ARIZONA_AIF2RX5_ENA_SHIFT 4 /* AIF2RX5_ENA */ +#define ARIZONA_AIF2RX5_ENA_WIDTH 1 /* AIF2RX5_ENA */ +#define ARIZONA_AIF2RX4_ENA 0x0008 /* AIF2RX4_ENA */ +#define ARIZONA_AIF2RX4_ENA_MASK 0x0008 /* AIF2RX4_ENA */ +#define ARIZONA_AIF2RX4_ENA_SHIFT 3 /* AIF2RX4_ENA */ +#define ARIZONA_AIF2RX4_ENA_WIDTH 1 /* AIF2RX4_ENA */ +#define ARIZONA_AIF2RX3_ENA 0x0004 /* AIF2RX3_ENA */ +#define ARIZONA_AIF2RX3_ENA_MASK 0x0004 /* AIF2RX3_ENA */ +#define ARIZONA_AIF2RX3_ENA_SHIFT 2 /* AIF2RX3_ENA */ +#define ARIZONA_AIF2RX3_ENA_WIDTH 1 /* AIF2RX3_ENA */ +#define ARIZONA_AIF2RX2_ENA 0x0002 /* AIF2RX2_ENA */ +#define ARIZONA_AIF2RX2_ENA_MASK 0x0002 /* AIF2RX2_ENA */ +#define ARIZONA_AIF2RX2_ENA_SHIFT 1 /* AIF2RX2_ENA */ +#define ARIZONA_AIF2RX2_ENA_WIDTH 1 /* AIF2RX2_ENA */ +#define ARIZONA_AIF2RX1_ENA 0x0001 /* AIF2RX1_ENA */ +#define ARIZONA_AIF2RX1_ENA_MASK 0x0001 /* AIF2RX1_ENA */ +#define ARIZONA_AIF2RX1_ENA_SHIFT 0 /* AIF2RX1_ENA */ +#define ARIZONA_AIF2RX1_ENA_WIDTH 1 /* AIF2RX1_ENA */ + +/* + * R1371 (0x55B) - AIF2 Force Write + */ +#define ARIZONA_AIF2_FRC_WR 0x0001 /* AIF2_FRC_WR */ +#define ARIZONA_AIF2_FRC_WR_MASK 0x0001 /* AIF2_FRC_WR */ +#define ARIZONA_AIF2_FRC_WR_SHIFT 0 /* AIF2_FRC_WR */ +#define ARIZONA_AIF2_FRC_WR_WIDTH 1 /* AIF2_FRC_WR */ + +/* + * R1408 (0x580) - AIF3 BCLK Ctrl + */ +#define ARIZONA_AIF3_BCLK_INV 0x0080 /* AIF3_BCLK_INV */ +#define ARIZONA_AIF3_BCLK_INV_MASK 0x0080 /* AIF3_BCLK_INV */ +#define ARIZONA_AIF3_BCLK_INV_SHIFT 7 /* AIF3_BCLK_INV */ +#define ARIZONA_AIF3_BCLK_INV_WIDTH 1 /* AIF3_BCLK_INV */ +#define ARIZONA_AIF3_BCLK_FRC 0x0040 /* AIF3_BCLK_FRC */ +#define ARIZONA_AIF3_BCLK_FRC_MASK 0x0040 /* AIF3_BCLK_FRC */ +#define ARIZONA_AIF3_BCLK_FRC_SHIFT 6 /* AIF3_BCLK_FRC */ +#define ARIZONA_AIF3_BCLK_FRC_WIDTH 1 /* AIF3_BCLK_FRC */ +#define ARIZONA_AIF3_BCLK_MSTR 0x0020 /* AIF3_BCLK_MSTR */ +#define ARIZONA_AIF3_BCLK_MSTR_MASK 0x0020 /* AIF3_BCLK_MSTR */ +#define 
ARIZONA_AIF3_BCLK_MSTR_SHIFT 5 /* AIF3_BCLK_MSTR */ +#define ARIZONA_AIF3_BCLK_MSTR_WIDTH 1 /* AIF3_BCLK_MSTR */ +#define ARIZONA_AIF3_BCLK_FREQ_MASK 0x001F /* AIF3_BCLK_FREQ - [4:0] */ +#define ARIZONA_AIF3_BCLK_FREQ_SHIFT 0 /* AIF3_BCLK_FREQ - [4:0] */ +#define ARIZONA_AIF3_BCLK_FREQ_WIDTH 5 /* AIF3_BCLK_FREQ - [4:0] */ + +/* + * R1409 (0x581) - AIF3 Tx Pin Ctrl + */ +#define ARIZONA_AIF3TX_DAT_TRI 0x0020 /* AIF3TX_DAT_TRI */ +#define ARIZONA_AIF3TX_DAT_TRI_MASK 0x0020 /* AIF3TX_DAT_TRI */ +#define ARIZONA_AIF3TX_DAT_TRI_SHIFT 5 /* AIF3TX_DAT_TRI */ +#define ARIZONA_AIF3TX_DAT_TRI_WIDTH 1 /* AIF3TX_DAT_TRI */ +#define ARIZONA_AIF3TX_LRCLK_SRC 0x0008 /* AIF3TX_LRCLK_SRC */ +#define ARIZONA_AIF3TX_LRCLK_SRC_MASK 0x0008 /* AIF3TX_LRCLK_SRC */ +#define ARIZONA_AIF3TX_LRCLK_SRC_SHIFT 3 /* AIF3TX_LRCLK_SRC */ +#define ARIZONA_AIF3TX_LRCLK_SRC_WIDTH 1 /* AIF3TX_LRCLK_SRC */ +#define ARIZONA_AIF3TX_LRCLK_INV 0x0004 /* AIF3TX_LRCLK_INV */ +#define ARIZONA_AIF3TX_LRCLK_INV_MASK 0x0004 /* AIF3TX_LRCLK_INV */ +#define ARIZONA_AIF3TX_LRCLK_INV_SHIFT 2 /* AIF3TX_LRCLK_INV */ +#define ARIZONA_AIF3TX_LRCLK_INV_WIDTH 1 /* AIF3TX_LRCLK_INV */ +#define ARIZONA_AIF3TX_LRCLK_FRC 0x0002 /* AIF3TX_LRCLK_FRC */ +#define ARIZONA_AIF3TX_LRCLK_FRC_MASK 0x0002 /* AIF3TX_LRCLK_FRC */ +#define ARIZONA_AIF3TX_LRCLK_FRC_SHIFT 1 /* AIF3TX_LRCLK_FRC */ +#define ARIZONA_AIF3TX_LRCLK_FRC_WIDTH 1 /* AIF3TX_LRCLK_FRC */ +#define ARIZONA_AIF3TX_LRCLK_MSTR 0x0001 /* AIF3TX_LRCLK_MSTR */ +#define ARIZONA_AIF3TX_LRCLK_MSTR_MASK 0x0001 /* AIF3TX_LRCLK_MSTR */ +#define ARIZONA_AIF3TX_LRCLK_MSTR_SHIFT 0 /* AIF3TX_LRCLK_MSTR */ +#define ARIZONA_AIF3TX_LRCLK_MSTR_WIDTH 1 /* AIF3TX_LRCLK_MSTR */ + +/* + * R1410 (0x582) - AIF3 Rx Pin Ctrl + */ +#define ARIZONA_AIF3RX_LRCLK_INV 0x0004 /* AIF3RX_LRCLK_INV */ +#define ARIZONA_AIF3RX_LRCLK_INV_MASK 0x0004 /* AIF3RX_LRCLK_INV */ +#define ARIZONA_AIF3RX_LRCLK_INV_SHIFT 2 /* AIF3RX_LRCLK_INV */ +#define ARIZONA_AIF3RX_LRCLK_INV_WIDTH 1 /* AIF3RX_LRCLK_INV */ +#define ARIZONA_AIF3RX_LRCLK_FRC 0x0002 /* AIF3RX_LRCLK_FRC */ +#define ARIZONA_AIF3RX_LRCLK_FRC_MASK 0x0002 /* AIF3RX_LRCLK_FRC */ +#define ARIZONA_AIF3RX_LRCLK_FRC_SHIFT 1 /* AIF3RX_LRCLK_FRC */ +#define ARIZONA_AIF3RX_LRCLK_FRC_WIDTH 1 /* AIF3RX_LRCLK_FRC */ +#define ARIZONA_AIF3RX_LRCLK_MSTR 0x0001 /* AIF3RX_LRCLK_MSTR */ +#define ARIZONA_AIF3RX_LRCLK_MSTR_MASK 0x0001 /* AIF3RX_LRCLK_MSTR */ +#define ARIZONA_AIF3RX_LRCLK_MSTR_SHIFT 0 /* AIF3RX_LRCLK_MSTR */ +#define ARIZONA_AIF3RX_LRCLK_MSTR_WIDTH 1 /* AIF3RX_LRCLK_MSTR */ + +/* + * R1411 (0x583) - AIF3 Rate Ctrl + */ +#define ARIZONA_AIF3_RATE_MASK 0x7800 /* AIF3_RATE - [14:11] */ +#define ARIZONA_AIF3_RATE_SHIFT 11 /* AIF3_RATE - [14:11] */ +#define ARIZONA_AIF3_RATE_WIDTH 4 /* AIF3_RATE - [14:11] */ +#define ARIZONA_AIF3_TRI 0x0040 /* AIF3_TRI */ +#define ARIZONA_AIF3_TRI_MASK 0x0040 /* AIF3_TRI */ +#define ARIZONA_AIF3_TRI_SHIFT 6 /* AIF3_TRI */ +#define ARIZONA_AIF3_TRI_WIDTH 1 /* AIF3_TRI */ + +/* + * R1412 (0x584) - AIF3 Format + */ +#define ARIZONA_AIF3_FMT_MASK 0x0007 /* AIF3_FMT - [2:0] */ +#define ARIZONA_AIF3_FMT_SHIFT 0 /* AIF3_FMT - [2:0] */ +#define ARIZONA_AIF3_FMT_WIDTH 3 /* AIF3_FMT - [2:0] */ + +/* + * R1413 (0x585) - AIF3 Tx BCLK Rate + */ +#define ARIZONA_AIF3TX_BCPF_MASK 0x1FFF /* AIF3TX_BCPF - [12:0] */ +#define ARIZONA_AIF3TX_BCPF_SHIFT 0 /* AIF3TX_BCPF - [12:0] */ +#define ARIZONA_AIF3TX_BCPF_WIDTH 13 /* AIF3TX_BCPF - [12:0] */ + +/* + * R1414 (0x586) - AIF3 Rx BCLK Rate + */ +#define ARIZONA_AIF3RX_BCPF_MASK 0x1FFF /* AIF3RX_BCPF - [12:0] */ +#define 
ARIZONA_AIF3RX_BCPF_SHIFT 0 /* AIF3RX_BCPF - [12:0] */ +#define ARIZONA_AIF3RX_BCPF_WIDTH 13 /* AIF3RX_BCPF - [12:0] */ + +/* + * R1415 (0x587) - AIF3 Frame Ctrl 1 + */ +#define ARIZONA_AIF3TX_WL_MASK 0x3F00 /* AIF3TX_WL - [13:8] */ +#define ARIZONA_AIF3TX_WL_SHIFT 8 /* AIF3TX_WL - [13:8] */ +#define ARIZONA_AIF3TX_WL_WIDTH 6 /* AIF3TX_WL - [13:8] */ +#define ARIZONA_AIF3TX_SLOT_LEN_MASK 0x00FF /* AIF3TX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF3TX_SLOT_LEN_SHIFT 0 /* AIF3TX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF3TX_SLOT_LEN_WIDTH 8 /* AIF3TX_SLOT_LEN - [7:0] */ + +/* + * R1416 (0x588) - AIF3 Frame Ctrl 2 + */ +#define ARIZONA_AIF3RX_WL_MASK 0x3F00 /* AIF3RX_WL - [13:8] */ +#define ARIZONA_AIF3RX_WL_SHIFT 8 /* AIF3RX_WL - [13:8] */ +#define ARIZONA_AIF3RX_WL_WIDTH 6 /* AIF3RX_WL - [13:8] */ +#define ARIZONA_AIF3RX_SLOT_LEN_MASK 0x00FF /* AIF3RX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF3RX_SLOT_LEN_SHIFT 0 /* AIF3RX_SLOT_LEN - [7:0] */ +#define ARIZONA_AIF3RX_SLOT_LEN_WIDTH 8 /* AIF3RX_SLOT_LEN - [7:0] */ + +/* + * R1417 (0x589) - AIF3 Frame Ctrl 3 + */ +#define ARIZONA_AIF3TX1_SLOT_MASK 0x003F /* AIF3TX1_SLOT - [5:0] */ +#define ARIZONA_AIF3TX1_SLOT_SHIFT 0 /* AIF3TX1_SLOT - [5:0] */ +#define ARIZONA_AIF3TX1_SLOT_WIDTH 6 /* AIF3TX1_SLOT - [5:0] */ + +/* + * R1418 (0x58A) - AIF3 Frame Ctrl 4 + */ +#define ARIZONA_AIF3TX2_SLOT_MASK 0x003F /* AIF3TX2_SLOT - [5:0] */ +#define ARIZONA_AIF3TX2_SLOT_SHIFT 0 /* AIF3TX2_SLOT - [5:0] */ +#define ARIZONA_AIF3TX2_SLOT_WIDTH 6 /* AIF3TX2_SLOT - [5:0] */ + +/* + * R1425 (0x591) - AIF3 Frame Ctrl 11 + */ +#define ARIZONA_AIF3RX1_SLOT_MASK 0x003F /* AIF3RX1_SLOT - [5:0] */ +#define ARIZONA_AIF3RX1_SLOT_SHIFT 0 /* AIF3RX1_SLOT - [5:0] */ +#define ARIZONA_AIF3RX1_SLOT_WIDTH 6 /* AIF3RX1_SLOT - [5:0] */ + +/* + * R1426 (0x592) - AIF3 Frame Ctrl 12 + */ +#define ARIZONA_AIF3RX2_SLOT_MASK 0x003F /* AIF3RX2_SLOT - [5:0] */ +#define ARIZONA_AIF3RX2_SLOT_SHIFT 0 /* AIF3RX2_SLOT - [5:0] */ +#define ARIZONA_AIF3RX2_SLOT_WIDTH 6 /* AIF3RX2_SLOT - [5:0] */ + +/* + * R1433 (0x599) - AIF3 Tx Enables + */ +#define ARIZONA_AIF3TX2_ENA 0x0002 /* AIF3TX2_ENA */ +#define ARIZONA_AIF3TX2_ENA_MASK 0x0002 /* AIF3TX2_ENA */ +#define ARIZONA_AIF3TX2_ENA_SHIFT 1 /* AIF3TX2_ENA */ +#define ARIZONA_AIF3TX2_ENA_WIDTH 1 /* AIF3TX2_ENA */ +#define ARIZONA_AIF3TX1_ENA 0x0001 /* AIF3TX1_ENA */ +#define ARIZONA_AIF3TX1_ENA_MASK 0x0001 /* AIF3TX1_ENA */ +#define ARIZONA_AIF3TX1_ENA_SHIFT 0 /* AIF3TX1_ENA */ +#define ARIZONA_AIF3TX1_ENA_WIDTH 1 /* AIF3TX1_ENA */ + +/* + * R1434 (0x59A) - AIF3 Rx Enables + */ +#define ARIZONA_AIF3RX2_ENA 0x0002 /* AIF3RX2_ENA */ +#define ARIZONA_AIF3RX2_ENA_MASK 0x0002 /* AIF3RX2_ENA */ +#define ARIZONA_AIF3RX2_ENA_SHIFT 1 /* AIF3RX2_ENA */ +#define ARIZONA_AIF3RX2_ENA_WIDTH 1 /* AIF3RX2_ENA */ +#define ARIZONA_AIF3RX1_ENA 0x0001 /* AIF3RX1_ENA */ +#define ARIZONA_AIF3RX1_ENA_MASK 0x0001 /* AIF3RX1_ENA */ +#define ARIZONA_AIF3RX1_ENA_SHIFT 0 /* AIF3RX1_ENA */ +#define ARIZONA_AIF3RX1_ENA_WIDTH 1 /* AIF3RX1_ENA */ + +/* + * R1435 (0x59B) - AIF3 Force Write + */ +#define ARIZONA_AIF3_FRC_WR 0x0001 /* AIF3_FRC_WR */ +#define ARIZONA_AIF3_FRC_WR_MASK 0x0001 /* AIF3_FRC_WR */ +#define ARIZONA_AIF3_FRC_WR_SHIFT 0 /* AIF3_FRC_WR */ +#define ARIZONA_AIF3_FRC_WR_WIDTH 1 /* AIF3_FRC_WR */ + +/* + * R1474 (0x5C2) - SPD1 TX Control + */ +#define ARIZONA_SPD1_VAL2 0x2000 /* SPD1_VAL2 */ +#define ARIZONA_SPD1_VAL2_MASK 0x2000 /* SPD1_VAL2 */ +#define ARIZONA_SPD1_VAL2_SHIFT 13 /* SPD1_VAL2 */ +#define ARIZONA_SPD1_VAL2_WIDTH 1 /* SPD1_VAL2 */ +#define ARIZONA_SPD1_VAL1 
0x1000 /* SPD1_VAL1 */ +#define ARIZONA_SPD1_VAL1_MASK 0x1000 /* SPD1_VAL1 */ +#define ARIZONA_SPD1_VAL1_SHIFT 12 /* SPD1_VAL1 */ +#define ARIZONA_SPD1_VAL1_WIDTH 1 /* SPD1_VAL1 */ +#define ARIZONA_SPD1_RATE_MASK 0x00F0 /* SPD1_RATE */ +#define ARIZONA_SPD1_RATE_SHIFT 4 /* SPD1_RATE */ +#define ARIZONA_SPD1_RATE_WIDTH 4 /* SPD1_RATE */ +#define ARIZONA_SPD1_ENA 0x0001 /* SPD1_ENA */ +#define ARIZONA_SPD1_ENA_MASK 0x0001 /* SPD1_ENA */ +#define ARIZONA_SPD1_ENA_SHIFT 0 /* SPD1_ENA */ +#define ARIZONA_SPD1_ENA_WIDTH 1 /* SPD1_ENA */ + +/* + * R1475 (0x5C3) - SPD1 TX Channel Status 1 + */ +#define ARIZONA_SPD1_CATCODE_MASK 0xFF00 /* SPD1_CATCODE */ +#define ARIZONA_SPD1_CATCODE_SHIFT 8 /* SPD1_CATCODE */ +#define ARIZONA_SPD1_CATCODE_WIDTH 8 /* SPD1_CATCODE */ +#define ARIZONA_SPD1_CHSTMODE_MASK 0x00C0 /* SPD1_CHSTMODE */ +#define ARIZONA_SPD1_CHSTMODE_SHIFT 6 /* SPD1_CHSTMODE */ +#define ARIZONA_SPD1_CHSTMODE_WIDTH 2 /* SPD1_CHSTMODE */ +#define ARIZONA_SPD1_PREEMPH_MASK 0x0038 /* SPD1_PREEMPH */ +#define ARIZONA_SPD1_PREEMPH_SHIFT 3 /* SPD1_PREEMPH */ +#define ARIZONA_SPD1_PREEMPH_WIDTH 3 /* SPD1_PREEMPH */ +#define ARIZONA_SPD1_NOCOPY 0x0004 /* SPD1_NOCOPY */ +#define ARIZONA_SPD1_NOCOPY_MASK 0x0004 /* SPD1_NOCOPY */ +#define ARIZONA_SPD1_NOCOPY_SHIFT 2 /* SPD1_NOCOPY */ +#define ARIZONA_SPD1_NOCOPY_WIDTH 1 /* SPD1_NOCOPY */ +#define ARIZONA_SPD1_NOAUDIO 0x0002 /* SPD1_NOAUDIO */ +#define ARIZONA_SPD1_NOAUDIO_MASK 0x0002 /* SPD1_NOAUDIO */ +#define ARIZONA_SPD1_NOAUDIO_SHIFT 1 /* SPD1_NOAUDIO */ +#define ARIZONA_SPD1_NOAUDIO_WIDTH 1 /* SPD1_NOAUDIO */ +#define ARIZONA_SPD1_PRO 0x0001 /* SPD1_PRO */ +#define ARIZONA_SPD1_PRO_MASK 0x0001 /* SPD1_PRO */ +#define ARIZONA_SPD1_PRO_SHIFT 0 /* SPD1_PRO */ +#define ARIZONA_SPD1_PRO_WIDTH 1 /* SPD1_PRO */ + +/* + * R1476 (0x5C4) - SPD1 TX Channel Status 2 + */ +#define ARIZONA_SPD1_FREQ_MASK 0xF000 /* SPD1_FREQ */ +#define ARIZONA_SPD1_FREQ_SHIFT 12 /* SPD1_FREQ */ +#define ARIZONA_SPD1_FREQ_WIDTH 4 /* SPD1_FREQ */ +#define ARIZONA_SPD1_CHNUM2_MASK 0x0F00 /* SPD1_CHNUM2 */ +#define ARIZONA_SPD1_CHNUM2_SHIFT 8 /* SPD1_CHNUM2 */ +#define ARIZONA_SPD1_CHNUM2_WIDTH 4 /* SPD1_CHNUM2 */ +#define ARIZONA_SPD1_CHNUM1_MASK 0x00F0 /* SPD1_CHNUM1 */ +#define ARIZONA_SPD1_CHNUM1_SHIFT 4 /* SPD1_CHNUM1 */ +#define ARIZONA_SPD1_CHNUM1_WIDTH 4 /* SPD1_CHNUM1 */ +#define ARIZONA_SPD1_SRCNUM_MASK 0x000F /* SPD1_SRCNUM */ +#define ARIZONA_SPD1_SRCNUM_SHIFT 0 /* SPD1_SRCNUM */ +#define ARIZONA_SPD1_SRCNUM_WIDTH 4 /* SPD1_SRCNUM */ + +/* + * R1477 (0x5C5) - SPD1 TX Channel Status 3 + */ +#define ARIZONA_SPD1_ORGSAMP_MASK 0x0F00 /* SPD1_ORGSAMP */ +#define ARIZONA_SPD1_ORGSAMP_SHIFT 8 /* SPD1_ORGSAMP */ +#define ARIZONA_SPD1_ORGSAMP_WIDTH 4 /* SPD1_ORGSAMP */ +#define ARIZONA_SPD1_TXWL_MASK 0x00E0 /* SPD1_TXWL */ +#define ARIZONA_SPD1_TXWL_SHIFT 5 /* SPD1_TXWL */ +#define ARIZONA_SPD1_TXWL_WIDTH 3 /* SPD1_TXWL */ +#define ARIZONA_SPD1_MAXWL 0x0010 /* SPD1_MAXWL */ +#define ARIZONA_SPD1_MAXWL_MASK 0x0010 /* SPD1_MAXWL */ +#define ARIZONA_SPD1_MAXWL_SHIFT 4 /* SPD1_MAXWL */ +#define ARIZONA_SPD1_MAXWL_WIDTH 1 /* SPD1_MAXWL */ +#define ARIZONA_SPD1_CS31_30_MASK 0x000C /* SPD1_CS31_30 */ +#define ARIZONA_SPD1_CS31_30_SHIFT 2 /* SPD1_CS31_30 */ +#define ARIZONA_SPD1_CS31_30_WIDTH 2 /* SPD1_CS31_30 */ +#define ARIZONA_SPD1_CLKACU_MASK 0x0003 /* SPD1_CLKACU */ +#define ARIZONA_SPD1_CLKACU_SHIFT 0 /* SPD1_CLKACU */ +#define ARIZONA_SPD1_CLKACU_WIDTH 2 /* SPD1_CLKACU */ + +/* + * R1507 (0x5E3) - SLIMbus Framer Ref Gear + */ +#define ARIZONA_SLIMCLK_SRC 0x0010 /* SLIMCLK_SRC */ 
+#define ARIZONA_SLIMCLK_SRC_MASK 0x0010 /* SLIMCLK_SRC */ +#define ARIZONA_SLIMCLK_SRC_SHIFT 4 /* SLIMCLK_SRC */ +#define ARIZONA_SLIMCLK_SRC_WIDTH 1 /* SLIMCLK_SRC */ +#define ARIZONA_FRAMER_REF_GEAR_MASK 0x000F /* FRAMER_REF_GEAR - [3:0] */ +#define ARIZONA_FRAMER_REF_GEAR_SHIFT 0 /* FRAMER_REF_GEAR - [3:0] */ +#define ARIZONA_FRAMER_REF_GEAR_WIDTH 4 /* FRAMER_REF_GEAR - [3:0] */ + +/* + * R1509 (0x5E5) - SLIMbus Rates 1 + */ +#define ARIZONA_SLIMRX2_RATE_MASK 0x7800 /* SLIMRX2_RATE - [14:11] */ +#define ARIZONA_SLIMRX2_RATE_SHIFT 11 /* SLIMRX2_RATE - [14:11] */ +#define ARIZONA_SLIMRX2_RATE_WIDTH 4 /* SLIMRX2_RATE - [14:11] */ +#define ARIZONA_SLIMRX1_RATE_MASK 0x0078 /* SLIMRX1_RATE - [6:3] */ +#define ARIZONA_SLIMRX1_RATE_SHIFT 3 /* SLIMRX1_RATE - [6:3] */ +#define ARIZONA_SLIMRX1_RATE_WIDTH 4 /* SLIMRX1_RATE - [6:3] */ + +/* + * R1510 (0x5E6) - SLIMbus Rates 2 + */ +#define ARIZONA_SLIMRX4_RATE_MASK 0x7800 /* SLIMRX4_RATE - [14:11] */ +#define ARIZONA_SLIMRX4_RATE_SHIFT 11 /* SLIMRX4_RATE - [14:11] */ +#define ARIZONA_SLIMRX4_RATE_WIDTH 4 /* SLIMRX4_RATE - [14:11] */ +#define ARIZONA_SLIMRX3_RATE_MASK 0x0078 /* SLIMRX3_RATE - [6:3] */ +#define ARIZONA_SLIMRX3_RATE_SHIFT 3 /* SLIMRX3_RATE - [6:3] */ +#define ARIZONA_SLIMRX3_RATE_WIDTH 4 /* SLIMRX3_RATE - [6:3] */ + +/* + * R1511 (0x5E7) - SLIMbus Rates 3 + */ +#define ARIZONA_SLIMRX6_RATE_MASK 0x7800 /* SLIMRX6_RATE - [14:11] */ +#define ARIZONA_SLIMRX6_RATE_SHIFT 11 /* SLIMRX6_RATE - [14:11] */ +#define ARIZONA_SLIMRX6_RATE_WIDTH 4 /* SLIMRX6_RATE - [14:11] */ +#define ARIZONA_SLIMRX5_RATE_MASK 0x0078 /* SLIMRX5_RATE - [6:3] */ +#define ARIZONA_SLIMRX5_RATE_SHIFT 3 /* SLIMRX5_RATE - [6:3] */ +#define ARIZONA_SLIMRX5_RATE_WIDTH 4 /* SLIMRX5_RATE - [6:3] */ + +/* + * R1512 (0x5E8) - SLIMbus Rates 4 + */ +#define ARIZONA_SLIMRX8_RATE_MASK 0x7800 /* SLIMRX8_RATE - [14:11] */ +#define ARIZONA_SLIMRX8_RATE_SHIFT 11 /* SLIMRX8_RATE - [14:11] */ +#define ARIZONA_SLIMRX8_RATE_WIDTH 4 /* SLIMRX8_RATE - [14:11] */ +#define ARIZONA_SLIMRX7_RATE_MASK 0x0078 /* SLIMRX7_RATE - [6:3] */ +#define ARIZONA_SLIMRX7_RATE_SHIFT 3 /* SLIMRX7_RATE - [6:3] */ +#define ARIZONA_SLIMRX7_RATE_WIDTH 4 /* SLIMRX7_RATE - [6:3] */ + +/* + * R1513 (0x5E9) - SLIMbus Rates 5 + */ +#define ARIZONA_SLIMTX2_RATE_MASK 0x7800 /* SLIMTX2_RATE - [14:11] */ +#define ARIZONA_SLIMTX2_RATE_SHIFT 11 /* SLIMTX2_RATE - [14:11] */ +#define ARIZONA_SLIMTX2_RATE_WIDTH 4 /* SLIMTX2_RATE - [14:11] */ +#define ARIZONA_SLIMTX1_RATE_MASK 0x0078 /* SLIMTX1_RATE - [6:3] */ +#define ARIZONA_SLIMTX1_RATE_SHIFT 3 /* SLIMTX1_RATE - [6:3] */ +#define ARIZONA_SLIMTX1_RATE_WIDTH 4 /* SLIMTX1_RATE - [6:3] */ + +/* + * R1514 (0x5EA) - SLIMbus Rates 6 + */ +#define ARIZONA_SLIMTX4_RATE_MASK 0x7800 /* SLIMTX4_RATE - [14:11] */ +#define ARIZONA_SLIMTX4_RATE_SHIFT 11 /* SLIMTX4_RATE - [14:11] */ +#define ARIZONA_SLIMTX4_RATE_WIDTH 4 /* SLIMTX4_RATE - [14:11] */ +#define ARIZONA_SLIMTX3_RATE_MASK 0x0078 /* SLIMTX3_RATE - [6:3] */ +#define ARIZONA_SLIMTX3_RATE_SHIFT 3 /* SLIMTX3_RATE - [6:3] */ +#define ARIZONA_SLIMTX3_RATE_WIDTH 4 /* SLIMTX3_RATE - [6:3] */ + +/* + * R1515 (0x5EB) - SLIMbus Rates 7 + */ +#define ARIZONA_SLIMTX6_RATE_MASK 0x7800 /* SLIMTX6_RATE - [14:11] */ +#define ARIZONA_SLIMTX6_RATE_SHIFT 11 /* SLIMTX6_RATE - [14:11] */ +#define ARIZONA_SLIMTX6_RATE_WIDTH 4 /* SLIMTX6_RATE - [14:11] */ +#define ARIZONA_SLIMTX5_RATE_MASK 0x0078 /* SLIMTX5_RATE - [6:3] */ +#define ARIZONA_SLIMTX5_RATE_SHIFT 3 /* SLIMTX5_RATE - [6:3] */ +#define ARIZONA_SLIMTX5_RATE_WIDTH 4 /* SLIMTX5_RATE - [6:3] */ + 
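Every field above follows the same *_MASK/*_SHIFT/*_WIDTH pattern: a value is read back as (val & MASK) >> SHIFT and programmed by shifting the new value under the mask in a read-modify-write. As a minimal sketch (illustrative only, not part of the header being added), the SLIMRX1_RATE field defined above could be driven through the codec's regmap roughly as follows; it assumes the usual struct arizona handle from <linux/mfd/arizona/core.h>, and the helper name is hypothetical:

#include <linux/regmap.h>
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/registers.h>

/* Hypothetical helper: program SLIMRX1_RATE, bits [6:3] of SLIMbus Rates 1 (0x5E5). */
static int example_set_slimrx1_rate(struct arizona *arizona, unsigned int rate)
{
	/* Update only the SLIMRX1_RATE bits; SLIMRX2_RATE in the same register is untouched. */
	return regmap_update_bits(arizona->regmap, 0x5E5,
				  ARIZONA_SLIMRX1_RATE_MASK,
				  rate << ARIZONA_SLIMRX1_RATE_SHIFT);
}

The corresponding read is a regmap_read() followed by (val & ARIZONA_SLIMRX1_RATE_MASK) >> ARIZONA_SLIMRX1_RATE_SHIFT.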
+/* + * R1516 (0x5EC) - SLIMbus Rates 8 + */ +#define ARIZONA_SLIMTX8_RATE_MASK 0x7800 /* SLIMTX8_RATE - [14:11] */ +#define ARIZONA_SLIMTX8_RATE_SHIFT 11 /* SLIMTX8_RATE - [14:11] */ +#define ARIZONA_SLIMTX8_RATE_WIDTH 4 /* SLIMTX8_RATE - [14:11] */ +#define ARIZONA_SLIMTX7_RATE_MASK 0x0078 /* SLIMTX7_RATE - [6:3] */ +#define ARIZONA_SLIMTX7_RATE_SHIFT 3 /* SLIMTX7_RATE - [6:3] */ +#define ARIZONA_SLIMTX7_RATE_WIDTH 4 /* SLIMTX7_RATE - [6:3] */ + +/* + * R1525 (0x5F5) - SLIMbus RX Channel Enable + */ +#define ARIZONA_SLIMRX8_ENA 0x0080 /* SLIMRX8_ENA */ +#define ARIZONA_SLIMRX8_ENA_MASK 0x0080 /* SLIMRX8_ENA */ +#define ARIZONA_SLIMRX8_ENA_SHIFT 7 /* SLIMRX8_ENA */ +#define ARIZONA_SLIMRX8_ENA_WIDTH 1 /* SLIMRX8_ENA */ +#define ARIZONA_SLIMRX7_ENA 0x0040 /* SLIMRX7_ENA */ +#define ARIZONA_SLIMRX7_ENA_MASK 0x0040 /* SLIMRX7_ENA */ +#define ARIZONA_SLIMRX7_ENA_SHIFT 6 /* SLIMRX7_ENA */ +#define ARIZONA_SLIMRX7_ENA_WIDTH 1 /* SLIMRX7_ENA */ +#define ARIZONA_SLIMRX6_ENA 0x0020 /* SLIMRX6_ENA */ +#define ARIZONA_SLIMRX6_ENA_MASK 0x0020 /* SLIMRX6_ENA */ +#define ARIZONA_SLIMRX6_ENA_SHIFT 5 /* SLIMRX6_ENA */ +#define ARIZONA_SLIMRX6_ENA_WIDTH 1 /* SLIMRX6_ENA */ +#define ARIZONA_SLIMRX5_ENA 0x0010 /* SLIMRX5_ENA */ +#define ARIZONA_SLIMRX5_ENA_MASK 0x0010 /* SLIMRX5_ENA */ +#define ARIZONA_SLIMRX5_ENA_SHIFT 4 /* SLIMRX5_ENA */ +#define ARIZONA_SLIMRX5_ENA_WIDTH 1 /* SLIMRX5_ENA */ +#define ARIZONA_SLIMRX4_ENA 0x0008 /* SLIMRX4_ENA */ +#define ARIZONA_SLIMRX4_ENA_MASK 0x0008 /* SLIMRX4_ENA */ +#define ARIZONA_SLIMRX4_ENA_SHIFT 3 /* SLIMRX4_ENA */ +#define ARIZONA_SLIMRX4_ENA_WIDTH 1 /* SLIMRX4_ENA */ +#define ARIZONA_SLIMRX3_ENA 0x0004 /* SLIMRX3_ENA */ +#define ARIZONA_SLIMRX3_ENA_MASK 0x0004 /* SLIMRX3_ENA */ +#define ARIZONA_SLIMRX3_ENA_SHIFT 2 /* SLIMRX3_ENA */ +#define ARIZONA_SLIMRX3_ENA_WIDTH 1 /* SLIMRX3_ENA */ +#define ARIZONA_SLIMRX2_ENA 0x0002 /* SLIMRX2_ENA */ +#define ARIZONA_SLIMRX2_ENA_MASK 0x0002 /* SLIMRX2_ENA */ +#define ARIZONA_SLIMRX2_ENA_SHIFT 1 /* SLIMRX2_ENA */ +#define ARIZONA_SLIMRX2_ENA_WIDTH 1 /* SLIMRX2_ENA */ +#define ARIZONA_SLIMRX1_ENA 0x0001 /* SLIMRX1_ENA */ +#define ARIZONA_SLIMRX1_ENA_MASK 0x0001 /* SLIMRX1_ENA */ +#define ARIZONA_SLIMRX1_ENA_SHIFT 0 /* SLIMRX1_ENA */ +#define ARIZONA_SLIMRX1_ENA_WIDTH 1 /* SLIMRX1_ENA */ + +/* + * R1526 (0x5F6) - SLIMbus TX Channel Enable + */ +#define ARIZONA_SLIMTX8_ENA 0x0080 /* SLIMTX8_ENA */ +#define ARIZONA_SLIMTX8_ENA_MASK 0x0080 /* SLIMTX8_ENA */ +#define ARIZONA_SLIMTX8_ENA_SHIFT 7 /* SLIMTX8_ENA */ +#define ARIZONA_SLIMTX8_ENA_WIDTH 1 /* SLIMTX8_ENA */ +#define ARIZONA_SLIMTX7_ENA 0x0040 /* SLIMTX7_ENA */ +#define ARIZONA_SLIMTX7_ENA_MASK 0x0040 /* SLIMTX7_ENA */ +#define ARIZONA_SLIMTX7_ENA_SHIFT 6 /* SLIMTX7_ENA */ +#define ARIZONA_SLIMTX7_ENA_WIDTH 1 /* SLIMTX7_ENA */ +#define ARIZONA_SLIMTX6_ENA 0x0020 /* SLIMTX6_ENA */ +#define ARIZONA_SLIMTX6_ENA_MASK 0x0020 /* SLIMTX6_ENA */ +#define ARIZONA_SLIMTX6_ENA_SHIFT 5 /* SLIMTX6_ENA */ +#define ARIZONA_SLIMTX6_ENA_WIDTH 1 /* SLIMTX6_ENA */ +#define ARIZONA_SLIMTX5_ENA 0x0010 /* SLIMTX5_ENA */ +#define ARIZONA_SLIMTX5_ENA_MASK 0x0010 /* SLIMTX5_ENA */ +#define ARIZONA_SLIMTX5_ENA_SHIFT 4 /* SLIMTX5_ENA */ +#define ARIZONA_SLIMTX5_ENA_WIDTH 1 /* SLIMTX5_ENA */ +#define ARIZONA_SLIMTX4_ENA 0x0008 /* SLIMTX4_ENA */ +#define ARIZONA_SLIMTX4_ENA_MASK 0x0008 /* SLIMTX4_ENA */ +#define ARIZONA_SLIMTX4_ENA_SHIFT 3 /* SLIMTX4_ENA */ +#define ARIZONA_SLIMTX4_ENA_WIDTH 1 /* SLIMTX4_ENA */ +#define ARIZONA_SLIMTX3_ENA 0x0004 /* SLIMTX3_ENA */ +#define ARIZONA_SLIMTX3_ENA_MASK 
0x0004 /* SLIMTX3_ENA */ +#define ARIZONA_SLIMTX3_ENA_SHIFT 2 /* SLIMTX3_ENA */ +#define ARIZONA_SLIMTX3_ENA_WIDTH 1 /* SLIMTX3_ENA */ +#define ARIZONA_SLIMTX2_ENA 0x0002 /* SLIMTX2_ENA */ +#define ARIZONA_SLIMTX2_ENA_MASK 0x0002 /* SLIMTX2_ENA */ +#define ARIZONA_SLIMTX2_ENA_SHIFT 1 /* SLIMTX2_ENA */ +#define ARIZONA_SLIMTX2_ENA_WIDTH 1 /* SLIMTX2_ENA */ +#define ARIZONA_SLIMTX1_ENA 0x0001 /* SLIMTX1_ENA */ +#define ARIZONA_SLIMTX1_ENA_MASK 0x0001 /* SLIMTX1_ENA */ +#define ARIZONA_SLIMTX1_ENA_SHIFT 0 /* SLIMTX1_ENA */ +#define ARIZONA_SLIMTX1_ENA_WIDTH 1 /* SLIMTX1_ENA */ + +/* + * R1527 (0x5F7) - SLIMbus RX Port Status + */ +#define ARIZONA_SLIMRX8_PORT_STS 0x0080 /* SLIMRX8_PORT_STS */ +#define ARIZONA_SLIMRX8_PORT_STS_MASK 0x0080 /* SLIMRX8_PORT_STS */ +#define ARIZONA_SLIMRX8_PORT_STS_SHIFT 7 /* SLIMRX8_PORT_STS */ +#define ARIZONA_SLIMRX8_PORT_STS_WIDTH 1 /* SLIMRX8_PORT_STS */ +#define ARIZONA_SLIMRX7_PORT_STS 0x0040 /* SLIMRX7_PORT_STS */ +#define ARIZONA_SLIMRX7_PORT_STS_MASK 0x0040 /* SLIMRX7_PORT_STS */ +#define ARIZONA_SLIMRX7_PORT_STS_SHIFT 6 /* SLIMRX7_PORT_STS */ +#define ARIZONA_SLIMRX7_PORT_STS_WIDTH 1 /* SLIMRX7_PORT_STS */ +#define ARIZONA_SLIMRX6_PORT_STS 0x0020 /* SLIMRX6_PORT_STS */ +#define ARIZONA_SLIMRX6_PORT_STS_MASK 0x0020 /* SLIMRX6_PORT_STS */ +#define ARIZONA_SLIMRX6_PORT_STS_SHIFT 5 /* SLIMRX6_PORT_STS */ +#define ARIZONA_SLIMRX6_PORT_STS_WIDTH 1 /* SLIMRX6_PORT_STS */ +#define ARIZONA_SLIMRX5_PORT_STS 0x0010 /* SLIMRX5_PORT_STS */ +#define ARIZONA_SLIMRX5_PORT_STS_MASK 0x0010 /* SLIMRX5_PORT_STS */ +#define ARIZONA_SLIMRX5_PORT_STS_SHIFT 4 /* SLIMRX5_PORT_STS */ +#define ARIZONA_SLIMRX5_PORT_STS_WIDTH 1 /* SLIMRX5_PORT_STS */ +#define ARIZONA_SLIMRX4_PORT_STS 0x0008 /* SLIMRX4_PORT_STS */ +#define ARIZONA_SLIMRX4_PORT_STS_MASK 0x0008 /* SLIMRX4_PORT_STS */ +#define ARIZONA_SLIMRX4_PORT_STS_SHIFT 3 /* SLIMRX4_PORT_STS */ +#define ARIZONA_SLIMRX4_PORT_STS_WIDTH 1 /* SLIMRX4_PORT_STS */ +#define ARIZONA_SLIMRX3_PORT_STS 0x0004 /* SLIMRX3_PORT_STS */ +#define ARIZONA_SLIMRX3_PORT_STS_MASK 0x0004 /* SLIMRX3_PORT_STS */ +#define ARIZONA_SLIMRX3_PORT_STS_SHIFT 2 /* SLIMRX3_PORT_STS */ +#define ARIZONA_SLIMRX3_PORT_STS_WIDTH 1 /* SLIMRX3_PORT_STS */ +#define ARIZONA_SLIMRX2_PORT_STS 0x0002 /* SLIMRX2_PORT_STS */ +#define ARIZONA_SLIMRX2_PORT_STS_MASK 0x0002 /* SLIMRX2_PORT_STS */ +#define ARIZONA_SLIMRX2_PORT_STS_SHIFT 1 /* SLIMRX2_PORT_STS */ +#define ARIZONA_SLIMRX2_PORT_STS_WIDTH 1 /* SLIMRX2_PORT_STS */ +#define ARIZONA_SLIMRX1_PORT_STS 0x0001 /* SLIMRX1_PORT_STS */ +#define ARIZONA_SLIMRX1_PORT_STS_MASK 0x0001 /* SLIMRX1_PORT_STS */ +#define ARIZONA_SLIMRX1_PORT_STS_SHIFT 0 /* SLIMRX1_PORT_STS */ +#define ARIZONA_SLIMRX1_PORT_STS_WIDTH 1 /* SLIMRX1_PORT_STS */ + +/* + * R1528 (0x5F8) - SLIMbus TX Port Status + */ +#define ARIZONA_SLIMTX8_PORT_STS 0x0080 /* SLIMTX8_PORT_STS */ +#define ARIZONA_SLIMTX8_PORT_STS_MASK 0x0080 /* SLIMTX8_PORT_STS */ +#define ARIZONA_SLIMTX8_PORT_STS_SHIFT 7 /* SLIMTX8_PORT_STS */ +#define ARIZONA_SLIMTX8_PORT_STS_WIDTH 1 /* SLIMTX8_PORT_STS */ +#define ARIZONA_SLIMTX7_PORT_STS 0x0040 /* SLIMTX7_PORT_STS */ +#define ARIZONA_SLIMTX7_PORT_STS_MASK 0x0040 /* SLIMTX7_PORT_STS */ +#define ARIZONA_SLIMTX7_PORT_STS_SHIFT 6 /* SLIMTX7_PORT_STS */ +#define ARIZONA_SLIMTX7_PORT_STS_WIDTH 1 /* SLIMTX7_PORT_STS */ +#define ARIZONA_SLIMTX6_PORT_STS 0x0020 /* SLIMTX6_PORT_STS */ +#define ARIZONA_SLIMTX6_PORT_STS_MASK 0x0020 /* SLIMTX6_PORT_STS */ +#define ARIZONA_SLIMTX6_PORT_STS_SHIFT 5 /* SLIMTX6_PORT_STS */ +#define 
ARIZONA_SLIMTX6_PORT_STS_WIDTH 1 /* SLIMTX6_PORT_STS */ +#define ARIZONA_SLIMTX5_PORT_STS 0x0010 /* SLIMTX5_PORT_STS */ +#define ARIZONA_SLIMTX5_PORT_STS_MASK 0x0010 /* SLIMTX5_PORT_STS */ +#define ARIZONA_SLIMTX5_PORT_STS_SHIFT 4 /* SLIMTX5_PORT_STS */ +#define ARIZONA_SLIMTX5_PORT_STS_WIDTH 1 /* SLIMTX5_PORT_STS */ +#define ARIZONA_SLIMTX4_PORT_STS 0x0008 /* SLIMTX4_PORT_STS */ +#define ARIZONA_SLIMTX4_PORT_STS_MASK 0x0008 /* SLIMTX4_PORT_STS */ +#define ARIZONA_SLIMTX4_PORT_STS_SHIFT 3 /* SLIMTX4_PORT_STS */ +#define ARIZONA_SLIMTX4_PORT_STS_WIDTH 1 /* SLIMTX4_PORT_STS */ +#define ARIZONA_SLIMTX3_PORT_STS 0x0004 /* SLIMTX3_PORT_STS */ +#define ARIZONA_SLIMTX3_PORT_STS_MASK 0x0004 /* SLIMTX3_PORT_STS */ +#define ARIZONA_SLIMTX3_PORT_STS_SHIFT 2 /* SLIMTX3_PORT_STS */ +#define ARIZONA_SLIMTX3_PORT_STS_WIDTH 1 /* SLIMTX3_PORT_STS */ +#define ARIZONA_SLIMTX2_PORT_STS 0x0002 /* SLIMTX2_PORT_STS */ +#define ARIZONA_SLIMTX2_PORT_STS_MASK 0x0002 /* SLIMTX2_PORT_STS */ +#define ARIZONA_SLIMTX2_PORT_STS_SHIFT 1 /* SLIMTX2_PORT_STS */ +#define ARIZONA_SLIMTX2_PORT_STS_WIDTH 1 /* SLIMTX2_PORT_STS */ +#define ARIZONA_SLIMTX1_PORT_STS 0x0001 /* SLIMTX1_PORT_STS */ +#define ARIZONA_SLIMTX1_PORT_STS_MASK 0x0001 /* SLIMTX1_PORT_STS */ +#define ARIZONA_SLIMTX1_PORT_STS_SHIFT 0 /* SLIMTX1_PORT_STS */ +#define ARIZONA_SLIMTX1_PORT_STS_WIDTH 1 /* SLIMTX1_PORT_STS */ + +/* + * R3087 (0xC0F) - IRQ CTRL 1 + */ +#define ARIZONA_IRQ_POL 0x0400 /* IRQ_POL */ +#define ARIZONA_IRQ_POL_MASK 0x0400 /* IRQ_POL */ +#define ARIZONA_IRQ_POL_SHIFT 10 /* IRQ_POL */ +#define ARIZONA_IRQ_POL_WIDTH 1 /* IRQ_POL */ +#define ARIZONA_IRQ_OP_CFG 0x0200 /* IRQ_OP_CFG */ +#define ARIZONA_IRQ_OP_CFG_MASK 0x0200 /* IRQ_OP_CFG */ +#define ARIZONA_IRQ_OP_CFG_SHIFT 9 /* IRQ_OP_CFG */ +#define ARIZONA_IRQ_OP_CFG_WIDTH 1 /* IRQ_OP_CFG */ + +/* + * R3088 (0xC10) - GPIO Debounce Config + */ +#define ARIZONA_GP_DBTIME_MASK 0xF000 /* GP_DBTIME - [15:12] */ +#define ARIZONA_GP_DBTIME_SHIFT 12 /* GP_DBTIME - [15:12] */ +#define ARIZONA_GP_DBTIME_WIDTH 4 /* GP_DBTIME - [15:12] */ + +/* + * R3096 (0xC18) - GP Switch 1 + */ +#define ARIZONA_SW1_MODE_MASK 0x0003 /* SW1_MODE - [1:0] */ +#define ARIZONA_SW1_MODE_SHIFT 0 /* SW1_MODE - [1:0] */ +#define ARIZONA_SW1_MODE_WIDTH 2 /* SW1_MODE - [1:0] */ + +/* + * R3104 (0xC20) - Misc Pad Ctrl 1 + */ +#define ARIZONA_LDO1ENA_PD 0x8000 /* LDO1ENA_PD */ +#define ARIZONA_LDO1ENA_PD_MASK 0x8000 /* LDO1ENA_PD */ +#define ARIZONA_LDO1ENA_PD_SHIFT 15 /* LDO1ENA_PD */ +#define ARIZONA_LDO1ENA_PD_WIDTH 1 /* LDO1ENA_PD */ +#define ARIZONA_MCLK2_PD 0x2000 /* MCLK2_PD */ +#define ARIZONA_MCLK2_PD_MASK 0x2000 /* MCLK2_PD */ +#define ARIZONA_MCLK2_PD_SHIFT 13 /* MCLK2_PD */ +#define ARIZONA_MCLK2_PD_WIDTH 1 /* MCLK2_PD */ +#define ARIZONA_RSTB_PU 0x0002 /* RSTB_PU */ +#define ARIZONA_RSTB_PU_MASK 0x0002 /* RSTB_PU */ +#define ARIZONA_RSTB_PU_SHIFT 1 /* RSTB_PU */ +#define ARIZONA_RSTB_PU_WIDTH 1 /* RSTB_PU */ + +/* + * R3105 (0xC21) - Misc Pad Ctrl 2 + */ +#define ARIZONA_MCLK1_PD 0x1000 /* MCLK1_PD */ +#define ARIZONA_MCLK1_PD_MASK 0x1000 /* MCLK1_PD */ +#define ARIZONA_MCLK1_PD_SHIFT 12 /* MCLK1_PD */ +#define ARIZONA_MCLK1_PD_WIDTH 1 /* MCLK1_PD */ +#define ARIZONA_MICD_PD 0x0100 /* MICD_PD */ +#define ARIZONA_MICD_PD_MASK 0x0100 /* MICD_PD */ +#define ARIZONA_MICD_PD_SHIFT 8 /* MICD_PD */ +#define ARIZONA_MICD_PD_WIDTH 1 /* MICD_PD */ +#define ARIZONA_ADDR_PD 0x0001 /* ADDR_PD */ +#define ARIZONA_ADDR_PD_MASK 0x0001 /* ADDR_PD */ +#define ARIZONA_ADDR_PD_SHIFT 0 /* ADDR_PD */ +#define ARIZONA_ADDR_PD_WIDTH 1 /* 
ADDR_PD */ + +/* + * R3106 (0xC22) - Misc Pad Ctrl 3 + */ +#define ARIZONA_DMICDAT4_PD 0x0008 /* DMICDAT4_PD */ +#define ARIZONA_DMICDAT4_PD_MASK 0x0008 /* DMICDAT4_PD */ +#define ARIZONA_DMICDAT4_PD_SHIFT 3 /* DMICDAT4_PD */ +#define ARIZONA_DMICDAT4_PD_WIDTH 1 /* DMICDAT4_PD */ +#define ARIZONA_DMICDAT3_PD 0x0004 /* DMICDAT3_PD */ +#define ARIZONA_DMICDAT3_PD_MASK 0x0004 /* DMICDAT3_PD */ +#define ARIZONA_DMICDAT3_PD_SHIFT 2 /* DMICDAT3_PD */ +#define ARIZONA_DMICDAT3_PD_WIDTH 1 /* DMICDAT3_PD */ +#define ARIZONA_DMICDAT2_PD 0x0002 /* DMICDAT2_PD */ +#define ARIZONA_DMICDAT2_PD_MASK 0x0002 /* DMICDAT2_PD */ +#define ARIZONA_DMICDAT2_PD_SHIFT 1 /* DMICDAT2_PD */ +#define ARIZONA_DMICDAT2_PD_WIDTH 1 /* DMICDAT2_PD */ +#define ARIZONA_DMICDAT1_PD 0x0001 /* DMICDAT1_PD */ +#define ARIZONA_DMICDAT1_PD_MASK 0x0001 /* DMICDAT1_PD */ +#define ARIZONA_DMICDAT1_PD_SHIFT 0 /* DMICDAT1_PD */ +#define ARIZONA_DMICDAT1_PD_WIDTH 1 /* DMICDAT1_PD */ + +/* + * R3107 (0xC23) - Misc Pad Ctrl 4 + */ +#define ARIZONA_AIF1RXLRCLK_PU 0x0020 /* AIF1RXLRCLK_PU */ +#define ARIZONA_AIF1RXLRCLK_PU_MASK 0x0020 /* AIF1RXLRCLK_PU */ +#define ARIZONA_AIF1RXLRCLK_PU_SHIFT 5 /* AIF1RXLRCLK_PU */ +#define ARIZONA_AIF1RXLRCLK_PU_WIDTH 1 /* AIF1RXLRCLK_PU */ +#define ARIZONA_AIF1RXLRCLK_PD 0x0010 /* AIF1RXLRCLK_PD */ +#define ARIZONA_AIF1RXLRCLK_PD_MASK 0x0010 /* AIF1RXLRCLK_PD */ +#define ARIZONA_AIF1RXLRCLK_PD_SHIFT 4 /* AIF1RXLRCLK_PD */ +#define ARIZONA_AIF1RXLRCLK_PD_WIDTH 1 /* AIF1RXLRCLK_PD */ +#define ARIZONA_AIF1BCLK_PU 0x0008 /* AIF1BCLK_PU */ +#define ARIZONA_AIF1BCLK_PU_MASK 0x0008 /* AIF1BCLK_PU */ +#define ARIZONA_AIF1BCLK_PU_SHIFT 3 /* AIF1BCLK_PU */ +#define ARIZONA_AIF1BCLK_PU_WIDTH 1 /* AIF1BCLK_PU */ +#define ARIZONA_AIF1BCLK_PD 0x0004 /* AIF1BCLK_PD */ +#define ARIZONA_AIF1BCLK_PD_MASK 0x0004 /* AIF1BCLK_PD */ +#define ARIZONA_AIF1BCLK_PD_SHIFT 2 /* AIF1BCLK_PD */ +#define ARIZONA_AIF1BCLK_PD_WIDTH 1 /* AIF1BCLK_PD */ +#define ARIZONA_AIF1RXDAT_PU 0x0002 /* AIF1RXDAT_PU */ +#define ARIZONA_AIF1RXDAT_PU_MASK 0x0002 /* AIF1RXDAT_PU */ +#define ARIZONA_AIF1RXDAT_PU_SHIFT 1 /* AIF1RXDAT_PU */ +#define ARIZONA_AIF1RXDAT_PU_WIDTH 1 /* AIF1RXDAT_PU */ +#define ARIZONA_AIF1RXDAT_PD 0x0001 /* AIF1RXDAT_PD */ +#define ARIZONA_AIF1RXDAT_PD_MASK 0x0001 /* AIF1RXDAT_PD */ +#define ARIZONA_AIF1RXDAT_PD_SHIFT 0 /* AIF1RXDAT_PD */ +#define ARIZONA_AIF1RXDAT_PD_WIDTH 1 /* AIF1RXDAT_PD */ + +/* + * R3108 (0xC24) - Misc Pad Ctrl 5 + */ +#define ARIZONA_AIF2RXLRCLK_PU 0x0020 /* AIF2RXLRCLK_PU */ +#define ARIZONA_AIF2RXLRCLK_PU_MASK 0x0020 /* AIF2RXLRCLK_PU */ +#define ARIZONA_AIF2RXLRCLK_PU_SHIFT 5 /* AIF2RXLRCLK_PU */ +#define ARIZONA_AIF2RXLRCLK_PU_WIDTH 1 /* AIF2RXLRCLK_PU */ +#define ARIZONA_AIF2RXLRCLK_PD 0x0010 /* AIF2RXLRCLK_PD */ +#define ARIZONA_AIF2RXLRCLK_PD_MASK 0x0010 /* AIF2RXLRCLK_PD */ +#define ARIZONA_AIF2RXLRCLK_PD_SHIFT 4 /* AIF2RXLRCLK_PD */ +#define ARIZONA_AIF2RXLRCLK_PD_WIDTH 1 /* AIF2RXLRCLK_PD */ +#define ARIZONA_AIF2BCLK_PU 0x0008 /* AIF2BCLK_PU */ +#define ARIZONA_AIF2BCLK_PU_MASK 0x0008 /* AIF2BCLK_PU */ +#define ARIZONA_AIF2BCLK_PU_SHIFT 3 /* AIF2BCLK_PU */ +#define ARIZONA_AIF2BCLK_PU_WIDTH 1 /* AIF2BCLK_PU */ +#define ARIZONA_AIF2BCLK_PD 0x0004 /* AIF2BCLK_PD */ +#define ARIZONA_AIF2BCLK_PD_MASK 0x0004 /* AIF2BCLK_PD */ +#define ARIZONA_AIF2BCLK_PD_SHIFT 2 /* AIF2BCLK_PD */ +#define ARIZONA_AIF2BCLK_PD_WIDTH 1 /* AIF2BCLK_PD */ +#define ARIZONA_AIF2RXDAT_PU 0x0002 /* AIF2RXDAT_PU */ +#define ARIZONA_AIF2RXDAT_PU_MASK 0x0002 /* AIF2RXDAT_PU */ +#define ARIZONA_AIF2RXDAT_PU_SHIFT 1 /* 
AIF2RXDAT_PU */ +#define ARIZONA_AIF2RXDAT_PU_WIDTH 1 /* AIF2RXDAT_PU */ +#define ARIZONA_AIF2RXDAT_PD 0x0001 /* AIF2RXDAT_PD */ +#define ARIZONA_AIF2RXDAT_PD_MASK 0x0001 /* AIF2RXDAT_PD */ +#define ARIZONA_AIF2RXDAT_PD_SHIFT 0 /* AIF2RXDAT_PD */ +#define ARIZONA_AIF2RXDAT_PD_WIDTH 1 /* AIF2RXDAT_PD */ + +/* + * R3109 (0xC25) - Misc Pad Ctrl 6 + */ +#define ARIZONA_AIF3RXLRCLK_PU 0x0020 /* AIF3RXLRCLK_PU */ +#define ARIZONA_AIF3RXLRCLK_PU_MASK 0x0020 /* AIF3RXLRCLK_PU */ +#define ARIZONA_AIF3RXLRCLK_PU_SHIFT 5 /* AIF3RXLRCLK_PU */ +#define ARIZONA_AIF3RXLRCLK_PU_WIDTH 1 /* AIF3RXLRCLK_PU */ +#define ARIZONA_AIF3RXLRCLK_PD 0x0010 /* AIF3RXLRCLK_PD */ +#define ARIZONA_AIF3RXLRCLK_PD_MASK 0x0010 /* AIF3RXLRCLK_PD */ +#define ARIZONA_AIF3RXLRCLK_PD_SHIFT 4 /* AIF3RXLRCLK_PD */ +#define ARIZONA_AIF3RXLRCLK_PD_WIDTH 1 /* AIF3RXLRCLK_PD */ +#define ARIZONA_AIF3BCLK_PU 0x0008 /* AIF3BCLK_PU */ +#define ARIZONA_AIF3BCLK_PU_MASK 0x0008 /* AIF3BCLK_PU */ +#define ARIZONA_AIF3BCLK_PU_SHIFT 3 /* AIF3BCLK_PU */ +#define ARIZONA_AIF3BCLK_PU_WIDTH 1 /* AIF3BCLK_PU */ +#define ARIZONA_AIF3BCLK_PD 0x0004 /* AIF3BCLK_PD */ +#define ARIZONA_AIF3BCLK_PD_MASK 0x0004 /* AIF3BCLK_PD */ +#define ARIZONA_AIF3BCLK_PD_SHIFT 2 /* AIF3BCLK_PD */ +#define ARIZONA_AIF3BCLK_PD_WIDTH 1 /* AIF3BCLK_PD */ +#define ARIZONA_AIF3RXDAT_PU 0x0002 /* AIF3RXDAT_PU */ +#define ARIZONA_AIF3RXDAT_PU_MASK 0x0002 /* AIF3RXDAT_PU */ +#define ARIZONA_AIF3RXDAT_PU_SHIFT 1 /* AIF3RXDAT_PU */ +#define ARIZONA_AIF3RXDAT_PU_WIDTH 1 /* AIF3RXDAT_PU */ +#define ARIZONA_AIF3RXDAT_PD 0x0001 /* AIF3RXDAT_PD */ +#define ARIZONA_AIF3RXDAT_PD_MASK 0x0001 /* AIF3RXDAT_PD */ +#define ARIZONA_AIF3RXDAT_PD_SHIFT 0 /* AIF3RXDAT_PD */ +#define ARIZONA_AIF3RXDAT_PD_WIDTH 1 /* AIF3RXDAT_PD */ + +/* + * R3328 (0xD00) - Interrupt Status 1 + */ +#define ARIZONA_GP4_EINT1 0x0008 /* GP4_EINT1 */ +#define ARIZONA_GP4_EINT1_MASK 0x0008 /* GP4_EINT1 */ +#define ARIZONA_GP4_EINT1_SHIFT 3 /* GP4_EINT1 */ +#define ARIZONA_GP4_EINT1_WIDTH 1 /* GP4_EINT1 */ +#define ARIZONA_GP3_EINT1 0x0004 /* GP3_EINT1 */ +#define ARIZONA_GP3_EINT1_MASK 0x0004 /* GP3_EINT1 */ +#define ARIZONA_GP3_EINT1_SHIFT 2 /* GP3_EINT1 */ +#define ARIZONA_GP3_EINT1_WIDTH 1 /* GP3_EINT1 */ +#define ARIZONA_GP2_EINT1 0x0002 /* GP2_EINT1 */ +#define ARIZONA_GP2_EINT1_MASK 0x0002 /* GP2_EINT1 */ +#define ARIZONA_GP2_EINT1_SHIFT 1 /* GP2_EINT1 */ +#define ARIZONA_GP2_EINT1_WIDTH 1 /* GP2_EINT1 */ +#define ARIZONA_GP1_EINT1 0x0001 /* GP1_EINT1 */ +#define ARIZONA_GP1_EINT1_MASK 0x0001 /* GP1_EINT1 */ +#define ARIZONA_GP1_EINT1_SHIFT 0 /* GP1_EINT1 */ +#define ARIZONA_GP1_EINT1_WIDTH 1 /* GP1_EINT1 */ + +/* + * R3329 (0xD01) - Interrupt Status 2 + */ +#define ARIZONA_DSP4_RAM_RDY_EINT1 0x0800 /* DSP4_RAM_RDY_EINT1 */ +#define ARIZONA_DSP4_RAM_RDY_EINT1_MASK 0x0800 /* DSP4_RAM_RDY_EINT1 */ +#define ARIZONA_DSP4_RAM_RDY_EINT1_SHIFT 11 /* DSP4_RAM_RDY_EINT1 */ +#define ARIZONA_DSP4_RAM_RDY_EINT1_WIDTH 1 /* DSP4_RAM_RDY_EINT1 */ +#define ARIZONA_DSP3_RAM_RDY_EINT1 0x0400 /* DSP3_RAM_RDY_EINT1 */ +#define ARIZONA_DSP3_RAM_RDY_EINT1_MASK 0x0400 /* DSP3_RAM_RDY_EINT1 */ +#define ARIZONA_DSP3_RAM_RDY_EINT1_SHIFT 10 /* DSP3_RAM_RDY_EINT1 */ +#define ARIZONA_DSP3_RAM_RDY_EINT1_WIDTH 1 /* DSP3_RAM_RDY_EINT1 */ +#define ARIZONA_DSP2_RAM_RDY_EINT1 0x0200 /* DSP2_RAM_RDY_EINT1 */ +#define ARIZONA_DSP2_RAM_RDY_EINT1_MASK 0x0200 /* DSP2_RAM_RDY_EINT1 */ +#define ARIZONA_DSP2_RAM_RDY_EINT1_SHIFT 9 /* DSP2_RAM_RDY_EINT1 */ +#define ARIZONA_DSP2_RAM_RDY_EINT1_WIDTH 1 /* DSP2_RAM_RDY_EINT1 */ +#define 
ARIZONA_DSP1_RAM_RDY_EINT1 0x0100 /* DSP1_RAM_RDY_EINT1 */ +#define ARIZONA_DSP1_RAM_RDY_EINT1_MASK 0x0100 /* DSP1_RAM_RDY_EINT1 */ +#define ARIZONA_DSP1_RAM_RDY_EINT1_SHIFT 8 /* DSP1_RAM_RDY_EINT1 */ +#define ARIZONA_DSP1_RAM_RDY_EINT1_WIDTH 1 /* DSP1_RAM_RDY_EINT1 */ +#define ARIZONA_DSP_IRQ8_EINT1 0x0080 /* DSP_IRQ8_EINT1 */ +#define ARIZONA_DSP_IRQ8_EINT1_MASK 0x0080 /* DSP_IRQ8_EINT1 */ +#define ARIZONA_DSP_IRQ8_EINT1_SHIFT 7 /* DSP_IRQ8_EINT1 */ +#define ARIZONA_DSP_IRQ8_EINT1_WIDTH 1 /* DSP_IRQ8_EINT1 */ +#define ARIZONA_DSP_IRQ7_EINT1 0x0040 /* DSP_IRQ7_EINT1 */ +#define ARIZONA_DSP_IRQ7_EINT1_MASK 0x0040 /* DSP_IRQ7_EINT1 */ +#define ARIZONA_DSP_IRQ7_EINT1_SHIFT 6 /* DSP_IRQ7_EINT1 */ +#define ARIZONA_DSP_IRQ7_EINT1_WIDTH 1 /* DSP_IRQ7_EINT1 */ +#define ARIZONA_DSP_IRQ6_EINT1 0x0020 /* DSP_IRQ6_EINT1 */ +#define ARIZONA_DSP_IRQ6_EINT1_MASK 0x0020 /* DSP_IRQ6_EINT1 */ +#define ARIZONA_DSP_IRQ6_EINT1_SHIFT 5 /* DSP_IRQ6_EINT1 */ +#define ARIZONA_DSP_IRQ6_EINT1_WIDTH 1 /* DSP_IRQ6_EINT1 */ +#define ARIZONA_DSP_IRQ5_EINT1 0x0010 /* DSP_IRQ5_EINT1 */ +#define ARIZONA_DSP_IRQ5_EINT1_MASK 0x0010 /* DSP_IRQ5_EINT1 */ +#define ARIZONA_DSP_IRQ5_EINT1_SHIFT 4 /* DSP_IRQ5_EINT1 */ +#define ARIZONA_DSP_IRQ5_EINT1_WIDTH 1 /* DSP_IRQ5_EINT1 */ +#define ARIZONA_DSP_IRQ4_EINT1 0x0008 /* DSP_IRQ4_EINT1 */ +#define ARIZONA_DSP_IRQ4_EINT1_MASK 0x0008 /* DSP_IRQ4_EINT1 */ +#define ARIZONA_DSP_IRQ4_EINT1_SHIFT 3 /* DSP_IRQ4_EINT1 */ +#define ARIZONA_DSP_IRQ4_EINT1_WIDTH 1 /* DSP_IRQ4_EINT1 */ +#define ARIZONA_DSP_IRQ3_EINT1 0x0004 /* DSP_IRQ3_EINT1 */ +#define ARIZONA_DSP_IRQ3_EINT1_MASK 0x0004 /* DSP_IRQ3_EINT1 */ +#define ARIZONA_DSP_IRQ3_EINT1_SHIFT 2 /* DSP_IRQ3_EINT1 */ +#define ARIZONA_DSP_IRQ3_EINT1_WIDTH 1 /* DSP_IRQ3_EINT1 */ +#define ARIZONA_DSP_IRQ2_EINT1 0x0002 /* DSP_IRQ2_EINT1 */ +#define ARIZONA_DSP_IRQ2_EINT1_MASK 0x0002 /* DSP_IRQ2_EINT1 */ +#define ARIZONA_DSP_IRQ2_EINT1_SHIFT 1 /* DSP_IRQ2_EINT1 */ +#define ARIZONA_DSP_IRQ2_EINT1_WIDTH 1 /* DSP_IRQ2_EINT1 */ +#define ARIZONA_DSP_IRQ1_EINT1 0x0001 /* DSP_IRQ1_EINT1 */ +#define ARIZONA_DSP_IRQ1_EINT1_MASK 0x0001 /* DSP_IRQ1_EINT1 */ +#define ARIZONA_DSP_IRQ1_EINT1_SHIFT 0 /* DSP_IRQ1_EINT1 */ +#define ARIZONA_DSP_IRQ1_EINT1_WIDTH 1 /* DSP_IRQ1_EINT1 */ + +/* + * R3330 (0xD02) - Interrupt Status 3 + */ +#define ARIZONA_SPK_OVERHEAT_WARN_EINT1 0x8000 /* SPK_OVERHEAT_WARN_EINT1 */ +#define ARIZONA_SPK_OVERHEAT_WARN_EINT1_MASK 0x8000 /* SPK_OVERHEAT_WARN_EINT1 */ +#define ARIZONA_SPK_OVERHEAT_WARN_EINT1_SHIFT 15 /* SPK_OVERHEAT_WARN_EINT1 */ +#define ARIZONA_SPK_OVERHEAT_WARN_EINT1_WIDTH 1 /* SPK_OVERHEAT_WARN_EINT1 */ +#define ARIZONA_SPK_OVERHEAT_EINT1 0x4000 /* SPK_OVERHEAT_EINT1 */ +#define ARIZONA_SPK_OVERHEAT_EINT1_MASK 0x4000 /* SPK_OVERHEAT_EINT1 */ +#define ARIZONA_SPK_OVERHEAT_EINT1_SHIFT 14 /* SPK_OVERHEAT_EINT1 */ +#define ARIZONA_SPK_OVERHEAT_EINT1_WIDTH 1 /* SPK_OVERHEAT_EINT1 */ +#define ARIZONA_HPDET_EINT1 0x2000 /* HPDET_EINT1 */ +#define ARIZONA_HPDET_EINT1_MASK 0x2000 /* HPDET_EINT1 */ +#define ARIZONA_HPDET_EINT1_SHIFT 13 /* HPDET_EINT1 */ +#define ARIZONA_HPDET_EINT1_WIDTH 1 /* HPDET_EINT1 */ +#define ARIZONA_MICDET_EINT1 0x1000 /* MICDET_EINT1 */ +#define ARIZONA_MICDET_EINT1_MASK 0x1000 /* MICDET_EINT1 */ +#define ARIZONA_MICDET_EINT1_SHIFT 12 /* MICDET_EINT1 */ +#define ARIZONA_MICDET_EINT1_WIDTH 1 /* MICDET_EINT1 */ +#define ARIZONA_WSEQ_DONE_EINT1 0x0800 /* WSEQ_DONE_EINT1 */ +#define ARIZONA_WSEQ_DONE_EINT1_MASK 0x0800 /* WSEQ_DONE_EINT1 */ +#define ARIZONA_WSEQ_DONE_EINT1_SHIFT 11 /* WSEQ_DONE_EINT1 */ +#define
ARIZONA_WSEQ_DONE_EINT1_WIDTH 1 /* WSEQ_DONE_EINT1 */ +#define ARIZONA_DRC2_SIG_DET_EINT1 0x0400 /* DRC2_SIG_DET_EINT1 */ +#define ARIZONA_DRC2_SIG_DET_EINT1_MASK 0x0400 /* DRC2_SIG_DET_EINT1 */ +#define ARIZONA_DRC2_SIG_DET_EINT1_SHIFT 10 /* DRC2_SIG_DET_EINT1 */ +#define ARIZONA_DRC2_SIG_DET_EINT1_WIDTH 1 /* DRC2_SIG_DET_EINT1 */ +#define ARIZONA_DRC1_SIG_DET_EINT1 0x0200 /* DRC1_SIG_DET_EINT1 */ +#define ARIZONA_DRC1_SIG_DET_EINT1_MASK 0x0200 /* DRC1_SIG_DET_EINT1 */ +#define ARIZONA_DRC1_SIG_DET_EINT1_SHIFT 9 /* DRC1_SIG_DET_EINT1 */ +#define ARIZONA_DRC1_SIG_DET_EINT1_WIDTH 1 /* DRC1_SIG_DET_EINT1 */ +#define ARIZONA_ASRC2_LOCK_EINT1 0x0100 /* ASRC2_LOCK_EINT1 */ +#define ARIZONA_ASRC2_LOCK_EINT1_MASK 0x0100 /* ASRC2_LOCK_EINT1 */ +#define ARIZONA_ASRC2_LOCK_EINT1_SHIFT 8 /* ASRC2_LOCK_EINT1 */ +#define ARIZONA_ASRC2_LOCK_EINT1_WIDTH 1 /* ASRC2_LOCK_EINT1 */ +#define ARIZONA_ASRC1_LOCK_EINT1 0x0080 /* ASRC1_LOCK_EINT1 */ +#define ARIZONA_ASRC1_LOCK_EINT1_MASK 0x0080 /* ASRC1_LOCK_EINT1 */ +#define ARIZONA_ASRC1_LOCK_EINT1_SHIFT 7 /* ASRC1_LOCK_EINT1 */ +#define ARIZONA_ASRC1_LOCK_EINT1_WIDTH 1 /* ASRC1_LOCK_EINT1 */ +#define ARIZONA_UNDERCLOCKED_EINT1 0x0040 /* UNDERCLOCKED_EINT1 */ +#define ARIZONA_UNDERCLOCKED_EINT1_MASK 0x0040 /* UNDERCLOCKED_EINT1 */ +#define ARIZONA_UNDERCLOCKED_EINT1_SHIFT 6 /* UNDERCLOCKED_EINT1 */ +#define ARIZONA_UNDERCLOCKED_EINT1_WIDTH 1 /* UNDERCLOCKED_EINT1 */ +#define ARIZONA_OVERCLOCKED_EINT1 0x0020 /* OVERCLOCKED_EINT1 */ +#define ARIZONA_OVERCLOCKED_EINT1_MASK 0x0020 /* OVERCLOCKED_EINT1 */ +#define ARIZONA_OVERCLOCKED_EINT1_SHIFT 5 /* OVERCLOCKED_EINT1 */ +#define ARIZONA_OVERCLOCKED_EINT1_WIDTH 1 /* OVERCLOCKED_EINT1 */ +#define ARIZONA_FLL2_LOCK_EINT1 0x0008 /* FLL2_LOCK_EINT1 */ +#define ARIZONA_FLL2_LOCK_EINT1_MASK 0x0008 /* FLL2_LOCK_EINT1 */ +#define ARIZONA_FLL2_LOCK_EINT1_SHIFT 3 /* FLL2_LOCK_EINT1 */ +#define ARIZONA_FLL2_LOCK_EINT1_WIDTH 1 /* FLL2_LOCK_EINT1 */ +#define ARIZONA_FLL1_LOCK_EINT1 0x0004 /* FLL1_LOCK_EINT1 */ +#define ARIZONA_FLL1_LOCK_EINT1_MASK 0x0004 /* FLL1_LOCK_EINT1 */ +#define ARIZONA_FLL1_LOCK_EINT1_SHIFT 2 /* FLL1_LOCK_EINT1 */ +#define ARIZONA_FLL1_LOCK_EINT1_WIDTH 1 /* FLL1_LOCK_EINT1 */ +#define ARIZONA_CLKGEN_ERR_EINT1 0x0002 /* CLKGEN_ERR_EINT1 */ +#define ARIZONA_CLKGEN_ERR_EINT1_MASK 0x0002 /* CLKGEN_ERR_EINT1 */ +#define ARIZONA_CLKGEN_ERR_EINT1_SHIFT 1 /* CLKGEN_ERR_EINT1 */ +#define ARIZONA_CLKGEN_ERR_EINT1_WIDTH 1 /* CLKGEN_ERR_EINT1 */ +#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1 0x0001 /* CLKGEN_ERR_ASYNC_EINT1 */ +#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1_MASK 0x0001 /* CLKGEN_ERR_ASYNC_EINT1 */ +#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1_SHIFT 0 /* CLKGEN_ERR_ASYNC_EINT1 */ +#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1_WIDTH 1 /* CLKGEN_ERR_ASYNC_EINT1 */ + +/* + * R3331 (0xD03) - Interrupt Status 4 + */ +#define ARIZONA_ASRC_CFG_ERR_EINT1 0x8000 /* ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_ASRC_CFG_ERR_EINT1_MASK 0x8000 /* ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_ASRC_CFG_ERR_EINT1_SHIFT 15 /* ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_ASRC_CFG_ERR_EINT1_WIDTH 1 /* ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_AIF3_ERR_EINT1 0x4000 /* AIF3_ERR_EINT1 */ +#define ARIZONA_AIF3_ERR_EINT1_MASK 0x4000 /* AIF3_ERR_EINT1 */ +#define ARIZONA_AIF3_ERR_EINT1_SHIFT 14 /* AIF3_ERR_EINT1 */ +#define ARIZONA_AIF3_ERR_EINT1_WIDTH 1 /* AIF3_ERR_EINT1 */ +#define ARIZONA_AIF2_ERR_EINT1 0x2000 /* AIF2_ERR_EINT1 */ +#define ARIZONA_AIF2_ERR_EINT1_MASK 0x2000 /* AIF2_ERR_EINT1 */ +#define ARIZONA_AIF2_ERR_EINT1_SHIFT 13 /* AIF2_ERR_EINT1 */ +#define 
ARIZONA_AIF2_ERR_EINT1_WIDTH 1 /* AIF2_ERR_EINT1 */ +#define ARIZONA_AIF1_ERR_EINT1 0x1000 /* AIF1_ERR_EINT1 */ +#define ARIZONA_AIF1_ERR_EINT1_MASK 0x1000 /* AIF1_ERR_EINT1 */ +#define ARIZONA_AIF1_ERR_EINT1_SHIFT 12 /* AIF1_ERR_EINT1 */ +#define ARIZONA_AIF1_ERR_EINT1_WIDTH 1 /* AIF1_ERR_EINT1 */ +#define ARIZONA_CTRLIF_ERR_EINT1 0x0800 /* CTRLIF_ERR_EINT1 */ +#define ARIZONA_CTRLIF_ERR_EINT1_MASK 0x0800 /* CTRLIF_ERR_EINT1 */ +#define ARIZONA_CTRLIF_ERR_EINT1_SHIFT 11 /* CTRLIF_ERR_EINT1 */ +#define ARIZONA_CTRLIF_ERR_EINT1_WIDTH 1 /* CTRLIF_ERR_EINT1 */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1 0x0400 /* MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1_MASK 0x0400 /* MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1_SHIFT 10 /* MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1_WIDTH 1 /* MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1 0x0200 /* ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1_MASK 0x0200 /* ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1_SHIFT 9 /* ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1_WIDTH 1 /* ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_SYSCLK_ENA_LOW_EINT1 0x0100 /* SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_SYSCLK_ENA_LOW_EINT1_MASK 0x0100 /* SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_SYSCLK_ENA_LOW_EINT1_SHIFT 8 /* SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_SYSCLK_ENA_LOW_EINT1_WIDTH 1 /* SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_ISRC1_CFG_ERR_EINT1 0x0080 /* ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_ISRC1_CFG_ERR_EINT1_MASK 0x0080 /* ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_ISRC1_CFG_ERR_EINT1_SHIFT 7 /* ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_ISRC1_CFG_ERR_EINT1_WIDTH 1 /* ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_ISRC2_CFG_ERR_EINT1 0x0040 /* ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_ISRC2_CFG_ERR_EINT1_MASK 0x0040 /* ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_ISRC2_CFG_ERR_EINT1_SHIFT 6 /* ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_ISRC2_CFG_ERR_EINT1_WIDTH 1 /* ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_HP3R_DONE_EINT1 0x0020 /* HP3R_DONE_EINT1 */ +#define ARIZONA_HP3R_DONE_EINT1_MASK 0x0020 /* HP3R_DONE_EINT1 */ +#define ARIZONA_HP3R_DONE_EINT1_SHIFT 5 /* HP3R_DONE_EINT1 */ +#define ARIZONA_HP3R_DONE_EINT1_WIDTH 1 /* HP3R_DONE_EINT1 */ +#define ARIZONA_HP3L_DONE_EINT1 0x0010 /* HP3L_DONE_EINT1 */ +#define ARIZONA_HP3L_DONE_EINT1_MASK 0x0010 /* HP3L_DONE_EINT1 */ +#define ARIZONA_HP3L_DONE_EINT1_SHIFT 4 /* HP3L_DONE_EINT1 */ +#define ARIZONA_HP3L_DONE_EINT1_WIDTH 1 /* HP3L_DONE_EINT1 */ +#define ARIZONA_HP2R_DONE_EINT1 0x0008 /* HP2R_DONE_EINT1 */ +#define ARIZONA_HP2R_DONE_EINT1_MASK 0x0008 /* HP2R_DONE_EINT1 */ +#define ARIZONA_HP2R_DONE_EINT1_SHIFT 3 /* HP2R_DONE_EINT1 */ +#define ARIZONA_HP2R_DONE_EINT1_WIDTH 1 /* HP2R_DONE_EINT1 */ +#define ARIZONA_HP2L_DONE_EINT1 0x0004 /* HP2L_DONE_EINT1 */ +#define ARIZONA_HP2L_DONE_EINT1_MASK 0x0004 /* HP2L_DONE_EINT1 */ +#define ARIZONA_HP2L_DONE_EINT1_SHIFT 2 /* HP2L_DONE_EINT1 */ +#define ARIZONA_HP2L_DONE_EINT1_WIDTH 1 /* HP2L_DONE_EINT1 */ +#define ARIZONA_HP1R_DONE_EINT1 0x0002 /* HP1R_DONE_EINT1 */ +#define ARIZONA_HP1R_DONE_EINT1_MASK 0x0002 /* HP1R_DONE_EINT1 */ +#define ARIZONA_HP1R_DONE_EINT1_SHIFT 1 /* HP1R_DONE_EINT1 */ +#define ARIZONA_HP1R_DONE_EINT1_WIDTH 1 /* HP1R_DONE_EINT1 */ +#define ARIZONA_HP1L_DONE_EINT1 0x0001 /* HP1L_DONE_EINT1 */ +#define ARIZONA_HP1L_DONE_EINT1_MASK 0x0001 /* HP1L_DONE_EINT1 */ +#define ARIZONA_HP1L_DONE_EINT1_SHIFT 0 /* 
HP1L_DONE_EINT1 */ +#define ARIZONA_HP1L_DONE_EINT1_WIDTH 1 /* HP1L_DONE_EINT1 */ + +/* + * R3331 (0xD03) - Interrupt Status 4 (Alternate layout) + * + * Alternate layout used on later devices, note only fields that have moved + * are specified + */ +#define ARIZONA_V2_AIF3_ERR_EINT1 0x8000 /* AIF3_ERR_EINT1 */ +#define ARIZONA_V2_AIF3_ERR_EINT1_MASK 0x8000 /* AIF3_ERR_EINT1 */ +#define ARIZONA_V2_AIF3_ERR_EINT1_SHIFT 15 /* AIF3_ERR_EINT1 */ +#define ARIZONA_V2_AIF3_ERR_EINT1_WIDTH 1 /* AIF3_ERR_EINT1 */ +#define ARIZONA_V2_AIF2_ERR_EINT1 0x4000 /* AIF2_ERR_EINT1 */ +#define ARIZONA_V2_AIF2_ERR_EINT1_MASK 0x4000 /* AIF2_ERR_EINT1 */ +#define ARIZONA_V2_AIF2_ERR_EINT1_SHIFT 14 /* AIF2_ERR_EINT1 */ +#define ARIZONA_V2_AIF2_ERR_EINT1_WIDTH 1 /* AIF2_ERR_EINT1 */ +#define ARIZONA_V2_AIF1_ERR_EINT1 0x2000 /* AIF1_ERR_EINT1 */ +#define ARIZONA_V2_AIF1_ERR_EINT1_MASK 0x2000 /* AIF1_ERR_EINT1 */ +#define ARIZONA_V2_AIF1_ERR_EINT1_SHIFT 13 /* AIF1_ERR_EINT1 */ +#define ARIZONA_V2_AIF1_ERR_EINT1_WIDTH 1 /* AIF1_ERR_EINT1 */ +#define ARIZONA_V2_CTRLIF_ERR_EINT1 0x1000 /* CTRLIF_ERR_EINT1 */ +#define ARIZONA_V2_CTRLIF_ERR_EINT1_MASK 0x1000 /* CTRLIF_ERR_EINT1 */ +#define ARIZONA_V2_CTRLIF_ERR_EINT1_SHIFT 12 /* CTRLIF_ERR_EINT1 */ +#define ARIZONA_V2_CTRLIF_ERR_EINT1_WIDTH 1 /* CTRLIF_ERR_EINT1 */ +#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT1 0x0800 /* MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT1_MASK 0x0800 /* MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT1_SHIFT 11 /* MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT1_WIDTH 1 /* MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT1 0x0400 /* ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT1_MASK 0x0400 /* ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT1_SHIFT 10 /* ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT1_WIDTH 1 /* ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT1 0x0200 /* SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT1_MASK 0x0200 /* SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT1_SHIFT 9 /* SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT1_WIDTH 1 /* SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_ISRC1_CFG_ERR_EINT1 0x0100 /* ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC1_CFG_ERR_EINT1_MASK 0x0100 /* ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC1_CFG_ERR_EINT1_SHIFT 8 /* ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC1_CFG_ERR_EINT1_WIDTH 1 /* ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC2_CFG_ERR_EINT1 0x0080 /* ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC2_CFG_ERR_EINT1_MASK 0x0080 /* ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC2_CFG_ERR_EINT1_SHIFT 7 /* ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC2_CFG_ERR_EINT1_WIDTH 1 /* ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC3_CFG_ERR_EINT1 0x0040 /* ISRC3_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC3_CFG_ERR_EINT1_MASK 0x0040 /* ISRC3_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC3_CFG_ERR_EINT1_SHIFT 6 /* ISRC3_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ISRC3_CFG_ERR_EINT1_WIDTH 1 /* ISRC3_CFG_ERR_EINT1 */ + +/* + * R3332 (0xD04) - Interrupt Status 5 + */ +#define ARIZONA_BOOT_DONE_EINT1 0x0100 /* BOOT_DONE_EINT1 */ +#define ARIZONA_BOOT_DONE_EINT1_MASK 0x0100 /* BOOT_DONE_EINT1 */ +#define ARIZONA_BOOT_DONE_EINT1_SHIFT 8 /* BOOT_DONE_EINT1 */ +#define ARIZONA_BOOT_DONE_EINT1_WIDTH 1 /* BOOT_DONE_EINT1 */ +#define ARIZONA_DCS_DAC_DONE_EINT1 0x0080 /* 
DCS_DAC_DONE_EINT1 */ +#define ARIZONA_DCS_DAC_DONE_EINT1_MASK 0x0080 /* DCS_DAC_DONE_EINT1 */ +#define ARIZONA_DCS_DAC_DONE_EINT1_SHIFT 7 /* DCS_DAC_DONE_EINT1 */ +#define ARIZONA_DCS_DAC_DONE_EINT1_WIDTH 1 /* DCS_DAC_DONE_EINT1 */ +#define ARIZONA_DCS_HP_DONE_EINT1 0x0040 /* DCS_HP_DONE_EINT1 */ +#define ARIZONA_DCS_HP_DONE_EINT1_MASK 0x0040 /* DCS_HP_DONE_EINT1 */ +#define ARIZONA_DCS_HP_DONE_EINT1_SHIFT 6 /* DCS_HP_DONE_EINT1 */ +#define ARIZONA_DCS_HP_DONE_EINT1_WIDTH 1 /* DCS_HP_DONE_EINT1 */ +#define ARIZONA_FLL2_CLOCK_OK_EINT1 0x0002 /* FLL2_CLOCK_OK_EINT1 */ +#define ARIZONA_FLL2_CLOCK_OK_EINT1_MASK 0x0002 /* FLL2_CLOCK_OK_EINT1 */ +#define ARIZONA_FLL2_CLOCK_OK_EINT1_SHIFT 1 /* FLL2_CLOCK_OK_EINT1 */ +#define ARIZONA_FLL2_CLOCK_OK_EINT1_WIDTH 1 /* FLL2_CLOCK_OK_EINT1 */ +#define ARIZONA_FLL1_CLOCK_OK_EINT1 0x0001 /* FLL1_CLOCK_OK_EINT1 */ +#define ARIZONA_FLL1_CLOCK_OK_EINT1_MASK 0x0001 /* FLL1_CLOCK_OK_EINT1 */ +#define ARIZONA_FLL1_CLOCK_OK_EINT1_SHIFT 0 /* FLL1_CLOCK_OK_EINT1 */ +#define ARIZONA_FLL1_CLOCK_OK_EINT1_WIDTH 1 /* FLL1_CLOCK_OK_EINT1 */ + +/* + * R3332 (0xD04) - Interrupt Status 5 (Alternate layout) + * + * Alternate layout used on later devices, note only fields that have moved + * are specified + */ +#define ARIZONA_V2_ASRC_CFG_ERR_EINT1 0x0008 /* ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ASRC_CFG_ERR_EINT1_MASK 0x0008 /* ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ASRC_CFG_ERR_EINT1_SHIFT 3 /* ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_V2_ASRC_CFG_ERR_EINT1_WIDTH 1 /* ASRC_CFG_ERR_EINT1 */ + +/* + * R3333 (0xD05) - Interrupt Status 6 + */ +#define ARIZONA_DSP_SHARED_WR_COLL_EINT1 0x8000 /* DSP_SHARED_WR_COLL_EINT1 */ +#define ARIZONA_DSP_SHARED_WR_COLL_EINT1_MASK 0x8000 /* DSP_SHARED_WR_COLL_EINT1 */ +#define ARIZONA_DSP_SHARED_WR_COLL_EINT1_SHIFT 15 /* DSP_SHARED_WR_COLL_EINT1 */ +#define ARIZONA_DSP_SHARED_WR_COLL_EINT1_WIDTH 1 /* DSP_SHARED_WR_COLL_EINT1 */ +#define ARIZONA_SPK_SHUTDOWN_EINT1 0x4000 /* SPK_SHUTDOWN_EINT1 */ +#define ARIZONA_SPK_SHUTDOWN_EINT1_MASK 0x4000 /* SPK_SHUTDOWN_EINT1 */ +#define ARIZONA_SPK_SHUTDOWN_EINT1_SHIFT 14 /* SPK_SHUTDOWN_EINT1 */ +#define ARIZONA_SPK_SHUTDOWN_EINT1_WIDTH 1 /* SPK_SHUTDOWN_EINT1 */ +#define ARIZONA_SPK1R_SHORT_EINT1 0x2000 /* SPK1R_SHORT_EINT1 */ +#define ARIZONA_SPK1R_SHORT_EINT1_MASK 0x2000 /* SPK1R_SHORT_EINT1 */ +#define ARIZONA_SPK1R_SHORT_EINT1_SHIFT 13 /* SPK1R_SHORT_EINT1 */ +#define ARIZONA_SPK1R_SHORT_EINT1_WIDTH 1 /* SPK1R_SHORT_EINT1 */ +#define ARIZONA_SPK1L_SHORT_EINT1 0x1000 /* SPK1L_SHORT_EINT1 */ +#define ARIZONA_SPK1L_SHORT_EINT1_MASK 0x1000 /* SPK1L_SHORT_EINT1 */ +#define ARIZONA_SPK1L_SHORT_EINT1_SHIFT 12 /* SPK1L_SHORT_EINT1 */ +#define ARIZONA_SPK1L_SHORT_EINT1_WIDTH 1 /* SPK1L_SHORT_EINT1 */ +#define ARIZONA_HP3R_SC_NEG_EINT1 0x0800 /* HP3R_SC_NEG_EINT1 */ +#define ARIZONA_HP3R_SC_NEG_EINT1_MASK 0x0800 /* HP3R_SC_NEG_EINT1 */ +#define ARIZONA_HP3R_SC_NEG_EINT1_SHIFT 11 /* HP3R_SC_NEG_EINT1 */ +#define ARIZONA_HP3R_SC_NEG_EINT1_WIDTH 1 /* HP3R_SC_NEG_EINT1 */ +#define ARIZONA_HP3R_SC_POS_EINT1 0x0400 /* HP3R_SC_POS_EINT1 */ +#define ARIZONA_HP3R_SC_POS_EINT1_MASK 0x0400 /* HP3R_SC_POS_EINT1 */ +#define ARIZONA_HP3R_SC_POS_EINT1_SHIFT 10 /* HP3R_SC_POS_EINT1 */ +#define ARIZONA_HP3R_SC_POS_EINT1_WIDTH 1 /* HP3R_SC_POS_EINT1 */ +#define ARIZONA_HP3L_SC_NEG_EINT1 0x0200 /* HP3L_SC_NEG_EINT1 */ +#define ARIZONA_HP3L_SC_NEG_EINT1_MASK 0x0200 /* HP3L_SC_NEG_EINT1 */ +#define ARIZONA_HP3L_SC_NEG_EINT1_SHIFT 9 /* HP3L_SC_NEG_EINT1 */ +#define ARIZONA_HP3L_SC_NEG_EINT1_WIDTH 1 /* HP3L_SC_NEG_EINT1 */
+#define ARIZONA_HP3L_SC_POS_EINT1 0x0100 /* HP3L_SC_POS_EINT1 */ +#define ARIZONA_HP3L_SC_POS_EINT1_MASK 0x0100 /* HP3L_SC_POS_EINT1 */ +#define ARIZONA_HP3L_SC_POS_EINT1_SHIFT 8 /* HP3L_SC_POS_EINT1 */ +#define ARIZONA_HP3L_SC_POS_EINT1_WIDTH 1 /* HP3L_SC_POS_EINT1 */ +#define ARIZONA_HP2R_SC_NEG_EINT1 0x0080 /* HP2R_SC_NEG_EINT1 */ +#define ARIZONA_HP2R_SC_NEG_EINT1_MASK 0x0080 /* HP2R_SC_NEG_EINT1 */ +#define ARIZONA_HP2R_SC_NEG_EINT1_SHIFT 7 /* HP2R_SC_NEG_EINT1 */ +#define ARIZONA_HP2R_SC_NEG_EINT1_WIDTH 1 /* HP2R_SC_NEG_EINT1 */ +#define ARIZONA_HP2R_SC_POS_EINT1 0x0040 /* HP2R_SC_POS_EINT1 */ +#define ARIZONA_HP2R_SC_POS_EINT1_MASK 0x0040 /* HP2R_SC_POS_EINT1 */ +#define ARIZONA_HP2R_SC_POS_EINT1_SHIFT 6 /* HP2R_SC_POS_EINT1 */ +#define ARIZONA_HP2R_SC_POS_EINT1_WIDTH 1 /* HP2R_SC_POS_EINT1 */ +#define ARIZONA_HP2L_SC_NEG_EINT1 0x0020 /* HP2L_SC_NEG_EINT1 */ +#define ARIZONA_HP2L_SC_NEG_EINT1_MASK 0x0020 /* HP2L_SC_NEG_EINT1 */ +#define ARIZONA_HP2L_SC_NEG_EINT1_SHIFT 5 /* HP2L_SC_NEG_EINT1 */ +#define ARIZONA_HP2L_SC_NEG_EINT1_WIDTH 1 /* HP2L_SC_NEG_EINT1 */ +#define ARIZONA_HP2L_SC_POS_EINT1 0x0010 /* HP2L_SC_POS_EINT1 */ +#define ARIZONA_HP2L_SC_POS_EINT1_MASK 0x0010 /* HP2L_SC_POS_EINT1 */ +#define ARIZONA_HP2L_SC_POS_EINT1_SHIFT 4 /* HP2L_SC_POS_EINT1 */ +#define ARIZONA_HP2L_SC_POS_EINT1_WIDTH 1 /* HP2L_SC_POS_EINT1 */ +#define ARIZONA_HP1R_SC_NEG_EINT1 0x0008 /* HP1R_SC_NEG_EINT1 */ +#define ARIZONA_HP1R_SC_NEG_EINT1_MASK 0x0008 /* HP1R_SC_NEG_EINT1 */ +#define ARIZONA_HP1R_SC_NEG_EINT1_SHIFT 3 /* HP1R_SC_NEG_EINT1 */ +#define ARIZONA_HP1R_SC_NEG_EINT1_WIDTH 1 /* HP1R_SC_NEG_EINT1 */ +#define ARIZONA_HP1R_SC_POS_EINT1 0x0004 /* HP1R_SC_POS_EINT1 */ +#define ARIZONA_HP1R_SC_POS_EINT1_MASK 0x0004 /* HP1R_SC_POS_EINT1 */ +#define ARIZONA_HP1R_SC_POS_EINT1_SHIFT 2 /* HP1R_SC_POS_EINT1 */ +#define ARIZONA_HP1R_SC_POS_EINT1_WIDTH 1 /* HP1R_SC_POS_EINT1 */ +#define ARIZONA_HP1L_SC_NEG_EINT1 0x0002 /* HP1L_SC_NEG_EINT1 */ +#define ARIZONA_HP1L_SC_NEG_EINT1_MASK 0x0002 /* HP1L_SC_NEG_EINT1 */ +#define ARIZONA_HP1L_SC_NEG_EINT1_SHIFT 1 /* HP1L_SC_NEG_EINT1 */ +#define ARIZONA_HP1L_SC_NEG_EINT1_WIDTH 1 /* HP1L_SC_NEG_EINT1 */ +#define ARIZONA_HP1L_SC_POS_EINT1 0x0001 /* HP1L_SC_POS_EINT1 */ +#define ARIZONA_HP1L_SC_POS_EINT1_MASK 0x0001 /* HP1L_SC_POS_EINT1 */ +#define ARIZONA_HP1L_SC_POS_EINT1_SHIFT 0 /* HP1L_SC_POS_EINT1 */ +#define ARIZONA_HP1L_SC_POS_EINT1_WIDTH 1 /* HP1L_SC_POS_EINT1 */ + +/* + * R3336 (0xD08) - Interrupt Status 1 Mask + */ +#define ARIZONA_IM_GP4_EINT1 0x0008 /* IM_GP4_EINT1 */ +#define ARIZONA_IM_GP4_EINT1_MASK 0x0008 /* IM_GP4_EINT1 */ +#define ARIZONA_IM_GP4_EINT1_SHIFT 3 /* IM_GP4_EINT1 */ +#define ARIZONA_IM_GP4_EINT1_WIDTH 1 /* IM_GP4_EINT1 */ +#define ARIZONA_IM_GP3_EINT1 0x0004 /* IM_GP3_EINT1 */ +#define ARIZONA_IM_GP3_EINT1_MASK 0x0004 /* IM_GP3_EINT1 */ +#define ARIZONA_IM_GP3_EINT1_SHIFT 2 /* IM_GP3_EINT1 */ +#define ARIZONA_IM_GP3_EINT1_WIDTH 1 /* IM_GP3_EINT1 */ +#define ARIZONA_IM_GP2_EINT1 0x0002 /* IM_GP2_EINT1 */ +#define ARIZONA_IM_GP2_EINT1_MASK 0x0002 /* IM_GP2_EINT1 */ +#define ARIZONA_IM_GP2_EINT1_SHIFT 1 /* IM_GP2_EINT1 */ +#define ARIZONA_IM_GP2_EINT1_WIDTH 1 /* IM_GP2_EINT1 */ +#define ARIZONA_IM_GP1_EINT1 0x0001 /* IM_GP1_EINT1 */ +#define ARIZONA_IM_GP1_EINT1_MASK 0x0001 /* IM_GP1_EINT1 */ +#define ARIZONA_IM_GP1_EINT1_SHIFT 0 /* IM_GP1_EINT1 */ +#define ARIZONA_IM_GP1_EINT1_WIDTH 1 /* IM_GP1_EINT1 */ + +/* + * R3337 (0xD09) - Interrupt Status 2 Mask + */ +#define ARIZONA_IM_DSP1_RAM_RDY_EINT1 0x0100 /* IM_DSP1_RAM_RDY_EINT1 */ 
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT1_MASK 0x0100 /* IM_DSP1_RAM_RDY_EINT1 */ +#define ARIZONA_IM_DSP1_RAM_RDY_EINT1_SHIFT 8 /* IM_DSP1_RAM_RDY_EINT1 */ +#define ARIZONA_IM_DSP1_RAM_RDY_EINT1_WIDTH 1 /* IM_DSP1_RAM_RDY_EINT1 */ +#define ARIZONA_IM_DSP_IRQ2_EINT1 0x0002 /* IM_DSP_IRQ2_EINT1 */ +#define ARIZONA_IM_DSP_IRQ2_EINT1_MASK 0x0002 /* IM_DSP_IRQ2_EINT1 */ +#define ARIZONA_IM_DSP_IRQ2_EINT1_SHIFT 1 /* IM_DSP_IRQ2_EINT1 */ +#define ARIZONA_IM_DSP_IRQ2_EINT1_WIDTH 1 /* IM_DSP_IRQ2_EINT1 */ +#define ARIZONA_IM_DSP_IRQ1_EINT1 0x0001 /* IM_DSP_IRQ1_EINT1 */ +#define ARIZONA_IM_DSP_IRQ1_EINT1_MASK 0x0001 /* IM_DSP_IRQ1_EINT1 */ +#define ARIZONA_IM_DSP_IRQ1_EINT1_SHIFT 0 /* IM_DSP_IRQ1_EINT1 */ +#define ARIZONA_IM_DSP_IRQ1_EINT1_WIDTH 1 /* IM_DSP_IRQ1_EINT1 */ + +/* + * R3338 (0xD0A) - Interrupt Status 3 Mask + */ +#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT1 0x8000 /* IM_SPK_OVERHEAT_WARN_EINT1 */ +#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT1_MASK 0x8000 /* IM_SPK_OVERHEAT_WARN_EINT1 */ +#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT1_SHIFT 15 /* IM_SPK_OVERHEAT_WARN_EINT1 */ +#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT1_WIDTH 1 /* IM_SPK_OVERHEAT_WARN_EINT1 */ +#define ARIZONA_IM_SPK_OVERHEAT_EINT1 0x4000 /* IM_SPK_OVERHEAT_EINT1 */ +#define ARIZONA_IM_SPK_OVERHEAT_EINT1_MASK 0x4000 /* IM_SPK_OVERHEAT_EINT1 */ +#define ARIZONA_IM_SPK_OVERHEAT_EINT1_SHIFT 14 /* IM_SPK_OVERHEAT_EINT1 */ +#define ARIZONA_IM_SPK_OVERHEAT_EINT1_WIDTH 1 /* IM_SPK_OVERHEAT_EINT1 */ +#define ARIZONA_IM_HPDET_EINT1 0x2000 /* IM_HPDET_EINT1 */ +#define ARIZONA_IM_HPDET_EINT1_MASK 0x2000 /* IM_HPDET_EINT1 */ +#define ARIZONA_IM_HPDET_EINT1_SHIFT 13 /* IM_HPDET_EINT1 */ +#define ARIZONA_IM_HPDET_EINT1_WIDTH 1 /* IM_HPDET_EINT1 */ +#define ARIZONA_IM_MICDET_EINT1 0x1000 /* IM_MICDET_EINT1 */ +#define ARIZONA_IM_MICDET_EINT1_MASK 0x1000 /* IM_MICDET_EINT1 */ +#define ARIZONA_IM_MICDET_EINT1_SHIFT 12 /* IM_MICDET_EINT1 */ +#define ARIZONA_IM_MICDET_EINT1_WIDTH 1 /* IM_MICDET_EINT1 */ +#define ARIZONA_IM_WSEQ_DONE_EINT1 0x0800 /* IM_WSEQ_DONE_EINT1 */ +#define ARIZONA_IM_WSEQ_DONE_EINT1_MASK 0x0800 /* IM_WSEQ_DONE_EINT1 */ +#define ARIZONA_IM_WSEQ_DONE_EINT1_SHIFT 11 /* IM_WSEQ_DONE_EINT1 */ +#define ARIZONA_IM_WSEQ_DONE_EINT1_WIDTH 1 /* IM_WSEQ_DONE_EINT1 */ +#define ARIZONA_IM_DRC2_SIG_DET_EINT1 0x0400 /* IM_DRC2_SIG_DET_EINT1 */ +#define ARIZONA_IM_DRC2_SIG_DET_EINT1_MASK 0x0400 /* IM_DRC2_SIG_DET_EINT1 */ +#define ARIZONA_IM_DRC2_SIG_DET_EINT1_SHIFT 10 /* IM_DRC2_SIG_DET_EINT1 */ +#define ARIZONA_IM_DRC2_SIG_DET_EINT1_WIDTH 1 /* IM_DRC2_SIG_DET_EINT1 */ +#define ARIZONA_IM_DRC1_SIG_DET_EINT1 0x0200 /* IM_DRC1_SIG_DET_EINT1 */ +#define ARIZONA_IM_DRC1_SIG_DET_EINT1_MASK 0x0200 /* IM_DRC1_SIG_DET_EINT1 */ +#define ARIZONA_IM_DRC1_SIG_DET_EINT1_SHIFT 9 /* IM_DRC1_SIG_DET_EINT1 */ +#define ARIZONA_IM_DRC1_SIG_DET_EINT1_WIDTH 1 /* IM_DRC1_SIG_DET_EINT1 */ +#define ARIZONA_IM_ASRC2_LOCK_EINT1 0x0100 /* IM_ASRC2_LOCK_EINT1 */ +#define ARIZONA_IM_ASRC2_LOCK_EINT1_MASK 0x0100 /* IM_ASRC2_LOCK_EINT1 */ +#define ARIZONA_IM_ASRC2_LOCK_EINT1_SHIFT 8 /* IM_ASRC2_LOCK_EINT1 */ +#define ARIZONA_IM_ASRC2_LOCK_EINT1_WIDTH 1 /* IM_ASRC2_LOCK_EINT1 */ +#define ARIZONA_IM_ASRC1_LOCK_EINT1 0x0080 /* IM_ASRC1_LOCK_EINT1 */ +#define ARIZONA_IM_ASRC1_LOCK_EINT1_MASK 0x0080 /* IM_ASRC1_LOCK_EINT1 */ +#define ARIZONA_IM_ASRC1_LOCK_EINT1_SHIFT 7 /* IM_ASRC1_LOCK_EINT1 */ +#define ARIZONA_IM_ASRC1_LOCK_EINT1_WIDTH 1 /* IM_ASRC1_LOCK_EINT1 */ +#define ARIZONA_IM_UNDERCLOCKED_EINT1 0x0040 /* IM_UNDERCLOCKED_EINT1 */ +#define 
ARIZONA_IM_UNDERCLOCKED_EINT1_MASK 0x0040 /* IM_UNDERCLOCKED_EINT1 */ +#define ARIZONA_IM_UNDERCLOCKED_EINT1_SHIFT 6 /* IM_UNDERCLOCKED_EINT1 */ +#define ARIZONA_IM_UNDERCLOCKED_EINT1_WIDTH 1 /* IM_UNDERCLOCKED_EINT1 */ +#define ARIZONA_IM_OVERCLOCKED_EINT1 0x0020 /* IM_OVERCLOCKED_EINT1 */ +#define ARIZONA_IM_OVERCLOCKED_EINT1_MASK 0x0020 /* IM_OVERCLOCKED_EINT1 */ +#define ARIZONA_IM_OVERCLOCKED_EINT1_SHIFT 5 /* IM_OVERCLOCKED_EINT1 */ +#define ARIZONA_IM_OVERCLOCKED_EINT1_WIDTH 1 /* IM_OVERCLOCKED_EINT1 */ +#define ARIZONA_IM_FLL2_LOCK_EINT1 0x0008 /* IM_FLL2_LOCK_EINT1 */ +#define ARIZONA_IM_FLL2_LOCK_EINT1_MASK 0x0008 /* IM_FLL2_LOCK_EINT1 */ +#define ARIZONA_IM_FLL2_LOCK_EINT1_SHIFT 3 /* IM_FLL2_LOCK_EINT1 */ +#define ARIZONA_IM_FLL2_LOCK_EINT1_WIDTH 1 /* IM_FLL2_LOCK_EINT1 */ +#define ARIZONA_IM_FLL1_LOCK_EINT1 0x0004 /* IM_FLL1_LOCK_EINT1 */ +#define ARIZONA_IM_FLL1_LOCK_EINT1_MASK 0x0004 /* IM_FLL1_LOCK_EINT1 */ +#define ARIZONA_IM_FLL1_LOCK_EINT1_SHIFT 2 /* IM_FLL1_LOCK_EINT1 */ +#define ARIZONA_IM_FLL1_LOCK_EINT1_WIDTH 1 /* IM_FLL1_LOCK_EINT1 */ +#define ARIZONA_IM_CLKGEN_ERR_EINT1 0x0002 /* IM_CLKGEN_ERR_EINT1 */ +#define ARIZONA_IM_CLKGEN_ERR_EINT1_MASK 0x0002 /* IM_CLKGEN_ERR_EINT1 */ +#define ARIZONA_IM_CLKGEN_ERR_EINT1_SHIFT 1 /* IM_CLKGEN_ERR_EINT1 */ +#define ARIZONA_IM_CLKGEN_ERR_EINT1_WIDTH 1 /* IM_CLKGEN_ERR_EINT1 */ +#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT1 */ +#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1_MASK 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT1 */ +#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1_SHIFT 0 /* IM_CLKGEN_ERR_ASYNC_EINT1 */ +#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1_WIDTH 1 /* IM_CLKGEN_ERR_ASYNC_EINT1 */ + +/* + * R3339 (0xD0B) - Interrupt Status 4 Mask + */ +#define ARIZONA_IM_ASRC_CFG_ERR_EINT1 0x8000 /* IM_ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_IM_ASRC_CFG_ERR_EINT1_MASK 0x8000 /* IM_ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_IM_ASRC_CFG_ERR_EINT1_SHIFT 15 /* IM_ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_IM_ASRC_CFG_ERR_EINT1_WIDTH 1 /* IM_ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_IM_AIF3_ERR_EINT1 0x4000 /* IM_AIF3_ERR_EINT1 */ +#define ARIZONA_IM_AIF3_ERR_EINT1_MASK 0x4000 /* IM_AIF3_ERR_EINT1 */ +#define ARIZONA_IM_AIF3_ERR_EINT1_SHIFT 14 /* IM_AIF3_ERR_EINT1 */ +#define ARIZONA_IM_AIF3_ERR_EINT1_WIDTH 1 /* IM_AIF3_ERR_EINT1 */ +#define ARIZONA_IM_AIF2_ERR_EINT1 0x2000 /* IM_AIF2_ERR_EINT1 */ +#define ARIZONA_IM_AIF2_ERR_EINT1_MASK 0x2000 /* IM_AIF2_ERR_EINT1 */ +#define ARIZONA_IM_AIF2_ERR_EINT1_SHIFT 13 /* IM_AIF2_ERR_EINT1 */ +#define ARIZONA_IM_AIF2_ERR_EINT1_WIDTH 1 /* IM_AIF2_ERR_EINT1 */ +#define ARIZONA_IM_AIF1_ERR_EINT1 0x1000 /* IM_AIF1_ERR_EINT1 */ +#define ARIZONA_IM_AIF1_ERR_EINT1_MASK 0x1000 /* IM_AIF1_ERR_EINT1 */ +#define ARIZONA_IM_AIF1_ERR_EINT1_SHIFT 12 /* IM_AIF1_ERR_EINT1 */ +#define ARIZONA_IM_AIF1_ERR_EINT1_WIDTH 1 /* IM_AIF1_ERR_EINT1 */ +#define ARIZONA_IM_CTRLIF_ERR_EINT1 0x0800 /* IM_CTRLIF_ERR_EINT1 */ +#define ARIZONA_IM_CTRLIF_ERR_EINT1_MASK 0x0800 /* IM_CTRLIF_ERR_EINT1 */ +#define ARIZONA_IM_CTRLIF_ERR_EINT1_SHIFT 11 /* IM_CTRLIF_ERR_EINT1 */ +#define ARIZONA_IM_CTRLIF_ERR_EINT1_WIDTH 1 /* IM_CTRLIF_ERR_EINT1 */ +#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1_MASK 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1_SHIFT 10 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1_WIDTH 1 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */ +#define 
ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1_MASK 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1_SHIFT 9 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1_WIDTH 1 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1 0x0100 /* IM_SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1_MASK 0x0100 /* IM_SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1_SHIFT 8 /* IM_SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1_WIDTH 1 /* IM_SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1 0x0080 /* IM_ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1_MASK 0x0080 /* IM_ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1_SHIFT 7 /* IM_ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1 0x0040 /* IM_ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1_MASK 0x0040 /* IM_ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1_SHIFT 6 /* IM_ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_IM_HP3R_DONE_EINT1 0x0020 /* IM_HP3R_DONE_EINT1 */ +#define ARIZONA_IM_HP3R_DONE_EINT1_MASK 0x0020 /* IM_HP3R_DONE_EINT1 */ +#define ARIZONA_IM_HP3R_DONE_EINT1_SHIFT 5 /* IM_HP3R_DONE_EINT1 */ +#define ARIZONA_IM_HP3R_DONE_EINT1_WIDTH 1 /* IM_HP3R_DONE_EINT1 */ +#define ARIZONA_IM_HP3L_DONE_EINT1 0x0010 /* IM_HP3L_DONE_EINT1 */ +#define ARIZONA_IM_HP3L_DONE_EINT1_MASK 0x0010 /* IM_HP3L_DONE_EINT1 */ +#define ARIZONA_IM_HP3L_DONE_EINT1_SHIFT 4 /* IM_HP3L_DONE_EINT1 */ +#define ARIZONA_IM_HP3L_DONE_EINT1_WIDTH 1 /* IM_HP3L_DONE_EINT1 */ +#define ARIZONA_IM_HP2R_DONE_EINT1 0x0008 /* IM_HP2R_DONE_EINT1 */ +#define ARIZONA_IM_HP2R_DONE_EINT1_MASK 0x0008 /* IM_HP2R_DONE_EINT1 */ +#define ARIZONA_IM_HP2R_DONE_EINT1_SHIFT 3 /* IM_HP2R_DONE_EINT1 */ +#define ARIZONA_IM_HP2R_DONE_EINT1_WIDTH 1 /* IM_HP2R_DONE_EINT1 */ +#define ARIZONA_IM_HP2L_DONE_EINT1 0x0004 /* IM_HP2L_DONE_EINT1 */ +#define ARIZONA_IM_HP2L_DONE_EINT1_MASK 0x0004 /* IM_HP2L_DONE_EINT1 */ +#define ARIZONA_IM_HP2L_DONE_EINT1_SHIFT 2 /* IM_HP2L_DONE_EINT1 */ +#define ARIZONA_IM_HP2L_DONE_EINT1_WIDTH 1 /* IM_HP2L_DONE_EINT1 */ +#define ARIZONA_IM_HP1R_DONE_EINT1 0x0002 /* IM_HP1R_DONE_EINT1 */ +#define ARIZONA_IM_HP1R_DONE_EINT1_MASK 0x0002 /* IM_HP1R_DONE_EINT1 */ +#define ARIZONA_IM_HP1R_DONE_EINT1_SHIFT 1 /* IM_HP1R_DONE_EINT1 */ +#define ARIZONA_IM_HP1R_DONE_EINT1_WIDTH 1 /* IM_HP1R_DONE_EINT1 */ +#define ARIZONA_IM_HP1L_DONE_EINT1 0x0001 /* IM_HP1L_DONE_EINT1 */ +#define ARIZONA_IM_HP1L_DONE_EINT1_MASK 0x0001 /* IM_HP1L_DONE_EINT1 */ +#define ARIZONA_IM_HP1L_DONE_EINT1_SHIFT 0 /* IM_HP1L_DONE_EINT1 */ +#define ARIZONA_IM_HP1L_DONE_EINT1_WIDTH 1 /* IM_HP1L_DONE_EINT1 */ + +/* + * R3339 (0xD0B) - Interrupt Status 4 Mask (Alternate layout) + * + * Alternate layout used on later devices, note only fields that have moved + * are specified + */ +#define ARIZONA_V2_IM_AIF3_ERR_EINT1 0x8000 /* IM_AIF3_ERR_EINT1 */ +#define ARIZONA_V2_IM_AIF3_ERR_EINT1_MASK 0x8000 /* IM_AIF3_ERR_EINT1 */ +#define ARIZONA_V2_IM_AIF3_ERR_EINT1_SHIFT 15 /* IM_AIF3_ERR_EINT1 */ +#define ARIZONA_V2_IM_AIF3_ERR_EINT1_WIDTH 1 /* IM_AIF3_ERR_EINT1 */ +#define ARIZONA_V2_IM_AIF2_ERR_EINT1 0x4000 /* IM_AIF2_ERR_EINT1 */ +#define ARIZONA_V2_IM_AIF2_ERR_EINT1_MASK 0x4000 /* IM_AIF2_ERR_EINT1 */ 
+#define ARIZONA_V2_IM_AIF2_ERR_EINT1_SHIFT 14 /* IM_AIF2_ERR_EINT1 */ +#define ARIZONA_V2_IM_AIF2_ERR_EINT1_WIDTH 1 /* IM_AIF2_ERR_EINT1 */ +#define ARIZONA_V2_IM_AIF1_ERR_EINT1 0x2000 /* IM_AIF1_ERR_EINT1 */ +#define ARIZONA_V2_IM_AIF1_ERR_EINT1_MASK 0x2000 /* IM_AIF1_ERR_EINT1 */ +#define ARIZONA_V2_IM_AIF1_ERR_EINT1_SHIFT 13 /* IM_AIF1_ERR_EINT1 */ +#define ARIZONA_V2_IM_AIF1_ERR_EINT1_WIDTH 1 /* IM_AIF1_ERR_EINT1 */ +#define ARIZONA_V2_IM_CTRLIF_ERR_EINT1 0x1000 /* IM_CTRLIF_ERR_EINT1 */ +#define ARIZONA_V2_IM_CTRLIF_ERR_EINT1_MASK 0x1000 /* IM_CTRLIF_ERR_EINT1 */ +#define ARIZONA_V2_IM_CTRLIF_ERR_EINT1_SHIFT 12 /* IM_CTRLIF_ERR_EINT1 */ +#define ARIZONA_V2_IM_CTRLIF_ERR_EINT1_WIDTH 1 /* IM_CTRLIF_ERR_EINT1 */ +#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT1 0x0800 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT1_MASK 0x0800 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT1_SHIFT 11 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT1_WIDTH 1 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */ +#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT1 0x0400 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT1_MASK 0x0400 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT1_SHIFT 10 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT1_WIDTH 1 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT1 0x0200 /* IM_SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT1_MASK 0x0200 /* IM_SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT1_SHIFT 9 /* IM_SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT1_WIDTH 1 /* IM_SYSCLK_ENA_LOW_EINT1 */ +#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT1 0x0100 /* IM_ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT1_MASK 0x0100 /* IM_ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT1_SHIFT 8 /* IM_ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC1_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT1 0x0080 /* IM_ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT1_MASK 0x0080 /* IM_ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT1_SHIFT 7 /* IM_ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC2_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT1 0x0040 /* IM_ISRC3_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT1_MASK 0x0040 /* IM_ISRC3_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT1_SHIFT 6 /* IM_ISRC3_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC3_CFG_ERR_EINT1 */ + +/* + * R3340 (0xD0C) - Interrupt Status 5 Mask + */ +#define ARIZONA_IM_BOOT_DONE_EINT1 0x0100 /* IM_BOOT_DONE_EINT1 */ +#define ARIZONA_IM_BOOT_DONE_EINT1_MASK 0x0100 /* IM_BOOT_DONE_EINT1 */ +#define ARIZONA_IM_BOOT_DONE_EINT1_SHIFT 8 /* IM_BOOT_DONE_EINT1 */ +#define ARIZONA_IM_BOOT_DONE_EINT1_WIDTH 1 /* IM_BOOT_DONE_EINT1 */ +#define ARIZONA_IM_DCS_DAC_DONE_EINT1 0x0080 /* IM_DCS_DAC_DONE_EINT1 */ +#define ARIZONA_IM_DCS_DAC_DONE_EINT1_MASK 0x0080 /* IM_DCS_DAC_DONE_EINT1 */ +#define ARIZONA_IM_DCS_DAC_DONE_EINT1_SHIFT 7 /* IM_DCS_DAC_DONE_EINT1 */ +#define ARIZONA_IM_DCS_DAC_DONE_EINT1_WIDTH 1 /* IM_DCS_DAC_DONE_EINT1 */ +#define ARIZONA_IM_DCS_HP_DONE_EINT1 0x0040 /* IM_DCS_HP_DONE_EINT1 */ +#define ARIZONA_IM_DCS_HP_DONE_EINT1_MASK 0x0040 /* IM_DCS_HP_DONE_EINT1 */ 
+#define ARIZONA_IM_DCS_HP_DONE_EINT1_SHIFT 6 /* IM_DCS_HP_DONE_EINT1 */ +#define ARIZONA_IM_DCS_HP_DONE_EINT1_WIDTH 1 /* IM_DCS_HP_DONE_EINT1 */ +#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1 0x0002 /* IM_FLL2_CLOCK_OK_EINT1 */ +#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1_MASK 0x0002 /* IM_FLL2_CLOCK_OK_EINT1 */ +#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1_SHIFT 1 /* IM_FLL2_CLOCK_OK_EINT1 */ +#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1_WIDTH 1 /* IM_FLL2_CLOCK_OK_EINT1 */ +#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1 0x0001 /* IM_FLL1_CLOCK_OK_EINT1 */ +#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1_MASK 0x0001 /* IM_FLL1_CLOCK_OK_EINT1 */ +#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1_SHIFT 0 /* IM_FLL1_CLOCK_OK_EINT1 */ +#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1_WIDTH 1 /* IM_FLL1_CLOCK_OK_EINT1 */ + +/* + * R3340 (0xD0C) - Interrupt Status 5 Mask (Alternate layout) + * + * Alternate layout used on later devices, note only fields that have moved + * are specified + */ +#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT1 0x0008 /* IM_ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT1_MASK 0x0008 /* IM_ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT1_SHIFT 3 /* IM_ASRC_CFG_ERR_EINT1 */ +#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT1_WIDTH 1 /* IM_ASRC_CFG_ERR_EINT1 */ + +/* + * R3341 (0xD0D) - Interrupt Status 6 Mask + */ +#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT1 0x8000 /* IM_DSP_SHARED_WR_COLL_EINT1 */ +#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT1_MASK 0x8000 /* IM_DSP_SHARED_WR_COLL_EINT1 */ +#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT1_SHIFT 15 /* IM_DSP_SHARED_WR_COLL_EINT1 */ +#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT1_WIDTH 1 /* IM_DSP_SHARED_WR_COLL_EINT1 */ +#define ARIZONA_IM_SPK_SHUTDOWN_EINT1 0x4000 /* IM_SPK_SHUTDOWN_EINT1 */ +#define ARIZONA_IM_SPK_SHUTDOWN_EINT1_MASK 0x4000 /* IM_SPK_SHUTDOWN_EINT1 */ +#define ARIZONA_IM_SPK_SHUTDOWN_EINT1_SHIFT 14 /* IM_SPK_SHUTDOWN_EINT1 */ +#define ARIZONA_IM_SPK_SHUTDOWN_EINT1_WIDTH 1 /* IM_SPK_SHUTDOWN_EINT1 */ +#define ARIZONA_IM_SPK1R_SHORT_EINT1 0x2000 /* IM_SPK1R_SHORT_EINT1 */ +#define ARIZONA_IM_SPK1R_SHORT_EINT1_MASK 0x2000 /* IM_SPK1R_SHORT_EINT1 */ +#define ARIZONA_IM_SPK1R_SHORT_EINT1_SHIFT 13 /* IM_SPK1R_SHORT_EINT1 */ +#define ARIZONA_IM_SPK1R_SHORT_EINT1_WIDTH 1 /* IM_SPK1R_SHORT_EINT1 */ +#define ARIZONA_IM_SPK1L_SHORT_EINT1 0x1000 /* IM_SPK1L_SHORT_EINT1 */ +#define ARIZONA_IM_SPK1L_SHORT_EINT1_MASK 0x1000 /* IM_SPK1L_SHORT_EINT1 */ +#define ARIZONA_IM_SPK1L_SHORT_EINT1_SHIFT 12 /* IM_SPK1L_SHORT_EINT1 */ +#define ARIZONA_IM_SPK1L_SHORT_EINT1_WIDTH 1 /* IM_SPK1L_SHORT_EINT1 */ +#define ARIZONA_IM_HP3R_SC_NEG_EINT1 0x0800 /* IM_HP3R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP3R_SC_NEG_EINT1_MASK 0x0800 /* IM_HP3R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP3R_SC_NEG_EINT1_SHIFT 11 /* IM_HP3R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP3R_SC_NEG_EINT1_WIDTH 1 /* IM_HP3R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP3R_SC_POS_EINT1 0x0400 /* IM_HP3R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP3R_SC_POS_EINT1_MASK 0x0400 /* IM_HP3R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP3R_SC_POS_EINT1_SHIFT 10 /* IM_HP3R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP3R_SC_POS_EINT1_WIDTH 1 /* IM_HP3R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP3L_SC_NEG_EINT1 0x0200 /* IM_HP3L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP3L_SC_NEG_EINT1_MASK 0x0200 /* IM_HP3L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP3L_SC_NEG_EINT1_SHIFT 9 /* IM_HP3L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP3L_SC_NEG_EINT1_WIDTH 1 /* IM_HP3L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP3L_SC_POS_EINT1 0x0100 /* IM_HP3L_SC_POS_EINT1 */ +#define 
ARIZONA_IM_HP3L_SC_POS_EINT1_MASK 0x0100 /* IM_HP3L_SC_POS_EINT1 */ +#define ARIZONA_IM_HP3L_SC_POS_EINT1_SHIFT 8 /* IM_HP3L_SC_POS_EINT1 */ +#define ARIZONA_IM_HP3L_SC_POS_EINT1_WIDTH 1 /* IM_HP3L_SC_POS_EINT1 */ +#define ARIZONA_IM_HP2R_SC_NEG_EINT1 0x0080 /* IM_HP2R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP2R_SC_NEG_EINT1_MASK 0x0080 /* IM_HP2R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP2R_SC_NEG_EINT1_SHIFT 7 /* IM_HP2R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP2R_SC_NEG_EINT1_WIDTH 1 /* IM_HP2R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP2R_SC_POS_EINT1 0x0040 /* IM_HP2R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP2R_SC_POS_EINT1_MASK 0x0040 /* IM_HP2R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP2R_SC_POS_EINT1_SHIFT 6 /* IM_HP2R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP2R_SC_POS_EINT1_WIDTH 1 /* IM_HP2R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP2L_SC_NEG_EINT1 0x0020 /* IM_HP2L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP2L_SC_NEG_EINT1_MASK 0x0020 /* IM_HP2L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP2L_SC_NEG_EINT1_SHIFT 5 /* IM_HP2L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP2L_SC_NEG_EINT1_WIDTH 1 /* IM_HP2L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP2L_SC_POS_EINT1 0x0010 /* IM_HP2L_SC_POS_EINT1 */ +#define ARIZONA_IM_HP2L_SC_POS_EINT1_MASK 0x0010 /* IM_HP2L_SC_POS_EINT1 */ +#define ARIZONA_IM_HP2L_SC_POS_EINT1_SHIFT 4 /* IM_HP2L_SC_POS_EINT1 */ +#define ARIZONA_IM_HP2L_SC_POS_EINT1_WIDTH 1 /* IM_HP2L_SC_POS_EINT1 */ +#define ARIZONA_IM_HP1R_SC_NEG_EINT1 0x0008 /* IM_HP1R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP1R_SC_NEG_EINT1_MASK 0x0008 /* IM_HP1R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP1R_SC_NEG_EINT1_SHIFT 3 /* IM_HP1R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP1R_SC_NEG_EINT1_WIDTH 1 /* IM_HP1R_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP1R_SC_POS_EINT1 0x0004 /* IM_HP1R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP1R_SC_POS_EINT1_MASK 0x0004 /* IM_HP1R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP1R_SC_POS_EINT1_SHIFT 2 /* IM_HP1R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP1R_SC_POS_EINT1_WIDTH 1 /* IM_HP1R_SC_POS_EINT1 */ +#define ARIZONA_IM_HP1L_SC_NEG_EINT1 0x0002 /* IM_HP1L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP1L_SC_NEG_EINT1_MASK 0x0002 /* IM_HP1L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP1L_SC_NEG_EINT1_SHIFT 1 /* IM_HP1L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP1L_SC_NEG_EINT1_WIDTH 1 /* IM_HP1L_SC_NEG_EINT1 */ +#define ARIZONA_IM_HP1L_SC_POS_EINT1 0x0001 /* IM_HP1L_SC_POS_EINT1 */ +#define ARIZONA_IM_HP1L_SC_POS_EINT1_MASK 0x0001 /* IM_HP1L_SC_POS_EINT1 */ +#define ARIZONA_IM_HP1L_SC_POS_EINT1_SHIFT 0 /* IM_HP1L_SC_POS_EINT1 */ +#define ARIZONA_IM_HP1L_SC_POS_EINT1_WIDTH 1 /* IM_HP1L_SC_POS_EINT1 */ + +/* + * R3343 (0xD0F) - Interrupt Control + */ +#define ARIZONA_IM_IRQ1 0x0001 /* IM_IRQ1 */ +#define ARIZONA_IM_IRQ1_MASK 0x0001 /* IM_IRQ1 */ +#define ARIZONA_IM_IRQ1_SHIFT 0 /* IM_IRQ1 */ +#define ARIZONA_IM_IRQ1_WIDTH 1 /* IM_IRQ1 */ + +/* + * R3344 (0xD10) - IRQ2 Status 1 + */ +#define ARIZONA_GP4_EINT2 0x0008 /* GP4_EINT2 */ +#define ARIZONA_GP4_EINT2_MASK 0x0008 /* GP4_EINT2 */ +#define ARIZONA_GP4_EINT2_SHIFT 3 /* GP4_EINT2 */ +#define ARIZONA_GP4_EINT2_WIDTH 1 /* GP4_EINT2 */ +#define ARIZONA_GP3_EINT2 0x0004 /* GP3_EINT2 */ +#define ARIZONA_GP3_EINT2_MASK 0x0004 /* GP3_EINT2 */ +#define ARIZONA_GP3_EINT2_SHIFT 2 /* GP3_EINT2 */ +#define ARIZONA_GP3_EINT2_WIDTH 1 /* GP3_EINT2 */ +#define ARIZONA_GP2_EINT2 0x0002 /* GP2_EINT2 */ +#define ARIZONA_GP2_EINT2_MASK 0x0002 /* GP2_EINT2 */ +#define ARIZONA_GP2_EINT2_SHIFT 1 /* GP2_EINT2 */ +#define ARIZONA_GP2_EINT2_WIDTH 1 /* GP2_EINT2 */ +#define ARIZONA_GP1_EINT2 0x0001 /* GP1_EINT2 */ +#define 
ARIZONA_GP1_EINT2_MASK 0x0001 /* GP1_EINT2 */ +#define ARIZONA_GP1_EINT2_SHIFT 0 /* GP1_EINT2 */ +#define ARIZONA_GP1_EINT2_WIDTH 1 /* GP1_EINT2 */ + +/* + * R3345 (0xD11) - IRQ2 Status 2 + */ +#define ARIZONA_DSP1_RAM_RDY_EINT2 0x0100 /* DSP1_RAM_RDY_EINT2 */ +#define ARIZONA_DSP1_RAM_RDY_EINT2_MASK 0x0100 /* DSP1_RAM_RDY_EINT2 */ +#define ARIZONA_DSP1_RAM_RDY_EINT2_SHIFT 8 /* DSP1_RAM_RDY_EINT2 */ +#define ARIZONA_DSP1_RAM_RDY_EINT2_WIDTH 1 /* DSP1_RAM_RDY_EINT2 */ +#define ARIZONA_DSP_IRQ2_EINT2 0x0002 /* DSP_IRQ2_EINT2 */ +#define ARIZONA_DSP_IRQ2_EINT2_MASK 0x0002 /* DSP_IRQ2_EINT2 */ +#define ARIZONA_DSP_IRQ2_EINT2_SHIFT 1 /* DSP_IRQ2_EINT2 */ +#define ARIZONA_DSP_IRQ2_EINT2_WIDTH 1 /* DSP_IRQ2_EINT2 */ +#define ARIZONA_DSP_IRQ1_EINT2 0x0001 /* DSP_IRQ1_EINT2 */ +#define ARIZONA_DSP_IRQ1_EINT2_MASK 0x0001 /* DSP_IRQ1_EINT2 */ +#define ARIZONA_DSP_IRQ1_EINT2_SHIFT 0 /* DSP_IRQ1_EINT2 */ +#define ARIZONA_DSP_IRQ1_EINT2_WIDTH 1 /* DSP_IRQ1_EINT2 */ + +/* + * R3346 (0xD12) - IRQ2 Status 3 + */ +#define ARIZONA_SPK_OVERHEAT_WARN_EINT2 0x8000 /* SPK_OVERHEAT_WARN_EINT2 */ +#define ARIZONA_SPK_OVERHEAT_WARN_EINT2_MASK 0x8000 /* SPK_OVERHEAT_WARN_EINT2 */ +#define ARIZONA_SPK_OVERHEAT_WARN_EINT2_SHIFT 15 /* SPK_OVERHEAT_WARN_EINT2 */ +#define ARIZONA_SPK_OVERHEAT_WARN_EINT2_WIDTH 1 /* SPK_OVERHEAT_WARN_EINT2 */ +#define ARIZONA_SPK_OVERHEAT_EINT2 0x4000 /* SPK_OVERHEAT_EINT2 */ +#define ARIZONA_SPK_OVERHEAT_EINT2_MASK 0x4000 /* SPK_OVERHEAT_EINT2 */ +#define ARIZONA_SPK_OVERHEAT_EINT2_SHIFT 14 /* SPK_OVERHEAT_EINT2 */ +#define ARIZONA_SPK_OVERHEAT_EINT2_WIDTH 1 /* SPK_OVERHEAT_EINT2 */ +#define ARIZONA_HPDET_EINT2 0x2000 /* HPDET_EINT2 */ +#define ARIZONA_HPDET_EINT2_MASK 0x2000 /* HPDET_EINT2 */ +#define ARIZONA_HPDET_EINT2_SHIFT 13 /* HPDET_EINT2 */ +#define ARIZONA_HPDET_EINT2_WIDTH 1 /* HPDET_EINT2 */ +#define ARIZONA_MICDET_EINT2 0x1000 /* MICDET_EINT2 */ +#define ARIZONA_MICDET_EINT2_MASK 0x1000 /* MICDET_EINT2 */ +#define ARIZONA_MICDET_EINT2_SHIFT 12 /* MICDET_EINT2 */ +#define ARIZONA_MICDET_EINT2_WIDTH 1 /* MICDET_EINT2 */ +#define ARIZONA_WSEQ_DONE_EINT2 0x0800 /* WSEQ_DONE_EINT2 */ +#define ARIZONA_WSEQ_DONE_EINT2_MASK 0x0800 /* WSEQ_DONE_EINT2 */ +#define ARIZONA_WSEQ_DONE_EINT2_SHIFT 11 /* WSEQ_DONE_EINT2 */ +#define ARIZONA_WSEQ_DONE_EINT2_WIDTH 1 /* WSEQ_DONE_EINT2 */ +#define ARIZONA_DRC2_SIG_DET_EINT2 0x0400 /* DRC2_SIG_DET_EINT2 */ +#define ARIZONA_DRC2_SIG_DET_EINT2_MASK 0x0400 /* DRC2_SIG_DET_EINT2 */ +#define ARIZONA_DRC2_SIG_DET_EINT2_SHIFT 10 /* DRC2_SIG_DET_EINT2 */ +#define ARIZONA_DRC2_SIG_DET_EINT2_WIDTH 1 /* DRC2_SIG_DET_EINT2 */ +#define ARIZONA_DRC1_SIG_DET_EINT2 0x0200 /* DRC1_SIG_DET_EINT2 */ +#define ARIZONA_DRC1_SIG_DET_EINT2_MASK 0x0200 /* DRC1_SIG_DET_EINT2 */ +#define ARIZONA_DRC1_SIG_DET_EINT2_SHIFT 9 /* DRC1_SIG_DET_EINT2 */ +#define ARIZONA_DRC1_SIG_DET_EINT2_WIDTH 1 /* DRC1_SIG_DET_EINT2 */ +#define ARIZONA_ASRC2_LOCK_EINT2 0x0100 /* ASRC2_LOCK_EINT2 */ +#define ARIZONA_ASRC2_LOCK_EINT2_MASK 0x0100 /* ASRC2_LOCK_EINT2 */ +#define ARIZONA_ASRC2_LOCK_EINT2_SHIFT 8 /* ASRC2_LOCK_EINT2 */ +#define ARIZONA_ASRC2_LOCK_EINT2_WIDTH 1 /* ASRC2_LOCK_EINT2 */ +#define ARIZONA_ASRC1_LOCK_EINT2 0x0080 /* ASRC1_LOCK_EINT2 */ +#define ARIZONA_ASRC1_LOCK_EINT2_MASK 0x0080 /* ASRC1_LOCK_EINT2 */ +#define ARIZONA_ASRC1_LOCK_EINT2_SHIFT 7 /* ASRC1_LOCK_EINT2 */ +#define ARIZONA_ASRC1_LOCK_EINT2_WIDTH 1 /* ASRC1_LOCK_EINT2 */ +#define ARIZONA_UNDERCLOCKED_EINT2 0x0040 /* UNDERCLOCKED_EINT2 */ +#define ARIZONA_UNDERCLOCKED_EINT2_MASK 0x0040 /* UNDERCLOCKED_EINT2 */ 
+#define ARIZONA_UNDERCLOCKED_EINT2_SHIFT 6 /* UNDERCLOCKED_EINT2 */ +#define ARIZONA_UNDERCLOCKED_EINT2_WIDTH 1 /* UNDERCLOCKED_EINT2 */ +#define ARIZONA_OVERCLOCKED_EINT2 0x0020 /* OVERCLOCKED_EINT2 */ +#define ARIZONA_OVERCLOCKED_EINT2_MASK 0x0020 /* OVERCLOCKED_EINT2 */ +#define ARIZONA_OVERCLOCKED_EINT2_SHIFT 5 /* OVERCLOCKED_EINT2 */ +#define ARIZONA_OVERCLOCKED_EINT2_WIDTH 1 /* OVERCLOCKED_EINT2 */ +#define ARIZONA_FLL2_LOCK_EINT2 0x0008 /* FLL2_LOCK_EINT2 */ +#define ARIZONA_FLL2_LOCK_EINT2_MASK 0x0008 /* FLL2_LOCK_EINT2 */ +#define ARIZONA_FLL2_LOCK_EINT2_SHIFT 3 /* FLL2_LOCK_EINT2 */ +#define ARIZONA_FLL2_LOCK_EINT2_WIDTH 1 /* FLL2_LOCK_EINT2 */ +#define ARIZONA_FLL1_LOCK_EINT2 0x0004 /* FLL1_LOCK_EINT2 */ +#define ARIZONA_FLL1_LOCK_EINT2_MASK 0x0004 /* FLL1_LOCK_EINT2 */ +#define ARIZONA_FLL1_LOCK_EINT2_SHIFT 2 /* FLL1_LOCK_EINT2 */ +#define ARIZONA_FLL1_LOCK_EINT2_WIDTH 1 /* FLL1_LOCK_EINT2 */ +#define ARIZONA_CLKGEN_ERR_EINT2 0x0002 /* CLKGEN_ERR_EINT2 */ +#define ARIZONA_CLKGEN_ERR_EINT2_MASK 0x0002 /* CLKGEN_ERR_EINT2 */ +#define ARIZONA_CLKGEN_ERR_EINT2_SHIFT 1 /* CLKGEN_ERR_EINT2 */ +#define ARIZONA_CLKGEN_ERR_EINT2_WIDTH 1 /* CLKGEN_ERR_EINT2 */ +#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2 0x0001 /* CLKGEN_ERR_ASYNC_EINT2 */ +#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2_MASK 0x0001 /* CLKGEN_ERR_ASYNC_EINT2 */ +#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2_SHIFT 0 /* CLKGEN_ERR_ASYNC_EINT2 */ +#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2_WIDTH 1 /* CLKGEN_ERR_ASYNC_EINT2 */ + +/* + * R3347 (0xD13) - IRQ2 Status 4 + */ +#define ARIZONA_ASRC_CFG_ERR_EINT2 0x8000 /* ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_ASRC_CFG_ERR_EINT2_MASK 0x8000 /* ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_ASRC_CFG_ERR_EINT2_SHIFT 15 /* ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_ASRC_CFG_ERR_EINT2_WIDTH 1 /* ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_AIF3_ERR_EINT2 0x4000 /* AIF3_ERR_EINT2 */ +#define ARIZONA_AIF3_ERR_EINT2_MASK 0x4000 /* AIF3_ERR_EINT2 */ +#define ARIZONA_AIF3_ERR_EINT2_SHIFT 14 /* AIF3_ERR_EINT2 */ +#define ARIZONA_AIF3_ERR_EINT2_WIDTH 1 /* AIF3_ERR_EINT2 */ +#define ARIZONA_AIF2_ERR_EINT2 0x2000 /* AIF2_ERR_EINT2 */ +#define ARIZONA_AIF2_ERR_EINT2_MASK 0x2000 /* AIF2_ERR_EINT2 */ +#define ARIZONA_AIF2_ERR_EINT2_SHIFT 13 /* AIF2_ERR_EINT2 */ +#define ARIZONA_AIF2_ERR_EINT2_WIDTH 1 /* AIF2_ERR_EINT2 */ +#define ARIZONA_AIF1_ERR_EINT2 0x1000 /* AIF1_ERR_EINT2 */ +#define ARIZONA_AIF1_ERR_EINT2_MASK 0x1000 /* AIF1_ERR_EINT2 */ +#define ARIZONA_AIF1_ERR_EINT2_SHIFT 12 /* AIF1_ERR_EINT2 */ +#define ARIZONA_AIF1_ERR_EINT2_WIDTH 1 /* AIF1_ERR_EINT2 */ +#define ARIZONA_CTRLIF_ERR_EINT2 0x0800 /* CTRLIF_ERR_EINT2 */ +#define ARIZONA_CTRLIF_ERR_EINT2_MASK 0x0800 /* CTRLIF_ERR_EINT2 */ +#define ARIZONA_CTRLIF_ERR_EINT2_SHIFT 11 /* CTRLIF_ERR_EINT2 */ +#define ARIZONA_CTRLIF_ERR_EINT2_WIDTH 1 /* CTRLIF_ERR_EINT2 */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2 0x0400 /* MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2_MASK 0x0400 /* MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2_SHIFT 10 /* MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2_WIDTH 1 /* MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2 0x0200 /* ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2_MASK 0x0200 /* ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2_SHIFT 9 /* ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2_WIDTH 1 /* ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_SYSCLK_ENA_LOW_EINT2 0x0100 /* 
SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_SYSCLK_ENA_LOW_EINT2_MASK 0x0100 /* SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_SYSCLK_ENA_LOW_EINT2_SHIFT 8 /* SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_SYSCLK_ENA_LOW_EINT2_WIDTH 1 /* SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_ISRC1_CFG_ERR_EINT2 0x0080 /* ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_ISRC1_CFG_ERR_EINT2_MASK 0x0080 /* ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_ISRC1_CFG_ERR_EINT2_SHIFT 7 /* ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_ISRC1_CFG_ERR_EINT2_WIDTH 1 /* ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_ISRC2_CFG_ERR_EINT2 0x0040 /* ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_ISRC2_CFG_ERR_EINT2_MASK 0x0040 /* ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_ISRC2_CFG_ERR_EINT2_SHIFT 6 /* ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_ISRC2_CFG_ERR_EINT2_WIDTH 1 /* ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_HP3R_DONE_EINT2 0x0020 /* HP3R_DONE_EINT2 */ +#define ARIZONA_HP3R_DONE_EINT2_MASK 0x0020 /* HP3R_DONE_EINT2 */ +#define ARIZONA_HP3R_DONE_EINT2_SHIFT 5 /* HP3R_DONE_EINT2 */ +#define ARIZONA_HP3R_DONE_EINT2_WIDTH 1 /* HP3R_DONE_EINT2 */ +#define ARIZONA_HP3L_DONE_EINT2 0x0010 /* HP3L_DONE_EINT2 */ +#define ARIZONA_HP3L_DONE_EINT2_MASK 0x0010 /* HP3L_DONE_EINT2 */ +#define ARIZONA_HP3L_DONE_EINT2_SHIFT 4 /* HP3L_DONE_EINT2 */ +#define ARIZONA_HP3L_DONE_EINT2_WIDTH 1 /* HP3L_DONE_EINT2 */ +#define ARIZONA_HP2R_DONE_EINT2 0x0008 /* HP2R_DONE_EINT2 */ +#define ARIZONA_HP2R_DONE_EINT2_MASK 0x0008 /* HP2R_DONE_EINT2 */ +#define ARIZONA_HP2R_DONE_EINT2_SHIFT 3 /* HP2R_DONE_EINT2 */ +#define ARIZONA_HP2R_DONE_EINT2_WIDTH 1 /* HP2R_DONE_EINT2 */ +#define ARIZONA_HP2L_DONE_EINT2 0x0004 /* HP2L_DONE_EINT2 */ +#define ARIZONA_HP2L_DONE_EINT2_MASK 0x0004 /* HP2L_DONE_EINT2 */ +#define ARIZONA_HP2L_DONE_EINT2_SHIFT 2 /* HP2L_DONE_EINT2 */ +#define ARIZONA_HP2L_DONE_EINT2_WIDTH 1 /* HP2L_DONE_EINT2 */ +#define ARIZONA_HP1R_DONE_EINT2 0x0002 /* HP1R_DONE_EINT2 */ +#define ARIZONA_HP1R_DONE_EINT2_MASK 0x0002 /* HP1R_DONE_EINT2 */ +#define ARIZONA_HP1R_DONE_EINT2_SHIFT 1 /* HP1R_DONE_EINT2 */ +#define ARIZONA_HP1R_DONE_EINT2_WIDTH 1 /* HP1R_DONE_EINT2 */ +#define ARIZONA_HP1L_DONE_EINT2 0x0001 /* HP1L_DONE_EINT2 */ +#define ARIZONA_HP1L_DONE_EINT2_MASK 0x0001 /* HP1L_DONE_EINT2 */ +#define ARIZONA_HP1L_DONE_EINT2_SHIFT 0 /* HP1L_DONE_EINT2 */ +#define ARIZONA_HP1L_DONE_EINT2_WIDTH 1 /* HP1L_DONE_EINT2 */ + +/* + * R3347 (0xD13) - IRQ2 Status 4 (Alternate layout) + * + * Alternate layout used on later devices, note only fields that have moved + * are specified + */ +#define ARIZONA_V2_AIF3_ERR_EINT2 0x8000 /* AIF3_ERR_EINT2 */ +#define ARIZONA_V2_AIF3_ERR_EINT2_MASK 0x8000 /* AIF3_ERR_EINT2 */ +#define ARIZONA_V2_AIF3_ERR_EINT2_SHIFT 15 /* AIF3_ERR_EINT2 */ +#define ARIZONA_V2_AIF3_ERR_EINT2_WIDTH 1 /* AIF3_ERR_EINT2 */ +#define ARIZONA_V2_AIF2_ERR_EINT2 0x4000 /* AIF2_ERR_EINT2 */ +#define ARIZONA_V2_AIF2_ERR_EINT2_MASK 0x4000 /* AIF2_ERR_EINT2 */ +#define ARIZONA_V2_AIF2_ERR_EINT2_SHIFT 14 /* AIF2_ERR_EINT2 */ +#define ARIZONA_V2_AIF2_ERR_EINT2_WIDTH 1 /* AIF2_ERR_EINT2 */ +#define ARIZONA_V2_AIF1_ERR_EINT2 0x2000 /* AIF1_ERR_EINT2 */ +#define ARIZONA_V2_AIF1_ERR_EINT2_MASK 0x2000 /* AIF1_ERR_EINT2 */ +#define ARIZONA_V2_AIF1_ERR_EINT2_SHIFT 13 /* AIF1_ERR_EINT2 */ +#define ARIZONA_V2_AIF1_ERR_EINT2_WIDTH 1 /* AIF1_ERR_EINT2 */ +#define ARIZONA_V2_CTRLIF_ERR_EINT2 0x1000 /* CTRLIF_ERR_EINT2 */ +#define ARIZONA_V2_CTRLIF_ERR_EINT2_MASK 0x1000 /* CTRLIF_ERR_EINT2 */ +#define ARIZONA_V2_CTRLIF_ERR_EINT2_SHIFT 12 /* CTRLIF_ERR_EINT2 */ +#define ARIZONA_V2_CTRLIF_ERR_EINT2_WIDTH 1 /* 
CTRLIF_ERR_EINT2 */ +#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT2 0x0800 /* MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT2_MASK 0x0800 /* MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT2_SHIFT 11 /* MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT2_WIDTH 1 /* MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT2 0x0400 /* ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT2_MASK 0x0400 /* ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT2_SHIFT 10 /* ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT2_WIDTH 1 /* ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT2 0x0200 /* SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT2_MASK 0x0200 /* SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT2_SHIFT 9 /* SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT2_WIDTH 1 /* SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_ISRC1_CFG_ERR_EINT2 0x0100 /* ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC1_CFG_ERR_EINT2_MASK 0x0100 /* ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC1_CFG_ERR_EINT2_SHIFT 8 /* ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC1_CFG_ERR_EINT2_WIDTH 1 /* ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC2_CFG_ERR_EINT2 0x0080 /* ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC2_CFG_ERR_EINT2_MASK 0x0080 /* ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC2_CFG_ERR_EINT2_SHIFT 7 /* ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC2_CFG_ERR_EINT2_WIDTH 1 /* ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC3_CFG_ERR_EINT2 0x0040 /* ISRC3_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC3_CFG_ERR_EINT2_MASK 0x0040 /* ISRC3_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC3_CFG_ERR_EINT2_SHIFT 6 /* ISRC3_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ISRC3_CFG_ERR_EINT2_WIDTH 1 /* ISRC3_CFG_ERR_EINT2 */ + +/* + * R3348 (0xD14) - IRQ2 Status 5 + */ +#define ARIZONA_BOOT_DONE_EINT2 0x0100 /* BOOT_DONE_EINT2 */ +#define ARIZONA_BOOT_DONE_EINT2_MASK 0x0100 /* BOOT_DONE_EINT2 */ +#define ARIZONA_BOOT_DONE_EINT2_SHIFT 8 /* BOOT_DONE_EINT2 */ +#define ARIZONA_BOOT_DONE_EINT2_WIDTH 1 /* BOOT_DONE_EINT2 */ +#define ARIZONA_DCS_DAC_DONE_EINT2 0x0080 /* DCS_DAC_DONE_EINT2 */ +#define ARIZONA_DCS_DAC_DONE_EINT2_MASK 0x0080 /* DCS_DAC_DONE_EINT2 */ +#define ARIZONA_DCS_DAC_DONE_EINT2_SHIFT 7 /* DCS_DAC_DONE_EINT2 */ +#define ARIZONA_DCS_DAC_DONE_EINT2_WIDTH 1 /* DCS_DAC_DONE_EINT2 */ +#define ARIZONA_DCS_HP_DONE_EINT2 0x0040 /* DCS_HP_DONE_EINT2 */ +#define ARIZONA_DCS_HP_DONE_EINT2_MASK 0x0040 /* DCS_HP_DONE_EINT2 */ +#define ARIZONA_DCS_HP_DONE_EINT2_SHIFT 6 /* DCS_HP_DONE_EINT2 */ +#define ARIZONA_DCS_HP_DONE_EINT2_WIDTH 1 /* DCS_HP_DONE_EINT2 */ +#define ARIZONA_FLL2_CLOCK_OK_EINT2 0x0002 /* FLL2_CLOCK_OK_EINT2 */ +#define ARIZONA_FLL2_CLOCK_OK_EINT2_MASK 0x0002 /* FLL2_CLOCK_OK_EINT2 */ +#define ARIZONA_FLL2_CLOCK_OK_EINT2_SHIFT 1 /* FLL2_CLOCK_OK_EINT2 */ +#define ARIZONA_FLL2_CLOCK_OK_EINT2_WIDTH 1 /* FLL2_CLOCK_OK_EINT2 */ +#define ARIZONA_FLL1_CLOCK_OK_EINT2 0x0001 /* FLL1_CLOCK_OK_EINT2 */ +#define ARIZONA_FLL1_CLOCK_OK_EINT2_MASK 0x0001 /* FLL1_CLOCK_OK_EINT2 */ +#define ARIZONA_FLL1_CLOCK_OK_EINT2_SHIFT 0 /* FLL1_CLOCK_OK_EINT2 */ +#define ARIZONA_FLL1_CLOCK_OK_EINT2_WIDTH 1 /* FLL1_CLOCK_OK_EINT2 */ + +/* + * R3348 (0xD14) - IRQ2 Status 5 (Alternate layout) + * + * Alternate layout used on later devices, note only fields that have moved + * are specified + */ +#define ARIZONA_V2_ASRC_CFG_ERR_EINT2 0x0008 /* 
ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ASRC_CFG_ERR_EINT2_MASK 0x0008 /* ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ASRC_CFG_ERR_EINT2_SHIFT 3 /* ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_V2_ASRC_CFG_ERR_EINT2_WIDTH 1 /* ASRC_CFG_ERR_EINT2 */ + +/* + * R3349 (0xD15) - IRQ2 Status 6 + */ +#define ARIZONA_DSP_SHARED_WR_COLL_EINT2 0x8000 /* DSP_SHARED_WR_COLL_EINT2 */ +#define ARIZONA_DSP_SHARED_WR_COLL_EINT2_MASK 0x8000 /* DSP_SHARED_WR_COLL_EINT2 */ +#define ARIZONA_DSP_SHARED_WR_COLL_EINT2_SHIFT 15 /* DSP_SHARED_WR_COLL_EINT2 */ +#define ARIZONA_DSP_SHARED_WR_COLL_EINT2_WIDTH 1 /* DSP_SHARED_WR_COLL_EINT2 */ +#define ARIZONA_SPK_SHUTDOWN_EINT2 0x4000 /* SPK_SHUTDOWN_EINT2 */ +#define ARIZONA_SPK_SHUTDOWN_EINT2_MASK 0x4000 /* SPK_SHUTDOWN_EINT2 */ +#define ARIZONA_SPK_SHUTDOWN_EINT2_SHIFT 14 /* SPK_SHUTDOWN_EINT2 */ +#define ARIZONA_SPK_SHUTDOWN_EINT2_WIDTH 1 /* SPK_SHUTDOWN_EINT2 */ +#define ARIZONA_SPK1R_SHORT_EINT2 0x2000 /* SPK1R_SHORT_EINT2 */ +#define ARIZONA_SPK1R_SHORT_EINT2_MASK 0x2000 /* SPK1R_SHORT_EINT2 */ +#define ARIZONA_SPK1R_SHORT_EINT2_SHIFT 13 /* SPK1R_SHORT_EINT2 */ +#define ARIZONA_SPK1R_SHORT_EINT2_WIDTH 1 /* SPK1R_SHORT_EINT2 */ +#define ARIZONA_SPK1L_SHORT_EINT2 0x1000 /* SPK1L_SHORT_EINT2 */ +#define ARIZONA_SPK1L_SHORT_EINT2_MASK 0x1000 /* SPK1L_SHORT_EINT2 */ +#define ARIZONA_SPK1L_SHORT_EINT2_SHIFT 12 /* SPK1L_SHORT_EINT2 */ +#define ARIZONA_SPK1L_SHORT_EINT2_WIDTH 1 /* SPK1L_SHORT_EINT2 */ +#define ARIZONA_HP3R_SC_NEG_EINT2 0x0800 /* HP3R_SC_NEG_EINT2 */ +#define ARIZONA_HP3R_SC_NEG_EINT2_MASK 0x0800 /* HP3R_SC_NEG_EINT2 */ +#define ARIZONA_HP3R_SC_NEG_EINT2_SHIFT 11 /* HP3R_SC_NEG_EINT2 */ +#define ARIZONA_HP3R_SC_NEG_EINT2_WIDTH 1 /* HP3R_SC_NEG_EINT2 */ +#define ARIZONA_HP3R_SC_POS_EINT2 0x0400 /* HP3R_SC_POS_EINT2 */ +#define ARIZONA_HP3R_SC_POS_EINT2_MASK 0x0400 /* HP3R_SC_POS_EINT2 */ +#define ARIZONA_HP3R_SC_POS_EINT2_SHIFT 10 /* HP3R_SC_POS_EINT2 */ +#define ARIZONA_HP3R_SC_POS_EINT2_WIDTH 1 /* HP3R_SC_POS_EINT2 */ +#define ARIZONA_HP3L_SC_NEG_EINT2 0x0200 /* HP3L_SC_NEG_EINT2 */ +#define ARIZONA_HP3L_SC_NEG_EINT2_MASK 0x0200 /* HP3L_SC_NEG_EINT2 */ +#define ARIZONA_HP3L_SC_NEG_EINT2_SHIFT 9 /* HP3L_SC_NEG_EINT2 */ +#define ARIZONA_HP3L_SC_NEG_EINT2_WIDTH 1 /* HP3L_SC_NEG_EINT2 */ +#define ARIZONA_HP3L_SC_POS_EINT2 0x0100 /* HP3L_SC_POS_EINT2 */ +#define ARIZONA_HP3L_SC_POS_EINT2_MASK 0x0100 /* HP3L_SC_POS_EINT2 */ +#define ARIZONA_HP3L_SC_POS_EINT2_SHIFT 8 /* HP3L_SC_POS_EINT2 */ +#define ARIZONA_HP3L_SC_POS_EINT2_WIDTH 1 /* HP3L_SC_POS_EINT2 */ +#define ARIZONA_HP2R_SC_NEG_EINT2 0x0080 /* HP2R_SC_NEG_EINT2 */ +#define ARIZONA_HP2R_SC_NEG_EINT2_MASK 0x0080 /* HP2R_SC_NEG_EINT2 */ +#define ARIZONA_HP2R_SC_NEG_EINT2_SHIFT 7 /* HP2R_SC_NEG_EINT2 */ +#define ARIZONA_HP2R_SC_NEG_EINT2_WIDTH 1 /* HP2R_SC_NEG_EINT2 */ +#define ARIZONA_HP2R_SC_POS_EINT2 0x0040 /* HP2R_SC_POS_EINT2 */ +#define ARIZONA_HP2R_SC_POS_EINT2_MASK 0x0040 /* HP2R_SC_POS_EINT2 */ +#define ARIZONA_HP2R_SC_POS_EINT2_SHIFT 6 /* HP2R_SC_POS_EINT2 */ +#define ARIZONA_HP2R_SC_POS_EINT2_WIDTH 1 /* HP2R_SC_POS_EINT2 */ +#define ARIZONA_HP2L_SC_NEG_EINT2 0x0020 /* HP2L_SC_NEG_EINT2 */ +#define ARIZONA_HP2L_SC_NEG_EINT2_MASK 0x0020 /* HP2L_SC_NEG_EINT2 */ +#define ARIZONA_HP2L_SC_NEG_EINT2_SHIFT 5 /* HP2L_SC_NEG_EINT2 */ +#define ARIZONA_HP2L_SC_NEG_EINT2_WIDTH 1 /* HP2L_SC_NEG_EINT2 */ +#define ARIZONA_HP2L_SC_POS_EINT2 0x0010 /* HP2L_SC_POS_EINT2 */ +#define ARIZONA_HP2L_SC_POS_EINT2_MASK 0x0010 /* HP2L_SC_POS_EINT2 */ +#define ARIZONA_HP2L_SC_POS_EINT2_SHIFT 4 /* HP2L_SC_POS_EINT2 */ +#define 
ARIZONA_HP2L_SC_POS_EINT2_WIDTH 1 /* HP2L_SC_POS_EINT2 */ +#define ARIZONA_HP1R_SC_NEG_EINT2 0x0008 /* HP1R_SC_NEG_EINT2 */ +#define ARIZONA_HP1R_SC_NEG_EINT2_MASK 0x0008 /* HP1R_SC_NEG_EINT2 */ +#define ARIZONA_HP1R_SC_NEG_EINT2_SHIFT 3 /* HP1R_SC_NEG_EINT2 */ +#define ARIZONA_HP1R_SC_NEG_EINT2_WIDTH 1 /* HP1R_SC_NEG_EINT2 */ +#define ARIZONA_HP1R_SC_POS_EINT2 0x0004 /* HP1R_SC_POS_EINT2 */ +#define ARIZONA_HP1R_SC_POS_EINT2_MASK 0x0004 /* HP1R_SC_POS_EINT2 */ +#define ARIZONA_HP1R_SC_POS_EINT2_SHIFT 2 /* HP1R_SC_POS_EINT2 */ +#define ARIZONA_HP1R_SC_POS_EINT2_WIDTH 1 /* HP1R_SC_POS_EINT2 */ +#define ARIZONA_HP1L_SC_NEG_EINT2 0x0002 /* HP1L_SC_NEG_EINT2 */ +#define ARIZONA_HP1L_SC_NEG_EINT2_MASK 0x0002 /* HP1L_SC_NEG_EINT2 */ +#define ARIZONA_HP1L_SC_NEG_EINT2_SHIFT 1 /* HP1L_SC_NEG_EINT2 */ +#define ARIZONA_HP1L_SC_NEG_EINT2_WIDTH 1 /* HP1L_SC_NEG_EINT2 */ +#define ARIZONA_HP1L_SC_POS_EINT2 0x0001 /* HP1L_SC_POS_EINT2 */ +#define ARIZONA_HP1L_SC_POS_EINT2_MASK 0x0001 /* HP1L_SC_POS_EINT2 */ +#define ARIZONA_HP1L_SC_POS_EINT2_SHIFT 0 /* HP1L_SC_POS_EINT2 */ +#define ARIZONA_HP1L_SC_POS_EINT2_WIDTH 1 /* HP1L_SC_POS_EINT2 */ + +/* + * R3352 (0xD18) - IRQ2 Status 1 Mask + */ +#define ARIZONA_IM_GP4_EINT2 0x0008 /* IM_GP4_EINT2 */ +#define ARIZONA_IM_GP4_EINT2_MASK 0x0008 /* IM_GP4_EINT2 */ +#define ARIZONA_IM_GP4_EINT2_SHIFT 3 /* IM_GP4_EINT2 */ +#define ARIZONA_IM_GP4_EINT2_WIDTH 1 /* IM_GP4_EINT2 */ +#define ARIZONA_IM_GP3_EINT2 0x0004 /* IM_GP3_EINT2 */ +#define ARIZONA_IM_GP3_EINT2_MASK 0x0004 /* IM_GP3_EINT2 */ +#define ARIZONA_IM_GP3_EINT2_SHIFT 2 /* IM_GP3_EINT2 */ +#define ARIZONA_IM_GP3_EINT2_WIDTH 1 /* IM_GP3_EINT2 */ +#define ARIZONA_IM_GP2_EINT2 0x0002 /* IM_GP2_EINT2 */ +#define ARIZONA_IM_GP2_EINT2_MASK 0x0002 /* IM_GP2_EINT2 */ +#define ARIZONA_IM_GP2_EINT2_SHIFT 1 /* IM_GP2_EINT2 */ +#define ARIZONA_IM_GP2_EINT2_WIDTH 1 /* IM_GP2_EINT2 */ +#define ARIZONA_IM_GP1_EINT2 0x0001 /* IM_GP1_EINT2 */ +#define ARIZONA_IM_GP1_EINT2_MASK 0x0001 /* IM_GP1_EINT2 */ +#define ARIZONA_IM_GP1_EINT2_SHIFT 0 /* IM_GP1_EINT2 */ +#define ARIZONA_IM_GP1_EINT2_WIDTH 1 /* IM_GP1_EINT2 */ + +/* + * R3353 (0xD19) - IRQ2 Status 2 Mask + */ +#define ARIZONA_IM_DSP1_RAM_RDY_EINT2 0x0100 /* IM_DSP1_RAM_RDY_EINT2 */ +#define ARIZONA_IM_DSP1_RAM_RDY_EINT2_MASK 0x0100 /* IM_DSP1_RAM_RDY_EINT2 */ +#define ARIZONA_IM_DSP1_RAM_RDY_EINT2_SHIFT 8 /* IM_DSP1_RAM_RDY_EINT2 */ +#define ARIZONA_IM_DSP1_RAM_RDY_EINT2_WIDTH 1 /* IM_DSP1_RAM_RDY_EINT2 */ +#define ARIZONA_IM_DSP_IRQ2_EINT2 0x0002 /* IM_DSP_IRQ2_EINT2 */ +#define ARIZONA_IM_DSP_IRQ2_EINT2_MASK 0x0002 /* IM_DSP_IRQ2_EINT2 */ +#define ARIZONA_IM_DSP_IRQ2_EINT2_SHIFT 1 /* IM_DSP_IRQ2_EINT2 */ +#define ARIZONA_IM_DSP_IRQ2_EINT2_WIDTH 1 /* IM_DSP_IRQ2_EINT2 */ +#define ARIZONA_IM_DSP_IRQ1_EINT2 0x0001 /* IM_DSP_IRQ1_EINT2 */ +#define ARIZONA_IM_DSP_IRQ1_EINT2_MASK 0x0001 /* IM_DSP_IRQ1_EINT2 */ +#define ARIZONA_IM_DSP_IRQ1_EINT2_SHIFT 0 /* IM_DSP_IRQ1_EINT2 */ +#define ARIZONA_IM_DSP_IRQ1_EINT2_WIDTH 1 /* IM_DSP_IRQ1_EINT2 */ + +/* + * R3354 (0xD1A) - IRQ2 Status 3 Mask + */ +#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT2 0x8000 /* IM_SPK_OVERHEAT_WARN_EINT2 */ +#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT2_MASK 0x8000 /* IM_SPK_OVERHEAT_WARN_EINT2 */ +#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT2_SHIFT 15 /* IM_SPK_OVERHEAT_WARN_EINT2 */ +#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT2_WIDTH 1 /* IM_SPK_OVERHEAT_WARN_EINT2 */ +#define ARIZONA_IM_SPK_OVERHEAT_EINT2 0x4000 /* IM_SPK_OVERHEAT_EINT2 */ +#define ARIZONA_IM_SPK_OVERHEAT_EINT2_MASK 0x4000 /* IM_SPK_OVERHEAT_EINT2 
*/ +#define ARIZONA_IM_SPK_OVERHEAT_EINT2_SHIFT 14 /* IM_SPK_OVERHEAT_EINT2 */ +#define ARIZONA_IM_SPK_OVERHEAT_EINT2_WIDTH 1 /* IM_SPK_OVERHEAT_EINT2 */ +#define ARIZONA_IM_HPDET_EINT2 0x2000 /* IM_HPDET_EINT2 */ +#define ARIZONA_IM_HPDET_EINT2_MASK 0x2000 /* IM_HPDET_EINT2 */ +#define ARIZONA_IM_HPDET_EINT2_SHIFT 13 /* IM_HPDET_EINT2 */ +#define ARIZONA_IM_HPDET_EINT2_WIDTH 1 /* IM_HPDET_EINT2 */ +#define ARIZONA_IM_MICDET_EINT2 0x1000 /* IM_MICDET_EINT2 */ +#define ARIZONA_IM_MICDET_EINT2_MASK 0x1000 /* IM_MICDET_EINT2 */ +#define ARIZONA_IM_MICDET_EINT2_SHIFT 12 /* IM_MICDET_EINT2 */ +#define ARIZONA_IM_MICDET_EINT2_WIDTH 1 /* IM_MICDET_EINT2 */ +#define ARIZONA_IM_WSEQ_DONE_EINT2 0x0800 /* IM_WSEQ_DONE_EINT2 */ +#define ARIZONA_IM_WSEQ_DONE_EINT2_MASK 0x0800 /* IM_WSEQ_DONE_EINT2 */ +#define ARIZONA_IM_WSEQ_DONE_EINT2_SHIFT 11 /* IM_WSEQ_DONE_EINT2 */ +#define ARIZONA_IM_WSEQ_DONE_EINT2_WIDTH 1 /* IM_WSEQ_DONE_EINT2 */ +#define ARIZONA_IM_DRC2_SIG_DET_EINT2 0x0400 /* IM_DRC2_SIG_DET_EINT2 */ +#define ARIZONA_IM_DRC2_SIG_DET_EINT2_MASK 0x0400 /* IM_DRC2_SIG_DET_EINT2 */ +#define ARIZONA_IM_DRC2_SIG_DET_EINT2_SHIFT 10 /* IM_DRC2_SIG_DET_EINT2 */ +#define ARIZONA_IM_DRC2_SIG_DET_EINT2_WIDTH 1 /* IM_DRC2_SIG_DET_EINT2 */ +#define ARIZONA_IM_DRC1_SIG_DET_EINT2 0x0200 /* IM_DRC1_SIG_DET_EINT2 */ +#define ARIZONA_IM_DRC1_SIG_DET_EINT2_MASK 0x0200 /* IM_DRC1_SIG_DET_EINT2 */ +#define ARIZONA_IM_DRC1_SIG_DET_EINT2_SHIFT 9 /* IM_DRC1_SIG_DET_EINT2 */ +#define ARIZONA_IM_DRC1_SIG_DET_EINT2_WIDTH 1 /* IM_DRC1_SIG_DET_EINT2 */ +#define ARIZONA_IM_ASRC2_LOCK_EINT2 0x0100 /* IM_ASRC2_LOCK_EINT2 */ +#define ARIZONA_IM_ASRC2_LOCK_EINT2_MASK 0x0100 /* IM_ASRC2_LOCK_EINT2 */ +#define ARIZONA_IM_ASRC2_LOCK_EINT2_SHIFT 8 /* IM_ASRC2_LOCK_EINT2 */ +#define ARIZONA_IM_ASRC2_LOCK_EINT2_WIDTH 1 /* IM_ASRC2_LOCK_EINT2 */ +#define ARIZONA_IM_ASRC1_LOCK_EINT2 0x0080 /* IM_ASRC1_LOCK_EINT2 */ +#define ARIZONA_IM_ASRC1_LOCK_EINT2_MASK 0x0080 /* IM_ASRC1_LOCK_EINT2 */ +#define ARIZONA_IM_ASRC1_LOCK_EINT2_SHIFT 7 /* IM_ASRC1_LOCK_EINT2 */ +#define ARIZONA_IM_ASRC1_LOCK_EINT2_WIDTH 1 /* IM_ASRC1_LOCK_EINT2 */ +#define ARIZONA_IM_UNDERCLOCKED_EINT2 0x0040 /* IM_UNDERCLOCKED_EINT2 */ +#define ARIZONA_IM_UNDERCLOCKED_EINT2_MASK 0x0040 /* IM_UNDERCLOCKED_EINT2 */ +#define ARIZONA_IM_UNDERCLOCKED_EINT2_SHIFT 6 /* IM_UNDERCLOCKED_EINT2 */ +#define ARIZONA_IM_UNDERCLOCKED_EINT2_WIDTH 1 /* IM_UNDERCLOCKED_EINT2 */ +#define ARIZONA_IM_OVERCLOCKED_EINT2 0x0020 /* IM_OVERCLOCKED_EINT2 */ +#define ARIZONA_IM_OVERCLOCKED_EINT2_MASK 0x0020 /* IM_OVERCLOCKED_EINT2 */ +#define ARIZONA_IM_OVERCLOCKED_EINT2_SHIFT 5 /* IM_OVERCLOCKED_EINT2 */ +#define ARIZONA_IM_OVERCLOCKED_EINT2_WIDTH 1 /* IM_OVERCLOCKED_EINT2 */ +#define ARIZONA_IM_FLL2_LOCK_EINT2 0x0008 /* IM_FLL2_LOCK_EINT2 */ +#define ARIZONA_IM_FLL2_LOCK_EINT2_MASK 0x0008 /* IM_FLL2_LOCK_EINT2 */ +#define ARIZONA_IM_FLL2_LOCK_EINT2_SHIFT 3 /* IM_FLL2_LOCK_EINT2 */ +#define ARIZONA_IM_FLL2_LOCK_EINT2_WIDTH 1 /* IM_FLL2_LOCK_EINT2 */ +#define ARIZONA_IM_FLL1_LOCK_EINT2 0x0004 /* IM_FLL1_LOCK_EINT2 */ +#define ARIZONA_IM_FLL1_LOCK_EINT2_MASK 0x0004 /* IM_FLL1_LOCK_EINT2 */ +#define ARIZONA_IM_FLL1_LOCK_EINT2_SHIFT 2 /* IM_FLL1_LOCK_EINT2 */ +#define ARIZONA_IM_FLL1_LOCK_EINT2_WIDTH 1 /* IM_FLL1_LOCK_EINT2 */ +#define ARIZONA_IM_CLKGEN_ERR_EINT2 0x0002 /* IM_CLKGEN_ERR_EINT2 */ +#define ARIZONA_IM_CLKGEN_ERR_EINT2_MASK 0x0002 /* IM_CLKGEN_ERR_EINT2 */ +#define ARIZONA_IM_CLKGEN_ERR_EINT2_SHIFT 1 /* IM_CLKGEN_ERR_EINT2 */ +#define ARIZONA_IM_CLKGEN_ERR_EINT2_WIDTH 1 /* 
IM_CLKGEN_ERR_EINT2 */ +#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT2 */ +#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2_MASK 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT2 */ +#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2_SHIFT 0 /* IM_CLKGEN_ERR_ASYNC_EINT2 */ +#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2_WIDTH 1 /* IM_CLKGEN_ERR_ASYNC_EINT2 */ + +/* + * R3355 (0xD1B) - IRQ2 Status 4 Mask + */ +#define ARIZONA_IM_ASRC_CFG_ERR_EINT2 0x8000 /* IM_ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_IM_ASRC_CFG_ERR_EINT2_MASK 0x8000 /* IM_ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_IM_ASRC_CFG_ERR_EINT2_SHIFT 15 /* IM_ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_IM_ASRC_CFG_ERR_EINT2_WIDTH 1 /* IM_ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_IM_AIF3_ERR_EINT2 0x4000 /* IM_AIF3_ERR_EINT2 */ +#define ARIZONA_IM_AIF3_ERR_EINT2_MASK 0x4000 /* IM_AIF3_ERR_EINT2 */ +#define ARIZONA_IM_AIF3_ERR_EINT2_SHIFT 14 /* IM_AIF3_ERR_EINT2 */ +#define ARIZONA_IM_AIF3_ERR_EINT2_WIDTH 1 /* IM_AIF3_ERR_EINT2 */ +#define ARIZONA_IM_AIF2_ERR_EINT2 0x2000 /* IM_AIF2_ERR_EINT2 */ +#define ARIZONA_IM_AIF2_ERR_EINT2_MASK 0x2000 /* IM_AIF2_ERR_EINT2 */ +#define ARIZONA_IM_AIF2_ERR_EINT2_SHIFT 13 /* IM_AIF2_ERR_EINT2 */ +#define ARIZONA_IM_AIF2_ERR_EINT2_WIDTH 1 /* IM_AIF2_ERR_EINT2 */ +#define ARIZONA_IM_AIF1_ERR_EINT2 0x1000 /* IM_AIF1_ERR_EINT2 */ +#define ARIZONA_IM_AIF1_ERR_EINT2_MASK 0x1000 /* IM_AIF1_ERR_EINT2 */ +#define ARIZONA_IM_AIF1_ERR_EINT2_SHIFT 12 /* IM_AIF1_ERR_EINT2 */ +#define ARIZONA_IM_AIF1_ERR_EINT2_WIDTH 1 /* IM_AIF1_ERR_EINT2 */ +#define ARIZONA_IM_CTRLIF_ERR_EINT2 0x0800 /* IM_CTRLIF_ERR_EINT2 */ +#define ARIZONA_IM_CTRLIF_ERR_EINT2_MASK 0x0800 /* IM_CTRLIF_ERR_EINT2 */ +#define ARIZONA_IM_CTRLIF_ERR_EINT2_SHIFT 11 /* IM_CTRLIF_ERR_EINT2 */ +#define ARIZONA_IM_CTRLIF_ERR_EINT2_WIDTH 1 /* IM_CTRLIF_ERR_EINT2 */ +#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2_MASK 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2_SHIFT 10 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2_WIDTH 1 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2_MASK 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2_SHIFT 9 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2_WIDTH 1 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2 0x0100 /* IM_SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2_MASK 0x0100 /* IM_SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2_SHIFT 8 /* IM_SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2_WIDTH 1 /* IM_SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2 0x0080 /* IM_ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2_MASK 0x0080 /* IM_ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2_SHIFT 7 /* IM_ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2 0x0040 /* IM_ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2_MASK 0x0040 /* IM_ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2_SHIFT 6 /* IM_ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_IM_HP3R_DONE_EINT2 0x0020 /* IM_HP3R_DONE_EINT2 */ +#define 
ARIZONA_IM_HP3R_DONE_EINT2_MASK 0x0020 /* IM_HP3R_DONE_EINT2 */ +#define ARIZONA_IM_HP3R_DONE_EINT2_SHIFT 5 /* IM_HP3R_DONE_EINT2 */ +#define ARIZONA_IM_HP3R_DONE_EINT2_WIDTH 1 /* IM_HP3R_DONE_EINT2 */ +#define ARIZONA_IM_HP3L_DONE_EINT2 0x0010 /* IM_HP3L_DONE_EINT2 */ +#define ARIZONA_IM_HP3L_DONE_EINT2_MASK 0x0010 /* IM_HP3L_DONE_EINT2 */ +#define ARIZONA_IM_HP3L_DONE_EINT2_SHIFT 4 /* IM_HP3L_DONE_EINT2 */ +#define ARIZONA_IM_HP3L_DONE_EINT2_WIDTH 1 /* IM_HP3L_DONE_EINT2 */ +#define ARIZONA_IM_HP2R_DONE_EINT2 0x0008 /* IM_HP2R_DONE_EINT2 */ +#define ARIZONA_IM_HP2R_DONE_EINT2_MASK 0x0008 /* IM_HP2R_DONE_EINT2 */ +#define ARIZONA_IM_HP2R_DONE_EINT2_SHIFT 3 /* IM_HP2R_DONE_EINT2 */ +#define ARIZONA_IM_HP2R_DONE_EINT2_WIDTH 1 /* IM_HP2R_DONE_EINT2 */ +#define ARIZONA_IM_HP2L_DONE_EINT2 0x0004 /* IM_HP2L_DONE_EINT2 */ +#define ARIZONA_IM_HP2L_DONE_EINT2_MASK 0x0004 /* IM_HP2L_DONE_EINT2 */ +#define ARIZONA_IM_HP2L_DONE_EINT2_SHIFT 2 /* IM_HP2L_DONE_EINT2 */ +#define ARIZONA_IM_HP2L_DONE_EINT2_WIDTH 1 /* IM_HP2L_DONE_EINT2 */ +#define ARIZONA_IM_HP1R_DONE_EINT2 0x0002 /* IM_HP1R_DONE_EINT2 */ +#define ARIZONA_IM_HP1R_DONE_EINT2_MASK 0x0002 /* IM_HP1R_DONE_EINT2 */ +#define ARIZONA_IM_HP1R_DONE_EINT2_SHIFT 1 /* IM_HP1R_DONE_EINT2 */ +#define ARIZONA_IM_HP1R_DONE_EINT2_WIDTH 1 /* IM_HP1R_DONE_EINT2 */ +#define ARIZONA_IM_HP1L_DONE_EINT2 0x0001 /* IM_HP1L_DONE_EINT2 */ +#define ARIZONA_IM_HP1L_DONE_EINT2_MASK 0x0001 /* IM_HP1L_DONE_EINT2 */ +#define ARIZONA_IM_HP1L_DONE_EINT2_SHIFT 0 /* IM_HP1L_DONE_EINT2 */ +#define ARIZONA_IM_HP1L_DONE_EINT2_WIDTH 1 /* IM_HP1L_DONE_EINT2 */ + +/* + * R3355 (0xD1B) - IRQ2 Status 4 Mask (Alternate layout) + * + * Alternate layout used on later devices, note only fields that have moved + * are specified + */ +#define ARIZONA_V2_IM_AIF3_ERR_EINT2 0x8000 /* IM_AIF3_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF3_ERR_EINT2_MASK 0x8000 /* IM_AIF3_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF3_ERR_EINT2_SHIFT 15 /* IM_AIF3_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF3_ERR_EINT2_WIDTH 1 /* IM_AIF3_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF2_ERR_EINT2 0x4000 /* IM_AIF2_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF2_ERR_EINT2_MASK 0x4000 /* IM_AIF2_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF2_ERR_EINT2_SHIFT 14 /* IM_AIF2_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF2_ERR_EINT2_WIDTH 1 /* IM_AIF2_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF1_ERR_EINT2 0x2000 /* IM_AIF1_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF1_ERR_EINT2_MASK 0x2000 /* IM_AIF1_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF1_ERR_EINT2_SHIFT 13 /* IM_AIF1_ERR_EINT2 */ +#define ARIZONA_V2_IM_AIF1_ERR_EINT2_WIDTH 1 /* IM_AIF1_ERR_EINT2 */ +#define ARIZONA_V2_IM_CTRLIF_ERR_EINT2 0x1000 /* IM_CTRLIF_ERR_EINT2 */ +#define ARIZONA_V2_IM_CTRLIF_ERR_EINT2_MASK 0x1000 /* IM_CTRLIF_ERR_EINT2 */ +#define ARIZONA_V2_IM_CTRLIF_ERR_EINT2_SHIFT 12 /* IM_CTRLIF_ERR_EINT2 */ +#define ARIZONA_V2_IM_CTRLIF_ERR_EINT2_WIDTH 1 /* IM_CTRLIF_ERR_EINT2 */ +#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT2 0x0800 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT2_MASK 0x0800 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT2_SHIFT 11 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT2_WIDTH 1 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */ +#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT2 0x0400 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT2_MASK 0x0400 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT2_SHIFT 10 /* 
IM_ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT2_WIDTH 1 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT2 0x0200 /* IM_SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT2_MASK 0x0200 /* IM_SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT2_SHIFT 9 /* IM_SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT2_WIDTH 1 /* IM_SYSCLK_ENA_LOW_EINT2 */ +#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT2 0x0100 /* IM_ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT2_MASK 0x0100 /* IM_ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT2_SHIFT 8 /* IM_ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC1_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT2 0x0080 /* IM_ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT2_MASK 0x0080 /* IM_ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT2_SHIFT 7 /* IM_ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC2_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT2 0x0040 /* IM_ISRC3_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT2_MASK 0x0040 /* IM_ISRC3_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT2_SHIFT 6 /* IM_ISRC3_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC3_CFG_ERR_EINT2 */ + +/* + * R3356 (0xD1C) - IRQ2 Status 5 Mask + */ + +#define ARIZONA_IM_BOOT_DONE_EINT2 0x0100 /* IM_BOOT_DONE_EINT2 */ +#define ARIZONA_IM_BOOT_DONE_EINT2_MASK 0x0100 /* IM_BOOT_DONE_EINT2 */ +#define ARIZONA_IM_BOOT_DONE_EINT2_SHIFT 8 /* IM_BOOT_DONE_EINT2 */ +#define ARIZONA_IM_BOOT_DONE_EINT2_WIDTH 1 /* IM_BOOT_DONE_EINT2 */ +#define ARIZONA_IM_DCS_DAC_DONE_EINT2 0x0080 /* IM_DCS_DAC_DONE_EINT2 */ +#define ARIZONA_IM_DCS_DAC_DONE_EINT2_MASK 0x0080 /* IM_DCS_DAC_DONE_EINT2 */ +#define ARIZONA_IM_DCS_DAC_DONE_EINT2_SHIFT 7 /* IM_DCS_DAC_DONE_EINT2 */ +#define ARIZONA_IM_DCS_DAC_DONE_EINT2_WIDTH 1 /* IM_DCS_DAC_DONE_EINT2 */ +#define ARIZONA_IM_DCS_HP_DONE_EINT2 0x0040 /* IM_DCS_HP_DONE_EINT2 */ +#define ARIZONA_IM_DCS_HP_DONE_EINT2_MASK 0x0040 /* IM_DCS_HP_DONE_EINT2 */ +#define ARIZONA_IM_DCS_HP_DONE_EINT2_SHIFT 6 /* IM_DCS_HP_DONE_EINT2 */ +#define ARIZONA_IM_DCS_HP_DONE_EINT2_WIDTH 1 /* IM_DCS_HP_DONE_EINT2 */ +#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2 0x0002 /* IM_FLL2_CLOCK_OK_EINT2 */ +#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2_MASK 0x0002 /* IM_FLL2_CLOCK_OK_EINT2 */ +#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2_SHIFT 1 /* IM_FLL2_CLOCK_OK_EINT2 */ +#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2_WIDTH 1 /* IM_FLL2_CLOCK_OK_EINT2 */ +#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2 0x0001 /* IM_FLL1_CLOCK_OK_EINT2 */ +#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2_MASK 0x0001 /* IM_FLL1_CLOCK_OK_EINT2 */ +#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2_SHIFT 0 /* IM_FLL1_CLOCK_OK_EINT2 */ +#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2_WIDTH 1 /* IM_FLL1_CLOCK_OK_EINT2 */ + +/* + * R3356 (0xD1C) - IRQ2 Status 5 Mask (Alternate layout) + * + * Alternate layout used on later devices, note only fields that have moved + * are specified + */ +#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT2 0x0008 /* IM_ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT2_MASK 0x0008 /* IM_ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT2_SHIFT 3 /* IM_ASRC_CFG_ERR_EINT2 */ +#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT2_WIDTH 1 /* IM_ASRC_CFG_ERR_EINT2 */ + +/* + * R3357 (0xD1D) - IRQ2 Status 6 Mask + */ +#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT2
0x8000 /* IM_DSP_SHARED_WR_COLL_EINT2 */ +#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT2_MASK 0x8000 /* IM_DSP_SHARED_WR_COLL_EINT2 */ +#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT2_SHIFT 15 /* IM_DSP_SHARED_WR_COLL_EINT2 */ +#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT2_WIDTH 1 /* IM_DSP_SHARED_WR_COLL_EINT2 */ +#define ARIZONA_IM_SPK_SHUTDOWN_EINT2 0x4000 /* IM_SPK_SHUTDOWN_EINT2 */ +#define ARIZONA_IM_SPK_SHUTDOWN_EINT2_MASK 0x4000 /* IM_SPK_SHUTDOWN_EINT2 */ +#define ARIZONA_IM_SPK_SHUTDOWN_EINT2_SHIFT 14 /* IM_SPK_SHUTDOWN_EINT2 */ +#define ARIZONA_IM_SPK_SHUTDOWN_EINT2_WIDTH 1 /* IM_SPK_SHUTDOWN_EINT2 */ +#define ARIZONA_IM_SPK1R_SHORT_EINT2 0x2000 /* IM_SPK1R_SHORT_EINT2 */ +#define ARIZONA_IM_SPK1R_SHORT_EINT2_MASK 0x2000 /* IM_SPK1R_SHORT_EINT2 */ +#define ARIZONA_IM_SPK1R_SHORT_EINT2_SHIFT 13 /* IM_SPK1R_SHORT_EINT2 */ +#define ARIZONA_IM_SPK1R_SHORT_EINT2_WIDTH 1 /* IM_SPK1R_SHORT_EINT2 */ +#define ARIZONA_IM_SPK1L_SHORT_EINT2 0x1000 /* IM_SPK1L_SHORT_EINT2 */ +#define ARIZONA_IM_SPK1L_SHORT_EINT2_MASK 0x1000 /* IM_SPK1L_SHORT_EINT2 */ +#define ARIZONA_IM_SPK1L_SHORT_EINT2_SHIFT 12 /* IM_SPK1L_SHORT_EINT2 */ +#define ARIZONA_IM_SPK1L_SHORT_EINT2_WIDTH 1 /* IM_SPK1L_SHORT_EINT2 */ +#define ARIZONA_IM_HP3R_SC_NEG_EINT2 0x0800 /* IM_HP3R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP3R_SC_NEG_EINT2_MASK 0x0800 /* IM_HP3R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP3R_SC_NEG_EINT2_SHIFT 11 /* IM_HP3R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP3R_SC_NEG_EINT2_WIDTH 1 /* IM_HP3R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP3R_SC_POS_EINT2 0x0400 /* IM_HP3R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP3R_SC_POS_EINT2_MASK 0x0400 /* IM_HP3R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP3R_SC_POS_EINT2_SHIFT 10 /* IM_HP3R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP3R_SC_POS_EINT2_WIDTH 1 /* IM_HP3R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP3L_SC_NEG_EINT2 0x0200 /* IM_HP3L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP3L_SC_NEG_EINT2_MASK 0x0200 /* IM_HP3L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP3L_SC_NEG_EINT2_SHIFT 9 /* IM_HP3L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP3L_SC_NEG_EINT2_WIDTH 1 /* IM_HP3L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP3L_SC_POS_EINT2 0x0100 /* IM_HP3L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP3L_SC_POS_EINT2_MASK 0x0100 /* IM_HP3L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP3L_SC_POS_EINT2_SHIFT 8 /* IM_HP3L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP3L_SC_POS_EINT2_WIDTH 1 /* IM_HP3L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP2R_SC_NEG_EINT2 0x0080 /* IM_HP2R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP2R_SC_NEG_EINT2_MASK 0x0080 /* IM_HP2R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP2R_SC_NEG_EINT2_SHIFT 7 /* IM_HP2R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP2R_SC_NEG_EINT2_WIDTH 1 /* IM_HP2R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP2R_SC_POS_EINT2 0x0040 /* IM_HP2R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP2R_SC_POS_EINT2_MASK 0x0040 /* IM_HP2R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP2R_SC_POS_EINT2_SHIFT 6 /* IM_HP2R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP2R_SC_POS_EINT2_WIDTH 1 /* IM_HP2R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP2L_SC_NEG_EINT2 0x0020 /* IM_HP2L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP2L_SC_NEG_EINT2_MASK 0x0020 /* IM_HP2L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP2L_SC_NEG_EINT2_SHIFT 5 /* IM_HP2L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP2L_SC_NEG_EINT2_WIDTH 1 /* IM_HP2L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP2L_SC_POS_EINT2 0x0010 /* IM_HP2L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP2L_SC_POS_EINT2_MASK 0x0010 /* IM_HP2L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP2L_SC_POS_EINT2_SHIFT 4 /* IM_HP2L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP2L_SC_POS_EINT2_WIDTH 1 /* 
IM_HP2L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP1R_SC_NEG_EINT2 0x0008 /* IM_HP1R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP1R_SC_NEG_EINT2_MASK 0x0008 /* IM_HP1R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP1R_SC_NEG_EINT2_SHIFT 3 /* IM_HP1R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP1R_SC_NEG_EINT2_WIDTH 1 /* IM_HP1R_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP1R_SC_POS_EINT2 0x0004 /* IM_HP1R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP1R_SC_POS_EINT2_MASK 0x0004 /* IM_HP1R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP1R_SC_POS_EINT2_SHIFT 2 /* IM_HP1R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP1R_SC_POS_EINT2_WIDTH 1 /* IM_HP1R_SC_POS_EINT2 */ +#define ARIZONA_IM_HP1L_SC_NEG_EINT2 0x0002 /* IM_HP1L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP1L_SC_NEG_EINT2_MASK 0x0002 /* IM_HP1L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP1L_SC_NEG_EINT2_SHIFT 1 /* IM_HP1L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP1L_SC_NEG_EINT2_WIDTH 1 /* IM_HP1L_SC_NEG_EINT2 */ +#define ARIZONA_IM_HP1L_SC_POS_EINT2 0x0001 /* IM_HP1L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP1L_SC_POS_EINT2_MASK 0x0001 /* IM_HP1L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP1L_SC_POS_EINT2_SHIFT 0 /* IM_HP1L_SC_POS_EINT2 */ +#define ARIZONA_IM_HP1L_SC_POS_EINT2_WIDTH 1 /* IM_HP1L_SC_POS_EINT2 */ + +/* + * R3359 (0xD1F) - IRQ2 Control + */ +#define ARIZONA_IM_IRQ2 0x0001 /* IM_IRQ2 */ +#define ARIZONA_IM_IRQ2_MASK 0x0001 /* IM_IRQ2 */ +#define ARIZONA_IM_IRQ2_SHIFT 0 /* IM_IRQ2 */ +#define ARIZONA_IM_IRQ2_WIDTH 1 /* IM_IRQ2 */ + +/* + * R3360 (0xD20) - Interrupt Raw Status 2 + */ +#define ARIZONA_DSP1_RAM_RDY_STS 0x0100 /* DSP1_RAM_RDY_STS */ +#define ARIZONA_DSP1_RAM_RDY_STS_MASK 0x0100 /* DSP1_RAM_RDY_STS */ +#define ARIZONA_DSP1_RAM_RDY_STS_SHIFT 8 /* DSP1_RAM_RDY_STS */ +#define ARIZONA_DSP1_RAM_RDY_STS_WIDTH 1 /* DSP1_RAM_RDY_STS */ +#define ARIZONA_DSP_IRQ2_STS 0x0002 /* DSP_IRQ2_STS */ +#define ARIZONA_DSP_IRQ2_STS_MASK 0x0002 /* DSP_IRQ2_STS */ +#define ARIZONA_DSP_IRQ2_STS_SHIFT 1 /* DSP_IRQ2_STS */ +#define ARIZONA_DSP_IRQ2_STS_WIDTH 1 /* DSP_IRQ2_STS */ +#define ARIZONA_DSP_IRQ1_STS 0x0001 /* DSP_IRQ1_STS */ +#define ARIZONA_DSP_IRQ1_STS_MASK 0x0001 /* DSP_IRQ1_STS */ +#define ARIZONA_DSP_IRQ1_STS_SHIFT 0 /* DSP_IRQ1_STS */ +#define ARIZONA_DSP_IRQ1_STS_WIDTH 1 /* DSP_IRQ1_STS */ + +/* + * R3361 (0xD21) - Interrupt Raw Status 3 + */ +#define ARIZONA_SPK_OVERHEAT_WARN_STS 0x8000 /* SPK_OVERHEAT_WARN_STS */ +#define ARIZONA_SPK_OVERHEAT_WARN_STS_MASK 0x8000 /* SPK_OVERHEAT_WARN_STS */ +#define ARIZONA_SPK_OVERHEAT_WARN_STS_SHIFT 15 /* SPK_OVERHEAT_WARN_STS */ +#define ARIZONA_SPK_OVERHEAT_WARN_STS_WIDTH 1 /* SPK_OVERHEAT_WARN_STS */ +#define ARIZONA_SPK_OVERHEAT_STS 0x4000 /* SPK_OVERHEAT_STS */ +#define ARIZONA_SPK_OVERHEAT_STS_MASK 0x4000 /* SPK_OVERHEAT_STS */ +#define ARIZONA_SPK_OVERHEAT_STS_SHIFT 14 /* SPK_OVERHEAT_STS */ +#define ARIZONA_SPK_OVERHEAT_STS_WIDTH 1 /* SPK_OVERHEAT_STS */ +#define ARIZONA_HPDET_STS 0x2000 /* HPDET_STS */ +#define ARIZONA_HPDET_STS_MASK 0x2000 /* HPDET_STS */ +#define ARIZONA_HPDET_STS_SHIFT 13 /* HPDET_STS */ +#define ARIZONA_HPDET_STS_WIDTH 1 /* HPDET_STS */ +#define ARIZONA_MICDET_STS 0x1000 /* MICDET_STS */ +#define ARIZONA_MICDET_STS_MASK 0x1000 /* MICDET_STS */ +#define ARIZONA_MICDET_STS_SHIFT 12 /* MICDET_STS */ +#define ARIZONA_MICDET_STS_WIDTH 1 /* MICDET_STS */ +#define ARIZONA_WSEQ_DONE_STS 0x0800 /* WSEQ_DONE_STS */ +#define ARIZONA_WSEQ_DONE_STS_MASK 0x0800 /* WSEQ_DONE_STS */ +#define ARIZONA_WSEQ_DONE_STS_SHIFT 11 /* WSEQ_DONE_STS */ +#define ARIZONA_WSEQ_DONE_STS_WIDTH 1 /* WSEQ_DONE_STS */ +#define ARIZONA_DRC2_SIG_DET_STS 0x0400 /* 
DRC2_SIG_DET_STS */ +#define ARIZONA_DRC2_SIG_DET_STS_MASK 0x0400 /* DRC2_SIG_DET_STS */ +#define ARIZONA_DRC2_SIG_DET_STS_SHIFT 10 /* DRC2_SIG_DET_STS */ +#define ARIZONA_DRC2_SIG_DET_STS_WIDTH 1 /* DRC2_SIG_DET_STS */ +#define ARIZONA_DRC1_SIG_DET_STS 0x0200 /* DRC1_SIG_DET_STS */ +#define ARIZONA_DRC1_SIG_DET_STS_MASK 0x0200 /* DRC1_SIG_DET_STS */ +#define ARIZONA_DRC1_SIG_DET_STS_SHIFT 9 /* DRC1_SIG_DET_STS */ +#define ARIZONA_DRC1_SIG_DET_STS_WIDTH 1 /* DRC1_SIG_DET_STS */ +#define ARIZONA_ASRC2_LOCK_STS 0x0100 /* ASRC2_LOCK_STS */ +#define ARIZONA_ASRC2_LOCK_STS_MASK 0x0100 /* ASRC2_LOCK_STS */ +#define ARIZONA_ASRC2_LOCK_STS_SHIFT 8 /* ASRC2_LOCK_STS */ +#define ARIZONA_ASRC2_LOCK_STS_WIDTH 1 /* ASRC2_LOCK_STS */ +#define ARIZONA_ASRC1_LOCK_STS 0x0080 /* ASRC1_LOCK_STS */ +#define ARIZONA_ASRC1_LOCK_STS_MASK 0x0080 /* ASRC1_LOCK_STS */ +#define ARIZONA_ASRC1_LOCK_STS_SHIFT 7 /* ASRC1_LOCK_STS */ +#define ARIZONA_ASRC1_LOCK_STS_WIDTH 1 /* ASRC1_LOCK_STS */ +#define ARIZONA_UNDERCLOCKED_STS 0x0040 /* UNDERCLOCKED_STS */ +#define ARIZONA_UNDERCLOCKED_STS_MASK 0x0040 /* UNDERCLOCKED_STS */ +#define ARIZONA_UNDERCLOCKED_STS_SHIFT 6 /* UNDERCLOCKED_STS */ +#define ARIZONA_UNDERCLOCKED_STS_WIDTH 1 /* UNDERCLOCKED_STS */ +#define ARIZONA_OVERCLOCKED_STS 0x0020 /* OVERCLOCKED_STS */ +#define ARIZONA_OVERCLOCKED_STS_MASK 0x0020 /* OVERCLOCKED_STS */ +#define ARIZONA_OVERCLOCKED_STS_SHIFT 5 /* OVERCLOCKED_STS */ +#define ARIZONA_OVERCLOCKED_STS_WIDTH 1 /* OVERCLOCKED_STS */ +#define ARIZONA_FLL2_LOCK_STS 0x0008 /* FLL2_LOCK_STS */ +#define ARIZONA_FLL2_LOCK_STS_MASK 0x0008 /* FLL2_LOCK_STS */ +#define ARIZONA_FLL2_LOCK_STS_SHIFT 3 /* FLL2_LOCK_STS */ +#define ARIZONA_FLL2_LOCK_STS_WIDTH 1 /* FLL2_LOCK_STS */ +#define ARIZONA_FLL1_LOCK_STS 0x0004 /* FLL1_LOCK_STS */ +#define ARIZONA_FLL1_LOCK_STS_MASK 0x0004 /* FLL1_LOCK_STS */ +#define ARIZONA_FLL1_LOCK_STS_SHIFT 2 /* FLL1_LOCK_STS */ +#define ARIZONA_FLL1_LOCK_STS_WIDTH 1 /* FLL1_LOCK_STS */ +#define ARIZONA_CLKGEN_ERR_STS 0x0002 /* CLKGEN_ERR_STS */ +#define ARIZONA_CLKGEN_ERR_STS_MASK 0x0002 /* CLKGEN_ERR_STS */ +#define ARIZONA_CLKGEN_ERR_STS_SHIFT 1 /* CLKGEN_ERR_STS */ +#define ARIZONA_CLKGEN_ERR_STS_WIDTH 1 /* CLKGEN_ERR_STS */ +#define ARIZONA_CLKGEN_ERR_ASYNC_STS 0x0001 /* CLKGEN_ERR_ASYNC_STS */ +#define ARIZONA_CLKGEN_ERR_ASYNC_STS_MASK 0x0001 /* CLKGEN_ERR_ASYNC_STS */ +#define ARIZONA_CLKGEN_ERR_ASYNC_STS_SHIFT 0 /* CLKGEN_ERR_ASYNC_STS */ +#define ARIZONA_CLKGEN_ERR_ASYNC_STS_WIDTH 1 /* CLKGEN_ERR_ASYNC_STS */ + +/* + * R3362 (0xD22) - Interrupt Raw Status 4 + */ +#define ARIZONA_ASRC_CFG_ERR_STS 0x8000 /* ASRC_CFG_ERR_STS */ +#define ARIZONA_ASRC_CFG_ERR_STS_MASK 0x8000 /* ASRC_CFG_ERR_STS */ +#define ARIZONA_ASRC_CFG_ERR_STS_SHIFT 15 /* ASRC_CFG_ERR_STS */ +#define ARIZONA_ASRC_CFG_ERR_STS_WIDTH 1 /* ASRC_CFG_ERR_STS */ +#define ARIZONA_AIF3_ERR_STS 0x4000 /* AIF3_ERR_STS */ +#define ARIZONA_AIF3_ERR_STS_MASK 0x4000 /* AIF3_ERR_STS */ +#define ARIZONA_AIF3_ERR_STS_SHIFT 14 /* AIF3_ERR_STS */ +#define ARIZONA_AIF3_ERR_STS_WIDTH 1 /* AIF3_ERR_STS */ +#define ARIZONA_AIF2_ERR_STS 0x2000 /* AIF2_ERR_STS */ +#define ARIZONA_AIF2_ERR_STS_MASK 0x2000 /* AIF2_ERR_STS */ +#define ARIZONA_AIF2_ERR_STS_SHIFT 13 /* AIF2_ERR_STS */ +#define ARIZONA_AIF2_ERR_STS_WIDTH 1 /* AIF2_ERR_STS */ +#define ARIZONA_AIF1_ERR_STS 0x1000 /* AIF1_ERR_STS */ +#define ARIZONA_AIF1_ERR_STS_MASK 0x1000 /* AIF1_ERR_STS */ +#define ARIZONA_AIF1_ERR_STS_SHIFT 12 /* AIF1_ERR_STS */ +#define ARIZONA_AIF1_ERR_STS_WIDTH 1 /* AIF1_ERR_STS */ +#define 
ARIZONA_CTRLIF_ERR_STS 0x0800 /* CTRLIF_ERR_STS */ +#define ARIZONA_CTRLIF_ERR_STS_MASK 0x0800 /* CTRLIF_ERR_STS */ +#define ARIZONA_CTRLIF_ERR_STS_SHIFT 11 /* CTRLIF_ERR_STS */ +#define ARIZONA_CTRLIF_ERR_STS_WIDTH 1 /* CTRLIF_ERR_STS */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_STS 0x0400 /* MIXER_DROPPED_SAMPLE_STS */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_STS_MASK 0x0400 /* MIXER_DROPPED_SAMPLE_STS */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_STS_SHIFT 10 /* MIXER_DROPPED_SAMPLE_STS */ +#define ARIZONA_MIXER_DROPPED_SAMPLE_STS_WIDTH 1 /* MIXER_DROPPED_SAMPLE_STS */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_STS 0x0200 /* ASYNC_CLK_ENA_LOW_STS */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_STS_MASK 0x0200 /* ASYNC_CLK_ENA_LOW_STS */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_STS_SHIFT 9 /* ASYNC_CLK_ENA_LOW_STS */ +#define ARIZONA_ASYNC_CLK_ENA_LOW_STS_WIDTH 1 /* ASYNC_CLK_ENA_LOW_STS */ +#define ARIZONA_SYSCLK_ENA_LOW_STS 0x0100 /* SYSCLK_ENA_LOW_STS */ +#define ARIZONA_SYSCLK_ENA_LOW_STS_MASK 0x0100 /* SYSCLK_ENA_LOW_STS */ +#define ARIZONA_SYSCLK_ENA_LOW_STS_SHIFT 8 /* SYSCLK_ENA_LOW_STS */ +#define ARIZONA_SYSCLK_ENA_LOW_STS_WIDTH 1 /* SYSCLK_ENA_LOW_STS */ +#define ARIZONA_ISRC1_CFG_ERR_STS 0x0080 /* ISRC1_CFG_ERR_STS */ +#define ARIZONA_ISRC1_CFG_ERR_STS_MASK 0x0080 /* ISRC1_CFG_ERR_STS */ +#define ARIZONA_ISRC1_CFG_ERR_STS_SHIFT 7 /* ISRC1_CFG_ERR_STS */ +#define ARIZONA_ISRC1_CFG_ERR_STS_WIDTH 1 /* ISRC1_CFG_ERR_STS */ +#define ARIZONA_ISRC2_CFG_ERR_STS 0x0040 /* ISRC2_CFG_ERR_STS */ +#define ARIZONA_ISRC2_CFG_ERR_STS_MASK 0x0040 /* ISRC2_CFG_ERR_STS */ +#define ARIZONA_ISRC2_CFG_ERR_STS_SHIFT 6 /* ISRC2_CFG_ERR_STS */ +#define ARIZONA_ISRC2_CFG_ERR_STS_WIDTH 1 /* ISRC2_CFG_ERR_STS */ +#define ARIZONA_HP3R_DONE_STS 0x0020 /* HP3R_DONE_STS */ +#define ARIZONA_HP3R_DONE_STS_MASK 0x0020 /* HP3R_DONE_STS */ +#define ARIZONA_HP3R_DONE_STS_SHIFT 5 /* HP3R_DONE_STS */ +#define ARIZONA_HP3R_DONE_STS_WIDTH 1 /* HP3R_DONE_STS */ +#define ARIZONA_HP3L_DONE_STS 0x0010 /* HP3L_DONE_STS */ +#define ARIZONA_HP3L_DONE_STS_MASK 0x0010 /* HP3L_DONE_STS */ +#define ARIZONA_HP3L_DONE_STS_SHIFT 4 /* HP3L_DONE_STS */ +#define ARIZONA_HP3L_DONE_STS_WIDTH 1 /* HP3L_DONE_STS */ +#define ARIZONA_HP2R_DONE_STS 0x0008 /* HP2R_DONE_STS */ +#define ARIZONA_HP2R_DONE_STS_MASK 0x0008 /* HP2R_DONE_STS */ +#define ARIZONA_HP2R_DONE_STS_SHIFT 3 /* HP2R_DONE_STS */ +#define ARIZONA_HP2R_DONE_STS_WIDTH 1 /* HP2R_DONE_STS */ +#define ARIZONA_HP2L_DONE_STS 0x0004 /* HP2L_DONE_STS */ +#define ARIZONA_HP2L_DONE_STS_MASK 0x0004 /* HP2L_DONE_STS */ +#define ARIZONA_HP2L_DONE_STS_SHIFT 2 /* HP2L_DONE_STS */ +#define ARIZONA_HP2L_DONE_STS_WIDTH 1 /* HP2L_DONE_STS */ +#define ARIZONA_HP1R_DONE_STS 0x0002 /* HP1R_DONE_STS */ +#define ARIZONA_HP1R_DONE_STS_MASK 0x0002 /* HP1R_DONE_STS */ +#define ARIZONA_HP1R_DONE_STS_SHIFT 1 /* HP1R_DONE_STS */ +#define ARIZONA_HP1R_DONE_STS_WIDTH 1 /* HP1R_DONE_STS */ +#define ARIZONA_HP1L_DONE_STS 0x0001 /* HP1L_DONE_STS */ +#define ARIZONA_HP1L_DONE_STS_MASK 0x0001 /* HP1L_DONE_STS */ +#define ARIZONA_HP1L_DONE_STS_SHIFT 0 /* HP1L_DONE_STS */ +#define ARIZONA_HP1L_DONE_STS_WIDTH 1 /* HP1L_DONE_STS */ + +/* + * R3363 (0xD23) - Interrupt Raw Status 5 + */ +#define ARIZONA_BOOT_DONE_STS 0x0100 /* BOOT_DONE_STS */ +#define ARIZONA_BOOT_DONE_STS_MASK 0x0100 /* BOOT_DONE_STS */ +#define ARIZONA_BOOT_DONE_STS_SHIFT 8 /* BOOT_DONE_STS */ +#define ARIZONA_BOOT_DONE_STS_WIDTH 1 /* BOOT_DONE_STS */ +#define ARIZONA_DCS_DAC_DONE_STS 0x0080 /* DCS_DAC_DONE_STS */ +#define ARIZONA_DCS_DAC_DONE_STS_MASK 0x0080 /* DCS_DAC_DONE_STS */ 
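Unlike the latched *_EINT2 bits above, the *_STS bits in the Interrupt Raw Status registers reflect the live state of each source, so they can be polled directly. A hedged sketch of how a regmap-based driver might wait on BOOT_DONE_STS in Interrupt Raw Status 5, R3363 (0xD23), documented just above; the function name, retry count and sleep interval are illustrative choices, and the include path assumes this hunk belongs to the usual include/linux/mfd/arizona/registers.h header:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/regmap.h>
#include <linux/mfd/arizona/registers.h>

/*
 * Illustrative sketch only (not part of the header being added): poll
 * Interrupt Raw Status 5, R3363 (0xD23), until the boot sequencer
 * reports completion via BOOT_DONE_STS.
 */
static int example_wait_for_boot(struct regmap *regmap)
{
	unsigned int val;
	int i, ret;

	for (i = 0; i < 5; i++) {
		ret = regmap_read(regmap, 0xD23, &val);
		if (ret)
			return ret;

		if (val & ARIZONA_BOOT_DONE_STS_MASK)
			return 0;

		/* Arbitrary back-off for the example */
		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}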
+#define ARIZONA_DCS_DAC_DONE_STS_SHIFT 7 /* DCS_DAC_DONE_STS */ +#define ARIZONA_DCS_DAC_DONE_STS_WIDTH 1 /* DCS_DAC_DONE_STS */ +#define ARIZONA_DCS_HP_DONE_STS 0x0040 /* DCS_HP_DONE_STS */ +#define ARIZONA_DCS_HP_DONE_STS_MASK 0x0040 /* DCS_HP_DONE_STS */ +#define ARIZONA_DCS_HP_DONE_STS_SHIFT 6 /* DCS_HP_DONE_STS */ +#define ARIZONA_DCS_HP_DONE_STS_WIDTH 1 /* DCS_HP_DONE_STS */ +#define ARIZONA_FLL2_CLOCK_OK_STS 0x0002 /* FLL2_CLOCK_OK_STS */ +#define ARIZONA_FLL2_CLOCK_OK_STS_MASK 0x0002 /* FLL2_CLOCK_OK_STS */ +#define ARIZONA_FLL2_CLOCK_OK_STS_SHIFT 1 /* FLL2_CLOCK_OK_STS */ +#define ARIZONA_FLL2_CLOCK_OK_STS_WIDTH 1 /* FLL2_CLOCK_OK_STS */ +#define ARIZONA_FLL1_CLOCK_OK_STS 0x0001 /* FLL1_CLOCK_OK_STS */ +#define ARIZONA_FLL1_CLOCK_OK_STS_MASK 0x0001 /* FLL1_CLOCK_OK_STS */ +#define ARIZONA_FLL1_CLOCK_OK_STS_SHIFT 0 /* FLL1_CLOCK_OK_STS */ +#define ARIZONA_FLL1_CLOCK_OK_STS_WIDTH 1 /* FLL1_CLOCK_OK_STS */ + +/* + * R3364 (0xD24) - Interrupt Raw Status 6 + */ +#define ARIZONA_PWM_OVERCLOCKED_STS 0x2000 /* PWM_OVERCLOCKED_STS */ +#define ARIZONA_PWM_OVERCLOCKED_STS_MASK 0x2000 /* PWM_OVERCLOCKED_STS */ +#define ARIZONA_PWM_OVERCLOCKED_STS_SHIFT 13 /* PWM_OVERCLOCKED_STS */ +#define ARIZONA_PWM_OVERCLOCKED_STS_WIDTH 1 /* PWM_OVERCLOCKED_STS */ +#define ARIZONA_FX_CORE_OVERCLOCKED_STS 0x1000 /* FX_CORE_OVERCLOCKED_STS */ +#define ARIZONA_FX_CORE_OVERCLOCKED_STS_MASK 0x1000 /* FX_CORE_OVERCLOCKED_STS */ +#define ARIZONA_FX_CORE_OVERCLOCKED_STS_SHIFT 12 /* FX_CORE_OVERCLOCKED_STS */ +#define ARIZONA_FX_CORE_OVERCLOCKED_STS_WIDTH 1 /* FX_CORE_OVERCLOCKED_STS */ +#define ARIZONA_DAC_SYS_OVERCLOCKED_STS 0x0400 /* DAC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_DAC_SYS_OVERCLOCKED_STS_MASK 0x0400 /* DAC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_DAC_SYS_OVERCLOCKED_STS_SHIFT 10 /* DAC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_DAC_SYS_OVERCLOCKED_STS_WIDTH 1 /* DAC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_DAC_WARP_OVERCLOCKED_STS 0x0200 /* DAC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_DAC_WARP_OVERCLOCKED_STS_MASK 0x0200 /* DAC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_DAC_WARP_OVERCLOCKED_STS_SHIFT 9 /* DAC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_DAC_WARP_OVERCLOCKED_STS_WIDTH 1 /* DAC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_ADC_OVERCLOCKED_STS 0x0100 /* ADC_OVERCLOCKED_STS */ +#define ARIZONA_ADC_OVERCLOCKED_STS_MASK 0x0100 /* ADC_OVERCLOCKED_STS */ +#define ARIZONA_ADC_OVERCLOCKED_STS_SHIFT 8 /* ADC_OVERCLOCKED_STS */ +#define ARIZONA_ADC_OVERCLOCKED_STS_WIDTH 1 /* ADC_OVERCLOCKED_STS */ +#define ARIZONA_MIXER_OVERCLOCKED_STS 0x0080 /* MIXER_OVERCLOCKED_STS */ +#define ARIZONA_MIXER_OVERCLOCKED_STS_MASK 0x0080 /* MIXER_OVERCLOCKED_STS */ +#define ARIZONA_MIXER_OVERCLOCKED_STS_SHIFT 7 /* MIXER_OVERCLOCKED_STS */ +#define ARIZONA_MIXER_OVERCLOCKED_STS_WIDTH 1 /* MIXER_OVERCLOCKED_STS */ +#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS 0x0040 /* AIF3_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS_MASK 0x0040 /* AIF3_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS_SHIFT 6 /* AIF3_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF3_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS 0x0020 /* AIF2_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS_MASK 0x0020 /* AIF2_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS_SHIFT 5 /* AIF2_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF2_ASYNC_OVERCLOCKED_STS */ +#define 
ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS 0x0010 /* AIF1_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS_MASK 0x0010 /* AIF1_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS_SHIFT 4 /* AIF1_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF1_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS 0x0008 /* AIF3_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS_MASK 0x0008 /* AIF3_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS_SHIFT 3 /* AIF3_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF3_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS 0x0004 /* AIF2_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS_MASK 0x0004 /* AIF2_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS_SHIFT 2 /* AIF2_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF2_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS 0x0002 /* AIF1_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS_MASK 0x0002 /* AIF1_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS_SHIFT 1 /* AIF1_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF1_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS 0x0001 /* PAD_CTRL_OVERCLOCKED_STS */ +#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS_MASK 0x0001 /* PAD_CTRL_OVERCLOCKED_STS */ +#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS_SHIFT 0 /* PAD_CTRL_OVERCLOCKED_STS */ +#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS_WIDTH 1 /* PAD_CTRL_OVERCLOCKED_STS */ + +/* + * R3365 (0xD25) - Interrupt Raw Status 7 + */ +#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS 0x8000 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS_MASK 0x8000 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS_SHIFT 15 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS_WIDTH 1 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS 0x4000 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS_MASK 0x4000 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS_SHIFT 14 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS 0x2000 /* SLIMBUS_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS_MASK 0x2000 /* SLIMBUS_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS_SHIFT 13 /* SLIMBUS_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS_WIDTH 1 /* SLIMBUS_SYNC_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS 0x1000 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS_MASK 0x1000 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS_SHIFT 12 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS_WIDTH 1 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS 0x0800 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS_MASK 0x0800 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS_SHIFT 11 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS_WIDTH 1 /* 
ASRC_ASYNC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS 0x0400 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS_MASK 0x0400 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS_SHIFT 10 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS_WIDTH 1 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS 0x0200 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS_MASK 0x0200 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS_SHIFT 9 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS_WIDTH 1 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */ +#define ARIZONA_ADSP2_1_OVERCLOCKED_STS 0x0008 /* ADSP2_1_OVERCLOCKED_STS */ +#define ARIZONA_ADSP2_1_OVERCLOCKED_STS_MASK 0x0008 /* ADSP2_1_OVERCLOCKED_STS */ +#define ARIZONA_ADSP2_1_OVERCLOCKED_STS_SHIFT 3 /* ADSP2_1_OVERCLOCKED_STS */ +#define ARIZONA_ADSP2_1_OVERCLOCKED_STS_WIDTH 1 /* ADSP2_1_OVERCLOCKED_STS */ +#define ARIZONA_ISRC3_OVERCLOCKED_STS 0x0004 /* ISRC3_OVERCLOCKED_STS */ +#define ARIZONA_ISRC3_OVERCLOCKED_STS_MASK 0x0004 /* ISRC3_OVERCLOCKED_STS */ +#define ARIZONA_ISRC3_OVERCLOCKED_STS_SHIFT 2 /* ISRC3_OVERCLOCKED_STS */ +#define ARIZONA_ISRC3_OVERCLOCKED_STS_WIDTH 1 /* ISRC3_OVERCLOCKED_STS */ +#define ARIZONA_ISRC2_OVERCLOCKED_STS 0x0002 /* ISRC2_OVERCLOCKED_STS */ +#define ARIZONA_ISRC2_OVERCLOCKED_STS_MASK 0x0002 /* ISRC2_OVERCLOCKED_STS */ +#define ARIZONA_ISRC2_OVERCLOCKED_STS_SHIFT 1 /* ISRC2_OVERCLOCKED_STS */ +#define ARIZONA_ISRC2_OVERCLOCKED_STS_WIDTH 1 /* ISRC2_OVERCLOCKED_STS */ +#define ARIZONA_ISRC1_OVERCLOCKED_STS 0x0001 /* ISRC1_OVERCLOCKED_STS */ +#define ARIZONA_ISRC1_OVERCLOCKED_STS_MASK 0x0001 /* ISRC1_OVERCLOCKED_STS */ +#define ARIZONA_ISRC1_OVERCLOCKED_STS_SHIFT 0 /* ISRC1_OVERCLOCKED_STS */ +#define ARIZONA_ISRC1_OVERCLOCKED_STS_WIDTH 1 /* ISRC1_OVERCLOCKED_STS */ + +/* + * R3366 (0xD26) - Interrupt Raw Status 8 + */ +#define ARIZONA_SPDIF_OVERCLOCKED_STS 0x8000 /* SPDIF_OVERCLOCKED_STS */ +#define ARIZONA_SPDIF_OVERCLOCKED_STS_MASK 0x8000 /* SPDIF_OVERCLOCKED_STS */ +#define ARIZONA_SPDIF_OVERCLOCKED_STS_SHIFT 15 /* SPDIF_OVERCLOCKED_STS */ +#define ARIZONA_SPDIF_OVERCLOCKED_STS_WIDTH 1 /* SPDIF_OVERCLOCKED_STS */ +#define ARIZONA_AIF3_UNDERCLOCKED_STS 0x0400 /* AIF3_UNDERCLOCKED_STS */ +#define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK 0x0400 /* AIF3_UNDERCLOCKED_STS */ +#define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT 10 /* AIF3_UNDERCLOCKED_STS */ +#define ARIZONA_AIF3_UNDERCLOCKED_STS_WIDTH 1 /* AIF3_UNDERCLOCKED_STS */ +#define ARIZONA_AIF2_UNDERCLOCKED_STS 0x0200 /* AIF2_UNDERCLOCKED_STS */ +#define ARIZONA_AIF2_UNDERCLOCKED_STS_MASK 0x0200 /* AIF2_UNDERCLOCKED_STS */ +#define ARIZONA_AIF2_UNDERCLOCKED_STS_SHIFT 9 /* AIF2_UNDERCLOCKED_STS */ +#define ARIZONA_AIF2_UNDERCLOCKED_STS_WIDTH 1 /* AIF2_UNDERCLOCKED_STS */ +#define ARIZONA_AIF1_UNDERCLOCKED_STS 0x0100 /* AIF1_UNDERCLOCKED_STS */ +#define ARIZONA_AIF1_UNDERCLOCKED_STS_MASK 0x0100 /* AIF1_UNDERCLOCKED_STS */ +#define ARIZONA_AIF1_UNDERCLOCKED_STS_SHIFT 8 /* AIF1_UNDERCLOCKED_STS */ +#define ARIZONA_AIF1_UNDERCLOCKED_STS_WIDTH 1 /* AIF1_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC3_UNDERCLOCKED_STS 0x0080 /* ISRC3_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC3_UNDERCLOCKED_STS_MASK 0x0080 /* ISRC3_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC3_UNDERCLOCKED_STS_SHIFT 7 /* ISRC3_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC3_UNDERCLOCKED_STS_WIDTH 
1 /* ISRC3_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC2_UNDERCLOCKED_STS 0x0040 /* ISRC2_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC2_UNDERCLOCKED_STS_MASK 0x0040 /* ISRC2_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC2_UNDERCLOCKED_STS_SHIFT 6 /* ISRC2_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC2_UNDERCLOCKED_STS_WIDTH 1 /* ISRC2_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC1_UNDERCLOCKED_STS 0x0020 /* ISRC1_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC1_UNDERCLOCKED_STS_MASK 0x0020 /* ISRC1_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC1_UNDERCLOCKED_STS_SHIFT 5 /* ISRC1_UNDERCLOCKED_STS */ +#define ARIZONA_ISRC1_UNDERCLOCKED_STS_WIDTH 1 /* ISRC1_UNDERCLOCKED_STS */ +#define ARIZONA_FX_UNDERCLOCKED_STS 0x0010 /* FX_UNDERCLOCKED_STS */ +#define ARIZONA_FX_UNDERCLOCKED_STS_MASK 0x0010 /* FX_UNDERCLOCKED_STS */ +#define ARIZONA_FX_UNDERCLOCKED_STS_SHIFT 4 /* FX_UNDERCLOCKED_STS */ +#define ARIZONA_FX_UNDERCLOCKED_STS_WIDTH 1 /* FX_UNDERCLOCKED_STS */ +#define ARIZONA_ASRC_UNDERCLOCKED_STS 0x0008 /* ASRC_UNDERCLOCKED_STS */ +#define ARIZONA_ASRC_UNDERCLOCKED_STS_MASK 0x0008 /* ASRC_UNDERCLOCKED_STS */ +#define ARIZONA_ASRC_UNDERCLOCKED_STS_SHIFT 3 /* ASRC_UNDERCLOCKED_STS */ +#define ARIZONA_ASRC_UNDERCLOCKED_STS_WIDTH 1 /* ASRC_UNDERCLOCKED_STS */ +#define ARIZONA_DAC_UNDERCLOCKED_STS 0x0004 /* DAC_UNDERCLOCKED_STS */ +#define ARIZONA_DAC_UNDERCLOCKED_STS_MASK 0x0004 /* DAC_UNDERCLOCKED_STS */ +#define ARIZONA_DAC_UNDERCLOCKED_STS_SHIFT 2 /* DAC_UNDERCLOCKED_STS */ +#define ARIZONA_DAC_UNDERCLOCKED_STS_WIDTH 1 /* DAC_UNDERCLOCKED_STS */ +#define ARIZONA_ADC_UNDERCLOCKED_STS 0x0002 /* ADC_UNDERCLOCKED_STS */ +#define ARIZONA_ADC_UNDERCLOCKED_STS_MASK 0x0002 /* ADC_UNDERCLOCKED_STS */ +#define ARIZONA_ADC_UNDERCLOCKED_STS_SHIFT 1 /* ADC_UNDERCLOCKED_STS */ +#define ARIZONA_ADC_UNDERCLOCKED_STS_WIDTH 1 /* ADC_UNDERCLOCKED_STS */ +#define ARIZONA_MIXER_UNDERCLOCKED_STS 0x0001 /* MIXER_UNDERCLOCKED_STS */ +#define ARIZONA_MIXER_UNDERCLOCKED_STS_MASK 0x0001 /* MIXER_UNDERCLOCKED_STS */ +#define ARIZONA_MIXER_UNDERCLOCKED_STS_SHIFT 0 /* MIXER_UNDERCLOCKED_STS */ +#define ARIZONA_MIXER_UNDERCLOCKED_STS_WIDTH 1 /* MIXER_UNDERCLOCKED_STS */ + +/* + * R3368 (0xD28) - Interrupt Raw Status 9 + */ +#define ARIZONA_DSP_SHARED_WR_COLL_STS 0x8000 /* DSP_SHARED_WR_COLL_STS */ +#define ARIZONA_DSP_SHARED_WR_COLL_STS_MASK 0x8000 /* DSP_SHARED_WR_COLL_STS */ +#define ARIZONA_DSP_SHARED_WR_COLL_STS_SHIFT 15 /* DSP_SHARED_WR_COLL_STS */ +#define ARIZONA_DSP_SHARED_WR_COLL_STS_WIDTH 1 /* DSP_SHARED_WR_COLL_STS */ +#define ARIZONA_SPK_SHUTDOWN_STS 0x4000 /* SPK_SHUTDOWN_STS */ +#define ARIZONA_SPK_SHUTDOWN_STS_MASK 0x4000 /* SPK_SHUTDOWN_STS */ +#define ARIZONA_SPK_SHUTDOWN_STS_SHIFT 14 /* SPK_SHUTDOWN_STS */ +#define ARIZONA_SPK_SHUTDOWN_STS_WIDTH 1 /* SPK_SHUTDOWN_STS */ +#define ARIZONA_SPK1R_SHORT_STS 0x2000 /* SPK1R_SHORT_STS */ +#define ARIZONA_SPK1R_SHORT_STS_MASK 0x2000 /* SPK1R_SHORT_STS */ +#define ARIZONA_SPK1R_SHORT_STS_SHIFT 13 /* SPK1R_SHORT_STS */ +#define ARIZONA_SPK1R_SHORT_STS_WIDTH 1 /* SPK1R_SHORT_STS */ +#define ARIZONA_SPK1L_SHORT_STS 0x1000 /* SPK1L_SHORT_STS */ +#define ARIZONA_SPK1L_SHORT_STS_MASK 0x1000 /* SPK1L_SHORT_STS */ +#define ARIZONA_SPK1L_SHORT_STS_SHIFT 12 /* SPK1L_SHORT_STS */ +#define ARIZONA_SPK1L_SHORT_STS_WIDTH 1 /* SPK1L_SHORT_STS */ +#define ARIZONA_HP3R_SC_NEG_STS 0x0800 /* HP3R_SC_NEG_STS */ +#define ARIZONA_HP3R_SC_NEG_STS_MASK 0x0800 /* HP3R_SC_NEG_STS */ +#define ARIZONA_HP3R_SC_NEG_STS_SHIFT 11 /* HP3R_SC_NEG_STS */ +#define ARIZONA_HP3R_SC_NEG_STS_WIDTH 1 /* HP3R_SC_NEG_STS */ +#define 
ARIZONA_HP3R_SC_POS_STS 0x0400 /* HP3R_SC_POS_STS */ +#define ARIZONA_HP3R_SC_POS_STS_MASK 0x0400 /* HP3R_SC_POS_STS */ +#define ARIZONA_HP3R_SC_POS_STS_SHIFT 10 /* HP3R_SC_POS_STS */ +#define ARIZONA_HP3R_SC_POS_STS_WIDTH 1 /* HP3R_SC_POS_STS */ +#define ARIZONA_HP3L_SC_NEG_STS 0x0200 /* HP3L_SC_NEG_STS */ +#define ARIZONA_HP3L_SC_NEG_STS_MASK 0x0200 /* HP3L_SC_NEG_STS */ +#define ARIZONA_HP3L_SC_NEG_STS_SHIFT 9 /* HP3L_SC_NEG_STS */ +#define ARIZONA_HP3L_SC_NEG_STS_WIDTH 1 /* HP3L_SC_NEG_STS */ +#define ARIZONA_HP3L_SC_POS_STS 0x0100 /* HP3L_SC_POS_STS */ +#define ARIZONA_HP3L_SC_POS_STS_MASK 0x0100 /* HP3L_SC_POS_STS */ +#define ARIZONA_HP3L_SC_POS_STS_SHIFT 8 /* HP3L_SC_POS_STS */ +#define ARIZONA_HP3L_SC_POS_STS_WIDTH 1 /* HP3L_SC_POS_STS */ +#define ARIZONA_HP2R_SC_NEG_STS 0x0080 /* HP2R_SC_NEG_STS */ +#define ARIZONA_HP2R_SC_NEG_STS_MASK 0x0080 /* HP2R_SC_NEG_STS */ +#define ARIZONA_HP2R_SC_NEG_STS_SHIFT 7 /* HP2R_SC_NEG_STS */ +#define ARIZONA_HP2R_SC_NEG_STS_WIDTH 1 /* HP2R_SC_NEG_STS */ +#define ARIZONA_HP2R_SC_POS_STS 0x0040 /* HP2R_SC_POS_STS */ +#define ARIZONA_HP2R_SC_POS_STS_MASK 0x0040 /* HP2R_SC_POS_STS */ +#define ARIZONA_HP2R_SC_POS_STS_SHIFT 6 /* HP2R_SC_POS_STS */ +#define ARIZONA_HP2R_SC_POS_STS_WIDTH 1 /* HP2R_SC_POS_STS */ +#define ARIZONA_HP2L_SC_NEG_STS 0x0020 /* HP2L_SC_NEG_STS */ +#define ARIZONA_HP2L_SC_NEG_STS_MASK 0x0020 /* HP2L_SC_NEG_STS */ +#define ARIZONA_HP2L_SC_NEG_STS_SHIFT 5 /* HP2L_SC_NEG_STS */ +#define ARIZONA_HP2L_SC_NEG_STS_WIDTH 1 /* HP2L_SC_NEG_STS */ +#define ARIZONA_HP2L_SC_POS_STS 0x0010 /* HP2L_SC_POS_STS */ +#define ARIZONA_HP2L_SC_POS_STS_MASK 0x0010 /* HP2L_SC_POS_STS */ +#define ARIZONA_HP2L_SC_POS_STS_SHIFT 4 /* HP2L_SC_POS_STS */ +#define ARIZONA_HP2L_SC_POS_STS_WIDTH 1 /* HP2L_SC_POS_STS */ +#define ARIZONA_HP1R_SC_NEG_STS 0x0008 /* HP1R_SC_NEG_STS */ +#define ARIZONA_HP1R_SC_NEG_STS_MASK 0x0008 /* HP1R_SC_NEG_STS */ +#define ARIZONA_HP1R_SC_NEG_STS_SHIFT 3 /* HP1R_SC_NEG_STS */ +#define ARIZONA_HP1R_SC_NEG_STS_WIDTH 1 /* HP1R_SC_NEG_STS */ +#define ARIZONA_HP1R_SC_POS_STS 0x0004 /* HP1R_SC_POS_STS */ +#define ARIZONA_HP1R_SC_POS_STS_MASK 0x0004 /* HP1R_SC_POS_STS */ +#define ARIZONA_HP1R_SC_POS_STS_SHIFT 2 /* HP1R_SC_POS_STS */ +#define ARIZONA_HP1R_SC_POS_STS_WIDTH 1 /* HP1R_SC_POS_STS */ +#define ARIZONA_HP1L_SC_NEG_STS 0x0002 /* HP1L_SC_NEG_STS */ +#define ARIZONA_HP1L_SC_NEG_STS_MASK 0x0002 /* HP1L_SC_NEG_STS */ +#define ARIZONA_HP1L_SC_NEG_STS_SHIFT 1 /* HP1L_SC_NEG_STS */ +#define ARIZONA_HP1L_SC_NEG_STS_WIDTH 1 /* HP1L_SC_NEG_STS */ +#define ARIZONA_HP1L_SC_POS_STS 0x0001 /* HP1L_SC_POS_STS */ +#define ARIZONA_HP1L_SC_POS_STS_MASK 0x0001 /* HP1L_SC_POS_STS */ +#define ARIZONA_HP1L_SC_POS_STS_SHIFT 0 /* HP1L_SC_POS_STS */ +#define ARIZONA_HP1L_SC_POS_STS_WIDTH 1 /* HP1L_SC_POS_STS */ + +/* + * R3392 (0xD40) - IRQ Pin Status + */ +#define ARIZONA_IRQ2_STS 0x0002 /* IRQ2_STS */ +#define ARIZONA_IRQ2_STS_MASK 0x0002 /* IRQ2_STS */ +#define ARIZONA_IRQ2_STS_SHIFT 1 /* IRQ2_STS */ +#define ARIZONA_IRQ2_STS_WIDTH 1 /* IRQ2_STS */ +#define ARIZONA_IRQ1_STS 0x0001 /* IRQ1_STS */ +#define ARIZONA_IRQ1_STS_MASK 0x0001 /* IRQ1_STS */ +#define ARIZONA_IRQ1_STS_SHIFT 0 /* IRQ1_STS */ +#define ARIZONA_IRQ1_STS_WIDTH 1 /* IRQ1_STS */ + +/* + * R3393 (0xD41) - ADSP2 IRQ0 + */ +#define ARIZONA_DSP_IRQ2 0x0002 /* DSP_IRQ2 */ +#define ARIZONA_DSP_IRQ2_MASK 0x0002 /* DSP_IRQ2 */ +#define ARIZONA_DSP_IRQ2_SHIFT 1 /* DSP_IRQ2 */ +#define ARIZONA_DSP_IRQ2_WIDTH 1 /* DSP_IRQ2 */ +#define ARIZONA_DSP_IRQ1 0x0001 /* DSP_IRQ1 */ +#define 
ARIZONA_DSP_IRQ1_MASK 0x0001 /* DSP_IRQ1 */ +#define ARIZONA_DSP_IRQ1_SHIFT 0 /* DSP_IRQ1 */ +#define ARIZONA_DSP_IRQ1_WIDTH 1 /* DSP_IRQ1 */ + +/* + * R3408 (0xD50) - AOD wkup and trig + */ +#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS 0x0080 /* MICD_CLAMP_FALL_TRIG_STS */ +#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_MASK 0x0080 /* MICD_CLAMP_FALL_TRIG_STS */ +#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_SHIFT 7 /* MICD_CLAMP_FALL_TRIG_STS */ +#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_WIDTH 1 /* MICD_CLAMP_FALL_TRIG_STS */ +#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS 0x0040 /* MICD_CLAMP_RISE_TRIG_STS */ +#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_MASK 0x0040 /* MICD_CLAMP_RISE_TRIG_STS */ +#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_SHIFT 6 /* MICD_CLAMP_RISE_TRIG_STS */ +#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_WIDTH 1 /* MICD_CLAMP_RISE_TRIG_STS */ +#define ARIZONA_GP5_FALL_TRIG_STS 0x0020 /* GP5_FALL_TRIG_STS */ +#define ARIZONA_GP5_FALL_TRIG_STS_MASK 0x0020 /* GP5_FALL_TRIG_STS */ +#define ARIZONA_GP5_FALL_TRIG_STS_SHIFT 5 /* GP5_FALL_TRIG_STS */ +#define ARIZONA_GP5_FALL_TRIG_STS_WIDTH 1 /* GP5_FALL_TRIG_STS */ +#define ARIZONA_GP5_RISE_TRIG_STS 0x0010 /* GP5_RISE_TRIG_STS */ +#define ARIZONA_GP5_RISE_TRIG_STS_MASK 0x0010 /* GP5_RISE_TRIG_STS */ +#define ARIZONA_GP5_RISE_TRIG_STS_SHIFT 4 /* GP5_RISE_TRIG_STS */ +#define ARIZONA_GP5_RISE_TRIG_STS_WIDTH 1 /* GP5_RISE_TRIG_STS */ +#define ARIZONA_JD1_FALL_TRIG_STS 0x0008 /* JD1_FALL_TRIG_STS */ +#define ARIZONA_JD1_FALL_TRIG_STS_MASK 0x0008 /* JD1_FALL_TRIG_STS */ +#define ARIZONA_JD1_FALL_TRIG_STS_SHIFT 3 /* JD1_FALL_TRIG_STS */ +#define ARIZONA_JD1_FALL_TRIG_STS_WIDTH 1 /* JD1_FALL_TRIG_STS */ +#define ARIZONA_JD1_RISE_TRIG_STS 0x0004 /* JD1_RISE_TRIG_STS */ +#define ARIZONA_JD1_RISE_TRIG_STS_MASK 0x0004 /* JD1_RISE_TRIG_STS */ +#define ARIZONA_JD1_RISE_TRIG_STS_SHIFT 2 /* JD1_RISE_TRIG_STS */ +#define ARIZONA_JD1_RISE_TRIG_STS_WIDTH 1 /* JD1_RISE_TRIG_STS */ +#define ARIZONA_JD2_FALL_TRIG_STS 0x0002 /* JD2_FALL_TRIG_STS */ +#define ARIZONA_JD2_FALL_TRIG_STS_MASK 0x0002 /* JD2_FALL_TRIG_STS */ +#define ARIZONA_JD2_FALL_TRIG_STS_SHIFT 1 /* JD2_FALL_TRIG_STS */ +#define ARIZONA_JD2_FALL_TRIG_STS_WIDTH 1 /* JD2_FALL_TRIG_STS */ +#define ARIZONA_JD2_RISE_TRIG_STS 0x0001 /* JD2_RISE_TRIG_STS */ +#define ARIZONA_JD2_RISE_TRIG_STS_MASK 0x0001 /* JD2_RISE_TRIG_STS */ +#define ARIZONA_JD2_RISE_TRIG_STS_SHIFT 0 /* JD2_RISE_TRIG_STS */ +#define ARIZONA_JD2_RISE_TRIG_STS_WIDTH 1 /* JD2_RISE_TRIG_STS */ + +/* + * R3409 (0xD51) - AOD IRQ1 + */ +#define ARIZONA_MICD_CLAMP_FALL_EINT1 0x0080 /* MICD_CLAMP_FALL_EINT1 */ +#define ARIZONA_MICD_CLAMP_FALL_EINT1_MASK 0x0080 /* MICD_CLAMP_FALL_EINT1 */ +#define ARIZONA_MICD_CLAMP_FALL_EINT1_SHIFT 7 /* MICD_CLAMP_FALL_EINT1 */ +#define ARIZONA_MICD_CLAMP_RISE_EINT1 0x0040 /* MICD_CLAMP_RISE_EINT1 */ +#define ARIZONA_MICD_CLAMP_RISE_EINT1_MASK 0x0040 /* MICD_CLAMP_RISE_EINT1 */ +#define ARIZONA_MICD_CLAMP_RISE_EINT1_SHIFT 6 /* MICD_CLAMP_RISE_EINT1 */ +#define ARIZONA_GP5_FALL_EINT1 0x0020 /* GP5_FALL_EINT1 */ +#define ARIZONA_GP5_FALL_EINT1_MASK 0x0020 /* GP5_FALL_EINT1 */ +#define ARIZONA_GP5_FALL_EINT1_SHIFT 5 /* GP5_FALL_EINT1 */ +#define ARIZONA_GP5_FALL_EINT1_WIDTH 1 /* GP5_FALL_EINT1 */ +#define ARIZONA_GP5_RISE_EINT1 0x0010 /* GP5_RISE_EINT1 */ +#define ARIZONA_GP5_RISE_EINT1_MASK 0x0010 /* GP5_RISE_EINT1 */ +#define ARIZONA_GP5_RISE_EINT1_SHIFT 4 /* GP5_RISE_EINT1 */ +#define ARIZONA_GP5_RISE_EINT1_WIDTH 1 /* GP5_RISE_EINT1 */ +#define ARIZONA_JD1_FALL_EINT1 0x0008 /* JD1_FALL_EINT1 */ +#define 
ARIZONA_JD1_FALL_EINT1_MASK 0x0008 /* JD1_FALL_EINT1 */ +#define ARIZONA_JD1_FALL_EINT1_SHIFT 3 /* JD1_FALL_EINT1 */ +#define ARIZONA_JD1_FALL_EINT1_WIDTH 1 /* JD1_FALL_EINT1 */ +#define ARIZONA_JD1_RISE_EINT1 0x0004 /* JD1_RISE_EINT1 */ +#define ARIZONA_JD1_RISE_EINT1_MASK 0x0004 /* JD1_RISE_EINT1 */ +#define ARIZONA_JD1_RISE_EINT1_SHIFT 2 /* JD1_RISE_EINT1 */ +#define ARIZONA_JD1_RISE_EINT1_WIDTH 1 /* JD1_RISE_EINT1 */ +#define ARIZONA_JD2_FALL_EINT1 0x0002 /* JD2_FALL_EINT1 */ +#define ARIZONA_JD2_FALL_EINT1_MASK 0x0002 /* JD2_FALL_EINT1 */ +#define ARIZONA_JD2_FALL_EINT1_SHIFT 1 /* JD2_FALL_EINT1 */ +#define ARIZONA_JD2_FALL_EINT1_WIDTH 1 /* JD2_FALL_EINT1 */ +#define ARIZONA_JD2_RISE_EINT1 0x0001 /* JD2_RISE_EINT1 */ +#define ARIZONA_JD2_RISE_EINT1_MASK 0x0001 /* JD2_RISE_EINT1 */ +#define ARIZONA_JD2_RISE_EINT1_SHIFT 0 /* JD2_RISE_EINT1 */ +#define ARIZONA_JD2_RISE_EINT1_WIDTH 1 /* JD2_RISE_EINT1 */ + +/* + * R3410 (0xD52) - AOD IRQ2 + */ +#define ARIZONA_MICD_CLAMP_FALL_EINT2 0x0080 /* MICD_CLAMP_FALL_EINT2 */ +#define ARIZONA_MICD_CLAMP_FALL_EINT2_MASK 0x0080 /* MICD_CLAMP_FALL_EINT2 */ +#define ARIZONA_MICD_CLAMP_FALL_EINT2_SHIFT 7 /* MICD_CLAMP_FALL_EINT2 */ +#define ARIZONA_MICD_CLAMP_RISE_EINT2 0x0040 /* MICD_CLAMP_RISE_EINT2 */ +#define ARIZONA_MICD_CLAMP_RISE_EINT2_MASK 0x0040 /* MICD_CLAMP_RISE_EINT2 */ +#define ARIZONA_MICD_CLAMP_RISE_EINT2_SHIFT 6 /* MICD_CLAMP_RISE_EINT2 */ +#define ARIZONA_GP5_FALL_EINT2 0x0020 /* GP5_FALL_EINT2 */ +#define ARIZONA_GP5_FALL_EINT2_MASK 0x0020 /* GP5_FALL_EINT2 */ +#define ARIZONA_GP5_FALL_EINT2_SHIFT 5 /* GP5_FALL_EINT2 */ +#define ARIZONA_GP5_FALL_EINT2_WIDTH 1 /* GP5_FALL_EINT2 */ +#define ARIZONA_GP5_RISE_EINT2 0x0010 /* GP5_RISE_EINT2 */ +#define ARIZONA_GP5_RISE_EINT2_MASK 0x0010 /* GP5_RISE_EINT2 */ +#define ARIZONA_GP5_RISE_EINT2_SHIFT 4 /* GP5_RISE_EINT2 */ +#define ARIZONA_GP5_RISE_EINT2_WIDTH 1 /* GP5_RISE_EINT2 */ +#define ARIZONA_JD1_FALL_EINT2 0x0008 /* JD1_FALL_EINT2 */ +#define ARIZONA_JD1_FALL_EINT2_MASK 0x0008 /* JD1_FALL_EINT2 */ +#define ARIZONA_JD1_FALL_EINT2_SHIFT 3 /* JD1_FALL_EINT2 */ +#define ARIZONA_JD1_FALL_EINT2_WIDTH 1 /* JD1_FALL_EINT2 */ +#define ARIZONA_JD1_RISE_EINT2 0x0004 /* JD1_RISE_EINT2 */ +#define ARIZONA_JD1_RISE_EINT2_MASK 0x0004 /* JD1_RISE_EINT2 */ +#define ARIZONA_JD1_RISE_EINT2_SHIFT 2 /* JD1_RISE_EINT2 */ +#define ARIZONA_JD1_RISE_EINT2_WIDTH 1 /* JD1_RISE_EINT2 */ +#define ARIZONA_JD2_FALL_EINT2 0x0002 /* JD2_FALL_EINT2 */ +#define ARIZONA_JD2_FALL_EINT2_MASK 0x0002 /* JD2_FALL_EINT2 */ +#define ARIZONA_JD2_FALL_EINT2_SHIFT 1 /* JD2_FALL_EINT2 */ +#define ARIZONA_JD2_FALL_EINT2_WIDTH 1 /* JD2_FALL_EINT2 */ +#define ARIZONA_JD2_RISE_EINT2 0x0001 /* JD2_RISE_EINT2 */ +#define ARIZONA_JD2_RISE_EINT2_MASK 0x0001 /* JD2_RISE_EINT2 */ +#define ARIZONA_JD2_RISE_EINT2_SHIFT 0 /* JD2_RISE_EINT2 */ +#define ARIZONA_JD2_RISE_EINT2_WIDTH 1 /* JD2_RISE_EINT2 */ + +/* + * R3411 (0xD53) - AOD IRQ Mask IRQ1 + */ +#define ARIZONA_IM_GP5_FALL_EINT1 0x0020 /* IM_GP5_FALL_EINT1 */ +#define ARIZONA_IM_GP5_FALL_EINT1_MASK 0x0020 /* IM_GP5_FALL_EINT1 */ +#define ARIZONA_IM_GP5_FALL_EINT1_SHIFT 5 /* IM_GP5_FALL_EINT1 */ +#define ARIZONA_IM_GP5_FALL_EINT1_WIDTH 1 /* IM_GP5_FALL_EINT1 */ +#define ARIZONA_IM_GP5_RISE_EINT1 0x0010 /* IM_GP5_RISE_EINT1 */ +#define ARIZONA_IM_GP5_RISE_EINT1_MASK 0x0010 /* IM_GP5_RISE_EINT1 */ +#define ARIZONA_IM_GP5_RISE_EINT1_SHIFT 4 /* IM_GP5_RISE_EINT1 */ +#define ARIZONA_IM_GP5_RISE_EINT1_WIDTH 1 /* IM_GP5_RISE_EINT1 */ +#define ARIZONA_IM_JD1_FALL_EINT1 0x0008 /* IM_JD1_FALL_EINT1 */ 
+#define ARIZONA_IM_JD1_FALL_EINT1_MASK 0x0008 /* IM_JD1_FALL_EINT1 */ +#define ARIZONA_IM_JD1_FALL_EINT1_SHIFT 3 /* IM_JD1_FALL_EINT1 */ +#define ARIZONA_IM_JD1_FALL_EINT1_WIDTH 1 /* IM_JD1_FALL_EINT1 */ +#define ARIZONA_IM_JD1_RISE_EINT1 0x0004 /* IM_JD1_RISE_EINT1 */ +#define ARIZONA_IM_JD1_RISE_EINT1_MASK 0x0004 /* IM_JD1_RISE_EINT1 */ +#define ARIZONA_IM_JD1_RISE_EINT1_SHIFT 2 /* IM_JD1_RISE_EINT1 */ +#define ARIZONA_IM_JD1_RISE_EINT1_WIDTH 1 /* IM_JD1_RISE_EINT1 */ +#define ARIZONA_IM_JD2_FALL_EINT1 0x0002 /* IM_JD2_FALL_EINT1 */ +#define ARIZONA_IM_JD2_FALL_EINT1_MASK 0x0002 /* IM_JD2_FALL_EINT1 */ +#define ARIZONA_IM_JD2_FALL_EINT1_SHIFT 1 /* IM_JD2_FALL_EINT1 */ +#define ARIZONA_IM_JD2_FALL_EINT1_WIDTH 1 /* IM_JD2_FALL_EINT1 */ +#define ARIZONA_IM_JD2_RISE_EINT1 0x0001 /* IM_JD2_RISE_EINT1 */ +#define ARIZONA_IM_JD2_RISE_EINT1_MASK 0x0001 /* IM_JD2_RISE_EINT1 */ +#define ARIZONA_IM_JD2_RISE_EINT1_SHIFT 0 /* IM_JD2_RISE_EINT1 */ +#define ARIZONA_IM_JD2_RISE_EINT1_WIDTH 1 /* IM_JD2_RISE_EINT1 */ + +/* + * R3412 (0xD54) - AOD IRQ Mask IRQ2 + */ +#define ARIZONA_IM_GP5_FALL_EINT2 0x0020 /* IM_GP5_FALL_EINT2 */ +#define ARIZONA_IM_GP5_FALL_EINT2_MASK 0x0020 /* IM_GP5_FALL_EINT2 */ +#define ARIZONA_IM_GP5_FALL_EINT2_SHIFT 5 /* IM_GP5_FALL_EINT2 */ +#define ARIZONA_IM_GP5_FALL_EINT2_WIDTH 1 /* IM_GP5_FALL_EINT2 */ +#define ARIZONA_IM_GP5_RISE_EINT2 0x0010 /* IM_GP5_RISE_EINT2 */ +#define ARIZONA_IM_GP5_RISE_EINT2_MASK 0x0010 /* IM_GP5_RISE_EINT2 */ +#define ARIZONA_IM_GP5_RISE_EINT2_SHIFT 4 /* IM_GP5_RISE_EINT2 */ +#define ARIZONA_IM_GP5_RISE_EINT2_WIDTH 1 /* IM_GP5_RISE_EINT2 */ +#define ARIZONA_IM_JD1_FALL_EINT2 0x0008 /* IM_JD1_FALL_EINT2 */ +#define ARIZONA_IM_JD1_FALL_EINT2_MASK 0x0008 /* IM_JD1_FALL_EINT2 */ +#define ARIZONA_IM_JD1_FALL_EINT2_SHIFT 3 /* IM_JD1_FALL_EINT2 */ +#define ARIZONA_IM_JD1_FALL_EINT2_WIDTH 1 /* IM_JD1_FALL_EINT2 */ +#define ARIZONA_IM_JD1_RISE_EINT2 0x0004 /* IM_JD1_RISE_EINT2 */ +#define ARIZONA_IM_JD1_RISE_EINT2_MASK 0x0004 /* IM_JD1_RISE_EINT2 */ +#define ARIZONA_IM_JD1_RISE_EINT2_SHIFT 2 /* IM_JD1_RISE_EINT2 */ +#define ARIZONA_IM_JD1_RISE_EINT2_WIDTH 1 /* IM_JD1_RISE_EINT2 */ +#define ARIZONA_IM_JD2_FALL_EINT2 0x0002 /* IM_JD2_FALL_EINT2 */ +#define ARIZONA_IM_JD2_FALL_EINT2_MASK 0x0002 /* IM_JD2_FALL_EINT2 */ +#define ARIZONA_IM_JD2_FALL_EINT2_SHIFT 1 /* IM_JD2_FALL_EINT2 */ +#define ARIZONA_IM_JD2_FALL_EINT2_WIDTH 1 /* IM_JD2_FALL_EINT2 */ +#define ARIZONA_IM_JD2_RISE_EINT2 0x0001 /* IM_JD2_RISE_EINT2 */ +#define ARIZONA_IM_JD2_RISE_EINT2_MASK 0x0001 /* IM_JD2_RISE_EINT2 */ +#define ARIZONA_IM_JD2_RISE_EINT2_SHIFT 0 /* IM_JD2_RISE_EINT2 */ +#define ARIZONA_IM_JD2_RISE_EINT2_WIDTH 1 /* IM_JD2_RISE_EINT2 */ + +/* + * R3413 (0xD55) - AOD IRQ Raw Status + */ +#define ARIZONA_MICD_CLAMP_STS 0x0008 /* MICD_CLAMP_STS */ +#define ARIZONA_MICD_CLAMP_STS_MASK 0x0008 /* MICD_CLAMP_STS */ +#define ARIZONA_MICD_CLAMP_STS_SHIFT 3 /* MICD_CLAMP_STS */ +#define ARIZONA_MICD_CLAMP_STS_WIDTH 1 /* MICD_CLAMP_STS */ +#define ARIZONA_GP5_STS 0x0004 /* GP5_STS */ +#define ARIZONA_GP5_STS_MASK 0x0004 /* GP5_STS */ +#define ARIZONA_GP5_STS_SHIFT 2 /* GP5_STS */ +#define ARIZONA_GP5_STS_WIDTH 1 /* GP5_STS */ +#define ARIZONA_JD2_STS 0x0002 /* JD2_STS */ +#define ARIZONA_JD2_STS_MASK 0x0002 /* JD2_STS */ +#define ARIZONA_JD2_STS_SHIFT 1 /* JD2_STS */ +#define ARIZONA_JD2_STS_WIDTH 1 /* JD2_STS */ +#define ARIZONA_JD1_STS 0x0001 /* JD1_STS */ +#define ARIZONA_JD1_STS_MASK 0x0001 /* JD1_STS */ +#define ARIZONA_JD1_STS_SHIFT 0 /* JD1_STS */ +#define ARIZONA_JD1_STS_WIDTH 1 /* 
JD1_STS */ + +/* + * R3414 (0xD56) - Jack detect debounce + */ +#define ARIZONA_MICD_CLAMP_DB 0x0008 /* MICD_CLAMP_DB */ +#define ARIZONA_MICD_CLAMP_DB_MASK 0x0008 /* MICD_CLAMP_DB */ +#define ARIZONA_MICD_CLAMP_DB_SHIFT 3 /* MICD_CLAMP_DB */ +#define ARIZONA_MICD_CLAMP_DB_WIDTH 1 /* MICD_CLAMP_DB */ +#define ARIZONA_JD2_DB 0x0002 /* JD2_DB */ +#define ARIZONA_JD2_DB_MASK 0x0002 /* JD2_DB */ +#define ARIZONA_JD2_DB_SHIFT 1 /* JD2_DB */ +#define ARIZONA_JD2_DB_WIDTH 1 /* JD2_DB */ +#define ARIZONA_JD1_DB 0x0001 /* JD1_DB */ +#define ARIZONA_JD1_DB_MASK 0x0001 /* JD1_DB */ +#define ARIZONA_JD1_DB_SHIFT 0 /* JD1_DB */ +#define ARIZONA_JD1_DB_WIDTH 1 /* JD1_DB */ + +/* + * R3584 (0xE00) - FX_Ctrl1 + */ +#define ARIZONA_FX_RATE_MASK 0x7800 /* FX_RATE - [14:11] */ +#define ARIZONA_FX_RATE_SHIFT 11 /* FX_RATE - [14:11] */ +#define ARIZONA_FX_RATE_WIDTH 4 /* FX_RATE - [14:11] */ + +/* + * R3585 (0xE01) - FX_Ctrl2 + */ +#define ARIZONA_FX_STS_MASK 0xFFF0 /* FX_STS - [15:4] */ +#define ARIZONA_FX_STS_SHIFT 4 /* FX_STS - [15:4] */ +#define ARIZONA_FX_STS_WIDTH 12 /* FX_STS - [15:4] */ + +/* + * R3600 (0xE10) - EQ1_1 + */ +#define ARIZONA_EQ1_B1_GAIN_MASK 0xF800 /* EQ1_B1_GAIN - [15:11] */ +#define ARIZONA_EQ1_B1_GAIN_SHIFT 11 /* EQ1_B1_GAIN - [15:11] */ +#define ARIZONA_EQ1_B1_GAIN_WIDTH 5 /* EQ1_B1_GAIN - [15:11] */ +#define ARIZONA_EQ1_B2_GAIN_MASK 0x07C0 /* EQ1_B2_GAIN - [10:6] */ +#define ARIZONA_EQ1_B2_GAIN_SHIFT 6 /* EQ1_B2_GAIN - [10:6] */ +#define ARIZONA_EQ1_B2_GAIN_WIDTH 5 /* EQ1_B2_GAIN - [10:6] */ +#define ARIZONA_EQ1_B3_GAIN_MASK 0x003E /* EQ1_B3_GAIN - [5:1] */ +#define ARIZONA_EQ1_B3_GAIN_SHIFT 1 /* EQ1_B3_GAIN - [5:1] */ +#define ARIZONA_EQ1_B3_GAIN_WIDTH 5 /* EQ1_B3_GAIN - [5:1] */ +#define ARIZONA_EQ1_ENA 0x0001 /* EQ1_ENA */ +#define ARIZONA_EQ1_ENA_MASK 0x0001 /* EQ1_ENA */ +#define ARIZONA_EQ1_ENA_SHIFT 0 /* EQ1_ENA */ +#define ARIZONA_EQ1_ENA_WIDTH 1 /* EQ1_ENA */ + +/* + * R3601 (0xE11) - EQ1_2 + */ +#define ARIZONA_EQ1_B4_GAIN_MASK 0xF800 /* EQ1_B4_GAIN - [15:11] */ +#define ARIZONA_EQ1_B4_GAIN_SHIFT 11 /* EQ1_B4_GAIN - [15:11] */ +#define ARIZONA_EQ1_B4_GAIN_WIDTH 5 /* EQ1_B4_GAIN - [15:11] */ +#define ARIZONA_EQ1_B5_GAIN_MASK 0x07C0 /* EQ1_B5_GAIN - [10:6] */ +#define ARIZONA_EQ1_B5_GAIN_SHIFT 6 /* EQ1_B5_GAIN - [10:6] */ +#define ARIZONA_EQ1_B5_GAIN_WIDTH 5 /* EQ1_B5_GAIN - [10:6] */ +#define ARIZONA_EQ1_B1_MODE 0x0001 /* EQ1_B1_MODE */ +#define ARIZONA_EQ1_B1_MODE_MASK 0x0001 /* EQ1_B1_MODE */ +#define ARIZONA_EQ1_B1_MODE_SHIFT 0 /* EQ1_B1_MODE */ +#define ARIZONA_EQ1_B1_MODE_WIDTH 1 /* EQ1_B1_MODE */ + +/* + * R3602 (0xE12) - EQ1_3 + */ +#define ARIZONA_EQ1_B1_A_MASK 0xFFFF /* EQ1_B1_A - [15:0] */ +#define ARIZONA_EQ1_B1_A_SHIFT 0 /* EQ1_B1_A - [15:0] */ +#define ARIZONA_EQ1_B1_A_WIDTH 16 /* EQ1_B1_A - [15:0] */ + +/* + * R3603 (0xE13) - EQ1_4 + */ +#define ARIZONA_EQ1_B1_B_MASK 0xFFFF /* EQ1_B1_B - [15:0] */ +#define ARIZONA_EQ1_B1_B_SHIFT 0 /* EQ1_B1_B - [15:0] */ +#define ARIZONA_EQ1_B1_B_WIDTH 16 /* EQ1_B1_B - [15:0] */ + +/* + * R3604 (0xE14) - EQ1_5 + */ +#define ARIZONA_EQ1_B1_PG_MASK 0xFFFF /* EQ1_B1_PG - [15:0] */ +#define ARIZONA_EQ1_B1_PG_SHIFT 0 /* EQ1_B1_PG - [15:0] */ +#define ARIZONA_EQ1_B1_PG_WIDTH 16 /* EQ1_B1_PG - [15:0] */ + +/* + * R3605 (0xE15) - EQ1_6 + */ +#define ARIZONA_EQ1_B2_A_MASK 0xFFFF /* EQ1_B2_A - [15:0] */ +#define ARIZONA_EQ1_B2_A_SHIFT 0 /* EQ1_B2_A - [15:0] */ +#define ARIZONA_EQ1_B2_A_WIDTH 16 /* EQ1_B2_A - [15:0] */ + +/* + * R3606 (0xE16) - EQ1_7 + */ +#define ARIZONA_EQ1_B2_B_MASK 0xFFFF /* EQ1_B2_B - [15:0] */ +#define 
ARIZONA_EQ1_B2_B_SHIFT 0 /* EQ1_B2_B - [15:0] */ +#define ARIZONA_EQ1_B2_B_WIDTH 16 /* EQ1_B2_B - [15:0] */ + +/* + * R3607 (0xE17) - EQ1_8 + */ +#define ARIZONA_EQ1_B2_C_MASK 0xFFFF /* EQ1_B2_C - [15:0] */ +#define ARIZONA_EQ1_B2_C_SHIFT 0 /* EQ1_B2_C - [15:0] */ +#define ARIZONA_EQ1_B2_C_WIDTH 16 /* EQ1_B2_C - [15:0] */ + +/* + * R3608 (0xE18) - EQ1_9 + */ +#define ARIZONA_EQ1_B2_PG_MASK 0xFFFF /* EQ1_B2_PG - [15:0] */ +#define ARIZONA_EQ1_B2_PG_SHIFT 0 /* EQ1_B2_PG - [15:0] */ +#define ARIZONA_EQ1_B2_PG_WIDTH 16 /* EQ1_B2_PG - [15:0] */ + +/* + * R3609 (0xE19) - EQ1_10 + */ +#define ARIZONA_EQ1_B3_A_MASK 0xFFFF /* EQ1_B3_A - [15:0] */ +#define ARIZONA_EQ1_B3_A_SHIFT 0 /* EQ1_B3_A - [15:0] */ +#define ARIZONA_EQ1_B3_A_WIDTH 16 /* EQ1_B3_A - [15:0] */ + +/* + * R3610 (0xE1A) - EQ1_11 + */ +#define ARIZONA_EQ1_B3_B_MASK 0xFFFF /* EQ1_B3_B - [15:0] */ +#define ARIZONA_EQ1_B3_B_SHIFT 0 /* EQ1_B3_B - [15:0] */ +#define ARIZONA_EQ1_B3_B_WIDTH 16 /* EQ1_B3_B - [15:0] */ + +/* + * R3611 (0xE1B) - EQ1_12 + */ +#define ARIZONA_EQ1_B3_C_MASK 0xFFFF /* EQ1_B3_C - [15:0] */ +#define ARIZONA_EQ1_B3_C_SHIFT 0 /* EQ1_B3_C - [15:0] */ +#define ARIZONA_EQ1_B3_C_WIDTH 16 /* EQ1_B3_C - [15:0] */ + +/* + * R3612 (0xE1C) - EQ1_13 + */ +#define ARIZONA_EQ1_B3_PG_MASK 0xFFFF /* EQ1_B3_PG - [15:0] */ +#define ARIZONA_EQ1_B3_PG_SHIFT 0 /* EQ1_B3_PG - [15:0] */ +#define ARIZONA_EQ1_B3_PG_WIDTH 16 /* EQ1_B3_PG - [15:0] */ + +/* + * R3613 (0xE1D) - EQ1_14 + */ +#define ARIZONA_EQ1_B4_A_MASK 0xFFFF /* EQ1_B4_A - [15:0] */ +#define ARIZONA_EQ1_B4_A_SHIFT 0 /* EQ1_B4_A - [15:0] */ +#define ARIZONA_EQ1_B4_A_WIDTH 16 /* EQ1_B4_A - [15:0] */ + +/* + * R3614 (0xE1E) - EQ1_15 + */ +#define ARIZONA_EQ1_B4_B_MASK 0xFFFF /* EQ1_B4_B - [15:0] */ +#define ARIZONA_EQ1_B4_B_SHIFT 0 /* EQ1_B4_B - [15:0] */ +#define ARIZONA_EQ1_B4_B_WIDTH 16 /* EQ1_B4_B - [15:0] */ + +/* + * R3615 (0xE1F) - EQ1_16 + */ +#define ARIZONA_EQ1_B4_C_MASK 0xFFFF /* EQ1_B4_C - [15:0] */ +#define ARIZONA_EQ1_B4_C_SHIFT 0 /* EQ1_B4_C - [15:0] */ +#define ARIZONA_EQ1_B4_C_WIDTH 16 /* EQ1_B4_C - [15:0] */ + +/* + * R3616 (0xE20) - EQ1_17 + */ +#define ARIZONA_EQ1_B4_PG_MASK 0xFFFF /* EQ1_B4_PG - [15:0] */ +#define ARIZONA_EQ1_B4_PG_SHIFT 0 /* EQ1_B4_PG - [15:0] */ +#define ARIZONA_EQ1_B4_PG_WIDTH 16 /* EQ1_B4_PG - [15:0] */ + +/* + * R3617 (0xE21) - EQ1_18 + */ +#define ARIZONA_EQ1_B5_A_MASK 0xFFFF /* EQ1_B5_A - [15:0] */ +#define ARIZONA_EQ1_B5_A_SHIFT 0 /* EQ1_B5_A - [15:0] */ +#define ARIZONA_EQ1_B5_A_WIDTH 16 /* EQ1_B5_A - [15:0] */ + +/* + * R3618 (0xE22) - EQ1_19 + */ +#define ARIZONA_EQ1_B5_B_MASK 0xFFFF /* EQ1_B5_B - [15:0] */ +#define ARIZONA_EQ1_B5_B_SHIFT 0 /* EQ1_B5_B - [15:0] */ +#define ARIZONA_EQ1_B5_B_WIDTH 16 /* EQ1_B5_B - [15:0] */ + +/* + * R3619 (0xE23) - EQ1_20 + */ +#define ARIZONA_EQ1_B5_PG_MASK 0xFFFF /* EQ1_B5_PG - [15:0] */ +#define ARIZONA_EQ1_B5_PG_SHIFT 0 /* EQ1_B5_PG - [15:0] */ +#define ARIZONA_EQ1_B5_PG_WIDTH 16 /* EQ1_B5_PG - [15:0] */ + +/* + * R3620 (0xE24) - EQ1_21 + */ +#define ARIZONA_EQ1_B1_C_MASK 0xFFFF /* EQ1_B1_C - [15:0] */ +#define ARIZONA_EQ1_B1_C_SHIFT 0 /* EQ1_B1_C - [15:0] */ +#define ARIZONA_EQ1_B1_C_WIDTH 16 /* EQ1_B1_C - [15:0] */ + +/* + * R3622 (0xE26) - EQ2_1 + */ +#define ARIZONA_EQ2_B1_GAIN_MASK 0xF800 /* EQ2_B1_GAIN - [15:11] */ +#define ARIZONA_EQ2_B1_GAIN_SHIFT 11 /* EQ2_B1_GAIN - [15:11] */ +#define ARIZONA_EQ2_B1_GAIN_WIDTH 5 /* EQ2_B1_GAIN - [15:11] */ +#define ARIZONA_EQ2_B2_GAIN_MASK 0x07C0 /* EQ2_B2_GAIN - [10:6] */ +#define ARIZONA_EQ2_B2_GAIN_SHIFT 6 /* EQ2_B2_GAIN - [10:6] */ +#define 
ARIZONA_EQ2_B2_GAIN_WIDTH 5 /* EQ2_B2_GAIN - [10:6] */ +#define ARIZONA_EQ2_B3_GAIN_MASK 0x003E /* EQ2_B3_GAIN - [5:1] */ +#define ARIZONA_EQ2_B3_GAIN_SHIFT 1 /* EQ2_B3_GAIN - [5:1] */ +#define ARIZONA_EQ2_B3_GAIN_WIDTH 5 /* EQ2_B3_GAIN - [5:1] */ +#define ARIZONA_EQ2_ENA 0x0001 /* EQ2_ENA */ +#define ARIZONA_EQ2_ENA_MASK 0x0001 /* EQ2_ENA */ +#define ARIZONA_EQ2_ENA_SHIFT 0 /* EQ2_ENA */ +#define ARIZONA_EQ2_ENA_WIDTH 1 /* EQ2_ENA */ + +/* + * R3623 (0xE27) - EQ2_2 + */ +#define ARIZONA_EQ2_B4_GAIN_MASK 0xF800 /* EQ2_B4_GAIN - [15:11] */ +#define ARIZONA_EQ2_B4_GAIN_SHIFT 11 /* EQ2_B4_GAIN - [15:11] */ +#define ARIZONA_EQ2_B4_GAIN_WIDTH 5 /* EQ2_B4_GAIN - [15:11] */ +#define ARIZONA_EQ2_B5_GAIN_MASK 0x07C0 /* EQ2_B5_GAIN - [10:6] */ +#define ARIZONA_EQ2_B5_GAIN_SHIFT 6 /* EQ2_B5_GAIN - [10:6] */ +#define ARIZONA_EQ2_B5_GAIN_WIDTH 5 /* EQ2_B5_GAIN - [10:6] */ +#define ARIZONA_EQ2_B1_MODE 0x0001 /* EQ2_B1_MODE */ +#define ARIZONA_EQ2_B1_MODE_MASK 0x0001 /* EQ2_B1_MODE */ +#define ARIZONA_EQ2_B1_MODE_SHIFT 0 /* EQ2_B1_MODE */ +#define ARIZONA_EQ2_B1_MODE_WIDTH 1 /* EQ2_B1_MODE */ + +/* + * R3624 (0xE28) - EQ2_3 + */ +#define ARIZONA_EQ2_B1_A_MASK 0xFFFF /* EQ2_B1_A - [15:0] */ +#define ARIZONA_EQ2_B1_A_SHIFT 0 /* EQ2_B1_A - [15:0] */ +#define ARIZONA_EQ2_B1_A_WIDTH 16 /* EQ2_B1_A - [15:0] */ + +/* + * R3625 (0xE29) - EQ2_4 + */ +#define ARIZONA_EQ2_B1_B_MASK 0xFFFF /* EQ2_B1_B - [15:0] */ +#define ARIZONA_EQ2_B1_B_SHIFT 0 /* EQ2_B1_B - [15:0] */ +#define ARIZONA_EQ2_B1_B_WIDTH 16 /* EQ2_B1_B - [15:0] */ + +/* + * R3626 (0xE2A) - EQ2_5 + */ +#define ARIZONA_EQ2_B1_PG_MASK 0xFFFF /* EQ2_B1_PG - [15:0] */ +#define ARIZONA_EQ2_B1_PG_SHIFT 0 /* EQ2_B1_PG - [15:0] */ +#define ARIZONA_EQ2_B1_PG_WIDTH 16 /* EQ2_B1_PG - [15:0] */ + +/* + * R3627 (0xE2B) - EQ2_6 + */ +#define ARIZONA_EQ2_B2_A_MASK 0xFFFF /* EQ2_B2_A - [15:0] */ +#define ARIZONA_EQ2_B2_A_SHIFT 0 /* EQ2_B2_A - [15:0] */ +#define ARIZONA_EQ2_B2_A_WIDTH 16 /* EQ2_B2_A - [15:0] */ + +/* + * R3628 (0xE2C) - EQ2_7 + */ +#define ARIZONA_EQ2_B2_B_MASK 0xFFFF /* EQ2_B2_B - [15:0] */ +#define ARIZONA_EQ2_B2_B_SHIFT 0 /* EQ2_B2_B - [15:0] */ +#define ARIZONA_EQ2_B2_B_WIDTH 16 /* EQ2_B2_B - [15:0] */ + +/* + * R3629 (0xE2D) - EQ2_8 + */ +#define ARIZONA_EQ2_B2_C_MASK 0xFFFF /* EQ2_B2_C - [15:0] */ +#define ARIZONA_EQ2_B2_C_SHIFT 0 /* EQ2_B2_C - [15:0] */ +#define ARIZONA_EQ2_B2_C_WIDTH 16 /* EQ2_B2_C - [15:0] */ + +/* + * R3630 (0xE2E) - EQ2_9 + */ +#define ARIZONA_EQ2_B2_PG_MASK 0xFFFF /* EQ2_B2_PG - [15:0] */ +#define ARIZONA_EQ2_B2_PG_SHIFT 0 /* EQ2_B2_PG - [15:0] */ +#define ARIZONA_EQ2_B2_PG_WIDTH 16 /* EQ2_B2_PG - [15:0] */ + +/* + * R3631 (0xE2F) - EQ2_10 + */ +#define ARIZONA_EQ2_B3_A_MASK 0xFFFF /* EQ2_B3_A - [15:0] */ +#define ARIZONA_EQ2_B3_A_SHIFT 0 /* EQ2_B3_A - [15:0] */ +#define ARIZONA_EQ2_B3_A_WIDTH 16 /* EQ2_B3_A - [15:0] */ + +/* + * R3632 (0xE30) - EQ2_11 + */ +#define ARIZONA_EQ2_B3_B_MASK 0xFFFF /* EQ2_B3_B - [15:0] */ +#define ARIZONA_EQ2_B3_B_SHIFT 0 /* EQ2_B3_B - [15:0] */ +#define ARIZONA_EQ2_B3_B_WIDTH 16 /* EQ2_B3_B - [15:0] */ + +/* + * R3633 (0xE31) - EQ2_12 + */ +#define ARIZONA_EQ2_B3_C_MASK 0xFFFF /* EQ2_B3_C - [15:0] */ +#define ARIZONA_EQ2_B3_C_SHIFT 0 /* EQ2_B3_C - [15:0] */ +#define ARIZONA_EQ2_B3_C_WIDTH 16 /* EQ2_B3_C - [15:0] */ + +/* + * R3634 (0xE32) - EQ2_13 + */ +#define ARIZONA_EQ2_B3_PG_MASK 0xFFFF /* EQ2_B3_PG - [15:0] */ +#define ARIZONA_EQ2_B3_PG_SHIFT 0 /* EQ2_B3_PG - [15:0] */ +#define ARIZONA_EQ2_B3_PG_WIDTH 16 /* EQ2_B3_PG - [15:0] */ + +/* + * R3635 (0xE33) - EQ2_14 + */ +#define 
ARIZONA_EQ2_B4_A_MASK 0xFFFF /* EQ2_B4_A - [15:0] */ +#define ARIZONA_EQ2_B4_A_SHIFT 0 /* EQ2_B4_A - [15:0] */ +#define ARIZONA_EQ2_B4_A_WIDTH 16 /* EQ2_B4_A - [15:0] */ + +/* + * R3636 (0xE34) - EQ2_15 + */ +#define ARIZONA_EQ2_B4_B_MASK 0xFFFF /* EQ2_B4_B - [15:0] */ +#define ARIZONA_EQ2_B4_B_SHIFT 0 /* EQ2_B4_B - [15:0] */ +#define ARIZONA_EQ2_B4_B_WIDTH 16 /* EQ2_B4_B - [15:0] */ + +/* + * R3637 (0xE35) - EQ2_16 + */ +#define ARIZONA_EQ2_B4_C_MASK 0xFFFF /* EQ2_B4_C - [15:0] */ +#define ARIZONA_EQ2_B4_C_SHIFT 0 /* EQ2_B4_C - [15:0] */ +#define ARIZONA_EQ2_B4_C_WIDTH 16 /* EQ2_B4_C - [15:0] */ + +/* + * R3638 (0xE36) - EQ2_17 + */ +#define ARIZONA_EQ2_B4_PG_MASK 0xFFFF /* EQ2_B4_PG - [15:0] */ +#define ARIZONA_EQ2_B4_PG_SHIFT 0 /* EQ2_B4_PG - [15:0] */ +#define ARIZONA_EQ2_B4_PG_WIDTH 16 /* EQ2_B4_PG - [15:0] */ + +/* + * R3639 (0xE37) - EQ2_18 + */ +#define ARIZONA_EQ2_B5_A_MASK 0xFFFF /* EQ2_B5_A - [15:0] */ +#define ARIZONA_EQ2_B5_A_SHIFT 0 /* EQ2_B5_A - [15:0] */ +#define ARIZONA_EQ2_B5_A_WIDTH 16 /* EQ2_B5_A - [15:0] */ + +/* + * R3640 (0xE38) - EQ2_19 + */ +#define ARIZONA_EQ2_B5_B_MASK 0xFFFF /* EQ2_B5_B - [15:0] */ +#define ARIZONA_EQ2_B5_B_SHIFT 0 /* EQ2_B5_B - [15:0] */ +#define ARIZONA_EQ2_B5_B_WIDTH 16 /* EQ2_B5_B - [15:0] */ + +/* + * R3641 (0xE39) - EQ2_20 + */ +#define ARIZONA_EQ2_B5_PG_MASK 0xFFFF /* EQ2_B5_PG - [15:0] */ +#define ARIZONA_EQ2_B5_PG_SHIFT 0 /* EQ2_B5_PG - [15:0] */ +#define ARIZONA_EQ2_B5_PG_WIDTH 16 /* EQ2_B5_PG - [15:0] */ + +/* + * R3642 (0xE3A) - EQ2_21 + */ +#define ARIZONA_EQ2_B1_C_MASK 0xFFFF /* EQ2_B1_C - [15:0] */ +#define ARIZONA_EQ2_B1_C_SHIFT 0 /* EQ2_B1_C - [15:0] */ +#define ARIZONA_EQ2_B1_C_WIDTH 16 /* EQ2_B1_C - [15:0] */ + +/* + * R3644 (0xE3C) - EQ3_1 + */ +#define ARIZONA_EQ3_B1_GAIN_MASK 0xF800 /* EQ3_B1_GAIN - [15:11] */ +#define ARIZONA_EQ3_B1_GAIN_SHIFT 11 /* EQ3_B1_GAIN - [15:11] */ +#define ARIZONA_EQ3_B1_GAIN_WIDTH 5 /* EQ3_B1_GAIN - [15:11] */ +#define ARIZONA_EQ3_B2_GAIN_MASK 0x07C0 /* EQ3_B2_GAIN - [10:6] */ +#define ARIZONA_EQ3_B2_GAIN_SHIFT 6 /* EQ3_B2_GAIN - [10:6] */ +#define ARIZONA_EQ3_B2_GAIN_WIDTH 5 /* EQ3_B2_GAIN - [10:6] */ +#define ARIZONA_EQ3_B3_GAIN_MASK 0x003E /* EQ3_B3_GAIN - [5:1] */ +#define ARIZONA_EQ3_B3_GAIN_SHIFT 1 /* EQ3_B3_GAIN - [5:1] */ +#define ARIZONA_EQ3_B3_GAIN_WIDTH 5 /* EQ3_B3_GAIN - [5:1] */ +#define ARIZONA_EQ3_ENA 0x0001 /* EQ3_ENA */ +#define ARIZONA_EQ3_ENA_MASK 0x0001 /* EQ3_ENA */ +#define ARIZONA_EQ3_ENA_SHIFT 0 /* EQ3_ENA */ +#define ARIZONA_EQ3_ENA_WIDTH 1 /* EQ3_ENA */ + +/* + * R3645 (0xE3D) - EQ3_2 + */ +#define ARIZONA_EQ3_B4_GAIN_MASK 0xF800 /* EQ3_B4_GAIN - [15:11] */ +#define ARIZONA_EQ3_B4_GAIN_SHIFT 11 /* EQ3_B4_GAIN - [15:11] */ +#define ARIZONA_EQ3_B4_GAIN_WIDTH 5 /* EQ3_B4_GAIN - [15:11] */ +#define ARIZONA_EQ3_B5_GAIN_MASK 0x07C0 /* EQ3_B5_GAIN - [10:6] */ +#define ARIZONA_EQ3_B5_GAIN_SHIFT 6 /* EQ3_B5_GAIN - [10:6] */ +#define ARIZONA_EQ3_B5_GAIN_WIDTH 5 /* EQ3_B5_GAIN - [10:6] */ +#define ARIZONA_EQ3_B1_MODE 0x0001 /* EQ3_B1_MODE */ +#define ARIZONA_EQ3_B1_MODE_MASK 0x0001 /* EQ3_B1_MODE */ +#define ARIZONA_EQ3_B1_MODE_SHIFT 0 /* EQ3_B1_MODE */ +#define ARIZONA_EQ3_B1_MODE_WIDTH 1 /* EQ3_B1_MODE */ + +/* + * R3646 (0xE3E) - EQ3_3 + */ +#define ARIZONA_EQ3_B1_A_MASK 0xFFFF /* EQ3_B1_A - [15:0] */ +#define ARIZONA_EQ3_B1_A_SHIFT 0 /* EQ3_B1_A - [15:0] */ +#define ARIZONA_EQ3_B1_A_WIDTH 16 /* EQ3_B1_A - [15:0] */ + +/* + * R3647 (0xE3F) - EQ3_4 + */ +#define ARIZONA_EQ3_B1_B_MASK 0xFFFF /* EQ3_B1_B - [15:0] */ +#define ARIZONA_EQ3_B1_B_SHIFT 0 /* EQ3_B1_B - [15:0] */ 
+#define ARIZONA_EQ3_B1_B_WIDTH 16 /* EQ3_B1_B - [15:0] */ + +/* + * R3648 (0xE40) - EQ3_5 + */ +#define ARIZONA_EQ3_B1_PG_MASK 0xFFFF /* EQ3_B1_PG - [15:0] */ +#define ARIZONA_EQ3_B1_PG_SHIFT 0 /* EQ3_B1_PG - [15:0] */ +#define ARIZONA_EQ3_B1_PG_WIDTH 16 /* EQ3_B1_PG - [15:0] */ + +/* + * R3649 (0xE41) - EQ3_6 + */ +#define ARIZONA_EQ3_B2_A_MASK 0xFFFF /* EQ3_B2_A - [15:0] */ +#define ARIZONA_EQ3_B2_A_SHIFT 0 /* EQ3_B2_A - [15:0] */ +#define ARIZONA_EQ3_B2_A_WIDTH 16 /* EQ3_B2_A - [15:0] */ + +/* + * R3650 (0xE42) - EQ3_7 + */ +#define ARIZONA_EQ3_B2_B_MASK 0xFFFF /* EQ3_B2_B - [15:0] */ +#define ARIZONA_EQ3_B2_B_SHIFT 0 /* EQ3_B2_B - [15:0] */ +#define ARIZONA_EQ3_B2_B_WIDTH 16 /* EQ3_B2_B - [15:0] */ + +/* + * R3651 (0xE43) - EQ3_8 + */ +#define ARIZONA_EQ3_B2_C_MASK 0xFFFF /* EQ3_B2_C - [15:0] */ +#define ARIZONA_EQ3_B2_C_SHIFT 0 /* EQ3_B2_C - [15:0] */ +#define ARIZONA_EQ3_B2_C_WIDTH 16 /* EQ3_B2_C - [15:0] */ + +/* + * R3652 (0xE44) - EQ3_9 + */ +#define ARIZONA_EQ3_B2_PG_MASK 0xFFFF /* EQ3_B2_PG - [15:0] */ +#define ARIZONA_EQ3_B2_PG_SHIFT 0 /* EQ3_B2_PG - [15:0] */ +#define ARIZONA_EQ3_B2_PG_WIDTH 16 /* EQ3_B2_PG - [15:0] */ + +/* + * R3653 (0xE45) - EQ3_10 + */ +#define ARIZONA_EQ3_B3_A_MASK 0xFFFF /* EQ3_B3_A - [15:0] */ +#define ARIZONA_EQ3_B3_A_SHIFT 0 /* EQ3_B3_A - [15:0] */ +#define ARIZONA_EQ3_B3_A_WIDTH 16 /* EQ3_B3_A - [15:0] */ + +/* + * R3654 (0xE46) - EQ3_11 + */ +#define ARIZONA_EQ3_B3_B_MASK 0xFFFF /* EQ3_B3_B - [15:0] */ +#define ARIZONA_EQ3_B3_B_SHIFT 0 /* EQ3_B3_B - [15:0] */ +#define ARIZONA_EQ3_B3_B_WIDTH 16 /* EQ3_B3_B - [15:0] */ + +/* + * R3655 (0xE47) - EQ3_12 + */ +#define ARIZONA_EQ3_B3_C_MASK 0xFFFF /* EQ3_B3_C - [15:0] */ +#define ARIZONA_EQ3_B3_C_SHIFT 0 /* EQ3_B3_C - [15:0] */ +#define ARIZONA_EQ3_B3_C_WIDTH 16 /* EQ3_B3_C - [15:0] */ + +/* + * R3656 (0xE48) - EQ3_13 + */ +#define ARIZONA_EQ3_B3_PG_MASK 0xFFFF /* EQ3_B3_PG - [15:0] */ +#define ARIZONA_EQ3_B3_PG_SHIFT 0 /* EQ3_B3_PG - [15:0] */ +#define ARIZONA_EQ3_B3_PG_WIDTH 16 /* EQ3_B3_PG - [15:0] */ + +/* + * R3657 (0xE49) - EQ3_14 + */ +#define ARIZONA_EQ3_B4_A_MASK 0xFFFF /* EQ3_B4_A - [15:0] */ +#define ARIZONA_EQ3_B4_A_SHIFT 0 /* EQ3_B4_A - [15:0] */ +#define ARIZONA_EQ3_B4_A_WIDTH 16 /* EQ3_B4_A - [15:0] */ + +/* + * R3658 (0xE4A) - EQ3_15 + */ +#define ARIZONA_EQ3_B4_B_MASK 0xFFFF /* EQ3_B4_B - [15:0] */ +#define ARIZONA_EQ3_B4_B_SHIFT 0 /* EQ3_B4_B - [15:0] */ +#define ARIZONA_EQ3_B4_B_WIDTH 16 /* EQ3_B4_B - [15:0] */ + +/* + * R3659 (0xE4B) - EQ3_16 + */ +#define ARIZONA_EQ3_B4_C_MASK 0xFFFF /* EQ3_B4_C - [15:0] */ +#define ARIZONA_EQ3_B4_C_SHIFT 0 /* EQ3_B4_C - [15:0] */ +#define ARIZONA_EQ3_B4_C_WIDTH 16 /* EQ3_B4_C - [15:0] */ + +/* + * R3660 (0xE4C) - EQ3_17 + */ +#define ARIZONA_EQ3_B4_PG_MASK 0xFFFF /* EQ3_B4_PG - [15:0] */ +#define ARIZONA_EQ3_B4_PG_SHIFT 0 /* EQ3_B4_PG - [15:0] */ +#define ARIZONA_EQ3_B4_PG_WIDTH 16 /* EQ3_B4_PG - [15:0] */ + +/* + * R3661 (0xE4D) - EQ3_18 + */ +#define ARIZONA_EQ3_B5_A_MASK 0xFFFF /* EQ3_B5_A - [15:0] */ +#define ARIZONA_EQ3_B5_A_SHIFT 0 /* EQ3_B5_A - [15:0] */ +#define ARIZONA_EQ3_B5_A_WIDTH 16 /* EQ3_B5_A - [15:0] */ + +/* + * R3662 (0xE4E) - EQ3_19 + */ +#define ARIZONA_EQ3_B5_B_MASK 0xFFFF /* EQ3_B5_B - [15:0] */ +#define ARIZONA_EQ3_B5_B_SHIFT 0 /* EQ3_B5_B - [15:0] */ +#define ARIZONA_EQ3_B5_B_WIDTH 16 /* EQ3_B5_B - [15:0] */ + +/* + * R3663 (0xE4F) - EQ3_20 + */ +#define ARIZONA_EQ3_B5_PG_MASK 0xFFFF /* EQ3_B5_PG - [15:0] */ +#define ARIZONA_EQ3_B5_PG_SHIFT 0 /* EQ3_B5_PG - [15:0] */ +#define ARIZONA_EQ3_B5_PG_WIDTH 16 /* EQ3_B5_PG - [15:0] 
*/ + +/* + * R3664 (0xE50) - EQ3_21 + */ +#define ARIZONA_EQ3_B1_C_MASK 0xFFFF /* EQ3_B1_C - [15:0] */ +#define ARIZONA_EQ3_B1_C_SHIFT 0 /* EQ3_B1_C - [15:0] */ +#define ARIZONA_EQ3_B1_C_WIDTH 16 /* EQ3_B1_C - [15:0] */ + +/* + * R3666 (0xE52) - EQ4_1 + */ +#define ARIZONA_EQ4_B1_GAIN_MASK 0xF800 /* EQ4_B1_GAIN - [15:11] */ +#define ARIZONA_EQ4_B1_GAIN_SHIFT 11 /* EQ4_B1_GAIN - [15:11] */ +#define ARIZONA_EQ4_B1_GAIN_WIDTH 5 /* EQ4_B1_GAIN - [15:11] */ +#define ARIZONA_EQ4_B2_GAIN_MASK 0x07C0 /* EQ4_B2_GAIN - [10:6] */ +#define ARIZONA_EQ4_B2_GAIN_SHIFT 6 /* EQ4_B2_GAIN - [10:6] */ +#define ARIZONA_EQ4_B2_GAIN_WIDTH 5 /* EQ4_B2_GAIN - [10:6] */ +#define ARIZONA_EQ4_B3_GAIN_MASK 0x003E /* EQ4_B3_GAIN - [5:1] */ +#define ARIZONA_EQ4_B3_GAIN_SHIFT 1 /* EQ4_B3_GAIN - [5:1] */ +#define ARIZONA_EQ4_B3_GAIN_WIDTH 5 /* EQ4_B3_GAIN - [5:1] */ +#define ARIZONA_EQ4_ENA 0x0001 /* EQ4_ENA */ +#define ARIZONA_EQ4_ENA_MASK 0x0001 /* EQ4_ENA */ +#define ARIZONA_EQ4_ENA_SHIFT 0 /* EQ4_ENA */ +#define ARIZONA_EQ4_ENA_WIDTH 1 /* EQ4_ENA */ + +/* + * R3667 (0xE53) - EQ4_2 + */ +#define ARIZONA_EQ4_B4_GAIN_MASK 0xF800 /* EQ4_B4_GAIN - [15:11] */ +#define ARIZONA_EQ4_B4_GAIN_SHIFT 11 /* EQ4_B4_GAIN - [15:11] */ +#define ARIZONA_EQ4_B4_GAIN_WIDTH 5 /* EQ4_B4_GAIN - [15:11] */ +#define ARIZONA_EQ4_B5_GAIN_MASK 0x07C0 /* EQ4_B5_GAIN - [10:6] */ +#define ARIZONA_EQ4_B5_GAIN_SHIFT 6 /* EQ4_B5_GAIN - [10:6] */ +#define ARIZONA_EQ4_B5_GAIN_WIDTH 5 /* EQ4_B5_GAIN - [10:6] */ +#define ARIZONA_EQ4_B1_MODE 0x0001 /* EQ4_B1_MODE */ +#define ARIZONA_EQ4_B1_MODE_MASK 0x0001 /* EQ4_B1_MODE */ +#define ARIZONA_EQ4_B1_MODE_SHIFT 0 /* EQ4_B1_MODE */ +#define ARIZONA_EQ4_B1_MODE_WIDTH 1 /* EQ4_B1_MODE */ + +/* + * R3668 (0xE54) - EQ4_3 + */ +#define ARIZONA_EQ4_B1_A_MASK 0xFFFF /* EQ4_B1_A - [15:0] */ +#define ARIZONA_EQ4_B1_A_SHIFT 0 /* EQ4_B1_A - [15:0] */ +#define ARIZONA_EQ4_B1_A_WIDTH 16 /* EQ4_B1_A - [15:0] */ + +/* + * R3669 (0xE55) - EQ4_4 + */ +#define ARIZONA_EQ4_B1_B_MASK 0xFFFF /* EQ4_B1_B - [15:0] */ +#define ARIZONA_EQ4_B1_B_SHIFT 0 /* EQ4_B1_B - [15:0] */ +#define ARIZONA_EQ4_B1_B_WIDTH 16 /* EQ4_B1_B - [15:0] */ + +/* + * R3670 (0xE56) - EQ4_5 + */ +#define ARIZONA_EQ4_B1_PG_MASK 0xFFFF /* EQ4_B1_PG - [15:0] */ +#define ARIZONA_EQ4_B1_PG_SHIFT 0 /* EQ4_B1_PG - [15:0] */ +#define ARIZONA_EQ4_B1_PG_WIDTH 16 /* EQ4_B1_PG - [15:0] */ + +/* + * R3671 (0xE57) - EQ4_6 + */ +#define ARIZONA_EQ4_B2_A_MASK 0xFFFF /* EQ4_B2_A - [15:0] */ +#define ARIZONA_EQ4_B2_A_SHIFT 0 /* EQ4_B2_A - [15:0] */ +#define ARIZONA_EQ4_B2_A_WIDTH 16 /* EQ4_B2_A - [15:0] */ + +/* + * R3672 (0xE58) - EQ4_7 + */ +#define ARIZONA_EQ4_B2_B_MASK 0xFFFF /* EQ4_B2_B - [15:0] */ +#define ARIZONA_EQ4_B2_B_SHIFT 0 /* EQ4_B2_B - [15:0] */ +#define ARIZONA_EQ4_B2_B_WIDTH 16 /* EQ4_B2_B - [15:0] */ + +/* + * R3673 (0xE59) - EQ4_8 + */ +#define ARIZONA_EQ4_B2_C_MASK 0xFFFF /* EQ4_B2_C - [15:0] */ +#define ARIZONA_EQ4_B2_C_SHIFT 0 /* EQ4_B2_C - [15:0] */ +#define ARIZONA_EQ4_B2_C_WIDTH 16 /* EQ4_B2_C - [15:0] */ + +/* + * R3674 (0xE5A) - EQ4_9 + */ +#define ARIZONA_EQ4_B2_PG_MASK 0xFFFF /* EQ4_B2_PG - [15:0] */ +#define ARIZONA_EQ4_B2_PG_SHIFT 0 /* EQ4_B2_PG - [15:0] */ +#define ARIZONA_EQ4_B2_PG_WIDTH 16 /* EQ4_B2_PG - [15:0] */ + +/* + * R3675 (0xE5B) - EQ4_10 + */ +#define ARIZONA_EQ4_B3_A_MASK 0xFFFF /* EQ4_B3_A - [15:0] */ +#define ARIZONA_EQ4_B3_A_SHIFT 0 /* EQ4_B3_A - [15:0] */ +#define ARIZONA_EQ4_B3_A_WIDTH 16 /* EQ4_B3_A - [15:0] */ + +/* + * R3676 (0xE5C) - EQ4_11 + */ +#define ARIZONA_EQ4_B3_B_MASK 0xFFFF /* EQ4_B3_B - [15:0] */ +#define 
ARIZONA_EQ4_B3_B_SHIFT 0 /* EQ4_B3_B - [15:0] */ +#define ARIZONA_EQ4_B3_B_WIDTH 16 /* EQ4_B3_B - [15:0] */ + +/* + * R3677 (0xE5D) - EQ4_12 + */ +#define ARIZONA_EQ4_B3_C_MASK 0xFFFF /* EQ4_B3_C - [15:0] */ +#define ARIZONA_EQ4_B3_C_SHIFT 0 /* EQ4_B3_C - [15:0] */ +#define ARIZONA_EQ4_B3_C_WIDTH 16 /* EQ4_B3_C - [15:0] */ + +/* + * R3678 (0xE5E) - EQ4_13 + */ +#define ARIZONA_EQ4_B3_PG_MASK 0xFFFF /* EQ4_B3_PG - [15:0] */ +#define ARIZONA_EQ4_B3_PG_SHIFT 0 /* EQ4_B3_PG - [15:0] */ +#define ARIZONA_EQ4_B3_PG_WIDTH 16 /* EQ4_B3_PG - [15:0] */ + +/* + * R3679 (0xE5F) - EQ4_14 + */ +#define ARIZONA_EQ4_B4_A_MASK 0xFFFF /* EQ4_B4_A - [15:0] */ +#define ARIZONA_EQ4_B4_A_SHIFT 0 /* EQ4_B4_A - [15:0] */ +#define ARIZONA_EQ4_B4_A_WIDTH 16 /* EQ4_B4_A - [15:0] */ + +/* + * R3680 (0xE60) - EQ4_15 + */ +#define ARIZONA_EQ4_B4_B_MASK 0xFFFF /* EQ4_B4_B - [15:0] */ +#define ARIZONA_EQ4_B4_B_SHIFT 0 /* EQ4_B4_B - [15:0] */ +#define ARIZONA_EQ4_B4_B_WIDTH 16 /* EQ4_B4_B - [15:0] */ + +/* + * R3681 (0xE61) - EQ4_16 + */ +#define ARIZONA_EQ4_B4_C_MASK 0xFFFF /* EQ4_B4_C - [15:0] */ +#define ARIZONA_EQ4_B4_C_SHIFT 0 /* EQ4_B4_C - [15:0] */ +#define ARIZONA_EQ4_B4_C_WIDTH 16 /* EQ4_B4_C - [15:0] */ + +/* + * R3682 (0xE62) - EQ4_17 + */ +#define ARIZONA_EQ4_B4_PG_MASK 0xFFFF /* EQ4_B4_PG - [15:0] */ +#define ARIZONA_EQ4_B4_PG_SHIFT 0 /* EQ4_B4_PG - [15:0] */ +#define ARIZONA_EQ4_B4_PG_WIDTH 16 /* EQ4_B4_PG - [15:0] */ + +/* + * R3683 (0xE63) - EQ4_18 + */ +#define ARIZONA_EQ4_B5_A_MASK 0xFFFF /* EQ4_B5_A - [15:0] */ +#define ARIZONA_EQ4_B5_A_SHIFT 0 /* EQ4_B5_A - [15:0] */ +#define ARIZONA_EQ4_B5_A_WIDTH 16 /* EQ4_B5_A - [15:0] */ + +/* + * R3684 (0xE64) - EQ4_19 + */ +#define ARIZONA_EQ4_B5_B_MASK 0xFFFF /* EQ4_B5_B - [15:0] */ +#define ARIZONA_EQ4_B5_B_SHIFT 0 /* EQ4_B5_B - [15:0] */ +#define ARIZONA_EQ4_B5_B_WIDTH 16 /* EQ4_B5_B - [15:0] */ + +/* + * R3685 (0xE65) - EQ4_20 + */ +#define ARIZONA_EQ4_B5_PG_MASK 0xFFFF /* EQ4_B5_PG - [15:0] */ +#define ARIZONA_EQ4_B5_PG_SHIFT 0 /* EQ4_B5_PG - [15:0] */ +#define ARIZONA_EQ4_B5_PG_WIDTH 16 /* EQ4_B5_PG - [15:0] */ + +/* + * R3686 (0xE66) - EQ4_21 + */ +#define ARIZONA_EQ4_B1_C_MASK 0xFFFF /* EQ4_B1_C - [15:0] */ +#define ARIZONA_EQ4_B1_C_SHIFT 0 /* EQ4_B1_C - [15:0] */ +#define ARIZONA_EQ4_B1_C_WIDTH 16 /* EQ4_B1_C - [15:0] */ + +/* + * R3712 (0xE80) - DRC1 ctrl1 + */ +#define ARIZONA_DRC1_SIG_DET_RMS_MASK 0xF800 /* DRC1_SIG_DET_RMS - [15:11] */ +#define ARIZONA_DRC1_SIG_DET_RMS_SHIFT 11 /* DRC1_SIG_DET_RMS - [15:11] */ +#define ARIZONA_DRC1_SIG_DET_RMS_WIDTH 5 /* DRC1_SIG_DET_RMS - [15:11] */ +#define ARIZONA_DRC1_SIG_DET_PK_MASK 0x0600 /* DRC1_SIG_DET_PK - [10:9] */ +#define ARIZONA_DRC1_SIG_DET_PK_SHIFT 9 /* DRC1_SIG_DET_PK - [10:9] */ +#define ARIZONA_DRC1_SIG_DET_PK_WIDTH 2 /* DRC1_SIG_DET_PK - [10:9] */ +#define ARIZONA_DRC1_NG_ENA 0x0100 /* DRC1_NG_ENA */ +#define ARIZONA_DRC1_NG_ENA_MASK 0x0100 /* DRC1_NG_ENA */ +#define ARIZONA_DRC1_NG_ENA_SHIFT 8 /* DRC1_NG_ENA */ +#define ARIZONA_DRC1_NG_ENA_WIDTH 1 /* DRC1_NG_ENA */ +#define ARIZONA_DRC1_SIG_DET_MODE 0x0080 /* DRC1_SIG_DET_MODE */ +#define ARIZONA_DRC1_SIG_DET_MODE_MASK 0x0080 /* DRC1_SIG_DET_MODE */ +#define ARIZONA_DRC1_SIG_DET_MODE_SHIFT 7 /* DRC1_SIG_DET_MODE */ +#define ARIZONA_DRC1_SIG_DET_MODE_WIDTH 1 /* DRC1_SIG_DET_MODE */ +#define ARIZONA_DRC1_SIG_DET 0x0040 /* DRC1_SIG_DET */ +#define ARIZONA_DRC1_SIG_DET_MASK 0x0040 /* DRC1_SIG_DET */ +#define ARIZONA_DRC1_SIG_DET_SHIFT 6 /* DRC1_SIG_DET */ +#define ARIZONA_DRC1_SIG_DET_WIDTH 1 /* DRC1_SIG_DET */ +#define ARIZONA_DRC1_KNEE2_OP_ENA 0x0020 
/* DRC1_KNEE2_OP_ENA */ +#define ARIZONA_DRC1_KNEE2_OP_ENA_MASK 0x0020 /* DRC1_KNEE2_OP_ENA */ +#define ARIZONA_DRC1_KNEE2_OP_ENA_SHIFT 5 /* DRC1_KNEE2_OP_ENA */ +#define ARIZONA_DRC1_KNEE2_OP_ENA_WIDTH 1 /* DRC1_KNEE2_OP_ENA */ +#define ARIZONA_DRC1_QR 0x0010 /* DRC1_QR */ +#define ARIZONA_DRC1_QR_MASK 0x0010 /* DRC1_QR */ +#define ARIZONA_DRC1_QR_SHIFT 4 /* DRC1_QR */ +#define ARIZONA_DRC1_QR_WIDTH 1 /* DRC1_QR */ +#define ARIZONA_DRC1_ANTICLIP 0x0008 /* DRC1_ANTICLIP */ +#define ARIZONA_DRC1_ANTICLIP_MASK 0x0008 /* DRC1_ANTICLIP */ +#define ARIZONA_DRC1_ANTICLIP_SHIFT 3 /* DRC1_ANTICLIP */ +#define ARIZONA_DRC1_ANTICLIP_WIDTH 1 /* DRC1_ANTICLIP */ +#define ARIZONA_DRC1L_ENA 0x0002 /* DRC1L_ENA */ +#define ARIZONA_DRC1L_ENA_MASK 0x0002 /* DRC1L_ENA */ +#define ARIZONA_DRC1L_ENA_SHIFT 1 /* DRC1L_ENA */ +#define ARIZONA_DRC1L_ENA_WIDTH 1 /* DRC1L_ENA */ +#define ARIZONA_DRC1R_ENA 0x0001 /* DRC1R_ENA */ +#define ARIZONA_DRC1R_ENA_MASK 0x0001 /* DRC1R_ENA */ +#define ARIZONA_DRC1R_ENA_SHIFT 0 /* DRC1R_ENA */ +#define ARIZONA_DRC1R_ENA_WIDTH 1 /* DRC1R_ENA */ + +/* + * R3713 (0xE81) - DRC1 ctrl2 + */ +#define ARIZONA_DRC1_ATK_MASK 0x1E00 /* DRC1_ATK - [12:9] */ +#define ARIZONA_DRC1_ATK_SHIFT 9 /* DRC1_ATK - [12:9] */ +#define ARIZONA_DRC1_ATK_WIDTH 4 /* DRC1_ATK - [12:9] */ +#define ARIZONA_DRC1_DCY_MASK 0x01E0 /* DRC1_DCY - [8:5] */ +#define ARIZONA_DRC1_DCY_SHIFT 5 /* DRC1_DCY - [8:5] */ +#define ARIZONA_DRC1_DCY_WIDTH 4 /* DRC1_DCY - [8:5] */ +#define ARIZONA_DRC1_MINGAIN_MASK 0x001C /* DRC1_MINGAIN - [4:2] */ +#define ARIZONA_DRC1_MINGAIN_SHIFT 2 /* DRC1_MINGAIN - [4:2] */ +#define ARIZONA_DRC1_MINGAIN_WIDTH 3 /* DRC1_MINGAIN - [4:2] */ +#define ARIZONA_DRC1_MAXGAIN_MASK 0x0003 /* DRC1_MAXGAIN - [1:0] */ +#define ARIZONA_DRC1_MAXGAIN_SHIFT 0 /* DRC1_MAXGAIN - [1:0] */ +#define ARIZONA_DRC1_MAXGAIN_WIDTH 2 /* DRC1_MAXGAIN - [1:0] */ + +/* + * R3714 (0xE82) - DRC1 ctrl3 + */ +#define ARIZONA_DRC1_NG_MINGAIN_MASK 0xF000 /* DRC1_NG_MINGAIN - [15:12] */ +#define ARIZONA_DRC1_NG_MINGAIN_SHIFT 12 /* DRC1_NG_MINGAIN - [15:12] */ +#define ARIZONA_DRC1_NG_MINGAIN_WIDTH 4 /* DRC1_NG_MINGAIN - [15:12] */ +#define ARIZONA_DRC1_NG_EXP_MASK 0x0C00 /* DRC1_NG_EXP - [11:10] */ +#define ARIZONA_DRC1_NG_EXP_SHIFT 10 /* DRC1_NG_EXP - [11:10] */ +#define ARIZONA_DRC1_NG_EXP_WIDTH 2 /* DRC1_NG_EXP - [11:10] */ +#define ARIZONA_DRC1_QR_THR_MASK 0x0300 /* DRC1_QR_THR - [9:8] */ +#define ARIZONA_DRC1_QR_THR_SHIFT 8 /* DRC1_QR_THR - [9:8] */ +#define ARIZONA_DRC1_QR_THR_WIDTH 2 /* DRC1_QR_THR - [9:8] */ +#define ARIZONA_DRC1_QR_DCY_MASK 0x00C0 /* DRC1_QR_DCY - [7:6] */ +#define ARIZONA_DRC1_QR_DCY_SHIFT 6 /* DRC1_QR_DCY - [7:6] */ +#define ARIZONA_DRC1_QR_DCY_WIDTH 2 /* DRC1_QR_DCY - [7:6] */ +#define ARIZONA_DRC1_HI_COMP_MASK 0x0038 /* DRC1_HI_COMP - [5:3] */ +#define ARIZONA_DRC1_HI_COMP_SHIFT 3 /* DRC1_HI_COMP - [5:3] */ +#define ARIZONA_DRC1_HI_COMP_WIDTH 3 /* DRC1_HI_COMP - [5:3] */ +#define ARIZONA_DRC1_LO_COMP_MASK 0x0007 /* DRC1_LO_COMP - [2:0] */ +#define ARIZONA_DRC1_LO_COMP_SHIFT 0 /* DRC1_LO_COMP - [2:0] */ +#define ARIZONA_DRC1_LO_COMP_WIDTH 3 /* DRC1_LO_COMP - [2:0] */ + +/* + * R3715 (0xE83) - DRC1 ctrl4 + */ +#define ARIZONA_DRC1_KNEE_IP_MASK 0x07E0 /* DRC1_KNEE_IP - [10:5] */ +#define ARIZONA_DRC1_KNEE_IP_SHIFT 5 /* DRC1_KNEE_IP - [10:5] */ +#define ARIZONA_DRC1_KNEE_IP_WIDTH 6 /* DRC1_KNEE_IP - [10:5] */ +#define ARIZONA_DRC1_KNEE_OP_MASK 0x001F /* DRC1_KNEE_OP - [4:0] */ +#define ARIZONA_DRC1_KNEE_OP_SHIFT 0 /* DRC1_KNEE_OP - [4:0] */ +#define ARIZONA_DRC1_KNEE_OP_WIDTH 5 /* DRC1_KNEE_OP - 
[4:0] */ + +/* + * R3716 (0xE84) - DRC1 ctrl5 + */ +#define ARIZONA_DRC1_KNEE2_IP_MASK 0x03E0 /* DRC1_KNEE2_IP - [9:5] */ +#define ARIZONA_DRC1_KNEE2_IP_SHIFT 5 /* DRC1_KNEE2_IP - [9:5] */ +#define ARIZONA_DRC1_KNEE2_IP_WIDTH 5 /* DRC1_KNEE2_IP - [9:5] */ +#define ARIZONA_DRC1_KNEE2_OP_MASK 0x001F /* DRC1_KNEE2_OP - [4:0] */ +#define ARIZONA_DRC1_KNEE2_OP_SHIFT 0 /* DRC1_KNEE2_OP - [4:0] */ +#define ARIZONA_DRC1_KNEE2_OP_WIDTH 5 /* DRC1_KNEE2_OP - [4:0] */ + +/* + * R3721 (0xE89) - DRC2 ctrl1 + */ +#define ARIZONA_DRC2_SIG_DET_RMS_MASK 0xF800 /* DRC2_SIG_DET_RMS - [15:11] */ +#define ARIZONA_DRC2_SIG_DET_RMS_SHIFT 11 /* DRC2_SIG_DET_RMS - [15:11] */ +#define ARIZONA_DRC2_SIG_DET_RMS_WIDTH 5 /* DRC2_SIG_DET_RMS - [15:11] */ +#define ARIZONA_DRC2_SIG_DET_PK_MASK 0x0600 /* DRC2_SIG_DET_PK - [10:9] */ +#define ARIZONA_DRC2_SIG_DET_PK_SHIFT 9 /* DRC2_SIG_DET_PK - [10:9] */ +#define ARIZONA_DRC2_SIG_DET_PK_WIDTH 2 /* DRC2_SIG_DET_PK - [10:9] */ +#define ARIZONA_DRC2_NG_ENA 0x0100 /* DRC2_NG_ENA */ +#define ARIZONA_DRC2_NG_ENA_MASK 0x0100 /* DRC2_NG_ENA */ +#define ARIZONA_DRC2_NG_ENA_SHIFT 8 /* DRC2_NG_ENA */ +#define ARIZONA_DRC2_NG_ENA_WIDTH 1 /* DRC2_NG_ENA */ +#define ARIZONA_DRC2_SIG_DET_MODE 0x0080 /* DRC2_SIG_DET_MODE */ +#define ARIZONA_DRC2_SIG_DET_MODE_MASK 0x0080 /* DRC2_SIG_DET_MODE */ +#define ARIZONA_DRC2_SIG_DET_MODE_SHIFT 7 /* DRC2_SIG_DET_MODE */ +#define ARIZONA_DRC2_SIG_DET_MODE_WIDTH 1 /* DRC2_SIG_DET_MODE */ +#define ARIZONA_DRC2_SIG_DET 0x0040 /* DRC2_SIG_DET */ +#define ARIZONA_DRC2_SIG_DET_MASK 0x0040 /* DRC2_SIG_DET */ +#define ARIZONA_DRC2_SIG_DET_SHIFT 6 /* DRC2_SIG_DET */ +#define ARIZONA_DRC2_SIG_DET_WIDTH 1 /* DRC2_SIG_DET */ +#define ARIZONA_DRC2_KNEE2_OP_ENA 0x0020 /* DRC2_KNEE2_OP_ENA */ +#define ARIZONA_DRC2_KNEE2_OP_ENA_MASK 0x0020 /* DRC2_KNEE2_OP_ENA */ +#define ARIZONA_DRC2_KNEE2_OP_ENA_SHIFT 5 /* DRC2_KNEE2_OP_ENA */ +#define ARIZONA_DRC2_KNEE2_OP_ENA_WIDTH 1 /* DRC2_KNEE2_OP_ENA */ +#define ARIZONA_DRC2_QR 0x0010 /* DRC2_QR */ +#define ARIZONA_DRC2_QR_MASK 0x0010 /* DRC2_QR */ +#define ARIZONA_DRC2_QR_SHIFT 4 /* DRC2_QR */ +#define ARIZONA_DRC2_QR_WIDTH 1 /* DRC2_QR */ +#define ARIZONA_DRC2_ANTICLIP 0x0008 /* DRC2_ANTICLIP */ +#define ARIZONA_DRC2_ANTICLIP_MASK 0x0008 /* DRC2_ANTICLIP */ +#define ARIZONA_DRC2_ANTICLIP_SHIFT 3 /* DRC2_ANTICLIP */ +#define ARIZONA_DRC2_ANTICLIP_WIDTH 1 /* DRC2_ANTICLIP */ +#define ARIZONA_DRC2L_ENA 0x0002 /* DRC2L_ENA */ +#define ARIZONA_DRC2L_ENA_MASK 0x0002 /* DRC2L_ENA */ +#define ARIZONA_DRC2L_ENA_SHIFT 1 /* DRC2L_ENA */ +#define ARIZONA_DRC2L_ENA_WIDTH 1 /* DRC2L_ENA */ +#define ARIZONA_DRC2R_ENA 0x0001 /* DRC2R_ENA */ +#define ARIZONA_DRC2R_ENA_MASK 0x0001 /* DRC2R_ENA */ +#define ARIZONA_DRC2R_ENA_SHIFT 0 /* DRC2R_ENA */ +#define ARIZONA_DRC2R_ENA_WIDTH 1 /* DRC2R_ENA */ + +/* + * R3722 (0xE8A) - DRC2 ctrl2 + */ +#define ARIZONA_DRC2_ATK_MASK 0x1E00 /* DRC2_ATK - [12:9] */ +#define ARIZONA_DRC2_ATK_SHIFT 9 /* DRC2_ATK - [12:9] */ +#define ARIZONA_DRC2_ATK_WIDTH 4 /* DRC2_ATK - [12:9] */ +#define ARIZONA_DRC2_DCY_MASK 0x01E0 /* DRC2_DCY - [8:5] */ +#define ARIZONA_DRC2_DCY_SHIFT 5 /* DRC2_DCY - [8:5] */ +#define ARIZONA_DRC2_DCY_WIDTH 4 /* DRC2_DCY - [8:5] */ +#define ARIZONA_DRC2_MINGAIN_MASK 0x001C /* DRC2_MINGAIN - [4:2] */ +#define ARIZONA_DRC2_MINGAIN_SHIFT 2 /* DRC2_MINGAIN - [4:2] */ +#define ARIZONA_DRC2_MINGAIN_WIDTH 3 /* DRC2_MINGAIN - [4:2] */ +#define ARIZONA_DRC2_MAXGAIN_MASK 0x0003 /* DRC2_MAXGAIN - [1:0] */ +#define ARIZONA_DRC2_MAXGAIN_SHIFT 0 /* DRC2_MAXGAIN - [1:0] */ +#define 
ARIZONA_DRC2_MAXGAIN_WIDTH 2 /* DRC2_MAXGAIN - [1:0] */ + +/* + * R3723 (0xE8B) - DRC2 ctrl3 + */ +#define ARIZONA_DRC2_NG_MINGAIN_MASK 0xF000 /* DRC2_NG_MINGAIN - [15:12] */ +#define ARIZONA_DRC2_NG_MINGAIN_SHIFT 12 /* DRC2_NG_MINGAIN - [15:12] */ +#define ARIZONA_DRC2_NG_MINGAIN_WIDTH 4 /* DRC2_NG_MINGAIN - [15:12] */ +#define ARIZONA_DRC2_NG_EXP_MASK 0x0C00 /* DRC2_NG_EXP - [11:10] */ +#define ARIZONA_DRC2_NG_EXP_SHIFT 10 /* DRC2_NG_EXP - [11:10] */ +#define ARIZONA_DRC2_NG_EXP_WIDTH 2 /* DRC2_NG_EXP - [11:10] */ +#define ARIZONA_DRC2_QR_THR_MASK 0x0300 /* DRC2_QR_THR - [9:8] */ +#define ARIZONA_DRC2_QR_THR_SHIFT 8 /* DRC2_QR_THR - [9:8] */ +#define ARIZONA_DRC2_QR_THR_WIDTH 2 /* DRC2_QR_THR - [9:8] */ +#define ARIZONA_DRC2_QR_DCY_MASK 0x00C0 /* DRC2_QR_DCY - [7:6] */ +#define ARIZONA_DRC2_QR_DCY_SHIFT 6 /* DRC2_QR_DCY - [7:6] */ +#define ARIZONA_DRC2_QR_DCY_WIDTH 2 /* DRC2_QR_DCY - [7:6] */ +#define ARIZONA_DRC2_HI_COMP_MASK 0x0038 /* DRC2_HI_COMP - [5:3] */ +#define ARIZONA_DRC2_HI_COMP_SHIFT 3 /* DRC2_HI_COMP - [5:3] */ +#define ARIZONA_DRC2_HI_COMP_WIDTH 3 /* DRC2_HI_COMP - [5:3] */ +#define ARIZONA_DRC2_LO_COMP_MASK 0x0007 /* DRC2_LO_COMP - [2:0] */ +#define ARIZONA_DRC2_LO_COMP_SHIFT 0 /* DRC2_LO_COMP - [2:0] */ +#define ARIZONA_DRC2_LO_COMP_WIDTH 3 /* DRC2_LO_COMP - [2:0] */ + +/* + * R3724 (0xE8C) - DRC2 ctrl4 + */ +#define ARIZONA_DRC2_KNEE_IP_MASK 0x07E0 /* DRC2_KNEE_IP - [10:5] */ +#define ARIZONA_DRC2_KNEE_IP_SHIFT 5 /* DRC2_KNEE_IP - [10:5] */ +#define ARIZONA_DRC2_KNEE_IP_WIDTH 6 /* DRC2_KNEE_IP - [10:5] */ +#define ARIZONA_DRC2_KNEE_OP_MASK 0x001F /* DRC2_KNEE_OP - [4:0] */ +#define ARIZONA_DRC2_KNEE_OP_SHIFT 0 /* DRC2_KNEE_OP - [4:0] */ +#define ARIZONA_DRC2_KNEE_OP_WIDTH 5 /* DRC2_KNEE_OP - [4:0] */ + +/* + * R3725 (0xE8D) - DRC2 ctrl5 + */ +#define ARIZONA_DRC2_KNEE2_IP_MASK 0x03E0 /* DRC2_KNEE2_IP - [9:5] */ +#define ARIZONA_DRC2_KNEE2_IP_SHIFT 5 /* DRC2_KNEE2_IP - [9:5] */ +#define ARIZONA_DRC2_KNEE2_IP_WIDTH 5 /* DRC2_KNEE2_IP - [9:5] */ +#define ARIZONA_DRC2_KNEE2_OP_MASK 0x001F /* DRC2_KNEE2_OP - [4:0] */ +#define ARIZONA_DRC2_KNEE2_OP_SHIFT 0 /* DRC2_KNEE2_OP - [4:0] */ +#define ARIZONA_DRC2_KNEE2_OP_WIDTH 5 /* DRC2_KNEE2_OP - [4:0] */ + +/* + * R3776 (0xEC0) - HPLPF1_1 + */ +#define ARIZONA_LHPF1_MODE 0x0002 /* LHPF1_MODE */ +#define ARIZONA_LHPF1_MODE_MASK 0x0002 /* LHPF1_MODE */ +#define ARIZONA_LHPF1_MODE_SHIFT 1 /* LHPF1_MODE */ +#define ARIZONA_LHPF1_MODE_WIDTH 1 /* LHPF1_MODE */ +#define ARIZONA_LHPF1_ENA 0x0001 /* LHPF1_ENA */ +#define ARIZONA_LHPF1_ENA_MASK 0x0001 /* LHPF1_ENA */ +#define ARIZONA_LHPF1_ENA_SHIFT 0 /* LHPF1_ENA */ +#define ARIZONA_LHPF1_ENA_WIDTH 1 /* LHPF1_ENA */ + +/* + * R3777 (0xEC1) - HPLPF1_2 + */ +#define ARIZONA_LHPF1_COEFF_MASK 0xFFFF /* LHPF1_COEFF - [15:0] */ +#define ARIZONA_LHPF1_COEFF_SHIFT 0 /* LHPF1_COEFF - [15:0] */ +#define ARIZONA_LHPF1_COEFF_WIDTH 16 /* LHPF1_COEFF - [15:0] */ + +/* + * R3780 (0xEC4) - HPLPF2_1 + */ +#define ARIZONA_LHPF2_MODE 0x0002 /* LHPF2_MODE */ +#define ARIZONA_LHPF2_MODE_MASK 0x0002 /* LHPF2_MODE */ +#define ARIZONA_LHPF2_MODE_SHIFT 1 /* LHPF2_MODE */ +#define ARIZONA_LHPF2_MODE_WIDTH 1 /* LHPF2_MODE */ +#define ARIZONA_LHPF2_ENA 0x0001 /* LHPF2_ENA */ +#define ARIZONA_LHPF2_ENA_MASK 0x0001 /* LHPF2_ENA */ +#define ARIZONA_LHPF2_ENA_SHIFT 0 /* LHPF2_ENA */ +#define ARIZONA_LHPF2_ENA_WIDTH 1 /* LHPF2_ENA */ + +/* + * R3781 (0xEC5) - HPLPF2_2 + */ +#define ARIZONA_LHPF2_COEFF_MASK 0xFFFF /* LHPF2_COEFF - [15:0] */ +#define ARIZONA_LHPF2_COEFF_SHIFT 0 /* LHPF2_COEFF - [15:0] */ +#define 
ARIZONA_LHPF2_COEFF_WIDTH 16 /* LHPF2_COEFF - [15:0] */ + +/* + * R3784 (0xEC8) - HPLPF3_1 + */ +#define ARIZONA_LHPF3_MODE 0x0002 /* LHPF3_MODE */ +#define ARIZONA_LHPF3_MODE_MASK 0x0002 /* LHPF3_MODE */ +#define ARIZONA_LHPF3_MODE_SHIFT 1 /* LHPF3_MODE */ +#define ARIZONA_LHPF3_MODE_WIDTH 1 /* LHPF3_MODE */ +#define ARIZONA_LHPF3_ENA 0x0001 /* LHPF3_ENA */ +#define ARIZONA_LHPF3_ENA_MASK 0x0001 /* LHPF3_ENA */ +#define ARIZONA_LHPF3_ENA_SHIFT 0 /* LHPF3_ENA */ +#define ARIZONA_LHPF3_ENA_WIDTH 1 /* LHPF3_ENA */ + +/* + * R3785 (0xEC9) - HPLPF3_2 + */ +#define ARIZONA_LHPF3_COEFF_MASK 0xFFFF /* LHPF3_COEFF - [15:0] */ +#define ARIZONA_LHPF3_COEFF_SHIFT 0 /* LHPF3_COEFF - [15:0] */ +#define ARIZONA_LHPF3_COEFF_WIDTH 16 /* LHPF3_COEFF - [15:0] */ + +/* + * R3788 (0xECC) - HPLPF4_1 + */ +#define ARIZONA_LHPF4_MODE 0x0002 /* LHPF4_MODE */ +#define ARIZONA_LHPF4_MODE_MASK 0x0002 /* LHPF4_MODE */ +#define ARIZONA_LHPF4_MODE_SHIFT 1 /* LHPF4_MODE */ +#define ARIZONA_LHPF4_MODE_WIDTH 1 /* LHPF4_MODE */ +#define ARIZONA_LHPF4_ENA 0x0001 /* LHPF4_ENA */ +#define ARIZONA_LHPF4_ENA_MASK 0x0001 /* LHPF4_ENA */ +#define ARIZONA_LHPF4_ENA_SHIFT 0 /* LHPF4_ENA */ +#define ARIZONA_LHPF4_ENA_WIDTH 1 /* LHPF4_ENA */ + +/* + * R3789 (0xECD) - HPLPF4_2 + */ +#define ARIZONA_LHPF4_COEFF_MASK 0xFFFF /* LHPF4_COEFF - [15:0] */ +#define ARIZONA_LHPF4_COEFF_SHIFT 0 /* LHPF4_COEFF - [15:0] */ +#define ARIZONA_LHPF4_COEFF_WIDTH 16 /* LHPF4_COEFF - [15:0] */ + +/* + * R3808 (0xEE0) - ASRC_ENABLE + */ +#define ARIZONA_ASRC2L_ENA 0x0008 /* ASRC2L_ENA */ +#define ARIZONA_ASRC2L_ENA_MASK 0x0008 /* ASRC2L_ENA */ +#define ARIZONA_ASRC2L_ENA_SHIFT 3 /* ASRC2L_ENA */ +#define ARIZONA_ASRC2L_ENA_WIDTH 1 /* ASRC2L_ENA */ +#define ARIZONA_ASRC2R_ENA 0x0004 /* ASRC2R_ENA */ +#define ARIZONA_ASRC2R_ENA_MASK 0x0004 /* ASRC2R_ENA */ +#define ARIZONA_ASRC2R_ENA_SHIFT 2 /* ASRC2R_ENA */ +#define ARIZONA_ASRC2R_ENA_WIDTH 1 /* ASRC2R_ENA */ +#define ARIZONA_ASRC1L_ENA 0x0002 /* ASRC1L_ENA */ +#define ARIZONA_ASRC1L_ENA_MASK 0x0002 /* ASRC1L_ENA */ +#define ARIZONA_ASRC1L_ENA_SHIFT 1 /* ASRC1L_ENA */ +#define ARIZONA_ASRC1L_ENA_WIDTH 1 /* ASRC1L_ENA */ +#define ARIZONA_ASRC1R_ENA 0x0001 /* ASRC1R_ENA */ +#define ARIZONA_ASRC1R_ENA_MASK 0x0001 /* ASRC1R_ENA */ +#define ARIZONA_ASRC1R_ENA_SHIFT 0 /* ASRC1R_ENA */ +#define ARIZONA_ASRC1R_ENA_WIDTH 1 /* ASRC1R_ENA */ + +/* + * R3810 (0xEE2) - ASRC_RATE1 + */ +#define ARIZONA_ASRC_RATE1_MASK 0x7800 /* ASRC_RATE1 - [14:11] */ +#define ARIZONA_ASRC_RATE1_SHIFT 11 /* ASRC_RATE1 - [14:11] */ +#define ARIZONA_ASRC_RATE1_WIDTH 4 /* ASRC_RATE1 - [14:11] */ + +/* + * R3811 (0xEE3) - ASRC_RATE2 + */ +#define ARIZONA_ASRC_RATE2_MASK 0x7800 /* ASRC_RATE2 - [14:11] */ +#define ARIZONA_ASRC_RATE2_SHIFT 11 /* ASRC_RATE2 - [14:11] */ +#define ARIZONA_ASRC_RATE2_WIDTH 4 /* ASRC_RATE2 - [14:11] */ + +/* + * R3824 (0xEF0) - ISRC 1 CTRL 1 + */ +#define ARIZONA_ISRC1_FSH_MASK 0x7800 /* ISRC1_FSH - [14:11] */ +#define ARIZONA_ISRC1_FSH_SHIFT 11 /* ISRC1_FSH - [14:11] */ +#define ARIZONA_ISRC1_FSH_WIDTH 4 /* ISRC1_FSH - [14:11] */ +#define ARIZONA_ISRC1_CLK_SEL_MASK 0x0700 /* ISRC1_CLK_SEL - [10:8] */ +#define ARIZONA_ISRC1_CLK_SEL_SHIFT 8 /* ISRC1_CLK_SEL - [10:8] */ +#define ARIZONA_ISRC1_CLK_SEL_WIDTH 3 /* ISRC1_CLK_SEL - [10:8] */ + +/* + * R3825 (0xEF1) - ISRC 1 CTRL 2 + */ +#define ARIZONA_ISRC1_FSL_MASK 0x7800 /* ISRC1_FSL - [14:11] */ +#define ARIZONA_ISRC1_FSL_SHIFT 11 /* ISRC1_FSL - [14:11] */ +#define ARIZONA_ISRC1_FSL_WIDTH 4 /* ISRC1_FSL - [14:11] */ + +/* + * R3826 (0xEF2) - ISRC 1 CTRL 3 + */ 
+#define ARIZONA_ISRC1_INT0_ENA 0x8000 /* ISRC1_INT0_ENA */ +#define ARIZONA_ISRC1_INT0_ENA_MASK 0x8000 /* ISRC1_INT0_ENA */ +#define ARIZONA_ISRC1_INT0_ENA_SHIFT 15 /* ISRC1_INT0_ENA */ +#define ARIZONA_ISRC1_INT0_ENA_WIDTH 1 /* ISRC1_INT0_ENA */ +#define ARIZONA_ISRC1_INT1_ENA 0x4000 /* ISRC1_INT1_ENA */ +#define ARIZONA_ISRC1_INT1_ENA_MASK 0x4000 /* ISRC1_INT1_ENA */ +#define ARIZONA_ISRC1_INT1_ENA_SHIFT 14 /* ISRC1_INT1_ENA */ +#define ARIZONA_ISRC1_INT1_ENA_WIDTH 1 /* ISRC1_INT1_ENA */ +#define ARIZONA_ISRC1_INT2_ENA 0x2000 /* ISRC1_INT2_ENA */ +#define ARIZONA_ISRC1_INT2_ENA_MASK 0x2000 /* ISRC1_INT2_ENA */ +#define ARIZONA_ISRC1_INT2_ENA_SHIFT 13 /* ISRC1_INT2_ENA */ +#define ARIZONA_ISRC1_INT2_ENA_WIDTH 1 /* ISRC1_INT2_ENA */ +#define ARIZONA_ISRC1_INT3_ENA 0x1000 /* ISRC1_INT3_ENA */ +#define ARIZONA_ISRC1_INT3_ENA_MASK 0x1000 /* ISRC1_INT3_ENA */ +#define ARIZONA_ISRC1_INT3_ENA_SHIFT 12 /* ISRC1_INT3_ENA */ +#define ARIZONA_ISRC1_INT3_ENA_WIDTH 1 /* ISRC1_INT3_ENA */ +#define ARIZONA_ISRC1_DEC0_ENA 0x0200 /* ISRC1_DEC0_ENA */ +#define ARIZONA_ISRC1_DEC0_ENA_MASK 0x0200 /* ISRC1_DEC0_ENA */ +#define ARIZONA_ISRC1_DEC0_ENA_SHIFT 9 /* ISRC1_DEC0_ENA */ +#define ARIZONA_ISRC1_DEC0_ENA_WIDTH 1 /* ISRC1_DEC0_ENA */ +#define ARIZONA_ISRC1_DEC1_ENA 0x0100 /* ISRC1_DEC1_ENA */ +#define ARIZONA_ISRC1_DEC1_ENA_MASK 0x0100 /* ISRC1_DEC1_ENA */ +#define ARIZONA_ISRC1_DEC1_ENA_SHIFT 8 /* ISRC1_DEC1_ENA */ +#define ARIZONA_ISRC1_DEC1_ENA_WIDTH 1 /* ISRC1_DEC1_ENA */ +#define ARIZONA_ISRC1_DEC2_ENA 0x0080 /* ISRC1_DEC2_ENA */ +#define ARIZONA_ISRC1_DEC2_ENA_MASK 0x0080 /* ISRC1_DEC2_ENA */ +#define ARIZONA_ISRC1_DEC2_ENA_SHIFT 7 /* ISRC1_DEC2_ENA */ +#define ARIZONA_ISRC1_DEC2_ENA_WIDTH 1 /* ISRC1_DEC2_ENA */ +#define ARIZONA_ISRC1_DEC3_ENA 0x0040 /* ISRC1_DEC3_ENA */ +#define ARIZONA_ISRC1_DEC3_ENA_MASK 0x0040 /* ISRC1_DEC3_ENA */ +#define ARIZONA_ISRC1_DEC3_ENA_SHIFT 6 /* ISRC1_DEC3_ENA */ +#define ARIZONA_ISRC1_DEC3_ENA_WIDTH 1 /* ISRC1_DEC3_ENA */ +#define ARIZONA_ISRC1_NOTCH_ENA 0x0001 /* ISRC1_NOTCH_ENA */ +#define ARIZONA_ISRC1_NOTCH_ENA_MASK 0x0001 /* ISRC1_NOTCH_ENA */ +#define ARIZONA_ISRC1_NOTCH_ENA_SHIFT 0 /* ISRC1_NOTCH_ENA */ +#define ARIZONA_ISRC1_NOTCH_ENA_WIDTH 1 /* ISRC1_NOTCH_ENA */ + +/* + * R3827 (0xEF3) - ISRC 2 CTRL 1 + */ +#define ARIZONA_ISRC2_FSH_MASK 0x7800 /* ISRC2_FSH - [14:11] */ +#define ARIZONA_ISRC2_FSH_SHIFT 11 /* ISRC2_FSH - [14:11] */ +#define ARIZONA_ISRC2_FSH_WIDTH 4 /* ISRC2_FSH - [14:11] */ +#define ARIZONA_ISRC2_CLK_SEL_MASK 0x0700 /* ISRC2_CLK_SEL - [10:8] */ +#define ARIZONA_ISRC2_CLK_SEL_SHIFT 8 /* ISRC2_CLK_SEL - [10:8] */ +#define ARIZONA_ISRC2_CLK_SEL_WIDTH 3 /* ISRC2_CLK_SEL - [10:8] */ + +/* + * R3828 (0xEF4) - ISRC 2 CTRL 2 + */ +#define ARIZONA_ISRC2_FSL_MASK 0x7800 /* ISRC2_FSL - [14:11] */ +#define ARIZONA_ISRC2_FSL_SHIFT 11 /* ISRC2_FSL - [14:11] */ +#define ARIZONA_ISRC2_FSL_WIDTH 4 /* ISRC2_FSL - [14:11] */ + +/* + * R3829 (0xEF5) - ISRC 2 CTRL 3 + */ +#define ARIZONA_ISRC2_INT0_ENA 0x8000 /* ISRC2_INT0_ENA */ +#define ARIZONA_ISRC2_INT0_ENA_MASK 0x8000 /* ISRC2_INT0_ENA */ +#define ARIZONA_ISRC2_INT0_ENA_SHIFT 15 /* ISRC2_INT0_ENA */ +#define ARIZONA_ISRC2_INT0_ENA_WIDTH 1 /* ISRC2_INT0_ENA */ +#define ARIZONA_ISRC2_INT1_ENA 0x4000 /* ISRC2_INT1_ENA */ +#define ARIZONA_ISRC2_INT1_ENA_MASK 0x4000 /* ISRC2_INT1_ENA */ +#define ARIZONA_ISRC2_INT1_ENA_SHIFT 14 /* ISRC2_INT1_ENA */ +#define ARIZONA_ISRC2_INT1_ENA_WIDTH 1 /* ISRC2_INT1_ENA */ +#define ARIZONA_ISRC2_INT2_ENA 0x2000 /* ISRC2_INT2_ENA */ +#define ARIZONA_ISRC2_INT2_ENA_MASK 0x2000 
/* ISRC2_INT2_ENA */ +#define ARIZONA_ISRC2_INT2_ENA_SHIFT 13 /* ISRC2_INT2_ENA */ +#define ARIZONA_ISRC2_INT2_ENA_WIDTH 1 /* ISRC2_INT2_ENA */ +#define ARIZONA_ISRC2_INT3_ENA 0x1000 /* ISRC2_INT3_ENA */ +#define ARIZONA_ISRC2_INT3_ENA_MASK 0x1000 /* ISRC2_INT3_ENA */ +#define ARIZONA_ISRC2_INT3_ENA_SHIFT 12 /* ISRC2_INT3_ENA */ +#define ARIZONA_ISRC2_INT3_ENA_WIDTH 1 /* ISRC2_INT3_ENA */ +#define ARIZONA_ISRC2_DEC0_ENA 0x0200 /* ISRC2_DEC0_ENA */ +#define ARIZONA_ISRC2_DEC0_ENA_MASK 0x0200 /* ISRC2_DEC0_ENA */ +#define ARIZONA_ISRC2_DEC0_ENA_SHIFT 9 /* ISRC2_DEC0_ENA */ +#define ARIZONA_ISRC2_DEC0_ENA_WIDTH 1 /* ISRC2_DEC0_ENA */ +#define ARIZONA_ISRC2_DEC1_ENA 0x0100 /* ISRC2_DEC1_ENA */ +#define ARIZONA_ISRC2_DEC1_ENA_MASK 0x0100 /* ISRC2_DEC1_ENA */ +#define ARIZONA_ISRC2_DEC1_ENA_SHIFT 8 /* ISRC2_DEC1_ENA */ +#define ARIZONA_ISRC2_DEC1_ENA_WIDTH 1 /* ISRC2_DEC1_ENA */ +#define ARIZONA_ISRC2_DEC2_ENA 0x0080 /* ISRC2_DEC2_ENA */ +#define ARIZONA_ISRC2_DEC2_ENA_MASK 0x0080 /* ISRC2_DEC2_ENA */ +#define ARIZONA_ISRC2_DEC2_ENA_SHIFT 7 /* ISRC2_DEC2_ENA */ +#define ARIZONA_ISRC2_DEC2_ENA_WIDTH 1 /* ISRC2_DEC2_ENA */ +#define ARIZONA_ISRC2_DEC3_ENA 0x0040 /* ISRC2_DEC3_ENA */ +#define ARIZONA_ISRC2_DEC3_ENA_MASK 0x0040 /* ISRC2_DEC3_ENA */ +#define ARIZONA_ISRC2_DEC3_ENA_SHIFT 6 /* ISRC2_DEC3_ENA */ +#define ARIZONA_ISRC2_DEC3_ENA_WIDTH 1 /* ISRC2_DEC3_ENA */ +#define ARIZONA_ISRC2_NOTCH_ENA 0x0001 /* ISRC2_NOTCH_ENA */ +#define ARIZONA_ISRC2_NOTCH_ENA_MASK 0x0001 /* ISRC2_NOTCH_ENA */ +#define ARIZONA_ISRC2_NOTCH_ENA_SHIFT 0 /* ISRC2_NOTCH_ENA */ +#define ARIZONA_ISRC2_NOTCH_ENA_WIDTH 1 /* ISRC2_NOTCH_ENA */ + +/* + * R3830 (0xEF6) - ISRC 3 CTRL 1 + */ +#define ARIZONA_ISRC3_FSH_MASK 0x7800 /* ISRC3_FSH - [14:11] */ +#define ARIZONA_ISRC3_FSH_SHIFT 11 /* ISRC3_FSH - [14:11] */ +#define ARIZONA_ISRC3_FSH_WIDTH 4 /* ISRC3_FSH - [14:11] */ +#define ARIZONA_ISRC3_CLK_SEL_MASK 0x0700 /* ISRC3_CLK_SEL - [10:8] */ +#define ARIZONA_ISRC3_CLK_SEL_SHIFT 8 /* ISRC3_CLK_SEL - [10:8] */ +#define ARIZONA_ISRC3_CLK_SEL_WIDTH 3 /* ISRC3_CLK_SEL - [10:8] */ + +/* + * R3831 (0xEF7) - ISRC 3 CTRL 2 + */ +#define ARIZONA_ISRC3_FSL_MASK 0x7800 /* ISRC3_FSL - [14:11] */ +#define ARIZONA_ISRC3_FSL_SHIFT 11 /* ISRC3_FSL - [14:11] */ +#define ARIZONA_ISRC3_FSL_WIDTH 4 /* ISRC3_FSL - [14:11] */ + +/* + * R3832 (0xEF8) - ISRC 3 CTRL 3 + */ +#define ARIZONA_ISRC3_INT0_ENA 0x8000 /* ISRC3_INT0_ENA */ +#define ARIZONA_ISRC3_INT0_ENA_MASK 0x8000 /* ISRC3_INT0_ENA */ +#define ARIZONA_ISRC3_INT0_ENA_SHIFT 15 /* ISRC3_INT0_ENA */ +#define ARIZONA_ISRC3_INT0_ENA_WIDTH 1 /* ISRC3_INT0_ENA */ +#define ARIZONA_ISRC3_INT1_ENA 0x4000 /* ISRC3_INT1_ENA */ +#define ARIZONA_ISRC3_INT1_ENA_MASK 0x4000 /* ISRC3_INT1_ENA */ +#define ARIZONA_ISRC3_INT1_ENA_SHIFT 14 /* ISRC3_INT1_ENA */ +#define ARIZONA_ISRC3_INT1_ENA_WIDTH 1 /* ISRC3_INT1_ENA */ +#define ARIZONA_ISRC3_INT2_ENA 0x2000 /* ISRC3_INT2_ENA */ +#define ARIZONA_ISRC3_INT2_ENA_MASK 0x2000 /* ISRC3_INT2_ENA */ +#define ARIZONA_ISRC3_INT2_ENA_SHIFT 13 /* ISRC3_INT2_ENA */ +#define ARIZONA_ISRC3_INT2_ENA_WIDTH 1 /* ISRC3_INT2_ENA */ +#define ARIZONA_ISRC3_INT3_ENA 0x1000 /* ISRC3_INT3_ENA */ +#define ARIZONA_ISRC3_INT3_ENA_MASK 0x1000 /* ISRC3_INT3_ENA */ +#define ARIZONA_ISRC3_INT3_ENA_SHIFT 12 /* ISRC3_INT3_ENA */ +#define ARIZONA_ISRC3_INT3_ENA_WIDTH 1 /* ISRC3_INT3_ENA */ +#define ARIZONA_ISRC3_DEC0_ENA 0x0200 /* ISRC3_DEC0_ENA */ +#define ARIZONA_ISRC3_DEC0_ENA_MASK 0x0200 /* ISRC3_DEC0_ENA */ +#define ARIZONA_ISRC3_DEC0_ENA_SHIFT 9 /* ISRC3_DEC0_ENA */ +#define 
ARIZONA_ISRC3_DEC0_ENA_WIDTH 1 /* ISRC3_DEC0_ENA */ +#define ARIZONA_ISRC3_DEC1_ENA 0x0100 /* ISRC3_DEC1_ENA */ +#define ARIZONA_ISRC3_DEC1_ENA_MASK 0x0100 /* ISRC3_DEC1_ENA */ +#define ARIZONA_ISRC3_DEC1_ENA_SHIFT 8 /* ISRC3_DEC1_ENA */ +#define ARIZONA_ISRC3_DEC1_ENA_WIDTH 1 /* ISRC3_DEC1_ENA */ +#define ARIZONA_ISRC3_DEC2_ENA 0x0080 /* ISRC3_DEC2_ENA */ +#define ARIZONA_ISRC3_DEC2_ENA_MASK 0x0080 /* ISRC3_DEC2_ENA */ +#define ARIZONA_ISRC3_DEC2_ENA_SHIFT 7 /* ISRC3_DEC2_ENA */ +#define ARIZONA_ISRC3_DEC2_ENA_WIDTH 1 /* ISRC3_DEC2_ENA */ +#define ARIZONA_ISRC3_DEC3_ENA 0x0040 /* ISRC3_DEC3_ENA */ +#define ARIZONA_ISRC3_DEC3_ENA_MASK 0x0040 /* ISRC3_DEC3_ENA */ +#define ARIZONA_ISRC3_DEC3_ENA_SHIFT 6 /* ISRC3_DEC3_ENA */ +#define ARIZONA_ISRC3_DEC3_ENA_WIDTH 1 /* ISRC3_DEC3_ENA */ +#define ARIZONA_ISRC3_NOTCH_ENA 0x0001 /* ISRC3_NOTCH_ENA */ +#define ARIZONA_ISRC3_NOTCH_ENA_MASK 0x0001 /* ISRC3_NOTCH_ENA */ +#define ARIZONA_ISRC3_NOTCH_ENA_SHIFT 0 /* ISRC3_NOTCH_ENA */ +#define ARIZONA_ISRC3_NOTCH_ENA_WIDTH 1 /* ISRC3_NOTCH_ENA */ + +/* + * R3840 (0xF00) - Clock Control + */ +#define ARIZONA_EXT_NG_SEL_CLR 0x0080 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_MASK 0x0080 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_SHIFT 7 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_WIDTH 1 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_SET 0x0040 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_MASK 0x0040 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_SHIFT 6 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_WIDTH 1 /* EXT_NG_SEL_SET */ +#define ARIZONA_CLK_R_ENA_CLR 0x0020 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_MASK 0x0020 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_SHIFT 5 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_WIDTH 1 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_SET 0x0010 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_MASK 0x0010 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_SHIFT 4 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_WIDTH 1 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_CLR 0x0008 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_MASK 0x0008 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_SHIFT 3 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_WIDTH 1 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_SET 0x0004 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_MASK 0x0004 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_SHIFT 2 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_WIDTH 1 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_L_ENA_CLR 0x0002 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_MASK 0x0002 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_SHIFT 1 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_WIDTH 1 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_SET 0x0001 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_MASK 0x0001 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_SHIFT 0 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_WIDTH 1 /* CLK_L_ENA_SET */ + +/* + * R3841 (0xF01) - ANC SRC + */ +#define ARIZONA_IN_RXANCR_SEL_MASK 0x0070 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCR_SEL_SHIFT 4 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCR_SEL_WIDTH 3 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCL_SEL_MASK 0x0007 /* IN_RXANCL_SEL - [0:2] */ +#define ARIZONA_IN_RXANCL_SEL_SHIFT 0 /* IN_RXANCL_SEL - [0:2] */ +#define ARIZONA_IN_RXANCL_SEL_WIDTH 3 /* IN_RXANCL_SEL - [0:2] */ + +/* + * R3863 (0xF17) - FCL ADC Reformatter Control + */ +#define 
ARIZONA_FCL_MIC_MODE_SEL 0x000C /* FCL_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCL_MIC_MODE_SEL_SHIFT 2 /* FCL_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCL_MIC_MODE_SEL_WIDTH 2 /* FCL_MIC_MODE_SEL - [2:3] */ + +/* + * R3954 (0xF72) - FCR ADC Reformatter Control + */ +#define ARIZONA_FCR_MIC_MODE_SEL 0x000C /* FCR_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCR_MIC_MODE_SEL_SHIFT 2 /* FCR_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCR_MIC_MODE_SEL_WIDTH 2 /* FCR_MIC_MODE_SEL - [2:3] */ + +/* + * R4352 (0x1100) - DSP1 Control 1 + */ +#define ARIZONA_DSP1_RATE_MASK 0x7800 /* DSP1_RATE - [14:11] */ +#define ARIZONA_DSP1_RATE_SHIFT 11 /* DSP1_RATE - [14:11] */ +#define ARIZONA_DSP1_RATE_WIDTH 4 /* DSP1_RATE - [14:11] */ +#define ARIZONA_DSP1_MEM_ENA 0x0010 /* DSP1_MEM_ENA */ +#define ARIZONA_DSP1_MEM_ENA_MASK 0x0010 /* DSP1_MEM_ENA */ +#define ARIZONA_DSP1_MEM_ENA_SHIFT 4 /* DSP1_MEM_ENA */ +#define ARIZONA_DSP1_MEM_ENA_WIDTH 1 /* DSP1_MEM_ENA */ +#define ARIZONA_DSP1_SYS_ENA 0x0004 /* DSP1_SYS_ENA */ +#define ARIZONA_DSP1_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */ +#define ARIZONA_DSP1_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */ +#define ARIZONA_DSP1_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */ +#define ARIZONA_DSP1_CORE_ENA 0x0002 /* DSP1_CORE_ENA */ +#define ARIZONA_DSP1_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */ +#define ARIZONA_DSP1_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */ +#define ARIZONA_DSP1_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */ +#define ARIZONA_DSP1_START 0x0001 /* DSP1_START */ +#define ARIZONA_DSP1_START_MASK 0x0001 /* DSP1_START */ +#define ARIZONA_DSP1_START_SHIFT 0 /* DSP1_START */ +#define ARIZONA_DSP1_START_WIDTH 1 /* DSP1_START */ + +/* + * R4353 (0x1101) - DSP1 Clocking 1 + */ +#define ARIZONA_DSP1_CLK_SEL_MASK 0x0007 /* DSP1_CLK_SEL - [2:0] */ +#define ARIZONA_DSP1_CLK_SEL_SHIFT 0 /* DSP1_CLK_SEL - [2:0] */ +#define ARIZONA_DSP1_CLK_SEL_WIDTH 3 /* DSP1_CLK_SEL - [2:0] */ + +/* + * R4356 (0x1104) - DSP1 Status 1 + */ +#define ARIZONA_DSP1_RAM_RDY 0x0001 /* DSP1_RAM_RDY */ +#define ARIZONA_DSP1_RAM_RDY_MASK 0x0001 /* DSP1_RAM_RDY */ +#define ARIZONA_DSP1_RAM_RDY_SHIFT 0 /* DSP1_RAM_RDY */ +#define ARIZONA_DSP1_RAM_RDY_WIDTH 1 /* DSP1_RAM_RDY */ + +/* + * R4357 (0x1105) - DSP1 Status 2 + */ +#define ARIZONA_DSP1_PING_FULL 0x8000 /* DSP1_PING_FULL */ +#define ARIZONA_DSP1_PING_FULL_MASK 0x8000 /* DSP1_PING_FULL */ +#define ARIZONA_DSP1_PING_FULL_SHIFT 15 /* DSP1_PING_FULL */ +#define ARIZONA_DSP1_PING_FULL_WIDTH 1 /* DSP1_PING_FULL */ +#define ARIZONA_DSP1_PONG_FULL 0x4000 /* DSP1_PONG_FULL */ +#define ARIZONA_DSP1_PONG_FULL_MASK 0x4000 /* DSP1_PONG_FULL */ +#define ARIZONA_DSP1_PONG_FULL_SHIFT 14 /* DSP1_PONG_FULL */ +#define ARIZONA_DSP1_PONG_FULL_WIDTH 1 /* DSP1_PONG_FULL */ +#define ARIZONA_DSP1_WDMA_ACTIVE_CHANNELS_MASK 0x00FF /* DSP1_WDMA_ACTIVE_CHANNELS - [7:0] */ +#define ARIZONA_DSP1_WDMA_ACTIVE_CHANNELS_SHIFT 0 /* DSP1_WDMA_ACTIVE_CHANNELS - [7:0] */ +#define ARIZONA_DSP1_WDMA_ACTIVE_CHANNELS_WIDTH 8 /* DSP1_WDMA_ACTIVE_CHANNELS - [7:0] */ + +#endif diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h new file mode 100644 index 000000000..ddd0b9533 --- /dev/null +++ b/include/linux/mfd/as3711.h @@ -0,0 +1,127 @@ +/* + * AS3711 PMIC MFC driver header + * + * Copyright (C) 2012 Renesas Electronics Corporation + * Author: Guennadi Liakhovetski, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License as + * published by the Free Software Foundation + */ + +#ifndef MFD_AS3711_H +#define MFD_AS3711_H 
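[Editorial note] The Arizona DSP1 definitions above follow the header's uniform convention of exposing each bit-field as a _MASK/_SHIFT/_WIDTH trio alongside the raw bit value. The sketch below is a hedged illustration of how a codec driver might combine those macros with the regmap API; it is not part of the patch. The register address constant and the helper name are assumptions (the address 0x1100 is taken from the "R4352 (0x1100) - DSP1 Control 1" comment above), and the snippet assumes the Arizona register header is already included.

/*
 * Illustrative sketch only -- not part of the header being added.
 * Assumes <linux/mfd/arizona/registers.h> (or equivalent) is included
 * so the ARIZONA_DSP1_* field macros above are visible.
 */
#include <linux/regmap.h>

#define DSP1_CONTROL_1_ADDR	0x1100	/* assumed: "R4352 (0x1100) - DSP1 Control 1" */

static int arizona_set_dsp1_rate(struct regmap *regmap, unsigned int rate)
{
	/*
	 * Place the 4-bit rate value into DSP1_RATE [14:11] (masked so an
	 * out-of-range value cannot spill into other fields) and set
	 * DSP1_START in the same read-modify-write cycle.
	 */
	return regmap_update_bits(regmap, DSP1_CONTROL_1_ADDR,
				  ARIZONA_DSP1_RATE_MASK | ARIZONA_DSP1_START_MASK,
				  ((rate << ARIZONA_DSP1_RATE_SHIFT) &
				   ARIZONA_DSP1_RATE_MASK) |
				  ARIZONA_DSP1_START);
}

The same _MASK/_SHIFT pattern applies to every multi-bit field in these headers, so drivers rarely need to hand-code magic masks.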
+ +/* + * Client data + */ + +/* Register addresses */ +#define AS3711_SD_1_VOLTAGE 0 /* Digital Step-Down */ +#define AS3711_SD_2_VOLTAGE 1 +#define AS3711_SD_3_VOLTAGE 2 +#define AS3711_SD_4_VOLTAGE 3 +#define AS3711_LDO_1_VOLTAGE 4 /* Analog LDO */ +#define AS3711_LDO_2_VOLTAGE 5 +#define AS3711_LDO_3_VOLTAGE 6 /* Digital LDO */ +#define AS3711_LDO_4_VOLTAGE 7 +#define AS3711_LDO_5_VOLTAGE 8 +#define AS3711_LDO_6_VOLTAGE 9 +#define AS3711_LDO_7_VOLTAGE 0xa +#define AS3711_LDO_8_VOLTAGE 0xb +#define AS3711_SD_CONTROL 0x10 +#define AS3711_GPIO_SIGNAL_OUT 0x20 +#define AS3711_GPIO_SIGNAL_IN 0x21 +#define AS3711_SD_CONTROL_1 0x30 +#define AS3711_SD_CONTROL_2 0x31 +#define AS3711_CURR_CONTROL 0x40 +#define AS3711_CURR1_VALUE 0x43 +#define AS3711_CURR2_VALUE 0x44 +#define AS3711_CURR3_VALUE 0x45 +#define AS3711_STEPUP_CONTROL_1 0x50 +#define AS3711_STEPUP_CONTROL_2 0x51 +#define AS3711_STEPUP_CONTROL_4 0x53 +#define AS3711_STEPUP_CONTROL_5 0x54 +#define AS3711_REG_STATUS 0x73 +#define AS3711_INTERRUPT_STATUS_1 0x77 +#define AS3711_INTERRUPT_STATUS_2 0x78 +#define AS3711_INTERRUPT_STATUS_3 0x79 +#define AS3711_CHARGER_STATUS_1 0x86 +#define AS3711_CHARGER_STATUS_2 0x87 +#define AS3711_ASIC_ID_1 0x90 +#define AS3711_ASIC_ID_2 0x91 + +#define AS3711_MAX_REG AS3711_ASIC_ID_2 +#define AS3711_NUM_REGS (AS3711_MAX_REG + 1) + +/* Regulators */ +enum { + AS3711_REGULATOR_SD_1, + AS3711_REGULATOR_SD_2, + AS3711_REGULATOR_SD_3, + AS3711_REGULATOR_SD_4, + AS3711_REGULATOR_LDO_1, + AS3711_REGULATOR_LDO_2, + AS3711_REGULATOR_LDO_3, + AS3711_REGULATOR_LDO_4, + AS3711_REGULATOR_LDO_5, + AS3711_REGULATOR_LDO_6, + AS3711_REGULATOR_LDO_7, + AS3711_REGULATOR_LDO_8, + + AS3711_REGULATOR_MAX, +}; + +struct device; +struct regmap; + +struct as3711 { + struct device *dev; + struct regmap *regmap; +}; + +#define AS3711_MAX_STEPDOWN 4 +#define AS3711_MAX_STEPUP 2 +#define AS3711_MAX_LDO 8 + +enum as3711_su2_feedback { + AS3711_SU2_VOLTAGE, + AS3711_SU2_CURR1, + AS3711_SU2_CURR2, + AS3711_SU2_CURR3, + AS3711_SU2_CURR_AUTO, +}; + +enum as3711_su2_fbprot { + AS3711_SU2_LX_SD4, + AS3711_SU2_GPIO2, + AS3711_SU2_GPIO3, + AS3711_SU2_GPIO4, +}; + +/* + * Platform data + */ + +struct as3711_regulator_pdata { + struct regulator_init_data *init_data[AS3711_REGULATOR_MAX]; +}; + +struct as3711_bl_pdata { + bool su1_fb; + int su1_max_uA; + bool su2_fb; + int su2_max_uA; + enum as3711_su2_feedback su2_feedback; + enum as3711_su2_fbprot su2_fbprot; + bool su2_auto_curr1; + bool su2_auto_curr2; + bool su2_auto_curr3; +}; + +struct as3711_platform_data { + struct as3711_regulator_pdata regulator; + struct as3711_bl_pdata backlight; +}; + +#endif diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h new file mode 100644 index 000000000..b404a5af9 --- /dev/null +++ b/include/linux/mfd/as3722.h @@ -0,0 +1,432 @@ +/* + * as3722 definitions + * + * Copyright (C) 2013 ams + * Copyright (c) 2013, NVIDIA Corporation. All rights reserved. + * + * Author: Florian Lobmaier + * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef __LINUX_MFD_AS3722_H__ +#define __LINUX_MFD_AS3722_H__ + +#include + +/* AS3722 registers */ +#define AS3722_SD0_VOLTAGE_REG 0x00 +#define AS3722_SD1_VOLTAGE_REG 0x01 +#define AS3722_SD2_VOLTAGE_REG 0x02 +#define AS3722_SD3_VOLTAGE_REG 0x03 +#define AS3722_SD4_VOLTAGE_REG 0x04 +#define AS3722_SD5_VOLTAGE_REG 0x05 +#define AS3722_SD6_VOLTAGE_REG 0x06 +#define AS3722_GPIO0_CONTROL_REG 0x08 +#define AS3722_GPIO1_CONTROL_REG 0x09 +#define AS3722_GPIO2_CONTROL_REG 0x0A +#define AS3722_GPIO3_CONTROL_REG 0x0B +#define AS3722_GPIO4_CONTROL_REG 0x0C +#define AS3722_GPIO5_CONTROL_REG 0x0D +#define AS3722_GPIO6_CONTROL_REG 0x0E +#define AS3722_GPIO7_CONTROL_REG 0x0F +#define AS3722_LDO0_VOLTAGE_REG 0x10 +#define AS3722_LDO1_VOLTAGE_REG 0x11 +#define AS3722_LDO2_VOLTAGE_REG 0x12 +#define AS3722_LDO3_VOLTAGE_REG 0x13 +#define AS3722_LDO4_VOLTAGE_REG 0x14 +#define AS3722_LDO5_VOLTAGE_REG 0x15 +#define AS3722_LDO6_VOLTAGE_REG 0x16 +#define AS3722_LDO7_VOLTAGE_REG 0x17 +#define AS3722_LDO9_VOLTAGE_REG 0x19 +#define AS3722_LDO10_VOLTAGE_REG 0x1A +#define AS3722_LDO11_VOLTAGE_REG 0x1B +#define AS3722_GPIO_DEB1_REG 0x1E +#define AS3722_GPIO_DEB2_REG 0x1F +#define AS3722_GPIO_SIGNAL_OUT_REG 0x20 +#define AS3722_GPIO_SIGNAL_IN_REG 0x21 +#define AS3722_REG_SEQU_MOD1_REG 0x22 +#define AS3722_REG_SEQU_MOD2_REG 0x23 +#define AS3722_REG_SEQU_MOD3_REG 0x24 +#define AS3722_SD_PHSW_CTRL_REG 0x27 +#define AS3722_SD_PHSW_STATUS 0x28 +#define AS3722_SD0_CONTROL_REG 0x29 +#define AS3722_SD1_CONTROL_REG 0x2A +#define AS3722_SDmph_CONTROL_REG 0x2B +#define AS3722_SD23_CONTROL_REG 0x2C +#define AS3722_SD4_CONTROL_REG 0x2D +#define AS3722_SD5_CONTROL_REG 0x2E +#define AS3722_SD6_CONTROL_REG 0x2F +#define AS3722_SD_DVM_REG 0x30 +#define AS3722_RESET_REASON_REG 0x31 +#define AS3722_BATTERY_VOLTAGE_MONITOR_REG 0x32 +#define AS3722_STARTUP_CONTROL_REG 0x33 +#define AS3722_RESET_TIMER_REG 0x34 +#define AS3722_REFERENCE_CONTROL_REG 0x35 +#define AS3722_RESET_CONTROL_REG 0x36 +#define AS3722_OVER_TEMP_CONTROL_REG 0x37 +#define AS3722_WATCHDOG_CONTROL_REG 0x38 +#define AS3722_REG_STANDBY_MOD1_REG 0x39 +#define AS3722_REG_STANDBY_MOD2_REG 0x3A +#define AS3722_REG_STANDBY_MOD3_REG 0x3B +#define AS3722_ENABLE_CTRL1_REG 0x3C +#define AS3722_ENABLE_CTRL2_REG 0x3D +#define AS3722_ENABLE_CTRL3_REG 0x3E +#define AS3722_ENABLE_CTRL4_REG 0x3F +#define AS3722_ENABLE_CTRL5_REG 0x40 +#define AS3722_PWM_CONTROL_L_REG 0x41 +#define AS3722_PWM_CONTROL_H_REG 0x42 +#define AS3722_WATCHDOG_TIMER_REG 0x46 +#define AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG 0x48 +#define AS3722_IOVOLTAGE_REG 0x49 +#define AS3722_BATTERY_VOLTAGE_MONITOR2_REG 0x4A +#define AS3722_SD_CONTROL_REG 0x4D +#define AS3722_LDOCONTROL0_REG 0x4E +#define AS3722_LDOCONTROL1_REG 0x4F +#define AS3722_SD0_PROTECT_REG 0x50 +#define AS3722_SD6_PROTECT_REG 0x51 +#define AS3722_PWM_VCONTROL1_REG 0x52 +#define AS3722_PWM_VCONTROL2_REG 0x53 +#define AS3722_PWM_VCONTROL3_REG 0x54 +#define AS3722_PWM_VCONTROL4_REG 0x55 +#define AS3722_BB_CHARGER_REG 0x57 +#define AS3722_CTRL_SEQU1_REG 0x58 +#define AS3722_CTRL_SEQU2_REG 0x59 +#define AS3722_OVCURRENT_REG 0x5A +#define AS3722_OVCURRENT_DEB_REG 0x5B +#define AS3722_SDLV_DEB_REG 0x5C +#define AS3722_OC_PG_CTRL_REG 0x5D +#define AS3722_OC_PG_CTRL2_REG 0x5E +#define AS3722_CTRL_STATUS 0x5F +#define AS3722_RTC_CONTROL_REG 0x60 
+#define AS3722_RTC_SECOND_REG 0x61 +#define AS3722_RTC_MINUTE_REG 0x62 +#define AS3722_RTC_HOUR_REG 0x63 +#define AS3722_RTC_DAY_REG 0x64 +#define AS3722_RTC_MONTH_REG 0x65 +#define AS3722_RTC_YEAR_REG 0x66 +#define AS3722_RTC_ALARM_SECOND_REG 0x67 +#define AS3722_RTC_ALARM_MINUTE_REG 0x68 +#define AS3722_RTC_ALARM_HOUR_REG 0x69 +#define AS3722_RTC_ALARM_DAY_REG 0x6A +#define AS3722_RTC_ALARM_MONTH_REG 0x6B +#define AS3722_RTC_ALARM_YEAR_REG 0x6C +#define AS3722_SRAM_REG 0x6D +#define AS3722_RTC_ACCESS_REG 0x6F +#define AS3722_RTC_STATUS_REG 0x73 +#define AS3722_INTERRUPT_MASK1_REG 0x74 +#define AS3722_INTERRUPT_MASK2_REG 0x75 +#define AS3722_INTERRUPT_MASK3_REG 0x76 +#define AS3722_INTERRUPT_MASK4_REG 0x77 +#define AS3722_INTERRUPT_STATUS1_REG 0x78 +#define AS3722_INTERRUPT_STATUS2_REG 0x79 +#define AS3722_INTERRUPT_STATUS3_REG 0x7A +#define AS3722_INTERRUPT_STATUS4_REG 0x7B +#define AS3722_TEMP_STATUS_REG 0x7D +#define AS3722_ADC0_CONTROL_REG 0x80 +#define AS3722_ADC1_CONTROL_REG 0x81 +#define AS3722_ADC0_MSB_RESULT_REG 0x82 +#define AS3722_ADC0_LSB_RESULT_REG 0x83 +#define AS3722_ADC1_MSB_RESULT_REG 0x84 +#define AS3722_ADC1_LSB_RESULT_REG 0x85 +#define AS3722_ADC1_THRESHOLD_HI_MSB_REG 0x86 +#define AS3722_ADC1_THRESHOLD_HI_LSB_REG 0x87 +#define AS3722_ADC1_THRESHOLD_LO_MSB_REG 0x88 +#define AS3722_ADC1_THRESHOLD_LO_LSB_REG 0x89 +#define AS3722_ADC_CONFIGURATION_REG 0x8A +#define AS3722_ASIC_ID1_REG 0x90 +#define AS3722_ASIC_ID2_REG 0x91 +#define AS3722_LOCK_REG 0x9E +#define AS3722_FUSE7_REG 0xA7 +#define AS3722_MAX_REGISTER 0xF4 + +#define AS3722_SD0_EXT_ENABLE_MASK 0x03 +#define AS3722_SD1_EXT_ENABLE_MASK 0x0C +#define AS3722_SD2_EXT_ENABLE_MASK 0x30 +#define AS3722_SD3_EXT_ENABLE_MASK 0xC0 +#define AS3722_SD4_EXT_ENABLE_MASK 0x03 +#define AS3722_SD5_EXT_ENABLE_MASK 0x0C +#define AS3722_SD6_EXT_ENABLE_MASK 0x30 +#define AS3722_LDO0_EXT_ENABLE_MASK 0x03 +#define AS3722_LDO1_EXT_ENABLE_MASK 0x0C +#define AS3722_LDO2_EXT_ENABLE_MASK 0x30 +#define AS3722_LDO3_EXT_ENABLE_MASK 0xC0 +#define AS3722_LDO4_EXT_ENABLE_MASK 0x03 +#define AS3722_LDO5_EXT_ENABLE_MASK 0x0C +#define AS3722_LDO6_EXT_ENABLE_MASK 0x30 +#define AS3722_LDO7_EXT_ENABLE_MASK 0xC0 +#define AS3722_LDO9_EXT_ENABLE_MASK 0x0C +#define AS3722_LDO10_EXT_ENABLE_MASK 0x30 +#define AS3722_LDO11_EXT_ENABLE_MASK 0xC0 + +#define AS3722_OVCURRENT_SD0_ALARM_MASK 0x07 +#define AS3722_OVCURRENT_SD0_ALARM_SHIFT 0x01 +#define AS3722_OVCURRENT_SD0_TRIP_MASK 0x18 +#define AS3722_OVCURRENT_SD0_TRIP_SHIFT 0x03 +#define AS3722_OVCURRENT_SD1_TRIP_MASK 0x60 +#define AS3722_OVCURRENT_SD1_TRIP_SHIFT 0x05 + +#define AS3722_OVCURRENT_SD6_ALARM_MASK 0x07 +#define AS3722_OVCURRENT_SD6_ALARM_SHIFT 0x01 +#define AS3722_OVCURRENT_SD6_TRIP_MASK 0x18 +#define AS3722_OVCURRENT_SD6_TRIP_SHIFT 0x03 + +/* AS3722 register bits and bit masks */ +#define AS3722_LDO_ILIMIT_MASK BIT(7) +#define AS3722_LDO_ILIMIT_BIT BIT(7) +#define AS3722_LDO0_VSEL_MASK 0x1F +#define AS3722_LDO0_VSEL_MIN 0x01 +#define AS3722_LDO0_VSEL_MAX 0x12 +#define AS3722_LDO0_NUM_VOLT 0x12 +#define AS3722_LDO3_VSEL_MASK 0x3F +#define AS3722_LDO3_VSEL_MIN 0x01 +#define AS3722_LDO3_VSEL_MAX 0x2D +#define AS3722_LDO3_NUM_VOLT 0x2D +#define AS3722_LDO6_VSEL_BYPASS 0x3F +#define AS3722_LDO_VSEL_MASK 0x7F +#define AS3722_LDO_VSEL_MIN 0x01 +#define AS3722_LDO_VSEL_MAX 0x7F +#define AS3722_LDO_VSEL_DNU_MIN 0x25 +#define AS3722_LDO_VSEL_DNU_MAX 0x3F +#define AS3722_LDO_NUM_VOLT 0x80 + +#define AS3722_LDO0_CTRL BIT(0) +#define AS3722_LDO1_CTRL BIT(1) +#define AS3722_LDO2_CTRL BIT(2) +#define 
AS3722_LDO3_CTRL BIT(3) +#define AS3722_LDO4_CTRL BIT(4) +#define AS3722_LDO5_CTRL BIT(5) +#define AS3722_LDO6_CTRL BIT(6) +#define AS3722_LDO7_CTRL BIT(7) +#define AS3722_LDO9_CTRL BIT(1) +#define AS3722_LDO10_CTRL BIT(2) +#define AS3722_LDO11_CTRL BIT(3) + +#define AS3722_LDO3_MODE_MASK (3 << 6) +#define AS3722_LDO3_MODE_VAL(n) (((n) & 0x3) << 6) +#define AS3722_LDO3_MODE_PMOS AS3722_LDO3_MODE_VAL(0) +#define AS3722_LDO3_MODE_PMOS_TRACKING AS3722_LDO3_MODE_VAL(1) +#define AS3722_LDO3_MODE_NMOS AS3722_LDO3_MODE_VAL(2) +#define AS3722_LDO3_MODE_SWITCH AS3722_LDO3_MODE_VAL(3) + +#define AS3722_SD_VSEL_MASK 0x7F +#define AS3722_SD0_VSEL_MIN 0x01 +#define AS3722_SD0_VSEL_MAX 0x5A +#define AS3722_SD0_VSEL_LOW_VOL_MAX 0x6E +#define AS3722_SD2_VSEL_MIN 0x01 +#define AS3722_SD2_VSEL_MAX 0x7F + +#define AS3722_SDn_CTRL(n) BIT(n) + +#define AS3722_SD0_MODE_FAST BIT(4) +#define AS3722_SD1_MODE_FAST BIT(4) +#define AS3722_SD2_MODE_FAST BIT(2) +#define AS3722_SD3_MODE_FAST BIT(6) +#define AS3722_SD4_MODE_FAST BIT(2) +#define AS3722_SD5_MODE_FAST BIT(2) +#define AS3722_SD6_MODE_FAST BIT(4) + +#define AS3722_POWER_OFF BIT(1) + +#define AS3722_INTERRUPT_MASK1_LID BIT(0) +#define AS3722_INTERRUPT_MASK1_ACOK BIT(1) +#define AS3722_INTERRUPT_MASK1_ENABLE1 BIT(2) +#define AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0 BIT(3) +#define AS3722_INTERRUPT_MASK1_ONKEY_LONG BIT(4) +#define AS3722_INTERRUPT_MASK1_ONKEY BIT(5) +#define AS3722_INTERRUPT_MASK1_OVTMP BIT(6) +#define AS3722_INTERRUPT_MASK1_LOWBAT BIT(7) + +#define AS3722_INTERRUPT_MASK2_SD0_LV BIT(0) +#define AS3722_INTERRUPT_MASK2_SD1_LV BIT(1) +#define AS3722_INTERRUPT_MASK2_SD2345_LV BIT(2) +#define AS3722_INTERRUPT_MASK2_PWM1_OV_PROT BIT(3) +#define AS3722_INTERRUPT_MASK2_PWM2_OV_PROT BIT(4) +#define AS3722_INTERRUPT_MASK2_ENABLE2 BIT(5) +#define AS3722_INTERRUPT_MASK2_SD6_LV BIT(6) +#define AS3722_INTERRUPT_MASK2_RTC_REP BIT(7) + +#define AS3722_INTERRUPT_MASK3_RTC_ALARM BIT(0) +#define AS3722_INTERRUPT_MASK3_GPIO1 BIT(1) +#define AS3722_INTERRUPT_MASK3_GPIO2 BIT(2) +#define AS3722_INTERRUPT_MASK3_GPIO3 BIT(3) +#define AS3722_INTERRUPT_MASK3_GPIO4 BIT(4) +#define AS3722_INTERRUPT_MASK3_GPIO5 BIT(5) +#define AS3722_INTERRUPT_MASK3_WATCHDOG BIT(6) +#define AS3722_INTERRUPT_MASK3_ENABLE3 BIT(7) + +#define AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN BIT(0) +#define AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN BIT(1) +#define AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN BIT(2) +#define AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM BIT(3) +#define AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM BIT(4) +#define AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM BIT(5) +#define AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6 BIT(6) +#define AS3722_INTERRUPT_MASK4_ADC BIT(7) + +#define AS3722_ADC1_INTERVAL_TIME BIT(0) +#define AS3722_ADC1_INT_MODE_ON BIT(1) +#define AS3722_ADC_BUF_ON BIT(2) +#define AS3722_ADC1_LOW_VOLTAGE_RANGE BIT(5) +#define AS3722_ADC1_INTEVAL_SCAN BIT(6) +#define AS3722_ADC1_INT_MASK BIT(7) + +#define AS3722_ADC_MSB_VAL_MASK 0x7F +#define AS3722_ADC_LSB_VAL_MASK 0x07 + +#define AS3722_ADC0_CONV_START BIT(7) +#define AS3722_ADC0_CONV_NOTREADY BIT(7) +#define AS3722_ADC0_SOURCE_SELECT_MASK 0x1F + +#define AS3722_ADC1_CONV_START BIT(7) +#define AS3722_ADC1_CONV_NOTREADY BIT(7) +#define AS3722_ADC1_SOURCE_SELECT_MASK 0x1F + +#define AS3722_CTRL_SEQU1_AC_OK_PWR_ON BIT(0) + +/* GPIO modes */ +#define AS3722_GPIO_MODE_MASK 0x07 +#define AS3722_GPIO_MODE_INPUT 0x00 +#define AS3722_GPIO_MODE_OUTPUT_VDDH 0x01 +#define AS3722_GPIO_MODE_IO_OPEN_DRAIN 0x02 +#define AS3722_GPIO_MODE_ADC_IN 0x03 +#define 
AS3722_GPIO_MODE_INPUT_PULL_UP 0x04 +#define AS3722_GPIO_MODE_INPUT_PULL_DOWN 0x05 +#define AS3722_GPIO_MODE_IO_OPEN_DRAIN_PULL_UP 0x06 +#define AS3722_GPIO_MODE_OUTPUT_VDDL 0x07 +#define AS3722_GPIO_MODE_VAL(n) ((n) & AS3722_GPIO_MODE_MASK) + +#define AS3722_GPIO_INV BIT(7) +#define AS3722_GPIO_IOSF_MASK 0x78 +#define AS3722_GPIO_IOSF_VAL(n) (((n) & 0xF) << 3) +#define AS3722_GPIO_IOSF_NORMAL AS3722_GPIO_IOSF_VAL(0) +#define AS3722_GPIO_IOSF_INTERRUPT_OUT AS3722_GPIO_IOSF_VAL(1) +#define AS3722_GPIO_IOSF_VSUP_LOW_OUT AS3722_GPIO_IOSF_VAL(2) +#define AS3722_GPIO_IOSF_GPIO_INTERRUPT_IN AS3722_GPIO_IOSF_VAL(3) +#define AS3722_GPIO_IOSF_ISINK_PWM_IN AS3722_GPIO_IOSF_VAL(4) +#define AS3722_GPIO_IOSF_VOLTAGE_STBY AS3722_GPIO_IOSF_VAL(5) +#define AS3722_GPIO_IOSF_SD0_OUT AS3722_GPIO_IOSF_VAL(6) +#define AS3722_GPIO_IOSF_PWR_GOOD_OUT AS3722_GPIO_IOSF_VAL(7) +#define AS3722_GPIO_IOSF_Q32K_OUT AS3722_GPIO_IOSF_VAL(8) +#define AS3722_GPIO_IOSF_WATCHDOG_IN AS3722_GPIO_IOSF_VAL(9) +#define AS3722_GPIO_IOSF_SOFT_RESET_IN AS3722_GPIO_IOSF_VAL(11) +#define AS3722_GPIO_IOSF_PWM_OUT AS3722_GPIO_IOSF_VAL(12) +#define AS3722_GPIO_IOSF_VSUP_LOW_DEB_OUT AS3722_GPIO_IOSF_VAL(13) +#define AS3722_GPIO_IOSF_SD6_LOW_VOLT_LOW AS3722_GPIO_IOSF_VAL(14) + +#define AS3722_GPIOn_SIGNAL(n) BIT(n) +#define AS3722_GPIOn_CONTROL_REG(n) (AS3722_GPIO0_CONTROL_REG + n) +#define AS3722_I2C_PULL_UP BIT(4) +#define AS3722_INT_PULL_UP BIT(5) + +#define AS3722_RTC_REP_WAKEUP_EN BIT(0) +#define AS3722_RTC_ALARM_WAKEUP_EN BIT(1) +#define AS3722_RTC_ON BIT(2) +#define AS3722_RTC_IRQMODE BIT(3) +#define AS3722_RTC_CLK32K_OUT_EN BIT(5) + +#define AS3722_WATCHDOG_TIMER_MAX 0x7F +#define AS3722_WATCHDOG_ON BIT(0) +#define AS3722_WATCHDOG_SW_SIG BIT(0) + +#define AS3722_EXT_CONTROL_ENABLE1 0x1 +#define AS3722_EXT_CONTROL_ENABLE2 0x2 +#define AS3722_EXT_CONTROL_ENABLE3 0x3 + +#define AS3722_FUSE7_SD0_LOW_VOLTAGE BIT(4) + +/* Interrupt IDs */ +enum as3722_irq { + AS3722_IRQ_LID, + AS3722_IRQ_ACOK, + AS3722_IRQ_ENABLE1, + AS3722_IRQ_OCCUR_ALARM_SD0, + AS3722_IRQ_ONKEY_LONG_PRESS, + AS3722_IRQ_ONKEY, + AS3722_IRQ_OVTMP, + AS3722_IRQ_LOWBAT, + AS3722_IRQ_SD0_LV, + AS3722_IRQ_SD1_LV, + AS3722_IRQ_SD2_LV, + AS3722_IRQ_PWM1_OV_PROT, + AS3722_IRQ_PWM2_OV_PROT, + AS3722_IRQ_ENABLE2, + AS3722_IRQ_SD6_LV, + AS3722_IRQ_RTC_REP, + AS3722_IRQ_RTC_ALARM, + AS3722_IRQ_GPIO1, + AS3722_IRQ_GPIO2, + AS3722_IRQ_GPIO3, + AS3722_IRQ_GPIO4, + AS3722_IRQ_GPIO5, + AS3722_IRQ_WATCHDOG, + AS3722_IRQ_ENABLE3, + AS3722_IRQ_TEMP_SD0_SHUTDOWN, + AS3722_IRQ_TEMP_SD1_SHUTDOWN, + AS3722_IRQ_TEMP_SD2_SHUTDOWN, + AS3722_IRQ_TEMP_SD0_ALARM, + AS3722_IRQ_TEMP_SD1_ALARM, + AS3722_IRQ_TEMP_SD6_ALARM, + AS3722_IRQ_OCCUR_ALARM_SD6, + AS3722_IRQ_ADC, + AS3722_IRQ_MAX, +}; + +struct as3722 { + struct device *dev; + struct regmap *regmap; + int chip_irq; + unsigned long irq_flags; + bool en_intern_int_pullup; + bool en_intern_i2c_pullup; + bool en_ac_ok_pwr_on; + struct regmap_irq_chip_data *irq_data; +}; + +static inline int as3722_read(struct as3722 *as3722, u32 reg, u32 *dest) +{ + return regmap_read(as3722->regmap, reg, dest); +} + +static inline int as3722_write(struct as3722 *as3722, u32 reg, u32 value) +{ + return regmap_write(as3722->regmap, reg, value); +} + +static inline int as3722_block_read(struct as3722 *as3722, u32 reg, + int count, u8 *buf) +{ + return regmap_bulk_read(as3722->regmap, reg, buf, count); +} + +static inline int as3722_block_write(struct as3722 *as3722, u32 reg, + int count, u8 *data) +{ + return regmap_bulk_write(as3722->regmap, reg, data, count); +} + 
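[Editorial note] The static inline helpers above are thin wrappers around the regmap accessors held in the shared struct as3722, so MFD sub-drivers can read and write PMIC registers without touching the regmap directly. Below is a minimal, hedged usage sketch, not part of the patch: the function name and the expected ID value are assumptions, while as3722_read() and AS3722_ASIC_ID1_REG come from this header. It assumes the usual kernel headers (for u32 and -ENODEV) and this file are included.

/* Illustrative sketch only -- a probe-time identity check a sub-driver might do. */
static int as3722_check_device_id(struct as3722 *as3722)
{
	u32 val;
	int ret;

	/* AS3722_ASIC_ID1_REG (0x90) holds the chip identification byte. */
	ret = as3722_read(as3722, AS3722_ASIC_ID1_REG, &val);
	if (ret < 0)
		return ret;

	/* 0x0c is the expected ID here purely for illustration. */
	if (val != 0x0c)
		return -ENODEV;

	return 0;
}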
+static inline int as3722_update_bits(struct as3722 *as3722, u32 reg, + u32 mask, u8 val) +{ + return regmap_update_bits(as3722->regmap, reg, mask, val); +} + +static inline int as3722_irq_get_virq(struct as3722 *as3722, int irq) +{ + return regmap_irq_get_virq(as3722->irq_data, irq); +} +#endif /* __LINUX_MFD_AS3722_H__ */ diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h new file mode 100644 index 000000000..e1148d037 --- /dev/null +++ b/include/linux/mfd/asic3.h @@ -0,0 +1,316 @@ +/* + * include/linux/mfd/asic3.h + * + * Compaq ASIC3 headers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Copyright 2001 Compaq Computer Corporation. + * Copyright 2007-2008 OpenedHand Ltd. + */ + +#ifndef __ASIC3_H__ +#define __ASIC3_H__ + +#include + +struct led_classdev; +struct asic3_led { + const char *name; + const char *default_trigger; + struct led_classdev *cdev; +}; + +struct asic3_platform_data { + u16 *gpio_config; + unsigned int gpio_config_num; + + unsigned int irq_base; + + unsigned int gpio_base; + + unsigned int clock_rate; + + struct asic3_led *leds; +}; + +#define ASIC3_NUM_GPIO_BANKS 4 +#define ASIC3_GPIOS_PER_BANK 16 +#define ASIC3_NUM_GPIOS 64 +#define ASIC3_NR_IRQS ASIC3_NUM_GPIOS + 6 + +#define ASIC3_IRQ_LED0 64 +#define ASIC3_IRQ_LED1 65 +#define ASIC3_IRQ_LED2 66 +#define ASIC3_IRQ_SPI 67 +#define ASIC3_IRQ_SMBUS 68 +#define ASIC3_IRQ_OWM 69 + +#define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio)) + +#define ASIC3_GPIO_BANK_A 0 +#define ASIC3_GPIO_BANK_B 1 +#define ASIC3_GPIO_BANK_C 2 +#define ASIC3_GPIO_BANK_D 3 + +#define ASIC3_GPIO(bank, gpio) \ + ((ASIC3_GPIOS_PER_BANK * ASIC3_GPIO_BANK_##bank) + (gpio)) +#define ASIC3_GPIO_bit(gpio) (1 << (gpio & 0xf)) +/* All offsets below are specified with this address bus shift */ +#define ASIC3_DEFAULT_ADDR_SHIFT 2 + +#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_BASE + ASIC3_##base##_##reg) +#define ASIC3_GPIO_OFFSET(base, reg) \ + (ASIC3_GPIO_##base##_BASE + ASIC3_GPIO_##reg) + +#define ASIC3_GPIO_A_BASE 0x0000 +#define ASIC3_GPIO_B_BASE 0x0100 +#define ASIC3_GPIO_C_BASE 0x0200 +#define ASIC3_GPIO_D_BASE 0x0300 + +#define ASIC3_GPIO_TO_BANK(gpio) ((gpio) >> 4) +#define ASIC3_GPIO_TO_BIT(gpio) ((gpio) - \ + (ASIC3_GPIOS_PER_BANK * ((gpio) >> 4))) +#define ASIC3_GPIO_TO_MASK(gpio) (1 << ASIC3_GPIO_TO_BIT(gpio)) +#define ASIC3_GPIO_TO_BASE(gpio) (ASIC3_GPIO_A_BASE + (((gpio) >> 4) * 0x0100)) +#define ASIC3_BANK_TO_BASE(bank) (ASIC3_GPIO_A_BASE + ((bank) * 0x100)) + +#define ASIC3_GPIO_MASK 0x00 /* R/W 0:don't mask */ +#define ASIC3_GPIO_DIRECTION 0x04 /* R/W 0:input */ +#define ASIC3_GPIO_OUT 0x08 /* R/W 0:output low */ +#define ASIC3_GPIO_TRIGGER_TYPE 0x0c /* R/W 0:level */ +#define ASIC3_GPIO_EDGE_TRIGGER 0x10 /* R/W 0:falling */ +#define ASIC3_GPIO_LEVEL_TRIGGER 0x14 /* R/W 0:low level detect */ +#define ASIC3_GPIO_SLEEP_MASK 0x18 /* R/W 0:don't mask in sleep mode */ +#define ASIC3_GPIO_SLEEP_OUT 0x1c /* R/W level 0:low in sleep mode */ +#define ASIC3_GPIO_BAT_FAULT_OUT 0x20 /* R/W level 0:low in batt_fault */ +#define ASIC3_GPIO_INT_STATUS 0x24 /* R/W 0:none, 1:detect */ +#define ASIC3_GPIO_ALT_FUNCTION 0x28 /* R/W 1:LED register control */ +#define ASIC3_GPIO_SLEEP_CONF 0x2c /* + * R/W bit 1: autosleep + * 0: disable gposlpout in normal mode, + * enable gposlpout in sleep mode. 
+ */ +#define ASIC3_GPIO_STATUS 0x30 /* R Pin status */ + +/* + * ASIC3 GPIO config + * + * Bits 0..6 gpio number + * Bits 7..13 Alternate function + * Bit 14 Direction + * Bit 15 Initial value + * + */ +#define ASIC3_CONFIG_GPIO_PIN(config) ((config) & 0x7f) +#define ASIC3_CONFIG_GPIO_ALT(config) (((config) & (0x7f << 7)) >> 7) +#define ASIC3_CONFIG_GPIO_DIR(config) ((config & (1 << 14)) >> 14) +#define ASIC3_CONFIG_GPIO_INIT(config) ((config & (1 << 15)) >> 15) +#define ASIC3_CONFIG_GPIO(gpio, alt, dir, init) (((gpio) & 0x7f) \ + | (((alt) & 0x7f) << 7) | (((dir) & 0x1) << 14) \ + | (((init) & 0x1) << 15)) +#define ASIC3_CONFIG_GPIO_DEFAULT(gpio, dir, init) \ + ASIC3_CONFIG_GPIO((gpio), 0, (dir), (init)) +#define ASIC3_CONFIG_GPIO_DEFAULT_OUT(gpio, init) \ + ASIC3_CONFIG_GPIO((gpio), 0, 1, (init)) + +/* + * Alternate functions + */ +#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0) +#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0) +#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0) +#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 0, 0) +#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 0, 0) +#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 0, 0) +#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0) +#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0) +#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0) +#define ASIC3_GPIOC5_SPI_CLK ASIC3_CONFIG_GPIO(37, 1, 1, 0) +#define ASIC3_GPIOC5_nCIOW ASIC3_CONFIG_GPIO(37, 1, 1, 0) +#define ASIC3_GPIOC6_nCIOR ASIC3_CONFIG_GPIO(38, 1, 1, 0) +#define ASIC3_GPIOC7_nPCE_1 ASIC3_CONFIG_GPIO(39, 1, 0, 0) +#define ASIC3_GPIOC8_nPCE_2 ASIC3_CONFIG_GPIO(40, 1, 0, 0) +#define ASIC3_GPIOC9_nPOE ASIC3_CONFIG_GPIO(41, 1, 0, 0) +#define ASIC3_GPIOC10_nPWE ASIC3_CONFIG_GPIO(42, 1, 0, 0) +#define ASIC3_GPIOC11_PSKTSEL ASIC3_CONFIG_GPIO(43, 1, 0, 0) +#define ASIC3_GPIOC12_nPREG ASIC3_CONFIG_GPIO(44, 1, 0, 0) +#define ASIC3_GPIOC13_nPWAIT ASIC3_CONFIG_GPIO(45, 1, 1, 0) +#define ASIC3_GPIOC14_nPIOIS16 ASIC3_CONFIG_GPIO(46, 1, 1, 0) +#define ASIC3_GPIOC15_nPIOR ASIC3_CONFIG_GPIO(47, 1, 0, 0) +#define ASIC3_GPIOD4_CF_nCD ASIC3_CONFIG_GPIO(52, 1, 0, 0) +#define ASIC3_GPIOD11_nCIOIS16 ASIC3_CONFIG_GPIO(59, 1, 0, 0) +#define ASIC3_GPIOD12_nCWAIT ASIC3_CONFIG_GPIO(60, 1, 0, 0) +#define ASIC3_GPIOD15_nPIOW ASIC3_CONFIG_GPIO(63, 1, 0, 0) + + +#define ASIC3_SPI_Base 0x0400 +#define ASIC3_SPI_Control 0x0000 +#define ASIC3_SPI_TxData 0x0004 +#define ASIC3_SPI_RxData 0x0008 +#define ASIC3_SPI_Int 0x000c +#define ASIC3_SPI_Status 0x0010 + +#define SPI_CONTROL_SPR(clk) ((clk) & 0x0f) /* Clock rate */ + +#define ASIC3_PWM_0_Base 0x0500 +#define ASIC3_PWM_1_Base 0x0600 +#define ASIC3_PWM_TimeBase 0x0000 +#define ASIC3_PWM_PeriodTime 0x0004 +#define ASIC3_PWM_DutyTime 0x0008 + +#define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */ +#define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */ + +#define ASIC3_NUM_LEDS 3 +#define ASIC3_LED_0_Base 0x0700 +#define ASIC3_LED_1_Base 0x0800 +#define ASIC3_LED_2_Base 0x0900 +#define ASIC3_LED_TimeBase 0x0000 /* R/W 7 bits */ +#define ASIC3_LED_PeriodTime 0x0004 /* R/W 12 bits */ +#define ASIC3_LED_DutyTime 0x0008 /* R/W 12 bits */ +#define ASIC3_LED_AutoStopCount 0x000c /* R/W 16 bits */ + +/* LED TimeBase bits - match ASIC2 */ +#define LED_TBS 0x0f /* Low 4 bits sets time base, max = 13 */ + /* Note: max = 5 on hx4700 */ + /* 0: maximum time base */ + /* 1: maximum time base / 2 */ + /* n: maximum time base / 2^n */ + +#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */ 
+#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */ +#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */ + +#define ASIC3_CLOCK_BASE 0x0A00 +#define ASIC3_CLOCK_CDEX 0x00 +#define ASIC3_CLOCK_SEL 0x04 + +#define CLOCK_CDEX_SOURCE (1 << 0) /* 2 bits */ +#define CLOCK_CDEX_SOURCE0 (1 << 0) +#define CLOCK_CDEX_SOURCE1 (1 << 1) +#define CLOCK_CDEX_SPI (1 << 2) +#define CLOCK_CDEX_OWM (1 << 3) +#define CLOCK_CDEX_PWM0 (1 << 4) +#define CLOCK_CDEX_PWM1 (1 << 5) +#define CLOCK_CDEX_LED0 (1 << 6) +#define CLOCK_CDEX_LED1 (1 << 7) +#define CLOCK_CDEX_LED2 (1 << 8) + +/* Clocks settings: 1 for 24.576 MHz, 0 for 12.288Mhz */ +#define CLOCK_CDEX_SD_HOST (1 << 9) /* R/W: SD host clock source */ +#define CLOCK_CDEX_SD_BUS (1 << 10) /* R/W: SD bus clock source ctrl */ +#define CLOCK_CDEX_SMBUS (1 << 11) +#define CLOCK_CDEX_CONTROL_CX (1 << 12) + +#define CLOCK_CDEX_EX0 (1 << 13) /* R/W: 32.768 kHz crystal */ +#define CLOCK_CDEX_EX1 (1 << 14) /* R/W: 24.576 MHz crystal */ + +#define CLOCK_SEL_SD_HCLK_SEL (1 << 0) /* R/W: SDIO host clock select */ +#define CLOCK_SEL_SD_BCLK_SEL (1 << 1) /* R/W: SDIO bus clock select */ + +/* R/W: INT clock source control (32.768 kHz) */ +#define CLOCK_SEL_CX (1 << 2) + + +#define ASIC3_INTR_BASE 0x0B00 + +#define ASIC3_INTR_INT_MASK 0x00 /* Interrupt mask control */ +#define ASIC3_INTR_P_INT_STAT 0x04 /* Peripheral interrupt status */ +#define ASIC3_INTR_INT_CPS 0x08 /* Interrupt timer clock pre-scale */ +#define ASIC3_INTR_INT_TBS 0x0c /* Interrupt timer set */ + +#define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */ +#define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */ +#define ASIC3_INTMASK_MASK0 (1 << 2) +#define ASIC3_INTMASK_MASK1 (1 << 3) +#define ASIC3_INTMASK_MASK2 (1 << 4) +#define ASIC3_INTMASK_MASK3 (1 << 5) +#define ASIC3_INTMASK_MASK4 (1 << 6) +#define ASIC3_INTMASK_MASK5 (1 << 7) + +#define ASIC3_INTR_PERIPHERAL_A (1 << 0) +#define ASIC3_INTR_PERIPHERAL_B (1 << 1) +#define ASIC3_INTR_PERIPHERAL_C (1 << 2) +#define ASIC3_INTR_PERIPHERAL_D (1 << 3) +#define ASIC3_INTR_LED0 (1 << 4) +#define ASIC3_INTR_LED1 (1 << 5) +#define ASIC3_INTR_LED2 (1 << 6) +#define ASIC3_INTR_SPI (1 << 7) +#define ASIC3_INTR_SMBUS (1 << 8) +#define ASIC3_INTR_OWM (1 << 9) + +#define ASIC3_INTR_CPS(x) ((x)&0x0f) /* 4 bits, max 14 */ +#define ASIC3_INTR_CPS_SET (1 << 4) /* Time base enable */ + + +/* Basic control of the SD ASIC */ +#define ASIC3_SDHWCTRL_BASE 0x0E00 +#define ASIC3_SDHWCTRL_SDCONF 0x00 + +#define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */ +#define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */ +#define ASIC3_SDHWCTRL_PCLR (1 << 2) /* All registers of SDIO cleared */ +#define ASIC3_SDHWCTRL_LEVCD (1 << 3) /* SD card detection: 0:low */ + +/* SD card write protection: 0=high */ +#define ASIC3_SDHWCTRL_LEVWP (1 << 4) +#define ASIC3_SDHWCTRL_SDLED (1 << 5) /* SD card LED signal 0=disable */ + +/* SD card power supply ctrl 1=enable */ +#define ASIC3_SDHWCTRL_SDPWR (1 << 6) + +#define ASIC3_EXTCF_BASE 0x1100 + +#define ASIC3_EXTCF_SELECT 0x00 +#define ASIC3_EXTCF_RESET 0x04 + +#define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */ +#define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */ +#define ASIC3_EXTCF_SMOD2 (1 << 2) /* slot number of mode 2 */ +#define ASIC3_EXTCF_OWM_EN (1 << 4) /* enable onewire module */ +#define ASIC3_EXTCF_OWM_SMB (1 << 5) /* OWM bus selection */ +#define ASIC3_EXTCF_OWM_RESET (1 << 6) /* ?? 
used by OWM and CF */ +#define ASIC3_EXTCF_CF0_SLEEP_MODE (1 << 7) /* CF0 sleep state */ +#define ASIC3_EXTCF_CF1_SLEEP_MODE (1 << 8) /* CF1 sleep state */ +#define ASIC3_EXTCF_CF0_PWAIT_EN (1 << 10) /* CF0 PWAIT_n control */ +#define ASIC3_EXTCF_CF1_PWAIT_EN (1 << 11) /* CF1 PWAIT_n control */ +#define ASIC3_EXTCF_CF0_BUF_EN (1 << 12) /* CF0 buffer control */ +#define ASIC3_EXTCF_CF1_BUF_EN (1 << 13) /* CF1 buffer control */ +#define ASIC3_EXTCF_SD_MEM_ENABLE (1 << 14) +#define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */ + +/********************************************* + * The Onewire interface (DS1WM) is handled + * by the ds1wm driver. + * + *********************************************/ + +#define ASIC3_OWM_BASE 0xC00 + +/***************************************************************************** + * The SD configuration registers are at a completely different location + * in memory. They are divided into three sets of registers: + * + * SD_CONFIG Core configuration register + * SD_CTRL Control registers for SD operations + * SDIO_CTRL Control registers for SDIO operations + * + *****************************************************************************/ +#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */ +#define ASIC3_SD_CONFIG_SIZE 0x0200 /* Assumes 32 bit addressing */ +#define ASIC3_SD_CTRL_BASE 0x1000 +#define ASIC3_SDIO_CTRL_BASE 0x1200 + +#define ASIC3_MAP_SIZE_32BIT 0x2000 +#define ASIC3_MAP_SIZE_16BIT 0x1000 + +/* Functions needed by leds-asic3 */ + +struct asic3; +extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val); +extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg); + +#endif /* __ASIC3_H__ */ diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h new file mode 100644 index 000000000..1279ab164 --- /dev/null +++ b/include/linux/mfd/atmel-hlcdc.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2014 Free Electrons + * Copyright (C) 2014 Atmel + * + * Author: Boris BREZILLON + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __LINUX_MFD_HLCDC_H +#define __LINUX_MFD_HLCDC_H + +#include +#include + +#define ATMEL_HLCDC_CFG(i) ((i) * 0x4) +#define ATMEL_HLCDC_SIG_CFG LCDCFG(5) +#define ATMEL_HLCDC_HSPOL BIT(0) +#define ATMEL_HLCDC_VSPOL BIT(1) +#define ATMEL_HLCDC_VSPDLYS BIT(2) +#define ATMEL_HLCDC_VSPDLYE BIT(3) +#define ATMEL_HLCDC_DISPPOL BIT(4) +#define ATMEL_HLCDC_DITHER BIT(6) +#define ATMEL_HLCDC_DISPDLY BIT(7) +#define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8) +#define ATMEL_HLCDC_PP BIT(10) +#define ATMEL_HLCDC_VSPSU BIT(12) +#define ATMEL_HLCDC_VSPHO BIT(13) +#define ATMEL_HLCDC_GUARDTIME_MASK GENMASK(20, 16) + +#define ATMEL_HLCDC_EN 0x20 +#define ATMEL_HLCDC_DIS 0x24 +#define ATMEL_HLCDC_SR 0x28 +#define ATMEL_HLCDC_IER 0x2c +#define ATMEL_HLCDC_IDR 0x30 +#define ATMEL_HLCDC_IMR 0x34 +#define ATMEL_HLCDC_ISR 0x38 + +#define ATMEL_HLCDC_CLKPOL BIT(0) +#define ATMEL_HLCDC_CLKSEL BIT(2) +#define ATMEL_HLCDC_CLKPWMSEL BIT(3) +#define ATMEL_HLCDC_CGDIS(i) BIT(8 + (i)) +#define ATMEL_HLCDC_CLKDIV_SHFT 16 +#define ATMEL_HLCDC_CLKDIV_MASK GENMASK(23, 16) +#define ATMEL_HLCDC_CLKDIV(div) ((div - 2) << ATMEL_HLCDC_CLKDIV_SHFT) + +#define ATMEL_HLCDC_PIXEL_CLK BIT(0) +#define ATMEL_HLCDC_SYNC BIT(1) +#define ATMEL_HLCDC_DISP BIT(2) +#define ATMEL_HLCDC_PWM BIT(3) +#define ATMEL_HLCDC_SIP BIT(4) + +#define ATMEL_HLCDC_SOF BIT(0) +#define ATMEL_HLCDC_SYNCDIS BIT(1) +#define ATMEL_HLCDC_FIFOERR BIT(4) +#define ATMEL_HLCDC_LAYER_STATUS(x) BIT((x) + 8) + +/** + * Structure shared by the MFD device and its subdevices. + * + * @regmap: register map used to access HLCDC IP registers + * @periph_clk: the hlcdc peripheral clock + * @sys_clk: the hlcdc system clock + * @slow_clk: the system slow clk + * @irq: the hlcdc irq + */ +struct atmel_hlcdc { + struct regmap *regmap; + struct clk *periph_clk; + struct clk *sys_clk; + struct clk *slow_clk; + int irq; +}; + +#endif /* __LINUX_MFD_HLCDC_H */ diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h new file mode 100644 index 000000000..517e60eec --- /dev/null +++ b/include/linux/mfd/axp20x.h @@ -0,0 +1,703 @@ +/* + * Functions and registers to access AXP20X power management chip. + * + * Copyright (C) 2013, Carlo Caione + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_MFD_AXP20X_H +#define __LINUX_MFD_AXP20X_H + +#include + +enum axp20x_variants { + AXP152_ID = 0, + AXP202_ID, + AXP209_ID, + AXP221_ID, + AXP223_ID, + AXP288_ID, + AXP803_ID, + AXP806_ID, + AXP809_ID, + AXP813_ID, + NR_AXP20X_VARIANTS, +}; + +#define AXP20X_DATACACHE(m) (0x04 + (m)) + +/* Power supply */ +#define AXP152_PWR_OP_MODE 0x01 +#define AXP152_LDO3456_DC1234_CTRL 0x12 +#define AXP152_ALDO_OP_MODE 0x13 +#define AXP152_LDO0_CTRL 0x15 +#define AXP152_DCDC2_V_OUT 0x23 +#define AXP152_DCDC2_V_SCAL 0x25 +#define AXP152_DCDC1_V_OUT 0x26 +#define AXP152_DCDC3_V_OUT 0x27 +#define AXP152_ALDO12_V_OUT 0x28 +#define AXP152_DLDO1_V_OUT 0x29 +#define AXP152_DLDO2_V_OUT 0x2a +#define AXP152_DCDC4_V_OUT 0x2b +#define AXP152_V_OFF 0x31 +#define AXP152_OFF_CTRL 0x32 +#define AXP152_PEK_KEY 0x36 +#define AXP152_DCDC_FREQ 0x37 +#define AXP152_DCDC_MODE 0x80 + +#define AXP20X_PWR_INPUT_STATUS 0x00 +#define AXP20X_PWR_OP_MODE 0x01 +#define AXP20X_USB_OTG_STATUS 0x02 +#define AXP20X_PWR_OUT_CTRL 0x12 +#define AXP20X_DCDC2_V_OUT 0x23 +#define AXP20X_DCDC2_LDO3_V_SCAL 0x25 +#define AXP20X_DCDC3_V_OUT 0x27 +#define AXP20X_LDO24_V_OUT 0x28 +#define AXP20X_LDO3_V_OUT 0x29 +#define AXP20X_VBUS_IPSOUT_MGMT 0x30 +#define AXP20X_V_OFF 0x31 +#define AXP20X_OFF_CTRL 0x32 +#define AXP20X_CHRG_CTRL1 0x33 +#define AXP20X_CHRG_CTRL2 0x34 +#define AXP20X_CHRG_BAK_CTRL 0x35 +#define AXP20X_PEK_KEY 0x36 +#define AXP20X_DCDC_FREQ 0x37 +#define AXP20X_V_LTF_CHRG 0x38 +#define AXP20X_V_HTF_CHRG 0x39 +#define AXP20X_APS_WARN_L1 0x3a +#define AXP20X_APS_WARN_L2 0x3b +#define AXP20X_V_LTF_DISCHRG 0x3c +#define AXP20X_V_HTF_DISCHRG 0x3d + +#define AXP22X_PWR_OUT_CTRL1 0x10 +#define AXP22X_PWR_OUT_CTRL2 0x12 +#define AXP22X_PWR_OUT_CTRL3 0x13 +#define AXP22X_DLDO1_V_OUT 0x15 +#define AXP22X_DLDO2_V_OUT 0x16 +#define AXP22X_DLDO3_V_OUT 0x17 +#define AXP22X_DLDO4_V_OUT 0x18 +#define AXP22X_ELDO1_V_OUT 0x19 +#define AXP22X_ELDO2_V_OUT 0x1a +#define AXP22X_ELDO3_V_OUT 0x1b +#define AXP22X_DC5LDO_V_OUT 0x1c +#define AXP22X_DCDC1_V_OUT 0x21 +#define AXP22X_DCDC2_V_OUT 0x22 +#define AXP22X_DCDC3_V_OUT 0x23 +#define AXP22X_DCDC4_V_OUT 0x24 +#define AXP22X_DCDC5_V_OUT 0x25 +#define AXP22X_DCDC23_V_RAMP_CTRL 0x27 +#define AXP22X_ALDO1_V_OUT 0x28 +#define AXP22X_ALDO2_V_OUT 0x29 +#define AXP22X_ALDO3_V_OUT 0x2a +#define AXP22X_CHRG_CTRL3 0x35 + +#define AXP806_STARTUP_SRC 0x00 +#define AXP806_CHIP_ID 0x03 +#define AXP806_PWR_OUT_CTRL1 0x10 +#define AXP806_PWR_OUT_CTRL2 0x11 +#define AXP806_DCDCA_V_CTRL 0x12 +#define AXP806_DCDCB_V_CTRL 0x13 +#define AXP806_DCDCC_V_CTRL 0x14 +#define AXP806_DCDCD_V_CTRL 0x15 +#define AXP806_DCDCE_V_CTRL 0x16 +#define AXP806_ALDO1_V_CTRL 0x17 +#define AXP806_ALDO2_V_CTRL 0x18 +#define AXP806_ALDO3_V_CTRL 0x19 +#define AXP806_DCDC_MODE_CTRL1 0x1a +#define AXP806_DCDC_MODE_CTRL2 0x1b +#define AXP806_DCDC_FREQ_CTRL 0x1c +#define AXP806_BLDO1_V_CTRL 0x20 +#define AXP806_BLDO2_V_CTRL 0x21 +#define AXP806_BLDO3_V_CTRL 0x22 +#define AXP806_BLDO4_V_CTRL 0x23 +#define AXP806_CLDO1_V_CTRL 0x24 +#define AXP806_CLDO2_V_CTRL 0x25 +#define AXP806_CLDO3_V_CTRL 0x26 +#define AXP806_VREF_TEMP_WARN_L 0xf3 +#define AXP806_BUS_ADDR_EXT 0xfe +#define AXP806_REG_ADDR_EXT 0xff + +#define AXP803_POLYPHASE_CTRL 0x14 +#define AXP803_FLDO1_V_OUT 0x1c +#define AXP803_FLDO2_V_OUT 0x1d +#define AXP803_DCDC1_V_OUT 0x20 +#define AXP803_DCDC2_V_OUT 0x21 +#define AXP803_DCDC3_V_OUT 0x22 +#define AXP803_DCDC4_V_OUT 0x23 +#define AXP803_DCDC5_V_OUT 0x24 +#define AXP803_DCDC6_V_OUT 0x25 +#define AXP803_DCDC_FREQ_CTRL 
0x3b + +/* Other DCDC regulator control registers are the same as AXP803 */ +#define AXP813_DCDC7_V_OUT 0x26 + +/* Interrupt */ +#define AXP152_IRQ1_EN 0x40 +#define AXP152_IRQ2_EN 0x41 +#define AXP152_IRQ3_EN 0x42 +#define AXP152_IRQ1_STATE 0x48 +#define AXP152_IRQ2_STATE 0x49 +#define AXP152_IRQ3_STATE 0x4a + +#define AXP20X_IRQ1_EN 0x40 +#define AXP20X_IRQ2_EN 0x41 +#define AXP20X_IRQ3_EN 0x42 +#define AXP20X_IRQ4_EN 0x43 +#define AXP20X_IRQ5_EN 0x44 +#define AXP20X_IRQ6_EN 0x45 +#define AXP20X_IRQ1_STATE 0x48 +#define AXP20X_IRQ2_STATE 0x49 +#define AXP20X_IRQ3_STATE 0x4a +#define AXP20X_IRQ4_STATE 0x4b +#define AXP20X_IRQ5_STATE 0x4c +#define AXP20X_IRQ6_STATE 0x4d + +/* ADC */ +#define AXP20X_ACIN_V_ADC_H 0x56 +#define AXP20X_ACIN_V_ADC_L 0x57 +#define AXP20X_ACIN_I_ADC_H 0x58 +#define AXP20X_ACIN_I_ADC_L 0x59 +#define AXP20X_VBUS_V_ADC_H 0x5a +#define AXP20X_VBUS_V_ADC_L 0x5b +#define AXP20X_VBUS_I_ADC_H 0x5c +#define AXP20X_VBUS_I_ADC_L 0x5d +#define AXP20X_TEMP_ADC_H 0x5e +#define AXP20X_TEMP_ADC_L 0x5f +#define AXP20X_TS_IN_H 0x62 +#define AXP20X_TS_IN_L 0x63 +#define AXP20X_GPIO0_V_ADC_H 0x64 +#define AXP20X_GPIO0_V_ADC_L 0x65 +#define AXP20X_GPIO1_V_ADC_H 0x66 +#define AXP20X_GPIO1_V_ADC_L 0x67 +#define AXP20X_PWR_BATT_H 0x70 +#define AXP20X_PWR_BATT_M 0x71 +#define AXP20X_PWR_BATT_L 0x72 +#define AXP20X_BATT_V_H 0x78 +#define AXP20X_BATT_V_L 0x79 +#define AXP20X_BATT_CHRG_I_H 0x7a +#define AXP20X_BATT_CHRG_I_L 0x7b +#define AXP20X_BATT_DISCHRG_I_H 0x7c +#define AXP20X_BATT_DISCHRG_I_L 0x7d +#define AXP20X_IPSOUT_V_HIGH_H 0x7e +#define AXP20X_IPSOUT_V_HIGH_L 0x7f + +/* Power supply */ +#define AXP20X_DCDC_MODE 0x80 +#define AXP20X_ADC_EN1 0x82 +#define AXP20X_ADC_EN2 0x83 +#define AXP20X_ADC_RATE 0x84 +#define AXP20X_GPIO10_IN_RANGE 0x85 +#define AXP20X_GPIO1_ADC_IRQ_RIS 0x86 +#define AXP20X_GPIO1_ADC_IRQ_FAL 0x87 +#define AXP20X_TIMER_CTRL 0x8a +#define AXP20X_VBUS_MON 0x8b +#define AXP20X_OVER_TMP 0x8f + +#define AXP22X_PWREN_CTRL1 0x8c +#define AXP22X_PWREN_CTRL2 0x8d + +/* GPIO */ +#define AXP152_GPIO0_CTRL 0x90 +#define AXP152_GPIO1_CTRL 0x91 +#define AXP152_GPIO2_CTRL 0x92 +#define AXP152_GPIO3_CTRL 0x93 +#define AXP152_LDOGPIO2_V_OUT 0x96 +#define AXP152_GPIO_INPUT 0x97 +#define AXP152_PWM0_FREQ_X 0x98 +#define AXP152_PWM0_FREQ_Y 0x99 +#define AXP152_PWM0_DUTY_CYCLE 0x9a +#define AXP152_PWM1_FREQ_X 0x9b +#define AXP152_PWM1_FREQ_Y 0x9c +#define AXP152_PWM1_DUTY_CYCLE 0x9d + +#define AXP20X_GPIO0_CTRL 0x90 +#define AXP20X_LDO5_V_OUT 0x91 +#define AXP20X_GPIO1_CTRL 0x92 +#define AXP20X_GPIO2_CTRL 0x93 +#define AXP20X_GPIO20_SS 0x94 +#define AXP20X_GPIO3_CTRL 0x95 + +#define AXP22X_LDO_IO0_V_OUT 0x91 +#define AXP22X_LDO_IO1_V_OUT 0x93 +#define AXP22X_GPIO_STATE 0x94 +#define AXP22X_GPIO_PULL_DOWN 0x95 + +/* Battery */ +#define AXP20X_CHRG_CC_31_24 0xb0 +#define AXP20X_CHRG_CC_23_16 0xb1 +#define AXP20X_CHRG_CC_15_8 0xb2 +#define AXP20X_CHRG_CC_7_0 0xb3 +#define AXP20X_DISCHRG_CC_31_24 0xb4 +#define AXP20X_DISCHRG_CC_23_16 0xb5 +#define AXP20X_DISCHRG_CC_15_8 0xb6 +#define AXP20X_DISCHRG_CC_7_0 0xb7 +#define AXP20X_CC_CTRL 0xb8 +#define AXP20X_FG_RES 0xb9 + +/* OCV */ +#define AXP20X_RDC_H 0xba +#define AXP20X_RDC_L 0xbb +#define AXP20X_OCV(m) (0xc0 + (m)) +#define AXP20X_OCV_MAX 0xf + +/* AXP22X specific registers */ +#define AXP22X_PMIC_TEMP_H 0x56 +#define AXP22X_PMIC_TEMP_L 0x57 +#define AXP22X_TS_ADC_H 0x58 +#define AXP22X_TS_ADC_L 0x59 +#define AXP22X_BATLOW_THRES1 0xe6 + +/* AXP288/AXP803 specific registers */ +#define AXP288_POWER_REASON 0x02 +#define 
AXP288_BC_GLOBAL 0x2c +#define AXP288_BC_VBUS_CNTL 0x2d +#define AXP288_BC_USB_STAT 0x2e +#define AXP288_BC_DET_STAT 0x2f +#define AXP288_PMIC_ADC_H 0x56 +#define AXP288_PMIC_ADC_L 0x57 +#define AXP288_TS_ADC_H 0x58 +#define AXP288_TS_ADC_L 0x59 +#define AXP288_GP_ADC_H 0x5a +#define AXP288_GP_ADC_L 0x5b +#define AXP288_ADC_TS_PIN_CTRL 0x84 +#define AXP288_RT_BATT_V_H 0xa0 +#define AXP288_RT_BATT_V_L 0xa1 + +#define AXP813_ADC_RATE 0x85 + +/* Fuel Gauge */ +#define AXP288_FG_RDC1_REG 0xba +#define AXP288_FG_RDC0_REG 0xbb +#define AXP288_FG_OCVH_REG 0xbc +#define AXP288_FG_OCVL_REG 0xbd +#define AXP288_FG_OCV_CURVE_REG 0xc0 +#define AXP288_FG_DES_CAP1_REG 0xe0 +#define AXP288_FG_DES_CAP0_REG 0xe1 +#define AXP288_FG_CC_MTR1_REG 0xe2 +#define AXP288_FG_CC_MTR0_REG 0xe3 +#define AXP288_FG_OCV_CAP_REG 0xe4 +#define AXP288_FG_CC_CAP_REG 0xe5 +#define AXP288_FG_LOW_CAP_REG 0xe6 +#define AXP288_FG_TUNE0 0xe8 +#define AXP288_FG_TUNE1 0xe9 +#define AXP288_FG_TUNE2 0xea +#define AXP288_FG_TUNE3 0xeb +#define AXP288_FG_TUNE4 0xec +#define AXP288_FG_TUNE5 0xed + +/* Regulators IDs */ +enum { + AXP20X_LDO1 = 0, + AXP20X_LDO2, + AXP20X_LDO3, + AXP20X_LDO4, + AXP20X_LDO5, + AXP20X_DCDC2, + AXP20X_DCDC3, + AXP20X_REG_ID_MAX, +}; + +enum { + AXP22X_DCDC1 = 0, + AXP22X_DCDC2, + AXP22X_DCDC3, + AXP22X_DCDC4, + AXP22X_DCDC5, + AXP22X_DC1SW, + AXP22X_DC5LDO, + AXP22X_ALDO1, + AXP22X_ALDO2, + AXP22X_ALDO3, + AXP22X_ELDO1, + AXP22X_ELDO2, + AXP22X_ELDO3, + AXP22X_DLDO1, + AXP22X_DLDO2, + AXP22X_DLDO3, + AXP22X_DLDO4, + AXP22X_RTC_LDO, + AXP22X_LDO_IO0, + AXP22X_LDO_IO1, + AXP22X_REG_ID_MAX, +}; + +enum { + AXP806_DCDCA = 0, + AXP806_DCDCB, + AXP806_DCDCC, + AXP806_DCDCD, + AXP806_DCDCE, + AXP806_ALDO1, + AXP806_ALDO2, + AXP806_ALDO3, + AXP806_BLDO1, + AXP806_BLDO2, + AXP806_BLDO3, + AXP806_BLDO4, + AXP806_CLDO1, + AXP806_CLDO2, + AXP806_CLDO3, + AXP806_SW, + AXP806_REG_ID_MAX, +}; + +enum { + AXP809_DCDC1 = 0, + AXP809_DCDC2, + AXP809_DCDC3, + AXP809_DCDC4, + AXP809_DCDC5, + AXP809_DC1SW, + AXP809_DC5LDO, + AXP809_ALDO1, + AXP809_ALDO2, + AXP809_ALDO3, + AXP809_ELDO1, + AXP809_ELDO2, + AXP809_ELDO3, + AXP809_DLDO1, + AXP809_DLDO2, + AXP809_RTC_LDO, + AXP809_LDO_IO0, + AXP809_LDO_IO1, + AXP809_SW, + AXP809_REG_ID_MAX, +}; + +enum { + AXP803_DCDC1 = 0, + AXP803_DCDC2, + AXP803_DCDC3, + AXP803_DCDC4, + AXP803_DCDC5, + AXP803_DCDC6, + AXP803_DC1SW, + AXP803_ALDO1, + AXP803_ALDO2, + AXP803_ALDO3, + AXP803_DLDO1, + AXP803_DLDO2, + AXP803_DLDO3, + AXP803_DLDO4, + AXP803_ELDO1, + AXP803_ELDO2, + AXP803_ELDO3, + AXP803_FLDO1, + AXP803_FLDO2, + AXP803_RTC_LDO, + AXP803_LDO_IO0, + AXP803_LDO_IO1, + AXP803_REG_ID_MAX, +}; + +enum { + AXP813_DCDC1 = 0, + AXP813_DCDC2, + AXP813_DCDC3, + AXP813_DCDC4, + AXP813_DCDC5, + AXP813_DCDC6, + AXP813_DCDC7, + AXP813_ALDO1, + AXP813_ALDO2, + AXP813_ALDO3, + AXP813_DLDO1, + AXP813_DLDO2, + AXP813_DLDO3, + AXP813_DLDO4, + AXP813_ELDO1, + AXP813_ELDO2, + AXP813_ELDO3, + AXP813_FLDO1, + AXP813_FLDO2, + AXP813_FLDO3, + AXP813_RTC_LDO, + AXP813_LDO_IO0, + AXP813_LDO_IO1, + AXP813_SW, + AXP813_REG_ID_MAX, +}; + +/* IRQs */ +enum { + AXP152_IRQ_LDO0IN_CONNECT = 1, + AXP152_IRQ_LDO0IN_REMOVAL, + AXP152_IRQ_ALDO0IN_CONNECT, + AXP152_IRQ_ALDO0IN_REMOVAL, + AXP152_IRQ_DCDC1_V_LOW, + AXP152_IRQ_DCDC2_V_LOW, + AXP152_IRQ_DCDC3_V_LOW, + AXP152_IRQ_DCDC4_V_LOW, + AXP152_IRQ_PEK_SHORT, + AXP152_IRQ_PEK_LONG, + AXP152_IRQ_TIMER, + AXP152_IRQ_PEK_RIS_EDGE, + AXP152_IRQ_PEK_FAL_EDGE, + AXP152_IRQ_GPIO3_INPUT, + AXP152_IRQ_GPIO2_INPUT, + AXP152_IRQ_GPIO1_INPUT, + AXP152_IRQ_GPIO0_INPUT, +}; + +enum { + 
AXP20X_IRQ_ACIN_OVER_V = 1, + AXP20X_IRQ_ACIN_PLUGIN, + AXP20X_IRQ_ACIN_REMOVAL, + AXP20X_IRQ_VBUS_OVER_V, + AXP20X_IRQ_VBUS_PLUGIN, + AXP20X_IRQ_VBUS_REMOVAL, + AXP20X_IRQ_VBUS_V_LOW, + AXP20X_IRQ_BATT_PLUGIN, + AXP20X_IRQ_BATT_REMOVAL, + AXP20X_IRQ_BATT_ENT_ACT_MODE, + AXP20X_IRQ_BATT_EXIT_ACT_MODE, + AXP20X_IRQ_CHARG, + AXP20X_IRQ_CHARG_DONE, + AXP20X_IRQ_BATT_TEMP_HIGH, + AXP20X_IRQ_BATT_TEMP_LOW, + AXP20X_IRQ_DIE_TEMP_HIGH, + AXP20X_IRQ_CHARG_I_LOW, + AXP20X_IRQ_DCDC1_V_LONG, + AXP20X_IRQ_DCDC2_V_LONG, + AXP20X_IRQ_DCDC3_V_LONG, + AXP20X_IRQ_PEK_SHORT = 22, + AXP20X_IRQ_PEK_LONG, + AXP20X_IRQ_N_OE_PWR_ON, + AXP20X_IRQ_N_OE_PWR_OFF, + AXP20X_IRQ_VBUS_VALID, + AXP20X_IRQ_VBUS_NOT_VALID, + AXP20X_IRQ_VBUS_SESS_VALID, + AXP20X_IRQ_VBUS_SESS_END, + AXP20X_IRQ_LOW_PWR_LVL1, + AXP20X_IRQ_LOW_PWR_LVL2, + AXP20X_IRQ_TIMER, + AXP20X_IRQ_PEK_RIS_EDGE, + AXP20X_IRQ_PEK_FAL_EDGE, + AXP20X_IRQ_GPIO3_INPUT, + AXP20X_IRQ_GPIO2_INPUT, + AXP20X_IRQ_GPIO1_INPUT, + AXP20X_IRQ_GPIO0_INPUT, +}; + +enum axp22x_irqs { + AXP22X_IRQ_ACIN_OVER_V = 1, + AXP22X_IRQ_ACIN_PLUGIN, + AXP22X_IRQ_ACIN_REMOVAL, + AXP22X_IRQ_VBUS_OVER_V, + AXP22X_IRQ_VBUS_PLUGIN, + AXP22X_IRQ_VBUS_REMOVAL, + AXP22X_IRQ_VBUS_V_LOW, + AXP22X_IRQ_BATT_PLUGIN, + AXP22X_IRQ_BATT_REMOVAL, + AXP22X_IRQ_BATT_ENT_ACT_MODE, + AXP22X_IRQ_BATT_EXIT_ACT_MODE, + AXP22X_IRQ_CHARG, + AXP22X_IRQ_CHARG_DONE, + AXP22X_IRQ_BATT_TEMP_HIGH, + AXP22X_IRQ_BATT_TEMP_LOW, + AXP22X_IRQ_DIE_TEMP_HIGH, + AXP22X_IRQ_PEK_SHORT, + AXP22X_IRQ_PEK_LONG, + AXP22X_IRQ_LOW_PWR_LVL1, + AXP22X_IRQ_LOW_PWR_LVL2, + AXP22X_IRQ_TIMER, + AXP22X_IRQ_PEK_RIS_EDGE, + AXP22X_IRQ_PEK_FAL_EDGE, + AXP22X_IRQ_GPIO1_INPUT, + AXP22X_IRQ_GPIO0_INPUT, +}; + +enum axp288_irqs { + AXP288_IRQ_VBUS_FALL = 2, + AXP288_IRQ_VBUS_RISE, + AXP288_IRQ_OV, + AXP288_IRQ_FALLING_ALT, + AXP288_IRQ_RISING_ALT, + AXP288_IRQ_OV_ALT, + AXP288_IRQ_DONE = 10, + AXP288_IRQ_CHARGING, + AXP288_IRQ_SAFE_QUIT, + AXP288_IRQ_SAFE_ENTER, + AXP288_IRQ_ABSENT, + AXP288_IRQ_APPEND, + AXP288_IRQ_QWBTU, + AXP288_IRQ_WBTU, + AXP288_IRQ_QWBTO, + AXP288_IRQ_WBTO, + AXP288_IRQ_QCBTU, + AXP288_IRQ_CBTU, + AXP288_IRQ_QCBTO, + AXP288_IRQ_CBTO, + AXP288_IRQ_WL2, + AXP288_IRQ_WL1, + AXP288_IRQ_GPADC, + AXP288_IRQ_OT = 31, + AXP288_IRQ_GPIO0, + AXP288_IRQ_GPIO1, + AXP288_IRQ_POKO, + AXP288_IRQ_POKL, + AXP288_IRQ_POKS, + AXP288_IRQ_POKN, + AXP288_IRQ_POKP, + AXP288_IRQ_TIMER, + AXP288_IRQ_MV_CHNG, + AXP288_IRQ_BC_USB_CHNG, +}; + +enum axp803_irqs { + AXP803_IRQ_ACIN_OVER_V = 1, + AXP803_IRQ_ACIN_PLUGIN, + AXP803_IRQ_ACIN_REMOVAL, + AXP803_IRQ_VBUS_OVER_V, + AXP803_IRQ_VBUS_PLUGIN, + AXP803_IRQ_VBUS_REMOVAL, + AXP803_IRQ_BATT_PLUGIN, + AXP803_IRQ_BATT_REMOVAL, + AXP803_IRQ_BATT_ENT_ACT_MODE, + AXP803_IRQ_BATT_EXIT_ACT_MODE, + AXP803_IRQ_CHARG, + AXP803_IRQ_CHARG_DONE, + AXP803_IRQ_BATT_CHG_TEMP_HIGH, + AXP803_IRQ_BATT_CHG_TEMP_HIGH_END, + AXP803_IRQ_BATT_CHG_TEMP_LOW, + AXP803_IRQ_BATT_CHG_TEMP_LOW_END, + AXP803_IRQ_BATT_ACT_TEMP_HIGH, + AXP803_IRQ_BATT_ACT_TEMP_HIGH_END, + AXP803_IRQ_BATT_ACT_TEMP_LOW, + AXP803_IRQ_BATT_ACT_TEMP_LOW_END, + AXP803_IRQ_DIE_TEMP_HIGH, + AXP803_IRQ_GPADC, + AXP803_IRQ_LOW_PWR_LVL1, + AXP803_IRQ_LOW_PWR_LVL2, + AXP803_IRQ_TIMER, + AXP803_IRQ_PEK_RIS_EDGE, + AXP803_IRQ_PEK_FAL_EDGE, + AXP803_IRQ_PEK_SHORT, + AXP803_IRQ_PEK_LONG, + AXP803_IRQ_PEK_OVER_OFF, + AXP803_IRQ_GPIO1_INPUT, + AXP803_IRQ_GPIO0_INPUT, + AXP803_IRQ_BC_USB_CHNG, + AXP803_IRQ_MV_CHNG, +}; + +enum axp806_irqs { + AXP806_IRQ_DIE_TEMP_HIGH_LV1, + AXP806_IRQ_DIE_TEMP_HIGH_LV2, + AXP806_IRQ_DCDCA_V_LOW, + AXP806_IRQ_DCDCB_V_LOW, + 
AXP806_IRQ_DCDCC_V_LOW, + AXP806_IRQ_DCDCD_V_LOW, + AXP806_IRQ_DCDCE_V_LOW, + AXP806_IRQ_POK_LONG, + AXP806_IRQ_POK_SHORT, + AXP806_IRQ_WAKEUP, + AXP806_IRQ_POK_FALL, + AXP806_IRQ_POK_RISE, +}; + +enum axp809_irqs { + AXP809_IRQ_ACIN_OVER_V = 1, + AXP809_IRQ_ACIN_PLUGIN, + AXP809_IRQ_ACIN_REMOVAL, + AXP809_IRQ_VBUS_OVER_V, + AXP809_IRQ_VBUS_PLUGIN, + AXP809_IRQ_VBUS_REMOVAL, + AXP809_IRQ_VBUS_V_LOW, + AXP809_IRQ_BATT_PLUGIN, + AXP809_IRQ_BATT_REMOVAL, + AXP809_IRQ_BATT_ENT_ACT_MODE, + AXP809_IRQ_BATT_EXIT_ACT_MODE, + AXP809_IRQ_CHARG, + AXP809_IRQ_CHARG_DONE, + AXP809_IRQ_BATT_CHG_TEMP_HIGH, + AXP809_IRQ_BATT_CHG_TEMP_HIGH_END, + AXP809_IRQ_BATT_CHG_TEMP_LOW, + AXP809_IRQ_BATT_CHG_TEMP_LOW_END, + AXP809_IRQ_BATT_ACT_TEMP_HIGH, + AXP809_IRQ_BATT_ACT_TEMP_HIGH_END, + AXP809_IRQ_BATT_ACT_TEMP_LOW, + AXP809_IRQ_BATT_ACT_TEMP_LOW_END, + AXP809_IRQ_DIE_TEMP_HIGH, + AXP809_IRQ_LOW_PWR_LVL1, + AXP809_IRQ_LOW_PWR_LVL2, + AXP809_IRQ_TIMER, + AXP809_IRQ_PEK_RIS_EDGE, + AXP809_IRQ_PEK_FAL_EDGE, + AXP809_IRQ_PEK_SHORT, + AXP809_IRQ_PEK_LONG, + AXP809_IRQ_PEK_OVER_OFF, + AXP809_IRQ_GPIO1_INPUT, + AXP809_IRQ_GPIO0_INPUT, +}; + +struct axp20x_dev { + struct device *dev; + int irq; + unsigned long irq_flags; + struct regmap *regmap; + struct regmap_irq_chip_data *regmap_irqc; + long variant; + int nr_cells; + const struct mfd_cell *cells; + const struct regmap_config *regmap_cfg; + const struct regmap_irq_chip *regmap_irq_chip; +}; + +/* generic helper function for reading 9-16 bit wide regs */ +static inline int axp20x_read_variable_width(struct regmap *regmap, + unsigned int reg, unsigned int width) +{ + unsigned int reg_val, result; + int err; + + err = regmap_read(regmap, reg, ®_val); + if (err) + return err; + + result = reg_val << (width - 8); + + err = regmap_read(regmap, reg + 1, ®_val); + if (err) + return err; + + result |= reg_val; + + return result; +} + +/** + * axp20x_match_device(): Setup axp20x variant related fields + * + * @axp20x: axp20x device to setup (.dev field must be set) + * @dev: device associated with this axp20x device + * + * This lets the axp20x core configure the mfd cells and register maps + * for later use. + */ +int axp20x_match_device(struct axp20x_dev *axp20x); + +/** + * axp20x_device_probe(): Probe a configured axp20x device + * + * @axp20x: axp20x device to probe (must be configured) + * + * This function lets the axp20x core register the axp20x mfd devices + * and irqchip. The axp20x device passed in must be fully configured + * with axp20x_match_device, its irq set, and regmap created. + */ +int axp20x_device_probe(struct axp20x_dev *axp20x); + +/** + * axp20x_device_remove(): Remove a axp20x device + * + * @axp20x: axp20x device to remove + * + * This tells the axp20x core to remove the associated mfd devices + */ +int axp20x_device_remove(struct axp20x_dev *axp20x); + +#endif /* __LINUX_MFD_AXP20X_H */ diff --git a/include/linux/mfd/bcm590xx.h b/include/linux/mfd/bcm590xx.h new file mode 100644 index 000000000..267aedee1 --- /dev/null +++ b/include/linux/mfd/bcm590xx.h @@ -0,0 +1,34 @@ +/* + * Broadcom BCM590xx PMU + * + * Copyright 2014 Linaro Limited + * Author: Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __LINUX_MFD_BCM590XX_H +#define __LINUX_MFD_BCM590XX_H + +#include +#include +#include + +/* max register address */ +#define BCM590XX_MAX_REGISTER_PRI 0xe7 +#define BCM590XX_MAX_REGISTER_SEC 0xf0 + +struct bcm590xx { + struct device *dev; + struct i2c_client *i2c_pri; + struct i2c_client *i2c_sec; + struct regmap *regmap_pri; + struct regmap *regmap_sec; + unsigned int id; +}; + +#endif /* __LINUX_MFD_BCM590XX_H */ diff --git a/include/linux/mfd/bd9571mwv.h b/include/linux/mfd/bd9571mwv.h new file mode 100644 index 000000000..eb05569f7 --- /dev/null +++ b/include/linux/mfd/bd9571mwv.h @@ -0,0 +1,120 @@ +/* + * ROHM BD9571MWV-M driver + * + * Copyright (C) 2017 Marek Vasut + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether expressed or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License version 2 for more details. + * + * Based on the TPS65086 driver + */ + +#ifndef __LINUX_MFD_BD9571MWV_H +#define __LINUX_MFD_BD9571MWV_H + +#include +#include + +/* List of registers for BD9571MWV */ +#define BD9571MWV_VENDOR_CODE 0x00 +#define BD9571MWV_VENDOR_CODE_VAL 0xdb +#define BD9571MWV_PRODUCT_CODE 0x01 +#define BD9571MWV_PRODUCT_CODE_VAL 0x60 +#define BD9571MWV_PRODUCT_REVISION 0x02 + +#define BD9571MWV_I2C_FUSA_MODE 0x10 +#define BD9571MWV_I2C_MD2_E1_BIT_1 0x11 +#define BD9571MWV_I2C_MD2_E1_BIT_2 0x12 + +#define BD9571MWV_BKUP_MODE_CNT 0x20 +#define BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK GENMASK(3, 0) +#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0 BIT(0) +#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1 BIT(1) +#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0C BIT(2) +#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1C BIT(3) +#define BD9571MWV_BKUP_MODE_STATUS 0x21 +#define BD9571MWV_BKUP_RECOVERY_CNT 0x22 +#define BD9571MWV_BKUP_CTRL_TIM_CNT 0x23 +#define BD9571MWV_WAITBKUP_WDT_CNT 0x24 +#define BD9571MWV_128H_TIM_CNT 0x26 +#define BD9571MWV_QLLM_CNT 0x27 + +#define BD9571MWV_AVS_SET_MONI 0x31 +#define BD9571MWV_AVS_SET_MONI_MASK 0x3 +#define BD9571MWV_AVS_VD09_VID(n) (0x32 + (n)) +#define BD9571MWV_AVS_DVFS_VID(n) (0x36 + (n)) + +#define BD9571MWV_VD18_VID 0x42 +#define BD9571MWV_VD25_VID 0x43 +#define BD9571MWV_VD33_VID 0x44 + +#define BD9571MWV_DVFS_VINIT 0x50 +#define BD9571MWV_DVFS_SETVMAX 0x52 +#define BD9571MWV_DVFS_BOOSTVID 0x53 +#define BD9571MWV_DVFS_SETVID 0x54 +#define BD9571MWV_DVFS_MONIVDAC 0x55 +#define BD9571MWV_DVFS_PGD_CNT 0x56 + +#define BD9571MWV_GPIO_DIR 0x60 +#define BD9571MWV_GPIO_OUT 0x61 +#define BD9571MWV_GPIO_IN 0x62 +#define BD9571MWV_GPIO_DEB 0x63 +#define BD9571MWV_GPIO_INT_SET 0x64 +#define BD9571MWV_GPIO_INT 0x65 +#define BD9571MWV_GPIO_INTMASK 0x66 + +#define BD9571MWV_REG_KEEP(n) (0x70 + (n)) + +#define BD9571MWV_PMIC_INTERNAL_STATUS 0x80 +#define BD9571MWV_PROT_ERROR_STATUS0 0x81 +#define BD9571MWV_PROT_ERROR_STATUS1 0x82 +#define BD9571MWV_PROT_ERROR_STATUS2 0x83 +#define BD9571MWV_PROT_ERROR_STATUS3 0x84 +#define BD9571MWV_PROT_ERROR_STATUS4 0x85 + +#define BD9571MWV_INT_INTREQ 0x90 +#define BD9571MWV_INT_INTREQ_MD1_INT BIT(0) +#define BD9571MWV_INT_INTREQ_MD2_E1_INT BIT(1) +#define BD9571MWV_INT_INTREQ_MD2_E2_INT BIT(2) +#define BD9571MWV_INT_INTREQ_PROT_ERR_INT BIT(3) +#define BD9571MWV_INT_INTREQ_GP_INT BIT(4) +#define 
BD9571MWV_INT_INTREQ_128H_OF_INT BIT(5) +#define BD9571MWV_INT_INTREQ_WDT_OF_INT BIT(6) +#define BD9571MWV_INT_INTREQ_BKUP_TRG_INT BIT(7) +#define BD9571MWV_INT_INTMASK 0x91 + +#define BD9571MWV_ACCESS_KEY 0xff + +/* Define the BD9571MWV IRQ numbers */ +enum bd9571mwv_irqs { + BD9571MWV_IRQ_MD1, + BD9571MWV_IRQ_MD2_E1, + BD9571MWV_IRQ_MD2_E2, + BD9571MWV_IRQ_PROT_ERR, + BD9571MWV_IRQ_GP, + BD9571MWV_IRQ_128H_OF, + BD9571MWV_IRQ_WDT_OF, + BD9571MWV_IRQ_BKUP_TRG, +}; + +/** + * struct bd9571mwv - state holder for the bd9571mwv driver + * + * Device data may be used to access the BD9571MWV chip + */ +struct bd9571mwv { + struct device *dev; + struct regmap *regmap; + + /* IRQ Data */ + int irq; + struct regmap_irq_chip_data *irq_data; +}; + +#endif /* __LINUX_MFD_BD9571MWV_H */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h new file mode 100644 index 000000000..99c0395fe --- /dev/null +++ b/include/linux/mfd/core.h @@ -0,0 +1,138 @@ +/* + * drivers/mfd/mfd-core.h + * + * core MFD support + * Copyright (c) 2006 Ian Molton + * Copyright (c) 2007 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef MFD_CORE_H +#define MFD_CORE_H + +#include + +struct irq_domain; +struct property_entry; + +/* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */ +struct mfd_cell_acpi_match { + const char *pnpid; + const unsigned long long adr; +}; + +/* + * This struct describes the MFD part ("cell"). + * After registration the copy of this structure will become the platform data + * of the resulting platform_device + */ +struct mfd_cell { + const char *name; + int id; + + /* refcounting for multiple drivers to use a single cell */ + atomic_t *usage_count; + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + /* platform data passed to the sub devices drivers */ + void *platform_data; + size_t pdata_size; + + /* device properties passed to the sub devices drivers */ + struct property_entry *properties; + + /* + * Device Tree compatible string + * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details + */ + const char *of_compatible; + + /* Matches ACPI */ + const struct mfd_cell_acpi_match *acpi_match; + + /* + * These resources can be specified relative to the parent device. + * For accessing hardware you should use resources from the platform dev + */ + int num_resources; + const struct resource *resources; + + /* don't check for resource conflicts */ + bool ignore_resource_conflicts; + + /* + * Disable runtime PM callbacks for this subdevice - see + * pm_runtime_no_callbacks(). + */ + bool pm_runtime_no_callbacks; + + /* A list of regulator supplies that should be mapped to the MFD + * device rather than the child device when requested + */ + const char * const *parent_supplies; + int num_parent_supplies; +}; + +/* + * Convenience functions for clients using shared cells. Refcounting + * happens automatically, with the cell's enable/disable callbacks + * being called only when a device is first being enabled or no other + * clients are making use of it. + */ +extern int mfd_cell_enable(struct platform_device *pdev); +extern int mfd_cell_disable(struct platform_device *pdev); + +/* + * "Clone" multiple platform devices for a single cell. 
This is to be used
+ * for devices that have multiple users of a cell. For example, if an mfd
+ * driver wants the cell "foo" to be used by a GPIO driver, an MTD driver,
+ * and a platform driver, the following bit of code would be used after first
+ * calling mfd_add_devices():
+ *
+ * const char *fclones[] = { "foo-gpio", "foo-mtd" };
+ * err = mfd_clone_cell("foo", fclones, ARRAY_SIZE(fclones));
+ *
+ * Each driver (MTD, GPIO, and platform driver) would then register
+ * platform_drivers for "foo-mtd", "foo-gpio", and "foo", respectively.
+ * The cell's .enable/.disable hooks should be used to deal with hardware
+ * resource contention.
+ */
+extern int mfd_clone_cell(const char *cell, const char **clones,
+			  size_t n_clones);
+
+/*
+ * Given a platform device that's been created by mfd_add_devices(), fetch
+ * the mfd_cell that created it.
+ */
+static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
+{
+	return pdev->mfd_cell;
+}
+
+extern int mfd_add_devices(struct device *parent, int id,
+			   const struct mfd_cell *cells, int n_devs,
+			   struct resource *mem_base,
+			   int irq_base, struct irq_domain *irq_domain);
+
+static inline int mfd_add_hotplug_devices(struct device *parent,
+		const struct mfd_cell *cells, int n_devs)
+{
+	return mfd_add_devices(parent, PLATFORM_DEVID_AUTO, cells, n_devs,
+			       NULL, 0, NULL);
+}
+
+extern void mfd_remove_devices(struct device *parent);
+
+extern int devm_mfd_add_devices(struct device *dev, int id,
+				const struct mfd_cell *cells, int n_devs,
+				struct resource *mem_base,
+				int irq_base, struct irq_domain *irq_domain);
+#endif
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
new file mode 100644
index 000000000..20949dde3
--- /dev/null
+++ b/include/linux/mfd/cros_ec.h
@@ -0,0 +1,335 @@
+/*
+ * ChromeOS EC multi-function device
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_CROS_EC_H
+#define __LINUX_MFD_CROS_EC_H
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/mutex.h>
+
+#define CROS_EC_DEV_NAME "cros_ec"
+#define CROS_EC_DEV_PD_NAME "cros_pd"
+
+/*
+ * The EC is unresponsive for a time after a reboot command. Add a
+ * simple delay to make sure that the bus stays locked.
+ */
+#define EC_REBOOT_DELAY_MS 50
+
+/*
+ * Max bus-specific overhead incurred by request/responses.
+ * I2C requires 1 additional byte for requests.
+ * I2C requires 2 additional bytes for responses.
+ * SPI requires up to 32 additional bytes for responses.
+ * */
+#define EC_PROTO_VERSION_UNKNOWN 0
+#define EC_MAX_REQUEST_OVERHEAD 1
+#define EC_MAX_RESPONSE_OVERHEAD 32
+
+/*
+ * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
+ */ +enum { + EC_MSG_TX_HEADER_BYTES = 3, + EC_MSG_TX_TRAILER_BYTES = 1, + EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES + + EC_MSG_TX_TRAILER_BYTES, + EC_MSG_RX_PROTO_BYTES = 3, + + /* Max length of messages for proto 2*/ + EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + + EC_MSG_TX_PROTO_BYTES, + + EC_MAX_MSG_BYTES = 64 * 1024, +}; + +/* + * @version: Command version number (often 0) + * @command: Command to send (EC_CMD_...) + * @outsize: Outgoing length in bytes + * @insize: Max number of bytes to accept from EC + * @result: EC's response to the command (separate from communication failure) + * @data: Where to put the incoming data from EC and outgoing data to EC + */ +struct cros_ec_command { + uint32_t version; + uint32_t command; + uint32_t outsize; + uint32_t insize; + uint32_t result; + uint8_t data[0]; +}; + +/** + * struct cros_ec_device - Information about a ChromeOS EC device + * + * @phys_name: name of physical comms layer (e.g. 'i2c-4') + * @dev: Device pointer for physical comms device + * @was_wake_device: true if this device was set to wake the system from + * sleep at the last suspend + * @cmd_readmem: direct read of the EC memory-mapped region, if supported + * @offset is within EC_LPC_ADDR_MEMMAP region. + * @bytes: number of bytes to read. zero means "read a string" (including + * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read. + * Caller must ensure that the buffer is large enough for the result when + * reading a string. + * + * @priv: Private data + * @irq: Interrupt to use + * @id: Device id + * @din: input buffer (for data from EC) + * @dout: output buffer (for data to EC) + * \note + * These two buffers will always be dword-aligned and include enough + * space for up to 7 word-alignment bytes also, so we can ensure that + * the body of the message is always dword-aligned (64-bit). + * We use this alignment to keep ARM and x86 happy. Probably word + * alignment would be OK, there might be a small performance advantage + * to using dword. + * @din_size: size of din buffer to allocate (zero to use static din) + * @dout_size: size of dout buffer to allocate (zero to use static dout) + * @wake_enabled: true if this device can wake the system from sleep + * @suspended: true if this device had been suspended + * @cmd_xfer: send command to EC and get response + * Returns the number of bytes received if the communication succeeded, but + * that doesn't mean the EC was happy with the command. The caller + * should check msg.result for the EC's result code. + * @pkt_xfer: send packet to EC and get response + * @lock: one transaction at a time + * @mkbp_event_supported: true if this EC supports the MKBP event protocol. + * @event_notifier: interrupt event notifier for transport devices. + * @event_data: raw payload transferred with the MKBP event. + * @event_size: size in bytes of the event data. 
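 *
 * Illustrative sketch (not from the upstream header): callers of cmd_xfer()
 * and friends usually allocate one struct cros_ec_command with room for the
 * larger of the outgoing parameters and the expected response, since the
 * data buffer is a trailing array. "params_size" and "response_size" are
 * hypothetical sizes:
 *
 *     struct cros_ec_command *msg;
 *
 *     msg = kzalloc(sizeof(*msg) + max(params_size, response_size),
 *                   GFP_KERNEL);
 *     if (!msg)
 *             return -ENOMEM;
 *     msg->version = 0;
 *     msg->command = EC_CMD_GET_VERSION;
 *     msg->outsize = params_size;
 *     msg->insize = response_size;
 *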
+ */ +struct cros_ec_device { + + /* These are used by other drivers that want to talk to the EC */ + const char *phys_name; + struct device *dev; + bool was_wake_device; + struct class *cros_class; + int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, + unsigned int bytes, void *dest); + + /* These are used to implement the platform-specific interface */ + u16 max_request; + u16 max_response; + u16 max_passthru; + u16 proto_version; + void *priv; + int irq; + u8 *din; + u8 *dout; + int din_size; + int dout_size; + bool wake_enabled; + bool suspended; + int (*cmd_xfer)(struct cros_ec_device *ec, + struct cros_ec_command *msg); + int (*pkt_xfer)(struct cros_ec_device *ec, + struct cros_ec_command *msg); + struct mutex lock; + bool mkbp_event_supported; + struct blocking_notifier_head event_notifier; + + struct ec_response_get_next_event_v1 event_data; + int event_size; + u32 host_event_wake_mask; +}; + +/** + * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information + * + * @sensor_num: Id of the sensor, as reported by the EC. + */ +struct cros_ec_sensor_platform { + u8 sensor_num; +}; + +/* struct cros_ec_platform - ChromeOS EC platform information + * + * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...) + * used in /dev/ and sysfs. + * @cmd_offset: offset to apply for each command. Set when + * registering a devicde behind another one. + */ +struct cros_ec_platform { + const char *ec_name; + u16 cmd_offset; +}; + +struct cros_ec_debugfs; + +/* + * struct cros_ec_dev - ChromeOS EC device entry point + * + * @class_dev: Device structure used in sysfs + * @cdev: Character device structure in /dev + * @ec_dev: cros_ec_device structure to talk to the physical device + * @dev: pointer to the platform device + * @debug_info: cros_ec_debugfs structure for debugging information + * @has_kb_wake_angle: true if at least 2 accelerometer are connected to the EC. + * @cmd_offset: offset to apply for each command. + */ +struct cros_ec_dev { + struct device class_dev; + struct cdev cdev; + struct cros_ec_device *ec_dev; + struct device *dev; + struct cros_ec_debugfs *debug_info; + bool has_kb_wake_angle; + u16 cmd_offset; + u32 features[2]; +}; + +#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev) + +/** + * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device + * + * This can be called by drivers to handle a suspend event. + * + * ec_dev: Device to suspend + * @return 0 if ok, -ve on error + */ +int cros_ec_suspend(struct cros_ec_device *ec_dev); + +/** + * cros_ec_resume - Handle a resume operation for the ChromeOS EC device + * + * This can be called by drivers to handle a resume event. + * + * @ec_dev: Device to resume + * @return 0 if ok, -ve on error + */ +int cros_ec_resume(struct cros_ec_device *ec_dev); + +/** + * cros_ec_prepare_tx - Prepare an outgoing message in the output buffer + * + * This is intended to be used by all ChromeOS EC drivers, but at present + * only SPI uses it. Once LPC uses the same protocol it can start using it. + * I2C could use it now, with a refactor of the existing code. + * + * @ec_dev: Device to register + * @msg: Message to write + */ +int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_check_result - Check ec_msg->result + * + * This is used by ChromeOS EC drivers to check the ec_msg->result for + * errors and to warn about them. 
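 *
 * Illustrative sketch of the usual calling pattern (not from the upstream
 * header), where "ec_dev" and "msg" are a previously prepared device and
 * message:
 *
 *     ret = cros_ec_cmd_xfer(ec_dev, msg);
 *     if (ret < 0)
 *             return ret;
 *     if (msg->result != EC_RES_SUCCESS)
 *             return -EIO;
 *
 * cros_ec_cmd_xfer_status(), declared further down, combines both checks
 * into a single call.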
+ * + * @ec_dev: EC device + * @msg: Message to check + */ +int cros_ec_check_result(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_cmd_xfer - Send a command to the ChromeOS EC + * + * Call this to send a command to the ChromeOS EC. This should be used + * instead of calling the EC's cmd_xfer() callback directly. + * + * @ec_dev: EC device + * @msg: Message to write + */ +int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC + * + * This function is identical to cros_ec_cmd_xfer, except it returns success + * status only if both the command was transmitted successfully and the EC + * replied with success status. It's not necessary to check msg->result when + * using this function. + * + * @ec_dev: EC device + * @msg: Message to write + * @return: Num. of bytes transferred on success, <0 on failure + */ +int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_remove - Remove a ChromeOS EC + * + * Call this to deregister a ChromeOS EC, then clean up any private data. + * + * @ec_dev: Device to register + * @return 0 if ok, -ve on error + */ +int cros_ec_remove(struct cros_ec_device *ec_dev); + +/** + * cros_ec_register - Register a new ChromeOS EC, using the provided info + * + * Before calling this, allocate a pointer to a new device and then fill + * in all the fields up to the --private-- marker. + * + * @ec_dev: Device to register + * @return 0 if ok, -ve on error + */ +int cros_ec_register(struct cros_ec_device *ec_dev); + +/** + * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC + * + * @ec_dev: Device to register + * @return 0 if ok, -ve on error + */ +int cros_ec_query_all(struct cros_ec_device *ec_dev); + +/** + * cros_ec_get_next_event - Fetch next event from the ChromeOS EC + * + * @ec_dev: Device to fetch event from + * @wake_event: Pointer to a bool set to true upon return if the event might be + * treated as a wake event. Ignored if null. + * + * Returns: 0 on success, Linux error number on failure + */ +int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); + +/** + * cros_ec_get_host_event - Return a mask of event set by the EC. + * + * When MKBP is supported, when the EC raises an interrupt, + * We collect the events raised and call the functions in the ec notifier. + * + * This function is a helper to know which events are raised. 
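 *
 * Illustrative sketch (not from the upstream header): a consumer typically
 * registers a notifier on event_notifier of struct cros_ec_device and calls
 * this helper from its callback. "my_drv", "my_notifier_call" and
 * handle_lid_open() are hypothetical names:
 *
 *     struct my_drv {
 *             struct notifier_block notifier;
 *             struct cros_ec_device *ec_dev;
 *     };
 *
 *     static int my_notifier_call(struct notifier_block *nb,
 *                                 unsigned long unused, void *data)
 *     {
 *             struct my_drv *drv = container_of(nb, struct my_drv, notifier);
 *             u32 events = cros_ec_get_host_event(drv->ec_dev);
 *
 *             if (events & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_OPEN))
 *                     handle_lid_open();
 *             return NOTIFY_OK;
 *     }
 *
 *     blocking_notifier_chain_register(&drv->ec_dev->event_notifier,
 *                                      &drv->notifier);
 *
 * EC_HOST_EVENT_MASK() and the event codes are defined in
 * <linux/mfd/cros_ec_commands.h>.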
+ */ +u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); + +/* sysfs stuff */ +extern struct attribute_group cros_ec_attr_group; +extern struct attribute_group cros_ec_lightbar_attr_group; +extern struct attribute_group cros_ec_vbc_attr_group; + +/* debugfs stuff */ +int cros_ec_debugfs_init(struct cros_ec_dev *ec); +void cros_ec_debugfs_remove(struct cros_ec_dev *ec); +void cros_ec_debugfs_suspend(struct cros_ec_dev *ec); +void cros_ec_debugfs_resume(struct cros_ec_dev *ec); + +#endif /* __LINUX_MFD_CROS_EC_H */ diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h new file mode 100644 index 000000000..6e1ab9bea --- /dev/null +++ b/include/linux/mfd/cros_ec_commands.h @@ -0,0 +1,3270 @@ +/* + * Host communication command constants for ChromeOS EC + * + * Copyright (C) 2012 Google, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * The ChromeOS EC multi function device is used to mux all the requests + * to the EC device for its multiple features: keyboard controller, + * battery charging and regulator control, firmware update. + * + * NOTE: This file is copied verbatim from the ChromeOS EC Open Source + * project in an attempt to make future updates easy to make. + */ + +#ifndef __CROS_EC_COMMANDS_H +#define __CROS_EC_COMMANDS_H + +/* + * Current version of this protocol + * + * TODO(crosbug.com/p/11223): This is effectively useless; protocol is + * determined in other ways. Remove this once the kernel code no longer + * depends on it. + */ +#define EC_PROTO_VERSION 0x00000002 + +/* Command version mask */ +#define EC_VER_MASK(version) (1UL << (version)) + +/* I/O addresses for ACPI commands */ +#define EC_LPC_ADDR_ACPI_DATA 0x62 +#define EC_LPC_ADDR_ACPI_CMD 0x66 + +/* I/O addresses for host command */ +#define EC_LPC_ADDR_HOST_DATA 0x200 +#define EC_LPC_ADDR_HOST_CMD 0x204 + +/* I/O addresses for host command args and params */ +/* Protocol version 2 */ +#define EC_LPC_ADDR_HOST_ARGS 0x800 /* And 0x801, 0x802, 0x803 */ +#define EC_LPC_ADDR_HOST_PARAM 0x804 /* For version 2 params; size is + * EC_PROTO2_MAX_PARAM_SIZE */ +/* Protocol version 3 */ +#define EC_LPC_ADDR_HOST_PACKET 0x800 /* Offset of version 3 packet */ +#define EC_LPC_HOST_PACKET_SIZE 0x100 /* Max size of version 3 packet */ + +/* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff + * and they tell the kernel that so we have to think of it as two parts. 
*/ +#define EC_HOST_CMD_REGION0 0x800 +#define EC_HOST_CMD_REGION1 0x880 +#define EC_HOST_CMD_REGION_SIZE 0x80 + +/* EC command register bit functions */ +#define EC_LPC_CMDR_DATA (1 << 0) /* Data ready for host to read */ +#define EC_LPC_CMDR_PENDING (1 << 1) /* Write pending to EC */ +#define EC_LPC_CMDR_BUSY (1 << 2) /* EC is busy processing a command */ +#define EC_LPC_CMDR_CMD (1 << 3) /* Last host write was a command */ +#define EC_LPC_CMDR_ACPI_BRST (1 << 4) /* Burst mode (not used) */ +#define EC_LPC_CMDR_SCI (1 << 5) /* SCI event is pending */ +#define EC_LPC_CMDR_SMI (1 << 6) /* SMI event is pending */ + +#define EC_LPC_ADDR_MEMMAP 0x900 +#define EC_MEMMAP_SIZE 255 /* ACPI IO buffer max is 255 bytes */ +#define EC_MEMMAP_TEXT_MAX 8 /* Size of a string in the memory map */ + +/* The offset address of each type of data in mapped memory. */ +#define EC_MEMMAP_TEMP_SENSOR 0x00 /* Temp sensors 0x00 - 0x0f */ +#define EC_MEMMAP_FAN 0x10 /* Fan speeds 0x10 - 0x17 */ +#define EC_MEMMAP_TEMP_SENSOR_B 0x18 /* More temp sensors 0x18 - 0x1f */ +#define EC_MEMMAP_ID 0x20 /* 0x20 == 'E', 0x21 == 'C' */ +#define EC_MEMMAP_ID_VERSION 0x22 /* Version of data in 0x20 - 0x2f */ +#define EC_MEMMAP_THERMAL_VERSION 0x23 /* Version of data in 0x00 - 0x1f */ +#define EC_MEMMAP_BATTERY_VERSION 0x24 /* Version of data in 0x40 - 0x7f */ +#define EC_MEMMAP_SWITCHES_VERSION 0x25 /* Version of data in 0x30 - 0x33 */ +#define EC_MEMMAP_EVENTS_VERSION 0x26 /* Version of data in 0x34 - 0x3f */ +#define EC_MEMMAP_HOST_CMD_FLAGS 0x27 /* Host cmd interface flags (8 bits) */ +/* Unused 0x28 - 0x2f */ +#define EC_MEMMAP_SWITCHES 0x30 /* 8 bits */ +/* Unused 0x31 - 0x33 */ +#define EC_MEMMAP_HOST_EVENTS 0x34 /* 32 bits */ +/* Reserve 0x38 - 0x3f for additional host event-related stuff */ +/* Battery values are all 32 bits */ +#define EC_MEMMAP_BATT_VOLT 0x40 /* Battery Present Voltage */ +#define EC_MEMMAP_BATT_RATE 0x44 /* Battery Present Rate */ +#define EC_MEMMAP_BATT_CAP 0x48 /* Battery Remaining Capacity */ +#define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, defined below */ +#define EC_MEMMAP_BATT_DCAP 0x50 /* Battery Design Capacity */ +#define EC_MEMMAP_BATT_DVLT 0x54 /* Battery Design Voltage */ +#define EC_MEMMAP_BATT_LFCC 0x58 /* Battery Last Full Charge Capacity */ +#define EC_MEMMAP_BATT_CCNT 0x5c /* Battery Cycle Count */ +/* Strings are all 8 bytes (EC_MEMMAP_TEXT_MAX) */ +#define EC_MEMMAP_BATT_MFGR 0x60 /* Battery Manufacturer String */ +#define EC_MEMMAP_BATT_MODEL 0x68 /* Battery Model Number String */ +#define EC_MEMMAP_BATT_SERIAL 0x70 /* Battery Serial Number String */ +#define EC_MEMMAP_BATT_TYPE 0x78 /* Battery Type String */ +#define EC_MEMMAP_ALS 0x80 /* ALS readings in lux (2 X 16 bits) */ +/* Unused 0x84 - 0x8f */ +#define EC_MEMMAP_ACC_STATUS 0x90 /* Accelerometer status (8 bits )*/ +/* Unused 0x91 */ +#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometer data 0x92 - 0x9f */ +#define EC_MEMMAP_GYRO_DATA 0xa0 /* Gyroscope data 0xa0 - 0xa5 */ +/* Unused 0xa6 - 0xfe (remember, 0xff is NOT part of the memmap region) */ + + +/* Define the format of the accelerometer mapped memory status byte. */ +#define EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK 0x0f +#define EC_MEMMAP_ACC_STATUS_BUSY_BIT (1 << 4) +#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT (1 << 7) + +/* Number of temp sensors at EC_MEMMAP_TEMP_SENSOR */ +#define EC_TEMP_SENSOR_ENTRIES 16 +/* + * Number of temp sensors at EC_MEMMAP_TEMP_SENSOR_B. + * + * Valid only if EC_MEMMAP_THERMAL_VERSION returns >= 2. 
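 *
 * Illustrative sketch (not from the upstream header) of reading one of the
 * memory-mapped values above on platforms whose transport provides the
 * cmd_readmem() hook of struct cros_ec_device; per the comment above,
 * battery values are 32 bits wide. "ec_dev" is a hypothetical device pointer:
 *
 *     u32 volt;
 *
 *     if (ec_dev->cmd_readmem)
 *             ec_dev->cmd_readmem(ec_dev, EC_MEMMAP_BATT_VOLT,
 *                                 sizeof(volt), &volt);
 *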
+ */ +#define EC_TEMP_SENSOR_B_ENTRIES 8 + +/* Special values for mapped temperature sensors */ +#define EC_TEMP_SENSOR_NOT_PRESENT 0xff +#define EC_TEMP_SENSOR_ERROR 0xfe +#define EC_TEMP_SENSOR_NOT_POWERED 0xfd +#define EC_TEMP_SENSOR_NOT_CALIBRATED 0xfc +/* + * The offset of temperature value stored in mapped memory. This allows + * reporting a temperature range of 200K to 454K = -73C to 181C. + */ +#define EC_TEMP_SENSOR_OFFSET 200 + +/* + * Number of ALS readings at EC_MEMMAP_ALS + */ +#define EC_ALS_ENTRIES 2 + +/* + * The default value a temperature sensor will return when it is present but + * has not been read this boot. This is a reasonable number to avoid + * triggering alarms on the host. + */ +#define EC_TEMP_SENSOR_DEFAULT (296 - EC_TEMP_SENSOR_OFFSET) + +#define EC_FAN_SPEED_ENTRIES 4 /* Number of fans at EC_MEMMAP_FAN */ +#define EC_FAN_SPEED_NOT_PRESENT 0xffff /* Entry not present */ +#define EC_FAN_SPEED_STALLED 0xfffe /* Fan stalled */ + +/* Battery bit flags at EC_MEMMAP_BATT_FLAG. */ +#define EC_BATT_FLAG_AC_PRESENT 0x01 +#define EC_BATT_FLAG_BATT_PRESENT 0x02 +#define EC_BATT_FLAG_DISCHARGING 0x04 +#define EC_BATT_FLAG_CHARGING 0x08 +#define EC_BATT_FLAG_LEVEL_CRITICAL 0x10 + +/* Switch flags at EC_MEMMAP_SWITCHES */ +#define EC_SWITCH_LID_OPEN 0x01 +#define EC_SWITCH_POWER_BUTTON_PRESSED 0x02 +#define EC_SWITCH_WRITE_PROTECT_DISABLED 0x04 +/* Was recovery requested via keyboard; now unused. */ +#define EC_SWITCH_IGNORE1 0x08 +/* Recovery requested via dedicated signal (from servo board) */ +#define EC_SWITCH_DEDICATED_RECOVERY 0x10 +/* Was fake developer mode switch; now unused. Remove in next refactor. */ +#define EC_SWITCH_IGNORE0 0x20 + +/* Host command interface flags */ +/* Host command interface supports LPC args (LPC interface only) */ +#define EC_HOST_CMD_FLAG_LPC_ARGS_SUPPORTED 0x01 +/* Host command interface supports version 3 protocol */ +#define EC_HOST_CMD_FLAG_VERSION_3 0x02 + +/* Wireless switch flags */ +#define EC_WIRELESS_SWITCH_ALL ~0x00 /* All flags */ +#define EC_WIRELESS_SWITCH_WLAN 0x01 /* WLAN radio */ +#define EC_WIRELESS_SWITCH_BLUETOOTH 0x02 /* Bluetooth radio */ +#define EC_WIRELESS_SWITCH_WWAN 0x04 /* WWAN power */ +#define EC_WIRELESS_SWITCH_WLAN_POWER 0x08 /* WLAN power */ + +/* + * This header file is used in coreboot both in C and ACPI code. The ACPI code + * is pre-processed to handle constants but the ASL compiler is unable to + * handle actual C code so keep it separate. + */ +#ifndef __ACPI__ + +/* + * Define __packed if someone hasn't beat us to it. Linux kernel style + * checking prefers __packed over __attribute__((packed)). + */ +#ifndef __packed +#define __packed __attribute__((packed)) +#endif + +/* LPC command status byte masks */ +/* EC has written a byte in the data register and host hasn't read it yet */ +#define EC_LPC_STATUS_TO_HOST 0x01 +/* Host has written a command/data byte and the EC hasn't read it yet */ +#define EC_LPC_STATUS_FROM_HOST 0x02 +/* EC is processing a command */ +#define EC_LPC_STATUS_PROCESSING 0x04 +/* Last write to EC was a command, not data */ +#define EC_LPC_STATUS_LAST_CMD 0x08 +/* EC is in burst mode. Unsupported by Chrome EC, so this bit is never set */ +#define EC_LPC_STATUS_BURST_MODE 0x10 +/* SCI event is pending (requesting SCI query) */ +#define EC_LPC_STATUS_SCI_PENDING 0x20 +/* SMI event is pending (requesting SMI query) */ +#define EC_LPC_STATUS_SMI_PENDING 0x40 +/* (reserved) */ +#define EC_LPC_STATUS_RESERVED 0x80 + +/* + * EC is busy. 
This covers both the EC processing a command, and the host has + * written a new command but the EC hasn't picked it up yet. + */ +#define EC_LPC_STATUS_BUSY_MASK \ + (EC_LPC_STATUS_FROM_HOST | EC_LPC_STATUS_PROCESSING) + +/* Host command response codes */ +enum ec_status { + EC_RES_SUCCESS = 0, + EC_RES_INVALID_COMMAND = 1, + EC_RES_ERROR = 2, + EC_RES_INVALID_PARAM = 3, + EC_RES_ACCESS_DENIED = 4, + EC_RES_INVALID_RESPONSE = 5, + EC_RES_INVALID_VERSION = 6, + EC_RES_INVALID_CHECKSUM = 7, + EC_RES_IN_PROGRESS = 8, /* Accepted, command in progress */ + EC_RES_UNAVAILABLE = 9, /* No response available */ + EC_RES_TIMEOUT = 10, /* We got a timeout */ + EC_RES_OVERFLOW = 11, /* Table / data overflow */ + EC_RES_INVALID_HEADER = 12, /* Header contains invalid data */ + EC_RES_REQUEST_TRUNCATED = 13, /* Didn't get the entire request */ + EC_RES_RESPONSE_TOO_BIG = 14 /* Response was too big to handle */ +}; + +/* + * Host event codes. Note these are 1-based, not 0-based, because ACPI query + * EC command uses code 0 to mean "no event pending". We explicitly specify + * each value in the enum listing so they won't change if we delete/insert an + * item or rearrange the list (it needs to be stable across platforms, not + * just within a single compiled instance). + */ +enum host_event_code { + EC_HOST_EVENT_LID_CLOSED = 1, + EC_HOST_EVENT_LID_OPEN = 2, + EC_HOST_EVENT_POWER_BUTTON = 3, + EC_HOST_EVENT_AC_CONNECTED = 4, + EC_HOST_EVENT_AC_DISCONNECTED = 5, + EC_HOST_EVENT_BATTERY_LOW = 6, + EC_HOST_EVENT_BATTERY_CRITICAL = 7, + EC_HOST_EVENT_BATTERY = 8, + EC_HOST_EVENT_THERMAL_THRESHOLD = 9, + EC_HOST_EVENT_THERMAL_OVERLOAD = 10, + EC_HOST_EVENT_THERMAL = 11, + EC_HOST_EVENT_USB_CHARGER = 12, + EC_HOST_EVENT_KEY_PRESSED = 13, + /* + * EC has finished initializing the host interface. The host can check + * for this event following sending a EC_CMD_REBOOT_EC command to + * determine when the EC is ready to accept subsequent commands. + */ + EC_HOST_EVENT_INTERFACE_READY = 14, + /* Keyboard recovery combo has been pressed */ + EC_HOST_EVENT_KEYBOARD_RECOVERY = 15, + + /* Shutdown due to thermal overload */ + EC_HOST_EVENT_THERMAL_SHUTDOWN = 16, + /* Shutdown due to battery level too low */ + EC_HOST_EVENT_BATTERY_SHUTDOWN = 17, + + /* Suggest that the AP throttle itself */ + EC_HOST_EVENT_THROTTLE_START = 18, + /* Suggest that the AP resume normal speed */ + EC_HOST_EVENT_THROTTLE_STOP = 19, + + /* Hang detect logic detected a hang and host event timeout expired */ + EC_HOST_EVENT_HANG_DETECT = 20, + /* Hang detect logic detected a hang and warm rebooted the AP */ + EC_HOST_EVENT_HANG_REBOOT = 21, + /* PD MCU triggering host event */ + EC_HOST_EVENT_PD_MCU = 22, + + /* EC desires to change state of host-controlled USB mux */ + EC_HOST_EVENT_USB_MUX = 28, + + /* EC RTC event occurred */ + EC_HOST_EVENT_RTC = 26, + + /* + * The high bit of the event mask is not used as a host event code. If + * it reads back as set, then the entire event mask should be + * considered invalid by the host. This can happen when reading the + * raw event status via EC_MEMMAP_HOST_EVENTS but the LPC interface is + * not initialized on the EC, or improperly configured on the host. 
+ */ + EC_HOST_EVENT_INVALID = 32 +}; +/* Host event mask */ +#define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1)) + +/* Arguments at EC_LPC_ADDR_HOST_ARGS */ +struct ec_lpc_host_args { + uint8_t flags; + uint8_t command_version; + uint8_t data_size; + /* + * Checksum; sum of command + flags + command_version + data_size + + * all params/response data bytes. + */ + uint8_t checksum; +} __packed; + +/* Flags for ec_lpc_host_args.flags */ +/* + * Args are from host. Data area at EC_LPC_ADDR_HOST_PARAM contains command + * params. + * + * If EC gets a command and this flag is not set, this is an old-style command. + * Command version is 0 and params from host are at EC_LPC_ADDR_OLD_PARAM with + * unknown length. EC must respond with an old-style response (that is, + * withouth setting EC_HOST_ARGS_FLAG_TO_HOST). + */ +#define EC_HOST_ARGS_FLAG_FROM_HOST 0x01 +/* + * Args are from EC. Data area at EC_LPC_ADDR_HOST_PARAM contains response. + * + * If EC responds to a command and this flag is not set, this is an old-style + * response. Command version is 0 and response data from EC is at + * EC_LPC_ADDR_OLD_PARAM with unknown length. + */ +#define EC_HOST_ARGS_FLAG_TO_HOST 0x02 + +/*****************************************************************************/ +/* + * Byte codes returned by EC over SPI interface. + * + * These can be used by the AP to debug the EC interface, and to determine + * when the EC is not in a state where it will ever get around to responding + * to the AP. + * + * Example of sequence of bytes read from EC for a current good transfer: + * 1. - - AP asserts chip select (CS#) + * 2. EC_SPI_OLD_READY - AP sends first byte(s) of request + * 3. - - EC starts handling CS# interrupt + * 4. EC_SPI_RECEIVING - AP sends remaining byte(s) of request + * 5. EC_SPI_PROCESSING - EC starts processing request; AP is clocking in + * bytes looking for EC_SPI_FRAME_START + * 6. - - EC finishes processing and sets up response + * 7. EC_SPI_FRAME_START - AP reads frame byte + * 8. (response packet) - AP reads response packet + * 9. EC_SPI_PAST_END - Any additional bytes read by AP + * 10 - - AP deasserts chip select + * 11 - - EC processes CS# interrupt and sets up DMA for + * next request + * + * If the AP is waiting for EC_SPI_FRAME_START and sees any value other than + * the following byte values: + * EC_SPI_OLD_READY + * EC_SPI_RX_READY + * EC_SPI_RECEIVING + * EC_SPI_PROCESSING + * + * Then the EC found an error in the request, or was not ready for the request + * and lost data. The AP should give up waiting for EC_SPI_FRAME_START, + * because the EC is unable to tell when the AP is done sending its request. + */ + +/* + * Framing byte which precedes a response packet from the EC. After sending a + * request, the AP will clock in bytes until it sees the framing byte, then + * clock in the response packet. + */ +#define EC_SPI_FRAME_START 0xec + +/* + * Padding bytes which are clocked out after the end of a response packet. + */ +#define EC_SPI_PAST_END 0xed + +/* + * EC is ready to receive, and has ignored the byte sent by the AP. EC expects + * that the AP will send a valid packet header (starting with + * EC_COMMAND_PROTOCOL_3) in the next 32 bytes. + */ +#define EC_SPI_RX_READY 0xf8 + +/* + * EC has started receiving the request from the AP, but hasn't started + * processing it yet. + */ +#define EC_SPI_RECEIVING 0xf9 + +/* EC has received the entire request from the AP and is processing it. 
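 *
 * Illustrative sketch (not from the upstream header) of the AP-side wait
 * described in the sequence above; a real transport driver uses full-duplex
 * SPI transfers and a deadline rather than a plain retry count, and "spi"
 * is a hypothetical struct spi_device pointer:
 *
 *     u8 byte = EC_SPI_RECEIVING;
 *     int retries = 1000;
 *
 *     while (byte != EC_SPI_FRAME_START && retries--) {
 *             if (spi_read(spi, &byte, 1))
 *                     break;
 *     }
 *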
*/ +#define EC_SPI_PROCESSING 0xfa + +/* + * EC received bad data from the AP, such as a packet header with an invalid + * length. EC will ignore all data until chip select deasserts. + */ +#define EC_SPI_RX_BAD_DATA 0xfb + +/* + * EC received data from the AP before it was ready. That is, the AP asserted + * chip select and started clocking data before the EC was ready to receive it. + * EC will ignore all data until chip select deasserts. + */ +#define EC_SPI_NOT_READY 0xfc + +/* + * EC was ready to receive a request from the AP. EC has treated the byte sent + * by the AP as part of a request packet, or (for old-style ECs) is processing + * a fully received packet but is not ready to respond yet. + */ +#define EC_SPI_OLD_READY 0xfd + +/*****************************************************************************/ + +/* + * Protocol version 2 for I2C and SPI send a request this way: + * + * 0 EC_CMD_VERSION0 + (command version) + * 1 Command number + * 2 Length of params = N + * 3..N+2 Params, if any + * N+3 8-bit checksum of bytes 0..N+2 + * + * The corresponding response is: + * + * 0 Result code (EC_RES_*) + * 1 Length of params = M + * 2..M+1 Params, if any + * M+2 8-bit checksum of bytes 0..M+1 + */ +#define EC_PROTO2_REQUEST_HEADER_BYTES 3 +#define EC_PROTO2_REQUEST_TRAILER_BYTES 1 +#define EC_PROTO2_REQUEST_OVERHEAD (EC_PROTO2_REQUEST_HEADER_BYTES + \ + EC_PROTO2_REQUEST_TRAILER_BYTES) + +#define EC_PROTO2_RESPONSE_HEADER_BYTES 2 +#define EC_PROTO2_RESPONSE_TRAILER_BYTES 1 +#define EC_PROTO2_RESPONSE_OVERHEAD (EC_PROTO2_RESPONSE_HEADER_BYTES + \ + EC_PROTO2_RESPONSE_TRAILER_BYTES) + +/* Parameter length was limited by the LPC interface */ +#define EC_PROTO2_MAX_PARAM_SIZE 0xfc + +/* Maximum request and response packet sizes for protocol version 2 */ +#define EC_PROTO2_MAX_REQUEST_SIZE (EC_PROTO2_REQUEST_OVERHEAD + \ + EC_PROTO2_MAX_PARAM_SIZE) +#define EC_PROTO2_MAX_RESPONSE_SIZE (EC_PROTO2_RESPONSE_OVERHEAD + \ + EC_PROTO2_MAX_PARAM_SIZE) + +/*****************************************************************************/ + +/* + * Value written to legacy command port / prefix byte to indicate protocol + * 3+ structs are being used. Usage is bus-dependent. + */ +#define EC_COMMAND_PROTOCOL_3 0xda + +#define EC_HOST_REQUEST_VERSION 3 + +/* Version 3 request from host */ +struct ec_host_request { + /* Struct version (=3) + * + * EC will return EC_RES_INVALID_HEADER if it receives a header with a + * version it doesn't know how to parse. + */ + uint8_t struct_version; + + /* + * Checksum of request and data; sum of all bytes including checksum + * should total to 0. + */ + uint8_t checksum; + + /* Command code */ + uint16_t command; + + /* Command version */ + uint8_t command_version; + + /* Unused byte in current protocol version; set to 0 */ + uint8_t reserved; + + /* Length of data which follows this header */ + uint16_t data_len; +} __packed; + +#define EC_HOST_RESPONSE_VERSION 3 + +/* Version 3 response from EC */ +struct ec_host_response { + /* Struct version (=3) */ + uint8_t struct_version; + + /* + * Checksum of response and data; sum of all bytes including checksum + * should total to 0. 
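 *
 * Illustrative sketch (not from the upstream header): the request and the
 * response use the same convention, so a sender can compute the field as
 * the two's complement of the byte sum of the header (with checksum set to
 * 0) plus the data. "ec_sum_bytes", "request", "data" and "data_len" are
 * hypothetical names:
 *
 *     static u8 ec_sum_bytes(const void *buf, int len)
 *     {
 *             const u8 *p = buf;
 *             u8 sum = 0;
 *             int i;
 *
 *             for (i = 0; i < len; i++)
 *                     sum += p[i];
 *             return sum;
 *     }
 *
 *     checksum = -(ec_sum_bytes(&request, sizeof(request)) +
 *                  ec_sum_bytes(data, data_len));
 *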
+ */ + uint8_t checksum; + + /* Result code (EC_RES_*) */ + uint16_t result; + + /* Length of data which follows this header */ + uint16_t data_len; + + /* Unused bytes in current protocol version; set to 0 */ + uint16_t reserved; +} __packed; + +/*****************************************************************************/ +/* + * Notes on commands: + * + * Each command is an 16-bit command value. Commands which take params or + * return response data specify structs for that data. If no struct is + * specified, the command does not input or output data, respectively. + * Parameter/response length is implicit in the structs. Some underlying + * communication protocols (I2C, SPI) may add length or checksum headers, but + * those are implementation-dependent and not defined here. + */ + +/*****************************************************************************/ +/* General / test commands */ + +/* + * Get protocol version, used to deal with non-backward compatible protocol + * changes. + */ +#define EC_CMD_PROTO_VERSION 0x00 + +struct ec_response_proto_version { + uint32_t version; +} __packed; + +/* + * Hello. This is a simple command to test the EC is responsive to + * commands. + */ +#define EC_CMD_HELLO 0x01 + +struct ec_params_hello { + uint32_t in_data; /* Pass anything here */ +} __packed; + +struct ec_response_hello { + uint32_t out_data; /* Output will be in_data + 0x01020304 */ +} __packed; + +/* Get version number */ +#define EC_CMD_GET_VERSION 0x02 + +enum ec_current_image { + EC_IMAGE_UNKNOWN = 0, + EC_IMAGE_RO, + EC_IMAGE_RW +}; + +struct ec_response_get_version { + /* Null-terminated version strings for RO, RW */ + char version_string_ro[32]; + char version_string_rw[32]; + char reserved[32]; /* Was previously RW-B string */ + uint32_t current_image; /* One of ec_current_image */ +} __packed; + +/* Read test */ +#define EC_CMD_READ_TEST 0x03 + +struct ec_params_read_test { + uint32_t offset; /* Starting value for read buffer */ + uint32_t size; /* Size to read in bytes */ +} __packed; + +struct ec_response_read_test { + uint32_t data[32]; +} __packed; + +/* + * Get build information + * + * Response is null-terminated string. + */ +#define EC_CMD_GET_BUILD_INFO 0x04 + +/* Get chip info */ +#define EC_CMD_GET_CHIP_INFO 0x05 + +struct ec_response_get_chip_info { + /* Null-terminated strings */ + char vendor[32]; + char name[32]; + char revision[32]; /* Mask version */ +} __packed; + +/* Get board HW version */ +#define EC_CMD_GET_BOARD_VERSION 0x06 + +struct ec_response_board_version { + uint16_t board_version; /* A monotonously incrementing number. */ +} __packed; + +/* + * Read memory-mapped data. + * + * This is an alternate interface to memory-mapped data for bus protocols + * which don't support direct-mapped memory - I2C, SPI, etc. + * + * Response is params.size bytes of data. + */ +#define EC_CMD_READ_MEMMAP 0x07 + +struct ec_params_read_memmap { + uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */ + uint8_t size; /* Size to read in bytes */ +} __packed; + +/* Read versions supported for a command */ +#define EC_CMD_GET_CMD_VERSIONS 0x08 + +struct ec_params_get_cmd_versions { + uint8_t cmd; /* Command to check */ +} __packed; + +struct ec_params_get_cmd_versions_v1 { + uint16_t cmd; /* Command to check */ +} __packed; + +struct ec_response_get_cmd_versions { + /* + * Mask of supported versions; use EC_VER_MASK() to compare with a + * desired version. + */ + uint32_t version_mask; +} __packed; + +/* + * Check EC communcations status (busy). 
This is needed on i2c/spi but not + * on lpc since it has its own out-of-band busy indicator. + * + * lpc must read the status from the command register. Attempting this on + * lpc will overwrite the args/parameter space and corrupt its data. + */ +#define EC_CMD_GET_COMMS_STATUS 0x09 + +/* Avoid using ec_status which is for return values */ +enum ec_comms_status { + EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */ +}; + +struct ec_response_get_comms_status { + uint32_t flags; /* Mask of enum ec_comms_status */ +} __packed; + +/* Fake a variety of responses, purely for testing purposes. */ +#define EC_CMD_TEST_PROTOCOL 0x0a + +/* Tell the EC what to send back to us. */ +struct ec_params_test_protocol { + uint32_t ec_result; + uint32_t ret_len; + uint8_t buf[32]; +} __packed; + +/* Here it comes... */ +struct ec_response_test_protocol { + uint8_t buf[32]; +} __packed; + +/* Get prococol information */ +#define EC_CMD_GET_PROTOCOL_INFO 0x0b + +/* Flags for ec_response_get_protocol_info.flags */ +/* EC_RES_IN_PROGRESS may be returned if a command is slow */ +#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0) + +struct ec_response_get_protocol_info { + /* Fields which exist if at least protocol version 3 supported */ + + /* Bitmask of protocol versions supported (1 << n means version n)*/ + uint32_t protocol_versions; + + /* Maximum request packet size, in bytes */ + uint16_t max_request_packet_size; + + /* Maximum response packet size, in bytes */ + uint16_t max_response_packet_size; + + /* Flags; see EC_PROTOCOL_INFO_* */ + uint32_t flags; +} __packed; + + +/*****************************************************************************/ +/* Get/Set miscellaneous values */ + +/* The upper byte of .flags tells what to do (nothing means "get") */ +#define EC_GSV_SET 0x80000000 + +/* The lower three bytes of .flags identifies the parameter, if that has + meaning for an individual command. */ +#define EC_GSV_PARAM_MASK 0x00ffffff + +struct ec_params_get_set_value { + uint32_t flags; + uint32_t value; +} __packed; + +struct ec_response_get_set_value { + uint32_t flags; + uint32_t value; +} __packed; + +/* More than one command can use these structs to get/set paramters. */ +#define EC_CMD_GSV_PAUSE_IN_S5 0x0c + +/*****************************************************************************/ +/* List the features supported by the firmware */ +#define EC_CMD_GET_FEATURES 0x0d + +/* Supported features */ +enum ec_feature_code { + /* + * This image contains a limited set of features. Another image + * in RW partition may support more features. + */ + EC_FEATURE_LIMITED = 0, + /* + * Commands for probing/reading/writing/erasing the flash in the + * EC are present. + */ + EC_FEATURE_FLASH = 1, + /* + * Can control the fan speed directly. + */ + EC_FEATURE_PWM_FAN = 2, + /* + * Can control the intensity of the keyboard backlight. + */ + EC_FEATURE_PWM_KEYB = 3, + /* + * Support Google lightbar, introduced on Pixel. + */ + EC_FEATURE_LIGHTBAR = 4, + /* Control of LEDs */ + EC_FEATURE_LED = 5, + /* Exposes an interface to control gyro and sensors. + * The host goes through the EC to access these sensors. + * In addition, the EC may provide composite sensors, like lid angle. + */ + EC_FEATURE_MOTION_SENSE = 6, + /* The keyboard is controlled by the EC */ + EC_FEATURE_KEYB = 7, + /* The AP can use part of the EC flash as persistent storage. */ + EC_FEATURE_PSTORE = 8, + /* The EC monitors BIOS port 80h, and can return POST codes. 
*/ + EC_FEATURE_PORT80 = 9, + /* + * Thermal management: include TMP specific commands. + * Higher level than direct fan control. + */ + EC_FEATURE_THERMAL = 10, + /* Can switch the screen backlight on/off */ + EC_FEATURE_BKLIGHT_SWITCH = 11, + /* Can switch the wifi module on/off */ + EC_FEATURE_WIFI_SWITCH = 12, + /* Monitor host events, through for example SMI or SCI */ + EC_FEATURE_HOST_EVENTS = 13, + /* The EC exposes GPIO commands to control/monitor connected devices. */ + EC_FEATURE_GPIO = 14, + /* The EC can send i2c messages to downstream devices. */ + EC_FEATURE_I2C = 15, + /* Command to control charger are included */ + EC_FEATURE_CHARGER = 16, + /* Simple battery support. */ + EC_FEATURE_BATTERY = 17, + /* + * Support Smart battery protocol + * (Common Smart Battery System Interface Specification) + */ + EC_FEATURE_SMART_BATTERY = 18, + /* EC can dectect when the host hangs. */ + EC_FEATURE_HANG_DETECT = 19, + /* Report power information, for pit only */ + EC_FEATURE_PMU = 20, + /* Another Cros EC device is present downstream of this one */ + EC_FEATURE_SUB_MCU = 21, + /* Support USB Power delivery (PD) commands */ + EC_FEATURE_USB_PD = 22, + /* Control USB multiplexer, for audio through USB port for instance. */ + EC_FEATURE_USB_MUX = 23, + /* Motion Sensor code has an internal software FIFO */ + EC_FEATURE_MOTION_SENSE_FIFO = 24, + /* EC has RTC feature that can be controlled by host commands */ + EC_FEATURE_RTC = 27, + /* EC supports CEC commands */ + EC_FEATURE_CEC = 35, +}; + +#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) +#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32)) +struct ec_response_get_features { + uint32_t flags[2]; +} __packed; + +/*****************************************************************************/ +/* Flash commands */ + +/* Get flash info */ +#define EC_CMD_FLASH_INFO 0x10 + +/* Version 0 returns these fields */ +struct ec_response_flash_info { + /* Usable flash size, in bytes */ + uint32_t flash_size; + /* + * Write block size. Write offset and size must be a multiple + * of this. + */ + uint32_t write_block_size; + /* + * Erase block size. Erase offset and size must be a multiple + * of this. + */ + uint32_t erase_block_size; + /* + * Protection block size. Protection offset and size must be a + * multiple of this. + */ + uint32_t protect_block_size; +} __packed; + +/* Flags for version 1+ flash info command */ +/* EC flash erases bits to 0 instead of 1 */ +#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0) + +/* + * Version 1 returns the same initial fields as version 0, with additional + * fields following. + * + * gcc anonymous structs don't seem to get along with the __packed directive; + * if they did we'd define the version 0 struct as a sub-struct of this one. + */ +struct ec_response_flash_info_1 { + /* Version 0 fields; see above for description */ + uint32_t flash_size; + uint32_t write_block_size; + uint32_t erase_block_size; + uint32_t protect_block_size; + + /* Version 1 adds these fields: */ + /* + * Ideal write size in bytes. Writes will be fastest if size is + * exactly this and offset is a multiple of this. For example, an EC + * may have a write buffer which can do half-page operations if data is + * aligned, and a slower word-at-a-time write mode. + */ + uint32_t write_ideal_size; + + /* Flags; see EC_FLASH_INFO_* */ + uint32_t flags; +} __packed; + +/* + * Read flash + * + * Response is params.size bytes of data. 
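 *
 * Illustrative sketch (not from the upstream header), reading "count" bytes
 * at "offset" with the ec_params_flash_read struct defined just below;
 * "msg", "ec_dev", "offset" and "count" are hypothetical, and the message is
 * assumed to have been allocated with room for the response:
 *
 *     struct ec_params_flash_read *p = (void *)msg->data;
 *
 *     msg->command = EC_CMD_FLASH_READ;
 *     msg->outsize = sizeof(*p);
 *     msg->insize = count;
 *     p->offset = offset;
 *     p->size = count;
 *     ret = cros_ec_cmd_xfer_status(ec_dev, msg);
 *
 * cros_ec_cmd_xfer_status() is declared in <linux/mfd/cros_ec.h>.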
+ */ +#define EC_CMD_FLASH_READ 0x11 + +struct ec_params_flash_read { + uint32_t offset; /* Byte offset to read */ + uint32_t size; /* Size to read in bytes */ +} __packed; + +/* Write flash */ +#define EC_CMD_FLASH_WRITE 0x12 +#define EC_VER_FLASH_WRITE 1 + +/* Version 0 of the flash command supported only 64 bytes of data */ +#define EC_FLASH_WRITE_VER0_SIZE 64 + +struct ec_params_flash_write { + uint32_t offset; /* Byte offset to write */ + uint32_t size; /* Size to write in bytes */ + /* Followed by data to write */ +} __packed; + +/* Erase flash */ +#define EC_CMD_FLASH_ERASE 0x13 + +struct ec_params_flash_erase { + uint32_t offset; /* Byte offset to erase */ + uint32_t size; /* Size to erase in bytes */ +} __packed; + +/* + * Get/set flash protection. + * + * If mask!=0, sets/clear the requested bits of flags. Depending on the + * firmware write protect GPIO, not all flags will take effect immediately; + * some flags require a subsequent hard reset to take effect. Check the + * returned flags bits to see what actually happened. + * + * If mask=0, simply returns the current flags state. + */ +#define EC_CMD_FLASH_PROTECT 0x15 +#define EC_VER_FLASH_PROTECT 1 /* Command version 1 */ + +/* Flags for flash protection */ +/* RO flash code protected when the EC boots */ +#define EC_FLASH_PROTECT_RO_AT_BOOT (1 << 0) +/* + * RO flash code protected now. If this bit is set, at-boot status cannot + * be changed. + */ +#define EC_FLASH_PROTECT_RO_NOW (1 << 1) +/* Entire flash code protected now, until reboot. */ +#define EC_FLASH_PROTECT_ALL_NOW (1 << 2) +/* Flash write protect GPIO is asserted now */ +#define EC_FLASH_PROTECT_GPIO_ASSERTED (1 << 3) +/* Error - at least one bank of flash is stuck locked, and cannot be unlocked */ +#define EC_FLASH_PROTECT_ERROR_STUCK (1 << 4) +/* + * Error - flash protection is in inconsistent state. At least one bank of + * flash which should be protected is not protected. Usually fixed by + * re-requesting the desired flags, or by a hard reset if that fails. + */ +#define EC_FLASH_PROTECT_ERROR_INCONSISTENT (1 << 5) +/* Entile flash code protected when the EC boots */ +#define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6) + +struct ec_params_flash_protect { + uint32_t mask; /* Bits in flags to apply */ + uint32_t flags; /* New flags to apply */ +} __packed; + +struct ec_response_flash_protect { + /* Current value of flash protect flags */ + uint32_t flags; + /* + * Flags which are valid on this platform. This allows the caller + * to distinguish between flags which aren't set vs. flags which can't + * be set on this platform. + */ + uint32_t valid_flags; + /* Flags which can be changed given the current protection state */ + uint32_t writable_flags; +} __packed; + +/* + * Note: commands 0x14 - 0x19 version 0 were old commands to get/set flash + * write protect. These commands may be reused with version > 0. 
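 *
 * Illustrative sketch (not from the upstream header): to request write
 * protection of the read-only image at boot, set the same bit in both the
 * mask and the flags, then inspect the returned flags to see what actually
 * took effect. "msg" is a hypothetical, pre-allocated message:
 *
 *     struct ec_params_flash_protect *p = (void *)msg->data;
 *
 *     msg->command = EC_CMD_FLASH_PROTECT;
 *     msg->version = EC_VER_FLASH_PROTECT;
 *     msg->outsize = sizeof(*p);
 *     msg->insize = sizeof(struct ec_response_flash_protect);
 *     p->mask = EC_FLASH_PROTECT_RO_AT_BOOT;
 *     p->flags = EC_FLASH_PROTECT_RO_AT_BOOT;
 *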
+ */ + +/* Get the region offset/size */ +#define EC_CMD_FLASH_REGION_INFO 0x16 +#define EC_VER_FLASH_REGION_INFO 1 + +enum ec_flash_region { + /* Region which holds read-only EC image */ + EC_FLASH_REGION_RO = 0, + /* Region which holds rewritable EC image */ + EC_FLASH_REGION_RW, + /* + * Region which should be write-protected in the factory (a superset of + * EC_FLASH_REGION_RO) + */ + EC_FLASH_REGION_WP_RO, + /* Number of regions */ + EC_FLASH_REGION_COUNT, +}; + +struct ec_params_flash_region_info { + uint32_t region; /* enum ec_flash_region */ +} __packed; + +struct ec_response_flash_region_info { + uint32_t offset; + uint32_t size; +} __packed; + +/* Read/write VbNvContext */ +#define EC_CMD_VBNV_CONTEXT 0x17 +#define EC_VER_VBNV_CONTEXT 1 +#define EC_VBNV_BLOCK_SIZE 16 + +enum ec_vbnvcontext_op { + EC_VBNV_CONTEXT_OP_READ, + EC_VBNV_CONTEXT_OP_WRITE, +}; + +struct ec_params_vbnvcontext { + uint32_t op; + uint8_t block[EC_VBNV_BLOCK_SIZE]; +} __packed; + +struct ec_response_vbnvcontext { + uint8_t block[EC_VBNV_BLOCK_SIZE]; +} __packed; + +/*****************************************************************************/ +/* PWM commands */ + +/* Get fan target RPM */ +#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x20 + +struct ec_response_pwm_get_fan_rpm { + uint32_t rpm; +} __packed; + +/* Set target fan RPM */ +#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x21 + +struct ec_params_pwm_set_fan_target_rpm { + uint32_t rpm; +} __packed; + +/* Get keyboard backlight */ +#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x22 + +struct ec_response_pwm_get_keyboard_backlight { + uint8_t percent; + uint8_t enabled; +} __packed; + +/* Set keyboard backlight */ +#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x23 + +struct ec_params_pwm_set_keyboard_backlight { + uint8_t percent; +} __packed; + +/* Set target fan PWM duty cycle */ +#define EC_CMD_PWM_SET_FAN_DUTY 0x24 + +struct ec_params_pwm_set_fan_duty { + uint32_t percent; +} __packed; + +#define EC_CMD_PWM_SET_DUTY 0x25 +/* 16 bit duty cycle, 0xffff = 100% */ +#define EC_PWM_MAX_DUTY 0xffff + +enum ec_pwm_type { + /* All types, indexed by board-specific enum pwm_channel */ + EC_PWM_TYPE_GENERIC = 0, + /* Keyboard backlight */ + EC_PWM_TYPE_KB_LIGHT, + /* Display backlight */ + EC_PWM_TYPE_DISPLAY_LIGHT, + EC_PWM_TYPE_COUNT, +}; + +struct ec_params_pwm_set_duty { + uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ + uint8_t pwm_type; /* ec_pwm_type */ + uint8_t index; /* Type-specific index, or 0 if unique */ +} __packed; + +#define EC_CMD_PWM_GET_DUTY 0x26 + +struct ec_params_pwm_get_duty { + uint8_t pwm_type; /* ec_pwm_type */ + uint8_t index; /* Type-specific index, or 0 if unique */ +} __packed; + +struct ec_response_pwm_get_duty { + uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ +} __packed; + +/*****************************************************************************/ +/* + * Lightbar commands. This looks worse than it is. Since we only use one HOST + * command to say "talk to the lightbar", we put the "and tell it to do X" part + * into a subcommand. We'll make separate structs for subcommands with + * different input args, so that we know how much to expect. + */ +#define EC_CMD_LIGHTBAR_CMD 0x28 + +struct rgb_s { + uint8_t r, g, b; +}; + +#define LB_BATTERY_LEVELS 4 +/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a + * host command, but the alignment is the same regardless. Keep it that way. 
+ */ +struct lightbar_params_v0 { + /* Timing */ + int32_t google_ramp_up; + int32_t google_ramp_down; + int32_t s3s0_ramp_up; + int32_t s0_tick_delay[2]; /* AC=0/1 */ + int32_t s0a_tick_delay[2]; /* AC=0/1 */ + int32_t s0s3_ramp_down; + int32_t s3_sleep_for; + int32_t s3_ramp_up; + int32_t s3_ramp_down; + + /* Oscillation */ + uint8_t new_s0; + uint8_t osc_min[2]; /* AC=0/1 */ + uint8_t osc_max[2]; /* AC=0/1 */ + uint8_t w_ofs[2]; /* AC=0/1 */ + + /* Brightness limits based on the backlight and AC. */ + uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */ + uint8_t bright_bl_on_min[2]; /* AC=0/1 */ + uint8_t bright_bl_on_max[2]; /* AC=0/1 */ + + /* Battery level thresholds */ + uint8_t battery_threshold[LB_BATTERY_LEVELS - 1]; + + /* Map [AC][battery_level] to color index */ + uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ + uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ + + /* Color palette */ + struct rgb_s color[8]; /* 0-3 are Google colors */ +} __packed; + +struct lightbar_params_v1 { + /* Timing */ + int32_t google_ramp_up; + int32_t google_ramp_down; + int32_t s3s0_ramp_up; + int32_t s0_tick_delay[2]; /* AC=0/1 */ + int32_t s0a_tick_delay[2]; /* AC=0/1 */ + int32_t s0s3_ramp_down; + int32_t s3_sleep_for; + int32_t s3_ramp_up; + int32_t s3_ramp_down; + int32_t tap_tick_delay; + int32_t tap_display_time; + + /* Tap-for-battery params */ + uint8_t tap_pct_red; + uint8_t tap_pct_green; + uint8_t tap_seg_min_on; + uint8_t tap_seg_max_on; + uint8_t tap_seg_osc; + uint8_t tap_idx[3]; + + /* Oscillation */ + uint8_t osc_min[2]; /* AC=0/1 */ + uint8_t osc_max[2]; /* AC=0/1 */ + uint8_t w_ofs[2]; /* AC=0/1 */ + + /* Brightness limits based on the backlight and AC. */ + uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */ + uint8_t bright_bl_on_min[2]; /* AC=0/1 */ + uint8_t bright_bl_on_max[2]; /* AC=0/1 */ + + /* Battery level thresholds */ + uint8_t battery_threshold[LB_BATTERY_LEVELS - 1]; + + /* Map [AC][battery_level] to color index */ + uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ + uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ + + /* Color palette */ + struct rgb_s color[8]; /* 0-3 are Google colors */ +} __packed; + +/* Lightbar program */ +#define EC_LB_PROG_LEN 192 +struct lightbar_program { + uint8_t size; + uint8_t data[EC_LB_PROG_LEN]; +}; + +struct ec_params_lightbar { + uint8_t cmd; /* Command (see enum lightbar_command) */ + union { + struct { + /* no args */ + } dump, off, on, init, get_seq, get_params_v0, get_params_v1, + version, get_brightness, get_demo, suspend, resume; + + struct { + uint8_t num; + } set_brightness, seq, demo; + + struct { + uint8_t ctrl, reg, value; + } reg; + + struct { + uint8_t led, red, green, blue; + } set_rgb; + + struct { + uint8_t led; + } get_rgb; + + struct { + uint8_t enable; + } manual_suspend_ctrl; + + struct lightbar_params_v0 set_params_v0; + struct lightbar_params_v1 set_params_v1; + struct lightbar_program set_program; + }; +} __packed; + +struct ec_response_lightbar { + union { + struct { + struct { + uint8_t reg; + uint8_t ic0; + uint8_t ic1; + } vals[23]; + } dump; + + struct { + uint8_t num; + } get_seq, get_brightness, get_demo; + + struct lightbar_params_v0 get_params_v0; + struct lightbar_params_v1 get_params_v1; + + struct { + uint32_t num; + uint32_t flags; + } version; + + struct { + uint8_t red, green, blue; + } get_rgb; + + struct { + /* no return params */ + } off, on, init, set_brightness, seq, reg, set_rgb, + demo, set_params_v0, set_params_v1, + set_program, manual_suspend_ctrl, suspend, 
resume; + }; +} __packed; + +/* Lightbar commands */ +enum lightbar_command { + LIGHTBAR_CMD_DUMP = 0, + LIGHTBAR_CMD_OFF = 1, + LIGHTBAR_CMD_ON = 2, + LIGHTBAR_CMD_INIT = 3, + LIGHTBAR_CMD_SET_BRIGHTNESS = 4, + LIGHTBAR_CMD_SEQ = 5, + LIGHTBAR_CMD_REG = 6, + LIGHTBAR_CMD_SET_RGB = 7, + LIGHTBAR_CMD_GET_SEQ = 8, + LIGHTBAR_CMD_DEMO = 9, + LIGHTBAR_CMD_GET_PARAMS_V0 = 10, + LIGHTBAR_CMD_SET_PARAMS_V0 = 11, + LIGHTBAR_CMD_VERSION = 12, + LIGHTBAR_CMD_GET_BRIGHTNESS = 13, + LIGHTBAR_CMD_GET_RGB = 14, + LIGHTBAR_CMD_GET_DEMO = 15, + LIGHTBAR_CMD_GET_PARAMS_V1 = 16, + LIGHTBAR_CMD_SET_PARAMS_V1 = 17, + LIGHTBAR_CMD_SET_PROGRAM = 18, + LIGHTBAR_CMD_MANUAL_SUSPEND_CTRL = 19, + LIGHTBAR_CMD_SUSPEND = 20, + LIGHTBAR_CMD_RESUME = 21, + LIGHTBAR_NUM_CMDS +}; + +/*****************************************************************************/ +/* LED control commands */ + +#define EC_CMD_LED_CONTROL 0x29 + +enum ec_led_id { + /* LED to indicate battery state of charge */ + EC_LED_ID_BATTERY_LED = 0, + /* + * LED to indicate system power state (on or in suspend). + * May be on power button or on C-panel. + */ + EC_LED_ID_POWER_LED, + /* LED on power adapter or its plug */ + EC_LED_ID_ADAPTER_LED, + + EC_LED_ID_COUNT +}; + +/* LED control flags */ +#define EC_LED_FLAGS_QUERY (1 << 0) /* Query LED capability only */ +#define EC_LED_FLAGS_AUTO (1 << 1) /* Switch LED back to automatic control */ + +enum ec_led_colors { + EC_LED_COLOR_RED = 0, + EC_LED_COLOR_GREEN, + EC_LED_COLOR_BLUE, + EC_LED_COLOR_YELLOW, + EC_LED_COLOR_WHITE, + + EC_LED_COLOR_COUNT +}; + +struct ec_params_led_control { + uint8_t led_id; /* Which LED to control */ + uint8_t flags; /* Control flags */ + + uint8_t brightness[EC_LED_COLOR_COUNT]; +} __packed; + +struct ec_response_led_control { + /* + * Available brightness value range. + * + * Range 0 means color channel not present. + * Range 1 means on/off control. + * Other values means the LED is control by PWM. + */ + uint8_t brightness_range[EC_LED_COLOR_COUNT]; +} __packed; + +/*****************************************************************************/ +/* Verified boot commands */ + +/* + * Note: command code 0x29 version 0 was VBOOT_CMD in Link EVT; it may be + * reused for other purposes with version > 0. 
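 *
 * Illustrative sketch (not from the upstream header) of the current use of
 * command 0x29, querying which colors of the battery LED can be driven;
 * "msg" is a hypothetical, pre-allocated message, and the response reuses
 * the same data buffer as the parameters:
 *
 *     struct ec_params_led_control *p = (void *)msg->data;
 *     struct ec_response_led_control *r = (void *)msg->data;
 *
 *     msg->command = EC_CMD_LED_CONTROL;
 *     msg->outsize = sizeof(*p);
 *     msg->insize = sizeof(*r);
 *     p->led_id = EC_LED_ID_BATTERY_LED;
 *     p->flags = EC_LED_FLAGS_QUERY;
 *
 * After a successful transfer, r->brightness_range[] is valid. The command
 * version to use can be discovered with EC_CMD_GET_CMD_VERSIONS.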
+ */ + +/* Verified boot hash command */ +#define EC_CMD_VBOOT_HASH 0x2A + +struct ec_params_vboot_hash { + uint8_t cmd; /* enum ec_vboot_hash_cmd */ + uint8_t hash_type; /* enum ec_vboot_hash_type */ + uint8_t nonce_size; /* Nonce size; may be 0 */ + uint8_t reserved0; /* Reserved; set 0 */ + uint32_t offset; /* Offset in flash to hash */ + uint32_t size; /* Number of bytes to hash */ + uint8_t nonce_data[64]; /* Nonce data; ignored if nonce_size=0 */ +} __packed; + +struct ec_response_vboot_hash { + uint8_t status; /* enum ec_vboot_hash_status */ + uint8_t hash_type; /* enum ec_vboot_hash_type */ + uint8_t digest_size; /* Size of hash digest in bytes */ + uint8_t reserved0; /* Ignore; will be 0 */ + uint32_t offset; /* Offset in flash which was hashed */ + uint32_t size; /* Number of bytes hashed */ + uint8_t hash_digest[64]; /* Hash digest data */ +} __packed; + +enum ec_vboot_hash_cmd { + EC_VBOOT_HASH_GET = 0, /* Get current hash status */ + EC_VBOOT_HASH_ABORT = 1, /* Abort calculating current hash */ + EC_VBOOT_HASH_START = 2, /* Start computing a new hash */ + EC_VBOOT_HASH_RECALC = 3, /* Synchronously compute a new hash */ +}; + +enum ec_vboot_hash_type { + EC_VBOOT_HASH_TYPE_SHA256 = 0, /* SHA-256 */ +}; + +enum ec_vboot_hash_status { + EC_VBOOT_HASH_STATUS_NONE = 0, /* No hash (not started, or aborted) */ + EC_VBOOT_HASH_STATUS_DONE = 1, /* Finished computing a hash */ + EC_VBOOT_HASH_STATUS_BUSY = 2, /* Busy computing a hash */ +}; + +/* + * Special values for offset for EC_VBOOT_HASH_START and EC_VBOOT_HASH_RECALC. + * If one of these is specified, the EC will automatically update offset and + * size to the correct values for the specified image (RO or RW). + */ +#define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe +#define EC_VBOOT_HASH_OFFSET_RW 0xfffffffd + +/*****************************************************************************/ +/* + * Motion sense commands. We'll make separate structs for sub-commands with + * different input args, so that we know how much to expect. + */ +#define EC_CMD_MOTION_SENSE_CMD 0x2B + +/* Motion sense commands */ +enum motionsense_command { + /* + * Dump command returns all motion sensor data including motion sense + * module flags and individual sensor flags. + */ + MOTIONSENSE_CMD_DUMP = 0, + + /* + * Info command returns data describing the details of a given sensor, + * including enum motionsensor_type, enum motionsensor_location, and + * enum motionsensor_chip. + */ + MOTIONSENSE_CMD_INFO = 1, + + /* + * EC Rate command is a setter/getter command for the EC sampling rate + * of all motion sensors in milliseconds. + */ + MOTIONSENSE_CMD_EC_RATE = 2, + + /* + * Sensor ODR command is a setter/getter command for the output data + * rate of a specific motion sensor in millihertz. + */ + MOTIONSENSE_CMD_SENSOR_ODR = 3, + + /* + * Sensor range command is a setter/getter command for the range of + * a specified motion sensor in +/-G's or +/- deg/s. + */ + MOTIONSENSE_CMD_SENSOR_RANGE = 4, + + /* + * Setter/getter command for the keyboard wake angle. When the lid + * angle is greater than this value, keyboard wake is disabled in S3, + * and when the lid angle goes less than this value, keyboard wake is + * enabled. Note, the lid angle measurement is an approximate, + * un-calibrated value, hence the wake angle isn't exact. + */ + MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5, + + /* + * Returns a single sensor data. + */ + MOTIONSENSE_CMD_DATA = 6, + + /* + * Perform low level calibration.. On sensors that support it, ask to + * do offset calibration. 
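+	 *
+	 * Illustrative usage (not normative): after asking the EC to run
+	 * PERFORM_CALIB on a sensor, a host would typically read the
+	 * resulting offsets back with MOTIONSENSE_CMD_SENSOR_OFFSET
+	 * (flags bit 0 clear, i.e. a get) rather than computing them itself.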
+ */ + MOTIONSENSE_CMD_PERFORM_CALIB = 10, + + /* + * Sensor Offset command is a setter/getter command for the offset used + * for calibration. The offsets can be calculated by the host, or via + * PERFORM_CALIB command. + */ + MOTIONSENSE_CMD_SENSOR_OFFSET = 11, + + /* Number of motionsense sub-commands. */ + MOTIONSENSE_NUM_CMDS +}; + +enum motionsensor_id { + EC_MOTION_SENSOR_ACCEL_BASE = 0, + EC_MOTION_SENSOR_ACCEL_LID = 1, + EC_MOTION_SENSOR_GYRO = 2, + + /* + * Note, if more sensors are added and this count changes, the padding + * in ec_response_motion_sense dump command must be modified. + */ + EC_MOTION_SENSOR_COUNT = 3 +}; + +/* List of motion sensor types. */ +enum motionsensor_type { + MOTIONSENSE_TYPE_ACCEL = 0, + MOTIONSENSE_TYPE_GYRO = 1, + MOTIONSENSE_TYPE_MAG = 2, + MOTIONSENSE_TYPE_PROX = 3, + MOTIONSENSE_TYPE_LIGHT = 4, + MOTIONSENSE_TYPE_ACTIVITY = 5, + MOTIONSENSE_TYPE_BARO = 6, + MOTIONSENSE_TYPE_MAX, +}; + +/* List of motion sensor locations. */ +enum motionsensor_location { + MOTIONSENSE_LOC_BASE = 0, + MOTIONSENSE_LOC_LID = 1, + MOTIONSENSE_LOC_MAX, +}; + +/* List of motion sensor chips. */ +enum motionsensor_chip { + MOTIONSENSE_CHIP_KXCJ9 = 0, +}; + +/* Module flag masks used for the dump sub-command. */ +#define MOTIONSENSE_MODULE_FLAG_ACTIVE (1<<0) + +/* Sensor flag masks used for the dump sub-command. */ +#define MOTIONSENSE_SENSOR_FLAG_PRESENT (1<<0) + +/* + * Send this value for the data element to only perform a read. If you + * send any other value, the EC will interpret it as data to set and will + * return the actual value set. + */ +#define EC_MOTION_SENSE_NO_VALUE -1 + +#define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000 + +/* Set Calibration information */ +#define MOTION_SENSE_SET_OFFSET 1 + +struct ec_response_motion_sensor_data { + /* Flags for each sensor. */ + uint8_t flags; + /* Sensor number the data comes from */ + uint8_t sensor_num; + /* Each sensor is up to 3-axis. */ + union { + int16_t data[3]; + struct { + uint16_t rsvd; + uint32_t timestamp; + } __packed; + struct { + uint8_t activity; /* motionsensor_activity */ + uint8_t state; + int16_t add_info[2]; + }; + }; +} __packed; + +struct ec_params_motion_sense { + uint8_t cmd; + union { + /* Used for MOTIONSENSE_CMD_DUMP. */ + struct { + /* no args */ + } dump; + + /* + * Used for MOTIONSENSE_CMD_EC_RATE and + * MOTIONSENSE_CMD_KB_WAKE_ANGLE. + */ + struct { + /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ + int16_t data; + } ec_rate, kb_wake_angle; + + /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ + struct { + uint8_t sensor_num; + + /* + * bit 0: If set (MOTION_SENSE_SET_OFFSET), set + * the calibration information in the EC. + * If unset, just retrieve calibration information. + */ + uint16_t flags; + + /* + * Temperature at calibration, in units of 0.01 C + * 0x8000: invalid / unknown. + * 0x0: 0C + * 0x7fff: +327.67C + */ + int16_t temp; + + /* + * Offset for calibration. + * Unit: + * Accelerometer: 1/1024 g + * Gyro: 1/1024 deg/s + * Compass: 1/16 uT + */ + int16_t offset[3]; + } __packed sensor_offset; + + /* Used for MOTIONSENSE_CMD_INFO. */ + struct { + uint8_t sensor_num; + } info; + + /* + * Used for MOTIONSENSE_CMD_SENSOR_ODR and + * MOTIONSENSE_CMD_SENSOR_RANGE. + */ + struct { + /* Should be element of enum motionsensor_id. */ + uint8_t sensor_num; + + /* Rounding flag, true for round-up, false for down. */ + uint8_t roundup; + + uint16_t reserved; + + /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. 
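+			 * For example (illustrative), with
+			 * MOTIONSENSE_CMD_SENSOR_ODR a value of data = 12500
+			 * requests a 12.5 Hz output data rate, and roundup
+			 * selects whether the EC rounds that request up or
+			 * down to a rate the sensor actually supports.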
*/ + int32_t data; + } sensor_odr, sensor_range; + }; +} __packed; + +struct ec_response_motion_sense { + union { + /* Used for MOTIONSENSE_CMD_DUMP. */ + struct { + /* Flags representing the motion sensor module. */ + uint8_t module_flags; + + /* Number of sensors managed directly by the EC. */ + uint8_t sensor_count; + + /* + * Sensor data is truncated if response_max is too small + * for holding all the data. + */ + struct ec_response_motion_sensor_data sensor[0]; + } dump; + + /* Used for MOTIONSENSE_CMD_INFO. */ + struct { + /* Should be element of enum motionsensor_type. */ + uint8_t type; + + /* Should be element of enum motionsensor_location. */ + uint8_t location; + + /* Should be element of enum motionsensor_chip. */ + uint8_t chip; + } info; + + /* Used for MOTIONSENSE_CMD_DATA */ + struct ec_response_motion_sensor_data data; + + /* + * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR, + * MOTIONSENSE_CMD_SENSOR_RANGE, and + * MOTIONSENSE_CMD_KB_WAKE_ANGLE. + */ + struct { + /* Current value of the parameter queried. */ + int32_t ret; + } ec_rate, sensor_odr, sensor_range, kb_wake_angle; + + /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ + struct { + int16_t temp; + int16_t offset[3]; + } sensor_offset, perform_calib; + }; +} __packed; + +/*****************************************************************************/ +/* USB charging control commands */ + +/* Set USB port charging mode */ +#define EC_CMD_USB_CHARGE_SET_MODE 0x30 + +struct ec_params_usb_charge_set_mode { + uint8_t usb_port_id; + uint8_t mode; +} __packed; + +/*****************************************************************************/ +/* Persistent storage for host */ + +/* Maximum bytes that can be read/written in a single command */ +#define EC_PSTORE_SIZE_MAX 64 + +/* Get persistent storage info */ +#define EC_CMD_PSTORE_INFO 0x40 + +struct ec_response_pstore_info { + /* Persistent storage size, in bytes */ + uint32_t pstore_size; + /* Access size; read/write offset and size must be a multiple of this */ + uint32_t access_size; +} __packed; + +/* + * Read persistent storage + * + * Response is params.size bytes of data. 
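+ *
+ * Illustrative example (values hypothetical): a host could read 16 bytes
+ * at offset 0 by sending struct ec_params_pstore_read = { .offset = 0,
+ * .size = 16 } and treating the 16 response bytes as the stored data.
+ * offset and size must be multiples of the access_size reported by
+ * EC_CMD_PSTORE_INFO, and size may not exceed EC_PSTORE_SIZE_MAX.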
+ */ +#define EC_CMD_PSTORE_READ 0x41 + +struct ec_params_pstore_read { + uint32_t offset; /* Byte offset to read */ + uint32_t size; /* Size to read in bytes */ +} __packed; + +/* Write persistent storage */ +#define EC_CMD_PSTORE_WRITE 0x42 + +struct ec_params_pstore_write { + uint32_t offset; /* Byte offset to write */ + uint32_t size; /* Size to write in bytes */ + uint8_t data[EC_PSTORE_SIZE_MAX]; +} __packed; + +/*****************************************************************************/ +/* Real-time clock */ + +/* RTC params and response structures */ +struct ec_params_rtc { + uint32_t time; +} __packed; + +struct ec_response_rtc { + uint32_t time; +} __packed; + +/* These use ec_response_rtc */ +#define EC_CMD_RTC_GET_VALUE 0x44 +#define EC_CMD_RTC_GET_ALARM 0x45 + +/* These all use ec_params_rtc */ +#define EC_CMD_RTC_SET_VALUE 0x46 +#define EC_CMD_RTC_SET_ALARM 0x47 + +/* Pass as param to SET_ALARM to clear the current alarm */ +#define EC_RTC_ALARM_CLEAR 0 + +/*****************************************************************************/ +/* Port80 log access */ + +/* Maximum entries that can be read/written in a single command */ +#define EC_PORT80_SIZE_MAX 32 + +/* Get last port80 code from previous boot */ +#define EC_CMD_PORT80_LAST_BOOT 0x48 +#define EC_CMD_PORT80_READ 0x48 + +enum ec_port80_subcmd { + EC_PORT80_GET_INFO = 0, + EC_PORT80_READ_BUFFER, +}; + +struct ec_params_port80_read { + uint16_t subcmd; + union { + struct { + uint32_t offset; + uint32_t num_entries; + } read_buffer; + }; +} __packed; + +struct ec_response_port80_read { + union { + struct { + uint32_t writes; + uint32_t history_size; + uint32_t last_boot; + } get_info; + struct { + uint16_t codes[EC_PORT80_SIZE_MAX]; + } data; + }; +} __packed; + +struct ec_response_port80_last_boot { + uint16_t code; +} __packed; + +/*****************************************************************************/ +/* Thermal engine commands. Note that there are two implementations. We'll + * reuse the command number, but the data and behavior is incompatible. + * Version 0 is what originally shipped on Link. + * Version 1 separates the CPU thermal limits from the fan control. + */ + +#define EC_CMD_THERMAL_SET_THRESHOLD 0x50 +#define EC_CMD_THERMAL_GET_THRESHOLD 0x51 + +/* The version 0 structs are opaque. You have to know what they are for + * the get/set commands to make any sense. + */ + +/* Version 0 - set */ +struct ec_params_thermal_set_threshold { + uint8_t sensor_type; + uint8_t threshold_id; + uint16_t value; +} __packed; + +/* Version 0 - get */ +struct ec_params_thermal_get_threshold { + uint8_t sensor_type; + uint8_t threshold_id; +} __packed; + +struct ec_response_thermal_get_threshold { + uint16_t value; +} __packed; + + +/* The version 1 structs are visible. */ +enum ec_temp_thresholds { + EC_TEMP_THRESH_WARN = 0, + EC_TEMP_THRESH_HIGH, + EC_TEMP_THRESH_HALT, + + EC_TEMP_THRESH_COUNT +}; + +/* Thermal configuration for one temperature sensor. Temps are in degrees K. + * Zero values will be silently ignored by the thermal task. + */ +struct ec_thermal_config { + uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */ + uint32_t temp_fan_off; /* no active cooling needed */ + uint32_t temp_fan_max; /* max active cooling needed */ +} __packed; + +/* Version 1 - get config for one sensor. */ +struct ec_params_thermal_get_threshold_v1 { + uint32_t sensor_num; +} __packed; +/* This returns a struct ec_thermal_config */ + +/* Version 1 - set config for one sensor. 
+ * Use read-modify-write for best results! */ +struct ec_params_thermal_set_threshold_v1 { + uint32_t sensor_num; + struct ec_thermal_config cfg; +} __packed; +/* This returns no data */ + +/****************************************************************************/ + +/* Toggle automatic fan control */ +#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x52 + +/* Get TMP006 calibration data */ +#define EC_CMD_TMP006_GET_CALIBRATION 0x53 + +struct ec_params_tmp006_get_calibration { + uint8_t index; +} __packed; + +struct ec_response_tmp006_get_calibration { + float s0; + float b0; + float b1; + float b2; +} __packed; + +/* Set TMP006 calibration data */ +#define EC_CMD_TMP006_SET_CALIBRATION 0x54 + +struct ec_params_tmp006_set_calibration { + uint8_t index; + uint8_t reserved[3]; /* Reserved; set 0 */ + float s0; + float b0; + float b1; + float b2; +} __packed; + +/* Read raw TMP006 data */ +#define EC_CMD_TMP006_GET_RAW 0x55 + +struct ec_params_tmp006_get_raw { + uint8_t index; +} __packed; + +struct ec_response_tmp006_get_raw { + int32_t t; /* In 1/100 K */ + int32_t v; /* In nV */ +}; + +/*****************************************************************************/ +/* MKBP - Matrix KeyBoard Protocol */ + +/* + * Read key state + * + * Returns raw data for keyboard cols; see ec_response_mkbp_info.cols for + * expected response size. + * + * NOTE: This has been superseded by EC_CMD_MKBP_GET_NEXT_EVENT. If you wish + * to obtain the instantaneous state, use EC_CMD_MKBP_INFO with the type + * EC_MKBP_INFO_CURRENT and event EC_MKBP_EVENT_KEY_MATRIX. + */ +#define EC_CMD_MKBP_STATE 0x60 + +/* + * Provide information about various MKBP things. See enum ec_mkbp_info_type. + */ +#define EC_CMD_MKBP_INFO 0x61 + +struct ec_response_mkbp_info { + uint32_t rows; + uint32_t cols; + /* Formerly "switches", which was 0. */ + uint8_t reserved; +} __packed; + +struct ec_params_mkbp_info { + uint8_t info_type; + uint8_t event_type; +} __packed; + +enum ec_mkbp_info_type { + /* + * Info about the keyboard matrix: number of rows and columns. + * + * Returns struct ec_response_mkbp_info. + */ + EC_MKBP_INFO_KBD = 0, + + /* + * For buttons and switches, info about which specifically are + * supported. event_type must be set to one of the values in enum + * ec_mkbp_event. + * + * For EC_MKBP_EVENT_BUTTON and EC_MKBP_EVENT_SWITCH, returns a 4 byte + * bitmask indicating which buttons or switches are present. See the + * bit inidices below. + */ + EC_MKBP_INFO_SUPPORTED = 1, + + /* + * Instantaneous state of buttons and switches. + * + * event_type must be set to one of the values in enum ec_mkbp_event. + * + * For EC_MKBP_EVENT_KEY_MATRIX, returns uint8_t key_matrix[13] + * indicating the current state of the keyboard matrix. + * + * For EC_MKBP_EVENT_HOST_EVENT, return uint32_t host_event, the raw + * event state. + * + * For EC_MKBP_EVENT_BUTTON, returns uint32_t buttons, indicating the + * state of supported buttons. + * + * For EC_MKBP_EVENT_SWITCH, returns uint32_t switches, indicating the + * state of supported switches. 
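+	 *
+	 * Illustrative example: to sample the current switch state, send
+	 * EC_CMD_MKBP_INFO with struct ec_params_mkbp_info = {
+	 * .info_type = EC_MKBP_INFO_CURRENT,
+	 * .event_type = EC_MKBP_EVENT_SWITCH } and read back a uint32_t;
+	 * bit EC_MKBP_LID_OPEN of that word then reflects the lid switch.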
+ */ + EC_MKBP_INFO_CURRENT = 2, +}; + +/* Simulate key press */ +#define EC_CMD_MKBP_SIMULATE_KEY 0x62 + +struct ec_params_mkbp_simulate_key { + uint8_t col; + uint8_t row; + uint8_t pressed; +} __packed; + +/* Configure keyboard scanning */ +#define EC_CMD_MKBP_SET_CONFIG 0x64 +#define EC_CMD_MKBP_GET_CONFIG 0x65 + +/* flags */ +enum mkbp_config_flags { + EC_MKBP_FLAGS_ENABLE = 1, /* Enable keyboard scanning */ +}; + +enum mkbp_config_valid { + EC_MKBP_VALID_SCAN_PERIOD = 1 << 0, + EC_MKBP_VALID_POLL_TIMEOUT = 1 << 1, + EC_MKBP_VALID_MIN_POST_SCAN_DELAY = 1 << 3, + EC_MKBP_VALID_OUTPUT_SETTLE = 1 << 4, + EC_MKBP_VALID_DEBOUNCE_DOWN = 1 << 5, + EC_MKBP_VALID_DEBOUNCE_UP = 1 << 6, + EC_MKBP_VALID_FIFO_MAX_DEPTH = 1 << 7, +}; + +/* Configuration for our key scanning algorithm */ +struct ec_mkbp_config { + uint32_t valid_mask; /* valid fields */ + uint8_t flags; /* some flags (enum mkbp_config_flags) */ + uint8_t valid_flags; /* which flags are valid */ + uint16_t scan_period_us; /* period between start of scans */ + /* revert to interrupt mode after no activity for this long */ + uint32_t poll_timeout_us; + /* + * minimum post-scan relax time. Once we finish a scan we check + * the time until we are due to start the next one. If this time is + * shorter this field, we use this instead. + */ + uint16_t min_post_scan_delay_us; + /* delay between setting up output and waiting for it to settle */ + uint16_t output_settle_us; + uint16_t debounce_down_us; /* time for debounce on key down */ + uint16_t debounce_up_us; /* time for debounce on key up */ + /* maximum depth to allow for fifo (0 = no keyscan output) */ + uint8_t fifo_max_depth; +} __packed; + +struct ec_params_mkbp_set_config { + struct ec_mkbp_config config; +} __packed; + +struct ec_response_mkbp_get_config { + struct ec_mkbp_config config; +} __packed; + +/* Run the key scan emulation */ +#define EC_CMD_KEYSCAN_SEQ_CTRL 0x66 + +enum ec_keyscan_seq_cmd { + EC_KEYSCAN_SEQ_STATUS = 0, /* Get status information */ + EC_KEYSCAN_SEQ_CLEAR = 1, /* Clear sequence */ + EC_KEYSCAN_SEQ_ADD = 2, /* Add item to sequence */ + EC_KEYSCAN_SEQ_START = 3, /* Start running sequence */ + EC_KEYSCAN_SEQ_COLLECT = 4, /* Collect sequence summary data */ +}; + +enum ec_collect_flags { + /* + * Indicates this scan was processed by the EC. Due to timing, some + * scans may be skipped. + */ + EC_KEYSCAN_SEQ_FLAG_DONE = 1 << 0, +}; + +struct ec_collect_item { + uint8_t flags; /* some flags (enum ec_collect_flags) */ +}; + +struct ec_params_keyscan_seq_ctrl { + uint8_t cmd; /* Command to send (enum ec_keyscan_seq_cmd) */ + union { + struct { + uint8_t active; /* still active */ + uint8_t num_items; /* number of items */ + /* Current item being presented */ + uint8_t cur_item; + } status; + struct { + /* + * Absolute time for this scan, measured from the + * start of the sequence. + */ + uint32_t time_us; + uint8_t scan[0]; /* keyscan data */ + } add; + struct { + uint8_t start_item; /* First item to return */ + uint8_t num_items; /* Number of items to return */ + } collect; + }; +} __packed; + +struct ec_result_keyscan_seq_ctrl { + union { + struct { + uint8_t num_items; /* Number of items */ + /* Data for each item */ + struct ec_collect_item item[0]; + } collect; + }; +} __packed; + +/* + * Command for retrieving the next pending MKBP event from the EC device + * + * The device replies with UNAVAILABLE if there aren't any pending events. + */ +#define EC_CMD_GET_NEXT_EVENT 0x67 + +enum ec_mkbp_event { + /* Keyboard matrix changed. 
The event data is the new matrix state. */ + EC_MKBP_EVENT_KEY_MATRIX = 0, + + /* New host event. The event data is 4 bytes of host event flags. */ + EC_MKBP_EVENT_HOST_EVENT = 1, + + /* New Sensor FIFO data. The event data is fifo_info structure. */ + EC_MKBP_EVENT_SENSOR_FIFO = 2, + + /* The state of the non-matrixed buttons have changed. */ + EC_MKBP_EVENT_BUTTON = 3, + + /* The state of the switches have changed. */ + EC_MKBP_EVENT_SWITCH = 4, + + /* EC sent a sysrq command */ + EC_MKBP_EVENT_SYSRQ = 6, + + /* Notify the AP that something happened on CEC */ + EC_MKBP_EVENT_CEC_EVENT = 8, + + /* Send an incoming CEC message to the AP */ + EC_MKBP_EVENT_CEC_MESSAGE = 9, + + /* Number of MKBP events */ + EC_MKBP_EVENT_COUNT, +}; + +union ec_response_get_next_data { + uint8_t key_matrix[13]; + + /* Unaligned */ + uint32_t host_event; + + uint32_t buttons; + uint32_t switches; + uint32_t sysrq; +} __packed; + +union ec_response_get_next_data_v1 { + uint8_t key_matrix[16]; + uint32_t host_event; + uint32_t buttons; + uint32_t switches; + uint32_t sysrq; + uint32_t cec_events; + uint8_t cec_message[16]; +} __packed; + +struct ec_response_get_next_event { + uint8_t event_type; + /* Followed by event data if any */ + union ec_response_get_next_data data; +} __packed; + +struct ec_response_get_next_event_v1 { + uint8_t event_type; + /* Followed by event data if any */ + union ec_response_get_next_data_v1 data; +} __packed; + +/* Bit indices for buttons and switches.*/ +/* Buttons */ +#define EC_MKBP_POWER_BUTTON 0 +#define EC_MKBP_VOL_UP 1 +#define EC_MKBP_VOL_DOWN 2 + +/* Switches */ +#define EC_MKBP_LID_OPEN 0 +#define EC_MKBP_TABLET_MODE 1 + +/*****************************************************************************/ +/* Temperature sensor commands */ + +/* Read temperature sensor info */ +#define EC_CMD_TEMP_SENSOR_GET_INFO 0x70 + +struct ec_params_temp_sensor_get_info { + uint8_t id; +} __packed; + +struct ec_response_temp_sensor_get_info { + char sensor_name[32]; + uint8_t sensor_type; +} __packed; + +/*****************************************************************************/ + +/* + * Note: host commands 0x80 - 0x87 are reserved to avoid conflict with ACPI + * commands accidentally sent to the wrong interface. See the ACPI section + * below. + */ + +/*****************************************************************************/ +/* Host event commands */ + +/* + * Host event mask params and response structures, shared by all of the host + * event commands below. 
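+ *
+ * For example (illustrative), a host that wants a particular event to wake
+ * the system sets the corresponding EC_HOST_EVENT_MASK() bit in
+ * ec_params_host_event_mask.mask and sends
+ * EC_CMD_HOST_EVENT_SET_WAKE_MASK; the active mask can be read back with
+ * EC_CMD_HOST_EVENT_GET_WAKE_MASK.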
+ */ +struct ec_params_host_event_mask { + uint32_t mask; +} __packed; + +struct ec_response_host_event_mask { + uint32_t mask; +} __packed; + +/* These all use ec_response_host_event_mask */ +#define EC_CMD_HOST_EVENT_GET_B 0x87 +#define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x88 +#define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x89 +#define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x8d + +/* These all use ec_params_host_event_mask */ +#define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x8a +#define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x8b +#define EC_CMD_HOST_EVENT_CLEAR 0x8c +#define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x8e +#define EC_CMD_HOST_EVENT_CLEAR_B 0x8f + +/*****************************************************************************/ +/* Switch commands */ + +/* Enable/disable LCD backlight */ +#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x90 + +struct ec_params_switch_enable_backlight { + uint8_t enabled; +} __packed; + +/* Enable/disable WLAN/Bluetooth */ +#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x91 +#define EC_VER_SWITCH_ENABLE_WIRELESS 1 + +/* Version 0 params; no response */ +struct ec_params_switch_enable_wireless_v0 { + uint8_t enabled; +} __packed; + +/* Version 1 params */ +struct ec_params_switch_enable_wireless_v1 { + /* Flags to enable now */ + uint8_t now_flags; + + /* Which flags to copy from now_flags */ + uint8_t now_mask; + + /* + * Flags to leave enabled in S3, if they're on at the S0->S3 + * transition. (Other flags will be disabled by the S0->S3 + * transition.) + */ + uint8_t suspend_flags; + + /* Which flags to copy from suspend_flags */ + uint8_t suspend_mask; +} __packed; + +/* Version 1 response */ +struct ec_response_switch_enable_wireless_v1 { + /* Flags to enable now */ + uint8_t now_flags; + + /* Flags to leave enabled in S3 */ + uint8_t suspend_flags; +} __packed; + +/*****************************************************************************/ +/* GPIO commands. Only available on EC if write protect has been disabled. */ + +/* Set GPIO output value */ +#define EC_CMD_GPIO_SET 0x92 + +struct ec_params_gpio_set { + char name[32]; + uint8_t val; +} __packed; + +/* Get GPIO value */ +#define EC_CMD_GPIO_GET 0x93 + +/* Version 0 of input params and response */ +struct ec_params_gpio_get { + char name[32]; +} __packed; +struct ec_response_gpio_get { + uint8_t val; +} __packed; + +/* Version 1 of input params and response */ +struct ec_params_gpio_get_v1 { + uint8_t subcmd; + union { + struct { + char name[32]; + } get_value_by_name; + struct { + uint8_t index; + } get_info; + }; +} __packed; + +struct ec_response_gpio_get_v1 { + union { + struct { + uint8_t val; + } get_value_by_name, get_count; + struct { + uint8_t val; + char name[32]; + uint32_t flags; + } get_info; + }; +} __packed; + +enum gpio_get_subcmd { + EC_GPIO_GET_BY_NAME = 0, + EC_GPIO_GET_COUNT = 1, + EC_GPIO_GET_INFO = 2, +}; + +/*****************************************************************************/ +/* I2C commands. Only available when flash write protect is unlocked. */ + +/* + * TODO(crosbug.com/p/23570): These commands are deprecated, and will be + * removed soon. Use EC_CMD_I2C_XFER instead. + */ + +/* Read I2C bus */ +#define EC_CMD_I2C_READ 0x94 + +struct ec_params_i2c_read { + uint16_t addr; /* 8-bit address (7-bit shifted << 1) */ + uint8_t read_size; /* Either 8 or 16. 
*/ + uint8_t port; + uint8_t offset; +} __packed; +struct ec_response_i2c_read { + uint16_t data; +} __packed; + +/* Write I2C bus */ +#define EC_CMD_I2C_WRITE 0x95 + +struct ec_params_i2c_write { + uint16_t data; + uint16_t addr; /* 8-bit address (7-bit shifted << 1) */ + uint8_t write_size; /* Either 8 or 16. */ + uint8_t port; + uint8_t offset; +} __packed; + +/*****************************************************************************/ +/* Charge state commands. Only available when flash write protect unlocked. */ + +/* Force charge state machine to stop charging the battery or force it to + * discharge the battery. + */ +#define EC_CMD_CHARGE_CONTROL 0x96 +#define EC_VER_CHARGE_CONTROL 1 + +enum ec_charge_control_mode { + CHARGE_CONTROL_NORMAL = 0, + CHARGE_CONTROL_IDLE, + CHARGE_CONTROL_DISCHARGE, +}; + +struct ec_params_charge_control { + uint32_t mode; /* enum charge_control_mode */ +} __packed; + +/*****************************************************************************/ +/* Console commands. Only available when flash write protect is unlocked. */ + +/* Snapshot console output buffer for use by EC_CMD_CONSOLE_READ. */ +#define EC_CMD_CONSOLE_SNAPSHOT 0x97 + +/* + * Read data from the saved snapshot. If the subcmd parameter is + * CONSOLE_READ_NEXT, this will return data starting from the beginning of + * the latest snapshot. If it is CONSOLE_READ_RECENT, it will start from the + * end of the previous snapshot. + * + * The params are only looked at in version >= 1 of this command. Prior + * versions will just default to CONSOLE_READ_NEXT behavior. + * + * Response is null-terminated string. Empty string, if there is no more + * remaining output. + */ +#define EC_CMD_CONSOLE_READ 0x98 + +enum ec_console_read_subcmd { + CONSOLE_READ_NEXT = 0, + CONSOLE_READ_RECENT +}; + +struct ec_params_console_read_v1 { + uint8_t subcmd; /* enum ec_console_read_subcmd */ +} __packed; + +/*****************************************************************************/ + +/* + * Cut off battery power immediately or after the host has shut down. + * + * return EC_RES_INVALID_COMMAND if unsupported by a board/battery. + * EC_RES_SUCCESS if the command was successful. + * EC_RES_ERROR if the cut off command failed. + */ + +#define EC_CMD_BATTERY_CUT_OFF 0x99 + +#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0) + +struct ec_params_battery_cutoff { + uint8_t flags; +} __packed; + +/*****************************************************************************/ +/* USB port mux control. */ + +/* + * Switch USB mux or return to automatic switching. + */ +#define EC_CMD_USB_MUX 0x9a + +struct ec_params_usb_mux { + uint8_t mux; +} __packed; + +/*****************************************************************************/ +/* LDOs / FETs control. */ + +enum ec_ldo_state { + EC_LDO_STATE_OFF = 0, /* the LDO / FET is shut down */ + EC_LDO_STATE_ON = 1, /* the LDO / FET is ON / providing power */ +}; + +/* + * Switch on/off a LDO. + */ +#define EC_CMD_LDO_SET 0x9b + +struct ec_params_ldo_set { + uint8_t index; + uint8_t state; +} __packed; + +/* + * Get LDO state. + */ +#define EC_CMD_LDO_GET 0x9c + +struct ec_params_ldo_get { + uint8_t index; +} __packed; + +struct ec_response_ldo_get { + uint8_t state; +} __packed; + +/*****************************************************************************/ +/* Power info. */ + +/* + * Get power info. 
+ */ +#define EC_CMD_POWER_INFO 0x9d + +struct ec_response_power_info { + uint32_t usb_dev_type; + uint16_t voltage_ac; + uint16_t voltage_system; + uint16_t current_system; + uint16_t usb_current_limit; +} __packed; + +/*****************************************************************************/ +/* I2C passthru command */ + +#define EC_CMD_I2C_PASSTHRU 0x9e + +/* Read data; if not present, message is a write */ +#define EC_I2C_FLAG_READ (1 << 15) + +/* Mask for address */ +#define EC_I2C_ADDR_MASK 0x3ff + +#define EC_I2C_STATUS_NAK (1 << 0) /* Transfer was not acknowledged */ +#define EC_I2C_STATUS_TIMEOUT (1 << 1) /* Timeout during transfer */ + +/* Any error */ +#define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT) + +struct ec_params_i2c_passthru_msg { + uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */ + uint16_t len; /* Number of bytes to read or write */ +} __packed; + +struct ec_params_i2c_passthru { + uint8_t port; /* I2C port number */ + uint8_t num_msgs; /* Number of messages */ + struct ec_params_i2c_passthru_msg msg[]; + /* Data to write for all messages is concatenated here */ +} __packed; + +struct ec_response_i2c_passthru { + uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) */ + uint8_t num_msgs; /* Number of messages processed */ + uint8_t data[]; /* Data read by messages concatenated here */ +} __packed; + +/*****************************************************************************/ +/* Power button hang detect */ + +#define EC_CMD_HANG_DETECT 0x9f + +/* Reasons to start hang detection timer */ +/* Power button pressed */ +#define EC_HANG_START_ON_POWER_PRESS (1 << 0) + +/* Lid closed */ +#define EC_HANG_START_ON_LID_CLOSE (1 << 1) + + /* Lid opened */ +#define EC_HANG_START_ON_LID_OPEN (1 << 2) + +/* Start of AP S3->S0 transition (booting or resuming from suspend) */ +#define EC_HANG_START_ON_RESUME (1 << 3) + +/* Reasons to cancel hang detection */ + +/* Power button released */ +#define EC_HANG_STOP_ON_POWER_RELEASE (1 << 8) + +/* Any host command from AP received */ +#define EC_HANG_STOP_ON_HOST_COMMAND (1 << 9) + +/* Stop on end of AP S0->S3 transition (suspending or shutting down) */ +#define EC_HANG_STOP_ON_SUSPEND (1 << 10) + +/* + * If this flag is set, all the other fields are ignored, and the hang detect + * timer is started. This provides the AP a way to start the hang timer + * without reconfiguring any of the other hang detect settings. Note that + * you must previously have configured the timeouts. + */ +#define EC_HANG_START_NOW (1 << 30) + +/* + * If this flag is set, all the other fields are ignored (including + * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer + * without reconfiguring any of the other hang detect settings. + */ +#define EC_HANG_STOP_NOW (1 << 31) + +struct ec_params_hang_detect { + /* Flags; see EC_HANG_* */ + uint32_t flags; + + /* Timeout in msec before generating host event, if enabled */ + uint16_t host_event_timeout_msec; + + /* Timeout in msec before generating warm reboot, if enabled */ + uint16_t warm_reboot_timeout_msec; +} __packed; + +/*****************************************************************************/ +/* Commands for battery charging */ + +/* + * This is the single catch-all host command to exchange data regarding the + * charge state machine (v2 and up). 
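+ *
+ * Illustrative example: to query the charger voltage limit, send
+ * cmd = CHARGE_STATE_CMD_GET_PARAM with get_param.param =
+ * CS_PARAM_CHG_VOLTAGE and read the result from get_param.value in
+ * struct ec_response_charge_state; CHARGE_STATE_CMD_SET_PARAM works the
+ * same way through set_param.param / set_param.value and returns no data.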
+ */ +#define EC_CMD_CHARGE_STATE 0xa0 + +/* Subcommands for this host command */ +enum charge_state_command { + CHARGE_STATE_CMD_GET_STATE, + CHARGE_STATE_CMD_GET_PARAM, + CHARGE_STATE_CMD_SET_PARAM, + CHARGE_STATE_NUM_CMDS +}; + +/* + * Known param numbers are defined here. Ranges are reserved for board-specific + * params, which are handled by the particular implementations. + */ +enum charge_state_params { + CS_PARAM_CHG_VOLTAGE, /* charger voltage limit */ + CS_PARAM_CHG_CURRENT, /* charger current limit */ + CS_PARAM_CHG_INPUT_CURRENT, /* charger input current limit */ + CS_PARAM_CHG_STATUS, /* charger-specific status */ + CS_PARAM_CHG_OPTION, /* charger-specific options */ + /* How many so far? */ + CS_NUM_BASE_PARAMS, + + /* Range for CONFIG_CHARGER_PROFILE_OVERRIDE params */ + CS_PARAM_CUSTOM_PROFILE_MIN = 0x10000, + CS_PARAM_CUSTOM_PROFILE_MAX = 0x1ffff, + + /* Other custom param ranges go here... */ +}; + +struct ec_params_charge_state { + uint8_t cmd; /* enum charge_state_command */ + union { + struct { + /* no args */ + } get_state; + + struct { + uint32_t param; /* enum charge_state_param */ + } get_param; + + struct { + uint32_t param; /* param to set */ + uint32_t value; /* value to set */ + } set_param; + }; +} __packed; + +struct ec_response_charge_state { + union { + struct { + int ac; + int chg_voltage; + int chg_current; + int chg_input_current; + int batt_state_of_charge; + } get_state; + + struct { + uint32_t value; + } get_param; + struct { + /* no return values */ + } set_param; + }; +} __packed; + + +/* + * Set maximum battery charging current. + */ +#define EC_CMD_CHARGE_CURRENT_LIMIT 0xa1 + +struct ec_params_current_limit { + uint32_t limit; /* in mA */ +} __packed; + +/* + * Set maximum external voltage / current. + */ +#define EC_CMD_EXTERNAL_POWER_LIMIT 0x00A2 + +/* Command v0 is used only on Spring and is obsolete + unsupported */ +struct ec_params_external_power_limit_v1 { + uint16_t current_lim; /* in mA, or EC_POWER_LIMIT_NONE to clear limit */ + uint16_t voltage_lim; /* in mV, or EC_POWER_LIMIT_NONE to clear limit */ +} __packed; + +#define EC_POWER_LIMIT_NONE 0xffff + +/* Inform the EC when entering a sleep state */ +#define EC_CMD_HOST_SLEEP_EVENT 0xa9 + +enum host_sleep_event { + HOST_SLEEP_EVENT_S3_SUSPEND = 1, + HOST_SLEEP_EVENT_S3_RESUME = 2, + HOST_SLEEP_EVENT_S0IX_SUSPEND = 3, + HOST_SLEEP_EVENT_S0IX_RESUME = 4 +}; + +struct ec_params_host_sleep_event { + uint8_t sleep_event; +} __packed; + +/*****************************************************************************/ +/* Smart battery pass-through */ + +/* Get / Set 16-bit smart battery registers */ +#define EC_CMD_SB_READ_WORD 0xb0 +#define EC_CMD_SB_WRITE_WORD 0xb1 + +/* Get / Set string smart battery parameters + * formatted as SMBUS "block". + */ +#define EC_CMD_SB_READ_BLOCK 0xb2 +#define EC_CMD_SB_WRITE_BLOCK 0xb3 + +struct ec_params_sb_rd { + uint8_t reg; +} __packed; + +struct ec_response_sb_rd_word { + uint16_t value; +} __packed; + +struct ec_params_sb_wr_word { + uint8_t reg; + uint16_t value; +} __packed; + +struct ec_response_sb_rd_block { + uint8_t data[32]; +} __packed; + +struct ec_params_sb_wr_block { + uint8_t reg; + uint16_t data[32]; +} __packed; + +/*****************************************************************************/ +/* Battery vendor parameters + * + * Get or set vendor-specific parameters in the battery. Implementations may + * differ between boards or batteries. 
On a set operation, the response + * contains the actual value set, which may be rounded or clipped from the + * requested value. + */ + +#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4 + +enum ec_battery_vendor_param_mode { + BATTERY_VENDOR_PARAM_MODE_GET = 0, + BATTERY_VENDOR_PARAM_MODE_SET, +}; + +struct ec_params_battery_vendor_param { + uint32_t param; + uint32_t value; + uint8_t mode; +} __packed; + +struct ec_response_battery_vendor_param { + uint32_t value; +} __packed; + +/*****************************************************************************/ +/* System commands */ + +/* + * TODO(crosbug.com/p/23747): This is a confusing name, since it doesn't + * necessarily reboot the EC. Rename to "image" or something similar? + */ +#define EC_CMD_REBOOT_EC 0xd2 + +/* Command */ +enum ec_reboot_cmd { + EC_REBOOT_CANCEL = 0, /* Cancel a pending reboot */ + EC_REBOOT_JUMP_RO = 1, /* Jump to RO without rebooting */ + EC_REBOOT_JUMP_RW = 2, /* Jump to RW without rebooting */ + /* (command 3 was jump to RW-B) */ + EC_REBOOT_COLD = 4, /* Cold-reboot */ + EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */ + EC_REBOOT_HIBERNATE = 6 /* Hibernate EC */ +}; + +/* Flags for ec_params_reboot_ec.reboot_flags */ +#define EC_REBOOT_FLAG_RESERVED0 (1 << 0) /* Was recovery request */ +#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN (1 << 1) /* Reboot after AP shutdown */ + +struct ec_params_reboot_ec { + uint8_t cmd; /* enum ec_reboot_cmd */ + uint8_t flags; /* See EC_REBOOT_FLAG_* */ +} __packed; + +/* + * Get information on last EC panic. + * + * Returns variable-length platform-dependent panic information. See panic.h + * for details. + */ +#define EC_CMD_GET_PANIC_INFO 0xd3 + +/*****************************************************************************/ +/* + * ACPI commands + * + * These are valid ONLY on the ACPI command/data port. + */ + +/* + * ACPI Read Embedded Controller + * + * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). + * + * Use the following sequence: + * + * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write address to EC_LPC_ADDR_ACPI_DATA + * - Wait for EC_LPC_CMDR_DATA bit to set + * - Read value from EC_LPC_ADDR_ACPI_DATA + */ +#define EC_CMD_ACPI_READ 0x80 + +/* + * ACPI Write Embedded Controller + * + * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). + * + * Use the following sequence: + * + * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write address to EC_LPC_ADDR_ACPI_DATA + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write value to EC_LPC_ADDR_ACPI_DATA + */ +#define EC_CMD_ACPI_WRITE 0x81 + +/* + * ACPI Query Embedded Controller + * + * This clears the lowest-order bit in the currently pending host events, and + * sets the result code to the 1-based index of the bit (event 0x00000001 = 1, + * event 0x80000000 = 32), or 0 if no event was pending. + */ +#define EC_CMD_ACPI_QUERY_EVENT 0x84 + +/* Valid addresses in ACPI memory space, for read/write commands */ + +/* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */ +#define EC_ACPI_MEM_VERSION 0x00 +/* + * Test location; writing value here updates test compliment byte to (0xff - + * value). + */ +#define EC_ACPI_MEM_TEST 0x01 +/* Test compliment; writes here are ignored. 
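+ * For example, after writing 0x42 to EC_ACPI_MEM_TEST this location reads
+ * back 0xbd (0xff - 0x42).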
*/ +#define EC_ACPI_MEM_TEST_COMPLIMENT 0x02 + +/* Keyboard backlight brightness percent (0 - 100) */ +#define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03 +/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */ +#define EC_ACPI_MEM_FAN_DUTY 0x04 + +/* + * DPTF temp thresholds. Any of the EC's temp sensors can have up to two + * independent thresholds attached to them. The current value of the ID + * register determines which sensor is affected by the THRESHOLD and COMMIT + * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme + * as the memory-mapped sensors. The COMMIT register applies those settings. + * + * The spec does not mandate any way to read back the threshold settings + * themselves, but when a threshold is crossed the AP needs a way to determine + * which sensor(s) are responsible. Each reading of the ID register clears and + * returns one sensor ID that has crossed one of its threshold (in either + * direction) since the last read. A value of 0xFF means "no new thresholds + * have tripped". Setting or enabling the thresholds for a sensor will clear + * the unread event count for that sensor. + */ +#define EC_ACPI_MEM_TEMP_ID 0x05 +#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06 +#define EC_ACPI_MEM_TEMP_COMMIT 0x07 +/* + * Here are the bits for the COMMIT register: + * bit 0 selects the threshold index for the chosen sensor (0/1) + * bit 1 enables/disables the selected threshold (0 = off, 1 = on) + * Each write to the commit register affects one threshold. + */ +#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK (1 << 0) +#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK (1 << 1) +/* + * Example: + * + * Set the thresholds for sensor 2 to 50 C and 60 C: + * write 2 to [0x05] -- select temp sensor 2 + * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET + * write 0x2 to [0x07] -- enable threshold 0 with this value + * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET + * write 0x3 to [0x07] -- enable threshold 1 with this value + * + * Disable the 60 C threshold, leaving the 50 C threshold unchanged: + * write 2 to [0x05] -- select temp sensor 2 + * write 0x1 to [0x07] -- disable threshold 1 + */ + +/* DPTF battery charging current limit */ +#define EC_ACPI_MEM_CHARGING_LIMIT 0x08 + +/* Charging limit is specified in 64 mA steps */ +#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64 +/* Value to disable DPTF battery charging limit */ +#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff + +/* Current version of ACPI memory address space */ +#define EC_ACPI_MEM_VERSION_CURRENT 1 + + +/*****************************************************************************/ +/* + * HDMI CEC commands + * + * These commands are for sending and receiving message via HDMI CEC + */ +#define EC_MAX_CEC_MSG_LEN 16 + +/* CEC message from the AP to be written on the CEC bus */ +#define EC_CMD_CEC_WRITE_MSG 0x00B8 + +/** + * struct ec_params_cec_write - Message to write to the CEC bus + * @msg: message content to write to the CEC bus + */ +struct ec_params_cec_write { + uint8_t msg[EC_MAX_CEC_MSG_LEN]; +} __packed; + +/* Set various CEC parameters */ +#define EC_CMD_CEC_SET 0x00BA + +/** + * struct ec_params_cec_set - CEC parameters set + * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS + * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC + * or 1 to enable CEC functionality, in case cmd is CEC_CMD_LOGICAL_ADDRESS, + * this field encodes the requested logical address between 0 and 15 + * or 0xff to unregister + */ +struct ec_params_cec_set { + 
uint8_t cmd; /* enum cec_command */ + uint8_t val; +} __packed; + +/* Read various CEC parameters */ +#define EC_CMD_CEC_GET 0x00BB + +/** + * struct ec_params_cec_get - CEC parameters get + * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS + */ +struct ec_params_cec_get { + uint8_t cmd; /* enum cec_command */ +} __packed; + +/** + * struct ec_response_cec_get - CEC parameters get response + * @val: in case cmd was CEC_CMD_ENABLE, this field will 0 if CEC is + * disabled or 1 if CEC functionality is enabled, + * in case cmd was CEC_CMD_LOGICAL_ADDRESS, this will encode the + * configured logical address between 0 and 15 or 0xff if unregistered + */ +struct ec_response_cec_get { + uint8_t val; +} __packed; + +/* CEC parameters command */ +enum ec_cec_command { + /* CEC reading, writing and events enable */ + CEC_CMD_ENABLE, + /* CEC logical address */ + CEC_CMD_LOGICAL_ADDRESS, +}; + +/* Events from CEC to AP */ +enum mkbp_cec_event { + /* Outgoing message was acknowledged by a follower */ + EC_MKBP_CEC_SEND_OK = BIT(0), + /* Outgoing message was not acknowledged */ + EC_MKBP_CEC_SEND_FAILED = BIT(1), +}; + +/*****************************************************************************/ +/* + * Special commands + * + * These do not follow the normal rules for commands. See each command for + * details. + */ + +/* + * Reboot NOW + * + * This command will work even when the EC LPC interface is busy, because the + * reboot command is processed at interrupt level. Note that when the EC + * reboots, the host will reboot too, so there is no response to this command. + * + * Use EC_CMD_REBOOT_EC to reboot the EC more politely. + */ +#define EC_CMD_REBOOT 0xd1 /* Think "die" */ + +/* + * Resend last response (not supported on LPC). + * + * Returns EC_RES_UNAVAILABLE if there is no response available - for example, + * there was no previous command, or the previous command's response was too + * big to save. + */ +#define EC_CMD_RESEND_RESPONSE 0xdb + +/* + * This header byte on a command indicate version 0. Any header byte less + * than this means that we are talking to an old EC which doesn't support + * versioning. In that case, we assume version 0. + * + * Header bytes greater than this indicate a later version. For example, + * EC_CMD_VERSION0 + 1 means we are using version 1. + * + * The old EC interface must not use commands 0xdc or higher. + */ +#define EC_CMD_VERSION0 0xdc + +#endif /* !__ACPI__ */ + +/*****************************************************************************/ +/* + * PD commands + * + * These commands are for PD MCU communication. 
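+ *
+ * For example (illustrative), a host discovering type-C ports would first
+ * send EC_CMD_USB_PD_PORTS to learn num_ports, then issue
+ * EC_CMD_USB_PD_POWER_INFO for each port number to read the role, charger
+ * type and measured voltage/current for that port.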
+ */ + +/* EC to PD MCU exchange status command */ +#define EC_CMD_PD_EXCHANGE_STATUS 0x100 + +/* Status of EC being sent to PD */ +struct ec_params_pd_status { + int8_t batt_soc; /* battery state of charge */ +} __packed; + +/* Status of PD being sent back to EC */ +struct ec_response_pd_status { + int8_t status; /* PD MCU status */ + uint32_t curr_lim_ma; /* input current limit */ +} __packed; + +/* Set USB type-C port role and muxes */ +#define EC_CMD_USB_PD_CONTROL 0x101 + +enum usb_pd_control_role { + USB_PD_CTRL_ROLE_NO_CHANGE = 0, + USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */ + USB_PD_CTRL_ROLE_TOGGLE_OFF = 2, + USB_PD_CTRL_ROLE_FORCE_SINK = 3, + USB_PD_CTRL_ROLE_FORCE_SOURCE = 4, +}; + +enum usb_pd_control_mux { + USB_PD_CTRL_MUX_NO_CHANGE = 0, + USB_PD_CTRL_MUX_NONE = 1, + USB_PD_CTRL_MUX_USB = 2, + USB_PD_CTRL_MUX_DP = 3, + USB_PD_CTRL_MUX_DOCK = 4, + USB_PD_CTRL_MUX_AUTO = 5, +}; + +enum usb_pd_control_swap { + USB_PD_CTRL_SWAP_NONE = 0, + USB_PD_CTRL_SWAP_DATA = 1, + USB_PD_CTRL_SWAP_POWER = 2, + USB_PD_CTRL_SWAP_VCONN = 3, + USB_PD_CTRL_SWAP_COUNT +}; + +struct ec_params_usb_pd_control { + uint8_t port; + uint8_t role; + uint8_t mux; + uint8_t swap; +} __packed; + +#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */ +#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */ +#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */ + +#define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */ +#define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */ +#define PD_CTRL_RESP_ROLE_VCONN BIT(2) /* Vconn status */ +#define PD_CTRL_RESP_ROLE_DR_POWER BIT(3) /* Partner is dualrole power */ +#define PD_CTRL_RESP_ROLE_DR_DATA BIT(4) /* Partner is dualrole data */ +#define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */ +#define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powerd */ + +struct ec_response_usb_pd_control_v1 { + uint8_t enabled; + uint8_t role; + uint8_t polarity; + char state[32]; +} __packed; + +#define EC_CMD_USB_PD_PORTS 0x102 + +/* Maximum number of PD ports on a device, num_ports will be <= this */ +#define EC_USB_PD_MAX_PORTS 8 + +struct ec_response_usb_pd_ports { + uint8_t num_ports; +} __packed; + +#define EC_CMD_USB_PD_POWER_INFO 0x103 + +#define PD_POWER_CHARGING_PORT 0xff +struct ec_params_usb_pd_power_info { + uint8_t port; +} __packed; + +enum usb_chg_type { + USB_CHG_TYPE_NONE, + USB_CHG_TYPE_PD, + USB_CHG_TYPE_C, + USB_CHG_TYPE_PROPRIETARY, + USB_CHG_TYPE_BC12_DCP, + USB_CHG_TYPE_BC12_CDP, + USB_CHG_TYPE_BC12_SDP, + USB_CHG_TYPE_OTHER, + USB_CHG_TYPE_VBUS, + USB_CHG_TYPE_UNKNOWN, +}; +enum usb_power_roles { + USB_PD_PORT_POWER_DISCONNECTED, + USB_PD_PORT_POWER_SOURCE, + USB_PD_PORT_POWER_SINK, + USB_PD_PORT_POWER_SINK_NOT_CHARGING, +}; + +struct usb_chg_measures { + uint16_t voltage_max; + uint16_t voltage_now; + uint16_t current_max; + uint16_t current_lim; +} __packed; + +struct ec_response_usb_pd_power_info { + uint8_t role; + uint8_t type; + uint8_t dualrole; + uint8_t reserved1; + struct usb_chg_measures meas; + uint32_t max_power; +} __packed; + +struct ec_params_usb_pd_info_request { + uint8_t port; +} __packed; + +/* Read USB-PD Device discovery info */ +#define EC_CMD_USB_PD_DISCOVERY 0x0113 +struct ec_params_usb_pd_discovery_entry { + uint16_t vid; /* USB-IF VID */ + uint16_t pid; /* USB-IF PID */ + uint8_t ptype; /* product type (hub,periph,cable,ama) */ +} __packed; + +/* Override default charge behavior */ +#define EC_CMD_PD_CHARGE_PORT_OVERRIDE 0x0114 + +/* 
Negative port parameters have special meaning */ +enum usb_pd_override_ports { + OVERRIDE_DONT_CHARGE = -2, + OVERRIDE_OFF = -1, + /* [0, CONFIG_USB_PD_PORT_COUNT): Port# */ +}; + +struct ec_params_charge_port_override { + int16_t override_port; /* Override port# */ +} __packed; + +/* Read (and delete) one entry of PD event log */ +#define EC_CMD_PD_GET_LOG_ENTRY 0x0115 + +struct ec_response_pd_log { + uint32_t timestamp; /* relative timestamp in milliseconds */ + uint8_t type; /* event type : see PD_EVENT_xx below */ + uint8_t size_port; /* [7:5] port number [4:0] payload size in bytes */ + uint16_t data; /* type-defined data payload */ + uint8_t payload[0]; /* optional additional data payload: 0..16 bytes */ +} __packed; + +/* The timestamp is the microsecond counter shifted to get about a ms. */ +#define PD_LOG_TIMESTAMP_SHIFT 10 /* 1 LSB = 1024us */ + +#define PD_LOG_SIZE_MASK 0x1f +#define PD_LOG_PORT_MASK 0xe0 +#define PD_LOG_PORT_SHIFT 5 +#define PD_LOG_PORT_SIZE(port, size) (((port) << PD_LOG_PORT_SHIFT) | \ + ((size) & PD_LOG_SIZE_MASK)) +#define PD_LOG_PORT(size_port) ((size_port) >> PD_LOG_PORT_SHIFT) +#define PD_LOG_SIZE(size_port) ((size_port) & PD_LOG_SIZE_MASK) + +/* PD event log : entry types */ +/* PD MCU events */ +#define PD_EVENT_MCU_BASE 0x00 +#define PD_EVENT_MCU_CHARGE (PD_EVENT_MCU_BASE+0) +#define PD_EVENT_MCU_CONNECT (PD_EVENT_MCU_BASE+1) +/* Reserved for custom board event */ +#define PD_EVENT_MCU_BOARD_CUSTOM (PD_EVENT_MCU_BASE+2) +/* PD generic accessory events */ +#define PD_EVENT_ACC_BASE 0x20 +#define PD_EVENT_ACC_RW_FAIL (PD_EVENT_ACC_BASE+0) +#define PD_EVENT_ACC_RW_ERASE (PD_EVENT_ACC_BASE+1) +/* PD power supply events */ +#define PD_EVENT_PS_BASE 0x40 +#define PD_EVENT_PS_FAULT (PD_EVENT_PS_BASE+0) +/* PD video dongles events */ +#define PD_EVENT_VIDEO_BASE 0x60 +#define PD_EVENT_VIDEO_DP_MODE (PD_EVENT_VIDEO_BASE+0) +#define PD_EVENT_VIDEO_CODEC (PD_EVENT_VIDEO_BASE+1) +/* Returned in the "type" field, when there is no entry available */ +#define PD_EVENT_NO_ENTRY 0xff + +/* + * PD_EVENT_MCU_CHARGE event definition : + * the payload is "struct usb_chg_measures" + * the data field contains the port state flags as defined below : + */ +/* Port partner is a dual role device */ +#define CHARGE_FLAGS_DUAL_ROLE BIT(15) +/* Port is the pending override port */ +#define CHARGE_FLAGS_DELAYED_OVERRIDE BIT(14) +/* Port is the override port */ +#define CHARGE_FLAGS_OVERRIDE BIT(13) +/* Charger type */ +#define CHARGE_FLAGS_TYPE_SHIFT 3 +#define CHARGE_FLAGS_TYPE_MASK (0xf << CHARGE_FLAGS_TYPE_SHIFT) +/* Power delivery role */ +#define CHARGE_FLAGS_ROLE_MASK (7 << 0) + +/* + * PD_EVENT_PS_FAULT data field flags definition : + */ +#define PS_FAULT_OCP 1 +#define PS_FAULT_FAST_OCP 2 +#define PS_FAULT_OVP 3 +#define PS_FAULT_DISCH 4 + +/* + * PD_EVENT_VIDEO_CODEC payload is "struct mcdp_info". 
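+ * When such a log entry is read via EC_CMD_PD_GET_LOG_ENTRY, the payload
+ * can be overlaid on struct mcdp_info below; for example (illustrative),
+ * MCDP_CHIPID(info.chipid) and MCDP_FAMILY(info.family) reassemble the
+ * 16-bit chip ID and family code from the two-byte fields.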
+ */ +struct mcdp_version { + uint8_t major; + uint8_t minor; + uint16_t build; +} __packed; + +struct mcdp_info { + uint8_t family[2]; + uint8_t chipid[2]; + struct mcdp_version irom; + struct mcdp_version fw; +} __packed; + +/* struct mcdp_info field decoding */ +#define MCDP_CHIPID(chipid) ((chipid[0] << 8) | chipid[1]) +#define MCDP_FAMILY(family) ((family[0] << 8) | family[1]) + +/* Get info about USB-C SS muxes */ +#define EC_CMD_USB_PD_MUX_INFO 0x11a + +struct ec_params_usb_pd_mux_info { + uint8_t port; /* USB-C port number */ +} __packed; + +/* Flags representing mux state */ +#define USB_PD_MUX_USB_ENABLED (1 << 0) +#define USB_PD_MUX_DP_ENABLED (1 << 1) +#define USB_PD_MUX_POLARITY_INVERTED (1 << 2) +#define USB_PD_MUX_HPD_IRQ (1 << 3) + +struct ec_response_usb_pd_mux_info { + uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */ +} __packed; + +/*****************************************************************************/ +/* + * Passthru commands + * + * Some platforms have sub-processors chained to each other. For example. + * + * AP <--> EC <--> PD MCU + * + * The top 2 bits of the command number are used to indicate which device the + * command is intended for. Device 0 is always the device receiving the + * command; other device mapping is board-specific. + * + * When a device receives a command to be passed to a sub-processor, it passes + * it on with the device number set back to 0. This allows the sub-processor + * to remain blissfully unaware of whether the command originated on the next + * device up the chain, or was passed through from the AP. + * + * In the above example, if the AP wants to send command 0x0002 to the PD MCU, + * AP sends command 0x4002 to the EC + * EC sends command 0x0002 to the PD MCU + * EC forwards PD MCU response back to the AP + */ + +/* Offset and max command number for sub-device n */ +#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n)) +#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff) + +/*****************************************************************************/ +/* + * Deprecated constants. These constants have been renamed for clarity. The + * meaning and size has not changed. Programs that use the old names should + * switch to the new names soon, as the old names may not be carried forward + * forever. + */ +#define EC_HOST_PARAM_SIZE EC_PROTO2_MAX_PARAM_SIZE +#define EC_LPC_ADDR_OLD_PARAM EC_HOST_CMD_REGION1 +#define EC_OLD_PARAM_SIZE EC_HOST_CMD_REGION_SIZE + +#endif /* __CROS_EC_COMMANDS_H */ diff --git a/include/linux/mfd/cros_ec_lpc_mec.h b/include/linux/mfd/cros_ec_lpc_mec.h new file mode 100644 index 000000000..176496ddc --- /dev/null +++ b/include/linux/mfd/cros_ec_lpc_mec.h @@ -0,0 +1,90 @@ +/* + * cros_ec_lpc_mec - LPC variant I/O for Microchip EC + * + * Copyright (C) 2016 Google, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This driver uses the Chrome OS EC byte-level message-based protocol for + * communicating the keyboard state (which keys are pressed) from a keyboard EC + * to the AP over some bus (such as i2c, lpc, spi). 
The EC does debouncing, + * but everything else (including deghosting) is done here. The main + * motivation for this is to keep the EC firmware as simple as possible, since + * it cannot be easily upgraded and EC flash/IRAM space is relatively + * expensive. + */ + +#ifndef __LINUX_MFD_CROS_EC_MEC_H +#define __LINUX_MFD_CROS_EC_MEC_H + +#include + +enum cros_ec_lpc_mec_emi_access_mode { + /* 8-bit access */ + ACCESS_TYPE_BYTE = 0x0, + /* 16-bit access */ + ACCESS_TYPE_WORD = 0x1, + /* 32-bit access */ + ACCESS_TYPE_LONG = 0x2, + /* + * 32-bit access, read or write of MEC_EMI_EC_DATA_B3 causes the + * EC data register to be incremented. + */ + ACCESS_TYPE_LONG_AUTO_INCREMENT = 0x3, +}; + +enum cros_ec_lpc_mec_io_type { + MEC_IO_READ, + MEC_IO_WRITE, +}; + +/* Access IO ranges 0x800 thru 0x9ff using EMI interface instead of LPC */ +#define MEC_EMI_RANGE_START EC_HOST_CMD_REGION0 +#define MEC_EMI_RANGE_END (EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE) + +/* EMI registers are relative to base */ +#define MEC_EMI_BASE 0x800 +#define MEC_EMI_HOST_TO_EC (MEC_EMI_BASE + 0) +#define MEC_EMI_EC_TO_HOST (MEC_EMI_BASE + 1) +#define MEC_EMI_EC_ADDRESS_B0 (MEC_EMI_BASE + 2) +#define MEC_EMI_EC_ADDRESS_B1 (MEC_EMI_BASE + 3) +#define MEC_EMI_EC_DATA_B0 (MEC_EMI_BASE + 4) +#define MEC_EMI_EC_DATA_B1 (MEC_EMI_BASE + 5) +#define MEC_EMI_EC_DATA_B2 (MEC_EMI_BASE + 6) +#define MEC_EMI_EC_DATA_B3 (MEC_EMI_BASE + 7) + +/* + * cros_ec_lpc_mec_init + * + * Initialize MEC I/O. + */ +void cros_ec_lpc_mec_init(void); + +/* + * cros_ec_lpc_mec_destroy + * + * Cleanup MEC I/O. + */ +void cros_ec_lpc_mec_destroy(void); + +/** + * cros_ec_lpc_io_bytes_mec - Read / write bytes to MEC EMI port + * + * @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request + * @offset: Base read / write address + * @length: Number of bytes to read / write + * @buf: Destination / source buffer + * + * @return 8-bit checksum of all bytes read / written + */ +u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, + unsigned int offset, unsigned int length, u8 *buf); + +#endif /* __LINUX_MFD_CROS_EC_MEC_H */ diff --git a/include/linux/mfd/cros_ec_lpc_reg.h b/include/linux/mfd/cros_ec_lpc_reg.h new file mode 100644 index 000000000..5560bef63 --- /dev/null +++ b/include/linux/mfd/cros_ec_lpc_reg.h @@ -0,0 +1,61 @@ +/* + * cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller + * + * Copyright (C) 2016 Google, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This driver uses the Chrome OS EC byte-level message-based protocol for + * communicating the keyboard state (which keys are pressed) from a keyboard EC + * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, + * but everything else (including deghosting) is done here. The main + * motivation for this is to keep the EC firmware as simple as possible, since + * it cannot be easily upgraded and EC flash/IRAM space is relatively + * expensive. + */ + +#ifndef __LINUX_MFD_CROS_EC_REG_H +#define __LINUX_MFD_CROS_EC_REG_H + +/** + * cros_ec_lpc_read_bytes - Read bytes from a given LPC-mapped address. 
+ * Returns 8-bit checksum of all bytes read. + * + * @offset: Base read address + * @length: Number of bytes to read + * @dest: Destination buffer + */ +u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest); + +/** + * cros_ec_lpc_write_bytes - Write bytes to a given LPC-mapped address. + * Returns 8-bit checksum of all bytes written. + * + * @offset: Base write address + * @length: Number of bytes to write + * @msg: Write data buffer + */ +u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg); + +/** + * cros_ec_lpc_reg_init + * + * Initialize register I/O. + */ +void cros_ec_lpc_reg_init(void); + +/** + * cros_ec_lpc_reg_destroy + * + * Cleanup reg I/O. + */ +void cros_ec_lpc_reg_destroy(void); + +#endif /* __LINUX_MFD_CROS_EC_REG_H */ diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h new file mode 100644 index 000000000..304985e28 --- /dev/null +++ b/include/linux/mfd/da8xx-cfgchip.h @@ -0,0 +1,153 @@ +/* + * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers. + * + * Copyright (C) 2016 David Lechner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H +#define __LINUX_MFD_DA8XX_CFGCHIP_H + +#include + +/* register offset (32-bit registers) */ +#define CFGCHIP(n) ((n) * 4) + +/* CFGCHIP0 (PLL0/EDMA3_0) register bits */ +#define CFGCHIP0_PLL_MASTER_LOCK BIT(4) +#define CFGCHIP0_EDMA30TC1DBS(n) ((n) << 2) +#define CFGCHIP0_EDMA30TC1DBS_MASK CFGCHIP0_EDMA30TC1DBS(0x3) +#define CFGCHIP0_EDMA30TC1DBS_16 CFGCHIP0_EDMA30TC1DBS(0x0) +#define CFGCHIP0_EDMA30TC1DBS_32 CFGCHIP0_EDMA30TC1DBS(0x1) +#define CFGCHIP0_EDMA30TC1DBS_64 CFGCHIP0_EDMA30TC1DBS(0x2) +#define CFGCHIP0_EDMA30TC0DBS(n) ((n) << 0) +#define CFGCHIP0_EDMA30TC0DBS_MASK CFGCHIP0_EDMA30TC0DBS(0x3) +#define CFGCHIP0_EDMA30TC0DBS_16 CFGCHIP0_EDMA30TC0DBS(0x0) +#define CFGCHIP0_EDMA30TC0DBS_32 CFGCHIP0_EDMA30TC0DBS(0x1) +#define CFGCHIP0_EDMA30TC0DBS_64 CFGCHIP0_EDMA30TC0DBS(0x2) + +/* CFGCHIP1 (eCAP/HPI/EDMA3_1/eHRPWM TBCLK/McASP0 AMUTEIN) register bits */ +#define CFGCHIP1_CAP2SRC(n) ((n) << 27) +#define CFGCHIP1_CAP2SRC_MASK CFGCHIP1_CAP2SRC(0x1f) +#define CFGCHIP1_CAP2SRC_ECAP_PIN CFGCHIP1_CAP2SRC(0x0) +#define CFGCHIP1_CAP2SRC_MCASP0_TX CFGCHIP1_CAP2SRC(0x1) +#define CFGCHIP1_CAP2SRC_MCASP0_RX CFGCHIP1_CAP2SRC(0x2) +#define CFGCHIP1_CAP2SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP2SRC(0x7) +#define CFGCHIP1_CAP2SRC_EMAC_C0_RX CFGCHIP1_CAP2SRC(0x8) +#define CFGCHIP1_CAP2SRC_EMAC_C0_TX CFGCHIP1_CAP2SRC(0x9) +#define CFGCHIP1_CAP2SRC_EMAC_C0_MISC CFGCHIP1_CAP2SRC(0xa) +#define CFGCHIP1_CAP2SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xb) +#define CFGCHIP1_CAP2SRC_EMAC_C1_RX CFGCHIP1_CAP2SRC(0xc) +#define CFGCHIP1_CAP2SRC_EMAC_C1_TX CFGCHIP1_CAP2SRC(0xd) +#define CFGCHIP1_CAP2SRC_EMAC_C1_MISC CFGCHIP1_CAP2SRC(0xe) +#define CFGCHIP1_CAP2SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xf) +#define CFGCHIP1_CAP2SRC_EMAC_C2_RX CFGCHIP1_CAP2SRC(0x10) +#define CFGCHIP1_CAP2SRC_EMAC_C2_TX CFGCHIP1_CAP2SRC(0x11) +#define CFGCHIP1_CAP2SRC_EMAC_C2_MISC CFGCHIP1_CAP2SRC(0x12) 
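
The CAPxSRC fields above are meant to be programmed through the CFGCHIP syscon regmap by consumer drivers. As a loose sketch only (the syscon compatible string and the wrapper function below are assumptions for illustration, not part of this header), routing McASP0 TX events to eCAP2 could look roughly like this:

#include <linux/err.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/* Illustrative helper: select McASP0 TX as the eCAP2 capture source. */
static int example_cap2_use_mcasp0_tx(void)
{
	struct regmap *cfgchip;

	/*
	 * The compatible string is assumed; a real consumer would more
	 * likely obtain the regmap from a device tree phandle.
	 */
	cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
	if (IS_ERR(cfgchip))
		return PTR_ERR(cfgchip);

	return regmap_update_bits(cfgchip, CFGCHIP(1),
				  CFGCHIP1_CAP2SRC_MASK,
				  CFGCHIP1_CAP2SRC_MCASP0_TX);
}

The same mask/value pairing applies to the CAP1SRC and CAP0SRC fields that follow.
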
+#define CFGCHIP1_CAP1SRC(n) ((n) << 22) +#define CFGCHIP1_CAP1SRC_MASK CFGCHIP1_CAP1SRC(0x1f) +#define CFGCHIP1_CAP1SRC_ECAP_PIN CFGCHIP1_CAP1SRC(0x0) +#define CFGCHIP1_CAP1SRC_MCASP0_TX CFGCHIP1_CAP1SRC(0x1) +#define CFGCHIP1_CAP1SRC_MCASP0_RX CFGCHIP1_CAP1SRC(0x2) +#define CFGCHIP1_CAP1SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP1SRC(0x7) +#define CFGCHIP1_CAP1SRC_EMAC_C0_RX CFGCHIP1_CAP1SRC(0x8) +#define CFGCHIP1_CAP1SRC_EMAC_C0_TX CFGCHIP1_CAP1SRC(0x9) +#define CFGCHIP1_CAP1SRC_EMAC_C0_MISC CFGCHIP1_CAP1SRC(0xa) +#define CFGCHIP1_CAP1SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xb) +#define CFGCHIP1_CAP1SRC_EMAC_C1_RX CFGCHIP1_CAP1SRC(0xc) +#define CFGCHIP1_CAP1SRC_EMAC_C1_TX CFGCHIP1_CAP1SRC(0xd) +#define CFGCHIP1_CAP1SRC_EMAC_C1_MISC CFGCHIP1_CAP1SRC(0xe) +#define CFGCHIP1_CAP1SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xf) +#define CFGCHIP1_CAP1SRC_EMAC_C2_RX CFGCHIP1_CAP1SRC(0x10) +#define CFGCHIP1_CAP1SRC_EMAC_C2_TX CFGCHIP1_CAP1SRC(0x11) +#define CFGCHIP1_CAP1SRC_EMAC_C2_MISC CFGCHIP1_CAP1SRC(0x12) +#define CFGCHIP1_CAP0SRC(n) ((n) << 17) +#define CFGCHIP1_CAP0SRC_MASK CFGCHIP1_CAP0SRC(0x1f) +#define CFGCHIP1_CAP0SRC_ECAP_PIN CFGCHIP1_CAP0SRC(0x0) +#define CFGCHIP1_CAP0SRC_MCASP0_TX CFGCHIP1_CAP0SRC(0x1) +#define CFGCHIP1_CAP0SRC_MCASP0_RX CFGCHIP1_CAP0SRC(0x2) +#define CFGCHIP1_CAP0SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP0SRC(0x7) +#define CFGCHIP1_CAP0SRC_EMAC_C0_RX CFGCHIP1_CAP0SRC(0x8) +#define CFGCHIP1_CAP0SRC_EMAC_C0_TX CFGCHIP1_CAP0SRC(0x9) +#define CFGCHIP1_CAP0SRC_EMAC_C0_MISC CFGCHIP1_CAP0SRC(0xa) +#define CFGCHIP1_CAP0SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xb) +#define CFGCHIP1_CAP0SRC_EMAC_C1_RX CFGCHIP1_CAP0SRC(0xc) +#define CFGCHIP1_CAP0SRC_EMAC_C1_TX CFGCHIP1_CAP0SRC(0xd) +#define CFGCHIP1_CAP0SRC_EMAC_C1_MISC CFGCHIP1_CAP0SRC(0xe) +#define CFGCHIP1_CAP0SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xf) +#define CFGCHIP1_CAP0SRC_EMAC_C2_RX CFGCHIP1_CAP0SRC(0x10) +#define CFGCHIP1_CAP0SRC_EMAC_C2_TX CFGCHIP1_CAP0SRC(0x11) +#define CFGCHIP1_CAP0SRC_EMAC_C2_MISC CFGCHIP1_CAP0SRC(0x12) +#define CFGCHIP1_HPIBYTEAD BIT(16) +#define CFGCHIP1_HPIENA BIT(15) +#define CFGCHIP0_EDMA31TC0DBS(n) ((n) << 13) +#define CFGCHIP0_EDMA31TC0DBS_MASK CFGCHIP0_EDMA31TC0DBS(0x3) +#define CFGCHIP0_EDMA31TC0DBS_16 CFGCHIP0_EDMA31TC0DBS(0x0) +#define CFGCHIP0_EDMA31TC0DBS_32 CFGCHIP0_EDMA31TC0DBS(0x1) +#define CFGCHIP0_EDMA31TC0DBS_64 CFGCHIP0_EDMA31TC0DBS(0x2) +#define CFGCHIP1_TBCLKSYNC BIT(12) +#define CFGCHIP1_AMUTESEL0(n) ((n) << 0) +#define CFGCHIP1_AMUTESEL0_MASK CFGCHIP1_AMUTESEL0(0xf) +#define CFGCHIP1_AMUTESEL0_LOW CFGCHIP1_AMUTESEL0(0x0) +#define CFGCHIP1_AMUTESEL0_BANK_0 CFGCHIP1_AMUTESEL0(0x1) +#define CFGCHIP1_AMUTESEL0_BANK_1 CFGCHIP1_AMUTESEL0(0x2) +#define CFGCHIP1_AMUTESEL0_BANK_2 CFGCHIP1_AMUTESEL0(0x3) +#define CFGCHIP1_AMUTESEL0_BANK_3 CFGCHIP1_AMUTESEL0(0x4) +#define CFGCHIP1_AMUTESEL0_BANK_4 CFGCHIP1_AMUTESEL0(0x5) +#define CFGCHIP1_AMUTESEL0_BANK_5 CFGCHIP1_AMUTESEL0(0x6) +#define CFGCHIP1_AMUTESEL0_BANK_6 CFGCHIP1_AMUTESEL0(0x7) +#define CFGCHIP1_AMUTESEL0_BANK_7 CFGCHIP1_AMUTESEL0(0x8) + +/* CFGCHIP2 (USB PHY) register bits */ +#define CFGCHIP2_PHYCLKGD BIT(17) +#define CFGCHIP2_VBUSSENSE BIT(16) +#define CFGCHIP2_RESET BIT(15) +#define CFGCHIP2_OTGMODE(n) ((n) << 13) +#define CFGCHIP2_OTGMODE_MASK CFGCHIP2_OTGMODE(0x3) +#define CFGCHIP2_OTGMODE_NO_OVERRIDE CFGCHIP2_OTGMODE(0x0) +#define CFGCHIP2_OTGMODE_FORCE_HOST CFGCHIP2_OTGMODE(0x1) +#define CFGCHIP2_OTGMODE_FORCE_DEVICE CFGCHIP2_OTGMODE(0x2) +#define CFGCHIP2_OTGMODE_FORCE_HOST_VBUS_LOW CFGCHIP2_OTGMODE(0x3) +#define 
CFGCHIP2_USB1PHYCLKMUX BIT(12) +#define CFGCHIP2_USB2PHYCLKMUX BIT(11) +#define CFGCHIP2_PHYPWRDN BIT(10) +#define CFGCHIP2_OTGPWRDN BIT(9) +#define CFGCHIP2_DATPOL BIT(8) +#define CFGCHIP2_USB1SUSPENDM BIT(7) +#define CFGCHIP2_PHY_PLLON BIT(6) +#define CFGCHIP2_SESENDEN BIT(5) +#define CFGCHIP2_VBDTCTEN BIT(4) +#define CFGCHIP2_REFFREQ(n) ((n) << 0) +#define CFGCHIP2_REFFREQ_MASK CFGCHIP2_REFFREQ(0xf) +#define CFGCHIP2_REFFREQ_12MHZ CFGCHIP2_REFFREQ(0x1) +#define CFGCHIP2_REFFREQ_24MHZ CFGCHIP2_REFFREQ(0x2) +#define CFGCHIP2_REFFREQ_48MHZ CFGCHIP2_REFFREQ(0x3) +#define CFGCHIP2_REFFREQ_19_2MHZ CFGCHIP2_REFFREQ(0x4) +#define CFGCHIP2_REFFREQ_38_4MHZ CFGCHIP2_REFFREQ(0x5) +#define CFGCHIP2_REFFREQ_13MHZ CFGCHIP2_REFFREQ(0x6) +#define CFGCHIP2_REFFREQ_26MHZ CFGCHIP2_REFFREQ(0x7) +#define CFGCHIP2_REFFREQ_20MHZ CFGCHIP2_REFFREQ(0x8) +#define CFGCHIP2_REFFREQ_40MHZ CFGCHIP2_REFFREQ(0x9) + +/* CFGCHIP3 (EMAC/uPP/PLL1/ASYNC3/PRU/DIV4.5/EMIFA) register bits */ +#define CFGCHIP3_RMII_SEL BIT(8) +#define CFGCHIP3_UPP_TX_CLKSRC BIT(6) +#define CFGCHIP3_PLL1_MASTER_LOCK BIT(5) +#define CFGCHIP3_ASYNC3_CLKSRC BIT(4) +#define CFGCHIP3_PRUEVTSEL BIT(3) +#define CFGCHIP3_DIV45PENA BIT(2) +#define CFGCHIP3_EMA_CLKSRC BIT(1) + +/* CFGCHIP4 (McASP0 AMUNTEIN) register bits */ +#define CFGCHIP4_AMUTECLR0 BIT(0) + +#endif /* __LINUX_MFD_DA8XX_CFGCHIP_H */ diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h new file mode 100644 index 000000000..d1c57b8db --- /dev/null +++ b/include/linux/mfd/da903x.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PMIC_DA903X_H +#define __LINUX_PMIC_DA903X_H + +/* Unified sub device IDs for DA9030/DA9034/DA9035 */ +enum { + DA9030_ID_LED_1, + DA9030_ID_LED_2, + DA9030_ID_LED_3, + DA9030_ID_LED_4, + DA9030_ID_LED_PC, + DA9030_ID_VIBRA, + DA9030_ID_WLED, + DA9030_ID_BUCK1, + DA9030_ID_BUCK2, + DA9030_ID_LDO1, + DA9030_ID_LDO2, + DA9030_ID_LDO3, + DA9030_ID_LDO4, + DA9030_ID_LDO5, + DA9030_ID_LDO6, + DA9030_ID_LDO7, + DA9030_ID_LDO8, + DA9030_ID_LDO9, + DA9030_ID_LDO10, + DA9030_ID_LDO11, + DA9030_ID_LDO12, + DA9030_ID_LDO13, + DA9030_ID_LDO14, + DA9030_ID_LDO15, + DA9030_ID_LDO16, + DA9030_ID_LDO17, + DA9030_ID_LDO18, + DA9030_ID_LDO19, + DA9030_ID_LDO_INT, /* LDO Internal */ + DA9030_ID_BAT, /* battery charger */ + + DA9034_ID_LED_1, + DA9034_ID_LED_2, + DA9034_ID_VIBRA, + DA9034_ID_WLED, + DA9034_ID_TOUCH, + + DA9034_ID_BUCK1, + DA9034_ID_BUCK2, + DA9034_ID_LDO1, + DA9034_ID_LDO2, + DA9034_ID_LDO3, + DA9034_ID_LDO4, + DA9034_ID_LDO5, + DA9034_ID_LDO6, + DA9034_ID_LDO7, + DA9034_ID_LDO8, + DA9034_ID_LDO9, + DA9034_ID_LDO10, + DA9034_ID_LDO11, + DA9034_ID_LDO12, + DA9034_ID_LDO13, + DA9034_ID_LDO14, + DA9034_ID_LDO15, + + DA9035_ID_BUCK3, +}; + +/* + * DA9030/DA9034 LEDs sub-devices uses generic "struct led_info" + * as the platform_data + */ + +/* DA9030 flags for "struct led_info" + */ +#define DA9030_LED_RATE_ON (0 << 5) +#define DA9030_LED_RATE_052S (1 << 5) +#define DA9030_LED_DUTY_1_16 (0 << 3) +#define DA9030_LED_DUTY_1_8 (1 << 3) +#define DA9030_LED_DUTY_1_4 (2 << 3) +#define DA9030_LED_DUTY_1_2 (3 << 3) + +#define DA9030_VIBRA_MODE_1P3V (0 << 1) +#define DA9030_VIBRA_MODE_2P7V (1 << 1) +#define DA9030_VIBRA_FREQ_1HZ (0 << 2) +#define DA9030_VIBRA_FREQ_2HZ (1 << 2) +#define DA9030_VIBRA_FREQ_4HZ (2 << 2) +#define DA9030_VIBRA_FREQ_8HZ (3 << 2) +#define DA9030_VIBRA_DUTY_ON (0 << 4) +#define DA9030_VIBRA_DUTY_75P (1 << 4) +#define DA9030_VIBRA_DUTY_50P (2 << 4) +#define DA9030_VIBRA_DUTY_25P (3 << 4) + +/* DA9034 flags for 
"struct led_info" */ +#define DA9034_LED_RAMP (1 << 7) + +/* DA9034 touch screen platform data */ +struct da9034_touch_pdata { + int interval_ms; /* sampling interval while pen down */ + int x_inverted; + int y_inverted; +}; + +struct da9034_backlight_pdata { + int output_current; /* output current of WLED, from 0-31 (in mA) */ +}; + +/* DA9030 battery charger data */ +struct power_supply_info; + +struct da9030_battery_info { + /* battery parameters */ + struct power_supply_info *battery_info; + + /* current and voltage to use for battery charging */ + unsigned int charge_milliamp; + unsigned int charge_millivolt; + + /* voltage thresholds (in millivolts) */ + int vbat_low; + int vbat_crit; + int vbat_charge_start; + int vbat_charge_stop; + int vbat_charge_restart; + + /* battery nominal minimal and maximal voltages in millivolts */ + int vcharge_min; + int vcharge_max; + + /* Temperature thresholds. These are DA9030 register values + "as is" and should be measured for each battery type */ + int tbat_low; + int tbat_high; + int tbat_restart; + + + /* battery monitor interval (seconds) */ + unsigned int batmon_interval; + + /* platform callbacks for battery low and critical events */ + void (*battery_low)(void); + void (*battery_critical)(void); +}; + +struct da903x_subdev_info { + int id; + const char *name; + void *platform_data; +}; + +struct da903x_platform_data { + int num_subdevs; + struct da903x_subdev_info *subdevs; +}; + +/* bit definitions for DA9030 events */ +#define DA9030_EVENT_ONKEY (1 << 0) +#define DA9030_EVENT_PWREN (1 << 1) +#define DA9030_EVENT_EXTON (1 << 2) +#define DA9030_EVENT_CHDET (1 << 3) +#define DA9030_EVENT_TBAT (1 << 4) +#define DA9030_EVENT_VBATMON (1 << 5) +#define DA9030_EVENT_VBATMON_TXON (1 << 6) +#define DA9030_EVENT_CHIOVER (1 << 7) +#define DA9030_EVENT_TCTO (1 << 8) +#define DA9030_EVENT_CCTO (1 << 9) +#define DA9030_EVENT_ADC_READY (1 << 10) +#define DA9030_EVENT_VBUS_4P4 (1 << 11) +#define DA9030_EVENT_VBUS_4P0 (1 << 12) +#define DA9030_EVENT_SESS_VALID (1 << 13) +#define DA9030_EVENT_SRP_DETECT (1 << 14) +#define DA9030_EVENT_WATCHDOG (1 << 15) +#define DA9030_EVENT_LDO15 (1 << 16) +#define DA9030_EVENT_LDO16 (1 << 17) +#define DA9030_EVENT_LDO17 (1 << 18) +#define DA9030_EVENT_LDO18 (1 << 19) +#define DA9030_EVENT_LDO19 (1 << 20) +#define DA9030_EVENT_BUCK2 (1 << 21) + +/* bit definitions for DA9034 events */ +#define DA9034_EVENT_ONKEY (1 << 0) +#define DA9034_EVENT_EXTON (1 << 2) +#define DA9034_EVENT_CHDET (1 << 3) +#define DA9034_EVENT_TBAT (1 << 4) +#define DA9034_EVENT_VBATMON (1 << 5) +#define DA9034_EVENT_REV_IOVER (1 << 6) +#define DA9034_EVENT_CH_IOVER (1 << 7) +#define DA9034_EVENT_CH_TCTO (1 << 8) +#define DA9034_EVENT_CH_CCTO (1 << 9) +#define DA9034_EVENT_USB_DEV (1 << 10) +#define DA9034_EVENT_OTGCP_IOVER (1 << 11) +#define DA9034_EVENT_VBUS_4P55 (1 << 12) +#define DA9034_EVENT_VBUS_3P8 (1 << 13) +#define DA9034_EVENT_SESS_1P8 (1 << 14) +#define DA9034_EVENT_SRP_READY (1 << 15) +#define DA9034_EVENT_ADC_MAN (1 << 16) +#define DA9034_EVENT_ADC_AUTO4 (1 << 17) +#define DA9034_EVENT_ADC_AUTO5 (1 << 18) +#define DA9034_EVENT_ADC_AUTO6 (1 << 19) +#define DA9034_EVENT_PEN_DOWN (1 << 20) +#define DA9034_EVENT_TSI_READY (1 << 21) +#define DA9034_EVENT_UART_TX (1 << 22) +#define DA9034_EVENT_UART_RX (1 << 23) +#define DA9034_EVENT_HEADSET (1 << 25) +#define DA9034_EVENT_HOOKSWITCH (1 << 26) +#define DA9034_EVENT_WATCHDOG (1 << 27) + +extern int da903x_register_notifier(struct device *dev, + struct notifier_block *nb, unsigned int events); 
+extern int da903x_unregister_notifier(struct device *dev, + struct notifier_block *nb, unsigned int events); + +/* Status Query Interface */ +#define DA9030_STATUS_ONKEY (1 << 0) +#define DA9030_STATUS_PWREN1 (1 << 1) +#define DA9030_STATUS_EXTON (1 << 2) +#define DA9030_STATUS_CHDET (1 << 3) +#define DA9030_STATUS_TBAT (1 << 4) +#define DA9030_STATUS_VBATMON (1 << 5) +#define DA9030_STATUS_VBATMON_TXON (1 << 6) +#define DA9030_STATUS_MCLKDET (1 << 7) + +#define DA9034_STATUS_ONKEY (1 << 0) +#define DA9034_STATUS_EXTON (1 << 2) +#define DA9034_STATUS_CHDET (1 << 3) +#define DA9034_STATUS_TBAT (1 << 4) +#define DA9034_STATUS_VBATMON (1 << 5) +#define DA9034_STATUS_PEN_DOWN (1 << 6) +#define DA9034_STATUS_MCLKDET (1 << 7) +#define DA9034_STATUS_USB_DEV (1 << 8) +#define DA9034_STATUS_HEADSET (1 << 9) +#define DA9034_STATUS_HOOKSWITCH (1 << 10) +#define DA9034_STATUS_REMCON (1 << 11) +#define DA9034_STATUS_VBUS_VALID_4P55 (1 << 12) +#define DA9034_STATUS_VBUS_VALID_3P8 (1 << 13) +#define DA9034_STATUS_SESS_VALID_1P8 (1 << 14) +#define DA9034_STATUS_SRP_READY (1 << 15) + +extern int da903x_query_status(struct device *dev, unsigned int status); + + +/* NOTE: the functions below are not intended for use outside + * of the DA903x sub-device drivers + */ +extern int da903x_write(struct device *dev, int reg, uint8_t val); +extern int da903x_writes(struct device *dev, int reg, int len, uint8_t *val); +extern int da903x_read(struct device *dev, int reg, uint8_t *val); +extern int da903x_reads(struct device *dev, int reg, int len, uint8_t *val); +extern int da903x_update(struct device *dev, int reg, uint8_t val, uint8_t mask); +extern int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask); +extern int da903x_clr_bits(struct device *dev, int reg, uint8_t bit_mask); +#endif /* __LINUX_PMIC_DA903X_H */ diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h new file mode 100644 index 000000000..ae5b66383 --- /dev/null +++ b/include/linux/mfd/da9052/da9052.h @@ -0,0 +1,232 @@ +/* + * da9052 declarations for DA9052 PMICs. + * + * Copyright(c) 2011 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#ifndef __MFD_DA9052_DA9052_H +#define __MFD_DA9052_DA9052_H + +#include +#include +#include +#include +#include +#include + +#include + +/* Common - HWMON Channel Definations */ +#define DA9052_ADC_VDDOUT 0 +#define DA9052_ADC_ICH 1 +#define DA9052_ADC_TBAT 2 +#define DA9052_ADC_VBAT 3 +#define DA9052_ADC_IN4 4 +#define DA9052_ADC_IN5 5 +#define DA9052_ADC_IN6 6 +#define DA9052_ADC_TSI 7 +#define DA9052_ADC_TJUNC 8 +#define DA9052_ADC_VBBAT 9 + +/* TSI channel has its own 4 channel mux */ +#define DA9052_ADC_TSI_XP 70 +#define DA9052_ADC_TSI_XN 71 +#define DA9052_ADC_TSI_YP 72 +#define DA9052_ADC_TSI_YN 73 + +#define DA9052_IRQ_DCIN 0 +#define DA9052_IRQ_VBUS 1 +#define DA9052_IRQ_DCINREM 2 +#define DA9052_IRQ_VBUSREM 3 +#define DA9052_IRQ_VDDLOW 4 +#define DA9052_IRQ_ALARM 5 +#define DA9052_IRQ_SEQRDY 6 +#define DA9052_IRQ_COMP1V2 7 +#define DA9052_IRQ_NONKEY 8 +#define DA9052_IRQ_IDFLOAT 9 +#define DA9052_IRQ_IDGND 10 +#define DA9052_IRQ_CHGEND 11 +#define DA9052_IRQ_TBAT 12 +#define DA9052_IRQ_ADC_EOM 13 +#define DA9052_IRQ_PENDOWN 14 +#define DA9052_IRQ_TSIREADY 15 +#define DA9052_IRQ_GPI0 16 +#define DA9052_IRQ_GPI1 17 +#define DA9052_IRQ_GPI2 18 +#define DA9052_IRQ_GPI3 19 +#define DA9052_IRQ_GPI4 20 +#define DA9052_IRQ_GPI5 21 +#define DA9052_IRQ_GPI6 22 +#define DA9052_IRQ_GPI7 23 +#define DA9052_IRQ_GPI8 24 +#define DA9052_IRQ_GPI9 25 +#define DA9052_IRQ_GPI10 26 +#define DA9052_IRQ_GPI11 27 +#define DA9052_IRQ_GPI12 28 +#define DA9052_IRQ_GPI13 29 +#define DA9052_IRQ_GPI14 30 +#define DA9052_IRQ_GPI15 31 + +enum da9052_chip_id { + DA9052, + DA9053_AA, + DA9053_BA, + DA9053_BB, + DA9053_BC, +}; + +struct da9052_pdata; + +struct da9052 { + struct device *dev; + struct regmap *regmap; + + struct mutex auxadc_lock; + struct completion done; + + int irq_base; + struct regmap_irq_chip_data *irq_data; + u8 chip_id; + + int chip_irq; + + /* SOC I/O transfer related fixes for DA9052/53 */ + int (*fix_io) (struct da9052 *da9052, unsigned char reg); +}; + +/* ADC API */ +int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel); +int da9052_adc_read_temp(struct da9052 *da9052); + +/* Device I/O API */ +static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg) +{ + int val, ret; + + ret = regmap_read(da9052->regmap, reg, &val); + if (ret < 0) + return ret; + + if (da9052->fix_io) { + ret = da9052->fix_io(da9052, reg); + if (ret < 0) + return ret; + } + + return val; +} + +static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg, + unsigned char val) +{ + int ret; + + ret = regmap_write(da9052->regmap, reg, val); + if (ret < 0) + return ret; + + if (da9052->fix_io) { + ret = da9052->fix_io(da9052, reg); + if (ret < 0) + return ret; + } + + return ret; +} + +static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg, + unsigned reg_cnt, unsigned char *val) +{ + int ret; + unsigned int tmp; + int i; + + for (i = 0; i < reg_cnt; i++) { + ret = regmap_read(da9052->regmap, reg + i, &tmp); + val[i] = (unsigned char)tmp; + if (ret < 0) + return ret; + } + + if (da9052->fix_io) { + ret = da9052->fix_io(da9052, reg); + if (ret < 0) + return ret; + } + + return ret; +} + +static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, + unsigned reg_cnt, unsigned char *val) +{ + int ret = 0; + int i; + + for (i = 0; i < reg_cnt; i++) { + ret = regmap_write(da9052->regmap, reg + i, val[i]); + if (ret < 0) + return ret; + } + + if (da9052->fix_io) { + ret = da9052->fix_io(da9052, reg); + if (ret < 0) + 
return ret; + } + + return ret; +} + +static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg, + unsigned char bit_mask, + unsigned char reg_val) +{ + int ret; + + ret = regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val); + if (ret < 0) + return ret; + + if (da9052->fix_io) { + ret = da9052->fix_io(da9052, reg); + if (ret < 0) + return ret; + } + + return ret; +} + +int da9052_device_init(struct da9052 *da9052, u8 chip_id); +void da9052_device_exit(struct da9052 *da9052); + +extern const struct regmap_config da9052_regmap_config; + +int da9052_irq_init(struct da9052 *da9052); +int da9052_irq_exit(struct da9052 *da9052); +int da9052_request_irq(struct da9052 *da9052, int irq, char *name, + irq_handler_t handler, void *data); +void da9052_free_irq(struct da9052 *da9052, int irq, void *data); + +int da9052_enable_irq(struct da9052 *da9052, int irq); +int da9052_disable_irq(struct da9052 *da9052, int irq); +int da9052_disable_irq_nosync(struct da9052 *da9052, int irq); + +#endif /* __MFD_DA9052_DA9052_H */ diff --git a/include/linux/mfd/da9052/pdata.h b/include/linux/mfd/da9052/pdata.h new file mode 100644 index 000000000..62c5c3c29 --- /dev/null +++ b/include/linux/mfd/da9052/pdata.h @@ -0,0 +1,40 @@ +/* + * Platform data declarations for DA9052 PMICs. + * + * Copyright(c) 2011 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __MFD_DA9052_PDATA_H__ +#define __MFD_DA9052_PDATA_H__ + +#define DA9052_MAX_REGULATORS 14 + +struct da9052; + +struct da9052_pdata { + struct led_platform_data *pled; + int (*init) (struct da9052 *da9052); + int irq_base; + int gpio_base; + int use_for_apm; + struct regulator_init_data *regulators[DA9052_MAX_REGULATORS]; +}; + +#endif diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h new file mode 100644 index 000000000..76780ea88 --- /dev/null +++ b/include/linux/mfd/da9052/reg.h @@ -0,0 +1,764 @@ +/* + * Register declarations for DA9052 PMICs. + * + * Copyright(c) 2011 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#ifndef __LINUX_MFD_DA9052_REG_H +#define __LINUX_MFD_DA9052_REG_H + +/* PAGE REGISTERS */ +#define DA9052_PAGE0_CON_REG 0 +#define DA9052_PAGE1_CON_REG 128 + +/* STATUS REGISTERS */ +#define DA9052_STATUS_A_REG 1 +#define DA9052_STATUS_B_REG 2 +#define DA9052_STATUS_C_REG 3 +#define DA9052_STATUS_D_REG 4 + +/* PARK REGISTER */ +#define DA9052_PARK_REGISTER DA9052_STATUS_D_REG + +/* EVENT REGISTERS */ +#define DA9052_EVENT_A_REG 5 +#define DA9052_EVENT_B_REG 6 +#define DA9052_EVENT_C_REG 7 +#define DA9052_EVENT_D_REG 8 +#define DA9052_FAULTLOG_REG 9 + +/* IRQ REGISTERS */ +#define DA9052_IRQ_MASK_A_REG 10 +#define DA9052_IRQ_MASK_B_REG 11 +#define DA9052_IRQ_MASK_C_REG 12 +#define DA9052_IRQ_MASK_D_REG 13 + +/* CONTROL REGISTERS */ +#define DA9052_CONTROL_A_REG 14 +#define DA9052_CONTROL_B_REG 15 +#define DA9052_CONTROL_C_REG 16 +#define DA9052_CONTROL_D_REG 17 + +#define DA9052_PDDIS_REG 18 +#define DA9052_INTERFACE_REG 19 +#define DA9052_RESET_REG 20 + +/* GPIO REGISTERS */ +#define DA9052_GPIO_0_1_REG 21 +#define DA9052_GPIO_2_3_REG 22 +#define DA9052_GPIO_4_5_REG 23 +#define DA9052_GPIO_6_7_REG 24 +#define DA9052_GPIO_8_9_REG 25 +#define DA9052_GPIO_10_11_REG 26 +#define DA9052_GPIO_12_13_REG 27 +#define DA9052_GPIO_14_15_REG 28 + +/* POWER SEQUENCER CONTROL REGISTERS */ +#define DA9052_ID_0_1_REG 29 +#define DA9052_ID_2_3_REG 30 +#define DA9052_ID_4_5_REG 31 +#define DA9052_ID_6_7_REG 32 +#define DA9052_ID_8_9_REG 33 +#define DA9052_ID_10_11_REG 34 +#define DA9052_ID_12_13_REG 35 +#define DA9052_ID_14_15_REG 36 +#define DA9052_ID_16_17_REG 37 +#define DA9052_ID_18_19_REG 38 +#define DA9052_ID_20_21_REG 39 +#define DA9052_SEQ_STATUS_REG 40 +#define DA9052_SEQ_A_REG 41 +#define DA9052_SEQ_B_REG 42 +#define DA9052_SEQ_TIMER_REG 43 + +/* LDO AND BUCK REGISTERS */ +#define DA9052_BUCKA_REG 44 +#define DA9052_BUCKB_REG 45 +#define DA9052_BUCKCORE_REG 46 +#define DA9052_BUCKPRO_REG 47 +#define DA9052_BUCKMEM_REG 48 +#define DA9052_BUCKPERI_REG 49 +#define DA9052_LDO1_REG 50 +#define DA9052_LDO2_REG 51 +#define DA9052_LDO3_REG 52 +#define DA9052_LDO4_REG 53 +#define DA9052_LDO5_REG 54 +#define DA9052_LDO6_REG 55 +#define DA9052_LDO7_REG 56 +#define DA9052_LDO8_REG 57 +#define DA9052_LDO9_REG 58 +#define DA9052_LDO10_REG 59 +#define DA9052_SUPPLY_REG 60 +#define DA9052_PULLDOWN_REG 61 +#define DA9052_CHGBUCK_REG 62 +#define DA9052_WAITCONT_REG 63 +#define DA9052_ISET_REG 64 +#define DA9052_BATCHG_REG 65 + +/* BATTERY CONTROL REGISTRS */ +#define DA9052_CHG_CONT_REG 66 +#define DA9052_INPUT_CONT_REG 67 +#define DA9052_CHG_TIME_REG 68 +#define DA9052_BBAT_CONT_REG 69 + +/* LED CONTROL REGISTERS */ +#define DA9052_BOOST_REG 70 +#define DA9052_LED_CONT_REG 71 +#define DA9052_LEDMIN123_REG 72 +#define DA9052_LED1_CONF_REG 73 +#define DA9052_LED2_CONF_REG 74 +#define DA9052_LED3_CONF_REG 75 +#define DA9052_LED1CONT_REG 76 +#define DA9052_LED2CONT_REG 77 +#define DA9052_LED3CONT_REG 78 +#define DA9052_LED_CONT_4_REG 79 +#define DA9052_LED_CONT_5_REG 80 + +/* ADC CONTROL REGISTERS */ +#define DA9052_ADC_MAN_REG 81 +#define DA9052_ADC_CONT_REG 82 +#define DA9052_ADC_RES_L_REG 83 +#define DA9052_ADC_RES_H_REG 84 +#define DA9052_VDD_RES_REG 85 +#define DA9052_VDD_MON_REG 86 + +#define DA9052_ICHG_AV_REG 87 +#define DA9052_ICHG_THD_REG 88 +#define DA9052_ICHG_END_REG 89 +#define DA9052_TBAT_RES_REG 90 +#define DA9052_TBAT_HIGHP_REG 91 +#define DA9052_TBAT_HIGHN_REG 92 +#define DA9052_TBAT_LOW_REG 93 +#define DA9052_T_OFFSET_REG 94 + +#define DA9052_ADCIN4_RES_REG 95 +#define 
DA9052_AUTO4_HIGH_REG 96 +#define DA9052_AUTO4_LOW_REG 97 +#define DA9052_ADCIN5_RES_REG 98 +#define DA9052_AUTO5_HIGH_REG 99 +#define DA9052_AUTO5_LOW_REG 100 +#define DA9052_ADCIN6_RES_REG 101 +#define DA9052_AUTO6_HIGH_REG 102 +#define DA9052_AUTO6_LOW_REG 103 + +#define DA9052_TJUNC_RES_REG 104 + +/* TSI CONTROL REGISTERS */ +#define DA9052_TSI_CONT_A_REG 105 +#define DA9052_TSI_CONT_B_REG 106 +#define DA9052_TSI_X_MSB_REG 107 +#define DA9052_TSI_Y_MSB_REG 108 +#define DA9052_TSI_LSB_REG 109 +#define DA9052_TSI_Z_MSB_REG 110 + +/* RTC COUNT REGISTERS */ +#define DA9052_COUNT_S_REG 111 +#define DA9052_COUNT_MI_REG 112 +#define DA9052_COUNT_H_REG 113 +#define DA9052_COUNT_D_REG 114 +#define DA9052_COUNT_MO_REG 115 +#define DA9052_COUNT_Y_REG 116 + +/* RTC CONTROL REGISTERS */ +#define DA9052_ALARM_MI_REG 117 +#define DA9052_ALARM_H_REG 118 +#define DA9052_ALARM_D_REG 119 +#define DA9052_ALARM_MO_REG 120 +#define DA9052_ALARM_Y_REG 121 +#define DA9052_SECOND_A_REG 122 +#define DA9052_SECOND_B_REG 123 +#define DA9052_SECOND_C_REG 124 +#define DA9052_SECOND_D_REG 125 + +/* PAGE CONFIGURATION BIT */ +#define DA9052_PAGE_CONF 0X80 + +/* STATUS REGISTER A BITS */ +#define DA9052_STATUSA_VDATDET 0X80 +#define DA9052_STATUSA_VBUSSEL 0X40 +#define DA9052_STATUSA_DCINSEL 0X20 +#define DA9052_STATUSA_VBUSDET 0X10 +#define DA9052_STATUSA_DCINDET 0X08 +#define DA9052_STATUSA_IDGND 0X04 +#define DA9052_STATUSA_IDFLOAT 0X02 +#define DA9052_STATUSA_NONKEY 0X01 + +/* STATUS REGISTER B BITS */ +#define DA9052_STATUSB_COMPDET 0X80 +#define DA9052_STATUSB_SEQUENCING 0X40 +#define DA9052_STATUSB_GPFB2 0X20 +#define DA9052_STATUSB_CHGTO 0X10 +#define DA9052_STATUSB_CHGEND 0X08 +#define DA9052_STATUSB_CHGLIM 0X04 +#define DA9052_STATUSB_CHGPRE 0X02 +#define DA9052_STATUSB_CHGATT 0X01 + +/* STATUS REGISTER C BITS */ +#define DA9052_STATUSC_GPI7 0X80 +#define DA9052_STATUSC_GPI6 0X40 +#define DA9052_STATUSC_GPI5 0X20 +#define DA9052_STATUSC_GPI4 0X10 +#define DA9052_STATUSC_GPI3 0X08 +#define DA9052_STATUSC_GPI2 0X04 +#define DA9052_STATUSC_GPI1 0X02 +#define DA9052_STATUSC_GPI0 0X01 + +/* STATUS REGISTER D BITS */ +#define DA9052_STATUSD_GPI15 0X80 +#define DA9052_STATUSD_GPI14 0X40 +#define DA9052_STATUSD_GPI13 0X20 +#define DA9052_STATUSD_GPI12 0X10 +#define DA9052_STATUSD_GPI11 0X08 +#define DA9052_STATUSD_GPI10 0X04 +#define DA9052_STATUSD_GPI9 0X02 +#define DA9052_STATUSD_GPI8 0X01 + +/* EVENT REGISTER A BITS */ +#define DA9052_EVENTA_ECOMP1V2 0X80 +#define DA9052_EVENTA_ESEQRDY 0X40 +#define DA9052_EVENTA_EALRAM 0X20 +#define DA9052_EVENTA_EVDDLOW 0X10 +#define DA9052_EVENTA_EVBUSREM 0X08 +#define DA9052_EVENTA_EDCINREM 0X04 +#define DA9052_EVENTA_EVBUSDET 0X02 +#define DA9052_EVENTA_EDCINDET 0X01 + +/* EVENT REGISTER B BITS */ +#define DA9052_EVENTB_ETSIREADY 0X80 +#define DA9052_EVENTB_EPENDOWN 0X40 +#define DA9052_EVENTB_EADCEOM 0X20 +#define DA9052_EVENTB_ETBAT 0X10 +#define DA9052_EVENTB_ECHGEND 0X08 +#define DA9052_EVENTB_EIDGND 0X04 +#define DA9052_EVENTB_EIDFLOAT 0X02 +#define DA9052_EVENTB_ENONKEY 0X01 + +/* EVENT REGISTER C BITS */ +#define DA9052_EVENTC_EGPI7 0X80 +#define DA9052_EVENTC_EGPI6 0X40 +#define DA9052_EVENTC_EGPI5 0X20 +#define DA9052_EVENTC_EGPI4 0X10 +#define DA9052_EVENTC_EGPI3 0X08 +#define DA9052_EVENTC_EGPI2 0X04 +#define DA9052_EVENTC_EGPI1 0X02 +#define DA9052_EVENTC_EGPI0 0X01 + +/* EVENT REGISTER D BITS */ +#define DA9052_EVENTD_EGPI15 0X80 +#define DA9052_EVENTD_EGPI14 0X40 +#define DA9052_EVENTD_EGPI13 0X20 +#define DA9052_EVENTD_EGPI12 0X10 +#define DA9052_EVENTD_EGPI11 
0X08 +#define DA9052_EVENTD_EGPI10 0X04 +#define DA9052_EVENTD_EGPI9 0X02 +#define DA9052_EVENTD_EGPI8 0X01 + +/* IRQ MASK REGISTERS BITS */ +#define DA9052_M_NONKEY 0X0100 + +/* TSI EVENT REGISTERS BITS */ +#define DA9052_E_PEN_DOWN 0X4000 +#define DA9052_E_TSI_READY 0X8000 + +/* FAULT LOG REGISTER BITS */ +#define DA9052_FAULTLOG_WAITSET 0X80 +#define DA9052_FAULTLOG_NSDSET 0X40 +#define DA9052_FAULTLOG_KEYSHUT 0X20 +#define DA9052_FAULTLOG_TEMPOVER 0X08 +#define DA9052_FAULTLOG_VDDSTART 0X04 +#define DA9052_FAULTLOG_VDDFAULT 0X02 +#define DA9052_FAULTLOG_TWDERROR 0X01 + +/* CONTROL REGISTER A BITS */ +#define DA9052_CONTROLA_GPIV 0X80 +#define DA9052_CONTROLA_PMOTYPE 0X20 +#define DA9052_CONTROLA_PMOV 0X10 +#define DA9052_CONTROLA_PMIV 0X08 +#define DA9052_CONTROLA_PMIFV 0X08 +#define DA9052_CONTROLA_PWR1EN 0X04 +#define DA9052_CONTROLA_PWREN 0X02 +#define DA9052_CONTROLA_SYSEN 0X01 + +/* CONTROL REGISTER B BITS */ +#define DA9052_CONTROLB_SHUTDOWN 0X80 +#define DA9052_CONTROLB_DEEPSLEEP 0X40 +#define DA9052_CONTROL_B_WRITEMODE 0X20 +#define DA9052_CONTROLB_BBATEN 0X10 +#define DA9052_CONTROLB_OTPREADEN 0X08 +#define DA9052_CONTROLB_AUTOBOOT 0X04 +#define DA9052_CONTROLB_ACTDIODE 0X02 +#define DA9052_CONTROLB_BUCKMERGE 0X01 + +/* CONTROL REGISTER C BITS */ +#define DA9052_CONTROLC_BLINKDUR 0X80 +#define DA9052_CONTROLC_BLINKFRQ 0X60 +#define DA9052_CONTROLC_DEBOUNCING 0X1C +#define DA9052_CONTROLC_PMFB2PIN 0X02 +#define DA9052_CONTROLC_PMFB1PIN 0X01 + +/* CONTROL REGISTER D BITS */ +#define DA9052_CONTROLD_WATCHDOG 0X80 +#define DA9052_CONTROLD_ACCDETEN 0X40 +#define DA9052_CONTROLD_GPI1415SD 0X20 +#define DA9052_CONTROLD_NONKEYSD 0X10 +#define DA9052_CONTROLD_KEEPACTEN 0X08 +#define DA9052_CONTROLD_TWDSCALE 0X07 + +/* POWER DOWN DISABLE REGISTER BITS */ +#define DA9052_PDDIS_PMCONTPD 0X80 +#define DA9052_PDDIS_OUT32KPD 0X40 +#define DA9052_PDDIS_CHGBBATPD 0X20 +#define DA9052_PDDIS_CHGPD 0X10 +#define DA9052_PDDIS_HS2WIREPD 0X08 +#define DA9052_PDDIS_PMIFPD 0X04 +#define DA9052_PDDIS_GPADCPD 0X02 +#define DA9052_PDDIS_GPIOPD 0X01 + +/* CONTROL REGISTER D BITS */ +#define DA9052_INTERFACE_IFBASEADDR 0XE0 +#define DA9052_INTERFACE_NCSPOL 0X10 +#define DA9052_INTERFACE_RWPOL 0X08 +#define DA9052_INTERFACE_CPHA 0X04 +#define DA9052_INTERFACE_CPOL 0X02 +#define DA9052_INTERFACE_IFTYPE 0X01 + +/* CONTROL REGISTER D BITS */ +#define DA9052_RESET_RESETEVENT 0XC0 +#define DA9052_RESET_RESETTIMER 0X3F + +/* GPIO REGISTERS */ +/* GPIO CONTROL REGISTER BITS */ +#define DA9052_GPIO_EVEN_PORT_PIN 0X03 +#define DA9052_GPIO_EVEN_PORT_TYPE 0X04 +#define DA9052_GPIO_EVEN_PORT_MODE 0X08 + +#define DA9052_GPIO_ODD_PORT_PIN 0X30 +#define DA9052_GPIO_ODD_PORT_TYPE 0X40 +#define DA9052_GPIO_ODD_PORT_MODE 0X80 + +/*POWER SEQUENCER REGISTER BITS */ +/* SEQ CONTROL REGISTER BITS FOR ID 0 AND 1 */ +#define DA9052_ID01_LDO1STEP 0XF0 +#define DA9052_ID01_SYSPRE 0X04 +#define DA9052_ID01_DEFSUPPLY 0X02 +#define DA9052_ID01_NRESMODE 0X01 + +/* SEQ CONTROL REGISTER BITS FOR ID 2 AND 3 */ +#define DA9052_ID23_LDO3STEP 0XF0 +#define DA9052_ID23_LDO2STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 4 AND 5 */ +#define DA9052_ID45_LDO5STEP 0XF0 +#define DA9052_ID45_LDO4STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 6 AND 7 */ +#define DA9052_ID67_LDO7STEP 0XF0 +#define DA9052_ID67_LDO6STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 8 AND 9 */ +#define DA9052_ID89_LDO9STEP 0XF0 +#define DA9052_ID89_LDO8STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 10 AND 11 */ +#define DA9052_ID1011_PDDISSTEP 0XF0 +#define 
DA9052_ID1011_LDO10STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 12 AND 13 */ +#define DA9052_ID1213_VMEMSWSTEP 0XF0 +#define DA9052_ID1213_VPERISWSTEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 14 AND 15 */ +#define DA9052_ID1415_BUCKPROSTEP 0XF0 +#define DA9052_ID1415_BUCKCORESTEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 16 AND 17 */ +#define DA9052_ID1617_BUCKPERISTEP 0XF0 +#define DA9052_ID1617_BUCKMEMSTEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 18 AND 19 */ +#define DA9052_ID1819_GPRISE2STEP 0XF0 +#define DA9052_ID1819_GPRISE1STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 20 AND 21 */ +#define DA9052_ID2021_GPFALL2STEP 0XF0 +#define DA9052_ID2021_GPFALL1STEP 0X0F + +/* POWER SEQ STATUS REGISTER BITS */ +#define DA9052_SEQSTATUS_SEQPOINTER 0XF0 +#define DA9052_SEQSTATUS_WAITSTEP 0X0F + +/* POWER SEQ A REGISTER BITS */ +#define DA9052_SEQA_POWEREND 0XF0 +#define DA9052_SEQA_SYSTEMEND 0X0F + +/* POWER SEQ B REGISTER BITS */ +#define DA9052_SEQB_PARTDOWN 0XF0 +#define DA9052_SEQB_MAXCOUNT 0X0F + +/* POWER SEQ TIMER REGISTER BITS */ +#define DA9052_SEQTIMER_SEQDUMMY 0XF0 +#define DA9052_SEQTIMER_SEQTIME 0X0F + +/*POWER SUPPLY CONTROL REGISTER BITS */ +/* BUCK REGISTER A BITS */ +#define DA9052_BUCKA_BPROILIM 0XC0 +#define DA9052_BUCKA_BPROMODE 0X30 +#define DA9052_BUCKA_BCOREILIM 0X0C +#define DA9052_BUCKA_BCOREMODE 0X03 + +/* BUCK REGISTER B BITS */ +#define DA9052_BUCKB_BERIILIM 0XC0 +#define DA9052_BUCKB_BPERIMODE 0X30 +#define DA9052_BUCKB_BMEMILIM 0X0C +#define DA9052_BUCKB_BMEMMODE 0X03 + +/* BUCKCORE REGISTER BITS */ +#define DA9052_BUCKCORE_BCORECONF 0X80 +#define DA9052_BUCKCORE_BCOREEN 0X40 +#define DA9052_BUCKCORE_VBCORE 0X3F + +/* BUCKPRO REGISTER BITS */ +#define DA9052_BUCKPRO_BPROCONF 0X80 +#define DA9052_BUCKPRO_BPROEN 0X40 +#define DA9052_BUCKPRO_VBPRO 0X3F + +/* BUCKMEM REGISTER BITS */ +#define DA9052_BUCKMEM_BMEMCONF 0X80 +#define DA9052_BUCKMEM_BMEMEN 0X40 +#define DA9052_BUCKMEM_VBMEM 0X3F + +/* BUCKPERI REGISTER BITS */ +#define DA9052_BUCKPERI_BPERICONF 0X80 +#define DA9052_BUCKPERI_BPERIEN 0X40 +#define DA9052_BUCKPERI_BPERIHS 0X20 +#define DA9052_BUCKPERI_VBPERI 0X1F + +/* LDO1 REGISTER BITS */ +#define DA9052_LDO1_LDO1CONF 0X80 +#define DA9052_LDO1_LDO1EN 0X40 +#define DA9052_LDO1_VLDO1 0X1F + +/* LDO2 REGISTER BITS */ +#define DA9052_LDO2_LDO2CONF 0X80 +#define DA9052_LDO2_LDO2EN 0X40 +#define DA9052_LDO2_VLDO2 0X3F + +/* LDO3 REGISTER BITS */ +#define DA9052_LDO3_LDO3CONF 0X80 +#define DA9052_LDO3_LDO3EN 0X40 +#define DA9052_LDO3_VLDO3 0X3F + +/* LDO4 REGISTER BITS */ +#define DA9052_LDO4_LDO4CONF 0X80 +#define DA9052_LDO4_LDO4EN 0X40 +#define DA9052_LDO4_VLDO4 0X3F + +/* LDO5 REGISTER BITS */ +#define DA9052_LDO5_LDO5CONF 0X80 +#define DA9052_LDO5_LDO5EN 0X40 +#define DA9052_LDO5_VLDO5 0X3F + +/* LDO6 REGISTER BITS */ +#define DA9052_LDO6_LDO6CONF 0X80 +#define DA9052_LDO6_LDO6EN 0X40 +#define DA9052_LDO6_VLDO6 0X3F + +/* LDO7 REGISTER BITS */ +#define DA9052_LDO7_LDO7CONF 0X80 +#define DA9052_LDO7_LDO7EN 0X40 +#define DA9052_LDO7_VLDO7 0X3F + +/* LDO8 REGISTER BITS */ +#define DA9052_LDO8_LDO8CONF 0X80 +#define DA9052_LDO8_LDO8EN 0X40 +#define DA9052_LDO8_VLDO8 0X3F + +/* LDO9 REGISTER BITS */ +#define DA9052_LDO9_LDO9CONF 0X80 +#define DA9052_LDO9_LDO9EN 0X40 +#define DA9052_LDO9_VLDO9 0X3F + +/* LDO10 REGISTER BITS */ +#define DA9052_LDO10_LDO10CONF 0X80 +#define DA9052_LDO10_LDO10EN 0X40 +#define DA9052_LDO10_VLDO10 0X3F + +/* SUPPLY REGISTER BITS */ +#define DA9052_SUPPLY_VLOCK 0X80 +#define DA9052_SUPPLY_VMEMSWEN 0X40 +#define 
DA9052_SUPPLY_VPERISWEN 0X20 +#define DA9052_SUPPLY_VLDO3GO 0X10 +#define DA9052_SUPPLY_VLDO2GO 0X08 +#define DA9052_SUPPLY_VBMEMGO 0X04 +#define DA9052_SUPPLY_VBPROGO 0X02 +#define DA9052_SUPPLY_VBCOREGO 0X01 + +/* PULLDOWN REGISTER BITS */ +#define DA9052_PULLDOWN_LDO5PDDIS 0X20 +#define DA9052_PULLDOWN_LDO2PDDIS 0X10 +#define DA9052_PULLDOWN_LDO1PDDIS 0X08 +#define DA9052_PULLDOWN_MEMPDDIS 0X04 +#define DA9052_PULLDOWN_PROPDDIS 0X02 +#define DA9052_PULLDOWN_COREPDDIS 0X01 + +/* BAT CHARGER REGISTER BITS */ +/* CHARGER BUCK REGISTER BITS */ +#define DA9052_CHGBUCK_CHGTEMP 0X80 +#define DA9052_CHGBUCK_CHGUSBILIM 0X40 +#define DA9052_CHGBUCK_CHGBUCKLP 0X20 +#define DA9052_CHGBUCK_CHGBUCKEN 0X10 +#define DA9052_CHGBUCK_ISETBUCK 0X0F + +/* WAIT COUNTER REGISTER BITS */ +#define DA9052_WAITCONT_WAITDIR 0X80 +#define DA9052_WAITCONT_RTCCLOCK 0X40 +#define DA9052_WAITCONT_WAITMODE 0X20 +#define DA9052_WAITCONT_EN32KOUT 0X10 +#define DA9052_WAITCONT_DELAYTIME 0X0F + +/* ISET CONTROL REGISTER BITS */ +#define DA9052_ISET_ISETDCIN 0XF0 +#define DA9052_ISET_ISETVBUS 0X0F + +/* BATTERY CHARGER CONTROL REGISTER BITS */ +#define DA9052_BATCHG_ICHGPRE 0XC0 +#define DA9052_BATCHG_ICHGBAT 0X3F + +/* CHARGER COUNTER REGISTER BITS */ +#define DA9052_CHG_CONT_VCHG_BAT 0XF8 +#define DA9052_CHG_CONT_TCTR 0X07 + +/* INPUT CONTROL REGISTER BITS */ +#define DA9052_INPUT_CONT_TCTR_MODE 0X80 +#define DA9052_INPUT_CONT_VBUS_SUSP 0X10 +#define DA9052_INPUT_CONT_DCIN_SUSP 0X08 + +/* CHARGING TIME REGISTER BITS */ +#define DA9052_CHGTIME_CHGTIME 0XFF + +/* BACKUP BATTERY CONTROL REGISTER BITS */ +#define DA9052_BBATCONT_BCHARGERISET 0XF0 +#define DA9052_BBATCONT_BCHARGERVSET 0X0F + +/* LED REGISTERS BITS */ +/* LED BOOST REGISTER BITS */ +#define DA9052_BOOST_EBFAULT 0X80 +#define DA9052_BOOST_MBFAULT 0X40 +#define DA9052_BOOST_BOOSTFRQ 0X20 +#define DA9052_BOOST_BOOSTILIM 0X10 +#define DA9052_BOOST_LED3INEN 0X08 +#define DA9052_BOOST_LED2INEN 0X04 +#define DA9052_BOOST_LED1INEN 0X02 +#define DA9052_BOOST_BOOSTEN 0X01 + +/* LED CONTROL REGISTER BITS */ +#define DA9052_LEDCONT_SELLEDMODE 0X80 +#define DA9052_LEDCONT_LED3ICONT 0X40 +#define DA9052_LEDCONT_LED3RAMP 0X20 +#define DA9052_LEDCONT_LED3EN 0X10 +#define DA9052_LEDCONT_LED2RAMP 0X08 +#define DA9052_LEDCONT_LED2EN 0X04 +#define DA9052_LEDCONT_LED1RAMP 0X02 +#define DA9052_LEDCONT_LED1EN 0X01 + +/* LEDMIN123 REGISTER BIT */ +#define DA9052_LEDMIN123_LEDMINCURRENT 0XFF + +/* LED1CONF REGISTER BIT */ +#define DA9052_LED1CONF_LED1CURRENT 0XFF + +/* LED2CONF REGISTER BIT */ +#define DA9052_LED2CONF_LED2CURRENT 0XFF + +/* LED3CONF REGISTER BIT */ +#define DA9052_LED3CONF_LED3CURRENT 0XFF + +/* LED COUNT REGISTER BIT */ +#define DA9052_LED_CONT_DIM 0X80 + +/* ADC MAN REGISTERS BITS */ +#define DA9052_ADC_MAN_MAN_CONV 0X10 +#define DA9052_ADC_MAN_MUXSEL_VDDOUT 0X00 +#define DA9052_ADC_MAN_MUXSEL_ICH 0X01 +#define DA9052_ADC_MAN_MUXSEL_TBAT 0X02 +#define DA9052_ADC_MAN_MUXSEL_VBAT 0X03 +#define DA9052_ADC_MAN_MUXSEL_AD4 0X04 +#define DA9052_ADC_MAN_MUXSEL_AD5 0X05 +#define DA9052_ADC_MAN_MUXSEL_AD6 0X06 +#define DA9052_ADC_MAN_MUXSEL_VBBAT 0X09 + +/* ADC CONTROL REGSISTERS BITS */ +#define DA9052_ADCCONT_COMP1V2EN 0X80 +#define DA9052_ADCCONT_ADCMODE 0X40 +#define DA9052_ADCCONT_TBATISRCEN 0X20 +#define DA9052_ADCCONT_AD4ISRCEN 0X10 +#define DA9052_ADCCONT_AUTOAD6EN 0X08 +#define DA9052_ADCCONT_AUTOAD5EN 0X04 +#define DA9052_ADCCONT_AUTOAD4EN 0X02 +#define DA9052_ADCCONT_AUTOVDDEN 0X01 + +/* ADC 10 BIT MANUAL CONVERSION RESULT LOW REGISTER */ +#define DA9052_ADC_RES_LSB 
0X03 + +/* ADC 10 BIT MANUAL CONVERSION RESULT HIGH REGISTER */ +#define DA9052_ADCRESH_ADCRESMSB 0XFF + +/* VDD RES REGSISTER BIT*/ +#define DA9052_VDDRES_VDDOUTRES 0XFF + +/* VDD MON REGSISTER BIT */ +#define DA9052_VDDMON_VDDOUTMON 0XFF + +/* ICHG_AV REGSISTER BIT */ +#define DA9052_ICHGAV_ICHGAV 0XFF + +/* ICHG_THD REGSISTER BIT */ +#define DA9052_ICHGTHD_ICHGTHD 0XFF + +/* ICHG_END REGSISTER BIT */ +#define DA9052_ICHGEND_ICHGEND 0XFF + +/* TBAT_RES REGSISTER BIT */ +#define DA9052_TBATRES_TBATRES 0XFF + +/* TBAT_HIGHP REGSISTER BIT */ +#define DA9052_TBATHIGHP_TBATHIGHP 0XFF + +/* TBAT_HIGHN REGSISTER BIT */ +#define DA9052_TBATHIGHN_TBATHIGHN 0XFF + +/* TBAT_LOW REGSISTER BIT */ +#define DA9052_TBATLOW_TBATLOW 0XFF + +/* T_OFFSET REGSISTER BIT */ +#define DA9052_TOFFSET_TOFFSET 0XFF + +/* ADCIN4_RES REGSISTER BIT */ +#define DA9052_ADCIN4RES_ADCIN4RES 0XFF + +/* ADCIN4_HIGH REGSISTER BIT */ +#define DA9052_AUTO4HIGH_AUTO4HIGH 0XFF + +/* ADCIN4_LOW REGSISTER BIT */ +#define DA9052_AUTO4LOW_AUTO4LOW 0XFF + +/* ADCIN5_RES REGSISTER BIT */ +#define DA9052_ADCIN5RES_ADCIN5RES 0XFF + +/* ADCIN5_HIGH REGSISTER BIT */ +#define DA9052_AUTO5HIGH_AUTOHIGH 0XFF + +/* ADCIN5_LOW REGSISTER BIT */ +#define DA9052_AUTO5LOW_AUTO5LOW 0XFF + +/* ADCIN6_RES REGSISTER BIT */ +#define DA9052_ADCIN6RES_ADCIN6RES 0XFF + +/* ADCIN6_HIGH REGSISTER BIT */ +#define DA9052_AUTO6HIGH_AUTO6HIGH 0XFF + +/* ADCIN6_LOW REGSISTER BIT */ +#define DA9052_AUTO6LOW_AUTO6LOW 0XFF + +/* TJUNC_RES REGSISTER BIT*/ +#define DA9052_TJUNCRES_TJUNCRES 0XFF + +/* TSI REGISTER */ +/* TSI CONTROL REGISTER A BITS */ +#define DA9052_TSICONTA_TSIDELAY 0XC0 +#define DA9052_TSICONTA_TSISKIP 0X38 +#define DA9052_TSICONTA_TSIMODE 0X04 +#define DA9052_TSICONTA_PENDETEN 0X02 +#define DA9052_TSICONTA_AUTOTSIEN 0X01 + +/* TSI CONTROL REGISTER B BITS */ +#define DA9052_TSICONTB_ADCREF 0X80 +#define DA9052_TSICONTB_TSIMAN 0X40 +#define DA9052_TSICONTB_TSIMUX_XP 0X00 +#define DA9052_TSICONTB_TSIMUX_YP 0X10 +#define DA9052_TSICONTB_TSIMUX_XN 0X20 +#define DA9052_TSICONTB_TSIMUX_YN 0X30 +#define DA9052_TSICONTB_TSISEL3 0X08 +#define DA9052_TSICONTB_TSISEL2 0X04 +#define DA9052_TSICONTB_TSISEL1 0X02 +#define DA9052_TSICONTB_TSISEL0 0X01 + +/* TSI X CO-ORDINATE MSB RESULT REGISTER BITS */ +#define DA9052_TSIXMSB_TSIXM 0XFF + +/* TSI Y CO-ORDINATE MSB RESULT REGISTER BITS */ +#define DA9052_TSIYMSB_TSIYM 0XFF + +/* TSI CO-ORDINATE LSB RESULT REGISTER BITS */ +#define DA9052_TSILSB_PENDOWN 0X40 +#define DA9052_TSILSB_TSIZL 0X30 +#define DA9052_TSILSB_TSIZL_SHIFT 4 +#define DA9052_TSILSB_TSIZL_BITS 2 +#define DA9052_TSILSB_TSIYL 0X0C +#define DA9052_TSILSB_TSIYL_SHIFT 2 +#define DA9052_TSILSB_TSIYL_BITS 2 +#define DA9052_TSILSB_TSIXL 0X03 +#define DA9052_TSILSB_TSIXL_SHIFT 0 +#define DA9052_TSILSB_TSIXL_BITS 2 + +/* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */ +#define DA9052_TSIZMSB_TSIZM 0XFF + +/* RTC REGISTER */ +/* RTC TIMER SECONDS REGISTER BITS */ +#define DA9052_COUNTS_MONITOR 0X40 +#define DA9052_RTC_SEC 0X3F + +/* RTC TIMER MINUTES REGISTER BIT */ +#define DA9052_RTC_MIN 0X3F + +/* RTC TIMER HOUR REGISTER BIT */ +#define DA9052_RTC_HOUR 0X1F + +/* RTC TIMER DAYS REGISTER BIT */ +#define DA9052_RTC_DAY 0X1F + +/* RTC TIMER MONTHS REGISTER BIT */ +#define DA9052_RTC_MONTH 0X0F + +/* RTC TIMER YEARS REGISTER BIT */ +#define DA9052_RTC_YEAR 0X3F + +/* RTC ALARM MINUTES REGISTER BITS */ +#define DA9052_ALARMM_I_TICK_TYPE 0X80 +#define DA9052_ALARMMI_ALARMTYPE 0X40 + +/* RTC ALARM YEARS REGISTER BITS */ +#define DA9052_ALARM_Y_TICK_ON 0X80 +#define 
DA9052_ALARM_Y_ALARM_ON 0X40 + +/* RTC SECONDS REGISTER A BITS */ +#define DA9052_SECONDA_SECONDSA 0XFF + +/* RTC SECONDS REGISTER B BITS */ +#define DA9052_SECONDB_SECONDSB 0XFF + +/* RTC SECONDS REGISTER C BITS */ +#define DA9052_SECONDC_SECONDSC 0XFF + +/* RTC SECONDS REGISTER D BITS */ +#define DA9052_SECONDD_SECONDSD 0XFF + +#endif +/* __LINUX_MFD_DA9052_REG_H */ diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h new file mode 100644 index 000000000..5dc743fd6 --- /dev/null +++ b/include/linux/mfd/da9055/core.h @@ -0,0 +1,94 @@ +/* + * da9055 declarations for DA9055 PMICs. + * + * Copyright(c) 2012 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __DA9055_CORE_H +#define __DA9055_CORE_H + +#include +#include + +/* + * PMIC IRQ + */ +#define DA9055_IRQ_ALARM 0x01 +#define DA9055_IRQ_TICK 0x02 +#define DA9055_IRQ_NONKEY 0x00 +#define DA9055_IRQ_REGULATOR 0x0B +#define DA9055_IRQ_HWMON 0x03 + +struct da9055_pdata; + +struct da9055 { + struct regmap *regmap; + struct regmap_irq_chip_data *irq_data; + struct device *dev; + struct i2c_client *i2c_client; + + int irq_base; + int chip_irq; +}; + +/* Device I/O */ +static inline int da9055_reg_read(struct da9055 *da9055, unsigned char reg) +{ + int val, ret; + + ret = regmap_read(da9055->regmap, reg, &val); + if (ret < 0) + return ret; + + return val; +} + +static inline int da9055_reg_write(struct da9055 *da9055, unsigned char reg, + unsigned char val) +{ + return regmap_write(da9055->regmap, reg, val); +} + +static inline int da9055_group_read(struct da9055 *da9055, unsigned char reg, + unsigned reg_cnt, unsigned char *val) +{ + return regmap_bulk_read(da9055->regmap, reg, val, reg_cnt); +} + +static inline int da9055_group_write(struct da9055 *da9055, unsigned char reg, + unsigned reg_cnt, unsigned char *val) +{ + return regmap_raw_write(da9055->regmap, reg, val, reg_cnt); +} + +static inline int da9055_reg_update(struct da9055 *da9055, unsigned char reg, + unsigned char bit_mask, + unsigned char reg_val) +{ + return regmap_update_bits(da9055->regmap, reg, bit_mask, reg_val); +} + +/* Generic Device API */ +int da9055_device_init(struct da9055 *da9055); +void da9055_device_exit(struct da9055 *da9055); + +extern const struct regmap_config da9055_regmap_config; + +#endif /* __DA9055_CORE_H */ diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h new file mode 100644 index 000000000..1a94fa2ac --- /dev/null +++ b/include/linux/mfd/da9055/pdata.h @@ -0,0 +1,54 @@ +/* Copyright (C) 2012 Dialog Semiconductor Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ +#ifndef __DA9055_PDATA_H +#define __DA9055_PDATA_H + +#define DA9055_MAX_REGULATORS 8 + +struct da9055; +struct gpio_desc; + +enum gpio_select { + NO_GPIO = 0, + GPIO_1, + GPIO_2 +}; + +struct da9055_pdata { + int (*init) (struct da9055 *da9055); + int irq_base; + int gpio_base; + + struct regulator_init_data *regulators[DA9055_MAX_REGULATORS]; + /* Enable RTC in RESET Mode */ + bool reset_enable; + /* + * GPI muxed pin to control + * regulator state A/B, 0 if not available. + */ + int *gpio_ren; + /* + * GPI muxed pin to control + * regulator set, 0 if not available. + */ + int *gpio_rsel; + /* + * Regulator mode control bits value (GPI offset) that + * that controls the regulator state, 0 if not available. + */ + enum gpio_select *reg_ren; + /* + * Regulator mode control bits value (GPI offset) that + * controls the regulator set A/B, 0 if not available. + */ + enum gpio_select *reg_rsel; + /* GPIO descriptors to enable regulator, NULL if not available */ + struct gpio_desc **ena_gpiods; +}; +#endif /* __DA9055_PDATA_H */ diff --git a/include/linux/mfd/da9055/reg.h b/include/linux/mfd/da9055/reg.h new file mode 100644 index 000000000..2b592e072 --- /dev/null +++ b/include/linux/mfd/da9055/reg.h @@ -0,0 +1,699 @@ +/* + * DA9055 declarations for DA9055 PMICs. + * + * Copyright(c) 2012 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#ifndef __DA9055_REG_H +#define __DA9055_REG_H + +/* + * PMIC registers + */ + /* PAGE0 */ +#define DA9055_REG_PAGE_CON 0x00 + +/* System Control and Event Registers */ +#define DA9055_REG_STATUS_A 0x01 +#define DA9055_REG_STATUS_B 0x02 +#define DA9055_REG_FAULT_LOG 0x03 +#define DA9055_REG_EVENT_A 0x04 +#define DA9055_REG_EVENT_B 0x05 +#define DA9055_REG_EVENT_C 0x06 +#define DA9055_REG_IRQ_MASK_A 0x07 +#define DA9055_REG_IRQ_MASK_B 0x08 +#define DA9055_REG_IRQ_MASK_C 0x09 +#define DA9055_REG_CONTROL_A 0x0A +#define DA9055_REG_CONTROL_B 0x0B +#define DA9055_REG_CONTROL_C 0x0C +#define DA9055_REG_CONTROL_D 0x0D +#define DA9055_REG_CONTROL_E 0x0E +#define DA9055_REG_PD_DIS 0x0F + +/* GPIO Control Registers */ +#define DA9055_REG_GPIO0_1 0x10 +#define DA9055_REG_GPIO2 0x11 +#define DA9055_REG_GPIO_MODE0_2 0x12 + +/* Regulator Control Registers */ +#define DA9055_REG_BCORE_CONT 0x13 +#define DA9055_REG_BMEM_CONT 0x14 +#define DA9055_REG_LDO1_CONT 0x15 +#define DA9055_REG_LDO2_CONT 0x16 +#define DA9055_REG_LDO3_CONT 0x17 +#define DA9055_REG_LDO4_CONT 0x18 +#define DA9055_REG_LDO5_CONT 0x19 +#define DA9055_REG_LDO6_CONT 0x1A + +/* GP-ADC Control Registers */ +#define DA9055_REG_ADC_MAN 0x1B +#define DA9055_REG_ADC_CONT 0x1C +#define DA9055_REG_VSYS_MON 0x1D +#define DA9055_REG_ADC_RES_L 0x1E +#define DA9055_REG_ADC_RES_H 0x1F +#define DA9055_REG_VSYS_RES 0x20 +#define DA9055_REG_ADCIN1_RES 0x21 +#define DA9055_REG_ADCIN2_RES 0x22 +#define DA9055_REG_ADCIN3_RES 0x23 + +/* Sequencer Control Registers */ +#define DA9055_REG_EN_32K 0x35 + +/* Regulator Setting Registers */ +#define DA9055_REG_BUCK_LIM 0x37 +#define DA9055_REG_BCORE_MODE 0x38 +#define DA9055_REG_VBCORE_A 0x39 +#define DA9055_REG_VBMEM_A 0x3A +#define DA9055_REG_VLDO1_A 0x3B +#define DA9055_REG_VLDO2_A 0x3C +#define DA9055_REG_VLDO3_A 0x3D +#define DA9055_REG_VLDO4_A 0x3E +#define DA9055_REG_VLDO5_A 0x3F +#define DA9055_REG_VLDO6_A 0x40 +#define DA9055_REG_VBCORE_B 0x41 +#define DA9055_REG_VBMEM_B 0x42 +#define DA9055_REG_VLDO1_B 0x43 +#define DA9055_REG_VLDO2_B 0x44 +#define DA9055_REG_VLDO3_B 0x45 +#define DA9055_REG_VLDO4_B 0x46 +#define DA9055_REG_VLDO5_B 0x47 +#define DA9055_REG_VLDO6_B 0x48 + +/* GP-ADC Threshold Registers */ +#define DA9055_REG_AUTO1_HIGH 0x49 +#define DA9055_REG_AUTO1_LOW 0x4A +#define DA9055_REG_AUTO2_HIGH 0x4B +#define DA9055_REG_AUTO2_LOW 0x4C +#define DA9055_REG_AUTO3_HIGH 0x4D +#define DA9055_REG_AUTO3_LOW 0x4E + +/* OTP */ +#define DA9055_REG_OPT_COUNT 0x50 +#define DA9055_REG_OPT_ADDR 0x51 +#define DA9055_REG_OPT_DATA 0x52 + +/* RTC Calendar and Alarm Registers */ +#define DA9055_REG_COUNT_S 0x53 +#define DA9055_REG_COUNT_MI 0x54 +#define DA9055_REG_COUNT_H 0x55 +#define DA9055_REG_COUNT_D 0x56 +#define DA9055_REG_COUNT_MO 0x57 +#define DA9055_REG_COUNT_Y 0x58 +#define DA9055_REG_ALARM_MI 0x59 +#define DA9055_REG_ALARM_H 0x5A +#define DA9055_REG_ALARM_D 0x5B +#define DA9055_REG_ALARM_MO 0x5C +#define DA9055_REG_ALARM_Y 0x5D +#define DA9055_REG_SECOND_A 0x5E +#define DA9055_REG_SECOND_B 0x5F +#define DA9055_REG_SECOND_C 0x60 +#define DA9055_REG_SECOND_D 0x61 + +/* Customer Trim and Configuration */ +#define DA9055_REG_T_OFFSET 0x63 +#define DA9055_REG_INTERFACE 0x64 +#define DA9055_REG_CONFIG_A 0x65 +#define DA9055_REG_CONFIG_B 0x66 +#define DA9055_REG_CONFIG_C 0x67 +#define DA9055_REG_CONFIG_D 0x68 +#define DA9055_REG_CONFIG_E 0x69 +#define DA9055_REG_TRIM_CLDR 0x6F + +/* General Purpose Registers */ +#define DA9055_REG_GP_ID_0 0x70 +#define DA9055_REG_GP_ID_1 0x71 +#define 
DA9055_REG_GP_ID_2 0x72 +#define DA9055_REG_GP_ID_3 0x73 +#define DA9055_REG_GP_ID_4 0x74 +#define DA9055_REG_GP_ID_5 0x75 +#define DA9055_REG_GP_ID_6 0x76 +#define DA9055_REG_GP_ID_7 0x77 +#define DA9055_REG_GP_ID_8 0x78 +#define DA9055_REG_GP_ID_9 0x79 +#define DA9055_REG_GP_ID_10 0x7A +#define DA9055_REG_GP_ID_11 0x7B +#define DA9055_REG_GP_ID_12 0x7C +#define DA9055_REG_GP_ID_13 0x7D +#define DA9055_REG_GP_ID_14 0x7E +#define DA9055_REG_GP_ID_15 0x7F +#define DA9055_REG_GP_ID_16 0x80 +#define DA9055_REG_GP_ID_17 0x81 +#define DA9055_REG_GP_ID_18 0x82 +#define DA9055_REG_GP_ID_19 0x83 + +#define DA9055_MAX_REGISTER_CNT DA9055_REG_GP_ID_19 + +/* + * PMIC registers bits + */ + +/* DA9055_REG_PAGE_CON (addr=0x00) */ +#define DA9055_PAGE_WRITE_MODE (0<<6) +#define DA9055_REPEAT_WRITE_MODE (1<<6) + +/* DA9055_REG_STATUS_A (addr=0x01) */ +#define DA9055_NOKEY_STS 0x01 +#define DA9055_WAKE_STS 0x02 +#define DA9055_DVC_BUSY_STS 0x04 +#define DA9055_COMP1V2_STS 0x08 +#define DA9055_NJIG_STS 0x10 +#define DA9055_LDO5_LIM_STS 0x20 +#define DA9055_LDO6_LIM_STS 0x40 + +/* DA9055_REG_STATUS_B (addr=0x02) */ +#define DA9055_GPI0_STS 0x01 +#define DA9055_GPI1_STS 0x02 +#define DA9055_GPI2_STS 0x04 + +/* DA9055_REG_FAULT_LOG (addr=0x03) */ +#define DA9055_TWD_ERROR_FLG 0x01 +#define DA9055_POR_FLG 0x02 +#define DA9055_VDD_FAULT_FLG 0x04 +#define DA9055_VDD_START_FLG 0x08 +#define DA9055_TEMP_CRIT_FLG 0x10 +#define DA9055_KEY_RESET_FLG 0x20 +#define DA9055_WAIT_SHUT_FLG 0x80 + +/* DA9055_REG_EVENT_A (addr=0x04) */ +#define DA9055_NOKEY_EINT 0x01 +#define DA9055_ALARM_EINT 0x02 +#define DA9055_TICK_EINT 0x04 +#define DA9055_ADC_RDY_EINT 0x08 +#define DA9055_SEQ_RDY_EINT 0x10 +#define DA9055_EVENTS_B_EINT 0x20 +#define DA9055_EVENTS_C_EINT 0x40 + +/* DA9055_REG_EVENT_B (addr=0x05) */ +#define DA9055_E_WAKE_EINT 0x01 +#define DA9055_E_TEMP_EINT 0x02 +#define DA9055_E_COMP1V2_EINT 0x04 +#define DA9055_E_LDO_LIM_EINT 0x08 +#define DA9055_E_NJIG_EINT 0x20 +#define DA9055_E_VDD_MON_EINT 0x40 +#define DA9055_E_VDD_WARN_EINT 0x80 + +/* DA9055_REG_EVENT_C (addr=0x06) */ +#define DA9055_E_GPI0_EINT 0x01 +#define DA9055_E_GPI1_EINT 0x02 +#define DA9055_E_GPI2_EINT 0x04 + +/* DA9055_REG_IRQ_MASK_A (addr=0x07) */ +#define DA9055_M_NONKEY_EINT 0x01 +#define DA9055_M_ALARM_EINT 0x02 +#define DA9055_M_TICK_EINT 0x04 +#define DA9055_M_ADC_RDY_EINT 0x08 +#define DA9055_M_SEQ_RDY_EINT 0x10 + +/* DA9055_REG_IRQ_MASK_B (addr=0x08) */ +#define DA9055_M_WAKE_EINT 0x01 +#define DA9055_M_TEMP_EINT 0x02 +#define DA9055_M_COMP_1V2_EINT 0x04 +#define DA9055_M_LDO_LIM_EINT 0x08 +#define DA9055_M_NJIG_EINT 0x20 +#define DA9055_M_VDD_MON_EINT 0x40 +#define DA9055_M_VDD_WARN_EINT 0x80 + +/* DA9055_REG_IRQ_MASK_C (addr=0x09) */ +#define DA9055_M_GPI0_EINT 0x01 +#define DA9055_M_GPI1_EINT 0x02 +#define DA9055_M_GPI2_EINT 0x04 + +/* DA9055_REG_CONTROL_A (addr=0xA) */ +#define DA9055_DEBOUNCING_SHIFT 0x00 +#define DA9055_DEBOUNCING_MASK 0x07 +#define DA9055_NRES_MODE_SHIFT 0x03 +#define DA9055_NRES_MODE_MASK 0x08 +#define DA9055_SLEW_RATE_SHIFT 0x04 +#define DA9055_SLEW_RATE_MASK 0x30 +#define DA9055_NOKEY_LOCK_SHIFT 0x06 +#define DA9055_NOKEY_LOCK_MASK 0x40 + +/* DA9055_REG_CONTROL_B (addr=0xB) */ +#define DA9055_RTC_MODE_PD 0x01 +#define DA9055_RTC_MODE_SD_SHIFT 0x01 +#define DA9055_RTC_MODE_SD 0x02 +#define DA9055_RTC_EN 0x04 +#define DA9055_ECO_MODE_SHIFT 0x03 +#define DA9055_ECO_MODE_MASK 0x08 +#define DA9055_TWDSCALE_SHIFT 4 +#define DA9055_TWDSCALE_MASK 0x70 +#define DA9055_V_LOCK_SHIFT 0x07 +#define DA9055_V_LOCK_MASK 0x80 + +/* 
DA9055_REG_CONTROL_C (addr=0xC) */ +#define DA9055_SYSTEM_EN_SHIFT 0x00 +#define DA9055_SYSTEM_EN_MASK 0x01 +#define DA9055_POWERN_EN_SHIFT 0x01 +#define DA9055_POWERN_EN_MASK 0x02 +#define DA9055_POWER1_EN_SHIFT 0x02 +#define DA9055_POWER1_EN_MASK 0x04 + +/* DA9055_REG_CONTROL_D (addr=0xD) */ +#define DA9055_STANDBY_SHIFT 0x02 +#define DA9055_STANDBY_MASK 0x08 +#define DA9055_AUTO_BOOT_SHIFT 0x03 +#define DA9055_AUTO_BOOT_MASK 0x04 + +/* DA9055_REG_CONTROL_E (addr=0xE) */ +#define DA9055_WATCHDOG_SHIFT 0x00 +#define DA9055_WATCHDOG_MASK 0x01 +#define DA9055_SHUTDOWN_SHIFT 0x01 +#define DA9055_SHUTDOWN_MASK 0x02 +#define DA9055_WAKE_UP_SHIFT 0x02 +#define DA9055_WAKE_UP_MASK 0x04 + +/* DA9055_REG_GPIO (addr=0x10/0x11) */ +#define DA9055_GPIO0_PIN_SHIFT 0x00 +#define DA9055_GPIO0_PIN_MASK 0x03 +#define DA9055_GPIO0_TYPE_SHIFT 0x02 +#define DA9055_GPIO0_TYPE_MASK 0x04 +#define DA9055_GPIO0_WEN_SHIFT 0x03 +#define DA9055_GPIO0_WEN_MASK 0x08 +#define DA9055_GPIO1_PIN_SHIFT 0x04 +#define DA9055_GPIO1_PIN_MASK 0x30 +#define DA9055_GPIO1_TYPE_SHIFT 0x06 +#define DA9055_GPIO1_TYPE_MASK 0x40 +#define DA9055_GPIO1_WEN_SHIFT 0x07 +#define DA9055_GPIO1_WEN_MASK 0x80 +#define DA9055_GPIO2_PIN_SHIFT 0x00 +#define DA9055_GPIO2_PIN_MASK 0x30 +#define DA9055_GPIO2_TYPE_SHIFT 0x02 +#define DA9055_GPIO2_TYPE_MASK 0x04 +#define DA9055_GPIO2_WEN_SHIFT 0x03 +#define DA9055_GPIO2_WEN_MASK 0x08 + +/* DA9055_REG_GPIO_MODE (addr=0x12) */ +#define DA9055_GPIO0_MODE_SHIFT 0x00 +#define DA9055_GPIO0_MODE_MASK 0x01 +#define DA9055_GPIO1_MODE_SHIFT 0x01 +#define DA9055_GPIO1_MODE_MASK 0x02 +#define DA9055_GPIO2_MODE_SHIFT 0x02 +#define DA9055_GPIO2_MODE_MASK 0x04 + +/* DA9055_REG_BCORE_CONT (addr=0x13) */ +#define DA9055_BCORE_EN_SHIFT 0x00 +#define DA9055_BCORE_EN_MASK 0x01 +#define DA9055_BCORE_GPI_SHIFT 0x01 +#define DA9055_BCORE_GPI_MASK 0x02 +#define DA9055_BCORE_PD_DIS_SHIFT 0x03 +#define DA9055_BCORE_PD_DIS_MASK 0x04 +#define DA9055_VBCORE_SEL_SHIFT 0x04 +#define DA9055_SEL_REG_A 0x0 +#define DA9055_SEL_REG_B 0x10 +#define DA9055_VBCORE_SEL_MASK 0x10 +#define DA9055_V_GPI_MASK 0x60 +#define DA9055_V_GPI_SHIFT 0x05 +#define DA9055_E_GPI_MASK 0x06 +#define DA9055_E_GPI_SHIFT 0x01 +#define DA9055_VBCORE_GPI_SHIFT 0x05 +#define DA9055_VBCORE_GPI_MASK 0x60 +#define DA9055_BCORE_CONF_SHIFT 0x07 +#define DA9055_BCORE_CONF_MASK 0x80 + +/* DA9055_REG_BMEM_CONT (addr=0x14) */ +#define DA9055_BMEM_EN_SHIFT 0x00 +#define DA9055_BMEM_EN_MASK 0x01 +#define DA9055_BMEM_GPI_SHIFT 0x01 +#define DA9055_BMEM_GPI_MASK 0x06 +#define DA9055_BMEM_PD_DIS_SHIFT 0x03 +#define DA9055_BMEM_PD_DIS_MASK 0x08 +#define DA9055_VBMEM_SEL_SHIT 0x04 +#define DA9055_VBMEM_SEL_VBMEM_A (0<<4) +#define DA9055_VBMEM_SEL_VBMEM_B (1<<4) +#define DA9055_VBMEM_SEL_MASK 0x10 +#define DA9055_VBMEM_GPI_SHIFT 0x05 +#define DA9055_VBMEM_GPI_MASK 0x60 +#define DA9055_BMEM_CONF_SHIFT 0x07 +#define DA9055_BMEM_CONF_MASK 0x80 + +/* DA9055_REG_LDO_CONT (addr=0x15-0x1A) */ +#define DA9055_LDO_EN_SHIFT 0x00 +#define DA9055_LDO_EN_MASK 0x01 +#define DA9055_LDO_GPI_SHIFT 0x01 +#define DA9055_LDO_GPI_MASK 0x06 +#define DA9055_LDO_PD_DIS_SHIFT 0x03 +#define DA9055_LDO_PD_DIS_MASK 0x08 +#define DA9055_VLDO_SEL_SHIFT 0x04 +#define DA9055_VLDO_SEL_MASK 0x10 +#define DA9055_VLDO_SEL_VLDO_A 0x00 +#define DA9055_VLDO_SEL_VLDO_B 0x01 +#define DA9055_VLDO_GPI_SHIFT 0x05 +#define DA9055_VLDO_GPI_MASK 0x60 +#define DA9055_LDO_CONF_SHIFT 0x07 +#define DA9055_LDO_CONF_MASK 0x80 +#define DA9055_REGUALTOR_SET_A 0x00 +#define DA9055_REGUALTOR_SET_B 0x10 + +/* DA9055_REG_ADC_MAN 
(addr=0x1B) */ +#define DA9055_ADC_MUX_SHIFT 0 +#define DA9055_ADC_MUX_MASK 0xF +#define DA9055_ADC_MUX_VSYS 0x0 +#define DA9055_ADC_MUX_ADCIN1 0x01 +#define DA9055_ADC_MUX_ADCIN2 0x02 +#define DA9055_ADC_MUX_ADCIN3 0x03 +#define DA9055_ADC_MUX_T_SENSE 0x04 +#define DA9055_ADC_MAN_SHIFT 0x04 +#define DA9055_ADC_MAN_CONV 0x10 +#define DA9055_ADC_LSB_MASK 0X03 +#define DA9055_ADC_MODE_MASK 0x20 +#define DA9055_ADC_MODE_SHIFT 5 +#define DA9055_ADC_MODE_1MS (1<<5) +#define DA9055_COMP1V2_EN_SHIFT 7 + +/* DA9055_REG_ADC_CONT (addr=0x1C) */ +#define DA9055_ADC_AUTO_VSYS_EN_SHIFT 0 +#define DA9055_ADC_AUTO_AD1_EN_SHIFT 1 +#define DA9055_ADC_AUTO_AD2_EN_SHIFT 2 +#define DA9055_ADC_AUTO_AD3_EN_SHIFT 3 +#define DA9055_ADC_ISRC_EN_SHIFT 4 +#define DA9055_ADC_ADCIN1_DEB_SHIFT 5 +#define DA9055_ADC_ADCIN2_DEB_SHIFT 6 +#define DA9055_ADC_ADCIN3_DEB_SHIFT 7 +#define DA9055_AD1_ISRC_MASK 0x10 +#define DA9055_AD1_ISRC_SHIFT 4 + +/* DA9055_REG_VSYS_MON (addr=0x1D) */ +#define DA9055_VSYS_VAL_SHIFT 0 +#define DA9055_VSYS_VAL_MASK 0xFF +#define DA9055_VSYS_VAL_BASE 0x00 +#define DA9055_VSYS_VAL_MAX DA9055_VSYS_VAL_MASK +#define DA9055_VSYS_VOLT_BASE 2500 +#define DA9055_VSYS_VOLT_INC 10 +#define DA9055_VSYS_STEPS 255 +#define DA9055_VSYS_VOLT_MIN 2500 + +/* DA9044_REG_XXX_RES (addr=0x20-0x23) */ +#define DA9055_ADC_VAL_SHIFT 0 +#define DA9055_ADC_VAL_MASK 0xFF +#define DA9055_ADC_VAL_BASE 0x00 +#define DA9055_ADC_VAL_MAX DA9055_ADC_VAL_MASK +#define DA9055_ADC_VOLT_BASE 0 +#define DA9055_ADC_VSYS_VOLT_BASE 2500 +#define DA9055_ADC_VOLT_INC 10 +#define DA9055_ADC_VSYS_VOLT_INC 12 +#define DA9055_ADC_STEPS 255 + +/* DA9055_REG_EN_32K (addr=0x35)*/ +#define DA9055_STARTUP_TIME_MASK 0x07 +#define DA9055_STARTUP_TIME_0S 0x0 +#define DA9055_STARTUP_TIME_0_52S 0x1 +#define DA9055_STARTUP_TIME_1S 0x2 +#define DA9055_CRYSTAL_EN 0x08 +#define DA9055_DELAY_MODE_EN 0x10 +#define DA9055_OUT_CLCK_GATED 0x20 +#define DA9055_RTC_CLOCK_GATED 0x40 +#define DA9055_EN_32KOUT_BUF 0x80 + +/* DA9055_REG_RESET (addr=0x36) */ +/* Timer up to 31.744 ms */ +#define DA9055_RESET_TIMER_VAL_SHIFT 0 +#define DA9055_RESET_LOW_VAL_MASK 0x3F +#define DA9055_RESET_LOW_VAL_BASE 0 +#define DA9055_RESET_LOW_VAL_MAX DA9055_RESET_LOW_VAL_MASK +#define DA9055_RESET_US_LOW_BASE 1024 /* min val in units of us */ +#define DA9055_RESET_US_LOW_INC 1024 /* inc val in units of us */ +#define DA9055_RESET_US_LOW_STEP 30 + +/* Timer up to 1048.576ms */ +#define DA9055_RESET_HIGH_VAL_MASK 0x3F +#define DA9055_RESET_HIGH_VAL_BASE 0 +#define DA9055_RESET_HIGH_VAL_MAX DA9055_RESET_HIGH_VAL_MASK +#define DA9055_RESET_US_HIGH_BASE 32768 /* min val in units of us */ +#define DA9055_RESET_US_HIGH_INC 32768 /* inv val in units of us */ +#define DA9055_RESET_US_HIGH_STEP 31 + +/* DA9055_REG_BUCK_ILIM (addr=0x37)*/ +#define DA9055_BMEM_ILIM_SHIFT 0 +#define DA9055_ILIM_MASK 0x3 +#define DA9055_ILIM_500MA 0x0 +#define DA9055_ILIM_600MA 0x1 +#define DA9055_ILIM_700MA 0x2 +#define DA9055_ILIM_800MA 0x3 +#define DA9055_BCORE_ILIM_SHIFT 2 + +/* DA9055_REG_BCORE_MODE (addr=0x38) */ +#define DA9055_BMEM_MODE_SHIFT 0 +#define DA9055_MODE_MASK 0x3 +#define DA9055_MODE_AB 0x0 +#define DA9055_MODE_SLEEP 0x1 +#define DA9055_MODE_SYNCHRO 0x2 +#define DA9055_MODE_AUTO 0x3 +#define DA9055_BCORE_MODE_SHIFT 2 + +/* DA9055_REG_VBCORE_A/B (addr=0x39/0x41)*/ +#define DA9055_VBCORE_VAL_SHIFT 0 +#define DA9055_VBCORE_VAL_MASK 0x3F +#define DA9055_VBCORE_VAL_BASE 0x09 +#define DA9055_VBCORE_VAL_MAX DA9055_VBCORE_VAL_MASK +#define DA9055_VBCORE_VOLT_BASE 750 +#define DA9055_VBCORE_VOLT_INC 25 
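
The GP-ADC macros above (DA9055_ADC_MUX_*, DA9055_ADC_MAN_CONV and the *_VOLT_BASE/*_VOLT_INC result scaling) are meant to be composed with regmap accessors by the DA9055 client drivers. A minimal, hypothetical sketch — not part of the header being added, and da9055_read_vsys_mv() is an invented name — of starting a manual VSYS conversion and scaling the 8-bit result could look like this, assuming a struct regmap handle obtained from the MFD core and the macros above in scope:

#include <linux/regmap.h>

/* Hypothetical helper, for illustration only: kick off a manual GP-ADC
 * conversion on the VSYS channel and convert the 8-bit result register
 * value to millivolts, assuming the linear "base + code * step" reading
 * of DA9055_ADC_VSYS_VOLT_BASE/_INC. */
static int da9055_read_vsys_mv(struct regmap *regmap, unsigned int *mv)
{
	unsigned int code;
	int ret;

	/* Select the VSYS input and set the manual-conversion bit. */
	ret = regmap_update_bits(regmap, DA9055_REG_ADC_MAN,
				 DA9055_ADC_MUX_MASK | DA9055_ADC_MAN_CONV,
				 (DA9055_ADC_MUX_VSYS << DA9055_ADC_MUX_SHIFT) |
				 DA9055_ADC_MAN_CONV);
	if (ret)
		return ret;

	/* A real driver would wait for DA9055_ADC_RDY_EINT here. */
	ret = regmap_read(regmap, DA9055_REG_VSYS_RES, &code);
	if (ret)
		return ret;

	*mv = DA9055_ADC_VSYS_VOLT_BASE + code * DA9055_ADC_VSYS_VOLT_INC;
	return 0;
}
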
+#define DA9055_VBCORE_STEPS 53 +#define DA9055_VBCORE_VOLT_MIN DA9055_VBCORE_VOLT_BASE +#define DA9055_BCORE_SL_SYNCHRO (0<<7) +#define DA9055_BCORE_SL_SLEEP (1<<7) + +/* DA9055_REG_VBMEM_A/B (addr=0x3A/0x42)*/ +#define DA9055_VBMEM_VAL_SHIFT 0 +#define DA9055_VBMEM_VAL_MASK 0x3F +#define DA9055_VBMEM_VAL_BASE 0x00 +#define DA9055_VBMEM_VAL_MAX DA9055_VBMEM_VAL_MASK +#define DA9055_VBMEM_VOLT_BASE 925 +#define DA9055_VBMEM_VOLT_INC 25 +#define DA9055_VBMEM_STEPS 63 +#define DA9055_VBMEM_VOLT_MIN DA9055_VBMEM_VOLT_BASE +#define DA9055_BCMEM_SL_SYNCHRO (0<<7) +#define DA9055_BCMEM_SL_SLEEP (1<<7) + + +/* DA9055_REG_VLDO (addr=0x3B-0x40/0x43-0x48)*/ +#define DA9055_VLDO_VAL_SHIFT 0 +#define DA9055_VLDO_VAL_MASK 0x3F +#define DA9055_VLDO6_VAL_MASK 0x7F +#define DA9055_VLDO_VAL_BASE 0x02 +#define DA9055_VLDO2_VAL_BASE 0x03 +#define DA9055_VLDO6_VAL_BASE 0x00 +#define DA9055_VLDO_VAL_MAX DA9055_VLDO_VAL_MASK +#define DA9055_VLDO6_VAL_MAX DA9055_VLDO6_VAL_MASK +#define DA9055_VLDO_VOLT_BASE 900 +#define DA9055_VLDO_VOLT_INC 50 +#define DA9055_VLDO6_VOLT_INC 20 +#define DA9055_VLDO_STEPS 48 +#define DA9055_VLDO5_STEPS 37 +#define DA9055_VLDO6_STEPS 120 +#define DA9055_VLDO_VOLT_MIN DA9055_VLDO_VOLT_BASE +#define DA9055_LDO_MODE_SHIFT 7 +#define DA9055_LDO_SL_NORMAL 0 +#define DA9055_LDO_SL_SLEEP 1 + +/* DA9055_REG_OTP_CONT (addr=0x50) */ +#define DA9055_OTP_TIM_NORMAL (0<<0) +#define DA9055_OTP_TIM_MARGINAL (1<<0) +#define DA9055_OTP_GP_RD_SHIFT 1 +#define DA9055_OTP_APPS_RD_SHIFT 2 +#define DA9055_PC_DONE_SHIFT 3 +#define DA9055_OTP_GP_LOCK_SHIFT 4 +#define DA9055_OTP_APPS_LOCK_SHIFT 5 +#define DA9055_OTP_CONF_LOCK_SHIFT 6 +#define DA9055_OTP_WRITE_DIS_SHIFT 7 + +/* DA9055_REG_COUNT_S (addr=0x53) */ +#define DA9055_RTC_SEC 0x3F +#define DA9055_RTC_MONITOR_EN 0x40 +#define DA9055_RTC_READ 0x80 + +/* DA9055_REG_COUNT_MI (addr=0x54) */ +#define DA9055_RTC_MIN 0x3F + +/* DA9055_REG_COUNT_H (addr=0x55) */ +#define DA9055_RTC_HOUR 0x1F + +/* DA9055_REG_COUNT_D (addr=0x56) */ +#define DA9055_RTC_DAY 0x1F + +/* DA9055_REG_COUNT_MO (addr=0x57) */ +#define DA9055_RTC_MONTH 0x0F + +/* DA9055_REG_COUNT_Y (addr=0x58) */ +#define DA9055_RTC_YEAR 0x3F +#define DA9055_RTC_YEAR_BASE 2000 + +/* DA9055_REG_ALARM_MI (addr=0x59) */ +#define DA9055_RTC_ALM_MIN 0x3F +#define DA9055_ALARM_STATUS_SHIFT 6 +#define DA9055_ALARM_STATUS_MASK 0x3 +#define DA9055_ALARM_STATUS_NO_ALARM 0x0 +#define DA9055_ALARM_STATUS_TICK 0x1 +#define DA9055_ALARM_STATUS_TIMER_ALARM 0x2 +#define DA9055_ALARM_STATUS_BOTH 0x3 + +/* DA9055_REG_ALARM_H (addr=0x5A) */ +#define DA9055_RTC_ALM_HOUR 0x1F + +/* DA9055_REG_ALARM_D (addr=0x5B) */ +#define DA9055_RTC_ALM_DAY 0x1F + +/* DA9055_REG_ALARM_MO (addr=0x5C) */ +#define DA9055_RTC_ALM_MONTH 0x0F +#define DA9055_RTC_TICK_WAKE_MASK 0x20 +#define DA9055_RTC_TICK_WAKE_SHIFT 5 +#define DA9055_RTC_TICK_TYPE 0x10 +#define DA9055_RTC_TICK_TYPE_SHIFT 0x4 +#define DA9055_RTC_TICK_SEC 0x0 +#define DA9055_RTC_TICK_MIN 0x1 +#define DA9055_ALARAM_TICK_WAKE 0x20 + +/* DA9055_REG_ALARM_Y (addr=0x5D) */ +#define DA9055_RTC_TICK_EN 0x80 +#define DA9055_RTC_ALM_EN 0x40 +#define DA9055_RTC_TICK_ALM_MASK 0xC0 +#define DA9055_RTC_ALM_YEAR 0x3F + +/* DA9055_REG_TRIM_CLDR (addr=0x62) */ +#define DA9055_TRIM_32K_SHIFT 0 +#define DA9055_TRIM_32K_MASK 0x7F +#define DA9055_TRIM_DECREMENT (1<<7) +#define DA9055_TRIM_INCREMENT (0<<7) +#define DA9055_TRIM_VAL_BASE 0x0 +#define DA9055_TRIM_PPM_BASE 0x0 /* min val in units of 0.1PPM */ +#define DA9055_TRIM_PPM_INC 19 /* min inc in units of 0.1PPM */ +#define DA9055_TRIM_STEPS 
127 + +/* DA9055_REG_CONFIG_A (addr=0x65) */ +#define DA9055_PM_I_V_VDDCORE (0<<0) +#define DA9055_PM_I_V_VDD_IO (1<<0) +#define DA9055_VDD_FAULT_TYPE_ACT_LOW (0<<1) +#define DA9055_VDD_FAULT_TYPE_ACT_HIGH (1<<1) +#define DA9055_PM_O_TYPE_PUSH_PULL (0<<2) +#define DA9055_PM_O_TYPE_OPEN_DRAIN (1<<2) +#define DA9055_IRQ_TYPE_ACT_LOW (0<<3) +#define DA9055_IRQ_TYPE_ACT_HIGH (1<<3) +#define DA9055_NIRQ_MODE_IMM (0<<4) +#define DA9055_NIRQ_MODE_ACTIVE (1<<4) +#define DA9055_GPI_V_VDDCORE (0<<5) +#define DA9055_GPI_V_VDD_IO (1<<5) +#define DA9055_PM_IF_V_VDDCORE (0<<6) +#define DA9055_PM_IF_V_VDD_IO (1<<6) + +/* DA9055_REG_CONFIG_B (addr=0x66) */ +#define DA9055_VDD_FAULT_VAL_SHIFT 0 +#define DA9055_VDD_FAULT_VAL_MASK 0xF +#define DA9055_VDD_FAULT_VAL_BASE 0x0 +#define DA9055_VDD_FAULT_VAL_MAX DA9055_VDD_FAULT_VAL_MASK +#define DA9055_VDD_FAULT_VOLT_BASE 2500 +#define DA9055_VDD_FAULT_VOLT_INC 50 +#define DA9055_VDD_FAULT_STEPS 15 + +#define DA9055_VDD_HYST_VAL_SHIFT 4 +#define DA9055_VDD_HYST_VAL_MASK 0x7 +#define DA9055_VDD_HYST_VAL_BASE 0x0 +#define DA9055_VDD_HYST_VAL_MAX DA9055_VDD_HYST_VAL_MASK +#define DA9055_VDD_HYST_VOLT_BASE 100 +#define DA9055_VDD_HYST_VOLT_INC 50 +#define DA9055_VDD_HYST_STEPS 7 +#define DA9055_VDD_HYST_VOLT_MIN DA9055_VDD_HYST_VOLT_BASE + +#define DA9055_VDD_FAULT_EN_SHIFT 7 + +/* DA9055_REG_CONFIG_C (addr=0x67) */ +#define DA9055_BCORE_CLK_INV_SHIFT 0 +#define DA9055_BMEM_CLK_INV_SHIFT 1 +#define DA9055_NFAULT_CONF_SHIFT 2 +#define DA9055_LDO_SD_SHIFT 4 +#define DA9055_LDO5_BYP_SHIFT 6 +#define DA9055_LDO6_BYP_SHIFT 7 + +/* DA9055_REG_CONFIG_D (addr=0x68) */ +#define DA9055_NONKEY_PIN_SHIFT 0 +#define DA9055_NONKEY_PIN_MASK 0x3 +#define DA9055_NONKEY_PIN_PORT_MODE 0x0 +#define DA9055_NONKEY_PIN_KEY_MODE 0x1 +#define DA9055_NONKEY_PIN_MULTI_FUNC 0x2 +#define DA9055_NONKEY_PIN_DEDICT 0x3 +#define DA9055_NONKEY_SD_SHIFT 2 +#define DA9055_KEY_DELAY_SHIFT 3 +#define DA9055_KEY_DELAY_MASK 0x3 +#define DA9055_KEY_DELAY_4S 0x0 +#define DA9055_KEY_DELAY_6S 0x1 +#define DA9055_KEY_DELAY_8S 0x2 +#define DA9055_KEY_DELAY_10S 0x3 + +/* DA9055_REG_CONFIG_E (addr=0x69) */ +#define DA9055_GPIO_PUPD_PULL_UP 0x0 +#define DA9055_GPIO_PUPD_OPEN_DRAIN 0x1 +#define DA9055_GPIO0_PUPD_SHIFT 0 +#define DA9055_GPIO1_PUPD_SHIFT 1 +#define DA9055_GPIO2_PUPD_SHIFT 2 +#define DA9055_UVOV_DELAY_SHIFT 4 +#define DA9055_UVOV_DELAY_MASK 0x3 +#define DA9055_RESET_DURATION_SHIFT 6 +#define DA9055_RESET_DURATION_MASK 0x3 +#define DA9055_RESET_DURATION_0MS 0x0 +#define DA9055_RESET_DURATION_100MS 0x1 +#define DA9055_RESET_DURATION_500MS 0x2 +#define DA9055_RESET_DURATION_1000MS 0x3 + +/* DA9055_REG_MON_REG_1 (addr=0x6A) */ +#define DA9055_MON_THRES_SHIFT 0 +#define DA9055_MON_THRES_MASK 0x3 +#define DA9055_MON_RES_SHIFT 2 +#define DA9055_MON_DEB_SHIFT 3 +#define DA9055_MON_MODE_SHIFT 4 +#define DA9055_MON_MODE_MASK 0x3 +#define DA9055_START_MAX_SHIFT 6 +#define DA9055_START_MAX_MASK 0x3 + +/* DA9055_REG_MON_REG_2 (addr=0x6B) */ +#define DA9055_LDO1_MON_EN_SHIFT 0 +#define DA9055_LDO2_MON_EN_SHIFT 1 +#define DA9055_LDO3_MON_EN_SHIFT 2 +#define DA9055_LDO4_MON_EN_SHIFT 3 +#define DA9055_LDO5_MON_EN_SHIFT 4 +#define DA9055_LDO6_MON_EN_SHIFT 5 +#define DA9055_BCORE_MON_EN_SHIFT 6 +#define DA9055_BMEM_MON_EN_SHIFT 7 + +/* DA9055_REG_CONFIG_F (addr=0x6C) */ +#define DA9055_LDO1_DEF_SHIFT 0 +#define DA9055_LDO2_DEF_SHIFT 1 +#define DA9055_LDO3_DEF_SHIFT 2 +#define DA9055_LDO4_DEF_SHIFT 3 +#define DA9055_LDO5_DEF_SHIFT 4 +#define DA9055_LDO6_DEF_SHIFT 5 +#define DA9055_BCORE_DEF_SHIFT 6 +#define 
DA9055_BMEM_DEF_SHIFT 7 + +/* DA9055_REG_MON_REG_4 (addr=0x6D) */ +#define DA9055_MON_A8_IDX_SHIFT 0 +#define DA9055_MON_A89_IDX_MASK 0x3 +#define DA9055_MON_A89_IDX_NONE 0x0 +#define DA9055_MON_A89_IDX_BUCKCORE 0x1 +#define DA9055_MON_A89_IDX_LDO3 0x2 +#define DA9055_MON_A9_IDX_SHIFT 5 + +/* DA9055_REG_MON_REG_5 (addr=0x6E) */ +#define DA9055_MON_A10_IDX_SHIFT 0 +#define DA9055_MON_A10_IDX_MASK 0x3 +#define DA9055_MON_A10_IDX_NONE 0x0 +#define DA9055_MON_A10_IDX_LDO1 0x1 +#define DA9055_MON_A10_IDX_LDO2 0x2 +#define DA9055_MON_A10_IDX_LDO5 0x3 +#define DA9055_MON_A10_IDX_LDO6 0x4 + +#endif /* __DA9055_REG_H */ diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h new file mode 100644 index 000000000..74d33a01d --- /dev/null +++ b/include/linux/mfd/da9062/core.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2015-2017 Dialog Semiconductor + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MFD_DA9062_CORE_H__ +#define __MFD_DA9062_CORE_H__ + +#include +#include + +enum da9062_compatible_types { + COMPAT_TYPE_DA9061 = 1, + COMPAT_TYPE_DA9062, +}; + +enum da9061_irqs { + /* IRQ A */ + DA9061_IRQ_ONKEY, + DA9061_IRQ_WDG_WARN, + DA9061_IRQ_SEQ_RDY, + /* IRQ B*/ + DA9061_IRQ_TEMP, + DA9061_IRQ_LDO_LIM, + DA9061_IRQ_DVC_RDY, + DA9061_IRQ_VDD_WARN, + /* IRQ C */ + DA9061_IRQ_GPI0, + DA9061_IRQ_GPI1, + DA9061_IRQ_GPI2, + DA9061_IRQ_GPI3, + DA9061_IRQ_GPI4, + + DA9061_NUM_IRQ, +}; + +enum da9062_irqs { + /* IRQ A */ + DA9062_IRQ_ONKEY, + DA9062_IRQ_ALARM, + DA9062_IRQ_TICK, + DA9062_IRQ_WDG_WARN, + DA9062_IRQ_SEQ_RDY, + /* IRQ B*/ + DA9062_IRQ_TEMP, + DA9062_IRQ_LDO_LIM, + DA9062_IRQ_DVC_RDY, + DA9062_IRQ_VDD_WARN, + /* IRQ C */ + DA9062_IRQ_GPI0, + DA9062_IRQ_GPI1, + DA9062_IRQ_GPI2, + DA9062_IRQ_GPI3, + DA9062_IRQ_GPI4, + + DA9062_NUM_IRQ, +}; + +struct da9062 { + struct device *dev; + struct regmap *regmap; + struct regmap_irq_chip_data *regmap_irq; + enum da9062_compatible_types chip_type; +}; + +#endif /* __MFD_DA9062_CORE_H__ */ diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h new file mode 100644 index 000000000..18d576aed --- /dev/null +++ b/include/linux/mfd/da9062/registers.h @@ -0,0 +1,1109 @@ +/* + * Copyright (C) 2015-2017 Dialog Semiconductor + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __DA9062_H__ +#define __DA9062_H__ + +#define DA9062_PMIC_DEVICE_ID 0x62 +#define DA9062_PMIC_VARIANT_MRC_AA 0x01 +#define DA9062_PMIC_VARIANT_VRC_DA9061 0x01 +#define DA9062_PMIC_VARIANT_VRC_DA9062 0x02 + +#define DA9062_I2C_PAGE_SEL_SHIFT 1 + +/* + * Registers + */ + +#define DA9062AA_PAGE_CON 0x000 +#define DA9062AA_STATUS_A 0x001 +#define DA9062AA_STATUS_B 0x002 +#define DA9062AA_STATUS_D 0x004 +#define DA9062AA_FAULT_LOG 0x005 +#define DA9062AA_EVENT_A 0x006 +#define DA9062AA_EVENT_B 0x007 +#define DA9062AA_EVENT_C 0x008 +#define DA9062AA_IRQ_MASK_A 0x00A +#define DA9062AA_IRQ_MASK_B 0x00B +#define DA9062AA_IRQ_MASK_C 0x00C +#define DA9062AA_CONTROL_A 0x00E +#define DA9062AA_CONTROL_B 0x00F +#define DA9062AA_CONTROL_C 0x010 +#define DA9062AA_CONTROL_D 0x011 +#define DA9062AA_CONTROL_E 0x012 +#define DA9062AA_CONTROL_F 0x013 +#define DA9062AA_PD_DIS 0x014 +#define DA9062AA_GPIO_0_1 0x015 +#define DA9062AA_GPIO_2_3 0x016 +#define DA9062AA_GPIO_4 0x017 +#define DA9062AA_GPIO_WKUP_MODE 0x01C +#define DA9062AA_GPIO_MODE0_4 0x01D +#define DA9062AA_GPIO_OUT0_2 0x01E +#define DA9062AA_GPIO_OUT3_4 0x01F +#define DA9062AA_BUCK2_CONT 0x020 +#define DA9062AA_BUCK1_CONT 0x021 +#define DA9062AA_BUCK4_CONT 0x022 +#define DA9062AA_BUCK3_CONT 0x024 +#define DA9062AA_LDO1_CONT 0x026 +#define DA9062AA_LDO2_CONT 0x027 +#define DA9062AA_LDO3_CONT 0x028 +#define DA9062AA_LDO4_CONT 0x029 +#define DA9062AA_DVC_1 0x032 +#define DA9062AA_COUNT_S 0x040 +#define DA9062AA_COUNT_MI 0x041 +#define DA9062AA_COUNT_H 0x042 +#define DA9062AA_COUNT_D 0x043 +#define DA9062AA_COUNT_MO 0x044 +#define DA9062AA_COUNT_Y 0x045 +#define DA9062AA_ALARM_S 0x046 +#define DA9062AA_ALARM_MI 0x047 +#define DA9062AA_ALARM_H 0x048 +#define DA9062AA_ALARM_D 0x049 +#define DA9062AA_ALARM_MO 0x04A +#define DA9062AA_ALARM_Y 0x04B +#define DA9062AA_SECOND_A 0x04C +#define DA9062AA_SECOND_B 0x04D +#define DA9062AA_SECOND_C 0x04E +#define DA9062AA_SECOND_D 0x04F +#define DA9062AA_SEQ 0x081 +#define DA9062AA_SEQ_TIMER 0x082 +#define DA9062AA_ID_2_1 0x083 +#define DA9062AA_ID_4_3 0x084 +#define DA9062AA_ID_12_11 0x088 +#define DA9062AA_ID_14_13 0x089 +#define DA9062AA_ID_16_15 0x08A +#define DA9062AA_ID_22_21 0x08D +#define DA9062AA_ID_24_23 0x08E +#define DA9062AA_ID_26_25 0x08F +#define DA9062AA_ID_28_27 0x090 +#define DA9062AA_ID_30_29 0x091 +#define DA9062AA_ID_32_31 0x092 +#define DA9062AA_SEQ_A 0x095 +#define DA9062AA_SEQ_B 0x096 +#define DA9062AA_WAIT 0x097 +#define DA9062AA_EN_32K 0x098 +#define DA9062AA_RESET 0x099 +#define DA9062AA_BUCK_ILIM_A 0x09A +#define DA9062AA_BUCK_ILIM_B 0x09B +#define DA9062AA_BUCK_ILIM_C 0x09C +#define DA9062AA_BUCK2_CFG 0x09D +#define DA9062AA_BUCK1_CFG 0x09E +#define DA9062AA_BUCK4_CFG 0x09F +#define DA9062AA_BUCK3_CFG 0x0A0 +#define DA9062AA_VBUCK2_A 0x0A3 +#define DA9062AA_VBUCK1_A 0x0A4 +#define DA9062AA_VBUCK4_A 0x0A5 +#define DA9062AA_VBUCK3_A 0x0A7 +#define DA9062AA_VLDO1_A 0x0A9 +#define DA9062AA_VLDO2_A 0x0AA +#define DA9062AA_VLDO3_A 0x0AB +#define DA9062AA_VLDO4_A 0x0AC +#define DA9062AA_VBUCK2_B 0x0B4 +#define DA9062AA_VBUCK1_B 0x0B5 +#define DA9062AA_VBUCK4_B 0x0B6 +#define DA9062AA_VBUCK3_B 0x0B8 +#define DA9062AA_VLDO1_B 0x0BA +#define DA9062AA_VLDO2_B 0x0BB +#define DA9062AA_VLDO3_B 0x0BC +#define DA9062AA_VLDO4_B 0x0BD +#define DA9062AA_BBAT_CONT 0x0C5 +#define DA9062AA_INTERFACE 0x105 +#define DA9062AA_CONFIG_A 0x106 +#define DA9062AA_CONFIG_B 0x107 +#define DA9062AA_CONFIG_C 0x108 +#define DA9062AA_CONFIG_D 0x109 +#define DA9062AA_CONFIG_E 0x10A +#define DA9062AA_CONFIG_G 
0x10C +#define DA9062AA_CONFIG_H 0x10D +#define DA9062AA_CONFIG_I 0x10E +#define DA9062AA_CONFIG_J 0x10F +#define DA9062AA_CONFIG_K 0x110 +#define DA9062AA_CONFIG_M 0x112 +#define DA9062AA_TRIM_CLDR 0x120 +#define DA9062AA_GP_ID_0 0x121 +#define DA9062AA_GP_ID_1 0x122 +#define DA9062AA_GP_ID_2 0x123 +#define DA9062AA_GP_ID_3 0x124 +#define DA9062AA_GP_ID_4 0x125 +#define DA9062AA_GP_ID_5 0x126 +#define DA9062AA_GP_ID_6 0x127 +#define DA9062AA_GP_ID_7 0x128 +#define DA9062AA_GP_ID_8 0x129 +#define DA9062AA_GP_ID_9 0x12A +#define DA9062AA_GP_ID_10 0x12B +#define DA9062AA_GP_ID_11 0x12C +#define DA9062AA_GP_ID_12 0x12D +#define DA9062AA_GP_ID_13 0x12E +#define DA9062AA_GP_ID_14 0x12F +#define DA9062AA_GP_ID_15 0x130 +#define DA9062AA_GP_ID_16 0x131 +#define DA9062AA_GP_ID_17 0x132 +#define DA9062AA_GP_ID_18 0x133 +#define DA9062AA_GP_ID_19 0x134 +#define DA9062AA_DEVICE_ID 0x181 +#define DA9062AA_VARIANT_ID 0x182 +#define DA9062AA_CUSTOMER_ID 0x183 +#define DA9062AA_CONFIG_ID 0x184 + +/* + * Bit fields + */ + +/* DA9062AA_PAGE_CON = 0x000 */ +#define DA9062AA_PAGE_SHIFT 0 +#define DA9062AA_PAGE_MASK 0x3f +#define DA9062AA_WRITE_MODE_SHIFT 6 +#define DA9062AA_WRITE_MODE_MASK BIT(6) +#define DA9062AA_REVERT_SHIFT 7 +#define DA9062AA_REVERT_MASK BIT(7) + +/* DA9062AA_STATUS_A = 0x001 */ +#define DA9062AA_NONKEY_SHIFT 0 +#define DA9062AA_NONKEY_MASK 0x01 +#define DA9062AA_DVC_BUSY_SHIFT 2 +#define DA9062AA_DVC_BUSY_MASK BIT(2) + +/* DA9062AA_STATUS_B = 0x002 */ +#define DA9062AA_GPI0_SHIFT 0 +#define DA9062AA_GPI0_MASK 0x01 +#define DA9062AA_GPI1_SHIFT 1 +#define DA9062AA_GPI1_MASK BIT(1) +#define DA9062AA_GPI2_SHIFT 2 +#define DA9062AA_GPI2_MASK BIT(2) +#define DA9062AA_GPI3_SHIFT 3 +#define DA9062AA_GPI3_MASK BIT(3) +#define DA9062AA_GPI4_SHIFT 4 +#define DA9062AA_GPI4_MASK BIT(4) + +/* DA9062AA_STATUS_D = 0x004 */ +#define DA9062AA_LDO1_ILIM_SHIFT 0 +#define DA9062AA_LDO1_ILIM_MASK 0x01 +#define DA9062AA_LDO2_ILIM_SHIFT 1 +#define DA9062AA_LDO2_ILIM_MASK BIT(1) +#define DA9062AA_LDO3_ILIM_SHIFT 2 +#define DA9062AA_LDO3_ILIM_MASK BIT(2) +#define DA9062AA_LDO4_ILIM_SHIFT 3 +#define DA9062AA_LDO4_ILIM_MASK BIT(3) + +/* DA9062AA_FAULT_LOG = 0x005 */ +#define DA9062AA_TWD_ERROR_SHIFT 0 +#define DA9062AA_TWD_ERROR_MASK 0x01 +#define DA9062AA_POR_SHIFT 1 +#define DA9062AA_POR_MASK BIT(1) +#define DA9062AA_VDD_FAULT_SHIFT 2 +#define DA9062AA_VDD_FAULT_MASK BIT(2) +#define DA9062AA_VDD_START_SHIFT 3 +#define DA9062AA_VDD_START_MASK BIT(3) +#define DA9062AA_TEMP_CRIT_SHIFT 4 +#define DA9062AA_TEMP_CRIT_MASK BIT(4) +#define DA9062AA_KEY_RESET_SHIFT 5 +#define DA9062AA_KEY_RESET_MASK BIT(5) +#define DA9062AA_NSHUTDOWN_SHIFT 6 +#define DA9062AA_NSHUTDOWN_MASK BIT(6) +#define DA9062AA_WAIT_SHUT_SHIFT 7 +#define DA9062AA_WAIT_SHUT_MASK BIT(7) + +/* DA9062AA_EVENT_A = 0x006 */ +#define DA9062AA_E_NONKEY_SHIFT 0 +#define DA9062AA_E_NONKEY_MASK 0x01 +#define DA9062AA_E_ALARM_SHIFT 1 +#define DA9062AA_E_ALARM_MASK BIT(1) +#define DA9062AA_E_TICK_SHIFT 2 +#define DA9062AA_E_TICK_MASK BIT(2) +#define DA9062AA_E_WDG_WARN_SHIFT 3 +#define DA9062AA_E_WDG_WARN_MASK BIT(3) +#define DA9062AA_E_SEQ_RDY_SHIFT 4 +#define DA9062AA_E_SEQ_RDY_MASK BIT(4) +#define DA9062AA_EVENTS_B_SHIFT 5 +#define DA9062AA_EVENTS_B_MASK BIT(5) +#define DA9062AA_EVENTS_C_SHIFT 6 +#define DA9062AA_EVENTS_C_MASK BIT(6) + +/* DA9062AA_EVENT_B = 0x007 */ +#define DA9062AA_E_TEMP_SHIFT 1 +#define DA9062AA_E_TEMP_MASK BIT(1) +#define DA9062AA_E_LDO_LIM_SHIFT 3 +#define DA9062AA_E_LDO_LIM_MASK BIT(3) +#define DA9062AA_E_DVC_RDY_SHIFT 5 +#define 
DA9062AA_E_DVC_RDY_MASK BIT(5) +#define DA9062AA_E_VDD_WARN_SHIFT 7 +#define DA9062AA_E_VDD_WARN_MASK BIT(7) + +/* DA9062AA_EVENT_C = 0x008 */ +#define DA9062AA_E_GPI0_SHIFT 0 +#define DA9062AA_E_GPI0_MASK 0x01 +#define DA9062AA_E_GPI1_SHIFT 1 +#define DA9062AA_E_GPI1_MASK BIT(1) +#define DA9062AA_E_GPI2_SHIFT 2 +#define DA9062AA_E_GPI2_MASK BIT(2) +#define DA9062AA_E_GPI3_SHIFT 3 +#define DA9062AA_E_GPI3_MASK BIT(3) +#define DA9062AA_E_GPI4_SHIFT 4 +#define DA9062AA_E_GPI4_MASK BIT(4) + +/* DA9062AA_IRQ_MASK_A = 0x00A */ +#define DA9062AA_M_NONKEY_SHIFT 0 +#define DA9062AA_M_NONKEY_MASK 0x01 +#define DA9062AA_M_ALARM_SHIFT 1 +#define DA9062AA_M_ALARM_MASK BIT(1) +#define DA9062AA_M_TICK_SHIFT 2 +#define DA9062AA_M_TICK_MASK BIT(2) +#define DA9062AA_M_WDG_WARN_SHIFT 3 +#define DA9062AA_M_WDG_WARN_MASK BIT(3) +#define DA9062AA_M_SEQ_RDY_SHIFT 4 +#define DA9062AA_M_SEQ_RDY_MASK BIT(4) + +/* DA9062AA_IRQ_MASK_B = 0x00B */ +#define DA9062AA_M_TEMP_SHIFT 1 +#define DA9062AA_M_TEMP_MASK BIT(1) +#define DA9062AA_M_LDO_LIM_SHIFT 3 +#define DA9062AA_M_LDO_LIM_MASK BIT(3) +#define DA9062AA_M_DVC_RDY_SHIFT 5 +#define DA9062AA_M_DVC_RDY_MASK BIT(5) +#define DA9062AA_M_VDD_WARN_SHIFT 7 +#define DA9062AA_M_VDD_WARN_MASK BIT(7) + +/* DA9062AA_IRQ_MASK_C = 0x00C */ +#define DA9062AA_M_GPI0_SHIFT 0 +#define DA9062AA_M_GPI0_MASK 0x01 +#define DA9062AA_M_GPI1_SHIFT 1 +#define DA9062AA_M_GPI1_MASK BIT(1) +#define DA9062AA_M_GPI2_SHIFT 2 +#define DA9062AA_M_GPI2_MASK BIT(2) +#define DA9062AA_M_GPI3_SHIFT 3 +#define DA9062AA_M_GPI3_MASK BIT(3) +#define DA9062AA_M_GPI4_SHIFT 4 +#define DA9062AA_M_GPI4_MASK BIT(4) + +/* DA9062AA_CONTROL_A = 0x00E */ +#define DA9062AA_SYSTEM_EN_SHIFT 0 +#define DA9062AA_SYSTEM_EN_MASK 0x01 +#define DA9062AA_POWER_EN_SHIFT 1 +#define DA9062AA_POWER_EN_MASK BIT(1) +#define DA9062AA_POWER1_EN_SHIFT 2 +#define DA9062AA_POWER1_EN_MASK BIT(2) +#define DA9062AA_STANDBY_SHIFT 3 +#define DA9062AA_STANDBY_MASK BIT(3) +#define DA9062AA_M_SYSTEM_EN_SHIFT 4 +#define DA9062AA_M_SYSTEM_EN_MASK BIT(4) +#define DA9062AA_M_POWER_EN_SHIFT 5 +#define DA9062AA_M_POWER_EN_MASK BIT(5) +#define DA9062AA_M_POWER1_EN_SHIFT 6 +#define DA9062AA_M_POWER1_EN_MASK BIT(6) + +/* DA9062AA_CONTROL_B = 0x00F */ +#define DA9062AA_WATCHDOG_PD_SHIFT 1 +#define DA9062AA_WATCHDOG_PD_MASK BIT(1) +#define DA9062AA_FREEZE_EN_SHIFT 2 +#define DA9062AA_FREEZE_EN_MASK BIT(2) +#define DA9062AA_NRES_MODE_SHIFT 3 +#define DA9062AA_NRES_MODE_MASK BIT(3) +#define DA9062AA_NONKEY_LOCK_SHIFT 4 +#define DA9062AA_NONKEY_LOCK_MASK BIT(4) +#define DA9062AA_NFREEZE_SHIFT 5 +#define DA9062AA_NFREEZE_MASK (0x03 << 5) +#define DA9062AA_BUCK_SLOWSTART_SHIFT 7 +#define DA9062AA_BUCK_SLOWSTART_MASK BIT(7) + +/* DA9062AA_CONTROL_C = 0x010 */ +#define DA9062AA_DEBOUNCING_SHIFT 0 +#define DA9062AA_DEBOUNCING_MASK 0x07 +#define DA9062AA_AUTO_BOOT_SHIFT 3 +#define DA9062AA_AUTO_BOOT_MASK BIT(3) +#define DA9062AA_OTPREAD_EN_SHIFT 4 +#define DA9062AA_OTPREAD_EN_MASK BIT(4) +#define DA9062AA_SLEW_RATE_SHIFT 5 +#define DA9062AA_SLEW_RATE_MASK (0x03 << 5) +#define DA9062AA_DEF_SUPPLY_SHIFT 7 +#define DA9062AA_DEF_SUPPLY_MASK BIT(7) + +/* DA9062AA_CONTROL_D = 0x011 */ +#define DA9062AA_TWDSCALE_SHIFT 0 +#define DA9062AA_TWDSCALE_MASK 0x07 + +/* DA9062AA_CONTROL_E = 0x012 */ +#define DA9062AA_RTC_MODE_PD_SHIFT 0 +#define DA9062AA_RTC_MODE_PD_MASK 0x01 +#define DA9062AA_RTC_MODE_SD_SHIFT 1 +#define DA9062AA_RTC_MODE_SD_MASK BIT(1) +#define DA9062AA_RTC_EN_SHIFT 2 +#define DA9062AA_RTC_EN_MASK BIT(2) +#define DA9062AA_V_LOCK_SHIFT 7 +#define 
DA9062AA_V_LOCK_MASK BIT(7) + +/* DA9062AA_CONTROL_F = 0x013 */ +#define DA9062AA_WATCHDOG_SHIFT 0 +#define DA9062AA_WATCHDOG_MASK 0x01 +#define DA9062AA_SHUTDOWN_SHIFT 1 +#define DA9062AA_SHUTDOWN_MASK BIT(1) +#define DA9062AA_WAKE_UP_SHIFT 2 +#define DA9062AA_WAKE_UP_MASK BIT(2) + +/* DA9062AA_PD_DIS = 0x014 */ +#define DA9062AA_GPI_DIS_SHIFT 0 +#define DA9062AA_GPI_DIS_MASK 0x01 +#define DA9062AA_PMIF_DIS_SHIFT 2 +#define DA9062AA_PMIF_DIS_MASK BIT(2) +#define DA9062AA_CLDR_PAUSE_SHIFT 4 +#define DA9062AA_CLDR_PAUSE_MASK BIT(4) +#define DA9062AA_BBAT_DIS_SHIFT 5 +#define DA9062AA_BBAT_DIS_MASK BIT(5) +#define DA9062AA_OUT32K_PAUSE_SHIFT 6 +#define DA9062AA_OUT32K_PAUSE_MASK BIT(6) +#define DA9062AA_PMCONT_DIS_SHIFT 7 +#define DA9062AA_PMCONT_DIS_MASK BIT(7) + +/* DA9062AA_GPIO_0_1 = 0x015 */ +#define DA9062AA_GPIO0_PIN_SHIFT 0 +#define DA9062AA_GPIO0_PIN_MASK 0x03 +#define DA9062AA_GPIO0_TYPE_SHIFT 2 +#define DA9062AA_GPIO0_TYPE_MASK BIT(2) +#define DA9062AA_GPIO0_WEN_SHIFT 3 +#define DA9062AA_GPIO0_WEN_MASK BIT(3) +#define DA9062AA_GPIO1_PIN_SHIFT 4 +#define DA9062AA_GPIO1_PIN_MASK (0x03 << 4) +#define DA9062AA_GPIO1_TYPE_SHIFT 6 +#define DA9062AA_GPIO1_TYPE_MASK BIT(6) +#define DA9062AA_GPIO1_WEN_SHIFT 7 +#define DA9062AA_GPIO1_WEN_MASK BIT(7) + +/* DA9062AA_GPIO_2_3 = 0x016 */ +#define DA9062AA_GPIO2_PIN_SHIFT 0 +#define DA9062AA_GPIO2_PIN_MASK 0x03 +#define DA9062AA_GPIO2_TYPE_SHIFT 2 +#define DA9062AA_GPIO2_TYPE_MASK BIT(2) +#define DA9062AA_GPIO2_WEN_SHIFT 3 +#define DA9062AA_GPIO2_WEN_MASK BIT(3) +#define DA9062AA_GPIO3_PIN_SHIFT 4 +#define DA9062AA_GPIO3_PIN_MASK (0x03 << 4) +#define DA9062AA_GPIO3_TYPE_SHIFT 6 +#define DA9062AA_GPIO3_TYPE_MASK BIT(6) +#define DA9062AA_GPIO3_WEN_SHIFT 7 +#define DA9062AA_GPIO3_WEN_MASK BIT(7) + +/* DA9062AA_GPIO_4 = 0x017 */ +#define DA9062AA_GPIO4_PIN_SHIFT 0 +#define DA9062AA_GPIO4_PIN_MASK 0x03 +#define DA9062AA_GPIO4_TYPE_SHIFT 2 +#define DA9062AA_GPIO4_TYPE_MASK BIT(2) +#define DA9062AA_GPIO4_WEN_SHIFT 3 +#define DA9062AA_GPIO4_WEN_MASK BIT(3) + +/* DA9062AA_GPIO_WKUP_MODE = 0x01C */ +#define DA9062AA_GPIO0_WKUP_MODE_SHIFT 0 +#define DA9062AA_GPIO0_WKUP_MODE_MASK 0x01 +#define DA9062AA_GPIO1_WKUP_MODE_SHIFT 1 +#define DA9062AA_GPIO1_WKUP_MODE_MASK BIT(1) +#define DA9062AA_GPIO2_WKUP_MODE_SHIFT 2 +#define DA9062AA_GPIO2_WKUP_MODE_MASK BIT(2) +#define DA9062AA_GPIO3_WKUP_MODE_SHIFT 3 +#define DA9062AA_GPIO3_WKUP_MODE_MASK BIT(3) +#define DA9062AA_GPIO4_WKUP_MODE_SHIFT 4 +#define DA9062AA_GPIO4_WKUP_MODE_MASK BIT(4) + +/* DA9062AA_GPIO_MODE0_4 = 0x01D */ +#define DA9062AA_GPIO0_MODE_SHIFT 0 +#define DA9062AA_GPIO0_MODE_MASK 0x01 +#define DA9062AA_GPIO1_MODE_SHIFT 1 +#define DA9062AA_GPIO1_MODE_MASK BIT(1) +#define DA9062AA_GPIO2_MODE_SHIFT 2 +#define DA9062AA_GPIO2_MODE_MASK BIT(2) +#define DA9062AA_GPIO3_MODE_SHIFT 3 +#define DA9062AA_GPIO3_MODE_MASK BIT(3) +#define DA9062AA_GPIO4_MODE_SHIFT 4 +#define DA9062AA_GPIO4_MODE_MASK BIT(4) + +/* DA9062AA_GPIO_OUT0_2 = 0x01E */ +#define DA9062AA_GPIO0_OUT_SHIFT 0 +#define DA9062AA_GPIO0_OUT_MASK 0x07 +#define DA9062AA_GPIO1_OUT_SHIFT 3 +#define DA9062AA_GPIO1_OUT_MASK (0x07 << 3) +#define DA9062AA_GPIO2_OUT_SHIFT 6 +#define DA9062AA_GPIO2_OUT_MASK (0x03 << 6) + +/* DA9062AA_GPIO_OUT3_4 = 0x01F */ +#define DA9062AA_GPIO3_OUT_SHIFT 0 +#define DA9062AA_GPIO3_OUT_MASK 0x07 +#define DA9062AA_GPIO4_OUT_SHIFT 3 +#define DA9062AA_GPIO4_OUT_MASK (0x03 << 3) + +/* DA9062AA_BUCK2_CONT = 0x020 */ +#define DA9062AA_BUCK2_EN_SHIFT 0 +#define DA9062AA_BUCK2_EN_MASK 0x01 +#define DA9062AA_BUCK2_GPI_SHIFT 1 +#define 
DA9062AA_BUCK2_GPI_MASK (0x03 << 1) +#define DA9062AA_BUCK2_CONF_SHIFT 3 +#define DA9062AA_BUCK2_CONF_MASK BIT(3) +#define DA9062AA_VBUCK2_GPI_SHIFT 5 +#define DA9062AA_VBUCK2_GPI_MASK (0x03 << 5) + +/* DA9062AA_BUCK1_CONT = 0x021 */ +#define DA9062AA_BUCK1_EN_SHIFT 0 +#define DA9062AA_BUCK1_EN_MASK 0x01 +#define DA9062AA_BUCK1_GPI_SHIFT 1 +#define DA9062AA_BUCK1_GPI_MASK (0x03 << 1) +#define DA9062AA_BUCK1_CONF_SHIFT 3 +#define DA9062AA_BUCK1_CONF_MASK BIT(3) +#define DA9062AA_VBUCK1_GPI_SHIFT 5 +#define DA9062AA_VBUCK1_GPI_MASK (0x03 << 5) + +/* DA9062AA_BUCK4_CONT = 0x022 */ +#define DA9062AA_BUCK4_EN_SHIFT 0 +#define DA9062AA_BUCK4_EN_MASK 0x01 +#define DA9062AA_BUCK4_GPI_SHIFT 1 +#define DA9062AA_BUCK4_GPI_MASK (0x03 << 1) +#define DA9062AA_BUCK4_CONF_SHIFT 3 +#define DA9062AA_BUCK4_CONF_MASK BIT(3) +#define DA9062AA_VBUCK4_GPI_SHIFT 5 +#define DA9062AA_VBUCK4_GPI_MASK (0x03 << 5) + +/* DA9062AA_BUCK3_CONT = 0x024 */ +#define DA9062AA_BUCK3_EN_SHIFT 0 +#define DA9062AA_BUCK3_EN_MASK 0x01 +#define DA9062AA_BUCK3_GPI_SHIFT 1 +#define DA9062AA_BUCK3_GPI_MASK (0x03 << 1) +#define DA9062AA_BUCK3_CONF_SHIFT 3 +#define DA9062AA_BUCK3_CONF_MASK BIT(3) +#define DA9062AA_VBUCK3_GPI_SHIFT 5 +#define DA9062AA_VBUCK3_GPI_MASK (0x03 << 5) + +/* DA9062AA_LDO1_CONT = 0x026 */ +#define DA9062AA_LDO1_EN_SHIFT 0 +#define DA9062AA_LDO1_EN_MASK 0x01 +#define DA9062AA_LDO1_GPI_SHIFT 1 +#define DA9062AA_LDO1_GPI_MASK (0x03 << 1) +#define DA9062AA_LDO1_PD_DIS_SHIFT 3 +#define DA9062AA_LDO1_PD_DIS_MASK BIT(3) +#define DA9062AA_VLDO1_GPI_SHIFT 5 +#define DA9062AA_VLDO1_GPI_MASK (0x03 << 5) +#define DA9062AA_LDO1_CONF_SHIFT 7 +#define DA9062AA_LDO1_CONF_MASK BIT(7) + +/* DA9062AA_LDO2_CONT = 0x027 */ +#define DA9062AA_LDO2_EN_SHIFT 0 +#define DA9062AA_LDO2_EN_MASK 0x01 +#define DA9062AA_LDO2_GPI_SHIFT 1 +#define DA9062AA_LDO2_GPI_MASK (0x03 << 1) +#define DA9062AA_LDO2_PD_DIS_SHIFT 3 +#define DA9062AA_LDO2_PD_DIS_MASK BIT(3) +#define DA9062AA_VLDO2_GPI_SHIFT 5 +#define DA9062AA_VLDO2_GPI_MASK (0x03 << 5) +#define DA9062AA_LDO2_CONF_SHIFT 7 +#define DA9062AA_LDO2_CONF_MASK BIT(7) + +/* DA9062AA_LDO3_CONT = 0x028 */ +#define DA9062AA_LDO3_EN_SHIFT 0 +#define DA9062AA_LDO3_EN_MASK 0x01 +#define DA9062AA_LDO3_GPI_SHIFT 1 +#define DA9062AA_LDO3_GPI_MASK (0x03 << 1) +#define DA9062AA_LDO3_PD_DIS_SHIFT 3 +#define DA9062AA_LDO3_PD_DIS_MASK BIT(3) +#define DA9062AA_VLDO3_GPI_SHIFT 5 +#define DA9062AA_VLDO3_GPI_MASK (0x03 << 5) +#define DA9062AA_LDO3_CONF_SHIFT 7 +#define DA9062AA_LDO3_CONF_MASK BIT(7) + +/* DA9062AA_LDO4_CONT = 0x029 */ +#define DA9062AA_LDO4_EN_SHIFT 0 +#define DA9062AA_LDO4_EN_MASK 0x01 +#define DA9062AA_LDO4_GPI_SHIFT 1 +#define DA9062AA_LDO4_GPI_MASK (0x03 << 1) +#define DA9062AA_LDO4_PD_DIS_SHIFT 3 +#define DA9062AA_LDO4_PD_DIS_MASK BIT(3) +#define DA9062AA_VLDO4_GPI_SHIFT 5 +#define DA9062AA_VLDO4_GPI_MASK (0x03 << 5) +#define DA9062AA_LDO4_CONF_SHIFT 7 +#define DA9062AA_LDO4_CONF_MASK BIT(7) + +/* DA9062AA_DVC_1 = 0x032 */ +#define DA9062AA_VBUCK1_SEL_SHIFT 0 +#define DA9062AA_VBUCK1_SEL_MASK 0x01 +#define DA9062AA_VBUCK2_SEL_SHIFT 1 +#define DA9062AA_VBUCK2_SEL_MASK BIT(1) +#define DA9062AA_VBUCK4_SEL_SHIFT 2 +#define DA9062AA_VBUCK4_SEL_MASK BIT(2) +#define DA9062AA_VBUCK3_SEL_SHIFT 3 +#define DA9062AA_VBUCK3_SEL_MASK BIT(3) +#define DA9062AA_VLDO1_SEL_SHIFT 4 +#define DA9062AA_VLDO1_SEL_MASK BIT(4) +#define DA9062AA_VLDO2_SEL_SHIFT 5 +#define DA9062AA_VLDO2_SEL_MASK BIT(5) +#define DA9062AA_VLDO3_SEL_SHIFT 6 +#define DA9062AA_VLDO3_SEL_MASK BIT(6) +#define DA9062AA_VLDO4_SEL_SHIFT 7 +#define 
DA9062AA_VLDO4_SEL_MASK BIT(7) + +/* DA9062AA_COUNT_S = 0x040 */ +#define DA9062AA_COUNT_SEC_SHIFT 0 +#define DA9062AA_COUNT_SEC_MASK 0x3f +#define DA9062AA_RTC_READ_SHIFT 7 +#define DA9062AA_RTC_READ_MASK BIT(7) + +/* DA9062AA_COUNT_MI = 0x041 */ +#define DA9062AA_COUNT_MIN_SHIFT 0 +#define DA9062AA_COUNT_MIN_MASK 0x3f + +/* DA9062AA_COUNT_H = 0x042 */ +#define DA9062AA_COUNT_HOUR_SHIFT 0 +#define DA9062AA_COUNT_HOUR_MASK 0x1f + +/* DA9062AA_COUNT_D = 0x043 */ +#define DA9062AA_COUNT_DAY_SHIFT 0 +#define DA9062AA_COUNT_DAY_MASK 0x1f + +/* DA9062AA_COUNT_MO = 0x044 */ +#define DA9062AA_COUNT_MONTH_SHIFT 0 +#define DA9062AA_COUNT_MONTH_MASK 0x0f + +/* DA9062AA_COUNT_Y = 0x045 */ +#define DA9062AA_COUNT_YEAR_SHIFT 0 +#define DA9062AA_COUNT_YEAR_MASK 0x3f +#define DA9062AA_MONITOR_SHIFT 6 +#define DA9062AA_MONITOR_MASK BIT(6) + +/* DA9062AA_ALARM_S = 0x046 */ +#define DA9062AA_ALARM_SEC_SHIFT 0 +#define DA9062AA_ALARM_SEC_MASK 0x3f +#define DA9062AA_ALARM_STATUS_SHIFT 6 +#define DA9062AA_ALARM_STATUS_MASK (0x03 << 6) + +/* DA9062AA_ALARM_MI = 0x047 */ +#define DA9062AA_ALARM_MIN_SHIFT 0 +#define DA9062AA_ALARM_MIN_MASK 0x3f + +/* DA9062AA_ALARM_H = 0x048 */ +#define DA9062AA_ALARM_HOUR_SHIFT 0 +#define DA9062AA_ALARM_HOUR_MASK 0x1f + +/* DA9062AA_ALARM_D = 0x049 */ +#define DA9062AA_ALARM_DAY_SHIFT 0 +#define DA9062AA_ALARM_DAY_MASK 0x1f + +/* DA9062AA_ALARM_MO = 0x04A */ +#define DA9062AA_ALARM_MONTH_SHIFT 0 +#define DA9062AA_ALARM_MONTH_MASK 0x0f +#define DA9062AA_TICK_TYPE_SHIFT 4 +#define DA9062AA_TICK_TYPE_MASK BIT(4) +#define DA9062AA_TICK_WAKE_SHIFT 5 +#define DA9062AA_TICK_WAKE_MASK BIT(5) + +/* DA9062AA_ALARM_Y = 0x04B */ +#define DA9062AA_ALARM_YEAR_SHIFT 0 +#define DA9062AA_ALARM_YEAR_MASK 0x3f +#define DA9062AA_ALARM_ON_SHIFT 6 +#define DA9062AA_ALARM_ON_MASK BIT(6) +#define DA9062AA_TICK_ON_SHIFT 7 +#define DA9062AA_TICK_ON_MASK BIT(7) + +/* DA9062AA_SECOND_A = 0x04C */ +#define DA9062AA_SECONDS_A_SHIFT 0 +#define DA9062AA_SECONDS_A_MASK 0xff + +/* DA9062AA_SECOND_B = 0x04D */ +#define DA9062AA_SECONDS_B_SHIFT 0 +#define DA9062AA_SECONDS_B_MASK 0xff + +/* DA9062AA_SECOND_C = 0x04E */ +#define DA9062AA_SECONDS_C_SHIFT 0 +#define DA9062AA_SECONDS_C_MASK 0xff + +/* DA9062AA_SECOND_D = 0x04F */ +#define DA9062AA_SECONDS_D_SHIFT 0 +#define DA9062AA_SECONDS_D_MASK 0xff + +/* DA9062AA_SEQ = 0x081 */ +#define DA9062AA_SEQ_POINTER_SHIFT 0 +#define DA9062AA_SEQ_POINTER_MASK 0x0f +#define DA9062AA_NXT_SEQ_START_SHIFT 4 +#define DA9062AA_NXT_SEQ_START_MASK (0x0f << 4) + +/* DA9062AA_SEQ_TIMER = 0x082 */ +#define DA9062AA_SEQ_TIME_SHIFT 0 +#define DA9062AA_SEQ_TIME_MASK 0x0f +#define DA9062AA_SEQ_DUMMY_SHIFT 4 +#define DA9062AA_SEQ_DUMMY_MASK (0x0f << 4) + +/* DA9062AA_ID_2_1 = 0x083 */ +#define DA9062AA_LDO1_STEP_SHIFT 0 +#define DA9062AA_LDO1_STEP_MASK 0x0f +#define DA9062AA_LDO2_STEP_SHIFT 4 +#define DA9062AA_LDO2_STEP_MASK (0x0f << 4) + +/* DA9062AA_ID_4_3 = 0x084 */ +#define DA9062AA_LDO3_STEP_SHIFT 0 +#define DA9062AA_LDO3_STEP_MASK 0x0f +#define DA9062AA_LDO4_STEP_SHIFT 4 +#define DA9062AA_LDO4_STEP_MASK (0x0f << 4) + +/* DA9062AA_ID_12_11 = 0x088 */ +#define DA9062AA_PD_DIS_STEP_SHIFT 4 +#define DA9062AA_PD_DIS_STEP_MASK (0x0f << 4) + +/* DA9062AA_ID_14_13 = 0x089 */ +#define DA9062AA_BUCK1_STEP_SHIFT 0 +#define DA9062AA_BUCK1_STEP_MASK 0x0f +#define DA9062AA_BUCK2_STEP_SHIFT 4 +#define DA9062AA_BUCK2_STEP_MASK (0x0f << 4) + +/* DA9062AA_ID_16_15 = 0x08A */ +#define DA9062AA_BUCK4_STEP_SHIFT 0 +#define DA9062AA_BUCK4_STEP_MASK 0x0f +#define DA9062AA_BUCK3_STEP_SHIFT 4 +#define 
DA9062AA_BUCK3_STEP_MASK (0x0f << 4) + +/* DA9062AA_ID_22_21 = 0x08D */ +#define DA9062AA_GP_RISE1_STEP_SHIFT 0 +#define DA9062AA_GP_RISE1_STEP_MASK 0x0f +#define DA9062AA_GP_FALL1_STEP_SHIFT 4 +#define DA9062AA_GP_FALL1_STEP_MASK (0x0f << 4) + +/* DA9062AA_ID_24_23 = 0x08E */ +#define DA9062AA_GP_RISE2_STEP_SHIFT 0 +#define DA9062AA_GP_RISE2_STEP_MASK 0x0f +#define DA9062AA_GP_FALL2_STEP_SHIFT 4 +#define DA9062AA_GP_FALL2_STEP_MASK (0x0f << 4) + +/* DA9062AA_ID_26_25 = 0x08F */ +#define DA9062AA_GP_RISE3_STEP_SHIFT 0 +#define DA9062AA_GP_RISE3_STEP_MASK 0x0f +#define DA9062AA_GP_FALL3_STEP_SHIFT 4 +#define DA9062AA_GP_FALL3_STEP_MASK (0x0f << 4) + +/* DA9062AA_ID_28_27 = 0x090 */ +#define DA9062AA_GP_RISE4_STEP_SHIFT 0 +#define DA9062AA_GP_RISE4_STEP_MASK 0x0f +#define DA9062AA_GP_FALL4_STEP_SHIFT 4 +#define DA9062AA_GP_FALL4_STEP_MASK (0x0f << 4) + +/* DA9062AA_ID_30_29 = 0x091 */ +#define DA9062AA_GP_RISE5_STEP_SHIFT 0 +#define DA9062AA_GP_RISE5_STEP_MASK 0x0f +#define DA9062AA_GP_FALL5_STEP_SHIFT 4 +#define DA9062AA_GP_FALL5_STEP_MASK (0x0f << 4) + +/* DA9062AA_ID_32_31 = 0x092 */ +#define DA9062AA_WAIT_STEP_SHIFT 0 +#define DA9062AA_WAIT_STEP_MASK 0x0f +#define DA9062AA_EN32K_STEP_SHIFT 4 +#define DA9062AA_EN32K_STEP_MASK (0x0f << 4) + +/* DA9062AA_SEQ_A = 0x095 */ +#define DA9062AA_SYSTEM_END_SHIFT 0 +#define DA9062AA_SYSTEM_END_MASK 0x0f +#define DA9062AA_POWER_END_SHIFT 4 +#define DA9062AA_POWER_END_MASK (0x0f << 4) + +/* DA9062AA_SEQ_B = 0x096 */ +#define DA9062AA_MAX_COUNT_SHIFT 0 +#define DA9062AA_MAX_COUNT_MASK 0x0f +#define DA9062AA_PART_DOWN_SHIFT 4 +#define DA9062AA_PART_DOWN_MASK (0x0f << 4) + +/* DA9062AA_WAIT = 0x097 */ +#define DA9062AA_WAIT_TIME_SHIFT 0 +#define DA9062AA_WAIT_TIME_MASK 0x0f +#define DA9062AA_WAIT_MODE_SHIFT 4 +#define DA9062AA_WAIT_MODE_MASK BIT(4) +#define DA9062AA_TIME_OUT_SHIFT 5 +#define DA9062AA_TIME_OUT_MASK BIT(5) +#define DA9062AA_WAIT_DIR_SHIFT 6 +#define DA9062AA_WAIT_DIR_MASK (0x03 << 6) + +/* DA9062AA_EN_32K = 0x098 */ +#define DA9062AA_STABILISATION_TIME_SHIFT 0 +#define DA9062AA_STABILISATION_TIME_MASK 0x07 +#define DA9062AA_CRYSTAL_SHIFT 3 +#define DA9062AA_CRYSTAL_MASK BIT(3) +#define DA9062AA_DELAY_MODE_SHIFT 4 +#define DA9062AA_DELAY_MODE_MASK BIT(4) +#define DA9062AA_OUT_CLOCK_SHIFT 5 +#define DA9062AA_OUT_CLOCK_MASK BIT(5) +#define DA9062AA_RTC_CLOCK_SHIFT 6 +#define DA9062AA_RTC_CLOCK_MASK BIT(6) +#define DA9062AA_EN_32KOUT_SHIFT 7 +#define DA9062AA_EN_32KOUT_MASK BIT(7) + +/* DA9062AA_RESET = 0x099 */ +#define DA9062AA_RESET_TIMER_SHIFT 0 +#define DA9062AA_RESET_TIMER_MASK 0x3f +#define DA9062AA_RESET_EVENT_SHIFT 6 +#define DA9062AA_RESET_EVENT_MASK (0x03 << 6) + +/* DA9062AA_BUCK_ILIM_A = 0x09A */ +#define DA9062AA_BUCK3_ILIM_SHIFT 0 +#define DA9062AA_BUCK3_ILIM_MASK 0x0f + +/* DA9062AA_BUCK_ILIM_B = 0x09B */ +#define DA9062AA_BUCK4_ILIM_SHIFT 0 +#define DA9062AA_BUCK4_ILIM_MASK 0x0f + +/* DA9062AA_BUCK_ILIM_C = 0x09C */ +#define DA9062AA_BUCK1_ILIM_SHIFT 0 +#define DA9062AA_BUCK1_ILIM_MASK 0x0f +#define DA9062AA_BUCK2_ILIM_SHIFT 4 +#define DA9062AA_BUCK2_ILIM_MASK (0x0f << 4) + +/* DA9062AA_BUCK2_CFG = 0x09D */ +#define DA9062AA_BUCK2_PD_DIS_SHIFT 5 +#define DA9062AA_BUCK2_PD_DIS_MASK BIT(5) +#define DA9062AA_BUCK2_MODE_SHIFT 6 +#define DA9062AA_BUCK2_MODE_MASK (0x03 << 6) + +/* DA9062AA_BUCK1_CFG = 0x09E */ +#define DA9062AA_BUCK1_PD_DIS_SHIFT 5 +#define DA9062AA_BUCK1_PD_DIS_MASK BIT(5) +#define DA9062AA_BUCK1_MODE_SHIFT 6 +#define DA9062AA_BUCK1_MODE_MASK (0x03 << 6) + +/* DA9062AA_BUCK4_CFG = 0x09F */ +#define 
DA9062AA_BUCK4_VTTR_EN_SHIFT 3 +#define DA9062AA_BUCK4_VTTR_EN_MASK BIT(3) +#define DA9062AA_BUCK4_VTT_EN_SHIFT 4 +#define DA9062AA_BUCK4_VTT_EN_MASK BIT(4) +#define DA9062AA_BUCK4_PD_DIS_SHIFT 5 +#define DA9062AA_BUCK4_PD_DIS_MASK BIT(5) +#define DA9062AA_BUCK4_MODE_SHIFT 6 +#define DA9062AA_BUCK4_MODE_MASK (0x03 << 6) + +/* DA9062AA_BUCK3_CFG = 0x0A0 */ +#define DA9062AA_BUCK3_PD_DIS_SHIFT 5 +#define DA9062AA_BUCK3_PD_DIS_MASK BIT(5) +#define DA9062AA_BUCK3_MODE_SHIFT 6 +#define DA9062AA_BUCK3_MODE_MASK (0x03 << 6) + +/* DA9062AA_VBUCK2_A = 0x0A3 */ +#define DA9062AA_VBUCK2_A_SHIFT 0 +#define DA9062AA_VBUCK2_A_MASK 0x7f +#define DA9062AA_BUCK2_SL_A_SHIFT 7 +#define DA9062AA_BUCK2_SL_A_MASK BIT(7) + +/* DA9062AA_VBUCK1_A = 0x0A4 */ +#define DA9062AA_VBUCK1_A_SHIFT 0 +#define DA9062AA_VBUCK1_A_MASK 0x7f +#define DA9062AA_BUCK1_SL_A_SHIFT 7 +#define DA9062AA_BUCK1_SL_A_MASK BIT(7) + +/* DA9062AA_VBUCK4_A = 0x0A5 */ +#define DA9062AA_VBUCK4_A_SHIFT 0 +#define DA9062AA_VBUCK4_A_MASK 0x7f +#define DA9062AA_BUCK4_SL_A_SHIFT 7 +#define DA9062AA_BUCK4_SL_A_MASK BIT(7) + +/* DA9062AA_VBUCK3_A = 0x0A7 */ +#define DA9062AA_VBUCK3_A_SHIFT 0 +#define DA9062AA_VBUCK3_A_MASK 0x7f +#define DA9062AA_BUCK3_SL_A_SHIFT 7 +#define DA9062AA_BUCK3_SL_A_MASK BIT(7) + +/* DA9062AA_VLDO1_A = 0x0A9 */ +#define DA9062AA_VLDO1_A_SHIFT 0 +#define DA9062AA_VLDO1_A_MASK 0x3f +#define DA9062AA_LDO1_SL_A_SHIFT 7 +#define DA9062AA_LDO1_SL_A_MASK BIT(7) + +/* DA9062AA_VLDO2_A = 0x0AA */ +#define DA9062AA_VLDO2_A_SHIFT 0 +#define DA9062AA_VLDO2_A_MASK 0x3f +#define DA9062AA_LDO2_SL_A_SHIFT 7 +#define DA9062AA_LDO2_SL_A_MASK BIT(7) + +/* DA9062AA_VLDO3_A = 0x0AB */ +#define DA9062AA_VLDO3_A_SHIFT 0 +#define DA9062AA_VLDO3_A_MASK 0x3f +#define DA9062AA_LDO3_SL_A_SHIFT 7 +#define DA9062AA_LDO3_SL_A_MASK BIT(7) + +/* DA9062AA_VLDO4_A = 0x0AC */ +#define DA9062AA_VLDO4_A_SHIFT 0 +#define DA9062AA_VLDO4_A_MASK 0x3f +#define DA9062AA_LDO4_SL_A_SHIFT 7 +#define DA9062AA_LDO4_SL_A_MASK BIT(7) + +/* DA9062AA_VBUCK2_B = 0x0B4 */ +#define DA9062AA_VBUCK2_B_SHIFT 0 +#define DA9062AA_VBUCK2_B_MASK 0x7f +#define DA9062AA_BUCK2_SL_B_SHIFT 7 +#define DA9062AA_BUCK2_SL_B_MASK BIT(7) + +/* DA9062AA_VBUCK1_B = 0x0B5 */ +#define DA9062AA_VBUCK1_B_SHIFT 0 +#define DA9062AA_VBUCK1_B_MASK 0x7f +#define DA9062AA_BUCK1_SL_B_SHIFT 7 +#define DA9062AA_BUCK1_SL_B_MASK BIT(7) + +/* DA9062AA_VBUCK4_B = 0x0B6 */ +#define DA9062AA_VBUCK4_B_SHIFT 0 +#define DA9062AA_VBUCK4_B_MASK 0x7f +#define DA9062AA_BUCK4_SL_B_SHIFT 7 +#define DA9062AA_BUCK4_SL_B_MASK BIT(7) + +/* DA9062AA_VBUCK3_B = 0x0B8 */ +#define DA9062AA_VBUCK3_B_SHIFT 0 +#define DA9062AA_VBUCK3_B_MASK 0x7f +#define DA9062AA_BUCK3_SL_B_SHIFT 7 +#define DA9062AA_BUCK3_SL_B_MASK BIT(7) + +/* DA9062AA_VLDO1_B = 0x0BA */ +#define DA9062AA_VLDO1_B_SHIFT 0 +#define DA9062AA_VLDO1_B_MASK 0x3f +#define DA9062AA_LDO1_SL_B_SHIFT 7 +#define DA9062AA_LDO1_SL_B_MASK BIT(7) + +/* DA9062AA_VLDO2_B = 0x0BB */ +#define DA9062AA_VLDO2_B_SHIFT 0 +#define DA9062AA_VLDO2_B_MASK 0x3f +#define DA9062AA_LDO2_SL_B_SHIFT 7 +#define DA9062AA_LDO2_SL_B_MASK BIT(7) + +/* DA9062AA_VLDO3_B = 0x0BC */ +#define DA9062AA_VLDO3_B_SHIFT 0 +#define DA9062AA_VLDO3_B_MASK 0x3f +#define DA9062AA_LDO3_SL_B_SHIFT 7 +#define DA9062AA_LDO3_SL_B_MASK BIT(7) + +/* DA9062AA_VLDO4_B = 0x0BD */ +#define DA9062AA_VLDO4_B_SHIFT 0 +#define DA9062AA_VLDO4_B_MASK 0x3f +#define DA9062AA_LDO4_SL_B_SHIFT 7 +#define DA9062AA_LDO4_SL_B_MASK BIT(7) + +/* DA9062AA_BBAT_CONT = 0x0C5 */ +#define DA9062AA_BCHG_VSET_SHIFT 0 +#define DA9062AA_BCHG_VSET_MASK 0x0f 
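
The per-supply *_SHIFT/*_MASK pairs above follow the same layout for every DA9062 rail: the voltage selector occupies the low bits of the VBUCKx_A/B or VLDOx_A/B register and the sleep-control bit sits at bit 7. A short, purely illustrative sketch (not part of the header, with a hypothetical helper name) of programming the BUCK1 "A" selector through regmap while leaving DA9062AA_BUCK1_SL_A untouched:

#include <linux/errno.h>
#include <linux/regmap.h>

/* Hypothetical helper, for illustration only: write a raw voltage
 * selector into VBUCK1_A without disturbing the BUCK1_SL_A bit. */
static int da9062_set_vbuck1_a_sel(struct regmap *regmap, unsigned int sel)
{
	if (sel > (DA9062AA_VBUCK1_A_MASK >> DA9062AA_VBUCK1_A_SHIFT))
		return -EINVAL;

	return regmap_update_bits(regmap, DA9062AA_VBUCK1_A,
				  DA9062AA_VBUCK1_A_MASK,
				  sel << DA9062AA_VBUCK1_A_SHIFT);
}
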
+#define DA9062AA_BCHG_ISET_SHIFT 4 +#define DA9062AA_BCHG_ISET_MASK (0x0f << 4) + +/* DA9062AA_INTERFACE = 0x105 */ +#define DA9062AA_IF_BASE_ADDR_SHIFT 4 +#define DA9062AA_IF_BASE_ADDR_MASK (0x0f << 4) + +/* DA9062AA_CONFIG_A = 0x106 */ +#define DA9062AA_PM_I_V_SHIFT 0 +#define DA9062AA_PM_I_V_MASK 0x01 +#define DA9062AA_PM_O_TYPE_SHIFT 2 +#define DA9062AA_PM_O_TYPE_MASK BIT(2) +#define DA9062AA_IRQ_TYPE_SHIFT 3 +#define DA9062AA_IRQ_TYPE_MASK BIT(3) +#define DA9062AA_PM_IF_V_SHIFT 4 +#define DA9062AA_PM_IF_V_MASK BIT(4) +#define DA9062AA_PM_IF_FMP_SHIFT 5 +#define DA9062AA_PM_IF_FMP_MASK BIT(5) +#define DA9062AA_PM_IF_HSM_SHIFT 6 +#define DA9062AA_PM_IF_HSM_MASK BIT(6) + +/* DA9062AA_CONFIG_B = 0x107 */ +#define DA9062AA_VDD_FAULT_ADJ_SHIFT 0 +#define DA9062AA_VDD_FAULT_ADJ_MASK 0x0f +#define DA9062AA_VDD_HYST_ADJ_SHIFT 4 +#define DA9062AA_VDD_HYST_ADJ_MASK (0x07 << 4) + +/* DA9062AA_CONFIG_C = 0x108 */ +#define DA9062AA_BUCK_ACTV_DISCHRG_SHIFT 2 +#define DA9062AA_BUCK_ACTV_DISCHRG_MASK BIT(2) +#define DA9062AA_BUCK1_CLK_INV_SHIFT 3 +#define DA9062AA_BUCK1_CLK_INV_MASK BIT(3) +#define DA9062AA_BUCK4_CLK_INV_SHIFT 4 +#define DA9062AA_BUCK4_CLK_INV_MASK BIT(4) +#define DA9062AA_BUCK3_CLK_INV_SHIFT 6 +#define DA9062AA_BUCK3_CLK_INV_MASK BIT(6) + +/* DA9062AA_CONFIG_D = 0x109 */ +#define DA9062AA_GPI_V_SHIFT 0 +#define DA9062AA_GPI_V_MASK 0x01 +#define DA9062AA_NIRQ_MODE_SHIFT 1 +#define DA9062AA_NIRQ_MODE_MASK BIT(1) +#define DA9062AA_SYSTEM_EN_RD_SHIFT 2 +#define DA9062AA_SYSTEM_EN_RD_MASK BIT(2) +#define DA9062AA_FORCE_RESET_SHIFT 5 +#define DA9062AA_FORCE_RESET_MASK BIT(5) + +/* DA9062AA_CONFIG_E = 0x10A */ +#define DA9062AA_BUCK1_AUTO_SHIFT 0 +#define DA9062AA_BUCK1_AUTO_MASK 0x01 +#define DA9062AA_BUCK2_AUTO_SHIFT 1 +#define DA9062AA_BUCK2_AUTO_MASK BIT(1) +#define DA9062AA_BUCK4_AUTO_SHIFT 2 +#define DA9062AA_BUCK4_AUTO_MASK BIT(2) +#define DA9062AA_BUCK3_AUTO_SHIFT 4 +#define DA9062AA_BUCK3_AUTO_MASK BIT(4) + +/* DA9062AA_CONFIG_G = 0x10C */ +#define DA9062AA_LDO1_AUTO_SHIFT 0 +#define DA9062AA_LDO1_AUTO_MASK 0x01 +#define DA9062AA_LDO2_AUTO_SHIFT 1 +#define DA9062AA_LDO2_AUTO_MASK BIT(1) +#define DA9062AA_LDO3_AUTO_SHIFT 2 +#define DA9062AA_LDO3_AUTO_MASK BIT(2) +#define DA9062AA_LDO4_AUTO_SHIFT 3 +#define DA9062AA_LDO4_AUTO_MASK BIT(3) + +/* DA9062AA_CONFIG_H = 0x10D */ +#define DA9062AA_BUCK1_2_MERGE_SHIFT 3 +#define DA9062AA_BUCK1_2_MERGE_MASK BIT(3) +#define DA9062AA_BUCK2_OD_SHIFT 5 +#define DA9062AA_BUCK2_OD_MASK BIT(5) +#define DA9062AA_BUCK1_OD_SHIFT 6 +#define DA9062AA_BUCK1_OD_MASK BIT(6) + +/* DA9062AA_CONFIG_I = 0x10E */ +#define DA9062AA_NONKEY_PIN_SHIFT 0 +#define DA9062AA_NONKEY_PIN_MASK 0x03 +#define DA9062AA_nONKEY_SD_SHIFT 2 +#define DA9062AA_nONKEY_SD_MASK BIT(2) +#define DA9062AA_WATCHDOG_SD_SHIFT 3 +#define DA9062AA_WATCHDOG_SD_MASK BIT(3) +#define DA9062AA_KEY_SD_MODE_SHIFT 4 +#define DA9062AA_KEY_SD_MODE_MASK BIT(4) +#define DA9062AA_HOST_SD_MODE_SHIFT 5 +#define DA9062AA_HOST_SD_MODE_MASK BIT(5) +#define DA9062AA_INT_SD_MODE_SHIFT 6 +#define DA9062AA_INT_SD_MODE_MASK BIT(6) +#define DA9062AA_LDO_SD_SHIFT 7 +#define DA9062AA_LDO_SD_MASK BIT(7) + +/* DA9062AA_CONFIG_J = 0x10F */ +#define DA9062AA_KEY_DELAY_SHIFT 0 +#define DA9062AA_KEY_DELAY_MASK 0x03 +#define DA9062AA_SHUT_DELAY_SHIFT 2 +#define DA9062AA_SHUT_DELAY_MASK (0x03 << 2) +#define DA9062AA_RESET_DURATION_SHIFT 4 +#define DA9062AA_RESET_DURATION_MASK (0x03 << 4) +#define DA9062AA_TWOWIRE_TO_SHIFT 6 +#define DA9062AA_TWOWIRE_TO_MASK BIT(6) +#define DA9062AA_IF_RESET_SHIFT 7 +#define 
DA9062AA_IF_RESET_MASK BIT(7) + +/* DA9062AA_CONFIG_K = 0x110 */ +#define DA9062AA_GPIO0_PUPD_SHIFT 0 +#define DA9062AA_GPIO0_PUPD_MASK 0x01 +#define DA9062AA_GPIO1_PUPD_SHIFT 1 +#define DA9062AA_GPIO1_PUPD_MASK BIT(1) +#define DA9062AA_GPIO2_PUPD_SHIFT 2 +#define DA9062AA_GPIO2_PUPD_MASK BIT(2) +#define DA9062AA_GPIO3_PUPD_SHIFT 3 +#define DA9062AA_GPIO3_PUPD_MASK BIT(3) +#define DA9062AA_GPIO4_PUPD_SHIFT 4 +#define DA9062AA_GPIO4_PUPD_MASK BIT(4) + +/* DA9062AA_CONFIG_M = 0x112 */ +#define DA9062AA_NSHUTDOWN_PU_SHIFT 1 +#define DA9062AA_NSHUTDOWN_PU_MASK BIT(1) +#define DA9062AA_WDG_MODE_SHIFT 3 +#define DA9062AA_WDG_MODE_MASK BIT(3) +#define DA9062AA_OSC_FRQ_SHIFT 4 +#define DA9062AA_OSC_FRQ_MASK (0x0f << 4) + +/* DA9062AA_TRIM_CLDR = 0x120 */ +#define DA9062AA_TRIM_CLDR_SHIFT 0 +#define DA9062AA_TRIM_CLDR_MASK 0xff + +/* DA9062AA_GP_ID_0 = 0x121 */ +#define DA9062AA_GP_0_SHIFT 0 +#define DA9062AA_GP_0_MASK 0xff + +/* DA9062AA_GP_ID_1 = 0x122 */ +#define DA9062AA_GP_1_SHIFT 0 +#define DA9062AA_GP_1_MASK 0xff + +/* DA9062AA_GP_ID_2 = 0x123 */ +#define DA9062AA_GP_2_SHIFT 0 +#define DA9062AA_GP_2_MASK 0xff + +/* DA9062AA_GP_ID_3 = 0x124 */ +#define DA9062AA_GP_3_SHIFT 0 +#define DA9062AA_GP_3_MASK 0xff + +/* DA9062AA_GP_ID_4 = 0x125 */ +#define DA9062AA_GP_4_SHIFT 0 +#define DA9062AA_GP_4_MASK 0xff + +/* DA9062AA_GP_ID_5 = 0x126 */ +#define DA9062AA_GP_5_SHIFT 0 +#define DA9062AA_GP_5_MASK 0xff + +/* DA9062AA_GP_ID_6 = 0x127 */ +#define DA9062AA_GP_6_SHIFT 0 +#define DA9062AA_GP_6_MASK 0xff + +/* DA9062AA_GP_ID_7 = 0x128 */ +#define DA9062AA_GP_7_SHIFT 0 +#define DA9062AA_GP_7_MASK 0xff + +/* DA9062AA_GP_ID_8 = 0x129 */ +#define DA9062AA_GP_8_SHIFT 0 +#define DA9062AA_GP_8_MASK 0xff + +/* DA9062AA_GP_ID_9 = 0x12A */ +#define DA9062AA_GP_9_SHIFT 0 +#define DA9062AA_GP_9_MASK 0xff + +/* DA9062AA_GP_ID_10 = 0x12B */ +#define DA9062AA_GP_10_SHIFT 0 +#define DA9062AA_GP_10_MASK 0xff + +/* DA9062AA_GP_ID_11 = 0x12C */ +#define DA9062AA_GP_11_SHIFT 0 +#define DA9062AA_GP_11_MASK 0xff + +/* DA9062AA_GP_ID_12 = 0x12D */ +#define DA9062AA_GP_12_SHIFT 0 +#define DA9062AA_GP_12_MASK 0xff + +/* DA9062AA_GP_ID_13 = 0x12E */ +#define DA9062AA_GP_13_SHIFT 0 +#define DA9062AA_GP_13_MASK 0xff + +/* DA9062AA_GP_ID_14 = 0x12F */ +#define DA9062AA_GP_14_SHIFT 0 +#define DA9062AA_GP_14_MASK 0xff + +/* DA9062AA_GP_ID_15 = 0x130 */ +#define DA9062AA_GP_15_SHIFT 0 +#define DA9062AA_GP_15_MASK 0xff + +/* DA9062AA_GP_ID_16 = 0x131 */ +#define DA9062AA_GP_16_SHIFT 0 +#define DA9062AA_GP_16_MASK 0xff + +/* DA9062AA_GP_ID_17 = 0x132 */ +#define DA9062AA_GP_17_SHIFT 0 +#define DA9062AA_GP_17_MASK 0xff + +/* DA9062AA_GP_ID_18 = 0x133 */ +#define DA9062AA_GP_18_SHIFT 0 +#define DA9062AA_GP_18_MASK 0xff + +/* DA9062AA_GP_ID_19 = 0x134 */ +#define DA9062AA_GP_19_SHIFT 0 +#define DA9062AA_GP_19_MASK 0xff + +/* DA9062AA_DEVICE_ID = 0x181 */ +#define DA9062AA_DEV_ID_SHIFT 0 +#define DA9062AA_DEV_ID_MASK 0xff + +/* DA9062AA_VARIANT_ID = 0x182 */ +#define DA9062AA_VRC_SHIFT 0 +#define DA9062AA_VRC_MASK 0x0f +#define DA9062AA_MRC_SHIFT 4 +#define DA9062AA_MRC_MASK (0x0f << 4) + +/* DA9062AA_CUSTOMER_ID = 0x183 */ +#define DA9062AA_CUST_ID_SHIFT 0 +#define DA9062AA_CUST_ID_MASK 0xff + +/* DA9062AA_CONFIG_ID = 0x184 */ +#define DA9062AA_CONFIG_REV_SHIFT 0 +#define DA9062AA_CONFIG_REV_MASK 0xff + +#endif /* __DA9062_H__ */ diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h new file mode 100644 index 000000000..71b09154e --- /dev/null +++ b/include/linux/mfd/da9063/core.h @@ -0,0 +1,97 @@ +/* + * 
Definitions for DA9063 MFD driver + * + * Copyright 2012 Dialog Semiconductor Ltd. + * + * Author: Michal Hajduk, Dialog Semiconductor + * Author: Krystian Garbaciak, Dialog Semiconductor + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_DA9063_CORE_H__ +#define __MFD_DA9063_CORE_H__ + +#include +#include + +/* DA9063 modules */ +#define DA9063_DRVNAME_CORE "da9063-core" +#define DA9063_DRVNAME_REGULATORS "da9063-regulators" +#define DA9063_DRVNAME_LEDS "da9063-leds" +#define DA9063_DRVNAME_WATCHDOG "da9063-watchdog" +#define DA9063_DRVNAME_HWMON "da9063-hwmon" +#define DA9063_DRVNAME_ONKEY "da9063-onkey" +#define DA9063_DRVNAME_RTC "da9063-rtc" +#define DA9063_DRVNAME_VIBRATION "da9063-vibration" + +#define PMIC_CHIP_ID_DA9063 0x61 + +enum da9063_type { + PMIC_TYPE_DA9063 = 0, + PMIC_TYPE_DA9063L, +}; + +enum da9063_variant_codes { + PMIC_DA9063_AD = 0x3, + PMIC_DA9063_BB = 0x5, + PMIC_DA9063_CA = 0x6, +}; + +/* Interrupts */ +enum da9063_irqs { + DA9063_IRQ_ONKEY = 0, + DA9063_IRQ_ALARM, + DA9063_IRQ_TICK, + DA9063_IRQ_ADC_RDY, + DA9063_IRQ_SEQ_RDY, + DA9063_IRQ_WAKE, + DA9063_IRQ_TEMP, + DA9063_IRQ_COMP_1V2, + DA9063_IRQ_LDO_LIM, + DA9063_IRQ_REG_UVOV, + DA9063_IRQ_DVC_RDY, + DA9063_IRQ_VDD_MON, + DA9063_IRQ_WARN, + DA9063_IRQ_GPI0, + DA9063_IRQ_GPI1, + DA9063_IRQ_GPI2, + DA9063_IRQ_GPI3, + DA9063_IRQ_GPI4, + DA9063_IRQ_GPI5, + DA9063_IRQ_GPI6, + DA9063_IRQ_GPI7, + DA9063_IRQ_GPI8, + DA9063_IRQ_GPI9, + DA9063_IRQ_GPI10, + DA9063_IRQ_GPI11, + DA9063_IRQ_GPI12, + DA9063_IRQ_GPI13, + DA9063_IRQ_GPI14, + DA9063_IRQ_GPI15, +}; + +struct da9063 { + /* Device */ + struct device *dev; + enum da9063_type type; + unsigned char variant_code; + unsigned int flags; + + /* Control interface */ + struct regmap *regmap; + + /* Interrupts */ + int chip_irq; + unsigned int irq_base; + struct regmap_irq_chip_data *regmap_irq; +}; + +int da9063_device_init(struct da9063 *da9063, unsigned int irq); +int da9063_irq_init(struct da9063 *da9063); + +#endif /* __MFD_DA9063_CORE_H__ */ diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h new file mode 100644 index 000000000..50bed4f89 --- /dev/null +++ b/include/linux/mfd/da9063/pdata.h @@ -0,0 +1,114 @@ +/* + * Platform configuration options for DA9063 + * + * Copyright 2012 Dialog Semiconductor Ltd. + * + * Author: Michal Hajduk, Dialog Semiconductor + * Author: Krystian Garbaciak, Dialog Semiconductor + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_DA9063_PDATA_H__ +#define __MFD_DA9063_PDATA_H__ + +#include + +/* + * Regulator configuration + */ +/* DA9063 and DA9063L regulator IDs */ +enum { + /* BUCKs */ + DA9063_ID_BCORE1, + DA9063_ID_BCORE2, + DA9063_ID_BPRO, + DA9063_ID_BMEM, + DA9063_ID_BIO, + DA9063_ID_BPERI, + + /* BCORE1 and BCORE2 in merged mode */ + DA9063_ID_BCORES_MERGED, + /* BMEM and BIO in merged mode */ + DA9063_ID_BMEM_BIO_MERGED, + /* When two BUCKs are merged, they cannot be reused separately */ + + /* LDOs on both DA9063 and DA9063L */ + DA9063_ID_LDO3, + DA9063_ID_LDO7, + DA9063_ID_LDO8, + DA9063_ID_LDO9, + DA9063_ID_LDO11, + + /* DA9063-only LDOs */ + DA9063_ID_LDO1, + DA9063_ID_LDO2, + DA9063_ID_LDO4, + DA9063_ID_LDO5, + DA9063_ID_LDO6, + DA9063_ID_LDO10, +}; + +/* Regulators platform data */ +struct da9063_regulator_data { + int id; + struct regulator_init_data *initdata; +}; + +struct da9063_regulators_pdata { + unsigned n_regulators; + struct da9063_regulator_data *regulator_data; +}; + + +/* + * RGB LED configuration + */ +/* LED IDs for flags in struct led_info. */ +enum { + DA9063_GPIO11_LED, + DA9063_GPIO14_LED, + DA9063_GPIO15_LED, + + DA9063_LED_NUM +}; +#define DA9063_LED_ID_MASK 0x3 + +/* LED polarity for flags in struct led_info. */ +#define DA9063_LED_HIGH_LEVEL_ACTIVE 0x0 +#define DA9063_LED_LOW_LEVEL_ACTIVE 0x4 + + +/* + * General PMIC configuration + */ +/* HWMON ADC channels configuration */ +#define DA9063_FLG_FORCE_IN0_MANUAL_MODE 0x0010 +#define DA9063_FLG_FORCE_IN0_AUTO_MODE 0x0020 +#define DA9063_FLG_FORCE_IN1_MANUAL_MODE 0x0040 +#define DA9063_FLG_FORCE_IN1_AUTO_MODE 0x0080 +#define DA9063_FLG_FORCE_IN2_MANUAL_MODE 0x0100 +#define DA9063_FLG_FORCE_IN2_AUTO_MODE 0x0200 +#define DA9063_FLG_FORCE_IN3_MANUAL_MODE 0x0400 +#define DA9063_FLG_FORCE_IN3_AUTO_MODE 0x0800 + +/* Disable register caching. */ +#define DA9063_FLG_NO_CACHE 0x0008 + +struct da9063; + +/* DA9063 platform data */ +struct da9063_pdata { + int (*init)(struct da9063 *da9063); + int irq_base; + bool key_power; + unsigned flags; + struct da9063_regulators_pdata *regulators_pdata; + struct led_platform_data *leds_pdata; +}; + +#endif /* __MFD_DA9063_PDATA_H__ */ diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h new file mode 100644 index 000000000..844fc2973 --- /dev/null +++ b/include/linux/mfd/da9063/registers.h @@ -0,0 +1,1073 @@ +/* + * Registers definition for DA9063 modules + * + * Copyright 2012 Dialog Semiconductor Ltd. + * + * Author: Michal Hajduk, Dialog Semiconductor + * Author: Krystian Garbaciak, Dialog Semiconductor + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef _DA9063_REG_H +#define _DA9063_REG_H + +#define DA9063_I2C_PAGE_SEL_SHIFT 1 +#define DA9063_EVENT_REG_NUM 4 + +/* Page selection I2C or SPI always in the begining of any page. 
*/ +/* Page 0 : I2C access 0x000 - 0x0FF SPI access 0x000 - 0x07F */ +/* Page 1 : SPI access 0x080 - 0x0FF */ +/* Page 2 : I2C access 0x100 - 0x1FF SPI access 0x100 - 0x17F */ +/* Page 3 : SPI access 0x180 - 0x1FF */ +#define DA9063_REG_PAGE_CON 0x00 + +/* System Control and Event Registers */ +#define DA9063_REG_STATUS_A 0x01 +#define DA9063_REG_STATUS_B 0x02 +#define DA9063_REG_STATUS_C 0x03 +#define DA9063_REG_STATUS_D 0x04 +#define DA9063_REG_FAULT_LOG 0x05 +#define DA9063_REG_EVENT_A 0x06 +#define DA9063_REG_EVENT_B 0x07 +#define DA9063_REG_EVENT_C 0x08 +#define DA9063_REG_EVENT_D 0x09 +#define DA9063_REG_IRQ_MASK_A 0x0A +#define DA9063_REG_IRQ_MASK_B 0x0B +#define DA9063_REG_IRQ_MASK_C 0x0C +#define DA9063_REG_IRQ_MASK_D 0x0D +#define DA9063_REG_CONTROL_A 0x0E +#define DA9063_REG_CONTROL_B 0x0F +#define DA9063_REG_CONTROL_C 0x10 +#define DA9063_REG_CONTROL_D 0x11 +#define DA9063_REG_CONTROL_E 0x12 +#define DA9063_REG_CONTROL_F 0x13 +#define DA9063_REG_PD_DIS 0x14 + +/* GPIO Control Registers */ +#define DA9063_REG_GPIO_0_1 0x15 +#define DA9063_REG_GPIO_2_3 0x16 +#define DA9063_REG_GPIO_4_5 0x17 +#define DA9063_REG_GPIO_6_7 0x18 +#define DA9063_REG_GPIO_8_9 0x19 +#define DA9063_REG_GPIO_10_11 0x1A +#define DA9063_REG_GPIO_12_13 0x1B +#define DA9063_REG_GPIO_14_15 0x1C +#define DA9063_REG_GPIO_MODE0_7 0x1D +#define DA9063_REG_GPIO_MODE8_15 0x1E +#define DA9063_REG_SWITCH_CONT 0x1F + +/* Regulator Control Registers */ +#define DA9063_REG_BCORE2_CONT 0x20 +#define DA9063_REG_BCORE1_CONT 0x21 +#define DA9063_REG_BPRO_CONT 0x22 +#define DA9063_REG_BMEM_CONT 0x23 +#define DA9063_REG_BIO_CONT 0x24 +#define DA9063_REG_BPERI_CONT 0x25 +#define DA9063_REG_LDO1_CONT 0x26 +#define DA9063_REG_LDO2_CONT 0x27 +#define DA9063_REG_LDO3_CONT 0x28 +#define DA9063_REG_LDO4_CONT 0x29 +#define DA9063_REG_LDO5_CONT 0x2A +#define DA9063_REG_LDO6_CONT 0x2B +#define DA9063_REG_LDO7_CONT 0x2C +#define DA9063_REG_LDO8_CONT 0x2D +#define DA9063_REG_LDO9_CONT 0x2E +#define DA9063_REG_LDO10_CONT 0x2F +#define DA9063_REG_LDO11_CONT 0x30 +#define DA9063_REG_SUPPLIES 0x31 +#define DA9063_REG_DVC_1 0x32 +#define DA9063_REG_DVC_2 0x33 + +/* GP-ADC Control Registers */ +#define DA9063_REG_ADC_MAN 0x34 +#define DA9063_REG_ADC_CONT 0x35 +#define DA9063_REG_VSYS_MON 0x36 +#define DA9063_REG_ADC_RES_L 0x37 +#define DA9063_REG_ADC_RES_H 0x38 +#define DA9063_REG_VSYS_RES 0x39 +#define DA9063_REG_ADCIN1_RES 0x3A +#define DA9063_REG_ADCIN2_RES 0x3B +#define DA9063_REG_ADCIN3_RES 0x3C +#define DA9063_REG_MON_A8_RES 0x3D +#define DA9063_REG_MON_A9_RES 0x3E +#define DA9063_REG_MON_A10_RES 0x3F + +/* RTC Calendar and Alarm Registers */ +#define DA9063_REG_COUNT_S 0x40 +#define DA9063_REG_COUNT_MI 0x41 +#define DA9063_REG_COUNT_H 0x42 +#define DA9063_REG_COUNT_D 0x43 +#define DA9063_REG_COUNT_MO 0x44 +#define DA9063_REG_COUNT_Y 0x45 + +#define DA9063_AD_REG_ALARM_MI 0x46 +#define DA9063_AD_REG_ALARM_H 0x47 +#define DA9063_AD_REG_ALARM_D 0x48 +#define DA9063_AD_REG_ALARM_MO 0x49 +#define DA9063_AD_REG_ALARM_Y 0x4A +#define DA9063_AD_REG_SECOND_A 0x4B +#define DA9063_AD_REG_SECOND_B 0x4C +#define DA9063_AD_REG_SECOND_C 0x4D +#define DA9063_AD_REG_SECOND_D 0x4E + +#define DA9063_BB_REG_ALARM_S 0x46 +#define DA9063_BB_REG_ALARM_MI 0x47 +#define DA9063_BB_REG_ALARM_H 0x48 +#define DA9063_BB_REG_ALARM_D 0x49 +#define DA9063_BB_REG_ALARM_MO 0x4A +#define DA9063_BB_REG_ALARM_Y 0x4B +#define DA9063_BB_REG_SECOND_A 0x4C +#define DA9063_BB_REG_SECOND_B 0x4D +#define DA9063_BB_REG_SECOND_C 0x4E +#define DA9063_BB_REG_SECOND_D 0x4F + +/* 
Sequencer Control Registers */ +#define DA9063_REG_SEQ 0x81 +#define DA9063_REG_SEQ_TIMER 0x82 +#define DA9063_REG_ID_2_1 0x83 +#define DA9063_REG_ID_4_3 0x84 +#define DA9063_REG_ID_6_5 0x85 +#define DA9063_REG_ID_8_7 0x86 +#define DA9063_REG_ID_10_9 0x87 +#define DA9063_REG_ID_12_11 0x88 +#define DA9063_REG_ID_14_13 0x89 +#define DA9063_REG_ID_16_15 0x8A +#define DA9063_REG_ID_18_17 0x8B +#define DA9063_REG_ID_20_19 0x8C +#define DA9063_REG_ID_22_21 0x8D +#define DA9063_REG_ID_24_23 0x8E +#define DA9063_REG_ID_26_25 0x8F +#define DA9063_REG_ID_28_27 0x90 +#define DA9063_REG_ID_30_29 0x91 +#define DA9063_REG_ID_32_31 0x92 +#define DA9063_REG_SEQ_A 0x95 +#define DA9063_REG_SEQ_B 0x96 +#define DA9063_REG_WAIT 0x97 +#define DA9063_REG_EN_32K 0x98 +#define DA9063_REG_RESET 0x99 + +/* Regulator Setting Registers */ +#define DA9063_REG_BUCK_ILIM_A 0x9A +#define DA9063_REG_BUCK_ILIM_B 0x9B +#define DA9063_REG_BUCK_ILIM_C 0x9C +#define DA9063_REG_BCORE2_CFG 0x9D +#define DA9063_REG_BCORE1_CFG 0x9E +#define DA9063_REG_BPRO_CFG 0x9F +#define DA9063_REG_BIO_CFG 0xA0 +#define DA9063_REG_BMEM_CFG 0xA1 +#define DA9063_REG_BPERI_CFG 0xA2 +#define DA9063_REG_VBCORE2_A 0xA3 +#define DA9063_REG_VBCORE1_A 0xA4 +#define DA9063_REG_VBPRO_A 0xA5 +#define DA9063_REG_VBMEM_A 0xA6 +#define DA9063_REG_VBIO_A 0xA7 +#define DA9063_REG_VBPERI_A 0xA8 +#define DA9063_REG_VLDO1_A 0xA9 +#define DA9063_REG_VLDO2_A 0xAA +#define DA9063_REG_VLDO3_A 0xAB +#define DA9063_REG_VLDO4_A 0xAC +#define DA9063_REG_VLDO5_A 0xAD +#define DA9063_REG_VLDO6_A 0xAE +#define DA9063_REG_VLDO7_A 0xAF +#define DA9063_REG_VLDO8_A 0xB0 +#define DA9063_REG_VLDO9_A 0xB1 +#define DA9063_REG_VLDO10_A 0xB2 +#define DA9063_REG_VLDO11_A 0xB3 +#define DA9063_REG_VBCORE2_B 0xB4 +#define DA9063_REG_VBCORE1_B 0xB5 +#define DA9063_REG_VBPRO_B 0xB6 +#define DA9063_REG_VBMEM_B 0xB7 +#define DA9063_REG_VBIO_B 0xB8 +#define DA9063_REG_VBPERI_B 0xB9 +#define DA9063_REG_VLDO1_B 0xBA +#define DA9063_REG_VLDO2_B 0xBB +#define DA9063_REG_VLDO3_B 0xBC +#define DA9063_REG_VLDO4_B 0xBD +#define DA9063_REG_VLDO5_B 0xBE +#define DA9063_REG_VLDO6_B 0xBF +#define DA9063_REG_VLDO7_B 0xC0 +#define DA9063_REG_VLDO8_B 0xC1 +#define DA9063_REG_VLDO9_B 0xC2 +#define DA9063_REG_VLDO10_B 0xC3 +#define DA9063_REG_VLDO11_B 0xC4 + +/* Backup Battery Charger Control Register */ +#define DA9063_REG_BBAT_CONT 0xC5 + +/* GPIO PWM (LED) */ +#define DA9063_REG_GPO11_LED 0xC6 +#define DA9063_REG_GPO14_LED 0xC7 +#define DA9063_REG_GPO15_LED 0xC8 + +/* GP-ADC Threshold Registers */ +#define DA9063_REG_ADC_CFG 0xC9 +#define DA9063_REG_AUTO1_HIGH 0xCA +#define DA9063_REG_AUTO1_LOW 0xCB +#define DA9063_REG_AUTO2_HIGH 0xCC +#define DA9063_REG_AUTO2_LOW 0xCD +#define DA9063_REG_AUTO3_HIGH 0xCE +#define DA9063_REG_AUTO3_LOW 0xCF + +/* DA9063 Configuration registers */ +/* OTP */ +#define DA9063_REG_OTP_CONT 0x101 +#define DA9063_REG_OTP_ADDR 0x102 +#define DA9063_REG_OTP_DATA 0x103 + +/* Customer Trim and Configuration */ +#define DA9063_REG_T_OFFSET 0x104 +#define DA9063_REG_INTERFACE 0x105 +#define DA9063_REG_CONFIG_A 0x106 +#define DA9063_REG_CONFIG_B 0x107 +#define DA9063_REG_CONFIG_C 0x108 +#define DA9063_REG_CONFIG_D 0x109 +#define DA9063_REG_CONFIG_E 0x10A +#define DA9063_REG_CONFIG_F 0x10B +#define DA9063_REG_CONFIG_G 0x10C +#define DA9063_REG_CONFIG_H 0x10D +#define DA9063_REG_CONFIG_I 0x10E +#define DA9063_REG_CONFIG_J 0x10F +#define DA9063_REG_CONFIG_K 0x110 +#define DA9063_REG_CONFIG_L 0x111 + +#define DA9063_AD_REG_MON_REG_1 0x112 +#define DA9063_AD_REG_MON_REG_2 0x113 +#define 
DA9063_AD_REG_MON_REG_3 0x114 +#define DA9063_AD_REG_MON_REG_4 0x115 +#define DA9063_AD_REG_MON_REG_5 0x116 +#define DA9063_AD_REG_MON_REG_6 0x117 +#define DA9063_AD_REG_TRIM_CLDR 0x118 + +#define DA9063_AD_REG_GP_ID_0 0x119 +#define DA9063_AD_REG_GP_ID_1 0x11A +#define DA9063_AD_REG_GP_ID_2 0x11B +#define DA9063_AD_REG_GP_ID_3 0x11C +#define DA9063_AD_REG_GP_ID_4 0x11D +#define DA9063_AD_REG_GP_ID_5 0x11E +#define DA9063_AD_REG_GP_ID_6 0x11F +#define DA9063_AD_REG_GP_ID_7 0x120 +#define DA9063_AD_REG_GP_ID_8 0x121 +#define DA9063_AD_REG_GP_ID_9 0x122 +#define DA9063_AD_REG_GP_ID_10 0x123 +#define DA9063_AD_REG_GP_ID_11 0x124 +#define DA9063_AD_REG_GP_ID_12 0x125 +#define DA9063_AD_REG_GP_ID_13 0x126 +#define DA9063_AD_REG_GP_ID_14 0x127 +#define DA9063_AD_REG_GP_ID_15 0x128 +#define DA9063_AD_REG_GP_ID_16 0x129 +#define DA9063_AD_REG_GP_ID_17 0x12A +#define DA9063_AD_REG_GP_ID_18 0x12B +#define DA9063_AD_REG_GP_ID_19 0x12C + +#define DA9063_BB_REG_CONFIG_M 0x112 +#define DA9063_BB_REG_CONFIG_N 0x113 + +#define DA9063_BB_REG_MON_REG_1 0x114 +#define DA9063_BB_REG_MON_REG_2 0x115 +#define DA9063_BB_REG_MON_REG_3 0x116 +#define DA9063_BB_REG_MON_REG_4 0x117 +#define DA9063_BB_REG_MON_REG_5 0x11E +#define DA9063_BB_REG_MON_REG_6 0x11F +#define DA9063_BB_REG_TRIM_CLDR 0x120 +/* General Purpose Registers */ +#define DA9063_BB_REG_GP_ID_0 0x121 +#define DA9063_BB_REG_GP_ID_1 0x122 +#define DA9063_BB_REG_GP_ID_2 0x123 +#define DA9063_BB_REG_GP_ID_3 0x124 +#define DA9063_BB_REG_GP_ID_4 0x125 +#define DA9063_BB_REG_GP_ID_5 0x126 +#define DA9063_BB_REG_GP_ID_6 0x127 +#define DA9063_BB_REG_GP_ID_7 0x128 +#define DA9063_BB_REG_GP_ID_8 0x129 +#define DA9063_BB_REG_GP_ID_9 0x12A +#define DA9063_BB_REG_GP_ID_10 0x12B +#define DA9063_BB_REG_GP_ID_11 0x12C +#define DA9063_BB_REG_GP_ID_12 0x12D +#define DA9063_BB_REG_GP_ID_13 0x12E +#define DA9063_BB_REG_GP_ID_14 0x12F +#define DA9063_BB_REG_GP_ID_15 0x130 +#define DA9063_BB_REG_GP_ID_16 0x131 +#define DA9063_BB_REG_GP_ID_17 0x132 +#define DA9063_BB_REG_GP_ID_18 0x133 +#define DA9063_BB_REG_GP_ID_19 0x134 + +/* Chip ID and variant */ +#define DA9063_REG_CHIP_ID 0x181 +#define DA9063_REG_CHIP_VARIANT 0x182 + +/* + * PMIC registers bits + */ +/* DA9063_REG_PAGE_CON (addr=0x00) */ +#define DA9063_PEG_PAGE_SHIFT 0 +#define DA9063_REG_PAGE_MASK 0x07 +#define DA9063_REG_PAGE0 0x00 +#define DA9063_REG_PAGE2 0x02 +#define DA9063_PAGE_WRITE_MODE 0x00 +#define DA9063_REPEAT_WRITE_MODE 0x40 +#define DA9063_PAGE_REVERT 0x80 + +/* DA9063_REG_STATUS_A (addr=0x01) */ +#define DA9063_NONKEY 0x01 +#define DA9063_WAKE 0x02 +#define DA9063_DVC_BUSY 0x04 +#define DA9063_COMP_1V2 0x08 + +/* DA9063_REG_STATUS_B (addr=0x02) */ +#define DA9063_GPI0 0x01 +#define DA9063_GPI1 0x02 +#define DA9063_GPI2 0x04 +#define DA9063_GPI3 0x08 +#define DA9063_GPI4 0x10 +#define DA9063_GPI5 0x20 +#define DA9063_GPI6 0x40 +#define DA9063_GPI7 0x80 + +/* DA9063_REG_STATUS_C (addr=0x03) */ +#define DA9063_GPI8 0x01 +#define DA9063_GPI9 0x02 +#define DA9063_GPI10 0x04 +#define DA9063_GPI11 0x08 +#define DA9063_GPI12 0x10 +#define DA9063_GPI13 0x20 +#define DA9063_GPI14 0x40 +#define DA9063_GPI15 0x80 + +/* DA9063_REG_STATUS_D (addr=0x04) */ +#define DA9063_LDO3_LIM 0x08 +#define DA9063_LDO4_LIM 0x10 +#define DA9063_LDO7_LIM 0x20 +#define DA9063_LDO8_LIM 0x40 +#define DA9063_LDO11_LIM 0x80 + +/* DA9063_REG_FAULT_LOG (addr=0x05) */ +#define DA9063_TWD_ERROR 0x01 +#define DA9063_POR 0x02 +#define DA9063_VDD_FAULT 0x04 +#define DA9063_VDD_START 0x08 +#define DA9063_TEMP_CRIT 0x10 +#define 
DA9063_KEY_RESET 0x20 +#define DA9063_NSHUTDOWN 0x40 +#define DA9063_WAIT_SHUT 0x80 + +/* DA9063_REG_EVENT_A (addr=0x06) */ +#define DA9063_E_NONKEY 0x01 +#define DA9063_E_ALARM 0x02 +#define DA9063_E_TICK 0x04 +#define DA9063_E_ADC_RDY 0x08 +#define DA9063_E_SEQ_RDY 0x10 +#define DA9063_EVENTS_B 0x20 +#define DA9063_EVENTS_C 0x40 +#define DA9063_EVENTS_D 0x80 + +/* DA9063_REG_EVENT_B (addr=0x07) */ +#define DA9063_E_WAKE 0x01 +#define DA9063_E_TEMP 0x02 +#define DA9063_E_COMP_1V2 0x04 +#define DA9063_E_LDO_LIM 0x08 +#define DA9063_E_REG_UVOV 0x10 +#define DA9063_E_DVC_RDY 0x20 +#define DA9063_E_VDD_MON 0x40 +#define DA9063_E_VDD_WARN 0x80 + +/* DA9063_REG_EVENT_C (addr=0x08) */ +#define DA9063_E_GPI0 0x01 +#define DA9063_E_GPI1 0x02 +#define DA9063_E_GPI2 0x04 +#define DA9063_E_GPI3 0x08 +#define DA9063_E_GPI4 0x10 +#define DA9063_E_GPI5 0x20 +#define DA9063_E_GPI6 0x40 +#define DA9063_E_GPI7 0x80 + +/* DA9063_REG_EVENT_D (addr=0x09) */ +#define DA9063_E_GPI8 0x01 +#define DA9063_E_GPI9 0x02 +#define DA9063_E_GPI10 0x04 +#define DA9063_E_GPI11 0x08 +#define DA9063_E_GPI12 0x10 +#define DA9063_E_GPI13 0x20 +#define DA9063_E_GPI14 0x40 +#define DA9063_E_GPI15 0x80 + +/* DA9063_REG_IRQ_MASK_A (addr=0x0A) */ +#define DA9063_M_ONKEY 0x01 +#define DA9063_M_ALARM 0x02 +#define DA9063_M_TICK 0x04 +#define DA9063_M_ADC_RDY 0x08 +#define DA9063_M_SEQ_RDY 0x10 + +/* DA9063_REG_IRQ_MASK_B (addr=0x0B) */ +#define DA9063_M_WAKE 0x01 +#define DA9063_M_TEMP 0x02 +#define DA9063_M_COMP_1V2 0x04 +#define DA9063_M_LDO_LIM 0x08 +#define DA9063_M_UVOV 0x10 +#define DA9063_M_DVC_RDY 0x20 +#define DA9063_M_VDD_MON 0x40 +#define DA9063_M_VDD_WARN 0x80 + +/* DA9063_REG_IRQ_MASK_C (addr=0x0C) */ +#define DA9063_M_GPI0 0x01 +#define DA9063_M_GPI1 0x02 +#define DA9063_M_GPI2 0x04 +#define DA9063_M_GPI3 0x08 +#define DA9063_M_GPI4 0x10 +#define DA9063_M_GPI5 0x20 +#define DA9063_M_GPI6 0x40 +#define DA9063_M_GPI7 0x80 + +/* DA9063_REG_IRQ_MASK_D (addr=0x0D) */ +#define DA9063_M_GPI8 0x01 +#define DA9063_M_GPI9 0x02 +#define DA9063_M_GPI10 0x04 +#define DA9063_M_GPI11 0x08 +#define DA9063_M_GPI12 0x10 +#define DA9063_M_GPI13 0x20 +#define DA9063_M_GPI14 0x40 +#define DA9063_M_GPI15 0x80 + +/* DA9063_REG_CONTROL_A (addr=0x0E) */ +#define DA9063_SYSTEM_EN 0x01 +#define DA9063_POWER_EN 0x02 +#define DA9063_POWER1_EN 0x04 +#define DA9063_STANDBY 0x08 +#define DA9063_M_SYSTEM_EN 0x10 +#define DA9063_M_POWER_EN 0x20 +#define DA9063_M_POWER1_EN 0x40 +#define DA9063_CP_EN 0x80 + +/* DA9063_REG_CONTROL_B (addr=0x0F) */ +#define DA9063_CHG_SEL 0x01 +#define DA9063_WATCHDOG_PD 0x02 +#define DA9063_BB_RESET_BLINKING 0x04 +#define DA9063_NRES_MODE 0x08 +#define DA9063_NONKEY_LOCK 0x10 +#define DA9063_BB_BUCK_SLOWSTART 0x80 + +/* DA9063_REG_CONTROL_C (addr=0x10) */ +#define DA9063_DEBOUNCING_MASK 0x07 +#define DA9063_DEBOUNCING_OFF 0x0 +#define DA9063_DEBOUNCING_0MS1 0x1 +#define DA9063_DEBOUNCING_1MS 0x2 +#define DA9063_DEBOUNCING_10MS24 0x3 +#define DA9063_DEBOUNCING_51MS2 0x4 +#define DA9063_DEBOUNCING_256MS 0x5 +#define DA9063_DEBOUNCING_512MS 0x6 +#define DA9063_DEBOUNCING_1024MS 0x7 + +#define DA9063_AUTO_BOOT 0x08 +#define DA9063_OTPREAD_EN 0x10 +#define DA9063_SLEW_RATE_MASK 0x60 +#define DA9063_SLEW_RATE_4US 0x00 +#define DA9063_SLEW_RATE_3US 0x20 +#define DA9063_SLEW_RATE_1US 0x40 +#define DA9063_SLEW_RATE_0US5 0x60 +#define DA9063_DEF_SUPPLY 0x80 + +/* DA9063_REG_CONTROL_D (addr=0x11) */ +#define DA9063_TWDSCALE_MASK 0x07 +#define DA9063_BLINK_FRQ_MASK 0x38 +#define DA9063_BLINK_FRQ_OFF 0x00 +#define DA9063_BLINK_FRQ_1S0 
0x08 +#define DA9063_BLINK_FRQ_2S0 0x10 +#define DA9063_BLINK_FRQ_4S0 0x18 +#define DA9063_BLINK_FRQ_0S18 0x20 +#define DA9063_BLINK_FRQ_2S0_VDD 0x28 +#define DA9063_BLINK_FRQ_4S0_VDD 0x30 +#define DA9063_BLINK_FRQ_0S18_VDD 0x38 + +#define DA9063_BLINK_DUR_MASK 0xC0 +#define DA9063_BLINK_DUR_10MS 0x00 +#define DA9063_BLINK_DUR_20MS 0x40 +#define DA9063_BLINK_DUR_40MS 0x80 +#define DA9063_BLINK_DUR_20MSDBL 0xC0 + +/* DA9063_REG_CONTROL_E (addr=0x12) */ +#define DA9063_RTC_MODE_PD 0x01 +#define DA9063_RTC_MODE_SD 0x02 +#define DA9063_RTC_EN 0x04 +#define DA9063_ECO_MODE 0x08 +#define DA9063_PM_FB1_PIN 0x10 +#define DA9063_PM_FB2_PIN 0x20 +#define DA9063_PM_FB3_PIN 0x40 +#define DA9063_V_LOCK 0x80 + +/* DA9063_REG_CONTROL_F (addr=0x13) */ +#define DA9063_WATCHDOG 0x01 +#define DA9063_SHUTDOWN 0x02 +#define DA9063_WAKE_UP 0x04 + +/* DA9063_REG_PD_DIS (addr=0x14) */ +#define DA9063_GPI_DIS 0x01 +#define DA9063_GPADC_PAUSE 0x02 +#define DA9063_PMIF_DIS 0x04 +#define DA9063_HS2WIRE_DIS 0x08 +#define DA9063_BB_CLDR_PAUSE 0x10 +#define DA9063_BBAT_DIS 0x20 +#define DA9063_OUT_32K_PAUSE 0x40 +#define DA9063_PMCONT_DIS 0x80 + +/* DA9063_REG_GPIO_0_1 (addr=0x15) */ +#define DA9063_GPIO0_PIN_MASK 0x03 +#define DA9063_GPIO0_PIN_ADCIN1 0x00 +#define DA9063_GPIO0_PIN_GPI 0x01 +#define DA9063_GPIO0_PIN_GPO_OD 0x02 +#define DA9063_GPIO0_PIN_GPO 0x03 +#define DA9063_GPIO0_TYPE 0x04 +#define DA9063_GPIO0_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO0_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO0_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO0_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO0_NO_WAKEUP 0x08 +#define DA9063_GPIO1_PIN_MASK 0x30 +#define DA9063_GPIO1_PIN_ADCIN2_COMP 0x00 +#define DA9063_GPIO1_PIN_GPI 0x10 +#define DA9063_GPIO1_PIN_GPO_OD 0x20 +#define DA9063_GPIO1_PIN_GPO 0x30 +#define DA9063_GPIO1_TYPE 0x40 +#define DA9063_GPIO1_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO1_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO1_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO1_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO1_NO_WAKEUP 0x80 + +/* DA9063_REG_GPIO_2_3 (addr=0x16) */ +#define DA9063_GPIO2_PIN_MASK 0x03 +#define DA9063_GPIO2_PIN_ADCIN3 0x00 +#define DA9063_GPIO2_PIN_GPI 0x01 +#define DA9063_GPIO2_PIN_GPO_PSS 0x02 +#define DA9063_GPIO2_PIN_GPO 0x03 +#define DA9063_GPIO2_TYPE 0x04 +#define DA9063_GPIO2_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO2_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO2_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO2_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO2_NO_WAKEUP 0x08 +#define DA9063_GPIO3_PIN_MASK 0x30 +#define DA9063_GPIO3_PIN_CORE_SW_G 0x00 +#define DA9063_GPIO3_PIN_GPI 0x10 +#define DA9063_GPIO3_PIN_GPO_OD 0x20 +#define DA9063_GPIO3_PIN_GPO 0x30 +#define DA9063_GPIO3_TYPE 0x40 +#define DA9063_GPIO3_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO3_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO3_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO3_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO3_NO_WAKEUP 0x80 + +/* DA9063_REG_GPIO_4_5 (addr=0x17) */ +#define DA9063_GPIO4_PIN_MASK 0x03 +#define DA9063_GPIO4_PIN_CORE_SW_S 0x00 +#define DA9063_GPIO4_PIN_GPI 0x01 +#define DA9063_GPIO4_PIN_GPO_OD 0x02 +#define DA9063_GPIO4_PIN_GPO 0x03 +#define DA9063_GPIO4_TYPE 0x04 +#define DA9063_GPIO4_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO4_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO4_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO4_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO4_NO_WAKEUP 0x08 +#define DA9063_GPIO5_PIN_MASK 0x30 +#define DA9063_GPIO5_PIN_PERI_SW_G 0x00 +#define DA9063_GPIO5_PIN_GPI 0x10 +#define DA9063_GPIO5_PIN_GPO_OD 
0x20 +#define DA9063_GPIO5_PIN_GPO 0x30 +#define DA9063_GPIO5_TYPE 0x40 +#define DA9063_GPIO5_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO5_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO5_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO5_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO5_NO_WAKEUP 0x80 + +/* DA9063_REG_GPIO_6_7 (addr=0x18) */ +#define DA9063_GPIO6_PIN_MASK 0x03 +#define DA9063_GPIO6_PIN_PERI_SW_S 0x00 +#define DA9063_GPIO6_PIN_GPI 0x01 +#define DA9063_GPIO6_PIN_GPO_OD 0x02 +#define DA9063_GPIO6_PIN_GPO 0x03 +#define DA9063_GPIO6_TYPE 0x04 +#define DA9063_GPIO6_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO6_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO6_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO6_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO6_NO_WAKEUP 0x08 +#define DA9063_GPIO7_PIN_MASK 0x30 +#define DA9063_GPIO7_PIN_GPI 0x10 +#define DA9063_GPIO7_PIN_GPO_PSS 0x20 +#define DA9063_GPIO7_PIN_GPO 0x30 +#define DA9063_GPIO7_TYPE 0x40 +#define DA9063_GPIO7_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO7_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO7_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO7_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO7_NO_WAKEUP 0x80 + +/* DA9063_REG_GPIO_8_9 (addr=0x19) */ +#define DA9063_GPIO8_PIN_MASK 0x03 +#define DA9063_GPIO8_PIN_GPI_SYS_EN 0x00 +#define DA9063_GPIO8_PIN_GPI 0x01 +#define DA9063_GPIO8_PIN_GPO_PSS 0x02 +#define DA9063_GPIO8_PIN_GPO 0x03 +#define DA9063_GPIO8_TYPE 0x04 +#define DA9063_GPIO8_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO8_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO8_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO8_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO8_NO_WAKEUP 0x08 +#define DA9063_GPIO9_PIN_MASK 0x30 +#define DA9063_GPIO9_PIN_GPI_PWR_EN 0x00 +#define DA9063_GPIO9_PIN_GPI 0x10 +#define DA9063_GPIO9_PIN_GPO_PSS 0x20 +#define DA9063_GPIO9_PIN_GPO 0x30 +#define DA9063_GPIO9_TYPE 0x40 +#define DA9063_GPIO9_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO9_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO9_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO9_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO9_NO_WAKEUP 0x80 + +/* DA9063_REG_GPIO_10_11 (addr=0x1A) */ +#define DA9063_GPIO10_PIN_MASK 0x03 +#define DA9063_GPIO10_PIN_GPI_PWR1_EN 0x00 +#define DA9063_GPIO10_PIN_GPI 0x01 +#define DA9063_GPIO10_PIN_GPO_OD 0x02 +#define DA9063_GPIO10_PIN_GPO 0x03 +#define DA9063_GPIO10_TYPE 0x04 +#define DA9063_GPIO10_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO10_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO10_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO10_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO10_NO_WAKEUP 0x08 +#define DA9063_GPIO11_PIN_MASK 0x30 +#define DA9063_GPIO11_PIN_GPO_OD 0x00 +#define DA9063_GPIO11_PIN_GPI 0x10 +#define DA9063_GPIO11_PIN_GPO_PSS 0x20 +#define DA9063_GPIO11_PIN_GPO 0x30 +#define DA9063_GPIO11_TYPE 0x40 +#define DA9063_GPIO11_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO11_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO11_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO11_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO11_NO_WAKEUP 0x80 + +/* DA9063_REG_GPIO_12_13 (addr=0x1B) */ +#define DA9063_GPIO12_PIN_MASK 0x03 +#define DA9063_GPIO12_PIN_NVDDFLT_OUT 0x00 +#define DA9063_GPIO12_PIN_GPI 0x01 +#define DA9063_GPIO12_PIN_VSYSMON_OUT 0x02 +#define DA9063_GPIO12_PIN_GPO 0x03 +#define DA9063_GPIO12_TYPE 0x04 +#define DA9063_GPIO12_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO12_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO12_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO12_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO12_NO_WAKEUP 0x08 +#define DA9063_GPIO13_PIN_MASK 0x30 +#define DA9063_GPIO13_PIN_GPFB1_OUT 0x00 
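The GPIO control registers above pair a two-bit pin-function field with a type/polarity bit and a wake-up disable bit for each GPIO. As a minimal sketch of how a sub-driver holding the shared struct da9063 might program one of them through its regmap (da9063_gpio0_as_input() is a hypothetical helper, not something these headers provide):

#include <linux/regmap.h>
#include <linux/mfd/da9063/core.h>
#include <linux/mfd/da9063/registers.h>

/* Hypothetical example: route GPIO0 to the GPI function with active-high
 * polarity by rewriting only the GPIO0 fields of DA9063_REG_GPIO_0_1. */
static int da9063_gpio0_as_input(struct da9063 *da9063)
{
	return regmap_update_bits(da9063->regmap, DA9063_REG_GPIO_0_1,
				  DA9063_GPIO0_PIN_MASK | DA9063_GPIO0_TYPE,
				  DA9063_GPIO0_PIN_GPI |
				  DA9063_GPIO0_TYPE_GPI_ACT_HIGH);
}

The same pattern would apply to the other GPIO_x_y registers, substituting the matching per-pin mask and value macros.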
+#define DA9063_GPIO13_PIN_GPI 0x10 +#define DA9063_GPIO13_PIN_GPFB1_OUTOD 0x20 +#define DA9063_GPIO13_PIN_GPO 0x30 +#define DA9063_GPIO13_TYPE 0x40 +#define DA9063_GPIO13_TYPE_GPFB1_OUT 0x00 +#define DA9063_GPIO13_TYPE_GPI 0x00 +#define DA9063_GPIO13_TYPE_GPFB1_OUTOD 0x04 +#define DA9063_GPIO13_TYPE_GPO 0x04 +#define DA9063_GPIO13_NO_WAKEUP 0x80 + +/* DA9063_REG_GPIO_14_15 (addr=0x1C) */ +#define DA9063_GPIO14_PIN_MASK 0x03 +#define DA9063_GPIO14_PIN_GPO_OD 0x00 +#define DA9063_GPIO14_PIN_GPI 0x01 +#define DA9063_GPIO14_PIN_HS2DATA 0x02 +#define DA9063_GPIO14_PIN_GPO 0x03 +#define DA9063_GPIO14_TYPE 0x04 +#define DA9063_GPIO14_TYPE_GPI_ACT_LOW 0x00 +#define DA9063_GPIO14_TYPE_GPO_VDD_IO1 0x00 +#define DA9063_GPIO14_TYPE_GPI_ACT_HIGH 0x04 +#define DA9063_GPIO14_TYPE_GPO_VDD_IO2 0x04 +#define DA9063_GPIO14_NO_WAKEUP 0x08 +#define DA9063_GPIO15_PIN_MASK 0x30 +#define DA9063_GPIO15_PIN_GPO_OD 0x00 +#define DA9063_GPIO15_PIN_GPI 0x10 +#define DA9063_GPIO15_PIN_GPO 0x30 +#define DA9063_GPIO15_TYPE 0x40 +#define DA9063_GPIO15_TYPE_GPFB1_OUT 0x00 +#define DA9063_GPIO15_TYPE_GPI 0x00 +#define DA9063_GPIO15_TYPE_GPFB1_OUTOD 0x04 +#define DA9063_GPIO15_TYPE_GPO 0x04 +#define DA9063_GPIO15_NO_WAKEUP 0x80 + +/* DA9063_REG_GPIO_MODE0_7 (addr=0x1D) */ +#define DA9063_GPIO0_MODE 0x01 +#define DA9063_GPIO1_MODE 0x02 +#define DA9063_GPIO2_MODE 0x04 +#define DA9063_GPIO3_MODE 0x08 +#define DA9063_GPIO4_MODE 0x10 +#define DA9063_GPIO5_MODE 0x20 +#define DA9063_GPIO6_MODE 0x40 +#define DA9063_GPIO7_MODE 0x80 + +/* DA9063_REG_GPIO_MODE8_15 (addr=0x1E) */ +#define DA9063_GPIO8_MODE 0x01 +#define DA9063_GPIO9_MODE 0x02 +#define DA9063_GPIO10_MODE 0x04 +#define DA9063_GPIO11_MODE 0x08 +#define DA9063_GPIO11_MODE_LED_ACT_HIGH 0x00 +#define DA9063_GPIO11_MODE_LED_ACT_LOW 0x08 +#define DA9063_GPIO12_MODE 0x10 +#define DA9063_GPIO13_MODE 0x20 +#define DA9063_GPIO14_MODE 0x40 +#define DA9063_GPIO14_MODE_LED_ACT_HIGH 0x00 +#define DA9063_GPIO14_MODE_LED_ACT_LOW 0x40 +#define DA9063_GPIO15_MODE 0x80 +#define DA9063_GPIO15_MODE_LED_ACT_HIGH 0x00 +#define DA9063_GPIO15_MODE_LED_ACT_LOW 0x80 + +/* DA9063_REG_SWITCH_CONT (addr=0x1F) */ +#define DA9063_CORE_SW_GPI_MASK 0x03 +#define DA9063_CORE_SW_GPI_OFF 0x00 +#define DA9063_CORE_SW_GPI_GPIO1 0x01 +#define DA9063_CORE_SW_GPI_GPIO2 0x02 +#define DA9063_CORE_SW_GPI_GPIO13 0x03 +#define DA9063_PERI_SW_GPI_MASK 0x0C +#define DA9063_PERI_SW_GPI_OFF 0x00 +#define DA9063_PERI_SW_GPI_GPIO1 0x04 +#define DA9063_PERI_SW_GPI_GPIO2 0x08 +#define DA9063_PERI_SW_GPI_GPIO13 0x0C +#define DA9063_SWITCH_SR_MASK 0x30 +#define DA9063_SWITCH_SR_1MV 0x00 +#define DA9063_SWITCH_SR_5MV 0x10 +#define DA9063_SWITCH_SR_10MV 0x20 +#define DA9063_SWITCH_SR_50MV 0x30 +#define DA9063_CORE_SW_INTERNAL 0x40 +#define DA9063_CP_EN_MODE 0x80 + +/* DA9063_REGL_Bxxxx_CONT common bits (addr=0x20-0x25) */ +#define DA9063_BUCK_EN 0x01 +#define DA9063_BUCK_GPI_MASK 0x06 +#define DA9063_BUCK_GPI_OFF 0x00 +#define DA9063_BUCK_GPI_GPIO1 0x02 +#define DA9063_BUCK_GPI_GPIO2 0x04 +#define DA9063_BUCK_GPI_GPIO13 0x06 +#define DA9063_BUCK_CONF 0x08 +#define DA9063_VBUCK_GPI_MASK 0x60 +#define DA9063_VBUCK_GPI_OFF 0x00 +#define DA9063_VBUCK_GPI_GPIO1 0x20 +#define DA9063_VBUCK_GPI_GPIO2 0x40 +#define DA9063_VBUCK_GPI_GPIO13 0x60 + +/* DA9063_REG_BCORE1_CONT specific bits (addr=0x21) */ +#define DA9063_CORE_SW_EN 0x10 +#define DA9063_CORE_SW_CONF 0x80 + +/* DA9063_REG_BPERI_CONT specific bits (addr=0x25) */ +#define DA9063_PERI_SW_EN 0x10 +#define DA9063_PERI_SW_CONF 0x80 + +/* DA9063_REG_LDOx_CONT common bits 
(addr=0x26-0x30) */ +#define DA9063_LDO_EN 0x01 +#define DA9063_LDO_GPI_MASK 0x06 +#define DA9063_LDO_GPI_OFF 0x00 +#define DA9063_LDO_GPI_GPIO1 0x02 +#define DA9063_LDO_GPI_GPIO2 0x04 +#define DA9063_LDO_GPI_GPIO13 0x06 +#define DA9063_LDO_PD_DIS 0x08 +#define DA9063_VLDO_GPI_MASK 0x60 +#define DA9063_VLDO_GPI_OFF 0x00 +#define DA9063_VLDO_GPI_GPIO1 0x20 +#define DA9063_VLDO_GPI_GPIO2 0x40 +#define DA9063_VLDO_GPI_GPIO13 0x60 +#define DA9063_LDO_CONF 0x80 + +/* DA9063_REG_LDO5_CONT specific bits (addr=0x2A) */ +#define DA9063_VLDO5_SEL 0x10 + +/* DA9063_REG_LDO6_CONT specific bits (addr=0x2B) */ +#define DA9063_VLDO6_SEL 0x10 + +/* DA9063_REG_LDO7_CONT specific bits (addr=0x2C) */ +#define DA9063_VLDO7_SEL 0x10 + +/* DA9063_REG_LDO8_CONT specific bits (addr=0x2D) */ +#define DA9063_VLDO8_SEL 0x10 + +/* DA9063_REG_LDO9_CONT specific bits (addr=0x2E) */ +#define DA9063_VLDO9_SEL 0x10 + +/* DA9063_REG_LDO10_CONT specific bits (addr=0x2F) */ +#define DA9063_VLDO10_SEL 0x10 + +/* DA9063_REG_LDO11_CONT specific bits (addr=0x30) */ +#define DA9063_VLDO11_SEL 0x10 + +/* DA9063_REG_VIB (addr=0x31) */ +#define DA9063_VIB_SET_MASK 0x3F +#define DA9063_VIB_SET_OFF 0 +#define DA9063_VIB_SET_MAX 0x3F + +/* DA9063_REG_DVC_1 (addr=0x32) */ +#define DA9063_VBCORE1_SEL 0x01 +#define DA9063_VBCORE2_SEL 0x02 +#define DA9063_VBPRO_SEL 0x04 +#define DA9063_VBMEM_SEL 0x08 +#define DA9063_VBPERI_SEL 0x10 +#define DA9063_VLDO1_SEL 0x20 +#define DA9063_VLDO2_SEL 0x40 +#define DA9063_VLDO3_SEL 0x80 + +/* DA9063_REG_DVC_2 (addr=0x33) */ +#define DA9063_VBIO_SEL 0x01 +#define DA9063_VLDO4_SEL 0x80 + +/* DA9063_REG_ADC_MAN (addr=0x34) */ +#define DA9063_ADC_MUX_MASK 0x0F +#define DA9063_ADC_MUX_VSYS 0x00 +#define DA9063_ADC_MUX_ADCIN1 0x01 +#define DA9063_ADC_MUX_ADCIN2 0x02 +#define DA9063_ADC_MUX_ADCIN3 0x03 +#define DA9063_ADC_MUX_T_SENSE 0x04 +#define DA9063_ADC_MUX_VBBAT 0x05 +#define DA9063_ADC_MUX_LDO_G1 0x08 +#define DA9063_ADC_MUX_LDO_G2 0x09 +#define DA9063_ADC_MUX_LDO_G3 0x0A +#define DA9063_ADC_MAN 0x10 +#define DA9063_ADC_MODE 0x20 + +/* DA9063_REG_ADC_CONT (addr=0x35) */ +#define DA9063_ADC_AUTO_VSYS_EN 0x01 +#define DA9063_ADC_AUTO_AD1_EN 0x02 +#define DA9063_ADC_AUTO_AD2_EN 0x04 +#define DA9063_ADC_AUTO_AD3_EN 0x08 +#define DA9063_ADC_AD1_ISRC_EN 0x10 +#define DA9063_ADC_AD2_ISRC_EN 0x20 +#define DA9063_ADC_AD3_ISRC_EN 0x40 +#define DA9063_COMP1V2_EN 0x80 + +/* DA9063_REG_VSYS_MON (addr=0x36) */ +#define DA9063_VSYS_VAL_MASK 0xFF +#define DA9063_VSYS_VAL_BASE 0x00 + +/* DA9063_REG_ADC_RES_L (addr=0x37) */ +#define DA9063_ADC_RES_L_BITS 2 +#define DA9063_ADC_RES_L_MASK 0xC0 + +/* DA9063_REG_ADC_RES_H (addr=0x38) */ +#define DA9063_ADC_RES_M_BITS 8 +#define DA9063_ADC_RES_M_MASK 0xFF + +/* DA9063_REG_(xxx_RES/ADC_RES_H) (addr=0x39-0x3F) */ +#define DA9063_ADC_VAL_MASK 0xFF + +/* DA9063_REG_COUNT_S (addr=0x40) */ +#define DA9063_RTC_READ 0x80 +#define DA9063_COUNT_SEC_MASK 0x3F + +/* DA9063_REG_COUNT_MI (addr=0x41) */ +#define DA9063_COUNT_MIN_MASK 0x3F + +/* DA9063_REG_COUNT_H (addr=0x42) */ +#define DA9063_COUNT_HOUR_MASK 0x1F + +/* DA9063_REG_COUNT_D (addr=0x43) */ +#define DA9063_COUNT_DAY_MASK 0x1F + +/* DA9063_REG_COUNT_MO (addr=0x44) */ +#define DA9063_COUNT_MONTH_MASK 0x0F + +/* DA9063_REG_COUNT_Y (addr=0x45) */ +#define DA9063_COUNT_YEAR_MASK 0x3F +#define DA9063_MONITOR 0x40 + +/* DA9063_REG_ALARM_S (addr=0x46) */ +#define DA9063_BB_ALARM_S_MASK 0x3F +#define DA9063_ALARM_STATUS_ALARM 0x80 +#define DA9063_ALARM_STATUS_TICK 0x40 +/* DA9063_REG_ALARM_MI (addr=0x47) */ +#define 
DA9063_ALARM_MIN_MASK 0x3F + +/* DA9063_REG_ALARM_H (addr=0x48) */ +#define DA9063_ALARM_HOUR_MASK 0x1F + +/* DA9063_REG_ALARM_D (addr=0x49) */ +#define DA9063_ALARM_DAY_MASK 0x1F + +/* DA9063_REG_ALARM_MO (addr=0x4A) */ +#define DA9063_TICK_WAKE 0x20 +#define DA9063_TICK_TYPE 0x10 +#define DA9063_TICK_TYPE_SEC 0x00 +#define DA9063_TICK_TYPE_MIN 0x10 +#define DA9063_ALARM_MONTH_MASK 0x0F + +/* DA9063_REG_ALARM_Y (addr=0x4B) */ +#define DA9063_TICK_ON 0x80 +#define DA9063_ALARM_ON 0x40 +#define DA9063_ALARM_YEAR_MASK 0x3F + +/* DA9063_REG_WAIT (addr=0x97)*/ +#define DA9063_REG_WAIT_TIME_MASK 0xF +#define DA9063_WAIT_TIME_0_US 0x0 +#define DA9063_WAIT_TIME_512_US 0x1 +#define DA9063_WAIT_TIME_1_MS 0x2 +#define DA9063_WAIT_TIME_2_MS 0x3 +#define DA9063_WAIT_TIME_4_1_MS 0x4 +#define DA9063_WAIT_TIME_8_2_MS 0x5 +#define DA9063_WAIT_TIME_16_4_MS 0x6 +#define DA9063_WAIT_TIME_32_8_MS 0x7 +#define DA9063_WAIT_TIME_65_5_MS 0x8 +#define DA9063_WAIT_TIME_128_MS 0x9 +#define DA9063_WAIT_TIME_256_MS 0xA +#define DA9063_WAIT_TIME_512_MS 0xB +#define DA9063_WAIT_TIME_1_S 0xC +#define DA9063_WAIT_TIME_2_1_S 0xD + +/* DA9063_REG_EN_32K (addr=0x98)*/ +#define DA9063_STABILIZ_TIME_MASK 0x7 +#define DA9063_CRYSTAL 0x08 +#define DA9063_DELAY_MODE 0x10 +#define DA9063_OUT_CLOCK 0x20 +#define DA9063_RTC_CLOCK 0x40 +#define DA9063_OUT_32K_EN 0x80 + +/* DA9063_REG_CHIP_VARIANT */ +#define DA9063_CHIP_VARIANT_SHIFT 4 + +/* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */ +#define DA9063_BIO_ILIM_MASK 0x0F +#define DA9063_BMEM_ILIM_MASK 0xF0 + +/* DA9063_REG_BUCK_ILIM_B (addr=0x9B) */ +#define DA9063_BPRO_ILIM_MASK 0x0F +#define DA9063_BPERI_ILIM_MASK 0xF0 + +/* DA9063_REG_BUCK_ILIM_C (addr=0x9C) */ +#define DA9063_BCORE1_ILIM_MASK 0x0F +#define DA9063_BCORE2_ILIM_MASK 0xF0 + +/* DA9063_REG_Bxxxx_CFG common bits (addr=0x9D-0xA2) */ +#define DA9063_BUCK_FB_MASK 0x07 +#define DA9063_BUCK_PD_DIS_MASK 0x20 +#define DA9063_BUCK_MODE_MASK 0xC0 +#define DA9063_BUCK_MODE_MANUAL 0x00 +#define DA9063_BUCK_MODE_SLEEP 0x40 +#define DA9063_BUCK_MODE_SYNC 0x80 +#define DA9063_BUCK_MODE_AUTO 0xC0 + +/* DA9063_REG_BPRO_CFG (addr=0x9F) */ +#define DA9063_BPRO_VTTR_EN 0x08 +#define DA9063_BPRO_VTT_EN 0x10 + +/* DA9063_REG_VBxxxx_A/B (addr=0xA3-0xA8, 0xB4-0xB9) */ +#define DA9063_VBUCK_MASK 0x7F +#define DA9063_VBUCK_BIAS 0 +#define DA9063_BUCK_SL 0x80 + +/* DA9063_REG_VLDOx_A/B (addr=0xA9-0x3, 0xBA-0xC4) */ +#define DA9063_LDO_SL 0x80 + +/* DA9063_REG_VLDO1_A/B (addr=0xA9, 0xBA) */ +#define DA9063_VLDO1_MASK 0x3F +#define DA9063_VLDO1_BIAS 0 + +/* DA9063_REG_VLDO2_A/B (addr=0xAA, 0xBB) */ +#define DA9063_VLDO2_MASK 0x3F +#define DA9063_VLDO2_BIAS 0 + +/* DA9063_REG_VLDO3_A/B (addr=0xAB, 0xBC) */ +#define DA9063_VLDO3_MASK 0x7F +#define DA9063_VLDO3_BIAS 0 + +/* DA9063_REG_VLDO4_A/B (addr=0xAC, 0xBD) */ +#define DA9063_VLDO4_MASK 0x7F +#define DA9063_VLDO4_BIAS 0 + +/* DA9063_REG_VLDO5_A/B (addr=0xAD, 0xBE) */ +#define DA9063_VLDO5_MASK 0x3F +#define DA9063_VLDO5_BIAS 2 + +/* DA9063_REG_VLDO6_A/B (addr=0xAE, 0xBF) */ +#define DA9063_VLDO6_MASK 0x3F +#define DA9063_VLDO6_BIAS 2 + +/* DA9063_REG_VLDO7_A/B (addr=0xAF, 0xC0) */ +#define DA9063_VLDO7_MASK 0x3F +#define DA9063_VLDO7_BIAS 2 + +/* DA9063_REG_VLDO8_A/B (addr=0xB0, 0xC1) */ +#define DA9063_VLDO8_MASK 0x3F +#define DA9063_VLDO8_BIAS 2 + +/* DA9063_REG_VLDO9_A/B (addr=0xB1, 0xC2) */ +#define DA9063_VLDO9_MASK 0x3F +#define DA9063_VLDO9_BIAS 3 + +/* DA9063_REG_VLDO10_A/B (addr=0xB2, 0xC3) */ +#define DA9063_VLDO10_MASK 0x3F +#define DA9063_VLDO10_BIAS 2 + +/* DA9063_REG_VLDO11_A/B 
(addr=0xB3, 0xC4) */ +#define DA9063_VLDO11_MASK 0x3F +#define DA9063_VLDO11_BIAS 2 + +/* DA9063_REG_GPO11_LED (addr=0xC6) */ +/* DA9063_REG_GPO14_LED (addr=0xC7) */ +/* DA9063_REG_GPO15_LED (addr=0xC8) */ +#define DA9063_GPIO_DIM 0x80 +#define DA9063_GPIO_PWM_MASK 0x7F + +/* DA9063_REG_CONFIG_H (addr=0x10D) */ +#define DA9063_PWM_CLK_MASK 0x01 +#define DA9063_PWM_CLK_PWM2MHZ 0x00 +#define DA9063_PWM_CLK_PWM1MHZ 0x01 +#define DA9063_LDO8_MODE_MASK 0x02 +#define DA9063_LDO8_MODE_LDO 0 +#define DA9063_LDO8_MODE_VIBR 0x02 +#define DA9063_MERGE_SENSE_MASK 0x04 +#define DA9063_MERGE_SENSE_GP_FB2 0x00 +#define DA9063_MERGE_SENSE_GPIO4 0x04 +#define DA9063_BCORE_MERGE 0x08 +#define DA9063_BPRO_OD 0x10 +#define DA9063_BCORE2_OD 0x20 +#define DA9063_BCORE1_OD 0x40 +#define DA9063_BUCK_MERGE 0x80 + +/* DA9063_REG_CONFIG_I (addr=0x10E) */ +#define DA9063_NONKEY_PIN_MASK 0x03 +#define DA9063_NONKEY_PIN_PORT 0x00 +#define DA9063_NONKEY_PIN_SWDOWN 0x01 +#define DA9063_NONKEY_PIN_AUTODOWN 0x02 +#define DA9063_NONKEY_PIN_AUTOFLPRT 0x03 + +/* DA9063_REG_MON_REG_5 (addr=0x116) */ +#define DA9063_MON_A8_IDX_MASK 0x07 +#define DA9063_MON_A8_IDX_NONE 0x00 +#define DA9063_MON_A8_IDX_BCORE1 0x01 +#define DA9063_MON_A8_IDX_BCORE2 0x02 +#define DA9063_MON_A8_IDX_BPRO 0x03 +#define DA9063_MON_A8_IDX_LDO3 0x04 +#define DA9063_MON_A8_IDX_LDO4 0x05 +#define DA9063_MON_A8_IDX_LDO11 0x06 +#define DA9063_MON_A9_IDX_MASK 0x70 +#define DA9063_MON_A9_IDX_NONE 0x00 +#define DA9063_MON_A9_IDX_BIO 0x01 +#define DA9063_MON_A9_IDX_BMEM 0x02 +#define DA9063_MON_A9_IDX_BPERI 0x03 +#define DA9063_MON_A9_IDX_LDO1 0x04 +#define DA9063_MON_A9_IDX_LDO2 0x05 +#define DA9063_MON_A9_IDX_LDO5 0x06 + +/* DA9063_REG_MON_REG_6 (addr=0x117) */ +#define DA9063_MON_A10_IDX_MASK 0x07 +#define DA9063_MON_A10_IDX_NONE 0x00 +#define DA9063_MON_A10_IDX_LDO6 0x01 +#define DA9063_MON_A10_IDX_LDO7 0x02 +#define DA9063_MON_A10_IDX_LDO8 0x03 +#define DA9063_MON_A10_IDX_LDO9 0x04 +#define DA9063_MON_A10_IDX_LDO10 0x05 + +#endif /* _DA9063_REG_H */ diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h new file mode 100644 index 000000000..1bf50caeb --- /dev/null +++ b/include/linux/mfd/da9150/core.h @@ -0,0 +1,85 @@ +/* + * DA9150 MFD Driver - Core Data + * + * Copyright (c) 2014 Dialog Semiconductor + * + * Author: Adam Thomson + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __DA9150_CORE_H +#define __DA9150_CORE_H + +#include +#include +#include +#include + +/* I2C address paging */ +#define DA9150_REG_PAGE_SHIFT 8 +#define DA9150_REG_PAGE_MASK 0xFF + +/* IRQs */ +#define DA9150_NUM_IRQ_REGS 4 +#define DA9150_IRQ_VBUS 0 +#define DA9150_IRQ_CHG 1 +#define DA9150_IRQ_TCLASS 2 +#define DA9150_IRQ_TJUNC 3 +#define DA9150_IRQ_VFAULT 4 +#define DA9150_IRQ_CONF 5 +#define DA9150_IRQ_DAT 6 +#define DA9150_IRQ_DTYPE 7 +#define DA9150_IRQ_ID 8 +#define DA9150_IRQ_ADP 9 +#define DA9150_IRQ_SESS_END 10 +#define DA9150_IRQ_SESS_VLD 11 +#define DA9150_IRQ_FG 12 +#define DA9150_IRQ_GP 13 +#define DA9150_IRQ_TBAT 14 +#define DA9150_IRQ_GPIOA 15 +#define DA9150_IRQ_GPIOB 16 +#define DA9150_IRQ_GPIOC 17 +#define DA9150_IRQ_GPIOD 18 +#define DA9150_IRQ_GPADC 19 +#define DA9150_IRQ_WKUP 20 + +/* I2C sub-device address */ +#define DA9150_QIF_I2C_ADDR_LSB 0x5 + +struct da9150_fg_pdata { + u32 update_interval; /* msecs */ + u8 warn_soc_lvl; /* % value */ + u8 crit_soc_lvl; /* % value */ +}; + +struct da9150_pdata { + int irq_base; + struct da9150_fg_pdata *fg_pdata; +}; + +struct da9150 { + struct device *dev; + struct regmap *regmap; + struct i2c_client *core_qif; + + struct regmap_irq_chip_data *regmap_irq_data; + int irq; + int irq_base; +}; + +/* Device I/O - Query Interface for FG and standard register access */ +void da9150_read_qif(struct da9150 *da9150, u8 addr, int count, u8 *buf); +void da9150_write_qif(struct da9150 *da9150, u8 addr, int count, const u8 *buf); + +u8 da9150_reg_read(struct da9150 *da9150, u16 reg); +void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val); +void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val); + +void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf); +void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf); + +#endif /* __DA9150_CORE_H */ diff --git a/include/linux/mfd/da9150/registers.h b/include/linux/mfd/da9150/registers.h new file mode 100644 index 000000000..27ca6ee4d --- /dev/null +++ b/include/linux/mfd/da9150/registers.h @@ -0,0 +1,1155 @@ +/* + * DA9150 MFD Driver - Registers + * + * Copyright (c) 2014 Dialog Semiconductor + * + * Author: Adam Thomson + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __DA9150_REGISTERS_H +#define __DA9150_REGISTERS_H + +#include + +/* Registers */ +#define DA9150_PAGE_CON 0x000 +#define DA9150_STATUS_A 0x068 +#define DA9150_STATUS_B 0x069 +#define DA9150_STATUS_C 0x06A +#define DA9150_STATUS_D 0x06B +#define DA9150_STATUS_E 0x06C +#define DA9150_STATUS_F 0x06D +#define DA9150_STATUS_G 0x06E +#define DA9150_STATUS_H 0x06F +#define DA9150_STATUS_I 0x070 +#define DA9150_STATUS_J 0x071 +#define DA9150_STATUS_K 0x072 +#define DA9150_STATUS_L 0x073 +#define DA9150_STATUS_N 0x074 +#define DA9150_FAULT_LOG_A 0x076 +#define DA9150_FAULT_LOG_B 0x077 +#define DA9150_EVENT_E 0x078 +#define DA9150_EVENT_F 0x079 +#define DA9150_EVENT_G 0x07A +#define DA9150_EVENT_H 0x07B +#define DA9150_IRQ_MASK_E 0x07C +#define DA9150_IRQ_MASK_F 0x07D +#define DA9150_IRQ_MASK_G 0x07E +#define DA9150_IRQ_MASK_H 0x07F +#define DA9150_PAGE_CON_1 0x080 +#define DA9150_CONFIG_A 0x0E0 +#define DA9150_CONFIG_B 0x0E1 +#define DA9150_CONFIG_C 0x0E2 +#define DA9150_CONFIG_D 0x0E3 +#define DA9150_CONFIG_E 0x0E4 +#define DA9150_CONTROL_A 0x0E5 +#define DA9150_CONTROL_B 0x0E6 +#define DA9150_CONTROL_C 0x0E7 +#define DA9150_GPIO_A_B 0x0E8 +#define DA9150_GPIO_C_D 0x0E9 +#define DA9150_GPIO_MODE_CONT 0x0EA +#define DA9150_GPIO_CTRL_B 0x0EB +#define DA9150_GPIO_CTRL_A 0x0EC +#define DA9150_GPIO_CTRL_C 0x0ED +#define DA9150_GPIO_CFG_A 0x0EE +#define DA9150_GPIO_CFG_B 0x0EF +#define DA9150_GPIO_CFG_C 0x0F0 +#define DA9150_GPADC_MAN 0x0F2 +#define DA9150_GPADC_RES_A 0x0F4 +#define DA9150_GPADC_RES_B 0x0F5 +#define DA9150_PAGE_CON_2 0x100 +#define DA9150_OTP_CONT_SHARED 0x101 +#define DA9150_INTERFACE_SHARED 0x105 +#define DA9150_CONFIG_A_SHARED 0x106 +#define DA9150_CONFIG_D_SHARED 0x109 +#define DA9150_ADETVB_CFG_C 0x150 +#define DA9150_ADETD_STAT 0x151 +#define DA9150_ADET_CMPSTAT 0x152 +#define DA9150_ADET_CTRL_A 0x153 +#define DA9150_ADETVB_CFG_B 0x154 +#define DA9150_ADETVB_CFG_A 0x155 +#define DA9150_ADETAC_CFG_A 0x156 +#define DA9150_ADDETAC_CFG_B 0x157 +#define DA9150_ADETAC_CFG_C 0x158 +#define DA9150_ADETAC_CFG_D 0x159 +#define DA9150_ADETVB_CFG_D 0x15A +#define DA9150_ADETID_CFG_A 0x15B +#define DA9150_ADET_RID_PT_CHG_H 0x15C +#define DA9150_ADET_RID_PT_CHG_L 0x15D +#define DA9150_PPR_TCTR_B 0x160 +#define DA9150_PPR_BKCTRL_A 0x163 +#define DA9150_PPR_BKCFG_A 0x164 +#define DA9150_PPR_BKCFG_B 0x165 +#define DA9150_PPR_CHGCTRL_A 0x166 +#define DA9150_PPR_CHGCTRL_B 0x167 +#define DA9150_PPR_CHGCTRL_C 0x168 +#define DA9150_PPR_TCTR_A 0x169 +#define DA9150_PPR_CHGCTRL_D 0x16A +#define DA9150_PPR_CHGCTRL_E 0x16B +#define DA9150_PPR_CHGCTRL_F 0x16C +#define DA9150_PPR_CHGCTRL_G 0x16D +#define DA9150_PPR_CHGCTRL_H 0x16E +#define DA9150_PPR_CHGCTRL_I 0x16F +#define DA9150_PPR_CHGCTRL_J 0x170 +#define DA9150_PPR_CHGCTRL_K 0x171 +#define DA9150_PPR_CHGCTRL_L 0x172 +#define DA9150_PPR_CHGCTRL_M 0x173 +#define DA9150_PPR_THYST_A 0x174 +#define DA9150_PPR_THYST_B 0x175 +#define DA9150_PPR_THYST_C 0x176 +#define DA9150_PPR_THYST_D 0x177 +#define DA9150_PPR_THYST_E 0x178 +#define DA9150_PPR_THYST_F 0x179 +#define DA9150_PPR_THYST_G 0x17A +#define DA9150_PAGE_CON_3 0x180 +#define DA9150_PAGE_CON_4 0x200 +#define DA9150_PAGE_CON_5 0x280 +#define DA9150_PAGE_CON_6 0x300 +#define DA9150_COREBTLD_STAT_A 0x302 +#define DA9150_COREBTLD_CTRL_A 0x303 +#define DA9150_CORE_CONFIG_A 0x304 +#define DA9150_CORE_CONFIG_C 0x305 +#define DA9150_CORE_CONFIG_B 0x306 +#define DA9150_CORE_CFG_DATA_A 0x307 +#define DA9150_CORE_CFG_DATA_B 0x308 +#define DA9150_CORE_CMD_A 0x309 +#define DA9150_CORE_DATA_A 0x30A 
+#define DA9150_CORE_DATA_B 0x30B +#define DA9150_CORE_DATA_C 0x30C +#define DA9150_CORE_DATA_D 0x30D +#define DA9150_CORE2WIRE_STAT_A 0x310 +#define DA9150_CORE2WIRE_CTRL_A 0x311 +#define DA9150_FW_CTRL_A 0x312 +#define DA9150_FW_CTRL_C 0x313 +#define DA9150_FW_CTRL_D 0x314 +#define DA9150_FG_CTRL_A 0x315 +#define DA9150_FG_CTRL_B 0x316 +#define DA9150_FW_CTRL_E 0x317 +#define DA9150_FW_CTRL_B 0x318 +#define DA9150_GPADC_CMAN 0x320 +#define DA9150_GPADC_CRES_A 0x322 +#define DA9150_GPADC_CRES_B 0x323 +#define DA9150_CC_CFG_A 0x328 +#define DA9150_CC_CFG_B 0x329 +#define DA9150_CC_ICHG_RES_A 0x32A +#define DA9150_CC_ICHG_RES_B 0x32B +#define DA9150_CC_IAVG_RES_A 0x32C +#define DA9150_CC_IAVG_RES_B 0x32D +#define DA9150_TAUX_CTRL_A 0x330 +#define DA9150_TAUX_RELOAD_H 0x332 +#define DA9150_TAUX_RELOAD_L 0x333 +#define DA9150_TAUX_VALUE_H 0x334 +#define DA9150_TAUX_VALUE_L 0x335 +#define DA9150_AUX_DATA_0 0x338 +#define DA9150_AUX_DATA_1 0x339 +#define DA9150_AUX_DATA_2 0x33A +#define DA9150_AUX_DATA_3 0x33B +#define DA9150_BIF_CTRL 0x340 +#define DA9150_TBAT_CTRL_A 0x342 +#define DA9150_TBAT_CTRL_B 0x343 +#define DA9150_TBAT_RES_A 0x344 +#define DA9150_TBAT_RES_B 0x345 + +/* DA9150_PAGE_CON = 0x000 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_I2C_PAGE_SHIFT 1 +#define DA9150_I2C_PAGE_MASK (0x1f << 1) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_STATUS_A = 0x068 */ +#define DA9150_WKUP_STAT_SHIFT 2 +#define DA9150_WKUP_STAT_MASK (0x0f << 2) +#define DA9150_SLEEP_STAT_SHIFT 6 +#define DA9150_SLEEP_STAT_MASK (0x03 << 6) + +/* DA9150_STATUS_B = 0x069 */ +#define DA9150_VFAULT_STAT_SHIFT 0 +#define DA9150_VFAULT_STAT_MASK BIT(0) +#define DA9150_TFAULT_STAT_SHIFT 1 +#define DA9150_TFAULT_STAT_MASK BIT(1) + +/* DA9150_STATUS_C = 0x06A */ +#define DA9150_VDD33_STAT_SHIFT 0 +#define DA9150_VDD33_STAT_MASK BIT(0) +#define DA9150_VDD33_SLEEP_SHIFT 1 +#define DA9150_VDD33_SLEEP_MASK BIT(1) +#define DA9150_LFOSC_STAT_SHIFT 7 +#define DA9150_LFOSC_STAT_MASK BIT(7) + +/* DA9150_STATUS_D = 0x06B */ +#define DA9150_GPIOA_STAT_SHIFT 0 +#define DA9150_GPIOA_STAT_MASK BIT(0) +#define DA9150_GPIOB_STAT_SHIFT 1 +#define DA9150_GPIOB_STAT_MASK BIT(1) +#define DA9150_GPIOC_STAT_SHIFT 2 +#define DA9150_GPIOC_STAT_MASK BIT(2) +#define DA9150_GPIOD_STAT_SHIFT 3 +#define DA9150_GPIOD_STAT_MASK BIT(3) + +/* DA9150_STATUS_E = 0x06C */ +#define DA9150_DTYPE_SHIFT 0 +#define DA9150_DTYPE_MASK (0x1f << 0) +#define DA9150_DTYPE_DT_NIL (0x00 << 0) +#define DA9150_DTYPE_DT_USB_OTG BIT(0) +#define DA9150_DTYPE_DT_USB_STD (0x02 << 0) +#define DA9150_DTYPE_DT_USB_CHG (0x03 << 0) +#define DA9150_DTYPE_DT_ACA_CHG (0x04 << 0) +#define DA9150_DTYPE_DT_ACA_OTG (0x05 << 0) +#define DA9150_DTYPE_DT_ACA_DOC (0x06 << 0) +#define DA9150_DTYPE_DT_DED_CHG (0x07 << 0) +#define DA9150_DTYPE_DT_CR5_CHG (0x08 << 0) +#define DA9150_DTYPE_DT_CR4_CHG (0x0c << 0) +#define DA9150_DTYPE_DT_PT_CHG (0x11 << 0) +#define DA9150_DTYPE_DT_NN_ACC (0x16 << 0) +#define DA9150_DTYPE_DT_NN_CHG (0x17 << 0) + +/* DA9150_STATUS_F = 0x06D */ +#define DA9150_SESS_VLD_SHIFT 0 +#define DA9150_SESS_VLD_MASK BIT(0) +#define DA9150_ID_ERR_SHIFT 1 +#define DA9150_ID_ERR_MASK BIT(1) +#define DA9150_PT_CHG_SHIFT 2 +#define DA9150_PT_CHG_MASK BIT(2) + +/* DA9150_STATUS_G = 0x06E */ +#define DA9150_RID_SHIFT 0 +#define DA9150_RID_MASK (0xff << 0) + +/* DA9150_STATUS_H = 0x06F */ +#define DA9150_VBUS_STAT_SHIFT 0 +#define 
DA9150_VBUS_STAT_MASK (0x07 << 0) +#define DA9150_VBUS_STAT_OFF (0x00 << 0) +#define DA9150_VBUS_STAT_WAIT BIT(0) +#define DA9150_VBUS_STAT_CHG (0x02 << 0) +#define DA9150_VBUS_TRED_SHIFT 3 +#define DA9150_VBUS_TRED_MASK BIT(3) +#define DA9150_VBUS_DROP_STAT_SHIFT 4 +#define DA9150_VBUS_DROP_STAT_MASK (0x0f << 4) + +/* DA9150_STATUS_I = 0x070 */ +#define DA9150_VBUS_ISET_STAT_SHIFT 0 +#define DA9150_VBUS_ISET_STAT_MASK (0x1f << 0) +#define DA9150_VBUS_OT_SHIFT 7 +#define DA9150_VBUS_OT_MASK BIT(7) + +/* DA9150_STATUS_J = 0x071 */ +#define DA9150_CHG_STAT_SHIFT 0 +#define DA9150_CHG_STAT_MASK (0x0f << 0) +#define DA9150_CHG_STAT_OFF (0x00 << 0) +#define DA9150_CHG_STAT_SUSP BIT(0) +#define DA9150_CHG_STAT_ACT (0x02 << 0) +#define DA9150_CHG_STAT_PRE (0x03 << 0) +#define DA9150_CHG_STAT_CC (0x04 << 0) +#define DA9150_CHG_STAT_CV (0x05 << 0) +#define DA9150_CHG_STAT_FULL (0x06 << 0) +#define DA9150_CHG_STAT_TEMP (0x07 << 0) +#define DA9150_CHG_STAT_TIME (0x08 << 0) +#define DA9150_CHG_STAT_BAT (0x09 << 0) +#define DA9150_CHG_TEMP_SHIFT 4 +#define DA9150_CHG_TEMP_MASK (0x07 << 4) +#define DA9150_CHG_TEMP_UNDER (0x06 << 4) +#define DA9150_CHG_TEMP_OVER (0x07 << 4) +#define DA9150_CHG_IEND_STAT_SHIFT 7 +#define DA9150_CHG_IEND_STAT_MASK BIT(7) + +/* DA9150_STATUS_K = 0x072 */ +#define DA9150_CHG_IAV_H_SHIFT 0 +#define DA9150_CHG_IAV_H_MASK (0xff << 0) + +/* DA9150_STATUS_L = 0x073 */ +#define DA9150_CHG_IAV_L_SHIFT 5 +#define DA9150_CHG_IAV_L_MASK (0x07 << 5) + +/* DA9150_STATUS_N = 0x074 */ +#define DA9150_CHG_TIME_SHIFT 1 +#define DA9150_CHG_TIME_MASK BIT(1) +#define DA9150_CHG_TRED_SHIFT 2 +#define DA9150_CHG_TRED_MASK BIT(2) +#define DA9150_CHG_TJUNC_CLASS_SHIFT 3 +#define DA9150_CHG_TJUNC_CLASS_MASK (0x07 << 3) +#define DA9150_CHG_TJUNC_CLASS_6 (0x06 << 3) +#define DA9150_EBS_STAT_SHIFT 6 +#define DA9150_EBS_STAT_MASK BIT(6) +#define DA9150_CHG_BAT_REMOVED_SHIFT 7 +#define DA9150_CHG_BAT_REMOVED_MASK BIT(7) + +/* DA9150_FAULT_LOG_A = 0x076 */ +#define DA9150_TEMP_FAULT_SHIFT 0 +#define DA9150_TEMP_FAULT_MASK BIT(0) +#define DA9150_VSYS_FAULT_SHIFT 1 +#define DA9150_VSYS_FAULT_MASK BIT(1) +#define DA9150_START_FAULT_SHIFT 2 +#define DA9150_START_FAULT_MASK BIT(2) +#define DA9150_EXT_FAULT_SHIFT 3 +#define DA9150_EXT_FAULT_MASK BIT(3) +#define DA9150_POR_FAULT_SHIFT 4 +#define DA9150_POR_FAULT_MASK BIT(4) + +/* DA9150_FAULT_LOG_B = 0x077 */ +#define DA9150_VBUS_FAULT_SHIFT 0 +#define DA9150_VBUS_FAULT_MASK BIT(0) +#define DA9150_OTG_FAULT_SHIFT 1 +#define DA9150_OTG_FAULT_MASK BIT(1) + +/* DA9150_EVENT_E = 0x078 */ +#define DA9150_E_VBUS_SHIFT 0 +#define DA9150_E_VBUS_MASK BIT(0) +#define DA9150_E_CHG_SHIFT 1 +#define DA9150_E_CHG_MASK BIT(1) +#define DA9150_E_TCLASS_SHIFT 2 +#define DA9150_E_TCLASS_MASK BIT(2) +#define DA9150_E_TJUNC_SHIFT 3 +#define DA9150_E_TJUNC_MASK BIT(3) +#define DA9150_E_VFAULT_SHIFT 4 +#define DA9150_E_VFAULT_MASK BIT(4) +#define DA9150_EVENTS_H_SHIFT 5 +#define DA9150_EVENTS_H_MASK BIT(5) +#define DA9150_EVENTS_G_SHIFT 6 +#define DA9150_EVENTS_G_MASK BIT(6) +#define DA9150_EVENTS_F_SHIFT 7 +#define DA9150_EVENTS_F_MASK BIT(7) + +/* DA9150_EVENT_F = 0x079 */ +#define DA9150_E_CONF_SHIFT 0 +#define DA9150_E_CONF_MASK BIT(0) +#define DA9150_E_DAT_SHIFT 1 +#define DA9150_E_DAT_MASK BIT(1) +#define DA9150_E_DTYPE_SHIFT 3 +#define DA9150_E_DTYPE_MASK BIT(3) +#define DA9150_E_ID_SHIFT 4 +#define DA9150_E_ID_MASK BIT(4) +#define DA9150_E_ADP_SHIFT 5 +#define DA9150_E_ADP_MASK BIT(5) +#define DA9150_E_SESS_END_SHIFT 6 +#define DA9150_E_SESS_END_MASK BIT(6) +#define 
DA9150_E_SESS_VLD_SHIFT 7 +#define DA9150_E_SESS_VLD_MASK BIT(7) + +/* DA9150_EVENT_G = 0x07A */ +#define DA9150_E_FG_SHIFT 0 +#define DA9150_E_FG_MASK BIT(0) +#define DA9150_E_GP_SHIFT 1 +#define DA9150_E_GP_MASK BIT(1) +#define DA9150_E_TBAT_SHIFT 2 +#define DA9150_E_TBAT_MASK BIT(2) +#define DA9150_E_GPIOA_SHIFT 3 +#define DA9150_E_GPIOA_MASK BIT(3) +#define DA9150_E_GPIOB_SHIFT 4 +#define DA9150_E_GPIOB_MASK BIT(4) +#define DA9150_E_GPIOC_SHIFT 5 +#define DA9150_E_GPIOC_MASK BIT(5) +#define DA9150_E_GPIOD_SHIFT 6 +#define DA9150_E_GPIOD_MASK BIT(6) +#define DA9150_E_GPADC_SHIFT 7 +#define DA9150_E_GPADC_MASK BIT(7) + +/* DA9150_EVENT_H = 0x07B */ +#define DA9150_E_WKUP_SHIFT 0 +#define DA9150_E_WKUP_MASK BIT(0) + +/* DA9150_IRQ_MASK_E = 0x07C */ +#define DA9150_M_VBUS_SHIFT 0 +#define DA9150_M_VBUS_MASK BIT(0) +#define DA9150_M_CHG_SHIFT 1 +#define DA9150_M_CHG_MASK BIT(1) +#define DA9150_M_TJUNC_SHIFT 3 +#define DA9150_M_TJUNC_MASK BIT(3) +#define DA9150_M_VFAULT_SHIFT 4 +#define DA9150_M_VFAULT_MASK BIT(4) + +/* DA9150_IRQ_MASK_F = 0x07D */ +#define DA9150_M_CONF_SHIFT 0 +#define DA9150_M_CONF_MASK BIT(0) +#define DA9150_M_DAT_SHIFT 1 +#define DA9150_M_DAT_MASK BIT(1) +#define DA9150_M_DTYPE_SHIFT 3 +#define DA9150_M_DTYPE_MASK BIT(3) +#define DA9150_M_ID_SHIFT 4 +#define DA9150_M_ID_MASK BIT(4) +#define DA9150_M_ADP_SHIFT 5 +#define DA9150_M_ADP_MASK BIT(5) +#define DA9150_M_SESS_END_SHIFT 6 +#define DA9150_M_SESS_END_MASK BIT(6) +#define DA9150_M_SESS_VLD_SHIFT 7 +#define DA9150_M_SESS_VLD_MASK BIT(7) + +/* DA9150_IRQ_MASK_G = 0x07E */ +#define DA9150_M_FG_SHIFT 0 +#define DA9150_M_FG_MASK BIT(0) +#define DA9150_M_GP_SHIFT 1 +#define DA9150_M_GP_MASK BIT(1) +#define DA9150_M_TBAT_SHIFT 2 +#define DA9150_M_TBAT_MASK BIT(2) +#define DA9150_M_GPIOA_SHIFT 3 +#define DA9150_M_GPIOA_MASK BIT(3) +#define DA9150_M_GPIOB_SHIFT 4 +#define DA9150_M_GPIOB_MASK BIT(4) +#define DA9150_M_GPIOC_SHIFT 5 +#define DA9150_M_GPIOC_MASK BIT(5) +#define DA9150_M_GPIOD_SHIFT 6 +#define DA9150_M_GPIOD_MASK BIT(6) +#define DA9150_M_GPADC_SHIFT 7 +#define DA9150_M_GPADC_MASK BIT(7) + +/* DA9150_IRQ_MASK_H = 0x07F */ +#define DA9150_M_WKUP_SHIFT 0 +#define DA9150_M_WKUP_MASK BIT(0) + +/* DA9150_PAGE_CON_1 = 0x080 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_CONFIG_A = 0x0E0 */ +#define DA9150_RESET_DUR_SHIFT 0 +#define DA9150_RESET_DUR_MASK (0x03 << 0) +#define DA9150_RESET_EXT_SHIFT 2 +#define DA9150_RESET_EXT_MASK (0x03 << 2) +#define DA9150_START_MAX_SHIFT 4 +#define DA9150_START_MAX_MASK (0x03 << 4) +#define DA9150_PS_WAIT_EN_SHIFT 6 +#define DA9150_PS_WAIT_EN_MASK BIT(6) +#define DA9150_PS_DISABLE_DIRECT_SHIFT 7 +#define DA9150_PS_DISABLE_DIRECT_MASK BIT(7) + +/* DA9150_CONFIG_B = 0x0E1 */ +#define DA9150_VFAULT_ADJ_SHIFT 0 +#define DA9150_VFAULT_ADJ_MASK (0x0f << 0) +#define DA9150_VFAULT_HYST_SHIFT 4 +#define DA9150_VFAULT_HYST_MASK (0x07 << 4) +#define DA9150_VFAULT_EN_SHIFT 7 +#define DA9150_VFAULT_EN_MASK BIT(7) + +/* DA9150_CONFIG_C = 0x0E2 */ +#define DA9150_VSYS_MIN_SHIFT 3 +#define DA9150_VSYS_MIN_MASK (0x1f << 3) + +/* DA9150_CONFIG_D = 0x0E3 */ +#define DA9150_LFOSC_EXT_SHIFT 0 +#define DA9150_LFOSC_EXT_MASK BIT(0) +#define DA9150_VDD33_DWN_SHIFT 1 +#define DA9150_VDD33_DWN_MASK BIT(1) +#define DA9150_WKUP_PM_EN_SHIFT 2 +#define DA9150_WKUP_PM_EN_MASK BIT(2) +#define DA9150_WKUP_CE_SEL_SHIFT 3 +#define 
DA9150_WKUP_CE_SEL_MASK (0x03 << 3) +#define DA9150_WKUP_CLK32K_EN_SHIFT 5 +#define DA9150_WKUP_CLK32K_EN_MASK BIT(5) +#define DA9150_DISABLE_DEL_SHIFT 7 +#define DA9150_DISABLE_DEL_MASK BIT(7) + +/* DA9150_CONFIG_E = 0x0E4 */ +#define DA9150_PM_SPKSUP_DIS_SHIFT 0 +#define DA9150_PM_SPKSUP_DIS_MASK BIT(0) +#define DA9150_PM_MERGE_SHIFT 1 +#define DA9150_PM_MERGE_MASK BIT(1) +#define DA9150_PM_SR_OFF_SHIFT 2 +#define DA9150_PM_SR_OFF_MASK BIT(2) +#define DA9150_PM_TIMEOUT_EN_SHIFT 3 +#define DA9150_PM_TIMEOUT_EN_MASK BIT(3) +#define DA9150_PM_DLY_SEL_SHIFT 4 +#define DA9150_PM_DLY_SEL_MASK (0x07 << 4) +#define DA9150_PM_OUT_DLY_SEL_SHIFT 7 +#define DA9150_PM_OUT_DLY_SEL_MASK BIT(7) + +/* DA9150_CONTROL_A = 0x0E5 */ +#define DA9150_VDD33_SL_SHIFT 0 +#define DA9150_VDD33_SL_MASK BIT(0) +#define DA9150_VDD33_LPM_SHIFT 1 +#define DA9150_VDD33_LPM_MASK (0x03 << 1) +#define DA9150_VDD33_EN_SHIFT 3 +#define DA9150_VDD33_EN_MASK BIT(3) +#define DA9150_GPI_LPM_SHIFT 6 +#define DA9150_GPI_LPM_MASK BIT(6) +#define DA9150_PM_IF_LPM_SHIFT 7 +#define DA9150_PM_IF_LPM_MASK BIT(7) + +/* DA9150_CONTROL_B = 0x0E6 */ +#define DA9150_LPM_SHIFT 0 +#define DA9150_LPM_MASK BIT(0) +#define DA9150_RESET_SHIFT 1 +#define DA9150_RESET_MASK BIT(1) +#define DA9150_RESET_USRCONF_EN_SHIFT 2 +#define DA9150_RESET_USRCONF_EN_MASK BIT(2) + +/* DA9150_CONTROL_C = 0x0E7 */ +#define DA9150_DISABLE_SHIFT 0 +#define DA9150_DISABLE_MASK BIT(0) + +/* DA9150_GPIO_A_B = 0x0E8 */ +#define DA9150_GPIOA_PIN_SHIFT 0 +#define DA9150_GPIOA_PIN_MASK (0x07 << 0) +#define DA9150_GPIOA_PIN_GPI (0x00 << 0) +#define DA9150_GPIOA_PIN_GPO_OD BIT(0) +#define DA9150_GPIOA_TYPE_SHIFT 3 +#define DA9150_GPIOA_TYPE_MASK BIT(3) +#define DA9150_GPIOB_PIN_SHIFT 4 +#define DA9150_GPIOB_PIN_MASK (0x07 << 4) +#define DA9150_GPIOB_PIN_GPI (0x00 << 4) +#define DA9150_GPIOB_PIN_GPO_OD BIT(4) +#define DA9150_GPIOB_TYPE_SHIFT 7 +#define DA9150_GPIOB_TYPE_MASK BIT(7) + +/* DA9150_GPIO_C_D = 0x0E9 */ +#define DA9150_GPIOC_PIN_SHIFT 0 +#define DA9150_GPIOC_PIN_MASK (0x07 << 0) +#define DA9150_GPIOC_PIN_GPI (0x00 << 0) +#define DA9150_GPIOC_PIN_GPO_OD BIT(0) +#define DA9150_GPIOC_TYPE_SHIFT 3 +#define DA9150_GPIOC_TYPE_MASK BIT(3) +#define DA9150_GPIOD_PIN_SHIFT 4 +#define DA9150_GPIOD_PIN_MASK (0x07 << 4) +#define DA9150_GPIOD_PIN_GPI (0x00 << 4) +#define DA9150_GPIOD_PIN_GPO_OD BIT(4) +#define DA9150_GPIOD_TYPE_SHIFT 7 +#define DA9150_GPIOD_TYPE_MASK BIT(7) + +/* DA9150_GPIO_MODE_CONT = 0x0EA */ +#define DA9150_GPIOA_MODE_SHIFT 0 +#define DA9150_GPIOA_MODE_MASK BIT(0) +#define DA9150_GPIOB_MODE_SHIFT 1 +#define DA9150_GPIOB_MODE_MASK BIT(1) +#define DA9150_GPIOC_MODE_SHIFT 2 +#define DA9150_GPIOC_MODE_MASK BIT(2) +#define DA9150_GPIOD_MODE_SHIFT 3 +#define DA9150_GPIOD_MODE_MASK BIT(3) +#define DA9150_GPIOA_CONT_SHIFT 4 +#define DA9150_GPIOA_CONT_MASK BIT(4) +#define DA9150_GPIOB_CONT_SHIFT 5 +#define DA9150_GPIOB_CONT_MASK BIT(5) +#define DA9150_GPIOC_CONT_SHIFT 6 +#define DA9150_GPIOC_CONT_MASK BIT(6) +#define DA9150_GPIOD_CONT_SHIFT 7 +#define DA9150_GPIOD_CONT_MASK BIT(7) + +/* DA9150_GPIO_CTRL_B = 0x0EB */ +#define DA9150_WAKE_PIN_SHIFT 0 +#define DA9150_WAKE_PIN_MASK (0x03 << 0) +#define DA9150_WAKE_MODE_SHIFT 2 +#define DA9150_WAKE_MODE_MASK BIT(2) +#define DA9150_WAKE_CONT_SHIFT 3 +#define DA9150_WAKE_CONT_MASK BIT(3) +#define DA9150_WAKE_DLY_SHIFT 4 +#define DA9150_WAKE_DLY_MASK BIT(4) + +/* DA9150_GPIO_CTRL_A = 0x0EC */ +#define DA9150_GPIOA_ANAEN_SHIFT 0 +#define DA9150_GPIOA_ANAEN_MASK BIT(0) +#define DA9150_GPIOB_ANAEN_SHIFT 1 +#define 
DA9150_GPIOB_ANAEN_MASK BIT(1) +#define DA9150_GPIOC_ANAEN_SHIFT 2 +#define DA9150_GPIOC_ANAEN_MASK BIT(2) +#define DA9150_GPIOD_ANAEN_SHIFT 3 +#define DA9150_GPIOD_ANAEN_MASK BIT(3) +#define DA9150_GPIO_ANAEN 0x01 +#define DA9150_GPIO_ANAEN_MASK 0x0F +#define DA9150_CHGLED_PIN_SHIFT 5 +#define DA9150_CHGLED_PIN_MASK (0x07 << 5) + +/* DA9150_GPIO_CTRL_C = 0x0ED */ +#define DA9150_CHGBL_DUR_SHIFT 0 +#define DA9150_CHGBL_DUR_MASK (0x03 << 0) +#define DA9150_CHGBL_DBL_SHIFT 2 +#define DA9150_CHGBL_DBL_MASK BIT(2) +#define DA9150_CHGBL_FRQ_SHIFT 3 +#define DA9150_CHGBL_FRQ_MASK (0x03 << 3) +#define DA9150_CHGBL_FLKR_SHIFT 5 +#define DA9150_CHGBL_FLKR_MASK BIT(5) + +/* DA9150_GPIO_CFG_A = 0x0EE */ +#define DA9150_CE_LPM_DEB_SHIFT 0 +#define DA9150_CE_LPM_DEB_MASK (0x07 << 0) + +/* DA9150_GPIO_CFG_B = 0x0EF */ +#define DA9150_GPIOA_PUPD_SHIFT 0 +#define DA9150_GPIOA_PUPD_MASK BIT(0) +#define DA9150_GPIOB_PUPD_SHIFT 1 +#define DA9150_GPIOB_PUPD_MASK BIT(1) +#define DA9150_GPIOC_PUPD_SHIFT 2 +#define DA9150_GPIOC_PUPD_MASK BIT(2) +#define DA9150_GPIOD_PUPD_SHIFT 3 +#define DA9150_GPIOD_PUPD_MASK BIT(3) +#define DA9150_GPIO_PUPD_MASK (0xF << 0) +#define DA9150_GPI_DEB_SHIFT 4 +#define DA9150_GPI_DEB_MASK (0x07 << 4) +#define DA9150_LPM_EN_SHIFT 7 +#define DA9150_LPM_EN_MASK BIT(7) + +/* DA9150_GPIO_CFG_C = 0x0F0 */ +#define DA9150_GPI_V_SHIFT 0 +#define DA9150_GPI_V_MASK BIT(0) +#define DA9150_VDDIO_INT_SHIFT 1 +#define DA9150_VDDIO_INT_MASK BIT(1) +#define DA9150_FAULT_PIN_SHIFT 3 +#define DA9150_FAULT_PIN_MASK (0x07 << 3) +#define DA9150_FAULT_TYPE_SHIFT 6 +#define DA9150_FAULT_TYPE_MASK BIT(6) +#define DA9150_NIRQ_PUPD_SHIFT 7 +#define DA9150_NIRQ_PUPD_MASK BIT(7) + +/* DA9150_GPADC_MAN = 0x0F2 */ +#define DA9150_GPADC_EN_SHIFT 0 +#define DA9150_GPADC_EN_MASK BIT(0) +#define DA9150_GPADC_MUX_SHIFT 1 +#define DA9150_GPADC_MUX_MASK (0x1f << 1) + +/* DA9150_GPADC_RES_A = 0x0F4 */ +#define DA9150_GPADC_RES_H_SHIFT 0 +#define DA9150_GPADC_RES_H_MASK (0xff << 0) + +/* DA9150_GPADC_RES_B = 0x0F5 */ +#define DA9150_GPADC_RUN_SHIFT 0 +#define DA9150_GPADC_RUN_MASK BIT(0) +#define DA9150_GPADC_RES_L_SHIFT 6 +#define DA9150_GPADC_RES_L_MASK (0x03 << 6) +#define DA9150_GPADC_RES_L_BITS 2 + +/* DA9150_PAGE_CON_2 = 0x100 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_OTP_CONT_SHARED = 0x101 */ +#define DA9150_PC_DONE_SHIFT 3 +#define DA9150_PC_DONE_MASK BIT(3) + +/* DA9150_INTERFACE_SHARED = 0x105 */ +#define DA9150_IF_BASE_ADDR_SHIFT 4 +#define DA9150_IF_BASE_ADDR_MASK (0x0f << 4) + +/* DA9150_CONFIG_A_SHARED = 0x106 */ +#define DA9150_NIRQ_VDD_SHIFT 1 +#define DA9150_NIRQ_VDD_MASK BIT(1) +#define DA9150_NIRQ_PIN_SHIFT 2 +#define DA9150_NIRQ_PIN_MASK BIT(2) +#define DA9150_NIRQ_TYPE_SHIFT 3 +#define DA9150_NIRQ_TYPE_MASK BIT(3) +#define DA9150_PM_IF_V_SHIFT 4 +#define DA9150_PM_IF_V_MASK BIT(4) +#define DA9150_PM_IF_FMP_SHIFT 5 +#define DA9150_PM_IF_FMP_MASK BIT(5) +#define DA9150_PM_IF_HSM_SHIFT 6 +#define DA9150_PM_IF_HSM_MASK BIT(6) + +/* DA9150_CONFIG_D_SHARED = 0x109 */ +#define DA9150_NIRQ_MODE_SHIFT 1 +#define DA9150_NIRQ_MODE_MASK BIT(1) + +/* DA9150_ADETVB_CFG_C = 0x150 */ +#define DA9150_TADP_RISE_SHIFT 0 +#define DA9150_TADP_RISE_MASK (0xff << 0) + +/* DA9150_ADETD_STAT = 0x151 */ +#define DA9150_DCD_STAT_SHIFT 0 +#define DA9150_DCD_STAT_MASK BIT(0) +#define DA9150_PCD_STAT_SHIFT 1 +#define DA9150_PCD_STAT_MASK (0x03 << 1) 
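Status bitfields such as the accessory-detection flags above are reached through the register accessors declared in the DA9150 core header. A minimal sketch of reading ADETD_STAT and testing its data-contact-detection flag (da9150_dcd_stat() is a hypothetical helper, not part of this patch):

#include <linux/types.h>
#include <linux/mfd/da9150/core.h>
#include <linux/mfd/da9150/registers.h>

/* Hypothetical example: fetch DA9150_ADETD_STAT and return the raw
 * DCD status flag defined above. */
static bool da9150_dcd_stat(struct da9150 *da9150)
{
	u8 stat = da9150_reg_read(da9150, DA9150_ADETD_STAT);

	return stat & DA9150_DCD_STAT_MASK;
}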
+#define DA9150_SCD_STAT_SHIFT 3 +#define DA9150_SCD_STAT_MASK (0x03 << 3) +#define DA9150_DP_STAT_SHIFT 5 +#define DA9150_DP_STAT_MASK BIT(5) +#define DA9150_DM_STAT_SHIFT 6 +#define DA9150_DM_STAT_MASK BIT(6) + +/* DA9150_ADET_CMPSTAT = 0x152 */ +#define DA9150_DP_COMP_SHIFT 1 +#define DA9150_DP_COMP_MASK BIT(1) +#define DA9150_DM_COMP_SHIFT 2 +#define DA9150_DM_COMP_MASK BIT(2) +#define DA9150_ADP_SNS_COMP_SHIFT 3 +#define DA9150_ADP_SNS_COMP_MASK BIT(3) +#define DA9150_ADP_PRB_COMP_SHIFT 4 +#define DA9150_ADP_PRB_COMP_MASK BIT(4) +#define DA9150_ID_COMP_SHIFT 5 +#define DA9150_ID_COMP_MASK BIT(5) + +/* DA9150_ADET_CTRL_A = 0x153 */ +#define DA9150_AID_DAT_SHIFT 0 +#define DA9150_AID_DAT_MASK BIT(0) +#define DA9150_AID_ID_SHIFT 1 +#define DA9150_AID_ID_MASK BIT(1) +#define DA9150_AID_TRIG_SHIFT 2 +#define DA9150_AID_TRIG_MASK BIT(2) + +/* DA9150_ADETVB_CFG_B = 0x154 */ +#define DA9150_VB_MODE_SHIFT 0 +#define DA9150_VB_MODE_MASK (0x03 << 0) +#define DA9150_VB_MODE_VB_SESS BIT(0) + +#define DA9150_TADP_PRB_SHIFT 2 +#define DA9150_TADP_PRB_MASK BIT(2) +#define DA9150_DAT_RPD_EXT_SHIFT 5 +#define DA9150_DAT_RPD_EXT_MASK BIT(5) +#define DA9150_CONF_RPD_SHIFT 6 +#define DA9150_CONF_RPD_MASK BIT(6) +#define DA9150_CONF_SRP_SHIFT 7 +#define DA9150_CONF_SRP_MASK BIT(7) + +/* DA9150_ADETVB_CFG_A = 0x155 */ +#define DA9150_AID_MODE_SHIFT 0 +#define DA9150_AID_MODE_MASK (0x03 << 0) +#define DA9150_AID_EXT_POL_SHIFT 2 +#define DA9150_AID_EXT_POL_MASK BIT(2) + +/* DA9150_ADETAC_CFG_A = 0x156 */ +#define DA9150_ISET_CDP_SHIFT 0 +#define DA9150_ISET_CDP_MASK (0x1f << 0) +#define DA9150_CONF_DBP_SHIFT 5 +#define DA9150_CONF_DBP_MASK BIT(5) + +/* DA9150_ADDETAC_CFG_B = 0x157 */ +#define DA9150_ISET_DCHG_SHIFT 0 +#define DA9150_ISET_DCHG_MASK (0x1f << 0) +#define DA9150_CONF_GPIOA_SHIFT 5 +#define DA9150_CONF_GPIOA_MASK BIT(5) +#define DA9150_CONF_GPIOB_SHIFT 6 +#define DA9150_CONF_GPIOB_MASK BIT(6) +#define DA9150_AID_VB_SHIFT 7 +#define DA9150_AID_VB_MASK BIT(7) + +/* DA9150_ADETAC_CFG_C = 0x158 */ +#define DA9150_ISET_DEF_SHIFT 0 +#define DA9150_ISET_DEF_MASK (0x1f << 0) +#define DA9150_CONF_MODE_SHIFT 5 +#define DA9150_CONF_MODE_MASK (0x03 << 5) +#define DA9150_AID_CR_DIS_SHIFT 7 +#define DA9150_AID_CR_DIS_MASK BIT(7) + +/* DA9150_ADETAC_CFG_D = 0x159 */ +#define DA9150_ISET_UNIT_SHIFT 0 +#define DA9150_ISET_UNIT_MASK (0x1f << 0) +#define DA9150_AID_UNCLAMP_SHIFT 5 +#define DA9150_AID_UNCLAMP_MASK BIT(5) + +/* DA9150_ADETVB_CFG_D = 0x15A */ +#define DA9150_ID_MODE_SHIFT 0 +#define DA9150_ID_MODE_MASK (0x03 << 0) +#define DA9150_DAT_MODE_SHIFT 2 +#define DA9150_DAT_MODE_MASK (0x0f << 2) +#define DA9150_DAT_SWP_SHIFT 6 +#define DA9150_DAT_SWP_MASK BIT(6) +#define DA9150_DAT_CLAMP_EXT_SHIFT 7 +#define DA9150_DAT_CLAMP_EXT_MASK BIT(7) + +/* DA9150_ADETID_CFG_A = 0x15B */ +#define DA9150_TID_POLL_SHIFT 0 +#define DA9150_TID_POLL_MASK (0x07 << 0) +#define DA9150_RID_CONV_SHIFT 3 +#define DA9150_RID_CONV_MASK BIT(3) + +/* DA9150_ADET_RID_PT_CHG_H = 0x15C */ +#define DA9150_RID_PT_CHG_H_SHIFT 0 +#define DA9150_RID_PT_CHG_H_MASK (0xff << 0) + +/* DA9150_ADET_RID_PT_CHG_L = 0x15D */ +#define DA9150_RID_PT_CHG_L_SHIFT 6 +#define DA9150_RID_PT_CHG_L_MASK (0x03 << 6) + +/* DA9150_PPR_TCTR_B = 0x160 */ +#define DA9150_CHG_TCTR_VAL_SHIFT 0 +#define DA9150_CHG_TCTR_VAL_MASK (0xff << 0) + +/* DA9150_PPR_BKCTRL_A = 0x163 */ +#define DA9150_VBUS_MODE_SHIFT 0 +#define DA9150_VBUS_MODE_MASK (0x03 << 0) +#define DA9150_VBUS_MODE_CHG BIT(0) +#define DA9150_VBUS_MODE_OTG (0x02 << 0) +#define DA9150_VBUS_LPM_SHIFT 2 +#define 
DA9150_VBUS_LPM_MASK (0x03 << 2) +#define DA9150_VBUS_SUSP_SHIFT 4 +#define DA9150_VBUS_SUSP_MASK BIT(4) +#define DA9150_VBUS_PWM_SHIFT 5 +#define DA9150_VBUS_PWM_MASK BIT(5) +#define DA9150_VBUS_ISO_SHIFT 6 +#define DA9150_VBUS_ISO_MASK BIT(6) +#define DA9150_VBUS_LDO_SHIFT 7 +#define DA9150_VBUS_LDO_MASK BIT(7) + +/* DA9150_PPR_BKCFG_A = 0x164 */ +#define DA9150_VBUS_ISET_SHIFT 0 +#define DA9150_VBUS_ISET_MASK (0x1f << 0) +#define DA9150_VBUS_IMAX_SHIFT 5 +#define DA9150_VBUS_IMAX_MASK BIT(5) +#define DA9150_VBUS_IOTG_SHIFT 6 +#define DA9150_VBUS_IOTG_MASK (0x03 << 6) + +/* DA9150_PPR_BKCFG_B = 0x165 */ +#define DA9150_VBUS_DROP_SHIFT 0 +#define DA9150_VBUS_DROP_MASK (0x0f << 0) +#define DA9150_VBUS_FAULT_DIS_SHIFT 6 +#define DA9150_VBUS_FAULT_DIS_MASK BIT(6) +#define DA9150_OTG_FAULT_DIS_SHIFT 7 +#define DA9150_OTG_FAULT_DIS_MASK BIT(7) + +/* DA9150_PPR_CHGCTRL_A = 0x166 */ +#define DA9150_CHG_EN_SHIFT 0 +#define DA9150_CHG_EN_MASK BIT(0) + +/* DA9150_PPR_CHGCTRL_B = 0x167 */ +#define DA9150_CHG_VBAT_SHIFT 0 +#define DA9150_CHG_VBAT_MASK (0x1f << 0) +#define DA9150_CHG_VDROP_SHIFT 6 +#define DA9150_CHG_VDROP_MASK (0x03 << 6) + +/* DA9150_PPR_CHGCTRL_C = 0x168 */ +#define DA9150_CHG_VFAULT_SHIFT 0 +#define DA9150_CHG_VFAULT_MASK (0x0f << 0) +#define DA9150_CHG_IPRE_SHIFT 4 +#define DA9150_CHG_IPRE_MASK (0x03 << 4) + +/* DA9150_PPR_TCTR_A = 0x169 */ +#define DA9150_CHG_TCTR_SHIFT 0 +#define DA9150_CHG_TCTR_MASK (0x07 << 0) +#define DA9150_CHG_TCTR_MODE_SHIFT 4 +#define DA9150_CHG_TCTR_MODE_MASK BIT(4) + +/* DA9150_PPR_CHGCTRL_D = 0x16A */ +#define DA9150_CHG_IBAT_SHIFT 0 +#define DA9150_CHG_IBAT_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_E = 0x16B */ +#define DA9150_CHG_IEND_SHIFT 0 +#define DA9150_CHG_IEND_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_F = 0x16C */ +#define DA9150_CHG_VCOLD_SHIFT 0 +#define DA9150_CHG_VCOLD_MASK (0x1f << 0) +#define DA9150_TBAT_TQA_EN_SHIFT 6 +#define DA9150_TBAT_TQA_EN_MASK BIT(6) +#define DA9150_TBAT_TDP_EN_SHIFT 7 +#define DA9150_TBAT_TDP_EN_MASK BIT(7) + +/* DA9150_PPR_CHGCTRL_G = 0x16D */ +#define DA9150_CHG_VWARM_SHIFT 0 +#define DA9150_CHG_VWARM_MASK (0x1f << 0) + +/* DA9150_PPR_CHGCTRL_H = 0x16E */ +#define DA9150_CHG_VHOT_SHIFT 0 +#define DA9150_CHG_VHOT_MASK (0x1f << 0) + +/* DA9150_PPR_CHGCTRL_I = 0x16F */ +#define DA9150_CHG_ICOLD_SHIFT 0 +#define DA9150_CHG_ICOLD_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_J = 0x170 */ +#define DA9150_CHG_IWARM_SHIFT 0 +#define DA9150_CHG_IWARM_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_K = 0x171 */ +#define DA9150_CHG_IHOT_SHIFT 0 +#define DA9150_CHG_IHOT_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_L = 0x172 */ +#define DA9150_CHG_IBAT_TRED_SHIFT 0 +#define DA9150_CHG_IBAT_TRED_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_M = 0x173 */ +#define DA9150_CHG_VFLOAT_SHIFT 0 +#define DA9150_CHG_VFLOAT_MASK (0x0f << 0) +#define DA9150_CHG_LPM_SHIFT 5 +#define DA9150_CHG_LPM_MASK BIT(5) +#define DA9150_CHG_NBLO_SHIFT 6 +#define DA9150_CHG_NBLO_MASK BIT(6) +#define DA9150_EBS_EN_SHIFT 7 +#define DA9150_EBS_EN_MASK BIT(7) + +/* DA9150_PPR_THYST_A = 0x174 */ +#define DA9150_TBAT_T1_SHIFT 0 +#define DA9150_TBAT_T1_MASK (0xff << 0) + +/* DA9150_PPR_THYST_B = 0x175 */ +#define DA9150_TBAT_T2_SHIFT 0 +#define DA9150_TBAT_T2_MASK (0xff << 0) + +/* DA9150_PPR_THYST_C = 0x176 */ +#define DA9150_TBAT_T3_SHIFT 0 +#define DA9150_TBAT_T3_MASK (0xff << 0) + +/* DA9150_PPR_THYST_D = 0x177 */ +#define DA9150_TBAT_T4_SHIFT 0 +#define DA9150_TBAT_T4_MASK (0xff << 0) + +/* DA9150_PPR_THYST_E = 0x178 */ +#define DA9150_TBAT_T5_SHIFT 0 +#define 
DA9150_TBAT_T5_MASK (0xff << 0) + +/* DA9150_PPR_THYST_F = 0x179 */ +#define DA9150_TBAT_H1_SHIFT 0 +#define DA9150_TBAT_H1_MASK (0xff << 0) + +/* DA9150_PPR_THYST_G = 0x17A */ +#define DA9150_TBAT_H5_SHIFT 0 +#define DA9150_TBAT_H5_MASK (0xff << 0) + +/* DA9150_PAGE_CON_3 = 0x180 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_PAGE_CON_4 = 0x200 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_PAGE_CON_5 = 0x280 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_PAGE_CON_6 = 0x300 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_COREBTLD_STAT_A = 0x302 */ +#define DA9150_BOOTLD_STAT_SHIFT 0 +#define DA9150_BOOTLD_STAT_MASK (0x03 << 0) +#define DA9150_CORE_LOCKUP_SHIFT 2 +#define DA9150_CORE_LOCKUP_MASK BIT(2) + +/* DA9150_COREBTLD_CTRL_A = 0x303 */ +#define DA9150_CORE_RESET_SHIFT 0 +#define DA9150_CORE_RESET_MASK BIT(0) +#define DA9150_CORE_STOP_SHIFT 1 +#define DA9150_CORE_STOP_MASK BIT(1) + +/* DA9150_CORE_CONFIG_A = 0x304 */ +#define DA9150_CORE_MEMMUX_SHIFT 0 +#define DA9150_CORE_MEMMUX_MASK (0x03 << 0) +#define DA9150_WDT_AUTO_START_SHIFT 2 +#define DA9150_WDT_AUTO_START_MASK BIT(2) +#define DA9150_WDT_AUTO_LOCK_SHIFT 3 +#define DA9150_WDT_AUTO_LOCK_MASK BIT(3) +#define DA9150_WDT_HLT_NO_CLK_SHIFT 4 +#define DA9150_WDT_HLT_NO_CLK_MASK BIT(4) + +/* DA9150_CORE_CONFIG_C = 0x305 */ +#define DA9150_CORE_SW_SIZE_SHIFT 0 +#define DA9150_CORE_SW_SIZE_MASK (0xff << 0) + +/* DA9150_CORE_CONFIG_B = 0x306 */ +#define DA9150_BOOTLD_EN_SHIFT 0 +#define DA9150_BOOTLD_EN_MASK BIT(0) +#define DA9150_CORE_EN_SHIFT 2 +#define DA9150_CORE_EN_MASK BIT(2) +#define DA9150_CORE_SW_SRC_SHIFT 3 +#define DA9150_CORE_SW_SRC_MASK (0x07 << 3) +#define DA9150_DEEP_SLEEP_EN_SHIFT 7 +#define DA9150_DEEP_SLEEP_EN_MASK BIT(7) + +/* DA9150_CORE_CFG_DATA_A = 0x307 */ +#define DA9150_CORE_CFG_DT_A_SHIFT 0 +#define DA9150_CORE_CFG_DT_A_MASK (0xff << 0) + +/* DA9150_CORE_CFG_DATA_B = 0x308 */ +#define DA9150_CORE_CFG_DT_B_SHIFT 0 +#define DA9150_CORE_CFG_DT_B_MASK (0xff << 0) + +/* DA9150_CORE_CMD_A = 0x309 */ +#define DA9150_CORE_CMD_SHIFT 0 +#define DA9150_CORE_CMD_MASK (0xff << 0) + +/* DA9150_CORE_DATA_A = 0x30A */ +#define DA9150_CORE_DATA_0_SHIFT 0 +#define DA9150_CORE_DATA_0_MASK (0xff << 0) + +/* DA9150_CORE_DATA_B = 0x30B */ +#define DA9150_CORE_DATA_1_SHIFT 0 +#define DA9150_CORE_DATA_1_MASK (0xff << 0) + +/* DA9150_CORE_DATA_C = 0x30C */ +#define DA9150_CORE_DATA_2_SHIFT 0 +#define DA9150_CORE_DATA_2_MASK (0xff << 0) + +/* DA9150_CORE_DATA_D = 0x30D */ +#define DA9150_CORE_DATA_3_SHIFT 0 +#define DA9150_CORE_DATA_3_MASK (0xff << 0) + +/* DA9150_CORE2WIRE_STAT_A = 0x310 */ +#define DA9150_FW_FWDL_ERR_SHIFT 7 +#define DA9150_FW_FWDL_ERR_MASK BIT(7) + +/* DA9150_CORE2WIRE_CTRL_A = 0x311 */ +#define DA9150_FW_FWDL_EN_SHIFT 0 +#define DA9150_FW_FWDL_EN_MASK BIT(0) +#define DA9150_FG_QIF_EN_SHIFT 1 +#define DA9150_FG_QIF_EN_MASK 
BIT(1) +#define DA9150_CORE_BASE_ADDR_SHIFT 4 +#define DA9150_CORE_BASE_ADDR_MASK (0x0f << 4) + +/* DA9150_FW_CTRL_A = 0x312 */ +#define DA9150_FW_SEAL_SHIFT 0 +#define DA9150_FW_SEAL_MASK (0xff << 0) + +/* DA9150_FW_CTRL_C = 0x313 */ +#define DA9150_FW_FWDL_CRC_SHIFT 0 +#define DA9150_FW_FWDL_CRC_MASK (0xff << 0) + +/* DA9150_FW_CTRL_D = 0x314 */ +#define DA9150_FW_FWDL_BASE_SHIFT 0 +#define DA9150_FW_FWDL_BASE_MASK (0x0f << 0) + +/* DA9150_FG_CTRL_A = 0x315 */ +#define DA9150_FG_QIF_CODE_SHIFT 0 +#define DA9150_FG_QIF_CODE_MASK (0xff << 0) + +/* DA9150_FG_CTRL_B = 0x316 */ +#define DA9150_FG_QIF_VALUE_SHIFT 0 +#define DA9150_FG_QIF_VALUE_MASK (0xff << 0) + +/* DA9150_FW_CTRL_E = 0x317 */ +#define DA9150_FW_FWDL_SEG_SHIFT 0 +#define DA9150_FW_FWDL_SEG_MASK (0xff << 0) + +/* DA9150_FW_CTRL_B = 0x318 */ +#define DA9150_FW_FWDL_VALUE_SHIFT 0 +#define DA9150_FW_FWDL_VALUE_MASK (0xff << 0) + +/* DA9150_GPADC_CMAN = 0x320 */ +#define DA9150_GPADC_CEN_SHIFT 0 +#define DA9150_GPADC_CEN_MASK BIT(0) +#define DA9150_GPADC_CMUX_SHIFT 1 +#define DA9150_GPADC_CMUX_MASK (0x1f << 1) + +/* DA9150_GPADC_CRES_A = 0x322 */ +#define DA9150_GPADC_CRES_H_SHIFT 0 +#define DA9150_GPADC_CRES_H_MASK (0xff << 0) + +/* DA9150_GPADC_CRES_B = 0x323 */ +#define DA9150_GPADC_CRUN_SHIFT 0 +#define DA9150_GPADC_CRUN_MASK BIT(0) +#define DA9150_GPADC_CRES_L_SHIFT 6 +#define DA9150_GPADC_CRES_L_MASK (0x03 << 6) + +/* DA9150_CC_CFG_A = 0x328 */ +#define DA9150_CC_EN_SHIFT 0 +#define DA9150_CC_EN_MASK BIT(0) +#define DA9150_CC_TIMEBASE_SHIFT 1 +#define DA9150_CC_TIMEBASE_MASK (0x03 << 1) +#define DA9150_CC_CFG_SHIFT 5 +#define DA9150_CC_CFG_MASK (0x03 << 5) +#define DA9150_CC_ENDLESS_MODE_SHIFT 7 +#define DA9150_CC_ENDLESS_MODE_MASK BIT(7) + +/* DA9150_CC_CFG_B = 0x329 */ +#define DA9150_CC_OPT_SHIFT 0 +#define DA9150_CC_OPT_MASK (0x03 << 0) +#define DA9150_CC_PREAMP_SHIFT 2 +#define DA9150_CC_PREAMP_MASK (0x03 << 2) + +/* DA9150_CC_ICHG_RES_A = 0x32A */ +#define DA9150_CC_ICHG_RES_H_SHIFT 0 +#define DA9150_CC_ICHG_RES_H_MASK (0xff << 0) + +/* DA9150_CC_ICHG_RES_B = 0x32B */ +#define DA9150_CC_ICHG_RES_L_SHIFT 3 +#define DA9150_CC_ICHG_RES_L_MASK (0x1f << 3) + +/* DA9150_CC_IAVG_RES_A = 0x32C */ +#define DA9150_CC_IAVG_RES_H_SHIFT 0 +#define DA9150_CC_IAVG_RES_H_MASK (0xff << 0) + +/* DA9150_CC_IAVG_RES_B = 0x32D */ +#define DA9150_CC_IAVG_RES_L_SHIFT 0 +#define DA9150_CC_IAVG_RES_L_MASK (0xff << 0) + +/* DA9150_TAUX_CTRL_A = 0x330 */ +#define DA9150_TAUX_EN_SHIFT 0 +#define DA9150_TAUX_EN_MASK BIT(0) +#define DA9150_TAUX_MOD_SHIFT 1 +#define DA9150_TAUX_MOD_MASK BIT(1) +#define DA9150_TAUX_UPDATE_SHIFT 2 +#define DA9150_TAUX_UPDATE_MASK BIT(2) + +/* DA9150_TAUX_RELOAD_H = 0x332 */ +#define DA9150_TAUX_RLD_H_SHIFT 0 +#define DA9150_TAUX_RLD_H_MASK (0xff << 0) + +/* DA9150_TAUX_RELOAD_L = 0x333 */ +#define DA9150_TAUX_RLD_L_SHIFT 3 +#define DA9150_TAUX_RLD_L_MASK (0x1f << 3) + +/* DA9150_TAUX_VALUE_H = 0x334 */ +#define DA9150_TAUX_VAL_H_SHIFT 0 +#define DA9150_TAUX_VAL_H_MASK (0xff << 0) + +/* DA9150_TAUX_VALUE_L = 0x335 */ +#define DA9150_TAUX_VAL_L_SHIFT 3 +#define DA9150_TAUX_VAL_L_MASK (0x1f << 3) + +/* DA9150_AUX_DATA_0 = 0x338 */ +#define DA9150_AUX_DAT_0_SHIFT 0 +#define DA9150_AUX_DAT_0_MASK (0xff << 0) + +/* DA9150_AUX_DATA_1 = 0x339 */ +#define DA9150_AUX_DAT_1_SHIFT 0 +#define DA9150_AUX_DAT_1_MASK (0xff << 0) + +/* DA9150_AUX_DATA_2 = 0x33A */ +#define DA9150_AUX_DAT_2_SHIFT 0 +#define DA9150_AUX_DAT_2_MASK (0xff << 0) + +/* DA9150_AUX_DATA_3 = 0x33B */ +#define DA9150_AUX_DAT_3_SHIFT 0 +#define 
DA9150_AUX_DAT_3_MASK (0xff << 0) + +/* DA9150_BIF_CTRL = 0x340 */ +#define DA9150_BIF_ISRC_EN_SHIFT 0 +#define DA9150_BIF_ISRC_EN_MASK BIT(0) + +/* DA9150_TBAT_CTRL_A = 0x342 */ +#define DA9150_TBAT_EN_SHIFT 0 +#define DA9150_TBAT_EN_MASK BIT(0) +#define DA9150_TBAT_SW1_SHIFT 1 +#define DA9150_TBAT_SW1_MASK BIT(1) +#define DA9150_TBAT_SW2_SHIFT 2 +#define DA9150_TBAT_SW2_MASK BIT(2) + +/* DA9150_TBAT_CTRL_B = 0x343 */ +#define DA9150_TBAT_SW_FRC_SHIFT 0 +#define DA9150_TBAT_SW_FRC_MASK BIT(0) +#define DA9150_TBAT_STAT_SW1_SHIFT 1 +#define DA9150_TBAT_STAT_SW1_MASK BIT(1) +#define DA9150_TBAT_STAT_SW2_SHIFT 2 +#define DA9150_TBAT_STAT_SW2_MASK BIT(2) +#define DA9150_TBAT_HIGH_CURR_SHIFT 3 +#define DA9150_TBAT_HIGH_CURR_MASK BIT(3) + +/* DA9150_TBAT_RES_A = 0x344 */ +#define DA9150_TBAT_RES_H_SHIFT 0 +#define DA9150_TBAT_RES_H_MASK (0xff << 0) + +/* DA9150_TBAT_RES_B = 0x345 */ +#define DA9150_TBAT_RES_DIS_SHIFT 0 +#define DA9150_TBAT_RES_DIS_MASK BIT(0) +#define DA9150_TBAT_RES_L_SHIFT 6 +#define DA9150_TBAT_RES_L_MASK (0x03 << 6) + +#endif /* __DA9150_REGISTERS_H */ diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h new file mode 100644 index 000000000..2c0127cb0 --- /dev/null +++ b/include/linux/mfd/davinci_voicecodec.h @@ -0,0 +1,117 @@ +/* + * DaVinci Voice Codec Core Interface for TI platforms + * + * Copyright (C) 2010 Texas Instruments, Inc + * + * Author: Miguel Aguilar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_ +#define __LINUX_MFD_DAVINCI_VOICECODEC_H_ + +#include +#include +#include +#include + +struct regmap; + +/* + * Register values. 
+ */ +#define DAVINCI_VC_PID 0x00 +#define DAVINCI_VC_CTRL 0x04 +#define DAVINCI_VC_INTEN 0x08 +#define DAVINCI_VC_INTSTATUS 0x0c +#define DAVINCI_VC_INTCLR 0x10 +#define DAVINCI_VC_EMUL_CTRL 0x14 +#define DAVINCI_VC_RFIFO 0x20 +#define DAVINCI_VC_WFIFO 0x24 +#define DAVINCI_VC_FIFOSTAT 0x28 +#define DAVINCI_VC_TST_CTRL 0x2C +#define DAVINCI_VC_REG05 0x94 +#define DAVINCI_VC_REG09 0xA4 +#define DAVINCI_VC_REG12 0xB0 + +/* DAVINCI_VC_CTRL bit fields */ +#define DAVINCI_VC_CTRL_MASK 0x5500 +#define DAVINCI_VC_CTRL_RSTADC BIT(0) +#define DAVINCI_VC_CTRL_RSTDAC BIT(1) +#define DAVINCI_VC_CTRL_RD_BITS_8 BIT(4) +#define DAVINCI_VC_CTRL_RD_UNSIGNED BIT(5) +#define DAVINCI_VC_CTRL_WD_BITS_8 BIT(6) +#define DAVINCI_VC_CTRL_WD_UNSIGNED BIT(7) +#define DAVINCI_VC_CTRL_RFIFOEN BIT(8) +#define DAVINCI_VC_CTRL_RFIFOCL BIT(9) +#define DAVINCI_VC_CTRL_RFIFOMD_WORD_1 BIT(10) +#define DAVINCI_VC_CTRL_WFIFOEN BIT(12) +#define DAVINCI_VC_CTRL_WFIFOCL BIT(13) +#define DAVINCI_VC_CTRL_WFIFOMD_WORD_1 BIT(14) + +/* DAVINCI_VC_INT bit fields */ +#define DAVINCI_VC_INT_MASK 0x3F +#define DAVINCI_VC_INT_RDRDY_MASK BIT(0) +#define DAVINCI_VC_INT_RERROVF_MASK BIT(1) +#define DAVINCI_VC_INT_RERRUDR_MASK BIT(2) +#define DAVINCI_VC_INT_WDREQ_MASK BIT(3) +#define DAVINCI_VC_INT_WERROVF_MASKBIT BIT(4) +#define DAVINCI_VC_INT_WERRUDR_MASK BIT(5) + +/* DAVINCI_VC_REG05 bit fields */ +#define DAVINCI_VC_REG05_PGA_GAIN 0x07 + +/* DAVINCI_VC_REG09 bit fields */ +#define DAVINCI_VC_REG09_MUTE 0x40 +#define DAVINCI_VC_REG09_DIG_ATTEN 0x3F + +/* DAVINCI_VC_REG12 bit fields */ +#define DAVINCI_VC_REG12_POWER_ALL_ON 0xFD +#define DAVINCI_VC_REG12_POWER_ALL_OFF 0x00 + +#define DAVINCI_VC_CELLS 2 + +enum davinci_vc_cells { + DAVINCI_VC_VCIF_CELL, + DAVINCI_VC_CQ93VC_CELL, +}; + +struct davinci_vcif { + struct platform_device *pdev; + u32 dma_tx_channel; + u32 dma_rx_channel; + dma_addr_t dma_tx_addr; + dma_addr_t dma_rx_addr; +}; + +struct davinci_vc { + /* Device data */ + struct device *dev; + struct platform_device *pdev; + struct clk *clk; + + /* Memory resources */ + void __iomem *base; + struct regmap *regmap; + + /* MFD cells */ + struct mfd_cell cells[DAVINCI_VC_CELLS]; + + /* Client devices */ + struct davinci_vcif davinci_vcif; +}; + +#endif diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h new file mode 100644 index 000000000..7ba67b55b --- /dev/null +++ b/include/linux/mfd/db8500-prcmu.h @@ -0,0 +1,766 @@ +/* + * Copyright (C) STMicroelectronics 2009 + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Kumar Sanghvi + * + * PRCMU f/w APIs + */ +#ifndef __MFD_DB8500_PRCMU_H +#define __MFD_DB8500_PRCMU_H + +#include +#include + +/* + * Registers + */ +#define DB8500_PRCM_LINE_VALUE 0x170 +#define DB8500_PRCM_LINE_VALUE_HSI_CAWAKE0 BIT(3) + +#define DB8500_PRCM_DSI_SW_RESET 0x324 +#define DB8500_PRCM_DSI_SW_RESET_DSI0_SW_RESETN BIT(0) +#define DB8500_PRCM_DSI_SW_RESET_DSI1_SW_RESETN BIT(1) +#define DB8500_PRCM_DSI_SW_RESET_DSI2_SW_RESETN BIT(2) + +/* This portion previously known as */ + +/** + * enum state - ON/OFF state definition + * @OFF: State is ON + * @ON: State is OFF + * + */ +enum state { + OFF = 0x0, + ON = 0x1, +}; + +/** + * enum ret_state - general purpose On/Off/Retention states + * + */ +enum ret_state { + OFFST = 0, + ONST = 1, + RETST = 2 +}; + +/** + * enum clk_arm - ARM Cortex A9 clock schemes + * @A9_OFF: + * @A9_BOOT: + * @A9_OPPT1: + * @A9_OPPT2: + * @A9_EXTCLK: + */ +enum clk_arm { + A9_OFF, + A9_BOOT, + A9_OPPT1, + 
A9_OPPT2, + A9_EXTCLK +}; + +/** + * enum clk_gen - GEN#0/GEN#1 clock schemes + * @GEN_OFF: + * @GEN_BOOT: + * @GEN_OPPT1: + */ +enum clk_gen { + GEN_OFF, + GEN_BOOT, + GEN_OPPT1, +}; + +/* some information between arm and xp70 */ + +/** + * enum romcode_write - Romcode message written by A9 AND read by XP70 + * @RDY_2_DS: Value set when ApDeepSleep state can be executed by XP70 + * @RDY_2_XP70_RST: Value set when 0x0F has been successfully polled by the + * romcode. The xp70 will go into self-reset + */ +enum romcode_write { + RDY_2_DS = 0x09, + RDY_2_XP70_RST = 0x10 +}; + +/** + * enum romcode_read - Romcode message written by XP70 and read by A9 + * @INIT: Init value when romcode field is not used + * @FS_2_DS: Value set when power state is going from ApExecute to + * ApDeepSleep + * @END_DS: Value set when ApDeepSleep power state is reached coming from + * ApExecute state + * @DS_TO_FS: Value set when power state is going from ApDeepSleep to + * ApExecute + * @END_FS: Value set when ApExecute power state is reached coming from + * ApDeepSleep state + * @SWR: Value set when power state is going to ApReset + * @END_SWR: Value set when the xp70 finished executing ApReset actions and + * waits for romcode acknowledgment to go to self-reset + */ +enum romcode_read { + INIT = 0x00, + FS_2_DS = 0x0A, + END_DS = 0x0B, + DS_TO_FS = 0x0C, + END_FS = 0x0D, + SWR = 0x0E, + END_SWR = 0x0F +}; + +/** + * enum ap_pwrst - current power states defined in PRCMU firmware + * @NO_PWRST: Current power state init + * @AP_BOOT: Current power state is apBoot + * @AP_EXECUTE: Current power state is apExecute + * @AP_DEEP_SLEEP: Current power state is apDeepSleep + * @AP_SLEEP: Current power state is apSleep + * @AP_IDLE: Current power state is apIdle + * @AP_RESET: Current power state is apReset + */ +enum ap_pwrst { + NO_PWRST = 0x00, + AP_BOOT = 0x01, + AP_EXECUTE = 0x02, + AP_DEEP_SLEEP = 0x03, + AP_SLEEP = 0x04, + AP_IDLE = 0x05, + AP_RESET = 0x06 +}; + +/** + * enum ap_pwrst_trans - Transition states defined in PRCMU firmware + * @NO_TRANSITION: No power state transition + * @APEXECUTE_TO_APSLEEP: Power state transition from ApExecute to ApSleep + * @APIDLE_TO_APSLEEP: Power state transition from ApIdle to ApSleep + * @APBOOT_TO_APEXECUTE: Power state transition from ApBoot to ApExecute + * @APEXECUTE_TO_APDEEPSLEEP: Power state transition from ApExecute to + * ApDeepSleep + * @APEXECUTE_TO_APIDLE: Power state transition from ApExecute to ApIdle + */ +enum ap_pwrst_trans { + PRCMU_AP_NO_CHANGE = 0x00, + APEXECUTE_TO_APSLEEP = 0x01, + APIDLE_TO_APSLEEP = 0x02, /* To be removed */ + PRCMU_AP_SLEEP = 0x01, + APBOOT_TO_APEXECUTE = 0x03, + APEXECUTE_TO_APDEEPSLEEP = 0x04, /* To be removed */ + PRCMU_AP_DEEP_SLEEP = 0x04, + APEXECUTE_TO_APIDLE = 0x05, /* To be removed */ + PRCMU_AP_IDLE = 0x05, + PRCMU_AP_DEEP_IDLE = 0x07, +}; + +/** + * enum hw_acc_state - State definition for hardware accelerator + * @HW_NO_CHANGE: The hardware accelerator state must remain unchanged + * @HW_OFF: The hardware accelerator must be switched off + * @HW_OFF_RAMRET: The hardware accelerator must be switched off with its + * internal RAM in retention + * @HW_ON: The hwa hardware accelerator hwa must be switched on + * + * NOTE! Deprecated, to be removed when all users switched over to use the + * regulator API. 
+ */ +enum hw_acc_state { + HW_NO_CHANGE = 0x00, + HW_OFF = 0x01, + HW_OFF_RAMRET = 0x02, + HW_ON = 0x04 +}; + +/** + * enum mbox_2_arm_stat - Status messages definition for mbox_arm + * @BOOT_TO_EXECUTEOK: The apBoot to apExecute state transition has been + * completed + * @DEEPSLEEPOK: The apExecute to apDeepSleep state transition has been + * completed + * @SLEEPOK: The apExecute to apSleep state transition has been completed + * @IDLEOK: The apExecute to apIdle state transition has been completed + * @SOFTRESETOK: The A9 watchdog/ SoftReset state has been completed + * @SOFTRESETGO : The A9 watchdog/SoftReset state is on going + * @BOOT_TO_EXECUTE: The apBoot to apExecute state transition is on going + * @EXECUTE_TO_DEEPSLEEP: The apExecute to apDeepSleep state transition is on + * going + * @DEEPSLEEP_TO_EXECUTE: The apDeepSleep to apExecute state transition is on + * going + * @DEEPSLEEP_TO_EXECUTEOK: The apDeepSleep to apExecute state transition has + * been completed + * @EXECUTE_TO_SLEEP: The apExecute to apSleep state transition is on going + * @SLEEP_TO_EXECUTE: The apSleep to apExecute state transition is on going + * @SLEEP_TO_EXECUTEOK: The apSleep to apExecute state transition has been + * completed + * @EXECUTE_TO_IDLE: The apExecute to apIdle state transition is on going + * @IDLE_TO_EXECUTE: The apIdle to apExecute state transition is on going + * @IDLE_TO_EXECUTEOK: The apIdle to apExecute state transition has been + * completed + * @INIT_STATUS: Status init + */ +enum ap_pwrsttr_status { + BOOT_TO_EXECUTEOK = 0xFF, + DEEPSLEEPOK = 0xFE, + SLEEPOK = 0xFD, + IDLEOK = 0xFC, + SOFTRESETOK = 0xFB, + SOFTRESETGO = 0xFA, + BOOT_TO_EXECUTE = 0xF9, + EXECUTE_TO_DEEPSLEEP = 0xF8, + DEEPSLEEP_TO_EXECUTE = 0xF7, + DEEPSLEEP_TO_EXECUTEOK = 0xF6, + EXECUTE_TO_SLEEP = 0xF5, + SLEEP_TO_EXECUTE = 0xF4, + SLEEP_TO_EXECUTEOK = 0xF3, + EXECUTE_TO_IDLE = 0xF2, + IDLE_TO_EXECUTE = 0xF1, + IDLE_TO_EXECUTEOK = 0xF0, + RDYTODS_RETURNTOEXE = 0xEF, + NORDYTODS_RETURNTOEXE = 0xEE, + EXETOSLEEP_RETURNTOEXE = 0xED, + EXETOIDLE_RETURNTOEXE = 0xEC, + INIT_STATUS = 0xEB, + + /*error messages */ + INITERROR = 0x00, + PLLARMLOCKP_ER = 0x01, + PLLDDRLOCKP_ER = 0x02, + PLLSOCLOCKP_ER = 0x03, + PLLSOCK1LOCKP_ER = 0x04, + ARMWFI_ER = 0x05, + SYSCLKOK_ER = 0x06, + I2C_NACK_DATA_ER = 0x07, + BOOT_ER = 0x08, + I2C_STATUS_ALWAYS_1 = 0x0A, + I2C_NACK_REG_ADDR_ER = 0x0B, + I2C_NACK_DATA0123_ER = 0x1B, + I2C_NACK_ADDR_ER = 0x1F, + CURAPPWRSTISNOT_BOOT = 0x20, + CURAPPWRSTISNOT_EXECUTE = 0x21, + CURAPPWRSTISNOT_SLEEPMODE = 0x22, + CURAPPWRSTISNOT_CORRECTFORIT10 = 0x23, + FIFO4500WUISNOT_WUPEVENT = 0x24, + PLL32KLOCKP_ER = 0x29, + DDRDEEPSLEEPOK_ER = 0x2A, + ROMCODEREADY_ER = 0x50, + WUPBEFOREDS = 0x51, + DDRCONFIG_ER = 0x52, + WUPBEFORESLEEP = 0x53, + WUPBEFOREIDLE = 0x54 +}; /* earlier called as mbox_2_arm_stat */ + +/** + * enum dvfs_stat - DVFS status messages definition + * @DVFS_GO: A state transition DVFS is on going + * @DVFS_ARM100OPPOK: The state transition DVFS has been completed for 100OPP + * @DVFS_ARM50OPPOK: The state transition DVFS has been completed for 50OPP + * @DVFS_ARMEXTCLKOK: The state transition DVFS has been completed for EXTCLK + * @DVFS_NOCHGTCLKOK: The state transition DVFS has been completed for + * NOCHGCLK + * @DVFS_INITSTATUS: Value init + */ +enum dvfs_stat { + DVFS_GO = 0xFF, + DVFS_ARM100OPPOK = 0xFE, + DVFS_ARM50OPPOK = 0xFD, + DVFS_ARMEXTCLKOK = 0xFC, + DVFS_NOCHGTCLKOK = 0xFB, + DVFS_INITSTATUS = 0x00 +}; + +/** + * enum sva_mmdsp_stat - SVA MMDSP status messages + * 
@SVA_MMDSP_GO: SVAMMDSP interrupt has happened + * @SVA_MMDSP_INIT: Status init + */ +enum sva_mmdsp_stat { + SVA_MMDSP_GO = 0xFF, + SVA_MMDSP_INIT = 0x00 +}; + +/** + * enum sia_mmdsp_stat - SIA MMDSP status messages + * @SIA_MMDSP_GO: SIAMMDSP interrupt has happened + * @SIA_MMDSP_INIT: Status init + */ +enum sia_mmdsp_stat { + SIA_MMDSP_GO = 0xFF, + SIA_MMDSP_INIT = 0x00 +}; + +/** + * enum mbox_to_arm_err - Error messages definition + * @INIT_ERR: Init value + * @PLLARMLOCKP_ERR: PLLARM has not been correctly locked in given time + * @PLLDDRLOCKP_ERR: PLLDDR has not been correctly locked in the given time + * @PLLSOC0LOCKP_ERR: PLLSOC0 has not been correctly locked in the given time + * @PLLSOC1LOCKP_ERR: PLLSOC1 has not been correctly locked in the given time + * @ARMWFI_ERR: The ARM WFI has not been correctly executed in the given time + * @SYSCLKOK_ERR: The SYSCLK is not available in the given time + * @BOOT_ERR: Romcode has not validated the XP70 self reset in the given time + * @ROMCODESAVECONTEXT: The Romcode didn.t correctly save it secure context + * @VARMHIGHSPEEDVALTO_ERR: The ARM high speed supply value transfered + * through I2C has not been correctly executed in the given time + * @VARMHIGHSPEEDACCESS_ERR: The command value of VarmHighSpeedVal transfered + * through I2C has not been correctly executed in the given time + * @VARMLOWSPEEDVALTO_ERR:The ARM low speed supply value transfered through + * I2C has not been correctly executed in the given time + * @VARMLOWSPEEDACCESS_ERR: The command value of VarmLowSpeedVal transfered + * through I2C has not been correctly executed in the given time + * @VARMRETENTIONVALTO_ERR: The ARM retention supply value transfered through + * I2C has not been correctly executed in the given time + * @VARMRETENTIONACCESS_ERR: The command value of VarmRetentionVal transfered + * through I2C has not been correctly executed in the given time + * @VAPEHIGHSPEEDVALTO_ERR: The APE highspeed supply value transfered through + * I2C has not been correctly executed in the given time + * @VSAFEHPVALTO_ERR: The SAFE high power supply value transfered through I2C + * has not been correctly executed in the given time + * @VMODSEL1VALTO_ERR: The MODEM sel1 supply value transfered through I2C has + * not been correctly executed in the given time + * @VMODSEL2VALTO_ERR: The MODEM sel2 supply value transfered through I2C has + * not been correctly executed in the given time + * @VARMOFFACCESS_ERR: The command value of Varm ON/OFF transfered through + * I2C has not been correctly executed in the given time + * @VAPEOFFACCESS_ERR: The command value of Vape ON/OFF transfered through + * I2C has not been correctly executed in the given time + * @VARMRETACCES_ERR: The command value of Varm retention ON/OFF transfered + * through I2C has not been correctly executed in the given time + * @CURAPPWRSTISNOTBOOT:Generated when Arm want to do power state transition + * ApBoot to ApExecute but the power current state is not Apboot + * @CURAPPWRSTISNOTEXECUTE: Generated when Arm want to do power state + * transition from ApExecute to others power state but the + * power current state is not ApExecute + * @CURAPPWRSTISNOTSLEEPMODE: Generated when wake up events are transmitted + * but the power current state is not ApDeepSleep/ApSleep/ApIdle + * @CURAPPWRSTISNOTCORRECTDBG: Generated when wake up events are transmitted + * but the power current state is not correct + * @ARMREGU1VALTO_ERR:The ArmRegu1 value transferred through I2C has not + * been correctly executed in the 
given time + * @ARMREGU2VALTO_ERR: The ArmRegu2 value transferred through I2C has not + * been correctly executed in the given time + * @VAPEREGUVALTO_ERR: The VApeRegu value transfered through I2C has not + * been correctly executed in the given time + * @VSMPS3REGUVALTO_ERR: The VSmps3Regu value transfered through I2C has not + * been correctly executed in the given time + * @VMODREGUVALTO_ERR: The VModemRegu value transfered through I2C has not + * been correctly executed in the given time + */ +enum mbox_to_arm_err { + INIT_ERR = 0x00, + PLLARMLOCKP_ERR = 0x01, + PLLDDRLOCKP_ERR = 0x02, + PLLSOC0LOCKP_ERR = 0x03, + PLLSOC1LOCKP_ERR = 0x04, + ARMWFI_ERR = 0x05, + SYSCLKOK_ERR = 0x06, + BOOT_ERR = 0x07, + ROMCODESAVECONTEXT = 0x08, + VARMHIGHSPEEDVALTO_ERR = 0x10, + VARMHIGHSPEEDACCESS_ERR = 0x11, + VARMLOWSPEEDVALTO_ERR = 0x12, + VARMLOWSPEEDACCESS_ERR = 0x13, + VARMRETENTIONVALTO_ERR = 0x14, + VARMRETENTIONACCESS_ERR = 0x15, + VAPEHIGHSPEEDVALTO_ERR = 0x16, + VSAFEHPVALTO_ERR = 0x17, + VMODSEL1VALTO_ERR = 0x18, + VMODSEL2VALTO_ERR = 0x19, + VARMOFFACCESS_ERR = 0x1A, + VAPEOFFACCESS_ERR = 0x1B, + VARMRETACCES_ERR = 0x1C, + CURAPPWRSTISNOTBOOT = 0x20, + CURAPPWRSTISNOTEXECUTE = 0x21, + CURAPPWRSTISNOTSLEEPMODE = 0x22, + CURAPPWRSTISNOTCORRECTDBG = 0x23, + ARMREGU1VALTO_ERR = 0x24, + ARMREGU2VALTO_ERR = 0x25, + VAPEREGUVALTO_ERR = 0x26, + VSMPS3REGUVALTO_ERR = 0x27, + VMODREGUVALTO_ERR = 0x28 +}; + +enum hw_acc { + SVAMMDSP = 0, + SVAPIPE = 1, + SIAMMDSP = 2, + SIAPIPE = 3, + SGA = 4, + B2R2MCDE = 5, + ESRAM12 = 6, + ESRAM34 = 7, +}; + +enum cs_pwrmgt { + PWRDNCS0 = 0, + WKUPCS0 = 1, + PWRDNCS1 = 2, + WKUPCS1 = 3 +}; + +/* Defs related to autonomous power management */ + +/** + * enum sia_sva_pwr_policy - Power policy + * @NO_CHGT: No change + * @DSPOFF_HWPOFF: + * @DSPOFFRAMRET_HWPOFF: + * @DSPCLKOFF_HWPOFF: + * @DSPCLKOFF_HWPCLKOFF: + * + */ +enum sia_sva_pwr_policy { + NO_CHGT = 0x0, + DSPOFF_HWPOFF = 0x1, + DSPOFFRAMRET_HWPOFF = 0x2, + DSPCLKOFF_HWPOFF = 0x3, + DSPCLKOFF_HWPCLKOFF = 0x4, +}; + +/** + * enum auto_enable - Auto Power enable + * @AUTO_OFF: + * @AUTO_ON: + * + */ +enum auto_enable { + AUTO_OFF = 0x0, + AUTO_ON = 0x1, +}; + +/* End of file previously known as prcmu-fw-defs_v1.h */ + +/** + * enum prcmu_power_status - results from set_power_state + * @PRCMU_SLEEP_OK: Sleep went ok + * @PRCMU_DEEP_SLEEP_OK: DeepSleep went ok + * @PRCMU_IDLE_OK: Idle went ok + * @PRCMU_DEEPIDLE_OK: DeepIdle went ok + * @PRCMU_PRCMU2ARMPENDINGIT_ER: Pending interrupt detected + * @PRCMU_ARMPENDINGIT_ER: Pending interrupt detected + * + */ +enum prcmu_power_status { + PRCMU_SLEEP_OK = 0xf3, + PRCMU_DEEP_SLEEP_OK = 0xf6, + PRCMU_IDLE_OK = 0xf0, + PRCMU_DEEPIDLE_OK = 0xe3, + PRCMU_PRCMU2ARMPENDINGIT_ER = 0x91, + PRCMU_ARMPENDINGIT_ER = 0x93, +}; + +/* + * Definitions for autonomous power management configuration. + */ + +#define PRCMU_AUTO_PM_OFF 0 +#define PRCMU_AUTO_PM_ON 1 + +#define PRCMU_AUTO_PM_POWER_ON_HSEM BIT(0) +#define PRCMU_AUTO_PM_POWER_ON_ABB_FIFO_IT BIT(1) + +enum prcmu_auto_pm_policy { + PRCMU_AUTO_PM_POLICY_NO_CHANGE, + PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF, + PRCMU_AUTO_PM_POLICY_DSP_OFF_RAMRET_HWP_OFF, + PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_OFF, + PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_CLK_OFF, +}; + +/** + * struct prcmu_auto_pm_config - Autonomous power management configuration. + * @sia_auto_pm_enable: SIA autonomous pm enable. (PRCMU_AUTO_PM_{OFF,ON}) + * @sia_power_on: SIA power ON enable. (PRCMU_AUTO_PM_POWER_ON_* bitmask) + * @sia_policy: SIA power policy. 
(enum prcmu_auto_pm_policy) + * @sva_auto_pm_enable: SVA autonomous pm enable. (PRCMU_AUTO_PM_{OFF,ON}) + * @sva_power_on: SVA power ON enable. (PRCMU_AUTO_PM_POWER_ON_* bitmask) + * @sva_policy: SVA power policy. (enum prcmu_auto_pm_policy) + */ +struct prcmu_auto_pm_config { + u8 sia_auto_pm_enable; + u8 sia_power_on; + u8 sia_policy; + u8 sva_auto_pm_enable; + u8 sva_power_on; + u8 sva_policy; +}; + +#ifdef CONFIG_MFD_DB8500_PRCMU + +void db8500_prcmu_early_init(u32 phy_base, u32 size); +int prcmu_set_rc_a2p(enum romcode_write); +enum romcode_read prcmu_get_rc_p2a(void); +enum ap_pwrst prcmu_get_xp70_current_state(void); +bool prcmu_has_arm_maxopp(void); +struct prcmu_fw_version *prcmu_get_fw_version(void); +int prcmu_release_usb_wakeup_state(void); +void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep, + struct prcmu_auto_pm_config *idle); +bool prcmu_is_auto_pm_enabled(void); + +int prcmu_config_clkout(u8 clkout, u8 source, u8 div); +int prcmu_set_clock_divider(u8 clock, u8 divider); +int db8500_prcmu_config_hotdog(u8 threshold); +int db8500_prcmu_config_hotmon(u8 low, u8 high); +int db8500_prcmu_start_temp_sense(u16 cycles32k); +int db8500_prcmu_stop_temp_sense(void); +int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); +int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size); + +int prcmu_ac_wake_req(void); +void prcmu_ac_sleep_req(void); +void db8500_prcmu_modem_reset(void); + +int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off); +int db8500_prcmu_enable_a9wdog(u8 id); +int db8500_prcmu_disable_a9wdog(u8 id); +int db8500_prcmu_kick_a9wdog(u8 id); +int db8500_prcmu_load_a9wdog(u8 id, u32 val); + +void db8500_prcmu_system_reset(u16 reset_code); +int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll); +u8 db8500_prcmu_get_power_state_result(void); +void db8500_prcmu_enable_wakeups(u32 wakeups); +int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state); +int db8500_prcmu_request_clock(u8 clock, bool enable); +int db8500_prcmu_set_display_clocks(void); +int db8500_prcmu_disable_dsipll(void); +int db8500_prcmu_enable_dsipll(void); +void db8500_prcmu_config_abb_event_readout(u32 abb_events); +void db8500_prcmu_get_abb_event_buffer(void __iomem **buf); +int db8500_prcmu_config_esram0_deep_sleep(u8 state); +u16 db8500_prcmu_get_reset_code(void); +bool db8500_prcmu_is_ac_wake_requested(void); +int db8500_prcmu_set_arm_opp(u8 opp); +int db8500_prcmu_get_arm_opp(void); +int db8500_prcmu_set_ape_opp(u8 opp); +int db8500_prcmu_get_ape_opp(void); +int db8500_prcmu_request_ape_opp_100_voltage(bool enable); +int db8500_prcmu_get_ddr_opp(void); + +u32 db8500_prcmu_read(unsigned int reg); +void db8500_prcmu_write(unsigned int reg, u32 value); +void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value); + +#else /* !CONFIG_MFD_DB8500_PRCMU */ + +static inline void db8500_prcmu_early_init(u32 phy_base, u32 size) {} + +static inline int prcmu_set_rc_a2p(enum romcode_write code) +{ + return 0; +} + +static inline enum romcode_read prcmu_get_rc_p2a(void) +{ + return INIT; +} + +static inline enum ap_pwrst prcmu_get_xp70_current_state(void) +{ + return AP_EXECUTE; +} + +static inline bool prcmu_has_arm_maxopp(void) +{ + return false; +} + +static inline struct prcmu_fw_version *prcmu_get_fw_version(void) +{ + return NULL; +} + +static inline int db8500_prcmu_set_ape_opp(u8 opp) +{ + return 0; +} + +static inline int db8500_prcmu_get_ape_opp(void) +{ + return APE_100_OPP; +} + +static inline int db8500_prcmu_request_ape_opp_100_voltage(bool enable) 
+{ + return 0; +} + +static inline int prcmu_release_usb_wakeup_state(void) +{ + return 0; +} + +static inline int db8500_prcmu_get_ddr_opp(void) +{ + return DDR_100_OPP; +} + +static inline void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep, + struct prcmu_auto_pm_config *idle) +{ +} + +static inline bool prcmu_is_auto_pm_enabled(void) +{ + return false; +} + +static inline int prcmu_config_clkout(u8 clkout, u8 source, u8 div) +{ + return 0; +} + +static inline int prcmu_set_clock_divider(u8 clock, u8 divider) +{ + return 0; +} + +static inline int db8500_prcmu_config_hotdog(u8 threshold) +{ + return 0; +} + +static inline int db8500_prcmu_config_hotmon(u8 low, u8 high) +{ + return 0; +} + +static inline int db8500_prcmu_start_temp_sense(u16 cycles32k) +{ + return 0; +} + +static inline int db8500_prcmu_stop_temp_sense(void) +{ + return 0; +} + +static inline int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int prcmu_ac_wake_req(void) +{ + return 0; +} + +static inline void prcmu_ac_sleep_req(void) {} + +static inline void db8500_prcmu_modem_reset(void) {} + +static inline void db8500_prcmu_system_reset(u16 reset_code) {} + +static inline int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, + bool keep_ap_pll) +{ + return 0; +} + +static inline u8 db8500_prcmu_get_power_state_result(void) +{ + return 0; +} + +static inline void db8500_prcmu_enable_wakeups(u32 wakeups) {} + +static inline int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state) +{ + return 0; +} + +static inline int db8500_prcmu_request_clock(u8 clock, bool enable) +{ + return 0; +} + +static inline int db8500_prcmu_set_display_clocks(void) +{ + return 0; +} + +static inline int db8500_prcmu_disable_dsipll(void) +{ + return 0; +} + +static inline int db8500_prcmu_enable_dsipll(void) +{ + return 0; +} + +static inline int db8500_prcmu_config_esram0_deep_sleep(u8 state) +{ + return 0; +} + +static inline void db8500_prcmu_config_abb_event_readout(u32 abb_events) {} + +static inline void db8500_prcmu_get_abb_event_buffer(void __iomem **buf) {} + +static inline u16 db8500_prcmu_get_reset_code(void) +{ + return 0; +} + +static inline int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off) +{ + return 0; +} + +static inline int db8500_prcmu_enable_a9wdog(u8 id) +{ + return 0; +} + +static inline int db8500_prcmu_disable_a9wdog(u8 id) +{ + return 0; +} + +static inline int db8500_prcmu_kick_a9wdog(u8 id) +{ + return 0; +} + +static inline int db8500_prcmu_load_a9wdog(u8 id, u32 val) +{ + return 0; +} + +static inline bool db8500_prcmu_is_ac_wake_requested(void) +{ + return 0; +} + +static inline int db8500_prcmu_set_arm_opp(u8 opp) +{ + return 0; +} + +static inline int db8500_prcmu_get_arm_opp(void) +{ + return 0; +} + +static inline u32 db8500_prcmu_read(unsigned int reg) +{ + return 0; +} + +static inline void db8500_prcmu_write(unsigned int reg, u32 value) {} + +static inline void db8500_prcmu_write_masked(unsigned int reg, u32 mask, + u32 value) {} + +#endif /* !CONFIG_MFD_DB8500_PRCMU */ + +#endif /* __MFD_DB8500_PRCMU_H */ diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h new file mode 100644 index 000000000..2e2c6a63a --- /dev/null +++ b/include/linux/mfd/dbx500-prcmu.h @@ -0,0 +1,647 @@ +/* + * Copyright (C) ST Ericsson SA 2011 + * + * License Terms: GNU General Public License v2 + * + * STE Ux500 PRCMU API + */ +#ifndef 
__MACH_PRCMU_H +#define __MACH_PRCMU_H + +#include +#include +#include + +#include /* For clock identifiers */ + +/* Offset for the firmware version within the TCPM */ +#define DB8500_PRCMU_FW_VERSION_OFFSET 0xA4 +#define DBX540_PRCMU_FW_VERSION_OFFSET 0xA8 + +/* PRCMU Wakeup defines */ +enum prcmu_wakeup_index { + PRCMU_WAKEUP_INDEX_RTC, + PRCMU_WAKEUP_INDEX_RTT0, + PRCMU_WAKEUP_INDEX_RTT1, + PRCMU_WAKEUP_INDEX_HSI0, + PRCMU_WAKEUP_INDEX_HSI1, + PRCMU_WAKEUP_INDEX_USB, + PRCMU_WAKEUP_INDEX_ABB, + PRCMU_WAKEUP_INDEX_ABB_FIFO, + PRCMU_WAKEUP_INDEX_ARM, + PRCMU_WAKEUP_INDEX_CD_IRQ, + NUM_PRCMU_WAKEUP_INDICES +}; +#define PRCMU_WAKEUP(_name) (BIT(PRCMU_WAKEUP_INDEX_##_name)) + +/* EPOD (power domain) IDs */ + +/* + * DB8500 EPODs + * - EPOD_ID_SVAMMDSP: power domain for SVA MMDSP + * - EPOD_ID_SVAPIPE: power domain for SVA pipe + * - EPOD_ID_SIAMMDSP: power domain for SIA MMDSP + * - EPOD_ID_SIAPIPE: power domain for SIA pipe + * - EPOD_ID_SGA: power domain for SGA + * - EPOD_ID_B2R2_MCDE: power domain for B2R2 and MCDE + * - EPOD_ID_ESRAM12: power domain for ESRAM 1 and 2 + * - EPOD_ID_ESRAM34: power domain for ESRAM 3 and 4 + * - NUM_EPOD_ID: number of power domains + * + * TODO: These should be prefixed. + */ +#define EPOD_ID_SVAMMDSP 0 +#define EPOD_ID_SVAPIPE 1 +#define EPOD_ID_SIAMMDSP 2 +#define EPOD_ID_SIAPIPE 3 +#define EPOD_ID_SGA 4 +#define EPOD_ID_B2R2_MCDE 5 +#define EPOD_ID_ESRAM12 6 +#define EPOD_ID_ESRAM34 7 +#define NUM_EPOD_ID 8 + +/* + * state definition for EPOD (power domain) + * - EPOD_STATE_NO_CHANGE: The EPOD should remain unchanged + * - EPOD_STATE_OFF: The EPOD is switched off + * - EPOD_STATE_RAMRET: The EPOD is switched off with its internal RAM in + * retention + * - EPOD_STATE_ON_CLK_OFF: The EPOD is switched on, clock is still off + * - EPOD_STATE_ON: Same as above, but with clock enabled + */ +#define EPOD_STATE_NO_CHANGE 0x00 +#define EPOD_STATE_OFF 0x01 +#define EPOD_STATE_RAMRET 0x02 +#define EPOD_STATE_ON_CLK_OFF 0x03 +#define EPOD_STATE_ON 0x04 + +/* + * CLKOUT sources + */ +#define PRCMU_CLKSRC_CLK38M 0x00 +#define PRCMU_CLKSRC_ACLK 0x01 +#define PRCMU_CLKSRC_SYSCLK 0x02 +#define PRCMU_CLKSRC_LCDCLK 0x03 +#define PRCMU_CLKSRC_SDMMCCLK 0x04 +#define PRCMU_CLKSRC_TVCLK 0x05 +#define PRCMU_CLKSRC_TIMCLK 0x06 +#define PRCMU_CLKSRC_CLK009 0x07 +/* These are only valid for CLKOUT1: */ +#define PRCMU_CLKSRC_SIAMMDSPCLK 0x40 +#define PRCMU_CLKSRC_I2CCLK 0x41 +#define PRCMU_CLKSRC_MSP02CLK 0x42 +#define PRCMU_CLKSRC_ARMPLL_OBSCLK 0x43 +#define PRCMU_CLKSRC_HSIRXCLK 0x44 +#define PRCMU_CLKSRC_HSITXCLK 0x45 +#define PRCMU_CLKSRC_ARMCLKFIX 0x46 +#define PRCMU_CLKSRC_HDMICLK 0x47 + +/** + * enum prcmu_wdog_id - PRCMU watchdog IDs + * @PRCMU_WDOG_ALL: use all timers + * @PRCMU_WDOG_CPU1: use first CPU timer only + * @PRCMU_WDOG_CPU2: use second CPU timer conly + */ +enum prcmu_wdog_id { + PRCMU_WDOG_ALL = 0x00, + PRCMU_WDOG_CPU1 = 0x01, + PRCMU_WDOG_CPU2 = 0x02, +}; + +/** + * enum ape_opp - APE OPP states definition + * @APE_OPP_INIT: + * @APE_NO_CHANGE: The APE operating point is unchanged + * @APE_100_OPP: The new APE operating point is ape100opp + * @APE_50_OPP: 50% + * @APE_50_PARTLY_25_OPP: 50%, except some clocks at 25%. 
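 *
 * For illustration only, one plausible use of these values by a consumer
 * that temporarily needs full APE performance (using the wrapper functions
 * declared further down in this header; sketch, not a prescribed sequence):
 *
 *	(void) prcmu_set_ape_opp(APE_100_OPP);
 *	... latency-critical work ...
 *	(void) prcmu_set_ape_opp(APE_50_OPP);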
+ */ +enum ape_opp { + APE_OPP_INIT = 0x00, + APE_NO_CHANGE = 0x01, + APE_100_OPP = 0x02, + APE_50_OPP = 0x03, + APE_50_PARTLY_25_OPP = 0xFF, +}; + +/** + * enum arm_opp - ARM OPP states definition + * @ARM_OPP_INIT: + * @ARM_NO_CHANGE: The ARM operating point is unchanged + * @ARM_100_OPP: The new ARM operating point is arm100opp + * @ARM_50_OPP: The new ARM operating point is arm50opp + * @ARM_MAX_OPP: Operating point is "max" (more than 100) + * @ARM_MAX_FREQ100OPP: Set max opp if available, else 100 + * @ARM_EXTCLK: The new ARM operating point is armExtClk + */ +enum arm_opp { + ARM_OPP_INIT = 0x00, + ARM_NO_CHANGE = 0x01, + ARM_100_OPP = 0x02, + ARM_50_OPP = 0x03, + ARM_MAX_OPP = 0x04, + ARM_MAX_FREQ100OPP = 0x05, + ARM_EXTCLK = 0x07 +}; + +/** + * enum ddr_opp - DDR OPP states definition + * @DDR_100_OPP: The new DDR operating point is ddr100opp + * @DDR_50_OPP: The new DDR operating point is ddr50opp + * @DDR_25_OPP: The new DDR operating point is ddr25opp + */ +enum ddr_opp { + DDR_100_OPP = 0x00, + DDR_50_OPP = 0x01, + DDR_25_OPP = 0x02, +}; + +/* + * Definitions for controlling ESRAM0 in deep sleep. + */ +#define ESRAM0_DEEP_SLEEP_STATE_OFF 1 +#define ESRAM0_DEEP_SLEEP_STATE_RET 2 + +/** + * enum ddr_pwrst - DDR power states definition + * @DDR_PWR_STATE_UNCHANGED: SDRAM and DDR controller state is unchanged + * @DDR_PWR_STATE_ON: + * @DDR_PWR_STATE_OFFLOWLAT: + * @DDR_PWR_STATE_OFFHIGHLAT: + */ +enum ddr_pwrst { + DDR_PWR_STATE_UNCHANGED = 0x00, + DDR_PWR_STATE_ON = 0x01, + DDR_PWR_STATE_OFFLOWLAT = 0x02, + DDR_PWR_STATE_OFFHIGHLAT = 0x03 +}; + +#define DB8500_PRCMU_LEGACY_OFFSET 0xDD4 + +#define PRCMU_FW_PROJECT_U8500 2 +#define PRCMU_FW_PROJECT_U8400 3 +#define PRCMU_FW_PROJECT_U9500 4 /* Customer specific */ +#define PRCMU_FW_PROJECT_U8500_MBB 5 +#define PRCMU_FW_PROJECT_U8500_C1 6 +#define PRCMU_FW_PROJECT_U8500_C2 7 +#define PRCMU_FW_PROJECT_U8500_C3 8 +#define PRCMU_FW_PROJECT_U8500_C4 9 +#define PRCMU_FW_PROJECT_U9500_MBL 10 +#define PRCMU_FW_PROJECT_U8500_MBL 11 /* Customer specific */ +#define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */ +#define PRCMU_FW_PROJECT_U8520 13 +#define PRCMU_FW_PROJECT_U8420 14 +#define PRCMU_FW_PROJECT_A9420 20 +/* [32..63] 9540 and derivatives */ +#define PRCMU_FW_PROJECT_U9540 32 +/* [64..95] 8540 and derivatives */ +#define PRCMU_FW_PROJECT_L8540 64 +/* [96..126] 8580 and derivatives */ +#define PRCMU_FW_PROJECT_L8580 96 + +#define PRCMU_FW_PROJECT_NAME_LEN 20 +struct prcmu_fw_version { + u32 project; /* Notice, project shifted with 8 on ux540 */ + u8 api_version; + u8 func_version; + u8 errata; + char project_name[PRCMU_FW_PROJECT_NAME_LEN]; +}; + +#include + +#if defined(CONFIG_UX500_SOC_DB8500) + +static inline void prcmu_early_init(u32 phy_base, u32 size) +{ + return db8500_prcmu_early_init(phy_base, size); +} + +static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk, + bool keep_ap_pll) +{ + return db8500_prcmu_set_power_state(state, keep_ulp_clk, + keep_ap_pll); +} + +static inline u8 prcmu_get_power_state_result(void) +{ + return db8500_prcmu_get_power_state_result(); +} + +static inline int prcmu_set_epod(u16 epod_id, u8 epod_state) +{ + return db8500_prcmu_set_epod(epod_id, epod_state); +} + +static inline void prcmu_enable_wakeups(u32 wakeups) +{ + db8500_prcmu_enable_wakeups(wakeups); +} + +static inline void prcmu_disable_wakeups(void) +{ + prcmu_enable_wakeups(0); +} + +static inline void prcmu_config_abb_event_readout(u32 abb_events) +{ + db8500_prcmu_config_abb_event_readout(abb_events); +} + +static 
inline void prcmu_get_abb_event_buffer(void __iomem **buf) +{ + db8500_prcmu_get_abb_event_buffer(buf); +} + +int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); +int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size); +int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, u8 size); + +int prcmu_config_clkout(u8 clkout, u8 source, u8 div); + +static inline int prcmu_request_clock(u8 clock, bool enable) +{ + return db8500_prcmu_request_clock(clock, enable); +} + +unsigned long prcmu_clock_rate(u8 clock); +long prcmu_round_clock_rate(u8 clock, unsigned long rate); +int prcmu_set_clock_rate(u8 clock, unsigned long rate); + +static inline int prcmu_get_ddr_opp(void) +{ + return db8500_prcmu_get_ddr_opp(); +} + +static inline int prcmu_set_arm_opp(u8 opp) +{ + return db8500_prcmu_set_arm_opp(opp); +} + +static inline int prcmu_get_arm_opp(void) +{ + return db8500_prcmu_get_arm_opp(); +} + +static inline int prcmu_set_ape_opp(u8 opp) +{ + return db8500_prcmu_set_ape_opp(opp); +} + +static inline int prcmu_get_ape_opp(void) +{ + return db8500_prcmu_get_ape_opp(); +} + +static inline int prcmu_request_ape_opp_100_voltage(bool enable) +{ + return db8500_prcmu_request_ape_opp_100_voltage(enable); +} + +static inline void prcmu_system_reset(u16 reset_code) +{ + return db8500_prcmu_system_reset(reset_code); +} + +static inline u16 prcmu_get_reset_code(void) +{ + return db8500_prcmu_get_reset_code(); +} + +int prcmu_ac_wake_req(void); +void prcmu_ac_sleep_req(void); +static inline void prcmu_modem_reset(void) +{ + return db8500_prcmu_modem_reset(); +} + +static inline bool prcmu_is_ac_wake_requested(void) +{ + return db8500_prcmu_is_ac_wake_requested(); +} + +static inline int prcmu_set_display_clocks(void) +{ + return db8500_prcmu_set_display_clocks(); +} + +static inline int prcmu_disable_dsipll(void) +{ + return db8500_prcmu_disable_dsipll(); +} + +static inline int prcmu_enable_dsipll(void) +{ + return db8500_prcmu_enable_dsipll(); +} + +static inline int prcmu_config_esram0_deep_sleep(u8 state) +{ + return db8500_prcmu_config_esram0_deep_sleep(state); +} + +static inline int prcmu_config_hotdog(u8 threshold) +{ + return db8500_prcmu_config_hotdog(threshold); +} + +static inline int prcmu_config_hotmon(u8 low, u8 high) +{ + return db8500_prcmu_config_hotmon(low, high); +} + +static inline int prcmu_start_temp_sense(u16 cycles32k) +{ + return db8500_prcmu_start_temp_sense(cycles32k); +} + +static inline int prcmu_stop_temp_sense(void) +{ + return db8500_prcmu_stop_temp_sense(); +} + +static inline u32 prcmu_read(unsigned int reg) +{ + return db8500_prcmu_read(reg); +} + +static inline void prcmu_write(unsigned int reg, u32 value) +{ + db8500_prcmu_write(reg, value); +} + +static inline void prcmu_write_masked(unsigned int reg, u32 mask, u32 value) +{ + db8500_prcmu_write_masked(reg, mask, value); +} + +static inline int prcmu_enable_a9wdog(u8 id) +{ + return db8500_prcmu_enable_a9wdog(id); +} + +static inline int prcmu_disable_a9wdog(u8 id) +{ + return db8500_prcmu_disable_a9wdog(id); +} + +static inline int prcmu_kick_a9wdog(u8 id) +{ + return db8500_prcmu_kick_a9wdog(id); +} + +static inline int prcmu_load_a9wdog(u8 id, u32 timeout) +{ + return db8500_prcmu_load_a9wdog(id, timeout); +} + +static inline int prcmu_config_a9wdog(u8 num, bool sleep_auto_off) +{ + return db8500_prcmu_config_a9wdog(num, sleep_auto_off); +} +#else + +static inline void prcmu_early_init(u32 phy_base, u32 size) {} + +static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk, + bool 
keep_ap_pll) +{ + return 0; +} + +static inline int prcmu_set_epod(u16 epod_id, u8 epod_state) +{ + return 0; +} + +static inline void prcmu_enable_wakeups(u32 wakeups) {} + +static inline void prcmu_disable_wakeups(void) {} + +static inline int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, + u8 size) +{ + return -ENOSYS; +} + +static inline int prcmu_config_clkout(u8 clkout, u8 source, u8 div) +{ + return 0; +} + +static inline int prcmu_request_clock(u8 clock, bool enable) +{ + return 0; +} + +static inline long prcmu_round_clock_rate(u8 clock, unsigned long rate) +{ + return 0; +} + +static inline int prcmu_set_clock_rate(u8 clock, unsigned long rate) +{ + return 0; +} + +static inline unsigned long prcmu_clock_rate(u8 clock) +{ + return 0; +} + +static inline int prcmu_set_ape_opp(u8 opp) +{ + return 0; +} + +static inline int prcmu_get_ape_opp(void) +{ + return APE_100_OPP; +} + +static inline int prcmu_request_ape_opp_100_voltage(bool enable) +{ + return 0; +} + +static inline int prcmu_set_arm_opp(u8 opp) +{ + return 0; +} + +static inline int prcmu_get_arm_opp(void) +{ + return ARM_100_OPP; +} + +static inline int prcmu_get_ddr_opp(void) +{ + return DDR_100_OPP; +} + +static inline void prcmu_system_reset(u16 reset_code) {} + +static inline u16 prcmu_get_reset_code(void) +{ + return 0; +} + +static inline int prcmu_ac_wake_req(void) +{ + return 0; +} + +static inline void prcmu_ac_sleep_req(void) {} + +static inline void prcmu_modem_reset(void) {} + +static inline bool prcmu_is_ac_wake_requested(void) +{ + return false; +} + +static inline int prcmu_set_display_clocks(void) +{ + return 0; +} + +static inline int prcmu_disable_dsipll(void) +{ + return 0; +} + +static inline int prcmu_enable_dsipll(void) +{ + return 0; +} + +static inline int prcmu_config_esram0_deep_sleep(u8 state) +{ + return 0; +} + +static inline void prcmu_config_abb_event_readout(u32 abb_events) {} + +static inline void prcmu_get_abb_event_buffer(void __iomem **buf) +{ + *buf = NULL; +} + +static inline int prcmu_config_hotdog(u8 threshold) +{ + return 0; +} + +static inline int prcmu_config_hotmon(u8 low, u8 high) +{ + return 0; +} + +static inline int prcmu_start_temp_sense(u16 cycles32k) +{ + return 0; +} + +static inline int prcmu_stop_temp_sense(void) +{ + return 0; +} + +static inline u32 prcmu_read(unsigned int reg) +{ + return 0; +} + +static inline void prcmu_write(unsigned int reg, u32 value) {} + +static inline void prcmu_write_masked(unsigned int reg, u32 mask, u32 value) {} + +#endif + +static inline void prcmu_set(unsigned int reg, u32 bits) +{ + prcmu_write_masked(reg, bits, bits); +} + +static inline void prcmu_clear(unsigned int reg, u32 bits) +{ + prcmu_write_masked(reg, bits, 0); +} + +/* PRCMU QoS APE OPP class */ +#define PRCMU_QOS_APE_OPP 1 +#define PRCMU_QOS_DDR_OPP 2 +#define PRCMU_QOS_ARM_OPP 3 +#define PRCMU_QOS_DEFAULT_VALUE -1 + +#ifdef CONFIG_DBX500_PRCMU_QOS_POWER + +unsigned long prcmu_qos_get_cpufreq_opp_delay(void); +void prcmu_qos_set_cpufreq_opp_delay(unsigned long); +void prcmu_qos_force_opp(int, s32); +int prcmu_qos_requirement(int pm_qos_class); +int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value); +int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value); +void prcmu_qos_remove_requirement(int pm_qos_class, char *name); +int 
prcmu_qos_add_notifier(int prcmu_qos_class, + struct notifier_block *notifier); +int prcmu_qos_remove_notifier(int prcmu_qos_class, + struct notifier_block *notifier); + +#else + +static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void) +{ + return 0; +} + +static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {} + +static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {} + +static inline int prcmu_qos_requirement(int prcmu_qos_class) +{ + return 0; +} + +static inline int prcmu_qos_add_requirement(int prcmu_qos_class, + char *name, s32 value) +{ + return 0; +} + +static inline int prcmu_qos_update_requirement(int prcmu_qos_class, + char *name, s32 new_value) +{ + return 0; +} + +static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name) +{ +} + +static inline int prcmu_qos_add_notifier(int prcmu_qos_class, + struct notifier_block *notifier) +{ + return 0; +} +static inline int prcmu_qos_remove_notifier(int prcmu_qos_class, + struct notifier_block *notifier) +{ + return 0; +} + +#endif + +#endif /* __MACH_PRCMU_H */ diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h new file mode 100644 index 000000000..4cade9aa8 --- /dev/null +++ b/include/linux/mfd/dln2.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_USB_DLN2_H +#define __LINUX_USB_DLN2_H + +#define DLN2_CMD(cmd, id) ((cmd) | ((id) << 8)) + +struct dln2_platform_data { + u16 handle; /* sub-driver handle (internally used only) */ + u8 port; /* I2C/SPI port */ +}; + +/** + * dln2_event_cb_t - event callback function signature + * + * @pdev - the sub-device that registered this callback + * @echo - the echo header field received in the message + * @data - the data payload + * @len - the data payload length + * + * The callback function is called in interrupt context and the data payload is + * only valid during the call. If the user needs later access of the data, it + * must copy it. 
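 *
 * A hypothetical handler might look like the sketch below ("my_dev" and its
 * "last_event" buffer are invented names, shown only to illustrate copying
 * the payload out while still in the callback, since it is not valid later):
 *
 *	static void my_dln2_event_cb(struct platform_device *pdev, u16 echo,
 *				     const void *data, int len)
 *	{
 *		struct my_dev *priv = platform_get_drvdata(pdev);
 *
 *		if (len > 0 && len <= sizeof(priv->last_event))
 *			memcpy(priv->last_event, data, len);
 *	}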
+ */ + +typedef void (*dln2_event_cb_t)(struct platform_device *pdev, u16 echo, + const void *data, int len); + +/** + * dln2_register_event_cb - register a callback function for an event + * + * @pdev - the sub-device that registers the callback + * @event - the event for which to register a callback + * @event_cb - the callback function + * + * @return 0 in case of success, negative value in case of error + */ +int dln2_register_event_cb(struct platform_device *pdev, u16 event, + dln2_event_cb_t event_cb); + +/** + * dln2_unregister_event_cb - unregister the callback function for an event + * + * @pdev - the sub-device that registered the callback + * @event - the event for which to unregister the callback + */ +void dln2_unregister_event_cb(struct platform_device *pdev, u16 event); + +/** + * dln2_transfer - issue a DLN2 command and wait for a response and the + * associated data + * + * @pdev - the sub-device which is issuing this transfer + * @cmd - the command to be sent to the device + * @obuf - the buffer to be sent to the device; it can be NULL if the user + * doesn't need to transmit data with this command + * @obuf_len - the size of the buffer to be sent to the device + * @ibuf - any data associated with the response will be copied here; it can be + * NULL if the user doesn't need the response data + * @ibuf_len - must be initialized to the input buffer size; it will be modified + * to indicate the actual data transferred; + * + * @return 0 for success, negative value for errors + */ +int dln2_transfer(struct platform_device *pdev, u16 cmd, + const void *obuf, unsigned obuf_len, + void *ibuf, unsigned *ibuf_len); + +/** + * dln2_transfer_rx - variant of @dln2_transfer() where TX buffer is not needed + * + * @pdev - the sub-device which is issuing this transfer + * @cmd - the command to be sent to the device + * @ibuf - any data associated with the response will be copied here; it can be + * NULL if the user doesn't need the response data + * @ibuf_len - must be initialized to the input buffer size; it will be modified + * to indicate the actual data transferred; + * + * @return 0 for success, negative value for errors + */ + +static inline int dln2_transfer_rx(struct platform_device *pdev, u16 cmd, + void *ibuf, unsigned *ibuf_len) +{ + return dln2_transfer(pdev, cmd, NULL, 0, ibuf, ibuf_len); +} + +/** + * dln2_transfer_tx - variant of @dln2_transfer() where RX buffer is not needed + * + * @pdev - the sub-device which is issuing this transfer + * @cmd - the command to be sent to the device + * @obuf - the buffer to be sent to the device; it can be NULL if the + * user doesn't need to transmit data with this command + * @obuf_len - the size of the buffer to be sent to the device + * + * @return 0 for success, negative value for errors + */ +static inline int dln2_transfer_tx(struct platform_device *pdev, u16 cmd, + const void *obuf, unsigned obuf_len) +{ + return dln2_transfer(pdev, cmd, obuf, obuf_len, NULL, NULL); +} + +#endif diff --git a/include/linux/mfd/dm355evm_msp.h b/include/linux/mfd/dm355evm_msp.h new file mode 100644 index 000000000..372470350 --- /dev/null +++ b/include/linux/mfd/dm355evm_msp.h @@ -0,0 +1,79 @@ +/* + * dm355evm_msp.h - support MSP430 microcontroller on DM355EVM board + */ +#ifndef __LINUX_I2C_DM355EVM_MSP +#define __LINUX_I2C_DM355EVM_MSP + +/* + * Written against Spectrum's writeup for the A4 firmware revision, + * and tweaked to match source and rev D2 schematics by removing CPLD + * and NOR flash hooks (which were last appropriate in rev B boards).
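For illustration, a minimal sketch of how a DLN2 sub-driver might call the dln2_transfer_rx() helper declared above; the module ID, command number and little-endian response layout are assumptions made up for this sketch, not taken from the header.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mfd/dln2.h>

#define EXAMPLE_MODULE_ID	0x06	/* hypothetical sub-driver module ID */
#define EXAMPLE_CMD_GET_VER	DLN2_CMD(0x01, EXAMPLE_MODULE_ID)

static int example_dln2_get_version(struct platform_device *pdev, u32 *version)
{
	__le32 buf;
	unsigned len = sizeof(buf);
	int ret;

	/* No TX payload is needed, so the RX-only wrapper is enough. */
	ret = dln2_transfer_rx(pdev, EXAMPLE_CMD_GET_VER, &buf, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(buf))
		return -EPROTO;	/* short response from the device */

	*version = le32_to_cpu(buf);
	return 0;
}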
+ * + * Note that the firmware supports a flavor of write posting ... to be + * sure a write completes, issue another read or write. + */ + +/* utilities to access "registers" emulated by msp430 firmware */ +extern int dm355evm_msp_write(u8 value, u8 reg); +extern int dm355evm_msp_read(u8 reg); + + +/* command/control registers */ +#define DM355EVM_MSP_COMMAND 0x00 +# define MSP_COMMAND_NULL 0 +# define MSP_COMMAND_RESET_COLD 1 +# define MSP_COMMAND_RESET_WARM 2 +# define MSP_COMMAND_RESET_WARM_I 3 +# define MSP_COMMAND_POWEROFF 4 +# define MSP_COMMAND_IR_REINIT 5 +#define DM355EVM_MSP_STATUS 0x01 +# define MSP_STATUS_BAD_OFFSET BIT(0) +# define MSP_STATUS_BAD_COMMAND BIT(1) +# define MSP_STATUS_POWER_ERROR BIT(2) +# define MSP_STATUS_RXBUF_OVERRUN BIT(3) +#define DM355EVM_MSP_RESET 0x02 /* 0 bits == in reset */ +# define MSP_RESET_DC5 BIT(0) +# define MSP_RESET_TVP5154 BIT(2) +# define MSP_RESET_IMAGER BIT(3) +# define MSP_RESET_ETHERNET BIT(4) +# define MSP_RESET_SYS BIT(5) +# define MSP_RESET_AIC33 BIT(7) + +/* GPIO registers ... bit patterns mostly match the source MSP ports */ +#define DM355EVM_MSP_LED 0x03 /* active low (MSP P4) */ +#define DM355EVM_MSP_SWITCH1 0x04 /* (MSP P5, masked) */ +# define MSP_SWITCH1_SW6_1 BIT(0) +# define MSP_SWITCH1_SW6_2 BIT(1) +# define MSP_SWITCH1_SW6_3 BIT(2) +# define MSP_SWITCH1_SW6_4 BIT(3) +# define MSP_SWITCH1_J1 BIT(4) /* NTSC/PAL */ +# define MSP_SWITCH1_MSP_INT BIT(5) /* active low */ +#define DM355EVM_MSP_SWITCH2 0x05 /* (MSP P6, masked) */ +# define MSP_SWITCH2_SW10 BIT(3) +# define MSP_SWITCH2_SW11 BIT(4) +# define MSP_SWITCH2_SW12 BIT(5) +# define MSP_SWITCH2_SW13 BIT(6) +# define MSP_SWITCH2_SW14 BIT(7) +#define DM355EVM_MSP_SDMMC 0x06 /* (MSP P2, masked) */ +# define MSP_SDMMC_0_WP BIT(1) +# define MSP_SDMMC_0_CD BIT(2) /* active low */ +# define MSP_SDMMC_1_WP BIT(3) +# define MSP_SDMMC_1_CD BIT(4) /* active low */ +#define DM355EVM_MSP_FIRMREV 0x07 /* not a GPIO (out of order) */ +#define DM355EVM_MSP_VIDEO_IN 0x08 /* (MSP P3, masked) */ +# define MSP_VIDEO_IMAGER BIT(7) /* low == tvp5146 */ + +/* power supply registers are currently omitted */ + +/* RTC registers */ +#define DM355EVM_MSP_RTC_0 0x12 /* LSB */ +#define DM355EVM_MSP_RTC_1 0x13 +#define DM355EVM_MSP_RTC_2 0x14 +#define DM355EVM_MSP_RTC_3 0x15 /* MSB */ + +/* input event queue registers; code == ((HIGH << 8) | LOW) */ +#define DM355EVM_MSP_INPUT_COUNT 0x16 /* decrement by reading LOW */ +#define DM355EVM_MSP_INPUT_HIGH 0x17 +#define DM355EVM_MSP_INPUT_LOW 0x18 + +#endif /* __LINUX_I2C_DM355EVM_MSP */ diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h new file mode 100644 index 000000000..43dfca1c9 --- /dev/null +++ b/include/linux/mfd/ds1wm.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* MFD cell driver data for the DS1WM driver + * + * to be defined in the MFD device that is + * using this driver for one of his sub devices + */ + +struct ds1wm_driver_data { + int active_high; + int clock_rate; + /* in milliseconds, the amount of time to + * sleep following a reset pulse. Zero + * should work if your bus devices recover + * time respects the 1-wire spec since the + * ds1wm implements the precise timings of + * a reset pulse/presence detect sequence. + */ + unsigned int reset_recover_delay; + + /* Say 1 here for big endian Hardware + * (only relevant with bus-shift > 0 + */ + bool is_hw_big_endian; + + /* left shift of register number to get register address offsett. 
+ * Only 0,1,2 allowed for 8,16 or 32 bit bus width respectively + */ + unsigned int bus_shift; +}; diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h new file mode 100644 index 000000000..ffde195e1 --- /dev/null +++ b/include/linux/mfd/ezx-pcap.h @@ -0,0 +1,254 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2009 Daniel Ribeiro + * + * For further information, please see http://wiki.openezx.org/PCAP2 + */ + +#ifndef EZX_PCAP_H +#define EZX_PCAP_H + +struct pcap_subdev { + int id; + const char *name; + void *platform_data; +}; + +struct pcap_platform_data { + unsigned int irq_base; + unsigned int config; + int gpio; + void (*init) (void *); /* board specific init */ + int num_subdevs; + struct pcap_subdev *subdevs; +}; + +struct pcap_chip; + +int ezx_pcap_write(struct pcap_chip *, u8, u32); +int ezx_pcap_read(struct pcap_chip *, u8, u32 *); +int ezx_pcap_set_bits(struct pcap_chip *, u8, u32, u32); +int pcap_to_irq(struct pcap_chip *, int); +int irq_to_pcap(struct pcap_chip *, int); +int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *); +int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]); +void pcap_set_ts_bits(struct pcap_chip *, u32); + +#define PCAP_SECOND_PORT 1 +#define PCAP_CS_AH 2 + +#define PCAP_REGISTER_WRITE_OP_BIT 0x80000000 +#define PCAP_REGISTER_READ_OP_BIT 0x00000000 + +#define PCAP_REGISTER_VALUE_MASK 0x01ffffff +#define PCAP_REGISTER_ADDRESS_MASK 0x7c000000 +#define PCAP_REGISTER_ADDRESS_SHIFT 26 +#define PCAP_REGISTER_NUMBER 32 +#define PCAP_CLEAR_INTERRUPT_REGISTER 0x01ffffff +#define PCAP_MASK_ALL_INTERRUPT 0x01ffffff + +/* registers accessible by both pcap ports */ +#define PCAP_REG_ISR 0x0 /* Interrupt Status */ +#define PCAP_REG_MSR 0x1 /* Interrupt Mask */ +#define PCAP_REG_PSTAT 0x2 /* Processor Status */ +#define PCAP_REG_VREG2 0x6 /* Regulator Bank 2 Control */ +#define PCAP_REG_AUXVREG 0x7 /* Auxiliary Regulator Control */ +#define PCAP_REG_BATT 0x8 /* Battery Control */ +#define PCAP_REG_ADC 0x9 /* AD Control */ +#define PCAP_REG_ADR 0xa /* AD Result */ +#define PCAP_REG_CODEC 0xb /* Audio Codec Control */ +#define PCAP_REG_RX_AMPS 0xc /* RX Audio Amplifiers Control */ +#define PCAP_REG_ST_DAC 0xd /* Stereo DAC Control */ +#define PCAP_REG_BUSCTRL 0x14 /* Connectivity Control */ +#define PCAP_REG_PERIPH 0x15 /* Peripheral Control */ +#define PCAP_REG_LOWPWR 0x18 /* Regulator Low Power Control */ +#define PCAP_REG_TX_AMPS 0x1a /* TX Audio Amplifiers Control */ +#define PCAP_REG_GP 0x1b /* General Purpose */ +#define PCAP_REG_TEST1 0x1c +#define PCAP_REG_TEST2 0x1d +#define PCAP_REG_VENDOR_TEST1 0x1e +#define PCAP_REG_VENDOR_TEST2 0x1f + +/* registers accessible by pcap port 1 only (a1200, e2 & e6) */ +#define PCAP_REG_INT_SEL 0x3 /* Interrupt Select */ +#define PCAP_REG_SWCTRL 0x4 /* Switching Regulator Control */ +#define PCAP_REG_VREG1 0x5 /* Regulator Bank 1 Control */ +#define PCAP_REG_RTC_TOD 0xe /* RTC Time of Day */ +#define PCAP_REG_RTC_TODA 0xf /* RTC Time of Day Alarm */ +#define PCAP_REG_RTC_DAY 0x10 /* RTC Day */ +#define PCAP_REG_RTC_DAYA 0x11 /* RTC Day Alarm */ +#define PCAP_REG_MTRTMR 0x12 /* AD Monitor Timer */ +#define PCAP_REG_PWR 0x13 /* Power Control */ +#define PCAP_REG_AUXVREG_MASK 0x16 /* Auxiliary Regulator Mask */ +#define PCAP_REG_VENDOR_REV 0x17 +#define PCAP_REG_PERIPH_MASK 0x19 /* Peripheral Mask */ + +/* PCAP2 Interrupts */ +#define PCAP_NIRQS 23 +#define PCAP_IRQ_ADCDONE 0 /* ADC done port 1 */ +#define PCAP_IRQ_TS 1 /* Touch Screen */ +#define PCAP_IRQ_1HZ 2 /* 1HZ timer 
*/ +#define PCAP_IRQ_WH 3 /* ADC above high limit */ +#define PCAP_IRQ_WL 4 /* ADC below low limit */ +#define PCAP_IRQ_TODA 5 /* Time of day alarm */ +#define PCAP_IRQ_USB4V 6 /* USB above 4V */ +#define PCAP_IRQ_ONOFF 7 /* On/Off button */ +#define PCAP_IRQ_ONOFF2 8 /* On/Off button 2 */ +#define PCAP_IRQ_USB1V 9 /* USB above 1V */ +#define PCAP_IRQ_MOBPORT 10 +#define PCAP_IRQ_MIC 11 /* Mic attach/HS button */ +#define PCAP_IRQ_HS 12 /* Headset attach */ +#define PCAP_IRQ_ST 13 +#define PCAP_IRQ_PC 14 /* Power Cut */ +#define PCAP_IRQ_WARM 15 +#define PCAP_IRQ_EOL 16 /* Battery End Of Life */ +#define PCAP_IRQ_CLK 17 +#define PCAP_IRQ_SYSRST 18 /* System Reset */ +#define PCAP_IRQ_DUMMY 19 +#define PCAP_IRQ_ADCDONE2 20 /* ADC done port 2 */ +#define PCAP_IRQ_SOFTRESET 21 +#define PCAP_IRQ_MNEXB 22 + +/* voltage regulators */ +#define V1 0 +#define V2 1 +#define V3 2 +#define V4 3 +#define V5 4 +#define V6 5 +#define V7 6 +#define V8 7 +#define V9 8 +#define V10 9 +#define VAUX1 10 +#define VAUX2 11 +#define VAUX3 12 +#define VAUX4 13 +#define VSIM 14 +#define VSIM2 15 +#define VVIB 16 +#define SW1 17 +#define SW2 18 +#define SW3 19 +#define SW1S 20 +#define SW2S 21 + +#define PCAP_BATT_DAC_MASK 0x000000ff +#define PCAP_BATT_DAC_SHIFT 0 +#define PCAP_BATT_B_FDBK (1 << 8) +#define PCAP_BATT_EXT_ISENSE (1 << 9) +#define PCAP_BATT_V_COIN_MASK 0x00003c00 +#define PCAP_BATT_V_COIN_SHIFT 10 +#define PCAP_BATT_I_COIN (1 << 14) +#define PCAP_BATT_COIN_CH_EN (1 << 15) +#define PCAP_BATT_EOL_SEL_MASK 0x000e0000 +#define PCAP_BATT_EOL_SEL_SHIFT 17 +#define PCAP_BATT_EOL_CMP_EN (1 << 20) +#define PCAP_BATT_BATT_DET_EN (1 << 21) +#define PCAP_BATT_THERMBIAS_CTRL (1 << 22) + +#define PCAP_ADC_ADEN (1 << 0) +#define PCAP_ADC_RAND (1 << 1) +#define PCAP_ADC_AD_SEL1 (1 << 2) +#define PCAP_ADC_AD_SEL2 (1 << 3) +#define PCAP_ADC_ADA1_MASK 0x00000070 +#define PCAP_ADC_ADA1_SHIFT 4 +#define PCAP_ADC_ADA2_MASK 0x00000380 +#define PCAP_ADC_ADA2_SHIFT 7 +#define PCAP_ADC_ATO_MASK 0x00003c00 +#define PCAP_ADC_ATO_SHIFT 10 +#define PCAP_ADC_ATOX (1 << 14) +#define PCAP_ADC_MTR1 (1 << 15) +#define PCAP_ADC_MTR2 (1 << 16) +#define PCAP_ADC_TS_M_MASK 0x000e0000 +#define PCAP_ADC_TS_M_SHIFT 17 +#define PCAP_ADC_TS_REF_LOWPWR (1 << 20) +#define PCAP_ADC_TS_REFENB (1 << 21) +#define PCAP_ADC_BATT_I_POLARITY (1 << 22) +#define PCAP_ADC_BATT_I_ADC (1 << 23) + +#define PCAP_ADC_BANK_0 0 +#define PCAP_ADC_BANK_1 1 +/* ADC bank 0 */ +#define PCAP_ADC_CH_COIN 0 +#define PCAP_ADC_CH_BATT 1 +#define PCAP_ADC_CH_BPLUS 2 +#define PCAP_ADC_CH_MOBPORTB 3 +#define PCAP_ADC_CH_TEMPERATURE 4 +#define PCAP_ADC_CH_CHARGER_ID 5 +#define PCAP_ADC_CH_AD6 6 +/* ADC bank 1 */ +#define PCAP_ADC_CH_AD7 0 +#define PCAP_ADC_CH_AD8 1 +#define PCAP_ADC_CH_AD9 2 +#define PCAP_ADC_CH_TS_X1 3 +#define PCAP_ADC_CH_TS_X2 4 +#define PCAP_ADC_CH_TS_Y1 5 +#define PCAP_ADC_CH_TS_Y2 6 + +#define PCAP_ADC_T_NOW 0 +#define PCAP_ADC_T_IN_BURST 1 +#define PCAP_ADC_T_OUT_BURST 2 + +#define PCAP_ADC_ATO_IN_BURST 6 +#define PCAP_ADC_ATO_OUT_BURST 0 + +#define PCAP_ADC_TS_M_XY 1 +#define PCAP_ADC_TS_M_PRESSURE 2 +#define PCAP_ADC_TS_M_PLATE_X 3 +#define PCAP_ADC_TS_M_PLATE_Y 4 +#define PCAP_ADC_TS_M_STANDBY 5 +#define PCAP_ADC_TS_M_NONTS 6 + +#define PCAP_ADR_ADD1_MASK 0x000003ff +#define PCAP_ADR_ADD1_SHIFT 0 +#define PCAP_ADR_ADD2_MASK 0x000ffc00 +#define PCAP_ADR_ADD2_SHIFT 10 +#define PCAP_ADR_ADINC1 (1 << 20) +#define PCAP_ADR_ADINC2 (1 << 21) +#define PCAP_ADR_ASC (1 << 22) +#define PCAP_ADR_ONESHOT (1 << 23) + +#define PCAP_BUSCTRL_FSENB (1 << 0) +#define 
PCAP_BUSCTRL_USB_SUSPEND (1 << 1) +#define PCAP_BUSCTRL_USB_PU (1 << 2) +#define PCAP_BUSCTRL_USB_PD (1 << 3) +#define PCAP_BUSCTRL_VUSB_EN (1 << 4) +#define PCAP_BUSCTRL_USB_PS (1 << 5) +#define PCAP_BUSCTRL_VUSB_MSTR_EN (1 << 6) +#define PCAP_BUSCTRL_VBUS_PD_ENB (1 << 7) +#define PCAP_BUSCTRL_CURRLIM (1 << 8) +#define PCAP_BUSCTRL_RS232ENB (1 << 9) +#define PCAP_BUSCTRL_RS232_DIR (1 << 10) +#define PCAP_BUSCTRL_SE0_CONN (1 << 11) +#define PCAP_BUSCTRL_USB_PDM (1 << 12) +#define PCAP_BUSCTRL_BUS_PRI_ADJ (1 << 24) + +/* leds */ +#define PCAP_LED0 0 +#define PCAP_LED1 1 +#define PCAP_BL0 2 +#define PCAP_BL1 3 +#define PCAP_LED_3MA 0 +#define PCAP_LED_4MA 1 +#define PCAP_LED_5MA 2 +#define PCAP_LED_9MA 3 +#define PCAP_LED_T_MASK 0xf +#define PCAP_LED_C_MASK 0x3 +#define PCAP_BL_MASK 0x1f +#define PCAP_BL0_SHIFT 0 +#define PCAP_LED0_EN (1 << 5) +#define PCAP_LED1_EN (1 << 6) +#define PCAP_LED0_T_SHIFT 7 +#define PCAP_LED1_T_SHIFT 11 +#define PCAP_LED0_C_SHIFT 15 +#define PCAP_LED1_C_SHIFT 17 +#define PCAP_BL1_SHIFT 20 + +/* RTC */ +#define PCAP_RTC_DAY_MASK 0x3fff +#define PCAP_RTC_TOD_MASK 0xffff +#define PCAP_RTC_PC_MASK 0x7 +#define SEC_PER_DAY 86400 + +#endif diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h new file mode 100644 index 000000000..2580c08db --- /dev/null +++ b/include/linux/mfd/hi6421-pmic.h @@ -0,0 +1,46 @@ +/* + * Header file for device driver Hi6421 PMIC + * + * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. + * http://www.hisilicon.com + * Copyright (c) <2013-2014> Linaro Ltd. + * http://www.linaro.org + * + * Author: Guodong Xu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __HI6421_PMIC_H +#define __HI6421_PMIC_H + +/* Hi6421 registers are mapped to memory bus in 4 bytes stride */ +#define HI6421_REG_TO_BUS_ADDR(x) (x << 2) + +/* Hi6421 maximum register number */ +#define HI6421_REG_MAX 0xFF + +/* Hi6421 OCP (over current protection) and DEB (debounce) control register */ +#define HI6421_OCP_DEB_CTRL_REG HI6421_REG_TO_BUS_ADDR(0x51) +#define HI6421_OCP_DEB_SEL_MASK 0x0C +#define HI6421_OCP_DEB_SEL_8MS 0x00 +#define HI6421_OCP_DEB_SEL_16MS 0x04 +#define HI6421_OCP_DEB_SEL_32MS 0x08 +#define HI6421_OCP_DEB_SEL_64MS 0x0C +#define HI6421_OCP_EN_DEBOUNCE_MASK 0x02 +#define HI6421_OCP_EN_DEBOUNCE_ENABLE 0x02 +#define HI6421_OCP_AUTO_STOP_MASK 0x01 +#define HI6421_OCP_AUTO_STOP_ENABLE 0x01 + +struct hi6421_pmic { + struct regmap *regmap; +}; + +enum hi6421_type { + HI6421 = 0, + HI6421_V530, +}; + +#endif /* __HI6421_PMIC_H */ diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h new file mode 100644 index 000000000..62f03c2b1 --- /dev/null +++ b/include/linux/mfd/hi655x-pmic.h @@ -0,0 +1,64 @@ +/* + * Device driver for regulators in hi655x IC + * + * Copyright (c) 2016 Hisilicon. + * + * Authors: + * Chen Feng + * Fei Wang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
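As a rough illustration of the ezx-pcap ADC interface declared above, the sketch below performs one synchronous conversion of two bank-0 channels; the assumption that pcap_adc_sync() fills one result word per requested channel is inferred from the array parameters and is not stated in the header.

#include <linux/mfd/ezx-pcap.h>

/* Read battery voltage and temperature in a single synchronous ADC pass. */
static int example_pcap_read_battery(struct pcap_chip *pcap, u16 *batt, u16 *temp)
{
	u8 ch[2] = { PCAP_ADC_CH_BATT, PCAP_ADC_CH_TEMPERATURE };
	u16 res[2];
	int ret;

	ret = pcap_adc_sync(pcap, PCAP_ADC_BANK_0, 0, ch, res);
	if (ret)
		return ret;

	*batt = res[0];
	*temp = res[1];
	return 0;
}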
+ */ + +#ifndef __HI655X_PMIC_H +#define __HI655X_PMIC_H + +/* Hi655x registers are mapped to memory bus in 4 bytes stride */ +#define HI655X_STRIDE 4 +#define HI655X_BUS_ADDR(x) ((x) << 2) + +#define HI655X_BITS 8 + +#define HI655X_NR_IRQ 32 + +#define HI655X_IRQ_STAT_BASE (0x003 << 2) +#define HI655X_IRQ_MASK_BASE (0x007 << 2) +#define HI655X_ANA_IRQM_BASE (0x1b5 << 2) +#define HI655X_IRQ_ARRAY 4 +#define HI655X_IRQ_MASK 0xFF +#define HI655X_IRQ_CLR 0xFF +#define HI655X_VER_REG 0x00 + +#define PMU_VER_START 0x10 +#define PMU_VER_END 0x38 + +#define RESERVE_INT 7 +#define PWRON_D20R_INT 6 +#define PWRON_D20F_INT 5 +#define PWRON_D4SR_INT 4 +#define VSYS_6P0_D200UR_INT 3 +#define VSYS_UV_D3R_INT 2 +#define VSYS_2P5_R_INT 1 +#define OTMP_D1R_INT 0 + +#define RESERVE_INT_MASK BIT(RESERVE_INT) +#define PWRON_D20R_INT_MASK BIT(PWRON_D20R_INT) +#define PWRON_D20F_INT_MASK BIT(PWRON_D20F_INT) +#define PWRON_D4SR_INT_MASK BIT(PWRON_D4SR_INT) +#define VSYS_6P0_D200UR_INT_MASK BIT(VSYS_6P0_D200UR_INT) +#define VSYS_UV_D3R_INT_MASK BIT(VSYS_UV_D3R_INT) +#define VSYS_2P5_R_INT_MASK BIT(VSYS_2P5_R_INT) +#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT) + +struct hi655x_pmic { + struct resource *res; + struct device *dev; + struct regmap *regmap; + int gpio; + unsigned int ver; + struct regmap_irq_chip_data *irq_data; +}; + +#endif diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h new file mode 100644 index 000000000..3d3ed67bd --- /dev/null +++ b/include/linux/mfd/htc-pasic3.h @@ -0,0 +1,54 @@ +/* + * HTC PASIC3 driver - LEDs and DS1WM + * + * Copyright (c) 2007 Philipp Zabel + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. 
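A minimal sketch of how the hi655x version window defined above might be checked through the PMIC regmap; using HI655X_BUS_ADDR() to translate the register offset is an assumption based on the 4-byte-stride comment, not something the header spells out.

#include <linux/errno.h>
#include <linux/regmap.h>
#include <linux/mfd/hi655x-pmic.h>

static int example_hi655x_check_version(struct hi655x_pmic *pmic)
{
	unsigned int ver;
	int ret;

	ret = regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &ver);
	if (ret)
		return ret;

	/* Only silicon revisions inside the documented window are usable. */
	if (ver < PMU_VER_START || ver > PMU_VER_END)
		return -ENODEV;

	pmic->ver = ver;
	return 0;
}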
+ * + */ + +#ifndef __PASIC3_H +#define __PASIC3_H + +#include +#include + +extern void pasic3_write_register(struct device *dev, u32 reg, u8 val); +extern u8 pasic3_read_register(struct device *dev, u32 reg); + +/* + * mask for registers 0x20,0x21,0x22 + */ +#define PASIC3_MASK_LED0 0x04 +#define PASIC3_MASK_LED1 0x08 +#define PASIC3_MASK_LED2 0x40 + +/* + * bits in register 0x06 + */ +#define PASIC3_BIT2_LED0 0x08 +#define PASIC3_BIT2_LED1 0x10 +#define PASIC3_BIT2_LED2 0x20 + +struct pasic3_led { + struct led_classdev led; + unsigned int hw_num; + unsigned int bit2; + unsigned int mask; + struct pasic3_leds_machinfo *pdata; +}; + +struct pasic3_leds_machinfo { + unsigned int num_leds; + unsigned int power_gpio; + struct pasic3_led *leds; +}; + +struct pasic3_platform_data { + struct pasic3_leds_machinfo *led_pdata; + unsigned int clock_rate; +}; + +#endif diff --git a/include/linux/mfd/imx25-tsadc.h b/include/linux/mfd/imx25-tsadc.h new file mode 100644 index 000000000..21f8adfef --- /dev/null +++ b/include/linux/mfd/imx25-tsadc.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ +#define _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ + +struct regmap; +struct clk; + +struct mx25_tsadc { + struct regmap *regs; + struct irq_domain *domain; + struct clk *clk; +}; + +#define MX25_TSC_TGCR 0x00 +#define MX25_TSC_TGSR 0x04 +#define MX25_TSC_TICR 0x08 + +/* The same register layout for TC and GC queue */ +#define MX25_ADCQ_FIFO 0x00 +#define MX25_ADCQ_CR 0x04 +#define MX25_ADCQ_SR 0x08 +#define MX25_ADCQ_MR 0x0c +#define MX25_ADCQ_ITEM_7_0 0x20 +#define MX25_ADCQ_ITEM_15_8 0x24 +#define MX25_ADCQ_CFG(n) (0x40 + ((n) * 0x4)) + +#define MX25_ADCQ_MR_MASK 0xffffffff + +/* TGCR */ +#define MX25_TGCR_PDBTIME(x) ((x) << 25) +#define MX25_TGCR_PDBTIME_MASK GENMASK(31, 25) +#define MX25_TGCR_PDBEN BIT(24) +#define MX25_TGCR_PDEN BIT(23) +#define MX25_TGCR_ADCCLKCFG(x) ((x) << 16) +#define MX25_TGCR_GET_ADCCLK(x) (((x) >> 16) & 0x1f) +#define MX25_TGCR_INTREFEN BIT(10) +#define MX25_TGCR_POWERMODE_MASK GENMASK(9, 8) +#define MX25_TGCR_POWERMODE_SAVE (1 << 8) +#define MX25_TGCR_POWERMODE_ON (2 << 8) +#define MX25_TGCR_STLC BIT(5) +#define MX25_TGCR_SLPC BIT(4) +#define MX25_TGCR_FUNC_RST BIT(2) +#define MX25_TGCR_TSC_RST BIT(1) +#define MX25_TGCR_CLK_EN BIT(0) + +/* TGSR */ +#define MX25_TGSR_SLP_INT BIT(2) +#define MX25_TGSR_GCQ_INT BIT(1) +#define MX25_TGSR_TCQ_INT BIT(0) + +/* ADCQ_ITEM_* */ +#define _MX25_ADCQ_ITEM(item, x) ((x) << ((item) * 4)) +#define MX25_ADCQ_ITEM(item, x) ((item) >= 8 ? 
\ + _MX25_ADCQ_ITEM((item) - 8, (x)) : _MX25_ADCQ_ITEM((item), (x))) + +/* ADCQ_FIFO (TCQFIFO and GCQFIFO) */ +#define MX25_ADCQ_FIFO_DATA(x) (((x) >> 4) & 0xfff) +#define MX25_ADCQ_FIFO_ID(x) ((x) & 0xf) + +/* ADCQ_CR (TCQR and GCQR) */ +#define MX25_ADCQ_CR_PDCFG_LEVEL BIT(19) +#define MX25_ADCQ_CR_PDMSK BIT(18) +#define MX25_ADCQ_CR_FRST BIT(17) +#define MX25_ADCQ_CR_QRST BIT(16) +#define MX25_ADCQ_CR_RWAIT_MASK GENMASK(15, 12) +#define MX25_ADCQ_CR_RWAIT(x) ((x) << 12) +#define MX25_ADCQ_CR_WMRK_MASK GENMASK(11, 8) +#define MX25_ADCQ_CR_WMRK(x) ((x) << 8) +#define MX25_ADCQ_CR_LITEMID_MASK (0xf << 4) +#define MX25_ADCQ_CR_LITEMID(x) ((x) << 4) +#define MX25_ADCQ_CR_RPT BIT(3) +#define MX25_ADCQ_CR_FQS BIT(2) +#define MX25_ADCQ_CR_QSM_MASK GENMASK(1, 0) +#define MX25_ADCQ_CR_QSM_PD 0x1 +#define MX25_ADCQ_CR_QSM_FQS 0x2 +#define MX25_ADCQ_CR_QSM_FQS_PD 0x3 + +/* ADCQ_SR (TCQSR and GCQSR) */ +#define MX25_ADCQ_SR_FDRY BIT(15) +#define MX25_ADCQ_SR_FULL BIT(14) +#define MX25_ADCQ_SR_EMPT BIT(13) +#define MX25_ADCQ_SR_FDN(x) (((x) >> 8) & 0x1f) +#define MX25_ADCQ_SR_FRR BIT(6) +#define MX25_ADCQ_SR_FUR BIT(5) +#define MX25_ADCQ_SR_FOR BIT(4) +#define MX25_ADCQ_SR_EOQ BIT(1) +#define MX25_ADCQ_SR_PD BIT(0) + +/* ADCQ_MR (TCQMR and GCQMR) */ +#define MX25_ADCQ_MR_FDRY_DMA BIT(31) +#define MX25_ADCQ_MR_FER_DMA BIT(22) +#define MX25_ADCQ_MR_FUR_DMA BIT(21) +#define MX25_ADCQ_MR_FOR_DMA BIT(20) +#define MX25_ADCQ_MR_EOQ_DMA BIT(17) +#define MX25_ADCQ_MR_PD_DMA BIT(16) +#define MX25_ADCQ_MR_FDRY_IRQ BIT(15) +#define MX25_ADCQ_MR_FER_IRQ BIT(6) +#define MX25_ADCQ_MR_FUR_IRQ BIT(5) +#define MX25_ADCQ_MR_FOR_IRQ BIT(4) +#define MX25_ADCQ_MR_EOQ_IRQ BIT(1) +#define MX25_ADCQ_MR_PD_IRQ BIT(0) + +/* ADCQ_CFG (TICR, TCC0-7,GCC0-7) */ +#define MX25_ADCQ_CFG_SETTLING_TIME(x) ((x) << 24) +#define MX25_ADCQ_CFG_IGS (1 << 20) +#define MX25_ADCQ_CFG_NOS_MASK GENMASK(19, 16) +#define MX25_ADCQ_CFG_NOS(x) (((x) - 1) << 16) +#define MX25_ADCQ_CFG_WIPER (1 << 15) +#define MX25_ADCQ_CFG_YNLR (1 << 14) +#define MX25_ADCQ_CFG_YPLL_HIGH (0 << 12) +#define MX25_ADCQ_CFG_YPLL_OFF (1 << 12) +#define MX25_ADCQ_CFG_YPLL_LOW (3 << 12) +#define MX25_ADCQ_CFG_XNUR_HIGH (0 << 10) +#define MX25_ADCQ_CFG_XNUR_OFF (1 << 10) +#define MX25_ADCQ_CFG_XNUR_LOW (3 << 10) +#define MX25_ADCQ_CFG_XPUL_HIGH (0 << 9) +#define MX25_ADCQ_CFG_XPUL_OFF (1 << 9) +#define MX25_ADCQ_CFG_REFP(sel) ((sel) << 7) +#define MX25_ADCQ_CFG_REFP_YP MX25_ADCQ_CFG_REFP(0) +#define MX25_ADCQ_CFG_REFP_XP MX25_ADCQ_CFG_REFP(1) +#define MX25_ADCQ_CFG_REFP_EXT MX25_ADCQ_CFG_REFP(2) +#define MX25_ADCQ_CFG_REFP_INT MX25_ADCQ_CFG_REFP(3) +#define MX25_ADCQ_CFG_REFP_MASK GENMASK(8, 7) +#define MX25_ADCQ_CFG_IN(sel) ((sel) << 4) +#define MX25_ADCQ_CFG_IN_XP MX25_ADCQ_CFG_IN(0) +#define MX25_ADCQ_CFG_IN_YP MX25_ADCQ_CFG_IN(1) +#define MX25_ADCQ_CFG_IN_XN MX25_ADCQ_CFG_IN(2) +#define MX25_ADCQ_CFG_IN_YN MX25_ADCQ_CFG_IN(3) +#define MX25_ADCQ_CFG_IN_WIPER MX25_ADCQ_CFG_IN(4) +#define MX25_ADCQ_CFG_IN_AUX0 MX25_ADCQ_CFG_IN(5) +#define MX25_ADCQ_CFG_IN_AUX1 MX25_ADCQ_CFG_IN(6) +#define MX25_ADCQ_CFG_IN_AUX2 MX25_ADCQ_CFG_IN(7) +#define MX25_ADCQ_CFG_REFN(sel) ((sel) << 2) +#define MX25_ADCQ_CFG_REFN_XN MX25_ADCQ_CFG_REFN(0) +#define MX25_ADCQ_CFG_REFN_YN MX25_ADCQ_CFG_REFN(1) +#define MX25_ADCQ_CFG_REFN_NGND MX25_ADCQ_CFG_REFN(2) +#define MX25_ADCQ_CFG_REFN_NGND2 MX25_ADCQ_CFG_REFN(3) +#define MX25_ADCQ_CFG_REFN_MASK GENMASK(3, 2) +#define MX25_ADCQ_CFG_PENIACK (1 << 1) + +#endif /* _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ */ diff --git a/include/linux/mfd/intel_msic.h 
b/include/linux/mfd/intel_msic.h new file mode 100644 index 000000000..439a7a617 --- /dev/null +++ b/include/linux/mfd/intel_msic.h @@ -0,0 +1,456 @@ +/* + * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC + * + * Copyright (C) 2011, Intel Corporation + * Author: Mika Westerberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MFD_INTEL_MSIC_H__ +#define __LINUX_MFD_INTEL_MSIC_H__ + +/* ID */ +#define INTEL_MSIC_ID0 0x000 /* RO */ +#define INTEL_MSIC_ID1 0x001 /* RO */ + +/* IRQ */ +#define INTEL_MSIC_IRQLVL1 0x002 +#define INTEL_MSIC_ADC1INT 0x003 +#define INTEL_MSIC_CCINT 0x004 +#define INTEL_MSIC_PWRSRCINT 0x005 +#define INTEL_MSIC_PWRSRCINT1 0x006 +#define INTEL_MSIC_CHRINT 0x007 +#define INTEL_MSIC_CHRINT1 0x008 +#define INTEL_MSIC_RTCIRQ 0x009 +#define INTEL_MSIC_GPIO0LVIRQ 0x00a +#define INTEL_MSIC_GPIO1LVIRQ 0x00b +#define INTEL_MSIC_GPIOHVIRQ 0x00c +#define INTEL_MSIC_VRINT 0x00d +#define INTEL_MSIC_OCAUDIO 0x00e +#define INTEL_MSIC_ACCDET 0x00f +#define INTEL_MSIC_RESETIRQ1 0x010 +#define INTEL_MSIC_RESETIRQ2 0x011 +#define INTEL_MSIC_MADC1INT 0x012 +#define INTEL_MSIC_MCCINT 0x013 +#define INTEL_MSIC_MPWRSRCINT 0x014 +#define INTEL_MSIC_MPWRSRCINT1 0x015 +#define INTEL_MSIC_MCHRINT 0x016 +#define INTEL_MSIC_MCHRINT1 0x017 +#define INTEL_MSIC_RTCIRQMASK 0x018 +#define INTEL_MSIC_GPIO0LVIRQMASK 0x019 +#define INTEL_MSIC_GPIO1LVIRQMASK 0x01a +#define INTEL_MSIC_GPIOHVIRQMASK 0x01b +#define INTEL_MSIC_VRINTMASK 0x01c +#define INTEL_MSIC_OCAUDIOMASK 0x01d +#define INTEL_MSIC_ACCDETMASK 0x01e +#define INTEL_MSIC_RESETIRQ1MASK 0x01f +#define INTEL_MSIC_RESETIRQ2MASK 0x020 +#define INTEL_MSIC_IRQLVL1MSK 0x021 +#define INTEL_MSIC_PBCONFIG 0x03e +#define INTEL_MSIC_PBSTATUS 0x03f /* RO */ + +/* GPIO */ +#define INTEL_MSIC_GPIO0LV7CTLO 0x040 +#define INTEL_MSIC_GPIO0LV6CTLO 0x041 +#define INTEL_MSIC_GPIO0LV5CTLO 0x042 +#define INTEL_MSIC_GPIO0LV4CTLO 0x043 +#define INTEL_MSIC_GPIO0LV3CTLO 0x044 +#define INTEL_MSIC_GPIO0LV2CTLO 0x045 +#define INTEL_MSIC_GPIO0LV1CTLO 0x046 +#define INTEL_MSIC_GPIO0LV0CTLO 0x047 +#define INTEL_MSIC_GPIO1LV7CTLOS 0x048 +#define INTEL_MSIC_GPIO1LV6CTLO 0x049 +#define INTEL_MSIC_GPIO1LV5CTLO 0x04a +#define INTEL_MSIC_GPIO1LV4CTLO 0x04b +#define INTEL_MSIC_GPIO1LV3CTLO 0x04c +#define INTEL_MSIC_GPIO1LV2CTLO 0x04d +#define INTEL_MSIC_GPIO1LV1CTLO 0x04e +#define INTEL_MSIC_GPIO1LV0CTLO 0x04f +#define INTEL_MSIC_GPIO0LV7CTLI 0x050 +#define INTEL_MSIC_GPIO0LV6CTLI 0x051 +#define INTEL_MSIC_GPIO0LV5CTLI 0x052 +#define INTEL_MSIC_GPIO0LV4CTLI 0x053 +#define INTEL_MSIC_GPIO0LV3CTLI 0x054 +#define INTEL_MSIC_GPIO0LV2CTLI 0x055 +#define INTEL_MSIC_GPIO0LV1CTLI 0x056 +#define INTEL_MSIC_GPIO0LV0CTLI 0x057 +#define INTEL_MSIC_GPIO1LV7CTLIS 0x058 +#define INTEL_MSIC_GPIO1LV6CTLI 0x059 +#define INTEL_MSIC_GPIO1LV5CTLI 0x05a +#define INTEL_MSIC_GPIO1LV4CTLI 0x05b +#define INTEL_MSIC_GPIO1LV3CTLI 0x05c +#define INTEL_MSIC_GPIO1LV2CTLI 0x05d +#define INTEL_MSIC_GPIO1LV1CTLI 0x05e +#define INTEL_MSIC_GPIO1LV0CTLI 0x05f +#define INTEL_MSIC_PWM0CLKDIV1 0x061 +#define INTEL_MSIC_PWM0CLKDIV0 0x062 +#define INTEL_MSIC_PWM1CLKDIV1 0x063 +#define INTEL_MSIC_PWM1CLKDIV0 0x064 +#define INTEL_MSIC_PWM2CLKDIV1 0x065 +#define INTEL_MSIC_PWM2CLKDIV0 0x066 +#define INTEL_MSIC_PWM0DUTYCYCLE 0x067 +#define INTEL_MSIC_PWM1DUTYCYCLE 0x068 +#define INTEL_MSIC_PWM2DUTYCYCLE 0x069 +#define INTEL_MSIC_GPIO0HV3CTLO 0x06d 
+#define INTEL_MSIC_GPIO0HV2CTLO 0x06e +#define INTEL_MSIC_GPIO0HV1CTLO 0x06f +#define INTEL_MSIC_GPIO0HV0CTLO 0x070 +#define INTEL_MSIC_GPIO1HV3CTLO 0x071 +#define INTEL_MSIC_GPIO1HV2CTLO 0x072 +#define INTEL_MSIC_GPIO1HV1CTLO 0x073 +#define INTEL_MSIC_GPIO1HV0CTLO 0x074 +#define INTEL_MSIC_GPIO0HV3CTLI 0x075 +#define INTEL_MSIC_GPIO0HV2CTLI 0x076 +#define INTEL_MSIC_GPIO0HV1CTLI 0x077 +#define INTEL_MSIC_GPIO0HV0CTLI 0x078 +#define INTEL_MSIC_GPIO1HV3CTLI 0x079 +#define INTEL_MSIC_GPIO1HV2CTLI 0x07a +#define INTEL_MSIC_GPIO1HV1CTLI 0x07b +#define INTEL_MSIC_GPIO1HV0CTLI 0x07c + +/* SVID */ +#define INTEL_MSIC_SVIDCTRL0 0x080 +#define INTEL_MSIC_SVIDCTRL1 0x081 +#define INTEL_MSIC_SVIDCTRL2 0x082 +#define INTEL_MSIC_SVIDTXLASTPKT3 0x083 /* RO */ +#define INTEL_MSIC_SVIDTXLASTPKT2 0x084 /* RO */ +#define INTEL_MSIC_SVIDTXLASTPKT1 0x085 /* RO */ +#define INTEL_MSIC_SVIDTXLASTPKT0 0x086 /* RO */ +#define INTEL_MSIC_SVIDPKTOUTBYTE3 0x087 +#define INTEL_MSIC_SVIDPKTOUTBYTE2 0x088 +#define INTEL_MSIC_SVIDPKTOUTBYTE1 0x089 +#define INTEL_MSIC_SVIDPKTOUTBYTE0 0x08a +#define INTEL_MSIC_SVIDRXVPDEBUG1 0x08b +#define INTEL_MSIC_SVIDRXVPDEBUG0 0x08c +#define INTEL_MSIC_SVIDRXLASTPKT3 0x08d /* RO */ +#define INTEL_MSIC_SVIDRXLASTPKT2 0x08e /* RO */ +#define INTEL_MSIC_SVIDRXLASTPKT1 0x08f /* RO */ +#define INTEL_MSIC_SVIDRXLASTPKT0 0x090 /* RO */ +#define INTEL_MSIC_SVIDRXCHKSTATUS3 0x091 /* RO */ +#define INTEL_MSIC_SVIDRXCHKSTATUS2 0x092 /* RO */ +#define INTEL_MSIC_SVIDRXCHKSTATUS1 0x093 /* RO */ +#define INTEL_MSIC_SVIDRXCHKSTATUS0 0x094 /* RO */ + +/* VREG */ +#define INTEL_MSIC_VCCLATCH 0x0c0 +#define INTEL_MSIC_VNNLATCH 0x0c1 +#define INTEL_MSIC_VCCCNT 0x0c2 +#define INTEL_MSIC_SMPSRAMP 0x0c3 +#define INTEL_MSIC_VNNCNT 0x0c4 +#define INTEL_MSIC_VNNAONCNT 0x0c5 +#define INTEL_MSIC_VCC122AONCNT 0x0c6 +#define INTEL_MSIC_V180AONCNT 0x0c7 +#define INTEL_MSIC_V500CNT 0x0c8 +#define INTEL_MSIC_VIHFCNT 0x0c9 +#define INTEL_MSIC_LDORAMP1 0x0ca +#define INTEL_MSIC_LDORAMP2 0x0cb +#define INTEL_MSIC_VCC108AONCNT 0x0cc +#define INTEL_MSIC_VCC108ASCNT 0x0cd +#define INTEL_MSIC_VCC108CNT 0x0ce +#define INTEL_MSIC_VCCA100ASCNT 0x0cf +#define INTEL_MSIC_VCCA100CNT 0x0d0 +#define INTEL_MSIC_VCC180AONCNT 0x0d1 +#define INTEL_MSIC_VCC180CNT 0x0d2 +#define INTEL_MSIC_VCC330CNT 0x0d3 +#define INTEL_MSIC_VUSB330CNT 0x0d4 +#define INTEL_MSIC_VCCSDIOCNT 0x0d5 +#define INTEL_MSIC_VPROG1CNT 0x0d6 +#define INTEL_MSIC_VPROG2CNT 0x0d7 +#define INTEL_MSIC_VEMMCSCNT 0x0d8 +#define INTEL_MSIC_VEMMC1CNT 0x0d9 +#define INTEL_MSIC_VEMMC2CNT 0x0da +#define INTEL_MSIC_VAUDACNT 0x0db +#define INTEL_MSIC_VHSPCNT 0x0dc +#define INTEL_MSIC_VHSNCNT 0x0dd +#define INTEL_MSIC_VHDMICNT 0x0de +#define INTEL_MSIC_VOTGCNT 0x0df +#define INTEL_MSIC_V1P35CNT 0x0e0 +#define INTEL_MSIC_V330AONCNT 0x0e1 + +/* RESET */ +#define INTEL_MSIC_CHIPCNTRL 0x100 /* WO */ +#define INTEL_MSIC_ERCONFIG 0x101 + +/* BURST */ +#define INTEL_MSIC_BATCURRENTLIMIT12 0x102 +#define INTEL_MSIC_BATTIMELIMIT12 0x103 +#define INTEL_MSIC_BATTIMELIMIT3 0x104 +#define INTEL_MSIC_BATTIMEDB 0x105 +#define INTEL_MSIC_BRSTCONFIGOUTPUTS 0x106 +#define INTEL_MSIC_BRSTCONFIGACTIONS 0x107 +#define INTEL_MSIC_BURSTCONTROLSTATUS 0x108 + +/* RTC */ +#define INTEL_MSIC_RTCB1 0x140 /* RO */ +#define INTEL_MSIC_RTCB2 0x141 /* RO */ +#define INTEL_MSIC_RTCB3 0x142 /* RO */ +#define INTEL_MSIC_RTCB4 0x143 /* RO */ +#define INTEL_MSIC_RTCOB1 0x144 +#define INTEL_MSIC_RTCOB2 0x145 +#define INTEL_MSIC_RTCOB3 0x146 +#define INTEL_MSIC_RTCOB4 0x147 +#define INTEL_MSIC_RTCAB1 0x148 +#define 
INTEL_MSIC_RTCAB2 0x149 +#define INTEL_MSIC_RTCAB3 0x14a +#define INTEL_MSIC_RTCAB4 0x14b +#define INTEL_MSIC_RTCWAB1 0x14c +#define INTEL_MSIC_RTCWAB2 0x14d +#define INTEL_MSIC_RTCWAB3 0x14e +#define INTEL_MSIC_RTCWAB4 0x14f +#define INTEL_MSIC_RTCSC1 0x150 +#define INTEL_MSIC_RTCSC2 0x151 +#define INTEL_MSIC_RTCSC3 0x152 +#define INTEL_MSIC_RTCSC4 0x153 +#define INTEL_MSIC_RTCSTATUS 0x154 /* RO */ +#define INTEL_MSIC_RTCCONFIG1 0x155 +#define INTEL_MSIC_RTCCONFIG2 0x156 + +/* CHARGER */ +#define INTEL_MSIC_BDTIMER 0x180 +#define INTEL_MSIC_BATTRMV 0x181 +#define INTEL_MSIC_VBUSDET 0x182 +#define INTEL_MSIC_VBUSDET1 0x183 +#define INTEL_MSIC_ADPHVDET 0x184 +#define INTEL_MSIC_ADPLVDET 0x185 +#define INTEL_MSIC_ADPDETDBDM 0x186 +#define INTEL_MSIC_LOWBATTDET 0x187 +#define INTEL_MSIC_CHRCTRL 0x188 +#define INTEL_MSIC_CHRCVOLTAGE 0x189 +#define INTEL_MSIC_CHRCCURRENT 0x18a +#define INTEL_MSIC_SPCHARGER 0x18b +#define INTEL_MSIC_CHRTTIME 0x18c +#define INTEL_MSIC_CHRCTRL1 0x18d +#define INTEL_MSIC_PWRSRCLMT 0x18e +#define INTEL_MSIC_CHRSTWDT 0x18f +#define INTEL_MSIC_WDTWRITE 0x190 /* WO */ +#define INTEL_MSIC_CHRSAFELMT 0x191 +#define INTEL_MSIC_SPWRSRCINT 0x192 /* RO */ +#define INTEL_MSIC_SPWRSRCINT1 0x193 /* RO */ +#define INTEL_MSIC_CHRLEDPWM 0x194 +#define INTEL_MSIC_CHRLEDCTRL 0x195 + +/* ADC */ +#define INTEL_MSIC_ADC1CNTL1 0x1c0 +#define INTEL_MSIC_ADC1CNTL2 0x1c1 +#define INTEL_MSIC_ADC1CNTL3 0x1c2 +#define INTEL_MSIC_ADC1OFFSETH 0x1c3 /* RO */ +#define INTEL_MSIC_ADC1OFFSETL 0x1c4 /* RO */ +#define INTEL_MSIC_ADC1ADDR0 0x1c5 +#define INTEL_MSIC_ADC1ADDR1 0x1c6 +#define INTEL_MSIC_ADC1ADDR2 0x1c7 +#define INTEL_MSIC_ADC1ADDR3 0x1c8 +#define INTEL_MSIC_ADC1ADDR4 0x1c9 +#define INTEL_MSIC_ADC1ADDR5 0x1ca +#define INTEL_MSIC_ADC1ADDR6 0x1cb +#define INTEL_MSIC_ADC1ADDR7 0x1cc +#define INTEL_MSIC_ADC1ADDR8 0x1cd +#define INTEL_MSIC_ADC1ADDR9 0x1ce +#define INTEL_MSIC_ADC1ADDR10 0x1cf +#define INTEL_MSIC_ADC1ADDR11 0x1d0 +#define INTEL_MSIC_ADC1ADDR12 0x1d1 +#define INTEL_MSIC_ADC1ADDR13 0x1d2 +#define INTEL_MSIC_ADC1ADDR14 0x1d3 +#define INTEL_MSIC_ADC1SNS0H 0x1d4 /* RO */ +#define INTEL_MSIC_ADC1SNS0L 0x1d5 /* RO */ +#define INTEL_MSIC_ADC1SNS1H 0x1d6 /* RO */ +#define INTEL_MSIC_ADC1SNS1L 0x1d7 /* RO */ +#define INTEL_MSIC_ADC1SNS2H 0x1d8 /* RO */ +#define INTEL_MSIC_ADC1SNS2L 0x1d9 /* RO */ +#define INTEL_MSIC_ADC1SNS3H 0x1da /* RO */ +#define INTEL_MSIC_ADC1SNS3L 0x1db /* RO */ +#define INTEL_MSIC_ADC1SNS4H 0x1dc /* RO */ +#define INTEL_MSIC_ADC1SNS4L 0x1dd /* RO */ +#define INTEL_MSIC_ADC1SNS5H 0x1de /* RO */ +#define INTEL_MSIC_ADC1SNS5L 0x1df /* RO */ +#define INTEL_MSIC_ADC1SNS6H 0x1e0 /* RO */ +#define INTEL_MSIC_ADC1SNS6L 0x1e1 /* RO */ +#define INTEL_MSIC_ADC1SNS7H 0x1e2 /* RO */ +#define INTEL_MSIC_ADC1SNS7L 0x1e3 /* RO */ +#define INTEL_MSIC_ADC1SNS8H 0x1e4 /* RO */ +#define INTEL_MSIC_ADC1SNS8L 0x1e5 /* RO */ +#define INTEL_MSIC_ADC1SNS9H 0x1e6 /* RO */ +#define INTEL_MSIC_ADC1SNS9L 0x1e7 /* RO */ +#define INTEL_MSIC_ADC1SNS10H 0x1e8 /* RO */ +#define INTEL_MSIC_ADC1SNS10L 0x1e9 /* RO */ +#define INTEL_MSIC_ADC1SNS11H 0x1ea /* RO */ +#define INTEL_MSIC_ADC1SNS11L 0x1eb /* RO */ +#define INTEL_MSIC_ADC1SNS12H 0x1ec /* RO */ +#define INTEL_MSIC_ADC1SNS12L 0x1ed /* RO */ +#define INTEL_MSIC_ADC1SNS13H 0x1ee /* RO */ +#define INTEL_MSIC_ADC1SNS13L 0x1ef /* RO */ +#define INTEL_MSIC_ADC1SNS14H 0x1f0 /* RO */ +#define INTEL_MSIC_ADC1SNS14L 0x1f1 /* RO */ +#define INTEL_MSIC_ADC1BV0H 0x1f2 /* RO */ +#define INTEL_MSIC_ADC1BV0L 0x1f3 /* RO */ +#define INTEL_MSIC_ADC1BV1H 0x1f4 /* 
RO */ +#define INTEL_MSIC_ADC1BV1L 0x1f5 /* RO */ +#define INTEL_MSIC_ADC1BV2H 0x1f6 /* RO */ +#define INTEL_MSIC_ADC1BV2L 0x1f7 /* RO */ +#define INTEL_MSIC_ADC1BV3H 0x1f8 /* RO */ +#define INTEL_MSIC_ADC1BV3L 0x1f9 /* RO */ +#define INTEL_MSIC_ADC1BI0H 0x1fa /* RO */ +#define INTEL_MSIC_ADC1BI0L 0x1fb /* RO */ +#define INTEL_MSIC_ADC1BI1H 0x1fc /* RO */ +#define INTEL_MSIC_ADC1BI1L 0x1fd /* RO */ +#define INTEL_MSIC_ADC1BI2H 0x1fe /* RO */ +#define INTEL_MSIC_ADC1BI2L 0x1ff /* RO */ +#define INTEL_MSIC_ADC1BI3H 0x200 /* RO */ +#define INTEL_MSIC_ADC1BI3L 0x201 /* RO */ +#define INTEL_MSIC_CCCNTL 0x202 +#define INTEL_MSIC_CCOFFSETH 0x203 /* RO */ +#define INTEL_MSIC_CCOFFSETL 0x204 /* RO */ +#define INTEL_MSIC_CCADCHA 0x205 /* RO */ +#define INTEL_MSIC_CCADCLA 0x206 /* RO */ + +/* AUDIO */ +#define INTEL_MSIC_AUDPLLCTRL 0x240 +#define INTEL_MSIC_DMICBUF0123 0x241 +#define INTEL_MSIC_DMICBUF45 0x242 +#define INTEL_MSIC_DMICGPO 0x244 +#define INTEL_MSIC_DMICMUX 0x245 +#define INTEL_MSIC_DMICCLK 0x246 +#define INTEL_MSIC_MICBIAS 0x247 +#define INTEL_MSIC_ADCCONFIG 0x248 +#define INTEL_MSIC_MICAMP1 0x249 +#define INTEL_MSIC_MICAMP2 0x24a +#define INTEL_MSIC_NOISEMUX 0x24b +#define INTEL_MSIC_AUDIOMUX12 0x24c +#define INTEL_MSIC_AUDIOMUX34 0x24d +#define INTEL_MSIC_AUDIOSINC 0x24e +#define INTEL_MSIC_AUDIOTXEN 0x24f +#define INTEL_MSIC_HSEPRXCTRL 0x250 +#define INTEL_MSIC_IHFRXCTRL 0x251 +#define INTEL_MSIC_VOICETXVOL 0x252 +#define INTEL_MSIC_SIDETONEVOL 0x253 +#define INTEL_MSIC_MUSICSHARVOL 0x254 +#define INTEL_MSIC_VOICETXCTRL 0x255 +#define INTEL_MSIC_HSMIXER 0x256 +#define INTEL_MSIC_DACCONFIG 0x257 +#define INTEL_MSIC_SOFTMUTE 0x258 +#define INTEL_MSIC_HSLVOLCTRL 0x259 +#define INTEL_MSIC_HSRVOLCTRL 0x25a +#define INTEL_MSIC_IHFLVOLCTRL 0x25b +#define INTEL_MSIC_IHFRVOLCTRL 0x25c +#define INTEL_MSIC_DRIVEREN 0x25d +#define INTEL_MSIC_LINEOUTCTRL 0x25e +#define INTEL_MSIC_VIB1CTRL1 0x25f +#define INTEL_MSIC_VIB1CTRL2 0x260 +#define INTEL_MSIC_VIB1CTRL3 0x261 +#define INTEL_MSIC_VIB1SPIPCM_1 0x262 +#define INTEL_MSIC_VIB1SPIPCM_2 0x263 +#define INTEL_MSIC_VIB1CTRL5 0x264 +#define INTEL_MSIC_VIB2CTRL1 0x265 +#define INTEL_MSIC_VIB2CTRL2 0x266 +#define INTEL_MSIC_VIB2CTRL3 0x267 +#define INTEL_MSIC_VIB2SPIPCM_1 0x268 +#define INTEL_MSIC_VIB2SPIPCM_2 0x269 +#define INTEL_MSIC_VIB2CTRL5 0x26a +#define INTEL_MSIC_BTNCTRL1 0x26b +#define INTEL_MSIC_BTNCTRL2 0x26c +#define INTEL_MSIC_PCM1TXSLOT01 0x26d +#define INTEL_MSIC_PCM1TXSLOT23 0x26e +#define INTEL_MSIC_PCM1TXSLOT45 0x26f +#define INTEL_MSIC_PCM1RXSLOT0123 0x270 +#define INTEL_MSIC_PCM1RXSLOT045 0x271 +#define INTEL_MSIC_PCM2TXSLOT01 0x272 +#define INTEL_MSIC_PCM2TXSLOT23 0x273 +#define INTEL_MSIC_PCM2TXSLOT45 0x274 +#define INTEL_MSIC_PCM2RXSLOT01 0x275 +#define INTEL_MSIC_PCM2RXSLOT23 0x276 +#define INTEL_MSIC_PCM2RXSLOT45 0x277 +#define INTEL_MSIC_PCM1CTRL1 0x278 +#define INTEL_MSIC_PCM1CTRL2 0x279 +#define INTEL_MSIC_PCM1CTRL3 0x27a +#define INTEL_MSIC_PCM2CTRL1 0x27b +#define INTEL_MSIC_PCM2CTRL2 0x27c + +/* HDMI */ +#define INTEL_MSIC_HDMIPUEN 0x280 +#define INTEL_MSIC_HDMISTATUS 0x281 /* RO */ + +/* Physical address of the start of the MSIC interrupt tree in SRAM */ +#define INTEL_MSIC_IRQ_PHYS_BASE 0xffff7fc0 + +/** + * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver + * @gpio_base: base number for the GPIOs + */ +struct intel_msic_gpio_pdata { + unsigned gpio_base; +}; + +/** + * struct intel_msic_ocd_pdata - platform data for the MSIC OCD driver + * @gpio: GPIO number used for OCD interrupts + * + * The 
MSIC MFD driver converts @gpio into an IRQ number and passes it to + * the OCD driver as %IORESOURCE_IRQ. + */ +struct intel_msic_ocd_pdata { + unsigned gpio; +}; + +/* MSIC embedded blocks (subdevices) */ +enum intel_msic_block { + INTEL_MSIC_BLOCK_TOUCH, + INTEL_MSIC_BLOCK_ADC, + INTEL_MSIC_BLOCK_BATTERY, + INTEL_MSIC_BLOCK_GPIO, + INTEL_MSIC_BLOCK_AUDIO, + INTEL_MSIC_BLOCK_HDMI, + INTEL_MSIC_BLOCK_THERMAL, + INTEL_MSIC_BLOCK_POWER_BTN, + INTEL_MSIC_BLOCK_OCD, + + INTEL_MSIC_BLOCK_LAST, +}; + +/** + * struct intel_msic_platform_data - platform data for the MSIC driver + * @irq: array of interrupt numbers, one per device. If @irq is set to %0 + * for a given block, the corresponding platform device is not + * created. For devices which don't have an interrupt, use %0xff + * (this is same as in SFI spec). + * @gpio: platform data for the MSIC GPIO driver + * @ocd: platform data for the MSIC OCD driver + * + * Once the MSIC driver is initialized, the register interface is ready to + * use. All the platform devices for subdevices are created after the + * register interface is ready so that we can guarantee its availability to + * the subdevice drivers. + * + * Interrupt numbers are passed to the subdevices via %IORESOURCE_IRQ + * resources of the created platform device. + */ +struct intel_msic_platform_data { + int irq[INTEL_MSIC_BLOCK_LAST]; + struct intel_msic_gpio_pdata *gpio; + struct intel_msic_ocd_pdata *ocd; +}; + +struct intel_msic; + +extern int intel_msic_reg_read(unsigned short reg, u8 *val); +extern int intel_msic_reg_write(unsigned short reg, u8 val); +extern int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask); +extern int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count); +extern int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count); + +/* + * pdev_to_intel_msic - gets an MSIC instance from the platform device + * @pdev: platform device pointer + * + * The client drivers need to have pointer to the MSIC instance if they + * want to call intel_msic_irq_read(). This macro can be used for + * convenience to get the MSIC pointer from @pdev where needed. This is + * _only_ valid for devices which are managed by the MSIC. + */ +#define pdev_to_intel_msic(pdev) (dev_get_drvdata(pdev->dev.parent)) + +extern int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg, + u8 *val); + +#endif /* __LINUX_MFD_INTEL_MSIC_H__ */ diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h new file mode 100644 index 000000000..806a4f095 --- /dev/null +++ b/include/linux/mfd/intel_soc_pmic.h @@ -0,0 +1,37 @@ +/* + * intel_soc_pmic.h - Intel SoC PMIC Driver + * + * Copyright (C) 2012-2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
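The pdev_to_intel_msic() comment above is the key to how MSIC sub-device drivers reach the shared MSIC instance. A hedged sketch of a threaded interrupt handler in such a sub-driver follows; reading INTEL_MSIC_IRQLVL1 here is only an illustration, not the documented handler flow of any particular sub-driver.

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/intel_msic.h>

static irqreturn_t example_msic_thread_fn(int irq, void *data)
{
	struct platform_device *pdev = data;
	struct intel_msic *msic = pdev_to_intel_msic(pdev);
	u8 status;

	/* Interrupt tree registers are read through the MSIC core. */
	if (intel_msic_irq_read(msic, INTEL_MSIC_IRQLVL1, &status))
		return IRQ_NONE;

	dev_dbg(&pdev->dev, "IRQLVL1 status: 0x%02x\n", status);
	return IRQ_HANDLED;
}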
+ * + * Author: Yang, Bin + * Author: Zhu, Lejun + */ + +#ifndef __INTEL_SOC_PMIC_H__ +#define __INTEL_SOC_PMIC_H__ + +#include + +struct intel_soc_pmic { + int irq; + struct regmap *regmap; + struct regmap_irq_chip_data *irq_chip_data; + struct regmap_irq_chip_data *irq_chip_data_pwrbtn; + struct regmap_irq_chip_data *irq_chip_data_tmu; + struct regmap_irq_chip_data *irq_chip_data_bcu; + struct regmap_irq_chip_data *irq_chip_data_adc; + struct regmap_irq_chip_data *irq_chip_data_chgr; + struct regmap_irq_chip_data *irq_chip_data_crit; + struct device *dev; +}; + +#endif /* __INTEL_SOC_PMIC_H__ */ diff --git a/include/linux/mfd/intel_soc_pmic_bxtwc.h b/include/linux/mfd/intel_soc_pmic_bxtwc.h new file mode 100644 index 000000000..0c351bc85 --- /dev/null +++ b/include/linux/mfd/intel_soc_pmic_bxtwc.h @@ -0,0 +1,67 @@ +/* + * Header file for Intel Broxton Whiskey Cove PMIC + * + * Copyright (C) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __INTEL_BXTWC_H__ +#define __INTEL_BXTWC_H__ + +/* BXT WC devices */ +#define BXTWC_DEVICE1_ADDR 0x4E +#define BXTWC_DEVICE2_ADDR 0x4F +#define BXTWC_DEVICE3_ADDR 0x5E + +/* device1 Registers */ +#define BXTWC_CHIPID 0x4E00 +#define BXTWC_CHIPVER 0x4E01 + +#define BXTWC_SCHGRIRQ0_ADDR 0x5E1A +#define BXTWC_CHGRCTRL0_ADDR 0x5E16 +#define BXTWC_CHGRCTRL1_ADDR 0x5E17 +#define BXTWC_CHGRCTRL2_ADDR 0x5E18 +#define BXTWC_CHGRSTATUS_ADDR 0x5E19 +#define BXTWC_THRMBATZONE_ADDR 0x4F22 + +#define BXTWC_USBPATH_ADDR 0x5E19 +#define BXTWC_USBPHYCTRL_ADDR 0x5E07 +#define BXTWC_USBIDCTRL_ADDR 0x5E05 +#define BXTWC_USBIDEN_MASK 0x01 +#define BXTWC_USBIDSTAT_ADDR 0x00FF +#define BXTWC_USBSRCDETSTATUS_ADDR 0x5E29 + +#define BXTWC_DBGUSBBC1_ADDR 0x5FE0 +#define BXTWC_DBGUSBBC2_ADDR 0x5FE1 +#define BXTWC_DBGUSBBCSTAT_ADDR 0x5FE2 + +#define BXTWC_WAKESRC_ADDR 0x4E22 +#define BXTWC_WAKESRC2_ADDR 0x4EE5 +#define BXTWC_CHRTTADDR_ADDR 0x5E22 +#define BXTWC_CHRTTDATA_ADDR 0x5E23 + +#define BXTWC_STHRMIRQ0_ADDR 0x4F19 +#define WC_MTHRMIRQ1_ADDR 0x4E12 +#define WC_STHRMIRQ1_ADDR 0x4F1A +#define WC_STHRMIRQ2_ADDR 0x4F1B + +#define BXTWC_THRMZN0H_ADDR 0x4F44 +#define BXTWC_THRMZN0L_ADDR 0x4F45 +#define BXTWC_THRMZN1H_ADDR 0x4F46 +#define BXTWC_THRMZN1L_ADDR 0x4F47 +#define BXTWC_THRMZN2H_ADDR 0x4F48 +#define BXTWC_THRMZN2L_ADDR 0x4F49 +#define BXTWC_THRMZN3H_ADDR 0x4F4A +#define BXTWC_THRMZN3L_ADDR 0x4F4B +#define BXTWC_THRMZN4H_ADDR 0x4F4C +#define BXTWC_THRMZN4L_ADDR 0x4F4D + +#endif diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h new file mode 100644 index 000000000..ee48a4321 --- /dev/null +++ b/include/linux/mfd/ipaq-micro.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header file for the compaq Micro MFD + */ + +#ifndef _MFD_IPAQ_MICRO_H_ +#define _MFD_IPAQ_MICRO_H_ + +#include +#include +#include + +#define TX_BUF_SIZE 32 +#define RX_BUF_SIZE 16 +#define CHAR_SOF 0x02 + +/* + * These are the different messages that can be sent to the microcontroller + * to control various aspects. 
+ */ +#define MSG_VERSION 0x0 +#define MSG_KEYBOARD 0x2 +#define MSG_TOUCHSCREEN 0x3 +#define MSG_EEPROM_READ 0x4 +#define MSG_EEPROM_WRITE 0x5 +#define MSG_THERMAL_SENSOR 0x6 +#define MSG_NOTIFY_LED 0x8 +#define MSG_BATTERY 0x9 +#define MSG_SPI_READ 0xb +#define MSG_SPI_WRITE 0xc +#define MSG_BACKLIGHT 0xd /* H3600 only */ +#define MSG_CODEC_CTRL 0xe /* H3100 only */ +#define MSG_DISPLAY_CTRL 0xf /* H3100 only */ + +/* state of receiver parser */ +enum rx_state { + STATE_SOF = 0, /* Next byte should be start of frame */ + STATE_ID, /* Next byte is ID & message length */ + STATE_DATA, /* Next byte is a data byte */ + STATE_CHKSUM /* Next byte should be checksum */ +}; + +/** + * struct ipaq_micro_txdev - TX state + * @len: length of message in TX buffer + * @index: current index into TX buffer + * @buf: TX buffer + */ +struct ipaq_micro_txdev { + u8 len; + u8 index; + u8 buf[TX_BUF_SIZE]; +}; + +/** + * struct ipaq_micro_rxdev - RX state + * @state: context of RX state machine + * @chksum: calculated checksum + * @id: message ID from packet + * @len: RX buffer length + * @index: RX buffer index + * @buf: RX buffer + */ +struct ipaq_micro_rxdev { + enum rx_state state; + unsigned char chksum; + u8 id; + unsigned int len; + unsigned int index; + u8 buf[RX_BUF_SIZE]; +}; + +/** + * struct ipaq_micro_msg - message to the iPAQ microcontroller + * @id: 4-bit ID of the message + * @tx_len: length of TX data + * @tx_data: TX data to send + * @rx_len: length of received RX data + * @rx_data: RX data to receive + * @ack: a completion that will be completed when RX is complete + * @node: list node if message gets queued + */ +struct ipaq_micro_msg { + u8 id; + u8 tx_len; + u8 tx_data[TX_BUF_SIZE]; + u8 rx_len; + u8 rx_data[RX_BUF_SIZE]; + struct completion ack; + struct list_head node; +}; + +/** + * struct ipaq_micro - iPAQ microcontroller state + * @dev: corresponding platform device + * @base: virtual memory base for underlying serial device + * @sdlc: virtual memory base for Synchronous Data Link Controller + * @version: version string + * @tx: TX state + * @rx: RX state + * @lock: lock for this state container + * @msg: current message + * @queue: message queue + * @key: callback for asynchronous key events + * @key_data: data to pass along with key events + * @ts: callback for asynchronous touchscreen events + * @ts_data: data to pass along with touchscreen events + */ +struct ipaq_micro { + struct device *dev; + void __iomem *base; + void __iomem *sdlc; + char version[5]; + struct ipaq_micro_txdev tx; /* transmit ISR state */ + struct ipaq_micro_rxdev rx; /* receive ISR state */ + spinlock_t lock; + struct ipaq_micro_msg *msg; + struct list_head queue; + void (*key) (void *data, int len, unsigned char *rxdata); + void *key_data; + void (*ts) (void *data, int len, unsigned char *rxdata); + void *ts_data; +}; + +extern int +ipaq_micro_tx_msg(struct ipaq_micro *micro, struct ipaq_micro_msg *msg); + +static inline int +ipaq_micro_tx_msg_sync(struct ipaq_micro *micro, + struct ipaq_micro_msg *msg) +{ + int ret; + + init_completion(&msg->ack); + ret = ipaq_micro_tx_msg(micro, msg); + wait_for_completion(&msg->ack); + + return ret; +} + +static inline int +ipaq_micro_tx_msg_async(struct ipaq_micro *micro, + struct ipaq_micro_msg *msg) +{ + init_completion(&msg->ack); + return ipaq_micro_tx_msg(micro, msg); +} + +#endif /* _MFD_IPAQ_MICRO_H_ */ diff --git a/include/linux/mfd/janz.h b/include/linux/mfd/janz.h new file mode 100644 index 000000000..e9994c469 --- /dev/null +++ b/include/linux/mfd/janz.h @@ -0,0 
+1,54 @@ +/* + * Common Definitions for Janz MODULbus devices + * + * Copyright (c) 2010 Ira W. Snyder + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef JANZ_H +#define JANZ_H + +struct janz_platform_data { + /* MODULbus Module Number */ + unsigned int modno; +}; + +/* PLX bridge chip onboard registers */ +struct janz_cmodio_onboard_regs { + u8 unused1; + + /* + * Read access: interrupt status + * Write access: interrupt disable + */ + u8 int_disable; + u8 unused2; + + /* + * Read access: MODULbus number (hex switch) + * Write access: interrupt enable + */ + u8 int_enable; + u8 unused3; + + /* write-only */ + u8 reset_assert; + u8 unused4; + + /* write-only */ + u8 reset_deassert; + u8 unused5; + + /* read-write access to serial EEPROM */ + u8 eep; + u8 unused6; + + /* write-only access to EEPROM chip select */ + u8 enid; +}; + +#endif /* JANZ_H */ diff --git a/include/linux/mfd/kempld.h b/include/linux/mfd/kempld.h new file mode 100644 index 000000000..26e0b469e --- /dev/null +++ b/include/linux/mfd/kempld.h @@ -0,0 +1,129 @@ +/* + * Kontron PLD driver definitions + * + * Copyright (c) 2010-2012 Kontron Europe GmbH + * Author: Michael Brunner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License 2 as published + * by the Free Software Foundation. + */ + +#ifndef _LINUX_MFD_KEMPLD_H_ +#define _LINUX_MFD_KEMPLD_H_ + +/* kempld register definitions */ +#define KEMPLD_IOINDEX 0xa80 +#define KEMPLD_IODATA 0xa81 +#define KEMPLD_MUTEX_KEY 0x80 +#define KEMPLD_VERSION 0x00 +#define KEMPLD_VERSION_LSB 0x00 +#define KEMPLD_VERSION_MSB 0x01 +#define KEMPLD_VERSION_GET_MINOR(x) (x & 0x1f) +#define KEMPLD_VERSION_GET_MAJOR(x) ((x >> 5) & 0x1f) +#define KEMPLD_VERSION_GET_NUMBER(x) ((x >> 10) & 0xf) +#define KEMPLD_VERSION_GET_TYPE(x) ((x >> 14) & 0x3) +#define KEMPLD_BUILDNR 0x02 +#define KEMPLD_BUILDNR_LSB 0x02 +#define KEMPLD_BUILDNR_MSB 0x03 +#define KEMPLD_FEATURE 0x04 +#define KEMPLD_FEATURE_LSB 0x04 +#define KEMPLD_FEATURE_MSB 0x05 +#define KEMPLD_FEATURE_BIT_I2C (1 << 0) +#define KEMPLD_FEATURE_BIT_WATCHDOG (1 << 1) +#define KEMPLD_FEATURE_BIT_GPIO (1 << 2) +#define KEMPLD_FEATURE_MASK_UART (7 << 3) +#define KEMPLD_FEATURE_BIT_NMI (1 << 8) +#define KEMPLD_FEATURE_BIT_SMI (1 << 9) +#define KEMPLD_FEATURE_BIT_SCI (1 << 10) +#define KEMPLD_SPEC 0x06 +#define KEMPLD_SPEC_GET_MINOR(x) (x & 0x0f) +#define KEMPLD_SPEC_GET_MAJOR(x) ((x >> 4) & 0x0f) +#define KEMPLD_IRQ_GPIO 0x35 +#define KEMPLD_IRQ_I2C 0x36 +#define KEMPLD_CFG 0x37 +#define KEMPLD_CFG_GPIO_I2C_MUX (1 << 0) +#define KEMPLD_CFG_BIOS_WP (1 << 7) + +#define KEMPLD_CLK 33333333 + +#define KEMPLD_TYPE_RELEASE 0x0 +#define KEMPLD_TYPE_DEBUG 0x1 +#define KEMPLD_TYPE_CUSTOM 0x2 + +#define KEMPLD_VERSION_LEN 10 + +/** + * struct kempld_info - PLD device information structure + * @major: PLD major revision + * @minor: PLD minor revision + * @buildnr: PLD build number + * @number: PLD board specific index + * @type: PLD type + * @spec_major: PLD FW specification major revision + * @spec_minor: PLD FW specification minor revision + * @version: PLD version string + */ +struct kempld_info { + unsigned int major; + unsigned int minor; + unsigned int buildnr; + unsigned int number; + unsigned int type; + unsigned int spec_major; + unsigned int 
spec_minor; + char version[KEMPLD_VERSION_LEN]; +}; + +/** + * struct kempld_device_data - Internal representation of the PLD device + * @io_base: Pointer to the IO memory + * @io_index: Pointer to the IO index register + * @io_data: Pointer to the IO data register + * @pld_clock: PLD clock frequency + * @feature_mask: PLD feature mask + * @dev: Pointer to kernel device structure + * @info: KEMPLD info structure + * @lock: PLD mutex + */ +struct kempld_device_data { + void __iomem *io_base; + void __iomem *io_index; + void __iomem *io_data; + u32 pld_clock; + u32 feature_mask; + struct device *dev; + struct kempld_info info; + struct mutex lock; +}; + +/** + * struct kempld_platform_data - PLD hardware configuration structure + * @pld_clock: PLD clock frequency + * @gpio_base GPIO base pin number + * @ioresource: IO addresses of the PLD + * @get_mutex: PLD specific get_mutex callback + * @release_mutex: PLD specific release_mutex callback + * @get_info: PLD specific get_info callback + * @register_cells: PLD specific register_cells callback + */ +struct kempld_platform_data { + u32 pld_clock; + int gpio_base; + struct resource *ioresource; + void (*get_hardware_mutex) (struct kempld_device_data *); + void (*release_hardware_mutex) (struct kempld_device_data *); + int (*get_info) (struct kempld_device_data *); + int (*register_cells) (struct kempld_device_data *); +}; + +extern void kempld_get_mutex(struct kempld_device_data *pld); +extern void kempld_release_mutex(struct kempld_device_data *pld); +extern u8 kempld_read8(struct kempld_device_data *pld, u8 index); +extern void kempld_write8(struct kempld_device_data *pld, u8 index, u8 data); +extern u16 kempld_read16(struct kempld_device_data *pld, u8 index); +extern void kempld_write16(struct kempld_device_data *pld, u8 index, u16 data); +extern u32 kempld_read32(struct kempld_device_data *pld, u8 index); +extern void kempld_write32(struct kempld_device_data *pld, u8 index, u32 data); + +#endif /* _LINUX_MFD_KEMPLD_H_ */ diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h new file mode 100644 index 000000000..594bc591f --- /dev/null +++ b/include/linux/mfd/lm3533.h @@ -0,0 +1,104 @@ +/* + * lm3533.h -- LM3533 interface + * + * Copyright (C) 2011-2012 Texas Instruments + * + * Author: Johan Hovold + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
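A brief sketch of the access pattern implied by the kempld accessors declared above: every register read or write is expected to happen while the PLD hardware mutex is held. Reading KEMPLD_BUILDNR is just an example register choice for this sketch.

#include <linux/mfd/kempld.h>

static u16 example_kempld_read_buildnr(struct kempld_device_data *pld)
{
	u16 buildnr;

	/* Register access is only valid while the hardware mutex is held. */
	kempld_get_mutex(pld);
	buildnr = kempld_read16(pld, KEMPLD_BUILDNR);
	kempld_release_mutex(pld);

	return buildnr;
}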
+ */ + +#ifndef __LINUX_MFD_LM3533_H +#define __LINUX_MFD_LM3533_H + +#define LM3533_ATTR_RO(_name) \ + DEVICE_ATTR(_name, S_IRUGO, show_##_name, NULL) +#define LM3533_ATTR_RW(_name) \ + DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name) + +struct device; +struct regmap; + +struct lm3533 { + struct device *dev; + + struct regmap *regmap; + + int gpio_hwen; + int irq; + + unsigned have_als:1; + unsigned have_backlights:1; + unsigned have_leds:1; +}; + +struct lm3533_ctrlbank { + struct lm3533 *lm3533; + struct device *dev; + int id; +}; + +struct lm3533_als_platform_data { + unsigned pwm_mode:1; /* PWM input mode (default analog) */ + u8 r_select; /* 1 - 127 (ignored in PWM-mode) */ +}; + +struct lm3533_bl_platform_data { + char *name; + u16 max_current; /* 5000 - 29800 uA (800 uA step) */ + u8 default_brightness; /* 0 - 255 */ + u8 pwm; /* 0 - 0x3f */ +}; + +struct lm3533_led_platform_data { + char *name; + const char *default_trigger; + u16 max_current; /* 5000 - 29800 uA (800 uA step) */ + u8 pwm; /* 0 - 0x3f */ +}; + +enum lm3533_boost_freq { + LM3533_BOOST_FREQ_500KHZ, + LM3533_BOOST_FREQ_1000KHZ, +}; + +enum lm3533_boost_ovp { + LM3533_BOOST_OVP_16V, + LM3533_BOOST_OVP_24V, + LM3533_BOOST_OVP_32V, + LM3533_BOOST_OVP_40V, +}; + +struct lm3533_platform_data { + int gpio_hwen; + + enum lm3533_boost_ovp boost_ovp; + enum lm3533_boost_freq boost_freq; + + struct lm3533_als_platform_data *als; + + struct lm3533_bl_platform_data *backlights; + int num_backlights; + + struct lm3533_led_platform_data *leds; + int num_leds; +}; + +extern int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb); +extern int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb); + +extern int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val); +extern int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val); +extern int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, + u16 imax); +extern int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val); +extern int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val); + +extern int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val); +extern int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val); +extern int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask); + +#endif /* __LINUX_MFD_LM3533_H */ diff --git a/include/linux/mfd/lp3943.h b/include/linux/mfd/lp3943.h new file mode 100644 index 000000000..3490db782 --- /dev/null +++ b/include/linux/mfd/lp3943.h @@ -0,0 +1,114 @@ +/* + * TI/National Semiconductor LP3943 Device + * + * Copyright 2013 Texas Instruments + * + * Author: Milo Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
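A short sketch of how a client such as a backlight sub-driver might drive the lm3533 control-bank API declared above; enabling the bank before programming brightness is an assumed call order for this sketch, not one mandated by the header.

#include <linux/mfd/lm3533.h>

static int example_lm3533_set_backlight(struct lm3533_ctrlbank *cb, u8 level)
{
	int ret;

	ret = lm3533_ctrlbank_enable(cb);
	if (ret)
		return ret;

	/* Brightness is an 8-bit value, matching default_brightness above. */
	return lm3533_ctrlbank_set_brightness(cb, level);
}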
+ * + */ + +#ifndef __MFD_LP3943_H__ +#define __MFD_LP3943_H__ + +#include +#include +#include + +/* Registers */ +#define LP3943_REG_GPIO_A 0x00 +#define LP3943_REG_GPIO_B 0x01 +#define LP3943_REG_PRESCALE0 0x02 +#define LP3943_REG_PWM0 0x03 +#define LP3943_REG_PRESCALE1 0x04 +#define LP3943_REG_PWM1 0x05 +#define LP3943_REG_MUX0 0x06 +#define LP3943_REG_MUX1 0x07 +#define LP3943_REG_MUX2 0x08 +#define LP3943_REG_MUX3 0x09 + +/* Bit description for LP3943_REG_MUX0 ~ 3 */ +#define LP3943_GPIO_IN 0x00 +#define LP3943_GPIO_OUT_HIGH 0x00 +#define LP3943_GPIO_OUT_LOW 0x01 +#define LP3943_DIM_PWM0 0x02 +#define LP3943_DIM_PWM1 0x03 + +#define LP3943_NUM_PWMS 2 + +enum lp3943_pwm_output { + LP3943_PWM_OUT0, + LP3943_PWM_OUT1, + LP3943_PWM_OUT2, + LP3943_PWM_OUT3, + LP3943_PWM_OUT4, + LP3943_PWM_OUT5, + LP3943_PWM_OUT6, + LP3943_PWM_OUT7, + LP3943_PWM_OUT8, + LP3943_PWM_OUT9, + LP3943_PWM_OUT10, + LP3943_PWM_OUT11, + LP3943_PWM_OUT12, + LP3943_PWM_OUT13, + LP3943_PWM_OUT14, + LP3943_PWM_OUT15, +}; + +/* + * struct lp3943_pwm_map + * @output: Output pins which are mapped to each PWM channel + * @num_outputs: Number of outputs + */ +struct lp3943_pwm_map { + enum lp3943_pwm_output *output; + int num_outputs; +}; + +/* + * struct lp3943_platform_data + * @pwms: Output channel definitions for PWM channel 0 and 1 + */ +struct lp3943_platform_data { + struct lp3943_pwm_map *pwms[LP3943_NUM_PWMS]; +}; + +/* + * struct lp3943_reg_cfg + * @reg: Register address + * @mask: Register bit mask to be updated + * @shift: Register bit shift + */ +struct lp3943_reg_cfg { + u8 reg; + u8 mask; + u8 shift; +}; + +/* + * struct lp3943 + * @dev: Parent device pointer + * @regmap: Used for I2C communication on accessing registers + * @pdata: LP3943 platform specific data + * @mux_cfg: Register configuration for pin MUX + * @pin_used: Bit mask for output pin used. + * This bitmask is used for pin assignment management. + * 1 = pin used, 0 = available. + * Only LSB 16 bits are used, but it is unsigned long type + * for atomic bitwise operations. + */ +struct lp3943 { + struct device *dev; + struct regmap *regmap; + struct lp3943_platform_data *pdata; + const struct lp3943_reg_cfg *mux_cfg; + unsigned long pin_used; +}; + +int lp3943_read_byte(struct lp3943 *lp3943, u8 reg, u8 *read); +int lp3943_write_byte(struct lp3943 *lp3943, u8 reg, u8 data); +int lp3943_update_bits(struct lp3943 *lp3943, u8 reg, u8 mask, u8 data); +#endif diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h new file mode 100644 index 000000000..edbec8350 --- /dev/null +++ b/include/linux/mfd/lp873x.h @@ -0,0 +1,268 @@ +/* + * Functions to access LP873X power management chip. + * + * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
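As a hedged sketch of how the LP3943 interface above is intended to be used (not part of the upstream header): a child driver can look up the per-pin register/mask/shift triplet from @mux_cfg and route an output pin to PWM channel 0 with lp3943_update_bits(). The example_* helper name is hypothetical; everything else comes from the definitions above.

#include <linux/mfd/lp3943.h>

/* Illustrative only: select the PWM0 dimming function for one output pin. */
static int example_pin_to_pwm0(struct lp3943 *lp3943, int pin)
{
	const struct lp3943_reg_cfg *cfg = &lp3943->mux_cfg[pin];

	/* Each output pin has a 2-bit function field in a MUX register. */
	return lp3943_update_bits(lp3943, cfg->reg, cfg->mask,
				  LP3943_DIM_PWM0 << cfg->shift);
}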
+ */ + +#ifndef __LINUX_MFD_LP873X_H +#define __LINUX_MFD_LP873X_H + +#include +#include +#include + +/* LP873x chip id list */ +#define LP873X 0x00 + +/* All register addresses */ +#define LP873X_REG_DEV_REV 0X00 +#define LP873X_REG_OTP_REV 0X01 +#define LP873X_REG_BUCK0_CTRL_1 0X02 +#define LP873X_REG_BUCK0_CTRL_2 0X03 +#define LP873X_REG_BUCK1_CTRL_1 0X04 +#define LP873X_REG_BUCK1_CTRL_2 0X05 +#define LP873X_REG_BUCK0_VOUT 0X06 +#define LP873X_REG_BUCK1_VOUT 0X07 +#define LP873X_REG_LDO0_CTRL 0X08 +#define LP873X_REG_LDO1_CTRL 0X09 +#define LP873X_REG_LDO0_VOUT 0X0A +#define LP873X_REG_LDO1_VOUT 0X0B +#define LP873X_REG_BUCK0_DELAY 0X0C +#define LP873X_REG_BUCK1_DELAY 0X0D +#define LP873X_REG_LDO0_DELAY 0X0E +#define LP873X_REG_LDO1_DELAY 0X0F +#define LP873X_REG_GPO_DELAY 0X10 +#define LP873X_REG_GPO2_DELAY 0X11 +#define LP873X_REG_GPO_CTRL 0X12 +#define LP873X_REG_CONFIG 0X13 +#define LP873X_REG_PLL_CTRL 0X14 +#define LP873X_REG_PGOOD_CTRL1 0X15 +#define LP873X_REG_PGOOD_CTRL2 0X16 +#define LP873X_REG_PG_FAULT 0X17 +#define LP873X_REG_RESET 0X18 +#define LP873X_REG_INT_TOP_1 0X19 +#define LP873X_REG_INT_TOP_2 0X1A +#define LP873X_REG_INT_BUCK 0X1B +#define LP873X_REG_INT_LDO 0X1C +#define LP873X_REG_TOP_STAT 0X1D +#define LP873X_REG_BUCK_STAT 0X1E +#define LP873X_REG_LDO_STAT 0x1F +#define LP873X_REG_TOP_MASK_1 0x20 +#define LP873X_REG_TOP_MASK_2 0x21 +#define LP873X_REG_BUCK_MASK 0x22 +#define LP873X_REG_LDO_MASK 0x23 +#define LP873X_REG_SEL_I_LOAD 0x24 +#define LP873X_REG_I_LOAD_2 0x25 +#define LP873X_REG_I_LOAD_1 0x26 + +#define LP873X_REG_MAX LP873X_REG_I_LOAD_1 + +/* Register field definitions */ +#define LP873X_DEV_REV_DEV_ID 0xC0 +#define LP873X_DEV_REV_ALL_LAYER 0x30 +#define LP873X_DEV_REV_METAL_LAYER 0x0F + +#define LP873X_OTP_REV_OTP_ID 0xFF + +#define LP873X_BUCK0_CTRL_1_BUCK0_FPWM BIT(3) +#define LP873X_BUCK0_CTRL_1_BUCK0_RDIS_EN BIT(2) +#define LP873X_BUCK0_CTRL_1_BUCK0_EN_PIN_CTRL BIT(1) +#define LP873X_BUCK0_CTRL_1_BUCK0_EN BIT(0) + +#define LP873X_BUCK0_CTRL_2_BUCK0_ILIM 0x38 +#define LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE 0x07 + +#define LP873X_BUCK1_CTRL_1_BUCK1_FPWM BIT(3) +#define LP873X_BUCK1_CTRL_1_BUCK1_RDIS_EN BIT(2) +#define LP873X_BUCK1_CTRL_1_BUCK1_EN_PIN_CTRL BIT(1) +#define LP873X_BUCK1_CTRL_1_BUCK1_EN BIT(0) + +#define LP873X_BUCK1_CTRL_2_BUCK1_ILIM 0x38 +#define LP873X_BUCK1_CTRL_2_BUCK1_SLEW_RATE 0x07 + +#define LP873X_BUCK0_VOUT_BUCK0_VSET 0xFF + +#define LP873X_BUCK1_VOUT_BUCK1_VSET 0xFF + +#define LP873X_LDO0_CTRL_LDO0_RDIS_EN BIT(2) +#define LP873X_LDO0_CTRL_LDO0_EN_PIN_CTRL BIT(1) +#define LP873X_LDO0_CTRL_LDO0_EN BIT(0) + +#define LP873X_LDO1_CTRL_LDO1_RDIS_EN BIT(2) +#define LP873X_LDO1_CTRL_LDO1_EN_PIN_CTRL BIT(1) +#define LP873X_LDO1_CTRL_LDO1_EN BIT(0) + +#define LP873X_LDO0_VOUT_LDO0_VSET 0x1F + +#define LP873X_LDO1_VOUT_LDO1_VSET 0x1F + +#define LP873X_BUCK0_DELAY_BUCK0_SD_DELAY 0xF0 +#define LP873X_BUCK0_DELAY_BUCK0_SU_DELAY 0x0F + +#define LP873X_BUCK1_DELAY_BUCK1_SD_DELAY 0xF0 +#define LP873X_BUCK1_DELAY_BUCK1_SU_DELAY 0x0F + +#define LP873X_LDO0_DELAY_LDO0_SD_DELAY 0xF0 +#define LP873X_LDO0_DELAY_LDO0_SU_DELAY 0x0F + +#define LP873X_LDO1_DELAY_LDO1_SD_DELAY 0xF0 +#define LP873X_LDO1_DELAY_LDO1_SU_DELAY 0x0F + +#define LP873X_GPO_DELAY_GPO_SD_DELAY 0xF0 +#define LP873X_GPO_DELAY_GPO_SU_DELAY 0x0F + +#define LP873X_GPO2_DELAY_GPO2_SD_DELAY 0xF0 +#define LP873X_GPO2_DELAY_GPO2_SU_DELAY 0x0F + +#define LP873X_GPO_CTRL_GPO2_OD BIT(6) +#define LP873X_GPO_CTRL_GPO2_EN_PIN_CTRL BIT(5) +#define LP873X_GPO_CTRL_GPO2_EN BIT(4) +#define 
LP873X_GPO_CTRL_GPO_OD BIT(2) +#define LP873X_GPO_CTRL_GPO_EN_PIN_CTRL BIT(1) +#define LP873X_GPO_CTRL_GPO_EN BIT(0) + +#define LP873X_CONFIG_SU_DELAY_SEL BIT(6) +#define LP873X_CONFIG_SD_DELAY_SEL BIT(5) +#define LP873X_CONFIG_CLKIN_PIN_SEL BIT(4) +#define LP873X_CONFIG_CLKIN_PD BIT(3) +#define LP873X_CONFIG_EN_PD BIT(2) +#define LP873X_CONFIG_TDIE_WARN_LEVEL BIT(1) +#define LP873X_EN_SPREAD_SPEC BIT(0) + +#define LP873X_PLL_CTRL_EN_PLL BIT(6) +#define LP873X_EXT_CLK_FREQ 0x1F + +#define LP873X_PGOOD_CTRL1_PGOOD_POL BIT(7) +#define LP873X_PGOOD_CTRL1_PGOOD_OD BIT(6) +#define LP873X_PGOOD_CTRL1_PGOOD_WINDOW_LDO BIT(5) +#define LP873X_PGOOD_CTRL1_PGOOD_WINDOWN_BUCK BIT(4) +#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_LDO1 BIT(3) +#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_LDO0 BIT(2) +#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_BUCK1 BIT(1) +#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_BUCK0 BIT(0) + +#define LP873X_PGOOD_CTRL2_EN_PGOOD_TWARN BIT(2) +#define LP873X_PGOOD_CTRL2_EN_PG_FAULT_GATE BIT(1) +#define LP873X_PGOOD_CTRL2_PGOOD_MODE BIT(0) + +#define LP873X_PG_FAULT_PG_FAULT_LDO1 BIT(3) +#define LP873X_PG_FAULT_PG_FAULT_LDO0 BIT(2) +#define LP873X_PG_FAULT_PG_FAULT_BUCK1 BIT(1) +#define LP873X_PG_FAULT_PG_FAULT_BUCK0 BIT(0) + +#define LP873X_RESET_SW_RESET BIT(0) + +#define LP873X_INT_TOP_1_PGOOD_INT BIT(7) +#define LP873X_INT_TOP_1_LDO_INT BIT(6) +#define LP873X_INT_TOP_1_BUCK_INT BIT(5) +#define LP873X_INT_TOP_1_SYNC_CLK_INT BIT(4) +#define LP873X_INT_TOP_1_TDIE_SD_INT BIT(3) +#define LP873X_INT_TOP_1_TDIE_WARN_INT BIT(2) +#define LP873X_INT_TOP_1_OVP_INT BIT(1) +#define LP873X_INT_TOP_1_I_MEAS_INT BIT(0) + +#define LP873X_INT_TOP_2_RESET_REG_INT BIT(0) + +#define LP873X_INT_BUCK_BUCK1_PG_INT BIT(6) +#define LP873X_INT_BUCK_BUCK1_SC_INT BIT(5) +#define LP873X_INT_BUCK_BUCK1_ILIM_INT BIT(4) +#define LP873X_INT_BUCK_BUCK0_PG_INT BIT(2) +#define LP873X_INT_BUCK_BUCK0_SC_INT BIT(1) +#define LP873X_INT_BUCK_BUCK0_ILIM_INT BIT(0) + +#define LP873X_INT_LDO_LDO1_PG_INT BIT(6) +#define LP873X_INT_LDO_LDO1_SC_INT BIT(5) +#define LP873X_INT_LDO_LDO1_ILIM_INT BIT(4) +#define LP873X_INT_LDO_LDO0_PG_INT BIT(2) +#define LP873X_INT_LDO_LDO0_SC_INT BIT(1) +#define LP873X_INT_LDO_LDO0_ILIM_INT BIT(0) + +#define LP873X_TOP_STAT_PGOOD_STAT BIT(7) +#define LP873X_TOP_STAT_SYNC_CLK_STAT BIT(4) +#define LP873X_TOP_STAT_TDIE_SD_STAT BIT(3) +#define LP873X_TOP_STAT_TDIE_WARN_STAT BIT(2) +#define LP873X_TOP_STAT_OVP_STAT BIT(1) + +#define LP873X_BUCK_STAT_BUCK1_STAT BIT(7) +#define LP873X_BUCK_STAT_BUCK1_PG_STAT BIT(6) +#define LP873X_BUCK_STAT_BUCK1_ILIM_STAT BIT(4) +#define LP873X_BUCK_STAT_BUCK0_STAT BIT(3) +#define LP873X_BUCK_STAT_BUCK0_PG_STAT BIT(2) +#define LP873X_BUCK_STAT_BUCK0_ILIM_STAT BIT(0) + +#define LP873X_LDO_STAT_LDO1_STAT BIT(7) +#define LP873X_LDO_STAT_LDO1_PG_STAT BIT(6) +#define LP873X_LDO_STAT_LDO1_ILIM_STAT BIT(4) +#define LP873X_LDO_STAT_LDO0_STAT BIT(3) +#define LP873X_LDO_STAT_LDO0_PG_STAT BIT(2) +#define LP873X_LDO_STAT_LDO0_ILIM_STAT BIT(0) + +#define LP873X_TOP_MASK_1_PGOOD_INT_MASK BIT(7) +#define LP873X_TOP_MASK_1_SYNC_CLK_MASK BIT(4) +#define LP873X_TOP_MASK_1_TDIE_WARN_MASK BIT(2) +#define LP873X_TOP_MASK_1_I_MEAS_MASK BIT(0) + +#define LP873X_TOP_MASK_2_RESET_REG_MASK BIT(0) + +#define LP873X_BUCK_MASK_BUCK1_PGF_MASK BIT(7) +#define LP873X_BUCK_MASK_BUCK1_PGR_MASK BIT(6) +#define LP873X_BUCK_MASK_BUCK1_ILIM_MASK BIT(4) +#define LP873X_BUCK_MASK_BUCK0_PGF_MASK BIT(3) +#define LP873X_BUCK_MASK_BUCK0_PGR_MASK BIT(2) +#define LP873X_BUCK_MASK_BUCK0_ILIM_MASK BIT(0) + +#define 
LP873X_LDO_MASK_LDO1_PGF_MASK BIT(7) +#define LP873X_LDO_MASK_LDO1_PGR_MASK BIT(6) +#define LP873X_LDO_MASK_LDO1_ILIM_MASK BIT(4) +#define LP873X_LDO_MASK_LDO0_PGF_MASK BIT(3) +#define LP873X_LDO_MASK_LDO0_PGR_MASK BIT(2) +#define LP873X_LDO_MASK_LDO0_ILIM_MASK BIT(0) + +#define LP873X_SEL_I_LOAD_CURRENT_BUCK_SELECT BIT(0) + +#define LP873X_I_LOAD_2_BUCK_LOAD_CURRENT BIT(0) + +#define LP873X_I_LOAD_1_BUCK_LOAD_CURRENT 0xFF + +#define LP873X_MAX_REG_ID LP873X_LDO_1 + +/* Number of step-down converters available */ +#define LP873X_NUM_BUCK 2 +/* Number of LDO voltage regulators available */ +#define LP873X_NUM_LDO 2 +/* Number of total regulators available */ +#define LP873X_NUM_REGULATOR (LP873X_NUM_BUCK + LP873X_NUM_LDO) + +enum lp873x_regulator_id { + /* BUCK's */ + LP873X_BUCK_0, + LP873X_BUCK_1, + /* LDOs */ + LP873X_LDO_0, + LP873X_LDO_1, +}; + +/** + * struct lp873x - state holder for the lp873x driver + * @dev: struct device pointer for MFD device + * @rev: revision of the lp873x + * @lock: lock guarding the data structure + * @regmap: register map of the lp873x PMIC + * + * Device data may be used to access the LP873X chip + */ +struct lp873x { + struct device *dev; + u8 rev; + struct regmap *regmap; +}; +#endif /* __LINUX_MFD_LP873X_H */ diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h new file mode 100644 index 000000000..d0c91ba65 --- /dev/null +++ b/include/linux/mfd/lp87565.h @@ -0,0 +1,270 @@ +/* + * Functions to access LP87565 power management chip. + * + * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. 
+ */ + +#ifndef __LINUX_MFD_LP87565_H +#define __LINUX_MFD_LP87565_H + +#include +#include +#include + +enum lp87565_device_type { + LP87565_DEVICE_TYPE_UNKNOWN = 0, + LP87565_DEVICE_TYPE_LP87565_Q1, +}; + +/* All register addresses */ +#define LP87565_REG_DEV_REV 0X00 +#define LP87565_REG_OTP_REV 0X01 +#define LP87565_REG_BUCK0_CTRL_1 0X02 +#define LP87565_REG_BUCK0_CTRL_2 0X03 + +#define LP87565_REG_BUCK1_CTRL_1 0X04 +#define LP87565_REG_BUCK1_CTRL_2 0X05 + +#define LP87565_REG_BUCK2_CTRL_1 0X06 +#define LP87565_REG_BUCK2_CTRL_2 0X07 + +#define LP87565_REG_BUCK3_CTRL_1 0X08 +#define LP87565_REG_BUCK3_CTRL_2 0X09 + +#define LP87565_REG_BUCK0_VOUT 0X0A +#define LP87565_REG_BUCK0_FLOOR_VOUT 0X0B + +#define LP87565_REG_BUCK1_VOUT 0X0C +#define LP87565_REG_BUCK1_FLOOR_VOUT 0X0D + +#define LP87565_REG_BUCK2_VOUT 0X0E +#define LP87565_REG_BUCK2_FLOOR_VOUT 0X0F + +#define LP87565_REG_BUCK3_VOUT 0X10 +#define LP87565_REG_BUCK3_FLOOR_VOUT 0X11 + +#define LP87565_REG_BUCK0_DELAY 0X12 +#define LP87565_REG_BUCK1_DELAY 0X13 + +#define LP87565_REG_BUCK2_DELAY 0X14 +#define LP87565_REG_BUCK3_DELAY 0X15 + +#define LP87565_REG_GPO2_DELAY 0X16 +#define LP87565_REG_GPO3_DELAY 0X17 +#define LP87565_REG_RESET 0X18 +#define LP87565_REG_CONFIG 0X19 + +#define LP87565_REG_INT_TOP_1 0X1A +#define LP87565_REG_INT_TOP_2 0X1B + +#define LP87565_REG_INT_BUCK_0_1 0X1C +#define LP87565_REG_INT_BUCK_2_3 0X1D +#define LP87565_REG_TOP_STAT 0X1E +#define LP87565_REG_BUCK_0_1_STAT 0X1F +#define LP87565_REG_BUCK_2_3_STAT 0x20 + +#define LP87565_REG_TOP_MASK_1 0x21 +#define LP87565_REG_TOP_MASK_2 0x22 + +#define LP87565_REG_BUCK_0_1_MASK 0x23 +#define LP87565_REG_BUCK_2_3_MASK 0x24 +#define LP87565_REG_SEL_I_LOAD 0x25 + +#define LP87565_REG_I_LOAD_2 0x26 +#define LP87565_REG_I_LOAD_1 0x27 + +#define LP87565_REG_PGOOD_CTRL1 0x28 +#define LP87565_REG_PGOOD_CTRL2 0x29 +#define LP87565_REG_PGOOD_FLT 0x2A +#define LP87565_REG_PLL_CTRL 0x2B +#define LP87565_REG_PIN_FUNCTION 0x2C +#define LP87565_REG_GPIO_CONFIG 0x2D +#define LP87565_REG_GPIO_IN 0x2E +#define LP87565_REG_GPIO_OUT 0x2F + +#define LP87565_REG_MAX LP87565_REG_GPIO_OUT + +/* Register field definitions */ +#define LP87565_DEV_REV_DEV_ID 0xC0 +#define LP87565_DEV_REV_ALL_LAYER 0x30 +#define LP87565_DEV_REV_METAL_LAYER 0x0F + +#define LP87565_OTP_REV_OTP_ID 0xFF + +#define LP87565_BUCK_CTRL_1_EN BIT(7) +#define LP87565_BUCK_CTRL_1_EN_PIN_CTRL BIT(6) +#define LP87565_BUCK_CTRL_1_PIN_SELECT_EN 0x30 + +#define LP87565_BUCK_CTRL_1_ROOF_FLOOR_EN BIT(3) +#define LP87565_BUCK_CTRL_1_RDIS_EN BIT(2) +#define LP87565_BUCK_CTRL_1_FPWM BIT(1) +/* Bit0 is reserved for BUCK1 and BUCK3 and valid only for BUCK0 and BUCK2 */ +#define LP87565_BUCK_CTRL_1_FPWM_MP_0_2 BIT(0) + +#define LP87565_BUCK_CTRL_2_ILIM 0x38 +#define LP87565_BUCK_CTRL_2_SLEW_RATE 0x07 + +#define LP87565_BUCK_VSET 0xFF +#define LP87565_BUCK_FLOOR_VSET 0xFF + +#define LP87565_BUCK_SHUTDOWN_DELAY 0xF0 +#define LP87565_BUCK_STARTUP_DELAY 0x0F + +#define LP87565_GPIO_SHUTDOWN_DELAY 0xF0 +#define LP87565_GPIO_STARTUP_DELAY 0x0F + +#define LP87565_RESET_SW_RESET BIT(0) + +#define LP87565_CONFIG_DOUBLE_DELAY BIT(7) +#define LP87565_CONFIG_CLKIN_PD BIT(6) +#define LP87565_CONFIG_EN4_PD BIT(5) +#define LP87565_CONFIG_EN3_PD BIT(4) +#define LP87565_CONFIG_TDIE_WARN_LEVEL BIT(3) +#define LP87565_CONFIG_EN2_PD BIT(2) +#define LP87565_CONFIG_EN1_PD BIT(1) + +#define LP87565_INT_GPIO BIT(7) +#define LP87565_INT_BUCK23 BIT(6) +#define LP87565_INT_BUCK01 BIT(5) +#define LP87565_NO_SYNC_CLK BIT(4) +#define LP87565_TDIE_SD BIT(3) 
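As an illustrative sketch of how the register and bit-field definitions above pair with the shared regmap (the struct lp87565 handle is defined at the end of this header): a child regulator driver could enable BUCK0 by setting LP87565_BUCK_CTRL_1_EN in LP87565_REG_BUCK0_CTRL_1. The example_* helper name is hypothetical; the register, mask, and regmap call are taken from this header and the regmap API.

#include <linux/regmap.h>
#include <linux/mfd/lp87565.h>

/* Illustrative only: enable BUCK0 through the regmap owned by the MFD core. */
static int example_enable_buck0(struct lp87565 *lp87565)
{
	return regmap_update_bits(lp87565->regmap, LP87565_REG_BUCK0_CTRL_1,
				  LP87565_BUCK_CTRL_1_EN,
				  LP87565_BUCK_CTRL_1_EN);
}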
+#define LP87565_TDIE_WARN BIT(2) +#define LP87565_INT_OVP BIT(1) +#define LP87565_I_LOAD_READY BIT(0) + +#define LP87565_INT_TOP2_RESET_REG BIT(0) + +#define LP87565_BUCK1_PG_INT BIT(6) +#define LP87565_BUCK1_SC_INT BIT(5) +#define LP87565_BUCK1_ILIM_INT BIT(4) +#define LP87565_BUCK0_PG_INT BIT(2) +#define LP87565_BUCK0_SC_INT BIT(1) +#define LP87565_BUCK0_ILIM_INT BIT(0) + +#define LP87565_BUCK3_PG_INT BIT(6) +#define LP87565_BUCK3_SC_INT BIT(5) +#define LP87565_BUCK3_ILIM_INT BIT(4) +#define LP87565_BUCK2_PG_INT BIT(2) +#define LP87565_BUCK2_SC_INT BIT(1) +#define LP87565_BUCK2_ILIM_INT BIT(0) + +#define LP87565_SYNC_CLK_STAT BIT(4) +#define LP87565_TDIE_SD_STAT BIT(3) +#define LP87565_TDIE_WARN_STAT BIT(2) +#define LP87565_OVP_STAT BIT(1) + +#define LP87565_BUCK1_STAT BIT(7) +#define LP87565_BUCK1_PG_STAT BIT(6) +#define LP87565_BUCK1_ILIM_STAT BIT(4) +#define LP87565_BUCK0_STAT BIT(3) +#define LP87565_BUCK0_PG_STAT BIT(2) +#define LP87565_BUCK0_ILIM_STAT BIT(0) + +#define LP87565_BUCK3_STAT BIT(7) +#define LP87565_BUCK3_PG_STAT BIT(6) +#define LP87565_BUCK3_ILIM_STAT BIT(4) +#define LP87565_BUCK2_STAT BIT(3) +#define LP87565_BUCK2_PG_STAT BIT(2) +#define LP87565_BUCK2_ILIM_STAT BIT(0) + +#define LPL87565_GPIO_MASK BIT(7) +#define LPL87565_SYNC_CLK_MASK BIT(4) +#define LPL87565_TDIE_WARN_MASK BIT(2) +#define LPL87565_I_LOAD_READY_MASK BIT(0) + +#define LPL87565_RESET_REG_MASK BIT(0) + +#define LPL87565_BUCK1_PG_MASK BIT(6) +#define LPL87565_BUCK1_ILIM_MASK BIT(4) +#define LPL87565_BUCK0_PG_MASK BIT(2) +#define LPL87565_BUCK0_ILIM_MASK BIT(0) + +#define LPL87565_BUCK3_PG_MASK BIT(6) +#define LPL87565_BUCK3_ILIM_MASK BIT(4) +#define LPL87565_BUCK2_PG_MASK BIT(2) +#define LPL87565_BUCK2_ILIM_MASK BIT(0) + +#define LP87565_LOAD_CURRENT_BUCK_SELECT 0x3 + +#define LP87565_I_LOAD2_BUCK_LOAD_CURRENT 0x3 +#define LP87565_I_LOAD1_BUCK_LOAD_CURRENT 0xFF + +#define LP87565_PG3_SEL 0xC0 +#define LP87565_PG2_SEL 0x30 +#define LP87565_PG1_SEL 0x0C +#define LP87565_PG0_SEL 0x03 + +#define LP87565_HALF_DAY BIT(7) +#define LP87565_EN_PG0_NINT BIT(6) +#define LP87565_PGOOD_SET_DELAY BIT(5) +#define LP87565_EN_PGFLT_STAT BIT(4) +#define LP87565_PGOOD_WINDOW BIT(2) +#define LP87565_PGOOD_OD BIT(1) +#define LP87565_PGOOD_POL BIT(0) + +#define LP87565_PG3_FLT BIT(3) +#define LP87565_PG2_FLT BIT(2) +#define LP87565_PG1_FLT BIT(1) +#define LP87565_PG0_FLT BIT(0) + +#define LP87565_PLL_MODE 0xC0 +#define LP87565_EXT_CLK_FREQ 0x1F + +#define LP87565_EN_SPREAD_SPEC BIT(7) +#define LP87565_EN_PIN_CTRL_GPIO3 BIT(6) +#define LP87565_EN_PIN_SELECT_GPIO3 BIT(5) +#define LP87565_EN_PIN_CTRL_GPIO2 BIT(4) +#define LP87565_EN_PIN_SELECT_GPIO2 BIT(3) +#define LP87565_GPIO3_SEL BIT(2) +#define LP87565_GPIO2_SEL BIT(1) +#define LP87565_GPIO1_SEL BIT(0) + +#define LP87565_GOIO3_OD BIT(6) +#define LP87565_GOIO2_OD BIT(5) +#define LP87565_GOIO1_OD BIT(4) +#define LP87565_GOIO3_DIR BIT(2) +#define LP87565_GOIO2_DIR BIT(1) +#define LP87565_GOIO1_DIR BIT(0) + +#define LP87565_GOIO3_IN BIT(2) +#define LP87565_GOIO2_IN BIT(1) +#define LP87565_GOIO1_IN BIT(0) + +#define LP87565_GOIO3_OUT BIT(2) +#define LP87565_GOIO2_OUT BIT(1) +#define LP87565_GOIO1_OUT BIT(0) + +/* Number of step-down converters available */ +#define LP87565_NUM_BUCK 6 + +enum LP87565_regulator_id { + /* BUCK's */ + LP87565_BUCK_0, + LP87565_BUCK_1, + LP87565_BUCK_2, + LP87565_BUCK_3, + LP87565_BUCK_10, + LP87565_BUCK_23, +}; + +/** + * struct LP87565 - state holder for the LP87565 driver + * @dev: struct device pointer for MFD device + * @rev: revision of the 
LP87565 + * @dev_type: The device type for example lp87565-q1 + * @lock: lock guarding the data structure + * @regmap: register map of the LP87565 PMIC + * + * Device data may be used to access the LP87565 chip + */ +struct lp87565 { + struct device *dev; + u8 rev; + u8 dev_type; + struct regmap *regmap; +}; +#endif /* __LINUX_MFD_LP87565_H */ diff --git a/include/linux/mfd/lp8788-isink.h b/include/linux/mfd/lp8788-isink.h new file mode 100644 index 000000000..f38262d21 --- /dev/null +++ b/include/linux/mfd/lp8788-isink.h @@ -0,0 +1,52 @@ +/* + * TI LP8788 MFD - common definitions for current sinks + * + * Copyright 2012 Texas Instruments + * + * Author: Milo(Woogyom) Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ISINK_LP8788_H__ +#define __ISINK_LP8788_H__ + +/* register address */ +#define LP8788_ISINK_CTRL 0x99 +#define LP8788_ISINK12_IOUT 0x9A +#define LP8788_ISINK3_IOUT 0x9B +#define LP8788_ISINK1_PWM 0x9C +#define LP8788_ISINK2_PWM 0x9D +#define LP8788_ISINK3_PWM 0x9E + +/* mask bits */ +#define LP8788_ISINK1_IOUT_M 0x0F /* Addr 9Ah */ +#define LP8788_ISINK2_IOUT_M 0xF0 +#define LP8788_ISINK3_IOUT_M 0x0F /* Addr 9Bh */ + +/* 6 bits used for PWM code : Addr 9C ~ 9Eh */ +#define LP8788_ISINK_MAX_PWM 63 +#define LP8788_ISINK_SCALE_OFFSET 3 + +static const u8 lp8788_iout_addr[] = { + LP8788_ISINK12_IOUT, + LP8788_ISINK12_IOUT, + LP8788_ISINK3_IOUT, +}; + +static const u8 lp8788_iout_mask[] = { + LP8788_ISINK1_IOUT_M, + LP8788_ISINK2_IOUT_M, + LP8788_ISINK3_IOUT_M, +}; + +static const u8 lp8788_pwm_addr[] = { + LP8788_ISINK1_PWM, + LP8788_ISINK2_PWM, + LP8788_ISINK3_PWM, +}; + +#endif diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h new file mode 100644 index 000000000..2010e0de3 --- /dev/null +++ b/include/linux/mfd/lp8788.h @@ -0,0 +1,334 @@ +/* + * TI LP8788 MFD Device + * + * Copyright 2012 Texas Instruments + * + * Author: Milo(Woogyom) Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __MFD_LP8788_H__ +#define __MFD_LP8788_H__ + +#include +#include +#include +#include + +#define LP8788_DEV_BUCK "lp8788-buck" +#define LP8788_DEV_DLDO "lp8788-dldo" +#define LP8788_DEV_ALDO "lp8788-aldo" +#define LP8788_DEV_CHARGER "lp8788-charger" +#define LP8788_DEV_RTC "lp8788-rtc" +#define LP8788_DEV_BACKLIGHT "lp8788-backlight" +#define LP8788_DEV_VIBRATOR "lp8788-vibrator" +#define LP8788_DEV_KEYLED "lp8788-keyled" +#define LP8788_DEV_ADC "lp8788-adc" + +#define LP8788_NUM_BUCKS 4 +#define LP8788_NUM_DLDOS 12 +#define LP8788_NUM_ALDOS 10 +#define LP8788_NUM_BUCK2_DVS 2 + +#define LP8788_CHG_IRQ "CHG_IRQ" +#define LP8788_PRSW_IRQ "PRSW_IRQ" +#define LP8788_BATT_IRQ "BATT_IRQ" +#define LP8788_ALM_IRQ "ALARM_IRQ" + +enum lp8788_int_id { + /* interrup register 1 : Addr 00h */ + LP8788_INT_TSDL, + LP8788_INT_TSDH, + LP8788_INT_UVLO, + LP8788_INT_FLAGMON, + LP8788_INT_PWRON_TIME, + LP8788_INT_PWRON, + LP8788_INT_COMP1, + LP8788_INT_COMP2, + + /* interrupt register 2 : Addr 01h */ + LP8788_INT_CHG_INPUT_STATE, + LP8788_INT_CHG_STATE, + LP8788_INT_EOC, + LP8788_INT_CHG_RESTART, + LP8788_INT_RESTART_TIMEOUT, + LP8788_INT_FULLCHG_TIMEOUT, + LP8788_INT_PRECHG_TIMEOUT, + + /* interrupt register 3 : Addr 02h */ + LP8788_INT_RTC_ALARM1 = 17, + LP8788_INT_RTC_ALARM2, + LP8788_INT_ENTER_SYS_SUPPORT, + LP8788_INT_EXIT_SYS_SUPPORT, + LP8788_INT_BATT_LOW, + LP8788_INT_NO_BATT, + + LP8788_INT_MAX = 24, +}; + +enum lp8788_dvs_sel { + DVS_SEL_V0, + DVS_SEL_V1, + DVS_SEL_V2, + DVS_SEL_V3, +}; + +enum lp8788_ext_ldo_en_id { + EN_ALDO1, + EN_ALDO234, + EN_ALDO5, + EN_ALDO7, + EN_DLDO7, + EN_DLDO911, + EN_LDOS_MAX, +}; + +enum lp8788_charger_event { + NO_CHARGER, + CHARGER_DETECTED, +}; + +enum lp8788_bl_ctrl_mode { + LP8788_BL_REGISTER_ONLY, + LP8788_BL_COMB_PWM_BASED, /* PWM + I2C, changed by PWM input */ + LP8788_BL_COMB_REGISTER_BASED, /* PWM + I2C, changed by I2C */ +}; + +enum lp8788_bl_dim_mode { + LP8788_DIM_EXPONENTIAL, + LP8788_DIM_LINEAR, +}; + +enum lp8788_bl_full_scale_current { + LP8788_FULLSCALE_5000uA, + LP8788_FULLSCALE_8500uA, + LP8788_FULLSCALE_1200uA, + LP8788_FULLSCALE_1550uA, + LP8788_FULLSCALE_1900uA, + LP8788_FULLSCALE_2250uA, + LP8788_FULLSCALE_2600uA, + LP8788_FULLSCALE_2950uA, +}; + +enum lp8788_bl_ramp_step { + LP8788_RAMP_8us, + LP8788_RAMP_1024us, + LP8788_RAMP_2048us, + LP8788_RAMP_4096us, + LP8788_RAMP_8192us, + LP8788_RAMP_16384us, + LP8788_RAMP_32768us, + LP8788_RAMP_65538us, +}; + +enum lp8788_isink_scale { + LP8788_ISINK_SCALE_100mA, + LP8788_ISINK_SCALE_120mA, +}; + +enum lp8788_isink_number { + LP8788_ISINK_1, + LP8788_ISINK_2, + LP8788_ISINK_3, +}; + +enum lp8788_alarm_sel { + LP8788_ALARM_1, + LP8788_ALARM_2, + LP8788_ALARM_MAX, +}; + +enum lp8788_adc_id { + LPADC_VBATT_5P5, + LPADC_VIN_CHG, + LPADC_IBATT, + LPADC_IC_TEMP, + LPADC_VBATT_6P0, + LPADC_VBATT_5P0, + LPADC_ADC1, + LPADC_ADC2, + LPADC_VDD, + LPADC_VCOIN, + LPADC_VDD_LDO, + LPADC_ADC3, + LPADC_ADC4, + LPADC_MAX, +}; + +struct lp8788; + +/* + * lp8788_buck1_dvs + * @gpio : gpio pin number for dvs control + * @vsel : dvs selector for buck v1 register + */ +struct lp8788_buck1_dvs { + int gpio; + enum lp8788_dvs_sel vsel; +}; + +/* + * lp8788_buck2_dvs + * @gpio : two gpio pin numbers are used for dvs + * @vsel : dvs selector for buck v2 register + */ +struct lp8788_buck2_dvs { + int gpio[LP8788_NUM_BUCK2_DVS]; + enum lp8788_dvs_sel vsel; +}; + +/* + * struct lp8788_chg_param + * @addr : charging control register address (range : 0x11 ~ 0x1C) + * @val : charging parameter value + */ +struct 
lp8788_chg_param { + u8 addr; + u8 val; +}; + +/* + * struct lp8788_charger_platform_data + * @adc_vbatt : adc channel name for battery voltage + * @adc_batt_temp : adc channel name for battery temperature + * @max_vbatt_mv : used for calculating battery capacity + * @chg_params : initial charging parameters + * @num_chg_params : numbers of charging parameters + * @charger_event : the charger event can be reported to the platform side + */ +struct lp8788_charger_platform_data { + const char *adc_vbatt; + const char *adc_batt_temp; + unsigned int max_vbatt_mv; + struct lp8788_chg_param *chg_params; + int num_chg_params; + void (*charger_event) (struct lp8788 *lp, + enum lp8788_charger_event event); +}; + +/* + * struct lp8788_backlight_platform_data + * @name : backlight driver name. (default: "lcd-backlight") + * @initial_brightness : initial value of backlight brightness + * @bl_mode : brightness control by pwm or lp8788 register + * @dim_mode : dimming mode selection + * @full_scale : full scale current setting + * @rise_time : brightness ramp up step time + * @fall_time : brightness ramp down step time + * @pwm_pol : pwm polarity setting when bl_mode is pwm based + * @period_ns : platform specific pwm period value. unit is nano. + Only valid when bl_mode is LP8788_BL_COMB_PWM_BASED + */ +struct lp8788_backlight_platform_data { + char *name; + int initial_brightness; + enum lp8788_bl_ctrl_mode bl_mode; + enum lp8788_bl_dim_mode dim_mode; + enum lp8788_bl_full_scale_current full_scale; + enum lp8788_bl_ramp_step rise_time; + enum lp8788_bl_ramp_step fall_time; + enum pwm_polarity pwm_pol; + unsigned int period_ns; +}; + +/* + * struct lp8788_led_platform_data + * @name : led driver name. (default: "keyboard-backlight") + * @scale : current scale + * @num : current sink number + * @iout_code : current output value (Addr 9Ah ~ 9Bh) + */ +struct lp8788_led_platform_data { + char *name; + enum lp8788_isink_scale scale; + enum lp8788_isink_number num; + int iout_code; +}; + +/* + * struct lp8788_vib_platform_data + * @name : vibrator driver name + * @scale : current scale + * @num : current sink number + * @iout_code : current output value (Addr 9Ah ~ 9Bh) + * @pwm_code : PWM code value (Addr 9Ch ~ 9Eh) + */ +struct lp8788_vib_platform_data { + char *name; + enum lp8788_isink_scale scale; + enum lp8788_isink_number num; + int iout_code; + int pwm_code; +}; + +/* + * struct lp8788_platform_data + * @init_func : used for initializing registers + * before mfd driver is registered + * @buck_data : regulator initial data for buck + * @dldo_data : regulator initial data for digital ldo + * @aldo_data : regulator initial data for analog ldo + * @buck1_dvs : gpio configurations for buck1 dvs + * @buck2_dvs : gpio configurations for buck2 dvs + * @chg_pdata : platform data for charger driver + * @alarm_sel : rtc alarm selection (1 or 2) + * @bl_pdata : configurable data for backlight driver + * @led_pdata : configurable data for led driver + * @vib_pdata : configurable data for vibrator driver + * @adc_pdata : iio map data for adc driver + */ +struct lp8788_platform_data { + /* general system information */ + int (*init_func) (struct lp8788 *lp); + + /* regulators */ + struct regulator_init_data *buck_data[LP8788_NUM_BUCKS]; + struct regulator_init_data *dldo_data[LP8788_NUM_DLDOS]; + struct regulator_init_data *aldo_data[LP8788_NUM_ALDOS]; + struct lp8788_buck1_dvs *buck1_dvs; + struct lp8788_buck2_dvs *buck2_dvs; + + /* charger */ + struct lp8788_charger_platform_data *chg_pdata; + + /* rtc alarm */ 
+ enum lp8788_alarm_sel alarm_sel; + + /* backlight */ + struct lp8788_backlight_platform_data *bl_pdata; + + /* current sinks */ + struct lp8788_led_platform_data *led_pdata; + struct lp8788_vib_platform_data *vib_pdata; + + /* adc iio map data */ + struct iio_map *adc_pdata; +}; + +/* + * struct lp8788 + * @dev : parent device pointer + * @regmap : used for i2c communcation on accessing registers + * @irqdm : interrupt domain for handling nested interrupt + * @irq : pin number of IRQ_N + * @pdata : lp8788 platform specific data + */ +struct lp8788 { + struct device *dev; + struct regmap *regmap; + struct irq_domain *irqdm; + int irq; + struct lp8788_platform_data *pdata; +}; + +int lp8788_irq_init(struct lp8788 *lp, int chip_irq); +void lp8788_irq_exit(struct lp8788 *lp); +int lp8788_read_byte(struct lp8788 *lp, u8 reg, u8 *data); +int lp8788_read_multi_bytes(struct lp8788 *lp, u8 reg, u8 *data, size_t count); +int lp8788_write_byte(struct lp8788 *lp, u8 reg, u8 data); +int lp8788_update_bits(struct lp8788 *lp, u8 reg, u8 mask, u8 data); +#endif diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h new file mode 100644 index 000000000..fba8fcb54 --- /dev/null +++ b/include/linux/mfd/lpc_ich.h @@ -0,0 +1,49 @@ +/* + * linux/drivers/mfd/lpc_ich.h + * + * Copyright (c) 2012 Extreme Engineering Solution, Inc. + * Author: Aaron Sierra + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#ifndef LPC_ICH_H +#define LPC_ICH_H + +#include + +/* GPIO resources */ +#define ICH_RES_GPIO 0 +#define ICH_RES_GPE0 1 + +/* GPIO compatibility */ +enum { + ICH_I3100_GPIO, + ICH_V5_GPIO, + ICH_V6_GPIO, + ICH_V7_GPIO, + ICH_V9_GPIO, + ICH_V10CORP_GPIO, + ICH_V10CONS_GPIO, + AVOTON_GPIO, +}; + +struct lpc_ich_info { + char name[32]; + unsigned int iTCO_version; + unsigned int gpio_version; + enum intel_spi_type spi_type; + u8 use_gpio; +}; + +#endif diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h new file mode 100644 index 000000000..c33268184 --- /dev/null +++ b/include/linux/mfd/madera/core.h @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MFD internals for Cirrus Logic Madera codecs + * + * Copyright (C) 2015-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. 
+ */ + +#ifndef MADERA_CORE_H +#define MADERA_CORE_H + +#include +#include +#include +#include +#include +#include + +enum madera_type { + /* 0 is reserved for indicating failure to identify */ + CS47L35 = 1, + CS47L85 = 2, + CS47L90 = 3, + CS47L91 = 4, + WM1840 = 7, +}; + +#define MADERA_MAX_CORE_SUPPLIES 2 +#define MADERA_MAX_GPIOS 40 + +#define CS47L35_NUM_GPIOS 16 +#define CS47L85_NUM_GPIOS 40 +#define CS47L90_NUM_GPIOS 38 + +#define MADERA_MAX_MICBIAS 4 + +/* Notifier events */ +#define MADERA_NOTIFY_VOICE_TRIGGER 0x1 +#define MADERA_NOTIFY_HPDET 0x2 +#define MADERA_NOTIFY_MICDET 0x4 + +/* GPIO Function Definitions */ +#define MADERA_GP_FN_ALTERNATE 0x00 +#define MADERA_GP_FN_GPIO 0x01 +#define MADERA_GP_FN_DSP_GPIO 0x02 +#define MADERA_GP_FN_IRQ1 0x03 +#define MADERA_GP_FN_IRQ2 0x04 +#define MADERA_GP_FN_FLL1_CLOCK 0x10 +#define MADERA_GP_FN_FLL2_CLOCK 0x11 +#define MADERA_GP_FN_FLL3_CLOCK 0x12 +#define MADERA_GP_FN_FLLAO_CLOCK 0x13 +#define MADERA_GP_FN_FLL1_LOCK 0x18 +#define MADERA_GP_FN_FLL2_LOCK 0x19 +#define MADERA_GP_FN_FLL3_LOCK 0x1A +#define MADERA_GP_FN_FLLAO_LOCK 0x1B +#define MADERA_GP_FN_OPCLK_OUT 0x40 +#define MADERA_GP_FN_OPCLK_ASYNC_OUT 0x41 +#define MADERA_GP_FN_PWM1 0x48 +#define MADERA_GP_FN_PWM2 0x49 +#define MADERA_GP_FN_SPDIF_OUT 0x4C +#define MADERA_GP_FN_HEADPHONE_DET 0x50 +#define MADERA_GP_FN_MIC_DET 0x58 +#define MADERA_GP_FN_DRC1_SIGNAL_DETECT 0x80 +#define MADERA_GP_FN_DRC2_SIGNAL_DETECT 0x81 +#define MADERA_GP_FN_ASRC1_IN1_LOCK 0x88 +#define MADERA_GP_FN_ASRC1_IN2_LOCK 0x89 +#define MADERA_GP_FN_ASRC2_IN1_LOCK 0x8A +#define MADERA_GP_FN_ASRC2_IN2_LOCK 0x8B +#define MADERA_GP_FN_DSP_IRQ1 0xA0 +#define MADERA_GP_FN_DSP_IRQ2 0xA1 +#define MADERA_GP_FN_DSP_IRQ3 0xA2 +#define MADERA_GP_FN_DSP_IRQ4 0xA3 +#define MADERA_GP_FN_DSP_IRQ5 0xA4 +#define MADERA_GP_FN_DSP_IRQ6 0xA5 +#define MADERA_GP_FN_DSP_IRQ7 0xA6 +#define MADERA_GP_FN_DSP_IRQ8 0xA7 +#define MADERA_GP_FN_DSP_IRQ9 0xA8 +#define MADERA_GP_FN_DSP_IRQ10 0xA9 +#define MADERA_GP_FN_DSP_IRQ11 0xAA +#define MADERA_GP_FN_DSP_IRQ12 0xAB +#define MADERA_GP_FN_DSP_IRQ13 0xAC +#define MADERA_GP_FN_DSP_IRQ14 0xAD +#define MADERA_GP_FN_DSP_IRQ15 0xAE +#define MADERA_GP_FN_DSP_IRQ16 0xAF +#define MADERA_GP_FN_HPOUT1L_SC 0xB0 +#define MADERA_GP_FN_HPOUT1R_SC 0xB1 +#define MADERA_GP_FN_HPOUT2L_SC 0xB2 +#define MADERA_GP_FN_HPOUT2R_SC 0xB3 +#define MADERA_GP_FN_HPOUT3L_SC 0xB4 +#define MADERA_GP_FN_HPOUT4R_SC 0xB5 +#define MADERA_GP_FN_SPKOUTL_SC 0xB6 +#define MADERA_GP_FN_SPKOUTR_SC 0xB7 +#define MADERA_GP_FN_HPOUT1L_ENA 0xC0 +#define MADERA_GP_FN_HPOUT1R_ENA 0xC1 +#define MADERA_GP_FN_HPOUT2L_ENA 0xC2 +#define MADERA_GP_FN_HPOUT2R_ENA 0xC3 +#define MADERA_GP_FN_HPOUT3L_ENA 0xC4 +#define MADERA_GP_FN_HPOUT4R_ENA 0xC5 +#define MADERA_GP_FN_SPKOUTL_ENA 0xC6 +#define MADERA_GP_FN_SPKOUTR_ENA 0xC7 +#define MADERA_GP_FN_HPOUT1L_DIS 0xD0 +#define MADERA_GP_FN_HPOUT1R_DIS 0xD1 +#define MADERA_GP_FN_HPOUT2L_DIS 0xD2 +#define MADERA_GP_FN_HPOUT2R_DIS 0xD3 +#define MADERA_GP_FN_HPOUT3L_DIS 0xD4 +#define MADERA_GP_FN_HPOUT4R_DIS 0xD5 +#define MADERA_GP_FN_SPKOUTL_DIS 0xD6 +#define MADERA_GP_FN_SPKOUTR_DIS 0xD7 +#define MADERA_GP_FN_SPK_SHUTDOWN 0xE0 +#define MADERA_GP_FN_SPK_OVH_SHUTDOWN 0xE1 +#define MADERA_GP_FN_SPK_OVH_WARN 0xE2 +#define MADERA_GP_FN_TIMER1_STATUS 0x140 +#define MADERA_GP_FN_TIMER2_STATUS 0x141 +#define MADERA_GP_FN_TIMER3_STATUS 0x142 +#define MADERA_GP_FN_TIMER4_STATUS 0x143 +#define MADERA_GP_FN_TIMER5_STATUS 0x144 +#define MADERA_GP_FN_TIMER6_STATUS 0x145 +#define MADERA_GP_FN_TIMER7_STATUS 0x146 
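The MADERA_NOTIFY_* events defined above are delivered through the blocking notifier head carried in struct madera (documented further down in this header). A hedged sketch, assuming the caller has been handed the parent struct madera pointer, of how a machine driver might listen for headphone-detect events; the example_* names are hypothetical.

#include <linux/notifier.h>
#include <linux/mfd/madera/core.h>

/* Illustrative only: react to Madera accessory-detect notifications. */
static int example_madera_event(struct notifier_block *nb,
				unsigned long event, void *data)
{
	if (event & MADERA_NOTIFY_HPDET) {
		/* Headphone detection completed; query the codec driver here. */
	}

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_madera_event,
};

static int example_register(struct madera *madera)
{
	return blocking_notifier_chain_register(&madera->notifier, &example_nb);
}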
+#define MADERA_GP_FN_TIMER8_STATUS 0x147 +#define MADERA_GP_FN_EVENTLOG1_FIFO_STS 0x150 +#define MADERA_GP_FN_EVENTLOG2_FIFO_STS 0x151 +#define MADERA_GP_FN_EVENTLOG3_FIFO_STS 0x152 +#define MADERA_GP_FN_EVENTLOG4_FIFO_STS 0x153 +#define MADERA_GP_FN_EVENTLOG5_FIFO_STS 0x154 +#define MADERA_GP_FN_EVENTLOG6_FIFO_STS 0x155 +#define MADERA_GP_FN_EVENTLOG7_FIFO_STS 0x156 +#define MADERA_GP_FN_EVENTLOG8_FIFO_STS 0x157 + +struct snd_soc_dapm_context; + +/* + * struct madera - internal data shared by the set of Madera drivers + * + * This should not be used by anything except child drivers of the Madera MFD + * + * @regmap: pointer to the regmap instance for 16-bit registers + * @regmap_32bit: pointer to the regmap instance for 32-bit registers + * @dev: pointer to the MFD device + * @type: type of codec + * @rev: silicon revision + * @type_name: display name of this codec + * @num_core_supplies: number of core supply regulators + * @core_supplies: list of core supplies that are always required + * @dcvdd: pointer to DCVDD regulator + * @internal_dcvdd: true if DCVDD is supplied from the internal LDO1 + * @pdata: our pdata + * @irq_dev: the irqchip child driver device + * @irq: host irq number from SPI or I2C configuration + * @out_clamp: indicates output clamp state for each analogue output + * @out_shorted: indicates short circuit state for each analogue output + * @hp_ena: bitflags of enable state for the headphone outputs + * @num_micbias: number of MICBIAS outputs + * @num_childbias: number of child biases for each MICBIAS + * @dapm: pointer to codec driver DAPM context + * @notifier: notifier for signalling events to ASoC machine driver + */ +struct madera { + struct regmap *regmap; + struct regmap *regmap_32bit; + + struct device *dev; + + enum madera_type type; + unsigned int rev; + const char *type_name; + + int num_core_supplies; + struct regulator_bulk_data core_supplies[MADERA_MAX_CORE_SUPPLIES]; + struct regulator *dcvdd; + bool internal_dcvdd; + + struct madera_pdata pdata; + + struct device *irq_dev; + int irq; + + unsigned int num_micbias; + unsigned int num_childbias[MADERA_MAX_MICBIAS]; + + struct snd_soc_dapm_context *dapm; + + struct blocking_notifier_head notifier; +}; +#endif diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h new file mode 100644 index 000000000..0b311f39c --- /dev/null +++ b/include/linux/mfd/madera/pdata.h @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Platform data for Cirrus Logic Madera codecs + * + * Copyright (C) 2015-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. 
+ */ + +#ifndef MADERA_PDATA_H +#define MADERA_PDATA_H + +#include +#include +#include +#include + +#define MADERA_MAX_MICBIAS 4 +#define MADERA_MAX_CHILD_MICBIAS 4 + +#define MADERA_MAX_GPSW 2 + +struct gpio_desc; +struct pinctrl_map; +struct madera_irqchip_pdata; +struct madera_codec_pdata; + +/** + * struct madera_pdata - Configuration data for Madera devices + * + * @reset: GPIO controlling /RESET (NULL = none) + * @ldo1: Substruct of pdata for the LDO1 regulator + * @micvdd: Substruct of pdata for the MICVDD regulator + * @irq_flags: Mode for primary IRQ (defaults to active low) + * @gpio_base: Base GPIO number + * @gpio_configs: Array of GPIO configurations (See Documentation/pinctrl.txt) + * @n_gpio_configs: Number of entries in gpio_configs + * @gpsw: General purpose switch mode setting. Depends on the external + * hardware connected to the switch. (See the SW1_MODE field + * in the datasheet for the available values for your codec) + */ +struct madera_pdata { + struct gpio_desc *reset; + + struct arizona_ldo1_pdata ldo1; + struct arizona_micsupp_pdata micvdd; + + unsigned int irq_flags; + int gpio_base; + + const struct pinctrl_map *gpio_configs; + int n_gpio_configs; + + u32 gpsw[MADERA_MAX_GPSW]; +}; + +#endif diff --git a/include/linux/mfd/madera/registers.h b/include/linux/mfd/madera/registers.h new file mode 100644 index 000000000..977e06101 --- /dev/null +++ b/include/linux/mfd/madera/registers.h @@ -0,0 +1,3968 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Madera register definitions + * + * Copyright (C) 2015-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. + */ + +#ifndef MADERA_REGISTERS_H +#define MADERA_REGISTERS_H + +/* + * Register Addresses. 
+ */ +#define MADERA_SOFTWARE_RESET 0x00 +#define MADERA_HARDWARE_REVISION 0x01 +#define MADERA_CTRL_IF_CFG_1 0x08 +#define MADERA_CTRL_IF_CFG_2 0x09 +#define MADERA_CTRL_IF_CFG_3 0x0A +#define MADERA_WRITE_SEQUENCER_CTRL_0 0x16 +#define MADERA_WRITE_SEQUENCER_CTRL_1 0x17 +#define MADERA_WRITE_SEQUENCER_CTRL_2 0x18 +#define MADERA_TONE_GENERATOR_1 0x20 +#define MADERA_TONE_GENERATOR_2 0x21 +#define MADERA_TONE_GENERATOR_3 0x22 +#define MADERA_TONE_GENERATOR_4 0x23 +#define MADERA_TONE_GENERATOR_5 0x24 +#define MADERA_PWM_DRIVE_1 0x30 +#define MADERA_PWM_DRIVE_2 0x31 +#define MADERA_PWM_DRIVE_3 0x32 +#define MADERA_SEQUENCE_CONTROL 0x41 +#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61 +#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62 +#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63 +#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_4 0x64 +#define MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x66 +#define MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x67 +#define MADERA_HAPTICS_CONTROL_1 0x90 +#define MADERA_HAPTICS_CONTROL_2 0x91 +#define MADERA_HAPTICS_PHASE_1_INTENSITY 0x92 +#define MADERA_HAPTICS_PHASE_1_DURATION 0x93 +#define MADERA_HAPTICS_PHASE_2_INTENSITY 0x94 +#define MADERA_HAPTICS_PHASE_2_DURATION 0x95 +#define MADERA_HAPTICS_PHASE_3_INTENSITY 0x96 +#define MADERA_HAPTICS_PHASE_3_DURATION 0x97 +#define MADERA_HAPTICS_STATUS 0x98 +#define MADERA_COMFORT_NOISE_GENERATOR 0xA0 +#define MADERA_CLOCK_32K_1 0x100 +#define MADERA_SYSTEM_CLOCK_1 0x101 +#define MADERA_SAMPLE_RATE_1 0x102 +#define MADERA_SAMPLE_RATE_2 0x103 +#define MADERA_SAMPLE_RATE_3 0x104 +#define MADERA_SAMPLE_RATE_1_STATUS 0x10A +#define MADERA_SAMPLE_RATE_2_STATUS 0x10B +#define MADERA_SAMPLE_RATE_3_STATUS 0x10C +#define MADERA_ASYNC_CLOCK_1 0x112 +#define MADERA_ASYNC_SAMPLE_RATE_1 0x113 +#define MADERA_ASYNC_SAMPLE_RATE_2 0x114 +#define MADERA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B +#define MADERA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C +#define MADERA_DSP_CLOCK_1 0x120 +#define MADERA_DSP_CLOCK_2 0x122 +#define MADERA_OUTPUT_SYSTEM_CLOCK 0x149 +#define MADERA_OUTPUT_ASYNC_CLOCK 0x14A +#define MADERA_RATE_ESTIMATOR_1 0x152 +#define MADERA_RATE_ESTIMATOR_2 0x153 +#define MADERA_RATE_ESTIMATOR_3 0x154 +#define MADERA_RATE_ESTIMATOR_4 0x155 +#define MADERA_RATE_ESTIMATOR_5 0x156 +#define MADERA_FLL1_CONTROL_1 0x171 +#define MADERA_FLL1_CONTROL_2 0x172 +#define MADERA_FLL1_CONTROL_3 0x173 +#define MADERA_FLL1_CONTROL_4 0x174 +#define MADERA_FLL1_CONTROL_5 0x175 +#define MADERA_FLL1_CONTROL_6 0x176 +#define MADERA_FLL1_LOOP_FILTER_TEST_1 0x177 +#define MADERA_FLL1_NCO_TEST_0 0x178 +#define MADERA_FLL1_CONTROL_7 0x179 +#define MADERA_FLL1_EFS_2 0x17A +#define CS47L35_FLL1_SYNCHRONISER_1 0x17F +#define CS47L35_FLL1_SYNCHRONISER_2 0x180 +#define CS47L35_FLL1_SYNCHRONISER_3 0x181 +#define CS47L35_FLL1_SYNCHRONISER_4 0x182 +#define CS47L35_FLL1_SYNCHRONISER_5 0x183 +#define CS47L35_FLL1_SYNCHRONISER_6 0x184 +#define CS47L35_FLL1_SYNCHRONISER_7 0x185 +#define CS47L35_FLL1_SPREAD_SPECTRUM 0x187 +#define CS47L35_FLL1_GPIO_CLOCK 0x188 +#define MADERA_FLL1_SYNCHRONISER_1 0x181 +#define MADERA_FLL1_SYNCHRONISER_2 0x182 +#define MADERA_FLL1_SYNCHRONISER_3 0x183 +#define MADERA_FLL1_SYNCHRONISER_4 0x184 +#define MADERA_FLL1_SYNCHRONISER_5 0x185 +#define MADERA_FLL1_SYNCHRONISER_6 0x186 +#define MADERA_FLL1_SYNCHRONISER_7 0x187 +#define MADERA_FLL1_SPREAD_SPECTRUM 0x189 +#define MADERA_FLL1_GPIO_CLOCK 0x18A +#define MADERA_FLL2_CONTROL_1 0x191 +#define MADERA_FLL2_CONTROL_2 0x192 +#define MADERA_FLL2_CONTROL_3 0x193 +#define MADERA_FLL2_CONTROL_4 
0x194 +#define MADERA_FLL2_CONTROL_5 0x195 +#define MADERA_FLL2_CONTROL_6 0x196 +#define MADERA_FLL2_LOOP_FILTER_TEST_1 0x197 +#define MADERA_FLL2_NCO_TEST_0 0x198 +#define MADERA_FLL2_CONTROL_7 0x199 +#define MADERA_FLL2_EFS_2 0x19A +#define MADERA_FLL2_SYNCHRONISER_1 0x1A1 +#define MADERA_FLL2_SYNCHRONISER_2 0x1A2 +#define MADERA_FLL2_SYNCHRONISER_3 0x1A3 +#define MADERA_FLL2_SYNCHRONISER_4 0x1A4 +#define MADERA_FLL2_SYNCHRONISER_5 0x1A5 +#define MADERA_FLL2_SYNCHRONISER_6 0x1A6 +#define MADERA_FLL2_SYNCHRONISER_7 0x1A7 +#define MADERA_FLL2_SPREAD_SPECTRUM 0x1A9 +#define MADERA_FLL2_GPIO_CLOCK 0x1AA +#define MADERA_FLL3_CONTROL_1 0x1B1 +#define MADERA_FLL3_CONTROL_2 0x1B2 +#define MADERA_FLL3_CONTROL_3 0x1B3 +#define MADERA_FLL3_CONTROL_4 0x1B4 +#define MADERA_FLL3_CONTROL_5 0x1B5 +#define MADERA_FLL3_CONTROL_6 0x1B6 +#define MADERA_FLL3_LOOP_FILTER_TEST_1 0x1B7 +#define MADERA_FLL3_NCO_TEST_0 0x1B8 +#define MADERA_FLL3_CONTROL_7 0x1B9 +#define MADERA_FLL3_SYNCHRONISER_1 0x1C1 +#define MADERA_FLL3_SYNCHRONISER_2 0x1C2 +#define MADERA_FLL3_SYNCHRONISER_3 0x1C3 +#define MADERA_FLL3_SYNCHRONISER_4 0x1C4 +#define MADERA_FLL3_SYNCHRONISER_5 0x1C5 +#define MADERA_FLL3_SYNCHRONISER_6 0x1C6 +#define MADERA_FLL3_SYNCHRONISER_7 0x1C7 +#define MADERA_FLL3_SPREAD_SPECTRUM 0x1C9 +#define MADERA_FLL3_GPIO_CLOCK 0x1CA +#define MADERA_FLLAO_CONTROL_1 0x1D1 +#define MADERA_FLLAO_CONTROL_2 0x1D2 +#define MADERA_FLLAO_CONTROL_3 0x1D3 +#define MADERA_FLLAO_CONTROL_4 0x1D4 +#define MADERA_FLLAO_CONTROL_5 0x1D5 +#define MADERA_FLLAO_CONTROL_6 0x1D6 +#define MADERA_FLLAO_CONTROL_7 0x1D8 +#define MADERA_FLLAO_CONTROL_8 0x1DA +#define MADERA_FLLAO_CONTROL_9 0x1DB +#define MADERA_FLLAO_CONTROL_10 0x1DC +#define MADERA_FLLAO_CONTROL_11 0x1DD +#define MADERA_MIC_CHARGE_PUMP_1 0x200 +#define MADERA_HP_CHARGE_PUMP_8 0x20B +#define MADERA_LDO1_CONTROL_1 0x210 +#define MADERA_LDO2_CONTROL_1 0x213 +#define MADERA_MIC_BIAS_CTRL_1 0x218 +#define MADERA_MIC_BIAS_CTRL_2 0x219 +#define MADERA_MIC_BIAS_CTRL_3 0x21A +#define MADERA_MIC_BIAS_CTRL_4 0x21B +#define MADERA_MIC_BIAS_CTRL_5 0x21C +#define MADERA_MIC_BIAS_CTRL_6 0x21E +#define MADERA_HP_CTRL_1L 0x225 +#define MADERA_HP_CTRL_1R 0x226 +#define MADERA_HP_CTRL_2L 0x227 +#define MADERA_HP_CTRL_2R 0x228 +#define MADERA_HP_CTRL_3L 0x229 +#define MADERA_HP_CTRL_3R 0x22A +#define MADERA_DCS_HP1L_CONTROL 0x232 +#define MADERA_DCS_HP1R_CONTROL 0x238 +#define MADERA_EDRE_HP_STEREO_CONTROL 0x27E +#define MADERA_ACCESSORY_DETECT_MODE_1 0x293 +#define MADERA_HEADPHONE_DETECT_0 0x299 +#define MADERA_HEADPHONE_DETECT_1 0x29B +#define MADERA_HEADPHONE_DETECT_2 0x29C +#define MADERA_HEADPHONE_DETECT_3 0x29D +#define MADERA_HEADPHONE_DETECT_4 0x29E +#define MADERA_HEADPHONE_DETECT_5 0x29F +#define MADERA_MIC_DETECT_1_CONTROL_0 0x2A2 +#define MADERA_MIC_DETECT_1_CONTROL_1 0x2A3 +#define MADERA_MIC_DETECT_1_CONTROL_2 0x2A4 +#define MADERA_MIC_DETECT_1_CONTROL_3 0x2A5 +#define MADERA_MIC_DETECT_1_LEVEL_1 0x2A6 +#define MADERA_MIC_DETECT_1_LEVEL_2 0x2A7 +#define MADERA_MIC_DETECT_1_LEVEL_3 0x2A8 +#define MADERA_MIC_DETECT_1_LEVEL_4 0x2A9 +#define MADERA_MIC_DETECT_1_CONTROL_4 0x2AB +#define MADERA_MIC_DETECT_2_CONTROL_0 0x2B2 +#define MADERA_MIC_DETECT_2_CONTROL_1 0x2B3 +#define MADERA_MIC_DETECT_2_CONTROL_2 0x2B4 +#define MADERA_MIC_DETECT_2_CONTROL_3 0x2B5 +#define MADERA_MIC_DETECT_2_LEVEL_1 0x2B6 +#define MADERA_MIC_DETECT_2_LEVEL_2 0x2B7 +#define MADERA_MIC_DETECT_2_LEVEL_3 0x2B8 +#define MADERA_MIC_DETECT_2_LEVEL_4 0x2B9 +#define MADERA_MIC_DETECT_2_CONTROL_4 0x2BB +#define 
MADERA_MICD_CLAMP_CONTROL 0x2C6 +#define MADERA_GP_SWITCH_1 0x2C8 +#define MADERA_JACK_DETECT_ANALOGUE 0x2D3 +#define MADERA_INPUT_ENABLES 0x300 +#define MADERA_INPUT_ENABLES_STATUS 0x301 +#define MADERA_INPUT_RATE 0x308 +#define MADERA_INPUT_VOLUME_RAMP 0x309 +#define MADERA_HPF_CONTROL 0x30C +#define MADERA_IN1L_CONTROL 0x310 +#define MADERA_ADC_DIGITAL_VOLUME_1L 0x311 +#define MADERA_DMIC1L_CONTROL 0x312 +#define MADERA_IN1L_RATE_CONTROL 0x313 +#define MADERA_IN1R_CONTROL 0x314 +#define MADERA_ADC_DIGITAL_VOLUME_1R 0x315 +#define MADERA_DMIC1R_CONTROL 0x316 +#define MADERA_IN1R_RATE_CONTROL 0x317 +#define MADERA_IN2L_CONTROL 0x318 +#define MADERA_ADC_DIGITAL_VOLUME_2L 0x319 +#define MADERA_DMIC2L_CONTROL 0x31A +#define MADERA_IN2L_RATE_CONTROL 0x31B +#define MADERA_IN2R_CONTROL 0x31C +#define MADERA_ADC_DIGITAL_VOLUME_2R 0x31D +#define MADERA_DMIC2R_CONTROL 0x31E +#define MADERA_IN2R_RATE_CONTROL 0x31F +#define MADERA_IN3L_CONTROL 0x320 +#define MADERA_ADC_DIGITAL_VOLUME_3L 0x321 +#define MADERA_DMIC3L_CONTROL 0x322 +#define MADERA_IN3L_RATE_CONTROL 0x323 +#define MADERA_IN3R_CONTROL 0x324 +#define MADERA_ADC_DIGITAL_VOLUME_3R 0x325 +#define MADERA_DMIC3R_CONTROL 0x326 +#define MADERA_IN3R_RATE_CONTROL 0x327 +#define MADERA_IN4L_CONTROL 0x328 +#define MADERA_ADC_DIGITAL_VOLUME_4L 0x329 +#define MADERA_DMIC4L_CONTROL 0x32A +#define MADERA_IN4L_RATE_CONTROL 0x32B +#define MADERA_IN4R_CONTROL 0x32C +#define MADERA_ADC_DIGITAL_VOLUME_4R 0x32D +#define MADERA_DMIC4R_CONTROL 0x32E +#define MADERA_IN4R_RATE_CONTROL 0x32F +#define MADERA_IN5L_CONTROL 0x330 +#define MADERA_ADC_DIGITAL_VOLUME_5L 0x331 +#define MADERA_DMIC5L_CONTROL 0x332 +#define MADERA_IN5L_RATE_CONTROL 0x333 +#define MADERA_IN5R_CONTROL 0x334 +#define MADERA_ADC_DIGITAL_VOLUME_5R 0x335 +#define MADERA_DMIC5R_CONTROL 0x336 +#define MADERA_IN5R_RATE_CONTROL 0x337 +#define MADERA_IN6L_CONTROL 0x338 +#define MADERA_ADC_DIGITAL_VOLUME_6L 0x339 +#define MADERA_DMIC6L_CONTROL 0x33A +#define MADERA_IN6R_CONTROL 0x33C +#define MADERA_ADC_DIGITAL_VOLUME_6R 0x33D +#define MADERA_DMIC6R_CONTROL 0x33E +#define MADERA_OUTPUT_ENABLES_1 0x400 +#define MADERA_OUTPUT_STATUS_1 0x401 +#define MADERA_RAW_OUTPUT_STATUS_1 0x406 +#define MADERA_OUTPUT_RATE_1 0x408 +#define MADERA_OUTPUT_VOLUME_RAMP 0x409 +#define MADERA_OUTPUT_PATH_CONFIG_1L 0x410 +#define MADERA_DAC_DIGITAL_VOLUME_1L 0x411 +#define MADERA_OUTPUT_PATH_CONFIG_1 0x412 +#define MADERA_NOISE_GATE_SELECT_1L 0x413 +#define MADERA_OUTPUT_PATH_CONFIG_1R 0x414 +#define MADERA_DAC_DIGITAL_VOLUME_1R 0x415 +#define MADERA_NOISE_GATE_SELECT_1R 0x417 +#define MADERA_OUTPUT_PATH_CONFIG_2L 0x418 +#define MADERA_DAC_DIGITAL_VOLUME_2L 0x419 +#define MADERA_OUTPUT_PATH_CONFIG_2 0x41A +#define MADERA_NOISE_GATE_SELECT_2L 0x41B +#define MADERA_OUTPUT_PATH_CONFIG_2R 0x41C +#define MADERA_DAC_DIGITAL_VOLUME_2R 0x41D +#define MADERA_NOISE_GATE_SELECT_2R 0x41F +#define MADERA_OUTPUT_PATH_CONFIG_3L 0x420 +#define MADERA_DAC_DIGITAL_VOLUME_3L 0x421 +#define MADERA_NOISE_GATE_SELECT_3L 0x423 +#define MADERA_OUTPUT_PATH_CONFIG_3R 0x424 +#define MADERA_DAC_DIGITAL_VOLUME_3R 0x425 +#define MADERA_NOISE_GATE_SELECT_3R 0x427 +#define MADERA_OUTPUT_PATH_CONFIG_4L 0x428 +#define MADERA_DAC_DIGITAL_VOLUME_4L 0x429 +#define MADERA_NOISE_GATE_SELECT_4L 0x42B +#define MADERA_OUTPUT_PATH_CONFIG_4R 0x42C +#define MADERA_DAC_DIGITAL_VOLUME_4R 0x42D +#define MADERA_NOISE_GATE_SELECT_4R 0x42F +#define MADERA_OUTPUT_PATH_CONFIG_5L 0x430 +#define MADERA_DAC_DIGITAL_VOLUME_5L 0x431 +#define MADERA_NOISE_GATE_SELECT_5L 0x433 
+#define MADERA_OUTPUT_PATH_CONFIG_5R 0x434 +#define MADERA_DAC_DIGITAL_VOLUME_5R 0x435 +#define MADERA_NOISE_GATE_SELECT_5R 0x437 +#define MADERA_OUTPUT_PATH_CONFIG_6L 0x438 +#define MADERA_DAC_DIGITAL_VOLUME_6L 0x439 +#define MADERA_NOISE_GATE_SELECT_6L 0x43B +#define MADERA_OUTPUT_PATH_CONFIG_6R 0x43C +#define MADERA_DAC_DIGITAL_VOLUME_6R 0x43D +#define MADERA_NOISE_GATE_SELECT_6R 0x43F +#define MADERA_DRE_ENABLE 0x440 +#define MADERA_EDRE_ENABLE 0x448 +#define MADERA_EDRE_MANUAL 0x44A +#define MADERA_DAC_AEC_CONTROL_1 0x450 +#define MADERA_DAC_AEC_CONTROL_2 0x451 +#define MADERA_NOISE_GATE_CONTROL 0x458 +#define MADERA_PDM_SPK1_CTRL_1 0x490 +#define MADERA_PDM_SPK1_CTRL_2 0x491 +#define MADERA_PDM_SPK2_CTRL_1 0x492 +#define MADERA_PDM_SPK2_CTRL_2 0x493 +#define MADERA_HP1_SHORT_CIRCUIT_CTRL 0x4A0 +#define MADERA_HP2_SHORT_CIRCUIT_CTRL 0x4A1 +#define MADERA_HP3_SHORT_CIRCUIT_CTRL 0x4A2 +#define MADERA_HP_TEST_CTRL_1 0x4A4 +#define MADERA_HP_TEST_CTRL_5 0x4A8 +#define MADERA_HP_TEST_CTRL_6 0x4A9 +#define MADERA_AIF1_BCLK_CTRL 0x500 +#define MADERA_AIF1_TX_PIN_CTRL 0x501 +#define MADERA_AIF1_RX_PIN_CTRL 0x502 +#define MADERA_AIF1_RATE_CTRL 0x503 +#define MADERA_AIF1_FORMAT 0x504 +#define MADERA_AIF1_RX_BCLK_RATE 0x506 +#define MADERA_AIF1_FRAME_CTRL_1 0x507 +#define MADERA_AIF1_FRAME_CTRL_2 0x508 +#define MADERA_AIF1_FRAME_CTRL_3 0x509 +#define MADERA_AIF1_FRAME_CTRL_4 0x50A +#define MADERA_AIF1_FRAME_CTRL_5 0x50B +#define MADERA_AIF1_FRAME_CTRL_6 0x50C +#define MADERA_AIF1_FRAME_CTRL_7 0x50D +#define MADERA_AIF1_FRAME_CTRL_8 0x50E +#define MADERA_AIF1_FRAME_CTRL_9 0x50F +#define MADERA_AIF1_FRAME_CTRL_10 0x510 +#define MADERA_AIF1_FRAME_CTRL_11 0x511 +#define MADERA_AIF1_FRAME_CTRL_12 0x512 +#define MADERA_AIF1_FRAME_CTRL_13 0x513 +#define MADERA_AIF1_FRAME_CTRL_14 0x514 +#define MADERA_AIF1_FRAME_CTRL_15 0x515 +#define MADERA_AIF1_FRAME_CTRL_16 0x516 +#define MADERA_AIF1_FRAME_CTRL_17 0x517 +#define MADERA_AIF1_FRAME_CTRL_18 0x518 +#define MADERA_AIF1_TX_ENABLES 0x519 +#define MADERA_AIF1_RX_ENABLES 0x51A +#define MADERA_AIF1_FORCE_WRITE 0x51B +#define MADERA_AIF2_BCLK_CTRL 0x540 +#define MADERA_AIF2_TX_PIN_CTRL 0x541 +#define MADERA_AIF2_RX_PIN_CTRL 0x542 +#define MADERA_AIF2_RATE_CTRL 0x543 +#define MADERA_AIF2_FORMAT 0x544 +#define MADERA_AIF2_RX_BCLK_RATE 0x546 +#define MADERA_AIF2_FRAME_CTRL_1 0x547 +#define MADERA_AIF2_FRAME_CTRL_2 0x548 +#define MADERA_AIF2_FRAME_CTRL_3 0x549 +#define MADERA_AIF2_FRAME_CTRL_4 0x54A +#define MADERA_AIF2_FRAME_CTRL_5 0x54B +#define MADERA_AIF2_FRAME_CTRL_6 0x54C +#define MADERA_AIF2_FRAME_CTRL_7 0x54D +#define MADERA_AIF2_FRAME_CTRL_8 0x54E +#define MADERA_AIF2_FRAME_CTRL_9 0x54F +#define MADERA_AIF2_FRAME_CTRL_10 0x550 +#define MADERA_AIF2_FRAME_CTRL_11 0x551 +#define MADERA_AIF2_FRAME_CTRL_12 0x552 +#define MADERA_AIF2_FRAME_CTRL_13 0x553 +#define MADERA_AIF2_FRAME_CTRL_14 0x554 +#define MADERA_AIF2_FRAME_CTRL_15 0x555 +#define MADERA_AIF2_FRAME_CTRL_16 0x556 +#define MADERA_AIF2_FRAME_CTRL_17 0x557 +#define MADERA_AIF2_FRAME_CTRL_18 0x558 +#define MADERA_AIF2_TX_ENABLES 0x559 +#define MADERA_AIF2_RX_ENABLES 0x55A +#define MADERA_AIF2_FORCE_WRITE 0x55B +#define MADERA_AIF3_BCLK_CTRL 0x580 +#define MADERA_AIF3_TX_PIN_CTRL 0x581 +#define MADERA_AIF3_RX_PIN_CTRL 0x582 +#define MADERA_AIF3_RATE_CTRL 0x583 +#define MADERA_AIF3_FORMAT 0x584 +#define MADERA_AIF3_RX_BCLK_RATE 0x586 +#define MADERA_AIF3_FRAME_CTRL_1 0x587 +#define MADERA_AIF3_FRAME_CTRL_2 0x588 +#define MADERA_AIF3_FRAME_CTRL_3 0x589 +#define MADERA_AIF3_FRAME_CTRL_4 0x58A +#define 
MADERA_AIF3_FRAME_CTRL_11 0x591 +#define MADERA_AIF3_FRAME_CTRL_12 0x592 +#define MADERA_AIF3_TX_ENABLES 0x599 +#define MADERA_AIF3_RX_ENABLES 0x59A +#define MADERA_AIF3_FORCE_WRITE 0x59B +#define MADERA_AIF4_BCLK_CTRL 0x5A0 +#define MADERA_AIF4_TX_PIN_CTRL 0x5A1 +#define MADERA_AIF4_RX_PIN_CTRL 0x5A2 +#define MADERA_AIF4_RATE_CTRL 0x5A3 +#define MADERA_AIF4_FORMAT 0x5A4 +#define MADERA_AIF4_RX_BCLK_RATE 0x5A6 +#define MADERA_AIF4_FRAME_CTRL_1 0x5A7 +#define MADERA_AIF4_FRAME_CTRL_2 0x5A8 +#define MADERA_AIF4_FRAME_CTRL_3 0x5A9 +#define MADERA_AIF4_FRAME_CTRL_4 0x5AA +#define MADERA_AIF4_FRAME_CTRL_11 0x5B1 +#define MADERA_AIF4_FRAME_CTRL_12 0x5B2 +#define MADERA_AIF4_TX_ENABLES 0x5B9 +#define MADERA_AIF4_RX_ENABLES 0x5BA +#define MADERA_AIF4_FORCE_WRITE 0x5BB +#define MADERA_SPD1_TX_CONTROL 0x5C2 +#define MADERA_SPD1_TX_CHANNEL_STATUS_1 0x5C3 +#define MADERA_SPD1_TX_CHANNEL_STATUS_2 0x5C4 +#define MADERA_SPD1_TX_CHANNEL_STATUS_3 0x5C5 +#define MADERA_SLIMBUS_FRAMER_REF_GEAR 0x5E3 +#define MADERA_SLIMBUS_RATES_1 0x5E5 +#define MADERA_SLIMBUS_RATES_2 0x5E6 +#define MADERA_SLIMBUS_RATES_3 0x5E7 +#define MADERA_SLIMBUS_RATES_4 0x5E8 +#define MADERA_SLIMBUS_RATES_5 0x5E9 +#define MADERA_SLIMBUS_RATES_6 0x5EA +#define MADERA_SLIMBUS_RATES_7 0x5EB +#define MADERA_SLIMBUS_RATES_8 0x5EC +#define MADERA_SLIMBUS_RX_CHANNEL_ENABLE 0x5F5 +#define MADERA_SLIMBUS_TX_CHANNEL_ENABLE 0x5F6 +#define MADERA_SLIMBUS_RX_PORT_STATUS 0x5F7 +#define MADERA_SLIMBUS_TX_PORT_STATUS 0x5F8 +#define MADERA_PWM1MIX_INPUT_1_SOURCE 0x640 +#define MADERA_PWM1MIX_INPUT_1_VOLUME 0x641 +#define MADERA_PWM1MIX_INPUT_2_SOURCE 0x642 +#define MADERA_PWM1MIX_INPUT_2_VOLUME 0x643 +#define MADERA_PWM1MIX_INPUT_3_SOURCE 0x644 +#define MADERA_PWM1MIX_INPUT_3_VOLUME 0x645 +#define MADERA_PWM1MIX_INPUT_4_SOURCE 0x646 +#define MADERA_PWM1MIX_INPUT_4_VOLUME 0x647 +#define MADERA_PWM2MIX_INPUT_1_SOURCE 0x648 +#define MADERA_PWM2MIX_INPUT_1_VOLUME 0x649 +#define MADERA_PWM2MIX_INPUT_2_SOURCE 0x64A +#define MADERA_PWM2MIX_INPUT_2_VOLUME 0x64B +#define MADERA_PWM2MIX_INPUT_3_SOURCE 0x64C +#define MADERA_PWM2MIX_INPUT_3_VOLUME 0x64D +#define MADERA_PWM2MIX_INPUT_4_SOURCE 0x64E +#define MADERA_PWM2MIX_INPUT_4_VOLUME 0x64F +#define MADERA_OUT1LMIX_INPUT_1_SOURCE 0x680 +#define MADERA_OUT1LMIX_INPUT_1_VOLUME 0x681 +#define MADERA_OUT1LMIX_INPUT_2_SOURCE 0x682 +#define MADERA_OUT1LMIX_INPUT_2_VOLUME 0x683 +#define MADERA_OUT1LMIX_INPUT_3_SOURCE 0x684 +#define MADERA_OUT1LMIX_INPUT_3_VOLUME 0x685 +#define MADERA_OUT1LMIX_INPUT_4_SOURCE 0x686 +#define MADERA_OUT1LMIX_INPUT_4_VOLUME 0x687 +#define MADERA_OUT1RMIX_INPUT_1_SOURCE 0x688 +#define MADERA_OUT1RMIX_INPUT_1_VOLUME 0x689 +#define MADERA_OUT1RMIX_INPUT_2_SOURCE 0x68A +#define MADERA_OUT1RMIX_INPUT_2_VOLUME 0x68B +#define MADERA_OUT1RMIX_INPUT_3_SOURCE 0x68C +#define MADERA_OUT1RMIX_INPUT_3_VOLUME 0x68D +#define MADERA_OUT1RMIX_INPUT_4_SOURCE 0x68E +#define MADERA_OUT1RMIX_INPUT_4_VOLUME 0x68F +#define MADERA_OUT2LMIX_INPUT_1_SOURCE 0x690 +#define MADERA_OUT2LMIX_INPUT_1_VOLUME 0x691 +#define MADERA_OUT2LMIX_INPUT_2_SOURCE 0x692 +#define MADERA_OUT2LMIX_INPUT_2_VOLUME 0x693 +#define MADERA_OUT2LMIX_INPUT_3_SOURCE 0x694 +#define MADERA_OUT2LMIX_INPUT_3_VOLUME 0x695 +#define MADERA_OUT2LMIX_INPUT_4_SOURCE 0x696 +#define MADERA_OUT2LMIX_INPUT_4_VOLUME 0x697 +#define MADERA_OUT2RMIX_INPUT_1_SOURCE 0x698 +#define MADERA_OUT2RMIX_INPUT_1_VOLUME 0x699 +#define MADERA_OUT2RMIX_INPUT_2_SOURCE 0x69A +#define MADERA_OUT2RMIX_INPUT_2_VOLUME 0x69B +#define MADERA_OUT2RMIX_INPUT_3_SOURCE 0x69C +#define 
MADERA_OUT2RMIX_INPUT_3_VOLUME 0x69D +#define MADERA_OUT2RMIX_INPUT_4_SOURCE 0x69E +#define MADERA_OUT2RMIX_INPUT_4_VOLUME 0x69F +#define MADERA_OUT3LMIX_INPUT_1_SOURCE 0x6A0 +#define MADERA_OUT3LMIX_INPUT_1_VOLUME 0x6A1 +#define MADERA_OUT3LMIX_INPUT_2_SOURCE 0x6A2 +#define MADERA_OUT3LMIX_INPUT_2_VOLUME 0x6A3 +#define MADERA_OUT3LMIX_INPUT_3_SOURCE 0x6A4 +#define MADERA_OUT3LMIX_INPUT_3_VOLUME 0x6A5 +#define MADERA_OUT3LMIX_INPUT_4_SOURCE 0x6A6 +#define MADERA_OUT3LMIX_INPUT_4_VOLUME 0x6A7 +#define MADERA_OUT3RMIX_INPUT_1_SOURCE 0x6A8 +#define MADERA_OUT3RMIX_INPUT_1_VOLUME 0x6A9 +#define MADERA_OUT3RMIX_INPUT_2_SOURCE 0x6AA +#define MADERA_OUT3RMIX_INPUT_2_VOLUME 0x6AB +#define MADERA_OUT3RMIX_INPUT_3_SOURCE 0x6AC +#define MADERA_OUT3RMIX_INPUT_3_VOLUME 0x6AD +#define MADERA_OUT3RMIX_INPUT_4_SOURCE 0x6AE +#define MADERA_OUT3RMIX_INPUT_4_VOLUME 0x6AF +#define MADERA_OUT4LMIX_INPUT_1_SOURCE 0x6B0 +#define MADERA_OUT4LMIX_INPUT_1_VOLUME 0x6B1 +#define MADERA_OUT4LMIX_INPUT_2_SOURCE 0x6B2 +#define MADERA_OUT4LMIX_INPUT_2_VOLUME 0x6B3 +#define MADERA_OUT4LMIX_INPUT_3_SOURCE 0x6B4 +#define MADERA_OUT4LMIX_INPUT_3_VOLUME 0x6B5 +#define MADERA_OUT4LMIX_INPUT_4_SOURCE 0x6B6 +#define MADERA_OUT4LMIX_INPUT_4_VOLUME 0x6B7 +#define MADERA_OUT4RMIX_INPUT_1_SOURCE 0x6B8 +#define MADERA_OUT4RMIX_INPUT_1_VOLUME 0x6B9 +#define MADERA_OUT4RMIX_INPUT_2_SOURCE 0x6BA +#define MADERA_OUT4RMIX_INPUT_2_VOLUME 0x6BB +#define MADERA_OUT4RMIX_INPUT_3_SOURCE 0x6BC +#define MADERA_OUT4RMIX_INPUT_3_VOLUME 0x6BD +#define MADERA_OUT4RMIX_INPUT_4_SOURCE 0x6BE +#define MADERA_OUT4RMIX_INPUT_4_VOLUME 0x6BF +#define MADERA_OUT5LMIX_INPUT_1_SOURCE 0x6C0 +#define MADERA_OUT5LMIX_INPUT_1_VOLUME 0x6C1 +#define MADERA_OUT5LMIX_INPUT_2_SOURCE 0x6C2 +#define MADERA_OUT5LMIX_INPUT_2_VOLUME 0x6C3 +#define MADERA_OUT5LMIX_INPUT_3_SOURCE 0x6C4 +#define MADERA_OUT5LMIX_INPUT_3_VOLUME 0x6C5 +#define MADERA_OUT5LMIX_INPUT_4_SOURCE 0x6C6 +#define MADERA_OUT5LMIX_INPUT_4_VOLUME 0x6C7 +#define MADERA_OUT5RMIX_INPUT_1_SOURCE 0x6C8 +#define MADERA_OUT5RMIX_INPUT_1_VOLUME 0x6C9 +#define MADERA_OUT5RMIX_INPUT_2_SOURCE 0x6CA +#define MADERA_OUT5RMIX_INPUT_2_VOLUME 0x6CB +#define MADERA_OUT5RMIX_INPUT_3_SOURCE 0x6CC +#define MADERA_OUT5RMIX_INPUT_3_VOLUME 0x6CD +#define MADERA_OUT5RMIX_INPUT_4_SOURCE 0x6CE +#define MADERA_OUT5RMIX_INPUT_4_VOLUME 0x6CF +#define MADERA_OUT6LMIX_INPUT_1_SOURCE 0x6D0 +#define MADERA_OUT6LMIX_INPUT_1_VOLUME 0x6D1 +#define MADERA_OUT6LMIX_INPUT_2_SOURCE 0x6D2 +#define MADERA_OUT6LMIX_INPUT_2_VOLUME 0x6D3 +#define MADERA_OUT6LMIX_INPUT_3_SOURCE 0x6D4 +#define MADERA_OUT6LMIX_INPUT_3_VOLUME 0x6D5 +#define MADERA_OUT6LMIX_INPUT_4_SOURCE 0x6D6 +#define MADERA_OUT6LMIX_INPUT_4_VOLUME 0x6D7 +#define MADERA_OUT6RMIX_INPUT_1_SOURCE 0x6D8 +#define MADERA_OUT6RMIX_INPUT_1_VOLUME 0x6D9 +#define MADERA_OUT6RMIX_INPUT_2_SOURCE 0x6DA +#define MADERA_OUT6RMIX_INPUT_2_VOLUME 0x6DB +#define MADERA_OUT6RMIX_INPUT_3_SOURCE 0x6DC +#define MADERA_OUT6RMIX_INPUT_3_VOLUME 0x6DD +#define MADERA_OUT6RMIX_INPUT_4_SOURCE 0x6DE +#define MADERA_OUT6RMIX_INPUT_4_VOLUME 0x6DF +#define MADERA_AIF1TX1MIX_INPUT_1_SOURCE 0x700 +#define MADERA_AIF1TX1MIX_INPUT_1_VOLUME 0x701 +#define MADERA_AIF1TX1MIX_INPUT_2_SOURCE 0x702 +#define MADERA_AIF1TX1MIX_INPUT_2_VOLUME 0x703 +#define MADERA_AIF1TX1MIX_INPUT_3_SOURCE 0x704 +#define MADERA_AIF1TX1MIX_INPUT_3_VOLUME 0x705 +#define MADERA_AIF1TX1MIX_INPUT_4_SOURCE 0x706 +#define MADERA_AIF1TX1MIX_INPUT_4_VOLUME 0x707 +#define MADERA_AIF1TX2MIX_INPUT_1_SOURCE 0x708 +#define MADERA_AIF1TX2MIX_INPUT_1_VOLUME 0x709 
+#define MADERA_AIF1TX2MIX_INPUT_2_SOURCE 0x70A +#define MADERA_AIF1TX2MIX_INPUT_2_VOLUME 0x70B +#define MADERA_AIF1TX2MIX_INPUT_3_SOURCE 0x70C +#define MADERA_AIF1TX2MIX_INPUT_3_VOLUME 0x70D +#define MADERA_AIF1TX2MIX_INPUT_4_SOURCE 0x70E +#define MADERA_AIF1TX2MIX_INPUT_4_VOLUME 0x70F +#define MADERA_AIF1TX3MIX_INPUT_1_SOURCE 0x710 +#define MADERA_AIF1TX3MIX_INPUT_1_VOLUME 0x711 +#define MADERA_AIF1TX3MIX_INPUT_2_SOURCE 0x712 +#define MADERA_AIF1TX3MIX_INPUT_2_VOLUME 0x713 +#define MADERA_AIF1TX3MIX_INPUT_3_SOURCE 0x714 +#define MADERA_AIF1TX3MIX_INPUT_3_VOLUME 0x715 +#define MADERA_AIF1TX3MIX_INPUT_4_SOURCE 0x716 +#define MADERA_AIF1TX3MIX_INPUT_4_VOLUME 0x717 +#define MADERA_AIF1TX4MIX_INPUT_1_SOURCE 0x718 +#define MADERA_AIF1TX4MIX_INPUT_1_VOLUME 0x719 +#define MADERA_AIF1TX4MIX_INPUT_2_SOURCE 0x71A +#define MADERA_AIF1TX4MIX_INPUT_2_VOLUME 0x71B +#define MADERA_AIF1TX4MIX_INPUT_3_SOURCE 0x71C +#define MADERA_AIF1TX4MIX_INPUT_3_VOLUME 0x71D +#define MADERA_AIF1TX4MIX_INPUT_4_SOURCE 0x71E +#define MADERA_AIF1TX4MIX_INPUT_4_VOLUME 0x71F +#define MADERA_AIF1TX5MIX_INPUT_1_SOURCE 0x720 +#define MADERA_AIF1TX5MIX_INPUT_1_VOLUME 0x721 +#define MADERA_AIF1TX5MIX_INPUT_2_SOURCE 0x722 +#define MADERA_AIF1TX5MIX_INPUT_2_VOLUME 0x723 +#define MADERA_AIF1TX5MIX_INPUT_3_SOURCE 0x724 +#define MADERA_AIF1TX5MIX_INPUT_3_VOLUME 0x725 +#define MADERA_AIF1TX5MIX_INPUT_4_SOURCE 0x726 +#define MADERA_AIF1TX5MIX_INPUT_4_VOLUME 0x727 +#define MADERA_AIF1TX6MIX_INPUT_1_SOURCE 0x728 +#define MADERA_AIF1TX6MIX_INPUT_1_VOLUME 0x729 +#define MADERA_AIF1TX6MIX_INPUT_2_SOURCE 0x72A +#define MADERA_AIF1TX6MIX_INPUT_2_VOLUME 0x72B +#define MADERA_AIF1TX6MIX_INPUT_3_SOURCE 0x72C +#define MADERA_AIF1TX6MIX_INPUT_3_VOLUME 0x72D +#define MADERA_AIF1TX6MIX_INPUT_4_SOURCE 0x72E +#define MADERA_AIF1TX6MIX_INPUT_4_VOLUME 0x72F +#define MADERA_AIF1TX7MIX_INPUT_1_SOURCE 0x730 +#define MADERA_AIF1TX7MIX_INPUT_1_VOLUME 0x731 +#define MADERA_AIF1TX7MIX_INPUT_2_SOURCE 0x732 +#define MADERA_AIF1TX7MIX_INPUT_2_VOLUME 0x733 +#define MADERA_AIF1TX7MIX_INPUT_3_SOURCE 0x734 +#define MADERA_AIF1TX7MIX_INPUT_3_VOLUME 0x735 +#define MADERA_AIF1TX7MIX_INPUT_4_SOURCE 0x736 +#define MADERA_AIF1TX7MIX_INPUT_4_VOLUME 0x737 +#define MADERA_AIF1TX8MIX_INPUT_1_SOURCE 0x738 +#define MADERA_AIF1TX8MIX_INPUT_1_VOLUME 0x739 +#define MADERA_AIF1TX8MIX_INPUT_2_SOURCE 0x73A +#define MADERA_AIF1TX8MIX_INPUT_2_VOLUME 0x73B +#define MADERA_AIF1TX8MIX_INPUT_3_SOURCE 0x73C +#define MADERA_AIF1TX8MIX_INPUT_3_VOLUME 0x73D +#define MADERA_AIF1TX8MIX_INPUT_4_SOURCE 0x73E +#define MADERA_AIF1TX8MIX_INPUT_4_VOLUME 0x73F +#define MADERA_AIF2TX1MIX_INPUT_1_SOURCE 0x740 +#define MADERA_AIF2TX1MIX_INPUT_1_VOLUME 0x741 +#define MADERA_AIF2TX1MIX_INPUT_2_SOURCE 0x742 +#define MADERA_AIF2TX1MIX_INPUT_2_VOLUME 0x743 +#define MADERA_AIF2TX1MIX_INPUT_3_SOURCE 0x744 +#define MADERA_AIF2TX1MIX_INPUT_3_VOLUME 0x745 +#define MADERA_AIF2TX1MIX_INPUT_4_SOURCE 0x746 +#define MADERA_AIF2TX1MIX_INPUT_4_VOLUME 0x747 +#define MADERA_AIF2TX2MIX_INPUT_1_SOURCE 0x748 +#define MADERA_AIF2TX2MIX_INPUT_1_VOLUME 0x749 +#define MADERA_AIF2TX2MIX_INPUT_2_SOURCE 0x74A +#define MADERA_AIF2TX2MIX_INPUT_2_VOLUME 0x74B +#define MADERA_AIF2TX2MIX_INPUT_3_SOURCE 0x74C +#define MADERA_AIF2TX2MIX_INPUT_3_VOLUME 0x74D +#define MADERA_AIF2TX2MIX_INPUT_4_SOURCE 0x74E +#define MADERA_AIF2TX2MIX_INPUT_4_VOLUME 0x74F +#define MADERA_AIF2TX3MIX_INPUT_1_SOURCE 0x750 +#define MADERA_AIF2TX3MIX_INPUT_1_VOLUME 0x751 +#define MADERA_AIF2TX3MIX_INPUT_2_SOURCE 0x752 +#define MADERA_AIF2TX3MIX_INPUT_2_VOLUME 0x753 
+#define MADERA_AIF2TX3MIX_INPUT_3_SOURCE 0x754 +#define MADERA_AIF2TX3MIX_INPUT_3_VOLUME 0x755 +#define MADERA_AIF2TX3MIX_INPUT_4_SOURCE 0x756 +#define MADERA_AIF2TX3MIX_INPUT_4_VOLUME 0x757 +#define MADERA_AIF2TX4MIX_INPUT_1_SOURCE 0x758 +#define MADERA_AIF2TX4MIX_INPUT_1_VOLUME 0x759 +#define MADERA_AIF2TX4MIX_INPUT_2_SOURCE 0x75A +#define MADERA_AIF2TX4MIX_INPUT_2_VOLUME 0x75B +#define MADERA_AIF2TX4MIX_INPUT_3_SOURCE 0x75C +#define MADERA_AIF2TX4MIX_INPUT_3_VOLUME 0x75D +#define MADERA_AIF2TX4MIX_INPUT_4_SOURCE 0x75E +#define MADERA_AIF2TX4MIX_INPUT_4_VOLUME 0x75F +#define MADERA_AIF2TX5MIX_INPUT_1_SOURCE 0x760 +#define MADERA_AIF2TX5MIX_INPUT_1_VOLUME 0x761 +#define MADERA_AIF2TX5MIX_INPUT_2_SOURCE 0x762 +#define MADERA_AIF2TX5MIX_INPUT_2_VOLUME 0x763 +#define MADERA_AIF2TX5MIX_INPUT_3_SOURCE 0x764 +#define MADERA_AIF2TX5MIX_INPUT_3_VOLUME 0x765 +#define MADERA_AIF2TX5MIX_INPUT_4_SOURCE 0x766 +#define MADERA_AIF2TX5MIX_INPUT_4_VOLUME 0x767 +#define MADERA_AIF2TX6MIX_INPUT_1_SOURCE 0x768 +#define MADERA_AIF2TX6MIX_INPUT_1_VOLUME 0x769 +#define MADERA_AIF2TX6MIX_INPUT_2_SOURCE 0x76A +#define MADERA_AIF2TX6MIX_INPUT_2_VOLUME 0x76B +#define MADERA_AIF2TX6MIX_INPUT_3_SOURCE 0x76C +#define MADERA_AIF2TX6MIX_INPUT_3_VOLUME 0x76D +#define MADERA_AIF2TX6MIX_INPUT_4_SOURCE 0x76E +#define MADERA_AIF2TX6MIX_INPUT_4_VOLUME 0x76F +#define MADERA_AIF2TX7MIX_INPUT_1_SOURCE 0x770 +#define MADERA_AIF2TX7MIX_INPUT_1_VOLUME 0x771 +#define MADERA_AIF2TX7MIX_INPUT_2_SOURCE 0x772 +#define MADERA_AIF2TX7MIX_INPUT_2_VOLUME 0x773 +#define MADERA_AIF2TX7MIX_INPUT_3_SOURCE 0x774 +#define MADERA_AIF2TX7MIX_INPUT_3_VOLUME 0x775 +#define MADERA_AIF2TX7MIX_INPUT_4_SOURCE 0x776 +#define MADERA_AIF2TX7MIX_INPUT_4_VOLUME 0x777 +#define MADERA_AIF2TX8MIX_INPUT_1_SOURCE 0x778 +#define MADERA_AIF2TX8MIX_INPUT_1_VOLUME 0x779 +#define MADERA_AIF2TX8MIX_INPUT_2_SOURCE 0x77A +#define MADERA_AIF2TX8MIX_INPUT_2_VOLUME 0x77B +#define MADERA_AIF2TX8MIX_INPUT_3_SOURCE 0x77C +#define MADERA_AIF2TX8MIX_INPUT_3_VOLUME 0x77D +#define MADERA_AIF2TX8MIX_INPUT_4_SOURCE 0x77E +#define MADERA_AIF2TX8MIX_INPUT_4_VOLUME 0x77F +#define MADERA_AIF3TX1MIX_INPUT_1_SOURCE 0x780 +#define MADERA_AIF3TX1MIX_INPUT_1_VOLUME 0x781 +#define MADERA_AIF3TX1MIX_INPUT_2_SOURCE 0x782 +#define MADERA_AIF3TX1MIX_INPUT_2_VOLUME 0x783 +#define MADERA_AIF3TX1MIX_INPUT_3_SOURCE 0x784 +#define MADERA_AIF3TX1MIX_INPUT_3_VOLUME 0x785 +#define MADERA_AIF3TX1MIX_INPUT_4_SOURCE 0x786 +#define MADERA_AIF3TX1MIX_INPUT_4_VOLUME 0x787 +#define MADERA_AIF3TX2MIX_INPUT_1_SOURCE 0x788 +#define MADERA_AIF3TX2MIX_INPUT_1_VOLUME 0x789 +#define MADERA_AIF3TX2MIX_INPUT_2_SOURCE 0x78A +#define MADERA_AIF3TX2MIX_INPUT_2_VOLUME 0x78B +#define MADERA_AIF3TX2MIX_INPUT_3_SOURCE 0x78C +#define MADERA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D +#define MADERA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E +#define MADERA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F +#define MADERA_AIF4TX1MIX_INPUT_1_SOURCE 0x7A0 +#define MADERA_AIF4TX1MIX_INPUT_1_VOLUME 0x7A1 +#define MADERA_AIF4TX1MIX_INPUT_2_SOURCE 0x7A2 +#define MADERA_AIF4TX1MIX_INPUT_2_VOLUME 0x7A3 +#define MADERA_AIF4TX1MIX_INPUT_3_SOURCE 0x7A4 +#define MADERA_AIF4TX1MIX_INPUT_3_VOLUME 0x7A5 +#define MADERA_AIF4TX1MIX_INPUT_4_SOURCE 0x7A6 +#define MADERA_AIF4TX1MIX_INPUT_4_VOLUME 0x7A7 +#define MADERA_AIF4TX2MIX_INPUT_1_SOURCE 0x7A8 +#define MADERA_AIF4TX2MIX_INPUT_1_VOLUME 0x7A9 +#define MADERA_AIF4TX2MIX_INPUT_2_SOURCE 0x7AA +#define MADERA_AIF4TX2MIX_INPUT_2_VOLUME 0x7AB +#define MADERA_AIF4TX2MIX_INPUT_3_SOURCE 0x7AC +#define MADERA_AIF4TX2MIX_INPUT_3_VOLUME 0x7AD 
+#define MADERA_AIF4TX2MIX_INPUT_4_SOURCE 0x7AE +#define MADERA_AIF4TX2MIX_INPUT_4_VOLUME 0x7AF +#define MADERA_SLIMTX1MIX_INPUT_1_SOURCE 0x7C0 +#define MADERA_SLIMTX1MIX_INPUT_1_VOLUME 0x7C1 +#define MADERA_SLIMTX1MIX_INPUT_2_SOURCE 0x7C2 +#define MADERA_SLIMTX1MIX_INPUT_2_VOLUME 0x7C3 +#define MADERA_SLIMTX1MIX_INPUT_3_SOURCE 0x7C4 +#define MADERA_SLIMTX1MIX_INPUT_3_VOLUME 0x7C5 +#define MADERA_SLIMTX1MIX_INPUT_4_SOURCE 0x7C6 +#define MADERA_SLIMTX1MIX_INPUT_4_VOLUME 0x7C7 +#define MADERA_SLIMTX2MIX_INPUT_1_SOURCE 0x7C8 +#define MADERA_SLIMTX2MIX_INPUT_1_VOLUME 0x7C9 +#define MADERA_SLIMTX2MIX_INPUT_2_SOURCE 0x7CA +#define MADERA_SLIMTX2MIX_INPUT_2_VOLUME 0x7CB +#define MADERA_SLIMTX2MIX_INPUT_3_SOURCE 0x7CC +#define MADERA_SLIMTX2MIX_INPUT_3_VOLUME 0x7CD +#define MADERA_SLIMTX2MIX_INPUT_4_SOURCE 0x7CE +#define MADERA_SLIMTX2MIX_INPUT_4_VOLUME 0x7CF +#define MADERA_SLIMTX3MIX_INPUT_1_SOURCE 0x7D0 +#define MADERA_SLIMTX3MIX_INPUT_1_VOLUME 0x7D1 +#define MADERA_SLIMTX3MIX_INPUT_2_SOURCE 0x7D2 +#define MADERA_SLIMTX3MIX_INPUT_2_VOLUME 0x7D3 +#define MADERA_SLIMTX3MIX_INPUT_3_SOURCE 0x7D4 +#define MADERA_SLIMTX3MIX_INPUT_3_VOLUME 0x7D5 +#define MADERA_SLIMTX3MIX_INPUT_4_SOURCE 0x7D6 +#define MADERA_SLIMTX3MIX_INPUT_4_VOLUME 0x7D7 +#define MADERA_SLIMTX4MIX_INPUT_1_SOURCE 0x7D8 +#define MADERA_SLIMTX4MIX_INPUT_1_VOLUME 0x7D9 +#define MADERA_SLIMTX4MIX_INPUT_2_SOURCE 0x7DA +#define MADERA_SLIMTX4MIX_INPUT_2_VOLUME 0x7DB +#define MADERA_SLIMTX4MIX_INPUT_3_SOURCE 0x7DC +#define MADERA_SLIMTX4MIX_INPUT_3_VOLUME 0x7DD +#define MADERA_SLIMTX4MIX_INPUT_4_SOURCE 0x7DE +#define MADERA_SLIMTX4MIX_INPUT_4_VOLUME 0x7DF +#define MADERA_SLIMTX5MIX_INPUT_1_SOURCE 0x7E0 +#define MADERA_SLIMTX5MIX_INPUT_1_VOLUME 0x7E1 +#define MADERA_SLIMTX5MIX_INPUT_2_SOURCE 0x7E2 +#define MADERA_SLIMTX5MIX_INPUT_2_VOLUME 0x7E3 +#define MADERA_SLIMTX5MIX_INPUT_3_SOURCE 0x7E4 +#define MADERA_SLIMTX5MIX_INPUT_3_VOLUME 0x7E5 +#define MADERA_SLIMTX5MIX_INPUT_4_SOURCE 0x7E6 +#define MADERA_SLIMTX5MIX_INPUT_4_VOLUME 0x7E7 +#define MADERA_SLIMTX6MIX_INPUT_1_SOURCE 0x7E8 +#define MADERA_SLIMTX6MIX_INPUT_1_VOLUME 0x7E9 +#define MADERA_SLIMTX6MIX_INPUT_2_SOURCE 0x7EA +#define MADERA_SLIMTX6MIX_INPUT_2_VOLUME 0x7EB +#define MADERA_SLIMTX6MIX_INPUT_3_SOURCE 0x7EC +#define MADERA_SLIMTX6MIX_INPUT_3_VOLUME 0x7ED +#define MADERA_SLIMTX6MIX_INPUT_4_SOURCE 0x7EE +#define MADERA_SLIMTX6MIX_INPUT_4_VOLUME 0x7EF +#define MADERA_SLIMTX7MIX_INPUT_1_SOURCE 0x7F0 +#define MADERA_SLIMTX7MIX_INPUT_1_VOLUME 0x7F1 +#define MADERA_SLIMTX7MIX_INPUT_2_SOURCE 0x7F2 +#define MADERA_SLIMTX7MIX_INPUT_2_VOLUME 0x7F3 +#define MADERA_SLIMTX7MIX_INPUT_3_SOURCE 0x7F4 +#define MADERA_SLIMTX7MIX_INPUT_3_VOLUME 0x7F5 +#define MADERA_SLIMTX7MIX_INPUT_4_SOURCE 0x7F6 +#define MADERA_SLIMTX7MIX_INPUT_4_VOLUME 0x7F7 +#define MADERA_SLIMTX8MIX_INPUT_1_SOURCE 0x7F8 +#define MADERA_SLIMTX8MIX_INPUT_1_VOLUME 0x7F9 +#define MADERA_SLIMTX8MIX_INPUT_2_SOURCE 0x7FA +#define MADERA_SLIMTX8MIX_INPUT_2_VOLUME 0x7FB +#define MADERA_SLIMTX8MIX_INPUT_3_SOURCE 0x7FC +#define MADERA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD +#define MADERA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE +#define MADERA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF +#define MADERA_SPDIF1TX1MIX_INPUT_1_SOURCE 0x800 +#define MADERA_SPDIF1TX1MIX_INPUT_1_VOLUME 0x801 +#define MADERA_SPDIF1TX2MIX_INPUT_1_SOURCE 0x808 +#define MADERA_SPDIF1TX2MIX_INPUT_1_VOLUME 0x809 +#define MADERA_EQ1MIX_INPUT_1_SOURCE 0x880 +#define MADERA_EQ1MIX_INPUT_1_VOLUME 0x881 +#define MADERA_EQ1MIX_INPUT_2_SOURCE 0x882 +#define MADERA_EQ1MIX_INPUT_2_VOLUME 0x883 +#define 
MADERA_EQ1MIX_INPUT_3_SOURCE 0x884 +#define MADERA_EQ1MIX_INPUT_3_VOLUME 0x885 +#define MADERA_EQ1MIX_INPUT_4_SOURCE 0x886 +#define MADERA_EQ1MIX_INPUT_4_VOLUME 0x887 +#define MADERA_EQ2MIX_INPUT_1_SOURCE 0x888 +#define MADERA_EQ2MIX_INPUT_1_VOLUME 0x889 +#define MADERA_EQ2MIX_INPUT_2_SOURCE 0x88A +#define MADERA_EQ2MIX_INPUT_2_VOLUME 0x88B +#define MADERA_EQ2MIX_INPUT_3_SOURCE 0x88C +#define MADERA_EQ2MIX_INPUT_3_VOLUME 0x88D +#define MADERA_EQ2MIX_INPUT_4_SOURCE 0x88E +#define MADERA_EQ2MIX_INPUT_4_VOLUME 0x88F +#define MADERA_EQ3MIX_INPUT_1_SOURCE 0x890 +#define MADERA_EQ3MIX_INPUT_1_VOLUME 0x891 +#define MADERA_EQ3MIX_INPUT_2_SOURCE 0x892 +#define MADERA_EQ3MIX_INPUT_2_VOLUME 0x893 +#define MADERA_EQ3MIX_INPUT_3_SOURCE 0x894 +#define MADERA_EQ3MIX_INPUT_3_VOLUME 0x895 +#define MADERA_EQ3MIX_INPUT_4_SOURCE 0x896 +#define MADERA_EQ3MIX_INPUT_4_VOLUME 0x897 +#define MADERA_EQ4MIX_INPUT_1_SOURCE 0x898 +#define MADERA_EQ4MIX_INPUT_1_VOLUME 0x899 +#define MADERA_EQ4MIX_INPUT_2_SOURCE 0x89A +#define MADERA_EQ4MIX_INPUT_2_VOLUME 0x89B +#define MADERA_EQ4MIX_INPUT_3_SOURCE 0x89C +#define MADERA_EQ4MIX_INPUT_3_VOLUME 0x89D +#define MADERA_EQ4MIX_INPUT_4_SOURCE 0x89E +#define MADERA_EQ4MIX_INPUT_4_VOLUME 0x89F +#define MADERA_DRC1LMIX_INPUT_1_SOURCE 0x8C0 +#define MADERA_DRC1LMIX_INPUT_1_VOLUME 0x8C1 +#define MADERA_DRC1LMIX_INPUT_2_SOURCE 0x8C2 +#define MADERA_DRC1LMIX_INPUT_2_VOLUME 0x8C3 +#define MADERA_DRC1LMIX_INPUT_3_SOURCE 0x8C4 +#define MADERA_DRC1LMIX_INPUT_3_VOLUME 0x8C5 +#define MADERA_DRC1LMIX_INPUT_4_SOURCE 0x8C6 +#define MADERA_DRC1LMIX_INPUT_4_VOLUME 0x8C7 +#define MADERA_DRC1RMIX_INPUT_1_SOURCE 0x8C8 +#define MADERA_DRC1RMIX_INPUT_1_VOLUME 0x8C9 +#define MADERA_DRC1RMIX_INPUT_2_SOURCE 0x8CA +#define MADERA_DRC1RMIX_INPUT_2_VOLUME 0x8CB +#define MADERA_DRC1RMIX_INPUT_3_SOURCE 0x8CC +#define MADERA_DRC1RMIX_INPUT_3_VOLUME 0x8CD +#define MADERA_DRC1RMIX_INPUT_4_SOURCE 0x8CE +#define MADERA_DRC1RMIX_INPUT_4_VOLUME 0x8CF +#define MADERA_DRC2LMIX_INPUT_1_SOURCE 0x8D0 +#define MADERA_DRC2LMIX_INPUT_1_VOLUME 0x8D1 +#define MADERA_DRC2LMIX_INPUT_2_SOURCE 0x8D2 +#define MADERA_DRC2LMIX_INPUT_2_VOLUME 0x8D3 +#define MADERA_DRC2LMIX_INPUT_3_SOURCE 0x8D4 +#define MADERA_DRC2LMIX_INPUT_3_VOLUME 0x8D5 +#define MADERA_DRC2LMIX_INPUT_4_SOURCE 0x8D6 +#define MADERA_DRC2LMIX_INPUT_4_VOLUME 0x8D7 +#define MADERA_DRC2RMIX_INPUT_1_SOURCE 0x8D8 +#define MADERA_DRC2RMIX_INPUT_1_VOLUME 0x8D9 +#define MADERA_DRC2RMIX_INPUT_2_SOURCE 0x8DA +#define MADERA_DRC2RMIX_INPUT_2_VOLUME 0x8DB +#define MADERA_DRC2RMIX_INPUT_3_SOURCE 0x8DC +#define MADERA_DRC2RMIX_INPUT_3_VOLUME 0x8DD +#define MADERA_DRC2RMIX_INPUT_4_SOURCE 0x8DE +#define MADERA_DRC2RMIX_INPUT_4_VOLUME 0x8DF +#define MADERA_HPLP1MIX_INPUT_1_SOURCE 0x900 +#define MADERA_HPLP1MIX_INPUT_1_VOLUME 0x901 +#define MADERA_HPLP1MIX_INPUT_2_SOURCE 0x902 +#define MADERA_HPLP1MIX_INPUT_2_VOLUME 0x903 +#define MADERA_HPLP1MIX_INPUT_3_SOURCE 0x904 +#define MADERA_HPLP1MIX_INPUT_3_VOLUME 0x905 +#define MADERA_HPLP1MIX_INPUT_4_SOURCE 0x906 +#define MADERA_HPLP1MIX_INPUT_4_VOLUME 0x907 +#define MADERA_HPLP2MIX_INPUT_1_SOURCE 0x908 +#define MADERA_HPLP2MIX_INPUT_1_VOLUME 0x909 +#define MADERA_HPLP2MIX_INPUT_2_SOURCE 0x90A +#define MADERA_HPLP2MIX_INPUT_2_VOLUME 0x90B +#define MADERA_HPLP2MIX_INPUT_3_SOURCE 0x90C +#define MADERA_HPLP2MIX_INPUT_3_VOLUME 0x90D +#define MADERA_HPLP2MIX_INPUT_4_SOURCE 0x90E +#define MADERA_HPLP2MIX_INPUT_4_VOLUME 0x90F +#define MADERA_HPLP3MIX_INPUT_1_SOURCE 0x910 +#define MADERA_HPLP3MIX_INPUT_1_VOLUME 0x911 +#define 
MADERA_HPLP3MIX_INPUT_2_SOURCE 0x912 +#define MADERA_HPLP3MIX_INPUT_2_VOLUME 0x913 +#define MADERA_HPLP3MIX_INPUT_3_SOURCE 0x914 +#define MADERA_HPLP3MIX_INPUT_3_VOLUME 0x915 +#define MADERA_HPLP3MIX_INPUT_4_SOURCE 0x916 +#define MADERA_HPLP3MIX_INPUT_4_VOLUME 0x917 +#define MADERA_HPLP4MIX_INPUT_1_SOURCE 0x918 +#define MADERA_HPLP4MIX_INPUT_1_VOLUME 0x919 +#define MADERA_HPLP4MIX_INPUT_2_SOURCE 0x91A +#define MADERA_HPLP4MIX_INPUT_2_VOLUME 0x91B +#define MADERA_HPLP4MIX_INPUT_3_SOURCE 0x91C +#define MADERA_HPLP4MIX_INPUT_3_VOLUME 0x91D +#define MADERA_HPLP4MIX_INPUT_4_SOURCE 0x91E +#define MADERA_HPLP4MIX_INPUT_4_VOLUME 0x91F +#define MADERA_DSP1LMIX_INPUT_1_SOURCE 0x940 +#define MADERA_DSP1LMIX_INPUT_1_VOLUME 0x941 +#define MADERA_DSP1LMIX_INPUT_2_SOURCE 0x942 +#define MADERA_DSP1LMIX_INPUT_2_VOLUME 0x943 +#define MADERA_DSP1LMIX_INPUT_3_SOURCE 0x944 +#define MADERA_DSP1LMIX_INPUT_3_VOLUME 0x945 +#define MADERA_DSP1LMIX_INPUT_4_SOURCE 0x946 +#define MADERA_DSP1LMIX_INPUT_4_VOLUME 0x947 +#define MADERA_DSP1RMIX_INPUT_1_SOURCE 0x948 +#define MADERA_DSP1RMIX_INPUT_1_VOLUME 0x949 +#define MADERA_DSP1RMIX_INPUT_2_SOURCE 0x94A +#define MADERA_DSP1RMIX_INPUT_2_VOLUME 0x94B +#define MADERA_DSP1RMIX_INPUT_3_SOURCE 0x94C +#define MADERA_DSP1RMIX_INPUT_3_VOLUME 0x94D +#define MADERA_DSP1RMIX_INPUT_4_SOURCE 0x94E +#define MADERA_DSP1RMIX_INPUT_4_VOLUME 0x94F +#define MADERA_DSP1AUX1MIX_INPUT_1_SOURCE 0x950 +#define MADERA_DSP1AUX2MIX_INPUT_1_SOURCE 0x958 +#define MADERA_DSP1AUX3MIX_INPUT_1_SOURCE 0x960 +#define MADERA_DSP1AUX4MIX_INPUT_1_SOURCE 0x968 +#define MADERA_DSP1AUX5MIX_INPUT_1_SOURCE 0x970 +#define MADERA_DSP1AUX6MIX_INPUT_1_SOURCE 0x978 +#define MADERA_DSP2LMIX_INPUT_1_SOURCE 0x980 +#define MADERA_DSP2LMIX_INPUT_1_VOLUME 0x981 +#define MADERA_DSP2LMIX_INPUT_2_SOURCE 0x982 +#define MADERA_DSP2LMIX_INPUT_2_VOLUME 0x983 +#define MADERA_DSP2LMIX_INPUT_3_SOURCE 0x984 +#define MADERA_DSP2LMIX_INPUT_3_VOLUME 0x985 +#define MADERA_DSP2LMIX_INPUT_4_SOURCE 0x986 +#define MADERA_DSP2LMIX_INPUT_4_VOLUME 0x987 +#define MADERA_DSP2RMIX_INPUT_1_SOURCE 0x988 +#define MADERA_DSP2RMIX_INPUT_1_VOLUME 0x989 +#define MADERA_DSP2RMIX_INPUT_2_SOURCE 0x98A +#define MADERA_DSP2RMIX_INPUT_2_VOLUME 0x98B +#define MADERA_DSP2RMIX_INPUT_3_SOURCE 0x98C +#define MADERA_DSP2RMIX_INPUT_3_VOLUME 0x98D +#define MADERA_DSP2RMIX_INPUT_4_SOURCE 0x98E +#define MADERA_DSP2RMIX_INPUT_4_VOLUME 0x98F +#define MADERA_DSP2AUX1MIX_INPUT_1_SOURCE 0x990 +#define MADERA_DSP2AUX2MIX_INPUT_1_SOURCE 0x998 +#define MADERA_DSP2AUX3MIX_INPUT_1_SOURCE 0x9A0 +#define MADERA_DSP2AUX4MIX_INPUT_1_SOURCE 0x9A8 +#define MADERA_DSP2AUX5MIX_INPUT_1_SOURCE 0x9B0 +#define MADERA_DSP2AUX6MIX_INPUT_1_SOURCE 0x9B8 +#define MADERA_DSP3LMIX_INPUT_1_SOURCE 0x9C0 +#define MADERA_DSP3LMIX_INPUT_1_VOLUME 0x9C1 +#define MADERA_DSP3LMIX_INPUT_2_SOURCE 0x9C2 +#define MADERA_DSP3LMIX_INPUT_2_VOLUME 0x9C3 +#define MADERA_DSP3LMIX_INPUT_3_SOURCE 0x9C4 +#define MADERA_DSP3LMIX_INPUT_3_VOLUME 0x9C5 +#define MADERA_DSP3LMIX_INPUT_4_SOURCE 0x9C6 +#define MADERA_DSP3LMIX_INPUT_4_VOLUME 0x9C7 +#define MADERA_DSP3RMIX_INPUT_1_SOURCE 0x9C8 +#define MADERA_DSP3RMIX_INPUT_1_VOLUME 0x9C9 +#define MADERA_DSP3RMIX_INPUT_2_SOURCE 0x9CA +#define MADERA_DSP3RMIX_INPUT_2_VOLUME 0x9CB +#define MADERA_DSP3RMIX_INPUT_3_SOURCE 0x9CC +#define MADERA_DSP3RMIX_INPUT_3_VOLUME 0x9CD +#define MADERA_DSP3RMIX_INPUT_4_SOURCE 0x9CE +#define MADERA_DSP3RMIX_INPUT_4_VOLUME 0x9CF +#define MADERA_DSP3AUX1MIX_INPUT_1_SOURCE 0x9D0 +#define MADERA_DSP3AUX2MIX_INPUT_1_SOURCE 0x9D8 +#define 
MADERA_DSP3AUX3MIX_INPUT_1_SOURCE 0x9E0 +#define MADERA_DSP3AUX4MIX_INPUT_1_SOURCE 0x9E8 +#define MADERA_DSP3AUX5MIX_INPUT_1_SOURCE 0x9F0 +#define MADERA_DSP3AUX6MIX_INPUT_1_SOURCE 0x9F8 +#define MADERA_DSP4LMIX_INPUT_1_SOURCE 0xA00 +#define MADERA_DSP4LMIX_INPUT_1_VOLUME 0xA01 +#define MADERA_DSP4LMIX_INPUT_2_SOURCE 0xA02 +#define MADERA_DSP4LMIX_INPUT_2_VOLUME 0xA03 +#define MADERA_DSP4LMIX_INPUT_3_SOURCE 0xA04 +#define MADERA_DSP4LMIX_INPUT_3_VOLUME 0xA05 +#define MADERA_DSP4LMIX_INPUT_4_SOURCE 0xA06 +#define MADERA_DSP4LMIX_INPUT_4_VOLUME 0xA07 +#define MADERA_DSP4RMIX_INPUT_1_SOURCE 0xA08 +#define MADERA_DSP4RMIX_INPUT_1_VOLUME 0xA09 +#define MADERA_DSP4RMIX_INPUT_2_SOURCE 0xA0A +#define MADERA_DSP4RMIX_INPUT_2_VOLUME 0xA0B +#define MADERA_DSP4RMIX_INPUT_3_SOURCE 0xA0C +#define MADERA_DSP4RMIX_INPUT_3_VOLUME 0xA0D +#define MADERA_DSP4RMIX_INPUT_4_SOURCE 0xA0E +#define MADERA_DSP4RMIX_INPUT_4_VOLUME 0xA0F +#define MADERA_DSP4AUX1MIX_INPUT_1_SOURCE 0xA10 +#define MADERA_DSP4AUX2MIX_INPUT_1_SOURCE 0xA18 +#define MADERA_DSP4AUX3MIX_INPUT_1_SOURCE 0xA20 +#define MADERA_DSP4AUX4MIX_INPUT_1_SOURCE 0xA28 +#define MADERA_DSP4AUX5MIX_INPUT_1_SOURCE 0xA30 +#define MADERA_DSP4AUX6MIX_INPUT_1_SOURCE 0xA38 +#define MADERA_DSP5LMIX_INPUT_1_SOURCE 0xA40 +#define MADERA_DSP5LMIX_INPUT_1_VOLUME 0xA41 +#define MADERA_DSP5LMIX_INPUT_2_SOURCE 0xA42 +#define MADERA_DSP5LMIX_INPUT_2_VOLUME 0xA43 +#define MADERA_DSP5LMIX_INPUT_3_SOURCE 0xA44 +#define MADERA_DSP5LMIX_INPUT_3_VOLUME 0xA45 +#define MADERA_DSP5LMIX_INPUT_4_SOURCE 0xA46 +#define MADERA_DSP5LMIX_INPUT_4_VOLUME 0xA47 +#define MADERA_DSP5RMIX_INPUT_1_SOURCE 0xA48 +#define MADERA_DSP5RMIX_INPUT_1_VOLUME 0xA49 +#define MADERA_DSP5RMIX_INPUT_2_SOURCE 0xA4A +#define MADERA_DSP5RMIX_INPUT_2_VOLUME 0xA4B +#define MADERA_DSP5RMIX_INPUT_3_SOURCE 0xA4C +#define MADERA_DSP5RMIX_INPUT_3_VOLUME 0xA4D +#define MADERA_DSP5RMIX_INPUT_4_SOURCE 0xA4E +#define MADERA_DSP5RMIX_INPUT_4_VOLUME 0xA4F +#define MADERA_DSP5AUX1MIX_INPUT_1_SOURCE 0xA50 +#define MADERA_DSP5AUX2MIX_INPUT_1_SOURCE 0xA58 +#define MADERA_DSP5AUX3MIX_INPUT_1_SOURCE 0xA60 +#define MADERA_DSP5AUX4MIX_INPUT_1_SOURCE 0xA68 +#define MADERA_DSP5AUX5MIX_INPUT_1_SOURCE 0xA70 +#define MADERA_DSP5AUX6MIX_INPUT_1_SOURCE 0xA78 +#define MADERA_ASRC1_1LMIX_INPUT_1_SOURCE 0xA80 +#define MADERA_ASRC1_1RMIX_INPUT_1_SOURCE 0xA88 +#define MADERA_ASRC1_2LMIX_INPUT_1_SOURCE 0xA90 +#define MADERA_ASRC1_2RMIX_INPUT_1_SOURCE 0xA98 +#define MADERA_ASRC2_1LMIX_INPUT_1_SOURCE 0xAA0 +#define MADERA_ASRC2_1RMIX_INPUT_1_SOURCE 0xAA8 +#define MADERA_ASRC2_2LMIX_INPUT_1_SOURCE 0xAB0 +#define MADERA_ASRC2_2RMIX_INPUT_1_SOURCE 0xAB8 +#define MADERA_ISRC1DEC1MIX_INPUT_1_SOURCE 0xB00 +#define MADERA_ISRC1DEC2MIX_INPUT_1_SOURCE 0xB08 +#define MADERA_ISRC1DEC3MIX_INPUT_1_SOURCE 0xB10 +#define MADERA_ISRC1DEC4MIX_INPUT_1_SOURCE 0xB18 +#define MADERA_ISRC1INT1MIX_INPUT_1_SOURCE 0xB20 +#define MADERA_ISRC1INT2MIX_INPUT_1_SOURCE 0xB28 +#define MADERA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30 +#define MADERA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38 +#define MADERA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40 +#define MADERA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48 +#define MADERA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50 +#define MADERA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58 +#define MADERA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60 +#define MADERA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68 +#define MADERA_ISRC2INT3MIX_INPUT_1_SOURCE 0xB70 +#define MADERA_ISRC2INT4MIX_INPUT_1_SOURCE 0xB78 +#define MADERA_ISRC3DEC1MIX_INPUT_1_SOURCE 0xB80 +#define MADERA_ISRC3DEC2MIX_INPUT_1_SOURCE 0xB88 +#define 
MADERA_ISRC3DEC3MIX_INPUT_1_SOURCE 0xB90 +#define MADERA_ISRC3DEC4MIX_INPUT_1_SOURCE 0xB98 +#define MADERA_ISRC3INT1MIX_INPUT_1_SOURCE 0xBA0 +#define MADERA_ISRC3INT2MIX_INPUT_1_SOURCE 0xBA8 +#define MADERA_ISRC3INT3MIX_INPUT_1_SOURCE 0xBB0 +#define MADERA_ISRC3INT4MIX_INPUT_1_SOURCE 0xBB8 +#define MADERA_ISRC4DEC1MIX_INPUT_1_SOURCE 0xBC0 +#define MADERA_ISRC4DEC2MIX_INPUT_1_SOURCE 0xBC8 +#define MADERA_ISRC4INT1MIX_INPUT_1_SOURCE 0xBE0 +#define MADERA_ISRC4INT2MIX_INPUT_1_SOURCE 0xBE8 +#define MADERA_DSP6LMIX_INPUT_1_SOURCE 0xC00 +#define MADERA_DSP6LMIX_INPUT_1_VOLUME 0xC01 +#define MADERA_DSP6LMIX_INPUT_2_SOURCE 0xC02 +#define MADERA_DSP6LMIX_INPUT_2_VOLUME 0xC03 +#define MADERA_DSP6LMIX_INPUT_3_SOURCE 0xC04 +#define MADERA_DSP6LMIX_INPUT_3_VOLUME 0xC05 +#define MADERA_DSP6LMIX_INPUT_4_SOURCE 0xC06 +#define MADERA_DSP6LMIX_INPUT_4_VOLUME 0xC07 +#define MADERA_DSP6RMIX_INPUT_1_SOURCE 0xC08 +#define MADERA_DSP6RMIX_INPUT_1_VOLUME 0xC09 +#define MADERA_DSP6RMIX_INPUT_2_SOURCE 0xC0A +#define MADERA_DSP6RMIX_INPUT_2_VOLUME 0xC0B +#define MADERA_DSP6RMIX_INPUT_3_SOURCE 0xC0C +#define MADERA_DSP6RMIX_INPUT_3_VOLUME 0xC0D +#define MADERA_DSP6RMIX_INPUT_4_SOURCE 0xC0E +#define MADERA_DSP6RMIX_INPUT_4_VOLUME 0xC0F +#define MADERA_DSP6AUX1MIX_INPUT_1_SOURCE 0xC10 +#define MADERA_DSP6AUX2MIX_INPUT_1_SOURCE 0xC18 +#define MADERA_DSP6AUX3MIX_INPUT_1_SOURCE 0xC20 +#define MADERA_DSP6AUX4MIX_INPUT_1_SOURCE 0xC28 +#define MADERA_DSP6AUX5MIX_INPUT_1_SOURCE 0xC30 +#define MADERA_DSP6AUX6MIX_INPUT_1_SOURCE 0xC38 +#define MADERA_DSP7LMIX_INPUT_1_SOURCE 0xC40 +#define MADERA_DSP7LMIX_INPUT_1_VOLUME 0xC41 +#define MADERA_DSP7LMIX_INPUT_2_SOURCE 0xC42 +#define MADERA_DSP7LMIX_INPUT_2_VOLUME 0xC43 +#define MADERA_DSP7LMIX_INPUT_3_SOURCE 0xC44 +#define MADERA_DSP7LMIX_INPUT_3_VOLUME 0xC45 +#define MADERA_DSP7LMIX_INPUT_4_SOURCE 0xC46 +#define MADERA_DSP7LMIX_INPUT_4_VOLUME 0xC47 +#define MADERA_DSP7RMIX_INPUT_1_SOURCE 0xC48 +#define MADERA_DSP7RMIX_INPUT_1_VOLUME 0xC49 +#define MADERA_DSP7RMIX_INPUT_2_SOURCE 0xC4A +#define MADERA_DSP7RMIX_INPUT_2_VOLUME 0xC4B +#define MADERA_DSP7RMIX_INPUT_3_SOURCE 0xC4C +#define MADERA_DSP7RMIX_INPUT_3_VOLUME 0xC4D +#define MADERA_DSP7RMIX_INPUT_4_SOURCE 0xC4E +#define MADERA_DSP7RMIX_INPUT_4_VOLUME 0xC4F +#define MADERA_DSP7AUX1MIX_INPUT_1_SOURCE 0xC50 +#define MADERA_DSP7AUX2MIX_INPUT_1_SOURCE 0xC58 +#define MADERA_DSP7AUX3MIX_INPUT_1_SOURCE 0xC60 +#define MADERA_DSP7AUX4MIX_INPUT_1_SOURCE 0xC68 +#define MADERA_DSP7AUX5MIX_INPUT_1_SOURCE 0xC70 +#define MADERA_DSP7AUX6MIX_INPUT_1_SOURCE 0xC78 +#define MADERA_DFC1MIX_INPUT_1_SOURCE 0xDC0 +#define MADERA_DFC2MIX_INPUT_1_SOURCE 0xDC8 +#define MADERA_DFC3MIX_INPUT_1_SOURCE 0xDD0 +#define MADERA_DFC4MIX_INPUT_1_SOURCE 0xDD8 +#define MADERA_DFC5MIX_INPUT_1_SOURCE 0xDE0 +#define MADERA_DFC6MIX_INPUT_1_SOURCE 0xDE8 +#define MADERA_DFC7MIX_INPUT_1_SOURCE 0xDF0 +#define MADERA_DFC8MIX_INPUT_1_SOURCE 0xDF8 +#define MADERA_FX_CTRL1 0xE00 +#define MADERA_FX_CTRL2 0xE01 +#define MADERA_EQ1_1 0xE10 +#define MADERA_EQ1_2 0xE11 +#define MADERA_EQ1_21 0xE24 +#define MADERA_EQ2_1 0xE26 +#define MADERA_EQ2_2 0xE27 +#define MADERA_EQ2_21 0xE3A +#define MADERA_EQ3_1 0xE3C +#define MADERA_EQ3_2 0xE3D +#define MADERA_EQ3_21 0xE50 +#define MADERA_EQ4_1 0xE52 +#define MADERA_EQ4_2 0xE53 +#define MADERA_EQ4_21 0xE66 +#define MADERA_DRC1_CTRL1 0xE80 +#define MADERA_DRC1_CTRL2 0xE81 +#define MADERA_DRC1_CTRL3 0xE82 +#define MADERA_DRC1_CTRL4 0xE83 +#define MADERA_DRC1_CTRL5 0xE84 +#define MADERA_DRC2_CTRL1 0xE88 +#define MADERA_DRC2_CTRL2 0xE89 +#define 
MADERA_DRC2_CTRL3 0xE8A +#define MADERA_DRC2_CTRL4 0xE8B +#define MADERA_DRC2_CTRL5 0xE8C +#define MADERA_HPLPF1_1 0xEC0 +#define MADERA_HPLPF1_2 0xEC1 +#define MADERA_HPLPF2_1 0xEC4 +#define MADERA_HPLPF2_2 0xEC5 +#define MADERA_HPLPF3_1 0xEC8 +#define MADERA_HPLPF3_2 0xEC9 +#define MADERA_HPLPF4_1 0xECC +#define MADERA_HPLPF4_2 0xECD +#define MADERA_ASRC2_ENABLE 0xED0 +#define MADERA_ASRC2_STATUS 0xED1 +#define MADERA_ASRC2_RATE1 0xED2 +#define MADERA_ASRC2_RATE2 0xED3 +#define MADERA_ASRC1_ENABLE 0xEE0 +#define MADERA_ASRC1_STATUS 0xEE1 +#define MADERA_ASRC1_RATE1 0xEE2 +#define MADERA_ASRC1_RATE2 0xEE3 +#define MADERA_ISRC_1_CTRL_1 0xEF0 +#define MADERA_ISRC_1_CTRL_2 0xEF1 +#define MADERA_ISRC_1_CTRL_3 0xEF2 +#define MADERA_ISRC_2_CTRL_1 0xEF3 +#define MADERA_ISRC_2_CTRL_2 0xEF4 +#define MADERA_ISRC_2_CTRL_3 0xEF5 +#define MADERA_ISRC_3_CTRL_1 0xEF6 +#define MADERA_ISRC_3_CTRL_2 0xEF7 +#define MADERA_ISRC_3_CTRL_3 0xEF8 +#define MADERA_ISRC_4_CTRL_1 0xEF9 +#define MADERA_ISRC_4_CTRL_2 0xEFA +#define MADERA_ISRC_4_CTRL_3 0xEFB +#define MADERA_CLOCK_CONTROL 0xF00 +#define MADERA_ANC_SRC 0xF01 +#define MADERA_DSP_STATUS 0xF02 +#define MADERA_ANC_COEFF_START 0xF08 +#define MADERA_ANC_COEFF_END 0xF12 +#define MADERA_FCL_FILTER_CONTROL 0xF15 +#define MADERA_FCL_ADC_REFORMATTER_CONTROL 0xF17 +#define MADERA_FCL_COEFF_START 0xF18 +#define MADERA_FCL_COEFF_END 0xF69 +#define MADERA_FCR_FILTER_CONTROL 0xF71 +#define MADERA_FCR_ADC_REFORMATTER_CONTROL 0xF73 +#define MADERA_FCR_COEFF_START 0xF74 +#define MADERA_FCR_COEFF_END 0xFC5 +#define MADERA_DAC_COMP_1 0x1300 +#define MADERA_DAC_COMP_2 0x1302 +#define MADERA_FRF_COEFFICIENT_1L_1 0x1380 +#define MADERA_FRF_COEFFICIENT_1L_2 0x1381 +#define MADERA_FRF_COEFFICIENT_1L_3 0x1382 +#define MADERA_FRF_COEFFICIENT_1L_4 0x1383 +#define MADERA_FRF_COEFFICIENT_1R_1 0x1390 +#define MADERA_FRF_COEFFICIENT_1R_2 0x1391 +#define MADERA_FRF_COEFFICIENT_1R_3 0x1392 +#define MADERA_FRF_COEFFICIENT_1R_4 0x1393 +#define MADERA_FRF_COEFFICIENT_2L_1 0x13A0 +#define MADERA_FRF_COEFFICIENT_2L_2 0x13A1 +#define MADERA_FRF_COEFFICIENT_2L_3 0x13A2 +#define MADERA_FRF_COEFFICIENT_2L_4 0x13A3 +#define MADERA_FRF_COEFFICIENT_2R_1 0x13B0 +#define MADERA_FRF_COEFFICIENT_2R_2 0x13B1 +#define MADERA_FRF_COEFFICIENT_2R_3 0x13B2 +#define MADERA_FRF_COEFFICIENT_2R_4 0x13B3 +#define MADERA_FRF_COEFFICIENT_3L_1 0x13C0 +#define MADERA_FRF_COEFFICIENT_3L_2 0x13C1 +#define MADERA_FRF_COEFFICIENT_3L_3 0x13C2 +#define MADERA_FRF_COEFFICIENT_3L_4 0x13C3 +#define MADERA_FRF_COEFFICIENT_3R_1 0x13D0 +#define MADERA_FRF_COEFFICIENT_3R_2 0x13D1 +#define MADERA_FRF_COEFFICIENT_3R_3 0x13D2 +#define MADERA_FRF_COEFFICIENT_3R_4 0x13D3 +#define MADERA_FRF_COEFFICIENT_4L_1 0x13E0 +#define MADERA_FRF_COEFFICIENT_4L_2 0x13E1 +#define MADERA_FRF_COEFFICIENT_4L_3 0x13E2 +#define MADERA_FRF_COEFFICIENT_4L_4 0x13E3 +#define MADERA_FRF_COEFFICIENT_4R_1 0x13F0 +#define MADERA_FRF_COEFFICIENT_4R_2 0x13F1 +#define MADERA_FRF_COEFFICIENT_4R_3 0x13F2 +#define MADERA_FRF_COEFFICIENT_4R_4 0x13F3 +#define CS47L35_FRF_COEFFICIENT_4L_1 0x13A0 +#define CS47L35_FRF_COEFFICIENT_4L_2 0x13A1 +#define CS47L35_FRF_COEFFICIENT_4L_3 0x13A2 +#define CS47L35_FRF_COEFFICIENT_4L_4 0x13A3 +#define CS47L35_FRF_COEFFICIENT_5L_1 0x13B0 +#define CS47L35_FRF_COEFFICIENT_5L_2 0x13B1 +#define CS47L35_FRF_COEFFICIENT_5L_3 0x13B2 +#define CS47L35_FRF_COEFFICIENT_5L_4 0x13B3 +#define CS47L35_FRF_COEFFICIENT_5R_1 0x13C0 +#define CS47L35_FRF_COEFFICIENT_5R_2 0x13C1 +#define CS47L35_FRF_COEFFICIENT_5R_3 0x13C2 +#define 
CS47L35_FRF_COEFFICIENT_5R_4 0x13C3 +#define MADERA_FRF_COEFFICIENT_5L_1 0x1400 +#define MADERA_FRF_COEFFICIENT_5L_2 0x1401 +#define MADERA_FRF_COEFFICIENT_5L_3 0x1402 +#define MADERA_FRF_COEFFICIENT_5L_4 0x1403 +#define MADERA_FRF_COEFFICIENT_5R_1 0x1410 +#define MADERA_FRF_COEFFICIENT_5R_2 0x1411 +#define MADERA_FRF_COEFFICIENT_5R_3 0x1412 +#define MADERA_FRF_COEFFICIENT_5R_4 0x1413 +#define MADERA_FRF_COEFFICIENT_6L_1 0x1420 +#define MADERA_FRF_COEFFICIENT_6L_2 0x1421 +#define MADERA_FRF_COEFFICIENT_6L_3 0x1422 +#define MADERA_FRF_COEFFICIENT_6L_4 0x1423 +#define MADERA_FRF_COEFFICIENT_6R_1 0x1430 +#define MADERA_FRF_COEFFICIENT_6R_2 0x1431 +#define MADERA_FRF_COEFFICIENT_6R_3 0x1432 +#define MADERA_FRF_COEFFICIENT_6R_4 0x1433 +#define MADERA_DFC1_CTRL 0x1480 +#define MADERA_DFC1_RX 0x1482 +#define MADERA_DFC1_TX 0x1484 +#define MADERA_DFC2_CTRL 0x1486 +#define MADERA_DFC2_RX 0x1488 +#define MADERA_DFC2_TX 0x148A +#define MADERA_DFC3_CTRL 0x148C +#define MADERA_DFC3_RX 0x148E +#define MADERA_DFC3_TX 0x1490 +#define MADERA_DFC4_CTRL 0x1492 +#define MADERA_DFC4_RX 0x1494 +#define MADERA_DFC4_TX 0x1496 +#define MADERA_DFC5_CTRL 0x1498 +#define MADERA_DFC5_RX 0x149A +#define MADERA_DFC5_TX 0x149C +#define MADERA_DFC6_CTRL 0x149E +#define MADERA_DFC6_RX 0x14A0 +#define MADERA_DFC6_TX 0x14A2 +#define MADERA_DFC7_CTRL 0x14A4 +#define MADERA_DFC7_RX 0x14A6 +#define MADERA_DFC7_TX 0x14A8 +#define MADERA_DFC8_CTRL 0x14AA +#define MADERA_DFC8_RX 0x14AC +#define MADERA_DFC8_TX 0x14AE +#define MADERA_DFC_STATUS 0x14B6 +#define MADERA_ADSP2_IRQ0 0x1600 +#define MADERA_ADSP2_IRQ1 0x1601 +#define MADERA_ADSP2_IRQ2 0x1602 +#define MADERA_ADSP2_IRQ3 0x1603 +#define MADERA_ADSP2_IRQ4 0x1604 +#define MADERA_ADSP2_IRQ5 0x1605 +#define MADERA_ADSP2_IRQ6 0x1606 +#define MADERA_ADSP2_IRQ7 0x1607 +#define MADERA_GPIO1_CTRL_1 0x1700 +#define MADERA_GPIO1_CTRL_2 0x1701 +#define MADERA_GPIO2_CTRL_1 0x1702 +#define MADERA_GPIO2_CTRL_2 0x1703 +#define MADERA_GPIO16_CTRL_1 0x171E +#define MADERA_GPIO16_CTRL_2 0x171F +#define MADERA_GPIO38_CTRL_1 0x174A +#define MADERA_GPIO38_CTRL_2 0x174B +#define MADERA_GPIO40_CTRL_1 0x174E +#define MADERA_GPIO40_CTRL_2 0x174F +#define MADERA_IRQ1_STATUS_1 0x1800 +#define MADERA_IRQ1_STATUS_2 0x1801 +#define MADERA_IRQ1_STATUS_6 0x1805 +#define MADERA_IRQ1_STATUS_7 0x1806 +#define MADERA_IRQ1_STATUS_9 0x1808 +#define MADERA_IRQ1_STATUS_11 0x180A +#define MADERA_IRQ1_STATUS_12 0x180B +#define MADERA_IRQ1_STATUS_15 0x180E +#define MADERA_IRQ1_STATUS_33 0x1820 +#define MADERA_IRQ1_MASK_1 0x1840 +#define MADERA_IRQ1_MASK_2 0x1841 +#define MADERA_IRQ1_MASK_6 0x1845 +#define MADERA_IRQ1_MASK_33 0x1860 +#define MADERA_IRQ1_RAW_STATUS_1 0x1880 +#define MADERA_IRQ1_RAW_STATUS_2 0x1881 +#define MADERA_IRQ1_RAW_STATUS_7 0x1886 +#define MADERA_IRQ1_RAW_STATUS_15 0x188E +#define MADERA_IRQ1_RAW_STATUS_33 0x18A0 +#define MADERA_INTERRUPT_DEBOUNCE_7 0x1A06 +#define MADERA_INTERRUPT_DEBOUNCE_15 0x1A0E +#define MADERA_IRQ1_CTRL 0x1A80 +#define MADERA_IRQ2_CTRL 0x1A82 +#define MADERA_INTERRUPT_RAW_STATUS_1 0x1AA0 +#define MADERA_WSEQ_SEQUENCE_1 0x3000 +#define MADERA_WSEQ_SEQUENCE_252 0x31F6 +#define CS47L35_OTP_HPDET_CAL_1 0x31F8 +#define CS47L35_OTP_HPDET_CAL_2 0x31FA +#define MADERA_WSEQ_SEQUENCE_508 0x33F6 +#define CS47L85_OTP_HPDET_CAL_1 0x33F8 +#define CS47L85_OTP_HPDET_CAL_2 0x33FA +#define MADERA_OTP_HPDET_CAL_1 0x20004 +#define MADERA_OTP_HPDET_CAL_2 0x20006 +#define MADERA_DSP1_CONFIG_1 0x0FFE00 +#define MADERA_DSP1_CONFIG_2 0x0FFE02 +#define MADERA_DSP1_SCRATCH_1 0x0FFE40 +#define 
MADERA_DSP1_SCRATCH_2 0x0FFE42 +#define MADERA_DSP1_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0xFFE7C +#define MADERA_DSP2_CONFIG_1 0x17FE00 +#define MADERA_DSP2_CONFIG_2 0x17FE02 +#define MADERA_DSP2_SCRATCH_1 0x17FE40 +#define MADERA_DSP2_SCRATCH_2 0x17FE42 +#define MADERA_DSP2_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x17FE7C +#define MADERA_DSP3_CONFIG_1 0x1FFE00 +#define MADERA_DSP3_CONFIG_2 0x1FFE02 +#define MADERA_DSP3_SCRATCH_1 0x1FFE40 +#define MADERA_DSP3_SCRATCH_2 0x1FFE42 +#define MADERA_DSP3_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x1FFE7C +#define MADERA_DSP4_CONFIG_1 0x27FE00 +#define MADERA_DSP4_CONFIG_2 0x27FE02 +#define MADERA_DSP4_SCRATCH_1 0x27FE40 +#define MADERA_DSP4_SCRATCH_2 0x27FE42 +#define MADERA_DSP4_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x27FE7C +#define MADERA_DSP5_CONFIG_1 0x2FFE00 +#define MADERA_DSP5_CONFIG_2 0x2FFE02 +#define MADERA_DSP5_SCRATCH_1 0x2FFE40 +#define MADERA_DSP5_SCRATCH_2 0x2FFE42 +#define MADERA_DSP5_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x2FFE7C +#define MADERA_DSP6_CONFIG_1 0x37FE00 +#define MADERA_DSP6_CONFIG_2 0x37FE02 +#define MADERA_DSP6_SCRATCH_1 0x37FE40 +#define MADERA_DSP6_SCRATCH_2 0x37FE42 +#define MADERA_DSP6_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x37FE7C +#define MADERA_DSP7_CONFIG_1 0x3FFE00 +#define MADERA_DSP7_CONFIG_2 0x3FFE02 +#define MADERA_DSP7_SCRATCH_1 0x3FFE40 +#define MADERA_DSP7_SCRATCH_2 0x3FFE42 +#define MADERA_DSP7_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x3FFE7C + +/* (0x0000) Software_Reset */ +#define MADERA_SW_RST_DEV_ID1_MASK 0xFFFF +#define MADERA_SW_RST_DEV_ID1_SHIFT 0 +#define MADERA_SW_RST_DEV_ID1_WIDTH 16 + +/* (0x0001) Hardware_Revision */ +#define MADERA_HW_REVISION_MASK 0x00FF +#define MADERA_HW_REVISION_SHIFT 0 +#define MADERA_HW_REVISION_WIDTH 8 + +/* (0x0020) Tone_Generator_1 */ +#define MADERA_TONE2_ENA 0x0002 +#define MADERA_TONE2_ENA_MASK 0x0002 +#define MADERA_TONE2_ENA_SHIFT 1 +#define MADERA_TONE2_ENA_WIDTH 1 +#define MADERA_TONE1_ENA 0x0001 +#define MADERA_TONE1_ENA_MASK 0x0001 +#define MADERA_TONE1_ENA_SHIFT 0 +#define MADERA_TONE1_ENA_WIDTH 1 + +/* (0x0021) Tone_Generator_2 */ +#define MADERA_TONE1_LVL_0_MASK 0xFFFF +#define MADERA_TONE1_LVL_0_SHIFT 0 +#define MADERA_TONE1_LVL_0_WIDTH 16 + +/* (0x0022) Tone_Generator_3 */ +#define MADERA_TONE1_LVL_MASK 0x00FF +#define MADERA_TONE1_LVL_SHIFT 0 +#define MADERA_TONE1_LVL_WIDTH 8 + +/* (0x0023) Tone_Generator_4 */ +#define MADERA_TONE2_LVL_0_MASK 0xFFFF +#define MADERA_TONE2_LVL_0_SHIFT 0 +#define MADERA_TONE2_LVL_0_WIDTH 16 + +/* (0x0024) Tone_Generator_5 */ +#define MADERA_TONE2_LVL_MASK 0x00FF +#define MADERA_TONE2_LVL_SHIFT 0 +#define MADERA_TONE2_LVL_WIDTH 8 + +/* (0x0030) PWM_Drive_1 */ +#define MADERA_PWM2_ENA 0x0002 +#define MADERA_PWM2_ENA_MASK 0x0002 +#define MADERA_PWM2_ENA_SHIFT 1 +#define MADERA_PWM2_ENA_WIDTH 1 +#define MADERA_PWM1_ENA 0x0001 +#define MADERA_PWM1_ENA_MASK 0x0001 +#define MADERA_PWM1_ENA_SHIFT 0 +#define MADERA_PWM1_ENA_WIDTH 1 + +/* (0x00A0) Comfort_Noise_Generator */ +#define MADERA_NOISE_GEN_ENA 0x0020 +#define MADERA_NOISE_GEN_ENA_MASK 0x0020 +#define MADERA_NOISE_GEN_ENA_SHIFT 5 +#define MADERA_NOISE_GEN_ENA_WIDTH 1 +#define MADERA_NOISE_GEN_GAIN_MASK 0x001F +#define MADERA_NOISE_GEN_GAIN_SHIFT 0 +#define MADERA_NOISE_GEN_GAIN_WIDTH 5 + +/* (0x0100) Clock_32k_1 */ +#define MADERA_CLK_32K_ENA 0x0040 +#define MADERA_CLK_32K_ENA_MASK 0x0040 +#define MADERA_CLK_32K_ENA_SHIFT 6 +#define MADERA_CLK_32K_ENA_WIDTH 1 +#define MADERA_CLK_32K_SRC_MASK 0x0003 +#define MADERA_CLK_32K_SRC_SHIFT 0 +#define MADERA_CLK_32K_SRC_WIDTH 2 + +/* (0x0101) System_Clock_1 */ +#define 
MADERA_SYSCLK_FRAC 0x8000 +#define MADERA_SYSCLK_FRAC_MASK 0x8000 +#define MADERA_SYSCLK_FRAC_SHIFT 15 +#define MADERA_SYSCLK_FRAC_WIDTH 1 +#define MADERA_SYSCLK_FREQ_MASK 0x0700 +#define MADERA_SYSCLK_FREQ_SHIFT 8 +#define MADERA_SYSCLK_FREQ_WIDTH 3 +#define MADERA_SYSCLK_ENA 0x0040 +#define MADERA_SYSCLK_ENA_MASK 0x0040 +#define MADERA_SYSCLK_ENA_SHIFT 6 +#define MADERA_SYSCLK_ENA_WIDTH 1 +#define MADERA_SYSCLK_SRC_MASK 0x000F +#define MADERA_SYSCLK_SRC_SHIFT 0 +#define MADERA_SYSCLK_SRC_WIDTH 4 + +/* (0x0102) Sample_rate_1 */ +#define MADERA_SAMPLE_RATE_1_MASK 0x001F +#define MADERA_SAMPLE_RATE_1_SHIFT 0 +#define MADERA_SAMPLE_RATE_1_WIDTH 5 + +/* (0x0103) Sample_rate_2 */ +#define MADERA_SAMPLE_RATE_2_MASK 0x001F +#define MADERA_SAMPLE_RATE_2_SHIFT 0 +#define MADERA_SAMPLE_RATE_2_WIDTH 5 + +/* (0x0104) Sample_rate_3 */ +#define MADERA_SAMPLE_RATE_3_MASK 0x001F +#define MADERA_SAMPLE_RATE_3_SHIFT 0 +#define MADERA_SAMPLE_RATE_3_WIDTH 5 + +/* (0x0112) Async_clock_1 */ +#define MADERA_ASYNC_CLK_FREQ_MASK 0x0700 +#define MADERA_ASYNC_CLK_FREQ_SHIFT 8 +#define MADERA_ASYNC_CLK_FREQ_WIDTH 3 +#define MADERA_ASYNC_CLK_ENA 0x0040 +#define MADERA_ASYNC_CLK_ENA_MASK 0x0040 +#define MADERA_ASYNC_CLK_ENA_SHIFT 6 +#define MADERA_ASYNC_CLK_ENA_WIDTH 1 +#define MADERA_ASYNC_CLK_SRC_MASK 0x000F +#define MADERA_ASYNC_CLK_SRC_SHIFT 0 +#define MADERA_ASYNC_CLK_SRC_WIDTH 4 + +/* (0x0113) Async_sample_rate_1 */ +#define MADERA_ASYNC_SAMPLE_RATE_1_MASK 0x001F +#define MADERA_ASYNC_SAMPLE_RATE_1_SHIFT 0 +#define MADERA_ASYNC_SAMPLE_RATE_1_WIDTH 5 + +/* (0x0114) Async_sample_rate_2 */ +#define MADERA_ASYNC_SAMPLE_RATE_2_MASK 0x001F +#define MADERA_ASYNC_SAMPLE_RATE_2_SHIFT 0 +#define MADERA_ASYNC_SAMPLE_RATE_2_WIDTH 5 + +/* (0x0120) DSP_Clock_1 */ +#define MADERA_DSP_CLK_FREQ_LEGACY 0x0700 +#define MADERA_DSP_CLK_FREQ_LEGACY_MASK 0x0700 +#define MADERA_DSP_CLK_FREQ_LEGACY_SHIFT 8 +#define MADERA_DSP_CLK_FREQ_LEGACY_WIDTH 3 +#define MADERA_DSP_CLK_ENA 0x0040 +#define MADERA_DSP_CLK_ENA_MASK 0x0040 +#define MADERA_DSP_CLK_ENA_SHIFT 6 +#define MADERA_DSP_CLK_ENA_WIDTH 1 +#define MADERA_DSP_CLK_SRC 0x000F +#define MADERA_DSP_CLK_SRC_MASK 0x000F +#define MADERA_DSP_CLK_SRC_SHIFT 0 +#define MADERA_DSP_CLK_SRC_WIDTH 4 + +/* (0x0122) DSP_Clock_2 */ +#define MADERA_DSP_CLK_FREQ_MASK 0x03FF +#define MADERA_DSP_CLK_FREQ_SHIFT 0 +#define MADERA_DSP_CLK_FREQ_WIDTH 10 + +/* (0x0149) Output_system_clock */ +#define MADERA_OPCLK_ENA 0x8000 +#define MADERA_OPCLK_ENA_MASK 0x8000 +#define MADERA_OPCLK_ENA_SHIFT 15 +#define MADERA_OPCLK_ENA_WIDTH 1 +#define MADERA_OPCLK_DIV_MASK 0x00F8 +#define MADERA_OPCLK_DIV_SHIFT 3 +#define MADERA_OPCLK_DIV_WIDTH 5 +#define MADERA_OPCLK_SEL_MASK 0x0007 +#define MADERA_OPCLK_SEL_SHIFT 0 +#define MADERA_OPCLK_SEL_WIDTH 3 + +/* (0x014A) Output_async_clock */ +#define MADERA_OPCLK_ASYNC_ENA 0x8000 +#define MADERA_OPCLK_ASYNC_ENA_MASK 0x8000 +#define MADERA_OPCLK_ASYNC_ENA_SHIFT 15 +#define MADERA_OPCLK_ASYNC_ENA_WIDTH 1 +#define MADERA_OPCLK_ASYNC_DIV_MASK 0x00F8 +#define MADERA_OPCLK_ASYNC_DIV_SHIFT 3 +#define MADERA_OPCLK_ASYNC_DIV_WIDTH 5 +#define MADERA_OPCLK_ASYNC_SEL_MASK 0x0007 +#define MADERA_OPCLK_ASYNC_SEL_SHIFT 0 +#define MADERA_OPCLK_ASYNC_SEL_WIDTH 3 + +/* (0x0171) FLL1_Control_1 */ +#define MADERA_FLL1_FREERUN 0x0002 +#define MADERA_FLL1_FREERUN_MASK 0x0002 +#define MADERA_FLL1_FREERUN_SHIFT 1 +#define MADERA_FLL1_FREERUN_WIDTH 1 +#define MADERA_FLL1_ENA 0x0001 +#define MADERA_FLL1_ENA_MASK 0x0001 +#define MADERA_FLL1_ENA_SHIFT 0 +#define MADERA_FLL1_ENA_WIDTH 1 + +/* (0x0172) 
FLL1_Control_2 */ +#define MADERA_FLL1_CTRL_UPD 0x8000 +#define MADERA_FLL1_CTRL_UPD_MASK 0x8000 +#define MADERA_FLL1_CTRL_UPD_SHIFT 15 +#define MADERA_FLL1_CTRL_UPD_WIDTH 1 +#define MADERA_FLL1_N_MASK 0x03FF +#define MADERA_FLL1_N_SHIFT 0 +#define MADERA_FLL1_N_WIDTH 10 + +/* (0x0173) FLL1_Control_3 */ +#define MADERA_FLL1_THETA_MASK 0xFFFF +#define MADERA_FLL1_THETA_SHIFT 0 +#define MADERA_FLL1_THETA_WIDTH 16 + +/* (0x0174) FLL1_Control_4 */ +#define MADERA_FLL1_LAMBDA_MASK 0xFFFF +#define MADERA_FLL1_LAMBDA_SHIFT 0 +#define MADERA_FLL1_LAMBDA_WIDTH 16 + +/* (0x0175) FLL1_Control_5 */ +#define MADERA_FLL1_FRATIO_MASK 0x0F00 +#define MADERA_FLL1_FRATIO_SHIFT 8 +#define MADERA_FLL1_FRATIO_WIDTH 4 + +/* (0x0176) FLL1_Control_6 */ +#define MADERA_FLL1_REFCLK_DIV_MASK 0x00C0 +#define MADERA_FLL1_REFCLK_DIV_SHIFT 6 +#define MADERA_FLL1_REFCLK_DIV_WIDTH 2 +#define MADERA_FLL1_REFCLK_SRC_MASK 0x000F +#define MADERA_FLL1_REFCLK_SRC_SHIFT 0 +#define MADERA_FLL1_REFCLK_SRC_WIDTH 4 + +/* (0x0177) FLL1_Loop_Filter_Test_1 */ +#define MADERA_FLL1_FRC_INTEG_UPD 0x8000 +#define MADERA_FLL1_FRC_INTEG_UPD_MASK 0x8000 +#define MADERA_FLL1_FRC_INTEG_UPD_SHIFT 15 +#define MADERA_FLL1_FRC_INTEG_UPD_WIDTH 1 +#define MADERA_FLL1_FRC_INTEG_VAL_MASK 0x0FFF +#define MADERA_FLL1_FRC_INTEG_VAL_SHIFT 0 +#define MADERA_FLL1_FRC_INTEG_VAL_WIDTH 12 + +/* (0x0179) FLL1_Control_7 */ +#define MADERA_FLL1_GAIN_MASK 0x003c +#define MADERA_FLL1_GAIN_SHIFT 2 +#define MADERA_FLL1_GAIN_WIDTH 4 + +/* (0x017A) FLL1_EFS_2 */ +#define MADERA_FLL1_PHASE_GAIN_MASK 0xF000 +#define MADERA_FLL1_PHASE_GAIN_SHIFT 12 +#define MADERA_FLL1_PHASE_GAIN_WIDTH 4 +#define MADERA_FLL1_PHASE_ENA_MASK 0x0800 +#define MADERA_FLL1_PHASE_ENA_SHIFT 11 +#define MADERA_FLL1_PHASE_ENA_WIDTH 1 + +/* (0x0181) FLL1_Synchroniser_1 */ +#define MADERA_FLL1_SYNC_ENA 0x0001 +#define MADERA_FLL1_SYNC_ENA_MASK 0x0001 +#define MADERA_FLL1_SYNC_ENA_SHIFT 0 +#define MADERA_FLL1_SYNC_ENA_WIDTH 1 + +/* (0x0182) FLL1_Synchroniser_2 */ +#define MADERA_FLL1_SYNC_N_MASK 0x03FF +#define MADERA_FLL1_SYNC_N_SHIFT 0 +#define MADERA_FLL1_SYNC_N_WIDTH 10 + +/* (0x0183) FLL1_Synchroniser_3 */ +#define MADERA_FLL1_SYNC_THETA_MASK 0xFFFF +#define MADERA_FLL1_SYNC_THETA_SHIFT 0 +#define MADERA_FLL1_SYNC_THETA_WIDTH 16 + +/* (0x0184) FLL1_Synchroniser_4 */ +#define MADERA_FLL1_SYNC_LAMBDA_MASK 0xFFFF +#define MADERA_FLL1_SYNC_LAMBDA_SHIFT 0 +#define MADERA_FLL1_SYNC_LAMBDA_WIDTH 16 + +/* (0x0185) FLL1_Synchroniser_5 */ +#define MADERA_FLL1_SYNC_FRATIO_MASK 0x0700 +#define MADERA_FLL1_SYNC_FRATIO_SHIFT 8 +#define MADERA_FLL1_SYNC_FRATIO_WIDTH 3 + +/* (0x0186) FLL1_Synchroniser_6 */ +#define MADERA_FLL1_SYNCCLK_DIV_MASK 0x00C0 +#define MADERA_FLL1_SYNCCLK_DIV_SHIFT 6 +#define MADERA_FLL1_SYNCCLK_DIV_WIDTH 2 +#define MADERA_FLL1_SYNCCLK_SRC_MASK 0x000F +#define MADERA_FLL1_SYNCCLK_SRC_SHIFT 0 +#define MADERA_FLL1_SYNCCLK_SRC_WIDTH 4 + +/* (0x0187) FLL1_Synchroniser_7 */ +#define MADERA_FLL1_SYNC_GAIN_MASK 0x003c +#define MADERA_FLL1_SYNC_GAIN_SHIFT 2 +#define MADERA_FLL1_SYNC_GAIN_WIDTH 4 +#define MADERA_FLL1_SYNC_DFSAT 0x0001 +#define MADERA_FLL1_SYNC_DFSAT_MASK 0x0001 +#define MADERA_FLL1_SYNC_DFSAT_SHIFT 0 +#define MADERA_FLL1_SYNC_DFSAT_WIDTH 1 + +/* (0x01D1) FLL_AO_Control_1 */ +#define MADERA_FLL_AO_HOLD 0x0004 +#define MADERA_FLL_AO_HOLD_MASK 0x0004 +#define MADERA_FLL_AO_HOLD_SHIFT 2 +#define MADERA_FLL_AO_HOLD_WIDTH 1 +#define MADERA_FLL_AO_FREERUN 0x0002 +#define MADERA_FLL_AO_FREERUN_MASK 0x0002 +#define MADERA_FLL_AO_FREERUN_SHIFT 1 +#define MADERA_FLL_AO_FREERUN_WIDTH 1 
+#define MADERA_FLL_AO_ENA 0x0001 +#define MADERA_FLL_AO_ENA_MASK 0x0001 +#define MADERA_FLL_AO_ENA_SHIFT 0 +#define MADERA_FLL_AO_ENA_WIDTH 1 + +/* (0x01D2) FLL_AO_Control_2 */ +#define MADERA_FLL_AO_CTRL_UPD 0x8000 +#define MADERA_FLL_AO_CTRL_UPD_MASK 0x8000 +#define MADERA_FLL_AO_CTRL_UPD_SHIFT 15 +#define MADERA_FLL_AO_CTRL_UPD_WIDTH 1 + +/* (0x01D6) FLL_AO_Control_6 */ +#define MADERA_FLL_AO_REFCLK_SRC_MASK 0x000F +#define MADERA_FLL_AO_REFCLK_SRC_SHIFT 0 +#define MADERA_FLL_AO_REFCLK_SRC_WIDTH 4 + +/* (0x0200) Mic_Charge_Pump_1 */ +#define MADERA_CPMIC_BYPASS 0x0002 +#define MADERA_CPMIC_BYPASS_MASK 0x0002 +#define MADERA_CPMIC_BYPASS_SHIFT 1 +#define MADERA_CPMIC_BYPASS_WIDTH 1 +#define MADERA_CPMIC_ENA 0x0001 +#define MADERA_CPMIC_ENA_MASK 0x0001 +#define MADERA_CPMIC_ENA_SHIFT 0 +#define MADERA_CPMIC_ENA_WIDTH 1 + +/* (0x0210) LDO1_Control_1 */ +#define MADERA_LDO1_VSEL_MASK 0x07E0 +#define MADERA_LDO1_VSEL_SHIFT 5 +#define MADERA_LDO1_VSEL_WIDTH 6 +#define MADERA_LDO1_FAST 0x0010 +#define MADERA_LDO1_FAST_MASK 0x0010 +#define MADERA_LDO1_FAST_SHIFT 4 +#define MADERA_LDO1_FAST_WIDTH 1 +#define MADERA_LDO1_DISCH 0x0004 +#define MADERA_LDO1_DISCH_MASK 0x0004 +#define MADERA_LDO1_DISCH_SHIFT 2 +#define MADERA_LDO1_DISCH_WIDTH 1 +#define MADERA_LDO1_BYPASS 0x0002 +#define MADERA_LDO1_BYPASS_MASK 0x0002 +#define MADERA_LDO1_BYPASS_SHIFT 1 +#define MADERA_LDO1_BYPASS_WIDTH 1 +#define MADERA_LDO1_ENA 0x0001 +#define MADERA_LDO1_ENA_MASK 0x0001 +#define MADERA_LDO1_ENA_SHIFT 0 +#define MADERA_LDO1_ENA_WIDTH 1 + +/* (0x0213) LDO2_Control_1 */ +#define MADERA_LDO2_VSEL_MASK 0x07E0 +#define MADERA_LDO2_VSEL_SHIFT 5 +#define MADERA_LDO2_VSEL_WIDTH 6 +#define MADERA_LDO2_FAST 0x0010 +#define MADERA_LDO2_FAST_MASK 0x0010 +#define MADERA_LDO2_FAST_SHIFT 4 +#define MADERA_LDO2_FAST_WIDTH 1 +#define MADERA_LDO2_DISCH 0x0004 +#define MADERA_LDO2_DISCH_MASK 0x0004 +#define MADERA_LDO2_DISCH_SHIFT 2 +#define MADERA_LDO2_DISCH_WIDTH 1 +#define MADERA_LDO2_BYPASS 0x0002 +#define MADERA_LDO2_BYPASS_MASK 0x0002 +#define MADERA_LDO2_BYPASS_SHIFT 1 +#define MADERA_LDO2_BYPASS_WIDTH 1 +#define MADERA_LDO2_ENA 0x0001 +#define MADERA_LDO2_ENA_MASK 0x0001 +#define MADERA_LDO2_ENA_SHIFT 0 +#define MADERA_LDO2_ENA_WIDTH 1 + +/* (0x0218) Mic_Bias_Ctrl_1 */ +#define MADERA_MICB1_ENA 0x0001 +#define MADERA_MICB1_ENA_MASK 0x0001 +#define MADERA_MICB1_ENA_SHIFT 0 +#define MADERA_MICB1_ENA_WIDTH 1 + +/* (0x021C) Mic_Bias_Ctrl_5 */ +#define MADERA_MICB1D_ENA 0x1000 +#define MADERA_MICB1D_ENA_MASK 0x1000 +#define MADERA_MICB1D_ENA_SHIFT 12 +#define MADERA_MICB1D_ENA_WIDTH 1 +#define MADERA_MICB1C_ENA 0x0100 +#define MADERA_MICB1C_ENA_MASK 0x0100 +#define MADERA_MICB1C_ENA_SHIFT 8 +#define MADERA_MICB1C_ENA_WIDTH 1 +#define MADERA_MICB1B_ENA 0x0010 +#define MADERA_MICB1B_ENA_MASK 0x0010 +#define MADERA_MICB1B_ENA_SHIFT 4 +#define MADERA_MICB1B_ENA_WIDTH 1 +#define MADERA_MICB1A_ENA 0x0001 +#define MADERA_MICB1A_ENA_MASK 0x0001 +#define MADERA_MICB1A_ENA_SHIFT 0 +#define MADERA_MICB1A_ENA_WIDTH 1 + +/* (0x021E) Mic_Bias_Ctrl_6 */ +#define MADERA_MICB2D_ENA 0x1000 +#define MADERA_MICB2D_ENA_MASK 0x1000 +#define MADERA_MICB2D_ENA_SHIFT 12 +#define MADERA_MICB2D_ENA_WIDTH 1 +#define MADERA_MICB2C_ENA 0x0100 +#define MADERA_MICB2C_ENA_MASK 0x0100 +#define MADERA_MICB2C_ENA_SHIFT 8 +#define MADERA_MICB2C_ENA_WIDTH 1 +#define MADERA_MICB2B_ENA 0x0010 +#define MADERA_MICB2B_ENA_MASK 0x0010 +#define MADERA_MICB2B_ENA_SHIFT 4 +#define MADERA_MICB2B_ENA_WIDTH 1 +#define MADERA_MICB2A_ENA 0x0001 +#define MADERA_MICB2A_ENA_MASK 
0x0001 +#define MADERA_MICB2A_ENA_SHIFT 0 +#define MADERA_MICB2A_ENA_WIDTH 1 + +/* (0x0225) - HP Ctrl 1L */ +#define MADERA_RMV_SHRT_HP1L 0x4000 +#define MADERA_RMV_SHRT_HP1L_MASK 0x4000 +#define MADERA_RMV_SHRT_HP1L_SHIFT 14 +#define MADERA_RMV_SHRT_HP1L_WIDTH 1 +#define MADERA_HP1L_FLWR 0x0004 +#define MADERA_HP1L_FLWR_MASK 0x0004 +#define MADERA_HP1L_FLWR_SHIFT 2 +#define MADERA_HP1L_FLWR_WIDTH 1 +#define MADERA_HP1L_SHRTI 0x0002 +#define MADERA_HP1L_SHRTI_MASK 0x0002 +#define MADERA_HP1L_SHRTI_SHIFT 1 +#define MADERA_HP1L_SHRTI_WIDTH 1 +#define MADERA_HP1L_SHRTO 0x0001 +#define MADERA_HP1L_SHRTO_MASK 0x0001 +#define MADERA_HP1L_SHRTO_SHIFT 0 +#define MADERA_HP1L_SHRTO_WIDTH 1 + +/* (0x0226) - HP Ctrl 1R */ +#define MADERA_RMV_SHRT_HP1R 0x4000 +#define MADERA_RMV_SHRT_HP1R_MASK 0x4000 +#define MADERA_RMV_SHRT_HP1R_SHIFT 14 +#define MADERA_RMV_SHRT_HP1R_WIDTH 1 +#define MADERA_HP1R_FLWR 0x0004 +#define MADERA_HP1R_FLWR_MASK 0x0004 +#define MADERA_HP1R_FLWR_SHIFT 2 +#define MADERA_HP1R_FLWR_WIDTH 1 +#define MADERA_HP1R_SHRTI 0x0002 +#define MADERA_HP1R_SHRTI_MASK 0x0002 +#define MADERA_HP1R_SHRTI_SHIFT 1 +#define MADERA_HP1R_SHRTI_WIDTH 1 +#define MADERA_HP1R_SHRTO 0x0001 +#define MADERA_HP1R_SHRTO_MASK 0x0001 +#define MADERA_HP1R_SHRTO_SHIFT 0 +#define MADERA_HP1R_SHRTO_WIDTH 1 + +/* (0x0293) Accessory_Detect_Mode_1 */ +#define MADERA_ACCDET_SRC 0x2000 +#define MADERA_ACCDET_SRC_MASK 0x2000 +#define MADERA_ACCDET_SRC_SHIFT 13 +#define MADERA_ACCDET_SRC_WIDTH 1 +#define MADERA_ACCDET_POLARITY_INV_ENA 0x0080 +#define MADERA_ACCDET_POLARITY_INV_ENA_MASK 0x0080 +#define MADERA_ACCDET_POLARITY_INV_ENA_SHIFT 7 +#define MADERA_ACCDET_POLARITY_INV_ENA_WIDTH 1 +#define MADERA_ACCDET_MODE_MASK 0x0007 +#define MADERA_ACCDET_MODE_SHIFT 0 +#define MADERA_ACCDET_MODE_WIDTH 3 + +/* (0x0299) Headphone_Detect_0 */ +#define MADERA_HPD_GND_SEL 0x0007 +#define MADERA_HPD_GND_SEL_MASK 0x0007 +#define MADERA_HPD_GND_SEL_SHIFT 0 +#define MADERA_HPD_GND_SEL_WIDTH 3 +#define MADERA_HPD_SENSE_SEL 0x00F0 +#define MADERA_HPD_SENSE_SEL_MASK 0x00F0 +#define MADERA_HPD_SENSE_SEL_SHIFT 4 +#define MADERA_HPD_SENSE_SEL_WIDTH 4 +#define MADERA_HPD_FRC_SEL 0x0F00 +#define MADERA_HPD_FRC_SEL_MASK 0x0F00 +#define MADERA_HPD_FRC_SEL_SHIFT 8 +#define MADERA_HPD_FRC_SEL_WIDTH 4 +#define MADERA_HPD_OUT_SEL 0x7000 +#define MADERA_HPD_OUT_SEL_MASK 0x7000 +#define MADERA_HPD_OUT_SEL_SHIFT 12 +#define MADERA_HPD_OUT_SEL_WIDTH 3 +#define MADERA_HPD_OVD_ENA_SEL 0x8000 +#define MADERA_HPD_OVD_ENA_SEL_MASK 0x8000 +#define MADERA_HPD_OVD_ENA_SEL_SHIFT 15 +#define MADERA_HPD_OVD_ENA_SEL_WIDTH 1 + +/* (0x029B) Headphone_Detect_1 */ +#define MADERA_HP_IMPEDANCE_RANGE_MASK 0x0600 +#define MADERA_HP_IMPEDANCE_RANGE_SHIFT 9 +#define MADERA_HP_IMPEDANCE_RANGE_WIDTH 2 +#define MADERA_HP_STEP_SIZE 0x0100 +#define MADERA_HP_STEP_SIZE_MASK 0x0100 +#define MADERA_HP_STEP_SIZE_SHIFT 8 +#define MADERA_HP_STEP_SIZE_WIDTH 1 +#define MADERA_HP_CLK_DIV_MASK 0x0018 +#define MADERA_HP_CLK_DIV_SHIFT 3 +#define MADERA_HP_CLK_DIV_WIDTH 2 +#define MADERA_HP_RATE_MASK 0x0006 +#define MADERA_HP_RATE_SHIFT 1 +#define MADERA_HP_RATE_WIDTH 2 +#define MADERA_HP_POLL 0x0001 +#define MADERA_HP_POLL_MASK 0x0001 +#define MADERA_HP_POLL_SHIFT 0 +#define MADERA_HP_POLL_WIDTH 1 + +/* (0x029C) Headphone_Detect_2 */ +#define MADERA_HP_DONE_MASK 0x8000 +#define MADERA_HP_DONE_SHIFT 15 +#define MADERA_HP_DONE_WIDTH 1 +#define MADERA_HP_LVL_MASK 0x7FFF +#define MADERA_HP_LVL_SHIFT 0 +#define MADERA_HP_LVL_WIDTH 15 + +/* (0x029D) Headphone_Detect_3 */ +#define 
MADERA_HP_DACVAL_MASK 0x03FF +#define MADERA_HP_DACVAL_SHIFT 0 +#define MADERA_HP_DACVAL_WIDTH 10 + +/* (0x029F) - Headphone Detect 5 */ +#define MADERA_HP_DACVAL_DOWN_MASK 0x03FF +#define MADERA_HP_DACVAL_DOWN_SHIFT 0 +#define MADERA_HP_DACVAL_DOWN_WIDTH 10 + +/* (0x02A2) Mic_Detect_1_Control_0 */ +#define MADERA_MICD1_GND_MASK 0x0007 +#define MADERA_MICD1_GND_SHIFT 0 +#define MADERA_MICD1_GND_WIDTH 3 +#define MADERA_MICD1_SENSE_MASK 0x00F0 +#define MADERA_MICD1_SENSE_SHIFT 4 +#define MADERA_MICD1_SENSE_WIDTH 4 +#define MADERA_MICD1_ADC_MODE_MASK 0x8000 +#define MADERA_MICD1_ADC_MODE_SHIFT 15 +#define MADERA_MICD1_ADC_MODE_WIDTH 1 + +/* (0x02A3) Mic_Detect_1_Control_1 */ +#define MADERA_MICD_BIAS_STARTTIME_MASK 0xF000 +#define MADERA_MICD_BIAS_STARTTIME_SHIFT 12 +#define MADERA_MICD_BIAS_STARTTIME_WIDTH 4 +#define MADERA_MICD_RATE_MASK 0x0F00 +#define MADERA_MICD_RATE_SHIFT 8 +#define MADERA_MICD_RATE_WIDTH 4 +#define MADERA_MICD_BIAS_SRC_MASK 0x00F0 +#define MADERA_MICD_BIAS_SRC_SHIFT 4 +#define MADERA_MICD_BIAS_SRC_WIDTH 4 +#define MADERA_MICD_DBTIME 0x0002 +#define MADERA_MICD_DBTIME_MASK 0x0002 +#define MADERA_MICD_DBTIME_SHIFT 1 +#define MADERA_MICD_DBTIME_WIDTH 1 +#define MADERA_MICD_ENA 0x0001 +#define MADERA_MICD_ENA_MASK 0x0001 +#define MADERA_MICD_ENA_SHIFT 0 +#define MADERA_MICD_ENA_WIDTH 1 + +/* (0x02A4) Mic_Detect_1_Control_2 */ +#define MADERA_MICD_LVL_SEL_MASK 0x00FF +#define MADERA_MICD_LVL_SEL_SHIFT 0 +#define MADERA_MICD_LVL_SEL_WIDTH 8 + +/* (0x02A5) Mic_Detect_1_Control_3 */ +#define MADERA_MICD_LVL_0 0x0004 +#define MADERA_MICD_LVL_1 0x0008 +#define MADERA_MICD_LVL_2 0x0010 +#define MADERA_MICD_LVL_3 0x0020 +#define MADERA_MICD_LVL_4 0x0040 +#define MADERA_MICD_LVL_5 0x0080 +#define MADERA_MICD_LVL_6 0x0100 +#define MADERA_MICD_LVL_7 0x0200 +#define MADERA_MICD_LVL_8 0x0400 +#define MADERA_MICD_LVL_MASK 0x07FC +#define MADERA_MICD_LVL_SHIFT 2 +#define MADERA_MICD_LVL_WIDTH 9 +#define MADERA_MICD_VALID 0x0002 +#define MADERA_MICD_VALID_MASK 0x0002 +#define MADERA_MICD_VALID_SHIFT 1 +#define MADERA_MICD_VALID_WIDTH 1 +#define MADERA_MICD_STS 0x0001 +#define MADERA_MICD_STS_MASK 0x0001 +#define MADERA_MICD_STS_SHIFT 0 +#define MADERA_MICD_STS_WIDTH 1 + +/* (0x02AB) Mic_Detect_1_Control_4 */ +#define MADERA_MICDET_ADCVAL_DIFF_MASK 0xFF00 +#define MADERA_MICDET_ADCVAL_DIFF_SHIFT 8 +#define MADERA_MICDET_ADCVAL_DIFF_WIDTH 8 +#define MADERA_MICDET_ADCVAL_MASK 0x007F +#define MADERA_MICDET_ADCVAL_SHIFT 0 +#define MADERA_MICDET_ADCVAL_WIDTH 7 + +/* (0x02C6) Micd_Clamp_control */ +#define MADERA_MICD_CLAMP_OVD 0x0010 +#define MADERA_MICD_CLAMP_OVD_MASK 0x0010 +#define MADERA_MICD_CLAMP_OVD_SHIFT 4 +#define MADERA_MICD_CLAMP_OVD_WIDTH 1 +#define MADERA_MICD_CLAMP_MODE_MASK 0x000F +#define MADERA_MICD_CLAMP_MODE_SHIFT 0 +#define MADERA_MICD_CLAMP_MODE_WIDTH 4 + +/* (0x02C8) GP_Switch_1 */ +#define MADERA_SW2_MODE_MASK 0x000C +#define MADERA_SW2_MODE_SHIFT 2 +#define MADERA_SW2_MODE_WIDTH 2 +#define MADERA_SW1_MODE_MASK 0x0003 +#define MADERA_SW1_MODE_SHIFT 0 +#define MADERA_SW1_MODE_WIDTH 2 + +/* (0x02D3) Jack_detect_analogue */ +#define MADERA_JD2_ENA 0x0002 +#define MADERA_JD2_ENA_MASK 0x0002 +#define MADERA_JD2_ENA_SHIFT 1 +#define MADERA_JD2_ENA_WIDTH 1 +#define MADERA_JD1_ENA 0x0001 +#define MADERA_JD1_ENA_MASK 0x0001 +#define MADERA_JD1_ENA_SHIFT 0 +#define MADERA_JD1_ENA_WIDTH 1 + +/* (0x0300) Input_Enables */ +#define MADERA_IN6L_ENA 0x0800 +#define MADERA_IN6L_ENA_MASK 0x0800 +#define MADERA_IN6L_ENA_SHIFT 11 +#define MADERA_IN6L_ENA_WIDTH 1 +#define MADERA_IN6R_ENA 
0x0400 +#define MADERA_IN6R_ENA_MASK 0x0400 +#define MADERA_IN6R_ENA_SHIFT 10 +#define MADERA_IN6R_ENA_WIDTH 1 +#define MADERA_IN5L_ENA 0x0200 +#define MADERA_IN5L_ENA_MASK 0x0200 +#define MADERA_IN5L_ENA_SHIFT 9 +#define MADERA_IN5L_ENA_WIDTH 1 +#define MADERA_IN5R_ENA 0x0100 +#define MADERA_IN5R_ENA_MASK 0x0100 +#define MADERA_IN5R_ENA_SHIFT 8 +#define MADERA_IN5R_ENA_WIDTH 1 +#define MADERA_IN4L_ENA 0x0080 +#define MADERA_IN4L_ENA_MASK 0x0080 +#define MADERA_IN4L_ENA_SHIFT 7 +#define MADERA_IN4L_ENA_WIDTH 1 +#define MADERA_IN4R_ENA 0x0040 +#define MADERA_IN4R_ENA_MASK 0x0040 +#define MADERA_IN4R_ENA_SHIFT 6 +#define MADERA_IN4R_ENA_WIDTH 1 +#define MADERA_IN3L_ENA 0x0020 +#define MADERA_IN3L_ENA_MASK 0x0020 +#define MADERA_IN3L_ENA_SHIFT 5 +#define MADERA_IN3L_ENA_WIDTH 1 +#define MADERA_IN3R_ENA 0x0010 +#define MADERA_IN3R_ENA_MASK 0x0010 +#define MADERA_IN3R_ENA_SHIFT 4 +#define MADERA_IN3R_ENA_WIDTH 1 +#define MADERA_IN2L_ENA 0x0008 +#define MADERA_IN2L_ENA_MASK 0x0008 +#define MADERA_IN2L_ENA_SHIFT 3 +#define MADERA_IN2L_ENA_WIDTH 1 +#define MADERA_IN2R_ENA 0x0004 +#define MADERA_IN2R_ENA_MASK 0x0004 +#define MADERA_IN2R_ENA_SHIFT 2 +#define MADERA_IN2R_ENA_WIDTH 1 +#define MADERA_IN1L_ENA 0x0002 +#define MADERA_IN1L_ENA_MASK 0x0002 +#define MADERA_IN1L_ENA_SHIFT 1 +#define MADERA_IN1L_ENA_WIDTH 1 +#define MADERA_IN1R_ENA 0x0001 +#define MADERA_IN1R_ENA_MASK 0x0001 +#define MADERA_IN1R_ENA_SHIFT 0 +#define MADERA_IN1R_ENA_WIDTH 1 + +/* (0x0308) Input_Rate */ +#define MADERA_IN_RATE_MASK 0xF800 +#define MADERA_IN_RATE_SHIFT 11 +#define MADERA_IN_RATE_WIDTH 5 +#define MADERA_IN_MODE_MASK 0x0400 +#define MADERA_IN_MODE_SHIFT 10 +#define MADERA_IN_MODE_WIDTH 1 + +/* (0x0309) Input_Volume_Ramp */ +#define MADERA_IN_VD_RAMP_MASK 0x0070 +#define MADERA_IN_VD_RAMP_SHIFT 4 +#define MADERA_IN_VD_RAMP_WIDTH 3 +#define MADERA_IN_VI_RAMP_MASK 0x0007 +#define MADERA_IN_VI_RAMP_SHIFT 0 +#define MADERA_IN_VI_RAMP_WIDTH 3 + +/* (0x030C) HPF_Control */ +#define MADERA_IN_HPF_CUT_MASK 0x0007 +#define MADERA_IN_HPF_CUT_SHIFT 0 +#define MADERA_IN_HPF_CUT_WIDTH 3 + +/* (0x0310) IN1L_Control */ +#define MADERA_IN1L_HPF_MASK 0x8000 +#define MADERA_IN1L_HPF_SHIFT 15 +#define MADERA_IN1L_HPF_WIDTH 1 +#define MADERA_IN1_DMIC_SUP_MASK 0x1800 +#define MADERA_IN1_DMIC_SUP_SHIFT 11 +#define MADERA_IN1_DMIC_SUP_WIDTH 2 +#define MADERA_IN1_MODE_MASK 0x0400 +#define MADERA_IN1_MODE_SHIFT 10 +#define MADERA_IN1_MODE_WIDTH 1 +#define MADERA_IN1L_PGA_VOL_MASK 0x00FE +#define MADERA_IN1L_PGA_VOL_SHIFT 1 +#define MADERA_IN1L_PGA_VOL_WIDTH 7 + +/* (0x0311) ADC_Digital_Volume_1L */ +#define MADERA_IN1L_SRC_MASK 0x4000 +#define MADERA_IN1L_SRC_SHIFT 14 +#define MADERA_IN1L_SRC_WIDTH 1 +#define MADERA_IN1L_SRC_SE_MASK 0x2000 +#define MADERA_IN1L_SRC_SE_SHIFT 13 +#define MADERA_IN1L_SRC_SE_WIDTH 1 +#define MADERA_IN1L_LP_MODE 0x0800 +#define MADERA_IN1L_LP_MODE_MASK 0x0800 +#define MADERA_IN1L_LP_MODE_SHIFT 11 +#define MADERA_IN1L_LP_MODE_WIDTH 1 +#define MADERA_IN_VU 0x0200 +#define MADERA_IN_VU_MASK 0x0200 +#define MADERA_IN_VU_SHIFT 9 +#define MADERA_IN_VU_WIDTH 1 +#define MADERA_IN1L_MUTE 0x0100 +#define MADERA_IN1L_MUTE_MASK 0x0100 +#define MADERA_IN1L_MUTE_SHIFT 8 +#define MADERA_IN1L_MUTE_WIDTH 1 +#define MADERA_IN1L_DIG_VOL_MASK 0x00FF +#define MADERA_IN1L_DIG_VOL_SHIFT 0 +#define MADERA_IN1L_DIG_VOL_WIDTH 8 + +/* (0x0312) DMIC1L_Control */ +#define MADERA_IN1_OSR_MASK 0x0700 +#define MADERA_IN1_OSR_SHIFT 8 +#define MADERA_IN1_OSR_WIDTH 3 + +/* (0x0313) IN1L_Rate_Control */ +#define MADERA_IN1L_RATE_MASK 0xF800 
+#define MADERA_IN1L_RATE_SHIFT 11 +#define MADERA_IN1L_RATE_WIDTH 5 + +/* (0x0314) IN1R_Control */ +#define MADERA_IN1R_HPF_MASK 0x8000 +#define MADERA_IN1R_HPF_SHIFT 15 +#define MADERA_IN1R_HPF_WIDTH 1 +#define MADERA_IN1R_PGA_VOL_MASK 0x00FE +#define MADERA_IN1R_PGA_VOL_SHIFT 1 +#define MADERA_IN1R_PGA_VOL_WIDTH 7 +#define MADERA_IN1_DMICCLK_SRC_MASK 0x1800 +#define MADERA_IN1_DMICCLK_SRC_SHIFT 11 +#define MADERA_IN1_DMICCLK_SRC_WIDTH 2 + +/* (0x0315) ADC_Digital_Volume_1R */ +#define MADERA_IN1R_SRC_MASK 0x4000 +#define MADERA_IN1R_SRC_SHIFT 14 +#define MADERA_IN1R_SRC_WIDTH 1 +#define MADERA_IN1R_SRC_SE_MASK 0x2000 +#define MADERA_IN1R_SRC_SE_SHIFT 13 +#define MADERA_IN1R_SRC_SE_WIDTH 1 +#define MADERA_IN1R_LP_MODE 0x0800 +#define MADERA_IN1R_LP_MODE_MASK 0x0800 +#define MADERA_IN1R_LP_MODE_SHIFT 11 +#define MADERA_IN1R_LP_MODE_WIDTH 1 +#define MADERA_IN1R_MUTE 0x0100 +#define MADERA_IN1R_MUTE_MASK 0x0100 +#define MADERA_IN1R_MUTE_SHIFT 8 +#define MADERA_IN1R_MUTE_WIDTH 1 +#define MADERA_IN1R_DIG_VOL_MASK 0x00FF +#define MADERA_IN1R_DIG_VOL_SHIFT 0 +#define MADERA_IN1R_DIG_VOL_WIDTH 8 + +/* (0x0317) IN1R_Rate_Control */ +#define MADERA_IN1R_RATE_MASK 0xF800 +#define MADERA_IN1R_RATE_SHIFT 11 +#define MADERA_IN1R_RATE_WIDTH 5 + +/* (0x0318) IN2L_Control */ +#define MADERA_IN2L_HPF_MASK 0x8000 +#define MADERA_IN2L_HPF_SHIFT 15 +#define MADERA_IN2L_HPF_WIDTH 1 +#define MADERA_IN2_DMIC_SUP_MASK 0x1800 +#define MADERA_IN2_DMIC_SUP_SHIFT 11 +#define MADERA_IN2_DMIC_SUP_WIDTH 2 +#define MADERA_IN2_MODE_MASK 0x0400 +#define MADERA_IN2_MODE_SHIFT 10 +#define MADERA_IN2_MODE_WIDTH 1 +#define MADERA_IN2L_PGA_VOL_MASK 0x00FE +#define MADERA_IN2L_PGA_VOL_SHIFT 1 +#define MADERA_IN2L_PGA_VOL_WIDTH 7 + +/* (0x0319) ADC_Digital_Volume_2L */ +#define MADERA_IN2L_SRC_MASK 0x4000 +#define MADERA_IN2L_SRC_SHIFT 14 +#define MADERA_IN2L_SRC_WIDTH 1 +#define MADERA_IN2L_SRC_SE_MASK 0x2000 +#define MADERA_IN2L_SRC_SE_SHIFT 13 +#define MADERA_IN2L_SRC_SE_WIDTH 1 +#define MADERA_IN2L_LP_MODE 0x0800 +#define MADERA_IN2L_LP_MODE_MASK 0x0800 +#define MADERA_IN2L_LP_MODE_SHIFT 11 +#define MADERA_IN2L_LP_MODE_WIDTH 1 +#define MADERA_IN2L_MUTE 0x0100 +#define MADERA_IN2L_MUTE_MASK 0x0100 +#define MADERA_IN2L_MUTE_SHIFT 8 +#define MADERA_IN2L_MUTE_WIDTH 1 +#define MADERA_IN2L_DIG_VOL_MASK 0x00FF +#define MADERA_IN2L_DIG_VOL_SHIFT 0 +#define MADERA_IN2L_DIG_VOL_WIDTH 8 + +/* (0x031A) DMIC2L_Control */ +#define MADERA_IN2_OSR_MASK 0x0700 +#define MADERA_IN2_OSR_SHIFT 8 +#define MADERA_IN2_OSR_WIDTH 3 + +/* (0x031C) IN2R_Control */ +#define MADERA_IN2R_HPF_MASK 0x8000 +#define MADERA_IN2R_HPF_SHIFT 15 +#define MADERA_IN2R_HPF_WIDTH 1 +#define MADERA_IN2R_PGA_VOL_MASK 0x00FE +#define MADERA_IN2R_PGA_VOL_SHIFT 1 +#define MADERA_IN2R_PGA_VOL_WIDTH 7 +#define MADERA_IN2_DMICCLK_SRC_MASK 0x1800 +#define MADERA_IN2_DMICCLK_SRC_SHIFT 11 +#define MADERA_IN2_DMICCLK_SRC_WIDTH 2 + +/* (0x031D) ADC_Digital_Volume_2R */ +#define MADERA_IN2R_SRC_MASK 0x4000 +#define MADERA_IN2R_SRC_SHIFT 14 +#define MADERA_IN2R_SRC_WIDTH 1 +#define MADERA_IN2R_SRC_SE_MASK 0x2000 +#define MADERA_IN2R_SRC_SE_SHIFT 13 +#define MADERA_IN2R_SRC_SE_WIDTH 1 +#define MADERA_IN2R_LP_MODE 0x0800 +#define MADERA_IN2R_LP_MODE_MASK 0x0800 +#define MADERA_IN2R_LP_MODE_SHIFT 11 +#define MADERA_IN2R_LP_MODE_WIDTH 1 +#define MADERA_IN2R_MUTE 0x0100 +#define MADERA_IN2R_MUTE_MASK 0x0100 +#define MADERA_IN2R_MUTE_SHIFT 8 +#define MADERA_IN2R_MUTE_WIDTH 1 +#define MADERA_IN2R_DIG_VOL_MASK 0x00FF +#define MADERA_IN2R_DIG_VOL_SHIFT 0 +#define MADERA_IN2R_DIG_VOL_WIDTH 8 
+ +/* (0x0320) IN3L_Control */ +#define MADERA_IN3L_HPF_MASK 0x8000 +#define MADERA_IN3L_HPF_SHIFT 15 +#define MADERA_IN3L_HPF_WIDTH 1 +#define MADERA_IN3_DMIC_SUP_MASK 0x1800 +#define MADERA_IN3_DMIC_SUP_SHIFT 11 +#define MADERA_IN3_DMIC_SUP_WIDTH 2 +#define MADERA_IN3_MODE_MASK 0x0400 +#define MADERA_IN3_MODE_SHIFT 10 +#define MADERA_IN3_MODE_WIDTH 1 +#define MADERA_IN3L_PGA_VOL_MASK 0x00FE +#define MADERA_IN3L_PGA_VOL_SHIFT 1 +#define MADERA_IN3L_PGA_VOL_WIDTH 7 + +/* (0x0321) ADC_Digital_Volume_3L */ +#define MADERA_IN3L_MUTE 0x0100 +#define MADERA_IN3L_MUTE_MASK 0x0100 +#define MADERA_IN3L_MUTE_SHIFT 8 +#define MADERA_IN3L_MUTE_WIDTH 1 +#define MADERA_IN3L_DIG_VOL_MASK 0x00FF +#define MADERA_IN3L_DIG_VOL_SHIFT 0 +#define MADERA_IN3L_DIG_VOL_WIDTH 8 + +/* (0x0322) DMIC3L_Control */ +#define MADERA_IN3_OSR_MASK 0x0700 +#define MADERA_IN3_OSR_SHIFT 8 +#define MADERA_IN3_OSR_WIDTH 3 + +/* (0x0324) IN3R_Control */ +#define MADERA_IN3R_HPF_MASK 0x8000 +#define MADERA_IN3R_HPF_SHIFT 15 +#define MADERA_IN3R_HPF_WIDTH 1 +#define MADERA_IN3R_PGA_VOL_MASK 0x00FE +#define MADERA_IN3R_PGA_VOL_SHIFT 1 +#define MADERA_IN3R_PGA_VOL_WIDTH 7 +#define MADERA_IN3_DMICCLK_SRC_MASK 0x1800 +#define MADERA_IN3_DMICCLK_SRC_SHIFT 11 +#define MADERA_IN3_DMICCLK_SRC_WIDTH 2 + +/* (0x0325) ADC_Digital_Volume_3R */ +#define MADERA_IN3R_MUTE 0x0100 +#define MADERA_IN3R_MUTE_MASK 0x0100 +#define MADERA_IN3R_MUTE_SHIFT 8 +#define MADERA_IN3R_MUTE_WIDTH 1 +#define MADERA_IN3R_DIG_VOL_MASK 0x00FF +#define MADERA_IN3R_DIG_VOL_SHIFT 0 +#define MADERA_IN3R_DIG_VOL_WIDTH 8 + +/* (0x0328) IN4L_Control */ +#define MADERA_IN4L_HPF_MASK 0x8000 +#define MADERA_IN4L_HPF_SHIFT 15 +#define MADERA_IN4L_HPF_WIDTH 1 +#define MADERA_IN4_DMIC_SUP_MASK 0x1800 +#define MADERA_IN4_DMIC_SUP_SHIFT 11 +#define MADERA_IN4_DMIC_SUP_WIDTH 2 + +/* (0x0329) ADC_Digital_Volume_4L */ +#define MADERA_IN4L_MUTE 0x0100 +#define MADERA_IN4L_MUTE_MASK 0x0100 +#define MADERA_IN4L_MUTE_SHIFT 8 +#define MADERA_IN4L_MUTE_WIDTH 1 +#define MADERA_IN4L_DIG_VOL_MASK 0x00FF +#define MADERA_IN4L_DIG_VOL_SHIFT 0 +#define MADERA_IN4L_DIG_VOL_WIDTH 8 + +/* (0x032A) DMIC4L_Control */ +#define MADERA_IN4_OSR_MASK 0x0700 +#define MADERA_IN4_OSR_SHIFT 8 +#define MADERA_IN4_OSR_WIDTH 3 + +/* (0x032C) IN4R_Control */ +#define MADERA_IN4R_HPF_MASK 0x8000 +#define MADERA_IN4R_HPF_SHIFT 15 +#define MADERA_IN4R_HPF_WIDTH 1 +#define MADERA_IN4_DMICCLK_SRC_MASK 0x1800 +#define MADERA_IN4_DMICCLK_SRC_SHIFT 11 +#define MADERA_IN4_DMICCLK_SRC_WIDTH 2 + +/* (0x032D) ADC_Digital_Volume_4R */ +#define MADERA_IN4R_MUTE 0x0100 +#define MADERA_IN4R_MUTE_MASK 0x0100 +#define MADERA_IN4R_MUTE_SHIFT 8 +#define MADERA_IN4R_MUTE_WIDTH 1 +#define MADERA_IN4R_DIG_VOL_MASK 0x00FF +#define MADERA_IN4R_DIG_VOL_SHIFT 0 +#define MADERA_IN4R_DIG_VOL_WIDTH 8 + +/* (0x0330) IN5L_Control */ +#define MADERA_IN5L_HPF_MASK 0x8000 +#define MADERA_IN5L_HPF_SHIFT 15 +#define MADERA_IN5L_HPF_WIDTH 1 +#define MADERA_IN5_DMIC_SUP_MASK 0x1800 +#define MADERA_IN5_DMIC_SUP_SHIFT 11 +#define MADERA_IN5_DMIC_SUP_WIDTH 2 + +/* (0x0331) ADC_Digital_Volume_5L */ +#define MADERA_IN5L_MUTE 0x0100 +#define MADERA_IN5L_MUTE_MASK 0x0100 +#define MADERA_IN5L_MUTE_SHIFT 8 +#define MADERA_IN5L_MUTE_WIDTH 1 +#define MADERA_IN5L_DIG_VOL_MASK 0x00FF +#define MADERA_IN5L_DIG_VOL_SHIFT 0 +#define MADERA_IN5L_DIG_VOL_WIDTH 8 + +/* (0x0332) DMIC5L_Control */ +#define MADERA_IN5_OSR_MASK 0x0700 +#define MADERA_IN5_OSR_SHIFT 8 +#define MADERA_IN5_OSR_WIDTH 3 + +/* (0x0334) IN5R_Control */ +#define MADERA_IN5R_HPF_MASK 0x8000 +#define 
MADERA_IN5R_HPF_SHIFT 15 +#define MADERA_IN5R_HPF_WIDTH 1 +#define MADERA_IN5_DMICCLK_SRC_MASK 0x1800 +#define MADERA_IN5_DMICCLK_SRC_SHIFT 11 +#define MADERA_IN5_DMICCLK_SRC_WIDTH 2 + +/* (0x0335) ADC_Digital_Volume_5R */ +#define MADERA_IN5R_MUTE 0x0100 +#define MADERA_IN5R_MUTE_MASK 0x0100 +#define MADERA_IN5R_MUTE_SHIFT 8 +#define MADERA_IN5R_MUTE_WIDTH 1 +#define MADERA_IN5R_DIG_VOL_MASK 0x00FF +#define MADERA_IN5R_DIG_VOL_SHIFT 0 +#define MADERA_IN5R_DIG_VOL_WIDTH 8 + +/* (0x0338) IN6L_Control */ +#define MADERA_IN6L_HPF_MASK 0x8000 +#define MADERA_IN6L_HPF_SHIFT 15 +#define MADERA_IN6L_HPF_WIDTH 1 +#define MADERA_IN6_DMIC_SUP_MASK 0x1800 +#define MADERA_IN6_DMIC_SUP_SHIFT 11 +#define MADERA_IN6_DMIC_SUP_WIDTH 2 + +/* (0x0339) ADC_Digital_Volume_6L */ +#define MADERA_IN6L_MUTE 0x0100 +#define MADERA_IN6L_MUTE_MASK 0x0100 +#define MADERA_IN6L_MUTE_SHIFT 8 +#define MADERA_IN6L_MUTE_WIDTH 1 +#define MADERA_IN6L_DIG_VOL_MASK 0x00FF +#define MADERA_IN6L_DIG_VOL_SHIFT 0 +#define MADERA_IN6L_DIG_VOL_WIDTH 8 + +/* (0x033A) DMIC6L_Control */ +#define MADERA_IN6_OSR_MASK 0x0700 +#define MADERA_IN6_OSR_SHIFT 8 +#define MADERA_IN6_OSR_WIDTH 3 + +/* (0x033C) IN6R_Control */ +#define MADERA_IN6R_HPF_MASK 0x8000 +#define MADERA_IN6R_HPF_SHIFT 15 +#define MADERA_IN6R_HPF_WIDTH 1 + +/* (0x033D) ADC_Digital_Volume_6R */ +#define MADERA_IN6R_MUTE 0x0100 +#define MADERA_IN6R_MUTE_MASK 0x0100 +#define MADERA_IN6R_MUTE_SHIFT 8 +#define MADERA_IN6R_MUTE_WIDTH 1 +#define MADERA_IN6R_DIG_VOL_MASK 0x00FF +#define MADERA_IN6R_DIG_VOL_SHIFT 0 +#define MADERA_IN6R_DIG_VOL_WIDTH 8 + +/* (0x033E) DMIC6R_Control */ +#define MADERA_IN6_DMICCLK_SRC_MASK 0x1800 +#define MADERA_IN6_DMICCLK_SRC_SHIFT 11 +#define MADERA_IN6_DMICCLK_SRC_WIDTH 2 + +/* (0x0400) Output_Enables_1 */ +#define MADERA_EP_SEL 0x8000 +#define MADERA_EP_SEL_MASK 0x8000 +#define MADERA_EP_SEL_SHIFT 15 +#define MADERA_EP_SEL_WIDTH 1 +#define MADERA_OUT6L_ENA 0x0800 +#define MADERA_OUT6L_ENA_MASK 0x0800 +#define MADERA_OUT6L_ENA_SHIFT 11 +#define MADERA_OUT6L_ENA_WIDTH 1 +#define MADERA_OUT6R_ENA 0x0400 +#define MADERA_OUT6R_ENA_MASK 0x0400 +#define MADERA_OUT6R_ENA_SHIFT 10 +#define MADERA_OUT6R_ENA_WIDTH 1 +#define MADERA_OUT5L_ENA 0x0200 +#define MADERA_OUT5L_ENA_MASK 0x0200 +#define MADERA_OUT5L_ENA_SHIFT 9 +#define MADERA_OUT5L_ENA_WIDTH 1 +#define MADERA_OUT5R_ENA 0x0100 +#define MADERA_OUT5R_ENA_MASK 0x0100 +#define MADERA_OUT5R_ENA_SHIFT 8 +#define MADERA_OUT5R_ENA_WIDTH 1 +#define MADERA_OUT4L_ENA 0x0080 +#define MADERA_OUT4L_ENA_MASK 0x0080 +#define MADERA_OUT4L_ENA_SHIFT 7 +#define MADERA_OUT4L_ENA_WIDTH 1 +#define MADERA_OUT4R_ENA 0x0040 +#define MADERA_OUT4R_ENA_MASK 0x0040 +#define MADERA_OUT4R_ENA_SHIFT 6 +#define MADERA_OUT4R_ENA_WIDTH 1 +#define MADERA_OUT3L_ENA 0x0020 +#define MADERA_OUT3L_ENA_MASK 0x0020 +#define MADERA_OUT3L_ENA_SHIFT 5 +#define MADERA_OUT3L_ENA_WIDTH 1 +#define MADERA_OUT3R_ENA 0x0010 +#define MADERA_OUT3R_ENA_MASK 0x0010 +#define MADERA_OUT3R_ENA_SHIFT 4 +#define MADERA_OUT3R_ENA_WIDTH 1 +#define MADERA_OUT2L_ENA 0x0008 +#define MADERA_OUT2L_ENA_MASK 0x0008 +#define MADERA_OUT2L_ENA_SHIFT 3 +#define MADERA_OUT2L_ENA_WIDTH 1 +#define MADERA_OUT2R_ENA 0x0004 +#define MADERA_OUT2R_ENA_MASK 0x0004 +#define MADERA_OUT2R_ENA_SHIFT 2 +#define MADERA_OUT2R_ENA_WIDTH 1 +#define MADERA_OUT1L_ENA 0x0002 +#define MADERA_OUT1L_ENA_MASK 0x0002 +#define MADERA_OUT1L_ENA_SHIFT 1 +#define MADERA_OUT1L_ENA_WIDTH 1 +#define MADERA_OUT1R_ENA 0x0001 +#define MADERA_OUT1R_ENA_MASK 0x0001 +#define MADERA_OUT1R_ENA_SHIFT 0 +#define 
MADERA_OUT1R_ENA_WIDTH 1 + +/* (0x0409) Output_Volume_Ramp */ +#define MADERA_OUT_VD_RAMP_MASK 0x0070 +#define MADERA_OUT_VD_RAMP_SHIFT 4 +#define MADERA_OUT_VD_RAMP_WIDTH 3 +#define MADERA_OUT_VI_RAMP_MASK 0x0007 +#define MADERA_OUT_VI_RAMP_SHIFT 0 +#define MADERA_OUT_VI_RAMP_WIDTH 3 + +/* (0x0410) Output_Path_Config_1L */ +#define MADERA_OUT1_MONO 0x1000 +#define MADERA_OUT1_MONO_MASK 0x1000 +#define MADERA_OUT1_MONO_SHIFT 12 +#define MADERA_OUT1_MONO_WIDTH 1 +#define MADERA_OUT1L_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT1L_ANC_SRC_SHIFT 10 +#define MADERA_OUT1L_ANC_SRC_WIDTH 2 + +/* (0x0411) DAC_Digital_Volume_1L */ +#define MADERA_OUT1L_VU 0x0200 +#define MADERA_OUT1L_VU_MASK 0x0200 +#define MADERA_OUT1L_VU_SHIFT 9 +#define MADERA_OUT1L_VU_WIDTH 1 +#define MADERA_OUT1L_MUTE 0x0100 +#define MADERA_OUT1L_MUTE_MASK 0x0100 +#define MADERA_OUT1L_MUTE_SHIFT 8 +#define MADERA_OUT1L_MUTE_WIDTH 1 +#define MADERA_OUT1L_VOL_MASK 0x00FF +#define MADERA_OUT1L_VOL_SHIFT 0 +#define MADERA_OUT1L_VOL_WIDTH 8 + +/* (0x0412) Output_Path_Config_1 */ +#define MADERA_HP1_GND_SEL_MASK 0x0007 +#define MADERA_HP1_GND_SEL_SHIFT 0 +#define MADERA_HP1_GND_SEL_WIDTH 3 + +/* (0x0414) Output_Path_Config_1R */ +#define MADERA_OUT1R_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT1R_ANC_SRC_SHIFT 10 +#define MADERA_OUT1R_ANC_SRC_WIDTH 2 + +/* (0x0415) DAC_Digital_Volume_1R */ +#define MADERA_OUT1R_MUTE 0x0100 +#define MADERA_OUT1R_MUTE_MASK 0x0100 +#define MADERA_OUT1R_MUTE_SHIFT 8 +#define MADERA_OUT1R_MUTE_WIDTH 1 +#define MADERA_OUT1R_VOL_MASK 0x00FF +#define MADERA_OUT1R_VOL_SHIFT 0 +#define MADERA_OUT1R_VOL_WIDTH 8 + +/* (0x0418) Output_Path_Config_2L */ +#define MADERA_OUT2L_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT2L_ANC_SRC_SHIFT 10 +#define MADERA_OUT2L_ANC_SRC_WIDTH 2 + +/* (0x0419) DAC_Digital_Volume_2L */ +#define MADERA_OUT2L_MUTE 0x0100 +#define MADERA_OUT2L_MUTE_MASK 0x0100 +#define MADERA_OUT2L_MUTE_SHIFT 8 +#define MADERA_OUT2L_MUTE_WIDTH 1 +#define MADERA_OUT2L_VOL_MASK 0x00FF +#define MADERA_OUT2L_VOL_SHIFT 0 +#define MADERA_OUT2L_VOL_WIDTH 8 + +/* (0x041A) Output_Path_Config_2 */ +#define MADERA_HP2_GND_SEL_MASK 0x0007 +#define MADERA_HP2_GND_SEL_SHIFT 0 +#define MADERA_HP2_GND_SEL_WIDTH 3 + +/* (0x041C) Output_Path_Config_2R */ +#define MADERA_OUT2R_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT2R_ANC_SRC_SHIFT 10 +#define MADERA_OUT2R_ANC_SRC_WIDTH 2 + +/* (0x041D) DAC_Digital_Volume_2R */ +#define MADERA_OUT2R_MUTE 0x0100 +#define MADERA_OUT2R_MUTE_MASK 0x0100 +#define MADERA_OUT2R_MUTE_SHIFT 8 +#define MADERA_OUT2R_MUTE_WIDTH 1 +#define MADERA_OUT2R_VOL_MASK 0x00FF +#define MADERA_OUT2R_VOL_SHIFT 0 +#define MADERA_OUT2R_VOL_WIDTH 8 + +/* (0x0420) Output_Path_Config_3L */ +#define MADERA_OUT3L_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT3L_ANC_SRC_SHIFT 10 +#define MADERA_OUT3L_ANC_SRC_WIDTH 2 + +/* (0x0421) DAC_Digital_Volume_3L */ +#define MADERA_OUT3L_MUTE 0x0100 +#define MADERA_OUT3L_MUTE_MASK 0x0100 +#define MADERA_OUT3L_MUTE_SHIFT 8 +#define MADERA_OUT3L_MUTE_WIDTH 1 +#define MADERA_OUT3L_VOL_MASK 0x00FF +#define MADERA_OUT3L_VOL_SHIFT 0 +#define MADERA_OUT3L_VOL_WIDTH 8 + +/* (0x0424) Output_Path_Config_3R */ +#define MADERA_OUT3R_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT3R_ANC_SRC_SHIFT 10 +#define MADERA_OUT3R_ANC_SRC_WIDTH 2 + +/* (0x0425) DAC_Digital_Volume_3R */ +#define MADERA_OUT3R_MUTE 0x0100 +#define MADERA_OUT3R_MUTE_MASK 0x0100 +#define MADERA_OUT3R_MUTE_SHIFT 8 +#define MADERA_OUT3R_MUTE_WIDTH 1 +#define MADERA_OUT3R_VOL_MASK 0x00FF +#define MADERA_OUT3R_VOL_SHIFT 0 +#define MADERA_OUT3R_VOL_WIDTH 8 + 
+/* (0x0428) Output_Path_Config_4L */ +#define MADERA_OUT4L_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT4L_ANC_SRC_SHIFT 10 +#define MADERA_OUT4L_ANC_SRC_WIDTH 2 + +/* (0x0429) DAC_Digital_Volume_4L */ +#define MADERA_OUT4L_MUTE 0x0100 +#define MADERA_OUT4L_MUTE_MASK 0x0100 +#define MADERA_OUT4L_MUTE_SHIFT 8 +#define MADERA_OUT4L_MUTE_WIDTH 1 +#define MADERA_OUT4L_VOL_MASK 0x00FF +#define MADERA_OUT4L_VOL_SHIFT 0 +#define MADERA_OUT4L_VOL_WIDTH 8 + +/* (0x042C) Output_Path_Config_4R */ +#define MADERA_OUT4R_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT4R_ANC_SRC_SHIFT 10 +#define MADERA_OUT4R_ANC_SRC_WIDTH 2 + +/* (0x042D) DAC_Digital_Volume_4R */ +#define MADERA_OUT4R_MUTE 0x0100 +#define MADERA_OUT4R_MUTE_MASK 0x0100 +#define MADERA_OUT4R_MUTE_SHIFT 8 +#define MADERA_OUT4R_MUTE_WIDTH 1 +#define MADERA_OUT4R_VOL_MASK 0x00FF +#define MADERA_OUT4R_VOL_SHIFT 0 +#define MADERA_OUT4R_VOL_WIDTH 8 + +/* (0x0430) Output_Path_Config_5L */ +#define MADERA_OUT5_OSR 0x2000 +#define MADERA_OUT5_OSR_MASK 0x2000 +#define MADERA_OUT5_OSR_SHIFT 13 +#define MADERA_OUT5_OSR_WIDTH 1 +#define MADERA_OUT5L_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT5L_ANC_SRC_SHIFT 10 +#define MADERA_OUT5L_ANC_SRC_WIDTH 2 + +/* (0x0431) DAC_Digital_Volume_5L */ +#define MADERA_OUT5L_MUTE 0x0100 +#define MADERA_OUT5L_MUTE_MASK 0x0100 +#define MADERA_OUT5L_MUTE_SHIFT 8 +#define MADERA_OUT5L_MUTE_WIDTH 1 +#define MADERA_OUT5L_VOL_MASK 0x00FF +#define MADERA_OUT5L_VOL_SHIFT 0 +#define MADERA_OUT5L_VOL_WIDTH 8 + +/* (0x0434) Output_Path_Config_5R */ +#define MADERA_OUT5R_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT5R_ANC_SRC_SHIFT 10 +#define MADERA_OUT5R_ANC_SRC_WIDTH 2 + +/* (0x0435) DAC_Digital_Volume_5R */ +#define MADERA_OUT5R_MUTE 0x0100 +#define MADERA_OUT5R_MUTE_MASK 0x0100 +#define MADERA_OUT5R_MUTE_SHIFT 8 +#define MADERA_OUT5R_MUTE_WIDTH 1 +#define MADERA_OUT5R_VOL_MASK 0x00FF +#define MADERA_OUT5R_VOL_SHIFT 0 +#define MADERA_OUT5R_VOL_WIDTH 8 + +/* (0x0438) Output_Path_Config_6L */ +#define MADERA_OUT6_OSR 0x2000 +#define MADERA_OUT6_OSR_MASK 0x2000 +#define MADERA_OUT6_OSR_SHIFT 13 +#define MADERA_OUT6_OSR_WIDTH 1 +#define MADERA_OUT6L_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT6L_ANC_SRC_SHIFT 10 +#define MADERA_OUT6L_ANC_SRC_WIDTH 2 + +/* (0x0439) DAC_Digital_Volume_6L */ +#define MADERA_OUT6L_MUTE 0x0100 +#define MADERA_OUT6L_MUTE_MASK 0x0100 +#define MADERA_OUT6L_MUTE_SHIFT 8 +#define MADERA_OUT6L_MUTE_WIDTH 1 +#define MADERA_OUT6L_VOL_MASK 0x00FF +#define MADERA_OUT6L_VOL_SHIFT 0 +#define MADERA_OUT6L_VOL_WIDTH 8 + +/* (0x043C) Output_Path_Config_6R */ +#define MADERA_OUT6R_ANC_SRC_MASK 0x0C00 +#define MADERA_OUT6R_ANC_SRC_SHIFT 10 +#define MADERA_OUT6R_ANC_SRC_WIDTH 2 + +/* (0x043D) DAC_Digital_Volume_6R */ +#define MADERA_OUT6R_MUTE 0x0100 +#define MADERA_OUT6R_MUTE_MASK 0x0100 +#define MADERA_OUT6R_MUTE_SHIFT 8 +#define MADERA_OUT6R_MUTE_WIDTH 1 +#define MADERA_OUT6R_VOL_MASK 0x00FF +#define MADERA_OUT6R_VOL_SHIFT 0 +#define MADERA_OUT6R_VOL_WIDTH 8 + +/* (0x0450) - DAC AEC Control 1 */ +#define MADERA_AEC1_LOOPBACK_SRC_MASK 0x003C +#define MADERA_AEC1_LOOPBACK_SRC_SHIFT 2 +#define MADERA_AEC1_LOOPBACK_SRC_WIDTH 4 +#define MADERA_AEC1_ENA_STS 0x0002 +#define MADERA_AEC1_ENA_STS_MASK 0x0002 +#define MADERA_AEC1_ENA_STS_SHIFT 1 +#define MADERA_AEC1_ENA_STS_WIDTH 1 +#define MADERA_AEC1_LOOPBACK_ENA 0x0001 +#define MADERA_AEC1_LOOPBACK_ENA_MASK 0x0001 +#define MADERA_AEC1_LOOPBACK_ENA_SHIFT 0 +#define MADERA_AEC1_LOOPBACK_ENA_WIDTH 1 + +/* (0x0451) DAC_AEC_Control_2 */ +#define MADERA_AEC2_LOOPBACK_SRC_MASK 0x003C +#define 
MADERA_AEC2_LOOPBACK_SRC_SHIFT 2 +#define MADERA_AEC2_LOOPBACK_SRC_WIDTH 4 +#define MADERA_AEC2_ENA_STS 0x0002 +#define MADERA_AEC2_ENA_STS_MASK 0x0002 +#define MADERA_AEC2_ENA_STS_SHIFT 1 +#define MADERA_AEC2_ENA_STS_WIDTH 1 +#define MADERA_AEC2_LOOPBACK_ENA 0x0001 +#define MADERA_AEC2_LOOPBACK_ENA_MASK 0x0001 +#define MADERA_AEC2_LOOPBACK_ENA_SHIFT 0 +#define MADERA_AEC2_LOOPBACK_ENA_WIDTH 1 + +/* (0x0458) Noise_Gate_Control */ +#define MADERA_NGATE_HOLD_MASK 0x0030 +#define MADERA_NGATE_HOLD_SHIFT 4 +#define MADERA_NGATE_HOLD_WIDTH 2 +#define MADERA_NGATE_THR_MASK 0x000E +#define MADERA_NGATE_THR_SHIFT 1 +#define MADERA_NGATE_THR_WIDTH 3 +#define MADERA_NGATE_ENA 0x0001 +#define MADERA_NGATE_ENA_MASK 0x0001 +#define MADERA_NGATE_ENA_SHIFT 0 +#define MADERA_NGATE_ENA_WIDTH 1 + +/* (0x0490) PDM_SPK1_CTRL_1 */ +#define MADERA_SPK1R_MUTE 0x2000 +#define MADERA_SPK1R_MUTE_MASK 0x2000 +#define MADERA_SPK1R_MUTE_SHIFT 13 +#define MADERA_SPK1R_MUTE_WIDTH 1 +#define MADERA_SPK1L_MUTE 0x1000 +#define MADERA_SPK1L_MUTE_MASK 0x1000 +#define MADERA_SPK1L_MUTE_SHIFT 12 +#define MADERA_SPK1L_MUTE_WIDTH 1 +#define MADERA_SPK1_MUTE_ENDIAN 0x0100 +#define MADERA_SPK1_MUTE_ENDIAN_MASK 0x0100 +#define MADERA_SPK1_MUTE_ENDIAN_SHIFT 8 +#define MADERA_SPK1_MUTE_ENDIAN_WIDTH 1 +#define MADERA_SPK1_MUTE_SEQ1_MASK 0x00FF +#define MADERA_SPK1_MUTE_SEQ1_SHIFT 0 +#define MADERA_SPK1_MUTE_SEQ1_WIDTH 8 + +/* (0x0491) PDM_SPK1_CTRL_2 */ +#define MADERA_SPK1_FMT 0x0001 +#define MADERA_SPK1_FMT_MASK 0x0001 +#define MADERA_SPK1_FMT_SHIFT 0 +#define MADERA_SPK1_FMT_WIDTH 1 + +/* (0x0492) PDM_SPK2_CTRL_1 */ +#define MADERA_SPK2R_MUTE 0x2000 +#define MADERA_SPK2R_MUTE_MASK 0x2000 +#define MADERA_SPK2R_MUTE_SHIFT 13 +#define MADERA_SPK2R_MUTE_WIDTH 1 +#define MADERA_SPK2L_MUTE 0x1000 +#define MADERA_SPK2L_MUTE_MASK 0x1000 +#define MADERA_SPK2L_MUTE_SHIFT 12 +#define MADERA_SPK2L_MUTE_WIDTH 1 + +/* (0x04A0) - HP1 Short Circuit Ctrl */ +#define MADERA_HP1_SC_ENA 0x1000 +#define MADERA_HP1_SC_ENA_MASK 0x1000 +#define MADERA_HP1_SC_ENA_SHIFT 12 +#define MADERA_HP1_SC_ENA_WIDTH 1 + +/* (0x04A1) - HP2 Short Circuit Ctrl */ +#define MADERA_HP2_SC_ENA 0x1000 +#define MADERA_HP2_SC_ENA_MASK 0x1000 +#define MADERA_HP2_SC_ENA_SHIFT 12 +#define MADERA_HP2_SC_ENA_WIDTH 1 + +/* (0x04A2) - HP3 Short Circuit Ctrl */ +#define MADERA_HP3_SC_ENA 0x1000 +#define MADERA_HP3_SC_ENA_MASK 0x1000 +#define MADERA_HP3_SC_ENA_SHIFT 12 +#define MADERA_HP3_SC_ENA_WIDTH 1 + +/* (0x04A8) - HP_Test_Ctrl_5 */ +#define MADERA_HP1L_ONEFLT 0x0100 +#define MADERA_HP1L_ONEFLT_MASK 0x0100 +#define MADERA_HP1L_ONEFLT_SHIFT 8 +#define MADERA_HP1L_ONEFLT_WIDTH 1 + +/* (0x04A9) - HP_Test_Ctrl_6 */ +#define MADERA_HP1R_ONEFLT 0x0100 +#define MADERA_HP1R_ONEFLT_MASK 0x0100 +#define MADERA_HP1R_ONEFLT_SHIFT 8 +#define MADERA_HP1R_ONEFLT_WIDTH 1 + +/* (0x0500) AIF1_BCLK_Ctrl */ +#define MADERA_AIF1_BCLK_INV 0x0080 +#define MADERA_AIF1_BCLK_INV_MASK 0x0080 +#define MADERA_AIF1_BCLK_INV_SHIFT 7 +#define MADERA_AIF1_BCLK_INV_WIDTH 1 +#define MADERA_AIF1_BCLK_MSTR 0x0020 +#define MADERA_AIF1_BCLK_MSTR_MASK 0x0020 +#define MADERA_AIF1_BCLK_MSTR_SHIFT 5 +#define MADERA_AIF1_BCLK_MSTR_WIDTH 1 +#define MADERA_AIF1_BCLK_FREQ_MASK 0x001F +#define MADERA_AIF1_BCLK_FREQ_SHIFT 0 +#define MADERA_AIF1_BCLK_FREQ_WIDTH 5 + +/* (0x0501) AIF1_Tx_Pin_Ctrl */ +#define MADERA_AIF1TX_LRCLK_SRC 0x0008 +#define MADERA_AIF1TX_LRCLK_SRC_MASK 0x0008 +#define MADERA_AIF1TX_LRCLK_SRC_SHIFT 3 +#define MADERA_AIF1TX_LRCLK_SRC_WIDTH 1 +#define MADERA_AIF1TX_LRCLK_INV 0x0004 +#define 
MADERA_AIF1TX_LRCLK_INV_MASK 0x0004 +#define MADERA_AIF1TX_LRCLK_INV_SHIFT 2 +#define MADERA_AIF1TX_LRCLK_INV_WIDTH 1 +#define MADERA_AIF1TX_LRCLK_MSTR 0x0001 +#define MADERA_AIF1TX_LRCLK_MSTR_MASK 0x0001 +#define MADERA_AIF1TX_LRCLK_MSTR_SHIFT 0 +#define MADERA_AIF1TX_LRCLK_MSTR_WIDTH 1 + +/* (0x0502) AIF1_Rx_Pin_Ctrl */ +#define MADERA_AIF1RX_LRCLK_INV 0x0004 +#define MADERA_AIF1RX_LRCLK_INV_MASK 0x0004 +#define MADERA_AIF1RX_LRCLK_INV_SHIFT 2 +#define MADERA_AIF1RX_LRCLK_INV_WIDTH 1 +#define MADERA_AIF1RX_LRCLK_FRC 0x0002 +#define MADERA_AIF1RX_LRCLK_FRC_MASK 0x0002 +#define MADERA_AIF1RX_LRCLK_FRC_SHIFT 1 +#define MADERA_AIF1RX_LRCLK_FRC_WIDTH 1 +#define MADERA_AIF1RX_LRCLK_MSTR 0x0001 +#define MADERA_AIF1RX_LRCLK_MSTR_MASK 0x0001 +#define MADERA_AIF1RX_LRCLK_MSTR_SHIFT 0 +#define MADERA_AIF1RX_LRCLK_MSTR_WIDTH 1 + +/* (0x0503) AIF1_Rate_Ctrl */ +#define MADERA_AIF1_RATE_MASK 0xF800 +#define MADERA_AIF1_RATE_SHIFT 11 +#define MADERA_AIF1_RATE_WIDTH 5 +#define MADERA_AIF1_TRI 0x0040 +#define MADERA_AIF1_TRI_MASK 0x0040 +#define MADERA_AIF1_TRI_SHIFT 6 +#define MADERA_AIF1_TRI_WIDTH 1 + +/* (0x0504) AIF1_Format */ +#define MADERA_AIF1_FMT_MASK 0x0007 +#define MADERA_AIF1_FMT_SHIFT 0 +#define MADERA_AIF1_FMT_WIDTH 3 + +/* (0x0506) AIF1_Rx_BCLK_Rate */ +#define MADERA_AIF1RX_BCPF_MASK 0x1FFF +#define MADERA_AIF1RX_BCPF_SHIFT 0 +#define MADERA_AIF1RX_BCPF_WIDTH 13 + +/* (0x0507) AIF1_Frame_Ctrl_1 */ +#define MADERA_AIF1TX_WL_MASK 0x3F00 +#define MADERA_AIF1TX_WL_SHIFT 8 +#define MADERA_AIF1TX_WL_WIDTH 6 +#define MADERA_AIF1TX_SLOT_LEN_MASK 0x00FF +#define MADERA_AIF1TX_SLOT_LEN_SHIFT 0 +#define MADERA_AIF1TX_SLOT_LEN_WIDTH 8 + +/* (0x0508) AIF1_Frame_Ctrl_2 */ +#define MADERA_AIF1RX_WL_MASK 0x3F00 +#define MADERA_AIF1RX_WL_SHIFT 8 +#define MADERA_AIF1RX_WL_WIDTH 6 +#define MADERA_AIF1RX_SLOT_LEN_MASK 0x00FF +#define MADERA_AIF1RX_SLOT_LEN_SHIFT 0 +#define MADERA_AIF1RX_SLOT_LEN_WIDTH 8 + +/* (0x0509) AIF1_Frame_Ctrl_3 */ +#define MADERA_AIF1TX1_SLOT_MASK 0x003F +#define MADERA_AIF1TX1_SLOT_SHIFT 0 +#define MADERA_AIF1TX1_SLOT_WIDTH 6 + +/* (0x0519) AIF1_Tx_Enables */ +#define MADERA_AIF1TX8_ENA 0x0080 +#define MADERA_AIF1TX8_ENA_MASK 0x0080 +#define MADERA_AIF1TX8_ENA_SHIFT 7 +#define MADERA_AIF1TX8_ENA_WIDTH 1 +#define MADERA_AIF1TX7_ENA 0x0040 +#define MADERA_AIF1TX7_ENA_MASK 0x0040 +#define MADERA_AIF1TX7_ENA_SHIFT 6 +#define MADERA_AIF1TX7_ENA_WIDTH 1 +#define MADERA_AIF1TX6_ENA 0x0020 +#define MADERA_AIF1TX6_ENA_MASK 0x0020 +#define MADERA_AIF1TX6_ENA_SHIFT 5 +#define MADERA_AIF1TX6_ENA_WIDTH 1 +#define MADERA_AIF1TX5_ENA 0x0010 +#define MADERA_AIF1TX5_ENA_MASK 0x0010 +#define MADERA_AIF1TX5_ENA_SHIFT 4 +#define MADERA_AIF1TX5_ENA_WIDTH 1 +#define MADERA_AIF1TX4_ENA 0x0008 +#define MADERA_AIF1TX4_ENA_MASK 0x0008 +#define MADERA_AIF1TX4_ENA_SHIFT 3 +#define MADERA_AIF1TX4_ENA_WIDTH 1 +#define MADERA_AIF1TX3_ENA 0x0004 +#define MADERA_AIF1TX3_ENA_MASK 0x0004 +#define MADERA_AIF1TX3_ENA_SHIFT 2 +#define MADERA_AIF1TX3_ENA_WIDTH 1 +#define MADERA_AIF1TX2_ENA 0x0002 +#define MADERA_AIF1TX2_ENA_MASK 0x0002 +#define MADERA_AIF1TX2_ENA_SHIFT 1 +#define MADERA_AIF1TX2_ENA_WIDTH 1 +#define MADERA_AIF1TX1_ENA 0x0001 +#define MADERA_AIF1TX1_ENA_MASK 0x0001 +#define MADERA_AIF1TX1_ENA_SHIFT 0 +#define MADERA_AIF1TX1_ENA_WIDTH 1 + +/* (0x051A) AIF1_Rx_Enables */ +#define MADERA_AIF1RX8_ENA 0x0080 +#define MADERA_AIF1RX8_ENA_MASK 0x0080 +#define MADERA_AIF1RX8_ENA_SHIFT 7 +#define MADERA_AIF1RX8_ENA_WIDTH 1 +#define MADERA_AIF1RX7_ENA 0x0040 +#define MADERA_AIF1RX7_ENA_MASK 0x0040 +#define 
MADERA_AIF1RX7_ENA_SHIFT 6 +#define MADERA_AIF1RX7_ENA_WIDTH 1 +#define MADERA_AIF1RX6_ENA 0x0020 +#define MADERA_AIF1RX6_ENA_MASK 0x0020 +#define MADERA_AIF1RX6_ENA_SHIFT 5 +#define MADERA_AIF1RX6_ENA_WIDTH 1 +#define MADERA_AIF1RX5_ENA 0x0010 +#define MADERA_AIF1RX5_ENA_MASK 0x0010 +#define MADERA_AIF1RX5_ENA_SHIFT 4 +#define MADERA_AIF1RX5_ENA_WIDTH 1 +#define MADERA_AIF1RX4_ENA 0x0008 +#define MADERA_AIF1RX4_ENA_MASK 0x0008 +#define MADERA_AIF1RX4_ENA_SHIFT 3 +#define MADERA_AIF1RX4_ENA_WIDTH 1 +#define MADERA_AIF1RX3_ENA 0x0004 +#define MADERA_AIF1RX3_ENA_MASK 0x0004 +#define MADERA_AIF1RX3_ENA_SHIFT 2 +#define MADERA_AIF1RX3_ENA_WIDTH 1 +#define MADERA_AIF1RX2_ENA 0x0002 +#define MADERA_AIF1RX2_ENA_MASK 0x0002 +#define MADERA_AIF1RX2_ENA_SHIFT 1 +#define MADERA_AIF1RX2_ENA_WIDTH 1 +#define MADERA_AIF1RX1_ENA 0x0001 +#define MADERA_AIF1RX1_ENA_MASK 0x0001 +#define MADERA_AIF1RX1_ENA_SHIFT 0 +#define MADERA_AIF1RX1_ENA_WIDTH 1 + +/* (0x0559) AIF2_Tx_Enables */ +#define MADERA_AIF2TX8_ENA 0x0080 +#define MADERA_AIF2TX8_ENA_MASK 0x0080 +#define MADERA_AIF2TX8_ENA_SHIFT 7 +#define MADERA_AIF2TX8_ENA_WIDTH 1 +#define MADERA_AIF2TX7_ENA 0x0040 +#define MADERA_AIF2TX7_ENA_MASK 0x0040 +#define MADERA_AIF2TX7_ENA_SHIFT 6 +#define MADERA_AIF2TX7_ENA_WIDTH 1 +#define MADERA_AIF2TX6_ENA 0x0020 +#define MADERA_AIF2TX6_ENA_MASK 0x0020 +#define MADERA_AIF2TX6_ENA_SHIFT 5 +#define MADERA_AIF2TX6_ENA_WIDTH 1 +#define MADERA_AIF2TX5_ENA 0x0010 +#define MADERA_AIF2TX5_ENA_MASK 0x0010 +#define MADERA_AIF2TX5_ENA_SHIFT 4 +#define MADERA_AIF2TX5_ENA_WIDTH 1 +#define MADERA_AIF2TX4_ENA 0x0008 +#define MADERA_AIF2TX4_ENA_MASK 0x0008 +#define MADERA_AIF2TX4_ENA_SHIFT 3 +#define MADERA_AIF2TX4_ENA_WIDTH 1 +#define MADERA_AIF2TX3_ENA 0x0004 +#define MADERA_AIF2TX3_ENA_MASK 0x0004 +#define MADERA_AIF2TX3_ENA_SHIFT 2 +#define MADERA_AIF2TX3_ENA_WIDTH 1 +#define MADERA_AIF2TX2_ENA 0x0002 +#define MADERA_AIF2TX2_ENA_MASK 0x0002 +#define MADERA_AIF2TX2_ENA_SHIFT 1 +#define MADERA_AIF2TX2_ENA_WIDTH 1 +#define MADERA_AIF2TX1_ENA 0x0001 +#define MADERA_AIF2TX1_ENA_MASK 0x0001 +#define MADERA_AIF2TX1_ENA_SHIFT 0 +#define MADERA_AIF2TX1_ENA_WIDTH 1 + +/* (0x055A) AIF2_Rx_Enables */ +#define MADERA_AIF2RX8_ENA 0x0080 +#define MADERA_AIF2RX8_ENA_MASK 0x0080 +#define MADERA_AIF2RX8_ENA_SHIFT 7 +#define MADERA_AIF2RX8_ENA_WIDTH 1 +#define MADERA_AIF2RX7_ENA 0x0040 +#define MADERA_AIF2RX7_ENA_MASK 0x0040 +#define MADERA_AIF2RX7_ENA_SHIFT 6 +#define MADERA_AIF2RX7_ENA_WIDTH 1 +#define MADERA_AIF2RX6_ENA 0x0020 +#define MADERA_AIF2RX6_ENA_MASK 0x0020 +#define MADERA_AIF2RX6_ENA_SHIFT 5 +#define MADERA_AIF2RX6_ENA_WIDTH 1 +#define MADERA_AIF2RX5_ENA 0x0010 +#define MADERA_AIF2RX5_ENA_MASK 0x0010 +#define MADERA_AIF2RX5_ENA_SHIFT 4 +#define MADERA_AIF2RX5_ENA_WIDTH 1 +#define MADERA_AIF2RX4_ENA 0x0008 +#define MADERA_AIF2RX4_ENA_MASK 0x0008 +#define MADERA_AIF2RX4_ENA_SHIFT 3 +#define MADERA_AIF2RX4_ENA_WIDTH 1 +#define MADERA_AIF2RX3_ENA 0x0004 +#define MADERA_AIF2RX3_ENA_MASK 0x0004 +#define MADERA_AIF2RX3_ENA_SHIFT 2 +#define MADERA_AIF2RX3_ENA_WIDTH 1 +#define MADERA_AIF2RX2_ENA 0x0002 +#define MADERA_AIF2RX2_ENA_MASK 0x0002 +#define MADERA_AIF2RX2_ENA_SHIFT 1 +#define MADERA_AIF2RX2_ENA_WIDTH 1 +#define MADERA_AIF2RX1_ENA 0x0001 +#define MADERA_AIF2RX1_ENA_MASK 0x0001 +#define MADERA_AIF2RX1_ENA_SHIFT 0 +#define MADERA_AIF2RX1_ENA_WIDTH 1 + +/* (0x0599) AIF3_Tx_Enables */ +#define MADERA_AIF3TX2_ENA 0x0002 +#define MADERA_AIF3TX2_ENA_MASK 0x0002 +#define MADERA_AIF3TX2_ENA_SHIFT 1 +#define MADERA_AIF3TX2_ENA_WIDTH 1 +#define 
MADERA_AIF3TX1_ENA 0x0001 +#define MADERA_AIF3TX1_ENA_MASK 0x0001 +#define MADERA_AIF3TX1_ENA_SHIFT 0 +#define MADERA_AIF3TX1_ENA_WIDTH 1 + +/* (0x059A) AIF3_Rx_Enables */ +#define MADERA_AIF3RX2_ENA 0x0002 +#define MADERA_AIF3RX2_ENA_MASK 0x0002 +#define MADERA_AIF3RX2_ENA_SHIFT 1 +#define MADERA_AIF3RX2_ENA_WIDTH 1 +#define MADERA_AIF3RX1_ENA 0x0001 +#define MADERA_AIF3RX1_ENA_MASK 0x0001 +#define MADERA_AIF3RX1_ENA_SHIFT 0 +#define MADERA_AIF3RX1_ENA_WIDTH 1 + +/* (0x05B9) AIF4_Tx_Enables */ +#define MADERA_AIF4TX2_ENA 0x0002 +#define MADERA_AIF4TX2_ENA_MASK 0x0002 +#define MADERA_AIF4TX2_ENA_SHIFT 1 +#define MADERA_AIF4TX2_ENA_WIDTH 1 +#define MADERA_AIF4TX1_ENA 0x0001 +#define MADERA_AIF4TX1_ENA_MASK 0x0001 +#define MADERA_AIF4TX1_ENA_SHIFT 0 +#define MADERA_AIF4TX1_ENA_WIDTH 1 + +/* (0x05BA) AIF4_Rx_Enables */ +#define MADERA_AIF4RX2_ENA 0x0002 +#define MADERA_AIF4RX2_ENA_MASK 0x0002 +#define MADERA_AIF4RX2_ENA_SHIFT 1 +#define MADERA_AIF4RX2_ENA_WIDTH 1 +#define MADERA_AIF4RX1_ENA 0x0001 +#define MADERA_AIF4RX1_ENA_MASK 0x0001 +#define MADERA_AIF4RX1_ENA_SHIFT 0 +#define MADERA_AIF4RX1_ENA_WIDTH 1 + +/* (0x05C2) SPD1_TX_Control */ +#define MADERA_SPD1_VAL2 0x2000 +#define MADERA_SPD1_VAL2_MASK 0x2000 +#define MADERA_SPD1_VAL2_SHIFT 13 +#define MADERA_SPD1_VAL2_WIDTH 1 +#define MADERA_SPD1_VAL1 0x1000 +#define MADERA_SPD1_VAL1_MASK 0x1000 +#define MADERA_SPD1_VAL1_SHIFT 12 +#define MADERA_SPD1_VAL1_WIDTH 1 +#define MADERA_SPD1_RATE_MASK 0x00F0 +#define MADERA_SPD1_RATE_SHIFT 4 +#define MADERA_SPD1_RATE_WIDTH 4 +#define MADERA_SPD1_ENA 0x0001 +#define MADERA_SPD1_ENA_MASK 0x0001 +#define MADERA_SPD1_ENA_SHIFT 0 +#define MADERA_SPD1_ENA_WIDTH 1 + +/* (0x05F5) SLIMbus_RX_Channel_Enable */ +#define MADERA_SLIMRX8_ENA 0x0080 +#define MADERA_SLIMRX8_ENA_MASK 0x0080 +#define MADERA_SLIMRX8_ENA_SHIFT 7 +#define MADERA_SLIMRX8_ENA_WIDTH 1 +#define MADERA_SLIMRX7_ENA 0x0040 +#define MADERA_SLIMRX7_ENA_MASK 0x0040 +#define MADERA_SLIMRX7_ENA_SHIFT 6 +#define MADERA_SLIMRX7_ENA_WIDTH 1 +#define MADERA_SLIMRX6_ENA 0x0020 +#define MADERA_SLIMRX6_ENA_MASK 0x0020 +#define MADERA_SLIMRX6_ENA_SHIFT 5 +#define MADERA_SLIMRX6_ENA_WIDTH 1 +#define MADERA_SLIMRX5_ENA 0x0010 +#define MADERA_SLIMRX5_ENA_MASK 0x0010 +#define MADERA_SLIMRX5_ENA_SHIFT 4 +#define MADERA_SLIMRX5_ENA_WIDTH 1 +#define MADERA_SLIMRX4_ENA 0x0008 +#define MADERA_SLIMRX4_ENA_MASK 0x0008 +#define MADERA_SLIMRX4_ENA_SHIFT 3 +#define MADERA_SLIMRX4_ENA_WIDTH 1 +#define MADERA_SLIMRX3_ENA 0x0004 +#define MADERA_SLIMRX3_ENA_MASK 0x0004 +#define MADERA_SLIMRX3_ENA_SHIFT 2 +#define MADERA_SLIMRX3_ENA_WIDTH 1 +#define MADERA_SLIMRX2_ENA 0x0002 +#define MADERA_SLIMRX2_ENA_MASK 0x0002 +#define MADERA_SLIMRX2_ENA_SHIFT 1 +#define MADERA_SLIMRX2_ENA_WIDTH 1 +#define MADERA_SLIMRX1_ENA 0x0001 +#define MADERA_SLIMRX1_ENA_MASK 0x0001 +#define MADERA_SLIMRX1_ENA_SHIFT 0 +#define MADERA_SLIMRX1_ENA_WIDTH 1 + +/* (0x05F6) SLIMbus_TX_Channel_Enable */ +#define MADERA_SLIMTX8_ENA 0x0080 +#define MADERA_SLIMTX8_ENA_MASK 0x0080 +#define MADERA_SLIMTX8_ENA_SHIFT 7 +#define MADERA_SLIMTX8_ENA_WIDTH 1 +#define MADERA_SLIMTX7_ENA 0x0040 +#define MADERA_SLIMTX7_ENA_MASK 0x0040 +#define MADERA_SLIMTX7_ENA_SHIFT 6 +#define MADERA_SLIMTX7_ENA_WIDTH 1 +#define MADERA_SLIMTX6_ENA 0x0020 +#define MADERA_SLIMTX6_ENA_MASK 0x0020 +#define MADERA_SLIMTX6_ENA_SHIFT 5 +#define MADERA_SLIMTX6_ENA_WIDTH 1 +#define MADERA_SLIMTX5_ENA 0x0010 +#define MADERA_SLIMTX5_ENA_MASK 0x0010 +#define MADERA_SLIMTX5_ENA_SHIFT 4 +#define MADERA_SLIMTX5_ENA_WIDTH 1 +#define 
MADERA_SLIMTX4_ENA 0x0008 +#define MADERA_SLIMTX4_ENA_MASK 0x0008 +#define MADERA_SLIMTX4_ENA_SHIFT 3 +#define MADERA_SLIMTX4_ENA_WIDTH 1 +#define MADERA_SLIMTX3_ENA 0x0004 +#define MADERA_SLIMTX3_ENA_MASK 0x0004 +#define MADERA_SLIMTX3_ENA_SHIFT 2 +#define MADERA_SLIMTX3_ENA_WIDTH 1 +#define MADERA_SLIMTX2_ENA 0x0002 +#define MADERA_SLIMTX2_ENA_MASK 0x0002 +#define MADERA_SLIMTX2_ENA_SHIFT 1 +#define MADERA_SLIMTX2_ENA_WIDTH 1 +#define MADERA_SLIMTX1_ENA 0x0001 +#define MADERA_SLIMTX1_ENA_MASK 0x0001 +#define MADERA_SLIMTX1_ENA_SHIFT 0 +#define MADERA_SLIMTX1_ENA_WIDTH 1 + +/* (0x0E10) EQ1_1 */ +#define MADERA_EQ1_B1_GAIN_MASK 0xF800 +#define MADERA_EQ1_B1_GAIN_SHIFT 11 +#define MADERA_EQ1_B1_GAIN_WIDTH 5 +#define MADERA_EQ1_B2_GAIN_MASK 0x07C0 +#define MADERA_EQ1_B2_GAIN_SHIFT 6 +#define MADERA_EQ1_B2_GAIN_WIDTH 5 +#define MADERA_EQ1_B3_GAIN_MASK 0x003E +#define MADERA_EQ1_B3_GAIN_SHIFT 1 +#define MADERA_EQ1_B3_GAIN_WIDTH 5 +#define MADERA_EQ1_ENA 0x0001 +#define MADERA_EQ1_ENA_MASK 0x0001 +#define MADERA_EQ1_ENA_SHIFT 0 +#define MADERA_EQ1_ENA_WIDTH 1 + +/* (0x0E11) EQ1_2 */ +#define MADERA_EQ1_B4_GAIN_MASK 0xF800 +#define MADERA_EQ1_B4_GAIN_SHIFT 11 +#define MADERA_EQ1_B4_GAIN_WIDTH 5 +#define MADERA_EQ1_B5_GAIN_MASK 0x07C0 +#define MADERA_EQ1_B5_GAIN_SHIFT 6 +#define MADERA_EQ1_B5_GAIN_WIDTH 5 +#define MADERA_EQ1_B1_MODE 0x0001 +#define MADERA_EQ1_B1_MODE_MASK 0x0001 +#define MADERA_EQ1_B1_MODE_SHIFT 0 +#define MADERA_EQ1_B1_MODE_WIDTH 1 + +/* (0x0E26) EQ2_1 */ +#define MADERA_EQ2_B1_GAIN_MASK 0xF800 +#define MADERA_EQ2_B1_GAIN_SHIFT 11 +#define MADERA_EQ2_B1_GAIN_WIDTH 5 +#define MADERA_EQ2_B2_GAIN_MASK 0x07C0 +#define MADERA_EQ2_B2_GAIN_SHIFT 6 +#define MADERA_EQ2_B2_GAIN_WIDTH 5 +#define MADERA_EQ2_B3_GAIN_MASK 0x003E +#define MADERA_EQ2_B3_GAIN_SHIFT 1 +#define MADERA_EQ2_B3_GAIN_WIDTH 5 +#define MADERA_EQ2_ENA 0x0001 +#define MADERA_EQ2_ENA_MASK 0x0001 +#define MADERA_EQ2_ENA_SHIFT 0 +#define MADERA_EQ2_ENA_WIDTH 1 + +/* (0x0E27) EQ2_2 */ +#define MADERA_EQ2_B4_GAIN_MASK 0xF800 +#define MADERA_EQ2_B4_GAIN_SHIFT 11 +#define MADERA_EQ2_B4_GAIN_WIDTH 5 +#define MADERA_EQ2_B5_GAIN_MASK 0x07C0 +#define MADERA_EQ2_B5_GAIN_SHIFT 6 +#define MADERA_EQ2_B5_GAIN_WIDTH 5 +#define MADERA_EQ2_B1_MODE 0x0001 +#define MADERA_EQ2_B1_MODE_MASK 0x0001 +#define MADERA_EQ2_B1_MODE_SHIFT 0 +#define MADERA_EQ2_B1_MODE_WIDTH 1 + +/* (0x0E3C) EQ3_1 */ +#define MADERA_EQ3_B1_GAIN_MASK 0xF800 +#define MADERA_EQ3_B1_GAIN_SHIFT 11 +#define MADERA_EQ3_B1_GAIN_WIDTH 5 +#define MADERA_EQ3_B2_GAIN_MASK 0x07C0 +#define MADERA_EQ3_B2_GAIN_SHIFT 6 +#define MADERA_EQ3_B2_GAIN_WIDTH 5 +#define MADERA_EQ3_B3_GAIN_MASK 0x003E +#define MADERA_EQ3_B3_GAIN_SHIFT 1 +#define MADERA_EQ3_B3_GAIN_WIDTH 5 +#define MADERA_EQ3_ENA 0x0001 +#define MADERA_EQ3_ENA_MASK 0x0001 +#define MADERA_EQ3_ENA_SHIFT 0 +#define MADERA_EQ3_ENA_WIDTH 1 + +/* (0x0E3D) EQ3_2 */ +#define MADERA_EQ3_B4_GAIN_MASK 0xF800 +#define MADERA_EQ3_B4_GAIN_SHIFT 11 +#define MADERA_EQ3_B4_GAIN_WIDTH 5 +#define MADERA_EQ3_B5_GAIN_MASK 0x07C0 +#define MADERA_EQ3_B5_GAIN_SHIFT 6 +#define MADERA_EQ3_B5_GAIN_WIDTH 5 +#define MADERA_EQ3_B1_MODE 0x0001 +#define MADERA_EQ3_B1_MODE_MASK 0x0001 +#define MADERA_EQ3_B1_MODE_SHIFT 0 +#define MADERA_EQ3_B1_MODE_WIDTH 1 + +/* (0x0E52) EQ4_1 */ +#define MADERA_EQ4_B1_GAIN_MASK 0xF800 +#define MADERA_EQ4_B1_GAIN_SHIFT 11 +#define MADERA_EQ4_B1_GAIN_WIDTH 5 +#define MADERA_EQ4_B2_GAIN_MASK 0x07C0 +#define MADERA_EQ4_B2_GAIN_SHIFT 6 +#define MADERA_EQ4_B2_GAIN_WIDTH 5 +#define MADERA_EQ4_B3_GAIN_MASK 0x003E +#define 
MADERA_EQ4_B3_GAIN_SHIFT 1 +#define MADERA_EQ4_B3_GAIN_WIDTH 5 +#define MADERA_EQ4_ENA 0x0001 +#define MADERA_EQ4_ENA_MASK 0x0001 +#define MADERA_EQ4_ENA_SHIFT 0 +#define MADERA_EQ4_ENA_WIDTH 1 + +/* (0x0E53) EQ4_2 */ +#define MADERA_EQ4_B4_GAIN_MASK 0xF800 +#define MADERA_EQ4_B4_GAIN_SHIFT 11 +#define MADERA_EQ4_B4_GAIN_WIDTH 5 +#define MADERA_EQ4_B5_GAIN_MASK 0x07C0 +#define MADERA_EQ4_B5_GAIN_SHIFT 6 +#define MADERA_EQ4_B5_GAIN_WIDTH 5 +#define MADERA_EQ4_B1_MODE 0x0001 +#define MADERA_EQ4_B1_MODE_MASK 0x0001 +#define MADERA_EQ4_B1_MODE_SHIFT 0 +#define MADERA_EQ4_B1_MODE_WIDTH 1 + +/* (0x0E80) DRC1_ctrl1 */ +#define MADERA_DRC1L_ENA 0x0002 +#define MADERA_DRC1L_ENA_MASK 0x0002 +#define MADERA_DRC1L_ENA_SHIFT 1 +#define MADERA_DRC1L_ENA_WIDTH 1 +#define MADERA_DRC1R_ENA 0x0001 +#define MADERA_DRC1R_ENA_MASK 0x0001 +#define MADERA_DRC1R_ENA_SHIFT 0 +#define MADERA_DRC1R_ENA_WIDTH 1 + +/* (0x0E88) DRC2_ctrl1 */ +#define MADERA_DRC2L_ENA 0x0002 +#define MADERA_DRC2L_ENA_MASK 0x0002 +#define MADERA_DRC2L_ENA_SHIFT 1 +#define MADERA_DRC2L_ENA_WIDTH 1 +#define MADERA_DRC2R_ENA 0x0001 +#define MADERA_DRC2R_ENA_MASK 0x0001 +#define MADERA_DRC2R_ENA_SHIFT 0 +#define MADERA_DRC2R_ENA_WIDTH 1 + +/* (0x0EC0) HPLPF1_1 */ +#define MADERA_LHPF1_MODE 0x0002 +#define MADERA_LHPF1_MODE_MASK 0x0002 +#define MADERA_LHPF1_MODE_SHIFT 1 +#define MADERA_LHPF1_MODE_WIDTH 1 +#define MADERA_LHPF1_ENA 0x0001 +#define MADERA_LHPF1_ENA_MASK 0x0001 +#define MADERA_LHPF1_ENA_SHIFT 0 +#define MADERA_LHPF1_ENA_WIDTH 1 + +/* (0x0EC1) HPLPF1_2 */ +#define MADERA_LHPF1_COEFF_MASK 0xFFFF +#define MADERA_LHPF1_COEFF_SHIFT 0 +#define MADERA_LHPF1_COEFF_WIDTH 16 + +/* (0x0EC4) HPLPF2_1 */ +#define MADERA_LHPF2_MODE 0x0002 +#define MADERA_LHPF2_MODE_MASK 0x0002 +#define MADERA_LHPF2_MODE_SHIFT 1 +#define MADERA_LHPF2_MODE_WIDTH 1 +#define MADERA_LHPF2_ENA 0x0001 +#define MADERA_LHPF2_ENA_MASK 0x0001 +#define MADERA_LHPF2_ENA_SHIFT 0 +#define MADERA_LHPF2_ENA_WIDTH 1 + +/* (0x0EC5) HPLPF2_2 */ +#define MADERA_LHPF2_COEFF_MASK 0xFFFF +#define MADERA_LHPF2_COEFF_SHIFT 0 +#define MADERA_LHPF2_COEFF_WIDTH 16 + +/* (0x0EC8) HPLPF3_1 */ +#define MADERA_LHPF3_MODE 0x0002 +#define MADERA_LHPF3_MODE_MASK 0x0002 +#define MADERA_LHPF3_MODE_SHIFT 1 +#define MADERA_LHPF3_MODE_WIDTH 1 +#define MADERA_LHPF3_ENA 0x0001 +#define MADERA_LHPF3_ENA_MASK 0x0001 +#define MADERA_LHPF3_ENA_SHIFT 0 +#define MADERA_LHPF3_ENA_WIDTH 1 + +/* (0x0EC9) HPLPF3_2 */ +#define MADERA_LHPF3_COEFF_MASK 0xFFFF +#define MADERA_LHPF3_COEFF_SHIFT 0 +#define MADERA_LHPF3_COEFF_WIDTH 16 + +/* (0x0ECC) HPLPF4_1 */ +#define MADERA_LHPF4_MODE 0x0002 +#define MADERA_LHPF4_MODE_MASK 0x0002 +#define MADERA_LHPF4_MODE_SHIFT 1 +#define MADERA_LHPF4_MODE_WIDTH 1 +#define MADERA_LHPF4_ENA 0x0001 +#define MADERA_LHPF4_ENA_MASK 0x0001 +#define MADERA_LHPF4_ENA_SHIFT 0 +#define MADERA_LHPF4_ENA_WIDTH 1 + +/* (0x0ECD) HPLPF4_2 */ +#define MADERA_LHPF4_COEFF_MASK 0xFFFF +#define MADERA_LHPF4_COEFF_SHIFT 0 +#define MADERA_LHPF4_COEFF_WIDTH 16 + +/* (0x0ED0) ASRC2_ENABLE */ +#define MADERA_ASRC2_IN2L_ENA 0x0008 +#define MADERA_ASRC2_IN2L_ENA_MASK 0x0008 +#define MADERA_ASRC2_IN2L_ENA_SHIFT 3 +#define MADERA_ASRC2_IN2L_ENA_WIDTH 1 +#define MADERA_ASRC2_IN2R_ENA 0x0004 +#define MADERA_ASRC2_IN2R_ENA_MASK 0x0004 +#define MADERA_ASRC2_IN2R_ENA_SHIFT 2 +#define MADERA_ASRC2_IN2R_ENA_WIDTH 1 +#define MADERA_ASRC2_IN1L_ENA 0x0002 +#define MADERA_ASRC2_IN1L_ENA_MASK 0x0002 +#define MADERA_ASRC2_IN1L_ENA_SHIFT 1 +#define MADERA_ASRC2_IN1L_ENA_WIDTH 1 +#define MADERA_ASRC2_IN1R_ENA 0x0001 +#define 
MADERA_ASRC2_IN1R_ENA_MASK 0x0001 +#define MADERA_ASRC2_IN1R_ENA_SHIFT 0 +#define MADERA_ASRC2_IN1R_ENA_WIDTH 1 + +/* (0x0ED2) ASRC2_RATE1 */ +#define MADERA_ASRC2_RATE1_MASK 0xF800 +#define MADERA_ASRC2_RATE1_SHIFT 11 +#define MADERA_ASRC2_RATE1_WIDTH 5 + +/* (0x0ED3) ASRC2_RATE2 */ +#define MADERA_ASRC2_RATE2_MASK 0xF800 +#define MADERA_ASRC2_RATE2_SHIFT 11 +#define MADERA_ASRC2_RATE2_WIDTH 5 + +/* (0x0EE0) ASRC1_ENABLE */ +#define MADERA_ASRC1_IN2L_ENA 0x0008 +#define MADERA_ASRC1_IN2L_ENA_MASK 0x0008 +#define MADERA_ASRC1_IN2L_ENA_SHIFT 3 +#define MADERA_ASRC1_IN2L_ENA_WIDTH 1 +#define MADERA_ASRC1_IN2R_ENA 0x0004 +#define MADERA_ASRC1_IN2R_ENA_MASK 0x0004 +#define MADERA_ASRC1_IN2R_ENA_SHIFT 2 +#define MADERA_ASRC1_IN2R_ENA_WIDTH 1 +#define MADERA_ASRC1_IN1L_ENA 0x0002 +#define MADERA_ASRC1_IN1L_ENA_MASK 0x0002 +#define MADERA_ASRC1_IN1L_ENA_SHIFT 1 +#define MADERA_ASRC1_IN1L_ENA_WIDTH 1 +#define MADERA_ASRC1_IN1R_ENA 0x0001 +#define MADERA_ASRC1_IN1R_ENA_MASK 0x0001 +#define MADERA_ASRC1_IN1R_ENA_SHIFT 0 +#define MADERA_ASRC1_IN1R_ENA_WIDTH 1 + +/* (0x0EE2) ASRC1_RATE1 */ +#define MADERA_ASRC1_RATE1_MASK 0xF800 +#define MADERA_ASRC1_RATE1_SHIFT 11 +#define MADERA_ASRC1_RATE1_WIDTH 5 + +/* (0x0EE3) ASRC1_RATE2 */ +#define MADERA_ASRC1_RATE2_MASK 0xF800 +#define MADERA_ASRC1_RATE2_SHIFT 11 +#define MADERA_ASRC1_RATE2_WIDTH 5 + +/* (0x0EF0) - ISRC1 CTRL 1 */ +#define MADERA_ISRC1_FSH_MASK 0xF800 +#define MADERA_ISRC1_FSH_SHIFT 11 +#define MADERA_ISRC1_FSH_WIDTH 5 +#define MADERA_ISRC1_CLK_SEL_MASK 0x0700 +#define MADERA_ISRC1_CLK_SEL_SHIFT 8 +#define MADERA_ISRC1_CLK_SEL_WIDTH 3 + +/* (0x0EF1) ISRC1_CTRL_2 */ +#define MADERA_ISRC1_FSL_MASK 0xF800 +#define MADERA_ISRC1_FSL_SHIFT 11 +#define MADERA_ISRC1_FSL_WIDTH 5 + +/* (0x0EF2) ISRC1_CTRL_3 */ +#define MADERA_ISRC1_INT1_ENA 0x8000 +#define MADERA_ISRC1_INT1_ENA_MASK 0x8000 +#define MADERA_ISRC1_INT1_ENA_SHIFT 15 +#define MADERA_ISRC1_INT1_ENA_WIDTH 1 +#define MADERA_ISRC1_INT2_ENA 0x4000 +#define MADERA_ISRC1_INT2_ENA_MASK 0x4000 +#define MADERA_ISRC1_INT2_ENA_SHIFT 14 +#define MADERA_ISRC1_INT2_ENA_WIDTH 1 +#define MADERA_ISRC1_INT3_ENA 0x2000 +#define MADERA_ISRC1_INT3_ENA_MASK 0x2000 +#define MADERA_ISRC1_INT3_ENA_SHIFT 13 +#define MADERA_ISRC1_INT3_ENA_WIDTH 1 +#define MADERA_ISRC1_INT4_ENA 0x1000 +#define MADERA_ISRC1_INT4_ENA_MASK 0x1000 +#define MADERA_ISRC1_INT4_ENA_SHIFT 12 +#define MADERA_ISRC1_INT4_ENA_WIDTH 1 +#define MADERA_ISRC1_DEC1_ENA 0x0200 +#define MADERA_ISRC1_DEC1_ENA_MASK 0x0200 +#define MADERA_ISRC1_DEC1_ENA_SHIFT 9 +#define MADERA_ISRC1_DEC1_ENA_WIDTH 1 +#define MADERA_ISRC1_DEC2_ENA 0x0100 +#define MADERA_ISRC1_DEC2_ENA_MASK 0x0100 +#define MADERA_ISRC1_DEC2_ENA_SHIFT 8 +#define MADERA_ISRC1_DEC2_ENA_WIDTH 1 +#define MADERA_ISRC1_DEC3_ENA 0x0080 +#define MADERA_ISRC1_DEC3_ENA_MASK 0x0080 +#define MADERA_ISRC1_DEC3_ENA_SHIFT 7 +#define MADERA_ISRC1_DEC3_ENA_WIDTH 1 +#define MADERA_ISRC1_DEC4_ENA 0x0040 +#define MADERA_ISRC1_DEC4_ENA_MASK 0x0040 +#define MADERA_ISRC1_DEC4_ENA_SHIFT 6 +#define MADERA_ISRC1_DEC4_ENA_WIDTH 1 +#define MADERA_ISRC1_NOTCH_ENA 0x0001 +#define MADERA_ISRC1_NOTCH_ENA_MASK 0x0001 +#define MADERA_ISRC1_NOTCH_ENA_SHIFT 0 +#define MADERA_ISRC1_NOTCH_ENA_WIDTH 1 + +/* (0x0EF3) ISRC2_CTRL_1 */ +#define MADERA_ISRC2_FSH_MASK 0xF800 +#define MADERA_ISRC2_FSH_SHIFT 11 +#define MADERA_ISRC2_FSH_WIDTH 5 +#define MADERA_ISRC2_CLK_SEL_MASK 0x0700 +#define MADERA_ISRC2_CLK_SEL_SHIFT 8 +#define MADERA_ISRC2_CLK_SEL_WIDTH 3 + +/* (0x0EF4) ISRC2_CTRL_2 */ +#define MADERA_ISRC2_FSL_MASK 0xF800 +#define 
MADERA_ISRC2_FSL_SHIFT 11 +#define MADERA_ISRC2_FSL_WIDTH 5 + +/* (0x0EF5) ISRC2_CTRL_3 */ +#define MADERA_ISRC2_INT1_ENA 0x8000 +#define MADERA_ISRC2_INT1_ENA_MASK 0x8000 +#define MADERA_ISRC2_INT1_ENA_SHIFT 15 +#define MADERA_ISRC2_INT1_ENA_WIDTH 1 +#define MADERA_ISRC2_INT2_ENA 0x4000 +#define MADERA_ISRC2_INT2_ENA_MASK 0x4000 +#define MADERA_ISRC2_INT2_ENA_SHIFT 14 +#define MADERA_ISRC2_INT2_ENA_WIDTH 1 +#define MADERA_ISRC2_INT3_ENA 0x2000 +#define MADERA_ISRC2_INT3_ENA_MASK 0x2000 +#define MADERA_ISRC2_INT3_ENA_SHIFT 13 +#define MADERA_ISRC2_INT3_ENA_WIDTH 1 +#define MADERA_ISRC2_INT4_ENA 0x1000 +#define MADERA_ISRC2_INT4_ENA_MASK 0x1000 +#define MADERA_ISRC2_INT4_ENA_SHIFT 12 +#define MADERA_ISRC2_INT4_ENA_WIDTH 1 +#define MADERA_ISRC2_DEC1_ENA 0x0200 +#define MADERA_ISRC2_DEC1_ENA_MASK 0x0200 +#define MADERA_ISRC2_DEC1_ENA_SHIFT 9 +#define MADERA_ISRC2_DEC1_ENA_WIDTH 1 +#define MADERA_ISRC2_DEC2_ENA 0x0100 +#define MADERA_ISRC2_DEC2_ENA_MASK 0x0100 +#define MADERA_ISRC2_DEC2_ENA_SHIFT 8 +#define MADERA_ISRC2_DEC2_ENA_WIDTH 1 +#define MADERA_ISRC2_DEC3_ENA 0x0080 +#define MADERA_ISRC2_DEC3_ENA_MASK 0x0080 +#define MADERA_ISRC2_DEC3_ENA_SHIFT 7 +#define MADERA_ISRC2_DEC3_ENA_WIDTH 1 +#define MADERA_ISRC2_DEC4_ENA 0x0040 +#define MADERA_ISRC2_DEC4_ENA_MASK 0x0040 +#define MADERA_ISRC2_DEC4_ENA_SHIFT 6 +#define MADERA_ISRC2_DEC4_ENA_WIDTH 1 +#define MADERA_ISRC2_NOTCH_ENA 0x0001 +#define MADERA_ISRC2_NOTCH_ENA_MASK 0x0001 +#define MADERA_ISRC2_NOTCH_ENA_SHIFT 0 +#define MADERA_ISRC2_NOTCH_ENA_WIDTH 1 + +/* (0x0EF6) ISRC3_CTRL_1 */ +#define MADERA_ISRC3_FSH_MASK 0xF800 +#define MADERA_ISRC3_FSH_SHIFT 11 +#define MADERA_ISRC3_FSH_WIDTH 5 +#define MADERA_ISRC3_CLK_SEL_MASK 0x0700 +#define MADERA_ISRC3_CLK_SEL_SHIFT 8 +#define MADERA_ISRC3_CLK_SEL_WIDTH 3 + +/* (0x0EF7) ISRC3_CTRL_2 */ +#define MADERA_ISRC3_FSL_MASK 0xF800 +#define MADERA_ISRC3_FSL_SHIFT 11 +#define MADERA_ISRC3_FSL_WIDTH 5 + +/* (0x0EF8) ISRC3_CTRL_3 */ +#define MADERA_ISRC3_INT1_ENA 0x8000 +#define MADERA_ISRC3_INT1_ENA_MASK 0x8000 +#define MADERA_ISRC3_INT1_ENA_SHIFT 15 +#define MADERA_ISRC3_INT1_ENA_WIDTH 1 +#define MADERA_ISRC3_INT2_ENA 0x4000 +#define MADERA_ISRC3_INT2_ENA_MASK 0x4000 +#define MADERA_ISRC3_INT2_ENA_SHIFT 14 +#define MADERA_ISRC3_INT2_ENA_WIDTH 1 +#define MADERA_ISRC3_INT3_ENA 0x2000 +#define MADERA_ISRC3_INT3_ENA_MASK 0x2000 +#define MADERA_ISRC3_INT3_ENA_SHIFT 13 +#define MADERA_ISRC3_INT3_ENA_WIDTH 1 +#define MADERA_ISRC3_INT4_ENA 0x1000 +#define MADERA_ISRC3_INT4_ENA_MASK 0x1000 +#define MADERA_ISRC3_INT4_ENA_SHIFT 12 +#define MADERA_ISRC3_INT4_ENA_WIDTH 1 +#define MADERA_ISRC3_DEC1_ENA 0x0200 +#define MADERA_ISRC3_DEC1_ENA_MASK 0x0200 +#define MADERA_ISRC3_DEC1_ENA_SHIFT 9 +#define MADERA_ISRC3_DEC1_ENA_WIDTH 1 +#define MADERA_ISRC3_DEC2_ENA 0x0100 +#define MADERA_ISRC3_DEC2_ENA_MASK 0x0100 +#define MADERA_ISRC3_DEC2_ENA_SHIFT 8 +#define MADERA_ISRC3_DEC2_ENA_WIDTH 1 +#define MADERA_ISRC3_DEC3_ENA 0x0080 +#define MADERA_ISRC3_DEC3_ENA_MASK 0x0080 +#define MADERA_ISRC3_DEC3_ENA_SHIFT 7 +#define MADERA_ISRC3_DEC3_ENA_WIDTH 1 +#define MADERA_ISRC3_DEC4_ENA 0x0040 +#define MADERA_ISRC3_DEC4_ENA_MASK 0x0040 +#define MADERA_ISRC3_DEC4_ENA_SHIFT 6 +#define MADERA_ISRC3_DEC4_ENA_WIDTH 1 +#define MADERA_ISRC3_NOTCH_ENA 0x0001 +#define MADERA_ISRC3_NOTCH_ENA_MASK 0x0001 +#define MADERA_ISRC3_NOTCH_ENA_SHIFT 0 +#define MADERA_ISRC3_NOTCH_ENA_WIDTH 1 + +/* (0x0EF9) ISRC4_CTRL_1 */ +#define MADERA_ISRC4_FSH_MASK 0xF800 +#define MADERA_ISRC4_FSH_SHIFT 11 +#define MADERA_ISRC4_FSH_WIDTH 5 +#define 
MADERA_ISRC4_CLK_SEL_MASK 0x0700 +#define MADERA_ISRC4_CLK_SEL_SHIFT 8 +#define MADERA_ISRC4_CLK_SEL_WIDTH 3 + +/* (0x0EFA) ISRC4_CTRL_2 */ +#define MADERA_ISRC4_FSL_MASK 0xF800 +#define MADERA_ISRC4_FSL_SHIFT 11 +#define MADERA_ISRC4_FSL_WIDTH 5 + +/* (0x0EFB) ISRC4_CTRL_3 */ +#define MADERA_ISRC4_INT1_ENA 0x8000 +#define MADERA_ISRC4_INT1_ENA_MASK 0x8000 +#define MADERA_ISRC4_INT1_ENA_SHIFT 15 +#define MADERA_ISRC4_INT1_ENA_WIDTH 1 +#define MADERA_ISRC4_INT2_ENA 0x4000 +#define MADERA_ISRC4_INT2_ENA_MASK 0x4000 +#define MADERA_ISRC4_INT2_ENA_SHIFT 14 +#define MADERA_ISRC4_INT2_ENA_WIDTH 1 +#define MADERA_ISRC4_INT3_ENA 0x2000 +#define MADERA_ISRC4_INT3_ENA_MASK 0x2000 +#define MADERA_ISRC4_INT3_ENA_SHIFT 13 +#define MADERA_ISRC4_INT3_ENA_WIDTH 1 +#define MADERA_ISRC4_INT4_ENA 0x1000 +#define MADERA_ISRC4_INT4_ENA_MASK 0x1000 +#define MADERA_ISRC4_INT4_ENA_SHIFT 12 +#define MADERA_ISRC4_INT4_ENA_WIDTH 1 +#define MADERA_ISRC4_DEC1_ENA 0x0200 +#define MADERA_ISRC4_DEC1_ENA_MASK 0x0200 +#define MADERA_ISRC4_DEC1_ENA_SHIFT 9 +#define MADERA_ISRC4_DEC1_ENA_WIDTH 1 +#define MADERA_ISRC4_DEC2_ENA 0x0100 +#define MADERA_ISRC4_DEC2_ENA_MASK 0x0100 +#define MADERA_ISRC4_DEC2_ENA_SHIFT 8 +#define MADERA_ISRC4_DEC2_ENA_WIDTH 1 +#define MADERA_ISRC4_DEC3_ENA 0x0080 +#define MADERA_ISRC4_DEC3_ENA_MASK 0x0080 +#define MADERA_ISRC4_DEC3_ENA_SHIFT 7 +#define MADERA_ISRC4_DEC3_ENA_WIDTH 1 +#define MADERA_ISRC4_DEC4_ENA 0x0040 +#define MADERA_ISRC4_DEC4_ENA_MASK 0x0040 +#define MADERA_ISRC4_DEC4_ENA_SHIFT 6 +#define MADERA_ISRC4_DEC4_ENA_WIDTH 1 +#define MADERA_ISRC4_NOTCH_ENA 0x0001 +#define MADERA_ISRC4_NOTCH_ENA_MASK 0x0001 +#define MADERA_ISRC4_NOTCH_ENA_SHIFT 0 +#define MADERA_ISRC4_NOTCH_ENA_WIDTH 1 + +/* (0x0F00) Clock_Control */ +#define MADERA_EXT_NG_SEL_CLR 0x0080 +#define MADERA_EXT_NG_SEL_CLR_MASK 0x0080 +#define MADERA_EXT_NG_SEL_CLR_SHIFT 7 +#define MADERA_EXT_NG_SEL_CLR_WIDTH 1 +#define MADERA_EXT_NG_SEL_SET 0x0040 +#define MADERA_EXT_NG_SEL_SET_MASK 0x0040 +#define MADERA_EXT_NG_SEL_SET_SHIFT 6 +#define MADERA_EXT_NG_SEL_SET_WIDTH 1 +#define MADERA_CLK_R_ENA_CLR 0x0020 +#define MADERA_CLK_R_ENA_CLR_MASK 0x0020 +#define MADERA_CLK_R_ENA_CLR_SHIFT 5 +#define MADERA_CLK_R_ENA_CLR_WIDTH 1 +#define MADERA_CLK_R_ENA_SET 0x0010 +#define MADERA_CLK_R_ENA_SET_MASK 0x0010 +#define MADERA_CLK_R_ENA_SET_SHIFT 4 +#define MADERA_CLK_R_ENA_SET_WIDTH 1 +#define MADERA_CLK_NG_ENA_CLR 0x0008 +#define MADERA_CLK_NG_ENA_CLR_MASK 0x0008 +#define MADERA_CLK_NG_ENA_CLR_SHIFT 3 +#define MADERA_CLK_NG_ENA_CLR_WIDTH 1 +#define MADERA_CLK_NG_ENA_SET 0x0004 +#define MADERA_CLK_NG_ENA_SET_MASK 0x0004 +#define MADERA_CLK_NG_ENA_SET_SHIFT 2 +#define MADERA_CLK_NG_ENA_SET_WIDTH 1 +#define MADERA_CLK_L_ENA_CLR 0x0002 +#define MADERA_CLK_L_ENA_CLR_MASK 0x0002 +#define MADERA_CLK_L_ENA_CLR_SHIFT 1 +#define MADERA_CLK_L_ENA_CLR_WIDTH 1 +#define MADERA_CLK_L_ENA_SET 0x0001 +#define MADERA_CLK_L_ENA_SET_MASK 0x0001 +#define MADERA_CLK_L_ENA_SET_SHIFT 0 +#define MADERA_CLK_L_ENA_SET_WIDTH 1 + +/* (0x0F01) ANC_SRC */ +#define MADERA_IN_RXANCR_SEL_MASK 0x0070 +#define MADERA_IN_RXANCR_SEL_SHIFT 4 +#define MADERA_IN_RXANCR_SEL_WIDTH 3 +#define MADERA_IN_RXANCL_SEL_MASK 0x0007 +#define MADERA_IN_RXANCL_SEL_SHIFT 0 +#define MADERA_IN_RXANCL_SEL_WIDTH 3 + +/* (0x0F17) FCL_ADC_reformatter_control */ +#define MADERA_FCL_MIC_MODE_SEL 0x000C +#define MADERA_FCL_MIC_MODE_SEL_SHIFT 2 +#define MADERA_FCL_MIC_MODE_SEL_WIDTH 2 + +/* (0x0F73) FCR_ADC_reformatter_control */ +#define MADERA_FCR_MIC_MODE_SEL 0x000C +#define 
MADERA_FCR_MIC_MODE_SEL_SHIFT 2 +#define MADERA_FCR_MIC_MODE_SEL_WIDTH 2 + +/* (0x1480) DFC1_CTRL_W0 */ +#define MADERA_DFC1_RATE_MASK 0x007C +#define MADERA_DFC1_RATE_SHIFT 2 +#define MADERA_DFC1_RATE_WIDTH 5 +#define MADERA_DFC1_DITH_ENA 0x0002 +#define MADERA_DFC1_DITH_ENA_MASK 0x0002 +#define MADERA_DFC1_DITH_ENA_SHIFT 1 +#define MADERA_DFC1_DITH_ENA_WIDTH 1 +#define MADERA_DFC1_ENA 0x0001 +#define MADERA_DFC1_ENA_MASK 0x0001 +#define MADERA_DFC1_ENA_SHIFT 0 +#define MADERA_DFC1_ENA_WIDTH 1 + +/* (0x1482) DFC1_RX_W0 */ +#define MADERA_DFC1_RX_DATA_WIDTH_MASK 0x1F00 +#define MADERA_DFC1_RX_DATA_WIDTH_SHIFT 8 +#define MADERA_DFC1_RX_DATA_WIDTH_WIDTH 5 + +#define MADERA_DFC1_RX_DATA_TYPE_MASK 0x0007 +#define MADERA_DFC1_RX_DATA_TYPE_SHIFT 0 +#define MADERA_DFC1_RX_DATA_TYPE_WIDTH 3 + +/* (0x1484) DFC1_TX_W0 */ +#define MADERA_DFC1_TX_DATA_WIDTH_MASK 0x1F00 +#define MADERA_DFC1_TX_DATA_WIDTH_SHIFT 8 +#define MADERA_DFC1_TX_DATA_WIDTH_WIDTH 5 + +#define MADERA_DFC1_TX_DATA_TYPE_MASK 0x0007 +#define MADERA_DFC1_TX_DATA_TYPE_SHIFT 0 +#define MADERA_DFC1_TX_DATA_TYPE_WIDTH 3 + +/* (0x1600) ADSP2_IRQ0 */ +#define MADERA_DSP_IRQ2 0x0002 +#define MADERA_DSP_IRQ1 0x0001 + +/* (0x1601) ADSP2_IRQ1 */ +#define MADERA_DSP_IRQ4 0x0002 +#define MADERA_DSP_IRQ3 0x0001 + +/* (0x1602) ADSP2_IRQ2 */ +#define MADERA_DSP_IRQ6 0x0002 +#define MADERA_DSP_IRQ5 0x0001 + +/* (0x1603) ADSP2_IRQ3 */ +#define MADERA_DSP_IRQ8 0x0002 +#define MADERA_DSP_IRQ7 0x0001 + +/* (0x1604) ADSP2_IRQ4 */ +#define MADERA_DSP_IRQ10 0x0002 +#define MADERA_DSP_IRQ9 0x0001 + +/* (0x1605) ADSP2_IRQ5 */ +#define MADERA_DSP_IRQ12 0x0002 +#define MADERA_DSP_IRQ11 0x0001 + +/* (0x1606) ADSP2_IRQ6 */ +#define MADERA_DSP_IRQ14 0x0002 +#define MADERA_DSP_IRQ13 0x0001 + +/* (0x1607) ADSP2_IRQ7 */ +#define MADERA_DSP_IRQ16 0x0002 +#define MADERA_DSP_IRQ15 0x0001 + +/* (0x1700) GPIO1_CTRL_1 */ +#define MADERA_GP1_LVL 0x8000 +#define MADERA_GP1_LVL_MASK 0x8000 +#define MADERA_GP1_LVL_SHIFT 15 +#define MADERA_GP1_LVL_WIDTH 1 +#define MADERA_GP1_OP_CFG 0x4000 +#define MADERA_GP1_OP_CFG_MASK 0x4000 +#define MADERA_GP1_OP_CFG_SHIFT 14 +#define MADERA_GP1_OP_CFG_WIDTH 1 +#define MADERA_GP1_DB 0x2000 +#define MADERA_GP1_DB_MASK 0x2000 +#define MADERA_GP1_DB_SHIFT 13 +#define MADERA_GP1_DB_WIDTH 1 +#define MADERA_GP1_POL 0x1000 +#define MADERA_GP1_POL_MASK 0x1000 +#define MADERA_GP1_POL_SHIFT 12 +#define MADERA_GP1_POL_WIDTH 1 +#define MADERA_GP1_IP_CFG 0x0800 +#define MADERA_GP1_IP_CFG_MASK 0x0800 +#define MADERA_GP1_IP_CFG_SHIFT 11 +#define MADERA_GP1_IP_CFG_WIDTH 1 +#define MADERA_GP1_FN_MASK 0x03FF +#define MADERA_GP1_FN_SHIFT 0 +#define MADERA_GP1_FN_WIDTH 10 + +/* (0x1701) GPIO1_CTRL_2 */ +#define MADERA_GP1_DIR 0x8000 +#define MADERA_GP1_DIR_MASK 0x8000 +#define MADERA_GP1_DIR_SHIFT 15 +#define MADERA_GP1_DIR_WIDTH 1 +#define MADERA_GP1_PU 0x4000 +#define MADERA_GP1_PU_MASK 0x4000 +#define MADERA_GP1_PU_SHIFT 14 +#define MADERA_GP1_PU_WIDTH 1 +#define MADERA_GP1_PD 0x2000 +#define MADERA_GP1_PD_MASK 0x2000 +#define MADERA_GP1_PD_SHIFT 13 +#define MADERA_GP1_PD_WIDTH 1 +#define MADERA_GP1_DRV_STR_MASK 0x1800 +#define MADERA_GP1_DRV_STR_SHIFT 11 +#define MADERA_GP1_DRV_STR_WIDTH 2 + +/* (0x1800) IRQ1_Status_1 */ +#define MADERA_CTRLIF_ERR_EINT1 0x1000 +#define MADERA_CTRLIF_ERR_EINT1_MASK 0x1000 +#define MADERA_CTRLIF_ERR_EINT1_SHIFT 12 +#define MADERA_CTRLIF_ERR_EINT1_WIDTH 1 +#define MADERA_SYSCLK_FAIL_EINT1 0x0200 +#define MADERA_SYSCLK_FAIL_EINT1_MASK 0x0200 +#define MADERA_SYSCLK_FAIL_EINT1_SHIFT 9 +#define MADERA_SYSCLK_FAIL_EINT1_WIDTH 1 
+#define MADERA_CLOCK_DETECT_EINT1 0x0100 +#define MADERA_CLOCK_DETECT_EINT1_MASK 0x0100 +#define MADERA_CLOCK_DETECT_EINT1_SHIFT 8 +#define MADERA_CLOCK_DETECT_EINT1_WIDTH 1 +#define MADERA_BOOT_DONE_EINT1 0x0080 +#define MADERA_BOOT_DONE_EINT1_MASK 0x0080 +#define MADERA_BOOT_DONE_EINT1_SHIFT 7 +#define MADERA_BOOT_DONE_EINT1_WIDTH 1 + +/* (0x1801) IRQ1_Status_2 */ +#define MADERA_FLLAO_LOCK_EINT1 0x0800 +#define MADERA_FLLAO_LOCK_EINT1_MASK 0x0800 +#define MADERA_FLLAO_LOCK_EINT1_SHIFT 11 +#define MADERA_FLLAO_LOCK_EINT1_WIDTH 1 +#define MADERA_FLL3_LOCK_EINT1 0x0400 +#define MADERA_FLL3_LOCK_EINT1_MASK 0x0400 +#define MADERA_FLL3_LOCK_EINT1_SHIFT 10 +#define MADERA_FLL3_LOCK_EINT1_WIDTH 1 +#define MADERA_FLL2_LOCK_EINT1 0x0200 +#define MADERA_FLL2_LOCK_EINT1_MASK 0x0200 +#define MADERA_FLL2_LOCK_EINT1_SHIFT 9 +#define MADERA_FLL2_LOCK_EINT1_WIDTH 1 +#define MADERA_FLL1_LOCK_EINT1 0x0100 +#define MADERA_FLL1_LOCK_EINT1_MASK 0x0100 +#define MADERA_FLL1_LOCK_EINT1_SHIFT 8 +#define MADERA_FLL1_LOCK_EINT1_WIDTH 1 + +/* (0x1805) IRQ1_Status_6 */ +#define MADERA_MICDET2_EINT1 0x0200 +#define MADERA_MICDET2_EINT1_MASK 0x0200 +#define MADERA_MICDET2_EINT1_SHIFT 9 +#define MADERA_MICDET2_EINT1_WIDTH 1 +#define MADERA_MICDET1_EINT1 0x0100 +#define MADERA_MICDET1_EINT1_MASK 0x0100 +#define MADERA_MICDET1_EINT1_SHIFT 8 +#define MADERA_MICDET1_EINT1_WIDTH 1 +#define MADERA_HPDET_EINT1 0x0001 +#define MADERA_HPDET_EINT1_MASK 0x0001 +#define MADERA_HPDET_EINT1_SHIFT 0 +#define MADERA_HPDET_EINT1_WIDTH 1 + +/* (0x1806) IRQ1_Status_7 */ +#define MADERA_MICD_CLAMP_FALL_EINT1 0x0020 +#define MADERA_MICD_CLAMP_FALL_EINT1_MASK 0x0020 +#define MADERA_MICD_CLAMP_FALL_EINT1_SHIFT 5 +#define MADERA_MICD_CLAMP_FALL_EINT1_WIDTH 1 +#define MADERA_MICD_CLAMP_RISE_EINT1 0x0010 +#define MADERA_MICD_CLAMP_RISE_EINT1_MASK 0x0010 +#define MADERA_MICD_CLAMP_RISE_EINT1_SHIFT 4 +#define MADERA_MICD_CLAMP_RISE_EINT1_WIDTH 1 +#define MADERA_JD2_FALL_EINT1 0x0008 +#define MADERA_JD2_FALL_EINT1_MASK 0x0008 +#define MADERA_JD2_FALL_EINT1_SHIFT 3 +#define MADERA_JD2_FALL_EINT1_WIDTH 1 +#define MADERA_JD2_RISE_EINT1 0x0004 +#define MADERA_JD2_RISE_EINT1_MASK 0x0004 +#define MADERA_JD2_RISE_EINT1_SHIFT 2 +#define MADERA_JD2_RISE_EINT1_WIDTH 1 +#define MADERA_JD1_FALL_EINT1 0x0002 +#define MADERA_JD1_FALL_EINT1_MASK 0x0002 +#define MADERA_JD1_FALL_EINT1_SHIFT 1 +#define MADERA_JD1_FALL_EINT1_WIDTH 1 +#define MADERA_JD1_RISE_EINT1 0x0001 +#define MADERA_JD1_RISE_EINT1_MASK 0x0001 +#define MADERA_JD1_RISE_EINT1_SHIFT 0 +#define MADERA_JD1_RISE_EINT1_WIDTH 1 + +/* (0x1808) IRQ1_Status_9 */ +#define MADERA_ASRC2_IN2_LOCK_EINT1 0x0800 +#define MADERA_ASRC2_IN2_LOCK_EINT1_MASK 0x0800 +#define MADERA_ASRC2_IN2_LOCK_EINT1_SHIFT 11 +#define MADERA_ASRC2_IN2_LOCK_EINT1_WIDTH 1 +#define MADERA_ASRC2_IN1_LOCK_EINT1 0x0400 +#define MADERA_ASRC2_IN1_LOCK_EINT1_MASK 0x0400 +#define MADERA_ASRC2_IN1_LOCK_EINT1_SHIFT 10 +#define MADERA_ASRC2_IN1_LOCK_EINT1_WIDTH 1 +#define MADERA_ASRC1_IN2_LOCK_EINT1 0x0200 +#define MADERA_ASRC1_IN2_LOCK_EINT1_MASK 0x0200 +#define MADERA_ASRC1_IN2_LOCK_EINT1_SHIFT 9 +#define MADERA_ASRC1_IN2_LOCK_EINT1_WIDTH 1 +#define MADERA_ASRC1_IN1_LOCK_EINT1 0x0100 +#define MADERA_ASRC1_IN1_LOCK_EINT1_MASK 0x0100 +#define MADERA_ASRC1_IN1_LOCK_EINT1_SHIFT 8 +#define MADERA_ASRC1_IN1_LOCK_EINT1_WIDTH 1 +#define MADERA_DRC2_SIG_DET_EINT1 0x0002 +#define MADERA_DRC2_SIG_DET_EINT1_MASK 0x0002 +#define MADERA_DRC2_SIG_DET_EINT1_SHIFT 1 +#define MADERA_DRC2_SIG_DET_EINT1_WIDTH 1 +#define MADERA_DRC1_SIG_DET_EINT1 0x0001 +#define 
MADERA_DRC1_SIG_DET_EINT1_MASK 0x0001 +#define MADERA_DRC1_SIG_DET_EINT1_SHIFT 0 +#define MADERA_DRC1_SIG_DET_EINT1_WIDTH 1 + +/* (0x180A) IRQ1_Status_11 */ +#define MADERA_DSP_IRQ16_EINT1 0x8000 +#define MADERA_DSP_IRQ16_EINT1_MASK 0x8000 +#define MADERA_DSP_IRQ16_EINT1_SHIFT 15 +#define MADERA_DSP_IRQ16_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ15_EINT1 0x4000 +#define MADERA_DSP_IRQ15_EINT1_MASK 0x4000 +#define MADERA_DSP_IRQ15_EINT1_SHIFT 14 +#define MADERA_DSP_IRQ15_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ14_EINT1 0x2000 +#define MADERA_DSP_IRQ14_EINT1_MASK 0x2000 +#define MADERA_DSP_IRQ14_EINT1_SHIFT 13 +#define MADERA_DSP_IRQ14_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ13_EINT1 0x1000 +#define MADERA_DSP_IRQ13_EINT1_MASK 0x1000 +#define MADERA_DSP_IRQ13_EINT1_SHIFT 12 +#define MADERA_DSP_IRQ13_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ12_EINT1 0x0800 +#define MADERA_DSP_IRQ12_EINT1_MASK 0x0800 +#define MADERA_DSP_IRQ12_EINT1_SHIFT 11 +#define MADERA_DSP_IRQ12_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ11_EINT1 0x0400 +#define MADERA_DSP_IRQ11_EINT1_MASK 0x0400 +#define MADERA_DSP_IRQ11_EINT1_SHIFT 10 +#define MADERA_DSP_IRQ11_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ10_EINT1 0x0200 +#define MADERA_DSP_IRQ10_EINT1_MASK 0x0200 +#define MADERA_DSP_IRQ10_EINT1_SHIFT 9 +#define MADERA_DSP_IRQ10_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ9_EINT1 0x0100 +#define MADERA_DSP_IRQ9_EINT1_MASK 0x0100 +#define MADERA_DSP_IRQ9_EINT1_SHIFT 8 +#define MADERA_DSP_IRQ9_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ8_EINT1 0x0080 +#define MADERA_DSP_IRQ8_EINT1_MASK 0x0080 +#define MADERA_DSP_IRQ8_EINT1_SHIFT 7 +#define MADERA_DSP_IRQ8_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ7_EINT1 0x0040 +#define MADERA_DSP_IRQ7_EINT1_MASK 0x0040 +#define MADERA_DSP_IRQ7_EINT1_SHIFT 6 +#define MADERA_DSP_IRQ7_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ6_EINT1 0x0020 +#define MADERA_DSP_IRQ6_EINT1_MASK 0x0020 +#define MADERA_DSP_IRQ6_EINT1_SHIFT 5 +#define MADERA_DSP_IRQ6_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ5_EINT1 0x0010 +#define MADERA_DSP_IRQ5_EINT1_MASK 0x0010 +#define MADERA_DSP_IRQ5_EINT1_SHIFT 4 +#define MADERA_DSP_IRQ5_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ4_EINT1 0x0008 +#define MADERA_DSP_IRQ4_EINT1_MASK 0x0008 +#define MADERA_DSP_IRQ4_EINT1_SHIFT 3 +#define MADERA_DSP_IRQ4_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ3_EINT1 0x0004 +#define MADERA_DSP_IRQ3_EINT1_MASK 0x0004 +#define MADERA_DSP_IRQ3_EINT1_SHIFT 2 +#define MADERA_DSP_IRQ3_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ2_EINT1 0x0002 +#define MADERA_DSP_IRQ2_EINT1_MASK 0x0002 +#define MADERA_DSP_IRQ2_EINT1_SHIFT 1 +#define MADERA_DSP_IRQ2_EINT1_WIDTH 1 +#define MADERA_DSP_IRQ1_EINT1 0x0001 +#define MADERA_DSP_IRQ1_EINT1_MASK 0x0001 +#define MADERA_DSP_IRQ1_EINT1_SHIFT 0 +#define MADERA_DSP_IRQ1_EINT1_WIDTH 1 + +/* (0x180B) IRQ1_Status_12 */ +#define MADERA_SPKOUTR_SC_EINT1 0x0080 +#define MADERA_SPKOUTR_SC_EINT1_MASK 0x0080 +#define MADERA_SPKOUTR_SC_EINT1_SHIFT 7 +#define MADERA_SPKOUTR_SC_EINT1_WIDTH 1 +#define MADERA_SPKOUTL_SC_EINT1 0x0040 +#define MADERA_SPKOUTL_SC_EINT1_MASK 0x0040 +#define MADERA_SPKOUTL_SC_EINT1_SHIFT 6 +#define MADERA_SPKOUTL_SC_EINT1_WIDTH 1 +#define MADERA_HP3R_SC_EINT1 0x0020 +#define MADERA_HP3R_SC_EINT1_MASK 0x0020 +#define MADERA_HP3R_SC_EINT1_SHIFT 5 +#define MADERA_HP3R_SC_EINT1_WIDTH 1 +#define MADERA_HP3L_SC_EINT1 0x0010 +#define MADERA_HP3L_SC_EINT1_MASK 0x0010 +#define MADERA_HP3L_SC_EINT1_SHIFT 4 +#define MADERA_HP3L_SC_EINT1_WIDTH 1 +#define MADERA_HP2R_SC_EINT1 0x0008 +#define MADERA_HP2R_SC_EINT1_MASK 0x0008 +#define MADERA_HP2R_SC_EINT1_SHIFT 3 +#define 
MADERA_HP2R_SC_EINT1_WIDTH 1 +#define MADERA_HP2L_SC_EINT1 0x0004 +#define MADERA_HP2L_SC_EINT1_MASK 0x0004 +#define MADERA_HP2L_SC_EINT1_SHIFT 2 +#define MADERA_HP2L_SC_EINT1_WIDTH 1 +#define MADERA_HP1R_SC_EINT1 0x0002 +#define MADERA_HP1R_SC_EINT1_MASK 0x0002 +#define MADERA_HP1R_SC_EINT1_SHIFT 1 +#define MADERA_HP1R_SC_EINT1_WIDTH 1 +#define MADERA_HP1L_SC_EINT1 0x0001 +#define MADERA_HP1L_SC_EINT1_MASK 0x0001 +#define MADERA_HP1L_SC_EINT1_SHIFT 0 +#define MADERA_HP1L_SC_EINT1_WIDTH 1 + +/* (0x180E) IRQ1_Status_15 */ +#define MADERA_SPK_OVERHEAT_WARN_EINT1 0x0004 +#define MADERA_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004 +#define MADERA_SPK_OVERHEAT_WARN_EINT1_SHIFT 2 +#define MADERA_SPK_OVERHEAT_WARN_EINT1_WIDTH 1 +#define MADERA_SPK_OVERHEAT_EINT1 0x0002 +#define MADERA_SPK_OVERHEAT_EINT1_MASK 0x0002 +#define MADERA_SPK_OVERHEAT_EINT1_SHIFT 1 +#define MADERA_SPK_OVERHEAT_EINT1_WIDTH 1 +#define MADERA_SPK_SHUTDOWN_EINT1 0x0001 +#define MADERA_SPK_SHUTDOWN_EINT1_MASK 0x0001 +#define MADERA_SPK_SHUTDOWN_EINT1_SHIFT 0 +#define MADERA_SPK_SHUTDOWN_EINT1_WIDTH 1 + +/* (0x1820) - IRQ1 Status 33 */ +#define MADERA_DSP7_BUS_ERR_EINT1 0x0040 +#define MADERA_DSP7_BUS_ERR_EINT1_MASK 0x0040 +#define MADERA_DSP7_BUS_ERR_EINT1_SHIFT 6 +#define MADERA_DSP7_BUS_ERR_EINT1_WIDTH 1 +#define MADERA_DSP6_BUS_ERR_EINT1 0x0020 +#define MADERA_DSP6_BUS_ERR_EINT1_MASK 0x0020 +#define MADERA_DSP6_BUS_ERR_EINT1_SHIFT 5 +#define MADERA_DSP6_BUS_ERR_EINT1_WIDTH 1 +#define MADERA_DSP5_BUS_ERR_EINT1 0x0010 +#define MADERA_DSP5_BUS_ERR_EINT1_MASK 0x0010 +#define MADERA_DSP5_BUS_ERR_EINT1_SHIFT 4 +#define MADERA_DSP5_BUS_ERR_EINT1_WIDTH 1 +#define MADERA_DSP4_BUS_ERR_EINT1 0x0008 +#define MADERA_DSP4_BUS_ERR_EINT1_MASK 0x0008 +#define MADERA_DSP4_BUS_ERR_EINT1_SHIFT 3 +#define MADERA_DSP4_BUS_ERR_EINT1_WIDTH 1 +#define MADERA_DSP3_BUS_ERR_EINT1 0x0004 +#define MADERA_DSP3_BUS_ERR_EINT1_MASK 0x0004 +#define MADERA_DSP3_BUS_ERR_EINT1_SHIFT 2 +#define MADERA_DSP3_BUS_ERR_EINT1_WIDTH 1 +#define MADERA_DSP2_BUS_ERR_EINT1 0x0002 +#define MADERA_DSP2_BUS_ERR_EINT1_MASK 0x0002 +#define MADERA_DSP2_BUS_ERR_EINT1_SHIFT 1 +#define MADERA_DSP2_BUS_ERR_EINT1_WIDTH 1 +#define MADERA_DSP1_BUS_ERR_EINT1 0x0001 +#define MADERA_DSP1_BUS_ERR_EINT1_MASK 0x0001 +#define MADERA_DSP1_BUS_ERR_EINT1_SHIFT 0 +#define MADERA_DSP1_BUS_ERR_EINT1_WIDTH 1 + +/* (0x1845) IRQ1_Mask_6 */ +#define MADERA_IM_MICDET2_EINT1 0x0200 +#define MADERA_IM_MICDET2_EINT1_MASK 0x0200 +#define MADERA_IM_MICDET2_EINT1_SHIFT 9 +#define MADERA_IM_MICDET2_EINT1_WIDTH 1 +#define MADERA_IM_MICDET1_EINT1 0x0100 +#define MADERA_IM_MICDET1_EINT1_MASK 0x0100 +#define MADERA_IM_MICDET1_EINT1_SHIFT 8 +#define MADERA_IM_MICDET1_EINT1_WIDTH 1 +#define MADERA_IM_HPDET_EINT1 0x0001 +#define MADERA_IM_HPDET_EINT1_MASK 0x0001 +#define MADERA_IM_HPDET_EINT1_SHIFT 0 +#define MADERA_IM_HPDET_EINT1_WIDTH 1 +/* (0x184E) IRQ1_Mask_15 */ +#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1 0x0004 +#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004 +#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_SHIFT 2 +#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_WIDTH 1 +#define MADERA_IM_SPK_OVERHEAT_EINT1 0x0002 +#define MADERA_IM_SPK_OVERHEAT_EINT1_MASK 0x0002 +#define MADERA_IM_SPK_OVERHEAT_EINT1_SHIFT 1 +#define MADERA_IM_SPK_OVERHEAT_EINT1_WIDTH 1 +#define MADERA_IM_SPK_SHUTDOWN_EINT1 0x0001 +#define MADERA_IM_SPK_SHUTDOWN_EINT1_MASK 0x0001 +#define MADERA_IM_SPK_SHUTDOWN_EINT1_SHIFT 0 +#define MADERA_IM_SPK_SHUTDOWN_EINT1_WIDTH 1 + +/* (0x1880) - IRQ1 Raw Status 1 */ +#define MADERA_CTRLIF_ERR_STS1 0x1000 +#define 
MADERA_CTRLIF_ERR_STS1_MASK 0x1000 +#define MADERA_CTRLIF_ERR_STS1_SHIFT 12 +#define MADERA_CTRLIF_ERR_STS1_WIDTH 1 +#define MADERA_SYSCLK_FAIL_STS1 0x0200 +#define MADERA_SYSCLK_FAIL_STS1_MASK 0x0200 +#define MADERA_SYSCLK_FAIL_STS1_SHIFT 9 +#define MADERA_SYSCLK_FAIL_STS1_WIDTH 1 +#define MADERA_CLOCK_DETECT_STS1 0x0100 +#define MADERA_CLOCK_DETECT_STS1_MASK 0x0100 +#define MADERA_CLOCK_DETECT_STS1_SHIFT 8 +#define MADERA_CLOCK_DETECT_STS1_WIDTH 1 +#define MADERA_BOOT_DONE_STS1 0x0080 +#define MADERA_BOOT_DONE_STS1_MASK 0x0080 +#define MADERA_BOOT_DONE_STS1_SHIFT 7 +#define MADERA_BOOT_DONE_STS1_WIDTH 1 + +/* (0x1881) - IRQ1 Raw Status 2 */ +#define MADERA_FLL3_LOCK_STS1 0x0400 +#define MADERA_FLL3_LOCK_STS1_MASK 0x0400 +#define MADERA_FLL3_LOCK_STS1_SHIFT 10 +#define MADERA_FLL3_LOCK_STS1_WIDTH 1 +#define MADERA_FLL2_LOCK_STS1 0x0200 +#define MADERA_FLL2_LOCK_STS1_MASK 0x0200 +#define MADERA_FLL2_LOCK_STS1_SHIFT 9 +#define MADERA_FLL2_LOCK_STS1_WIDTH 1 +#define MADERA_FLL1_LOCK_STS1 0x0100 +#define MADERA_FLL1_LOCK_STS1_MASK 0x0100 +#define MADERA_FLL1_LOCK_STS1_SHIFT 8 +#define MADERA_FLL1_LOCK_STS1_WIDTH 1 + +/* (0x1886) - IRQ1 Raw Status 7 */ +#define MADERA_MICD_CLAMP_FALL_STS1 0x0020 +#define MADERA_MICD_CLAMP_FALL_STS1_MASK 0x0020 +#define MADERA_MICD_CLAMP_FALL_STS1_SHIFT 5 +#define MADERA_MICD_CLAMP_FALL_STS1_WIDTH 1 +#define MADERA_MICD_CLAMP_RISE_STS1 0x0010 +#define MADERA_MICD_CLAMP_RISE_STS1_MASK 0x0010 +#define MADERA_MICD_CLAMP_RISE_STS1_SHIFT 4 +#define MADERA_MICD_CLAMP_RISE_STS1_WIDTH 1 +#define MADERA_JD2_FALL_STS1 0x0008 +#define MADERA_JD2_FALL_STS1_MASK 0x0008 +#define MADERA_JD2_FALL_STS1_SHIFT 3 +#define MADERA_JD2_FALL_STS1_WIDTH 1 +#define MADERA_JD2_RISE_STS1 0x0004 +#define MADERA_JD2_RISE_STS1_MASK 0x0004 +#define MADERA_JD2_RISE_STS1_SHIFT 2 +#define MADERA_JD2_RISE_STS1_WIDTH 1 +#define MADERA_JD1_FALL_STS1 0x0002 +#define MADERA_JD1_FALL_STS1_MASK 0x0002 +#define MADERA_JD1_FALL_STS1_SHIFT 1 +#define MADERA_JD1_FALL_STS1_WIDTH 1 +#define MADERA_JD1_RISE_STS1 0x0001 +#define MADERA_JD1_RISE_STS1_MASK 0x0001 +#define MADERA_JD1_RISE_STS1_SHIFT 0 +#define MADERA_JD1_RISE_STS1_WIDTH 1 + +/* (0x188E) - IRQ1 Raw Status 15 */ +#define MADERA_SPK_OVERHEAT_WARN_STS1 0x0004 +#define MADERA_SPK_OVERHEAT_WARN_STS1_MASK 0x0004 +#define MADERA_SPK_OVERHEAT_WARN_STS1_SHIFT 2 +#define MADERA_SPK_OVERHEAT_WARN_STS1_WIDTH 1 +#define MADERA_SPK_OVERHEAT_STS1 0x0002 +#define MADERA_SPK_OVERHEAT_STS1_MASK 0x0002 +#define MADERA_SPK_OVERHEAT_STS1_SHIFT 1 +#define MADERA_SPK_OVERHEAT_STS1_WIDTH 1 +#define MADERA_SPK_SHUTDOWN_STS1 0x0001 +#define MADERA_SPK_SHUTDOWN_STS1_MASK 0x0001 +#define MADERA_SPK_SHUTDOWN_STS1_SHIFT 0 +#define MADERA_SPK_SHUTDOWN_STS1_WIDTH 1 + +/* (0x1A06) Interrupt_Debounce_7 */ +#define MADERA_MICD_CLAMP_DB 0x0010 +#define MADERA_MICD_CLAMP_DB_MASK 0x0010 +#define MADERA_MICD_CLAMP_DB_SHIFT 4 +#define MADERA_MICD_CLAMP_DB_WIDTH 1 +#define MADERA_JD2_DB 0x0004 +#define MADERA_JD2_DB_MASK 0x0004 +#define MADERA_JD2_DB_SHIFT 2 +#define MADERA_JD2_DB_WIDTH 1 +#define MADERA_JD1_DB 0x0001 +#define MADERA_JD1_DB_MASK 0x0001 +#define MADERA_JD1_DB_SHIFT 0 +#define MADERA_JD1_DB_WIDTH 1 + +/* (0x1A0E) Interrupt_Debounce_15 */ +#define MADERA_SPK_OVERHEAT_WARN_DB 0x0004 +#define MADERA_SPK_OVERHEAT_WARN_DB_MASK 0x0004 +#define MADERA_SPK_OVERHEAT_WARN_DB_SHIFT 2 +#define MADERA_SPK_OVERHEAT_WARN_DB_WIDTH 1 +#define MADERA_SPK_OVERHEAT_DB 0x0002 +#define MADERA_SPK_OVERHEAT_DB_MASK 0x0002 +#define MADERA_SPK_OVERHEAT_DB_SHIFT 1 +#define 
MADERA_SPK_OVERHEAT_DB_WIDTH 1 + +/* (0x1A80) IRQ1_CTRL */ +#define MADERA_IM_IRQ1 0x0800 +#define MADERA_IM_IRQ1_MASK 0x0800 +#define MADERA_IM_IRQ1_SHIFT 11 +#define MADERA_IM_IRQ1_WIDTH 1 +#define MADERA_IRQ_POL 0x0400 +#define MADERA_IRQ_POL_MASK 0x0400 +#define MADERA_IRQ_POL_SHIFT 10 +#define MADERA_IRQ_POL_WIDTH 1 + +/* (0x20004) OTP_HPDET_Cal_1 */ +#define MADERA_OTP_HPDET_CALIB_OFFSET_11 0xFF000000 +#define MADERA_OTP_HPDET_CALIB_OFFSET_11_MASK 0xFF000000 +#define MADERA_OTP_HPDET_CALIB_OFFSET_11_SHIFT 24 +#define MADERA_OTP_HPDET_CALIB_OFFSET_11_WIDTH 8 +#define MADERA_OTP_HPDET_CALIB_OFFSET_10 0x00FF0000 +#define MADERA_OTP_HPDET_CALIB_OFFSET_10_MASK 0x00FF0000 +#define MADERA_OTP_HPDET_CALIB_OFFSET_10_SHIFT 16 +#define MADERA_OTP_HPDET_CALIB_OFFSET_10_WIDTH 8 +#define MADERA_OTP_HPDET_CALIB_OFFSET_01 0x0000FF00 +#define MADERA_OTP_HPDET_CALIB_OFFSET_01_MASK 0x0000FF00 +#define MADERA_OTP_HPDET_CALIB_OFFSET_01_SHIFT 8 +#define MADERA_OTP_HPDET_CALIB_OFFSET_01_WIDTH 8 +#define MADERA_OTP_HPDET_CALIB_OFFSET_00 0x000000FF +#define MADERA_OTP_HPDET_CALIB_OFFSET_00_MASK 0x000000FF +#define MADERA_OTP_HPDET_CALIB_OFFSET_00_SHIFT 0 +#define MADERA_OTP_HPDET_CALIB_OFFSET_00_WIDTH 8 + +/* (0x20006) OTP_HPDET_Cal_2 */ +#define MADERA_OTP_HPDET_GRADIENT_1X 0x0000FF00 +#define MADERA_OTP_HPDET_GRADIENT_1X_MASK 0x0000FF00 +#define MADERA_OTP_HPDET_GRADIENT_1X_SHIFT 8 +#define MADERA_OTP_HPDET_GRADIENT_1X_WIDTH 8 +#define MADERA_OTP_HPDET_GRADIENT_0X 0x000000FF +#define MADERA_OTP_HPDET_GRADIENT_0X_MASK 0x000000FF +#define MADERA_OTP_HPDET_GRADIENT_0X_SHIFT 0 +#define MADERA_OTP_HPDET_GRADIENT_0X_WIDTH 8 + +#endif diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h new file mode 100644 index 000000000..df75234f9 --- /dev/null +++ b/include/linux/mfd/max14577-private.h @@ -0,0 +1,485 @@ +/* + * max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip + * + * Copyright (C) 2014 Samsung Electrnoics + * Chanwoo Choi + * Krzysztof Kozlowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MAX14577_PRIVATE_H__ +#define __MAX14577_PRIVATE_H__ + +#include +#include + +#define I2C_ADDR_PMIC (0x46 >> 1) +#define I2C_ADDR_MUIC (0x4A >> 1) +#define I2C_ADDR_FG (0x6C >> 1) + +enum maxim_device_type { + MAXIM_DEVICE_TYPE_UNKNOWN = 0, + MAXIM_DEVICE_TYPE_MAX14577, + MAXIM_DEVICE_TYPE_MAX77836, + + MAXIM_DEVICE_TYPE_NUM, +}; + +/* Slave addr = 0x4A: MUIC and Charger */ +enum max14577_reg { + MAX14577_REG_DEVICEID = 0x00, + MAX14577_REG_INT1 = 0x01, + MAX14577_REG_INT2 = 0x02, + MAX14577_REG_INT3 = 0x03, + MAX14577_REG_STATUS1 = 0x04, + MAX14577_REG_STATUS2 = 0x05, + MAX14577_REG_STATUS3 = 0x06, + MAX14577_REG_INTMASK1 = 0x07, + MAX14577_REG_INTMASK2 = 0x08, + MAX14577_REG_INTMASK3 = 0x09, + MAX14577_REG_CDETCTRL1 = 0x0A, + MAX14577_REG_RFU = 0x0B, + MAX14577_REG_CONTROL1 = 0x0C, + MAX14577_REG_CONTROL2 = 0x0D, + MAX14577_REG_CONTROL3 = 0x0E, + MAX14577_REG_CHGCTRL1 = 0x0F, + MAX14577_REG_CHGCTRL2 = 0x10, + MAX14577_REG_CHGCTRL3 = 0x11, + MAX14577_REG_CHGCTRL4 = 0x12, + MAX14577_REG_CHGCTRL5 = 0x13, + MAX14577_REG_CHGCTRL6 = 0x14, + MAX14577_REG_CHGCTRL7 = 0x15, + + MAX14577_REG_END, +}; + +/* Slave addr = 0x4A: MUIC */ +enum max14577_muic_reg { + MAX14577_MUIC_REG_STATUS1 = 0x04, + MAX14577_MUIC_REG_STATUS2 = 0x05, + MAX14577_MUIC_REG_CONTROL1 = 0x0C, + MAX14577_MUIC_REG_CONTROL3 = 0x0E, + + MAX14577_MUIC_REG_END, +}; + +/* + * Combined charger types for max14577 and max77836. + * + * On max14577 three lower bits map to STATUS2/CHGTYP field. + * However the max77836 has different two last values of STATUS2/CHGTYP. + * To indicate the difference enum has two additional values for max77836. + * These values are just a register value bitwise OR with 0x8. + */ +enum max14577_muic_charger_type { + MAX14577_CHARGER_TYPE_NONE = 0x0, + MAX14577_CHARGER_TYPE_USB = 0x1, + MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT = 0x2, + MAX14577_CHARGER_TYPE_DEDICATED_CHG = 0x3, + MAX14577_CHARGER_TYPE_SPECIAL_500MA = 0x4, + /* Special 1A or 2A charger */ + MAX14577_CHARGER_TYPE_SPECIAL_1A = 0x5, + /* max14577: reserved, used on max77836 */ + MAX14577_CHARGER_TYPE_RESERVED = 0x6, + /* max14577: dead-battery charing with maximum current 100mA */ + MAX14577_CHARGER_TYPE_DEAD_BATTERY = 0x7, + /* + * max77836: special charger (bias on D+/D-), + * matches register value of 0x6 + */ + MAX77836_CHARGER_TYPE_SPECIAL_BIAS = 0xe, + /* max77836: reserved, register value 0x7 */ + MAX77836_CHARGER_TYPE_RESERVED = 0xf, +}; + +/* MAX14577 interrupts */ +#define MAX14577_INT1_ADC_MASK BIT(0) +#define MAX14577_INT1_ADCLOW_MASK BIT(1) +#define MAX14577_INT1_ADCERR_MASK BIT(2) +#define MAX77836_INT1_ADC1K_MASK BIT(3) + +#define MAX14577_INT2_CHGTYP_MASK BIT(0) +#define MAX14577_INT2_CHGDETRUN_MASK BIT(1) +#define MAX14577_INT2_DCDTMR_MASK BIT(2) +#define MAX14577_INT2_DBCHG_MASK BIT(3) +#define MAX14577_INT2_VBVOLT_MASK BIT(4) +#define MAX77836_INT2_VIDRM_MASK BIT(5) + +#define MAX14577_INT3_EOC_MASK BIT(0) +#define MAX14577_INT3_CGMBC_MASK BIT(1) +#define MAX14577_INT3_OVP_MASK BIT(2) +#define MAX14577_INT3_MBCCHGERR_MASK BIT(3) + +/* MAX14577 DEVICE ID register */ +#define DEVID_VENDORID_SHIFT 0 +#define DEVID_DEVICEID_SHIFT 3 +#define DEVID_VENDORID_MASK (0x07 << DEVID_VENDORID_SHIFT) +#define DEVID_DEVICEID_MASK (0x1f << DEVID_DEVICEID_SHIFT) + +/* MAX14577 STATUS1 register */ +#define STATUS1_ADC_SHIFT 0 +#define STATUS1_ADCLOW_SHIFT 5 +#define STATUS1_ADCERR_SHIFT 6 +#define MAX77836_STATUS1_ADC1K_SHIFT 7 +#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) +#define STATUS1_ADCLOW_MASK 
BIT(STATUS1_ADCLOW_SHIFT) +#define STATUS1_ADCERR_MASK BIT(STATUS1_ADCERR_SHIFT) +#define MAX77836_STATUS1_ADC1K_MASK BIT(MAX77836_STATUS1_ADC1K_SHIFT) + +/* MAX14577 STATUS2 register */ +#define STATUS2_CHGTYP_SHIFT 0 +#define STATUS2_CHGDETRUN_SHIFT 3 +#define STATUS2_DCDTMR_SHIFT 4 +#define MAX14577_STATUS2_DBCHG_SHIFT 5 +#define MAX77836_STATUS2_DXOVP_SHIFT 5 +#define STATUS2_VBVOLT_SHIFT 6 +#define MAX77836_STATUS2_VIDRM_SHIFT 7 +#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) +#define STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT) +#define STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT) +#define MAX14577_STATUS2_DBCHG_MASK BIT(MAX14577_STATUS2_DBCHG_SHIFT) +#define MAX77836_STATUS2_DXOVP_MASK BIT(MAX77836_STATUS2_DXOVP_SHIFT) +#define STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT) +#define MAX77836_STATUS2_VIDRM_MASK BIT(MAX77836_STATUS2_VIDRM_SHIFT) + +/* MAX14577 CONTROL1 register */ +#define COMN1SW_SHIFT 0 +#define COMP2SW_SHIFT 3 +#define MICEN_SHIFT 6 +#define IDBEN_SHIFT 7 +#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT) +#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT) +#define MICEN_MASK BIT(MICEN_SHIFT) +#define IDBEN_MASK BIT(IDBEN_SHIFT) +#define CLEAR_IDBEN_MICEN_MASK (COMN1SW_MASK | COMP2SW_MASK) +#define CTRL1_SW_USB ((1 << COMP2SW_SHIFT) \ + | (1 << COMN1SW_SHIFT)) +#define CTRL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \ + | (2 << COMN1SW_SHIFT)) +#define CTRL1_SW_UART ((3 << COMP2SW_SHIFT) \ + | (3 << COMN1SW_SHIFT)) +#define CTRL1_SW_OPEN ((0 << COMP2SW_SHIFT) \ + | (0 << COMN1SW_SHIFT)) + +/* MAX14577 CONTROL2 register */ +#define CTRL2_LOWPWR_SHIFT (0) +#define CTRL2_ADCEN_SHIFT (1) +#define CTRL2_CPEN_SHIFT (2) +#define CTRL2_SFOUTASRT_SHIFT (3) +#define CTRL2_SFOUTORD_SHIFT (4) +#define CTRL2_ACCDET_SHIFT (5) +#define CTRL2_USBCPINT_SHIFT (6) +#define CTRL2_RCPS_SHIFT (7) +#define CTRL2_LOWPWR_MASK BIT(CTRL2_LOWPWR_SHIFT) +#define CTRL2_ADCEN_MASK BIT(CTRL2_ADCEN_SHIFT) +#define CTRL2_CPEN_MASK BIT(CTRL2_CPEN_SHIFT) +#define CTRL2_SFOUTASRT_MASK BIT(CTRL2_SFOUTASRT_SHIFT) +#define CTRL2_SFOUTORD_MASK BIT(CTRL2_SFOUTORD_SHIFT) +#define CTRL2_ACCDET_MASK BIT(CTRL2_ACCDET_SHIFT) +#define CTRL2_USBCPINT_MASK BIT(CTRL2_USBCPINT_SHIFT) +#define CTRL2_RCPS_MASK BIT(CTRL2_RCPS_SHIFT) + +#define CTRL2_CPEN1_LOWPWR0 ((1 << CTRL2_CPEN_SHIFT) | \ + (0 << CTRL2_LOWPWR_SHIFT)) +#define CTRL2_CPEN0_LOWPWR1 ((0 << CTRL2_CPEN_SHIFT) | \ + (1 << CTRL2_LOWPWR_SHIFT)) + +/* MAX14577 CONTROL3 register */ +#define CTRL3_JIGSET_SHIFT 0 +#define CTRL3_BOOTSET_SHIFT 2 +#define CTRL3_ADCDBSET_SHIFT 4 +#define CTRL3_WBTH_SHIFT 6 +#define CTRL3_JIGSET_MASK (0x3 << CTRL3_JIGSET_SHIFT) +#define CTRL3_BOOTSET_MASK (0x3 << CTRL3_BOOTSET_SHIFT) +#define CTRL3_ADCDBSET_MASK (0x3 << CTRL3_ADCDBSET_SHIFT) +#define CTRL3_WBTH_MASK (0x3 << CTRL3_WBTH_SHIFT) + +/* Slave addr = 0x4A: Charger */ +enum max14577_charger_reg { + MAX14577_CHG_REG_STATUS3 = 0x06, + MAX14577_CHG_REG_CHG_CTRL1 = 0x0F, + MAX14577_CHG_REG_CHG_CTRL2 = 0x10, + MAX14577_CHG_REG_CHG_CTRL3 = 0x11, + MAX14577_CHG_REG_CHG_CTRL4 = 0x12, + MAX14577_CHG_REG_CHG_CTRL5 = 0x13, + MAX14577_CHG_REG_CHG_CTRL6 = 0x14, + MAX14577_CHG_REG_CHG_CTRL7 = 0x15, + + MAX14577_CHG_REG_END, +}; + +/* MAX14577 STATUS3 register */ +#define STATUS3_EOC_SHIFT 0 +#define STATUS3_CGMBC_SHIFT 1 +#define STATUS3_OVP_SHIFT 2 +#define STATUS3_MBCCHGERR_SHIFT 3 +#define STATUS3_EOC_MASK (0x1 << STATUS3_EOC_SHIFT) +#define STATUS3_CGMBC_MASK (0x1 << STATUS3_CGMBC_SHIFT) +#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT) +#define STATUS3_MBCCHGERR_MASK (0x1 << 
STATUS3_MBCCHGERR_SHIFT) + +/* MAX14577 CDETCTRL1 register */ +#define CDETCTRL1_CHGDETEN_SHIFT 0 +#define CDETCTRL1_CHGTYPMAN_SHIFT 1 +#define CDETCTRL1_DCDEN_SHIFT 2 +#define CDETCTRL1_DCD2SCT_SHIFT 3 +#define MAX14577_CDETCTRL1_DCHKTM_SHIFT 4 +#define MAX77836_CDETCTRL1_CDDLY_SHIFT 4 +#define MAX14577_CDETCTRL1_DBEXIT_SHIFT 5 +#define MAX77836_CDETCTRL1_DCDCPL_SHIFT 5 +#define CDETCTRL1_DBIDLE_SHIFT 6 +#define CDETCTRL1_CDPDET_SHIFT 7 +#define CDETCTRL1_CHGDETEN_MASK BIT(CDETCTRL1_CHGDETEN_SHIFT) +#define CDETCTRL1_CHGTYPMAN_MASK BIT(CDETCTRL1_CHGTYPMAN_SHIFT) +#define CDETCTRL1_DCDEN_MASK BIT(CDETCTRL1_DCDEN_SHIFT) +#define CDETCTRL1_DCD2SCT_MASK BIT(CDETCTRL1_DCD2SCT_SHIFT) +#define MAX14577_CDETCTRL1_DCHKTM_MASK BIT(MAX14577_CDETCTRL1_DCHKTM_SHIFT) +#define MAX77836_CDETCTRL1_CDDLY_MASK BIT(MAX77836_CDETCTRL1_CDDLY_SHIFT) +#define MAX14577_CDETCTRL1_DBEXIT_MASK BIT(MAX14577_CDETCTRL1_DBEXIT_SHIFT) +#define MAX77836_CDETCTRL1_DCDCPL_MASK BIT(MAX77836_CDETCTRL1_DCDCPL_SHIFT) +#define CDETCTRL1_DBIDLE_MASK BIT(CDETCTRL1_DBIDLE_SHIFT) +#define CDETCTRL1_CDPDET_MASK BIT(CDETCTRL1_CDPDET_SHIFT) + +/* MAX14577 CHGCTRL1 register */ +#define CHGCTRL1_TCHW_SHIFT 4 +#define CHGCTRL1_TCHW_MASK (0x7 << CHGCTRL1_TCHW_SHIFT) + +/* MAX14577 CHGCTRL2 register */ +#define CHGCTRL2_MBCHOSTEN_SHIFT 6 +#define CHGCTRL2_MBCHOSTEN_MASK BIT(CHGCTRL2_MBCHOSTEN_SHIFT) +#define CHGCTRL2_VCHGR_RC_SHIFT 7 +#define CHGCTRL2_VCHGR_RC_MASK BIT(CHGCTRL2_VCHGR_RC_SHIFT) + +/* MAX14577 CHGCTRL3 register */ +#define CHGCTRL3_MBCCVWRC_SHIFT 0 +#define CHGCTRL3_MBCCVWRC_MASK (0xf << CHGCTRL3_MBCCVWRC_SHIFT) + +/* MAX14577 CHGCTRL4 register */ +#define CHGCTRL4_MBCICHWRCH_SHIFT 0 +#define CHGCTRL4_MBCICHWRCH_MASK (0xf << CHGCTRL4_MBCICHWRCH_SHIFT) +#define CHGCTRL4_MBCICHWRCL_SHIFT 4 +#define CHGCTRL4_MBCICHWRCL_MASK BIT(CHGCTRL4_MBCICHWRCL_SHIFT) + +/* MAX14577 CHGCTRL5 register */ +#define CHGCTRL5_EOCS_SHIFT 0 +#define CHGCTRL5_EOCS_MASK (0xf << CHGCTRL5_EOCS_SHIFT) + +/* MAX14577 CHGCTRL6 register */ +#define CHGCTRL6_AUTOSTOP_SHIFT 5 +#define CHGCTRL6_AUTOSTOP_MASK BIT(CHGCTRL6_AUTOSTOP_SHIFT) + +/* MAX14577 CHGCTRL7 register */ +#define CHGCTRL7_OTPCGHCVS_SHIFT 0 +#define CHGCTRL7_OTPCGHCVS_MASK (0x3 << CHGCTRL7_OTPCGHCVS_SHIFT) + +/* MAX14577 charger current limits (as in CHGCTRL4 register), uA */ +#define MAX14577_CHARGER_CURRENT_LIMIT_MIN 90000U +#define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_START 200000U +#define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_STEP 50000U +#define MAX14577_CHARGER_CURRENT_LIMIT_MAX 950000U + +/* MAX77836 charger current limits (as in CHGCTRL4 register), uA */ +#define MAX77836_CHARGER_CURRENT_LIMIT_MIN 45000U +#define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_START 100000U +#define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_STEP 25000U +#define MAX77836_CHARGER_CURRENT_LIMIT_MAX 475000U + +/* + * MAX14577 charger End-Of-Charge current limits + * (as in CHGCTRL5 register), uA + */ +#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MIN 50000U +#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_STEP 10000U +#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MAX 200000U + +/* + * MAX14577/MAX77836 Battery Constant Voltage + * (as in CHGCTRL3 register), uV + */ +#define MAXIM_CHARGER_CONSTANT_VOLTAGE_MIN 4000000U +#define MAXIM_CHARGER_CONSTANT_VOLTAGE_STEP 20000U +#define MAXIM_CHARGER_CONSTANT_VOLTAGE_MAX 4350000U + +/* Default value for fast charge timer, in hours */ +#define MAXIM_CHARGER_FAST_CHARGE_TIMER_DEFAULT 5 + +/* MAX14577 regulator SFOUT LDO voltage, fixed, uV */ +#define MAX14577_REGULATOR_SAFEOUT_VOLTAGE 4900000 + +/*
MAX77836 regulator LDOx voltage, uV */ +#define MAX77836_REGULATOR_LDO_VOLTAGE_MIN 800000 +#define MAX77836_REGULATOR_LDO_VOLTAGE_MAX 3950000 +#define MAX77836_REGULATOR_LDO_VOLTAGE_STEP 50000 +#define MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM 64 + +/* Slave addr = 0x46: PMIC */ +enum max77836_pmic_reg { + MAX77836_PMIC_REG_PMIC_ID = 0x20, + MAX77836_PMIC_REG_PMIC_REV = 0x21, + MAX77836_PMIC_REG_INTSRC = 0x22, + MAX77836_PMIC_REG_INTSRC_MASK = 0x23, + MAX77836_PMIC_REG_TOPSYS_INT = 0x24, + MAX77836_PMIC_REG_TOPSYS_INT_MASK = 0x26, + MAX77836_PMIC_REG_TOPSYS_STAT = 0x28, + MAX77836_PMIC_REG_MRSTB_CNTL = 0x2A, + MAX77836_PMIC_REG_LSCNFG = 0x2B, + + MAX77836_LDO_REG_CNFG1_LDO1 = 0x51, + MAX77836_LDO_REG_CNFG2_LDO1 = 0x52, + MAX77836_LDO_REG_CNFG1_LDO2 = 0x53, + MAX77836_LDO_REG_CNFG2_LDO2 = 0x54, + MAX77836_LDO_REG_CNFG_LDO_BIAS = 0x55, + + MAX77836_COMP_REG_COMP1 = 0x60, + + MAX77836_PMIC_REG_END, +}; + +#define MAX77836_INTSRC_MASK_TOP_INT_SHIFT 1 +#define MAX77836_INTSRC_MASK_MUIC_CHG_INT_SHIFT 3 +#define MAX77836_INTSRC_MASK_TOP_INT_MASK BIT(MAX77836_INTSRC_MASK_TOP_INT_SHIFT) +#define MAX77836_INTSRC_MASK_MUIC_CHG_INT_MASK BIT(MAX77836_INTSRC_MASK_MUIC_CHG_INT_SHIFT) + +/* MAX77836 PMIC interrupts */ +#define MAX77836_TOPSYS_INT_T120C_SHIFT 0 +#define MAX77836_TOPSYS_INT_T140C_SHIFT 1 +#define MAX77836_TOPSYS_INT_T120C_MASK BIT(MAX77836_TOPSYS_INT_T120C_SHIFT) +#define MAX77836_TOPSYS_INT_T140C_MASK BIT(MAX77836_TOPSYS_INT_T140C_SHIFT) + +/* LDO1/LDO2 CONFIG1 register */ +#define MAX77836_CNFG1_LDO_PWRMD_SHIFT 6 +#define MAX77836_CNFG1_LDO_TV_SHIFT 0 +#define MAX77836_CNFG1_LDO_PWRMD_MASK (0x3 << MAX77836_CNFG1_LDO_PWRMD_SHIFT) +#define MAX77836_CNFG1_LDO_TV_MASK (0x3f << MAX77836_CNFG1_LDO_TV_SHIFT) + +/* LDO1/LDO2 CONFIG2 register */ +#define MAX77836_CNFG2_LDO_OVCLMPEN_SHIFT 7 +#define MAX77836_CNFG2_LDO_ALPMEN_SHIFT 6 +#define MAX77836_CNFG2_LDO_COMP_SHIFT 4 +#define MAX77836_CNFG2_LDO_POK_SHIFT 3 +#define MAX77836_CNFG2_LDO_ADE_SHIFT 1 +#define MAX77836_CNFG2_LDO_SS_SHIFT 0 +#define MAX77836_CNFG2_LDO_OVCLMPEN_MASK BIT(MAX77836_CNFG2_LDO_OVCLMPEN_SHIFT) +#define MAX77836_CNFG2_LDO_ALPMEN_MASK BIT(MAX77836_CNFG2_LDO_ALPMEN_SHIFT) +#define MAX77836_CNFG2_LDO_COMP_MASK (0x3 << MAX77836_CNFG2_LDO_COMP_SHIFT) +#define MAX77836_CNFG2_LDO_POK_MASK BIT(MAX77836_CNFG2_LDO_POK_SHIFT) +#define MAX77836_CNFG2_LDO_ADE_MASK BIT(MAX77836_CNFG2_LDO_ADE_SHIFT) +#define MAX77836_CNFG2_LDO_SS_MASK BIT(MAX77836_CNFG2_LDO_SS_SHIFT) + +/* Slave addr = 0x6C: Fuel-Gauge/Battery */ +enum max77836_fg_reg { + MAX77836_FG_REG_VCELL_MSB = 0x02, + MAX77836_FG_REG_VCELL_LSB = 0x03, + MAX77836_FG_REG_SOC_MSB = 0x04, + MAX77836_FG_REG_SOC_LSB = 0x05, + MAX77836_FG_REG_MODE_H = 0x06, + MAX77836_FG_REG_MODE_L = 0x07, + MAX77836_FG_REG_VERSION_MSB = 0x08, + MAX77836_FG_REG_VERSION_LSB = 0x09, + MAX77836_FG_REG_HIBRT_H = 0x0A, + MAX77836_FG_REG_HIBRT_L = 0x0B, + MAX77836_FG_REG_CONFIG_H = 0x0C, + MAX77836_FG_REG_CONFIG_L = 0x0D, + MAX77836_FG_REG_VALRT_MIN = 0x14, + MAX77836_FG_REG_VALRT_MAX = 0x15, + MAX77836_FG_REG_CRATE_MSB = 0x16, + MAX77836_FG_REG_CRATE_LSB = 0x17, + MAX77836_FG_REG_VRESET = 0x18, + MAX77836_FG_REG_FGID = 0x19, + MAX77836_FG_REG_STATUS_H = 0x1A, + MAX77836_FG_REG_STATUS_L = 0x1B, + /* + * TODO: TABLE registers + * TODO: CMD register + */ + + MAX77836_FG_REG_END, +}; + +enum max14577_irq { + /* INT1 */ + MAX14577_IRQ_INT1_ADC, + MAX14577_IRQ_INT1_ADCLOW, + MAX14577_IRQ_INT1_ADCERR, + MAX77836_IRQ_INT1_ADC1K, + + /* INT2 */ + MAX14577_IRQ_INT2_CHGTYP, + MAX14577_IRQ_INT2_CHGDETRUN, + 
MAX14577_IRQ_INT2_DCDTMR, + MAX14577_IRQ_INT2_DBCHG, + MAX14577_IRQ_INT2_VBVOLT, + MAX77836_IRQ_INT2_VIDRM, + + /* INT3 */ + MAX14577_IRQ_INT3_EOC, + MAX14577_IRQ_INT3_CGMBC, + MAX14577_IRQ_INT3_OVP, + MAX14577_IRQ_INT3_MBCCHGERR, + + /* TOPSYS_INT, only MAX77836 */ + MAX77836_IRQ_TOPSYS_T140C, + MAX77836_IRQ_TOPSYS_T120C, + + MAX14577_IRQ_NUM, +}; + +struct max14577 { + struct device *dev; + struct i2c_client *i2c; /* Slave addr = 0x4A */ + struct i2c_client *i2c_pmic; /* Slave addr = 0x46 */ + enum maxim_device_type dev_type; + + struct regmap *regmap; /* For MUIC and Charger */ + struct regmap *regmap_pmic; + + struct regmap_irq_chip_data *irq_data; /* For MUIC and Charger */ + struct regmap_irq_chip_data *irq_data_pmic; + int irq; +}; + +/* MAX14577 shared regmap API function */ +static inline int max14577_read_reg(struct regmap *map, u8 reg, u8 *dest) +{ + unsigned int val; + int ret; + + ret = regmap_read(map, reg, &val); + *dest = val; + + return ret; +} + +static inline int max14577_bulk_read(struct regmap *map, u8 reg, u8 *buf, + int count) +{ + return regmap_bulk_read(map, reg, buf, count); +} + +static inline int max14577_write_reg(struct regmap *map, u8 reg, u8 value) +{ + return regmap_write(map, reg, value); +} + +static inline int max14577_bulk_write(struct regmap *map, u8 reg, u8 *buf, + int count) +{ + return regmap_bulk_write(map, reg, buf, count); +} + +static inline int max14577_update_reg(struct regmap *map, u8 reg, u8 mask, + u8 val) +{ + return regmap_update_bits(map, reg, mask, val); +} + +#endif /* __MAX14577_PRIVATE_H__ */ diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h new file mode 100644 index 000000000..d81b52bb8 --- /dev/null +++ b/include/linux/mfd/max14577.h @@ -0,0 +1,107 @@ +/* + * max14577.h - Driver for the Maxim 14577/77836 + * + * Copyright (C) 2014 Samsung Electrnoics + * Chanwoo Choi + * Krzysztof Kozlowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This driver is based on max8997.h + * + * MAX14577 has MUIC, Charger devices. + * The devices share the same I2C bus and interrupt line + * included in this mfd driver. + * + * MAX77836 has additional PMIC and Fuel-Gauge on different I2C slave + * addresses. 
+ */ + +#ifndef __MAX14577_H__ +#define __MAX14577_H__ + +#include + +/* MAX14577 regulator IDs */ +enum max14577_regulators { + MAX14577_SAFEOUT = 0, + MAX14577_CHARGER, + + MAX14577_REGULATOR_NUM, +}; + +/* MAX77836 regulator IDs */ +enum max77836_regulators { + MAX77836_SAFEOUT = 0, + MAX77836_CHARGER, + MAX77836_LDO1, + MAX77836_LDO2, + + MAX77836_REGULATOR_NUM, +}; + +struct max14577_regulator_platform_data { + int id; + struct regulator_init_data *initdata; + struct device_node *of_node; +}; + +struct max14577_charger_platform_data { + u32 constant_uvolt; + u32 fast_charge_uamp; + u32 eoc_uamp; + u32 ovp_uvolt; +}; + +/* + * MAX14577 MFD platform data + */ +struct max14577_platform_data { + /* IRQ */ + int irq_base; + + /* current control GPIOs */ + int gpio_pogo_vbatt_en; + int gpio_pogo_vbus_en; + + /* current control GPIO control function */ + int (*set_gpio_pogo_vbatt_en) (int gpio_val); + int (*set_gpio_pogo_vbus_en) (int gpio_val); + + int (*set_gpio_pogo_cb) (int new_dev); + + struct max14577_regulator_platform_data *regulators; +}; + +/* + * Valid limits of current for max14577 and max77836 chargers. + * They must correspond to MBCICHWRCL and MBCICHWRCH fields in CHGCTRL4 + * register for given chipset. + */ +struct maxim_charger_current { + /* Minimal current, set in CHGCTRL4/MBCICHWRCL, uA */ + unsigned int min; + /* + * Minimal current when high setting is active, + * set in CHGCTRL4/MBCICHWRCH, uA + */ + unsigned int high_start; + /* Value of one step in high setting, uA */ + unsigned int high_step; + /* Maximum current of high setting, uA */ + unsigned int max; +}; + +extern const struct maxim_charger_current maxim_charger_currents[]; +extern int maxim_charger_calc_reg_current(const struct maxim_charger_current *limits, + unsigned int min_ua, unsigned int max_ua, u8 *dst); + +#endif /* __MAX14577_H__ */ diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h new file mode 100644 index 000000000..b4fd5a7c2 --- /dev/null +++ b/include/linux/mfd/max77620.h @@ -0,0 +1,348 @@ +/* + * Defining registers address and its bit definitions of MAX77620 and MAX20024 + * + * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ */ + +#ifndef _MFD_MAX77620_H_ +#define _MFD_MAX77620_H_ + +#include + +/* GLOBAL, PMIC, GPIO, FPS, ONOFFC, CID Registers */ +#define MAX77620_REG_CNFGGLBL1 0x00 +#define MAX77620_REG_CNFGGLBL2 0x01 +#define MAX77620_REG_CNFGGLBL3 0x02 +#define MAX77620_REG_CNFG1_32K 0x03 +#define MAX77620_REG_CNFGBBC 0x04 +#define MAX77620_REG_IRQTOP 0x05 +#define MAX77620_REG_INTLBT 0x06 +#define MAX77620_REG_IRQSD 0x07 +#define MAX77620_REG_IRQ_LVL2_L0_7 0x08 +#define MAX77620_REG_IRQ_LVL2_L8 0x09 +#define MAX77620_REG_IRQ_LVL2_GPIO 0x0A +#define MAX77620_REG_ONOFFIRQ 0x0B +#define MAX77620_REG_NVERC 0x0C +#define MAX77620_REG_IRQTOPM 0x0D +#define MAX77620_REG_INTENLBT 0x0E +#define MAX77620_REG_IRQMASKSD 0x0F +#define MAX77620_REG_IRQ_MSK_L0_7 0x10 +#define MAX77620_REG_IRQ_MSK_L8 0x11 +#define MAX77620_REG_ONOFFIRQM 0x12 +#define MAX77620_REG_STATLBT 0x13 +#define MAX77620_REG_STATSD 0x14 +#define MAX77620_REG_ONOFFSTAT 0x15 + +/* SD and LDO Registers */ +#define MAX77620_REG_SD0 0x16 +#define MAX77620_REG_SD1 0x17 +#define MAX77620_REG_SD2 0x18 +#define MAX77620_REG_SD3 0x19 +#define MAX77620_REG_SD4 0x1A +#define MAX77620_REG_DVSSD0 0x1B +#define MAX77620_REG_DVSSD1 0x1C +#define MAX77620_REG_SD0_CFG 0x1D +#define MAX77620_REG_SD1_CFG 0x1E +#define MAX77620_REG_SD2_CFG 0x1F +#define MAX77620_REG_SD3_CFG 0x20 +#define MAX77620_REG_SD4_CFG 0x21 +#define MAX77620_REG_SD_CFG2 0x22 +#define MAX77620_REG_LDO0_CFG 0x23 +#define MAX77620_REG_LDO0_CFG2 0x24 +#define MAX77620_REG_LDO1_CFG 0x25 +#define MAX77620_REG_LDO1_CFG2 0x26 +#define MAX77620_REG_LDO2_CFG 0x27 +#define MAX77620_REG_LDO2_CFG2 0x28 +#define MAX77620_REG_LDO3_CFG 0x29 +#define MAX77620_REG_LDO3_CFG2 0x2A +#define MAX77620_REG_LDO4_CFG 0x2B +#define MAX77620_REG_LDO4_CFG2 0x2C +#define MAX77620_REG_LDO5_CFG 0x2D +#define MAX77620_REG_LDO5_CFG2 0x2E +#define MAX77620_REG_LDO6_CFG 0x2F +#define MAX77620_REG_LDO6_CFG2 0x30 +#define MAX77620_REG_LDO7_CFG 0x31 +#define MAX77620_REG_LDO7_CFG2 0x32 +#define MAX77620_REG_LDO8_CFG 0x33 +#define MAX77620_REG_LDO8_CFG2 0x34 +#define MAX77620_REG_LDO_CFG3 0x35 + +#define MAX77620_LDO_SLEW_RATE_MASK 0x1 + +/* LDO Configuration 3 */ +#define MAX77620_TRACK4_MASK BIT(5) +#define MAX77620_TRACK4_SHIFT 5 + +/* Voltage */ +#define MAX77620_SDX_VOLT_MASK 0xFF +#define MAX77620_SD0_VOLT_MASK 0x3F +#define MAX77620_SD1_VOLT_MASK 0x7F +#define MAX77620_LDO_VOLT_MASK 0x3F + +#define MAX77620_REG_GPIO0 0x36 +#define MAX77620_REG_GPIO1 0x37 +#define MAX77620_REG_GPIO2 0x38 +#define MAX77620_REG_GPIO3 0x39 +#define MAX77620_REG_GPIO4 0x3A +#define MAX77620_REG_GPIO5 0x3B +#define MAX77620_REG_GPIO6 0x3C +#define MAX77620_REG_GPIO7 0x3D +#define MAX77620_REG_PUE_GPIO 0x3E +#define MAX77620_REG_PDE_GPIO 0x3F +#define MAX77620_REG_AME_GPIO 0x40 +#define MAX77620_REG_ONOFFCNFG1 0x41 +#define MAX77620_REG_ONOFFCNFG2 0x42 + +/* FPS Registers */ +#define MAX77620_REG_FPS_CFG0 0x43 +#define MAX77620_REG_FPS_CFG1 0x44 +#define MAX77620_REG_FPS_CFG2 0x45 +#define MAX77620_REG_FPS_LDO0 0x46 +#define MAX77620_REG_FPS_LDO1 0x47 +#define MAX77620_REG_FPS_LDO2 0x48 +#define MAX77620_REG_FPS_LDO3 0x49 +#define MAX77620_REG_FPS_LDO4 0x4A +#define MAX77620_REG_FPS_LDO5 0x4B +#define MAX77620_REG_FPS_LDO6 0x4C +#define MAX77620_REG_FPS_LDO7 0x4D +#define MAX77620_REG_FPS_LDO8 0x4E +#define MAX77620_REG_FPS_SD0 0x4F +#define MAX77620_REG_FPS_SD1 0x50 +#define MAX77620_REG_FPS_SD2 0x51 +#define MAX77620_REG_FPS_SD3 0x52 +#define MAX77620_REG_FPS_SD4 0x53 +#define MAX77620_REG_FPS_NONE 0 + +#define MAX77620_FPS_SRC_MASK 0xC0 +#define 
MAX77620_FPS_SRC_SHIFT 6 +#define MAX77620_FPS_PU_PERIOD_MASK 0x38 +#define MAX77620_FPS_PU_PERIOD_SHIFT 3 +#define MAX77620_FPS_PD_PERIOD_MASK 0x07 +#define MAX77620_FPS_PD_PERIOD_SHIFT 0 +#define MAX77620_FPS_TIME_PERIOD_MASK 0x38 +#define MAX77620_FPS_TIME_PERIOD_SHIFT 3 +#define MAX77620_FPS_EN_SRC_MASK 0x06 +#define MAX77620_FPS_EN_SRC_SHIFT 1 +#define MAX77620_FPS_ENFPS_SW_MASK 0x01 +#define MAX77620_FPS_ENFPS_SW 0x01 + +/* Minimum and maximum FPS period time (in microseconds) are + * different for MAX77620 and Max20024. + */ +#define MAX77620_FPS_PERIOD_MIN_US 40 +#define MAX20024_FPS_PERIOD_MIN_US 20 + +#define MAX20024_FPS_PERIOD_MAX_US 2560 +#define MAX77620_FPS_PERIOD_MAX_US 5120 + +#define MAX77620_REG_FPS_GPIO1 0x54 +#define MAX77620_REG_FPS_GPIO2 0x55 +#define MAX77620_REG_FPS_GPIO3 0x56 +#define MAX77620_REG_FPS_RSO 0x57 +#define MAX77620_REG_CID0 0x58 +#define MAX77620_REG_CID1 0x59 +#define MAX77620_REG_CID2 0x5A +#define MAX77620_REG_CID3 0x5B +#define MAX77620_REG_CID4 0x5C +#define MAX77620_REG_CID5 0x5D + +#define MAX77620_REG_DVSSD4 0x5E +#define MAX20024_REG_MAX_ADD 0x70 + +#define MAX77620_CID_DIDM_MASK 0xF0 +#define MAX77620_CID_DIDM_SHIFT 4 + +/* CNCG2SD */ +#define MAX77620_SD_CNF2_ROVS_EN_SD1 BIT(1) +#define MAX77620_SD_CNF2_ROVS_EN_SD0 BIT(2) + +/* Device Identification Metal */ +#define MAX77620_CID5_DIDM(n) (((n) >> 4) & 0xF) +/* Device Indentification OTP */ +#define MAX77620_CID5_DIDO(n) ((n) & 0xF) + +/* SD CNFG1 */ +#define MAX77620_SD_SR_MASK 0xC0 +#define MAX77620_SD_SR_SHIFT 6 +#define MAX77620_SD_POWER_MODE_MASK 0x30 +#define MAX77620_SD_POWER_MODE_SHIFT 4 +#define MAX77620_SD_CFG1_ADE_MASK BIT(3) +#define MAX77620_SD_CFG1_ADE_DISABLE 0 +#define MAX77620_SD_CFG1_ADE_ENABLE BIT(3) +#define MAX77620_SD_FPWM_MASK 0x04 +#define MAX77620_SD_FPWM_SHIFT 2 +#define MAX77620_SD_FSRADE_MASK 0x01 +#define MAX77620_SD_FSRADE_SHIFT 0 +#define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2) +#define MAX77620_SD_CFG1_FPWM_SD_SKIP 0 +#define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2) +#define MAX20024_SD_CFG1_MPOK_MASK BIT(1) +#define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0) +#define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0 +#define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0) + +/* LDO_CNFG2 */ +#define MAX77620_LDO_POWER_MODE_MASK 0xC0 +#define MAX77620_LDO_POWER_MODE_SHIFT 6 +#define MAX20024_LDO_CFG2_MPOK_MASK BIT(2) +#define MAX77620_LDO_CFG2_ADE_MASK BIT(1) +#define MAX77620_LDO_CFG2_ADE_DISABLE 0 +#define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1) +#define MAX77620_LDO_CFG2_SS_MASK BIT(0) +#define MAX77620_LDO_CFG2_SS_FAST BIT(0) +#define MAX77620_LDO_CFG2_SS_SLOW 0 + +#define MAX77620_IRQ_TOP_GLBL_MASK BIT(7) +#define MAX77620_IRQ_TOP_SD_MASK BIT(6) +#define MAX77620_IRQ_TOP_LDO_MASK BIT(5) +#define MAX77620_IRQ_TOP_GPIO_MASK BIT(4) +#define MAX77620_IRQ_TOP_RTC_MASK BIT(3) +#define MAX77620_IRQ_TOP_32K_MASK BIT(2) +#define MAX77620_IRQ_TOP_ONOFF_MASK BIT(1) + +#define MAX77620_IRQ_LBM_MASK BIT(3) +#define MAX77620_IRQ_TJALRM1_MASK BIT(2) +#define MAX77620_IRQ_TJALRM2_MASK BIT(1) + +#define MAX77620_PWR_I2C_ADDR 0x3c +#define MAX77620_RTC_I2C_ADDR 0x68 + +#define MAX77620_CNFG_GPIO_DRV_MASK BIT(0) +#define MAX77620_CNFG_GPIO_DRV_PUSHPULL BIT(0) +#define MAX77620_CNFG_GPIO_DRV_OPENDRAIN 0 +#define MAX77620_CNFG_GPIO_DIR_MASK BIT(1) +#define MAX77620_CNFG_GPIO_DIR_INPUT BIT(1) +#define MAX77620_CNFG_GPIO_DIR_OUTPUT 0 +#define MAX77620_CNFG_GPIO_INPUT_VAL_MASK BIT(2) +#define MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK BIT(3) +#define MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH BIT(3) +#define 
MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW 0 +#define MAX77620_CNFG_GPIO_INT_MASK (0x3 << 4) +#define MAX77620_CNFG_GPIO_INT_FALLING BIT(4) +#define MAX77620_CNFG_GPIO_INT_RISING BIT(5) +#define MAX77620_CNFG_GPIO_DBNC_MASK (0x3 << 6) +#define MAX77620_CNFG_GPIO_DBNC_None (0x0 << 6) +#define MAX77620_CNFG_GPIO_DBNC_8ms (0x1 << 6) +#define MAX77620_CNFG_GPIO_DBNC_16ms (0x2 << 6) +#define MAX77620_CNFG_GPIO_DBNC_32ms (0x3 << 6) + +#define MAX77620_IRQ_LVL2_GPIO_EDGE0 BIT(0) +#define MAX77620_IRQ_LVL2_GPIO_EDGE1 BIT(1) +#define MAX77620_IRQ_LVL2_GPIO_EDGE2 BIT(2) +#define MAX77620_IRQ_LVL2_GPIO_EDGE3 BIT(3) +#define MAX77620_IRQ_LVL2_GPIO_EDGE4 BIT(4) +#define MAX77620_IRQ_LVL2_GPIO_EDGE5 BIT(5) +#define MAX77620_IRQ_LVL2_GPIO_EDGE6 BIT(6) +#define MAX77620_IRQ_LVL2_GPIO_EDGE7 BIT(7) + +#define MAX77620_CNFG1_32K_OUT0_EN BIT(2) + +#define MAX77620_ONOFFCNFG1_SFT_RST BIT(7) +#define MAX77620_ONOFFCNFG1_MRT_MASK 0x38 +#define MAX77620_ONOFFCNFG1_MRT_SHIFT 0x3 +#define MAX77620_ONOFFCNFG1_SLPEN BIT(2) +#define MAX77620_ONOFFCNFG1_PWR_OFF BIT(1) +#define MAX20024_ONOFFCNFG1_CLRSE 0x18 + +#define MAX77620_ONOFFCNFG2_SFT_RST_WK BIT(7) +#define MAX77620_ONOFFCNFG2_WD_RST_WK BIT(6) +#define MAX77620_ONOFFCNFG2_SLP_LPM_MSK BIT(5) +#define MAX77620_ONOFFCNFG2_WK_ALARM1 BIT(2) +#define MAX77620_ONOFFCNFG2_WK_EN0 BIT(0) + +#define MAX77620_GLBLM_MASK BIT(0) + +#define MAX77620_WDTC_MASK 0x3 +#define MAX77620_WDTOFFC BIT(4) +#define MAX77620_WDTSLPC BIT(3) +#define MAX77620_WDTEN BIT(2) + +#define MAX77620_TWD_MASK 0x3 +#define MAX77620_TWD_2s 0x0 +#define MAX77620_TWD_16s 0x1 +#define MAX77620_TWD_64s 0x2 +#define MAX77620_TWD_128s 0x3 + +#define MAX77620_CNFGGLBL1_LBDAC_EN BIT(7) +#define MAX77620_CNFGGLBL1_MPPLD BIT(6) +#define MAX77620_CNFGGLBL1_LBHYST (BIT(5) | BIT(4)) +#define MAX77620_CNFGGLBL1_LBDAC 0x0E +#define MAX77620_CNFGGLBL1_LBRSTEN BIT(0) + +/* CNFG BBC registers */ +#define MAX77620_CNFGBBC_ENABLE BIT(0) +#define MAX77620_CNFGBBC_CURRENT_MASK 0x06 +#define MAX77620_CNFGBBC_CURRENT_SHIFT 1 +#define MAX77620_CNFGBBC_VOLTAGE_MASK 0x18 +#define MAX77620_CNFGBBC_VOLTAGE_SHIFT 3 +#define MAX77620_CNFGBBC_LOW_CURRENT_DISABLE BIT(5) +#define MAX77620_CNFGBBC_RESISTOR_MASK 0xC0 +#define MAX77620_CNFGBBC_RESISTOR_SHIFT 6 + +#define MAX77620_FPS_COUNT 3 + +/* Interrupts */ +enum { + MAX77620_IRQ_TOP_GLBL, /* Low-Battery */ + MAX77620_IRQ_TOP_SD, /* SD power fail */ + MAX77620_IRQ_TOP_LDO, /* LDO power fail */ + MAX77620_IRQ_TOP_GPIO, /* TOP GPIO internal int to MAX77620 */ + MAX77620_IRQ_TOP_RTC, /* RTC */ + MAX77620_IRQ_TOP_32K, /* 32kHz oscillator */ + MAX77620_IRQ_TOP_ONOFF, /* ON/OFF oscillator */ + MAX77620_IRQ_LBT_MBATLOW, /* Thermal alarm status, > 120C */ + MAX77620_IRQ_LBT_TJALRM1, /* Thermal alarm status, > 120C */ + MAX77620_IRQ_LBT_TJALRM2, /* Thermal alarm status, > 140C */ +}; + +/* GPIOs */ +enum { + MAX77620_GPIO0, + MAX77620_GPIO1, + MAX77620_GPIO2, + MAX77620_GPIO3, + MAX77620_GPIO4, + MAX77620_GPIO5, + MAX77620_GPIO6, + MAX77620_GPIO7, + MAX77620_GPIO_NR, +}; + +/* FPS Source */ +enum max77620_fps_src { + MAX77620_FPS_SRC_0, + MAX77620_FPS_SRC_1, + MAX77620_FPS_SRC_2, + MAX77620_FPS_SRC_NONE, + MAX77620_FPS_SRC_DEF, +}; + +enum max77620_chip_id { + MAX77620, + MAX20024, +}; + +struct max77620_chip { + struct device *dev; + struct regmap *rmap; + + int chip_irq; + int irq_base; + + /* chip id */ + enum max77620_chip_id chip_id; + + bool sleep_enable; + bool enable_global_lpm; + int shutdown_fps_period[MAX77620_FPS_COUNT]; + int suspend_fps_period[MAX77620_FPS_COUNT]; + + struct 
regmap_irq_chip_data *top_irq_data; + struct regmap_irq_chip_data *gpio_irq_data; +}; + +#endif /* _MFD_MAX77620_H_ */ diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h new file mode 100644 index 000000000..643dae777 --- /dev/null +++ b/include/linux/mfd/max77686-private.h @@ -0,0 +1,461 @@ +/* + * max77686-private.h - Voltage regulator driver for the Maxim 77686/802 + * + * Copyright (C) 2012 Samsung Electrnoics + * Chiwoong Byun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_MAX77686_PRIV_H +#define __LINUX_MFD_MAX77686_PRIV_H + +#include +#include +#include + +#define MAX77686_REG_INVALID (0xff) + +/* MAX77686 PMIC registers */ +enum max77686_pmic_reg { + MAX77686_REG_DEVICE_ID = 0x00, + MAX77686_REG_INTSRC = 0x01, + MAX77686_REG_INT1 = 0x02, + MAX77686_REG_INT2 = 0x03, + + MAX77686_REG_INT1MSK = 0x04, + MAX77686_REG_INT2MSK = 0x05, + + MAX77686_REG_STATUS1 = 0x06, + MAX77686_REG_STATUS2 = 0x07, + + MAX77686_REG_PWRON = 0x08, + MAX77686_REG_ONOFF_DELAY = 0x09, + MAX77686_REG_MRSTB = 0x0A, + /* Reserved: 0x0B-0x0F */ + + MAX77686_REG_BUCK1CTRL = 0x10, + MAX77686_REG_BUCK1OUT = 0x11, + MAX77686_REG_BUCK2CTRL1 = 0x12, + MAX77686_REG_BUCK234FREQ = 0x13, + MAX77686_REG_BUCK2DVS1 = 0x14, + MAX77686_REG_BUCK2DVS2 = 0x15, + MAX77686_REG_BUCK2DVS3 = 0x16, + MAX77686_REG_BUCK2DVS4 = 0x17, + MAX77686_REG_BUCK2DVS5 = 0x18, + MAX77686_REG_BUCK2DVS6 = 0x19, + MAX77686_REG_BUCK2DVS7 = 0x1A, + MAX77686_REG_BUCK2DVS8 = 0x1B, + MAX77686_REG_BUCK3CTRL1 = 0x1C, + /* Reserved: 0x1D */ + MAX77686_REG_BUCK3DVS1 = 0x1E, + MAX77686_REG_BUCK3DVS2 = 0x1F, + MAX77686_REG_BUCK3DVS3 = 0x20, + MAX77686_REG_BUCK3DVS4 = 0x21, + MAX77686_REG_BUCK3DVS5 = 0x22, + MAX77686_REG_BUCK3DVS6 = 0x23, + MAX77686_REG_BUCK3DVS7 = 0x24, + MAX77686_REG_BUCK3DVS8 = 0x25, + MAX77686_REG_BUCK4CTRL1 = 0x26, + /* Reserved: 0x27 */ + MAX77686_REG_BUCK4DVS1 = 0x28, + MAX77686_REG_BUCK4DVS2 = 0x29, + MAX77686_REG_BUCK4DVS3 = 0x2A, + MAX77686_REG_BUCK4DVS4 = 0x2B, + MAX77686_REG_BUCK4DVS5 = 0x2C, + MAX77686_REG_BUCK4DVS6 = 0x2D, + MAX77686_REG_BUCK4DVS7 = 0x2E, + MAX77686_REG_BUCK4DVS8 = 0x2F, + MAX77686_REG_BUCK5CTRL = 0x30, + MAX77686_REG_BUCK5OUT = 0x31, + MAX77686_REG_BUCK6CTRL = 0x32, + MAX77686_REG_BUCK6OUT = 0x33, + MAX77686_REG_BUCK7CTRL = 0x34, + MAX77686_REG_BUCK7OUT = 0x35, + MAX77686_REG_BUCK8CTRL = 0x36, + MAX77686_REG_BUCK8OUT = 0x37, + MAX77686_REG_BUCK9CTRL = 0x38, + MAX77686_REG_BUCK9OUT = 0x39, + /* Reserved: 0x3A-0x3F */ + + MAX77686_REG_LDO1CTRL1 = 0x40, + MAX77686_REG_LDO2CTRL1 = 0x41, + MAX77686_REG_LDO3CTRL1 = 0x42, + MAX77686_REG_LDO4CTRL1 = 0x43, + MAX77686_REG_LDO5CTRL1 = 0x44, + MAX77686_REG_LDO6CTRL1 = 0x45, + MAX77686_REG_LDO7CTRL1 = 0x46, + MAX77686_REG_LDO8CTRL1 = 0x47, + MAX77686_REG_LDO9CTRL1 = 0x48, + MAX77686_REG_LDO10CTRL1 = 0x49, + MAX77686_REG_LDO11CTRL1 = 0x4A, + 
MAX77686_REG_LDO12CTRL1 = 0x4B, + MAX77686_REG_LDO13CTRL1 = 0x4C, + MAX77686_REG_LDO14CTRL1 = 0x4D, + MAX77686_REG_LDO15CTRL1 = 0x4E, + MAX77686_REG_LDO16CTRL1 = 0x4F, + MAX77686_REG_LDO17CTRL1 = 0x50, + MAX77686_REG_LDO18CTRL1 = 0x51, + MAX77686_REG_LDO19CTRL1 = 0x52, + MAX77686_REG_LDO20CTRL1 = 0x53, + MAX77686_REG_LDO21CTRL1 = 0x54, + MAX77686_REG_LDO22CTRL1 = 0x55, + MAX77686_REG_LDO23CTRL1 = 0x56, + MAX77686_REG_LDO24CTRL1 = 0x57, + MAX77686_REG_LDO25CTRL1 = 0x58, + MAX77686_REG_LDO26CTRL1 = 0x59, + /* Reserved: 0x5A-0x5F */ + MAX77686_REG_LDO1CTRL2 = 0x60, + MAX77686_REG_LDO2CTRL2 = 0x61, + MAX77686_REG_LDO3CTRL2 = 0x62, + MAX77686_REG_LDO4CTRL2 = 0x63, + MAX77686_REG_LDO5CTRL2 = 0x64, + MAX77686_REG_LDO6CTRL2 = 0x65, + MAX77686_REG_LDO7CTRL2 = 0x66, + MAX77686_REG_LDO8CTRL2 = 0x67, + MAX77686_REG_LDO9CTRL2 = 0x68, + MAX77686_REG_LDO10CTRL2 = 0x69, + MAX77686_REG_LDO11CTRL2 = 0x6A, + MAX77686_REG_LDO12CTRL2 = 0x6B, + MAX77686_REG_LDO13CTRL2 = 0x6C, + MAX77686_REG_LDO14CTRL2 = 0x6D, + MAX77686_REG_LDO15CTRL2 = 0x6E, + MAX77686_REG_LDO16CTRL2 = 0x6F, + MAX77686_REG_LDO17CTRL2 = 0x70, + MAX77686_REG_LDO18CTRL2 = 0x71, + MAX77686_REG_LDO19CTRL2 = 0x72, + MAX77686_REG_LDO20CTRL2 = 0x73, + MAX77686_REG_LDO21CTRL2 = 0x74, + MAX77686_REG_LDO22CTRL2 = 0x75, + MAX77686_REG_LDO23CTRL2 = 0x76, + MAX77686_REG_LDO24CTRL2 = 0x77, + MAX77686_REG_LDO25CTRL2 = 0x78, + MAX77686_REG_LDO26CTRL2 = 0x79, + /* Reserved: 0x7A-0x7D */ + + MAX77686_REG_BBAT_CHG = 0x7E, + MAX77686_REG_32KHZ = 0x7F, + + MAX77686_REG_PMIC_END = 0x80, +}; + +enum max77686_rtc_reg { + MAX77686_RTC_INT = 0x00, + MAX77686_RTC_INTM = 0x01, + MAX77686_RTC_CONTROLM = 0x02, + MAX77686_RTC_CONTROL = 0x03, + MAX77686_RTC_UPDATE0 = 0x04, + /* Reserved: 0x5 */ + MAX77686_WTSR_SMPL_CNTL = 0x06, + MAX77686_RTC_SEC = 0x07, + MAX77686_RTC_MIN = 0x08, + MAX77686_RTC_HOUR = 0x09, + MAX77686_RTC_WEEKDAY = 0x0A, + MAX77686_RTC_MONTH = 0x0B, + MAX77686_RTC_YEAR = 0x0C, + MAX77686_RTC_DATE = 0x0D, + MAX77686_ALARM1_SEC = 0x0E, + MAX77686_ALARM1_MIN = 0x0F, + MAX77686_ALARM1_HOUR = 0x10, + MAX77686_ALARM1_WEEKDAY = 0x11, + MAX77686_ALARM1_MONTH = 0x12, + MAX77686_ALARM1_YEAR = 0x13, + MAX77686_ALARM1_DATE = 0x14, + MAX77686_ALARM2_SEC = 0x15, + MAX77686_ALARM2_MIN = 0x16, + MAX77686_ALARM2_HOUR = 0x17, + MAX77686_ALARM2_WEEKDAY = 0x18, + MAX77686_ALARM2_MONTH = 0x19, + MAX77686_ALARM2_YEAR = 0x1A, + MAX77686_ALARM2_DATE = 0x1B, +}; + +/* MAX77802 PMIC registers */ +enum max77802_pmic_reg { + MAX77802_REG_DEVICE_ID = 0x00, + MAX77802_REG_INTSRC = 0x01, + MAX77802_REG_INT1 = 0x02, + MAX77802_REG_INT2 = 0x03, + + MAX77802_REG_INT1MSK = 0x04, + MAX77802_REG_INT2MSK = 0x05, + + MAX77802_REG_STATUS1 = 0x06, + MAX77802_REG_STATUS2 = 0x07, + + MAX77802_REG_PWRON = 0x08, + /* Reserved: 0x09 */ + MAX77802_REG_MRSTB = 0x0A, + MAX77802_REG_EPWRHOLD = 0x0B, + /* Reserved: 0x0C-0x0D */ + MAX77802_REG_BOOSTCTRL = 0x0E, + MAX77802_REG_BOOSTOUT = 0x0F, + + MAX77802_REG_BUCK1CTRL = 0x10, + MAX77802_REG_BUCK1DVS1 = 0x11, + MAX77802_REG_BUCK1DVS2 = 0x12, + MAX77802_REG_BUCK1DVS3 = 0x13, + MAX77802_REG_BUCK1DVS4 = 0x14, + MAX77802_REG_BUCK1DVS5 = 0x15, + MAX77802_REG_BUCK1DVS6 = 0x16, + MAX77802_REG_BUCK1DVS7 = 0x17, + MAX77802_REG_BUCK1DVS8 = 0x18, + /* Reserved: 0x19 */ + MAX77802_REG_BUCK2CTRL1 = 0x1A, + MAX77802_REG_BUCK2CTRL2 = 0x1B, + MAX77802_REG_BUCK2PHTRAN = 0x1C, + MAX77802_REG_BUCK2DVS1 = 0x1D, + MAX77802_REG_BUCK2DVS2 = 0x1E, + MAX77802_REG_BUCK2DVS3 = 0x1F, + MAX77802_REG_BUCK2DVS4 = 0x20, + MAX77802_REG_BUCK2DVS5 = 0x21, + MAX77802_REG_BUCK2DVS6 = 0x22, + 
MAX77802_REG_BUCK2DVS7 = 0x23, + MAX77802_REG_BUCK2DVS8 = 0x24, + /* Reserved: 0x25-0x26 */ + MAX77802_REG_BUCK3CTRL1 = 0x27, + MAX77802_REG_BUCK3DVS1 = 0x28, + MAX77802_REG_BUCK3DVS2 = 0x29, + MAX77802_REG_BUCK3DVS3 = 0x2A, + MAX77802_REG_BUCK3DVS4 = 0x2B, + MAX77802_REG_BUCK3DVS5 = 0x2C, + MAX77802_REG_BUCK3DVS6 = 0x2D, + MAX77802_REG_BUCK3DVS7 = 0x2E, + MAX77802_REG_BUCK3DVS8 = 0x2F, + /* Reserved: 0x30-0x36 */ + MAX77802_REG_BUCK4CTRL1 = 0x37, + MAX77802_REG_BUCK4DVS1 = 0x38, + MAX77802_REG_BUCK4DVS2 = 0x39, + MAX77802_REG_BUCK4DVS3 = 0x3A, + MAX77802_REG_BUCK4DVS4 = 0x3B, + MAX77802_REG_BUCK4DVS5 = 0x3C, + MAX77802_REG_BUCK4DVS6 = 0x3D, + MAX77802_REG_BUCK4DVS7 = 0x3E, + MAX77802_REG_BUCK4DVS8 = 0x3F, + /* Reserved: 0x40 */ + MAX77802_REG_BUCK5CTRL = 0x41, + MAX77802_REG_BUCK5OUT = 0x42, + /* Reserved: 0x43 */ + MAX77802_REG_BUCK6CTRL = 0x44, + MAX77802_REG_BUCK6DVS1 = 0x45, + MAX77802_REG_BUCK6DVS2 = 0x46, + MAX77802_REG_BUCK6DVS3 = 0x47, + MAX77802_REG_BUCK6DVS4 = 0x48, + MAX77802_REG_BUCK6DVS5 = 0x49, + MAX77802_REG_BUCK6DVS6 = 0x4A, + MAX77802_REG_BUCK6DVS7 = 0x4B, + MAX77802_REG_BUCK6DVS8 = 0x4C, + /* Reserved: 0x4D */ + MAX77802_REG_BUCK7CTRL = 0x4E, + MAX77802_REG_BUCK7OUT = 0x4F, + /* Reserved: 0x50 */ + MAX77802_REG_BUCK8CTRL = 0x51, + MAX77802_REG_BUCK8OUT = 0x52, + /* Reserved: 0x53 */ + MAX77802_REG_BUCK9CTRL = 0x54, + MAX77802_REG_BUCK9OUT = 0x55, + /* Reserved: 0x56 */ + MAX77802_REG_BUCK10CTRL = 0x57, + MAX77802_REG_BUCK10OUT = 0x58, + + /* Reserved: 0x59-0x5F */ + + MAX77802_REG_LDO1CTRL1 = 0x60, + MAX77802_REG_LDO2CTRL1 = 0x61, + MAX77802_REG_LDO3CTRL1 = 0x62, + MAX77802_REG_LDO4CTRL1 = 0x63, + MAX77802_REG_LDO5CTRL1 = 0x64, + MAX77802_REG_LDO6CTRL1 = 0x65, + MAX77802_REG_LDO7CTRL1 = 0x66, + MAX77802_REG_LDO8CTRL1 = 0x67, + MAX77802_REG_LDO9CTRL1 = 0x68, + MAX77802_REG_LDO10CTRL1 = 0x69, + MAX77802_REG_LDO11CTRL1 = 0x6A, + MAX77802_REG_LDO12CTRL1 = 0x6B, + MAX77802_REG_LDO13CTRL1 = 0x6C, + MAX77802_REG_LDO14CTRL1 = 0x6D, + MAX77802_REG_LDO15CTRL1 = 0x6E, + /* Reserved: 0x6F */ + MAX77802_REG_LDO17CTRL1 = 0x70, + MAX77802_REG_LDO18CTRL1 = 0x71, + MAX77802_REG_LDO19CTRL1 = 0x72, + MAX77802_REG_LDO20CTRL1 = 0x73, + MAX77802_REG_LDO21CTRL1 = 0x74, + MAX77802_REG_LDO22CTRL1 = 0x75, + MAX77802_REG_LDO23CTRL1 = 0x76, + MAX77802_REG_LDO24CTRL1 = 0x77, + MAX77802_REG_LDO25CTRL1 = 0x78, + MAX77802_REG_LDO26CTRL1 = 0x79, + MAX77802_REG_LDO27CTRL1 = 0x7A, + MAX77802_REG_LDO28CTRL1 = 0x7B, + MAX77802_REG_LDO29CTRL1 = 0x7C, + MAX77802_REG_LDO30CTRL1 = 0x7D, + /* Reserved: 0x7E */ + MAX77802_REG_LDO32CTRL1 = 0x7F, + MAX77802_REG_LDO33CTRL1 = 0x80, + MAX77802_REG_LDO34CTRL1 = 0x81, + MAX77802_REG_LDO35CTRL1 = 0x82, + /* Reserved: 0x83-0x8F */ + MAX77802_REG_LDO1CTRL2 = 0x90, + MAX77802_REG_LDO2CTRL2 = 0x91, + MAX77802_REG_LDO3CTRL2 = 0x92, + MAX77802_REG_LDO4CTRL2 = 0x93, + MAX77802_REG_LDO5CTRL2 = 0x94, + MAX77802_REG_LDO6CTRL2 = 0x95, + MAX77802_REG_LDO7CTRL2 = 0x96, + MAX77802_REG_LDO8CTRL2 = 0x97, + MAX77802_REG_LDO9CTRL2 = 0x98, + MAX77802_REG_LDO10CTRL2 = 0x99, + MAX77802_REG_LDO11CTRL2 = 0x9A, + MAX77802_REG_LDO12CTRL2 = 0x9B, + MAX77802_REG_LDO13CTRL2 = 0x9C, + MAX77802_REG_LDO14CTRL2 = 0x9D, + MAX77802_REG_LDO15CTRL2 = 0x9E, + /* Reserved: 0x9F */ + MAX77802_REG_LDO17CTRL2 = 0xA0, + MAX77802_REG_LDO18CTRL2 = 0xA1, + MAX77802_REG_LDO19CTRL2 = 0xA2, + MAX77802_REG_LDO20CTRL2 = 0xA3, + MAX77802_REG_LDO21CTRL2 = 0xA4, + MAX77802_REG_LDO22CTRL2 = 0xA5, + MAX77802_REG_LDO23CTRL2 = 0xA6, + MAX77802_REG_LDO24CTRL2 = 0xA7, + MAX77802_REG_LDO25CTRL2 = 0xA8, + MAX77802_REG_LDO26CTRL2 = 
0xA9, + MAX77802_REG_LDO27CTRL2 = 0xAA, + MAX77802_REG_LDO28CTRL2 = 0xAB, + MAX77802_REG_LDO29CTRL2 = 0xAC, + MAX77802_REG_LDO30CTRL2 = 0xAD, + /* Reserved: 0xAE */ + MAX77802_REG_LDO32CTRL2 = 0xAF, + MAX77802_REG_LDO33CTRL2 = 0xB0, + MAX77802_REG_LDO34CTRL2 = 0xB1, + MAX77802_REG_LDO35CTRL2 = 0xB2, + /* Reserved: 0xB3 */ + + MAX77802_REG_BBAT_CHG = 0xB4, + MAX77802_REG_32KHZ = 0xB5, + + MAX77802_REG_PMIC_END = 0xB6, +}; + +enum max77802_rtc_reg { + MAX77802_RTC_INT = 0xC0, + MAX77802_RTC_INTM = 0xC1, + MAX77802_RTC_CONTROLM = 0xC2, + MAX77802_RTC_CONTROL = 0xC3, + MAX77802_RTC_UPDATE0 = 0xC4, + MAX77802_RTC_UPDATE1 = 0xC5, + MAX77802_WTSR_SMPL_CNTL = 0xC6, + MAX77802_RTC_SEC = 0xC7, + MAX77802_RTC_MIN = 0xC8, + MAX77802_RTC_HOUR = 0xC9, + MAX77802_RTC_WEEKDAY = 0xCA, + MAX77802_RTC_MONTH = 0xCB, + MAX77802_RTC_YEAR = 0xCC, + MAX77802_RTC_DATE = 0xCD, + MAX77802_RTC_AE1 = 0xCE, + MAX77802_ALARM1_SEC = 0xCF, + MAX77802_ALARM1_MIN = 0xD0, + MAX77802_ALARM1_HOUR = 0xD1, + MAX77802_ALARM1_WEEKDAY = 0xD2, + MAX77802_ALARM1_MONTH = 0xD3, + MAX77802_ALARM1_YEAR = 0xD4, + MAX77802_ALARM1_DATE = 0xD5, + MAX77802_RTC_AE2 = 0xD6, + MAX77802_ALARM2_SEC = 0xD7, + MAX77802_ALARM2_MIN = 0xD8, + MAX77802_ALARM2_HOUR = 0xD9, + MAX77802_ALARM2_WEEKDAY = 0xDA, + MAX77802_ALARM2_MONTH = 0xDB, + MAX77802_ALARM2_YEAR = 0xDC, + MAX77802_ALARM2_DATE = 0xDD, + + MAX77802_RTC_END = 0xDF, +}; + +enum max77686_irq_source { + PMIC_INT1 = 0, + PMIC_INT2, + RTC_INT, + + MAX77686_IRQ_GROUP_NR, +}; + +enum max77686_irq { + MAX77686_PMICIRQ_PWRONF, + MAX77686_PMICIRQ_PWRONR, + MAX77686_PMICIRQ_JIGONBF, + MAX77686_PMICIRQ_JIGONBR, + MAX77686_PMICIRQ_ACOKBF, + MAX77686_PMICIRQ_ACOKBR, + MAX77686_PMICIRQ_ONKEY1S, + MAX77686_PMICIRQ_MRSTB, + + MAX77686_PMICIRQ_140C, + MAX77686_PMICIRQ_120C, + + MAX77686_RTCIRQ_RTC60S = 0, + MAX77686_RTCIRQ_RTCA1, + MAX77686_RTCIRQ_RTCA2, + MAX77686_RTCIRQ_SMPL, + MAX77686_RTCIRQ_RTC1S, + MAX77686_RTCIRQ_WTSR, +}; + +#define MAX77686_INT1_PWRONF_MSK BIT(0) +#define MAX77686_INT1_PWRONR_MSK BIT(1) +#define MAX77686_INT1_JIGONBF_MSK BIT(2) +#define MAX77686_INT1_JIGONBR_MSK BIT(3) +#define MAX77686_INT1_ACOKBF_MSK BIT(4) +#define MAX77686_INT1_ACOKBR_MSK BIT(5) +#define MAX77686_INT1_ONKEY1S_MSK BIT(6) +#define MAX77686_INT1_MRSTB_MSK BIT(7) + +#define MAX77686_INT2_140C_MSK BIT(0) +#define MAX77686_INT2_120C_MSK BIT(1) + +#define MAX77686_RTCINT_RTC60S_MSK BIT(0) +#define MAX77686_RTCINT_RTCA1_MSK BIT(1) +#define MAX77686_RTCINT_RTCA2_MSK BIT(2) +#define MAX77686_RTCINT_SMPL_MSK BIT(3) +#define MAX77686_RTCINT_RTC1S_MSK BIT(4) +#define MAX77686_RTCINT_WTSR_MSK BIT(5) + +struct max77686_dev { + struct device *dev; + struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */ + + unsigned long type; + + struct regmap *regmap; /* regmap for mfd */ + struct regmap_irq_chip_data *irq_data; + + int irq; + struct mutex irqlock; + int irq_masks_cur[MAX77686_IRQ_GROUP_NR]; + int irq_masks_cache[MAX77686_IRQ_GROUP_NR]; +}; + +enum max77686_types { + TYPE_MAX77686, + TYPE_MAX77802, +}; + +extern int max77686_irq_init(struct max77686_dev *max77686); +extern void max77686_irq_exit(struct max77686_dev *max77686); +extern int max77686_irq_resume(struct max77686_dev *max77686); + +#endif /* __LINUX_MFD_MAX77686_PRIV_H */ diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h new file mode 100644 index 000000000..d4b72d519 --- /dev/null +++ b/include/linux/mfd/max77686.h @@ -0,0 +1,128 @@ +/* + * max77686.h - Driver for the Maxim 77686/802 + * + * Copyright (C) 2012 Samsung 
Electrnoics + * Chiwoong Byun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * This driver is based on max8997.h + * + * MAX77686 has PMIC, RTC devices. + * The devices share the same I2C bus and included in + * this mfd driver. + */ + +#ifndef __LINUX_MFD_MAX77686_H +#define __LINUX_MFD_MAX77686_H + +#include + +/* MAX77686 regulator IDs */ +enum max77686_regulators { + MAX77686_LDO1 = 0, + MAX77686_LDO2, + MAX77686_LDO3, + MAX77686_LDO4, + MAX77686_LDO5, + MAX77686_LDO6, + MAX77686_LDO7, + MAX77686_LDO8, + MAX77686_LDO9, + MAX77686_LDO10, + MAX77686_LDO11, + MAX77686_LDO12, + MAX77686_LDO13, + MAX77686_LDO14, + MAX77686_LDO15, + MAX77686_LDO16, + MAX77686_LDO17, + MAX77686_LDO18, + MAX77686_LDO19, + MAX77686_LDO20, + MAX77686_LDO21, + MAX77686_LDO22, + MAX77686_LDO23, + MAX77686_LDO24, + MAX77686_LDO25, + MAX77686_LDO26, + MAX77686_BUCK1, + MAX77686_BUCK2, + MAX77686_BUCK3, + MAX77686_BUCK4, + MAX77686_BUCK5, + MAX77686_BUCK6, + MAX77686_BUCK7, + MAX77686_BUCK8, + MAX77686_BUCK9, + + MAX77686_REG_MAX, +}; + +/* MAX77802 regulator IDs */ +enum max77802_regulators { + MAX77802_BUCK1 = 0, + MAX77802_BUCK2, + MAX77802_BUCK3, + MAX77802_BUCK4, + MAX77802_BUCK5, + MAX77802_BUCK6, + MAX77802_BUCK7, + MAX77802_BUCK8, + MAX77802_BUCK9, + MAX77802_BUCK10, + MAX77802_LDO1, + MAX77802_LDO2, + MAX77802_LDO3, + MAX77802_LDO4, + MAX77802_LDO5, + MAX77802_LDO6, + MAX77802_LDO7, + MAX77802_LDO8, + MAX77802_LDO9, + MAX77802_LDO10, + MAX77802_LDO11, + MAX77802_LDO12, + MAX77802_LDO13, + MAX77802_LDO14, + MAX77802_LDO15, + MAX77802_LDO17, + MAX77802_LDO18, + MAX77802_LDO19, + MAX77802_LDO20, + MAX77802_LDO21, + MAX77802_LDO23, + MAX77802_LDO24, + MAX77802_LDO25, + MAX77802_LDO26, + MAX77802_LDO27, + MAX77802_LDO28, + MAX77802_LDO29, + MAX77802_LDO30, + MAX77802_LDO32, + MAX77802_LDO33, + MAX77802_LDO34, + MAX77802_LDO35, + + MAX77802_REG_MAX, +}; + +enum max77686_opmode { + MAX77686_OPMODE_NORMAL, + MAX77686_OPMODE_LP, + MAX77686_OPMODE_STANDBY, +}; + +#endif /* __LINUX_MFD_MAX77686_H */ diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h new file mode 100644 index 000000000..095b121aa --- /dev/null +++ b/include/linux/mfd/max77693-common.h @@ -0,0 +1,49 @@ +/* + * Common data shared between Maxim 77693 and 77843 drivers + * + * Copyright (C) 2015 Samsung Electronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_MFD_MAX77693_COMMON_H +#define __LINUX_MFD_MAX77693_COMMON_H + +enum max77693_types { + TYPE_MAX77693_UNKNOWN, + TYPE_MAX77693, + TYPE_MAX77843, + + TYPE_MAX77693_NUM, +}; + +/* + * Shared also with max77843. 
+ */ +struct max77693_dev { + struct device *dev; + struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */ + struct i2c_client *i2c_muic; /* 0x4A , MUIC */ + struct i2c_client *i2c_haptic; /* MAX77693: 0x90 , Haptic */ + struct i2c_client *i2c_chg; /* MAX77843: 0xD2, Charger */ + + enum max77693_types type; + + struct regmap *regmap; + struct regmap *regmap_muic; + struct regmap *regmap_haptic; /* Only MAX77693 */ + struct regmap *regmap_chg; /* Only MAX77843 */ + + struct regmap_irq_chip_data *irq_data_led; + struct regmap_irq_chip_data *irq_data_topsys; + struct regmap_irq_chip_data *irq_data_chg; /* Only MAX77693 */ + struct regmap_irq_chip_data *irq_data_muic; + + int irq; +}; + + +#endif /* __LINUX_MFD_MAX77693_COMMON_H */ diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h new file mode 100644 index 000000000..3c7a63b98 --- /dev/null +++ b/include/linux/mfd/max77693-private.h @@ -0,0 +1,532 @@ +/* + * max77693-private.h - Voltage regulator driver for the Maxim 77693 + * + * Copyright (C) 2012 Samsung Electronics + * SangYoung Son + * + * This program is not provided / owned by Maxim Integrated Products. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_MAX77693_PRIV_H +#define __LINUX_MFD_MAX77693_PRIV_H + +#include + +#define MAX77693_REG_INVALID (0xff) + +/* Slave addr = 0xCC: PMIC, Charger, Flash LED */ +enum max77693_pmic_reg { + MAX77693_LED_REG_IFLASH1 = 0x00, + MAX77693_LED_REG_IFLASH2 = 0x01, + MAX77693_LED_REG_ITORCH = 0x02, + MAX77693_LED_REG_ITORCHTIMER = 0x03, + MAX77693_LED_REG_FLASH_TIMER = 0x04, + MAX77693_LED_REG_FLASH_EN = 0x05, + MAX77693_LED_REG_MAX_FLASH1 = 0x06, + MAX77693_LED_REG_MAX_FLASH2 = 0x07, + MAX77693_LED_REG_MAX_FLASH3 = 0x08, + MAX77693_LED_REG_MAX_FLASH4 = 0x09, + MAX77693_LED_REG_VOUT_CNTL = 0x0A, + MAX77693_LED_REG_VOUT_FLASH1 = 0x0B, + MAX77693_LED_REG_VOUT_FLASH2 = 0x0C, + MAX77693_LED_REG_FLASH_INT = 0x0E, + MAX77693_LED_REG_FLASH_INT_MASK = 0x0F, + MAX77693_LED_REG_FLASH_STATUS = 0x10, + + MAX77693_PMIC_REG_PMIC_ID1 = 0x20, + MAX77693_PMIC_REG_PMIC_ID2 = 0x21, + MAX77693_PMIC_REG_INTSRC = 0x22, + MAX77693_PMIC_REG_INTSRC_MASK = 0x23, + MAX77693_PMIC_REG_TOPSYS_INT = 0x24, + MAX77693_PMIC_REG_TOPSYS_INT_MASK = 0x26, + MAX77693_PMIC_REG_TOPSYS_STAT = 0x28, + MAX77693_PMIC_REG_MAINCTRL1 = 0x2A, + MAX77693_PMIC_REG_LSCNFG = 0x2B, + + MAX77693_CHG_REG_CHG_INT = 0xB0, + MAX77693_CHG_REG_CHG_INT_MASK = 0xB1, + MAX77693_CHG_REG_CHG_INT_OK = 0xB2, + MAX77693_CHG_REG_CHG_DETAILS_00 = 0xB3, + MAX77693_CHG_REG_CHG_DETAILS_01 = 0xB4, + MAX77693_CHG_REG_CHG_DETAILS_02 = 0xB5, + MAX77693_CHG_REG_CHG_DETAILS_03 = 0xB6, + MAX77693_CHG_REG_CHG_CNFG_00 = 0xB7, + MAX77693_CHG_REG_CHG_CNFG_01 = 0xB8, + MAX77693_CHG_REG_CHG_CNFG_02 = 0xB9, + MAX77693_CHG_REG_CHG_CNFG_03 = 0xBA, + MAX77693_CHG_REG_CHG_CNFG_04 = 0xBB, + 
MAX77693_CHG_REG_CHG_CNFG_05 = 0xBC, + MAX77693_CHG_REG_CHG_CNFG_06 = 0xBD, + MAX77693_CHG_REG_CHG_CNFG_07 = 0xBE, + MAX77693_CHG_REG_CHG_CNFG_08 = 0xBF, + MAX77693_CHG_REG_CHG_CNFG_09 = 0xC0, + MAX77693_CHG_REG_CHG_CNFG_10 = 0xC1, + MAX77693_CHG_REG_CHG_CNFG_11 = 0xC2, + MAX77693_CHG_REG_CHG_CNFG_12 = 0xC3, + MAX77693_CHG_REG_CHG_CNFG_13 = 0xC4, + MAX77693_CHG_REG_CHG_CNFG_14 = 0xC5, + MAX77693_CHG_REG_SAFEOUT_CTRL = 0xC6, + + MAX77693_PMIC_REG_END, +}; + +/* MAX77693 ITORCH register */ +#define TORCH_IOUT1_SHIFT 0 +#define TORCH_IOUT2_SHIFT 4 +#define TORCH_IOUT_MASK(x) (0xf << (x)) +#define TORCH_IOUT_MIN 15625 +#define TORCH_IOUT_MAX 250000 +#define TORCH_IOUT_STEP 15625 + +/* MAX77693 IFLASH1 and IFLASH2 registers */ +#define FLASH_IOUT_MIN 15625 +#define FLASH_IOUT_MAX_1LED 1000000 +#define FLASH_IOUT_MAX_2LEDS 625000 +#define FLASH_IOUT_STEP 15625 + +/* MAX77693 TORCH_TIMER register */ +#define TORCH_TMR_NO_TIMER 0x40 +#define TORCH_TIMEOUT_MIN 262000 +#define TORCH_TIMEOUT_MAX 15728000 + +/* MAX77693 FLASH_TIMER register */ +#define FLASH_TMR_LEVEL 0x80 +#define FLASH_TIMEOUT_MIN 62500 +#define FLASH_TIMEOUT_MAX 1000000 +#define FLASH_TIMEOUT_STEP 62500 + +/* MAX77693 FLASH_EN register */ +#define FLASH_EN_OFF 0x0 +#define FLASH_EN_FLASH 0x1 +#define FLASH_EN_TORCH 0x2 +#define FLASH_EN_ON 0x3 +#define FLASH_EN_SHIFT(x) (6 - (x) * 2) +#define TORCH_EN_SHIFT(x) (2 - (x) * 2) + +/* MAX77693 MAX_FLASH1 register */ +#define MAX_FLASH1_MAX_FL_EN 0x80 +#define MAX_FLASH1_VSYS_MIN 2400 +#define MAX_FLASH1_VSYS_MAX 3400 +#define MAX_FLASH1_VSYS_STEP 33 + +/* MAX77693 VOUT_CNTL register */ +#define FLASH_BOOST_FIXED 0x04 +#define FLASH_BOOST_LEDNUM_2 0x80 + +/* MAX77693 VOUT_FLASH1 register */ +#define FLASH_VOUT_MIN 3300 +#define FLASH_VOUT_MAX 5500 +#define FLASH_VOUT_STEP 25 +#define FLASH_VOUT_RMIN 0x0c + +/* MAX77693 FLASH_STATUS register */ +#define FLASH_STATUS_FLASH_ON BIT(3) +#define FLASH_STATUS_TORCH_ON BIT(2) + +/* MAX77693 FLASH_INT register */ +#define FLASH_INT_FLED2_OPEN BIT(0) +#define FLASH_INT_FLED2_SHORT BIT(1) +#define FLASH_INT_FLED1_OPEN BIT(2) +#define FLASH_INT_FLED1_SHORT BIT(3) +#define FLASH_INT_OVER_CURRENT BIT(4) + +/* Fast charge timer in hours */ +#define DEFAULT_FAST_CHARGE_TIMER 4 +/* microamps */ +#define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000 +/* minutes */ +#define DEFAULT_TOP_OFF_TIMER 30 +/* microvolts */ +#define DEFAULT_CONSTANT_VOLT 4200000 +/* microvolts */ +#define DEFAULT_MIN_SYSTEM_VOLT 3600000 +/* celsius */ +#define DEFAULT_THERMAL_REGULATION_TEMP 100 +/* microamps */ +#define DEFAULT_BATTERY_OVERCURRENT 3500000 +/* microvolts */ +#define DEFAULT_CHARGER_INPUT_THRESHOLD_VOLT 4300000 + +/* MAX77693_CHG_REG_CHG_INT_OK register */ +#define CHG_INT_OK_BYP_SHIFT 0 +#define CHG_INT_OK_BAT_SHIFT 3 +#define CHG_INT_OK_CHG_SHIFT 4 +#define CHG_INT_OK_CHGIN_SHIFT 6 +#define CHG_INT_OK_DETBAT_SHIFT 7 +#define CHG_INT_OK_BYP_MASK BIT(CHG_INT_OK_BYP_SHIFT) +#define CHG_INT_OK_BAT_MASK BIT(CHG_INT_OK_BAT_SHIFT) +#define CHG_INT_OK_CHG_MASK BIT(CHG_INT_OK_CHG_SHIFT) +#define CHG_INT_OK_CHGIN_MASK BIT(CHG_INT_OK_CHGIN_SHIFT) +#define CHG_INT_OK_DETBAT_MASK BIT(CHG_INT_OK_DETBAT_SHIFT) + +/* MAX77693_CHG_REG_CHG_DETAILS_00 register */ +#define CHG_DETAILS_00_CHGIN_SHIFT 5 +#define CHG_DETAILS_00_CHGIN_MASK (0x3 << CHG_DETAILS_00_CHGIN_SHIFT) + +/* MAX77693_CHG_REG_CHG_DETAILS_01 register */ +#define CHG_DETAILS_01_CHG_SHIFT 0 +#define CHG_DETAILS_01_BAT_SHIFT 4 +#define CHG_DETAILS_01_TREG_SHIFT 7 +#define CHG_DETAILS_01_CHG_MASK (0xf << 
CHG_DETAILS_01_CHG_SHIFT) +#define CHG_DETAILS_01_BAT_MASK (0x7 << CHG_DETAILS_01_BAT_SHIFT) +#define CHG_DETAILS_01_TREG_MASK BIT(7) + +/* MAX77693_CHG_REG_CHG_DETAILS_01/CHG field */ +enum max77693_charger_charging_state { + MAX77693_CHARGING_PREQUALIFICATION = 0x0, + MAX77693_CHARGING_FAST_CONST_CURRENT, + MAX77693_CHARGING_FAST_CONST_VOLTAGE, + MAX77693_CHARGING_TOP_OFF, + MAX77693_CHARGING_DONE, + MAX77693_CHARGING_HIGH_TEMP, + MAX77693_CHARGING_TIMER_EXPIRED, + MAX77693_CHARGING_THERMISTOR_SUSPEND, + MAX77693_CHARGING_OFF, + MAX77693_CHARGING_RESERVED, + MAX77693_CHARGING_OVER_TEMP, + MAX77693_CHARGING_WATCHDOG_EXPIRED, +}; + +/* MAX77693_CHG_REG_CHG_DETAILS_01/BAT field */ +enum max77693_charger_battery_state { + MAX77693_BATTERY_NOBAT = 0x0, + /* Dead-battery or low-battery prequalification */ + MAX77693_BATTERY_PREQUALIFICATION, + MAX77693_BATTERY_TIMER_EXPIRED, + MAX77693_BATTERY_GOOD, + MAX77693_BATTERY_LOWVOLTAGE, + MAX77693_BATTERY_OVERVOLTAGE, + MAX77693_BATTERY_OVERCURRENT, + MAX77693_BATTERY_RESERVED, +}; + +/* MAX77693_CHG_REG_CHG_DETAILS_02 register */ +#define CHG_DETAILS_02_BYP_SHIFT 0 +#define CHG_DETAILS_02_BYP_MASK (0xf << CHG_DETAILS_02_BYP_SHIFT) + +/* MAX77693 CHG_CNFG_00 register */ +#define CHG_CNFG_00_CHG_MASK 0x1 +#define CHG_CNFG_00_BUCK_MASK 0x4 + +/* MAX77693_CHG_REG_CHG_CNFG_01 register */ +#define CHG_CNFG_01_FCHGTIME_SHIFT 0 +#define CHG_CNFG_01_CHGRSTRT_SHIFT 4 +#define CHG_CNFG_01_PQEN_SHIFT 7 +#define CHG_CNFG_01_FCHGTIME_MASK (0x7 << CHG_CNFG_01_FCHGTIME_SHIFT) +#define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT) +#define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT) + +/* MAX77693_CHG_REG_CHG_CNFG_03 register */ +#define CHG_CNFG_03_TOITH_SHIFT 0 +#define CHG_CNFG_03_TOTIME_SHIFT 3 +#define CHG_CNFG_03_TOITH_MASK (0x7 << CHG_CNFG_03_TOITH_SHIFT) +#define CHG_CNFG_03_TOTIME_MASK (0x7 << CHG_CNFG_03_TOTIME_SHIFT) + +/* MAX77693_CHG_REG_CHG_CNFG_04 register */ +#define CHG_CNFG_04_CHGCVPRM_SHIFT 0 +#define CHG_CNFG_04_MINVSYS_SHIFT 5 +#define CHG_CNFG_04_CHGCVPRM_MASK (0x1f << CHG_CNFG_04_CHGCVPRM_SHIFT) +#define CHG_CNFG_04_MINVSYS_MASK (0x7 << CHG_CNFG_04_MINVSYS_SHIFT) + +/* MAX77693_CHG_REG_CHG_CNFG_06 register */ +#define CHG_CNFG_06_CHGPROT_SHIFT 2 +#define CHG_CNFG_06_CHGPROT_MASK (0x3 << CHG_CNFG_06_CHGPROT_SHIFT) + +/* MAX77693_CHG_REG_CHG_CNFG_07 register */ +#define CHG_CNFG_07_REGTEMP_SHIFT 5 +#define CHG_CNFG_07_REGTEMP_MASK (0x3 << CHG_CNFG_07_REGTEMP_SHIFT) + +/* MAX77693_CHG_REG_CHG_CNFG_12 register */ +#define CHG_CNFG_12_B2SOVRC_SHIFT 0 +#define CHG_CNFG_12_VCHGINREG_SHIFT 3 +#define CHG_CNFG_12_B2SOVRC_MASK (0x7 << CHG_CNFG_12_B2SOVRC_SHIFT) +#define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT) + +/* MAX77693 CHG_CNFG_09 Register */ +#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F + +/* MAX77693 CHG_CTRL Register */ +#define SAFEOUT_CTRL_SAFEOUT1_MASK 0x3 +#define SAFEOUT_CTRL_SAFEOUT2_MASK 0xC +#define SAFEOUT_CTRL_ENSAFEOUT1_MASK 0x40 +#define SAFEOUT_CTRL_ENSAFEOUT2_MASK 0x80 + +/* Slave addr = 0x4A: MUIC */ +enum max77693_muic_reg { + MAX77693_MUIC_REG_ID = 0x00, + MAX77693_MUIC_REG_INT1 = 0x01, + MAX77693_MUIC_REG_INT2 = 0x02, + MAX77693_MUIC_REG_INT3 = 0x03, + MAX77693_MUIC_REG_STATUS1 = 0x04, + MAX77693_MUIC_REG_STATUS2 = 0x05, + MAX77693_MUIC_REG_STATUS3 = 0x06, + MAX77693_MUIC_REG_INTMASK1 = 0x07, + MAX77693_MUIC_REG_INTMASK2 = 0x08, + MAX77693_MUIC_REG_INTMASK3 = 0x09, + MAX77693_MUIC_REG_CDETCTRL1 = 0x0A, + MAX77693_MUIC_REG_CDETCTRL2 = 0x0B, + MAX77693_MUIC_REG_CTRL1 = 0x0C, + 
MAX77693_MUIC_REG_CTRL2 = 0x0D, + MAX77693_MUIC_REG_CTRL3 = 0x0E, + + MAX77693_MUIC_REG_END, +}; + +/* MAX77693 INTMASK1~2 Register */ +#define INTMASK1_ADC1K_SHIFT 3 +#define INTMASK1_ADCERR_SHIFT 2 +#define INTMASK1_ADCLOW_SHIFT 1 +#define INTMASK1_ADC_SHIFT 0 +#define INTMASK1_ADC1K_MASK (1 << INTMASK1_ADC1K_SHIFT) +#define INTMASK1_ADCERR_MASK (1 << INTMASK1_ADCERR_SHIFT) +#define INTMASK1_ADCLOW_MASK (1 << INTMASK1_ADCLOW_SHIFT) +#define INTMASK1_ADC_MASK (1 << INTMASK1_ADC_SHIFT) + +#define INTMASK2_VIDRM_SHIFT 5 +#define INTMASK2_VBVOLT_SHIFT 4 +#define INTMASK2_DXOVP_SHIFT 3 +#define INTMASK2_DCDTMR_SHIFT 2 +#define INTMASK2_CHGDETRUN_SHIFT 1 +#define INTMASK2_CHGTYP_SHIFT 0 +#define INTMASK2_VIDRM_MASK (1 << INTMASK2_VIDRM_SHIFT) +#define INTMASK2_VBVOLT_MASK (1 << INTMASK2_VBVOLT_SHIFT) +#define INTMASK2_DXOVP_MASK (1 << INTMASK2_DXOVP_SHIFT) +#define INTMASK2_DCDTMR_MASK (1 << INTMASK2_DCDTMR_SHIFT) +#define INTMASK2_CHGDETRUN_MASK (1 << INTMASK2_CHGDETRUN_SHIFT) +#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT) + +/* MAX77693 MUIC - STATUS1~3 Register */ +#define MAX77693_STATUS1_ADC_SHIFT 0 +#define MAX77693_STATUS1_ADCLOW_SHIFT 5 +#define MAX77693_STATUS1_ADCERR_SHIFT 6 +#define MAX77693_STATUS1_ADC1K_SHIFT 7 +#define MAX77693_STATUS1_ADC_MASK (0x1f << MAX77693_STATUS1_ADC_SHIFT) +#define MAX77693_STATUS1_ADCLOW_MASK BIT(MAX77693_STATUS1_ADCLOW_SHIFT) +#define MAX77693_STATUS1_ADCERR_MASK BIT(MAX77693_STATUS1_ADCERR_SHIFT) +#define MAX77693_STATUS1_ADC1K_MASK BIT(MAX77693_STATUS1_ADC1K_SHIFT) + +#define MAX77693_STATUS2_CHGTYP_SHIFT 0 +#define MAX77693_STATUS2_CHGDETRUN_SHIFT 3 +#define MAX77693_STATUS2_DCDTMR_SHIFT 4 +#define MAX77693_STATUS2_DXOVP_SHIFT 5 +#define MAX77693_STATUS2_VBVOLT_SHIFT 6 +#define MAX77693_STATUS2_VIDRM_SHIFT 7 +#define MAX77693_STATUS2_CHGTYP_MASK (0x7 << MAX77693_STATUS2_CHGTYP_SHIFT) +#define MAX77693_STATUS2_CHGDETRUN_MASK BIT(MAX77693_STATUS2_CHGDETRUN_SHIFT) +#define MAX77693_STATUS2_DCDTMR_MASK BIT(MAX77693_STATUS2_DCDTMR_SHIFT) +#define MAX77693_STATUS2_DXOVP_MASK BIT(MAX77693_STATUS2_DXOVP_SHIFT) +#define MAX77693_STATUS2_VBVOLT_MASK BIT(MAX77693_STATUS2_VBVOLT_SHIFT) +#define MAX77693_STATUS2_VIDRM_MASK BIT(MAX77693_STATUS2_VIDRM_SHIFT) + +#define MAX77693_STATUS3_OVP_SHIFT 2 +#define MAX77693_STATUS3_OVP_MASK BIT(MAX77693_STATUS3_OVP_SHIFT) + +/* MAX77693 CDETCTRL1~2 register */ +#define CDETCTRL1_CHGDETEN_SHIFT (0) +#define CDETCTRL1_CHGTYPMAN_SHIFT (1) +#define CDETCTRL1_DCDEN_SHIFT (2) +#define CDETCTRL1_DCD2SCT_SHIFT (3) +#define CDETCTRL1_CDDELAY_SHIFT (4) +#define CDETCTRL1_DCDCPL_SHIFT (5) +#define CDETCTRL1_CDPDET_SHIFT (7) +#define CDETCTRL1_CHGDETEN_MASK (0x1 << CDETCTRL1_CHGDETEN_SHIFT) +#define CDETCTRL1_CHGTYPMAN_MASK (0x1 << CDETCTRL1_CHGTYPMAN_SHIFT) +#define CDETCTRL1_DCDEN_MASK (0x1 << CDETCTRL1_DCDEN_SHIFT) +#define CDETCTRL1_DCD2SCT_MASK (0x1 << CDETCTRL1_DCD2SCT_SHIFT) +#define CDETCTRL1_CDDELAY_MASK (0x1 << CDETCTRL1_CDDELAY_SHIFT) +#define CDETCTRL1_DCDCPL_MASK (0x1 << CDETCTRL1_DCDCPL_SHIFT) +#define CDETCTRL1_CDPDET_MASK (0x1 << CDETCTRL1_CDPDET_SHIFT) + +#define CDETCTRL2_VIDRMEN_SHIFT (1) +#define CDETCTRL2_DXOVPEN_SHIFT (3) +#define CDETCTRL2_VIDRMEN_MASK (0x1 << CDETCTRL2_VIDRMEN_SHIFT) +#define CDETCTRL2_DXOVPEN_MASK (0x1 << CDETCTRL2_DXOVPEN_SHIFT) + +/* MAX77693 MUIC - CONTROL1~3 register */ +#define COMN1SW_SHIFT (0) +#define COMP2SW_SHIFT (3) +#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT) +#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT) +#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK) +#define 
MAX77693_CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \ + | (1 << COMN1SW_SHIFT)) +#define MAX77693_CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \ + | (2 << COMN1SW_SHIFT)) +#define MAX77693_CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \ + | (3 << COMN1SW_SHIFT)) +#define MAX77693_CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \ + | (0 << COMN1SW_SHIFT)) + +#define MAX77693_CONTROL2_LOWPWR_SHIFT 0 +#define MAX77693_CONTROL2_ADCEN_SHIFT 1 +#define MAX77693_CONTROL2_CPEN_SHIFT 2 +#define MAX77693_CONTROL2_SFOUTASRT_SHIFT 3 +#define MAX77693_CONTROL2_SFOUTORD_SHIFT 4 +#define MAX77693_CONTROL2_ACCDET_SHIFT 5 +#define MAX77693_CONTROL2_USBCPINT_SHIFT 6 +#define MAX77693_CONTROL2_RCPS_SHIFT 7 +#define MAX77693_CONTROL2_LOWPWR_MASK BIT(MAX77693_CONTROL2_LOWPWR_SHIFT) +#define MAX77693_CONTROL2_ADCEN_MASK BIT(MAX77693_CONTROL2_ADCEN_SHIFT) +#define MAX77693_CONTROL2_CPEN_MASK BIT(MAX77693_CONTROL2_CPEN_SHIFT) +#define MAX77693_CONTROL2_SFOUTASRT_MASK BIT(MAX77693_CONTROL2_SFOUTASRT_SHIFT) +#define MAX77693_CONTROL2_SFOUTORD_MASK BIT(MAX77693_CONTROL2_SFOUTORD_SHIFT) +#define MAX77693_CONTROL2_ACCDET_MASK BIT(MAX77693_CONTROL2_ACCDET_SHIFT) +#define MAX77693_CONTROL2_USBCPINT_MASK BIT(MAX77693_CONTROL2_USBCPINT_SHIFT) +#define MAX77693_CONTROL2_RCPS_MASK BIT(MAX77693_CONTROL2_RCPS_SHIFT) + +#define MAX77693_CONTROL3_JIGSET_SHIFT 0 +#define MAX77693_CONTROL3_BTLDSET_SHIFT 2 +#define MAX77693_CONTROL3_ADCDBSET_SHIFT 4 +#define MAX77693_CONTROL3_JIGSET_MASK (0x3 << MAX77693_CONTROL3_JIGSET_SHIFT) +#define MAX77693_CONTROL3_BTLDSET_MASK (0x3 << MAX77693_CONTROL3_BTLDSET_SHIFT) +#define MAX77693_CONTROL3_ADCDBSET_MASK (0x3 << MAX77693_CONTROL3_ADCDBSET_SHIFT) + +/* Slave addr = 0x90: Haptic */ +enum max77693_haptic_reg { + MAX77693_HAPTIC_REG_STATUS = 0x00, + MAX77693_HAPTIC_REG_CONFIG1 = 0x01, + MAX77693_HAPTIC_REG_CONFIG2 = 0x02, + MAX77693_HAPTIC_REG_CONFIG_CHNL = 0x03, + MAX77693_HAPTIC_REG_CONFG_CYC1 = 0x04, + MAX77693_HAPTIC_REG_CONFG_CYC2 = 0x05, + MAX77693_HAPTIC_REG_CONFIG_PER1 = 0x06, + MAX77693_HAPTIC_REG_CONFIG_PER2 = 0x07, + MAX77693_HAPTIC_REG_CONFIG_PER3 = 0x08, + MAX77693_HAPTIC_REG_CONFIG_PER4 = 0x09, + MAX77693_HAPTIC_REG_CONFIG_DUTY1 = 0x0A, + MAX77693_HAPTIC_REG_CONFIG_DUTY2 = 0x0B, + MAX77693_HAPTIC_REG_CONFIG_PWM1 = 0x0C, + MAX77693_HAPTIC_REG_CONFIG_PWM2 = 0x0D, + MAX77693_HAPTIC_REG_CONFIG_PWM3 = 0x0E, + MAX77693_HAPTIC_REG_CONFIG_PWM4 = 0x0F, + MAX77693_HAPTIC_REG_REV = 0x10, + + MAX77693_HAPTIC_REG_END, +}; + +/* max77693-pmic LSCNFG configuration register */ +#define MAX77693_PMIC_LOW_SYS_MASK 0x80 +#define MAX77693_PMIC_LOW_SYS_SHIFT 7 + +/* max77693-haptic configuration register */ +#define MAX77693_CONFIG2_MODE 7 +#define MAX77693_CONFIG2_MEN 6 +#define MAX77693_CONFIG2_HTYP 5 + +enum max77693_irq_source { + LED_INT = 0, + TOPSYS_INT, + CHG_INT, + MUIC_INT1, + MUIC_INT2, + MUIC_INT3, + + MAX77693_IRQ_GROUP_NR, +}; + +#define SRC_IRQ_CHARGER BIT(0) +#define SRC_IRQ_TOP BIT(1) +#define SRC_IRQ_FLASH BIT(2) +#define SRC_IRQ_MUIC BIT(3) +#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \ + | SRC_IRQ_FLASH | SRC_IRQ_MUIC) + +#define LED_IRQ_FLED2_OPEN BIT(0) +#define LED_IRQ_FLED2_SHORT BIT(1) +#define LED_IRQ_FLED1_OPEN BIT(2) +#define LED_IRQ_FLED1_SHORT BIT(3) +#define LED_IRQ_MAX_FLASH BIT(4) + +#define TOPSYS_IRQ_T120C_INT BIT(0) +#define TOPSYS_IRQ_T140C_INT BIT(1) +#define TOPSYS_IRQ_LOWSYS_INT BIT(3) + +#define CHG_IRQ_BYP_I BIT(0) +#define CHG_IRQ_THM_I BIT(2) +#define CHG_IRQ_BAT_I BIT(3) +#define CHG_IRQ_CHG_I BIT(4) +#define CHG_IRQ_CHGIN_I BIT(6) + +#define MUIC_IRQ_INT1_ADC BIT(0) 
+#define MUIC_IRQ_INT1_ADC_LOW BIT(1) +#define MUIC_IRQ_INT1_ADC_ERR BIT(2) +#define MUIC_IRQ_INT1_ADC1K BIT(3) + +#define MUIC_IRQ_INT2_CHGTYP BIT(0) +#define MUIC_IRQ_INT2_CHGDETREUN BIT(1) +#define MUIC_IRQ_INT2_DCDTMR BIT(2) +#define MUIC_IRQ_INT2_DXOVP BIT(3) +#define MUIC_IRQ_INT2_VBVOLT BIT(4) +#define MUIC_IRQ_INT2_VIDRM BIT(5) + +#define MUIC_IRQ_INT3_EOC BIT(0) +#define MUIC_IRQ_INT3_CGMBC BIT(1) +#define MUIC_IRQ_INT3_OVP BIT(2) +#define MUIC_IRQ_INT3_MBCCHG_ERR BIT(3) +#define MUIC_IRQ_INT3_CHG_ENABLED BIT(4) +#define MUIC_IRQ_INT3_BAT_DET BIT(5) + +enum max77693_irq { + /* PMIC - FLASH */ + MAX77693_LED_IRQ_FLED2_OPEN, + MAX77693_LED_IRQ_FLED2_SHORT, + MAX77693_LED_IRQ_FLED1_OPEN, + MAX77693_LED_IRQ_FLED1_SHORT, + MAX77693_LED_IRQ_MAX_FLASH, + + /* PMIC - TOPSYS */ + MAX77693_TOPSYS_IRQ_T120C_INT, + MAX77693_TOPSYS_IRQ_T140C_INT, + MAX77693_TOPSYS_IRQ_LOWSYS_INT, + + /* PMIC - Charger */ + MAX77693_CHG_IRQ_BYP_I, + MAX77693_CHG_IRQ_THM_I, + MAX77693_CHG_IRQ_BAT_I, + MAX77693_CHG_IRQ_CHG_I, + MAX77693_CHG_IRQ_CHGIN_I, + + MAX77693_IRQ_NR, +}; + +enum max77693_irq_muic { + /* MUIC INT1 */ + MAX77693_MUIC_IRQ_INT1_ADC, + MAX77693_MUIC_IRQ_INT1_ADC_LOW, + MAX77693_MUIC_IRQ_INT1_ADC_ERR, + MAX77693_MUIC_IRQ_INT1_ADC1K, + + /* MUIC INT2 */ + MAX77693_MUIC_IRQ_INT2_CHGTYP, + MAX77693_MUIC_IRQ_INT2_CHGDETREUN, + MAX77693_MUIC_IRQ_INT2_DCDTMR, + MAX77693_MUIC_IRQ_INT2_DXOVP, + MAX77693_MUIC_IRQ_INT2_VBVOLT, + MAX77693_MUIC_IRQ_INT2_VIDRM, + + /* MUIC INT3 */ + MAX77693_MUIC_IRQ_INT3_EOC, + MAX77693_MUIC_IRQ_INT3_CGMBC, + MAX77693_MUIC_IRQ_INT3_OVP, + MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR, + MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, + MAX77693_MUIC_IRQ_INT3_BAT_DET, + + MAX77693_MUIC_IRQ_NR, +}; + +#endif /* __LINUX_MFD_MAX77693_PRIV_H */ diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h new file mode 100644 index 000000000..d450f6873 --- /dev/null +++ b/include/linux/mfd/max77693.h @@ -0,0 +1,91 @@ +/* + * max77693.h - Driver for the Maxim 77693 + * + * Copyright (C) 2012 Samsung Electronics + * SangYoung Son + * + * This program is not provided / owned by Maxim Integrated Products. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * This driver is based on max8997.h + * + * MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices. + * The devices share the same I2C bus and are included in + * this mfd driver. 
+ */ + +#ifndef __LINUX_MFD_MAX77693_H +#define __LINUX_MFD_MAX77693_H + +/* MAX77693 regulator IDs */ +enum max77693_regulators { + MAX77693_ESAFEOUT1 = 0, + MAX77693_ESAFEOUT2, + MAX77693_CHARGER, + MAX77693_REG_MAX, +}; + +struct max77693_reg_data { + u8 addr; + u8 data; +}; + +struct max77693_muic_platform_data { + struct max77693_reg_data *init_data; + int num_init_data; + + int detcable_delay_ms; + + /* + * Default USB/UART path, either UART/USB or AUX_UART/AUX_USB, + * i.e. the h/w path of COMP2/COMN1 in the CONTROL1 register. + */ + int path_usb; + int path_uart; +}; + +/* MAX77693 led flash */ + +/* triggers */ +enum max77693_led_trigger { + MAX77693_LED_TRIG_OFF, + MAX77693_LED_TRIG_FLASH, + MAX77693_LED_TRIG_TORCH, + MAX77693_LED_TRIG_EXT, + MAX77693_LED_TRIG_SOFT, +}; + +/* trigger types */ +enum max77693_led_trigger_type { + MAX77693_LED_TRIG_TYPE_EDGE, + MAX77693_LED_TRIG_TYPE_LEVEL, +}; + +/* boost modes */ +enum max77693_led_boost_mode { + MAX77693_LED_BOOST_NONE, + MAX77693_LED_BOOST_ADAPTIVE, + MAX77693_LED_BOOST_FIXED, +}; + +/* MAX77693 */ + +struct max77693_platform_data { + /* muic data */ + struct max77693_muic_platform_data *muic_data; + struct max77693_led_platform_data *led_data; +}; +#endif /* __LINUX_MFD_MAX77693_H */ diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h new file mode 100644 index 000000000..b8908bf8d --- /dev/null +++ b/include/linux/mfd/max77843-private.h @@ -0,0 +1,439 @@ +/* + * Common variables for the Maxim MAX77843 driver + * + * Copyright (C) 2015 Samsung Electronics + * Author: Jaewon Kim + * Author: Beomho Seo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __MAX77843_PRIVATE_H_ +#define __MAX77843_PRIVATE_H_ + +#include +#include + +#define I2C_ADDR_TOPSYS (0xCC >> 1) +#define I2C_ADDR_CHG (0xD2 >> 1) +#define I2C_ADDR_FG (0x6C >> 1) +#define I2C_ADDR_MUIC (0x4A >> 1) + +/* Topsys, Haptic and LED registers */ +enum max77843_sys_reg { + MAX77843_SYS_REG_PMICID = 0x00, + MAX77843_SYS_REG_PMICREV = 0x01, + MAX77843_SYS_REG_MAINCTRL1 = 0x02, + MAX77843_SYS_REG_INTSRC = 0x22, + MAX77843_SYS_REG_INTSRCMASK = 0x23, + MAX77843_SYS_REG_SYSINTSRC = 0x24, + MAX77843_SYS_REG_SYSINTMASK = 0x26, + MAX77843_SYS_REG_TOPSYS_STAT = 0x28, + MAX77843_SYS_REG_SAFEOUTCTRL = 0xC6, + + MAX77843_SYS_REG_END, +}; + +enum max77843_haptic_reg { + MAX77843_HAP_REG_MCONFIG = 0x10, + + MAX77843_HAP_REG_END, +}; + +enum max77843_led_reg { + MAX77843_LED_REG_LEDEN = 0x30, + MAX77843_LED_REG_LED0BRT = 0x31, + MAX77843_LED_REG_LED1BRT = 0x32, + MAX77843_LED_REG_LED2BRT = 0x33, + MAX77843_LED_REG_LED3BRT = 0x34, + MAX77843_LED_REG_LEDBLNK = 0x38, + MAX77843_LED_REG_LEDRAMP = 0x36, + + MAX77843_LED_REG_END, +}; + +/* Charger registers */ +enum max77843_charger_reg { + MAX77843_CHG_REG_CHG_INT = 0xB0, + MAX77843_CHG_REG_CHG_INT_MASK = 0xB1, + MAX77843_CHG_REG_CHG_INT_OK = 0xB2, + MAX77843_CHG_REG_CHG_DTLS_00 = 0xB3, + MAX77843_CHG_REG_CHG_DTLS_01 = 0xB4, + MAX77843_CHG_REG_CHG_DTLS_02 = 0xB5, + MAX77843_CHG_REG_CHG_CNFG_00 = 0xB7, + MAX77843_CHG_REG_CHG_CNFG_01 = 0xB8, + MAX77843_CHG_REG_CHG_CNFG_02 = 0xB9, + MAX77843_CHG_REG_CHG_CNFG_03 = 0xBA, + MAX77843_CHG_REG_CHG_CNFG_04 = 0xBB, + MAX77843_CHG_REG_CHG_CNFG_06 = 0xBD, + MAX77843_CHG_REG_CHG_CNFG_07 = 0xBE, + MAX77843_CHG_REG_CHG_CNFG_09 = 0xC0, + MAX77843_CHG_REG_CHG_CNFG_10 = 0xC1, + MAX77843_CHG_REG_CHG_CNFG_11 = 0xC2, + MAX77843_CHG_REG_CHG_CNFG_12 = 0xC3, + + MAX77843_CHG_REG_END, +}; + +/* Fuel gauge registers */ +enum max77843_fuelgauge { + MAX77843_FG_REG_STATUS = 0x00, + MAX77843_FG_REG_VALRT_TH = 0x01, + MAX77843_FG_REG_TALRT_TH = 0x02, + MAX77843_FG_REG_SALRT_TH = 0x03, + MAX77843_FG_RATE_AT_RATE = 0x04, + MAX77843_FG_REG_REMCAP_REP = 0x05, + MAX77843_FG_REG_SOCREP = 0x06, + MAX77843_FG_REG_AGE = 0x07, + MAX77843_FG_REG_TEMP = 0x08, + MAX77843_FG_REG_VCELL = 0x09, + MAX77843_FG_REG_CURRENT = 0x0A, + MAX77843_FG_REG_AVG_CURRENT = 0x0B, + MAX77843_FG_REG_SOCMIX = 0x0D, + MAX77843_FG_REG_SOCAV = 0x0E, + MAX77843_FG_REG_REMCAP_MIX = 0x0F, + MAX77843_FG_REG_FULLCAP = 0x10, + MAX77843_FG_REG_AVG_TEMP = 0x16, + MAX77843_FG_REG_CYCLES = 0x17, + MAX77843_FG_REG_AVG_VCELL = 0x19, + MAX77843_FG_REG_CONFIG = 0x1D, + MAX77843_FG_REG_REMCAP_AV = 0x1F, + MAX77843_FG_REG_FULLCAP_NOM = 0x23, + MAX77843_FG_REG_MISCCFG = 0x2B, + MAX77843_FG_REG_RCOMP = 0x38, + MAX77843_FG_REG_FSTAT = 0x3D, + MAX77843_FG_REG_DQACC = 0x45, + MAX77843_FG_REG_DPACC = 0x46, + MAX77843_FG_REG_OCV = 0xEE, + MAX77843_FG_REG_VFOCV = 0xFB, + MAX77843_FG_SOCVF = 0xFF, + + MAX77843_FG_END, +}; + +/* MUIC registers */ +enum max77843_muic_reg { + MAX77843_MUIC_REG_ID = 0x00, + MAX77843_MUIC_REG_INT1 = 0x01, + MAX77843_MUIC_REG_INT2 = 0x02, + MAX77843_MUIC_REG_INT3 = 0x03, + MAX77843_MUIC_REG_STATUS1 = 0x04, + MAX77843_MUIC_REG_STATUS2 = 0x05, + MAX77843_MUIC_REG_STATUS3 = 0x06, + MAX77843_MUIC_REG_INTMASK1 = 0x07, + MAX77843_MUIC_REG_INTMASK2 = 0x08, + MAX77843_MUIC_REG_INTMASK3 = 0x09, + MAX77843_MUIC_REG_CDETCTRL1 = 0x0A, + MAX77843_MUIC_REG_CDETCTRL2 = 0x0B, + MAX77843_MUIC_REG_CONTROL1 = 0x0C, + MAX77843_MUIC_REG_CONTROL2 = 0x0D, + MAX77843_MUIC_REG_CONTROL3 = 0x0E, + MAX77843_MUIC_REG_CONTROL4 = 0x16, + MAX77843_MUIC_REG_HVCONTROL1 = 0x17, + 
MAX77843_MUIC_REG_HVCONTROL2 = 0x18, + + MAX77843_MUIC_REG_END, +}; + +enum max77843_irq { + /* Topsys: SYSTEM */ + MAX77843_SYS_IRQ_SYSINTSRC_SYSUVLO_INT, + MAX77843_SYS_IRQ_SYSINTSRC_SYSOVLO_INT, + MAX77843_SYS_IRQ_SYSINTSRC_TSHDN_INT, + MAX77843_SYS_IRQ_SYSINTSRC_TM_INT, + + /* Charger: CHG_INT */ + MAX77843_CHG_IRQ_CHG_INT_BYP_I, + MAX77843_CHG_IRQ_CHG_INT_BATP_I, + MAX77843_CHG_IRQ_CHG_INT_BAT_I, + MAX77843_CHG_IRQ_CHG_INT_CHG_I, + MAX77843_CHG_IRQ_CHG_INT_WCIN_I, + MAX77843_CHG_IRQ_CHG_INT_CHGIN_I, + MAX77843_CHG_IRQ_CHG_INT_AICL_I, + + MAX77843_IRQ_NUM, +}; + +enum max77843_irq_muic { + /* MUIC: INT1 */ + MAX77843_MUIC_IRQ_INT1_ADC, + MAX77843_MUIC_IRQ_INT1_ADCERROR, + MAX77843_MUIC_IRQ_INT1_ADC1K, + + /* MUIC: INT2 */ + MAX77843_MUIC_IRQ_INT2_CHGTYP, + MAX77843_MUIC_IRQ_INT2_CHGDETRUN, + MAX77843_MUIC_IRQ_INT2_DCDTMR, + MAX77843_MUIC_IRQ_INT2_DXOVP, + MAX77843_MUIC_IRQ_INT2_VBVOLT, + + /* MUIC: INT3 */ + MAX77843_MUIC_IRQ_INT3_VBADC, + MAX77843_MUIC_IRQ_INT3_VDNMON, + MAX77843_MUIC_IRQ_INT3_DNRES, + MAX77843_MUIC_IRQ_INT3_MPNACK, + MAX77843_MUIC_IRQ_INT3_MRXBUFOW, + MAX77843_MUIC_IRQ_INT3_MRXTRF, + MAX77843_MUIC_IRQ_INT3_MRXPERR, + MAX77843_MUIC_IRQ_INT3_MRXRDY, + + MAX77843_MUIC_IRQ_NUM, +}; + +/* MAX77843 interrupts */ +#define MAX77843_SYS_IRQ_SYSUVLO_INT BIT(0) +#define MAX77843_SYS_IRQ_SYSOVLO_INT BIT(1) +#define MAX77843_SYS_IRQ_TSHDN_INT BIT(2) +#define MAX77843_SYS_IRQ_TM_INT BIT(3) + +/* MAX77843 MAINCTRL1 register */ +#define MAINCTRL1_BIASEN_SHIFT 7 +#define MAX77843_MAINCTRL1_BIASEN_MASK BIT(MAINCTRL1_BIASEN_SHIFT) + +/* MAX77843 MCONFIG register */ +#define MCONFIG_MODE_SHIFT 7 +#define MCONFIG_MEN_SHIFT 6 +#define MCONFIG_PDIV_SHIFT 0 + +#define MAX77843_MCONFIG_MODE_MASK BIT(MCONFIG_MODE_SHIFT) +#define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT) +#define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT) + +/* MAX77843 charger interrupts */ +#define MAX77843_CHG_BYP_I BIT(0) +#define MAX77843_CHG_BATP_I BIT(2) +#define MAX77843_CHG_BAT_I BIT(3) +#define MAX77843_CHG_CHG_I BIT(4) +#define MAX77843_CHG_WCIN_I BIT(5) +#define MAX77843_CHG_CHGIN_I BIT(6) +#define MAX77843_CHG_AICL_I BIT(7) + +/* MAX77843 CHG_INT_OK register */ +#define MAX77843_CHG_BYP_OK BIT(0) +#define MAX77843_CHG_BATP_OK BIT(2) +#define MAX77843_CHG_BAT_OK BIT(3) +#define MAX77843_CHG_CHG_OK BIT(4) +#define MAX77843_CHG_WCIN_OK BIT(5) +#define MAX77843_CHG_CHGIN_OK BIT(6) +#define MAX77843_CHG_AICL_OK BIT(7) + +/* MAX77843 CHG_DETAILS_00 register */ +#define MAX77843_CHG_BAT_DTLS BIT(0) + +/* MAX77843 CHG_DETAILS_01 register */ +#define MAX77843_CHG_DTLS_MASK 0x0f +#define MAX77843_CHG_PQ_MODE 0x00 +#define MAX77843_CHG_CC_MODE 0x01 +#define MAX77843_CHG_CV_MODE 0x02 +#define MAX77843_CHG_TO_MODE 0x03 +#define MAX77843_CHG_DO_MODE 0x04 +#define MAX77843_CHG_HT_MODE 0x05 +#define MAX77843_CHG_TF_MODE 0x06 +#define MAX77843_CHG_TS_MODE 0x07 +#define MAX77843_CHG_OFF_MODE 0x08 + +#define MAX77843_CHG_BAT_DTLS_MASK 0xf0 +#define MAX77843_CHG_NO_BAT (0x00 << 4) +#define MAX77843_CHG_LOW_VOLT_BAT (0x01 << 4) +#define MAX77843_CHG_LONG_BAT_TIME (0x02 << 4) +#define MAX77843_CHG_OK_BAT (0x03 << 4) +#define MAX77843_CHG_OK_LOW_VOLT_BAT (0x04 << 4) +#define MAX77843_CHG_OVER_VOLT_BAT (0x05 << 4) +#define MAX77843_CHG_OVER_CURRENT_BAT (0x06 << 4) + +/* MAX77843 CHG_CNFG_00 register */ +#define MAX77843_CHG_MODE_MASK 0x0f +#define MAX77843_CHG_DISABLE 0x00 +#define MAX77843_CHG_ENABLE 0x05 +#define MAX77843_CHG_MASK 0x01 +#define MAX77843_CHG_OTG_MASK 0x02 +#define MAX77843_CHG_BUCK_MASK 0x04 +#define 
MAX77843_CHG_BOOST_MASK 0x08 + +/* MAX77843 CHG_CNFG_01 register */ +#define MAX77843_CHG_RESTART_THRESHOLD_100 0x00 +#define MAX77843_CHG_RESTART_THRESHOLD_150 0x10 +#define MAX77843_CHG_RESTART_THRESHOLD_200 0x20 +#define MAX77843_CHG_RESTART_THRESHOLD_DISABLE 0x30 + +/* MAX77843 CHG_CNFG_02 register */ +#define MAX77843_CHG_FAST_CHG_CURRENT_MIN 100000 +#define MAX77843_CHG_FAST_CHG_CURRENT_MAX 3150000 +#define MAX77843_CHG_FAST_CHG_CURRENT_STEP 50000 +#define MAX77843_CHG_FAST_CHG_CURRENT_MASK 0x3f +#define MAX77843_CHG_OTG_ILIMIT_500 (0x00 << 6) +#define MAX77843_CHG_OTG_ILIMIT_900 (0x01 << 6) +#define MAX77843_CHG_OTG_ILIMIT_1200 (0x02 << 6) +#define MAX77843_CHG_OTG_ILIMIT_1500 (0x03 << 6) +#define MAX77843_CHG_OTG_ILIMIT_MASK 0xc0 + +/* MAX77843 CHG_CNFG_03 register */ +#define MAX77843_CHG_TOP_OFF_CURRENT_MIN 125000 +#define MAX77843_CHG_TOP_OFF_CURRENT_MAX 650000 +#define MAX77843_CHG_TOP_OFF_CURRENT_STEP 75000 +#define MAX77843_CHG_TOP_OFF_CURRENT_MASK 0x07 + +/* MAX77843 CHG_CNFG_06 register */ +#define MAX77843_CHG_WRITE_CAP_BLOCK 0x10 +#define MAX77843_CHG_WRITE_CAP_UNBLOCK 0x0C + +/* MAX77843_CHG_CNFG_09_register */ +#define MAX77843_CHG_INPUT_CURRENT_LIMIT_MIN 100000 +#define MAX77843_CHG_INPUT_CURRENT_LIMIT_MAX 4000000 +#define MAX77843_CHG_INPUT_CURRENT_LIMIT_REF 3367000 +#define MAX77843_CHG_INPUT_CURRENT_LIMIT_STEP 33000 + +#define MAX77843_MUIC_ADC BIT(0) +#define MAX77843_MUIC_ADCERROR BIT(2) +#define MAX77843_MUIC_ADC1K BIT(3) + +#define MAX77843_MUIC_CHGTYP BIT(0) +#define MAX77843_MUIC_CHGDETRUN BIT(1) +#define MAX77843_MUIC_DCDTMR BIT(2) +#define MAX77843_MUIC_DXOVP BIT(3) +#define MAX77843_MUIC_VBVOLT BIT(4) + +#define MAX77843_MUIC_VBADC BIT(0) +#define MAX77843_MUIC_VDNMON BIT(1) +#define MAX77843_MUIC_DNRES BIT(2) +#define MAX77843_MUIC_MPNACK BIT(3) +#define MAX77843_MUIC_MRXBUFOW BIT(4) +#define MAX77843_MUIC_MRXTRF BIT(5) +#define MAX77843_MUIC_MRXPERR BIT(6) +#define MAX77843_MUIC_MRXRDY BIT(7) + +/* MAX77843 INTSRCMASK register */ +#define MAX77843_INTSRCMASK_CHGR 0 +#define MAX77843_INTSRCMASK_SYS 1 +#define MAX77843_INTSRCMASK_FG 2 +#define MAX77843_INTSRCMASK_MUIC 3 + +#define MAX77843_INTSRCMASK_CHGR_MASK BIT(MAX77843_INTSRCMASK_CHGR) +#define MAX77843_INTSRCMASK_SYS_MASK BIT(MAX77843_INTSRCMASK_SYS) +#define MAX77843_INTSRCMASK_FG_MASK BIT(MAX77843_INTSRCMASK_FG) +#define MAX77843_INTSRCMASK_MUIC_MASK BIT(MAX77843_INTSRCMASK_MUIC) + +#define MAX77843_INTSRC_MASK_MASK \ + (MAX77843_INTSRCMASK_MUIC_MASK | MAX77843_INTSRCMASK_FG_MASK | \ + MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK) + +/* MAX77843 STATUS register*/ +#define MAX77843_MUIC_STATUS1_ADC_SHIFT 0 +#define MAX77843_MUIC_STATUS1_ADCERROR_SHIFT 6 +#define MAX77843_MUIC_STATUS1_ADC1K_SHIFT 7 +#define MAX77843_MUIC_STATUS2_CHGTYP_SHIFT 0 +#define MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT 3 +#define MAX77843_MUIC_STATUS2_DCDTMR_SHIFT 4 +#define MAX77843_MUIC_STATUS2_DXOVP_SHIFT 5 +#define MAX77843_MUIC_STATUS2_VBVOLT_SHIFT 6 +#define MAX77843_MUIC_STATUS3_VBADC_SHIFT 0 +#define MAX77843_MUIC_STATUS3_VDNMON_SHIFT 4 +#define MAX77843_MUIC_STATUS3_DNRES_SHIFT 5 +#define MAX77843_MUIC_STATUS3_MPNACK_SHIFT 6 + +#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << MAX77843_MUIC_STATUS1_ADC_SHIFT) +#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(MAX77843_MUIC_STATUS1_ADCERROR_SHIFT) +#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(MAX77843_MUIC_STATUS1_ADC1K_SHIFT) +#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << MAX77843_MUIC_STATUS2_CHGTYP_SHIFT) +#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK 
BIT(MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT) +#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(MAX77843_MUIC_STATUS2_DCDTMR_SHIFT) +#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(MAX77843_MUIC_STATUS2_DXOVP_SHIFT) +#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(MAX77843_MUIC_STATUS2_VBVOLT_SHIFT) +#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << MAX77843_MUIC_STATUS3_VBADC_SHIFT) +#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(MAX77843_MUIC_STATUS3_VDNMON_SHIFT) +#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(MAX77843_MUIC_STATUS3_DNRES_SHIFT) +#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(MAX77843_MUIC_STATUS3_MPNACK_SHIFT) + +/* MAX77843 CONTROL register */ +#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0 +#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3 +#define MAX77843_MUIC_CONTROL1_NOBCCOMP_SHIFT 6 +#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7 +#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0 +#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1 +#define MAX77843_MUIC_CONTROL2_CPEN_SHIFT 2 +#define MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT 5 +#define MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT 6 +#define MAX77843_MUIC_CONTROL2_RCPS_SHIFT 7 +#define MAX77843_MUIC_CONTROL3_JIGSET_SHIFT 0 +#define MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT 0 +#define MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT 4 +#define MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT 5 +#define MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT 6 + +#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT) +#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT) +#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT) +#define MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK BIT(MAX77843_MUIC_CONTROL1_NOBCCOMP_SHIFT) +#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT) +#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT) +#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT) +#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT) +#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT) +#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(MAX77843_MUIC_CONTROL2_RCPS_SHIFT) +#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << MAX77843_MUIC_CONTROL3_JIGSET_SHIFT) +#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT) +#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) +#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT) +#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT) + +/* MAX77843 switch port */ +#define COM_OPEN 0 +#define COM_USB 1 +#define COM_AUDIO 2 +#define COM_UART 3 +#define COM_AUX_USB 4 +#define COM_AUX_UART 5 + +#define MAX77843_MUIC_CONTROL1_COM_SW \ + ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \ + MAX77843_MUIC_CONTROL1_COMP2SW_MASK)) + +#define MAX77843_MUIC_CONTROL1_SW_OPEN \ + ((COM_OPEN << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \ + COM_OPEN << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)) +#define MAX77843_MUIC_CONTROL1_SW_USB \ + ((COM_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \ + COM_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)) +#define MAX77843_MUIC_CONTROL1_SW_AUDIO \ + ((COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \ + COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)) +#define MAX77843_MUIC_CONTROL1_SW_UART \ + ((COM_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \ + COM_UART << 
MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)) +#define MAX77843_MUIC_CONTROL1_SW_AUX_USB \ + ((COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \ + COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)) +#define MAX77843_MUIC_CONTROL1_SW_AUX_UART \ + ((COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \ + COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)) + +#define MAX77843_DISABLE 0 +#define MAX77843_ENABLE 1 + +#define CONTROL4_AUTO_DISABLE \ + ((MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \ + (MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT)) +#define CONTROL4_AUTO_ENABLE \ + ((MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \ + (MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT)) + +/* MAX77843 SAFEOUT LDO Control register */ +#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0 +#define SAFEOUTCTRL_SAFEOUT2_SHIFT 2 +#define SAFEOUTCTRL_ENSAFEOUT1_SHIFT 6 +#define SAFEOUTCTRL_ENSAFEOUT2_SHIFT 7 + +#define MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1 \ + BIT(SAFEOUTCTRL_ENSAFEOUT1_SHIFT) +#define MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2 \ + BIT(SAFEOUTCTRL_ENSAFEOUT2_SHIFT) +#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK \ + (0x3 << SAFEOUTCTRL_SAFEOUT1_SHIFT) +#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \ + (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT) + +#endif /* __MAX77843_H__ */ diff --git a/include/linux/mfd/max8907.h b/include/linux/mfd/max8907.h new file mode 100644 index 000000000..b06f7a6a1 --- /dev/null +++ b/include/linux/mfd/max8907.h @@ -0,0 +1,252 @@ +/* + * Functions to access MAX8907 power management chip. + * + * Copyright (C) 2010 Gyungoh Yoo + * Copyright (C) 2012, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_MFD_MAX8907_H +#define __LINUX_MFD_MAX8907_H + +#include +#include + +#define MAX8907_GEN_I2C_ADDR (0x78 >> 1) +#define MAX8907_ADC_I2C_ADDR (0x8e >> 1) +#define MAX8907_RTC_I2C_ADDR (0xd0 >> 1) + +/* MAX8907 register map */ +#define MAX8907_REG_SYSENSEL 0x00 +#define MAX8907_REG_ON_OFF_IRQ1 0x01 +#define MAX8907_REG_ON_OFF_IRQ1_MASK 0x02 +#define MAX8907_REG_ON_OFF_STAT 0x03 +#define MAX8907_REG_SDCTL1 0x04 +#define MAX8907_REG_SDSEQCNT1 0x05 +#define MAX8907_REG_SDV1 0x06 +#define MAX8907_REG_SDCTL2 0x07 +#define MAX8907_REG_SDSEQCNT2 0x08 +#define MAX8907_REG_SDV2 0x09 +#define MAX8907_REG_SDCTL3 0x0A +#define MAX8907_REG_SDSEQCNT3 0x0B +#define MAX8907_REG_SDV3 0x0C +#define MAX8907_REG_ON_OFF_IRQ2 0x0D +#define MAX8907_REG_ON_OFF_IRQ2_MASK 0x0E +#define MAX8907_REG_RESET_CNFG 0x0F +#define MAX8907_REG_LDOCTL16 0x10 +#define MAX8907_REG_LDOSEQCNT16 0x11 +#define MAX8907_REG_LDO16VOUT 0x12 +#define MAX8907_REG_SDBYSEQCNT 0x13 +#define MAX8907_REG_LDOCTL17 0x14 +#define MAX8907_REG_LDOSEQCNT17 0x15 +#define MAX8907_REG_LDO17VOUT 0x16 +#define MAX8907_REG_LDOCTL1 0x18 +#define MAX8907_REG_LDOSEQCNT1 0x19 +#define MAX8907_REG_LDO1VOUT 0x1A +#define MAX8907_REG_LDOCTL2 0x1C +#define MAX8907_REG_LDOSEQCNT2 0x1D +#define MAX8907_REG_LDO2VOUT 0x1E +#define MAX8907_REG_LDOCTL3 0x20 +#define MAX8907_REG_LDOSEQCNT3 0x21 +#define MAX8907_REG_LDO3VOUT 0x22 +#define MAX8907_REG_LDOCTL4 0x24 +#define MAX8907_REG_LDOSEQCNT4 0x25 +#define MAX8907_REG_LDO4VOUT 0x26 +#define MAX8907_REG_LDOCTL5 0x28 +#define MAX8907_REG_LDOSEQCNT5 0x29 +#define MAX8907_REG_LDO5VOUT 0x2A +#define MAX8907_REG_LDOCTL6 0x2C +#define MAX8907_REG_LDOSEQCNT6 0x2D +#define MAX8907_REG_LDO6VOUT 0x2E +#define MAX8907_REG_LDOCTL7 0x30 +#define MAX8907_REG_LDOSEQCNT7 0x31 +#define MAX8907_REG_LDO7VOUT 0x32 +#define MAX8907_REG_LDOCTL8 0x34 +#define MAX8907_REG_LDOSEQCNT8 0x35 +#define MAX8907_REG_LDO8VOUT 0x36 +#define MAX8907_REG_LDOCTL9 0x38 +#define MAX8907_REG_LDOSEQCNT9 0x39 +#define MAX8907_REG_LDO9VOUT 0x3A +#define MAX8907_REG_LDOCTL10 0x3C +#define MAX8907_REG_LDOSEQCNT10 0x3D +#define MAX8907_REG_LDO10VOUT 0x3E +#define MAX8907_REG_LDOCTL11 0x40 +#define MAX8907_REG_LDOSEQCNT11 0x41 +#define MAX8907_REG_LDO11VOUT 0x42 +#define MAX8907_REG_LDOCTL12 0x44 +#define MAX8907_REG_LDOSEQCNT12 0x45 +#define MAX8907_REG_LDO12VOUT 0x46 +#define MAX8907_REG_LDOCTL13 0x48 +#define MAX8907_REG_LDOSEQCNT13 0x49 +#define MAX8907_REG_LDO13VOUT 0x4A +#define MAX8907_REG_LDOCTL14 0x4C +#define MAX8907_REG_LDOSEQCNT14 0x4D +#define MAX8907_REG_LDO14VOUT 0x4E +#define MAX8907_REG_LDOCTL15 0x50 +#define MAX8907_REG_LDOSEQCNT15 0x51 +#define MAX8907_REG_LDO15VOUT 0x52 +#define MAX8907_REG_OUT5VEN 0x54 +#define MAX8907_REG_OUT5VSEQ 0x55 +#define MAX8907_REG_OUT33VEN 0x58 +#define MAX8907_REG_OUT33VSEQ 0x59 +#define MAX8907_REG_LDOCTL19 0x5C +#define MAX8907_REG_LDOSEQCNT19 0x5D +#define MAX8907_REG_LDO19VOUT 0x5E +#define MAX8907_REG_LBCNFG 0x60 +#define MAX8907_REG_SEQ1CNFG 0x64 +#define MAX8907_REG_SEQ2CNFG 0x65 +#define MAX8907_REG_SEQ3CNFG 0x66 +#define MAX8907_REG_SEQ4CNFG 0x67 +#define MAX8907_REG_SEQ5CNFG 0x68 +#define MAX8907_REG_SEQ6CNFG 0x69 +#define MAX8907_REG_SEQ7CNFG 0x6A +#define MAX8907_REG_LDOCTL18 0x72 +#define MAX8907_REG_LDOSEQCNT18 0x73 +#define MAX8907_REG_LDO18VOUT 0x74 +#define MAX8907_REG_BBAT_CNFG 0x78 +#define MAX8907_REG_CHG_CNTL1 0x7C +#define MAX8907_REG_CHG_CNTL2 0x7D +#define MAX8907_REG_CHG_IRQ1 0x7E +#define MAX8907_REG_CHG_IRQ2 0x7F +#define MAX8907_REG_CHG_IRQ1_MASK 0x80 +#define 
MAX8907_REG_CHG_IRQ2_MASK 0x81 +#define MAX8907_REG_CHG_STAT 0x82 +#define MAX8907_REG_WLED_MODE_CNTL 0x84 +#define MAX8907_REG_ILED_CNTL 0x84 +#define MAX8907_REG_II1RR 0x8E +#define MAX8907_REG_II2RR 0x8F +#define MAX8907_REG_LDOCTL20 0x9C +#define MAX8907_REG_LDOSEQCNT20 0x9D +#define MAX8907_REG_LDO20VOUT 0x9E + +/* RTC register map */ +#define MAX8907_REG_RTC_SEC 0x00 +#define MAX8907_REG_RTC_MIN 0x01 +#define MAX8907_REG_RTC_HOURS 0x02 +#define MAX8907_REG_RTC_WEEKDAY 0x03 +#define MAX8907_REG_RTC_DATE 0x04 +#define MAX8907_REG_RTC_MONTH 0x05 +#define MAX8907_REG_RTC_YEAR1 0x06 +#define MAX8907_REG_RTC_YEAR2 0x07 +#define MAX8907_REG_ALARM0_SEC 0x08 +#define MAX8907_REG_ALARM0_MIN 0x09 +#define MAX8907_REG_ALARM0_HOURS 0x0A +#define MAX8907_REG_ALARM0_WEEKDAY 0x0B +#define MAX8907_REG_ALARM0_DATE 0x0C +#define MAX8907_REG_ALARM0_MONTH 0x0D +#define MAX8907_REG_ALARM0_YEAR1 0x0E +#define MAX8907_REG_ALARM0_YEAR2 0x0F +#define MAX8907_REG_ALARM1_SEC 0x10 +#define MAX8907_REG_ALARM1_MIN 0x11 +#define MAX8907_REG_ALARM1_HOURS 0x12 +#define MAX8907_REG_ALARM1_WEEKDAY 0x13 +#define MAX8907_REG_ALARM1_DATE 0x14 +#define MAX8907_REG_ALARM1_MONTH 0x15 +#define MAX8907_REG_ALARM1_YEAR1 0x16 +#define MAX8907_REG_ALARM1_YEAR2 0x17 +#define MAX8907_REG_ALARM0_CNTL 0x18 +#define MAX8907_REG_ALARM1_CNTL 0x19 +#define MAX8907_REG_RTC_STATUS 0x1A +#define MAX8907_REG_RTC_CNTL 0x1B +#define MAX8907_REG_RTC_IRQ 0x1C +#define MAX8907_REG_RTC_IRQ_MASK 0x1D +#define MAX8907_REG_MPL_CNTL 0x1E + +/* ADC and Touch Screen Controller register map */ +#define MAX8907_CTL 0 +#define MAX8907_SEQCNT 1 +#define MAX8907_VOUT 2 + +/* mask bit fields */ +#define MAX8907_MASK_LDO_SEQ 0x1C +#define MAX8907_MASK_LDO_EN 0x01 +#define MAX8907_MASK_VBBATTCV 0x03 +#define MAX8907_MASK_OUT5V_VINEN 0x10 +#define MAX8907_MASK_OUT5V_ENSRC 0x0E +#define MAX8907_MASK_OUT5V_EN 0x01 +#define MAX8907_MASK_POWER_OFF 0x40 + +/* Regulator IDs */ +#define MAX8907_MBATT 0 +#define MAX8907_SD1 1 +#define MAX8907_SD2 2 +#define MAX8907_SD3 3 +#define MAX8907_LDO1 4 +#define MAX8907_LDO2 5 +#define MAX8907_LDO3 6 +#define MAX8907_LDO4 7 +#define MAX8907_LDO5 8 +#define MAX8907_LDO6 9 +#define MAX8907_LDO7 10 +#define MAX8907_LDO8 11 +#define MAX8907_LDO9 12 +#define MAX8907_LDO10 13 +#define MAX8907_LDO11 14 +#define MAX8907_LDO12 15 +#define MAX8907_LDO13 16 +#define MAX8907_LDO14 17 +#define MAX8907_LDO15 18 +#define MAX8907_LDO16 19 +#define MAX8907_LDO17 20 +#define MAX8907_LDO18 21 +#define MAX8907_LDO19 22 +#define MAX8907_LDO20 23 +#define MAX8907_OUT5V 24 +#define MAX8907_OUT33V 25 +#define MAX8907_BBAT 26 +#define MAX8907_SDBY 27 +#define MAX8907_VRTC 28 +#define MAX8907_NUM_REGULATORS (MAX8907_VRTC + 1) + +/* IRQ definitions */ +enum { + MAX8907_IRQ_VCHG_DC_OVP = 0, + MAX8907_IRQ_VCHG_DC_F, + MAX8907_IRQ_VCHG_DC_R, + MAX8907_IRQ_VCHG_THM_OK_R, + MAX8907_IRQ_VCHG_THM_OK_F, + MAX8907_IRQ_VCHG_MBATTLOW_F, + MAX8907_IRQ_VCHG_MBATTLOW_R, + MAX8907_IRQ_VCHG_RST, + MAX8907_IRQ_VCHG_DONE, + MAX8907_IRQ_VCHG_TOPOFF, + MAX8907_IRQ_VCHG_TMR_FAULT, + + MAX8907_IRQ_GPM_RSTIN = 0, + MAX8907_IRQ_GPM_MPL, + MAX8907_IRQ_GPM_SW_3SEC, + MAX8907_IRQ_GPM_EXTON_F, + MAX8907_IRQ_GPM_EXTON_R, + MAX8907_IRQ_GPM_SW_1SEC, + MAX8907_IRQ_GPM_SW_F, + MAX8907_IRQ_GPM_SW_R, + MAX8907_IRQ_GPM_SYSCKEN_F, + MAX8907_IRQ_GPM_SYSCKEN_R, + + MAX8907_IRQ_RTC_ALARM1 = 0, + MAX8907_IRQ_RTC_ALARM0, +}; + +struct max8907_platform_data { + struct regulator_init_data *init_data[MAX8907_NUM_REGULATORS]; + bool pm_off; +}; + +struct regmap_irq_chips_data; + +struct max8907 { + 
struct device *dev; + struct mutex irq_lock; + struct i2c_client *i2c_gen; + struct i2c_client *i2c_rtc; + struct regmap *regmap_gen; + struct regmap *regmap_rtc; + struct regmap_irq_chip_data *irqc_chg; + struct regmap_irq_chip_data *irqc_on_off; + struct regmap_irq_chip_data *irqc_rtc; +}; + +#endif diff --git a/include/linux/mfd/max8925.h b/include/linux/mfd/max8925.h new file mode 100644 index 000000000..ce8502e9e --- /dev/null +++ b/include/linux/mfd/max8925.h @@ -0,0 +1,277 @@ +/* + * Maxim8925 Interface + * + * Copyright (C) 2009 Marvell International Ltd. + * Haojian Zhuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MFD_MAX8925_H +#define __LINUX_MFD_MAX8925_H + +#include +#include + +/* Unified sub device IDs for MAX8925 */ +enum { + MAX8925_ID_SD1, + MAX8925_ID_SD2, + MAX8925_ID_SD3, + MAX8925_ID_LDO1, + MAX8925_ID_LDO2, + MAX8925_ID_LDO3, + MAX8925_ID_LDO4, + MAX8925_ID_LDO5, + MAX8925_ID_LDO6, + MAX8925_ID_LDO7, + MAX8925_ID_LDO8, + MAX8925_ID_LDO9, + MAX8925_ID_LDO10, + MAX8925_ID_LDO11, + MAX8925_ID_LDO12, + MAX8925_ID_LDO13, + MAX8925_ID_LDO14, + MAX8925_ID_LDO15, + MAX8925_ID_LDO16, + MAX8925_ID_LDO17, + MAX8925_ID_LDO18, + MAX8925_ID_LDO19, + MAX8925_ID_LDO20, + MAX8925_ID_MAX, +}; + +enum { + /* + * Charging current threshold trigger going from fast charge + * to TOPOFF charge. From 5% to 20% of fast charging current. + */ + MAX8925_TOPOFF_THR_5PER, + MAX8925_TOPOFF_THR_10PER, + MAX8925_TOPOFF_THR_15PER, + MAX8925_TOPOFF_THR_20PER, +}; + +enum { + /* Fast charging current */ + MAX8925_FCHG_85MA, + MAX8925_FCHG_300MA, + MAX8925_FCHG_460MA, + MAX8925_FCHG_600MA, + MAX8925_FCHG_700MA, + MAX8925_FCHG_800MA, + MAX8925_FCHG_900MA, + MAX8925_FCHG_1000MA, +}; + +/* Charger registers */ +#define MAX8925_CHG_IRQ1 (0x7e) +#define MAX8925_CHG_IRQ2 (0x7f) +#define MAX8925_CHG_IRQ1_MASK (0x80) +#define MAX8925_CHG_IRQ2_MASK (0x81) +#define MAX8925_CHG_STATUS (0x82) + +/* GPM registers */ +#define MAX8925_SYSENSEL (0x00) +#define MAX8925_ON_OFF_IRQ1 (0x01) +#define MAX8925_ON_OFF_IRQ1_MASK (0x02) +#define MAX8925_ON_OFF_STATUS (0x03) +#define MAX8925_ON_OFF_IRQ2 (0x0d) +#define MAX8925_ON_OFF_IRQ2_MASK (0x0e) +#define MAX8925_RESET_CNFG (0x0f) + +/* Touch registers */ +#define MAX8925_TSC_IRQ (0x00) +#define MAX8925_TSC_IRQ_MASK (0x01) +#define MAX8925_TSC_CNFG1 (0x02) +#define MAX8925_ADC_SCHED (0x10) +#define MAX8925_ADC_RES_END (0x6f) + +#define MAX8925_NREF_OK (1 << 4) + +/* RTC registers */ +#define MAX8925_ALARM0_CNTL (0x18) +#define MAX8925_ALARM1_CNTL (0x19) +#define MAX8925_RTC_IRQ (0x1c) +#define MAX8925_RTC_IRQ_MASK (0x1d) +#define MAX8925_MPL_CNTL (0x1e) + +/* WLED registers */ +#define MAX8925_WLED_MODE_CNTL (0x84) +#define MAX8925_WLED_CNTL (0x85) + +/* MAX8925 Registers */ +#define MAX8925_SDCTL1 (0x04) +#define MAX8925_SDCTL2 (0x07) +#define MAX8925_SDCTL3 (0x0A) +#define MAX8925_SDV1 (0x06) +#define MAX8925_SDV2 (0x09) +#define MAX8925_SDV3 (0x0C) +#define MAX8925_LDOCTL1 (0x18) +#define MAX8925_LDOCTL2 (0x1C) +#define MAX8925_LDOCTL3 (0x20) +#define MAX8925_LDOCTL4 (0x24) +#define MAX8925_LDOCTL5 (0x28) +#define MAX8925_LDOCTL6 (0x2C) +#define MAX8925_LDOCTL7 (0x30) +#define MAX8925_LDOCTL8 (0x34) +#define MAX8925_LDOCTL9 (0x38) +#define MAX8925_LDOCTL10 (0x3C) +#define MAX8925_LDOCTL11 (0x40) +#define MAX8925_LDOCTL12 (0x44) +#define MAX8925_LDOCTL13 (0x48) +#define MAX8925_LDOCTL14 (0x4C) 
+#define MAX8925_LDOCTL15 (0x50) +#define MAX8925_LDOCTL16 (0x10) +#define MAX8925_LDOCTL17 (0x14) +#define MAX8925_LDOCTL18 (0x72) +#define MAX8925_LDOCTL19 (0x5C) +#define MAX8925_LDOCTL20 (0x9C) +#define MAX8925_LDOVOUT1 (0x1A) +#define MAX8925_LDOVOUT2 (0x1E) +#define MAX8925_LDOVOUT3 (0x22) +#define MAX8925_LDOVOUT4 (0x26) +#define MAX8925_LDOVOUT5 (0x2A) +#define MAX8925_LDOVOUT6 (0x2E) +#define MAX8925_LDOVOUT7 (0x32) +#define MAX8925_LDOVOUT8 (0x36) +#define MAX8925_LDOVOUT9 (0x3A) +#define MAX8925_LDOVOUT10 (0x3E) +#define MAX8925_LDOVOUT11 (0x42) +#define MAX8925_LDOVOUT12 (0x46) +#define MAX8925_LDOVOUT13 (0x4A) +#define MAX8925_LDOVOUT14 (0x4E) +#define MAX8925_LDOVOUT15 (0x52) +#define MAX8925_LDOVOUT16 (0x12) +#define MAX8925_LDOVOUT17 (0x16) +#define MAX8925_LDOVOUT18 (0x74) +#define MAX8925_LDOVOUT19 (0x5E) +#define MAX8925_LDOVOUT20 (0x9E) + +/* bit definitions */ +#define CHG_IRQ1_MASK (0x07) +#define CHG_IRQ2_MASK (0xff) +#define ON_OFF_IRQ1_MASK (0xff) +#define ON_OFF_IRQ2_MASK (0x03) +#define TSC_IRQ_MASK (0x03) +#define RTC_IRQ_MASK (0x0c) + +#define MAX8925_NAME_SIZE (32) + +/* IRQ definitions */ +enum { + MAX8925_IRQ_VCHG_DC_OVP, + MAX8925_IRQ_VCHG_DC_F, + MAX8925_IRQ_VCHG_DC_R, + MAX8925_IRQ_VCHG_THM_OK_R, + MAX8925_IRQ_VCHG_THM_OK_F, + MAX8925_IRQ_VCHG_SYSLOW_F, + MAX8925_IRQ_VCHG_SYSLOW_R, + MAX8925_IRQ_VCHG_RST, + MAX8925_IRQ_VCHG_DONE, + MAX8925_IRQ_VCHG_TOPOFF, + MAX8925_IRQ_VCHG_TMR_FAULT, + MAX8925_IRQ_GPM_RSTIN, + MAX8925_IRQ_GPM_MPL, + MAX8925_IRQ_GPM_SW_3SEC, + MAX8925_IRQ_GPM_EXTON_F, + MAX8925_IRQ_GPM_EXTON_R, + MAX8925_IRQ_GPM_SW_1SEC, + MAX8925_IRQ_GPM_SW_F, + MAX8925_IRQ_GPM_SW_R, + MAX8925_IRQ_GPM_SYSCKEN_F, + MAX8925_IRQ_GPM_SYSCKEN_R, + MAX8925_IRQ_RTC_ALARM1, + MAX8925_IRQ_RTC_ALARM0, + MAX8925_IRQ_TSC_STICK, + MAX8925_IRQ_TSC_NSTICK, + MAX8925_NR_IRQS, +}; + + + +struct max8925_chip { + struct device *dev; + struct i2c_client *i2c; + struct i2c_client *adc; + struct i2c_client *rtc; + struct mutex io_lock; + struct mutex irq_lock; + + int irq_base; + int core_irq; + int tsc_irq; + unsigned int wakeup_flag; +}; + +struct max8925_backlight_pdata { + int lxw_scl; /* 0/1 -- 0.8Ohm/0.4Ohm */ + int lxw_freq; /* 700KHz ~ 1400KHz */ + int dual_string; /* 0/1 -- single/dual string */ +}; + +struct max8925_touch_pdata { + unsigned int flags; +}; + +struct max8925_power_pdata { + int (*set_charger)(int); + unsigned batt_detect:1; + unsigned topoff_threshold:2; + unsigned fast_charge:3; /* charge current */ + unsigned no_temp_support:1; /* set if no temperature detect */ + unsigned no_insert_detect:1; /* set if no ac insert detect */ + char **supplied_to; + int num_supplicants; +}; + +/* + * irq_base: stores IRQ base number of MAX8925 in platform + * tsc_irq: stores IRQ number of MAX8925 TSC + */ +struct max8925_platform_data { + struct max8925_backlight_pdata *backlight; + struct max8925_touch_pdata *touch; + struct max8925_power_pdata *power; + struct regulator_init_data *sd1; + struct regulator_init_data *sd2; + struct regulator_init_data *sd3; + struct regulator_init_data *ldo1; + struct regulator_init_data *ldo2; + struct regulator_init_data *ldo3; + struct regulator_init_data *ldo4; + struct regulator_init_data *ldo5; + struct regulator_init_data *ldo6; + struct regulator_init_data *ldo7; + struct regulator_init_data *ldo8; + struct regulator_init_data *ldo9; + struct regulator_init_data *ldo10; + struct regulator_init_data *ldo11; + struct regulator_init_data *ldo12; + struct regulator_init_data *ldo13; + struct regulator_init_data *ldo14; + struct 
regulator_init_data *ldo15; + struct regulator_init_data *ldo16; + struct regulator_init_data *ldo17; + struct regulator_init_data *ldo18; + struct regulator_init_data *ldo19; + struct regulator_init_data *ldo20; + + int irq_base; + int tsc_irq; +}; + +extern int max8925_reg_read(struct i2c_client *, int); +extern int max8925_reg_write(struct i2c_client *, int, unsigned char); +extern int max8925_bulk_read(struct i2c_client *, int, int, unsigned char *); +extern int max8925_bulk_write(struct i2c_client *, int, int, unsigned char *); +extern int max8925_set_bits(struct i2c_client *, int, unsigned char, + unsigned char); + +extern int max8925_device_init(struct max8925_chip *, + struct max8925_platform_data *); +extern void max8925_device_exit(struct max8925_chip *); +#endif /* __LINUX_MFD_MAX8925_H */ + diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h new file mode 100644 index 000000000..78c76cd4d --- /dev/null +++ b/include/linux/mfd/max8997-private.h @@ -0,0 +1,430 @@ +/* + * max8997-private.h - Voltage regulator driver for the Maxim 8997 + * + * Copyright (C) 2010 Samsung Electronics + * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_MAX8997_PRIV_H +#define __LINUX_MFD_MAX8997_PRIV_H + +#include +#include +#include + +#define MAX8997_REG_INVALID (0xff) + +enum max8997_pmic_reg { + MAX8997_REG_PMIC_ID0 = 0x00, + MAX8997_REG_PMIC_ID1 = 0x01, + MAX8997_REG_INTSRC = 0x02, + MAX8997_REG_INT1 = 0x03, + MAX8997_REG_INT2 = 0x04, + MAX8997_REG_INT3 = 0x05, + MAX8997_REG_INT4 = 0x06, + + MAX8997_REG_INT1MSK = 0x08, + MAX8997_REG_INT2MSK = 0x09, + MAX8997_REG_INT3MSK = 0x0a, + MAX8997_REG_INT4MSK = 0x0b, + + MAX8997_REG_STATUS1 = 0x0d, + MAX8997_REG_STATUS2 = 0x0e, + MAX8997_REG_STATUS3 = 0x0f, + MAX8997_REG_STATUS4 = 0x10, + + MAX8997_REG_MAINCON1 = 0x13, + MAX8997_REG_MAINCON2 = 0x14, + MAX8997_REG_BUCKRAMP = 0x15, + + MAX8997_REG_BUCK1CTRL = 0x18, + MAX8997_REG_BUCK1DVS1 = 0x19, + MAX8997_REG_BUCK1DVS2 = 0x1a, + MAX8997_REG_BUCK1DVS3 = 0x1b, + MAX8997_REG_BUCK1DVS4 = 0x1c, + MAX8997_REG_BUCK1DVS5 = 0x1d, + MAX8997_REG_BUCK1DVS6 = 0x1e, + MAX8997_REG_BUCK1DVS7 = 0x1f, + MAX8997_REG_BUCK1DVS8 = 0x20, + MAX8997_REG_BUCK2CTRL = 0x21, + MAX8997_REG_BUCK2DVS1 = 0x22, + MAX8997_REG_BUCK2DVS2 = 0x23, + MAX8997_REG_BUCK2DVS3 = 0x24, + MAX8997_REG_BUCK2DVS4 = 0x25, + MAX8997_REG_BUCK2DVS5 = 0x26, + MAX8997_REG_BUCK2DVS6 = 0x27, + MAX8997_REG_BUCK2DVS7 = 0x28, + MAX8997_REG_BUCK2DVS8 = 0x29, + MAX8997_REG_BUCK3CTRL = 0x2a, + MAX8997_REG_BUCK3DVS = 0x2b, + MAX8997_REG_BUCK4CTRL = 0x2c, + MAX8997_REG_BUCK4DVS = 0x2d, + MAX8997_REG_BUCK5CTRL = 0x2e, + MAX8997_REG_BUCK5DVS1 = 0x2f, + MAX8997_REG_BUCK5DVS2 = 0x30, + MAX8997_REG_BUCK5DVS3 = 0x31, + MAX8997_REG_BUCK5DVS4 = 0x32, + MAX8997_REG_BUCK5DVS5 = 0x33, + MAX8997_REG_BUCK5DVS6 = 0x34, + MAX8997_REG_BUCK5DVS7 = 
0x35, + MAX8997_REG_BUCK5DVS8 = 0x36, + MAX8997_REG_BUCK6CTRL = 0x37, + MAX8997_REG_BUCK6BPSKIPCTRL = 0x38, + MAX8997_REG_BUCK7CTRL = 0x39, + MAX8997_REG_BUCK7DVS = 0x3a, + MAX8997_REG_LDO1CTRL = 0x3b, + MAX8997_REG_LDO2CTRL = 0x3c, + MAX8997_REG_LDO3CTRL = 0x3d, + MAX8997_REG_LDO4CTRL = 0x3e, + MAX8997_REG_LDO5CTRL = 0x3f, + MAX8997_REG_LDO6CTRL = 0x40, + MAX8997_REG_LDO7CTRL = 0x41, + MAX8997_REG_LDO8CTRL = 0x42, + MAX8997_REG_LDO9CTRL = 0x43, + MAX8997_REG_LDO10CTRL = 0x44, + MAX8997_REG_LDO11CTRL = 0x45, + MAX8997_REG_LDO12CTRL = 0x46, + MAX8997_REG_LDO13CTRL = 0x47, + MAX8997_REG_LDO14CTRL = 0x48, + MAX8997_REG_LDO15CTRL = 0x49, + MAX8997_REG_LDO16CTRL = 0x4a, + MAX8997_REG_LDO17CTRL = 0x4b, + MAX8997_REG_LDO18CTRL = 0x4c, + MAX8997_REG_LDO21CTRL = 0x4d, + + MAX8997_REG_MBCCTRL1 = 0x50, + MAX8997_REG_MBCCTRL2 = 0x51, + MAX8997_REG_MBCCTRL3 = 0x52, + MAX8997_REG_MBCCTRL4 = 0x53, + MAX8997_REG_MBCCTRL5 = 0x54, + MAX8997_REG_MBCCTRL6 = 0x55, + MAX8997_REG_OTPCGHCVS = 0x56, + + MAX8997_REG_SAFEOUTCTRL = 0x5a, + + MAX8997_REG_LBCNFG1 = 0x5e, + MAX8997_REG_LBCNFG2 = 0x5f, + MAX8997_REG_BBCCTRL = 0x60, + + MAX8997_REG_FLASH1_CUR = 0x63, /* 0x63 ~ 0x6e for FLASH */ + MAX8997_REG_FLASH2_CUR = 0x64, + MAX8997_REG_MOVIE_CUR = 0x65, + MAX8997_REG_GSMB_CUR = 0x66, + MAX8997_REG_BOOST_CNTL = 0x67, + MAX8997_REG_LEN_CNTL = 0x68, + MAX8997_REG_FLASH_CNTL = 0x69, + MAX8997_REG_WDT_CNTL = 0x6a, + MAX8997_REG_MAXFLASH1 = 0x6b, + MAX8997_REG_MAXFLASH2 = 0x6c, + MAX8997_REG_FLASHSTATUS = 0x6d, + MAX8997_REG_FLASHSTATUSMASK = 0x6e, + + MAX8997_REG_GPIOCNTL1 = 0x70, + MAX8997_REG_GPIOCNTL2 = 0x71, + MAX8997_REG_GPIOCNTL3 = 0x72, + MAX8997_REG_GPIOCNTL4 = 0x73, + MAX8997_REG_GPIOCNTL5 = 0x74, + MAX8997_REG_GPIOCNTL6 = 0x75, + MAX8997_REG_GPIOCNTL7 = 0x76, + MAX8997_REG_GPIOCNTL8 = 0x77, + MAX8997_REG_GPIOCNTL9 = 0x78, + MAX8997_REG_GPIOCNTL10 = 0x79, + MAX8997_REG_GPIOCNTL11 = 0x7a, + MAX8997_REG_GPIOCNTL12 = 0x7b, + + MAX8997_REG_LDO1CONFIG = 0x80, + MAX8997_REG_LDO2CONFIG = 0x81, + MAX8997_REG_LDO3CONFIG = 0x82, + MAX8997_REG_LDO4CONFIG = 0x83, + MAX8997_REG_LDO5CONFIG = 0x84, + MAX8997_REG_LDO6CONFIG = 0x85, + MAX8997_REG_LDO7CONFIG = 0x86, + MAX8997_REG_LDO8CONFIG = 0x87, + MAX8997_REG_LDO9CONFIG = 0x88, + MAX8997_REG_LDO10CONFIG = 0x89, + MAX8997_REG_LDO11CONFIG = 0x8a, + MAX8997_REG_LDO12CONFIG = 0x8b, + MAX8997_REG_LDO13CONFIG = 0x8c, + MAX8997_REG_LDO14CONFIG = 0x8d, + MAX8997_REG_LDO15CONFIG = 0x8e, + MAX8997_REG_LDO16CONFIG = 0x8f, + MAX8997_REG_LDO17CONFIG = 0x90, + MAX8997_REG_LDO18CONFIG = 0x91, + MAX8997_REG_LDO21CONFIG = 0x92, + + MAX8997_REG_DVSOKTIMER1 = 0x97, + MAX8997_REG_DVSOKTIMER2 = 0x98, + MAX8997_REG_DVSOKTIMER4 = 0x99, + MAX8997_REG_DVSOKTIMER5 = 0x9a, + + MAX8997_REG_PMIC_END = 0x9b, +}; + +enum max8997_muic_reg { + MAX8997_MUIC_REG_ID = 0x0, + MAX8997_MUIC_REG_INT1 = 0x1, + MAX8997_MUIC_REG_INT2 = 0x2, + MAX8997_MUIC_REG_INT3 = 0x3, + MAX8997_MUIC_REG_STATUS1 = 0x4, + MAX8997_MUIC_REG_STATUS2 = 0x5, + MAX8997_MUIC_REG_STATUS3 = 0x6, + MAX8997_MUIC_REG_INTMASK1 = 0x7, + MAX8997_MUIC_REG_INTMASK2 = 0x8, + MAX8997_MUIC_REG_INTMASK3 = 0x9, + MAX8997_MUIC_REG_CDETCTRL = 0xa, + + MAX8997_MUIC_REG_CONTROL1 = 0xc, + MAX8997_MUIC_REG_CONTROL2 = 0xd, + MAX8997_MUIC_REG_CONTROL3 = 0xe, + + MAX8997_MUIC_REG_END = 0xf, +}; + +/* MAX8997-MUIC STATUS1 register */ +#define STATUS1_ADC_SHIFT 0 +#define STATUS1_ADCLOW_SHIFT 5 +#define STATUS1_ADCERR_SHIFT 6 +#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) +#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT) +#define STATUS1_ADCERR_MASK 
(0x1 << STATUS1_ADCERR_SHIFT) + +/* MAX8997-MUIC STATUS2 register */ +#define STATUS2_CHGTYP_SHIFT 0 +#define STATUS2_CHGDETRUN_SHIFT 3 +#define STATUS2_DCDTMR_SHIFT 4 +#define STATUS2_DBCHG_SHIFT 5 +#define STATUS2_VBVOLT_SHIFT 6 +#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) +#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT) +#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT) +#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT) +#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT) + +/* MAX8997-MUIC STATUS3 register */ +#define STATUS3_OVP_SHIFT 2 +#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT) + +/* MAX8997-MUIC CONTROL1 register */ +#define COMN1SW_SHIFT 0 +#define COMP2SW_SHIFT 3 +#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT) +#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT) +#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK) + +#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \ + | (1 << COMN1SW_SHIFT)) +#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \ + | (2 << COMN1SW_SHIFT)) +#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \ + | (3 << COMN1SW_SHIFT)) +#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \ + | (0 << COMN1SW_SHIFT)) + +#define CONTROL2_LOWPWR_SHIFT (0) +#define CONTROL2_ADCEN_SHIFT (1) +#define CONTROL2_CPEN_SHIFT (2) +#define CONTROL2_SFOUTASRT_SHIFT (3) +#define CONTROL2_SFOUTORD_SHIFT (4) +#define CONTROL2_ACCDET_SHIFT (5) +#define CONTROL2_USBCPINT_SHIFT (6) +#define CONTROL2_RCPS_SHIFT (7) +#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT) +#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT) +#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT) +#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT) +#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT) +#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT) +#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT) +#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT) + +#define CONTROL3_JIGSET_SHIFT (0) +#define CONTROL3_BTLDSET_SHIFT (2) +#define CONTROL3_ADCDBSET_SHIFT (4) +#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT) +#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT) +#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT) + +enum max8997_haptic_reg { + MAX8997_HAPTIC_REG_GENERAL = 0x00, + MAX8997_HAPTIC_REG_CONF1 = 0x01, + MAX8997_HAPTIC_REG_CONF2 = 0x02, + MAX8997_HAPTIC_REG_DRVCONF = 0x03, + MAX8997_HAPTIC_REG_CYCLECONF1 = 0x04, + MAX8997_HAPTIC_REG_CYCLECONF2 = 0x05, + MAX8997_HAPTIC_REG_SIGCONF1 = 0x06, + MAX8997_HAPTIC_REG_SIGCONF2 = 0x07, + MAX8997_HAPTIC_REG_SIGCONF3 = 0x08, + MAX8997_HAPTIC_REG_SIGCONF4 = 0x09, + MAX8997_HAPTIC_REG_SIGDC1 = 0x0a, + MAX8997_HAPTIC_REG_SIGDC2 = 0x0b, + MAX8997_HAPTIC_REG_SIGPWMDC1 = 0x0c, + MAX8997_HAPTIC_REG_SIGPWMDC2 = 0x0d, + MAX8997_HAPTIC_REG_SIGPWMDC3 = 0x0e, + MAX8997_HAPTIC_REG_SIGPWMDC4 = 0x0f, + MAX8997_HAPTIC_REG_MTR_REV = 0x10, + + MAX8997_HAPTIC_REG_END = 0x11, +}; + +/* slave addr = 0x0c: using "2nd part" of rev4 datasheet */ +enum max8997_rtc_reg { + MAX8997_RTC_CTRLMASK = 0x02, + MAX8997_RTC_CTRL = 0x03, + MAX8997_RTC_UPDATE1 = 0x04, + MAX8997_RTC_UPDATE2 = 0x05, + MAX8997_RTC_WTSR_SMPL = 0x06, + + MAX8997_RTC_SEC = 0x10, + MAX8997_RTC_MIN = 0x11, + MAX8997_RTC_HOUR = 0x12, + MAX8997_RTC_DAY_OF_WEEK = 0x13, + MAX8997_RTC_MONTH = 0x14, + MAX8997_RTC_YEAR = 0x15, + MAX8997_RTC_DAY_OF_MONTH = 0x16, + MAX8997_RTC_ALARM1_SEC = 0x17, + MAX8997_RTC_ALARM1_MIN = 0x18, + MAX8997_RTC_ALARM1_HOUR = 0x19, + MAX8997_RTC_ALARM1_DAY_OF_WEEK = 
0x1a, + MAX8997_RTC_ALARM1_MONTH = 0x1b, + MAX8997_RTC_ALARM1_YEAR = 0x1c, + MAX8997_RTC_ALARM1_DAY_OF_MONTH = 0x1d, + MAX8997_RTC_ALARM2_SEC = 0x1e, + MAX8997_RTC_ALARM2_MIN = 0x1f, + MAX8997_RTC_ALARM2_HOUR = 0x20, + MAX8997_RTC_ALARM2_DAY_OF_WEEK = 0x21, + MAX8997_RTC_ALARM2_MONTH = 0x22, + MAX8997_RTC_ALARM2_YEAR = 0x23, + MAX8997_RTC_ALARM2_DAY_OF_MONTH = 0x24, +}; + +enum max8997_irq_source { + PMIC_INT1 = 0, + PMIC_INT2, + PMIC_INT3, + PMIC_INT4, + + FUEL_GAUGE, /* Ignored (MAX17042 driver handles) */ + + MUIC_INT1, + MUIC_INT2, + MUIC_INT3, + + GPIO_LOW, /* Not implemented */ + GPIO_HI, /* Not implemented */ + + FLASH_STATUS, /* Not implemented */ + + MAX8997_IRQ_GROUP_NR, +}; + +enum max8997_irq { + MAX8997_PMICIRQ_PWRONR, + MAX8997_PMICIRQ_PWRONF, + MAX8997_PMICIRQ_PWRON1SEC, + MAX8997_PMICIRQ_JIGONR, + MAX8997_PMICIRQ_JIGONF, + MAX8997_PMICIRQ_LOWBAT2, + MAX8997_PMICIRQ_LOWBAT1, + + MAX8997_PMICIRQ_JIGR, + MAX8997_PMICIRQ_JIGF, + MAX8997_PMICIRQ_MR, + MAX8997_PMICIRQ_DVS1OK, + MAX8997_PMICIRQ_DVS2OK, + MAX8997_PMICIRQ_DVS3OK, + MAX8997_PMICIRQ_DVS4OK, + + MAX8997_PMICIRQ_CHGINS, + MAX8997_PMICIRQ_CHGRM, + MAX8997_PMICIRQ_DCINOVP, + MAX8997_PMICIRQ_TOPOFFR, + MAX8997_PMICIRQ_CHGRSTF, + MAX8997_PMICIRQ_MBCHGTMEXPD, + + MAX8997_PMICIRQ_RTC60S, + MAX8997_PMICIRQ_RTCA1, + MAX8997_PMICIRQ_RTCA2, + MAX8997_PMICIRQ_SMPL_INT, + MAX8997_PMICIRQ_RTC1S, + MAX8997_PMICIRQ_WTSR, + + MAX8997_MUICIRQ_ADCError, + MAX8997_MUICIRQ_ADCLow, + MAX8997_MUICIRQ_ADC, + + MAX8997_MUICIRQ_VBVolt, + MAX8997_MUICIRQ_DBChg, + MAX8997_MUICIRQ_DCDTmr, + MAX8997_MUICIRQ_ChgDetRun, + MAX8997_MUICIRQ_ChgTyp, + + MAX8997_MUICIRQ_OVP, + + MAX8997_IRQ_NR, +}; + +#define MAX8997_NUM_GPIO 12 +struct max8997_dev { + struct device *dev; + struct max8997_platform_data *pdata; + struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */ + struct i2c_client *rtc; /* slave addr 0x0c */ + struct i2c_client *haptic; /* slave addr 0x90 */ + struct i2c_client *muic; /* slave addr 0x4a */ + struct mutex iolock; + + unsigned long type; + struct platform_device *battery; /* battery control (not fuel gauge) */ + + int irq; + int ono; + struct irq_domain *irq_domain; + struct mutex irqlock; + int irq_masks_cur[MAX8997_IRQ_GROUP_NR]; + int irq_masks_cache[MAX8997_IRQ_GROUP_NR]; + + /* For hibernation */ + u8 reg_dump[MAX8997_REG_PMIC_END + MAX8997_MUIC_REG_END + + MAX8997_HAPTIC_REG_END]; + + bool gpio_status[MAX8997_NUM_GPIO]; +}; + +enum max8997_types { + TYPE_MAX8997, + TYPE_MAX8966, +}; + +extern int max8997_irq_init(struct max8997_dev *max8997); +extern void max8997_irq_exit(struct max8997_dev *max8997); +extern int max8997_irq_resume(struct max8997_dev *max8997); + +extern int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest); +extern int max8997_bulk_read(struct i2c_client *i2c, u8 reg, int count, + u8 *buf); +extern int max8997_write_reg(struct i2c_client *i2c, u8 reg, u8 value); +extern int max8997_bulk_write(struct i2c_client *i2c, u8 reg, int count, + u8 *buf); +extern int max8997_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask); + +#define MAX8997_GPIO_INT_BOTH (0x3 << 4) +#define MAX8997_GPIO_INT_RISE (0x2 << 4) +#define MAX8997_GPIO_INT_FALL (0x1 << 4) + +#define MAX8997_GPIO_INT_MASK (0x3 << 4) +#define MAX8997_GPIO_DATA_MASK (0x1 << 2) +#endif /* __LINUX_MFD_MAX8997_PRIV_H */ diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h new file mode 100644 index 000000000..3ae1fe743 --- /dev/null +++ b/include/linux/mfd/max8997.h @@ -0,0 +1,223 @@ +/* + * max8997.h - Driver for 
the Maxim 8997/8966 + * + * Copyright (C) 2009-2010 Samsung Electrnoics + * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * This driver is based on max8998.h + * + * MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices. + * Except Fuel Gauge, every device shares the same I2C bus and included in + * this mfd driver. Although the fuel gauge is included in the chip, it is + * excluded from the driver because a) it has a different I2C bus from + * others and b) it can be enabled simply by using MAX17042 driver. + */ + +#ifndef __LINUX_MFD_MAX8998_H +#define __LINUX_MFD_MAX8998_H + +#include + +/* MAX8997/8966 regulator IDs */ +enum max8998_regulators { + MAX8997_LDO1 = 0, + MAX8997_LDO2, + MAX8997_LDO3, + MAX8997_LDO4, + MAX8997_LDO5, + MAX8997_LDO6, + MAX8997_LDO7, + MAX8997_LDO8, + MAX8997_LDO9, + MAX8997_LDO10, + MAX8997_LDO11, + MAX8997_LDO12, + MAX8997_LDO13, + MAX8997_LDO14, + MAX8997_LDO15, + MAX8997_LDO16, + MAX8997_LDO17, + MAX8997_LDO18, + MAX8997_LDO21, + MAX8997_BUCK1, + MAX8997_BUCK2, + MAX8997_BUCK3, + MAX8997_BUCK4, + MAX8997_BUCK5, + MAX8997_BUCK6, + MAX8997_BUCK7, + MAX8997_EN32KHZ_AP, + MAX8997_EN32KHZ_CP, + MAX8997_ENVICHG, + MAX8997_ESAFEOUT1, + MAX8997_ESAFEOUT2, + MAX8997_CHARGER_CV, /* control MBCCV of MBCCTRL3 */ + MAX8997_CHARGER, /* charger current, MBCCTRL4 */ + MAX8997_CHARGER_TOPOFF, /* MBCCTRL5 */ + + MAX8997_REG_MAX, +}; + +struct max8997_regulator_data { + int id; + struct regulator_init_data *initdata; + struct device_node *reg_node; +}; + +struct max8997_muic_reg_data { + u8 addr; + u8 data; +}; + +/** + * struct max8997_muic_platform_data + * @init_data: array of max8997_muic_reg_data + * used for initializing registers of MAX8997 MUIC device + * @num_init_data: array size of init_data + */ +struct max8997_muic_platform_data { + struct max8997_muic_reg_data *init_data; + int num_init_data; + + /* Check cable state after certain delay */ + int detcable_delay_ms; + + /* + * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB + * h/w path of COMP2/COMN1 on CONTROL1 register. 
+ */ + int path_usb; + int path_uart; +}; + +enum max8997_haptic_motor_type { + MAX8997_HAPTIC_ERM, + MAX8997_HAPTIC_LRA, +}; + +enum max8997_haptic_pulse_mode { + MAX8997_EXTERNAL_MODE, + MAX8997_INTERNAL_MODE, +}; + +enum max8997_haptic_pwm_divisor { + MAX8997_PWM_DIVISOR_32, + MAX8997_PWM_DIVISOR_64, + MAX8997_PWM_DIVISOR_128, + MAX8997_PWM_DIVISOR_256, +}; + +/** + * max8997_haptic_platform_data + * @pwm_channel_id: channel number of PWM device + * valid for MAX8997_EXTERNAL_MODE + * @pwm_period: period in nano second for PWM device + * valid for MAX8997_EXTERNAL_MODE + * @type: motor type + * @mode: pulse mode + * MAX8997_EXTERNAL_MODE: external PWM device is used to control motor + * MAX8997_INTERNAL_MODE: internal pulse generator is used to control motor + * @pwm_divisor: divisor for external PWM device + * @internal_mode_pattern: internal mode pattern for internal mode + * [0 - 3]: valid pattern number + * @pattern_cycle: the number of cycles of the waveform + * for the internal mode pattern + * [0 - 15]: available cycles + * @pattern_signal_period: period of the waveform for the internal mode pattern + * [0 - 255]: available period + */ +struct max8997_haptic_platform_data { + unsigned int pwm_channel_id; + unsigned int pwm_period; + + enum max8997_haptic_motor_type type; + enum max8997_haptic_pulse_mode mode; + enum max8997_haptic_pwm_divisor pwm_divisor; + + unsigned int internal_mode_pattern; + unsigned int pattern_cycle; + unsigned int pattern_signal_period; +}; + +enum max8997_led_mode { + MAX8997_NONE, + MAX8997_FLASH_MODE, + MAX8997_MOVIE_MODE, + MAX8997_FLASH_PIN_CONTROL_MODE, + MAX8997_MOVIE_PIN_CONTROL_MODE, +}; + +/** + * struct max8997_led_platform_data + * The number of LED devices for MAX8997 is two + * @mode: LED mode for each LED device + * @brightness: initial brightness for each LED device + * range: + * [0 - 31]: MAX8997_FLASH_MODE and MAX8997_FLASH_PIN_CONTROL_MODE + * [0 - 15]: MAX8997_MOVIE_MODE and MAX8997_MOVIE_PIN_CONTROL_MODE + */ +struct max8997_led_platform_data { + enum max8997_led_mode mode[2]; + u8 brightness[2]; +}; + +struct max8997_platform_data { + /* IRQ */ + int ono; + + /* ---- PMIC ---- */ + struct max8997_regulator_data *regulators; + int num_regulators; + + /* + * SET1~3 DVS GPIOs control Buck1, 2, and 5 simultaneously. Therefore, + * With buckx_gpiodvs enabled, the buckx cannot be controlled + * independently. To control buckx (of 1, 2, and 5) independently, + * disable buckx_gpiodvs and control with BUCKxDVS1 register. + * + * When buckx_gpiodvs and bucky_gpiodvs are both enabled, set_voltage + * on buckx will change the voltage of bucky at the same time. 
+ * + */ + bool ignore_gpiodvs_side_effect; + int buck125_gpios[3]; /* GPIO of [0]SET1, [1]SET2, [2]SET3 */ + int buck125_default_idx; /* Default value of SET1, 2, 3 */ + unsigned int buck1_voltage[8]; /* buckx_voltage in uV */ + bool buck1_gpiodvs; + unsigned int buck2_voltage[8]; + bool buck2_gpiodvs; + unsigned int buck5_voltage[8]; + bool buck5_gpiodvs; + + /* ---- Charger control ---- */ + /* eoc stands for 'end of charge' */ + int eoc_mA; /* 50 ~ 200mA by 10mA step */ + /* charge Full Timeout */ + int timeout; /* 0 (no timeout), 5, 6, 7 hours */ + + /* ---- MUIC ---- */ + struct max8997_muic_platform_data *muic_pdata; + + /* ---- HAPTIC ---- */ + struct max8997_haptic_platform_data *haptic_pdata; + + /* RTC: Not implemented */ + /* ---- LED ---- */ + struct max8997_led_platform_data *led_pdata; +}; + +#endif /* __LINUX_MFD_MAX8998_H */ diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h new file mode 100644 index 000000000..d68ada502 --- /dev/null +++ b/include/linux/mfd/max8998-private.h @@ -0,0 +1,182 @@ +/* + * max8998-private.h - Voltage regulator driver for the Maxim 8998 + * + * Copyright (C) 2009-2010 Samsung Electrnoics + * Kyungmin Park + * Marek Szyprowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_MAX8998_PRIV_H +#define __LINUX_MFD_MAX8998_PRIV_H + +#define MAX8998_NUM_IRQ_REGS 4 + +/* MAX 8998 registers */ +enum { + MAX8998_REG_IRQ1, + MAX8998_REG_IRQ2, + MAX8998_REG_IRQ3, + MAX8998_REG_IRQ4, + MAX8998_REG_IRQM1, + MAX8998_REG_IRQM2, + MAX8998_REG_IRQM3, + MAX8998_REG_IRQM4, + MAX8998_REG_STATUS1, + MAX8998_REG_STATUS2, + MAX8998_REG_STATUSM1, + MAX8998_REG_STATUSM2, + MAX8998_REG_CHGR1, + MAX8998_REG_CHGR2, + MAX8998_REG_LDO_ACTIVE_DISCHARGE1, + MAX8998_REG_LDO_ACTIVE_DISCHARGE2, + MAX8998_REG_BUCK_ACTIVE_DISCHARGE3, + MAX8998_REG_ONOFF1, + MAX8998_REG_ONOFF2, + MAX8998_REG_ONOFF3, + MAX8998_REG_ONOFF4, + MAX8998_REG_BUCK1_VOLTAGE1, + MAX8998_REG_BUCK1_VOLTAGE2, + MAX8998_REG_BUCK1_VOLTAGE3, + MAX8998_REG_BUCK1_VOLTAGE4, + MAX8998_REG_BUCK2_VOLTAGE1, + MAX8998_REG_BUCK2_VOLTAGE2, + MAX8998_REG_BUCK3, + MAX8998_REG_BUCK4, + MAX8998_REG_LDO2_LDO3, + MAX8998_REG_LDO4, + MAX8998_REG_LDO5, + MAX8998_REG_LDO6, + MAX8998_REG_LDO7, + MAX8998_REG_LDO8_LDO9, + MAX8998_REG_LDO10_LDO11, + MAX8998_REG_LDO12, + MAX8998_REG_LDO13, + MAX8998_REG_LDO14, + MAX8998_REG_LDO15, + MAX8998_REG_LDO16, + MAX8998_REG_LDO17, + MAX8998_REG_BKCHR, + MAX8998_REG_LBCNFG1, + MAX8998_REG_LBCNFG2, +}; + +/* IRQ definitions */ +enum { + MAX8998_IRQ_DCINF, + MAX8998_IRQ_DCINR, + MAX8998_IRQ_JIGF, + MAX8998_IRQ_JIGR, + MAX8998_IRQ_PWRONF, + MAX8998_IRQ_PWRONR, + + MAX8998_IRQ_WTSREVNT, + MAX8998_IRQ_SMPLEVNT, + MAX8998_IRQ_ALARM1, + MAX8998_IRQ_ALARM0, + + MAX8998_IRQ_ONKEY1S, + MAX8998_IRQ_TOPOFFR, + MAX8998_IRQ_DCINOVPR, + MAX8998_IRQ_CHGRSTF, + MAX8998_IRQ_DONER, + 
MAX8998_IRQ_CHGFAULT, + + MAX8998_IRQ_LOBAT1, + MAX8998_IRQ_LOBAT2, + + MAX8998_IRQ_NR, +}; + +/* MAX8998 various variants */ +enum { + TYPE_MAX8998 = 0, /* Default */ + TYPE_LP3974, /* National version of MAX8998 */ + TYPE_LP3979, /* Added AVS */ +}; + +#define MAX8998_IRQ_DCINF_MASK (1 << 2) +#define MAX8998_IRQ_DCINR_MASK (1 << 3) +#define MAX8998_IRQ_JIGF_MASK (1 << 4) +#define MAX8998_IRQ_JIGR_MASK (1 << 5) +#define MAX8998_IRQ_PWRONF_MASK (1 << 6) +#define MAX8998_IRQ_PWRONR_MASK (1 << 7) + +#define MAX8998_IRQ_WTSREVNT_MASK (1 << 0) +#define MAX8998_IRQ_SMPLEVNT_MASK (1 << 1) +#define MAX8998_IRQ_ALARM1_MASK (1 << 2) +#define MAX8998_IRQ_ALARM0_MASK (1 << 3) + +#define MAX8998_IRQ_ONKEY1S_MASK (1 << 0) +#define MAX8998_IRQ_TOPOFFR_MASK (1 << 2) +#define MAX8998_IRQ_DCINOVPR_MASK (1 << 3) +#define MAX8998_IRQ_CHGRSTF_MASK (1 << 4) +#define MAX8998_IRQ_DONER_MASK (1 << 5) +#define MAX8998_IRQ_CHGFAULT_MASK (1 << 7) + +#define MAX8998_IRQ_LOBAT1_MASK (1 << 0) +#define MAX8998_IRQ_LOBAT2_MASK (1 << 1) + +#define MAX8998_ENRAMP (1 << 4) + +struct irq_domain; + +/** + * struct max8998_dev - max8998 master device for sub-drivers + * @dev: master device of the chip (can be used to access platform data) + * @pdata: platform data for the driver and subdrivers + * @i2c: i2c client private data for regulator + * @rtc: i2c client private data for rtc + * @iolock: mutex for serializing io access + * @irqlock: mutex for buslock + * @irq_base: base IRQ number for max8998, required for IRQs + * @irq: generic IRQ number for max8998 + * @ono: power onoff IRQ number for max8998 + * @irq_masks_cur: currently active value + * @irq_masks_cache: cached hardware value + * @type: indicate which max8998 "variant" is used + */ +struct max8998_dev { + struct device *dev; + struct max8998_platform_data *pdata; + struct i2c_client *i2c; + struct i2c_client *rtc; + struct mutex iolock; + struct mutex irqlock; + + unsigned int irq_base; + struct irq_domain *irq_domain; + int irq; + int ono; + u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS]; + u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS]; + unsigned long type; + bool wakeup; +}; + +int max8998_irq_init(struct max8998_dev *max8998); +void max8998_irq_exit(struct max8998_dev *max8998); +int max8998_irq_resume(struct max8998_dev *max8998); + +extern int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest); +extern int max8998_bulk_read(struct i2c_client *i2c, u8 reg, int count, + u8 *buf); +extern int max8998_write_reg(struct i2c_client *i2c, u8 reg, u8 value); +extern int max8998_bulk_write(struct i2c_client *i2c, u8 reg, int count, + u8 *buf); +extern int max8998_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask); + +#endif /* __LINUX_MFD_MAX8998_PRIV_H */ diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h new file mode 100644 index 000000000..e3956a654 --- /dev/null +++ b/include/linux/mfd/max8998.h @@ -0,0 +1,118 @@ +/* + * max8998.h - Voltage regulator driver for the Maxim 8998 + * + * Copyright (C) 2009-2010 Samsung Electrnoics + * Kyungmin Park + * Marek Szyprowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_MAX8998_H +#define __LINUX_MFD_MAX8998_H + +#include + +/* MAX 8998 regulator ids */ +enum { + MAX8998_LDO2 = 2, + MAX8998_LDO3, + MAX8998_LDO4, + MAX8998_LDO5, + MAX8998_LDO6, + MAX8998_LDO7, + MAX8998_LDO8, + MAX8998_LDO9, + MAX8998_LDO10, + MAX8998_LDO11, + MAX8998_LDO12, + MAX8998_LDO13, + MAX8998_LDO14, + MAX8998_LDO15, + MAX8998_LDO16, + MAX8998_LDO17, + MAX8998_BUCK1, + MAX8998_BUCK2, + MAX8998_BUCK3, + MAX8998_BUCK4, + MAX8998_EN32KHZ_AP, + MAX8998_EN32KHZ_CP, + MAX8998_ENVICHG, + MAX8998_ESAFEOUT1, + MAX8998_ESAFEOUT2, +}; + +/** + * max8998_regulator_data - regulator data + * @id: regulator id + * @initdata: regulator init data (contraints, supplies, ...) + * @reg_node: DT node of regulator (unused on non-DT platforms) + */ +struct max8998_regulator_data { + int id; + struct regulator_init_data *initdata; + struct device_node *reg_node; +}; + +/** + * struct max8998_board - packages regulator init data + * @regulators: array of defined regulators + * @num_regulators: number of regulators used + * @irq_base: base IRQ number for max8998, required for IRQs + * @ono: power onoff IRQ number for max8998 + * @buck_voltage_lock: Do NOT change the values of the following six + * registers set by buck?_voltage?. The voltage of BUCK1/2 cannot + * be other than the preset values. + * @buck1_voltage: BUCK1 DVS mode 1 voltage registers + * @buck2_voltage: BUCK2 DVS mode 2 voltage registers + * @buck1_set1: BUCK1 gpio pin 1 to set output voltage + * @buck1_set2: BUCK1 gpio pin 2 to set output voltage + * @buck1_default_idx: Default for BUCK1 gpio pin 1, 2 + * @buck2_set3: BUCK2 gpio pin to set output voltage + * @buck2_default_idx: Default for BUCK2 gpio pin. + * @wakeup: Allow to wake up from suspend + * @rtc_delay: LP3974 RTC chip bug that requires delay after a register + * write before reading it. + * @eoc: End of Charge Level in percent: 10% ~ 45% by 5% step + * If it equals 0, leave it unchanged. + * Otherwise, it is a invalid value. + * @restart: Restart Level in mV: 100, 150, 200, and -1 for disable. + * If it equals 0, leave it unchanged. + * Otherwise, it is a invalid value. + * @timeout: Full Timeout in hours: 5, 6, 7, and -1 for disable. + * If it equals 0, leave it unchanged. + * Otherwise, leave it unchanged. + */ +struct max8998_platform_data { + struct max8998_regulator_data *regulators; + int num_regulators; + unsigned int irq_base; + int ono; + bool buck_voltage_lock; + int buck1_voltage[4]; + int buck2_voltage[2]; + int buck1_set1; + int buck1_set2; + int buck1_default_idx; + int buck2_set3; + int buck2_default_idx; + bool wakeup; + bool rtc_delay; + int eoc; + int restart; + int timeout; +}; + +#endif /* __LINUX_MFD_MAX8998_H */ diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h new file mode 100644 index 000000000..4ff6137d8 --- /dev/null +++ b/include/linux/mfd/mc13783.h @@ -0,0 +1,90 @@ +/* + * Copyright 2010 Yong Shen + * Copyright 2009-2010 Pengutronix + * Uwe Kleine-Koenig + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
+ */ +#ifndef __LINUX_MFD_MC13783_H +#define __LINUX_MFD_MC13783_H + +#include + +#define MC13783_REG_SW1A 0 +#define MC13783_REG_SW1B 1 +#define MC13783_REG_SW2A 2 +#define MC13783_REG_SW2B 3 +#define MC13783_REG_SW3 4 +#define MC13783_REG_PLL 5 +#define MC13783_REG_VAUDIO 6 +#define MC13783_REG_VIOHI 7 +#define MC13783_REG_VIOLO 8 +#define MC13783_REG_VDIG 9 +#define MC13783_REG_VGEN 10 +#define MC13783_REG_VRFDIG 11 +#define MC13783_REG_VRFREF 12 +#define MC13783_REG_VRFCP 13 +#define MC13783_REG_VSIM 14 +#define MC13783_REG_VESIM 15 +#define MC13783_REG_VCAM 16 +#define MC13783_REG_VRFBG 17 +#define MC13783_REG_VVIB 18 +#define MC13783_REG_VRF1 19 +#define MC13783_REG_VRF2 20 +#define MC13783_REG_VMMC1 21 +#define MC13783_REG_VMMC2 22 +#define MC13783_REG_GPO1 23 +#define MC13783_REG_GPO2 24 +#define MC13783_REG_GPO3 25 +#define MC13783_REG_GPO4 26 +#define MC13783_REG_V1 27 +#define MC13783_REG_V2 28 +#define MC13783_REG_V3 29 +#define MC13783_REG_V4 30 +#define MC13783_REG_PWGT1SPI 31 +#define MC13783_REG_PWGT2SPI 32 + +#define MC13783_IRQ_ADCDONE MC13XXX_IRQ_ADCDONE +#define MC13783_IRQ_ADCBISDONE MC13XXX_IRQ_ADCBISDONE +#define MC13783_IRQ_TS MC13XXX_IRQ_TS +#define MC13783_IRQ_WHIGH 3 +#define MC13783_IRQ_WLOW 4 +#define MC13783_IRQ_CHGDET MC13XXX_IRQ_CHGDET +#define MC13783_IRQ_CHGOV 7 +#define MC13783_IRQ_CHGREV MC13XXX_IRQ_CHGREV +#define MC13783_IRQ_CHGSHORT MC13XXX_IRQ_CHGSHORT +#define MC13783_IRQ_CCCV MC13XXX_IRQ_CCCV +#define MC13783_IRQ_CHGCURR MC13XXX_IRQ_CHGCURR +#define MC13783_IRQ_BPON MC13XXX_IRQ_BPON +#define MC13783_IRQ_LOBATL MC13XXX_IRQ_LOBATL +#define MC13783_IRQ_LOBATH MC13XXX_IRQ_LOBATH +#define MC13783_IRQ_UDP 15 +#define MC13783_IRQ_USB 16 +#define MC13783_IRQ_ID 19 +#define MC13783_IRQ_SE1 21 +#define MC13783_IRQ_CKDET 22 +#define MC13783_IRQ_UDM 23 +#define MC13783_IRQ_1HZ MC13XXX_IRQ_1HZ +#define MC13783_IRQ_TODA MC13XXX_IRQ_TODA +#define MC13783_IRQ_ONOFD1 27 +#define MC13783_IRQ_ONOFD2 28 +#define MC13783_IRQ_ONOFD3 29 +#define MC13783_IRQ_SYSRST MC13XXX_IRQ_SYSRST +#define MC13783_IRQ_RTCRST MC13XXX_IRQ_RTCRST +#define MC13783_IRQ_PC MC13XXX_IRQ_PC +#define MC13783_IRQ_WARM MC13XXX_IRQ_WARM +#define MC13783_IRQ_MEMHLD MC13XXX_IRQ_MEMHLD +#define MC13783_IRQ_PWRRDY 35 +#define MC13783_IRQ_THWARNL MC13XXX_IRQ_THWARNL +#define MC13783_IRQ_THWARNH MC13XXX_IRQ_THWARNH +#define MC13783_IRQ_CLK MC13XXX_IRQ_CLK +#define MC13783_IRQ_SEMAF 39 +#define MC13783_IRQ_MC2B 41 +#define MC13783_IRQ_HSDET 42 +#define MC13783_IRQ_HSL 43 +#define MC13783_IRQ_ALSPTH 44 +#define MC13783_IRQ_AHSSHORT 45 + +#endif /* ifndef __LINUX_MFD_MC13783_H */ diff --git a/include/linux/mfd/mc13892.h b/include/linux/mfd/mc13892.h new file mode 100644 index 000000000..a00f2bec1 --- /dev/null +++ b/include/linux/mfd/mc13892.h @@ -0,0 +1,39 @@ +/* + * Copyright 2010 Yong Shen + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
+ */ + +#ifndef __LINUX_MFD_MC13892_H +#define __LINUX_MFD_MC13892_H + +#include + +#define MC13892_SW1 0 +#define MC13892_SW2 1 +#define MC13892_SW3 2 +#define MC13892_SW4 3 +#define MC13892_SWBST 4 +#define MC13892_VIOHI 5 +#define MC13892_VPLL 6 +#define MC13892_VDIG 7 +#define MC13892_VSD 8 +#define MC13892_VUSB2 9 +#define MC13892_VVIDEO 10 +#define MC13892_VAUDIO 11 +#define MC13892_VCAM 12 +#define MC13892_VGEN1 13 +#define MC13892_VGEN2 14 +#define MC13892_VGEN3 15 +#define MC13892_VUSB 16 +#define MC13892_GPO1 17 +#define MC13892_GPO2 18 +#define MC13892_GPO3 19 +#define MC13892_GPO4 20 +#define MC13892_PWGT1SPI 21 +#define MC13892_PWGT2SPI 22 +#define MC13892_VCOINCELL 23 + +#endif diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h new file mode 100644 index 000000000..2ad9bdc0a --- /dev/null +++ b/include/linux/mfd/mc13xxx.h @@ -0,0 +1,265 @@ +/* + * Copyright 2009-2010 Pengutronix + * Uwe Kleine-Koenig + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. + */ +#ifndef __LINUX_MFD_MC13XXX_H +#define __LINUX_MFD_MC13XXX_H + +#include + +struct mc13xxx; + +void mc13xxx_lock(struct mc13xxx *mc13xxx); +void mc13xxx_unlock(struct mc13xxx *mc13xxx); + +int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val); +int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val); +int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset, + u32 mask, u32 val); + +int mc13xxx_irq_request(struct mc13xxx *mc13xxx, int irq, + irq_handler_t handler, const char *name, void *dev); +int mc13xxx_irq_free(struct mc13xxx *mc13xxx, int irq, void *dev); + +int mc13xxx_irq_status(struct mc13xxx *mc13xxx, int irq, + int *enabled, int *pending); + +int mc13xxx_get_flags(struct mc13xxx *mc13xxx); + +int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, + unsigned int mode, unsigned int channel, + u8 ato, bool atox, unsigned int *sample); + +/* Deprecated calls */ +static inline int mc13xxx_irq_ack(struct mc13xxx *mc13xxx, int irq) +{ + return 0; +} + +static inline int mc13xxx_irq_request_nounmask(struct mc13xxx *mc13xxx, int irq, + irq_handler_t handler, + const char *name, void *dev) +{ + return mc13xxx_irq_request(mc13xxx, irq, handler, name, dev); +} + +int mc13xxx_irq_mask(struct mc13xxx *mc13xxx, int irq); +int mc13xxx_irq_unmask(struct mc13xxx *mc13xxx, int irq); + +#define MC13783_AUDIO_RX0 36 +#define MC13783_AUDIO_RX1 37 +#define MC13783_AUDIO_TX 38 +#define MC13783_SSI_NETWORK 39 +#define MC13783_AUDIO_CODEC 40 +#define MC13783_AUDIO_DAC 41 + +#define MC13XXX_IRQ_ADCDONE 0 +#define MC13XXX_IRQ_ADCBISDONE 1 +#define MC13XXX_IRQ_TS 2 +#define MC13XXX_IRQ_CHGDET 6 +#define MC13XXX_IRQ_CHGREV 8 +#define MC13XXX_IRQ_CHGSHORT 9 +#define MC13XXX_IRQ_CCCV 10 +#define MC13XXX_IRQ_CHGCURR 11 +#define MC13XXX_IRQ_BPON 12 +#define MC13XXX_IRQ_LOBATL 13 +#define MC13XXX_IRQ_LOBATH 14 +#define MC13XXX_IRQ_1HZ 24 +#define MC13XXX_IRQ_TODA 25 +#define MC13XXX_IRQ_SYSRST 30 +#define MC13XXX_IRQ_RTCRST 31 +#define MC13XXX_IRQ_PC 32 +#define MC13XXX_IRQ_WARM 33 +#define MC13XXX_IRQ_MEMHLD 34 +#define MC13XXX_IRQ_THWARNL 36 +#define MC13XXX_IRQ_THWARNH 37 +#define MC13XXX_IRQ_CLK 38 + +struct regulator_init_data; + +struct mc13xxx_regulator_init_data { + int id; + struct regulator_init_data *init_data; + struct device_node *node; +}; + +struct mc13xxx_regulator_platform_data { + int num_regulators; + struct 
mc13xxx_regulator_init_data *regulators; +}; + +enum { + /* MC13783 LED IDs */ + MC13783_LED_MD, + MC13783_LED_AD, + MC13783_LED_KP, + MC13783_LED_R1, + MC13783_LED_G1, + MC13783_LED_B1, + MC13783_LED_R2, + MC13783_LED_G2, + MC13783_LED_B2, + MC13783_LED_R3, + MC13783_LED_G3, + MC13783_LED_B3, + /* MC13892 LED IDs */ + MC13892_LED_MD, + MC13892_LED_AD, + MC13892_LED_KP, + MC13892_LED_R, + MC13892_LED_G, + MC13892_LED_B, + /* MC34708 LED IDs */ + MC34708_LED_R, + MC34708_LED_G, +}; + +struct mc13xxx_led_platform_data { + int id; + const char *name; + const char *default_trigger; +}; + +#define MAX_LED_CONTROL_REGS 6 + +/* MC13783 LED Control 0 */ +#define MC13783_LED_C0_ENABLE (1 << 0) +#define MC13783_LED_C0_TRIODE_MD (1 << 7) +#define MC13783_LED_C0_TRIODE_AD (1 << 8) +#define MC13783_LED_C0_TRIODE_KP (1 << 9) +#define MC13783_LED_C0_BOOST (1 << 10) +#define MC13783_LED_C0_ABMODE(x) (((x) & 0x7) << 11) +#define MC13783_LED_C0_ABREF(x) (((x) & 0x3) << 14) +/* MC13783 LED Control 1 */ +#define MC13783_LED_C1_TC1HALF (1 << 18) +#define MC13783_LED_C1_SLEWLIM (1 << 23) +/* MC13783 LED Control 2 */ +#define MC13783_LED_C2_CURRENT_MD(x) (((x) & 0x7) << 0) +#define MC13783_LED_C2_CURRENT_AD(x) (((x) & 0x7) << 3) +#define MC13783_LED_C2_CURRENT_KP(x) (((x) & 0x7) << 6) +#define MC13783_LED_C2_PERIOD(x) (((x) & 0x3) << 21) +#define MC13783_LED_C2_SLEWLIM (1 << 23) +/* MC13783 LED Control 3 */ +#define MC13783_LED_C3_CURRENT_R1(x) (((x) & 0x3) << 0) +#define MC13783_LED_C3_CURRENT_G1(x) (((x) & 0x3) << 2) +#define MC13783_LED_C3_CURRENT_B1(x) (((x) & 0x3) << 4) +#define MC13783_LED_C3_PERIOD(x) (((x) & 0x3) << 21) +#define MC13783_LED_C3_TRIODE_TC1 (1 << 23) +/* MC13783 LED Control 4 */ +#define MC13783_LED_C4_CURRENT_R2(x) (((x) & 0x3) << 0) +#define MC13783_LED_C4_CURRENT_G2(x) (((x) & 0x3) << 2) +#define MC13783_LED_C4_CURRENT_B2(x) (((x) & 0x3) << 4) +#define MC13783_LED_C4_PERIOD(x) (((x) & 0x3) << 21) +#define MC13783_LED_C4_TRIODE_TC2 (1 << 23) +/* MC13783 LED Control 5 */ +#define MC13783_LED_C5_CURRENT_R3(x) (((x) & 0x3) << 0) +#define MC13783_LED_C5_CURRENT_G3(x) (((x) & 0x3) << 2) +#define MC13783_LED_C5_CURRENT_B3(x) (((x) & 0x3) << 4) +#define MC13783_LED_C5_PERIOD(x) (((x) & 0x3) << 21) +#define MC13783_LED_C5_TRIODE_TC3 (1 << 23) +/* MC13892 LED Control 0 */ +#define MC13892_LED_C0_CURRENT_MD(x) (((x) & 0x7) << 9) +#define MC13892_LED_C0_CURRENT_AD(x) (((x) & 0x7) << 21) +/* MC13892 LED Control 1 */ +#define MC13892_LED_C1_CURRENT_KP(x) (((x) & 0x7) << 9) +/* MC13892 LED Control 2 */ +#define MC13892_LED_C2_CURRENT_R(x) (((x) & 0x7) << 9) +#define MC13892_LED_C2_CURRENT_G(x) (((x) & 0x7) << 21) +/* MC13892 LED Control 3 */ +#define MC13892_LED_C3_CURRENT_B(x) (((x) & 0x7) << 9) +/* MC34708 LED Control 0 */ +#define MC34708_LED_C0_CURRENT_R(x) (((x) & 0x3) << 9) +#define MC34708_LED_C0_CURRENT_G(x) (((x) & 0x3) << 21) + +struct mc13xxx_leds_platform_data { + struct mc13xxx_led_platform_data *led; + int num_leds; + u32 led_control[MAX_LED_CONTROL_REGS]; +}; + +#define MC13783_BUTTON_DBNC_0MS 0 +#define MC13783_BUTTON_DBNC_30MS 1 +#define MC13783_BUTTON_DBNC_150MS 2 +#define MC13783_BUTTON_DBNC_750MS 3 +#define MC13783_BUTTON_ENABLE (1 << 2) +#define MC13783_BUTTON_POL_INVERT (1 << 3) +#define MC13783_BUTTON_RESET_EN (1 << 4) + +struct mc13xxx_buttons_platform_data { + int b1on_flags; + unsigned short b1on_key; + int b2on_flags; + unsigned short b2on_key; + int b3on_flags; + unsigned short b3on_key; +}; + +#define MC13783_TS_ATO_FIRST false +#define MC13783_TS_ATO_EACH true + +struct 
mc13xxx_ts_platform_data { + /* Delay between Touchscreen polarization and ADC Conversion. + * Given in clock ticks of a 32 kHz clock which gives a granularity of + * about 30.5ms */ + u8 ato; + /* Use the ATO delay only for the first conversion or for each one */ + bool atox; +}; + +enum mc13783_ssi_port { + MC13783_SSI1_PORT, + MC13783_SSI2_PORT, +}; + +struct mc13xxx_codec_platform_data { + enum mc13783_ssi_port adc_ssi_port; + enum mc13783_ssi_port dac_ssi_port; +}; + +#define MC13XXX_USE_TOUCHSCREEN (1 << 0) +#define MC13XXX_USE_CODEC (1 << 1) +#define MC13XXX_USE_ADC (1 << 2) +#define MC13XXX_USE_RTC (1 << 3) + +struct mc13xxx_platform_data { + unsigned int flags; + + struct mc13xxx_regulator_platform_data regulators; + struct mc13xxx_leds_platform_data *leds; + struct mc13xxx_buttons_platform_data *buttons; + struct mc13xxx_ts_platform_data touch; + struct mc13xxx_codec_platform_data *codec; +}; + +#define MC13XXX_ADC_MODE_TS 1 +#define MC13XXX_ADC_MODE_SINGLE_CHAN 2 +#define MC13XXX_ADC_MODE_MULT_CHAN 3 + +#define MC13XXX_ADC0 43 +#define MC13XXX_ADC0_LICELLCON (1 << 0) +#define MC13XXX_ADC0_CHRGICON (1 << 1) +#define MC13XXX_ADC0_BATICON (1 << 2) +#define MC13XXX_ADC0_ADIN7SEL_DIE (1 << 4) +#define MC13XXX_ADC0_ADIN7SEL_UID (2 << 4) +#define MC13XXX_ADC0_ADREFEN (1 << 10) +#define MC13XXX_ADC0_TSMOD0 (1 << 12) +#define MC13XXX_ADC0_TSMOD1 (1 << 13) +#define MC13XXX_ADC0_TSMOD2 (1 << 14) +#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15) +#define MC13XXX_ADC0_ADINC1 (1 << 16) +#define MC13XXX_ADC0_ADINC2 (1 << 17) + +#define MC13XXX_ADC0_TSMOD_MASK (MC13XXX_ADC0_TSMOD0 | \ + MC13XXX_ADC0_TSMOD1 | \ + MC13XXX_ADC0_TSMOD2) + +#define MC13XXX_ADC0_CONFIG_MASK (MC13XXX_ADC0_TSMOD_MASK | \ + MC13XXX_ADC0_LICELLCON | \ + MC13XXX_ADC0_CHRGICON | \ + MC13XXX_ADC0_BATICON) + +#endif /* ifndef __LINUX_MFD_MC13XXX_H */ diff --git a/include/linux/mfd/mcp.h b/include/linux/mfd/mcp.h new file mode 100644 index 000000000..f68295304 --- /dev/null +++ b/include/linux/mfd/mcp.h @@ -0,0 +1,66 @@ +/* + * linux/drivers/mfd/mcp.h + * + * Copyright (C) 2001 Russell King, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
+ */ +#ifndef MCP_H +#define MCP_H + +#include + +struct mcp_ops; + +struct mcp { + struct module *owner; + struct mcp_ops *ops; + spinlock_t lock; + int use_count; + unsigned int sclk_rate; + unsigned int rw_timeout; + struct device attached_device; +}; + +struct mcp_ops { + void (*set_telecom_divisor)(struct mcp *, unsigned int); + void (*set_audio_divisor)(struct mcp *, unsigned int); + void (*reg_write)(struct mcp *, unsigned int, unsigned int); + unsigned int (*reg_read)(struct mcp *, unsigned int); + void (*enable)(struct mcp *); + void (*disable)(struct mcp *); +}; + +void mcp_set_telecom_divisor(struct mcp *, unsigned int); +void mcp_set_audio_divisor(struct mcp *, unsigned int); +void mcp_reg_write(struct mcp *, unsigned int, unsigned int); +unsigned int mcp_reg_read(struct mcp *, unsigned int); +void mcp_enable(struct mcp *); +void mcp_disable(struct mcp *); +#define mcp_get_sclk_rate(mcp) ((mcp)->sclk_rate) + +struct mcp *mcp_host_alloc(struct device *, size_t); +int mcp_host_add(struct mcp *, void *); +void mcp_host_del(struct mcp *); +void mcp_host_free(struct mcp *); + +struct mcp_driver { + struct device_driver drv; + int (*probe)(struct mcp *); + void (*remove)(struct mcp *); +}; + +int mcp_driver_register(struct mcp_driver *); +void mcp_driver_unregister(struct mcp_driver *); + +#define mcp_get_drvdata(mcp) dev_get_drvdata(&(mcp)->attached_device) +#define mcp_set_drvdata(mcp,d) dev_set_drvdata(&(mcp)->attached_device, d) + +static inline void *mcp_priv(struct mcp *mcp) +{ + return mcp + 1; +} + +#endif diff --git a/include/linux/mfd/menelaus.h b/include/linux/mfd/menelaus.h new file mode 100644 index 000000000..ce489aba8 --- /dev/null +++ b/include/linux/mfd/menelaus.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Functions to access Menelaus power management chip + */ + +#ifndef __ASM_ARCH_MENELAUS_H +#define __ASM_ARCH_MENELAUS_H + +struct device; + +struct menelaus_platform_data { + int (* late_init)(struct device *dev); +}; + +extern int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask), + void *data); +extern void menelaus_unregister_mmc_callback(void); +extern int menelaus_set_mmc_opendrain(int slot, int enable); +extern int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_on); + +extern int menelaus_set_vmem(unsigned int mV); +extern int menelaus_set_vio(unsigned int mV); +extern int menelaus_set_vmmc(unsigned int mV); +extern int menelaus_set_vaux(unsigned int mV); +extern int menelaus_set_vdcdc(int dcdc, unsigned int mV); +extern int menelaus_set_slot_sel(int enable); +extern int menelaus_get_slot_pin_states(void); +extern int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV); + +#define EN_VPLL_SLEEP (1 << 7) +#define EN_VMMC_SLEEP (1 << 6) +#define EN_VAUX_SLEEP (1 << 5) +#define EN_VIO_SLEEP (1 << 4) +#define EN_VMEM_SLEEP (1 << 3) +#define EN_DC3_SLEEP (1 << 2) +#define EN_DC2_SLEEP (1 << 1) +#define EN_VC_SLEEP (1 << 0) + +extern int menelaus_set_regulator_sleep(int enable, u32 val); + +#endif diff --git a/include/linux/mfd/motorola-cpcap.h b/include/linux/mfd/motorola-cpcap.h new file mode 100644 index 000000000..aefc49cb7 --- /dev/null +++ b/include/linux/mfd/motorola-cpcap.h @@ -0,0 +1,297 @@ +/* + * The register defines are based on earlier cpcap.h in Motorola Linux kernel + * tree. + * + * Copyright (C) 2007-2009 Motorola, Inc. 
+ * + * Rewritten for the real register offsets instead of enumeration + * to make the defines usable with Linux kernel regmap support + * + * Copyright (C) 2016 Tony Lindgren + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +#define CPCAP_VENDOR_ST 0 +#define CPCAP_VENDOR_TI 1 + +#define CPCAP_REVISION_MAJOR(r) (((r) >> 4) + 1) +#define CPCAP_REVISION_MINOR(r) ((r) & 0xf) + +#define CPCAP_REVISION_1_0 0x08 +#define CPCAP_REVISION_1_1 0x09 +#define CPCAP_REVISION_2_0 0x10 +#define CPCAP_REVISION_2_1 0x11 + +/* CPCAP registers */ +#define CPCAP_REG_INT1 0x0000 /* Interrupt 1 */ +#define CPCAP_REG_INT2 0x0004 /* Interrupt 2 */ +#define CPCAP_REG_INT3 0x0008 /* Interrupt 3 */ +#define CPCAP_REG_INT4 0x000c /* Interrupt 4 */ +#define CPCAP_REG_INTM1 0x0010 /* Interrupt Mask 1 */ +#define CPCAP_REG_INTM2 0x0014 /* Interrupt Mask 2 */ +#define CPCAP_REG_INTM3 0x0018 /* Interrupt Mask 3 */ +#define CPCAP_REG_INTM4 0x001c /* Interrupt Mask 4 */ +#define CPCAP_REG_INTS1 0x0020 /* Interrupt Sense 1 */ +#define CPCAP_REG_INTS2 0x0024 /* Interrupt Sense 2 */ +#define CPCAP_REG_INTS3 0x0028 /* Interrupt Sense 3 */ +#define CPCAP_REG_INTS4 0x002c /* Interrupt Sense 4 */ +#define CPCAP_REG_ASSIGN1 0x0030 /* Resource Assignment 1 */ +#define CPCAP_REG_ASSIGN2 0x0034 /* Resource Assignment 2 */ +#define CPCAP_REG_ASSIGN3 0x0038 /* Resource Assignment 3 */ +#define CPCAP_REG_ASSIGN4 0x003c /* Resource Assignment 4 */ +#define CPCAP_REG_ASSIGN5 0x0040 /* Resource Assignment 5 */ +#define CPCAP_REG_ASSIGN6 0x0044 /* Resource Assignment 6 */ +#define CPCAP_REG_VERSC1 0x0048 /* Version Control 1 */ +#define CPCAP_REG_VERSC2 0x004c /* Version Control 2 */ + +#define CPCAP_REG_MI1 0x0200 /* Macro Interrupt 1 */ +#define CPCAP_REG_MIM1 0x0204 /* Macro Interrupt Mask 1 */ +#define CPCAP_REG_MI2 0x0208 /* Macro Interrupt 2 */ +#define CPCAP_REG_MIM2 0x020c /* Macro Interrupt Mask 2 */ +#define CPCAP_REG_UCC1 0x0210 /* UC Control 1 */ +#define CPCAP_REG_UCC2 0x0214 /* UC Control 2 */ + +#define CPCAP_REG_PC1 0x021c /* Power Cut 1 */ +#define CPCAP_REG_PC2 0x0220 /* Power Cut 2 */ +#define CPCAP_REG_BPEOL 0x0224 /* BP and EOL */ +#define CPCAP_REG_PGC 0x0228 /* Power Gate and Control */ +#define CPCAP_REG_MT1 0x022c /* Memory Transfer 1 */ +#define CPCAP_REG_MT2 0x0230 /* Memory Transfer 2 */ +#define CPCAP_REG_MT3 0x0234 /* Memory Transfer 3 */ +#define CPCAP_REG_PF 0x0238 /* Print Format */ + +#define CPCAP_REG_SCC 0x0400 /* System Clock Control */ +#define CPCAP_REG_SW1 0x0404 /* Stop Watch 1 */ +#define CPCAP_REG_SW2 0x0408 /* Stop Watch 2 */ +#define CPCAP_REG_UCTM 0x040c /* UC Turbo Mode */ +#define CPCAP_REG_TOD1 0x0410 /* Time of Day 1 */ +#define CPCAP_REG_TOD2 0x0414 /* Time of Day 2 */ +#define CPCAP_REG_TODA1 0x0418 /* Time of Day Alarm 1 */ +#define CPCAP_REG_TODA2 0x041c /* Time of Day Alarm 2 */ +#define CPCAP_REG_DAY 0x0420 /* Day */ +#define CPCAP_REG_DAYA 0x0424 /* Day Alarm */ +#define CPCAP_REG_VAL1 0x0428 /* Validity 1 */ +#define CPCAP_REG_VAL2 0x042c /* Validity 2 */ + +#define CPCAP_REG_SDVSPLL 0x0600 /* Switcher DVS and PLL */ +#define CPCAP_REG_SI2CC1 0x0604 /* Switcher I2C Control 1 */ +#define CPCAP_REG_Si2CC2 0x0608 /* Switcher I2C Control 2 */ +#define CPCAP_REG_S1C1 0x060c /* Switcher 1 Control 1 */ +#define CPCAP_REG_S1C2 0x0610 /* Switcher 1 Control 2 */ +#define CPCAP_REG_S2C1 0x0614 /* Switcher 2 Control 1 */ 
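/*
 * Editorial example, not part of the upstream header: a minimal sketch of
 * decoding a raw CPCAP version value with the CPCAP_REVISION_* helpers
 * defined earlier in this file.  For CPCAP_REVISION_2_1 (0x11), MAJOR gives
 * (0x11 >> 4) + 1 = 2 and MINOR gives 0x11 & 0xf = 1, i.e. revision "2.1".
 * The function name is hypothetical; dev_info() and struct device come from
 * <linux/device.h>, which the real header pulls in.
 */
static inline void cpcap_example_log_revision(struct device *dev, u16 rev)
{
	dev_info(dev, "CPCAP revision %d.%d\n",
		 CPCAP_REVISION_MAJOR(rev), CPCAP_REVISION_MINOR(rev));
}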
+#define CPCAP_REG_S2C2 0x0618 /* Switcher 2 Control 2 */ +#define CPCAP_REG_S3C 0x061c /* Switcher 3 Control */ +#define CPCAP_REG_S4C1 0x0620 /* Switcher 4 Control 1 */ +#define CPCAP_REG_S4C2 0x0624 /* Switcher 4 Control 2 */ +#define CPCAP_REG_S5C 0x0628 /* Switcher 5 Control */ +#define CPCAP_REG_S6C 0x062c /* Switcher 6 Control */ +#define CPCAP_REG_VCAMC 0x0630 /* VCAM Control */ +#define CPCAP_REG_VCSIC 0x0634 /* VCSI Control */ +#define CPCAP_REG_VDACC 0x0638 /* VDAC Control */ +#define CPCAP_REG_VDIGC 0x063c /* VDIG Control */ +#define CPCAP_REG_VFUSEC 0x0640 /* VFUSE Control */ +#define CPCAP_REG_VHVIOC 0x0644 /* VHVIO Control */ +#define CPCAP_REG_VSDIOC 0x0648 /* VSDIO Control */ +#define CPCAP_REG_VPLLC 0x064c /* VPLL Control */ +#define CPCAP_REG_VRF1C 0x0650 /* VRF1 Control */ +#define CPCAP_REG_VRF2C 0x0654 /* VRF2 Control */ +#define CPCAP_REG_VRFREFC 0x0658 /* VRFREF Control */ +#define CPCAP_REG_VWLAN1C 0x065c /* VWLAN1 Control */ +#define CPCAP_REG_VWLAN2C 0x0660 /* VWLAN2 Control */ +#define CPCAP_REG_VSIMC 0x0664 /* VSIM Control */ +#define CPCAP_REG_VVIBC 0x0668 /* VVIB Control */ +#define CPCAP_REG_VUSBC 0x066c /* VUSB Control */ +#define CPCAP_REG_VUSBINT1C 0x0670 /* VUSBINT1 Control */ +#define CPCAP_REG_VUSBINT2C 0x0674 /* VUSBINT2 Control */ +#define CPCAP_REG_URT 0x0678 /* Useroff Regulator Trigger */ +#define CPCAP_REG_URM1 0x067c /* Useroff Regulator Mask 1 */ +#define CPCAP_REG_URM2 0x0680 /* Useroff Regulator Mask 2 */ + +#define CPCAP_REG_VAUDIOC 0x0800 /* VAUDIO Control */ +#define CPCAP_REG_CC 0x0804 /* Codec Control */ +#define CPCAP_REG_CDI 0x0808 /* Codec Digital Interface */ +#define CPCAP_REG_SDAC 0x080c /* Stereo DAC */ +#define CPCAP_REG_SDACDI 0x0810 /* Stereo DAC Digital Interface */ +#define CPCAP_REG_TXI 0x0814 /* TX Inputs */ +#define CPCAP_REG_TXMP 0x0818 /* TX MIC PGA's */ +#define CPCAP_REG_RXOA 0x081c /* RX Output Amplifiers */ +#define CPCAP_REG_RXVC 0x0820 /* RX Volume Control */ +#define CPCAP_REG_RXCOA 0x0824 /* RX Codec to Output Amps */ +#define CPCAP_REG_RXSDOA 0x0828 /* RX Stereo DAC to Output Amps */ +#define CPCAP_REG_RXEPOA 0x082c /* RX External PGA to Output Amps */ +#define CPCAP_REG_RXLL 0x0830 /* RX Low Latency */ +#define CPCAP_REG_A2LA 0x0834 /* A2 Loudspeaker Amplifier */ +#define CPCAP_REG_MIPIS1 0x0838 /* MIPI Slimbus 1 */ +#define CPCAP_REG_MIPIS2 0x083c /* MIPI Slimbus 2 */ +#define CPCAP_REG_MIPIS3 0x0840 /* MIPI Slimbus 3. */ +#define CPCAP_REG_LVAB 0x0844 /* LMR Volume and A4 Balanced. 
*/ + +#define CPCAP_REG_CCC1 0x0a00 /* Coulomb Counter Control 1 */ +#define CPCAP_REG_CRM 0x0a04 /* Charger and Reverse Mode */ +#define CPCAP_REG_CCCC2 0x0a08 /* Coincell and Coulomb Ctr Ctrl 2 */ +#define CPCAP_REG_CCS1 0x0a0c /* Coulomb Counter Sample 1 */ +#define CPCAP_REG_CCS2 0x0a10 /* Coulomb Counter Sample 2 */ +#define CPCAP_REG_CCA1 0x0a14 /* Coulomb Counter Accumulator 1 */ +#define CPCAP_REG_CCA2 0x0a18 /* Coulomb Counter Accumulator 2 */ +#define CPCAP_REG_CCM 0x0a1c /* Coulomb Counter Mode */ +#define CPCAP_REG_CCO 0x0a20 /* Coulomb Counter Offset */ +#define CPCAP_REG_CCI 0x0a24 /* Coulomb Counter Integrator */ + +#define CPCAP_REG_ADCC1 0x0c00 /* A/D Converter Configuration 1 */ +#define CPCAP_REG_ADCC2 0x0c04 /* A/D Converter Configuration 2 */ +#define CPCAP_REG_ADCD0 0x0c08 /* A/D Converter Data 0 */ +#define CPCAP_REG_ADCD1 0x0c0c /* A/D Converter Data 1 */ +#define CPCAP_REG_ADCD2 0x0c10 /* A/D Converter Data 2 */ +#define CPCAP_REG_ADCD3 0x0c14 /* A/D Converter Data 3 */ +#define CPCAP_REG_ADCD4 0x0c18 /* A/D Converter Data 4 */ +#define CPCAP_REG_ADCD5 0x0c1c /* A/D Converter Data 5 */ +#define CPCAP_REG_ADCD6 0x0c20 /* A/D Converter Data 6 */ +#define CPCAP_REG_ADCD7 0x0c24 /* A/D Converter Data 7 */ +#define CPCAP_REG_ADCAL1 0x0c28 /* A/D Converter Calibration 1 */ +#define CPCAP_REG_ADCAL2 0x0c2c /* A/D Converter Calibration 2 */ + +#define CPCAP_REG_USBC1 0x0e00 /* USB Control 1 */ +#define CPCAP_REG_USBC2 0x0e04 /* USB Control 2 */ +#define CPCAP_REG_USBC3 0x0e08 /* USB Control 3 */ +#define CPCAP_REG_UVIDL 0x0e0c /* ULPI Vendor ID Low */ +#define CPCAP_REG_UVIDH 0x0e10 /* ULPI Vendor ID High */ +#define CPCAP_REG_UPIDL 0x0e14 /* ULPI Product ID Low */ +#define CPCAP_REG_UPIDH 0x0e18 /* ULPI Product ID High */ +#define CPCAP_REG_UFC1 0x0e1c /* ULPI Function Control 1 */ +#define CPCAP_REG_UFC2 0x0e20 /* ULPI Function Control 2 */ +#define CPCAP_REG_UFC3 0x0e24 /* ULPI Function Control 3 */ +#define CPCAP_REG_UIC1 0x0e28 /* ULPI Interface Control 1 */ +#define CPCAP_REG_UIC2 0x0e2c /* ULPI Interface Control 2 */ +#define CPCAP_REG_UIC3 0x0e30 /* ULPI Interface Control 3 */ +#define CPCAP_REG_USBOTG1 0x0e34 /* USB OTG Control 1 */ +#define CPCAP_REG_USBOTG2 0x0e38 /* USB OTG Control 2 */ +#define CPCAP_REG_USBOTG3 0x0e3c /* USB OTG Control 3 */ +#define CPCAP_REG_UIER1 0x0e40 /* USB Interrupt Enable Rising 1 */ +#define CPCAP_REG_UIER2 0x0e44 /* USB Interrupt Enable Rising 2 */ +#define CPCAP_REG_UIER3 0x0e48 /* USB Interrupt Enable Rising 3 */ +#define CPCAP_REG_UIEF1 0x0e4c /* USB Interrupt Enable Falling 1 */ +#define CPCAP_REG_UIEF2 0x0e50 /* USB Interrupt Enable Falling 1 */ +#define CPCAP_REG_UIEF3 0x0e54 /* USB Interrupt Enable Falling 1 */ +#define CPCAP_REG_UIS 0x0e58 /* USB Interrupt Status */ +#define CPCAP_REG_UIL 0x0e5c /* USB Interrupt Latch */ +#define CPCAP_REG_USBD 0x0e60 /* USB Debug */ +#define CPCAP_REG_SCR1 0x0e64 /* Scratch 1 */ +#define CPCAP_REG_SCR2 0x0e68 /* Scratch 2 */ +#define CPCAP_REG_SCR3 0x0e6c /* Scratch 3 */ + +#define CPCAP_REG_VMC 0x0eac /* Video Mux Control */ +#define CPCAP_REG_OWDC 0x0eb0 /* One Wire Device Control */ +#define CPCAP_REG_GPIO0 0x0eb4 /* GPIO 0 Control */ + +#define CPCAP_REG_GPIO1 0x0ebc /* GPIO 1 Control */ + +#define CPCAP_REG_GPIO2 0x0ec4 /* GPIO 2 Control */ + +#define CPCAP_REG_GPIO3 0x0ecc /* GPIO 3 Control */ + +#define CPCAP_REG_GPIO4 0x0ed4 /* GPIO 4 Control */ + +#define CPCAP_REG_GPIO5 0x0edc /* GPIO 5 Control */ + +#define CPCAP_REG_GPIO6 0x0ee4 /* GPIO 6 Control */ + +#define CPCAP_REG_MDLC 
0x1000 /* Main Display Lighting Control */ +#define CPCAP_REG_KLC 0x1004 /* Keypad Lighting Control */ +#define CPCAP_REG_ADLC 0x1008 /* Aux Display Lighting Control */ +#define CPCAP_REG_REDC 0x100c /* Red Triode Control */ +#define CPCAP_REG_GREENC 0x1010 /* Green Triode Control */ +#define CPCAP_REG_BLUEC 0x1014 /* Blue Triode Control */ +#define CPCAP_REG_CFC 0x1018 /* Camera Flash Control */ +#define CPCAP_REG_ABC 0x101c /* Adaptive Boost Control */ +#define CPCAP_REG_BLEDC 0x1020 /* Bluetooth LED Control */ +#define CPCAP_REG_CLEDC 0x1024 /* Camera Privacy LED Control */ + +#define CPCAP_REG_OW1C 0x1200 /* One Wire 1 Command */ +#define CPCAP_REG_OW1D 0x1204 /* One Wire 1 Data */ +#define CPCAP_REG_OW1I 0x1208 /* One Wire 1 Interrupt */ +#define CPCAP_REG_OW1IE 0x120c /* One Wire 1 Interrupt Enable */ + +#define CPCAP_REG_OW1 0x1214 /* One Wire 1 Control */ + +#define CPCAP_REG_OW2C 0x1220 /* One Wire 2 Command */ +#define CPCAP_REG_OW2D 0x1224 /* One Wire 2 Data */ +#define CPCAP_REG_OW2I 0x1228 /* One Wire 2 Interrupt */ +#define CPCAP_REG_OW2IE 0x122c /* One Wire 2 Interrupt Enable */ + +#define CPCAP_REG_OW2 0x1234 /* One Wire 2 Control */ + +#define CPCAP_REG_OW3C 0x1240 /* One Wire 3 Command */ +#define CPCAP_REG_OW3D 0x1244 /* One Wire 3 Data */ +#define CPCAP_REG_OW3I 0x1248 /* One Wire 3 Interrupt */ +#define CPCAP_REG_OW3IE 0x124c /* One Wire 3 Interrupt Enable */ + +#define CPCAP_REG_OW3 0x1254 /* One Wire 3 Control */ +#define CPCAP_REG_GCAIC 0x1258 /* GCAI Clock Control */ +#define CPCAP_REG_GCAIM 0x125c /* GCAI GPIO Mode */ +#define CPCAP_REG_LGDIR 0x1260 /* LMR GCAI GPIO Direction */ +#define CPCAP_REG_LGPU 0x1264 /* LMR GCAI GPIO Pull-up */ +#define CPCAP_REG_LGPIN 0x1268 /* LMR GCAI GPIO Pin */ +#define CPCAP_REG_LGMASK 0x126c /* LMR GCAI GPIO Mask */ +#define CPCAP_REG_LDEB 0x1270 /* LMR Debounce Settings */ +#define CPCAP_REG_LGDET 0x1274 /* LMR GCAI Detach Detect */ +#define CPCAP_REG_LMISC 0x1278 /* LMR Misc Bits */ +#define CPCAP_REG_LMACE 0x127c /* LMR Mace IC Support */ + +#define CPCAP_REG_TEST 0x7c00 /* Test */ + +#define CPCAP_REG_ST_TEST1 0x7d08 /* ST Test1 */ + +#define CPCAP_REG_ST_TEST2 0x7d18 /* ST Test2 */ + +/* + * Helpers for child devices to check the revision and vendor. + * + * REVISIT: No documentation for the bits below, please update + * to use proper names for defines when available. + */ + +static inline int cpcap_get_revision(struct device *dev, + struct regmap *regmap, + u16 *revision) +{ + unsigned int val; + int ret; + + ret = regmap_read(regmap, CPCAP_REG_VERSC1, &val); + if (ret) { + dev_err(dev, "Could not read revision\n"); + + return ret; + } + + *revision = ((val >> 3) & 0x7) | ((val << 3) & 0x38); + + return 0; +} + +static inline int cpcap_get_vendor(struct device *dev, + struct regmap *regmap, + u16 *vendor) +{ + unsigned int val; + int ret; + + ret = regmap_read(regmap, CPCAP_REG_VERSC1, &val); + if (ret) { + dev_err(dev, "Could not read vendor\n"); + + return ret; + } + + *vendor = (val >> 6) & 0x7; + + return 0; +} + +extern int cpcap_sense_virq(struct regmap *regmap, int virq); diff --git a/include/linux/mfd/mt6323/core.h b/include/linux/mfd/mt6323/core.h new file mode 100644 index 000000000..06d0ec3b1 --- /dev/null +++ b/include/linux/mfd/mt6323/core.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2016 Chen Zhong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __MFD_MT6323_CORE_H__ +#define __MFD_MT6323_CORE_H__ + +enum MT6323_IRQ_STATUS_numbers { + MT6323_IRQ_STATUS_SPKL_AB = 0, + MT6323_IRQ_STATUS_SPKL, + MT6323_IRQ_STATUS_BAT_L, + MT6323_IRQ_STATUS_BAT_H, + MT6323_IRQ_STATUS_WATCHDOG, + MT6323_IRQ_STATUS_PWRKEY, + MT6323_IRQ_STATUS_THR_L, + MT6323_IRQ_STATUS_THR_H, + MT6323_IRQ_STATUS_VBATON_UNDET, + MT6323_IRQ_STATUS_BVALID_DET, + MT6323_IRQ_STATUS_CHRDET, + MT6323_IRQ_STATUS_OV, + MT6323_IRQ_STATUS_LDO = 16, + MT6323_IRQ_STATUS_FCHRKEY, + MT6323_IRQ_STATUS_ACCDET, + MT6323_IRQ_STATUS_AUDIO, + MT6323_IRQ_STATUS_RTC, + MT6323_IRQ_STATUS_VPROC, + MT6323_IRQ_STATUS_VSYS, + MT6323_IRQ_STATUS_VPA, + MT6323_IRQ_STATUS_NR, +}; + +#endif /* __MFD_MT6323_CORE_H__ */ diff --git a/include/linux/mfd/mt6323/registers.h b/include/linux/mfd/mt6323/registers.h new file mode 100644 index 000000000..160f3c0e2 --- /dev/null +++ b/include/linux/mfd/mt6323/registers.h @@ -0,0 +1,408 @@ +/* + * Copyright (c) 2016 Chen Zhong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MFD_MT6323_REGISTERS_H__ +#define __MFD_MT6323_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6323_CHR_CON0 0x0000 +#define MT6323_CHR_CON1 0x0002 +#define MT6323_CHR_CON2 0x0004 +#define MT6323_CHR_CON3 0x0006 +#define MT6323_CHR_CON4 0x0008 +#define MT6323_CHR_CON5 0x000A +#define MT6323_CHR_CON6 0x000C +#define MT6323_CHR_CON7 0x000E +#define MT6323_CHR_CON8 0x0010 +#define MT6323_CHR_CON9 0x0012 +#define MT6323_CHR_CON10 0x0014 +#define MT6323_CHR_CON11 0x0016 +#define MT6323_CHR_CON12 0x0018 +#define MT6323_CHR_CON13 0x001A +#define MT6323_CHR_CON14 0x001C +#define MT6323_CHR_CON15 0x001E +#define MT6323_CHR_CON16 0x0020 +#define MT6323_CHR_CON17 0x0022 +#define MT6323_CHR_CON18 0x0024 +#define MT6323_CHR_CON19 0x0026 +#define MT6323_CHR_CON20 0x0028 +#define MT6323_CHR_CON21 0x002A +#define MT6323_CHR_CON22 0x002C +#define MT6323_CHR_CON23 0x002E +#define MT6323_CHR_CON24 0x0030 +#define MT6323_CHR_CON25 0x0032 +#define MT6323_CHR_CON26 0x0034 +#define MT6323_CHR_CON27 0x0036 +#define MT6323_CHR_CON28 0x0038 +#define MT6323_CHR_CON29 0x003A +#define MT6323_STRUP_CON0 0x003C +#define MT6323_STRUP_CON2 0x003E +#define MT6323_STRUP_CON3 0x0040 +#define MT6323_STRUP_CON4 0x0042 +#define MT6323_STRUP_CON5 0x0044 +#define MT6323_STRUP_CON6 0x0046 +#define MT6323_STRUP_CON7 0x0048 +#define MT6323_STRUP_CON8 0x004A +#define MT6323_STRUP_CON9 0x004C +#define MT6323_STRUP_CON10 0x004E +#define MT6323_STRUP_CON11 0x0050 +#define MT6323_SPK_CON0 0x0052 +#define MT6323_SPK_CON1 0x0054 +#define MT6323_SPK_CON2 0x0056 +#define MT6323_SPK_CON6 0x005E +#define MT6323_SPK_CON7 0x0060 +#define MT6323_SPK_CON8 0x0062 +#define MT6323_SPK_CON9 0x0064 +#define MT6323_SPK_CON10 0x0066 +#define MT6323_SPK_CON11 0x0068 +#define MT6323_SPK_CON12 0x006A +#define MT6323_CID 0x0100 +#define MT6323_TOP_CKPDN0 0x0102 +#define MT6323_TOP_CKPDN0_SET 0x0104 +#define MT6323_TOP_CKPDN0_CLR 0x0106 +#define MT6323_TOP_CKPDN1 0x0108 +#define MT6323_TOP_CKPDN1_SET 0x010A +#define MT6323_TOP_CKPDN1_CLR 0x010C +#define MT6323_TOP_CKPDN2 0x010E +#define MT6323_TOP_CKPDN2_SET 0x0110 +#define MT6323_TOP_CKPDN2_CLR 0x0112 +#define MT6323_TOP_RST_CON 0x0114 +#define MT6323_TOP_RST_CON_SET 0x0116 +#define MT6323_TOP_RST_CON_CLR 0x0118 +#define MT6323_TOP_RST_MISC 0x011A +#define MT6323_TOP_RST_MISC_SET 0x011C +#define MT6323_TOP_RST_MISC_CLR 0x011E +#define 
MT6323_TOP_CKCON0 0x0120 +#define MT6323_TOP_CKCON0_SET 0x0122 +#define MT6323_TOP_CKCON0_CLR 0x0124 +#define MT6323_TOP_CKCON1 0x0126 +#define MT6323_TOP_CKCON1_SET 0x0128 +#define MT6323_TOP_CKCON1_CLR 0x012A +#define MT6323_TOP_CKTST0 0x012C +#define MT6323_TOP_CKTST1 0x012E +#define MT6323_TOP_CKTST2 0x0130 +#define MT6323_TEST_OUT 0x0132 +#define MT6323_TEST_CON0 0x0134 +#define MT6323_TEST_CON1 0x0136 +#define MT6323_EN_STATUS0 0x0138 +#define MT6323_EN_STATUS1 0x013A +#define MT6323_OCSTATUS0 0x013C +#define MT6323_OCSTATUS1 0x013E +#define MT6323_PGSTATUS 0x0140 +#define MT6323_CHRSTATUS 0x0142 +#define MT6323_TDSEL_CON 0x0144 +#define MT6323_RDSEL_CON 0x0146 +#define MT6323_SMT_CON0 0x0148 +#define MT6323_SMT_CON1 0x014A +#define MT6323_SMT_CON2 0x014C +#define MT6323_SMT_CON3 0x014E +#define MT6323_SMT_CON4 0x0150 +#define MT6323_DRV_CON0 0x0152 +#define MT6323_DRV_CON1 0x0154 +#define MT6323_DRV_CON2 0x0156 +#define MT6323_DRV_CON3 0x0158 +#define MT6323_DRV_CON4 0x015A +#define MT6323_SIMLS1_CON 0x015C +#define MT6323_SIMLS2_CON 0x015E +#define MT6323_INT_CON0 0x0160 +#define MT6323_INT_CON0_SET 0x0162 +#define MT6323_INT_CON0_CLR 0x0164 +#define MT6323_INT_CON1 0x0166 +#define MT6323_INT_CON1_SET 0x0168 +#define MT6323_INT_CON1_CLR 0x016A +#define MT6323_INT_MISC_CON 0x016C +#define MT6323_INT_MISC_CON_SET 0x016E +#define MT6323_INT_MISC_CON_CLR 0x0170 +#define MT6323_INT_STATUS0 0x0172 +#define MT6323_INT_STATUS1 0x0174 +#define MT6323_OC_GEAR_0 0x0176 +#define MT6323_OC_GEAR_1 0x0178 +#define MT6323_OC_GEAR_2 0x017A +#define MT6323_OC_CTL_VPROC 0x017C +#define MT6323_OC_CTL_VSYS 0x017E +#define MT6323_OC_CTL_VPA 0x0180 +#define MT6323_FQMTR_CON0 0x0182 +#define MT6323_FQMTR_CON1 0x0184 +#define MT6323_FQMTR_CON2 0x0186 +#define MT6323_RG_SPI_CON 0x0188 +#define MT6323_DEW_DIO_EN 0x018A +#define MT6323_DEW_READ_TEST 0x018C +#define MT6323_DEW_WRITE_TEST 0x018E +#define MT6323_DEW_CRC_SWRST 0x0190 +#define MT6323_DEW_CRC_EN 0x0192 +#define MT6323_DEW_CRC_VAL 0x0194 +#define MT6323_DEW_DBG_MON_SEL 0x0196 +#define MT6323_DEW_CIPHER_KEY_SEL 0x0198 +#define MT6323_DEW_CIPHER_IV_SEL 0x019A +#define MT6323_DEW_CIPHER_EN 0x019C +#define MT6323_DEW_CIPHER_RDY 0x019E +#define MT6323_DEW_CIPHER_MODE 0x01A0 +#define MT6323_DEW_CIPHER_SWRST 0x01A2 +#define MT6323_DEW_RDDMY_NO 0x01A4 +#define MT6323_DEW_RDATA_DLY_SEL 0x01A6 +#define MT6323_BUCK_CON0 0x0200 +#define MT6323_BUCK_CON1 0x0202 +#define MT6323_BUCK_CON2 0x0204 +#define MT6323_BUCK_CON3 0x0206 +#define MT6323_BUCK_CON4 0x0208 +#define MT6323_BUCK_CON5 0x020A +#define MT6323_VPROC_CON0 0x020C +#define MT6323_VPROC_CON1 0x020E +#define MT6323_VPROC_CON2 0x0210 +#define MT6323_VPROC_CON3 0x0212 +#define MT6323_VPROC_CON4 0x0214 +#define MT6323_VPROC_CON5 0x0216 +#define MT6323_VPROC_CON7 0x021A +#define MT6323_VPROC_CON8 0x021C +#define MT6323_VPROC_CON9 0x021E +#define MT6323_VPROC_CON10 0x0220 +#define MT6323_VPROC_CON11 0x0222 +#define MT6323_VPROC_CON12 0x0224 +#define MT6323_VPROC_CON13 0x0226 +#define MT6323_VPROC_CON14 0x0228 +#define MT6323_VPROC_CON15 0x022A +#define MT6323_VPROC_CON18 0x0230 +#define MT6323_VSYS_CON0 0x0232 +#define MT6323_VSYS_CON1 0x0234 +#define MT6323_VSYS_CON2 0x0236 +#define MT6323_VSYS_CON3 0x0238 +#define MT6323_VSYS_CON4 0x023A +#define MT6323_VSYS_CON5 0x023C +#define MT6323_VSYS_CON7 0x0240 +#define MT6323_VSYS_CON8 0x0242 +#define MT6323_VSYS_CON9 0x0244 +#define MT6323_VSYS_CON10 0x0246 +#define MT6323_VSYS_CON11 0x0248 +#define MT6323_VSYS_CON12 0x024A +#define MT6323_VSYS_CON13 0x024C 
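/*
 * Editorial example, not part of the upstream header: a minimal sketch of
 * using the base/_SET/_CLR register triplets seen above (for instance
 * MT6323_TOP_CKPDN0, MT6323_TOP_CKPDN0_SET, MT6323_TOP_CKPDN0_CLR).  It
 * assumes the usual MediaTek PMIC convention that writing a 1 to a _SET or
 * _CLR register sets or clears the matching bit in the base register, so no
 * read-modify-write cycle is needed.  The function name and the regmap
 * handle are assumptions for the illustration; regmap_write() is the
 * standard <linux/regmap.h> call.
 */
static inline int mt6323_example_set_clk_pdn(struct regmap *regmap,
					     unsigned int mask, bool pdn)
{
	return regmap_write(regmap, pdn ? MT6323_TOP_CKPDN0_SET :
					  MT6323_TOP_CKPDN0_CLR, mask);
}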
+#define MT6323_VSYS_CON14 0x024E +#define MT6323_VSYS_CON15 0x0250 +#define MT6323_VSYS_CON18 0x0256 +#define MT6323_VPA_CON0 0x0300 +#define MT6323_VPA_CON1 0x0302 +#define MT6323_VPA_CON2 0x0304 +#define MT6323_VPA_CON3 0x0306 +#define MT6323_VPA_CON4 0x0308 +#define MT6323_VPA_CON5 0x030A +#define MT6323_VPA_CON7 0x030E +#define MT6323_VPA_CON8 0x0310 +#define MT6323_VPA_CON9 0x0312 +#define MT6323_VPA_CON10 0x0314 +#define MT6323_VPA_CON11 0x0316 +#define MT6323_VPA_CON12 0x0318 +#define MT6323_VPA_CON14 0x031C +#define MT6323_VPA_CON16 0x0320 +#define MT6323_VPA_CON17 0x0322 +#define MT6323_VPA_CON18 0x0324 +#define MT6323_VPA_CON19 0x0326 +#define MT6323_VPA_CON20 0x0328 +#define MT6323_BUCK_K_CON0 0x032A +#define MT6323_BUCK_K_CON1 0x032C +#define MT6323_BUCK_K_CON2 0x032E +#define MT6323_ISINK0_CON0 0x0330 +#define MT6323_ISINK0_CON1 0x0332 +#define MT6323_ISINK0_CON2 0x0334 +#define MT6323_ISINK0_CON3 0x0336 +#define MT6323_ISINK1_CON0 0x0338 +#define MT6323_ISINK1_CON1 0x033A +#define MT6323_ISINK1_CON2 0x033C +#define MT6323_ISINK1_CON3 0x033E +#define MT6323_ISINK2_CON0 0x0340 +#define MT6323_ISINK2_CON1 0x0342 +#define MT6323_ISINK2_CON2 0x0344 +#define MT6323_ISINK2_CON3 0x0346 +#define MT6323_ISINK3_CON0 0x0348 +#define MT6323_ISINK3_CON1 0x034A +#define MT6323_ISINK3_CON2 0x034C +#define MT6323_ISINK3_CON3 0x034E +#define MT6323_ISINK_ANA0 0x0350 +#define MT6323_ISINK_ANA1 0x0352 +#define MT6323_ISINK_PHASE_DLY 0x0354 +#define MT6323_ISINK_EN_CTRL 0x0356 +#define MT6323_ANALDO_CON0 0x0400 +#define MT6323_ANALDO_CON1 0x0402 +#define MT6323_ANALDO_CON2 0x0404 +#define MT6323_ANALDO_CON3 0x0406 +#define MT6323_ANALDO_CON4 0x0408 +#define MT6323_ANALDO_CON5 0x040A +#define MT6323_ANALDO_CON6 0x040C +#define MT6323_ANALDO_CON7 0x040E +#define MT6323_ANALDO_CON8 0x0410 +#define MT6323_ANALDO_CON10 0x0412 +#define MT6323_ANALDO_CON15 0x0414 +#define MT6323_ANALDO_CON16 0x0416 +#define MT6323_ANALDO_CON17 0x0418 +#define MT6323_ANALDO_CON18 0x041A +#define MT6323_ANALDO_CON19 0x041C +#define MT6323_ANALDO_CON20 0x041E +#define MT6323_ANALDO_CON21 0x0420 +#define MT6323_DIGLDO_CON0 0x0500 +#define MT6323_DIGLDO_CON2 0x0502 +#define MT6323_DIGLDO_CON3 0x0504 +#define MT6323_DIGLDO_CON5 0x0506 +#define MT6323_DIGLDO_CON6 0x0508 +#define MT6323_DIGLDO_CON7 0x050A +#define MT6323_DIGLDO_CON8 0x050C +#define MT6323_DIGLDO_CON9 0x050E +#define MT6323_DIGLDO_CON10 0x0510 +#define MT6323_DIGLDO_CON11 0x0512 +#define MT6323_DIGLDO_CON12 0x0514 +#define MT6323_DIGLDO_CON13 0x0516 +#define MT6323_DIGLDO_CON14 0x0518 +#define MT6323_DIGLDO_CON15 0x051A +#define MT6323_DIGLDO_CON16 0x051C +#define MT6323_DIGLDO_CON17 0x051E +#define MT6323_DIGLDO_CON18 0x0520 +#define MT6323_DIGLDO_CON19 0x0522 +#define MT6323_DIGLDO_CON20 0x0524 +#define MT6323_DIGLDO_CON21 0x0526 +#define MT6323_DIGLDO_CON23 0x0528 +#define MT6323_DIGLDO_CON24 0x052A +#define MT6323_DIGLDO_CON26 0x052C +#define MT6323_DIGLDO_CON27 0x052E +#define MT6323_DIGLDO_CON28 0x0530 +#define MT6323_DIGLDO_CON29 0x0532 +#define MT6323_DIGLDO_CON30 0x0534 +#define MT6323_DIGLDO_CON31 0x0536 +#define MT6323_DIGLDO_CON32 0x0538 +#define MT6323_DIGLDO_CON33 0x053A +#define MT6323_DIGLDO_CON34 0x053C +#define MT6323_DIGLDO_CON35 0x053E +#define MT6323_DIGLDO_CON36 0x0540 +#define MT6323_DIGLDO_CON39 0x0542 +#define MT6323_DIGLDO_CON40 0x0544 +#define MT6323_DIGLDO_CON41 0x0546 +#define MT6323_DIGLDO_CON42 0x0548 +#define MT6323_DIGLDO_CON43 0x054A +#define MT6323_DIGLDO_CON44 0x054C +#define MT6323_DIGLDO_CON45 0x054E +#define 
MT6323_DIGLDO_CON46 0x0550 +#define MT6323_DIGLDO_CON47 0x0552 +#define MT6323_DIGLDO_CON48 0x0554 +#define MT6323_DIGLDO_CON49 0x0556 +#define MT6323_DIGLDO_CON50 0x0558 +#define MT6323_DIGLDO_CON51 0x055A +#define MT6323_DIGLDO_CON52 0x055C +#define MT6323_DIGLDO_CON53 0x055E +#define MT6323_DIGLDO_CON54 0x0560 +#define MT6323_EFUSE_CON0 0x0600 +#define MT6323_EFUSE_CON1 0x0602 +#define MT6323_EFUSE_CON2 0x0604 +#define MT6323_EFUSE_CON3 0x0606 +#define MT6323_EFUSE_CON4 0x0608 +#define MT6323_EFUSE_CON5 0x060A +#define MT6323_EFUSE_CON6 0x060C +#define MT6323_EFUSE_VAL_0_15 0x060E +#define MT6323_EFUSE_VAL_16_31 0x0610 +#define MT6323_EFUSE_VAL_32_47 0x0612 +#define MT6323_EFUSE_VAL_48_63 0x0614 +#define MT6323_EFUSE_VAL_64_79 0x0616 +#define MT6323_EFUSE_VAL_80_95 0x0618 +#define MT6323_EFUSE_VAL_96_111 0x061A +#define MT6323_EFUSE_VAL_112_127 0x061C +#define MT6323_EFUSE_VAL_128_143 0x061E +#define MT6323_EFUSE_VAL_144_159 0x0620 +#define MT6323_EFUSE_VAL_160_175 0x0622 +#define MT6323_EFUSE_VAL_176_191 0x0624 +#define MT6323_EFUSE_DOUT_0_15 0x0626 +#define MT6323_EFUSE_DOUT_16_31 0x0628 +#define MT6323_EFUSE_DOUT_32_47 0x062A +#define MT6323_EFUSE_DOUT_48_63 0x062C +#define MT6323_EFUSE_DOUT_64_79 0x062E +#define MT6323_EFUSE_DOUT_80_95 0x0630 +#define MT6323_EFUSE_DOUT_96_111 0x0632 +#define MT6323_EFUSE_DOUT_112_127 0x0634 +#define MT6323_EFUSE_DOUT_128_143 0x0636 +#define MT6323_EFUSE_DOUT_144_159 0x0638 +#define MT6323_EFUSE_DOUT_160_175 0x063A +#define MT6323_EFUSE_DOUT_176_191 0x063C +#define MT6323_EFUSE_CON7 0x063E +#define MT6323_EFUSE_CON8 0x0640 +#define MT6323_EFUSE_CON9 0x0642 +#define MT6323_RTC_MIX_CON0 0x0644 +#define MT6323_RTC_MIX_CON1 0x0646 +#define MT6323_AUDTOP_CON0 0x0700 +#define MT6323_AUDTOP_CON1 0x0702 +#define MT6323_AUDTOP_CON2 0x0704 +#define MT6323_AUDTOP_CON3 0x0706 +#define MT6323_AUDTOP_CON4 0x0708 +#define MT6323_AUDTOP_CON5 0x070A +#define MT6323_AUDTOP_CON6 0x070C +#define MT6323_AUDTOP_CON7 0x070E +#define MT6323_AUDTOP_CON8 0x0710 +#define MT6323_AUDTOP_CON9 0x0712 +#define MT6323_AUXADC_ADC0 0x0714 +#define MT6323_AUXADC_ADC1 0x0716 +#define MT6323_AUXADC_ADC2 0x0718 +#define MT6323_AUXADC_ADC3 0x071A +#define MT6323_AUXADC_ADC4 0x071C +#define MT6323_AUXADC_ADC5 0x071E +#define MT6323_AUXADC_ADC6 0x0720 +#define MT6323_AUXADC_ADC7 0x0722 +#define MT6323_AUXADC_ADC8 0x0724 +#define MT6323_AUXADC_ADC9 0x0726 +#define MT6323_AUXADC_ADC10 0x0728 +#define MT6323_AUXADC_ADC11 0x072A +#define MT6323_AUXADC_ADC12 0x072C +#define MT6323_AUXADC_ADC13 0x072E +#define MT6323_AUXADC_ADC14 0x0730 +#define MT6323_AUXADC_ADC15 0x0732 +#define MT6323_AUXADC_ADC16 0x0734 +#define MT6323_AUXADC_ADC17 0x0736 +#define MT6323_AUXADC_ADC18 0x0738 +#define MT6323_AUXADC_ADC19 0x073A +#define MT6323_AUXADC_ADC20 0x073C +#define MT6323_AUXADC_RSV1 0x073E +#define MT6323_AUXADC_RSV2 0x0740 +#define MT6323_AUXADC_CON0 0x0742 +#define MT6323_AUXADC_CON1 0x0744 +#define MT6323_AUXADC_CON2 0x0746 +#define MT6323_AUXADC_CON3 0x0748 +#define MT6323_AUXADC_CON4 0x074A +#define MT6323_AUXADC_CON5 0x074C +#define MT6323_AUXADC_CON6 0x074E +#define MT6323_AUXADC_CON7 0x0750 +#define MT6323_AUXADC_CON8 0x0752 +#define MT6323_AUXADC_CON9 0x0754 +#define MT6323_AUXADC_CON10 0x0756 +#define MT6323_AUXADC_CON11 0x0758 +#define MT6323_AUXADC_CON12 0x075A +#define MT6323_AUXADC_CON13 0x075C +#define MT6323_AUXADC_CON14 0x075E +#define MT6323_AUXADC_CON15 0x0760 +#define MT6323_AUXADC_CON16 0x0762 +#define MT6323_AUXADC_CON17 0x0764 +#define MT6323_AUXADC_CON18 0x0766 +#define 
MT6323_AUXADC_CON19 0x0768 +#define MT6323_AUXADC_CON20 0x076A +#define MT6323_AUXADC_CON21 0x076C +#define MT6323_AUXADC_CON22 0x076E +#define MT6323_AUXADC_CON23 0x0770 +#define MT6323_AUXADC_CON24 0x0772 +#define MT6323_AUXADC_CON25 0x0774 +#define MT6323_AUXADC_CON26 0x0776 +#define MT6323_AUXADC_CON27 0x0778 +#define MT6323_ACCDET_CON0 0x077A +#define MT6323_ACCDET_CON1 0x077C +#define MT6323_ACCDET_CON2 0x077E +#define MT6323_ACCDET_CON3 0x0780 +#define MT6323_ACCDET_CON4 0x0782 +#define MT6323_ACCDET_CON5 0x0784 +#define MT6323_ACCDET_CON6 0x0786 +#define MT6323_ACCDET_CON7 0x0788 +#define MT6323_ACCDET_CON8 0x078A +#define MT6323_ACCDET_CON9 0x078C +#define MT6323_ACCDET_CON10 0x078E +#define MT6323_ACCDET_CON11 0x0790 +#define MT6323_ACCDET_CON12 0x0792 +#define MT6323_ACCDET_CON13 0x0794 +#define MT6323_ACCDET_CON14 0x0796 +#define MT6323_ACCDET_CON15 0x0798 +#define MT6323_ACCDET_CON16 0x079A + +#endif /* __MFD_MT6323_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h new file mode 100644 index 000000000..d678f526e --- /dev/null +++ b/include/linux/mfd/mt6397/core.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Flora Fu, MediaTek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MFD_MT6397_CORE_H__ +#define __MFD_MT6397_CORE_H__ + +enum mt6397_irq_numbers { + MT6397_IRQ_SPKL_AB = 0, + MT6397_IRQ_SPKR_AB, + MT6397_IRQ_SPKL, + MT6397_IRQ_SPKR, + MT6397_IRQ_BAT_L, + MT6397_IRQ_BAT_H, + MT6397_IRQ_FG_BAT_L, + MT6397_IRQ_FG_BAT_H, + MT6397_IRQ_WATCHDOG, + MT6397_IRQ_PWRKEY, + MT6397_IRQ_THR_L, + MT6397_IRQ_THR_H, + MT6397_IRQ_VBATON_UNDET, + MT6397_IRQ_BVALID_DET, + MT6397_IRQ_CHRDET, + MT6397_IRQ_OV, + MT6397_IRQ_LDO, + MT6397_IRQ_HOMEKEY, + MT6397_IRQ_ACCDET, + MT6397_IRQ_AUDIO, + MT6397_IRQ_RTC, + MT6397_IRQ_PWRKEY_RSTB, + MT6397_IRQ_HDMI_SIFM, + MT6397_IRQ_HDMI_CEC, + MT6397_IRQ_VCA15, + MT6397_IRQ_VSRMCA15, + MT6397_IRQ_VCORE, + MT6397_IRQ_VGPU, + MT6397_IRQ_VIO18, + MT6397_IRQ_VPCA7, + MT6397_IRQ_VSRMCA7, + MT6397_IRQ_VDRM, + MT6397_IRQ_NR, +}; + +struct mt6397_chip { + struct device *dev; + struct regmap *regmap; + int irq; + struct irq_domain *irq_domain; + struct mutex irqlock; + u16 wake_mask[2]; + u16 irq_masks_cur[2]; + u16 irq_masks_cache[2]; + u16 int_con[2]; + u16 int_status[2]; +}; + +#endif /* __MFD_MT6397_CORE_H__ */ diff --git a/include/linux/mfd/mt6397/registers.h b/include/linux/mfd/mt6397/registers.h new file mode 100644 index 000000000..f23a0a60a --- /dev/null +++ b/include/linux/mfd/mt6397/registers.h @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Flora Fu, MediaTek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MFD_MT6397_REGISTERS_H__ +#define __MFD_MT6397_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6397_CID 0x0100 +#define MT6397_TOP_CKPDN 0x0102 +#define MT6397_TOP_CKPDN_SET 0x0104 +#define MT6397_TOP_CKPDN_CLR 0x0106 +#define MT6397_TOP_CKPDN2 0x0108 +#define MT6397_TOP_CKPDN2_SET 0x010A +#define MT6397_TOP_CKPDN2_CLR 0x010C +#define MT6397_TOP_GPIO_CKPDN 0x010E +#define MT6397_TOP_RST_CON 0x0114 +#define MT6397_WRP_CKPDN 0x011A +#define MT6397_WRP_RST_CON 0x0120 +#define MT6397_TOP_RST_MISC 0x0126 +#define MT6397_TOP_CKCON1 0x0128 +#define MT6397_TOP_CKCON2 0x012A +#define MT6397_TOP_CKTST1 0x012C +#define MT6397_TOP_CKTST2 0x012E +#define MT6397_OC_DEG_EN 0x0130 +#define MT6397_OC_CTL0 0x0132 +#define MT6397_OC_CTL1 0x0134 +#define MT6397_OC_CTL2 0x0136 +#define MT6397_INT_RSV 0x0138 +#define MT6397_TEST_CON0 0x013A +#define MT6397_TEST_CON1 0x013C +#define MT6397_STATUS0 0x013E +#define MT6397_STATUS1 0x0140 +#define MT6397_PGSTATUS 0x0142 +#define MT6397_CHRSTATUS 0x0144 +#define MT6397_OCSTATUS0 0x0146 +#define MT6397_OCSTATUS1 0x0148 +#define MT6397_OCSTATUS2 0x014A +#define MT6397_HDMI_PAD_IE 0x014C +#define MT6397_TEST_OUT_L 0x014E +#define MT6397_TEST_OUT_H 0x0150 +#define MT6397_TDSEL_CON 0x0152 +#define MT6397_RDSEL_CON 0x0154 +#define MT6397_GPIO_SMT_CON0 0x0156 +#define MT6397_GPIO_SMT_CON1 0x0158 +#define MT6397_GPIO_SMT_CON2 0x015A +#define MT6397_GPIO_SMT_CON3 0x015C +#define MT6397_DRV_CON0 0x015E +#define MT6397_DRV_CON1 0x0160 +#define MT6397_DRV_CON2 0x0162 +#define MT6397_DRV_CON3 0x0164 +#define MT6397_DRV_CON4 0x0166 +#define MT6397_DRV_CON5 0x0168 +#define MT6397_DRV_CON6 0x016A +#define MT6397_DRV_CON7 0x016C +#define MT6397_DRV_CON8 0x016E +#define MT6397_DRV_CON9 0x0170 +#define MT6397_DRV_CON10 0x0172 +#define MT6397_DRV_CON11 0x0174 +#define MT6397_DRV_CON12 0x0176 +#define MT6397_INT_CON0 0x0178 +#define MT6397_INT_CON1 0x017E +#define MT6397_INT_STATUS0 0x0184 +#define MT6397_INT_STATUS1 0x0186 +#define MT6397_FQMTR_CON0 0x0188 +#define MT6397_FQMTR_CON1 0x018A +#define MT6397_FQMTR_CON2 0x018C +#define MT6397_EFUSE_DOUT_0_15 0x01C4 +#define MT6397_EFUSE_DOUT_16_31 0x01C6 +#define MT6397_EFUSE_DOUT_32_47 0x01C8 +#define MT6397_EFUSE_DOUT_48_63 0x01CA +#define MT6397_SPI_CON 0x01CC +#define MT6397_TOP_CKPDN3 0x01CE +#define MT6397_TOP_CKCON3 0x01D4 +#define MT6397_EFUSE_DOUT_64_79 0x01D6 +#define MT6397_EFUSE_DOUT_80_95 0x01D8 +#define MT6397_EFUSE_DOUT_96_111 0x01DA +#define MT6397_EFUSE_DOUT_112_127 0x01DC +#define MT6397_EFUSE_DOUT_128_143 0x01DE +#define MT6397_EFUSE_DOUT_144_159 0x01E0 +#define MT6397_EFUSE_DOUT_160_175 0x01E2 +#define MT6397_EFUSE_DOUT_176_191 0x01E4 +#define MT6397_EFUSE_DOUT_192_207 0x01E6 +#define MT6397_EFUSE_DOUT_208_223 0x01E8 +#define MT6397_EFUSE_DOUT_224_239 0x01EA +#define MT6397_EFUSE_DOUT_240_255 0x01EC +#define MT6397_EFUSE_DOUT_256_271 0x01EE +#define MT6397_EFUSE_DOUT_272_287 0x01F0 +#define MT6397_EFUSE_DOUT_288_300 0x01F2 +#define MT6397_EFUSE_DOUT_304_319 0x01F4 +#define MT6397_BUCK_CON0 0x0200 +#define MT6397_BUCK_CON1 0x0202 +#define MT6397_BUCK_CON2 0x0204 +#define MT6397_BUCK_CON3 0x0206 +#define MT6397_BUCK_CON4 0x0208 +#define MT6397_BUCK_CON5 0x020A +#define MT6397_BUCK_CON6 0x020C +#define MT6397_BUCK_CON7 0x020E +#define MT6397_BUCK_CON8 0x0210 +#define MT6397_BUCK_CON9 0x0212 +#define MT6397_VCA15_CON0 0x0214 +#define MT6397_VCA15_CON1 0x0216 +#define MT6397_VCA15_CON2 0x0218 +#define MT6397_VCA15_CON3 0x021A +#define MT6397_VCA15_CON4 0x021C +#define MT6397_VCA15_CON5 0x021E +#define 
MT6397_VCA15_CON6 0x0220 +#define MT6397_VCA15_CON7 0x0222 +#define MT6397_VCA15_CON8 0x0224 +#define MT6397_VCA15_CON9 0x0226 +#define MT6397_VCA15_CON10 0x0228 +#define MT6397_VCA15_CON11 0x022A +#define MT6397_VCA15_CON12 0x022C +#define MT6397_VCA15_CON13 0x022E +#define MT6397_VCA15_CON14 0x0230 +#define MT6397_VCA15_CON15 0x0232 +#define MT6397_VCA15_CON16 0x0234 +#define MT6397_VCA15_CON17 0x0236 +#define MT6397_VCA15_CON18 0x0238 +#define MT6397_VSRMCA15_CON0 0x023A +#define MT6397_VSRMCA15_CON1 0x023C +#define MT6397_VSRMCA15_CON2 0x023E +#define MT6397_VSRMCA15_CON3 0x0240 +#define MT6397_VSRMCA15_CON4 0x0242 +#define MT6397_VSRMCA15_CON5 0x0244 +#define MT6397_VSRMCA15_CON6 0x0246 +#define MT6397_VSRMCA15_CON7 0x0248 +#define MT6397_VSRMCA15_CON8 0x024A +#define MT6397_VSRMCA15_CON9 0x024C +#define MT6397_VSRMCA15_CON10 0x024E +#define MT6397_VSRMCA15_CON11 0x0250 +#define MT6397_VSRMCA15_CON12 0x0252 +#define MT6397_VSRMCA15_CON13 0x0254 +#define MT6397_VSRMCA15_CON14 0x0256 +#define MT6397_VSRMCA15_CON15 0x0258 +#define MT6397_VSRMCA15_CON16 0x025A +#define MT6397_VSRMCA15_CON17 0x025C +#define MT6397_VSRMCA15_CON18 0x025E +#define MT6397_VSRMCA15_CON19 0x0260 +#define MT6397_VSRMCA15_CON20 0x0262 +#define MT6397_VSRMCA15_CON21 0x0264 +#define MT6397_VCORE_CON0 0x0266 +#define MT6397_VCORE_CON1 0x0268 +#define MT6397_VCORE_CON2 0x026A +#define MT6397_VCORE_CON3 0x026C +#define MT6397_VCORE_CON4 0x026E +#define MT6397_VCORE_CON5 0x0270 +#define MT6397_VCORE_CON6 0x0272 +#define MT6397_VCORE_CON7 0x0274 +#define MT6397_VCORE_CON8 0x0276 +#define MT6397_VCORE_CON9 0x0278 +#define MT6397_VCORE_CON10 0x027A +#define MT6397_VCORE_CON11 0x027C +#define MT6397_VCORE_CON12 0x027E +#define MT6397_VCORE_CON13 0x0280 +#define MT6397_VCORE_CON14 0x0282 +#define MT6397_VCORE_CON15 0x0284 +#define MT6397_VCORE_CON16 0x0286 +#define MT6397_VCORE_CON17 0x0288 +#define MT6397_VCORE_CON18 0x028A +#define MT6397_VGPU_CON0 0x028C +#define MT6397_VGPU_CON1 0x028E +#define MT6397_VGPU_CON2 0x0290 +#define MT6397_VGPU_CON3 0x0292 +#define MT6397_VGPU_CON4 0x0294 +#define MT6397_VGPU_CON5 0x0296 +#define MT6397_VGPU_CON6 0x0298 +#define MT6397_VGPU_CON7 0x029A +#define MT6397_VGPU_CON8 0x029C +#define MT6397_VGPU_CON9 0x029E +#define MT6397_VGPU_CON10 0x02A0 +#define MT6397_VGPU_CON11 0x02A2 +#define MT6397_VGPU_CON12 0x02A4 +#define MT6397_VGPU_CON13 0x02A6 +#define MT6397_VGPU_CON14 0x02A8 +#define MT6397_VGPU_CON15 0x02AA +#define MT6397_VGPU_CON16 0x02AC +#define MT6397_VGPU_CON17 0x02AE +#define MT6397_VGPU_CON18 0x02B0 +#define MT6397_VIO18_CON0 0x0300 +#define MT6397_VIO18_CON1 0x0302 +#define MT6397_VIO18_CON2 0x0304 +#define MT6397_VIO18_CON3 0x0306 +#define MT6397_VIO18_CON4 0x0308 +#define MT6397_VIO18_CON5 0x030A +#define MT6397_VIO18_CON6 0x030C +#define MT6397_VIO18_CON7 0x030E +#define MT6397_VIO18_CON8 0x0310 +#define MT6397_VIO18_CON9 0x0312 +#define MT6397_VIO18_CON10 0x0314 +#define MT6397_VIO18_CON11 0x0316 +#define MT6397_VIO18_CON12 0x0318 +#define MT6397_VIO18_CON13 0x031A +#define MT6397_VIO18_CON14 0x031C +#define MT6397_VIO18_CON15 0x031E +#define MT6397_VIO18_CON16 0x0320 +#define MT6397_VIO18_CON17 0x0322 +#define MT6397_VIO18_CON18 0x0324 +#define MT6397_VPCA7_CON0 0x0326 +#define MT6397_VPCA7_CON1 0x0328 +#define MT6397_VPCA7_CON2 0x032A +#define MT6397_VPCA7_CON3 0x032C +#define MT6397_VPCA7_CON4 0x032E +#define MT6397_VPCA7_CON5 0x0330 +#define MT6397_VPCA7_CON6 0x0332 +#define MT6397_VPCA7_CON7 0x0334 +#define MT6397_VPCA7_CON8 0x0336 +#define MT6397_VPCA7_CON9 0x0338 
+#define MT6397_VPCA7_CON10 0x033A +#define MT6397_VPCA7_CON11 0x033C +#define MT6397_VPCA7_CON12 0x033E +#define MT6397_VPCA7_CON13 0x0340 +#define MT6397_VPCA7_CON14 0x0342 +#define MT6397_VPCA7_CON15 0x0344 +#define MT6397_VPCA7_CON16 0x0346 +#define MT6397_VPCA7_CON17 0x0348 +#define MT6397_VPCA7_CON18 0x034A +#define MT6397_VSRMCA7_CON0 0x034C +#define MT6397_VSRMCA7_CON1 0x034E +#define MT6397_VSRMCA7_CON2 0x0350 +#define MT6397_VSRMCA7_CON3 0x0352 +#define MT6397_VSRMCA7_CON4 0x0354 +#define MT6397_VSRMCA7_CON5 0x0356 +#define MT6397_VSRMCA7_CON6 0x0358 +#define MT6397_VSRMCA7_CON7 0x035A +#define MT6397_VSRMCA7_CON8 0x035C +#define MT6397_VSRMCA7_CON9 0x035E +#define MT6397_VSRMCA7_CON10 0x0360 +#define MT6397_VSRMCA7_CON11 0x0362 +#define MT6397_VSRMCA7_CON12 0x0364 +#define MT6397_VSRMCA7_CON13 0x0366 +#define MT6397_VSRMCA7_CON14 0x0368 +#define MT6397_VSRMCA7_CON15 0x036A +#define MT6397_VSRMCA7_CON16 0x036C +#define MT6397_VSRMCA7_CON17 0x036E +#define MT6397_VSRMCA7_CON18 0x0370 +#define MT6397_VSRMCA7_CON19 0x0372 +#define MT6397_VSRMCA7_CON20 0x0374 +#define MT6397_VSRMCA7_CON21 0x0376 +#define MT6397_VDRM_CON0 0x0378 +#define MT6397_VDRM_CON1 0x037A +#define MT6397_VDRM_CON2 0x037C +#define MT6397_VDRM_CON3 0x037E +#define MT6397_VDRM_CON4 0x0380 +#define MT6397_VDRM_CON5 0x0382 +#define MT6397_VDRM_CON6 0x0384 +#define MT6397_VDRM_CON7 0x0386 +#define MT6397_VDRM_CON8 0x0388 +#define MT6397_VDRM_CON9 0x038A +#define MT6397_VDRM_CON10 0x038C +#define MT6397_VDRM_CON11 0x038E +#define MT6397_VDRM_CON12 0x0390 +#define MT6397_VDRM_CON13 0x0392 +#define MT6397_VDRM_CON14 0x0394 +#define MT6397_VDRM_CON15 0x0396 +#define MT6397_VDRM_CON16 0x0398 +#define MT6397_VDRM_CON17 0x039A +#define MT6397_VDRM_CON18 0x039C +#define MT6397_BUCK_K_CON0 0x039E +#define MT6397_BUCK_K_CON1 0x03A0 +#define MT6397_ANALDO_CON0 0x0400 +#define MT6397_ANALDO_CON1 0x0402 +#define MT6397_ANALDO_CON2 0x0404 +#define MT6397_ANALDO_CON3 0x0406 +#define MT6397_ANALDO_CON4 0x0408 +#define MT6397_ANALDO_CON5 0x040A +#define MT6397_ANALDO_CON6 0x040C +#define MT6397_ANALDO_CON7 0x040E +#define MT6397_DIGLDO_CON0 0x0410 +#define MT6397_DIGLDO_CON1 0x0412 +#define MT6397_DIGLDO_CON2 0x0414 +#define MT6397_DIGLDO_CON3 0x0416 +#define MT6397_DIGLDO_CON4 0x0418 +#define MT6397_DIGLDO_CON5 0x041A +#define MT6397_DIGLDO_CON6 0x041C +#define MT6397_DIGLDO_CON7 0x041E +#define MT6397_DIGLDO_CON8 0x0420 +#define MT6397_DIGLDO_CON9 0x0422 +#define MT6397_DIGLDO_CON10 0x0424 +#define MT6397_DIGLDO_CON11 0x0426 +#define MT6397_DIGLDO_CON12 0x0428 +#define MT6397_DIGLDO_CON13 0x042A +#define MT6397_DIGLDO_CON14 0x042C +#define MT6397_DIGLDO_CON15 0x042E +#define MT6397_DIGLDO_CON16 0x0430 +#define MT6397_DIGLDO_CON17 0x0432 +#define MT6397_DIGLDO_CON18 0x0434 +#define MT6397_DIGLDO_CON19 0x0436 +#define MT6397_DIGLDO_CON20 0x0438 +#define MT6397_DIGLDO_CON21 0x043A +#define MT6397_DIGLDO_CON22 0x043C +#define MT6397_DIGLDO_CON23 0x043E +#define MT6397_DIGLDO_CON24 0x0440 +#define MT6397_DIGLDO_CON25 0x0442 +#define MT6397_DIGLDO_CON26 0x0444 +#define MT6397_DIGLDO_CON27 0x0446 +#define MT6397_DIGLDO_CON28 0x0448 +#define MT6397_DIGLDO_CON29 0x044A +#define MT6397_DIGLDO_CON30 0x044C +#define MT6397_DIGLDO_CON31 0x044E +#define MT6397_DIGLDO_CON32 0x0450 +#define MT6397_DIGLDO_CON33 0x045A +#define MT6397_SPK_CON0 0x0600 +#define MT6397_SPK_CON1 0x0602 +#define MT6397_SPK_CON2 0x0604 +#define MT6397_SPK_CON3 0x0606 +#define MT6397_SPK_CON4 0x0608 +#define MT6397_SPK_CON5 0x060A +#define MT6397_SPK_CON6 0x060C +#define 
MT6397_SPK_CON7 0x060E +#define MT6397_SPK_CON8 0x0610 +#define MT6397_SPK_CON9 0x0612 +#define MT6397_SPK_CON10 0x0614 +#define MT6397_SPK_CON11 0x0616 +#define MT6397_AUDDAC_CON0 0x0700 +#define MT6397_AUDBUF_CFG0 0x0702 +#define MT6397_AUDBUF_CFG1 0x0704 +#define MT6397_AUDBUF_CFG2 0x0706 +#define MT6397_AUDBUF_CFG3 0x0708 +#define MT6397_AUDBUF_CFG4 0x070A +#define MT6397_IBIASDIST_CFG0 0x070C +#define MT6397_AUDACCDEPOP_CFG0 0x070E +#define MT6397_AUD_IV_CFG0 0x0710 +#define MT6397_AUDCLKGEN_CFG0 0x0712 +#define MT6397_AUDLDO_CFG0 0x0714 +#define MT6397_AUDLDO_CFG1 0x0716 +#define MT6397_AUDNVREGGLB_CFG0 0x0718 +#define MT6397_AUD_NCP0 0x071A +#define MT6397_AUDPREAMP_CON0 0x071C +#define MT6397_AUDADC_CON0 0x071E +#define MT6397_AUDADC_CON1 0x0720 +#define MT6397_AUDADC_CON2 0x0722 +#define MT6397_AUDADC_CON3 0x0724 +#define MT6397_AUDADC_CON4 0x0726 +#define MT6397_AUDADC_CON5 0x0728 +#define MT6397_AUDADC_CON6 0x072A +#define MT6397_AUDDIGMI_CON0 0x072C +#define MT6397_AUDLSBUF_CON0 0x072E +#define MT6397_AUDLSBUF_CON1 0x0730 +#define MT6397_AUDENCSPARE_CON0 0x0732 +#define MT6397_AUDENCCLKSQ_CON0 0x0734 +#define MT6397_AUDPREAMPGAIN_CON0 0x0736 +#define MT6397_ZCD_CON0 0x0738 +#define MT6397_ZCD_CON1 0x073A +#define MT6397_ZCD_CON2 0x073C +#define MT6397_ZCD_CON3 0x073E +#define MT6397_ZCD_CON4 0x0740 +#define MT6397_ZCD_CON5 0x0742 +#define MT6397_NCP_CLKDIV_CON0 0x0744 +#define MT6397_NCP_CLKDIV_CON1 0x0746 + +#endif /* __MFD_MT6397_REGISTERS_H__ */ diff --git a/include/linux/mfd/mxs-lradc.h b/include/linux/mfd/mxs-lradc.h new file mode 100644 index 000000000..661a4521f --- /dev/null +++ b/include/linux/mfd/mxs-lradc.h @@ -0,0 +1,187 @@ +/* + * Freescale MXS Low Resolution Analog-to-Digital Converter driver + * + * Copyright (c) 2012 DENX Software Engineering, GmbH. + * Copyright (c) 2016 Ksenija Stanojevic + * + * Author: Marek Vasut + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MFD_MXS_LRADC_H +#define __MFD_MXS_LRADC_H + +#include +#include +#include + +#define LRADC_MAX_DELAY_CHANS 4 +#define LRADC_MAX_MAPPED_CHANS 8 +#define LRADC_MAX_TOTAL_CHANS 16 + +#define LRADC_DELAY_TIMER_HZ 2000 + +#define LRADC_CTRL0 0x00 +# define LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE BIT(23) +# define LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE BIT(22) +# define LRADC_CTRL0_MX28_YNNSW /* YM */ BIT(21) +# define LRADC_CTRL0_MX28_YPNSW /* YP */ BIT(20) +# define LRADC_CTRL0_MX28_YPPSW /* YP */ BIT(19) +# define LRADC_CTRL0_MX28_XNNSW /* XM */ BIT(18) +# define LRADC_CTRL0_MX28_XNPSW /* XM */ BIT(17) +# define LRADC_CTRL0_MX28_XPPSW /* XP */ BIT(16) + +# define LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE BIT(20) +# define LRADC_CTRL0_MX23_YM BIT(19) +# define LRADC_CTRL0_MX23_XM BIT(18) +# define LRADC_CTRL0_MX23_YP BIT(17) +# define LRADC_CTRL0_MX23_XP BIT(16) + +# define LRADC_CTRL0_MX28_PLATE_MASK \ + (LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE | \ + LRADC_CTRL0_MX28_YNNSW | LRADC_CTRL0_MX28_YPNSW | \ + LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_XNNSW | \ + LRADC_CTRL0_MX28_XNPSW | LRADC_CTRL0_MX28_XPPSW) + +# define LRADC_CTRL0_MX23_PLATE_MASK \ + (LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE | \ + LRADC_CTRL0_MX23_YM | LRADC_CTRL0_MX23_XM | \ + LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XP) + +#define LRADC_CTRL1 0x10 +#define LRADC_CTRL1_TOUCH_DETECT_IRQ_EN BIT(24) +#define LRADC_CTRL1_LRADC_IRQ_EN(n) (1 << ((n) + 16)) +#define LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK (0x1fff << 16) +#define LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK (0x01ff << 16) +#define LRADC_CTRL1_LRADC_IRQ_EN_OFFSET 16 +#define LRADC_CTRL1_TOUCH_DETECT_IRQ BIT(8) +#define LRADC_CTRL1_LRADC_IRQ(n) BIT(n) +#define LRADC_CTRL1_MX28_LRADC_IRQ_MASK 0x1fff +#define LRADC_CTRL1_MX23_LRADC_IRQ_MASK 0x01ff +#define LRADC_CTRL1_LRADC_IRQ_OFFSET 0 + +#define LRADC_CTRL2 0x20 +#define LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET 24 +#define LRADC_CTRL2_TEMPSENSE_PWD BIT(15) + +#define LRADC_STATUS 0x40 +#define LRADC_STATUS_TOUCH_DETECT_RAW BIT(0) + +#define LRADC_CH(n) (0x50 + (0x10 * (n))) +#define LRADC_CH_ACCUMULATE BIT(29) +#define LRADC_CH_NUM_SAMPLES_MASK (0x1f << 24) +#define LRADC_CH_NUM_SAMPLES_OFFSET 24 +#define LRADC_CH_NUM_SAMPLES(x) \ + ((x) << LRADC_CH_NUM_SAMPLES_OFFSET) +#define LRADC_CH_VALUE_MASK 0x3ffff +#define LRADC_CH_VALUE_OFFSET 0 + +#define LRADC_DELAY(n) (0xd0 + (0x10 * (n))) +#define LRADC_DELAY_TRIGGER_LRADCS_MASK (0xffUL << 24) +#define LRADC_DELAY_TRIGGER_LRADCS_OFFSET 24 +#define LRADC_DELAY_TRIGGER(x) \ + (((x) << LRADC_DELAY_TRIGGER_LRADCS_OFFSET) & \ + LRADC_DELAY_TRIGGER_LRADCS_MASK) +#define LRADC_DELAY_KICK BIT(20) +#define LRADC_DELAY_TRIGGER_DELAYS_MASK (0xf << 16) +#define LRADC_DELAY_TRIGGER_DELAYS_OFFSET 16 +#define LRADC_DELAY_TRIGGER_DELAYS(x) \ + (((x) << LRADC_DELAY_TRIGGER_DELAYS_OFFSET) & \ + LRADC_DELAY_TRIGGER_DELAYS_MASK) +#define LRADC_DELAY_LOOP_COUNT_MASK (0x1f << 11) +#define LRADC_DELAY_LOOP_COUNT_OFFSET 11 +#define LRADC_DELAY_LOOP(x) \ + (((x) << LRADC_DELAY_LOOP_COUNT_OFFSET) & \ + LRADC_DELAY_LOOP_COUNT_MASK) +#define LRADC_DELAY_DELAY_MASK 0x7ff +#define LRADC_DELAY_DELAY_OFFSET 0 +#define LRADC_DELAY_DELAY(x) \ + (((x) << LRADC_DELAY_DELAY_OFFSET) & \ + LRADC_DELAY_DELAY_MASK) + +#define LRADC_CTRL4 0x140 +#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) +#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) +#define LRADC_CTRL4_LRADCSELECT(n, x) \ + (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \ + LRADC_CTRL4_LRADCSELECT_MASK(n)) + +#define LRADC_RESOLUTION 12 +#define 
LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1) + +#define BUFFER_VCHANS_LIMITED 0x3f +#define BUFFER_VCHANS_ALL 0xff + + /* + * Certain LRADC channels are shared between touchscreen + * and/or touch-buttons and generic LRADC block. Therefore when using + * either of these, these channels are not available for the regular + * sampling. The shared channels are as follows: + * + * CH0 -- Touch button #0 + * CH1 -- Touch button #1 + * CH2 -- Touch screen XPUL + * CH3 -- Touch screen YPLL + * CH4 -- Touch screen XNUL + * CH5 -- Touch screen YNLR + * CH6 -- Touch screen WIPER (5-wire only) + * + * The bit fields below represents which parts of the LRADC block are + * switched into special mode of operation. These channels can not + * be sampled as regular LRADC channels. The driver will refuse any + * attempt to sample these channels. + */ +#define CHAN_MASK_TOUCHBUTTON (BIT(1) | BIT(0)) +#define CHAN_MASK_TOUCHSCREEN_4WIRE (0xf << 2) +#define CHAN_MASK_TOUCHSCREEN_5WIRE (0x1f << 2) + +enum mxs_lradc_id { + IMX23_LRADC, + IMX28_LRADC, +}; + +enum mxs_lradc_ts_wires { + MXS_LRADC_TOUCHSCREEN_NONE = 0, + MXS_LRADC_TOUCHSCREEN_4WIRE, + MXS_LRADC_TOUCHSCREEN_5WIRE, +}; + +/** + * struct mxs_lradc + * @soc: soc type (IMX23 or IMX28) + * @clk: 2 kHz clock for delay units + * @buffer_vchans: channels that can be used during buffered capture + * @touchscreen_wire: touchscreen type (4-wire or 5-wire) + * @use_touchbutton: button state (on or off) + */ +struct mxs_lradc { + enum mxs_lradc_id soc; + struct clk *clk; + u8 buffer_vchans; + + enum mxs_lradc_ts_wires touchscreen_wire; + bool use_touchbutton; +}; + +static inline u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc) +{ + switch (lradc->soc) { + case IMX23_LRADC: + return LRADC_CTRL1_MX23_LRADC_IRQ_MASK; + case IMX28_LRADC: + return LRADC_CTRL1_MX28_LRADC_IRQ_MASK; + default: + return 0; + } +} + +#endif /* __MXS_LRADC_H */ diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h new file mode 100644 index 000000000..75e5c8ff8 --- /dev/null +++ b/include/linux/mfd/palmas.h @@ -0,0 +1,3814 @@ +/* + * TI Palmas + * + * Copyright 2011-2013 Texas Instruments Inc. + * + * Author: Graeme Gregory + * Author: Ian Lartey + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_PALMAS_H +#define __LINUX_MFD_PALMAS_H + +#include +#include +#include +#include +#include +#include +#include + +#define PALMAS_NUM_CLIENTS 3 + +/* The ID_REVISION NUMBERS */ +#define PALMAS_CHIP_OLD_ID 0x0000 +#define PALMAS_CHIP_ID 0xC035 +#define PALMAS_CHIP_CHARGER_ID 0xC036 + +#define TPS65917_RESERVED -1 + +#define is_palmas(a) (((a) == PALMAS_CHIP_OLD_ID) || \ + ((a) == PALMAS_CHIP_ID)) +#define is_palmas_charger(a) ((a) == PALMAS_CHIP_CHARGER_ID) + +/** + * Palmas PMIC feature types + * + * PALMAS_PMIC_FEATURE_SMPS10_BOOST - used when the PMIC provides SMPS10_BOOST + * regulator. + * + * PALMAS_PMIC_HAS(b, f) - macro to check if a bandgap device is capable of a + * specific feature (above) or not. Return non-zero, if yes. 
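+ * + * For example, a driver holding a struct palmas *palmas might test for the + * boost feature with PALMAS_PMIC_HAS(palmas, SMPS10_BOOST), which simply + * expands to (palmas->features & PALMAS_PMIC_FEATURE_SMPS10_BOOST); this + * usage note is illustrative only.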
+ */ +#define PALMAS_PMIC_FEATURE_SMPS10_BOOST BIT(0) +#define PALMAS_PMIC_HAS(b, f) \ + ((b)->features & PALMAS_PMIC_FEATURE_ ## f) + +struct palmas_pmic; +struct palmas_gpadc; +struct palmas_resource; +struct palmas_usb; +struct palmas_pmic_driver_data; +struct palmas_pmic_platform_data; + +enum palmas_usb_state { + PALMAS_USB_STATE_DISCONNECT, + PALMAS_USB_STATE_VBUS, + PALMAS_USB_STATE_ID, +}; + +struct palmas { + struct device *dev; + + struct i2c_client *i2c_clients[PALMAS_NUM_CLIENTS]; + struct regmap *regmap[PALMAS_NUM_CLIENTS]; + + /* Stored chip id */ + int id; + + unsigned int features; + /* IRQ Data */ + int irq; + u32 irq_mask; + struct mutex irq_lock; + struct regmap_irq_chip_data *irq_data; + + struct palmas_pmic_driver_data *pmic_ddata; + + /* Child Devices */ + struct palmas_pmic *pmic; + struct palmas_gpadc *gpadc; + struct palmas_resource *resource; + struct palmas_usb *usb; + + /* GPIO MUXing */ + u8 gpio_muxed; + u8 led_muxed; + u8 pwm_muxed; +}; + +#define PALMAS_EXT_REQ (PALMAS_EXT_CONTROL_ENABLE1 | \ + PALMAS_EXT_CONTROL_ENABLE2 | \ + PALMAS_EXT_CONTROL_NSLEEP) + +struct palmas_sleep_requestor_info { + int id; + int reg_offset; + int bit_pos; +}; + +struct palmas_regs_info { + char *name; + char *sname; + u8 vsel_addr; + u8 ctrl_addr; + u8 tstep_addr; + int sleep_id; +}; + +struct palmas_pmic_driver_data { + int smps_start; + int smps_end; + int ldo_begin; + int ldo_end; + int max_reg; + bool has_regen3; + struct palmas_regs_info *palmas_regs_info; + struct of_regulator_match *palmas_matches; + struct palmas_sleep_requestor_info *sleep_req_info; + int (*smps_register)(struct palmas_pmic *pmic, + struct palmas_pmic_driver_data *ddata, + struct palmas_pmic_platform_data *pdata, + const char *pdev_name, + struct regulator_config config); + int (*ldo_register)(struct palmas_pmic *pmic, + struct palmas_pmic_driver_data *ddata, + struct palmas_pmic_platform_data *pdata, + const char *pdev_name, + struct regulator_config config); +}; + +struct palmas_adc_wakeup_property { + int adc_channel_number; + int adc_high_threshold; + int adc_low_threshold; +}; + +struct palmas_gpadc_platform_data { + /* Channel 3 current source is only enabled during conversion */ + int ch3_current; /* 0: off; 1: 10uA; 2: 400uA; 3: 800 uA */ + + /* Channel 0 current source can be used for battery detection. + * If used for battery detection this will cause a permanent current + * consumption depending on current level set here. + */ + int ch0_current; /* 0: off; 1: 5uA; 2: 15uA; 3: 20 uA */ + bool extended_delay; /* use extended delay for conversion */ + + /* default BAT_REMOVAL_DAT setting on device probe */ + int bat_removal; + + /* Sets the START_POLARITY bit in the RT_CTRL register */ + int start_polarity; + + int auto_conversion_period_ms; + struct palmas_adc_wakeup_property *adc_wakeup1_data; + struct palmas_adc_wakeup_property *adc_wakeup2_data; +}; + +struct palmas_reg_init { + /* warm_reset controls the voltage levels after a warm reset + * + * 0: reload default values from OTP on warm reset + * 1: maintain voltage from VSEL on warm reset + */ + int warm_reset; + + /* roof_floor controls whether the regulator uses the i2c style + * of DVS or uses the method where a GPIO or other control method is + * attached to the NSLEEP/ENABLE1/ENABLE2 pins + * + * For SMPS + * + * 0: i2c selection of voltage + * 1: pin selection of voltage. + * + * For LDO unused + */ + int roof_floor; + + /* mode_sleep is the mode loaded to MODE_SLEEP bits as defined in + * the data sheet.
+ * + * For SMPS + * + * 0: Off + * 1: AUTO + * 2: ECO + * 3: Forced PWM + * + * For LDO + * + * 0: Off + * 1: On + */ + int mode_sleep; + + /* voltage_sel is the bitfield loaded onto the SMPSX_VOLTAGE + * register. Set this if the default voltage set in OTP needs + * to be overridden. + */ + u8 vsel; + +}; + +enum palmas_regulators { + /* SMPS regulators */ + PALMAS_REG_SMPS12, + PALMAS_REG_SMPS123, + PALMAS_REG_SMPS3, + PALMAS_REG_SMPS45, + PALMAS_REG_SMPS457, + PALMAS_REG_SMPS6, + PALMAS_REG_SMPS7, + PALMAS_REG_SMPS8, + PALMAS_REG_SMPS9, + PALMAS_REG_SMPS10_OUT2, + PALMAS_REG_SMPS10_OUT1, + /* LDO regulators */ + PALMAS_REG_LDO1, + PALMAS_REG_LDO2, + PALMAS_REG_LDO3, + PALMAS_REG_LDO4, + PALMAS_REG_LDO5, + PALMAS_REG_LDO6, + PALMAS_REG_LDO7, + PALMAS_REG_LDO8, + PALMAS_REG_LDO9, + PALMAS_REG_LDOLN, + PALMAS_REG_LDOUSB, + /* External regulators */ + PALMAS_REG_REGEN1, + PALMAS_REG_REGEN2, + PALMAS_REG_REGEN3, + PALMAS_REG_SYSEN1, + PALMAS_REG_SYSEN2, + /* Total number of regulators */ + PALMAS_NUM_REGS, +}; + +enum tps65917_regulators { + /* SMPS regulators */ + TPS65917_REG_SMPS1, + TPS65917_REG_SMPS2, + TPS65917_REG_SMPS3, + TPS65917_REG_SMPS4, + TPS65917_REG_SMPS5, + TPS65917_REG_SMPS12, + /* LDO regulators */ + TPS65917_REG_LDO1, + TPS65917_REG_LDO2, + TPS65917_REG_LDO3, + TPS65917_REG_LDO4, + TPS65917_REG_LDO5, + TPS65917_REG_REGEN1, + TPS65917_REG_REGEN2, + TPS65917_REG_REGEN3, + + /* Total number of regulators */ + TPS65917_NUM_REGS, +}; + +/* External control signal name */ +enum { + PALMAS_EXT_CONTROL_ENABLE1 = 0x1, + PALMAS_EXT_CONTROL_ENABLE2 = 0x2, + PALMAS_EXT_CONTROL_NSLEEP = 0x4, +}; + +/* + * Palmas device resources can be controlled externally for + * enabling/disabling them rather than by a register write through i2c. + * Add the externally controlled requestor ID for different resources.
+ */ +enum palmas_external_requestor_id { + PALMAS_EXTERNAL_REQSTR_ID_REGEN1, + PALMAS_EXTERNAL_REQSTR_ID_REGEN2, + PALMAS_EXTERNAL_REQSTR_ID_SYSEN1, + PALMAS_EXTERNAL_REQSTR_ID_SYSEN2, + PALMAS_EXTERNAL_REQSTR_ID_CLK32KG, + PALMAS_EXTERNAL_REQSTR_ID_CLK32KGAUDIO, + PALMAS_EXTERNAL_REQSTR_ID_REGEN3, + PALMAS_EXTERNAL_REQSTR_ID_SMPS12, + PALMAS_EXTERNAL_REQSTR_ID_SMPS3, + PALMAS_EXTERNAL_REQSTR_ID_SMPS45, + PALMAS_EXTERNAL_REQSTR_ID_SMPS6, + PALMAS_EXTERNAL_REQSTR_ID_SMPS7, + PALMAS_EXTERNAL_REQSTR_ID_SMPS8, + PALMAS_EXTERNAL_REQSTR_ID_SMPS9, + PALMAS_EXTERNAL_REQSTR_ID_SMPS10, + PALMAS_EXTERNAL_REQSTR_ID_LDO1, + PALMAS_EXTERNAL_REQSTR_ID_LDO2, + PALMAS_EXTERNAL_REQSTR_ID_LDO3, + PALMAS_EXTERNAL_REQSTR_ID_LDO4, + PALMAS_EXTERNAL_REQSTR_ID_LDO5, + PALMAS_EXTERNAL_REQSTR_ID_LDO6, + PALMAS_EXTERNAL_REQSTR_ID_LDO7, + PALMAS_EXTERNAL_REQSTR_ID_LDO8, + PALMAS_EXTERNAL_REQSTR_ID_LDO9, + PALMAS_EXTERNAL_REQSTR_ID_LDOLN, + PALMAS_EXTERNAL_REQSTR_ID_LDOUSB, + + /* Last entry */ + PALMAS_EXTERNAL_REQSTR_ID_MAX, +}; + +enum tps65917_external_requestor_id { + TPS65917_EXTERNAL_REQSTR_ID_REGEN1, + TPS65917_EXTERNAL_REQSTR_ID_REGEN2, + TPS65917_EXTERNAL_REQSTR_ID_REGEN3, + TPS65917_EXTERNAL_REQSTR_ID_SMPS1, + TPS65917_EXTERNAL_REQSTR_ID_SMPS2, + TPS65917_EXTERNAL_REQSTR_ID_SMPS3, + TPS65917_EXTERNAL_REQSTR_ID_SMPS4, + TPS65917_EXTERNAL_REQSTR_ID_SMPS5, + TPS65917_EXTERNAL_REQSTR_ID_SMPS12, + TPS65917_EXTERNAL_REQSTR_ID_LDO1, + TPS65917_EXTERNAL_REQSTR_ID_LDO2, + TPS65917_EXTERNAL_REQSTR_ID_LDO3, + TPS65917_EXTERNAL_REQSTR_ID_LDO4, + TPS65917_EXTERNAL_REQSTR_ID_LDO5, + /* Last entry */ + TPS65917_EXTERNAL_REQSTR_ID_MAX, +}; + +struct palmas_pmic_platform_data { + /* An array of pointers to regulator init data indexed by regulator + * ID + */ + struct regulator_init_data *reg_data[PALMAS_NUM_REGS]; + + /* An array of pointers to structures containing sleep mode and DVS + * configuration for regulators indexed by ID + */ + struct palmas_reg_init *reg_init[PALMAS_NUM_REGS]; + + /* use LDO6 for vibrator control */ + int ldo6_vibrator; + + /* Enable tracking mode of LDO8 */ + bool enable_ldo8_tracking; +}; + +struct palmas_usb_platform_data { + /* Do we enable the wakeup comparator on probe */ + int wakeup; +}; + +struct palmas_resource_platform_data { + int regen1_mode_sleep; + int regen2_mode_sleep; + int sysen1_mode_sleep; + int sysen2_mode_sleep; + + /* bitfield to be loaded to NSLEEP_RES_ASSIGN */ + u8 nsleep_res; + /* bitfield to be loaded to NSLEEP_SMPS_ASSIGN */ + u8 nsleep_smps; + /* bitfield to be loaded to NSLEEP_LDO_ASSIGN1 */ + u8 nsleep_ldo1; + /* bitfield to be loaded to NSLEEP_LDO_ASSIGN2 */ + u8 nsleep_ldo2; + + /* bitfield to be loaded to ENABLE1_RES_ASSIGN */ + u8 enable1_res; + /* bitfield to be loaded to ENABLE1_SMPS_ASSIGN */ + u8 enable1_smps; + /* bitfield to be loaded to ENABLE1_LDO_ASSIGN1 */ + u8 enable1_ldo1; + /* bitfield to be loaded to ENABLE1_LDO_ASSIGN2 */ + u8 enable1_ldo2; + + /* bitfield to be loaded to ENABLE2_RES_ASSIGN */ + u8 enable2_res; + /* bitfield to be loaded to ENABLE2_SMPS_ASSIGN */ + u8 enable2_smps; + /* bitfield to be loaded to ENABLE2_LDO_ASSIGN1 */ + u8 enable2_ldo1; + /* bitfield to be loaded to ENABLE2_LDO_ASSIGN2 */ + u8 enable2_ldo2; +}; + +struct palmas_clk_platform_data { + int clk32kg_mode_sleep; + int clk32kgaudio_mode_sleep; +}; + +struct palmas_platform_data { + int irq_flags; + int gpio_base; + + /* bit value to be loaded to the POWER_CTRL register */ + u8 power_ctrl; + + /* + * boolean to select if we want to configure muxing here + * then the two 
value to load into the registers if true + */ + int mux_from_pdata; + u8 pad1, pad2; + bool pm_off; + + struct palmas_pmic_platform_data *pmic_pdata; + struct palmas_gpadc_platform_data *gpadc_pdata; + struct palmas_usb_platform_data *usb_pdata; + struct palmas_resource_platform_data *resource_pdata; + struct palmas_clk_platform_data *clk_pdata; +}; + +struct palmas_gpadc_calibration { + s32 gain; + s32 gain_error; + s32 offset_error; +}; + +#define PALMAS_DATASHEET_NAME(_name) "palmas-gpadc-chan-"#_name + +struct palmas_gpadc_result { + s32 raw_code; + s32 corrected_code; + s32 result; +}; + +#define PALMAS_MAX_CHANNELS 16 + +/* Define the tps65917 IRQ numbers */ +enum tps65917_irqs { + /* INT1 registers */ + TPS65917_RESERVED1, + TPS65917_PWRON_IRQ, + TPS65917_LONG_PRESS_KEY_IRQ, + TPS65917_RESERVED2, + TPS65917_PWRDOWN_IRQ, + TPS65917_HOTDIE_IRQ, + TPS65917_VSYS_MON_IRQ, + TPS65917_RESERVED3, + /* INT2 registers */ + TPS65917_RESERVED4, + TPS65917_OTP_ERROR_IRQ, + TPS65917_WDT_IRQ, + TPS65917_RESERVED5, + TPS65917_RESET_IN_IRQ, + TPS65917_FSD_IRQ, + TPS65917_SHORT_IRQ, + TPS65917_RESERVED6, + /* INT3 registers */ + TPS65917_GPADC_AUTO_0_IRQ, + TPS65917_GPADC_AUTO_1_IRQ, + TPS65917_GPADC_EOC_SW_IRQ, + TPS65917_RESREVED6, + TPS65917_RESERVED7, + TPS65917_RESERVED8, + TPS65917_RESERVED9, + TPS65917_VBUS_IRQ, + /* INT4 registers */ + TPS65917_GPIO_0_IRQ, + TPS65917_GPIO_1_IRQ, + TPS65917_GPIO_2_IRQ, + TPS65917_GPIO_3_IRQ, + TPS65917_GPIO_4_IRQ, + TPS65917_GPIO_5_IRQ, + TPS65917_GPIO_6_IRQ, + TPS65917_RESERVED10, + /* Total Number IRQs */ + TPS65917_NUM_IRQ, +}; + +/* Define the palmas IRQ numbers */ +enum palmas_irqs { + /* INT1 registers */ + PALMAS_CHARG_DET_N_VBUS_OVV_IRQ, + PALMAS_PWRON_IRQ, + PALMAS_LONG_PRESS_KEY_IRQ, + PALMAS_RPWRON_IRQ, + PALMAS_PWRDOWN_IRQ, + PALMAS_HOTDIE_IRQ, + PALMAS_VSYS_MON_IRQ, + PALMAS_VBAT_MON_IRQ, + /* INT2 registers */ + PALMAS_RTC_ALARM_IRQ, + PALMAS_RTC_TIMER_IRQ, + PALMAS_WDT_IRQ, + PALMAS_BATREMOVAL_IRQ, + PALMAS_RESET_IN_IRQ, + PALMAS_FBI_BB_IRQ, + PALMAS_SHORT_IRQ, + PALMAS_VAC_ACOK_IRQ, + /* INT3 registers */ + PALMAS_GPADC_AUTO_0_IRQ, + PALMAS_GPADC_AUTO_1_IRQ, + PALMAS_GPADC_EOC_SW_IRQ, + PALMAS_GPADC_EOC_RT_IRQ, + PALMAS_ID_OTG_IRQ, + PALMAS_ID_IRQ, + PALMAS_VBUS_OTG_IRQ, + PALMAS_VBUS_IRQ, + /* INT4 registers */ + PALMAS_GPIO_0_IRQ, + PALMAS_GPIO_1_IRQ, + PALMAS_GPIO_2_IRQ, + PALMAS_GPIO_3_IRQ, + PALMAS_GPIO_4_IRQ, + PALMAS_GPIO_5_IRQ, + PALMAS_GPIO_6_IRQ, + PALMAS_GPIO_7_IRQ, + /* Total Number IRQs */ + PALMAS_NUM_IRQ, +}; + +/* Palmas GPADC Channels */ +enum { + PALMAS_ADC_CH_IN0, + PALMAS_ADC_CH_IN1, + PALMAS_ADC_CH_IN2, + PALMAS_ADC_CH_IN3, + PALMAS_ADC_CH_IN4, + PALMAS_ADC_CH_IN5, + PALMAS_ADC_CH_IN6, + PALMAS_ADC_CH_IN7, + PALMAS_ADC_CH_IN8, + PALMAS_ADC_CH_IN9, + PALMAS_ADC_CH_IN10, + PALMAS_ADC_CH_IN11, + PALMAS_ADC_CH_IN12, + PALMAS_ADC_CH_IN13, + PALMAS_ADC_CH_IN14, + PALMAS_ADC_CH_IN15, + PALMAS_ADC_CH_MAX, +}; + +/* Palmas GPADC Channel0 Current Source */ +enum { + PALMAS_ADC_CH0_CURRENT_SRC_0, + PALMAS_ADC_CH0_CURRENT_SRC_5, + PALMAS_ADC_CH0_CURRENT_SRC_15, + PALMAS_ADC_CH0_CURRENT_SRC_20, +}; + +/* Palmas GPADC Channel3 Current Source */ +enum { + PALMAS_ADC_CH3_CURRENT_SRC_0, + PALMAS_ADC_CH3_CURRENT_SRC_10, + PALMAS_ADC_CH3_CURRENT_SRC_400, + PALMAS_ADC_CH3_CURRENT_SRC_800, +}; + +struct palmas_pmic { + struct palmas *palmas; + struct device *dev; + struct regulator_desc desc[PALMAS_NUM_REGS]; + struct regulator_dev *rdev[PALMAS_NUM_REGS]; + struct mutex mutex; + + int smps123; + int smps457; + int smps12; + + int 
range[PALMAS_REG_SMPS10_OUT1]; + unsigned int ramp_delay[PALMAS_REG_SMPS10_OUT1]; + unsigned int current_reg_mode[PALMAS_REG_SMPS10_OUT1]; +}; + +struct palmas_resource { + struct palmas *palmas; + struct device *dev; +}; + +struct palmas_usb { + struct palmas *palmas; + struct device *dev; + + struct extcon_dev *edev; + + int id_otg_irq; + int id_irq; + int vbus_otg_irq; + int vbus_irq; + + int gpio_id_irq; + int gpio_vbus_irq; + struct gpio_desc *id_gpiod; + struct gpio_desc *vbus_gpiod; + unsigned long sw_debounce_jiffies; + struct delayed_work wq_detectid; + + enum palmas_usb_state linkstat; + int wakeup; + bool enable_vbus_detection; + bool enable_id_detection; + bool enable_gpio_id_detection; + bool enable_gpio_vbus_detection; +}; + +#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator) + +enum usb_irq_events { + /* Wakeup events from INT3 */ + PALMAS_USB_ID_WAKEPUP, + PALMAS_USB_VBUS_WAKEUP, + + /* ID_OTG_EVENTS */ + PALMAS_USB_ID_GND, + N_PALMAS_USB_ID_GND, + PALMAS_USB_ID_C, + N_PALMAS_USB_ID_C, + PALMAS_USB_ID_B, + N_PALMAS_USB_ID_B, + PALMAS_USB_ID_A, + N_PALMAS_USB_ID_A, + PALMAS_USB_ID_FLOAT, + N_PALMAS_USB_ID_FLOAT, + + /* VBUS_OTG_EVENTS */ + PALMAS_USB_VB_SESS_END, + N_PALMAS_USB_VB_SESS_END, + PALMAS_USB_VB_SESS_VLD, + N_PALMAS_USB_VB_SESS_VLD, + PALMAS_USB_VA_SESS_VLD, + N_PALMAS_USB_VA_SESS_VLD, + PALMAS_USB_VA_VBUS_VLD, + N_PALMAS_USB_VA_VBUS_VLD, + PALMAS_USB_VADP_SNS, + N_PALMAS_USB_VADP_SNS, + PALMAS_USB_VADP_PRB, + N_PALMAS_USB_VADP_PRB, + PALMAS_USB_VOTG_SESS_VLD, + N_PALMAS_USB_VOTG_SESS_VLD, +}; + +/* defines so we can store the mux settings */ +#define PALMAS_GPIO_0_MUXED (1 << 0) +#define PALMAS_GPIO_1_MUXED (1 << 1) +#define PALMAS_GPIO_2_MUXED (1 << 2) +#define PALMAS_GPIO_3_MUXED (1 << 3) +#define PALMAS_GPIO_4_MUXED (1 << 4) +#define PALMAS_GPIO_5_MUXED (1 << 5) +#define PALMAS_GPIO_6_MUXED (1 << 6) +#define PALMAS_GPIO_7_MUXED (1 << 7) + +#define PALMAS_LED1_MUXED (1 << 0) +#define PALMAS_LED2_MUXED (1 << 1) + +#define PALMAS_PWM1_MUXED (1 << 0) +#define PALMAS_PWM2_MUXED (1 << 1) + +/* helper macro to get correct slave number */ +#define PALMAS_BASE_TO_SLAVE(x) ((x >> 8) - 1) +#define PALMAS_BASE_TO_REG(x, y) ((x & 0xFF) + y) + +/* Base addresses of IP blocks in Palmas */ +#define PALMAS_SMPS_DVS_BASE 0x020 +#define PALMAS_RTC_BASE 0x100 +#define PALMAS_VALIDITY_BASE 0x118 +#define PALMAS_SMPS_BASE 0x120 +#define PALMAS_LDO_BASE 0x150 +#define PALMAS_DVFS_BASE 0x180 +#define PALMAS_PMU_CONTROL_BASE 0x1A0 +#define PALMAS_RESOURCE_BASE 0x1D4 +#define PALMAS_PU_PD_OD_BASE 0x1F0 +#define PALMAS_LED_BASE 0x200 +#define PALMAS_INTERRUPT_BASE 0x210 +#define PALMAS_USB_OTG_BASE 0x250 +#define PALMAS_VIBRATOR_BASE 0x270 +#define PALMAS_GPIO_BASE 0x280 +#define PALMAS_USB_BASE 0x290 +#define PALMAS_GPADC_BASE 0x2C0 +#define PALMAS_TRIM_GPADC_BASE 0x3CD + +/* Registers for function RTC */ +#define PALMAS_SECONDS_REG 0x00 +#define PALMAS_MINUTES_REG 0x01 +#define PALMAS_HOURS_REG 0x02 +#define PALMAS_DAYS_REG 0x03 +#define PALMAS_MONTHS_REG 0x04 +#define PALMAS_YEARS_REG 0x05 +#define PALMAS_WEEKS_REG 0x06 +#define PALMAS_ALARM_SECONDS_REG 0x08 +#define PALMAS_ALARM_MINUTES_REG 0x09 +#define PALMAS_ALARM_HOURS_REG 0x0A +#define PALMAS_ALARM_DAYS_REG 0x0B +#define PALMAS_ALARM_MONTHS_REG 0x0C +#define PALMAS_ALARM_YEARS_REG 0x0D +#define PALMAS_RTC_CTRL_REG 0x10 +#define PALMAS_RTC_STATUS_REG 0x11 +#define PALMAS_RTC_INTERRUPTS_REG 0x12 +#define PALMAS_RTC_COMP_LSB_REG 0x13 +#define PALMAS_RTC_COMP_MSB_REG 0x14 +#define 
PALMAS_RTC_RES_PROG_REG 0x15 +#define PALMAS_RTC_RESET_STATUS_REG 0x16 + +/* Bit definitions for SECONDS_REG */ +#define PALMAS_SECONDS_REG_SEC1_MASK 0x70 +#define PALMAS_SECONDS_REG_SEC1_SHIFT 0x04 +#define PALMAS_SECONDS_REG_SEC0_MASK 0x0F +#define PALMAS_SECONDS_REG_SEC0_SHIFT 0x00 + +/* Bit definitions for MINUTES_REG */ +#define PALMAS_MINUTES_REG_MIN1_MASK 0x70 +#define PALMAS_MINUTES_REG_MIN1_SHIFT 0x04 +#define PALMAS_MINUTES_REG_MIN0_MASK 0x0F +#define PALMAS_MINUTES_REG_MIN0_SHIFT 0x00 + +/* Bit definitions for HOURS_REG */ +#define PALMAS_HOURS_REG_PM_NAM 0x80 +#define PALMAS_HOURS_REG_PM_NAM_SHIFT 0x07 +#define PALMAS_HOURS_REG_HOUR1_MASK 0x30 +#define PALMAS_HOURS_REG_HOUR1_SHIFT 0x04 +#define PALMAS_HOURS_REG_HOUR0_MASK 0x0F +#define PALMAS_HOURS_REG_HOUR0_SHIFT 0x00 + +/* Bit definitions for DAYS_REG */ +#define PALMAS_DAYS_REG_DAY1_MASK 0x30 +#define PALMAS_DAYS_REG_DAY1_SHIFT 0x04 +#define PALMAS_DAYS_REG_DAY0_MASK 0x0F +#define PALMAS_DAYS_REG_DAY0_SHIFT 0x00 + +/* Bit definitions for MONTHS_REG */ +#define PALMAS_MONTHS_REG_MONTH1 0x10 +#define PALMAS_MONTHS_REG_MONTH1_SHIFT 0x04 +#define PALMAS_MONTHS_REG_MONTH0_MASK 0x0F +#define PALMAS_MONTHS_REG_MONTH0_SHIFT 0x00 + +/* Bit definitions for YEARS_REG */ +#define PALMAS_YEARS_REG_YEAR1_MASK 0xf0 +#define PALMAS_YEARS_REG_YEAR1_SHIFT 0x04 +#define PALMAS_YEARS_REG_YEAR0_MASK 0x0F +#define PALMAS_YEARS_REG_YEAR0_SHIFT 0x00 + +/* Bit definitions for WEEKS_REG */ +#define PALMAS_WEEKS_REG_WEEK_MASK 0x07 +#define PALMAS_WEEKS_REG_WEEK_SHIFT 0x00 + +/* Bit definitions for ALARM_SECONDS_REG */ +#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC1_MASK 0x70 +#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC1_SHIFT 0x04 +#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC0_MASK 0x0F +#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC0_SHIFT 0x00 + +/* Bit definitions for ALARM_MINUTES_REG */ +#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN1_MASK 0x70 +#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN1_SHIFT 0x04 +#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN0_MASK 0x0F +#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN0_SHIFT 0x00 + +/* Bit definitions for ALARM_HOURS_REG */ +#define PALMAS_ALARM_HOURS_REG_ALARM_PM_NAM 0x80 +#define PALMAS_ALARM_HOURS_REG_ALARM_PM_NAM_SHIFT 0x07 +#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR1_MASK 0x30 +#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR1_SHIFT 0x04 +#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR0_MASK 0x0F +#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR0_SHIFT 0x00 + +/* Bit definitions for ALARM_DAYS_REG */ +#define PALMAS_ALARM_DAYS_REG_ALARM_DAY1_MASK 0x30 +#define PALMAS_ALARM_DAYS_REG_ALARM_DAY1_SHIFT 0x04 +#define PALMAS_ALARM_DAYS_REG_ALARM_DAY0_MASK 0x0F +#define PALMAS_ALARM_DAYS_REG_ALARM_DAY0_SHIFT 0x00 + +/* Bit definitions for ALARM_MONTHS_REG */ +#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH1 0x10 +#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH1_SHIFT 0x04 +#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH0_MASK 0x0F +#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH0_SHIFT 0x00 + +/* Bit definitions for ALARM_YEARS_REG */ +#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR1_MASK 0xf0 +#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR1_SHIFT 0x04 +#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR0_MASK 0x0F +#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR0_SHIFT 0x00 + +/* Bit definitions for RTC_CTRL_REG */ +#define PALMAS_RTC_CTRL_REG_RTC_V_OPT 0x80 +#define PALMAS_RTC_CTRL_REG_RTC_V_OPT_SHIFT 0x07 +#define PALMAS_RTC_CTRL_REG_GET_TIME 0x40 +#define PALMAS_RTC_CTRL_REG_GET_TIME_SHIFT 0x06 +#define PALMAS_RTC_CTRL_REG_SET_32_COUNTER 0x20 +#define PALMAS_RTC_CTRL_REG_SET_32_COUNTER_SHIFT 
0x05 +#define PALMAS_RTC_CTRL_REG_TEST_MODE 0x10 +#define PALMAS_RTC_CTRL_REG_TEST_MODE_SHIFT 0x04 +#define PALMAS_RTC_CTRL_REG_MODE_12_24 0x08 +#define PALMAS_RTC_CTRL_REG_MODE_12_24_SHIFT 0x03 +#define PALMAS_RTC_CTRL_REG_AUTO_COMP 0x04 +#define PALMAS_RTC_CTRL_REG_AUTO_COMP_SHIFT 0x02 +#define PALMAS_RTC_CTRL_REG_ROUND_30S 0x02 +#define PALMAS_RTC_CTRL_REG_ROUND_30S_SHIFT 0x01 +#define PALMAS_RTC_CTRL_REG_STOP_RTC 0x01 +#define PALMAS_RTC_CTRL_REG_STOP_RTC_SHIFT 0x00 + +/* Bit definitions for RTC_STATUS_REG */ +#define PALMAS_RTC_STATUS_REG_POWER_UP 0x80 +#define PALMAS_RTC_STATUS_REG_POWER_UP_SHIFT 0x07 +#define PALMAS_RTC_STATUS_REG_ALARM 0x40 +#define PALMAS_RTC_STATUS_REG_ALARM_SHIFT 0x06 +#define PALMAS_RTC_STATUS_REG_EVENT_1D 0x20 +#define PALMAS_RTC_STATUS_REG_EVENT_1D_SHIFT 0x05 +#define PALMAS_RTC_STATUS_REG_EVENT_1H 0x10 +#define PALMAS_RTC_STATUS_REG_EVENT_1H_SHIFT 0x04 +#define PALMAS_RTC_STATUS_REG_EVENT_1M 0x08 +#define PALMAS_RTC_STATUS_REG_EVENT_1M_SHIFT 0x03 +#define PALMAS_RTC_STATUS_REG_EVENT_1S 0x04 +#define PALMAS_RTC_STATUS_REG_EVENT_1S_SHIFT 0x02 +#define PALMAS_RTC_STATUS_REG_RUN 0x02 +#define PALMAS_RTC_STATUS_REG_RUN_SHIFT 0x01 + +/* Bit definitions for RTC_INTERRUPTS_REG */ +#define PALMAS_RTC_INTERRUPTS_REG_IT_SLEEP_MASK_EN 0x10 +#define PALMAS_RTC_INTERRUPTS_REG_IT_SLEEP_MASK_EN_SHIFT 0x04 +#define PALMAS_RTC_INTERRUPTS_REG_IT_ALARM 0x08 +#define PALMAS_RTC_INTERRUPTS_REG_IT_ALARM_SHIFT 0x03 +#define PALMAS_RTC_INTERRUPTS_REG_IT_TIMER 0x04 +#define PALMAS_RTC_INTERRUPTS_REG_IT_TIMER_SHIFT 0x02 +#define PALMAS_RTC_INTERRUPTS_REG_EVERY_MASK 0x03 +#define PALMAS_RTC_INTERRUPTS_REG_EVERY_SHIFT 0x00 + +/* Bit definitions for RTC_COMP_LSB_REG */ +#define PALMAS_RTC_COMP_LSB_REG_RTC_COMP_LSB_MASK 0xFF +#define PALMAS_RTC_COMP_LSB_REG_RTC_COMP_LSB_SHIFT 0x00 + +/* Bit definitions for RTC_COMP_MSB_REG */ +#define PALMAS_RTC_COMP_MSB_REG_RTC_COMP_MSB_MASK 0xFF +#define PALMAS_RTC_COMP_MSB_REG_RTC_COMP_MSB_SHIFT 0x00 + +/* Bit definitions for RTC_RES_PROG_REG */ +#define PALMAS_RTC_RES_PROG_REG_SW_RES_PROG_MASK 0x3F +#define PALMAS_RTC_RES_PROG_REG_SW_RES_PROG_SHIFT 0x00 + +/* Bit definitions for RTC_RESET_STATUS_REG */ +#define PALMAS_RTC_RESET_STATUS_REG_RESET_STATUS 0x01 +#define PALMAS_RTC_RESET_STATUS_REG_RESET_STATUS_SHIFT 0x00 + +/* Registers for function BACKUP */ +#define PALMAS_BACKUP0 0x00 +#define PALMAS_BACKUP1 0x01 +#define PALMAS_BACKUP2 0x02 +#define PALMAS_BACKUP3 0x03 +#define PALMAS_BACKUP4 0x04 +#define PALMAS_BACKUP5 0x05 +#define PALMAS_BACKUP6 0x06 +#define PALMAS_BACKUP7 0x07 + +/* Bit definitions for BACKUP0 */ +#define PALMAS_BACKUP0_BACKUP_MASK 0xFF +#define PALMAS_BACKUP0_BACKUP_SHIFT 0x00 + +/* Bit definitions for BACKUP1 */ +#define PALMAS_BACKUP1_BACKUP_MASK 0xFF +#define PALMAS_BACKUP1_BACKUP_SHIFT 0x00 + +/* Bit definitions for BACKUP2 */ +#define PALMAS_BACKUP2_BACKUP_MASK 0xFF +#define PALMAS_BACKUP2_BACKUP_SHIFT 0x00 + +/* Bit definitions for BACKUP3 */ +#define PALMAS_BACKUP3_BACKUP_MASK 0xFF +#define PALMAS_BACKUP3_BACKUP_SHIFT 0x00 + +/* Bit definitions for BACKUP4 */ +#define PALMAS_BACKUP4_BACKUP_MASK 0xFF +#define PALMAS_BACKUP4_BACKUP_SHIFT 0x00 + +/* Bit definitions for BACKUP5 */ +#define PALMAS_BACKUP5_BACKUP_MASK 0xFF +#define PALMAS_BACKUP5_BACKUP_SHIFT 0x00 + +/* Bit definitions for BACKUP6 */ +#define PALMAS_BACKUP6_BACKUP_MASK 0xFF +#define PALMAS_BACKUP6_BACKUP_SHIFT 0x00 + +/* Bit definitions for BACKUP7 */ +#define PALMAS_BACKUP7_BACKUP_MASK 0xFF +#define PALMAS_BACKUP7_BACKUP_SHIFT 0x00 + +/* Registers for function 
SMPS */ +#define PALMAS_SMPS12_CTRL 0x00 +#define PALMAS_SMPS12_TSTEP 0x01 +#define PALMAS_SMPS12_FORCE 0x02 +#define PALMAS_SMPS12_VOLTAGE 0x03 +#define PALMAS_SMPS3_CTRL 0x04 +#define PALMAS_SMPS3_VOLTAGE 0x07 +#define PALMAS_SMPS45_CTRL 0x08 +#define PALMAS_SMPS45_TSTEP 0x09 +#define PALMAS_SMPS45_FORCE 0x0A +#define PALMAS_SMPS45_VOLTAGE 0x0B +#define PALMAS_SMPS6_CTRL 0x0C +#define PALMAS_SMPS6_TSTEP 0x0D +#define PALMAS_SMPS6_FORCE 0x0E +#define PALMAS_SMPS6_VOLTAGE 0x0F +#define PALMAS_SMPS7_CTRL 0x10 +#define PALMAS_SMPS7_VOLTAGE 0x13 +#define PALMAS_SMPS8_CTRL 0x14 +#define PALMAS_SMPS8_TSTEP 0x15 +#define PALMAS_SMPS8_FORCE 0x16 +#define PALMAS_SMPS8_VOLTAGE 0x17 +#define PALMAS_SMPS9_CTRL 0x18 +#define PALMAS_SMPS9_VOLTAGE 0x1B +#define PALMAS_SMPS10_CTRL 0x1C +#define PALMAS_SMPS10_STATUS 0x1F +#define PALMAS_SMPS_CTRL 0x24 +#define PALMAS_SMPS_PD_CTRL 0x25 +#define PALMAS_SMPS_DITHER_EN 0x26 +#define PALMAS_SMPS_THERMAL_EN 0x27 +#define PALMAS_SMPS_THERMAL_STATUS 0x28 +#define PALMAS_SMPS_SHORT_STATUS 0x29 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN 0x2A +#define PALMAS_SMPS_POWERGOOD_MASK1 0x2B +#define PALMAS_SMPS_POWERGOOD_MASK2 0x2C + +/* Bit definitions for SMPS12_CTRL */ +#define PALMAS_SMPS12_CTRL_WR_S 0x80 +#define PALMAS_SMPS12_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN 0x40 +#define PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define PALMAS_SMPS12_CTRL_STATUS_MASK 0x30 +#define PALMAS_SMPS12_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_SMPS12_CTRL_MODE_SLEEP_MASK 0x0c +#define PALMAS_SMPS12_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK 0x03 +#define PALMAS_SMPS12_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS12_TSTEP */ +#define PALMAS_SMPS12_TSTEP_TSTEP_MASK 0x03 +#define PALMAS_SMPS12_TSTEP_TSTEP_SHIFT 0x00 + +/* Bit definitions for SMPS12_FORCE */ +#define PALMAS_SMPS12_FORCE_CMD 0x80 +#define PALMAS_SMPS12_FORCE_CMD_SHIFT 0x07 +#define PALMAS_SMPS12_FORCE_VSEL_MASK 0x7F +#define PALMAS_SMPS12_FORCE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS12_VOLTAGE */ +#define PALMAS_SMPS12_VOLTAGE_RANGE 0x80 +#define PALMAS_SMPS12_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS12_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS12_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS3_CTRL */ +#define PALMAS_SMPS3_CTRL_WR_S 0x80 +#define PALMAS_SMPS3_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_SMPS3_CTRL_STATUS_MASK 0x30 +#define PALMAS_SMPS3_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_SMPS3_CTRL_MODE_SLEEP_MASK 0x0c +#define PALMAS_SMPS3_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_SMPS3_CTRL_MODE_ACTIVE_MASK 0x03 +#define PALMAS_SMPS3_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS3_VOLTAGE */ +#define PALMAS_SMPS3_VOLTAGE_RANGE 0x80 +#define PALMAS_SMPS3_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS3_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS3_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS45_CTRL */ +#define PALMAS_SMPS45_CTRL_WR_S 0x80 +#define PALMAS_SMPS45_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_SMPS45_CTRL_ROOF_FLOOR_EN 0x40 +#define PALMAS_SMPS45_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define PALMAS_SMPS45_CTRL_STATUS_MASK 0x30 +#define PALMAS_SMPS45_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_SMPS45_CTRL_MODE_SLEEP_MASK 0x0c +#define PALMAS_SMPS45_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_SMPS45_CTRL_MODE_ACTIVE_MASK 0x03 +#define PALMAS_SMPS45_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS45_TSTEP */ +#define PALMAS_SMPS45_TSTEP_TSTEP_MASK 0x03 +#define PALMAS_SMPS45_TSTEP_TSTEP_SHIFT 0x00 + +/* Bit definitions 
for SMPS45_FORCE */ +#define PALMAS_SMPS45_FORCE_CMD 0x80 +#define PALMAS_SMPS45_FORCE_CMD_SHIFT 0x07 +#define PALMAS_SMPS45_FORCE_VSEL_MASK 0x7F +#define PALMAS_SMPS45_FORCE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS45_VOLTAGE */ +#define PALMAS_SMPS45_VOLTAGE_RANGE 0x80 +#define PALMAS_SMPS45_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS45_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS45_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS6_CTRL */ +#define PALMAS_SMPS6_CTRL_WR_S 0x80 +#define PALMAS_SMPS6_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_SMPS6_CTRL_ROOF_FLOOR_EN 0x40 +#define PALMAS_SMPS6_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define PALMAS_SMPS6_CTRL_STATUS_MASK 0x30 +#define PALMAS_SMPS6_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_SMPS6_CTRL_MODE_SLEEP_MASK 0x0c +#define PALMAS_SMPS6_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_SMPS6_CTRL_MODE_ACTIVE_MASK 0x03 +#define PALMAS_SMPS6_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS6_TSTEP */ +#define PALMAS_SMPS6_TSTEP_TSTEP_MASK 0x03 +#define PALMAS_SMPS6_TSTEP_TSTEP_SHIFT 0x00 + +/* Bit definitions for SMPS6_FORCE */ +#define PALMAS_SMPS6_FORCE_CMD 0x80 +#define PALMAS_SMPS6_FORCE_CMD_SHIFT 0x07 +#define PALMAS_SMPS6_FORCE_VSEL_MASK 0x7F +#define PALMAS_SMPS6_FORCE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS6_VOLTAGE */ +#define PALMAS_SMPS6_VOLTAGE_RANGE 0x80 +#define PALMAS_SMPS6_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS6_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS6_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS7_CTRL */ +#define PALMAS_SMPS7_CTRL_WR_S 0x80 +#define PALMAS_SMPS7_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_SMPS7_CTRL_STATUS_MASK 0x30 +#define PALMAS_SMPS7_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_SMPS7_CTRL_MODE_SLEEP_MASK 0x0c +#define PALMAS_SMPS7_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_SMPS7_CTRL_MODE_ACTIVE_MASK 0x03 +#define PALMAS_SMPS7_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS7_VOLTAGE */ +#define PALMAS_SMPS7_VOLTAGE_RANGE 0x80 +#define PALMAS_SMPS7_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS7_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS7_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS8_CTRL */ +#define PALMAS_SMPS8_CTRL_WR_S 0x80 +#define PALMAS_SMPS8_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_SMPS8_CTRL_ROOF_FLOOR_EN 0x40 +#define PALMAS_SMPS8_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define PALMAS_SMPS8_CTRL_STATUS_MASK 0x30 +#define PALMAS_SMPS8_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_SMPS8_CTRL_MODE_SLEEP_MASK 0x0c +#define PALMAS_SMPS8_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_SMPS8_CTRL_MODE_ACTIVE_MASK 0x03 +#define PALMAS_SMPS8_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS8_TSTEP */ +#define PALMAS_SMPS8_TSTEP_TSTEP_MASK 0x03 +#define PALMAS_SMPS8_TSTEP_TSTEP_SHIFT 0x00 + +/* Bit definitions for SMPS8_FORCE */ +#define PALMAS_SMPS8_FORCE_CMD 0x80 +#define PALMAS_SMPS8_FORCE_CMD_SHIFT 0x07 +#define PALMAS_SMPS8_FORCE_VSEL_MASK 0x7F +#define PALMAS_SMPS8_FORCE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS8_VOLTAGE */ +#define PALMAS_SMPS8_VOLTAGE_RANGE 0x80 +#define PALMAS_SMPS8_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS8_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS8_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS9_CTRL */ +#define PALMAS_SMPS9_CTRL_WR_S 0x80 +#define PALMAS_SMPS9_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_SMPS9_CTRL_STATUS_MASK 0x30 +#define PALMAS_SMPS9_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_SMPS9_CTRL_MODE_SLEEP_MASK 0x0c +#define PALMAS_SMPS9_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_SMPS9_CTRL_MODE_ACTIVE_MASK 0x03 +#define 
PALMAS_SMPS9_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS9_VOLTAGE */ +#define PALMAS_SMPS9_VOLTAGE_RANGE 0x80 +#define PALMAS_SMPS9_VOLTAGE_RANGE_SHIFT 0x07 +#define PALMAS_SMPS9_VOLTAGE_VSEL_MASK 0x7F +#define PALMAS_SMPS9_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS10_CTRL */ +#define PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK 0xf0 +#define PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT 0x04 +#define PALMAS_SMPS10_CTRL_MODE_ACTIVE_MASK 0x0F +#define PALMAS_SMPS10_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS10_STATUS */ +#define PALMAS_SMPS10_STATUS_STATUS_MASK 0x0F +#define PALMAS_SMPS10_STATUS_STATUS_SHIFT 0x00 + +/* Bit definitions for SMPS_CTRL */ +#define PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN 0x20 +#define PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN_SHIFT 0x05 +#define PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN 0x10 +#define PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN_SHIFT 0x04 +#define PALMAS_SMPS_CTRL_SMPS45_PHASE_CTRL_MASK 0x0c +#define PALMAS_SMPS_CTRL_SMPS45_PHASE_CTRL_SHIFT 0x02 +#define PALMAS_SMPS_CTRL_SMPS123_PHASE_CTRL_MASK 0x03 +#define PALMAS_SMPS_CTRL_SMPS123_PHASE_CTRL_SHIFT 0x00 + +/* Bit definitions for SMPS_PD_CTRL */ +#define PALMAS_SMPS_PD_CTRL_SMPS9 0x40 +#define PALMAS_SMPS_PD_CTRL_SMPS9_SHIFT 0x06 +#define PALMAS_SMPS_PD_CTRL_SMPS8 0x20 +#define PALMAS_SMPS_PD_CTRL_SMPS8_SHIFT 0x05 +#define PALMAS_SMPS_PD_CTRL_SMPS7 0x10 +#define PALMAS_SMPS_PD_CTRL_SMPS7_SHIFT 0x04 +#define PALMAS_SMPS_PD_CTRL_SMPS6 0x08 +#define PALMAS_SMPS_PD_CTRL_SMPS6_SHIFT 0x03 +#define PALMAS_SMPS_PD_CTRL_SMPS45 0x04 +#define PALMAS_SMPS_PD_CTRL_SMPS45_SHIFT 0x02 +#define PALMAS_SMPS_PD_CTRL_SMPS3 0x02 +#define PALMAS_SMPS_PD_CTRL_SMPS3_SHIFT 0x01 +#define PALMAS_SMPS_PD_CTRL_SMPS12 0x01 +#define PALMAS_SMPS_PD_CTRL_SMPS12_SHIFT 0x00 + +/* Bit definitions for SMPS_THERMAL_EN */ +#define PALMAS_SMPS_THERMAL_EN_SMPS9 0x40 +#define PALMAS_SMPS_THERMAL_EN_SMPS9_SHIFT 0x06 +#define PALMAS_SMPS_THERMAL_EN_SMPS8 0x20 +#define PALMAS_SMPS_THERMAL_EN_SMPS8_SHIFT 0x05 +#define PALMAS_SMPS_THERMAL_EN_SMPS6 0x08 +#define PALMAS_SMPS_THERMAL_EN_SMPS6_SHIFT 0x03 +#define PALMAS_SMPS_THERMAL_EN_SMPS457 0x04 +#define PALMAS_SMPS_THERMAL_EN_SMPS457_SHIFT 0x02 +#define PALMAS_SMPS_THERMAL_EN_SMPS123 0x01 +#define PALMAS_SMPS_THERMAL_EN_SMPS123_SHIFT 0x00 + +/* Bit definitions for SMPS_THERMAL_STATUS */ +#define PALMAS_SMPS_THERMAL_STATUS_SMPS9 0x40 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS9_SHIFT 0x06 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS8 0x20 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS8_SHIFT 0x05 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS6 0x08 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS6_SHIFT 0x03 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS457 0x04 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS457_SHIFT 0x02 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS123 0x01 +#define PALMAS_SMPS_THERMAL_STATUS_SMPS123_SHIFT 0x00 + +/* Bit definitions for SMPS_SHORT_STATUS */ +#define PALMAS_SMPS_SHORT_STATUS_SMPS10 0x80 +#define PALMAS_SMPS_SHORT_STATUS_SMPS10_SHIFT 0x07 +#define PALMAS_SMPS_SHORT_STATUS_SMPS9 0x40 +#define PALMAS_SMPS_SHORT_STATUS_SMPS9_SHIFT 0x06 +#define PALMAS_SMPS_SHORT_STATUS_SMPS8 0x20 +#define PALMAS_SMPS_SHORT_STATUS_SMPS8_SHIFT 0x05 +#define PALMAS_SMPS_SHORT_STATUS_SMPS7 0x10 +#define PALMAS_SMPS_SHORT_STATUS_SMPS7_SHIFT 0x04 +#define PALMAS_SMPS_SHORT_STATUS_SMPS6 0x08 +#define PALMAS_SMPS_SHORT_STATUS_SMPS6_SHIFT 0x03 +#define PALMAS_SMPS_SHORT_STATUS_SMPS45 0x04 +#define PALMAS_SMPS_SHORT_STATUS_SMPS45_SHIFT 0x02 +#define PALMAS_SMPS_SHORT_STATUS_SMPS3 0x02 +#define PALMAS_SMPS_SHORT_STATUS_SMPS3_SHIFT 0x01 
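Every multi-bit field above is described by a *_MASK/*_SHIFT pair: the mask selects the field's bits within the 8-bit register and the shift moves the field down to bit 0. A minimal sketch of that convention, assuming the SMPS12_CTRL macros defined above are in scope; the helper names are illustrative only and not part of the patch:

#include <stdint.h>

/* Illustrative helpers only: decode and update SMPS12_CTRL fields
 * using the masks and shifts defined above. */
static inline uint8_t smps12_ctrl_status(uint8_t reg)
{
	/* STATUS_MASK is 0x30, so the status field sits in bits 5:4. */
	return (reg & PALMAS_SMPS12_CTRL_STATUS_MASK) >>
	       PALMAS_SMPS12_CTRL_STATUS_SHIFT;
}

static inline uint8_t smps12_ctrl_with_active_mode(uint8_t reg, uint8_t mode)
{
	/* Clear the 2-bit active-mode field (bits 1:0), then insert mode. */
	reg &= (uint8_t)~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
	reg |= (uint8_t)((mode << PALMAS_SMPS12_CTRL_MODE_ACTIVE_SHIFT) &
			 PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK);
	return reg;
}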
+#define PALMAS_SMPS_SHORT_STATUS_SMPS12 0x01 +#define PALMAS_SMPS_SHORT_STATUS_SMPS12_SHIFT 0x00 + +/* Bit definitions for SMPS_NEGATIVE_CURRENT_LIMIT_EN */ +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS9 0x40 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS9_SHIFT 0x06 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS8 0x20 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS8_SHIFT 0x05 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS7 0x10 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS7_SHIFT 0x04 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS6 0x08 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS6_SHIFT 0x03 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS45 0x04 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS45_SHIFT 0x02 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3 0x02 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3_SHIFT 0x01 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS12 0x01 +#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS12_SHIFT 0x00 + +/* Bit definitions for SMPS_POWERGOOD_MASK1 */ +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS10 0x80 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS10_SHIFT 0x07 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS9 0x40 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS9_SHIFT 0x06 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS8 0x20 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS8_SHIFT 0x05 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS7 0x10 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS7_SHIFT 0x04 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS6 0x08 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS6_SHIFT 0x03 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS45 0x04 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS45_SHIFT 0x02 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS3 0x02 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS3_SHIFT 0x01 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS12 0x01 +#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS12_SHIFT 0x00 + +/* Bit definitions for SMPS_POWERGOOD_MASK2 */ +#define PALMAS_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT 0x80 +#define PALMAS_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT_SHIFT 0x07 +#define PALMAS_SMPS_POWERGOOD_MASK2_GPIO_7 0x04 +#define PALMAS_SMPS_POWERGOOD_MASK2_GPIO_7_SHIFT 0x02 +#define PALMAS_SMPS_POWERGOOD_MASK2_VBUS 0x02 +#define PALMAS_SMPS_POWERGOOD_MASK2_VBUS_SHIFT 0x01 +#define PALMAS_SMPS_POWERGOOD_MASK2_ACOK 0x01 +#define PALMAS_SMPS_POWERGOOD_MASK2_ACOK_SHIFT 0x00 + +/* Registers for function LDO */ +#define PALMAS_LDO1_CTRL 0x00 +#define PALMAS_LDO1_VOLTAGE 0x01 +#define PALMAS_LDO2_CTRL 0x02 +#define PALMAS_LDO2_VOLTAGE 0x03 +#define PALMAS_LDO3_CTRL 0x04 +#define PALMAS_LDO3_VOLTAGE 0x05 +#define PALMAS_LDO4_CTRL 0x06 +#define PALMAS_LDO4_VOLTAGE 0x07 +#define PALMAS_LDO5_CTRL 0x08 +#define PALMAS_LDO5_VOLTAGE 0x09 +#define PALMAS_LDO6_CTRL 0x0A +#define PALMAS_LDO6_VOLTAGE 0x0B +#define PALMAS_LDO7_CTRL 0x0C +#define PALMAS_LDO7_VOLTAGE 0x0D +#define PALMAS_LDO8_CTRL 0x0E +#define PALMAS_LDO8_VOLTAGE 0x0F +#define PALMAS_LDO9_CTRL 0x10 +#define PALMAS_LDO9_VOLTAGE 0x11 +#define PALMAS_LDOLN_CTRL 0x12 +#define PALMAS_LDOLN_VOLTAGE 0x13 +#define PALMAS_LDOUSB_CTRL 0x14 +#define PALMAS_LDOUSB_VOLTAGE 0x15 +#define PALMAS_LDO_CTRL 0x1A +#define PALMAS_LDO_PD_CTRL1 0x1B +#define PALMAS_LDO_PD_CTRL2 0x1C +#define PALMAS_LDO_SHORT_STATUS1 0x1D +#define PALMAS_LDO_SHORT_STATUS2 0x1E + +/* Bit definitions for LDO1_CTRL */ +#define PALMAS_LDO1_CTRL_WR_S 0x80 +#define PALMAS_LDO1_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_LDO1_CTRL_STATUS 0x10 +#define PALMAS_LDO1_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDO1_CTRL_MODE_SLEEP 0x04 
+#define PALMAS_LDO1_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDO1_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDO1_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO1_VOLTAGE */ +#define PALMAS_LDO1_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO1_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO2_CTRL */ +#define PALMAS_LDO2_CTRL_WR_S 0x80 +#define PALMAS_LDO2_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_LDO2_CTRL_STATUS 0x10 +#define PALMAS_LDO2_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDO2_CTRL_MODE_SLEEP 0x04 +#define PALMAS_LDO2_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDO2_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDO2_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO2_VOLTAGE */ +#define PALMAS_LDO2_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO2_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO3_CTRL */ +#define PALMAS_LDO3_CTRL_WR_S 0x80 +#define PALMAS_LDO3_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_LDO3_CTRL_STATUS 0x10 +#define PALMAS_LDO3_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDO3_CTRL_MODE_SLEEP 0x04 +#define PALMAS_LDO3_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDO3_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDO3_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO3_VOLTAGE */ +#define PALMAS_LDO3_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO3_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO4_CTRL */ +#define PALMAS_LDO4_CTRL_WR_S 0x80 +#define PALMAS_LDO4_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_LDO4_CTRL_STATUS 0x10 +#define PALMAS_LDO4_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDO4_CTRL_MODE_SLEEP 0x04 +#define PALMAS_LDO4_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDO4_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDO4_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO4_VOLTAGE */ +#define PALMAS_LDO4_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO4_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO5_CTRL */ +#define PALMAS_LDO5_CTRL_WR_S 0x80 +#define PALMAS_LDO5_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_LDO5_CTRL_STATUS 0x10 +#define PALMAS_LDO5_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDO5_CTRL_MODE_SLEEP 0x04 +#define PALMAS_LDO5_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDO5_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDO5_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO5_VOLTAGE */ +#define PALMAS_LDO5_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO5_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO6_CTRL */ +#define PALMAS_LDO6_CTRL_WR_S 0x80 +#define PALMAS_LDO6_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_LDO6_CTRL_LDO_VIB_EN 0x40 +#define PALMAS_LDO6_CTRL_LDO_VIB_EN_SHIFT 0x06 +#define PALMAS_LDO6_CTRL_STATUS 0x10 +#define PALMAS_LDO6_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDO6_CTRL_MODE_SLEEP 0x04 +#define PALMAS_LDO6_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDO6_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDO6_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO6_VOLTAGE */ +#define PALMAS_LDO6_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO6_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO7_CTRL */ +#define PALMAS_LDO7_CTRL_WR_S 0x80 +#define PALMAS_LDO7_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_LDO7_CTRL_STATUS 0x10 +#define PALMAS_LDO7_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDO7_CTRL_MODE_SLEEP 0x04 +#define PALMAS_LDO7_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDO7_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDO7_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO7_VOLTAGE */ +#define PALMAS_LDO7_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO7_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO8_CTRL */ +#define PALMAS_LDO8_CTRL_WR_S 0x80 +#define PALMAS_LDO8_CTRL_WR_S_SHIFT 0x07 
+#define PALMAS_LDO8_CTRL_LDO_TRACKING_EN 0x40 +#define PALMAS_LDO8_CTRL_LDO_TRACKING_EN_SHIFT 0x06 +#define PALMAS_LDO8_CTRL_STATUS 0x10 +#define PALMAS_LDO8_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDO8_CTRL_MODE_SLEEP 0x04 +#define PALMAS_LDO8_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDO8_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDO8_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO8_VOLTAGE */ +#define PALMAS_LDO8_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO8_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO9_CTRL */ +#define PALMAS_LDO9_CTRL_WR_S 0x80 +#define PALMAS_LDO9_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_LDO9_CTRL_LDO_BYPASS_EN 0x40 +#define PALMAS_LDO9_CTRL_LDO_BYPASS_EN_SHIFT 0x06 +#define PALMAS_LDO9_CTRL_STATUS 0x10 +#define PALMAS_LDO9_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDO9_CTRL_MODE_SLEEP 0x04 +#define PALMAS_LDO9_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDO9_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDO9_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO9_VOLTAGE */ +#define PALMAS_LDO9_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDO9_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDOLN_CTRL */ +#define PALMAS_LDOLN_CTRL_WR_S 0x80 +#define PALMAS_LDOLN_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_LDOLN_CTRL_STATUS 0x10 +#define PALMAS_LDOLN_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDOLN_CTRL_MODE_SLEEP 0x04 +#define PALMAS_LDOLN_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDOLN_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDOLN_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDOLN_VOLTAGE */ +#define PALMAS_LDOLN_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDOLN_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDOUSB_CTRL */ +#define PALMAS_LDOUSB_CTRL_WR_S 0x80 +#define PALMAS_LDOUSB_CTRL_WR_S_SHIFT 0x07 +#define PALMAS_LDOUSB_CTRL_STATUS 0x10 +#define PALMAS_LDOUSB_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_LDOUSB_CTRL_MODE_SLEEP 0x04 +#define PALMAS_LDOUSB_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_LDOUSB_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_LDOUSB_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDOUSB_VOLTAGE */ +#define PALMAS_LDOUSB_VOLTAGE_VSEL_MASK 0x3F +#define PALMAS_LDOUSB_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO_CTRL */ +#define PALMAS_LDO_CTRL_LDOUSB_ON_VBUS_VSYS 0x01 +#define PALMAS_LDO_CTRL_LDOUSB_ON_VBUS_VSYS_SHIFT 0x00 + +/* Bit definitions for LDO_PD_CTRL1 */ +#define PALMAS_LDO_PD_CTRL1_LDO8 0x80 +#define PALMAS_LDO_PD_CTRL1_LDO8_SHIFT 0x07 +#define PALMAS_LDO_PD_CTRL1_LDO7 0x40 +#define PALMAS_LDO_PD_CTRL1_LDO7_SHIFT 0x06 +#define PALMAS_LDO_PD_CTRL1_LDO6 0x20 +#define PALMAS_LDO_PD_CTRL1_LDO6_SHIFT 0x05 +#define PALMAS_LDO_PD_CTRL1_LDO5 0x10 +#define PALMAS_LDO_PD_CTRL1_LDO5_SHIFT 0x04 +#define PALMAS_LDO_PD_CTRL1_LDO4 0x08 +#define PALMAS_LDO_PD_CTRL1_LDO4_SHIFT 0x03 +#define PALMAS_LDO_PD_CTRL1_LDO3 0x04 +#define PALMAS_LDO_PD_CTRL1_LDO3_SHIFT 0x02 +#define PALMAS_LDO_PD_CTRL1_LDO2 0x02 +#define PALMAS_LDO_PD_CTRL1_LDO2_SHIFT 0x01 +#define PALMAS_LDO_PD_CTRL1_LDO1 0x01 +#define PALMAS_LDO_PD_CTRL1_LDO1_SHIFT 0x00 + +/* Bit definitions for LDO_PD_CTRL2 */ +#define PALMAS_LDO_PD_CTRL2_LDOUSB 0x04 +#define PALMAS_LDO_PD_CTRL2_LDOUSB_SHIFT 0x02 +#define PALMAS_LDO_PD_CTRL2_LDOLN 0x02 +#define PALMAS_LDO_PD_CTRL2_LDOLN_SHIFT 0x01 +#define PALMAS_LDO_PD_CTRL2_LDO9 0x01 +#define PALMAS_LDO_PD_CTRL2_LDO9_SHIFT 0x00 + +/* Bit definitions for LDO_SHORT_STATUS1 */ +#define PALMAS_LDO_SHORT_STATUS1_LDO8 0x80 +#define PALMAS_LDO_SHORT_STATUS1_LDO8_SHIFT 0x07 +#define PALMAS_LDO_SHORT_STATUS1_LDO7 0x40 +#define PALMAS_LDO_SHORT_STATUS1_LDO7_SHIFT 
0x06 +#define PALMAS_LDO_SHORT_STATUS1_LDO6 0x20 +#define PALMAS_LDO_SHORT_STATUS1_LDO6_SHIFT 0x05 +#define PALMAS_LDO_SHORT_STATUS1_LDO5 0x10 +#define PALMAS_LDO_SHORT_STATUS1_LDO5_SHIFT 0x04 +#define PALMAS_LDO_SHORT_STATUS1_LDO4 0x08 +#define PALMAS_LDO_SHORT_STATUS1_LDO4_SHIFT 0x03 +#define PALMAS_LDO_SHORT_STATUS1_LDO3 0x04 +#define PALMAS_LDO_SHORT_STATUS1_LDO3_SHIFT 0x02 +#define PALMAS_LDO_SHORT_STATUS1_LDO2 0x02 +#define PALMAS_LDO_SHORT_STATUS1_LDO2_SHIFT 0x01 +#define PALMAS_LDO_SHORT_STATUS1_LDO1 0x01 +#define PALMAS_LDO_SHORT_STATUS1_LDO1_SHIFT 0x00 + +/* Bit definitions for LDO_SHORT_STATUS2 */ +#define PALMAS_LDO_SHORT_STATUS2_LDOVANA 0x08 +#define PALMAS_LDO_SHORT_STATUS2_LDOVANA_SHIFT 0x03 +#define PALMAS_LDO_SHORT_STATUS2_LDOUSB 0x04 +#define PALMAS_LDO_SHORT_STATUS2_LDOUSB_SHIFT 0x02 +#define PALMAS_LDO_SHORT_STATUS2_LDOLN 0x02 +#define PALMAS_LDO_SHORT_STATUS2_LDOLN_SHIFT 0x01 +#define PALMAS_LDO_SHORT_STATUS2_LDO9 0x01 +#define PALMAS_LDO_SHORT_STATUS2_LDO9_SHIFT 0x00 + +/* Registers for function PMU_CONTROL */ +#define PALMAS_DEV_CTRL 0x00 +#define PALMAS_POWER_CTRL 0x01 +#define PALMAS_VSYS_LO 0x02 +#define PALMAS_VSYS_MON 0x03 +#define PALMAS_VBAT_MON 0x04 +#define PALMAS_WATCHDOG 0x05 +#define PALMAS_BOOT_STATUS 0x06 +#define PALMAS_BATTERY_BOUNCE 0x07 +#define PALMAS_BACKUP_BATTERY_CTRL 0x08 +#define PALMAS_LONG_PRESS_KEY 0x09 +#define PALMAS_OSC_THERM_CTRL 0x0A +#define PALMAS_BATDEBOUNCING 0x0B +#define PALMAS_SWOFF_HWRST 0x0F +#define PALMAS_SWOFF_COLDRST 0x10 +#define PALMAS_SWOFF_STATUS 0x11 +#define PALMAS_PMU_CONFIG 0x12 +#define PALMAS_SPARE 0x14 +#define PALMAS_PMU_SECONDARY_INT 0x15 +#define PALMAS_SW_REVISION 0x17 +#define PALMAS_EXT_CHRG_CTRL 0x18 +#define PALMAS_PMU_SECONDARY_INT2 0x19 + +/* Bit definitions for DEV_CTRL */ +#define PALMAS_DEV_CTRL_DEV_STATUS_MASK 0x0c +#define PALMAS_DEV_CTRL_DEV_STATUS_SHIFT 0x02 +#define PALMAS_DEV_CTRL_SW_RST 0x02 +#define PALMAS_DEV_CTRL_SW_RST_SHIFT 0x01 +#define PALMAS_DEV_CTRL_DEV_ON 0x01 +#define PALMAS_DEV_CTRL_DEV_ON_SHIFT 0x00 + +/* Bit definitions for POWER_CTRL */ +#define PALMAS_POWER_CTRL_ENABLE2_MASK 0x04 +#define PALMAS_POWER_CTRL_ENABLE2_MASK_SHIFT 0x02 +#define PALMAS_POWER_CTRL_ENABLE1_MASK 0x02 +#define PALMAS_POWER_CTRL_ENABLE1_MASK_SHIFT 0x01 +#define PALMAS_POWER_CTRL_NSLEEP_MASK 0x01 +#define PALMAS_POWER_CTRL_NSLEEP_MASK_SHIFT 0x00 + +/* Bit definitions for VSYS_LO */ +#define PALMAS_VSYS_LO_THRESHOLD_MASK 0x1F +#define PALMAS_VSYS_LO_THRESHOLD_SHIFT 0x00 + +/* Bit definitions for VSYS_MON */ +#define PALMAS_VSYS_MON_ENABLE 0x80 +#define PALMAS_VSYS_MON_ENABLE_SHIFT 0x07 +#define PALMAS_VSYS_MON_THRESHOLD_MASK 0x3F +#define PALMAS_VSYS_MON_THRESHOLD_SHIFT 0x00 + +/* Bit definitions for VBAT_MON */ +#define PALMAS_VBAT_MON_ENABLE 0x80 +#define PALMAS_VBAT_MON_ENABLE_SHIFT 0x07 +#define PALMAS_VBAT_MON_THRESHOLD_MASK 0x3F +#define PALMAS_VBAT_MON_THRESHOLD_SHIFT 0x00 + +/* Bit definitions for WATCHDOG */ +#define PALMAS_WATCHDOG_LOCK 0x20 +#define PALMAS_WATCHDOG_LOCK_SHIFT 0x05 +#define PALMAS_WATCHDOG_ENABLE 0x10 +#define PALMAS_WATCHDOG_ENABLE_SHIFT 0x04 +#define PALMAS_WATCHDOG_MODE 0x08 +#define PALMAS_WATCHDOG_MODE_SHIFT 0x03 +#define PALMAS_WATCHDOG_TIMER_MASK 0x07 +#define PALMAS_WATCHDOG_TIMER_SHIFT 0x00 + +/* Bit definitions for BOOT_STATUS */ +#define PALMAS_BOOT_STATUS_BOOT1 0x02 +#define PALMAS_BOOT_STATUS_BOOT1_SHIFT 0x01 +#define PALMAS_BOOT_STATUS_BOOT0 0x01 +#define PALMAS_BOOT_STATUS_BOOT0_SHIFT 0x00 + +/* Bit definitions for BATTERY_BOUNCE */ +#define 
PALMAS_BATTERY_BOUNCE_BB_DELAY_MASK 0x3F +#define PALMAS_BATTERY_BOUNCE_BB_DELAY_SHIFT 0x00 + +/* Bit definitions for BACKUP_BATTERY_CTRL */ +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_18_15 0x80 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_18_15_SHIFT 0x07 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_SLP 0x40 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_SLP_SHIFT 0x06 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_OFF 0x20 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_OFF_SHIFT 0x05 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_PWEN 0x10 +#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_PWEN_SHIFT 0x04 +#define PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG 0x08 +#define PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG_SHIFT 0x03 +#define PALMAS_BACKUP_BATTERY_CTRL_BB_SEL_MASK 0x06 +#define PALMAS_BACKUP_BATTERY_CTRL_BB_SEL_SHIFT 0x01 +#define PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN 0x01 +#define PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN_SHIFT 0x00 + +/* Bit definitions for LONG_PRESS_KEY */ +#define PALMAS_LONG_PRESS_KEY_LPK_LOCK 0x80 +#define PALMAS_LONG_PRESS_KEY_LPK_LOCK_SHIFT 0x07 +#define PALMAS_LONG_PRESS_KEY_LPK_INT_CLR 0x10 +#define PALMAS_LONG_PRESS_KEY_LPK_INT_CLR_SHIFT 0x04 +#define PALMAS_LONG_PRESS_KEY_LPK_TIME_MASK 0x0c +#define PALMAS_LONG_PRESS_KEY_LPK_TIME_SHIFT 0x02 +#define PALMAS_LONG_PRESS_KEY_PWRON_DEBOUNCE_MASK 0x03 +#define PALMAS_LONG_PRESS_KEY_PWRON_DEBOUNCE_SHIFT 0x00 + +/* Bit definitions for OSC_THERM_CTRL */ +#define PALMAS_OSC_THERM_CTRL_VANA_ON_IN_SLEEP 0x80 +#define PALMAS_OSC_THERM_CTRL_VANA_ON_IN_SLEEP_SHIFT 0x07 +#define PALMAS_OSC_THERM_CTRL_INT_MASK_IN_SLEEP 0x40 +#define PALMAS_OSC_THERM_CTRL_INT_MASK_IN_SLEEP_SHIFT 0x06 +#define PALMAS_OSC_THERM_CTRL_RC15MHZ_ON_IN_SLEEP 0x20 +#define PALMAS_OSC_THERM_CTRL_RC15MHZ_ON_IN_SLEEP_SHIFT 0x05 +#define PALMAS_OSC_THERM_CTRL_THERM_OFF_IN_SLEEP 0x10 +#define PALMAS_OSC_THERM_CTRL_THERM_OFF_IN_SLEEP_SHIFT 0x04 +#define PALMAS_OSC_THERM_CTRL_THERM_HD_SEL_MASK 0x0c +#define PALMAS_OSC_THERM_CTRL_THERM_HD_SEL_SHIFT 0x02 +#define PALMAS_OSC_THERM_CTRL_OSC_BYPASS 0x02 +#define PALMAS_OSC_THERM_CTRL_OSC_BYPASS_SHIFT 0x01 +#define PALMAS_OSC_THERM_CTRL_OSC_HPMODE 0x01 +#define PALMAS_OSC_THERM_CTRL_OSC_HPMODE_SHIFT 0x00 + +/* Bit definitions for BATDEBOUNCING */ +#define PALMAS_BATDEBOUNCING_BAT_DEB_BYPASS 0x80 +#define PALMAS_BATDEBOUNCING_BAT_DEB_BYPASS_SHIFT 0x07 +#define PALMAS_BATDEBOUNCING_BINS_DEB_MASK 0x78 +#define PALMAS_BATDEBOUNCING_BINS_DEB_SHIFT 0x03 +#define PALMAS_BATDEBOUNCING_BEXT_DEB_MASK 0x07 +#define PALMAS_BATDEBOUNCING_BEXT_DEB_SHIFT 0x00 + +/* Bit definitions for SWOFF_HWRST */ +#define PALMAS_SWOFF_HWRST_PWRON_LPK 0x80 +#define PALMAS_SWOFF_HWRST_PWRON_LPK_SHIFT 0x07 +#define PALMAS_SWOFF_HWRST_PWRDOWN 0x40 +#define PALMAS_SWOFF_HWRST_PWRDOWN_SHIFT 0x06 +#define PALMAS_SWOFF_HWRST_WTD 0x20 +#define PALMAS_SWOFF_HWRST_WTD_SHIFT 0x05 +#define PALMAS_SWOFF_HWRST_TSHUT 0x10 +#define PALMAS_SWOFF_HWRST_TSHUT_SHIFT 0x04 +#define PALMAS_SWOFF_HWRST_RESET_IN 0x08 +#define PALMAS_SWOFF_HWRST_RESET_IN_SHIFT 0x03 +#define PALMAS_SWOFF_HWRST_SW_RST 0x04 +#define PALMAS_SWOFF_HWRST_SW_RST_SHIFT 0x02 +#define PALMAS_SWOFF_HWRST_VSYS_LO 0x02 +#define PALMAS_SWOFF_HWRST_VSYS_LO_SHIFT 0x01 +#define PALMAS_SWOFF_HWRST_GPADC_SHUTDOWN 0x01 +#define PALMAS_SWOFF_HWRST_GPADC_SHUTDOWN_SHIFT 0x00 + +/* Bit definitions for SWOFF_COLDRST */ +#define PALMAS_SWOFF_COLDRST_PWRON_LPK 0x80 +#define PALMAS_SWOFF_COLDRST_PWRON_LPK_SHIFT 0x07 +#define PALMAS_SWOFF_COLDRST_PWRDOWN 0x40 +#define PALMAS_SWOFF_COLDRST_PWRDOWN_SHIFT 0x06 +#define PALMAS_SWOFF_COLDRST_WTD 
0x20 +#define PALMAS_SWOFF_COLDRST_WTD_SHIFT 0x05 +#define PALMAS_SWOFF_COLDRST_TSHUT 0x10 +#define PALMAS_SWOFF_COLDRST_TSHUT_SHIFT 0x04 +#define PALMAS_SWOFF_COLDRST_RESET_IN 0x08 +#define PALMAS_SWOFF_COLDRST_RESET_IN_SHIFT 0x03 +#define PALMAS_SWOFF_COLDRST_SW_RST 0x04 +#define PALMAS_SWOFF_COLDRST_SW_RST_SHIFT 0x02 +#define PALMAS_SWOFF_COLDRST_VSYS_LO 0x02 +#define PALMAS_SWOFF_COLDRST_VSYS_LO_SHIFT 0x01 +#define PALMAS_SWOFF_COLDRST_GPADC_SHUTDOWN 0x01 +#define PALMAS_SWOFF_COLDRST_GPADC_SHUTDOWN_SHIFT 0x00 + +/* Bit definitions for SWOFF_STATUS */ +#define PALMAS_SWOFF_STATUS_PWRON_LPK 0x80 +#define PALMAS_SWOFF_STATUS_PWRON_LPK_SHIFT 0x07 +#define PALMAS_SWOFF_STATUS_PWRDOWN 0x40 +#define PALMAS_SWOFF_STATUS_PWRDOWN_SHIFT 0x06 +#define PALMAS_SWOFF_STATUS_WTD 0x20 +#define PALMAS_SWOFF_STATUS_WTD_SHIFT 0x05 +#define PALMAS_SWOFF_STATUS_TSHUT 0x10 +#define PALMAS_SWOFF_STATUS_TSHUT_SHIFT 0x04 +#define PALMAS_SWOFF_STATUS_RESET_IN 0x08 +#define PALMAS_SWOFF_STATUS_RESET_IN_SHIFT 0x03 +#define PALMAS_SWOFF_STATUS_SW_RST 0x04 +#define PALMAS_SWOFF_STATUS_SW_RST_SHIFT 0x02 +#define PALMAS_SWOFF_STATUS_VSYS_LO 0x02 +#define PALMAS_SWOFF_STATUS_VSYS_LO_SHIFT 0x01 +#define PALMAS_SWOFF_STATUS_GPADC_SHUTDOWN 0x01 +#define PALMAS_SWOFF_STATUS_GPADC_SHUTDOWN_SHIFT 0x00 + +/* Bit definitions for PMU_CONFIG */ +#define PALMAS_PMU_CONFIG_MULTI_CELL_EN 0x40 +#define PALMAS_PMU_CONFIG_MULTI_CELL_EN_SHIFT 0x06 +#define PALMAS_PMU_CONFIG_SPARE_MASK 0x30 +#define PALMAS_PMU_CONFIG_SPARE_SHIFT 0x04 +#define PALMAS_PMU_CONFIG_SWOFF_DLY_MASK 0x0c +#define PALMAS_PMU_CONFIG_SWOFF_DLY_SHIFT 0x02 +#define PALMAS_PMU_CONFIG_GATE_RESET_OUT 0x02 +#define PALMAS_PMU_CONFIG_GATE_RESET_OUT_SHIFT 0x01 +#define PALMAS_PMU_CONFIG_AUTODEVON 0x01 +#define PALMAS_PMU_CONFIG_AUTODEVON_SHIFT 0x00 + +/* Bit definitions for SPARE */ +#define PALMAS_SPARE_SPARE_MASK 0xf8 +#define PALMAS_SPARE_SPARE_SHIFT 0x03 +#define PALMAS_SPARE_REGEN3_OD 0x04 +#define PALMAS_SPARE_REGEN3_OD_SHIFT 0x02 +#define PALMAS_SPARE_REGEN2_OD 0x02 +#define PALMAS_SPARE_REGEN2_OD_SHIFT 0x01 +#define PALMAS_SPARE_REGEN1_OD 0x01 +#define PALMAS_SPARE_REGEN1_OD_SHIFT 0x00 + +/* Bit definitions for PMU_SECONDARY_INT */ +#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_INT_SRC 0x80 +#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_INT_SRC_SHIFT 0x07 +#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_INT_SRC 0x40 +#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_INT_SRC_SHIFT 0x06 +#define PALMAS_PMU_SECONDARY_INT_BB_INT_SRC 0x20 +#define PALMAS_PMU_SECONDARY_INT_BB_INT_SRC_SHIFT 0x05 +#define PALMAS_PMU_SECONDARY_INT_FBI_INT_SRC 0x10 +#define PALMAS_PMU_SECONDARY_INT_FBI_INT_SRC_SHIFT 0x04 +#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_MASK 0x08 +#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_MASK_SHIFT 0x03 +#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_MASK 0x04 +#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_MASK_SHIFT 0x02 +#define PALMAS_PMU_SECONDARY_INT_BB_MASK 0x02 +#define PALMAS_PMU_SECONDARY_INT_BB_MASK_SHIFT 0x01 +#define PALMAS_PMU_SECONDARY_INT_FBI_MASK 0x01 +#define PALMAS_PMU_SECONDARY_INT_FBI_MASK_SHIFT 0x00 + +/* Bit definitions for SW_REVISION */ +#define PALMAS_SW_REVISION_SW_REVISION_MASK 0xFF +#define PALMAS_SW_REVISION_SW_REVISION_SHIFT 0x00 + +/* Bit definitions for EXT_CHRG_CTRL */ +#define PALMAS_EXT_CHRG_CTRL_VBUS_OVV_STATUS 0x80 +#define PALMAS_EXT_CHRG_CTRL_VBUS_OVV_STATUS_SHIFT 0x07 +#define PALMAS_EXT_CHRG_CTRL_CHARG_DET_N_STATUS 0x40 +#define PALMAS_EXT_CHRG_CTRL_CHARG_DET_N_STATUS_SHIFT 0x06 +#define PALMAS_EXT_CHRG_CTRL_VSYS_DEBOUNCE_DELAY 0x08 
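Single-bit flags follow the same naming scheme with an implicit width of one (the bare macro is the bit value, the _SHIFT macro its position). A short sketch, assuming the WATCHDOG macros defined earlier are in scope; the function name and timer_val parameter are only for the example:

#include <stdint.h>

/* Illustrative only: build a WATCHDOG register value that sets the
 * ENABLE flag and programs the 3-bit TIMER field. */
static inline uint8_t example_watchdog_value(uint8_t timer_val)
{
	uint8_t reg = PALMAS_WATCHDOG_ENABLE;

	/* TIMER occupies bits 2:0; masking keeps out-of-range values in bounds. */
	reg |= (timer_val << PALMAS_WATCHDOG_TIMER_SHIFT) &
	       PALMAS_WATCHDOG_TIMER_MASK;
	return reg;
}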
+#define PALMAS_EXT_CHRG_CTRL_VSYS_DEBOUNCE_DELAY_SHIFT 0x03 +#define PALMAS_EXT_CHRG_CTRL_CHRG_DET_N 0x04 +#define PALMAS_EXT_CHRG_CTRL_CHRG_DET_N_SHIFT 0x02 +#define PALMAS_EXT_CHRG_CTRL_AUTO_ACA_EN 0x02 +#define PALMAS_EXT_CHRG_CTRL_AUTO_ACA_EN_SHIFT 0x01 +#define PALMAS_EXT_CHRG_CTRL_AUTO_LDOUSB_EN 0x01 +#define PALMAS_EXT_CHRG_CTRL_AUTO_LDOUSB_EN_SHIFT 0x00 + +/* Bit definitions for PMU_SECONDARY_INT2 */ +#define PALMAS_PMU_SECONDARY_INT2_DVFS2_INT_SRC 0x20 +#define PALMAS_PMU_SECONDARY_INT2_DVFS2_INT_SRC_SHIFT 0x05 +#define PALMAS_PMU_SECONDARY_INT2_DVFS1_INT_SRC 0x10 +#define PALMAS_PMU_SECONDARY_INT2_DVFS1_INT_SRC_SHIFT 0x04 +#define PALMAS_PMU_SECONDARY_INT2_DVFS2_MASK 0x02 +#define PALMAS_PMU_SECONDARY_INT2_DVFS2_MASK_SHIFT 0x01 +#define PALMAS_PMU_SECONDARY_INT2_DVFS1_MASK 0x01 +#define PALMAS_PMU_SECONDARY_INT2_DVFS1_MASK_SHIFT 0x00 + +/* Registers for function RESOURCE */ +#define PALMAS_CLK32KG_CTRL 0x00 +#define PALMAS_CLK32KGAUDIO_CTRL 0x01 +#define PALMAS_REGEN1_CTRL 0x02 +#define PALMAS_REGEN2_CTRL 0x03 +#define PALMAS_SYSEN1_CTRL 0x04 +#define PALMAS_SYSEN2_CTRL 0x05 +#define PALMAS_NSLEEP_RES_ASSIGN 0x06 +#define PALMAS_NSLEEP_SMPS_ASSIGN 0x07 +#define PALMAS_NSLEEP_LDO_ASSIGN1 0x08 +#define PALMAS_NSLEEP_LDO_ASSIGN2 0x09 +#define PALMAS_ENABLE1_RES_ASSIGN 0x0A +#define PALMAS_ENABLE1_SMPS_ASSIGN 0x0B +#define PALMAS_ENABLE1_LDO_ASSIGN1 0x0C +#define PALMAS_ENABLE1_LDO_ASSIGN2 0x0D +#define PALMAS_ENABLE2_RES_ASSIGN 0x0E +#define PALMAS_ENABLE2_SMPS_ASSIGN 0x0F +#define PALMAS_ENABLE2_LDO_ASSIGN1 0x10 +#define PALMAS_ENABLE2_LDO_ASSIGN2 0x11 +#define PALMAS_REGEN3_CTRL 0x12 + +/* Bit definitions for CLK32KG_CTRL */ +#define PALMAS_CLK32KG_CTRL_STATUS 0x10 +#define PALMAS_CLK32KG_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_CLK32KG_CTRL_MODE_SLEEP 0x04 +#define PALMAS_CLK32KG_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_CLK32KG_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_CLK32KG_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for CLK32KGAUDIO_CTRL */ +#define PALMAS_CLK32KGAUDIO_CTRL_STATUS 0x10 +#define PALMAS_CLK32KGAUDIO_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_CLK32KGAUDIO_CTRL_RESERVED3 0x08 +#define PALMAS_CLK32KGAUDIO_CTRL_RESERVED3_SHIFT 0x03 +#define PALMAS_CLK32KGAUDIO_CTRL_MODE_SLEEP 0x04 +#define PALMAS_CLK32KGAUDIO_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_CLK32KGAUDIO_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_CLK32KGAUDIO_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for REGEN1_CTRL */ +#define PALMAS_REGEN1_CTRL_STATUS 0x10 +#define PALMAS_REGEN1_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_REGEN1_CTRL_MODE_SLEEP 0x04 +#define PALMAS_REGEN1_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_REGEN1_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_REGEN1_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for REGEN2_CTRL */ +#define PALMAS_REGEN2_CTRL_STATUS 0x10 +#define PALMAS_REGEN2_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_REGEN2_CTRL_MODE_SLEEP 0x04 +#define PALMAS_REGEN2_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_REGEN2_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_REGEN2_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SYSEN1_CTRL */ +#define PALMAS_SYSEN1_CTRL_STATUS 0x10 +#define PALMAS_SYSEN1_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_SYSEN1_CTRL_MODE_SLEEP 0x04 +#define PALMAS_SYSEN1_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_SYSEN1_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_SYSEN1_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SYSEN2_CTRL */ +#define PALMAS_SYSEN2_CTRL_STATUS 0x10 +#define PALMAS_SYSEN2_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_SYSEN2_CTRL_MODE_SLEEP 0x04 +#define 
PALMAS_SYSEN2_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_SYSEN2_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_SYSEN2_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for NSLEEP_RES_ASSIGN */ +#define PALMAS_NSLEEP_RES_ASSIGN_REGEN3 0x40 +#define PALMAS_NSLEEP_RES_ASSIGN_REGEN3_SHIFT 0x06 +#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KGAUDIO 0x20 +#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KGAUDIO_SHIFT 0x05 +#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KG 0x10 +#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KG_SHIFT 0x04 +#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN2 0x08 +#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN2_SHIFT 0x03 +#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN1 0x04 +#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN1_SHIFT 0x02 +#define PALMAS_NSLEEP_RES_ASSIGN_REGEN2 0x02 +#define PALMAS_NSLEEP_RES_ASSIGN_REGEN2_SHIFT 0x01 +#define PALMAS_NSLEEP_RES_ASSIGN_REGEN1 0x01 +#define PALMAS_NSLEEP_RES_ASSIGN_REGEN1_SHIFT 0x00 + +/* Bit definitions for NSLEEP_SMPS_ASSIGN */ +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS10 0x80 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS10_SHIFT 0x07 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS9 0x40 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS9_SHIFT 0x06 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS8 0x20 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS8_SHIFT 0x05 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS7 0x10 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS7_SHIFT 0x04 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS6 0x08 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS6_SHIFT 0x03 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS45 0x04 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS45_SHIFT 0x02 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS3 0x02 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS3_SHIFT 0x01 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS12 0x01 +#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS12_SHIFT 0x00 + +/* Bit definitions for NSLEEP_LDO_ASSIGN1 */ +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO8 0x80 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO8_SHIFT 0x07 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO7 0x40 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO7_SHIFT 0x06 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO6 0x20 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO6_SHIFT 0x05 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO5 0x10 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO5_SHIFT 0x04 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO4 0x08 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO4_SHIFT 0x03 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO3 0x04 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO3_SHIFT 0x02 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO2 0x02 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO2_SHIFT 0x01 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO1 0x01 +#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO1_SHIFT 0x00 + +/* Bit definitions for NSLEEP_LDO_ASSIGN2 */ +#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOUSB 0x04 +#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOUSB_SHIFT 0x02 +#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOLN 0x02 +#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOLN_SHIFT 0x01 +#define PALMAS_NSLEEP_LDO_ASSIGN2_LDO9 0x01 +#define PALMAS_NSLEEP_LDO_ASSIGN2_LDO9_SHIFT 0x00 + +/* Bit definitions for ENABLE1_RES_ASSIGN */ +#define PALMAS_ENABLE1_RES_ASSIGN_REGEN3 0x40 +#define PALMAS_ENABLE1_RES_ASSIGN_REGEN3_SHIFT 0x06 +#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KGAUDIO 0x20 +#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KGAUDIO_SHIFT 0x05 +#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KG 0x10 +#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KG_SHIFT 0x04 +#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN2 0x08 +#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN2_SHIFT 0x03 +#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN1 0x04 +#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN1_SHIFT 0x02 +#define PALMAS_ENABLE1_RES_ASSIGN_REGEN2 0x02 +#define PALMAS_ENABLE1_RES_ASSIGN_REGEN2_SHIFT 0x01 +#define 
PALMAS_ENABLE1_RES_ASSIGN_REGEN1 0x01 +#define PALMAS_ENABLE1_RES_ASSIGN_REGEN1_SHIFT 0x00 + +/* Bit definitions for ENABLE1_SMPS_ASSIGN */ +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS10 0x80 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS10_SHIFT 0x07 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS9 0x40 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS9_SHIFT 0x06 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS8 0x20 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS8_SHIFT 0x05 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS7 0x10 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS7_SHIFT 0x04 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS6 0x08 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS6_SHIFT 0x03 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS45 0x04 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS45_SHIFT 0x02 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS3 0x02 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS3_SHIFT 0x01 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS12 0x01 +#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS12_SHIFT 0x00 + +/* Bit definitions for ENABLE1_LDO_ASSIGN1 */ +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO8 0x80 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO8_SHIFT 0x07 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO7 0x40 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO7_SHIFT 0x06 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO6 0x20 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO6_SHIFT 0x05 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO5 0x10 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO5_SHIFT 0x04 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO4 0x08 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO4_SHIFT 0x03 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO3 0x04 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO3_SHIFT 0x02 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO2 0x02 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO2_SHIFT 0x01 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO1 0x01 +#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO1_SHIFT 0x00 + +/* Bit definitions for ENABLE1_LDO_ASSIGN2 */ +#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOUSB 0x04 +#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOUSB_SHIFT 0x02 +#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOLN 0x02 +#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOLN_SHIFT 0x01 +#define PALMAS_ENABLE1_LDO_ASSIGN2_LDO9 0x01 +#define PALMAS_ENABLE1_LDO_ASSIGN2_LDO9_SHIFT 0x00 + +/* Bit definitions for ENABLE2_RES_ASSIGN */ +#define PALMAS_ENABLE2_RES_ASSIGN_REGEN3 0x40 +#define PALMAS_ENABLE2_RES_ASSIGN_REGEN3_SHIFT 0x06 +#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KGAUDIO 0x20 +#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KGAUDIO_SHIFT 0x05 +#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KG 0x10 +#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KG_SHIFT 0x04 +#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN2 0x08 +#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN2_SHIFT 0x03 +#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN1 0x04 +#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN1_SHIFT 0x02 +#define PALMAS_ENABLE2_RES_ASSIGN_REGEN2 0x02 +#define PALMAS_ENABLE2_RES_ASSIGN_REGEN2_SHIFT 0x01 +#define PALMAS_ENABLE2_RES_ASSIGN_REGEN1 0x01 +#define PALMAS_ENABLE2_RES_ASSIGN_REGEN1_SHIFT 0x00 + +/* Bit definitions for ENABLE2_SMPS_ASSIGN */ +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS10 0x80 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS10_SHIFT 0x07 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS9 0x40 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS9_SHIFT 0x06 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS8 0x20 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS8_SHIFT 0x05 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS7 0x10 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS7_SHIFT 0x04 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS6 0x08 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS6_SHIFT 0x03 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS45 0x04 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS45_SHIFT 0x02 +#define 
PALMAS_ENABLE2_SMPS_ASSIGN_SMPS3 0x02 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS3_SHIFT 0x01 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS12 0x01 +#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS12_SHIFT 0x00 + +/* Bit definitions for ENABLE2_LDO_ASSIGN1 */ +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO8 0x80 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO8_SHIFT 0x07 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO7 0x40 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO7_SHIFT 0x06 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO6 0x20 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO6_SHIFT 0x05 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO5 0x10 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO5_SHIFT 0x04 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO4 0x08 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO4_SHIFT 0x03 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO3 0x04 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO3_SHIFT 0x02 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO2 0x02 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO2_SHIFT 0x01 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO1 0x01 +#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO1_SHIFT 0x00 + +/* Bit definitions for ENABLE2_LDO_ASSIGN2 */ +#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOUSB 0x04 +#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOUSB_SHIFT 0x02 +#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOLN 0x02 +#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOLN_SHIFT 0x01 +#define PALMAS_ENABLE2_LDO_ASSIGN2_LDO9 0x01 +#define PALMAS_ENABLE2_LDO_ASSIGN2_LDO9_SHIFT 0x00 + +/* Bit definitions for REGEN3_CTRL */ +#define PALMAS_REGEN3_CTRL_STATUS 0x10 +#define PALMAS_REGEN3_CTRL_STATUS_SHIFT 0x04 +#define PALMAS_REGEN3_CTRL_MODE_SLEEP 0x04 +#define PALMAS_REGEN3_CTRL_MODE_SLEEP_SHIFT 0x02 +#define PALMAS_REGEN3_CTRL_MODE_ACTIVE 0x01 +#define PALMAS_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Registers for function PAD_CONTROL */ +#define PALMAS_OD_OUTPUT_CTRL2 0x02 +#define PALMAS_POLARITY_CTRL2 0x03 +#define PALMAS_PU_PD_INPUT_CTRL1 0x04 +#define PALMAS_PU_PD_INPUT_CTRL2 0x05 +#define PALMAS_PU_PD_INPUT_CTRL3 0x06 +#define PALMAS_PU_PD_INPUT_CTRL5 0x07 +#define PALMAS_OD_OUTPUT_CTRL 0x08 +#define PALMAS_POLARITY_CTRL 0x09 +#define PALMAS_PRIMARY_SECONDARY_PAD1 0x0A +#define PALMAS_PRIMARY_SECONDARY_PAD2 0x0B +#define PALMAS_I2C_SPI 0x0C +#define PALMAS_PU_PD_INPUT_CTRL4 0x0D +#define PALMAS_PRIMARY_SECONDARY_PAD3 0x0E +#define PALMAS_PRIMARY_SECONDARY_PAD4 0x0F + +/* Bit definitions for PU_PD_INPUT_CTRL1 */ +#define PALMAS_PU_PD_INPUT_CTRL1_RESET_IN_PD 0x40 +#define PALMAS_PU_PD_INPUT_CTRL1_RESET_IN_PD_SHIFT 0x06 +#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PU 0x20 +#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PU_SHIFT 0x05 +#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PD 0x10 +#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PD_SHIFT 0x04 +#define PALMAS_PU_PD_INPUT_CTRL1_PWRDOWN_PD 0x04 +#define PALMAS_PU_PD_INPUT_CTRL1_PWRDOWN_PD_SHIFT 0x02 +#define PALMAS_PU_PD_INPUT_CTRL1_NRESWARM_PU 0x02 +#define PALMAS_PU_PD_INPUT_CTRL1_NRESWARM_PU_SHIFT 0x01 + +/* Bit definitions for PU_PD_INPUT_CTRL2 */ +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PU 0x20 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PU_SHIFT 0x05 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PD 0x10 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PD_SHIFT 0x04 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PU 0x08 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PU_SHIFT 0x03 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PD 0x04 +#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PD_SHIFT 0x02 +#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PU 0x02 +#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PU_SHIFT 0x01 +#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PD 0x01 +#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PD_SHIFT 
0x00 + +/* Bit definitions for PU_PD_INPUT_CTRL3 */ +#define PALMAS_PU_PD_INPUT_CTRL3_ACOK_PD 0x40 +#define PALMAS_PU_PD_INPUT_CTRL3_ACOK_PD_SHIFT 0x06 +#define PALMAS_PU_PD_INPUT_CTRL3_CHRG_DET_N_PD 0x10 +#define PALMAS_PU_PD_INPUT_CTRL3_CHRG_DET_N_PD_SHIFT 0x04 +#define PALMAS_PU_PD_INPUT_CTRL3_POWERHOLD_PD 0x04 +#define PALMAS_PU_PD_INPUT_CTRL3_POWERHOLD_PD_SHIFT 0x02 +#define PALMAS_PU_PD_INPUT_CTRL3_MSECURE_PD 0x01 +#define PALMAS_PU_PD_INPUT_CTRL3_MSECURE_PD_SHIFT 0x00 + +/* Bit definitions for OD_OUTPUT_CTRL */ +#define PALMAS_OD_OUTPUT_CTRL_PWM_2_OD 0x80 +#define PALMAS_OD_OUTPUT_CTRL_PWM_2_OD_SHIFT 0x07 +#define PALMAS_OD_OUTPUT_CTRL_VBUSDET_OD 0x40 +#define PALMAS_OD_OUTPUT_CTRL_VBUSDET_OD_SHIFT 0x06 +#define PALMAS_OD_OUTPUT_CTRL_PWM_1_OD 0x20 +#define PALMAS_OD_OUTPUT_CTRL_PWM_1_OD_SHIFT 0x05 +#define PALMAS_OD_OUTPUT_CTRL_INT_OD 0x08 +#define PALMAS_OD_OUTPUT_CTRL_INT_OD_SHIFT 0x03 + +/* Bit definitions for POLARITY_CTRL */ +#define PALMAS_POLARITY_CTRL_INT_POLARITY 0x80 +#define PALMAS_POLARITY_CTRL_INT_POLARITY_SHIFT 0x07 +#define PALMAS_POLARITY_CTRL_ENABLE2_POLARITY 0x40 +#define PALMAS_POLARITY_CTRL_ENABLE2_POLARITY_SHIFT 0x06 +#define PALMAS_POLARITY_CTRL_ENABLE1_POLARITY 0x20 +#define PALMAS_POLARITY_CTRL_ENABLE1_POLARITY_SHIFT 0x05 +#define PALMAS_POLARITY_CTRL_NSLEEP_POLARITY 0x10 +#define PALMAS_POLARITY_CTRL_NSLEEP_POLARITY_SHIFT 0x04 +#define PALMAS_POLARITY_CTRL_RESET_IN_POLARITY 0x08 +#define PALMAS_POLARITY_CTRL_RESET_IN_POLARITY_SHIFT 0x03 +#define PALMAS_POLARITY_CTRL_GPIO_3_CHRG_DET_N_POLARITY 0x04 +#define PALMAS_POLARITY_CTRL_GPIO_3_CHRG_DET_N_POLARITY_SHIFT 0x02 +#define PALMAS_POLARITY_CTRL_POWERGOOD_USB_PSEL_POLARITY 0x02 +#define PALMAS_POLARITY_CTRL_POWERGOOD_USB_PSEL_POLARITY_SHIFT 0x01 +#define PALMAS_POLARITY_CTRL_PWRDOWN_POLARITY 0x01 +#define PALMAS_POLARITY_CTRL_PWRDOWN_POLARITY_SHIFT 0x00 + +/* Bit definitions for PRIMARY_SECONDARY_PAD1 */ +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3 0x80 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3_SHIFT 0x07 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_MASK 0x60 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT 0x05 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK 0x18 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT 0x03 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0 0x04 +#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0_SHIFT 0x02 +#define PALMAS_PRIMARY_SECONDARY_PAD1_VAC 0x02 +#define PALMAS_PRIMARY_SECONDARY_PAD1_VAC_SHIFT 0x01 +#define PALMAS_PRIMARY_SECONDARY_PAD1_POWERGOOD 0x01 +#define PALMAS_PRIMARY_SECONDARY_PAD1_POWERGOOD_SHIFT 0x00 + +/* Bit definitions for PRIMARY_SECONDARY_PAD2 */ +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK 0x30 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_SHIFT 0x04 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6 0x08 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6_SHIFT 0x03 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0x06 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_SHIFT 0x01 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4 0x01 +#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4_SHIFT 0x00 + +/* Bit definitions for I2C_SPI */ +#define PALMAS_I2C_SPI_I2C2OTP_EN 0x80 +#define PALMAS_I2C_SPI_I2C2OTP_EN_SHIFT 0x07 +#define PALMAS_I2C_SPI_I2C2OTP_PAGESEL 0x40 +#define PALMAS_I2C_SPI_I2C2OTP_PAGESEL_SHIFT 0x06 +#define PALMAS_I2C_SPI_ID_I2C2 0x20 +#define PALMAS_I2C_SPI_ID_I2C2_SHIFT 0x05 +#define PALMAS_I2C_SPI_I2C_SPI 0x10 +#define PALMAS_I2C_SPI_I2C_SPI_SHIFT 0x04 +#define PALMAS_I2C_SPI_ID_I2C1_MASK 0x0F +#define PALMAS_I2C_SPI_ID_I2C1_SHIFT 0x00 + +/* Bit definitions 
for PU_PD_INPUT_CTRL4 */ +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_DAT_PD 0x40 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_DAT_PD_SHIFT 0x06 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_CLK_PD 0x10 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_CLK_PD_SHIFT 0x04 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_DAT_PD 0x04 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_DAT_PD_SHIFT 0x02 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_CLK_PD 0x01 +#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_CLK_PD_SHIFT 0x00 + +/* Bit definitions for PRIMARY_SECONDARY_PAD3 */ +#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2 0x02 +#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2_SHIFT 0x01 +#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1 0x01 +#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1_SHIFT 0x00 + +/* Registers for function LED_PWM */ +#define PALMAS_LED_PERIOD_CTRL 0x00 +#define PALMAS_LED_CTRL 0x01 +#define PALMAS_PWM_CTRL1 0x02 +#define PALMAS_PWM_CTRL2 0x03 + +/* Bit definitions for LED_PERIOD_CTRL */ +#define PALMAS_LED_PERIOD_CTRL_LED_2_PERIOD_MASK 0x38 +#define PALMAS_LED_PERIOD_CTRL_LED_2_PERIOD_SHIFT 0x03 +#define PALMAS_LED_PERIOD_CTRL_LED_1_PERIOD_MASK 0x07 +#define PALMAS_LED_PERIOD_CTRL_LED_1_PERIOD_SHIFT 0x00 + +/* Bit definitions for LED_CTRL */ +#define PALMAS_LED_CTRL_LED_2_SEQ 0x20 +#define PALMAS_LED_CTRL_LED_2_SEQ_SHIFT 0x05 +#define PALMAS_LED_CTRL_LED_1_SEQ 0x10 +#define PALMAS_LED_CTRL_LED_1_SEQ_SHIFT 0x04 +#define PALMAS_LED_CTRL_LED_2_ON_TIME_MASK 0x0c +#define PALMAS_LED_CTRL_LED_2_ON_TIME_SHIFT 0x02 +#define PALMAS_LED_CTRL_LED_1_ON_TIME_MASK 0x03 +#define PALMAS_LED_CTRL_LED_1_ON_TIME_SHIFT 0x00 + +/* Bit definitions for PWM_CTRL1 */ +#define PALMAS_PWM_CTRL1_PWM_FREQ_EN 0x02 +#define PALMAS_PWM_CTRL1_PWM_FREQ_EN_SHIFT 0x01 +#define PALMAS_PWM_CTRL1_PWM_FREQ_SEL 0x01 +#define PALMAS_PWM_CTRL1_PWM_FREQ_SEL_SHIFT 0x00 + +/* Bit definitions for PWM_CTRL2 */ +#define PALMAS_PWM_CTRL2_PWM_DUTY_SEL_MASK 0xFF +#define PALMAS_PWM_CTRL2_PWM_DUTY_SEL_SHIFT 0x00 + +/* Registers for function INTERRUPT */ +#define PALMAS_INT1_STATUS 0x00 +#define PALMAS_INT1_MASK 0x01 +#define PALMAS_INT1_LINE_STATE 0x02 +#define PALMAS_INT1_EDGE_DETECT1_RESERVED 0x03 +#define PALMAS_INT1_EDGE_DETECT2_RESERVED 0x04 +#define PALMAS_INT2_STATUS 0x05 +#define PALMAS_INT2_MASK 0x06 +#define PALMAS_INT2_LINE_STATE 0x07 +#define PALMAS_INT2_EDGE_DETECT1_RESERVED 0x08 +#define PALMAS_INT2_EDGE_DETECT2_RESERVED 0x09 +#define PALMAS_INT3_STATUS 0x0A +#define PALMAS_INT3_MASK 0x0B +#define PALMAS_INT3_LINE_STATE 0x0C +#define PALMAS_INT3_EDGE_DETECT1_RESERVED 0x0D +#define PALMAS_INT3_EDGE_DETECT2_RESERVED 0x0E +#define PALMAS_INT4_STATUS 0x0F +#define PALMAS_INT4_MASK 0x10 +#define PALMAS_INT4_LINE_STATE 0x11 +#define PALMAS_INT4_EDGE_DETECT1 0x12 +#define PALMAS_INT4_EDGE_DETECT2 0x13 +#define PALMAS_INT_CTRL 0x14 + +/* Bit definitions for INT1_STATUS */ +#define PALMAS_INT1_STATUS_VBAT_MON 0x80 +#define PALMAS_INT1_STATUS_VBAT_MON_SHIFT 0x07 +#define PALMAS_INT1_STATUS_VSYS_MON 0x40 +#define PALMAS_INT1_STATUS_VSYS_MON_SHIFT 0x06 +#define PALMAS_INT1_STATUS_HOTDIE 0x20 +#define PALMAS_INT1_STATUS_HOTDIE_SHIFT 0x05 +#define PALMAS_INT1_STATUS_PWRDOWN 0x10 +#define PALMAS_INT1_STATUS_PWRDOWN_SHIFT 0x04 +#define PALMAS_INT1_STATUS_RPWRON 0x08 +#define PALMAS_INT1_STATUS_RPWRON_SHIFT 0x03 +#define PALMAS_INT1_STATUS_LONG_PRESS_KEY 0x04 +#define PALMAS_INT1_STATUS_LONG_PRESS_KEY_SHIFT 0x02 +#define PALMAS_INT1_STATUS_PWRON 0x02 +#define PALMAS_INT1_STATUS_PWRON_SHIFT 0x01 +#define PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV 0x01 +#define PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV_SHIFT 
0x00 + +/* Bit definitions for INT1_MASK */ +#define PALMAS_INT1_MASK_VBAT_MON 0x80 +#define PALMAS_INT1_MASK_VBAT_MON_SHIFT 0x07 +#define PALMAS_INT1_MASK_VSYS_MON 0x40 +#define PALMAS_INT1_MASK_VSYS_MON_SHIFT 0x06 +#define PALMAS_INT1_MASK_HOTDIE 0x20 +#define PALMAS_INT1_MASK_HOTDIE_SHIFT 0x05 +#define PALMAS_INT1_MASK_PWRDOWN 0x10 +#define PALMAS_INT1_MASK_PWRDOWN_SHIFT 0x04 +#define PALMAS_INT1_MASK_RPWRON 0x08 +#define PALMAS_INT1_MASK_RPWRON_SHIFT 0x03 +#define PALMAS_INT1_MASK_LONG_PRESS_KEY 0x04 +#define PALMAS_INT1_MASK_LONG_PRESS_KEY_SHIFT 0x02 +#define PALMAS_INT1_MASK_PWRON 0x02 +#define PALMAS_INT1_MASK_PWRON_SHIFT 0x01 +#define PALMAS_INT1_MASK_CHARG_DET_N_VBUS_OVV 0x01 +#define PALMAS_INT1_MASK_CHARG_DET_N_VBUS_OVV_SHIFT 0x00 + +/* Bit definitions for INT1_LINE_STATE */ +#define PALMAS_INT1_LINE_STATE_VBAT_MON 0x80 +#define PALMAS_INT1_LINE_STATE_VBAT_MON_SHIFT 0x07 +#define PALMAS_INT1_LINE_STATE_VSYS_MON 0x40 +#define PALMAS_INT1_LINE_STATE_VSYS_MON_SHIFT 0x06 +#define PALMAS_INT1_LINE_STATE_HOTDIE 0x20 +#define PALMAS_INT1_LINE_STATE_HOTDIE_SHIFT 0x05 +#define PALMAS_INT1_LINE_STATE_PWRDOWN 0x10 +#define PALMAS_INT1_LINE_STATE_PWRDOWN_SHIFT 0x04 +#define PALMAS_INT1_LINE_STATE_RPWRON 0x08 +#define PALMAS_INT1_LINE_STATE_RPWRON_SHIFT 0x03 +#define PALMAS_INT1_LINE_STATE_LONG_PRESS_KEY 0x04 +#define PALMAS_INT1_LINE_STATE_LONG_PRESS_KEY_SHIFT 0x02 +#define PALMAS_INT1_LINE_STATE_PWRON 0x02 +#define PALMAS_INT1_LINE_STATE_PWRON_SHIFT 0x01 +#define PALMAS_INT1_LINE_STATE_CHARG_DET_N_VBUS_OVV 0x01 +#define PALMAS_INT1_LINE_STATE_CHARG_DET_N_VBUS_OVV_SHIFT 0x00 + +/* Bit definitions for INT2_STATUS */ +#define PALMAS_INT2_STATUS_VAC_ACOK 0x80 +#define PALMAS_INT2_STATUS_VAC_ACOK_SHIFT 0x07 +#define PALMAS_INT2_STATUS_SHORT 0x40 +#define PALMAS_INT2_STATUS_SHORT_SHIFT 0x06 +#define PALMAS_INT2_STATUS_FBI_BB 0x20 +#define PALMAS_INT2_STATUS_FBI_BB_SHIFT 0x05 +#define PALMAS_INT2_STATUS_RESET_IN 0x10 +#define PALMAS_INT2_STATUS_RESET_IN_SHIFT 0x04 +#define PALMAS_INT2_STATUS_BATREMOVAL 0x08 +#define PALMAS_INT2_STATUS_BATREMOVAL_SHIFT 0x03 +#define PALMAS_INT2_STATUS_WDT 0x04 +#define PALMAS_INT2_STATUS_WDT_SHIFT 0x02 +#define PALMAS_INT2_STATUS_RTC_TIMER 0x02 +#define PALMAS_INT2_STATUS_RTC_TIMER_SHIFT 0x01 +#define PALMAS_INT2_STATUS_RTC_ALARM 0x01 +#define PALMAS_INT2_STATUS_RTC_ALARM_SHIFT 0x00 + +/* Bit definitions for INT2_MASK */ +#define PALMAS_INT2_MASK_VAC_ACOK 0x80 +#define PALMAS_INT2_MASK_VAC_ACOK_SHIFT 0x07 +#define PALMAS_INT2_MASK_SHORT 0x40 +#define PALMAS_INT2_MASK_SHORT_SHIFT 0x06 +#define PALMAS_INT2_MASK_FBI_BB 0x20 +#define PALMAS_INT2_MASK_FBI_BB_SHIFT 0x05 +#define PALMAS_INT2_MASK_RESET_IN 0x10 +#define PALMAS_INT2_MASK_RESET_IN_SHIFT 0x04 +#define PALMAS_INT2_MASK_BATREMOVAL 0x08 +#define PALMAS_INT2_MASK_BATREMOVAL_SHIFT 0x03 +#define PALMAS_INT2_MASK_WDT 0x04 +#define PALMAS_INT2_MASK_WDT_SHIFT 0x02 +#define PALMAS_INT2_MASK_RTC_TIMER 0x02 +#define PALMAS_INT2_MASK_RTC_TIMER_SHIFT 0x01 +#define PALMAS_INT2_MASK_RTC_ALARM 0x01 +#define PALMAS_INT2_MASK_RTC_ALARM_SHIFT 0x00 + +/* Bit definitions for INT2_LINE_STATE */ +#define PALMAS_INT2_LINE_STATE_VAC_ACOK 0x80 +#define PALMAS_INT2_LINE_STATE_VAC_ACOK_SHIFT 0x07 +#define PALMAS_INT2_LINE_STATE_SHORT 0x40 +#define PALMAS_INT2_LINE_STATE_SHORT_SHIFT 0x06 +#define PALMAS_INT2_LINE_STATE_FBI_BB 0x20 +#define PALMAS_INT2_LINE_STATE_FBI_BB_SHIFT 0x05 +#define PALMAS_INT2_LINE_STATE_RESET_IN 0x10 +#define PALMAS_INT2_LINE_STATE_RESET_IN_SHIFT 0x04 +#define PALMAS_INT2_LINE_STATE_BATREMOVAL 0x08 +#define 
PALMAS_INT2_LINE_STATE_BATREMOVAL_SHIFT 0x03 +#define PALMAS_INT2_LINE_STATE_WDT 0x04 +#define PALMAS_INT2_LINE_STATE_WDT_SHIFT 0x02 +#define PALMAS_INT2_LINE_STATE_RTC_TIMER 0x02 +#define PALMAS_INT2_LINE_STATE_RTC_TIMER_SHIFT 0x01 +#define PALMAS_INT2_LINE_STATE_RTC_ALARM 0x01 +#define PALMAS_INT2_LINE_STATE_RTC_ALARM_SHIFT 0x00 + +/* Bit definitions for INT3_STATUS */ +#define PALMAS_INT3_STATUS_VBUS 0x80 +#define PALMAS_INT3_STATUS_VBUS_SHIFT 0x07 +#define PALMAS_INT3_STATUS_VBUS_OTG 0x40 +#define PALMAS_INT3_STATUS_VBUS_OTG_SHIFT 0x06 +#define PALMAS_INT3_STATUS_ID 0x20 +#define PALMAS_INT3_STATUS_ID_SHIFT 0x05 +#define PALMAS_INT3_STATUS_ID_OTG 0x10 +#define PALMAS_INT3_STATUS_ID_OTG_SHIFT 0x04 +#define PALMAS_INT3_STATUS_GPADC_EOC_RT 0x08 +#define PALMAS_INT3_STATUS_GPADC_EOC_RT_SHIFT 0x03 +#define PALMAS_INT3_STATUS_GPADC_EOC_SW 0x04 +#define PALMAS_INT3_STATUS_GPADC_EOC_SW_SHIFT 0x02 +#define PALMAS_INT3_STATUS_GPADC_AUTO_1 0x02 +#define PALMAS_INT3_STATUS_GPADC_AUTO_1_SHIFT 0x01 +#define PALMAS_INT3_STATUS_GPADC_AUTO_0 0x01 +#define PALMAS_INT3_STATUS_GPADC_AUTO_0_SHIFT 0x00 + +/* Bit definitions for INT3_MASK */ +#define PALMAS_INT3_MASK_VBUS 0x80 +#define PALMAS_INT3_MASK_VBUS_SHIFT 0x07 +#define PALMAS_INT3_MASK_VBUS_OTG 0x40 +#define PALMAS_INT3_MASK_VBUS_OTG_SHIFT 0x06 +#define PALMAS_INT3_MASK_ID 0x20 +#define PALMAS_INT3_MASK_ID_SHIFT 0x05 +#define PALMAS_INT3_MASK_ID_OTG 0x10 +#define PALMAS_INT3_MASK_ID_OTG_SHIFT 0x04 +#define PALMAS_INT3_MASK_GPADC_EOC_RT 0x08 +#define PALMAS_INT3_MASK_GPADC_EOC_RT_SHIFT 0x03 +#define PALMAS_INT3_MASK_GPADC_EOC_SW 0x04 +#define PALMAS_INT3_MASK_GPADC_EOC_SW_SHIFT 0x02 +#define PALMAS_INT3_MASK_GPADC_AUTO_1 0x02 +#define PALMAS_INT3_MASK_GPADC_AUTO_1_SHIFT 0x01 +#define PALMAS_INT3_MASK_GPADC_AUTO_0 0x01 +#define PALMAS_INT3_MASK_GPADC_AUTO_0_SHIFT 0x00 + +/* Bit definitions for INT3_LINE_STATE */ +#define PALMAS_INT3_LINE_STATE_VBUS 0x80 +#define PALMAS_INT3_LINE_STATE_VBUS_SHIFT 0x07 +#define PALMAS_INT3_LINE_STATE_VBUS_OTG 0x40 +#define PALMAS_INT3_LINE_STATE_VBUS_OTG_SHIFT 0x06 +#define PALMAS_INT3_LINE_STATE_ID 0x20 +#define PALMAS_INT3_LINE_STATE_ID_SHIFT 0x05 +#define PALMAS_INT3_LINE_STATE_ID_OTG 0x10 +#define PALMAS_INT3_LINE_STATE_ID_OTG_SHIFT 0x04 +#define PALMAS_INT3_LINE_STATE_GPADC_EOC_RT 0x08 +#define PALMAS_INT3_LINE_STATE_GPADC_EOC_RT_SHIFT 0x03 +#define PALMAS_INT3_LINE_STATE_GPADC_EOC_SW 0x04 +#define PALMAS_INT3_LINE_STATE_GPADC_EOC_SW_SHIFT 0x02 +#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_1 0x02 +#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_1_SHIFT 0x01 +#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_0 0x01 +#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_0_SHIFT 0x00 + +/* Bit definitions for INT4_STATUS */ +#define PALMAS_INT4_STATUS_GPIO_7 0x80 +#define PALMAS_INT4_STATUS_GPIO_7_SHIFT 0x07 +#define PALMAS_INT4_STATUS_GPIO_6 0x40 +#define PALMAS_INT4_STATUS_GPIO_6_SHIFT 0x06 +#define PALMAS_INT4_STATUS_GPIO_5 0x20 +#define PALMAS_INT4_STATUS_GPIO_5_SHIFT 0x05 +#define PALMAS_INT4_STATUS_GPIO_4 0x10 +#define PALMAS_INT4_STATUS_GPIO_4_SHIFT 0x04 +#define PALMAS_INT4_STATUS_GPIO_3 0x08 +#define PALMAS_INT4_STATUS_GPIO_3_SHIFT 0x03 +#define PALMAS_INT4_STATUS_GPIO_2 0x04 +#define PALMAS_INT4_STATUS_GPIO_2_SHIFT 0x02 +#define PALMAS_INT4_STATUS_GPIO_1 0x02 +#define PALMAS_INT4_STATUS_GPIO_1_SHIFT 0x01 +#define PALMAS_INT4_STATUS_GPIO_0 0x01 +#define PALMAS_INT4_STATUS_GPIO_0_SHIFT 0x00 + +/* Bit definitions for INT4_MASK */ +#define PALMAS_INT4_MASK_GPIO_7 0x80 +#define PALMAS_INT4_MASK_GPIO_7_SHIFT 0x07 +#define 
PALMAS_INT4_MASK_GPIO_6 0x40 +#define PALMAS_INT4_MASK_GPIO_6_SHIFT 0x06 +#define PALMAS_INT4_MASK_GPIO_5 0x20 +#define PALMAS_INT4_MASK_GPIO_5_SHIFT 0x05 +#define PALMAS_INT4_MASK_GPIO_4 0x10 +#define PALMAS_INT4_MASK_GPIO_4_SHIFT 0x04 +#define PALMAS_INT4_MASK_GPIO_3 0x08 +#define PALMAS_INT4_MASK_GPIO_3_SHIFT 0x03 +#define PALMAS_INT4_MASK_GPIO_2 0x04 +#define PALMAS_INT4_MASK_GPIO_2_SHIFT 0x02 +#define PALMAS_INT4_MASK_GPIO_1 0x02 +#define PALMAS_INT4_MASK_GPIO_1_SHIFT 0x01 +#define PALMAS_INT4_MASK_GPIO_0 0x01 +#define PALMAS_INT4_MASK_GPIO_0_SHIFT 0x00 + +/* Bit definitions for INT4_LINE_STATE */ +#define PALMAS_INT4_LINE_STATE_GPIO_7 0x80 +#define PALMAS_INT4_LINE_STATE_GPIO_7_SHIFT 0x07 +#define PALMAS_INT4_LINE_STATE_GPIO_6 0x40 +#define PALMAS_INT4_LINE_STATE_GPIO_6_SHIFT 0x06 +#define PALMAS_INT4_LINE_STATE_GPIO_5 0x20 +#define PALMAS_INT4_LINE_STATE_GPIO_5_SHIFT 0x05 +#define PALMAS_INT4_LINE_STATE_GPIO_4 0x10 +#define PALMAS_INT4_LINE_STATE_GPIO_4_SHIFT 0x04 +#define PALMAS_INT4_LINE_STATE_GPIO_3 0x08 +#define PALMAS_INT4_LINE_STATE_GPIO_3_SHIFT 0x03 +#define PALMAS_INT4_LINE_STATE_GPIO_2 0x04 +#define PALMAS_INT4_LINE_STATE_GPIO_2_SHIFT 0x02 +#define PALMAS_INT4_LINE_STATE_GPIO_1 0x02 +#define PALMAS_INT4_LINE_STATE_GPIO_1_SHIFT 0x01 +#define PALMAS_INT4_LINE_STATE_GPIO_0 0x01 +#define PALMAS_INT4_LINE_STATE_GPIO_0_SHIFT 0x00 + +/* Bit definitions for INT4_EDGE_DETECT1 */ +#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_RISING 0x80 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_RISING_SHIFT 0x07 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_FALLING 0x40 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_FALLING_SHIFT 0x06 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_RISING 0x20 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_RISING_SHIFT 0x05 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_FALLING 0x10 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_FALLING_SHIFT 0x04 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_RISING 0x08 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_RISING_SHIFT 0x03 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_FALLING 0x04 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_FALLING_SHIFT 0x02 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_RISING 0x02 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_RISING_SHIFT 0x01 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_FALLING 0x01 +#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_FALLING_SHIFT 0x00 + +/* Bit definitions for INT4_EDGE_DETECT2 */ +#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_RISING 0x80 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_RISING_SHIFT 0x07 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_FALLING 0x40 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_FALLING_SHIFT 0x06 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_RISING 0x20 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_RISING_SHIFT 0x05 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_FALLING 0x10 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_FALLING_SHIFT 0x04 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_RISING 0x08 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_RISING_SHIFT 0x03 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_FALLING 0x04 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_FALLING_SHIFT 0x02 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_RISING 0x02 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_RISING_SHIFT 0x01 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_FALLING 0x01 +#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_FALLING_SHIFT 0x00 + +/* Bit definitions for INT_CTRL */ +#define PALMAS_INT_CTRL_INT_PENDING 0x04 +#define PALMAS_INT_CTRL_INT_PENDING_SHIFT 0x02 +#define PALMAS_INT_CTRL_INT_CLEAR 0x01 +#define PALMAS_INT_CTRL_INT_CLEAR_SHIFT 0x00 + +/* Registers for function USB_OTG */ +#define PALMAS_USB_WAKEUP 0x03 
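Each interrupt bank above shares one bit layout across its STATUS, MASK and LINE_STATE registers. As an illustration of reading a bank, assuming the INT4 macros defined above are in scope and the usual convention that a set MASK bit suppresses the source; the function name and parameters are only for the example:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: a GPIO_0 event needs service when it is latched in
 * INT4_STATUS and its bit in INT4_MASK is clear (source unmasked). */
static inline bool example_int4_gpio0_pending(uint8_t status, uint8_t mask)
{
	return (status & PALMAS_INT4_STATUS_GPIO_0) &&
	       !(mask & PALMAS_INT4_MASK_GPIO_0);
}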
+#define PALMAS_USB_VBUS_CTRL_SET 0x04 +#define PALMAS_USB_VBUS_CTRL_CLR 0x05 +#define PALMAS_USB_ID_CTRL_SET 0x06 +#define PALMAS_USB_ID_CTRL_CLEAR 0x07 +#define PALMAS_USB_VBUS_INT_SRC 0x08 +#define PALMAS_USB_VBUS_INT_LATCH_SET 0x09 +#define PALMAS_USB_VBUS_INT_LATCH_CLR 0x0A +#define PALMAS_USB_VBUS_INT_EN_LO_SET 0x0B +#define PALMAS_USB_VBUS_INT_EN_LO_CLR 0x0C +#define PALMAS_USB_VBUS_INT_EN_HI_SET 0x0D +#define PALMAS_USB_VBUS_INT_EN_HI_CLR 0x0E +#define PALMAS_USB_ID_INT_SRC 0x0F +#define PALMAS_USB_ID_INT_LATCH_SET 0x10 +#define PALMAS_USB_ID_INT_LATCH_CLR 0x11 +#define PALMAS_USB_ID_INT_EN_LO_SET 0x12 +#define PALMAS_USB_ID_INT_EN_LO_CLR 0x13 +#define PALMAS_USB_ID_INT_EN_HI_SET 0x14 +#define PALMAS_USB_ID_INT_EN_HI_CLR 0x15 +#define PALMAS_USB_OTG_ADP_CTRL 0x16 +#define PALMAS_USB_OTG_ADP_HIGH 0x17 +#define PALMAS_USB_OTG_ADP_LOW 0x18 +#define PALMAS_USB_OTG_ADP_RISE 0x19 +#define PALMAS_USB_OTG_REVISION 0x1A + +/* Bit definitions for USB_WAKEUP */ +#define PALMAS_USB_WAKEUP_ID_WK_UP_COMP 0x01 +#define PALMAS_USB_WAKEUP_ID_WK_UP_COMP_SHIFT 0x00 + +/* Bit definitions for USB_VBUS_CTRL_SET */ +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_CHRG_VSYS 0x80 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_CHRG_VSYS_SHIFT 0x07 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_DISCHRG 0x20 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_DISCHRG_SHIFT 0x05 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SRC 0x10 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SRC_SHIFT 0x04 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SINK 0x08 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SINK_SHIFT 0x03 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP 0x04 +#define PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP_SHIFT 0x02 + +/* Bit definitions for USB_VBUS_CTRL_CLR */ +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_CHRG_VSYS 0x80 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_CHRG_VSYS_SHIFT 0x07 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_DISCHRG 0x20 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_DISCHRG_SHIFT 0x05 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SRC 0x10 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SRC_SHIFT 0x04 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SINK 0x08 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SINK_SHIFT 0x03 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_ACT_COMP 0x04 +#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_ACT_COMP_SHIFT 0x02 + +/* Bit definitions for USB_ID_CTRL_SET */ +#define PALMAS_USB_ID_CTRL_SET_ID_PU_220K 0x80 +#define PALMAS_USB_ID_CTRL_SET_ID_PU_220K_SHIFT 0x07 +#define PALMAS_USB_ID_CTRL_SET_ID_PU_100K 0x40 +#define PALMAS_USB_ID_CTRL_SET_ID_PU_100K_SHIFT 0x06 +#define PALMAS_USB_ID_CTRL_SET_ID_GND_DRV 0x20 +#define PALMAS_USB_ID_CTRL_SET_ID_GND_DRV_SHIFT 0x05 +#define PALMAS_USB_ID_CTRL_SET_ID_SRC_16U 0x10 +#define PALMAS_USB_ID_CTRL_SET_ID_SRC_16U_SHIFT 0x04 +#define PALMAS_USB_ID_CTRL_SET_ID_SRC_5U 0x08 +#define PALMAS_USB_ID_CTRL_SET_ID_SRC_5U_SHIFT 0x03 +#define PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP 0x04 +#define PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP_SHIFT 0x02 + +/* Bit definitions for USB_ID_CTRL_CLEAR */ +#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_220K 0x80 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_220K_SHIFT 0x07 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_100K 0x40 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_100K_SHIFT 0x06 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_GND_DRV 0x20 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_GND_DRV_SHIFT 0x05 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_16U 0x10 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_16U_SHIFT 0x04 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_5U 0x08 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_5U_SHIFT 0x03 +#define 
PALMAS_USB_ID_CTRL_CLEAR_ID_ACT_COMP 0x04 +#define PALMAS_USB_ID_CTRL_CLEAR_ID_ACT_COMP_SHIFT 0x02 + +/* Bit definitions for USB_VBUS_INT_SRC */ +#define PALMAS_USB_VBUS_INT_SRC_VOTG_SESS_VLD 0x80 +#define PALMAS_USB_VBUS_INT_SRC_VOTG_SESS_VLD_SHIFT 0x07 +#define PALMAS_USB_VBUS_INT_SRC_VADP_PRB 0x40 +#define PALMAS_USB_VBUS_INT_SRC_VADP_PRB_SHIFT 0x06 +#define PALMAS_USB_VBUS_INT_SRC_VADP_SNS 0x20 +#define PALMAS_USB_VBUS_INT_SRC_VADP_SNS_SHIFT 0x05 +#define PALMAS_USB_VBUS_INT_SRC_VA_VBUS_VLD 0x08 +#define PALMAS_USB_VBUS_INT_SRC_VA_VBUS_VLD_SHIFT 0x03 +#define PALMAS_USB_VBUS_INT_SRC_VA_SESS_VLD 0x04 +#define PALMAS_USB_VBUS_INT_SRC_VA_SESS_VLD_SHIFT 0x02 +#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_VLD 0x02 +#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_VLD_SHIFT 0x01 +#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_END 0x01 +#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_END_SHIFT 0x00 + +/* Bit definitions for USB_VBUS_INT_LATCH_SET */ +#define PALMAS_USB_VBUS_INT_LATCH_SET_VOTG_SESS_VLD 0x80 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VOTG_SESS_VLD_SHIFT 0x07 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_PRB 0x40 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_PRB_SHIFT 0x06 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_SNS 0x20 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_SNS_SHIFT 0x05 +#define PALMAS_USB_VBUS_INT_LATCH_SET_ADP 0x10 +#define PALMAS_USB_VBUS_INT_LATCH_SET_ADP_SHIFT 0x04 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_VBUS_VLD 0x08 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_VBUS_VLD_SHIFT 0x03 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_SESS_VLD 0x04 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_SESS_VLD_SHIFT 0x02 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_VLD 0x02 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_VLD_SHIFT 0x01 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_END 0x01 +#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_END_SHIFT 0x00 + +/* Bit definitions for USB_VBUS_INT_LATCH_CLR */ +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VOTG_SESS_VLD 0x80 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VOTG_SESS_VLD_SHIFT 0x07 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_PRB 0x40 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_PRB_SHIFT 0x06 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_SNS 0x20 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_SNS_SHIFT 0x05 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_ADP 0x10 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_ADP_SHIFT 0x04 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_VBUS_VLD 0x08 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_VBUS_VLD_SHIFT 0x03 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_SESS_VLD 0x04 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_SESS_VLD_SHIFT 0x02 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_VLD 0x02 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_VLD_SHIFT 0x01 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_END 0x01 +#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_END_SHIFT 0x00 + +/* Bit definitions for USB_VBUS_INT_EN_LO_SET */ +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VOTG_SESS_VLD 0x80 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VOTG_SESS_VLD_SHIFT 0x07 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_PRB 0x40 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_PRB_SHIFT 0x06 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_SNS 0x20 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_SNS_SHIFT 0x05 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_VBUS_VLD 0x08 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_VBUS_VLD_SHIFT 0x03 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_SESS_VLD 0x04 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_SESS_VLD_SHIFT 0x02 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_VLD 0x02 +#define 
PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_VLD_SHIFT 0x01 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_END 0x01 +#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_END_SHIFT 0x00 + +/* Bit definitions for USB_VBUS_INT_EN_LO_CLR */ +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VOTG_SESS_VLD 0x80 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VOTG_SESS_VLD_SHIFT 0x07 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_PRB 0x40 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_PRB_SHIFT 0x06 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_SNS 0x20 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_SNS_SHIFT 0x05 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_VBUS_VLD 0x08 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_VBUS_VLD_SHIFT 0x03 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_SESS_VLD 0x04 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_SESS_VLD_SHIFT 0x02 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_VLD 0x02 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_VLD_SHIFT 0x01 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_END 0x01 +#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_END_SHIFT 0x00 + +/* Bit definitions for USB_VBUS_INT_EN_HI_SET */ +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VOTG_SESS_VLD 0x80 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VOTG_SESS_VLD_SHIFT 0x07 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_PRB 0x40 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_PRB_SHIFT 0x06 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_SNS 0x20 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_SNS_SHIFT 0x05 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_ADP 0x10 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_ADP_SHIFT 0x04 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_VBUS_VLD 0x08 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_VBUS_VLD_SHIFT 0x03 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_SESS_VLD 0x04 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_SESS_VLD_SHIFT 0x02 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_VLD 0x02 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_VLD_SHIFT 0x01 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_END 0x01 +#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_END_SHIFT 0x00 + +/* Bit definitions for USB_VBUS_INT_EN_HI_CLR */ +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VOTG_SESS_VLD 0x80 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VOTG_SESS_VLD_SHIFT 0x07 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_PRB 0x40 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_PRB_SHIFT 0x06 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_SNS 0x20 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_SNS_SHIFT 0x05 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_ADP 0x10 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_ADP_SHIFT 0x04 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_VBUS_VLD 0x08 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_VBUS_VLD_SHIFT 0x03 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_SESS_VLD 0x04 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_SESS_VLD_SHIFT 0x02 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_VLD 0x02 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_VLD_SHIFT 0x01 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_END 0x01 +#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_END_SHIFT 0x00 + +/* Bit definitions for USB_ID_INT_SRC */ +#define PALMAS_USB_ID_INT_SRC_ID_FLOAT 0x10 +#define PALMAS_USB_ID_INT_SRC_ID_FLOAT_SHIFT 0x04 +#define PALMAS_USB_ID_INT_SRC_ID_A 0x08 +#define PALMAS_USB_ID_INT_SRC_ID_A_SHIFT 0x03 +#define PALMAS_USB_ID_INT_SRC_ID_B 0x04 +#define PALMAS_USB_ID_INT_SRC_ID_B_SHIFT 0x02 +#define PALMAS_USB_ID_INT_SRC_ID_C 0x02 +#define PALMAS_USB_ID_INT_SRC_ID_C_SHIFT 0x01 +#define PALMAS_USB_ID_INT_SRC_ID_GND 0x01 +#define PALMAS_USB_ID_INT_SRC_ID_GND_SHIFT 0x00 + +/* Bit definitions for USB_ID_INT_LATCH_SET */ +#define 
PALMAS_USB_ID_INT_LATCH_SET_ID_FLOAT 0x10 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_FLOAT_SHIFT 0x04 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_A 0x08 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_A_SHIFT 0x03 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_B 0x04 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_B_SHIFT 0x02 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_C 0x02 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_C_SHIFT 0x01 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_GND 0x01 +#define PALMAS_USB_ID_INT_LATCH_SET_ID_GND_SHIFT 0x00 + +/* Bit definitions for USB_ID_INT_LATCH_CLR */ +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_FLOAT 0x10 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_FLOAT_SHIFT 0x04 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_A 0x08 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_A_SHIFT 0x03 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_B 0x04 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_B_SHIFT 0x02 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_C 0x02 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_C_SHIFT 0x01 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_GND 0x01 +#define PALMAS_USB_ID_INT_LATCH_CLR_ID_GND_SHIFT 0x00 + +/* Bit definitions for USB_ID_INT_EN_LO_SET */ +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_FLOAT 0x10 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_FLOAT_SHIFT 0x04 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_A 0x08 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_A_SHIFT 0x03 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_B 0x04 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_B_SHIFT 0x02 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_C 0x02 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_C_SHIFT 0x01 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_GND 0x01 +#define PALMAS_USB_ID_INT_EN_LO_SET_ID_GND_SHIFT 0x00 + +/* Bit definitions for USB_ID_INT_EN_LO_CLR */ +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_FLOAT 0x10 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_FLOAT_SHIFT 0x04 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_A 0x08 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_A_SHIFT 0x03 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_B 0x04 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_B_SHIFT 0x02 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_C 0x02 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_C_SHIFT 0x01 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_GND 0x01 +#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_GND_SHIFT 0x00 + +/* Bit definitions for USB_ID_INT_EN_HI_SET */ +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT 0x10 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT_SHIFT 0x04 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_A 0x08 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_A_SHIFT 0x03 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_B 0x04 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_B_SHIFT 0x02 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_C 0x02 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_C_SHIFT 0x01 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_GND 0x01 +#define PALMAS_USB_ID_INT_EN_HI_SET_ID_GND_SHIFT 0x00 + +/* Bit definitions for USB_ID_INT_EN_HI_CLR */ +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT 0x10 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT_SHIFT 0x04 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_A 0x08 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_A_SHIFT 0x03 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_B 0x04 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_B_SHIFT 0x02 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_C 0x02 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_C_SHIFT 0x01 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND 0x01 +#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND_SHIFT 0x00 + +/* Bit definitions for USB_OTG_ADP_CTRL */ +#define PALMAS_USB_OTG_ADP_CTRL_ADP_EN 0x04 +#define PALMAS_USB_OTG_ADP_CTRL_ADP_EN_SHIFT 0x02 +#define PALMAS_USB_OTG_ADP_CTRL_ADP_MODE_MASK 0x03 +#define PALMAS_USB_OTG_ADP_CTRL_ADP_MODE_SHIFT 
0x00 + +/* Bit definitions for USB_OTG_ADP_HIGH */ +#define PALMAS_USB_OTG_ADP_HIGH_T_ADP_HIGH_MASK 0xFF +#define PALMAS_USB_OTG_ADP_HIGH_T_ADP_HIGH_SHIFT 0x00 + +/* Bit definitions for USB_OTG_ADP_LOW */ +#define PALMAS_USB_OTG_ADP_LOW_T_ADP_LOW_MASK 0xFF +#define PALMAS_USB_OTG_ADP_LOW_T_ADP_LOW_SHIFT 0x00 + +/* Bit definitions for USB_OTG_ADP_RISE */ +#define PALMAS_USB_OTG_ADP_RISE_T_ADP_RISE_MASK 0xFF +#define PALMAS_USB_OTG_ADP_RISE_T_ADP_RISE_SHIFT 0x00 + +/* Bit definitions for USB_OTG_REVISION */ +#define PALMAS_USB_OTG_REVISION_OTG_REV 0x01 +#define PALMAS_USB_OTG_REVISION_OTG_REV_SHIFT 0x00 + +/* Registers for function VIBRATOR */ +#define PALMAS_VIBRA_CTRL 0x00 + +/* Bit definitions for VIBRA_CTRL */ +#define PALMAS_VIBRA_CTRL_PWM_DUTY_SEL_MASK 0x06 +#define PALMAS_VIBRA_CTRL_PWM_DUTY_SEL_SHIFT 0x01 +#define PALMAS_VIBRA_CTRL_PWM_FREQ_SEL 0x01 +#define PALMAS_VIBRA_CTRL_PWM_FREQ_SEL_SHIFT 0x00 + +/* Registers for function GPIO */ +#define PALMAS_GPIO_DATA_IN 0x00 +#define PALMAS_GPIO_DATA_DIR 0x01 +#define PALMAS_GPIO_DATA_OUT 0x02 +#define PALMAS_GPIO_DEBOUNCE_EN 0x03 +#define PALMAS_GPIO_CLEAR_DATA_OUT 0x04 +#define PALMAS_GPIO_SET_DATA_OUT 0x05 +#define PALMAS_PU_PD_GPIO_CTRL1 0x06 +#define PALMAS_PU_PD_GPIO_CTRL2 0x07 +#define PALMAS_OD_OUTPUT_GPIO_CTRL 0x08 +#define PALMAS_GPIO_DATA_IN2 0x09 +#define PALMAS_GPIO_DATA_DIR2 0x0A +#define PALMAS_GPIO_DATA_OUT2 0x0B +#define PALMAS_GPIO_DEBOUNCE_EN2 0x0C +#define PALMAS_GPIO_CLEAR_DATA_OUT2 0x0D +#define PALMAS_GPIO_SET_DATA_OUT2 0x0E +#define PALMAS_PU_PD_GPIO_CTRL3 0x0F +#define PALMAS_PU_PD_GPIO_CTRL4 0x10 +#define PALMAS_OD_OUTPUT_GPIO_CTRL2 0x11 + +/* Bit definitions for GPIO_DATA_IN */ +#define PALMAS_GPIO_DATA_IN_GPIO_7_IN 0x80 +#define PALMAS_GPIO_DATA_IN_GPIO_7_IN_SHIFT 0x07 +#define PALMAS_GPIO_DATA_IN_GPIO_6_IN 0x40 +#define PALMAS_GPIO_DATA_IN_GPIO_6_IN_SHIFT 0x06 +#define PALMAS_GPIO_DATA_IN_GPIO_5_IN 0x20 +#define PALMAS_GPIO_DATA_IN_GPIO_5_IN_SHIFT 0x05 +#define PALMAS_GPIO_DATA_IN_GPIO_4_IN 0x10 +#define PALMAS_GPIO_DATA_IN_GPIO_4_IN_SHIFT 0x04 +#define PALMAS_GPIO_DATA_IN_GPIO_3_IN 0x08 +#define PALMAS_GPIO_DATA_IN_GPIO_3_IN_SHIFT 0x03 +#define PALMAS_GPIO_DATA_IN_GPIO_2_IN 0x04 +#define PALMAS_GPIO_DATA_IN_GPIO_2_IN_SHIFT 0x02 +#define PALMAS_GPIO_DATA_IN_GPIO_1_IN 0x02 +#define PALMAS_GPIO_DATA_IN_GPIO_1_IN_SHIFT 0x01 +#define PALMAS_GPIO_DATA_IN_GPIO_0_IN 0x01 +#define PALMAS_GPIO_DATA_IN_GPIO_0_IN_SHIFT 0x00 + +/* Bit definitions for GPIO_DATA_DIR */ +#define PALMAS_GPIO_DATA_DIR_GPIO_7_DIR 0x80 +#define PALMAS_GPIO_DATA_DIR_GPIO_7_DIR_SHIFT 0x07 +#define PALMAS_GPIO_DATA_DIR_GPIO_6_DIR 0x40 +#define PALMAS_GPIO_DATA_DIR_GPIO_6_DIR_SHIFT 0x06 +#define PALMAS_GPIO_DATA_DIR_GPIO_5_DIR 0x20 +#define PALMAS_GPIO_DATA_DIR_GPIO_5_DIR_SHIFT 0x05 +#define PALMAS_GPIO_DATA_DIR_GPIO_4_DIR 0x10 +#define PALMAS_GPIO_DATA_DIR_GPIO_4_DIR_SHIFT 0x04 +#define PALMAS_GPIO_DATA_DIR_GPIO_3_DIR 0x08 +#define PALMAS_GPIO_DATA_DIR_GPIO_3_DIR_SHIFT 0x03 +#define PALMAS_GPIO_DATA_DIR_GPIO_2_DIR 0x04 +#define PALMAS_GPIO_DATA_DIR_GPIO_2_DIR_SHIFT 0x02 +#define PALMAS_GPIO_DATA_DIR_GPIO_1_DIR 0x02 +#define PALMAS_GPIO_DATA_DIR_GPIO_1_DIR_SHIFT 0x01 +#define PALMAS_GPIO_DATA_DIR_GPIO_0_DIR 0x01 +#define PALMAS_GPIO_DATA_DIR_GPIO_0_DIR_SHIFT 0x00 + +/* Bit definitions for GPIO_DATA_OUT */ +#define PALMAS_GPIO_DATA_OUT_GPIO_7_OUT 0x80 +#define PALMAS_GPIO_DATA_OUT_GPIO_7_OUT_SHIFT 0x07 +#define PALMAS_GPIO_DATA_OUT_GPIO_6_OUT 0x40 +#define PALMAS_GPIO_DATA_OUT_GPIO_6_OUT_SHIFT 0x06 +#define PALMAS_GPIO_DATA_OUT_GPIO_5_OUT 0x20 
+#define PALMAS_GPIO_DATA_OUT_GPIO_5_OUT_SHIFT 0x05 +#define PALMAS_GPIO_DATA_OUT_GPIO_4_OUT 0x10 +#define PALMAS_GPIO_DATA_OUT_GPIO_4_OUT_SHIFT 0x04 +#define PALMAS_GPIO_DATA_OUT_GPIO_3_OUT 0x08 +#define PALMAS_GPIO_DATA_OUT_GPIO_3_OUT_SHIFT 0x03 +#define PALMAS_GPIO_DATA_OUT_GPIO_2_OUT 0x04 +#define PALMAS_GPIO_DATA_OUT_GPIO_2_OUT_SHIFT 0x02 +#define PALMAS_GPIO_DATA_OUT_GPIO_1_OUT 0x02 +#define PALMAS_GPIO_DATA_OUT_GPIO_1_OUT_SHIFT 0x01 +#define PALMAS_GPIO_DATA_OUT_GPIO_0_OUT 0x01 +#define PALMAS_GPIO_DATA_OUT_GPIO_0_OUT_SHIFT 0x00 + +/* Bit definitions for GPIO_DEBOUNCE_EN */ +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_7_DEBOUNCE_EN 0x80 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_7_DEBOUNCE_EN_SHIFT 0x07 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_6_DEBOUNCE_EN 0x40 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_6_DEBOUNCE_EN_SHIFT 0x06 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_5_DEBOUNCE_EN 0x20 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_5_DEBOUNCE_EN_SHIFT 0x05 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_4_DEBOUNCE_EN 0x10 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_4_DEBOUNCE_EN_SHIFT 0x04 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_3_DEBOUNCE_EN 0x08 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_3_DEBOUNCE_EN_SHIFT 0x03 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_2_DEBOUNCE_EN 0x04 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_2_DEBOUNCE_EN_SHIFT 0x02 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_1_DEBOUNCE_EN 0x02 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_1_DEBOUNCE_EN_SHIFT 0x01 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_0_DEBOUNCE_EN 0x01 +#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_0_DEBOUNCE_EN_SHIFT 0x00 + +/* Bit definitions for GPIO_CLEAR_DATA_OUT */ +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_7_CLEAR_DATA_OUT 0x80 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_7_CLEAR_DATA_OUT_SHIFT 0x07 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_6_CLEAR_DATA_OUT 0x40 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_6_CLEAR_DATA_OUT_SHIFT 0x06 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_5_CLEAR_DATA_OUT 0x20 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_5_CLEAR_DATA_OUT_SHIFT 0x05 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_4_CLEAR_DATA_OUT 0x10 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_4_CLEAR_DATA_OUT_SHIFT 0x04 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_3_CLEAR_DATA_OUT 0x08 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_3_CLEAR_DATA_OUT_SHIFT 0x03 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_2_CLEAR_DATA_OUT 0x04 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_2_CLEAR_DATA_OUT_SHIFT 0x02 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_1_CLEAR_DATA_OUT 0x02 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_1_CLEAR_DATA_OUT_SHIFT 0x01 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_0_CLEAR_DATA_OUT 0x01 +#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_0_CLEAR_DATA_OUT_SHIFT 0x00 + +/* Bit definitions for GPIO_SET_DATA_OUT */ +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_7_SET_DATA_OUT 0x80 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_7_SET_DATA_OUT_SHIFT 0x07 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_6_SET_DATA_OUT 0x40 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_6_SET_DATA_OUT_SHIFT 0x06 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_5_SET_DATA_OUT 0x20 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_5_SET_DATA_OUT_SHIFT 0x05 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_4_SET_DATA_OUT 0x10 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_4_SET_DATA_OUT_SHIFT 0x04 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_3_SET_DATA_OUT 0x08 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_3_SET_DATA_OUT_SHIFT 0x03 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_2_SET_DATA_OUT 0x04 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_2_SET_DATA_OUT_SHIFT 0x02 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_1_SET_DATA_OUT 0x02 +#define 
PALMAS_GPIO_SET_DATA_OUT_GPIO_1_SET_DATA_OUT_SHIFT 0x01 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_0_SET_DATA_OUT 0x01 +#define PALMAS_GPIO_SET_DATA_OUT_GPIO_0_SET_DATA_OUT_SHIFT 0x00 + +/* Bit definitions for PU_PD_GPIO_CTRL1 */ +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_3_PD 0x40 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_3_PD_SHIFT 0x06 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PU 0x20 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PU_SHIFT 0x05 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PD 0x10 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PD_SHIFT 0x04 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PU 0x08 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PU_SHIFT 0x03 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PD 0x04 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PD_SHIFT 0x02 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_0_PD 0x01 +#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_0_PD_SHIFT 0x00 + +/* Bit definitions for PU_PD_GPIO_CTRL2 */ +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_7_PD 0x40 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_7_PD_SHIFT 0x06 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PU 0x20 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PU_SHIFT 0x05 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PD 0x10 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PD_SHIFT 0x04 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PU 0x08 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PU_SHIFT 0x03 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PD 0x04 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PD_SHIFT 0x02 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PU 0x02 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PU_SHIFT 0x01 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PD 0x01 +#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PD_SHIFT 0x00 + +/* Bit definitions for OD_OUTPUT_GPIO_CTRL */ +#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_5_OD 0x20 +#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_5_OD_SHIFT 0x05 +#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_2_OD 0x04 +#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_2_OD_SHIFT 0x02 +#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_1_OD 0x02 +#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_1_OD_SHIFT 0x01 + +/* Registers for function GPADC */ +#define PALMAS_GPADC_CTRL1 0x00 +#define PALMAS_GPADC_CTRL2 0x01 +#define PALMAS_GPADC_RT_CTRL 0x02 +#define PALMAS_GPADC_AUTO_CTRL 0x03 +#define PALMAS_GPADC_STATUS 0x04 +#define PALMAS_GPADC_RT_SELECT 0x05 +#define PALMAS_GPADC_RT_CONV0_LSB 0x06 +#define PALMAS_GPADC_RT_CONV0_MSB 0x07 +#define PALMAS_GPADC_AUTO_SELECT 0x08 +#define PALMAS_GPADC_AUTO_CONV0_LSB 0x09 +#define PALMAS_GPADC_AUTO_CONV0_MSB 0x0A +#define PALMAS_GPADC_AUTO_CONV1_LSB 0x0B +#define PALMAS_GPADC_AUTO_CONV1_MSB 0x0C +#define PALMAS_GPADC_SW_SELECT 0x0D +#define PALMAS_GPADC_SW_CONV0_LSB 0x0E +#define PALMAS_GPADC_SW_CONV0_MSB 0x0F +#define PALMAS_GPADC_THRES_CONV0_LSB 0x10 +#define PALMAS_GPADC_THRES_CONV0_MSB 0x11 +#define PALMAS_GPADC_THRES_CONV1_LSB 0x12 +#define PALMAS_GPADC_THRES_CONV1_MSB 0x13 +#define PALMAS_GPADC_SMPS_ILMONITOR_EN 0x14 +#define PALMAS_GPADC_SMPS_VSEL_MONITORING 0x15 + +/* Bit definitions for GPADC_CTRL1 */ +#define PALMAS_GPADC_CTRL1_RESERVED_MASK 0xc0 +#define PALMAS_GPADC_CTRL1_RESERVED_SHIFT 0x06 +#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_MASK 0x30 +#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_SHIFT 0x04 +#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_MASK 0x0c +#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_SHIFT 0x02 +#define PALMAS_GPADC_CTRL1_BAT_REMOVAL_DET 0x02 +#define PALMAS_GPADC_CTRL1_BAT_REMOVAL_DET_SHIFT 0x01 +#define PALMAS_GPADC_CTRL1_GPADC_FORCE 0x01 +#define PALMAS_GPADC_CTRL1_GPADC_FORCE_SHIFT 0x00 + +/* Bit definitions for GPADC_CTRL2 */ +#define PALMAS_GPADC_CTRL2_RESERVED_MASK 0x06 +#define 
PALMAS_GPADC_CTRL2_RESERVED_SHIFT 0x01 + +/* Bit definitions for GPADC_RT_CTRL */ +#define PALMAS_GPADC_RT_CTRL_EXTEND_DELAY 0x02 +#define PALMAS_GPADC_RT_CTRL_EXTEND_DELAY_SHIFT 0x01 +#define PALMAS_GPADC_RT_CTRL_START_POLARITY 0x01 +#define PALMAS_GPADC_RT_CTRL_START_POLARITY_SHIFT 0x00 + +/* Bit definitions for GPADC_AUTO_CTRL */ +#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1 0x80 +#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1_SHIFT 0x07 +#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0 0x40 +#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0_SHIFT 0x06 +#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN 0x20 +#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN_SHIFT 0x05 +#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN 0x10 +#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN_SHIFT 0x04 +#define PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_MASK 0x0F +#define PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_SHIFT 0x00 + +/* Bit definitions for GPADC_STATUS */ +#define PALMAS_GPADC_STATUS_GPADC_AVAILABLE 0x10 +#define PALMAS_GPADC_STATUS_GPADC_AVAILABLE_SHIFT 0x04 + +/* Bit definitions for GPADC_RT_SELECT */ +#define PALMAS_GPADC_RT_SELECT_RT_CONV_EN 0x80 +#define PALMAS_GPADC_RT_SELECT_RT_CONV_EN_SHIFT 0x07 +#define PALMAS_GPADC_RT_SELECT_RT_CONV0_SEL_MASK 0x0F +#define PALMAS_GPADC_RT_SELECT_RT_CONV0_SEL_SHIFT 0x00 + +/* Bit definitions for GPADC_RT_CONV0_LSB */ +#define PALMAS_GPADC_RT_CONV0_LSB_RT_CONV0_LSB_MASK 0xFF +#define PALMAS_GPADC_RT_CONV0_LSB_RT_CONV0_LSB_SHIFT 0x00 + +/* Bit definitions for GPADC_RT_CONV0_MSB */ +#define PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_MASK 0x0F +#define PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_SHIFT 0x00 + +/* Bit definitions for GPADC_AUTO_SELECT */ +#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV1_SEL_MASK 0xF0 +#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV1_SEL_SHIFT 0x04 +#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV0_SEL_MASK 0x0F +#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV0_SEL_SHIFT 0x00 + +/* Bit definitions for GPADC_AUTO_CONV0_LSB */ +#define PALMAS_GPADC_AUTO_CONV0_LSB_AUTO_CONV0_LSB_MASK 0xFF +#define PALMAS_GPADC_AUTO_CONV0_LSB_AUTO_CONV0_LSB_SHIFT 0x00 + +/* Bit definitions for GPADC_AUTO_CONV0_MSB */ +#define PALMAS_GPADC_AUTO_CONV0_MSB_AUTO_CONV0_MSB_MASK 0x0F +#define PALMAS_GPADC_AUTO_CONV0_MSB_AUTO_CONV0_MSB_SHIFT 0x00 + +/* Bit definitions for GPADC_AUTO_CONV1_LSB */ +#define PALMAS_GPADC_AUTO_CONV1_LSB_AUTO_CONV1_LSB_MASK 0xFF +#define PALMAS_GPADC_AUTO_CONV1_LSB_AUTO_CONV1_LSB_SHIFT 0x00 + +/* Bit definitions for GPADC_AUTO_CONV1_MSB */ +#define PALMAS_GPADC_AUTO_CONV1_MSB_AUTO_CONV1_MSB_MASK 0x0F +#define PALMAS_GPADC_AUTO_CONV1_MSB_AUTO_CONV1_MSB_SHIFT 0x00 + +/* Bit definitions for GPADC_SW_SELECT */ +#define PALMAS_GPADC_SW_SELECT_SW_CONV_EN 0x80 +#define PALMAS_GPADC_SW_SELECT_SW_CONV_EN_SHIFT 0x07 +#define PALMAS_GPADC_SW_SELECT_SW_START_CONV0 0x10 +#define PALMAS_GPADC_SW_SELECT_SW_START_CONV0_SHIFT 0x04 +#define PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_MASK 0x0F +#define PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_SHIFT 0x00 + +/* Bit definitions for GPADC_SW_CONV0_LSB */ +#define PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_MASK 0xFF +#define PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_SHIFT 0x00 + +/* Bit definitions for GPADC_SW_CONV0_MSB */ +#define PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_MASK 0x0F +#define PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_SHIFT 0x00 + +/* Bit definitions for GPADC_THRES_CONV0_LSB */ +#define PALMAS_GPADC_THRES_CONV0_LSB_THRES_CONV0_LSB_MASK 0xFF +#define PALMAS_GPADC_THRES_CONV0_LSB_THRES_CONV0_LSB_SHIFT 0x00 + +/* Bit definitions for GPADC_THRES_CONV0_MSB */ +#define 
PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_POL 0x80 +#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_POL_SHIFT 0x07 +#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_MSB_MASK 0x0F +#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_MSB_SHIFT 0x00 + +/* Bit definitions for GPADC_THRES_CONV1_LSB */ +#define PALMAS_GPADC_THRES_CONV1_LSB_THRES_CONV1_LSB_MASK 0xFF +#define PALMAS_GPADC_THRES_CONV1_LSB_THRES_CONV1_LSB_SHIFT 0x00 + +/* Bit definitions for GPADC_THRES_CONV1_MSB */ +#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_POL 0x80 +#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_POL_SHIFT 0x07 +#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_MSB_MASK 0x0F +#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_MSB_SHIFT 0x00 + +/* Bit definitions for GPADC_SMPS_ILMONITOR_EN */ +#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_EN 0x20 +#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_EN_SHIFT 0x05 +#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_REXT 0x10 +#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_REXT_SHIFT 0x04 +#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_SEL_MASK 0x0F +#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_SEL_SHIFT 0x00 + +/* Bit definitions for GPADC_SMPS_VSEL_MONITORING */ +#define PALMAS_GPADC_SMPS_VSEL_MONITORING_ACTIVE_PHASE 0x80 +#define PALMAS_GPADC_SMPS_VSEL_MONITORING_ACTIVE_PHASE_SHIFT 0x07 +#define PALMAS_GPADC_SMPS_VSEL_MONITORING_SMPS_VSEL_MONITORING_MASK 0x7F +#define PALMAS_GPADC_SMPS_VSEL_MONITORING_SMPS_VSEL_MONITORING_SHIFT 0x00 + +/* Registers for function GPADC */ +#define PALMAS_GPADC_TRIM1 0x00 +#define PALMAS_GPADC_TRIM2 0x01 +#define PALMAS_GPADC_TRIM3 0x02 +#define PALMAS_GPADC_TRIM4 0x03 +#define PALMAS_GPADC_TRIM5 0x04 +#define PALMAS_GPADC_TRIM6 0x05 +#define PALMAS_GPADC_TRIM7 0x06 +#define PALMAS_GPADC_TRIM8 0x07 +#define PALMAS_GPADC_TRIM9 0x08 +#define PALMAS_GPADC_TRIM10 0x09 +#define PALMAS_GPADC_TRIM11 0x0A +#define PALMAS_GPADC_TRIM12 0x0B +#define PALMAS_GPADC_TRIM13 0x0C +#define PALMAS_GPADC_TRIM14 0x0D +#define PALMAS_GPADC_TRIM15 0x0E +#define PALMAS_GPADC_TRIM16 0x0F + +/* TPS659038 regen2_ctrl offset iss different from palmas */ +#define TPS659038_REGEN2_CTRL 0x12 + +/* TPS65917 Interrupt registers */ + +/* Registers for function INTERRUPT */ +#define TPS65917_INT1_STATUS 0x00 +#define TPS65917_INT1_MASK 0x01 +#define TPS65917_INT1_LINE_STATE 0x02 +#define TPS65917_INT2_STATUS 0x05 +#define TPS65917_INT2_MASK 0x06 +#define TPS65917_INT2_LINE_STATE 0x07 +#define TPS65917_INT3_STATUS 0x0A +#define TPS65917_INT3_MASK 0x0B +#define TPS65917_INT3_LINE_STATE 0x0C +#define TPS65917_INT4_STATUS 0x0F +#define TPS65917_INT4_MASK 0x10 +#define TPS65917_INT4_LINE_STATE 0x11 +#define TPS65917_INT4_EDGE_DETECT1 0x12 +#define TPS65917_INT4_EDGE_DETECT2 0x13 +#define TPS65917_INT_CTRL 0x14 + +/* Bit definitions for INT1_STATUS */ +#define TPS65917_INT1_STATUS_VSYS_MON 0x40 +#define TPS65917_INT1_STATUS_VSYS_MON_SHIFT 0x06 +#define TPS65917_INT1_STATUS_HOTDIE 0x20 +#define TPS65917_INT1_STATUS_HOTDIE_SHIFT 0x05 +#define TPS65917_INT1_STATUS_PWRDOWN 0x10 +#define TPS65917_INT1_STATUS_PWRDOWN_SHIFT 0x04 +#define TPS65917_INT1_STATUS_LONG_PRESS_KEY 0x04 +#define TPS65917_INT1_STATUS_LONG_PRESS_KEY_SHIFT 0x02 +#define TPS65917_INT1_STATUS_PWRON 0x02 +#define TPS65917_INT1_STATUS_PWRON_SHIFT 0x01 + +/* Bit definitions for INT1_MASK */ +#define TPS65917_INT1_MASK_VSYS_MON 0x40 +#define TPS65917_INT1_MASK_VSYS_MON_SHIFT 0x06 +#define TPS65917_INT1_MASK_HOTDIE 0x20 +#define TPS65917_INT1_MASK_HOTDIE_SHIFT 0x05 +#define TPS65917_INT1_MASK_PWRDOWN 
0x10 +#define TPS65917_INT1_MASK_PWRDOWN_SHIFT 0x04 +#define TPS65917_INT1_MASK_LONG_PRESS_KEY 0x04 +#define TPS65917_INT1_MASK_LONG_PRESS_KEY_SHIFT 0x02 +#define TPS65917_INT1_MASK_PWRON 0x02 +#define TPS65917_INT1_MASK_PWRON_SHIFT 0x01 + +/* Bit definitions for INT1_LINE_STATE */ +#define TPS65917_INT1_LINE_STATE_VSYS_MON 0x40 +#define TPS65917_INT1_LINE_STATE_VSYS_MON_SHIFT 0x06 +#define TPS65917_INT1_LINE_STATE_HOTDIE 0x20 +#define TPS65917_INT1_LINE_STATE_HOTDIE_SHIFT 0x05 +#define TPS65917_INT1_LINE_STATE_PWRDOWN 0x10 +#define TPS65917_INT1_LINE_STATE_PWRDOWN_SHIFT 0x04 +#define TPS65917_INT1_LINE_STATE_LONG_PRESS_KEY 0x04 +#define TPS65917_INT1_LINE_STATE_LONG_PRESS_KEY_SHIFT 0x02 +#define TPS65917_INT1_LINE_STATE_PWRON 0x02 +#define TPS65917_INT1_LINE_STATE_PWRON_SHIFT 0x01 + +/* Bit definitions for INT2_STATUS */ +#define TPS65917_INT2_STATUS_SHORT 0x40 +#define TPS65917_INT2_STATUS_SHORT_SHIFT 0x06 +#define TPS65917_INT2_STATUS_FSD 0x20 +#define TPS65917_INT2_STATUS_FSD_SHIFT 0x05 +#define TPS65917_INT2_STATUS_RESET_IN 0x10 +#define TPS65917_INT2_STATUS_RESET_IN_SHIFT 0x04 +#define TPS65917_INT2_STATUS_WDT 0x04 +#define TPS65917_INT2_STATUS_WDT_SHIFT 0x02 +#define TPS65917_INT2_STATUS_OTP_ERROR 0x02 +#define TPS65917_INT2_STATUS_OTP_ERROR_SHIFT 0x01 + +/* Bit definitions for INT2_MASK */ +#define TPS65917_INT2_MASK_SHORT 0x40 +#define TPS65917_INT2_MASK_SHORT_SHIFT 0x06 +#define TPS65917_INT2_MASK_FSD 0x20 +#define TPS65917_INT2_MASK_FSD_SHIFT 0x05 +#define TPS65917_INT2_MASK_RESET_IN 0x10 +#define TPS65917_INT2_MASK_RESET_IN_SHIFT 0x04 +#define TPS65917_INT2_MASK_WDT 0x04 +#define TPS65917_INT2_MASK_WDT_SHIFT 0x02 +#define TPS65917_INT2_MASK_OTP_ERROR_TIMER 0x02 +#define TPS65917_INT2_MASK_OTP_ERROR_SHIFT 0x01 + +/* Bit definitions for INT2_LINE_STATE */ +#define TPS65917_INT2_LINE_STATE_SHORT 0x40 +#define TPS65917_INT2_LINE_STATE_SHORT_SHIFT 0x06 +#define TPS65917_INT2_LINE_STATE_FSD 0x20 +#define TPS65917_INT2_LINE_STATE_FSD_SHIFT 0x05 +#define TPS65917_INT2_LINE_STATE_RESET_IN 0x10 +#define TPS65917_INT2_LINE_STATE_RESET_IN_SHIFT 0x04 +#define TPS65917_INT2_LINE_STATE_WDT 0x04 +#define TPS65917_INT2_LINE_STATE_WDT_SHIFT 0x02 +#define TPS65917_INT2_LINE_STATE_OTP_ERROR 0x02 +#define TPS65917_INT2_LINE_STATE_OTP_ERROR_SHIFT 0x01 + +/* Bit definitions for INT3_STATUS */ +#define TPS65917_INT3_STATUS_VBUS 0x80 +#define TPS65917_INT3_STATUS_VBUS_SHIFT 0x07 +#define TPS65917_INT3_STATUS_GPADC_EOC_SW 0x04 +#define TPS65917_INT3_STATUS_GPADC_EOC_SW_SHIFT 0x02 +#define TPS65917_INT3_STATUS_GPADC_AUTO_1 0x02 +#define TPS65917_INT3_STATUS_GPADC_AUTO_1_SHIFT 0x01 +#define TPS65917_INT3_STATUS_GPADC_AUTO_0 0x01 +#define TPS65917_INT3_STATUS_GPADC_AUTO_0_SHIFT 0x00 + +/* Bit definitions for INT3_MASK */ +#define TPS65917_INT3_MASK_VBUS 0x80 +#define TPS65917_INT3_MASK_VBUS_SHIFT 0x07 +#define TPS65917_INT3_MASK_GPADC_EOC_SW 0x04 +#define TPS65917_INT3_MASK_GPADC_EOC_SW_SHIFT 0x02 +#define TPS65917_INT3_MASK_GPADC_AUTO_1 0x02 +#define TPS65917_INT3_MASK_GPADC_AUTO_1_SHIFT 0x01 +#define TPS65917_INT3_MASK_GPADC_AUTO_0 0x01 +#define TPS65917_INT3_MASK_GPADC_AUTO_0_SHIFT 0x00 + +/* Bit definitions for INT3_LINE_STATE */ +#define TPS65917_INT3_LINE_STATE_VBUS 0x80 +#define TPS65917_INT3_LINE_STATE_VBUS_SHIFT 0x07 +#define TPS65917_INT3_LINE_STATE_GPADC_EOC_SW 0x04 +#define TPS65917_INT3_LINE_STATE_GPADC_EOC_SW_SHIFT 0x02 +#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_1 0x02 +#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_1_SHIFT 0x01 +#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_0 0x01 +#define 
TPS65917_INT3_LINE_STATE_GPADC_AUTO_0_SHIFT 0x00 + +/* Bit definitions for INT4_STATUS */ +#define TPS65917_INT4_STATUS_GPIO_6 0x40 +#define TPS65917_INT4_STATUS_GPIO_6_SHIFT 0x06 +#define TPS65917_INT4_STATUS_GPIO_5 0x20 +#define TPS65917_INT4_STATUS_GPIO_5_SHIFT 0x05 +#define TPS65917_INT4_STATUS_GPIO_4 0x10 +#define TPS65917_INT4_STATUS_GPIO_4_SHIFT 0x04 +#define TPS65917_INT4_STATUS_GPIO_3 0x08 +#define TPS65917_INT4_STATUS_GPIO_3_SHIFT 0x03 +#define TPS65917_INT4_STATUS_GPIO_2 0x04 +#define TPS65917_INT4_STATUS_GPIO_2_SHIFT 0x02 +#define TPS65917_INT4_STATUS_GPIO_1 0x02 +#define TPS65917_INT4_STATUS_GPIO_1_SHIFT 0x01 +#define TPS65917_INT4_STATUS_GPIO_0 0x01 +#define TPS65917_INT4_STATUS_GPIO_0_SHIFT 0x00 + +/* Bit definitions for INT4_MASK */ +#define TPS65917_INT4_MASK_GPIO_6 0x40 +#define TPS65917_INT4_MASK_GPIO_6_SHIFT 0x06 +#define TPS65917_INT4_MASK_GPIO_5 0x20 +#define TPS65917_INT4_MASK_GPIO_5_SHIFT 0x05 +#define TPS65917_INT4_MASK_GPIO_4 0x10 +#define TPS65917_INT4_MASK_GPIO_4_SHIFT 0x04 +#define TPS65917_INT4_MASK_GPIO_3 0x08 +#define TPS65917_INT4_MASK_GPIO_3_SHIFT 0x03 +#define TPS65917_INT4_MASK_GPIO_2 0x04 +#define TPS65917_INT4_MASK_GPIO_2_SHIFT 0x02 +#define TPS65917_INT4_MASK_GPIO_1 0x02 +#define TPS65917_INT4_MASK_GPIO_1_SHIFT 0x01 +#define TPS65917_INT4_MASK_GPIO_0 0x01 +#define TPS65917_INT4_MASK_GPIO_0_SHIFT 0x00 + +/* Bit definitions for INT4_LINE_STATE */ +#define TPS65917_INT4_LINE_STATE_GPIO_6 0x40 +#define TPS65917_INT4_LINE_STATE_GPIO_6_SHIFT 0x06 +#define TPS65917_INT4_LINE_STATE_GPIO_5 0x20 +#define TPS65917_INT4_LINE_STATE_GPIO_5_SHIFT 0x05 +#define TPS65917_INT4_LINE_STATE_GPIO_4 0x10 +#define TPS65917_INT4_LINE_STATE_GPIO_4_SHIFT 0x04 +#define TPS65917_INT4_LINE_STATE_GPIO_3 0x08 +#define TPS65917_INT4_LINE_STATE_GPIO_3_SHIFT 0x03 +#define TPS65917_INT4_LINE_STATE_GPIO_2 0x04 +#define TPS65917_INT4_LINE_STATE_GPIO_2_SHIFT 0x02 +#define TPS65917_INT4_LINE_STATE_GPIO_1 0x02 +#define TPS65917_INT4_LINE_STATE_GPIO_1_SHIFT 0x01 +#define TPS65917_INT4_LINE_STATE_GPIO_0 0x01 +#define TPS65917_INT4_LINE_STATE_GPIO_0_SHIFT 0x00 + +/* Bit definitions for INT4_EDGE_DETECT1 */ +#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_RISING 0x80 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_RISING_SHIFT 0x07 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_FALLING 0x40 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_FALLING_SHIFT 0x06 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_RISING 0x20 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_RISING_SHIFT 0x05 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_FALLING 0x10 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_FALLING_SHIFT 0x04 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_RISING 0x08 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_RISING_SHIFT 0x03 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_FALLING 0x04 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_FALLING_SHIFT 0x02 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_RISING 0x02 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_RISING_SHIFT 0x01 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_FALLING 0x01 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_FALLING_SHIFT 0x00 + +/* Bit definitions for INT4_EDGE_DETECT2 */ +#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_RISING 0x20 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_RISING_SHIFT 0x05 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_FALLING 0x10 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_FALLING_SHIFT 0x04 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_RISING 0x08 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_RISING_SHIFT 0x03 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_FALLING 0x04 +#define 
TPS65917_INT4_EDGE_DETECT2_GPIO_5_FALLING_SHIFT 0x02 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_RISING 0x02 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_RISING_SHIFT 0x01 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_FALLING 0x01 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_FALLING_SHIFT 0x00 + +/* Bit definitions for INT_CTRL */ +#define TPS65917_INT_CTRL_INT_PENDING 0x04 +#define TPS65917_INT_CTRL_INT_PENDING_SHIFT 0x02 +#define TPS65917_INT_CTRL_INT_CLEAR 0x01 +#define TPS65917_INT_CTRL_INT_CLEAR_SHIFT 0x00 + +/* TPS65917 SMPS Registers */ + +/* Registers for function SMPS */ +#define TPS65917_SMPS1_CTRL 0x00 +#define TPS65917_SMPS1_FORCE 0x02 +#define TPS65917_SMPS1_VOLTAGE 0x03 +#define TPS65917_SMPS2_CTRL 0x04 +#define TPS65917_SMPS2_FORCE 0x06 +#define TPS65917_SMPS2_VOLTAGE 0x07 +#define TPS65917_SMPS3_CTRL 0x0C +#define TPS65917_SMPS3_FORCE 0x0E +#define TPS65917_SMPS3_VOLTAGE 0x0F +#define TPS65917_SMPS4_CTRL 0x10 +#define TPS65917_SMPS4_VOLTAGE 0x13 +#define TPS65917_SMPS5_CTRL 0x18 +#define TPS65917_SMPS5_VOLTAGE 0x1B +#define TPS65917_SMPS_CTRL 0x24 +#define TPS65917_SMPS_PD_CTRL 0x25 +#define TPS65917_SMPS_THERMAL_EN 0x27 +#define TPS65917_SMPS_THERMAL_STATUS 0x28 +#define TPS65917_SMPS_SHORT_STATUS 0x29 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN 0x2A +#define TPS65917_SMPS_POWERGOOD_MASK1 0x2B +#define TPS65917_SMPS_POWERGOOD_MASK2 0x2C + +/* Bit definitions for SMPS1_CTRL */ +#define TPS65917_SMPS1_CTRL_WR_S 0x80 +#define TPS65917_SMPS1_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_SMPS1_CTRL_ROOF_FLOOR_EN 0x40 +#define TPS65917_SMPS1_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define TPS65917_SMPS1_CTRL_STATUS_MASK 0x30 +#define TPS65917_SMPS1_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_SMPS1_CTRL_MODE_SLEEP_MASK 0x0C +#define TPS65917_SMPS1_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_SMPS1_CTRL_MODE_ACTIVE_MASK 0x03 +#define TPS65917_SMPS1_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS1_FORCE */ +#define TPS65917_SMPS1_FORCE_CMD 0x80 +#define TPS65917_SMPS1_FORCE_CMD_SHIFT 0x07 +#define TPS65917_SMPS1_FORCE_VSEL_MASK 0x7F +#define TPS65917_SMPS1_FORCE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS1_VOLTAGE */ +#define TPS65917_SMPS1_VOLTAGE_RANGE 0x80 +#define TPS65917_SMPS1_VOLTAGE_RANGE_SHIFT 0x07 +#define TPS65917_SMPS1_VOLTAGE_VSEL_MASK 0x7F +#define TPS65917_SMPS1_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS2_CTRL */ +#define TPS65917_SMPS2_CTRL_WR_S 0x80 +#define TPS65917_SMPS2_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_SMPS2_CTRL_ROOF_FLOOR_EN 0x40 +#define TPS65917_SMPS2_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define TPS65917_SMPS2_CTRL_STATUS_MASK 0x30 +#define TPS65917_SMPS2_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_SMPS2_CTRL_MODE_SLEEP_MASK 0x0C +#define TPS65917_SMPS2_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_SMPS2_CTRL_MODE_ACTIVE_MASK 0x03 +#define TPS65917_SMPS2_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS2_FORCE */ +#define TPS65917_SMPS2_FORCE_CMD 0x80 +#define TPS65917_SMPS2_FORCE_CMD_SHIFT 0x07 +#define TPS65917_SMPS2_FORCE_VSEL_MASK 0x7F +#define TPS65917_SMPS2_FORCE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS2_VOLTAGE */ +#define TPS65917_SMPS2_VOLTAGE_RANGE 0x80 +#define TPS65917_SMPS2_VOLTAGE_RANGE_SHIFT 0x07 +#define TPS65917_SMPS2_VOLTAGE_VSEL_MASK 0x7F +#define TPS65917_SMPS2_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS3_CTRL */ +#define TPS65917_SMPS3_CTRL_WR_S 0x80 +#define TPS65917_SMPS3_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_SMPS3_CTRL_ROOF_FLOOR_EN 0x40 +#define TPS65917_SMPS3_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define 
TPS65917_SMPS3_CTRL_STATUS_MASK 0x30 +#define TPS65917_SMPS3_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_SMPS3_CTRL_MODE_SLEEP_MASK 0x0C +#define TPS65917_SMPS3_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_SMPS3_CTRL_MODE_ACTIVE_MASK 0x03 +#define TPS65917_SMPS3_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS3_FORCE */ +#define TPS65917_SMPS3_FORCE_CMD 0x80 +#define TPS65917_SMPS3_FORCE_CMD_SHIFT 0x07 +#define TPS65917_SMPS3_FORCE_VSEL_MASK 0x7F +#define TPS65917_SMPS3_FORCE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS3_VOLTAGE */ +#define TPS65917_SMPS3_VOLTAGE_RANGE 0x80 +#define TPS65917_SMPS3_VOLTAGE_RANGE_SHIFT 0x07 +#define TPS65917_SMPS3_VOLTAGE_VSEL_MASK 0x7F +#define TPS65917_SMPS3_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS4_CTRL */ +#define TPS65917_SMPS4_CTRL_WR_S 0x80 +#define TPS65917_SMPS4_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_SMPS4_CTRL_ROOF_FLOOR_EN 0x40 +#define TPS65917_SMPS4_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define TPS65917_SMPS4_CTRL_STATUS_MASK 0x30 +#define TPS65917_SMPS4_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_SMPS4_CTRL_MODE_SLEEP_MASK 0x0C +#define TPS65917_SMPS4_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_SMPS4_CTRL_MODE_ACTIVE_MASK 0x03 +#define TPS65917_SMPS4_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS4_VOLTAGE */ +#define TPS65917_SMPS4_VOLTAGE_RANGE 0x80 +#define TPS65917_SMPS4_VOLTAGE_RANGE_SHIFT 0x07 +#define TPS65917_SMPS4_VOLTAGE_VSEL_MASK 0x7F +#define TPS65917_SMPS4_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS5_CTRL */ +#define TPS65917_SMPS5_CTRL_WR_S 0x80 +#define TPS65917_SMPS5_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_SMPS5_CTRL_ROOF_FLOOR_EN 0x40 +#define TPS65917_SMPS5_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define TPS65917_SMPS5_CTRL_STATUS_MASK 0x30 +#define TPS65917_SMPS5_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_SMPS5_CTRL_MODE_SLEEP_MASK 0x0C +#define TPS65917_SMPS5_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_SMPS5_CTRL_MODE_ACTIVE_MASK 0x03 +#define TPS65917_SMPS5_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS5_VOLTAGE */ +#define TPS65917_SMPS5_VOLTAGE_RANGE 0x80 +#define TPS65917_SMPS5_VOLTAGE_RANGE_SHIFT 0x07 +#define TPS65917_SMPS5_VOLTAGE_VSEL_MASK 0x7F +#define TPS65917_SMPS5_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS_CTRL */ +#define TPS65917_SMPS_CTRL_SMPS1_SMPS12_EN 0x10 +#define TPS65917_SMPS_CTRL_SMPS1_SMPS12_EN_SHIFT 0x04 +#define TPS65917_SMPS_CTRL_SMPS12_PHASE_CTRL 0x03 +#define TPS65917_SMPS_CTRL_SMPS12_PHASE_CTRL_SHIFT 0x00 + +/* Bit definitions for SMPS_PD_CTRL */ +#define TPS65917_SMPS_PD_CTRL_SMPS5 0x40 +#define TPS65917_SMPS_PD_CTRL_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_PD_CTRL_SMPS4 0x10 +#define TPS65917_SMPS_PD_CTRL_SMPS4_SHIFT 0x04 +#define TPS65917_SMPS_PD_CTRL_SMPS3 0x08 +#define TPS65917_SMPS_PD_CTRL_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_PD_CTRL_SMPS2 0x02 +#define TPS65917_SMPS_PD_CTRL_SMPS2_SHIFT 0x01 +#define TPS65917_SMPS_PD_CTRL_SMPS1 0x01 +#define TPS65917_SMPS_PD_CTRL_SMPS1_SHIFT 0x00 + +/* Bit definitions for SMPS_THERMAL_EN */ +#define TPS65917_SMPS_THERMAL_EN_SMPS5 0x40 +#define TPS65917_SMPS_THERMAL_EN_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_THERMAL_EN_SMPS3 0x08 +#define TPS65917_SMPS_THERMAL_EN_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_THERMAL_EN_SMPS12 0x01 +#define TPS65917_SMPS_THERMAL_EN_SMPS12_SHIFT 0x00 + +/* Bit definitions for SMPS_THERMAL_STATUS */ +#define TPS65917_SMPS_THERMAL_STATUS_SMPS5 0x40 +#define TPS65917_SMPS_THERMAL_STATUS_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_THERMAL_STATUS_SMPS3 0x08 +#define 
TPS65917_SMPS_THERMAL_STATUS_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_THERMAL_STATUS_SMPS12 0x01 +#define TPS65917_SMPS_THERMAL_STATUS_SMPS12_SHIFT 0x00 + +/* Bit definitions for SMPS_SHORT_STATUS */ +#define TPS65917_SMPS_SHORT_STATUS_SMPS5 0x40 +#define TPS65917_SMPS_SHORT_STATUS_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_SHORT_STATUS_SMPS4 0x10 +#define TPS65917_SMPS_SHORT_STATUS_SMPS4_SHIFT 0x04 +#define TPS65917_SMPS_SHORT_STATUS_SMPS3 0x08 +#define TPS65917_SMPS_SHORT_STATUS_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_SHORT_STATUS_SMPS2 0x02 +#define TPS65917_SMPS_SHORT_STATUS_SMPS2_SHIFT 0x01 +#define TPS65917_SMPS_SHORT_STATUS_SMPS1 0x01 +#define TPS65917_SMPS_SHORT_STATUS_SMPS1_SHIFT 0x00 + +/* Bit definitions for SMPS_NEGATIVE_CURRENT_LIMIT_EN */ +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS5 0x40 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS4 0x10 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS4_SHIFT 0x04 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3 0x08 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS2 0x02 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS2_SHIFT 0x01 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS1 0x01 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS1_SHIFT 0x00 + +/* Bit definitions for SMPS_POWERGOOD_MASK1 */ +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS5 0x40 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS4 0x10 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS4_SHIFT 0x04 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS3 0x08 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS2 0x02 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS2_SHIFT 0x01 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS1 0x01 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS1_SHIFT 0x00 + +/* Bit definitions for SMPS_POWERGOOD_MASK2 */ +#define TPS65917_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT 0x80 +#define TPS65917_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT_SHIFT 0x07 +#define TPS65917_SMPS_POWERGOOD_MASK2_OVC_ALARM_SHIFT 0x10 +#define TPS65917_SMPS_POWERGOOD_MASK2_OVC_ALARM 0x04 + +/* Bit definitions for SMPS_PLL_CTRL */ + +#define TPS65917_SMPS_PLL_CTRL_PLL_EN_PLL_BYPASS_SHIFT 0x08 +#define TPS65917_SMPS_PLL_CTRL_PLL_PLL_EN_BYPASS 0x03 +#define TPS65917_SMPS_PLL_CTRL_PLL_PLL_BYPASS_CLK_SHIFT 0x04 +#define TPS65917_SMPS_PLL_CTRL_PLL_PLL_BYPASS_CLK 0x02 + +/* Registers for function LDO */ +#define TPS65917_LDO1_CTRL 0x00 +#define TPS65917_LDO1_VOLTAGE 0x01 +#define TPS65917_LDO2_CTRL 0x02 +#define TPS65917_LDO2_VOLTAGE 0x03 +#define TPS65917_LDO3_CTRL 0x04 +#define TPS65917_LDO3_VOLTAGE 0x05 +#define TPS65917_LDO4_CTRL 0x0E +#define TPS65917_LDO4_VOLTAGE 0x0F +#define TPS65917_LDO5_CTRL 0x12 +#define TPS65917_LDO5_VOLTAGE 0x13 +#define TPS65917_LDO_PD_CTRL1 0x1B +#define TPS65917_LDO_PD_CTRL2 0x1C +#define TPS65917_LDO_SHORT_STATUS1 0x1D +#define TPS65917_LDO_SHORT_STATUS2 0x1E +#define TPS65917_LDO_PD_CTRL3 0x2D +#define TPS65917_LDO_SHORT_STATUS3 0x2E + +/* Bit definitions for LDO1_CTRL */ +#define TPS65917_LDO1_CTRL_WR_S 0x80 +#define TPS65917_LDO1_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_LDO1_CTRL_BYPASS_EN 0x40 +#define TPS65917_LDO1_CTRL_BYPASS_EN_SHIFT 0x06 +#define TPS65917_LDO1_CTRL_STATUS 0x10 +#define TPS65917_LDO1_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_LDO1_CTRL_MODE_SLEEP 0x04 +#define TPS65917_LDO1_CTRL_MODE_SLEEP_SHIFT 0x02 
+#define TPS65917_LDO1_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_LDO1_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO1_VOLTAGE */ +#define TPS65917_LDO1_VOLTAGE_VSEL_MASK 0x2F +#define TPS65917_LDO1_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO2_CTRL */ +#define TPS65917_LDO2_CTRL_WR_S 0x80 +#define TPS65917_LDO2_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_LDO2_CTRL_BYPASS_EN 0x40 +#define TPS65917_LDO2_CTRL_BYPASS_EN_SHIFT 0x06 +#define TPS65917_LDO2_CTRL_STATUS 0x10 +#define TPS65917_LDO2_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_LDO2_CTRL_MODE_SLEEP 0x04 +#define TPS65917_LDO2_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_LDO2_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_LDO2_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO2_VOLTAGE */ +#define TPS65917_LDO2_VOLTAGE_VSEL_MASK 0x2F +#define TPS65917_LDO2_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO3_CTRL */ +#define TPS65917_LDO3_CTRL_WR_S 0x80 +#define TPS65917_LDO3_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_LDO3_CTRL_STATUS 0x10 +#define TPS65917_LDO3_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_LDO3_CTRL_MODE_SLEEP 0x04 +#define TPS65917_LDO3_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_LDO3_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_LDO3_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO3_VOLTAGE */ +#define TPS65917_LDO3_VOLTAGE_VSEL_MASK 0x2F +#define TPS65917_LDO3_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO4_CTRL */ +#define TPS65917_LDO4_CTRL_WR_S 0x80 +#define TPS65917_LDO4_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_LDO4_CTRL_STATUS 0x10 +#define TPS65917_LDO4_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_LDO4_CTRL_MODE_SLEEP 0x04 +#define TPS65917_LDO4_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_LDO4_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_LDO4_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO4_VOLTAGE */ +#define TPS65917_LDO4_VOLTAGE_VSEL_MASK 0x2F +#define TPS65917_LDO4_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO5_CTRL */ +#define TPS65917_LDO5_CTRL_WR_S 0x80 +#define TPS65917_LDO5_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_LDO5_CTRL_STATUS 0x10 +#define TPS65917_LDO5_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_LDO5_CTRL_MODE_SLEEP 0x04 +#define TPS65917_LDO5_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_LDO5_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_LDO5_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO5_VOLTAGE */ +#define TPS65917_LDO5_VOLTAGE_VSEL_MASK 0x2F +#define TPS65917_LDO5_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO_PD_CTRL1 */ +#define TPS65917_LDO_PD_CTRL1_LDO4 0x80 +#define TPS65917_LDO_PD_CTRL1_LDO4_SHIFT 0x07 +#define TPS65917_LDO_PD_CTRL1_LDO2 0x02 +#define TPS65917_LDO_PD_CTRL1_LDO2_SHIFT 0x01 +#define TPS65917_LDO_PD_CTRL1_LDO1 0x01 +#define TPS65917_LDO_PD_CTRL1_LDO1_SHIFT 0x00 + +/* Bit definitions for LDO_PD_CTRL2 */ +#define TPS65917_LDO_PD_CTRL2_LDO3 0x04 +#define TPS65917_LDO_PD_CTRL2_LDO3_SHIFT 0x02 +#define TPS65917_LDO_PD_CTRL2_LDO5 0x02 +#define TPS65917_LDO_PD_CTRL2_LDO5_SHIFT 0x01 + +/* Bit definitions for LDO_PD_CTRL3 */ +#define TPS65917_LDO_PD_CTRL2_LDOVANA 0x80 +#define TPS65917_LDO_PD_CTRL2_LDOVANA_SHIFT 0x07 + +/* Bit definitions for LDO_SHORT_STATUS1 */ +#define TPS65917_LDO_SHORT_STATUS1_LDO4 0x80 +#define TPS65917_LDO_SHORT_STATUS1_LDO4_SHIFT 0x07 +#define TPS65917_LDO_SHORT_STATUS1_LDO2 0x02 +#define TPS65917_LDO_SHORT_STATUS1_LDO2_SHIFT 0x01 +#define TPS65917_LDO_SHORT_STATUS1_LDO1 0x01 +#define TPS65917_LDO_SHORT_STATUS1_LDO1_SHIFT 0x00 + +/* Bit definitions for LDO_SHORT_STATUS2 */ +#define TPS65917_LDO_SHORT_STATUS2_LDO3 0x04 +#define 
TPS65917_LDO_SHORT_STATUS2_LDO3_SHIFT 0x02 +#define TPS65917_LDO_SHORT_STATUS2_LDO5 0x02 +#define TPS65917_LDO_SHORT_STATUS2_LDO5_SHIFT 0x01 + +/* Bit definitions for LDO_SHORT_STATUS2 */ +#define TPS65917_LDO_SHORT_STATUS2_LDOVANA 0x80 +#define TPS65917_LDO_SHORT_STATUS2_LDOVANA_SHIFT 0x07 + +/* Bit definitions for REGEN1_CTRL */ +#define TPS65917_REGEN1_CTRL_STATUS 0x10 +#define TPS65917_REGEN1_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_REGEN1_CTRL_MODE_SLEEP 0x04 +#define TPS65917_REGEN1_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_REGEN1_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_REGEN1_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for PLLEN_CTRL */ +#define TPS65917_PLLEN_CTRL_STATUS 0x10 +#define TPS65917_PLLEN_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_PLLEN_CTRL_MODE_SLEEP 0x04 +#define TPS65917_PLLEN_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_PLLEN_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_PLLEN_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for REGEN2_CTRL */ +#define TPS65917_REGEN2_CTRL_STATUS 0x10 +#define TPS65917_REGEN2_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_REGEN2_CTRL_MODE_SLEEP 0x04 +#define TPS65917_REGEN2_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_REGEN2_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_REGEN2_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for NSLEEP_RES_ASSIGN */ +#define TPS65917_NSLEEP_RES_ASSIGN_PLL_EN 0x08 +#define TPS65917_NSLEEP_RES_ASSIGN_PLL_EN_SHIFT 0x03 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN3 0x04 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN3_SHIFT 0x02 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN2 0x02 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN2_SHIFT 0x01 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN1 0x01 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN1_SHIFT 0x00 + +/* Bit definitions for NSLEEP_SMPS_ASSIGN */ +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS5 0x40 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS5_SHIFT 0x06 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS4 0x10 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS4_SHIFT 0x04 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS3 0x08 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS3_SHIFT 0x03 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS2 0x02 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS2_SHIFT 0x01 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS1 0x01 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS1_SHIFT 0x00 + +/* Bit definitions for NSLEEP_LDO_ASSIGN1 */ +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO4 0x80 +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO4_SHIFT 0x07 +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO2 0x02 +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO2_SHIFT 0x01 +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO1 0x01 +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO1_SHIFT 0x00 + +/* Bit definitions for NSLEEP_LDO_ASSIGN2 */ +#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO3 0x04 +#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO3_SHIFT 0x02 +#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO5 0x02 +#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO5_SHIFT 0x01 + +/* Bit definitions for ENABLE1_RES_ASSIGN */ +#define TPS65917_ENABLE1_RES_ASSIGN_PLLEN 0x08 +#define TPS65917_ENABLE1_RES_ASSIGN_PLLEN_SHIFT 0x03 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN3 0x04 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN3_SHIFT 0x02 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN2 0x02 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN2_SHIFT 0x01 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN1 0x01 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN1_SHIFT 0x00 + +/* Bit definitions for ENABLE1_SMPS_ASSIGN */ +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS5 0x40 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS5_SHIFT 0x06 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS4 0x10 +#define 
TPS65917_ENABLE1_SMPS_ASSIGN_SMPS4_SHIFT 0x04 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS3 0x08 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS3_SHIFT 0x03 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS2 0x02 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS2_SHIFT 0x01 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS1 0x01 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS1_SHIFT 0x00 + +/* Bit definitions for ENABLE1_LDO_ASSIGN1 */ +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO4 0x80 +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO4_SHIFT 0x07 +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO2 0x02 +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO2_SHIFT 0x01 +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO1 0x01 +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO1_SHIFT 0x00 + +/* Bit definitions for ENABLE1_LDO_ASSIGN2 */ +#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO3 0x04 +#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO3_SHIFT 0x02 +#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO5 0x02 +#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO5_SHIFT 0x01 + +/* Bit definitions for ENABLE2_RES_ASSIGN */ +#define TPS65917_ENABLE2_RES_ASSIGN_PLLEN 0x08 +#define TPS65917_ENABLE2_RES_ASSIGN_PLLEN_SHIFT 0x03 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN3 0x04 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN3_SHIFT 0x02 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN2 0x02 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN2_SHIFT 0x01 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN1 0x01 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN1_SHIFT 0x00 + +/* Bit definitions for ENABLE2_SMPS_ASSIGN */ +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS5 0x40 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS5_SHIFT 0x06 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS4 0x10 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS4_SHIFT 0x04 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS3 0x08 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS3_SHIFT 0x03 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS2 0x02 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS2_SHIFT 0x01 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS1 0x01 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS1_SHIFT 0x00 + +/* Bit definitions for ENABLE2_LDO_ASSIGN1 */ +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO4 0x80 +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO4_SHIFT 0x07 +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO2 0x02 +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO2_SHIFT 0x01 +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO1 0x01 +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO1_SHIFT 0x00 + +/* Bit definitions for ENABLE2_LDO_ASSIGN2 */ +#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO3 0x04 +#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO3_SHIFT 0x02 +#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO5 0x02 +#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO5_SHIFT 0x01 + +/* Bit definitions for REGEN3_CTRL */ +#define TPS65917_REGEN3_CTRL_STATUS 0x10 +#define TPS65917_REGEN3_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_REGEN3_CTRL_MODE_SLEEP 0x04 +#define TPS65917_REGEN3_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */ +#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC + +/* Registers for function RESOURCE */ +#define TPS65917_REGEN1_CTRL 0x2 +#define TPS65917_PLLEN_CTRL 0x3 +#define TPS65917_NSLEEP_RES_ASSIGN 0x6 +#define TPS65917_NSLEEP_SMPS_ASSIGN 0x7 +#define TPS65917_NSLEEP_LDO_ASSIGN1 0x8 +#define TPS65917_NSLEEP_LDO_ASSIGN2 0x9 +#define TPS65917_ENABLE1_RES_ASSIGN 0xA +#define TPS65917_ENABLE1_SMPS_ASSIGN 0xB +#define TPS65917_ENABLE1_LDO_ASSIGN1 0xC +#define TPS65917_ENABLE1_LDO_ASSIGN2 0xD +#define TPS65917_ENABLE2_RES_ASSIGN 0xE +#define 
TPS65917_ENABLE2_SMPS_ASSIGN 0xF +#define TPS65917_ENABLE2_LDO_ASSIGN1 0x10 +#define TPS65917_ENABLE2_LDO_ASSIGN2 0x11 +#define TPS65917_REGEN2_CTRL 0x12 +#define TPS65917_REGEN3_CTRL 0x13 + +static inline int palmas_read(struct palmas *palmas, unsigned int base, + unsigned int reg, unsigned int *val) +{ + unsigned int addr = PALMAS_BASE_TO_REG(base, reg); + int slave_id = PALMAS_BASE_TO_SLAVE(base); + + return regmap_read(palmas->regmap[slave_id], addr, val); +} + +static inline int palmas_write(struct palmas *palmas, unsigned int base, + unsigned int reg, unsigned int value) +{ + unsigned int addr = PALMAS_BASE_TO_REG(base, reg); + int slave_id = PALMAS_BASE_TO_SLAVE(base); + + return regmap_write(palmas->regmap[slave_id], addr, value); +} + +static inline int palmas_bulk_write(struct palmas *palmas, unsigned int base, + unsigned int reg, const void *val, size_t val_count) +{ + unsigned int addr = PALMAS_BASE_TO_REG(base, reg); + int slave_id = PALMAS_BASE_TO_SLAVE(base); + + return regmap_bulk_write(palmas->regmap[slave_id], addr, + val, val_count); +} + +static inline int palmas_bulk_read(struct palmas *palmas, unsigned int base, + unsigned int reg, void *val, size_t val_count) +{ + unsigned int addr = PALMAS_BASE_TO_REG(base, reg); + int slave_id = PALMAS_BASE_TO_SLAVE(base); + + return regmap_bulk_read(palmas->regmap[slave_id], addr, + val, val_count); +} + +static inline int palmas_update_bits(struct palmas *palmas, unsigned int base, + unsigned int reg, unsigned int mask, unsigned int val) +{ + unsigned int addr = PALMAS_BASE_TO_REG(base, reg); + int slave_id = PALMAS_BASE_TO_SLAVE(base); + + return regmap_update_bits(palmas->regmap[slave_id], addr, mask, val); +} + +static inline int palmas_irq_get_virq(struct palmas *palmas, int irq) +{ + return regmap_irq_get_virq(palmas->irq_data, irq); +} + + +int palmas_ext_control_req_config(struct palmas *palmas, + enum palmas_external_requestor_id ext_control_req_id, + int ext_ctrl, bool enable); + +#endif /* __LINUX_MFD_PALMAS_H */ diff --git a/include/linux/mfd/pcf50633/adc.h b/include/linux/mfd/pcf50633/adc.h new file mode 100644 index 000000000..b35e62801 --- /dev/null +++ b/include/linux/mfd/pcf50633/adc.h @@ -0,0 +1,73 @@ +/* + * adc.h -- Driver for NXP PCF50633 ADC + * + * (C) 2006-2008 by Openmoko, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
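[Editor's sketch, not part of the patch] The palmas_read()/palmas_write()/palmas_update_bits() helpers above simply split a (base, reg) pair into a slave index and register offset via PALMAS_BASE_TO_SLAVE()/PALMAS_BASE_TO_REG() and forward to regmap. A minimal usage sketch follows; the function name is illustrative, and the RESOURCE function base is passed in as a parameter rather than assuming its exact macro name from elsewhere in this header.

static int example_regen1_active(struct palmas *palmas, unsigned int resource_base)
{
	/* Set REGEN1 to active mode using the helper and the bit/register
	 * definitions above; resource_base is the RESOURCE function base
	 * defined earlier in this header. */
	return palmas_update_bits(palmas, resource_base,
				  TPS65917_REGEN1_CTRL,
				  TPS65917_REGEN1_CTRL_MODE_ACTIVE,
				  TPS65917_REGEN1_CTRL_MODE_ACTIVE);
}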
+ */ + +#ifndef __LINUX_MFD_PCF50633_ADC_H +#define __LINUX_MFD_PCF50633_ADC_H + +#include +#include + +/* ADC Registers */ +#define PCF50633_REG_ADCC3 0x52 +#define PCF50633_REG_ADCC2 0x53 +#define PCF50633_REG_ADCC1 0x54 +#define PCF50633_REG_ADCS1 0x55 +#define PCF50633_REG_ADCS2 0x56 +#define PCF50633_REG_ADCS3 0x57 + +#define PCF50633_ADCC1_ADCSTART 0x01 +#define PCF50633_ADCC1_RES_8BIT 0x02 +#define PCF50633_ADCC1_RES_10BIT 0x00 +#define PCF50633_ADCC1_AVERAGE_NO 0x00 +#define PCF50633_ADCC1_AVERAGE_4 0x04 +#define PCF50633_ADCC1_AVERAGE_8 0x08 +#define PCF50633_ADCC1_AVERAGE_16 0x0c +#define PCF50633_ADCC1_MUX_BATSNS_RES 0x00 +#define PCF50633_ADCC1_MUX_BATSNS_SUBTR 0x10 +#define PCF50633_ADCC1_MUX_ADCIN2_RES 0x20 +#define PCF50633_ADCC1_MUX_ADCIN2_SUBTR 0x30 +#define PCF50633_ADCC1_MUX_BATTEMP 0x60 +#define PCF50633_ADCC1_MUX_ADCIN1 0x70 +#define PCF50633_ADCC1_AVERAGE_MASK 0x0c +#define PCF50633_ADCC1_ADCMUX_MASK 0xf0 + +#define PCF50633_ADCC2_RATIO_NONE 0x00 +#define PCF50633_ADCC2_RATIO_BATTEMP 0x01 +#define PCF50633_ADCC2_RATIO_ADCIN1 0x02 +#define PCF50633_ADCC2_RATIO_BOTH 0x03 +#define PCF50633_ADCC2_RATIOSETTL_100US 0x04 + +#define PCF50633_ADCC3_ACCSW_EN 0x01 +#define PCF50633_ADCC3_NTCSW_EN 0x04 +#define PCF50633_ADCC3_RES_DIV_TWO 0x10 +#define PCF50633_ADCC3_RES_DIV_THREE 0x00 + +#define PCF50633_ADCS3_REF_NTCSW 0x00 +#define PCF50633_ADCS3_REF_ACCSW 0x10 +#define PCF50633_ADCS3_REF_2V0 0x20 +#define PCF50633_ADCS3_REF_VISA 0x30 +#define PCF50633_ADCS3_REF_2V0_2 0x70 +#define PCF50633_ADCS3_ADCRDY 0x80 + +#define PCF50633_ADCS3_ADCDAT1L_MASK 0x03 +#define PCF50633_ADCS3_ADCDAT2L_MASK 0x0c +#define PCF50633_ADCS3_ADCDAT2L_SHIFT 2 +#define PCF50633_ASCS3_REF_MASK 0x70 + +extern int +pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg, + void (*callback)(struct pcf50633 *, void *, int), + void *callback_param); +extern int +pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg); + +#endif /* __LINUX_PCF50633_ADC_H */ diff --git a/include/linux/mfd/pcf50633/backlight.h b/include/linux/mfd/pcf50633/backlight.h new file mode 100644 index 000000000..83747e217 --- /dev/null +++ b/include/linux/mfd/pcf50633/backlight.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen + * PCF50633 backlight device driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __LINUX_MFD_PCF50633_BACKLIGHT +#define __LINUX_MFD_PCF50633_BACKLIGHT + +/* +* @default_brightness: Backlight brightness is initialized to this value +* +* Brightness to be used after the driver has been probed. +* Valid range 0-63. +* +* @default_brightness_limit: The actual brightness is limited by this value +* +* Brightness limit to be used after the driver has been probed. This is useful +* when it is not known how much power is available for the backlight during +* probe. +* Valid range 0-63. Can be changed later with pcf50633_bl_set_brightness_limit. +* +* @ramp_time: Display ramp time when changing brightness +* +* When changing the backlights brightness the change is not instant, instead +* it fades smooth from one state to another. 
This value specifies how long +* the fade should take. The lower the value the higher the fade time. +* Valid range 0-255 +*/ +struct pcf50633_bl_platform_data { + unsigned int default_brightness; + unsigned int default_brightness_limit; + uint8_t ramp_time; +}; + + +struct pcf50633; + +int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit); + +#endif + diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h new file mode 100644 index 000000000..a80840752 --- /dev/null +++ b/include/linux/mfd/pcf50633/core.h @@ -0,0 +1,238 @@ +/* + * core.h -- Core driver for NXP PCF50633 + * + * (C) 2006-2008 by Openmoko, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_MFD_PCF50633_CORE_H +#define __LINUX_MFD_PCF50633_CORE_H + +#include +#include +#include +#include +#include +#include + +struct pcf50633; +struct regmap; + +#define PCF50633_NUM_REGULATORS 11 + +struct pcf50633_platform_data { + struct regulator_init_data reg_init_data[PCF50633_NUM_REGULATORS]; + + char **batteries; + int num_batteries; + + /* + * Should be set accordingly to the reference resistor used, see + * I_{ch(ref)} charger reference current in the pcf50633 User + * Manual. + */ + int charger_reference_current_ma; + + /* Callbacks */ + void (*probe_done)(struct pcf50633 *); + void (*mbc_event_callback)(struct pcf50633 *, int); + void (*regulator_registered)(struct pcf50633 *, int); + void (*force_shutdown)(struct pcf50633 *); + + u8 resumers[5]; + + struct pcf50633_bl_platform_data *backlight_data; +}; + +struct pcf50633_irq { + void (*handler) (int, void *); + void *data; +}; + +int pcf50633_register_irq(struct pcf50633 *pcf, int irq, + void (*handler) (int, void *), void *data); +int pcf50633_free_irq(struct pcf50633 *pcf, int irq); + +int pcf50633_irq_mask(struct pcf50633 *pcf, int irq); +int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq); +int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq); + +int pcf50633_read_block(struct pcf50633 *, u8 reg, + int nr_regs, u8 *data); +int pcf50633_write_block(struct pcf50633 *pcf, u8 reg, + int nr_regs, u8 *data); +u8 pcf50633_reg_read(struct pcf50633 *, u8 reg); +int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val); + +int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val); +int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 bits); + +/* Interrupt registers */ + +#define PCF50633_REG_INT1 0x02 +#define PCF50633_REG_INT2 0x03 +#define PCF50633_REG_INT3 0x04 +#define PCF50633_REG_INT4 0x05 +#define PCF50633_REG_INT5 0x06 + +#define PCF50633_REG_INT1M 0x07 +#define PCF50633_REG_INT2M 0x08 +#define PCF50633_REG_INT3M 0x09 +#define PCF50633_REG_INT4M 0x0a +#define PCF50633_REG_INT5M 0x0b + +enum { + /* Chip IRQs */ + PCF50633_IRQ_ADPINS, + PCF50633_IRQ_ADPREM, + PCF50633_IRQ_USBINS, + PCF50633_IRQ_USBREM, + PCF50633_IRQ_RESERVED1, + PCF50633_IRQ_RESERVED2, + PCF50633_IRQ_ALARM, + PCF50633_IRQ_SECOND, + PCF50633_IRQ_ONKEYR, + PCF50633_IRQ_ONKEYF, + PCF50633_IRQ_EXTON1R, + PCF50633_IRQ_EXTON1F, + PCF50633_IRQ_EXTON2R, + PCF50633_IRQ_EXTON2F, + PCF50633_IRQ_EXTON3R, + PCF50633_IRQ_EXTON3F, + PCF50633_IRQ_BATFULL, + PCF50633_IRQ_CHGHALT, + PCF50633_IRQ_THLIMON, + PCF50633_IRQ_THLIMOFF, + PCF50633_IRQ_USBLIMON, + PCF50633_IRQ_USBLIMOFF, + 
PCF50633_IRQ_ADCRDY, + PCF50633_IRQ_ONKEY1S, + PCF50633_IRQ_LOWSYS, + PCF50633_IRQ_LOWBAT, + PCF50633_IRQ_HIGHTMP, + PCF50633_IRQ_AUTOPWRFAIL, + PCF50633_IRQ_DWN1PWRFAIL, + PCF50633_IRQ_DWN2PWRFAIL, + PCF50633_IRQ_LEDPWRFAIL, + PCF50633_IRQ_LEDOVP, + PCF50633_IRQ_LDO1PWRFAIL, + PCF50633_IRQ_LDO2PWRFAIL, + PCF50633_IRQ_LDO3PWRFAIL, + PCF50633_IRQ_LDO4PWRFAIL, + PCF50633_IRQ_LDO5PWRFAIL, + PCF50633_IRQ_LDO6PWRFAIL, + PCF50633_IRQ_HCLDOPWRFAIL, + PCF50633_IRQ_HCLDOOVL, + + /* Always last */ + PCF50633_NUM_IRQ, +}; + +struct pcf50633 { + struct device *dev; + struct regmap *regmap; + + struct pcf50633_platform_data *pdata; + int irq; + struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ]; + struct work_struct irq_work; + struct workqueue_struct *work_queue; + struct mutex lock; + + u8 mask_regs[5]; + + u8 suspend_irq_masks[5]; + u8 resume_reason[5]; + int is_suspended; + + int onkey1s_held; + + struct platform_device *rtc_pdev; + struct platform_device *mbc_pdev; + struct platform_device *adc_pdev; + struct platform_device *input_pdev; + struct platform_device *bl_pdev; + struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS]; +}; + +enum pcf50633_reg_int1 { + PCF50633_INT1_ADPINS = 0x01, /* Adapter inserted */ + PCF50633_INT1_ADPREM = 0x02, /* Adapter removed */ + PCF50633_INT1_USBINS = 0x04, /* USB inserted */ + PCF50633_INT1_USBREM = 0x08, /* USB removed */ + /* reserved */ + PCF50633_INT1_ALARM = 0x40, /* RTC alarm time is reached */ + PCF50633_INT1_SECOND = 0x80, /* RTC periodic second interrupt */ +}; + +enum pcf50633_reg_int2 { + PCF50633_INT2_ONKEYR = 0x01, /* ONKEY rising edge */ + PCF50633_INT2_ONKEYF = 0x02, /* ONKEY falling edge */ + PCF50633_INT2_EXTON1R = 0x04, /* EXTON1 rising edge */ + PCF50633_INT2_EXTON1F = 0x08, /* EXTON1 falling edge */ + PCF50633_INT2_EXTON2R = 0x10, /* EXTON2 rising edge */ + PCF50633_INT2_EXTON2F = 0x20, /* EXTON2 falling edge */ + PCF50633_INT2_EXTON3R = 0x40, /* EXTON3 rising edge */ + PCF50633_INT2_EXTON3F = 0x80, /* EXTON3 falling edge */ +}; + +enum pcf50633_reg_int3 { + PCF50633_INT3_BATFULL = 0x01, /* Battery full */ + PCF50633_INT3_CHGHALT = 0x02, /* Charger halt */ + PCF50633_INT3_THLIMON = 0x04, + PCF50633_INT3_THLIMOFF = 0x08, + PCF50633_INT3_USBLIMON = 0x10, + PCF50633_INT3_USBLIMOFF = 0x20, + PCF50633_INT3_ADCRDY = 0x40, /* ADC result ready */ + PCF50633_INT3_ONKEY1S = 0x80, /* ONKEY pressed 1 second */ +}; + +enum pcf50633_reg_int4 { + PCF50633_INT4_LOWSYS = 0x01, + PCF50633_INT4_LOWBAT = 0x02, + PCF50633_INT4_HIGHTMP = 0x04, + PCF50633_INT4_AUTOPWRFAIL = 0x08, + PCF50633_INT4_DWN1PWRFAIL = 0x10, + PCF50633_INT4_DWN2PWRFAIL = 0x20, + PCF50633_INT4_LEDPWRFAIL = 0x40, + PCF50633_INT4_LEDOVP = 0x80, +}; + +enum pcf50633_reg_int5 { + PCF50633_INT5_LDO1PWRFAIL = 0x01, + PCF50633_INT5_LDO2PWRFAIL = 0x02, + PCF50633_INT5_LDO3PWRFAIL = 0x04, + PCF50633_INT5_LDO4PWRFAIL = 0x08, + PCF50633_INT5_LDO5PWRFAIL = 0x10, + PCF50633_INT5_LDO6PWRFAIL = 0x20, + PCF50633_INT5_HCLDOPWRFAIL = 0x40, + PCF50633_INT5_HCLDOOVL = 0x80, +}; + +/* misc. 
registers */ +#define PCF50633_REG_OOCSHDWN 0x0c + +/* LED registers */ +#define PCF50633_REG_LEDOUT 0x28 +#define PCF50633_REG_LEDENA 0x29 +#define PCF50633_REG_LEDCTL 0x2a +#define PCF50633_REG_LEDDIM 0x2b + +static inline struct pcf50633 *dev_to_pcf50633(struct device *dev) +{ + return dev_get_drvdata(dev); +} + +int pcf50633_irq_init(struct pcf50633 *pcf, int irq); +void pcf50633_irq_free(struct pcf50633 *pcf); +#ifdef CONFIG_PM +int pcf50633_irq_suspend(struct pcf50633 *pcf); +int pcf50633_irq_resume(struct pcf50633 *pcf); +#endif + +#endif diff --git a/include/linux/mfd/pcf50633/gpio.h b/include/linux/mfd/pcf50633/gpio.h new file mode 100644 index 000000000..a42b845ef --- /dev/null +++ b/include/linux/mfd/pcf50633/gpio.h @@ -0,0 +1,52 @@ +/* + * gpio.h -- GPIO driver for NXP PCF50633 + * + * (C) 2006-2008 by Openmoko, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_MFD_PCF50633_GPIO_H +#define __LINUX_MFD_PCF50633_GPIO_H + +#include + +#define PCF50633_GPIO1 1 +#define PCF50633_GPIO2 2 +#define PCF50633_GPIO3 3 +#define PCF50633_GPO 4 + +#define PCF50633_REG_GPIO1CFG 0x14 +#define PCF50633_REG_GPIO2CFG 0x15 +#define PCF50633_REG_GPIO3CFG 0x16 +#define PCF50633_REG_GPOCFG 0x17 + +#define PCF50633_GPOCFG_GPOSEL_MASK 0x07 + +enum pcf50633_reg_gpocfg { + PCF50633_GPOCFG_GPOSEL_0 = 0x00, + PCF50633_GPOCFG_GPOSEL_LED_NFET = 0x01, + PCF50633_GPOCFG_GPOSEL_SYSxOK = 0x02, + PCF50633_GPOCFG_GPOSEL_CLK32K = 0x03, + PCF50633_GPOCFG_GPOSEL_ADAPUSB = 0x04, + PCF50633_GPOCFG_GPOSEL_USBxOK = 0x05, + PCF50633_GPOCFG_GPOSEL_ACTPH4 = 0x06, + PCF50633_GPOCFG_GPOSEL_1 = 0x07, + PCF50633_GPOCFG_GPOSEL_INVERSE = 0x08, +}; + +int pcf50633_gpio_set(struct pcf50633 *pcf, int gpio, u8 val); +u8 pcf50633_gpio_get(struct pcf50633 *pcf, int gpio); + +int pcf50633_gpio_invert_set(struct pcf50633 *, int gpio, int invert); +int pcf50633_gpio_invert_get(struct pcf50633 *pcf, int gpio); + +int pcf50633_gpio_power_supply_set(struct pcf50633 *, + int gpio, int regulator, int on); +#endif /* __LINUX_MFD_PCF50633_GPIO_H */ + + diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h new file mode 100644 index 000000000..df4f5fa88 --- /dev/null +++ b/include/linux/mfd/pcf50633/mbc.h @@ -0,0 +1,134 @@ +/* + * mbc.h -- Driver for NXP PCF50633 Main Battery Charger + * + * (C) 2006-2008 by Openmoko, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
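[Editor's sketch, not part of the patch] As an illustration of how the pcf50633 core accessors combine with the GPO configuration bits defined above, the following hedged sketch routes the 32 kHz clock to the GPO pin; the function name is illustrative and whether a given board wants this setting is outside the header's scope.

static int example_gpo_as_clk32k(struct pcf50633 *pcf)
{
	/* Select the CLK32K function in the GPOSEL field of GPOCFG,
	 * leaving the other bits of the register untouched. */
	return pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_GPOCFG,
					 PCF50633_GPOCFG_GPOSEL_MASK,
					 PCF50633_GPOCFG_GPOSEL_CLK32K);
}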
+ */ + +#ifndef __LINUX_MFD_PCF50633_MBC_H +#define __LINUX_MFD_PCF50633_MBC_H + +#include +#include + +#define PCF50633_REG_MBCC1 0x43 +#define PCF50633_REG_MBCC2 0x44 +#define PCF50633_REG_MBCC3 0x45 +#define PCF50633_REG_MBCC4 0x46 +#define PCF50633_REG_MBCC5 0x47 +#define PCF50633_REG_MBCC6 0x48 +#define PCF50633_REG_MBCC7 0x49 +#define PCF50633_REG_MBCC8 0x4a +#define PCF50633_REG_MBCS1 0x4b +#define PCF50633_REG_MBCS2 0x4c +#define PCF50633_REG_MBCS3 0x4d + +enum pcf50633_reg_mbcc1 { + PCF50633_MBCC1_CHGENA = 0x01, /* Charger enable */ + PCF50633_MBCC1_AUTOSTOP = 0x02, + PCF50633_MBCC1_AUTORES = 0x04, /* automatic resume */ + PCF50633_MBCC1_RESUME = 0x08, /* explicit resume cmd */ + PCF50633_MBCC1_RESTART = 0x10, /* restart charging */ + PCF50633_MBCC1_PREWDTIME_60M = 0x20, /* max. precharging time */ + PCF50633_MBCC1_WDTIME_1H = 0x00, + PCF50633_MBCC1_WDTIME_2H = 0x40, + PCF50633_MBCC1_WDTIME_4H = 0x80, + PCF50633_MBCC1_WDTIME_6H = 0xc0, +}; +#define PCF50633_MBCC1_WDTIME_MASK 0xc0 + +enum pcf50633_reg_mbcc2 { + PCF50633_MBCC2_VBATCOND_2V7 = 0x00, + PCF50633_MBCC2_VBATCOND_2V85 = 0x01, + PCF50633_MBCC2_VBATCOND_3V0 = 0x02, + PCF50633_MBCC2_VBATCOND_3V15 = 0x03, + PCF50633_MBCC2_VMAX_4V = 0x00, + PCF50633_MBCC2_VMAX_4V20 = 0x28, + PCF50633_MBCC2_VRESDEBTIME_64S = 0x80, /* debounce time (32/64sec) */ +}; + +enum pcf50633_reg_mbcc7 { + PCF50633_MBCC7_USB_100mA = 0x00, + PCF50633_MBCC7_USB_500mA = 0x01, + PCF50633_MBCC7_USB_1000mA = 0x02, + PCF50633_MBCC7_USB_SUSPEND = 0x03, + PCF50633_MBCC7_BATTEMP_EN = 0x04, + PCF50633_MBCC7_BATSYSIMAX_1A6 = 0x00, + PCF50633_MBCC7_BATSYSIMAX_1A8 = 0x40, + PCF50633_MBCC7_BATSYSIMAX_2A0 = 0x80, + PCF50633_MBCC7_BATSYSIMAX_2A2 = 0xc0, +}; +#define PCF50633_MBCC7_USB_MASK 0x03 + +enum pcf50633_reg_mbcc8 { + PCF50633_MBCC8_USBENASUS = 0x10, +}; + +enum pcf50633_reg_mbcs1 { + PCF50633_MBCS1_USBPRES = 0x01, + PCF50633_MBCS1_USBOK = 0x02, + PCF50633_MBCS1_ADAPTPRES = 0x04, + PCF50633_MBCS1_ADAPTOK = 0x08, + PCF50633_MBCS1_TBAT_OK = 0x00, + PCF50633_MBCS1_TBAT_ABOVE = 0x10, + PCF50633_MBCS1_TBAT_BELOW = 0x20, + PCF50633_MBCS1_TBAT_UNDEF = 0x30, + PCF50633_MBCS1_PREWDTEXP = 0x40, + PCF50633_MBCS1_WDTEXP = 0x80, +}; + +enum pcf50633_reg_mbcs2_mbcmod { + PCF50633_MBCS2_MBC_PLAY = 0x00, + PCF50633_MBCS2_MBC_USB_PRE = 0x01, + PCF50633_MBCS2_MBC_USB_PRE_WAIT = 0x02, + PCF50633_MBCS2_MBC_USB_FAST = 0x03, + PCF50633_MBCS2_MBC_USB_FAST_WAIT = 0x04, + PCF50633_MBCS2_MBC_USB_SUSPEND = 0x05, + PCF50633_MBCS2_MBC_ADP_PRE = 0x06, + PCF50633_MBCS2_MBC_ADP_PRE_WAIT = 0x07, + PCF50633_MBCS2_MBC_ADP_FAST = 0x08, + PCF50633_MBCS2_MBC_ADP_FAST_WAIT = 0x09, + PCF50633_MBCS2_MBC_BAT_FULL = 0x0a, + PCF50633_MBCS2_MBC_HALT = 0x0b, +}; +#define PCF50633_MBCS2_MBC_MASK 0x0f +enum pcf50633_reg_mbcs2_chgstat { + PCF50633_MBCS2_CHGS_NONE = 0x00, + PCF50633_MBCS2_CHGS_ADAPTER = 0x10, + PCF50633_MBCS2_CHGS_USB = 0x20, + PCF50633_MBCS2_CHGS_BOTH = 0x30, +}; +#define PCF50633_MBCS2_RESSTAT_AUTO 0x40 + +enum pcf50633_reg_mbcs3 { + PCF50633_MBCS3_USBLIM_PLAY = 0x01, + PCF50633_MBCS3_USBLIM_CGH = 0x02, + PCF50633_MBCS3_TLIM_PLAY = 0x04, + PCF50633_MBCS3_TLIM_CHG = 0x08, + PCF50633_MBCS3_ILIM = 0x10, /* 1: Ibat > Icutoff */ + PCF50633_MBCS3_VLIM = 0x20, /* 1: Vbat == Vmax */ + PCF50633_MBCS3_VBATSTAT = 0x40, /* 1: Vbat > Vbatcond */ + PCF50633_MBCS3_VRES = 0x80, /* 1: Vbat > Vth(RES) */ +}; + +#define PCF50633_MBCC2_VBATCOND_MASK 0x03 +#define PCF50633_MBCC2_VMAX_MASK 0x3c + +/* Charger status */ +#define PCF50633_MBC_USB_ONLINE 0x01 +#define PCF50633_MBC_USB_ACTIVE 0x02 +#define 
PCF50633_MBC_ADAPTER_ONLINE 0x04 +#define PCF50633_MBC_ADAPTER_ACTIVE 0x08 + +int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); + +int pcf50633_mbc_get_status(struct pcf50633 *); +int pcf50633_mbc_get_usb_online_status(struct pcf50633 *); + +#endif + diff --git a/include/linux/mfd/pcf50633/pmic.h b/include/linux/mfd/pcf50633/pmic.h new file mode 100644 index 000000000..eac0c3d8e --- /dev/null +++ b/include/linux/mfd/pcf50633/pmic.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MFD_PCF50633_PMIC_H +#define __LINUX_MFD_PCF50633_PMIC_H + +#include +#include + +#define PCF50633_REG_AUTOOUT 0x1a +#define PCF50633_REG_AUTOENA 0x1b +#define PCF50633_REG_AUTOCTL 0x1c +#define PCF50633_REG_AUTOMXC 0x1d +#define PCF50633_REG_DOWN1OUT 0x1e +#define PCF50633_REG_DOWN1ENA 0x1f +#define PCF50633_REG_DOWN1CTL 0x20 +#define PCF50633_REG_DOWN1MXC 0x21 +#define PCF50633_REG_DOWN2OUT 0x22 +#define PCF50633_REG_DOWN2ENA 0x23 +#define PCF50633_REG_DOWN2CTL 0x24 +#define PCF50633_REG_DOWN2MXC 0x25 +#define PCF50633_REG_MEMLDOOUT 0x26 +#define PCF50633_REG_MEMLDOENA 0x27 +#define PCF50633_REG_LDO1OUT 0x2d +#define PCF50633_REG_LDO1ENA 0x2e +#define PCF50633_REG_LDO2OUT 0x2f +#define PCF50633_REG_LDO2ENA 0x30 +#define PCF50633_REG_LDO3OUT 0x31 +#define PCF50633_REG_LDO3ENA 0x32 +#define PCF50633_REG_LDO4OUT 0x33 +#define PCF50633_REG_LDO4ENA 0x34 +#define PCF50633_REG_LDO5OUT 0x35 +#define PCF50633_REG_LDO5ENA 0x36 +#define PCF50633_REG_LDO6OUT 0x37 +#define PCF50633_REG_LDO6ENA 0x38 +#define PCF50633_REG_HCLDOOUT 0x39 +#define PCF50633_REG_HCLDOENA 0x3a +#define PCF50633_REG_HCLDOOVL 0x40 + +enum pcf50633_regulator_enable { + PCF50633_REGULATOR_ON = 0x01, + PCF50633_REGULATOR_ON_GPIO1 = 0x02, + PCF50633_REGULATOR_ON_GPIO2 = 0x04, + PCF50633_REGULATOR_ON_GPIO3 = 0x08, +}; +#define PCF50633_REGULATOR_ON_MASK 0x0f + +enum pcf50633_regulator_phase { + PCF50633_REGULATOR_ACTPH1 = 0x00, + PCF50633_REGULATOR_ACTPH2 = 0x10, + PCF50633_REGULATOR_ACTPH3 = 0x20, + PCF50633_REGULATOR_ACTPH4 = 0x30, +}; +#define PCF50633_REGULATOR_ACTPH_MASK 0x30 + +enum pcf50633_regulator_id { + PCF50633_REGULATOR_AUTO, + PCF50633_REGULATOR_DOWN1, + PCF50633_REGULATOR_DOWN2, + PCF50633_REGULATOR_LDO1, + PCF50633_REGULATOR_LDO2, + PCF50633_REGULATOR_LDO3, + PCF50633_REGULATOR_LDO4, + PCF50633_REGULATOR_LDO5, + PCF50633_REGULATOR_LDO6, + PCF50633_REGULATOR_HCLDO, + PCF50633_REGULATOR_MEMLDO, +}; +#endif + diff --git a/include/linux/mfd/qcom_rpm.h b/include/linux/mfd/qcom_rpm.h new file mode 100644 index 000000000..4b6b644f1 --- /dev/null +++ b/include/linux/mfd/qcom_rpm.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_RPM_H__ +#define __QCOM_RPM_H__ + +#include + +struct qcom_rpm; + +#define QCOM_RPM_ACTIVE_STATE 0 +#define QCOM_RPM_SLEEP_STATE 1 + +int qcom_rpm_write(struct qcom_rpm *rpm, int state, int resource, u32 *buf, size_t count); + +#endif diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h new file mode 100644 index 000000000..11eef77ef --- /dev/null +++ b/include/linux/mfd/rave-sp.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +/* + * Core definitions for RAVE SP MFD driver. 
+ * + * Copyright (C) 2017 Zodiac Inflight Innovations + */ + +#ifndef _LINUX_RAVE_SP_H_ +#define _LINUX_RAVE_SP_H_ + +#include + +enum rave_sp_command { + RAVE_SP_CMD_GET_FIRMWARE_VERSION = 0x20, + RAVE_SP_CMD_GET_BOOTLOADER_VERSION = 0x21, + RAVE_SP_CMD_BOOT_SOURCE = 0x26, + RAVE_SP_CMD_GET_BOARD_COPPER_REV = 0x2B, + RAVE_SP_CMD_GET_GPIO_STATE = 0x2F, + + RAVE_SP_CMD_STATUS = 0xA0, + RAVE_SP_CMD_SW_WDT = 0xA1, + RAVE_SP_CMD_PET_WDT = 0xA2, + RAVE_SP_CMD_RMB_EEPROM = 0xA4, + RAVE_SP_CMD_SET_BACKLIGHT = 0xA6, + RAVE_SP_CMD_RESET = 0xA7, + RAVE_SP_CMD_RESET_REASON = 0xA8, + + RAVE_SP_CMD_REQ_COPPER_REV = 0xB6, + RAVE_SP_CMD_GET_I2C_DEVICE_STATUS = 0xBA, + RAVE_SP_CMD_GET_SP_SILICON_REV = 0xB9, + RAVE_SP_CMD_CONTROL_EVENTS = 0xBB, + + RAVE_SP_EVNT_BASE = 0xE0, +}; + +struct rave_sp; + +static inline unsigned long rave_sp_action_pack(u8 event, u8 value) +{ + return ((unsigned long)value << 8) | event; +} + +static inline u8 rave_sp_action_unpack_event(unsigned long action) +{ + return action; +} + +static inline u8 rave_sp_action_unpack_value(unsigned long action) +{ + return action >> 8; +} + +int rave_sp_exec(struct rave_sp *sp, + void *__data, size_t data_size, + void *reply_data, size_t reply_data_size); + +struct device; +int devm_rave_sp_register_event_notifier(struct device *dev, + struct notifier_block *nb); + +#endif /* _LINUX_RAVE_SP_H_ */ diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h new file mode 100644 index 000000000..8d0a392e0 --- /dev/null +++ b/include/linux/mfd/rc5t583.h @@ -0,0 +1,381 @@ +/* + * Core driver interface to access RICOH_RC5T583 power management chip. + * + * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved. + * Author: Laxman dewangan + * + * Based on code + * Copyright (C) 2011 RICOH COMPANY,LTD + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +#ifndef __LINUX_MFD_RC5T583_H +#define __LINUX_MFD_RC5T583_H + +#include +#include +#include + +/* Maximum number of main interrupts */ +#define MAX_MAIN_INTERRUPT 5 +#define RC5T583_MAX_GPEDGE_REG 2 +#define RC5T583_MAX_INTERRUPT_EN_REGS 8 +#define RC5T583_MAX_INTERRUPT_MASK_REGS 9 + +/* Interrupt enable register */ +#define RC5T583_INT_EN_SYS1 0x19 +#define RC5T583_INT_EN_SYS2 0x1D +#define RC5T583_INT_EN_DCDC 0x41 +#define RC5T583_INT_EN_RTC 0xED +#define RC5T583_INT_EN_ADC1 0x90 +#define RC5T583_INT_EN_ADC2 0x91 +#define RC5T583_INT_EN_ADC3 0x92 + +/* Interrupt status registers (monitor regs in Ricoh)*/ +#define RC5T583_INTC_INTPOL 0xAD +#define RC5T583_INTC_INTEN 0xAE +#define RC5T583_INTC_INTMON 0xAF + +#define RC5T583_INT_MON_GRP 0xAF +#define RC5T583_INT_MON_SYS1 0x1B +#define RC5T583_INT_MON_SYS2 0x1F +#define RC5T583_INT_MON_DCDC 0x43 +#define RC5T583_INT_MON_RTC 0xEE + +/* Interrupt clearing registers */ +#define RC5T583_INT_IR_SYS1 0x1A +#define RC5T583_INT_IR_SYS2 0x1E +#define RC5T583_INT_IR_DCDC 0x42 +#define RC5T583_INT_IR_RTC 0xEE +#define RC5T583_INT_IR_ADCL 0x94 +#define RC5T583_INT_IR_ADCH 0x95 +#define RC5T583_INT_IR_ADCEND 0x96 +#define RC5T583_INT_IR_GPIOR 0xA9 +#define RC5T583_INT_IR_GPIOF 0xAA + +/* Sleep sequence registers */ +#define RC5T583_SLPSEQ1 0x21 +#define RC5T583_SLPSEQ2 0x22 +#define RC5T583_SLPSEQ3 0x23 +#define RC5T583_SLPSEQ4 0x24 +#define RC5T583_SLPSEQ5 0x25 +#define RC5T583_SLPSEQ6 0x26 +#define RC5T583_SLPSEQ7 0x27 +#define RC5T583_SLPSEQ8 0x28 +#define RC5T583_SLPSEQ9 0x29 +#define RC5T583_SLPSEQ10 0x2A +#define RC5T583_SLPSEQ11 0x2B + +/* Regulator registers */ +#define RC5T583_REG_DC0CTL 0x30 +#define RC5T583_REG_DC0DAC 0x31 +#define RC5T583_REG_DC0LATCTL 0x32 +#define RC5T583_REG_SR0CTL 0x33 + +#define RC5T583_REG_DC1CTL 0x34 +#define RC5T583_REG_DC1DAC 0x35 +#define RC5T583_REG_DC1LATCTL 0x36 +#define RC5T583_REG_SR1CTL 0x37 + +#define RC5T583_REG_DC2CTL 0x38 +#define RC5T583_REG_DC2DAC 0x39 +#define RC5T583_REG_DC2LATCTL 0x3A +#define RC5T583_REG_SR2CTL 0x3B + +#define RC5T583_REG_DC3CTL 0x3C +#define RC5T583_REG_DC3DAC 0x3D +#define RC5T583_REG_DC3LATCTL 0x3E +#define RC5T583_REG_SR3CTL 0x3F + + +#define RC5T583_REG_LDOEN1 0x50 +#define RC5T583_REG_LDOEN2 0x51 +#define RC5T583_REG_LDODIS1 0x52 +#define RC5T583_REG_LDODIS2 0x53 + +#define RC5T583_REG_LDO0DAC 0x54 +#define RC5T583_REG_LDO1DAC 0x55 +#define RC5T583_REG_LDO2DAC 0x56 +#define RC5T583_REG_LDO3DAC 0x57 +#define RC5T583_REG_LDO4DAC 0x58 +#define RC5T583_REG_LDO5DAC 0x59 +#define RC5T583_REG_LDO6DAC 0x5A +#define RC5T583_REG_LDO7DAC 0x5B +#define RC5T583_REG_LDO8DAC 0x5C +#define RC5T583_REG_LDO9DAC 0x5D + +#define RC5T583_REG_DC0DAC_DS 0x60 +#define RC5T583_REG_DC1DAC_DS 0x61 +#define RC5T583_REG_DC2DAC_DS 0x62 +#define RC5T583_REG_DC3DAC_DS 0x63 + +#define RC5T583_REG_LDO0DAC_DS 0x64 +#define RC5T583_REG_LDO1DAC_DS 0x65 +#define RC5T583_REG_LDO2DAC_DS 0x66 +#define RC5T583_REG_LDO3DAC_DS 0x67 +#define RC5T583_REG_LDO4DAC_DS 0x68 +#define RC5T583_REG_LDO5DAC_DS 0x69 +#define RC5T583_REG_LDO6DAC_DS 0x6A +#define RC5T583_REG_LDO7DAC_DS 0x6B +#define RC5T583_REG_LDO8DAC_DS 0x6C +#define RC5T583_REG_LDO9DAC_DS 0x6D + +/* GPIO register base address */ +#define RC5T583_GPIO_IOSEL 0xA0 +#define RC5T583_GPIO_PDEN 0xA1 +#define RC5T583_GPIO_IOOUT 0xA2 +#define RC5T583_GPIO_PGSEL 0xA3 +#define RC5T583_GPIO_GPINV 0xA4 +#define RC5T583_GPIO_GPDEB 0xA5 +#define RC5T583_GPIO_GPEDGE1 0xA6 +#define RC5T583_GPIO_GPEDGE2 0xA7 +#define RC5T583_GPIO_EN_INT 0xA8 +#define 
RC5T583_GPIO_MON_IOIN 0xAB +#define RC5T583_GPIO_GPOFUNC 0xAC + +/* RTC registers */ +#define RC5T583_RTC_SEC 0xE0 +#define RC5T583_RTC_MIN 0xE1 +#define RC5T583_RTC_HOUR 0xE2 +#define RC5T583_RTC_WDAY 0xE3 +#define RC5T583_RTC_DAY 0xE4 +#define RC5T583_RTC_MONTH 0xE5 +#define RC5T583_RTC_YEAR 0xE6 +#define RC5T583_RTC_ADJ 0xE7 +#define RC5T583_RTC_AW_MIN 0xE8 +#define RC5T583_RTC_AW_HOUR 0xE9 +#define RC5T583_RTC_AW_WEEK 0xEA +#define RC5T583_RTC_AD_MIN 0xEB +#define RC5T583_RTC_AD_HOUR 0xEC +#define RC5T583_RTC_CTL1 0xED +#define RC5T583_RTC_CTL2 0xEE +#define RC5T583_RTC_AY_MIN 0xF0 +#define RC5T583_RTC_AY_HOUR 0xF1 +#define RC5T583_RTC_AY_DAY 0xF2 +#define RC5T583_RTC_AY_MONTH 0xF3 +#define RC5T583_RTC_AY_YEAR 0xF4 + +#define RC5T583_MAX_REG 0xF7 +#define RC5T583_NUM_REGS (RC5T583_MAX_REG + 1) + +/* RICOH_RC5T583 IRQ definitions */ +enum { + RC5T583_IRQ_ONKEY, + RC5T583_IRQ_ACOK, + RC5T583_IRQ_LIDOPEN, + RC5T583_IRQ_PREOT, + RC5T583_IRQ_CLKSTP, + RC5T583_IRQ_ONKEY_OFF, + RC5T583_IRQ_WD, + RC5T583_IRQ_EN_PWRREQ1, + RC5T583_IRQ_EN_PWRREQ2, + RC5T583_IRQ_PRE_VINDET, + + RC5T583_IRQ_DC0LIM, + RC5T583_IRQ_DC1LIM, + RC5T583_IRQ_DC2LIM, + RC5T583_IRQ_DC3LIM, + + RC5T583_IRQ_CTC, + RC5T583_IRQ_YALE, + RC5T583_IRQ_DALE, + RC5T583_IRQ_WALE, + + RC5T583_IRQ_AIN1L, + RC5T583_IRQ_AIN2L, + RC5T583_IRQ_AIN3L, + RC5T583_IRQ_VBATL, + RC5T583_IRQ_VIN3L, + RC5T583_IRQ_VIN8L, + RC5T583_IRQ_AIN1H, + RC5T583_IRQ_AIN2H, + RC5T583_IRQ_AIN3H, + RC5T583_IRQ_VBATH, + RC5T583_IRQ_VIN3H, + RC5T583_IRQ_VIN8H, + RC5T583_IRQ_ADCEND, + + RC5T583_IRQ_GPIO0, + RC5T583_IRQ_GPIO1, + RC5T583_IRQ_GPIO2, + RC5T583_IRQ_GPIO3, + RC5T583_IRQ_GPIO4, + RC5T583_IRQ_GPIO5, + RC5T583_IRQ_GPIO6, + RC5T583_IRQ_GPIO7, + + /* Should be last entry */ + RC5T583_MAX_IRQS, +}; + +/* Ricoh583 gpio definitions */ +enum { + RC5T583_GPIO0, + RC5T583_GPIO1, + RC5T583_GPIO2, + RC5T583_GPIO3, + RC5T583_GPIO4, + RC5T583_GPIO5, + RC5T583_GPIO6, + RC5T583_GPIO7, + + /* Should be last entry */ + RC5T583_MAX_GPIO, +}; + +enum { + RC5T583_DS_NONE, + RC5T583_DS_DC0, + RC5T583_DS_DC1, + RC5T583_DS_DC2, + RC5T583_DS_DC3, + RC5T583_DS_LDO0, + RC5T583_DS_LDO1, + RC5T583_DS_LDO2, + RC5T583_DS_LDO3, + RC5T583_DS_LDO4, + RC5T583_DS_LDO5, + RC5T583_DS_LDO6, + RC5T583_DS_LDO7, + RC5T583_DS_LDO8, + RC5T583_DS_LDO9, + RC5T583_DS_PSO0, + RC5T583_DS_PSO1, + RC5T583_DS_PSO2, + RC5T583_DS_PSO3, + RC5T583_DS_PSO4, + RC5T583_DS_PSO5, + RC5T583_DS_PSO6, + RC5T583_DS_PSO7, + + /* Should be last entry */ + RC5T583_DS_MAX, +}; + +/* + * Ricoh pmic RC5T583 supports sleep through two external controls. + * The output of gpios and regulator can be enable/disable through + * this external signals. 
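[Editor's sketch, not part of the patch] A hedged example of the external power request mechanism described in the comment above, using rc5t583_ext_power_req_config() declared later in this header. It assumes the RC5T583_DS_* ids are what the deepsleep_id argument expects, and the slot number 0 is purely illustrative (slot assignment is board specific).

static int example_dc0_on_pwrreq1(struct device *rc5t583_dev)
{
	/* Let the external PWRREQ1 signal control DC0's deep-sleep state,
	 * placing it in sleep-sequence slot 0. */
	return rc5t583_ext_power_req_config(rc5t583_dev, RC5T583_DS_DC0,
					    RC5T583_EXT_PWRREQ1_CONTROL, 0);
}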
+ */ +enum { + RC5T583_EXT_PWRREQ1_CONTROL = 0x1, + RC5T583_EXT_PWRREQ2_CONTROL = 0x2, +}; + +enum { + RC5T583_REGULATOR_DC0, + RC5T583_REGULATOR_DC1, + RC5T583_REGULATOR_DC2, + RC5T583_REGULATOR_DC3, + RC5T583_REGULATOR_LDO0, + RC5T583_REGULATOR_LDO1, + RC5T583_REGULATOR_LDO2, + RC5T583_REGULATOR_LDO3, + RC5T583_REGULATOR_LDO4, + RC5T583_REGULATOR_LDO5, + RC5T583_REGULATOR_LDO6, + RC5T583_REGULATOR_LDO7, + RC5T583_REGULATOR_LDO8, + RC5T583_REGULATOR_LDO9, + + /* Should be last entry */ + RC5T583_REGULATOR_MAX, +}; + +struct rc5t583 { + struct device *dev; + struct regmap *regmap; + int chip_irq; + int irq_base; + struct mutex irq_lock; + unsigned long group_irq_en[MAX_MAIN_INTERRUPT]; + + /* For main interrupt bits in INTC */ + uint8_t intc_inten_reg; + + /* For group interrupt bits and address */ + uint8_t irq_en_reg[RC5T583_MAX_INTERRUPT_EN_REGS]; + + /* For gpio edge */ + uint8_t gpedge_reg[RC5T583_MAX_GPEDGE_REG]; +}; + +/* + * rc5t583_platform_data: Platform data for ricoh rc5t583 pmu. + * The board specific data is provided through this structure. + * @irq_base: Irq base number on which this device registers their interrupts. + * @gpio_base: GPIO base from which gpio of this device will start. + * @enable_shutdown: Enable shutdown through the input pin "shutdown". + * @regulator_deepsleep_slot: The slot number on which device goes to sleep + * in device sleep mode. + * @regulator_ext_pwr_control: External power request regulator control. The + * regulator output enable/disable is controlled by the external + * power request input state. + * @reg_init_data: Regulator init data. + */ + +struct rc5t583_platform_data { + int irq_base; + int gpio_base; + bool enable_shutdown; + int regulator_deepsleep_slot[RC5T583_REGULATOR_MAX]; + unsigned long regulator_ext_pwr_control[RC5T583_REGULATOR_MAX]; + struct regulator_init_data *reg_init_data[RC5T583_REGULATOR_MAX]; +}; + +static inline int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_write(rc5t583->regmap, reg, val); +} + +static inline int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + unsigned int ival; + int ret; + ret = regmap_read(rc5t583->regmap, reg, &ival); + if (!ret) + *val = (uint8_t)ival; + return ret; +} + +static inline int rc5t583_set_bits(struct device *dev, unsigned int reg, + unsigned int bit_mask) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask); +} + +static inline int rc5t583_clear_bits(struct device *dev, unsigned int reg, + unsigned int bit_mask) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0); +} + +static inline int rc5t583_update(struct device *dev, unsigned int reg, + unsigned int val, unsigned int mask) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_update_bits(rc5t583->regmap, reg, mask, val); +} + +int rc5t583_ext_power_req_config(struct device *dev, int deepsleep_id, + int ext_pwr_req, int deepsleep_slot_nr); +int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base); +int rc5t583_irq_exit(struct rc5t583 *rc5t583); + +#endif diff --git a/include/linux/mfd/rdc321x.h b/include/linux/mfd/rdc321x.h new file mode 100644 index 000000000..697933b22 --- /dev/null +++ b/include/linux/mfd/rdc321x.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __RDC321X_MFD_H +#define 
__RDC321X_MFD_H + +#include +#include + +/* Offsets to be accessed in the southbridge PCI + * device configuration register */ +#define RDC321X_WDT_CTRL 0x44 +#define RDC321X_GPIO_CTRL_REG1 0x48 +#define RDC321X_GPIO_DATA_REG1 0x4c +#define RDC321X_GPIO_CTRL_REG2 0x84 +#define RDC321X_GPIO_DATA_REG2 0x88 + +#define RDC321X_NUM_GPIO 59 + +struct rdc321x_gpio_pdata { + struct pci_dev *sb_pdev; + unsigned max_gpios; +}; + +struct rdc321x_wdt_pdata { + struct pci_dev *sb_pdev; +}; + +#endif /* __RDC321X_MFD_H */ diff --git a/include/linux/mfd/retu.h b/include/linux/mfd/retu.h new file mode 100644 index 000000000..65471c4a3 --- /dev/null +++ b/include/linux/mfd/retu.h @@ -0,0 +1,28 @@ +/* + * Retu/Tahvo MFD driver interface + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + */ + +#ifndef __LINUX_MFD_RETU_H +#define __LINUX_MFD_RETU_H + +struct retu_dev; + +int retu_read(struct retu_dev *, u8); +int retu_write(struct retu_dev *, u8, u16); + +/* Registers */ +#define RETU_REG_WATCHDOG 0x17 /* Watchdog */ +#define RETU_REG_CC1 0x0d /* Common control register 1 */ +#define RETU_REG_STATUS 0x16 /* Status register */ + +/* Interrupt sources */ +#define TAHVO_INT_VBUS 0 /* VBUS state */ + +/* Interrupt status */ +#define TAHVO_STAT_VBUS (1 << TAHVO_INT_VBUS) + +#endif /* __LINUX_MFD_RETU_H */ diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h new file mode 100644 index 000000000..338e0f6e2 --- /dev/null +++ b/include/linux/mfd/rk808.h @@ -0,0 +1,457 @@ +/* + * Register definitions for Rockchip's RK808/RK818 PMIC + * + * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd + * + * Author: Chris Zhong + * Author: Zhang Qing + * + * Copyright (C) 2016 PHYTEC Messtechnik GmbH + * + * Author: Wadim Egorov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_REGULATOR_RK808_H +#define __LINUX_REGULATOR_RK808_H + +#include +#include + +/* + * rk808 Global Register Map. 
+ */ + +#define RK808_DCDC1 0 /* (0+RK808_START) */ +#define RK808_LDO1 4 /* (4+RK808_START) */ +#define RK808_NUM_REGULATORS 14 + +enum rk808_reg { + RK808_ID_DCDC1, + RK808_ID_DCDC2, + RK808_ID_DCDC3, + RK808_ID_DCDC4, + RK808_ID_LDO1, + RK808_ID_LDO2, + RK808_ID_LDO3, + RK808_ID_LDO4, + RK808_ID_LDO5, + RK808_ID_LDO6, + RK808_ID_LDO7, + RK808_ID_LDO8, + RK808_ID_SWITCH1, + RK808_ID_SWITCH2, +}; + +#define RK808_SECONDS_REG 0x00 +#define RK808_MINUTES_REG 0x01 +#define RK808_HOURS_REG 0x02 +#define RK808_DAYS_REG 0x03 +#define RK808_MONTHS_REG 0x04 +#define RK808_YEARS_REG 0x05 +#define RK808_WEEKS_REG 0x06 +#define RK808_ALARM_SECONDS_REG 0x08 +#define RK808_ALARM_MINUTES_REG 0x09 +#define RK808_ALARM_HOURS_REG 0x0a +#define RK808_ALARM_DAYS_REG 0x0b +#define RK808_ALARM_MONTHS_REG 0x0c +#define RK808_ALARM_YEARS_REG 0x0d +#define RK808_RTC_CTRL_REG 0x10 +#define RK808_RTC_STATUS_REG 0x11 +#define RK808_RTC_INT_REG 0x12 +#define RK808_RTC_COMP_LSB_REG 0x13 +#define RK808_RTC_COMP_MSB_REG 0x14 +#define RK808_ID_MSB 0x17 +#define RK808_ID_LSB 0x18 +#define RK808_CLK32OUT_REG 0x20 +#define RK808_VB_MON_REG 0x21 +#define RK808_THERMAL_REG 0x22 +#define RK808_DCDC_EN_REG 0x23 +#define RK808_LDO_EN_REG 0x24 +#define RK808_SLEEP_SET_OFF_REG1 0x25 +#define RK808_SLEEP_SET_OFF_REG2 0x26 +#define RK808_DCDC_UV_STS_REG 0x27 +#define RK808_DCDC_UV_ACT_REG 0x28 +#define RK808_LDO_UV_STS_REG 0x29 +#define RK808_LDO_UV_ACT_REG 0x2a +#define RK808_DCDC_PG_REG 0x2b +#define RK808_LDO_PG_REG 0x2c +#define RK808_VOUT_MON_TDB_REG 0x2d +#define RK808_BUCK1_CONFIG_REG 0x2e +#define RK808_BUCK1_ON_VSEL_REG 0x2f +#define RK808_BUCK1_SLP_VSEL_REG 0x30 +#define RK808_BUCK1_DVS_VSEL_REG 0x31 +#define RK808_BUCK2_CONFIG_REG 0x32 +#define RK808_BUCK2_ON_VSEL_REG 0x33 +#define RK808_BUCK2_SLP_VSEL_REG 0x34 +#define RK808_BUCK2_DVS_VSEL_REG 0x35 +#define RK808_BUCK3_CONFIG_REG 0x36 +#define RK808_BUCK4_CONFIG_REG 0x37 +#define RK808_BUCK4_ON_VSEL_REG 0x38 +#define RK808_BUCK4_SLP_VSEL_REG 0x39 +#define RK808_BOOST_CONFIG_REG 0x3a +#define RK808_LDO1_ON_VSEL_REG 0x3b +#define RK808_LDO1_SLP_VSEL_REG 0x3c +#define RK808_LDO2_ON_VSEL_REG 0x3d +#define RK808_LDO2_SLP_VSEL_REG 0x3e +#define RK808_LDO3_ON_VSEL_REG 0x3f +#define RK808_LDO3_SLP_VSEL_REG 0x40 +#define RK808_LDO4_ON_VSEL_REG 0x41 +#define RK808_LDO4_SLP_VSEL_REG 0x42 +#define RK808_LDO5_ON_VSEL_REG 0x43 +#define RK808_LDO5_SLP_VSEL_REG 0x44 +#define RK808_LDO6_ON_VSEL_REG 0x45 +#define RK808_LDO6_SLP_VSEL_REG 0x46 +#define RK808_LDO7_ON_VSEL_REG 0x47 +#define RK808_LDO7_SLP_VSEL_REG 0x48 +#define RK808_LDO8_ON_VSEL_REG 0x49 +#define RK808_LDO8_SLP_VSEL_REG 0x4a +#define RK808_DEVCTRL_REG 0x4b +#define RK808_INT_STS_REG1 0x4c +#define RK808_INT_STS_MSK_REG1 0x4d +#define RK808_INT_STS_REG2 0x4e +#define RK808_INT_STS_MSK_REG2 0x4f +#define RK808_IO_POL_REG 0x50 + +/* RK818 */ +#define RK818_DCDC1 0 +#define RK818_LDO1 4 +#define RK818_NUM_REGULATORS 17 + +enum rk818_reg { + RK818_ID_DCDC1, + RK818_ID_DCDC2, + RK818_ID_DCDC3, + RK818_ID_DCDC4, + RK818_ID_BOOST, + RK818_ID_LDO1, + RK818_ID_LDO2, + RK818_ID_LDO3, + RK818_ID_LDO4, + RK818_ID_LDO5, + RK818_ID_LDO6, + RK818_ID_LDO7, + RK818_ID_LDO8, + RK818_ID_LDO9, + RK818_ID_SWITCH, + RK818_ID_HDMI_SWITCH, + RK818_ID_OTG_SWITCH, +}; + +#define RK818_DCDC_EN_REG 0x23 +#define RK818_LDO_EN_REG 0x24 +#define RK818_SLEEP_SET_OFF_REG1 0x25 +#define RK818_SLEEP_SET_OFF_REG2 0x26 +#define RK818_DCDC_UV_STS_REG 0x27 +#define RK818_DCDC_UV_ACT_REG 0x28 +#define RK818_LDO_UV_STS_REG 0x29 +#define RK818_LDO_UV_ACT_REG 0x2a 
+#define RK818_DCDC_PG_REG 0x2b +#define RK818_LDO_PG_REG 0x2c +#define RK818_VOUT_MON_TDB_REG 0x2d +#define RK818_BUCK1_CONFIG_REG 0x2e +#define RK818_BUCK1_ON_VSEL_REG 0x2f +#define RK818_BUCK1_SLP_VSEL_REG 0x30 +#define RK818_BUCK2_CONFIG_REG 0x32 +#define RK818_BUCK2_ON_VSEL_REG 0x33 +#define RK818_BUCK2_SLP_VSEL_REG 0x34 +#define RK818_BUCK3_CONFIG_REG 0x36 +#define RK818_BUCK4_CONFIG_REG 0x37 +#define RK818_BUCK4_ON_VSEL_REG 0x38 +#define RK818_BUCK4_SLP_VSEL_REG 0x39 +#define RK818_BOOST_CONFIG_REG 0x3a +#define RK818_LDO1_ON_VSEL_REG 0x3b +#define RK818_LDO1_SLP_VSEL_REG 0x3c +#define RK818_LDO2_ON_VSEL_REG 0x3d +#define RK818_LDO2_SLP_VSEL_REG 0x3e +#define RK818_LDO3_ON_VSEL_REG 0x3f +#define RK818_LDO3_SLP_VSEL_REG 0x40 +#define RK818_LDO4_ON_VSEL_REG 0x41 +#define RK818_LDO4_SLP_VSEL_REG 0x42 +#define RK818_LDO5_ON_VSEL_REG 0x43 +#define RK818_LDO5_SLP_VSEL_REG 0x44 +#define RK818_LDO6_ON_VSEL_REG 0x45 +#define RK818_LDO6_SLP_VSEL_REG 0x46 +#define RK818_LDO7_ON_VSEL_REG 0x47 +#define RK818_LDO7_SLP_VSEL_REG 0x48 +#define RK818_LDO8_ON_VSEL_REG 0x49 +#define RK818_LDO8_SLP_VSEL_REG 0x4a +#define RK818_BOOST_LDO9_ON_VSEL_REG 0x54 +#define RK818_BOOST_LDO9_SLP_VSEL_REG 0x55 +#define RK818_DEVCTRL_REG 0x4b +#define RK818_INT_STS_REG1 0X4c +#define RK818_INT_STS_MSK_REG1 0x4d +#define RK818_INT_STS_REG2 0x4e +#define RK818_INT_STS_MSK_REG2 0x4f +#define RK818_IO_POL_REG 0x50 +#define RK818_H5V_EN_REG 0x52 +#define RK818_SLEEP_SET_OFF_REG3 0x53 +#define RK818_BOOST_LDO9_ON_VSEL_REG 0x54 +#define RK818_BOOST_LDO9_SLP_VSEL_REG 0x55 +#define RK818_BOOST_CTRL_REG 0x56 +#define RK818_DCDC_ILMAX 0x90 +#define RK818_USB_CTRL_REG 0xa1 + +#define RK818_H5V_EN BIT(0) +#define RK818_REF_RDY_CTRL BIT(1) +#define RK818_USB_ILIM_SEL_MASK 0xf +#define RK818_USB_ILMIN_2000MA 0x7 +#define RK818_USB_CHG_SD_VSEL_MASK 0x70 + +/* RK805 */ +enum rk805_reg { + RK805_ID_DCDC1, + RK805_ID_DCDC2, + RK805_ID_DCDC3, + RK805_ID_DCDC4, + RK805_ID_LDO1, + RK805_ID_LDO2, + RK805_ID_LDO3, +}; + +/* CONFIG REGISTER */ +#define RK805_VB_MON_REG 0x21 +#define RK805_THERMAL_REG 0x22 + +/* POWER CHANNELS ENABLE REGISTER */ +#define RK805_DCDC_EN_REG 0x23 +#define RK805_SLP_DCDC_EN_REG 0x25 +#define RK805_SLP_LDO_EN_REG 0x26 +#define RK805_LDO_EN_REG 0x27 + +/* BUCK AND LDO CONFIG REGISTER */ +#define RK805_BUCK_LDO_SLP_LP_EN_REG 0x2A +#define RK805_BUCK1_CONFIG_REG 0x2E +#define RK805_BUCK1_ON_VSEL_REG 0x2F +#define RK805_BUCK1_SLP_VSEL_REG 0x30 +#define RK805_BUCK2_CONFIG_REG 0x32 +#define RK805_BUCK2_ON_VSEL_REG 0x33 +#define RK805_BUCK2_SLP_VSEL_REG 0x34 +#define RK805_BUCK3_CONFIG_REG 0x36 +#define RK805_BUCK4_CONFIG_REG 0x37 +#define RK805_BUCK4_ON_VSEL_REG 0x38 +#define RK805_BUCK4_SLP_VSEL_REG 0x39 +#define RK805_LDO1_ON_VSEL_REG 0x3B +#define RK805_LDO1_SLP_VSEL_REG 0x3C +#define RK805_LDO2_ON_VSEL_REG 0x3D +#define RK805_LDO2_SLP_VSEL_REG 0x3E +#define RK805_LDO3_ON_VSEL_REG 0x3F +#define RK805_LDO3_SLP_VSEL_REG 0x40 + +/* INTERRUPT REGISTER */ +#define RK805_PWRON_LP_INT_TIME_REG 0x47 +#define RK805_PWRON_DB_REG 0x48 +#define RK805_DEV_CTRL_REG 0x4B +#define RK805_INT_STS_REG 0x4C +#define RK805_INT_STS_MSK_REG 0x4D +#define RK805_GPIO_IO_POL_REG 0x50 +#define RK805_OUT_REG 0x52 +#define RK805_ON_SOURCE_REG 0xAE +#define RK805_OFF_SOURCE_REG 0xAF + +#define RK805_NUM_REGULATORS 7 + +#define RK805_PWRON_FALL_RISE_INT_EN 0x0 +#define RK805_PWRON_FALL_RISE_INT_MSK 0x81 + +/* RK805 IRQ Definitions */ +#define RK805_IRQ_PWRON_RISE 0 +#define RK805_IRQ_VB_LOW 1 +#define RK805_IRQ_PWRON 2 +#define RK805_IRQ_PWRON_LP 
3 +#define RK805_IRQ_HOTDIE 4 +#define RK805_IRQ_RTC_ALARM 5 +#define RK805_IRQ_RTC_PERIOD 6 +#define RK805_IRQ_PWRON_FALL 7 + +#define RK805_IRQ_PWRON_RISE_MSK BIT(0) +#define RK805_IRQ_VB_LOW_MSK BIT(1) +#define RK805_IRQ_PWRON_MSK BIT(2) +#define RK805_IRQ_PWRON_LP_MSK BIT(3) +#define RK805_IRQ_HOTDIE_MSK BIT(4) +#define RK805_IRQ_RTC_ALARM_MSK BIT(5) +#define RK805_IRQ_RTC_PERIOD_MSK BIT(6) +#define RK805_IRQ_PWRON_FALL_MSK BIT(7) + +#define RK805_PWR_RISE_INT_STATUS BIT(0) +#define RK805_VB_LOW_INT_STATUS BIT(1) +#define RK805_PWRON_INT_STATUS BIT(2) +#define RK805_PWRON_LP_INT_STATUS BIT(3) +#define RK805_HOTDIE_INT_STATUS BIT(4) +#define RK805_ALARM_INT_STATUS BIT(5) +#define RK805_PERIOD_INT_STATUS BIT(6) +#define RK805_PWR_FALL_INT_STATUS BIT(7) + +#define RK805_BUCK1_2_ILMAX_MASK (3 << 6) +#define RK805_BUCK3_4_ILMAX_MASK (3 << 3) +#define RK805_RTC_PERIOD_INT_MASK (1 << 6) +#define RK805_RTC_ALARM_INT_MASK (1 << 5) +#define RK805_INT_ALARM_EN (1 << 3) +#define RK805_INT_TIMER_EN (1 << 2) + +/* RK808 IRQ Definitions */ +#define RK808_IRQ_VOUT_LO 0 +#define RK808_IRQ_VB_LO 1 +#define RK808_IRQ_PWRON 2 +#define RK808_IRQ_PWRON_LP 3 +#define RK808_IRQ_HOTDIE 4 +#define RK808_IRQ_RTC_ALARM 5 +#define RK808_IRQ_RTC_PERIOD 6 +#define RK808_IRQ_PLUG_IN_INT 7 +#define RK808_IRQ_PLUG_OUT_INT 8 +#define RK808_NUM_IRQ 9 + +#define RK808_IRQ_VOUT_LO_MSK BIT(0) +#define RK808_IRQ_VB_LO_MSK BIT(1) +#define RK808_IRQ_PWRON_MSK BIT(2) +#define RK808_IRQ_PWRON_LP_MSK BIT(3) +#define RK808_IRQ_HOTDIE_MSK BIT(4) +#define RK808_IRQ_RTC_ALARM_MSK BIT(5) +#define RK808_IRQ_RTC_PERIOD_MSK BIT(6) +#define RK808_IRQ_PLUG_IN_INT_MSK BIT(0) +#define RK808_IRQ_PLUG_OUT_INT_MSK BIT(1) + +/* RK818 IRQ Definitions */ +#define RK818_IRQ_VOUT_LO 0 +#define RK818_IRQ_VB_LO 1 +#define RK818_IRQ_PWRON 2 +#define RK818_IRQ_PWRON_LP 3 +#define RK818_IRQ_HOTDIE 4 +#define RK818_IRQ_RTC_ALARM 5 +#define RK818_IRQ_RTC_PERIOD 6 +#define RK818_IRQ_USB_OV 7 +#define RK818_IRQ_PLUG_IN 8 +#define RK818_IRQ_PLUG_OUT 9 +#define RK818_IRQ_CHG_OK 10 +#define RK818_IRQ_CHG_TE 11 +#define RK818_IRQ_CHG_TS1 12 +#define RK818_IRQ_TS2 13 +#define RK818_IRQ_CHG_CVTLIM 14 +#define RK818_IRQ_DISCHG_ILIM 15 + +#define RK818_IRQ_VOUT_LO_MSK BIT(0) +#define RK818_IRQ_VB_LO_MSK BIT(1) +#define RK818_IRQ_PWRON_MSK BIT(2) +#define RK818_IRQ_PWRON_LP_MSK BIT(3) +#define RK818_IRQ_HOTDIE_MSK BIT(4) +#define RK818_IRQ_RTC_ALARM_MSK BIT(5) +#define RK818_IRQ_RTC_PERIOD_MSK BIT(6) +#define RK818_IRQ_USB_OV_MSK BIT(7) +#define RK818_IRQ_PLUG_IN_MSK BIT(0) +#define RK818_IRQ_PLUG_OUT_MSK BIT(1) +#define RK818_IRQ_CHG_OK_MSK BIT(2) +#define RK818_IRQ_CHG_TE_MSK BIT(3) +#define RK818_IRQ_CHG_TS1_MSK BIT(4) +#define RK818_IRQ_TS2_MSK BIT(5) +#define RK818_IRQ_CHG_CVTLIM_MSK BIT(6) +#define RK818_IRQ_DISCHG_ILIM_MSK BIT(7) + +#define RK818_NUM_IRQ 16 + +#define RK808_VBAT_LOW_2V8 0x00 +#define RK808_VBAT_LOW_2V9 0x01 +#define RK808_VBAT_LOW_3V0 0x02 +#define RK808_VBAT_LOW_3V1 0x03 +#define RK808_VBAT_LOW_3V2 0x04 +#define RK808_VBAT_LOW_3V3 0x05 +#define RK808_VBAT_LOW_3V4 0x06 +#define RK808_VBAT_LOW_3V5 0x07 +#define VBAT_LOW_VOL_MASK (0x07 << 0) +#define EN_VABT_LOW_SHUT_DOWN (0x00 << 4) +#define EN_VBAT_LOW_IRQ (0x1 << 4) +#define VBAT_LOW_ACT_MASK (0x1 << 4) + +#define BUCK_ILMIN_MASK (7 << 0) +#define BOOST_ILMIN_MASK (7 << 0) +#define BUCK1_RATE_MASK (3 << 3) +#define BUCK2_RATE_MASK (3 << 3) +#define MASK_ALL 0xff + +#define BUCK_UV_ACT_MASK 0x0f +#define BUCK_UV_ACT_DISABLE 0 + +#define SWITCH2_EN BIT(6) +#define SWITCH1_EN BIT(5) +#define 
DEV_OFF_RST BIT(3) +#define DEV_OFF BIT(0) + +#define VB_LO_ACT BIT(4) +#define VB_LO_SEL_3500MV (7 << 0) + +#define VOUT_LO_INT BIT(0) +#define CLK32KOUT2_EN BIT(0) + +#define TEMP115C 0x0c +#define TEMP_HOTDIE_MSK 0x0c +#define SLP_SD_MSK (0x3 << 2) +#define SHUTDOWN_FUN (0x2 << 2) +#define SLEEP_FUN (0x1 << 2) +#define RK8XX_ID_MSK 0xfff0 +#define FPWM_MODE BIT(7) + +enum { + BUCK_ILMIN_50MA, + BUCK_ILMIN_100MA, + BUCK_ILMIN_150MA, + BUCK_ILMIN_200MA, + BUCK_ILMIN_250MA, + BUCK_ILMIN_300MA, + BUCK_ILMIN_350MA, + BUCK_ILMIN_400MA, +}; + +enum { + BOOST_ILMIN_75MA, + BOOST_ILMIN_100MA, + BOOST_ILMIN_125MA, + BOOST_ILMIN_150MA, + BOOST_ILMIN_175MA, + BOOST_ILMIN_200MA, + BOOST_ILMIN_225MA, + BOOST_ILMIN_250MA, +}; + +enum { + RK805_BUCK1_2_ILMAX_2500MA, + RK805_BUCK1_2_ILMAX_3000MA, + RK805_BUCK1_2_ILMAX_3500MA, + RK805_BUCK1_2_ILMAX_4000MA, +}; + +enum { + RK805_BUCK3_ILMAX_1500MA, + RK805_BUCK3_ILMAX_2000MA, + RK805_BUCK3_ILMAX_2500MA, + RK805_BUCK3_ILMAX_3000MA, +}; + +enum { + RK805_BUCK4_ILMAX_2000MA, + RK805_BUCK4_ILMAX_2500MA, + RK805_BUCK4_ILMAX_3000MA, + RK805_BUCK4_ILMAX_3500MA, +}; + +enum { + RK805_ID = 0x8050, + RK808_ID = 0x0000, + RK818_ID = 0x8180, +}; + +struct rk808 { + struct i2c_client *i2c; + struct regmap_irq_chip_data *irq_data; + struct regmap *regmap; + long variant; + const struct regmap_config *regmap_cfg; + const struct regmap_irq_chip *regmap_irq_chip; +}; +#endif /* __LINUX_REGULATOR_RK808_H */ diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h new file mode 100644 index 000000000..d61bc58ab --- /dev/null +++ b/include/linux/mfd/rn5t618.h @@ -0,0 +1,256 @@ +/* + * MFD core driver for Ricoh RN5T618 PMIC + * + * Copyright (C) 2014 Beniamino Galvani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __LINUX_MFD_RN5T618_H +#define __LINUX_MFD_RN5T618_H + +#include + +#define RN5T618_LSIVER 0x00 +#define RN5T618_OTPVER 0x01 +#define RN5T618_IODAC 0x02 +#define RN5T618_VINDAC 0x03 +#define RN5T618_OUT32KEN 0x05 +#define RN5T618_CPUCNT 0x06 +#define RN5T618_PSWR 0x07 +#define RN5T618_PONHIS 0x09 +#define RN5T618_POFFHIS 0x0a +#define RN5T618_WATCHDOG 0x0b +#define RN5T618_WATCHDOGCNT 0x0c +#define RN5T618_PWRFUNC 0x0d +#define RN5T618_SLPCNT 0x0e +#define RN5T618_REPCNT 0x0f +#define RN5T618_PWRONTIMSET 0x10 +#define RN5T618_NOETIMSETCNT 0x11 +#define RN5T618_PWRIREN 0x12 +#define RN5T618_PWRIRQ 0x13 +#define RN5T618_PWRMON 0x14 +#define RN5T618_PWRIRSEL 0x15 +#define RN5T618_DC1_SLOT 0x16 +#define RN5T618_DC2_SLOT 0x17 +#define RN5T618_DC3_SLOT 0x18 +#define RN5T618_DC4_SLOT 0x19 +#define RN5T618_LDO1_SLOT 0x1b +#define RN5T618_LDO2_SLOT 0x1c +#define RN5T618_LDO3_SLOT 0x1d +#define RN5T618_LDO4_SLOT 0x1e +#define RN5T618_LDO5_SLOT 0x1f +#define RN5T618_PSO0_SLOT 0x25 +#define RN5T618_PSO1_SLOT 0x26 +#define RN5T618_PSO2_SLOT 0x27 +#define RN5T618_PSO3_SLOT 0x28 +#define RN5T618_LDORTC1_SLOT 0x2a +#define RN5T618_DC1CTL 0x2c +#define RN5T618_DC1CTL2 0x2d +#define RN5T618_DC2CTL 0x2e +#define RN5T618_DC2CTL2 0x2f +#define RN5T618_DC3CTL 0x30 +#define RN5T618_DC3CTL2 0x31 +#define RN5T618_DC4CTL 0x32 +#define RN5T618_DC4CTL2 0x33 +#define RN5T618_DC5CTL 0x34 +#define RN5T618_DC5CTL2 0x35 +#define RN5T618_DC1DAC 0x36 +#define RN5T618_DC2DAC 0x37 +#define RN5T618_DC3DAC 0x38 +#define RN5T618_DC4DAC 0x39 +#define RN5T618_DC5DAC 0x3a +#define RN5T618_DC1DAC_SLP 0x3b +#define RN5T618_DC2DAC_SLP 0x3c +#define RN5T618_DC3DAC_SLP 0x3d +#define RN5T618_DC4DAC_SLP 0x3e +#define RN5T618_DCIREN 0x40 +#define RN5T618_DCIRQ 0x41 +#define RN5T618_DCIRMON 0x42 +#define RN5T618_LDOEN1 0x44 +#define RN5T618_LDOEN2 0x45 +#define RN5T618_LDODIS 0x46 +#define RN5T618_LDO1DAC 0x4c +#define RN5T618_LDO2DAC 0x4d +#define RN5T618_LDO3DAC 0x4e +#define RN5T618_LDO4DAC 0x4f +#define RN5T618_LDO5DAC 0x50 +#define RN5T618_LDO6DAC 0x51 +#define RN5T618_LDO7DAC 0x52 +#define RN5T618_LDO8DAC 0x53 +#define RN5T618_LDO9DAC 0x54 +#define RN5T618_LDO10DAC 0x55 +#define RN5T618_LDORTCDAC 0x56 +#define RN5T618_LDORTC2DAC 0x57 +#define RN5T618_LDO1DAC_SLP 0x58 +#define RN5T618_LDO2DAC_SLP 0x59 +#define RN5T618_LDO3DAC_SLP 0x5a +#define RN5T618_LDO4DAC_SLP 0x5b +#define RN5T618_LDO5DAC_SLP 0x5c +#define RN5T618_ADCCNT1 0x64 +#define RN5T618_ADCCNT2 0x65 +#define RN5T618_ADCCNT3 0x66 +#define RN5T618_ILIMDATAH 0x68 +#define RN5T618_ILIMDATAL 0x69 +#define RN5T618_VBATDATAH 0x6a +#define RN5T618_VBATDATAL 0x6b +#define RN5T618_VADPDATAH 0x6c +#define RN5T618_VADPDATAL 0x6d +#define RN5T618_VUSBDATAH 0x6e +#define RN5T618_VUSBDATAL 0x6f +#define RN5T618_VSYSDATAH 0x70 +#define RN5T618_VSYSDATAL 0x71 +#define RN5T618_VTHMDATAH 0x72 +#define RN5T618_VTHMDATAL 0x73 +#define RN5T618_AIN1DATAH 0x74 +#define RN5T618_AIN1DATAL 0x75 +#define RN5T618_AIN0DATAH 0x76 +#define RN5T618_AIN0DATAL 0x77 +#define RN5T618_ILIMTHL 0x78 +#define RN5T618_ILIMTHH 0x79 +#define RN5T618_VBATTHL 0x7a +#define RN5T618_VBATTHH 0x7b +#define RN5T618_VADPTHL 0x7c +#define RN5T618_VADPTHH 0x7d +#define RN5T618_VUSBTHL 0x7e +#define RN5T618_VUSBTHH 0x7f +#define RN5T618_VSYSTHL 0x80 +#define RN5T618_VSYSTHH 0x81 +#define RN5T618_VTHMTHL 0x82 +#define RN5T618_VTHMTHH 0x83 +#define RN5T618_AIN1THL 0x84 +#define RN5T618_AIN1THH 0x85 +#define RN5T618_AIN0THL 0x86 +#define RN5T618_AIN0THH 0x87 +#define RN5T618_EN_ADCIR1 0x88 +#define RN5T618_EN_ADCIR2 
0x89 +#define RN5T618_EN_ADCIR3 0x8a +#define RN5T618_IR_ADC1 0x8c +#define RN5T618_IR_ADC2 0x8d +#define RN5T618_IR_ADC3 0x8e +#define RN5T618_IOSEL 0x90 +#define RN5T618_IOOUT 0x91 +#define RN5T618_GPEDGE1 0x92 +#define RN5T618_GPEDGE2 0x93 +#define RN5T618_EN_GPIR 0x94 +#define RN5T618_IR_GPR 0x95 +#define RN5T618_IR_GPF 0x96 +#define RN5T618_MON_IOIN 0x97 +#define RN5T618_GPLED_FUNC 0x98 +#define RN5T618_INTPOL 0x9c +#define RN5T618_INTEN 0x9d +#define RN5T618_INTMON 0x9e +#define RN5T618_PREVINDAC 0xb0 +#define RN5T618_BATDAC 0xb1 +#define RN5T618_CHGCTL1 0xb3 +#define RN5T618_CHGCTL2 0xb4 +#define RN5T618_VSYSSET 0xb5 +#define RN5T618_REGISET1 0xb6 +#define RN5T618_REGISET2 0xb7 +#define RN5T618_CHGISET 0xb8 +#define RN5T618_TIMSET 0xb9 +#define RN5T618_BATSET1 0xba +#define RN5T618_BATSET2 0xbb +#define RN5T618_DIESET 0xbc +#define RN5T618_CHGSTATE 0xbd +#define RN5T618_CHGCTRL_IRFMASK 0xbe +#define RN5T618_CHGSTAT_IRFMASK1 0xbf +#define RN5T618_CHGSTAT_IRFMASK2 0xc0 +#define RN5T618_CHGERR_IRFMASK 0xc1 +#define RN5T618_CHGCTRL_IRR 0xc2 +#define RN5T618_CHGSTAT_IRR1 0xc3 +#define RN5T618_CHGSTAT_IRR2 0xc4 +#define RN5T618_CHGERR_IRR 0xc5 +#define RN5T618_CHGCTRL_MONI 0xc6 +#define RN5T618_CHGSTAT_MONI1 0xc7 +#define RN5T618_CHGSTAT_MONI2 0xc8 +#define RN5T618_CHGERR_MONI 0xc9 +#define RN5T618_CHGCTRL_DETMOD1 0xca +#define RN5T618_CHGCTRL_DETMOD2 0xcb +#define RN5T618_CHGSTAT_DETMOD1 0xcc +#define RN5T618_CHGSTAT_DETMOD2 0xcd +#define RN5T618_CHGSTAT_DETMOD3 0xce +#define RN5T618_CHGERR_DETMOD1 0xcf +#define RN5T618_CHGERR_DETMOD2 0xd0 +#define RN5T618_CHGOSCCTL 0xd4 +#define RN5T618_CHGOSCSCORESET1 0xd5 +#define RN5T618_CHGOSCSCORESET2 0xd6 +#define RN5T618_CHGOSCSCORESET3 0xd7 +#define RN5T618_CHGOSCFREQSET1 0xd8 +#define RN5T618_CHGOSCFREQSET2 0xd9 +#define RN5T618_CONTROL 0xe0 +#define RN5T618_SOC 0xe1 +#define RN5T618_RE_CAP_H 0xe2 +#define RN5T618_RE_CAP_L 0xe3 +#define RN5T618_FA_CAP_H 0xe4 +#define RN5T618_FA_CAP_L 0xe5 +#define RN5T618_AGE 0xe6 +#define RN5T618_TT_EMPTY_H 0xe7 +#define RN5T618_TT_EMPTY_L 0xe8 +#define RN5T618_TT_FULL_H 0xe9 +#define RN5T618_TT_FULL_L 0xea +#define RN5T618_VOLTAGE_1 0xeb +#define RN5T618_VOLTAGE_0 0xec +#define RN5T618_TEMP_1 0xed +#define RN5T618_TEMP_0 0xee +#define RN5T618_CC_CTRL 0xef +#define RN5T618_CC_COUNT2 0xf0 +#define RN5T618_CC_COUNT1 0xf1 +#define RN5T618_CC_COUNT0 0xf2 +#define RN5T618_CC_SUMREG3 0xf3 +#define RN5T618_CC_SUMREG2 0xf4 +#define RN5T618_CC_SUMREG1 0xf5 +#define RN5T618_CC_SUMREG0 0xf6 +#define RN5T618_CC_OFFREG1 0xf7 +#define RN5T618_CC_OFFREG0 0xf8 +#define RN5T618_CC_GAINREG1 0xf9 +#define RN5T618_CC_GAINREG0 0xfa +#define RN5T618_CC_AVEREG1 0xfb +#define RN5T618_CC_AVEREG0 0xfc +#define RN5T618_MAX_REG 0xfc + +#define RN5T618_REPCNT_REPWRON BIT(0) +#define RN5T618_SLPCNT_SWPWROFF BIT(0) +#define RN5T618_WATCHDOG_WDOGEN BIT(2) +#define RN5T618_WATCHDOG_WDOGTIM_M (BIT(0) | BIT(1)) +#define RN5T618_WATCHDOG_WDOGTIM_S 0 +#define RN5T618_PWRIRQ_IR_WDOG BIT(6) + +enum { + RN5T618_DCDC1, + RN5T618_DCDC2, + RN5T618_DCDC3, + RN5T618_DCDC4, + RN5T618_DCDC5, + RN5T618_LDO1, + RN5T618_LDO2, + RN5T618_LDO3, + RN5T618_LDO4, + RN5T618_LDO5, + RN5T618_LDO6, + RN5T618_LDO7, + RN5T618_LDO8, + RN5T618_LDO9, + RN5T618_LDO10, + RN5T618_LDORTC1, + RN5T618_LDORTC2, + RN5T618_REG_NUM, +}; + +enum { + RN5T567 = 0, + RN5T618, + RC5T619, +}; + +struct rn5t618 { + struct regmap *regmap; + long variant; +}; + +#endif /* __LINUX_MFD_RN5T618_H */ diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h new file mode 
100644 index 000000000..e8338e5dc --- /dev/null +++ b/include/linux/mfd/rohm-bd718x7.h @@ -0,0 +1,359 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 2018 ROHM Semiconductors */ + +#ifndef __LINUX_MFD_BD71837_H__ +#define __LINUX_MFD_BD71837_H__ + +#include + +enum { + BD71837_BUCK1 = 0, + BD71837_BUCK2, + BD71837_BUCK3, + BD71837_BUCK4, + BD71837_BUCK5, + BD71837_BUCK6, + BD71837_BUCK7, + BD71837_BUCK8, + BD71837_LDO1, + BD71837_LDO2, + BD71837_LDO3, + BD71837_LDO4, + BD71837_LDO5, + BD71837_LDO6, + BD71837_LDO7, + BD71837_REGULATOR_CNT, +}; + +#define BD71837_BUCK1_VOLTAGE_NUM 0x40 +#define BD71837_BUCK2_VOLTAGE_NUM 0x40 +#define BD71837_BUCK3_VOLTAGE_NUM 0x40 +#define BD71837_BUCK4_VOLTAGE_NUM 0x40 + +#define BD71837_BUCK5_VOLTAGE_NUM 0x08 +#define BD71837_BUCK6_VOLTAGE_NUM 0x04 +#define BD71837_BUCK7_VOLTAGE_NUM 0x08 +#define BD71837_BUCK8_VOLTAGE_NUM 0x40 + +#define BD71837_LDO1_VOLTAGE_NUM 0x04 +#define BD71837_LDO2_VOLTAGE_NUM 0x02 +#define BD71837_LDO3_VOLTAGE_NUM 0x10 +#define BD71837_LDO4_VOLTAGE_NUM 0x10 +#define BD71837_LDO5_VOLTAGE_NUM 0x10 +#define BD71837_LDO6_VOLTAGE_NUM 0x10 +#define BD71837_LDO7_VOLTAGE_NUM 0x10 + +enum { + BD71837_REG_REV = 0x00, + BD71837_REG_SWRESET = 0x01, + BD71837_REG_I2C_DEV = 0x02, + BD71837_REG_PWRCTRL0 = 0x03, + BD71837_REG_PWRCTRL1 = 0x04, + BD71837_REG_BUCK1_CTRL = 0x05, + BD71837_REG_BUCK2_CTRL = 0x06, + BD71837_REG_BUCK3_CTRL = 0x07, + BD71837_REG_BUCK4_CTRL = 0x08, + BD71837_REG_BUCK5_CTRL = 0x09, + BD71837_REG_BUCK6_CTRL = 0x0A, + BD71837_REG_BUCK7_CTRL = 0x0B, + BD71837_REG_BUCK8_CTRL = 0x0C, + BD71837_REG_BUCK1_VOLT_RUN = 0x0D, + BD71837_REG_BUCK1_VOLT_IDLE = 0x0E, + BD71837_REG_BUCK1_VOLT_SUSP = 0x0F, + BD71837_REG_BUCK2_VOLT_RUN = 0x10, + BD71837_REG_BUCK2_VOLT_IDLE = 0x11, + BD71837_REG_BUCK3_VOLT_RUN = 0x12, + BD71837_REG_BUCK4_VOLT_RUN = 0x13, + BD71837_REG_BUCK5_VOLT = 0x14, + BD71837_REG_BUCK6_VOLT = 0x15, + BD71837_REG_BUCK7_VOLT = 0x16, + BD71837_REG_BUCK8_VOLT = 0x17, + BD71837_REG_LDO1_VOLT = 0x18, + BD71837_REG_LDO2_VOLT = 0x19, + BD71837_REG_LDO3_VOLT = 0x1A, + BD71837_REG_LDO4_VOLT = 0x1B, + BD71837_REG_LDO5_VOLT = 0x1C, + BD71837_REG_LDO6_VOLT = 0x1D, + BD71837_REG_LDO7_VOLT = 0x1E, + BD71837_REG_TRANS_COND0 = 0x1F, + BD71837_REG_TRANS_COND1 = 0x20, + BD71837_REG_VRFAULTEN = 0x21, + BD718XX_REG_MVRFLTMASK0 = 0x22, + BD718XX_REG_MVRFLTMASK1 = 0x23, + BD718XX_REG_MVRFLTMASK2 = 0x24, + BD71837_REG_RCVCFG = 0x25, + BD71837_REG_RCVNUM = 0x26, + BD71837_REG_PWRONCONFIG0 = 0x27, + BD71837_REG_PWRONCONFIG1 = 0x28, + BD71837_REG_RESETSRC = 0x29, + BD71837_REG_MIRQ = 0x2A, + BD71837_REG_IRQ = 0x2B, + BD71837_REG_IN_MON = 0x2C, + BD71837_REG_POW_STATE = 0x2D, + BD71837_REG_OUT32K = 0x2E, + BD71837_REG_REGLOCK = 0x2F, + BD71837_REG_OTPVER = 0xFF, + BD71837_MAX_REGISTER = 0x100, +}; + +#define REGLOCK_PWRSEQ 0x1 +#define REGLOCK_VREG 0x10 + +/* Generic BUCK control masks */ +#define BD71837_BUCK_SEL 0x02 +#define BD71837_BUCK_EN 0x01 +#define BD71837_BUCK_RUN_ON 0x04 + +/* Generic LDO masks */ +#define BD71837_LDO_SEL 0x80 +#define BD71837_LDO_EN 0x40 + +/* BD71837 BUCK ramp rate CTRL reg bits */ +#define BUCK_RAMPRATE_MASK 0xC0 +#define BUCK_RAMPRATE_10P00MV 0x0 +#define BUCK_RAMPRATE_5P00MV 0x1 +#define BUCK_RAMPRATE_2P50MV 0x2 +#define BUCK_RAMPRATE_1P25MV 0x3 + +/* BD71837_REG_BUCK1_VOLT_RUN bits */ +#define BUCK1_RUN_MASK 0x3F +#define BUCK1_RUN_DEFAULT 0x14 + +/* BD71837_REG_BUCK1_VOLT_SUSP bits */ +#define BUCK1_SUSP_MASK 0x3F +#define BUCK1_SUSP_DEFAULT 0x14 + +/* BD71837_REG_BUCK1_VOLT_IDLE bits */ 
+#define BUCK1_IDLE_MASK 0x3F +#define BUCK1_IDLE_DEFAULT 0x14 + +/* BD71837_REG_BUCK2_VOLT_RUN bits */ +#define BUCK2_RUN_MASK 0x3F +#define BUCK2_RUN_DEFAULT 0x1E + +/* BD71837_REG_BUCK2_VOLT_IDLE bits */ +#define BUCK2_IDLE_MASK 0x3F +#define BUCK2_IDLE_DEFAULT 0x14 + +/* BD71837_REG_BUCK3_VOLT_RUN bits */ +#define BUCK3_RUN_MASK 0x3F +#define BUCK3_RUN_DEFAULT 0x1E + +/* BD71837_REG_BUCK4_VOLT_RUN bits */ +#define BUCK4_RUN_MASK 0x3F +#define BUCK4_RUN_DEFAULT 0x1E + +/* BD71837_REG_BUCK5_VOLT bits */ +#define BUCK5_MASK 0x07 +#define BUCK5_DEFAULT 0x02 + +/* BD71837_REG_BUCK6_VOLT bits */ +#define BUCK6_MASK 0x03 +#define BUCK6_DEFAULT 0x03 + +/* BD71837_REG_BUCK7_VOLT bits */ +#define BUCK7_MASK 0x07 +#define BUCK7_DEFAULT 0x03 + +/* BD71837_REG_BUCK8_VOLT bits */ +#define BUCK8_MASK 0x3F +#define BUCK8_DEFAULT 0x1E + +/* BD718XX Voltage monitoring masks */ +#define BD718XX_BUCK1_VRMON80 0x1 +#define BD718XX_BUCK1_VRMON130 0x2 +#define BD718XX_BUCK2_VRMON80 0x4 +#define BD718XX_BUCK2_VRMON130 0x8 +#define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1 +#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2 +#define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4 +#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8 +#define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10 +#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20 +#define BD718XX_4TH_NODVS_BUCK_VRMON80 0x40 +#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80 +#define BD718XX_LDO1_VRMON80 0x1 +#define BD718XX_LDO2_VRMON80 0x2 +#define BD718XX_LDO3_VRMON80 0x4 +#define BD718XX_LDO4_VRMON80 0x8 +#define BD718XX_LDO5_VRMON80 0x10 +#define BD718XX_LDO6_VRMON80 0x20 + +/* BD71837 specific voltage monitoring masks */ +#define BD71837_BUCK3_VRMON80 0x10 +#define BD71837_BUCK3_VRMON130 0x20 +#define BD71837_BUCK4_VRMON80 0x40 +#define BD71837_BUCK4_VRMON130 0x80 +#define BD71837_LDO7_VRMON80 0x40 + +/* BD71837_REG_IRQ bits */ +#define IRQ_SWRST 0x40 +#define IRQ_PWRON_S 0x20 +#define IRQ_PWRON_L 0x10 +#define IRQ_PWRON 0x08 +#define IRQ_WDOG 0x04 +#define IRQ_ON_REQ 0x02 +#define IRQ_STBY_REQ 0x01 + +/* BD71837_REG_OUT32K bits */ +#define BD71837_OUT32K_EN 0x01 + +/* BD71837 gated clock rate */ +#define BD71837_CLK_RATE 32768 + +/* ROHM BD71837 irqs */ +enum { + BD71837_INT_STBY_REQ, + BD71837_INT_ON_REQ, + BD71837_INT_WDOG, + BD71837_INT_PWRBTN, + BD71837_INT_PWRBTN_L, + BD71837_INT_PWRBTN_S, + BD71837_INT_SWRST +}; + +/* ROHM BD71837 interrupt masks */ +#define BD71837_INT_SWRST_MASK 0x40 +#define BD71837_INT_PWRBTN_S_MASK 0x20 +#define BD71837_INT_PWRBTN_L_MASK 0x10 +#define BD71837_INT_PWRBTN_MASK 0x8 +#define BD71837_INT_WDOG_MASK 0x4 +#define BD71837_INT_ON_REQ_MASK 0x2 +#define BD71837_INT_STBY_REQ_MASK 0x1 + +/* BD71837_REG_LDO1_VOLT bits */ +#define LDO1_MASK 0x03 + +/* BD71837_REG_LDO1_VOLT bits */ +#define LDO2_MASK 0x20 + +/* BD71837_REG_LDO3_VOLT bits */ +#define LDO3_MASK 0x0F + +/* BD71837_REG_LDO4_VOLT bits */ +#define LDO4_MASK 0x0F + +/* BD71837_REG_LDO5_VOLT bits */ +#define LDO5_MASK 0x0F + +/* BD71837_REG_LDO6_VOLT bits */ +#define LDO6_MASK 0x0F + +/* BD71837_REG_LDO7_VOLT bits */ +#define LDO7_MASK 0x0F + +/* Register write induced reset settings */ + +/* + * Even though the bit zero is not SWRESET type we still want to write zero + * to it when changing type. Bit zero is 'SWRESET' trigger bit and if we + * write 1 to it we will trigger the action. So always write 0 to it when + * changning SWRESET action - no matter what we read from it. 
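To make the rule above concrete, a caller changing the SWRESET type would fold the trigger bit into the mask of a single read-modify-write so that bit zero is always written back as zero. This is only a sketch built on the SWRESET masks defined a few lines below and the regmap held in struct bd71837 at the end of this header; the helper name is invented for the example and <linux/regmap.h> is assumed to be included.

	/* Illustrative sketch: select the warm SWRESET type while forcing the
	 * trigger bit (bit 0) to zero, as the comment above requires.
	 */
	static int bd71837_set_swreset_warm(struct bd71837 *bd71837)
	{
		return regmap_update_bits(bd71837->regmap, BD71837_REG_SWRESET,
					  BD71837_SWRESET_TYPE_MASK |
					  BD71837_SWRESET_RESET_MASK,
					  BD71837_SWRESET_TYPE_WARM);
	}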
+ */ +#define BD71837_SWRESET_TYPE_MASK 7 +#define BD71837_SWRESET_TYPE_DISABLED 0 +#define BD71837_SWRESET_TYPE_COLD 4 +#define BD71837_SWRESET_TYPE_WARM 6 + +#define BD71837_SWRESET_RESET_MASK 1 +#define BD71837_SWRESET_RESET 1 + +/* Poweroff state transition conditions */ + +#define BD718XX_ON_REQ_POWEROFF_MASK 1 +#define BD718XX_SWRESET_POWEROFF_MASK 2 +#define BD718XX_WDOG_POWEROFF_MASK 4 +#define BD718XX_KEY_L_POWEROFF_MASK 8 + +#define BD718XX_POWOFF_TO_SNVS 0 +#define BD718XX_POWOFF_TO_RDY 0xF + +#define BD718XX_POWOFF_TIME_MASK 0xF0 +enum { + BD718XX_POWOFF_TIME_5MS = 0, + BD718XX_POWOFF_TIME_10MS, + BD718XX_POWOFF_TIME_15MS, + BD718XX_POWOFF_TIME_20MS, + BD718XX_POWOFF_TIME_25MS, + BD718XX_POWOFF_TIME_30MS, + BD718XX_POWOFF_TIME_35MS, + BD718XX_POWOFF_TIME_40MS, + BD718XX_POWOFF_TIME_45MS, + BD718XX_POWOFF_TIME_50MS, + BD718XX_POWOFF_TIME_75MS, + BD718XX_POWOFF_TIME_100MS, + BD718XX_POWOFF_TIME_250MS, + BD718XX_POWOFF_TIME_500MS, + BD718XX_POWOFF_TIME_750MS, + BD718XX_POWOFF_TIME_1500MS +}; + +/* Poweron sequence state transition conditions */ +#define BD718XX_RDY_TO_SNVS_MASK 0xF +#define BD718XX_SNVS_TO_RUN_MASK 0xF0 + +#define BD718XX_PWR_TRIG_KEY_L 1 +#define BD718XX_PWR_TRIG_KEY_S 2 +#define BD718XX_PWR_TRIG_PMIC_ON 4 +#define BD718XX_PWR_TRIG_VSYS_UVLO 8 +#define BD718XX_RDY_TO_SNVS_SIFT 0 +#define BD718XX_SNVS_TO_RUN_SIFT 4 + +#define BD718XX_PWRBTN_PRESS_DURATION_MASK 0xF + +/* Timeout value for detecting short press */ +enum { + BD718XX_PWRBTN_SHORT_PRESS_10MS = 0, + BD718XX_PWRBTN_SHORT_PRESS_500MS, + BD718XX_PWRBTN_SHORT_PRESS_1000MS, + BD718XX_PWRBTN_SHORT_PRESS_1500MS, + BD718XX_PWRBTN_SHORT_PRESS_2000MS, + BD718XX_PWRBTN_SHORT_PRESS_2500MS, + BD718XX_PWRBTN_SHORT_PRESS_3000MS, + BD718XX_PWRBTN_SHORT_PRESS_3500MS, + BD718XX_PWRBTN_SHORT_PRESS_4000MS, + BD718XX_PWRBTN_SHORT_PRESS_4500MS, + BD718XX_PWRBTN_SHORT_PRESS_5000MS, + BD718XX_PWRBTN_SHORT_PRESS_5500MS, + BD718XX_PWRBTN_SHORT_PRESS_6000MS, + BD718XX_PWRBTN_SHORT_PRESS_6500MS, + BD718XX_PWRBTN_SHORT_PRESS_7000MS, + BD718XX_PWRBTN_SHORT_PRESS_7500MS +}; + +/* Timeout value for detecting LONG press */ +enum { + BD718XX_PWRBTN_LONG_PRESS_10MS = 0, + BD718XX_PWRBTN_LONG_PRESS_1S, + BD718XX_PWRBTN_LONG_PRESS_2S, + BD718XX_PWRBTN_LONG_PRESS_3S, + BD718XX_PWRBTN_LONG_PRESS_4S, + BD718XX_PWRBTN_LONG_PRESS_5S, + BD718XX_PWRBTN_LONG_PRESS_6S, + BD718XX_PWRBTN_LONG_PRESS_7S, + BD718XX_PWRBTN_LONG_PRESS_8S, + BD718XX_PWRBTN_LONG_PRESS_9S, + BD718XX_PWRBTN_LONG_PRESS_10S, + BD718XX_PWRBTN_LONG_PRESS_11S, + BD718XX_PWRBTN_LONG_PRESS_12S, + BD718XX_PWRBTN_LONG_PRESS_13S, + BD718XX_PWRBTN_LONG_PRESS_14S, + BD718XX_PWRBTN_LONG_PRESS_15S +}; + +struct bd71837_pmic; +struct bd71837_clk; + +struct bd71837 { + struct device *dev; + struct regmap *regmap; + unsigned long int id; + + int chip_irq; + struct regmap_irq_chip_data *irq_data; + + struct bd71837_pmic *pmic; + struct bd71837_clk *clk; +}; + +#endif /* __LINUX_MFD_BD71837_H__ */ diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h new file mode 100644 index 000000000..52d53d134 --- /dev/null +++ b/include/linux/mfd/rt5033-private.h @@ -0,0 +1,260 @@ +/* + * MFD core driver for Richtek RT5033 + * + * Copyright (C) 2014 Samsung Electronics, Co., Ltd. + * Author: Beomho Seo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published bythe Free Software Foundation. 
+ */ + +#ifndef __RT5033_PRIVATE_H__ +#define __RT5033_PRIVATE_H__ + +enum rt5033_reg { + RT5033_REG_CHG_STAT = 0x00, + RT5033_REG_CHG_CTRL1 = 0x01, + RT5033_REG_CHG_CTRL2 = 0x02, + RT5033_REG_DEVICE_ID = 0x03, + RT5033_REG_CHG_CTRL3 = 0x04, + RT5033_REG_CHG_CTRL4 = 0x05, + RT5033_REG_CHG_CTRL5 = 0x06, + RT5033_REG_RT_CTRL0 = 0x07, + RT5033_REG_CHG_RESET = 0x08, + /* Reserved 0x09~0x18 */ + RT5033_REG_RT_CTRL1 = 0x19, + /* Reserved 0x1A~0x20 */ + RT5033_REG_FLED_FUNCTION1 = 0x21, + RT5033_REG_FLED_FUNCTION2 = 0x22, + RT5033_REG_FLED_STROBE_CTRL1 = 0x23, + RT5033_REG_FLED_STROBE_CTRL2 = 0x24, + RT5033_REG_FLED_CTRL1 = 0x25, + RT5033_REG_FLED_CTRL2 = 0x26, + RT5033_REG_FLED_CTRL3 = 0x27, + RT5033_REG_FLED_CTRL4 = 0x28, + RT5033_REG_FLED_CTRL5 = 0x29, + /* Reserved 0x2A~0x40 */ + RT5033_REG_CTRL = 0x41, + RT5033_REG_BUCK_CTRL = 0x42, + RT5033_REG_LDO_CTRL = 0x43, + /* Reserved 0x44~0x46 */ + RT5033_REG_MANUAL_RESET_CTRL = 0x47, + /* Reserved 0x48~0x5F */ + RT5033_REG_CHG_IRQ1 = 0x60, + RT5033_REG_CHG_IRQ2 = 0x61, + RT5033_REG_CHG_IRQ3 = 0x62, + RT5033_REG_CHG_IRQ1_CTRL = 0x63, + RT5033_REG_CHG_IRQ2_CTRL = 0x64, + RT5033_REG_CHG_IRQ3_CTRL = 0x65, + RT5033_REG_LED_IRQ_STAT = 0x66, + RT5033_REG_LED_IRQ_CTRL = 0x67, + RT5033_REG_PMIC_IRQ_STAT = 0x68, + RT5033_REG_PMIC_IRQ_CTRL = 0x69, + RT5033_REG_SHDN_CTRL = 0x6A, + RT5033_REG_OFF_EVENT = 0x6B, + + RT5033_REG_END, +}; + +/* RT5033 Charger state register */ +#define RT5033_CHG_STAT_MASK 0x20 +#define RT5033_CHG_STAT_DISCHARGING 0x00 +#define RT5033_CHG_STAT_FULL 0x10 +#define RT5033_CHG_STAT_CHARGING 0x20 +#define RT5033_CHG_STAT_NOT_CHARGING 0x30 +#define RT5033_CHG_STAT_TYPE_MASK 0x60 +#define RT5033_CHG_STAT_TYPE_PRE 0x20 +#define RT5033_CHG_STAT_TYPE_FAST 0x60 + +/* RT5033 CHGCTRL1 register */ +#define RT5033_CHGCTRL1_IAICR_MASK 0xe0 +#define RT5033_CHGCTRL1_MODE_MASK 0x01 + +/* RT5033 CHGCTRL2 register */ +#define RT5033_CHGCTRL2_CV_MASK 0xfc + +/* RT5033 CHGCTRL3 register */ +#define RT5033_CHGCTRL3_CFO_EN_MASK 0x40 +#define RT5033_CHGCTRL3_TIMER_MASK 0x38 +#define RT5033_CHGCTRL3_TIMER_EN_MASK 0x01 + +/* RT5033 CHGCTRL4 register */ +#define RT5033_CHGCTRL4_EOC_MASK 0x07 +#define RT5033_CHGCTRL4_IPREC_MASK 0x18 + +/* RT5033 CHGCTRL5 register */ +#define RT5033_CHGCTRL5_VPREC_MASK 0x0f +#define RT5033_CHGCTRL5_ICHG_MASK 0xf0 +#define RT5033_CHGCTRL5_ICHG_SHIFT 0x04 +#define RT5033_CHG_MAX_CURRENT 0x0d + +/* RT5033 RT CTRL1 register */ +#define RT5033_RT_CTRL1_UUG_MASK 0x02 +#define RT5033_RT_HZ_MASK 0x01 + +/* RT5033 control register */ +#define RT5033_CTRL_FCCM_BUCK_MASK 0x00 +#define RT5033_CTRL_BUCKOMS_MASK 0x01 +#define RT5033_CTRL_LDOOMS_MASK 0x02 +#define RT5033_CTRL_SLDOOMS_MASK 0x03 +#define RT5033_CTRL_EN_BUCK_MASK 0x04 +#define RT5033_CTRL_EN_LDO_MASK 0x05 +#define RT5033_CTRL_EN_SAFE_LDO_MASK 0x06 +#define RT5033_CTRL_LDO_SLEEP_MASK 0x07 + +/* RT5033 BUCK control register */ +#define RT5033_BUCK_CTRL_MASK 0x1f + +/* RT5033 LDO control register */ +#define RT5033_LDO_CTRL_MASK 0x1f + +/* RT5033 charger property - model, manufacturer */ + +#define RT5033_CHARGER_MODEL "RT5033WSC Charger" +#define RT5033_MANUFACTURER "Richtek Technology Corporation" + +/* + * RT5033 charger fast-charge current limits (as in CHGCTRL1 register). + * AICR mode limits the input current; for example, + * the AICR 100 mode limits the input current to 100 mA. + */
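For instance, capping the charger input current at 500 mA comes down to a masked write of the IAICR field in CHGCTRL1 with one of the AICR mode values defined next. A hedged sketch rather than the actual rt5033 charger code; it assumes <linux/regmap.h> and a regmap handle such as the one carried by struct rt5033_dev in rt5033.h later in this patch, and the helper name is invented.

	/* Sketch: limit the charger input current to 500 mA. */
	static int rt5033_set_input_current_500ma(struct regmap *regmap)
	{
		return regmap_update_bits(regmap, RT5033_REG_CHG_CTRL1,
					  RT5033_CHGCTRL1_IAICR_MASK,
					  RT5033_AICR_500_MODE);
	}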
+#define RT5033_AICR_100_MODE 0x20 +#define RT5033_AICR_500_MODE 0x40 +#define RT5033_AICR_700_MODE 0x60 +#define RT5033_AICR_900_MODE 0x80 +#define RT5033_AICR_1500_MODE 0xc0 +#define RT5033_AICR_2000_MODE 0xe0 +#define RT5033_AICR_MODE_MASK 0xe0 + +/* RT5033 uses an internal timer; the fast-charge time below must be set */ +#define RT5033_FAST_CHARGE_TIMER4 0x00 +#define RT5033_FAST_CHARGE_TIMER6 0x01 +#define RT5033_FAST_CHARGE_TIMER8 0x02 +#define RT5033_FAST_CHARGE_TIMER9 0x03 +#define RT5033_FAST_CHARGE_TIMER12 0x04 +#define RT5033_FAST_CHARGE_TIMER14 0x05 +#define RT5033_FAST_CHARGE_TIMER16 0x06 + +#define RT5033_INT_TIMER_ENABLE 0x01 + +/* RT5033 charger termination enable mask */ +#define RT5033_TE_ENABLE_MASK 0x08 + +/* + * RT5033 charger OPA mode. The RT5033 has two OPA modes: charger mode + * and boost mode (for OTG). + */ + +#define RT5033_CHARGER_MODE 0x00 +#define RT5033_BOOST_MODE 0x01 + +/* RT5033 charger termination enable */ +#define RT5033_TE_ENABLE 0x08 + +/* RT5033 charger CFO enable */ +#define RT5033_CFO_ENABLE 0x40 + +/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */ +#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U +#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM 25000U +#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U + +/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */ +#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN 350000U +#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM 100000U +#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX 650000U + +/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */ +#define RT5033_CHARGER_FAST_CURRENT_MIN 700000U +#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM 100000U +#define RT5033_CHARGER_FAST_CURRENT_MAX 2000000U + +/* + * RT5033 charger constant-charge end-of-charge current + * (as in CHGCTRL4 register), uA + */ +#define RT5033_CHARGER_EOC_MIN 150000U +#define RT5033_CHARGER_EOC_REF 300000U +#define RT5033_CHARGER_EOC_STEP_NUM1 50000U +#define RT5033_CHARGER_EOC_STEP_NUM2 100000U +#define RT5033_CHARGER_EOC_MAX 600000U + +/* + * RT5033 charger pre-charge threshold voltage limits + * (as in CHGCTRL5 register), uV + */ + +#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN 2300000U +#define RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM 100000U +#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX 3800000U + +/* + * RT5033 charger UUG enable. If UUG is enabled, the MOS is automatically + * controlled by the H/W charger circuit.
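The _MIN/_STEP_NUM/_MAX triplets above describe plain linear ranges, so the register selector for a requested value is simply (value - min) / step. A small sketch of that arithmetic for the fast-charge current; the helper name is invented and -EINVAL needs <linux/errno.h>. As a sanity check, (2000000 - 700000) / 100000 = 0x0d, which is exactly RT5033_CHG_MAX_CURRENT; the resulting selector would then be shifted by RT5033_CHGCTRL5_ICHG_SHIFT into the ICHG field of CHGCTRL5.

	/* Sketch: translate a fast-charge current in uA into a CHGCTRL5 selector. */
	static int rt5033_fast_charge_sel(unsigned int uamp)
	{
		if (uamp < RT5033_CHARGER_FAST_CURRENT_MIN ||
		    uamp > RT5033_CHARGER_FAST_CURRENT_MAX)
			return -EINVAL;

		return (uamp - RT5033_CHARGER_FAST_CURRENT_MIN) /
		       RT5033_CHARGER_FAST_CURRENT_STEP_NUM;
	}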
+ */ +#define RT5033_CHARGER_UUG_ENABLE 0x02 + +/* RT5033 charger High impedance mode */ +#define RT5033_CHARGER_HZ_DISABLE 0x00 +#define RT5033_CHARGER_HZ_ENABLE 0x01 + +/* RT5033 regulator BUCK output voltage uV */ +#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U +#define RT5033_REGULATOR_BUCK_VOLTAGE_MAX 3000000U +#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP 100000U +#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 21 + +/* RT5033 regulator LDO output voltage uV */ +#define RT5033_REGULATOR_LDO_VOLTAGE_MIN 1200000U +#define RT5033_REGULATOR_LDO_VOLTAGE_MAX 3000000U +#define RT5033_REGULATOR_LDO_VOLTAGE_STEP 100000U +#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 19 + +/* RT5033 regulator SAFE LDO output voltage uV */ +#define RT5033_REGULATOR_SAFE_LDO_VOLTAGE 4900000U + +enum rt5033_fuel_reg { + RT5033_FUEL_REG_OCV_H = 0x00, + RT5033_FUEL_REG_OCV_L = 0x01, + RT5033_FUEL_REG_VBAT_H = 0x02, + RT5033_FUEL_REG_VBAT_L = 0x03, + RT5033_FUEL_REG_SOC_H = 0x04, + RT5033_FUEL_REG_SOC_L = 0x05, + RT5033_FUEL_REG_CTRL_H = 0x06, + RT5033_FUEL_REG_CTRL_L = 0x07, + RT5033_FUEL_REG_CRATE = 0x08, + RT5033_FUEL_REG_DEVICE_ID = 0x09, + RT5033_FUEL_REG_AVG_VOLT_H = 0x0A, + RT5033_FUEL_REG_AVG_VOLT_L = 0x0B, + RT5033_FUEL_REG_CONFIG_H = 0x0C, + RT5033_FUEL_REG_CONFIG_L = 0x0D, + /* Reserved 0x0E~0x0F */ + RT5033_FUEL_REG_IRQ_CTRL = 0x10, + RT5033_FUEL_REG_IRQ_FLAG = 0x11, + RT5033_FUEL_VMIN = 0x12, + RT5033_FUEL_SMIN = 0x13, + /* Reserved 0x14~0x1F */ + RT5033_FUEL_VGCOMP1 = 0x20, + RT5033_FUEL_VGCOMP2 = 0x21, + RT5033_FUEL_VGCOMP3 = 0x22, + RT5033_FUEL_VGCOMP4 = 0x23, + /* Reserved 0x24~0xFD */ + RT5033_FUEL_MFA_H = 0xFE, + RT5033_FUEL_MFA_L = 0xFF, + + RT5033_FUEL_REG_END, +}; + +/* RT5033 fuel gauge battery present property */ +#define RT5033_FUEL_BAT_PRESENT 0x02 + +/* RT5033 PMIC interrupts */ +#define RT5033_PMIC_IRQ_BUCKOCP 2 +#define RT5033_PMIC_IRQ_BUCKLV 3 +#define RT5033_PMIC_IRQ_SAFELDOLV 4 +#define RT5033_PMIC_IRQ_LDOLV 5 +#define RT5033_PMIC_IRQ_OT 6 +#define RT5033_PMIC_IRQ_VDDA_UV 7 + +#endif /* __RT5033_PRIVATE_H__ */ diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h new file mode 100644 index 000000000..6cff5cf45 --- /dev/null +++ b/include/linux/mfd/rt5033.h @@ -0,0 +1,62 @@ +/* + * MFD core driver for the RT5033 + * + * Copyright (C) 2014 Samsung Electronics + * Author: Beomho Seo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published bythe Free Software Foundation. 
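For the fuel-gauge register map above, the state of charge is split across RT5033_FUEL_REG_SOC_H and RT5033_FUEL_REG_SOC_L, so a driver reads both bytes and combines them. A sketch only, assuming the gauge sits behind a regmap (as struct rt5033_battery in rt5033.h below suggests) and that <linux/regmap.h> is included; how the two bytes are scaled into a percentage is hardware-specific and not shown here.

	/* Sketch: fetch the raw state-of-charge bytes from the fuel gauge. */
	static int rt5033_fuel_read_soc_raw(struct regmap *regmap,
					    unsigned int *soc_h, unsigned int *soc_l)
	{
		int ret;

		ret = regmap_read(regmap, RT5033_FUEL_REG_SOC_H, soc_h);
		if (ret)
			return ret;

		return regmap_read(regmap, RT5033_FUEL_REG_SOC_L, soc_l);
	}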
+ */ + +#ifndef __RT5033_H__ +#define __RT5033_H__ + +#include +#include +#include +#include + +/* RT5033 regulator IDs */ +enum rt5033_regulators { + RT5033_BUCK = 0, + RT5033_LDO, + RT5033_SAFE_LDO, + + RT5033_REGULATOR_NUM, +}; + +struct rt5033_dev { + struct device *dev; + + struct regmap *regmap; + struct regmap_irq_chip_data *irq_data; + int irq; + bool wakeup; +}; + +struct rt5033_battery { + struct i2c_client *client; + struct rt5033_dev *rt5033; + struct regmap *regmap; + struct power_supply *psy; +}; + +/* RT5033 charger platform data */ +struct rt5033_charger_data { + unsigned int pre_uamp; + unsigned int pre_uvolt; + unsigned int const_uvolt; + unsigned int eoc_uamp; + unsigned int fast_uamp; +}; + +struct rt5033_charger { + struct device *dev; + struct rt5033_dev *rt5033; + struct power_supply psy; + + struct rt5033_charger_data *chg; +}; + +#endif /* __RT5033_H__ */ diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h new file mode 100644 index 000000000..28f4ae762 --- /dev/null +++ b/include/linux/mfd/samsung/core.h @@ -0,0 +1,186 @@ +/* + * core.h + * + * copyright (c) 2011 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_SEC_CORE_H +#define __LINUX_MFD_SEC_CORE_H + +/* Macros to represent minimum voltages for LDO/BUCK */ +#define MIN_3000_MV 3000000 +#define MIN_2500_MV 2500000 +#define MIN_2000_MV 2000000 +#define MIN_1800_MV 1800000 +#define MIN_1500_MV 1500000 +#define MIN_1400_MV 1400000 +#define MIN_1000_MV 1000000 + +#define MIN_900_MV 900000 +#define MIN_850_MV 850000 +#define MIN_800_MV 800000 +#define MIN_750_MV 750000 +#define MIN_600_MV 600000 +#define MIN_500_MV 500000 + +/* Ramp delay in uV/us */ +#define RAMP_DELAY_12_MVUS 12000 + +/* Macros to represent steps for LDO/BUCK */ +#define STEP_50_MV 50000 +#define STEP_25_MV 25000 +#define STEP_12_5_MV 12500 +#define STEP_6_25_MV 6250 + +struct gpio_desc; + +enum sec_device_type { + S5M8751X, + S5M8763X, + S5M8767X, + S2MPA01, + S2MPS11X, + S2MPS13X, + S2MPS14X, + S2MPS15X, + S2MPU02, +}; + +/** + * struct sec_pmic_dev - s2m/s5m master device for sub-drivers + * @dev: Master device of the chip + * @pdata: Platform data populated with data from DTS + * or board files + * @regmap_pmic: Regmap associated with PMIC's I2C address + * @i2c: I2C client of the main driver + * @device_type: Type of device, matches enum sec_device_type + * @irq_base: Base IRQ number for device, required for IRQs + * @irq: Generic IRQ number for device + * @irq_data: Runtime data structure for IRQ controller + * @wakeup: Whether or not this is a wakeup device + */ +struct sec_pmic_dev { + struct device *dev; + struct sec_platform_data *pdata; + struct regmap *regmap_pmic; + struct i2c_client *i2c; + + unsigned long device_type; + int irq_base; + int irq; + struct regmap_irq_chip_data *irq_data; + + bool wakeup; +}; + +int sec_irq_init(struct sec_pmic_dev *sec_pmic); +void sec_irq_exit(struct sec_pmic_dev *sec_pmic); +int sec_irq_resume(struct sec_pmic_dev *sec_pmic); + +struct sec_platform_data { + struct sec_regulator_data *regulators; + struct sec_opmode_data *opmode; + int device_type; + int num_regulators; + + int irq_base; + int (*cfg_pmic_irq)(void); + + bool wakeup; + bool buck_voltage_lock; + + int 
buck_gpios[3]; + int buck_ds[3]; + unsigned int buck2_voltage[8]; + bool buck2_gpiodvs; + unsigned int buck3_voltage[8]; + bool buck3_gpiodvs; + unsigned int buck4_voltage[8]; + bool buck4_gpiodvs; + + int buck_set1; + int buck_set2; + int buck_set3; + int buck2_enable; + int buck3_enable; + int buck4_enable; + int buck_default_idx; + int buck2_default_idx; + int buck3_default_idx; + int buck4_default_idx; + + int buck_ramp_delay; + + int buck2_ramp_delay; + int buck34_ramp_delay; + int buck5_ramp_delay; + int buck16_ramp_delay; + int buck7810_ramp_delay; + int buck9_ramp_delay; + int buck24_ramp_delay; + int buck3_ramp_delay; + int buck7_ramp_delay; + int buck8910_ramp_delay; + + bool buck1_ramp_enable; + bool buck2_ramp_enable; + bool buck3_ramp_enable; + bool buck4_ramp_enable; + bool buck6_ramp_enable; + + int buck2_init; + int buck3_init; + int buck4_init; + /* Whether or not manually set PWRHOLD to low during shutdown. */ + bool manual_poweroff; + /* Disable the WRSTBI (buck voltage warm reset) when probing? */ + bool disable_wrstbi; +}; + +/** + * sec_regulator_data - regulator data + * @id: regulator id + * @initdata: regulator init data (contraints, supplies, ...) + */ +struct sec_regulator_data { + int id; + struct regulator_init_data *initdata; + struct device_node *reg_node; + struct gpio_desc *ext_control_gpiod; +}; + +/* + * sec_opmode_data - regulator operation mode data + * @id: regulator id + * @mode: regulator operation mode + */ +struct sec_opmode_data { + int id; + unsigned int mode; +}; + +/* + * samsung regulator operation mode + * SEC_OPMODE_OFF Regulator always OFF + * SEC_OPMODE_ON Regulator always ON + * SEC_OPMODE_LOWPOWER Regulator is on in low-power mode + * SEC_OPMODE_SUSPEND Regulator is changed by PWREN pin + * If PWREN is high, regulator is on + * If PWREN is low, regulator is off + */ + +enum sec_opmode { + SEC_OPMODE_OFF, + SEC_OPMODE_ON, + SEC_OPMODE_LOWPOWER, + SEC_OPMODE_SUSPEND, +}; + +#endif /* __LINUX_MFD_SEC_CORE_H */ diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h new file mode 100644 index 000000000..667aa4048 --- /dev/null +++ b/include/linux/mfd/samsung/irq.h @@ -0,0 +1,253 @@ +/* irq.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
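The regulator operation modes documented at the end of core.h above are normally handed to the PMIC driver as an array of sec_opmode_data entries through the opmode pointer of sec_platform_data. A purely illustrative table; the regulator ids used here come from s2mps11.h later in this patch and the array name is made up.

	/* Sketch: let BUCK1 follow the PWREN pin, keep LDO1 always on. */
	static struct sec_opmode_data s2mps11_opmodes_example[] = {
		{ .id = S2MPS11_BUCK1, .mode = SEC_OPMODE_SUSPEND },
		{ .id = S2MPS11_LDO1,  .mode = SEC_OPMODE_ON },
	};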
+ * + */ + +#ifndef __LINUX_MFD_SEC_IRQ_H +#define __LINUX_MFD_SEC_IRQ_H + +enum s2mpa01_irq { + S2MPA01_IRQ_PWRONF, + S2MPA01_IRQ_PWRONR, + S2MPA01_IRQ_JIGONBF, + S2MPA01_IRQ_JIGONBR, + S2MPA01_IRQ_ACOKBF, + S2MPA01_IRQ_ACOKBR, + S2MPA01_IRQ_PWRON1S, + S2MPA01_IRQ_MRB, + + S2MPA01_IRQ_RTC60S, + S2MPA01_IRQ_RTCA1, + S2MPA01_IRQ_RTCA0, + S2MPA01_IRQ_SMPL, + S2MPA01_IRQ_RTC1S, + S2MPA01_IRQ_WTSR, + + S2MPA01_IRQ_INT120C, + S2MPA01_IRQ_INT140C, + S2MPA01_IRQ_LDO3_TSD, + S2MPA01_IRQ_B16_TSD, + S2MPA01_IRQ_B24_TSD, + S2MPA01_IRQ_B35_TSD, + + S2MPA01_IRQ_NR, +}; + +#define S2MPA01_IRQ_PWRONF_MASK (1 << 0) +#define S2MPA01_IRQ_PWRONR_MASK (1 << 1) +#define S2MPA01_IRQ_JIGONBF_MASK (1 << 2) +#define S2MPA01_IRQ_JIGONBR_MASK (1 << 3) +#define S2MPA01_IRQ_ACOKBF_MASK (1 << 4) +#define S2MPA01_IRQ_ACOKBR_MASK (1 << 5) +#define S2MPA01_IRQ_PWRON1S_MASK (1 << 6) +#define S2MPA01_IRQ_MRB_MASK (1 << 7) + +#define S2MPA01_IRQ_RTC60S_MASK (1 << 0) +#define S2MPA01_IRQ_RTCA1_MASK (1 << 1) +#define S2MPA01_IRQ_RTCA0_MASK (1 << 2) +#define S2MPA01_IRQ_SMPL_MASK (1 << 3) +#define S2MPA01_IRQ_RTC1S_MASK (1 << 4) +#define S2MPA01_IRQ_WTSR_MASK (1 << 5) + +#define S2MPA01_IRQ_INT120C_MASK (1 << 0) +#define S2MPA01_IRQ_INT140C_MASK (1 << 1) +#define S2MPA01_IRQ_LDO3_TSD_MASK (1 << 2) +#define S2MPA01_IRQ_B16_TSD_MASK (1 << 3) +#define S2MPA01_IRQ_B24_TSD_MASK (1 << 4) +#define S2MPA01_IRQ_B35_TSD_MASK (1 << 5) + +enum s2mps11_irq { + S2MPS11_IRQ_PWRONF, + S2MPS11_IRQ_PWRONR, + S2MPS11_IRQ_JIGONBF, + S2MPS11_IRQ_JIGONBR, + S2MPS11_IRQ_ACOKBF, + S2MPS11_IRQ_ACOKBR, + S2MPS11_IRQ_PWRON1S, + S2MPS11_IRQ_MRB, + + S2MPS11_IRQ_RTC60S, + S2MPS11_IRQ_RTCA1, + S2MPS11_IRQ_RTCA0, + S2MPS11_IRQ_SMPL, + S2MPS11_IRQ_RTC1S, + S2MPS11_IRQ_WTSR, + + S2MPS11_IRQ_INT120C, + S2MPS11_IRQ_INT140C, + + S2MPS11_IRQ_NR, +}; + +#define S2MPS11_IRQ_PWRONF_MASK (1 << 0) +#define S2MPS11_IRQ_PWRONR_MASK (1 << 1) +#define S2MPS11_IRQ_JIGONBF_MASK (1 << 2) +#define S2MPS11_IRQ_JIGONBR_MASK (1 << 3) +#define S2MPS11_IRQ_ACOKBF_MASK (1 << 4) +#define S2MPS11_IRQ_ACOKBR_MASK (1 << 5) +#define S2MPS11_IRQ_PWRON1S_MASK (1 << 6) +#define S2MPS11_IRQ_MRB_MASK (1 << 7) + +#define S2MPS11_IRQ_RTC60S_MASK (1 << 0) +#define S2MPS11_IRQ_RTCA1_MASK (1 << 1) +#define S2MPS11_IRQ_RTCA0_MASK (1 << 2) +#define S2MPS11_IRQ_SMPL_MASK (1 << 3) +#define S2MPS11_IRQ_RTC1S_MASK (1 << 4) +#define S2MPS11_IRQ_WTSR_MASK (1 << 5) + +#define S2MPS11_IRQ_INT120C_MASK (1 << 0) +#define S2MPS11_IRQ_INT140C_MASK (1 << 1) + +enum s2mps14_irq { + S2MPS14_IRQ_PWRONF, + S2MPS14_IRQ_PWRONR, + S2MPS14_IRQ_JIGONBF, + S2MPS14_IRQ_JIGONBR, + S2MPS14_IRQ_ACOKBF, + S2MPS14_IRQ_ACOKBR, + S2MPS14_IRQ_PWRON1S, + S2MPS14_IRQ_MRB, + + S2MPS14_IRQ_RTC60S, + S2MPS14_IRQ_RTCA1, + S2MPS14_IRQ_RTCA0, + S2MPS14_IRQ_SMPL, + S2MPS14_IRQ_RTC1S, + S2MPS14_IRQ_WTSR, + + S2MPS14_IRQ_INT120C, + S2MPS14_IRQ_INT140C, + S2MPS14_IRQ_TSD, + + S2MPS14_IRQ_NR, +}; + +enum s2mpu02_irq { + S2MPU02_IRQ_PWRONF, + S2MPU02_IRQ_PWRONR, + S2MPU02_IRQ_JIGONBF, + S2MPU02_IRQ_JIGONBR, + S2MPU02_IRQ_ACOKBF, + S2MPU02_IRQ_ACOKBR, + S2MPU02_IRQ_PWRON1S, + S2MPU02_IRQ_MRB, + + S2MPU02_IRQ_RTC60S, + S2MPU02_IRQ_RTCA1, + S2MPU02_IRQ_RTCA0, + S2MPU02_IRQ_SMPL, + S2MPU02_IRQ_RTC1S, + S2MPU02_IRQ_WTSR, + + S2MPU02_IRQ_INT120C, + S2MPU02_IRQ_INT140C, + S2MPU02_IRQ_TSD, + + S2MPU02_IRQ_NR, +}; + +/* Masks for interrupts are the same as in s2mps11 */ +#define S2MPS14_IRQ_TSD_MASK (1 << 2) + +enum s5m8767_irq { + S5M8767_IRQ_PWRR, + S5M8767_IRQ_PWRF, + S5M8767_IRQ_PWR1S, + S5M8767_IRQ_JIGR, + S5M8767_IRQ_JIGF, + S5M8767_IRQ_LOWBAT2, + 
S5M8767_IRQ_LOWBAT1, + + S5M8767_IRQ_MRB, + S5M8767_IRQ_DVSOK2, + S5M8767_IRQ_DVSOK3, + S5M8767_IRQ_DVSOK4, + + S5M8767_IRQ_RTC60S, + S5M8767_IRQ_RTCA1, + S5M8767_IRQ_RTCA2, + S5M8767_IRQ_SMPL, + S5M8767_IRQ_RTC1S, + S5M8767_IRQ_WTSR, + + S5M8767_IRQ_NR, +}; + +#define S5M8767_IRQ_PWRR_MASK (1 << 0) +#define S5M8767_IRQ_PWRF_MASK (1 << 1) +#define S5M8767_IRQ_PWR1S_MASK (1 << 3) +#define S5M8767_IRQ_JIGR_MASK (1 << 4) +#define S5M8767_IRQ_JIGF_MASK (1 << 5) +#define S5M8767_IRQ_LOWBAT2_MASK (1 << 6) +#define S5M8767_IRQ_LOWBAT1_MASK (1 << 7) + +#define S5M8767_IRQ_MRB_MASK (1 << 2) +#define S5M8767_IRQ_DVSOK2_MASK (1 << 3) +#define S5M8767_IRQ_DVSOK3_MASK (1 << 4) +#define S5M8767_IRQ_DVSOK4_MASK (1 << 5) + +#define S5M8767_IRQ_RTC60S_MASK (1 << 0) +#define S5M8767_IRQ_RTCA1_MASK (1 << 1) +#define S5M8767_IRQ_RTCA2_MASK (1 << 2) +#define S5M8767_IRQ_SMPL_MASK (1 << 3) +#define S5M8767_IRQ_RTC1S_MASK (1 << 4) +#define S5M8767_IRQ_WTSR_MASK (1 << 5) + +enum s5m8763_irq { + S5M8763_IRQ_DCINF, + S5M8763_IRQ_DCINR, + S5M8763_IRQ_JIGF, + S5M8763_IRQ_JIGR, + S5M8763_IRQ_PWRONF, + S5M8763_IRQ_PWRONR, + + S5M8763_IRQ_WTSREVNT, + S5M8763_IRQ_SMPLEVNT, + S5M8763_IRQ_ALARM1, + S5M8763_IRQ_ALARM0, + + S5M8763_IRQ_ONKEY1S, + S5M8763_IRQ_TOPOFFR, + S5M8763_IRQ_DCINOVPR, + S5M8763_IRQ_CHGRSTF, + S5M8763_IRQ_DONER, + S5M8763_IRQ_CHGFAULT, + + S5M8763_IRQ_LOBAT1, + S5M8763_IRQ_LOBAT2, + + S5M8763_IRQ_NR, +}; + +#define S5M8763_IRQ_DCINF_MASK (1 << 2) +#define S5M8763_IRQ_DCINR_MASK (1 << 3) +#define S5M8763_IRQ_JIGF_MASK (1 << 4) +#define S5M8763_IRQ_JIGR_MASK (1 << 5) +#define S5M8763_IRQ_PWRONF_MASK (1 << 6) +#define S5M8763_IRQ_PWRONR_MASK (1 << 7) + +#define S5M8763_IRQ_WTSREVNT_MASK (1 << 0) +#define S5M8763_IRQ_SMPLEVNT_MASK (1 << 1) +#define S5M8763_IRQ_ALARM1_MASK (1 << 2) +#define S5M8763_IRQ_ALARM0_MASK (1 << 3) + +#define S5M8763_IRQ_ONKEY1S_MASK (1 << 0) +#define S5M8763_IRQ_TOPOFFR_MASK (1 << 2) +#define S5M8763_IRQ_DCINOVPR_MASK (1 << 3) +#define S5M8763_IRQ_CHGRSTF_MASK (1 << 4) +#define S5M8763_IRQ_DONER_MASK (1 << 5) +#define S5M8763_IRQ_CHGFAULT_MASK (1 << 7) + +#define S5M8763_IRQ_LOBAT1_MASK (1 << 0) +#define S5M8763_IRQ_LOBAT2_MASK (1 << 1) + +#define S5M8763_ENRAMP (1 << 4) + +#endif /* __LINUX_MFD_SEC_IRQ_H */ diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h new file mode 100644 index 000000000..9ed2871ea --- /dev/null +++ b/include/linux/mfd/samsung/rtc.h @@ -0,0 +1,144 @@ +/* rtc.h + * + * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
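Each *_IRQ_*_MASK above ties an interrupt to one bit of an INTx status register, and the regmap_irq_chip_data pointer in struct sec_pmic_dev indicates these are registered through regmap-irq. A sketch of what a single table entry looks like, assuming <linux/regmap.h>; the array name is invented and a real driver of course fills in every interrupt of the chip.

	/* Sketch: map the power-on rising-edge interrupt to bit 1 of INT1. */
	static const struct regmap_irq s2mps11_irq_example[] = {
		[S2MPS11_IRQ_PWRONR] = {
			.reg_offset = 0,                 /* INT1 */
			.mask = S2MPS11_IRQ_PWRONR_MASK, /* (1 << 1) */
		},
	};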
+ * + */ + +#ifndef __LINUX_MFD_SEC_RTC_H +#define __LINUX_MFD_SEC_RTC_H + +enum s5m_rtc_reg { + S5M_RTC_SEC, + S5M_RTC_MIN, + S5M_RTC_HOUR, + S5M_RTC_WEEKDAY, + S5M_RTC_DATE, + S5M_RTC_MONTH, + S5M_RTC_YEAR1, + S5M_RTC_YEAR2, + S5M_ALARM0_SEC, + S5M_ALARM0_MIN, + S5M_ALARM0_HOUR, + S5M_ALARM0_WEEKDAY, + S5M_ALARM0_DATE, + S5M_ALARM0_MONTH, + S5M_ALARM0_YEAR1, + S5M_ALARM0_YEAR2, + S5M_ALARM1_SEC, + S5M_ALARM1_MIN, + S5M_ALARM1_HOUR, + S5M_ALARM1_WEEKDAY, + S5M_ALARM1_DATE, + S5M_ALARM1_MONTH, + S5M_ALARM1_YEAR1, + S5M_ALARM1_YEAR2, + S5M_ALARM0_CONF, + S5M_ALARM1_CONF, + S5M_RTC_STATUS, + S5M_WTSR_SMPL_CNTL, + S5M_RTC_UDR_CON, + + S5M_RTC_REG_MAX, +}; + +enum s2mps_rtc_reg { + S2MPS_RTC_CTRL, + S2MPS_WTSR_SMPL_CNTL, + S2MPS_RTC_UDR_CON, + S2MPS_RSVD, + S2MPS_RTC_SEC, + S2MPS_RTC_MIN, + S2MPS_RTC_HOUR, + S2MPS_RTC_WEEKDAY, + S2MPS_RTC_DATE, + S2MPS_RTC_MONTH, + S2MPS_RTC_YEAR, + S2MPS_ALARM0_SEC, + S2MPS_ALARM0_MIN, + S2MPS_ALARM0_HOUR, + S2MPS_ALARM0_WEEKDAY, + S2MPS_ALARM0_DATE, + S2MPS_ALARM0_MONTH, + S2MPS_ALARM0_YEAR, + S2MPS_ALARM1_SEC, + S2MPS_ALARM1_MIN, + S2MPS_ALARM1_HOUR, + S2MPS_ALARM1_WEEKDAY, + S2MPS_ALARM1_DATE, + S2MPS_ALARM1_MONTH, + S2MPS_ALARM1_YEAR, + S2MPS_OFFSRC, + + S2MPS_RTC_REG_MAX, +}; + +#define RTC_I2C_ADDR (0x0C >> 1) + +#define HOUR_12 (1 << 7) +#define HOUR_AMPM (1 << 6) +#define HOUR_PM (1 << 5) +#define S5M_ALARM0_STATUS (1 << 1) +#define S5M_ALARM1_STATUS (1 << 2) +#define S5M_UPDATE_AD (1 << 0) + +#define S2MPS_ALARM0_STATUS (1 << 2) +#define S2MPS_ALARM1_STATUS (1 << 1) + +/* RTC Control Register */ +#define BCD_EN_SHIFT 0 +#define BCD_EN_MASK (1 << BCD_EN_SHIFT) +#define MODEL24_SHIFT 1 +#define MODEL24_MASK (1 << MODEL24_SHIFT) +/* RTC Update Register1 */ +#define S5M_RTC_UDR_SHIFT 0 +#define S5M_RTC_UDR_MASK (1 << S5M_RTC_UDR_SHIFT) +#define S2MPS_RTC_WUDR_SHIFT 4 +#define S2MPS_RTC_WUDR_MASK (1 << S2MPS_RTC_WUDR_SHIFT) +#define S2MPS15_RTC_AUDR_SHIFT 4 +#define S2MPS15_RTC_AUDR_MASK (1 << S2MPS15_RTC_AUDR_SHIFT) +#define S2MPS13_RTC_AUDR_SHIFT 1 +#define S2MPS13_RTC_AUDR_MASK (1 << S2MPS13_RTC_AUDR_SHIFT) +#define S2MPS15_RTC_WUDR_SHIFT 1 +#define S2MPS15_RTC_WUDR_MASK (1 << S2MPS15_RTC_WUDR_SHIFT) +#define S2MPS_RTC_RUDR_SHIFT 0 +#define S2MPS_RTC_RUDR_MASK (1 << S2MPS_RTC_RUDR_SHIFT) +#define RTC_TCON_SHIFT 1 +#define RTC_TCON_MASK (1 << RTC_TCON_SHIFT) +#define S5M_RTC_TIME_EN_SHIFT 3 +#define S5M_RTC_TIME_EN_MASK (1 << S5M_RTC_TIME_EN_SHIFT) +/* + * UDR_T field in S5M_RTC_UDR_CON register determines the time needed + * for updating alarm and time registers. Default is 7.32 ms. 
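As an illustration, shortening that update window means rewriting the UDR_T field of S5M_RTC_UDR_CON with one of the encodings defined just below. A sketch only, assuming <linux/regmap.h> and a regmap for the RTC block; the helper name is invented.

	/* Sketch: use the 1.83 ms update time instead of the 7.32 ms default. */
	static int s5m_rtc_set_fast_udr_time(struct regmap *rtc_regmap)
	{
		return regmap_update_bits(rtc_regmap, S5M_RTC_UDR_CON,
					  S5M_RTC_UDR_T_MASK,
					  S5M_RTC_UDR_T_1830_US);
	}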
+ */ +#define S5M_RTC_UDR_T_SHIFT 6 +#define S5M_RTC_UDR_T_MASK (0x3 << S5M_RTC_UDR_T_SHIFT) +#define S5M_RTC_UDR_T_7320_US (0x0 << S5M_RTC_UDR_T_SHIFT) +#define S5M_RTC_UDR_T_1830_US (0x1 << S5M_RTC_UDR_T_SHIFT) +#define S5M_RTC_UDR_T_3660_US (0x2 << S5M_RTC_UDR_T_SHIFT) +#define S5M_RTC_UDR_T_450_US (0x3 << S5M_RTC_UDR_T_SHIFT) + +/* RTC Hour register */ +#define HOUR_PM_SHIFT 6 +#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT) +/* RTC Alarm Enable */ +#define ALARM_ENABLE_SHIFT 7 +#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT) + +#define SMPL_ENABLE_SHIFT 7 +#define SMPL_ENABLE_MASK (1 << SMPL_ENABLE_SHIFT) + +#define WTSR_ENABLE_SHIFT 6 +#define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT) + +#endif /* __LINUX_MFD_SEC_RTC_H */ diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h new file mode 100644 index 000000000..2766108bc --- /dev/null +++ b/include/linux/mfd/samsung/s2mpa01.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_S2MPA01_H +#define __LINUX_MFD_S2MPA01_H + +/* S2MPA01 registers */ +enum s2mpa01_reg { + S2MPA01_REG_ID, + S2MPA01_REG_INT1, + S2MPA01_REG_INT2, + S2MPA01_REG_INT3, + S2MPA01_REG_INT1M, + S2MPA01_REG_INT2M, + S2MPA01_REG_INT3M, + S2MPA01_REG_ST1, + S2MPA01_REG_ST2, + S2MPA01_REG_PWRONSRC, + S2MPA01_REG_OFFSRC, + S2MPA01_REG_RTC_BUF, + S2MPA01_REG_CTRL1, + S2MPA01_REG_ETC_TEST, + S2MPA01_REG_RSVD1, + S2MPA01_REG_BU_CHG, + S2MPA01_REG_RAMP1, + S2MPA01_REG_RAMP2, + S2MPA01_REG_LDO_DSCH1, + S2MPA01_REG_LDO_DSCH2, + S2MPA01_REG_LDO_DSCH3, + S2MPA01_REG_LDO_DSCH4, + S2MPA01_REG_OTP_ADRL, + S2MPA01_REG_OTP_ADRH, + S2MPA01_REG_OTP_DATA, + S2MPA01_REG_MON1SEL, + S2MPA01_REG_MON2SEL, + S2MPA01_REG_LEE, + S2MPA01_REG_RSVD2, + S2MPA01_REG_RSVD3, + S2MPA01_REG_RSVD4, + S2MPA01_REG_RSVD5, + S2MPA01_REG_RSVD6, + S2MPA01_REG_TOP_RSVD, + S2MPA01_REG_DVS_SEL, + S2MPA01_REG_DVS_PTR, + S2MPA01_REG_DVS_DATA, + S2MPA01_REG_RSVD_NO, + S2MPA01_REG_UVLO, + S2MPA01_REG_LEE_NO, + S2MPA01_REG_B1CTRL1, + S2MPA01_REG_B1CTRL2, + S2MPA01_REG_B2CTRL1, + S2MPA01_REG_B2CTRL2, + S2MPA01_REG_B3CTRL1, + S2MPA01_REG_B3CTRL2, + S2MPA01_REG_B4CTRL1, + S2MPA01_REG_B4CTRL2, + S2MPA01_REG_B5CTRL1, + S2MPA01_REG_B5CTRL2, + S2MPA01_REG_B5CTRL3, + S2MPA01_REG_B5CTRL4, + S2MPA01_REG_B5CTRL5, + S2MPA01_REG_B5CTRL6, + S2MPA01_REG_B6CTRL1, + S2MPA01_REG_B6CTRL2, + S2MPA01_REG_B7CTRL1, + S2MPA01_REG_B7CTRL2, + S2MPA01_REG_B8CTRL1, + S2MPA01_REG_B8CTRL2, + S2MPA01_REG_B9CTRL1, + S2MPA01_REG_B9CTRL2, + S2MPA01_REG_B10CTRL1, + S2MPA01_REG_B10CTRL2, + S2MPA01_REG_L1CTRL, + S2MPA01_REG_L2CTRL, + S2MPA01_REG_L3CTRL, + S2MPA01_REG_L4CTRL, + S2MPA01_REG_L5CTRL, + S2MPA01_REG_L6CTRL, + S2MPA01_REG_L7CTRL, + S2MPA01_REG_L8CTRL, + S2MPA01_REG_L9CTRL, + S2MPA01_REG_L10CTRL, + S2MPA01_REG_L11CTRL, + S2MPA01_REG_L12CTRL, + S2MPA01_REG_L13CTRL, + S2MPA01_REG_L14CTRL, + S2MPA01_REG_L15CTRL, + S2MPA01_REG_L16CTRL, + S2MPA01_REG_L17CTRL, + S2MPA01_REG_L18CTRL, + S2MPA01_REG_L19CTRL, + S2MPA01_REG_L20CTRL, + S2MPA01_REG_L21CTRL, + S2MPA01_REG_L22CTRL, + S2MPA01_REG_L23CTRL, + S2MPA01_REG_L24CTRL, + S2MPA01_REG_L25CTRL, + S2MPA01_REG_L26CTRL, + + S2MPA01_REG_LDO_OVCB1, + S2MPA01_REG_LDO_OVCB2, + S2MPA01_REG_LDO_OVCB3, + S2MPA01_REG_LDO_OVCB4, + +}; + +/* S2MPA01 
regulator ids */ +enum s2mpa01_regulators { + S2MPA01_LDO1, + S2MPA01_LDO2, + S2MPA01_LDO3, + S2MPA01_LDO4, + S2MPA01_LDO5, + S2MPA01_LDO6, + S2MPA01_LDO7, + S2MPA01_LDO8, + S2MPA01_LDO9, + S2MPA01_LDO10, + S2MPA01_LDO11, + S2MPA01_LDO12, + S2MPA01_LDO13, + S2MPA01_LDO14, + S2MPA01_LDO15, + S2MPA01_LDO16, + S2MPA01_LDO17, + S2MPA01_LDO18, + S2MPA01_LDO19, + S2MPA01_LDO20, + S2MPA01_LDO21, + S2MPA01_LDO22, + S2MPA01_LDO23, + S2MPA01_LDO24, + S2MPA01_LDO25, + S2MPA01_LDO26, + + S2MPA01_BUCK1, + S2MPA01_BUCK2, + S2MPA01_BUCK3, + S2MPA01_BUCK4, + S2MPA01_BUCK5, + S2MPA01_BUCK6, + S2MPA01_BUCK7, + S2MPA01_BUCK8, + S2MPA01_BUCK9, + S2MPA01_BUCK10, + + S2MPA01_REGULATOR_MAX, +}; + +#define S2MPA01_LDO_VSEL_MASK 0x3F +#define S2MPA01_BUCK_VSEL_MASK 0xFF +#define S2MPA01_ENABLE_MASK (0x03 << S2MPA01_ENABLE_SHIFT) +#define S2MPA01_ENABLE_SHIFT 0x06 +#define S2MPA01_LDO_N_VOLTAGES (S2MPA01_LDO_VSEL_MASK + 1) +#define S2MPA01_BUCK_N_VOLTAGES (S2MPA01_BUCK_VSEL_MASK + 1) + +#define S2MPA01_RAMP_DELAY 12500 /* uV/us */ + +#define S2MPA01_BUCK16_RAMP_SHIFT 4 +#define S2MPA01_BUCK24_RAMP_SHIFT 6 +#define S2MPA01_BUCK3_RAMP_SHIFT 4 +#define S2MPA01_BUCK5_RAMP_SHIFT 6 +#define S2MPA01_BUCK7_RAMP_SHIFT 2 +#define S2MPA01_BUCK8910_RAMP_SHIFT 0 + +#define S2MPA01_BUCK1_RAMP_EN_SHIFT 3 +#define S2MPA01_BUCK2_RAMP_EN_SHIFT 2 +#define S2MPA01_BUCK3_RAMP_EN_SHIFT 1 +#define S2MPA01_BUCK4_RAMP_EN_SHIFT 0 +#define S2MPA01_PMIC_EN_SHIFT 6 + +#endif /*__LINUX_MFD_S2MPA01_H */ diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h new file mode 100644 index 000000000..2c14eeca4 --- /dev/null +++ b/include/linux/mfd/samsung/s2mps11.h @@ -0,0 +1,198 @@ +/* + * s2mps11.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
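The S2MPA01 VSEL/ENABLE masks and ramp delay above are the kind of values a regulator driver plugs into its struct regulator_desc. A hedged sketch for one buck, assuming (as the register names suggest, though this header does not say so) that B1CTRL1 carries the enable field and B1CTRL2 the voltage selector; ops, min_uV and uV_step are omitted, and <linux/regulator/driver.h> is assumed.

	/* Sketch: descriptor fields derived from the S2MPA01 masks above. */
	static const struct regulator_desc s2mpa01_buck1_example = {
		.name        = "BUCK1",
		.id          = S2MPA01_BUCK1,
		.vsel_reg    = S2MPA01_REG_B1CTRL2,
		.vsel_mask   = S2MPA01_BUCK_VSEL_MASK,
		.n_voltages  = S2MPA01_BUCK_N_VOLTAGES,
		.enable_reg  = S2MPA01_REG_B1CTRL1,
		.enable_mask = S2MPA01_ENABLE_MASK,
		.ramp_delay  = S2MPA01_RAMP_DELAY,
	};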
+ * + */ + +#ifndef __LINUX_MFD_S2MPS11_H +#define __LINUX_MFD_S2MPS11_H + +/* S2MPS11 registers */ +enum s2mps11_reg { + S2MPS11_REG_ID, + S2MPS11_REG_INT1, + S2MPS11_REG_INT2, + S2MPS11_REG_INT3, + S2MPS11_REG_INT1M, + S2MPS11_REG_INT2M, + S2MPS11_REG_INT3M, + S2MPS11_REG_ST1, + S2MPS11_REG_ST2, + S2MPS11_REG_OFFSRC, + S2MPS11_REG_PWRONSRC, + S2MPS11_REG_RTC_CTRL, + S2MPS11_REG_CTRL1, + S2MPS11_REG_ETC_TEST, + S2MPS11_REG_RSVD3, + S2MPS11_REG_BU_CHG, + S2MPS11_REG_RAMP, + S2MPS11_REG_RAMP_BUCK, + S2MPS11_REG_LDO1_8, + S2MPS11_REG_LDO9_16, + S2MPS11_REG_LDO17_24, + S2MPS11_REG_LDO25_32, + S2MPS11_REG_LDO33_38, + S2MPS11_REG_LDO1_8_1, + S2MPS11_REG_LDO9_16_1, + S2MPS11_REG_LDO17_24_1, + S2MPS11_REG_LDO25_32_1, + S2MPS11_REG_LDO33_38_1, + S2MPS11_REG_OTP_ADRL, + S2MPS11_REG_OTP_ADRH, + S2MPS11_REG_OTP_DATA, + S2MPS11_REG_MON1SEL, + S2MPS11_REG_MON2SEL, + S2MPS11_REG_LEE, + S2MPS11_REG_RSVD_NO, + S2MPS11_REG_UVLO, + S2MPS11_REG_LEE_NO, + S2MPS11_REG_B1CTRL1, + S2MPS11_REG_B1CTRL2, + S2MPS11_REG_B2CTRL1, + S2MPS11_REG_B2CTRL2, + S2MPS11_REG_B3CTRL1, + S2MPS11_REG_B3CTRL2, + S2MPS11_REG_B4CTRL1, + S2MPS11_REG_B4CTRL2, + S2MPS11_REG_B5CTRL1, + S2MPS11_REG_BUCK5_SW, + S2MPS11_REG_B5CTRL2, + S2MPS11_REG_B5CTRL3, + S2MPS11_REG_B5CTRL4, + S2MPS11_REG_B5CTRL5, + S2MPS11_REG_B6CTRL1, + S2MPS11_REG_B6CTRL2, + S2MPS11_REG_B7CTRL1, + S2MPS11_REG_B7CTRL2, + S2MPS11_REG_B8CTRL1, + S2MPS11_REG_B8CTRL2, + S2MPS11_REG_B9CTRL1, + S2MPS11_REG_B9CTRL2, + S2MPS11_REG_B10CTRL1, + S2MPS11_REG_B10CTRL2, + S2MPS11_REG_L1CTRL, + S2MPS11_REG_L2CTRL, + S2MPS11_REG_L3CTRL, + S2MPS11_REG_L4CTRL, + S2MPS11_REG_L5CTRL, + S2MPS11_REG_L6CTRL, + S2MPS11_REG_L7CTRL, + S2MPS11_REG_L8CTRL, + S2MPS11_REG_L9CTRL, + S2MPS11_REG_L10CTRL, + S2MPS11_REG_L11CTRL, + S2MPS11_REG_L12CTRL, + S2MPS11_REG_L13CTRL, + S2MPS11_REG_L14CTRL, + S2MPS11_REG_L15CTRL, + S2MPS11_REG_L16CTRL, + S2MPS11_REG_L17CTRL, + S2MPS11_REG_L18CTRL, + S2MPS11_REG_L19CTRL, + S2MPS11_REG_L20CTRL, + S2MPS11_REG_L21CTRL, + S2MPS11_REG_L22CTRL, + S2MPS11_REG_L23CTRL, + S2MPS11_REG_L24CTRL, + S2MPS11_REG_L25CTRL, + S2MPS11_REG_L26CTRL, + S2MPS11_REG_L27CTRL, + S2MPS11_REG_L28CTRL, + S2MPS11_REG_L29CTRL, + S2MPS11_REG_L30CTRL, + S2MPS11_REG_L31CTRL, + S2MPS11_REG_L32CTRL, + S2MPS11_REG_L33CTRL, + S2MPS11_REG_L34CTRL, + S2MPS11_REG_L35CTRL, + S2MPS11_REG_L36CTRL, + S2MPS11_REG_L37CTRL, + S2MPS11_REG_L38CTRL, +}; + +/* S2MPS11 regulator ids */ +enum s2mps11_regulators { + S2MPS11_LDO1, + S2MPS11_LDO2, + S2MPS11_LDO3, + S2MPS11_LDO4, + S2MPS11_LDO5, + S2MPS11_LDO6, + S2MPS11_LDO7, + S2MPS11_LDO8, + S2MPS11_LDO9, + S2MPS11_LDO10, + S2MPS11_LDO11, + S2MPS11_LDO12, + S2MPS11_LDO13, + S2MPS11_LDO14, + S2MPS11_LDO15, + S2MPS11_LDO16, + S2MPS11_LDO17, + S2MPS11_LDO18, + S2MPS11_LDO19, + S2MPS11_LDO20, + S2MPS11_LDO21, + S2MPS11_LDO22, + S2MPS11_LDO23, + S2MPS11_LDO24, + S2MPS11_LDO25, + S2MPS11_LDO26, + S2MPS11_LDO27, + S2MPS11_LDO28, + S2MPS11_LDO29, + S2MPS11_LDO30, + S2MPS11_LDO31, + S2MPS11_LDO32, + S2MPS11_LDO33, + S2MPS11_LDO34, + S2MPS11_LDO35, + S2MPS11_LDO36, + S2MPS11_LDO37, + S2MPS11_LDO38, + S2MPS11_BUCK1, + S2MPS11_BUCK2, + S2MPS11_BUCK3, + S2MPS11_BUCK4, + S2MPS11_BUCK5, + S2MPS11_BUCK6, + S2MPS11_BUCK7, + S2MPS11_BUCK8, + S2MPS11_BUCK9, + S2MPS11_BUCK10, + + S2MPS11_REGULATOR_MAX, +}; + +#define S2MPS11_LDO_VSEL_MASK 0x3F +#define S2MPS11_BUCK_VSEL_MASK 0xFF +#define S2MPS11_BUCK9_VSEL_MASK 0x1F +#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT) +#define S2MPS11_ENABLE_SHIFT 0x06 +#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1) +#define 
S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) +#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1) +#define S2MPS11_RAMP_DELAY 25000 /* uV/us */ + +#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4) + +#define S2MPS11_BUCK2_RAMP_SHIFT 6 +#define S2MPS11_BUCK34_RAMP_SHIFT 4 +#define S2MPS11_BUCK5_RAMP_SHIFT 6 +#define S2MPS11_BUCK16_RAMP_SHIFT 4 +#define S2MPS11_BUCK7810_RAMP_SHIFT 2 +#define S2MPS11_BUCK9_RAMP_SHIFT 0 +#define S2MPS11_BUCK2_RAMP_EN_SHIFT 3 +#define S2MPS11_BUCK3_RAMP_EN_SHIFT 2 +#define S2MPS11_BUCK4_RAMP_EN_SHIFT 1 +#define S2MPS11_BUCK6_RAMP_EN_SHIFT 0 +#define S2MPS11_PMIC_EN_SHIFT 6 + +#endif /* __LINUX_MFD_S2MPS11_H */ diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h new file mode 100644 index 000000000..239e977ba --- /dev/null +++ b/include/linux/mfd/samsung/s2mps13.h @@ -0,0 +1,189 @@ +/* + * s2mps13.h + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_MFD_S2MPS13_H +#define __LINUX_MFD_S2MPS13_H + +/* S2MPS13 registers */ +enum s2mps13_reg { + S2MPS13_REG_ID, + S2MPS13_REG_INT1, + S2MPS13_REG_INT2, + S2MPS13_REG_INT3, + S2MPS13_REG_INT1M, + S2MPS13_REG_INT2M, + S2MPS13_REG_INT3M, + S2MPS13_REG_ST1, + S2MPS13_REG_ST2, + S2MPS13_REG_PWRONSRC, + S2MPS13_REG_OFFSRC, + S2MPS13_REG_BU_CHG, + S2MPS13_REG_RTCCTRL, + S2MPS13_REG_CTRL1, + S2MPS13_REG_CTRL2, + S2MPS13_REG_RSVD1, + S2MPS13_REG_RSVD2, + S2MPS13_REG_RSVD3, + S2MPS13_REG_RSVD4, + S2MPS13_REG_RSVD5, + S2MPS13_REG_RSVD6, + S2MPS13_REG_CTRL3, + S2MPS13_REG_RSVD7, + S2MPS13_REG_RSVD8, + S2MPS13_REG_WRSTBI, + S2MPS13_REG_B1CTRL, + S2MPS13_REG_B1OUT, + S2MPS13_REG_B2CTRL, + S2MPS13_REG_B2OUT, + S2MPS13_REG_B3CTRL, + S2MPS13_REG_B3OUT, + S2MPS13_REG_B4CTRL, + S2MPS13_REG_B4OUT, + S2MPS13_REG_B5CTRL, + S2MPS13_REG_B5OUT, + S2MPS13_REG_B6CTRL, + S2MPS13_REG_B6OUT, + S2MPS13_REG_B7CTRL, + S2MPS13_REG_B7SW, + S2MPS13_REG_B7OUT, + S2MPS13_REG_B8CTRL, + S2MPS13_REG_B8OUT, + S2MPS13_REG_B9CTRL, + S2MPS13_REG_B9OUT, + S2MPS13_REG_B10CTRL, + S2MPS13_REG_B10OUT, + S2MPS13_REG_BB1CTRL, + S2MPS13_REG_BB1OUT, + S2MPS13_REG_BUCK_RAMP1, + S2MPS13_REG_BUCK_RAMP2, + S2MPS13_REG_LDO_DVS1, + S2MPS13_REG_LDO_DVS2, + S2MPS13_REG_LDO_DVS3, + S2MPS13_REG_B6OUT2, + S2MPS13_REG_L1CTRL, + S2MPS13_REG_L2CTRL, + S2MPS13_REG_L3CTRL, + S2MPS13_REG_L4CTRL, + S2MPS13_REG_L5CTRL, + S2MPS13_REG_L6CTRL, + S2MPS13_REG_L7CTRL, + S2MPS13_REG_L8CTRL, + S2MPS13_REG_L9CTRL, + S2MPS13_REG_L10CTRL, + S2MPS13_REG_L11CTRL, + S2MPS13_REG_L12CTRL, + S2MPS13_REG_L13CTRL, + S2MPS13_REG_L14CTRL, + S2MPS13_REG_L15CTRL, + S2MPS13_REG_L16CTRL, + S2MPS13_REG_L17CTRL, + S2MPS13_REG_L18CTRL, + S2MPS13_REG_L19CTRL, + S2MPS13_REG_L20CTRL, + S2MPS13_REG_L21CTRL, + S2MPS13_REG_L22CTRL, + S2MPS13_REG_L23CTRL, + S2MPS13_REG_L24CTRL, + S2MPS13_REG_L25CTRL, + S2MPS13_REG_L26CTRL, + S2MPS13_REG_L27CTRL, + S2MPS13_REG_L28CTRL, + S2MPS13_REG_L29CTRL, + S2MPS13_REG_L30CTRL, + S2MPS13_REG_L31CTRL, + S2MPS13_REG_L32CTRL, + S2MPS13_REG_L33CTRL, + S2MPS13_REG_L34CTRL, + 
S2MPS13_REG_L35CTRL, + S2MPS13_REG_L36CTRL, + S2MPS13_REG_L37CTRL, + S2MPS13_REG_L38CTRL, + S2MPS13_REG_L39CTRL, + S2MPS13_REG_L40CTRL, + S2MPS13_REG_LDODSCH1, + S2MPS13_REG_LDODSCH2, + S2MPS13_REG_LDODSCH3, + S2MPS13_REG_LDODSCH4, + S2MPS13_REG_LDODSCH5, +}; + +/* regulator ids */ +enum s2mps13_regulators { + S2MPS13_LDO1, + S2MPS13_LDO2, + S2MPS13_LDO3, + S2MPS13_LDO4, + S2MPS13_LDO5, + S2MPS13_LDO6, + S2MPS13_LDO7, + S2MPS13_LDO8, + S2MPS13_LDO9, + S2MPS13_LDO10, + S2MPS13_LDO11, + S2MPS13_LDO12, + S2MPS13_LDO13, + S2MPS13_LDO14, + S2MPS13_LDO15, + S2MPS13_LDO16, + S2MPS13_LDO17, + S2MPS13_LDO18, + S2MPS13_LDO19, + S2MPS13_LDO20, + S2MPS13_LDO21, + S2MPS13_LDO22, + S2MPS13_LDO23, + S2MPS13_LDO24, + S2MPS13_LDO25, + S2MPS13_LDO26, + S2MPS13_LDO27, + S2MPS13_LDO28, + S2MPS13_LDO29, + S2MPS13_LDO30, + S2MPS13_LDO31, + S2MPS13_LDO32, + S2MPS13_LDO33, + S2MPS13_LDO34, + S2MPS13_LDO35, + S2MPS13_LDO36, + S2MPS13_LDO37, + S2MPS13_LDO38, + S2MPS13_LDO39, + S2MPS13_LDO40, + S2MPS13_BUCK1, + S2MPS13_BUCK2, + S2MPS13_BUCK3, + S2MPS13_BUCK4, + S2MPS13_BUCK5, + S2MPS13_BUCK6, + S2MPS13_BUCK7, + S2MPS13_BUCK8, + S2MPS13_BUCK9, + S2MPS13_BUCK10, + + S2MPS13_REGULATOR_MAX, +}; + +/* + * Default ramp delay in uv/us. Datasheet says that ramp delay can be + * controlled however it does not specify which register is used for that. + * Let's assume that default value will be set. + */ +#define S2MPS13_BUCK_RAMP_DELAY 12500 +#define S2MPS13_REG_WRSTBI_MASK BIT(5) + +#endif /* __LINUX_MFD_S2MPS13_H */ diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h new file mode 100644 index 000000000..c92f4782a --- /dev/null +++ b/include/linux/mfd/samsung/s2mps14.h @@ -0,0 +1,146 @@ +/* + * s2mps14.h + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __LINUX_MFD_S2MPS14_H +#define __LINUX_MFD_S2MPS14_H + +/* S2MPS14 registers */ +enum s2mps14_reg { + S2MPS14_REG_ID, + S2MPS14_REG_INT1, + S2MPS14_REG_INT2, + S2MPS14_REG_INT3, + S2MPS14_REG_INT1M, + S2MPS14_REG_INT2M, + S2MPS14_REG_INT3M, + S2MPS14_REG_ST1, + S2MPS14_REG_ST2, + S2MPS14_REG_PWRONSRC, + S2MPS14_REG_OFFSRC, + S2MPS14_REG_BU_CHG, + S2MPS14_REG_RTCCTRL, + S2MPS14_REG_CTRL1, + S2MPS14_REG_CTRL2, + S2MPS14_REG_RSVD1, + S2MPS14_REG_RSVD2, + S2MPS14_REG_RSVD3, + S2MPS14_REG_RSVD4, + S2MPS14_REG_RSVD5, + S2MPS14_REG_RSVD6, + S2MPS14_REG_CTRL3, + S2MPS14_REG_RSVD7, + S2MPS14_REG_RSVD8, + S2MPS14_REG_WRSTBI, + S2MPS14_REG_B1CTRL1, + S2MPS14_REG_B1CTRL2, + S2MPS14_REG_B2CTRL1, + S2MPS14_REG_B2CTRL2, + S2MPS14_REG_B3CTRL1, + S2MPS14_REG_B3CTRL2, + S2MPS14_REG_B4CTRL1, + S2MPS14_REG_B4CTRL2, + S2MPS14_REG_B5CTRL1, + S2MPS14_REG_B5CTRL2, + S2MPS14_REG_L1CTRL, + S2MPS14_REG_L2CTRL, + S2MPS14_REG_L3CTRL, + S2MPS14_REG_L4CTRL, + S2MPS14_REG_L5CTRL, + S2MPS14_REG_L6CTRL, + S2MPS14_REG_L7CTRL, + S2MPS14_REG_L8CTRL, + S2MPS14_REG_L9CTRL, + S2MPS14_REG_L10CTRL, + S2MPS14_REG_L11CTRL, + S2MPS14_REG_L12CTRL, + S2MPS14_REG_L13CTRL, + S2MPS14_REG_L14CTRL, + S2MPS14_REG_L15CTRL, + S2MPS14_REG_L16CTRL, + S2MPS14_REG_L17CTRL, + S2MPS14_REG_L18CTRL, + S2MPS14_REG_L19CTRL, + S2MPS14_REG_L20CTRL, + S2MPS14_REG_L21CTRL, + S2MPS14_REG_L22CTRL, + S2MPS14_REG_L23CTRL, + S2MPS14_REG_L24CTRL, + S2MPS14_REG_L25CTRL, + S2MPS14_REG_LDODSCH1, + S2MPS14_REG_LDODSCH2, + S2MPS14_REG_LDODSCH3, +}; + +/* S2MPS14 regulator ids */ +enum s2mps14_regulators { + S2MPS14_LDO1, + S2MPS14_LDO2, + S2MPS14_LDO3, + S2MPS14_LDO4, + S2MPS14_LDO5, + S2MPS14_LDO6, + S2MPS14_LDO7, + S2MPS14_LDO8, + S2MPS14_LDO9, + S2MPS14_LDO10, + S2MPS14_LDO11, + S2MPS14_LDO12, + S2MPS14_LDO13, + S2MPS14_LDO14, + S2MPS14_LDO15, + S2MPS14_LDO16, + S2MPS14_LDO17, + S2MPS14_LDO18, + S2MPS14_LDO19, + S2MPS14_LDO20, + S2MPS14_LDO21, + S2MPS14_LDO22, + S2MPS14_LDO23, + S2MPS14_LDO24, + S2MPS14_LDO25, + S2MPS14_BUCK1, + S2MPS14_BUCK2, + S2MPS14_BUCK3, + S2MPS14_BUCK4, + S2MPS14_BUCK5, + + S2MPS14_REGULATOR_MAX, +}; + +/* Regulator constraints for BUCKx */ +#define S2MPS14_BUCK1235_START_SEL 0x20 +#define S2MPS14_BUCK4_START_SEL 0x40 +/* + * Default ramp delay in uv/us. Datasheet says that ramp delay can be + * controlled however it does not specify which register is used for that. + * Let's assume that default value will be set. 
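Because the ramp delay is given in uV/us, its practical use is computing how long to wait after a voltage change: delta_uV divided by the ramp rate. A one-line illustration using the constant defined just below and DIV_ROUND_UP from <linux/kernel.h>; in practice the regulator framework derives this itself once regulator_desc.ramp_delay is set.

	/* Sketch: settle time in microseconds for a voltage step of delta_uv. */
	static unsigned int s2mps14_buck_settle_us(unsigned int delta_uv)
	{
		return DIV_ROUND_UP(delta_uv, S2MPS14_BUCK_RAMP_DELAY);
	}

	/* Example: a 300000 uV step at 12500 uV/us needs 24 us. */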
+ */ +#define S2MPS14_BUCK_RAMP_DELAY 12500 + +#define S2MPS14_LDO_VSEL_MASK 0x3F +#define S2MPS14_BUCK_VSEL_MASK 0xFF +#define S2MPS14_ENABLE_MASK (0x03 << S2MPS14_ENABLE_SHIFT) +#define S2MPS14_ENABLE_SHIFT 6 +/* On/Off controlled by PWREN */ +#define S2MPS14_ENABLE_SUSPEND (0x01 << S2MPS14_ENABLE_SHIFT) +/* On/Off controlled by LDO10EN or EMMCEN */ +#define S2MPS14_ENABLE_EXT_CONTROL (0x00 << S2MPS14_ENABLE_SHIFT) +#define S2MPS14_LDO_N_VOLTAGES (S2MPS14_LDO_VSEL_MASK + 1) +#define S2MPS14_BUCK_N_VOLTAGES (S2MPS14_BUCK_VSEL_MASK + 1) + +#endif /* __LINUX_MFD_S2MPS14_H */ diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h new file mode 100644 index 000000000..36d35287c --- /dev/null +++ b/include/linux/mfd/samsung/s2mps15.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2015 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_MFD_S2MPS15_H +#define __LINUX_MFD_S2MPS15_H + +/* S2MPS15 registers */ +enum s2mps15_reg { + S2MPS15_REG_ID, + S2MPS15_REG_INT1, + S2MPS15_REG_INT2, + S2MPS15_REG_INT3, + S2MPS15_REG_INT1M, + S2MPS15_REG_INT2M, + S2MPS15_REG_INT3M, + S2MPS15_REG_ST1, + S2MPS15_REG_ST2, + S2MPS15_REG_PWRONSRC, + S2MPS15_REG_OFFSRC, + S2MPS15_REG_BU_CHG, + S2MPS15_REG_RTC_BUF, + S2MPS15_REG_CTRL1, + S2MPS15_REG_CTRL2, + S2MPS15_REG_RSVD1, + S2MPS15_REG_RSVD2, + S2MPS15_REG_RSVD3, + S2MPS15_REG_RSVD4, + S2MPS15_REG_RSVD5, + S2MPS15_REG_RSVD6, + S2MPS15_REG_CTRL3, + S2MPS15_REG_RSVD7, + S2MPS15_REG_RSVD8, + S2MPS15_REG_RSVD9, + S2MPS15_REG_B1CTRL1, + S2MPS15_REG_B1CTRL2, + S2MPS15_REG_B2CTRL1, + S2MPS15_REG_B2CTRL2, + S2MPS15_REG_B3CTRL1, + S2MPS15_REG_B3CTRL2, + S2MPS15_REG_B4CTRL1, + S2MPS15_REG_B4CTRL2, + S2MPS15_REG_B5CTRL1, + S2MPS15_REG_B5CTRL2, + S2MPS15_REG_B6CTRL1, + S2MPS15_REG_B6CTRL2, + S2MPS15_REG_B7CTRL1, + S2MPS15_REG_B7CTRL2, + S2MPS15_REG_B8CTRL1, + S2MPS15_REG_B8CTRL2, + S2MPS15_REG_B9CTRL1, + S2MPS15_REG_B9CTRL2, + S2MPS15_REG_B10CTRL1, + S2MPS15_REG_B10CTRL2, + S2MPS15_REG_BBCTRL1, + S2MPS15_REG_BBCTRL2, + S2MPS15_REG_BRAMP, + S2MPS15_REG_LDODVS1, + S2MPS15_REG_LDODVS2, + S2MPS15_REG_LDODVS3, + S2MPS15_REG_LDODVS4, + S2MPS15_REG_L1CTRL, + S2MPS15_REG_L2CTRL, + S2MPS15_REG_L3CTRL, + S2MPS15_REG_L4CTRL, + S2MPS15_REG_L5CTRL, + S2MPS15_REG_L6CTRL, + S2MPS15_REG_L7CTRL, + S2MPS15_REG_L8CTRL, + S2MPS15_REG_L9CTRL, + S2MPS15_REG_L10CTRL, + S2MPS15_REG_L11CTRL, + S2MPS15_REG_L12CTRL, + S2MPS15_REG_L13CTRL, + S2MPS15_REG_L14CTRL, + S2MPS15_REG_L15CTRL, + S2MPS15_REG_L16CTRL, + S2MPS15_REG_L17CTRL, + S2MPS15_REG_L18CTRL, + S2MPS15_REG_L19CTRL, + S2MPS15_REG_L20CTRL, + S2MPS15_REG_L21CTRL, + S2MPS15_REG_L22CTRL, + S2MPS15_REG_L23CTRL, + S2MPS15_REG_L24CTRL, + S2MPS15_REG_L25CTRL, + S2MPS15_REG_L26CTRL, + S2MPS15_REG_L27CTRL, + S2MPS15_REG_LDODSCH1, + S2MPS15_REG_LDODSCH2, + S2MPS15_REG_LDODSCH3, + S2MPS15_REG_LDODSCH4, +}; + +/* S2MPS15 regulator ids */ +enum s2mps15_regulators { + S2MPS15_LDO1, + S2MPS15_LDO2, + S2MPS15_LDO3, + S2MPS15_LDO4, + S2MPS15_LDO5, + S2MPS15_LDO6, + S2MPS15_LDO7, + S2MPS15_LDO8, + 
S2MPS15_LDO9, + S2MPS15_LDO10, + S2MPS15_LDO11, + S2MPS15_LDO12, + S2MPS15_LDO13, + S2MPS15_LDO14, + S2MPS15_LDO15, + S2MPS15_LDO16, + S2MPS15_LDO17, + S2MPS15_LDO18, + S2MPS15_LDO19, + S2MPS15_LDO20, + S2MPS15_LDO21, + S2MPS15_LDO22, + S2MPS15_LDO23, + S2MPS15_LDO24, + S2MPS15_LDO25, + S2MPS15_LDO26, + S2MPS15_LDO27, + S2MPS15_BUCK1, + S2MPS15_BUCK2, + S2MPS15_BUCK3, + S2MPS15_BUCK4, + S2MPS15_BUCK5, + S2MPS15_BUCK6, + S2MPS15_BUCK7, + S2MPS15_BUCK8, + S2MPS15_BUCK9, + S2MPS15_BUCK10, + S2MPS15_BUCK11, + S2MPS15_REGULATOR_MAX, +}; + +#define S2MPS15_LDO_VSEL_MASK (0x3F) +#define S2MPS15_BUCK_VSEL_MASK (0xFF) + +#define S2MPS15_ENABLE_SHIFT (0x06) +#define S2MPS15_ENABLE_MASK (0x03 << S2MPS15_ENABLE_SHIFT) + +#define S2MPS15_LDO_N_VOLTAGES (S2MPS15_LDO_VSEL_MASK + 1) +#define S2MPS15_BUCK_N_VOLTAGES (S2MPS15_BUCK_VSEL_MASK + 1) + +#endif /* __LINUX_MFD_S2MPS15_H */ diff --git a/include/linux/mfd/samsung/s2mpu02.h b/include/linux/mfd/samsung/s2mpu02.h new file mode 100644 index 000000000..47ae9bc58 --- /dev/null +++ b/include/linux/mfd/samsung/s2mpu02.h @@ -0,0 +1,201 @@ +/* + * s2mpu02.h + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_MFD_S2MPU02_H +#define __LINUX_MFD_S2MPU02_H + +/* S2MPU02 registers */ +enum S2MPU02_reg { + S2MPU02_REG_ID, + S2MPU02_REG_INT1, + S2MPU02_REG_INT2, + S2MPU02_REG_INT3, + S2MPU02_REG_INT1M, + S2MPU02_REG_INT2M, + S2MPU02_REG_INT3M, + S2MPU02_REG_ST1, + S2MPU02_REG_ST2, + S2MPU02_REG_PWRONSRC, + S2MPU02_REG_OFFSRC, + S2MPU02_REG_BU_CHG, + S2MPU02_REG_RTCCTRL, + S2MPU02_REG_PMCTRL1, + S2MPU02_REG_RSVD1, + S2MPU02_REG_RSVD2, + S2MPU02_REG_RSVD3, + S2MPU02_REG_RSVD4, + S2MPU02_REG_RSVD5, + S2MPU02_REG_RSVD6, + S2MPU02_REG_RSVD7, + S2MPU02_REG_WRSTEN, + S2MPU02_REG_RSVD8, + S2MPU02_REG_RSVD9, + S2MPU02_REG_RSVD10, + S2MPU02_REG_B1CTRL1, + S2MPU02_REG_B1CTRL2, + S2MPU02_REG_B2CTRL1, + S2MPU02_REG_B2CTRL2, + S2MPU02_REG_B3CTRL1, + S2MPU02_REG_B3CTRL2, + S2MPU02_REG_B4CTRL1, + S2MPU02_REG_B4CTRL2, + S2MPU02_REG_B5CTRL1, + S2MPU02_REG_B5CTRL2, + S2MPU02_REG_B5CTRL3, + S2MPU02_REG_B5CTRL4, + S2MPU02_REG_B5CTRL5, + S2MPU02_REG_B6CTRL1, + S2MPU02_REG_B6CTRL2, + S2MPU02_REG_B7CTRL1, + S2MPU02_REG_B7CTRL2, + S2MPU02_REG_RAMP1, + S2MPU02_REG_RAMP2, + S2MPU02_REG_L1CTRL, + S2MPU02_REG_L2CTRL1, + S2MPU02_REG_L2CTRL2, + S2MPU02_REG_L2CTRL3, + S2MPU02_REG_L2CTRL4, + S2MPU02_REG_L3CTRL, + S2MPU02_REG_L4CTRL, + S2MPU02_REG_L5CTRL, + S2MPU02_REG_L6CTRL, + S2MPU02_REG_L7CTRL, + S2MPU02_REG_L8CTRL, + S2MPU02_REG_L9CTRL, + S2MPU02_REG_L10CTRL, + S2MPU02_REG_L11CTRL, + S2MPU02_REG_L12CTRL, + S2MPU02_REG_L13CTRL, + S2MPU02_REG_L14CTRL, + S2MPU02_REG_L15CTRL, + S2MPU02_REG_L16CTRL, + S2MPU02_REG_L17CTRL, + S2MPU02_REG_L18CTRL, + S2MPU02_REG_L19CTRL, + S2MPU02_REG_L20CTRL, + S2MPU02_REG_L21CTRL, + S2MPU02_REG_L22CTRL, + S2MPU02_REG_L23CTRL, + S2MPU02_REG_L24CTRL, + S2MPU02_REG_L25CTRL, + S2MPU02_REG_L26CTRL, + S2MPU02_REG_L27CTRL, + S2MPU02_REG_L28CTRL, + S2MPU02_REG_LDODSCH1, + S2MPU02_REG_LDODSCH2, + 
S2MPU02_REG_LDODSCH3, + S2MPU02_REG_LDODSCH4, + S2MPU02_REG_SELMIF, + S2MPU02_REG_RSVD11, + S2MPU02_REG_RSVD12, + S2MPU02_REG_RSVD13, + S2MPU02_REG_DVSSEL, + S2MPU02_REG_DVSPTR, + S2MPU02_REG_DVSDATA, +}; + +/* S2MPU02 regulator ids */ +enum S2MPU02_regulators { + S2MPU02_LDO1, + S2MPU02_LDO2, + S2MPU02_LDO3, + S2MPU02_LDO4, + S2MPU02_LDO5, + S2MPU02_LDO6, + S2MPU02_LDO7, + S2MPU02_LDO8, + S2MPU02_LDO9, + S2MPU02_LDO10, + S2MPU02_LDO11, + S2MPU02_LDO12, + S2MPU02_LDO13, + S2MPU02_LDO14, + S2MPU02_LDO15, + S2MPU02_LDO16, + S2MPU02_LDO17, + S2MPU02_LDO18, + S2MPU02_LDO19, + S2MPU02_LDO20, + S2MPU02_LDO21, + S2MPU02_LDO22, + S2MPU02_LDO23, + S2MPU02_LDO24, + S2MPU02_LDO25, + S2MPU02_LDO26, + S2MPU02_LDO27, + S2MPU02_LDO28, + S2MPU02_BUCK1, + S2MPU02_BUCK2, + S2MPU02_BUCK3, + S2MPU02_BUCK4, + S2MPU02_BUCK5, + S2MPU02_BUCK6, + S2MPU02_BUCK7, + + S2MPU02_REGULATOR_MAX, +}; + +/* Regulator constraints for BUCKx */ +#define S2MPU02_BUCK1234_MIN_600MV 600000 +#define S2MPU02_BUCK5_MIN_1081_25MV 1081250 +#define S2MPU02_BUCK6_MIN_1700MV 1700000 +#define S2MPU02_BUCK7_MIN_900MV 900000 + +#define S2MPU02_BUCK1234_STEP_6_25MV 6250 +#define S2MPU02_BUCK5_STEP_6_25MV 6250 +#define S2MPU02_BUCK6_STEP_2_50MV 2500 +#define S2MPU02_BUCK7_STEP_6_25MV 6250 + +#define S2MPU02_BUCK1234_START_SEL 0x00 +#define S2MPU02_BUCK5_START_SEL 0x4D +#define S2MPU02_BUCK6_START_SEL 0x28 +#define S2MPU02_BUCK7_START_SEL 0x30 + +#define S2MPU02_BUCK_RAMP_DELAY 12500 + +/* Regulator constraints for different types of LDOx */ +#define S2MPU02_LDO_MIN_900MV 900000 +#define S2MPU02_LDO_MIN_1050MV 1050000 +#define S2MPU02_LDO_MIN_1600MV 1600000 +#define S2MPU02_LDO_STEP_12_5MV 12500 +#define S2MPU02_LDO_STEP_25MV 25000 +#define S2MPU02_LDO_STEP_50MV 50000 + +#define S2MPU02_LDO_GROUP1_START_SEL 0x8 +#define S2MPU02_LDO_GROUP2_START_SEL 0xA +#define S2MPU02_LDO_GROUP3_START_SEL 0x10 + +#define S2MPU02_LDO_VSEL_MASK 0x3F +#define S2MPU02_BUCK_VSEL_MASK 0xFF +#define S2MPU02_ENABLE_MASK (0x03 << S2MPU02_ENABLE_SHIFT) +#define S2MPU02_ENABLE_SHIFT 6 + +/* On/Off controlled by PWREN */ +#define S2MPU02_ENABLE_SUSPEND (0x01 << S2MPU02_ENABLE_SHIFT) +#define S2MPU02_DISABLE_SUSPEND (0x11 << S2MPU02_ENABLE_SHIFT) +#define S2MPU02_LDO_N_VOLTAGES (S2MPU02_LDO_VSEL_MASK + 1) +#define S2MPU02_BUCK_N_VOLTAGES (S2MPU02_BUCK_VSEL_MASK + 1) + +/* RAMP delay for BUCK1234*/ +#define S2MPU02_BUCK1_RAMP_SHIFT 6 +#define S2MPU02_BUCK2_RAMP_SHIFT 4 +#define S2MPU02_BUCK3_RAMP_SHIFT 2 +#define S2MPU02_BUCK4_RAMP_SHIFT 0 +#define S2MPU02_BUCK1234_RAMP_MASK 0x3 + +#endif /* __LINUX_MFD_S2MPU02_H */ diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h new file mode 100644 index 000000000..e025418e5 --- /dev/null +++ b/include/linux/mfd/samsung/s5m8763.h @@ -0,0 +1,96 @@ +/* s5m8763.h + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __LINUX_MFD_S5M8763_H +#define __LINUX_MFD_S5M8763_H + +/* S5M8763 registers */ +enum s5m8763_reg { + S5M8763_REG_IRQ1, + S5M8763_REG_IRQ2, + S5M8763_REG_IRQ3, + S5M8763_REG_IRQ4, + S5M8763_REG_IRQM1, + S5M8763_REG_IRQM2, + S5M8763_REG_IRQM3, + S5M8763_REG_IRQM4, + S5M8763_REG_STATUS1, + S5M8763_REG_STATUS2, + S5M8763_REG_STATUSM1, + S5M8763_REG_STATUSM2, + S5M8763_REG_CHGR1, + S5M8763_REG_CHGR2, + S5M8763_REG_LDO_ACTIVE_DISCHARGE1, + S5M8763_REG_LDO_ACTIVE_DISCHARGE2, + S5M8763_REG_BUCK_ACTIVE_DISCHARGE3, + S5M8763_REG_ONOFF1, + S5M8763_REG_ONOFF2, + S5M8763_REG_ONOFF3, + S5M8763_REG_ONOFF4, + S5M8763_REG_BUCK1_VOLTAGE1, + S5M8763_REG_BUCK1_VOLTAGE2, + S5M8763_REG_BUCK1_VOLTAGE3, + S5M8763_REG_BUCK1_VOLTAGE4, + S5M8763_REG_BUCK2_VOLTAGE1, + S5M8763_REG_BUCK2_VOLTAGE2, + S5M8763_REG_BUCK3, + S5M8763_REG_BUCK4, + S5M8763_REG_LDO1_LDO2, + S5M8763_REG_LDO3, + S5M8763_REG_LDO4, + S5M8763_REG_LDO5, + S5M8763_REG_LDO6, + S5M8763_REG_LDO7, + S5M8763_REG_LDO7_LDO8, + S5M8763_REG_LDO9_LDO10, + S5M8763_REG_LDO11, + S5M8763_REG_LDO12, + S5M8763_REG_LDO13, + S5M8763_REG_LDO14, + S5M8763_REG_LDO15, + S5M8763_REG_LDO16, + S5M8763_REG_BKCHR, + S5M8763_REG_LBCNFG1, + S5M8763_REG_LBCNFG2, +}; + +/* S5M8763 regulator ids */ +enum s5m8763_regulators { + S5M8763_LDO1, + S5M8763_LDO2, + S5M8763_LDO3, + S5M8763_LDO4, + S5M8763_LDO5, + S5M8763_LDO6, + S5M8763_LDO7, + S5M8763_LDO8, + S5M8763_LDO9, + S5M8763_LDO10, + S5M8763_LDO11, + S5M8763_LDO12, + S5M8763_LDO13, + S5M8763_LDO14, + S5M8763_LDO15, + S5M8763_LDO16, + S5M8763_BUCK1, + S5M8763_BUCK2, + S5M8763_BUCK3, + S5M8763_BUCK4, + S5M8763_AP_EN32KHZ, + S5M8763_CP_EN32KHZ, + S5M8763_ENCHGVI, + S5M8763_ESAFEUSB1, + S5M8763_ESAFEUSB2, +}; + +#define S5M8763_ENRAMP (1 << 4) +#endif /* __LINUX_MFD_S5M8763_H */ diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h new file mode 100644 index 000000000..243b58fec --- /dev/null +++ b/include/linux/mfd/samsung/s5m8767.h @@ -0,0 +1,211 @@ +/* s5m8767.h + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __LINUX_MFD_S5M8767_H +#define __LINUX_MFD_S5M8767_H + +/* S5M8767 registers */ +enum s5m8767_reg { + S5M8767_REG_ID, + S5M8767_REG_INT1, + S5M8767_REG_INT2, + S5M8767_REG_INT3, + S5M8767_REG_INT1M, + S5M8767_REG_INT2M, + S5M8767_REG_INT3M, + S5M8767_REG_STATUS1, + S5M8767_REG_STATUS2, + S5M8767_REG_STATUS3, + S5M8767_REG_CTRL1, + S5M8767_REG_CTRL2, + S5M8767_REG_LOWBAT1, + S5M8767_REG_LOWBAT2, + S5M8767_REG_BUCHG, + S5M8767_REG_DVSRAMP, + S5M8767_REG_DVSTIMER2 = 0x10, + S5M8767_REG_DVSTIMER3, + S5M8767_REG_DVSTIMER4, + S5M8767_REG_LDO1, + S5M8767_REG_LDO2, + S5M8767_REG_LDO3, + S5M8767_REG_LDO4, + S5M8767_REG_LDO5, + S5M8767_REG_LDO6, + S5M8767_REG_LDO7, + S5M8767_REG_LDO8, + S5M8767_REG_LDO9, + S5M8767_REG_LDO10, + S5M8767_REG_LDO11, + S5M8767_REG_LDO12, + S5M8767_REG_LDO13, + S5M8767_REG_LDO14 = 0x20, + S5M8767_REG_LDO15, + S5M8767_REG_LDO16, + S5M8767_REG_LDO17, + S5M8767_REG_LDO18, + S5M8767_REG_LDO19, + S5M8767_REG_LDO20, + S5M8767_REG_LDO21, + S5M8767_REG_LDO22, + S5M8767_REG_LDO23, + S5M8767_REG_LDO24, + S5M8767_REG_LDO25, + S5M8767_REG_LDO26, + S5M8767_REG_LDO27, + S5M8767_REG_LDO28, + S5M8767_REG_UVLO = 0x31, + S5M8767_REG_BUCK1CTRL1, + S5M8767_REG_BUCK1CTRL2, + S5M8767_REG_BUCK2CTRL, + S5M8767_REG_BUCK2DVS1, + S5M8767_REG_BUCK2DVS2, + S5M8767_REG_BUCK2DVS3, + S5M8767_REG_BUCK2DVS4, + S5M8767_REG_BUCK2DVS5, + S5M8767_REG_BUCK2DVS6, + S5M8767_REG_BUCK2DVS7, + S5M8767_REG_BUCK2DVS8, + S5M8767_REG_BUCK3CTRL, + S5M8767_REG_BUCK3DVS1, + S5M8767_REG_BUCK3DVS2, + S5M8767_REG_BUCK3DVS3, + S5M8767_REG_BUCK3DVS4, + S5M8767_REG_BUCK3DVS5, + S5M8767_REG_BUCK3DVS6, + S5M8767_REG_BUCK3DVS7, + S5M8767_REG_BUCK3DVS8, + S5M8767_REG_BUCK4CTRL, + S5M8767_REG_BUCK4DVS1, + S5M8767_REG_BUCK4DVS2, + S5M8767_REG_BUCK4DVS3, + S5M8767_REG_BUCK4DVS4, + S5M8767_REG_BUCK4DVS5, + S5M8767_REG_BUCK4DVS6, + S5M8767_REG_BUCK4DVS7, + S5M8767_REG_BUCK4DVS8, + S5M8767_REG_BUCK5CTRL1, + S5M8767_REG_BUCK5CTRL2, + S5M8767_REG_BUCK5CTRL3, + S5M8767_REG_BUCK5CTRL4, + S5M8767_REG_BUCK5CTRL5, + S5M8767_REG_BUCK6CTRL1, + S5M8767_REG_BUCK6CTRL2, + S5M8767_REG_BUCK7CTRL1, + S5M8767_REG_BUCK7CTRL2, + S5M8767_REG_BUCK8CTRL1, + S5M8767_REG_BUCK8CTRL2, + S5M8767_REG_BUCK9CTRL1, + S5M8767_REG_BUCK9CTRL2, + S5M8767_REG_LDO1CTRL, + S5M8767_REG_LDO2_1CTRL, + S5M8767_REG_LDO2_2CTRL, + S5M8767_REG_LDO2_3CTRL, + S5M8767_REG_LDO2_4CTRL, + S5M8767_REG_LDO3CTRL, + S5M8767_REG_LDO4CTRL, + S5M8767_REG_LDO5CTRL, + S5M8767_REG_LDO6CTRL, + S5M8767_REG_LDO7CTRL, + S5M8767_REG_LDO8CTRL, + S5M8767_REG_LDO9CTRL, + S5M8767_REG_LDO10CTRL, + S5M8767_REG_LDO11CTRL, + S5M8767_REG_LDO12CTRL, + S5M8767_REG_LDO13CTRL, + S5M8767_REG_LDO14CTRL, + S5M8767_REG_LDO15CTRL, + S5M8767_REG_LDO16CTRL, + S5M8767_REG_LDO17CTRL, + S5M8767_REG_LDO18CTRL, + S5M8767_REG_LDO19CTRL, + S5M8767_REG_LDO20CTRL, + S5M8767_REG_LDO21CTRL, + S5M8767_REG_LDO22CTRL, + S5M8767_REG_LDO23CTRL, + S5M8767_REG_LDO24CTRL, + S5M8767_REG_LDO25CTRL, + S5M8767_REG_LDO26CTRL, + S5M8767_REG_LDO27CTRL, + S5M8767_REG_LDO28CTRL, +}; + +/* S5M8767 regulator ids */ +enum s5m8767_regulators { + S5M8767_LDO1, + S5M8767_LDO2, + S5M8767_LDO3, + S5M8767_LDO4, + S5M8767_LDO5, + S5M8767_LDO6, + S5M8767_LDO7, + S5M8767_LDO8, + S5M8767_LDO9, + S5M8767_LDO10, + S5M8767_LDO11, + S5M8767_LDO12, + S5M8767_LDO13, + S5M8767_LDO14, + S5M8767_LDO15, + S5M8767_LDO16, + S5M8767_LDO17, + S5M8767_LDO18, + S5M8767_LDO19, + S5M8767_LDO20, + S5M8767_LDO21, + S5M8767_LDO22, + S5M8767_LDO23, + S5M8767_LDO24, + S5M8767_LDO25, + S5M8767_LDO26, + S5M8767_LDO27, + S5M8767_LDO28, + S5M8767_BUCK1, + 
S5M8767_BUCK2, + S5M8767_BUCK3, + S5M8767_BUCK4, + S5M8767_BUCK5, + S5M8767_BUCK6, + S5M8767_BUCK7, + S5M8767_BUCK8, + S5M8767_BUCK9, + S5M8767_AP_EN32KHZ, + S5M8767_CP_EN32KHZ, + + S5M8767_REG_MAX, +}; + +/* LDO_EN/BUCK_EN field in registers */ +#define S5M8767_ENCTRL_SHIFT 6 +#define S5M8767_ENCTRL_MASK (0x3 << S5M8767_ENCTRL_SHIFT) + +/* + * LDO_EN/BUCK_EN register value for controlling this Buck or LDO + * by GPIO (PWREN, BUCKEN). + */ +#define S5M8767_ENCTRL_USE_GPIO 0x1 + +/* + * Values for BUCK_RAMP field in DVS_RAMP register, matching raw values + * in mV/us. + */ +enum s5m8767_dvs_buck_ramp_values { + S5M8767_DVS_BUCK_RAMP_5 = 0x4, + S5M8767_DVS_BUCK_RAMP_10 = 0x9, + S5M8767_DVS_BUCK_RAMP_12_5 = 0xb, + S5M8767_DVS_BUCK_RAMP_25 = 0xd, + S5M8767_DVS_BUCK_RAMP_50 = 0xe, + S5M8767_DVS_BUCK_RAMP_100 = 0xf, +}; +#define S5M8767_DVS_BUCK_RAMP_SHIFT 4 +#define S5M8767_DVS_BUCK_RAMP_MASK (0xf << S5M8767_DVS_BUCK_RAMP_SHIFT) + +#endif /* __LINUX_MFD_S5M8767_H */ diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h new file mode 100644 index 000000000..674b45d5a --- /dev/null +++ b/include/linux/mfd/si476x-core.h @@ -0,0 +1,533 @@ +/* + * include/media/si476x-core.h -- Common definitions for si476x core + * device + * + * Copyright (C) 2012 Innovative Converged Devices(ICD) + * Copyright (C) 2013 Andrey Smirnov + * + * Author: Andrey Smirnov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + */ + +#ifndef SI476X_CORE_H +#define SI476X_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* Command Timeouts */ +#define SI476X_DEFAULT_TIMEOUT 100000 +#define SI476X_TIMEOUT_TUNE 700000 +#define SI476X_TIMEOUT_POWER_UP 330000 +#define SI476X_STATUS_POLL_US 0 + +/* -------------------- si476x-i2c.c ----------------------- */ + +enum si476x_freq_supported_chips { + SI476X_CHIP_SI4761 = 1, + SI476X_CHIP_SI4764, + SI476X_CHIP_SI4768, +}; + +enum si476x_part_revisions { + SI476X_REVISION_A10 = 0, + SI476X_REVISION_A20 = 1, + SI476X_REVISION_A30 = 2, +}; + +enum si476x_mfd_cells { + SI476X_RADIO_CELL = 0, + SI476X_CODEC_CELL, + SI476X_MFD_CELLS, +}; + +/** + * enum si476x_power_state - possible power state of the si476x + * device. + * + * @SI476X_POWER_DOWN: In this state all regulators are turned off + * and the reset line is pulled low. The device is completely + * inactive. + * @SI476X_POWER_UP_FULL: In this state all the power regualtors are + * turned on, reset line pulled high, IRQ line is enabled(polling is + * active for polling use scenario) and device is turned on with + * POWER_UP command. The device is ready to be used. + * @SI476X_POWER_INCONSISTENT: This state indicates that previous + * power down was inconsistent, meaning some of the regulators were + * not turned down and thus use of the device, without power-cycling + * is impossible. 
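/*
 * Illustrative sketch only (not from the upstream s5m8767 driver): one way a
 * driver could program a 25 mV/us DVS ramp with the BUCK_RAMP constants
 * defined above, assuming the regmap handle provided by the parent PMIC core.
 * The helper name is invented for the example.
 */
static inline int s5m8767_example_set_dvs_ramp_25(struct regmap *regmap)
{
	return regmap_update_bits(regmap, S5M8767_REG_DVSRAMP,
				  S5M8767_DVS_BUCK_RAMP_MASK,
				  S5M8767_DVS_BUCK_RAMP_25 <<
				  S5M8767_DVS_BUCK_RAMP_SHIFT);
}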
+ */ +enum si476x_power_state { + SI476X_POWER_DOWN = 0, + SI476X_POWER_UP_FULL = 1, + SI476X_POWER_INCONSISTENT = 2, +}; + +/** + * struct si476x_core - internal data structure representing the + * underlying "core" device which all the MFD cell-devices use. + * + * @client: Actual I2C client used to transfer commands to the chip. + * @chip_id: Last digit of the chip model(E.g. "1" for SI4761) + * @cells: MFD cell devices created by this driver. + * @cmd_lock: Mutex used to serialize all the requests to the core + * device. This filed should not be used directly. Instead + * si476x_core_lock()/si476x_core_unlock() should be used to get + * exclusive access to the "core" device. + * @users: Active users counter(Used by the radio cell) + * @rds_read_queue: Wait queue used to wait for RDS data. + * @rds_fifo: FIFO in which all the RDS data received from the chip is + * placed. + * @rds_fifo_drainer: Worker that drains on-chip RDS FIFO. + * @rds_drainer_is_working: Flag used for launching only one instance + * of the @rds_fifo_drainer. + * @rds_drainer_status_lock: Lock used to guard access to the + * @rds_drainer_is_working variable. + * @command: Wait queue for wainting on the command comapletion. + * @cts: Clear To Send flag set upon receiving first status with CTS + * set. + * @tuning: Wait queue used for wainting for tune/seek comand + * completion. + * @stc: Similar to @cts, but for the STC bit of the status value. + * @power_up_parameters: Parameters used as argument for POWER_UP + * command when the device is started. + * @state: Current power state of the device. + * @supplues: Structure containing handles to all power supplies used + * by the device (NULL ones are ignored). + * @gpio_reset: GPIO pin connectet to the RSTB pin of the chip. + * @pinmux: Chip's configurable pins configuration. + * @diversity_mode: Chips role when functioning in diversity mode. + * @status_monitor: Polling worker used in polling use case scenarion + * (when IRQ is not avalible). + * @revision: Chip's running firmware revision number(Used for correct + * command set support). + */ + +struct si476x_core { + struct i2c_client *client; + struct regmap *regmap; + int chip_id; + struct mfd_cell cells[SI476X_MFD_CELLS]; + + struct mutex cmd_lock; /* for serializing fm radio operations */ + atomic_t users; + + wait_queue_head_t rds_read_queue; + struct kfifo rds_fifo; + struct work_struct rds_fifo_drainer; + bool rds_drainer_is_working; + struct mutex rds_drainer_status_lock; + + wait_queue_head_t command; + atomic_t cts; + + wait_queue_head_t tuning; + atomic_t stc; + + struct si476x_power_up_args power_up_parameters; + + enum si476x_power_state power_state; + + struct regulator_bulk_data supplies[4]; + + int gpio_reset; + + struct si476x_pinmux pinmux; + enum si476x_phase_diversity_mode diversity_mode; + + atomic_t is_alive; + + struct delayed_work status_monitor; +#define SI476X_WORK_TO_CORE(w) container_of(to_delayed_work(w), \ + struct si476x_core, \ + status_monitor) + + int revision; + + int rds_fifo_depth; +}; + +static inline struct si476x_core *i2c_mfd_cell_to_core(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev->parent); + return i2c_get_clientdata(client); +} + + +/** + * si476x_core_lock() - lock the core device to get an exclusive access + * to it. + */ +static inline void si476x_core_lock(struct si476x_core *core) +{ + mutex_lock(&core->cmd_lock); +} + +/** + * si476x_core_unlock() - unlock the core device to relinquish an + * exclusive access to it. 
+ */ +static inline void si476x_core_unlock(struct si476x_core *core) +{ + mutex_unlock(&core->cmd_lock); +} + +/* *_TUNE_FREQ family of commands accept frequency in multiples of + 10kHz */ +static inline u16 hz_to_si476x(struct si476x_core *core, int freq) +{ + u16 result; + + switch (core->power_up_parameters.func) { + default: + case SI476X_FUNC_FM_RECEIVER: + result = freq / 10000; + break; + case SI476X_FUNC_AM_RECEIVER: + result = freq / 1000; + break; + } + + return result; +} + +static inline int si476x_to_hz(struct si476x_core *core, u16 freq) +{ + int result; + + switch (core->power_up_parameters.func) { + default: + case SI476X_FUNC_FM_RECEIVER: + result = freq * 10000; + break; + case SI476X_FUNC_AM_RECEIVER: + result = freq * 1000; + break; + } + + return result; +} + +/* Since the V4L2_TUNER_CAP_LOW flag is supplied, V4L2 subsystem + * mesures frequency in 62.5 Hz units */ + +static inline int hz_to_v4l2(int freq) +{ + return (freq * 10) / 625; +} + +static inline int v4l2_to_hz(int freq) +{ + return (freq * 625) / 10; +} + +static inline u16 v4l2_to_si476x(struct si476x_core *core, int freq) +{ + return hz_to_si476x(core, v4l2_to_hz(freq)); +} + +static inline int si476x_to_v4l2(struct si476x_core *core, u16 freq) +{ + return hz_to_v4l2(si476x_to_hz(core, freq)); +} + + + +/** + * struct si476x_func_info - structure containing result of the + * FUNC_INFO command. + * + * @firmware.major: Firmware major number. + * @firmware.minor[...]: Firmware minor numbers. + * @patch_id: + * @func: Mode tuner is working in. + */ +struct si476x_func_info { + struct { + u8 major, minor[2]; + } firmware; + u16 patch_id; + enum si476x_func func; +}; + +/** + * struct si476x_power_down_args - structure used to pass parameters + * to POWER_DOWN command + * + * @xosc: true - Power down, but leav oscillator running. + * false - Full power down. + */ +struct si476x_power_down_args { + bool xosc; +}; + +/** + * enum si476x_tunemode - enum representing possible tune modes for + * the chip. + * @SI476X_TM_VALIDATED_NORMAL_TUNE: Unconditionally stay on the new + * channel after tune, tune status is valid. + * @SI476X_TM_INVALIDATED_FAST_TUNE: Unconditionally stay in the new + * channel after tune, tune status invalid. + * @SI476X_TM_VALIDATED_AF_TUNE: Jump back to previous channel if + * metric thresholds are not met. + * @SI476X_TM_VALIDATED_AF_CHECK: Unconditionally jump back to the + * previous channel. + */ +enum si476x_tunemode { + SI476X_TM_VALIDATED_NORMAL_TUNE = 0, + SI476X_TM_INVALIDATED_FAST_TUNE = 1, + SI476X_TM_VALIDATED_AF_TUNE = 2, + SI476X_TM_VALIDATED_AF_CHECK = 3, +}; + +/** + * enum si476x_smoothmetrics - enum containing the possible setting fo + * audio transitioning of the chip + * @SI476X_SM_INITIALIZE_AUDIO: Initialize audio state to match this + * new channel + * @SI476X_SM_TRANSITION_AUDIO: Transition audio state from previous + * channel values to the new values + */ +enum si476x_smoothmetrics { + SI476X_SM_INITIALIZE_AUDIO = 0, + SI476X_SM_TRANSITION_AUDIO = 1, +}; + +/** + * struct si476x_rds_status_report - the structure representing the + * response to 'FM_RD_STATUS' command + * @rdstpptyint: Traffic program flag(TP) and/or program type(PTY) + * code has changed. + * @rdspiint: Program identification(PI) code has changed. + * @rdssyncint: RDS synchronization has changed. + * @rdsfifoint: RDS was received and the RDS FIFO has at least + * 'FM_RDS_INTERRUPT_FIFO_COUNT' elements in it. + * @tpptyvalid: TP flag and PTY code are valid falg. + * @pivalid: PI code is valid flag. 
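/*
 * Worked example of the frequency conversion helpers above (illustrative
 * numbers only): an FM station at 100 MHz, i.e. 100000000 Hz, becomes
 *   hz_to_si476x()  -> 100000000 / 10000   = 10000   (units of 10 kHz)
 *   hz_to_v4l2()    -> 100000000 * 10 / 625 = 1600000 (units of 62.5 Hz)
 * and v4l2_to_si476x(core, 1600000) round-trips back to the same 10000.
 */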
+ * @rdssync: RDS is currently synchronized. + * @rdsfifolost: On or more RDS groups have been lost/discarded flag. + * @tp: Current channel's TP flag. + * @pty: Current channel's PTY code. + * @pi: Current channel's PI code. + * @rdsfifoused: Number of blocks remaining in the RDS FIFO (0 if + * empty). + */ +struct si476x_rds_status_report { + bool rdstpptyint, rdspiint, rdssyncint, rdsfifoint; + bool tpptyvalid, pivalid, rdssync, rdsfifolost; + bool tp; + + u8 pty; + u16 pi; + + u8 rdsfifoused; + u8 ble[4]; + + struct v4l2_rds_data rds[4]; +}; + +struct si476x_rsq_status_args { + bool primary; + bool rsqack; + bool attune; + bool cancel; + bool stcack; +}; + +enum si476x_injside { + SI476X_INJSIDE_AUTO = 0, + SI476X_INJSIDE_LOW = 1, + SI476X_INJSIDE_HIGH = 2, +}; + +struct si476x_tune_freq_args { + bool zifsr; + bool hd; + enum si476x_injside injside; + int freq; + enum si476x_tunemode tunemode; + enum si476x_smoothmetrics smoothmetrics; + int antcap; +}; + +int si476x_core_stop(struct si476x_core *, bool); +int si476x_core_start(struct si476x_core *, bool); +int si476x_core_set_power_state(struct si476x_core *, enum si476x_power_state); +bool si476x_core_has_am(struct si476x_core *); +bool si476x_core_has_diversity(struct si476x_core *); +bool si476x_core_is_a_secondary_tuner(struct si476x_core *); +bool si476x_core_is_a_primary_tuner(struct si476x_core *); +bool si476x_core_is_in_am_receiver_mode(struct si476x_core *core); +bool si476x_core_is_powered_up(struct si476x_core *core); + +enum si476x_i2c_type { + SI476X_I2C_SEND, + SI476X_I2C_RECV +}; + +int si476x_core_i2c_xfer(struct si476x_core *, + enum si476x_i2c_type, + char *, int); + + +/* -------------------- si476x-cmd.c ----------------------- */ + +int si476x_core_cmd_func_info(struct si476x_core *, struct si476x_func_info *); +int si476x_core_cmd_set_property(struct si476x_core *, u16, u16); +int si476x_core_cmd_get_property(struct si476x_core *, u16); +int si476x_core_cmd_dig_audio_pin_cfg(struct si476x_core *, + enum si476x_dclk_config, + enum si476x_dfs_config, + enum si476x_dout_config, + enum si476x_xout_config); +int si476x_core_cmd_zif_pin_cfg(struct si476x_core *, + enum si476x_iqclk_config, + enum si476x_iqfs_config, + enum si476x_iout_config, + enum si476x_qout_config); +int si476x_core_cmd_ic_link_gpo_ctl_pin_cfg(struct si476x_core *, + enum si476x_icin_config, + enum si476x_icip_config, + enum si476x_icon_config, + enum si476x_icop_config); +int si476x_core_cmd_ana_audio_pin_cfg(struct si476x_core *, + enum si476x_lrout_config); +int si476x_core_cmd_intb_pin_cfg(struct si476x_core *, enum si476x_intb_config, + enum si476x_a1_config); +int si476x_core_cmd_fm_seek_start(struct si476x_core *, bool, bool); +int si476x_core_cmd_am_seek_start(struct si476x_core *, bool, bool); +int si476x_core_cmd_fm_rds_status(struct si476x_core *, bool, bool, bool, + struct si476x_rds_status_report *); +int si476x_core_cmd_fm_rds_blockcount(struct si476x_core *, bool, + struct si476x_rds_blockcount_report *); +int si476x_core_cmd_fm_tune_freq(struct si476x_core *, + struct si476x_tune_freq_args *); +int si476x_core_cmd_am_tune_freq(struct si476x_core *, + struct si476x_tune_freq_args *); +int si476x_core_cmd_am_rsq_status(struct si476x_core *, + struct si476x_rsq_status_args *, + struct si476x_rsq_status_report *); +int si476x_core_cmd_fm_rsq_status(struct si476x_core *, + struct si476x_rsq_status_args *, + struct si476x_rsq_status_report *); +int si476x_core_cmd_power_up(struct si476x_core *, + struct si476x_power_up_args *); +int 
si476x_core_cmd_power_down(struct si476x_core *, + struct si476x_power_down_args *); +int si476x_core_cmd_fm_phase_div_status(struct si476x_core *); +int si476x_core_cmd_fm_phase_diversity(struct si476x_core *, + enum si476x_phase_diversity_mode); + +int si476x_core_cmd_fm_acf_status(struct si476x_core *, + struct si476x_acf_status_report *); +int si476x_core_cmd_am_acf_status(struct si476x_core *, + struct si476x_acf_status_report *); +int si476x_core_cmd_agc_status(struct si476x_core *, + struct si476x_agc_status_report *); + +enum si476x_power_grid_type { + SI476X_POWER_GRID_50HZ = 0, + SI476X_POWER_GRID_60HZ, +}; + +/* Properties */ + +enum si476x_interrupt_flags { + SI476X_STCIEN = (1 << 0), + SI476X_ACFIEN = (1 << 1), + SI476X_RDSIEN = (1 << 2), + SI476X_RSQIEN = (1 << 3), + + SI476X_ERRIEN = (1 << 6), + SI476X_CTSIEN = (1 << 7), + + SI476X_STCREP = (1 << 8), + SI476X_ACFREP = (1 << 9), + SI476X_RDSREP = (1 << 10), + SI476X_RSQREP = (1 << 11), +}; + +enum si476x_rdsint_sources { + SI476X_RDSTPPTY = (1 << 4), + SI476X_RDSPI = (1 << 3), + SI476X_RDSSYNC = (1 << 1), + SI476X_RDSRECV = (1 << 0), +}; + +enum si476x_status_response_bits { + SI476X_CTS = (1 << 7), + SI476X_ERR = (1 << 6), + /* Status response for WB receiver */ + SI476X_WB_ASQ_INT = (1 << 4), + SI476X_RSQ_INT = (1 << 3), + /* Status response for FM receiver */ + SI476X_FM_RDS_INT = (1 << 2), + SI476X_ACF_INT = (1 << 1), + SI476X_STC_INT = (1 << 0), +}; + +/* -------------------- si476x-prop.c ----------------------- */ + +enum si476x_common_receiver_properties { + SI476X_PROP_INT_CTL_ENABLE = 0x0000, + SI476X_PROP_DIGITAL_IO_INPUT_SAMPLE_RATE = 0x0200, + SI476X_PROP_DIGITAL_IO_INPUT_FORMAT = 0x0201, + SI476X_PROP_DIGITAL_IO_OUTPUT_SAMPLE_RATE = 0x0202, + SI476X_PROP_DIGITAL_IO_OUTPUT_FORMAT = 0x0203, + + SI476X_PROP_SEEK_BAND_BOTTOM = 0x1100, + SI476X_PROP_SEEK_BAND_TOP = 0x1101, + SI476X_PROP_SEEK_FREQUENCY_SPACING = 0x1102, + + SI476X_PROP_VALID_MAX_TUNE_ERROR = 0x2000, + SI476X_PROP_VALID_SNR_THRESHOLD = 0x2003, + SI476X_PROP_VALID_RSSI_THRESHOLD = 0x2004, +}; + +enum si476x_am_receiver_properties { + SI476X_PROP_AUDIO_PWR_LINE_FILTER = 0x0303, +}; + +enum si476x_fm_receiver_properties { + SI476X_PROP_AUDIO_DEEMPHASIS = 0x0302, + + SI476X_PROP_FM_RDS_INTERRUPT_SOURCE = 0x4000, + SI476X_PROP_FM_RDS_INTERRUPT_FIFO_COUNT = 0x4001, + SI476X_PROP_FM_RDS_CONFIG = 0x4002, +}; + +enum si476x_prop_audio_pwr_line_filter_bits { + SI476X_PROP_PWR_HARMONICS_MASK = 0x001f, + SI476X_PROP_PWR_GRID_MASK = 0x0100, + SI476X_PROP_PWR_ENABLE_MASK = 0x0200, + SI476X_PROP_PWR_GRID_50HZ = 0x0000, + SI476X_PROP_PWR_GRID_60HZ = 0x0100, +}; + +enum si476x_prop_fm_rds_config_bits { + SI476X_PROP_RDSEN_MASK = 0x1, + SI476X_PROP_RDSEN = 0x1, +}; + + +struct regmap *devm_regmap_init_si476x(struct si476x_core *); + +#endif /* SI476X_CORE_H */ diff --git a/include/linux/mfd/si476x-platform.h b/include/linux/mfd/si476x-platform.h new file mode 100644 index 000000000..88bb93b7a --- /dev/null +++ b/include/linux/mfd/si476x-platform.h @@ -0,0 +1,267 @@ +/* + * include/media/si476x-platform.h -- Platform data specific definitions + * + * Copyright (C) 2013 Andrey Smirnov + * + * Author: Andrey Smirnov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
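/*
 * Rough usage sketch (not taken from the upstream driver): composing the
 * audio power-line noise filter property defined above and writing it with
 * si476x_core_cmd_set_property().  The harmonics count of 2 is an arbitrary
 * example value; the helper name is invented.
 */
static inline int si476x_example_enable_50hz_filter(struct si476x_core *core)
{
	u16 val = SI476X_PROP_PWR_ENABLE_MASK | SI476X_PROP_PWR_GRID_50HZ |
		  (2 & SI476X_PROP_PWR_HARMONICS_MASK);

	return si476x_core_cmd_set_property(core,
					    SI476X_PROP_AUDIO_PWR_LINE_FILTER,
					    val);
}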
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + */ + +#ifndef __SI476X_PLATFORM_H__ +#define __SI476X_PLATFORM_H__ + +/* It is possible to select one of the four adresses using pins A0 + * and A1 on SI476x */ +#define SI476X_I2C_ADDR_1 0x60 +#define SI476X_I2C_ADDR_2 0x61 +#define SI476X_I2C_ADDR_3 0x62 +#define SI476X_I2C_ADDR_4 0x63 + +enum si476x_iqclk_config { + SI476X_IQCLK_NOOP = 0, + SI476X_IQCLK_TRISTATE = 1, + SI476X_IQCLK_IQ = 21, +}; +enum si476x_iqfs_config { + SI476X_IQFS_NOOP = 0, + SI476X_IQFS_TRISTATE = 1, + SI476X_IQFS_IQ = 21, +}; +enum si476x_iout_config { + SI476X_IOUT_NOOP = 0, + SI476X_IOUT_TRISTATE = 1, + SI476X_IOUT_OUTPUT = 22, +}; +enum si476x_qout_config { + SI476X_QOUT_NOOP = 0, + SI476X_QOUT_TRISTATE = 1, + SI476X_QOUT_OUTPUT = 22, +}; + +enum si476x_dclk_config { + SI476X_DCLK_NOOP = 0, + SI476X_DCLK_TRISTATE = 1, + SI476X_DCLK_DAUDIO = 10, +}; + +enum si476x_dfs_config { + SI476X_DFS_NOOP = 0, + SI476X_DFS_TRISTATE = 1, + SI476X_DFS_DAUDIO = 10, +}; + +enum si476x_dout_config { + SI476X_DOUT_NOOP = 0, + SI476X_DOUT_TRISTATE = 1, + SI476X_DOUT_I2S_OUTPUT = 12, + SI476X_DOUT_I2S_INPUT = 13, +}; + +enum si476x_xout_config { + SI476X_XOUT_NOOP = 0, + SI476X_XOUT_TRISTATE = 1, + SI476X_XOUT_I2S_INPUT = 13, + SI476X_XOUT_MODE_SELECT = 23, +}; + +enum si476x_icin_config { + SI476X_ICIN_NOOP = 0, + SI476X_ICIN_TRISTATE = 1, + SI476X_ICIN_GPO1_HIGH = 2, + SI476X_ICIN_GPO1_LOW = 3, + SI476X_ICIN_IC_LINK = 30, +}; + +enum si476x_icip_config { + SI476X_ICIP_NOOP = 0, + SI476X_ICIP_TRISTATE = 1, + SI476X_ICIP_GPO2_HIGH = 2, + SI476X_ICIP_GPO2_LOW = 3, + SI476X_ICIP_IC_LINK = 30, +}; + +enum si476x_icon_config { + SI476X_ICON_NOOP = 0, + SI476X_ICON_TRISTATE = 1, + SI476X_ICON_I2S = 10, + SI476X_ICON_IC_LINK = 30, +}; + +enum si476x_icop_config { + SI476X_ICOP_NOOP = 0, + SI476X_ICOP_TRISTATE = 1, + SI476X_ICOP_I2S = 10, + SI476X_ICOP_IC_LINK = 30, +}; + + +enum si476x_lrout_config { + SI476X_LROUT_NOOP = 0, + SI476X_LROUT_TRISTATE = 1, + SI476X_LROUT_AUDIO = 2, + SI476X_LROUT_MPX = 3, +}; + + +enum si476x_intb_config { + SI476X_INTB_NOOP = 0, + SI476X_INTB_TRISTATE = 1, + SI476X_INTB_DAUDIO = 10, + SI476X_INTB_IRQ = 40, +}; + +enum si476x_a1_config { + SI476X_A1_NOOP = 0, + SI476X_A1_TRISTATE = 1, + SI476X_A1_IRQ = 40, +}; + + +struct si476x_pinmux { + enum si476x_dclk_config dclk; + enum si476x_dfs_config dfs; + enum si476x_dout_config dout; + enum si476x_xout_config xout; + + enum si476x_iqclk_config iqclk; + enum si476x_iqfs_config iqfs; + enum si476x_iout_config iout; + enum si476x_qout_config qout; + + enum si476x_icin_config icin; + enum si476x_icip_config icip; + enum si476x_icon_config icon; + enum si476x_icop_config icop; + + enum si476x_lrout_config lrout; + + enum si476x_intb_config intb; + enum si476x_a1_config a1; +}; + +enum si476x_ibias6x { + SI476X_IBIAS6X_OTHER = 0, + SI476X_IBIAS6X_RCVR1_NON_4MHZ_CLK = 1, +}; + +enum si476x_xstart { + SI476X_XSTART_MULTIPLE_TUNER = 0x11, + SI476X_XSTART_NORMAL = 0x77, +}; + +enum si476x_freq { + SI476X_FREQ_4_MHZ = 0, + SI476X_FREQ_37P209375_MHZ = 1, + SI476X_FREQ_36P4_MHZ = 2, + SI476X_FREQ_37P8_MHZ = 3, +}; + +enum si476x_xmode { + SI476X_XMODE_CRYSTAL_RCVR1 = 1, + SI476X_XMODE_EXT_CLOCK = 2, + SI476X_XMODE_CRYSTAL_RCVR2_3 = 3, +}; + +enum si476x_xbiashc { + SI476X_XBIASHC_SINGLE_RECEIVER = 0, + 
SI476X_XBIASHC_MULTIPLE_RECEIVER = 1, +}; + +enum si476x_xbias { + SI476X_XBIAS_RCVR2_3 = 0, + SI476X_XBIAS_4MHZ_RCVR1 = 3, + SI476X_XBIAS_RCVR1 = 7, +}; + +enum si476x_func { + SI476X_FUNC_BOOTLOADER = 0, + SI476X_FUNC_FM_RECEIVER = 1, + SI476X_FUNC_AM_RECEIVER = 2, + SI476X_FUNC_WB_RECEIVER = 3, +}; + + +/** + * @xcload: Selects the amount of additional on-chip capacitance to + * be connected between XTAL1 and gnd and between XTAL2 and + * GND. One half of the capacitance value shown here is the + * additional load capacitance presented to the xtal. The + * minimum step size is 0.277 pF. Recommended value is 0x28 + * but it will be layout dependent. Range is 0–0x3F i.e. + * (0–16.33 pF) + * @ctsien: enable CTSINT(interrupt request when CTS condition + * arises) when set + * @intsel: when set A1 pin becomes the interrupt pin; otherwise, + * INTB is the interrupt pin + * @func: selects the boot function of the device. I.e. + * SI476X_BOOTLOADER - Boot loader + * SI476X_FM_RECEIVER - FM receiver + * SI476X_AM_RECEIVER - AM receiver + * SI476X_WB_RECEIVER - Weatherband receiver + * @freq: oscillator's crystal frequency: + * SI476X_XTAL_37P209375_MHZ - 37.209375 Mhz + * SI476X_XTAL_36P4_MHZ - 36.4 Mhz + * SI476X_XTAL_37P8_MHZ - 37.8 Mhz + */ +struct si476x_power_up_args { + enum si476x_ibias6x ibias6x; + enum si476x_xstart xstart; + u8 xcload; + bool fastboot; + enum si476x_xbiashc xbiashc; + enum si476x_xbias xbias; + enum si476x_func func; + enum si476x_freq freq; + enum si476x_xmode xmode; +}; + + +/** + * enum si476x_phase_diversity_mode - possbile phase diversity modes + * for SI4764/5/6/7 chips. + * + * @SI476X_PHDIV_DISABLED: Phase diversity feature is + * disabled. + * @SI476X_PHDIV_PRIMARY_COMBINING: Tuner works as a primary tuner + * in combination with a + * secondary one. + * @SI476X_PHDIV_PRIMARY_ANTENNA: Tuner works as a primary tuner + * using only its own antenna. + * @SI476X_PHDIV_SECONDARY_ANTENNA: Tuner works as a primary tuner + * usning seconary tuner's antenna. + * @SI476X_PHDIV_SECONDARY_COMBINING: Tuner works as a secondary + * tuner in combination with the + * primary one. + */ +enum si476x_phase_diversity_mode { + SI476X_PHDIV_DISABLED = 0, + SI476X_PHDIV_PRIMARY_COMBINING = 1, + SI476X_PHDIV_PRIMARY_ANTENNA = 2, + SI476X_PHDIV_SECONDARY_ANTENNA = 3, + SI476X_PHDIV_SECONDARY_COMBINING = 5, +}; + + +/* + * Platform dependent definition + */ +struct si476x_platform_data { + int gpio_reset; /* < 0 if not used */ + + struct si476x_power_up_args power_up_parameters; + enum si476x_phase_diversity_mode diversity_mode; + + struct si476x_pinmux pinmux; +}; + + +#endif /* __SI476X_PLATFORM_H__ */ diff --git a/include/linux/mfd/si476x-reports.h b/include/linux/mfd/si476x-reports.h new file mode 100644 index 000000000..e0b9455a7 --- /dev/null +++ b/include/linux/mfd/si476x-reports.h @@ -0,0 +1,163 @@ +/* + * include/media/si476x-platform.h -- Definitions of the data formats + * returned by debugfs hooks + * + * Copyright (C) 2013 Andrey Smirnov + * + * Author: Andrey Smirnov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
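/*
 * Hypothetical board-file initializer showing how the platform data above
 * fits together.  All values are examples only (a single FM receiver on a
 * 37.209375 MHz crystal); the GPIO number is made up and pinmux fields not
 * listed default to their NOOP settings.
 */
static struct si476x_platform_data example_si476x_pdata = {
	.gpio_reset = 42,	/* board specific */
	.power_up_parameters = {
		.ibias6x  = SI476X_IBIAS6X_RCVR1_NON_4MHZ_CLK,
		.xstart   = SI476X_XSTART_NORMAL,
		.xcload   = 0x28,	/* recommended default, layout dependent */
		.fastboot = false,
		.xbiashc  = SI476X_XBIASHC_SINGLE_RECEIVER,
		.xbias    = SI476X_XBIAS_RCVR1,
		.func     = SI476X_FUNC_FM_RECEIVER,
		.freq     = SI476X_FREQ_37P209375_MHZ,
		.xmode    = SI476X_XMODE_CRYSTAL_RCVR1,
	},
	.diversity_mode = SI476X_PHDIV_DISABLED,
	.pinmux = {
		.lrout = SI476X_LROUT_AUDIO,
		.intb  = SI476X_INTB_IRQ,
	},
};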
+ * + */ + +#ifndef __SI476X_REPORTS_H__ +#define __SI476X_REPORTS_H__ + +/** + * struct si476x_rsq_status - structure containing received signal + * quality + * @multhint: Multipath Detect High. + * true - Indicatedes that the value is below + * FM_RSQ_MULTIPATH_HIGH_THRESHOLD + * false - Indicatedes that the value is above + * FM_RSQ_MULTIPATH_HIGH_THRESHOLD + * @multlint: Multipath Detect Low. + * true - Indicatedes that the value is below + * FM_RSQ_MULTIPATH_LOW_THRESHOLD + * false - Indicatedes that the value is above + * FM_RSQ_MULTIPATH_LOW_THRESHOLD + * @snrhint: SNR Detect High. + * true - Indicatedes that the value is below + * FM_RSQ_SNR_HIGH_THRESHOLD + * false - Indicatedes that the value is above + * FM_RSQ_SNR_HIGH_THRESHOLD + * @snrlint: SNR Detect Low. + * true - Indicatedes that the value is below + * FM_RSQ_SNR_LOW_THRESHOLD + * false - Indicatedes that the value is above + * FM_RSQ_SNR_LOW_THRESHOLD + * @rssihint: RSSI Detect High. + * true - Indicatedes that the value is below + * FM_RSQ_RSSI_HIGH_THRESHOLD + * false - Indicatedes that the value is above + * FM_RSQ_RSSI_HIGH_THRESHOLD + * @rssilint: RSSI Detect Low. + * true - Indicatedes that the value is below + * FM_RSQ_RSSI_LOW_THRESHOLD + * false - Indicatedes that the value is above + * FM_RSQ_RSSI_LOW_THRESHOLD + * @bltf: Band Limit. + * Set if seek command hits the band limit or wrapped to + * the original frequency. + * @snr_ready: SNR measurement in progress. + * @rssiready: RSSI measurement in progress. + * @afcrl: Set if FREQOFF >= MAX_TUNE_ERROR + * @valid: Set if the channel is valid + * rssi < FM_VALID_RSSI_THRESHOLD + * snr < FM_VALID_SNR_THRESHOLD + * tune_error < FM_VALID_MAX_TUNE_ERROR + * @readfreq: Current tuned frequency. + * @freqoff: Signed frequency offset. + * @rssi: Received Signal Strength Indicator(dBuV). + * @snr: RF SNR Indicator(dB). + * @lassi: + * @hassi: Low/High side Adjacent(100 kHz) Channel Strength Indicator + * @mult: Multipath indicator + * @dev: Who knows? But values may vary. + * @readantcap: Antenna tuning capacity value. 
+ * @assi: Adjacent Channel(+/- 200kHz) Strength Indicator + * @usn: Ultrasonic Noise Inticator in -DBFS + */ +struct si476x_rsq_status_report { + __u8 multhint, multlint; + __u8 snrhint, snrlint; + __u8 rssihint, rssilint; + __u8 bltf; + __u8 snr_ready; + __u8 rssiready; + __u8 injside; + __u8 afcrl; + __u8 valid; + + __u16 readfreq; + __s8 freqoff; + __s8 rssi; + __s8 snr; + __s8 issi; + __s8 lassi, hassi; + __s8 mult; + __u8 dev; + __u16 readantcap; + __s8 assi; + __s8 usn; + + __u8 pilotdev; + __u8 rdsdev; + __u8 assidev; + __u8 strongdev; + __u16 rdspi; +} __packed; + +/** + * si476x_acf_status_report - ACF report results + * + * @blend_int: If set, indicates that stereo separation has crossed + * below the blend threshold as set by FM_ACF_BLEND_THRESHOLD + * @hblend_int: If set, indicates that HiBlend cutoff frequency is + * lower than threshold as set by FM_ACF_HBLEND_THRESHOLD + * @hicut_int: If set, indicates that HiCut cutoff frequency is lower + * than the threshold set by ACF_ + + */ +struct si476x_acf_status_report { + __u8 blend_int; + __u8 hblend_int; + __u8 hicut_int; + __u8 chbw_int; + __u8 softmute_int; + __u8 smute; + __u8 smattn; + __u8 chbw; + __u8 hicut; + __u8 hiblend; + __u8 pilot; + __u8 stblend; +} __packed; + +enum si476x_fmagc { + SI476X_FMAGC_10K_OHM = 0, + SI476X_FMAGC_800_OHM = 1, + SI476X_FMAGC_400_OHM = 2, + SI476X_FMAGC_200_OHM = 4, + SI476X_FMAGC_100_OHM = 8, + SI476X_FMAGC_50_OHM = 16, + SI476X_FMAGC_25_OHM = 32, + SI476X_FMAGC_12P5_OHM = 64, + SI476X_FMAGC_6P25_OHM = 128, +}; + +struct si476x_agc_status_report { + __u8 mxhi; + __u8 mxlo; + __u8 lnahi; + __u8 lnalo; + __u8 fmagc1; + __u8 fmagc2; + __u8 pgagain; + __u8 fmwblang; +} __packed; + +struct si476x_rds_blockcount_report { + __u16 expected; + __u16 received; + __u16 uncorrectable; +} __packed; + +#endif /* __SI476X_REPORTS_H__ */ diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h new file mode 100644 index 000000000..b0925fa3e --- /dev/null +++ b/include/linux/mfd/sky81452.h @@ -0,0 +1,31 @@ +/* + * sky81452.h SKY81452 MFD driver + * + * Copyright 2014 Skyworks Solutions Inc. + * Author : Gyungoh Yoo + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#ifndef _SKY81452_H +#define _SKY81452_H + +#include +#include + +struct sky81452_platform_data { + struct sky81452_bl_platform_data *bl_pdata; + struct regulator_init_data *regulator_init_data; +}; + +#endif diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h new file mode 100644 index 000000000..9747b29f3 --- /dev/null +++ b/include/linux/mfd/smsc.h @@ -0,0 +1,109 @@ +/* + * SMSC ECE1099 + * + * Copyright 2012 Texas Instruments Inc. + * + * Author: Sourav Poddar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __LINUX_MFD_SMSC_H +#define __LINUX_MFD_SMSC_H + +#include + +#define SMSC_ID_ECE1099 1 +#define SMSC_NUM_CLIENTS 2 + +#define SMSC_BASE_ADDR 0x38 +#define OMAP_GPIO_SMSC_IRQ 151 + +#define SMSC_MAXGPIO 32 +#define SMSC_BANK(offs) ((offs) >> 3) +#define SMSC_BIT(offs) (1u << ((offs) & 0x7)) + +struct smsc { + struct device *dev; + struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS]; + struct regmap *regmap; + int clk; + /* Stored chip id */ + int id; +}; + +struct smsc_gpio; +struct smsc_keypad; + +static inline int smsc_read(struct device *child, unsigned int reg, + unsigned int *dest) +{ + struct smsc *smsc = dev_get_drvdata(child->parent); + + return regmap_read(smsc->regmap, reg, dest); +} + +static inline int smsc_write(struct device *child, unsigned int reg, + unsigned int value) +{ + struct smsc *smsc = dev_get_drvdata(child->parent); + + return regmap_write(smsc->regmap, reg, value); +} + +/* Registers for SMSC */ +#define SMSC_RESET 0xF5 +#define SMSC_GRP_INT 0xF9 +#define SMSC_CLK_CTRL 0xFA +#define SMSC_WKUP_CTRL 0xFB +#define SMSC_DEV_ID 0xFC +#define SMSC_DEV_REV 0xFD +#define SMSC_VEN_ID_L 0xFE +#define SMSC_VEN_ID_H 0xFF + +/* CLK VALUE */ +#define SMSC_CLK_VALUE 0x13 + +/* Registers for function GPIO INPUT */ +#define SMSC_GPIO_DATA_IN_START 0x00 + +/* Registers for function GPIO OUPUT */ +#define SMSC_GPIO_DATA_OUT_START 0x05 + +/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/ +#define SMSC_GPIO_INPUT_LOW 0x01 +#define SMSC_GPIO_INPUT_RISING 0x09 +#define SMSC_GPIO_INPUT_FALLING 0x11 +#define SMSC_GPIO_INPUT_BOTH_EDGE 0x19 +#define SMSC_GPIO_OUTPUT_PP 0x21 +#define SMSC_GPIO_OUTPUT_OP 0x31 + +#define GRP_INT_STAT 0xf9 +#define SMSC_GPI_INT 0x0f +#define SMSC_CFG_START 0x0A + +/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/ +#define SMSC_GPIO_INT_STAT_START 0x32 + +/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/ +#define SMSC_GPIO_INT_MASK_START 0x37 + +/* Registers for SMSC function KEYPAD*/ +#define SMSC_KP_OUT 0x40 +#define SMSC_KP_IN 0x41 +#define SMSC_KP_INT_STAT 0x42 +#define SMSC_KP_INT_MASK 0x43 + +/* Definitions for keypad */ +#define SMSC_KP_KSO 0x70 +#define SMSC_KP_KSI 0x51 +#define SMSC_KSO_ALL_LOW 0x20 +#define SMSC_KP_SET_LOW_PWR 0x0B +#define SMSC_KP_SET_HIGH 0xFF +#define SMSC_KSO_EVAL 0x00 + +#endif /* __LINUX_MFD_SMSC_H */ diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h new file mode 100644 index 000000000..9a855ac11 --- /dev/null +++ b/include/linux/mfd/sta2x11-mfd.h @@ -0,0 +1,518 @@ +/* + * Copyright (c) 2009-2011 Wind River Systems, Inc. + * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated + * functions in one PCI endpoint functions. 
This driver simply + * registers the platform devices in this iomemregion and exports a few + * functions to access common registers + */ + +#ifndef __STA2X11_MFD_H +#define __STA2X11_MFD_H +#include +#include + +enum sta2x11_mfd_plat_dev { + sta2x11_sctl = 0, + sta2x11_gpio, + sta2x11_scr, + sta2x11_time, + sta2x11_apbreg, + sta2x11_apb_soc_regs, + sta2x11_vic, + sta2x11_n_mfd_plat_devs, +}; + +#define STA2X11_MFD_SCTL_NAME "sta2x11-sctl" +#define STA2X11_MFD_GPIO_NAME "sta2x11-gpio" +#define STA2X11_MFD_SCR_NAME "sta2x11-scr" +#define STA2X11_MFD_TIME_NAME "sta2x11-time" +#define STA2X11_MFD_APBREG_NAME "sta2x11-apbreg" +#define STA2X11_MFD_APB_SOC_REGS_NAME "sta2x11-apb-soc-regs" +#define STA2X11_MFD_VIC_NAME "sta2x11-vic" + +extern u32 +__sta2x11_mfd_mask(struct pci_dev *, u32, u32, u32, enum sta2x11_mfd_plat_dev); + +/* + * The MFD PCI block includes the GPIO peripherals and other register blocks. + * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".) + */ +#define GSTA_GPIO_PER_BLOCK 32 +#define GSTA_NR_BLOCKS 4 +#define GSTA_NR_GPIO (GSTA_GPIO_PER_BLOCK * GSTA_NR_BLOCKS) + +/* Pinconfig is set by the board definition: altfunc, pull-up, pull-down */ +struct sta2x11_gpio_pdata { + unsigned pinconfig[GSTA_NR_GPIO]; +}; + +/* Macros below lifted from sh_pfc.h, with minor differences */ +#define PINMUX_TYPE_NONE 0 +#define PINMUX_TYPE_FUNCTION 1 +#define PINMUX_TYPE_OUTPUT_LOW 2 +#define PINMUX_TYPE_OUTPUT_HIGH 3 +#define PINMUX_TYPE_INPUT 4 +#define PINMUX_TYPE_INPUT_PULLUP 5 +#define PINMUX_TYPE_INPUT_PULLDOWN 6 + +/* Give names to GPIO pins, like PXA does, taken from the manual */ +#define STA2X11_GPIO0 0 +#define STA2X11_GPIO1 1 +#define STA2X11_GPIO2 2 +#define STA2X11_GPIO3 3 +#define STA2X11_GPIO4 4 +#define STA2X11_GPIO5 5 +#define STA2X11_GPIO6 6 +#define STA2X11_GPIO7 7 +#define STA2X11_GPIO8_RGBOUT_RED7 8 +#define STA2X11_GPIO9_RGBOUT_RED6 9 +#define STA2X11_GPIO10_RGBOUT_RED5 10 +#define STA2X11_GPIO11_RGBOUT_RED4 11 +#define STA2X11_GPIO12_RGBOUT_RED3 12 +#define STA2X11_GPIO13_RGBOUT_RED2 13 +#define STA2X11_GPIO14_RGBOUT_RED1 14 +#define STA2X11_GPIO15_RGBOUT_RED0 15 +#define STA2X11_GPIO16_RGBOUT_GREEN7 16 +#define STA2X11_GPIO17_RGBOUT_GREEN6 17 +#define STA2X11_GPIO18_RGBOUT_GREEN5 18 +#define STA2X11_GPIO19_RGBOUT_GREEN4 19 +#define STA2X11_GPIO20_RGBOUT_GREEN3 20 +#define STA2X11_GPIO21_RGBOUT_GREEN2 21 +#define STA2X11_GPIO22_RGBOUT_GREEN1 22 +#define STA2X11_GPIO23_RGBOUT_GREEN0 23 +#define STA2X11_GPIO24_RGBOUT_BLUE7 24 +#define STA2X11_GPIO25_RGBOUT_BLUE6 25 +#define STA2X11_GPIO26_RGBOUT_BLUE5 26 +#define STA2X11_GPIO27_RGBOUT_BLUE4 27 +#define STA2X11_GPIO28_RGBOUT_BLUE3 28 +#define STA2X11_GPIO29_RGBOUT_BLUE2 29 +#define STA2X11_GPIO30_RGBOUT_BLUE1 30 +#define STA2X11_GPIO31_RGBOUT_BLUE0 31 +#define STA2X11_GPIO32_RGBOUT_VSYNCH 32 +#define STA2X11_GPIO33_RGBOUT_HSYNCH 33 +#define STA2X11_GPIO34_RGBOUT_DEN 34 +#define STA2X11_GPIO35_ETH_CRS_DV 35 +#define STA2X11_GPIO36_ETH_TXD1 36 +#define STA2X11_GPIO37_ETH_TXD0 37 +#define STA2X11_GPIO38_ETH_TX_EN 38 +#define STA2X11_GPIO39_MDIO 39 +#define STA2X11_GPIO40_ETH_REF_CLK 40 +#define STA2X11_GPIO41_ETH_RXD1 41 +#define STA2X11_GPIO42_ETH_RXD0 42 +#define STA2X11_GPIO43_MDC 43 +#define STA2X11_GPIO44_CAN_TX 44 +#define STA2X11_GPIO45_CAN_RX 45 +#define STA2X11_GPIO46_MLB_DAT 46 +#define STA2X11_GPIO47_MLB_SIG 47 +#define STA2X11_GPIO48_SPI0_CLK 48 +#define STA2X11_GPIO49_SPI0_TXD 49 +#define STA2X11_GPIO50_SPI0_RXD 50 +#define STA2X11_GPIO51_SPI0_FRM 51 +#define STA2X11_GPIO52_SPI1_CLK 52 +#define 
STA2X11_GPIO53_SPI1_TXD 53 +#define STA2X11_GPIO54_SPI1_RXD 54 +#define STA2X11_GPIO55_SPI1_FRM 55 +#define STA2X11_GPIO56_SPI2_CLK 56 +#define STA2X11_GPIO57_SPI2_TXD 57 +#define STA2X11_GPIO58_SPI2_RXD 58 +#define STA2X11_GPIO59_SPI2_FRM 59 +#define STA2X11_GPIO60_I2C0_SCL 60 +#define STA2X11_GPIO61_I2C0_SDA 61 +#define STA2X11_GPIO62_I2C1_SCL 62 +#define STA2X11_GPIO63_I2C1_SDA 63 +#define STA2X11_GPIO64_I2C2_SCL 64 +#define STA2X11_GPIO65_I2C2_SDA 65 +#define STA2X11_GPIO66_I2C3_SCL 66 +#define STA2X11_GPIO67_I2C3_SDA 67 +#define STA2X11_GPIO68_MSP0_RCK 68 +#define STA2X11_GPIO69_MSP0_RXD 69 +#define STA2X11_GPIO70_MSP0_RFS 70 +#define STA2X11_GPIO71_MSP0_TCK 71 +#define STA2X11_GPIO72_MSP0_TXD 72 +#define STA2X11_GPIO73_MSP0_TFS 73 +#define STA2X11_GPIO74_MSP0_SCK 74 +#define STA2X11_GPIO75_MSP1_CK 75 +#define STA2X11_GPIO76_MSP1_RXD 76 +#define STA2X11_GPIO77_MSP1_FS 77 +#define STA2X11_GPIO78_MSP1_TXD 78 +#define STA2X11_GPIO79_MSP2_CK 79 +#define STA2X11_GPIO80_MSP2_RXD 80 +#define STA2X11_GPIO81_MSP2_FS 81 +#define STA2X11_GPIO82_MSP2_TXD 82 +#define STA2X11_GPIO83_MSP3_CK 83 +#define STA2X11_GPIO84_MSP3_RXD 84 +#define STA2X11_GPIO85_MSP3_FS 85 +#define STA2X11_GPIO86_MSP3_TXD 86 +#define STA2X11_GPIO87_MSP4_CK 87 +#define STA2X11_GPIO88_MSP4_RXD 88 +#define STA2X11_GPIO89_MSP4_FS 89 +#define STA2X11_GPIO90_MSP4_TXD 90 +#define STA2X11_GPIO91_MSP5_CK 91 +#define STA2X11_GPIO92_MSP5_RXD 92 +#define STA2X11_GPIO93_MSP5_FS 93 +#define STA2X11_GPIO94_MSP5_TXD 94 +#define STA2X11_GPIO95_SDIO3_DAT3 95 +#define STA2X11_GPIO96_SDIO3_DAT2 96 +#define STA2X11_GPIO97_SDIO3_DAT1 97 +#define STA2X11_GPIO98_SDIO3_DAT0 98 +#define STA2X11_GPIO99_SDIO3_CLK 99 +#define STA2X11_GPIO100_SDIO3_CMD 100 +#define STA2X11_GPIO101 101 +#define STA2X11_GPIO102 102 +#define STA2X11_GPIO103 103 +#define STA2X11_GPIO104 104 +#define STA2X11_GPIO105_SDIO2_DAT3 105 +#define STA2X11_GPIO106_SDIO2_DAT2 106 +#define STA2X11_GPIO107_SDIO2_DAT1 107 +#define STA2X11_GPIO108_SDIO2_DAT0 108 +#define STA2X11_GPIO109_SDIO2_CLK 109 +#define STA2X11_GPIO110_SDIO2_CMD 110 +#define STA2X11_GPIO111 111 +#define STA2X11_GPIO112 112 +#define STA2X11_GPIO113 113 +#define STA2X11_GPIO114 114 +#define STA2X11_GPIO115_SDIO1_DAT3 115 +#define STA2X11_GPIO116_SDIO1_DAT2 116 +#define STA2X11_GPIO117_SDIO1_DAT1 117 +#define STA2X11_GPIO118_SDIO1_DAT0 118 +#define STA2X11_GPIO119_SDIO1_CLK 119 +#define STA2X11_GPIO120_SDIO1_CMD 120 +#define STA2X11_GPIO121 121 +#define STA2X11_GPIO122 122 +#define STA2X11_GPIO123 123 +#define STA2X11_GPIO124 124 +#define STA2X11_GPIO125_UART2_TXD 125 +#define STA2X11_GPIO126_UART2_RXD 126 +#define STA2X11_GPIO127_UART3_TXD 127 + +/* + * The APB bridge has its own registers, needed by our users as well. + * They are accessed with the following read/mask/write function. 
+ */ +static inline u32 +sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val) +{ + return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apbreg); +} + +/* CAN and MLB */ +#define APBREG_BSR 0x00 /* Bridge Status Reg */ +#define APBREG_PAER 0x08 /* Peripherals Address Error Reg */ +#define APBREG_PWAC 0x20 /* Peripheral Write Access Control reg */ +#define APBREG_PRAC 0x40 /* Peripheral Read Access Control reg */ +#define APBREG_PCG 0x60 /* Peripheral Clock Gating Reg */ +#define APBREG_PUR 0x80 /* Peripheral Under Reset Reg */ +#define APBREG_EMU_PCG 0xA0 /* Emulator Peripheral Clock Gating Reg */ + +#define APBREG_CAN (1 << 1) +#define APBREG_MLB (1 << 3) + +/* SARAC */ +#define APBREG_BSR_SARAC 0x100 /* Bridge Status Reg */ +#define APBREG_PAER_SARAC 0x108 /* Peripherals Address Error Reg */ +#define APBREG_PWAC_SARAC 0x120 /* Peripheral Write Access Control reg */ +#define APBREG_PRAC_SARAC 0x140 /* Peripheral Read Access Control reg */ +#define APBREG_PCG_SARAC 0x160 /* Peripheral Clock Gating Reg */ +#define APBREG_PUR_SARAC 0x180 /* Peripheral Under Reset Reg */ +#define APBREG_EMU_PCG_SARAC 0x1A0 /* Emulator Peripheral Clock Gating Reg */ + +#define APBREG_SARAC (1 << 2) + +/* + * The system controller has its own registers. Some of these are accessed + * by out users as well, using the following read/mask/write/function + */ +static inline +u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val) +{ + return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_sctl); +} + +#define SCTL_SCCTL 0x00 /* System controller control register */ +#define SCTL_ARMCFG 0x04 /* ARM configuration register */ +#define SCTL_SCPLLCTL 0x08 /* PLL control status register */ + +#define SCTL_SCPLLCTL_AUDIO_PLL_PD BIT(1) +#define SCTL_SCPLLCTL_FRAC_CONTROL BIT(3) +#define SCTL_SCPLLCTL_STRB_BYPASS BIT(6) +#define SCTL_SCPLLCTL_STRB_INPUT BIT(8) + +#define SCTL_SCPLLFCTRL 0x0c /* PLL frequency control register */ + +#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_MASK 0xff +#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_SHIFT 10 +#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_MASK 7 +#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_SHIFT 21 +#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_MASK 7 +#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_SHIFT 18 +#define SCTL_SCPLLFCTRL_DITHER_DISABLE_MASK 0x03 +#define SCTL_SCPLLFCTRL_DITHER_DISABLE_SHIFT 4 + + +#define SCTL_SCRESFRACT 0x10 /* PLL fractional input register */ + +#define SCTL_SCRESFRACT_MASK 0x0000ffff + + +#define SCTL_SCRESCTRL1 0x14 /* Peripheral reset control 1 */ +#define SCTL_SCRESXTRL2 0x18 /* Peripheral reset control 2 */ +#define SCTL_SCPEREN0 0x1c /* Peripheral clock enable register 0 */ +#define SCTL_SCPEREN1 0x20 /* Peripheral clock enable register 1 */ +#define SCTL_SCPEREN2 0x24 /* Peripheral clock enable register 2 */ +#define SCTL_SCGRST 0x28 /* Peripheral global reset */ +#define SCTL_SCPCIECSBRST 0x2c /* PCIe PAB CSB reset status register */ +#define SCTL_SCPCIPMCR1 0x30 /* PCI power management control 1 */ +#define SCTL_SCPCIPMCR2 0x34 /* PCI power management control 2 */ +#define SCTL_SCPCIPMSR1 0x38 /* PCI power management status 1 */ +#define SCTL_SCPCIPMSR2 0x3c /* PCI power management status 2 */ +#define SCTL_SCPCIPMSR3 0x40 /* PCI power management status 3 */ +#define SCTL_SCINTREN 0x44 /* Interrupt enable */ +#define SCTL_SCRISR 0x48 /* RAW interrupt status */ +#define SCTL_SCCLKSTAT0 0x4c /* Peripheral clocks status 0 */ +#define SCTL_SCCLKSTAT1 0x50 /* Peripheral clocks status 1 */ +#define SCTL_SCCLKSTAT2 0x54 /* Peripheral clocks status 2 */ 
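/*
 * Illustrative call only (helper name invented): using the read/mask/write
 * accessor above to clear the audio PLL power-down bit in the system
 * controller.  "pdev" is the ConneXt MFD PCI device handed to the cells.
 */
static inline u32 sta2x11_example_audio_pll_power_up(struct pci_dev *pdev)
{
	return sta2x11_sctl_mask(pdev, SCTL_SCPLLCTL,
				 SCTL_SCPLLCTL_AUDIO_PLL_PD, 0);
}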
+#define SCTL_SCRSTSTA 0x58 /* Reset status register */ + +#define SCTL_SCRESCTRL1_USB_PHY_POR (1 << 0) +#define SCTL_SCRESCTRL1_USB_OTG (1 << 1) +#define SCTL_SCRESCTRL1_USB_HRST (1 << 2) +#define SCTL_SCRESCTRL1_USB_PHY_HOST (1 << 3) +#define SCTL_SCRESCTRL1_SATAII (1 << 4) +#define SCTL_SCRESCTRL1_VIP (1 << 5) +#define SCTL_SCRESCTRL1_PER_MMC0 (1 << 6) +#define SCTL_SCRESCTRL1_PER_MMC1 (1 << 7) +#define SCTL_SCRESCTRL1_PER_GPIO0 (1 << 8) +#define SCTL_SCRESCTRL1_PER_GPIO1 (1 << 9) +#define SCTL_SCRESCTRL1_PER_GPIO2 (1 << 10) +#define SCTL_SCRESCTRL1_PER_GPIO3 (1 << 11) +#define SCTL_SCRESCTRL1_PER_MTU0 (1 << 12) +#define SCTL_SCRESCTRL1_KER_SPI0 (1 << 13) +#define SCTL_SCRESCTRL1_KER_SPI1 (1 << 14) +#define SCTL_SCRESCTRL1_KER_SPI2 (1 << 15) +#define SCTL_SCRESCTRL1_KER_MCI0 (1 << 16) +#define SCTL_SCRESCTRL1_KER_MCI1 (1 << 17) +#define SCTL_SCRESCTRL1_PRE_HSI2C0 (1 << 18) +#define SCTL_SCRESCTRL1_PER_HSI2C1 (1 << 19) +#define SCTL_SCRESCTRL1_PER_HSI2C2 (1 << 20) +#define SCTL_SCRESCTRL1_PER_HSI2C3 (1 << 21) +#define SCTL_SCRESCTRL1_PER_MSP0 (1 << 22) +#define SCTL_SCRESCTRL1_PER_MSP1 (1 << 23) +#define SCTL_SCRESCTRL1_PER_MSP2 (1 << 24) +#define SCTL_SCRESCTRL1_PER_MSP3 (1 << 25) +#define SCTL_SCRESCTRL1_PER_MSP4 (1 << 26) +#define SCTL_SCRESCTRL1_PER_MSP5 (1 << 27) +#define SCTL_SCRESCTRL1_PER_MMC (1 << 28) +#define SCTL_SCRESCTRL1_KER_MSP0 (1 << 29) +#define SCTL_SCRESCTRL1_KER_MSP1 (1 << 30) +#define SCTL_SCRESCTRL1_KER_MSP2 (1 << 31) + +#define SCTL_SCPEREN0_UART0 (1 << 0) +#define SCTL_SCPEREN0_UART1 (1 << 1) +#define SCTL_SCPEREN0_UART2 (1 << 2) +#define SCTL_SCPEREN0_UART3 (1 << 3) +#define SCTL_SCPEREN0_MSP0 (1 << 4) +#define SCTL_SCPEREN0_MSP1 (1 << 5) +#define SCTL_SCPEREN0_MSP2 (1 << 6) +#define SCTL_SCPEREN0_MSP3 (1 << 7) +#define SCTL_SCPEREN0_MSP4 (1 << 8) +#define SCTL_SCPEREN0_MSP5 (1 << 9) +#define SCTL_SCPEREN0_SPI0 (1 << 10) +#define SCTL_SCPEREN0_SPI1 (1 << 11) +#define SCTL_SCPEREN0_SPI2 (1 << 12) +#define SCTL_SCPEREN0_I2C0 (1 << 13) +#define SCTL_SCPEREN0_I2C1 (1 << 14) +#define SCTL_SCPEREN0_I2C2 (1 << 15) +#define SCTL_SCPEREN0_I2C3 (1 << 16) +#define SCTL_SCPEREN0_SVDO_LVDS (1 << 17) +#define SCTL_SCPEREN0_USB_HOST (1 << 18) +#define SCTL_SCPEREN0_USB_OTG (1 << 19) +#define SCTL_SCPEREN0_MCI0 (1 << 20) +#define SCTL_SCPEREN0_MCI1 (1 << 21) +#define SCTL_SCPEREN0_MCI2 (1 << 22) +#define SCTL_SCPEREN0_MCI3 (1 << 23) +#define SCTL_SCPEREN0_SATA (1 << 24) +#define SCTL_SCPEREN0_ETHERNET (1 << 25) +#define SCTL_SCPEREN0_VIC (1 << 26) +#define SCTL_SCPEREN0_DMA_AUDIO (1 << 27) +#define SCTL_SCPEREN0_DMA_SOC (1 << 28) +#define SCTL_SCPEREN0_RAM (1 << 29) +#define SCTL_SCPEREN0_VIP (1 << 30) +#define SCTL_SCPEREN0_ARM (1 << 31) + +#define SCTL_SCPEREN1_UART0 (1 << 0) +#define SCTL_SCPEREN1_UART1 (1 << 1) +#define SCTL_SCPEREN1_UART2 (1 << 2) +#define SCTL_SCPEREN1_UART3 (1 << 3) +#define SCTL_SCPEREN1_MSP0 (1 << 4) +#define SCTL_SCPEREN1_MSP1 (1 << 5) +#define SCTL_SCPEREN1_MSP2 (1 << 6) +#define SCTL_SCPEREN1_MSP3 (1 << 7) +#define SCTL_SCPEREN1_MSP4 (1 << 8) +#define SCTL_SCPEREN1_MSP5 (1 << 9) +#define SCTL_SCPEREN1_SPI0 (1 << 10) +#define SCTL_SCPEREN1_SPI1 (1 << 11) +#define SCTL_SCPEREN1_SPI2 (1 << 12) +#define SCTL_SCPEREN1_I2C0 (1 << 13) +#define SCTL_SCPEREN1_I2C1 (1 << 14) +#define SCTL_SCPEREN1_I2C2 (1 << 15) +#define SCTL_SCPEREN1_I2C3 (1 << 16) +#define SCTL_SCPEREN1_USB_PHY (1 << 17) + +/* + * APB-SOC registers + */ +static inline +u32 sta2x11_apb_soc_regs_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val) +{ + return __sta2x11_mfd_mask(pdev, reg, 
mask, val, sta2x11_apb_soc_regs); +} + +#define PCIE_EP1_FUNC3_0_INTR_REG 0x000 +#define PCIE_EP1_FUNC7_4_INTR_REG 0x004 +#define PCIE_EP2_FUNC3_0_INTR_REG 0x008 +#define PCIE_EP2_FUNC7_4_INTR_REG 0x00c +#define PCIE_EP3_FUNC3_0_INTR_REG 0x010 +#define PCIE_EP3_FUNC7_4_INTR_REG 0x014 +#define PCIE_EP4_FUNC3_0_INTR_REG 0x018 +#define PCIE_EP4_FUNC7_4_INTR_REG 0x01c +#define PCIE_INTR_ENABLE0_REG 0x020 +#define PCIE_INTR_ENABLE1_REG 0x024 +#define PCIE_EP1_FUNC_TC_REG 0x028 +#define PCIE_EP2_FUNC_TC_REG 0x02c +#define PCIE_EP3_FUNC_TC_REG 0x030 +#define PCIE_EP4_FUNC_TC_REG 0x034 +#define PCIE_EP1_FUNC_F_REG 0x038 +#define PCIE_EP2_FUNC_F_REG 0x03c +#define PCIE_EP3_FUNC_F_REG 0x040 +#define PCIE_EP4_FUNC_F_REG 0x044 +#define PCIE_PAB_AMBA_SW_RST_REG 0x048 +#define PCIE_PM_STATUS_0_PORT_0_4 0x04c +#define PCIE_PM_STATUS_7_0_EP1 0x050 +#define PCIE_PM_STATUS_7_0_EP2 0x054 +#define PCIE_PM_STATUS_7_0_EP3 0x058 +#define PCIE_PM_STATUS_7_0_EP4 0x05c +#define PCIE_DEV_ID_0_EP1_REG 0x060 +#define PCIE_CC_REV_ID_0_EP1_REG 0x064 +#define PCIE_DEV_ID_1_EP1_REG 0x068 +#define PCIE_CC_REV_ID_1_EP1_REG 0x06c +#define PCIE_DEV_ID_2_EP1_REG 0x070 +#define PCIE_CC_REV_ID_2_EP1_REG 0x074 +#define PCIE_DEV_ID_3_EP1_REG 0x078 +#define PCIE_CC_REV_ID_3_EP1_REG 0x07c +#define PCIE_DEV_ID_4_EP1_REG 0x080 +#define PCIE_CC_REV_ID_4_EP1_REG 0x084 +#define PCIE_DEV_ID_5_EP1_REG 0x088 +#define PCIE_CC_REV_ID_5_EP1_REG 0x08c +#define PCIE_DEV_ID_6_EP1_REG 0x090 +#define PCIE_CC_REV_ID_6_EP1_REG 0x094 +#define PCIE_DEV_ID_7_EP1_REG 0x098 +#define PCIE_CC_REV_ID_7_EP1_REG 0x09c +#define PCIE_DEV_ID_0_EP2_REG 0x0a0 +#define PCIE_CC_REV_ID_0_EP2_REG 0x0a4 +#define PCIE_DEV_ID_1_EP2_REG 0x0a8 +#define PCIE_CC_REV_ID_1_EP2_REG 0x0ac +#define PCIE_DEV_ID_2_EP2_REG 0x0b0 +#define PCIE_CC_REV_ID_2_EP2_REG 0x0b4 +#define PCIE_DEV_ID_3_EP2_REG 0x0b8 +#define PCIE_CC_REV_ID_3_EP2_REG 0x0bc +#define PCIE_DEV_ID_4_EP2_REG 0x0c0 +#define PCIE_CC_REV_ID_4_EP2_REG 0x0c4 +#define PCIE_DEV_ID_5_EP2_REG 0x0c8 +#define PCIE_CC_REV_ID_5_EP2_REG 0x0cc +#define PCIE_DEV_ID_6_EP2_REG 0x0d0 +#define PCIE_CC_REV_ID_6_EP2_REG 0x0d4 +#define PCIE_DEV_ID_7_EP2_REG 0x0d8 +#define PCIE_CC_REV_ID_7_EP2_REG 0x0dC +#define PCIE_DEV_ID_0_EP3_REG 0x0e0 +#define PCIE_CC_REV_ID_0_EP3_REG 0x0e4 +#define PCIE_DEV_ID_1_EP3_REG 0x0e8 +#define PCIE_CC_REV_ID_1_EP3_REG 0x0ec +#define PCIE_DEV_ID_2_EP3_REG 0x0f0 +#define PCIE_CC_REV_ID_2_EP3_REG 0x0f4 +#define PCIE_DEV_ID_3_EP3_REG 0x0f8 +#define PCIE_CC_REV_ID_3_EP3_REG 0x0fc +#define PCIE_DEV_ID_4_EP3_REG 0x100 +#define PCIE_CC_REV_ID_4_EP3_REG 0x104 +#define PCIE_DEV_ID_5_EP3_REG 0x108 +#define PCIE_CC_REV_ID_5_EP3_REG 0x10c +#define PCIE_DEV_ID_6_EP3_REG 0x110 +#define PCIE_CC_REV_ID_6_EP3_REG 0x114 +#define PCIE_DEV_ID_7_EP3_REG 0x118 +#define PCIE_CC_REV_ID_7_EP3_REG 0x11c +#define PCIE_DEV_ID_0_EP4_REG 0x120 +#define PCIE_CC_REV_ID_0_EP4_REG 0x124 +#define PCIE_DEV_ID_1_EP4_REG 0x128 +#define PCIE_CC_REV_ID_1_EP4_REG 0x12c +#define PCIE_DEV_ID_2_EP4_REG 0x130 +#define PCIE_CC_REV_ID_2_EP4_REG 0x134 +#define PCIE_DEV_ID_3_EP4_REG 0x138 +#define PCIE_CC_REV_ID_3_EP4_REG 0x13c +#define PCIE_DEV_ID_4_EP4_REG 0x140 +#define PCIE_CC_REV_ID_4_EP4_REG 0x144 +#define PCIE_DEV_ID_5_EP4_REG 0x148 +#define PCIE_CC_REV_ID_5_EP4_REG 0x14c +#define PCIE_DEV_ID_6_EP4_REG 0x150 +#define PCIE_CC_REV_ID_6_EP4_REG 0x154 +#define PCIE_DEV_ID_7_EP4_REG 0x158 +#define PCIE_CC_REV_ID_7_EP4_REG 0x15c +#define PCIE_SUBSYS_VEN_ID_REG 0x160 +#define PCIE_COMMON_CLOCK_CONFIG_0_4_0 0x164 +#define PCIE_MIPHYP_SSC_EN_REG 0x168 +#define 
PCIE_MIPHYP_ADDR_REG 0x16c +#define PCIE_L1_ASPM_READY_REG 0x170 +#define PCIE_EXT_CFG_RDY_REG 0x174 +#define PCIE_SoC_INT_ROUTER_STATUS0_REG 0x178 +#define PCIE_SoC_INT_ROUTER_STATUS1_REG 0x17c +#define PCIE_SoC_INT_ROUTER_STATUS2_REG 0x180 +#define PCIE_SoC_INT_ROUTER_STATUS3_REG 0x184 +#define DMA_IP_CTRL_REG 0x324 +#define DISP_BRIDGE_PU_PD_CTRL_REG 0x328 +#define VIP_PU_PD_CTRL_REG 0x32c +#define USB_MLB_PU_PD_CTRL_REG 0x330 +#define SDIO_PU_PD_MISCFUNC_CTRL_REG1 0x334 +#define SDIO_PU_PD_MISCFUNC_CTRL_REG2 0x338 +#define UART_PU_PD_CTRL_REG 0x33c +#define ARM_Lock 0x340 +#define SYS_IO_CHAR_REG1 0x344 +#define SYS_IO_CHAR_REG2 0x348 +#define SATA_CORE_ID_REG 0x34c +#define SATA_CTRL_REG 0x350 +#define I2C_HSFIX_MISC_REG 0x354 +#define SPARE2_RESERVED 0x358 +#define SPARE3_RESERVED 0x35c +#define MASTER_LOCK_REG 0x368 +#define SYSTEM_CONFIG_STATUS_REG 0x36c +#define MSP_CLK_CTRL_REG 0x39c +#define COMPENSATION_REG1 0x3c4 +#define COMPENSATION_REG2 0x3c8 +#define COMPENSATION_REG3 0x3cc +#define TEST_CTL_REG 0x3d0 + +/* + * SECR (OTP) registers + */ +#define STA2X11_SECR_CR 0x00 +#define STA2X11_SECR_FVR0 0x10 +#define STA2X11_SECR_FVR1 0x14 + +extern int sta2x11_mfd_get_regs_data(struct platform_device *pdev, + enum sta2x11_mfd_plat_dev index, + void __iomem **regs, + spinlock_t **lock); + +#endif /* __STA2X11_MFD_H */ diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h new file mode 100644 index 000000000..605f62264 --- /dev/null +++ b/include/linux/mfd/stm32-lptimer.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * STM32 Low-Power Timer parent driver. + * Copyright (C) STMicroelectronics 2017 + * Author: Fabrice Gasnier + * Inspired by Benjamin Gaignard's stm32-timers driver + */ + +#ifndef _LINUX_STM32_LPTIMER_H_ +#define _LINUX_STM32_LPTIMER_H_ + +#include +#include + +#define STM32_LPTIM_ISR 0x00 /* Interrupt and Status Reg */ +#define STM32_LPTIM_ICR 0x04 /* Interrupt Clear Reg */ +#define STM32_LPTIM_IER 0x08 /* Interrupt Enable Reg */ +#define STM32_LPTIM_CFGR 0x0C /* Configuration Reg */ +#define STM32_LPTIM_CR 0x10 /* Control Reg */ +#define STM32_LPTIM_CMP 0x14 /* Compare Reg */ +#define STM32_LPTIM_ARR 0x18 /* Autoreload Reg */ +#define STM32_LPTIM_CNT 0x1C /* Counter Reg */ + +/* STM32_LPTIM_ISR - bit fields */ +#define STM32_LPTIM_CMPOK_ARROK GENMASK(4, 3) +#define STM32_LPTIM_ARROK BIT(4) +#define STM32_LPTIM_CMPOK BIT(3) + +/* STM32_LPTIM_ICR - bit fields */ +#define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3) + +/* STM32_LPTIM_CR - bit fields */ +#define STM32_LPTIM_CNTSTRT BIT(2) +#define STM32_LPTIM_ENABLE BIT(0) + +/* STM32_LPTIM_CFGR - bit fields */ +#define STM32_LPTIM_ENC BIT(24) +#define STM32_LPTIM_COUNTMODE BIT(23) +#define STM32_LPTIM_WAVPOL BIT(21) +#define STM32_LPTIM_PRESC GENMASK(11, 9) +#define STM32_LPTIM_CKPOL GENMASK(2, 1) + +/* STM32_LPTIM_ARR */ +#define STM32_LPTIM_MAX_ARR 0xFFFF + +/** + * struct stm32_lptimer - STM32 Low-Power Timer data assigned by parent device + * @clk: clock reference for this instance + * @regmap: register map reference for this instance + * @has_encoder: indicates this Low-Power Timer supports encoder mode + */ +struct stm32_lptimer { + struct clk *clk; + struct regmap *regmap; + bool has_encoder; +}; + +#endif diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h new file mode 100644 index 000000000..067d14655 --- /dev/null +++ b/include/linux/mfd/stm32-timers.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 
STMicroelectronics 2016 + * Author: Benjamin Gaignard + */ + +#ifndef _LINUX_STM32_GPTIMER_H_ +#define _LINUX_STM32_GPTIMER_H_ + +#include +#include +#include +#include + +#define TIM_CR1 0x00 /* Control Register 1 */ +#define TIM_CR2 0x04 /* Control Register 2 */ +#define TIM_SMCR 0x08 /* Slave mode control reg */ +#define TIM_DIER 0x0C /* DMA/interrupt register */ +#define TIM_SR 0x10 /* Status register */ +#define TIM_EGR 0x14 /* Event Generation Reg */ +#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */ +#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */ +#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */ +#define TIM_CNT 0x24 /* Counter */ +#define TIM_PSC 0x28 /* Prescaler */ +#define TIM_ARR 0x2c /* Auto-Reload Register */ +#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */ +#define TIM_CCR2 0x38 /* Capt/Comp Register 2 */ +#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */ +#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */ +#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */ +#define TIM_DCR 0x48 /* DMA control register */ +#define TIM_DMAR 0x4C /* DMA register for transfer */ + +#define TIM_CR1_CEN BIT(0) /* Counter Enable */ +#define TIM_CR1_DIR BIT(4) /* Counter Direction */ +#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */ +#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */ +#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */ +#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */ +#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */ +#define TIM_DIER_UIE BIT(0) /* Update interrupt */ +#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */ +#define TIM_DIER_CC1DE BIT(9) /* CC1 DMA request Enable */ +#define TIM_DIER_CC2DE BIT(10) /* CC2 DMA request Enable */ +#define TIM_DIER_CC3DE BIT(11) /* CC3 DMA request Enable */ +#define TIM_DIER_CC4DE BIT(12) /* CC4 DMA request Enable */ +#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */ +#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */ +#define TIM_SR_UIF BIT(0) /* Update interrupt flag */ +#define TIM_EGR_UG BIT(0) /* Update Generation */ +#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */ +#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */ +#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */ +#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */ +#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */ +#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */ +#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */ +#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */ +#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */ +#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */ +#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */ +#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */ +#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */ +#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */ +#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */ +#define TIM_CCER_CC2P BIT(5) /* Capt/Comp 2 Polarity */ +#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */ +#define TIM_CCER_CC3P BIT(9) /* Capt/Comp 3 Polarity */ +#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */ +#define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */ +#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12)) +#define TIM_BDTR_BKE BIT(12) /* Break input enable */ +#define TIM_BDTR_BKP BIT(13) /* Break input polarity */ +#define 
TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */ +#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */ +#define TIM_BDTR_BKF (BIT(16) | BIT(17) | BIT(18) | BIT(19)) +#define TIM_BDTR_BK2F (BIT(20) | BIT(21) | BIT(22) | BIT(23)) +#define TIM_BDTR_BK2E BIT(24) /* Break 2 input enable */ +#define TIM_BDTR_BK2P BIT(25) /* Break 2 input polarity */ +#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */ +#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */ + +#define MAX_TIM_PSC 0xFFFF +#define MAX_TIM_ICPSC 0x3 +#define TIM_CR2_MMS_SHIFT 4 +#define TIM_CR2_MMS2_SHIFT 20 +#define TIM_SMCR_TS_SHIFT 4 +#define TIM_BDTR_BKF_MASK 0xF +#define TIM_BDTR_BKF_SHIFT 16 +#define TIM_BDTR_BK2F_SHIFT 20 + +enum stm32_timers_dmas { + STM32_TIMERS_DMA_CH1, + STM32_TIMERS_DMA_CH2, + STM32_TIMERS_DMA_CH3, + STM32_TIMERS_DMA_CH4, + STM32_TIMERS_DMA_UP, + STM32_TIMERS_DMA_TRIG, + STM32_TIMERS_DMA_COM, + STM32_TIMERS_MAX_DMAS, +}; + +/** + * struct stm32_timers_dma - STM32 timer DMA handling. + * @completion: end of DMA transfer completion + * @phys_base: control registers physical base address + * @lock: protect DMA access + * @chan: DMA channel in use + * @chans: DMA channels available for this timer instance + */ +struct stm32_timers_dma { + struct completion completion; + phys_addr_t phys_base; + struct mutex lock; + struct dma_chan *chan; + struct dma_chan *chans[STM32_TIMERS_MAX_DMAS]; +}; + +struct stm32_timers { + struct clk *clk; + struct regmap *regmap; + u32 max_arr; + struct stm32_timers_dma dma; /* Only to be used by the parent */ +}; + +#if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS) +int stm32_timers_dma_burst_read(struct device *dev, u32 *buf, + enum stm32_timers_dmas id, u32 reg, + unsigned int num_reg, unsigned int bursts, + unsigned long tmo_ms); +#else +static inline int stm32_timers_dma_burst_read(struct device *dev, u32 *buf, + enum stm32_timers_dmas id, + u32 reg, + unsigned int num_reg, + unsigned int bursts, + unsigned long tmo_ms) +{ + return -ENODEV; +} +#endif +#endif diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h new file mode 100644 index 000000000..4a827af17 --- /dev/null +++ b/include/linux/mfd/stmpe.h @@ -0,0 +1,142 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 + * Author: Rabin Vincent for ST-Ericsson + */ + +#ifndef __LINUX_MFD_STMPE_H +#define __LINUX_MFD_STMPE_H + +#include + +struct device; +struct regulator; + +enum stmpe_block { + STMPE_BLOCK_GPIO = 1 << 0, + STMPE_BLOCK_KEYPAD = 1 << 1, + STMPE_BLOCK_TOUCHSCREEN = 1 << 2, + STMPE_BLOCK_ADC = 1 << 3, + STMPE_BLOCK_PWM = 1 << 4, + STMPE_BLOCK_ROTATOR = 1 << 5, +}; + +enum stmpe_partnum { + STMPE610, + STMPE801, + STMPE811, + STMPE1600, + STMPE1601, + STMPE1801, + STMPE2401, + STMPE2403, + STMPE_NBR_PARTS +}; + +/* + * For registers whose locations differ on variants, the correct address is + * obtained by indexing stmpe->regs with one of the following. 
+ */ +enum { + STMPE_IDX_CHIP_ID, + STMPE_IDX_SYS_CTRL, + STMPE_IDX_SYS_CTRL2, + STMPE_IDX_ICR_LSB, + STMPE_IDX_IER_LSB, + STMPE_IDX_IER_MSB, + STMPE_IDX_ISR_LSB, + STMPE_IDX_ISR_MSB, + STMPE_IDX_GPMR_LSB, + STMPE_IDX_GPMR_CSB, + STMPE_IDX_GPMR_MSB, + STMPE_IDX_GPSR_LSB, + STMPE_IDX_GPSR_CSB, + STMPE_IDX_GPSR_MSB, + STMPE_IDX_GPCR_LSB, + STMPE_IDX_GPCR_CSB, + STMPE_IDX_GPCR_MSB, + STMPE_IDX_GPDR_LSB, + STMPE_IDX_GPDR_CSB, + STMPE_IDX_GPDR_MSB, + STMPE_IDX_GPEDR_LSB, + STMPE_IDX_GPEDR_CSB, + STMPE_IDX_GPEDR_MSB, + STMPE_IDX_GPRER_LSB, + STMPE_IDX_GPRER_CSB, + STMPE_IDX_GPRER_MSB, + STMPE_IDX_GPFER_LSB, + STMPE_IDX_GPFER_CSB, + STMPE_IDX_GPFER_MSB, + STMPE_IDX_GPPUR_LSB, + STMPE_IDX_GPPDR_LSB, + STMPE_IDX_GPAFR_U_MSB, + STMPE_IDX_IEGPIOR_LSB, + STMPE_IDX_IEGPIOR_CSB, + STMPE_IDX_IEGPIOR_MSB, + STMPE_IDX_ISGPIOR_LSB, + STMPE_IDX_ISGPIOR_CSB, + STMPE_IDX_ISGPIOR_MSB, + STMPE_IDX_MAX, +}; + + +struct stmpe_variant_info; +struct stmpe_client_info; +struct stmpe_platform_data; + +/** + * struct stmpe - STMPE MFD structure + * @vcc: optional VCC regulator + * @vio: optional VIO regulator + * @lock: lock protecting I/O operations + * @irq_lock: IRQ bus lock + * @dev: device, mostly for dev_dbg() + * @irq_domain: IRQ domain + * @client: client - i2c or spi + * @ci: client specific information + * @partnum: part number + * @variant: the detected STMPE model number + * @regs: list of addresses of registers which are at different addresses on + * different variants. Indexed by one of STMPE_IDX_*. + * @irq: irq number for stmpe + * @num_gpios: number of gpios, differs for variants + * @ier: cache of IER registers for bus_lock + * @oldier: cache of IER registers for bus_lock + * @pdata: platform data + */ +struct stmpe { + struct regulator *vcc; + struct regulator *vio; + struct mutex lock; + struct mutex irq_lock; + struct device *dev; + struct irq_domain *domain; + void *client; + struct stmpe_client_info *ci; + enum stmpe_partnum partnum; + struct stmpe_variant_info *variant; + const u8 *regs; + + int irq; + int num_gpios; + u8 ier[2]; + u8 oldier[2]; + struct stmpe_platform_data *pdata; +}; + +extern int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 data); +extern int stmpe_reg_read(struct stmpe *stmpe, u8 reg); +extern int stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length, + u8 *values); +extern int stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length, + const u8 *values); +extern int stmpe_set_bits(struct stmpe *stmpe, u8 reg, u8 mask, u8 val); +extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, + enum stmpe_block block); +extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks); +extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks); + +#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) + +#endif diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h new file mode 100644 index 000000000..833074b76 --- /dev/null +++ b/include/linux/mfd/stw481x.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * + * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef MFD_STW481X_H +#define MFD_STW481X_H + +#include +#include +#include +#include + +/* These registers are accessed from more than one driver */ +#define STW_CONF1 0x11U +#define STW_CONF1_PDN_VMMC 0x01U +#define STW_CONF1_VMMC_MASK 0x0eU +#define STW_CONF1_VMMC_1_8V 0x02U +#define STW_CONF1_VMMC_2_85V 0x04U +#define STW_CONF1_VMMC_3V 0x06U +#define STW_CONF1_VMMC_1_85V 0x08U +#define 
STW_CONF1_VMMC_2_6V 0x0aU +#define STW_CONF1_VMMC_2_7V 0x0cU +#define STW_CONF1_VMMC_3_3V 0x0eU +#define STW_CONF1_MMC_LS_STATUS 0x10U +#define STW_PCTL_REG_LO 0x1eU +#define STW_PCTL_REG_HI 0x1fU +#define STW_CONF1_V_MONITORING 0x20U +#define STW_CONF1_IT_WARN 0x40U +#define STW_CONF1_PDN_VAUX 0x80U +#define STW_CONF2 0x20U +#define STW_CONF2_MASK_TWARN 0x01U +#define STW_CONF2_VMMC_EXT 0x02U +#define STW_CONF2_MASK_IT_WAKE_UP 0x04U +#define STW_CONF2_GPO1 0x08U +#define STW_CONF2_GPO2 0x10U +#define STW_VCORE_SLEEP 0x21U + +/** + * struct stw481x - state holder for the Stw481x drivers + * @i2c_client: corresponding I2C client + * @map: regmap handle to access device registers + */ +struct stw481x { + struct i2c_client *client; + struct regmap *map; +}; + +#endif diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h new file mode 100644 index 000000000..139872c2e --- /dev/null +++ b/include/linux/mfd/sun4i-gpadc.h @@ -0,0 +1,100 @@ +/* Header of ADC MFD core driver for sunxi platforms + * + * Copyright (c) 2016 Quentin Schulz + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. + */ + +#ifndef __SUN4I_GPADC__H__ +#define __SUN4I_GPADC__H__ + +#define SUN4I_GPADC_CTRL0 0x00 + +#define SUN4I_GPADC_CTRL0_ADC_FIRST_DLY(x) ((GENMASK(7, 0) & (x)) << 24) +#define SUN4I_GPADC_CTRL0_ADC_FIRST_DLY_MODE BIT(23) +#define SUN4I_GPADC_CTRL0_ADC_CLK_SELECT BIT(22) +#define SUN4I_GPADC_CTRL0_ADC_CLK_DIVIDER(x) ((GENMASK(1, 0) & (x)) << 20) +#define SUN4I_GPADC_CTRL0_FS_DIV(x) ((GENMASK(3, 0) & (x)) << 16) +#define SUN4I_GPADC_CTRL0_T_ACQ(x) (GENMASK(15, 0) & (x)) + +#define SUN4I_GPADC_CTRL1 0x04 + +#define SUN4I_GPADC_CTRL1_STYLUS_UP_DEBOUNCE(x) ((GENMASK(7, 0) & (x)) << 12) +#define SUN4I_GPADC_CTRL1_STYLUS_UP_DEBOUNCE_EN BIT(9) +#define SUN4I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(6) +#define SUN4I_GPADC_CTRL1_TP_DUAL_EN BIT(5) +#define SUN4I_GPADC_CTRL1_TP_MODE_EN BIT(4) +#define SUN4I_GPADC_CTRL1_TP_ADC_SELECT BIT(3) +#define SUN4I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(2, 0) & (x)) +#define SUN4I_GPADC_CTRL1_ADC_CHAN_MASK GENMASK(2, 0) + +/* TP_CTRL1 bits for sun6i SOCs */ +#define SUN6I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(7) +#define SUN6I_GPADC_CTRL1_TP_DUAL_EN BIT(6) +#define SUN6I_GPADC_CTRL1_TP_MODE_EN BIT(5) +#define SUN6I_GPADC_CTRL1_TP_ADC_SELECT BIT(4) +#define SUN6I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(3, 0) & BIT(x)) +#define SUN6I_GPADC_CTRL1_ADC_CHAN_MASK GENMASK(3, 0) + +/* TP_CTRL1 bits for sun8i SoCs */ +#define SUN8I_GPADC_CTRL1_CHOP_TEMP_EN BIT(8) +#define SUN8I_GPADC_CTRL1_GPADC_CALI_EN BIT(7) + +#define SUN4I_GPADC_CTRL2 0x08 + +#define SUN4I_GPADC_CTRL2_TP_SENSITIVE_ADJUST(x) ((GENMASK(3, 0) & (x)) << 28) +#define SUN4I_GPADC_CTRL2_TP_MODE_SELECT(x) ((GENMASK(1, 0) & (x)) << 26) +#define SUN4I_GPADC_CTRL2_PRE_MEA_EN BIT(24) +#define SUN4I_GPADC_CTRL2_PRE_MEA_THRE_CNT(x) (GENMASK(23, 0) & (x)) + +#define SUN4I_GPADC_CTRL3 0x0c + +#define SUN4I_GPADC_CTRL3_FILTER_EN BIT(2) +#define SUN4I_GPADC_CTRL3_FILTER_TYPE(x) (GENMASK(1, 0) & (x)) + +#define SUN4I_GPADC_TPR 0x18 + +#define SUN4I_GPADC_TPR_TEMP_ENABLE BIT(16) +#define SUN4I_GPADC_TPR_TEMP_PERIOD(x) (GENMASK(15, 0) & (x)) + +#define SUN4I_GPADC_INT_FIFOC 0x10 + +#define SUN4I_GPADC_INT_FIFOC_TEMP_IRQ_EN BIT(18) +#define SUN4I_GPADC_INT_FIFOC_TP_OVERRUN_IRQ_EN BIT(17) +#define SUN4I_GPADC_INT_FIFOC_TP_DATA_IRQ_EN BIT(16) +#define 
SUN4I_GPADC_INT_FIFOC_TP_DATA_XY_CHANGE BIT(13) +#define SUN4I_GPADC_INT_FIFOC_TP_FIFO_TRIG_LEVEL(x) ((GENMASK(4, 0) & (x)) << 8) +#define SUN4I_GPADC_INT_FIFOC_TP_DATA_DRQ_EN BIT(7) +#define SUN4I_GPADC_INT_FIFOC_TP_FIFO_FLUSH BIT(4) +#define SUN4I_GPADC_INT_FIFOC_TP_UP_IRQ_EN BIT(1) +#define SUN4I_GPADC_INT_FIFOC_TP_DOWN_IRQ_EN BIT(0) + +#define SUN4I_GPADC_INT_FIFOS 0x14 + +#define SUN4I_GPADC_INT_FIFOS_TEMP_DATA_PENDING BIT(18) +#define SUN4I_GPADC_INT_FIFOS_FIFO_OVERRUN_PENDING BIT(17) +#define SUN4I_GPADC_INT_FIFOS_FIFO_DATA_PENDING BIT(16) +#define SUN4I_GPADC_INT_FIFOS_TP_IDLE_FLG BIT(2) +#define SUN4I_GPADC_INT_FIFOS_TP_UP_PENDING BIT(1) +#define SUN4I_GPADC_INT_FIFOS_TP_DOWN_PENDING BIT(0) + +#define SUN4I_GPADC_CDAT 0x1c +#define SUN4I_GPADC_TEMP_DATA 0x20 +#define SUN4I_GPADC_DATA 0x24 + +#define SUN4I_GPADC_IRQ_FIFO_DATA 0 +#define SUN4I_GPADC_IRQ_TEMP_DATA 1 + +/* 10s delay before suspending the IP */ +#define SUN4I_GPADC_AUTOSUSPEND_DELAY 10000 + +struct sun4i_gpadc_dev { + struct device *dev; + struct regmap *regmap; + struct regmap_irq_chip_data *regmap_irqc; + void __iomem *base; +}; + +#endif diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h new file mode 100644 index 000000000..40a76b97b --- /dev/null +++ b/include/linux/mfd/syscon.h @@ -0,0 +1,54 @@ +/* + * System Control Driver + * + * Copyright (C) 2012 Freescale Semiconductor, Inc. + * Copyright (C) 2012 Linaro Ltd. + * + * Author: Dong Aisheng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_MFD_SYSCON_H__ +#define __LINUX_MFD_SYSCON_H__ + +#include +#include + +struct device_node; + +#ifdef CONFIG_MFD_SYSCON +extern struct regmap *syscon_node_to_regmap(struct device_node *np); +extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s); +extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s); +extern struct regmap *syscon_regmap_lookup_by_phandle( + struct device_node *np, + const char *property); +#else +static inline struct regmap *syscon_node_to_regmap(struct device_node *np) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline struct regmap *syscon_regmap_lookup_by_phandle( + struct device_node *np, + const char *property) +{ + return ERR_PTR(-ENOTSUPP); +} +#endif + +#endif /* __LINUX_MFD_SYSCON_H__ */ diff --git a/include/linux/mfd/syscon/atmel-matrix.h b/include/linux/mfd/syscon/atmel-matrix.h new file mode 100644 index 000000000..8293c3e2a --- /dev/null +++ b/include/linux/mfd/syscon/atmel-matrix.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2014 Atmel Corporation. + * + * Memory Controllers (MATRIX, EBI) - System peripherals registers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _LINUX_MFD_SYSCON_ATMEL_MATRIX_H +#define _LINUX_MFD_SYSCON_ATMEL_MATRIX_H + +#define AT91SAM9260_MATRIX_MCFG 0x00 +#define AT91SAM9260_MATRIX_SCFG 0x40 +#define AT91SAM9260_MATRIX_PRS 0x80 +#define AT91SAM9260_MATRIX_MRCR 0x100 +#define AT91SAM9260_MATRIX_EBICSA 0x11c + +#define AT91SAM9261_MATRIX_MRCR 0x0 +#define AT91SAM9261_MATRIX_SCFG 0x4 +#define AT91SAM9261_MATRIX_TCR 0x24 +#define AT91SAM9261_MATRIX_EBICSA 0x30 +#define AT91SAM9261_MATRIX_USBPUCR 0x34 + +#define AT91SAM9263_MATRIX_MCFG 0x00 +#define AT91SAM9263_MATRIX_SCFG 0x40 +#define AT91SAM9263_MATRIX_PRS 0x80 +#define AT91SAM9263_MATRIX_MRCR 0x100 +#define AT91SAM9263_MATRIX_TCR 0x114 +#define AT91SAM9263_MATRIX_EBI0CSA 0x120 +#define AT91SAM9263_MATRIX_EBI1CSA 0x124 + +#define AT91SAM9RL_MATRIX_MCFG 0x00 +#define AT91SAM9RL_MATRIX_SCFG 0x40 +#define AT91SAM9RL_MATRIX_PRS 0x80 +#define AT91SAM9RL_MATRIX_MRCR 0x100 +#define AT91SAM9RL_MATRIX_TCR 0x114 +#define AT91SAM9RL_MATRIX_EBICSA 0x120 + +#define AT91SAM9G45_MATRIX_MCFG 0x00 +#define AT91SAM9G45_MATRIX_SCFG 0x40 +#define AT91SAM9G45_MATRIX_PRS 0x80 +#define AT91SAM9G45_MATRIX_MRCR 0x100 +#define AT91SAM9G45_MATRIX_TCR 0x110 +#define AT91SAM9G45_MATRIX_DDRMPR 0x118 +#define AT91SAM9G45_MATRIX_EBICSA 0x128 + +#define AT91SAM9N12_MATRIX_MCFG 0x00 +#define AT91SAM9N12_MATRIX_SCFG 0x40 +#define AT91SAM9N12_MATRIX_PRS 0x80 +#define AT91SAM9N12_MATRIX_MRCR 0x100 +#define AT91SAM9N12_MATRIX_EBICSA 0x118 + +#define AT91SAM9X5_MATRIX_MCFG 0x00 +#define AT91SAM9X5_MATRIX_SCFG 0x40 +#define AT91SAM9X5_MATRIX_PRS 0x80 +#define AT91SAM9X5_MATRIX_MRCR 0x100 +#define AT91SAM9X5_MATRIX_EBICSA 0x120 + +#define SAMA5D3_MATRIX_MCFG 0x00 +#define SAMA5D3_MATRIX_SCFG 0x40 +#define SAMA5D3_MATRIX_PRS 0x80 +#define SAMA5D3_MATRIX_MRCR 0x100 + +#define AT91_MATRIX_MCFG(o, x) ((o) + ((x) * 0x4)) +#define AT91_MATRIX_ULBT GENMASK(2, 0) +#define AT91_MATRIX_ULBT_INFINITE (0 << 0) +#define AT91_MATRIX_ULBT_SINGLE (1 << 0) +#define AT91_MATRIX_ULBT_FOUR (2 << 0) +#define AT91_MATRIX_ULBT_EIGHT (3 << 0) +#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0) + +#define AT91_MATRIX_SCFG(o, x) ((o) + ((x) * 0x4)) +#define AT91_MATRIX_SLOT_CYCLE GENMASK(7, 0) +#define AT91_MATRIX_DEFMSTR_TYPE GENMASK(17, 16) +#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16) +#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16) +#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16) +#define AT91_MATRIX_FIXED_DEFMSTR GENMASK(20, 18) +#define AT91_MATRIX_ARBT GENMASK(25, 24) +#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24) +#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24) + +#define AT91_MATRIX_ITCM_SIZE GENMASK(3, 0) +#define AT91_MATRIX_ITCM_0 (0 << 0) +#define AT91_MATRIX_ITCM_16 (5 << 0) +#define AT91_MATRIX_ITCM_32 (6 << 0) +#define AT91_MATRIX_ITCM_64 (7 << 0) +#define AT91_MATRIX_DTCM_SIZE GENMASK(7, 4) +#define AT91_MATRIX_DTCM_0 (0 << 4) +#define AT91_MATRIX_DTCM_16 (5 << 4) +#define AT91_MATRIX_DTCM_32 (6 << 4) +#define AT91_MATRIX_DTCM_64 (7 << 4) + +#define AT91_MATRIX_PRAS(o, x) ((o) + ((x) * 0x8)) +#define AT91_MATRIX_PRBS(o, x) ((o) + ((x) * 0x8) + 0x4) +#define AT91_MATRIX_MPR(x) GENMASK(((x) * 0x4) + 1, ((x) * 0x4)) + +#define AT91_MATRIX_RCB(x) BIT(x) + +#define AT91_MATRIX_CSA(cs, val) (val << (cs)) +#define AT91_MATRIX_DBPUC BIT(8) +#define AT91_MATRIX_DBPDC BIT(9) +#define AT91_MATRIX_VDDIOMSEL BIT(16) +#define AT91_MATRIX_VDDIOMSEL_1_8V (0 << 16) +#define AT91_MATRIX_VDDIOMSEL_3_3V (1 << 16) +#define AT91_MATRIX_EBI_IOSR BIT(17) +#define AT91_MATRIX_DDR_IOSR BIT(18) +#define AT91_MATRIX_NFD0_SELECT BIT(24) 
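/*
 * Editorial sketch, not part of the upstream header: the MATRIX block is
 * exposed as a syscon device, so the offsets and masks above are normally
 * combined with regmap accessors rather than raw MMIO.  A minimal,
 * hypothetical example (the "matrix" handle, compatible string and
 * chip-select number are assumptions, error handling omitted):
 *
 *	struct regmap *matrix;
 *
 *	matrix = syscon_regmap_lookup_by_compatible("atmel,at91sam9260-matrix");
 *	if (!IS_ERR(matrix))
 *		regmap_update_bits(matrix, AT91SAM9260_MATRIX_EBICSA,
 *				   AT91_MATRIX_CSA(3, 1),
 *				   AT91_MATRIX_CSA(3, 1));
 *
 * This would assign EBI chip-select 3 in the chip-select assignment
 * register; real users typically look the regmap up from a device-tree
 * phandle instead of a hard-coded compatible.
 */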
+#define AT91_MATRIX_DDR_MP_EN BIT(25) +#define AT91_MATRIX_EBI_NUM_CS 8 + +#define AT91_MATRIX_USBPUCR_PUON BIT(30) + +#endif /* _LINUX_MFD_SYSCON_ATMEL_MATRIX_H */ diff --git a/include/linux/mfd/syscon/atmel-mc.h b/include/linux/mfd/syscon/atmel-mc.h new file mode 100644 index 000000000..afd9b8f1e --- /dev/null +++ b/include/linux/mfd/syscon/atmel-mc.h @@ -0,0 +1,144 @@ +/* + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * Memory Controllers (MC, EBI, SMC, SDRAMC, BFC) - System peripherals + * registers. + * Based on AT91RM9200 datasheet revision E. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _LINUX_MFD_SYSCON_ATMEL_MC_H_ +#define _LINUX_MFD_SYSCON_ATMEL_MC_H_ + +/* Memory Controller */ +#define AT91_MC_RCR 0x00 +#define AT91_MC_RCB BIT(0) + +#define AT91_MC_ASR 0x04 +#define AT91_MC_UNADD BIT(0) +#define AT91_MC_MISADD BIT(1) +#define AT91_MC_ABTSZ GENMASK(9, 8) +#define AT91_MC_ABTSZ_BYTE (0 << 8) +#define AT91_MC_ABTSZ_HALFWORD (1 << 8) +#define AT91_MC_ABTSZ_WORD (2 << 8) +#define AT91_MC_ABTTYP GENMASK(11, 10) +#define AT91_MC_ABTTYP_DATAREAD (0 << 10) +#define AT91_MC_ABTTYP_DATAWRITE (1 << 10) +#define AT91_MC_ABTTYP_FETCH (2 << 10) +#define AT91_MC_MST(n) BIT(16 + (n)) +#define AT91_MC_SVMST(n) BIT(24 + (n)) + +#define AT91_MC_AASR 0x08 + +#define AT91_MC_MPR 0x0c +#define AT91_MPR_MSTP(n) GENMASK(2 + ((x) * 4), ((x) * 4)) + +/* External Bus Interface (EBI) registers */ +#define AT91_MC_EBI_CSA 0x60 +#define AT91_MC_EBI_CS(n) BIT(x) +#define AT91_MC_EBI_NUM_CS 8 + +#define AT91_MC_EBI_CFGR 0x64 +#define AT91_MC_EBI_DBPUC BIT(0) + +/* Static Memory Controller (SMC) registers */ +#define AT91_MC_SMC_CSR(n) (0x70 + ((n) * 4)) +#define AT91_MC_SMC_NWS GENMASK(6, 0) +#define AT91_MC_SMC_NWS_(x) ((x) << 0) +#define AT91_MC_SMC_WSEN BIT(7) +#define AT91_MC_SMC_TDF GENMASK(11, 8) +#define AT91_MC_SMC_TDF_(x) ((x) << 8) +#define AT91_MC_SMC_TDF_MAX 0xf +#define AT91_MC_SMC_BAT BIT(12) +#define AT91_MC_SMC_DBW GENMASK(14, 13) +#define AT91_MC_SMC_DBW_16 (1 << 13) +#define AT91_MC_SMC_DBW_8 (2 << 13) +#define AT91_MC_SMC_DPR BIT(15) +#define AT91_MC_SMC_ACSS GENMASK(17, 16) +#define AT91_MC_SMC_ACSS_(x) ((x) << 16) +#define AT91_MC_SMC_ACSS_MAX 3 +#define AT91_MC_SMC_RWSETUP GENMASK(26, 24) +#define AT91_MC_SMC_RWSETUP_(x) ((x) << 24) +#define AT91_MC_SMC_RWHOLD GENMASK(30, 28) +#define AT91_MC_SMC_RWHOLD_(x) ((x) << 28) +#define AT91_MC_SMC_RWHOLDSETUP_MAX 7 + +/* SDRAM Controller registers */ +#define AT91_MC_SDRAMC_MR 0x90 +#define AT91_MC_SDRAMC_MODE GENMASK(3, 0) +#define AT91_MC_SDRAMC_MODE_NORMAL (0 << 0) +#define AT91_MC_SDRAMC_MODE_NOP (1 << 0) +#define AT91_MC_SDRAMC_MODE_PRECHARGE (2 << 0) +#define AT91_MC_SDRAMC_MODE_LMR (3 << 0) +#define AT91_MC_SDRAMC_MODE_REFRESH (4 << 0) +#define AT91_MC_SDRAMC_DBW_16 BIT(4) + +#define AT91_MC_SDRAMC_TR 0x94 +#define AT91_MC_SDRAMC_COUNT GENMASK(11, 0) + +#define AT91_MC_SDRAMC_CR 0x98 +#define AT91_MC_SDRAMC_NC GENMASK(1, 0) +#define AT91_MC_SDRAMC_NC_8 (0 << 0) +#define AT91_MC_SDRAMC_NC_9 (1 << 0) +#define AT91_MC_SDRAMC_NC_10 (2 << 0) +#define AT91_MC_SDRAMC_NC_11 (3 << 0) +#define AT91_MC_SDRAMC_NR GENMASK(3, 2) +#define AT91_MC_SDRAMC_NR_11 (0 << 2) +#define AT91_MC_SDRAMC_NR_12 (1 << 2) +#define AT91_MC_SDRAMC_NR_13 (2 << 2) +#define AT91_MC_SDRAMC_NB BIT(4) +#define 
AT91_MC_SDRAMC_NB_2 (0 << 4) +#define AT91_MC_SDRAMC_NB_4 (1 << 4) +#define AT91_MC_SDRAMC_CAS GENMASK(6, 5) +#define AT91_MC_SDRAMC_CAS_2 (2 << 5) +#define AT91_MC_SDRAMC_TWR GENMASK(10, 7) +#define AT91_MC_SDRAMC_TRC GENMASK(14, 11) +#define AT91_MC_SDRAMC_TRP GENMASK(18, 15) +#define AT91_MC_SDRAMC_TRCD GENMASK(22, 19) +#define AT91_MC_SDRAMC_TRAS GENMASK(26, 23) +#define AT91_MC_SDRAMC_TXSR GENMASK(30, 27) + +#define AT91_MC_SDRAMC_SRR 0x9c +#define AT91_MC_SDRAMC_SRCB BIT(0) + +#define AT91_MC_SDRAMC_LPR 0xa0 +#define AT91_MC_SDRAMC_LPCB BIT(0) + +#define AT91_MC_SDRAMC_IER 0xa4 +#define AT91_MC_SDRAMC_IDR 0xa8 +#define AT91_MC_SDRAMC_IMR 0xac +#define AT91_MC_SDRAMC_ISR 0xb0 +#define AT91_MC_SDRAMC_RES BIT(0) + +/* Burst Flash Controller register */ +#define AT91_MC_BFC_MR 0xc0 +#define AT91_MC_BFC_BFCOM GENMASK(1, 0) +#define AT91_MC_BFC_BFCOM_DISABLED (0 << 0) +#define AT91_MC_BFC_BFCOM_ASYNC (1 << 0) +#define AT91_MC_BFC_BFCOM_BURST (2 << 0) +#define AT91_MC_BFC_BFCC GENMASK(3, 2) +#define AT91_MC_BFC_BFCC_MCK (1 << 2) +#define AT91_MC_BFC_BFCC_DIV2 (2 << 2) +#define AT91_MC_BFC_BFCC_DIV4 (3 << 2) +#define AT91_MC_BFC_AVL GENMASK(7, 4) +#define AT91_MC_BFC_PAGES GENMASK(10, 8) +#define AT91_MC_BFC_PAGES_NO_PAGE (0 << 8) +#define AT91_MC_BFC_PAGES_16 (1 << 8) +#define AT91_MC_BFC_PAGES_32 (2 << 8) +#define AT91_MC_BFC_PAGES_64 (3 << 8) +#define AT91_MC_BFC_PAGES_128 (4 << 8) +#define AT91_MC_BFC_PAGES_256 (5 << 8) +#define AT91_MC_BFC_PAGES_512 (6 << 8) +#define AT91_MC_BFC_PAGES_1024 (7 << 8) +#define AT91_MC_BFC_OEL GENMASK(13, 12) +#define AT91_MC_BFC_BAAEN BIT(16) +#define AT91_MC_BFC_BFOEH BIT(17) +#define AT91_MC_BFC_MUXEN BIT(18) +#define AT91_MC_BFC_RDYEN BIT(19) + +#endif /* _LINUX_MFD_SYSCON_ATMEL_MC_H_ */ diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h new file mode 100644 index 000000000..7a367f34b --- /dev/null +++ b/include/linux/mfd/syscon/atmel-smc.h @@ -0,0 +1,122 @@ +/* + * Atmel SMC (Static Memory Controller) register offsets and bit definitions. + * + * Copyright (C) 2014 Atmel + * Copyright (C) 2014 Free Electrons + * + * Author: Boris Brezillon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_ +#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_ + +#include +#include +#include + +#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10)) +#define ATMEL_HSMC_SETUP(layout, cs) \ + ((layout)->timing_regs_offset + ((cs) * 0x14)) +#define ATMEL_SMC_PULSE(cs) (((cs) * 0x10) + 0x4) +#define ATMEL_HSMC_PULSE(layout, cs) \ + ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x4) +#define ATMEL_SMC_CYCLE(cs) (((cs) * 0x10) + 0x8) +#define ATMEL_HSMC_CYCLE(layout, cs) \ + ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x8) +#define ATMEL_SMC_NWE_SHIFT 0 +#define ATMEL_SMC_NCS_WR_SHIFT 8 +#define ATMEL_SMC_NRD_SHIFT 16 +#define ATMEL_SMC_NCS_RD_SHIFT 24 + +#define ATMEL_SMC_MODE(cs) (((cs) * 0x10) + 0xc) +#define ATMEL_HSMC_MODE(layout, cs) \ + ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x10) +#define ATMEL_SMC_MODE_READMODE_MASK BIT(0) +#define ATMEL_SMC_MODE_READMODE_NCS (0 << 0) +#define ATMEL_SMC_MODE_READMODE_NRD (1 << 0) +#define ATMEL_SMC_MODE_WRITEMODE_MASK BIT(1) +#define ATMEL_SMC_MODE_WRITEMODE_NCS (0 << 1) +#define ATMEL_SMC_MODE_WRITEMODE_NWE (1 << 1) +#define ATMEL_SMC_MODE_EXNWMODE_MASK GENMASK(5, 4) +#define ATMEL_SMC_MODE_EXNWMODE_DISABLE (0 << 4) +#define ATMEL_SMC_MODE_EXNWMODE_FROZEN (2 << 4) +#define ATMEL_SMC_MODE_EXNWMODE_READY (3 << 4) +#define ATMEL_SMC_MODE_BAT_MASK BIT(8) +#define ATMEL_SMC_MODE_BAT_SELECT (0 << 8) +#define ATMEL_SMC_MODE_BAT_WRITE (1 << 8) +#define ATMEL_SMC_MODE_DBW_MASK GENMASK(13, 12) +#define ATMEL_SMC_MODE_DBW_8 (0 << 12) +#define ATMEL_SMC_MODE_DBW_16 (1 << 12) +#define ATMEL_SMC_MODE_DBW_32 (2 << 12) +#define ATMEL_SMC_MODE_TDF_MASK GENMASK(19, 16) +#define ATMEL_SMC_MODE_TDF(x) (((x) - 1) << 16) +#define ATMEL_SMC_MODE_TDF_MAX 16 +#define ATMEL_SMC_MODE_TDF_MIN 1 +#define ATMEL_SMC_MODE_TDFMODE_OPTIMIZED BIT(20) +#define ATMEL_SMC_MODE_PMEN BIT(24) +#define ATMEL_SMC_MODE_PS_MASK GENMASK(29, 28) +#define ATMEL_SMC_MODE_PS_4 (0 << 28) +#define ATMEL_SMC_MODE_PS_8 (1 << 28) +#define ATMEL_SMC_MODE_PS_16 (2 << 28) +#define ATMEL_SMC_MODE_PS_32 (3 << 28) + +#define ATMEL_HSMC_TIMINGS(layout, cs) \ + ((layout)->timing_regs_offset + ((cs) * 0x14) + 0xc) +#define ATMEL_HSMC_TIMINGS_OCMS BIT(12) +#define ATMEL_HSMC_TIMINGS_RBNSEL(x) ((x) << 28) +#define ATMEL_HSMC_TIMINGS_NFSEL BIT(31) +#define ATMEL_HSMC_TIMINGS_TCLR_SHIFT 0 +#define ATMEL_HSMC_TIMINGS_TADL_SHIFT 4 +#define ATMEL_HSMC_TIMINGS_TAR_SHIFT 8 +#define ATMEL_HSMC_TIMINGS_TRR_SHIFT 16 +#define ATMEL_HSMC_TIMINGS_TWB_SHIFT 24 + +struct atmel_hsmc_reg_layout { + unsigned int timing_regs_offset; +}; + +/** + * struct atmel_smc_cs_conf - SMC CS config as described in the datasheet. + * @setup: NCS/NWE/NRD setup timings (not applicable to at91rm9200) + * @pulse: NCS/NWE/NRD pulse timings (not applicable to at91rm9200) + * @cycle: NWE/NRD cycle timings (not applicable to at91rm9200) + * @timings: advanced NAND related timings (only applicable to HSMC) + * @mode: all kind of config parameters (see the fields definition above). 
+ * The mode fields are different on at91rm9200 + */ +struct atmel_smc_cs_conf { + u32 setup; + u32 pulse; + u32 cycle; + u32 timings; + u32 mode; +}; + +void atmel_smc_cs_conf_init(struct atmel_smc_cs_conf *conf); +int atmel_smc_cs_conf_set_timing(struct atmel_smc_cs_conf *conf, + unsigned int shift, + unsigned int ncycles); +int atmel_smc_cs_conf_set_setup(struct atmel_smc_cs_conf *conf, + unsigned int shift, unsigned int ncycles); +int atmel_smc_cs_conf_set_pulse(struct atmel_smc_cs_conf *conf, + unsigned int shift, unsigned int ncycles); +int atmel_smc_cs_conf_set_cycle(struct atmel_smc_cs_conf *conf, + unsigned int shift, unsigned int ncycles); +void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs, + const struct atmel_smc_cs_conf *conf); +void atmel_hsmc_cs_conf_apply(struct regmap *regmap, + const struct atmel_hsmc_reg_layout *reglayout, + int cs, const struct atmel_smc_cs_conf *conf); +void atmel_smc_cs_conf_get(struct regmap *regmap, int cs, + struct atmel_smc_cs_conf *conf); +void atmel_hsmc_cs_conf_get(struct regmap *regmap, + const struct atmel_hsmc_reg_layout *reglayout, + int cs, struct atmel_smc_cs_conf *conf); +const struct atmel_hsmc_reg_layout * +atmel_hsmc_get_reg_layout(struct device_node *np); + +#endif /* _LINUX_MFD_SYSCON_ATMEL_SMC_H_ */ diff --git a/include/linux/mfd/syscon/atmel-st.h b/include/linux/mfd/syscon/atmel-st.h new file mode 100644 index 000000000..8acf1ec1f --- /dev/null +++ b/include/linux/mfd/syscon/atmel-st.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * System Timer (ST) - System peripherals registers. + * Based on AT91RM9200 datasheet revision E. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _LINUX_MFD_SYSCON_ATMEL_ST_H +#define _LINUX_MFD_SYSCON_ATMEL_ST_H + +#include + +#define AT91_ST_CR 0x00 /* Control Register */ +#define AT91_ST_WDRST BIT(0) /* Watchdog Timer Restart */ + +#define AT91_ST_PIMR 0x04 /* Period Interval Mode Register */ +#define AT91_ST_PIV 0xffff /* Period Interval Value */ + +#define AT91_ST_WDMR 0x08 /* Watchdog Mode Register */ +#define AT91_ST_WDV 0xffff /* Watchdog Counter Value */ +#define AT91_ST_RSTEN BIT(16) /* Reset Enable */ +#define AT91_ST_EXTEN BIT(17) /* External Signal Assertion Enable */ + +#define AT91_ST_RTMR 0x0c /* Real-time Mode Register */ +#define AT91_ST_RTPRES 0xffff /* Real-time Prescalar Value */ + +#define AT91_ST_SR 0x10 /* Status Register */ +#define AT91_ST_PITS BIT(0) /* Period Interval Timer Status */ +#define AT91_ST_WDOVF BIT(1) /* Watchdog Overflow */ +#define AT91_ST_RTTINC BIT(2) /* Real-time Timer Increment */ +#define AT91_ST_ALMS BIT(3) /* Alarm Status */ + +#define AT91_ST_IER 0x14 /* Interrupt Enable Register */ +#define AT91_ST_IDR 0x18 /* Interrupt Disable Register */ +#define AT91_ST_IMR 0x1c /* Interrupt Mask Register */ + +#define AT91_ST_RTAR 0x20 /* Real-time Alarm Register */ +#define AT91_ST_ALMV 0xfffff /* Alarm Value */ + +#define AT91_ST_CRTR 0x24 /* Current Real-time Register */ +#define AT91_ST_CRTV 0xfffff /* Current Real-Time Value */ + +#endif /* _LINUX_MFD_SYSCON_ATMEL_ST_H */ diff --git a/include/linux/mfd/syscon/clps711x.h b/include/linux/mfd/syscon/clps711x.h new file mode 100644 index 000000000..26355abae --- /dev/null +++ b/include/linux/mfd/syscon/clps711x.h @@ -0,0 +1,94 @@ +/* + * CLPS711X system register bits definitions + * + * Copyright (C) 2013 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _LINUX_MFD_SYSCON_CLPS711X_H_ +#define _LINUX_MFD_SYSCON_CLPS711X_H_ + +#define SYSCON_OFFSET (0x00) +#define SYSFLG_OFFSET (0x40) + +#define SYSCON1_KBDSCAN(x) ((x) & 15) +#define SYSCON1_KBDSCAN_MASK (15) +#define SYSCON1_TC1M (1 << 4) +#define SYSCON1_TC1S (1 << 5) +#define SYSCON1_TC2M (1 << 6) +#define SYSCON1_TC2S (1 << 7) +#define SYSCON1_BZTOG (1 << 9) +#define SYSCON1_BZMOD (1 << 10) +#define SYSCON1_DBGEN (1 << 11) +#define SYSCON1_LCDEN (1 << 12) +#define SYSCON1_CDENTX (1 << 13) +#define SYSCON1_CDENRX (1 << 14) +#define SYSCON1_SIREN (1 << 15) +#define SYSCON1_ADCKSEL(x) (((x) & 3) << 16) +#define SYSCON1_ADCKSEL_MASK (3 << 16) +#define SYSCON1_EXCKEN (1 << 18) +#define SYSCON1_WAKEDIS (1 << 19) +#define SYSCON1_IRTXM (1 << 20) + +#define SYSCON2_SERSEL (1 << 0) +#define SYSCON2_KBD6 (1 << 1) +#define SYSCON2_DRAMZ (1 << 2) +#define SYSCON2_KBWEN (1 << 3) +#define SYSCON2_SS2TXEN (1 << 4) +#define SYSCON2_PCCARD1 (1 << 5) +#define SYSCON2_PCCARD2 (1 << 6) +#define SYSCON2_SS2RXEN (1 << 7) +#define SYSCON2_SS2MAEN (1 << 9) +#define SYSCON2_OSTB (1 << 12) +#define SYSCON2_CLKENSL (1 << 13) +#define SYSCON2_BUZFREQ (1 << 14) + +#define SYSCON3_ADCCON (1 << 0) +#define SYSCON3_CLKCTL0 (1 << 1) +#define SYSCON3_CLKCTL1 (1 << 2) +#define SYSCON3_DAISEL (1 << 3) +#define SYSCON3_ADCCKNSEN (1 << 4) +#define SYSCON3_VERSN(x) (((x) >> 5) & 7) +#define SYSCON3_VERSN_MASK (7 << 5) +#define SYSCON3_FASTWAKE (1 << 8) +#define SYSCON3_DAIEN (1 << 9) +#define SYSCON3_128FS SYSCON3_DAIEN +#define SYSCON3_ENPD67 (1 << 10) + +#define SYSCON_UARTEN (1 << 8) + +#define SYSFLG1_MCDR (1 << 0) +#define SYSFLG1_DCDET (1 << 1) +#define SYSFLG1_WUDR (1 << 2) +#define SYSFLG1_WUON (1 << 3) +#define SYSFLG1_CTS (1 << 8) +#define SYSFLG1_DSR (1 << 9) +#define SYSFLG1_DCD (1 << 10) +#define SYSFLG1_NBFLG (1 << 12) +#define SYSFLG1_RSTFLG (1 << 13) +#define SYSFLG1_PFFLG (1 << 14) +#define SYSFLG1_CLDFLG (1 << 15) +#define SYSFLG1_CRXFE (1 << 24) +#define SYSFLG1_CTXFF (1 << 25) +#define SYSFLG1_SSIBUSY (1 << 26) +#define SYSFLG1_ID (1 << 29) +#define SYSFLG1_VERID(x) (((x) >> 30) & 3) +#define SYSFLG1_VERID_MASK (3 << 30) + +#define SYSFLG2_SSRXOF (1 << 0) +#define SYSFLG2_RESVAL (1 << 1) +#define SYSFLG2_RESFRM (1 << 2) +#define SYSFLG2_SS2RXFE (1 << 3) +#define SYSFLG2_SS2TXFF (1 << 4) +#define SYSFLG2_SS2TXUF (1 << 5) +#define SYSFLG2_CKMODE (1 << 6) + +#define SYSFLG_UBUSY (1 << 11) +#define SYSFLG_URXFE (1 << 22) +#define SYSFLG_UTXFF (1 << 23) + +#endif diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h new file mode 100644 index 000000000..6c1ad160e --- /dev/null +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -0,0 +1,463 @@ +/* + * Copyright (C) 2012 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_IMX6Q_IOMUXC_GPR_H +#define __LINUX_IMX6Q_IOMUXC_GPR_H + +#include + +#define IOMUXC_GPR0 0x00 +#define IOMUXC_GPR1 0x04 +#define IOMUXC_GPR2 0x08 +#define IOMUXC_GPR3 0x0c +#define IOMUXC_GPR4 0x10 +#define IOMUXC_GPR5 0x14 +#define IOMUXC_GPR6 0x18 +#define IOMUXC_GPR7 0x1c +#define IOMUXC_GPR8 0x20 +#define IOMUXC_GPR9 0x24 +#define IOMUXC_GPR10 0x28 +#define IOMUXC_GPR11 0x2c +#define IOMUXC_GPR12 0x30 +#define IOMUXC_GPR13 0x34 + +#define IMX6Q_GPR0_CLOCK_8_MUX_SEL_MASK (0x3 << 30) +#define IMX6Q_GPR0_CLOCK_8_MUX_SEL_AUDMUX_RXCLK_P7_MUXED (0x0 << 30) +#define IMX6Q_GPR0_CLOCK_8_MUX_SEL_AUDMUX_RXCLK_P7 (0x1 << 30) +#define IMX6Q_GPR0_CLOCK_8_MUX_SEL_SSI3_SSI_SRCK (0x2 << 30) +#define IMX6Q_GPR0_CLOCK_8_MUX_SEL_SSI3_RX_BIT_CLK (0x3 << 30) +#define IMX6Q_GPR0_CLOCK_0_MUX_SEL_MASK (0x3 << 28) +#define IMX6Q_GPR0_CLOCK_0_MUX_SEL_ESAI1_IPP_IND_SCKR_MUXED (0x0 << 28) +#define IMX6Q_GPR0_CLOCK_0_MUX_SEL_ESAI1_IPP_IND_SCKR (0x1 << 28) +#define IMX6Q_GPR0_CLOCK_0_MUX_SEL_ESAI1_IPP_DO_SCKR (0x2 << 28) +#define IMX6Q_GPR0_CLOCK_B_MUX_SEL_MASK (0x3 << 26) +#define IMX6Q_GPR0_CLOCK_B_MUX_SEL_AUDMUX_TXCLK_P7_MUXED (0x0 << 26) +#define IMX6Q_GPR0_CLOCK_B_MUX_SEL_AUDMUX_TXCLK_P7 (0x1 << 26) +#define IMX6Q_GPR0_CLOCK_B_MUX_SEL_SSI3_SSI_STCK (0x2 << 26) +#define IMX6Q_GPR0_CLOCK_B_MUX_SEL_SSI3_TX_BIT_CLK (0x3 << 26) +#define IMX6Q_GPR0_CLOCK_3_MUX_SEL_MASK (0x3 << 24) +#define IMX6Q_GPR0_CLOCK_3_MUX_SEL_AUDMUX_RXCLK_P7_MUXED (0x3 << 24) +#define IMX6Q_GPR0_CLOCK_3_MUX_SEL_AUDMUX_RXCLK_P7 (0x3 << 24) +#define IMX6Q_GPR0_CLOCK_3_MUX_SEL_SSI3_SSI_SRCK (0x3 << 24) +#define IMX6Q_GPR0_CLOCK_3_MUX_SEL_SSI3_RX_BIT_CLK (0x3 << 24) +#define IMX6Q_GPR0_CLOCK_A_MUX_SEL_MASK (0x3 << 22) +#define IMX6Q_GPR0_CLOCK_A_MUX_SEL_AUDMUX_TXCLK_P2_MUXED (0x0 << 22) +#define IMX6Q_GPR0_CLOCK_A_MUX_SEL_AUDMUX_TXCLK_P2 (0x1 << 22) +#define IMX6Q_GPR0_CLOCK_A_MUX_SEL_SSI2_SSI_STCK (0x2 << 22) +#define IMX6Q_GPR0_CLOCK_A_MUX_SEL_SSI2_TX_BIT_CLK (0x3 << 22) +#define IMX6Q_GPR0_CLOCK_2_MUX_SEL_MASK (0x3 << 20) +#define IMX6Q_GPR0_CLOCK_2_MUX_SEL_AUDMUX_RXCLK_P2_MUXED (0x0 << 20) +#define IMX6Q_GPR0_CLOCK_2_MUX_SEL_AUDMUX_RXCLK_P2 (0x1 << 20) +#define IMX6Q_GPR0_CLOCK_2_MUX_SEL_SSI2_SSI_SRCK (0x2 << 20) +#define IMX6Q_GPR0_CLOCK_2_MUX_SEL_SSI2_RX_BIT_CLK (0x3 << 20) +#define IMX6Q_GPR0_CLOCK_9_MUX_SEL_MASK (0x3 << 18) +#define IMX6Q_GPR0_CLOCK_9_MUX_SEL_AUDMUX_TXCLK_P1_MUXED (0x0 << 18) +#define IMX6Q_GPR0_CLOCK_9_MUX_SEL_AUDMUX_TXCLK_P1 (0x1 << 18) +#define IMX6Q_GPR0_CLOCK_9_MUX_SEL_SSI1_SSI_STCK (0x2 << 18) +#define IMX6Q_GPR0_CLOCK_9_MUX_SEL_SSI1_SSI_TX_BIT_CLK (0x3 << 18) +#define IMX6Q_GPR0_CLOCK_1_MUX_SEL_MASK (0x3 << 16) +#define IMX6Q_GPR0_CLOCK_1_MUX_SEL_AUDMUX_RXCLK_P1_MUXED (0x0 << 16) +#define IMX6Q_GPR0_CLOCK_1_MUX_SEL_AUDMUX_RXCLK_P1 (0x1 << 16) +#define IMX6Q_GPR0_CLOCK_1_MUX_SEL_SSI1_SSI_SRCK (0x2 << 16) +#define IMX6Q_GPR0_CLOCK_1_MUX_SEL_SSI1_SSI_RX_BIT_CLK (0x3 << 16) +#define IMX6Q_GPR0_TX_CLK2_MUX_SEL_MASK (0x3 << 14) +#define IMX6Q_GPR0_TX_CLK2_MUX_SEL_ASRCK_CLK1 (0x0 << 14) +#define IMX6Q_GPR0_TX_CLK2_MUX_SEL_ASRCK_CLK2 (0x1 << 14) +#define IMX6Q_GPR0_TX_CLK2_MUX_SEL_ASRCK_CLK3 (0x2 << 14) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL7_MASK BIT(7) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL7_SPDIF 0x0 +#define IMX6Q_GPR0_DMAREQ_MUX_SEL7_IOMUX BIT(7) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL6_MASK BIT(6) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL6_ESAI 0x0 +#define IMX6Q_GPR0_DMAREQ_MUX_SEL6_I2C3 BIT(6) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL5_MASK BIT(5) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL5_ECSPI4 0x0 +#define 
IMX6Q_GPR0_DMAREQ_MUX_SEL5_EPIT2 BIT(5) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL4_MASK BIT(4) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL4_ECSPI4 0x0 +#define IMX6Q_GPR0_DMAREQ_MUX_SEL4_I2C1 BIT(4) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL3_MASK BIT(3) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL3_ECSPI2 0x0 +#define IMX6Q_GPR0_DMAREQ_MUX_SEL3_I2C1 BIT(3) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL2_MASK BIT(2) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL2_ECSPI1 0x0 +#define IMX6Q_GPR0_DMAREQ_MUX_SEL2_I2C2 BIT(2) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL1_MASK BIT(1) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL1_ECSPI1 0x0 +#define IMX6Q_GPR0_DMAREQ_MUX_SEL1_I2C3 BIT(1) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL0_MASK BIT(0) +#define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IPU1 0x0 +#define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IOMUX BIT(0) + +#define IMX6Q_GPR1_PCIE_REQ_MASK (0x3 << 30) +#define IMX6Q_GPR1_PCIE_SW_RST BIT(29) +#define IMX6Q_GPR1_PCIE_EXIT_L1 BIT(28) +#define IMX6Q_GPR1_PCIE_RDY_L23 BIT(27) +#define IMX6Q_GPR1_PCIE_ENTER_L1 BIT(26) +#define IMX6Q_GPR1_MIPI_COLOR_SW BIT(25) +#define IMX6Q_GPR1_DPI_OFF BIT(24) +#define IMX6Q_GPR1_EXC_MON_MASK BIT(22) +#define IMX6Q_GPR1_EXC_MON_OKAY 0x0 +#define IMX6Q_GPR1_EXC_MON_SLVE BIT(22) +#define IMX6Q_GPR1_ENET_CLK_SEL_MASK BIT(21) +#define IMX6Q_GPR1_ENET_CLK_SEL_PAD 0 +#define IMX6Q_GPR1_ENET_CLK_SEL_ANATOP BIT(21) +#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK BIT(20) +#define IMX6Q_GPR1_MIPI_IPU2_MUX_GASKET 0x0 +#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX BIT(20) +#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK BIT(19) +#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET 0x0 +#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX BIT(19) +#define IMX6Q_GPR1_PCIE_TEST_PD BIT(18) +#define IMX6Q_GPR1_IPU_VPU_MUX_MASK BIT(17) +#define IMX6Q_GPR1_IPU_VPU_MUX_IPU1 0x0 +#define IMX6Q_GPR1_IPU_VPU_MUX_IPU2 BIT(17) +#define IMX6Q_GPR1_PCIE_REF_CLK_EN BIT(16) +#define IMX6Q_GPR1_USB_EXP_MODE BIT(15) +#define IMX6Q_GPR1_PCIE_INT BIT(14) +#define IMX6Q_GPR1_USB_OTG_ID_SEL_MASK BIT(13) +#define IMX6Q_GPR1_USB_OTG_ID_SEL_ENET_RX_ER 0x0 +#define IMX6Q_GPR1_USB_OTG_ID_SEL_GPIO_1 BIT(13) +#define IMX6Q_GPR1_GINT BIT(12) +#define IMX6Q_GPR1_ADDRS3_MASK (0x3 << 10) +#define IMX6Q_GPR1_ADDRS3_32MB (0x0 << 10) +#define IMX6Q_GPR1_ADDRS3_64MB (0x1 << 10) +#define IMX6Q_GPR1_ADDRS3_128MB (0x2 << 10) +#define IMX6Q_GPR1_ACT_CS3 BIT(9) +#define IMX6Q_GPR1_ADDRS2_MASK (0x3 << 7) +#define IMX6Q_GPR1_ACT_CS2 BIT(6) +#define IMX6Q_GPR1_ADDRS1_MASK (0x3 << 4) +#define IMX6Q_GPR1_ACT_CS1 BIT(3) +#define IMX6Q_GPR1_ADDRS0_MASK (0x3 << 1) +#define IMX6Q_GPR1_ACT_CS0 BIT(0) + +#define IMX6Q_GPR2_COUNTER_RESET_VAL_MASK (0x3 << 20) +#define IMX6Q_GPR2_COUNTER_RESET_VAL_5 (0x0 << 20) +#define IMX6Q_GPR2_COUNTER_RESET_VAL_3 (0x1 << 20) +#define IMX6Q_GPR2_COUNTER_RESET_VAL_4 (0x2 << 20) +#define IMX6Q_GPR2_COUNTER_RESET_VAL_6 (0x3 << 20) +#define IMX6Q_GPR2_LVDS_CLK_SHIFT_MASK (0x7 << 16) +#define IMX6Q_GPR2_LVDS_CLK_SHIFT_0 (0x0 << 16) +#define IMX6Q_GPR2_LVDS_CLK_SHIFT_1 (0x1 << 16) +#define IMX6Q_GPR2_LVDS_CLK_SHIFT_2 (0x2 << 16) +#define IMX6Q_GPR2_LVDS_CLK_SHIFT_3 (0x3 << 16) +#define IMX6Q_GPR2_LVDS_CLK_SHIFT_4 (0x4 << 16) +#define IMX6Q_GPR2_LVDS_CLK_SHIFT_5 (0x5 << 16) +#define IMX6Q_GPR2_LVDS_CLK_SHIFT_6 (0x6 << 16) +#define IMX6Q_GPR2_LVDS_CLK_SHIFT_7 (0x7 << 16) +#define IMX6Q_GPR2_BGREF_RRMODE_MASK BIT(15) +#define IMX6Q_GPR2_BGREF_RRMODE_EXT_RESISTOR 0x0 +#define IMX6Q_GPR2_BGREF_RRMODE_INT_RESISTOR BIT(15) +#define IMX6Q_GPR2_DI1_VS_POLARITY_MASK BIT(10) +#define IMX6Q_GPR2_DI1_VS_POLARITY_ACTIVE_H 0x0 +#define IMX6Q_GPR2_DI1_VS_POLARITY_ACTIVE_L BIT(10) +#define IMX6Q_GPR2_DI0_VS_POLARITY_MASK BIT(9) 
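/*
 * Editorial sketch, not part of the upstream header: these IOMUXC GPR
 * fields are reached through the syscon regmap interface declared in
 * include/linux/mfd/syscon.h.  A minimal, hypothetical example (the
 * compatible string and the clock choice are assumptions, error handling
 * omitted):
 *
 *	struct regmap *gpr;
 *
 *	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
 *	if (!IS_ERR(gpr))
 *		regmap_update_bits(gpr, IOMUXC_GPR1,
 *				   IMX6Q_GPR1_ENET_CLK_SEL_MASK,
 *				   IMX6Q_GPR1_ENET_CLK_SEL_ANATOP);
 *
 * This selects the ANATOP-generated ENET reference clock, which is the
 * typical way platform code consumes the mask/value pairs defined here.
 */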
+#define IMX6Q_GPR2_DI0_VS_POLARITY_ACTIVE_H 0x0 +#define IMX6Q_GPR2_DI0_VS_POLARITY_ACTIVE_L BIT(9) +#define IMX6Q_GPR2_BIT_MAPPING_CH1_MASK BIT(8) +#define IMX6Q_GPR2_BIT_MAPPING_CH1_SPWG 0x0 +#define IMX6Q_GPR2_BIT_MAPPING_CH1_JEIDA BIT(8) +#define IMX6Q_GPR2_DATA_WIDTH_CH1_MASK BIT(7) +#define IMX6Q_GPR2_DATA_WIDTH_CH1_18BIT 0x0 +#define IMX6Q_GPR2_DATA_WIDTH_CH1_24BIT BIT(7) +#define IMX6Q_GPR2_BIT_MAPPING_CH0_MASK BIT(6) +#define IMX6Q_GPR2_BIT_MAPPING_CH0_SPWG 0x0 +#define IMX6Q_GPR2_BIT_MAPPING_CH0_JEIDA BIT(6) +#define IMX6Q_GPR2_DATA_WIDTH_CH0_MASK BIT(5) +#define IMX6Q_GPR2_DATA_WIDTH_CH0_18BIT 0x0 +#define IMX6Q_GPR2_DATA_WIDTH_CH0_24BIT BIT(5) +#define IMX6Q_GPR2_SPLIT_MODE_EN BIT(4) +#define IMX6Q_GPR2_CH1_MODE_MASK (0x3 << 2) +#define IMX6Q_GPR2_CH1_MODE_DISABLE (0x0 << 2) +#define IMX6Q_GPR2_CH1_MODE_EN_ROUTE_DI0 (0x1 << 2) +#define IMX6Q_GPR2_CH1_MODE_EN_ROUTE_DI1 (0x3 << 2) +#define IMX6Q_GPR2_CH0_MODE_MASK (0x3 << 0) +#define IMX6Q_GPR2_CH0_MODE_DISABLE (0x0 << 0) +#define IMX6Q_GPR2_CH0_MODE_EN_ROUTE_DI0 (0x1 << 0) +#define IMX6Q_GPR2_CH0_MODE_EN_ROUTE_DI1 (0x3 << 0) + +#define IMX6Q_GPR3_GPU_DBG_MASK (0x3 << 29) +#define IMX6Q_GPR3_GPU_DBG_GPU3D (0x0 << 29) +#define IMX6Q_GPR3_GPU_DBG_GPU2D (0x1 << 29) +#define IMX6Q_GPR3_GPU_DBG_OPENVG (0x2 << 29) +#define IMX6Q_GPR3_BCH_WR_CACHE_CTL BIT(28) +#define IMX6Q_GPR3_BCH_RD_CACHE_CTL BIT(27) +#define IMX6Q_GPR3_USDHCX_WR_CACHE_CTL BIT(26) +#define IMX6Q_GPR3_USDHCX_RD_CACHE_CTL BIT(25) +#define IMX6Q_GPR3_OCRAM_CTL_MASK (0xf << 21) +#define IMX6Q_GPR3_OCRAM_STATUS_MASK (0xf << 17) +#define IMX6Q_GPR3_CORE3_DBG_ACK_EN BIT(16) +#define IMX6Q_GPR3_CORE2_DBG_ACK_EN BIT(15) +#define IMX6Q_GPR3_CORE1_DBG_ACK_EN BIT(14) +#define IMX6Q_GPR3_CORE0_DBG_ACK_EN BIT(13) +#define IMX6Q_GPR3_TZASC2_BOOT_LOCK BIT(12) +#define IMX6Q_GPR3_TZASC1_BOOT_LOCK BIT(11) +#define IMX6Q_GPR3_IPU_DIAG_MASK BIT(10) +#define IMX6Q_GPR3_LVDS1_MUX_CTL_MASK (0x3 << 8) +#define IMX6Q_GPR3_LVDS1_MUX_CTL_IPU1_DI0 (0x0 << 8) +#define IMX6Q_GPR3_LVDS1_MUX_CTL_IPU1_DI1 (0x1 << 8) +#define IMX6Q_GPR3_LVDS1_MUX_CTL_IPU2_DI0 (0x2 << 8) +#define IMX6Q_GPR3_LVDS1_MUX_CTL_IPU2_DI1 (0x3 << 8) +#define IMX6Q_GPR3_LVDS0_MUX_CTL_MASK (0x3 << 6) +#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU1_DI0 (0x0 << 6) +#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU1_DI1 (0x1 << 6) +#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI0 (0x2 << 6) +#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI1 (0x3 << 6) +#define IMX6Q_GPR3_MIPI_MUX_CTL_SHIFT 4 +#define IMX6Q_GPR3_MIPI_MUX_CTL_MASK (0x3 << 4) +#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI0 (0x0 << 4) +#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI1 (0x1 << 4) +#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU2_DI0 (0x2 << 4) +#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU2_DI1 (0x3 << 4) +#define IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT 2 +#define IMX6Q_GPR3_HDMI_MUX_CTL_MASK (0x3 << 2) +#define IMX6Q_GPR3_HDMI_MUX_CTL_IPU1_DI0 (0x0 << 2) +#define IMX6Q_GPR3_HDMI_MUX_CTL_IPU1_DI1 (0x1 << 2) +#define IMX6Q_GPR3_HDMI_MUX_CTL_IPU2_DI0 (0x2 << 2) +#define IMX6Q_GPR3_HDMI_MUX_CTL_IPU2_DI1 (0x3 << 2) + +#define IMX6Q_GPR4_VDOA_WR_CACHE_SEL BIT(31) +#define IMX6Q_GPR4_VDOA_RD_CACHE_SEL BIT(30) +#define IMX6Q_GPR4_VDOA_WR_CACHE_VAL BIT(29) +#define IMX6Q_GPR4_VDOA_RD_CACHE_VAL BIT(28) +#define IMX6Q_GPR4_PCIE_WR_CACHE_SEL BIT(27) +#define IMX6Q_GPR4_PCIE_RD_CACHE_SEL BIT(26) +#define IMX6Q_GPR4_PCIE_WR_CACHE_VAL BIT(25) +#define IMX6Q_GPR4_PCIE_RD_CACHE_VAL BIT(24) +#define IMX6Q_GPR4_SDMA_STOP_ACK BIT(19) +#define IMX6Q_GPR4_CAN2_STOP_ACK BIT(18) +#define IMX6Q_GPR4_CAN1_STOP_ACK BIT(17) +#define IMX6Q_GPR4_ENET_STOP_ACK 
BIT(16) +#define IMX6Q_GPR4_SOC_VERSION_MASK (0xff << 8) +#define IMX6Q_GPR4_SOC_VERSION_OFF 0x8 +#define IMX6Q_GPR4_VPU_WR_CACHE_SEL BIT(7) +#define IMX6Q_GPR4_VPU_RD_CACHE_SEL BIT(6) +#define IMX6Q_GPR4_VPU_P_WR_CACHE_VAL BIT(3) +#define IMX6Q_GPR4_VPU_P_RD_CACHE_VAL_MASK BIT(2) +#define IMX6Q_GPR4_IPU_WR_CACHE_CTL BIT(1) +#define IMX6Q_GPR4_IPU_RD_CACHE_CTL BIT(0) + +#define IMX6Q_GPR5_L2_CLK_STOP BIT(8) +#define IMX6Q_GPR5_SATA_SW_PD BIT(10) +#define IMX6Q_GPR5_SATA_SW_RST BIT(11) + +#define IMX6Q_GPR6_IPU1_ID00_WR_QOS_MASK (0xf << 0) +#define IMX6Q_GPR6_IPU1_ID01_WR_QOS_MASK (0xf << 4) +#define IMX6Q_GPR6_IPU1_ID10_WR_QOS_MASK (0xf << 8) +#define IMX6Q_GPR6_IPU1_ID11_WR_QOS_MASK (0xf << 12) +#define IMX6Q_GPR6_IPU1_ID00_RD_QOS_MASK (0xf << 16) +#define IMX6Q_GPR6_IPU1_ID01_RD_QOS_MASK (0xf << 20) +#define IMX6Q_GPR6_IPU1_ID10_RD_QOS_MASK (0xf << 24) +#define IMX6Q_GPR6_IPU1_ID11_RD_QOS_MASK (0xf << 28) + +#define IMX6Q_GPR7_IPU2_ID00_WR_QOS_MASK (0xf << 0) +#define IMX6Q_GPR7_IPU2_ID01_WR_QOS_MASK (0xf << 4) +#define IMX6Q_GPR7_IPU2_ID10_WR_QOS_MASK (0xf << 8) +#define IMX6Q_GPR7_IPU2_ID11_WR_QOS_MASK (0xf << 12) +#define IMX6Q_GPR7_IPU2_ID00_RD_QOS_MASK (0xf << 16) +#define IMX6Q_GPR7_IPU2_ID01_RD_QOS_MASK (0xf << 20) +#define IMX6Q_GPR7_IPU2_ID10_RD_QOS_MASK (0xf << 24) +#define IMX6Q_GPR7_IPU2_ID11_RD_QOS_MASK (0xf << 28) + +#define IMX6Q_GPR8_TX_SWING_LOW (0x7f << 25) +#define IMX6Q_GPR8_TX_SWING_FULL (0x7f << 18) +#define IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB (0x3f << 12) +#define IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB (0x3f << 6) +#define IMX6Q_GPR8_TX_DEEMPH_GEN1 (0x3f << 0) + +#define IMX6Q_GPR9_TZASC2_BYP BIT(1) +#define IMX6Q_GPR9_TZASC1_BYP BIT(0) + +#define IMX6Q_GPR10_LOCK_DBG_EN BIT(29) +#define IMX6Q_GPR10_LOCK_DBG_CLK_EN BIT(28) +#define IMX6Q_GPR10_LOCK_SEC_ERR_RESP BIT(27) +#define IMX6Q_GPR10_LOCK_OCRAM_TZ_ADDR (0x3f << 21) +#define IMX6Q_GPR10_LOCK_OCRAM_TZ_EN BIT(20) +#define IMX6Q_GPR10_LOCK_DCIC2_MUX_MASK (0x3 << 18) +#define IMX6Q_GPR10_LOCK_DCIC1_MUX_MASK (0x3 << 16) +#define IMX6Q_GPR10_DBG_EN BIT(13) +#define IMX6Q_GPR10_DBG_CLK_EN BIT(12) +#define IMX6Q_GPR10_SEC_ERR_RESP_MASK BIT(11) +#define IMX6Q_GPR10_SEC_ERR_RESP_OKEY 0x0 +#define IMX6Q_GPR10_SEC_ERR_RESP_SLVE BIT(11) +#define IMX6Q_GPR10_OCRAM_TZ_ADDR_MASK (0x3f << 5) +#define IMX6Q_GPR10_OCRAM_TZ_EN_MASK BIT(4) +#define IMX6Q_GPR10_DCIC2_MUX_CTL_MASK (0x3 << 2) +#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI0 (0x0 << 2) +#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI1 (0x1 << 2) +#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU2_DI0 (0x2 << 2) +#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU2_DI1 (0x3 << 2) +#define IMX6Q_GPR10_DCIC1_MUX_CTL_MASK (0x3 << 0) +#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU1_DI0 (0x0 << 0) +#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU1_DI1 (0x1 << 0) +#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU2_DI0 (0x2 << 0) +#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU2_DI1 (0x3 << 0) + +#define IMX6Q_GPR12_ARMP_IPG_CLK_EN BIT(27) +#define IMX6Q_GPR12_ARMP_AHB_CLK_EN BIT(26) +#define IMX6Q_GPR12_ARMP_ATB_CLK_EN BIT(25) +#define IMX6Q_GPR12_ARMP_APB_CLK_EN BIT(24) +#define IMX6Q_GPR12_DEVICE_TYPE (0xf << 12) +#define IMX6Q_GPR12_PCIE_CTL_2 BIT(10) +#define IMX6Q_GPR12_LOS_LEVEL (0x1f << 4) + +#define IMX6Q_GPR13_SDMA_STOP_REQ BIT(30) +#define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29) +#define IMX6Q_GPR13_CAN1_STOP_REQ BIT(28) +#define IMX6Q_GPR13_ENET_STOP_REQ BIT(27) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK (0x7 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB (0x0 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB (0x1 << 24) +#define 
IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB (0x2 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB (0x3 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB (0x4 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB (0x5 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB (0x6 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB (0x7 << 24) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK (0x1f << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1I (0x10 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1M (0x10 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1X (0x1a << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2I (0x12 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M (0x12 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2X (0x1a << 19) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK (0x7 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_1F (0x0 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_2F (0x1 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_4F (0x2 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F (0x3 << 16) +#define IMX6Q_GPR13_SATA_SPD_MODE_MASK BIT(15) +#define IMX6Q_GPR13_SATA_SPD_MODE_1P5G 0x0 +#define IMX6Q_GPR13_SATA_SPD_MODE_3P0G BIT(15) +#define IMX6Q_GPR13_SATA_MPLL_SS_EN BIT(14) +#define IMX6Q_GPR13_SATA_TX_ATTEN_MASK (0x7 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_16_16 (0x0 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_14_16 (0x1 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_12_16 (0x2 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_10_16 (0x3 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_9_16 (0x4 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_8_16 (0x5 << 11) +#define IMX6Q_GPR13_SATA_TX_BOOST_MASK (0xf << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB (0x0 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB (0x1 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB (0x2 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB (0x3 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB (0x4 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB (0x5 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB (0x6 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB (0x7 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB (0x8 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB (0x9 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB (0xa << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB (0xb << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB (0xc << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB (0xd << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB (0xe << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB (0xf << 7) +#define IMX6Q_GPR13_SATA_TX_LVL_MASK (0x1f << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_937_V (0x00 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_947_V (0x01 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_957_V (0x02 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_966_V (0x03 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_976_V (0x04 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_986_V (0x05 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_996_V (0x06 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_005_V (0x07 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_015_V (0x08 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_025_V (0x09 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_035_V (0x0a << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_045_V (0x0b << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_054_V (0x0c << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_064_V (0x0d << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_074_V (0x0e << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_084_V (0x0f << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_094_V (0x10 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_104_V (0x11 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_113_V (0x12 << 2) +#define 
IMX6Q_GPR13_SATA_TX_LVL_1_123_V (0x13 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_133_V (0x14 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_143_V (0x15 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_152_V (0x16 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_162_V (0x17 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_172_V (0x18 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_182_V (0x19 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_191_V (0x1a << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_201_V (0x1b << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_211_V (0x1c << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_221_V (0x1d << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_230_V (0x1e << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V (0x1f << 2) +#define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1) +#define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0) + +/* For imx6sl iomux gpr register field define */ +#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17) +#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14) + +/* For imx6sx iomux gpr register field define */ +#define IMX6SX_GPR1_VDEC_SW_RST_MASK (0x1 << 20) +#define IMX6SX_GPR1_VDEC_SW_RST_RESET (0x1 << 20) +#define IMX6SX_GPR1_VDEC_SW_RST_RELEASE (0x0 << 20) +#define IMX6SX_GPR1_VADC_SW_RST_MASK (0x1 << 19) +#define IMX6SX_GPR1_VADC_SW_RST_RESET (0x1 << 19) +#define IMX6SX_GPR1_VADC_SW_RST_RELEASE (0x0 << 19) +#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK (0x3 << 13) +#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17) +#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13) + +#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3) +#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4) + +#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK (0x1 << 3) +#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1 (0x0 << 3) +#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2 (0x1 << 3) + +#define IMX6SX_GPR5_CSI2_MUX_CTRL_MASK (0x3 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_EXT_PIN (0x0 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_CVD (0x1 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_VDAC_TO_CSI (0x2 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_GND (0x3 << 27) +#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26) +#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26) +#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26) +#define IMX6SX_GPR5_PCIE_BTNRST_RESET BIT(19) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_VDAC_TO_CSI (0x2 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_GND (0x3 << 4) + +#define IMX6SX_GPR5_DISP_MUX_DCIC2_LCDIF2 (0x0 << 2) +#define IMX6SX_GPR5_DISP_MUX_DCIC2_LVDS (0x1 << 2) +#define IMX6SX_GPR5_DISP_MUX_DCIC2_MASK (0x1 << 2) +#define IMX6SX_GPR5_DISP_MUX_DCIC1_LCDIF1 (0x0 << 1) +#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1) +#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) + +#define IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30) +#define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0) +#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0) + +/* For imx6ul iomux gpr register field define */ +#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17) +#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18) +#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17) +#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18) +#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17) +#define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17) +#define IMX6UL_GPR1_SAI1_MCLK_DIR (0x1 << 19) +#define IMX6UL_GPR1_SAI2_MCLK_DIR (0x1 << 20) +#define IMX6UL_GPR1_SAI3_MCLK_DIR (0x1 << 21) +#define IMX6UL_GPR1_SAI_MCLK_MASK (0x7 << 19) +#define MCLK_DIR(x) (x == 1 ? 
IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? \ + IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR) + +/* For imx6sll iomux gpr register field define */ +#define IMX6SLL_GPR5_AFCG_X_BYPASS_MASK (0x1f << 11) + +#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h new file mode 100644 index 000000000..abbd52466 --- /dev/null +++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2015 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_IMX7_IOMUXC_GPR_H +#define __LINUX_IMX7_IOMUXC_GPR_H + +#define IOMUXC_GPR0 0x00 +#define IOMUXC_GPR1 0x04 +#define IOMUXC_GPR2 0x08 +#define IOMUXC_GPR3 0x0c +#define IOMUXC_GPR4 0x10 +#define IOMUXC_GPR5 0x14 +#define IOMUXC_GPR6 0x18 +#define IOMUXC_GPR7 0x1c +#define IOMUXC_GPR8 0x20 +#define IOMUXC_GPR9 0x24 +#define IOMUXC_GPR10 0x28 +#define IOMUXC_GPR11 0x2c +#define IOMUXC_GPR12 0x30 +#define IOMUXC_GPR13 0x34 +#define IOMUXC_GPR14 0x38 +#define IOMUXC_GPR15 0x3c +#define IOMUXC_GPR16 0x40 +#define IOMUXC_GPR17 0x44 +#define IOMUXC_GPR18 0x48 +#define IOMUXC_GPR19 0x4c +#define IOMUXC_GPR20 0x50 +#define IOMUXC_GPR21 0x54 +#define IOMUXC_GPR22 0x58 + +/* For imx7d iomux gpr register field define */ +#define IMX7D_GPR1_IRQ_MASK (0x1 << 12) +#define IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK (0x1 << 13) +#define IMX7D_GPR1_ENET2_TX_CLK_SEL_MASK (0x1 << 14) +#define IMX7D_GPR1_ENET_TX_CLK_SEL_MASK (0x3 << 13) +#define IMX7D_GPR1_ENET1_CLK_DIR_MASK (0x1 << 17) +#define IMX7D_GPR1_ENET2_CLK_DIR_MASK (0x1 << 18) +#define IMX7D_GPR1_ENET_CLK_DIR_MASK (0x3 << 17) + +#define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4) + +#define IMX7D_GPR12_PCIE_PHY_REFCLK_SEL BIT(5) + +#define IMX7D_GPR22_PCIE_PHY_PLL_LOCKED BIT(31) + +#endif /* __LINUX_IMX7_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h new file mode 100644 index 000000000..b4629818a --- /dev/null +++ b/include/linux/mfd/t7l66xb.h @@ -0,0 +1,34 @@ +/* + * This file contains the definitions for the T7L66XB + * + * (C) Copyright 2005 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ +#ifndef MFD_T7L66XB_H +#define MFD_T7L66XB_H + +#include +#include + +struct t7l66xb_platform_data { + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* The base for subdevice irqs */ + + struct tmio_nand_data *nand_data; +}; + + +#define IRQ_T7L66XB_MMC (1) +#define IRQ_T7L66XB_NAND (3) + +#define T7L66XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h new file mode 100644 index 000000000..468c31a27 --- /dev/null +++ b/include/linux/mfd/tc3589x.h @@ -0,0 +1,152 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 + */ + +#ifndef __LINUX_MFD_TC3589x_H +#define __LINUX_MFD_TC3589x_H + +struct device; + +enum tx3589x_block { + TC3589x_BLOCK_GPIO = 1 << 0, + TC3589x_BLOCK_KEYPAD = 1 << 1, +}; + +#define TC3589x_RSTCTRL_IRQRST (1 << 4) +#define TC3589x_RSTCTRL_TIMRST (1 << 3) +#define TC3589x_RSTCTRL_ROTRST (1 << 2) +#define TC3589x_RSTCTRL_KBDRST (1 << 1) +#define TC3589x_RSTCTRL_GPIRST (1 << 0) + +/* Keyboard Configuration Registers */ +#define TC3589x_KBDSETTLE_REG 0x01 +#define TC3589x_KBDBOUNCE 0x02 +#define TC3589x_KBDSIZE 0x03 +#define TC3589x_KBCFG_LSB 0x04 +#define TC3589x_KBCFG_MSB 0x05 +#define TC3589x_KBDIC 0x08 +#define TC3589x_KBDMSK 0x09 +#define TC3589x_EVTCODE_FIFO 0x10 +#define TC3589x_KBDMFS 0x8F + +#define TC3589x_IRQST 0x91 + +#define TC3589x_MANFCODE_MAGIC 0x03 +#define TC3589x_MANFCODE 0x80 +#define TC3589x_VERSION 0x81 +#define TC3589x_IOCFG 0xA7 + +#define TC3589x_CLKMODE 0x88 +#define TC3589x_CLKCFG 0x89 +#define TC3589x_CLKEN 0x8A + +#define TC3589x_RSTCTRL 0x82 +#define TC3589x_EXTRSTN 0x83 +#define TC3589x_RSTINTCLR 0x84 + +/* Pull up/down configuration registers */ +#define TC3589x_IOCFG 0xA7 +#define TC3589x_IOPULLCFG0_LSB 0xAA +#define TC3589x_IOPULLCFG0_MSB 0xAB +#define TC3589x_IOPULLCFG1_LSB 0xAC +#define TC3589x_IOPULLCFG1_MSB 0xAD +#define TC3589x_IOPULLCFG2_LSB 0xAE + +#define TC3589x_GPIOIS0 0xC9 +#define TC3589x_GPIOIS1 0xCA +#define TC3589x_GPIOIS2 0xCB +#define TC3589x_GPIOIBE0 0xCC +#define TC3589x_GPIOIBE1 0xCD +#define TC3589x_GPIOIBE2 0xCE +#define TC3589x_GPIOIEV0 0xCF +#define TC3589x_GPIOIEV1 0xD0 +#define TC3589x_GPIOIEV2 0xD1 +#define TC3589x_GPIOIE0 0xD2 +#define TC3589x_GPIOIE1 0xD3 +#define TC3589x_GPIOIE2 0xD4 +#define TC3589x_GPIORIS0 0xD6 +#define TC3589x_GPIORIS1 0xD7 +#define TC3589x_GPIORIS2 0xD8 +#define TC3589x_GPIOMIS0 0xD9 +#define TC3589x_GPIOMIS1 0xDA +#define TC3589x_GPIOMIS2 0xDB +#define TC3589x_GPIOIC0 0xDC +#define TC3589x_GPIOIC1 0xDD +#define TC3589x_GPIOIC2 0xDE + +#define TC3589x_GPIODATA0 0xC0 +#define TC3589x_GPIOMASK0 0xc1 +#define TC3589x_GPIODATA1 0xC2 +#define TC3589x_GPIOMASK1 0xc3 +#define TC3589x_GPIODATA2 0xC4 +#define TC3589x_GPIOMASK2 0xC5 + +#define TC3589x_GPIODIR0 0xC6 +#define TC3589x_GPIODIR1 0xC7 +#define TC3589x_GPIODIR2 0xC8 + +#define TC3589x_GPIOSYNC0 0xE6 +#define TC3589x_GPIOSYNC1 0xE7 +#define TC3589x_GPIOSYNC2 0xE8 + +#define TC3589x_GPIOWAKE0 0xE9 +#define TC3589x_GPIOWAKE1 0xEA +#define TC3589x_GPIOWAKE2 0xEB + +#define TC3589x_GPIOODM0 0xE0 +#define TC3589x_GPIOODE0 0xE1 +#define TC3589x_GPIOODM1 0xE2 +#define TC3589x_GPIOODE1 0xE3 +#define TC3589x_GPIOODM2 0xE4 +#define TC3589x_GPIOODE2 0xE5 + +#define TC3589x_INT_GPIIRQ 0 +#define TC3589x_INT_TI0IRQ 1 +#define TC3589x_INT_TI1IRQ 2 +#define TC3589x_INT_TI2IRQ 3 +#define 
TC3589x_INT_ROTIRQ 5 +#define TC3589x_INT_KBDIRQ 6 +#define TC3589x_INT_PORIRQ 7 + +#define TC3589x_NR_INTERNAL_IRQS 8 + +struct tc3589x { + struct mutex lock; + struct device *dev; + struct i2c_client *i2c; + struct irq_domain *domain; + + int irq_base; + int num_gpio; + struct tc3589x_platform_data *pdata; +}; + +extern int tc3589x_reg_write(struct tc3589x *tc3589x, u8 reg, u8 data); +extern int tc3589x_reg_read(struct tc3589x *tc3589x, u8 reg); +extern int tc3589x_block_read(struct tc3589x *tc3589x, u8 reg, u8 length, + u8 *values); +extern int tc3589x_block_write(struct tc3589x *tc3589x, u8 reg, u8 length, + const u8 *values); +extern int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val); + +/* + * Keypad related platform specific constants + * These values may be modified for fine tuning + */ +#define TC_KPD_ROWS 0x8 +#define TC_KPD_COLUMNS 0x8 +#define TC_KPD_DEBOUNCE_PERIOD 0xA3 +#define TC_KPD_SETTLE_TIME 0xA3 + + +/** + * struct tc3589x_platform_data - TC3589x platform data + * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*) + */ +struct tc3589x_platform_data { + unsigned int block; +}; + +#endif diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h new file mode 100644 index 000000000..b48882094 --- /dev/null +++ b/include/linux/mfd/tc6387xb.h @@ -0,0 +1,20 @@ +/* + * This file contains the definitions for the TC6387XB + * + * (C) Copyright 2005 Ian Molton + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + */ +#ifndef MFD_TC6387XB_H +#define MFD_TC6387XB_H + +struct tc6387xb_platform_data { + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); +}; + +#endif diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h new file mode 100644 index 000000000..626e44820 --- /dev/null +++ b/include/linux/mfd/tc6393xb.h @@ -0,0 +1,59 @@ +/* + * Toshiba TC6393XB SoC support + * + * Copyright(c) 2005-2006 Chris Humbert + * Copyright(c) 2005 Dirk Opfer + * Copyright(c) 2005 Ian Molton + * Copyright(c) 2007 Dmitry Baryshkov + * + * Based on code written by Sharp/Lineo for 2.4 kernels + * Based on locomo.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef MFD_TC6393XB_H +#define MFD_TC6393XB_H + +#include + +/* Also one should provide the CK3P6MI clock */ +struct tc6393xb_platform_data { + u16 scr_pll2cr; /* PLL2 Control */ + u16 scr_gper; /* GP Enable */ + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* base for subdevice irqs */ + int gpio_base; + int (*setup)(struct platform_device *dev); + void (*teardown)(struct platform_device *dev); + + struct tmio_nand_data *nand_data; + struct tmio_fb_data *fb_data; + + unsigned resume_restore : 1; /* make special actions + to preserve the state + on suspend/resume */ +}; + +extern int tc6393xb_lcd_mode(struct platform_device *fb, + const struct fb_videomode *mode); +extern int tc6393xb_lcd_set_power(struct platform_device *fb, bool on); + +/* + * Relative to irq_base + */ +#define IRQ_TC6393_NAND 0 +#define IRQ_TC6393_MMC 1 +#define IRQ_TC6393_OHCI 2 +#define IRQ_TC6393_FB 4 + +#define TC6393XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/ti-lmu-register.h b/include/linux/mfd/ti-lmu-register.h new file mode 100644 index 000000000..2125c7c02 --- /dev/null +++ b/include/linux/mfd/ti-lmu-register.h @@ -0,0 +1,280 @@ +/* + * TI LMU (Lighting Management Unit) Device Register Map + * + * Copyright 2017 Texas Instruments + * + * Author: Milo Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MFD_TI_LMU_REGISTER_H__ +#define __MFD_TI_LMU_REGISTER_H__ + +#include + +/* LM3532 */ +#define LM3532_REG_OUTPUT_CFG 0x10 +#define LM3532_ILED1_CFG_MASK 0x03 +#define LM3532_ILED2_CFG_MASK 0x0C +#define LM3532_ILED3_CFG_MASK 0x30 +#define LM3532_ILED1_CFG_SHIFT 0 +#define LM3532_ILED2_CFG_SHIFT 2 +#define LM3532_ILED3_CFG_SHIFT 4 + +#define LM3532_REG_RAMPUP 0x12 +#define LM3532_REG_RAMPDN LM3532_REG_RAMPUP +#define LM3532_RAMPUP_MASK 0x07 +#define LM3532_RAMPUP_SHIFT 0 +#define LM3532_RAMPDN_MASK 0x38 +#define LM3532_RAMPDN_SHIFT 3 + +#define LM3532_REG_ENABLE 0x1D + +#define LM3532_REG_PWM_A_CFG 0x13 +#define LM3532_PWM_A_MASK 0x05 /* zone 0 */ +#define LM3532_PWM_ZONE_0 BIT(2) + +#define LM3532_REG_PWM_B_CFG 0x14 +#define LM3532_PWM_B_MASK 0x09 /* zone 1 */ +#define LM3532_PWM_ZONE_1 BIT(3) + +#define LM3532_REG_PWM_C_CFG 0x15 +#define LM3532_PWM_C_MASK 0x11 /* zone 2 */ +#define LM3532_PWM_ZONE_2 BIT(4) + +#define LM3532_REG_ZONE_CFG_A 0x16 +#define LM3532_REG_ZONE_CFG_B 0x18 +#define LM3532_REG_ZONE_CFG_C 0x1A +#define LM3532_ZONE_MASK (BIT(2) | BIT(3) | BIT(4)) +#define LM3532_ZONE_0 0 +#define LM3532_ZONE_1 BIT(2) +#define LM3532_ZONE_2 BIT(3) + +#define LM3532_REG_BRT_A 0x70 /* zone 0 */ +#define LM3532_REG_BRT_B 0x76 /* zone 1 */ +#define LM3532_REG_BRT_C 0x7C /* zone 2 */ + +#define LM3532_MAX_REG 0x7E + +/* LM3631 */ +#define LM3631_REG_DEVCTRL 0x00 +#define LM3631_LCD_EN_MASK BIT(1) +#define LM3631_BL_EN_MASK BIT(0) + +#define LM3631_REG_BRT_LSB 0x01 +#define LM3631_REG_BRT_MSB 0x02 + +#define LM3631_REG_BL_CFG 0x06 +#define LM3631_BL_CHANNEL_MASK BIT(3) +#define LM3631_BL_DUAL_CHANNEL 0 +#define LM3631_BL_SINGLE_CHANNEL BIT(3) +#define LM3631_MAP_MASK BIT(5) +#define LM3631_EXPONENTIAL_MAP 0 + +#define LM3631_REG_BRT_MODE 0x08 +#define LM3631_MODE_MASK (BIT(1) | BIT(2) | BIT(3)) +#define LM3631_DEFAULT_MODE (BIT(1) | BIT(3)) + +#define LM3631_REG_SLOPE 0x09 
+#define LM3631_SLOPE_MASK 0xF0 +#define LM3631_SLOPE_SHIFT 4 + +#define LM3631_REG_LDO_CTRL1 0x0A +#define LM3631_EN_OREF_MASK BIT(0) +#define LM3631_EN_VNEG_MASK BIT(1) +#define LM3631_EN_VPOS_MASK BIT(2) + +#define LM3631_REG_LDO_CTRL2 0x0B +#define LM3631_EN_CONT_MASK BIT(0) + +#define LM3631_REG_VOUT_CONT 0x0C +#define LM3631_VOUT_CONT_MASK (BIT(6) | BIT(7)) + +#define LM3631_REG_VOUT_BOOST 0x0C +#define LM3631_REG_VOUT_POS 0x0D +#define LM3631_REG_VOUT_NEG 0x0E +#define LM3631_REG_VOUT_OREF 0x0F +#define LM3631_VOUT_MASK 0x3F + +#define LM3631_REG_ENTIME_VCONT 0x0B +#define LM3631_ENTIME_CONT_MASK 0x70 + +#define LM3631_REG_ENTIME_VOREF 0x0F +#define LM3631_REG_ENTIME_VPOS 0x10 +#define LM3631_REG_ENTIME_VNEG 0x11 +#define LM3631_ENTIME_MASK 0xF0 +#define LM3631_ENTIME_SHIFT 4 + +#define LM3631_MAX_REG 0x16 + +/* LM3632 */ +#define LM3632_REG_CONFIG1 0x02 +#define LM3632_OVP_MASK (BIT(5) | BIT(6) | BIT(7)) +#define LM3632_OVP_25V BIT(6) + +#define LM3632_REG_CONFIG2 0x03 +#define LM3632_SWFREQ_MASK BIT(7) +#define LM3632_SWFREQ_1MHZ BIT(7) + +#define LM3632_REG_BRT_LSB 0x04 +#define LM3632_REG_BRT_MSB 0x05 + +#define LM3632_REG_IO_CTRL 0x09 +#define LM3632_PWM_MASK BIT(6) +#define LM3632_I2C_MODE 0 +#define LM3632_PWM_MODE BIT(6) + +#define LM3632_REG_ENABLE 0x0A +#define LM3632_BL_EN_MASK BIT(0) +#define LM3632_BL_CHANNEL_MASK (BIT(3) | BIT(4)) +#define LM3632_BL_SINGLE_CHANNEL BIT(4) +#define LM3632_BL_DUAL_CHANNEL BIT(3) + +#define LM3632_REG_BIAS_CONFIG 0x0C +#define LM3632_EXT_EN_MASK BIT(0) +#define LM3632_EN_VNEG_MASK BIT(1) +#define LM3632_EN_VPOS_MASK BIT(2) + +#define LM3632_REG_VOUT_BOOST 0x0D +#define LM3632_REG_VOUT_POS 0x0E +#define LM3632_REG_VOUT_NEG 0x0F +#define LM3632_VOUT_MASK 0x3F + +#define LM3632_MAX_REG 0x10 + +/* LM3633 */ +#define LM3633_REG_HVLED_OUTPUT_CFG 0x10 +#define LM3633_HVLED1_CFG_MASK BIT(0) +#define LM3633_HVLED2_CFG_MASK BIT(1) +#define LM3633_HVLED3_CFG_MASK BIT(2) +#define LM3633_HVLED1_CFG_SHIFT 0 +#define LM3633_HVLED2_CFG_SHIFT 1 +#define LM3633_HVLED3_CFG_SHIFT 2 + +#define LM3633_REG_BANK_SEL 0x11 + +#define LM3633_REG_BL0_RAMP 0x12 +#define LM3633_REG_BL1_RAMP 0x13 +#define LM3633_BL_RAMPUP_MASK 0xF0 +#define LM3633_BL_RAMPUP_SHIFT 4 +#define LM3633_BL_RAMPDN_MASK 0x0F +#define LM3633_BL_RAMPDN_SHIFT 0 + +#define LM3633_REG_BL_RAMP_CONF 0x1B +#define LM3633_BL_RAMP_MASK 0x0F +#define LM3633_BL_RAMP_EACH 0x05 + +#define LM3633_REG_PTN0_RAMP 0x1C +#define LM3633_REG_PTN1_RAMP 0x1D +#define LM3633_PTN_RAMPUP_MASK 0x70 +#define LM3633_PTN_RAMPUP_SHIFT 4 +#define LM3633_PTN_RAMPDN_MASK 0x07 +#define LM3633_PTN_RAMPDN_SHIFT 0 + +#define LM3633_REG_LED_MAPPING_MODE 0x1F +#define LM3633_LED_EXPONENTIAL BIT(1) + +#define LM3633_REG_IMAX_HVLED_A 0x20 +#define LM3633_REG_IMAX_HVLED_B 0x21 +#define LM3633_REG_IMAX_LVLED_BASE 0x22 + +#define LM3633_REG_BL_FEEDBACK_ENABLE 0x28 + +#define LM3633_REG_ENABLE 0x2B +#define LM3633_LED_BANK_OFFSET 2 + +#define LM3633_REG_PATTERN 0x2C + +#define LM3633_REG_BOOST_CFG 0x2D +#define LM3633_OVP_MASK (BIT(1) | BIT(2)) +#define LM3633_OVP_40V 0x6 + +#define LM3633_REG_PWM_CFG 0x2F +#define LM3633_PWM_A_MASK BIT(0) +#define LM3633_PWM_B_MASK BIT(1) + +#define LM3633_REG_BRT_HVLED_A_LSB 0x40 +#define LM3633_REG_BRT_HVLED_A_MSB 0x41 +#define LM3633_REG_BRT_HVLED_B_LSB 0x42 +#define LM3633_REG_BRT_HVLED_B_MSB 0x43 + +#define LM3633_REG_BRT_LVLED_BASE 0x44 + +#define LM3633_REG_PTN_DELAY 0x50 + +#define LM3633_REG_PTN_LOWTIME 0x51 + +#define LM3633_REG_PTN_HIGHTIME 0x52 + +#define LM3633_REG_PTN_LOWBRT 0x53 + +#define 
LM3633_REG_PTN_HIGHBRT LM3633_REG_BRT_LVLED_BASE + +#define LM3633_REG_BL_OPEN_FAULT_STATUS 0xB0 + +#define LM3633_REG_BL_SHORT_FAULT_STATUS 0xB2 + +#define LM3633_REG_MONITOR_ENABLE 0xB4 + +#define LM3633_MAX_REG 0xB4 + +/* LM3695 */ +#define LM3695_REG_GP 0x10 +#define LM3695_BL_CHANNEL_MASK BIT(3) +#define LM3695_BL_DUAL_CHANNEL 0 +#define LM3695_BL_SINGLE_CHANNEL BIT(3) +#define LM3695_BRT_RW_MASK BIT(2) +#define LM3695_BL_EN_MASK BIT(0) + +#define LM3695_REG_BRT_LSB 0x13 +#define LM3695_REG_BRT_MSB 0x14 + +#define LM3695_MAX_REG 0x14 + +/* LM3697 */ +#define LM3697_REG_HVLED_OUTPUT_CFG 0x10 +#define LM3697_HVLED1_CFG_MASK BIT(0) +#define LM3697_HVLED2_CFG_MASK BIT(1) +#define LM3697_HVLED3_CFG_MASK BIT(2) +#define LM3697_HVLED1_CFG_SHIFT 0 +#define LM3697_HVLED2_CFG_SHIFT 1 +#define LM3697_HVLED3_CFG_SHIFT 2 + +#define LM3697_REG_BL0_RAMP 0x11 +#define LM3697_REG_BL1_RAMP 0x12 +#define LM3697_RAMPUP_MASK 0xF0 +#define LM3697_RAMPUP_SHIFT 4 +#define LM3697_RAMPDN_MASK 0x0F +#define LM3697_RAMPDN_SHIFT 0 + +#define LM3697_REG_RAMP_CONF 0x14 +#define LM3697_RAMP_MASK 0x0F +#define LM3697_RAMP_EACH 0x05 + +#define LM3697_REG_PWM_CFG 0x1C +#define LM3697_PWM_A_MASK BIT(0) +#define LM3697_PWM_B_MASK BIT(1) + +#define LM3697_REG_IMAX_A 0x17 +#define LM3697_REG_IMAX_B 0x18 + +#define LM3697_REG_FEEDBACK_ENABLE 0x19 + +#define LM3697_REG_BRT_A_LSB 0x20 +#define LM3697_REG_BRT_A_MSB 0x21 +#define LM3697_REG_BRT_B_LSB 0x22 +#define LM3697_REG_BRT_B_MSB 0x23 + +#define LM3697_REG_ENABLE 0x24 + +#define LM3697_REG_OPEN_FAULT_STATUS 0xB0 + +#define LM3697_REG_SHORT_FAULT_STATUS 0xB2 + +#define LM3697_REG_MONITOR_ENABLE 0xB4 + +#define LM3697_MAX_REG 0xB4 +#endif diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h new file mode 100644 index 000000000..09d5f3038 --- /dev/null +++ b/include/linux/mfd/ti-lmu.h @@ -0,0 +1,87 @@ +/* + * TI LMU (Lighting Management Unit) Devices + * + * Copyright 2017 Texas Instruments + * + * Author: Milo Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __MFD_TI_LMU_H__ +#define __MFD_TI_LMU_H__ + +#include +#include +#include + +/* Notifier event */ +#define LMU_EVENT_MONITOR_DONE 0x01 + +enum ti_lmu_id { + LM3532, + LM3631, + LM3632, + LM3633, + LM3695, + LM3697, + LMU_MAX_ID, +}; + +enum ti_lmu_max_current { + LMU_IMAX_5mA, + LMU_IMAX_6mA, + LMU_IMAX_7mA = 0x03, + LMU_IMAX_8mA, + LMU_IMAX_9mA, + LMU_IMAX_10mA = 0x07, + LMU_IMAX_11mA, + LMU_IMAX_12mA, + LMU_IMAX_13mA, + LMU_IMAX_14mA, + LMU_IMAX_15mA = 0x0D, + LMU_IMAX_16mA, + LMU_IMAX_17mA, + LMU_IMAX_18mA, + LMU_IMAX_19mA, + LMU_IMAX_20mA = 0x13, + LMU_IMAX_21mA, + LMU_IMAX_22mA, + LMU_IMAX_23mA = 0x17, + LMU_IMAX_24mA, + LMU_IMAX_25mA, + LMU_IMAX_26mA, + LMU_IMAX_27mA = 0x1C, + LMU_IMAX_28mA, + LMU_IMAX_29mA, + LMU_IMAX_30mA, +}; + +enum lm363x_regulator_id { + LM3631_BOOST, /* Boost output */ + LM3631_LDO_CONT, /* Display panel controller */ + LM3631_LDO_OREF, /* Gamma reference */ + LM3631_LDO_POS, /* Positive display bias output */ + LM3631_LDO_NEG, /* Negative display bias output */ + LM3632_BOOST, /* Boost output */ + LM3632_LDO_POS, /* Positive display bias output */ + LM3632_LDO_NEG, /* Negative display bias output */ +}; + +/** + * struct ti_lmu + * + * @dev: Parent device pointer + * @regmap: Used for i2c communication when accessing registers + * @en_gpio: GPIO for HWEN pin [Optional] + * @notifier: Notifier for reporting hwmon event + */ +struct ti_lmu { + struct device *dev; + struct regmap *regmap; + int en_gpio; + struct blocking_notifier_head notifier; +}; +#endif diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h new file mode 100644 index 000000000..b9a53e013 --- /dev/null +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -0,0 +1,198 @@ +#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H +#define __LINUX_TI_AM335X_TSCADC_MFD_H + +/* + * TI Touch Screen / ADC MFD driver + * + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +#define REG_RAWIRQSTATUS 0x024 +#define REG_IRQSTATUS 0x028 +#define REG_IRQENABLE 0x02C +#define REG_IRQCLR 0x030 +#define REG_IRQWAKEUP 0x034 +#define REG_DMAENABLE_SET 0x038 +#define REG_DMAENABLE_CLEAR 0x03c +#define REG_CTRL 0x040 +#define REG_ADCFSM 0x044 +#define REG_CLKDIV 0x04C +#define REG_SE 0x054 +#define REG_IDLECONFIG 0x058 +#define REG_CHARGECONFIG 0x05C +#define REG_CHARGEDELAY 0x060 +#define REG_STEPCONFIG(n) (0x64 + ((n) * 8)) +#define REG_STEPDELAY(n) (0x68 + ((n) * 8)) +#define REG_FIFO0CNT 0xE4 +#define REG_FIFO0THR 0xE8 +#define REG_FIFO1CNT 0xF0 +#define REG_FIFO1THR 0xF4 +#define REG_DMA1REQ 0xF8 +#define REG_FIFO0 0x100 +#define REG_FIFO1 0x200 + +/* Register Bitfields */ +/* IRQ wakeup enable */ +#define IRQWKUP_ENB BIT(0) + +/* Step Enable */ +#define STEPENB_MASK (0x1FFFF << 0) +#define STEPENB(val) ((val) << 0) +#define ENB(val) (1 << (val)) +#define STPENB_STEPENB STEPENB(0x1FFFF) +#define STPENB_STEPENB_TC STEPENB(0x1FFF) + +/* IRQ enable */ +#define IRQENB_HW_PEN BIT(0) +#define IRQENB_EOS BIT(1) +#define IRQENB_FIFO0THRES BIT(2) +#define IRQENB_FIFO0OVRRUN BIT(3) +#define IRQENB_FIFO0UNDRFLW BIT(4) +#define IRQENB_FIFO1THRES BIT(5) +#define IRQENB_FIFO1OVRRUN BIT(6) +#define IRQENB_FIFO1UNDRFLW BIT(7) +#define IRQENB_PENUP BIT(9) + +/* Step Configuration */ +#define STEPCONFIG_MODE_MASK (3 << 0) +#define STEPCONFIG_MODE(val) ((val) << 0) +#define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1) +#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2) +#define STEPCONFIG_AVG_MASK (7 << 2) +#define STEPCONFIG_AVG(val) ((val) << 2) +#define STEPCONFIG_AVG_16 STEPCONFIG_AVG(4) +#define STEPCONFIG_XPP BIT(5) +#define STEPCONFIG_XNN BIT(6) +#define STEPCONFIG_YPP BIT(7) +#define STEPCONFIG_YNN BIT(8) +#define STEPCONFIG_XNP BIT(9) +#define STEPCONFIG_YPN BIT(10) +#define STEPCONFIG_INM_MASK (0xF << 15) +#define STEPCONFIG_INM(val) ((val) << 15) +#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) +#define STEPCONFIG_INP_MASK (0xF << 19) +#define STEPCONFIG_INP(val) ((val) << 19) +#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) +#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) +#define STEPCONFIG_FIFO1 BIT(26) + +/* Delay register */ +#define STEPDELAY_OPEN_MASK (0x3FFFF << 0) +#define STEPDELAY_OPEN(val) ((val) << 0) +#define STEPCONFIG_OPENDLY STEPDELAY_OPEN(0x098) +#define STEPDELAY_SAMPLE_MASK (0xFF << 24) +#define STEPDELAY_SAMPLE(val) ((val) << 24) +#define STEPCONFIG_SAMPLEDLY STEPDELAY_SAMPLE(0) + +/* Charge Config */ +#define STEPCHARGE_RFP_MASK (7 << 12) +#define STEPCHARGE_RFP(val) ((val) << 12) +#define STEPCHARGE_RFP_XPUL STEPCHARGE_RFP(1) +#define STEPCHARGE_INM_MASK (0xF << 15) +#define STEPCHARGE_INM(val) ((val) << 15) +#define STEPCHARGE_INM_AN1 STEPCHARGE_INM(1) +#define STEPCHARGE_INP_MASK (0xF << 19) +#define STEPCHARGE_INP(val) ((val) << 19) +#define STEPCHARGE_RFM_MASK (3 << 23) +#define STEPCHARGE_RFM(val) ((val) << 23) +#define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1) + +/* Charge delay */ +#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0) +#define CHARGEDLY_OPEN(val) ((val) << 0) +#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400) + +/* Control register */ +#define CNTRLREG_TSCSSENB BIT(0) +#define CNTRLREG_STEPID BIT(1) +#define CNTRLREG_STEPCONFIGWRT BIT(2) +#define CNTRLREG_POWERDOWN BIT(4) +#define CNTRLREG_AFE_CTRL_MASK (3 << 5) +#define CNTRLREG_AFE_CTRL(val) ((val) << 5) +#define CNTRLREG_4WIRE CNTRLREG_AFE_CTRL(1) +#define CNTRLREG_5WIRE CNTRLREG_AFE_CTRL(2) +#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3) +#define CNTRLREG_TSCENB BIT(7) + +/* FIFO 
READ Register */ +#define FIFOREAD_DATA_MASK (0xfff << 0) +#define FIFOREAD_CHNLID_MASK (0xf << 16) + +/* DMA ENABLE/CLEAR Register */ +#define DMA_FIFO0 BIT(0) +#define DMA_FIFO1 BIT(1) + +/* Sequencer Status */ +#define SEQ_STATUS BIT(5) +#define CHARGE_STEP 0x11 + +#define ADC_CLK 3000000 +#define TOTAL_STEPS 16 +#define TOTAL_CHANNELS 8 +#define FIFO1_THRESHOLD 19 + +/* + * time in us for processing a single channel, calculated as follows: + * + * max num cycles = open delay + (sample delay + conv time) * averaging + * + * max num cycles: 262143 + (255 + 13) * 16 = 266431 + * + * clock frequency: 26MHz / 8 = 3.25MHz + * clock period: 1 / 3.25MHz = 308ns + * + * max processing time: 266431 * 308ns = 83ms(approx) + */ +#define IDLE_TIMEOUT 83 /* milliseconds */ + +#define TSCADC_CELLS 2 + +struct ti_tscadc_dev { + struct device *dev; + struct regmap *regmap; + void __iomem *tscadc_base; + phys_addr_t tscadc_phys_base; + int irq; + int used_cells; /* 1-2 */ + int tsc_wires; + int tsc_cell; /* -1 if not used */ + int adc_cell; /* -1 if not used */ + struct mfd_cell cells[TSCADC_CELLS]; + u32 reg_se_cache; + bool adc_waiting; + bool adc_in_use; + wait_queue_head_t reg_se_wait; + spinlock_t reg_lock; + unsigned int clk_div; + + /* tsc device */ + struct titsc *tsc; + + /* adc device */ + struct adc_device *adc; +}; + +static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p) +{ + struct ti_tscadc_dev **tscadc_dev = p->dev.platform_data; + + return *tscadc_dev; +} + +void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val); +void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val); +void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val); +void am335x_tsc_se_adc_done(struct ti_tscadc_dev *tsadc); + +#endif diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h new file mode 100644 index 000000000..77866214a --- /dev/null +++ b/include/linux/mfd/tmio.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef MFD_TMIO_H +#define MFD_TMIO_H + +#include +#include +#include +#include +#include +#include +#include + +#define tmio_ioread8(addr) readb(addr) +#define tmio_ioread16(addr) readw(addr) +#define tmio_ioread16_rep(r, b, l) readsw(r, b, l) +#define tmio_ioread32(addr) \ + (((u32)readw((addr))) | (((u32)readw((addr) + 2)) << 16)) + +#define tmio_iowrite8(val, addr) writeb((val), (addr)) +#define tmio_iowrite16(val, addr) writew((val), (addr)) +#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) +#define tmio_iowrite32(val, addr) \ + do { \ + writew((val), (addr)); \ + writew((val) >> 16, (addr) + 2); \ + } while (0) + +#define sd_config_write8(base, shift, reg, val) \ + tmio_iowrite8((val), (base) + ((reg) << (shift))) +#define sd_config_write16(base, shift, reg, val) \ + tmio_iowrite16((val), (base) + ((reg) << (shift))) +#define sd_config_write32(base, shift, reg, val) \ + do { \ + tmio_iowrite16((val), (base) + ((reg) << (shift))); \ + tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \ + } while (0) + +/* tmio MMC platform flags */ +/* + * Some controllers can support a 2-byte block size when the bus width + * is configured in 4-bit mode. + */ +#define TMIO_MMC_BLKSZ_2BYTES BIT(1) +/* + * Some controllers can support SDIO IRQ signalling. + */ +#define TMIO_MMC_SDIO_IRQ BIT(2) + +/* Some features are only available or tested on R-Car Gen2 or later */ +#define TMIO_MMC_MIN_RCAR2 BIT(3) + +/* + * Some controllers require waiting for the SD bus to become + * idle before writing to some registers. 
+ */ +#define TMIO_MMC_HAS_IDLE_WAIT BIT(4) +/* + * A GPIO is used for card hotplug detection. We need an extra flag for this, + * because 0 is a valid GPIO number too, and requiring users to specify + * cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility. + */ +#define TMIO_MMC_USE_GPIO_CD BIT(5) + +/* + * Some controllers don't have registers above 0x100. + * This flag is used to check the accessibility of + * CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL + */ +#define TMIO_MMC_HAVE_HIGH_REG BIT(6) + +/* + * Some controllers have a register to control automatic + * CMD12 issue/non-issue + */ +#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7) + +/* Controller has some SDIO status bits which must be 1 */ +#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8) + +/* + * Some controllers have a 32-bit wide data port register + */ +#define TMIO_MMC_32BIT_DATA_PORT BIT(9) + +/* + * Some controllers allow setting the SDx actual clock + */ +#define TMIO_MMC_CLK_ACTUAL BIT(10) + +/* Some controllers have a CBSY bit */ +#define TMIO_MMC_HAVE_CBSY BIT(11) + +/* Some controllers that support HS400 use 4 taps while others use 8. */ +#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) + +int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); +int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); +void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state); +void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state); + +struct dma_chan; + +/* + * data for the MMC controller + */ +struct tmio_mmc_data { + void *chan_priv_tx; + void *chan_priv_rx; + unsigned int hclk; + unsigned long capabilities; + unsigned long capabilities2; + unsigned long flags; + u32 ocr_mask; /* available voltages */ + unsigned int cd_gpio; + int alignment_shift; + dma_addr_t dma_rx_offset; + unsigned int max_blk_count; + unsigned short max_segs; + void (*set_pwr)(struct platform_device *host, int state); + void (*set_clk_div)(struct platform_device *host, int state); +}; + +/* + * data for the NAND controller + */ +struct tmio_nand_data { + struct nand_bbt_descr *badblock_pattern; + struct mtd_partition *partition; + unsigned int num_partitions; + const char *const *part_parsers; +}; + +#define FBIO_TMIO_ACC_WRITE 0x7C639300 +#define FBIO_TMIO_ACC_SYNC 0x7C639301 + +struct tmio_fb_data { + int (*lcd_set_power)(struct platform_device *fb_dev, + bool on); + int (*lcd_mode)(struct platform_device *fb_dev, + const struct fb_videomode *mode); + int num_modes; + struct fb_videomode *modes; + + /* in mm: size of screen */ + int height; + int width; +}; + +#endif diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h new file mode 100644 index 000000000..8bc511808 --- /dev/null +++ b/include/linux/mfd/tps6105x.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * + * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef MFD_TPS6105X_H +#define MFD_TPS6105X_H + +#include +#include +#include + +/* + * Register definitions for all subdrivers + */ +#define TPS6105X_REG_0 0x00 +#define TPS6105X_REG0_MODE_SHIFT 6 +#define TPS6105X_REG0_MODE_MASK (0x03<<6) +/* These defines apply to both reg0 and reg1 */ +#define TPS6105X_REG0_MODE_SHUTDOWN 0x00 +#define TPS6105X_REG0_MODE_TORCH 0x01 +#define TPS6105X_REG0_MODE_TORCH_FLASH 0x02 +#define TPS6105X_REG0_MODE_VOLTAGE 0x03 +#define TPS6105X_REG0_VOLTAGE_SHIFT 4 +#define TPS6105X_REG0_VOLTAGE_MASK (3<<4) +#define TPS6105X_REG0_VOLTAGE_450 0 +#define 
TPS6105X_REG0_VOLTAGE_500 1 +#define TPS6105X_REG0_VOLTAGE_525 2 +#define TPS6105X_REG0_VOLTAGE_500_2 3 +#define TPS6105X_REG0_DIMMING_SHIFT 3 +#define TPS6105X_REG0_TORCHC_SHIFT 0 +#define TPS6105X_REG0_TORCHC_MASK (7<<0) +#define TPS6105X_REG0_TORCHC_0 0x00 +#define TPS6105X_REG0_TORCHC_50 0x01 +#define TPS6105X_REG0_TORCHC_75 0x02 +#define TPS6105X_REG0_TORCHC_100 0x03 +#define TPS6105X_REG0_TORCHC_150 0x04 +#define TPS6105X_REG0_TORCHC_200 0x05 +#define TPS6105X_REG0_TORCHC_250_400 0x06 +#define TPS6105X_REG0_TORCHC_250_500 0x07 +#define TPS6105X_REG_1 0x01 +#define TPS6105X_REG1_MODE_SHIFT 6 +#define TPS6105X_REG1_MODE_MASK (0x03<<6) +#define TPS6105X_REG1_MODE_SHUTDOWN 0x00 +#define TPS6105X_REG1_MODE_TORCH 0x01 +#define TPS6105X_REG1_MODE_TORCH_FLASH 0x02 +#define TPS6105X_REG1_MODE_VOLTAGE 0x03 +#define TPS6105X_REG_2 0x02 +#define TPS6105X_REG_3 0x03 + +/** + * enum tps6105x_mode - desired mode for the TPS6105x + * @TPS6105X_MODE_SHUTDOWN: this instance is inactive, not used for anything + * @TPS6105X_MODE_TORCH: this instance is used as an LED, usually a white + * LED, for example as a backlight or flashlight. If this is set, the + * TPS6105X will register with the LED framework + * @TPS6105X_MODE_TORCH_FLASH: this instance is used as a flashgun, usually + * in a camera + * @TPS6105X_MODE_VOLTAGE: this instance is used as a voltage regulator and + * will register with the regulator framework + */ +enum tps6105x_mode { + TPS6105X_MODE_SHUTDOWN, + TPS6105X_MODE_TORCH, + TPS6105X_MODE_TORCH_FLASH, + TPS6105X_MODE_VOLTAGE, +}; + +/** + * struct tps6105x_platform_data - TPS6105x platform data + * @mode: the mode this instance shall be operated in; + * this is not selectable at runtime + * @regulator_data: initialization data for the voltage + * regulator if used as a voltage source + */ +struct tps6105x_platform_data { + enum tps6105x_mode mode; + struct regulator_init_data *regulator_data; +}; + +/** + * struct tps6105x - state holder for the TPS6105x drivers + * @client: corresponding I2C client + * @regulator: regulator device if used in voltage mode + * @regmap: used for i2c communication when accessing registers + */ +struct tps6105x { + struct tps6105x_platform_data *pdata; + struct i2c_client *client; + struct regulator_dev *regulator; + struct regmap *regmap; +}; + +#endif diff --git a/include/linux/mfd/tps65010.h b/include/linux/mfd/tps65010.h new file mode 100644 index 000000000..a1fb9bc53 --- /dev/null +++ b/include/linux/mfd/tps65010.h @@ -0,0 +1,205 @@ +/* linux/mfd/tps65010.h + * + * Functions to access TPS65010 power management device. + * + * Copyright (C) 2004 Dirk Behme + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_I2C_TPS65010_H +#define __LINUX_I2C_TPS65010_H + +/* + * ---------------------------------------------------------------------------- + * Registers, all 8 bits + * ---------------------------------------------------------------------------- + */ + +#define TPS_CHGSTATUS 0x01 +# define TPS_CHG_USB (1 << 7) +# define TPS_CHG_AC (1 << 6) +# define TPS_CHG_THERM (1 << 5) +# define TPS_CHG_TERM (1 << 4) +# define TPS_CHG_TAPER_TMO (1 << 3) +# define TPS_CHG_CHG_TMO (1 << 2) +# define TPS_CHG_PRECHG_TMO (1 << 1) +# define TPS_CHG_TEMP_ERR (1 << 0) +#define TPS_REGSTATUS 0x02 +# define TPS_REG_ONOFF (1 << 7) +# define TPS_REG_COVER (1 << 6) +# define TPS_REG_UVLO (1 << 5) +# define TPS_REG_NO_CHG (1 << 4) /* tps65013 */ +# define TPS_REG_PG_LD02 (1 << 3) +# define TPS_REG_PG_LD01 (1 << 2) +# define TPS_REG_PG_MAIN (1 << 1) +# define TPS_REG_PG_CORE (1 << 0) +#define TPS_MASK1 0x03 +#define TPS_MASK2 0x04 +#define TPS_ACKINT1 0x05 +#define TPS_ACKINT2 0x06 +#define TPS_CHGCONFIG 0x07 +# define TPS_CHARGE_POR (1 << 7) /* 65010/65012 */ +# define TPS65013_AUA (1 << 7) /* 65011/65013 */ +# define TPS_CHARGE_RESET (1 << 6) +# define TPS_CHARGE_FAST (1 << 5) +# define TPS_CHARGE_CURRENT (3 << 3) +# define TPS_VBUS_500MA (1 << 2) +# define TPS_VBUS_CHARGING (1 << 1) +# define TPS_CHARGE_ENABLE (1 << 0) +#define TPS_LED1_ON 0x08 +#define TPS_LED1_PER 0x09 +#define TPS_LED2_ON 0x0a +#define TPS_LED2_PER 0x0b +#define TPS_VDCDC1 0x0c +# define TPS_ENABLE_LP (1 << 3) +#define TPS_VDCDC2 0x0d +# define TPS_LP_COREOFF (1 << 7) +# define TPS_VCORE_1_8V (7<<4) +# define TPS_VCORE_1_5V (6 << 4) +# define TPS_VCORE_1_4V (5 << 4) +# define TPS_VCORE_1_3V (4 << 4) +# define TPS_VCORE_1_2V (3 << 4) +# define TPS_VCORE_1_1V (2 << 4) +# define TPS_VCORE_1_0V (1 << 4) +# define TPS_VCORE_0_85V (0 << 4) +# define TPS_VCORE_LP_1_2V (3 << 2) +# define TPS_VCORE_LP_1_1V (2 << 2) +# define TPS_VCORE_LP_1_0V (1 << 2) +# define TPS_VCORE_LP_0_85V (0 << 2) +# define TPS_VIB (1 << 1) +# define TPS_VCORE_DISCH (1 << 0) +#define TPS_VREGS1 0x0e +# define TPS_LDO2_ENABLE (1 << 7) +# define TPS_LDO2_OFF (1 << 6) +# define TPS_VLDO2_3_0V (3 << 4) +# define TPS_VLDO2_2_75V (2 << 4) +# define TPS_VLDO2_2_5V (1 << 4) +# define TPS_VLDO2_1_8V (0 << 4) +# define TPS_LDO1_ENABLE (1 << 3) +# define TPS_LDO1_OFF (1 << 2) +# define TPS_VLDO1_3_0V (3 << 0) +# define TPS_VLDO1_2_75V (2 << 0) +# define TPS_VLDO1_2_5V (1 << 0) +# define TPS_VLDO1_ADJ (0 << 0) +#define TPS_MASK3 0x0f +#define TPS_DEFGPIO 0x10 + +/* + * ---------------------------------------------------------------------------- + * Macros used by exported functions + * ---------------------------------------------------------------------------- + */ + +#define LED1 1 +#define LED2 2 +#define OFF 0 +#define ON 1 +#define BLINK 2 +#define GPIO1 1 +#define 
GPIO2 2 +#define GPIO3 3 +#define GPIO4 4 +#define LOW 0 +#define HIGH 1 + +/* + * ---------------------------------------------------------------------------- + * Exported functions + * ---------------------------------------------------------------------------- + */ + +/* Draw from VBUS: + * 0 mA -- DON'T DRAW (might supply power instead) + * 100 mA -- usb unit load (slowest charge rate) + * 500 mA -- usb high power (fast battery charge) + */ +extern int tps65010_set_vbus_draw(unsigned mA); + +/* tps65010_set_gpio_out_value parameter: + * gpio: GPIO1, GPIO2, GPIO3 or GPIO4 + * value: LOW or HIGH + */ +extern int tps65010_set_gpio_out_value(unsigned gpio, unsigned value); + +/* tps65010_set_led parameter: + * led: LED1 or LED2 + * mode: ON, OFF or BLINK + */ +extern int tps65010_set_led(unsigned led, unsigned mode); + +/* tps65010_set_vib parameter: + * value: ON or OFF + */ +extern int tps65010_set_vib(unsigned value); + +/* tps65010_set_low_pwr parameter: + * mode: ON or OFF + */ +extern int tps65010_set_low_pwr(unsigned mode); + +/* tps65010_config_vregs1 parameter: + * value to be written to VREGS1 register + * Note: The complete register is written, set all bits you need + */ +extern int tps65010_config_vregs1(unsigned value); + +/* tps65013_set_low_pwr parameter: + * mode: ON or OFF + */ +extern int tps65013_set_low_pwr(unsigned mode); + +/* tps65010_set_vdcdc2 + * value to be written to VDCDC2 + */ +extern int tps65010_config_vdcdc2(unsigned value); + +struct i2c_client; + +/** + * struct tps65010_board - packages GPIO and LED lines + * @base: the GPIO number to assign to GPIO-1 + * @outmask: bit (N-1) is set to allow GPIO-N to be used as an + * (open drain) output + * @setup: optional callback issued once the GPIOs are valid + * @teardown: optional callback issued before the GPIOs are invalidated + * @context: optional parameter passed to setup() and teardown() + * + * Board data may be used to package the GPIO (and LED) lines for use + * in by the generic GPIO and LED frameworks. The first four GPIOs + * starting at gpio_base are GPIO1..GPIO4. The next two are LED1/nPG + * and LED2 (with hardware blinking capability, not currently exposed). + * + * The @setup callback may be used with the kind of board-specific glue + * which hands the (now-valid) GPIOs to other drivers, or which puts + * devices in their initial states using these GPIOs. + */ +struct tps65010_board { + int base; + unsigned outmask; + + int (*setup)(struct i2c_client *client, void *context); + int (*teardown)(struct i2c_client *client, void *context); + void *context; +}; + +#endif /* __LINUX_I2C_TPS65010_H */ + diff --git a/include/linux/mfd/tps6507x.h b/include/linux/mfd/tps6507x.h new file mode 100644 index 000000000..c2ae56933 --- /dev/null +++ b/include/linux/mfd/tps6507x.h @@ -0,0 +1,168 @@ +/* linux/mfd/tps6507x.h + * + * Functions to access TPS65070 power management chip. 
+ * + * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com) + * + * + * For licencing details see kernel-base/COPYING + */ + +#ifndef __LINUX_MFD_TPS6507X_H +#define __LINUX_MFD_TPS6507X_H + +/* + * ---------------------------------------------------------------------------- + * Registers, all 8 bits + * ---------------------------------------------------------------------------- + */ + + +/* Register definitions */ +#define TPS6507X_REG_PPATH1 0X01 +#define TPS6507X_CHG_USB BIT(7) +#define TPS6507X_CHG_AC BIT(6) +#define TPS6507X_CHG_USB_PW_ENABLE BIT(5) +#define TPS6507X_CHG_AC_PW_ENABLE BIT(4) +#define TPS6507X_CHG_AC_CURRENT BIT(2) +#define TPS6507X_CHG_USB_CURRENT BIT(0) + +#define TPS6507X_REG_INT 0X02 +#define TPS6507X_REG_MASK_AC_USB BIT(7) +#define TPS6507X_REG_MASK_TSC BIT(6) +#define TPS6507X_REG_MASK_PB_IN BIT(5) +#define TPS6507X_REG_TSC_INT BIT(3) +#define TPS6507X_REG_PB_IN_INT BIT(2) +#define TPS6507X_REG_AC_USB_APPLIED BIT(1) +#define TPS6507X_REG_AC_USB_REMOVED BIT(0) + +#define TPS6507X_REG_CHGCONFIG0 0X03 + +#define TPS6507X_REG_CHGCONFIG1 0X04 +#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4) +#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3) +#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2) +#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1) +#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0) + +#define TPS6507X_REG_CHGCONFIG2 0X05 + +#define TPS6507X_REG_CHGCONFIG3 0X06 + +#define TPS6507X_REG_ADCONFIG 0X07 +#define TPS6507X_ADCONFIG_AD_ENABLE BIT(7) +#define TPS6507X_ADCONFIG_START_CONVERSION BIT(6) +#define TPS6507X_ADCONFIG_CONVERSION_DONE BIT(5) +#define TPS6507X_ADCONFIG_VREF_ENABLE BIT(4) +#define TPS6507X_ADCONFIG_INPUT_AD_IN1 0 +#define TPS6507X_ADCONFIG_INPUT_AD_IN2 1 +#define TPS6507X_ADCONFIG_INPUT_AD_IN3 2 +#define TPS6507X_ADCONFIG_INPUT_AD_IN4 3 +#define TPS6507X_ADCONFIG_INPUT_TS_PIN 4 +#define TPS6507X_ADCONFIG_INPUT_BAT_CURRENT 5 +#define TPS6507X_ADCONFIG_INPUT_AC_VOLTAGE 6 +#define TPS6507X_ADCONFIG_INPUT_SYS_VOLTAGE 7 +#define TPS6507X_ADCONFIG_INPUT_CHARGER_VOLTAGE 8 +#define TPS6507X_ADCONFIG_INPUT_BAT_VOLTAGE 9 +#define TPS6507X_ADCONFIG_INPUT_THRESHOLD_VOLTAGE 10 +#define TPS6507X_ADCONFIG_INPUT_ISET1_VOLTAGE 11 +#define TPS6507X_ADCONFIG_INPUT_ISET2_VOLTAGE 12 +#define TPS6507X_ADCONFIG_INPUT_REAL_TSC 14 +#define TPS6507X_ADCONFIG_INPUT_TSC 15 + +#define TPS6507X_REG_TSCMODE 0X08 +#define TPS6507X_TSCMODE_X_POSITION 0 +#define TPS6507X_TSCMODE_Y_POSITION 1 +#define TPS6507X_TSCMODE_PRESSURE 2 +#define TPS6507X_TSCMODE_X_PLATE 3 +#define TPS6507X_TSCMODE_Y_PLATE 4 +#define TPS6507X_TSCMODE_STANDBY 5 +#define TPS6507X_TSCMODE_ADC_INPUT 6 +#define TPS6507X_TSCMODE_DISABLE 7 + +#define TPS6507X_REG_ADRESULT_1 0X09 + +#define TPS6507X_REG_ADRESULT_2 0X0A +#define TPS6507X_REG_ADRESULT_2_MASK (BIT(1) | BIT(0)) + +#define TPS6507X_REG_PGOOD 0X0B + +#define TPS6507X_REG_PGOODMASK 0X0C + +#define TPS6507X_REG_CON_CTRL1 0X0D +#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4) +#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3) +#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2) +#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1) +#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0) + +#define TPS6507X_REG_CON_CTRL2 0X0E + +#define TPS6507X_REG_CON_CTRL3 0X0F + +#define TPS6507X_REG_DEFDCDC1 0X10 +#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7) +#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC2_LOW 0X11 +#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC2_HIGH 0X12 +#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F + +#define 
TPS6507X_REG_DEFDCDC3_LOW 0X13 +#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC3_HIGH 0X14 +#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F + +#define TPS6507X_REG_DEFSLEW 0X15 + +#define TPS6507X_REG_LDO_CTRL1 0X16 +#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F + +#define TPS6507X_REG_DEFLDO2 0X17 +#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F + +#define TPS6507X_REG_WLED_CTRL1 0X18 + +#define TPS6507X_REG_WLED_CTRL2 0X19 + +/* VDCDC MASK */ +#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F + +#define TPS6507X_MAX_REGISTER 0X19 + +/** + * struct tps6507x_board - packages regulator and touchscreen init data + * @tps6507x_regulator_data: regulator initialization values + * + * Board data may be used to initialize regulator and touchscreen. + */ + +struct tps6507x_board { + struct regulator_init_data *tps6507x_pmic_init_data; + struct touchscreen_init_data *tps6507x_ts_init_data; +}; + +/** + * struct tps6507x_dev - tps6507x sub-driver chip access routines + * @read_dev() - I2C register read function + * @write_dev() - I2C register write function + * + * Device data may be used to access the TPS6507x chip + */ + +struct tps6507x_dev { + struct device *dev; + struct i2c_client *i2c_client; + int (*read_dev)(struct tps6507x_dev *tps6507x, char reg, int size, + void *dest); + int (*write_dev)(struct tps6507x_dev *tps6507x, char reg, int size, + void *src); + + /* Client devices */ + struct tps6507x_pmic *pmic; +}; + +#endif /* __LINUX_MFD_TPS6507X_H */ diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h new file mode 100644 index 000000000..a228ae4c8 --- /dev/null +++ b/include/linux/mfd/tps65086.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether expressed or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License version 2 for more details. 
+ * + * Based on the TPS65912 driver + */ + +#ifndef __LINUX_MFD_TPS65086_H +#define __LINUX_MFD_TPS65086_H + +#include +#include + +/* List of registers for TPS65086 */ +#define TPS65086_DEVICEID 0x01 +#define TPS65086_IRQ 0x02 +#define TPS65086_IRQ_MASK 0x03 +#define TPS65086_PMICSTAT 0x04 +#define TPS65086_SHUTDNSRC 0x05 +#define TPS65086_BUCK1CTRL 0x20 +#define TPS65086_BUCK2CTRL 0x21 +#define TPS65086_BUCK3DECAY 0x22 +#define TPS65086_BUCK3VID 0x23 +#define TPS65086_BUCK3SLPCTRL 0x24 +#define TPS65086_BUCK4CTRL 0x25 +#define TPS65086_BUCK5CTRL 0x26 +#define TPS65086_BUCK6CTRL 0x27 +#define TPS65086_LDOA2CTRL 0x28 +#define TPS65086_LDOA3CTRL 0x29 +#define TPS65086_DISCHCTRL1 0x40 +#define TPS65086_DISCHCTRL2 0x41 +#define TPS65086_DISCHCTRL3 0x42 +#define TPS65086_PG_DELAY1 0x43 +#define TPS65086_FORCESHUTDN 0x91 +#define TPS65086_BUCK1SLPCTRL 0x92 +#define TPS65086_BUCK2SLPCTRL 0x93 +#define TPS65086_BUCK4VID 0x94 +#define TPS65086_BUCK4SLPVID 0x95 +#define TPS65086_BUCK5VID 0x96 +#define TPS65086_BUCK5SLPVID 0x97 +#define TPS65086_BUCK6VID 0x98 +#define TPS65086_BUCK6SLPVID 0x99 +#define TPS65086_LDOA2VID 0x9A +#define TPS65086_LDOA3VID 0x9B +#define TPS65086_BUCK123CTRL 0x9C +#define TPS65086_PG_DELAY2 0x9D +#define TPS65086_PIN_EN_MASK1 0x9E +#define TPS65086_PIN_EN_MASK2 0x9F +#define TPS65086_SWVTT_EN 0x9F +#define TPS65086_PIN_EN_OVR1 0xA0 +#define TPS65086_PIN_EN_OVR2 0xA1 +#define TPS65086_GPOCTRL 0xA1 +#define TPS65086_PWR_FAULT_MASK1 0xA2 +#define TPS65086_PWR_FAULT_MASK2 0xA3 +#define TPS65086_GPO1PG_CTRL1 0xA4 +#define TPS65086_GPO1PG_CTRL2 0xA5 +#define TPS65086_GPO4PG_CTRL1 0xA6 +#define TPS65086_GPO4PG_CTRL2 0xA7 +#define TPS65086_GPO2PG_CTRL1 0xA8 +#define TPS65086_GPO2PG_CTRL2 0xA9 +#define TPS65086_GPO3PG_CTRL1 0xAA +#define TPS65086_GPO3PG_CTRL2 0xAB +#define TPS65086_LDOA1CTRL 0xAE +#define TPS65086_PG_STATUS1 0xB0 +#define TPS65086_PG_STATUS2 0xB1 +#define TPS65086_PWR_FAULT_STATUS1 0xB2 +#define TPS65086_PWR_FAULT_STATUS2 0xB3 +#define TPS65086_TEMPCRIT 0xB4 +#define TPS65086_TEMPHOT 0xB5 +#define TPS65086_OC_STATUS 0xB6 + +/* IRQ Register field definitions */ +#define TPS65086_IRQ_DIETEMP_MASK BIT(0) +#define TPS65086_IRQ_SHUTDN_MASK BIT(3) +#define TPS65086_IRQ_FAULT_MASK BIT(7) + +/* DEVICEID Register field definitions */ +#define TPS65086_DEVICEID_PART_MASK GENMASK(3, 0) +#define TPS65086_DEVICEID_OTP_MASK GENMASK(5, 4) +#define TPS65086_DEVICEID_REV_MASK GENMASK(7, 6) + +/* VID Masks */ +#define BUCK_VID_MASK GENMASK(7, 1) +#define VDOA1_VID_MASK GENMASK(4, 1) +#define VDOA23_VID_MASK GENMASK(3, 0) + +/* Define the TPS65086 IRQ numbers */ +enum tps65086_irqs { + TPS65086_IRQ_DIETEMP, + TPS65086_IRQ_SHUTDN, + TPS65086_IRQ_FAULT, +}; + +/** + * struct tps65086 - state holder for the tps65086 driver + * + * Device data may be used to access the TPS65086 chip + */ +struct tps65086 { + struct device *dev; + struct regmap *regmap; + + /* IRQ Data */ + int irq; + struct regmap_irq_chip_data *irq_data; +}; + +#endif /* __LINUX_MFD_TPS65086_H */ diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h new file mode 100644 index 000000000..f05bf4a14 --- /dev/null +++ b/include/linux/mfd/tps65090.h @@ -0,0 +1,163 @@ +/* + * Core driver interface for TI TPS65090 PMIC family + * + * Copyright (C) 2012 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your 
option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef __LINUX_MFD_TPS65090_H +#define __LINUX_MFD_TPS65090_H + +#include +#include + +/* TPS65090 IRQs */ +enum { + TPS65090_IRQ_INTERRUPT, + TPS65090_IRQ_VAC_STATUS_CHANGE, + TPS65090_IRQ_VSYS_STATUS_CHANGE, + TPS65090_IRQ_BAT_STATUS_CHANGE, + TPS65090_IRQ_CHARGING_STATUS_CHANGE, + TPS65090_IRQ_CHARGING_COMPLETE, + TPS65090_IRQ_OVERLOAD_DCDC1, + TPS65090_IRQ_OVERLOAD_DCDC2, + TPS65090_IRQ_OVERLOAD_DCDC3, + TPS65090_IRQ_OVERLOAD_FET1, + TPS65090_IRQ_OVERLOAD_FET2, + TPS65090_IRQ_OVERLOAD_FET3, + TPS65090_IRQ_OVERLOAD_FET4, + TPS65090_IRQ_OVERLOAD_FET5, + TPS65090_IRQ_OVERLOAD_FET6, + TPS65090_IRQ_OVERLOAD_FET7, +}; + +/* TPS65090 Regulator ID */ +enum { + TPS65090_REGULATOR_DCDC1, + TPS65090_REGULATOR_DCDC2, + TPS65090_REGULATOR_DCDC3, + TPS65090_REGULATOR_FET1, + TPS65090_REGULATOR_FET2, + TPS65090_REGULATOR_FET3, + TPS65090_REGULATOR_FET4, + TPS65090_REGULATOR_FET5, + TPS65090_REGULATOR_FET6, + TPS65090_REGULATOR_FET7, + TPS65090_REGULATOR_LDO1, + TPS65090_REGULATOR_LDO2, + + /* Last entry for maximum ID */ + TPS65090_REGULATOR_MAX, +}; + +/* Register addresses */ +#define TPS65090_REG_INTR_STS 0x00 +#define TPS65090_REG_INTR_STS2 0x01 +#define TPS65090_REG_INTR_MASK 0x02 +#define TPS65090_REG_INTR_MASK2 0x03 +#define TPS65090_REG_CG_CTRL0 0x04 +#define TPS65090_REG_CG_CTRL1 0x05 +#define TPS65090_REG_CG_CTRL2 0x06 +#define TPS65090_REG_CG_CTRL3 0x07 +#define TPS65090_REG_CG_CTRL4 0x08 +#define TPS65090_REG_CG_CTRL5 0x09 +#define TPS65090_REG_CG_STATUS1 0x0a +#define TPS65090_REG_CG_STATUS2 0x0b +#define TPS65090_REG_AD_OUT1 0x17 +#define TPS65090_REG_AD_OUT2 0x18 + +#define TPS65090_MAX_REG TPS65090_REG_AD_OUT2 +#define TPS65090_NUM_REGS (TPS65090_MAX_REG + 1) + +struct gpio_desc; + +struct tps65090 { + struct device *dev; + struct regmap *rmap; + struct regmap_irq_chip_data *irq_data; +}; + +/* + * struct tps65090_regulator_plat_data + * + * @reg_init_data: The regulator init data. + * @enable_ext_control: Enable external control or not. Only available for + * DCDC1, DCDC2 and DCDC3. + * @gpiod: GPIO descriptor if external control is enabled and controlled through + * a GPIO + * @overcurrent_wait_valid: True if the overcurrent_wait should be applied. + * @overcurrent_wait: Value to set as the overcurrent wait time. This is the + * actual bitfield value, not a time in ms (valid values are 0 - 3). 
+ */ +struct tps65090_regulator_plat_data { + struct regulator_init_data *reg_init_data; + bool enable_ext_control; + struct gpio_desc *gpiod; + bool overcurrent_wait_valid; + int overcurrent_wait; +}; + +struct tps65090_platform_data { + int irq_base; + + char **supplied_to; + size_t num_supplicants; + int enable_low_current_chrg; + + struct tps65090_regulator_plat_data *reg_pdata[TPS65090_REGULATOR_MAX]; +}; + +/* + * NOTE: the functions below are not intended for use outside + * of the TPS65090 sub-device drivers + */ +static inline int tps65090_write(struct device *dev, int reg, uint8_t val) +{ + struct tps65090 *tps = dev_get_drvdata(dev); + + return regmap_write(tps->rmap, reg, val); +} + +static inline int tps65090_read(struct device *dev, int reg, uint8_t *val) +{ + struct tps65090 *tps = dev_get_drvdata(dev); + unsigned int temp_val; + int ret; + + ret = regmap_read(tps->rmap, reg, &temp_val); + if (!ret) + *val = temp_val; + return ret; +} + +static inline int tps65090_set_bits(struct device *dev, int reg, + uint8_t bit_num) +{ + struct tps65090 *tps = dev_get_drvdata(dev); + + return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u); +} + +static inline int tps65090_clr_bits(struct device *dev, int reg, + uint8_t bit_num) +{ + struct tps65090 *tps = dev_get_drvdata(dev); + + return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u); +} + +#endif /*__LINUX_MFD_TPS65090_H */ diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h new file mode 100644 index 000000000..b5dd10842 --- /dev/null +++ b/include/linux/mfd/tps65217.h @@ -0,0 +1,289 @@ +/* + * linux/mfd/tps65217.h + * + * Functions to access TPS65217 power management chip. + * + * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __LINUX_MFD_TPS65217_H +#define __LINUX_MFD_TPS65217_H + +#include +#include +#include + +/* TPS chip id list */ +#define TPS65217 0xF0 + +/* I2C ID for TPS65217 part */ +#define TPS65217_I2C_ID 0x24 + +/* All register addresses */ +#define TPS65217_REG_CHIPID 0X00 +#define TPS65217_REG_PPATH 0X01 +#define TPS65217_REG_INT 0X02 +#define TPS65217_REG_CHGCONFIG0 0X03 +#define TPS65217_REG_CHGCONFIG1 0X04 +#define TPS65217_REG_CHGCONFIG2 0X05 +#define TPS65217_REG_CHGCONFIG3 0X06 +#define TPS65217_REG_WLEDCTRL1 0X07 +#define TPS65217_REG_WLEDCTRL2 0X08 +#define TPS65217_REG_MUXCTRL 0X09 +#define TPS65217_REG_STATUS 0X0A +#define TPS65217_REG_PASSWORD 0X0B +#define TPS65217_REG_PGOOD 0X0C +#define TPS65217_REG_DEFPG 0X0D +#define TPS65217_REG_DEFDCDC1 0X0E +#define TPS65217_REG_DEFDCDC2 0X0F +#define TPS65217_REG_DEFDCDC3 0X10 +#define TPS65217_REG_DEFSLEW 0X11 +#define TPS65217_REG_DEFLDO1 0X12 +#define TPS65217_REG_DEFLDO2 0X13 +#define TPS65217_REG_DEFLS1 0X14 +#define TPS65217_REG_DEFLS2 0X15 +#define TPS65217_REG_ENABLE 0X16 +#define TPS65217_REG_DEFUVLO 0X18 +#define TPS65217_REG_SEQ1 0X19 +#define TPS65217_REG_SEQ2 0X1A +#define TPS65217_REG_SEQ3 0X1B +#define TPS65217_REG_SEQ4 0X1C +#define TPS65217_REG_SEQ5 0X1D +#define TPS65217_REG_SEQ6 0X1E + +#define TPS65217_REG_MAX TPS65217_REG_SEQ6 + +/* Register field definitions */ +#define TPS65217_CHIPID_CHIP_MASK 0xF0 +#define TPS65217_CHIPID_REV_MASK 0x0F + +#define TPS65217_PPATH_ACSINK_ENABLE BIT(7) +#define TPS65217_PPATH_USBSINK_ENABLE BIT(6) +#define TPS65217_PPATH_AC_PW_ENABLE BIT(5) +#define TPS65217_PPATH_USB_PW_ENABLE BIT(4) +#define TPS65217_PPATH_AC_CURRENT_MASK 0x0C +#define TPS65217_PPATH_USB_CURRENT_MASK 0x03 + +#define TPS65217_INT_PBM BIT(6) +#define TPS65217_INT_ACM BIT(5) +#define TPS65217_INT_USBM BIT(4) +#define TPS65217_INT_PBI BIT(2) +#define TPS65217_INT_ACI BIT(1) +#define TPS65217_INT_USBI BIT(0) +#define TPS65217_INT_SHIFT 4 +#define TPS65217_INT_MASK (TPS65217_INT_PBM | TPS65217_INT_ACM | \ + TPS65217_INT_USBM) + +#define TPS65217_CHGCONFIG0_TREG BIT(7) +#define TPS65217_CHGCONFIG0_DPPM BIT(6) +#define TPS65217_CHGCONFIG0_TSUSP BIT(5) +#define TPS65217_CHGCONFIG0_TERMI BIT(4) +#define TPS65217_CHGCONFIG0_ACTIVE BIT(3) +#define TPS65217_CHGCONFIG0_CHGTOUT BIT(2) +#define TPS65217_CHGCONFIG0_PCHGTOUT BIT(1) +#define TPS65217_CHGCONFIG0_BATTEMP BIT(0) + +#define TPS65217_CHGCONFIG1_TMR_MASK 0xC0 +#define TPS65217_CHGCONFIG1_TMR_ENABLE BIT(5) +#define TPS65217_CHGCONFIG1_NTC_TYPE BIT(4) +#define TPS65217_CHGCONFIG1_RESET BIT(3) +#define TPS65217_CHGCONFIG1_TERM BIT(2) +#define TPS65217_CHGCONFIG1_SUSP BIT(1) +#define TPS65217_CHGCONFIG1_CHG_EN BIT(0) + +#define TPS65217_CHGCONFIG2_DYNTMR BIT(7) +#define TPS65217_CHGCONFIG2_VPREGHG BIT(6) +#define TPS65217_CHGCONFIG2_VOREG_MASK 0x30 + +#define TPS65217_CHGCONFIG3_ICHRG_MASK 0xC0 +#define TPS65217_CHGCONFIG3_DPPMTH_MASK 0x30 +#define TPS65217_CHGCONFIG2_PCHRGT BIT(3) +#define TPS65217_CHGCONFIG2_TERMIF 0x06 +#define TPS65217_CHGCONFIG2_TRANGE BIT(0) + +#define TPS65217_WLEDCTRL1_ISINK_ENABLE BIT(3) +#define TPS65217_WLEDCTRL1_ISEL BIT(2) +#define TPS65217_WLEDCTRL1_FDIM_MASK 0x03 + +#define TPS65217_WLEDCTRL2_DUTY_MASK 0x7F + +#define TPS65217_MUXCTRL_MUX_MASK 0x07 + +#define TPS65217_STATUS_OFF BIT(7) +#define TPS65217_STATUS_ACPWR BIT(3) +#define TPS65217_STATUS_USBPWR BIT(2) +#define TPS65217_STATUS_PB BIT(0) + +#define TPS65217_PASSWORD_REGS_UNLOCK 0x7D + +#define TPS65217_PGOOD_LDO3_PG BIT(6) +#define TPS65217_PGOOD_LDO4_PG BIT(5) +#define 
TPS65217_PGOOD_DC1_PG BIT(4) +#define TPS65217_PGOOD_DC2_PG BIT(3) +#define TPS65217_PGOOD_DC3_PG BIT(2) +#define TPS65217_PGOOD_LDO1_PG BIT(1) +#define TPS65217_PGOOD_LDO2_PG BIT(0) + +#define TPS65217_DEFPG_LDO1PGM BIT(3) +#define TPS65217_DEFPG_LDO2PGM BIT(2) +#define TPS65217_DEFPG_PGDLY_MASK 0x03 + +#define TPS65217_DEFDCDCX_XADJX BIT(7) +#define TPS65217_DEFDCDCX_DCDC_MASK 0x3F + +#define TPS65217_DEFSLEW_GO BIT(7) +#define TPS65217_DEFSLEW_GODSBL BIT(6) +#define TPS65217_DEFSLEW_PFM_EN1 BIT(5) +#define TPS65217_DEFSLEW_PFM_EN2 BIT(4) +#define TPS65217_DEFSLEW_PFM_EN3 BIT(3) +#define TPS65217_DEFSLEW_SLEW_MASK 0x07 + +#define TPS65217_DEFLDO1_LDO1_MASK 0x0F + +#define TPS65217_DEFLDO2_TRACK BIT(6) +#define TPS65217_DEFLDO2_LDO2_MASK 0x3F + +#define TPS65217_DEFLDO3_LDO3_EN BIT(5) +#define TPS65217_DEFLDO3_LDO3_MASK 0x1F + +#define TPS65217_DEFLDO4_LDO4_EN BIT(5) +#define TPS65217_DEFLDO4_LDO4_MASK 0x1F + +#define TPS65217_ENABLE_LS1_EN BIT(6) +#define TPS65217_ENABLE_LS2_EN BIT(5) +#define TPS65217_ENABLE_DC1_EN BIT(4) +#define TPS65217_ENABLE_DC2_EN BIT(3) +#define TPS65217_ENABLE_DC3_EN BIT(2) +#define TPS65217_ENABLE_LDO1_EN BIT(1) +#define TPS65217_ENABLE_LDO2_EN BIT(0) + +#define TPS65217_DEFUVLO_UVLOHYS BIT(2) +#define TPS65217_DEFUVLO_UVLO_MASK 0x03 + +#define TPS65217_SEQ1_DC1_SEQ_MASK 0xF0 +#define TPS65217_SEQ1_DC2_SEQ_MASK 0x0F + +#define TPS65217_SEQ2_DC3_SEQ_MASK 0xF0 +#define TPS65217_SEQ2_LDO1_SEQ_MASK 0x0F + +#define TPS65217_SEQ3_LDO2_SEQ_MASK 0xF0 +#define TPS65217_SEQ3_LDO3_SEQ_MASK 0x0F + +#define TPS65217_SEQ4_LDO4_SEQ_MASK 0xF0 + +#define TPS65217_SEQ5_DLY1_MASK 0xC0 +#define TPS65217_SEQ5_DLY2_MASK 0x30 +#define TPS65217_SEQ5_DLY3_MASK 0x0C +#define TPS65217_SEQ5_DLY4_MASK 0x03 + +#define TPS65217_SEQ6_DLY5_MASK 0xC0 +#define TPS65217_SEQ6_DLY6_MASK 0x30 +#define TPS65217_SEQ6_SEQUP BIT(2) +#define TPS65217_SEQ6_SEQDWN BIT(1) +#define TPS65217_SEQ6_INSTDWN BIT(0) + +#define TPS65217_MAX_REGISTER 0x1E +#define TPS65217_PROTECT_NONE 0 +#define TPS65217_PROTECT_L1 1 +#define TPS65217_PROTECT_L2 2 + + +enum tps65217_regulator_id { + /* DCDC's */ + TPS65217_DCDC_1, + TPS65217_DCDC_2, + TPS65217_DCDC_3, + /* LDOs */ + TPS65217_LDO_1, + TPS65217_LDO_2, + TPS65217_LDO_3, + TPS65217_LDO_4, +}; + +#define TPS65217_MAX_REG_ID TPS65217_LDO_4 + +/* Number of step-down converters available */ +#define TPS65217_NUM_DCDC 3 +/* Number of LDO voltage regulators available */ +#define TPS65217_NUM_LDO 4 +/* Number of total regulators available */ +#define TPS65217_NUM_REGULATOR (TPS65217_NUM_DCDC + TPS65217_NUM_LDO) + +enum tps65217_bl_isel { + TPS65217_BL_ISET1 = 1, + TPS65217_BL_ISET2, +}; + +enum tps65217_bl_fdim { + TPS65217_BL_FDIM_100HZ, + TPS65217_BL_FDIM_200HZ, + TPS65217_BL_FDIM_500HZ, + TPS65217_BL_FDIM_1000HZ, +}; + +struct tps65217_bl_pdata { + enum tps65217_bl_isel isel; + enum tps65217_bl_fdim fdim; + int dft_brightness; +}; + +/* Interrupt numbers */ +#define TPS65217_IRQ_USB 0 +#define TPS65217_IRQ_AC 1 +#define TPS65217_IRQ_PB 2 +#define TPS65217_NUM_IRQ 3 + +/** + * struct tps65217_board - packages regulator init data + * @tps65217_regulator_data: regulator initialization values + * + * Board data may be used to initialize regulator. 
+ */ +struct tps65217_board { + struct regulator_init_data *tps65217_init_data[TPS65217_NUM_REGULATOR]; + struct device_node *of_node[TPS65217_NUM_REGULATOR]; + struct tps65217_bl_pdata *bl_pdata; +}; + +/** + * struct tps65217 - tps65217 sub-driver chip access routines + * + * Device data may be used to access the TPS65217 chip + */ + +struct tps65217 { + struct device *dev; + struct tps65217_board *pdata; + struct regulator_desc desc[TPS65217_NUM_REGULATOR]; + struct regmap *regmap; + u8 *strobes; + struct irq_domain *irq_domain; + struct mutex irq_lock; + u8 irq_mask; + int irq; +}; + +static inline struct tps65217 *dev_to_tps65217(struct device *dev) +{ + return dev_get_drvdata(dev); +} + +int tps65217_reg_read(struct tps65217 *tps, unsigned int reg, + unsigned int *val); +int tps65217_reg_write(struct tps65217 *tps, unsigned int reg, + unsigned int val, unsigned int level); +int tps65217_set_bits(struct tps65217 *tps, unsigned int reg, + unsigned int mask, unsigned int val, unsigned int level); +int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg, + unsigned int mask, unsigned int level); + +#endif /* __LINUX_MFD_TPS65217_H */ diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h new file mode 100644 index 000000000..c204d9a79 --- /dev/null +++ b/include/linux/mfd/tps65218.h @@ -0,0 +1,276 @@ +/* + * linux/mfd/tps65218.h + * + * Functions to access TPS65219 power management chip. + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether expressed or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License version 2 for more details. 
+ */ + +#ifndef __LINUX_MFD_TPS65218_H +#define __LINUX_MFD_TPS65218_H + +#include +#include +#include +#include + +/* TPS chip id list */ +#define TPS65218 0xF0 + +/* I2C ID for TPS65218 part */ +#define TPS65218_I2C_ID 0x24 + +/* All register addresses */ +#define TPS65218_REG_CHIPID 0x00 +#define TPS65218_REG_INT1 0x01 +#define TPS65218_REG_INT2 0x02 +#define TPS65218_REG_INT_MASK1 0x03 +#define TPS65218_REG_INT_MASK2 0x04 +#define TPS65218_REG_STATUS 0x05 +#define TPS65218_REG_CONTROL 0x06 +#define TPS65218_REG_FLAG 0x07 + +#define TPS65218_REG_PASSWORD 0x10 +#define TPS65218_REG_ENABLE1 0x11 +#define TPS65218_REG_ENABLE2 0x12 +#define TPS65218_REG_CONFIG1 0x13 +#define TPS65218_REG_CONFIG2 0x14 +#define TPS65218_REG_CONFIG3 0x15 +#define TPS65218_REG_CONTROL_DCDC1 0x16 +#define TPS65218_REG_CONTROL_DCDC2 0x17 +#define TPS65218_REG_CONTROL_DCDC3 0x18 +#define TPS65218_REG_CONTROL_DCDC4 0x19 +#define TPS65218_REG_CONTRL_SLEW_RATE 0x1A +#define TPS65218_REG_CONTROL_LDO1 0x1B +#define TPS65218_REG_SEQ1 0x20 +#define TPS65218_REG_SEQ2 0x21 +#define TPS65218_REG_SEQ3 0x22 +#define TPS65218_REG_SEQ4 0x23 +#define TPS65218_REG_SEQ5 0x24 +#define TPS65218_REG_SEQ6 0x25 +#define TPS65218_REG_SEQ7 0x26 + +/* Register field definitions */ +#define TPS65218_CHIPID_CHIP_MASK 0xF8 +#define TPS65218_CHIPID_REV_MASK 0x07 + +#define TPS65218_REV_1_0 0x0 +#define TPS65218_REV_1_1 0x1 +#define TPS65218_REV_2_0 0x2 +#define TPS65218_REV_2_1 0x3 + +#define TPS65218_INT1_VPRG BIT(5) +#define TPS65218_INT1_AC BIT(4) +#define TPS65218_INT1_PB BIT(3) +#define TPS65218_INT1_HOT BIT(2) +#define TPS65218_INT1_CC_AQC BIT(1) +#define TPS65218_INT1_PRGC BIT(0) + +#define TPS65218_INT2_LS3_F BIT(5) +#define TPS65218_INT2_LS2_F BIT(4) +#define TPS65218_INT2_LS1_F BIT(3) +#define TPS65218_INT2_LS3_I BIT(2) +#define TPS65218_INT2_LS2_I BIT(1) +#define TPS65218_INT2_LS1_I BIT(0) + +#define TPS65218_INT_MASK1_VPRG BIT(5) +#define TPS65218_INT_MASK1_AC BIT(4) +#define TPS65218_INT_MASK1_PB BIT(3) +#define TPS65218_INT_MASK1_HOT BIT(2) +#define TPS65218_INT_MASK1_CC_AQC BIT(1) +#define TPS65218_INT_MASK1_PRGC BIT(0) + +#define TPS65218_INT_MASK2_LS3_F BIT(5) +#define TPS65218_INT_MASK2_LS2_F BIT(4) +#define TPS65218_INT_MASK2_LS1_F BIT(3) +#define TPS65218_INT_MASK2_LS3_I BIT(2) +#define TPS65218_INT_MASK2_LS2_I BIT(1) +#define TPS65218_INT_MASK2_LS1_I BIT(0) + +#define TPS65218_STATUS_FSEAL BIT(7) +#define TPS65218_STATUS_EE BIT(6) +#define TPS65218_STATUS_AC_STATE BIT(5) +#define TPS65218_STATUS_PB_STATE BIT(4) +#define TPS65218_STATUS_STATE_MASK 0xC +#define TPS65218_STATUS_CC_STAT 0x3 + +#define TPS65218_CONTROL_OFFNPFO BIT(1) +#define TPS65218_CONTROL_CC_AQ BIT(0) + +#define TPS65218_FLAG_GPO3_FLG BIT(7) +#define TPS65218_FLAG_GPO2_FLG BIT(6) +#define TPS65218_FLAG_GPO1_FLG BIT(5) +#define TPS65218_FLAG_LDO1_FLG BIT(4) +#define TPS65218_FLAG_DC4_FLG BIT(3) +#define TPS65218_FLAG_DC3_FLG BIT(2) +#define TPS65218_FLAG_DC2_FLG BIT(1) +#define TPS65218_FLAG_DC1_FLG BIT(0) + +#define TPS65218_ENABLE1_DC6_EN BIT(5) +#define TPS65218_ENABLE1_DC5_EN BIT(4) +#define TPS65218_ENABLE1_DC4_EN BIT(3) +#define TPS65218_ENABLE1_DC3_EN BIT(2) +#define TPS65218_ENABLE1_DC2_EN BIT(1) +#define TPS65218_ENABLE1_DC1_EN BIT(0) + +#define TPS65218_ENABLE2_GPIO3 BIT(6) +#define TPS65218_ENABLE2_GPIO2 BIT(5) +#define TPS65218_ENABLE2_GPIO1 BIT(4) +#define TPS65218_ENABLE2_LS3_EN BIT(3) +#define TPS65218_ENABLE2_LS2_EN BIT(2) +#define TPS65218_ENABLE2_LS1_EN BIT(1) +#define TPS65218_ENABLE2_LDO1_EN BIT(0) + + +#define TPS65218_CONFIG1_TRST 
BIT(7) +#define TPS65218_CONFIG1_GPO2_BUF BIT(6) +#define TPS65218_CONFIG1_IO1_SEL BIT(5) +#define TPS65218_CONFIG1_PGDLY_MASK 0x18 +#define TPS65218_CONFIG1_STRICT BIT(2) +#define TPS65218_CONFIG1_UVLO_MASK 0x3 + +#define TPS65218_CONFIG2_DC12_RST BIT(7) +#define TPS65218_CONFIG2_UVLOHYS BIT(6) +#define TPS65218_CONFIG2_LS3ILIM_MASK 0xC +#define TPS65218_CONFIG2_LS2ILIM_MASK 0x3 + +#define TPS65218_CONFIG3_LS3NPFO BIT(5) +#define TPS65218_CONFIG3_LS2NPFO BIT(4) +#define TPS65218_CONFIG3_LS1NPFO BIT(3) +#define TPS65218_CONFIG3_LS3DCHRG BIT(2) +#define TPS65218_CONFIG3_LS2DCHRG BIT(1) +#define TPS65218_CONFIG3_LS1DCHRG BIT(0) + +#define TPS65218_CONTROL_DCDC1_PFM BIT(7) +#define TPS65218_CONTROL_DCDC1_MASK 0x7F + +#define TPS65218_CONTROL_DCDC2_PFM BIT(7) +#define TPS65218_CONTROL_DCDC2_MASK 0x3F + +#define TPS65218_CONTROL_DCDC3_PFM BIT(7) +#define TPS65218_CONTROL_DCDC3_MASK 0x3F + +#define TPS65218_CONTROL_DCDC4_PFM BIT(7) +#define TPS65218_CONTROL_DCDC4_MASK 0x3F + +#define TPS65218_SLEW_RATE_GO BIT(7) +#define TPS65218_SLEW_RATE_GODSBL BIT(6) +#define TPS65218_SLEW_RATE_SLEW_MASK 0x7 + +#define TPS65218_CONTROL_LDO1_MASK 0x3F + +#define TPS65218_SEQ1_DLY8 BIT(7) +#define TPS65218_SEQ1_DLY7 BIT(6) +#define TPS65218_SEQ1_DLY6 BIT(5) +#define TPS65218_SEQ1_DLY5 BIT(4) +#define TPS65218_SEQ1_DLY4 BIT(3) +#define TPS65218_SEQ1_DLY3 BIT(2) +#define TPS65218_SEQ1_DLY2 BIT(1) +#define TPS65218_SEQ1_DLY1 BIT(0) + +#define TPS65218_SEQ2_DLYFCTR BIT(7) +#define TPS65218_SEQ2_DLY9 BIT(0) + +#define TPS65218_SEQ3_DC2_SEQ_MASK 0xF0 +#define TPS65218_SEQ3_DC1_SEQ_MASK 0xF + +#define TPS65218_SEQ4_DC4_SEQ_MASK 0xF0 +#define TPS65218_SEQ4_DC3_SEQ_MASK 0xF + +#define TPS65218_SEQ5_DC6_SEQ_MASK 0xF0 +#define TPS65218_SEQ5_DC5_SEQ_MASK 0xF + +#define TPS65218_SEQ6_LS1_SEQ_MASK 0xF0 +#define TPS65218_SEQ6_LDO1_SEQ_MASK 0xF + +#define TPS65218_SEQ7_GPO3_SEQ_MASK 0xF0 +#define TPS65218_SEQ7_GPO1_SEQ_MASK 0xF +#define TPS65218_PROTECT_NONE 0 +#define TPS65218_PROTECT_L1 1 + +enum tps65218_regulator_id { + /* DCDC's */ + TPS65218_DCDC_1, + TPS65218_DCDC_2, + TPS65218_DCDC_3, + TPS65218_DCDC_4, + TPS65218_DCDC_5, + TPS65218_DCDC_6, + /* LDOs */ + TPS65218_LDO_1, + /* LS's */ + TPS65218_LS_3, +}; + +#define TPS65218_MAX_REG_ID TPS65218_LDO_1 + +/* Number of step-down converters available */ +#define TPS65218_NUM_DCDC 6 +/* Number of LDO voltage regulators available */ +#define TPS65218_NUM_LDO 1 +/* Number of total LS current regulators available */ +#define TPS65218_NUM_LS 1 +/* Number of total regulators available */ +#define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO \ + + TPS65218_NUM_LS) + +/* Define the TPS65218 IRQ numbers */ +enum tps65218_irqs { + /* INT1 registers */ + TPS65218_PRGC_IRQ, + TPS65218_CC_AQC_IRQ, + TPS65218_HOT_IRQ, + TPS65218_PB_IRQ, + TPS65218_AC_IRQ, + TPS65218_VPRG_IRQ, + TPS65218_INVALID1_IRQ, + TPS65218_INVALID2_IRQ, + /* INT2 registers */ + TPS65218_LS1_I_IRQ, + TPS65218_LS2_I_IRQ, + TPS65218_LS3_I_IRQ, + TPS65218_LS1_F_IRQ, + TPS65218_LS2_F_IRQ, + TPS65218_LS3_F_IRQ, + TPS65218_INVALID3_IRQ, + TPS65218_INVALID4_IRQ, +}; + +/** + * struct tps65218 - tps65218 sub-driver chip access routines + * + * Device data may be used to access the TPS65218 chip + */ + +struct tps65218 { + struct device *dev; + unsigned int id; + u8 rev; + + struct mutex tps_lock; /* lock guarding the data structure */ + /* IRQ Data */ + int irq; + u32 irq_mask; + struct regmap_irq_chip_data *irq_data; + struct regulator_desc desc[TPS65218_NUM_REGULATOR]; + struct regmap *regmap; + u8 
*strobes; +}; + +int tps65218_reg_write(struct tps65218 *tps, unsigned int reg, + unsigned int val, unsigned int level); +int tps65218_set_bits(struct tps65218 *tps, unsigned int reg, + unsigned int mask, unsigned int val, unsigned int level); +int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg, + unsigned int mask, unsigned int level); + +#endif /* __LINUX_MFD_TPS65218_H */ diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h new file mode 100644 index 000000000..b19c2801a --- /dev/null +++ b/include/linux/mfd/tps6586x.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MFD_TPS6586X_H +#define __LINUX_MFD_TPS6586X_H + +#define TPS6586X_SLEW_RATE_INSTANTLY 0x00 +#define TPS6586X_SLEW_RATE_110UV 0x01 +#define TPS6586X_SLEW_RATE_220UV 0x02 +#define TPS6586X_SLEW_RATE_440UV 0x03 +#define TPS6586X_SLEW_RATE_880UV 0x04 +#define TPS6586X_SLEW_RATE_1760UV 0x05 +#define TPS6586X_SLEW_RATE_3520UV 0x06 +#define TPS6586X_SLEW_RATE_7040UV 0x07 + +#define TPS6586X_SLEW_RATE_SET 0x08 +#define TPS6586X_SLEW_RATE_MASK 0x07 + +/* VERSION CRC */ +#define TPS658621A 0x15 +#define TPS658621CD 0x2c +#define TPS658623 0x1b +#define TPS658624 0x0a +#define TPS658640 0x01 +#define TPS658640v2 0x02 +#define TPS658643 0x03 + +enum { + TPS6586X_ID_SYS, + TPS6586X_ID_SM_0, + TPS6586X_ID_SM_1, + TPS6586X_ID_SM_2, + TPS6586X_ID_LDO_0, + TPS6586X_ID_LDO_1, + TPS6586X_ID_LDO_2, + TPS6586X_ID_LDO_3, + TPS6586X_ID_LDO_4, + TPS6586X_ID_LDO_5, + TPS6586X_ID_LDO_6, + TPS6586X_ID_LDO_7, + TPS6586X_ID_LDO_8, + TPS6586X_ID_LDO_9, + TPS6586X_ID_LDO_RTC, + TPS6586X_ID_MAX_REGULATOR, +}; + +enum { + TPS6586X_INT_PLDO_0, + TPS6586X_INT_PLDO_1, + TPS6586X_INT_PLDO_2, + TPS6586X_INT_PLDO_3, + TPS6586X_INT_PLDO_4, + TPS6586X_INT_PLDO_5, + TPS6586X_INT_PLDO_6, + TPS6586X_INT_PLDO_7, + TPS6586X_INT_COMP_DET, + TPS6586X_INT_ADC, + TPS6586X_INT_PLDO_8, + TPS6586X_INT_PLDO_9, + TPS6586X_INT_PSM_0, + TPS6586X_INT_PSM_1, + TPS6586X_INT_PSM_2, + TPS6586X_INT_PSM_3, + TPS6586X_INT_RTC_ALM1, + TPS6586X_INT_ACUSB_OVP, + TPS6586X_INT_USB_DET, + TPS6586X_INT_AC_DET, + TPS6586X_INT_BAT_DET, + TPS6586X_INT_CHG_STAT, + TPS6586X_INT_CHG_TEMP, + TPS6586X_INT_PP, + TPS6586X_INT_RESUME, + TPS6586X_INT_LOW_SYS, + TPS6586X_INT_RTC_ALM2, +}; + +struct tps6586x_settings { + int slew_rate; +}; + +struct tps6586x_subdev_info { + int id; + const char *name; + void *platform_data; + struct device_node *of_node; +}; + +struct tps6586x_platform_data { + int num_subdevs; + struct tps6586x_subdev_info *subdevs; + + int gpio_base; + int irq_base; + bool pm_off; + + struct regulator_init_data *reg_init_data[TPS6586X_ID_MAX_REGULATOR]; +}; + +/* + * NOTE: the functions below are not intended for use outside + * of the TPS6586X sub-device drivers + */ +extern int tps6586x_write(struct device *dev, int reg, uint8_t val); +extern int tps6586x_writes(struct device *dev, int reg, int len, uint8_t *val); +extern int tps6586x_read(struct device *dev, int reg, uint8_t *val); +extern int tps6586x_reads(struct device *dev, int reg, int len, uint8_t *val); +extern int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask); +extern int tps6586x_clr_bits(struct device *dev, int reg, uint8_t bit_mask); +extern int tps6586x_update(struct device *dev, int reg, uint8_t val, + uint8_t mask); +extern int tps6586x_irq_get_virq(struct device *dev, int irq); +extern int tps6586x_get_version(struct device *dev); + +#endif /*__LINUX_MFD_TPS6586X_H */ diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h new 
file mode 100644 index 000000000..deffdcd02 --- /dev/null +++ b/include/linux/mfd/tps65910.h @@ -0,0 +1,956 @@ +/* + * tps65910.h -- TI TPS6591x + * + * Copyright 2010-2011 Texas Instruments Inc. + * + * Author: Graeme Gregory + * Author: Jorge Eduardo Candelaria + * Author: Arnaud Deconinck + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_TPS65910_H +#define __LINUX_MFD_TPS65910_H + +#include +#include + +/* TPS chip id list */ +#define TPS65910 0 +#define TPS65911 1 + +/* TPS regulator type list */ +#define REGULATOR_LDO 0 +#define REGULATOR_DCDC 1 + +/* + * List of registers for component TPS65910 + * + */ + +#define TPS65910_SECONDS 0x0 +#define TPS65910_MINUTES 0x1 +#define TPS65910_HOURS 0x2 +#define TPS65910_DAYS 0x3 +#define TPS65910_MONTHS 0x4 +#define TPS65910_YEARS 0x5 +#define TPS65910_WEEKS 0x6 +#define TPS65910_ALARM_SECONDS 0x8 +#define TPS65910_ALARM_MINUTES 0x9 +#define TPS65910_ALARM_HOURS 0xA +#define TPS65910_ALARM_DAYS 0xB +#define TPS65910_ALARM_MONTHS 0xC +#define TPS65910_ALARM_YEARS 0xD +#define TPS65910_RTC_CTRL 0x10 +#define TPS65910_RTC_STATUS 0x11 +#define TPS65910_RTC_INTERRUPTS 0x12 +#define TPS65910_RTC_COMP_LSB 0x13 +#define TPS65910_RTC_COMP_MSB 0x14 +#define TPS65910_RTC_RES_PROG 0x15 +#define TPS65910_RTC_RESET_STATUS 0x16 +#define TPS65910_BCK1 0x17 +#define TPS65910_BCK2 0x18 +#define TPS65910_BCK3 0x19 +#define TPS65910_BCK4 0x1A +#define TPS65910_BCK5 0x1B +#define TPS65910_PUADEN 0x1C +#define TPS65910_REF 0x1D +#define TPS65910_VRTC 0x1E +#define TPS65910_VIO 0x20 +#define TPS65910_VDD1 0x21 +#define TPS65910_VDD1_OP 0x22 +#define TPS65910_VDD1_SR 0x23 +#define TPS65910_VDD2 0x24 +#define TPS65910_VDD2_OP 0x25 +#define TPS65910_VDD2_SR 0x26 +#define TPS65910_VDD3 0x27 +#define TPS65910_VDIG1 0x30 +#define TPS65910_VDIG2 0x31 +#define TPS65910_VAUX1 0x32 +#define TPS65910_VAUX2 0x33 +#define TPS65910_VAUX33 0x34 +#define TPS65910_VMMC 0x35 +#define TPS65910_VPLL 0x36 +#define TPS65910_VDAC 0x37 +#define TPS65910_THERM 0x38 +#define TPS65910_BBCH 0x39 +#define TPS65910_DCDCCTRL 0x3E +#define TPS65910_DEVCTRL 0x3F +#define TPS65910_DEVCTRL2 0x40 +#define TPS65910_SLEEP_KEEP_LDO_ON 0x41 +#define TPS65910_SLEEP_KEEP_RES_ON 0x42 +#define TPS65910_SLEEP_SET_LDO_OFF 0x43 +#define TPS65910_SLEEP_SET_RES_OFF 0x44 +#define TPS65910_EN1_LDO_ASS 0x45 +#define TPS65910_EN1_SMPS_ASS 0x46 +#define TPS65910_EN2_LDO_ASS 0x47 +#define TPS65910_EN2_SMPS_ASS 0x48 +#define TPS65910_EN3_LDO_ASS 0x49 +#define TPS65910_SPARE 0x4A +#define TPS65910_INT_STS 0x50 +#define TPS65910_INT_MSK 0x51 +#define TPS65910_INT_STS2 0x52 +#define TPS65910_INT_MSK2 0x53 +#define TPS65910_INT_STS3 0x54 +#define TPS65910_INT_MSK3 0x55 +#define TPS65910_GPIO0 0x60 +#define TPS65910_GPIO1 0x61 +#define TPS65910_GPIO2 0x62 +#define TPS65910_GPIO3 0x63 +#define TPS65910_GPIO4 0x64 +#define TPS65910_GPIO5 0x65 +#define TPS65910_GPIO6 0x66 +#define TPS65910_GPIO7 0x67 +#define TPS65910_GPIO8 0x68 +#define TPS65910_JTAGVERNUM 0x80 +#define TPS65910_MAX_REGISTER 0x80 + +/* + * List of registers specific to TPS65911 + */ +#define TPS65911_VDDCTRL 0x27 +#define TPS65911_VDDCTRL_OP 0x28 +#define TPS65911_VDDCTRL_SR 0x29 +#define TPS65911_LDO1 0x30 +#define TPS65911_LDO2 0x31 +#define TPS65911_LDO5 0x32 +#define TPS65911_LDO8 0x33 +#define TPS65911_LDO7 0x34 
+#define TPS65911_LDO6 0x35 +#define TPS65911_LDO4 0x36 +#define TPS65911_LDO3 0x37 +#define TPS65911_VMBCH 0x6A +#define TPS65911_VMBCH2 0x6B + +/* + * List of register bitfields for component TPS65910 + * + */ + +/* RTC_CTRL_REG bitfields */ +#define TPS65910_RTC_CTRL_STOP_RTC 0x01 /*0=stop, 1=run */ +#define TPS65910_RTC_CTRL_AUTO_COMP 0x04 +#define TPS65910_RTC_CTRL_GET_TIME 0x40 + +/* RTC_STATUS_REG bitfields */ +#define TPS65910_RTC_STATUS_ALARM 0x40 + +/* RTC_INTERRUPTS_REG bitfields */ +#define TPS65910_RTC_INTERRUPTS_EVERY 0x03 +#define TPS65910_RTC_INTERRUPTS_IT_ALARM 0x08 + +/*Register BCK1 (0x80) register.RegisterDescription */ +#define BCK1_BCKUP_MASK 0xFF +#define BCK1_BCKUP_SHIFT 0 + + +/*Register BCK2 (0x80) register.RegisterDescription */ +#define BCK2_BCKUP_MASK 0xFF +#define BCK2_BCKUP_SHIFT 0 + + +/*Register BCK3 (0x80) register.RegisterDescription */ +#define BCK3_BCKUP_MASK 0xFF +#define BCK3_BCKUP_SHIFT 0 + + +/*Register BCK4 (0x80) register.RegisterDescription */ +#define BCK4_BCKUP_MASK 0xFF +#define BCK4_BCKUP_SHIFT 0 + + +/*Register BCK5 (0x80) register.RegisterDescription */ +#define BCK5_BCKUP_MASK 0xFF +#define BCK5_BCKUP_SHIFT 0 + + +/*Register PUADEN (0x80) register.RegisterDescription */ +#define PUADEN_EN3P_MASK 0x80 +#define PUADEN_EN3P_SHIFT 7 +#define PUADEN_I2CCTLP_MASK 0x40 +#define PUADEN_I2CCTLP_SHIFT 6 +#define PUADEN_I2CSRP_MASK 0x20 +#define PUADEN_I2CSRP_SHIFT 5 +#define PUADEN_PWRONP_MASK 0x10 +#define PUADEN_PWRONP_SHIFT 4 +#define PUADEN_SLEEPP_MASK 0x08 +#define PUADEN_SLEEPP_SHIFT 3 +#define PUADEN_PWRHOLDP_MASK 0x04 +#define PUADEN_PWRHOLDP_SHIFT 2 +#define PUADEN_BOOT1P_MASK 0x02 +#define PUADEN_BOOT1P_SHIFT 1 +#define PUADEN_BOOT0P_MASK 0x01 +#define PUADEN_BOOT0P_SHIFT 0 + + +/*Register REF (0x80) register.RegisterDescription */ +#define REF_VMBCH_SEL_MASK 0x0C +#define REF_VMBCH_SEL_SHIFT 2 +#define REF_ST_MASK 0x03 +#define REF_ST_SHIFT 0 + + +/*Register VRTC (0x80) register.RegisterDescription */ +#define VRTC_VRTC_OFFMASK_MASK 0x08 +#define VRTC_VRTC_OFFMASK_SHIFT 3 +#define VRTC_ST_MASK 0x03 +#define VRTC_ST_SHIFT 0 + + +/*Register VIO (0x80) register.RegisterDescription */ +#define VIO_ILMAX_MASK 0xC0 +#define VIO_ILMAX_SHIFT 6 +#define VIO_SEL_MASK 0x0C +#define VIO_SEL_SHIFT 2 +#define VIO_ST_MASK 0x03 +#define VIO_ST_SHIFT 0 + + +/*Register VDD1 (0x80) register.RegisterDescription */ +#define VDD1_VGAIN_SEL_MASK 0xC0 +#define VDD1_VGAIN_SEL_SHIFT 6 +#define VDD1_ILMAX_MASK 0x20 +#define VDD1_ILMAX_SHIFT 5 +#define VDD1_TSTEP_MASK 0x1C +#define VDD1_TSTEP_SHIFT 2 +#define VDD1_ST_MASK 0x03 +#define VDD1_ST_SHIFT 0 + + +/*Register VDD1_OP (0x80) register.RegisterDescription */ +#define VDD1_OP_CMD_MASK 0x80 +#define VDD1_OP_CMD_SHIFT 7 +#define VDD1_OP_SEL_MASK 0x7F +#define VDD1_OP_SEL_SHIFT 0 + + +/*Register VDD1_SR (0x80) register.RegisterDescription */ +#define VDD1_SR_SEL_MASK 0x7F +#define VDD1_SR_SEL_SHIFT 0 + + +/*Register VDD2 (0x80) register.RegisterDescription */ +#define VDD2_VGAIN_SEL_MASK 0xC0 +#define VDD2_VGAIN_SEL_SHIFT 6 +#define VDD2_ILMAX_MASK 0x20 +#define VDD2_ILMAX_SHIFT 5 +#define VDD2_TSTEP_MASK 0x1C +#define VDD2_TSTEP_SHIFT 2 +#define VDD2_ST_MASK 0x03 +#define VDD2_ST_SHIFT 0 + + +/*Register VDD2_OP (0x80) register.RegisterDescription */ +#define VDD2_OP_CMD_MASK 0x80 +#define VDD2_OP_CMD_SHIFT 7 +#define VDD2_OP_SEL_MASK 0x7F +#define VDD2_OP_SEL_SHIFT 0 + +/*Register VDD2_SR (0x80) register.RegisterDescription */ +#define VDD2_SR_SEL_MASK 0x7F +#define VDD2_SR_SEL_SHIFT 0 + + +/*Registers VDD1, VDD2 
voltage values definitions */ +#define VDD1_2_NUM_VOLT_FINE 73 +#define VDD1_2_NUM_VOLT_COARSE 3 +#define VDD1_2_MIN_VOLT 6000 +#define VDD1_2_OFFSET 125 + + +/*Register VDD3 (0x80) register.RegisterDescription */ +#define VDD3_CKINEN_MASK 0x04 +#define VDD3_CKINEN_SHIFT 2 +#define VDD3_ST_MASK 0x03 +#define VDD3_ST_SHIFT 0 +#define VDDCTRL_MIN_VOLT 6000 +#define VDDCTRL_OFFSET 125 + +/*Registers VDIG (0x80) to VDAC register.RegisterDescription */ +#define LDO_SEL_MASK 0x0C +#define LDO_SEL_SHIFT 2 +#define LDO_ST_MASK 0x03 +#define LDO_ST_SHIFT 0 +#define LDO_ST_ON_BIT 0x01 +#define LDO_ST_MODE_BIT 0x02 + + +/* Registers LDO1 to LDO8 in tps65910 */ +#define LDO1_SEL_MASK 0xFC +#define LDO3_SEL_MASK 0x7C +#define LDO_MIN_VOLT 1000 +#define LDO_MAX_VOLT 3300 + + +/*Register VDIG1 (0x80) register.RegisterDescription */ +#define VDIG1_SEL_MASK 0x0C +#define VDIG1_SEL_SHIFT 2 +#define VDIG1_ST_MASK 0x03 +#define VDIG1_ST_SHIFT 0 + + +/*Register VDIG2 (0x80) register.RegisterDescription */ +#define VDIG2_SEL_MASK 0x0C +#define VDIG2_SEL_SHIFT 2 +#define VDIG2_ST_MASK 0x03 +#define VDIG2_ST_SHIFT 0 + + +/*Register VAUX1 (0x80) register.RegisterDescription */ +#define VAUX1_SEL_MASK 0x0C +#define VAUX1_SEL_SHIFT 2 +#define VAUX1_ST_MASK 0x03 +#define VAUX1_ST_SHIFT 0 + + +/*Register VAUX2 (0x80) register.RegisterDescription */ +#define VAUX2_SEL_MASK 0x0C +#define VAUX2_SEL_SHIFT 2 +#define VAUX2_ST_MASK 0x03 +#define VAUX2_ST_SHIFT 0 + + +/*Register VAUX33 (0x80) register.RegisterDescription */ +#define VAUX33_SEL_MASK 0x0C +#define VAUX33_SEL_SHIFT 2 +#define VAUX33_ST_MASK 0x03 +#define VAUX33_ST_SHIFT 0 + + +/*Register VMMC (0x80) register.RegisterDescription */ +#define VMMC_SEL_MASK 0x0C +#define VMMC_SEL_SHIFT 2 +#define VMMC_ST_MASK 0x03 +#define VMMC_ST_SHIFT 0 + + +/*Register VPLL (0x80) register.RegisterDescription */ +#define VPLL_SEL_MASK 0x0C +#define VPLL_SEL_SHIFT 2 +#define VPLL_ST_MASK 0x03 +#define VPLL_ST_SHIFT 0 + + +/*Register VDAC (0x80) register.RegisterDescription */ +#define VDAC_SEL_MASK 0x0C +#define VDAC_SEL_SHIFT 2 +#define VDAC_ST_MASK 0x03 +#define VDAC_ST_SHIFT 0 + + +/*Register THERM (0x80) register.RegisterDescription */ +#define THERM_THERM_HD_MASK 0x20 +#define THERM_THERM_HD_SHIFT 5 +#define THERM_THERM_TS_MASK 0x10 +#define THERM_THERM_TS_SHIFT 4 +#define THERM_THERM_HDSEL_MASK 0x0C +#define THERM_THERM_HDSEL_SHIFT 2 +#define THERM_RSVD1_MASK 0x02 +#define THERM_RSVD1_SHIFT 1 +#define THERM_THERM_STATE_MASK 0x01 +#define THERM_THERM_STATE_SHIFT 0 + + +/*Register BBCH (0x80) register.RegisterDescription */ +#define BBCH_BBSEL_MASK 0x06 +#define BBCH_BBSEL_SHIFT 1 + + +/*Register DCDCCTRL (0x80) register.RegisterDescription */ +#define DCDCCTRL_VDD2_PSKIP_MASK 0x20 +#define DCDCCTRL_VDD2_PSKIP_SHIFT 5 +#define DCDCCTRL_VDD1_PSKIP_MASK 0x10 +#define DCDCCTRL_VDD1_PSKIP_SHIFT 4 +#define DCDCCTRL_VIO_PSKIP_MASK 0x08 +#define DCDCCTRL_VIO_PSKIP_SHIFT 3 +#define DCDCCTRL_DCDCCKEXT_MASK 0x04 +#define DCDCCTRL_DCDCCKEXT_SHIFT 2 +#define DCDCCTRL_DCDCCKSYNC_MASK 0x03 +#define DCDCCTRL_DCDCCKSYNC_SHIFT 0 + + +/*Register DEVCTRL (0x80) register.RegisterDescription */ +#define DEVCTRL_PWR_OFF_MASK 0x80 +#define DEVCTRL_PWR_OFF_SHIFT 7 +#define DEVCTRL_RTC_PWDN_MASK 0x40 +#define DEVCTRL_RTC_PWDN_SHIFT 6 +#define DEVCTRL_CK32K_CTRL_MASK 0x20 +#define DEVCTRL_CK32K_CTRL_SHIFT 5 +#define DEVCTRL_SR_CTL_I2C_SEL_MASK 0x10 +#define DEVCTRL_SR_CTL_I2C_SEL_SHIFT 4 +#define DEVCTRL_DEV_OFF_RST_MASK 0x08 +#define DEVCTRL_DEV_OFF_RST_SHIFT 3 +#define DEVCTRL_DEV_ON_MASK 0x04 
+#define DEVCTRL_DEV_ON_SHIFT 2 +#define DEVCTRL_DEV_SLP_MASK 0x02 +#define DEVCTRL_DEV_SLP_SHIFT 1 +#define DEVCTRL_DEV_OFF_MASK 0x01 +#define DEVCTRL_DEV_OFF_SHIFT 0 + + +/*Register DEVCTRL2 (0x80) register.RegisterDescription */ +#define DEVCTRL2_TSLOT_LENGTH_MASK 0x30 +#define DEVCTRL2_TSLOT_LENGTH_SHIFT 4 +#define DEVCTRL2_SLEEPSIG_POL_MASK 0x08 +#define DEVCTRL2_SLEEPSIG_POL_SHIFT 3 +#define DEVCTRL2_PWON_LP_OFF_MASK 0x04 +#define DEVCTRL2_PWON_LP_OFF_SHIFT 2 +#define DEVCTRL2_PWON_LP_RST_MASK 0x02 +#define DEVCTRL2_PWON_LP_RST_SHIFT 1 +#define DEVCTRL2_IT_POL_MASK 0x01 +#define DEVCTRL2_IT_POL_SHIFT 0 + + +/*Register SLEEP_KEEP_LDO_ON (0x80) register.RegisterDescription */ +#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_MASK 0x80 +#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_SHIFT 7 +#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_MASK 0x40 +#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_SHIFT 6 +#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_MASK 0x20 +#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_SHIFT 5 +#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_MASK 0x10 +#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_SHIFT 4 +#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_MASK 0x08 +#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_SHIFT 3 +#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_MASK 0x04 +#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_SHIFT 2 +#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_MASK 0x02 +#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_SHIFT 1 +#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_MASK 0x01 +#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_SHIFT 0 + + +/*Register SLEEP_KEEP_RES_ON (0x80) register.RegisterDescription */ +#define SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK 0x80 +#define SLEEP_KEEP_RES_ON_THERM_KEEPON_SHIFT 7 +#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK 0x40 +#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_SHIFT 6 +#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_MASK 0x20 +#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_SHIFT 5 +#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK 0x10 +#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_SHIFT 4 +#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_MASK 0x08 +#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_SHIFT 3 +#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_MASK 0x04 +#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_SHIFT 2 +#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_MASK 0x02 +#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_SHIFT 1 +#define SLEEP_KEEP_RES_ON_VIO_KEEPON_MASK 0x01 +#define SLEEP_KEEP_RES_ON_VIO_KEEPON_SHIFT 0 + + +/*Register SLEEP_SET_LDO_OFF (0x80) register.RegisterDescription */ +#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_MASK 0x80 +#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_SHIFT 7 +#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_MASK 0x40 +#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_SHIFT 6 +#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_MASK 0x20 +#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_SHIFT 5 +#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_MASK 0x10 +#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_SHIFT 4 +#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_MASK 0x08 +#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_SHIFT 3 +#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_MASK 0x04 +#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_SHIFT 2 +#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_MASK 0x02 +#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_SHIFT 1 +#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_MASK 0x01 +#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_SHIFT 0 + + +/*Register SLEEP_SET_RES_OFF (0x80) register.RegisterDescription */ +#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_MASK 0x80 +#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_SHIFT 7 +#define SLEEP_SET_RES_OFF_RSVD_MASK 0x60 +#define SLEEP_SET_RES_OFF_RSVD_SHIFT 5 +#define SLEEP_SET_RES_OFF_SPARE_SETOFF_MASK 0x10 +#define SLEEP_SET_RES_OFF_SPARE_SETOFF_SHIFT 4 +#define 
SLEEP_SET_RES_OFF_VDD3_SETOFF_MASK 0x08 +#define SLEEP_SET_RES_OFF_VDD3_SETOFF_SHIFT 3 +#define SLEEP_SET_RES_OFF_VDD2_SETOFF_MASK 0x04 +#define SLEEP_SET_RES_OFF_VDD2_SETOFF_SHIFT 2 +#define SLEEP_SET_RES_OFF_VDD1_SETOFF_MASK 0x02 +#define SLEEP_SET_RES_OFF_VDD1_SETOFF_SHIFT 1 +#define SLEEP_SET_RES_OFF_VIO_SETOFF_MASK 0x01 +#define SLEEP_SET_RES_OFF_VIO_SETOFF_SHIFT 0 + + +/*Register EN1_LDO_ASS (0x80) register.RegisterDescription */ +#define EN1_LDO_ASS_VDAC_EN1_MASK 0x80 +#define EN1_LDO_ASS_VDAC_EN1_SHIFT 7 +#define EN1_LDO_ASS_VPLL_EN1_MASK 0x40 +#define EN1_LDO_ASS_VPLL_EN1_SHIFT 6 +#define EN1_LDO_ASS_VAUX33_EN1_MASK 0x20 +#define EN1_LDO_ASS_VAUX33_EN1_SHIFT 5 +#define EN1_LDO_ASS_VAUX2_EN1_MASK 0x10 +#define EN1_LDO_ASS_VAUX2_EN1_SHIFT 4 +#define EN1_LDO_ASS_VAUX1_EN1_MASK 0x08 +#define EN1_LDO_ASS_VAUX1_EN1_SHIFT 3 +#define EN1_LDO_ASS_VDIG2_EN1_MASK 0x04 +#define EN1_LDO_ASS_VDIG2_EN1_SHIFT 2 +#define EN1_LDO_ASS_VDIG1_EN1_MASK 0x02 +#define EN1_LDO_ASS_VDIG1_EN1_SHIFT 1 +#define EN1_LDO_ASS_VMMC_EN1_MASK 0x01 +#define EN1_LDO_ASS_VMMC_EN1_SHIFT 0 + + +/*Register EN1_SMPS_ASS (0x80) register.RegisterDescription */ +#define EN1_SMPS_ASS_RSVD_MASK 0xE0 +#define EN1_SMPS_ASS_RSVD_SHIFT 5 +#define EN1_SMPS_ASS_SPARE_EN1_MASK 0x10 +#define EN1_SMPS_ASS_SPARE_EN1_SHIFT 4 +#define EN1_SMPS_ASS_VDD3_EN1_MASK 0x08 +#define EN1_SMPS_ASS_VDD3_EN1_SHIFT 3 +#define EN1_SMPS_ASS_VDD2_EN1_MASK 0x04 +#define EN1_SMPS_ASS_VDD2_EN1_SHIFT 2 +#define EN1_SMPS_ASS_VDD1_EN1_MASK 0x02 +#define EN1_SMPS_ASS_VDD1_EN1_SHIFT 1 +#define EN1_SMPS_ASS_VIO_EN1_MASK 0x01 +#define EN1_SMPS_ASS_VIO_EN1_SHIFT 0 + + +/*Register EN2_LDO_ASS (0x80) register.RegisterDescription */ +#define EN2_LDO_ASS_VDAC_EN2_MASK 0x80 +#define EN2_LDO_ASS_VDAC_EN2_SHIFT 7 +#define EN2_LDO_ASS_VPLL_EN2_MASK 0x40 +#define EN2_LDO_ASS_VPLL_EN2_SHIFT 6 +#define EN2_LDO_ASS_VAUX33_EN2_MASK 0x20 +#define EN2_LDO_ASS_VAUX33_EN2_SHIFT 5 +#define EN2_LDO_ASS_VAUX2_EN2_MASK 0x10 +#define EN2_LDO_ASS_VAUX2_EN2_SHIFT 4 +#define EN2_LDO_ASS_VAUX1_EN2_MASK 0x08 +#define EN2_LDO_ASS_VAUX1_EN2_SHIFT 3 +#define EN2_LDO_ASS_VDIG2_EN2_MASK 0x04 +#define EN2_LDO_ASS_VDIG2_EN2_SHIFT 2 +#define EN2_LDO_ASS_VDIG1_EN2_MASK 0x02 +#define EN2_LDO_ASS_VDIG1_EN2_SHIFT 1 +#define EN2_LDO_ASS_VMMC_EN2_MASK 0x01 +#define EN2_LDO_ASS_VMMC_EN2_SHIFT 0 + + +/*Register EN2_SMPS_ASS (0x80) register.RegisterDescription */ +#define EN2_SMPS_ASS_RSVD_MASK 0xE0 +#define EN2_SMPS_ASS_RSVD_SHIFT 5 +#define EN2_SMPS_ASS_SPARE_EN2_MASK 0x10 +#define EN2_SMPS_ASS_SPARE_EN2_SHIFT 4 +#define EN2_SMPS_ASS_VDD3_EN2_MASK 0x08 +#define EN2_SMPS_ASS_VDD3_EN2_SHIFT 3 +#define EN2_SMPS_ASS_VDD2_EN2_MASK 0x04 +#define EN2_SMPS_ASS_VDD2_EN2_SHIFT 2 +#define EN2_SMPS_ASS_VDD1_EN2_MASK 0x02 +#define EN2_SMPS_ASS_VDD1_EN2_SHIFT 1 +#define EN2_SMPS_ASS_VIO_EN2_MASK 0x01 +#define EN2_SMPS_ASS_VIO_EN2_SHIFT 0 + + +/*Register EN3_LDO_ASS (0x80) register.RegisterDescription */ +#define EN3_LDO_ASS_VDAC_EN3_MASK 0x80 +#define EN3_LDO_ASS_VDAC_EN3_SHIFT 7 +#define EN3_LDO_ASS_VPLL_EN3_MASK 0x40 +#define EN3_LDO_ASS_VPLL_EN3_SHIFT 6 +#define EN3_LDO_ASS_VAUX33_EN3_MASK 0x20 +#define EN3_LDO_ASS_VAUX33_EN3_SHIFT 5 +#define EN3_LDO_ASS_VAUX2_EN3_MASK 0x10 +#define EN3_LDO_ASS_VAUX2_EN3_SHIFT 4 +#define EN3_LDO_ASS_VAUX1_EN3_MASK 0x08 +#define EN3_LDO_ASS_VAUX1_EN3_SHIFT 3 +#define EN3_LDO_ASS_VDIG2_EN3_MASK 0x04 +#define EN3_LDO_ASS_VDIG2_EN3_SHIFT 2 +#define EN3_LDO_ASS_VDIG1_EN3_MASK 0x02 +#define EN3_LDO_ASS_VDIG1_EN3_SHIFT 1 +#define EN3_LDO_ASS_VMMC_EN3_MASK 0x01 +#define 
EN3_LDO_ASS_VMMC_EN3_SHIFT 0 + + +/*Register SPARE (0x80) register.RegisterDescription */ +#define SPARE_SPARE_MASK 0xFF +#define SPARE_SPARE_SHIFT 0 + +#define TPS65910_INT_STS_RTC_PERIOD_IT_MASK 0x80 +#define TPS65910_INT_STS_RTC_PERIOD_IT_SHIFT 7 +#define TPS65910_INT_STS_RTC_ALARM_IT_MASK 0x40 +#define TPS65910_INT_STS_RTC_ALARM_IT_SHIFT 6 +#define TPS65910_INT_STS_HOTDIE_IT_MASK 0x20 +#define TPS65910_INT_STS_HOTDIE_IT_SHIFT 5 +#define TPS65910_INT_STS_PWRHOLD_F_IT_MASK 0x10 +#define TPS65910_INT_STS_PWRHOLD_F_IT_SHIFT 4 +#define TPS65910_INT_STS_PWRON_LP_IT_MASK 0x08 +#define TPS65910_INT_STS_PWRON_LP_IT_SHIFT 3 +#define TPS65910_INT_STS_PWRON_IT_MASK 0x04 +#define TPS65910_INT_STS_PWRON_IT_SHIFT 2 +#define TPS65910_INT_STS_VMBHI_IT_MASK 0x02 +#define TPS65910_INT_STS_VMBHI_IT_SHIFT 1 +#define TPS65910_INT_STS_VMBDCH_IT_MASK 0x01 +#define TPS65910_INT_STS_VMBDCH_IT_SHIFT 0 + +#define TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_MASK 0x80 +#define TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_SHIFT 7 +#define TPS65910_INT_MSK_RTC_ALARM_IT_MSK_MASK 0x40 +#define TPS65910_INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6 +#define TPS65910_INT_MSK_HOTDIE_IT_MSK_MASK 0x20 +#define TPS65910_INT_MSK_HOTDIE_IT_MSK_SHIFT 5 +#define TPS65910_INT_MSK_PWRHOLD_IT_MSK_MASK 0x10 +#define TPS65910_INT_MSK_PWRHOLD_IT_MSK_SHIFT 4 +#define TPS65910_INT_MSK_PWRON_LP_IT_MSK_MASK 0x08 +#define TPS65910_INT_MSK_PWRON_LP_IT_MSK_SHIFT 3 +#define TPS65910_INT_MSK_PWRON_IT_MSK_MASK 0x04 +#define TPS65910_INT_MSK_PWRON_IT_MSK_SHIFT 2 +#define TPS65910_INT_MSK_VMBHI_IT_MSK_MASK 0x02 +#define TPS65910_INT_MSK_VMBHI_IT_MSK_SHIFT 1 +#define TPS65910_INT_MSK_VMBDCH_IT_MSK_MASK 0x01 +#define TPS65910_INT_MSK_VMBDCH_IT_MSK_SHIFT 0 + +#define TPS65910_INT_STS2_GPIO0_F_IT_SHIFT 2 +#define TPS65910_INT_STS2_GPIO0_F_IT_MASK 0x02 +#define TPS65910_INT_STS2_GPIO0_R_IT_SHIFT 1 +#define TPS65910_INT_STS2_GPIO0_R_IT_MASK 0x01 + +#define TPS65910_INT_MSK2_GPIO0_F_IT_MSK_SHIFT 2 +#define TPS65910_INT_MSK2_GPIO0_F_IT_MSK_MASK 0x02 +#define TPS65910_INT_MSK2_GPIO0_R_IT_MSK_SHIFT 1 +#define TPS65910_INT_MSK2_GPIO0_R_IT_MSK_MASK 0x01 + +/*Register INT_STS (0x80) register.RegisterDescription */ +#define INT_STS_RTC_PERIOD_IT_MASK 0x80 +#define INT_STS_RTC_PERIOD_IT_SHIFT 7 +#define INT_STS_RTC_ALARM_IT_MASK 0x40 +#define INT_STS_RTC_ALARM_IT_SHIFT 6 +#define INT_STS_HOTDIE_IT_MASK 0x20 +#define INT_STS_HOTDIE_IT_SHIFT 5 +#define INT_STS_PWRHOLD_R_IT_MASK 0x10 +#define INT_STS_PWRHOLD_R_IT_SHIFT 4 +#define INT_STS_PWRON_LP_IT_MASK 0x08 +#define INT_STS_PWRON_LP_IT_SHIFT 3 +#define INT_STS_PWRON_IT_MASK 0x04 +#define INT_STS_PWRON_IT_SHIFT 2 +#define INT_STS_VMBHI_IT_MASK 0x02 +#define INT_STS_VMBHI_IT_SHIFT 1 +#define INT_STS_PWRHOLD_F_IT_MASK 0x01 +#define INT_STS_PWRHOLD_F_IT_SHIFT 0 + + +/*Register INT_MSK (0x80) register.RegisterDescription */ +#define INT_MSK_RTC_PERIOD_IT_MSK_MASK 0x80 +#define INT_MSK_RTC_PERIOD_IT_MSK_SHIFT 7 +#define INT_MSK_RTC_ALARM_IT_MSK_MASK 0x40 +#define INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6 +#define INT_MSK_HOTDIE_IT_MSK_MASK 0x20 +#define INT_MSK_HOTDIE_IT_MSK_SHIFT 5 +#define INT_MSK_PWRHOLD_R_IT_MSK_MASK 0x10 +#define INT_MSK_PWRHOLD_R_IT_MSK_SHIFT 4 +#define INT_MSK_PWRON_LP_IT_MSK_MASK 0x08 +#define INT_MSK_PWRON_LP_IT_MSK_SHIFT 3 +#define INT_MSK_PWRON_IT_MSK_MASK 0x04 +#define INT_MSK_PWRON_IT_MSK_SHIFT 2 +#define INT_MSK_VMBHI_IT_MSK_MASK 0x02 +#define INT_MSK_VMBHI_IT_MSK_SHIFT 1 +#define INT_MSK_PWRHOLD_F_IT_MSK_MASK 0x01 +#define INT_MSK_PWRHOLD_F_IT_MSK_SHIFT 0 + + +/*Register INT_STS2 (0x80) register.RegisterDescription */ +#define 
INT_STS2_GPIO3_F_IT_MASK 0x80 +#define INT_STS2_GPIO3_F_IT_SHIFT 7 +#define INT_STS2_GPIO3_R_IT_MASK 0x40 +#define INT_STS2_GPIO3_R_IT_SHIFT 6 +#define INT_STS2_GPIO2_F_IT_MASK 0x20 +#define INT_STS2_GPIO2_F_IT_SHIFT 5 +#define INT_STS2_GPIO2_R_IT_MASK 0x10 +#define INT_STS2_GPIO2_R_IT_SHIFT 4 +#define INT_STS2_GPIO1_F_IT_MASK 0x08 +#define INT_STS2_GPIO1_F_IT_SHIFT 3 +#define INT_STS2_GPIO1_R_IT_MASK 0x04 +#define INT_STS2_GPIO1_R_IT_SHIFT 2 +#define INT_STS2_GPIO0_F_IT_MASK 0x02 +#define INT_STS2_GPIO0_F_IT_SHIFT 1 +#define INT_STS2_GPIO0_R_IT_MASK 0x01 +#define INT_STS2_GPIO0_R_IT_SHIFT 0 + + +/*Register INT_MSK2 (0x80) register.RegisterDescription */ +#define INT_MSK2_GPIO3_F_IT_MSK_MASK 0x80 +#define INT_MSK2_GPIO3_F_IT_MSK_SHIFT 7 +#define INT_MSK2_GPIO3_R_IT_MSK_MASK 0x40 +#define INT_MSK2_GPIO3_R_IT_MSK_SHIFT 6 +#define INT_MSK2_GPIO2_F_IT_MSK_MASK 0x20 +#define INT_MSK2_GPIO2_F_IT_MSK_SHIFT 5 +#define INT_MSK2_GPIO2_R_IT_MSK_MASK 0x10 +#define INT_MSK2_GPIO2_R_IT_MSK_SHIFT 4 +#define INT_MSK2_GPIO1_F_IT_MSK_MASK 0x08 +#define INT_MSK2_GPIO1_F_IT_MSK_SHIFT 3 +#define INT_MSK2_GPIO1_R_IT_MSK_MASK 0x04 +#define INT_MSK2_GPIO1_R_IT_MSK_SHIFT 2 +#define INT_MSK2_GPIO0_F_IT_MSK_MASK 0x02 +#define INT_MSK2_GPIO0_F_IT_MSK_SHIFT 1 +#define INT_MSK2_GPIO0_R_IT_MSK_MASK 0x01 +#define INT_MSK2_GPIO0_R_IT_MSK_SHIFT 0 + + +/*Register INT_STS3 (0x80) register.RegisterDescription */ +#define INT_STS3_PWRDN_IT_MASK 0x80 +#define INT_STS3_PWRDN_IT_SHIFT 7 +#define INT_STS3_VMBCH2_L_IT_MASK 0x40 +#define INT_STS3_VMBCH2_L_IT_SHIFT 6 +#define INT_STS3_VMBCH2_H_IT_MASK 0x20 +#define INT_STS3_VMBCH2_H_IT_SHIFT 5 +#define INT_STS3_WTCHDG_IT_MASK 0x10 +#define INT_STS3_WTCHDG_IT_SHIFT 4 +#define INT_STS3_GPIO5_F_IT_MASK 0x08 +#define INT_STS3_GPIO5_F_IT_SHIFT 3 +#define INT_STS3_GPIO5_R_IT_MASK 0x04 +#define INT_STS3_GPIO5_R_IT_SHIFT 2 +#define INT_STS3_GPIO4_F_IT_MASK 0x02 +#define INT_STS3_GPIO4_F_IT_SHIFT 1 +#define INT_STS3_GPIO4_R_IT_MASK 0x01 +#define INT_STS3_GPIO4_R_IT_SHIFT 0 + + +/*Register INT_MSK3 (0x80) register.RegisterDescription */ +#define INT_MSK3_PWRDN_IT_MSK_MASK 0x80 +#define INT_MSK3_PWRDN_IT_MSK_SHIFT 7 +#define INT_MSK3_VMBCH2_L_IT_MSK_MASK 0x40 +#define INT_MSK3_VMBCH2_L_IT_MSK_SHIFT 6 +#define INT_MSK3_VMBCH2_H_IT_MSK_MASK 0x20 +#define INT_MSK3_VMBCH2_H_IT_MSK_SHIFT 5 +#define INT_MSK3_WTCHDG_IT_MSK_MASK 0x10 +#define INT_MSK3_WTCHDG_IT_MSK_SHIFT 4 +#define INT_MSK3_GPIO5_F_IT_MSK_MASK 0x08 +#define INT_MSK3_GPIO5_F_IT_MSK_SHIFT 3 +#define INT_MSK3_GPIO5_R_IT_MSK_MASK 0x04 +#define INT_MSK3_GPIO5_R_IT_MSK_SHIFT 2 +#define INT_MSK3_GPIO4_F_IT_MSK_MASK 0x02 +#define INT_MSK3_GPIO4_F_IT_MSK_SHIFT 1 +#define INT_MSK3_GPIO4_R_IT_MSK_MASK 0x01 +#define INT_MSK3_GPIO4_R_IT_MSK_SHIFT 0 + + +/*Register GPIO (0x80) register.RegisterDescription */ +#define GPIO_SLEEP_MASK 0x80 +#define GPIO_SLEEP_SHIFT 7 +#define GPIO_DEB_MASK 0x10 +#define GPIO_DEB_SHIFT 4 +#define GPIO_PUEN_MASK 0x08 +#define GPIO_PUEN_SHIFT 3 +#define GPIO_CFG_MASK 0x04 +#define GPIO_CFG_SHIFT 2 +#define GPIO_STS_MASK 0x02 +#define GPIO_STS_SHIFT 1 +#define GPIO_SET_MASK 0x01 +#define GPIO_SET_SHIFT 0 + + +/*Register JTAGVERNUM (0x80) register.RegisterDescription */ +#define JTAGVERNUM_VERNUM_MASK 0x0F +#define JTAGVERNUM_VERNUM_SHIFT 0 + + +/* Register VDDCTRL (0x27) bit definitions */ +#define VDDCTRL_ST_MASK 0x03 +#define VDDCTRL_ST_SHIFT 0 + + +/*Register VDDCTRL_OP (0x28) bit definitios */ +#define VDDCTRL_OP_CMD_MASK 0x80 +#define VDDCTRL_OP_CMD_SHIFT 7 +#define VDDCTRL_OP_SEL_MASK 0x7F +#define 
VDDCTRL_OP_SEL_SHIFT 0 + + +/*Register VDDCTRL_SR (0x29) bit definitions */ +#define VDDCTRL_SR_SEL_MASK 0x7F +#define VDDCTRL_SR_SEL_SHIFT 0 + + +/* IRQ Definitions */ +#define TPS65910_IRQ_VBAT_VMBDCH 0 +#define TPS65910_IRQ_VBAT_VMHI 1 +#define TPS65910_IRQ_PWRON 2 +#define TPS65910_IRQ_PWRON_LP 3 +#define TPS65910_IRQ_PWRHOLD 4 +#define TPS65910_IRQ_HOTDIE 5 +#define TPS65910_IRQ_RTC_ALARM 6 +#define TPS65910_IRQ_RTC_PERIOD 7 +#define TPS65910_IRQ_GPIO_R 8 +#define TPS65910_IRQ_GPIO_F 9 +#define TPS65910_NUM_IRQ 10 + +#define TPS65911_IRQ_PWRHOLD_F 0 +#define TPS65911_IRQ_VBAT_VMHI 1 +#define TPS65911_IRQ_PWRON 2 +#define TPS65911_IRQ_PWRON_LP 3 +#define TPS65911_IRQ_PWRHOLD_R 4 +#define TPS65911_IRQ_HOTDIE 5 +#define TPS65911_IRQ_RTC_ALARM 6 +#define TPS65911_IRQ_RTC_PERIOD 7 +#define TPS65911_IRQ_GPIO0_R 8 +#define TPS65911_IRQ_GPIO0_F 9 +#define TPS65911_IRQ_GPIO1_R 10 +#define TPS65911_IRQ_GPIO1_F 11 +#define TPS65911_IRQ_GPIO2_R 12 +#define TPS65911_IRQ_GPIO2_F 13 +#define TPS65911_IRQ_GPIO3_R 14 +#define TPS65911_IRQ_GPIO3_F 15 +#define TPS65911_IRQ_GPIO4_R 16 +#define TPS65911_IRQ_GPIO4_F 17 +#define TPS65911_IRQ_GPIO5_R 18 +#define TPS65911_IRQ_GPIO5_F 19 +#define TPS65911_IRQ_WTCHDG 20 +#define TPS65911_IRQ_VMBCH2_H 21 +#define TPS65911_IRQ_VMBCH2_L 22 +#define TPS65911_IRQ_PWRDN 23 + +#define TPS65911_NUM_IRQ 24 + +/* GPIO Register Definitions */ +#define TPS65910_GPIO_DEB BIT(2) +#define TPS65910_GPIO_PUEN BIT(3) +#define TPS65910_GPIO_CFG BIT(2) +#define TPS65910_GPIO_STS BIT(1) +#define TPS65910_GPIO_SET BIT(0) + +/* Max number of TPS65910/11 GPIOs */ +#define TPS65910_NUM_GPIO 6 +#define TPS65911_NUM_GPIO 9 +#define TPS6591X_MAX_NUM_GPIO 9 + +/* Regulator Index Definitions */ +#define TPS65910_REG_VRTC 0 +#define TPS65910_REG_VIO 1 +#define TPS65910_REG_VDD1 2 +#define TPS65910_REG_VDD2 3 +#define TPS65910_REG_VDD3 4 +#define TPS65910_REG_VDIG1 5 +#define TPS65910_REG_VDIG2 6 +#define TPS65910_REG_VPLL 7 +#define TPS65910_REG_VDAC 8 +#define TPS65910_REG_VAUX1 9 +#define TPS65910_REG_VAUX2 10 +#define TPS65910_REG_VAUX33 11 +#define TPS65910_REG_VMMC 12 +#define TPS65910_REG_VBB 13 + +#define TPS65911_REG_VDDCTRL 4 +#define TPS65911_REG_LDO1 5 +#define TPS65911_REG_LDO2 6 +#define TPS65911_REG_LDO3 7 +#define TPS65911_REG_LDO4 8 +#define TPS65911_REG_LDO5 9 +#define TPS65911_REG_LDO6 10 +#define TPS65911_REG_LDO7 11 +#define TPS65911_REG_LDO8 12 + +/* Max number of TPS65910/11 regulators */ +#define TPS65910_NUM_REGS 14 + +/* External sleep controls through EN1/EN2/EN3/SLEEP inputs */ +#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1 0x1 +#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2 0x2 +#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 0x4 +#define TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP 0x8 + +/* + * Sleep keepon data: Maintains the state in sleep mode + * @therm_keepon: Keep on the thermal monitoring in sleep state. + * @clkout32k_keepon: Keep on the 32KHz clock output in sleep state. + * @i2chs_keepon: Keep on high speed internal clock in sleep state. + */ +struct tps65910_sleep_keepon_data { + unsigned therm_keepon:1; + unsigned clkout32k_keepon:1; + unsigned i2chs_keepon:1; +}; + +/** + * struct tps65910_board + * Board platform data may be used to initialize regulators. 
+ */ + +struct tps65910_board { + int gpio_base; + int irq; + int irq_base; + int vmbch_threshold; + int vmbch2_threshold; + bool en_ck32k_xtal; + bool en_dev_slp; + bool pm_off; + struct tps65910_sleep_keepon_data slp_keepon; + bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO]; + unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS]; + struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS]; +}; + +/** + * struct tps65910 - tps65910 sub-driver chip access routines + */ + +struct tps65910 { + struct device *dev; + struct i2c_client *i2c_client; + struct regmap *regmap; + unsigned long id; + + /* Client devices */ + struct tps65910_pmic *pmic; + struct tps65910_rtc *rtc; + struct tps65910_power *power; + + /* Device node parsed board data */ + struct tps65910_board *of_plat_data; + + /* IRQ Handling */ + int chip_irq; + struct regmap_irq_chip_data *irq_data; +}; + +struct tps65910_platform_data { + int irq; + int irq_base; +}; + +static inline int tps65910_chip_id(struct tps65910 *tps65910) +{ + return tps65910->id; +} + +static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg, + unsigned int *val) +{ + return regmap_read(tps65910->regmap, reg, val); +} + +static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg, + unsigned int val) +{ + return regmap_write(tps65910->regmap, reg, val); +} + +static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg, + u8 mask) +{ + return regmap_update_bits(tps65910->regmap, reg, mask, mask); +} + +static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg, + u8 mask) +{ + return regmap_update_bits(tps65910->regmap, reg, mask, 0); +} + +static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg, + u8 mask, u8 val) +{ + return regmap_update_bits(tps65910->regmap, reg, mask, val); +} + +static inline int tps65910_irq_get_virq(struct tps65910 *tps65910, int irq) +{ + return regmap_irq_get_virq(tps65910->irq_data, irq); +} + +#endif /* __LINUX_MFD_TPS65910_H */ diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h new file mode 100644 index 000000000..b25d0297b --- /dev/null +++ b/include/linux/mfd/tps65912.h @@ -0,0 +1,327 @@ +/* + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether expressed or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License version 2 for more details. 
+ * + * Based on the TPS65218 driver and the previous TPS65912 driver by + * Margarita Olaya Cabrera + */ + +#ifndef __LINUX_MFD_TPS65912_H +#define __LINUX_MFD_TPS65912_H + +#include +#include + +/* List of registers for TPS65912 */ +#define TPS65912_DCDC1_CTRL 0x00 +#define TPS65912_DCDC2_CTRL 0x01 +#define TPS65912_DCDC3_CTRL 0x02 +#define TPS65912_DCDC4_CTRL 0x03 +#define TPS65912_DCDC1_OP 0x04 +#define TPS65912_DCDC1_AVS 0x05 +#define TPS65912_DCDC1_LIMIT 0x06 +#define TPS65912_DCDC2_OP 0x07 +#define TPS65912_DCDC2_AVS 0x08 +#define TPS65912_DCDC2_LIMIT 0x09 +#define TPS65912_DCDC3_OP 0x0A +#define TPS65912_DCDC3_AVS 0x0B +#define TPS65912_DCDC3_LIMIT 0x0C +#define TPS65912_DCDC4_OP 0x0D +#define TPS65912_DCDC4_AVS 0x0E +#define TPS65912_DCDC4_LIMIT 0x0F +#define TPS65912_LDO1_OP 0x10 +#define TPS65912_LDO1_AVS 0x11 +#define TPS65912_LDO1_LIMIT 0x12 +#define TPS65912_LDO2_OP 0x13 +#define TPS65912_LDO2_AVS 0x14 +#define TPS65912_LDO2_LIMIT 0x15 +#define TPS65912_LDO3_OP 0x16 +#define TPS65912_LDO3_AVS 0x17 +#define TPS65912_LDO3_LIMIT 0x18 +#define TPS65912_LDO4_OP 0x19 +#define TPS65912_LDO4_AVS 0x1A +#define TPS65912_LDO4_LIMIT 0x1B +#define TPS65912_LDO5 0x1C +#define TPS65912_LDO6 0x1D +#define TPS65912_LDO7 0x1E +#define TPS65912_LDO8 0x1F +#define TPS65912_LDO9 0x20 +#define TPS65912_LDO10 0x21 +#define TPS65912_THRM 0x22 +#define TPS65912_CLK32OUT 0x23 +#define TPS65912_DEVCTRL 0x24 +#define TPS65912_DEVCTRL2 0x25 +#define TPS65912_I2C_SPI_CFG 0x26 +#define TPS65912_KEEP_ON 0x27 +#define TPS65912_KEEP_ON2 0x28 +#define TPS65912_SET_OFF1 0x29 +#define TPS65912_SET_OFF2 0x2A +#define TPS65912_DEF_VOLT 0x2B +#define TPS65912_DEF_VOLT_MAPPING 0x2C +#define TPS65912_DISCHARGE 0x2D +#define TPS65912_DISCHARGE2 0x2E +#define TPS65912_EN1_SET1 0x2F +#define TPS65912_EN1_SET2 0x30 +#define TPS65912_EN2_SET1 0x31 +#define TPS65912_EN2_SET2 0x32 +#define TPS65912_EN3_SET1 0x33 +#define TPS65912_EN3_SET2 0x34 +#define TPS65912_EN4_SET1 0x35 +#define TPS65912_EN4_SET2 0x36 +#define TPS65912_PGOOD 0x37 +#define TPS65912_PGOOD2 0x38 +#define TPS65912_INT_STS 0x39 +#define TPS65912_INT_MSK 0x3A +#define TPS65912_INT_STS2 0x3B +#define TPS65912_INT_MSK2 0x3C +#define TPS65912_INT_STS3 0x3D +#define TPS65912_INT_MSK3 0x3E +#define TPS65912_INT_STS4 0x3F +#define TPS65912_INT_MSK4 0x40 +#define TPS65912_GPIO1 0x41 +#define TPS65912_GPIO2 0x42 +#define TPS65912_GPIO3 0x43 +#define TPS65912_GPIO4 0x44 +#define TPS65912_GPIO5 0x45 +#define TPS65912_VMON 0x46 +#define TPS65912_LEDA_CTRL1 0x47 +#define TPS65912_LEDA_CTRL2 0x48 +#define TPS65912_LEDA_CTRL3 0x49 +#define TPS65912_LEDA_CTRL4 0x4A +#define TPS65912_LEDA_CTRL5 0x4B +#define TPS65912_LEDA_CTRL6 0x4C +#define TPS65912_LEDA_CTRL7 0x4D +#define TPS65912_LEDA_CTRL8 0x4E +#define TPS65912_LEDB_CTRL1 0x4F +#define TPS65912_LEDB_CTRL2 0x50 +#define TPS65912_LEDB_CTRL3 0x51 +#define TPS65912_LEDB_CTRL4 0x52 +#define TPS65912_LEDB_CTRL5 0x53 +#define TPS65912_LEDB_CTRL6 0x54 +#define TPS65912_LEDB_CTRL7 0x55 +#define TPS65912_LEDB_CTRL8 0x56 +#define TPS65912_LEDC_CTRL1 0x57 +#define TPS65912_LEDC_CTRL2 0x58 +#define TPS65912_LEDC_CTRL3 0x59 +#define TPS65912_LEDC_CTRL4 0x5A +#define TPS65912_LEDC_CTRL5 0x5B +#define TPS65912_LEDC_CTRL6 0x5C +#define TPS65912_LEDC_CTRL7 0x5D +#define TPS65912_LEDC_CTRL8 0x5E +#define TPS65912_LED_RAMP_UP_TIME 0x5F +#define TPS65912_LED_RAMP_DOWN_TIME 0x60 +#define TPS65912_LED_SEQ_EN 0x61 +#define TPS65912_LOADSWITCH 0x62 +#define TPS65912_SPARE 0x63 +#define TPS65912_VERNUM 0x64 +#define TPS6591X_MAX_REGISTER 0x64 
+ +/* INT_STS Register field definitions */ +#define TPS65912_INT_STS_PWRHOLD_F BIT(0) +#define TPS65912_INT_STS_VMON BIT(1) +#define TPS65912_INT_STS_PWRON BIT(2) +#define TPS65912_INT_STS_PWRON_LP BIT(3) +#define TPS65912_INT_STS_PWRHOLD_R BIT(4) +#define TPS65912_INT_STS_HOTDIE BIT(5) +#define TPS65912_INT_STS_GPIO1_R BIT(6) +#define TPS65912_INT_STS_GPIO1_F BIT(7) + +/* INT_STS Register field definitions */ +#define TPS65912_INT_STS2_GPIO2_R BIT(0) +#define TPS65912_INT_STS2_GPIO2_F BIT(1) +#define TPS65912_INT_STS2_GPIO3_R BIT(2) +#define TPS65912_INT_STS2_GPIO3_F BIT(3) +#define TPS65912_INT_STS2_GPIO4_R BIT(4) +#define TPS65912_INT_STS2_GPIO4_F BIT(5) +#define TPS65912_INT_STS2_GPIO5_R BIT(6) +#define TPS65912_INT_STS2_GPIO5_F BIT(7) + +/* INT_STS Register field definitions */ +#define TPS65912_INT_STS3_PGOOD_DCDC1 BIT(0) +#define TPS65912_INT_STS3_PGOOD_DCDC2 BIT(1) +#define TPS65912_INT_STS3_PGOOD_DCDC3 BIT(2) +#define TPS65912_INT_STS3_PGOOD_DCDC4 BIT(3) +#define TPS65912_INT_STS3_PGOOD_LDO1 BIT(4) +#define TPS65912_INT_STS3_PGOOD_LDO2 BIT(5) +#define TPS65912_INT_STS3_PGOOD_LDO3 BIT(6) +#define TPS65912_INT_STS3_PGOOD_LDO4 BIT(7) + +/* INT_STS Register field definitions */ +#define TPS65912_INT_STS4_PGOOD_LDO5 BIT(0) +#define TPS65912_INT_STS4_PGOOD_LDO6 BIT(1) +#define TPS65912_INT_STS4_PGOOD_LDO7 BIT(2) +#define TPS65912_INT_STS4_PGOOD_LDO8 BIT(3) +#define TPS65912_INT_STS4_PGOOD_LDO9 BIT(4) +#define TPS65912_INT_STS4_PGOOD_LDO10 BIT(5) + +/* GPIO 1 and 2 Register field definitions */ +#define GPIO_SLEEP_MASK 0x80 +#define GPIO_SLEEP_SHIFT 7 +#define GPIO_DEB_MASK 0x10 +#define GPIO_DEB_SHIFT 4 +#define GPIO_CFG_MASK 0x04 +#define GPIO_CFG_SHIFT 2 +#define GPIO_STS_MASK 0x02 +#define GPIO_STS_SHIFT 1 +#define GPIO_SET_MASK 0x01 +#define GPIO_SET_SHIFT 0 + +/* GPIO 3 Register field definitions */ +#define GPIO3_SLEEP_MASK 0x80 +#define GPIO3_SLEEP_SHIFT 7 +#define GPIO3_SEL_MASK 0x40 +#define GPIO3_SEL_SHIFT 6 +#define GPIO3_ODEN_MASK 0x20 +#define GPIO3_ODEN_SHIFT 5 +#define GPIO3_DEB_MASK 0x10 +#define GPIO3_DEB_SHIFT 4 +#define GPIO3_PDEN_MASK 0x08 +#define GPIO3_PDEN_SHIFT 3 +#define GPIO3_CFG_MASK 0x04 +#define GPIO3_CFG_SHIFT 2 +#define GPIO3_STS_MASK 0x02 +#define GPIO3_STS_SHIFT 1 +#define GPIO3_SET_MASK 0x01 +#define GPIO3_SET_SHIFT 0 + +/* GPIO 4 Register field definitions */ +#define GPIO4_SLEEP_MASK 0x80 +#define GPIO4_SLEEP_SHIFT 7 +#define GPIO4_SEL_MASK 0x40 +#define GPIO4_SEL_SHIFT 6 +#define GPIO4_ODEN_MASK 0x20 +#define GPIO4_ODEN_SHIFT 5 +#define GPIO4_DEB_MASK 0x10 +#define GPIO4_DEB_SHIFT 4 +#define GPIO4_PDEN_MASK 0x08 +#define GPIO4_PDEN_SHIFT 3 +#define GPIO4_CFG_MASK 0x04 +#define GPIO4_CFG_SHIFT 2 +#define GPIO4_STS_MASK 0x02 +#define GPIO4_STS_SHIFT 1 +#define GPIO4_SET_MASK 0x01 +#define GPIO4_SET_SHIFT 0 + +/* Register THERM (0x80) register.RegisterDescription */ +#define THERM_THERM_HD_MASK 0x20 +#define THERM_THERM_HD_SHIFT 5 +#define THERM_THERM_TS_MASK 0x10 +#define THERM_THERM_TS_SHIFT 4 +#define THERM_THERM_HDSEL_MASK 0x0C +#define THERM_THERM_HDSEL_SHIFT 2 +#define THERM_RSVD1_MASK 0x02 +#define THERM_RSVD1_SHIFT 1 +#define THERM_THERM_STATE_MASK 0x01 +#define THERM_THERM_STATE_SHIFT 0 + +/* Register DCDCCTRL1 register.RegisterDescription */ +#define DCDCCTRL_VCON_ENABLE_MASK 0x80 +#define DCDCCTRL_VCON_ENABLE_SHIFT 7 +#define DCDCCTRL_VCON_RANGE1_MASK 0x40 +#define DCDCCTRL_VCON_RANGE1_SHIFT 6 +#define DCDCCTRL_VCON_RANGE0_MASK 0x20 +#define DCDCCTRL_VCON_RANGE0_SHIFT 5 +#define DCDCCTRL_TSTEP2_MASK 0x10 +#define DCDCCTRL_TSTEP2_SHIFT 4 
+#define DCDCCTRL_TSTEP1_MASK 0x08 +#define DCDCCTRL_TSTEP1_SHIFT 3 +#define DCDCCTRL_TSTEP0_MASK 0x04 +#define DCDCCTRL_TSTEP0_SHIFT 2 +#define DCDCCTRL_DCDC1_MODE_MASK 0x02 +#define DCDCCTRL_DCDC1_MODE_SHIFT 1 + +/* Register DCDCCTRL2 and DCDCCTRL3 register.RegisterDescription */ +#define DCDCCTRL_TSTEP2_MASK 0x10 +#define DCDCCTRL_TSTEP2_SHIFT 4 +#define DCDCCTRL_TSTEP1_MASK 0x08 +#define DCDCCTRL_TSTEP1_SHIFT 3 +#define DCDCCTRL_TSTEP0_MASK 0x04 +#define DCDCCTRL_TSTEP0_SHIFT 2 +#define DCDCCTRL_DCDC_MODE_MASK 0x02 +#define DCDCCTRL_DCDC_MODE_SHIFT 1 +#define DCDCCTRL_RSVD0_MASK 0x01 +#define DCDCCTRL_RSVD0_SHIFT 0 + +/* Register DCDCCTRL4 register.RegisterDescription */ +#define DCDCCTRL_RAMP_TIME_MASK 0x01 +#define DCDCCTRL_RAMP_TIME_SHIFT 0 + +/* Register DCDCx_AVS */ +#define DCDC_AVS_ENABLE_MASK 0x80 +#define DCDC_AVS_ENABLE_SHIFT 7 +#define DCDC_AVS_ECO_MASK 0x40 +#define DCDC_AVS_ECO_SHIFT 6 + +/* Register DCDCx_LIMIT */ +#define DCDC_LIMIT_RANGE_MASK 0xC0 +#define DCDC_LIMIT_RANGE_SHIFT 6 +#define DCDC_LIMIT_MAX_SEL_MASK 0x3F +#define DCDC_LIMIT_MAX_SEL_SHIFT 0 + +/* Define the TPS65912 IRQ numbers */ +enum tps65912_irqs { + /* INT_STS registers */ + TPS65912_IRQ_PWRHOLD_F, + TPS65912_IRQ_VMON, + TPS65912_IRQ_PWRON, + TPS65912_IRQ_PWRON_LP, + TPS65912_IRQ_PWRHOLD_R, + TPS65912_IRQ_HOTDIE, + TPS65912_IRQ_GPIO1_R, + TPS65912_IRQ_GPIO1_F, + /* INT_STS2 registers */ + TPS65912_IRQ_GPIO2_R, + TPS65912_IRQ_GPIO2_F, + TPS65912_IRQ_GPIO3_R, + TPS65912_IRQ_GPIO3_F, + TPS65912_IRQ_GPIO4_R, + TPS65912_IRQ_GPIO4_F, + TPS65912_IRQ_GPIO5_R, + TPS65912_IRQ_GPIO5_F, + /* INT_STS3 registers */ + TPS65912_IRQ_PGOOD_DCDC1, + TPS65912_IRQ_PGOOD_DCDC2, + TPS65912_IRQ_PGOOD_DCDC3, + TPS65912_IRQ_PGOOD_DCDC4, + TPS65912_IRQ_PGOOD_LDO1, + TPS65912_IRQ_PGOOD_LDO2, + TPS65912_IRQ_PGOOD_LDO3, + TPS65912_IRQ_PGOOD_LDO4, + /* INT_STS4 registers */ + TPS65912_IRQ_PGOOD_LDO5, + TPS65912_IRQ_PGOOD_LDO6, + TPS65912_IRQ_PGOOD_LDO7, + TPS65912_IRQ_PGOOD_LDO8, + TPS65912_IRQ_PGOOD_LDO9, + TPS65912_IRQ_PGOOD_LDO10, +}; + +/* + * struct tps65912 - state holder for the tps65912 driver + * + * Device data may be used to access the TPS65912 chip + */ +struct tps65912 { + struct device *dev; + struct regmap *regmap; + + /* IRQ Data */ + int irq; + struct regmap_irq_chip_data *irq_data; +}; + +extern const struct regmap_config tps65912_regmap_config; + +int tps65912_device_init(struct tps65912 *tps); +int tps65912_device_exit(struct tps65912 *tps); + +#endif /* __LINUX_MFD_TPS65912_H */ diff --git a/include/linux/mfd/tps68470.h b/include/linux/mfd/tps68470.h new file mode 100644 index 000000000..ffe81127d --- /dev/null +++ b/include/linux/mfd/tps68470.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 Intel Corporation */ +/* Functions to access TPS68470 power management chip. 
*/ + +#ifndef __LINUX_MFD_TPS68470_H +#define __LINUX_MFD_TPS68470_H + +/* Register addresses */ +#define TPS68470_REG_POSTDIV2 0x06 +#define TPS68470_REG_BOOSTDIV 0x07 +#define TPS68470_REG_BUCKDIV 0x08 +#define TPS68470_REG_PLLSWR 0x09 +#define TPS68470_REG_XTALDIV 0x0A +#define TPS68470_REG_PLLDIV 0x0B +#define TPS68470_REG_POSTDIV 0x0C +#define TPS68470_REG_PLLCTL 0x0D +#define TPS68470_REG_PLLCTL2 0x0E +#define TPS68470_REG_CLKCFG1 0x0F +#define TPS68470_REG_CLKCFG2 0x10 +#define TPS68470_REG_GPCTL0A 0x14 +#define TPS68470_REG_GPCTL0B 0x15 +#define TPS68470_REG_GPCTL1A 0x16 +#define TPS68470_REG_GPCTL1B 0x17 +#define TPS68470_REG_GPCTL2A 0x18 +#define TPS68470_REG_GPCTL2B 0x19 +#define TPS68470_REG_GPCTL3A 0x1A +#define TPS68470_REG_GPCTL3B 0x1B +#define TPS68470_REG_GPCTL4A 0x1C +#define TPS68470_REG_GPCTL4B 0x1D +#define TPS68470_REG_GPCTL5A 0x1E +#define TPS68470_REG_GPCTL5B 0x1F +#define TPS68470_REG_GPCTL6A 0x20 +#define TPS68470_REG_GPCTL6B 0x21 +#define TPS68470_REG_SGPO 0x22 +#define TPS68470_REG_GPDI 0x26 +#define TPS68470_REG_GPDO 0x27 +#define TPS68470_REG_VCMVAL 0x3C +#define TPS68470_REG_VAUX1VAL 0x3D +#define TPS68470_REG_VAUX2VAL 0x3E +#define TPS68470_REG_VIOVAL 0x3F +#define TPS68470_REG_VSIOVAL 0x40 +#define TPS68470_REG_VAVAL 0x41 +#define TPS68470_REG_VDVAL 0x42 +#define TPS68470_REG_S_I2C_CTL 0x43 +#define TPS68470_REG_VCMCTL 0x44 +#define TPS68470_REG_VAUX1CTL 0x45 +#define TPS68470_REG_VAUX2CTL 0x46 +#define TPS68470_REG_VACTL 0x47 +#define TPS68470_REG_VDCTL 0x48 +#define TPS68470_REG_RESET 0x50 +#define TPS68470_REG_REVID 0xFF + +#define TPS68470_REG_MAX TPS68470_REG_REVID + +/* Register field definitions */ + +#define TPS68470_REG_RESET_MASK GENMASK(7, 0) +#define TPS68470_VAVAL_AVOLT_MASK GENMASK(6, 0) + +#define TPS68470_VDVAL_DVOLT_MASK GENMASK(5, 0) +#define TPS68470_VCMVAL_VCVOLT_MASK GENMASK(6, 0) +#define TPS68470_VIOVAL_IOVOLT_MASK GENMASK(6, 0) +#define TPS68470_VSIOVAL_IOVOLT_MASK GENMASK(6, 0) +#define TPS68470_VAUX1VAL_AUX1VOLT_MASK GENMASK(6, 0) +#define TPS68470_VAUX2VAL_AUX2VOLT_MASK GENMASK(6, 0) + +#define TPS68470_VACTL_EN_MASK GENMASK(0, 0) +#define TPS68470_VDCTL_EN_MASK GENMASK(0, 0) +#define TPS68470_VCMCTL_EN_MASK GENMASK(0, 0) +#define TPS68470_S_I2C_CTL_EN_MASK GENMASK(1, 0) +#define TPS68470_VAUX1CTL_EN_MASK GENMASK(0, 0) +#define TPS68470_VAUX2CTL_EN_MASK GENMASK(0, 0) +#define TPS68470_PLL_EN_MASK GENMASK(0, 0) + +#define TPS68470_CLKCFG1_MODE_A_MASK GENMASK(1, 0) +#define TPS68470_CLKCFG1_MODE_B_MASK GENMASK(3, 2) + +#define TPS68470_GPIO_CTL_REG_A(x) (TPS68470_REG_GPCTL0A + (x) * 2) +#define TPS68470_GPIO_CTL_REG_B(x) (TPS68470_REG_GPCTL0B + (x) * 2) +#define TPS68470_GPIO_MODE_MASK GENMASK(1, 0) +#define TPS68470_GPIO_MODE_IN 0 +#define TPS68470_GPIO_MODE_IN_PULLUP 1 +#define TPS68470_GPIO_MODE_OUT_CMOS 2 +#define TPS68470_GPIO_MODE_OUT_ODRAIN 3 + +#endif /* __LINUX_MFD_TPS68470_H */ diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h new file mode 100644 index 000000000..2c75c9c93 --- /dev/null +++ b/include/linux/mfd/tps80031.h @@ -0,0 +1,637 @@ +/* + * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver. + * + * Copyright (c) 2012, NVIDIA Corporation. + * + * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. 
+ * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind, + * whether express or implied; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + * 02111-1307, USA + */ + +#ifndef __LINUX_MFD_TPS80031_H +#define __LINUX_MFD_TPS80031_H + +#include +#include + +/* Pull-ups/Pull-downs */ +#define TPS80031_CFG_INPUT_PUPD1 0xF0 +#define TPS80031_CFG_INPUT_PUPD2 0xF1 +#define TPS80031_CFG_INPUT_PUPD3 0xF2 +#define TPS80031_CFG_INPUT_PUPD4 0xF3 +#define TPS80031_CFG_LDO_PD1 0xF4 +#define TPS80031_CFG_LDO_PD2 0xF5 +#define TPS80031_CFG_SMPS_PD 0xF6 + +/* Real Time Clock */ +#define TPS80031_SECONDS_REG 0x00 +#define TPS80031_MINUTES_REG 0x01 +#define TPS80031_HOURS_REG 0x02 +#define TPS80031_DAYS_REG 0x03 +#define TPS80031_MONTHS_REG 0x04 +#define TPS80031_YEARS_REG 0x05 +#define TPS80031_WEEKS_REG 0x06 +#define TPS80031_ALARM_SECONDS_REG 0x08 +#define TPS80031_ALARM_MINUTES_REG 0x09 +#define TPS80031_ALARM_HOURS_REG 0x0A +#define TPS80031_ALARM_DAYS_REG 0x0B +#define TPS80031_ALARM_MONTHS_REG 0x0C +#define TPS80031_ALARM_YEARS_REG 0x0D +#define TPS80031_RTC_CTRL_REG 0x10 +#define TPS80031_RTC_STATUS_REG 0x11 +#define TPS80031_RTC_INTERRUPTS_REG 0x12 +#define TPS80031_RTC_COMP_LSB_REG 0x13 +#define TPS80031_RTC_COMP_MSB_REG 0x14 +#define TPS80031_RTC_RESET_STATUS_REG 0x16 + +/*PMC Master Module */ +#define TPS80031_PHOENIX_START_CONDITION 0x1F +#define TPS80031_PHOENIX_MSK_TRANSITION 0x20 +#define TPS80031_STS_HW_CONDITIONS 0x21 +#define TPS80031_PHOENIX_LAST_TURNOFF_STS 0x22 +#define TPS80031_VSYSMIN_LO_THRESHOLD 0x23 +#define TPS80031_VSYSMIN_HI_THRESHOLD 0x24 +#define TPS80031_PHOENIX_DEV_ON 0x25 +#define TPS80031_STS_PWR_GRP_STATE 0x27 +#define TPS80031_PH_CFG_VSYSLOW 0x28 +#define TPS80031_PH_STS_BOOT 0x29 +#define TPS80031_PHOENIX_SENS_TRANSITION 0x2A +#define TPS80031_PHOENIX_SEQ_CFG 0x2B +#define TPS80031_PRIMARY_WATCHDOG_CFG 0X2C +#define TPS80031_KEY_PRESS_DUR_CFG 0X2D +#define TPS80031_SMPS_LDO_SHORT_STS 0x2E + +/* PMC Slave Module - Broadcast */ +#define TPS80031_BROADCAST_ADDR_ALL 0x31 +#define TPS80031_BROADCAST_ADDR_REF 0x32 +#define TPS80031_BROADCAST_ADDR_PROV 0x33 +#define TPS80031_BROADCAST_ADDR_CLK_RST 0x34 + +/* PMC Slave Module SMPS Regulators */ +#define TPS80031_SMPS4_CFG_TRANS 0x41 +#define TPS80031_SMPS4_CFG_STATE 0x42 +#define TPS80031_SMPS4_CFG_VOLTAGE 0x44 +#define TPS80031_VIO_CFG_TRANS 0x47 +#define TPS80031_VIO_CFG_STATE 0x48 +#define TPS80031_VIO_CFG_FORCE 0x49 +#define TPS80031_VIO_CFG_VOLTAGE 0x4A +#define TPS80031_VIO_CFG_STEP 0x48 +#define TPS80031_SMPS1_CFG_TRANS 0x53 +#define TPS80031_SMPS1_CFG_STATE 0x54 +#define TPS80031_SMPS1_CFG_FORCE 0x55 +#define TPS80031_SMPS1_CFG_VOLTAGE 0x56 +#define TPS80031_SMPS1_CFG_STEP 0x57 +#define TPS80031_SMPS2_CFG_TRANS 0x59 +#define TPS80031_SMPS2_CFG_STATE 0x5A +#define TPS80031_SMPS2_CFG_FORCE 0x5B +#define TPS80031_SMPS2_CFG_VOLTAGE 0x5C +#define TPS80031_SMPS2_CFG_STEP 0x5D +#define TPS80031_SMPS3_CFG_TRANS 0x65 +#define TPS80031_SMPS3_CFG_STATE 0x66 +#define TPS80031_SMPS3_CFG_VOLTAGE 0x68 + +/* PMC Slave Module LDO Regulators */ +#define TPS80031_VANA_CFG_TRANS 0x81 +#define TPS80031_VANA_CFG_STATE 0x82 +#define TPS80031_VANA_CFG_VOLTAGE 0x83 +#define TPS80031_LDO2_CFG_TRANS 0x85 +#define 
TPS80031_LDO2_CFG_STATE 0x86 +#define TPS80031_LDO2_CFG_VOLTAGE 0x87 +#define TPS80031_LDO4_CFG_TRANS 0x89 +#define TPS80031_LDO4_CFG_STATE 0x8A +#define TPS80031_LDO4_CFG_VOLTAGE 0x8B +#define TPS80031_LDO3_CFG_TRANS 0x8D +#define TPS80031_LDO3_CFG_STATE 0x8E +#define TPS80031_LDO3_CFG_VOLTAGE 0x8F +#define TPS80031_LDO6_CFG_TRANS 0x91 +#define TPS80031_LDO6_CFG_STATE 0x92 +#define TPS80031_LDO6_CFG_VOLTAGE 0x93 +#define TPS80031_LDOLN_CFG_TRANS 0x95 +#define TPS80031_LDOLN_CFG_STATE 0x96 +#define TPS80031_LDOLN_CFG_VOLTAGE 0x97 +#define TPS80031_LDO5_CFG_TRANS 0x99 +#define TPS80031_LDO5_CFG_STATE 0x9A +#define TPS80031_LDO5_CFG_VOLTAGE 0x9B +#define TPS80031_LDO1_CFG_TRANS 0x9D +#define TPS80031_LDO1_CFG_STATE 0x9E +#define TPS80031_LDO1_CFG_VOLTAGE 0x9F +#define TPS80031_LDOUSB_CFG_TRANS 0xA1 +#define TPS80031_LDOUSB_CFG_STATE 0xA2 +#define TPS80031_LDOUSB_CFG_VOLTAGE 0xA3 +#define TPS80031_LDO7_CFG_TRANS 0xA5 +#define TPS80031_LDO7_CFG_STATE 0xA6 +#define TPS80031_LDO7_CFG_VOLTAGE 0xA7 + +/* PMC Slave Module External Control */ +#define TPS80031_REGEN1_CFG_TRANS 0xAE +#define TPS80031_REGEN1_CFG_STATE 0xAF +#define TPS80031_REGEN2_CFG_TRANS 0xB1 +#define TPS80031_REGEN2_CFG_STATE 0xB2 +#define TPS80031_SYSEN_CFG_TRANS 0xB4 +#define TPS80031_SYSEN_CFG_STATE 0xB5 + +/* PMC Slave Module Internal Control */ +#define TPS80031_NRESPWRON_CFG_TRANS 0xB7 +#define TPS80031_NRESPWRON_CFG_STATE 0xB8 +#define TPS80031_CLK32KAO_CFG_TRANS 0xBA +#define TPS80031_CLK32KAO_CFG_STATE 0xBB +#define TPS80031_CLK32KG_CFG_TRANS 0xBD +#define TPS80031_CLK32KG_CFG_STATE 0xBE +#define TPS80031_CLK32KAUDIO_CFG_TRANS 0xC0 +#define TPS80031_CLK32KAUDIO_CFG_STATE 0xC1 +#define TPS80031_VRTC_CFG_TRANS 0xC3 +#define TPS80031_VRTC_CFG_STATE 0xC4 +#define TPS80031_BIAS_CFG_TRANS 0xC6 +#define TPS80031_BIAS_CFG_STATE 0xC7 +#define TPS80031_VSYSMIN_HI_CFG_TRANS 0xC9 +#define TPS80031_VSYSMIN_HI_CFG_STATE 0xCA +#define TPS80031_RC6MHZ_CFG_TRANS 0xCC +#define TPS80031_RC6MHZ_CFG_STATE 0xCD +#define TPS80031_TMP_CFG_TRANS 0xCF +#define TPS80031_TMP_CFG_STATE 0xD0 + +/* PMC Slave Module resources assignment */ +#define TPS80031_PREQ1_RES_ASS_A 0xD7 +#define TPS80031_PREQ1_RES_ASS_B 0xD8 +#define TPS80031_PREQ1_RES_ASS_C 0xD9 +#define TPS80031_PREQ2_RES_ASS_A 0xDA +#define TPS80031_PREQ2_RES_ASS_B 0xDB +#define TPS80031_PREQ2_RES_ASS_C 0xDC +#define TPS80031_PREQ3_RES_ASS_A 0xDD +#define TPS80031_PREQ3_RES_ASS_B 0xDE +#define TPS80031_PREQ3_RES_ASS_C 0xDF + +/* PMC Slave Module Miscellaneous */ +#define TPS80031_SMPS_OFFSET 0xE0 +#define TPS80031_SMPS_MULT 0xE3 +#define TPS80031_MISC1 0xE4 +#define TPS80031_MISC2 0xE5 +#define TPS80031_BBSPOR_CFG 0xE6 +#define TPS80031_TMP_CFG 0xE7 + +/* Battery Charging Controller and Indicator LED */ +#define TPS80031_CONTROLLER_CTRL2 0xDA +#define TPS80031_CONTROLLER_VSEL_COMP 0xDB +#define TPS80031_CHARGERUSB_VSYSREG 0xDC +#define TPS80031_CHARGERUSB_VICHRG_PC 0xDD +#define TPS80031_LINEAR_CHRG_STS 0xDE +#define TPS80031_CONTROLLER_INT_MASK 0xE0 +#define TPS80031_CONTROLLER_CTRL1 0xE1 +#define TPS80031_CONTROLLER_WDG 0xE2 +#define TPS80031_CONTROLLER_STAT1 0xE3 +#define TPS80031_CHARGERUSB_INT_STATUS 0xE4 +#define TPS80031_CHARGERUSB_INT_MASK 0xE5 +#define TPS80031_CHARGERUSB_STATUS_INT1 0xE6 +#define TPS80031_CHARGERUSB_STATUS_INT2 0xE7 +#define TPS80031_CHARGERUSB_CTRL1 0xE8 +#define TPS80031_CHARGERUSB_CTRL2 0xE9 +#define TPS80031_CHARGERUSB_CTRL3 0xEA +#define TPS80031_CHARGERUSB_STAT1 0xEB +#define TPS80031_CHARGERUSB_VOREG 0xEC +#define TPS80031_CHARGERUSB_VICHRG 0xED +#define 
TPS80031_CHARGERUSB_CINLIMIT 0xEE +#define TPS80031_CHARGERUSB_CTRLLIMIT1 0xEF +#define TPS80031_CHARGERUSB_CTRLLIMIT2 0xF0 +#define TPS80031_LED_PWM_CTRL1 0xF4 +#define TPS80031_LED_PWM_CTRL2 0xF5 + +/* USB On-The-Go */ +#define TPS80031_BACKUP_REG 0xFA +#define TPS80031_USB_VENDOR_ID_LSB 0x00 +#define TPS80031_USB_VENDOR_ID_MSB 0x01 +#define TPS80031_USB_PRODUCT_ID_LSB 0x02 +#define TPS80031_USB_PRODUCT_ID_MSB 0x03 +#define TPS80031_USB_VBUS_CTRL_SET 0x04 +#define TPS80031_USB_VBUS_CTRL_CLR 0x05 +#define TPS80031_USB_ID_CTRL_SET 0x06 +#define TPS80031_USB_ID_CTRL_CLR 0x07 +#define TPS80031_USB_VBUS_INT_SRC 0x08 +#define TPS80031_USB_VBUS_INT_LATCH_SET 0x09 +#define TPS80031_USB_VBUS_INT_LATCH_CLR 0x0A +#define TPS80031_USB_VBUS_INT_EN_LO_SET 0x0B +#define TPS80031_USB_VBUS_INT_EN_LO_CLR 0x0C +#define TPS80031_USB_VBUS_INT_EN_HI_SET 0x0D +#define TPS80031_USB_VBUS_INT_EN_HI_CLR 0x0E +#define TPS80031_USB_ID_INT_SRC 0x0F +#define TPS80031_USB_ID_INT_LATCH_SET 0x10 +#define TPS80031_USB_ID_INT_LATCH_CLR 0x11 +#define TPS80031_USB_ID_INT_EN_LO_SET 0x12 +#define TPS80031_USB_ID_INT_EN_LO_CLR 0x13 +#define TPS80031_USB_ID_INT_EN_HI_SET 0x14 +#define TPS80031_USB_ID_INT_EN_HI_CLR 0x15 +#define TPS80031_USB_OTG_ADP_CTRL 0x16 +#define TPS80031_USB_OTG_ADP_HIGH 0x17 +#define TPS80031_USB_OTG_ADP_LOW 0x18 +#define TPS80031_USB_OTG_ADP_RISE 0x19 +#define TPS80031_USB_OTG_REVISION 0x1A + +/* Gas Gauge */ +#define TPS80031_FG_REG_00 0xC0 +#define TPS80031_FG_REG_01 0xC1 +#define TPS80031_FG_REG_02 0xC2 +#define TPS80031_FG_REG_03 0xC3 +#define TPS80031_FG_REG_04 0xC4 +#define TPS80031_FG_REG_05 0xC5 +#define TPS80031_FG_REG_06 0xC6 +#define TPS80031_FG_REG_07 0xC7 +#define TPS80031_FG_REG_08 0xC8 +#define TPS80031_FG_REG_09 0xC9 +#define TPS80031_FG_REG_10 0xCA +#define TPS80031_FG_REG_11 0xCB + +/* General Purpose ADC */ +#define TPS80031_GPADC_CTRL 0x2E +#define TPS80031_GPADC_CTRL2 0x2F +#define TPS80031_RTSELECT_LSB 0x32 +#define TPS80031_RTSELECT_ISB 0x33 +#define TPS80031_RTSELECT_MSB 0x34 +#define TPS80031_GPSELECT_ISB 0x35 +#define TPS80031_CTRL_P1 0x36 +#define TPS80031_RTCH0_LSB 0x37 +#define TPS80031_RTCH0_MSB 0x38 +#define TPS80031_RTCH1_LSB 0x39 +#define TPS80031_RTCH1_MSB 0x3A +#define TPS80031_GPCH0_LSB 0x3B +#define TPS80031_GPCH0_MSB 0x3C + +/* SIM, MMC and Battery Detection */ +#define TPS80031_SIMDEBOUNCING 0xEB +#define TPS80031_SIMCTRL 0xEC +#define TPS80031_MMCDEBOUNCING 0xED +#define TPS80031_MMCCTRL 0xEE +#define TPS80031_BATDEBOUNCING 0xEF + +/* Vibrator Driver and PWMs */ +#define TPS80031_VIBCTRL 0x9B +#define TPS80031_VIBMODE 0x9C +#define TPS80031_PWM1ON 0xBA +#define TPS80031_PWM1OFF 0xBB +#define TPS80031_PWM2ON 0xBD +#define TPS80031_PWM2OFF 0xBE + +/* Control Interface */ +#define TPS80031_INT_STS_A 0xD0 +#define TPS80031_INT_STS_B 0xD1 +#define TPS80031_INT_STS_C 0xD2 +#define TPS80031_INT_MSK_LINE_A 0xD3 +#define TPS80031_INT_MSK_LINE_B 0xD4 +#define TPS80031_INT_MSK_LINE_C 0xD5 +#define TPS80031_INT_MSK_STS_A 0xD6 +#define TPS80031_INT_MSK_STS_B 0xD7 +#define TPS80031_INT_MSK_STS_C 0xD8 +#define TPS80031_TOGGLE1 0x90 +#define TPS80031_TOGGLE2 0x91 +#define TPS80031_TOGGLE3 0x92 +#define TPS80031_PWDNSTATUS1 0x93 +#define TPS80031_PWDNSTATUS2 0x94 +#define TPS80031_VALIDITY0 0x17 +#define TPS80031_VALIDITY1 0x18 +#define TPS80031_VALIDITY2 0x19 +#define TPS80031_VALIDITY3 0x1A +#define TPS80031_VALIDITY4 0x1B +#define TPS80031_VALIDITY5 0x1C +#define TPS80031_VALIDITY6 0x1D +#define TPS80031_VALIDITY7 0x1E + +/* Version number related register */ +#define 
TPS80031_JTAGVERNUM 0x87 +#define TPS80031_EPROM_REV 0xDF + +/* GPADC Trimming Bits. */ +#define TPS80031_GPADC_TRIM0 0xCC +#define TPS80031_GPADC_TRIM1 0xCD +#define TPS80031_GPADC_TRIM2 0xCE +#define TPS80031_GPADC_TRIM3 0xCF +#define TPS80031_GPADC_TRIM4 0xD0 +#define TPS80031_GPADC_TRIM5 0xD1 +#define TPS80031_GPADC_TRIM6 0xD2 +#define TPS80031_GPADC_TRIM7 0xD3 +#define TPS80031_GPADC_TRIM8 0xD4 +#define TPS80031_GPADC_TRIM9 0xD5 +#define TPS80031_GPADC_TRIM10 0xD6 +#define TPS80031_GPADC_TRIM11 0xD7 +#define TPS80031_GPADC_TRIM12 0xD8 +#define TPS80031_GPADC_TRIM13 0xD9 +#define TPS80031_GPADC_TRIM14 0xDA +#define TPS80031_GPADC_TRIM15 0xDB +#define TPS80031_GPADC_TRIM16 0xDC +#define TPS80031_GPADC_TRIM17 0xDD +#define TPS80031_GPADC_TRIM18 0xDE + +/* TPS80031_CONTROLLER_STAT1 bit fields */ +#define TPS80031_CONTROLLER_STAT1_BAT_TEMP 0 +#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED 1 +#define TPS80031_CONTROLLER_STAT1_VBUS_DET 2 +#define TPS80031_CONTROLLER_STAT1_VAC_DET 3 +#define TPS80031_CONTROLLER_STAT1_FAULT_WDG 4 +#define TPS80031_CONTROLLER_STAT1_LINCH_GATED 6 +/* TPS80031_CONTROLLER_INT_MASK bit filed */ +#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET 0 +#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET 1 +#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP 2 +#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG 3 +#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED 4 +#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED 5 + +#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK 0x3F + +/* TPS80031_PHOENIX_DEV_ON bit field */ +#define TPS80031_DEVOFF 0x1 + +#define TPS80031_EXT_CONTROL_CFG_TRANS 0 +#define TPS80031_EXT_CONTROL_CFG_STATE 1 + +/* State register field */ +#define TPS80031_STATE_OFF 0x00 +#define TPS80031_STATE_ON 0x01 +#define TPS80031_STATE_MASK 0x03 + +/* Trans register field */ +#define TPS80031_TRANS_ACTIVE_OFF 0x00 +#define TPS80031_TRANS_ACTIVE_ON 0x01 +#define TPS80031_TRANS_ACTIVE_MASK 0x03 +#define TPS80031_TRANS_SLEEP_OFF 0x00 +#define TPS80031_TRANS_SLEEP_ON 0x04 +#define TPS80031_TRANS_SLEEP_MASK 0x0C +#define TPS80031_TRANS_OFF_OFF 0x00 +#define TPS80031_TRANS_OFF_ACTIVE 0x10 +#define TPS80031_TRANS_OFF_MASK 0x30 + +#define TPS80031_EXT_PWR_REQ (TPS80031_PWR_REQ_INPUT_PREQ1 | \ + TPS80031_PWR_REQ_INPUT_PREQ2 | \ + TPS80031_PWR_REQ_INPUT_PREQ3) + +/* TPS80031_BBSPOR_CFG bit field */ +#define TPS80031_BBSPOR_CHG_EN 0x8 +#define TPS80031_MAX_REGISTER 0xFF + +struct i2c_client; + +/* Supported chips */ +enum chips { + TPS80031 = 0x00000001, + TPS80032 = 0x00000002, +}; + +enum { + TPS80031_INT_PWRON, + TPS80031_INT_RPWRON, + TPS80031_INT_SYS_VLOW, + TPS80031_INT_RTC_ALARM, + TPS80031_INT_RTC_PERIOD, + TPS80031_INT_HOT_DIE, + TPS80031_INT_VXX_SHORT, + TPS80031_INT_SPDURATION, + TPS80031_INT_WATCHDOG, + TPS80031_INT_BAT, + TPS80031_INT_SIM, + TPS80031_INT_MMC, + TPS80031_INT_RES, + TPS80031_INT_GPADC_RT, + TPS80031_INT_GPADC_SW2_EOC, + TPS80031_INT_CC_AUTOCAL, + TPS80031_INT_ID_WKUP, + TPS80031_INT_VBUSS_WKUP, + TPS80031_INT_ID, + TPS80031_INT_VBUS, + TPS80031_INT_CHRG_CTRL, + TPS80031_INT_EXT_CHRG, + TPS80031_INT_INT_CHRG, + TPS80031_INT_RES2, + TPS80031_INT_BAT_TEMP_OVRANGE, + TPS80031_INT_BAT_REMOVED, + TPS80031_INT_VBUS_DET, + TPS80031_INT_VAC_DET, + TPS80031_INT_FAULT_WDG, + TPS80031_INT_LINCH_GATED, + + /* Last interrupt id to get the end number */ + TPS80031_INT_NR, +}; + +/* TPS80031 Slave IDs */ +#define TPS80031_NUM_SLAVES 4 +#define TPS80031_SLAVE_ID0 0 +#define TPS80031_SLAVE_ID1 1 +#define TPS80031_SLAVE_ID2 2 +#define TPS80031_SLAVE_ID3 3 + +/* TPS80031 I2C addresses 
*/ +#define TPS80031_I2C_ID0_ADDR 0x12 +#define TPS80031_I2C_ID1_ADDR 0x48 +#define TPS80031_I2C_ID2_ADDR 0x49 +#define TPS80031_I2C_ID3_ADDR 0x4A + +enum { + TPS80031_REGULATOR_VIO, + TPS80031_REGULATOR_SMPS1, + TPS80031_REGULATOR_SMPS2, + TPS80031_REGULATOR_SMPS3, + TPS80031_REGULATOR_SMPS4, + TPS80031_REGULATOR_VANA, + TPS80031_REGULATOR_LDO1, + TPS80031_REGULATOR_LDO2, + TPS80031_REGULATOR_LDO3, + TPS80031_REGULATOR_LDO4, + TPS80031_REGULATOR_LDO5, + TPS80031_REGULATOR_LDO6, + TPS80031_REGULATOR_LDO7, + TPS80031_REGULATOR_LDOLN, + TPS80031_REGULATOR_LDOUSB, + TPS80031_REGULATOR_VBUS, + TPS80031_REGULATOR_REGEN1, + TPS80031_REGULATOR_REGEN2, + TPS80031_REGULATOR_SYSEN, + TPS80031_REGULATOR_MAX, +}; + +/* Different configurations for the rails */ +enum { + /* USBLDO input selection */ + TPS80031_USBLDO_INPUT_VSYS = 0x00000001, + TPS80031_USBLDO_INPUT_PMID = 0x00000002, + + /* LDO3 output mode */ + TPS80031_LDO3_OUTPUT_VIB = 0x00000004, + + /* VBUS configuration */ + TPS80031_VBUS_DISCHRG_EN_PDN = 0x00000004, + TPS80031_VBUS_SW_ONLY = 0x00000008, + TPS80031_VBUS_SW_N_ID = 0x00000010, +}; + +/* External controls requests */ +enum tps80031_ext_control { + TPS80031_PWR_REQ_INPUT_NONE = 0x00000000, + TPS80031_PWR_REQ_INPUT_PREQ1 = 0x00000001, + TPS80031_PWR_REQ_INPUT_PREQ2 = 0x00000002, + TPS80031_PWR_REQ_INPUT_PREQ3 = 0x00000004, + TPS80031_PWR_OFF_ON_SLEEP = 0x00000008, + TPS80031_PWR_ON_ON_SLEEP = 0x00000010, +}; + +enum tps80031_pupd_pins { + TPS80031_PREQ1 = 0, + TPS80031_PREQ2A, + TPS80031_PREQ2B, + TPS80031_PREQ2C, + TPS80031_PREQ3, + TPS80031_NRES_WARM, + TPS80031_PWM_FORCE, + TPS80031_CHRG_EXT_CHRG_STATZ, + TPS80031_SIM, + TPS80031_MMC, + TPS80031_GPADC_START, + TPS80031_DVSI2C_SCL, + TPS80031_DVSI2C_SDA, + TPS80031_CTLI2C_SCL, + TPS80031_CTLI2C_SDA, +}; + +enum tps80031_pupd_settings { + TPS80031_PUPD_NORMAL, + TPS80031_PUPD_PULLDOWN, + TPS80031_PUPD_PULLUP, +}; + +struct tps80031 { + struct device *dev; + unsigned long chip_info; + int es_version; + struct i2c_client *clients[TPS80031_NUM_SLAVES]; + struct regmap *regmap[TPS80031_NUM_SLAVES]; + struct regmap_irq_chip_data *irq_data; +}; + +struct tps80031_pupd_init_data { + int input_pin; + int setting; +}; + +/* + * struct tps80031_regulator_platform_data - tps80031 regulator platform data. + * + * @reg_init_data: The regulator init data. + * @ext_ctrl_flag: External control flag for sleep/power request control. + * @config_flags: Configuration flag to configure the rails. + * It should be ORed of config enums. 
+ */ + +struct tps80031_regulator_platform_data { + struct regulator_init_data *reg_init_data; + unsigned int ext_ctrl_flag; + unsigned int config_flags; +}; + +struct tps80031_platform_data { + int irq_base; + bool use_power_off; + struct tps80031_pupd_init_data *pupd_init_data; + int pupd_init_data_size; + struct tps80031_regulator_platform_data + *regulator_pdata[TPS80031_REGULATOR_MAX]; +}; + +static inline int tps80031_write(struct device *dev, int sid, + int reg, uint8_t val) +{ + struct tps80031 *tps80031 = dev_get_drvdata(dev); + + return regmap_write(tps80031->regmap[sid], reg, val); +} + +static inline int tps80031_writes(struct device *dev, int sid, int reg, + int len, uint8_t *val) +{ + struct tps80031 *tps80031 = dev_get_drvdata(dev); + + return regmap_bulk_write(tps80031->regmap[sid], reg, val, len); +} + +static inline int tps80031_read(struct device *dev, int sid, + int reg, uint8_t *val) +{ + struct tps80031 *tps80031 = dev_get_drvdata(dev); + unsigned int ival; + int ret; + + ret = regmap_read(tps80031->regmap[sid], reg, &ival); + if (ret < 0) { + dev_err(dev, "failed reading from reg 0x%02x\n", reg); + return ret; + } + + *val = ival; + return ret; +} + +static inline int tps80031_reads(struct device *dev, int sid, + int reg, int len, uint8_t *val) +{ + struct tps80031 *tps80031 = dev_get_drvdata(dev); + + return regmap_bulk_read(tps80031->regmap[sid], reg, val, len); +} + +static inline int tps80031_set_bits(struct device *dev, int sid, + int reg, uint8_t bit_mask) +{ + struct tps80031 *tps80031 = dev_get_drvdata(dev); + + return regmap_update_bits(tps80031->regmap[sid], reg, + bit_mask, bit_mask); +} + +static inline int tps80031_clr_bits(struct device *dev, int sid, + int reg, uint8_t bit_mask) +{ + struct tps80031 *tps80031 = dev_get_drvdata(dev); + + return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0); +} + +static inline int tps80031_update(struct device *dev, int sid, + int reg, uint8_t val, uint8_t mask) +{ + struct tps80031 *tps80031 = dev_get_drvdata(dev); + + return regmap_update_bits(tps80031->regmap[sid], reg, mask, val); +} + +static inline unsigned long tps80031_get_chip_info(struct device *dev) +{ + struct tps80031 *tps80031 = dev_get_drvdata(dev); + + return tps80031->chip_info; +} + +static inline int tps80031_get_pmu_version(struct device *dev) +{ + struct tps80031 *tps80031 = dev_get_drvdata(dev); + + return tps80031->es_version; +} + +static inline int tps80031_irq_get_virq(struct device *dev, int irq) +{ + struct tps80031 *tps80031 = dev_get_drvdata(dev); + + return regmap_irq_get_virq(tps80031->irq_data, irq); +} + +extern int tps80031_ext_power_req_config(struct device *dev, + unsigned long ext_ctrl_flag, int preq_bit, + int state_reg_add, int trans_reg_add); +#endif /*__LINUX_MFD_TPS80031_H */ diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h new file mode 100644 index 000000000..9ad7828d9 --- /dev/null +++ b/include/linux/mfd/twl.h @@ -0,0 +1,876 @@ +/* + * twl4030.h - header for TWL4030 PM and audio CODEC device + * + * Copyright (C) 2005-2006 Texas Instruments, Inc. + * + * Based on tlv320aic23.c: + * Copyright (c) by Kai Svahn + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
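The static inline helpers above are thin wrappers around regmap calls keyed by slave id; tps80031_update(), tps80031_set_bits() and tps80031_clr_bits() all reduce to a read-modify-write under a mask. A self-contained sketch of that semantics, reusing the STATE field constants quoted earlier in this header (the 0x40 readback is invented):

#include <stdio.h>
#include <stdint.h>

#define TPS80031_STATE_ON       0x01
#define TPS80031_STATE_MASK     0x03

/* What regmap_update_bits() boils down to for a single 8-bit register. */
static uint8_t update_bits(uint8_t old, uint8_t mask, uint8_t val)
{
        return (old & ~mask) | (val & mask);
}

int main(void)
{
        uint8_t cfg_state = 0x40;       /* hypothetical *_CFG_STATE readback */

        /* Equivalent of tps80031_update(dev, sid, reg, TPS80031_STATE_ON,
         * TPS80031_STATE_MASK): only the 2-bit STATE field changes. */
        cfg_state = update_bits(cfg_state, TPS80031_STATE_MASK, TPS80031_STATE_ON);
        printf("new CFG_STATE = 0x%02x\n", cfg_state);  /* prints 0x41 */
        return 0;
}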
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef __TWL_H_ +#define __TWL_H_ + +#include +#include + +/* + * Using the twl4030 core we address registers using a pair + * { module id, relative register offset } + * which that core then maps to the relevant + * { i2c slave, absolute register address } + * + * The module IDs are meaningful only to the twl4030 core code, + * which uses them as array indices to look up the first register + * address each module uses within a given i2c slave. + */ + +/* Module IDs for similar functionalities found in twl4030/twl6030 */ +enum twl_module_ids { + TWL_MODULE_USB, + TWL_MODULE_PIH, + TWL_MODULE_MAIN_CHARGE, + TWL_MODULE_PM_MASTER, + TWL_MODULE_PM_RECEIVER, + + TWL_MODULE_RTC, + TWL_MODULE_PWM, + TWL_MODULE_LED, + TWL_MODULE_SECURED_REG, + + TWL_MODULE_LAST, +}; + +/* Modules only available in twl4030 series */ +enum twl4030_module_ids { + TWL4030_MODULE_AUDIO_VOICE = TWL_MODULE_LAST, + TWL4030_MODULE_GPIO, + TWL4030_MODULE_INTBR, + TWL4030_MODULE_TEST, + TWL4030_MODULE_KEYPAD, + + TWL4030_MODULE_MADC, + TWL4030_MODULE_INTERRUPTS, + TWL4030_MODULE_PRECHARGE, + TWL4030_MODULE_BACKUP, + TWL4030_MODULE_INT, + + TWL5031_MODULE_ACCESSORY, + TWL5031_MODULE_INTERRUPTS, + + TWL4030_MODULE_LAST, +}; + +/* Modules only available in twl6030 series */ +enum twl6030_module_ids { + TWL6030_MODULE_ID0 = TWL_MODULE_LAST, + TWL6030_MODULE_ID1, + TWL6030_MODULE_ID2, + TWL6030_MODULE_GPADC, + TWL6030_MODULE_GASGAUGE, + + TWL6030_MODULE_LAST, +}; + +/* Until the clients has been converted to use TWL_MODULE_LED */ +#define TWL4030_MODULE_LED TWL_MODULE_LED + +#define GPIO_INTR_OFFSET 0 +#define KEYPAD_INTR_OFFSET 1 +#define BCI_INTR_OFFSET 2 +#define MADC_INTR_OFFSET 3 +#define USB_INTR_OFFSET 4 +#define CHARGERFAULT_INTR_OFFSET 5 +#define BCI_PRES_INTR_OFFSET 9 +#define USB_PRES_INTR_OFFSET 10 +#define RTC_INTR_OFFSET 11 + +/* + * Offset from TWL6030_IRQ_BASE / pdata->irq_base + */ +#define PWR_INTR_OFFSET 0 +#define HOTDIE_INTR_OFFSET 12 +#define SMPSLDO_INTR_OFFSET 13 +#define BATDETECT_INTR_OFFSET 14 +#define SIMDETECT_INTR_OFFSET 15 +#define MMCDETECT_INTR_OFFSET 16 +#define GASGAUGE_INTR_OFFSET 17 +#define USBOTG_INTR_OFFSET 4 +#define CHARGER_INTR_OFFSET 2 +#define RSV_INTR_OFFSET 0 + +/* INT register offsets */ +#define REG_INT_STS_A 0x00 +#define REG_INT_STS_B 0x01 +#define REG_INT_STS_C 0x02 + +#define REG_INT_MSK_LINE_A 0x03 +#define REG_INT_MSK_LINE_B 0x04 +#define REG_INT_MSK_LINE_C 0x05 + +#define REG_INT_MSK_STS_A 0x06 +#define REG_INT_MSK_STS_B 0x07 +#define REG_INT_MSK_STS_C 0x08 + +/* MASK INT REG GROUP A */ +#define TWL6030_PWR_INT_MASK 0x07 +#define TWL6030_RTC_INT_MASK 0x18 +#define TWL6030_HOTDIE_INT_MASK 0x20 +#define TWL6030_SMPSLDOA_INT_MASK 0xC0 + +/* MASK INT REG GROUP B */ +#define TWL6030_SMPSLDOB_INT_MASK 0x01 +#define TWL6030_BATDETECT_INT_MASK 0x02 +#define TWL6030_SIMDETECT_INT_MASK 0x04 +#define TWL6030_MMCDETECT_INT_MASK 0x08 +#define TWL6030_GPADC_INT_MASK 0x60 +#define TWL6030_GASGAUGE_INT_MASK 0x80 + +/* MASK INT REG GROUP C */ +#define TWL6030_USBOTG_INT_MASK 0x0F +#define TWL6030_CHARGER_CTRL_INT_MASK 
0x10 +#define TWL6030_CHARGER_FAULT_INT_MASK 0x60 + +#define TWL6030_MMCCTRL 0xEE +#define VMMC_AUTO_OFF (0x1 << 3) +#define SW_FC (0x1 << 2) +#define STS_MMC 0x1 + +#define TWL6030_CFG_INPUT_PUPD3 0xF2 +#define MMC_PU (0x1 << 3) +#define MMC_PD (0x1 << 2) + +#define TWL_SIL_TYPE(rev) ((rev) & 0x00FFFFFF) +#define TWL_SIL_REV(rev) ((rev) >> 24) +#define TWL_SIL_5030 0x09002F +#define TWL5030_REV_1_0 0x00 +#define TWL5030_REV_1_1 0x10 +#define TWL5030_REV_1_2 0x30 + +#define TWL4030_CLASS_ID 0x4030 +#define TWL6030_CLASS_ID 0x6030 +unsigned int twl_rev(void); +#define GET_TWL_REV (twl_rev()) +#define TWL_CLASS_IS(class, id) \ +static inline int twl_class_is_ ##class(void) \ +{ \ + return ((id) == (GET_TWL_REV)) ? 1 : 0; \ +} + +TWL_CLASS_IS(4030, TWL4030_CLASS_ID) +TWL_CLASS_IS(6030, TWL6030_CLASS_ID) + +/* Set the regcache bypass for the regmap associated with the nodule */ +int twl_set_regcache_bypass(u8 mod_no, bool enable); + +/* + * Read and write several 8-bit registers at once. + */ +int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); +int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); + +/* + * Read and write single 8-bit registers + */ +static inline int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg) { + return twl_i2c_write(mod_no, &val, reg, 1); +} + +static inline int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg) { + return twl_i2c_read(mod_no, val, reg, 1); +} + +static inline int twl_i2c_write_u16(u8 mod_no, u16 val, u8 reg) { + val = cpu_to_le16(val); + return twl_i2c_write(mod_no, (u8*) &val, reg, 2); +} + +static inline int twl_i2c_read_u16(u8 mod_no, u16 *val, u8 reg) { + int ret; + ret = twl_i2c_read(mod_no, (u8*) val, reg, 2); + *val = le16_to_cpu(*val); + return ret; +} + +int twl_get_type(void); +int twl_get_version(void); +int twl_get_hfclk_rate(void); + +int twl6030_interrupt_unmask(u8 bit_mask, u8 offset); +int twl6030_interrupt_mask(u8 bit_mask, u8 offset); + +/* Card detect Configuration for MMC1 Controller on OMAP4 */ +#ifdef CONFIG_TWL4030_CORE +int twl6030_mmc_card_detect_config(void); +#else +static inline int twl6030_mmc_card_detect_config(void) +{ + pr_debug("twl6030_mmc_card_detect_config not supported\n"); + return 0; +} +#endif + +/* MMC1 Controller on OMAP4 uses Phoenix irq for Card detect */ +#ifdef CONFIG_TWL4030_CORE +int twl6030_mmc_card_detect(struct device *dev, int slot); +#else +static inline int twl6030_mmc_card_detect(struct device *dev, int slot) +{ + pr_debug("Call back twl6030_mmc_card_detect not supported\n"); + return -EIO; +} +#endif +/*----------------------------------------------------------------------*/ + +/* + * NOTE: at up to 1024 registers, this is a big chip. + * + * Avoid putting register declarations in this file, instead of into + * a driver-private file, unless some of the registers in a block + * need to be shared with other drivers. One example is blocks that + * have Secondary IRQ Handler (SIH) registers. 
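The TWL_SIL_TYPE()/TWL_SIL_REV() macros above split the 32-bit silicon ID word into a 24-bit type and an 8-bit revision. A stand-alone sketch; the ID word is composed by hand purely for illustration (the driver obtains it from the chip's IDCODE registers):

#include <stdio.h>
#include <stdint.h>

#define TWL_SIL_TYPE(rev)       ((rev) & 0x00FFFFFF)
#define TWL_SIL_REV(rev)        ((rev) >> 24)
#define TWL_SIL_5030            0x09002F
#define TWL5030_REV_1_2         0x30

int main(void)
{
        /* Hypothetical IDCODE word: a TWL5030 at silicon revision 1.2. */
        uint32_t rev = ((uint32_t)TWL5030_REV_1_2 << 24) | TWL_SIL_5030;

        printf("type=0x%06x rev=0x%02x\n", TWL_SIL_TYPE(rev), TWL_SIL_REV(rev));

        if (TWL_SIL_TYPE(rev) == TWL_SIL_5030 && TWL_SIL_REV(rev) >= TWL5030_REV_1_2)
                printf("TWL5030, ES1.2 or later\n");
        return 0;
}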
+ */ + +#define TWL4030_SIH_CTRL_EXCLEN_MASK BIT(0) +#define TWL4030_SIH_CTRL_PENDDIS_MASK BIT(1) +#define TWL4030_SIH_CTRL_COR_MASK BIT(2) + +/*----------------------------------------------------------------------*/ + +/* + * GPIO Block Register offsets (use TWL4030_MODULE_GPIO) + */ + +#define REG_GPIODATAIN1 0x0 +#define REG_GPIODATAIN2 0x1 +#define REG_GPIODATAIN3 0x2 +#define REG_GPIODATADIR1 0x3 +#define REG_GPIODATADIR2 0x4 +#define REG_GPIODATADIR3 0x5 +#define REG_GPIODATAOUT1 0x6 +#define REG_GPIODATAOUT2 0x7 +#define REG_GPIODATAOUT3 0x8 +#define REG_CLEARGPIODATAOUT1 0x9 +#define REG_CLEARGPIODATAOUT2 0xA +#define REG_CLEARGPIODATAOUT3 0xB +#define REG_SETGPIODATAOUT1 0xC +#define REG_SETGPIODATAOUT2 0xD +#define REG_SETGPIODATAOUT3 0xE +#define REG_GPIO_DEBEN1 0xF +#define REG_GPIO_DEBEN2 0x10 +#define REG_GPIO_DEBEN3 0x11 +#define REG_GPIO_CTRL 0x12 +#define REG_GPIOPUPDCTR1 0x13 +#define REG_GPIOPUPDCTR2 0x14 +#define REG_GPIOPUPDCTR3 0x15 +#define REG_GPIOPUPDCTR4 0x16 +#define REG_GPIOPUPDCTR5 0x17 +#define REG_GPIO_ISR1A 0x19 +#define REG_GPIO_ISR2A 0x1A +#define REG_GPIO_ISR3A 0x1B +#define REG_GPIO_IMR1A 0x1C +#define REG_GPIO_IMR2A 0x1D +#define REG_GPIO_IMR3A 0x1E +#define REG_GPIO_ISR1B 0x1F +#define REG_GPIO_ISR2B 0x20 +#define REG_GPIO_ISR3B 0x21 +#define REG_GPIO_IMR1B 0x22 +#define REG_GPIO_IMR2B 0x23 +#define REG_GPIO_IMR3B 0x24 +#define REG_GPIO_EDR1 0x28 +#define REG_GPIO_EDR2 0x29 +#define REG_GPIO_EDR3 0x2A +#define REG_GPIO_EDR4 0x2B +#define REG_GPIO_EDR5 0x2C +#define REG_GPIO_SIH_CTRL 0x2D + +/* Up to 18 signals are available as GPIOs, when their + * pins are not assigned to another use (such as ULPI/USB). + */ +#define TWL4030_GPIO_MAX 18 + +/*----------------------------------------------------------------------*/ + +/*Interface Bit Register (INTBR) offsets + *(Use TWL_4030_MODULE_INTBR) + */ + +#define REG_IDCODE_7_0 0x00 +#define REG_IDCODE_15_8 0x01 +#define REG_IDCODE_16_23 0x02 +#define REG_IDCODE_31_24 0x03 +#define REG_GPPUPDCTR1 0x0F +#define REG_UNLOCK_TEST_REG 0x12 + +/*I2C1 and I2C4(SR) SDA/SCL pull-up control bits */ + +#define I2C_SCL_CTRL_PU BIT(0) +#define I2C_SDA_CTRL_PU BIT(2) +#define SR_I2C_SCL_CTRL_PU BIT(4) +#define SR_I2C_SDA_CTRL_PU BIT(6) + +#define TWL_EEPROM_R_UNLOCK 0x49 + +/*----------------------------------------------------------------------*/ + +/* + * Keypad register offsets (use TWL4030_MODULE_KEYPAD) + * ... SIH/interrupt only + */ + +#define TWL4030_KEYPAD_KEYP_ISR1 0x11 +#define TWL4030_KEYPAD_KEYP_IMR1 0x12 +#define TWL4030_KEYPAD_KEYP_ISR2 0x13 +#define TWL4030_KEYPAD_KEYP_IMR2 0x14 +#define TWL4030_KEYPAD_KEYP_SIR 0x15 /* test register */ +#define TWL4030_KEYPAD_KEYP_EDR 0x16 +#define TWL4030_KEYPAD_KEYP_SIH_CTRL 0x17 + +/*----------------------------------------------------------------------*/ + +/* + * Multichannel ADC register offsets (use TWL4030_MODULE_MADC) + * ... 
SIH/interrupt only + */ + +#define TWL4030_MADC_ISR1 0x61 +#define TWL4030_MADC_IMR1 0x62 +#define TWL4030_MADC_ISR2 0x63 +#define TWL4030_MADC_IMR2 0x64 +#define TWL4030_MADC_SIR 0x65 /* test register */ +#define TWL4030_MADC_EDR 0x66 +#define TWL4030_MADC_SIH_CTRL 0x67 + +/*----------------------------------------------------------------------*/ + +/* + * Battery charger register offsets (use TWL4030_MODULE_INTERRUPTS) + */ + +#define TWL4030_INTERRUPTS_BCIISR1A 0x0 +#define TWL4030_INTERRUPTS_BCIISR2A 0x1 +#define TWL4030_INTERRUPTS_BCIIMR1A 0x2 +#define TWL4030_INTERRUPTS_BCIIMR2A 0x3 +#define TWL4030_INTERRUPTS_BCIISR1B 0x4 +#define TWL4030_INTERRUPTS_BCIISR2B 0x5 +#define TWL4030_INTERRUPTS_BCIIMR1B 0x6 +#define TWL4030_INTERRUPTS_BCIIMR2B 0x7 +#define TWL4030_INTERRUPTS_BCISIR1 0x8 /* test register */ +#define TWL4030_INTERRUPTS_BCISIR2 0x9 /* test register */ +#define TWL4030_INTERRUPTS_BCIEDR1 0xa +#define TWL4030_INTERRUPTS_BCIEDR2 0xb +#define TWL4030_INTERRUPTS_BCIEDR3 0xc +#define TWL4030_INTERRUPTS_BCISIHCTRL 0xd + +/*----------------------------------------------------------------------*/ + +/* + * Power Interrupt block register offsets (use TWL4030_MODULE_INT) + */ + +#define TWL4030_INT_PWR_ISR1 0x0 +#define TWL4030_INT_PWR_IMR1 0x1 +#define TWL4030_INT_PWR_ISR2 0x2 +#define TWL4030_INT_PWR_IMR2 0x3 +#define TWL4030_INT_PWR_SIR 0x4 /* test register */ +#define TWL4030_INT_PWR_EDR1 0x5 +#define TWL4030_INT_PWR_EDR2 0x6 +#define TWL4030_INT_PWR_SIH_CTRL 0x7 + +/*----------------------------------------------------------------------*/ + +/* + * Accessory Interrupts + */ +#define TWL5031_ACIIMR_LSB 0x05 +#define TWL5031_ACIIMR_MSB 0x06 +#define TWL5031_ACIIDR_LSB 0x07 +#define TWL5031_ACIIDR_MSB 0x08 +#define TWL5031_ACCISR1 0x0F +#define TWL5031_ACCIMR1 0x10 +#define TWL5031_ACCISR2 0x11 +#define TWL5031_ACCIMR2 0x12 +#define TWL5031_ACCSIR 0x13 +#define TWL5031_ACCEDR1 0x14 +#define TWL5031_ACCSIHCTRL 0x15 + +/*----------------------------------------------------------------------*/ + +/* + * Battery Charger Controller + */ + +#define TWL5031_INTERRUPTS_BCIISR1 0x0 +#define TWL5031_INTERRUPTS_BCIIMR1 0x1 +#define TWL5031_INTERRUPTS_BCIISR2 0x2 +#define TWL5031_INTERRUPTS_BCIIMR2 0x3 +#define TWL5031_INTERRUPTS_BCISIR 0x4 +#define TWL5031_INTERRUPTS_BCIEDR1 0x5 +#define TWL5031_INTERRUPTS_BCIEDR2 0x6 +#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7 + +/*----------------------------------------------------------------------*/ + +/* + * PM Master module register offsets (use TWL4030_MODULE_PM_MASTER) + */ + +#define TWL4030_PM_MASTER_CFG_P1_TRANSITION 0x00 +#define TWL4030_PM_MASTER_CFG_P2_TRANSITION 0x01 +#define TWL4030_PM_MASTER_CFG_P3_TRANSITION 0x02 +#define TWL4030_PM_MASTER_CFG_P123_TRANSITION 0x03 +#define TWL4030_PM_MASTER_STS_BOOT 0x04 +#define TWL4030_PM_MASTER_CFG_BOOT 0x05 +#define TWL4030_PM_MASTER_SHUNDAN 0x06 +#define TWL4030_PM_MASTER_BOOT_BCI 0x07 +#define TWL4030_PM_MASTER_CFG_PWRANA1 0x08 +#define TWL4030_PM_MASTER_CFG_PWRANA2 0x09 +#define TWL4030_PM_MASTER_BACKUP_MISC_STS 0x0b +#define TWL4030_PM_MASTER_BACKUP_MISC_CFG 0x0c +#define TWL4030_PM_MASTER_BACKUP_MISC_TST 0x0d +#define TWL4030_PM_MASTER_PROTECT_KEY 0x0e +#define TWL4030_PM_MASTER_STS_HW_CONDITIONS 0x0f +#define TWL4030_PM_MASTER_P1_SW_EVENTS 0x10 +#define TWL4030_PM_MASTER_P2_SW_EVENTS 0x11 +#define TWL4030_PM_MASTER_P3_SW_EVENTS 0x12 +#define TWL4030_PM_MASTER_STS_P123_STATE 0x13 +#define TWL4030_PM_MASTER_PB_CFG 0x14 +#define TWL4030_PM_MASTER_PB_WORD_MSB 0x15 +#define TWL4030_PM_MASTER_PB_WORD_LSB 
0x16 +#define TWL4030_PM_MASTER_SEQ_ADD_W2P 0x1c +#define TWL4030_PM_MASTER_SEQ_ADD_P2A 0x1d +#define TWL4030_PM_MASTER_SEQ_ADD_A2W 0x1e +#define TWL4030_PM_MASTER_SEQ_ADD_A2S 0x1f +#define TWL4030_PM_MASTER_SEQ_ADD_S2A12 0x20 +#define TWL4030_PM_MASTER_SEQ_ADD_S2A3 0x21 +#define TWL4030_PM_MASTER_SEQ_ADD_WARM 0x22 +#define TWL4030_PM_MASTER_MEMORY_ADDRESS 0x23 +#define TWL4030_PM_MASTER_MEMORY_DATA 0x24 + +#define TWL4030_PM_MASTER_KEY_CFG1 0xc0 +#define TWL4030_PM_MASTER_KEY_CFG2 0x0c + +#define TWL4030_PM_MASTER_KEY_TST1 0xe0 +#define TWL4030_PM_MASTER_KEY_TST2 0x0e + +#define TWL4030_PM_MASTER_GLOBAL_TST 0xb6 + +/*----------------------------------------------------------------------*/ + +/* Power bus message definitions */ + +/* The TWL4030/5030 splits its power-management resources (the various + * regulators, clock and reset lines) into 3 processor groups - P1, P2 and + * P3. These groups can then be configured to transition between sleep, wait-on + * and active states by sending messages to the power bus. See Section 5.4.2 + * Power Resources of TWL4030 TRM + */ + +/* Processor groups */ +#define DEV_GRP_NULL 0x0 +#define DEV_GRP_P1 0x1 /* P1: all OMAP devices */ +#define DEV_GRP_P2 0x2 /* P2: all Modem devices */ +#define DEV_GRP_P3 0x4 /* P3: all peripheral devices */ + +/* Resource groups */ +#define RES_GRP_RES 0x0 /* Reserved */ +#define RES_GRP_PP 0x1 /* Power providers */ +#define RES_GRP_RC 0x2 /* Reset and control */ +#define RES_GRP_PP_RC 0x3 +#define RES_GRP_PR 0x4 /* Power references */ +#define RES_GRP_PP_PR 0x5 +#define RES_GRP_RC_PR 0x6 +#define RES_GRP_ALL 0x7 /* All resource groups */ + +#define RES_TYPE2_R0 0x0 +#define RES_TYPE2_R1 0x1 +#define RES_TYPE2_R2 0x2 + +#define RES_TYPE_R0 0x0 +#define RES_TYPE_ALL 0x7 + +/* Resource states */ +#define RES_STATE_WRST 0xF +#define RES_STATE_ACTIVE 0xE +#define RES_STATE_SLEEP 0x8 +#define RES_STATE_OFF 0x0 + +/* Power resources */ + +/* Power providers */ +#define RES_VAUX1 1 +#define RES_VAUX2 2 +#define RES_VAUX3 3 +#define RES_VAUX4 4 +#define RES_VMMC1 5 +#define RES_VMMC2 6 +#define RES_VPLL1 7 +#define RES_VPLL2 8 +#define RES_VSIM 9 +#define RES_VDAC 10 +#define RES_VINTANA1 11 +#define RES_VINTANA2 12 +#define RES_VINTDIG 13 +#define RES_VIO 14 +#define RES_VDD1 15 +#define RES_VDD2 16 +#define RES_VUSB_1V5 17 +#define RES_VUSB_1V8 18 +#define RES_VUSB_3V1 19 +#define RES_VUSBCP 20 +#define RES_REGEN 21 +/* Reset and control */ +#define RES_NRES_PWRON 22 +#define RES_CLKEN 23 +#define RES_SYSEN 24 +#define RES_HFCLKOUT 25 +#define RES_32KCLKOUT 26 +#define RES_RESET 27 +/* Power Reference */ +#define RES_MAIN_REF 28 + +#define TOTAL_RESOURCES 28 +/* + * Power Bus Message Format ... these can be sent individually by Linux, + * but are usually part of downloaded scripts that are run when various + * power events are triggered. 
+ * + * Broadcast Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4] + * RES_STATE[3:0] + * + * Singular Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0] + */ + +#define MSG_BROADCAST(devgrp, grp, type, type2, state) \ + ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \ + | (type) << 4 | (state)) + +#define MSG_SINGULAR(devgrp, id, state) \ + ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) + +#define MSG_BROADCAST_ALL(devgrp, state) \ + ((devgrp) << 5 | (state)) + +#define MSG_BROADCAST_REF MSG_BROADCAST_ALL +#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL +#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL +/*----------------------------------------------------------------------*/ + +struct twl4030_clock_init_data { + bool ck32k_lowpwr_enable; +}; + +struct twl4030_bci_platform_data { + int *battery_tmp_tbl; + unsigned int tblsize; + int bb_uvolt; /* voltage to charge backup battery */ + int bb_uamp; /* current for backup battery charging */ +}; + +/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */ +struct twl4030_gpio_platform_data { + /* package the two LED signals as output-only GPIOs? */ + bool use_leds; + + /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */ + u8 mmc_cd; + + /* if BIT(N) is set, or VMMC(n+1) is linked, debounce GPIO-N */ + u32 debounce; + + /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup + * should be enabled. Else, if that bit is set in "pulldowns", + * that pulldown is enabled. Don't waste power by letting any + * digital inputs float... + */ + u32 pullups; + u32 pulldowns; + + int (*setup)(struct device *dev, + unsigned gpio, unsigned ngpio); + int (*teardown)(struct device *dev, + unsigned gpio, unsigned ngpio); +}; + +struct twl4030_madc_platform_data { + int irq_line; +}; + +/* Boards have unique mappings of {row, col} --> keycode. + * Column and row are 8 bits each, but range only from 0..7. + * a PERSISTENT_KEY is "always on" and never reported. 
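The comment above spells out the 16-bit power-bus message layout that MSG_BROADCAST() and MSG_SINGULAR() implement. A self-contained check of the packing, with the macros and constants copied from this header; the chosen resources and states are arbitrary examples:

#include <stdio.h>

#define DEV_GRP_P1              0x1
#define RES_GRP_ALL             0x7
#define RES_TYPE_R0             0x0
#define RES_TYPE2_R0            0x0
#define RES_STATE_ACTIVE        0xE
#define RES_STATE_SLEEP         0x8
#define RES_VAUX1               1

#define MSG_BROADCAST(devgrp, grp, type, type2, state) \
        ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \
        | (type) << 4 | (state))

#define MSG_SINGULAR(devgrp, id, state) \
        ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))

int main(void)
{
        /* Singular: put only VAUX1 (resource id 1) into ACTIVE for group P1. */
        printf("singular  = 0x%04x\n",
               MSG_SINGULAR(DEV_GRP_P1, RES_VAUX1, RES_STATE_ACTIVE));   /* 0x201e */

        /* Broadcast: put every resource owned by P1 into SLEEP. */
        printf("broadcast = 0x%04x\n",
               MSG_BROADCAST(DEV_GRP_P1, RES_GRP_ALL, RES_TYPE_R0,
                             RES_TYPE2_R0, RES_STATE_SLEEP));            /* 0x3e08 */
        return 0;
}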
+ */ +#define PERSISTENT_KEY(r, c) KEY((r), (c), KEY_RESERVED) + +struct twl4030_keypad_data { + const struct matrix_keymap_data *keymap_data; + unsigned rows; + unsigned cols; + bool rep; +}; + +enum twl4030_usb_mode { + T2_USB_MODE_ULPI = 1, + T2_USB_MODE_CEA2011_3PIN = 2, +}; + +struct twl4030_usb_data { + enum twl4030_usb_mode usb_mode; + unsigned long features; + + int (*phy_init)(struct device *dev); + int (*phy_exit)(struct device *dev); + /* Power on/off the PHY */ + int (*phy_power)(struct device *dev, int iD, int on); + /* enable/disable phy clocks */ + int (*phy_set_clock)(struct device *dev, int on); + /* suspend/resume of phy */ + int (*phy_suspend)(struct device *dev, int suspend); +}; + +struct twl4030_ins { + u16 pmb_message; + u8 delay; +}; + +struct twl4030_script { + struct twl4030_ins *script; + unsigned size; + u8 flags; +#define TWL4030_WRST_SCRIPT (1<<0) +#define TWL4030_WAKEUP12_SCRIPT (1<<1) +#define TWL4030_WAKEUP3_SCRIPT (1<<2) +#define TWL4030_SLEEP_SCRIPT (1<<3) +}; + +struct twl4030_resconfig { + u8 resource; + u8 devgroup; /* Processor group that Power resource belongs to */ + u8 type; /* Power resource addressed, 6 / broadcast message */ + u8 type2; /* Power resource addressed, 3 / broadcast message */ + u8 remap_off; /* off state remapping */ + u8 remap_sleep; /* sleep state remapping */ +}; + +struct twl4030_power_data { + struct twl4030_script **scripts; + unsigned num; + struct twl4030_resconfig *resource_config; + struct twl4030_resconfig *board_config; +#define TWL4030_RESCONFIG_UNDEF ((u8)-1) + bool use_poweroff; /* Board is wired for TWL poweroff */ + bool ac_charger_quirk; /* Disable AC charger on board */ +}; + +extern int twl4030_remove_script(u8 flags); +extern void twl4030_power_off(void); + +struct twl4030_codec_data { + unsigned int digimic_delay; /* in ms */ + unsigned int ramp_delay_value; + unsigned int offset_cncl_path; + unsigned int hs_extmute:1; + int hs_extmute_gpio; +}; + +struct twl4030_vibra_data { + unsigned int coexist; +}; + +struct twl4030_audio_data { + unsigned int audio_mclk; + struct twl4030_codec_data *codec; + struct twl4030_vibra_data *vibra; + + /* twl6040 */ + int audpwron_gpio; /* audio power-on gpio */ + int naudint_irq; /* audio interrupt */ + unsigned int irq_base; +}; + +struct twl4030_platform_data { + struct twl4030_clock_init_data *clock; + struct twl4030_bci_platform_data *bci; + struct twl4030_gpio_platform_data *gpio; + struct twl4030_madc_platform_data *madc; + struct twl4030_keypad_data *keypad; + struct twl4030_usb_data *usb; + struct twl4030_power_data *power; + struct twl4030_audio_data *audio; + + /* Common LDO regulators for TWL4030/TWL6030 */ + struct regulator_init_data *vdac; + struct regulator_init_data *vaux1; + struct regulator_init_data *vaux2; + struct regulator_init_data *vaux3; + struct regulator_init_data *vdd1; + struct regulator_init_data *vdd2; + struct regulator_init_data *vdd3; + /* TWL4030 LDO regulators */ + struct regulator_init_data *vpll1; + struct regulator_init_data *vpll2; + struct regulator_init_data *vmmc1; + struct regulator_init_data *vmmc2; + struct regulator_init_data *vsim; + struct regulator_init_data *vaux4; + struct regulator_init_data *vio; + struct regulator_init_data *vintana1; + struct regulator_init_data *vintana2; + struct regulator_init_data *vintdig; + /* TWL6030 LDO regulators */ + struct regulator_init_data *vmmc; + struct regulator_init_data *vpp; + struct regulator_init_data *vusim; + struct regulator_init_data *vana; + struct regulator_init_data *vcxio; + 
struct regulator_init_data *vusb; + struct regulator_init_data *clk32kg; + struct regulator_init_data *v1v8; + struct regulator_init_data *v2v1; + /* TWL6032 LDO regulators */ + struct regulator_init_data *ldo1; + struct regulator_init_data *ldo2; + struct regulator_init_data *ldo3; + struct regulator_init_data *ldo4; + struct regulator_init_data *ldo5; + struct regulator_init_data *ldo6; + struct regulator_init_data *ldo7; + struct regulator_init_data *ldoln; + struct regulator_init_data *ldousb; + /* TWL6032 DCDC regulators */ + struct regulator_init_data *smps3; + struct regulator_init_data *smps4; + struct regulator_init_data *vio6025; +}; + +struct twl_regulator_driver_data { + int (*set_voltage)(void *data, int target_uV); + int (*get_voltage)(void *data); + void *data; + unsigned long features; +}; +/* chip-specific feature flags, for twl_regulator_driver_data.features */ +#define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */ +#define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */ +#define TWL5031 BIT(2) /* twl5031 has different registers */ +#define TWL6030_CLASS BIT(3) /* TWL6030 class */ +#define TWL6032_SUBCLASS BIT(4) /* TWL6032 has changed registers */ +#define TWL4030_ALLOW_UNSUPPORTED BIT(5) /* Some voltages are possible + * but not officially supported. + * This flag is necessary to + * enable them. + */ + +/*----------------------------------------------------------------------*/ + +int twl4030_sih_setup(struct device *dev, int module, int irq_base); + +/* Offsets to Power Registers */ +#define TWL4030_VDAC_DEV_GRP 0x3B +#define TWL4030_VDAC_DEDICATED 0x3E +#define TWL4030_VAUX1_DEV_GRP 0x17 +#define TWL4030_VAUX1_DEDICATED 0x1A +#define TWL4030_VAUX2_DEV_GRP 0x1B +#define TWL4030_VAUX2_DEDICATED 0x1E +#define TWL4030_VAUX3_DEV_GRP 0x1F +#define TWL4030_VAUX3_DEDICATED 0x22 + +static inline int twl4030charger_usb_en(int enable) { return 0; } + +/*----------------------------------------------------------------------*/ + +/* Linux-specific regulator identifiers ... for now, we only support + * the LDOs, and leave the three buck converters alone. VDD1 and VDD2 + * need to tie into hardware based voltage scaling (cpufreq etc), while + * VIO is generally fixed. 
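The message macros are normally consumed through the script structures defined earlier in this header (struct twl4030_ins and struct twl4030_script). A hypothetical board-data sketch, assuming kernel context (linux/mfd/twl.h plus ARRAY_SIZE() from linux/kernel.h); the instruction, delay and flags are illustrative only, not a recommendation for any real board:

/* One power-bus instruction: broadcast SLEEP to every P1 resource,
 * then wait the given delay before the next instruction (delay units
 * per the TWL4030 TRM). */
static struct twl4030_ins sleep_on_seq[] = {
        { MSG_BROADCAST(DEV_GRP_P1, RES_GRP_ALL, RES_TYPE_R0,
                        RES_TYPE2_R0, RES_STATE_SLEEP), 4 },
};

static struct twl4030_script sleep_on_script = {
        .script = sleep_on_seq,
        .size   = ARRAY_SIZE(sleep_on_seq),
        .flags  = TWL4030_SLEEP_SCRIPT,
};

Scripts like this are handed to the core through the scripts array in struct twl4030_power_data shown above.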
+ */ + +/* TWL4030 SMPS/LDO's */ +/* EXTERNAL dc-to-dc buck converters */ +#define TWL4030_REG_VDD1 0 +#define TWL4030_REG_VDD2 1 +#define TWL4030_REG_VIO 2 + +/* EXTERNAL LDOs */ +#define TWL4030_REG_VDAC 3 +#define TWL4030_REG_VPLL1 4 +#define TWL4030_REG_VPLL2 5 /* not on all chips */ +#define TWL4030_REG_VMMC1 6 +#define TWL4030_REG_VMMC2 7 /* not on all chips */ +#define TWL4030_REG_VSIM 8 /* not on all chips */ +#define TWL4030_REG_VAUX1 9 /* not on all chips */ +#define TWL4030_REG_VAUX2_4030 10 /* (twl4030-specific) */ +#define TWL4030_REG_VAUX2 11 /* (twl5030 and newer) */ +#define TWL4030_REG_VAUX3 12 /* not on all chips */ +#define TWL4030_REG_VAUX4 13 /* not on all chips */ + +/* INTERNAL LDOs */ +#define TWL4030_REG_VINTANA1 14 +#define TWL4030_REG_VINTANA2 15 +#define TWL4030_REG_VINTDIG 16 +#define TWL4030_REG_VUSB1V5 17 +#define TWL4030_REG_VUSB1V8 18 +#define TWL4030_REG_VUSB3V1 19 + +/* TWL6030 SMPS/LDO's */ +/* EXTERNAL dc-to-dc buck convertor controllable via SR */ +#define TWL6030_REG_VDD1 30 +#define TWL6030_REG_VDD2 31 +#define TWL6030_REG_VDD3 32 + +/* Non SR compliant dc-to-dc buck convertors */ +#define TWL6030_REG_VMEM 33 +#define TWL6030_REG_V2V1 34 +#define TWL6030_REG_V1V29 35 +#define TWL6030_REG_V1V8 36 + +/* EXTERNAL LDOs */ +#define TWL6030_REG_VAUX1_6030 37 +#define TWL6030_REG_VAUX2_6030 38 +#define TWL6030_REG_VAUX3_6030 39 +#define TWL6030_REG_VMMC 40 +#define TWL6030_REG_VPP 41 +#define TWL6030_REG_VUSIM 42 +#define TWL6030_REG_VANA 43 +#define TWL6030_REG_VCXIO 44 +#define TWL6030_REG_VDAC 45 +#define TWL6030_REG_VUSB 46 + +/* INTERNAL LDOs */ +#define TWL6030_REG_VRTC 47 +#define TWL6030_REG_CLK32KG 48 + +/* LDOs on 6025 have different names */ +#define TWL6032_REG_LDO2 49 +#define TWL6032_REG_LDO4 50 +#define TWL6032_REG_LDO3 51 +#define TWL6032_REG_LDO5 52 +#define TWL6032_REG_LDO1 53 +#define TWL6032_REG_LDO7 54 +#define TWL6032_REG_LDO6 55 +#define TWL6032_REG_LDOLN 56 +#define TWL6032_REG_LDOUSB 57 + +/* 6025 DCDC supplies */ +#define TWL6032_REG_SMPS3 58 +#define TWL6032_REG_SMPS4 59 +#define TWL6032_REG_VIO 60 + + +#endif /* End of __TWL4030_H */ diff --git a/include/linux/mfd/twl4030-audio.h b/include/linux/mfd/twl4030-audio.h new file mode 100644 index 000000000..3d22b72df --- /dev/null +++ b/include/linux/mfd/twl4030-audio.h @@ -0,0 +1,272 @@ +/* + * MFD driver for twl4030 audio submodule + * + * Author: Peter Ujfalusi + * + * Copyright: (C) 2009 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __TWL4030_CODEC_H__ +#define __TWL4030_CODEC_H__ + +/* Codec registers */ +#define TWL4030_REG_CODEC_MODE 0x01 +#define TWL4030_REG_OPTION 0x02 +#define TWL4030_REG_UNKNOWN 0x03 +#define TWL4030_REG_MICBIAS_CTL 0x04 +#define TWL4030_REG_ANAMICL 0x05 +#define TWL4030_REG_ANAMICR 0x06 +#define TWL4030_REG_AVADC_CTL 0x07 +#define TWL4030_REG_ADCMICSEL 0x08 +#define TWL4030_REG_DIGMIXING 0x09 +#define TWL4030_REG_ATXL1PGA 0x0A +#define TWL4030_REG_ATXR1PGA 0x0B +#define TWL4030_REG_AVTXL2PGA 0x0C +#define TWL4030_REG_AVTXR2PGA 0x0D +#define TWL4030_REG_AUDIO_IF 0x0E +#define TWL4030_REG_VOICE_IF 0x0F +#define TWL4030_REG_ARXR1PGA 0x10 +#define TWL4030_REG_ARXL1PGA 0x11 +#define TWL4030_REG_ARXR2PGA 0x12 +#define TWL4030_REG_ARXL2PGA 0x13 +#define TWL4030_REG_VRXPGA 0x14 +#define TWL4030_REG_VSTPGA 0x15 +#define TWL4030_REG_VRX2ARXPGA 0x16 +#define TWL4030_REG_AVDAC_CTL 0x17 +#define TWL4030_REG_ARX2VTXPGA 0x18 +#define TWL4030_REG_ARXL1_APGA_CTL 0x19 +#define TWL4030_REG_ARXR1_APGA_CTL 0x1A +#define TWL4030_REG_ARXL2_APGA_CTL 0x1B +#define TWL4030_REG_ARXR2_APGA_CTL 0x1C +#define TWL4030_REG_ATX2ARXPGA 0x1D +#define TWL4030_REG_BT_IF 0x1E +#define TWL4030_REG_BTPGA 0x1F +#define TWL4030_REG_BTSTPGA 0x20 +#define TWL4030_REG_EAR_CTL 0x21 +#define TWL4030_REG_HS_SEL 0x22 +#define TWL4030_REG_HS_GAIN_SET 0x23 +#define TWL4030_REG_HS_POPN_SET 0x24 +#define TWL4030_REG_PREDL_CTL 0x25 +#define TWL4030_REG_PREDR_CTL 0x26 +#define TWL4030_REG_PRECKL_CTL 0x27 +#define TWL4030_REG_PRECKR_CTL 0x28 +#define TWL4030_REG_HFL_CTL 0x29 +#define TWL4030_REG_HFR_CTL 0x2A +#define TWL4030_REG_ALC_CTL 0x2B +#define TWL4030_REG_ALC_SET1 0x2C +#define TWL4030_REG_ALC_SET2 0x2D +#define TWL4030_REG_BOOST_CTL 0x2E +#define TWL4030_REG_SOFTVOL_CTL 0x2F +#define TWL4030_REG_DTMF_FREQSEL 0x30 +#define TWL4030_REG_DTMF_TONEXT1H 0x31 +#define TWL4030_REG_DTMF_TONEXT1L 0x32 +#define TWL4030_REG_DTMF_TONEXT2H 0x33 +#define TWL4030_REG_DTMF_TONEXT2L 0x34 +#define TWL4030_REG_DTMF_TONOFF 0x35 +#define TWL4030_REG_DTMF_WANONOFF 0x36 +#define TWL4030_REG_I2S_RX_SCRAMBLE_H 0x37 +#define TWL4030_REG_I2S_RX_SCRAMBLE_M 0x38 +#define TWL4030_REG_I2S_RX_SCRAMBLE_L 0x39 +#define TWL4030_REG_APLL_CTL 0x3A +#define TWL4030_REG_DTMF_CTL 0x3B +#define TWL4030_REG_DTMF_PGA_CTL2 0x3C +#define TWL4030_REG_DTMF_PGA_CTL1 0x3D +#define TWL4030_REG_MISC_SET_1 0x3E +#define TWL4030_REG_PCMBTMUX 0x3F +#define TWL4030_REG_RX_PATH_SEL 0x43 +#define TWL4030_REG_VDL_APGA_CTL 0x44 +#define TWL4030_REG_VIBRA_CTL 0x45 +#define TWL4030_REG_VIBRA_SET 0x46 +#define TWL4030_REG_VIBRA_PWM_SET 0x47 +#define TWL4030_REG_ANAMIC_GAIN 0x48 +#define TWL4030_REG_MISC_SET_2 0x49 + +/* Bitfield Definitions */ + +/* TWL4030_CODEC_MODE (0x01) Fields */ +#define TWL4030_APLL_RATE 0xF0 +#define TWL4030_APLL_RATE_8000 0x00 +#define TWL4030_APLL_RATE_11025 0x10 +#define TWL4030_APLL_RATE_12000 0x20 +#define TWL4030_APLL_RATE_16000 0x40 +#define TWL4030_APLL_RATE_22050 0x50 +#define TWL4030_APLL_RATE_24000 0x60 +#define TWL4030_APLL_RATE_32000 0x80 +#define TWL4030_APLL_RATE_44100 0x90 +#define TWL4030_APLL_RATE_48000 0xA0 +#define TWL4030_APLL_RATE_96000 0xE0 +#define TWL4030_SEL_16K 0x08 +#define TWL4030_CODECPDZ 0x02 +#define TWL4030_OPT_MODE 0x01 +#define TWL4030_OPTION_1 (1 << 0) +#define TWL4030_OPTION_2 (0 << 0) + +/* TWL4030_OPTION 
(0x02) Fields */ +#define TWL4030_ATXL1_EN (1 << 0) +#define TWL4030_ATXR1_EN (1 << 1) +#define TWL4030_ATXL2_VTXL_EN (1 << 2) +#define TWL4030_ATXR2_VTXR_EN (1 << 3) +#define TWL4030_ARXL1_VRX_EN (1 << 4) +#define TWL4030_ARXR1_EN (1 << 5) +#define TWL4030_ARXL2_EN (1 << 6) +#define TWL4030_ARXR2_EN (1 << 7) + +/* TWL4030_REG_MICBIAS_CTL (0x04) Fields */ +#define TWL4030_MICBIAS2_CTL 0x40 +#define TWL4030_MICBIAS1_CTL 0x20 +#define TWL4030_HSMICBIAS_EN 0x04 +#define TWL4030_MICBIAS2_EN 0x02 +#define TWL4030_MICBIAS1_EN 0x01 + +/* ANAMICL (0x05) Fields */ +#define TWL4030_CNCL_OFFSET_START 0x80 +#define TWL4030_OFFSET_CNCL_SEL 0x60 +#define TWL4030_OFFSET_CNCL_SEL_ARX1 0x00 +#define TWL4030_OFFSET_CNCL_SEL_ARX2 0x20 +#define TWL4030_OFFSET_CNCL_SEL_VRX 0x40 +#define TWL4030_OFFSET_CNCL_SEL_ALL 0x60 +#define TWL4030_MICAMPL_EN 0x10 +#define TWL4030_CKMIC_EN 0x08 +#define TWL4030_AUXL_EN 0x04 +#define TWL4030_HSMIC_EN 0x02 +#define TWL4030_MAINMIC_EN 0x01 + +/* ANAMICR (0x06) Fields */ +#define TWL4030_MICAMPR_EN 0x10 +#define TWL4030_AUXR_EN 0x04 +#define TWL4030_SUBMIC_EN 0x01 + +/* AVADC_CTL (0x07) Fields */ +#define TWL4030_ADCL_EN 0x08 +#define TWL4030_AVADC_CLK_PRIORITY 0x04 +#define TWL4030_ADCR_EN 0x02 + +/* TWL4030_REG_ADCMICSEL (0x08) Fields */ +#define TWL4030_DIGMIC1_EN 0x08 +#define TWL4030_TX2IN_SEL 0x04 +#define TWL4030_DIGMIC0_EN 0x02 +#define TWL4030_TX1IN_SEL 0x01 + +/* AUDIO_IF (0x0E) Fields */ +#define TWL4030_AIF_SLAVE_EN 0x80 +#define TWL4030_DATA_WIDTH 0x60 +#define TWL4030_DATA_WIDTH_16S_16W 0x00 +#define TWL4030_DATA_WIDTH_32S_16W 0x40 +#define TWL4030_DATA_WIDTH_32S_24W 0x60 +#define TWL4030_AIF_FORMAT 0x18 +#define TWL4030_AIF_FORMAT_CODEC 0x00 +#define TWL4030_AIF_FORMAT_LEFT 0x08 +#define TWL4030_AIF_FORMAT_RIGHT 0x10 +#define TWL4030_AIF_FORMAT_TDM 0x18 +#define TWL4030_AIF_TRI_EN 0x04 +#define TWL4030_CLK256FS_EN 0x02 +#define TWL4030_AIF_EN 0x01 + +/* VOICE_IF (0x0F) Fields */ +#define TWL4030_VIF_SLAVE_EN 0x80 +#define TWL4030_VIF_DIN_EN 0x40 +#define TWL4030_VIF_DOUT_EN 0x20 +#define TWL4030_VIF_SWAP 0x10 +#define TWL4030_VIF_FORMAT 0x08 +#define TWL4030_VIF_TRI_EN 0x04 +#define TWL4030_VIF_SUB_EN 0x02 +#define TWL4030_VIF_EN 0x01 + +/* EAR_CTL (0x21) */ +#define TWL4030_EAR_GAIN 0x30 + +/* HS_GAIN_SET (0x23) Fields */ +#define TWL4030_HSR_GAIN 0x0C +#define TWL4030_HSR_GAIN_PWR_DOWN 0x00 +#define TWL4030_HSR_GAIN_PLUS_6DB 0x04 +#define TWL4030_HSR_GAIN_0DB 0x08 +#define TWL4030_HSR_GAIN_MINUS_6DB 0x0C +#define TWL4030_HSL_GAIN 0x03 +#define TWL4030_HSL_GAIN_PWR_DOWN 0x00 +#define TWL4030_HSL_GAIN_PLUS_6DB 0x01 +#define TWL4030_HSL_GAIN_0DB 0x02 +#define TWL4030_HSL_GAIN_MINUS_6DB 0x03 + +/* HS_POPN_SET (0x24) Fields */ +#define TWL4030_VMID_EN 0x40 +#define TWL4030_EXTMUTE 0x20 +#define TWL4030_RAMP_DELAY 0x1C +#define TWL4030_RAMP_DELAY_20MS 0x00 +#define TWL4030_RAMP_DELAY_40MS 0x04 +#define TWL4030_RAMP_DELAY_81MS 0x08 +#define TWL4030_RAMP_DELAY_161MS 0x0C +#define TWL4030_RAMP_DELAY_323MS 0x10 +#define TWL4030_RAMP_DELAY_645MS 0x14 +#define TWL4030_RAMP_DELAY_1291MS 0x18 +#define TWL4030_RAMP_DELAY_2581MS 0x1C +#define TWL4030_RAMP_EN 0x02 + +/* PREDL_CTL (0x25) */ +#define TWL4030_PREDL_GAIN 0x30 + +/* PREDR_CTL (0x26) */ +#define TWL4030_PREDR_GAIN 0x30 + +/* PRECKL_CTL (0x27) */ +#define TWL4030_PRECKL_GAIN 0x30 + +/* PRECKR_CTL (0x28) */ +#define TWL4030_PRECKR_GAIN 0x30 + +/* HFL_CTL (0x29, 0x2A) Fields */ +#define TWL4030_HF_CTL_HB_EN 0x04 +#define TWL4030_HF_CTL_LOOP_EN 0x08 +#define TWL4030_HF_CTL_RAMP_EN 0x10 +#define TWL4030_HF_CTL_REF_EN 
0x20 + +/* APLL_CTL (0x3A) Fields */ +#define TWL4030_APLL_EN 0x10 +#define TWL4030_APLL_INFREQ 0x0F +#define TWL4030_APLL_INFREQ_19200KHZ 0x05 +#define TWL4030_APLL_INFREQ_26000KHZ 0x06 +#define TWL4030_APLL_INFREQ_38400KHZ 0x0F + +/* REG_MISC_SET_1 (0x3E) Fields */ +#define TWL4030_CLK64_EN 0x80 +#define TWL4030_SCRAMBLE_EN 0x40 +#define TWL4030_FMLOOP_EN 0x20 +#define TWL4030_SMOOTH_ANAVOL_EN 0x02 +#define TWL4030_DIGMIC_LR_SWAP_EN 0x01 + +/* VIBRA_CTL (0x45) */ +#define TWL4030_VIBRA_EN 0x01 +#define TWL4030_VIBRA_DIR 0x02 +#define TWL4030_VIBRA_AUDIO_SEL_L1 (0x00 << 2) +#define TWL4030_VIBRA_AUDIO_SEL_R1 (0x01 << 2) +#define TWL4030_VIBRA_AUDIO_SEL_L2 (0x02 << 2) +#define TWL4030_VIBRA_AUDIO_SEL_R2 (0x03 << 2) +#define TWL4030_VIBRA_SEL 0x10 +#define TWL4030_VIBRA_DIR_SEL 0x20 + +/* TWL4030 codec resource IDs */ +enum twl4030_audio_res { + TWL4030_AUDIO_RES_POWER = 0, + TWL4030_AUDIO_RES_APLL, + TWL4030_AUDIO_RES_MAX, +}; + +int twl4030_audio_disable_resource(enum twl4030_audio_res id); +int twl4030_audio_enable_resource(enum twl4030_audio_res id); +unsigned int twl4030_audio_get_mclk(void); + +#endif /* End of __TWL4030_CODEC_H__ */ diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h new file mode 100644 index 000000000..a2e88761c --- /dev/null +++ b/include/linux/mfd/twl6040.h @@ -0,0 +1,271 @@ +/* + * MFD driver for twl6040 + * + * Authors: Jorge Eduardo Candelaria + * Misael Lopez Cruz + * + * Copyright: (C) 2011 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
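The TWL4030 codec bit fields above (CODEC_MODE, HS_GAIN_SET) combine by OR-ing values within their masks. A stand-alone sketch with the field values quoted from this header; whether these particular settings suit a given board is out of scope, only the composition is shown:

#include <stdio.h>
#include <stdint.h>

#define TWL4030_APLL_RATE               0xF0
#define TWL4030_APLL_RATE_44100         0x90
#define TWL4030_CODECPDZ                0x02
#define TWL4030_OPT_MODE                0x01
#define TWL4030_OPTION_1                (1 << 0)

#define TWL4030_HSR_GAIN_0DB            0x08
#define TWL4030_HSL_GAIN_0DB            0x02

int main(void)
{
        /* CODEC_MODE: 44.1 kHz APLL rate, option 1, codec powered (PDZ set). */
        uint8_t codec_mode = (TWL4030_APLL_RATE_44100 & TWL4030_APLL_RATE) |
                             (TWL4030_OPTION_1 & TWL4030_OPT_MODE) |
                             TWL4030_CODECPDZ;

        /* HS_GAIN_SET: both headset channels at 0 dB. */
        uint8_t hs_gain = TWL4030_HSL_GAIN_0DB | TWL4030_HSR_GAIN_0DB;

        /* prints: CODEC_MODE=0x93 HS_GAIN_SET=0x0a */
        printf("CODEC_MODE=0x%02x HS_GAIN_SET=0x%02x\n", codec_mode, hs_gain);
        return 0;
}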
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __TWL6040_CODEC_H__ +#define __TWL6040_CODEC_H__ + +#include +#include +#include +#include + +#define TWL6040_REG_ASICID 0x01 +#define TWL6040_REG_ASICREV 0x02 +#define TWL6040_REG_INTID 0x03 +#define TWL6040_REG_INTMR 0x04 +#define TWL6040_REG_NCPCTL 0x05 +#define TWL6040_REG_LDOCTL 0x06 +#define TWL6040_REG_HPPLLCTL 0x07 +#define TWL6040_REG_LPPLLCTL 0x08 +#define TWL6040_REG_LPPLLDIV 0x09 +#define TWL6040_REG_AMICBCTL 0x0A +#define TWL6040_REG_DMICBCTL 0x0B +#define TWL6040_REG_MICLCTL 0x0C +#define TWL6040_REG_MICRCTL 0x0D +#define TWL6040_REG_MICGAIN 0x0E +#define TWL6040_REG_LINEGAIN 0x0F +#define TWL6040_REG_HSLCTL 0x10 +#define TWL6040_REG_HSRCTL 0x11 +#define TWL6040_REG_HSGAIN 0x12 +#define TWL6040_REG_EARCTL 0x13 +#define TWL6040_REG_HFLCTL 0x14 +#define TWL6040_REG_HFLGAIN 0x15 +#define TWL6040_REG_HFRCTL 0x16 +#define TWL6040_REG_HFRGAIN 0x17 +#define TWL6040_REG_VIBCTLL 0x18 +#define TWL6040_REG_VIBDATL 0x19 +#define TWL6040_REG_VIBCTLR 0x1A +#define TWL6040_REG_VIBDATR 0x1B +#define TWL6040_REG_HKCTL1 0x1C +#define TWL6040_REG_HKCTL2 0x1D +#define TWL6040_REG_GPOCTL 0x1E +#define TWL6040_REG_ALB 0x1F +#define TWL6040_REG_DLB 0x20 +#define TWL6040_REG_TRIM1 0x28 +#define TWL6040_REG_TRIM2 0x29 +#define TWL6040_REG_TRIM3 0x2A +#define TWL6040_REG_HSOTRIM 0x2B +#define TWL6040_REG_HFOTRIM 0x2C +#define TWL6040_REG_ACCCTL 0x2D +#define TWL6040_REG_STATUS 0x2E + +/* INTID (0x03) fields */ + +#define TWL6040_THINT 0x01 +#define TWL6040_PLUGINT 0x02 +#define TWL6040_UNPLUGINT 0x04 +#define TWL6040_HOOKINT 0x08 +#define TWL6040_HFINT 0x10 +#define TWL6040_VIBINT 0x20 +#define TWL6040_READYINT 0x40 + +/* INTMR (0x04) fields */ + +#define TWL6040_THMSK 0x01 +#define TWL6040_PLUGMSK 0x02 +#define TWL6040_HOOKMSK 0x08 +#define TWL6040_HFMSK 0x10 +#define TWL6040_VIBMSK 0x20 +#define TWL6040_READYMSK 0x40 +#define TWL6040_ALLINT_MSK 0x7B + +/* NCPCTL (0x05) fields */ + +#define TWL6040_NCPENA 0x01 +#define TWL6040_NCPOPEN 0x40 + +/* LDOCTL (0x06) fields */ + +#define TWL6040_LSLDOENA 0x01 +#define TWL6040_HSLDOENA 0x04 +#define TWL6040_REFENA 0x40 +#define TWL6040_OSCENA 0x80 + +/* HPPLLCTL (0x07) fields */ + +#define TWL6040_HPLLENA 0x01 +#define TWL6040_HPLLRST 0x02 +#define TWL6040_HPLLBP 0x04 +#define TWL6040_HPLLSQRENA 0x08 +#define TWL6040_MCLK_12000KHZ (0 << 5) +#define TWL6040_MCLK_19200KHZ (1 << 5) +#define TWL6040_MCLK_26000KHZ (2 << 5) +#define TWL6040_MCLK_38400KHZ (3 << 5) +#define TWL6040_MCLK_MSK 0x60 + +/* LPPLLCTL (0x08) fields */ + +#define TWL6040_LPLLENA 0x01 +#define TWL6040_LPLLRST 0x02 +#define TWL6040_LPLLSEL 0x04 +#define TWL6040_LPLLFIN 0x08 +#define TWL6040_HPLLSEL 0x10 + +/* HSLCTL/R (0x10/0x11) fields */ + +#define TWL6040_HSDACENA (1 << 0) +#define TWL6040_HSDACMODE (1 << 1) +#define TWL6040_HSDRVENA (1 << 2) +#define TWL6040_HSDRVMODE (1 << 3) + +/* HFLCTL/R (0x14/0x16) fields */ + +#define TWL6040_HFDACENA (1 << 0) +#define TWL6040_HFPGAENA (1 << 1) +#define TWL6040_HFDRVENA (1 << 4) +#define TWL6040_HFSWENA (1 << 6) + +/* VIBCTLL/R (0x18/0x1A) fields */ + +#define TWL6040_VIBENA (1 << 0) +#define TWL6040_VIBSEL (1 << 1) +#define TWL6040_VIBCTRL (1 << 2) +#define TWL6040_VIBCTRL_P (1 << 3) +#define TWL6040_VIBCTRL_N (1 << 4) + +/* VIBDATL/R (0x19/0x1B) fields */ + +#define TWL6040_VIBDAT_MAX 0x64 + +/* GPOCTL (0x1E) 
fields */ + +#define TWL6040_GPO1 0x01 +#define TWL6040_GPO2 0x02 +#define TWL6040_GPO3 0x04 + +/* ACCCTL (0x2D) fields */ + +#define TWL6040_I2CSEL 0x01 +#define TWL6040_RESETSPLIT 0x04 +#define TWL6040_INTCLRMODE 0x08 +#define TWL6040_I2CMODE(x) ((x & 0x3) << 4) + +/* STATUS (0x2E) fields */ + +#define TWL6040_PLUGCOMP 0x02 +#define TWL6040_VIBLOCDET 0x10 +#define TWL6040_VIBROCDET 0x20 +#define TWL6040_TSHUTDET 0x40 + +#define TWL6040_CELLS 4 + +#define TWL6040_REV_ES1_0 0x00 +#define TWL6040_REV_ES1_1 0x01 /* Rev ES1.1 and ES1.2 */ +#define TWL6040_REV_ES1_3 0x02 +#define TWL6041_REV_ES2_0 0x10 + +#define TWL6040_IRQ_TH 0 +#define TWL6040_IRQ_PLUG 1 +#define TWL6040_IRQ_HOOK 2 +#define TWL6040_IRQ_HF 3 +#define TWL6040_IRQ_VIB 4 +#define TWL6040_IRQ_READY 5 + +/* PLL selection */ +#define TWL6040_SYSCLK_SEL_LPPLL 0 +#define TWL6040_SYSCLK_SEL_HPPLL 1 + +#define TWL6040_GPO_MAX 3 + +/* TODO: All platform data struct can be removed */ +struct twl6040_codec_data { + u16 hs_left_step; + u16 hs_right_step; + u16 hf_left_step; + u16 hf_right_step; +}; + +struct twl6040_vibra_data { + unsigned int vibldrv_res; /* left driver resistance */ + unsigned int vibrdrv_res; /* right driver resistance */ + unsigned int viblmotor_res; /* left motor resistance */ + unsigned int vibrmotor_res; /* right motor resistance */ + int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */ + int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */ +}; + +struct twl6040_gpo_data { + int gpio_base; +}; + +struct twl6040_platform_data { + int audpwron_gpio; /* audio power-on gpio */ + + struct twl6040_codec_data *codec; + struct twl6040_vibra_data *vibra; + struct twl6040_gpo_data *gpo; +}; + +struct regmap; +struct regmap_irq_chips_data; + +struct twl6040 { + struct device *dev; + struct regmap *regmap; + struct regmap_irq_chip_data *irq_data; + struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */ + struct clk *clk32k; + struct clk *mclk; + struct mutex mutex; + struct mutex irq_mutex; + struct mfd_cell cells[TWL6040_CELLS]; + struct completion ready; + + int audpwron; + int power_count; + int rev; + + /* PLL configuration */ + int pll; + unsigned int sysclk_rate; + unsigned int mclk_rate; + + unsigned int irq; + unsigned int irq_ready; + unsigned int irq_th; +}; + +int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg); +int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, + u8 val); +int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, + u8 mask); +int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, + u8 mask); +int twl6040_power(struct twl6040 *twl6040, int on); +int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, + unsigned int freq_in, unsigned int freq_out); +int twl6040_get_pll(struct twl6040 *twl6040); +unsigned int twl6040_get_sysclk(struct twl6040 *twl6040); + +/* Get the combined status of the vibra control register */ +int twl6040_get_vibralr_status(struct twl6040 *twl6040); + +static inline int twl6040_get_revid(struct twl6040 *twl6040) +{ + return twl6040->rev; +} + + +#endif /* End of __TWL6040_CODEC_H__ */ diff --git a/include/linux/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h new file mode 100644 index 000000000..88f90cbf8 --- /dev/null +++ b/include/linux/mfd/ucb1x00.h @@ -0,0 +1,260 @@ +/* + * linux/include/mfd/ucb1x00.h + * + * Copyright (C) 2001 Russell King, All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + */ +#ifndef UCB1200_H +#define UCB1200_H + +#include +#include +#include +#include + +#define UCB_IO_DATA 0x00 +#define UCB_IO_DIR 0x01 + +#define UCB_IO_0 (1 << 0) +#define UCB_IO_1 (1 << 1) +#define UCB_IO_2 (1 << 2) +#define UCB_IO_3 (1 << 3) +#define UCB_IO_4 (1 << 4) +#define UCB_IO_5 (1 << 5) +#define UCB_IO_6 (1 << 6) +#define UCB_IO_7 (1 << 7) +#define UCB_IO_8 (1 << 8) +#define UCB_IO_9 (1 << 9) + +#define UCB_IE_RIS 0x02 +#define UCB_IE_FAL 0x03 +#define UCB_IE_STATUS 0x04 +#define UCB_IE_CLEAR 0x04 +#define UCB_IE_ADC (1 << 11) +#define UCB_IE_TSPX (1 << 12) +#define UCB_IE_TSMX (1 << 13) +#define UCB_IE_TCLIP (1 << 14) +#define UCB_IE_ACLIP (1 << 15) + +#define UCB_IRQ_TSPX 12 + +#define UCB_TC_A 0x05 +#define UCB_TC_A_LOOP (1 << 7) /* UCB1200 */ +#define UCB_TC_A_AMPL (1 << 7) /* UCB1300 */ + +#define UCB_TC_B 0x06 +#define UCB_TC_B_VOICE_ENA (1 << 3) +#define UCB_TC_B_CLIP (1 << 4) +#define UCB_TC_B_ATT (1 << 6) +#define UCB_TC_B_SIDE_ENA (1 << 11) +#define UCB_TC_B_MUTE (1 << 13) +#define UCB_TC_B_IN_ENA (1 << 14) +#define UCB_TC_B_OUT_ENA (1 << 15) + +#define UCB_AC_A 0x07 +#define UCB_AC_B 0x08 +#define UCB_AC_B_LOOP (1 << 8) +#define UCB_AC_B_MUTE (1 << 13) +#define UCB_AC_B_IN_ENA (1 << 14) +#define UCB_AC_B_OUT_ENA (1 << 15) + +#define UCB_TS_CR 0x09 +#define UCB_TS_CR_TSMX_POW (1 << 0) +#define UCB_TS_CR_TSPX_POW (1 << 1) +#define UCB_TS_CR_TSMY_POW (1 << 2) +#define UCB_TS_CR_TSPY_POW (1 << 3) +#define UCB_TS_CR_TSMX_GND (1 << 4) +#define UCB_TS_CR_TSPX_GND (1 << 5) +#define UCB_TS_CR_TSMY_GND (1 << 6) +#define UCB_TS_CR_TSPY_GND (1 << 7) +#define UCB_TS_CR_MODE_INT (0 << 8) +#define UCB_TS_CR_MODE_PRES (1 << 8) +#define UCB_TS_CR_MODE_POS (2 << 8) +#define UCB_TS_CR_BIAS_ENA (1 << 11) +#define UCB_TS_CR_TSPX_LOW (1 << 12) +#define UCB_TS_CR_TSMX_LOW (1 << 13) + +#define UCB_ADC_CR 0x0a +#define UCB_ADC_SYNC_ENA (1 << 0) +#define UCB_ADC_VREFBYP_CON (1 << 1) +#define UCB_ADC_INP_TSPX (0 << 2) +#define UCB_ADC_INP_TSMX (1 << 2) +#define UCB_ADC_INP_TSPY (2 << 2) +#define UCB_ADC_INP_TSMY (3 << 2) +#define UCB_ADC_INP_AD0 (4 << 2) +#define UCB_ADC_INP_AD1 (5 << 2) +#define UCB_ADC_INP_AD2 (6 << 2) +#define UCB_ADC_INP_AD3 (7 << 2) +#define UCB_ADC_EXT_REF (1 << 5) +#define UCB_ADC_START (1 << 7) +#define UCB_ADC_ENA (1 << 15) + +#define UCB_ADC_DATA 0x0b +#define UCB_ADC_DAT_VAL (1 << 15) +#define UCB_ADC_DAT(x) (((x) & 0x7fe0) >> 5) + +#define UCB_ID 0x0c +#define UCB_ID_1200 0x1004 +#define UCB_ID_1300 0x1005 +#define UCB_ID_TC35143 0x9712 + +#define UCB_MODE 0x0d +#define UCB_MODE_DYN_VFLAG_ENA (1 << 12) +#define UCB_MODE_AUD_OFF_CAN (1 << 13) + +enum ucb1x00_reset { + UCB_RST_PROBE, + UCB_RST_RESUME, + UCB_RST_SUSPEND, + UCB_RST_REMOVE, + UCB_RST_PROBE_FAIL, +}; + +struct ucb1x00_plat_data { + void (*reset)(enum ucb1x00_reset); + unsigned irq_base; + int gpio_base; + unsigned can_wakeup; +}; + +struct ucb1x00 { + raw_spinlock_t irq_lock; + struct mcp *mcp; + unsigned int irq; + int irq_base; + struct mutex adc_mutex; + spinlock_t io_lock; + u16 id; + u16 io_dir; + u16 io_out; + u16 adc_cr; + u16 irq_fal_enbl; + u16 irq_ris_enbl; + u16 irq_mask; + u16 irq_wake; + struct device dev; + struct list_head node; + struct list_head devs; + struct gpio_chip gpio; +}; + +struct ucb1x00_driver; + +struct ucb1x00_dev { + struct list_head dev_node; + struct 
list_head drv_node; + struct ucb1x00 *ucb; + struct ucb1x00_driver *drv; + void *priv; +}; + +struct ucb1x00_driver { + struct list_head node; + struct list_head devs; + int (*add)(struct ucb1x00_dev *dev); + void (*remove)(struct ucb1x00_dev *dev); + int (*suspend)(struct ucb1x00_dev *dev); + int (*resume)(struct ucb1x00_dev *dev); +}; + +#define classdev_to_ucb1x00(cd) container_of(cd, struct ucb1x00, dev) + +int ucb1x00_register_driver(struct ucb1x00_driver *); +void ucb1x00_unregister_driver(struct ucb1x00_driver *); + +/** + * ucb1x00_clkrate - return the UCB1x00 SIB clock rate + * @ucb: UCB1x00 structure describing chip + * + * Return the SIB clock rate in Hz. + */ +static inline unsigned int ucb1x00_clkrate(struct ucb1x00 *ucb) +{ + return mcp_get_sclk_rate(ucb->mcp); +} + +/** + * ucb1x00_enable - enable the UCB1x00 SIB clock + * @ucb: UCB1x00 structure describing chip + * + * Enable the SIB clock. This can be called multiple times. + */ +static inline void ucb1x00_enable(struct ucb1x00 *ucb) +{ + mcp_enable(ucb->mcp); +} + +/** + * ucb1x00_disable - disable the UCB1x00 SIB clock + * @ucb: UCB1x00 structure describing chip + * + * Disable the SIB clock. The SIB clock will only be disabled + * when the number of ucb1x00_enable calls match the number of + * ucb1x00_disable calls. + */ +static inline void ucb1x00_disable(struct ucb1x00 *ucb) +{ + mcp_disable(ucb->mcp); +} + +/** + * ucb1x00_reg_write - write a UCB1x00 register + * @ucb: UCB1x00 structure describing chip + * @reg: UCB1x00 4-bit register index to write + * @val: UCB1x00 16-bit value to write + * + * Write the UCB1x00 register @reg with value @val. The SIB + * clock must be running for this function to return. + */ +static inline void ucb1x00_reg_write(struct ucb1x00 *ucb, unsigned int reg, unsigned int val) +{ + mcp_reg_write(ucb->mcp, reg, val); +} + +/** + * ucb1x00_reg_read - read a UCB1x00 register + * @ucb: UCB1x00 structure describing chip + * @reg: UCB1x00 4-bit register index to write + * + * Read the UCB1x00 register @reg and return its value. The SIB + * clock must be running for this function to return. 
+ */ +static inline unsigned int ucb1x00_reg_read(struct ucb1x00 *ucb, unsigned int reg) +{ + return mcp_reg_read(ucb->mcp, reg); +} +/** + * ucb1x00_set_audio_divisor - + * @ucb: UCB1x00 structure describing chip + * @div: SIB clock divisor + */ +static inline void ucb1x00_set_audio_divisor(struct ucb1x00 *ucb, unsigned int div) +{ + mcp_set_audio_divisor(ucb->mcp, div); +} + +/** + * ucb1x00_set_telecom_divisor - + * @ucb: UCB1x00 structure describing chip + * @div: SIB clock divisor + */ +static inline void ucb1x00_set_telecom_divisor(struct ucb1x00 *ucb, unsigned int div) +{ + mcp_set_telecom_divisor(ucb->mcp, div); +} + +void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int, unsigned int); +void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int, unsigned int); +unsigned int ucb1x00_io_read(struct ucb1x00 *ucb); + +#define UCB_NOSYNC (0) +#define UCB_SYNC (1) + +unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync); +void ucb1x00_adc_enable(struct ucb1x00 *ucb); +void ucb1x00_adc_disable(struct ucb1x00 *ucb); + +#endif diff --git a/include/linux/mfd/viperboard.h b/include/linux/mfd/viperboard.h new file mode 100644 index 000000000..193452848 --- /dev/null +++ b/include/linux/mfd/viperboard.h @@ -0,0 +1,110 @@ +/* + * include/linux/mfd/viperboard.h + * + * Nano River Technologies viperboard definitions + * + * (C) 2012 by Lemonage GmbH + * Author: Lars Poeschel + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_VIPERBOARD_H__ +#define __MFD_VIPERBOARD_H__ + +#include +#include + +#define VPRBRD_EP_OUT 0x02 +#define VPRBRD_EP_IN 0x86 + +#define VPRBRD_I2C_MSG_LEN 512 /* max length of a msg on USB level */ + +#define VPRBRD_I2C_FREQ_6MHZ 1 /* 6 MBit/s */ +#define VPRBRD_I2C_FREQ_3MHZ 2 /* 3 MBit/s */ +#define VPRBRD_I2C_FREQ_1MHZ 3 /* 1 MBit/s */ +#define VPRBRD_I2C_FREQ_FAST 4 /* 400 kbit/s */ +#define VPRBRD_I2C_FREQ_400KHZ VPRBRD_I2C_FREQ_FAST +#define VPRBRD_I2C_FREQ_200KHZ 5 /* 200 kbit/s */ +#define VPRBRD_I2C_FREQ_STD 6 /* 100 kbit/s */ +#define VPRBRD_I2C_FREQ_100KHZ VPRBRD_I2C_FREQ_STD +#define VPRBRD_I2C_FREQ_10KHZ 7 /* 10 kbit/s */ + +#define VPRBRD_I2C_CMD_WRITE 0x00 +#define VPRBRD_I2C_CMD_READ 0x01 +#define VPRBRD_I2C_CMD_ADDR 0x02 + +#define VPRBRD_USB_TYPE_OUT 0x40 +#define VPRBRD_USB_TYPE_IN 0xc0 +#define VPRBRD_USB_TIMEOUT_MS 100 +#define VPRBRD_USB_REQUEST_I2C_FREQ 0xe6 +#define VPRBRD_USB_REQUEST_I2C 0xe9 +#define VPRBRD_USB_REQUEST_MAJOR 0xea +#define VPRBRD_USB_REQUEST_MINOR 0xeb +#define VPRBRD_USB_REQUEST_ADC 0xec +#define VPRBRD_USB_REQUEST_GPIOA 0xed +#define VPRBRD_USB_REQUEST_GPIOB 0xdd + +struct vprbrd_i2c_write_hdr { + u8 cmd; + u16 addr; + u8 len1; + u8 len2; + u8 last; + u8 chan; + u16 spi; +} __packed; + +struct vprbrd_i2c_read_hdr { + u8 cmd; + u16 addr; + u8 len0; + u8 len1; + u8 len2; + u8 len3; + u8 len4; + u8 len5; + u16 tf1; /* transfer 1 length */ + u16 tf2; /* transfer 2 length */ +} __packed; + +struct vprbrd_i2c_status { + u8 unknown[11]; + u8 status; +} __packed; + +struct vprbrd_i2c_write_msg { + struct vprbrd_i2c_write_hdr header; + u8 data[VPRBRD_I2C_MSG_LEN + - sizeof(struct vprbrd_i2c_write_hdr)]; +} __packed; + +struct vprbrd_i2c_read_msg { + struct vprbrd_i2c_read_hdr header; + u8 data[VPRBRD_I2C_MSG_LEN + - sizeof(struct vprbrd_i2c_read_hdr)]; +} 
__packed; + +struct vprbrd_i2c_addr_msg { + u8 cmd; + u8 addr; + u8 unknown1; + u16 len; + u8 unknown2; + u8 unknown3; +} __packed; + +/* Structure to hold all device specific stuff */ +struct vprbrd { + struct usb_device *usb_dev; /* the usb device for this device */ + struct mutex lock; + u8 buf[sizeof(struct vprbrd_i2c_write_msg)]; + struct platform_device pdev; +}; + +#endif /* __MFD_VIPERBOARD_H__ */ diff --git a/include/linux/mfd/wl1273-core.h b/include/linux/mfd/wl1273-core.h new file mode 100644 index 000000000..db2f3f454 --- /dev/null +++ b/include/linux/mfd/wl1273-core.h @@ -0,0 +1,290 @@ +/* + * include/linux/mfd/wl1273-core.h + * + * Some definitions for the wl1273 radio receiver/transmitter chip. + * + * Copyright (C) 2010 Nokia Corporation + * Author: Matti J. Aaltonen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef WL1273_CORE_H +#define WL1273_CORE_H + +#include +#include + +#define WL1273_FM_DRIVER_NAME "wl1273-fm" +#define RX71_FM_I2C_ADDR 0x22 + +#define WL1273_STEREO_GET 0 +#define WL1273_RSSI_LVL_GET 1 +#define WL1273_IF_COUNT_GET 2 +#define WL1273_FLAG_GET 3 +#define WL1273_RDS_SYNC_GET 4 +#define WL1273_RDS_DATA_GET 5 +#define WL1273_FREQ_SET 10 +#define WL1273_AF_FREQ_SET 11 +#define WL1273_MOST_MODE_SET 12 +#define WL1273_MOST_BLEND_SET 13 +#define WL1273_DEMPH_MODE_SET 14 +#define WL1273_SEARCH_LVL_SET 15 +#define WL1273_BAND_SET 16 +#define WL1273_MUTE_STATUS_SET 17 +#define WL1273_RDS_PAUSE_LVL_SET 18 +#define WL1273_RDS_PAUSE_DUR_SET 19 +#define WL1273_RDS_MEM_SET 20 +#define WL1273_RDS_BLK_B_SET 21 +#define WL1273_RDS_MSK_B_SET 22 +#define WL1273_RDS_PI_MASK_SET 23 +#define WL1273_RDS_PI_SET 24 +#define WL1273_RDS_SYSTEM_SET 25 +#define WL1273_INT_MASK_SET 26 +#define WL1273_SEARCH_DIR_SET 27 +#define WL1273_VOLUME_SET 28 +#define WL1273_AUDIO_ENABLE 29 +#define WL1273_PCM_MODE_SET 30 +#define WL1273_I2S_MODE_CONFIG_SET 31 +#define WL1273_POWER_SET 32 +#define WL1273_INTX_CONFIG_SET 33 +#define WL1273_PULL_EN_SET 34 +#define WL1273_HILO_SET 35 +#define WL1273_SWITCH2FREF 36 +#define WL1273_FREQ_DRIFT_REPORT 37 + +#define WL1273_PCE_GET 40 +#define WL1273_FIRM_VER_GET 41 +#define WL1273_ASIC_VER_GET 42 +#define WL1273_ASIC_ID_GET 43 +#define WL1273_MAN_ID_GET 44 +#define WL1273_TUNER_MODE_SET 45 +#define WL1273_STOP_SEARCH 46 +#define WL1273_RDS_CNTRL_SET 47 + +#define WL1273_WRITE_HARDWARE_REG 100 +#define WL1273_CODE_DOWNLOAD 101 +#define WL1273_RESET 102 + +#define WL1273_FM_POWER_MODE 254 +#define WL1273_FM_INTERRUPT 255 + +/* Transmitter API */ + +#define WL1273_CHANL_SET 55 +#define WL1273_SCAN_SPACING_SET 56 +#define WL1273_REF_SET 57 +#define WL1273_POWER_ENB_SET 90 +#define WL1273_POWER_ATT_SET 58 +#define WL1273_POWER_LEV_SET 59 +#define WL1273_AUDIO_DEV_SET 60 +#define WL1273_PILOT_DEV_SET 61 +#define WL1273_RDS_DEV_SET 62 +#define WL1273_PUPD_SET 91 +#define WL1273_AUDIO_IO_SET 63 +#define WL1273_PREMPH_SET 64 +#define 
WL1273_MONO_SET 66 +#define WL1273_MUTE 92 +#define WL1273_MPX_LMT_ENABLE 67 +#define WL1273_PI_SET 93 +#define WL1273_ECC_SET 69 +#define WL1273_PTY 70 +#define WL1273_AF 71 +#define WL1273_DISPLAY_MODE 74 +#define WL1273_RDS_REP_SET 77 +#define WL1273_RDS_CONFIG_DATA_SET 98 +#define WL1273_RDS_DATA_SET 99 +#define WL1273_RDS_DATA_ENB 94 +#define WL1273_TA_SET 78 +#define WL1273_TP_SET 79 +#define WL1273_DI_SET 80 +#define WL1273_MS_SET 81 +#define WL1273_PS_SCROLL_SPEED 82 +#define WL1273_TX_AUDIO_LEVEL_TEST 96 +#define WL1273_TX_AUDIO_LEVEL_TEST_THRESHOLD 73 +#define WL1273_TX_AUDIO_INPUT_LEVEL_RANGE_SET 54 +#define WL1273_RX_ANTENNA_SELECT 87 +#define WL1273_I2C_DEV_ADDR_SET 86 +#define WL1273_REF_ERR_CALIB_PARAM_SET 88 +#define WL1273_REF_ERR_CALIB_PERIODICITY_SET 89 +#define WL1273_SOC_INT_TRIGGER 52 +#define WL1273_SOC_AUDIO_PATH_SET 83 +#define WL1273_SOC_PCMI_OVERRIDE 84 +#define WL1273_SOC_I2S_OVERRIDE 85 +#define WL1273_RSSI_BLOCK_SCAN_FREQ_SET 95 +#define WL1273_RSSI_BLOCK_SCAN_START 97 +#define WL1273_RSSI_BLOCK_SCAN_DATA_GET 5 +#define WL1273_READ_FMANT_TUNE_VALUE 104 + +#define WL1273_RDS_OFF 0 +#define WL1273_RDS_ON 1 +#define WL1273_RDS_RESET 2 + +#define WL1273_AUDIO_DIGITAL 0 +#define WL1273_AUDIO_ANALOG 1 + +#define WL1273_MODE_RX BIT(0) +#define WL1273_MODE_TX BIT(1) +#define WL1273_MODE_OFF BIT(2) +#define WL1273_MODE_SUSPENDED BIT(3) + +#define WL1273_RADIO_CHILD BIT(0) +#define WL1273_CODEC_CHILD BIT(1) + +#define WL1273_RX_MONO 1 +#define WL1273_RX_STEREO 0 +#define WL1273_TX_MONO 0 +#define WL1273_TX_STEREO 1 + +#define WL1273_MAX_VOLUME 0xffff +#define WL1273_DEFAULT_VOLUME 0x78b8 + +/* I2S protocol, left channel first, data width 16 bits */ +#define WL1273_PCM_DEF_MODE 0x00 + +/* Rx */ +#define WL1273_AUDIO_ENABLE_I2S BIT(0) +#define WL1273_AUDIO_ENABLE_ANALOG BIT(1) + +/* Tx */ +#define WL1273_AUDIO_IO_SET_ANALOG 0 +#define WL1273_AUDIO_IO_SET_I2S 1 + +#define WL1273_PUPD_SET_OFF 0x00 +#define WL1273_PUPD_SET_ON 0x01 +#define WL1273_PUPD_SET_RETENTION 0x10 + +/* I2S mode */ +#define WL1273_IS2_WIDTH_32 0x0 +#define WL1273_IS2_WIDTH_40 0x1 +#define WL1273_IS2_WIDTH_22_23 0x2 +#define WL1273_IS2_WIDTH_23_22 0x3 +#define WL1273_IS2_WIDTH_48 0x4 +#define WL1273_IS2_WIDTH_50 0x5 +#define WL1273_IS2_WIDTH_60 0x6 +#define WL1273_IS2_WIDTH_64 0x7 +#define WL1273_IS2_WIDTH_80 0x8 +#define WL1273_IS2_WIDTH_96 0x9 +#define WL1273_IS2_WIDTH_128 0xa +#define WL1273_IS2_WIDTH 0xf + +#define WL1273_IS2_FORMAT_STD (0x0 << 4) +#define WL1273_IS2_FORMAT_LEFT (0x1 << 4) +#define WL1273_IS2_FORMAT_RIGHT (0x2 << 4) +#define WL1273_IS2_FORMAT_USER (0x3 << 4) + +#define WL1273_IS2_MASTER (0x0 << 6) +#define WL1273_IS2_SLAVEW (0x1 << 6) + +#define WL1273_IS2_TRI_AFTER_SENDING (0x0 << 7) +#define WL1273_IS2_TRI_ALWAYS_ACTIVE (0x1 << 7) + +#define WL1273_IS2_SDOWS_RR (0x0 << 8) +#define WL1273_IS2_SDOWS_RF (0x1 << 8) +#define WL1273_IS2_SDOWS_FR (0x2 << 8) +#define WL1273_IS2_SDOWS_FF (0x3 << 8) + +#define WL1273_IS2_TRI_OPT (0x0 << 10) +#define WL1273_IS2_TRI_ALWAYS (0x1 << 10) + +#define WL1273_IS2_RATE_48K (0x0 << 12) +#define WL1273_IS2_RATE_44_1K (0x1 << 12) +#define WL1273_IS2_RATE_32K (0x2 << 12) +#define WL1273_IS2_RATE_22_05K (0x4 << 12) +#define WL1273_IS2_RATE_16K (0x5 << 12) +#define WL1273_IS2_RATE_12K (0x8 << 12) +#define WL1273_IS2_RATE_11_025 (0x9 << 12) +#define WL1273_IS2_RATE_8K (0xa << 12) +#define WL1273_IS2_RATE (0xf << 12) + +#define WL1273_I2S_DEF_MODE (WL1273_IS2_WIDTH_32 | \ + WL1273_IS2_FORMAT_STD | \ + WL1273_IS2_MASTER | \ + WL1273_IS2_TRI_AFTER_SENDING | \ 
+ WL1273_IS2_SDOWS_RR | \ + WL1273_IS2_TRI_OPT | \ + WL1273_IS2_RATE_48K) + +#define SCHAR_MIN (-128) +#define SCHAR_MAX 127 + +#define WL1273_FR_EVENT BIT(0) +#define WL1273_BL_EVENT BIT(1) +#define WL1273_RDS_EVENT BIT(2) +#define WL1273_BBLK_EVENT BIT(3) +#define WL1273_LSYNC_EVENT BIT(4) +#define WL1273_LEV_EVENT BIT(5) +#define WL1273_IFFR_EVENT BIT(6) +#define WL1273_PI_EVENT BIT(7) +#define WL1273_PD_EVENT BIT(8) +#define WL1273_STIC_EVENT BIT(9) +#define WL1273_MAL_EVENT BIT(10) +#define WL1273_POW_ENB_EVENT BIT(11) +#define WL1273_SCAN_OVER_EVENT BIT(12) +#define WL1273_ERROR_EVENT BIT(13) + +#define TUNER_MODE_STOP_SEARCH 0 +#define TUNER_MODE_PRESET 1 +#define TUNER_MODE_AUTO_SEEK 2 +#define TUNER_MODE_AF 3 +#define TUNER_MODE_AUTO_SEEK_PI 4 +#define TUNER_MODE_AUTO_SEEK_BULK 5 + +#define RDS_BLOCK_SIZE 3 + +struct wl1273_fm_platform_data { + int (*request_resources) (struct i2c_client *client); + void (*free_resources) (void); + void (*enable) (void); + void (*disable) (void); + + u8 forbidden_modes; + unsigned int children; +}; + +#define WL1273_FM_CORE_CELLS 2 + +#define WL1273_BAND_OTHER 0 +#define WL1273_BAND_JAPAN 1 + +#define WL1273_BAND_JAPAN_LOW 76000 +#define WL1273_BAND_JAPAN_HIGH 90000 +#define WL1273_BAND_OTHER_LOW 87500 +#define WL1273_BAND_OTHER_HIGH 108000 + +#define WL1273_BAND_TX_LOW 76000 +#define WL1273_BAND_TX_HIGH 108000 + +struct wl1273_core { + struct mfd_cell cells[WL1273_FM_CORE_CELLS]; + struct wl1273_fm_platform_data *pdata; + + unsigned int mode; + unsigned int i2s_mode; + unsigned int volume; + unsigned int audio_mode; + unsigned int channel_number; + struct mutex lock; /* for serializing fm radio operations */ + + struct i2c_client *client; + + int (*read)(struct wl1273_core *core, u8, u16 *); + int (*write)(struct wl1273_core *core, u8, u16); + int (*write_data)(struct wl1273_core *core, u8 *, u16); + int (*set_audio)(struct wl1273_core *core, unsigned int); + int (*set_volume)(struct wl1273_core *core, unsigned int); +}; + +#endif /* ifndef WL1273_CORE_H */ diff --git a/include/linux/mfd/wm831x/auxadc.h b/include/linux/mfd/wm831x/auxadc.h new file mode 100644 index 000000000..867aa23f9 --- /dev/null +++ b/include/linux/mfd/wm831x/auxadc.h @@ -0,0 +1,218 @@ +/* + * include/linux/mfd/wm831x/auxadc.h -- Auxiliary ADC interface for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_AUXADC_H__ +#define __MFD_WM831X_AUXADC_H__ + +struct wm831x; + +/* + * R16429 (0x402D) - AuxADC Data + */ +#define WM831X_AUX_DATA_SRC_MASK 0xF000 /* AUX_DATA_SRC - [15:12] */ +#define WM831X_AUX_DATA_SRC_SHIFT 12 /* AUX_DATA_SRC - [15:12] */ +#define WM831X_AUX_DATA_SRC_WIDTH 4 /* AUX_DATA_SRC - [15:12] */ +#define WM831X_AUX_DATA_MASK 0x0FFF /* AUX_DATA - [11:0] */ +#define WM831X_AUX_DATA_SHIFT 0 /* AUX_DATA - [11:0] */ +#define WM831X_AUX_DATA_WIDTH 12 /* AUX_DATA - [11:0] */ + +/* + * R16430 (0x402E) - AuxADC Control + */ +#define WM831X_AUX_ENA 0x8000 /* AUX_ENA */ +#define WM831X_AUX_ENA_MASK 0x8000 /* AUX_ENA */ +#define WM831X_AUX_ENA_SHIFT 15 /* AUX_ENA */ +#define WM831X_AUX_ENA_WIDTH 1 /* AUX_ENA */ +#define WM831X_AUX_CVT_ENA 0x4000 /* AUX_CVT_ENA */ +#define WM831X_AUX_CVT_ENA_MASK 0x4000 /* AUX_CVT_ENA */ +#define WM831X_AUX_CVT_ENA_SHIFT 14 /* AUX_CVT_ENA */ +#define WM831X_AUX_CVT_ENA_WIDTH 1 /* AUX_CVT_ENA */ +#define WM831X_AUX_SLPENA 0x1000 /* AUX_SLPENA */ +#define WM831X_AUX_SLPENA_MASK 0x1000 /* AUX_SLPENA */ +#define WM831X_AUX_SLPENA_SHIFT 12 /* AUX_SLPENA */ +#define WM831X_AUX_SLPENA_WIDTH 1 /* AUX_SLPENA */ +#define WM831X_AUX_FRC_ENA 0x0800 /* AUX_FRC_ENA */ +#define WM831X_AUX_FRC_ENA_MASK 0x0800 /* AUX_FRC_ENA */ +#define WM831X_AUX_FRC_ENA_SHIFT 11 /* AUX_FRC_ENA */ +#define WM831X_AUX_FRC_ENA_WIDTH 1 /* AUX_FRC_ENA */ +#define WM831X_AUX_RATE_MASK 0x003F /* AUX_RATE - [5:0] */ +#define WM831X_AUX_RATE_SHIFT 0 /* AUX_RATE - [5:0] */ +#define WM831X_AUX_RATE_WIDTH 6 /* AUX_RATE - [5:0] */ + +/* + * R16431 (0x402F) - AuxADC Source + */ +#define WM831X_AUX_CAL_SEL 0x8000 /* AUX_CAL_SEL */ +#define WM831X_AUX_CAL_SEL_MASK 0x8000 /* AUX_CAL_SEL */ +#define WM831X_AUX_CAL_SEL_SHIFT 15 /* AUX_CAL_SEL */ +#define WM831X_AUX_CAL_SEL_WIDTH 1 /* AUX_CAL_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL 0x0400 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL_MASK 0x0400 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL_SHIFT 10 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL_WIDTH 1 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_WALL_SEL 0x0200 /* AUX_WALL_SEL */ +#define WM831X_AUX_WALL_SEL_MASK 0x0200 /* AUX_WALL_SEL */ +#define WM831X_AUX_WALL_SEL_SHIFT 9 /* AUX_WALL_SEL */ +#define WM831X_AUX_WALL_SEL_WIDTH 1 /* AUX_WALL_SEL */ +#define WM831X_AUX_BATT_SEL 0x0100 /* AUX_BATT_SEL */ +#define WM831X_AUX_BATT_SEL_MASK 0x0100 /* AUX_BATT_SEL */ +#define WM831X_AUX_BATT_SEL_SHIFT 8 /* AUX_BATT_SEL */ +#define WM831X_AUX_BATT_SEL_WIDTH 1 /* AUX_BATT_SEL */ +#define WM831X_AUX_USB_SEL 0x0080 /* AUX_USB_SEL */ +#define WM831X_AUX_USB_SEL_MASK 0x0080 /* AUX_USB_SEL */ +#define WM831X_AUX_USB_SEL_SHIFT 7 /* AUX_USB_SEL */ +#define WM831X_AUX_USB_SEL_WIDTH 1 /* AUX_USB_SEL */ +#define WM831X_AUX_SYSVDD_SEL 0x0040 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_SYSVDD_SEL_MASK 0x0040 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_SYSVDD_SEL_SHIFT 6 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_SYSVDD_SEL_WIDTH 1 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL 0x0020 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL_MASK 0x0020 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL_SHIFT 5 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL_WIDTH 1 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL 0x0010 /* AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL_MASK 0x0010 /* AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL_SHIFT 4 /* AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL_WIDTH 1 /* 
AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_AUX4_SEL 0x0008 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX4_SEL_MASK 0x0008 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX4_SEL_SHIFT 3 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX4_SEL_WIDTH 1 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX3_SEL 0x0004 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX3_SEL_MASK 0x0004 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX3_SEL_SHIFT 2 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX3_SEL_WIDTH 1 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX2_SEL 0x0002 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX2_SEL_MASK 0x0002 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX2_SEL_SHIFT 1 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX2_SEL_WIDTH 1 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX1_SEL 0x0001 /* AUX_AUX1_SEL */ +#define WM831X_AUX_AUX1_SEL_MASK 0x0001 /* AUX_AUX1_SEL */ +#define WM831X_AUX_AUX1_SEL_SHIFT 0 /* AUX_AUX1_SEL */ +#define WM831X_AUX_AUX1_SEL_WIDTH 1 /* AUX_AUX1_SEL */ + +/* + * R16432 (0x4030) - Comparator Control + */ +#define WM831X_DCOMP4_STS 0x0800 /* DCOMP4_STS */ +#define WM831X_DCOMP4_STS_MASK 0x0800 /* DCOMP4_STS */ +#define WM831X_DCOMP4_STS_SHIFT 11 /* DCOMP4_STS */ +#define WM831X_DCOMP4_STS_WIDTH 1 /* DCOMP4_STS */ +#define WM831X_DCOMP3_STS 0x0400 /* DCOMP3_STS */ +#define WM831X_DCOMP3_STS_MASK 0x0400 /* DCOMP3_STS */ +#define WM831X_DCOMP3_STS_SHIFT 10 /* DCOMP3_STS */ +#define WM831X_DCOMP3_STS_WIDTH 1 /* DCOMP3_STS */ +#define WM831X_DCOMP2_STS 0x0200 /* DCOMP2_STS */ +#define WM831X_DCOMP2_STS_MASK 0x0200 /* DCOMP2_STS */ +#define WM831X_DCOMP2_STS_SHIFT 9 /* DCOMP2_STS */ +#define WM831X_DCOMP2_STS_WIDTH 1 /* DCOMP2_STS */ +#define WM831X_DCOMP1_STS 0x0100 /* DCOMP1_STS */ +#define WM831X_DCOMP1_STS_MASK 0x0100 /* DCOMP1_STS */ +#define WM831X_DCOMP1_STS_SHIFT 8 /* DCOMP1_STS */ +#define WM831X_DCOMP1_STS_WIDTH 1 /* DCOMP1_STS */ +#define WM831X_DCMP4_ENA 0x0008 /* DCMP4_ENA */ +#define WM831X_DCMP4_ENA_MASK 0x0008 /* DCMP4_ENA */ +#define WM831X_DCMP4_ENA_SHIFT 3 /* DCMP4_ENA */ +#define WM831X_DCMP4_ENA_WIDTH 1 /* DCMP4_ENA */ +#define WM831X_DCMP3_ENA 0x0004 /* DCMP3_ENA */ +#define WM831X_DCMP3_ENA_MASK 0x0004 /* DCMP3_ENA */ +#define WM831X_DCMP3_ENA_SHIFT 2 /* DCMP3_ENA */ +#define WM831X_DCMP3_ENA_WIDTH 1 /* DCMP3_ENA */ +#define WM831X_DCMP2_ENA 0x0002 /* DCMP2_ENA */ +#define WM831X_DCMP2_ENA_MASK 0x0002 /* DCMP2_ENA */ +#define WM831X_DCMP2_ENA_SHIFT 1 /* DCMP2_ENA */ +#define WM831X_DCMP2_ENA_WIDTH 1 /* DCMP2_ENA */ +#define WM831X_DCMP1_ENA 0x0001 /* DCMP1_ENA */ +#define WM831X_DCMP1_ENA_MASK 0x0001 /* DCMP1_ENA */ +#define WM831X_DCMP1_ENA_SHIFT 0 /* DCMP1_ENA */ +#define WM831X_DCMP1_ENA_WIDTH 1 /* DCMP1_ENA */ + +/* + * R16433 (0x4031) - Comparator 1 + */ +#define WM831X_DCMP1_SRC_MASK 0xE000 /* DCMP1_SRC - [15:13] */ +#define WM831X_DCMP1_SRC_SHIFT 13 /* DCMP1_SRC - [15:13] */ +#define WM831X_DCMP1_SRC_WIDTH 3 /* DCMP1_SRC - [15:13] */ +#define WM831X_DCMP1_GT 0x1000 /* DCMP1_GT */ +#define WM831X_DCMP1_GT_MASK 0x1000 /* DCMP1_GT */ +#define WM831X_DCMP1_GT_SHIFT 12 /* DCMP1_GT */ +#define WM831X_DCMP1_GT_WIDTH 1 /* DCMP1_GT */ +#define WM831X_DCMP1_THR_MASK 0x0FFF /* DCMP1_THR - [11:0] */ +#define WM831X_DCMP1_THR_SHIFT 0 /* DCMP1_THR - [11:0] */ +#define WM831X_DCMP1_THR_WIDTH 12 /* DCMP1_THR - [11:0] */ + +/* + * R16434 (0x4032) - Comparator 2 + */ +#define WM831X_DCMP2_SRC_MASK 0xE000 /* DCMP2_SRC - [15:13] */ +#define WM831X_DCMP2_SRC_SHIFT 13 /* DCMP2_SRC - [15:13] */ +#define WM831X_DCMP2_SRC_WIDTH 3 /* DCMP2_SRC - [15:13] */ +#define WM831X_DCMP2_GT 0x1000 /* DCMP2_GT */ +#define 
WM831X_DCMP2_GT_MASK 0x1000 /* DCMP2_GT */ +#define WM831X_DCMP2_GT_SHIFT 12 /* DCMP2_GT */ +#define WM831X_DCMP2_GT_WIDTH 1 /* DCMP2_GT */ +#define WM831X_DCMP2_THR_MASK 0x0FFF /* DCMP2_THR - [11:0] */ +#define WM831X_DCMP2_THR_SHIFT 0 /* DCMP2_THR - [11:0] */ +#define WM831X_DCMP2_THR_WIDTH 12 /* DCMP2_THR - [11:0] */ + +/* + * R16435 (0x4033) - Comparator 3 + */ +#define WM831X_DCMP3_SRC_MASK 0xE000 /* DCMP3_SRC - [15:13] */ +#define WM831X_DCMP3_SRC_SHIFT 13 /* DCMP3_SRC - [15:13] */ +#define WM831X_DCMP3_SRC_WIDTH 3 /* DCMP3_SRC - [15:13] */ +#define WM831X_DCMP3_GT 0x1000 /* DCMP3_GT */ +#define WM831X_DCMP3_GT_MASK 0x1000 /* DCMP3_GT */ +#define WM831X_DCMP3_GT_SHIFT 12 /* DCMP3_GT */ +#define WM831X_DCMP3_GT_WIDTH 1 /* DCMP3_GT */ +#define WM831X_DCMP3_THR_MASK 0x0FFF /* DCMP3_THR - [11:0] */ +#define WM831X_DCMP3_THR_SHIFT 0 /* DCMP3_THR - [11:0] */ +#define WM831X_DCMP3_THR_WIDTH 12 /* DCMP3_THR - [11:0] */ + +/* + * R16436 (0x4034) - Comparator 4 + */ +#define WM831X_DCMP4_SRC_MASK 0xE000 /* DCMP4_SRC - [15:13] */ +#define WM831X_DCMP4_SRC_SHIFT 13 /* DCMP4_SRC - [15:13] */ +#define WM831X_DCMP4_SRC_WIDTH 3 /* DCMP4_SRC - [15:13] */ +#define WM831X_DCMP4_GT 0x1000 /* DCMP4_GT */ +#define WM831X_DCMP4_GT_MASK 0x1000 /* DCMP4_GT */ +#define WM831X_DCMP4_GT_SHIFT 12 /* DCMP4_GT */ +#define WM831X_DCMP4_GT_WIDTH 1 /* DCMP4_GT */ +#define WM831X_DCMP4_THR_MASK 0x0FFF /* DCMP4_THR - [11:0] */ +#define WM831X_DCMP4_THR_SHIFT 0 /* DCMP4_THR - [11:0] */ +#define WM831X_DCMP4_THR_WIDTH 12 /* DCMP4_THR - [11:0] */ + +#define WM831X_AUX_CAL_FACTOR 0xfff +#define WM831X_AUX_CAL_NOMINAL 0x222 + +enum wm831x_auxadc { + WM831X_AUX_CAL = 15, + WM831X_AUX_BKUP_BATT = 10, + WM831X_AUX_WALL = 9, + WM831X_AUX_BATT = 8, + WM831X_AUX_USB = 7, + WM831X_AUX_SYSVDD = 6, + WM831X_AUX_BATT_TEMP = 5, + WM831X_AUX_CHIP_TEMP = 4, + WM831X_AUX_AUX4 = 3, + WM831X_AUX_AUX3 = 2, + WM831X_AUX_AUX2 = 1, + WM831X_AUX_AUX1 = 0, +}; + +int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input); +int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input); + +#endif diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h new file mode 100644 index 000000000..b49fa6761 --- /dev/null +++ b/include/linux/mfd/wm831x/core.h @@ -0,0 +1,437 @@ +/* + * include/linux/mfd/wm831x/core.h -- Core interface for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_CORE_H__ +#define __MFD_WM831X_CORE_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Register values. 
+ */ +#define WM831X_RESET_ID 0x00 +#define WM831X_REVISION 0x01 +#define WM831X_PARENT_ID 0x4000 +#define WM831X_SYSVDD_CONTROL 0x4001 +#define WM831X_THERMAL_MONITORING 0x4002 +#define WM831X_POWER_STATE 0x4003 +#define WM831X_WATCHDOG 0x4004 +#define WM831X_ON_PIN_CONTROL 0x4005 +#define WM831X_RESET_CONTROL 0x4006 +#define WM831X_CONTROL_INTERFACE 0x4007 +#define WM831X_SECURITY_KEY 0x4008 +#define WM831X_SOFTWARE_SCRATCH 0x4009 +#define WM831X_OTP_CONTROL 0x400A +#define WM831X_GPIO_LEVEL 0x400C +#define WM831X_SYSTEM_STATUS 0x400D +#define WM831X_ON_SOURCE 0x400E +#define WM831X_OFF_SOURCE 0x400F +#define WM831X_SYSTEM_INTERRUPTS 0x4010 +#define WM831X_INTERRUPT_STATUS_1 0x4011 +#define WM831X_INTERRUPT_STATUS_2 0x4012 +#define WM831X_INTERRUPT_STATUS_3 0x4013 +#define WM831X_INTERRUPT_STATUS_4 0x4014 +#define WM831X_INTERRUPT_STATUS_5 0x4015 +#define WM831X_IRQ_CONFIG 0x4017 +#define WM831X_SYSTEM_INTERRUPTS_MASK 0x4018 +#define WM831X_INTERRUPT_STATUS_1_MASK 0x4019 +#define WM831X_INTERRUPT_STATUS_2_MASK 0x401A +#define WM831X_INTERRUPT_STATUS_3_MASK 0x401B +#define WM831X_INTERRUPT_STATUS_4_MASK 0x401C +#define WM831X_INTERRUPT_STATUS_5_MASK 0x401D +#define WM831X_RTC_WRITE_COUNTER 0x4020 +#define WM831X_RTC_TIME_1 0x4021 +#define WM831X_RTC_TIME_2 0x4022 +#define WM831X_RTC_ALARM_1 0x4023 +#define WM831X_RTC_ALARM_2 0x4024 +#define WM831X_RTC_CONTROL 0x4025 +#define WM831X_RTC_TRIM 0x4026 +#define WM831X_TOUCH_CONTROL_1 0x4028 +#define WM831X_TOUCH_CONTROL_2 0x4029 +#define WM831X_TOUCH_DATA_X 0x402A +#define WM831X_TOUCH_DATA_Y 0x402B +#define WM831X_TOUCH_DATA_Z 0x402C +#define WM831X_AUXADC_DATA 0x402D +#define WM831X_AUXADC_CONTROL 0x402E +#define WM831X_AUXADC_SOURCE 0x402F +#define WM831X_COMPARATOR_CONTROL 0x4030 +#define WM831X_COMPARATOR_1 0x4031 +#define WM831X_COMPARATOR_2 0x4032 +#define WM831X_COMPARATOR_3 0x4033 +#define WM831X_COMPARATOR_4 0x4034 +#define WM831X_GPIO1_CONTROL 0x4038 +#define WM831X_GPIO2_CONTROL 0x4039 +#define WM831X_GPIO3_CONTROL 0x403A +#define WM831X_GPIO4_CONTROL 0x403B +#define WM831X_GPIO5_CONTROL 0x403C +#define WM831X_GPIO6_CONTROL 0x403D +#define WM831X_GPIO7_CONTROL 0x403E +#define WM831X_GPIO8_CONTROL 0x403F +#define WM831X_GPIO9_CONTROL 0x4040 +#define WM831X_GPIO10_CONTROL 0x4041 +#define WM831X_GPIO11_CONTROL 0x4042 +#define WM831X_GPIO12_CONTROL 0x4043 +#define WM831X_GPIO13_CONTROL 0x4044 +#define WM831X_GPIO14_CONTROL 0x4045 +#define WM831X_GPIO15_CONTROL 0x4046 +#define WM831X_GPIO16_CONTROL 0x4047 +#define WM831X_CHARGER_CONTROL_1 0x4048 +#define WM831X_CHARGER_CONTROL_2 0x4049 +#define WM831X_CHARGER_STATUS 0x404A +#define WM831X_BACKUP_CHARGER_CONTROL 0x404B +#define WM831X_STATUS_LED_1 0x404C +#define WM831X_STATUS_LED_2 0x404D +#define WM831X_CURRENT_SINK_1 0x404E +#define WM831X_CURRENT_SINK_2 0x404F +#define WM831X_DCDC_ENABLE 0x4050 +#define WM831X_LDO_ENABLE 0x4051 +#define WM831X_DCDC_STATUS 0x4052 +#define WM831X_LDO_STATUS 0x4053 +#define WM831X_DCDC_UV_STATUS 0x4054 +#define WM831X_LDO_UV_STATUS 0x4055 +#define WM831X_DC1_CONTROL_1 0x4056 +#define WM831X_DC1_CONTROL_2 0x4057 +#define WM831X_DC1_ON_CONFIG 0x4058 +#define WM831X_DC1_SLEEP_CONTROL 0x4059 +#define WM831X_DC1_DVS_CONTROL 0x405A +#define WM831X_DC2_CONTROL_1 0x405B +#define WM831X_DC2_CONTROL_2 0x405C +#define WM831X_DC2_ON_CONFIG 0x405D +#define WM831X_DC2_SLEEP_CONTROL 0x405E +#define WM831X_DC2_DVS_CONTROL 0x405F +#define WM831X_DC3_CONTROL_1 0x4060 +#define WM831X_DC3_CONTROL_2 0x4061 +#define WM831X_DC3_ON_CONFIG 0x4062 +#define 
WM831X_DC3_SLEEP_CONTROL 0x4063 +#define WM831X_DC4_CONTROL 0x4064 +#define WM831X_DC4_SLEEP_CONTROL 0x4065 +#define WM832X_DC4_SLEEP_CONTROL 0x4067 +#define WM831X_EPE1_CONTROL 0x4066 +#define WM831X_EPE2_CONTROL 0x4067 +#define WM831X_LDO1_CONTROL 0x4068 +#define WM831X_LDO1_ON_CONTROL 0x4069 +#define WM831X_LDO1_SLEEP_CONTROL 0x406A +#define WM831X_LDO2_CONTROL 0x406B +#define WM831X_LDO2_ON_CONTROL 0x406C +#define WM831X_LDO2_SLEEP_CONTROL 0x406D +#define WM831X_LDO3_CONTROL 0x406E +#define WM831X_LDO3_ON_CONTROL 0x406F +#define WM831X_LDO3_SLEEP_CONTROL 0x4070 +#define WM831X_LDO4_CONTROL 0x4071 +#define WM831X_LDO4_ON_CONTROL 0x4072 +#define WM831X_LDO4_SLEEP_CONTROL 0x4073 +#define WM831X_LDO5_CONTROL 0x4074 +#define WM831X_LDO5_ON_CONTROL 0x4075 +#define WM831X_LDO5_SLEEP_CONTROL 0x4076 +#define WM831X_LDO6_CONTROL 0x4077 +#define WM831X_LDO6_ON_CONTROL 0x4078 +#define WM831X_LDO6_SLEEP_CONTROL 0x4079 +#define WM831X_LDO7_CONTROL 0x407A +#define WM831X_LDO7_ON_CONTROL 0x407B +#define WM831X_LDO7_SLEEP_CONTROL 0x407C +#define WM831X_LDO8_CONTROL 0x407D +#define WM831X_LDO8_ON_CONTROL 0x407E +#define WM831X_LDO8_SLEEP_CONTROL 0x407F +#define WM831X_LDO9_CONTROL 0x4080 +#define WM831X_LDO9_ON_CONTROL 0x4081 +#define WM831X_LDO9_SLEEP_CONTROL 0x4082 +#define WM831X_LDO10_CONTROL 0x4083 +#define WM831X_LDO10_ON_CONTROL 0x4084 +#define WM831X_LDO10_SLEEP_CONTROL 0x4085 +#define WM831X_LDO11_ON_CONTROL 0x4087 +#define WM831X_LDO11_SLEEP_CONTROL 0x4088 +#define WM831X_POWER_GOOD_SOURCE_1 0x408E +#define WM831X_POWER_GOOD_SOURCE_2 0x408F +#define WM831X_CLOCK_CONTROL_1 0x4090 +#define WM831X_CLOCK_CONTROL_2 0x4091 +#define WM831X_FLL_CONTROL_1 0x4092 +#define WM831X_FLL_CONTROL_2 0x4093 +#define WM831X_FLL_CONTROL_3 0x4094 +#define WM831X_FLL_CONTROL_4 0x4095 +#define WM831X_FLL_CONTROL_5 0x4096 +#define WM831X_UNIQUE_ID_1 0x7800 +#define WM831X_UNIQUE_ID_2 0x7801 +#define WM831X_UNIQUE_ID_3 0x7802 +#define WM831X_UNIQUE_ID_4 0x7803 +#define WM831X_UNIQUE_ID_5 0x7804 +#define WM831X_UNIQUE_ID_6 0x7805 +#define WM831X_UNIQUE_ID_7 0x7806 +#define WM831X_UNIQUE_ID_8 0x7807 +#define WM831X_FACTORY_OTP_ID 0x7808 +#define WM831X_FACTORY_OTP_1 0x7809 +#define WM831X_FACTORY_OTP_2 0x780A +#define WM831X_FACTORY_OTP_3 0x780B +#define WM831X_FACTORY_OTP_4 0x780C +#define WM831X_FACTORY_OTP_5 0x780D +#define WM831X_CUSTOMER_OTP_ID 0x7810 +#define WM831X_DC1_OTP_CONTROL 0x7811 +#define WM831X_DC2_OTP_CONTROL 0x7812 +#define WM831X_DC3_OTP_CONTROL 0x7813 +#define WM831X_LDO1_2_OTP_CONTROL 0x7814 +#define WM831X_LDO3_4_OTP_CONTROL 0x7815 +#define WM831X_LDO5_6_OTP_CONTROL 0x7816 +#define WM831X_LDO7_8_OTP_CONTROL 0x7817 +#define WM831X_LDO9_10_OTP_CONTROL 0x7818 +#define WM831X_LDO11_EPE_CONTROL 0x7819 +#define WM831X_GPIO1_OTP_CONTROL 0x781A +#define WM831X_GPIO2_OTP_CONTROL 0x781B +#define WM831X_GPIO3_OTP_CONTROL 0x781C +#define WM831X_GPIO4_OTP_CONTROL 0x781D +#define WM831X_GPIO5_OTP_CONTROL 0x781E +#define WM831X_GPIO6_OTP_CONTROL 0x781F +#define WM831X_DBE_CHECK_DATA 0x7827 + +/* + * R0 (0x00) - Reset ID + */ +#define WM831X_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */ + +/* + * R1 (0x01) - Revision + */ +#define WM831X_PARENT_REV_MASK 0xFF00 /* PARENT_REV - [15:8] */ +#define WM831X_PARENT_REV_SHIFT 8 /* PARENT_REV - [15:8] */ +#define WM831X_PARENT_REV_WIDTH 8 /* PARENT_REV - [15:8] */ +#define WM831X_CHILD_REV_MASK 0x00FF /* CHILD_REV - [7:0] */ +#define WM831X_CHILD_REV_SHIFT 0 /* 
CHILD_REV - [7:0] */ +#define WM831X_CHILD_REV_WIDTH 8 /* CHILD_REV - [7:0] */ + +/* + * R16384 (0x4000) - Parent ID + */ +#define WM831X_PARENT_ID_MASK 0xFFFF /* PARENT_ID - [15:0] */ +#define WM831X_PARENT_ID_SHIFT 0 /* PARENT_ID - [15:0] */ +#define WM831X_PARENT_ID_WIDTH 16 /* PARENT_ID - [15:0] */ + +/* + * R16389 (0x4005) - ON Pin Control + */ +#define WM831X_ON_PIN_SECACT_MASK 0x0300 /* ON_PIN_SECACT - [9:8] */ +#define WM831X_ON_PIN_SECACT_SHIFT 8 /* ON_PIN_SECACT - [9:8] */ +#define WM831X_ON_PIN_SECACT_WIDTH 2 /* ON_PIN_SECACT - [9:8] */ +#define WM831X_ON_PIN_PRIMACT_MASK 0x0030 /* ON_PIN_PRIMACT - [5:4] */ +#define WM831X_ON_PIN_PRIMACT_SHIFT 4 /* ON_PIN_PRIMACT - [5:4] */ +#define WM831X_ON_PIN_PRIMACT_WIDTH 2 /* ON_PIN_PRIMACT - [5:4] */ +#define WM831X_ON_PIN_STS 0x0008 /* ON_PIN_STS */ +#define WM831X_ON_PIN_STS_MASK 0x0008 /* ON_PIN_STS */ +#define WM831X_ON_PIN_STS_SHIFT 3 /* ON_PIN_STS */ +#define WM831X_ON_PIN_STS_WIDTH 1 /* ON_PIN_STS */ +#define WM831X_ON_PIN_TO_MASK 0x0003 /* ON_PIN_TO - [1:0] */ +#define WM831X_ON_PIN_TO_SHIFT 0 /* ON_PIN_TO - [1:0] */ +#define WM831X_ON_PIN_TO_WIDTH 2 /* ON_PIN_TO - [1:0] */ + +/* + * R16528 (0x4090) - Clock Control 1 + */ +#define WM831X_CLKOUT_ENA 0x8000 /* CLKOUT_ENA */ +#define WM831X_CLKOUT_ENA_MASK 0x8000 /* CLKOUT_ENA */ +#define WM831X_CLKOUT_ENA_SHIFT 15 /* CLKOUT_ENA */ +#define WM831X_CLKOUT_ENA_WIDTH 1 /* CLKOUT_ENA */ +#define WM831X_CLKOUT_OD 0x2000 /* CLKOUT_OD */ +#define WM831X_CLKOUT_OD_MASK 0x2000 /* CLKOUT_OD */ +#define WM831X_CLKOUT_OD_SHIFT 13 /* CLKOUT_OD */ +#define WM831X_CLKOUT_OD_WIDTH 1 /* CLKOUT_OD */ +#define WM831X_CLKOUT_SLOT_MASK 0x0700 /* CLKOUT_SLOT - [10:8] */ +#define WM831X_CLKOUT_SLOT_SHIFT 8 /* CLKOUT_SLOT - [10:8] */ +#define WM831X_CLKOUT_SLOT_WIDTH 3 /* CLKOUT_SLOT - [10:8] */ +#define WM831X_CLKOUT_SLPSLOT_MASK 0x0070 /* CLKOUT_SLPSLOT - [6:4] */ +#define WM831X_CLKOUT_SLPSLOT_SHIFT 4 /* CLKOUT_SLPSLOT - [6:4] */ +#define WM831X_CLKOUT_SLPSLOT_WIDTH 3 /* CLKOUT_SLPSLOT - [6:4] */ +#define WM831X_CLKOUT_SRC 0x0001 /* CLKOUT_SRC */ +#define WM831X_CLKOUT_SRC_MASK 0x0001 /* CLKOUT_SRC */ +#define WM831X_CLKOUT_SRC_SHIFT 0 /* CLKOUT_SRC */ +#define WM831X_CLKOUT_SRC_WIDTH 1 /* CLKOUT_SRC */ + +/* + * R16529 (0x4091) - Clock Control 2 + */ +#define WM831X_XTAL_INH 0x8000 /* XTAL_INH */ +#define WM831X_XTAL_INH_MASK 0x8000 /* XTAL_INH */ +#define WM831X_XTAL_INH_SHIFT 15 /* XTAL_INH */ +#define WM831X_XTAL_INH_WIDTH 1 /* XTAL_INH */ +#define WM831X_XTAL_ENA 0x2000 /* XTAL_ENA */ +#define WM831X_XTAL_ENA_MASK 0x2000 /* XTAL_ENA */ +#define WM831X_XTAL_ENA_SHIFT 13 /* XTAL_ENA */ +#define WM831X_XTAL_ENA_WIDTH 1 /* XTAL_ENA */ +#define WM831X_XTAL_BKUPENA 0x1000 /* XTAL_BKUPENA */ +#define WM831X_XTAL_BKUPENA_MASK 0x1000 /* XTAL_BKUPENA */ +#define WM831X_XTAL_BKUPENA_SHIFT 12 /* XTAL_BKUPENA */ +#define WM831X_XTAL_BKUPENA_WIDTH 1 /* XTAL_BKUPENA */ +#define WM831X_FLL_AUTO 0x0080 /* FLL_AUTO */ +#define WM831X_FLL_AUTO_MASK 0x0080 /* FLL_AUTO */ +#define WM831X_FLL_AUTO_SHIFT 7 /* FLL_AUTO */ +#define WM831X_FLL_AUTO_WIDTH 1 /* FLL_AUTO */ +#define WM831X_FLL_AUTO_FREQ_MASK 0x0007 /* FLL_AUTO_FREQ - [2:0] */ +#define WM831X_FLL_AUTO_FREQ_SHIFT 0 /* FLL_AUTO_FREQ - [2:0] */ +#define WM831X_FLL_AUTO_FREQ_WIDTH 3 /* FLL_AUTO_FREQ - [2:0] */ + +/* + * R16530 (0x4092) - FLL Control 1 + */ +#define WM831X_FLL_FRAC 0x0004 /* FLL_FRAC */ +#define WM831X_FLL_FRAC_MASK 0x0004 /* FLL_FRAC */ +#define WM831X_FLL_FRAC_SHIFT 2 /* FLL_FRAC */ +#define WM831X_FLL_FRAC_WIDTH 1 /* FLL_FRAC */ +#define 
WM831X_FLL_OSC_ENA 0x0002 /* FLL_OSC_ENA */ +#define WM831X_FLL_OSC_ENA_MASK 0x0002 /* FLL_OSC_ENA */ +#define WM831X_FLL_OSC_ENA_SHIFT 1 /* FLL_OSC_ENA */ +#define WM831X_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */ +#define WM831X_FLL_ENA 0x0001 /* FLL_ENA */ +#define WM831X_FLL_ENA_MASK 0x0001 /* FLL_ENA */ +#define WM831X_FLL_ENA_SHIFT 0 /* FLL_ENA */ +#define WM831X_FLL_ENA_WIDTH 1 /* FLL_ENA */ + +/* + * R16531 (0x4093) - FLL Control 2 + */ +#define WM831X_FLL_OUTDIV_MASK 0x3F00 /* FLL_OUTDIV - [13:8] */ +#define WM831X_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [13:8] */ +#define WM831X_FLL_OUTDIV_WIDTH 6 /* FLL_OUTDIV - [13:8] */ +#define WM831X_FLL_CTRL_RATE_MASK 0x0070 /* FLL_CTRL_RATE - [6:4] */ +#define WM831X_FLL_CTRL_RATE_SHIFT 4 /* FLL_CTRL_RATE - [6:4] */ +#define WM831X_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [6:4] */ +#define WM831X_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */ +#define WM831X_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */ +#define WM831X_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */ + +/* + * R16532 (0x4094) - FLL Control 3 + */ +#define WM831X_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */ +#define WM831X_FLL_K_SHIFT 0 /* FLL_K - [15:0] */ +#define WM831X_FLL_K_WIDTH 16 /* FLL_K - [15:0] */ + +/* + * R16533 (0x4095) - FLL Control 4 + */ +#define WM831X_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */ +#define WM831X_FLL_N_SHIFT 5 /* FLL_N - [14:5] */ +#define WM831X_FLL_N_WIDTH 10 /* FLL_N - [14:5] */ +#define WM831X_FLL_GAIN_MASK 0x000F /* FLL_GAIN - [3:0] */ +#define WM831X_FLL_GAIN_SHIFT 0 /* FLL_GAIN - [3:0] */ +#define WM831X_FLL_GAIN_WIDTH 4 /* FLL_GAIN - [3:0] */ + +/* + * R16534 (0x4096) - FLL Control 5 + */ +#define WM831X_FLL_CLK_REF_DIV_MASK 0x0018 /* FLL_CLK_REF_DIV - [4:3] */ +#define WM831X_FLL_CLK_REF_DIV_SHIFT 3 /* FLL_CLK_REF_DIV - [4:3] */ +#define WM831X_FLL_CLK_REF_DIV_WIDTH 2 /* FLL_CLK_REF_DIV - [4:3] */ +#define WM831X_FLL_CLK_SRC_MASK 0x0003 /* FLL_CLK_SRC - [1:0] */ +#define WM831X_FLL_CLK_SRC_SHIFT 0 /* FLL_CLK_SRC - [1:0] */ +#define WM831X_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */ + +struct regulator_dev; +struct irq_domain; + +#define WM831X_NUM_IRQ_REGS 5 +#define WM831X_NUM_GPIO_REGS 16 + +enum wm831x_parent { + WM8310 = 0x8310, + WM8311 = 0x8311, + WM8312 = 0x8312, + WM8320 = 0x8320, + WM8321 = 0x8321, + WM8325 = 0x8325, + WM8326 = 0x8326, +}; + +struct wm831x; + +typedef int (*wm831x_auxadc_read_fn)(struct wm831x *wm831x, + enum wm831x_auxadc input); + +struct wm831x { + struct mutex io_lock; + + struct device *dev; + + struct regmap *regmap; + + struct wm831x_pdata pdata; + enum wm831x_parent type; + + int irq; /* Our chip IRQ */ + struct mutex irq_lock; + struct irq_domain *irq_domain; + int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */ + int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */ + + bool soft_shutdown; + + /* Chip revision based flags */ + unsigned has_gpio_ena:1; /* Has GPIO enable bit */ + unsigned has_cs_sts:1; /* Has current sink status bit */ + unsigned charger_irq_wake:1; /* Are charger IRQs a wake source? */ + + int num_gpio; + + /* Used by the interrupt controller code to post writes */ + int gpio_update[WM831X_NUM_GPIO_REGS]; + bool gpio_level_high[WM831X_NUM_GPIO_REGS]; + bool gpio_level_low[WM831X_NUM_GPIO_REGS]; + + struct mutex auxadc_lock; + struct list_head auxadc_pending; + u16 auxadc_active; + wm831x_auxadc_read_fn auxadc_read; + + /* The WM831x has a security key blocking access to certain + * registers. 
The mutex is taken by the accessors for locking + * and unlocking the security key, locked is used to fail + * writes if the lock is held. + */ + struct mutex key_lock; + unsigned int locked:1; +}; + +/* Device I/O API */ +int wm831x_reg_read(struct wm831x *wm831x, unsigned short reg); +int wm831x_reg_write(struct wm831x *wm831x, unsigned short reg, + unsigned short val); +void wm831x_reg_lock(struct wm831x *wm831x); +int wm831x_reg_unlock(struct wm831x *wm831x); +int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg, + unsigned short mask, unsigned short val); +int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg, + int count, u16 *buf); + +int wm831x_device_init(struct wm831x *wm831x, int irq); +void wm831x_device_exit(struct wm831x *wm831x); +int wm831x_device_suspend(struct wm831x *wm831x); +void wm831x_device_shutdown(struct wm831x *wm831x); +int wm831x_irq_init(struct wm831x *wm831x, int irq); +void wm831x_irq_exit(struct wm831x *wm831x); +void wm831x_auxadc_init(struct wm831x *wm831x); + +static inline int wm831x_irq(struct wm831x *wm831x, int irq) +{ + return irq_create_mapping(wm831x->irq_domain, irq); +} + +extern struct regmap_config wm831x_regmap_config; + +extern const struct of_device_id wm831x_of_match[]; + +#endif diff --git a/include/linux/mfd/wm831x/gpio.h b/include/linux/mfd/wm831x/gpio.h new file mode 100644 index 000000000..9b163c588 --- /dev/null +++ b/include/linux/mfd/wm831x/gpio.h @@ -0,0 +1,59 @@ +/* + * include/linux/mfd/wm831x/gpio.h -- GPIO for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_GPIO_H__ +#define __MFD_WM831X_GPIO_H__ + +/* + * R16440-16455 (0x4038-0x4047) - GPIOx Control + */ +#define WM831X_GPN_DIR 0x8000 /* GPN_DIR */ +#define WM831X_GPN_DIR_MASK 0x8000 /* GPN_DIR */ +#define WM831X_GPN_DIR_SHIFT 15 /* GPN_DIR */ +#define WM831X_GPN_DIR_WIDTH 1 /* GPN_DIR */ +#define WM831X_GPN_PULL_MASK 0x6000 /* GPN_PULL - [14:13] */ +#define WM831X_GPN_PULL_SHIFT 13 /* GPN_PULL - [14:13] */ +#define WM831X_GPN_PULL_WIDTH 2 /* GPN_PULL - [14:13] */ +#define WM831X_GPN_INT_MODE 0x1000 /* GPN_INT_MODE */ +#define WM831X_GPN_INT_MODE_MASK 0x1000 /* GPN_INT_MODE */ +#define WM831X_GPN_INT_MODE_SHIFT 12 /* GPN_INT_MODE */ +#define WM831X_GPN_INT_MODE_WIDTH 1 /* GPN_INT_MODE */ +#define WM831X_GPN_PWR_DOM 0x0800 /* GPN_PWR_DOM */ +#define WM831X_GPN_PWR_DOM_MASK 0x0800 /* GPN_PWR_DOM */ +#define WM831X_GPN_PWR_DOM_SHIFT 11 /* GPN_PWR_DOM */ +#define WM831X_GPN_PWR_DOM_WIDTH 1 /* GPN_PWR_DOM */ +#define WM831X_GPN_POL 0x0400 /* GPN_POL */ +#define WM831X_GPN_POL_MASK 0x0400 /* GPN_POL */ +#define WM831X_GPN_POL_SHIFT 10 /* GPN_POL */ +#define WM831X_GPN_POL_WIDTH 1 /* GPN_POL */ +#define WM831X_GPN_OD 0x0200 /* GPN_OD */ +#define WM831X_GPN_OD_MASK 0x0200 /* GPN_OD */ +#define WM831X_GPN_OD_SHIFT 9 /* GPN_OD */ +#define WM831X_GPN_OD_WIDTH 1 /* GPN_OD */ +#define WM831X_GPN_ENA 0x0080 /* GPN_ENA */ +#define WM831X_GPN_ENA_MASK 0x0080 /* GPN_ENA */ +#define WM831X_GPN_ENA_SHIFT 7 /* GPN_ENA */ +#define WM831X_GPN_ENA_WIDTH 1 /* GPN_ENA */ +#define WM831X_GPN_TRI 0x0080 /* GPN_TRI */ +#define WM831X_GPN_TRI_MASK 0x0080 /* GPN_TRI */ +#define WM831X_GPN_TRI_SHIFT 7 /* GPN_TRI */ +#define WM831X_GPN_TRI_WIDTH 1 /* GPN_TRI */ +#define WM831X_GPN_FN_MASK 0x000F /* GPN_FN - [3:0] */ +#define WM831X_GPN_FN_SHIFT 0 /* GPN_FN - [3:0] */ +#define WM831X_GPN_FN_WIDTH 4 /* GPN_FN - [3:0] */ + +#define WM831X_GPIO_PULL_NONE (0 << WM831X_GPN_PULL_SHIFT) +#define WM831X_GPIO_PULL_DOWN (1 << WM831X_GPN_PULL_SHIFT) +#define WM831X_GPIO_PULL_UP (2 << WM831X_GPN_PULL_SHIFT) +#endif diff --git a/include/linux/mfd/wm831x/irq.h b/include/linux/mfd/wm831x/irq.h new file mode 100644 index 000000000..3a8c97656 --- /dev/null +++ b/include/linux/mfd/wm831x/irq.h @@ -0,0 +1,764 @@ +/* + * include/linux/mfd/wm831x/irq.h -- Interrupt controller for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_IRQ_H__ +#define __MFD_WM831X_IRQ_H__ + +/* Interrupt number assignments within Linux */ +#define WM831X_IRQ_TEMP_THW 0 +#define WM831X_IRQ_GPIO_1 1 +#define WM831X_IRQ_GPIO_2 2 +#define WM831X_IRQ_GPIO_3 3 +#define WM831X_IRQ_GPIO_4 4 +#define WM831X_IRQ_GPIO_5 5 +#define WM831X_IRQ_GPIO_6 6 +#define WM831X_IRQ_GPIO_7 7 +#define WM831X_IRQ_GPIO_8 8 +#define WM831X_IRQ_GPIO_9 9 +#define WM831X_IRQ_GPIO_10 10 +#define WM831X_IRQ_GPIO_11 11 +#define WM831X_IRQ_GPIO_12 12 +#define WM831X_IRQ_GPIO_13 13 +#define WM831X_IRQ_GPIO_14 14 +#define WM831X_IRQ_GPIO_15 15 +#define WM831X_IRQ_GPIO_16 16 +#define WM831X_IRQ_ON 17 +#define WM831X_IRQ_PPM_SYSLO 18 +#define WM831X_IRQ_PPM_PWR_SRC 19 +#define WM831X_IRQ_PPM_USB_CURR 20 +#define WM831X_IRQ_WDOG_TO 21 +#define WM831X_IRQ_RTC_PER 22 +#define WM831X_IRQ_RTC_ALM 23 +#define WM831X_IRQ_CHG_BATT_HOT 24 +#define WM831X_IRQ_CHG_BATT_COLD 25 +#define WM831X_IRQ_CHG_BATT_FAIL 26 +#define WM831X_IRQ_CHG_OV 27 +#define WM831X_IRQ_CHG_END 29 +#define WM831X_IRQ_CHG_TO 30 +#define WM831X_IRQ_CHG_MODE 31 +#define WM831X_IRQ_CHG_START 32 +#define WM831X_IRQ_TCHDATA 33 +#define WM831X_IRQ_TCHPD 34 +#define WM831X_IRQ_AUXADC_DATA 35 +#define WM831X_IRQ_AUXADC_DCOMP1 36 +#define WM831X_IRQ_AUXADC_DCOMP2 37 +#define WM831X_IRQ_AUXADC_DCOMP3 38 +#define WM831X_IRQ_AUXADC_DCOMP4 39 +#define WM831X_IRQ_CS1 40 +#define WM831X_IRQ_CS2 41 +#define WM831X_IRQ_HC_DC1 42 +#define WM831X_IRQ_HC_DC2 43 +#define WM831X_IRQ_UV_LDO1 44 +#define WM831X_IRQ_UV_LDO2 45 +#define WM831X_IRQ_UV_LDO3 46 +#define WM831X_IRQ_UV_LDO4 47 +#define WM831X_IRQ_UV_LDO5 48 +#define WM831X_IRQ_UV_LDO6 49 +#define WM831X_IRQ_UV_LDO7 50 +#define WM831X_IRQ_UV_LDO8 51 +#define WM831X_IRQ_UV_LDO9 52 +#define WM831X_IRQ_UV_LDO10 53 +#define WM831X_IRQ_UV_DC1 54 +#define WM831X_IRQ_UV_DC2 55 +#define WM831X_IRQ_UV_DC3 56 +#define WM831X_IRQ_UV_DC4 57 + +#define WM831X_NUM_IRQS 58 + +/* + * R16400 (0x4010) - System Interrupts + */ +#define WM831X_PS_INT 0x8000 /* PS_INT */ +#define WM831X_PS_INT_MASK 0x8000 /* PS_INT */ +#define WM831X_PS_INT_SHIFT 15 /* PS_INT */ +#define WM831X_PS_INT_WIDTH 1 /* PS_INT */ +#define WM831X_TEMP_INT 0x4000 /* TEMP_INT */ +#define WM831X_TEMP_INT_MASK 0x4000 /* TEMP_INT */ +#define WM831X_TEMP_INT_SHIFT 14 /* TEMP_INT */ +#define WM831X_TEMP_INT_WIDTH 1 /* TEMP_INT */ +#define WM831X_GP_INT 0x2000 /* GP_INT */ +#define WM831X_GP_INT_MASK 0x2000 /* GP_INT */ +#define WM831X_GP_INT_SHIFT 13 /* GP_INT */ +#define WM831X_GP_INT_WIDTH 1 /* GP_INT */ +#define WM831X_ON_PIN_INT 0x1000 /* ON_PIN_INT */ +#define WM831X_ON_PIN_INT_MASK 0x1000 /* ON_PIN_INT */ +#define WM831X_ON_PIN_INT_SHIFT 12 /* ON_PIN_INT */ +#define WM831X_ON_PIN_INT_WIDTH 1 /* ON_PIN_INT */ +#define WM831X_WDOG_INT 0x0800 /* WDOG_INT */ +#define WM831X_WDOG_INT_MASK 0x0800 /* WDOG_INT */ +#define WM831X_WDOG_INT_SHIFT 11 /* WDOG_INT */ +#define WM831X_WDOG_INT_WIDTH 1 /* WDOG_INT */ +#define WM831X_TCHDATA_INT 0x0400 /* TCHDATA_INT */ +#define WM831X_TCHDATA_INT_MASK 0x0400 /* TCHDATA_INT */ +#define WM831X_TCHDATA_INT_SHIFT 10 /* TCHDATA_INT */ +#define WM831X_TCHDATA_INT_WIDTH 1 /* TCHDATA_INT */ +#define WM831X_TCHPD_INT 0x0200 /* TCHPD_INT */ +#define WM831X_TCHPD_INT_MASK 0x0200 /* TCHPD_INT */ +#define WM831X_TCHPD_INT_SHIFT 9 /* TCHPD_INT */ +#define WM831X_TCHPD_INT_WIDTH 1 /* TCHPD_INT */ +#define WM831X_AUXADC_INT 0x0100 /* AUXADC_INT */ +#define WM831X_AUXADC_INT_MASK 0x0100 /* AUXADC_INT */ +#define WM831X_AUXADC_INT_SHIFT 8 /* AUXADC_INT */ +#define 
WM831X_AUXADC_INT_WIDTH 1 /* AUXADC_INT */ +#define WM831X_PPM_INT 0x0080 /* PPM_INT */ +#define WM831X_PPM_INT_MASK 0x0080 /* PPM_INT */ +#define WM831X_PPM_INT_SHIFT 7 /* PPM_INT */ +#define WM831X_PPM_INT_WIDTH 1 /* PPM_INT */ +#define WM831X_CS_INT 0x0040 /* CS_INT */ +#define WM831X_CS_INT_MASK 0x0040 /* CS_INT */ +#define WM831X_CS_INT_SHIFT 6 /* CS_INT */ +#define WM831X_CS_INT_WIDTH 1 /* CS_INT */ +#define WM831X_RTC_INT 0x0020 /* RTC_INT */ +#define WM831X_RTC_INT_MASK 0x0020 /* RTC_INT */ +#define WM831X_RTC_INT_SHIFT 5 /* RTC_INT */ +#define WM831X_RTC_INT_WIDTH 1 /* RTC_INT */ +#define WM831X_OTP_INT 0x0010 /* OTP_INT */ +#define WM831X_OTP_INT_MASK 0x0010 /* OTP_INT */ +#define WM831X_OTP_INT_SHIFT 4 /* OTP_INT */ +#define WM831X_OTP_INT_WIDTH 1 /* OTP_INT */ +#define WM831X_CHILD_INT 0x0008 /* CHILD_INT */ +#define WM831X_CHILD_INT_MASK 0x0008 /* CHILD_INT */ +#define WM831X_CHILD_INT_SHIFT 3 /* CHILD_INT */ +#define WM831X_CHILD_INT_WIDTH 1 /* CHILD_INT */ +#define WM831X_CHG_INT 0x0004 /* CHG_INT */ +#define WM831X_CHG_INT_MASK 0x0004 /* CHG_INT */ +#define WM831X_CHG_INT_SHIFT 2 /* CHG_INT */ +#define WM831X_CHG_INT_WIDTH 1 /* CHG_INT */ +#define WM831X_HC_INT 0x0002 /* HC_INT */ +#define WM831X_HC_INT_MASK 0x0002 /* HC_INT */ +#define WM831X_HC_INT_SHIFT 1 /* HC_INT */ +#define WM831X_HC_INT_WIDTH 1 /* HC_INT */ +#define WM831X_UV_INT 0x0001 /* UV_INT */ +#define WM831X_UV_INT_MASK 0x0001 /* UV_INT */ +#define WM831X_UV_INT_SHIFT 0 /* UV_INT */ +#define WM831X_UV_INT_WIDTH 1 /* UV_INT */ + +/* + * R16401 (0x4011) - Interrupt Status 1 + */ +#define WM831X_PPM_SYSLO_EINT 0x8000 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_SYSLO_EINT_MASK 0x8000 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_SYSLO_EINT_SHIFT 15 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_SYSLO_EINT_WIDTH 1 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_PWR_SRC_EINT 0x4000 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_PWR_SRC_EINT_MASK 0x4000 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_PWR_SRC_EINT_SHIFT 14 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_PWR_SRC_EINT_WIDTH 1 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_USB_CURR_EINT 0x2000 /* PPM_USB_CURR_EINT */ +#define WM831X_PPM_USB_CURR_EINT_MASK 0x2000 /* PPM_USB_CURR_EINT */ +#define WM831X_PPM_USB_CURR_EINT_SHIFT 13 /* PPM_USB_CURR_EINT */ +#define WM831X_PPM_USB_CURR_EINT_WIDTH 1 /* PPM_USB_CURR_EINT */ +#define WM831X_ON_PIN_EINT 0x1000 /* ON_PIN_EINT */ +#define WM831X_ON_PIN_EINT_MASK 0x1000 /* ON_PIN_EINT */ +#define WM831X_ON_PIN_EINT_SHIFT 12 /* ON_PIN_EINT */ +#define WM831X_ON_PIN_EINT_WIDTH 1 /* ON_PIN_EINT */ +#define WM831X_WDOG_TO_EINT 0x0800 /* WDOG_TO_EINT */ +#define WM831X_WDOG_TO_EINT_MASK 0x0800 /* WDOG_TO_EINT */ +#define WM831X_WDOG_TO_EINT_SHIFT 11 /* WDOG_TO_EINT */ +#define WM831X_WDOG_TO_EINT_WIDTH 1 /* WDOG_TO_EINT */ +#define WM831X_TCHDATA_EINT 0x0400 /* TCHDATA_EINT */ +#define WM831X_TCHDATA_EINT_MASK 0x0400 /* TCHDATA_EINT */ +#define WM831X_TCHDATA_EINT_SHIFT 10 /* TCHDATA_EINT */ +#define WM831X_TCHDATA_EINT_WIDTH 1 /* TCHDATA_EINT */ +#define WM831X_TCHPD_EINT 0x0200 /* TCHPD_EINT */ +#define WM831X_TCHPD_EINT_MASK 0x0200 /* TCHPD_EINT */ +#define WM831X_TCHPD_EINT_SHIFT 9 /* TCHPD_EINT */ +#define WM831X_TCHPD_EINT_WIDTH 1 /* TCHPD_EINT */ +#define WM831X_AUXADC_DATA_EINT 0x0100 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DATA_EINT_MASK 0x0100 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DATA_EINT_SHIFT 8 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DATA_EINT_WIDTH 1 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT 0x0080 /* 
AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT_MASK 0x0080 /* AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT_SHIFT 7 /* AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT_WIDTH 1 /* AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT 0x0040 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT_MASK 0x0040 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT_SHIFT 6 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT_WIDTH 1 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT 0x0020 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT_MASK 0x0020 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT_SHIFT 5 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT_WIDTH 1 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT 0x0010 /* AUXADC_DCOMP1_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT_MASK 0x0010 /* AUXADC_DCOMP1_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT_SHIFT 4 /* AUXADC_DCOMP1_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT_WIDTH 1 /* AUXADC_DCOMP1_EINT */ +#define WM831X_RTC_PER_EINT 0x0008 /* RTC_PER_EINT */ +#define WM831X_RTC_PER_EINT_MASK 0x0008 /* RTC_PER_EINT */ +#define WM831X_RTC_PER_EINT_SHIFT 3 /* RTC_PER_EINT */ +#define WM831X_RTC_PER_EINT_WIDTH 1 /* RTC_PER_EINT */ +#define WM831X_RTC_ALM_EINT 0x0004 /* RTC_ALM_EINT */ +#define WM831X_RTC_ALM_EINT_MASK 0x0004 /* RTC_ALM_EINT */ +#define WM831X_RTC_ALM_EINT_SHIFT 2 /* RTC_ALM_EINT */ +#define WM831X_RTC_ALM_EINT_WIDTH 1 /* RTC_ALM_EINT */ +#define WM831X_TEMP_THW_EINT 0x0002 /* TEMP_THW_EINT */ +#define WM831X_TEMP_THW_EINT_MASK 0x0002 /* TEMP_THW_EINT */ +#define WM831X_TEMP_THW_EINT_SHIFT 1 /* TEMP_THW_EINT */ +#define WM831X_TEMP_THW_EINT_WIDTH 1 /* TEMP_THW_EINT */ + +/* + * R16402 (0x4012) - Interrupt Status 2 + */ +#define WM831X_CHG_BATT_HOT_EINT 0x8000 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_HOT_EINT_MASK 0x8000 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_HOT_EINT_SHIFT 15 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_HOT_EINT_WIDTH 1 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_COLD_EINT 0x4000 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_COLD_EINT_MASK 0x4000 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_COLD_EINT_SHIFT 14 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_COLD_EINT_WIDTH 1 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT 0x2000 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT_MASK 0x2000 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT_SHIFT 13 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT_WIDTH 1 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_OV_EINT 0x1000 /* CHG_OV_EINT */ +#define WM831X_CHG_OV_EINT_MASK 0x1000 /* CHG_OV_EINT */ +#define WM831X_CHG_OV_EINT_SHIFT 12 /* CHG_OV_EINT */ +#define WM831X_CHG_OV_EINT_WIDTH 1 /* CHG_OV_EINT */ +#define WM831X_CHG_END_EINT 0x0800 /* CHG_END_EINT */ +#define WM831X_CHG_END_EINT_MASK 0x0800 /* CHG_END_EINT */ +#define WM831X_CHG_END_EINT_SHIFT 11 /* CHG_END_EINT */ +#define WM831X_CHG_END_EINT_WIDTH 1 /* CHG_END_EINT */ +#define WM831X_CHG_TO_EINT 0x0400 /* CHG_TO_EINT */ +#define WM831X_CHG_TO_EINT_MASK 0x0400 /* CHG_TO_EINT */ +#define WM831X_CHG_TO_EINT_SHIFT 10 /* CHG_TO_EINT */ +#define WM831X_CHG_TO_EINT_WIDTH 1 /* CHG_TO_EINT */ +#define WM831X_CHG_MODE_EINT 0x0200 /* CHG_MODE_EINT */ +#define WM831X_CHG_MODE_EINT_MASK 0x0200 /* CHG_MODE_EINT */ +#define WM831X_CHG_MODE_EINT_SHIFT 9 /* CHG_MODE_EINT */ +#define WM831X_CHG_MODE_EINT_WIDTH 1 /* CHG_MODE_EINT */ +#define WM831X_CHG_START_EINT 
0x0100 /* CHG_START_EINT */ +#define WM831X_CHG_START_EINT_MASK 0x0100 /* CHG_START_EINT */ +#define WM831X_CHG_START_EINT_SHIFT 8 /* CHG_START_EINT */ +#define WM831X_CHG_START_EINT_WIDTH 1 /* CHG_START_EINT */ +#define WM831X_CS2_EINT 0x0080 /* CS2_EINT */ +#define WM831X_CS2_EINT_MASK 0x0080 /* CS2_EINT */ +#define WM831X_CS2_EINT_SHIFT 7 /* CS2_EINT */ +#define WM831X_CS2_EINT_WIDTH 1 /* CS2_EINT */ +#define WM831X_CS1_EINT 0x0040 /* CS1_EINT */ +#define WM831X_CS1_EINT_MASK 0x0040 /* CS1_EINT */ +#define WM831X_CS1_EINT_SHIFT 6 /* CS1_EINT */ +#define WM831X_CS1_EINT_WIDTH 1 /* CS1_EINT */ +#define WM831X_OTP_CMD_END_EINT 0x0020 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_CMD_END_EINT_MASK 0x0020 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_CMD_END_EINT_SHIFT 5 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_CMD_END_EINT_WIDTH 1 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_ERR_EINT 0x0010 /* OTP_ERR_EINT */ +#define WM831X_OTP_ERR_EINT_MASK 0x0010 /* OTP_ERR_EINT */ +#define WM831X_OTP_ERR_EINT_SHIFT 4 /* OTP_ERR_EINT */ +#define WM831X_OTP_ERR_EINT_WIDTH 1 /* OTP_ERR_EINT */ +#define WM831X_PS_POR_EINT 0x0004 /* PS_POR_EINT */ +#define WM831X_PS_POR_EINT_MASK 0x0004 /* PS_POR_EINT */ +#define WM831X_PS_POR_EINT_SHIFT 2 /* PS_POR_EINT */ +#define WM831X_PS_POR_EINT_WIDTH 1 /* PS_POR_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT 0x0002 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT_MASK 0x0002 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT_SHIFT 1 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT_WIDTH 1 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_ON_WAKE_EINT 0x0001 /* PS_ON_WAKE_EINT */ +#define WM831X_PS_ON_WAKE_EINT_MASK 0x0001 /* PS_ON_WAKE_EINT */ +#define WM831X_PS_ON_WAKE_EINT_SHIFT 0 /* PS_ON_WAKE_EINT */ +#define WM831X_PS_ON_WAKE_EINT_WIDTH 1 /* PS_ON_WAKE_EINT */ + +/* + * R16403 (0x4013) - Interrupt Status 3 + */ +#define WM831X_UV_LDO10_EINT 0x0200 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO10_EINT_MASK 0x0200 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO10_EINT_SHIFT 9 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO10_EINT_WIDTH 1 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO9_EINT 0x0100 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO9_EINT_MASK 0x0100 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO9_EINT_SHIFT 8 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO9_EINT_WIDTH 1 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO8_EINT 0x0080 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO8_EINT_MASK 0x0080 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO8_EINT_SHIFT 7 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO8_EINT_WIDTH 1 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO7_EINT 0x0040 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO7_EINT_MASK 0x0040 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO7_EINT_SHIFT 6 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO7_EINT_WIDTH 1 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO6_EINT 0x0020 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO6_EINT_MASK 0x0020 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO6_EINT_SHIFT 5 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO6_EINT_WIDTH 1 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO5_EINT 0x0010 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO5_EINT_MASK 0x0010 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO5_EINT_SHIFT 4 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO5_EINT_WIDTH 1 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO4_EINT 0x0008 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO4_EINT_MASK 0x0008 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO4_EINT_SHIFT 3 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO4_EINT_WIDTH 1 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO3_EINT 0x0004 /* UV_LDO3_EINT */ +#define 
WM831X_UV_LDO3_EINT_MASK 0x0004 /* UV_LDO3_EINT */ +#define WM831X_UV_LDO3_EINT_SHIFT 2 /* UV_LDO3_EINT */ +#define WM831X_UV_LDO3_EINT_WIDTH 1 /* UV_LDO3_EINT */ +#define WM831X_UV_LDO2_EINT 0x0002 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO2_EINT_MASK 0x0002 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO2_EINT_SHIFT 1 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO2_EINT_WIDTH 1 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO1_EINT 0x0001 /* UV_LDO1_EINT */ +#define WM831X_UV_LDO1_EINT_MASK 0x0001 /* UV_LDO1_EINT */ +#define WM831X_UV_LDO1_EINT_SHIFT 0 /* UV_LDO1_EINT */ +#define WM831X_UV_LDO1_EINT_WIDTH 1 /* UV_LDO1_EINT */ + +/* + * R16404 (0x4014) - Interrupt Status 4 + */ +#define WM831X_HC_DC2_EINT 0x0200 /* HC_DC2_EINT */ +#define WM831X_HC_DC2_EINT_MASK 0x0200 /* HC_DC2_EINT */ +#define WM831X_HC_DC2_EINT_SHIFT 9 /* HC_DC2_EINT */ +#define WM831X_HC_DC2_EINT_WIDTH 1 /* HC_DC2_EINT */ +#define WM831X_HC_DC1_EINT 0x0100 /* HC_DC1_EINT */ +#define WM831X_HC_DC1_EINT_MASK 0x0100 /* HC_DC1_EINT */ +#define WM831X_HC_DC1_EINT_SHIFT 8 /* HC_DC1_EINT */ +#define WM831X_HC_DC1_EINT_WIDTH 1 /* HC_DC1_EINT */ +#define WM831X_UV_DC4_EINT 0x0008 /* UV_DC4_EINT */ +#define WM831X_UV_DC4_EINT_MASK 0x0008 /* UV_DC4_EINT */ +#define WM831X_UV_DC4_EINT_SHIFT 3 /* UV_DC4_EINT */ +#define WM831X_UV_DC4_EINT_WIDTH 1 /* UV_DC4_EINT */ +#define WM831X_UV_DC3_EINT 0x0004 /* UV_DC3_EINT */ +#define WM831X_UV_DC3_EINT_MASK 0x0004 /* UV_DC3_EINT */ +#define WM831X_UV_DC3_EINT_SHIFT 2 /* UV_DC3_EINT */ +#define WM831X_UV_DC3_EINT_WIDTH 1 /* UV_DC3_EINT */ +#define WM831X_UV_DC2_EINT 0x0002 /* UV_DC2_EINT */ +#define WM831X_UV_DC2_EINT_MASK 0x0002 /* UV_DC2_EINT */ +#define WM831X_UV_DC2_EINT_SHIFT 1 /* UV_DC2_EINT */ +#define WM831X_UV_DC2_EINT_WIDTH 1 /* UV_DC2_EINT */ +#define WM831X_UV_DC1_EINT 0x0001 /* UV_DC1_EINT */ +#define WM831X_UV_DC1_EINT_MASK 0x0001 /* UV_DC1_EINT */ +#define WM831X_UV_DC1_EINT_SHIFT 0 /* UV_DC1_EINT */ +#define WM831X_UV_DC1_EINT_WIDTH 1 /* UV_DC1_EINT */ + +/* + * R16405 (0x4015) - Interrupt Status 5 + */ +#define WM831X_GP16_EINT 0x8000 /* GP16_EINT */ +#define WM831X_GP16_EINT_MASK 0x8000 /* GP16_EINT */ +#define WM831X_GP16_EINT_SHIFT 15 /* GP16_EINT */ +#define WM831X_GP16_EINT_WIDTH 1 /* GP16_EINT */ +#define WM831X_GP15_EINT 0x4000 /* GP15_EINT */ +#define WM831X_GP15_EINT_MASK 0x4000 /* GP15_EINT */ +#define WM831X_GP15_EINT_SHIFT 14 /* GP15_EINT */ +#define WM831X_GP15_EINT_WIDTH 1 /* GP15_EINT */ +#define WM831X_GP14_EINT 0x2000 /* GP14_EINT */ +#define WM831X_GP14_EINT_MASK 0x2000 /* GP14_EINT */ +#define WM831X_GP14_EINT_SHIFT 13 /* GP14_EINT */ +#define WM831X_GP14_EINT_WIDTH 1 /* GP14_EINT */ +#define WM831X_GP13_EINT 0x1000 /* GP13_EINT */ +#define WM831X_GP13_EINT_MASK 0x1000 /* GP13_EINT */ +#define WM831X_GP13_EINT_SHIFT 12 /* GP13_EINT */ +#define WM831X_GP13_EINT_WIDTH 1 /* GP13_EINT */ +#define WM831X_GP12_EINT 0x0800 /* GP12_EINT */ +#define WM831X_GP12_EINT_MASK 0x0800 /* GP12_EINT */ +#define WM831X_GP12_EINT_SHIFT 11 /* GP12_EINT */ +#define WM831X_GP12_EINT_WIDTH 1 /* GP12_EINT */ +#define WM831X_GP11_EINT 0x0400 /* GP11_EINT */ +#define WM831X_GP11_EINT_MASK 0x0400 /* GP11_EINT */ +#define WM831X_GP11_EINT_SHIFT 10 /* GP11_EINT */ +#define WM831X_GP11_EINT_WIDTH 1 /* GP11_EINT */ +#define WM831X_GP10_EINT 0x0200 /* GP10_EINT */ +#define WM831X_GP10_EINT_MASK 0x0200 /* GP10_EINT */ +#define WM831X_GP10_EINT_SHIFT 9 /* GP10_EINT */ +#define WM831X_GP10_EINT_WIDTH 1 /* GP10_EINT */ +#define WM831X_GP9_EINT 0x0100 /* GP9_EINT */ +#define WM831X_GP9_EINT_MASK 0x0100 /* 
GP9_EINT */ +#define WM831X_GP9_EINT_SHIFT 8 /* GP9_EINT */ +#define WM831X_GP9_EINT_WIDTH 1 /* GP9_EINT */ +#define WM831X_GP8_EINT 0x0080 /* GP8_EINT */ +#define WM831X_GP8_EINT_MASK 0x0080 /* GP8_EINT */ +#define WM831X_GP8_EINT_SHIFT 7 /* GP8_EINT */ +#define WM831X_GP8_EINT_WIDTH 1 /* GP8_EINT */ +#define WM831X_GP7_EINT 0x0040 /* GP7_EINT */ +#define WM831X_GP7_EINT_MASK 0x0040 /* GP7_EINT */ +#define WM831X_GP7_EINT_SHIFT 6 /* GP7_EINT */ +#define WM831X_GP7_EINT_WIDTH 1 /* GP7_EINT */ +#define WM831X_GP6_EINT 0x0020 /* GP6_EINT */ +#define WM831X_GP6_EINT_MASK 0x0020 /* GP6_EINT */ +#define WM831X_GP6_EINT_SHIFT 5 /* GP6_EINT */ +#define WM831X_GP6_EINT_WIDTH 1 /* GP6_EINT */ +#define WM831X_GP5_EINT 0x0010 /* GP5_EINT */ +#define WM831X_GP5_EINT_MASK 0x0010 /* GP5_EINT */ +#define WM831X_GP5_EINT_SHIFT 4 /* GP5_EINT */ +#define WM831X_GP5_EINT_WIDTH 1 /* GP5_EINT */ +#define WM831X_GP4_EINT 0x0008 /* GP4_EINT */ +#define WM831X_GP4_EINT_MASK 0x0008 /* GP4_EINT */ +#define WM831X_GP4_EINT_SHIFT 3 /* GP4_EINT */ +#define WM831X_GP4_EINT_WIDTH 1 /* GP4_EINT */ +#define WM831X_GP3_EINT 0x0004 /* GP3_EINT */ +#define WM831X_GP3_EINT_MASK 0x0004 /* GP3_EINT */ +#define WM831X_GP3_EINT_SHIFT 2 /* GP3_EINT */ +#define WM831X_GP3_EINT_WIDTH 1 /* GP3_EINT */ +#define WM831X_GP2_EINT 0x0002 /* GP2_EINT */ +#define WM831X_GP2_EINT_MASK 0x0002 /* GP2_EINT */ +#define WM831X_GP2_EINT_SHIFT 1 /* GP2_EINT */ +#define WM831X_GP2_EINT_WIDTH 1 /* GP2_EINT */ +#define WM831X_GP1_EINT 0x0001 /* GP1_EINT */ +#define WM831X_GP1_EINT_MASK 0x0001 /* GP1_EINT */ +#define WM831X_GP1_EINT_SHIFT 0 /* GP1_EINT */ +#define WM831X_GP1_EINT_WIDTH 1 /* GP1_EINT */ + +/* + * R16407 (0x4017) - IRQ Config + */ +#define WM831X_IRQ_OD 0x0002 /* IRQ_OD */ +#define WM831X_IRQ_OD_MASK 0x0002 /* IRQ_OD */ +#define WM831X_IRQ_OD_SHIFT 1 /* IRQ_OD */ +#define WM831X_IRQ_OD_WIDTH 1 /* IRQ_OD */ +#define WM831X_IM_IRQ 0x0001 /* IM_IRQ */ +#define WM831X_IM_IRQ_MASK 0x0001 /* IM_IRQ */ +#define WM831X_IM_IRQ_SHIFT 0 /* IM_IRQ */ +#define WM831X_IM_IRQ_WIDTH 1 /* IM_IRQ */ + +/* + * R16408 (0x4018) - System Interrupts Mask + */ +#define WM831X_IM_PS_INT 0x8000 /* IM_PS_INT */ +#define WM831X_IM_PS_INT_MASK 0x8000 /* IM_PS_INT */ +#define WM831X_IM_PS_INT_SHIFT 15 /* IM_PS_INT */ +#define WM831X_IM_PS_INT_WIDTH 1 /* IM_PS_INT */ +#define WM831X_IM_TEMP_INT 0x4000 /* IM_TEMP_INT */ +#define WM831X_IM_TEMP_INT_MASK 0x4000 /* IM_TEMP_INT */ +#define WM831X_IM_TEMP_INT_SHIFT 14 /* IM_TEMP_INT */ +#define WM831X_IM_TEMP_INT_WIDTH 1 /* IM_TEMP_INT */ +#define WM831X_IM_GP_INT 0x2000 /* IM_GP_INT */ +#define WM831X_IM_GP_INT_MASK 0x2000 /* IM_GP_INT */ +#define WM831X_IM_GP_INT_SHIFT 13 /* IM_GP_INT */ +#define WM831X_IM_GP_INT_WIDTH 1 /* IM_GP_INT */ +#define WM831X_IM_ON_PIN_INT 0x1000 /* IM_ON_PIN_INT */ +#define WM831X_IM_ON_PIN_INT_MASK 0x1000 /* IM_ON_PIN_INT */ +#define WM831X_IM_ON_PIN_INT_SHIFT 12 /* IM_ON_PIN_INT */ +#define WM831X_IM_ON_PIN_INT_WIDTH 1 /* IM_ON_PIN_INT */ +#define WM831X_IM_WDOG_INT 0x0800 /* IM_WDOG_INT */ +#define WM831X_IM_WDOG_INT_MASK 0x0800 /* IM_WDOG_INT */ +#define WM831X_IM_WDOG_INT_SHIFT 11 /* IM_WDOG_INT */ +#define WM831X_IM_WDOG_INT_WIDTH 1 /* IM_WDOG_INT */ +#define WM831X_IM_TCHDATA_INT 0x0400 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHDATA_INT_MASK 0x0400 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHDATA_INT_SHIFT 10 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHDATA_INT_WIDTH 1 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHPD_INT 0x0200 /* IM_TCHPD_INT */ +#define WM831X_IM_TCHPD_INT_MASK 0x0200 /* 
IM_TCHPD_INT */ +#define WM831X_IM_TCHPD_INT_SHIFT 9 /* IM_TCHPD_INT */ +#define WM831X_IM_TCHPD_INT_WIDTH 1 /* IM_TCHPD_INT */ +#define WM831X_IM_AUXADC_INT 0x0100 /* IM_AUXADC_INT */ +#define WM831X_IM_AUXADC_INT_MASK 0x0100 /* IM_AUXADC_INT */ +#define WM831X_IM_AUXADC_INT_SHIFT 8 /* IM_AUXADC_INT */ +#define WM831X_IM_AUXADC_INT_WIDTH 1 /* IM_AUXADC_INT */ +#define WM831X_IM_PPM_INT 0x0080 /* IM_PPM_INT */ +#define WM831X_IM_PPM_INT_MASK 0x0080 /* IM_PPM_INT */ +#define WM831X_IM_PPM_INT_SHIFT 7 /* IM_PPM_INT */ +#define WM831X_IM_PPM_INT_WIDTH 1 /* IM_PPM_INT */ +#define WM831X_IM_CS_INT 0x0040 /* IM_CS_INT */ +#define WM831X_IM_CS_INT_MASK 0x0040 /* IM_CS_INT */ +#define WM831X_IM_CS_INT_SHIFT 6 /* IM_CS_INT */ +#define WM831X_IM_CS_INT_WIDTH 1 /* IM_CS_INT */ +#define WM831X_IM_RTC_INT 0x0020 /* IM_RTC_INT */ +#define WM831X_IM_RTC_INT_MASK 0x0020 /* IM_RTC_INT */ +#define WM831X_IM_RTC_INT_SHIFT 5 /* IM_RTC_INT */ +#define WM831X_IM_RTC_INT_WIDTH 1 /* IM_RTC_INT */ +#define WM831X_IM_OTP_INT 0x0010 /* IM_OTP_INT */ +#define WM831X_IM_OTP_INT_MASK 0x0010 /* IM_OTP_INT */ +#define WM831X_IM_OTP_INT_SHIFT 4 /* IM_OTP_INT */ +#define WM831X_IM_OTP_INT_WIDTH 1 /* IM_OTP_INT */ +#define WM831X_IM_CHILD_INT 0x0008 /* IM_CHILD_INT */ +#define WM831X_IM_CHILD_INT_MASK 0x0008 /* IM_CHILD_INT */ +#define WM831X_IM_CHILD_INT_SHIFT 3 /* IM_CHILD_INT */ +#define WM831X_IM_CHILD_INT_WIDTH 1 /* IM_CHILD_INT */ +#define WM831X_IM_CHG_INT 0x0004 /* IM_CHG_INT */ +#define WM831X_IM_CHG_INT_MASK 0x0004 /* IM_CHG_INT */ +#define WM831X_IM_CHG_INT_SHIFT 2 /* IM_CHG_INT */ +#define WM831X_IM_CHG_INT_WIDTH 1 /* IM_CHG_INT */ +#define WM831X_IM_HC_INT 0x0002 /* IM_HC_INT */ +#define WM831X_IM_HC_INT_MASK 0x0002 /* IM_HC_INT */ +#define WM831X_IM_HC_INT_SHIFT 1 /* IM_HC_INT */ +#define WM831X_IM_HC_INT_WIDTH 1 /* IM_HC_INT */ +#define WM831X_IM_UV_INT 0x0001 /* IM_UV_INT */ +#define WM831X_IM_UV_INT_MASK 0x0001 /* IM_UV_INT */ +#define WM831X_IM_UV_INT_SHIFT 0 /* IM_UV_INT */ +#define WM831X_IM_UV_INT_WIDTH 1 /* IM_UV_INT */ + +/* + * R16409 (0x4019) - Interrupt Status 1 Mask + */ +#define WM831X_IM_PPM_SYSLO_EINT 0x8000 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_SYSLO_EINT_MASK 0x8000 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_SYSLO_EINT_SHIFT 15 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_SYSLO_EINT_WIDTH 1 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT 0x4000 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT_MASK 0x4000 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT_SHIFT 14 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT_WIDTH 1 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT 0x2000 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT_MASK 0x2000 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT_SHIFT 13 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT_WIDTH 1 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_ON_PIN_EINT 0x1000 /* IM_ON_PIN_EINT */ +#define WM831X_IM_ON_PIN_EINT_MASK 0x1000 /* IM_ON_PIN_EINT */ +#define WM831X_IM_ON_PIN_EINT_SHIFT 12 /* IM_ON_PIN_EINT */ +#define WM831X_IM_ON_PIN_EINT_WIDTH 1 /* IM_ON_PIN_EINT */ +#define WM831X_IM_WDOG_TO_EINT 0x0800 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_WDOG_TO_EINT_MASK 0x0800 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_WDOG_TO_EINT_SHIFT 11 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_WDOG_TO_EINT_WIDTH 1 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_TCHDATA_EINT 0x0400 /* IM_TCHDATA_EINT */ +#define WM831X_IM_TCHDATA_EINT_MASK 0x0400 /* 
IM_TCHDATA_EINT */ +#define WM831X_IM_TCHDATA_EINT_SHIFT 10 /* IM_TCHDATA_EINT */ +#define WM831X_IM_TCHDATA_EINT_WIDTH 1 /* IM_TCHDATA_EINT */ +#define WM831X_IM_TCHPD_EINT 0x0200 /* IM_TCHPD_EINT */ +#define WM831X_IM_TCHPD_EINT_MASK 0x0200 /* IM_TCHPD_EINT */ +#define WM831X_IM_TCHPD_EINT_SHIFT 9 /* IM_TCHPD_EINT */ +#define WM831X_IM_TCHPD_EINT_WIDTH 1 /* IM_TCHPD_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT 0x0100 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT_MASK 0x0100 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT_SHIFT 8 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT_WIDTH 1 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT 0x0080 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT_MASK 0x0080 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT_SHIFT 7 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT_WIDTH 1 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT 0x0040 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT_MASK 0x0040 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT_SHIFT 6 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT_WIDTH 1 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT 0x0020 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT_MASK 0x0020 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT_SHIFT 5 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT_WIDTH 1 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT 0x0010 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT_MASK 0x0010 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT_SHIFT 4 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT_WIDTH 1 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_RTC_PER_EINT 0x0008 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_PER_EINT_MASK 0x0008 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_PER_EINT_SHIFT 3 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_PER_EINT_WIDTH 1 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_ALM_EINT 0x0004 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_RTC_ALM_EINT_MASK 0x0004 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_RTC_ALM_EINT_SHIFT 2 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_RTC_ALM_EINT_WIDTH 1 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_TEMP_THW_EINT 0x0002 /* IM_TEMP_THW_EINT */ +#define WM831X_IM_TEMP_THW_EINT_MASK 0x0002 /* IM_TEMP_THW_EINT */ +#define WM831X_IM_TEMP_THW_EINT_SHIFT 1 /* IM_TEMP_THW_EINT */ +#define WM831X_IM_TEMP_THW_EINT_WIDTH 1 /* IM_TEMP_THW_EINT */ + +/* + * R16410 (0x401A) - Interrupt Status 2 Mask + */ +#define WM831X_IM_CHG_BATT_HOT_EINT 0x8000 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_HOT_EINT_MASK 0x8000 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_HOT_EINT_SHIFT 15 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_HOT_EINT_WIDTH 1 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT 0x4000 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT_MASK 0x4000 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT_SHIFT 14 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT_WIDTH 1 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT 0x2000 /* IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT_MASK 0x2000 /* IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT_SHIFT 13 /* IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT_WIDTH 1 /* 
IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_OV_EINT 0x1000 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_OV_EINT_MASK 0x1000 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_OV_EINT_SHIFT 12 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_OV_EINT_WIDTH 1 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_END_EINT 0x0800 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_END_EINT_MASK 0x0800 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_END_EINT_SHIFT 11 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_END_EINT_WIDTH 1 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_TO_EINT 0x0400 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_TO_EINT_MASK 0x0400 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_TO_EINT_SHIFT 10 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_TO_EINT_WIDTH 1 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_MODE_EINT 0x0200 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_MODE_EINT_MASK 0x0200 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_MODE_EINT_SHIFT 9 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_MODE_EINT_WIDTH 1 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_START_EINT 0x0100 /* IM_CHG_START_EINT */ +#define WM831X_IM_CHG_START_EINT_MASK 0x0100 /* IM_CHG_START_EINT */ +#define WM831X_IM_CHG_START_EINT_SHIFT 8 /* IM_CHG_START_EINT */ +#define WM831X_IM_CHG_START_EINT_WIDTH 1 /* IM_CHG_START_EINT */ +#define WM831X_IM_CS2_EINT 0x0080 /* IM_CS2_EINT */ +#define WM831X_IM_CS2_EINT_MASK 0x0080 /* IM_CS2_EINT */ +#define WM831X_IM_CS2_EINT_SHIFT 7 /* IM_CS2_EINT */ +#define WM831X_IM_CS2_EINT_WIDTH 1 /* IM_CS2_EINT */ +#define WM831X_IM_CS1_EINT 0x0040 /* IM_CS1_EINT */ +#define WM831X_IM_CS1_EINT_MASK 0x0040 /* IM_CS1_EINT */ +#define WM831X_IM_CS1_EINT_SHIFT 6 /* IM_CS1_EINT */ +#define WM831X_IM_CS1_EINT_WIDTH 1 /* IM_CS1_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT 0x0020 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT_MASK 0x0020 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT_SHIFT 5 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT_WIDTH 1 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_ERR_EINT 0x0010 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_OTP_ERR_EINT_MASK 0x0010 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_OTP_ERR_EINT_SHIFT 4 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_OTP_ERR_EINT_WIDTH 1 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_PS_POR_EINT 0x0004 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_POR_EINT_MASK 0x0004 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_POR_EINT_SHIFT 2 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_POR_EINT_WIDTH 1 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT 0x0002 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT_MASK 0x0002 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT_SHIFT 1 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT_WIDTH 1 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT 0x0001 /* IM_PS_ON_WAKE_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT_MASK 0x0001 /* IM_PS_ON_WAKE_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT_SHIFT 0 /* IM_PS_ON_WAKE_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT_WIDTH 1 /* IM_PS_ON_WAKE_EINT */ + +/* + * R16411 (0x401B) - Interrupt Status 3 Mask + */ +#define WM831X_IM_UV_LDO10_EINT 0x0200 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO10_EINT_MASK 0x0200 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO10_EINT_SHIFT 9 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO10_EINT_WIDTH 1 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO9_EINT 0x0100 /* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO9_EINT_MASK 0x0100 /* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO9_EINT_SHIFT 8 
/* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO9_EINT_WIDTH 1 /* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO8_EINT 0x0080 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO8_EINT_MASK 0x0080 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO8_EINT_SHIFT 7 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO8_EINT_WIDTH 1 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO7_EINT 0x0040 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO7_EINT_MASK 0x0040 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO7_EINT_SHIFT 6 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO7_EINT_WIDTH 1 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO6_EINT 0x0020 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO6_EINT_MASK 0x0020 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO6_EINT_SHIFT 5 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO6_EINT_WIDTH 1 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO5_EINT 0x0010 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO5_EINT_MASK 0x0010 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO5_EINT_SHIFT 4 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO5_EINT_WIDTH 1 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO4_EINT 0x0008 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO4_EINT_MASK 0x0008 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO4_EINT_SHIFT 3 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO4_EINT_WIDTH 1 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO3_EINT 0x0004 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO3_EINT_MASK 0x0004 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO3_EINT_SHIFT 2 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO3_EINT_WIDTH 1 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO2_EINT 0x0002 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO2_EINT_MASK 0x0002 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO2_EINT_SHIFT 1 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO2_EINT_WIDTH 1 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO1_EINT 0x0001 /* IM_UV_LDO1_EINT */ +#define WM831X_IM_UV_LDO1_EINT_MASK 0x0001 /* IM_UV_LDO1_EINT */ +#define WM831X_IM_UV_LDO1_EINT_SHIFT 0 /* IM_UV_LDO1_EINT */ +#define WM831X_IM_UV_LDO1_EINT_WIDTH 1 /* IM_UV_LDO1_EINT */ + +/* + * R16412 (0x401C) - Interrupt Status 4 Mask + */ +#define WM831X_IM_HC_DC2_EINT 0x0200 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC2_EINT_MASK 0x0200 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC2_EINT_SHIFT 9 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC2_EINT_WIDTH 1 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC1_EINT 0x0100 /* IM_HC_DC1_EINT */ +#define WM831X_IM_HC_DC1_EINT_MASK 0x0100 /* IM_HC_DC1_EINT */ +#define WM831X_IM_HC_DC1_EINT_SHIFT 8 /* IM_HC_DC1_EINT */ +#define WM831X_IM_HC_DC1_EINT_WIDTH 1 /* IM_HC_DC1_EINT */ +#define WM831X_IM_UV_DC4_EINT 0x0008 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC4_EINT_MASK 0x0008 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC4_EINT_SHIFT 3 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC4_EINT_WIDTH 1 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC3_EINT 0x0004 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC3_EINT_MASK 0x0004 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC3_EINT_SHIFT 2 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC3_EINT_WIDTH 1 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC2_EINT 0x0002 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC2_EINT_MASK 0x0002 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC2_EINT_SHIFT 1 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC2_EINT_WIDTH 1 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC1_EINT 0x0001 /* IM_UV_DC1_EINT */ +#define WM831X_IM_UV_DC1_EINT_MASK 0x0001 /* IM_UV_DC1_EINT */ +#define WM831X_IM_UV_DC1_EINT_SHIFT 0 /* IM_UV_DC1_EINT */ 
+#define WM831X_IM_UV_DC1_EINT_WIDTH 1 /* IM_UV_DC1_EINT */ + +/* + * R16413 (0x401D) - Interrupt Status 5 Mask + */ +#define WM831X_IM_GP16_EINT 0x8000 /* IM_GP16_EINT */ +#define WM831X_IM_GP16_EINT_MASK 0x8000 /* IM_GP16_EINT */ +#define WM831X_IM_GP16_EINT_SHIFT 15 /* IM_GP16_EINT */ +#define WM831X_IM_GP16_EINT_WIDTH 1 /* IM_GP16_EINT */ +#define WM831X_IM_GP15_EINT 0x4000 /* IM_GP15_EINT */ +#define WM831X_IM_GP15_EINT_MASK 0x4000 /* IM_GP15_EINT */ +#define WM831X_IM_GP15_EINT_SHIFT 14 /* IM_GP15_EINT */ +#define WM831X_IM_GP15_EINT_WIDTH 1 /* IM_GP15_EINT */ +#define WM831X_IM_GP14_EINT 0x2000 /* IM_GP14_EINT */ +#define WM831X_IM_GP14_EINT_MASK 0x2000 /* IM_GP14_EINT */ +#define WM831X_IM_GP14_EINT_SHIFT 13 /* IM_GP14_EINT */ +#define WM831X_IM_GP14_EINT_WIDTH 1 /* IM_GP14_EINT */ +#define WM831X_IM_GP13_EINT 0x1000 /* IM_GP13_EINT */ +#define WM831X_IM_GP13_EINT_MASK 0x1000 /* IM_GP13_EINT */ +#define WM831X_IM_GP13_EINT_SHIFT 12 /* IM_GP13_EINT */ +#define WM831X_IM_GP13_EINT_WIDTH 1 /* IM_GP13_EINT */ +#define WM831X_IM_GP12_EINT 0x0800 /* IM_GP12_EINT */ +#define WM831X_IM_GP12_EINT_MASK 0x0800 /* IM_GP12_EINT */ +#define WM831X_IM_GP12_EINT_SHIFT 11 /* IM_GP12_EINT */ +#define WM831X_IM_GP12_EINT_WIDTH 1 /* IM_GP12_EINT */ +#define WM831X_IM_GP11_EINT 0x0400 /* IM_GP11_EINT */ +#define WM831X_IM_GP11_EINT_MASK 0x0400 /* IM_GP11_EINT */ +#define WM831X_IM_GP11_EINT_SHIFT 10 /* IM_GP11_EINT */ +#define WM831X_IM_GP11_EINT_WIDTH 1 /* IM_GP11_EINT */ +#define WM831X_IM_GP10_EINT 0x0200 /* IM_GP10_EINT */ +#define WM831X_IM_GP10_EINT_MASK 0x0200 /* IM_GP10_EINT */ +#define WM831X_IM_GP10_EINT_SHIFT 9 /* IM_GP10_EINT */ +#define WM831X_IM_GP10_EINT_WIDTH 1 /* IM_GP10_EINT */ +#define WM831X_IM_GP9_EINT 0x0100 /* IM_GP9_EINT */ +#define WM831X_IM_GP9_EINT_MASK 0x0100 /* IM_GP9_EINT */ +#define WM831X_IM_GP9_EINT_SHIFT 8 /* IM_GP9_EINT */ +#define WM831X_IM_GP9_EINT_WIDTH 1 /* IM_GP9_EINT */ +#define WM831X_IM_GP8_EINT 0x0080 /* IM_GP8_EINT */ +#define WM831X_IM_GP8_EINT_MASK 0x0080 /* IM_GP8_EINT */ +#define WM831X_IM_GP8_EINT_SHIFT 7 /* IM_GP8_EINT */ +#define WM831X_IM_GP8_EINT_WIDTH 1 /* IM_GP8_EINT */ +#define WM831X_IM_GP7_EINT 0x0040 /* IM_GP7_EINT */ +#define WM831X_IM_GP7_EINT_MASK 0x0040 /* IM_GP7_EINT */ +#define WM831X_IM_GP7_EINT_SHIFT 6 /* IM_GP7_EINT */ +#define WM831X_IM_GP7_EINT_WIDTH 1 /* IM_GP7_EINT */ +#define WM831X_IM_GP6_EINT 0x0020 /* IM_GP6_EINT */ +#define WM831X_IM_GP6_EINT_MASK 0x0020 /* IM_GP6_EINT */ +#define WM831X_IM_GP6_EINT_SHIFT 5 /* IM_GP6_EINT */ +#define WM831X_IM_GP6_EINT_WIDTH 1 /* IM_GP6_EINT */ +#define WM831X_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */ +#define WM831X_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */ +#define WM831X_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */ +#define WM831X_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */ +#define WM831X_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */ +#define WM831X_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */ +#define WM831X_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */ +#define WM831X_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */ +#define WM831X_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */ +#define WM831X_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */ +#define WM831X_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */ +#define WM831X_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */ +#define WM831X_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */ +#define WM831X_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */ +#define WM831X_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */ +#define WM831X_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */ +#define WM831X_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */ +#define WM831X_IM_GP1_EINT_MASK 
0x0001 /* IM_GP1_EINT */ +#define WM831X_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */ +#define WM831X_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */ + + +#endif diff --git a/include/linux/mfd/wm831x/otp.h b/include/linux/mfd/wm831x/otp.h new file mode 100644 index 000000000..ce1f81a39 --- /dev/null +++ b/include/linux/mfd/wm831x/otp.h @@ -0,0 +1,162 @@ +/* + * include/linux/mfd/wm831x/otp.h -- OTP interface for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_OTP_H__ +#define __MFD_WM831X_OTP_H__ + +int wm831x_otp_init(struct wm831x *wm831x); +void wm831x_otp_exit(struct wm831x *wm831x); + +/* + * R30720 (0x7800) - Unique ID 1 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30721 (0x7801) - Unique ID 2 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30722 (0x7802) - Unique ID 3 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30723 (0x7803) - Unique ID 4 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30724 (0x7804) - Unique ID 5 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30725 (0x7805) - Unique ID 6 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30726 (0x7806) - Unique ID 7 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30727 (0x7807) - Unique ID 8 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30728 (0x7808) - Factory OTP ID + */ +#define WM831X_OTP_FACT_ID_MASK 0xFFFE /* OTP_FACT_ID - [15:1] */ +#define WM831X_OTP_FACT_ID_SHIFT 1 /* OTP_FACT_ID - [15:1] */ +#define WM831X_OTP_FACT_ID_WIDTH 15 /* OTP_FACT_ID - [15:1] */ +#define WM831X_OTP_FACT_FINAL 0x0001 /* OTP_FACT_FINAL */ +#define WM831X_OTP_FACT_FINAL_MASK 0x0001 /* OTP_FACT_FINAL */ +#define WM831X_OTP_FACT_FINAL_SHIFT 0 /* OTP_FACT_FINAL */ +#define WM831X_OTP_FACT_FINAL_WIDTH 1 /* OTP_FACT_FINAL */ + +/* + * R30729 (0x7809) - Factory OTP 1 + */ +#define WM831X_DC3_TRIM_MASK 0xF000 /* DC3_TRIM - [15:12] */ +#define WM831X_DC3_TRIM_SHIFT 12 /* DC3_TRIM - [15:12] */ +#define WM831X_DC3_TRIM_WIDTH 4 /* DC3_TRIM - [15:12] */ +#define WM831X_DC2_TRIM_MASK 0x0FC0 /* DC2_TRIM - [11:6] */ +#define WM831X_DC2_TRIM_SHIFT 6 /* DC2_TRIM - [11:6] */ 
+#define WM831X_DC2_TRIM_WIDTH 6 /* DC2_TRIM - [11:6] */ +#define WM831X_DC1_TRIM_MASK 0x003F /* DC1_TRIM - [5:0] */ +#define WM831X_DC1_TRIM_SHIFT 0 /* DC1_TRIM - [5:0] */ +#define WM831X_DC1_TRIM_WIDTH 6 /* DC1_TRIM - [5:0] */ + +/* + * R30730 (0x780A) - Factory OTP 2 + */ +#define WM831X_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */ + +/* + * R30731 (0x780B) - Factory OTP 3 + */ +#define WM831X_OSC_TRIM_MASK 0x0780 /* OSC_TRIM - [10:7] */ +#define WM831X_OSC_TRIM_SHIFT 7 /* OSC_TRIM - [10:7] */ +#define WM831X_OSC_TRIM_WIDTH 4 /* OSC_TRIM - [10:7] */ +#define WM831X_BG_TRIM_MASK 0x0078 /* BG_TRIM - [6:3] */ +#define WM831X_BG_TRIM_SHIFT 3 /* BG_TRIM - [6:3] */ +#define WM831X_BG_TRIM_WIDTH 4 /* BG_TRIM - [6:3] */ +#define WM831X_LPBG_TRIM_MASK 0x0007 /* LPBG_TRIM - [2:0] */ +#define WM831X_LPBG_TRIM_SHIFT 0 /* LPBG_TRIM - [2:0] */ +#define WM831X_LPBG_TRIM_WIDTH 3 /* LPBG_TRIM - [2:0] */ + +/* + * R30732 (0x780C) - Factory OTP 4 + */ +#define WM831X_CHILD_I2C_ADDR_MASK 0x00FE /* CHILD_I2C_ADDR - [7:1] */ +#define WM831X_CHILD_I2C_ADDR_SHIFT 1 /* CHILD_I2C_ADDR - [7:1] */ +#define WM831X_CHILD_I2C_ADDR_WIDTH 7 /* CHILD_I2C_ADDR - [7:1] */ +#define WM831X_CH_AW 0x0001 /* CH_AW */ +#define WM831X_CH_AW_MASK 0x0001 /* CH_AW */ +#define WM831X_CH_AW_SHIFT 0 /* CH_AW */ +#define WM831X_CH_AW_WIDTH 1 /* CH_AW */ + +/* + * R30733 (0x780D) - Factory OTP 5 + */ +#define WM831X_CHARGE_TRIM_MASK 0x003F /* CHARGE_TRIM - [5:0] */ +#define WM831X_CHARGE_TRIM_SHIFT 0 /* CHARGE_TRIM - [5:0] */ +#define WM831X_CHARGE_TRIM_WIDTH 6 /* CHARGE_TRIM - [5:0] */ + +/* + * R30736 (0x7810) - Customer OTP ID + */ +#define WM831X_OTP_AUTO_PROG 0x8000 /* OTP_AUTO_PROG */ +#define WM831X_OTP_AUTO_PROG_MASK 0x8000 /* OTP_AUTO_PROG */ +#define WM831X_OTP_AUTO_PROG_SHIFT 15 /* OTP_AUTO_PROG */ +#define WM831X_OTP_AUTO_PROG_WIDTH 1 /* OTP_AUTO_PROG */ +#define WM831X_OTP_CUST_ID_MASK 0x7FFE /* OTP_CUST_ID - [14:1] */ +#define WM831X_OTP_CUST_ID_SHIFT 1 /* OTP_CUST_ID - [14:1] */ +#define WM831X_OTP_CUST_ID_WIDTH 14 /* OTP_CUST_ID - [14:1] */ +#define WM831X_OTP_CUST_FINAL 0x0001 /* OTP_CUST_FINAL */ +#define WM831X_OTP_CUST_FINAL_MASK 0x0001 /* OTP_CUST_FINAL */ +#define WM831X_OTP_CUST_FINAL_SHIFT 0 /* OTP_CUST_FINAL */ +#define WM831X_OTP_CUST_FINAL_WIDTH 1 /* OTP_CUST_FINAL */ + +/* + * R30759 (0x7827) - DBE CHECK DATA + */ +#define WM831X_DBE_VALID_DATA_MASK 0xFFFF /* DBE_VALID_DATA - [15:0] */ +#define WM831X_DBE_VALID_DATA_SHIFT 0 /* DBE_VALID_DATA - [15:0] */ +#define WM831X_DBE_VALID_DATA_WIDTH 16 /* DBE_VALID_DATA - [15:0] */ + + +#endif diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h new file mode 100644 index 000000000..dcc9631b3 --- /dev/null +++ b/include/linux/mfd/wm831x/pdata.h @@ -0,0 +1,150 @@ +/* + * include/linux/mfd/wm831x/pdata.h -- Platform data for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_PDATA_H__ +#define __MFD_WM831X_PDATA_H__ + +struct wm831x; +struct regulator_init_data; + +struct wm831x_backlight_pdata { + int isink; /** ISINK to use, 1 or 2 */ + int max_uA; /** Maximum current to allow */ +}; + +struct wm831x_backup_pdata { + int charger_enable; + int no_constant_voltage; /** Disable constant voltage charging */ + int vlim; /** Voltage limit in millivolts */ + int ilim; /** Current limit in microamps */ +}; + +struct wm831x_battery_pdata { + int enable; /** Enable charging */ + int fast_enable; /** Enable fast charging */ + int off_mask; /** Mask OFF while charging */ + int trickle_ilim; /** Trickle charge current limit, in mA */ + int vsel; /** Target voltage, in mV */ + int eoc_iterm; /** End of trickle charge current, in mA */ + int fast_ilim; /** Fast charge current limit, in mA */ + int timeout; /** Charge cycle timeout, in minutes */ +}; + +/** + * Configuration for the WM831x DC-DC BuckWise convertors. This + * should be passed as driver_data in the regulator_init_data. + * + * Currently all the configuration is for the fast DVS switching + * support of the devices. This allows MFPs on the device to be + * configured as an input to switch between two output voltages, + * allowing voltage transitions without the expense of an access over + * I2C or SPI buses. + */ +struct wm831x_buckv_pdata { + int dvs_gpio; /** CPU GPIO to use for DVS switching */ + int dvs_control_src; /** Hardware DVS source to use (1 or 2) */ + int dvs_init_state; /** DVS state to expect on startup */ + int dvs_state_gpio; /** CPU GPIO to use for monitoring status */ +}; + +/* Sources for status LED configuration. Values are register values + * plus 1 to allow for a zero default for preserve. + */ +enum wm831x_status_src { + WM831X_STATUS_PRESERVE = 0, /* Keep the current hardware setting */ + WM831X_STATUS_OTP = 1, + WM831X_STATUS_POWER = 2, + WM831X_STATUS_CHARGER = 3, + WM831X_STATUS_MANUAL = 4, +}; + +struct wm831x_status_pdata { + enum wm831x_status_src default_src; + const char *name; + const char *default_trigger; +}; + +struct wm831x_touch_pdata { + int fivewire; /** 1 for five wire mode, 0 for 4 wire */ + int isel; /** Current for pen down (uA) */ + int rpu; /** Pen down sensitivity resistor divider */ + int pressure; /** Report pressure (boolean) */ + unsigned int data_irq; /** Touch data ready IRQ */ + int data_irqf; /** IRQ flags for data ready IRQ */ + unsigned int pd_irq; /** Touch pendown detect IRQ */ + int pd_irqf; /** IRQ flags for pen down IRQ */ +}; + +enum wm831x_watchdog_action { + WM831X_WDOG_NONE = 0, + WM831X_WDOG_INTERRUPT = 1, + WM831X_WDOG_RESET = 2, + WM831X_WDOG_WAKE = 3, +}; + +struct wm831x_watchdog_pdata { + enum wm831x_watchdog_action primary, secondary; + int update_gpio; + unsigned int software:1; +}; + +#define WM831X_MAX_STATUS 2 +#define WM831X_MAX_DCDC 4 +#define WM831X_MAX_EPE 2 +#define WM831X_MAX_LDO 11 +#define WM831X_MAX_ISINK 2 + +#define WM831X_GPIO_CONFIGURE 0x10000 +#define WM831X_GPIO_NUM 16 + +struct wm831x_pdata { + /** Used to distinguish multiple WM831x chips */ + int wm831x_num; + + /** Called before subdevices are set up */ + int (*pre_init)(struct wm831x *wm831x); + /** Called after subdevices are set up */ + int (*post_init)(struct wm831x *wm831x); + + /** Put the /IRQ line into CMOS mode */ + bool irq_cmos; + + /** Disable the touchscreen */ + bool disable_touch; + + /** The driver should initiate a power off sequence during shutdown */ + bool soft_shutdown; + + int irq_base; + int gpio_base; + int 
gpio_defaults[WM831X_GPIO_NUM]; + struct wm831x_backlight_pdata *backlight; + struct wm831x_backup_pdata *backup; + struct wm831x_battery_pdata *battery; + struct wm831x_touch_pdata *touch; + struct wm831x_watchdog_pdata *watchdog; + + /** LED1 = 0 and so on */ + struct wm831x_status_pdata *status[WM831X_MAX_STATUS]; + /** DCDC1 = 0 and so on */ + struct regulator_init_data *dcdc[WM831X_MAX_DCDC]; + /** EPE1 = 0 and so on */ + struct regulator_init_data *epe[WM831X_MAX_EPE]; + /** LDO1 = 0 and so on */ + struct regulator_init_data *ldo[WM831X_MAX_LDO]; + /** ISINK1 = 0 and so on*/ + struct regulator_init_data *isink[WM831X_MAX_ISINK]; +}; + +#endif diff --git a/include/linux/mfd/wm831x/pmu.h b/include/linux/mfd/wm831x/pmu.h new file mode 100644 index 000000000..b18cbb027 --- /dev/null +++ b/include/linux/mfd/wm831x/pmu.h @@ -0,0 +1,189 @@ +/* + * include/linux/mfd/wm831x/pmu.h -- PMU for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_PMU_H__ +#define __MFD_WM831X_PMU_H__ + +/* + * R16387 (0x4003) - Power State + */ +#define WM831X_CHIP_ON 0x8000 /* CHIP_ON */ +#define WM831X_CHIP_ON_MASK 0x8000 /* CHIP_ON */ +#define WM831X_CHIP_ON_SHIFT 15 /* CHIP_ON */ +#define WM831X_CHIP_ON_WIDTH 1 /* CHIP_ON */ +#define WM831X_CHIP_SLP 0x4000 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_MASK 0x4000 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_SHIFT 14 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_WIDTH 1 /* CHIP_SLP */ +#define WM831X_REF_LP 0x1000 /* REF_LP */ +#define WM831X_REF_LP_MASK 0x1000 /* REF_LP */ +#define WM831X_REF_LP_SHIFT 12 /* REF_LP */ +#define WM831X_REF_LP_WIDTH 1 /* REF_LP */ +#define WM831X_PWRSTATE_DLY_MASK 0x0C00 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_PWRSTATE_DLY_SHIFT 10 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_PWRSTATE_DLY_WIDTH 2 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_SWRST_DLY 0x0200 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_MASK 0x0200 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_SHIFT 9 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_WIDTH 1 /* SWRST_DLY */ +#define WM831X_USB100MA_STARTUP_MASK 0x0030 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB100MA_STARTUP_SHIFT 4 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB100MA_STARTUP_WIDTH 2 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB_CURR_STS 0x0008 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_MASK 0x0008 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_SHIFT 3 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_WIDTH 1 /* USB_CURR_STS */ +#define WM831X_USB_ILIM_MASK 0x0007 /* USB_ILIM - [2:0] */ +#define WM831X_USB_ILIM_SHIFT 0 /* USB_ILIM - [2:0] */ +#define WM831X_USB_ILIM_WIDTH 3 /* USB_ILIM - [2:0] */ + +/* + * R16397 (0x400D) - System Status + */ +#define WM831X_THW_STS 0x8000 /* THW_STS */ +#define WM831X_THW_STS_MASK 0x8000 /* THW_STS */ +#define WM831X_THW_STS_SHIFT 15 /* THW_STS */ +#define WM831X_THW_STS_WIDTH 1 /* THW_STS */ +#define WM831X_PWR_SRC_BATT 0x0400 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_MASK 0x0400 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_SHIFT 10 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_WIDTH 1 /* PWR_SRC_BATT */ +#define WM831X_PWR_WALL 0x0200 /* PWR_WALL */ +#define WM831X_PWR_WALL_MASK 0x0200 /* PWR_WALL */ +#define WM831X_PWR_WALL_SHIFT 9 /* 
PWR_WALL */ +#define WM831X_PWR_WALL_WIDTH 1 /* PWR_WALL */ +#define WM831X_PWR_USB 0x0100 /* PWR_USB */ +#define WM831X_PWR_USB_MASK 0x0100 /* PWR_USB */ +#define WM831X_PWR_USB_SHIFT 8 /* PWR_USB */ +#define WM831X_PWR_USB_WIDTH 1 /* PWR_USB */ +#define WM831X_MAIN_STATE_MASK 0x001F /* MAIN_STATE - [4:0] */ +#define WM831X_MAIN_STATE_SHIFT 0 /* MAIN_STATE - [4:0] */ +#define WM831X_MAIN_STATE_WIDTH 5 /* MAIN_STATE - [4:0] */ + +/* + * R16456 (0x4048) - Charger Control 1 + */ +#define WM831X_CHG_ENA 0x8000 /* CHG_ENA */ +#define WM831X_CHG_ENA_MASK 0x8000 /* CHG_ENA */ +#define WM831X_CHG_ENA_SHIFT 15 /* CHG_ENA */ +#define WM831X_CHG_ENA_WIDTH 1 /* CHG_ENA */ +#define WM831X_CHG_FRC 0x4000 /* CHG_FRC */ +#define WM831X_CHG_FRC_MASK 0x4000 /* CHG_FRC */ +#define WM831X_CHG_FRC_SHIFT 14 /* CHG_FRC */ +#define WM831X_CHG_FRC_WIDTH 1 /* CHG_FRC */ +#define WM831X_CHG_ITERM_MASK 0x1C00 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_ITERM_SHIFT 10 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_ITERM_WIDTH 3 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_FAST 0x0020 /* CHG_FAST */ +#define WM831X_CHG_FAST_MASK 0x0020 /* CHG_FAST */ +#define WM831X_CHG_FAST_SHIFT 5 /* CHG_FAST */ +#define WM831X_CHG_FAST_WIDTH 1 /* CHG_FAST */ +#define WM831X_CHG_IMON_ENA 0x0002 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_MASK 0x0002 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_SHIFT 1 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_WIDTH 1 /* CHG_IMON_ENA */ +#define WM831X_CHG_CHIP_TEMP_MON 0x0001 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_MASK 0x0001 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_SHIFT 0 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_WIDTH 1 /* CHG_CHIP_TEMP_MON */ + +/* + * R16457 (0x4049) - Charger Control 2 + */ +#define WM831X_CHG_OFF_MSK 0x4000 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_MASK 0x4000 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_SHIFT 14 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_WIDTH 1 /* CHG_OFF_MSK */ +#define WM831X_CHG_TIME_MASK 0x0F00 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TIME_SHIFT 8 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TIME_WIDTH 4 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TRKL_ILIM_MASK 0x00C0 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_TRKL_ILIM_SHIFT 6 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_TRKL_ILIM_WIDTH 2 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_VSEL_MASK 0x0030 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_VSEL_SHIFT 4 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_VSEL_WIDTH 2 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_FAST_ILIM_MASK 0x000F /* CHG_FAST_ILIM - [3:0] */ +#define WM831X_CHG_FAST_ILIM_SHIFT 0 /* CHG_FAST_ILIM - [3:0] */ +#define WM831X_CHG_FAST_ILIM_WIDTH 4 /* CHG_FAST_ILIM - [3:0] */ + +/* + * R16458 (0x404A) - Charger Status + */ +#define WM831X_BATT_OV_STS 0x8000 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_MASK 0x8000 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_SHIFT 15 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_WIDTH 1 /* BATT_OV_STS */ +#define WM831X_CHG_STATE_MASK 0x7000 /* CHG_STATE - [14:12] */ +#define WM831X_CHG_STATE_SHIFT 12 /* CHG_STATE - [14:12] */ +#define WM831X_CHG_STATE_WIDTH 3 /* CHG_STATE - [14:12] */ +#define WM831X_BATT_HOT_STS 0x0800 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_MASK 0x0800 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_SHIFT 11 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_WIDTH 1 /* BATT_HOT_STS */ +#define WM831X_BATT_COLD_STS 0x0400 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_MASK 0x0400 /* BATT_COLD_STS */ 
+#define WM831X_BATT_COLD_STS_SHIFT 10 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_WIDTH 1 /* BATT_COLD_STS */ +#define WM831X_CHG_TOPOFF 0x0200 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_MASK 0x0200 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_SHIFT 9 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_WIDTH 1 /* CHG_TOPOFF */ +#define WM831X_CHG_ACTIVE 0x0100 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_MASK 0x0100 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_SHIFT 8 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_WIDTH 1 /* CHG_ACTIVE */ +#define WM831X_CHG_TIME_ELAPSED_MASK 0x00FF /* CHG_TIME_ELAPSED - [7:0] */ +#define WM831X_CHG_TIME_ELAPSED_SHIFT 0 /* CHG_TIME_ELAPSED - [7:0] */ +#define WM831X_CHG_TIME_ELAPSED_WIDTH 8 /* CHG_TIME_ELAPSED - [7:0] */ + +#define WM831X_CHG_STATE_OFF (0 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_TRICKLE (1 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_FAST (2 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_TRICKLE_OT (3 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_FAST_OT (4 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_DEFECTIVE (5 << WM831X_CHG_STATE_SHIFT) + +/* + * R16459 (0x404B) - Backup Charger Control + */ +#define WM831X_BKUP_CHG_ENA 0x8000 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_MASK 0x8000 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_SHIFT 15 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_WIDTH 1 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_STS 0x4000 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_MASK 0x4000 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_SHIFT 14 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_WIDTH 1 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_MODE 0x1000 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_MASK 0x1000 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_SHIFT 12 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_WIDTH 1 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_BATT_DET_ENA 0x0800 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_MASK 0x0800 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_SHIFT 11 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_WIDTH 1 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_STS 0x0400 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_MASK 0x0400 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_SHIFT 10 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_WIDTH 1 /* BKUP_BATT_STS */ +#define WM831X_BKUP_CHG_VLIM 0x0010 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_MASK 0x0010 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_SHIFT 4 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_WIDTH 1 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_ILIM_MASK 0x0003 /* BKUP_CHG_ILIM - [1:0] */ +#define WM831X_BKUP_CHG_ILIM_SHIFT 0 /* BKUP_CHG_ILIM - [1:0] */ +#define WM831X_BKUP_CHG_ILIM_WIDTH 2 /* BKUP_CHG_ILIM - [1:0] */ + +#endif diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h new file mode 100644 index 000000000..955d30fc6 --- /dev/null +++ b/include/linux/mfd/wm831x/regulator.h @@ -0,0 +1,1218 @@ +/* + * linux/mfd/wm831x/regulator.h -- Regulator definitions for wm831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version.
+ * + */ + +#ifndef __MFD_WM831X_REGULATOR_H__ +#define __MFD_WM831X_REGULATOR_H__ + +/* + * R16462 (0x404E) - Current Sink 1 + */ +#define WM831X_CS1_ENA 0x8000 /* CS1_ENA */ +#define WM831X_CS1_ENA_MASK 0x8000 /* CS1_ENA */ +#define WM831X_CS1_ENA_SHIFT 15 /* CS1_ENA */ +#define WM831X_CS1_ENA_WIDTH 1 /* CS1_ENA */ +#define WM831X_CS1_DRIVE 0x4000 /* CS1_DRIVE */ +#define WM831X_CS1_DRIVE_MASK 0x4000 /* CS1_DRIVE */ +#define WM831X_CS1_DRIVE_SHIFT 14 /* CS1_DRIVE */ +#define WM831X_CS1_DRIVE_WIDTH 1 /* CS1_DRIVE */ +#define WM831X_CS1_SLPENA 0x1000 /* CS1_SLPENA */ +#define WM831X_CS1_SLPENA_MASK 0x1000 /* CS1_SLPENA */ +#define WM831X_CS1_SLPENA_SHIFT 12 /* CS1_SLPENA */ +#define WM831X_CS1_SLPENA_WIDTH 1 /* CS1_SLPENA */ +#define WM831X_CS1_OFF_RAMP_MASK 0x0C00 /* CS1_OFF_RAMP - [11:10] */ +#define WM831X_CS1_OFF_RAMP_SHIFT 10 /* CS1_OFF_RAMP - [11:10] */ +#define WM831X_CS1_OFF_RAMP_WIDTH 2 /* CS1_OFF_RAMP - [11:10] */ +#define WM831X_CS1_ON_RAMP_MASK 0x0300 /* CS1_ON_RAMP - [9:8] */ +#define WM831X_CS1_ON_RAMP_SHIFT 8 /* CS1_ON_RAMP - [9:8] */ +#define WM831X_CS1_ON_RAMP_WIDTH 2 /* CS1_ON_RAMP - [9:8] */ +#define WM831X_CS1_ISEL_MASK 0x003F /* CS1_ISEL - [5:0] */ +#define WM831X_CS1_ISEL_SHIFT 0 /* CS1_ISEL - [5:0] */ +#define WM831X_CS1_ISEL_WIDTH 6 /* CS1_ISEL - [5:0] */ + +/* + * R16463 (0x404F) - Current Sink 2 + */ +#define WM831X_CS2_ENA 0x8000 /* CS2_ENA */ +#define WM831X_CS2_ENA_MASK 0x8000 /* CS2_ENA */ +#define WM831X_CS2_ENA_SHIFT 15 /* CS2_ENA */ +#define WM831X_CS2_ENA_WIDTH 1 /* CS2_ENA */ +#define WM831X_CS2_DRIVE 0x4000 /* CS2_DRIVE */ +#define WM831X_CS2_DRIVE_MASK 0x4000 /* CS2_DRIVE */ +#define WM831X_CS2_DRIVE_SHIFT 14 /* CS2_DRIVE */ +#define WM831X_CS2_DRIVE_WIDTH 1 /* CS2_DRIVE */ +#define WM831X_CS2_SLPENA 0x1000 /* CS2_SLPENA */ +#define WM831X_CS2_SLPENA_MASK 0x1000 /* CS2_SLPENA */ +#define WM831X_CS2_SLPENA_SHIFT 12 /* CS2_SLPENA */ +#define WM831X_CS2_SLPENA_WIDTH 1 /* CS2_SLPENA */ +#define WM831X_CS2_OFF_RAMP_MASK 0x0C00 /* CS2_OFF_RAMP - [11:10] */ +#define WM831X_CS2_OFF_RAMP_SHIFT 10 /* CS2_OFF_RAMP - [11:10] */ +#define WM831X_CS2_OFF_RAMP_WIDTH 2 /* CS2_OFF_RAMP - [11:10] */ +#define WM831X_CS2_ON_RAMP_MASK 0x0300 /* CS2_ON_RAMP - [9:8] */ +#define WM831X_CS2_ON_RAMP_SHIFT 8 /* CS2_ON_RAMP - [9:8] */ +#define WM831X_CS2_ON_RAMP_WIDTH 2 /* CS2_ON_RAMP - [9:8] */ +#define WM831X_CS2_ISEL_MASK 0x003F /* CS2_ISEL - [5:0] */ +#define WM831X_CS2_ISEL_SHIFT 0 /* CS2_ISEL - [5:0] */ +#define WM831X_CS2_ISEL_WIDTH 6 /* CS2_ISEL - [5:0] */ + +/* + * R16464 (0x4050) - DCDC Enable + */ +#define WM831X_EPE2_ENA 0x0080 /* EPE2_ENA */ +#define WM831X_EPE2_ENA_MASK 0x0080 /* EPE2_ENA */ +#define WM831X_EPE2_ENA_SHIFT 7 /* EPE2_ENA */ +#define WM831X_EPE2_ENA_WIDTH 1 /* EPE2_ENA */ +#define WM831X_EPE1_ENA 0x0040 /* EPE1_ENA */ +#define WM831X_EPE1_ENA_MASK 0x0040 /* EPE1_ENA */ +#define WM831X_EPE1_ENA_SHIFT 6 /* EPE1_ENA */ +#define WM831X_EPE1_ENA_WIDTH 1 /* EPE1_ENA */ +#define WM831X_DC4_ENA 0x0008 /* DC4_ENA */ +#define WM831X_DC4_ENA_MASK 0x0008 /* DC4_ENA */ +#define WM831X_DC4_ENA_SHIFT 3 /* DC4_ENA */ +#define WM831X_DC4_ENA_WIDTH 1 /* DC4_ENA */ +#define WM831X_DC3_ENA 0x0004 /* DC3_ENA */ +#define WM831X_DC3_ENA_MASK 0x0004 /* DC3_ENA */ +#define WM831X_DC3_ENA_SHIFT 2 /* DC3_ENA */ +#define WM831X_DC3_ENA_WIDTH 1 /* DC3_ENA */ +#define WM831X_DC2_ENA 0x0002 /* DC2_ENA */ +#define WM831X_DC2_ENA_MASK 0x0002 /* DC2_ENA */ +#define WM831X_DC2_ENA_SHIFT 1 /* DC2_ENA */ +#define WM831X_DC2_ENA_WIDTH 1 /* DC2_ENA */ +#define WM831X_DC1_ENA 0x0001 /* DC1_ENA 
*/ +#define WM831X_DC1_ENA_MASK 0x0001 /* DC1_ENA */ +#define WM831X_DC1_ENA_SHIFT 0 /* DC1_ENA */ +#define WM831X_DC1_ENA_WIDTH 1 /* DC1_ENA */ + +/* + * R16465 (0x4051) - LDO Enable + */ +#define WM831X_LDO11_ENA 0x0400 /* LDO11_ENA */ +#define WM831X_LDO11_ENA_MASK 0x0400 /* LDO11_ENA */ +#define WM831X_LDO11_ENA_SHIFT 10 /* LDO11_ENA */ +#define WM831X_LDO11_ENA_WIDTH 1 /* LDO11_ENA */ +#define WM831X_LDO10_ENA 0x0200 /* LDO10_ENA */ +#define WM831X_LDO10_ENA_MASK 0x0200 /* LDO10_ENA */ +#define WM831X_LDO10_ENA_SHIFT 9 /* LDO10_ENA */ +#define WM831X_LDO10_ENA_WIDTH 1 /* LDO10_ENA */ +#define WM831X_LDO9_ENA 0x0100 /* LDO9_ENA */ +#define WM831X_LDO9_ENA_MASK 0x0100 /* LDO9_ENA */ +#define WM831X_LDO9_ENA_SHIFT 8 /* LDO9_ENA */ +#define WM831X_LDO9_ENA_WIDTH 1 /* LDO9_ENA */ +#define WM831X_LDO8_ENA 0x0080 /* LDO8_ENA */ +#define WM831X_LDO8_ENA_MASK 0x0080 /* LDO8_ENA */ +#define WM831X_LDO8_ENA_SHIFT 7 /* LDO8_ENA */ +#define WM831X_LDO8_ENA_WIDTH 1 /* LDO8_ENA */ +#define WM831X_LDO7_ENA 0x0040 /* LDO7_ENA */ +#define WM831X_LDO7_ENA_MASK 0x0040 /* LDO7_ENA */ +#define WM831X_LDO7_ENA_SHIFT 6 /* LDO7_ENA */ +#define WM831X_LDO7_ENA_WIDTH 1 /* LDO7_ENA */ +#define WM831X_LDO6_ENA 0x0020 /* LDO6_ENA */ +#define WM831X_LDO6_ENA_MASK 0x0020 /* LDO6_ENA */ +#define WM831X_LDO6_ENA_SHIFT 5 /* LDO6_ENA */ +#define WM831X_LDO6_ENA_WIDTH 1 /* LDO6_ENA */ +#define WM831X_LDO5_ENA 0x0010 /* LDO5_ENA */ +#define WM831X_LDO5_ENA_MASK 0x0010 /* LDO5_ENA */ +#define WM831X_LDO5_ENA_SHIFT 4 /* LDO5_ENA */ +#define WM831X_LDO5_ENA_WIDTH 1 /* LDO5_ENA */ +#define WM831X_LDO4_ENA 0x0008 /* LDO4_ENA */ +#define WM831X_LDO4_ENA_MASK 0x0008 /* LDO4_ENA */ +#define WM831X_LDO4_ENA_SHIFT 3 /* LDO4_ENA */ +#define WM831X_LDO4_ENA_WIDTH 1 /* LDO4_ENA */ +#define WM831X_LDO3_ENA 0x0004 /* LDO3_ENA */ +#define WM831X_LDO3_ENA_MASK 0x0004 /* LDO3_ENA */ +#define WM831X_LDO3_ENA_SHIFT 2 /* LDO3_ENA */ +#define WM831X_LDO3_ENA_WIDTH 1 /* LDO3_ENA */ +#define WM831X_LDO2_ENA 0x0002 /* LDO2_ENA */ +#define WM831X_LDO2_ENA_MASK 0x0002 /* LDO2_ENA */ +#define WM831X_LDO2_ENA_SHIFT 1 /* LDO2_ENA */ +#define WM831X_LDO2_ENA_WIDTH 1 /* LDO2_ENA */ +#define WM831X_LDO1_ENA 0x0001 /* LDO1_ENA */ +#define WM831X_LDO1_ENA_MASK 0x0001 /* LDO1_ENA */ +#define WM831X_LDO1_ENA_SHIFT 0 /* LDO1_ENA */ +#define WM831X_LDO1_ENA_WIDTH 1 /* LDO1_ENA */ + +/* + * R16466 (0x4052) - DCDC Status + */ +#define WM831X_EPE2_STS 0x0080 /* EPE2_STS */ +#define WM831X_EPE2_STS_MASK 0x0080 /* EPE2_STS */ +#define WM831X_EPE2_STS_SHIFT 7 /* EPE2_STS */ +#define WM831X_EPE2_STS_WIDTH 1 /* EPE2_STS */ +#define WM831X_EPE1_STS 0x0040 /* EPE1_STS */ +#define WM831X_EPE1_STS_MASK 0x0040 /* EPE1_STS */ +#define WM831X_EPE1_STS_SHIFT 6 /* EPE1_STS */ +#define WM831X_EPE1_STS_WIDTH 1 /* EPE1_STS */ +#define WM831X_DC4_STS 0x0008 /* DC4_STS */ +#define WM831X_DC4_STS_MASK 0x0008 /* DC4_STS */ +#define WM831X_DC4_STS_SHIFT 3 /* DC4_STS */ +#define WM831X_DC4_STS_WIDTH 1 /* DC4_STS */ +#define WM831X_DC3_STS 0x0004 /* DC3_STS */ +#define WM831X_DC3_STS_MASK 0x0004 /* DC3_STS */ +#define WM831X_DC3_STS_SHIFT 2 /* DC3_STS */ +#define WM831X_DC3_STS_WIDTH 1 /* DC3_STS */ +#define WM831X_DC2_STS 0x0002 /* DC2_STS */ +#define WM831X_DC2_STS_MASK 0x0002 /* DC2_STS */ +#define WM831X_DC2_STS_SHIFT 1 /* DC2_STS */ +#define WM831X_DC2_STS_WIDTH 1 /* DC2_STS */ +#define WM831X_DC1_STS 0x0001 /* DC1_STS */ +#define WM831X_DC1_STS_MASK 0x0001 /* DC1_STS */ +#define WM831X_DC1_STS_SHIFT 0 /* DC1_STS */ +#define WM831X_DC1_STS_WIDTH 1 /* DC1_STS */ + +/* + * R16467 
(0x4053) - LDO Status + */ +#define WM831X_LDO11_STS 0x0400 /* LDO11_STS */ +#define WM831X_LDO11_STS_MASK 0x0400 /* LDO11_STS */ +#define WM831X_LDO11_STS_SHIFT 10 /* LDO11_STS */ +#define WM831X_LDO11_STS_WIDTH 1 /* LDO11_STS */ +#define WM831X_LDO10_STS 0x0200 /* LDO10_STS */ +#define WM831X_LDO10_STS_MASK 0x0200 /* LDO10_STS */ +#define WM831X_LDO10_STS_SHIFT 9 /* LDO10_STS */ +#define WM831X_LDO10_STS_WIDTH 1 /* LDO10_STS */ +#define WM831X_LDO9_STS 0x0100 /* LDO9_STS */ +#define WM831X_LDO9_STS_MASK 0x0100 /* LDO9_STS */ +#define WM831X_LDO9_STS_SHIFT 8 /* LDO9_STS */ +#define WM831X_LDO9_STS_WIDTH 1 /* LDO9_STS */ +#define WM831X_LDO8_STS 0x0080 /* LDO8_STS */ +#define WM831X_LDO8_STS_MASK 0x0080 /* LDO8_STS */ +#define WM831X_LDO8_STS_SHIFT 7 /* LDO8_STS */ +#define WM831X_LDO8_STS_WIDTH 1 /* LDO8_STS */ +#define WM831X_LDO7_STS 0x0040 /* LDO7_STS */ +#define WM831X_LDO7_STS_MASK 0x0040 /* LDO7_STS */ +#define WM831X_LDO7_STS_SHIFT 6 /* LDO7_STS */ +#define WM831X_LDO7_STS_WIDTH 1 /* LDO7_STS */ +#define WM831X_LDO6_STS 0x0020 /* LDO6_STS */ +#define WM831X_LDO6_STS_MASK 0x0020 /* LDO6_STS */ +#define WM831X_LDO6_STS_SHIFT 5 /* LDO6_STS */ +#define WM831X_LDO6_STS_WIDTH 1 /* LDO6_STS */ +#define WM831X_LDO5_STS 0x0010 /* LDO5_STS */ +#define WM831X_LDO5_STS_MASK 0x0010 /* LDO5_STS */ +#define WM831X_LDO5_STS_SHIFT 4 /* LDO5_STS */ +#define WM831X_LDO5_STS_WIDTH 1 /* LDO5_STS */ +#define WM831X_LDO4_STS 0x0008 /* LDO4_STS */ +#define WM831X_LDO4_STS_MASK 0x0008 /* LDO4_STS */ +#define WM831X_LDO4_STS_SHIFT 3 /* LDO4_STS */ +#define WM831X_LDO4_STS_WIDTH 1 /* LDO4_STS */ +#define WM831X_LDO3_STS 0x0004 /* LDO3_STS */ +#define WM831X_LDO3_STS_MASK 0x0004 /* LDO3_STS */ +#define WM831X_LDO3_STS_SHIFT 2 /* LDO3_STS */ +#define WM831X_LDO3_STS_WIDTH 1 /* LDO3_STS */ +#define WM831X_LDO2_STS 0x0002 /* LDO2_STS */ +#define WM831X_LDO2_STS_MASK 0x0002 /* LDO2_STS */ +#define WM831X_LDO2_STS_SHIFT 1 /* LDO2_STS */ +#define WM831X_LDO2_STS_WIDTH 1 /* LDO2_STS */ +#define WM831X_LDO1_STS 0x0001 /* LDO1_STS */ +#define WM831X_LDO1_STS_MASK 0x0001 /* LDO1_STS */ +#define WM831X_LDO1_STS_SHIFT 0 /* LDO1_STS */ +#define WM831X_LDO1_STS_WIDTH 1 /* LDO1_STS */ + +/* + * R16468 (0x4054) - DCDC UV Status + */ +#define WM831X_DC2_OV_STS 0x2000 /* DC2_OV_STS */ +#define WM831X_DC2_OV_STS_MASK 0x2000 /* DC2_OV_STS */ +#define WM831X_DC2_OV_STS_SHIFT 13 /* DC2_OV_STS */ +#define WM831X_DC2_OV_STS_WIDTH 1 /* DC2_OV_STS */ +#define WM831X_DC1_OV_STS 0x1000 /* DC1_OV_STS */ +#define WM831X_DC1_OV_STS_MASK 0x1000 /* DC1_OV_STS */ +#define WM831X_DC1_OV_STS_SHIFT 12 /* DC1_OV_STS */ +#define WM831X_DC1_OV_STS_WIDTH 1 /* DC1_OV_STS */ +#define WM831X_DC2_HC_STS 0x0200 /* DC2_HC_STS */ +#define WM831X_DC2_HC_STS_MASK 0x0200 /* DC2_HC_STS */ +#define WM831X_DC2_HC_STS_SHIFT 9 /* DC2_HC_STS */ +#define WM831X_DC2_HC_STS_WIDTH 1 /* DC2_HC_STS */ +#define WM831X_DC1_HC_STS 0x0100 /* DC1_HC_STS */ +#define WM831X_DC1_HC_STS_MASK 0x0100 /* DC1_HC_STS */ +#define WM831X_DC1_HC_STS_SHIFT 8 /* DC1_HC_STS */ +#define WM831X_DC1_HC_STS_WIDTH 1 /* DC1_HC_STS */ +#define WM831X_DC4_UV_STS 0x0008 /* DC4_UV_STS */ +#define WM831X_DC4_UV_STS_MASK 0x0008 /* DC4_UV_STS */ +#define WM831X_DC4_UV_STS_SHIFT 3 /* DC4_UV_STS */ +#define WM831X_DC4_UV_STS_WIDTH 1 /* DC4_UV_STS */ +#define WM831X_DC3_UV_STS 0x0004 /* DC3_UV_STS */ +#define WM831X_DC3_UV_STS_MASK 0x0004 /* DC3_UV_STS */ +#define WM831X_DC3_UV_STS_SHIFT 2 /* DC3_UV_STS */ +#define WM831X_DC3_UV_STS_WIDTH 1 /* DC3_UV_STS */ +#define WM831X_DC2_UV_STS 0x0002 /* DC2_UV_STS 
*/ +#define WM831X_DC2_UV_STS_MASK 0x0002 /* DC2_UV_STS */ +#define WM831X_DC2_UV_STS_SHIFT 1 /* DC2_UV_STS */ +#define WM831X_DC2_UV_STS_WIDTH 1 /* DC2_UV_STS */ +#define WM831X_DC1_UV_STS 0x0001 /* DC1_UV_STS */ +#define WM831X_DC1_UV_STS_MASK 0x0001 /* DC1_UV_STS */ +#define WM831X_DC1_UV_STS_SHIFT 0 /* DC1_UV_STS */ +#define WM831X_DC1_UV_STS_WIDTH 1 /* DC1_UV_STS */ + +/* + * R16469 (0x4055) - LDO UV Status + */ +#define WM831X_INTLDO_UV_STS 0x8000 /* INTLDO_UV_STS */ +#define WM831X_INTLDO_UV_STS_MASK 0x8000 /* INTLDO_UV_STS */ +#define WM831X_INTLDO_UV_STS_SHIFT 15 /* INTLDO_UV_STS */ +#define WM831X_INTLDO_UV_STS_WIDTH 1 /* INTLDO_UV_STS */ +#define WM831X_LDO10_UV_STS 0x0200 /* LDO10_UV_STS */ +#define WM831X_LDO10_UV_STS_MASK 0x0200 /* LDO10_UV_STS */ +#define WM831X_LDO10_UV_STS_SHIFT 9 /* LDO10_UV_STS */ +#define WM831X_LDO10_UV_STS_WIDTH 1 /* LDO10_UV_STS */ +#define WM831X_LDO9_UV_STS 0x0100 /* LDO9_UV_STS */ +#define WM831X_LDO9_UV_STS_MASK 0x0100 /* LDO9_UV_STS */ +#define WM831X_LDO9_UV_STS_SHIFT 8 /* LDO9_UV_STS */ +#define WM831X_LDO9_UV_STS_WIDTH 1 /* LDO9_UV_STS */ +#define WM831X_LDO8_UV_STS 0x0080 /* LDO8_UV_STS */ +#define WM831X_LDO8_UV_STS_MASK 0x0080 /* LDO8_UV_STS */ +#define WM831X_LDO8_UV_STS_SHIFT 7 /* LDO8_UV_STS */ +#define WM831X_LDO8_UV_STS_WIDTH 1 /* LDO8_UV_STS */ +#define WM831X_LDO7_UV_STS 0x0040 /* LDO7_UV_STS */ +#define WM831X_LDO7_UV_STS_MASK 0x0040 /* LDO7_UV_STS */ +#define WM831X_LDO7_UV_STS_SHIFT 6 /* LDO7_UV_STS */ +#define WM831X_LDO7_UV_STS_WIDTH 1 /* LDO7_UV_STS */ +#define WM831X_LDO6_UV_STS 0x0020 /* LDO6_UV_STS */ +#define WM831X_LDO6_UV_STS_MASK 0x0020 /* LDO6_UV_STS */ +#define WM831X_LDO6_UV_STS_SHIFT 5 /* LDO6_UV_STS */ +#define WM831X_LDO6_UV_STS_WIDTH 1 /* LDO6_UV_STS */ +#define WM831X_LDO5_UV_STS 0x0010 /* LDO5_UV_STS */ +#define WM831X_LDO5_UV_STS_MASK 0x0010 /* LDO5_UV_STS */ +#define WM831X_LDO5_UV_STS_SHIFT 4 /* LDO5_UV_STS */ +#define WM831X_LDO5_UV_STS_WIDTH 1 /* LDO5_UV_STS */ +#define WM831X_LDO4_UV_STS 0x0008 /* LDO4_UV_STS */ +#define WM831X_LDO4_UV_STS_MASK 0x0008 /* LDO4_UV_STS */ +#define WM831X_LDO4_UV_STS_SHIFT 3 /* LDO4_UV_STS */ +#define WM831X_LDO4_UV_STS_WIDTH 1 /* LDO4_UV_STS */ +#define WM831X_LDO3_UV_STS 0x0004 /* LDO3_UV_STS */ +#define WM831X_LDO3_UV_STS_MASK 0x0004 /* LDO3_UV_STS */ +#define WM831X_LDO3_UV_STS_SHIFT 2 /* LDO3_UV_STS */ +#define WM831X_LDO3_UV_STS_WIDTH 1 /* LDO3_UV_STS */ +#define WM831X_LDO2_UV_STS 0x0002 /* LDO2_UV_STS */ +#define WM831X_LDO2_UV_STS_MASK 0x0002 /* LDO2_UV_STS */ +#define WM831X_LDO2_UV_STS_SHIFT 1 /* LDO2_UV_STS */ +#define WM831X_LDO2_UV_STS_WIDTH 1 /* LDO2_UV_STS */ +#define WM831X_LDO1_UV_STS 0x0001 /* LDO1_UV_STS */ +#define WM831X_LDO1_UV_STS_MASK 0x0001 /* LDO1_UV_STS */ +#define WM831X_LDO1_UV_STS_SHIFT 0 /* LDO1_UV_STS */ +#define WM831X_LDO1_UV_STS_WIDTH 1 /* LDO1_UV_STS */ + +/* + * R16470 (0x4056) - DC1 Control 1 + */ +#define WM831X_DC1_RATE_MASK 0xC000 /* DC1_RATE - [15:14] */ +#define WM831X_DC1_RATE_SHIFT 14 /* DC1_RATE - [15:14] */ +#define WM831X_DC1_RATE_WIDTH 2 /* DC1_RATE - [15:14] */ +#define WM831X_DC1_PHASE 0x1000 /* DC1_PHASE */ +#define WM831X_DC1_PHASE_MASK 0x1000 /* DC1_PHASE */ +#define WM831X_DC1_PHASE_SHIFT 12 /* DC1_PHASE */ +#define WM831X_DC1_PHASE_WIDTH 1 /* DC1_PHASE */ +#define WM831X_DC1_FREQ_MASK 0x0300 /* DC1_FREQ - [9:8] */ +#define WM831X_DC1_FREQ_SHIFT 8 /* DC1_FREQ - [9:8] */ +#define WM831X_DC1_FREQ_WIDTH 2 /* DC1_FREQ - [9:8] */ +#define WM831X_DC1_FLT 0x0080 /* DC1_FLT */ +#define WM831X_DC1_FLT_MASK 0x0080 /* DC1_FLT */ 
+#define WM831X_DC1_FLT_SHIFT 7 /* DC1_FLT */ +#define WM831X_DC1_FLT_WIDTH 1 /* DC1_FLT */ +#define WM831X_DC1_SOFT_START_MASK 0x0030 /* DC1_SOFT_START - [5:4] */ +#define WM831X_DC1_SOFT_START_SHIFT 4 /* DC1_SOFT_START - [5:4] */ +#define WM831X_DC1_SOFT_START_WIDTH 2 /* DC1_SOFT_START - [5:4] */ +#define WM831X_DC1_CAP_MASK 0x0003 /* DC1_CAP - [1:0] */ +#define WM831X_DC1_CAP_SHIFT 0 /* DC1_CAP - [1:0] */ +#define WM831X_DC1_CAP_WIDTH 2 /* DC1_CAP - [1:0] */ + +/* + * R16471 (0x4057) - DC1 Control 2 + */ +#define WM831X_DC1_ERR_ACT_MASK 0xC000 /* DC1_ERR_ACT - [15:14] */ +#define WM831X_DC1_ERR_ACT_SHIFT 14 /* DC1_ERR_ACT - [15:14] */ +#define WM831X_DC1_ERR_ACT_WIDTH 2 /* DC1_ERR_ACT - [15:14] */ +#define WM831X_DC1_HWC_SRC_MASK 0x1800 /* DC1_HWC_SRC - [12:11] */ +#define WM831X_DC1_HWC_SRC_SHIFT 11 /* DC1_HWC_SRC - [12:11] */ +#define WM831X_DC1_HWC_SRC_WIDTH 2 /* DC1_HWC_SRC - [12:11] */ +#define WM831X_DC1_HWC_VSEL 0x0400 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_VSEL_MASK 0x0400 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_VSEL_SHIFT 10 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_VSEL_WIDTH 1 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_MODE_MASK 0x0300 /* DC1_HWC_MODE - [9:8] */ +#define WM831X_DC1_HWC_MODE_SHIFT 8 /* DC1_HWC_MODE - [9:8] */ +#define WM831X_DC1_HWC_MODE_WIDTH 2 /* DC1_HWC_MODE - [9:8] */ +#define WM831X_DC1_HC_THR_MASK 0x0070 /* DC1_HC_THR - [6:4] */ +#define WM831X_DC1_HC_THR_SHIFT 4 /* DC1_HC_THR - [6:4] */ +#define WM831X_DC1_HC_THR_WIDTH 3 /* DC1_HC_THR - [6:4] */ +#define WM831X_DC1_HC_IND_ENA 0x0001 /* DC1_HC_IND_ENA */ +#define WM831X_DC1_HC_IND_ENA_MASK 0x0001 /* DC1_HC_IND_ENA */ +#define WM831X_DC1_HC_IND_ENA_SHIFT 0 /* DC1_HC_IND_ENA */ +#define WM831X_DC1_HC_IND_ENA_WIDTH 1 /* DC1_HC_IND_ENA */ + +/* + * R16472 (0x4058) - DC1 ON Config + */ +#define WM831X_DC1_ON_SLOT_MASK 0xE000 /* DC1_ON_SLOT - [15:13] */ +#define WM831X_DC1_ON_SLOT_SHIFT 13 /* DC1_ON_SLOT - [15:13] */ +#define WM831X_DC1_ON_SLOT_WIDTH 3 /* DC1_ON_SLOT - [15:13] */ +#define WM831X_DC1_ON_MODE_MASK 0x0300 /* DC1_ON_MODE - [9:8] */ +#define WM831X_DC1_ON_MODE_SHIFT 8 /* DC1_ON_MODE - [9:8] */ +#define WM831X_DC1_ON_MODE_WIDTH 2 /* DC1_ON_MODE - [9:8] */ +#define WM831X_DC1_ON_VSEL_MASK 0x007F /* DC1_ON_VSEL - [6:0] */ +#define WM831X_DC1_ON_VSEL_SHIFT 0 /* DC1_ON_VSEL - [6:0] */ +#define WM831X_DC1_ON_VSEL_WIDTH 7 /* DC1_ON_VSEL - [6:0] */ + +/* + * R16473 (0x4059) - DC1 SLEEP Control + */ +#define WM831X_DC1_SLP_SLOT_MASK 0xE000 /* DC1_SLP_SLOT - [15:13] */ +#define WM831X_DC1_SLP_SLOT_SHIFT 13 /* DC1_SLP_SLOT - [15:13] */ +#define WM831X_DC1_SLP_SLOT_WIDTH 3 /* DC1_SLP_SLOT - [15:13] */ +#define WM831X_DC1_SLP_MODE_MASK 0x0300 /* DC1_SLP_MODE - [9:8] */ +#define WM831X_DC1_SLP_MODE_SHIFT 8 /* DC1_SLP_MODE - [9:8] */ +#define WM831X_DC1_SLP_MODE_WIDTH 2 /* DC1_SLP_MODE - [9:8] */ +#define WM831X_DC1_SLP_VSEL_MASK 0x007F /* DC1_SLP_VSEL - [6:0] */ +#define WM831X_DC1_SLP_VSEL_SHIFT 0 /* DC1_SLP_VSEL - [6:0] */ +#define WM831X_DC1_SLP_VSEL_WIDTH 7 /* DC1_SLP_VSEL - [6:0] */ + +/* + * R16474 (0x405A) - DC1 DVS Control + */ +#define WM831X_DC1_DVS_SRC_MASK 0x1800 /* DC1_DVS_SRC - [12:11] */ +#define WM831X_DC1_DVS_SRC_SHIFT 11 /* DC1_DVS_SRC - [12:11] */ +#define WM831X_DC1_DVS_SRC_WIDTH 2 /* DC1_DVS_SRC - [12:11] */ +#define WM831X_DC1_DVS_VSEL_MASK 0x007F /* DC1_DVS_VSEL - [6:0] */ +#define WM831X_DC1_DVS_VSEL_SHIFT 0 /* DC1_DVS_VSEL - [6:0] */ +#define WM831X_DC1_DVS_VSEL_WIDTH 7 /* DC1_DVS_VSEL - [6:0] */ + +/* + * R16475 (0x405B) - DC2 Control 1 + */ +#define WM831X_DC2_RATE_MASK 0xC000 
/* DC2_RATE - [15:14] */ +#define WM831X_DC2_RATE_SHIFT 14 /* DC2_RATE - [15:14] */ +#define WM831X_DC2_RATE_WIDTH 2 /* DC2_RATE - [15:14] */ +#define WM831X_DC2_PHASE 0x1000 /* DC2_PHASE */ +#define WM831X_DC2_PHASE_MASK 0x1000 /* DC2_PHASE */ +#define WM831X_DC2_PHASE_SHIFT 12 /* DC2_PHASE */ +#define WM831X_DC2_PHASE_WIDTH 1 /* DC2_PHASE */ +#define WM831X_DC2_FREQ_MASK 0x0300 /* DC2_FREQ - [9:8] */ +#define WM831X_DC2_FREQ_SHIFT 8 /* DC2_FREQ - [9:8] */ +#define WM831X_DC2_FREQ_WIDTH 2 /* DC2_FREQ - [9:8] */ +#define WM831X_DC2_FLT 0x0080 /* DC2_FLT */ +#define WM831X_DC2_FLT_MASK 0x0080 /* DC2_FLT */ +#define WM831X_DC2_FLT_SHIFT 7 /* DC2_FLT */ +#define WM831X_DC2_FLT_WIDTH 1 /* DC2_FLT */ +#define WM831X_DC2_SOFT_START_MASK 0x0030 /* DC2_SOFT_START - [5:4] */ +#define WM831X_DC2_SOFT_START_SHIFT 4 /* DC2_SOFT_START - [5:4] */ +#define WM831X_DC2_SOFT_START_WIDTH 2 /* DC2_SOFT_START - [5:4] */ +#define WM831X_DC2_CAP_MASK 0x0003 /* DC2_CAP - [1:0] */ +#define WM831X_DC2_CAP_SHIFT 0 /* DC2_CAP - [1:0] */ +#define WM831X_DC2_CAP_WIDTH 2 /* DC2_CAP - [1:0] */ + +/* + * R16476 (0x405C) - DC2 Control 2 + */ +#define WM831X_DC2_ERR_ACT_MASK 0xC000 /* DC2_ERR_ACT - [15:14] */ +#define WM831X_DC2_ERR_ACT_SHIFT 14 /* DC2_ERR_ACT - [15:14] */ +#define WM831X_DC2_ERR_ACT_WIDTH 2 /* DC2_ERR_ACT - [15:14] */ +#define WM831X_DC2_HWC_SRC_MASK 0x1800 /* DC2_HWC_SRC - [12:11] */ +#define WM831X_DC2_HWC_SRC_SHIFT 11 /* DC2_HWC_SRC - [12:11] */ +#define WM831X_DC2_HWC_SRC_WIDTH 2 /* DC2_HWC_SRC - [12:11] */ +#define WM831X_DC2_HWC_VSEL 0x0400 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_VSEL_MASK 0x0400 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_VSEL_SHIFT 10 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_VSEL_WIDTH 1 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_MODE_MASK 0x0300 /* DC2_HWC_MODE - [9:8] */ +#define WM831X_DC2_HWC_MODE_SHIFT 8 /* DC2_HWC_MODE - [9:8] */ +#define WM831X_DC2_HWC_MODE_WIDTH 2 /* DC2_HWC_MODE - [9:8] */ +#define WM831X_DC2_HC_THR_MASK 0x0070 /* DC2_HC_THR - [6:4] */ +#define WM831X_DC2_HC_THR_SHIFT 4 /* DC2_HC_THR - [6:4] */ +#define WM831X_DC2_HC_THR_WIDTH 3 /* DC2_HC_THR - [6:4] */ +#define WM831X_DC2_HC_IND_ENA 0x0001 /* DC2_HC_IND_ENA */ +#define WM831X_DC2_HC_IND_ENA_MASK 0x0001 /* DC2_HC_IND_ENA */ +#define WM831X_DC2_HC_IND_ENA_SHIFT 0 /* DC2_HC_IND_ENA */ +#define WM831X_DC2_HC_IND_ENA_WIDTH 1 /* DC2_HC_IND_ENA */ + +/* + * R16477 (0x405D) - DC2 ON Config + */ +#define WM831X_DC2_ON_SLOT_MASK 0xE000 /* DC2_ON_SLOT - [15:13] */ +#define WM831X_DC2_ON_SLOT_SHIFT 13 /* DC2_ON_SLOT - [15:13] */ +#define WM831X_DC2_ON_SLOT_WIDTH 3 /* DC2_ON_SLOT - [15:13] */ +#define WM831X_DC2_ON_MODE_MASK 0x0300 /* DC2_ON_MODE - [9:8] */ +#define WM831X_DC2_ON_MODE_SHIFT 8 /* DC2_ON_MODE - [9:8] */ +#define WM831X_DC2_ON_MODE_WIDTH 2 /* DC2_ON_MODE - [9:8] */ +#define WM831X_DC2_ON_VSEL_MASK 0x007F /* DC2_ON_VSEL - [6:0] */ +#define WM831X_DC2_ON_VSEL_SHIFT 0 /* DC2_ON_VSEL - [6:0] */ +#define WM831X_DC2_ON_VSEL_WIDTH 7 /* DC2_ON_VSEL - [6:0] */ + +/* + * R16478 (0x405E) - DC2 SLEEP Control + */ +#define WM831X_DC2_SLP_SLOT_MASK 0xE000 /* DC2_SLP_SLOT - [15:13] */ +#define WM831X_DC2_SLP_SLOT_SHIFT 13 /* DC2_SLP_SLOT - [15:13] */ +#define WM831X_DC2_SLP_SLOT_WIDTH 3 /* DC2_SLP_SLOT - [15:13] */ +#define WM831X_DC2_SLP_MODE_MASK 0x0300 /* DC2_SLP_MODE - [9:8] */ +#define WM831X_DC2_SLP_MODE_SHIFT 8 /* DC2_SLP_MODE - [9:8] */ +#define WM831X_DC2_SLP_MODE_WIDTH 2 /* DC2_SLP_MODE - [9:8] */ +#define WM831X_DC2_SLP_VSEL_MASK 0x007F /* DC2_SLP_VSEL - [6:0] */ +#define WM831X_DC2_SLP_VSEL_SHIFT 0 /* 
DC2_SLP_VSEL - [6:0] */ +#define WM831X_DC2_SLP_VSEL_WIDTH 7 /* DC2_SLP_VSEL - [6:0] */ + +/* + * R16479 (0x405F) - DC2 DVS Control + */ +#define WM831X_DC2_DVS_SRC_MASK 0x1800 /* DC2_DVS_SRC - [12:11] */ +#define WM831X_DC2_DVS_SRC_SHIFT 11 /* DC2_DVS_SRC - [12:11] */ +#define WM831X_DC2_DVS_SRC_WIDTH 2 /* DC2_DVS_SRC - [12:11] */ +#define WM831X_DC2_DVS_VSEL_MASK 0x007F /* DC2_DVS_VSEL - [6:0] */ +#define WM831X_DC2_DVS_VSEL_SHIFT 0 /* DC2_DVS_VSEL - [6:0] */ +#define WM831X_DC2_DVS_VSEL_WIDTH 7 /* DC2_DVS_VSEL - [6:0] */ + +/* + * R16480 (0x4060) - DC3 Control 1 + */ +#define WM831X_DC3_PHASE 0x1000 /* DC3_PHASE */ +#define WM831X_DC3_PHASE_MASK 0x1000 /* DC3_PHASE */ +#define WM831X_DC3_PHASE_SHIFT 12 /* DC3_PHASE */ +#define WM831X_DC3_PHASE_WIDTH 1 /* DC3_PHASE */ +#define WM831X_DC3_FLT 0x0080 /* DC3_FLT */ +#define WM831X_DC3_FLT_MASK 0x0080 /* DC3_FLT */ +#define WM831X_DC3_FLT_SHIFT 7 /* DC3_FLT */ +#define WM831X_DC3_FLT_WIDTH 1 /* DC3_FLT */ +#define WM831X_DC3_SOFT_START_MASK 0x0030 /* DC3_SOFT_START - [5:4] */ +#define WM831X_DC3_SOFT_START_SHIFT 4 /* DC3_SOFT_START - [5:4] */ +#define WM831X_DC3_SOFT_START_WIDTH 2 /* DC3_SOFT_START - [5:4] */ +#define WM831X_DC3_STNBY_LIM_MASK 0x000C /* DC3_STNBY_LIM - [3:2] */ +#define WM831X_DC3_STNBY_LIM_SHIFT 2 /* DC3_STNBY_LIM - [3:2] */ +#define WM831X_DC3_STNBY_LIM_WIDTH 2 /* DC3_STNBY_LIM - [3:2] */ +#define WM831X_DC3_CAP_MASK 0x0003 /* DC3_CAP - [1:0] */ +#define WM831X_DC3_CAP_SHIFT 0 /* DC3_CAP - [1:0] */ +#define WM831X_DC3_CAP_WIDTH 2 /* DC3_CAP - [1:0] */ + +/* + * R16481 (0x4061) - DC3 Control 2 + */ +#define WM831X_DC3_ERR_ACT_MASK 0xC000 /* DC3_ERR_ACT - [15:14] */ +#define WM831X_DC3_ERR_ACT_SHIFT 14 /* DC3_ERR_ACT - [15:14] */ +#define WM831X_DC3_ERR_ACT_WIDTH 2 /* DC3_ERR_ACT - [15:14] */ +#define WM831X_DC3_HWC_SRC_MASK 0x1800 /* DC3_HWC_SRC - [12:11] */ +#define WM831X_DC3_HWC_SRC_SHIFT 11 /* DC3_HWC_SRC - [12:11] */ +#define WM831X_DC3_HWC_SRC_WIDTH 2 /* DC3_HWC_SRC - [12:11] */ +#define WM831X_DC3_HWC_VSEL 0x0400 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_VSEL_MASK 0x0400 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_VSEL_SHIFT 10 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_VSEL_WIDTH 1 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_MODE_MASK 0x0300 /* DC3_HWC_MODE - [9:8] */ +#define WM831X_DC3_HWC_MODE_SHIFT 8 /* DC3_HWC_MODE - [9:8] */ +#define WM831X_DC3_HWC_MODE_WIDTH 2 /* DC3_HWC_MODE - [9:8] */ +#define WM831X_DC3_OVP 0x0080 /* DC3_OVP */ +#define WM831X_DC3_OVP_MASK 0x0080 /* DC3_OVP */ +#define WM831X_DC3_OVP_SHIFT 7 /* DC3_OVP */ +#define WM831X_DC3_OVP_WIDTH 1 /* DC3_OVP */ + +/* + * R16482 (0x4062) - DC3 ON Config + */ +#define WM831X_DC3_ON_SLOT_MASK 0xE000 /* DC3_ON_SLOT - [15:13] */ +#define WM831X_DC3_ON_SLOT_SHIFT 13 /* DC3_ON_SLOT - [15:13] */ +#define WM831X_DC3_ON_SLOT_WIDTH 3 /* DC3_ON_SLOT - [15:13] */ +#define WM831X_DC3_ON_MODE_MASK 0x0300 /* DC3_ON_MODE - [9:8] */ +#define WM831X_DC3_ON_MODE_SHIFT 8 /* DC3_ON_MODE - [9:8] */ +#define WM831X_DC3_ON_MODE_WIDTH 2 /* DC3_ON_MODE - [9:8] */ +#define WM831X_DC3_ON_VSEL_MASK 0x007F /* DC3_ON_VSEL - [6:0] */ +#define WM831X_DC3_ON_VSEL_SHIFT 0 /* DC3_ON_VSEL - [6:0] */ +#define WM831X_DC3_ON_VSEL_WIDTH 7 /* DC3_ON_VSEL - [6:0] */ + +/* + * R16483 (0x4063) - DC3 SLEEP Control + */ +#define WM831X_DC3_SLP_SLOT_MASK 0xE000 /* DC3_SLP_SLOT - [15:13] */ +#define WM831X_DC3_SLP_SLOT_SHIFT 13 /* DC3_SLP_SLOT - [15:13] */ +#define WM831X_DC3_SLP_SLOT_WIDTH 3 /* DC3_SLP_SLOT - [15:13] */ +#define WM831X_DC3_SLP_MODE_MASK 0x0300 /* DC3_SLP_MODE - [9:8] */ +#define 
WM831X_DC3_SLP_MODE_SHIFT 8 /* DC3_SLP_MODE - [9:8] */ +#define WM831X_DC3_SLP_MODE_WIDTH 2 /* DC3_SLP_MODE - [9:8] */ +#define WM831X_DC3_SLP_VSEL_MASK 0x007F /* DC3_SLP_VSEL - [6:0] */ +#define WM831X_DC3_SLP_VSEL_SHIFT 0 /* DC3_SLP_VSEL - [6:0] */ +#define WM831X_DC3_SLP_VSEL_WIDTH 7 /* DC3_SLP_VSEL - [6:0] */ + +/* + * R16484 (0x4064) - DC4 Control + */ +#define WM831X_DC4_ERR_ACT_MASK 0xC000 /* DC4_ERR_ACT - [15:14] */ +#define WM831X_DC4_ERR_ACT_SHIFT 14 /* DC4_ERR_ACT - [15:14] */ +#define WM831X_DC4_ERR_ACT_WIDTH 2 /* DC4_ERR_ACT - [15:14] */ +#define WM831X_DC4_HWC_SRC_MASK 0x1800 /* DC4_HWC_SRC - [12:11] */ +#define WM831X_DC4_HWC_SRC_SHIFT 11 /* DC4_HWC_SRC - [12:11] */ +#define WM831X_DC4_HWC_SRC_WIDTH 2 /* DC4_HWC_SRC - [12:11] */ +#define WM831X_DC4_HWC_MODE 0x0100 /* DC4_HWC_MODE */ +#define WM831X_DC4_HWC_MODE_MASK 0x0100 /* DC4_HWC_MODE */ +#define WM831X_DC4_HWC_MODE_SHIFT 8 /* DC4_HWC_MODE */ +#define WM831X_DC4_HWC_MODE_WIDTH 1 /* DC4_HWC_MODE */ +#define WM831X_DC4_RANGE_MASK 0x000C /* DC4_RANGE - [3:2] */ +#define WM831X_DC4_RANGE_SHIFT 2 /* DC4_RANGE - [3:2] */ +#define WM831X_DC4_RANGE_WIDTH 2 /* DC4_RANGE - [3:2] */ +#define WM831X_DC4_FBSRC 0x0001 /* DC4_FBSRC */ +#define WM831X_DC4_FBSRC_MASK 0x0001 /* DC4_FBSRC */ +#define WM831X_DC4_FBSRC_SHIFT 0 /* DC4_FBSRC */ +#define WM831X_DC4_FBSRC_WIDTH 1 /* DC4_FBSRC */ + +/* + * R16485 (0x4065) - DC4 SLEEP Control + */ +#define WM831X_DC4_SLPENA 0x0100 /* DC4_SLPENA */ +#define WM831X_DC4_SLPENA_MASK 0x0100 /* DC4_SLPENA */ +#define WM831X_DC4_SLPENA_SHIFT 8 /* DC4_SLPENA */ +#define WM831X_DC4_SLPENA_WIDTH 1 /* DC4_SLPENA */ + +/* + * R16488 (0x4068) - LDO1 Control + */ +#define WM831X_LDO1_ERR_ACT_MASK 0xC000 /* LDO1_ERR_ACT - [15:14] */ +#define WM831X_LDO1_ERR_ACT_SHIFT 14 /* LDO1_ERR_ACT - [15:14] */ +#define WM831X_LDO1_ERR_ACT_WIDTH 2 /* LDO1_ERR_ACT - [15:14] */ +#define WM831X_LDO1_HWC_SRC_MASK 0x1800 /* LDO1_HWC_SRC - [12:11] */ +#define WM831X_LDO1_HWC_SRC_SHIFT 11 /* LDO1_HWC_SRC - [12:11] */ +#define WM831X_LDO1_HWC_SRC_WIDTH 2 /* LDO1_HWC_SRC - [12:11] */ +#define WM831X_LDO1_HWC_VSEL 0x0400 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_VSEL_MASK 0x0400 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_VSEL_SHIFT 10 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_VSEL_WIDTH 1 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_MODE_MASK 0x0300 /* LDO1_HWC_MODE - [9:8] */ +#define WM831X_LDO1_HWC_MODE_SHIFT 8 /* LDO1_HWC_MODE - [9:8] */ +#define WM831X_LDO1_HWC_MODE_WIDTH 2 /* LDO1_HWC_MODE - [9:8] */ +#define WM831X_LDO1_FLT 0x0080 /* LDO1_FLT */ +#define WM831X_LDO1_FLT_MASK 0x0080 /* LDO1_FLT */ +#define WM831X_LDO1_FLT_SHIFT 7 /* LDO1_FLT */ +#define WM831X_LDO1_FLT_WIDTH 1 /* LDO1_FLT */ +#define WM831X_LDO1_SWI 0x0040 /* LDO1_SWI */ +#define WM831X_LDO1_SWI_MASK 0x0040 /* LDO1_SWI */ +#define WM831X_LDO1_SWI_SHIFT 6 /* LDO1_SWI */ +#define WM831X_LDO1_SWI_WIDTH 1 /* LDO1_SWI */ +#define WM831X_LDO1_LP_MODE 0x0001 /* LDO1_LP_MODE */ +#define WM831X_LDO1_LP_MODE_MASK 0x0001 /* LDO1_LP_MODE */ +#define WM831X_LDO1_LP_MODE_SHIFT 0 /* LDO1_LP_MODE */ +#define WM831X_LDO1_LP_MODE_WIDTH 1 /* LDO1_LP_MODE */ + +/* + * R16489 (0x4069) - LDO1 ON Control + */ +#define WM831X_LDO1_ON_SLOT_MASK 0xE000 /* LDO1_ON_SLOT - [15:13] */ +#define WM831X_LDO1_ON_SLOT_SHIFT 13 /* LDO1_ON_SLOT - [15:13] */ +#define WM831X_LDO1_ON_SLOT_WIDTH 3 /* LDO1_ON_SLOT - [15:13] */ +#define WM831X_LDO1_ON_MODE 0x0100 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_MODE_MASK 0x0100 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_MODE_SHIFT 8 /* LDO1_ON_MODE */ 
+#define WM831X_LDO1_ON_MODE_WIDTH 1 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_VSEL_MASK 0x001F /* LDO1_ON_VSEL - [4:0] */ +#define WM831X_LDO1_ON_VSEL_SHIFT 0 /* LDO1_ON_VSEL - [4:0] */ +#define WM831X_LDO1_ON_VSEL_WIDTH 5 /* LDO1_ON_VSEL - [4:0] */ + +/* + * R16490 (0x406A) - LDO1 SLEEP Control + */ +#define WM831X_LDO1_SLP_SLOT_MASK 0xE000 /* LDO1_SLP_SLOT - [15:13] */ +#define WM831X_LDO1_SLP_SLOT_SHIFT 13 /* LDO1_SLP_SLOT - [15:13] */ +#define WM831X_LDO1_SLP_SLOT_WIDTH 3 /* LDO1_SLP_SLOT - [15:13] */ +#define WM831X_LDO1_SLP_MODE 0x0100 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_MODE_MASK 0x0100 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_MODE_SHIFT 8 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_MODE_WIDTH 1 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_VSEL_MASK 0x001F /* LDO1_SLP_VSEL - [4:0] */ +#define WM831X_LDO1_SLP_VSEL_SHIFT 0 /* LDO1_SLP_VSEL - [4:0] */ +#define WM831X_LDO1_SLP_VSEL_WIDTH 5 /* LDO1_SLP_VSEL - [4:0] */ + +/* + * R16491 (0x406B) - LDO2 Control + */ +#define WM831X_LDO2_ERR_ACT_MASK 0xC000 /* LDO2_ERR_ACT - [15:14] */ +#define WM831X_LDO2_ERR_ACT_SHIFT 14 /* LDO2_ERR_ACT - [15:14] */ +#define WM831X_LDO2_ERR_ACT_WIDTH 2 /* LDO2_ERR_ACT - [15:14] */ +#define WM831X_LDO2_HWC_SRC_MASK 0x1800 /* LDO2_HWC_SRC - [12:11] */ +#define WM831X_LDO2_HWC_SRC_SHIFT 11 /* LDO2_HWC_SRC - [12:11] */ +#define WM831X_LDO2_HWC_SRC_WIDTH 2 /* LDO2_HWC_SRC - [12:11] */ +#define WM831X_LDO2_HWC_VSEL 0x0400 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_VSEL_MASK 0x0400 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_VSEL_SHIFT 10 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_VSEL_WIDTH 1 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_MODE_MASK 0x0300 /* LDO2_HWC_MODE - [9:8] */ +#define WM831X_LDO2_HWC_MODE_SHIFT 8 /* LDO2_HWC_MODE - [9:8] */ +#define WM831X_LDO2_HWC_MODE_WIDTH 2 /* LDO2_HWC_MODE - [9:8] */ +#define WM831X_LDO2_FLT 0x0080 /* LDO2_FLT */ +#define WM831X_LDO2_FLT_MASK 0x0080 /* LDO2_FLT */ +#define WM831X_LDO2_FLT_SHIFT 7 /* LDO2_FLT */ +#define WM831X_LDO2_FLT_WIDTH 1 /* LDO2_FLT */ +#define WM831X_LDO2_SWI 0x0040 /* LDO2_SWI */ +#define WM831X_LDO2_SWI_MASK 0x0040 /* LDO2_SWI */ +#define WM831X_LDO2_SWI_SHIFT 6 /* LDO2_SWI */ +#define WM831X_LDO2_SWI_WIDTH 1 /* LDO2_SWI */ +#define WM831X_LDO2_LP_MODE 0x0001 /* LDO2_LP_MODE */ +#define WM831X_LDO2_LP_MODE_MASK 0x0001 /* LDO2_LP_MODE */ +#define WM831X_LDO2_LP_MODE_SHIFT 0 /* LDO2_LP_MODE */ +#define WM831X_LDO2_LP_MODE_WIDTH 1 /* LDO2_LP_MODE */ + +/* + * R16492 (0x406C) - LDO2 ON Control + */ +#define WM831X_LDO2_ON_SLOT_MASK 0xE000 /* LDO2_ON_SLOT - [15:13] */ +#define WM831X_LDO2_ON_SLOT_SHIFT 13 /* LDO2_ON_SLOT - [15:13] */ +#define WM831X_LDO2_ON_SLOT_WIDTH 3 /* LDO2_ON_SLOT - [15:13] */ +#define WM831X_LDO2_ON_MODE 0x0100 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_MODE_MASK 0x0100 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_MODE_SHIFT 8 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_MODE_WIDTH 1 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_VSEL_MASK 0x001F /* LDO2_ON_VSEL - [4:0] */ +#define WM831X_LDO2_ON_VSEL_SHIFT 0 /* LDO2_ON_VSEL - [4:0] */ +#define WM831X_LDO2_ON_VSEL_WIDTH 5 /* LDO2_ON_VSEL - [4:0] */ + +/* + * R16493 (0x406D) - LDO2 SLEEP Control + */ +#define WM831X_LDO2_SLP_SLOT_MASK 0xE000 /* LDO2_SLP_SLOT - [15:13] */ +#define WM831X_LDO2_SLP_SLOT_SHIFT 13 /* LDO2_SLP_SLOT - [15:13] */ +#define WM831X_LDO2_SLP_SLOT_WIDTH 3 /* LDO2_SLP_SLOT - [15:13] */ +#define WM831X_LDO2_SLP_MODE 0x0100 /* LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_MODE_MASK 0x0100 /* LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_MODE_SHIFT 8 /* 
LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_MODE_WIDTH 1 /* LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_VSEL_MASK 0x001F /* LDO2_SLP_VSEL - [4:0] */ +#define WM831X_LDO2_SLP_VSEL_SHIFT 0 /* LDO2_SLP_VSEL - [4:0] */ +#define WM831X_LDO2_SLP_VSEL_WIDTH 5 /* LDO2_SLP_VSEL - [4:0] */ + +/* + * R16494 (0x406E) - LDO3 Control + */ +#define WM831X_LDO3_ERR_ACT_MASK 0xC000 /* LDO3_ERR_ACT - [15:14] */ +#define WM831X_LDO3_ERR_ACT_SHIFT 14 /* LDO3_ERR_ACT - [15:14] */ +#define WM831X_LDO3_ERR_ACT_WIDTH 2 /* LDO3_ERR_ACT - [15:14] */ +#define WM831X_LDO3_HWC_SRC_MASK 0x1800 /* LDO3_HWC_SRC - [12:11] */ +#define WM831X_LDO3_HWC_SRC_SHIFT 11 /* LDO3_HWC_SRC - [12:11] */ +#define WM831X_LDO3_HWC_SRC_WIDTH 2 /* LDO3_HWC_SRC - [12:11] */ +#define WM831X_LDO3_HWC_VSEL 0x0400 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_VSEL_MASK 0x0400 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_VSEL_SHIFT 10 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_VSEL_WIDTH 1 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_MODE_MASK 0x0300 /* LDO3_HWC_MODE - [9:8] */ +#define WM831X_LDO3_HWC_MODE_SHIFT 8 /* LDO3_HWC_MODE - [9:8] */ +#define WM831X_LDO3_HWC_MODE_WIDTH 2 /* LDO3_HWC_MODE - [9:8] */ +#define WM831X_LDO3_FLT 0x0080 /* LDO3_FLT */ +#define WM831X_LDO3_FLT_MASK 0x0080 /* LDO3_FLT */ +#define WM831X_LDO3_FLT_SHIFT 7 /* LDO3_FLT */ +#define WM831X_LDO3_FLT_WIDTH 1 /* LDO3_FLT */ +#define WM831X_LDO3_SWI 0x0040 /* LDO3_SWI */ +#define WM831X_LDO3_SWI_MASK 0x0040 /* LDO3_SWI */ +#define WM831X_LDO3_SWI_SHIFT 6 /* LDO3_SWI */ +#define WM831X_LDO3_SWI_WIDTH 1 /* LDO3_SWI */ +#define WM831X_LDO3_LP_MODE 0x0001 /* LDO3_LP_MODE */ +#define WM831X_LDO3_LP_MODE_MASK 0x0001 /* LDO3_LP_MODE */ +#define WM831X_LDO3_LP_MODE_SHIFT 0 /* LDO3_LP_MODE */ +#define WM831X_LDO3_LP_MODE_WIDTH 1 /* LDO3_LP_MODE */ + +/* + * R16495 (0x406F) - LDO3 ON Control + */ +#define WM831X_LDO3_ON_SLOT_MASK 0xE000 /* LDO3_ON_SLOT - [15:13] */ +#define WM831X_LDO3_ON_SLOT_SHIFT 13 /* LDO3_ON_SLOT - [15:13] */ +#define WM831X_LDO3_ON_SLOT_WIDTH 3 /* LDO3_ON_SLOT - [15:13] */ +#define WM831X_LDO3_ON_MODE 0x0100 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_MODE_MASK 0x0100 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_MODE_SHIFT 8 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_MODE_WIDTH 1 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_VSEL_MASK 0x001F /* LDO3_ON_VSEL - [4:0] */ +#define WM831X_LDO3_ON_VSEL_SHIFT 0 /* LDO3_ON_VSEL - [4:0] */ +#define WM831X_LDO3_ON_VSEL_WIDTH 5 /* LDO3_ON_VSEL - [4:0] */ + +/* + * R16496 (0x4070) - LDO3 SLEEP Control + */ +#define WM831X_LDO3_SLP_SLOT_MASK 0xE000 /* LDO3_SLP_SLOT - [15:13] */ +#define WM831X_LDO3_SLP_SLOT_SHIFT 13 /* LDO3_SLP_SLOT - [15:13] */ +#define WM831X_LDO3_SLP_SLOT_WIDTH 3 /* LDO3_SLP_SLOT - [15:13] */ +#define WM831X_LDO3_SLP_MODE 0x0100 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_MODE_MASK 0x0100 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_MODE_SHIFT 8 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_MODE_WIDTH 1 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_VSEL_MASK 0x001F /* LDO3_SLP_VSEL - [4:0] */ +#define WM831X_LDO3_SLP_VSEL_SHIFT 0 /* LDO3_SLP_VSEL - [4:0] */ +#define WM831X_LDO3_SLP_VSEL_WIDTH 5 /* LDO3_SLP_VSEL - [4:0] */ + +/* + * R16497 (0x4071) - LDO4 Control + */ +#define WM831X_LDO4_ERR_ACT_MASK 0xC000 /* LDO4_ERR_ACT - [15:14] */ +#define WM831X_LDO4_ERR_ACT_SHIFT 14 /* LDO4_ERR_ACT - [15:14] */ +#define WM831X_LDO4_ERR_ACT_WIDTH 2 /* LDO4_ERR_ACT - [15:14] */ +#define WM831X_LDO4_HWC_SRC_MASK 0x1800 /* LDO4_HWC_SRC - [12:11] */ +#define WM831X_LDO4_HWC_SRC_SHIFT 11 /* LDO4_HWC_SRC - [12:11] */ +#define 
WM831X_LDO4_HWC_SRC_WIDTH 2 /* LDO4_HWC_SRC - [12:11] */ +#define WM831X_LDO4_HWC_VSEL 0x0400 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_VSEL_MASK 0x0400 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_VSEL_SHIFT 10 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_VSEL_WIDTH 1 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_MODE_MASK 0x0300 /* LDO4_HWC_MODE - [9:8] */ +#define WM831X_LDO4_HWC_MODE_SHIFT 8 /* LDO4_HWC_MODE - [9:8] */ +#define WM831X_LDO4_HWC_MODE_WIDTH 2 /* LDO4_HWC_MODE - [9:8] */ +#define WM831X_LDO4_FLT 0x0080 /* LDO4_FLT */ +#define WM831X_LDO4_FLT_MASK 0x0080 /* LDO4_FLT */ +#define WM831X_LDO4_FLT_SHIFT 7 /* LDO4_FLT */ +#define WM831X_LDO4_FLT_WIDTH 1 /* LDO4_FLT */ +#define WM831X_LDO4_SWI 0x0040 /* LDO4_SWI */ +#define WM831X_LDO4_SWI_MASK 0x0040 /* LDO4_SWI */ +#define WM831X_LDO4_SWI_SHIFT 6 /* LDO4_SWI */ +#define WM831X_LDO4_SWI_WIDTH 1 /* LDO4_SWI */ +#define WM831X_LDO4_LP_MODE 0x0001 /* LDO4_LP_MODE */ +#define WM831X_LDO4_LP_MODE_MASK 0x0001 /* LDO4_LP_MODE */ +#define WM831X_LDO4_LP_MODE_SHIFT 0 /* LDO4_LP_MODE */ +#define WM831X_LDO4_LP_MODE_WIDTH 1 /* LDO4_LP_MODE */ + +/* + * R16498 (0x4072) - LDO4 ON Control + */ +#define WM831X_LDO4_ON_SLOT_MASK 0xE000 /* LDO4_ON_SLOT - [15:13] */ +#define WM831X_LDO4_ON_SLOT_SHIFT 13 /* LDO4_ON_SLOT - [15:13] */ +#define WM831X_LDO4_ON_SLOT_WIDTH 3 /* LDO4_ON_SLOT - [15:13] */ +#define WM831X_LDO4_ON_MODE 0x0100 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_MODE_MASK 0x0100 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_MODE_SHIFT 8 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_MODE_WIDTH 1 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_VSEL_MASK 0x001F /* LDO4_ON_VSEL - [4:0] */ +#define WM831X_LDO4_ON_VSEL_SHIFT 0 /* LDO4_ON_VSEL - [4:0] */ +#define WM831X_LDO4_ON_VSEL_WIDTH 5 /* LDO4_ON_VSEL - [4:0] */ + +/* + * R16499 (0x4073) - LDO4 SLEEP Control + */ +#define WM831X_LDO4_SLP_SLOT_MASK 0xE000 /* LDO4_SLP_SLOT - [15:13] */ +#define WM831X_LDO4_SLP_SLOT_SHIFT 13 /* LDO4_SLP_SLOT - [15:13] */ +#define WM831X_LDO4_SLP_SLOT_WIDTH 3 /* LDO4_SLP_SLOT - [15:13] */ +#define WM831X_LDO4_SLP_MODE 0x0100 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_MODE_MASK 0x0100 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_MODE_SHIFT 8 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_MODE_WIDTH 1 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_VSEL_MASK 0x001F /* LDO4_SLP_VSEL - [4:0] */ +#define WM831X_LDO4_SLP_VSEL_SHIFT 0 /* LDO4_SLP_VSEL - [4:0] */ +#define WM831X_LDO4_SLP_VSEL_WIDTH 5 /* LDO4_SLP_VSEL - [4:0] */ + +/* + * R16500 (0x4074) - LDO5 Control + */ +#define WM831X_LDO5_ERR_ACT_MASK 0xC000 /* LDO5_ERR_ACT - [15:14] */ +#define WM831X_LDO5_ERR_ACT_SHIFT 14 /* LDO5_ERR_ACT - [15:14] */ +#define WM831X_LDO5_ERR_ACT_WIDTH 2 /* LDO5_ERR_ACT - [15:14] */ +#define WM831X_LDO5_HWC_SRC_MASK 0x1800 /* LDO5_HWC_SRC - [12:11] */ +#define WM831X_LDO5_HWC_SRC_SHIFT 11 /* LDO5_HWC_SRC - [12:11] */ +#define WM831X_LDO5_HWC_SRC_WIDTH 2 /* LDO5_HWC_SRC - [12:11] */ +#define WM831X_LDO5_HWC_VSEL 0x0400 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_VSEL_MASK 0x0400 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_VSEL_SHIFT 10 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_VSEL_WIDTH 1 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_MODE_MASK 0x0300 /* LDO5_HWC_MODE - [9:8] */ +#define WM831X_LDO5_HWC_MODE_SHIFT 8 /* LDO5_HWC_MODE - [9:8] */ +#define WM831X_LDO5_HWC_MODE_WIDTH 2 /* LDO5_HWC_MODE - [9:8] */ +#define WM831X_LDO5_FLT 0x0080 /* LDO5_FLT */ +#define WM831X_LDO5_FLT_MASK 0x0080 /* LDO5_FLT */ +#define WM831X_LDO5_FLT_SHIFT 7 /* LDO5_FLT */ +#define WM831X_LDO5_FLT_WIDTH 1 
/* LDO5_FLT */ +#define WM831X_LDO5_SWI 0x0040 /* LDO5_SWI */ +#define WM831X_LDO5_SWI_MASK 0x0040 /* LDO5_SWI */ +#define WM831X_LDO5_SWI_SHIFT 6 /* LDO5_SWI */ +#define WM831X_LDO5_SWI_WIDTH 1 /* LDO5_SWI */ +#define WM831X_LDO5_LP_MODE 0x0001 /* LDO5_LP_MODE */ +#define WM831X_LDO5_LP_MODE_MASK 0x0001 /* LDO5_LP_MODE */ +#define WM831X_LDO5_LP_MODE_SHIFT 0 /* LDO5_LP_MODE */ +#define WM831X_LDO5_LP_MODE_WIDTH 1 /* LDO5_LP_MODE */ + +/* + * R16501 (0x4075) - LDO5 ON Control + */ +#define WM831X_LDO5_ON_SLOT_MASK 0xE000 /* LDO5_ON_SLOT - [15:13] */ +#define WM831X_LDO5_ON_SLOT_SHIFT 13 /* LDO5_ON_SLOT - [15:13] */ +#define WM831X_LDO5_ON_SLOT_WIDTH 3 /* LDO5_ON_SLOT - [15:13] */ +#define WM831X_LDO5_ON_MODE 0x0100 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_MODE_MASK 0x0100 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_MODE_SHIFT 8 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_MODE_WIDTH 1 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_VSEL_MASK 0x001F /* LDO5_ON_VSEL - [4:0] */ +#define WM831X_LDO5_ON_VSEL_SHIFT 0 /* LDO5_ON_VSEL - [4:0] */ +#define WM831X_LDO5_ON_VSEL_WIDTH 5 /* LDO5_ON_VSEL - [4:0] */ + +/* + * R16502 (0x4076) - LDO5 SLEEP Control + */ +#define WM831X_LDO5_SLP_SLOT_MASK 0xE000 /* LDO5_SLP_SLOT - [15:13] */ +#define WM831X_LDO5_SLP_SLOT_SHIFT 13 /* LDO5_SLP_SLOT - [15:13] */ +#define WM831X_LDO5_SLP_SLOT_WIDTH 3 /* LDO5_SLP_SLOT - [15:13] */ +#define WM831X_LDO5_SLP_MODE 0x0100 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_MODE_MASK 0x0100 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_MODE_SHIFT 8 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_MODE_WIDTH 1 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_VSEL_MASK 0x001F /* LDO5_SLP_VSEL - [4:0] */ +#define WM831X_LDO5_SLP_VSEL_SHIFT 0 /* LDO5_SLP_VSEL - [4:0] */ +#define WM831X_LDO5_SLP_VSEL_WIDTH 5 /* LDO5_SLP_VSEL - [4:0] */ + +/* + * R16503 (0x4077) - LDO6 Control + */ +#define WM831X_LDO6_ERR_ACT_MASK 0xC000 /* LDO6_ERR_ACT - [15:14] */ +#define WM831X_LDO6_ERR_ACT_SHIFT 14 /* LDO6_ERR_ACT - [15:14] */ +#define WM831X_LDO6_ERR_ACT_WIDTH 2 /* LDO6_ERR_ACT - [15:14] */ +#define WM831X_LDO6_HWC_SRC_MASK 0x1800 /* LDO6_HWC_SRC - [12:11] */ +#define WM831X_LDO6_HWC_SRC_SHIFT 11 /* LDO6_HWC_SRC - [12:11] */ +#define WM831X_LDO6_HWC_SRC_WIDTH 2 /* LDO6_HWC_SRC - [12:11] */ +#define WM831X_LDO6_HWC_VSEL 0x0400 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_VSEL_MASK 0x0400 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_VSEL_SHIFT 10 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_VSEL_WIDTH 1 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_MODE_MASK 0x0300 /* LDO6_HWC_MODE - [9:8] */ +#define WM831X_LDO6_HWC_MODE_SHIFT 8 /* LDO6_HWC_MODE - [9:8] */ +#define WM831X_LDO6_HWC_MODE_WIDTH 2 /* LDO6_HWC_MODE - [9:8] */ +#define WM831X_LDO6_FLT 0x0080 /* LDO6_FLT */ +#define WM831X_LDO6_FLT_MASK 0x0080 /* LDO6_FLT */ +#define WM831X_LDO6_FLT_SHIFT 7 /* LDO6_FLT */ +#define WM831X_LDO6_FLT_WIDTH 1 /* LDO6_FLT */ +#define WM831X_LDO6_SWI 0x0040 /* LDO6_SWI */ +#define WM831X_LDO6_SWI_MASK 0x0040 /* LDO6_SWI */ +#define WM831X_LDO6_SWI_SHIFT 6 /* LDO6_SWI */ +#define WM831X_LDO6_SWI_WIDTH 1 /* LDO6_SWI */ +#define WM831X_LDO6_LP_MODE 0x0001 /* LDO6_LP_MODE */ +#define WM831X_LDO6_LP_MODE_MASK 0x0001 /* LDO6_LP_MODE */ +#define WM831X_LDO6_LP_MODE_SHIFT 0 /* LDO6_LP_MODE */ +#define WM831X_LDO6_LP_MODE_WIDTH 1 /* LDO6_LP_MODE */ + +/* + * R16504 (0x4078) - LDO6 ON Control + */ +#define WM831X_LDO6_ON_SLOT_MASK 0xE000 /* LDO6_ON_SLOT - [15:13] */ +#define WM831X_LDO6_ON_SLOT_SHIFT 13 /* LDO6_ON_SLOT - [15:13] */ +#define WM831X_LDO6_ON_SLOT_WIDTH 3 /* 
LDO6_ON_SLOT - [15:13] */ +#define WM831X_LDO6_ON_MODE 0x0100 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_MODE_MASK 0x0100 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_MODE_SHIFT 8 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_MODE_WIDTH 1 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_VSEL_MASK 0x001F /* LDO6_ON_VSEL - [4:0] */ +#define WM831X_LDO6_ON_VSEL_SHIFT 0 /* LDO6_ON_VSEL - [4:0] */ +#define WM831X_LDO6_ON_VSEL_WIDTH 5 /* LDO6_ON_VSEL - [4:0] */ + +/* + * R16505 (0x4079) - LDO6 SLEEP Control + */ +#define WM831X_LDO6_SLP_SLOT_MASK 0xE000 /* LDO6_SLP_SLOT - [15:13] */ +#define WM831X_LDO6_SLP_SLOT_SHIFT 13 /* LDO6_SLP_SLOT - [15:13] */ +#define WM831X_LDO6_SLP_SLOT_WIDTH 3 /* LDO6_SLP_SLOT - [15:13] */ +#define WM831X_LDO6_SLP_MODE 0x0100 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_MODE_MASK 0x0100 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_MODE_SHIFT 8 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_MODE_WIDTH 1 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_VSEL_MASK 0x001F /* LDO6_SLP_VSEL - [4:0] */ +#define WM831X_LDO6_SLP_VSEL_SHIFT 0 /* LDO6_SLP_VSEL - [4:0] */ +#define WM831X_LDO6_SLP_VSEL_WIDTH 5 /* LDO6_SLP_VSEL - [4:0] */ + +/* + * R16506 (0x407A) - LDO7 Control + */ +#define WM831X_LDO7_ERR_ACT_MASK 0xC000 /* LDO7_ERR_ACT - [15:14] */ +#define WM831X_LDO7_ERR_ACT_SHIFT 14 /* LDO7_ERR_ACT - [15:14] */ +#define WM831X_LDO7_ERR_ACT_WIDTH 2 /* LDO7_ERR_ACT - [15:14] */ +#define WM831X_LDO7_HWC_SRC_MASK 0x1800 /* LDO7_HWC_SRC - [12:11] */ +#define WM831X_LDO7_HWC_SRC_SHIFT 11 /* LDO7_HWC_SRC - [12:11] */ +#define WM831X_LDO7_HWC_SRC_WIDTH 2 /* LDO7_HWC_SRC - [12:11] */ +#define WM831X_LDO7_HWC_VSEL 0x0400 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_VSEL_MASK 0x0400 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_VSEL_SHIFT 10 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_VSEL_WIDTH 1 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_MODE_MASK 0x0300 /* LDO7_HWC_MODE - [9:8] */ +#define WM831X_LDO7_HWC_MODE_SHIFT 8 /* LDO7_HWC_MODE - [9:8] */ +#define WM831X_LDO7_HWC_MODE_WIDTH 2 /* LDO7_HWC_MODE - [9:8] */ +#define WM831X_LDO7_FLT 0x0080 /* LDO7_FLT */ +#define WM831X_LDO7_FLT_MASK 0x0080 /* LDO7_FLT */ +#define WM831X_LDO7_FLT_SHIFT 7 /* LDO7_FLT */ +#define WM831X_LDO7_FLT_WIDTH 1 /* LDO7_FLT */ +#define WM831X_LDO7_SWI 0x0040 /* LDO7_SWI */ +#define WM831X_LDO7_SWI_MASK 0x0040 /* LDO7_SWI */ +#define WM831X_LDO7_SWI_SHIFT 6 /* LDO7_SWI */ +#define WM831X_LDO7_SWI_WIDTH 1 /* LDO7_SWI */ + +/* + * R16507 (0x407B) - LDO7 ON Control + */ +#define WM831X_LDO7_ON_SLOT_MASK 0xE000 /* LDO7_ON_SLOT - [15:13] */ +#define WM831X_LDO7_ON_SLOT_SHIFT 13 /* LDO7_ON_SLOT - [15:13] */ +#define WM831X_LDO7_ON_SLOT_WIDTH 3 /* LDO7_ON_SLOT - [15:13] */ +#define WM831X_LDO7_ON_MODE 0x0100 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_MODE_MASK 0x0100 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_MODE_SHIFT 8 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_MODE_WIDTH 1 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_VSEL_MASK 0x001F /* LDO7_ON_VSEL - [4:0] */ +#define WM831X_LDO7_ON_VSEL_SHIFT 0 /* LDO7_ON_VSEL - [4:0] */ +#define WM831X_LDO7_ON_VSEL_WIDTH 5 /* LDO7_ON_VSEL - [4:0] */ + +/* + * R16508 (0x407C) - LDO7 SLEEP Control + */ +#define WM831X_LDO7_SLP_SLOT_MASK 0xE000 /* LDO7_SLP_SLOT - [15:13] */ +#define WM831X_LDO7_SLP_SLOT_SHIFT 13 /* LDO7_SLP_SLOT - [15:13] */ +#define WM831X_LDO7_SLP_SLOT_WIDTH 3 /* LDO7_SLP_SLOT - [15:13] */ +#define WM831X_LDO7_SLP_MODE 0x0100 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_MODE_MASK 0x0100 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_MODE_SHIFT 8 /* LDO7_SLP_MODE */ +#define 
WM831X_LDO7_SLP_MODE_WIDTH 1 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_VSEL_MASK 0x001F /* LDO7_SLP_VSEL - [4:0] */ +#define WM831X_LDO7_SLP_VSEL_SHIFT 0 /* LDO7_SLP_VSEL - [4:0] */ +#define WM831X_LDO7_SLP_VSEL_WIDTH 5 /* LDO7_SLP_VSEL - [4:0] */ + +/* + * R16509 (0x407D) - LDO8 Control + */ +#define WM831X_LDO8_ERR_ACT_MASK 0xC000 /* LDO8_ERR_ACT - [15:14] */ +#define WM831X_LDO8_ERR_ACT_SHIFT 14 /* LDO8_ERR_ACT - [15:14] */ +#define WM831X_LDO8_ERR_ACT_WIDTH 2 /* LDO8_ERR_ACT - [15:14] */ +#define WM831X_LDO8_HWC_SRC_MASK 0x1800 /* LDO8_HWC_SRC - [12:11] */ +#define WM831X_LDO8_HWC_SRC_SHIFT 11 /* LDO8_HWC_SRC - [12:11] */ +#define WM831X_LDO8_HWC_SRC_WIDTH 2 /* LDO8_HWC_SRC - [12:11] */ +#define WM831X_LDO8_HWC_VSEL 0x0400 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_VSEL_MASK 0x0400 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_VSEL_SHIFT 10 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_VSEL_WIDTH 1 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_MODE_MASK 0x0300 /* LDO8_HWC_MODE - [9:8] */ +#define WM831X_LDO8_HWC_MODE_SHIFT 8 /* LDO8_HWC_MODE - [9:8] */ +#define WM831X_LDO8_HWC_MODE_WIDTH 2 /* LDO8_HWC_MODE - [9:8] */ +#define WM831X_LDO8_FLT 0x0080 /* LDO8_FLT */ +#define WM831X_LDO8_FLT_MASK 0x0080 /* LDO8_FLT */ +#define WM831X_LDO8_FLT_SHIFT 7 /* LDO8_FLT */ +#define WM831X_LDO8_FLT_WIDTH 1 /* LDO8_FLT */ +#define WM831X_LDO8_SWI 0x0040 /* LDO8_SWI */ +#define WM831X_LDO8_SWI_MASK 0x0040 /* LDO8_SWI */ +#define WM831X_LDO8_SWI_SHIFT 6 /* LDO8_SWI */ +#define WM831X_LDO8_SWI_WIDTH 1 /* LDO8_SWI */ + +/* + * R16510 (0x407E) - LDO8 ON Control + */ +#define WM831X_LDO8_ON_SLOT_MASK 0xE000 /* LDO8_ON_SLOT - [15:13] */ +#define WM831X_LDO8_ON_SLOT_SHIFT 13 /* LDO8_ON_SLOT - [15:13] */ +#define WM831X_LDO8_ON_SLOT_WIDTH 3 /* LDO8_ON_SLOT - [15:13] */ +#define WM831X_LDO8_ON_MODE 0x0100 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_MODE_MASK 0x0100 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_MODE_SHIFT 8 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_MODE_WIDTH 1 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_VSEL_MASK 0x001F /* LDO8_ON_VSEL - [4:0] */ +#define WM831X_LDO8_ON_VSEL_SHIFT 0 /* LDO8_ON_VSEL - [4:0] */ +#define WM831X_LDO8_ON_VSEL_WIDTH 5 /* LDO8_ON_VSEL - [4:0] */ + +/* + * R16511 (0x407F) - LDO8 SLEEP Control + */ +#define WM831X_LDO8_SLP_SLOT_MASK 0xE000 /* LDO8_SLP_SLOT - [15:13] */ +#define WM831X_LDO8_SLP_SLOT_SHIFT 13 /* LDO8_SLP_SLOT - [15:13] */ +#define WM831X_LDO8_SLP_SLOT_WIDTH 3 /* LDO8_SLP_SLOT - [15:13] */ +#define WM831X_LDO8_SLP_MODE 0x0100 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_MODE_MASK 0x0100 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_MODE_SHIFT 8 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_MODE_WIDTH 1 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_VSEL_MASK 0x001F /* LDO8_SLP_VSEL - [4:0] */ +#define WM831X_LDO8_SLP_VSEL_SHIFT 0 /* LDO8_SLP_VSEL - [4:0] */ +#define WM831X_LDO8_SLP_VSEL_WIDTH 5 /* LDO8_SLP_VSEL - [4:0] */ + +/* + * R16512 (0x4080) - LDO9 Control + */ +#define WM831X_LDO9_ERR_ACT_MASK 0xC000 /* LDO9_ERR_ACT - [15:14] */ +#define WM831X_LDO9_ERR_ACT_SHIFT 14 /* LDO9_ERR_ACT - [15:14] */ +#define WM831X_LDO9_ERR_ACT_WIDTH 2 /* LDO9_ERR_ACT - [15:14] */ +#define WM831X_LDO9_HWC_SRC_MASK 0x1800 /* LDO9_HWC_SRC - [12:11] */ +#define WM831X_LDO9_HWC_SRC_SHIFT 11 /* LDO9_HWC_SRC - [12:11] */ +#define WM831X_LDO9_HWC_SRC_WIDTH 2 /* LDO9_HWC_SRC - [12:11] */ +#define WM831X_LDO9_HWC_VSEL 0x0400 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_VSEL_MASK 0x0400 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_VSEL_SHIFT 10 /* LDO9_HWC_VSEL */ +#define 
WM831X_LDO9_HWC_VSEL_WIDTH 1 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_MODE_MASK 0x0300 /* LDO9_HWC_MODE - [9:8] */ +#define WM831X_LDO9_HWC_MODE_SHIFT 8 /* LDO9_HWC_MODE - [9:8] */ +#define WM831X_LDO9_HWC_MODE_WIDTH 2 /* LDO9_HWC_MODE - [9:8] */ +#define WM831X_LDO9_FLT 0x0080 /* LDO9_FLT */ +#define WM831X_LDO9_FLT_MASK 0x0080 /* LDO9_FLT */ +#define WM831X_LDO9_FLT_SHIFT 7 /* LDO9_FLT */ +#define WM831X_LDO9_FLT_WIDTH 1 /* LDO9_FLT */ +#define WM831X_LDO9_SWI 0x0040 /* LDO9_SWI */ +#define WM831X_LDO9_SWI_MASK 0x0040 /* LDO9_SWI */ +#define WM831X_LDO9_SWI_SHIFT 6 /* LDO9_SWI */ +#define WM831X_LDO9_SWI_WIDTH 1 /* LDO9_SWI */ + +/* + * R16513 (0x4081) - LDO9 ON Control + */ +#define WM831X_LDO9_ON_SLOT_MASK 0xE000 /* LDO9_ON_SLOT - [15:13] */ +#define WM831X_LDO9_ON_SLOT_SHIFT 13 /* LDO9_ON_SLOT - [15:13] */ +#define WM831X_LDO9_ON_SLOT_WIDTH 3 /* LDO9_ON_SLOT - [15:13] */ +#define WM831X_LDO9_ON_MODE 0x0100 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_MODE_MASK 0x0100 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_MODE_SHIFT 8 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_MODE_WIDTH 1 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_VSEL_MASK 0x001F /* LDO9_ON_VSEL - [4:0] */ +#define WM831X_LDO9_ON_VSEL_SHIFT 0 /* LDO9_ON_VSEL - [4:0] */ +#define WM831X_LDO9_ON_VSEL_WIDTH 5 /* LDO9_ON_VSEL - [4:0] */ + +/* + * R16514 (0x4082) - LDO9 SLEEP Control + */ +#define WM831X_LDO9_SLP_SLOT_MASK 0xE000 /* LDO9_SLP_SLOT - [15:13] */ +#define WM831X_LDO9_SLP_SLOT_SHIFT 13 /* LDO9_SLP_SLOT - [15:13] */ +#define WM831X_LDO9_SLP_SLOT_WIDTH 3 /* LDO9_SLP_SLOT - [15:13] */ +#define WM831X_LDO9_SLP_MODE 0x0100 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_MODE_MASK 0x0100 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_MODE_SHIFT 8 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_MODE_WIDTH 1 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_VSEL_MASK 0x001F /* LDO9_SLP_VSEL - [4:0] */ +#define WM831X_LDO9_SLP_VSEL_SHIFT 0 /* LDO9_SLP_VSEL - [4:0] */ +#define WM831X_LDO9_SLP_VSEL_WIDTH 5 /* LDO9_SLP_VSEL - [4:0] */ + +/* + * R16515 (0x4083) - LDO10 Control + */ +#define WM831X_LDO10_ERR_ACT_MASK 0xC000 /* LDO10_ERR_ACT - [15:14] */ +#define WM831X_LDO10_ERR_ACT_SHIFT 14 /* LDO10_ERR_ACT - [15:14] */ +#define WM831X_LDO10_ERR_ACT_WIDTH 2 /* LDO10_ERR_ACT - [15:14] */ +#define WM831X_LDO10_HWC_SRC_MASK 0x1800 /* LDO10_HWC_SRC - [12:11] */ +#define WM831X_LDO10_HWC_SRC_SHIFT 11 /* LDO10_HWC_SRC - [12:11] */ +#define WM831X_LDO10_HWC_SRC_WIDTH 2 /* LDO10_HWC_SRC - [12:11] */ +#define WM831X_LDO10_HWC_VSEL 0x0400 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_VSEL_MASK 0x0400 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_VSEL_SHIFT 10 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_VSEL_WIDTH 1 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_MODE_MASK 0x0300 /* LDO10_HWC_MODE - [9:8] */ +#define WM831X_LDO10_HWC_MODE_SHIFT 8 /* LDO10_HWC_MODE - [9:8] */ +#define WM831X_LDO10_HWC_MODE_WIDTH 2 /* LDO10_HWC_MODE - [9:8] */ +#define WM831X_LDO10_FLT 0x0080 /* LDO10_FLT */ +#define WM831X_LDO10_FLT_MASK 0x0080 /* LDO10_FLT */ +#define WM831X_LDO10_FLT_SHIFT 7 /* LDO10_FLT */ +#define WM831X_LDO10_FLT_WIDTH 1 /* LDO10_FLT */ +#define WM831X_LDO10_SWI 0x0040 /* LDO10_SWI */ +#define WM831X_LDO10_SWI_MASK 0x0040 /* LDO10_SWI */ +#define WM831X_LDO10_SWI_SHIFT 6 /* LDO10_SWI */ +#define WM831X_LDO10_SWI_WIDTH 1 /* LDO10_SWI */ + +/* + * R16516 (0x4084) - LDO10 ON Control + */ +#define WM831X_LDO10_ON_SLOT_MASK 0xE000 /* LDO10_ON_SLOT - [15:13] */ +#define WM831X_LDO10_ON_SLOT_SHIFT 13 /* LDO10_ON_SLOT - [15:13] */ +#define 
WM831X_LDO10_ON_SLOT_WIDTH 3 /* LDO10_ON_SLOT - [15:13] */ +#define WM831X_LDO10_ON_MODE 0x0100 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_MODE_MASK 0x0100 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_MODE_SHIFT 8 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_MODE_WIDTH 1 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_VSEL_MASK 0x001F /* LDO10_ON_VSEL - [4:0] */ +#define WM831X_LDO10_ON_VSEL_SHIFT 0 /* LDO10_ON_VSEL - [4:0] */ +#define WM831X_LDO10_ON_VSEL_WIDTH 5 /* LDO10_ON_VSEL - [4:0] */ + +/* + * R16517 (0x4085) - LDO10 SLEEP Control + */ +#define WM831X_LDO10_SLP_SLOT_MASK 0xE000 /* LDO10_SLP_SLOT - [15:13] */ +#define WM831X_LDO10_SLP_SLOT_SHIFT 13 /* LDO10_SLP_SLOT - [15:13] */ +#define WM831X_LDO10_SLP_SLOT_WIDTH 3 /* LDO10_SLP_SLOT - [15:13] */ +#define WM831X_LDO10_SLP_MODE 0x0100 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_MODE_MASK 0x0100 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_MODE_SHIFT 8 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_MODE_WIDTH 1 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_VSEL_MASK 0x001F /* LDO10_SLP_VSEL - [4:0] */ +#define WM831X_LDO10_SLP_VSEL_SHIFT 0 /* LDO10_SLP_VSEL - [4:0] */ +#define WM831X_LDO10_SLP_VSEL_WIDTH 5 /* LDO10_SLP_VSEL - [4:0] */ + +/* + * R16519 (0x4087) - LDO11 ON Control + */ +#define WM831X_LDO11_ON_SLOT_MASK 0xE000 /* LDO11_ON_SLOT - [15:13] */ +#define WM831X_LDO11_ON_SLOT_SHIFT 13 /* LDO11_ON_SLOT - [15:13] */ +#define WM831X_LDO11_ON_SLOT_WIDTH 3 /* LDO11_ON_SLOT - [15:13] */ +#define WM831X_LDO11_OFFENA 0x1000 /* LDO11_OFFENA */ +#define WM831X_LDO11_OFFENA_MASK 0x1000 /* LDO11_OFFENA */ +#define WM831X_LDO11_OFFENA_SHIFT 12 /* LDO11_OFFENA */ +#define WM831X_LDO11_OFFENA_WIDTH 1 /* LDO11_OFFENA */ +#define WM831X_LDO11_VSEL_SRC 0x0080 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_VSEL_SRC_MASK 0x0080 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_VSEL_SRC_SHIFT 7 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_VSEL_SRC_WIDTH 1 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_ON_VSEL_MASK 0x000F /* LDO11_ON_VSEL - [3:0] */ +#define WM831X_LDO11_ON_VSEL_SHIFT 0 /* LDO11_ON_VSEL - [3:0] */ +#define WM831X_LDO11_ON_VSEL_WIDTH 4 /* LDO11_ON_VSEL - [3:0] */ + +/* + * R16520 (0x4088) - LDO11 SLEEP Control + */ +#define WM831X_LDO11_SLP_SLOT_MASK 0xE000 /* LDO11_SLP_SLOT - [15:13] */ +#define WM831X_LDO11_SLP_SLOT_SHIFT 13 /* LDO11_SLP_SLOT - [15:13] */ +#define WM831X_LDO11_SLP_SLOT_WIDTH 3 /* LDO11_SLP_SLOT - [15:13] */ +#define WM831X_LDO11_SLP_VSEL_MASK 0x000F /* LDO11_SLP_VSEL - [3:0] */ +#define WM831X_LDO11_SLP_VSEL_SHIFT 0 /* LDO11_SLP_VSEL - [3:0] */ +#define WM831X_LDO11_SLP_VSEL_WIDTH 4 /* LDO11_SLP_VSEL - [3:0] */ + +/* + * R16526 (0x408E) - Power Good Source 1 + */ +#define WM831X_DC4_OK 0x0008 /* DC4_OK */ +#define WM831X_DC4_OK_MASK 0x0008 /* DC4_OK */ +#define WM831X_DC4_OK_SHIFT 3 /* DC4_OK */ +#define WM831X_DC4_OK_WIDTH 1 /* DC4_OK */ +#define WM831X_DC3_OK 0x0004 /* DC3_OK */ +#define WM831X_DC3_OK_MASK 0x0004 /* DC3_OK */ +#define WM831X_DC3_OK_SHIFT 2 /* DC3_OK */ +#define WM831X_DC3_OK_WIDTH 1 /* DC3_OK */ +#define WM831X_DC2_OK 0x0002 /* DC2_OK */ +#define WM831X_DC2_OK_MASK 0x0002 /* DC2_OK */ +#define WM831X_DC2_OK_SHIFT 1 /* DC2_OK */ +#define WM831X_DC2_OK_WIDTH 1 /* DC2_OK */ +#define WM831X_DC1_OK 0x0001 /* DC1_OK */ +#define WM831X_DC1_OK_MASK 0x0001 /* DC1_OK */ +#define WM831X_DC1_OK_SHIFT 0 /* DC1_OK */ +#define WM831X_DC1_OK_WIDTH 1 /* DC1_OK */ + +/* + * R16527 (0x408F) - Power Good Source 2 + */ +#define WM831X_LDO10_OK 0x0200 /* LDO10_OK */ +#define WM831X_LDO10_OK_MASK 0x0200 /* LDO10_OK */ +#define 
WM831X_LDO10_OK_SHIFT 9 /* LDO10_OK */ +#define WM831X_LDO10_OK_WIDTH 1 /* LDO10_OK */ +#define WM831X_LDO9_OK 0x0100 /* LDO9_OK */ +#define WM831X_LDO9_OK_MASK 0x0100 /* LDO9_OK */ +#define WM831X_LDO9_OK_SHIFT 8 /* LDO9_OK */ +#define WM831X_LDO9_OK_WIDTH 1 /* LDO9_OK */ +#define WM831X_LDO8_OK 0x0080 /* LDO8_OK */ +#define WM831X_LDO8_OK_MASK 0x0080 /* LDO8_OK */ +#define WM831X_LDO8_OK_SHIFT 7 /* LDO8_OK */ +#define WM831X_LDO8_OK_WIDTH 1 /* LDO8_OK */ +#define WM831X_LDO7_OK 0x0040 /* LDO7_OK */ +#define WM831X_LDO7_OK_MASK 0x0040 /* LDO7_OK */ +#define WM831X_LDO7_OK_SHIFT 6 /* LDO7_OK */ +#define WM831X_LDO7_OK_WIDTH 1 /* LDO7_OK */ +#define WM831X_LDO6_OK 0x0020 /* LDO6_OK */ +#define WM831X_LDO6_OK_MASK 0x0020 /* LDO6_OK */ +#define WM831X_LDO6_OK_SHIFT 5 /* LDO6_OK */ +#define WM831X_LDO6_OK_WIDTH 1 /* LDO6_OK */ +#define WM831X_LDO5_OK 0x0010 /* LDO5_OK */ +#define WM831X_LDO5_OK_MASK 0x0010 /* LDO5_OK */ +#define WM831X_LDO5_OK_SHIFT 4 /* LDO5_OK */ +#define WM831X_LDO5_OK_WIDTH 1 /* LDO5_OK */ +#define WM831X_LDO4_OK 0x0008 /* LDO4_OK */ +#define WM831X_LDO4_OK_MASK 0x0008 /* LDO4_OK */ +#define WM831X_LDO4_OK_SHIFT 3 /* LDO4_OK */ +#define WM831X_LDO4_OK_WIDTH 1 /* LDO4_OK */ +#define WM831X_LDO3_OK 0x0004 /* LDO3_OK */ +#define WM831X_LDO3_OK_MASK 0x0004 /* LDO3_OK */ +#define WM831X_LDO3_OK_SHIFT 2 /* LDO3_OK */ +#define WM831X_LDO3_OK_WIDTH 1 /* LDO3_OK */ +#define WM831X_LDO2_OK 0x0002 /* LDO2_OK */ +#define WM831X_LDO2_OK_MASK 0x0002 /* LDO2_OK */ +#define WM831X_LDO2_OK_SHIFT 1 /* LDO2_OK */ +#define WM831X_LDO2_OK_WIDTH 1 /* LDO2_OK */ +#define WM831X_LDO1_OK 0x0001 /* LDO1_OK */ +#define WM831X_LDO1_OK_MASK 0x0001 /* LDO1_OK */ +#define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */ +#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ + +#define WM831X_ISINK_MAX_ISEL 55 +extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1]; + +#endif diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h new file mode 100644 index 000000000..6bc090d0e --- /dev/null +++ b/include/linux/mfd/wm831x/status.h @@ -0,0 +1,34 @@ +/* + * include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_STATUS_H__ +#define __MFD_WM831X_STATUS_H__ + +#define WM831X_LED_SRC_MASK 0xC000 /* LED_SRC - [15:14] */ +#define WM831X_LED_SRC_SHIFT 14 /* LED_SRC - [15:14] */ +#define WM831X_LED_SRC_WIDTH 2 /* LED_SRC - [15:14] */ +#define WM831X_LED_MODE_MASK 0x0300 /* LED_MODE - [9:8] */ +#define WM831X_LED_MODE_SHIFT 8 /* LED_MODE - [9:8] */ +#define WM831X_LED_MODE_WIDTH 2 /* LED_MODE - [9:8] */ +#define WM831X_LED_SEQ_LEN_MASK 0x0030 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_SEQ_LEN_SHIFT 4 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_SEQ_LEN_WIDTH 2 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_DUR_MASK 0x000C /* LED_DUR - [3:2] */ +#define WM831X_LED_DUR_SHIFT 2 /* LED_DUR - [3:2] */ +#define WM831X_LED_DUR_WIDTH 2 /* LED_DUR - [3:2] */ +#define WM831X_LED_DUTY_CYC_MASK 0x0003 /* LED_DUTY_CYC - [1:0] */ +#define WM831X_LED_DUTY_CYC_SHIFT 0 /* LED_DUTY_CYC - [1:0] */ +#define WM831X_LED_DUTY_CYC_WIDTH 2 /* LED_DUTY_CYC - [1:0] */ + +#endif diff --git a/include/linux/mfd/wm831x/watchdog.h b/include/linux/mfd/wm831x/watchdog.h new file mode 100644 index 000000000..97a99b529 --- /dev/null +++ b/include/linux/mfd/wm831x/watchdog.h @@ -0,0 +1,52 @@ +/* + * include/linux/mfd/wm831x/watchdog.h -- Watchdog for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_WATCHDOG_H__ +#define __MFD_WM831X_WATCHDOG_H__ + + +/* + * R16388 (0x4004) - Watchdog + */ +#define WM831X_WDOG_ENA 0x8000 /* WDOG_ENA */ +#define WM831X_WDOG_ENA_MASK 0x8000 /* WDOG_ENA */ +#define WM831X_WDOG_ENA_SHIFT 15 /* WDOG_ENA */ +#define WM831X_WDOG_ENA_WIDTH 1 /* WDOG_ENA */ +#define WM831X_WDOG_DEBUG 0x4000 /* WDOG_DEBUG */ +#define WM831X_WDOG_DEBUG_MASK 0x4000 /* WDOG_DEBUG */ +#define WM831X_WDOG_DEBUG_SHIFT 14 /* WDOG_DEBUG */ +#define WM831X_WDOG_DEBUG_WIDTH 1 /* WDOG_DEBUG */ +#define WM831X_WDOG_RST_SRC 0x2000 /* WDOG_RST_SRC */ +#define WM831X_WDOG_RST_SRC_MASK 0x2000 /* WDOG_RST_SRC */ +#define WM831X_WDOG_RST_SRC_SHIFT 13 /* WDOG_RST_SRC */ +#define WM831X_WDOG_RST_SRC_WIDTH 1 /* WDOG_RST_SRC */ +#define WM831X_WDOG_SLPENA 0x1000 /* WDOG_SLPENA */ +#define WM831X_WDOG_SLPENA_MASK 0x1000 /* WDOG_SLPENA */ +#define WM831X_WDOG_SLPENA_SHIFT 12 /* WDOG_SLPENA */ +#define WM831X_WDOG_SLPENA_WIDTH 1 /* WDOG_SLPENA */ +#define WM831X_WDOG_RESET 0x0800 /* WDOG_RESET */ +#define WM831X_WDOG_RESET_MASK 0x0800 /* WDOG_RESET */ +#define WM831X_WDOG_RESET_SHIFT 11 /* WDOG_RESET */ +#define WM831X_WDOG_RESET_WIDTH 1 /* WDOG_RESET */ +#define WM831X_WDOG_SECACT_MASK 0x0300 /* WDOG_SECACT - [9:8] */ +#define WM831X_WDOG_SECACT_SHIFT 8 /* WDOG_SECACT - [9:8] */ +#define WM831X_WDOG_SECACT_WIDTH 2 /* WDOG_SECACT - [9:8] */ +#define WM831X_WDOG_PRIMACT_MASK 0x0030 /* WDOG_PRIMACT - [5:4] */ +#define WM831X_WDOG_PRIMACT_SHIFT 4 /* WDOG_PRIMACT - [5:4] */ +#define WM831X_WDOG_PRIMACT_WIDTH 2 /* WDOG_PRIMACT - [5:4] */ +#define WM831X_WDOG_TO_MASK 0x0007 /* WDOG_TO - [2:0] */ +#define WM831X_WDOG_TO_SHIFT 0 /* WDOG_TO - [2:0] */ +#define WM831X_WDOG_TO_WIDTH 3 /* WDOG_TO - [2:0] */ + +#endif diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h new file mode 100644 index 000000000..0bc41c4c0 --- /dev/null +++ b/include/linux/mfd/wm8350/audio.h @@ -0,0 +1,625 
@@ +/* + * audio.h -- Audio Driver for Wolfson WM8350 PMIC + * + * Copyright 2007, 2008 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_WM8350_AUDIO_H_ +#define __LINUX_MFD_WM8350_AUDIO_H_ + +#include <linux/platform_device.h> + +#define WM8350_CLOCK_CONTROL_1 0x28 +#define WM8350_CLOCK_CONTROL_2 0x29 +#define WM8350_FLL_CONTROL_1 0x2A +#define WM8350_FLL_CONTROL_2 0x2B +#define WM8350_FLL_CONTROL_3 0x2C +#define WM8350_FLL_CONTROL_4 0x2D +#define WM8350_DAC_CONTROL 0x30 +#define WM8350_DAC_DIGITAL_VOLUME_L 0x32 +#define WM8350_DAC_DIGITAL_VOLUME_R 0x33 +#define WM8350_DAC_LR_RATE 0x35 +#define WM8350_DAC_CLOCK_CONTROL 0x36 +#define WM8350_DAC_MUTE 0x3A +#define WM8350_DAC_MUTE_VOLUME 0x3B +#define WM8350_DAC_SIDE 0x3C +#define WM8350_ADC_CONTROL 0x40 +#define WM8350_ADC_DIGITAL_VOLUME_L 0x42 +#define WM8350_ADC_DIGITAL_VOLUME_R 0x43 +#define WM8350_ADC_DIVIDER 0x44 +#define WM8350_ADC_LR_RATE 0x46 +#define WM8350_INPUT_CONTROL 0x48 +#define WM8350_IN3_INPUT_CONTROL 0x49 +#define WM8350_MIC_BIAS_CONTROL 0x4A +#define WM8350_OUTPUT_CONTROL 0x4C +#define WM8350_JACK_DETECT 0x4D +#define WM8350_ANTI_POP_CONTROL 0x4E +#define WM8350_LEFT_INPUT_VOLUME 0x50 +#define WM8350_RIGHT_INPUT_VOLUME 0x51 +#define WM8350_LEFT_MIXER_CONTROL 0x58 +#define WM8350_RIGHT_MIXER_CONTROL 0x59 +#define WM8350_OUT3_MIXER_CONTROL 0x5C +#define WM8350_OUT4_MIXER_CONTROL 0x5D +#define WM8350_OUTPUT_LEFT_MIXER_VOLUME 0x60 +#define WM8350_OUTPUT_RIGHT_MIXER_VOLUME 0x61 +#define WM8350_INPUT_MIXER_VOLUME_L 0x62 +#define WM8350_INPUT_MIXER_VOLUME_R 0x63 +#define WM8350_INPUT_MIXER_VOLUME 0x64 +#define WM8350_LOUT1_VOLUME 0x68 +#define WM8350_ROUT1_VOLUME 0x69 +#define WM8350_LOUT2_VOLUME 0x6A +#define WM8350_ROUT2_VOLUME 0x6B +#define WM8350_BEEP_VOLUME 0x6F +#define WM8350_AI_FORMATING 0x70 +#define WM8350_ADC_DAC_COMP 0x71 +#define WM8350_AI_ADC_CONTROL 0x72 +#define WM8350_AI_DAC_CONTROL 0x73 +#define WM8350_AIF_TEST 0x74 +#define WM8350_JACK_PIN_STATUS 0xE7 + +/* Bit values for R08 (0x08) */ +#define WM8350_CODEC_ISEL_1_5 0 /* x1.5 */ +#define WM8350_CODEC_ISEL_1_0 1 /* x1.0 */ +#define WM8350_CODEC_ISEL_0_75 2 /* x0.75 */ +#define WM8350_CODEC_ISEL_0_5 3 /* x0.5 */ + +#define WM8350_VMID_OFF 0 +#define WM8350_VMID_300K 1 +#define WM8350_VMID_50K 2 +#define WM8350_VMID_5K 3 + +/* + * R40 (0x28) - Clock Control 1 + */ +#define WM8350_TOCLK_RATE 0x4000 +#define WM8350_MCLK_SEL 0x0800 +#define WM8350_MCLK_DIV_MASK 0x0100 +#define WM8350_BCLK_DIV_MASK 0x00F0 +#define WM8350_OPCLK_DIV_MASK 0x0007 + +/* + * R41 (0x29) - Clock Control 2 + */ +#define WM8350_LRC_ADC_SEL 0x8000 +#define WM8350_MCLK_DIR 0x0001 + +/* + * R42 (0x2A) - FLL Control 1 + */ +#define WM8350_FLL_DITHER_WIDTH_MASK 0x3000 +#define WM8350_FLL_DITHER_HP 0x0800 +#define WM8350_FLL_OUTDIV_MASK 0x0700 +#define WM8350_FLL_RSP_RATE_MASK 0x00F0 +#define WM8350_FLL_RATE_MASK 0x0007 + +/* + * R43 (0x2B) - FLL Control 2 + */ +#define WM8350_FLL_RATIO_MASK 0xF800 +#define WM8350_FLL_N_MASK 0x03FF + +/* + * R44 (0x2C) - FLL Control 3 + */ +#define WM8350_FLL_K_MASK 0xFFFF + +/* + * R45 (0x2D) - FLL Control 4 + */ +#define WM8350_FLL_FRAC 0x0020 +#define WM8350_FLL_SLOW_LOCK_REF 0x0010 +#define WM8350_FLL_CLK_SRC_MASK 0x0003 + +/* + * R48 (0x30) - DAC Control + */ +#define WM8350_DAC_MONO 0x2000 +#define WM8350_AIF_LRCLKRATE
0x1000 +#define WM8350_DEEMP_MASK 0x0030 +#define WM8350_DACL_DATINV 0x0002 +#define WM8350_DACR_DATINV 0x0001 + +/* + * R50 (0x32) - DAC Digital Volume L + */ +#define WM8350_DAC_VU 0x0100 +#define WM8350_DACL_VOL_MASK 0x00FF + +/* + * R51 (0x33) - DAC Digital Volume R + */ +#define WM8350_DAC_VU 0x0100 +#define WM8350_DACR_VOL_MASK 0x00FF + +/* + * R53 (0x35) - DAC LR Rate + */ +#define WM8350_DACLRC_ENA 0x0800 +#define WM8350_DACLRC_RATE_MASK 0x07FF + +/* + * R54 (0x36) - DAC Clock Control + */ +#define WM8350_DACCLK_POL 0x0010 +#define WM8350_DAC_CLKDIV_MASK 0x0007 + +/* + * R58 (0x3A) - DAC Mute + */ +#define WM8350_DAC_MUTE_ENA 0x4000 + +/* + * R59 (0x3B) - DAC Mute Volume + */ +#define WM8350_DAC_MUTEMODE 0x4000 +#define WM8350_DAC_MUTERATE 0x2000 +#define WM8350_DAC_SB_FILT 0x1000 + +/* + * R60 (0x3C) - DAC Side + */ +#define WM8350_ADC_TO_DACL_MASK 0x3000 +#define WM8350_ADC_TO_DACR_MASK 0x0C00 + +/* + * R64 (0x40) - ADC Control + */ +#define WM8350_ADC_HPF_CUT_MASK 0x0300 +#define WM8350_ADCL_DATINV 0x0002 +#define WM8350_ADCR_DATINV 0x0001 + +/* + * R66 (0x42) - ADC Digital Volume L + */ +#define WM8350_ADC_VU 0x0100 +#define WM8350_ADCL_VOL_MASK 0x00FF + +/* + * R67 (0x43) - ADC Digital Volume R + */ +#define WM8350_ADC_VU 0x0100 +#define WM8350_ADCR_VOL_MASK 0x00FF + +/* + * R68 (0x44) - ADC Divider + */ +#define WM8350_ADCL_DAC_SVOL_MASK 0x0F00 +#define WM8350_ADCR_DAC_SVOL_MASK 0x00F0 +#define WM8350_ADCCLK_POL 0x0008 +#define WM8350_ADC_CLKDIV_MASK 0x0007 + +/* + * R70 (0x46) - ADC LR Rate + */ +#define WM8350_ADCLRC_ENA 0x0800 +#define WM8350_ADCLRC_RATE_MASK 0x07FF + +/* + * R72 (0x48) - Input Control + */ +#define WM8350_IN2R_ENA 0x0400 +#define WM8350_IN1RN_ENA 0x0200 +#define WM8350_IN1RP_ENA 0x0100 +#define WM8350_IN2L_ENA 0x0004 +#define WM8350_IN1LN_ENA 0x0002 +#define WM8350_IN1LP_ENA 0x0001 + +/* + * R73 (0x49) - IN3 Input Control + */ +#define WM8350_IN3R_SHORT 0x4000 +#define WM8350_IN3L_SHORT 0x0040 + +/* + * R74 (0x4A) - Mic Bias Control + */ +#define WM8350_MICBSEL 0x4000 +#define WM8350_MCDTHR_MASK 0x001C +#define WM8350_MCDSCTHR_MASK 0x0003 + +/* + * R76 (0x4C) - Output Control + */ +#define WM8350_OUT4_VROI 0x0800 +#define WM8350_OUT3_VROI 0x0400 +#define WM8350_OUT2_VROI 0x0200 +#define WM8350_OUT1_VROI 0x0100 +#define WM8350_OUT2_FB 0x0004 +#define WM8350_OUT1_FB 0x0001 + +/* + * R77 (0x4D) - Jack Detect + */ +#define WM8350_JDL_ENA 0x8000 +#define WM8350_JDR_ENA 0x4000 + +/* + * R78 (0x4E) - Anti Pop Control + */ +#define WM8350_ANTI_POP_MASK 0x0300 +#define WM8350_DIS_OP_LN4_MASK 0x00C0 +#define WM8350_DIS_OP_LN3_MASK 0x0030 +#define WM8350_DIS_OP_OUT2_MASK 0x000C +#define WM8350_DIS_OP_OUT1_MASK 0x0003 + +/* + * R80 (0x50) - Left Input Volume + */ +#define WM8350_INL_MUTE 0x4000 +#define WM8350_INL_ZC 0x2000 +#define WM8350_IN_VU 0x0100 +#define WM8350_INL_VOL_MASK 0x00FC + +/* + * R81 (0x51) - Right Input Volume + */ +#define WM8350_INR_MUTE 0x4000 +#define WM8350_INR_ZC 0x2000 +#define WM8350_IN_VU 0x0100 +#define WM8350_INR_VOL_MASK 0x00FC + +/* + * R88 (0x58) - Left Mixer Control + */ +#define WM8350_DACR_TO_MIXOUTL 0x1000 +#define WM8350_DACL_TO_MIXOUTL 0x0800 +#define WM8350_IN3L_TO_MIXOUTL 0x0004 +#define WM8350_INR_TO_MIXOUTL 0x0002 +#define WM8350_INL_TO_MIXOUTL 0x0001 + +/* + * R89 (0x59) - Right Mixer Control + */ +#define WM8350_DACR_TO_MIXOUTR 0x1000 +#define WM8350_DACL_TO_MIXOUTR 0x0800 +#define WM8350_IN3R_TO_MIXOUTR 0x0008 +#define WM8350_INR_TO_MIXOUTR 0x0002 +#define WM8350_INL_TO_MIXOUTR 0x0001 + +/* + * R92 (0x5C) - OUT3 Mixer 
Control + */ +#define WM8350_DACL_TO_OUT3 0x0800 +#define WM8350_MIXINL_TO_OUT3 0x0100 +#define WM8350_OUT4_TO_OUT3 0x0008 +#define WM8350_MIXOUTL_TO_OUT3 0x0001 + +/* + * R93 (0x5D) - OUT4 Mixer Control + */ +#define WM8350_DACR_TO_OUT4 0x1000 +#define WM8350_DACL_TO_OUT4 0x0800 +#define WM8350_OUT4_ATTN 0x0400 +#define WM8350_MIXINR_TO_OUT4 0x0200 +#define WM8350_OUT3_TO_OUT4 0x0004 +#define WM8350_MIXOUTR_TO_OUT4 0x0002 +#define WM8350_MIXOUTL_TO_OUT4 0x0001 + +/* + * R96 (0x60) - Output Left Mixer Volume + */ +#define WM8350_IN3L_MIXOUTL_VOL_MASK 0x0E00 +#define WM8350_IN3L_MIXOUTL_VOL_SHIFT 9 +#define WM8350_INR_MIXOUTL_VOL_MASK 0x00E0 +#define WM8350_INR_MIXOUTL_VOL_SHIFT 5 +#define WM8350_INL_MIXOUTL_VOL_MASK 0x000E +#define WM8350_INL_MIXOUTL_VOL_SHIFT 1 + +/* Bit values for R96 (0x60) */ +#define WM8350_IN3L_MIXOUTL_VOL_OFF 0 +#define WM8350_IN3L_MIXOUTL_VOL_M12DB 1 +#define WM8350_IN3L_MIXOUTL_VOL_M9DB 2 +#define WM8350_IN3L_MIXOUTL_VOL_M6DB 3 +#define WM8350_IN3L_MIXOUTL_VOL_M3DB 4 +#define WM8350_IN3L_MIXOUTL_VOL_0DB 5 +#define WM8350_IN3L_MIXOUTL_VOL_3DB 6 +#define WM8350_IN3L_MIXOUTL_VOL_6DB 7 + +#define WM8350_INR_MIXOUTL_VOL_OFF 0 +#define WM8350_INR_MIXOUTL_VOL_M12DB 1 +#define WM8350_INR_MIXOUTL_VOL_M9DB 2 +#define WM8350_INR_MIXOUTL_VOL_M6DB 3 +#define WM8350_INR_MIXOUTL_VOL_M3DB 4 +#define WM8350_INR_MIXOUTL_VOL_0DB 5 +#define WM8350_INR_MIXOUTL_VOL_3DB 6 +#define WM8350_INR_MIXOUTL_VOL_6DB 7 + +#define WM8350_INL_MIXOUTL_VOL_OFF 0 +#define WM8350_INL_MIXOUTL_VOL_M12DB 1 +#define WM8350_INL_MIXOUTL_VOL_M9DB 2 +#define WM8350_INL_MIXOUTL_VOL_M6DB 3 +#define WM8350_INL_MIXOUTL_VOL_M3DB 4 +#define WM8350_INL_MIXOUTL_VOL_0DB 5 +#define WM8350_INL_MIXOUTL_VOL_3DB 6 +#define WM8350_INL_MIXOUTL_VOL_6DB 7 + +/* + * R97 (0x61) - Output Right Mixer Volume + */ +#define WM8350_IN3R_MIXOUTR_VOL_MASK 0xE000 +#define WM8350_IN3R_MIXOUTR_VOL_SHIFT 13 +#define WM8350_INR_MIXOUTR_VOL_MASK 0x00E0 +#define WM8350_INR_MIXOUTR_VOL_SHIFT 5 +#define WM8350_INL_MIXOUTR_VOL_MASK 0x000E +#define WM8350_INL_MIXOUTR_VOL_SHIFT 1 + +/* Bit values for R96 (0x60) */ +#define WM8350_IN3R_MIXOUTR_VOL_OFF 0 +#define WM8350_IN3R_MIXOUTR_VOL_M12DB 1 +#define WM8350_IN3R_MIXOUTR_VOL_M9DB 2 +#define WM8350_IN3R_MIXOUTR_VOL_M6DB 3 +#define WM8350_IN3R_MIXOUTR_VOL_M3DB 4 +#define WM8350_IN3R_MIXOUTR_VOL_0DB 5 +#define WM8350_IN3R_MIXOUTR_VOL_3DB 6 +#define WM8350_IN3R_MIXOUTR_VOL_6DB 7 + +#define WM8350_INR_MIXOUTR_VOL_OFF 0 +#define WM8350_INR_MIXOUTR_VOL_M12DB 1 +#define WM8350_INR_MIXOUTR_VOL_M9DB 2 +#define WM8350_INR_MIXOUTR_VOL_M6DB 3 +#define WM8350_INR_MIXOUTR_VOL_M3DB 4 +#define WM8350_INR_MIXOUTR_VOL_0DB 5 +#define WM8350_INR_MIXOUTR_VOL_3DB 6 +#define WM8350_INR_MIXOUTR_VOL_6DB 7 + +#define WM8350_INL_MIXOUTR_VOL_OFF 0 +#define WM8350_INL_MIXOUTR_VOL_M12DB 1 +#define WM8350_INL_MIXOUTR_VOL_M9DB 2 +#define WM8350_INL_MIXOUTR_VOL_M6DB 3 +#define WM8350_INL_MIXOUTR_VOL_M3DB 4 +#define WM8350_INL_MIXOUTR_VOL_0DB 5 +#define WM8350_INL_MIXOUTR_VOL_3DB 6 +#define WM8350_INL_MIXOUTR_VOL_6DB 7 + +/* + * R98 (0x62) - Input Mixer Volume L + */ +#define WM8350_IN3L_MIXINL_VOL_MASK 0x0E00 +#define WM8350_IN2L_MIXINL_VOL_MASK 0x000E +#define WM8350_INL_MIXINL_VOL 0x0001 + +/* + * R99 (0x63) - Input Mixer Volume R + */ +#define WM8350_IN3R_MIXINR_VOL_MASK 0xE000 +#define WM8350_IN2R_MIXINR_VOL_MASK 0x00E0 +#define WM8350_INR_MIXINR_VOL 0x0001 + +/* + * R100 (0x64) - Input Mixer Volume + */ +#define WM8350_OUT4_MIXIN_DST 0x8000 +#define WM8350_OUT4_MIXIN_VOL_MASK 0x000E + +/* + * R104 (0x68) - LOUT1 Volume + */ 
+#define WM8350_OUT1L_MUTE 0x4000 +#define WM8350_OUT1L_ZC 0x2000 +#define WM8350_OUT1_VU 0x0100 +#define WM8350_OUT1L_VOL_MASK 0x00FC +#define WM8350_OUT1L_VOL_SHIFT 2 + +/* + * R105 (0x69) - ROUT1 Volume + */ +#define WM8350_OUT1R_MUTE 0x4000 +#define WM8350_OUT1R_ZC 0x2000 +#define WM8350_OUT1_VU 0x0100 +#define WM8350_OUT1R_VOL_MASK 0x00FC +#define WM8350_OUT1R_VOL_SHIFT 2 + +/* + * R106 (0x6A) - LOUT2 Volume + */ +#define WM8350_OUT2L_MUTE 0x4000 +#define WM8350_OUT2L_ZC 0x2000 +#define WM8350_OUT2_VU 0x0100 +#define WM8350_OUT2L_VOL_MASK 0x00FC + +/* + * R107 (0x6B) - ROUT2 Volume + */ +#define WM8350_OUT2R_MUTE 0x4000 +#define WM8350_OUT2R_ZC 0x2000 +#define WM8350_OUT2R_INV 0x0400 +#define WM8350_OUT2R_INV_MUTE 0x0200 +#define WM8350_OUT2_VU 0x0100 +#define WM8350_OUT2R_VOL_MASK 0x00FC + +/* + * R111 (0x6F) - BEEP Volume + */ +#define WM8350_IN3R_OUT2R_VOL_MASK 0x00E0 + +/* + * R112 (0x70) - AI Formating + */ +#define WM8350_AIF_BCLK_INV 0x8000 +#define WM8350_AIF_TRI 0x2000 +#define WM8350_AIF_LRCLK_INV 0x1000 +#define WM8350_AIF_WL_MASK 0x0C00 +#define WM8350_AIF_FMT_MASK 0x0300 + +/* + * R113 (0x71) - ADC DAC COMP + */ +#define WM8350_DAC_COMP 0x0080 +#define WM8350_DAC_COMPMODE 0x0040 +#define WM8350_ADC_COMP 0x0020 +#define WM8350_ADC_COMPMODE 0x0010 +#define WM8350_LOOPBACK 0x0001 + +/* + * R114 (0x72) - AI ADC Control + */ +#define WM8350_AIFADC_PD 0x0080 +#define WM8350_AIFADCL_SRC 0x0040 +#define WM8350_AIFADCR_SRC 0x0020 +#define WM8350_AIFADC_TDM_CHAN 0x0010 +#define WM8350_AIFADC_TDM 0x0008 + +/* + * R115 (0x73) - AI DAC Control + */ +#define WM8350_BCLK_MSTR 0x4000 +#define WM8350_AIFDAC_PD 0x0080 +#define WM8350_DACL_SRC 0x0040 +#define WM8350_DACR_SRC 0x0020 +#define WM8350_AIFDAC_TDM_CHAN 0x0010 +#define WM8350_AIFDAC_TDM 0x0008 +#define WM8350_DAC_BOOST_MASK 0x0003 + +/* + * R116 (0x74) - AIF Test + */ +#define WM8350_CODEC_BYP 0x4000 +#define WM8350_AIFADC_WR_TST 0x2000 +#define WM8350_AIFADC_RD_TST 0x1000 +#define WM8350_AIFDAC_WR_TST 0x0800 +#define WM8350_AIFDAC_RD_TST 0x0400 +#define WM8350_AIFADC_ASYN 0x0020 +#define WM8350_AIFDAC_ASYN 0x0010 + +/* + * R231 (0xE7) - Jack Status + */ +#define WM8350_JACK_L_LVL 0x0800 +#define WM8350_JACK_R_LVL 0x0400 +#define WM8350_JACK_MICSCD_LVL 0x0200 +#define WM8350_JACK_MICSD_LVL 0x0100 + +/* + * WM8350 Platform setup + */ +#define WM8350_S_CURVE_NONE 0x0 +#define WM8350_S_CURVE_FAST 0x1 +#define WM8350_S_CURVE_MEDIUM 0x2 +#define WM8350_S_CURVE_SLOW 0x3 + +#define WM8350_DISCHARGE_OFF 0x0 +#define WM8350_DISCHARGE_FAST 0x1 +#define WM8350_DISCHARGE_MEDIUM 0x2 +#define WM8350_DISCHARGE_SLOW 0x3 + +#define WM8350_TIE_OFF_500R 0x0 +#define WM8350_TIE_OFF_30K 0x1 + +/* + * Clock sources & directions + */ +#define WM8350_SYSCLK 0 + +#define WM8350_MCLK_SEL_PLL_MCLK 0 +#define WM8350_MCLK_SEL_PLL_DAC 1 +#define WM8350_MCLK_SEL_PLL_ADC 2 +#define WM8350_MCLK_SEL_PLL_32K 3 +#define WM8350_MCLK_SEL_MCLK 5 + +/* clock divider id's */ +#define WM8350_ADC_CLKDIV 0 +#define WM8350_DAC_CLKDIV 1 +#define WM8350_BCLK_CLKDIV 2 +#define WM8350_OPCLK_CLKDIV 3 +#define WM8350_TO_CLKDIV 4 +#define WM8350_SYS_CLKDIV 5 +#define WM8350_DACLR_CLKDIV 6 +#define WM8350_ADCLR_CLKDIV 7 + +/* ADC clock dividers */ +#define WM8350_ADCDIV_1 0x0 +#define WM8350_ADCDIV_1_5 0x1 +#define WM8350_ADCDIV_2 0x2 +#define WM8350_ADCDIV_3 0x3 +#define WM8350_ADCDIV_4 0x4 +#define WM8350_ADCDIV_5_5 0x5 +#define WM8350_ADCDIV_6 0x6 + +/* ADC clock dividers */ +#define WM8350_DACDIV_1 0x0 +#define WM8350_DACDIV_1_5 0x1 +#define WM8350_DACDIV_2 0x2 +#define 
WM8350_DACDIV_3 0x3 +#define WM8350_DACDIV_4 0x4 +#define WM8350_DACDIV_5_5 0x5 +#define WM8350_DACDIV_6 0x6 + +/* BCLK clock dividers */ +#define WM8350_BCLK_DIV_1 (0x0 << 4) +#define WM8350_BCLK_DIV_1_5 (0x1 << 4) +#define WM8350_BCLK_DIV_2 (0x2 << 4) +#define WM8350_BCLK_DIV_3 (0x3 << 4) +#define WM8350_BCLK_DIV_4 (0x4 << 4) +#define WM8350_BCLK_DIV_5_5 (0x5 << 4) +#define WM8350_BCLK_DIV_6 (0x6 << 4) +#define WM8350_BCLK_DIV_8 (0x7 << 4) +#define WM8350_BCLK_DIV_11 (0x8 << 4) +#define WM8350_BCLK_DIV_12 (0x9 << 4) +#define WM8350_BCLK_DIV_16 (0xa << 4) +#define WM8350_BCLK_DIV_22 (0xb << 4) +#define WM8350_BCLK_DIV_24 (0xc << 4) +#define WM8350_BCLK_DIV_32 (0xd << 4) +#define WM8350_BCLK_DIV_44 (0xe << 4) +#define WM8350_BCLK_DIV_48 (0xf << 4) + +/* Sys (MCLK) clock dividers */ +#define WM8350_MCLK_DIV_1 (0x0 << 8) +#define WM8350_MCLK_DIV_2 (0x1 << 8) + +/* OP clock dividers */ +#define WM8350_OPCLK_DIV_1 0x0 +#define WM8350_OPCLK_DIV_2 0x1 +#define WM8350_OPCLK_DIV_3 0x2 +#define WM8350_OPCLK_DIV_4 0x3 +#define WM8350_OPCLK_DIV_5_5 0x4 +#define WM8350_OPCLK_DIV_6 0x5 + +/* DAI ID */ +#define WM8350_HIFI_DAI 0 + +/* + * Audio interrupts. + */ +#define WM8350_IRQ_CODEC_JCK_DET_L 39 +#define WM8350_IRQ_CODEC_JCK_DET_R 40 +#define WM8350_IRQ_CODEC_MICSCD 41 +#define WM8350_IRQ_CODEC_MICD 42 + +/* + * WM8350 Platform data. + * + * This must be initialised per platform for best audio performance. + * Please see WM8350 datasheet for information. + */ +struct wm8350_audio_platform_data { + int vmid_discharge_msecs; /* VMID --> OFF discharge time */ + int drain_msecs; /* OFF drain time */ + int cap_discharge_msecs; /* Cap ON (from OFF) discharge time */ + int vmid_charge_msecs; /* vmid power up time */ + u32 vmid_s_curve:2; /* vmid enable s curve speed */ + u32 dis_out4:2; /* out4 discharge speed */ + u32 dis_out3:2; /* out3 discharge speed */ + u32 dis_out2:2; /* out2 discharge speed */ + u32 dis_out1:2; /* out1 discharge speed */ + u32 vroi_out4:1; /* out4 tie off */ + u32 vroi_out3:1; /* out3 tie off */ + u32 vroi_out2:1; /* out2 tie off */ + u32 vroi_out1:1; /* out1 tie off */ + u32 vroi_enable:1; /* enable tie off */ + u32 codec_current_on:2; /* current level ON */ + u32 codec_current_standby:2; /* current level STANDBY */ + u32 codec_current_charge:2; /* codec current @ vmid charge */ +}; + +struct wm8350_codec { + struct platform_device *pdev; + struct wm8350_audio_platform_data *platform_data; +}; + +#endif diff --git a/include/linux/mfd/wm8350/comparator.h b/include/linux/mfd/wm8350/comparator.h new file mode 100644 index 000000000..54bc5d0fd --- /dev/null +++ b/include/linux/mfd/wm8350/comparator.h @@ -0,0 +1,175 @@ +/* + * comparator.h -- Comparator Aux ADC for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __LINUX_MFD_WM8350_COMPARATOR_H_ +#define __LINUX_MFD_WM8350_COMPARATOR_H_ + +/* + * Registers + */ + +#define WM8350_DIGITISER_CONTROL_1 0x90 +#define WM8350_DIGITISER_CONTROL_2 0x91 +#define WM8350_AUX1_READBACK 0x98 +#define WM8350_AUX2_READBACK 0x99 +#define WM8350_AUX3_READBACK 0x9A +#define WM8350_AUX4_READBACK 0x9B +#define WM8350_CHIP_TEMP_READBACK 0x9F +#define WM8350_GENERIC_COMPARATOR_CONTROL 0xA3 +#define WM8350_GENERIC_COMPARATOR_1 0xA4 +#define WM8350_GENERIC_COMPARATOR_2 0xA5 +#define WM8350_GENERIC_COMPARATOR_3 0xA6 +#define WM8350_GENERIC_COMPARATOR_4 0xA7 + +/* + * R144 (0x90) - Digitiser Control (1) + */ +#define WM8350_AUXADC_CTC 0x4000 +#define WM8350_AUXADC_POLL 0x2000 +#define WM8350_AUXADC_HIB_MODE 0x1000 +#define WM8350_AUXADC_SEL8 0x0080 +#define WM8350_AUXADC_SEL7 0x0040 +#define WM8350_AUXADC_SEL6 0x0020 +#define WM8350_AUXADC_SEL5 0x0010 +#define WM8350_AUXADC_SEL4 0x0008 +#define WM8350_AUXADC_SEL3 0x0004 +#define WM8350_AUXADC_SEL2 0x0002 +#define WM8350_AUXADC_SEL1 0x0001 + +/* + * R145 (0x91) - Digitiser Control (2) + */ +#define WM8350_AUXADC_MASKMODE_MASK 0x3000 +#define WM8350_AUXADC_CRATE_MASK 0x0700 +#define WM8350_AUXADC_CAL 0x0004 +#define WM8350_AUX_RBMODE 0x0002 +#define WM8350_AUXADC_WAIT 0x0001 + +/* + * R152 (0x98) - AUX1 Readback + */ +#define WM8350_AUXADC_SCALE1_MASK 0x6000 +#define WM8350_AUXADC_REF1 0x1000 +#define WM8350_AUXADC_DATA1_MASK 0x0FFF + +/* + * R153 (0x99) - AUX2 Readback + */ +#define WM8350_AUXADC_SCALE2_MASK 0x6000 +#define WM8350_AUXADC_REF2 0x1000 +#define WM8350_AUXADC_DATA2_MASK 0x0FFF + +/* + * R154 (0x9A) - AUX3 Readback + */ +#define WM8350_AUXADC_SCALE3_MASK 0x6000 +#define WM8350_AUXADC_REF3 0x1000 +#define WM8350_AUXADC_DATA3_MASK 0x0FFF + +/* + * R155 (0x9B) - AUX4 Readback + */ +#define WM8350_AUXADC_SCALE4_MASK 0x6000 +#define WM8350_AUXADC_REF4 0x1000 +#define WM8350_AUXADC_DATA4_MASK 0x0FFF + +/* + * R156 (0x9C) - USB Voltage Readback + */ +#define WM8350_AUXADC_DATA_USB_MASK 0x0FFF + +/* + * R157 (0x9D) - LINE Voltage Readback + */ +#define WM8350_AUXADC_DATA_LINE_MASK 0x0FFF + +/* + * R158 (0x9E) - BATT Voltage Readback + */ +#define WM8350_AUXADC_DATA_BATT_MASK 0x0FFF + +/* + * R159 (0x9F) - Chip Temp Readback + */ +#define WM8350_AUXADC_DATA_CHIPTEMP_MASK 0x0FFF + +/* + * R163 (0xA3) - Generic Comparator Control + */ +#define WM8350_DCMP4_ENA 0x0008 +#define WM8350_DCMP3_ENA 0x0004 +#define WM8350_DCMP2_ENA 0x0002 +#define WM8350_DCMP1_ENA 0x0001 + +/* + * R164 (0xA4) - Generic comparator 1 + */ +#define WM8350_DCMP1_SRCSEL_MASK 0xE000 +#define WM8350_DCMP1_GT 0x1000 +#define WM8350_DCMP1_THR_MASK 0x0FFF + +/* + * R165 (0xA5) - Generic comparator 2 + */ +#define WM8350_DCMP2_SRCSEL_MASK 0xE000 +#define WM8350_DCMP2_GT 0x1000 +#define WM8350_DCMP2_THR_MASK 0x0FFF + +/* + * R166 (0xA6) - Generic comparator 3 + */ +#define WM8350_DCMP3_SRCSEL_MASK 0xE000 +#define WM8350_DCMP3_GT 0x1000 +#define WM8350_DCMP3_THR_MASK 0x0FFF + +/* + * R167 (0xA7) - Generic comparator 4 + */ +#define WM8350_DCMP4_SRCSEL_MASK 0xE000 +#define WM8350_DCMP4_GT 0x1000 +#define WM8350_DCMP4_THR_MASK 0x0FFF + +/* + * Interrupts. 
+ */ +#define WM8350_IRQ_AUXADC_DATARDY 16 +#define WM8350_IRQ_AUXADC_DCOMP4 17 +#define WM8350_IRQ_AUXADC_DCOMP3 18 +#define WM8350_IRQ_AUXADC_DCOMP2 19 +#define WM8350_IRQ_AUXADC_DCOMP1 20 +#define WM8350_IRQ_SYS_HYST_COMP_FAIL 21 +#define WM8350_IRQ_SYS_CHIP_GT115 22 +#define WM8350_IRQ_SYS_CHIP_GT140 23 + +/* + * USB/2, LINE & BATT = ((VRTC * 2) / 4095)) * 10e6 uV + * Where VRTC = 2.7 V + */ +#define WM8350_AUX_COEFF 1319 + +#define WM8350_AUXADC_AUX1 0 +#define WM8350_AUXADC_AUX2 1 +#define WM8350_AUXADC_AUX3 2 +#define WM8350_AUXADC_AUX4 3 +#define WM8350_AUXADC_USB 4 +#define WM8350_AUXADC_LINE 5 +#define WM8350_AUXADC_BATT 6 +#define WM8350_AUXADC_TEMP 7 + +struct wm8350; + +/* + * AUX ADC Readback + */ +int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, + int vref); + +#endif diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h new file mode 100644 index 000000000..509481d9c --- /dev/null +++ b/include/linux/mfd/wm8350/core.h @@ -0,0 +1,694 @@ +/* + * core.h -- Core Driver for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_WM8350_CORE_H_ +#define __LINUX_MFD_WM8350_CORE_H_ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * Register values. + */ +#define WM8350_RESET_ID 0x00 +#define WM8350_ID 0x01 +#define WM8350_REVISION 0x02 +#define WM8350_SYSTEM_CONTROL_1 0x03 +#define WM8350_SYSTEM_CONTROL_2 0x04 +#define WM8350_SYSTEM_HIBERNATE 0x05 +#define WM8350_INTERFACE_CONTROL 0x06 +#define WM8350_POWER_MGMT_1 0x08 +#define WM8350_POWER_MGMT_2 0x09 +#define WM8350_POWER_MGMT_3 0x0A +#define WM8350_POWER_MGMT_4 0x0B +#define WM8350_POWER_MGMT_5 0x0C +#define WM8350_POWER_MGMT_6 0x0D +#define WM8350_POWER_MGMT_7 0x0E + +#define WM8350_SYSTEM_INTERRUPTS 0x18 +#define WM8350_INT_STATUS_1 0x19 +#define WM8350_INT_STATUS_2 0x1A +#define WM8350_POWER_UP_INT_STATUS 0x1B +#define WM8350_UNDER_VOLTAGE_INT_STATUS 0x1C +#define WM8350_OVER_CURRENT_INT_STATUS 0x1D +#define WM8350_GPIO_INT_STATUS 0x1E +#define WM8350_COMPARATOR_INT_STATUS 0x1F +#define WM8350_SYSTEM_INTERRUPTS_MASK 0x20 +#define WM8350_INT_STATUS_1_MASK 0x21 +#define WM8350_INT_STATUS_2_MASK 0x22 +#define WM8350_POWER_UP_INT_STATUS_MASK 0x23 +#define WM8350_UNDER_VOLTAGE_INT_STATUS_MASK 0x24 +#define WM8350_OVER_CURRENT_INT_STATUS_MASK 0x25 +#define WM8350_GPIO_INT_STATUS_MASK 0x26 +#define WM8350_COMPARATOR_INT_STATUS_MASK 0x27 +#define WM8350_CHARGER_OVERRIDES 0xE2 +#define WM8350_MISC_OVERRIDES 0xE3 +#define WM8350_COMPARATOR_OVERRIDES 0xE7 +#define WM8350_STATE_MACHINE_STATUS 0xE9 + +#define WM8350_MAX_REGISTER 0xFF + +#define WM8350_UNLOCK_KEY 0x0013 +#define WM8350_LOCK_KEY 0x0000 + +/* + * Field Definitions. 
+ */ + +/* + * R0 (0x00) - Reset/ID + */ +#define WM8350_SW_RESET_CHIP_ID_MASK 0xFFFF + +/* + * R1 (0x01) - ID + */ +#define WM8350_CHIP_REV_MASK 0x7000 +#define WM8350_CONF_STS_MASK 0x0C00 +#define WM8350_CUST_ID_MASK 0x00FF + +/* + * R2 (0x02) - Revision + */ +#define WM8350_MASK_REV_MASK 0x00FF + +/* + * R3 (0x03) - System Control 1 + */ +#define WM8350_CHIP_ON 0x8000 +#define WM8350_POWERCYCLE 0x2000 +#define WM8350_VCC_FAULT_OV 0x1000 +#define WM8350_REG_RSTB_TIME_MASK 0x0C00 +#define WM8350_BG_SLEEP 0x0200 +#define WM8350_MEM_VALID 0x0020 +#define WM8350_CHIP_SET_UP 0x0010 +#define WM8350_ON_DEB_T 0x0008 +#define WM8350_ON_POL 0x0002 +#define WM8350_IRQ_POL 0x0001 + +/* + * R4 (0x04) - System Control 2 + */ +#define WM8350_USB_SUSPEND_8MA 0x8000 +#define WM8350_USB_SUSPEND 0x4000 +#define WM8350_USB_MSTR 0x2000 +#define WM8350_USB_MSTR_SRC 0x1000 +#define WM8350_USB_500MA 0x0800 +#define WM8350_USB_NOLIM 0x0400 + +/* + * R5 (0x05) - System Hibernate + */ +#define WM8350_HIBERNATE 0x8000 +#define WM8350_WDOG_HIB_MODE 0x0080 +#define WM8350_REG_HIB_STARTUP_SEQ 0x0040 +#define WM8350_REG_RESET_HIB_MODE 0x0020 +#define WM8350_RST_HIB_MODE 0x0010 +#define WM8350_IRQ_HIB_MODE 0x0008 +#define WM8350_MEMRST_HIB_MODE 0x0004 +#define WM8350_PCCOMP_HIB_MODE 0x0002 +#define WM8350_TEMPMON_HIB_MODE 0x0001 + +/* + * R6 (0x06) - Interface Control + */ +#define WM8350_USE_DEV_PINS 0x8000 +#define WM8350_USE_DEV_PINS_MASK 0x8000 +#define WM8350_USE_DEV_PINS_SHIFT 15 +#define WM8350_DEV_ADDR_MASK 0x6000 +#define WM8350_DEV_ADDR_SHIFT 13 +#define WM8350_CONFIG_DONE 0x1000 +#define WM8350_CONFIG_DONE_MASK 0x1000 +#define WM8350_CONFIG_DONE_SHIFT 12 +#define WM8350_RECONFIG_AT_ON 0x0800 +#define WM8350_RECONFIG_AT_ON_MASK 0x0800 +#define WM8350_RECONFIG_AT_ON_SHIFT 11 +#define WM8350_AUTOINC 0x0200 +#define WM8350_AUTOINC_MASK 0x0200 +#define WM8350_AUTOINC_SHIFT 9 +#define WM8350_ARA 0x0100 +#define WM8350_ARA_MASK 0x0100 +#define WM8350_ARA_SHIFT 8 +#define WM8350_SPI_CFG 0x0008 +#define WM8350_SPI_CFG_MASK 0x0008 +#define WM8350_SPI_CFG_SHIFT 3 +#define WM8350_SPI_4WIRE 0x0004 +#define WM8350_SPI_4WIRE_MASK 0x0004 +#define WM8350_SPI_4WIRE_SHIFT 2 +#define WM8350_SPI_3WIRE 0x0002 +#define WM8350_SPI_3WIRE_MASK 0x0002 +#define WM8350_SPI_3WIRE_SHIFT 1 + +/* Bit values for R06 (0x06) */ +#define WM8350_USE_DEV_PINS_PRIMARY 0 +#define WM8350_USE_DEV_PINS_DEV 1 + +#define WM8350_DEV_ADDR_34 0 +#define WM8350_DEV_ADDR_36 1 +#define WM8350_DEV_ADDR_3C 2 +#define WM8350_DEV_ADDR_3E 3 + +#define WM8350_CONFIG_DONE_OFF 0 +#define WM8350_CONFIG_DONE_DONE 1 + +#define WM8350_RECONFIG_AT_ON_OFF 0 +#define WM8350_RECONFIG_AT_ON_ON 1 + +#define WM8350_AUTOINC_OFF 0 +#define WM8350_AUTOINC_ON 1 + +#define WM8350_ARA_OFF 0 +#define WM8350_ARA_ON 1 + +#define WM8350_SPI_CFG_CMOS 0 +#define WM8350_SPI_CFG_OD 1 + +#define WM8350_SPI_4WIRE_3WIRE 0 +#define WM8350_SPI_4WIRE_4WIRE 1 + +#define WM8350_SPI_3WIRE_I2C 0 +#define WM8350_SPI_3WIRE_SPI 1 + +/* + * R8 (0x08) - Power mgmt (1) + */ +#define WM8350_CODEC_ISEL_MASK 0xC000 +#define WM8350_VBUFEN 0x2000 +#define WM8350_OUTPUT_DRAIN_EN 0x0400 +#define WM8350_MIC_DET_ENA 0x0100 +#define WM8350_BIASEN 0x0020 +#define WM8350_MICBEN 0x0010 +#define WM8350_VMIDEN 0x0004 +#define WM8350_VMID_MASK 0x0003 +#define WM8350_VMID_SHIFT 0 + +/* + * R9 (0x09) - Power mgmt (2) + */ +#define WM8350_IN3R_ENA 0x0800 +#define WM8350_IN3L_ENA 0x0400 +#define WM8350_INR_ENA 0x0200 +#define WM8350_INL_ENA 0x0100 +#define WM8350_MIXINR_ENA 0x0080 +#define WM8350_MIXINL_ENA 0x0040 +#define 
WM8350_OUT4_ENA 0x0020 +#define WM8350_OUT3_ENA 0x0010 +#define WM8350_MIXOUTR_ENA 0x0002 +#define WM8350_MIXOUTL_ENA 0x0001 + +/* + * R10 (0x0A) - Power mgmt (3) + */ +#define WM8350_IN3R_TO_OUT2R 0x0080 +#define WM8350_OUT2R_ENA 0x0008 +#define WM8350_OUT2L_ENA 0x0004 +#define WM8350_OUT1R_ENA 0x0002 +#define WM8350_OUT1L_ENA 0x0001 + +/* + * R11 (0x0B) - Power mgmt (4) + */ +#define WM8350_SYSCLK_ENA 0x4000 +#define WM8350_ADC_HPF_ENA 0x2000 +#define WM8350_FLL_ENA 0x0800 +#define WM8350_FLL_OSC_ENA 0x0400 +#define WM8350_TOCLK_ENA 0x0100 +#define WM8350_DACR_ENA 0x0020 +#define WM8350_DACL_ENA 0x0010 +#define WM8350_ADCR_ENA 0x0008 +#define WM8350_ADCL_ENA 0x0004 + +/* + * R12 (0x0C) - Power mgmt (5) + */ +#define WM8350_CODEC_ENA 0x1000 +#define WM8350_RTC_TICK_ENA 0x0800 +#define WM8350_OSC32K_ENA 0x0400 +#define WM8350_CHG_ENA 0x0200 +#define WM8350_ACC_DET_ENA 0x0100 +#define WM8350_AUXADC_ENA 0x0080 +#define WM8350_DCMP4_ENA 0x0008 +#define WM8350_DCMP3_ENA 0x0004 +#define WM8350_DCMP2_ENA 0x0002 +#define WM8350_DCMP1_ENA 0x0001 + +/* + * R13 (0x0D) - Power mgmt (6) + */ +#define WM8350_LS_ENA 0x8000 +#define WM8350_LDO4_ENA 0x0800 +#define WM8350_LDO3_ENA 0x0400 +#define WM8350_LDO2_ENA 0x0200 +#define WM8350_LDO1_ENA 0x0100 +#define WM8350_DC6_ENA 0x0020 +#define WM8350_DC5_ENA 0x0010 +#define WM8350_DC4_ENA 0x0008 +#define WM8350_DC3_ENA 0x0004 +#define WM8350_DC2_ENA 0x0002 +#define WM8350_DC1_ENA 0x0001 + +/* + * R14 (0x0E) - Power mgmt (7) + */ +#define WM8350_CS2_ENA 0x0002 +#define WM8350_CS1_ENA 0x0001 + +/* + * R24 (0x18) - System Interrupts + */ +#define WM8350_OC_INT 0x2000 +#define WM8350_UV_INT 0x1000 +#define WM8350_PUTO_INT 0x0800 +#define WM8350_CS_INT 0x0200 +#define WM8350_EXT_INT 0x0100 +#define WM8350_CODEC_INT 0x0080 +#define WM8350_GP_INT 0x0040 +#define WM8350_AUXADC_INT 0x0020 +#define WM8350_RTC_INT 0x0010 +#define WM8350_SYS_INT 0x0008 +#define WM8350_CHG_INT 0x0004 +#define WM8350_USB_INT 0x0002 +#define WM8350_WKUP_INT 0x0001 + +/* + * R25 (0x19) - Interrupt Status 1 + */ +#define WM8350_CHG_BAT_HOT_EINT 0x8000 +#define WM8350_CHG_BAT_COLD_EINT 0x4000 +#define WM8350_CHG_BAT_FAIL_EINT 0x2000 +#define WM8350_CHG_TO_EINT 0x1000 +#define WM8350_CHG_END_EINT 0x0800 +#define WM8350_CHG_START_EINT 0x0400 +#define WM8350_CHG_FAST_RDY_EINT 0x0200 +#define WM8350_RTC_PER_EINT 0x0080 +#define WM8350_RTC_SEC_EINT 0x0040 +#define WM8350_RTC_ALM_EINT 0x0020 +#define WM8350_CHG_VBATT_LT_3P9_EINT 0x0004 +#define WM8350_CHG_VBATT_LT_3P1_EINT 0x0002 +#define WM8350_CHG_VBATT_LT_2P85_EINT 0x0001 + +/* + * R26 (0x1A) - Interrupt Status 2 + */ +#define WM8350_CS1_EINT 0x2000 +#define WM8350_CS2_EINT 0x1000 +#define WM8350_USB_LIMIT_EINT 0x0400 +#define WM8350_AUXADC_DATARDY_EINT 0x0100 +#define WM8350_AUXADC_DCOMP4_EINT 0x0080 +#define WM8350_AUXADC_DCOMP3_EINT 0x0040 +#define WM8350_AUXADC_DCOMP2_EINT 0x0020 +#define WM8350_AUXADC_DCOMP1_EINT 0x0010 +#define WM8350_SYS_HYST_COMP_FAIL_EINT 0x0008 +#define WM8350_SYS_CHIP_GT115_EINT 0x0004 +#define WM8350_SYS_CHIP_GT140_EINT 0x0002 +#define WM8350_SYS_WDOG_TO_EINT 0x0001 + +/* + * R27 (0x1B) - Power Up Interrupt Status + */ +#define WM8350_PUTO_LDO4_EINT 0x0800 +#define WM8350_PUTO_LDO3_EINT 0x0400 +#define WM8350_PUTO_LDO2_EINT 0x0200 +#define WM8350_PUTO_LDO1_EINT 0x0100 +#define WM8350_PUTO_DC6_EINT 0x0020 +#define WM8350_PUTO_DC5_EINT 0x0010 +#define WM8350_PUTO_DC4_EINT 0x0008 +#define WM8350_PUTO_DC3_EINT 0x0004 +#define WM8350_PUTO_DC2_EINT 0x0002 +#define WM8350_PUTO_DC1_EINT 0x0001 + +/* + * R28 (0x1C) - Under 
Voltage Interrupt status + */ +#define WM8350_UV_LDO4_EINT 0x0800 +#define WM8350_UV_LDO3_EINT 0x0400 +#define WM8350_UV_LDO2_EINT 0x0200 +#define WM8350_UV_LDO1_EINT 0x0100 +#define WM8350_UV_DC6_EINT 0x0020 +#define WM8350_UV_DC5_EINT 0x0010 +#define WM8350_UV_DC4_EINT 0x0008 +#define WM8350_UV_DC3_EINT 0x0004 +#define WM8350_UV_DC2_EINT 0x0002 +#define WM8350_UV_DC1_EINT 0x0001 + +/* + * R29 (0x1D) - Over Current Interrupt status + */ +#define WM8350_OC_LS_EINT 0x8000 + +/* + * R30 (0x1E) - GPIO Interrupt Status + */ +#define WM8350_GP12_EINT 0x1000 +#define WM8350_GP11_EINT 0x0800 +#define WM8350_GP10_EINT 0x0400 +#define WM8350_GP9_EINT 0x0200 +#define WM8350_GP8_EINT 0x0100 +#define WM8350_GP7_EINT 0x0080 +#define WM8350_GP6_EINT 0x0040 +#define WM8350_GP5_EINT 0x0020 +#define WM8350_GP4_EINT 0x0010 +#define WM8350_GP3_EINT 0x0008 +#define WM8350_GP2_EINT 0x0004 +#define WM8350_GP1_EINT 0x0002 +#define WM8350_GP0_EINT 0x0001 + +/* + * R31 (0x1F) - Comparator Interrupt Status + */ +#define WM8350_EXT_USB_FB_EINT 0x8000 +#define WM8350_EXT_WALL_FB_EINT 0x4000 +#define WM8350_EXT_BAT_FB_EINT 0x2000 +#define WM8350_CODEC_JCK_DET_L_EINT 0x0800 +#define WM8350_CODEC_JCK_DET_R_EINT 0x0400 +#define WM8350_CODEC_MICSCD_EINT 0x0200 +#define WM8350_CODEC_MICD_EINT 0x0100 +#define WM8350_WKUP_OFF_STATE_EINT 0x0040 +#define WM8350_WKUP_HIB_STATE_EINT 0x0020 +#define WM8350_WKUP_CONV_FAULT_EINT 0x0010 +#define WM8350_WKUP_WDOG_RST_EINT 0x0008 +#define WM8350_WKUP_GP_PWR_ON_EINT 0x0004 +#define WM8350_WKUP_ONKEY_EINT 0x0002 +#define WM8350_WKUP_GP_WAKEUP_EINT 0x0001 + +/* + * R32 (0x20) - System Interrupts Mask + */ +#define WM8350_IM_OC_INT 0x2000 +#define WM8350_IM_UV_INT 0x1000 +#define WM8350_IM_PUTO_INT 0x0800 +#define WM8350_IM_SPARE_INT 0x0400 +#define WM8350_IM_CS_INT 0x0200 +#define WM8350_IM_EXT_INT 0x0100 +#define WM8350_IM_CODEC_INT 0x0080 +#define WM8350_IM_GP_INT 0x0040 +#define WM8350_IM_AUXADC_INT 0x0020 +#define WM8350_IM_RTC_INT 0x0010 +#define WM8350_IM_SYS_INT 0x0008 +#define WM8350_IM_CHG_INT 0x0004 +#define WM8350_IM_USB_INT 0x0002 +#define WM8350_IM_WKUP_INT 0x0001 + +/* + * R33 (0x21) - Interrupt Status 1 Mask + */ +#define WM8350_IM_CHG_BAT_HOT_EINT 0x8000 +#define WM8350_IM_CHG_BAT_COLD_EINT 0x4000 +#define WM8350_IM_CHG_BAT_FAIL_EINT 0x2000 +#define WM8350_IM_CHG_TO_EINT 0x1000 +#define WM8350_IM_CHG_END_EINT 0x0800 +#define WM8350_IM_CHG_START_EINT 0x0400 +#define WM8350_IM_CHG_FAST_RDY_EINT 0x0200 +#define WM8350_IM_RTC_PER_EINT 0x0080 +#define WM8350_IM_RTC_SEC_EINT 0x0040 +#define WM8350_IM_RTC_ALM_EINT 0x0020 +#define WM8350_IM_CHG_VBATT_LT_3P9_EINT 0x0004 +#define WM8350_IM_CHG_VBATT_LT_3P1_EINT 0x0002 +#define WM8350_IM_CHG_VBATT_LT_2P85_EINT 0x0001 + +/* + * R34 (0x22) - Interrupt Status 2 Mask + */ +#define WM8350_IM_SPARE2_EINT 0x8000 +#define WM8350_IM_SPARE1_EINT 0x4000 +#define WM8350_IM_CS1_EINT 0x2000 +#define WM8350_IM_CS2_EINT 0x1000 +#define WM8350_IM_USB_LIMIT_EINT 0x0400 +#define WM8350_IM_AUXADC_DATARDY_EINT 0x0100 +#define WM8350_IM_AUXADC_DCOMP4_EINT 0x0080 +#define WM8350_IM_AUXADC_DCOMP3_EINT 0x0040 +#define WM8350_IM_AUXADC_DCOMP2_EINT 0x0020 +#define WM8350_IM_AUXADC_DCOMP1_EINT 0x0010 +#define WM8350_IM_SYS_HYST_COMP_FAIL_EINT 0x0008 +#define WM8350_IM_SYS_CHIP_GT115_EINT 0x0004 +#define WM8350_IM_SYS_CHIP_GT140_EINT 0x0002 +#define WM8350_IM_SYS_WDOG_TO_EINT 0x0001 + +/* + * R35 (0x23) - Power Up Interrupt Status Mask + */ +#define WM8350_IM_PUTO_LDO4_EINT 0x0800 +#define WM8350_IM_PUTO_LDO3_EINT 0x0400 +#define WM8350_IM_PUTO_LDO2_EINT 0x0200 
+#define WM8350_IM_PUTO_LDO1_EINT 0x0100 +#define WM8350_IM_PUTO_DC6_EINT 0x0020 +#define WM8350_IM_PUTO_DC5_EINT 0x0010 +#define WM8350_IM_PUTO_DC4_EINT 0x0008 +#define WM8350_IM_PUTO_DC3_EINT 0x0004 +#define WM8350_IM_PUTO_DC2_EINT 0x0002 +#define WM8350_IM_PUTO_DC1_EINT 0x0001 + +/* + * R36 (0x24) - Under Voltage Interrupt status Mask + */ +#define WM8350_IM_UV_LDO4_EINT 0x0800 +#define WM8350_IM_UV_LDO3_EINT 0x0400 +#define WM8350_IM_UV_LDO2_EINT 0x0200 +#define WM8350_IM_UV_LDO1_EINT 0x0100 +#define WM8350_IM_UV_DC6_EINT 0x0020 +#define WM8350_IM_UV_DC5_EINT 0x0010 +#define WM8350_IM_UV_DC4_EINT 0x0008 +#define WM8350_IM_UV_DC3_EINT 0x0004 +#define WM8350_IM_UV_DC2_EINT 0x0002 +#define WM8350_IM_UV_DC1_EINT 0x0001 + +/* + * R37 (0x25) - Over Current Interrupt status Mask + */ +#define WM8350_IM_OC_LS_EINT 0x8000 + +/* + * R38 (0x26) - GPIO Interrupt Status Mask + */ +#define WM8350_IM_GP12_EINT 0x1000 +#define WM8350_IM_GP11_EINT 0x0800 +#define WM8350_IM_GP10_EINT 0x0400 +#define WM8350_IM_GP9_EINT 0x0200 +#define WM8350_IM_GP8_EINT 0x0100 +#define WM8350_IM_GP7_EINT 0x0080 +#define WM8350_IM_GP6_EINT 0x0040 +#define WM8350_IM_GP5_EINT 0x0020 +#define WM8350_IM_GP4_EINT 0x0010 +#define WM8350_IM_GP3_EINT 0x0008 +#define WM8350_IM_GP2_EINT 0x0004 +#define WM8350_IM_GP1_EINT 0x0002 +#define WM8350_IM_GP0_EINT 0x0001 + +/* + * R39 (0x27) - Comparator Interrupt Status Mask + */ +#define WM8350_IM_EXT_USB_FB_EINT 0x8000 +#define WM8350_IM_EXT_WALL_FB_EINT 0x4000 +#define WM8350_IM_EXT_BAT_FB_EINT 0x2000 +#define WM8350_IM_CODEC_JCK_DET_L_EINT 0x0800 +#define WM8350_IM_CODEC_JCK_DET_R_EINT 0x0400 +#define WM8350_IM_CODEC_MICSCD_EINT 0x0200 +#define WM8350_IM_CODEC_MICD_EINT 0x0100 +#define WM8350_IM_WKUP_OFF_STATE_EINT 0x0040 +#define WM8350_IM_WKUP_HIB_STATE_EINT 0x0020 +#define WM8350_IM_WKUP_CONV_FAULT_EINT 0x0010 +#define WM8350_IM_WKUP_WDOG_RST_EINT 0x0008 +#define WM8350_IM_WKUP_GP_PWR_ON_EINT 0x0004 +#define WM8350_IM_WKUP_ONKEY_EINT 0x0002 +#define WM8350_IM_WKUP_GP_WAKEUP_EINT 0x0001 + +/* + * R220 (0xDC) - RAM BIST 1 + */ +#define WM8350_READ_STATUS 0x0800 +#define WM8350_TSTRAM_CLK 0x0100 +#define WM8350_TSTRAM_CLK_ENA 0x0080 +#define WM8350_STARTSEQ 0x0040 +#define WM8350_READ_SRC 0x0020 +#define WM8350_COUNT_DIR 0x0010 +#define WM8350_TSTRAM_MODE_MASK 0x000E +#define WM8350_TSTRAM_ENA 0x0001 + +/* + * R225 (0xE1) - DCDC/LDO status + */ +#define WM8350_LS_STS 0x8000 +#define WM8350_LDO4_STS 0x0800 +#define WM8350_LDO3_STS 0x0400 +#define WM8350_LDO2_STS 0x0200 +#define WM8350_LDO1_STS 0x0100 +#define WM8350_DC6_STS 0x0020 +#define WM8350_DC5_STS 0x0010 +#define WM8350_DC4_STS 0x0008 +#define WM8350_DC3_STS 0x0004 +#define WM8350_DC2_STS 0x0002 +#define WM8350_DC1_STS 0x0001 + +/* + * R226 (0xE2) - Charger status + */ +#define WM8350_CHG_BATT_HOT_OVRDE 0x8000 +#define WM8350_CHG_BATT_COLD_OVRDE 0x4000 + +/* + * R227 (0xE3) - Misc Overrides + */ +#define WM8350_USB_LIMIT_OVRDE 0x0400 + +/* + * R227 (0xE7) - Comparator Overrides + */ +#define WM8350_USB_FB_OVRDE 0x8000 +#define WM8350_WALL_FB_OVRDE 0x4000 +#define WM8350_BATT_FB_OVRDE 0x2000 + + +/* + * R233 (0xE9) - State Machinine Status + */ +#define WM8350_USB_SM_MASK 0x0700 +#define WM8350_USB_SM_SHIFT 8 + +#define WM8350_USB_SM_100_SLV 1 +#define WM8350_USB_SM_500_SLV 5 +#define WM8350_USB_SM_STDBY_SLV 7 + +/* WM8350 wake up conditions */ +#define WM8350_IRQ_WKUP_OFF_STATE 43 +#define WM8350_IRQ_WKUP_HIB_STATE 44 +#define WM8350_IRQ_WKUP_CONV_FAULT 45 +#define WM8350_IRQ_WKUP_WDOG_RST 46 +#define WM8350_IRQ_WKUP_GP_PWR_ON 47 
+#define WM8350_IRQ_WKUP_ONKEY 48 +#define WM8350_IRQ_WKUP_GP_WAKEUP 49 + +/* wm8350 chip revisions */ +#define WM8350_REV_E 0x4 +#define WM8350_REV_F 0x5 +#define WM8350_REV_G 0x6 +#define WM8350_REV_H 0x7 + +#define WM8350_NUM_IRQ 63 + +#define WM8350_NUM_IRQ_REGS 7 + +extern const struct regmap_config wm8350_regmap; + +struct wm8350; + +struct wm8350_hwmon { + struct platform_device *pdev; + struct device *classdev; +}; + +struct wm8350 { + struct device *dev; + + /* device IO */ + struct regmap *regmap; + bool unlocked; + + struct mutex auxadc_mutex; + struct completion auxadc_done; + + /* Interrupt handling */ + struct mutex irq_lock; + int chip_irq; + int irq_base; + u16 irq_masks[WM8350_NUM_IRQ_REGS]; + + /* Client devices */ + struct wm8350_codec codec; + struct wm8350_gpio gpio; + struct wm8350_hwmon hwmon; + struct wm8350_pmic pmic; + struct wm8350_power power; + struct wm8350_rtc rtc; + struct wm8350_wdt wdt; +}; + +/** + * Data to be supplied by the platform to initialise the WM8350. + * + * @init: Function called during driver initialisation. Should be + * used by the platform to configure GPIO functions and similar. + * @irq_high: Set if WM8350 IRQ is active high. + * @irq_base: Base IRQ for genirq (not currently used). + * @gpio_base: Base for gpiolib. + */ +struct wm8350_platform_data { + int (*init)(struct wm8350 *wm8350); + int irq_high; + int irq_base; + int gpio_base; +}; + + +/* + * WM8350 device initialisation and exit. + */ +int wm8350_device_init(struct wm8350 *wm8350, int irq, + struct wm8350_platform_data *pdata); +void wm8350_device_exit(struct wm8350 *wm8350); + +/* + * WM8350 device IO + */ +int wm8350_clear_bits(struct wm8350 *wm8350, u16 reg, u16 mask); +int wm8350_set_bits(struct wm8350 *wm8350, u16 reg, u16 mask); +u16 wm8350_reg_read(struct wm8350 *wm8350, int reg); +int wm8350_reg_write(struct wm8350 *wm8350, int reg, u16 val); +int wm8350_reg_lock(struct wm8350 *wm8350); +int wm8350_reg_unlock(struct wm8350 *wm8350); +int wm8350_block_read(struct wm8350 *wm8350, int reg, int size, u16 *dest); +int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src); + +/* + * WM8350 internal interrupts + */ +static inline int wm8350_register_irq(struct wm8350 *wm8350, int irq, + irq_handler_t handler, + unsigned long flags, + const char *name, void *data) +{ + if (!wm8350->irq_base) + return -ENODEV; + + return request_threaded_irq(irq + wm8350->irq_base, NULL, + handler, flags, name, data); +} + +static inline void wm8350_free_irq(struct wm8350 *wm8350, int irq, void *data) +{ + free_irq(irq + wm8350->irq_base, data); +} + +static inline void wm8350_mask_irq(struct wm8350 *wm8350, int irq) +{ + disable_irq(irq + wm8350->irq_base); +} + +static inline void wm8350_unmask_irq(struct wm8350 *wm8350, int irq) +{ + enable_irq(irq + wm8350->irq_base); +} + +int wm8350_irq_init(struct wm8350 *wm8350, int irq, + struct wm8350_platform_data *pdata); +int wm8350_irq_exit(struct wm8350 *wm8350); + +#endif diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h new file mode 100644 index 000000000..d657bcd6d --- /dev/null +++ b/include/linux/mfd/wm8350/gpio.h @@ -0,0 +1,361 @@ +/* + * gpio.h -- GPIO Driver for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __LINUX_MFD_WM8350_GPIO_H_ +#define __LINUX_MFD_WM8350_GPIO_H_ + +#include + +/* + * GPIO Registers. + */ +#define WM8350_GPIO_DEBOUNCE 0x80 +#define WM8350_GPIO_PIN_PULL_UP_CONTROL 0x81 +#define WM8350_GPIO_PULL_DOWN_CONTROL 0x82 +#define WM8350_GPIO_INT_MODE 0x83 +#define WM8350_GPIO_CONTROL 0x85 +#define WM8350_GPIO_CONFIGURATION_I_O 0x86 +#define WM8350_GPIO_PIN_POLARITY_TYPE 0x87 +#define WM8350_GPIO_FUNCTION_SELECT_1 0x8C +#define WM8350_GPIO_FUNCTION_SELECT_2 0x8D +#define WM8350_GPIO_FUNCTION_SELECT_3 0x8E +#define WM8350_GPIO_FUNCTION_SELECT_4 0x8F +#define WM8350_GPIO_LEVEL 0xE6 + +/* + * GPIO Functions + */ +#define WM8350_GPIO0_GPIO_IN 0x0 +#define WM8350_GPIO0_GPIO_OUT 0x0 +#define WM8350_GPIO0_PWR_ON_IN 0x1 +#define WM8350_GPIO0_PWR_ON_OUT 0x1 +#define WM8350_GPIO0_LDO_EN_IN 0x2 +#define WM8350_GPIO0_VRTC_OUT 0x2 +#define WM8350_GPIO0_LPWR1_IN 0x3 +#define WM8350_GPIO0_POR_B_OUT 0x3 + +#define WM8350_GPIO1_GPIO_IN 0x0 +#define WM8350_GPIO1_GPIO_OUT 0x0 +#define WM8350_GPIO1_PWR_ON_IN 0x1 +#define WM8350_GPIO1_DO_CONF_OUT 0x1 +#define WM8350_GPIO1_LDO_EN_IN 0x2 +#define WM8350_GPIO1_RESET_OUT 0x2 +#define WM8350_GPIO1_LPWR2_IN 0x3 +#define WM8350_GPIO1_MEMRST_OUT 0x3 + +#define WM8350_GPIO2_GPIO_IN 0x0 +#define WM8350_GPIO2_GPIO_OUT 0x0 +#define WM8350_GPIO2_PWR_ON_IN 0x1 +#define WM8350_GPIO2_PWR_ON_OUT 0x1 +#define WM8350_GPIO2_WAKE_UP_IN 0x2 +#define WM8350_GPIO2_VRTC_OUT 0x2 +#define WM8350_GPIO2_32KHZ_IN 0x3 +#define WM8350_GPIO2_32KHZ_OUT 0x3 + +#define WM8350_GPIO3_GPIO_IN 0x0 +#define WM8350_GPIO3_GPIO_OUT 0x0 +#define WM8350_GPIO3_PWR_ON_IN 0x1 +#define WM8350_GPIO3_P_CLK_OUT 0x1 +#define WM8350_GPIO3_LDO_EN_IN 0x2 +#define WM8350_GPIO3_VRTC_OUT 0x2 +#define WM8350_GPIO3_PWR_OFF_IN 0x3 +#define WM8350_GPIO3_32KHZ_OUT 0x3 + +#define WM8350_GPIO4_GPIO_IN 0x0 +#define WM8350_GPIO4_GPIO_OUT 0x0 +#define WM8350_GPIO4_MR_IN 0x1 +#define WM8350_GPIO4_MEM_RST_OUT 0x1 +#define WM8350_GPIO4_FLASH_IN 0x2 +#define WM8350_GPIO4_ADA_OUT 0x2 +#define WM8350_GPIO4_HIBERNATE_IN 0x3 +#define WM8350_GPIO4_FLASH_OUT 0x3 +#define WM8350_GPIO4_MICDET_OUT 0x4 +#define WM8350_GPIO4_MICSHT_OUT 0x5 + +#define WM8350_GPIO5_GPIO_IN 0x0 +#define WM8350_GPIO5_GPIO_OUT 0x0 +#define WM8350_GPIO5_LPWR1_IN 0x1 +#define WM8350_GPIO5_P_CLK_OUT 0x1 +#define WM8350_GPIO5_ADCLRCLK_IN 0x2 +#define WM8350_GPIO5_ADCLRCLK_OUT 0x2 +#define WM8350_GPIO5_HIBERNATE_IN 0x3 +#define WM8350_GPIO5_32KHZ_OUT 0x3 +#define WM8350_GPIO5_MICDET_OUT 0x4 +#define WM8350_GPIO5_MICSHT_OUT 0x5 +#define WM8350_GPIO5_ADA_OUT 0x6 +#define WM8350_GPIO5_OPCLK_OUT 0x7 + +#define WM8350_GPIO6_GPIO_IN 0x0 +#define WM8350_GPIO6_GPIO_OUT 0x0 +#define WM8350_GPIO6_LPWR2_IN 0x1 +#define WM8350_GPIO6_MEMRST_OUT 0x1 +#define WM8350_GPIO6_FLASH_IN 0x2 +#define WM8350_GPIO6_ADA_OUT 0x2 +#define WM8350_GPIO6_HIBERNATE_IN 0x3 +#define WM8350_GPIO6_RTC_OUT 0x3 +#define WM8350_GPIO6_MICDET_OUT 0x4 +#define WM8350_GPIO6_MICSHT_OUT 0x5 +#define WM8350_GPIO6_ADCLRCLKB_OUT 0x6 +#define WM8350_GPIO6_SDOUT_OUT 0x7 + +#define WM8350_GPIO7_GPIO_IN 0x0 +#define WM8350_GPIO7_GPIO_OUT 0x0 +#define WM8350_GPIO7_LPWR3_IN 0x1 +#define WM8350_GPIO7_P_CLK_OUT 0x1 +#define WM8350_GPIO7_MASK_IN 0x2 +#define WM8350_GPIO7_VCC_FAULT_OUT 0x2 +#define WM8350_GPIO7_HIBERNATE_IN 0x3 +#define WM8350_GPIO7_BATT_FAULT_OUT 0x3 +#define WM8350_GPIO7_MICDET_OUT 0x4 +#define WM8350_GPIO7_MICSHT_OUT 0x5 +#define WM8350_GPIO7_ADA_OUT 0x6 +#define WM8350_GPIO7_CSB_IN 0x7 + +#define WM8350_GPIO8_GPIO_IN 0x0 +#define WM8350_GPIO8_GPIO_OUT 0x0 +#define 
WM8350_GPIO8_MR_IN 0x1 +#define WM8350_GPIO8_VCC_FAULT_OUT 0x1 +#define WM8350_GPIO8_ADCBCLK_IN 0x2 +#define WM8350_GPIO8_ADCBCLK_OUT 0x2 +#define WM8350_GPIO8_PWR_OFF_IN 0x3 +#define WM8350_GPIO8_BATT_FAULT_OUT 0x3 +#define WM8350_GPIO8_ALTSCL_IN 0xf + +#define WM8350_GPIO9_GPIO_IN 0x0 +#define WM8350_GPIO9_GPIO_OUT 0x0 +#define WM8350_GPIO9_HEARTBEAT_IN 0x1 +#define WM8350_GPIO9_VCC_FAULT_OUT 0x1 +#define WM8350_GPIO9_MASK_IN 0x2 +#define WM8350_GPIO9_LINE_GT_BATT_OUT 0x2 +#define WM8350_GPIO9_PWR_OFF_IN 0x3 +#define WM8350_GPIO9_BATT_FAULT_OUT 0x3 +#define WM8350_GPIO9_ALTSDA_OUT 0xf + +#define WM8350_GPIO10_GPIO_IN 0x0 +#define WM8350_GPIO10_GPIO_OUT 0x0 +#define WM8350_GPIO10_ISINKC_OUT 0x1 +#define WM8350_GPIO10_PWR_OFF_IN 0x2 +#define WM8350_GPIO10_LINE_GT_BATT_OUT 0x2 +#define WM8350_GPIO10_CHD_IND_IN 0x3 + +#define WM8350_GPIO11_GPIO_IN 0x0 +#define WM8350_GPIO11_GPIO_OUT 0x0 +#define WM8350_GPIO11_ISINKD_OUT 0x1 +#define WM8350_GPIO11_WAKEUP_IN 0x2 +#define WM8350_GPIO11_LINE_GT_BATT_OUT 0x2 +#define WM8350_GPIO11_CHD_IND_IN 0x3 + +#define WM8350_GPIO12_GPIO_IN 0x0 +#define WM8350_GPIO12_GPIO_OUT 0x0 +#define WM8350_GPIO12_ISINKE_OUT 0x1 +#define WM8350_GPIO12_LINE_GT_BATT_OUT 0x2 +#define WM8350_GPIO12_LINE_EN_OUT 0x3 +#define WM8350_GPIO12_32KHZ_OUT 0x4 + +#define WM8350_GPIO_DIR_IN 0 +#define WM8350_GPIO_DIR_OUT 1 +#define WM8350_GPIO_ACTIVE_LOW 0 +#define WM8350_GPIO_ACTIVE_HIGH 1 +#define WM8350_GPIO_PULL_NONE 0 +#define WM8350_GPIO_PULL_UP 1 +#define WM8350_GPIO_PULL_DOWN 2 +#define WM8350_GPIO_INVERT_OFF 0 +#define WM8350_GPIO_INVERT_ON 1 +#define WM8350_GPIO_DEBOUNCE_OFF 0 +#define WM8350_GPIO_DEBOUNCE_ON 1 + +/* + * R30 (0x1E) - GPIO Interrupt Status + */ +#define WM8350_GP12_EINT 0x1000 +#define WM8350_GP11_EINT 0x0800 +#define WM8350_GP10_EINT 0x0400 +#define WM8350_GP9_EINT 0x0200 +#define WM8350_GP8_EINT 0x0100 +#define WM8350_GP7_EINT 0x0080 +#define WM8350_GP6_EINT 0x0040 +#define WM8350_GP5_EINT 0x0020 +#define WM8350_GP4_EINT 0x0010 +#define WM8350_GP3_EINT 0x0008 +#define WM8350_GP2_EINT 0x0004 +#define WM8350_GP1_EINT 0x0002 +#define WM8350_GP0_EINT 0x0001 + + +/* + * R128 (0x80) - GPIO Debounce + */ +#define WM8350_GP12_DB 0x1000 +#define WM8350_GP11_DB 0x0800 +#define WM8350_GP10_DB 0x0400 +#define WM8350_GP9_DB 0x0200 +#define WM8350_GP8_DB 0x0100 +#define WM8350_GP7_DB 0x0080 +#define WM8350_GP6_DB 0x0040 +#define WM8350_GP5_DB 0x0020 +#define WM8350_GP4_DB 0x0010 +#define WM8350_GP3_DB 0x0008 +#define WM8350_GP2_DB 0x0004 +#define WM8350_GP1_DB 0x0002 +#define WM8350_GP0_DB 0x0001 + +/* + * R129 (0x81) - GPIO Pin pull up Control + */ +#define WM8350_GP12_PU 0x1000 +#define WM8350_GP11_PU 0x0800 +#define WM8350_GP10_PU 0x0400 +#define WM8350_GP9_PU 0x0200 +#define WM8350_GP8_PU 0x0100 +#define WM8350_GP7_PU 0x0080 +#define WM8350_GP6_PU 0x0040 +#define WM8350_GP5_PU 0x0020 +#define WM8350_GP4_PU 0x0010 +#define WM8350_GP3_PU 0x0008 +#define WM8350_GP2_PU 0x0004 +#define WM8350_GP1_PU 0x0002 +#define WM8350_GP0_PU 0x0001 + +/* + * R130 (0x82) - GPIO Pull down Control + */ +#define WM8350_GP12_PD 0x1000 +#define WM8350_GP11_PD 0x0800 +#define WM8350_GP10_PD 0x0400 +#define WM8350_GP9_PD 0x0200 +#define WM8350_GP8_PD 0x0100 +#define WM8350_GP7_PD 0x0080 +#define WM8350_GP6_PD 0x0040 +#define WM8350_GP5_PD 0x0020 +#define WM8350_GP4_PD 0x0010 +#define WM8350_GP3_PD 0x0008 +#define WM8350_GP2_PD 0x0004 +#define WM8350_GP1_PD 0x0002 +#define WM8350_GP0_PD 0x0001 + +/* + * R131 (0x83) - GPIO Interrupt Mode + */ +#define WM8350_GP12_INTMODE 0x1000 +#define 
WM8350_GP11_INTMODE 0x0800 +#define WM8350_GP10_INTMODE 0x0400 +#define WM8350_GP9_INTMODE 0x0200 +#define WM8350_GP8_INTMODE 0x0100 +#define WM8350_GP7_INTMODE 0x0080 +#define WM8350_GP6_INTMODE 0x0040 +#define WM8350_GP5_INTMODE 0x0020 +#define WM8350_GP4_INTMODE 0x0010 +#define WM8350_GP3_INTMODE 0x0008 +#define WM8350_GP2_INTMODE 0x0004 +#define WM8350_GP1_INTMODE 0x0002 +#define WM8350_GP0_INTMODE 0x0001 + +/* + * R133 (0x85) - GPIO Control + */ +#define WM8350_GP_DBTIME_MASK 0x00C0 + +/* + * R134 (0x86) - GPIO Configuration (i/o) + */ +#define WM8350_GP12_DIR 0x1000 +#define WM8350_GP11_DIR 0x0800 +#define WM8350_GP10_DIR 0x0400 +#define WM8350_GP9_DIR 0x0200 +#define WM8350_GP8_DIR 0x0100 +#define WM8350_GP7_DIR 0x0080 +#define WM8350_GP6_DIR 0x0040 +#define WM8350_GP5_DIR 0x0020 +#define WM8350_GP4_DIR 0x0010 +#define WM8350_GP3_DIR 0x0008 +#define WM8350_GP2_DIR 0x0004 +#define WM8350_GP1_DIR 0x0002 +#define WM8350_GP0_DIR 0x0001 + +/* + * R135 (0x87) - GPIO Pin Polarity / Type + */ +#define WM8350_GP12_CFG 0x1000 +#define WM8350_GP11_CFG 0x0800 +#define WM8350_GP10_CFG 0x0400 +#define WM8350_GP9_CFG 0x0200 +#define WM8350_GP8_CFG 0x0100 +#define WM8350_GP7_CFG 0x0080 +#define WM8350_GP6_CFG 0x0040 +#define WM8350_GP5_CFG 0x0020 +#define WM8350_GP4_CFG 0x0010 +#define WM8350_GP3_CFG 0x0008 +#define WM8350_GP2_CFG 0x0004 +#define WM8350_GP1_CFG 0x0002 +#define WM8350_GP0_CFG 0x0001 + +/* + * R140 (0x8C) - GPIO Function Select 1 + */ +#define WM8350_GP3_FN_MASK 0xF000 +#define WM8350_GP2_FN_MASK 0x0F00 +#define WM8350_GP1_FN_MASK 0x00F0 +#define WM8350_GP0_FN_MASK 0x000F + +/* + * R141 (0x8D) - GPIO Function Select 2 + */ +#define WM8350_GP7_FN_MASK 0xF000 +#define WM8350_GP6_FN_MASK 0x0F00 +#define WM8350_GP5_FN_MASK 0x00F0 +#define WM8350_GP4_FN_MASK 0x000F + +/* + * R142 (0x8E) - GPIO Function Select 3 + */ +#define WM8350_GP11_FN_MASK 0xF000 +#define WM8350_GP10_FN_MASK 0x0F00 +#define WM8350_GP9_FN_MASK 0x00F0 +#define WM8350_GP8_FN_MASK 0x000F + +/* + * R143 (0x8F) - GPIO Function Select 4 + */ +#define WM8350_GP12_FN_MASK 0x000F + +/* + * R230 (0xE6) - GPIO Pin Status + */ +#define WM8350_GP12_LVL 0x1000 +#define WM8350_GP11_LVL 0x0800 +#define WM8350_GP10_LVL 0x0400 +#define WM8350_GP9_LVL 0x0200 +#define WM8350_GP8_LVL 0x0100 +#define WM8350_GP7_LVL 0x0080 +#define WM8350_GP6_LVL 0x0040 +#define WM8350_GP5_LVL 0x0020 +#define WM8350_GP4_LVL 0x0010 +#define WM8350_GP3_LVL 0x0008 +#define WM8350_GP2_LVL 0x0004 +#define WM8350_GP1_LVL 0x0002 +#define WM8350_GP0_LVL 0x0001 + +struct wm8350; + +int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func, + int pol, int pull, int invert, int debounce); + +struct wm8350_gpio { + struct platform_device *pdev; +}; + +/* + * GPIO Interrupts + */ +#define WM8350_IRQ_GPIO(x) (50 + x) + +#endif diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h new file mode 100644 index 000000000..7a09e7f1f --- /dev/null +++ b/include/linux/mfd/wm8350/pmic.h @@ -0,0 +1,780 @@ +/* + * pmic.h -- Power Management Driver for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_WM8350_PMIC_H +#define __LINUX_MFD_WM8350_PMIC_H + +#include +#include +#include + +/* + * Register values. 
+ */ + +#define WM8350_CURRENT_SINK_DRIVER_A 0xAC +#define WM8350_CSA_FLASH_CONTROL 0xAD +#define WM8350_CURRENT_SINK_DRIVER_B 0xAE +#define WM8350_CSB_FLASH_CONTROL 0xAF +#define WM8350_DCDC_LDO_REQUESTED 0xB0 +#define WM8350_DCDC_ACTIVE_OPTIONS 0xB1 +#define WM8350_DCDC_SLEEP_OPTIONS 0xB2 +#define WM8350_POWER_CHECK_COMPARATOR 0xB3 +#define WM8350_DCDC1_CONTROL 0xB4 +#define WM8350_DCDC1_TIMEOUTS 0xB5 +#define WM8350_DCDC1_LOW_POWER 0xB6 +#define WM8350_DCDC2_CONTROL 0xB7 +#define WM8350_DCDC2_TIMEOUTS 0xB8 +#define WM8350_DCDC3_CONTROL 0xBA +#define WM8350_DCDC3_TIMEOUTS 0xBB +#define WM8350_DCDC3_LOW_POWER 0xBC +#define WM8350_DCDC4_CONTROL 0xBD +#define WM8350_DCDC4_TIMEOUTS 0xBE +#define WM8350_DCDC4_LOW_POWER 0xBF +#define WM8350_DCDC5_CONTROL 0xC0 +#define WM8350_DCDC5_TIMEOUTS 0xC1 +#define WM8350_DCDC6_CONTROL 0xC3 +#define WM8350_DCDC6_TIMEOUTS 0xC4 +#define WM8350_DCDC6_LOW_POWER 0xC5 +#define WM8350_LIMIT_SWITCH_CONTROL 0xC7 +#define WM8350_LDO1_CONTROL 0xC8 +#define WM8350_LDO1_TIMEOUTS 0xC9 +#define WM8350_LDO1_LOW_POWER 0xCA +#define WM8350_LDO2_CONTROL 0xCB +#define WM8350_LDO2_TIMEOUTS 0xCC +#define WM8350_LDO2_LOW_POWER 0xCD +#define WM8350_LDO3_CONTROL 0xCE +#define WM8350_LDO3_TIMEOUTS 0xCF +#define WM8350_LDO3_LOW_POWER 0xD0 +#define WM8350_LDO4_CONTROL 0xD1 +#define WM8350_LDO4_TIMEOUTS 0xD2 +#define WM8350_LDO4_LOW_POWER 0xD3 +#define WM8350_VCC_FAULT_MASKS 0xD7 +#define WM8350_MAIN_BANDGAP_CONTROL 0xD8 +#define WM8350_OSC_CONTROL 0xD9 +#define WM8350_RTC_TICK_CONTROL 0xDA +#define WM8350_SECURITY 0xDB +#define WM8350_RAM_BIST_1 0xDC +#define WM8350_DCDC_LDO_STATUS 0xE1 +#define WM8350_GPIO_PIN_STATUS 0xE6 + +#define WM8350_DCDC1_FORCE_PWM 0xF8 +#define WM8350_DCDC3_FORCE_PWM 0xFA +#define WM8350_DCDC4_FORCE_PWM 0xFB +#define WM8350_DCDC6_FORCE_PWM 0xFD + +/* + * R172 (0xAC) - Current Sink Driver A + */ +#define WM8350_CS1_HIB_MODE 0x1000 +#define WM8350_CS1_HIB_MODE_MASK 0x1000 +#define WM8350_CS1_HIB_MODE_SHIFT 12 +#define WM8350_CS1_ISEL_MASK 0x003F +#define WM8350_CS1_ISEL_SHIFT 0 + +/* Bit values for R172 (0xAC) */ +#define WM8350_CS1_HIB_MODE_DISABLE 0 +#define WM8350_CS1_HIB_MODE_LEAVE 1 + +#define WM8350_CS1_ISEL_220M 0x3F + +/* + * R173 (0xAD) - CSA Flash control + */ +#define WM8350_CS1_FLASH_MODE 0x8000 +#define WM8350_CS1_TRIGSRC 0x4000 +#define WM8350_CS1_DRIVE 0x2000 +#define WM8350_CS1_FLASH_DUR_MASK 0x0300 +#define WM8350_CS1_OFF_RAMP_MASK 0x0030 +#define WM8350_CS1_ON_RAMP_MASK 0x0003 + +/* + * R174 (0xAE) - Current Sink Driver B + */ +#define WM8350_CS2_HIB_MODE 0x1000 +#define WM8350_CS2_ISEL_MASK 0x003F + +/* + * R175 (0xAF) - CSB Flash control + */ +#define WM8350_CS2_FLASH_MODE 0x8000 +#define WM8350_CS2_TRIGSRC 0x4000 +#define WM8350_CS2_DRIVE 0x2000 +#define WM8350_CS2_FLASH_DUR_MASK 0x0300 +#define WM8350_CS2_OFF_RAMP_MASK 0x0030 +#define WM8350_CS2_ON_RAMP_MASK 0x0003 + +/* + * R176 (0xB0) - DCDC/LDO requested + */ +#define WM8350_LS_ENA 0x8000 +#define WM8350_LDO4_ENA 0x0800 +#define WM8350_LDO3_ENA 0x0400 +#define WM8350_LDO2_ENA 0x0200 +#define WM8350_LDO1_ENA 0x0100 +#define WM8350_DC6_ENA 0x0020 +#define WM8350_DC5_ENA 0x0010 +#define WM8350_DC4_ENA 0x0008 +#define WM8350_DC3_ENA 0x0004 +#define WM8350_DC2_ENA 0x0002 +#define WM8350_DC1_ENA 0x0001 + +/* + * R177 (0xB1) - DCDC Active options + */ +#define WM8350_PUTO_MASK 0x3000 +#define WM8350_PWRUP_DELAY_MASK 0x0300 +#define WM8350_DC6_ACTIVE 0x0020 +#define WM8350_DC4_ACTIVE 0x0008 +#define WM8350_DC3_ACTIVE 0x0004 +#define WM8350_DC1_ACTIVE 0x0001 + +/* + * R178 (0xB2) - DCDC Sleep 
options + */ +#define WM8350_DC6_SLEEP 0x0020 +#define WM8350_DC4_SLEEP 0x0008 +#define WM8350_DC3_SLEEP 0x0004 +#define WM8350_DC1_SLEEP 0x0001 + +/* + * R179 (0xB3) - Power-check comparator + */ +#define WM8350_PCCMP_ERRACT 0x4000 +#define WM8350_PCCMP_RAIL 0x0100 +#define WM8350_PCCMP_OFF_THR_MASK 0x0070 +#define WM8350_PCCMP_ON_THR_MASK 0x0007 + +/* + * R180 (0xB4) - DCDC1 Control + */ +#define WM8350_DC1_OPFLT 0x0400 +#define WM8350_DC1_VSEL_MASK 0x007F +#define WM8350_DC1_VSEL_SHIFT 0 + +/* + * R181 (0xB5) - DCDC1 Timeouts + */ +#define WM8350_DC1_ERRACT_MASK 0xC000 +#define WM8350_DC1_ERRACT_SHIFT 14 +#define WM8350_DC1_ENSLOT_MASK 0x3C00 +#define WM8350_DC1_ENSLOT_SHIFT 10 +#define WM8350_DC1_SDSLOT_MASK 0x03C0 +#define WM8350_DC1_UVTO_MASK 0x0030 +#define WM8350_DC1_SDSLOT_SHIFT 6 + +/* Bit values for R181 (0xB5) */ +#define WM8350_DC1_ERRACT_NONE 0 +#define WM8350_DC1_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC1_ERRACT_SHUTDOWN_SYS 2 + +/* + * R182 (0xB6) - DCDC1 Low Power + */ +#define WM8350_DC1_HIB_MODE_MASK 0x7000 +#define WM8350_DC1_HIB_TRIG_MASK 0x0300 +#define WM8350_DC1_VIMG_MASK 0x007F + +/* + * R183 (0xB7) - DCDC2 Control + */ +#define WM8350_DC2_MODE 0x4000 +#define WM8350_DC2_MODE_MASK 0x4000 +#define WM8350_DC2_MODE_SHIFT 14 +#define WM8350_DC2_HIB_MODE 0x1000 +#define WM8350_DC2_HIB_MODE_MASK 0x1000 +#define WM8350_DC2_HIB_MODE_SHIFT 12 +#define WM8350_DC2_HIB_TRIG_MASK 0x0300 +#define WM8350_DC2_HIB_TRIG_SHIFT 8 +#define WM8350_DC2_ILIM 0x0040 +#define WM8350_DC2_ILIM_MASK 0x0040 +#define WM8350_DC2_ILIM_SHIFT 6 +#define WM8350_DC2_RMP_MASK 0x0018 +#define WM8350_DC2_RMP_SHIFT 3 +#define WM8350_DC2_FBSRC_MASK 0x0003 +#define WM8350_DC2_FBSRC_SHIFT 0 + +/* Bit values for R183 (0xB7) */ +#define WM8350_DC2_MODE_BOOST 0 +#define WM8350_DC2_MODE_SWITCH 1 + +#define WM8350_DC2_HIB_MODE_ACTIVE 1 +#define WM8350_DC2_HIB_MODE_DISABLE 0 + +#define WM8350_DC2_HIB_TRIG_NONE 0 +#define WM8350_DC2_HIB_TRIG_LPWR1 1 +#define WM8350_DC2_HIB_TRIG_LPWR2 2 +#define WM8350_DC2_HIB_TRIG_LPWR3 3 + +#define WM8350_DC2_ILIM_HIGH 0 +#define WM8350_DC2_ILIM_LOW 1 + +#define WM8350_DC2_RMP_30V 0 +#define WM8350_DC2_RMP_20V 1 +#define WM8350_DC2_RMP_10V 2 +#define WM8350_DC2_RMP_5V 3 + +#define WM8350_DC2_FBSRC_FB2 0 +#define WM8350_DC2_FBSRC_ISINKA 1 +#define WM8350_DC2_FBSRC_ISINKB 2 +#define WM8350_DC2_FBSRC_USB 3 + +/* + * R184 (0xB8) - DCDC2 Timeouts + */ +#define WM8350_DC2_ERRACT_MASK 0xC000 +#define WM8350_DC2_ERRACT_SHIFT 14 +#define WM8350_DC2_ENSLOT_MASK 0x3C00 +#define WM8350_DC2_ENSLOT_SHIFT 10 +#define WM8350_DC2_SDSLOT_MASK 0x03C0 +#define WM8350_DC2_UVTO_MASK 0x0030 + +/* Bit values for R184 (0xB8) */ +#define WM8350_DC2_ERRACT_NONE 0 +#define WM8350_DC2_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC2_ERRACT_SHUTDOWN_SYS 2 + +/* + * R186 (0xBA) - DCDC3 Control + */ +#define WM8350_DC3_OPFLT 0x0400 +#define WM8350_DC3_VSEL_MASK 0x007F +#define WM8350_DC3_VSEL_SHIFT 0 + +/* + * R187 (0xBB) - DCDC3 Timeouts + */ +#define WM8350_DC3_ERRACT_MASK 0xC000 +#define WM8350_DC3_ERRACT_SHIFT 14 +#define WM8350_DC3_ENSLOT_MASK 0x3C00 +#define WM8350_DC3_ENSLOT_SHIFT 10 +#define WM8350_DC3_SDSLOT_MASK 0x03C0 +#define WM8350_DC3_UVTO_MASK 0x0030 +#define WM8350_DC3_SDSLOT_SHIFT 6 + +/* Bit values for R187 (0xBB) */ +#define WM8350_DC3_ERRACT_NONE 0 +#define WM8350_DC3_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC3_ERRACT_SHUTDOWN_SYS 2 +/* + * R188 (0xBC) - DCDC3 Low Power + */ +#define WM8350_DC3_HIB_MODE_MASK 0x7000 +#define WM8350_DC3_HIB_TRIG_MASK 0x0300 +#define WM8350_DC3_VIMG_MASK 0x007F + +/* + 
* R189 (0xBD) - DCDC4 Control + */ +#define WM8350_DC4_OPFLT 0x0400 +#define WM8350_DC4_VSEL_MASK 0x007F +#define WM8350_DC4_VSEL_SHIFT 0 + +/* + * R190 (0xBE) - DCDC4 Timeouts + */ +#define WM8350_DC4_ERRACT_MASK 0xC000 +#define WM8350_DC4_ERRACT_SHIFT 14 +#define WM8350_DC4_ENSLOT_MASK 0x3C00 +#define WM8350_DC4_ENSLOT_SHIFT 10 +#define WM8350_DC4_SDSLOT_MASK 0x03C0 +#define WM8350_DC4_UVTO_MASK 0x0030 +#define WM8350_DC4_SDSLOT_SHIFT 6 + +/* Bit values for R190 (0xBE) */ +#define WM8350_DC4_ERRACT_NONE 0 +#define WM8350_DC4_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC4_ERRACT_SHUTDOWN_SYS 2 + +/* + * R191 (0xBF) - DCDC4 Low Power + */ +#define WM8350_DC4_HIB_MODE_MASK 0x7000 +#define WM8350_DC4_HIB_TRIG_MASK 0x0300 +#define WM8350_DC4_VIMG_MASK 0x007F + +/* + * R192 (0xC0) - DCDC5 Control + */ +#define WM8350_DC5_MODE 0x4000 +#define WM8350_DC5_MODE_MASK 0x4000 +#define WM8350_DC5_MODE_SHIFT 14 +#define WM8350_DC5_HIB_MODE 0x1000 +#define WM8350_DC5_HIB_MODE_MASK 0x1000 +#define WM8350_DC5_HIB_MODE_SHIFT 12 +#define WM8350_DC5_HIB_TRIG_MASK 0x0300 +#define WM8350_DC5_HIB_TRIG_SHIFT 8 +#define WM8350_DC5_ILIM 0x0040 +#define WM8350_DC5_ILIM_MASK 0x0040 +#define WM8350_DC5_ILIM_SHIFT 6 +#define WM8350_DC5_RMP_MASK 0x0018 +#define WM8350_DC5_RMP_SHIFT 3 +#define WM8350_DC5_FBSRC_MASK 0x0003 +#define WM8350_DC5_FBSRC_SHIFT 0 + +/* Bit values for R192 (0xC0) */ +#define WM8350_DC5_MODE_BOOST 0 +#define WM8350_DC5_MODE_SWITCH 1 + +#define WM8350_DC5_HIB_MODE_ACTIVE 1 +#define WM8350_DC5_HIB_MODE_DISABLE 0 + +#define WM8350_DC5_HIB_TRIG_NONE 0 +#define WM8350_DC5_HIB_TRIG_LPWR1 1 +#define WM8350_DC5_HIB_TRIG_LPWR2 2 +#define WM8350_DC5_HIB_TRIG_LPWR3 3 + +#define WM8350_DC5_ILIM_HIGH 0 +#define WM8350_DC5_ILIM_LOW 1 + +#define WM8350_DC5_RMP_30V 0 +#define WM8350_DC5_RMP_20V 1 +#define WM8350_DC5_RMP_10V 2 +#define WM8350_DC5_RMP_5V 3 + +#define WM8350_DC5_FBSRC_FB2 0 +#define WM8350_DC5_FBSRC_ISINKA 1 +#define WM8350_DC5_FBSRC_ISINKB 2 +#define WM8350_DC5_FBSRC_USB 3 + +/* + * R193 (0xC1) - DCDC5 Timeouts + */ +#define WM8350_DC5_ERRACT_MASK 0xC000 +#define WM8350_DC5_ERRACT_SHIFT 14 +#define WM8350_DC5_ENSLOT_MASK 0x3C00 +#define WM8350_DC5_ENSLOT_SHIFT 10 +#define WM8350_DC5_SDSLOT_MASK 0x03C0 +#define WM8350_DC5_UVTO_MASK 0x0030 +#define WM8350_DC5_SDSLOT_SHIFT 6 + +/* Bit values for R193 (0xC1) */ +#define WM8350_DC5_ERRACT_NONE 0 +#define WM8350_DC5_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC5_ERRACT_SHUTDOWN_SYS 2 + +/* + * R195 (0xC3) - DCDC6 Control + */ +#define WM8350_DC6_OPFLT 0x0400 +#define WM8350_DC6_VSEL_MASK 0x007F +#define WM8350_DC6_VSEL_SHIFT 0 + +/* + * R196 (0xC4) - DCDC6 Timeouts + */ +#define WM8350_DC6_ERRACT_MASK 0xC000 +#define WM8350_DC6_ERRACT_SHIFT 14 +#define WM8350_DC6_ENSLOT_MASK 0x3C00 +#define WM8350_DC6_ENSLOT_SHIFT 10 +#define WM8350_DC6_SDSLOT_MASK 0x03C0 +#define WM8350_DC6_UVTO_MASK 0x0030 +#define WM8350_DC6_SDSLOT_SHIFT 6 + +/* Bit values for R196 (0xC4) */ +#define WM8350_DC6_ERRACT_NONE 0 +#define WM8350_DC6_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC6_ERRACT_SHUTDOWN_SYS 2 + +/* + * R197 (0xC5) - DCDC6 Low Power + */ +#define WM8350_DC6_HIB_MODE_MASK 0x7000 +#define WM8350_DC6_HIB_TRIG_MASK 0x0300 +#define WM8350_DC6_VIMG_MASK 0x007F + +/* + * R199 (0xC7) - Limit Switch Control + */ +#define WM8350_LS_ERRACT_MASK 0xC000 +#define WM8350_LS_ERRACT_SHIFT 14 +#define WM8350_LS_ENSLOT_MASK 0x3C00 +#define WM8350_LS_ENSLOT_SHIFT 10 +#define WM8350_LS_SDSLOT_MASK 0x03C0 +#define WM8350_LS_SDSLOT_SHIFT 6 +#define WM8350_LS_HIB_MODE 0x0010 +#define 
WM8350_LS_HIB_MODE_MASK 0x0010 +#define WM8350_LS_HIB_MODE_SHIFT 4 +#define WM8350_LS_HIB_PROT 0x0002 +#define WM8350_LS_HIB_PROT_MASK 0x0002 +#define WM8350_LS_HIB_PROT_SHIFT 1 +#define WM8350_LS_PROT 0x0001 +#define WM8350_LS_PROT_MASK 0x0001 +#define WM8350_LS_PROT_SHIFT 0 + +/* Bit values for R199 (0xC7) */ +#define WM8350_LS_ERRACT_NONE 0 +#define WM8350_LS_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_LS_ERRACT_SHUTDOWN_SYS 2 + +/* + * R200 (0xC8) - LDO1 Control + */ +#define WM8350_LDO1_SWI 0x4000 +#define WM8350_LDO1_OPFLT 0x0400 +#define WM8350_LDO1_VSEL_MASK 0x001F +#define WM8350_LDO1_VSEL_SHIFT 0 + +/* + * R201 (0xC9) - LDO1 Timeouts + */ +#define WM8350_LDO1_ERRACT_MASK 0xC000 +#define WM8350_LDO1_ERRACT_SHIFT 14 +#define WM8350_LDO1_ENSLOT_MASK 0x3C00 +#define WM8350_LDO1_ENSLOT_SHIFT 10 +#define WM8350_LDO1_SDSLOT_MASK 0x03C0 +#define WM8350_LDO1_UVTO_MASK 0x0030 +#define WM8350_LDO1_SDSLOT_SHIFT 6 + +/* Bit values for R201 (0xC9) */ +#define WM8350_LDO1_ERRACT_NONE 0 +#define WM8350_LDO1_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_LDO1_ERRACT_SHUTDOWN_SYS 2 + +/* + * R202 (0xCA) - LDO1 Low Power + */ +#define WM8350_LDO1_HIB_MODE_MASK 0x3000 +#define WM8350_LDO1_HIB_TRIG_MASK 0x0300 +#define WM8350_LDO1_VIMG_MASK 0x001F +#define WM8350_LDO1_HIB_MODE_DIS (0x1 << 12) + + +/* + * R203 (0xCB) - LDO2 Control + */ +#define WM8350_LDO2_SWI 0x4000 +#define WM8350_LDO2_OPFLT 0x0400 +#define WM8350_LDO2_VSEL_MASK 0x001F +#define WM8350_LDO2_VSEL_SHIFT 0 + +/* + * R204 (0xCC) - LDO2 Timeouts + */ +#define WM8350_LDO2_ERRACT_MASK 0xC000 +#define WM8350_LDO2_ERRACT_SHIFT 14 +#define WM8350_LDO2_ENSLOT_MASK 0x3C00 +#define WM8350_LDO2_ENSLOT_SHIFT 10 +#define WM8350_LDO2_SDSLOT_MASK 0x03C0 +#define WM8350_LDO2_SDSLOT_SHIFT 6 + +/* Bit values for R204 (0xCC) */ +#define WM8350_LDO2_ERRACT_NONE 0 +#define WM8350_LDO2_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_LDO2_ERRACT_SHUTDOWN_SYS 2 + +/* + * R205 (0xCD) - LDO2 Low Power + */ +#define WM8350_LDO2_HIB_MODE_MASK 0x3000 +#define WM8350_LDO2_HIB_TRIG_MASK 0x0300 +#define WM8350_LDO2_VIMG_MASK 0x001F + +/* + * R206 (0xCE) - LDO3 Control + */ +#define WM8350_LDO3_SWI 0x4000 +#define WM8350_LDO3_OPFLT 0x0400 +#define WM8350_LDO3_VSEL_MASK 0x001F +#define WM8350_LDO3_VSEL_SHIFT 0 + +/* + * R207 (0xCF) - LDO3 Timeouts + */ +#define WM8350_LDO3_ERRACT_MASK 0xC000 +#define WM8350_LDO3_ERRACT_SHIFT 14 +#define WM8350_LDO3_ENSLOT_MASK 0x3C00 +#define WM8350_LDO3_ENSLOT_SHIFT 10 +#define WM8350_LDO3_SDSLOT_MASK 0x03C0 +#define WM8350_LDO3_UVTO_MASK 0x0030 +#define WM8350_LDO3_SDSLOT_SHIFT 6 + +/* Bit values for R207 (0xCF) */ +#define WM8350_LDO3_ERRACT_NONE 0 +#define WM8350_LDO3_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_LDO3_ERRACT_SHUTDOWN_SYS 2 + +/* + * R208 (0xD0) - LDO3 Low Power + */ +#define WM8350_LDO3_HIB_MODE_MASK 0x3000 +#define WM8350_LDO3_HIB_TRIG_MASK 0x0300 +#define WM8350_LDO3_VIMG_MASK 0x001F + +/* + * R209 (0xD1) - LDO4 Control + */ +#define WM8350_LDO4_SWI 0x4000 +#define WM8350_LDO4_OPFLT 0x0400 +#define WM8350_LDO4_VSEL_MASK 0x001F +#define WM8350_LDO4_VSEL_SHIFT 0 + +/* + * R210 (0xD2) - LDO4 Timeouts + */ +#define WM8350_LDO4_ERRACT_MASK 0xC000 +#define WM8350_LDO4_ERRACT_SHIFT 14 +#define WM8350_LDO4_ENSLOT_MASK 0x3C00 +#define WM8350_LDO4_ENSLOT_SHIFT 10 +#define WM8350_LDO4_SDSLOT_MASK 0x03C0 +#define WM8350_LDO4_UVTO_MASK 0x0030 +#define WM8350_LDO4_SDSLOT_SHIFT 6 + +/* Bit values for R210 (0xD2) */ +#define WM8350_LDO4_ERRACT_NONE 0 +#define WM8350_LDO4_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_LDO4_ERRACT_SHUTDOWN_SYS 2 + +/* + * R211 (0xD3) 
- LDO4 Low Power + */ +#define WM8350_LDO4_HIB_MODE_MASK 0x3000 +#define WM8350_LDO4_HIB_TRIG_MASK 0x0300 +#define WM8350_LDO4_VIMG_MASK 0x001F + +/* + * R215 (0xD7) - VCC_FAULT Masks + */ +#define WM8350_LS_FAULT 0x8000 +#define WM8350_LDO4_FAULT 0x0800 +#define WM8350_LDO3_FAULT 0x0400 +#define WM8350_LDO2_FAULT 0x0200 +#define WM8350_LDO1_FAULT 0x0100 +#define WM8350_DC6_FAULT 0x0020 +#define WM8350_DC5_FAULT 0x0010 +#define WM8350_DC4_FAULT 0x0008 +#define WM8350_DC3_FAULT 0x0004 +#define WM8350_DC2_FAULT 0x0002 +#define WM8350_DC1_FAULT 0x0001 + +/* + * R216 (0xD8) - Main Bandgap Control + */ +#define WM8350_MBG_LOAD_FUSES 0x8000 +#define WM8350_MBG_FUSE_WPREP 0x4000 +#define WM8350_MBG_FUSE_WRITE 0x2000 +#define WM8350_MBG_FUSE_TRIM_MASK 0x1F00 +#define WM8350_MBG_TRIM_SRC 0x0020 +#define WM8350_MBG_USER_TRIM_MASK 0x001F + +/* + * R217 (0xD9) - OSC Control + */ +#define WM8350_OSC_LOAD_FUSES 0x8000 +#define WM8350_OSC_FUSE_WPREP 0x4000 +#define WM8350_OSC_FUSE_WRITE 0x2000 +#define WM8350_OSC_FUSE_TRIM_MASK 0x0F00 +#define WM8350_OSC_TRIM_SRC 0x0020 +#define WM8350_OSC_USER_TRIM_MASK 0x000F + +/* + * R248 (0xF8) - DCDC1 Force PWM + */ +#define WM8350_DCDC1_FORCE_PWM_ENA 0x0010 + +/* + * R250 (0xFA) - DCDC3 Force PWM + */ +#define WM8350_DCDC3_FORCE_PWM_ENA 0x0010 + +/* + * R251 (0xFB) - DCDC4 Force PWM + */ +#define WM8350_DCDC4_FORCE_PWM_ENA 0x0010 + +/* + * R253 (0xFD) - DCDC1 Force PWM + */ +#define WM8350_DCDC6_FORCE_PWM_ENA 0x0010 + +/* + * DCDC's + */ +#define WM8350_DCDC_1 0 +#define WM8350_DCDC_2 1 +#define WM8350_DCDC_3 2 +#define WM8350_DCDC_4 3 +#define WM8350_DCDC_5 4 +#define WM8350_DCDC_6 5 + +/* DCDC modes */ +#define WM8350_DCDC_ACTIVE_STANDBY 0 +#define WM8350_DCDC_ACTIVE_PULSE 1 +#define WM8350_DCDC_SLEEP_NORMAL 0 +#define WM8350_DCDC_SLEEP_LOW 1 + +/* DCDC Low power (Hibernate) mode */ +#define WM8350_DCDC_HIB_MODE_CUR (0 << 12) +#define WM8350_DCDC_HIB_MODE_IMAGE (1 << 12) +#define WM8350_DCDC_HIB_MODE_STANDBY (2 << 12) +#define WM8350_DCDC_HIB_MODE_LDO (4 << 12) +#define WM8350_DCDC_HIB_MODE_LDO_IM (5 << 12) +#define WM8350_DCDC_HIB_MODE_DIS (7 << 12) +#define WM8350_DCDC_HIB_MODE_MASK (7 << 12) + +/* DCDC Low Power (Hibernate) signal */ +#define WM8350_DCDC_HIB_SIG_REG (0 << 8) +#define WM8350_DCDC_HIB_SIG_LPWR1 (1 << 8) +#define WM8350_DCDC_HIB_SIG_LPWR2 (2 << 8) +#define WM8350_DCDC_HIB_SIG_LPWR3 (3 << 8) + +/* LDO Low power (Hibernate) mode */ +#define WM8350_LDO_HIB_MODE_IMAGE (0 << 0) +#define WM8350_LDO_HIB_MODE_DIS (1 << 0) + +/* LDO Low Power (Hibernate) signal */ +#define WM8350_LDO_HIB_SIG_REG (0 << 8) +#define WM8350_LDO_HIB_SIG_LPWR1 (1 << 8) +#define WM8350_LDO_HIB_SIG_LPWR2 (2 << 8) +#define WM8350_LDO_HIB_SIG_LPWR3 (3 << 8) + +/* + * LDOs + */ +#define WM8350_LDO_1 6 +#define WM8350_LDO_2 7 +#define WM8350_LDO_3 8 +#define WM8350_LDO_4 9 + +/* + * ISINKs + */ +#define WM8350_ISINK_A 10 +#define WM8350_ISINK_B 11 + +#define WM8350_ISINK_MODE_BOOST 0 +#define WM8350_ISINK_MODE_SWITCH 1 +#define WM8350_ISINK_ILIM_NORMAL 0 +#define WM8350_ISINK_ILIM_LOW 1 + +#define WM8350_ISINK_FLASH_DISABLE 0 +#define WM8350_ISINK_FLASH_ENABLE 1 +#define WM8350_ISINK_FLASH_TRIG_BIT 0 +#define WM8350_ISINK_FLASH_TRIG_GPIO 1 +#define WM8350_ISINK_FLASH_MODE_EN (1 << 13) +#define WM8350_ISINK_FLASH_MODE_DIS (0 << 13) +#define WM8350_ISINK_FLASH_DUR_32MS (0 << 8) +#define WM8350_ISINK_FLASH_DUR_64MS (1 << 8) +#define WM8350_ISINK_FLASH_DUR_96MS (2 << 8) +#define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8) +#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0) +#define 
WM8350_ISINK_FLASH_ON_0_25S (1 << 0) +#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0) +#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0) +#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0) +#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0) +#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0) +#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4) +#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4) +#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4) +#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4) +#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4) +#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4) +#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4) + +/* + * Regulator Interrupts. + */ +#define WM8350_IRQ_CS1 13 +#define WM8350_IRQ_CS2 14 +#define WM8350_IRQ_UV_LDO4 25 +#define WM8350_IRQ_UV_LDO3 26 +#define WM8350_IRQ_UV_LDO2 27 +#define WM8350_IRQ_UV_LDO1 28 +#define WM8350_IRQ_UV_DC6 29 +#define WM8350_IRQ_UV_DC5 30 +#define WM8350_IRQ_UV_DC4 31 +#define WM8350_IRQ_UV_DC3 32 +#define WM8350_IRQ_UV_DC2 33 +#define WM8350_IRQ_UV_DC1 34 +#define WM8350_IRQ_OC_LS 35 + +#define NUM_WM8350_REGULATORS 12 + +struct wm8350; +struct platform_device; +struct regulator_init_data; + +/* + * WM8350 LED platform data + */ +struct wm8350_led_platform_data { + const char *name; + const char *default_trigger; + int max_uA; +}; + +struct wm8350_led { + struct platform_device *pdev; + struct work_struct work; + spinlock_t value_lock; + enum led_brightness value; + struct led_classdev cdev; + int max_uA_index; + int enabled; + + struct regulator *isink; + struct regulator_consumer_supply isink_consumer; + struct regulator_init_data isink_init; + struct regulator *dcdc; + struct regulator_consumer_supply dcdc_consumer; + struct regulator_init_data dcdc_init; +}; + +struct wm8350_pmic { + /* Number of regulators of each type on this device */ + int max_dcdc; + int max_isink; + + /* ISINK to DCDC mapping */ + int isink_A_dcdc; + int isink_B_dcdc; + + /* hibernate configs */ + u16 dcdc1_hib_mode; + u16 dcdc3_hib_mode; + u16 dcdc4_hib_mode; + u16 dcdc6_hib_mode; + + /* regulator devices */ + struct platform_device *pdev[NUM_WM8350_REGULATORS]; + + /* LED devices */ + struct wm8350_led led[2]; +}; + +int wm8350_register_regulator(struct wm8350 *wm8350, int reg, + struct regulator_init_data *initdata); +int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, + struct wm8350_led_platform_data *pdata); + +/* + * Additional DCDC control not supported via regulator API + */ +int wm8350_dcdc_set_slot(struct wm8350 *wm8350, int dcdc, u16 start, + u16 stop, u16 fault); +int wm8350_dcdc25_set_mode(struct wm8350 *wm8350, int dcdc, u16 mode, + u16 ilim, u16 ramp, u16 feedback); + +/* + * Additional LDO control not supported via regulator API + */ +int wm8350_ldo_set_slot(struct wm8350 *wm8350, int ldo, u16 start, u16 stop); + +/* + * Additional ISINK control not supported via regulator API + */ +int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode, + u16 trigger, u16 duration, u16 on_ramp, + u16 off_ramp, u16 drive); + +#endif diff --git a/include/linux/mfd/wm8350/rtc.h b/include/linux/mfd/wm8350/rtc.h new file mode 100644 index 000000000..ebd72ffc6 --- /dev/null +++ b/include/linux/mfd/wm8350/rtc.h @@ -0,0 +1,269 @@ +/* + * rtc.h -- RTC driver for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at 
your + * option) any later version. + */ + +#ifndef __LINUX_MFD_WM8350_RTC_H +#define __LINUX_MFD_WM8350_RTC_H + +#include + +/* + * Register values. + */ +#define WM8350_RTC_SECONDS_MINUTES 0x10 +#define WM8350_RTC_HOURS_DAY 0x11 +#define WM8350_RTC_DATE_MONTH 0x12 +#define WM8350_RTC_YEAR 0x13 +#define WM8350_ALARM_SECONDS_MINUTES 0x14 +#define WM8350_ALARM_HOURS_DAY 0x15 +#define WM8350_ALARM_DATE_MONTH 0x16 +#define WM8350_RTC_TIME_CONTROL 0x17 + +/* + * R16 (0x10) - RTC Seconds/Minutes + */ +#define WM8350_RTC_MINS_MASK 0x7F00 +#define WM8350_RTC_MINS_SHIFT 8 +#define WM8350_RTC_SECS_MASK 0x007F +#define WM8350_RTC_SECS_SHIFT 0 + +/* + * R17 (0x11) - RTC Hours/Day + */ +#define WM8350_RTC_DAY_MASK 0x0700 +#define WM8350_RTC_DAY_SHIFT 8 +#define WM8350_RTC_HPM_MASK 0x0020 +#define WM8350_RTC_HPM_SHIFT 5 +#define WM8350_RTC_HRS_MASK 0x001F +#define WM8350_RTC_HRS_SHIFT 0 + +/* Bit values for R21 (0x15) */ +#define WM8350_RTC_DAY_SUN 1 +#define WM8350_RTC_DAY_MON 2 +#define WM8350_RTC_DAY_TUE 3 +#define WM8350_RTC_DAY_WED 4 +#define WM8350_RTC_DAY_THU 5 +#define WM8350_RTC_DAY_FRI 6 +#define WM8350_RTC_DAY_SAT 7 + +#define WM8350_RTC_HPM_AM 0 +#define WM8350_RTC_HPM_PM 1 + +/* + * R18 (0x12) - RTC Date/Month + */ +#define WM8350_RTC_MTH_MASK 0x1F00 +#define WM8350_RTC_MTH_SHIFT 8 +#define WM8350_RTC_DATE_MASK 0x003F +#define WM8350_RTC_DATE_SHIFT 0 + +/* Bit values for R22 (0x16) */ +#define WM8350_RTC_MTH_JAN 1 +#define WM8350_RTC_MTH_FEB 2 +#define WM8350_RTC_MTH_MAR 3 +#define WM8350_RTC_MTH_APR 4 +#define WM8350_RTC_MTH_MAY 5 +#define WM8350_RTC_MTH_JUN 6 +#define WM8350_RTC_MTH_JUL 7 +#define WM8350_RTC_MTH_AUG 8 +#define WM8350_RTC_MTH_SEP 9 +#define WM8350_RTC_MTH_OCT 10 +#define WM8350_RTC_MTH_NOV 11 +#define WM8350_RTC_MTH_DEC 12 +#define WM8350_RTC_MTH_JAN_BCD 0x01 +#define WM8350_RTC_MTH_FEB_BCD 0x02 +#define WM8350_RTC_MTH_MAR_BCD 0x03 +#define WM8350_RTC_MTH_APR_BCD 0x04 +#define WM8350_RTC_MTH_MAY_BCD 0x05 +#define WM8350_RTC_MTH_JUN_BCD 0x06 +#define WM8350_RTC_MTH_JUL_BCD 0x07 +#define WM8350_RTC_MTH_AUG_BCD 0x08 +#define WM8350_RTC_MTH_SEP_BCD 0x09 +#define WM8350_RTC_MTH_OCT_BCD 0x10 +#define WM8350_RTC_MTH_NOV_BCD 0x11 +#define WM8350_RTC_MTH_DEC_BCD 0x12 + +/* + * R19 (0x13) - RTC Year + */ +#define WM8350_RTC_YHUNDREDS_MASK 0x3F00 +#define WM8350_RTC_YHUNDREDS_SHIFT 8 +#define WM8350_RTC_YUNITS_MASK 0x00FF +#define WM8350_RTC_YUNITS_SHIFT 0 + +/* + * R20 (0x14) - Alarm Seconds/Minutes + */ +#define WM8350_RTC_ALMMINS_MASK 0x7F00 +#define WM8350_RTC_ALMMINS_SHIFT 8 +#define WM8350_RTC_ALMSECS_MASK 0x007F +#define WM8350_RTC_ALMSECS_SHIFT 0 + +/* Bit values for R20 (0x14) */ +#define WM8350_RTC_ALMMINS_DONT_CARE -1 +#define WM8350_RTC_ALMSECS_DONT_CARE -1 + +/* + * R21 (0x15) - Alarm Hours/Day + */ +#define WM8350_RTC_ALMDAY_MASK 0x0F00 +#define WM8350_RTC_ALMDAY_SHIFT 8 +#define WM8350_RTC_ALMHPM_MASK 0x0020 +#define WM8350_RTC_ALMHPM_SHIFT 5 +#define WM8350_RTC_ALMHRS_MASK 0x001F +#define WM8350_RTC_ALMHRS_SHIFT 0 + +/* Bit values for R21 (0x15) */ +#define WM8350_RTC_ALMDAY_DONT_CARE -1 +#define WM8350_RTC_ALMDAY_SUN 1 +#define WM8350_RTC_ALMDAY_MON 2 +#define WM8350_RTC_ALMDAY_TUE 3 +#define WM8350_RTC_ALMDAY_WED 4 +#define WM8350_RTC_ALMDAY_THU 5 +#define WM8350_RTC_ALMDAY_FRI 6 +#define WM8350_RTC_ALMDAY_SAT 7 + +#define WM8350_RTC_ALMHPM_AM 0 +#define WM8350_RTC_ALMHPM_PM 1 + +#define WM8350_RTC_ALMHRS_DONT_CARE -1 + +/* + * R22 (0x16) - Alarm Date/Month + */ +#define WM8350_RTC_ALMMTH_MASK 0x1F00 +#define WM8350_RTC_ALMMTH_SHIFT 8 +#define 
WM8350_RTC_ALMDATE_MASK 0x003F +#define WM8350_RTC_ALMDATE_SHIFT 0 + +/* Bit values for R22 (0x16) */ +#define WM8350_RTC_ALMDATE_DONT_CARE -1 + +#define WM8350_RTC_ALMMTH_DONT_CARE -1 +#define WM8350_RTC_ALMMTH_JAN 1 +#define WM8350_RTC_ALMMTH_FEB 2 +#define WM8350_RTC_ALMMTH_MAR 3 +#define WM8350_RTC_ALMMTH_APR 4 +#define WM8350_RTC_ALMMTH_MAY 5 +#define WM8350_RTC_ALMMTH_JUN 6 +#define WM8350_RTC_ALMMTH_JUL 7 +#define WM8350_RTC_ALMMTH_AUG 8 +#define WM8350_RTC_ALMMTH_SEP 9 +#define WM8350_RTC_ALMMTH_OCT 10 +#define WM8350_RTC_ALMMTH_NOV 11 +#define WM8350_RTC_ALMMTH_DEC 12 +#define WM8350_RTC_ALMMTH_JAN_BCD 0x01 +#define WM8350_RTC_ALMMTH_FEB_BCD 0x02 +#define WM8350_RTC_ALMMTH_MAR_BCD 0x03 +#define WM8350_RTC_ALMMTH_APR_BCD 0x04 +#define WM8350_RTC_ALMMTH_MAY_BCD 0x05 +#define WM8350_RTC_ALMMTH_JUN_BCD 0x06 +#define WM8350_RTC_ALMMTH_JUL_BCD 0x07 +#define WM8350_RTC_ALMMTH_AUG_BCD 0x08 +#define WM8350_RTC_ALMMTH_SEP_BCD 0x09 +#define WM8350_RTC_ALMMTH_OCT_BCD 0x10 +#define WM8350_RTC_ALMMTH_NOV_BCD 0x11 +#define WM8350_RTC_ALMMTH_DEC_BCD 0x12 + +/* + * R23 (0x17) - RTC Time Control + */ +#define WM8350_RTC_BCD 0x8000 +#define WM8350_RTC_BCD_MASK 0x8000 +#define WM8350_RTC_BCD_SHIFT 15 +#define WM8350_RTC_12HR 0x4000 +#define WM8350_RTC_12HR_MASK 0x4000 +#define WM8350_RTC_12HR_SHIFT 14 +#define WM8350_RTC_DST 0x2000 +#define WM8350_RTC_DST_MASK 0x2000 +#define WM8350_RTC_DST_SHIFT 13 +#define WM8350_RTC_SET 0x0800 +#define WM8350_RTC_SET_MASK 0x0800 +#define WM8350_RTC_SET_SHIFT 11 +#define WM8350_RTC_STS 0x0400 +#define WM8350_RTC_STS_MASK 0x0400 +#define WM8350_RTC_STS_SHIFT 10 +#define WM8350_RTC_ALMSET 0x0200 +#define WM8350_RTC_ALMSET_MASK 0x0200 +#define WM8350_RTC_ALMSET_SHIFT 9 +#define WM8350_RTC_ALMSTS 0x0100 +#define WM8350_RTC_ALMSTS_MASK 0x0100 +#define WM8350_RTC_ALMSTS_SHIFT 8 +#define WM8350_RTC_PINT 0x0070 +#define WM8350_RTC_PINT_MASK 0x0070 +#define WM8350_RTC_PINT_SHIFT 4 +#define WM8350_RTC_DSW 0x000F +#define WM8350_RTC_DSW_MASK 0x000F +#define WM8350_RTC_DSW_SHIFT 0 + +/* Bit values for R23 (0x17) */ +#define WM8350_RTC_BCD_BINARY 0 +#define WM8350_RTC_BCD_BCD 1 + +#define WM8350_RTC_12HR_24HR 0 +#define WM8350_RTC_12HR_12HR 1 + +#define WM8350_RTC_DST_DISABLED 0 +#define WM8350_RTC_DST_ENABLED 1 + +#define WM8350_RTC_SET_RUN 0 +#define WM8350_RTC_SET_SET 1 + +#define WM8350_RTC_STS_RUNNING 0 +#define WM8350_RTC_STS_STOPPED 1 + +#define WM8350_RTC_ALMSET_RUN 0 +#define WM8350_RTC_ALMSET_SET 1 + +#define WM8350_RTC_ALMSTS_RUNNING 0 +#define WM8350_RTC_ALMSTS_STOPPED 1 + +#define WM8350_RTC_PINT_DISABLED 0 +#define WM8350_RTC_PINT_SECS 1 +#define WM8350_RTC_PINT_MINS 2 +#define WM8350_RTC_PINT_HRS 3 +#define WM8350_RTC_PINT_DAYS 4 +#define WM8350_RTC_PINT_MTHS 5 + +#define WM8350_RTC_DSW_DISABLED 0 +#define WM8350_RTC_DSW_1HZ 1 +#define WM8350_RTC_DSW_2HZ 2 +#define WM8350_RTC_DSW_4HZ 3 +#define WM8350_RTC_DSW_8HZ 4 +#define WM8350_RTC_DSW_16HZ 5 +#define WM8350_RTC_DSW_32HZ 6 +#define WM8350_RTC_DSW_64HZ 7 +#define WM8350_RTC_DSW_128HZ 8 +#define WM8350_RTC_DSW_256HZ 9 +#define WM8350_RTC_DSW_512HZ 10 +#define WM8350_RTC_DSW_1024HZ 11 + +/* + * R218 (0xDA) - RTC Tick Control + */ +#define WM8350_RTC_TICKSTS 0x4000 +#define WM8350_RTC_CLKSRC 0x2000 +#define WM8350_RTC_TRIM_MASK 0x03FF + +/* + * RTC Interrupts. 
+ */ +#define WM8350_IRQ_RTC_PER 7 +#define WM8350_IRQ_RTC_SEC 8 +#define WM8350_IRQ_RTC_ALM 9 + +struct wm8350_rtc { + struct platform_device *pdev; + struct rtc_device *rtc; + int alarm_enabled; /* used over suspend/resume */ + int update_enabled; +}; + +#endif diff --git a/include/linux/mfd/wm8350/supply.h b/include/linux/mfd/wm8350/supply.h new file mode 100644 index 000000000..8dc93673e --- /dev/null +++ b/include/linux/mfd/wm8350/supply.h @@ -0,0 +1,134 @@ +/* + * supply.h -- Power Supply Driver for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_WM8350_SUPPLY_H_ +#define __LINUX_MFD_WM8350_SUPPLY_H_ + +#include +#include + +/* + * Charger registers + */ +#define WM8350_BATTERY_CHARGER_CONTROL_1 0xA8 +#define WM8350_BATTERY_CHARGER_CONTROL_2 0xA9 +#define WM8350_BATTERY_CHARGER_CONTROL_3 0xAA + +/* + * R168 (0xA8) - Battery Charger Control 1 + */ +#define WM8350_CHG_ENA_R168 0x8000 +#define WM8350_CHG_THR 0x2000 +#define WM8350_CHG_EOC_SEL_MASK 0x1C00 +#define WM8350_CHG_TRICKLE_TEMP_CHOKE 0x0200 +#define WM8350_CHG_TRICKLE_USB_CHOKE 0x0100 +#define WM8350_CHG_RECOVER_T 0x0080 +#define WM8350_CHG_END_ACT 0x0040 +#define WM8350_CHG_FAST 0x0020 +#define WM8350_CHG_FAST_USB_THROTTLE 0x0010 +#define WM8350_CHG_NTC_MON 0x0008 +#define WM8350_CHG_BATT_HOT_MON 0x0004 +#define WM8350_CHG_BATT_COLD_MON 0x0002 +#define WM8350_CHG_CHIP_TEMP_MON 0x0001 + +/* + * R169 (0xA9) - Battery Charger Control 2 + */ +#define WM8350_CHG_ACTIVE 0x8000 +#define WM8350_CHG_PAUSE 0x4000 +#define WM8350_CHG_STS_MASK 0x3000 +#define WM8350_CHG_TIME_MASK 0x0F00 +#define WM8350_CHG_MASK_WALL_FB 0x0080 +#define WM8350_CHG_TRICKLE_SEL 0x0040 +#define WM8350_CHG_VSEL_MASK 0x0030 +#define WM8350_CHG_ISEL_MASK 0x000F +#define WM8350_CHG_STS_OFF 0x0000 +#define WM8350_CHG_STS_TRICKLE 0x1000 +#define WM8350_CHG_STS_FAST 0x2000 + +/* + * R170 (0xAA) - Battery Charger Control 3 + */ +#define WM8350_CHG_THROTTLE_T_MASK 0x0060 +#define WM8350_CHG_SMART 0x0010 +#define WM8350_CHG_TIMER_ADJT_MASK 0x000F + +/* + * Charger Interrupts + */ +#define WM8350_IRQ_CHG_BAT_HOT 0 +#define WM8350_IRQ_CHG_BAT_COLD 1 +#define WM8350_IRQ_CHG_BAT_FAIL 2 +#define WM8350_IRQ_CHG_TO 3 +#define WM8350_IRQ_CHG_END 4 +#define WM8350_IRQ_CHG_START 5 +#define WM8350_IRQ_CHG_FAST_RDY 6 +#define WM8350_IRQ_CHG_VBATT_LT_3P9 10 +#define WM8350_IRQ_CHG_VBATT_LT_3P1 11 +#define WM8350_IRQ_CHG_VBATT_LT_2P85 12 + +/* + * Charger Policy + */ +#define WM8350_CHG_TRICKLE_50mA (0 << 6) +#define WM8350_CHG_TRICKLE_100mA (1 << 6) +#define WM8350_CHG_4_05V (0 << 4) +#define WM8350_CHG_4_10V (1 << 4) +#define WM8350_CHG_4_15V (2 << 4) +#define WM8350_CHG_4_20V (3 << 4) +#define WM8350_CHG_FAST_LIMIT_mA(x) ((x / 50) & 0xf) +#define WM8350_CHG_EOC_mA(x) (((x - 10) & 0x7) << 10) +#define WM8350_CHG_TRICKLE_3_1V (0 << 13) +#define WM8350_CHG_TRICKLE_3_9V (1 << 13) + +/* + * Supply Registers. + */ +#define WM8350_USB_VOLTAGE_READBACK 0x9C +#define WM8350_LINE_VOLTAGE_READBACK 0x9D +#define WM8350_BATT_VOLTAGE_READBACK 0x9E + +/* + * Supply Interrupts. + */ +#define WM8350_IRQ_USB_LIMIT 15 +#define WM8350_IRQ_EXT_USB_FB 36 +#define WM8350_IRQ_EXT_WALL_FB 37 +#define WM8350_IRQ_EXT_BAT_FB 38 + +/* + * Policy to control charger state machine. 
+ */ +struct wm8350_charger_policy { + + /* charger state machine policy - set in machine driver */ + int eoc_mA; /* end of charge current (mA) */ + int charge_mV; /* charge voltage */ + int fast_limit_mA; /* fast charge current limit */ + int fast_limit_USB_mA; /* USB fast charge current limit */ + int charge_timeout; /* charge timeout (mins) */ + int trickle_start_mV; /* trickle charge starts at mV */ + int trickle_charge_mA; /* trickle charge current */ + int trickle_charge_USB_mA; /* USB trickle charge current */ +}; + +struct wm8350_power { + struct platform_device *pdev; + struct power_supply *battery; + struct power_supply *usb; + struct power_supply *ac; + struct wm8350_charger_policy *policy; + + int rev_g_coeff; +}; + +#endif diff --git a/include/linux/mfd/wm8350/wdt.h b/include/linux/mfd/wm8350/wdt.h new file mode 100644 index 000000000..f6135b5e5 --- /dev/null +++ b/include/linux/mfd/wm8350/wdt.h @@ -0,0 +1,28 @@ +/* + * wdt.h -- Watchdog Driver for Wolfson WM8350 PMIC + * + * Copyright 2007, 2008 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_MFD_WM8350_WDT_H_ +#define __LINUX_MFD_WM8350_WDT_H_ + +#include + +#define WM8350_WDOG_HIB_MODE 0x0080 +#define WM8350_WDOG_DEBUG 0x0040 +#define WM8350_WDOG_MODE_MASK 0x0030 +#define WM8350_WDOG_TO_MASK 0x0007 + +#define WM8350_IRQ_SYS_WDOG_TO 24 + +struct wm8350_wdt { + struct platform_device *pdev; +}; + +#endif diff --git a/include/linux/mfd/wm8400-audio.h b/include/linux/mfd/wm8400-audio.h new file mode 100644 index 000000000..e06ed3eb1 --- /dev/null +++ b/include/linux/mfd/wm8400-audio.h @@ -0,0 +1,1187 @@ +/* + * wm8400 private definitions for audio + * + * Copyright 2008 Wolfson Microelectronics plc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __LINUX_MFD_WM8400_AUDIO_H +#define __LINUX_MFD_WM8400_AUDIO_H + +#include + +/* + * R2 (0x02) - Power Management (1) + */ +#define WM8400_CODEC_ENA 0x8000 /* CODEC_ENA */ +#define WM8400_CODEC_ENA_MASK 0x8000 /* CODEC_ENA */ +#define WM8400_CODEC_ENA_SHIFT 15 /* CODEC_ENA */ +#define WM8400_CODEC_ENA_WIDTH 1 /* CODEC_ENA */ +#define WM8400_SYSCLK_ENA 0x4000 /* SYSCLK_ENA */ +#define WM8400_SYSCLK_ENA_MASK 0x4000 /* SYSCLK_ENA */ +#define WM8400_SYSCLK_ENA_SHIFT 14 /* SYSCLK_ENA */ +#define WM8400_SYSCLK_ENA_WIDTH 1 /* SYSCLK_ENA */ +#define WM8400_SPK_MIX_ENA 0x2000 /* SPK_MIX_ENA */ +#define WM8400_SPK_MIX_ENA_MASK 0x2000 /* SPK_MIX_ENA */ +#define WM8400_SPK_MIX_ENA_SHIFT 13 /* SPK_MIX_ENA */ +#define WM8400_SPK_MIX_ENA_WIDTH 1 /* SPK_MIX_ENA */ +#define WM8400_SPK_ENA 0x1000 /* SPK_ENA */ +#define WM8400_SPK_ENA_MASK 0x1000 /* SPK_ENA */ +#define WM8400_SPK_ENA_SHIFT 12 /* SPK_ENA */ +#define WM8400_SPK_ENA_WIDTH 1 /* SPK_ENA */ +#define WM8400_OUT3_ENA 0x0800 /* OUT3_ENA */ +#define WM8400_OUT3_ENA_MASK 0x0800 /* OUT3_ENA */ +#define WM8400_OUT3_ENA_SHIFT 11 /* OUT3_ENA */ +#define WM8400_OUT3_ENA_WIDTH 1 /* OUT3_ENA */ +#define WM8400_OUT4_ENA 0x0400 /* OUT4_ENA */ +#define WM8400_OUT4_ENA_MASK 0x0400 /* OUT4_ENA */ +#define WM8400_OUT4_ENA_SHIFT 10 /* OUT4_ENA */ +#define WM8400_OUT4_ENA_WIDTH 1 /* OUT4_ENA */ +#define WM8400_LOUT_ENA 0x0200 /* LOUT_ENA */ +#define WM8400_LOUT_ENA_MASK 0x0200 /* LOUT_ENA */ +#define WM8400_LOUT_ENA_SHIFT 9 /* LOUT_ENA */ +#define WM8400_LOUT_ENA_WIDTH 1 /* LOUT_ENA */ +#define WM8400_ROUT_ENA 0x0100 /* ROUT_ENA */ +#define WM8400_ROUT_ENA_MASK 0x0100 /* ROUT_ENA */ +#define WM8400_ROUT_ENA_SHIFT 8 /* ROUT_ENA */ +#define WM8400_ROUT_ENA_WIDTH 1 /* ROUT_ENA */ +#define WM8400_MIC1BIAS_ENA 0x0010 /* MIC1BIAS_ENA */ +#define WM8400_MIC1BIAS_ENA_MASK 0x0010 /* MIC1BIAS_ENA */ +#define WM8400_MIC1BIAS_ENA_SHIFT 4 /* MIC1BIAS_ENA */ +#define WM8400_MIC1BIAS_ENA_WIDTH 1 /* MIC1BIAS_ENA */ +#define WM8400_VMID_MODE_MASK 0x0006 /* VMID_MODE - [2:1] */ +#define WM8400_VMID_MODE_SHIFT 1 /* VMID_MODE - [2:1] */ +#define WM8400_VMID_MODE_WIDTH 2 /* VMID_MODE - [2:1] */ +#define WM8400_VREF_ENA 0x0001 /* VREF_ENA */ +#define WM8400_VREF_ENA_MASK 0x0001 /* VREF_ENA */ +#define WM8400_VREF_ENA_SHIFT 0 /* VREF_ENA */ +#define WM8400_VREF_ENA_WIDTH 1 /* VREF_ENA */ + +/* + * R3 (0x03) - Power Management (2) + */ +#define WM8400_FLL_ENA 0x8000 /* FLL_ENA */ +#define WM8400_FLL_ENA_MASK 0x8000 /* FLL_ENA */ +#define WM8400_FLL_ENA_SHIFT 15 /* FLL_ENA */ +#define WM8400_FLL_ENA_WIDTH 1 /* FLL_ENA */ +#define WM8400_TSHUT_ENA 0x4000 /* TSHUT_ENA */ +#define WM8400_TSHUT_ENA_MASK 0x4000 /* TSHUT_ENA */ +#define WM8400_TSHUT_ENA_SHIFT 14 /* TSHUT_ENA */ +#define WM8400_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */ +#define WM8400_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */ +#define WM8400_TSHUT_OPDIS_MASK 0x2000 /* TSHUT_OPDIS */ +#define WM8400_TSHUT_OPDIS_SHIFT 13 /* TSHUT_OPDIS */ +#define WM8400_TSHUT_OPDIS_WIDTH 1 /* TSHUT_OPDIS */ +#define WM8400_OPCLK_ENA 0x0800 /* OPCLK_ENA */ +#define WM8400_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */ +#define WM8400_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */ +#define WM8400_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */ +#define WM8400_AINL_ENA 0x0200 /* AINL_ENA */ +#define WM8400_AINL_ENA_MASK 0x0200 /* AINL_ENA */ +#define WM8400_AINL_ENA_SHIFT 9 /* AINL_ENA */ +#define WM8400_AINL_ENA_WIDTH 1 /* AINL_ENA */ +#define WM8400_AINR_ENA 0x0100 /* AINR_ENA */ +#define WM8400_AINR_ENA_MASK 0x0100 /* AINR_ENA */ +#define WM8400_AINR_ENA_SHIFT 8 /* AINR_ENA */ 
+#define WM8400_AINR_ENA_WIDTH 1 /* AINR_ENA */ +#define WM8400_LIN34_ENA 0x0080 /* LIN34_ENA */ +#define WM8400_LIN34_ENA_MASK 0x0080 /* LIN34_ENA */ +#define WM8400_LIN34_ENA_SHIFT 7 /* LIN34_ENA */ +#define WM8400_LIN34_ENA_WIDTH 1 /* LIN34_ENA */ +#define WM8400_LIN12_ENA 0x0040 /* LIN12_ENA */ +#define WM8400_LIN12_ENA_MASK 0x0040 /* LIN12_ENA */ +#define WM8400_LIN12_ENA_SHIFT 6 /* LIN12_ENA */ +#define WM8400_LIN12_ENA_WIDTH 1 /* LIN12_ENA */ +#define WM8400_RIN34_ENA 0x0020 /* RIN34_ENA */ +#define WM8400_RIN34_ENA_MASK 0x0020 /* RIN34_ENA */ +#define WM8400_RIN34_ENA_SHIFT 5 /* RIN34_ENA */ +#define WM8400_RIN34_ENA_WIDTH 1 /* RIN34_ENA */ +#define WM8400_RIN12_ENA 0x0010 /* RIN12_ENA */ +#define WM8400_RIN12_ENA_MASK 0x0010 /* RIN12_ENA */ +#define WM8400_RIN12_ENA_SHIFT 4 /* RIN12_ENA */ +#define WM8400_RIN12_ENA_WIDTH 1 /* RIN12_ENA */ +#define WM8400_ADCL_ENA 0x0002 /* ADCL_ENA */ +#define WM8400_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */ +#define WM8400_ADCL_ENA_SHIFT 1 /* ADCL_ENA */ +#define WM8400_ADCL_ENA_WIDTH 1 /* ADCL_ENA */ +#define WM8400_ADCR_ENA 0x0001 /* ADCR_ENA */ +#define WM8400_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */ +#define WM8400_ADCR_ENA_SHIFT 0 /* ADCR_ENA */ +#define WM8400_ADCR_ENA_WIDTH 1 /* ADCR_ENA */ + +/* + * R4 (0x04) - Power Management (3) + */ +#define WM8400_LON_ENA 0x2000 /* LON_ENA */ +#define WM8400_LON_ENA_MASK 0x2000 /* LON_ENA */ +#define WM8400_LON_ENA_SHIFT 13 /* LON_ENA */ +#define WM8400_LON_ENA_WIDTH 1 /* LON_ENA */ +#define WM8400_LOP_ENA 0x1000 /* LOP_ENA */ +#define WM8400_LOP_ENA_MASK 0x1000 /* LOP_ENA */ +#define WM8400_LOP_ENA_SHIFT 12 /* LOP_ENA */ +#define WM8400_LOP_ENA_WIDTH 1 /* LOP_ENA */ +#define WM8400_RON_ENA 0x0800 /* RON_ENA */ +#define WM8400_RON_ENA_MASK 0x0800 /* RON_ENA */ +#define WM8400_RON_ENA_SHIFT 11 /* RON_ENA */ +#define WM8400_RON_ENA_WIDTH 1 /* RON_ENA */ +#define WM8400_ROP_ENA 0x0400 /* ROP_ENA */ +#define WM8400_ROP_ENA_MASK 0x0400 /* ROP_ENA */ +#define WM8400_ROP_ENA_SHIFT 10 /* ROP_ENA */ +#define WM8400_ROP_ENA_WIDTH 1 /* ROP_ENA */ +#define WM8400_LOPGA_ENA 0x0080 /* LOPGA_ENA */ +#define WM8400_LOPGA_ENA_MASK 0x0080 /* LOPGA_ENA */ +#define WM8400_LOPGA_ENA_SHIFT 7 /* LOPGA_ENA */ +#define WM8400_LOPGA_ENA_WIDTH 1 /* LOPGA_ENA */ +#define WM8400_ROPGA_ENA 0x0040 /* ROPGA_ENA */ +#define WM8400_ROPGA_ENA_MASK 0x0040 /* ROPGA_ENA */ +#define WM8400_ROPGA_ENA_SHIFT 6 /* ROPGA_ENA */ +#define WM8400_ROPGA_ENA_WIDTH 1 /* ROPGA_ENA */ +#define WM8400_LOMIX_ENA 0x0020 /* LOMIX_ENA */ +#define WM8400_LOMIX_ENA_MASK 0x0020 /* LOMIX_ENA */ +#define WM8400_LOMIX_ENA_SHIFT 5 /* LOMIX_ENA */ +#define WM8400_LOMIX_ENA_WIDTH 1 /* LOMIX_ENA */ +#define WM8400_ROMIX_ENA 0x0010 /* ROMIX_ENA */ +#define WM8400_ROMIX_ENA_MASK 0x0010 /* ROMIX_ENA */ +#define WM8400_ROMIX_ENA_SHIFT 4 /* ROMIX_ENA */ +#define WM8400_ROMIX_ENA_WIDTH 1 /* ROMIX_ENA */ +#define WM8400_DACL_ENA 0x0002 /* DACL_ENA */ +#define WM8400_DACL_ENA_MASK 0x0002 /* DACL_ENA */ +#define WM8400_DACL_ENA_SHIFT 1 /* DACL_ENA */ +#define WM8400_DACL_ENA_WIDTH 1 /* DACL_ENA */ +#define WM8400_DACR_ENA 0x0001 /* DACR_ENA */ +#define WM8400_DACR_ENA_MASK 0x0001 /* DACR_ENA */ +#define WM8400_DACR_ENA_SHIFT 0 /* DACR_ENA */ +#define WM8400_DACR_ENA_WIDTH 1 /* DACR_ENA */ + +/* + * R5 (0x05) - Audio Interface (1) + */ +#define WM8400_AIFADCL_SRC 0x8000 /* AIFADCL_SRC */ +#define WM8400_AIFADCL_SRC_MASK 0x8000 /* AIFADCL_SRC */ +#define WM8400_AIFADCL_SRC_SHIFT 15 /* AIFADCL_SRC */ +#define WM8400_AIFADCL_SRC_WIDTH 1 /* AIFADCL_SRC */ +#define WM8400_AIFADCR_SRC 0x4000 
/* AIFADCR_SRC */ +#define WM8400_AIFADCR_SRC_MASK 0x4000 /* AIFADCR_SRC */ +#define WM8400_AIFADCR_SRC_SHIFT 14 /* AIFADCR_SRC */ +#define WM8400_AIFADCR_SRC_WIDTH 1 /* AIFADCR_SRC */ +#define WM8400_AIFADC_TDM 0x2000 /* AIFADC_TDM */ +#define WM8400_AIFADC_TDM_MASK 0x2000 /* AIFADC_TDM */ +#define WM8400_AIFADC_TDM_SHIFT 13 /* AIFADC_TDM */ +#define WM8400_AIFADC_TDM_WIDTH 1 /* AIFADC_TDM */ +#define WM8400_AIFADC_TDM_CHAN 0x1000 /* AIFADC_TDM_CHAN */ +#define WM8400_AIFADC_TDM_CHAN_MASK 0x1000 /* AIFADC_TDM_CHAN */ +#define WM8400_AIFADC_TDM_CHAN_SHIFT 12 /* AIFADC_TDM_CHAN */ +#define WM8400_AIFADC_TDM_CHAN_WIDTH 1 /* AIFADC_TDM_CHAN */ +#define WM8400_AIF_BCLK_INV 0x0100 /* AIF_BCLK_INV */ +#define WM8400_AIF_BCLK_INV_MASK 0x0100 /* AIF_BCLK_INV */ +#define WM8400_AIF_BCLK_INV_SHIFT 8 /* AIF_BCLK_INV */ +#define WM8400_AIF_BCLK_INV_WIDTH 1 /* AIF_BCLK_INV */ +#define WM8400_AIF_LRCLK_INV 0x0080 /* AIF_LRCLK_INV */ +#define WM8400_AIF_LRCLK_INV_MASK 0x0080 /* AIF_LRCLK_INV */ +#define WM8400_AIF_LRCLK_INV_SHIFT 7 /* AIF_LRCLK_INV */ +#define WM8400_AIF_LRCLK_INV_WIDTH 1 /* AIF_LRCLK_INV */ +#define WM8400_AIF_WL_MASK 0x0060 /* AIF_WL - [6:5] */ +#define WM8400_AIF_WL_SHIFT 5 /* AIF_WL - [6:5] */ +#define WM8400_AIF_WL_WIDTH 2 /* AIF_WL - [6:5] */ +#define WM8400_AIF_WL_16BITS (0 << 5) +#define WM8400_AIF_WL_20BITS (1 << 5) +#define WM8400_AIF_WL_24BITS (2 << 5) +#define WM8400_AIF_WL_32BITS (3 << 5) +#define WM8400_AIF_FMT_MASK 0x0018 /* AIF_FMT - [4:3] */ +#define WM8400_AIF_FMT_SHIFT 3 /* AIF_FMT - [4:3] */ +#define WM8400_AIF_FMT_WIDTH 2 /* AIF_FMT - [4:3] */ +#define WM8400_AIF_FMT_RIGHTJ (0 << 3) +#define WM8400_AIF_FMT_LEFTJ (1 << 3) +#define WM8400_AIF_FMT_I2S (2 << 3) +#define WM8400_AIF_FMT_DSP (3 << 3) + +/* + * R6 (0x06) - Audio Interface (2) + */ +#define WM8400_DACL_SRC 0x8000 /* DACL_SRC */ +#define WM8400_DACL_SRC_MASK 0x8000 /* DACL_SRC */ +#define WM8400_DACL_SRC_SHIFT 15 /* DACL_SRC */ +#define WM8400_DACL_SRC_WIDTH 1 /* DACL_SRC */ +#define WM8400_DACR_SRC 0x4000 /* DACR_SRC */ +#define WM8400_DACR_SRC_MASK 0x4000 /* DACR_SRC */ +#define WM8400_DACR_SRC_SHIFT 14 /* DACR_SRC */ +#define WM8400_DACR_SRC_WIDTH 1 /* DACR_SRC */ +#define WM8400_AIFDAC_TDM 0x2000 /* AIFDAC_TDM */ +#define WM8400_AIFDAC_TDM_MASK 0x2000 /* AIFDAC_TDM */ +#define WM8400_AIFDAC_TDM_SHIFT 13 /* AIFDAC_TDM */ +#define WM8400_AIFDAC_TDM_WIDTH 1 /* AIFDAC_TDM */ +#define WM8400_AIFDAC_TDM_CHAN 0x1000 /* AIFDAC_TDM_CHAN */ +#define WM8400_AIFDAC_TDM_CHAN_MASK 0x1000 /* AIFDAC_TDM_CHAN */ +#define WM8400_AIFDAC_TDM_CHAN_SHIFT 12 /* AIFDAC_TDM_CHAN */ +#define WM8400_AIFDAC_TDM_CHAN_WIDTH 1 /* AIFDAC_TDM_CHAN */ +#define WM8400_DAC_BOOST_MASK 0x0C00 /* DAC_BOOST - [11:10] */ +#define WM8400_DAC_BOOST_SHIFT 10 /* DAC_BOOST - [11:10] */ +#define WM8400_DAC_BOOST_WIDTH 2 /* DAC_BOOST - [11:10] */ +#define WM8400_DAC_COMP 0x0010 /* DAC_COMP */ +#define WM8400_DAC_COMP_MASK 0x0010 /* DAC_COMP */ +#define WM8400_DAC_COMP_SHIFT 4 /* DAC_COMP */ +#define WM8400_DAC_COMP_WIDTH 1 /* DAC_COMP */ +#define WM8400_DAC_COMPMODE 0x0008 /* DAC_COMPMODE */ +#define WM8400_DAC_COMPMODE_MASK 0x0008 /* DAC_COMPMODE */ +#define WM8400_DAC_COMPMODE_SHIFT 3 /* DAC_COMPMODE */ +#define WM8400_DAC_COMPMODE_WIDTH 1 /* DAC_COMPMODE */ +#define WM8400_ADC_COMP 0x0004 /* ADC_COMP */ +#define WM8400_ADC_COMP_MASK 0x0004 /* ADC_COMP */ +#define WM8400_ADC_COMP_SHIFT 2 /* ADC_COMP */ +#define WM8400_ADC_COMP_WIDTH 1 /* ADC_COMP */ +#define WM8400_ADC_COMPMODE 0x0002 /* ADC_COMPMODE */ +#define WM8400_ADC_COMPMODE_MASK 0x0002 /* 
ADC_COMPMODE */ +#define WM8400_ADC_COMPMODE_SHIFT 1 /* ADC_COMPMODE */ +#define WM8400_ADC_COMPMODE_WIDTH 1 /* ADC_COMPMODE */ +#define WM8400_LOOPBACK 0x0001 /* LOOPBACK */ +#define WM8400_LOOPBACK_MASK 0x0001 /* LOOPBACK */ +#define WM8400_LOOPBACK_SHIFT 0 /* LOOPBACK */ +#define WM8400_LOOPBACK_WIDTH 1 /* LOOPBACK */ + +/* + * R7 (0x07) - Clocking (1) + */ +#define WM8400_TOCLK_RATE 0x8000 /* TOCLK_RATE */ +#define WM8400_TOCLK_RATE_MASK 0x8000 /* TOCLK_RATE */ +#define WM8400_TOCLK_RATE_SHIFT 15 /* TOCLK_RATE */ +#define WM8400_TOCLK_RATE_WIDTH 1 /* TOCLK_RATE */ +#define WM8400_TOCLK_ENA 0x4000 /* TOCLK_ENA */ +#define WM8400_TOCLK_ENA_MASK 0x4000 /* TOCLK_ENA */ +#define WM8400_TOCLK_ENA_SHIFT 14 /* TOCLK_ENA */ +#define WM8400_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */ +#define WM8400_OPCLKDIV_MASK 0x1E00 /* OPCLKDIV - [12:9] */ +#define WM8400_OPCLKDIV_SHIFT 9 /* OPCLKDIV - [12:9] */ +#define WM8400_OPCLKDIV_WIDTH 4 /* OPCLKDIV - [12:9] */ +#define WM8400_DCLKDIV_MASK 0x01C0 /* DCLKDIV - [8:6] */ +#define WM8400_DCLKDIV_SHIFT 6 /* DCLKDIV - [8:6] */ +#define WM8400_DCLKDIV_WIDTH 3 /* DCLKDIV - [8:6] */ +#define WM8400_BCLK_DIV_MASK 0x001E /* BCLK_DIV - [4:1] */ +#define WM8400_BCLK_DIV_SHIFT 1 /* BCLK_DIV - [4:1] */ +#define WM8400_BCLK_DIV_WIDTH 4 /* BCLK_DIV - [4:1] */ + +/* + * R8 (0x08) - Clocking (2) + */ +#define WM8400_MCLK_SRC 0x8000 /* MCLK_SRC */ +#define WM8400_MCLK_SRC_MASK 0x8000 /* MCLK_SRC */ +#define WM8400_MCLK_SRC_SHIFT 15 /* MCLK_SRC */ +#define WM8400_MCLK_SRC_WIDTH 1 /* MCLK_SRC */ +#define WM8400_SYSCLK_SRC 0x4000 /* SYSCLK_SRC */ +#define WM8400_SYSCLK_SRC_MASK 0x4000 /* SYSCLK_SRC */ +#define WM8400_SYSCLK_SRC_SHIFT 14 /* SYSCLK_SRC */ +#define WM8400_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */ +#define WM8400_CLK_FORCE 0x2000 /* CLK_FORCE */ +#define WM8400_CLK_FORCE_MASK 0x2000 /* CLK_FORCE */ +#define WM8400_CLK_FORCE_SHIFT 13 /* CLK_FORCE */ +#define WM8400_CLK_FORCE_WIDTH 1 /* CLK_FORCE */ +#define WM8400_MCLK_DIV_MASK 0x1800 /* MCLK_DIV - [12:11] */ +#define WM8400_MCLK_DIV_SHIFT 11 /* MCLK_DIV - [12:11] */ +#define WM8400_MCLK_DIV_WIDTH 2 /* MCLK_DIV - [12:11] */ +#define WM8400_MCLK_INV 0x0400 /* MCLK_INV */ +#define WM8400_MCLK_INV_MASK 0x0400 /* MCLK_INV */ +#define WM8400_MCLK_INV_SHIFT 10 /* MCLK_INV */ +#define WM8400_MCLK_INV_WIDTH 1 /* MCLK_INV */ +#define WM8400_ADC_CLKDIV_MASK 0x00E0 /* ADC_CLKDIV - [7:5] */ +#define WM8400_ADC_CLKDIV_SHIFT 5 /* ADC_CLKDIV - [7:5] */ +#define WM8400_ADC_CLKDIV_WIDTH 3 /* ADC_CLKDIV - [7:5] */ +#define WM8400_DAC_CLKDIV_MASK 0x001C /* DAC_CLKDIV - [4:2] */ +#define WM8400_DAC_CLKDIV_SHIFT 2 /* DAC_CLKDIV - [4:2] */ +#define WM8400_DAC_CLKDIV_WIDTH 3 /* DAC_CLKDIV - [4:2] */ + +/* + * R9 (0x09) - Audio Interface (3) + */ +#define WM8400_AIF_MSTR1 0x8000 /* AIF_MSTR1 */ +#define WM8400_AIF_MSTR1_MASK 0x8000 /* AIF_MSTR1 */ +#define WM8400_AIF_MSTR1_SHIFT 15 /* AIF_MSTR1 */ +#define WM8400_AIF_MSTR1_WIDTH 1 /* AIF_MSTR1 */ +#define WM8400_AIF_MSTR2 0x4000 /* AIF_MSTR2 */ +#define WM8400_AIF_MSTR2_MASK 0x4000 /* AIF_MSTR2 */ +#define WM8400_AIF_MSTR2_SHIFT 14 /* AIF_MSTR2 */ +#define WM8400_AIF_MSTR2_WIDTH 1 /* AIF_MSTR2 */ +#define WM8400_AIF_SEL 0x2000 /* AIF_SEL */ +#define WM8400_AIF_SEL_MASK 0x2000 /* AIF_SEL */ +#define WM8400_AIF_SEL_SHIFT 13 /* AIF_SEL */ +#define WM8400_AIF_SEL_WIDTH 1 /* AIF_SEL */ +#define WM8400_ADCLRC_DIR 0x0800 /* ADCLRC_DIR */ +#define WM8400_ADCLRC_DIR_MASK 0x0800 /* ADCLRC_DIR */ +#define WM8400_ADCLRC_DIR_SHIFT 11 /* ADCLRC_DIR */ +#define WM8400_ADCLRC_DIR_WIDTH 1 /* ADCLRC_DIR */ +#define 
WM8400_ADCLRC_RATE_MASK 0x07FF /* ADCLRC_RATE - [10:0] */ +#define WM8400_ADCLRC_RATE_SHIFT 0 /* ADCLRC_RATE - [10:0] */ +#define WM8400_ADCLRC_RATE_WIDTH 11 /* ADCLRC_RATE - [10:0] */ + +/* + * R10 (0x0A) - Audio Interface (4) + */ +#define WM8400_ALRCGPIO1 0x8000 /* ALRCGPIO1 */ +#define WM8400_ALRCGPIO1_MASK 0x8000 /* ALRCGPIO1 */ +#define WM8400_ALRCGPIO1_SHIFT 15 /* ALRCGPIO1 */ +#define WM8400_ALRCGPIO1_WIDTH 1 /* ALRCGPIO1 */ +#define WM8400_ALRCBGPIO6 0x4000 /* ALRCBGPIO6 */ +#define WM8400_ALRCBGPIO6_MASK 0x4000 /* ALRCBGPIO6 */ +#define WM8400_ALRCBGPIO6_SHIFT 14 /* ALRCBGPIO6 */ +#define WM8400_ALRCBGPIO6_WIDTH 1 /* ALRCBGPIO6 */ +#define WM8400_AIF_TRIS 0x2000 /* AIF_TRIS */ +#define WM8400_AIF_TRIS_MASK 0x2000 /* AIF_TRIS */ +#define WM8400_AIF_TRIS_SHIFT 13 /* AIF_TRIS */ +#define WM8400_AIF_TRIS_WIDTH 1 /* AIF_TRIS */ +#define WM8400_DACLRC_DIR 0x0800 /* DACLRC_DIR */ +#define WM8400_DACLRC_DIR_MASK 0x0800 /* DACLRC_DIR */ +#define WM8400_DACLRC_DIR_SHIFT 11 /* DACLRC_DIR */ +#define WM8400_DACLRC_DIR_WIDTH 1 /* DACLRC_DIR */ +#define WM8400_DACLRC_RATE_MASK 0x07FF /* DACLRC_RATE - [10:0] */ +#define WM8400_DACLRC_RATE_SHIFT 0 /* DACLRC_RATE - [10:0] */ +#define WM8400_DACLRC_RATE_WIDTH 11 /* DACLRC_RATE - [10:0] */ + +/* + * R11 (0x0B) - DAC CTRL + */ +#define WM8400_DAC_SDMCLK_RATE 0x2000 /* DAC_SDMCLK_RATE */ +#define WM8400_DAC_SDMCLK_RATE_MASK 0x2000 /* DAC_SDMCLK_RATE */ +#define WM8400_DAC_SDMCLK_RATE_SHIFT 13 /* DAC_SDMCLK_RATE */ +#define WM8400_DAC_SDMCLK_RATE_WIDTH 1 /* DAC_SDMCLK_RATE */ +#define WM8400_AIF_LRCLKRATE 0x0400 /* AIF_LRCLKRATE */ +#define WM8400_AIF_LRCLKRATE_MASK 0x0400 /* AIF_LRCLKRATE */ +#define WM8400_AIF_LRCLKRATE_SHIFT 10 /* AIF_LRCLKRATE */ +#define WM8400_AIF_LRCLKRATE_WIDTH 1 /* AIF_LRCLKRATE */ +#define WM8400_DAC_MONO 0x0200 /* DAC_MONO */ +#define WM8400_DAC_MONO_MASK 0x0200 /* DAC_MONO */ +#define WM8400_DAC_MONO_SHIFT 9 /* DAC_MONO */ +#define WM8400_DAC_MONO_WIDTH 1 /* DAC_MONO */ +#define WM8400_DAC_SB_FILT 0x0100 /* DAC_SB_FILT */ +#define WM8400_DAC_SB_FILT_MASK 0x0100 /* DAC_SB_FILT */ +#define WM8400_DAC_SB_FILT_SHIFT 8 /* DAC_SB_FILT */ +#define WM8400_DAC_SB_FILT_WIDTH 1 /* DAC_SB_FILT */ +#define WM8400_DAC_MUTERATE 0x0080 /* DAC_MUTERATE */ +#define WM8400_DAC_MUTERATE_MASK 0x0080 /* DAC_MUTERATE */ +#define WM8400_DAC_MUTERATE_SHIFT 7 /* DAC_MUTERATE */ +#define WM8400_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */ +#define WM8400_DAC_MUTEMODE 0x0040 /* DAC_MUTEMODE */ +#define WM8400_DAC_MUTEMODE_MASK 0x0040 /* DAC_MUTEMODE */ +#define WM8400_DAC_MUTEMODE_SHIFT 6 /* DAC_MUTEMODE */ +#define WM8400_DAC_MUTEMODE_WIDTH 1 /* DAC_MUTEMODE */ +#define WM8400_DEEMP_MASK 0x0030 /* DEEMP - [5:4] */ +#define WM8400_DEEMP_SHIFT 4 /* DEEMP - [5:4] */ +#define WM8400_DEEMP_WIDTH 2 /* DEEMP - [5:4] */ +#define WM8400_DAC_MUTE 0x0004 /* DAC_MUTE */ +#define WM8400_DAC_MUTE_MASK 0x0004 /* DAC_MUTE */ +#define WM8400_DAC_MUTE_SHIFT 2 /* DAC_MUTE */ +#define WM8400_DAC_MUTE_WIDTH 1 /* DAC_MUTE */ +#define WM8400_DACL_DATINV 0x0002 /* DACL_DATINV */ +#define WM8400_DACL_DATINV_MASK 0x0002 /* DACL_DATINV */ +#define WM8400_DACL_DATINV_SHIFT 1 /* DACL_DATINV */ +#define WM8400_DACL_DATINV_WIDTH 1 /* DACL_DATINV */ +#define WM8400_DACR_DATINV 0x0001 /* DACR_DATINV */ +#define WM8400_DACR_DATINV_MASK 0x0001 /* DACR_DATINV */ +#define WM8400_DACR_DATINV_SHIFT 0 /* DACR_DATINV */ +#define WM8400_DACR_DATINV_WIDTH 1 /* DACR_DATINV */ + +/* + * R12 (0x0C) - Left DAC Digital Volume + */ +#define WM8400_DAC_VU 0x0100 /* DAC_VU */ +#define WM8400_DAC_VU_MASK 
0x0100 /* DAC_VU */ +#define WM8400_DAC_VU_SHIFT 8 /* DAC_VU */ +#define WM8400_DAC_VU_WIDTH 1 /* DAC_VU */ +#define WM8400_DACL_VOL_MASK 0x00FF /* DACL_VOL - [7:0] */ +#define WM8400_DACL_VOL_SHIFT 0 /* DACL_VOL - [7:0] */ +#define WM8400_DACL_VOL_WIDTH 8 /* DACL_VOL - [7:0] */ + +/* + * R13 (0x0D) - Right DAC Digital Volume + */ +#define WM8400_DAC_VU 0x0100 /* DAC_VU */ +#define WM8400_DAC_VU_MASK 0x0100 /* DAC_VU */ +#define WM8400_DAC_VU_SHIFT 8 /* DAC_VU */ +#define WM8400_DAC_VU_WIDTH 1 /* DAC_VU */ +#define WM8400_DACR_VOL_MASK 0x00FF /* DACR_VOL - [7:0] */ +#define WM8400_DACR_VOL_SHIFT 0 /* DACR_VOL - [7:0] */ +#define WM8400_DACR_VOL_WIDTH 8 /* DACR_VOL - [7:0] */ + +/* + * R14 (0x0E) - Digital Side Tone + */ +#define WM8400_ADCL_DAC_SVOL_MASK 0x1E00 /* ADCL_DAC_SVOL - [12:9] */ +#define WM8400_ADCL_DAC_SVOL_SHIFT 9 /* ADCL_DAC_SVOL - [12:9] */ +#define WM8400_ADCL_DAC_SVOL_WIDTH 4 /* ADCL_DAC_SVOL - [12:9] */ +#define WM8400_ADCR_DAC_SVOL_MASK 0x01E0 /* ADCR_DAC_SVOL - [8:5] */ +#define WM8400_ADCR_DAC_SVOL_SHIFT 5 /* ADCR_DAC_SVOL - [8:5] */ +#define WM8400_ADCR_DAC_SVOL_WIDTH 4 /* ADCR_DAC_SVOL - [8:5] */ +#define WM8400_ADC_TO_DACL_MASK 0x000C /* ADC_TO_DACL - [3:2] */ +#define WM8400_ADC_TO_DACL_SHIFT 2 /* ADC_TO_DACL - [3:2] */ +#define WM8400_ADC_TO_DACL_WIDTH 2 /* ADC_TO_DACL - [3:2] */ +#define WM8400_ADC_TO_DACR_MASK 0x0003 /* ADC_TO_DACR - [1:0] */ +#define WM8400_ADC_TO_DACR_SHIFT 0 /* ADC_TO_DACR - [1:0] */ +#define WM8400_ADC_TO_DACR_WIDTH 2 /* ADC_TO_DACR - [1:0] */ + +/* + * R15 (0x0F) - ADC CTRL + */ +#define WM8400_ADC_HPF_ENA 0x0100 /* ADC_HPF_ENA */ +#define WM8400_ADC_HPF_ENA_MASK 0x0100 /* ADC_HPF_ENA */ +#define WM8400_ADC_HPF_ENA_SHIFT 8 /* ADC_HPF_ENA */ +#define WM8400_ADC_HPF_ENA_WIDTH 1 /* ADC_HPF_ENA */ +#define WM8400_ADC_HPF_CUT_MASK 0x0060 /* ADC_HPF_CUT - [6:5] */ +#define WM8400_ADC_HPF_CUT_SHIFT 5 /* ADC_HPF_CUT - [6:5] */ +#define WM8400_ADC_HPF_CUT_WIDTH 2 /* ADC_HPF_CUT - [6:5] */ +#define WM8400_ADCL_DATINV 0x0002 /* ADCL_DATINV */ +#define WM8400_ADCL_DATINV_MASK 0x0002 /* ADCL_DATINV */ +#define WM8400_ADCL_DATINV_SHIFT 1 /* ADCL_DATINV */ +#define WM8400_ADCL_DATINV_WIDTH 1 /* ADCL_DATINV */ +#define WM8400_ADCR_DATINV 0x0001 /* ADCR_DATINV */ +#define WM8400_ADCR_DATINV_MASK 0x0001 /* ADCR_DATINV */ +#define WM8400_ADCR_DATINV_SHIFT 0 /* ADCR_DATINV */ +#define WM8400_ADCR_DATINV_WIDTH 1 /* ADCR_DATINV */ + +/* + * R16 (0x10) - Left ADC Digital Volume + */ +#define WM8400_ADC_VU 0x0100 /* ADC_VU */ +#define WM8400_ADC_VU_MASK 0x0100 /* ADC_VU */ +#define WM8400_ADC_VU_SHIFT 8 /* ADC_VU */ +#define WM8400_ADC_VU_WIDTH 1 /* ADC_VU */ +#define WM8400_ADCL_VOL_MASK 0x00FF /* ADCL_VOL - [7:0] */ +#define WM8400_ADCL_VOL_SHIFT 0 /* ADCL_VOL - [7:0] */ +#define WM8400_ADCL_VOL_WIDTH 8 /* ADCL_VOL - [7:0] */ + +/* + * R17 (0x11) - Right ADC Digital Volume + */ +#define WM8400_ADC_VU 0x0100 /* ADC_VU */ +#define WM8400_ADC_VU_MASK 0x0100 /* ADC_VU */ +#define WM8400_ADC_VU_SHIFT 8 /* ADC_VU */ +#define WM8400_ADC_VU_WIDTH 1 /* ADC_VU */ +#define WM8400_ADCR_VOL_MASK 0x00FF /* ADCR_VOL - [7:0] */ +#define WM8400_ADCR_VOL_SHIFT 0 /* ADCR_VOL - [7:0] */ +#define WM8400_ADCR_VOL_WIDTH 8 /* ADCR_VOL - [7:0] */ + +/* + * R24 (0x18) - Left Line Input 1&2 Volume + */ +#define WM8400_IPVU 0x0100 /* IPVU */ +#define WM8400_IPVU_MASK 0x0100 /* IPVU */ +#define WM8400_IPVU_SHIFT 8 /* IPVU */ +#define WM8400_IPVU_WIDTH 1 /* IPVU */ +#define WM8400_LI12MUTE 0x0080 /* LI12MUTE */ +#define WM8400_LI12MUTE_MASK 0x0080 /* LI12MUTE */ +#define 
WM8400_LI12MUTE_SHIFT 7 /* LI12MUTE */ +#define WM8400_LI12MUTE_WIDTH 1 /* LI12MUTE */ +#define WM8400_LI12ZC 0x0040 /* LI12ZC */ +#define WM8400_LI12ZC_MASK 0x0040 /* LI12ZC */ +#define WM8400_LI12ZC_SHIFT 6 /* LI12ZC */ +#define WM8400_LI12ZC_WIDTH 1 /* LI12ZC */ +#define WM8400_LIN12VOL_MASK 0x001F /* LIN12VOL - [4:0] */ +#define WM8400_LIN12VOL_SHIFT 0 /* LIN12VOL - [4:0] */ +#define WM8400_LIN12VOL_WIDTH 5 /* LIN12VOL - [4:0] */ + +/* + * R25 (0x19) - Left Line Input 3&4 Volume + */ +#define WM8400_IPVU 0x0100 /* IPVU */ +#define WM8400_IPVU_MASK 0x0100 /* IPVU */ +#define WM8400_IPVU_SHIFT 8 /* IPVU */ +#define WM8400_IPVU_WIDTH 1 /* IPVU */ +#define WM8400_LI34MUTE 0x0080 /* LI34MUTE */ +#define WM8400_LI34MUTE_MASK 0x0080 /* LI34MUTE */ +#define WM8400_LI34MUTE_SHIFT 7 /* LI34MUTE */ +#define WM8400_LI34MUTE_WIDTH 1 /* LI34MUTE */ +#define WM8400_LI34ZC 0x0040 /* LI34ZC */ +#define WM8400_LI34ZC_MASK 0x0040 /* LI34ZC */ +#define WM8400_LI34ZC_SHIFT 6 /* LI34ZC */ +#define WM8400_LI34ZC_WIDTH 1 /* LI34ZC */ +#define WM8400_LIN34VOL_MASK 0x001F /* LIN34VOL - [4:0] */ +#define WM8400_LIN34VOL_SHIFT 0 /* LIN34VOL - [4:0] */ +#define WM8400_LIN34VOL_WIDTH 5 /* LIN34VOL - [4:0] */ + +/* + * R26 (0x1A) - Right Line Input 1&2 Volume + */ +#define WM8400_IPVU 0x0100 /* IPVU */ +#define WM8400_IPVU_MASK 0x0100 /* IPVU */ +#define WM8400_IPVU_SHIFT 8 /* IPVU */ +#define WM8400_IPVU_WIDTH 1 /* IPVU */ +#define WM8400_RI12MUTE 0x0080 /* RI12MUTE */ +#define WM8400_RI12MUTE_MASK 0x0080 /* RI12MUTE */ +#define WM8400_RI12MUTE_SHIFT 7 /* RI12MUTE */ +#define WM8400_RI12MUTE_WIDTH 1 /* RI12MUTE */ +#define WM8400_RI12ZC 0x0040 /* RI12ZC */ +#define WM8400_RI12ZC_MASK 0x0040 /* RI12ZC */ +#define WM8400_RI12ZC_SHIFT 6 /* RI12ZC */ +#define WM8400_RI12ZC_WIDTH 1 /* RI12ZC */ +#define WM8400_RIN12VOL_MASK 0x001F /* RIN12VOL - [4:0] */ +#define WM8400_RIN12VOL_SHIFT 0 /* RIN12VOL - [4:0] */ +#define WM8400_RIN12VOL_WIDTH 5 /* RIN12VOL - [4:0] */ + +/* + * R27 (0x1B) - Right Line Input 3&4 Volume + */ +#define WM8400_IPVU 0x0100 /* IPVU */ +#define WM8400_IPVU_MASK 0x0100 /* IPVU */ +#define WM8400_IPVU_SHIFT 8 /* IPVU */ +#define WM8400_IPVU_WIDTH 1 /* IPVU */ +#define WM8400_RI34MUTE 0x0080 /* RI34MUTE */ +#define WM8400_RI34MUTE_MASK 0x0080 /* RI34MUTE */ +#define WM8400_RI34MUTE_SHIFT 7 /* RI34MUTE */ +#define WM8400_RI34MUTE_WIDTH 1 /* RI34MUTE */ +#define WM8400_RI34ZC 0x0040 /* RI34ZC */ +#define WM8400_RI34ZC_MASK 0x0040 /* RI34ZC */ +#define WM8400_RI34ZC_SHIFT 6 /* RI34ZC */ +#define WM8400_RI34ZC_WIDTH 1 /* RI34ZC */ +#define WM8400_RIN34VOL_MASK 0x001F /* RIN34VOL - [4:0] */ +#define WM8400_RIN34VOL_SHIFT 0 /* RIN34VOL - [4:0] */ +#define WM8400_RIN34VOL_WIDTH 5 /* RIN34VOL - [4:0] */ + +/* + * R28 (0x1C) - Left Output Volume + */ +#define WM8400_OPVU 0x0100 /* OPVU */ +#define WM8400_OPVU_MASK 0x0100 /* OPVU */ +#define WM8400_OPVU_SHIFT 8 /* OPVU */ +#define WM8400_OPVU_WIDTH 1 /* OPVU */ +#define WM8400_LOZC 0x0080 /* LOZC */ +#define WM8400_LOZC_MASK 0x0080 /* LOZC */ +#define WM8400_LOZC_SHIFT 7 /* LOZC */ +#define WM8400_LOZC_WIDTH 1 /* LOZC */ +#define WM8400_LOUTVOL_MASK 0x007F /* LOUTVOL - [6:0] */ +#define WM8400_LOUTVOL_SHIFT 0 /* LOUTVOL - [6:0] */ +#define WM8400_LOUTVOL_WIDTH 7 /* LOUTVOL - [6:0] */ + +/* + * R29 (0x1D) - Right Output Volume + */ +#define WM8400_OPVU 0x0100 /* OPVU */ +#define WM8400_OPVU_MASK 0x0100 /* OPVU */ +#define WM8400_OPVU_SHIFT 8 /* OPVU */ +#define WM8400_OPVU_WIDTH 1 /* OPVU */ +#define WM8400_ROZC 0x0080 /* ROZC */ +#define WM8400_ROZC_MASK 0x0080 
/* ROZC */ +#define WM8400_ROZC_SHIFT 7 /* ROZC */ +#define WM8400_ROZC_WIDTH 1 /* ROZC */ +#define WM8400_ROUTVOL_MASK 0x007F /* ROUTVOL - [6:0] */ +#define WM8400_ROUTVOL_SHIFT 0 /* ROUTVOL - [6:0] */ +#define WM8400_ROUTVOL_WIDTH 7 /* ROUTVOL - [6:0] */ + +/* + * R30 (0x1E) - Line Outputs Volume + */ +#define WM8400_LONMUTE 0x0040 /* LONMUTE */ +#define WM8400_LONMUTE_MASK 0x0040 /* LONMUTE */ +#define WM8400_LONMUTE_SHIFT 6 /* LONMUTE */ +#define WM8400_LONMUTE_WIDTH 1 /* LONMUTE */ +#define WM8400_LOPMUTE 0x0020 /* LOPMUTE */ +#define WM8400_LOPMUTE_MASK 0x0020 /* LOPMUTE */ +#define WM8400_LOPMUTE_SHIFT 5 /* LOPMUTE */ +#define WM8400_LOPMUTE_WIDTH 1 /* LOPMUTE */ +#define WM8400_LOATTN 0x0010 /* LOATTN */ +#define WM8400_LOATTN_MASK 0x0010 /* LOATTN */ +#define WM8400_LOATTN_SHIFT 4 /* LOATTN */ +#define WM8400_LOATTN_WIDTH 1 /* LOATTN */ +#define WM8400_RONMUTE 0x0004 /* RONMUTE */ +#define WM8400_RONMUTE_MASK 0x0004 /* RONMUTE */ +#define WM8400_RONMUTE_SHIFT 2 /* RONMUTE */ +#define WM8400_RONMUTE_WIDTH 1 /* RONMUTE */ +#define WM8400_ROPMUTE 0x0002 /* ROPMUTE */ +#define WM8400_ROPMUTE_MASK 0x0002 /* ROPMUTE */ +#define WM8400_ROPMUTE_SHIFT 1 /* ROPMUTE */ +#define WM8400_ROPMUTE_WIDTH 1 /* ROPMUTE */ +#define WM8400_ROATTN 0x0001 /* ROATTN */ +#define WM8400_ROATTN_MASK 0x0001 /* ROATTN */ +#define WM8400_ROATTN_SHIFT 0 /* ROATTN */ +#define WM8400_ROATTN_WIDTH 1 /* ROATTN */ + +/* + * R31 (0x1F) - Out3/4 Volume + */ +#define WM8400_OUT3MUTE 0x0020 /* OUT3MUTE */ +#define WM8400_OUT3MUTE_MASK 0x0020 /* OUT3MUTE */ +#define WM8400_OUT3MUTE_SHIFT 5 /* OUT3MUTE */ +#define WM8400_OUT3MUTE_WIDTH 1 /* OUT3MUTE */ +#define WM8400_OUT3ATTN 0x0010 /* OUT3ATTN */ +#define WM8400_OUT3ATTN_MASK 0x0010 /* OUT3ATTN */ +#define WM8400_OUT3ATTN_SHIFT 4 /* OUT3ATTN */ +#define WM8400_OUT3ATTN_WIDTH 1 /* OUT3ATTN */ +#define WM8400_OUT4MUTE 0x0002 /* OUT4MUTE */ +#define WM8400_OUT4MUTE_MASK 0x0002 /* OUT4MUTE */ +#define WM8400_OUT4MUTE_SHIFT 1 /* OUT4MUTE */ +#define WM8400_OUT4MUTE_WIDTH 1 /* OUT4MUTE */ +#define WM8400_OUT4ATTN 0x0001 /* OUT4ATTN */ +#define WM8400_OUT4ATTN_MASK 0x0001 /* OUT4ATTN */ +#define WM8400_OUT4ATTN_SHIFT 0 /* OUT4ATTN */ +#define WM8400_OUT4ATTN_WIDTH 1 /* OUT4ATTN */ + +/* + * R32 (0x20) - Left OPGA Volume + */ +#define WM8400_OPVU 0x0100 /* OPVU */ +#define WM8400_OPVU_MASK 0x0100 /* OPVU */ +#define WM8400_OPVU_SHIFT 8 /* OPVU */ +#define WM8400_OPVU_WIDTH 1 /* OPVU */ +#define WM8400_LOPGAZC 0x0080 /* LOPGAZC */ +#define WM8400_LOPGAZC_MASK 0x0080 /* LOPGAZC */ +#define WM8400_LOPGAZC_SHIFT 7 /* LOPGAZC */ +#define WM8400_LOPGAZC_WIDTH 1 /* LOPGAZC */ +#define WM8400_LOPGAVOL_MASK 0x007F /* LOPGAVOL - [6:0] */ +#define WM8400_LOPGAVOL_SHIFT 0 /* LOPGAVOL - [6:0] */ +#define WM8400_LOPGAVOL_WIDTH 7 /* LOPGAVOL - [6:0] */ + +/* + * R33 (0x21) - Right OPGA Volume + */ +#define WM8400_OPVU 0x0100 /* OPVU */ +#define WM8400_OPVU_MASK 0x0100 /* OPVU */ +#define WM8400_OPVU_SHIFT 8 /* OPVU */ +#define WM8400_OPVU_WIDTH 1 /* OPVU */ +#define WM8400_ROPGAZC 0x0080 /* ROPGAZC */ +#define WM8400_ROPGAZC_MASK 0x0080 /* ROPGAZC */ +#define WM8400_ROPGAZC_SHIFT 7 /* ROPGAZC */ +#define WM8400_ROPGAZC_WIDTH 1 /* ROPGAZC */ +#define WM8400_ROPGAVOL_MASK 0x007F /* ROPGAVOL - [6:0] */ +#define WM8400_ROPGAVOL_SHIFT 0 /* ROPGAVOL - [6:0] */ +#define WM8400_ROPGAVOL_WIDTH 7 /* ROPGAVOL - [6:0] */ + +/* + * R34 (0x22) - Speaker Volume + */ +#define WM8400_SPKATTN_MASK 0x0003 /* SPKATTN - [1:0] */ +#define WM8400_SPKATTN_SHIFT 0 /* SPKATTN - [1:0] */ +#define WM8400_SPKATTN_WIDTH 2 
/* SPKATTN - [1:0] */ + +/* + * R35 (0x23) - ClassD1 + */ +#define WM8400_CDMODE 0x0100 /* CDMODE */ +#define WM8400_CDMODE_MASK 0x0100 /* CDMODE */ +#define WM8400_CDMODE_SHIFT 8 /* CDMODE */ +#define WM8400_CDMODE_WIDTH 1 /* CDMODE */ +#define WM8400_CLASSD_CLK_SEL 0x0080 /* CLASSD_CLK_SEL */ +#define WM8400_CLASSD_CLK_SEL_MASK 0x0080 /* CLASSD_CLK_SEL */ +#define WM8400_CLASSD_CLK_SEL_SHIFT 7 /* CLASSD_CLK_SEL */ +#define WM8400_CLASSD_CLK_SEL_WIDTH 1 /* CLASSD_CLK_SEL */ +#define WM8400_CD_SRCTRL 0x0040 /* CD_SRCTRL */ +#define WM8400_CD_SRCTRL_MASK 0x0040 /* CD_SRCTRL */ +#define WM8400_CD_SRCTRL_SHIFT 6 /* CD_SRCTRL */ +#define WM8400_CD_SRCTRL_WIDTH 1 /* CD_SRCTRL */ +#define WM8400_SPKNOPOP 0x0020 /* SPKNOPOP */ +#define WM8400_SPKNOPOP_MASK 0x0020 /* SPKNOPOP */ +#define WM8400_SPKNOPOP_SHIFT 5 /* SPKNOPOP */ +#define WM8400_SPKNOPOP_WIDTH 1 /* SPKNOPOP */ +#define WM8400_DBLERATE 0x0010 /* DBLERATE */ +#define WM8400_DBLERATE_MASK 0x0010 /* DBLERATE */ +#define WM8400_DBLERATE_SHIFT 4 /* DBLERATE */ +#define WM8400_DBLERATE_WIDTH 1 /* DBLERATE */ +#define WM8400_LOOPTEST 0x0008 /* LOOPTEST */ +#define WM8400_LOOPTEST_MASK 0x0008 /* LOOPTEST */ +#define WM8400_LOOPTEST_SHIFT 3 /* LOOPTEST */ +#define WM8400_LOOPTEST_WIDTH 1 /* LOOPTEST */ +#define WM8400_HALFABBIAS 0x0004 /* HALFABBIAS */ +#define WM8400_HALFABBIAS_MASK 0x0004 /* HALFABBIAS */ +#define WM8400_HALFABBIAS_SHIFT 2 /* HALFABBIAS */ +#define WM8400_HALFABBIAS_WIDTH 1 /* HALFABBIAS */ +#define WM8400_TRIDEL_MASK 0x0003 /* TRIDEL - [1:0] */ +#define WM8400_TRIDEL_SHIFT 0 /* TRIDEL - [1:0] */ +#define WM8400_TRIDEL_WIDTH 2 /* TRIDEL - [1:0] */ + +/* + * R37 (0x25) - ClassD3 + */ +#define WM8400_DCGAIN_MASK 0x0038 /* DCGAIN - [5:3] */ +#define WM8400_DCGAIN_SHIFT 3 /* DCGAIN - [5:3] */ +#define WM8400_DCGAIN_WIDTH 3 /* DCGAIN - [5:3] */ +#define WM8400_ACGAIN_MASK 0x0007 /* ACGAIN - [2:0] */ +#define WM8400_ACGAIN_SHIFT 0 /* ACGAIN - [2:0] */ +#define WM8400_ACGAIN_WIDTH 3 /* ACGAIN - [2:0] */ + +/* + * R39 (0x27) - Input Mixer1 + */ +#define WM8400_AINLMODE_MASK 0x000C /* AINLMODE - [3:2] */ +#define WM8400_AINLMODE_SHIFT 2 /* AINLMODE - [3:2] */ +#define WM8400_AINLMODE_WIDTH 2 /* AINLMODE - [3:2] */ +#define WM8400_AINRMODE_MASK 0x0003 /* AINRMODE - [1:0] */ +#define WM8400_AINRMODE_SHIFT 0 /* AINRMODE - [1:0] */ +#define WM8400_AINRMODE_WIDTH 2 /* AINRMODE - [1:0] */ + +/* + * R40 (0x28) - Input Mixer2 + */ +#define WM8400_LMP4 0x0080 /* LMP4 */ +#define WM8400_LMP4_MASK 0x0080 /* LMP4 */ +#define WM8400_LMP4_SHIFT 7 /* LMP4 */ +#define WM8400_LMP4_WIDTH 1 /* LMP4 */ +#define WM8400_LMN3 0x0040 /* LMN3 */ +#define WM8400_LMN3_MASK 0x0040 /* LMN3 */ +#define WM8400_LMN3_SHIFT 6 /* LMN3 */ +#define WM8400_LMN3_WIDTH 1 /* LMN3 */ +#define WM8400_LMP2 0x0020 /* LMP2 */ +#define WM8400_LMP2_MASK 0x0020 /* LMP2 */ +#define WM8400_LMP2_SHIFT 5 /* LMP2 */ +#define WM8400_LMP2_WIDTH 1 /* LMP2 */ +#define WM8400_LMN1 0x0010 /* LMN1 */ +#define WM8400_LMN1_MASK 0x0010 /* LMN1 */ +#define WM8400_LMN1_SHIFT 4 /* LMN1 */ +#define WM8400_LMN1_WIDTH 1 /* LMN1 */ +#define WM8400_RMP4 0x0008 /* RMP4 */ +#define WM8400_RMP4_MASK 0x0008 /* RMP4 */ +#define WM8400_RMP4_SHIFT 3 /* RMP4 */ +#define WM8400_RMP4_WIDTH 1 /* RMP4 */ +#define WM8400_RMN3 0x0004 /* RMN3 */ +#define WM8400_RMN3_MASK 0x0004 /* RMN3 */ +#define WM8400_RMN3_SHIFT 2 /* RMN3 */ +#define WM8400_RMN3_WIDTH 1 /* RMN3 */ +#define WM8400_RMP2 0x0002 /* RMP2 */ +#define WM8400_RMP2_MASK 0x0002 /* RMP2 */ +#define WM8400_RMP2_SHIFT 1 /* RMP2 */ +#define WM8400_RMP2_WIDTH 1 /* 
RMP2 */ +#define WM8400_RMN1 0x0001 /* RMN1 */ +#define WM8400_RMN1_MASK 0x0001 /* RMN1 */ +#define WM8400_RMN1_SHIFT 0 /* RMN1 */ +#define WM8400_RMN1_WIDTH 1 /* RMN1 */ + +/* + * R41 (0x29) - Input Mixer3 + */ +#define WM8400_L34MNB 0x0100 /* L34MNB */ +#define WM8400_L34MNB_MASK 0x0100 /* L34MNB */ +#define WM8400_L34MNB_SHIFT 8 /* L34MNB */ +#define WM8400_L34MNB_WIDTH 1 /* L34MNB */ +#define WM8400_L34MNBST 0x0080 /* L34MNBST */ +#define WM8400_L34MNBST_MASK 0x0080 /* L34MNBST */ +#define WM8400_L34MNBST_SHIFT 7 /* L34MNBST */ +#define WM8400_L34MNBST_WIDTH 1 /* L34MNBST */ +#define WM8400_L12MNB 0x0020 /* L12MNB */ +#define WM8400_L12MNB_MASK 0x0020 /* L12MNB */ +#define WM8400_L12MNB_SHIFT 5 /* L12MNB */ +#define WM8400_L12MNB_WIDTH 1 /* L12MNB */ +#define WM8400_L12MNBST 0x0010 /* L12MNBST */ +#define WM8400_L12MNBST_MASK 0x0010 /* L12MNBST */ +#define WM8400_L12MNBST_SHIFT 4 /* L12MNBST */ +#define WM8400_L12MNBST_WIDTH 1 /* L12MNBST */ +#define WM8400_LDBVOL_MASK 0x0007 /* LDBVOL - [2:0] */ +#define WM8400_LDBVOL_SHIFT 0 /* LDBVOL - [2:0] */ +#define WM8400_LDBVOL_WIDTH 3 /* LDBVOL - [2:0] */ + +/* + * R42 (0x2A) - Input Mixer4 + */ +#define WM8400_R34MNB 0x0100 /* R34MNB */ +#define WM8400_R34MNB_MASK 0x0100 /* R34MNB */ +#define WM8400_R34MNB_SHIFT 8 /* R34MNB */ +#define WM8400_R34MNB_WIDTH 1 /* R34MNB */ +#define WM8400_R34MNBST 0x0080 /* R34MNBST */ +#define WM8400_R34MNBST_MASK 0x0080 /* R34MNBST */ +#define WM8400_R34MNBST_SHIFT 7 /* R34MNBST */ +#define WM8400_R34MNBST_WIDTH 1 /* R34MNBST */ +#define WM8400_R12MNB 0x0020 /* R12MNB */ +#define WM8400_R12MNB_MASK 0x0020 /* R12MNB */ +#define WM8400_R12MNB_SHIFT 5 /* R12MNB */ +#define WM8400_R12MNB_WIDTH 1 /* R12MNB */ +#define WM8400_R12MNBST 0x0010 /* R12MNBST */ +#define WM8400_R12MNBST_MASK 0x0010 /* R12MNBST */ +#define WM8400_R12MNBST_SHIFT 4 /* R12MNBST */ +#define WM8400_R12MNBST_WIDTH 1 /* R12MNBST */ +#define WM8400_RDBVOL_MASK 0x0007 /* RDBVOL - [2:0] */ +#define WM8400_RDBVOL_SHIFT 0 /* RDBVOL - [2:0] */ +#define WM8400_RDBVOL_WIDTH 3 /* RDBVOL - [2:0] */ + +/* + * R43 (0x2B) - Input Mixer5 + */ +#define WM8400_LI2BVOL_MASK 0x01C0 /* LI2BVOL - [8:6] */ +#define WM8400_LI2BVOL_SHIFT 6 /* LI2BVOL - [8:6] */ +#define WM8400_LI2BVOL_WIDTH 3 /* LI2BVOL - [8:6] */ +#define WM8400_LR4BVOL_MASK 0x0038 /* LR4BVOL - [5:3] */ +#define WM8400_LR4BVOL_SHIFT 3 /* LR4BVOL - [5:3] */ +#define WM8400_LR4BVOL_WIDTH 3 /* LR4BVOL - [5:3] */ +#define WM8400_LL4BVOL_MASK 0x0007 /* LL4BVOL - [2:0] */ +#define WM8400_LL4BVOL_SHIFT 0 /* LL4BVOL - [2:0] */ +#define WM8400_LL4BVOL_WIDTH 3 /* LL4BVOL - [2:0] */ + +/* + * R44 (0x2C) - Input Mixer6 + */ +#define WM8400_RI2BVOL_MASK 0x01C0 /* RI2BVOL - [8:6] */ +#define WM8400_RI2BVOL_SHIFT 6 /* RI2BVOL - [8:6] */ +#define WM8400_RI2BVOL_WIDTH 3 /* RI2BVOL - [8:6] */ +#define WM8400_RL4BVOL_MASK 0x0038 /* RL4BVOL - [5:3] */ +#define WM8400_RL4BVOL_SHIFT 3 /* RL4BVOL - [5:3] */ +#define WM8400_RL4BVOL_WIDTH 3 /* RL4BVOL - [5:3] */ +#define WM8400_RR4BVOL_MASK 0x0007 /* RR4BVOL - [2:0] */ +#define WM8400_RR4BVOL_SHIFT 0 /* RR4BVOL - [2:0] */ +#define WM8400_RR4BVOL_WIDTH 3 /* RR4BVOL - [2:0] */ + +/* + * R45 (0x2D) - Output Mixer1 + */ +#define WM8400_LRBLO 0x0080 /* LRBLO */ +#define WM8400_LRBLO_MASK 0x0080 /* LRBLO */ +#define WM8400_LRBLO_SHIFT 7 /* LRBLO */ +#define WM8400_LRBLO_WIDTH 1 /* LRBLO */ +#define WM8400_LLBLO 0x0040 /* LLBLO */ +#define WM8400_LLBLO_MASK 0x0040 /* LLBLO */ +#define WM8400_LLBLO_SHIFT 6 /* LLBLO */ +#define WM8400_LLBLO_WIDTH 1 /* LLBLO */ +#define WM8400_LRI3LO 
0x0020 /* LRI3LO */ +#define WM8400_LRI3LO_MASK 0x0020 /* LRI3LO */ +#define WM8400_LRI3LO_SHIFT 5 /* LRI3LO */ +#define WM8400_LRI3LO_WIDTH 1 /* LRI3LO */ +#define WM8400_LLI3LO 0x0010 /* LLI3LO */ +#define WM8400_LLI3LO_MASK 0x0010 /* LLI3LO */ +#define WM8400_LLI3LO_SHIFT 4 /* LLI3LO */ +#define WM8400_LLI3LO_WIDTH 1 /* LLI3LO */ +#define WM8400_LR12LO 0x0008 /* LR12LO */ +#define WM8400_LR12LO_MASK 0x0008 /* LR12LO */ +#define WM8400_LR12LO_SHIFT 3 /* LR12LO */ +#define WM8400_LR12LO_WIDTH 1 /* LR12LO */ +#define WM8400_LL12LO 0x0004 /* LL12LO */ +#define WM8400_LL12LO_MASK 0x0004 /* LL12LO */ +#define WM8400_LL12LO_SHIFT 2 /* LL12LO */ +#define WM8400_LL12LO_WIDTH 1 /* LL12LO */ +#define WM8400_LDLO 0x0001 /* LDLO */ +#define WM8400_LDLO_MASK 0x0001 /* LDLO */ +#define WM8400_LDLO_SHIFT 0 /* LDLO */ +#define WM8400_LDLO_WIDTH 1 /* LDLO */ + +/* + * R46 (0x2E) - Output Mixer2 + */ +#define WM8400_RLBRO 0x0080 /* RLBRO */ +#define WM8400_RLBRO_MASK 0x0080 /* RLBRO */ +#define WM8400_RLBRO_SHIFT 7 /* RLBRO */ +#define WM8400_RLBRO_WIDTH 1 /* RLBRO */ +#define WM8400_RRBRO 0x0040 /* RRBRO */ +#define WM8400_RRBRO_MASK 0x0040 /* RRBRO */ +#define WM8400_RRBRO_SHIFT 6 /* RRBRO */ +#define WM8400_RRBRO_WIDTH 1 /* RRBRO */ +#define WM8400_RLI3RO 0x0020 /* RLI3RO */ +#define WM8400_RLI3RO_MASK 0x0020 /* RLI3RO */ +#define WM8400_RLI3RO_SHIFT 5 /* RLI3RO */ +#define WM8400_RLI3RO_WIDTH 1 /* RLI3RO */ +#define WM8400_RRI3RO 0x0010 /* RRI3RO */ +#define WM8400_RRI3RO_MASK 0x0010 /* RRI3RO */ +#define WM8400_RRI3RO_SHIFT 4 /* RRI3RO */ +#define WM8400_RRI3RO_WIDTH 1 /* RRI3RO */ +#define WM8400_RL12RO 0x0008 /* RL12RO */ +#define WM8400_RL12RO_MASK 0x0008 /* RL12RO */ +#define WM8400_RL12RO_SHIFT 3 /* RL12RO */ +#define WM8400_RL12RO_WIDTH 1 /* RL12RO */ +#define WM8400_RR12RO 0x0004 /* RR12RO */ +#define WM8400_RR12RO_MASK 0x0004 /* RR12RO */ +#define WM8400_RR12RO_SHIFT 2 /* RR12RO */ +#define WM8400_RR12RO_WIDTH 1 /* RR12RO */ +#define WM8400_RDRO 0x0001 /* RDRO */ +#define WM8400_RDRO_MASK 0x0001 /* RDRO */ +#define WM8400_RDRO_SHIFT 0 /* RDRO */ +#define WM8400_RDRO_WIDTH 1 /* RDRO */ + +/* + * R47 (0x2F) - Output Mixer3 + */ +#define WM8400_LLI3LOVOL_MASK 0x01C0 /* LLI3LOVOL - [8:6] */ +#define WM8400_LLI3LOVOL_SHIFT 6 /* LLI3LOVOL - [8:6] */ +#define WM8400_LLI3LOVOL_WIDTH 3 /* LLI3LOVOL - [8:6] */ +#define WM8400_LR12LOVOL_MASK 0x0038 /* LR12LOVOL - [5:3] */ +#define WM8400_LR12LOVOL_SHIFT 3 /* LR12LOVOL - [5:3] */ +#define WM8400_LR12LOVOL_WIDTH 3 /* LR12LOVOL - [5:3] */ +#define WM8400_LL12LOVOL_MASK 0x0007 /* LL12LOVOL - [2:0] */ +#define WM8400_LL12LOVOL_SHIFT 0 /* LL12LOVOL - [2:0] */ +#define WM8400_LL12LOVOL_WIDTH 3 /* LL12LOVOL - [2:0] */ + +/* + * R48 (0x30) - Output Mixer4 + */ +#define WM8400_RRI3ROVOL_MASK 0x01C0 /* RRI3ROVOL - [8:6] */ +#define WM8400_RRI3ROVOL_SHIFT 6 /* RRI3ROVOL - [8:6] */ +#define WM8400_RRI3ROVOL_WIDTH 3 /* RRI3ROVOL - [8:6] */ +#define WM8400_RL12ROVOL_MASK 0x0038 /* RL12ROVOL - [5:3] */ +#define WM8400_RL12ROVOL_SHIFT 3 /* RL12ROVOL - [5:3] */ +#define WM8400_RL12ROVOL_WIDTH 3 /* RL12ROVOL - [5:3] */ +#define WM8400_RR12ROVOL_MASK 0x0007 /* RR12ROVOL - [2:0] */ +#define WM8400_RR12ROVOL_SHIFT 0 /* RR12ROVOL - [2:0] */ +#define WM8400_RR12ROVOL_WIDTH 3 /* RR12ROVOL - [2:0] */ + +/* + * R49 (0x31) - Output Mixer5 + */ +#define WM8400_LRI3LOVOL_MASK 0x01C0 /* LRI3LOVOL - [8:6] */ +#define WM8400_LRI3LOVOL_SHIFT 6 /* LRI3LOVOL - [8:6] */ +#define WM8400_LRI3LOVOL_WIDTH 3 /* LRI3LOVOL - [8:6] */ +#define WM8400_LRBLOVOL_MASK 0x0038 /* LRBLOVOL - [5:3] */ 
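Each control bit or field in these headers is described by a value/_MASK/_SHIFT/_WIDTH macro set intended for plain shift-and-mask arithmetic. A minimal sketch (not part of the patch; the helper names are invented) of reading and rewriting the 3-bit LLI3LOVOL field of Output Mixer3:

/* Extract the current LLI3LOVOL setting from a cached register value. */
static u16 wm8400_get_lli3lovol(u16 reg_val)
{
	return (reg_val & WM8400_LLI3LOVOL_MASK) >> WM8400_LLI3LOVOL_SHIFT;
}

/* Return reg_val with LLI3LOVOL replaced by vol (3 bits wide, 0..7). */
static u16 wm8400_with_lli3lovol(u16 reg_val, u16 vol)
{
	reg_val &= ~WM8400_LLI3LOVOL_MASK;
	reg_val |= (vol << WM8400_LLI3LOVOL_SHIFT) & WM8400_LLI3LOVOL_MASK;
	return reg_val;
}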
+#define WM8400_LRBLOVOL_SHIFT 3 /* LRBLOVOL - [5:3] */ +#define WM8400_LRBLOVOL_WIDTH 3 /* LRBLOVOL - [5:3] */ +#define WM8400_LLBLOVOL_MASK 0x0007 /* LLBLOVOL - [2:0] */ +#define WM8400_LLBLOVOL_SHIFT 0 /* LLBLOVOL - [2:0] */ +#define WM8400_LLBLOVOL_WIDTH 3 /* LLBLOVOL - [2:0] */ + +/* + * R50 (0x32) - Output Mixer6 + */ +#define WM8400_RLI3ROVOL_MASK 0x01C0 /* RLI3ROVOL - [8:6] */ +#define WM8400_RLI3ROVOL_SHIFT 6 /* RLI3ROVOL - [8:6] */ +#define WM8400_RLI3ROVOL_WIDTH 3 /* RLI3ROVOL - [8:6] */ +#define WM8400_RLBROVOL_MASK 0x0038 /* RLBROVOL - [5:3] */ +#define WM8400_RLBROVOL_SHIFT 3 /* RLBROVOL - [5:3] */ +#define WM8400_RLBROVOL_WIDTH 3 /* RLBROVOL - [5:3] */ +#define WM8400_RRBROVOL_MASK 0x0007 /* RRBROVOL - [2:0] */ +#define WM8400_RRBROVOL_SHIFT 0 /* RRBROVOL - [2:0] */ +#define WM8400_RRBROVOL_WIDTH 3 /* RRBROVOL - [2:0] */ + +/* + * R51 (0x33) - Out3/4 Mixer + */ +#define WM8400_VSEL_MASK 0x0180 /* VSEL - [8:7] */ +#define WM8400_VSEL_SHIFT 7 /* VSEL - [8:7] */ +#define WM8400_VSEL_WIDTH 2 /* VSEL - [8:7] */ +#define WM8400_LI4O3 0x0020 /* LI4O3 */ +#define WM8400_LI4O3_MASK 0x0020 /* LI4O3 */ +#define WM8400_LI4O3_SHIFT 5 /* LI4O3 */ +#define WM8400_LI4O3_WIDTH 1 /* LI4O3 */ +#define WM8400_LPGAO3 0x0010 /* LPGAO3 */ +#define WM8400_LPGAO3_MASK 0x0010 /* LPGAO3 */ +#define WM8400_LPGAO3_SHIFT 4 /* LPGAO3 */ +#define WM8400_LPGAO3_WIDTH 1 /* LPGAO3 */ +#define WM8400_RI4O4 0x0002 /* RI4O4 */ +#define WM8400_RI4O4_MASK 0x0002 /* RI4O4 */ +#define WM8400_RI4O4_SHIFT 1 /* RI4O4 */ +#define WM8400_RI4O4_WIDTH 1 /* RI4O4 */ +#define WM8400_RPGAO4 0x0001 /* RPGAO4 */ +#define WM8400_RPGAO4_MASK 0x0001 /* RPGAO4 */ +#define WM8400_RPGAO4_SHIFT 0 /* RPGAO4 */ +#define WM8400_RPGAO4_WIDTH 1 /* RPGAO4 */ + +/* + * R52 (0x34) - Line Mixer1 + */ +#define WM8400_LLOPGALON 0x0040 /* LLOPGALON */ +#define WM8400_LLOPGALON_MASK 0x0040 /* LLOPGALON */ +#define WM8400_LLOPGALON_SHIFT 6 /* LLOPGALON */ +#define WM8400_LLOPGALON_WIDTH 1 /* LLOPGALON */ +#define WM8400_LROPGALON 0x0020 /* LROPGALON */ +#define WM8400_LROPGALON_MASK 0x0020 /* LROPGALON */ +#define WM8400_LROPGALON_SHIFT 5 /* LROPGALON */ +#define WM8400_LROPGALON_WIDTH 1 /* LROPGALON */ +#define WM8400_LOPLON 0x0010 /* LOPLON */ +#define WM8400_LOPLON_MASK 0x0010 /* LOPLON */ +#define WM8400_LOPLON_SHIFT 4 /* LOPLON */ +#define WM8400_LOPLON_WIDTH 1 /* LOPLON */ +#define WM8400_LR12LOP 0x0004 /* LR12LOP */ +#define WM8400_LR12LOP_MASK 0x0004 /* LR12LOP */ +#define WM8400_LR12LOP_SHIFT 2 /* LR12LOP */ +#define WM8400_LR12LOP_WIDTH 1 /* LR12LOP */ +#define WM8400_LL12LOP 0x0002 /* LL12LOP */ +#define WM8400_LL12LOP_MASK 0x0002 /* LL12LOP */ +#define WM8400_LL12LOP_SHIFT 1 /* LL12LOP */ +#define WM8400_LL12LOP_WIDTH 1 /* LL12LOP */ +#define WM8400_LLOPGALOP 0x0001 /* LLOPGALOP */ +#define WM8400_LLOPGALOP_MASK 0x0001 /* LLOPGALOP */ +#define WM8400_LLOPGALOP_SHIFT 0 /* LLOPGALOP */ +#define WM8400_LLOPGALOP_WIDTH 1 /* LLOPGALOP */ + +/* + * R53 (0x35) - Line Mixer2 + */ +#define WM8400_RROPGARON 0x0040 /* RROPGARON */ +#define WM8400_RROPGARON_MASK 0x0040 /* RROPGARON */ +#define WM8400_RROPGARON_SHIFT 6 /* RROPGARON */ +#define WM8400_RROPGARON_WIDTH 1 /* RROPGARON */ +#define WM8400_RLOPGARON 0x0020 /* RLOPGARON */ +#define WM8400_RLOPGARON_MASK 0x0020 /* RLOPGARON */ +#define WM8400_RLOPGARON_SHIFT 5 /* RLOPGARON */ +#define WM8400_RLOPGARON_WIDTH 1 /* RLOPGARON */ +#define WM8400_ROPRON 0x0010 /* ROPRON */ +#define WM8400_ROPRON_MASK 0x0010 /* ROPRON */ +#define WM8400_ROPRON_SHIFT 4 /* ROPRON */ +#define WM8400_ROPRON_WIDTH 1 /* 
ROPRON */ +#define WM8400_RL12ROP 0x0004 /* RL12ROP */ +#define WM8400_RL12ROP_MASK 0x0004 /* RL12ROP */ +#define WM8400_RL12ROP_SHIFT 2 /* RL12ROP */ +#define WM8400_RL12ROP_WIDTH 1 /* RL12ROP */ +#define WM8400_RR12ROP 0x0002 /* RR12ROP */ +#define WM8400_RR12ROP_MASK 0x0002 /* RR12ROP */ +#define WM8400_RR12ROP_SHIFT 1 /* RR12ROP */ +#define WM8400_RR12ROP_WIDTH 1 /* RR12ROP */ +#define WM8400_RROPGAROP 0x0001 /* RROPGAROP */ +#define WM8400_RROPGAROP_MASK 0x0001 /* RROPGAROP */ +#define WM8400_RROPGAROP_SHIFT 0 /* RROPGAROP */ +#define WM8400_RROPGAROP_WIDTH 1 /* RROPGAROP */ + +/* + * R54 (0x36) - Speaker Mixer + */ +#define WM8400_LB2SPK 0x0080 /* LB2SPK */ +#define WM8400_LB2SPK_MASK 0x0080 /* LB2SPK */ +#define WM8400_LB2SPK_SHIFT 7 /* LB2SPK */ +#define WM8400_LB2SPK_WIDTH 1 /* LB2SPK */ +#define WM8400_RB2SPK 0x0040 /* RB2SPK */ +#define WM8400_RB2SPK_MASK 0x0040 /* RB2SPK */ +#define WM8400_RB2SPK_SHIFT 6 /* RB2SPK */ +#define WM8400_RB2SPK_WIDTH 1 /* RB2SPK */ +#define WM8400_LI2SPK 0x0020 /* LI2SPK */ +#define WM8400_LI2SPK_MASK 0x0020 /* LI2SPK */ +#define WM8400_LI2SPK_SHIFT 5 /* LI2SPK */ +#define WM8400_LI2SPK_WIDTH 1 /* LI2SPK */ +#define WM8400_RI2SPK 0x0010 /* RI2SPK */ +#define WM8400_RI2SPK_MASK 0x0010 /* RI2SPK */ +#define WM8400_RI2SPK_SHIFT 4 /* RI2SPK */ +#define WM8400_RI2SPK_WIDTH 1 /* RI2SPK */ +#define WM8400_LOPGASPK 0x0008 /* LOPGASPK */ +#define WM8400_LOPGASPK_MASK 0x0008 /* LOPGASPK */ +#define WM8400_LOPGASPK_SHIFT 3 /* LOPGASPK */ +#define WM8400_LOPGASPK_WIDTH 1 /* LOPGASPK */ +#define WM8400_ROPGASPK 0x0004 /* ROPGASPK */ +#define WM8400_ROPGASPK_MASK 0x0004 /* ROPGASPK */ +#define WM8400_ROPGASPK_SHIFT 2 /* ROPGASPK */ +#define WM8400_ROPGASPK_WIDTH 1 /* ROPGASPK */ +#define WM8400_LDSPK 0x0002 /* LDSPK */ +#define WM8400_LDSPK_MASK 0x0002 /* LDSPK */ +#define WM8400_LDSPK_SHIFT 1 /* LDSPK */ +#define WM8400_LDSPK_WIDTH 1 /* LDSPK */ +#define WM8400_RDSPK 0x0001 /* RDSPK */ +#define WM8400_RDSPK_MASK 0x0001 /* RDSPK */ +#define WM8400_RDSPK_SHIFT 0 /* RDSPK */ +#define WM8400_RDSPK_WIDTH 1 /* RDSPK */ + +/* + * R55 (0x37) - Additional Control + */ +#define WM8400_VROI 0x0001 /* VROI */ +#define WM8400_VROI_MASK 0x0001 /* VROI */ +#define WM8400_VROI_SHIFT 0 /* VROI */ +#define WM8400_VROI_WIDTH 1 /* VROI */ + +/* + * R56 (0x38) - AntiPOP1 + */ +#define WM8400_DIS_LLINE 0x0020 /* DIS_LLINE */ +#define WM8400_DIS_LLINE_MASK 0x0020 /* DIS_LLINE */ +#define WM8400_DIS_LLINE_SHIFT 5 /* DIS_LLINE */ +#define WM8400_DIS_LLINE_WIDTH 1 /* DIS_LLINE */ +#define WM8400_DIS_RLINE 0x0010 /* DIS_RLINE */ +#define WM8400_DIS_RLINE_MASK 0x0010 /* DIS_RLINE */ +#define WM8400_DIS_RLINE_SHIFT 4 /* DIS_RLINE */ +#define WM8400_DIS_RLINE_WIDTH 1 /* DIS_RLINE */ +#define WM8400_DIS_OUT3 0x0008 /* DIS_OUT3 */ +#define WM8400_DIS_OUT3_MASK 0x0008 /* DIS_OUT3 */ +#define WM8400_DIS_OUT3_SHIFT 3 /* DIS_OUT3 */ +#define WM8400_DIS_OUT3_WIDTH 1 /* DIS_OUT3 */ +#define WM8400_DIS_OUT4 0x0004 /* DIS_OUT4 */ +#define WM8400_DIS_OUT4_MASK 0x0004 /* DIS_OUT4 */ +#define WM8400_DIS_OUT4_SHIFT 2 /* DIS_OUT4 */ +#define WM8400_DIS_OUT4_WIDTH 1 /* DIS_OUT4 */ +#define WM8400_DIS_LOUT 0x0002 /* DIS_LOUT */ +#define WM8400_DIS_LOUT_MASK 0x0002 /* DIS_LOUT */ +#define WM8400_DIS_LOUT_SHIFT 1 /* DIS_LOUT */ +#define WM8400_DIS_LOUT_WIDTH 1 /* DIS_LOUT */ +#define WM8400_DIS_ROUT 0x0001 /* DIS_ROUT */ +#define WM8400_DIS_ROUT_MASK 0x0001 /* DIS_ROUT */ +#define WM8400_DIS_ROUT_SHIFT 0 /* DIS_ROUT */ +#define WM8400_DIS_ROUT_WIDTH 1 /* DIS_ROUT */ + +/* + * R57 (0x39) - AntiPOP2 + */ 
+#define WM8400_SOFTST 0x0040 /* SOFTST */ +#define WM8400_SOFTST_MASK 0x0040 /* SOFTST */ +#define WM8400_SOFTST_SHIFT 6 /* SOFTST */ +#define WM8400_SOFTST_WIDTH 1 /* SOFTST */ +#define WM8400_BUFIOEN 0x0008 /* BUFIOEN */ +#define WM8400_BUFIOEN_MASK 0x0008 /* BUFIOEN */ +#define WM8400_BUFIOEN_SHIFT 3 /* BUFIOEN */ +#define WM8400_BUFIOEN_WIDTH 1 /* BUFIOEN */ +#define WM8400_BUFDCOPEN 0x0004 /* BUFDCOPEN */ +#define WM8400_BUFDCOPEN_MASK 0x0004 /* BUFDCOPEN */ +#define WM8400_BUFDCOPEN_SHIFT 2 /* BUFDCOPEN */ +#define WM8400_BUFDCOPEN_WIDTH 1 /* BUFDCOPEN */ +#define WM8400_POBCTRL 0x0002 /* POBCTRL */ +#define WM8400_POBCTRL_MASK 0x0002 /* POBCTRL */ +#define WM8400_POBCTRL_SHIFT 1 /* POBCTRL */ +#define WM8400_POBCTRL_WIDTH 1 /* POBCTRL */ +#define WM8400_VMIDTOG 0x0001 /* VMIDTOG */ +#define WM8400_VMIDTOG_MASK 0x0001 /* VMIDTOG */ +#define WM8400_VMIDTOG_SHIFT 0 /* VMIDTOG */ +#define WM8400_VMIDTOG_WIDTH 1 /* VMIDTOG */ + +/* + * R58 (0x3A) - MICBIAS + */ +#define WM8400_MCDSCTH_MASK 0x00C0 /* MCDSCTH - [7:6] */ +#define WM8400_MCDSCTH_SHIFT 6 /* MCDSCTH - [7:6] */ +#define WM8400_MCDSCTH_WIDTH 2 /* MCDSCTH - [7:6] */ +#define WM8400_MCDTHR_MASK 0x0038 /* MCDTHR - [5:3] */ +#define WM8400_MCDTHR_SHIFT 3 /* MCDTHR - [5:3] */ +#define WM8400_MCDTHR_WIDTH 3 /* MCDTHR - [5:3] */ +#define WM8400_MCD 0x0004 /* MCD */ +#define WM8400_MCD_MASK 0x0004 /* MCD */ +#define WM8400_MCD_SHIFT 2 /* MCD */ +#define WM8400_MCD_WIDTH 1 /* MCD */ +#define WM8400_MBSEL 0x0001 /* MBSEL */ +#define WM8400_MBSEL_MASK 0x0001 /* MBSEL */ +#define WM8400_MBSEL_SHIFT 0 /* MBSEL */ +#define WM8400_MBSEL_WIDTH 1 /* MBSEL */ + +/* + * R60 (0x3C) - FLL Control 1 + */ +#define WM8400_FLL_REF_FREQ 0x1000 /* FLL_REF_FREQ */ +#define WM8400_FLL_REF_FREQ_MASK 0x1000 /* FLL_REF_FREQ */ +#define WM8400_FLL_REF_FREQ_SHIFT 12 /* FLL_REF_FREQ */ +#define WM8400_FLL_REF_FREQ_WIDTH 1 /* FLL_REF_FREQ */ +#define WM8400_FLL_CLK_SRC_MASK 0x0C00 /* FLL_CLK_SRC - [11:10] */ +#define WM8400_FLL_CLK_SRC_SHIFT 10 /* FLL_CLK_SRC - [11:10] */ +#define WM8400_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [11:10] */ +#define WM8400_FLL_FRAC 0x0200 /* FLL_FRAC */ +#define WM8400_FLL_FRAC_MASK 0x0200 /* FLL_FRAC */ +#define WM8400_FLL_FRAC_SHIFT 9 /* FLL_FRAC */ +#define WM8400_FLL_FRAC_WIDTH 1 /* FLL_FRAC */ +#define WM8400_FLL_OSC_ENA 0x0100 /* FLL_OSC_ENA */ +#define WM8400_FLL_OSC_ENA_MASK 0x0100 /* FLL_OSC_ENA */ +#define WM8400_FLL_OSC_ENA_SHIFT 8 /* FLL_OSC_ENA */ +#define WM8400_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */ +#define WM8400_FLL_CTRL_RATE_MASK 0x00E0 /* FLL_CTRL_RATE - [7:5] */ +#define WM8400_FLL_CTRL_RATE_SHIFT 5 /* FLL_CTRL_RATE - [7:5] */ +#define WM8400_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [7:5] */ +#define WM8400_FLL_FRATIO_MASK 0x001F /* FLL_FRATIO - [4:0] */ +#define WM8400_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [4:0] */ +#define WM8400_FLL_FRATIO_WIDTH 5 /* FLL_FRATIO - [4:0] */ + +/* + * R61 (0x3D) - FLL Control 2 + */ +#define WM8400_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */ +#define WM8400_FLL_K_SHIFT 0 /* FLL_K - [15:0] */ +#define WM8400_FLL_K_WIDTH 16 /* FLL_K - [15:0] */ + +/* + * R62 (0x3E) - FLL Control 3 + */ +#define WM8400_FLL_N_MASK 0x03FF /* FLL_N - [9:0] */ +#define WM8400_FLL_N_SHIFT 0 /* FLL_N - [9:0] */ +#define WM8400_FLL_N_WIDTH 10 /* FLL_N - [9:0] */ + +/* + * R63 (0x3F) - FLL Control 4 + */ +#define WM8400_FLL_TRK_GAIN_MASK 0x0078 /* FLL_TRK_GAIN - [6:3] */ +#define WM8400_FLL_TRK_GAIN_SHIFT 3 /* FLL_TRK_GAIN - [6:3] */ +#define WM8400_FLL_TRK_GAIN_WIDTH 4 /* FLL_TRK_GAIN - [6:3] */ +#define 
WM8400_FLL_OUTDIV_MASK 0x0007 /* FLL_OUTDIV - [2:0] */ +#define WM8400_FLL_OUTDIV_SHIFT 0 /* FLL_OUTDIV - [2:0] */ +#define WM8400_FLL_OUTDIV_WIDTH 3 /* FLL_OUTDIV - [2:0] */ + +struct wm8400; +void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400); + +#endif diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h new file mode 100644 index 000000000..4ee908f5b --- /dev/null +++ b/include/linux/mfd/wm8400-private.h @@ -0,0 +1,934 @@ +/* + * wm8400 private definitions. + * + * Copyright 2008 Wolfson Microelectronics plc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_MFD_WM8400_PRIV_H +#define __LINUX_MFD_WM8400_PRIV_H + +#include +#include +#include +#include + +#define WM8400_REGISTER_COUNT 0x55 + +struct wm8400 { + struct device *dev; + struct regmap *regmap; + + struct platform_device regulators[6]; +}; + +/* + * Register values. + */ +#define WM8400_RESET_ID 0x00 +#define WM8400_ID 0x01 +#define WM8400_POWER_MANAGEMENT_1 0x02 +#define WM8400_POWER_MANAGEMENT_2 0x03 +#define WM8400_POWER_MANAGEMENT_3 0x04 +#define WM8400_AUDIO_INTERFACE_1 0x05 +#define WM8400_AUDIO_INTERFACE_2 0x06 +#define WM8400_CLOCKING_1 0x07 +#define WM8400_CLOCKING_2 0x08 +#define WM8400_AUDIO_INTERFACE_3 0x09 +#define WM8400_AUDIO_INTERFACE_4 0x0A +#define WM8400_DAC_CTRL 0x0B +#define WM8400_LEFT_DAC_DIGITAL_VOLUME 0x0C +#define WM8400_RIGHT_DAC_DIGITAL_VOLUME 0x0D +#define WM8400_DIGITAL_SIDE_TONE 0x0E +#define WM8400_ADC_CTRL 0x0F +#define WM8400_LEFT_ADC_DIGITAL_VOLUME 0x10 +#define WM8400_RIGHT_ADC_DIGITAL_VOLUME 0x11 +#define WM8400_GPIO_CTRL_1 0x12 +#define WM8400_GPIO1_GPIO2 0x13 +#define WM8400_GPIO3_GPIO4 0x14 +#define WM8400_GPIO5_GPIO6 0x15 +#define WM8400_GPIOCTRL_2 0x16 +#define WM8400_GPIO_POL 0x17 +#define WM8400_LEFT_LINE_INPUT_1_2_VOLUME 0x18 +#define WM8400_LEFT_LINE_INPUT_3_4_VOLUME 0x19 +#define WM8400_RIGHT_LINE_INPUT_1_2_VOLUME 0x1A +#define WM8400_RIGHT_LINE_INPUT_3_4_VOLUME 0x1B +#define WM8400_LEFT_OUTPUT_VOLUME 0x1C +#define WM8400_RIGHT_OUTPUT_VOLUME 0x1D +#define WM8400_LINE_OUTPUTS_VOLUME 0x1E +#define WM8400_OUT3_4_VOLUME 0x1F +#define WM8400_LEFT_OPGA_VOLUME 0x20 +#define WM8400_RIGHT_OPGA_VOLUME 0x21 +#define WM8400_SPEAKER_VOLUME 0x22 +#define WM8400_CLASSD1 0x23 +#define WM8400_CLASSD3 0x25 +#define WM8400_INPUT_MIXER1 0x27 +#define WM8400_INPUT_MIXER2 0x28 +#define WM8400_INPUT_MIXER3 0x29 +#define WM8400_INPUT_MIXER4 0x2A +#define WM8400_INPUT_MIXER5 0x2B +#define WM8400_INPUT_MIXER6 0x2C +#define WM8400_OUTPUT_MIXER1 0x2D +#define WM8400_OUTPUT_MIXER2 0x2E +#define WM8400_OUTPUT_MIXER3 0x2F +#define WM8400_OUTPUT_MIXER4 0x30 +#define WM8400_OUTPUT_MIXER5 0x31 +#define WM8400_OUTPUT_MIXER6 0x32 +#define WM8400_OUT3_4_MIXER 0x33 +#define WM8400_LINE_MIXER1 0x34 +#define WM8400_LINE_MIXER2 0x35 +#define WM8400_SPEAKER_MIXER 0x36 +#define WM8400_ADDITIONAL_CONTROL 0x37 
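The register map above pairs with the regmap handle carried in struct wm8400. As a sketch of how a caller might use it (the function name is invented; regmap_read() is the standard regmap accessor, not something added by this patch):

/* Read the Reset/ID register (R0) through the device's regmap. */
static int wm8400_read_chip_id(struct wm8400 *wm8400, unsigned int *id)
{
	return regmap_read(wm8400->regmap, WM8400_RESET_ID, id);
}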
+#define WM8400_ANTIPOP1 0x38 +#define WM8400_ANTIPOP2 0x39 +#define WM8400_MICBIAS 0x3A +#define WM8400_FLL_CONTROL_1 0x3C +#define WM8400_FLL_CONTROL_2 0x3D +#define WM8400_FLL_CONTROL_3 0x3E +#define WM8400_FLL_CONTROL_4 0x3F +#define WM8400_LDO1_CONTROL 0x41 +#define WM8400_LDO2_CONTROL 0x42 +#define WM8400_LDO3_CONTROL 0x43 +#define WM8400_LDO4_CONTROL 0x44 +#define WM8400_DCDC1_CONTROL_1 0x46 +#define WM8400_DCDC1_CONTROL_2 0x47 +#define WM8400_DCDC2_CONTROL_1 0x48 +#define WM8400_DCDC2_CONTROL_2 0x49 +#define WM8400_INTERFACE 0x4B +#define WM8400_PM_GENERAL 0x4C +#define WM8400_PM_SHUTDOWN_CONTROL 0x4E +#define WM8400_INTERRUPT_STATUS_1 0x4F +#define WM8400_INTERRUPT_STATUS_1_MASK 0x50 +#define WM8400_INTERRUPT_LEVELS 0x51 +#define WM8400_SHUTDOWN_REASON 0x52 +#define WM8400_LINE_CIRCUITS 0x54 + +/* + * Field Definitions. + */ + +/* + * R0 (0x00) - Reset/ID + */ +#define WM8400_SW_RESET_CHIP_ID_MASK 0xFFFF /* SW_RESET/CHIP_ID - [15:0] */ +#define WM8400_SW_RESET_CHIP_ID_SHIFT 0 /* SW_RESET/CHIP_ID - [15:0] */ +#define WM8400_SW_RESET_CHIP_ID_WIDTH 16 /* SW_RESET/CHIP_ID - [15:0] */ + +/* + * R1 (0x01) - ID + */ +#define WM8400_CHIP_REV_MASK 0x7000 /* CHIP_REV - [14:12] */ +#define WM8400_CHIP_REV_SHIFT 12 /* CHIP_REV - [14:12] */ +#define WM8400_CHIP_REV_WIDTH 3 /* CHIP_REV - [14:12] */ + +/* + * R18 (0x12) - GPIO CTRL 1 + */ +#define WM8400_IRQ 0x1000 /* IRQ */ +#define WM8400_IRQ_MASK 0x1000 /* IRQ */ +#define WM8400_IRQ_SHIFT 12 /* IRQ */ +#define WM8400_IRQ_WIDTH 1 /* IRQ */ +#define WM8400_TEMPOK 0x0800 /* TEMPOK */ +#define WM8400_TEMPOK_MASK 0x0800 /* TEMPOK */ +#define WM8400_TEMPOK_SHIFT 11 /* TEMPOK */ +#define WM8400_TEMPOK_WIDTH 1 /* TEMPOK */ +#define WM8400_MIC1SHRT 0x0400 /* MIC1SHRT */ +#define WM8400_MIC1SHRT_MASK 0x0400 /* MIC1SHRT */ +#define WM8400_MIC1SHRT_SHIFT 10 /* MIC1SHRT */ +#define WM8400_MIC1SHRT_WIDTH 1 /* MIC1SHRT */ +#define WM8400_MIC1DET 0x0200 /* MIC1DET */ +#define WM8400_MIC1DET_MASK 0x0200 /* MIC1DET */ +#define WM8400_MIC1DET_SHIFT 9 /* MIC1DET */ +#define WM8400_MIC1DET_WIDTH 1 /* MIC1DET */ +#define WM8400_FLL_LCK 0x0100 /* FLL_LCK */ +#define WM8400_FLL_LCK_MASK 0x0100 /* FLL_LCK */ +#define WM8400_FLL_LCK_SHIFT 8 /* FLL_LCK */ +#define WM8400_FLL_LCK_WIDTH 1 /* FLL_LCK */ +#define WM8400_GPIO_STATUS_MASK 0x00FF /* GPIO_STATUS - [7:0] */ +#define WM8400_GPIO_STATUS_SHIFT 0 /* GPIO_STATUS - [7:0] */ +#define WM8400_GPIO_STATUS_WIDTH 8 /* GPIO_STATUS - [7:0] */ + +/* + * R19 (0x13) - GPIO1 & GPIO2 + */ +#define WM8400_GPIO2_DEB_ENA 0x8000 /* GPIO2_DEB_ENA */ +#define WM8400_GPIO2_DEB_ENA_MASK 0x8000 /* GPIO2_DEB_ENA */ +#define WM8400_GPIO2_DEB_ENA_SHIFT 15 /* GPIO2_DEB_ENA */ +#define WM8400_GPIO2_DEB_ENA_WIDTH 1 /* GPIO2_DEB_ENA */ +#define WM8400_GPIO2_IRQ_ENA 0x4000 /* GPIO2_IRQ_ENA */ +#define WM8400_GPIO2_IRQ_ENA_MASK 0x4000 /* GPIO2_IRQ_ENA */ +#define WM8400_GPIO2_IRQ_ENA_SHIFT 14 /* GPIO2_IRQ_ENA */ +#define WM8400_GPIO2_IRQ_ENA_WIDTH 1 /* GPIO2_IRQ_ENA */ +#define WM8400_GPIO2_PU 0x2000 /* GPIO2_PU */ +#define WM8400_GPIO2_PU_MASK 0x2000 /* GPIO2_PU */ +#define WM8400_GPIO2_PU_SHIFT 13 /* GPIO2_PU */ +#define WM8400_GPIO2_PU_WIDTH 1 /* GPIO2_PU */ +#define WM8400_GPIO2_PD 0x1000 /* GPIO2_PD */ +#define WM8400_GPIO2_PD_MASK 0x1000 /* GPIO2_PD */ +#define WM8400_GPIO2_PD_SHIFT 12 /* GPIO2_PD */ +#define WM8400_GPIO2_PD_WIDTH 1 /* GPIO2_PD */ +#define WM8400_GPIO2_SEL_MASK 0x0F00 /* GPIO2_SEL - [11:8] */ +#define WM8400_GPIO2_SEL_SHIFT 8 /* GPIO2_SEL - [11:8] */ +#define WM8400_GPIO2_SEL_WIDTH 4 /* GPIO2_SEL - [11:8] */ +#define 
WM8400_GPIO1_DEB_ENA 0x0080 /* GPIO1_DEB_ENA */ +#define WM8400_GPIO1_DEB_ENA_MASK 0x0080 /* GPIO1_DEB_ENA */ +#define WM8400_GPIO1_DEB_ENA_SHIFT 7 /* GPIO1_DEB_ENA */ +#define WM8400_GPIO1_DEB_ENA_WIDTH 1 /* GPIO1_DEB_ENA */ +#define WM8400_GPIO1_IRQ_ENA 0x0040 /* GPIO1_IRQ_ENA */ +#define WM8400_GPIO1_IRQ_ENA_MASK 0x0040 /* GPIO1_IRQ_ENA */ +#define WM8400_GPIO1_IRQ_ENA_SHIFT 6 /* GPIO1_IRQ_ENA */ +#define WM8400_GPIO1_IRQ_ENA_WIDTH 1 /* GPIO1_IRQ_ENA */ +#define WM8400_GPIO1_PU 0x0020 /* GPIO1_PU */ +#define WM8400_GPIO1_PU_MASK 0x0020 /* GPIO1_PU */ +#define WM8400_GPIO1_PU_SHIFT 5 /* GPIO1_PU */ +#define WM8400_GPIO1_PU_WIDTH 1 /* GPIO1_PU */ +#define WM8400_GPIO1_PD 0x0010 /* GPIO1_PD */ +#define WM8400_GPIO1_PD_MASK 0x0010 /* GPIO1_PD */ +#define WM8400_GPIO1_PD_SHIFT 4 /* GPIO1_PD */ +#define WM8400_GPIO1_PD_WIDTH 1 /* GPIO1_PD */ +#define WM8400_GPIO1_SEL_MASK 0x000F /* GPIO1_SEL - [3:0] */ +#define WM8400_GPIO1_SEL_SHIFT 0 /* GPIO1_SEL - [3:0] */ +#define WM8400_GPIO1_SEL_WIDTH 4 /* GPIO1_SEL - [3:0] */ + +/* + * R20 (0x14) - GPIO3 & GPIO4 + */ +#define WM8400_GPIO4_DEB_ENA 0x8000 /* GPIO4_DEB_ENA */ +#define WM8400_GPIO4_DEB_ENA_MASK 0x8000 /* GPIO4_DEB_ENA */ +#define WM8400_GPIO4_DEB_ENA_SHIFT 15 /* GPIO4_DEB_ENA */ +#define WM8400_GPIO4_DEB_ENA_WIDTH 1 /* GPIO4_DEB_ENA */ +#define WM8400_GPIO4_IRQ_ENA 0x4000 /* GPIO4_IRQ_ENA */ +#define WM8400_GPIO4_IRQ_ENA_MASK 0x4000 /* GPIO4_IRQ_ENA */ +#define WM8400_GPIO4_IRQ_ENA_SHIFT 14 /* GPIO4_IRQ_ENA */ +#define WM8400_GPIO4_IRQ_ENA_WIDTH 1 /* GPIO4_IRQ_ENA */ +#define WM8400_GPIO4_PU 0x2000 /* GPIO4_PU */ +#define WM8400_GPIO4_PU_MASK 0x2000 /* GPIO4_PU */ +#define WM8400_GPIO4_PU_SHIFT 13 /* GPIO4_PU */ +#define WM8400_GPIO4_PU_WIDTH 1 /* GPIO4_PU */ +#define WM8400_GPIO4_PD 0x1000 /* GPIO4_PD */ +#define WM8400_GPIO4_PD_MASK 0x1000 /* GPIO4_PD */ +#define WM8400_GPIO4_PD_SHIFT 12 /* GPIO4_PD */ +#define WM8400_GPIO4_PD_WIDTH 1 /* GPIO4_PD */ +#define WM8400_GPIO4_SEL_MASK 0x0F00 /* GPIO4_SEL - [11:8] */ +#define WM8400_GPIO4_SEL_SHIFT 8 /* GPIO4_SEL - [11:8] */ +#define WM8400_GPIO4_SEL_WIDTH 4 /* GPIO4_SEL - [11:8] */ +#define WM8400_GPIO3_DEB_ENA 0x0080 /* GPIO3_DEB_ENA */ +#define WM8400_GPIO3_DEB_ENA_MASK 0x0080 /* GPIO3_DEB_ENA */ +#define WM8400_GPIO3_DEB_ENA_SHIFT 7 /* GPIO3_DEB_ENA */ +#define WM8400_GPIO3_DEB_ENA_WIDTH 1 /* GPIO3_DEB_ENA */ +#define WM8400_GPIO3_IRQ_ENA 0x0040 /* GPIO3_IRQ_ENA */ +#define WM8400_GPIO3_IRQ_ENA_MASK 0x0040 /* GPIO3_IRQ_ENA */ +#define WM8400_GPIO3_IRQ_ENA_SHIFT 6 /* GPIO3_IRQ_ENA */ +#define WM8400_GPIO3_IRQ_ENA_WIDTH 1 /* GPIO3_IRQ_ENA */ +#define WM8400_GPIO3_PU 0x0020 /* GPIO3_PU */ +#define WM8400_GPIO3_PU_MASK 0x0020 /* GPIO3_PU */ +#define WM8400_GPIO3_PU_SHIFT 5 /* GPIO3_PU */ +#define WM8400_GPIO3_PU_WIDTH 1 /* GPIO3_PU */ +#define WM8400_GPIO3_PD 0x0010 /* GPIO3_PD */ +#define WM8400_GPIO3_PD_MASK 0x0010 /* GPIO3_PD */ +#define WM8400_GPIO3_PD_SHIFT 4 /* GPIO3_PD */ +#define WM8400_GPIO3_PD_WIDTH 1 /* GPIO3_PD */ +#define WM8400_GPIO3_SEL_MASK 0x000F /* GPIO3_SEL - [3:0] */ +#define WM8400_GPIO3_SEL_SHIFT 0 /* GPIO3_SEL - [3:0] */ +#define WM8400_GPIO3_SEL_WIDTH 4 /* GPIO3_SEL - [3:0] */ + +/* + * R21 (0x15) - GPIO5 & GPIO6 + */ +#define WM8400_GPIO6_DEB_ENA 0x8000 /* GPIO6_DEB_ENA */ +#define WM8400_GPIO6_DEB_ENA_MASK 0x8000 /* GPIO6_DEB_ENA */ +#define WM8400_GPIO6_DEB_ENA_SHIFT 15 /* GPIO6_DEB_ENA */ +#define WM8400_GPIO6_DEB_ENA_WIDTH 1 /* GPIO6_DEB_ENA */ +#define WM8400_GPIO6_IRQ_ENA 0x4000 /* GPIO6_IRQ_ENA */ +#define WM8400_GPIO6_IRQ_ENA_MASK 0x4000 /* GPIO6_IRQ_ENA */ 
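The GPIO configuration fields are naturally updated read-modify-write through the wm8400_set_bits() helper declared at the end of this header. A hedged sketch (the wrapper name is invented and the function-select value is caller-supplied, not taken from the datasheet):

/* Route GPIO3 to function 'fn' and enable its pull-up in one update. */
static int wm8400_cfg_gpio3(struct wm8400 *wm8400, u16 fn)
{
	return wm8400_set_bits(wm8400, WM8400_GPIO3_GPIO4,
			       WM8400_GPIO3_SEL_MASK | WM8400_GPIO3_PU_MASK,
			       (fn << WM8400_GPIO3_SEL_SHIFT) | WM8400_GPIO3_PU);
}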
+#define WM8400_GPIO6_IRQ_ENA_SHIFT 14 /* GPIO6_IRQ_ENA */ +#define WM8400_GPIO6_IRQ_ENA_WIDTH 1 /* GPIO6_IRQ_ENA */ +#define WM8400_GPIO6_PU 0x2000 /* GPIO6_PU */ +#define WM8400_GPIO6_PU_MASK 0x2000 /* GPIO6_PU */ +#define WM8400_GPIO6_PU_SHIFT 13 /* GPIO6_PU */ +#define WM8400_GPIO6_PU_WIDTH 1 /* GPIO6_PU */ +#define WM8400_GPIO6_PD 0x1000 /* GPIO6_PD */ +#define WM8400_GPIO6_PD_MASK 0x1000 /* GPIO6_PD */ +#define WM8400_GPIO6_PD_SHIFT 12 /* GPIO6_PD */ +#define WM8400_GPIO6_PD_WIDTH 1 /* GPIO6_PD */ +#define WM8400_GPIO6_SEL_MASK 0x0F00 /* GPIO6_SEL - [11:8] */ +#define WM8400_GPIO6_SEL_SHIFT 8 /* GPIO6_SEL - [11:8] */ +#define WM8400_GPIO6_SEL_WIDTH 4 /* GPIO6_SEL - [11:8] */ +#define WM8400_GPIO5_DEB_ENA 0x0080 /* GPIO5_DEB_ENA */ +#define WM8400_GPIO5_DEB_ENA_MASK 0x0080 /* GPIO5_DEB_ENA */ +#define WM8400_GPIO5_DEB_ENA_SHIFT 7 /* GPIO5_DEB_ENA */ +#define WM8400_GPIO5_DEB_ENA_WIDTH 1 /* GPIO5_DEB_ENA */ +#define WM8400_GPIO5_IRQ_ENA 0x0040 /* GPIO5_IRQ_ENA */ +#define WM8400_GPIO5_IRQ_ENA_MASK 0x0040 /* GPIO5_IRQ_ENA */ +#define WM8400_GPIO5_IRQ_ENA_SHIFT 6 /* GPIO5_IRQ_ENA */ +#define WM8400_GPIO5_IRQ_ENA_WIDTH 1 /* GPIO5_IRQ_ENA */ +#define WM8400_GPIO5_PU 0x0020 /* GPIO5_PU */ +#define WM8400_GPIO5_PU_MASK 0x0020 /* GPIO5_PU */ +#define WM8400_GPIO5_PU_SHIFT 5 /* GPIO5_PU */ +#define WM8400_GPIO5_PU_WIDTH 1 /* GPIO5_PU */ +#define WM8400_GPIO5_PD 0x0010 /* GPIO5_PD */ +#define WM8400_GPIO5_PD_MASK 0x0010 /* GPIO5_PD */ +#define WM8400_GPIO5_PD_SHIFT 4 /* GPIO5_PD */ +#define WM8400_GPIO5_PD_WIDTH 1 /* GPIO5_PD */ +#define WM8400_GPIO5_SEL_MASK 0x000F /* GPIO5_SEL - [3:0] */ +#define WM8400_GPIO5_SEL_SHIFT 0 /* GPIO5_SEL - [3:0] */ +#define WM8400_GPIO5_SEL_WIDTH 4 /* GPIO5_SEL - [3:0] */ + +/* + * R22 (0x16) - GPIOCTRL 2 + */ +#define WM8400_TEMPOK_IRQ_ENA 0x0800 /* TEMPOK_IRQ_ENA */ +#define WM8400_TEMPOK_IRQ_ENA_MASK 0x0800 /* TEMPOK_IRQ_ENA */ +#define WM8400_TEMPOK_IRQ_ENA_SHIFT 11 /* TEMPOK_IRQ_ENA */ +#define WM8400_TEMPOK_IRQ_ENA_WIDTH 1 /* TEMPOK_IRQ_ENA */ +#define WM8400_MIC1SHRT_IRQ_ENA 0x0400 /* MIC1SHRT_IRQ_ENA */ +#define WM8400_MIC1SHRT_IRQ_ENA_MASK 0x0400 /* MIC1SHRT_IRQ_ENA */ +#define WM8400_MIC1SHRT_IRQ_ENA_SHIFT 10 /* MIC1SHRT_IRQ_ENA */ +#define WM8400_MIC1SHRT_IRQ_ENA_WIDTH 1 /* MIC1SHRT_IRQ_ENA */ +#define WM8400_MIC1DET_IRQ_ENA 0x0200 /* MIC1DET_IRQ_ENA */ +#define WM8400_MIC1DET_IRQ_ENA_MASK 0x0200 /* MIC1DET_IRQ_ENA */ +#define WM8400_MIC1DET_IRQ_ENA_SHIFT 9 /* MIC1DET_IRQ_ENA */ +#define WM8400_MIC1DET_IRQ_ENA_WIDTH 1 /* MIC1DET_IRQ_ENA */ +#define WM8400_FLL_LCK_IRQ_ENA 0x0100 /* FLL_LCK_IRQ_ENA */ +#define WM8400_FLL_LCK_IRQ_ENA_MASK 0x0100 /* FLL_LCK_IRQ_ENA */ +#define WM8400_FLL_LCK_IRQ_ENA_SHIFT 8 /* FLL_LCK_IRQ_ENA */ +#define WM8400_FLL_LCK_IRQ_ENA_WIDTH 1 /* FLL_LCK_IRQ_ENA */ +#define WM8400_GPI8_DEB_ENA 0x0080 /* GPI8_DEB_ENA */ +#define WM8400_GPI8_DEB_ENA_MASK 0x0080 /* GPI8_DEB_ENA */ +#define WM8400_GPI8_DEB_ENA_SHIFT 7 /* GPI8_DEB_ENA */ +#define WM8400_GPI8_DEB_ENA_WIDTH 1 /* GPI8_DEB_ENA */ +#define WM8400_GPI8_IRQ_ENA 0x0040 /* GPI8_IRQ_ENA */ +#define WM8400_GPI8_IRQ_ENA_MASK 0x0040 /* GPI8_IRQ_ENA */ +#define WM8400_GPI8_IRQ_ENA_SHIFT 6 /* GPI8_IRQ_ENA */ +#define WM8400_GPI8_IRQ_ENA_WIDTH 1 /* GPI8_IRQ_ENA */ +#define WM8400_GPI8_ENA 0x0010 /* GPI8_ENA */ +#define WM8400_GPI8_ENA_MASK 0x0010 /* GPI8_ENA */ +#define WM8400_GPI8_ENA_SHIFT 4 /* GPI8_ENA */ +#define WM8400_GPI8_ENA_WIDTH 1 /* GPI8_ENA */ +#define WM8400_GPI7_DEB_ENA 0x0008 /* GPI7_DEB_ENA */ +#define WM8400_GPI7_DEB_ENA_MASK 0x0008 /* GPI7_DEB_ENA */ +#define 
WM8400_GPI7_DEB_ENA_SHIFT 3 /* GPI7_DEB_ENA */ +#define WM8400_GPI7_DEB_ENA_WIDTH 1 /* GPI7_DEB_ENA */ +#define WM8400_GPI7_IRQ_ENA 0x0004 /* GPI7_IRQ_ENA */ +#define WM8400_GPI7_IRQ_ENA_MASK 0x0004 /* GPI7_IRQ_ENA */ +#define WM8400_GPI7_IRQ_ENA_SHIFT 2 /* GPI7_IRQ_ENA */ +#define WM8400_GPI7_IRQ_ENA_WIDTH 1 /* GPI7_IRQ_ENA */ +#define WM8400_GPI7_ENA 0x0001 /* GPI7_ENA */ +#define WM8400_GPI7_ENA_MASK 0x0001 /* GPI7_ENA */ +#define WM8400_GPI7_ENA_SHIFT 0 /* GPI7_ENA */ +#define WM8400_GPI7_ENA_WIDTH 1 /* GPI7_ENA */ + +/* + * R23 (0x17) - GPIO_POL + */ +#define WM8400_IRQ_INV 0x1000 /* IRQ_INV */ +#define WM8400_IRQ_INV_MASK 0x1000 /* IRQ_INV */ +#define WM8400_IRQ_INV_SHIFT 12 /* IRQ_INV */ +#define WM8400_IRQ_INV_WIDTH 1 /* IRQ_INV */ +#define WM8400_TEMPOK_POL 0x0800 /* TEMPOK_POL */ +#define WM8400_TEMPOK_POL_MASK 0x0800 /* TEMPOK_POL */ +#define WM8400_TEMPOK_POL_SHIFT 11 /* TEMPOK_POL */ +#define WM8400_TEMPOK_POL_WIDTH 1 /* TEMPOK_POL */ +#define WM8400_MIC1SHRT_POL 0x0400 /* MIC1SHRT_POL */ +#define WM8400_MIC1SHRT_POL_MASK 0x0400 /* MIC1SHRT_POL */ +#define WM8400_MIC1SHRT_POL_SHIFT 10 /* MIC1SHRT_POL */ +#define WM8400_MIC1SHRT_POL_WIDTH 1 /* MIC1SHRT_POL */ +#define WM8400_MIC1DET_POL 0x0200 /* MIC1DET_POL */ +#define WM8400_MIC1DET_POL_MASK 0x0200 /* MIC1DET_POL */ +#define WM8400_MIC1DET_POL_SHIFT 9 /* MIC1DET_POL */ +#define WM8400_MIC1DET_POL_WIDTH 1 /* MIC1DET_POL */ +#define WM8400_FLL_LCK_POL 0x0100 /* FLL_LCK_POL */ +#define WM8400_FLL_LCK_POL_MASK 0x0100 /* FLL_LCK_POL */ +#define WM8400_FLL_LCK_POL_SHIFT 8 /* FLL_LCK_POL */ +#define WM8400_FLL_LCK_POL_WIDTH 1 /* FLL_LCK_POL */ +#define WM8400_GPIO_POL_MASK 0x00FF /* GPIO_POL - [7:0] */ +#define WM8400_GPIO_POL_SHIFT 0 /* GPIO_POL - [7:0] */ +#define WM8400_GPIO_POL_WIDTH 8 /* GPIO_POL - [7:0] */ + +/* + * R65 (0x41) - LDO 1 Control + */ +#define WM8400_LDO1_ENA 0x8000 /* LDO1_ENA */ +#define WM8400_LDO1_ENA_MASK 0x8000 /* LDO1_ENA */ +#define WM8400_LDO1_ENA_SHIFT 15 /* LDO1_ENA */ +#define WM8400_LDO1_ENA_WIDTH 1 /* LDO1_ENA */ +#define WM8400_LDO1_SWI 0x4000 /* LDO1_SWI */ +#define WM8400_LDO1_SWI_MASK 0x4000 /* LDO1_SWI */ +#define WM8400_LDO1_SWI_SHIFT 14 /* LDO1_SWI */ +#define WM8400_LDO1_SWI_WIDTH 1 /* LDO1_SWI */ +#define WM8400_LDO1_OPFLT 0x1000 /* LDO1_OPFLT */ +#define WM8400_LDO1_OPFLT_MASK 0x1000 /* LDO1_OPFLT */ +#define WM8400_LDO1_OPFLT_SHIFT 12 /* LDO1_OPFLT */ +#define WM8400_LDO1_OPFLT_WIDTH 1 /* LDO1_OPFLT */ +#define WM8400_LDO1_ERRACT 0x0800 /* LDO1_ERRACT */ +#define WM8400_LDO1_ERRACT_MASK 0x0800 /* LDO1_ERRACT */ +#define WM8400_LDO1_ERRACT_SHIFT 11 /* LDO1_ERRACT */ +#define WM8400_LDO1_ERRACT_WIDTH 1 /* LDO1_ERRACT */ +#define WM8400_LDO1_HIB_MODE 0x0400 /* LDO1_HIB_MODE */ +#define WM8400_LDO1_HIB_MODE_MASK 0x0400 /* LDO1_HIB_MODE */ +#define WM8400_LDO1_HIB_MODE_SHIFT 10 /* LDO1_HIB_MODE */ +#define WM8400_LDO1_HIB_MODE_WIDTH 1 /* LDO1_HIB_MODE */ +#define WM8400_LDO1_VIMG_MASK 0x03E0 /* LDO1_VIMG - [9:5] */ +#define WM8400_LDO1_VIMG_SHIFT 5 /* LDO1_VIMG - [9:5] */ +#define WM8400_LDO1_VIMG_WIDTH 5 /* LDO1_VIMG - [9:5] */ +#define WM8400_LDO1_VSEL_MASK 0x001F /* LDO1_VSEL - [4:0] */ +#define WM8400_LDO1_VSEL_SHIFT 0 /* LDO1_VSEL - [4:0] */ +#define WM8400_LDO1_VSEL_WIDTH 5 /* LDO1_VSEL - [4:0] */ + +/* + * R66 (0x42) - LDO 2 Control + */ +#define WM8400_LDO2_ENA 0x8000 /* LDO2_ENA */ +#define WM8400_LDO2_ENA_MASK 0x8000 /* LDO2_ENA */ +#define WM8400_LDO2_ENA_SHIFT 15 /* LDO2_ENA */ +#define WM8400_LDO2_ENA_WIDTH 1 /* LDO2_ENA */ +#define WM8400_LDO2_SWI 0x4000 /* LDO2_SWI */ +#define 
WM8400_LDO2_SWI_MASK 0x4000 /* LDO2_SWI */ +#define WM8400_LDO2_SWI_SHIFT 14 /* LDO2_SWI */ +#define WM8400_LDO2_SWI_WIDTH 1 /* LDO2_SWI */ +#define WM8400_LDO2_OPFLT 0x1000 /* LDO2_OPFLT */ +#define WM8400_LDO2_OPFLT_MASK 0x1000 /* LDO2_OPFLT */ +#define WM8400_LDO2_OPFLT_SHIFT 12 /* LDO2_OPFLT */ +#define WM8400_LDO2_OPFLT_WIDTH 1 /* LDO2_OPFLT */ +#define WM8400_LDO2_ERRACT 0x0800 /* LDO2_ERRACT */ +#define WM8400_LDO2_ERRACT_MASK 0x0800 /* LDO2_ERRACT */ +#define WM8400_LDO2_ERRACT_SHIFT 11 /* LDO2_ERRACT */ +#define WM8400_LDO2_ERRACT_WIDTH 1 /* LDO2_ERRACT */ +#define WM8400_LDO2_HIB_MODE 0x0400 /* LDO2_HIB_MODE */ +#define WM8400_LDO2_HIB_MODE_MASK 0x0400 /* LDO2_HIB_MODE */ +#define WM8400_LDO2_HIB_MODE_SHIFT 10 /* LDO2_HIB_MODE */ +#define WM8400_LDO2_HIB_MODE_WIDTH 1 /* LDO2_HIB_MODE */ +#define WM8400_LDO2_VIMG_MASK 0x03E0 /* LDO2_VIMG - [9:5] */ +#define WM8400_LDO2_VIMG_SHIFT 5 /* LDO2_VIMG - [9:5] */ +#define WM8400_LDO2_VIMG_WIDTH 5 /* LDO2_VIMG - [9:5] */ +#define WM8400_LDO2_VSEL_MASK 0x001F /* LDO2_VSEL - [4:0] */ +#define WM8400_LDO2_VSEL_SHIFT 0 /* LDO2_VSEL - [4:0] */ +#define WM8400_LDO2_VSEL_WIDTH 5 /* LDO2_VSEL - [4:0] */ + +/* + * R67 (0x43) - LDO 3 Control + */ +#define WM8400_LDO3_ENA 0x8000 /* LDO3_ENA */ +#define WM8400_LDO3_ENA_MASK 0x8000 /* LDO3_ENA */ +#define WM8400_LDO3_ENA_SHIFT 15 /* LDO3_ENA */ +#define WM8400_LDO3_ENA_WIDTH 1 /* LDO3_ENA */ +#define WM8400_LDO3_SWI 0x4000 /* LDO3_SWI */ +#define WM8400_LDO3_SWI_MASK 0x4000 /* LDO3_SWI */ +#define WM8400_LDO3_SWI_SHIFT 14 /* LDO3_SWI */ +#define WM8400_LDO3_SWI_WIDTH 1 /* LDO3_SWI */ +#define WM8400_LDO3_OPFLT 0x1000 /* LDO3_OPFLT */ +#define WM8400_LDO3_OPFLT_MASK 0x1000 /* LDO3_OPFLT */ +#define WM8400_LDO3_OPFLT_SHIFT 12 /* LDO3_OPFLT */ +#define WM8400_LDO3_OPFLT_WIDTH 1 /* LDO3_OPFLT */ +#define WM8400_LDO3_ERRACT 0x0800 /* LDO3_ERRACT */ +#define WM8400_LDO3_ERRACT_MASK 0x0800 /* LDO3_ERRACT */ +#define WM8400_LDO3_ERRACT_SHIFT 11 /* LDO3_ERRACT */ +#define WM8400_LDO3_ERRACT_WIDTH 1 /* LDO3_ERRACT */ +#define WM8400_LDO3_HIB_MODE 0x0400 /* LDO3_HIB_MODE */ +#define WM8400_LDO3_HIB_MODE_MASK 0x0400 /* LDO3_HIB_MODE */ +#define WM8400_LDO3_HIB_MODE_SHIFT 10 /* LDO3_HIB_MODE */ +#define WM8400_LDO3_HIB_MODE_WIDTH 1 /* LDO3_HIB_MODE */ +#define WM8400_LDO3_VIMG_MASK 0x03E0 /* LDO3_VIMG - [9:5] */ +#define WM8400_LDO3_VIMG_SHIFT 5 /* LDO3_VIMG - [9:5] */ +#define WM8400_LDO3_VIMG_WIDTH 5 /* LDO3_VIMG - [9:5] */ +#define WM8400_LDO3_VSEL_MASK 0x001F /* LDO3_VSEL - [4:0] */ +#define WM8400_LDO3_VSEL_SHIFT 0 /* LDO3_VSEL - [4:0] */ +#define WM8400_LDO3_VSEL_WIDTH 5 /* LDO3_VSEL - [4:0] */ + +/* + * R68 (0x44) - LDO 4 Control + */ +#define WM8400_LDO4_ENA 0x8000 /* LDO4_ENA */ +#define WM8400_LDO4_ENA_MASK 0x8000 /* LDO4_ENA */ +#define WM8400_LDO4_ENA_SHIFT 15 /* LDO4_ENA */ +#define WM8400_LDO4_ENA_WIDTH 1 /* LDO4_ENA */ +#define WM8400_LDO4_SWI 0x4000 /* LDO4_SWI */ +#define WM8400_LDO4_SWI_MASK 0x4000 /* LDO4_SWI */ +#define WM8400_LDO4_SWI_SHIFT 14 /* LDO4_SWI */ +#define WM8400_LDO4_SWI_WIDTH 1 /* LDO4_SWI */ +#define WM8400_LDO4_OPFLT 0x1000 /* LDO4_OPFLT */ +#define WM8400_LDO4_OPFLT_MASK 0x1000 /* LDO4_OPFLT */ +#define WM8400_LDO4_OPFLT_SHIFT 12 /* LDO4_OPFLT */ +#define WM8400_LDO4_OPFLT_WIDTH 1 /* LDO4_OPFLT */ +#define WM8400_LDO4_ERRACT 0x0800 /* LDO4_ERRACT */ +#define WM8400_LDO4_ERRACT_MASK 0x0800 /* LDO4_ERRACT */ +#define WM8400_LDO4_ERRACT_SHIFT 11 /* LDO4_ERRACT */ +#define WM8400_LDO4_ERRACT_WIDTH 1 /* LDO4_ERRACT */ +#define WM8400_LDO4_HIB_MODE 0x0400 /* LDO4_HIB_MODE */ +#define 
WM8400_LDO4_HIB_MODE_MASK 0x0400 /* LDO4_HIB_MODE */ +#define WM8400_LDO4_HIB_MODE_SHIFT 10 /* LDO4_HIB_MODE */ +#define WM8400_LDO4_HIB_MODE_WIDTH 1 /* LDO4_HIB_MODE */ +#define WM8400_LDO4_VIMG_MASK 0x03E0 /* LDO4_VIMG - [9:5] */ +#define WM8400_LDO4_VIMG_SHIFT 5 /* LDO4_VIMG - [9:5] */ +#define WM8400_LDO4_VIMG_WIDTH 5 /* LDO4_VIMG - [9:5] */ +#define WM8400_LDO4_VSEL_MASK 0x001F /* LDO4_VSEL - [4:0] */ +#define WM8400_LDO4_VSEL_SHIFT 0 /* LDO4_VSEL - [4:0] */ +#define WM8400_LDO4_VSEL_WIDTH 5 /* LDO4_VSEL - [4:0] */ + +/* + * R70 (0x46) - DCDC1 Control 1 + */ +#define WM8400_DC1_ENA 0x8000 /* DC1_ENA */ +#define WM8400_DC1_ENA_MASK 0x8000 /* DC1_ENA */ +#define WM8400_DC1_ENA_SHIFT 15 /* DC1_ENA */ +#define WM8400_DC1_ENA_WIDTH 1 /* DC1_ENA */ +#define WM8400_DC1_ACTIVE 0x4000 /* DC1_ACTIVE */ +#define WM8400_DC1_ACTIVE_MASK 0x4000 /* DC1_ACTIVE */ +#define WM8400_DC1_ACTIVE_SHIFT 14 /* DC1_ACTIVE */ +#define WM8400_DC1_ACTIVE_WIDTH 1 /* DC1_ACTIVE */ +#define WM8400_DC1_SLEEP 0x2000 /* DC1_SLEEP */ +#define WM8400_DC1_SLEEP_MASK 0x2000 /* DC1_SLEEP */ +#define WM8400_DC1_SLEEP_SHIFT 13 /* DC1_SLEEP */ +#define WM8400_DC1_SLEEP_WIDTH 1 /* DC1_SLEEP */ +#define WM8400_DC1_OPFLT 0x1000 /* DC1_OPFLT */ +#define WM8400_DC1_OPFLT_MASK 0x1000 /* DC1_OPFLT */ +#define WM8400_DC1_OPFLT_SHIFT 12 /* DC1_OPFLT */ +#define WM8400_DC1_OPFLT_WIDTH 1 /* DC1_OPFLT */ +#define WM8400_DC1_ERRACT 0x0800 /* DC1_ERRACT */ +#define WM8400_DC1_ERRACT_MASK 0x0800 /* DC1_ERRACT */ +#define WM8400_DC1_ERRACT_SHIFT 11 /* DC1_ERRACT */ +#define WM8400_DC1_ERRACT_WIDTH 1 /* DC1_ERRACT */ +#define WM8400_DC1_HIB_MODE 0x0400 /* DC1_HIB_MODE */ +#define WM8400_DC1_HIB_MODE_MASK 0x0400 /* DC1_HIB_MODE */ +#define WM8400_DC1_HIB_MODE_SHIFT 10 /* DC1_HIB_MODE */ +#define WM8400_DC1_HIB_MODE_WIDTH 1 /* DC1_HIB_MODE */ +#define WM8400_DC1_SOFTST_MASK 0x0300 /* DC1_SOFTST - [9:8] */ +#define WM8400_DC1_SOFTST_SHIFT 8 /* DC1_SOFTST - [9:8] */ +#define WM8400_DC1_SOFTST_WIDTH 2 /* DC1_SOFTST - [9:8] */ +#define WM8400_DC1_OV_PROT 0x0080 /* DC1_OV_PROT */ +#define WM8400_DC1_OV_PROT_MASK 0x0080 /* DC1_OV_PROT */ +#define WM8400_DC1_OV_PROT_SHIFT 7 /* DC1_OV_PROT */ +#define WM8400_DC1_OV_PROT_WIDTH 1 /* DC1_OV_PROT */ +#define WM8400_DC1_VSEL_MASK 0x007F /* DC1_VSEL - [6:0] */ +#define WM8400_DC1_VSEL_SHIFT 0 /* DC1_VSEL - [6:0] */ +#define WM8400_DC1_VSEL_WIDTH 7 /* DC1_VSEL - [6:0] */ + +/* + * R71 (0x47) - DCDC1 Control 2 + */ +#define WM8400_DC1_FRC_PWM 0x2000 /* DC1_FRC_PWM */ +#define WM8400_DC1_FRC_PWM_MASK 0x2000 /* DC1_FRC_PWM */ +#define WM8400_DC1_FRC_PWM_SHIFT 13 /* DC1_FRC_PWM */ +#define WM8400_DC1_FRC_PWM_WIDTH 1 /* DC1_FRC_PWM */ +#define WM8400_DC1_STBY_LIM_MASK 0x0300 /* DC1_STBY_LIM - [9:8] */ +#define WM8400_DC1_STBY_LIM_SHIFT 8 /* DC1_STBY_LIM - [9:8] */ +#define WM8400_DC1_STBY_LIM_WIDTH 2 /* DC1_STBY_LIM - [9:8] */ +#define WM8400_DC1_ACT_LIM 0x0080 /* DC1_ACT_LIM */ +#define WM8400_DC1_ACT_LIM_MASK 0x0080 /* DC1_ACT_LIM */ +#define WM8400_DC1_ACT_LIM_SHIFT 7 /* DC1_ACT_LIM */ +#define WM8400_DC1_ACT_LIM_WIDTH 1 /* DC1_ACT_LIM */ +#define WM8400_DC1_VIMG_MASK 0x007F /* DC1_VIMG - [6:0] */ +#define WM8400_DC1_VIMG_SHIFT 0 /* DC1_VIMG - [6:0] */ +#define WM8400_DC1_VIMG_WIDTH 7 /* DC1_VIMG - [6:0] */ + +/* + * R72 (0x48) - DCDC2 Control 1 + */ +#define WM8400_DC2_ENA 0x8000 /* DC2_ENA */ +#define WM8400_DC2_ENA_MASK 0x8000 /* DC2_ENA */ +#define WM8400_DC2_ENA_SHIFT 15 /* DC2_ENA */ +#define WM8400_DC2_ENA_WIDTH 1 /* DC2_ENA */ +#define WM8400_DC2_ACTIVE 0x4000 /* DC2_ACTIVE */ +#define 
WM8400_DC2_ACTIVE_MASK 0x4000 /* DC2_ACTIVE */ +#define WM8400_DC2_ACTIVE_SHIFT 14 /* DC2_ACTIVE */ +#define WM8400_DC2_ACTIVE_WIDTH 1 /* DC2_ACTIVE */ +#define WM8400_DC2_SLEEP 0x2000 /* DC2_SLEEP */ +#define WM8400_DC2_SLEEP_MASK 0x2000 /* DC2_SLEEP */ +#define WM8400_DC2_SLEEP_SHIFT 13 /* DC2_SLEEP */ +#define WM8400_DC2_SLEEP_WIDTH 1 /* DC2_SLEEP */ +#define WM8400_DC2_OPFLT 0x1000 /* DC2_OPFLT */ +#define WM8400_DC2_OPFLT_MASK 0x1000 /* DC2_OPFLT */ +#define WM8400_DC2_OPFLT_SHIFT 12 /* DC2_OPFLT */ +#define WM8400_DC2_OPFLT_WIDTH 1 /* DC2_OPFLT */ +#define WM8400_DC2_ERRACT 0x0800 /* DC2_ERRACT */ +#define WM8400_DC2_ERRACT_MASK 0x0800 /* DC2_ERRACT */ +#define WM8400_DC2_ERRACT_SHIFT 11 /* DC2_ERRACT */ +#define WM8400_DC2_ERRACT_WIDTH 1 /* DC2_ERRACT */ +#define WM8400_DC2_HIB_MODE 0x0400 /* DC2_HIB_MODE */ +#define WM8400_DC2_HIB_MODE_MASK 0x0400 /* DC2_HIB_MODE */ +#define WM8400_DC2_HIB_MODE_SHIFT 10 /* DC2_HIB_MODE */ +#define WM8400_DC2_HIB_MODE_WIDTH 1 /* DC2_HIB_MODE */ +#define WM8400_DC2_SOFTST_MASK 0x0300 /* DC2_SOFTST - [9:8] */ +#define WM8400_DC2_SOFTST_SHIFT 8 /* DC2_SOFTST - [9:8] */ +#define WM8400_DC2_SOFTST_WIDTH 2 /* DC2_SOFTST - [9:8] */ +#define WM8400_DC2_OV_PROT 0x0080 /* DC2_OV_PROT */ +#define WM8400_DC2_OV_PROT_MASK 0x0080 /* DC2_OV_PROT */ +#define WM8400_DC2_OV_PROT_SHIFT 7 /* DC2_OV_PROT */ +#define WM8400_DC2_OV_PROT_WIDTH 1 /* DC2_OV_PROT */ +#define WM8400_DC2_VSEL_MASK 0x007F /* DC2_VSEL - [6:0] */ +#define WM8400_DC2_VSEL_SHIFT 0 /* DC2_VSEL - [6:0] */ +#define WM8400_DC2_VSEL_WIDTH 7 /* DC2_VSEL - [6:0] */ + +/* + * R73 (0x49) - DCDC2 Control 2 + */ +#define WM8400_DC2_FRC_PWM 0x2000 /* DC2_FRC_PWM */ +#define WM8400_DC2_FRC_PWM_MASK 0x2000 /* DC2_FRC_PWM */ +#define WM8400_DC2_FRC_PWM_SHIFT 13 /* DC2_FRC_PWM */ +#define WM8400_DC2_FRC_PWM_WIDTH 1 /* DC2_FRC_PWM */ +#define WM8400_DC2_STBY_LIM_MASK 0x0300 /* DC2_STBY_LIM - [9:8] */ +#define WM8400_DC2_STBY_LIM_SHIFT 8 /* DC2_STBY_LIM - [9:8] */ +#define WM8400_DC2_STBY_LIM_WIDTH 2 /* DC2_STBY_LIM - [9:8] */ +#define WM8400_DC2_ACT_LIM 0x0080 /* DC2_ACT_LIM */ +#define WM8400_DC2_ACT_LIM_MASK 0x0080 /* DC2_ACT_LIM */ +#define WM8400_DC2_ACT_LIM_SHIFT 7 /* DC2_ACT_LIM */ +#define WM8400_DC2_ACT_LIM_WIDTH 1 /* DC2_ACT_LIM */ +#define WM8400_DC2_VIMG_MASK 0x007F /* DC2_VIMG - [6:0] */ +#define WM8400_DC2_VIMG_SHIFT 0 /* DC2_VIMG - [6:0] */ +#define WM8400_DC2_VIMG_WIDTH 7 /* DC2_VIMG - [6:0] */ + +/* + * R75 (0x4B) - Interface + */ +#define WM8400_AUTOINC 0x0008 /* AUTOINC */ +#define WM8400_AUTOINC_MASK 0x0008 /* AUTOINC */ +#define WM8400_AUTOINC_SHIFT 3 /* AUTOINC */ +#define WM8400_AUTOINC_WIDTH 1 /* AUTOINC */ +#define WM8400_ARA_ENA 0x0004 /* ARA_ENA */ +#define WM8400_ARA_ENA_MASK 0x0004 /* ARA_ENA */ +#define WM8400_ARA_ENA_SHIFT 2 /* ARA_ENA */ +#define WM8400_ARA_ENA_WIDTH 1 /* ARA_ENA */ +#define WM8400_SPI_CFG 0x0002 /* SPI_CFG */ +#define WM8400_SPI_CFG_MASK 0x0002 /* SPI_CFG */ +#define WM8400_SPI_CFG_SHIFT 1 /* SPI_CFG */ +#define WM8400_SPI_CFG_WIDTH 1 /* SPI_CFG */ + +/* + * R76 (0x4C) - PM GENERAL + */ +#define WM8400_CODEC_SOFTST 0x8000 /* CODEC_SOFTST */ +#define WM8400_CODEC_SOFTST_MASK 0x8000 /* CODEC_SOFTST */ +#define WM8400_CODEC_SOFTST_SHIFT 15 /* CODEC_SOFTST */ +#define WM8400_CODEC_SOFTST_WIDTH 1 /* CODEC_SOFTST */ +#define WM8400_CODEC_SOFTSD 0x4000 /* CODEC_SOFTSD */ +#define WM8400_CODEC_SOFTSD_MASK 0x4000 /* CODEC_SOFTSD */ +#define WM8400_CODEC_SOFTSD_SHIFT 14 /* CODEC_SOFTSD */ +#define WM8400_CODEC_SOFTSD_WIDTH 1 /* CODEC_SOFTSD */ +#define WM8400_CHIP_SOFTSD 0x2000 
/* CHIP_SOFTSD */ +#define WM8400_CHIP_SOFTSD_MASK 0x2000 /* CHIP_SOFTSD */ +#define WM8400_CHIP_SOFTSD_SHIFT 13 /* CHIP_SOFTSD */ +#define WM8400_CHIP_SOFTSD_WIDTH 1 /* CHIP_SOFTSD */ +#define WM8400_DSLEEP1_POL 0x0008 /* DSLEEP1_POL */ +#define WM8400_DSLEEP1_POL_MASK 0x0008 /* DSLEEP1_POL */ +#define WM8400_DSLEEP1_POL_SHIFT 3 /* DSLEEP1_POL */ +#define WM8400_DSLEEP1_POL_WIDTH 1 /* DSLEEP1_POL */ +#define WM8400_DSLEEP2_POL 0x0004 /* DSLEEP2_POL */ +#define WM8400_DSLEEP2_POL_MASK 0x0004 /* DSLEEP2_POL */ +#define WM8400_DSLEEP2_POL_SHIFT 2 /* DSLEEP2_POL */ +#define WM8400_DSLEEP2_POL_WIDTH 1 /* DSLEEP2_POL */ +#define WM8400_PWR_STATE_MASK 0x0003 /* PWR_STATE - [1:0] */ +#define WM8400_PWR_STATE_SHIFT 0 /* PWR_STATE - [1:0] */ +#define WM8400_PWR_STATE_WIDTH 2 /* PWR_STATE - [1:0] */ + +/* + * R78 (0x4E) - PM Shutdown Control + */ +#define WM8400_CHIP_GT150_ERRACT 0x0200 /* CHIP_GT150_ERRACT */ +#define WM8400_CHIP_GT150_ERRACT_MASK 0x0200 /* CHIP_GT150_ERRACT */ +#define WM8400_CHIP_GT150_ERRACT_SHIFT 9 /* CHIP_GT150_ERRACT */ +#define WM8400_CHIP_GT150_ERRACT_WIDTH 1 /* CHIP_GT150_ERRACT */ +#define WM8400_CHIP_GT115_ERRACT 0x0100 /* CHIP_GT115_ERRACT */ +#define WM8400_CHIP_GT115_ERRACT_MASK 0x0100 /* CHIP_GT115_ERRACT */ +#define WM8400_CHIP_GT115_ERRACT_SHIFT 8 /* CHIP_GT115_ERRACT */ +#define WM8400_CHIP_GT115_ERRACT_WIDTH 1 /* CHIP_GT115_ERRACT */ +#define WM8400_LINE_CMP_ERRACT 0x0080 /* LINE_CMP_ERRACT */ +#define WM8400_LINE_CMP_ERRACT_MASK 0x0080 /* LINE_CMP_ERRACT */ +#define WM8400_LINE_CMP_ERRACT_SHIFT 7 /* LINE_CMP_ERRACT */ +#define WM8400_LINE_CMP_ERRACT_WIDTH 1 /* LINE_CMP_ERRACT */ +#define WM8400_UVLO_ERRACT 0x0040 /* UVLO_ERRACT */ +#define WM8400_UVLO_ERRACT_MASK 0x0040 /* UVLO_ERRACT */ +#define WM8400_UVLO_ERRACT_SHIFT 6 /* UVLO_ERRACT */ +#define WM8400_UVLO_ERRACT_WIDTH 1 /* UVLO_ERRACT */ + +/* + * R79 (0x4F) - Interrupt Status 1 + */ +#define WM8400_MICD_CINT 0x8000 /* MICD_CINT */ +#define WM8400_MICD_CINT_MASK 0x8000 /* MICD_CINT */ +#define WM8400_MICD_CINT_SHIFT 15 /* MICD_CINT */ +#define WM8400_MICD_CINT_WIDTH 1 /* MICD_CINT */ +#define WM8400_MICSCD_CINT 0x4000 /* MICSCD_CINT */ +#define WM8400_MICSCD_CINT_MASK 0x4000 /* MICSCD_CINT */ +#define WM8400_MICSCD_CINT_SHIFT 14 /* MICSCD_CINT */ +#define WM8400_MICSCD_CINT_WIDTH 1 /* MICSCD_CINT */ +#define WM8400_JDL_CINT 0x2000 /* JDL_CINT */ +#define WM8400_JDL_CINT_MASK 0x2000 /* JDL_CINT */ +#define WM8400_JDL_CINT_SHIFT 13 /* JDL_CINT */ +#define WM8400_JDL_CINT_WIDTH 1 /* JDL_CINT */ +#define WM8400_JDR_CINT 0x1000 /* JDR_CINT */ +#define WM8400_JDR_CINT_MASK 0x1000 /* JDR_CINT */ +#define WM8400_JDR_CINT_SHIFT 12 /* JDR_CINT */ +#define WM8400_JDR_CINT_WIDTH 1 /* JDR_CINT */ +#define WM8400_CODEC_SEQ_END_EINT 0x0800 /* CODEC_SEQ_END_EINT */ +#define WM8400_CODEC_SEQ_END_EINT_MASK 0x0800 /* CODEC_SEQ_END_EINT */ +#define WM8400_CODEC_SEQ_END_EINT_SHIFT 11 /* CODEC_SEQ_END_EINT */ +#define WM8400_CODEC_SEQ_END_EINT_WIDTH 1 /* CODEC_SEQ_END_EINT */ +#define WM8400_CDEL_TO_EINT 0x0400 /* CDEL_TO_EINT */ +#define WM8400_CDEL_TO_EINT_MASK 0x0400 /* CDEL_TO_EINT */ +#define WM8400_CDEL_TO_EINT_SHIFT 10 /* CDEL_TO_EINT */ +#define WM8400_CDEL_TO_EINT_WIDTH 1 /* CDEL_TO_EINT */ +#define WM8400_CHIP_GT150_EINT 0x0200 /* CHIP_GT150_EINT */ +#define WM8400_CHIP_GT150_EINT_MASK 0x0200 /* CHIP_GT150_EINT */ +#define WM8400_CHIP_GT150_EINT_SHIFT 9 /* CHIP_GT150_EINT */ +#define WM8400_CHIP_GT150_EINT_WIDTH 1 /* CHIP_GT150_EINT */ +#define WM8400_CHIP_GT115_EINT 0x0100 /* CHIP_GT115_EINT */ +#define 
WM8400_CHIP_GT115_EINT_MASK 0x0100 /* CHIP_GT115_EINT */ +#define WM8400_CHIP_GT115_EINT_SHIFT 8 /* CHIP_GT115_EINT */ +#define WM8400_CHIP_GT115_EINT_WIDTH 1 /* CHIP_GT115_EINT */ +#define WM8400_LINE_CMP_EINT 0x0080 /* LINE_CMP_EINT */ +#define WM8400_LINE_CMP_EINT_MASK 0x0080 /* LINE_CMP_EINT */ +#define WM8400_LINE_CMP_EINT_SHIFT 7 /* LINE_CMP_EINT */ +#define WM8400_LINE_CMP_EINT_WIDTH 1 /* LINE_CMP_EINT */ +#define WM8400_UVLO_EINT 0x0040 /* UVLO_EINT */ +#define WM8400_UVLO_EINT_MASK 0x0040 /* UVLO_EINT */ +#define WM8400_UVLO_EINT_SHIFT 6 /* UVLO_EINT */ +#define WM8400_UVLO_EINT_WIDTH 1 /* UVLO_EINT */ +#define WM8400_DC2_UV_EINT 0x0020 /* DC2_UV_EINT */ +#define WM8400_DC2_UV_EINT_MASK 0x0020 /* DC2_UV_EINT */ +#define WM8400_DC2_UV_EINT_SHIFT 5 /* DC2_UV_EINT */ +#define WM8400_DC2_UV_EINT_WIDTH 1 /* DC2_UV_EINT */ +#define WM8400_DC1_UV_EINT 0x0010 /* DC1_UV_EINT */ +#define WM8400_DC1_UV_EINT_MASK 0x0010 /* DC1_UV_EINT */ +#define WM8400_DC1_UV_EINT_SHIFT 4 /* DC1_UV_EINT */ +#define WM8400_DC1_UV_EINT_WIDTH 1 /* DC1_UV_EINT */ +#define WM8400_LDO4_UV_EINT 0x0008 /* LDO4_UV_EINT */ +#define WM8400_LDO4_UV_EINT_MASK 0x0008 /* LDO4_UV_EINT */ +#define WM8400_LDO4_UV_EINT_SHIFT 3 /* LDO4_UV_EINT */ +#define WM8400_LDO4_UV_EINT_WIDTH 1 /* LDO4_UV_EINT */ +#define WM8400_LDO3_UV_EINT 0x0004 /* LDO3_UV_EINT */ +#define WM8400_LDO3_UV_EINT_MASK 0x0004 /* LDO3_UV_EINT */ +#define WM8400_LDO3_UV_EINT_SHIFT 2 /* LDO3_UV_EINT */ +#define WM8400_LDO3_UV_EINT_WIDTH 1 /* LDO3_UV_EINT */ +#define WM8400_LDO2_UV_EINT 0x0002 /* LDO2_UV_EINT */ +#define WM8400_LDO2_UV_EINT_MASK 0x0002 /* LDO2_UV_EINT */ +#define WM8400_LDO2_UV_EINT_SHIFT 1 /* LDO2_UV_EINT */ +#define WM8400_LDO2_UV_EINT_WIDTH 1 /* LDO2_UV_EINT */ +#define WM8400_LDO1_UV_EINT 0x0001 /* LDO1_UV_EINT */ +#define WM8400_LDO1_UV_EINT_MASK 0x0001 /* LDO1_UV_EINT */ +#define WM8400_LDO1_UV_EINT_SHIFT 0 /* LDO1_UV_EINT */ +#define WM8400_LDO1_UV_EINT_WIDTH 1 /* LDO1_UV_EINT */ + +/* + * R80 (0x50) - Interrupt Status 1 Mask + */ +#define WM8400_IM_MICD_CINT 0x8000 /* IM_MICD_CINT */ +#define WM8400_IM_MICD_CINT_MASK 0x8000 /* IM_MICD_CINT */ +#define WM8400_IM_MICD_CINT_SHIFT 15 /* IM_MICD_CINT */ +#define WM8400_IM_MICD_CINT_WIDTH 1 /* IM_MICD_CINT */ +#define WM8400_IM_MICSCD_CINT 0x4000 /* IM_MICSCD_CINT */ +#define WM8400_IM_MICSCD_CINT_MASK 0x4000 /* IM_MICSCD_CINT */ +#define WM8400_IM_MICSCD_CINT_SHIFT 14 /* IM_MICSCD_CINT */ +#define WM8400_IM_MICSCD_CINT_WIDTH 1 /* IM_MICSCD_CINT */ +#define WM8400_IM_JDL_CINT 0x2000 /* IM_JDL_CINT */ +#define WM8400_IM_JDL_CINT_MASK 0x2000 /* IM_JDL_CINT */ +#define WM8400_IM_JDL_CINT_SHIFT 13 /* IM_JDL_CINT */ +#define WM8400_IM_JDL_CINT_WIDTH 1 /* IM_JDL_CINT */ +#define WM8400_IM_JDR_CINT 0x1000 /* IM_JDR_CINT */ +#define WM8400_IM_JDR_CINT_MASK 0x1000 /* IM_JDR_CINT */ +#define WM8400_IM_JDR_CINT_SHIFT 12 /* IM_JDR_CINT */ +#define WM8400_IM_JDR_CINT_WIDTH 1 /* IM_JDR_CINT */ +#define WM8400_IM_CODEC_SEQ_END_EINT 0x0800 /* IM_CODEC_SEQ_END_EINT */ +#define WM8400_IM_CODEC_SEQ_END_EINT_MASK 0x0800 /* IM_CODEC_SEQ_END_EINT */ +#define WM8400_IM_CODEC_SEQ_END_EINT_SHIFT 11 /* IM_CODEC_SEQ_END_EINT */ +#define WM8400_IM_CODEC_SEQ_END_EINT_WIDTH 1 /* IM_CODEC_SEQ_END_EINT */ +#define WM8400_IM_CDEL_TO_EINT 0x0400 /* IM_CDEL_TO_EINT */ +#define WM8400_IM_CDEL_TO_EINT_MASK 0x0400 /* IM_CDEL_TO_EINT */ +#define WM8400_IM_CDEL_TO_EINT_SHIFT 10 /* IM_CDEL_TO_EINT */ +#define WM8400_IM_CDEL_TO_EINT_WIDTH 1 /* IM_CDEL_TO_EINT */ +#define WM8400_IM_CHIP_GT150_EINT 0x0200 /* IM_CHIP_GT150_EINT */ 
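R80 mirrors R79 bit for bit, with each IM_ bit gating the corresponding interrupt source. A minimal sketch of masking the microphone-detect interrupt using the same wm8400_set_bits() helper (the wrapper name is invented):

/* Mask the MICD interrupt by setting its IM_ bit in R80 (0x50). */
static int wm8400_mask_micd_irq(struct wm8400 *wm8400)
{
	return wm8400_set_bits(wm8400, WM8400_INTERRUPT_STATUS_1_MASK,
			       WM8400_IM_MICD_CINT_MASK, WM8400_IM_MICD_CINT);
}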
+#define WM8400_IM_CHIP_GT150_EINT_MASK 0x0200 /* IM_CHIP_GT150_EINT */ +#define WM8400_IM_CHIP_GT150_EINT_SHIFT 9 /* IM_CHIP_GT150_EINT */ +#define WM8400_IM_CHIP_GT150_EINT_WIDTH 1 /* IM_CHIP_GT150_EINT */ +#define WM8400_IM_CHIP_GT115_EINT 0x0100 /* IM_CHIP_GT115_EINT */ +#define WM8400_IM_CHIP_GT115_EINT_MASK 0x0100 /* IM_CHIP_GT115_EINT */ +#define WM8400_IM_CHIP_GT115_EINT_SHIFT 8 /* IM_CHIP_GT115_EINT */ +#define WM8400_IM_CHIP_GT115_EINT_WIDTH 1 /* IM_CHIP_GT115_EINT */ +#define WM8400_IM_LINE_CMP_EINT 0x0080 /* IM_LINE_CMP_EINT */ +#define WM8400_IM_LINE_CMP_EINT_MASK 0x0080 /* IM_LINE_CMP_EINT */ +#define WM8400_IM_LINE_CMP_EINT_SHIFT 7 /* IM_LINE_CMP_EINT */ +#define WM8400_IM_LINE_CMP_EINT_WIDTH 1 /* IM_LINE_CMP_EINT */ +#define WM8400_IM_UVLO_EINT 0x0040 /* IM_UVLO_EINT */ +#define WM8400_IM_UVLO_EINT_MASK 0x0040 /* IM_UVLO_EINT */ +#define WM8400_IM_UVLO_EINT_SHIFT 6 /* IM_UVLO_EINT */ +#define WM8400_IM_UVLO_EINT_WIDTH 1 /* IM_UVLO_EINT */ +#define WM8400_IM_DC2_UV_EINT 0x0020 /* IM_DC2_UV_EINT */ +#define WM8400_IM_DC2_UV_EINT_MASK 0x0020 /* IM_DC2_UV_EINT */ +#define WM8400_IM_DC2_UV_EINT_SHIFT 5 /* IM_DC2_UV_EINT */ +#define WM8400_IM_DC2_UV_EINT_WIDTH 1 /* IM_DC2_UV_EINT */ +#define WM8400_IM_DC1_UV_EINT 0x0010 /* IM_DC1_UV_EINT */ +#define WM8400_IM_DC1_UV_EINT_MASK 0x0010 /* IM_DC1_UV_EINT */ +#define WM8400_IM_DC1_UV_EINT_SHIFT 4 /* IM_DC1_UV_EINT */ +#define WM8400_IM_DC1_UV_EINT_WIDTH 1 /* IM_DC1_UV_EINT */ +#define WM8400_IM_LDO4_UV_EINT 0x0008 /* IM_LDO4_UV_EINT */ +#define WM8400_IM_LDO4_UV_EINT_MASK 0x0008 /* IM_LDO4_UV_EINT */ +#define WM8400_IM_LDO4_UV_EINT_SHIFT 3 /* IM_LDO4_UV_EINT */ +#define WM8400_IM_LDO4_UV_EINT_WIDTH 1 /* IM_LDO4_UV_EINT */ +#define WM8400_IM_LDO3_UV_EINT 0x0004 /* IM_LDO3_UV_EINT */ +#define WM8400_IM_LDO3_UV_EINT_MASK 0x0004 /* IM_LDO3_UV_EINT */ +#define WM8400_IM_LDO3_UV_EINT_SHIFT 2 /* IM_LDO3_UV_EINT */ +#define WM8400_IM_LDO3_UV_EINT_WIDTH 1 /* IM_LDO3_UV_EINT */ +#define WM8400_IM_LDO2_UV_EINT 0x0002 /* IM_LDO2_UV_EINT */ +#define WM8400_IM_LDO2_UV_EINT_MASK 0x0002 /* IM_LDO2_UV_EINT */ +#define WM8400_IM_LDO2_UV_EINT_SHIFT 1 /* IM_LDO2_UV_EINT */ +#define WM8400_IM_LDO2_UV_EINT_WIDTH 1 /* IM_LDO2_UV_EINT */ +#define WM8400_IM_LDO1_UV_EINT 0x0001 /* IM_LDO1_UV_EINT */ +#define WM8400_IM_LDO1_UV_EINT_MASK 0x0001 /* IM_LDO1_UV_EINT */ +#define WM8400_IM_LDO1_UV_EINT_SHIFT 0 /* IM_LDO1_UV_EINT */ +#define WM8400_IM_LDO1_UV_EINT_WIDTH 1 /* IM_LDO1_UV_EINT */ + +/* + * R81 (0x51) - Interrupt Levels + */ +#define WM8400_MICD_LVL 0x8000 /* MICD_LVL */ +#define WM8400_MICD_LVL_MASK 0x8000 /* MICD_LVL */ +#define WM8400_MICD_LVL_SHIFT 15 /* MICD_LVL */ +#define WM8400_MICD_LVL_WIDTH 1 /* MICD_LVL */ +#define WM8400_MICSCD_LVL 0x4000 /* MICSCD_LVL */ +#define WM8400_MICSCD_LVL_MASK 0x4000 /* MICSCD_LVL */ +#define WM8400_MICSCD_LVL_SHIFT 14 /* MICSCD_LVL */ +#define WM8400_MICSCD_LVL_WIDTH 1 /* MICSCD_LVL */ +#define WM8400_JDL_LVL 0x2000 /* JDL_LVL */ +#define WM8400_JDL_LVL_MASK 0x2000 /* JDL_LVL */ +#define WM8400_JDL_LVL_SHIFT 13 /* JDL_LVL */ +#define WM8400_JDL_LVL_WIDTH 1 /* JDL_LVL */ +#define WM8400_JDR_LVL 0x1000 /* JDR_LVL */ +#define WM8400_JDR_LVL_MASK 0x1000 /* JDR_LVL */ +#define WM8400_JDR_LVL_SHIFT 12 /* JDR_LVL */ +#define WM8400_JDR_LVL_WIDTH 1 /* JDR_LVL */ +#define WM8400_CODEC_SEQ_END_LVL 0x0800 /* CODEC_SEQ_END_LVL */ +#define WM8400_CODEC_SEQ_END_LVL_MASK 0x0800 /* CODEC_SEQ_END_LVL */ +#define WM8400_CODEC_SEQ_END_LVL_SHIFT 11 /* CODEC_SEQ_END_LVL */ +#define WM8400_CODEC_SEQ_END_LVL_WIDTH 1 /* CODEC_SEQ_END_LVL 
*/ +#define WM8400_CDEL_TO_LVL 0x0400 /* CDEL_TO_LVL */ +#define WM8400_CDEL_TO_LVL_MASK 0x0400 /* CDEL_TO_LVL */ +#define WM8400_CDEL_TO_LVL_SHIFT 10 /* CDEL_TO_LVL */ +#define WM8400_CDEL_TO_LVL_WIDTH 1 /* CDEL_TO_LVL */ +#define WM8400_CHIP_GT150_LVL 0x0200 /* CHIP_GT150_LVL */ +#define WM8400_CHIP_GT150_LVL_MASK 0x0200 /* CHIP_GT150_LVL */ +#define WM8400_CHIP_GT150_LVL_SHIFT 9 /* CHIP_GT150_LVL */ +#define WM8400_CHIP_GT150_LVL_WIDTH 1 /* CHIP_GT150_LVL */ +#define WM8400_CHIP_GT115_LVL 0x0100 /* CHIP_GT115_LVL */ +#define WM8400_CHIP_GT115_LVL_MASK 0x0100 /* CHIP_GT115_LVL */ +#define WM8400_CHIP_GT115_LVL_SHIFT 8 /* CHIP_GT115_LVL */ +#define WM8400_CHIP_GT115_LVL_WIDTH 1 /* CHIP_GT115_LVL */ +#define WM8400_LINE_CMP_LVL 0x0080 /* LINE_CMP_LVL */ +#define WM8400_LINE_CMP_LVL_MASK 0x0080 /* LINE_CMP_LVL */ +#define WM8400_LINE_CMP_LVL_SHIFT 7 /* LINE_CMP_LVL */ +#define WM8400_LINE_CMP_LVL_WIDTH 1 /* LINE_CMP_LVL */ +#define WM8400_UVLO_LVL 0x0040 /* UVLO_LVL */ +#define WM8400_UVLO_LVL_MASK 0x0040 /* UVLO_LVL */ +#define WM8400_UVLO_LVL_SHIFT 6 /* UVLO_LVL */ +#define WM8400_UVLO_LVL_WIDTH 1 /* UVLO_LVL */ +#define WM8400_DC2_UV_LVL 0x0020 /* DC2_UV_LVL */ +#define WM8400_DC2_UV_LVL_MASK 0x0020 /* DC2_UV_LVL */ +#define WM8400_DC2_UV_LVL_SHIFT 5 /* DC2_UV_LVL */ +#define WM8400_DC2_UV_LVL_WIDTH 1 /* DC2_UV_LVL */ +#define WM8400_DC1_UV_LVL 0x0010 /* DC1_UV_LVL */ +#define WM8400_DC1_UV_LVL_MASK 0x0010 /* DC1_UV_LVL */ +#define WM8400_DC1_UV_LVL_SHIFT 4 /* DC1_UV_LVL */ +#define WM8400_DC1_UV_LVL_WIDTH 1 /* DC1_UV_LVL */ +#define WM8400_LDO4_UV_LVL 0x0008 /* LDO4_UV_LVL */ +#define WM8400_LDO4_UV_LVL_MASK 0x0008 /* LDO4_UV_LVL */ +#define WM8400_LDO4_UV_LVL_SHIFT 3 /* LDO4_UV_LVL */ +#define WM8400_LDO4_UV_LVL_WIDTH 1 /* LDO4_UV_LVL */ +#define WM8400_LDO3_UV_LVL 0x0004 /* LDO3_UV_LVL */ +#define WM8400_LDO3_UV_LVL_MASK 0x0004 /* LDO3_UV_LVL */ +#define WM8400_LDO3_UV_LVL_SHIFT 2 /* LDO3_UV_LVL */ +#define WM8400_LDO3_UV_LVL_WIDTH 1 /* LDO3_UV_LVL */ +#define WM8400_LDO2_UV_LVL 0x0002 /* LDO2_UV_LVL */ +#define WM8400_LDO2_UV_LVL_MASK 0x0002 /* LDO2_UV_LVL */ +#define WM8400_LDO2_UV_LVL_SHIFT 1 /* LDO2_UV_LVL */ +#define WM8400_LDO2_UV_LVL_WIDTH 1 /* LDO2_UV_LVL */ +#define WM8400_LDO1_UV_LVL 0x0001 /* LDO1_UV_LVL */ +#define WM8400_LDO1_UV_LVL_MASK 0x0001 /* LDO1_UV_LVL */ +#define WM8400_LDO1_UV_LVL_SHIFT 0 /* LDO1_UV_LVL */ +#define WM8400_LDO1_UV_LVL_WIDTH 1 /* LDO1_UV_LVL */ + +/* + * R82 (0x52) - Shutdown Reason + */ +#define WM8400_SDR_CHIP_SOFTSD 0x2000 /* SDR_CHIP_SOFTSD */ +#define WM8400_SDR_CHIP_SOFTSD_MASK 0x2000 /* SDR_CHIP_SOFTSD */ +#define WM8400_SDR_CHIP_SOFTSD_SHIFT 13 /* SDR_CHIP_SOFTSD */ +#define WM8400_SDR_CHIP_SOFTSD_WIDTH 1 /* SDR_CHIP_SOFTSD */ +#define WM8400_SDR_NPDN 0x0800 /* SDR_NPDN */ +#define WM8400_SDR_NPDN_MASK 0x0800 /* SDR_NPDN */ +#define WM8400_SDR_NPDN_SHIFT 11 /* SDR_NPDN */ +#define WM8400_SDR_NPDN_WIDTH 1 /* SDR_NPDN */ +#define WM8400_SDR_CHIP_GT150 0x0200 /* SDR_CHIP_GT150 */ +#define WM8400_SDR_CHIP_GT150_MASK 0x0200 /* SDR_CHIP_GT150 */ +#define WM8400_SDR_CHIP_GT150_SHIFT 9 /* SDR_CHIP_GT150 */ +#define WM8400_SDR_CHIP_GT150_WIDTH 1 /* SDR_CHIP_GT150 */ +#define WM8400_SDR_CHIP_GT115 0x0100 /* SDR_CHIP_GT115 */ +#define WM8400_SDR_CHIP_GT115_MASK 0x0100 /* SDR_CHIP_GT115 */ +#define WM8400_SDR_CHIP_GT115_SHIFT 8 /* SDR_CHIP_GT115 */ +#define WM8400_SDR_CHIP_GT115_WIDTH 1 /* SDR_CHIP_GT115 */ +#define WM8400_SDR_LINE_CMP 0x0080 /* SDR_LINE_CMP */ +#define WM8400_SDR_LINE_CMP_MASK 0x0080 /* SDR_LINE_CMP */ +#define 
WM8400_SDR_LINE_CMP_SHIFT 7 /* SDR_LINE_CMP */ +#define WM8400_SDR_LINE_CMP_WIDTH 1 /* SDR_LINE_CMP */ +#define WM8400_SDR_UVLO 0x0040 /* SDR_UVLO */ +#define WM8400_SDR_UVLO_MASK 0x0040 /* SDR_UVLO */ +#define WM8400_SDR_UVLO_SHIFT 6 /* SDR_UVLO */ +#define WM8400_SDR_UVLO_WIDTH 1 /* SDR_UVLO */ +#define WM8400_SDR_DC2_UV 0x0020 /* SDR_DC2_UV */ +#define WM8400_SDR_DC2_UV_MASK 0x0020 /* SDR_DC2_UV */ +#define WM8400_SDR_DC2_UV_SHIFT 5 /* SDR_DC2_UV */ +#define WM8400_SDR_DC2_UV_WIDTH 1 /* SDR_DC2_UV */ +#define WM8400_SDR_DC1_UV 0x0010 /* SDR_DC1_UV */ +#define WM8400_SDR_DC1_UV_MASK 0x0010 /* SDR_DC1_UV */ +#define WM8400_SDR_DC1_UV_SHIFT 4 /* SDR_DC1_UV */ +#define WM8400_SDR_DC1_UV_WIDTH 1 /* SDR_DC1_UV */ +#define WM8400_SDR_LDO4_UV 0x0008 /* SDR_LDO4_UV */ +#define WM8400_SDR_LDO4_UV_MASK 0x0008 /* SDR_LDO4_UV */ +#define WM8400_SDR_LDO4_UV_SHIFT 3 /* SDR_LDO4_UV */ +#define WM8400_SDR_LDO4_UV_WIDTH 1 /* SDR_LDO4_UV */ +#define WM8400_SDR_LDO3_UV 0x0004 /* SDR_LDO3_UV */ +#define WM8400_SDR_LDO3_UV_MASK 0x0004 /* SDR_LDO3_UV */ +#define WM8400_SDR_LDO3_UV_SHIFT 2 /* SDR_LDO3_UV */ +#define WM8400_SDR_LDO3_UV_WIDTH 1 /* SDR_LDO3_UV */ +#define WM8400_SDR_LDO2_UV 0x0002 /* SDR_LDO2_UV */ +#define WM8400_SDR_LDO2_UV_MASK 0x0002 /* SDR_LDO2_UV */ +#define WM8400_SDR_LDO2_UV_SHIFT 1 /* SDR_LDO2_UV */ +#define WM8400_SDR_LDO2_UV_WIDTH 1 /* SDR_LDO2_UV */ +#define WM8400_SDR_LDO1_UV 0x0001 /* SDR_LDO1_UV */ +#define WM8400_SDR_LDO1_UV_MASK 0x0001 /* SDR_LDO1_UV */ +#define WM8400_SDR_LDO1_UV_SHIFT 0 /* SDR_LDO1_UV */ +#define WM8400_SDR_LDO1_UV_WIDTH 1 /* SDR_LDO1_UV */ + +/* + * R84 (0x54) - Line Circuits + */ +#define WM8400_BG_LINE_COMP 0x8000 /* BG_LINE_COMP */ +#define WM8400_BG_LINE_COMP_MASK 0x8000 /* BG_LINE_COMP */ +#define WM8400_BG_LINE_COMP_SHIFT 15 /* BG_LINE_COMP */ +#define WM8400_BG_LINE_COMP_WIDTH 1 /* BG_LINE_COMP */ +#define WM8400_LINE_CMP_VTHI_MASK 0x00F0 /* LINE_CMP_VTHI - [7:4] */ +#define WM8400_LINE_CMP_VTHI_SHIFT 4 /* LINE_CMP_VTHI - [7:4] */ +#define WM8400_LINE_CMP_VTHI_WIDTH 4 /* LINE_CMP_VTHI - [7:4] */ +#define WM8400_LINE_CMP_VTHD_MASK 0x000F /* LINE_CMP_VTHD - [3:0] */ +#define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */ +#define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */ + +int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data); + +static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, + u16 mask, u16 val) +{ + return regmap_update_bits(wm8400->regmap, reg, mask, val); +} + +#endif diff --git a/include/linux/mfd/wm8400.h b/include/linux/mfd/wm8400.h new file mode 100644 index 000000000..b46b566ac --- /dev/null +++ b/include/linux/mfd/wm8400.h @@ -0,0 +1,40 @@ +/* + * wm8400 client interface + * + * Copyright 2008 Wolfson Microelectronics plc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __LINUX_MFD_WM8400_H +#define __LINUX_MFD_WM8400_H + +#include + +#define WM8400_LDO1 0 +#define WM8400_LDO2 1 +#define WM8400_LDO3 2 +#define WM8400_LDO4 3 +#define WM8400_DCDC1 4 +#define WM8400_DCDC2 5 + +struct wm8400_platform_data { + int (*platform_init)(struct device *dev); +}; + +int wm8400_register_regulator(struct device *dev, int reg, + struct regulator_init_data *initdata); + +#endif diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h new file mode 100644 index 000000000..eefafa62d --- /dev/null +++ b/include/linux/mfd/wm8994/core.h @@ -0,0 +1,145 @@ +/* + * include/linux/mfd/wm8994/core.h -- Core interface for WM8994 + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM8994_CORE_H__ +#define __MFD_WM8994_CORE_H__ + +#include +#include +#include + +#include + +enum wm8994_type { + WM8994 = 0, + WM8958 = 1, + WM1811 = 2, +}; + +struct regulator_dev; +struct regulator_bulk_data; +struct irq_domain; + +#define WM8994_NUM_GPIO_REGS 11 +#define WM8994_NUM_LDO_REGS 2 +#define WM8994_NUM_IRQ_REGS 2 + +#define WM8994_IRQ_TEMP_SHUT 0 +#define WM8994_IRQ_MIC1_DET 1 +#define WM8994_IRQ_MIC1_SHRT 2 +#define WM8994_IRQ_MIC2_DET 3 +#define WM8994_IRQ_MIC2_SHRT 4 +#define WM8994_IRQ_FLL1_LOCK 5 +#define WM8994_IRQ_FLL2_LOCK 6 +#define WM8994_IRQ_SRC1_LOCK 7 +#define WM8994_IRQ_SRC2_LOCK 8 +#define WM8994_IRQ_AIF1DRC1_SIG_DET 9 +#define WM8994_IRQ_AIF1DRC2_SIG_DET 10 +#define WM8994_IRQ_AIF2DRC_SIG_DET 11 +#define WM8994_IRQ_FIFOS_ERR 12 +#define WM8994_IRQ_WSEQ_DONE 13 +#define WM8994_IRQ_DCS_DONE 14 +#define WM8994_IRQ_TEMP_WARN 15 + +/* GPIOs in the chip are numbered from 1-11 */ +#define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN) + +struct wm8994 { + struct wm8994_pdata pdata; + + enum wm8994_type type; + int revision; + int cust_id; + + struct device *dev; + struct regmap *regmap; + + bool ldo_ena_always_driven; + + int gpio_base; + int irq_base; + + int irq; + struct regmap_irq_chip_data *irq_data; + struct irq_domain *edge_irq; + + /* Used over suspend/resume */ + bool suspended; + + struct regulator_dev *dbvdd; + int num_supplies; + struct regulator_bulk_data *supplies; +}; + +/* Device I/O API */ + +static inline int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg) +{ + unsigned int val; + int ret; + + ret = regmap_read(wm8994->regmap, reg, &val); + + if (ret < 0) + return ret; + else + return val; +} + +static inline int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg, + unsigned short val) +{ + return regmap_write(wm8994->regmap, reg, val); +} + +static inline int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg, + int count, u16 *buf) +{ + return regmap_bulk_read(wm8994->regmap, reg, buf, count); +} + +static inline int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg, + int count, const u16 *buf) +{ + return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16)); +} + +static inline int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg, + unsigned short mask, unsigned short val) +{ + return regmap_update_bits(wm8994->regmap, reg, mask, val); +} + +/* Helper to save on boilerplate */ +static inline int wm8994_request_irq(struct wm8994 *wm8994, int irq, + irq_handler_t 
handler, const char *name, + void *data) +{ + if (!wm8994->irq_data) + return -EINVAL; + return request_threaded_irq(regmap_irq_get_virq(wm8994->irq_data, irq), + NULL, handler, IRQF_TRIGGER_RISING, name, + data); +} +static inline void wm8994_free_irq(struct wm8994 *wm8994, int irq, void *data) +{ + if (!wm8994->irq_data) + return; + free_irq(regmap_irq_get_virq(wm8994->irq_data, irq), data); +} + +int wm8994_irq_init(struct wm8994 *wm8994); +void wm8994_irq_exit(struct wm8994 *wm8994); + +#endif diff --git a/include/linux/mfd/wm8994/gpio.h b/include/linux/mfd/wm8994/gpio.h new file mode 100644 index 000000000..0c79b5ff4 --- /dev/null +++ b/include/linux/mfd/wm8994/gpio.h @@ -0,0 +1,76 @@ +/* + * include/linux/mfd/wm8994/gpio.h - GPIO configuration for WM8994 + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM8994_GPIO_H__ +#define __MFD_WM8994_GPIO_H__ + +#define WM8994_GPIO_MAX 11 + +#define WM8994_GP_FN_PIN_SPECIFIC 0 +#define WM8994_GP_FN_GPIO 1 +#define WM8994_GP_FN_SDOUT 2 +#define WM8994_GP_FN_IRQ 3 +#define WM8994_GP_FN_TEMPERATURE 4 +#define WM8994_GP_FN_MICBIAS1_DET 5 +#define WM8994_GP_FN_MICBIAS1_SHORT 6 +#define WM8994_GP_FN_MICBIAS2_DET 7 +#define WM8994_GP_FN_MICBIAS2_SHORT 8 +#define WM8994_GP_FN_FLL1_LOCK 9 +#define WM8994_GP_FN_FLL2_LOCK 10 +#define WM8994_GP_FN_SRC1_LOCK 11 +#define WM8994_GP_FN_SRC2_LOCK 12 +#define WM8994_GP_FN_DRC1_ACT 13 +#define WM8994_GP_FN_DRC2_ACT 14 +#define WM8994_GP_FN_DRC3_ACT 15 +#define WM8994_GP_FN_WSEQ_STATUS 16 +#define WM8994_GP_FN_FIFO_ERROR 17 +#define WM8994_GP_FN_OPCLK 18 +#define WM8994_GP_FN_THW 19 +#define WM8994_GP_FN_DCS_DONE 20 +#define WM8994_GP_FN_FLL1_OUT 21 +#define WM8994_GP_FN_FLL2_OUT 22 + +#define WM8994_GPN_DIR 0x8000 /* GPN_DIR */ +#define WM8994_GPN_DIR_MASK 0x8000 /* GPN_DIR */ +#define WM8994_GPN_DIR_SHIFT 15 /* GPN_DIR */ +#define WM8994_GPN_DIR_WIDTH 1 /* GPN_DIR */ +#define WM8994_GPN_PU 0x4000 /* GPN_PU */ +#define WM8994_GPN_PU_MASK 0x4000 /* GPN_PU */ +#define WM8994_GPN_PU_SHIFT 14 /* GPN_PU */ +#define WM8994_GPN_PU_WIDTH 1 /* GPN_PU */ +#define WM8994_GPN_PD 0x2000 /* GPN_PD */ +#define WM8994_GPN_PD_MASK 0x2000 /* GPN_PD */ +#define WM8994_GPN_PD_SHIFT 13 /* GPN_PD */ +#define WM8994_GPN_PD_WIDTH 1 /* GPN_PD */ +#define WM8994_GPN_POL 0x0400 /* GPN_POL */ +#define WM8994_GPN_POL_MASK 0x0400 /* GPN_POL */ +#define WM8994_GPN_POL_SHIFT 10 /* GPN_POL */ +#define WM8994_GPN_POL_WIDTH 1 /* GPN_POL */ +#define WM8994_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */ +#define WM8994_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */ +#define WM8994_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */ +#define WM8994_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */ +#define WM8994_GPN_DB 0x0100 /* GPN_DB */ +#define WM8994_GPN_DB_MASK 0x0100 /* GPN_DB */ +#define WM8994_GPN_DB_SHIFT 8 /* GPN_DB */ +#define WM8994_GPN_DB_WIDTH 1 /* GPN_DB */ +#define WM8994_GPN_LVL 0x0040 /* GPN_LVL */ +#define WM8994_GPN_LVL_MASK 0x0040 /* GPN_LVL */ +#define WM8994_GPN_LVL_SHIFT 6 /* GPN_LVL */ +#define WM8994_GPN_LVL_WIDTH 1 /* GPN_LVL */ +#define WM8994_GPN_FN_MASK 0x001F /* GPN_FN - [4:0] */ +#define WM8994_GPN_FN_SHIFT 0 /* GPN_FN - [4:0] */ +#define WM8994_GPN_FN_WIDTH 5 /* GPN_FN - [4:0] */ + +#endif diff --git a/include/linux/mfd/wm8994/pdata.h 
b/include/linux/mfd/wm8994/pdata.h new file mode 100644 index 000000000..b19c370fe --- /dev/null +++ b/include/linux/mfd/wm8994/pdata.h @@ -0,0 +1,244 @@ +/* + * include/linux/mfd/wm8994/pdata.h -- Platform data for WM8994 + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM8994_PDATA_H__ +#define __MFD_WM8994_PDATA_H__ + +#define WM8994_NUM_LDO 2 +#define WM8994_NUM_GPIO 11 +#define WM8994_NUM_AIF 3 + +struct wm8994_ldo_pdata { + /** GPIOs to enable regulator, 0 or less if not available */ + int enable; + + const struct regulator_init_data *init_data; +}; + +#define WM8994_CONFIGURE_GPIO 0x10000 + +#define WM8994_DRC_REGS 5 +#define WM8994_EQ_REGS 20 +#define WM8958_MBC_CUTOFF_REGS 20 +#define WM8958_MBC_COEFF_REGS 48 +#define WM8958_MBC_COMBINED_REGS 56 +#define WM8958_VSS_HPF_REGS 2 +#define WM8958_VSS_REGS 148 +#define WM8958_ENH_EQ_REGS 32 + +/** + * DRC configurations are specified with a label and a set of register + * values to write (the enable bits will be ignored). At runtime an + * enumerated control will be presented for each DRC block allowing + * the user to choose the configuration to use. + * + * Configurations may be generated by hand or by using the DRC control + * panel provided by WISCE - see http://www.wolfsonmicro.com/wisce/ + * for details. + */ +struct wm8994_drc_cfg { + const char *name; + u16 regs[WM8994_DRC_REGS]; +}; + +/** + * ReTune Mobile configurations are specified with a label, sample + * rate and set of values to write (the enable bits will be ignored). + * + * Configurations are expected to be generated using the ReTune Mobile + * control panel in WISCE - see http://www.wolfsonmicro.com/wisce/ + */ +struct wm8994_retune_mobile_cfg { + const char *name; + unsigned int rate; + u16 regs[WM8994_EQ_REGS]; +}; + +/** + * Multiband compressor configurations are specified with a label and + * two sets of values to write. Configurations are expected to be + * generated using the multiband compressor configuration panel in + * WISCE - see http://www.wolfsonmicro.com/wisce/ + */ +struct wm8958_mbc_cfg { + const char *name; + u16 cutoff_regs[WM8958_MBC_CUTOFF_REGS]; + u16 coeff_regs[WM8958_MBC_COEFF_REGS]; + + /* Coefficient layout when using MBC+VSS firmware */ + u16 combined_regs[WM8958_MBC_COMBINED_REGS]; +}; + +/** + * VSS HPF configurations are specified with a label and two values to + * write. Configurations are expected to be generated using the + * multiband compressor configuration panel in WISCE - see + * http://www.wolfsonmicro.com/wisce/ + */ +struct wm8958_vss_hpf_cfg { + const char *name; + u16 regs[WM8958_VSS_HPF_REGS]; +}; + +/** + * VSS configurations are specified with a label and array of values + * to write. Configurations are expected to be generated using the + * multiband compressor configuration panel in WISCE - see + * http://www.wolfsonmicro.com/wisce/ + */ +struct wm8958_vss_cfg { + const char *name; + u16 regs[WM8958_VSS_REGS]; +}; + +/** + * Enhanced EQ configurations are specified with a label and array of + * values to write.
Configurations are expected to be generated using + the multiband compressor configuration panel in WISCE - see + http://www.wolfsonmicro.com/wisce/ + */ +struct wm8958_enh_eq_cfg { + const char *name; + u16 regs[WM8958_ENH_EQ_REGS]; +}; + +/** + * Microphone detection rates, used to tune response rates and power + * consumption for WM8958/WM1811 microphone detection. + * + * @sysclk: System clock rate to use this configuration for. + * @idle: True if this configuration should be used when no accessory is detected, + * false otherwise. + * @start: Value for MICD_BIAS_START_TIME register field (not shifted). + * @rate: Value for MICD_RATE register field (not shifted). + */ +struct wm8958_micd_rate { + int sysclk; + bool idle; + int start; + int rate; +}; + +struct wm8994_pdata { + int gpio_base; + + /** + * Default values for GPIOs if non-zero, WM8994_CONFIGURE_GPIO + * can be used for all zero values. + */ + int gpio_defaults[WM8994_NUM_GPIO]; + + struct wm8994_ldo_pdata ldo[WM8994_NUM_LDO]; + + int irq_base; /** Base IRQ number for WM8994, required for IRQs */ + unsigned long irq_flags; /** user irq flags */ + + int num_drc_cfgs; + struct wm8994_drc_cfg *drc_cfgs; + + int num_retune_mobile_cfgs; + struct wm8994_retune_mobile_cfg *retune_mobile_cfgs; + + int num_mbc_cfgs; + struct wm8958_mbc_cfg *mbc_cfgs; + + int num_vss_cfgs; + struct wm8958_vss_cfg *vss_cfgs; + + int num_vss_hpf_cfgs; + struct wm8958_vss_hpf_cfg *vss_hpf_cfgs; + + int num_enh_eq_cfgs; + struct wm8958_enh_eq_cfg *enh_eq_cfgs; + + int num_micd_rates; + struct wm8958_micd_rate *micd_rates; + + /* Power up delays to add after microphone bias power up (ms) */ + int micb1_delay; + int micb2_delay; + + /* LINEOUT can be differential or single ended */ + unsigned int lineout1_diff:1; + unsigned int lineout2_diff:1; + + /* Common mode feedback */ + unsigned int lineout1fb:1; + unsigned int lineout2fb:1; + + /* Delay between detecting a jack and starting microphone + * detect (specified in ms) + */ + int micdet_delay; + + /* Delay between microphone detect completing and reporting on + * insert (specified in ms) + */ + int mic_id_delay; + + /* IRQ for microphone detection if brought out directly as a + * signal. + */ + int micdet_irq; + + /* WM8994 microphone biases: 0=0.9*AVDD1 1=0.65*AVDD1 */ + unsigned int micbias1_lvl:1; + unsigned int micbias2_lvl:1; + + /* WM8994 jack detect threshold levels, see datasheet for values */ + unsigned int jd_scthr:2; + unsigned int jd_thr:2; + + /* Configure WM1811 jack detection for use with external capacitor */ + unsigned int jd_ext_cap:1; + + /* WM8958 microphone bias configuration */ + int micbias[2]; + + /* WM8958 microphone detection ranges */ + u16 micd_lvl_sel; + + /* Disable the internal pull downs on the LDOs if they are + * always driven (e.g., connected to an always-on supply or + * GPIO that always drives an output). If they float power + * consumption will rise. + */ + bool ldo_ena_always_driven; + + /* + * SPKMODE must be pulled internally by the device on this + * system. + */ + bool spkmode_pu; + + /* + * CS/ADDR must be pulled internally by the device on this + * system. + */ + bool csnaddr_pd; + + /** + * Maximum number of channels that clocks will be generated for, + * useful for systems where an I2S bus with multiple data + * lines is mastered.
+ */ + int max_channels_clocked[WM8994_NUM_AIF]; + + /** + * GPIO for the IRQ pin if host only supports edge triggering + */ + int irq_gpio; +}; + +#endif diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h new file mode 100644 index 000000000..db8cef3d5 --- /dev/null +++ b/include/linux/mfd/wm8994/registers.h @@ -0,0 +1,4822 @@ +/* + * include/linux/mfd/wm8994/registers.h -- Register definitions for WM8994 + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM8994_REGISTERS_H__ +#define __MFD_WM8994_REGISTERS_H__ + +/* + * Register values. + */ +#define WM8994_SOFTWARE_RESET 0x00 +#define WM8994_POWER_MANAGEMENT_1 0x01 +#define WM8994_POWER_MANAGEMENT_2 0x02 +#define WM8994_POWER_MANAGEMENT_3 0x03 +#define WM8994_POWER_MANAGEMENT_4 0x04 +#define WM8994_POWER_MANAGEMENT_5 0x05 +#define WM8994_POWER_MANAGEMENT_6 0x06 +#define WM8994_INPUT_MIXER_1 0x15 +#define WM8994_LEFT_LINE_INPUT_1_2_VOLUME 0x18 +#define WM8994_LEFT_LINE_INPUT_3_4_VOLUME 0x19 +#define WM8994_RIGHT_LINE_INPUT_1_2_VOLUME 0x1A +#define WM8994_RIGHT_LINE_INPUT_3_4_VOLUME 0x1B +#define WM8994_LEFT_OUTPUT_VOLUME 0x1C +#define WM8994_RIGHT_OUTPUT_VOLUME 0x1D +#define WM8994_LINE_OUTPUTS_VOLUME 0x1E +#define WM8994_HPOUT2_VOLUME 0x1F +#define WM8994_LEFT_OPGA_VOLUME 0x20 +#define WM8994_RIGHT_OPGA_VOLUME 0x21 +#define WM8994_SPKMIXL_ATTENUATION 0x22 +#define WM8994_SPKMIXR_ATTENUATION 0x23 +#define WM8994_SPKOUT_MIXERS 0x24 +#define WM8994_CLASSD 0x25 +#define WM8994_SPEAKER_VOLUME_LEFT 0x26 +#define WM8994_SPEAKER_VOLUME_RIGHT 0x27 +#define WM8994_INPUT_MIXER_2 0x28 +#define WM8994_INPUT_MIXER_3 0x29 +#define WM8994_INPUT_MIXER_4 0x2A +#define WM8994_INPUT_MIXER_5 0x2B +#define WM8994_INPUT_MIXER_6 0x2C +#define WM8994_OUTPUT_MIXER_1 0x2D +#define WM8994_OUTPUT_MIXER_2 0x2E +#define WM8994_OUTPUT_MIXER_3 0x2F +#define WM8994_OUTPUT_MIXER_4 0x30 +#define WM8994_OUTPUT_MIXER_5 0x31 +#define WM8994_OUTPUT_MIXER_6 0x32 +#define WM8994_HPOUT2_MIXER 0x33 +#define WM8994_LINE_MIXER_1 0x34 +#define WM8994_LINE_MIXER_2 0x35 +#define WM8994_SPEAKER_MIXER 0x36 +#define WM8994_ADDITIONAL_CONTROL 0x37 +#define WM8994_ANTIPOP_1 0x38 +#define WM8994_ANTIPOP_2 0x39 +#define WM8994_MICBIAS 0x3A +#define WM8994_LDO_1 0x3B +#define WM8994_LDO_2 0x3C +#define WM8958_MICBIAS1 0x3D +#define WM8958_MICBIAS2 0x3E +#define WM8994_CHARGE_PUMP_1 0x4C +#define WM8958_CHARGE_PUMP_2 0x4D +#define WM8994_CLASS_W_1 0x51 +#define WM8994_DC_SERVO_1 0x54 +#define WM8994_DC_SERVO_2 0x55 +#define WM8994_DC_SERVO_4 0x57 +#define WM8994_DC_SERVO_READBACK 0x58 +#define WM8994_DC_SERVO_4E 0x59 +#define WM8994_ANALOGUE_HP_1 0x60 +#define WM8958_MIC_DETECT_1 0xD0 +#define WM8958_MIC_DETECT_2 0xD1 +#define WM8958_MIC_DETECT_3 0xD2 +#define WM8994_CHIP_REVISION 0x100 +#define WM8994_CONTROL_INTERFACE 0x101 +#define WM8994_WRITE_SEQUENCER_CTRL_1 0x110 +#define WM8994_WRITE_SEQUENCER_CTRL_2 0x111 +#define WM8994_AIF1_CLOCKING_1 0x200 +#define WM8994_AIF1_CLOCKING_2 0x201 +#define WM8994_AIF2_CLOCKING_1 0x204 +#define WM8994_AIF2_CLOCKING_2 0x205 +#define WM8994_CLOCKING_1 0x208 +#define WM8994_CLOCKING_2 0x209 +#define WM8994_AIF1_RATE 0x210 +#define WM8994_AIF2_RATE 0x211 +#define WM8994_RATE_STATUS 0x212 +#define 
WM8994_FLL1_CONTROL_1 0x220 +#define WM8994_FLL1_CONTROL_2 0x221 +#define WM8994_FLL1_CONTROL_3 0x222 +#define WM8994_FLL1_CONTROL_4 0x223 +#define WM8994_FLL1_CONTROL_5 0x224 +#define WM8958_FLL1_EFS_1 0x226 +#define WM8958_FLL1_EFS_2 0x227 +#define WM8994_FLL2_CONTROL_1 0x240 +#define WM8994_FLL2_CONTROL_2 0x241 +#define WM8994_FLL2_CONTROL_3 0x242 +#define WM8994_FLL2_CONTROL_4 0x243 +#define WM8994_FLL2_CONTROL_5 0x244 +#define WM8958_FLL2_EFS_1 0x246 +#define WM8958_FLL2_EFS_2 0x247 +#define WM8994_AIF1_CONTROL_1 0x300 +#define WM8994_AIF1_CONTROL_2 0x301 +#define WM8994_AIF1_MASTER_SLAVE 0x302 +#define WM8994_AIF1_BCLK 0x303 +#define WM8994_AIF1ADC_LRCLK 0x304 +#define WM8994_AIF1DAC_LRCLK 0x305 +#define WM8994_AIF1DAC_DATA 0x306 +#define WM8994_AIF1ADC_DATA 0x307 +#define WM8994_AIF2_CONTROL_1 0x310 +#define WM8994_AIF2_CONTROL_2 0x311 +#define WM8994_AIF2_MASTER_SLAVE 0x312 +#define WM8994_AIF2_BCLK 0x313 +#define WM8994_AIF2ADC_LRCLK 0x314 +#define WM8994_AIF2DAC_LRCLK 0x315 +#define WM8994_AIF2DAC_DATA 0x316 +#define WM8994_AIF2ADC_DATA 0x317 +#define WM1811_AIF2TX_CONTROL 0x318 +#define WM8958_AIF3_CONTROL_1 0x320 +#define WM8958_AIF3_CONTROL_2 0x321 +#define WM8958_AIF3DAC_DATA 0x322 +#define WM8958_AIF3ADC_DATA 0x323 +#define WM8994_AIF1_ADC1_LEFT_VOLUME 0x400 +#define WM8994_AIF1_ADC1_RIGHT_VOLUME 0x401 +#define WM8994_AIF1_DAC1_LEFT_VOLUME 0x402 +#define WM8994_AIF1_DAC1_RIGHT_VOLUME 0x403 +#define WM8994_AIF1_ADC2_LEFT_VOLUME 0x404 +#define WM8994_AIF1_ADC2_RIGHT_VOLUME 0x405 +#define WM8994_AIF1_DAC2_LEFT_VOLUME 0x406 +#define WM8994_AIF1_DAC2_RIGHT_VOLUME 0x407 +#define WM8994_AIF1_ADC1_FILTERS 0x410 +#define WM8994_AIF1_ADC2_FILTERS 0x411 +#define WM8994_AIF1_DAC1_FILTERS_1 0x420 +#define WM8994_AIF1_DAC1_FILTERS_2 0x421 +#define WM8994_AIF1_DAC2_FILTERS_1 0x422 +#define WM8994_AIF1_DAC2_FILTERS_2 0x423 +#define WM8958_AIF1_DAC1_NOISE_GATE 0x430 +#define WM8958_AIF1_DAC2_NOISE_GATE 0x431 +#define WM8994_AIF1_DRC1_1 0x440 +#define WM8994_AIF1_DRC1_2 0x441 +#define WM8994_AIF1_DRC1_3 0x442 +#define WM8994_AIF1_DRC1_4 0x443 +#define WM8994_AIF1_DRC1_5 0x444 +#define WM8994_AIF1_DRC2_1 0x450 +#define WM8994_AIF1_DRC2_2 0x451 +#define WM8994_AIF1_DRC2_3 0x452 +#define WM8994_AIF1_DRC2_4 0x453 +#define WM8994_AIF1_DRC2_5 0x454 +#define WM8994_AIF1_DAC1_EQ_GAINS_1 0x480 +#define WM8994_AIF1_DAC1_EQ_GAINS_2 0x481 +#define WM8994_AIF1_DAC1_EQ_BAND_1_A 0x482 +#define WM8994_AIF1_DAC1_EQ_BAND_1_B 0x483 +#define WM8994_AIF1_DAC1_EQ_BAND_1_PG 0x484 +#define WM8994_AIF1_DAC1_EQ_BAND_2_A 0x485 +#define WM8994_AIF1_DAC1_EQ_BAND_2_B 0x486 +#define WM8994_AIF1_DAC1_EQ_BAND_2_C 0x487 +#define WM8994_AIF1_DAC1_EQ_BAND_2_PG 0x488 +#define WM8994_AIF1_DAC1_EQ_BAND_3_A 0x489 +#define WM8994_AIF1_DAC1_EQ_BAND_3_B 0x48A +#define WM8994_AIF1_DAC1_EQ_BAND_3_C 0x48B +#define WM8994_AIF1_DAC1_EQ_BAND_3_PG 0x48C +#define WM8994_AIF1_DAC1_EQ_BAND_4_A 0x48D +#define WM8994_AIF1_DAC1_EQ_BAND_4_B 0x48E +#define WM8994_AIF1_DAC1_EQ_BAND_4_C 0x48F +#define WM8994_AIF1_DAC1_EQ_BAND_4_PG 0x490 +#define WM8994_AIF1_DAC1_EQ_BAND_5_A 0x491 +#define WM8994_AIF1_DAC1_EQ_BAND_5_B 0x492 +#define WM8994_AIF1_DAC1_EQ_BAND_5_PG 0x493 +#define WM8994_AIF1_DAC1_EQ_BAND_1_C 0x494 +#define WM8994_AIF1_DAC2_EQ_GAINS_1 0x4A0 +#define WM8994_AIF1_DAC2_EQ_GAINS_2 0x4A1 +#define WM8994_AIF1_DAC2_EQ_BAND_1_A 0x4A2 +#define WM8994_AIF1_DAC2_EQ_BAND_1_B 0x4A3 +#define WM8994_AIF1_DAC2_EQ_BAND_1_PG 0x4A4 +#define WM8994_AIF1_DAC2_EQ_BAND_2_A 0x4A5 +#define WM8994_AIF1_DAC2_EQ_BAND_2_B 0x4A6 +#define WM8994_AIF1_DAC2_EQ_BAND_2_C 
0x4A7 +#define WM8994_AIF1_DAC2_EQ_BAND_2_PG 0x4A8 +#define WM8994_AIF1_DAC2_EQ_BAND_3_A 0x4A9 +#define WM8994_AIF1_DAC2_EQ_BAND_3_B 0x4AA +#define WM8994_AIF1_DAC2_EQ_BAND_3_C 0x4AB +#define WM8994_AIF1_DAC2_EQ_BAND_3_PG 0x4AC +#define WM8994_AIF1_DAC2_EQ_BAND_4_A 0x4AD +#define WM8994_AIF1_DAC2_EQ_BAND_4_B 0x4AE +#define WM8994_AIF1_DAC2_EQ_BAND_4_C 0x4AF +#define WM8994_AIF1_DAC2_EQ_BAND_4_PG 0x4B0 +#define WM8994_AIF1_DAC2_EQ_BAND_5_A 0x4B1 +#define WM8994_AIF1_DAC2_EQ_BAND_5_B 0x4B2 +#define WM8994_AIF1_DAC2_EQ_BAND_5_PG 0x4B3 +#define WM8994_AIF1_DAC2_EQ_BAND_1_C 0x4B4 +#define WM8994_AIF2_ADC_LEFT_VOLUME 0x500 +#define WM8994_AIF2_ADC_RIGHT_VOLUME 0x501 +#define WM8994_AIF2_DAC_LEFT_VOLUME 0x502 +#define WM8994_AIF2_DAC_RIGHT_VOLUME 0x503 +#define WM8994_AIF2_ADC_FILTERS 0x510 +#define WM8994_AIF2_DAC_FILTERS_1 0x520 +#define WM8994_AIF2_DAC_FILTERS_2 0x521 +#define WM8958_AIF2_DAC_NOISE_GATE 0x530 +#define WM8994_AIF2_DRC_1 0x540 +#define WM8994_AIF2_DRC_2 0x541 +#define WM8994_AIF2_DRC_3 0x542 +#define WM8994_AIF2_DRC_4 0x543 +#define WM8994_AIF2_DRC_5 0x544 +#define WM8994_AIF2_EQ_GAINS_1 0x580 +#define WM8994_AIF2_EQ_GAINS_2 0x581 +#define WM8994_AIF2_EQ_BAND_1_A 0x582 +#define WM8994_AIF2_EQ_BAND_1_B 0x583 +#define WM8994_AIF2_EQ_BAND_1_PG 0x584 +#define WM8994_AIF2_EQ_BAND_2_A 0x585 +#define WM8994_AIF2_EQ_BAND_2_B 0x586 +#define WM8994_AIF2_EQ_BAND_2_C 0x587 +#define WM8994_AIF2_EQ_BAND_2_PG 0x588 +#define WM8994_AIF2_EQ_BAND_3_A 0x589 +#define WM8994_AIF2_EQ_BAND_3_B 0x58A +#define WM8994_AIF2_EQ_BAND_3_C 0x58B +#define WM8994_AIF2_EQ_BAND_3_PG 0x58C +#define WM8994_AIF2_EQ_BAND_4_A 0x58D +#define WM8994_AIF2_EQ_BAND_4_B 0x58E +#define WM8994_AIF2_EQ_BAND_4_C 0x58F +#define WM8994_AIF2_EQ_BAND_4_PG 0x590 +#define WM8994_AIF2_EQ_BAND_5_A 0x591 +#define WM8994_AIF2_EQ_BAND_5_B 0x592 +#define WM8994_AIF2_EQ_BAND_5_PG 0x593 +#define WM8994_AIF2_EQ_BAND_1_C 0x594 +#define WM8994_DAC1_MIXER_VOLUMES 0x600 +#define WM8994_DAC1_LEFT_MIXER_ROUTING 0x601 +#define WM8994_DAC1_RIGHT_MIXER_ROUTING 0x602 +#define WM8994_DAC2_MIXER_VOLUMES 0x603 +#define WM8994_DAC2_LEFT_MIXER_ROUTING 0x604 +#define WM8994_DAC2_RIGHT_MIXER_ROUTING 0x605 +#define WM8994_AIF1_ADC1_LEFT_MIXER_ROUTING 0x606 +#define WM8994_AIF1_ADC1_RIGHT_MIXER_ROUTING 0x607 +#define WM8994_AIF1_ADC2_LEFT_MIXER_ROUTING 0x608 +#define WM8994_AIF1_ADC2_RIGHT_MIXER_ROUTING 0x609 +#define WM8994_DAC1_LEFT_VOLUME 0x610 +#define WM8994_DAC1_RIGHT_VOLUME 0x611 +#define WM8994_DAC2_LEFT_VOLUME 0x612 +#define WM8994_DAC2_RIGHT_VOLUME 0x613 +#define WM8994_DAC_SOFTMUTE 0x614 +#define WM8994_OVERSAMPLING 0x620 +#define WM8994_SIDETONE 0x621 +#define WM8994_GPIO_1 0x700 +#define WM8994_GPIO_2 0x701 +#define WM8994_GPIO_3 0x702 +#define WM8994_GPIO_4 0x703 +#define WM8994_GPIO_5 0x704 +#define WM8994_GPIO_6 0x705 +#define WM1811_JACKDET_CTRL 0x705 +#define WM8994_GPIO_7 0x706 +#define WM8994_GPIO_8 0x707 +#define WM8994_GPIO_9 0x708 +#define WM8994_GPIO_10 0x709 +#define WM8994_GPIO_11 0x70A +#define WM8994_PULL_CONTROL_1 0x720 +#define WM8994_PULL_CONTROL_2 0x721 +#define WM8994_INTERRUPT_STATUS_1 0x730 +#define WM8994_INTERRUPT_STATUS_2 0x731 +#define WM8994_INTERRUPT_RAW_STATUS_2 0x732 +#define WM8994_INTERRUPT_STATUS_1_MASK 0x738 +#define WM8994_INTERRUPT_STATUS_2_MASK 0x739 +#define WM8994_INTERRUPT_CONTROL 0x740 +#define WM8994_IRQ_DEBOUNCE 0x748 +#define WM8958_DSP2_PROGRAM 0x900 +#define WM8958_DSP2_CONFIG 0x901 +#define WM8958_DSP2_MAGICNUM 0xA00 +#define WM8958_DSP2_RELEASEYEAR 0xA01 +#define WM8958_DSP2_RELEASEMONTHDAY 0xA02 
+#define WM8958_DSP2_RELEASETIME 0xA03 +#define WM8958_DSP2_VERMAJMIN 0xA04 +#define WM8958_DSP2_VERBUILD 0xA05 +#define WM8958_DSP2_TESTREG 0xA06 +#define WM8958_DSP2_XORREG 0xA07 +#define WM8958_DSP2_SHIFTMAXX 0xA08 +#define WM8958_DSP2_SHIFTMAXY 0xA09 +#define WM8958_DSP2_SHIFTMAXZ 0xA0A +#define WM8958_DSP2_SHIFTMAXEXTLO 0xA0B +#define WM8958_DSP2_AESSELECT 0xA0C +#define WM8958_DSP2_EXECCONTROL 0xA0D +#define WM8958_DSP2_SAMPLEBREAK 0xA0E +#define WM8958_DSP2_COUNTBREAK 0xA0F +#define WM8958_DSP2_INTSTATUS 0xA10 +#define WM8958_DSP2_EVENTSTATUS 0xA11 +#define WM8958_DSP2_INTMASK 0xA12 +#define WM8958_DSP2_CONFIGDWIDTH 0xA13 +#define WM8958_DSP2_CONFIGINSTR 0xA14 +#define WM8958_DSP2_CONFIGDMEM 0xA15 +#define WM8958_DSP2_CONFIGDELAYS 0xA16 +#define WM8958_DSP2_CONFIGNUMIO 0xA17 +#define WM8958_DSP2_CONFIGEXTDEPTH 0xA18 +#define WM8958_DSP2_CONFIGMULTIPLIER 0xA19 +#define WM8958_DSP2_CONFIGCTRLDWIDTH 0xA1A +#define WM8958_DSP2_CONFIGPIPELINE 0xA1B +#define WM8958_DSP2_SHIFTMAXEXTHI 0xA1C +#define WM8958_DSP2_SWVERSIONREG 0xA1D +#define WM8958_DSP2_CONFIGXMEM 0xA1E +#define WM8958_DSP2_CONFIGYMEM 0xA1F +#define WM8958_DSP2_CONFIGZMEM 0xA20 +#define WM8958_FW_BUILD_1 0x2000 +#define WM8958_FW_BUILD_0 0x2001 +#define WM8958_FW_ID_1 0x2002 +#define WM8958_FW_ID_0 0x2003 +#define WM8958_FW_MAJOR_1 0x2004 +#define WM8958_FW_MAJOR_0 0x2005 +#define WM8958_FW_MINOR_1 0x2006 +#define WM8958_FW_MINOR_0 0x2007 +#define WM8958_FW_PATCH_1 0x2008 +#define WM8958_FW_PATCH_0 0x2009 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1 0x2200 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_2 0x2201 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_1 0x2202 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_2 0x2203 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_1 0x2204 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_2 0x2205 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_1 0x2206 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_2 0x2207 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_1 0x2208 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_2 0x2209 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_1 0x220A +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_2 0x220B +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_1 0x220C +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_2 0x220D +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_1 0x220E +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_2 0x220F +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_1 0x2210 +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_2 0x2211 +#define WM8958_MBC_BAND_1_LOWER_CUTOFF_1 0x2212 +#define WM8958_MBC_BAND_1_LOWER_CUTOFF_2 0x2213 +#define WM8958_MBC_BAND_1_K_1 0x2400 +#define WM8958_MBC_BAND_1_K_2 0x2401 +#define WM8958_MBC_BAND_1_N1_1 0x2402 +#define WM8958_MBC_BAND_1_N1_2 0x2403 +#define WM8958_MBC_BAND_1_N2_1 0x2404 +#define WM8958_MBC_BAND_1_N2_2 0x2405 +#define WM8958_MBC_BAND_1_N3_1 0x2406 +#define WM8958_MBC_BAND_1_N3_2 0x2407 +#define WM8958_MBC_BAND_1_N4_1 0x2408 +#define WM8958_MBC_BAND_1_N4_2 0x2409 +#define WM8958_MBC_BAND_1_N5_1 0x240A +#define WM8958_MBC_BAND_1_N5_2 0x240B +#define WM8958_MBC_BAND_1_X1_1 0x240C +#define WM8958_MBC_BAND_1_X1_2 0x240D +#define WM8958_MBC_BAND_1_X2_1 0x240E +#define WM8958_MBC_BAND_1_X2_2 0x240F +#define WM8958_MBC_BAND_1_X3_1 0x2410 +#define WM8958_MBC_BAND_1_X3_2 0x2411 +#define WM8958_MBC_BAND_1_ATTACK_1 0x2412 +#define WM8958_MBC_BAND_1_ATTACK_2 0x2413 +#define WM8958_MBC_BAND_1_DECAY_1 0x2414 +#define WM8958_MBC_BAND_1_DECAY_2 0x2415 +#define WM8958_MBC_BAND_2_K_1 0x2416 +#define WM8958_MBC_BAND_2_K_2 0x2417 +#define WM8958_MBC_BAND_2_N1_1 0x2418 +#define WM8958_MBC_BAND_2_N1_2 0x2419 
+#define WM8958_MBC_BAND_2_N2_1 0x241A +#define WM8958_MBC_BAND_2_N2_2 0x241B +#define WM8958_MBC_BAND_2_N3_1 0x241C +#define WM8958_MBC_BAND_2_N3_2 0x241D +#define WM8958_MBC_BAND_2_N4_1 0x241E +#define WM8958_MBC_BAND_2_N4_2 0x241F +#define WM8958_MBC_BAND_2_N5_1 0x2420 +#define WM8958_MBC_BAND_2_N5_2 0x2421 +#define WM8958_MBC_BAND_2_X1_1 0x2422 +#define WM8958_MBC_BAND_2_X1_2 0x2423 +#define WM8958_MBC_BAND_2_X2_1 0x2424 +#define WM8958_MBC_BAND_2_X2_2 0x2425 +#define WM8958_MBC_BAND_2_X3_1 0x2426 +#define WM8958_MBC_BAND_2_X3_2 0x2427 +#define WM8958_MBC_BAND_2_ATTACK_1 0x2428 +#define WM8958_MBC_BAND_2_ATTACK_2 0x2429 +#define WM8958_MBC_BAND_2_DECAY_1 0x242A +#define WM8958_MBC_BAND_2_DECAY_2 0x242B +#define WM8958_MBC_B2_PG2_1 0x242C +#define WM8958_MBC_B2_PG2_2 0x242D +#define WM8958_MBC_B1_PG2_1 0x242E +#define WM8958_MBC_B1_PG2_2 0x242F +#define WM8958_MBC_CROSSOVER_1 0x2600 +#define WM8958_MBC_CROSSOVER_2 0x2601 +#define WM8958_MBC_HPF_1 0x2602 +#define WM8958_MBC_HPF_2 0x2603 +#define WM8958_MBC_LPF_1 0x2606 +#define WM8958_MBC_LPF_2 0x2607 +#define WM8958_MBC_RMS_LIMIT_1 0x260A +#define WM8958_MBC_RMS_LIMIT_2 0x260B +#define WM8994_WRITE_SEQUENCER_0 0x3000 +#define WM8994_WRITE_SEQUENCER_1 0x3001 +#define WM8994_WRITE_SEQUENCER_2 0x3002 +#define WM8994_WRITE_SEQUENCER_3 0x3003 +#define WM8994_WRITE_SEQUENCER_4 0x3004 +#define WM8994_WRITE_SEQUENCER_5 0x3005 +#define WM8994_WRITE_SEQUENCER_6 0x3006 +#define WM8994_WRITE_SEQUENCER_7 0x3007 +#define WM8994_WRITE_SEQUENCER_8 0x3008 +#define WM8994_WRITE_SEQUENCER_9 0x3009 +#define WM8994_WRITE_SEQUENCER_10 0x300A +#define WM8994_WRITE_SEQUENCER_11 0x300B +#define WM8994_WRITE_SEQUENCER_12 0x300C +#define WM8994_WRITE_SEQUENCER_13 0x300D +#define WM8994_WRITE_SEQUENCER_14 0x300E +#define WM8994_WRITE_SEQUENCER_15 0x300F +#define WM8994_WRITE_SEQUENCER_16 0x3010 +#define WM8994_WRITE_SEQUENCER_17 0x3011 +#define WM8994_WRITE_SEQUENCER_18 0x3012 +#define WM8994_WRITE_SEQUENCER_19 0x3013 +#define WM8994_WRITE_SEQUENCER_20 0x3014 +#define WM8994_WRITE_SEQUENCER_21 0x3015 +#define WM8994_WRITE_SEQUENCER_22 0x3016 +#define WM8994_WRITE_SEQUENCER_23 0x3017 +#define WM8994_WRITE_SEQUENCER_24 0x3018 +#define WM8994_WRITE_SEQUENCER_25 0x3019 +#define WM8994_WRITE_SEQUENCER_26 0x301A +#define WM8994_WRITE_SEQUENCER_27 0x301B +#define WM8994_WRITE_SEQUENCER_28 0x301C +#define WM8994_WRITE_SEQUENCER_29 0x301D +#define WM8994_WRITE_SEQUENCER_30 0x301E +#define WM8994_WRITE_SEQUENCER_31 0x301F +#define WM8994_WRITE_SEQUENCER_32 0x3020 +#define WM8994_WRITE_SEQUENCER_33 0x3021 +#define WM8994_WRITE_SEQUENCER_34 0x3022 +#define WM8994_WRITE_SEQUENCER_35 0x3023 +#define WM8994_WRITE_SEQUENCER_36 0x3024 +#define WM8994_WRITE_SEQUENCER_37 0x3025 +#define WM8994_WRITE_SEQUENCER_38 0x3026 +#define WM8994_WRITE_SEQUENCER_39 0x3027 +#define WM8994_WRITE_SEQUENCER_40 0x3028 +#define WM8994_WRITE_SEQUENCER_41 0x3029 +#define WM8994_WRITE_SEQUENCER_42 0x302A +#define WM8994_WRITE_SEQUENCER_43 0x302B +#define WM8994_WRITE_SEQUENCER_44 0x302C +#define WM8994_WRITE_SEQUENCER_45 0x302D +#define WM8994_WRITE_SEQUENCER_46 0x302E +#define WM8994_WRITE_SEQUENCER_47 0x302F +#define WM8994_WRITE_SEQUENCER_48 0x3030 +#define WM8994_WRITE_SEQUENCER_49 0x3031 +#define WM8994_WRITE_SEQUENCER_50 0x3032 +#define WM8994_WRITE_SEQUENCER_51 0x3033 +#define WM8994_WRITE_SEQUENCER_52 0x3034 +#define WM8994_WRITE_SEQUENCER_53 0x3035 +#define WM8994_WRITE_SEQUENCER_54 0x3036 +#define WM8994_WRITE_SEQUENCER_55 0x3037 +#define WM8994_WRITE_SEQUENCER_56 0x3038 +#define 
WM8994_WRITE_SEQUENCER_57 0x3039 +#define WM8994_WRITE_SEQUENCER_58 0x303A +#define WM8994_WRITE_SEQUENCER_59 0x303B +#define WM8994_WRITE_SEQUENCER_60 0x303C +#define WM8994_WRITE_SEQUENCER_61 0x303D +#define WM8994_WRITE_SEQUENCER_62 0x303E +#define WM8994_WRITE_SEQUENCER_63 0x303F +#define WM8994_WRITE_SEQUENCER_64 0x3040 +#define WM8994_WRITE_SEQUENCER_65 0x3041 +#define WM8994_WRITE_SEQUENCER_66 0x3042 +#define WM8994_WRITE_SEQUENCER_67 0x3043 +#define WM8994_WRITE_SEQUENCER_68 0x3044 +#define WM8994_WRITE_SEQUENCER_69 0x3045 +#define WM8994_WRITE_SEQUENCER_70 0x3046 +#define WM8994_WRITE_SEQUENCER_71 0x3047 +#define WM8994_WRITE_SEQUENCER_72 0x3048 +#define WM8994_WRITE_SEQUENCER_73 0x3049 +#define WM8994_WRITE_SEQUENCER_74 0x304A +#define WM8994_WRITE_SEQUENCER_75 0x304B +#define WM8994_WRITE_SEQUENCER_76 0x304C +#define WM8994_WRITE_SEQUENCER_77 0x304D +#define WM8994_WRITE_SEQUENCER_78 0x304E +#define WM8994_WRITE_SEQUENCER_79 0x304F +#define WM8994_WRITE_SEQUENCER_80 0x3050 +#define WM8994_WRITE_SEQUENCER_81 0x3051 +#define WM8994_WRITE_SEQUENCER_82 0x3052 +#define WM8994_WRITE_SEQUENCER_83 0x3053 +#define WM8994_WRITE_SEQUENCER_84 0x3054 +#define WM8994_WRITE_SEQUENCER_85 0x3055 +#define WM8994_WRITE_SEQUENCER_86 0x3056 +#define WM8994_WRITE_SEQUENCER_87 0x3057 +#define WM8994_WRITE_SEQUENCER_88 0x3058 +#define WM8994_WRITE_SEQUENCER_89 0x3059 +#define WM8994_WRITE_SEQUENCER_90 0x305A +#define WM8994_WRITE_SEQUENCER_91 0x305B +#define WM8994_WRITE_SEQUENCER_92 0x305C +#define WM8994_WRITE_SEQUENCER_93 0x305D +#define WM8994_WRITE_SEQUENCER_94 0x305E +#define WM8994_WRITE_SEQUENCER_95 0x305F +#define WM8994_WRITE_SEQUENCER_96 0x3060 +#define WM8994_WRITE_SEQUENCER_97 0x3061 +#define WM8994_WRITE_SEQUENCER_98 0x3062 +#define WM8994_WRITE_SEQUENCER_99 0x3063 +#define WM8994_WRITE_SEQUENCER_100 0x3064 +#define WM8994_WRITE_SEQUENCER_101 0x3065 +#define WM8994_WRITE_SEQUENCER_102 0x3066 +#define WM8994_WRITE_SEQUENCER_103 0x3067 +#define WM8994_WRITE_SEQUENCER_104 0x3068 +#define WM8994_WRITE_SEQUENCER_105 0x3069 +#define WM8994_WRITE_SEQUENCER_106 0x306A +#define WM8994_WRITE_SEQUENCER_107 0x306B +#define WM8994_WRITE_SEQUENCER_108 0x306C +#define WM8994_WRITE_SEQUENCER_109 0x306D +#define WM8994_WRITE_SEQUENCER_110 0x306E +#define WM8994_WRITE_SEQUENCER_111 0x306F +#define WM8994_WRITE_SEQUENCER_112 0x3070 +#define WM8994_WRITE_SEQUENCER_113 0x3071 +#define WM8994_WRITE_SEQUENCER_114 0x3072 +#define WM8994_WRITE_SEQUENCER_115 0x3073 +#define WM8994_WRITE_SEQUENCER_116 0x3074 +#define WM8994_WRITE_SEQUENCER_117 0x3075 +#define WM8994_WRITE_SEQUENCER_118 0x3076 +#define WM8994_WRITE_SEQUENCER_119 0x3077 +#define WM8994_WRITE_SEQUENCER_120 0x3078 +#define WM8994_WRITE_SEQUENCER_121 0x3079 +#define WM8994_WRITE_SEQUENCER_122 0x307A +#define WM8994_WRITE_SEQUENCER_123 0x307B +#define WM8994_WRITE_SEQUENCER_124 0x307C +#define WM8994_WRITE_SEQUENCER_125 0x307D +#define WM8994_WRITE_SEQUENCER_126 0x307E +#define WM8994_WRITE_SEQUENCER_127 0x307F +#define WM8994_WRITE_SEQUENCER_128 0x3080 +#define WM8994_WRITE_SEQUENCER_129 0x3081 +#define WM8994_WRITE_SEQUENCER_130 0x3082 +#define WM8994_WRITE_SEQUENCER_131 0x3083 +#define WM8994_WRITE_SEQUENCER_132 0x3084 +#define WM8994_WRITE_SEQUENCER_133 0x3085 +#define WM8994_WRITE_SEQUENCER_134 0x3086 +#define WM8994_WRITE_SEQUENCER_135 0x3087 +#define WM8994_WRITE_SEQUENCER_136 0x3088 +#define WM8994_WRITE_SEQUENCER_137 0x3089 +#define WM8994_WRITE_SEQUENCER_138 0x308A +#define WM8994_WRITE_SEQUENCER_139 0x308B +#define WM8994_WRITE_SEQUENCER_140 
0x308C +#define WM8994_WRITE_SEQUENCER_141 0x308D +#define WM8994_WRITE_SEQUENCER_142 0x308E +#define WM8994_WRITE_SEQUENCER_143 0x308F +#define WM8994_WRITE_SEQUENCER_144 0x3090 +#define WM8994_WRITE_SEQUENCER_145 0x3091 +#define WM8994_WRITE_SEQUENCER_146 0x3092 +#define WM8994_WRITE_SEQUENCER_147 0x3093 +#define WM8994_WRITE_SEQUENCER_148 0x3094 +#define WM8994_WRITE_SEQUENCER_149 0x3095 +#define WM8994_WRITE_SEQUENCER_150 0x3096 +#define WM8994_WRITE_SEQUENCER_151 0x3097 +#define WM8994_WRITE_SEQUENCER_152 0x3098 +#define WM8994_WRITE_SEQUENCER_153 0x3099 +#define WM8994_WRITE_SEQUENCER_154 0x309A +#define WM8994_WRITE_SEQUENCER_155 0x309B +#define WM8994_WRITE_SEQUENCER_156 0x309C +#define WM8994_WRITE_SEQUENCER_157 0x309D +#define WM8994_WRITE_SEQUENCER_158 0x309E +#define WM8994_WRITE_SEQUENCER_159 0x309F +#define WM8994_WRITE_SEQUENCER_160 0x30A0 +#define WM8994_WRITE_SEQUENCER_161 0x30A1 +#define WM8994_WRITE_SEQUENCER_162 0x30A2 +#define WM8994_WRITE_SEQUENCER_163 0x30A3 +#define WM8994_WRITE_SEQUENCER_164 0x30A4 +#define WM8994_WRITE_SEQUENCER_165 0x30A5 +#define WM8994_WRITE_SEQUENCER_166 0x30A6 +#define WM8994_WRITE_SEQUENCER_167 0x30A7 +#define WM8994_WRITE_SEQUENCER_168 0x30A8 +#define WM8994_WRITE_SEQUENCER_169 0x30A9 +#define WM8994_WRITE_SEQUENCER_170 0x30AA +#define WM8994_WRITE_SEQUENCER_171 0x30AB +#define WM8994_WRITE_SEQUENCER_172 0x30AC +#define WM8994_WRITE_SEQUENCER_173 0x30AD +#define WM8994_WRITE_SEQUENCER_174 0x30AE +#define WM8994_WRITE_SEQUENCER_175 0x30AF +#define WM8994_WRITE_SEQUENCER_176 0x30B0 +#define WM8994_WRITE_SEQUENCER_177 0x30B1 +#define WM8994_WRITE_SEQUENCER_178 0x30B2 +#define WM8994_WRITE_SEQUENCER_179 0x30B3 +#define WM8994_WRITE_SEQUENCER_180 0x30B4 +#define WM8994_WRITE_SEQUENCER_181 0x30B5 +#define WM8994_WRITE_SEQUENCER_182 0x30B6 +#define WM8994_WRITE_SEQUENCER_183 0x30B7 +#define WM8994_WRITE_SEQUENCER_184 0x30B8 +#define WM8994_WRITE_SEQUENCER_185 0x30B9 +#define WM8994_WRITE_SEQUENCER_186 0x30BA +#define WM8994_WRITE_SEQUENCER_187 0x30BB +#define WM8994_WRITE_SEQUENCER_188 0x30BC +#define WM8994_WRITE_SEQUENCER_189 0x30BD +#define WM8994_WRITE_SEQUENCER_190 0x30BE +#define WM8994_WRITE_SEQUENCER_191 0x30BF +#define WM8994_WRITE_SEQUENCER_192 0x30C0 +#define WM8994_WRITE_SEQUENCER_193 0x30C1 +#define WM8994_WRITE_SEQUENCER_194 0x30C2 +#define WM8994_WRITE_SEQUENCER_195 0x30C3 +#define WM8994_WRITE_SEQUENCER_196 0x30C4 +#define WM8994_WRITE_SEQUENCER_197 0x30C5 +#define WM8994_WRITE_SEQUENCER_198 0x30C6 +#define WM8994_WRITE_SEQUENCER_199 0x30C7 +#define WM8994_WRITE_SEQUENCER_200 0x30C8 +#define WM8994_WRITE_SEQUENCER_201 0x30C9 +#define WM8994_WRITE_SEQUENCER_202 0x30CA +#define WM8994_WRITE_SEQUENCER_203 0x30CB +#define WM8994_WRITE_SEQUENCER_204 0x30CC +#define WM8994_WRITE_SEQUENCER_205 0x30CD +#define WM8994_WRITE_SEQUENCER_206 0x30CE +#define WM8994_WRITE_SEQUENCER_207 0x30CF +#define WM8994_WRITE_SEQUENCER_208 0x30D0 +#define WM8994_WRITE_SEQUENCER_209 0x30D1 +#define WM8994_WRITE_SEQUENCER_210 0x30D2 +#define WM8994_WRITE_SEQUENCER_211 0x30D3 +#define WM8994_WRITE_SEQUENCER_212 0x30D4 +#define WM8994_WRITE_SEQUENCER_213 0x30D5 +#define WM8994_WRITE_SEQUENCER_214 0x30D6 +#define WM8994_WRITE_SEQUENCER_215 0x30D7 +#define WM8994_WRITE_SEQUENCER_216 0x30D8 +#define WM8994_WRITE_SEQUENCER_217 0x30D9 +#define WM8994_WRITE_SEQUENCER_218 0x30DA +#define WM8994_WRITE_SEQUENCER_219 0x30DB +#define WM8994_WRITE_SEQUENCER_220 0x30DC +#define WM8994_WRITE_SEQUENCER_221 0x30DD +#define WM8994_WRITE_SEQUENCER_222 0x30DE +#define 
WM8994_WRITE_SEQUENCER_223 0x30DF +#define WM8994_WRITE_SEQUENCER_224 0x30E0 +#define WM8994_WRITE_SEQUENCER_225 0x30E1 +#define WM8994_WRITE_SEQUENCER_226 0x30E2 +#define WM8994_WRITE_SEQUENCER_227 0x30E3 +#define WM8994_WRITE_SEQUENCER_228 0x30E4 +#define WM8994_WRITE_SEQUENCER_229 0x30E5 +#define WM8994_WRITE_SEQUENCER_230 0x30E6 +#define WM8994_WRITE_SEQUENCER_231 0x30E7 +#define WM8994_WRITE_SEQUENCER_232 0x30E8 +#define WM8994_WRITE_SEQUENCER_233 0x30E9 +#define WM8994_WRITE_SEQUENCER_234 0x30EA +#define WM8994_WRITE_SEQUENCER_235 0x30EB +#define WM8994_WRITE_SEQUENCER_236 0x30EC +#define WM8994_WRITE_SEQUENCER_237 0x30ED +#define WM8994_WRITE_SEQUENCER_238 0x30EE +#define WM8994_WRITE_SEQUENCER_239 0x30EF +#define WM8994_WRITE_SEQUENCER_240 0x30F0 +#define WM8994_WRITE_SEQUENCER_241 0x30F1 +#define WM8994_WRITE_SEQUENCER_242 0x30F2 +#define WM8994_WRITE_SEQUENCER_243 0x30F3 +#define WM8994_WRITE_SEQUENCER_244 0x30F4 +#define WM8994_WRITE_SEQUENCER_245 0x30F5 +#define WM8994_WRITE_SEQUENCER_246 0x30F6 +#define WM8994_WRITE_SEQUENCER_247 0x30F7 +#define WM8994_WRITE_SEQUENCER_248 0x30F8 +#define WM8994_WRITE_SEQUENCER_249 0x30F9 +#define WM8994_WRITE_SEQUENCER_250 0x30FA +#define WM8994_WRITE_SEQUENCER_251 0x30FB +#define WM8994_WRITE_SEQUENCER_252 0x30FC +#define WM8994_WRITE_SEQUENCER_253 0x30FD +#define WM8994_WRITE_SEQUENCER_254 0x30FE +#define WM8994_WRITE_SEQUENCER_255 0x30FF +#define WM8994_WRITE_SEQUENCER_256 0x3100 +#define WM8994_WRITE_SEQUENCER_257 0x3101 +#define WM8994_WRITE_SEQUENCER_258 0x3102 +#define WM8994_WRITE_SEQUENCER_259 0x3103 +#define WM8994_WRITE_SEQUENCER_260 0x3104 +#define WM8994_WRITE_SEQUENCER_261 0x3105 +#define WM8994_WRITE_SEQUENCER_262 0x3106 +#define WM8994_WRITE_SEQUENCER_263 0x3107 +#define WM8994_WRITE_SEQUENCER_264 0x3108 +#define WM8994_WRITE_SEQUENCER_265 0x3109 +#define WM8994_WRITE_SEQUENCER_266 0x310A +#define WM8994_WRITE_SEQUENCER_267 0x310B +#define WM8994_WRITE_SEQUENCER_268 0x310C +#define WM8994_WRITE_SEQUENCER_269 0x310D +#define WM8994_WRITE_SEQUENCER_270 0x310E +#define WM8994_WRITE_SEQUENCER_271 0x310F +#define WM8994_WRITE_SEQUENCER_272 0x3110 +#define WM8994_WRITE_SEQUENCER_273 0x3111 +#define WM8994_WRITE_SEQUENCER_274 0x3112 +#define WM8994_WRITE_SEQUENCER_275 0x3113 +#define WM8994_WRITE_SEQUENCER_276 0x3114 +#define WM8994_WRITE_SEQUENCER_277 0x3115 +#define WM8994_WRITE_SEQUENCER_278 0x3116 +#define WM8994_WRITE_SEQUENCER_279 0x3117 +#define WM8994_WRITE_SEQUENCER_280 0x3118 +#define WM8994_WRITE_SEQUENCER_281 0x3119 +#define WM8994_WRITE_SEQUENCER_282 0x311A +#define WM8994_WRITE_SEQUENCER_283 0x311B +#define WM8994_WRITE_SEQUENCER_284 0x311C +#define WM8994_WRITE_SEQUENCER_285 0x311D +#define WM8994_WRITE_SEQUENCER_286 0x311E +#define WM8994_WRITE_SEQUENCER_287 0x311F +#define WM8994_WRITE_SEQUENCER_288 0x3120 +#define WM8994_WRITE_SEQUENCER_289 0x3121 +#define WM8994_WRITE_SEQUENCER_290 0x3122 +#define WM8994_WRITE_SEQUENCER_291 0x3123 +#define WM8994_WRITE_SEQUENCER_292 0x3124 +#define WM8994_WRITE_SEQUENCER_293 0x3125 +#define WM8994_WRITE_SEQUENCER_294 0x3126 +#define WM8994_WRITE_SEQUENCER_295 0x3127 +#define WM8994_WRITE_SEQUENCER_296 0x3128 +#define WM8994_WRITE_SEQUENCER_297 0x3129 +#define WM8994_WRITE_SEQUENCER_298 0x312A +#define WM8994_WRITE_SEQUENCER_299 0x312B +#define WM8994_WRITE_SEQUENCER_300 0x312C +#define WM8994_WRITE_SEQUENCER_301 0x312D +#define WM8994_WRITE_SEQUENCER_302 0x312E +#define WM8994_WRITE_SEQUENCER_303 0x312F +#define WM8994_WRITE_SEQUENCER_304 0x3130 +#define WM8994_WRITE_SEQUENCER_305 
0x3131 +#define WM8994_WRITE_SEQUENCER_306 0x3132 +#define WM8994_WRITE_SEQUENCER_307 0x3133 +#define WM8994_WRITE_SEQUENCER_308 0x3134 +#define WM8994_WRITE_SEQUENCER_309 0x3135 +#define WM8994_WRITE_SEQUENCER_310 0x3136 +#define WM8994_WRITE_SEQUENCER_311 0x3137 +#define WM8994_WRITE_SEQUENCER_312 0x3138 +#define WM8994_WRITE_SEQUENCER_313 0x3139 +#define WM8994_WRITE_SEQUENCER_314 0x313A +#define WM8994_WRITE_SEQUENCER_315 0x313B +#define WM8994_WRITE_SEQUENCER_316 0x313C +#define WM8994_WRITE_SEQUENCER_317 0x313D +#define WM8994_WRITE_SEQUENCER_318 0x313E +#define WM8994_WRITE_SEQUENCER_319 0x313F +#define WM8994_WRITE_SEQUENCER_320 0x3140 +#define WM8994_WRITE_SEQUENCER_321 0x3141 +#define WM8994_WRITE_SEQUENCER_322 0x3142 +#define WM8994_WRITE_SEQUENCER_323 0x3143 +#define WM8994_WRITE_SEQUENCER_324 0x3144 +#define WM8994_WRITE_SEQUENCER_325 0x3145 +#define WM8994_WRITE_SEQUENCER_326 0x3146 +#define WM8994_WRITE_SEQUENCER_327 0x3147 +#define WM8994_WRITE_SEQUENCER_328 0x3148 +#define WM8994_WRITE_SEQUENCER_329 0x3149 +#define WM8994_WRITE_SEQUENCER_330 0x314A +#define WM8994_WRITE_SEQUENCER_331 0x314B +#define WM8994_WRITE_SEQUENCER_332 0x314C +#define WM8994_WRITE_SEQUENCER_333 0x314D +#define WM8994_WRITE_SEQUENCER_334 0x314E +#define WM8994_WRITE_SEQUENCER_335 0x314F +#define WM8994_WRITE_SEQUENCER_336 0x3150 +#define WM8994_WRITE_SEQUENCER_337 0x3151 +#define WM8994_WRITE_SEQUENCER_338 0x3152 +#define WM8994_WRITE_SEQUENCER_339 0x3153 +#define WM8994_WRITE_SEQUENCER_340 0x3154 +#define WM8994_WRITE_SEQUENCER_341 0x3155 +#define WM8994_WRITE_SEQUENCER_342 0x3156 +#define WM8994_WRITE_SEQUENCER_343 0x3157 +#define WM8994_WRITE_SEQUENCER_344 0x3158 +#define WM8994_WRITE_SEQUENCER_345 0x3159 +#define WM8994_WRITE_SEQUENCER_346 0x315A +#define WM8994_WRITE_SEQUENCER_347 0x315B +#define WM8994_WRITE_SEQUENCER_348 0x315C +#define WM8994_WRITE_SEQUENCER_349 0x315D +#define WM8994_WRITE_SEQUENCER_350 0x315E +#define WM8994_WRITE_SEQUENCER_351 0x315F +#define WM8994_WRITE_SEQUENCER_352 0x3160 +#define WM8994_WRITE_SEQUENCER_353 0x3161 +#define WM8994_WRITE_SEQUENCER_354 0x3162 +#define WM8994_WRITE_SEQUENCER_355 0x3163 +#define WM8994_WRITE_SEQUENCER_356 0x3164 +#define WM8994_WRITE_SEQUENCER_357 0x3165 +#define WM8994_WRITE_SEQUENCER_358 0x3166 +#define WM8994_WRITE_SEQUENCER_359 0x3167 +#define WM8994_WRITE_SEQUENCER_360 0x3168 +#define WM8994_WRITE_SEQUENCER_361 0x3169 +#define WM8994_WRITE_SEQUENCER_362 0x316A +#define WM8994_WRITE_SEQUENCER_363 0x316B +#define WM8994_WRITE_SEQUENCER_364 0x316C +#define WM8994_WRITE_SEQUENCER_365 0x316D +#define WM8994_WRITE_SEQUENCER_366 0x316E +#define WM8994_WRITE_SEQUENCER_367 0x316F +#define WM8994_WRITE_SEQUENCER_368 0x3170 +#define WM8994_WRITE_SEQUENCER_369 0x3171 +#define WM8994_WRITE_SEQUENCER_370 0x3172 +#define WM8994_WRITE_SEQUENCER_371 0x3173 +#define WM8994_WRITE_SEQUENCER_372 0x3174 +#define WM8994_WRITE_SEQUENCER_373 0x3175 +#define WM8994_WRITE_SEQUENCER_374 0x3176 +#define WM8994_WRITE_SEQUENCER_375 0x3177 +#define WM8994_WRITE_SEQUENCER_376 0x3178 +#define WM8994_WRITE_SEQUENCER_377 0x3179 +#define WM8994_WRITE_SEQUENCER_378 0x317A +#define WM8994_WRITE_SEQUENCER_379 0x317B +#define WM8994_WRITE_SEQUENCER_380 0x317C +#define WM8994_WRITE_SEQUENCER_381 0x317D +#define WM8994_WRITE_SEQUENCER_382 0x317E +#define WM8994_WRITE_SEQUENCER_383 0x317F +#define WM8994_WRITE_SEQUENCER_384 0x3180 +#define WM8994_WRITE_SEQUENCER_385 0x3181 +#define WM8994_WRITE_SEQUENCER_386 0x3182 +#define WM8994_WRITE_SEQUENCER_387 0x3183 +#define 
WM8994_WRITE_SEQUENCER_388 0x3184 +#define WM8994_WRITE_SEQUENCER_389 0x3185 +#define WM8994_WRITE_SEQUENCER_390 0x3186 +#define WM8994_WRITE_SEQUENCER_391 0x3187 +#define WM8994_WRITE_SEQUENCER_392 0x3188 +#define WM8994_WRITE_SEQUENCER_393 0x3189 +#define WM8994_WRITE_SEQUENCER_394 0x318A +#define WM8994_WRITE_SEQUENCER_395 0x318B +#define WM8994_WRITE_SEQUENCER_396 0x318C +#define WM8994_WRITE_SEQUENCER_397 0x318D +#define WM8994_WRITE_SEQUENCER_398 0x318E +#define WM8994_WRITE_SEQUENCER_399 0x318F +#define WM8994_WRITE_SEQUENCER_400 0x3190 +#define WM8994_WRITE_SEQUENCER_401 0x3191 +#define WM8994_WRITE_SEQUENCER_402 0x3192 +#define WM8994_WRITE_SEQUENCER_403 0x3193 +#define WM8994_WRITE_SEQUENCER_404 0x3194 +#define WM8994_WRITE_SEQUENCER_405 0x3195 +#define WM8994_WRITE_SEQUENCER_406 0x3196 +#define WM8994_WRITE_SEQUENCER_407 0x3197 +#define WM8994_WRITE_SEQUENCER_408 0x3198 +#define WM8994_WRITE_SEQUENCER_409 0x3199 +#define WM8994_WRITE_SEQUENCER_410 0x319A +#define WM8994_WRITE_SEQUENCER_411 0x319B +#define WM8994_WRITE_SEQUENCER_412 0x319C +#define WM8994_WRITE_SEQUENCER_413 0x319D +#define WM8994_WRITE_SEQUENCER_414 0x319E +#define WM8994_WRITE_SEQUENCER_415 0x319F +#define WM8994_WRITE_SEQUENCER_416 0x31A0 +#define WM8994_WRITE_SEQUENCER_417 0x31A1 +#define WM8994_WRITE_SEQUENCER_418 0x31A2 +#define WM8994_WRITE_SEQUENCER_419 0x31A3 +#define WM8994_WRITE_SEQUENCER_420 0x31A4 +#define WM8994_WRITE_SEQUENCER_421 0x31A5 +#define WM8994_WRITE_SEQUENCER_422 0x31A6 +#define WM8994_WRITE_SEQUENCER_423 0x31A7 +#define WM8994_WRITE_SEQUENCER_424 0x31A8 +#define WM8994_WRITE_SEQUENCER_425 0x31A9 +#define WM8994_WRITE_SEQUENCER_426 0x31AA +#define WM8994_WRITE_SEQUENCER_427 0x31AB +#define WM8994_WRITE_SEQUENCER_428 0x31AC +#define WM8994_WRITE_SEQUENCER_429 0x31AD +#define WM8994_WRITE_SEQUENCER_430 0x31AE +#define WM8994_WRITE_SEQUENCER_431 0x31AF +#define WM8994_WRITE_SEQUENCER_432 0x31B0 +#define WM8994_WRITE_SEQUENCER_433 0x31B1 +#define WM8994_WRITE_SEQUENCER_434 0x31B2 +#define WM8994_WRITE_SEQUENCER_435 0x31B3 +#define WM8994_WRITE_SEQUENCER_436 0x31B4 +#define WM8994_WRITE_SEQUENCER_437 0x31B5 +#define WM8994_WRITE_SEQUENCER_438 0x31B6 +#define WM8994_WRITE_SEQUENCER_439 0x31B7 +#define WM8994_WRITE_SEQUENCER_440 0x31B8 +#define WM8994_WRITE_SEQUENCER_441 0x31B9 +#define WM8994_WRITE_SEQUENCER_442 0x31BA +#define WM8994_WRITE_SEQUENCER_443 0x31BB +#define WM8994_WRITE_SEQUENCER_444 0x31BC +#define WM8994_WRITE_SEQUENCER_445 0x31BD +#define WM8994_WRITE_SEQUENCER_446 0x31BE +#define WM8994_WRITE_SEQUENCER_447 0x31BF +#define WM8994_WRITE_SEQUENCER_448 0x31C0 +#define WM8994_WRITE_SEQUENCER_449 0x31C1 +#define WM8994_WRITE_SEQUENCER_450 0x31C2 +#define WM8994_WRITE_SEQUENCER_451 0x31C3 +#define WM8994_WRITE_SEQUENCER_452 0x31C4 +#define WM8994_WRITE_SEQUENCER_453 0x31C5 +#define WM8994_WRITE_SEQUENCER_454 0x31C6 +#define WM8994_WRITE_SEQUENCER_455 0x31C7 +#define WM8994_WRITE_SEQUENCER_456 0x31C8 +#define WM8994_WRITE_SEQUENCER_457 0x31C9 +#define WM8994_WRITE_SEQUENCER_458 0x31CA +#define WM8994_WRITE_SEQUENCER_459 0x31CB +#define WM8994_WRITE_SEQUENCER_460 0x31CC +#define WM8994_WRITE_SEQUENCER_461 0x31CD +#define WM8994_WRITE_SEQUENCER_462 0x31CE +#define WM8994_WRITE_SEQUENCER_463 0x31CF +#define WM8994_WRITE_SEQUENCER_464 0x31D0 +#define WM8994_WRITE_SEQUENCER_465 0x31D1 +#define WM8994_WRITE_SEQUENCER_466 0x31D2 +#define WM8994_WRITE_SEQUENCER_467 0x31D3 +#define WM8994_WRITE_SEQUENCER_468 0x31D4 +#define WM8994_WRITE_SEQUENCER_469 0x31D5 +#define WM8994_WRITE_SEQUENCER_470 
0x31D6 +#define WM8994_WRITE_SEQUENCER_471 0x31D7 +#define WM8994_WRITE_SEQUENCER_472 0x31D8 +#define WM8994_WRITE_SEQUENCER_473 0x31D9 +#define WM8994_WRITE_SEQUENCER_474 0x31DA +#define WM8994_WRITE_SEQUENCER_475 0x31DB +#define WM8994_WRITE_SEQUENCER_476 0x31DC +#define WM8994_WRITE_SEQUENCER_477 0x31DD +#define WM8994_WRITE_SEQUENCER_478 0x31DE +#define WM8994_WRITE_SEQUENCER_479 0x31DF +#define WM8994_WRITE_SEQUENCER_480 0x31E0 +#define WM8994_WRITE_SEQUENCER_481 0x31E1 +#define WM8994_WRITE_SEQUENCER_482 0x31E2 +#define WM8994_WRITE_SEQUENCER_483 0x31E3 +#define WM8994_WRITE_SEQUENCER_484 0x31E4 +#define WM8994_WRITE_SEQUENCER_485 0x31E5 +#define WM8994_WRITE_SEQUENCER_486 0x31E6 +#define WM8994_WRITE_SEQUENCER_487 0x31E7 +#define WM8994_WRITE_SEQUENCER_488 0x31E8 +#define WM8994_WRITE_SEQUENCER_489 0x31E9 +#define WM8994_WRITE_SEQUENCER_490 0x31EA +#define WM8994_WRITE_SEQUENCER_491 0x31EB +#define WM8994_WRITE_SEQUENCER_492 0x31EC +#define WM8994_WRITE_SEQUENCER_493 0x31ED +#define WM8994_WRITE_SEQUENCER_494 0x31EE +#define WM8994_WRITE_SEQUENCER_495 0x31EF +#define WM8994_WRITE_SEQUENCER_496 0x31F0 +#define WM8994_WRITE_SEQUENCER_497 0x31F1 +#define WM8994_WRITE_SEQUENCER_498 0x31F2 +#define WM8994_WRITE_SEQUENCER_499 0x31F3 +#define WM8994_WRITE_SEQUENCER_500 0x31F4 +#define WM8994_WRITE_SEQUENCER_501 0x31F5 +#define WM8994_WRITE_SEQUENCER_502 0x31F6 +#define WM8994_WRITE_SEQUENCER_503 0x31F7 +#define WM8994_WRITE_SEQUENCER_504 0x31F8 +#define WM8994_WRITE_SEQUENCER_505 0x31F9 +#define WM8994_WRITE_SEQUENCER_506 0x31FA +#define WM8994_WRITE_SEQUENCER_507 0x31FB +#define WM8994_WRITE_SEQUENCER_508 0x31FC +#define WM8994_WRITE_SEQUENCER_509 0x31FD +#define WM8994_WRITE_SEQUENCER_510 0x31FE +#define WM8994_WRITE_SEQUENCER_511 0x31FF + +#define WM8994_REGISTER_COUNT 736 +#define WM8994_MAX_REGISTER 0x31FF +#define WM8994_MAX_CACHED_REGISTER 0x749 + +/* + * Field Definitions. 
+ */ + +/* + * R0 (0x00) - Software Reset + */ +#define WM8994_SW_RESET_MASK 0xFFFF /* SW_RESET - [15:0] */ +#define WM8994_SW_RESET_SHIFT 0 /* SW_RESET - [15:0] */ +#define WM8994_SW_RESET_WIDTH 16 /* SW_RESET - [15:0] */ + +/* + * R1 (0x01) - Power Management (1) + */ +#define WM8994_SPKOUTR_ENA 0x2000 /* SPKOUTR_ENA */ +#define WM8994_SPKOUTR_ENA_MASK 0x2000 /* SPKOUTR_ENA */ +#define WM8994_SPKOUTR_ENA_SHIFT 13 /* SPKOUTR_ENA */ +#define WM8994_SPKOUTR_ENA_WIDTH 1 /* SPKOUTR_ENA */ +#define WM8994_SPKOUTL_ENA 0x1000 /* SPKOUTL_ENA */ +#define WM8994_SPKOUTL_ENA_MASK 0x1000 /* SPKOUTL_ENA */ +#define WM8994_SPKOUTL_ENA_SHIFT 12 /* SPKOUTL_ENA */ +#define WM8994_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */ +#define WM8994_HPOUT2_ENA 0x0800 /* HPOUT2_ENA */ +#define WM8994_HPOUT2_ENA_MASK 0x0800 /* HPOUT2_ENA */ +#define WM8994_HPOUT2_ENA_SHIFT 11 /* HPOUT2_ENA */ +#define WM8994_HPOUT2_ENA_WIDTH 1 /* HPOUT2_ENA */ +#define WM8994_HPOUT1L_ENA 0x0200 /* HPOUT1L_ENA */ +#define WM8994_HPOUT1L_ENA_MASK 0x0200 /* HPOUT1L_ENA */ +#define WM8994_HPOUT1L_ENA_SHIFT 9 /* HPOUT1L_ENA */ +#define WM8994_HPOUT1L_ENA_WIDTH 1 /* HPOUT1L_ENA */ +#define WM8994_HPOUT1R_ENA 0x0100 /* HPOUT1R_ENA */ +#define WM8994_HPOUT1R_ENA_MASK 0x0100 /* HPOUT1R_ENA */ +#define WM8994_HPOUT1R_ENA_SHIFT 8 /* HPOUT1R_ENA */ +#define WM8994_HPOUT1R_ENA_WIDTH 1 /* HPOUT1R_ENA */ +#define WM8994_MICB2_ENA 0x0020 /* MICB2_ENA */ +#define WM8994_MICB2_ENA_MASK 0x0020 /* MICB2_ENA */ +#define WM8994_MICB2_ENA_SHIFT 5 /* MICB2_ENA */ +#define WM8994_MICB2_ENA_WIDTH 1 /* MICB2_ENA */ +#define WM8994_MICB1_ENA 0x0010 /* MICB1_ENA */ +#define WM8994_MICB1_ENA_MASK 0x0010 /* MICB1_ENA */ +#define WM8994_MICB1_ENA_SHIFT 4 /* MICB1_ENA */ +#define WM8994_MICB1_ENA_WIDTH 1 /* MICB1_ENA */ +#define WM8994_VMID_SEL_MASK 0x0006 /* VMID_SEL - [2:1] */ +#define WM8994_VMID_SEL_SHIFT 1 /* VMID_SEL - [2:1] */ +#define WM8994_VMID_SEL_WIDTH 2 /* VMID_SEL - [2:1] */ +#define WM8994_BIAS_ENA 0x0001 /* BIAS_ENA */ +#define WM8994_BIAS_ENA_MASK 0x0001 /* BIAS_ENA */ +#define WM8994_BIAS_ENA_SHIFT 0 /* BIAS_ENA */ +#define WM8994_BIAS_ENA_WIDTH 1 /* BIAS_ENA */ + +/* + * R2 (0x02) - Power Management (2) + */ +#define WM8994_TSHUT_ENA 0x4000 /* TSHUT_ENA */ +#define WM8994_TSHUT_ENA_MASK 0x4000 /* TSHUT_ENA */ +#define WM8994_TSHUT_ENA_SHIFT 14 /* TSHUT_ENA */ +#define WM8994_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */ +#define WM8994_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */ +#define WM8994_TSHUT_OPDIS_MASK 0x2000 /* TSHUT_OPDIS */ +#define WM8994_TSHUT_OPDIS_SHIFT 13 /* TSHUT_OPDIS */ +#define WM8994_TSHUT_OPDIS_WIDTH 1 /* TSHUT_OPDIS */ +#define WM8994_OPCLK_ENA 0x0800 /* OPCLK_ENA */ +#define WM8994_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */ +#define WM8994_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */ +#define WM8994_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */ +#define WM8994_MIXINL_ENA 0x0200 /* MIXINL_ENA */ +#define WM8994_MIXINL_ENA_MASK 0x0200 /* MIXINL_ENA */ +#define WM8994_MIXINL_ENA_SHIFT 9 /* MIXINL_ENA */ +#define WM8994_MIXINL_ENA_WIDTH 1 /* MIXINL_ENA */ +#define WM8994_MIXINR_ENA 0x0100 /* MIXINR_ENA */ +#define WM8994_MIXINR_ENA_MASK 0x0100 /* MIXINR_ENA */ +#define WM8994_MIXINR_ENA_SHIFT 8 /* MIXINR_ENA */ +#define WM8994_MIXINR_ENA_WIDTH 1 /* MIXINR_ENA */ +#define WM8994_IN2L_ENA 0x0080 /* IN2L_ENA */ +#define WM8994_IN2L_ENA_MASK 0x0080 /* IN2L_ENA */ +#define WM8994_IN2L_ENA_SHIFT 7 /* IN2L_ENA */ +#define WM8994_IN2L_ENA_WIDTH 1 /* IN2L_ENA */ +#define WM8994_IN1L_ENA 0x0040 /* IN1L_ENA */ +#define WM8994_IN1L_ENA_MASK 0x0040 /* IN1L_ENA */ +#define 
WM8994_IN1L_ENA_SHIFT 6 /* IN1L_ENA */ +#define WM8994_IN1L_ENA_WIDTH 1 /* IN1L_ENA */ +#define WM8994_IN2R_ENA 0x0020 /* IN2R_ENA */ +#define WM8994_IN2R_ENA_MASK 0x0020 /* IN2R_ENA */ +#define WM8994_IN2R_ENA_SHIFT 5 /* IN2R_ENA */ +#define WM8994_IN2R_ENA_WIDTH 1 /* IN2R_ENA */ +#define WM8994_IN1R_ENA 0x0010 /* IN1R_ENA */ +#define WM8994_IN1R_ENA_MASK 0x0010 /* IN1R_ENA */ +#define WM8994_IN1R_ENA_SHIFT 4 /* IN1R_ENA */ +#define WM8994_IN1R_ENA_WIDTH 1 /* IN1R_ENA */ + +/* + * R3 (0x03) - Power Management (3) + */ +#define WM8994_LINEOUT1N_ENA 0x2000 /* LINEOUT1N_ENA */ +#define WM8994_LINEOUT1N_ENA_MASK 0x2000 /* LINEOUT1N_ENA */ +#define WM8994_LINEOUT1N_ENA_SHIFT 13 /* LINEOUT1N_ENA */ +#define WM8994_LINEOUT1N_ENA_WIDTH 1 /* LINEOUT1N_ENA */ +#define WM8994_LINEOUT1P_ENA 0x1000 /* LINEOUT1P_ENA */ +#define WM8994_LINEOUT1P_ENA_MASK 0x1000 /* LINEOUT1P_ENA */ +#define WM8994_LINEOUT1P_ENA_SHIFT 12 /* LINEOUT1P_ENA */ +#define WM8994_LINEOUT1P_ENA_WIDTH 1 /* LINEOUT1P_ENA */ +#define WM8994_LINEOUT2N_ENA 0x0800 /* LINEOUT2N_ENA */ +#define WM8994_LINEOUT2N_ENA_MASK 0x0800 /* LINEOUT2N_ENA */ +#define WM8994_LINEOUT2N_ENA_SHIFT 11 /* LINEOUT2N_ENA */ +#define WM8994_LINEOUT2N_ENA_WIDTH 1 /* LINEOUT2N_ENA */ +#define WM8994_LINEOUT2P_ENA 0x0400 /* LINEOUT2P_ENA */ +#define WM8994_LINEOUT2P_ENA_MASK 0x0400 /* LINEOUT2P_ENA */ +#define WM8994_LINEOUT2P_ENA_SHIFT 10 /* LINEOUT2P_ENA */ +#define WM8994_LINEOUT2P_ENA_WIDTH 1 /* LINEOUT2P_ENA */ +#define WM8994_SPKRVOL_ENA 0x0200 /* SPKRVOL_ENA */ +#define WM8994_SPKRVOL_ENA_MASK 0x0200 /* SPKRVOL_ENA */ +#define WM8994_SPKRVOL_ENA_SHIFT 9 /* SPKRVOL_ENA */ +#define WM8994_SPKRVOL_ENA_WIDTH 1 /* SPKRVOL_ENA */ +#define WM8994_SPKLVOL_ENA 0x0100 /* SPKLVOL_ENA */ +#define WM8994_SPKLVOL_ENA_MASK 0x0100 /* SPKLVOL_ENA */ +#define WM8994_SPKLVOL_ENA_SHIFT 8 /* SPKLVOL_ENA */ +#define WM8994_SPKLVOL_ENA_WIDTH 1 /* SPKLVOL_ENA */ +#define WM8994_MIXOUTLVOL_ENA 0x0080 /* MIXOUTLVOL_ENA */ +#define WM8994_MIXOUTLVOL_ENA_MASK 0x0080 /* MIXOUTLVOL_ENA */ +#define WM8994_MIXOUTLVOL_ENA_SHIFT 7 /* MIXOUTLVOL_ENA */ +#define WM8994_MIXOUTLVOL_ENA_WIDTH 1 /* MIXOUTLVOL_ENA */ +#define WM8994_MIXOUTRVOL_ENA 0x0040 /* MIXOUTRVOL_ENA */ +#define WM8994_MIXOUTRVOL_ENA_MASK 0x0040 /* MIXOUTRVOL_ENA */ +#define WM8994_MIXOUTRVOL_ENA_SHIFT 6 /* MIXOUTRVOL_ENA */ +#define WM8994_MIXOUTRVOL_ENA_WIDTH 1 /* MIXOUTRVOL_ENA */ +#define WM8994_MIXOUTL_ENA 0x0020 /* MIXOUTL_ENA */ +#define WM8994_MIXOUTL_ENA_MASK 0x0020 /* MIXOUTL_ENA */ +#define WM8994_MIXOUTL_ENA_SHIFT 5 /* MIXOUTL_ENA */ +#define WM8994_MIXOUTL_ENA_WIDTH 1 /* MIXOUTL_ENA */ +#define WM8994_MIXOUTR_ENA 0x0010 /* MIXOUTR_ENA */ +#define WM8994_MIXOUTR_ENA_MASK 0x0010 /* MIXOUTR_ENA */ +#define WM8994_MIXOUTR_ENA_SHIFT 4 /* MIXOUTR_ENA */ +#define WM8994_MIXOUTR_ENA_WIDTH 1 /* MIXOUTR_ENA */ + +/* + * R4 (0x04) - Power Management (4) + */ +#define WM8994_AIF2ADCL_ENA 0x2000 /* AIF2ADCL_ENA */ +#define WM8994_AIF2ADCL_ENA_MASK 0x2000 /* AIF2ADCL_ENA */ +#define WM8994_AIF2ADCL_ENA_SHIFT 13 /* AIF2ADCL_ENA */ +#define WM8994_AIF2ADCL_ENA_WIDTH 1 /* AIF2ADCL_ENA */ +#define WM8994_AIF2ADCR_ENA 0x1000 /* AIF2ADCR_ENA */ +#define WM8994_AIF2ADCR_ENA_MASK 0x1000 /* AIF2ADCR_ENA */ +#define WM8994_AIF2ADCR_ENA_SHIFT 12 /* AIF2ADCR_ENA */ +#define WM8994_AIF2ADCR_ENA_WIDTH 1 /* AIF2ADCR_ENA */ +#define WM8994_AIF1ADC2L_ENA 0x0800 /* AIF1ADC2L_ENA */ +#define WM8994_AIF1ADC2L_ENA_MASK 0x0800 /* AIF1ADC2L_ENA */ +#define WM8994_AIF1ADC2L_ENA_SHIFT 11 /* AIF1ADC2L_ENA */ +#define WM8994_AIF1ADC2L_ENA_WIDTH 1 /* 
AIF1ADC2L_ENA */ +#define WM8994_AIF1ADC2R_ENA 0x0400 /* AIF1ADC2R_ENA */ +#define WM8994_AIF1ADC2R_ENA_MASK 0x0400 /* AIF1ADC2R_ENA */ +#define WM8994_AIF1ADC2R_ENA_SHIFT 10 /* AIF1ADC2R_ENA */ +#define WM8994_AIF1ADC2R_ENA_WIDTH 1 /* AIF1ADC2R_ENA */ +#define WM8994_AIF1ADC1L_ENA 0x0200 /* AIF1ADC1L_ENA */ +#define WM8994_AIF1ADC1L_ENA_MASK 0x0200 /* AIF1ADC1L_ENA */ +#define WM8994_AIF1ADC1L_ENA_SHIFT 9 /* AIF1ADC1L_ENA */ +#define WM8994_AIF1ADC1L_ENA_WIDTH 1 /* AIF1ADC1L_ENA */ +#define WM8994_AIF1ADC1R_ENA 0x0100 /* AIF1ADC1R_ENA */ +#define WM8994_AIF1ADC1R_ENA_MASK 0x0100 /* AIF1ADC1R_ENA */ +#define WM8994_AIF1ADC1R_ENA_SHIFT 8 /* AIF1ADC1R_ENA */ +#define WM8994_AIF1ADC1R_ENA_WIDTH 1 /* AIF1ADC1R_ENA */ +#define WM8994_DMIC2L_ENA 0x0020 /* DMIC2L_ENA */ +#define WM8994_DMIC2L_ENA_MASK 0x0020 /* DMIC2L_ENA */ +#define WM8994_DMIC2L_ENA_SHIFT 5 /* DMIC2L_ENA */ +#define WM8994_DMIC2L_ENA_WIDTH 1 /* DMIC2L_ENA */ +#define WM8994_DMIC2R_ENA 0x0010 /* DMIC2R_ENA */ +#define WM8994_DMIC2R_ENA_MASK 0x0010 /* DMIC2R_ENA */ +#define WM8994_DMIC2R_ENA_SHIFT 4 /* DMIC2R_ENA */ +#define WM8994_DMIC2R_ENA_WIDTH 1 /* DMIC2R_ENA */ +#define WM8994_DMIC1L_ENA 0x0008 /* DMIC1L_ENA */ +#define WM8994_DMIC1L_ENA_MASK 0x0008 /* DMIC1L_ENA */ +#define WM8994_DMIC1L_ENA_SHIFT 3 /* DMIC1L_ENA */ +#define WM8994_DMIC1L_ENA_WIDTH 1 /* DMIC1L_ENA */ +#define WM8994_DMIC1R_ENA 0x0004 /* DMIC1R_ENA */ +#define WM8994_DMIC1R_ENA_MASK 0x0004 /* DMIC1R_ENA */ +#define WM8994_DMIC1R_ENA_SHIFT 2 /* DMIC1R_ENA */ +#define WM8994_DMIC1R_ENA_WIDTH 1 /* DMIC1R_ENA */ +#define WM8994_ADCL_ENA 0x0002 /* ADCL_ENA */ +#define WM8994_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */ +#define WM8994_ADCL_ENA_SHIFT 1 /* ADCL_ENA */ +#define WM8994_ADCL_ENA_WIDTH 1 /* ADCL_ENA */ +#define WM8994_ADCR_ENA 0x0001 /* ADCR_ENA */ +#define WM8994_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */ +#define WM8994_ADCR_ENA_SHIFT 0 /* ADCR_ENA */ +#define WM8994_ADCR_ENA_WIDTH 1 /* ADCR_ENA */ + +/* + * R5 (0x05) - Power Management (5) + */ +#define WM8994_AIF2DACL_ENA 0x2000 /* AIF2DACL_ENA */ +#define WM8994_AIF2DACL_ENA_MASK 0x2000 /* AIF2DACL_ENA */ +#define WM8994_AIF2DACL_ENA_SHIFT 13 /* AIF2DACL_ENA */ +#define WM8994_AIF2DACL_ENA_WIDTH 1 /* AIF2DACL_ENA */ +#define WM8994_AIF2DACR_ENA 0x1000 /* AIF2DACR_ENA */ +#define WM8994_AIF2DACR_ENA_MASK 0x1000 /* AIF2DACR_ENA */ +#define WM8994_AIF2DACR_ENA_SHIFT 12 /* AIF2DACR_ENA */ +#define WM8994_AIF2DACR_ENA_WIDTH 1 /* AIF2DACR_ENA */ +#define WM8994_AIF1DAC2L_ENA 0x0800 /* AIF1DAC2L_ENA */ +#define WM8994_AIF1DAC2L_ENA_MASK 0x0800 /* AIF1DAC2L_ENA */ +#define WM8994_AIF1DAC2L_ENA_SHIFT 11 /* AIF1DAC2L_ENA */ +#define WM8994_AIF1DAC2L_ENA_WIDTH 1 /* AIF1DAC2L_ENA */ +#define WM8994_AIF1DAC2R_ENA 0x0400 /* AIF1DAC2R_ENA */ +#define WM8994_AIF1DAC2R_ENA_MASK 0x0400 /* AIF1DAC2R_ENA */ +#define WM8994_AIF1DAC2R_ENA_SHIFT 10 /* AIF1DAC2R_ENA */ +#define WM8994_AIF1DAC2R_ENA_WIDTH 1 /* AIF1DAC2R_ENA */ +#define WM8994_AIF1DAC1L_ENA 0x0200 /* AIF1DAC1L_ENA */ +#define WM8994_AIF1DAC1L_ENA_MASK 0x0200 /* AIF1DAC1L_ENA */ +#define WM8994_AIF1DAC1L_ENA_SHIFT 9 /* AIF1DAC1L_ENA */ +#define WM8994_AIF1DAC1L_ENA_WIDTH 1 /* AIF1DAC1L_ENA */ +#define WM8994_AIF1DAC1R_ENA 0x0100 /* AIF1DAC1R_ENA */ +#define WM8994_AIF1DAC1R_ENA_MASK 0x0100 /* AIF1DAC1R_ENA */ +#define WM8994_AIF1DAC1R_ENA_SHIFT 8 /* AIF1DAC1R_ENA */ +#define WM8994_AIF1DAC1R_ENA_WIDTH 1 /* AIF1DAC1R_ENA */ +#define WM8994_DAC2L_ENA 0x0008 /* DAC2L_ENA */ +#define WM8994_DAC2L_ENA_MASK 0x0008 /* DAC2L_ENA */ +#define WM8994_DAC2L_ENA_SHIFT 3 /* DAC2L_ENA 
*/ +#define WM8994_DAC2L_ENA_WIDTH 1 /* DAC2L_ENA */ +#define WM8994_DAC2R_ENA 0x0004 /* DAC2R_ENA */ +#define WM8994_DAC2R_ENA_MASK 0x0004 /* DAC2R_ENA */ +#define WM8994_DAC2R_ENA_SHIFT 2 /* DAC2R_ENA */ +#define WM8994_DAC2R_ENA_WIDTH 1 /* DAC2R_ENA */ +#define WM8994_DAC1L_ENA 0x0002 /* DAC1L_ENA */ +#define WM8994_DAC1L_ENA_MASK 0x0002 /* DAC1L_ENA */ +#define WM8994_DAC1L_ENA_SHIFT 1 /* DAC1L_ENA */ +#define WM8994_DAC1L_ENA_WIDTH 1 /* DAC1L_ENA */ +#define WM8994_DAC1R_ENA 0x0001 /* DAC1R_ENA */ +#define WM8994_DAC1R_ENA_MASK 0x0001 /* DAC1R_ENA */ +#define WM8994_DAC1R_ENA_SHIFT 0 /* DAC1R_ENA */ +#define WM8994_DAC1R_ENA_WIDTH 1 /* DAC1R_ENA */ + +/* + * R6 (0x06) - Power Management (6) + */ +#define WM8958_AIF3ADC_SRC_MASK 0x0600 /* AIF3ADC_SRC - [10:9] */ +#define WM8958_AIF3ADC_SRC_SHIFT 9 /* AIF3ADC_SRC - [10:9] */ +#define WM8958_AIF3ADC_SRC_WIDTH 2 /* AIF3ADC_SRC - [10:9] */ +#define WM8958_AIF2DAC_SRC_MASK 0x0180 /* AIF2DAC_SRC - [8:7] */ +#define WM8958_AIF2DAC_SRC_SHIFT 7 /* AIF2DAC_SRC - [8:7] */ +#define WM8958_AIF2DAC_SRC_WIDTH 2 /* AIF2DAC_SRC - [8:7] */ +#define WM8994_AIF3_TRI 0x0020 /* AIF3_TRI */ +#define WM8994_AIF3_TRI_MASK 0x0020 /* AIF3_TRI */ +#define WM8994_AIF3_TRI_SHIFT 5 /* AIF3_TRI */ +#define WM8994_AIF3_TRI_WIDTH 1 /* AIF3_TRI */ +#define WM8994_AIF3_ADCDAT_SRC_MASK 0x0018 /* AIF3_ADCDAT_SRC - [4:3] */ +#define WM8994_AIF3_ADCDAT_SRC_SHIFT 3 /* AIF3_ADCDAT_SRC - [4:3] */ +#define WM8994_AIF3_ADCDAT_SRC_WIDTH 2 /* AIF3_ADCDAT_SRC - [4:3] */ +#define WM8994_AIF2_ADCDAT_SRC 0x0004 /* AIF2_ADCDAT_SRC */ +#define WM8994_AIF2_ADCDAT_SRC_MASK 0x0004 /* AIF2_ADCDAT_SRC */ +#define WM8994_AIF2_ADCDAT_SRC_SHIFT 2 /* AIF2_ADCDAT_SRC */ +#define WM8994_AIF2_ADCDAT_SRC_WIDTH 1 /* AIF2_ADCDAT_SRC */ +#define WM8994_AIF2_DACDAT_SRC 0x0002 /* AIF2_DACDAT_SRC */ +#define WM8994_AIF2_DACDAT_SRC_MASK 0x0002 /* AIF2_DACDAT_SRC */ +#define WM8994_AIF2_DACDAT_SRC_SHIFT 1 /* AIF2_DACDAT_SRC */ +#define WM8994_AIF2_DACDAT_SRC_WIDTH 1 /* AIF2_DACDAT_SRC */ +#define WM8994_AIF1_DACDAT_SRC 0x0001 /* AIF1_DACDAT_SRC */ +#define WM8994_AIF1_DACDAT_SRC_MASK 0x0001 /* AIF1_DACDAT_SRC */ +#define WM8994_AIF1_DACDAT_SRC_SHIFT 0 /* AIF1_DACDAT_SRC */ +#define WM8994_AIF1_DACDAT_SRC_WIDTH 1 /* AIF1_DACDAT_SRC */ + +/* + * R21 (0x15) - Input Mixer (1) + */ +#define WM8994_IN1RP_MIXINR_BOOST 0x0100 /* IN1RP_MIXINR_BOOST */ +#define WM8994_IN1RP_MIXINR_BOOST_MASK 0x0100 /* IN1RP_MIXINR_BOOST */ +#define WM8994_IN1RP_MIXINR_BOOST_SHIFT 8 /* IN1RP_MIXINR_BOOST */ +#define WM8994_IN1RP_MIXINR_BOOST_WIDTH 1 /* IN1RP_MIXINR_BOOST */ +#define WM8994_IN1LP_MIXINL_BOOST 0x0080 /* IN1LP_MIXINL_BOOST */ +#define WM8994_IN1LP_MIXINL_BOOST_MASK 0x0080 /* IN1LP_MIXINL_BOOST */ +#define WM8994_IN1LP_MIXINL_BOOST_SHIFT 7 /* IN1LP_MIXINL_BOOST */ +#define WM8994_IN1LP_MIXINL_BOOST_WIDTH 1 /* IN1LP_MIXINL_BOOST */ +#define WM8994_INPUTS_CLAMP 0x0040 /* INPUTS_CLAMP */ +#define WM8994_INPUTS_CLAMP_MASK 0x0040 /* INPUTS_CLAMP */ +#define WM8994_INPUTS_CLAMP_SHIFT 6 /* INPUTS_CLAMP */ +#define WM8994_INPUTS_CLAMP_WIDTH 1 /* INPUTS_CLAMP */ + +/* + * R24 (0x18) - Left Line Input 1&2 Volume + */ +#define WM8994_IN1_VU 0x0100 /* IN1_VU */ +#define WM8994_IN1_VU_MASK 0x0100 /* IN1_VU */ +#define WM8994_IN1_VU_SHIFT 8 /* IN1_VU */ +#define WM8994_IN1_VU_WIDTH 1 /* IN1_VU */ +#define WM8994_IN1L_MUTE 0x0080 /* IN1L_MUTE */ +#define WM8994_IN1L_MUTE_MASK 0x0080 /* IN1L_MUTE */ +#define WM8994_IN1L_MUTE_SHIFT 7 /* IN1L_MUTE */ +#define WM8994_IN1L_MUTE_WIDTH 1 /* IN1L_MUTE */ +#define WM8994_IN1L_ZC 0x0040 /* IN1L_ZC 
*/ +#define WM8994_IN1L_ZC_MASK 0x0040 /* IN1L_ZC */ +#define WM8994_IN1L_ZC_SHIFT 6 /* IN1L_ZC */ +#define WM8994_IN1L_ZC_WIDTH 1 /* IN1L_ZC */ +#define WM8994_IN1L_VOL_MASK 0x001F /* IN1L_VOL - [4:0] */ +#define WM8994_IN1L_VOL_SHIFT 0 /* IN1L_VOL - [4:0] */ +#define WM8994_IN1L_VOL_WIDTH 5 /* IN1L_VOL - [4:0] */ + +/* + * R25 (0x19) - Left Line Input 3&4 Volume + */ +#define WM8994_IN2_VU 0x0100 /* IN2_VU */ +#define WM8994_IN2_VU_MASK 0x0100 /* IN2_VU */ +#define WM8994_IN2_VU_SHIFT 8 /* IN2_VU */ +#define WM8994_IN2_VU_WIDTH 1 /* IN2_VU */ +#define WM8994_IN2L_MUTE 0x0080 /* IN2L_MUTE */ +#define WM8994_IN2L_MUTE_MASK 0x0080 /* IN2L_MUTE */ +#define WM8994_IN2L_MUTE_SHIFT 7 /* IN2L_MUTE */ +#define WM8994_IN2L_MUTE_WIDTH 1 /* IN2L_MUTE */ +#define WM8994_IN2L_ZC 0x0040 /* IN2L_ZC */ +#define WM8994_IN2L_ZC_MASK 0x0040 /* IN2L_ZC */ +#define WM8994_IN2L_ZC_SHIFT 6 /* IN2L_ZC */ +#define WM8994_IN2L_ZC_WIDTH 1 /* IN2L_ZC */ +#define WM8994_IN2L_VOL_MASK 0x001F /* IN2L_VOL - [4:0] */ +#define WM8994_IN2L_VOL_SHIFT 0 /* IN2L_VOL - [4:0] */ +#define WM8994_IN2L_VOL_WIDTH 5 /* IN2L_VOL - [4:0] */ + +/* + * R26 (0x1A) - Right Line Input 1&2 Volume + */ +#define WM8994_IN1_VU 0x0100 /* IN1_VU */ +#define WM8994_IN1_VU_MASK 0x0100 /* IN1_VU */ +#define WM8994_IN1_VU_SHIFT 8 /* IN1_VU */ +#define WM8994_IN1_VU_WIDTH 1 /* IN1_VU */ +#define WM8994_IN1R_MUTE 0x0080 /* IN1R_MUTE */ +#define WM8994_IN1R_MUTE_MASK 0x0080 /* IN1R_MUTE */ +#define WM8994_IN1R_MUTE_SHIFT 7 /* IN1R_MUTE */ +#define WM8994_IN1R_MUTE_WIDTH 1 /* IN1R_MUTE */ +#define WM8994_IN1R_ZC 0x0040 /* IN1R_ZC */ +#define WM8994_IN1R_ZC_MASK 0x0040 /* IN1R_ZC */ +#define WM8994_IN1R_ZC_SHIFT 6 /* IN1R_ZC */ +#define WM8994_IN1R_ZC_WIDTH 1 /* IN1R_ZC */ +#define WM8994_IN1R_VOL_MASK 0x001F /* IN1R_VOL - [4:0] */ +#define WM8994_IN1R_VOL_SHIFT 0 /* IN1R_VOL - [4:0] */ +#define WM8994_IN1R_VOL_WIDTH 5 /* IN1R_VOL - [4:0] */ + +/* + * R27 (0x1B) - Right Line Input 3&4 Volume + */ +#define WM8994_IN2_VU 0x0100 /* IN2_VU */ +#define WM8994_IN2_VU_MASK 0x0100 /* IN2_VU */ +#define WM8994_IN2_VU_SHIFT 8 /* IN2_VU */ +#define WM8994_IN2_VU_WIDTH 1 /* IN2_VU */ +#define WM8994_IN2R_MUTE 0x0080 /* IN2R_MUTE */ +#define WM8994_IN2R_MUTE_MASK 0x0080 /* IN2R_MUTE */ +#define WM8994_IN2R_MUTE_SHIFT 7 /* IN2R_MUTE */ +#define WM8994_IN2R_MUTE_WIDTH 1 /* IN2R_MUTE */ +#define WM8994_IN2R_ZC 0x0040 /* IN2R_ZC */ +#define WM8994_IN2R_ZC_MASK 0x0040 /* IN2R_ZC */ +#define WM8994_IN2R_ZC_SHIFT 6 /* IN2R_ZC */ +#define WM8994_IN2R_ZC_WIDTH 1 /* IN2R_ZC */ +#define WM8994_IN2R_VOL_MASK 0x001F /* IN2R_VOL - [4:0] */ +#define WM8994_IN2R_VOL_SHIFT 0 /* IN2R_VOL - [4:0] */ +#define WM8994_IN2R_VOL_WIDTH 5 /* IN2R_VOL - [4:0] */ + +/* + * R28 (0x1C) - Left Output Volume + */ +#define WM8994_HPOUT1_VU 0x0100 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */ +#define WM8994_HPOUT1L_ZC 0x0080 /* HPOUT1L_ZC */ +#define WM8994_HPOUT1L_ZC_MASK 0x0080 /* HPOUT1L_ZC */ +#define WM8994_HPOUT1L_ZC_SHIFT 7 /* HPOUT1L_ZC */ +#define WM8994_HPOUT1L_ZC_WIDTH 1 /* HPOUT1L_ZC */ +#define WM8994_HPOUT1L_MUTE_N 0x0040 /* HPOUT1L_MUTE_N */ +#define WM8994_HPOUT1L_MUTE_N_MASK 0x0040 /* HPOUT1L_MUTE_N */ +#define WM8994_HPOUT1L_MUTE_N_SHIFT 6 /* HPOUT1L_MUTE_N */ +#define WM8994_HPOUT1L_MUTE_N_WIDTH 1 /* HPOUT1L_MUTE_N */ +#define WM8994_HPOUT1L_VOL_MASK 0x003F /* HPOUT1L_VOL - [5:0] */ +#define WM8994_HPOUT1L_VOL_SHIFT 0 /* HPOUT1L_VOL - [5:0] */ +#define 
WM8994_HPOUT1L_VOL_WIDTH 6 /* HPOUT1L_VOL - [5:0] */ + +/* + * R29 (0x1D) - Right Output Volume + */ +#define WM8994_HPOUT1_VU 0x0100 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */ +#define WM8994_HPOUT1R_ZC 0x0080 /* HPOUT1R_ZC */ +#define WM8994_HPOUT1R_ZC_MASK 0x0080 /* HPOUT1R_ZC */ +#define WM8994_HPOUT1R_ZC_SHIFT 7 /* HPOUT1R_ZC */ +#define WM8994_HPOUT1R_ZC_WIDTH 1 /* HPOUT1R_ZC */ +#define WM8994_HPOUT1R_MUTE_N 0x0040 /* HPOUT1R_MUTE_N */ +#define WM8994_HPOUT1R_MUTE_N_MASK 0x0040 /* HPOUT1R_MUTE_N */ +#define WM8994_HPOUT1R_MUTE_N_SHIFT 6 /* HPOUT1R_MUTE_N */ +#define WM8994_HPOUT1R_MUTE_N_WIDTH 1 /* HPOUT1R_MUTE_N */ +#define WM8994_HPOUT1R_VOL_MASK 0x003F /* HPOUT1R_VOL - [5:0] */ +#define WM8994_HPOUT1R_VOL_SHIFT 0 /* HPOUT1R_VOL - [5:0] */ +#define WM8994_HPOUT1R_VOL_WIDTH 6 /* HPOUT1R_VOL - [5:0] */ + +/* + * R30 (0x1E) - Line Outputs Volume + */ +#define WM8994_LINEOUT1N_MUTE 0x0040 /* LINEOUT1N_MUTE */ +#define WM8994_LINEOUT1N_MUTE_MASK 0x0040 /* LINEOUT1N_MUTE */ +#define WM8994_LINEOUT1N_MUTE_SHIFT 6 /* LINEOUT1N_MUTE */ +#define WM8994_LINEOUT1N_MUTE_WIDTH 1 /* LINEOUT1N_MUTE */ +#define WM8994_LINEOUT1P_MUTE 0x0020 /* LINEOUT1P_MUTE */ +#define WM8994_LINEOUT1P_MUTE_MASK 0x0020 /* LINEOUT1P_MUTE */ +#define WM8994_LINEOUT1P_MUTE_SHIFT 5 /* LINEOUT1P_MUTE */ +#define WM8994_LINEOUT1P_MUTE_WIDTH 1 /* LINEOUT1P_MUTE */ +#define WM8994_LINEOUT1_VOL 0x0010 /* LINEOUT1_VOL */ +#define WM8994_LINEOUT1_VOL_MASK 0x0010 /* LINEOUT1_VOL */ +#define WM8994_LINEOUT1_VOL_SHIFT 4 /* LINEOUT1_VOL */ +#define WM8994_LINEOUT1_VOL_WIDTH 1 /* LINEOUT1_VOL */ +#define WM8994_LINEOUT2N_MUTE 0x0004 /* LINEOUT2N_MUTE */ +#define WM8994_LINEOUT2N_MUTE_MASK 0x0004 /* LINEOUT2N_MUTE */ +#define WM8994_LINEOUT2N_MUTE_SHIFT 2 /* LINEOUT2N_MUTE */ +#define WM8994_LINEOUT2N_MUTE_WIDTH 1 /* LINEOUT2N_MUTE */ +#define WM8994_LINEOUT2P_MUTE 0x0002 /* LINEOUT2P_MUTE */ +#define WM8994_LINEOUT2P_MUTE_MASK 0x0002 /* LINEOUT2P_MUTE */ +#define WM8994_LINEOUT2P_MUTE_SHIFT 1 /* LINEOUT2P_MUTE */ +#define WM8994_LINEOUT2P_MUTE_WIDTH 1 /* LINEOUT2P_MUTE */ +#define WM8994_LINEOUT2_VOL 0x0001 /* LINEOUT2_VOL */ +#define WM8994_LINEOUT2_VOL_MASK 0x0001 /* LINEOUT2_VOL */ +#define WM8994_LINEOUT2_VOL_SHIFT 0 /* LINEOUT2_VOL */ +#define WM8994_LINEOUT2_VOL_WIDTH 1 /* LINEOUT2_VOL */ + +/* + * R31 (0x1F) - HPOUT2 Volume + */ +#define WM8994_HPOUT2_MUTE 0x0020 /* HPOUT2_MUTE */ +#define WM8994_HPOUT2_MUTE_MASK 0x0020 /* HPOUT2_MUTE */ +#define WM8994_HPOUT2_MUTE_SHIFT 5 /* HPOUT2_MUTE */ +#define WM8994_HPOUT2_MUTE_WIDTH 1 /* HPOUT2_MUTE */ +#define WM8994_HPOUT2_VOL 0x0010 /* HPOUT2_VOL */ +#define WM8994_HPOUT2_VOL_MASK 0x0010 /* HPOUT2_VOL */ +#define WM8994_HPOUT2_VOL_SHIFT 4 /* HPOUT2_VOL */ +#define WM8994_HPOUT2_VOL_WIDTH 1 /* HPOUT2_VOL */ + +/* + * R32 (0x20) - Left OPGA Volume + */ +#define WM8994_MIXOUT_VU 0x0100 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_MASK 0x0100 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_SHIFT 8 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_WIDTH 1 /* MIXOUT_VU */ +#define WM8994_MIXOUTL_ZC 0x0080 /* MIXOUTL_ZC */ +#define WM8994_MIXOUTL_ZC_MASK 0x0080 /* MIXOUTL_ZC */ +#define WM8994_MIXOUTL_ZC_SHIFT 7 /* MIXOUTL_ZC */ +#define WM8994_MIXOUTL_ZC_WIDTH 1 /* MIXOUTL_ZC */ +#define WM8994_MIXOUTL_MUTE_N 0x0040 /* MIXOUTL_MUTE_N */ +#define WM8994_MIXOUTL_MUTE_N_MASK 0x0040 /* MIXOUTL_MUTE_N */ +#define WM8994_MIXOUTL_MUTE_N_SHIFT 6 /* MIXOUTL_MUTE_N */ +#define 
WM8994_MIXOUTL_MUTE_N_WIDTH 1 /* MIXOUTL_MUTE_N */ +#define WM8994_MIXOUTL_VOL_MASK 0x003F /* MIXOUTL_VOL - [5:0] */ +#define WM8994_MIXOUTL_VOL_SHIFT 0 /* MIXOUTL_VOL - [5:0] */ +#define WM8994_MIXOUTL_VOL_WIDTH 6 /* MIXOUTL_VOL - [5:0] */ + +/* + * R33 (0x21) - Right OPGA Volume + */ +#define WM8994_MIXOUT_VU 0x0100 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_MASK 0x0100 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_SHIFT 8 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_WIDTH 1 /* MIXOUT_VU */ +#define WM8994_MIXOUTR_ZC 0x0080 /* MIXOUTR_ZC */ +#define WM8994_MIXOUTR_ZC_MASK 0x0080 /* MIXOUTR_ZC */ +#define WM8994_MIXOUTR_ZC_SHIFT 7 /* MIXOUTR_ZC */ +#define WM8994_MIXOUTR_ZC_WIDTH 1 /* MIXOUTR_ZC */ +#define WM8994_MIXOUTR_MUTE_N 0x0040 /* MIXOUTR_MUTE_N */ +#define WM8994_MIXOUTR_MUTE_N_MASK 0x0040 /* MIXOUTR_MUTE_N */ +#define WM8994_MIXOUTR_MUTE_N_SHIFT 6 /* MIXOUTR_MUTE_N */ +#define WM8994_MIXOUTR_MUTE_N_WIDTH 1 /* MIXOUTR_MUTE_N */ +#define WM8994_MIXOUTR_VOL_MASK 0x003F /* MIXOUTR_VOL - [5:0] */ +#define WM8994_MIXOUTR_VOL_SHIFT 0 /* MIXOUTR_VOL - [5:0] */ +#define WM8994_MIXOUTR_VOL_WIDTH 6 /* MIXOUTR_VOL - [5:0] */ + +/* + * R34 (0x22) - SPKMIXL Attenuation + */ +#define WM8994_DAC2L_SPKMIXL_VOL 0x0040 /* DAC2L_SPKMIXL_VOL */ +#define WM8994_DAC2L_SPKMIXL_VOL_MASK 0x0040 /* DAC2L_SPKMIXL_VOL */ +#define WM8994_DAC2L_SPKMIXL_VOL_SHIFT 6 /* DAC2L_SPKMIXL_VOL */ +#define WM8994_DAC2L_SPKMIXL_VOL_WIDTH 1 /* DAC2L_SPKMIXL_VOL */ +#define WM8994_MIXINL_SPKMIXL_VOL 0x0020 /* MIXINL_SPKMIXL_VOL */ +#define WM8994_MIXINL_SPKMIXL_VOL_MASK 0x0020 /* MIXINL_SPKMIXL_VOL */ +#define WM8994_MIXINL_SPKMIXL_VOL_SHIFT 5 /* MIXINL_SPKMIXL_VOL */ +#define WM8994_MIXINL_SPKMIXL_VOL_WIDTH 1 /* MIXINL_SPKMIXL_VOL */ +#define WM8994_IN1LP_SPKMIXL_VOL 0x0010 /* IN1LP_SPKMIXL_VOL */ +#define WM8994_IN1LP_SPKMIXL_VOL_MASK 0x0010 /* IN1LP_SPKMIXL_VOL */ +#define WM8994_IN1LP_SPKMIXL_VOL_SHIFT 4 /* IN1LP_SPKMIXL_VOL */ +#define WM8994_IN1LP_SPKMIXL_VOL_WIDTH 1 /* IN1LP_SPKMIXL_VOL */ +#define WM8994_MIXOUTL_SPKMIXL_VOL 0x0008 /* MIXOUTL_SPKMIXL_VOL */ +#define WM8994_MIXOUTL_SPKMIXL_VOL_MASK 0x0008 /* MIXOUTL_SPKMIXL_VOL */ +#define WM8994_MIXOUTL_SPKMIXL_VOL_SHIFT 3 /* MIXOUTL_SPKMIXL_VOL */ +#define WM8994_MIXOUTL_SPKMIXL_VOL_WIDTH 1 /* MIXOUTL_SPKMIXL_VOL */ +#define WM8994_DAC1L_SPKMIXL_VOL 0x0004 /* DAC1L_SPKMIXL_VOL */ +#define WM8994_DAC1L_SPKMIXL_VOL_MASK 0x0004 /* DAC1L_SPKMIXL_VOL */ +#define WM8994_DAC1L_SPKMIXL_VOL_SHIFT 2 /* DAC1L_SPKMIXL_VOL */ +#define WM8994_DAC1L_SPKMIXL_VOL_WIDTH 1 /* DAC1L_SPKMIXL_VOL */ +#define WM8994_SPKMIXL_VOL_MASK 0x0003 /* SPKMIXL_VOL - [1:0] */ +#define WM8994_SPKMIXL_VOL_SHIFT 0 /* SPKMIXL_VOL - [1:0] */ +#define WM8994_SPKMIXL_VOL_WIDTH 2 /* SPKMIXL_VOL - [1:0] */ + +/* + * R35 (0x23) - SPKMIXR Attenuation + */ +#define WM8994_SPKOUT_CLASSAB 0x0100 /* SPKOUT_CLASSAB */ +#define WM8994_SPKOUT_CLASSAB_MASK 0x0100 /* SPKOUT_CLASSAB */ +#define WM8994_SPKOUT_CLASSAB_SHIFT 8 /* SPKOUT_CLASSAB */ +#define WM8994_SPKOUT_CLASSAB_WIDTH 1 /* SPKOUT_CLASSAB */ +#define WM8994_DAC2R_SPKMIXR_VOL 0x0040 /* DAC2R_SPKMIXR_VOL */ +#define WM8994_DAC2R_SPKMIXR_VOL_MASK 0x0040 /* DAC2R_SPKMIXR_VOL */ +#define WM8994_DAC2R_SPKMIXR_VOL_SHIFT 6 /* DAC2R_SPKMIXR_VOL */ +#define WM8994_DAC2R_SPKMIXR_VOL_WIDTH 1 /* DAC2R_SPKMIXR_VOL */ +#define WM8994_MIXINR_SPKMIXR_VOL 0x0020 /* MIXINR_SPKMIXR_VOL */ +#define WM8994_MIXINR_SPKMIXR_VOL_MASK 0x0020 /* MIXINR_SPKMIXR_VOL */ +#define WM8994_MIXINR_SPKMIXR_VOL_SHIFT 5 /* MIXINR_SPKMIXR_VOL */ +#define WM8994_MIXINR_SPKMIXR_VOL_WIDTH 1 /* 
MIXINR_SPKMIXR_VOL */ +#define WM8994_IN1RP_SPKMIXR_VOL 0x0010 /* IN1RP_SPKMIXR_VOL */ +#define WM8994_IN1RP_SPKMIXR_VOL_MASK 0x0010 /* IN1RP_SPKMIXR_VOL */ +#define WM8994_IN1RP_SPKMIXR_VOL_SHIFT 4 /* IN1RP_SPKMIXR_VOL */ +#define WM8994_IN1RP_SPKMIXR_VOL_WIDTH 1 /* IN1RP_SPKMIXR_VOL */ +#define WM8994_MIXOUTR_SPKMIXR_VOL 0x0008 /* MIXOUTR_SPKMIXR_VOL */ +#define WM8994_MIXOUTR_SPKMIXR_VOL_MASK 0x0008 /* MIXOUTR_SPKMIXR_VOL */ +#define WM8994_MIXOUTR_SPKMIXR_VOL_SHIFT 3 /* MIXOUTR_SPKMIXR_VOL */ +#define WM8994_MIXOUTR_SPKMIXR_VOL_WIDTH 1 /* MIXOUTR_SPKMIXR_VOL */ +#define WM8994_DAC1R_SPKMIXR_VOL 0x0004 /* DAC1R_SPKMIXR_VOL */ +#define WM8994_DAC1R_SPKMIXR_VOL_MASK 0x0004 /* DAC1R_SPKMIXR_VOL */ +#define WM8994_DAC1R_SPKMIXR_VOL_SHIFT 2 /* DAC1R_SPKMIXR_VOL */ +#define WM8994_DAC1R_SPKMIXR_VOL_WIDTH 1 /* DAC1R_SPKMIXR_VOL */ +#define WM8994_SPKMIXR_VOL_MASK 0x0003 /* SPKMIXR_VOL - [1:0] */ +#define WM8994_SPKMIXR_VOL_SHIFT 0 /* SPKMIXR_VOL - [1:0] */ +#define WM8994_SPKMIXR_VOL_WIDTH 2 /* SPKMIXR_VOL - [1:0] */ + +/* + * R36 (0x24) - SPKOUT Mixers + */ +#define WM8994_IN2LRP_TO_SPKOUTL 0x0020 /* IN2LRP_TO_SPKOUTL */ +#define WM8994_IN2LRP_TO_SPKOUTL_MASK 0x0020 /* IN2LRP_TO_SPKOUTL */ +#define WM8994_IN2LRP_TO_SPKOUTL_SHIFT 5 /* IN2LRP_TO_SPKOUTL */ +#define WM8994_IN2LRP_TO_SPKOUTL_WIDTH 1 /* IN2LRP_TO_SPKOUTL */ +#define WM8994_SPKMIXL_TO_SPKOUTL 0x0010 /* SPKMIXL_TO_SPKOUTL */ +#define WM8994_SPKMIXL_TO_SPKOUTL_MASK 0x0010 /* SPKMIXL_TO_SPKOUTL */ +#define WM8994_SPKMIXL_TO_SPKOUTL_SHIFT 4 /* SPKMIXL_TO_SPKOUTL */ +#define WM8994_SPKMIXL_TO_SPKOUTL_WIDTH 1 /* SPKMIXL_TO_SPKOUTL */ +#define WM8994_SPKMIXR_TO_SPKOUTL 0x0008 /* SPKMIXR_TO_SPKOUTL */ +#define WM8994_SPKMIXR_TO_SPKOUTL_MASK 0x0008 /* SPKMIXR_TO_SPKOUTL */ +#define WM8994_SPKMIXR_TO_SPKOUTL_SHIFT 3 /* SPKMIXR_TO_SPKOUTL */ +#define WM8994_SPKMIXR_TO_SPKOUTL_WIDTH 1 /* SPKMIXR_TO_SPKOUTL */ +#define WM8994_IN2LRP_TO_SPKOUTR 0x0004 /* IN2LRP_TO_SPKOUTR */ +#define WM8994_IN2LRP_TO_SPKOUTR_MASK 0x0004 /* IN2LRP_TO_SPKOUTR */ +#define WM8994_IN2LRP_TO_SPKOUTR_SHIFT 2 /* IN2LRP_TO_SPKOUTR */ +#define WM8994_IN2LRP_TO_SPKOUTR_WIDTH 1 /* IN2LRP_TO_SPKOUTR */ +#define WM8994_SPKMIXL_TO_SPKOUTR 0x0002 /* SPKMIXL_TO_SPKOUTR */ +#define WM8994_SPKMIXL_TO_SPKOUTR_MASK 0x0002 /* SPKMIXL_TO_SPKOUTR */ +#define WM8994_SPKMIXL_TO_SPKOUTR_SHIFT 1 /* SPKMIXL_TO_SPKOUTR */ +#define WM8994_SPKMIXL_TO_SPKOUTR_WIDTH 1 /* SPKMIXL_TO_SPKOUTR */ +#define WM8994_SPKMIXR_TO_SPKOUTR 0x0001 /* SPKMIXR_TO_SPKOUTR */ +#define WM8994_SPKMIXR_TO_SPKOUTR_MASK 0x0001 /* SPKMIXR_TO_SPKOUTR */ +#define WM8994_SPKMIXR_TO_SPKOUTR_SHIFT 0 /* SPKMIXR_TO_SPKOUTR */ +#define WM8994_SPKMIXR_TO_SPKOUTR_WIDTH 1 /* SPKMIXR_TO_SPKOUTR */ + +/* + * R37 (0x25) - ClassD + */ +#define WM8994_SPKOUTL_BOOST_MASK 0x0038 /* SPKOUTL_BOOST - [5:3] */ +#define WM8994_SPKOUTL_BOOST_SHIFT 3 /* SPKOUTL_BOOST - [5:3] */ +#define WM8994_SPKOUTL_BOOST_WIDTH 3 /* SPKOUTL_BOOST - [5:3] */ +#define WM8994_SPKOUTR_BOOST_MASK 0x0007 /* SPKOUTR_BOOST - [2:0] */ +#define WM8994_SPKOUTR_BOOST_SHIFT 0 /* SPKOUTR_BOOST - [2:0] */ +#define WM8994_SPKOUTR_BOOST_WIDTH 3 /* SPKOUTR_BOOST - [2:0] */ + +/* + * R38 (0x26) - Speaker Volume Left + */ +#define WM8994_SPKOUT_VU 0x0100 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */ +#define WM8994_SPKOUTL_ZC 0x0080 /* SPKOUTL_ZC */ +#define WM8994_SPKOUTL_ZC_MASK 0x0080 /* SPKOUTL_ZC */ +#define WM8994_SPKOUTL_ZC_SHIFT 7 /* SPKOUTL_ZC */ 
+#define WM8994_SPKOUTL_ZC_WIDTH 1 /* SPKOUTL_ZC */ +#define WM8994_SPKOUTL_MUTE_N 0x0040 /* SPKOUTL_MUTE_N */ +#define WM8994_SPKOUTL_MUTE_N_MASK 0x0040 /* SPKOUTL_MUTE_N */ +#define WM8994_SPKOUTL_MUTE_N_SHIFT 6 /* SPKOUTL_MUTE_N */ +#define WM8994_SPKOUTL_MUTE_N_WIDTH 1 /* SPKOUTL_MUTE_N */ +#define WM8994_SPKOUTL_VOL_MASK 0x003F /* SPKOUTL_VOL - [5:0] */ +#define WM8994_SPKOUTL_VOL_SHIFT 0 /* SPKOUTL_VOL - [5:0] */ +#define WM8994_SPKOUTL_VOL_WIDTH 6 /* SPKOUTL_VOL - [5:0] */ + +/* + * R39 (0x27) - Speaker Volume Right + */ +#define WM8994_SPKOUT_VU 0x0100 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */ +#define WM8994_SPKOUTR_ZC 0x0080 /* SPKOUTR_ZC */ +#define WM8994_SPKOUTR_ZC_MASK 0x0080 /* SPKOUTR_ZC */ +#define WM8994_SPKOUTR_ZC_SHIFT 7 /* SPKOUTR_ZC */ +#define WM8994_SPKOUTR_ZC_WIDTH 1 /* SPKOUTR_ZC */ +#define WM8994_SPKOUTR_MUTE_N 0x0040 /* SPKOUTR_MUTE_N */ +#define WM8994_SPKOUTR_MUTE_N_MASK 0x0040 /* SPKOUTR_MUTE_N */ +#define WM8994_SPKOUTR_MUTE_N_SHIFT 6 /* SPKOUTR_MUTE_N */ +#define WM8994_SPKOUTR_MUTE_N_WIDTH 1 /* SPKOUTR_MUTE_N */ +#define WM8994_SPKOUTR_VOL_MASK 0x003F /* SPKOUTR_VOL - [5:0] */ +#define WM8994_SPKOUTR_VOL_SHIFT 0 /* SPKOUTR_VOL - [5:0] */ +#define WM8994_SPKOUTR_VOL_WIDTH 6 /* SPKOUTR_VOL - [5:0] */ + +/* + * R40 (0x28) - Input Mixer (2) + */ +#define WM8994_IN2LP_TO_IN2L 0x0080 /* IN2LP_TO_IN2L */ +#define WM8994_IN2LP_TO_IN2L_MASK 0x0080 /* IN2LP_TO_IN2L */ +#define WM8994_IN2LP_TO_IN2L_SHIFT 7 /* IN2LP_TO_IN2L */ +#define WM8994_IN2LP_TO_IN2L_WIDTH 1 /* IN2LP_TO_IN2L */ +#define WM8994_IN2LN_TO_IN2L 0x0040 /* IN2LN_TO_IN2L */ +#define WM8994_IN2LN_TO_IN2L_MASK 0x0040 /* IN2LN_TO_IN2L */ +#define WM8994_IN2LN_TO_IN2L_SHIFT 6 /* IN2LN_TO_IN2L */ +#define WM8994_IN2LN_TO_IN2L_WIDTH 1 /* IN2LN_TO_IN2L */ +#define WM8994_IN1LP_TO_IN1L 0x0020 /* IN1LP_TO_IN1L */ +#define WM8994_IN1LP_TO_IN1L_MASK 0x0020 /* IN1LP_TO_IN1L */ +#define WM8994_IN1LP_TO_IN1L_SHIFT 5 /* IN1LP_TO_IN1L */ +#define WM8994_IN1LP_TO_IN1L_WIDTH 1 /* IN1LP_TO_IN1L */ +#define WM8994_IN1LN_TO_IN1L 0x0010 /* IN1LN_TO_IN1L */ +#define WM8994_IN1LN_TO_IN1L_MASK 0x0010 /* IN1LN_TO_IN1L */ +#define WM8994_IN1LN_TO_IN1L_SHIFT 4 /* IN1LN_TO_IN1L */ +#define WM8994_IN1LN_TO_IN1L_WIDTH 1 /* IN1LN_TO_IN1L */ +#define WM8994_IN2RP_TO_IN2R 0x0008 /* IN2RP_TO_IN2R */ +#define WM8994_IN2RP_TO_IN2R_MASK 0x0008 /* IN2RP_TO_IN2R */ +#define WM8994_IN2RP_TO_IN2R_SHIFT 3 /* IN2RP_TO_IN2R */ +#define WM8994_IN2RP_TO_IN2R_WIDTH 1 /* IN2RP_TO_IN2R */ +#define WM8994_IN2RN_TO_IN2R 0x0004 /* IN2RN_TO_IN2R */ +#define WM8994_IN2RN_TO_IN2R_MASK 0x0004 /* IN2RN_TO_IN2R */ +#define WM8994_IN2RN_TO_IN2R_SHIFT 2 /* IN2RN_TO_IN2R */ +#define WM8994_IN2RN_TO_IN2R_WIDTH 1 /* IN2RN_TO_IN2R */ +#define WM8994_IN1RP_TO_IN1R 0x0002 /* IN1RP_TO_IN1R */ +#define WM8994_IN1RP_TO_IN1R_MASK 0x0002 /* IN1RP_TO_IN1R */ +#define WM8994_IN1RP_TO_IN1R_SHIFT 1 /* IN1RP_TO_IN1R */ +#define WM8994_IN1RP_TO_IN1R_WIDTH 1 /* IN1RP_TO_IN1R */ +#define WM8994_IN1RN_TO_IN1R 0x0001 /* IN1RN_TO_IN1R */ +#define WM8994_IN1RN_TO_IN1R_MASK 0x0001 /* IN1RN_TO_IN1R */ +#define WM8994_IN1RN_TO_IN1R_SHIFT 0 /* IN1RN_TO_IN1R */ +#define WM8994_IN1RN_TO_IN1R_WIDTH 1 /* IN1RN_TO_IN1R */ + +/* + * R41 (0x29) - Input Mixer (3) + */ +#define WM8994_IN2L_TO_MIXINL 0x0100 /* IN2L_TO_MIXINL */ +#define WM8994_IN2L_TO_MIXINL_MASK 0x0100 /* IN2L_TO_MIXINL */ +#define WM8994_IN2L_TO_MIXINL_SHIFT 8 /* IN2L_TO_MIXINL */ +#define 
WM8994_IN2L_TO_MIXINL_WIDTH 1 /* IN2L_TO_MIXINL */ +#define WM8994_IN2L_MIXINL_VOL 0x0080 /* IN2L_MIXINL_VOL */ +#define WM8994_IN2L_MIXINL_VOL_MASK 0x0080 /* IN2L_MIXINL_VOL */ +#define WM8994_IN2L_MIXINL_VOL_SHIFT 7 /* IN2L_MIXINL_VOL */ +#define WM8994_IN2L_MIXINL_VOL_WIDTH 1 /* IN2L_MIXINL_VOL */ +#define WM8994_IN1L_TO_MIXINL 0x0020 /* IN1L_TO_MIXINL */ +#define WM8994_IN1L_TO_MIXINL_MASK 0x0020 /* IN1L_TO_MIXINL */ +#define WM8994_IN1L_TO_MIXINL_SHIFT 5 /* IN1L_TO_MIXINL */ +#define WM8994_IN1L_TO_MIXINL_WIDTH 1 /* IN1L_TO_MIXINL */ +#define WM8994_IN1L_MIXINL_VOL 0x0010 /* IN1L_MIXINL_VOL */ +#define WM8994_IN1L_MIXINL_VOL_MASK 0x0010 /* IN1L_MIXINL_VOL */ +#define WM8994_IN1L_MIXINL_VOL_SHIFT 4 /* IN1L_MIXINL_VOL */ +#define WM8994_IN1L_MIXINL_VOL_WIDTH 1 /* IN1L_MIXINL_VOL */ +#define WM8994_MIXOUTL_MIXINL_VOL_MASK 0x0007 /* MIXOUTL_MIXINL_VOL - [2:0] */ +#define WM8994_MIXOUTL_MIXINL_VOL_SHIFT 0 /* MIXOUTL_MIXINL_VOL - [2:0] */ +#define WM8994_MIXOUTL_MIXINL_VOL_WIDTH 3 /* MIXOUTL_MIXINL_VOL - [2:0] */ + +/* + * R42 (0x2A) - Input Mixer (4) + */ +#define WM8994_IN2R_TO_MIXINR 0x0100 /* IN2R_TO_MIXINR */ +#define WM8994_IN2R_TO_MIXINR_MASK 0x0100 /* IN2R_TO_MIXINR */ +#define WM8994_IN2R_TO_MIXINR_SHIFT 8 /* IN2R_TO_MIXINR */ +#define WM8994_IN2R_TO_MIXINR_WIDTH 1 /* IN2R_TO_MIXINR */ +#define WM8994_IN2R_MIXINR_VOL 0x0080 /* IN2R_MIXINR_VOL */ +#define WM8994_IN2R_MIXINR_VOL_MASK 0x0080 /* IN2R_MIXINR_VOL */ +#define WM8994_IN2R_MIXINR_VOL_SHIFT 7 /* IN2R_MIXINR_VOL */ +#define WM8994_IN2R_MIXINR_VOL_WIDTH 1 /* IN2R_MIXINR_VOL */ +#define WM8994_IN1R_TO_MIXINR 0x0020 /* IN1R_TO_MIXINR */ +#define WM8994_IN1R_TO_MIXINR_MASK 0x0020 /* IN1R_TO_MIXINR */ +#define WM8994_IN1R_TO_MIXINR_SHIFT 5 /* IN1R_TO_MIXINR */ +#define WM8994_IN1R_TO_MIXINR_WIDTH 1 /* IN1R_TO_MIXINR */ +#define WM8994_IN1R_MIXINR_VOL 0x0010 /* IN1R_MIXINR_VOL */ +#define WM8994_IN1R_MIXINR_VOL_MASK 0x0010 /* IN1R_MIXINR_VOL */ +#define WM8994_IN1R_MIXINR_VOL_SHIFT 4 /* IN1R_MIXINR_VOL */ +#define WM8994_IN1R_MIXINR_VOL_WIDTH 1 /* IN1R_MIXINR_VOL */ +#define WM8994_MIXOUTR_MIXINR_VOL_MASK 0x0007 /* MIXOUTR_MIXINR_VOL - [2:0] */ +#define WM8994_MIXOUTR_MIXINR_VOL_SHIFT 0 /* MIXOUTR_MIXINR_VOL - [2:0] */ +#define WM8994_MIXOUTR_MIXINR_VOL_WIDTH 3 /* MIXOUTR_MIXINR_VOL - [2:0] */ + +/* + * R43 (0x2B) - Input Mixer (5) + */ +#define WM8994_IN1LP_MIXINL_VOL_MASK 0x01C0 /* IN1LP_MIXINL_VOL - [8:6] */ +#define WM8994_IN1LP_MIXINL_VOL_SHIFT 6 /* IN1LP_MIXINL_VOL - [8:6] */ +#define WM8994_IN1LP_MIXINL_VOL_WIDTH 3 /* IN1LP_MIXINL_VOL - [8:6] */ +#define WM8994_IN2LRP_MIXINL_VOL_MASK 0x0007 /* IN2LRP_MIXINL_VOL - [2:0] */ +#define WM8994_IN2LRP_MIXINL_VOL_SHIFT 0 /* IN2LRP_MIXINL_VOL - [2:0] */ +#define WM8994_IN2LRP_MIXINL_VOL_WIDTH 3 /* IN2LRP_MIXINL_VOL - [2:0] */ + +/* + * R44 (0x2C) - Input Mixer (6) + */ +#define WM8994_IN1RP_MIXINR_VOL_MASK 0x01C0 /* IN1RP_MIXINR_VOL - [8:6] */ +#define WM8994_IN1RP_MIXINR_VOL_SHIFT 6 /* IN1RP_MIXINR_VOL - [8:6] */ +#define WM8994_IN1RP_MIXINR_VOL_WIDTH 3 /* IN1RP_MIXINR_VOL - [8:6] */ +#define WM8994_IN2LRP_MIXINR_VOL_MASK 0x0007 /* IN2LRP_MIXINR_VOL - [2:0] */ +#define WM8994_IN2LRP_MIXINR_VOL_SHIFT 0 /* IN2LRP_MIXINR_VOL - [2:0] */ +#define WM8994_IN2LRP_MIXINR_VOL_WIDTH 3 /* IN2LRP_MIXINR_VOL - [2:0] */ + +/* + * R45 (0x2D) - Output Mixer (1) + */ +#define WM8994_DAC1L_TO_HPOUT1L 0x0100 /* DAC1L_TO_HPOUT1L */ +#define WM8994_DAC1L_TO_HPOUT1L_MASK 0x0100 /* DAC1L_TO_HPOUT1L */ +#define WM8994_DAC1L_TO_HPOUT1L_SHIFT 8 /* DAC1L_TO_HPOUT1L */ +#define WM8994_DAC1L_TO_HPOUT1L_WIDTH 1 
/* DAC1L_TO_HPOUT1L */ +#define WM8994_MIXINR_TO_MIXOUTL 0x0080 /* MIXINR_TO_MIXOUTL */ +#define WM8994_MIXINR_TO_MIXOUTL_MASK 0x0080 /* MIXINR_TO_MIXOUTL */ +#define WM8994_MIXINR_TO_MIXOUTL_SHIFT 7 /* MIXINR_TO_MIXOUTL */ +#define WM8994_MIXINR_TO_MIXOUTL_WIDTH 1 /* MIXINR_TO_MIXOUTL */ +#define WM8994_MIXINL_TO_MIXOUTL 0x0040 /* MIXINL_TO_MIXOUTL */ +#define WM8994_MIXINL_TO_MIXOUTL_MASK 0x0040 /* MIXINL_TO_MIXOUTL */ +#define WM8994_MIXINL_TO_MIXOUTL_SHIFT 6 /* MIXINL_TO_MIXOUTL */ +#define WM8994_MIXINL_TO_MIXOUTL_WIDTH 1 /* MIXINL_TO_MIXOUTL */ +#define WM8994_IN2RN_TO_MIXOUTL 0x0020 /* IN2RN_TO_MIXOUTL */ +#define WM8994_IN2RN_TO_MIXOUTL_MASK 0x0020 /* IN2RN_TO_MIXOUTL */ +#define WM8994_IN2RN_TO_MIXOUTL_SHIFT 5 /* IN2RN_TO_MIXOUTL */ +#define WM8994_IN2RN_TO_MIXOUTL_WIDTH 1 /* IN2RN_TO_MIXOUTL */ +#define WM8994_IN2LN_TO_MIXOUTL 0x0010 /* IN2LN_TO_MIXOUTL */ +#define WM8994_IN2LN_TO_MIXOUTL_MASK 0x0010 /* IN2LN_TO_MIXOUTL */ +#define WM8994_IN2LN_TO_MIXOUTL_SHIFT 4 /* IN2LN_TO_MIXOUTL */ +#define WM8994_IN2LN_TO_MIXOUTL_WIDTH 1 /* IN2LN_TO_MIXOUTL */ +#define WM8994_IN1R_TO_MIXOUTL 0x0008 /* IN1R_TO_MIXOUTL */ +#define WM8994_IN1R_TO_MIXOUTL_MASK 0x0008 /* IN1R_TO_MIXOUTL */ +#define WM8994_IN1R_TO_MIXOUTL_SHIFT 3 /* IN1R_TO_MIXOUTL */ +#define WM8994_IN1R_TO_MIXOUTL_WIDTH 1 /* IN1R_TO_MIXOUTL */ +#define WM8994_IN1L_TO_MIXOUTL 0x0004 /* IN1L_TO_MIXOUTL */ +#define WM8994_IN1L_TO_MIXOUTL_MASK 0x0004 /* IN1L_TO_MIXOUTL */ +#define WM8994_IN1L_TO_MIXOUTL_SHIFT 2 /* IN1L_TO_MIXOUTL */ +#define WM8994_IN1L_TO_MIXOUTL_WIDTH 1 /* IN1L_TO_MIXOUTL */ +#define WM8994_IN2LP_TO_MIXOUTL 0x0002 /* IN2LP_TO_MIXOUTL */ +#define WM8994_IN2LP_TO_MIXOUTL_MASK 0x0002 /* IN2LP_TO_MIXOUTL */ +#define WM8994_IN2LP_TO_MIXOUTL_SHIFT 1 /* IN2LP_TO_MIXOUTL */ +#define WM8994_IN2LP_TO_MIXOUTL_WIDTH 1 /* IN2LP_TO_MIXOUTL */ +#define WM8994_DAC1L_TO_MIXOUTL 0x0001 /* DAC1L_TO_MIXOUTL */ +#define WM8994_DAC1L_TO_MIXOUTL_MASK 0x0001 /* DAC1L_TO_MIXOUTL */ +#define WM8994_DAC1L_TO_MIXOUTL_SHIFT 0 /* DAC1L_TO_MIXOUTL */ +#define WM8994_DAC1L_TO_MIXOUTL_WIDTH 1 /* DAC1L_TO_MIXOUTL */ + +/* + * R46 (0x2E) - Output Mixer (2) + */ +#define WM8994_DAC1R_TO_HPOUT1R 0x0100 /* DAC1R_TO_HPOUT1R */ +#define WM8994_DAC1R_TO_HPOUT1R_MASK 0x0100 /* DAC1R_TO_HPOUT1R */ +#define WM8994_DAC1R_TO_HPOUT1R_SHIFT 8 /* DAC1R_TO_HPOUT1R */ +#define WM8994_DAC1R_TO_HPOUT1R_WIDTH 1 /* DAC1R_TO_HPOUT1R */ +#define WM8994_MIXINL_TO_MIXOUTR 0x0080 /* MIXINL_TO_MIXOUTR */ +#define WM8994_MIXINL_TO_MIXOUTR_MASK 0x0080 /* MIXINL_TO_MIXOUTR */ +#define WM8994_MIXINL_TO_MIXOUTR_SHIFT 7 /* MIXINL_TO_MIXOUTR */ +#define WM8994_MIXINL_TO_MIXOUTR_WIDTH 1 /* MIXINL_TO_MIXOUTR */ +#define WM8994_MIXINR_TO_MIXOUTR 0x0040 /* MIXINR_TO_MIXOUTR */ +#define WM8994_MIXINR_TO_MIXOUTR_MASK 0x0040 /* MIXINR_TO_MIXOUTR */ +#define WM8994_MIXINR_TO_MIXOUTR_SHIFT 6 /* MIXINR_TO_MIXOUTR */ +#define WM8994_MIXINR_TO_MIXOUTR_WIDTH 1 /* MIXINR_TO_MIXOUTR */ +#define WM8994_IN2LN_TO_MIXOUTR 0x0020 /* IN2LN_TO_MIXOUTR */ +#define WM8994_IN2LN_TO_MIXOUTR_MASK 0x0020 /* IN2LN_TO_MIXOUTR */ +#define WM8994_IN2LN_TO_MIXOUTR_SHIFT 5 /* IN2LN_TO_MIXOUTR */ +#define WM8994_IN2LN_TO_MIXOUTR_WIDTH 1 /* IN2LN_TO_MIXOUTR */ +#define WM8994_IN2RN_TO_MIXOUTR 0x0010 /* IN2RN_TO_MIXOUTR */ +#define WM8994_IN2RN_TO_MIXOUTR_MASK 0x0010 /* IN2RN_TO_MIXOUTR */ +#define WM8994_IN2RN_TO_MIXOUTR_SHIFT 4 /* IN2RN_TO_MIXOUTR */ +#define WM8994_IN2RN_TO_MIXOUTR_WIDTH 1 /* IN2RN_TO_MIXOUTR */ +#define WM8994_IN1L_TO_MIXOUTR 0x0008 /* IN1L_TO_MIXOUTR */ +#define WM8994_IN1L_TO_MIXOUTR_MASK 
0x0008 /* IN1L_TO_MIXOUTR */ +#define WM8994_IN1L_TO_MIXOUTR_SHIFT 3 /* IN1L_TO_MIXOUTR */ +#define WM8994_IN1L_TO_MIXOUTR_WIDTH 1 /* IN1L_TO_MIXOUTR */ +#define WM8994_IN1R_TO_MIXOUTR 0x0004 /* IN1R_TO_MIXOUTR */ +#define WM8994_IN1R_TO_MIXOUTR_MASK 0x0004 /* IN1R_TO_MIXOUTR */ +#define WM8994_IN1R_TO_MIXOUTR_SHIFT 2 /* IN1R_TO_MIXOUTR */ +#define WM8994_IN1R_TO_MIXOUTR_WIDTH 1 /* IN1R_TO_MIXOUTR */ +#define WM8994_IN2RP_TO_MIXOUTR 0x0002 /* IN2RP_TO_MIXOUTR */ +#define WM8994_IN2RP_TO_MIXOUTR_MASK 0x0002 /* IN2RP_TO_MIXOUTR */ +#define WM8994_IN2RP_TO_MIXOUTR_SHIFT 1 /* IN2RP_TO_MIXOUTR */ +#define WM8994_IN2RP_TO_MIXOUTR_WIDTH 1 /* IN2RP_TO_MIXOUTR */ +#define WM8994_DAC1R_TO_MIXOUTR 0x0001 /* DAC1R_TO_MIXOUTR */ +#define WM8994_DAC1R_TO_MIXOUTR_MASK 0x0001 /* DAC1R_TO_MIXOUTR */ +#define WM8994_DAC1R_TO_MIXOUTR_SHIFT 0 /* DAC1R_TO_MIXOUTR */ +#define WM8994_DAC1R_TO_MIXOUTR_WIDTH 1 /* DAC1R_TO_MIXOUTR */ + +/* + * R47 (0x2F) - Output Mixer (3) + */ +#define WM8994_IN2LP_MIXOUTL_VOL_MASK 0x0E00 /* IN2LP_MIXOUTL_VOL - [11:9] */ +#define WM8994_IN2LP_MIXOUTL_VOL_SHIFT 9 /* IN2LP_MIXOUTL_VOL - [11:9] */ +#define WM8994_IN2LP_MIXOUTL_VOL_WIDTH 3 /* IN2LP_MIXOUTL_VOL - [11:9] */ +#define WM8994_IN2LN_MIXOUTL_VOL_MASK 0x01C0 /* IN2LN_MIXOUTL_VOL - [8:6] */ +#define WM8994_IN2LN_MIXOUTL_VOL_SHIFT 6 /* IN2LN_MIXOUTL_VOL - [8:6] */ +#define WM8994_IN2LN_MIXOUTL_VOL_WIDTH 3 /* IN2LN_MIXOUTL_VOL - [8:6] */ +#define WM8994_IN1R_MIXOUTL_VOL_MASK 0x0038 /* IN1R_MIXOUTL_VOL - [5:3] */ +#define WM8994_IN1R_MIXOUTL_VOL_SHIFT 3 /* IN1R_MIXOUTL_VOL - [5:3] */ +#define WM8994_IN1R_MIXOUTL_VOL_WIDTH 3 /* IN1R_MIXOUTL_VOL - [5:3] */ +#define WM8994_IN1L_MIXOUTL_VOL_MASK 0x0007 /* IN1L_MIXOUTL_VOL - [2:0] */ +#define WM8994_IN1L_MIXOUTL_VOL_SHIFT 0 /* IN1L_MIXOUTL_VOL - [2:0] */ +#define WM8994_IN1L_MIXOUTL_VOL_WIDTH 3 /* IN1L_MIXOUTL_VOL - [2:0] */ + +/* + * R48 (0x30) - Output Mixer (4) + */ +#define WM8994_IN2RP_MIXOUTR_VOL_MASK 0x0E00 /* IN2RP_MIXOUTR_VOL - [11:9] */ +#define WM8994_IN2RP_MIXOUTR_VOL_SHIFT 9 /* IN2RP_MIXOUTR_VOL - [11:9] */ +#define WM8994_IN2RP_MIXOUTR_VOL_WIDTH 3 /* IN2RP_MIXOUTR_VOL - [11:9] */ +#define WM8994_IN2RN_MIXOUTR_VOL_MASK 0x01C0 /* IN2RN_MIXOUTR_VOL - [8:6] */ +#define WM8994_IN2RN_MIXOUTR_VOL_SHIFT 6 /* IN2RN_MIXOUTR_VOL - [8:6] */ +#define WM8994_IN2RN_MIXOUTR_VOL_WIDTH 3 /* IN2RN_MIXOUTR_VOL - [8:6] */ +#define WM8994_IN1L_MIXOUTR_VOL_MASK 0x0038 /* IN1L_MIXOUTR_VOL - [5:3] */ +#define WM8994_IN1L_MIXOUTR_VOL_SHIFT 3 /* IN1L_MIXOUTR_VOL - [5:3] */ +#define WM8994_IN1L_MIXOUTR_VOL_WIDTH 3 /* IN1L_MIXOUTR_VOL - [5:3] */ +#define WM8994_IN1R_MIXOUTR_VOL_MASK 0x0007 /* IN1R_MIXOUTR_VOL - [2:0] */ +#define WM8994_IN1R_MIXOUTR_VOL_SHIFT 0 /* IN1R_MIXOUTR_VOL - [2:0] */ +#define WM8994_IN1R_MIXOUTR_VOL_WIDTH 3 /* IN1R_MIXOUTR_VOL - [2:0] */ + +/* + * R49 (0x31) - Output Mixer (5) + */ +#define WM8994_DAC1L_MIXOUTL_VOL_MASK 0x0E00 /* DAC1L_MIXOUTL_VOL - [11:9] */ +#define WM8994_DAC1L_MIXOUTL_VOL_SHIFT 9 /* DAC1L_MIXOUTL_VOL - [11:9] */ +#define WM8994_DAC1L_MIXOUTL_VOL_WIDTH 3 /* DAC1L_MIXOUTL_VOL - [11:9] */ +#define WM8994_IN2RN_MIXOUTL_VOL_MASK 0x01C0 /* IN2RN_MIXOUTL_VOL - [8:6] */ +#define WM8994_IN2RN_MIXOUTL_VOL_SHIFT 6 /* IN2RN_MIXOUTL_VOL - [8:6] */ +#define WM8994_IN2RN_MIXOUTL_VOL_WIDTH 3 /* IN2RN_MIXOUTL_VOL - [8:6] */ +#define WM8994_MIXINR_MIXOUTL_VOL_MASK 0x0038 /* MIXINR_MIXOUTL_VOL - [5:3] */ +#define WM8994_MIXINR_MIXOUTL_VOL_SHIFT 3 /* MIXINR_MIXOUTL_VOL - [5:3] */ +#define WM8994_MIXINR_MIXOUTL_VOL_WIDTH 3 /* MIXINR_MIXOUTL_VOL - [5:3] */ +#define 
WM8994_MIXINL_MIXOUTL_VOL_MASK 0x0007 /* MIXINL_MIXOUTL_VOL - [2:0] */ +#define WM8994_MIXINL_MIXOUTL_VOL_SHIFT 0 /* MIXINL_MIXOUTL_VOL - [2:0] */ +#define WM8994_MIXINL_MIXOUTL_VOL_WIDTH 3 /* MIXINL_MIXOUTL_VOL - [2:0] */ + +/* + * R50 (0x32) - Output Mixer (6) + */ +#define WM8994_DAC1R_MIXOUTR_VOL_MASK 0x0E00 /* DAC1R_MIXOUTR_VOL - [11:9] */ +#define WM8994_DAC1R_MIXOUTR_VOL_SHIFT 9 /* DAC1R_MIXOUTR_VOL - [11:9] */ +#define WM8994_DAC1R_MIXOUTR_VOL_WIDTH 3 /* DAC1R_MIXOUTR_VOL - [11:9] */ +#define WM8994_IN2LN_MIXOUTR_VOL_MASK 0x01C0 /* IN2LN_MIXOUTR_VOL - [8:6] */ +#define WM8994_IN2LN_MIXOUTR_VOL_SHIFT 6 /* IN2LN_MIXOUTR_VOL - [8:6] */ +#define WM8994_IN2LN_MIXOUTR_VOL_WIDTH 3 /* IN2LN_MIXOUTR_VOL - [8:6] */ +#define WM8994_MIXINL_MIXOUTR_VOL_MASK 0x0038 /* MIXINL_MIXOUTR_VOL - [5:3] */ +#define WM8994_MIXINL_MIXOUTR_VOL_SHIFT 3 /* MIXINL_MIXOUTR_VOL - [5:3] */ +#define WM8994_MIXINL_MIXOUTR_VOL_WIDTH 3 /* MIXINL_MIXOUTR_VOL - [5:3] */ +#define WM8994_MIXINR_MIXOUTR_VOL_MASK 0x0007 /* MIXINR_MIXOUTR_VOL - [2:0] */ +#define WM8994_MIXINR_MIXOUTR_VOL_SHIFT 0 /* MIXINR_MIXOUTR_VOL - [2:0] */ +#define WM8994_MIXINR_MIXOUTR_VOL_WIDTH 3 /* MIXINR_MIXOUTR_VOL - [2:0] */ + +/* + * R51 (0x33) - HPOUT2 Mixer + */ +#define WM8994_IN2LRP_TO_HPOUT2 0x0020 /* IN2LRP_TO_HPOUT2 */ +#define WM8994_IN2LRP_TO_HPOUT2_MASK 0x0020 /* IN2LRP_TO_HPOUT2 */ +#define WM8994_IN2LRP_TO_HPOUT2_SHIFT 5 /* IN2LRP_TO_HPOUT2 */ +#define WM8994_IN2LRP_TO_HPOUT2_WIDTH 1 /* IN2LRP_TO_HPOUT2 */ +#define WM8994_MIXOUTLVOL_TO_HPOUT2 0x0010 /* MIXOUTLVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTLVOL_TO_HPOUT2_MASK 0x0010 /* MIXOUTLVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTLVOL_TO_HPOUT2_SHIFT 4 /* MIXOUTLVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTLVOL_TO_HPOUT2_WIDTH 1 /* MIXOUTLVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTRVOL_TO_HPOUT2 0x0008 /* MIXOUTRVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTRVOL_TO_HPOUT2_MASK 0x0008 /* MIXOUTRVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTRVOL_TO_HPOUT2_SHIFT 3 /* MIXOUTRVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTRVOL_TO_HPOUT2_WIDTH 1 /* MIXOUTRVOL_TO_HPOUT2 */ + +/* + * R52 (0x34) - Line Mixer (1) + */ +#define WM8994_MIXOUTL_TO_LINEOUT1N 0x0040 /* MIXOUTL_TO_LINEOUT1N */ +#define WM8994_MIXOUTL_TO_LINEOUT1N_MASK 0x0040 /* MIXOUTL_TO_LINEOUT1N */ +#define WM8994_MIXOUTL_TO_LINEOUT1N_SHIFT 6 /* MIXOUTL_TO_LINEOUT1N */ +#define WM8994_MIXOUTL_TO_LINEOUT1N_WIDTH 1 /* MIXOUTL_TO_LINEOUT1N */ +#define WM8994_MIXOUTR_TO_LINEOUT1N 0x0020 /* MIXOUTR_TO_LINEOUT1N */ +#define WM8994_MIXOUTR_TO_LINEOUT1N_MASK 0x0020 /* MIXOUTR_TO_LINEOUT1N */ +#define WM8994_MIXOUTR_TO_LINEOUT1N_SHIFT 5 /* MIXOUTR_TO_LINEOUT1N */ +#define WM8994_MIXOUTR_TO_LINEOUT1N_WIDTH 1 /* MIXOUTR_TO_LINEOUT1N */ +#define WM8994_LINEOUT1_MODE 0x0010 /* LINEOUT1_MODE */ +#define WM8994_LINEOUT1_MODE_MASK 0x0010 /* LINEOUT1_MODE */ +#define WM8994_LINEOUT1_MODE_SHIFT 4 /* LINEOUT1_MODE */ +#define WM8994_LINEOUT1_MODE_WIDTH 1 /* LINEOUT1_MODE */ +#define WM8994_IN1R_TO_LINEOUT1P 0x0004 /* IN1R_TO_LINEOUT1P */ +#define WM8994_IN1R_TO_LINEOUT1P_MASK 0x0004 /* IN1R_TO_LINEOUT1P */ +#define WM8994_IN1R_TO_LINEOUT1P_SHIFT 2 /* IN1R_TO_LINEOUT1P */ +#define WM8994_IN1R_TO_LINEOUT1P_WIDTH 1 /* IN1R_TO_LINEOUT1P */ +#define WM8994_IN1L_TO_LINEOUT1P 0x0002 /* IN1L_TO_LINEOUT1P */ +#define WM8994_IN1L_TO_LINEOUT1P_MASK 0x0002 /* IN1L_TO_LINEOUT1P */ +#define WM8994_IN1L_TO_LINEOUT1P_SHIFT 1 /* IN1L_TO_LINEOUT1P */ +#define WM8994_IN1L_TO_LINEOUT1P_WIDTH 1 /* IN1L_TO_LINEOUT1P */ +#define WM8994_MIXOUTL_TO_LINEOUT1P 0x0001 /* MIXOUTL_TO_LINEOUT1P */ +#define 
WM8994_MIXOUTL_TO_LINEOUT1P_MASK 0x0001 /* MIXOUTL_TO_LINEOUT1P */ +#define WM8994_MIXOUTL_TO_LINEOUT1P_SHIFT 0 /* MIXOUTL_TO_LINEOUT1P */ +#define WM8994_MIXOUTL_TO_LINEOUT1P_WIDTH 1 /* MIXOUTL_TO_LINEOUT1P */ + +/* + * R53 (0x35) - Line Mixer (2) + */ +#define WM8994_MIXOUTR_TO_LINEOUT2N 0x0040 /* MIXOUTR_TO_LINEOUT2N */ +#define WM8994_MIXOUTR_TO_LINEOUT2N_MASK 0x0040 /* MIXOUTR_TO_LINEOUT2N */ +#define WM8994_MIXOUTR_TO_LINEOUT2N_SHIFT 6 /* MIXOUTR_TO_LINEOUT2N */ +#define WM8994_MIXOUTR_TO_LINEOUT2N_WIDTH 1 /* MIXOUTR_TO_LINEOUT2N */ +#define WM8994_MIXOUTL_TO_LINEOUT2N 0x0020 /* MIXOUTL_TO_LINEOUT2N */ +#define WM8994_MIXOUTL_TO_LINEOUT2N_MASK 0x0020 /* MIXOUTL_TO_LINEOUT2N */ +#define WM8994_MIXOUTL_TO_LINEOUT2N_SHIFT 5 /* MIXOUTL_TO_LINEOUT2N */ +#define WM8994_MIXOUTL_TO_LINEOUT2N_WIDTH 1 /* MIXOUTL_TO_LINEOUT2N */ +#define WM8994_LINEOUT2_MODE 0x0010 /* LINEOUT2_MODE */ +#define WM8994_LINEOUT2_MODE_MASK 0x0010 /* LINEOUT2_MODE */ +#define WM8994_LINEOUT2_MODE_SHIFT 4 /* LINEOUT2_MODE */ +#define WM8994_LINEOUT2_MODE_WIDTH 1 /* LINEOUT2_MODE */ +#define WM8994_IN1L_TO_LINEOUT2P 0x0004 /* IN1L_TO_LINEOUT2P */ +#define WM8994_IN1L_TO_LINEOUT2P_MASK 0x0004 /* IN1L_TO_LINEOUT2P */ +#define WM8994_IN1L_TO_LINEOUT2P_SHIFT 2 /* IN1L_TO_LINEOUT2P */ +#define WM8994_IN1L_TO_LINEOUT2P_WIDTH 1 /* IN1L_TO_LINEOUT2P */ +#define WM8994_IN1R_TO_LINEOUT2P 0x0002 /* IN1R_TO_LINEOUT2P */ +#define WM8994_IN1R_TO_LINEOUT2P_MASK 0x0002 /* IN1R_TO_LINEOUT2P */ +#define WM8994_IN1R_TO_LINEOUT2P_SHIFT 1 /* IN1R_TO_LINEOUT2P */ +#define WM8994_IN1R_TO_LINEOUT2P_WIDTH 1 /* IN1R_TO_LINEOUT2P */ +#define WM8994_MIXOUTR_TO_LINEOUT2P 0x0001 /* MIXOUTR_TO_LINEOUT2P */ +#define WM8994_MIXOUTR_TO_LINEOUT2P_MASK 0x0001 /* MIXOUTR_TO_LINEOUT2P */ +#define WM8994_MIXOUTR_TO_LINEOUT2P_SHIFT 0 /* MIXOUTR_TO_LINEOUT2P */ +#define WM8994_MIXOUTR_TO_LINEOUT2P_WIDTH 1 /* MIXOUTR_TO_LINEOUT2P */ + +/* + * R54 (0x36) - Speaker Mixer + */ +#define WM8994_DAC2L_TO_SPKMIXL 0x0200 /* DAC2L_TO_SPKMIXL */ +#define WM8994_DAC2L_TO_SPKMIXL_MASK 0x0200 /* DAC2L_TO_SPKMIXL */ +#define WM8994_DAC2L_TO_SPKMIXL_SHIFT 9 /* DAC2L_TO_SPKMIXL */ +#define WM8994_DAC2L_TO_SPKMIXL_WIDTH 1 /* DAC2L_TO_SPKMIXL */ +#define WM8994_DAC2R_TO_SPKMIXR 0x0100 /* DAC2R_TO_SPKMIXR */ +#define WM8994_DAC2R_TO_SPKMIXR_MASK 0x0100 /* DAC2R_TO_SPKMIXR */ +#define WM8994_DAC2R_TO_SPKMIXR_SHIFT 8 /* DAC2R_TO_SPKMIXR */ +#define WM8994_DAC2R_TO_SPKMIXR_WIDTH 1 /* DAC2R_TO_SPKMIXR */ +#define WM8994_MIXINL_TO_SPKMIXL 0x0080 /* MIXINL_TO_SPKMIXL */ +#define WM8994_MIXINL_TO_SPKMIXL_MASK 0x0080 /* MIXINL_TO_SPKMIXL */ +#define WM8994_MIXINL_TO_SPKMIXL_SHIFT 7 /* MIXINL_TO_SPKMIXL */ +#define WM8994_MIXINL_TO_SPKMIXL_WIDTH 1 /* MIXINL_TO_SPKMIXL */ +#define WM8994_MIXINR_TO_SPKMIXR 0x0040 /* MIXINR_TO_SPKMIXR */ +#define WM8994_MIXINR_TO_SPKMIXR_MASK 0x0040 /* MIXINR_TO_SPKMIXR */ +#define WM8994_MIXINR_TO_SPKMIXR_SHIFT 6 /* MIXINR_TO_SPKMIXR */ +#define WM8994_MIXINR_TO_SPKMIXR_WIDTH 1 /* MIXINR_TO_SPKMIXR */ +#define WM8994_IN1LP_TO_SPKMIXL 0x0020 /* IN1LP_TO_SPKMIXL */ +#define WM8994_IN1LP_TO_SPKMIXL_MASK 0x0020 /* IN1LP_TO_SPKMIXL */ +#define WM8994_IN1LP_TO_SPKMIXL_SHIFT 5 /* IN1LP_TO_SPKMIXL */ +#define WM8994_IN1LP_TO_SPKMIXL_WIDTH 1 /* IN1LP_TO_SPKMIXL */ +#define WM8994_IN1RP_TO_SPKMIXR 0x0010 /* IN1RP_TO_SPKMIXR */ +#define WM8994_IN1RP_TO_SPKMIXR_MASK 0x0010 /* IN1RP_TO_SPKMIXR */ +#define WM8994_IN1RP_TO_SPKMIXR_SHIFT 4 /* IN1RP_TO_SPKMIXR */ +#define WM8994_IN1RP_TO_SPKMIXR_WIDTH 1 /* IN1RP_TO_SPKMIXR */ +#define WM8994_MIXOUTL_TO_SPKMIXL 0x0008 /* 
MIXOUTL_TO_SPKMIXL */ +#define WM8994_MIXOUTL_TO_SPKMIXL_MASK 0x0008 /* MIXOUTL_TO_SPKMIXL */ +#define WM8994_MIXOUTL_TO_SPKMIXL_SHIFT 3 /* MIXOUTL_TO_SPKMIXL */ +#define WM8994_MIXOUTL_TO_SPKMIXL_WIDTH 1 /* MIXOUTL_TO_SPKMIXL */ +#define WM8994_MIXOUTR_TO_SPKMIXR 0x0004 /* MIXOUTR_TO_SPKMIXR */ +#define WM8994_MIXOUTR_TO_SPKMIXR_MASK 0x0004 /* MIXOUTR_TO_SPKMIXR */ +#define WM8994_MIXOUTR_TO_SPKMIXR_SHIFT 2 /* MIXOUTR_TO_SPKMIXR */ +#define WM8994_MIXOUTR_TO_SPKMIXR_WIDTH 1 /* MIXOUTR_TO_SPKMIXR */ +#define WM8994_DAC1L_TO_SPKMIXL 0x0002 /* DAC1L_TO_SPKMIXL */ +#define WM8994_DAC1L_TO_SPKMIXL_MASK 0x0002 /* DAC1L_TO_SPKMIXL */ +#define WM8994_DAC1L_TO_SPKMIXL_SHIFT 1 /* DAC1L_TO_SPKMIXL */ +#define WM8994_DAC1L_TO_SPKMIXL_WIDTH 1 /* DAC1L_TO_SPKMIXL */ +#define WM8994_DAC1R_TO_SPKMIXR 0x0001 /* DAC1R_TO_SPKMIXR */ +#define WM8994_DAC1R_TO_SPKMIXR_MASK 0x0001 /* DAC1R_TO_SPKMIXR */ +#define WM8994_DAC1R_TO_SPKMIXR_SHIFT 0 /* DAC1R_TO_SPKMIXR */ +#define WM8994_DAC1R_TO_SPKMIXR_WIDTH 1 /* DAC1R_TO_SPKMIXR */ + +/* + * R55 (0x37) - Additional Control + */ +#define WM8994_LINEOUT1_FB 0x0080 /* LINEOUT1_FB */ +#define WM8994_LINEOUT1_FB_MASK 0x0080 /* LINEOUT1_FB */ +#define WM8994_LINEOUT1_FB_SHIFT 7 /* LINEOUT1_FB */ +#define WM8994_LINEOUT1_FB_WIDTH 1 /* LINEOUT1_FB */ +#define WM8994_LINEOUT2_FB 0x0040 /* LINEOUT2_FB */ +#define WM8994_LINEOUT2_FB_MASK 0x0040 /* LINEOUT2_FB */ +#define WM8994_LINEOUT2_FB_SHIFT 6 /* LINEOUT2_FB */ +#define WM8994_LINEOUT2_FB_WIDTH 1 /* LINEOUT2_FB */ +#define WM8994_VROI 0x0001 /* VROI */ +#define WM8994_VROI_MASK 0x0001 /* VROI */ +#define WM8994_VROI_SHIFT 0 /* VROI */ +#define WM8994_VROI_WIDTH 1 /* VROI */ + +/* + * R56 (0x38) - AntiPOP (1) + */ +#define WM8994_LINEOUT_VMID_BUF_ENA 0x0080 /* LINEOUT_VMID_BUF_ENA */ +#define WM8994_LINEOUT_VMID_BUF_ENA_MASK 0x0080 /* LINEOUT_VMID_BUF_ENA */ +#define WM8994_LINEOUT_VMID_BUF_ENA_SHIFT 7 /* LINEOUT_VMID_BUF_ENA */ +#define WM8994_LINEOUT_VMID_BUF_ENA_WIDTH 1 /* LINEOUT_VMID_BUF_ENA */ +#define WM8994_HPOUT2_IN_ENA 0x0040 /* HPOUT2_IN_ENA */ +#define WM8994_HPOUT2_IN_ENA_MASK 0x0040 /* HPOUT2_IN_ENA */ +#define WM8994_HPOUT2_IN_ENA_SHIFT 6 /* HPOUT2_IN_ENA */ +#define WM8994_HPOUT2_IN_ENA_WIDTH 1 /* HPOUT2_IN_ENA */ +#define WM8994_LINEOUT1_DISCH 0x0020 /* LINEOUT1_DISCH */ +#define WM8994_LINEOUT1_DISCH_MASK 0x0020 /* LINEOUT1_DISCH */ +#define WM8994_LINEOUT1_DISCH_SHIFT 5 /* LINEOUT1_DISCH */ +#define WM8994_LINEOUT1_DISCH_WIDTH 1 /* LINEOUT1_DISCH */ +#define WM8994_LINEOUT2_DISCH 0x0010 /* LINEOUT2_DISCH */ +#define WM8994_LINEOUT2_DISCH_MASK 0x0010 /* LINEOUT2_DISCH */ +#define WM8994_LINEOUT2_DISCH_SHIFT 4 /* LINEOUT2_DISCH */ +#define WM8994_LINEOUT2_DISCH_WIDTH 1 /* LINEOUT2_DISCH */ + +/* + * R57 (0x39) - AntiPOP (2) + */ +#define WM1811_JACKDET_MODE_MASK 0x0180 /* JACKDET_MODE - [8:7] */ +#define WM1811_JACKDET_MODE_SHIFT 7 /* JACKDET_MODE - [8:7] */ +#define WM1811_JACKDET_MODE_WIDTH 2 /* JACKDET_MODE - [8:7] */ +#define WM8994_MICB2_DISCH 0x0100 /* MICB2_DISCH */ +#define WM8994_MICB2_DISCH_MASK 0x0100 /* MICB2_DISCH */ +#define WM8994_MICB2_DISCH_SHIFT 8 /* MICB2_DISCH */ +#define WM8994_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */ +#define WM8994_MICB1_DISCH 0x0080 /* MICB1_DISCH */ +#define WM8994_MICB1_DISCH_MASK 0x0080 /* MICB1_DISCH */ +#define WM8994_MICB1_DISCH_SHIFT 7 /* MICB1_DISCH */ +#define WM8994_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */ +#define WM8994_VMID_RAMP_MASK 0x0060 /* VMID_RAMP - [6:5] */ +#define WM8994_VMID_RAMP_SHIFT 5 /* VMID_RAMP - [6:5] */ +#define WM8994_VMID_RAMP_WIDTH 2 /* 
VMID_RAMP - [6:5] */ +#define WM8994_VMID_BUF_ENA 0x0008 /* VMID_BUF_ENA */ +#define WM8994_VMID_BUF_ENA_MASK 0x0008 /* VMID_BUF_ENA */ +#define WM8994_VMID_BUF_ENA_SHIFT 3 /* VMID_BUF_ENA */ +#define WM8994_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */ +#define WM8994_STARTUP_BIAS_ENA 0x0004 /* STARTUP_BIAS_ENA */ +#define WM8994_STARTUP_BIAS_ENA_MASK 0x0004 /* STARTUP_BIAS_ENA */ +#define WM8994_STARTUP_BIAS_ENA_SHIFT 2 /* STARTUP_BIAS_ENA */ +#define WM8994_STARTUP_BIAS_ENA_WIDTH 1 /* STARTUP_BIAS_ENA */ +#define WM8994_BIAS_SRC 0x0002 /* BIAS_SRC */ +#define WM8994_BIAS_SRC_MASK 0x0002 /* BIAS_SRC */ +#define WM8994_BIAS_SRC_SHIFT 1 /* BIAS_SRC */ +#define WM8994_BIAS_SRC_WIDTH 1 /* BIAS_SRC */ +#define WM8994_VMID_DISCH 0x0001 /* VMID_DISCH */ +#define WM8994_VMID_DISCH_MASK 0x0001 /* VMID_DISCH */ +#define WM8994_VMID_DISCH_SHIFT 0 /* VMID_DISCH */ +#define WM8994_VMID_DISCH_WIDTH 1 /* VMID_DISCH */ + +/* + * R58 (0x3A) - MICBIAS + */ +#define WM8994_MICD_SCTHR_MASK 0x00C0 /* MICD_SCTHR - [7:6] */ +#define WM8994_MICD_SCTHR_SHIFT 6 /* MICD_SCTHR - [7:6] */ +#define WM8994_MICD_SCTHR_WIDTH 2 /* MICD_SCTHR - [7:6] */ +#define WM8994_MICD_THR_MASK 0x0038 /* MICD_THR - [5:3] */ +#define WM8994_MICD_THR_SHIFT 3 /* MICD_THR - [5:3] */ +#define WM8994_MICD_THR_WIDTH 3 /* MICD_THR - [5:3] */ +#define WM8994_MICD_ENA 0x0004 /* MICD_ENA */ +#define WM8994_MICD_ENA_MASK 0x0004 /* MICD_ENA */ +#define WM8994_MICD_ENA_SHIFT 2 /* MICD_ENA */ +#define WM8994_MICD_ENA_WIDTH 1 /* MICD_ENA */ +#define WM8994_MICB2_LVL 0x0002 /* MICB2_LVL */ +#define WM8994_MICB2_LVL_MASK 0x0002 /* MICB2_LVL */ +#define WM8994_MICB2_LVL_SHIFT 1 /* MICB2_LVL */ +#define WM8994_MICB2_LVL_WIDTH 1 /* MICB2_LVL */ +#define WM8994_MICB1_LVL 0x0001 /* MICB1_LVL */ +#define WM8994_MICB1_LVL_MASK 0x0001 /* MICB1_LVL */ +#define WM8994_MICB1_LVL_SHIFT 0 /* MICB1_LVL */ +#define WM8994_MICB1_LVL_WIDTH 1 /* MICB1_LVL */ + +/* + * R59 (0x3B) - LDO 1 + */ +#define WM8994_LDO1_VSEL_MASK 0x000E /* LDO1_VSEL - [3:1] */ +#define WM8994_LDO1_VSEL_SHIFT 1 /* LDO1_VSEL - [3:1] */ +#define WM8994_LDO1_VSEL_WIDTH 3 /* LDO1_VSEL - [3:1] */ +#define WM8994_LDO1_DISCH 0x0001 /* LDO1_DISCH */ +#define WM8994_LDO1_DISCH_MASK 0x0001 /* LDO1_DISCH */ +#define WM8994_LDO1_DISCH_SHIFT 0 /* LDO1_DISCH */ +#define WM8994_LDO1_DISCH_WIDTH 1 /* LDO1_DISCH */ + +/* + * R60 (0x3C) - LDO 2 + */ +#define WM8994_LDO2_VSEL_MASK 0x0006 /* LDO2_VSEL - [2:1] */ +#define WM8994_LDO2_VSEL_SHIFT 1 /* LDO2_VSEL - [2:1] */ +#define WM8994_LDO2_VSEL_WIDTH 2 /* LDO2_VSEL - [2:1] */ +#define WM8994_LDO2_DISCH 0x0001 /* LDO2_DISCH */ +#define WM8994_LDO2_DISCH_MASK 0x0001 /* LDO2_DISCH */ +#define WM8994_LDO2_DISCH_SHIFT 0 /* LDO2_DISCH */ +#define WM8994_LDO2_DISCH_WIDTH 1 /* LDO2_DISCH */ + +/* + * R61 (0x3D) - MICBIAS1 + */ +#define WM8958_MICB1_RATE 0x0020 /* MICB1_RATE */ +#define WM8958_MICB1_RATE_MASK 0x0020 /* MICB1_RATE */ +#define WM8958_MICB1_RATE_SHIFT 5 /* MICB1_RATE */ +#define WM8958_MICB1_RATE_WIDTH 1 /* MICB1_RATE */ +#define WM8958_MICB1_MODE 0x0010 /* MICB1_MODE */ +#define WM8958_MICB1_MODE_MASK 0x0010 /* MICB1_MODE */ +#define WM8958_MICB1_MODE_SHIFT 4 /* MICB1_MODE */ +#define WM8958_MICB1_MODE_WIDTH 1 /* MICB1_MODE */ +#define WM8958_MICB1_LVL_MASK 0x000E /* MICB1_LVL - [3:1] */ +#define WM8958_MICB1_LVL_SHIFT 1 /* MICB1_LVL - [3:1] */ +#define WM8958_MICB1_LVL_WIDTH 3 /* MICB1_LVL - [3:1] */ +#define WM8958_MICB1_DISCH 0x0001 /* MICB1_DISCH */ +#define WM8958_MICB1_DISCH_MASK 0x0001 /* MICB1_DISCH */ +#define WM8958_MICB1_DISCH_SHIFT 0 /* MICB1_DISCH */ 
+#define WM8958_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */ + +/* + * R62 (0x3E) - MICBIAS2 + */ +#define WM8958_MICB2_RATE 0x0020 /* MICB2_RATE */ +#define WM8958_MICB2_RATE_MASK 0x0020 /* MICB2_RATE */ +#define WM8958_MICB2_RATE_SHIFT 5 /* MICB2_RATE */ +#define WM8958_MICB2_RATE_WIDTH 1 /* MICB2_RATE */ +#define WM8958_MICB2_MODE 0x0010 /* MICB2_MODE */ +#define WM8958_MICB2_MODE_MASK 0x0010 /* MICB2_MODE */ +#define WM8958_MICB2_MODE_SHIFT 4 /* MICB2_MODE */ +#define WM8958_MICB2_MODE_WIDTH 1 /* MICB2_MODE */ +#define WM8958_MICB2_LVL_MASK 0x000E /* MICB2_LVL - [3:1] */ +#define WM8958_MICB2_LVL_SHIFT 1 /* MICB2_LVL - [3:1] */ +#define WM8958_MICB2_LVL_WIDTH 3 /* MICB2_LVL - [3:1] */ +#define WM8958_MICB2_DISCH 0x0001 /* MICB2_DISCH */ +#define WM8958_MICB2_DISCH_MASK 0x0001 /* MICB2_DISCH */ +#define WM8958_MICB2_DISCH_SHIFT 0 /* MICB2_DISCH */ +#define WM8958_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */ + +/* + * R210 (0xD2) - Mic Detect 3 + */ +#define WM8958_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */ +#define WM8958_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */ +#define WM8958_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */ +#define WM8958_MICD_VALID 0x0002 /* MICD_VALID */ +#define WM8958_MICD_VALID_MASK 0x0002 /* MICD_VALID */ +#define WM8958_MICD_VALID_SHIFT 1 /* MICD_VALID */ +#define WM8958_MICD_VALID_WIDTH 1 /* MICD_VALID */ +#define WM8958_MICD_STS 0x0001 /* MICD_STS */ +#define WM8958_MICD_STS_MASK 0x0001 /* MICD_STS */ +#define WM8958_MICD_STS_SHIFT 0 /* MICD_STS */ +#define WM8958_MICD_STS_WIDTH 1 /* MICD_STS */ + +/* + * R76 (0x4C) - Charge Pump (1) + */ +#define WM8994_CP_ENA 0x8000 /* CP_ENA */ +#define WM8994_CP_ENA_MASK 0x8000 /* CP_ENA */ +#define WM8994_CP_ENA_SHIFT 15 /* CP_ENA */ +#define WM8994_CP_ENA_WIDTH 1 /* CP_ENA */ + +/* + * R77 (0x4D) - Charge Pump (2) + */ +#define WM8958_CP_DISCH 0x8000 /* CP_DISCH */ +#define WM8958_CP_DISCH_MASK 0x8000 /* CP_DISCH */ +#define WM8958_CP_DISCH_SHIFT 15 /* CP_DISCH */ +#define WM8958_CP_DISCH_WIDTH 1 /* CP_DISCH */ + +/* + * R81 (0x51) - Class W (1) + */ +#define WM8994_CP_DYN_SRC_SEL_MASK 0x0300 /* CP_DYN_SRC_SEL - [9:8] */ +#define WM8994_CP_DYN_SRC_SEL_SHIFT 8 /* CP_DYN_SRC_SEL - [9:8] */ +#define WM8994_CP_DYN_SRC_SEL_WIDTH 2 /* CP_DYN_SRC_SEL - [9:8] */ +#define WM8994_CP_DYN_PWR 0x0001 /* CP_DYN_PWR */ +#define WM8994_CP_DYN_PWR_MASK 0x0001 /* CP_DYN_PWR */ +#define WM8994_CP_DYN_PWR_SHIFT 0 /* CP_DYN_PWR */ +#define WM8994_CP_DYN_PWR_WIDTH 1 /* CP_DYN_PWR */ + +/* + * R84 (0x54) - DC Servo (1) + */ +#define WM8994_DCS_TRIG_SINGLE_1 0x2000 /* DCS_TRIG_SINGLE_1 */ +#define WM8994_DCS_TRIG_SINGLE_1_MASK 0x2000 /* DCS_TRIG_SINGLE_1 */ +#define WM8994_DCS_TRIG_SINGLE_1_SHIFT 13 /* DCS_TRIG_SINGLE_1 */ +#define WM8994_DCS_TRIG_SINGLE_1_WIDTH 1 /* DCS_TRIG_SINGLE_1 */ +#define WM8994_DCS_TRIG_SINGLE_0 0x1000 /* DCS_TRIG_SINGLE_0 */ +#define WM8994_DCS_TRIG_SINGLE_0_MASK 0x1000 /* DCS_TRIG_SINGLE_0 */ +#define WM8994_DCS_TRIG_SINGLE_0_SHIFT 12 /* DCS_TRIG_SINGLE_0 */ +#define WM8994_DCS_TRIG_SINGLE_0_WIDTH 1 /* DCS_TRIG_SINGLE_0 */ +#define WM8994_DCS_TRIG_SERIES_1 0x0200 /* DCS_TRIG_SERIES_1 */ +#define WM8994_DCS_TRIG_SERIES_1_MASK 0x0200 /* DCS_TRIG_SERIES_1 */ +#define WM8994_DCS_TRIG_SERIES_1_SHIFT 9 /* DCS_TRIG_SERIES_1 */ +#define WM8994_DCS_TRIG_SERIES_1_WIDTH 1 /* DCS_TRIG_SERIES_1 */ +#define WM8994_DCS_TRIG_SERIES_0 0x0100 /* DCS_TRIG_SERIES_0 */ +#define WM8994_DCS_TRIG_SERIES_0_MASK 0x0100 /* DCS_TRIG_SERIES_0 */ +#define WM8994_DCS_TRIG_SERIES_0_SHIFT 8 /* DCS_TRIG_SERIES_0 */ +#define WM8994_DCS_TRIG_SERIES_0_WIDTH 1 /* 
DCS_TRIG_SERIES_0 */ +#define WM8994_DCS_TRIG_STARTUP_1 0x0020 /* DCS_TRIG_STARTUP_1 */ +#define WM8994_DCS_TRIG_STARTUP_1_MASK 0x0020 /* DCS_TRIG_STARTUP_1 */ +#define WM8994_DCS_TRIG_STARTUP_1_SHIFT 5 /* DCS_TRIG_STARTUP_1 */ +#define WM8994_DCS_TRIG_STARTUP_1_WIDTH 1 /* DCS_TRIG_STARTUP_1 */ +#define WM8994_DCS_TRIG_STARTUP_0 0x0010 /* DCS_TRIG_STARTUP_0 */ +#define WM8994_DCS_TRIG_STARTUP_0_MASK 0x0010 /* DCS_TRIG_STARTUP_0 */ +#define WM8994_DCS_TRIG_STARTUP_0_SHIFT 4 /* DCS_TRIG_STARTUP_0 */ +#define WM8994_DCS_TRIG_STARTUP_0_WIDTH 1 /* DCS_TRIG_STARTUP_0 */ +#define WM8994_DCS_TRIG_DAC_WR_1 0x0008 /* DCS_TRIG_DAC_WR_1 */ +#define WM8994_DCS_TRIG_DAC_WR_1_MASK 0x0008 /* DCS_TRIG_DAC_WR_1 */ +#define WM8994_DCS_TRIG_DAC_WR_1_SHIFT 3 /* DCS_TRIG_DAC_WR_1 */ +#define WM8994_DCS_TRIG_DAC_WR_1_WIDTH 1 /* DCS_TRIG_DAC_WR_1 */ +#define WM8994_DCS_TRIG_DAC_WR_0 0x0004 /* DCS_TRIG_DAC_WR_0 */ +#define WM8994_DCS_TRIG_DAC_WR_0_MASK 0x0004 /* DCS_TRIG_DAC_WR_0 */ +#define WM8994_DCS_TRIG_DAC_WR_0_SHIFT 2 /* DCS_TRIG_DAC_WR_0 */ +#define WM8994_DCS_TRIG_DAC_WR_0_WIDTH 1 /* DCS_TRIG_DAC_WR_0 */ +#define WM8994_DCS_ENA_CHAN_1 0x0002 /* DCS_ENA_CHAN_1 */ +#define WM8994_DCS_ENA_CHAN_1_MASK 0x0002 /* DCS_ENA_CHAN_1 */ +#define WM8994_DCS_ENA_CHAN_1_SHIFT 1 /* DCS_ENA_CHAN_1 */ +#define WM8994_DCS_ENA_CHAN_1_WIDTH 1 /* DCS_ENA_CHAN_1 */ +#define WM8994_DCS_ENA_CHAN_0 0x0001 /* DCS_ENA_CHAN_0 */ +#define WM8994_DCS_ENA_CHAN_0_MASK 0x0001 /* DCS_ENA_CHAN_0 */ +#define WM8994_DCS_ENA_CHAN_0_SHIFT 0 /* DCS_ENA_CHAN_0 */ +#define WM8994_DCS_ENA_CHAN_0_WIDTH 1 /* DCS_ENA_CHAN_0 */ + +/* + * R85 (0x55) - DC Servo (2) + */ +#define WM8994_DCS_SERIES_NO_01_MASK 0x0FE0 /* DCS_SERIES_NO_01 - [11:5] */ +#define WM8994_DCS_SERIES_NO_01_SHIFT 5 /* DCS_SERIES_NO_01 - [11:5] */ +#define WM8994_DCS_SERIES_NO_01_WIDTH 7 /* DCS_SERIES_NO_01 - [11:5] */ +#define WM8994_DCS_TIMER_PERIOD_01_MASK 0x000F /* DCS_TIMER_PERIOD_01 - [3:0] */ +#define WM8994_DCS_TIMER_PERIOD_01_SHIFT 0 /* DCS_TIMER_PERIOD_01 - [3:0] */ +#define WM8994_DCS_TIMER_PERIOD_01_WIDTH 4 /* DCS_TIMER_PERIOD_01 - [3:0] */ + +/* + * R87 (0x57) - DC Servo (4) + */ +#define WM8994_DCS_DAC_WR_VAL_1_MASK 0xFF00 /* DCS_DAC_WR_VAL_1 - [15:8] */ +#define WM8994_DCS_DAC_WR_VAL_1_SHIFT 8 /* DCS_DAC_WR_VAL_1 - [15:8] */ +#define WM8994_DCS_DAC_WR_VAL_1_WIDTH 8 /* DCS_DAC_WR_VAL_1 - [15:8] */ +#define WM8994_DCS_DAC_WR_VAL_0_MASK 0x00FF /* DCS_DAC_WR_VAL_0 - [7:0] */ +#define WM8994_DCS_DAC_WR_VAL_0_SHIFT 0 /* DCS_DAC_WR_VAL_0 - [7:0] */ +#define WM8994_DCS_DAC_WR_VAL_0_WIDTH 8 /* DCS_DAC_WR_VAL_0 - [7:0] */ + +/* + * R88 (0x58) - DC Servo Readback + */ +#define WM8994_DCS_CAL_COMPLETE_MASK 0x0300 /* DCS_CAL_COMPLETE - [9:8] */ +#define WM8994_DCS_CAL_COMPLETE_SHIFT 8 /* DCS_CAL_COMPLETE - [9:8] */ +#define WM8994_DCS_CAL_COMPLETE_WIDTH 2 /* DCS_CAL_COMPLETE - [9:8] */ +#define WM8994_DCS_DAC_WR_COMPLETE_MASK 0x0030 /* DCS_DAC_WR_COMPLETE - [5:4] */ +#define WM8994_DCS_DAC_WR_COMPLETE_SHIFT 4 /* DCS_DAC_WR_COMPLETE - [5:4] */ +#define WM8994_DCS_DAC_WR_COMPLETE_WIDTH 2 /* DCS_DAC_WR_COMPLETE - [5:4] */ +#define WM8994_DCS_STARTUP_COMPLETE_MASK 0x0003 /* DCS_STARTUP_COMPLETE - [1:0] */ +#define WM8994_DCS_STARTUP_COMPLETE_SHIFT 0 /* DCS_STARTUP_COMPLETE - [1:0] */ +#define WM8994_DCS_STARTUP_COMPLETE_WIDTH 2 /* DCS_STARTUP_COMPLETE - [1:0] */ + +/* + * R96 (0x60) - Analogue HP (1) + */ +#define WM1811_HPOUT1_ATTN 0x0100 /* HPOUT1_ATTN */ +#define WM1811_HPOUT1_ATTN_MASK 0x0100 /* HPOUT1_ATTN */ +#define WM1811_HPOUT1_ATTN_SHIFT 8 /* HPOUT1_ATTN */ +#define 
WM1811_HPOUT1_ATTN_WIDTH 1 /* HPOUT1_ATTN */ +#define WM8994_HPOUT1L_RMV_SHORT 0x0080 /* HPOUT1L_RMV_SHORT */ +#define WM8994_HPOUT1L_RMV_SHORT_MASK 0x0080 /* HPOUT1L_RMV_SHORT */ +#define WM8994_HPOUT1L_RMV_SHORT_SHIFT 7 /* HPOUT1L_RMV_SHORT */ +#define WM8994_HPOUT1L_RMV_SHORT_WIDTH 1 /* HPOUT1L_RMV_SHORT */ +#define WM8994_HPOUT1L_OUTP 0x0040 /* HPOUT1L_OUTP */ +#define WM8994_HPOUT1L_OUTP_MASK 0x0040 /* HPOUT1L_OUTP */ +#define WM8994_HPOUT1L_OUTP_SHIFT 6 /* HPOUT1L_OUTP */ +#define WM8994_HPOUT1L_OUTP_WIDTH 1 /* HPOUT1L_OUTP */ +#define WM8994_HPOUT1L_DLY 0x0020 /* HPOUT1L_DLY */ +#define WM8994_HPOUT1L_DLY_MASK 0x0020 /* HPOUT1L_DLY */ +#define WM8994_HPOUT1L_DLY_SHIFT 5 /* HPOUT1L_DLY */ +#define WM8994_HPOUT1L_DLY_WIDTH 1 /* HPOUT1L_DLY */ +#define WM8994_HPOUT1R_RMV_SHORT 0x0008 /* HPOUT1R_RMV_SHORT */ +#define WM8994_HPOUT1R_RMV_SHORT_MASK 0x0008 /* HPOUT1R_RMV_SHORT */ +#define WM8994_HPOUT1R_RMV_SHORT_SHIFT 3 /* HPOUT1R_RMV_SHORT */ +#define WM8994_HPOUT1R_RMV_SHORT_WIDTH 1 /* HPOUT1R_RMV_SHORT */ +#define WM8994_HPOUT1R_OUTP 0x0004 /* HPOUT1R_OUTP */ +#define WM8994_HPOUT1R_OUTP_MASK 0x0004 /* HPOUT1R_OUTP */ +#define WM8994_HPOUT1R_OUTP_SHIFT 2 /* HPOUT1R_OUTP */ +#define WM8994_HPOUT1R_OUTP_WIDTH 1 /* HPOUT1R_OUTP */ +#define WM8994_HPOUT1R_DLY 0x0002 /* HPOUT1R_DLY */ +#define WM8994_HPOUT1R_DLY_MASK 0x0002 /* HPOUT1R_DLY */ +#define WM8994_HPOUT1R_DLY_SHIFT 1 /* HPOUT1R_DLY */ +#define WM8994_HPOUT1R_DLY_WIDTH 1 /* HPOUT1R_DLY */ + +/* + * R208 (0xD0) - Mic Detect 1 + */ +#define WM8958_MICD_BIAS_STARTTIME_MASK 0xF000 /* MICD_BIAS_STARTTIME - [15:12] */ +#define WM8958_MICD_BIAS_STARTTIME_SHIFT 12 /* MICD_BIAS_STARTTIME - [15:12] */ +#define WM8958_MICD_BIAS_STARTTIME_WIDTH 4 /* MICD_BIAS_STARTTIME - [15:12] */ +#define WM8958_MICD_RATE_MASK 0x0F00 /* MICD_RATE - [11:8] */ +#define WM8958_MICD_RATE_SHIFT 8 /* MICD_RATE - [11:8] */ +#define WM8958_MICD_RATE_WIDTH 4 /* MICD_RATE - [11:8] */ +#define WM8958_MICD_DBTIME 0x0002 /* MICD_DBTIME */ +#define WM8958_MICD_DBTIME_MASK 0x0002 /* MICD_DBTIME */ +#define WM8958_MICD_DBTIME_SHIFT 1 /* MICD_DBTIME */ +#define WM8958_MICD_DBTIME_WIDTH 1 /* MICD_DBTIME */ +#define WM8958_MICD_ENA 0x0001 /* MICD_ENA */ +#define WM8958_MICD_ENA_MASK 0x0001 /* MICD_ENA */ +#define WM8958_MICD_ENA_SHIFT 0 /* MICD_ENA */ +#define WM8958_MICD_ENA_WIDTH 1 /* MICD_ENA */ + +/* + * R209 (0xD1) - Mic Detect 2 + */ +#define WM8958_MICD_LVL_SEL_MASK 0x00FF /* MICD_LVL_SEL - [7:0] */ +#define WM8958_MICD_LVL_SEL_SHIFT 0 /* MICD_LVL_SEL - [7:0] */ +#define WM8958_MICD_LVL_SEL_WIDTH 8 /* MICD_LVL_SEL - [7:0] */ + +/* + * R210 (0xD2) - Mic Detect 3 + */ +#define WM8958_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */ +#define WM8958_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */ +#define WM8958_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */ +#define WM8958_MICD_VALID 0x0002 /* MICD_VALID */ +#define WM8958_MICD_VALID_MASK 0x0002 /* MICD_VALID */ +#define WM8958_MICD_VALID_SHIFT 1 /* MICD_VALID */ +#define WM8958_MICD_VALID_WIDTH 1 /* MICD_VALID */ +#define WM8958_MICD_STS 0x0001 /* MICD_STS */ +#define WM8958_MICD_STS_MASK 0x0001 /* MICD_STS */ +#define WM8958_MICD_STS_SHIFT 0 /* MICD_STS */ +#define WM8958_MICD_STS_WIDTH 1 /* MICD_STS */ + +/* + * R256 (0x100) - Chip Revision + */ +#define WM8994_CUST_ID_MASK 0xFF00 /* CUST_ID - [15:8] */ +#define WM8994_CUST_ID_SHIFT 8 /* CUST_ID - [15:8] */ +#define WM8994_CUST_ID_WIDTH 8 /* CUST_ID - [15:8] */ +#define WM8994_CHIP_REV_MASK 0x000F /* CHIP_REV - [3:0] */ +#define WM8994_CHIP_REV_SHIFT 0 /* CHIP_REV - [3:0] */ +#define 
WM8994_CHIP_REV_WIDTH 4 /* CHIP_REV - [3:0] */ + +/* + * R257 (0x101) - Control Interface + */ +#define WM8994_SPI_CONTRD 0x0040 /* SPI_CONTRD */ +#define WM8994_SPI_CONTRD_MASK 0x0040 /* SPI_CONTRD */ +#define WM8994_SPI_CONTRD_SHIFT 6 /* SPI_CONTRD */ +#define WM8994_SPI_CONTRD_WIDTH 1 /* SPI_CONTRD */ +#define WM8994_SPI_4WIRE 0x0020 /* SPI_4WIRE */ +#define WM8994_SPI_4WIRE_MASK 0x0020 /* SPI_4WIRE */ +#define WM8994_SPI_4WIRE_SHIFT 5 /* SPI_4WIRE */ +#define WM8994_SPI_4WIRE_WIDTH 1 /* SPI_4WIRE */ +#define WM8994_SPI_CFG 0x0010 /* SPI_CFG */ +#define WM8994_SPI_CFG_MASK 0x0010 /* SPI_CFG */ +#define WM8994_SPI_CFG_SHIFT 4 /* SPI_CFG */ +#define WM8994_SPI_CFG_WIDTH 1 /* SPI_CFG */ +#define WM8994_AUTO_INC 0x0004 /* AUTO_INC */ +#define WM8994_AUTO_INC_MASK 0x0004 /* AUTO_INC */ +#define WM8994_AUTO_INC_SHIFT 2 /* AUTO_INC */ +#define WM8994_AUTO_INC_WIDTH 1 /* AUTO_INC */ + +/* + * R272 (0x110) - Write Sequencer Ctrl (1) + */ +#define WM8994_WSEQ_ENA 0x8000 /* WSEQ_ENA */ +#define WM8994_WSEQ_ENA_MASK 0x8000 /* WSEQ_ENA */ +#define WM8994_WSEQ_ENA_SHIFT 15 /* WSEQ_ENA */ +#define WM8994_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */ +#define WM8994_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */ +#define WM8994_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */ +#define WM8994_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */ +#define WM8994_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */ +#define WM8994_WSEQ_START 0x0100 /* WSEQ_START */ +#define WM8994_WSEQ_START_MASK 0x0100 /* WSEQ_START */ +#define WM8994_WSEQ_START_SHIFT 8 /* WSEQ_START */ +#define WM8994_WSEQ_START_WIDTH 1 /* WSEQ_START */ +#define WM8994_WSEQ_START_INDEX_MASK 0x007F /* WSEQ_START_INDEX - [6:0] */ +#define WM8994_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [6:0] */ +#define WM8994_WSEQ_START_INDEX_WIDTH 7 /* WSEQ_START_INDEX - [6:0] */ + +/* + * R273 (0x111) - Write Sequencer Ctrl (2) + */ +#define WM8994_WSEQ_BUSY 0x0100 /* WSEQ_BUSY */ +#define WM8994_WSEQ_BUSY_MASK 0x0100 /* WSEQ_BUSY */ +#define WM8994_WSEQ_BUSY_SHIFT 8 /* WSEQ_BUSY */ +#define WM8994_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */ +#define WM8994_WSEQ_CURRENT_INDEX_MASK 0x007F /* WSEQ_CURRENT_INDEX - [6:0] */ +#define WM8994_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [6:0] */ +#define WM8994_WSEQ_CURRENT_INDEX_WIDTH 7 /* WSEQ_CURRENT_INDEX - [6:0] */ + +/* + * R512 (0x200) - AIF1 Clocking (1) + */ +#define WM8994_AIF1CLK_SRC_MASK 0x0018 /* AIF1CLK_SRC - [4:3] */ +#define WM8994_AIF1CLK_SRC_SHIFT 3 /* AIF1CLK_SRC - [4:3] */ +#define WM8994_AIF1CLK_SRC_WIDTH 2 /* AIF1CLK_SRC - [4:3] */ +#define WM8994_AIF1CLK_INV 0x0004 /* AIF1CLK_INV */ +#define WM8994_AIF1CLK_INV_MASK 0x0004 /* AIF1CLK_INV */ +#define WM8994_AIF1CLK_INV_SHIFT 2 /* AIF1CLK_INV */ +#define WM8994_AIF1CLK_INV_WIDTH 1 /* AIF1CLK_INV */ +#define WM8994_AIF1CLK_DIV 0x0002 /* AIF1CLK_DIV */ +#define WM8994_AIF1CLK_DIV_MASK 0x0002 /* AIF1CLK_DIV */ +#define WM8994_AIF1CLK_DIV_SHIFT 1 /* AIF1CLK_DIV */ +#define WM8994_AIF1CLK_DIV_WIDTH 1 /* AIF1CLK_DIV */ +#define WM8994_AIF1CLK_ENA 0x0001 /* AIF1CLK_ENA */ +#define WM8994_AIF1CLK_ENA_MASK 0x0001 /* AIF1CLK_ENA */ +#define WM8994_AIF1CLK_ENA_SHIFT 0 /* AIF1CLK_ENA */ +#define WM8994_AIF1CLK_ENA_WIDTH 1 /* AIF1CLK_ENA */ + +/* + * R513 (0x201) - AIF1 Clocking (2) + */ +#define WM8994_AIF1DAC_DIV_MASK 0x0038 /* AIF1DAC_DIV - [5:3] */ +#define WM8994_AIF1DAC_DIV_SHIFT 3 /* AIF1DAC_DIV - [5:3] */ +#define WM8994_AIF1DAC_DIV_WIDTH 3 /* AIF1DAC_DIV - [5:3] */ +#define WM8994_AIF1ADC_DIV_MASK 0x0007 /* AIF1ADC_DIV - [2:0] */ +#define WM8994_AIF1ADC_DIV_SHIFT 0 /* AIF1ADC_DIV - [2:0] */ +#define 
WM8994_AIF1ADC_DIV_WIDTH 3 /* AIF1ADC_DIV - [2:0] */ + +/* + * R516 (0x204) - AIF2 Clocking (1) + */ +#define WM8994_AIF2CLK_SRC_MASK 0x0018 /* AIF2CLK_SRC - [4:3] */ +#define WM8994_AIF2CLK_SRC_SHIFT 3 /* AIF2CLK_SRC - [4:3] */ +#define WM8994_AIF2CLK_SRC_WIDTH 2 /* AIF2CLK_SRC - [4:3] */ +#define WM8994_AIF2CLK_INV 0x0004 /* AIF2CLK_INV */ +#define WM8994_AIF2CLK_INV_MASK 0x0004 /* AIF2CLK_INV */ +#define WM8994_AIF2CLK_INV_SHIFT 2 /* AIF2CLK_INV */ +#define WM8994_AIF2CLK_INV_WIDTH 1 /* AIF2CLK_INV */ +#define WM8994_AIF2CLK_DIV 0x0002 /* AIF2CLK_DIV */ +#define WM8994_AIF2CLK_DIV_MASK 0x0002 /* AIF2CLK_DIV */ +#define WM8994_AIF2CLK_DIV_SHIFT 1 /* AIF2CLK_DIV */ +#define WM8994_AIF2CLK_DIV_WIDTH 1 /* AIF2CLK_DIV */ +#define WM8994_AIF2CLK_ENA 0x0001 /* AIF2CLK_ENA */ +#define WM8994_AIF2CLK_ENA_MASK 0x0001 /* AIF2CLK_ENA */ +#define WM8994_AIF2CLK_ENA_SHIFT 0 /* AIF2CLK_ENA */ +#define WM8994_AIF2CLK_ENA_WIDTH 1 /* AIF2CLK_ENA */ + +/* + * R517 (0x205) - AIF2 Clocking (2) + */ +#define WM8994_AIF2DAC_DIV_MASK 0x0038 /* AIF2DAC_DIV - [5:3] */ +#define WM8994_AIF2DAC_DIV_SHIFT 3 /* AIF2DAC_DIV - [5:3] */ +#define WM8994_AIF2DAC_DIV_WIDTH 3 /* AIF2DAC_DIV - [5:3] */ +#define WM8994_AIF2ADC_DIV_MASK 0x0007 /* AIF2ADC_DIV - [2:0] */ +#define WM8994_AIF2ADC_DIV_SHIFT 0 /* AIF2ADC_DIV - [2:0] */ +#define WM8994_AIF2ADC_DIV_WIDTH 3 /* AIF2ADC_DIV - [2:0] */ + +/* + * R520 (0x208) - Clocking (1) + */ +#define WM8958_DSP2CLK_ENA 0x4000 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_ENA_MASK 0x4000 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_ENA_SHIFT 14 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_ENA_WIDTH 1 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_SRC 0x1000 /* DSP2CLK_SRC */ +#define WM8958_DSP2CLK_SRC_MASK 0x1000 /* DSP2CLK_SRC */ +#define WM8958_DSP2CLK_SRC_SHIFT 12 /* DSP2CLK_SRC */ +#define WM8958_DSP2CLK_SRC_WIDTH 1 /* DSP2CLK_SRC */ +#define WM8994_TOCLK_ENA 0x0010 /* TOCLK_ENA */ +#define WM8994_TOCLK_ENA_MASK 0x0010 /* TOCLK_ENA */ +#define WM8994_TOCLK_ENA_SHIFT 4 /* TOCLK_ENA */ +#define WM8994_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */ +#define WM8994_AIF1DSPCLK_ENA 0x0008 /* AIF1DSPCLK_ENA */ +#define WM8994_AIF1DSPCLK_ENA_MASK 0x0008 /* AIF1DSPCLK_ENA */ +#define WM8994_AIF1DSPCLK_ENA_SHIFT 3 /* AIF1DSPCLK_ENA */ +#define WM8994_AIF1DSPCLK_ENA_WIDTH 1 /* AIF1DSPCLK_ENA */ +#define WM8994_AIF2DSPCLK_ENA 0x0004 /* AIF2DSPCLK_ENA */ +#define WM8994_AIF2DSPCLK_ENA_MASK 0x0004 /* AIF2DSPCLK_ENA */ +#define WM8994_AIF2DSPCLK_ENA_SHIFT 2 /* AIF2DSPCLK_ENA */ +#define WM8994_AIF2DSPCLK_ENA_WIDTH 1 /* AIF2DSPCLK_ENA */ +#define WM8994_SYSDSPCLK_ENA 0x0002 /* SYSDSPCLK_ENA */ +#define WM8994_SYSDSPCLK_ENA_MASK 0x0002 /* SYSDSPCLK_ENA */ +#define WM8994_SYSDSPCLK_ENA_SHIFT 1 /* SYSDSPCLK_ENA */ +#define WM8994_SYSDSPCLK_ENA_WIDTH 1 /* SYSDSPCLK_ENA */ +#define WM8994_SYSCLK_SRC 0x0001 /* SYSCLK_SRC */ +#define WM8994_SYSCLK_SRC_MASK 0x0001 /* SYSCLK_SRC */ +#define WM8994_SYSCLK_SRC_SHIFT 0 /* SYSCLK_SRC */ +#define WM8994_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */ + +/* + * R521 (0x209) - Clocking (2) + */ +#define WM8994_TOCLK_DIV_MASK 0x0700 /* TOCLK_DIV - [10:8] */ +#define WM8994_TOCLK_DIV_SHIFT 8 /* TOCLK_DIV - [10:8] */ +#define WM8994_TOCLK_DIV_WIDTH 3 /* TOCLK_DIV - [10:8] */ +#define WM8994_DBCLK_DIV_MASK 0x0070 /* DBCLK_DIV - [6:4] */ +#define WM8994_DBCLK_DIV_SHIFT 4 /* DBCLK_DIV - [6:4] */ +#define WM8994_DBCLK_DIV_WIDTH 3 /* DBCLK_DIV - [6:4] */ +#define WM8994_OPCLK_DIV_MASK 0x0007 /* OPCLK_DIV - [2:0] */ +#define WM8994_OPCLK_DIV_SHIFT 0 /* OPCLK_DIV - [2:0] */ +#define WM8994_OPCLK_DIV_WIDTH 3 /* 
OPCLK_DIV - [2:0] */ + +/* + * R528 (0x210) - AIF1 Rate + */ +#define WM8994_AIF1_SR_MASK 0x00F0 /* AIF1_SR - [7:4] */ +#define WM8994_AIF1_SR_SHIFT 4 /* AIF1_SR - [7:4] */ +#define WM8994_AIF1_SR_WIDTH 4 /* AIF1_SR - [7:4] */ +#define WM8994_AIF1CLK_RATE_MASK 0x000F /* AIF1CLK_RATE - [3:0] */ +#define WM8994_AIF1CLK_RATE_SHIFT 0 /* AIF1CLK_RATE - [3:0] */ +#define WM8994_AIF1CLK_RATE_WIDTH 4 /* AIF1CLK_RATE - [3:0] */ + +/* + * R529 (0x211) - AIF2 Rate + */ +#define WM8994_AIF2_SR_MASK 0x00F0 /* AIF2_SR - [7:4] */ +#define WM8994_AIF2_SR_SHIFT 4 /* AIF2_SR - [7:4] */ +#define WM8994_AIF2_SR_WIDTH 4 /* AIF2_SR - [7:4] */ +#define WM8994_AIF2CLK_RATE_MASK 0x000F /* AIF2CLK_RATE - [3:0] */ +#define WM8994_AIF2CLK_RATE_SHIFT 0 /* AIF2CLK_RATE - [3:0] */ +#define WM8994_AIF2CLK_RATE_WIDTH 4 /* AIF2CLK_RATE - [3:0] */ + +/* + * R530 (0x212) - Rate Status + */ +#define WM8994_SR_ERROR_MASK 0x000F /* SR_ERROR - [3:0] */ +#define WM8994_SR_ERROR_SHIFT 0 /* SR_ERROR - [3:0] */ +#define WM8994_SR_ERROR_WIDTH 4 /* SR_ERROR - [3:0] */ + +/* + * R544 (0x220) - FLL1 Control (1) + */ +#define WM8994_FLL1_FRAC 0x0004 /* FLL1_FRAC */ +#define WM8994_FLL1_FRAC_MASK 0x0004 /* FLL1_FRAC */ +#define WM8994_FLL1_FRAC_SHIFT 2 /* FLL1_FRAC */ +#define WM8994_FLL1_FRAC_WIDTH 1 /* FLL1_FRAC */ +#define WM8994_FLL1_OSC_ENA 0x0002 /* FLL1_OSC_ENA */ +#define WM8994_FLL1_OSC_ENA_MASK 0x0002 /* FLL1_OSC_ENA */ +#define WM8994_FLL1_OSC_ENA_SHIFT 1 /* FLL1_OSC_ENA */ +#define WM8994_FLL1_OSC_ENA_WIDTH 1 /* FLL1_OSC_ENA */ +#define WM8994_FLL1_ENA 0x0001 /* FLL1_ENA */ +#define WM8994_FLL1_ENA_MASK 0x0001 /* FLL1_ENA */ +#define WM8994_FLL1_ENA_SHIFT 0 /* FLL1_ENA */ +#define WM8994_FLL1_ENA_WIDTH 1 /* FLL1_ENA */ + +/* + * R545 (0x221) - FLL1 Control (2) + */ +#define WM8994_FLL1_OUTDIV_MASK 0x3F00 /* FLL1_OUTDIV - [13:8] */ +#define WM8994_FLL1_OUTDIV_SHIFT 8 /* FLL1_OUTDIV - [13:8] */ +#define WM8994_FLL1_OUTDIV_WIDTH 6 /* FLL1_OUTDIV - [13:8] */ +#define WM8994_FLL1_CTRL_RATE_MASK 0x0070 /* FLL1_CTRL_RATE - [6:4] */ +#define WM8994_FLL1_CTRL_RATE_SHIFT 4 /* FLL1_CTRL_RATE - [6:4] */ +#define WM8994_FLL1_CTRL_RATE_WIDTH 3 /* FLL1_CTRL_RATE - [6:4] */ +#define WM8994_FLL1_FRATIO_MASK 0x0007 /* FLL1_FRATIO - [2:0] */ +#define WM8994_FLL1_FRATIO_SHIFT 0 /* FLL1_FRATIO - [2:0] */ +#define WM8994_FLL1_FRATIO_WIDTH 3 /* FLL1_FRATIO - [2:0] */ + +/* + * R546 (0x222) - FLL1 Control (3) + */ +#define WM8994_FLL1_K_MASK 0xFFFF /* FLL1_K - [15:0] */ +#define WM8994_FLL1_K_SHIFT 0 /* FLL1_K - [15:0] */ +#define WM8994_FLL1_K_WIDTH 16 /* FLL1_K - [15:0] */ + +/* + * R547 (0x223) - FLL1 Control (4) + */ +#define WM8994_FLL1_N_MASK 0x7FE0 /* FLL1_N - [14:5] */ +#define WM8994_FLL1_N_SHIFT 5 /* FLL1_N - [14:5] */ +#define WM8994_FLL1_N_WIDTH 10 /* FLL1_N - [14:5] */ +#define WM8994_FLL1_LOOP_GAIN_MASK 0x000F /* FLL1_LOOP_GAIN - [3:0] */ +#define WM8994_FLL1_LOOP_GAIN_SHIFT 0 /* FLL1_LOOP_GAIN - [3:0] */ +#define WM8994_FLL1_LOOP_GAIN_WIDTH 4 /* FLL1_LOOP_GAIN - [3:0] */ + +/* + * R548 (0x224) - FLL1 Control (5) + */ +#define WM8958_FLL1_BYP 0x8000 /* FLL1_BYP */ +#define WM8958_FLL1_BYP_MASK 0x8000 /* FLL1_BYP */ +#define WM8958_FLL1_BYP_SHIFT 15 /* FLL1_BYP */ +#define WM8958_FLL1_BYP_WIDTH 1 /* FLL1_BYP */ +#define WM8994_FLL1_FRC_NCO_VAL_MASK 0x1F80 /* FLL1_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL1_FRC_NCO_VAL_SHIFT 7 /* FLL1_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL1_FRC_NCO_VAL_WIDTH 6 /* FLL1_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL1_FRC_NCO 0x0040 /* FLL1_FRC_NCO */ +#define WM8994_FLL1_FRC_NCO_MASK 0x0040 /* 
FLL1_FRC_NCO */ +#define WM8994_FLL1_FRC_NCO_SHIFT 6 /* FLL1_FRC_NCO */ +#define WM8994_FLL1_FRC_NCO_WIDTH 1 /* FLL1_FRC_NCO */ +#define WM8994_FLL1_REFCLK_DIV_MASK 0x0018 /* FLL1_REFCLK_DIV - [4:3] */ +#define WM8994_FLL1_REFCLK_DIV_SHIFT 3 /* FLL1_REFCLK_DIV - [4:3] */ +#define WM8994_FLL1_REFCLK_DIV_WIDTH 2 /* FLL1_REFCLK_DIV - [4:3] */ +#define WM8994_FLL1_REFCLK_SRC_MASK 0x0003 /* FLL1_REFCLK_SRC - [1:0] */ +#define WM8994_FLL1_REFCLK_SRC_SHIFT 0 /* FLL1_REFCLK_SRC - [1:0] */ +#define WM8994_FLL1_REFCLK_SRC_WIDTH 2 /* FLL1_REFCLK_SRC - [1:0] */ + +/* + * R550 (0x226) - FLL1 EFS 1 + */ +#define WM8958_FLL1_LAMBDA_MASK 0xFFFF /* FLL1_LAMBDA - [15:0] */ +#define WM8958_FLL1_LAMBDA_SHIFT 0 /* FLL1_LAMBDA - [15:0] */ +#define WM8958_FLL1_LAMBDA_WIDTH 16 /* FLL1_LAMBDA - [15:0] */ + +/* + * R551 (0x227) - FLL1 EFS 2 + */ +#define WM8958_FLL1_LFSR_SEL_MASK 0x0006 /* FLL1_LFSR_SEL - [2:1] */ +#define WM8958_FLL1_LFSR_SEL_SHIFT 1 /* FLL1_LFSR_SEL - [2:1] */ +#define WM8958_FLL1_LFSR_SEL_WIDTH 2 /* FLL1_LFSR_SEL - [2:1] */ +#define WM8958_FLL1_EFS_ENA 0x0001 /* FLL1_EFS_ENA */ +#define WM8958_FLL1_EFS_ENA_MASK 0x0001 /* FLL1_EFS_ENA */ +#define WM8958_FLL1_EFS_ENA_SHIFT 0 /* FLL1_EFS_ENA */ +#define WM8958_FLL1_EFS_ENA_WIDTH 1 /* FLL1_EFS_ENA */ + +/* + * R576 (0x240) - FLL2 Control (1) + */ +#define WM8994_FLL2_FRAC 0x0004 /* FLL2_FRAC */ +#define WM8994_FLL2_FRAC_MASK 0x0004 /* FLL2_FRAC */ +#define WM8994_FLL2_FRAC_SHIFT 2 /* FLL2_FRAC */ +#define WM8994_FLL2_FRAC_WIDTH 1 /* FLL2_FRAC */ +#define WM8994_FLL2_OSC_ENA 0x0002 /* FLL2_OSC_ENA */ +#define WM8994_FLL2_OSC_ENA_MASK 0x0002 /* FLL2_OSC_ENA */ +#define WM8994_FLL2_OSC_ENA_SHIFT 1 /* FLL2_OSC_ENA */ +#define WM8994_FLL2_OSC_ENA_WIDTH 1 /* FLL2_OSC_ENA */ +#define WM8994_FLL2_ENA 0x0001 /* FLL2_ENA */ +#define WM8994_FLL2_ENA_MASK 0x0001 /* FLL2_ENA */ +#define WM8994_FLL2_ENA_SHIFT 0 /* FLL2_ENA */ +#define WM8994_FLL2_ENA_WIDTH 1 /* FLL2_ENA */ + +/* + * R577 (0x241) - FLL2 Control (2) + */ +#define WM8994_FLL2_OUTDIV_MASK 0x3F00 /* FLL2_OUTDIV - [13:8] */ +#define WM8994_FLL2_OUTDIV_SHIFT 8 /* FLL2_OUTDIV - [13:8] */ +#define WM8994_FLL2_OUTDIV_WIDTH 6 /* FLL2_OUTDIV - [13:8] */ +#define WM8994_FLL2_CTRL_RATE_MASK 0x0070 /* FLL2_CTRL_RATE - [6:4] */ +#define WM8994_FLL2_CTRL_RATE_SHIFT 4 /* FLL2_CTRL_RATE - [6:4] */ +#define WM8994_FLL2_CTRL_RATE_WIDTH 3 /* FLL2_CTRL_RATE - [6:4] */ +#define WM8994_FLL2_FRATIO_MASK 0x0007 /* FLL2_FRATIO - [2:0] */ +#define WM8994_FLL2_FRATIO_SHIFT 0 /* FLL2_FRATIO - [2:0] */ +#define WM8994_FLL2_FRATIO_WIDTH 3 /* FLL2_FRATIO - [2:0] */ + +/* + * R578 (0x242) - FLL2 Control (3) + */ +#define WM8994_FLL2_K_MASK 0xFFFF /* FLL2_K - [15:0] */ +#define WM8994_FLL2_K_SHIFT 0 /* FLL2_K - [15:0] */ +#define WM8994_FLL2_K_WIDTH 16 /* FLL2_K - [15:0] */ + +/* + * R579 (0x243) - FLL2 Control (4) + */ +#define WM8994_FLL2_N_MASK 0x7FE0 /* FLL2_N - [14:5] */ +#define WM8994_FLL2_N_SHIFT 5 /* FLL2_N - [14:5] */ +#define WM8994_FLL2_N_WIDTH 10 /* FLL2_N - [14:5] */ +#define WM8994_FLL2_LOOP_GAIN_MASK 0x000F /* FLL2_LOOP_GAIN - [3:0] */ +#define WM8994_FLL2_LOOP_GAIN_SHIFT 0 /* FLL2_LOOP_GAIN - [3:0] */ +#define WM8994_FLL2_LOOP_GAIN_WIDTH 4 /* FLL2_LOOP_GAIN - [3:0] */ + +/* + * R580 (0x244) - FLL2 Control (5) + */ +#define WM8958_FLL2_BYP 0x8000 /* FLL2_BYP */ +#define WM8958_FLL2_BYP_MASK 0x8000 /* FLL2_BYP */ +#define WM8958_FLL2_BYP_SHIFT 15 /* FLL2_BYP */ +#define WM8958_FLL2_BYP_WIDTH 1 /* FLL2_BYP */ +#define WM8994_FLL2_FRC_NCO_VAL_MASK 0x1F80 /* FLL2_FRC_NCO_VAL - [12:7] */ +#define 
WM8994_FLL2_FRC_NCO_VAL_SHIFT 7 /* FLL2_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL2_FRC_NCO_VAL_WIDTH 6 /* FLL2_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL2_FRC_NCO 0x0040 /* FLL2_FRC_NCO */ +#define WM8994_FLL2_FRC_NCO_MASK 0x0040 /* FLL2_FRC_NCO */ +#define WM8994_FLL2_FRC_NCO_SHIFT 6 /* FLL2_FRC_NCO */ +#define WM8994_FLL2_FRC_NCO_WIDTH 1 /* FLL2_FRC_NCO */ +#define WM8994_FLL2_REFCLK_DIV_MASK 0x0018 /* FLL2_REFCLK_DIV - [4:3] */ +#define WM8994_FLL2_REFCLK_DIV_SHIFT 3 /* FLL2_REFCLK_DIV - [4:3] */ +#define WM8994_FLL2_REFCLK_DIV_WIDTH 2 /* FLL2_REFCLK_DIV - [4:3] */ +#define WM8994_FLL2_REFCLK_SRC_MASK 0x0003 /* FLL2_REFCLK_SRC - [1:0] */ +#define WM8994_FLL2_REFCLK_SRC_SHIFT 0 /* FLL2_REFCLK_SRC - [1:0] */ +#define WM8994_FLL2_REFCLK_SRC_WIDTH 2 /* FLL2_REFCLK_SRC - [1:0] */ + +/* + * R582 (0x246) - FLL2 EFS 1 + */ +#define WM8958_FLL2_LAMBDA_MASK 0xFFFF /* FLL2_LAMBDA - [15:0] */ +#define WM8958_FLL2_LAMBDA_SHIFT 0 /* FLL2_LAMBDA - [15:0] */ +#define WM8958_FLL2_LAMBDA_WIDTH 16 /* FLL2_LAMBDA - [15:0] */ + +/* + * R583 (0x247) - FLL2 EFS 2 + */ +#define WM8958_FLL2_LFSR_SEL_MASK 0x0006 /* FLL2_LFSR_SEL - [2:1] */ +#define WM8958_FLL2_LFSR_SEL_SHIFT 1 /* FLL2_LFSR_SEL - [2:1] */ +#define WM8958_FLL2_LFSR_SEL_WIDTH 2 /* FLL2_LFSR_SEL - [2:1] */ +#define WM8958_FLL2_EFS_ENA 0x0001 /* FLL2_EFS_ENA */ +#define WM8958_FLL2_EFS_ENA_MASK 0x0001 /* FLL2_EFS_ENA */ +#define WM8958_FLL2_EFS_ENA_SHIFT 0 /* FLL2_EFS_ENA */ +#define WM8958_FLL2_EFS_ENA_WIDTH 1 /* FLL2_EFS_ENA */ + +/* + * R768 (0x300) - AIF1 Control (1) + */ +#define WM8994_AIF1ADCL_SRC 0x8000 /* AIF1ADCL_SRC */ +#define WM8994_AIF1ADCL_SRC_MASK 0x8000 /* AIF1ADCL_SRC */ +#define WM8994_AIF1ADCL_SRC_SHIFT 15 /* AIF1ADCL_SRC */ +#define WM8994_AIF1ADCL_SRC_WIDTH 1 /* AIF1ADCL_SRC */ +#define WM8994_AIF1ADCR_SRC 0x4000 /* AIF1ADCR_SRC */ +#define WM8994_AIF1ADCR_SRC_MASK 0x4000 /* AIF1ADCR_SRC */ +#define WM8994_AIF1ADCR_SRC_SHIFT 14 /* AIF1ADCR_SRC */ +#define WM8994_AIF1ADCR_SRC_WIDTH 1 /* AIF1ADCR_SRC */ +#define WM8994_AIF1ADC_TDM 0x2000 /* AIF1ADC_TDM */ +#define WM8994_AIF1ADC_TDM_MASK 0x2000 /* AIF1ADC_TDM */ +#define WM8994_AIF1ADC_TDM_SHIFT 13 /* AIF1ADC_TDM */ +#define WM8994_AIF1ADC_TDM_WIDTH 1 /* AIF1ADC_TDM */ +#define WM8994_AIF1_BCLK_INV 0x0100 /* AIF1_BCLK_INV */ +#define WM8994_AIF1_BCLK_INV_MASK 0x0100 /* AIF1_BCLK_INV */ +#define WM8994_AIF1_BCLK_INV_SHIFT 8 /* AIF1_BCLK_INV */ +#define WM8994_AIF1_BCLK_INV_WIDTH 1 /* AIF1_BCLK_INV */ +#define WM8994_AIF1_LRCLK_INV 0x0080 /* AIF1_LRCLK_INV */ +#define WM8994_AIF1_LRCLK_INV_MASK 0x0080 /* AIF1_LRCLK_INV */ +#define WM8994_AIF1_LRCLK_INV_SHIFT 7 /* AIF1_LRCLK_INV */ +#define WM8994_AIF1_LRCLK_INV_WIDTH 1 /* AIF1_LRCLK_INV */ +#define WM8994_AIF1_WL_MASK 0x0060 /* AIF1_WL - [6:5] */ +#define WM8994_AIF1_WL_SHIFT 5 /* AIF1_WL - [6:5] */ +#define WM8994_AIF1_WL_WIDTH 2 /* AIF1_WL - [6:5] */ +#define WM8994_AIF1_FMT_MASK 0x0018 /* AIF1_FMT - [4:3] */ +#define WM8994_AIF1_FMT_SHIFT 3 /* AIF1_FMT - [4:3] */ +#define WM8994_AIF1_FMT_WIDTH 2 /* AIF1_FMT - [4:3] */ + +/* + * R769 (0x301) - AIF1 Control (2) + */ +#define WM8994_AIF1DACL_SRC 0x8000 /* AIF1DACL_SRC */ +#define WM8994_AIF1DACL_SRC_MASK 0x8000 /* AIF1DACL_SRC */ +#define WM8994_AIF1DACL_SRC_SHIFT 15 /* AIF1DACL_SRC */ +#define WM8994_AIF1DACL_SRC_WIDTH 1 /* AIF1DACL_SRC */ +#define WM8994_AIF1DACR_SRC 0x4000 /* AIF1DACR_SRC */ +#define WM8994_AIF1DACR_SRC_MASK 0x4000 /* AIF1DACR_SRC */ +#define WM8994_AIF1DACR_SRC_SHIFT 14 /* AIF1DACR_SRC */ +#define WM8994_AIF1DACR_SRC_WIDTH 1 /* AIF1DACR_SRC */ +#define 
WM8994_AIF1DAC_BOOST_MASK 0x0C00 /* AIF1DAC_BOOST - [11:10] */ +#define WM8994_AIF1DAC_BOOST_SHIFT 10 /* AIF1DAC_BOOST - [11:10] */ +#define WM8994_AIF1DAC_BOOST_WIDTH 2 /* AIF1DAC_BOOST - [11:10] */ +#define WM8994_AIF1_MONO 0x0100 /* AIF1_MONO */ +#define WM8994_AIF1_MONO_MASK 0x0100 /* AIF1_MONO */ +#define WM8994_AIF1_MONO_SHIFT 8 /* AIF1_MONO */ +#define WM8994_AIF1_MONO_WIDTH 1 /* AIF1_MONO */ +#define WM8994_AIF1DAC_COMP 0x0010 /* AIF1DAC_COMP */ +#define WM8994_AIF1DAC_COMP_MASK 0x0010 /* AIF1DAC_COMP */ +#define WM8994_AIF1DAC_COMP_SHIFT 4 /* AIF1DAC_COMP */ +#define WM8994_AIF1DAC_COMP_WIDTH 1 /* AIF1DAC_COMP */ +#define WM8994_AIF1DAC_COMPMODE 0x0008 /* AIF1DAC_COMPMODE */ +#define WM8994_AIF1DAC_COMPMODE_MASK 0x0008 /* AIF1DAC_COMPMODE */ +#define WM8994_AIF1DAC_COMPMODE_SHIFT 3 /* AIF1DAC_COMPMODE */ +#define WM8994_AIF1DAC_COMPMODE_WIDTH 1 /* AIF1DAC_COMPMODE */ +#define WM8994_AIF1ADC_COMP 0x0004 /* AIF1ADC_COMP */ +#define WM8994_AIF1ADC_COMP_MASK 0x0004 /* AIF1ADC_COMP */ +#define WM8994_AIF1ADC_COMP_SHIFT 2 /* AIF1ADC_COMP */ +#define WM8994_AIF1ADC_COMP_WIDTH 1 /* AIF1ADC_COMP */ +#define WM8994_AIF1ADC_COMPMODE 0x0002 /* AIF1ADC_COMPMODE */ +#define WM8994_AIF1ADC_COMPMODE_MASK 0x0002 /* AIF1ADC_COMPMODE */ +#define WM8994_AIF1ADC_COMPMODE_SHIFT 1 /* AIF1ADC_COMPMODE */ +#define WM8994_AIF1ADC_COMPMODE_WIDTH 1 /* AIF1ADC_COMPMODE */ +#define WM8994_AIF1_LOOPBACK 0x0001 /* AIF1_LOOPBACK */ +#define WM8994_AIF1_LOOPBACK_MASK 0x0001 /* AIF1_LOOPBACK */ +#define WM8994_AIF1_LOOPBACK_SHIFT 0 /* AIF1_LOOPBACK */ +#define WM8994_AIF1_LOOPBACK_WIDTH 1 /* AIF1_LOOPBACK */ + +/* + * R770 (0x302) - AIF1 Master/Slave + */ +#define WM8994_AIF1_TRI 0x8000 /* AIF1_TRI */ +#define WM8994_AIF1_TRI_MASK 0x8000 /* AIF1_TRI */ +#define WM8994_AIF1_TRI_SHIFT 15 /* AIF1_TRI */ +#define WM8994_AIF1_TRI_WIDTH 1 /* AIF1_TRI */ +#define WM8994_AIF1_MSTR 0x4000 /* AIF1_MSTR */ +#define WM8994_AIF1_MSTR_MASK 0x4000 /* AIF1_MSTR */ +#define WM8994_AIF1_MSTR_SHIFT 14 /* AIF1_MSTR */ +#define WM8994_AIF1_MSTR_WIDTH 1 /* AIF1_MSTR */ +#define WM8994_AIF1_CLK_FRC 0x2000 /* AIF1_CLK_FRC */ +#define WM8994_AIF1_CLK_FRC_MASK 0x2000 /* AIF1_CLK_FRC */ +#define WM8994_AIF1_CLK_FRC_SHIFT 13 /* AIF1_CLK_FRC */ +#define WM8994_AIF1_CLK_FRC_WIDTH 1 /* AIF1_CLK_FRC */ +#define WM8994_AIF1_LRCLK_FRC 0x1000 /* AIF1_LRCLK_FRC */ +#define WM8994_AIF1_LRCLK_FRC_MASK 0x1000 /* AIF1_LRCLK_FRC */ +#define WM8994_AIF1_LRCLK_FRC_SHIFT 12 /* AIF1_LRCLK_FRC */ +#define WM8994_AIF1_LRCLK_FRC_WIDTH 1 /* AIF1_LRCLK_FRC */ + +/* + * R771 (0x303) - AIF1 BCLK + */ +#define WM8994_AIF1_BCLK_DIV_MASK 0x01F0 /* AIF1_BCLK_DIV - [8:4] */ +#define WM8994_AIF1_BCLK_DIV_SHIFT 4 /* AIF1_BCLK_DIV - [8:4] */ +#define WM8994_AIF1_BCLK_DIV_WIDTH 5 /* AIF1_BCLK_DIV - [8:4] */ + +/* + * R772 (0x304) - AIF1ADC LRCLK + */ +#define WM8958_AIF1_LRCLK_INV 0x1000 /* AIF1_LRCLK_INV */ +#define WM8958_AIF1_LRCLK_INV_MASK 0x1000 /* AIF1_LRCLK_INV */ +#define WM8958_AIF1_LRCLK_INV_SHIFT 12 /* AIF1_LRCLK_INV */ +#define WM8958_AIF1_LRCLK_INV_WIDTH 1 /* AIF1_LRCLK_INV */ +#define WM8994_AIF1ADC_LRCLK_DIR 0x0800 /* AIF1ADC_LRCLK_DIR */ +#define WM8994_AIF1ADC_LRCLK_DIR_MASK 0x0800 /* AIF1ADC_LRCLK_DIR */ +#define WM8994_AIF1ADC_LRCLK_DIR_SHIFT 11 /* AIF1ADC_LRCLK_DIR */ +#define WM8994_AIF1ADC_LRCLK_DIR_WIDTH 1 /* AIF1ADC_LRCLK_DIR */ +#define WM8994_AIF1ADC_RATE_MASK 0x07FF /* AIF1ADC_RATE - [10:0] */ +#define WM8994_AIF1ADC_RATE_SHIFT 0 /* AIF1ADC_RATE - [10:0] */ +#define WM8994_AIF1ADC_RATE_WIDTH 11 /* AIF1ADC_RATE - [10:0] */ + +/* + * R773 (0x305) - 
AIF1DAC LRCLK + */ +#define WM8958_AIF1_LRCLK_INV 0x1000 /* AIF1_LRCLK_INV */ +#define WM8958_AIF1_LRCLK_INV_MASK 0x1000 /* AIF1_LRCLK_INV */ +#define WM8958_AIF1_LRCLK_INV_SHIFT 12 /* AIF1_LRCLK_INV */ +#define WM8958_AIF1_LRCLK_INV_WIDTH 1 /* AIF1_LRCLK_INV */ +#define WM8994_AIF1DAC_LRCLK_DIR 0x0800 /* AIF1DAC_LRCLK_DIR */ +#define WM8994_AIF1DAC_LRCLK_DIR_MASK 0x0800 /* AIF1DAC_LRCLK_DIR */ +#define WM8994_AIF1DAC_LRCLK_DIR_SHIFT 11 /* AIF1DAC_LRCLK_DIR */ +#define WM8994_AIF1DAC_LRCLK_DIR_WIDTH 1 /* AIF1DAC_LRCLK_DIR */ +#define WM8994_AIF1DAC_RATE_MASK 0x07FF /* AIF1DAC_RATE - [10:0] */ +#define WM8994_AIF1DAC_RATE_SHIFT 0 /* AIF1DAC_RATE - [10:0] */ +#define WM8994_AIF1DAC_RATE_WIDTH 11 /* AIF1DAC_RATE - [10:0] */ + +/* + * R774 (0x306) - AIF1DAC Data + */ +#define WM8994_AIF1DACL_DAT_INV 0x0002 /* AIF1DACL_DAT_INV */ +#define WM8994_AIF1DACL_DAT_INV_MASK 0x0002 /* AIF1DACL_DAT_INV */ +#define WM8994_AIF1DACL_DAT_INV_SHIFT 1 /* AIF1DACL_DAT_INV */ +#define WM8994_AIF1DACL_DAT_INV_WIDTH 1 /* AIF1DACL_DAT_INV */ +#define WM8994_AIF1DACR_DAT_INV 0x0001 /* AIF1DACR_DAT_INV */ +#define WM8994_AIF1DACR_DAT_INV_MASK 0x0001 /* AIF1DACR_DAT_INV */ +#define WM8994_AIF1DACR_DAT_INV_SHIFT 0 /* AIF1DACR_DAT_INV */ +#define WM8994_AIF1DACR_DAT_INV_WIDTH 1 /* AIF1DACR_DAT_INV */ + +/* + * R775 (0x307) - AIF1ADC Data + */ +#define WM8994_AIF1ADCL_DAT_INV 0x0002 /* AIF1ADCL_DAT_INV */ +#define WM8994_AIF1ADCL_DAT_INV_MASK 0x0002 /* AIF1ADCL_DAT_INV */ +#define WM8994_AIF1ADCL_DAT_INV_SHIFT 1 /* AIF1ADCL_DAT_INV */ +#define WM8994_AIF1ADCL_DAT_INV_WIDTH 1 /* AIF1ADCL_DAT_INV */ +#define WM8994_AIF1ADCR_DAT_INV 0x0001 /* AIF1ADCR_DAT_INV */ +#define WM8994_AIF1ADCR_DAT_INV_MASK 0x0001 /* AIF1ADCR_DAT_INV */ +#define WM8994_AIF1ADCR_DAT_INV_SHIFT 0 /* AIF1ADCR_DAT_INV */ +#define WM8994_AIF1ADCR_DAT_INV_WIDTH 1 /* AIF1ADCR_DAT_INV */ + +/* + * R784 (0x310) - AIF2 Control (1) + */ +#define WM8994_AIF2ADCL_SRC 0x8000 /* AIF2ADCL_SRC */ +#define WM8994_AIF2ADCL_SRC_MASK 0x8000 /* AIF2ADCL_SRC */ +#define WM8994_AIF2ADCL_SRC_SHIFT 15 /* AIF2ADCL_SRC */ +#define WM8994_AIF2ADCL_SRC_WIDTH 1 /* AIF2ADCL_SRC */ +#define WM8994_AIF2ADCR_SRC 0x4000 /* AIF2ADCR_SRC */ +#define WM8994_AIF2ADCR_SRC_MASK 0x4000 /* AIF2ADCR_SRC */ +#define WM8994_AIF2ADCR_SRC_SHIFT 14 /* AIF2ADCR_SRC */ +#define WM8994_AIF2ADCR_SRC_WIDTH 1 /* AIF2ADCR_SRC */ +#define WM8994_AIF2ADC_TDM 0x2000 /* AIF2ADC_TDM */ +#define WM8994_AIF2ADC_TDM_MASK 0x2000 /* AIF2ADC_TDM */ +#define WM8994_AIF2ADC_TDM_SHIFT 13 /* AIF2ADC_TDM */ +#define WM8994_AIF2ADC_TDM_WIDTH 1 /* AIF2ADC_TDM */ +#define WM8994_AIF2ADC_TDM_CHAN 0x1000 /* AIF2ADC_TDM_CHAN */ +#define WM8994_AIF2ADC_TDM_CHAN_MASK 0x1000 /* AIF2ADC_TDM_CHAN */ +#define WM8994_AIF2ADC_TDM_CHAN_SHIFT 12 /* AIF2ADC_TDM_CHAN */ +#define WM8994_AIF2ADC_TDM_CHAN_WIDTH 1 /* AIF2ADC_TDM_CHAN */ +#define WM8994_AIF2_BCLK_INV 0x0100 /* AIF2_BCLK_INV */ +#define WM8994_AIF2_BCLK_INV_MASK 0x0100 /* AIF2_BCLK_INV */ +#define WM8994_AIF2_BCLK_INV_SHIFT 8 /* AIF2_BCLK_INV */ +#define WM8994_AIF2_BCLK_INV_WIDTH 1 /* AIF2_BCLK_INV */ +#define WM8994_AIF2_LRCLK_INV 0x0080 /* AIF2_LRCLK_INV */ +#define WM8994_AIF2_LRCLK_INV_MASK 0x0080 /* AIF2_LRCLK_INV */ +#define WM8994_AIF2_LRCLK_INV_SHIFT 7 /* AIF2_LRCLK_INV */ +#define WM8994_AIF2_LRCLK_INV_WIDTH 1 /* AIF2_LRCLK_INV */ +#define WM8994_AIF2_WL_MASK 0x0060 /* AIF2_WL - [6:5] */ +#define WM8994_AIF2_WL_SHIFT 5 /* AIF2_WL - [6:5] */ +#define WM8994_AIF2_WL_WIDTH 2 /* AIF2_WL - [6:5] */ +#define WM8994_AIF2_FMT_MASK 0x0018 /* AIF2_FMT - [4:3] */ +#define 
WM8994_AIF2_FMT_SHIFT 3 /* AIF2_FMT - [4:3] */ +#define WM8994_AIF2_FMT_WIDTH 2 /* AIF2_FMT - [4:3] */ + +/* + * R785 (0x311) - AIF2 Control (2) + */ +#define WM8994_AIF2DACL_SRC 0x8000 /* AIF2DACL_SRC */ +#define WM8994_AIF2DACL_SRC_MASK 0x8000 /* AIF2DACL_SRC */ +#define WM8994_AIF2DACL_SRC_SHIFT 15 /* AIF2DACL_SRC */ +#define WM8994_AIF2DACL_SRC_WIDTH 1 /* AIF2DACL_SRC */ +#define WM8994_AIF2DACR_SRC 0x4000 /* AIF2DACR_SRC */ +#define WM8994_AIF2DACR_SRC_MASK 0x4000 /* AIF2DACR_SRC */ +#define WM8994_AIF2DACR_SRC_SHIFT 14 /* AIF2DACR_SRC */ +#define WM8994_AIF2DACR_SRC_WIDTH 1 /* AIF2DACR_SRC */ +#define WM8994_AIF2DAC_TDM 0x2000 /* AIF2DAC_TDM */ +#define WM8994_AIF2DAC_TDM_MASK 0x2000 /* AIF2DAC_TDM */ +#define WM8994_AIF2DAC_TDM_SHIFT 13 /* AIF2DAC_TDM */ +#define WM8994_AIF2DAC_TDM_WIDTH 1 /* AIF2DAC_TDM */ +#define WM8994_AIF2DAC_TDM_CHAN 0x1000 /* AIF2DAC_TDM_CHAN */ +#define WM8994_AIF2DAC_TDM_CHAN_MASK 0x1000 /* AIF2DAC_TDM_CHAN */ +#define WM8994_AIF2DAC_TDM_CHAN_SHIFT 12 /* AIF2DAC_TDM_CHAN */ +#define WM8994_AIF2DAC_TDM_CHAN_WIDTH 1 /* AIF2DAC_TDM_CHAN */ +#define WM8994_AIF2DAC_BOOST_MASK 0x0C00 /* AIF2DAC_BOOST - [11:10] */ +#define WM8994_AIF2DAC_BOOST_SHIFT 10 /* AIF2DAC_BOOST - [11:10] */ +#define WM8994_AIF2DAC_BOOST_WIDTH 2 /* AIF2DAC_BOOST - [11:10] */ +#define WM8994_AIF2_MONO 0x0100 /* AIF2_MONO */ +#define WM8994_AIF2_MONO_MASK 0x0100 /* AIF2_MONO */ +#define WM8994_AIF2_MONO_SHIFT 8 /* AIF2_MONO */ +#define WM8994_AIF2_MONO_WIDTH 1 /* AIF2_MONO */ +#define WM8994_AIF2DAC_COMP 0x0010 /* AIF2DAC_COMP */ +#define WM8994_AIF2DAC_COMP_MASK 0x0010 /* AIF2DAC_COMP */ +#define WM8994_AIF2DAC_COMP_SHIFT 4 /* AIF2DAC_COMP */ +#define WM8994_AIF2DAC_COMP_WIDTH 1 /* AIF2DAC_COMP */ +#define WM8994_AIF2DAC_COMPMODE 0x0008 /* AIF2DAC_COMPMODE */ +#define WM8994_AIF2DAC_COMPMODE_MASK 0x0008 /* AIF2DAC_COMPMODE */ +#define WM8994_AIF2DAC_COMPMODE_SHIFT 3 /* AIF2DAC_COMPMODE */ +#define WM8994_AIF2DAC_COMPMODE_WIDTH 1 /* AIF2DAC_COMPMODE */ +#define WM8994_AIF2ADC_COMP 0x0004 /* AIF2ADC_COMP */ +#define WM8994_AIF2ADC_COMP_MASK 0x0004 /* AIF2ADC_COMP */ +#define WM8994_AIF2ADC_COMP_SHIFT 2 /* AIF2ADC_COMP */ +#define WM8994_AIF2ADC_COMP_WIDTH 1 /* AIF2ADC_COMP */ +#define WM8994_AIF2ADC_COMPMODE 0x0002 /* AIF2ADC_COMPMODE */ +#define WM8994_AIF2ADC_COMPMODE_MASK 0x0002 /* AIF2ADC_COMPMODE */ +#define WM8994_AIF2ADC_COMPMODE_SHIFT 1 /* AIF2ADC_COMPMODE */ +#define WM8994_AIF2ADC_COMPMODE_WIDTH 1 /* AIF2ADC_COMPMODE */ +#define WM8994_AIF2_LOOPBACK 0x0001 /* AIF2_LOOPBACK */ +#define WM8994_AIF2_LOOPBACK_MASK 0x0001 /* AIF2_LOOPBACK */ +#define WM8994_AIF2_LOOPBACK_SHIFT 0 /* AIF2_LOOPBACK */ +#define WM8994_AIF2_LOOPBACK_WIDTH 1 /* AIF2_LOOPBACK */ + +/* + * R786 (0x312) - AIF2 Master/Slave + */ +#define WM8994_AIF2_TRI 0x8000 /* AIF2_TRI */ +#define WM8994_AIF2_TRI_MASK 0x8000 /* AIF2_TRI */ +#define WM8994_AIF2_TRI_SHIFT 15 /* AIF2_TRI */ +#define WM8994_AIF2_TRI_WIDTH 1 /* AIF2_TRI */ +#define WM8994_AIF2_MSTR 0x4000 /* AIF2_MSTR */ +#define WM8994_AIF2_MSTR_MASK 0x4000 /* AIF2_MSTR */ +#define WM8994_AIF2_MSTR_SHIFT 14 /* AIF2_MSTR */ +#define WM8994_AIF2_MSTR_WIDTH 1 /* AIF2_MSTR */ +#define WM8994_AIF2_CLK_FRC 0x2000 /* AIF2_CLK_FRC */ +#define WM8994_AIF2_CLK_FRC_MASK 0x2000 /* AIF2_CLK_FRC */ +#define WM8994_AIF2_CLK_FRC_SHIFT 13 /* AIF2_CLK_FRC */ +#define WM8994_AIF2_CLK_FRC_WIDTH 1 /* AIF2_CLK_FRC */ +#define WM8994_AIF2_LRCLK_FRC 0x1000 /* AIF2_LRCLK_FRC */ +#define WM8994_AIF2_LRCLK_FRC_MASK 0x1000 /* AIF2_LRCLK_FRC */ +#define WM8994_AIF2_LRCLK_FRC_SHIFT 12 /* 
AIF2_LRCLK_FRC */ +#define WM8994_AIF2_LRCLK_FRC_WIDTH 1 /* AIF2_LRCLK_FRC */ + +/* + * R787 (0x313) - AIF2 BCLK + */ +#define WM8994_AIF2_BCLK_DIV_MASK 0x01F0 /* AIF2_BCLK_DIV - [8:4] */ +#define WM8994_AIF2_BCLK_DIV_SHIFT 4 /* AIF2_BCLK_DIV - [8:4] */ +#define WM8994_AIF2_BCLK_DIV_WIDTH 5 /* AIF2_BCLK_DIV - [8:4] */ + +/* + * R788 (0x314) - AIF2ADC LRCLK + */ +#define WM8994_AIF2ADC_LRCLK_DIR 0x0800 /* AIF2ADC_LRCLK_DIR */ +#define WM8994_AIF2ADC_LRCLK_DIR_MASK 0x0800 /* AIF2ADC_LRCLK_DIR */ +#define WM8994_AIF2ADC_LRCLK_DIR_SHIFT 11 /* AIF2ADC_LRCLK_DIR */ +#define WM8994_AIF2ADC_LRCLK_DIR_WIDTH 1 /* AIF2ADC_LRCLK_DIR */ +#define WM8994_AIF2ADC_RATE_MASK 0x07FF /* AIF2ADC_RATE - [10:0] */ +#define WM8994_AIF2ADC_RATE_SHIFT 0 /* AIF2ADC_RATE - [10:0] */ +#define WM8994_AIF2ADC_RATE_WIDTH 11 /* AIF2ADC_RATE - [10:0] */ + +/* + * R789 (0x315) - AIF2DAC LRCLK + */ +#define WM8994_AIF2DAC_LRCLK_DIR 0x0800 /* AIF2DAC_LRCLK_DIR */ +#define WM8994_AIF2DAC_LRCLK_DIR_MASK 0x0800 /* AIF2DAC_LRCLK_DIR */ +#define WM8994_AIF2DAC_LRCLK_DIR_SHIFT 11 /* AIF2DAC_LRCLK_DIR */ +#define WM8994_AIF2DAC_LRCLK_DIR_WIDTH 1 /* AIF2DAC_LRCLK_DIR */ +#define WM8994_AIF2DAC_RATE_MASK 0x07FF /* AIF2DAC_RATE - [10:0] */ +#define WM8994_AIF2DAC_RATE_SHIFT 0 /* AIF2DAC_RATE - [10:0] */ +#define WM8994_AIF2DAC_RATE_WIDTH 11 /* AIF2DAC_RATE - [10:0] */ + +/* + * R790 (0x316) - AIF2DAC Data + */ +#define WM8994_AIF2DACL_DAT_INV 0x0002 /* AIF2DACL_DAT_INV */ +#define WM8994_AIF2DACL_DAT_INV_MASK 0x0002 /* AIF2DACL_DAT_INV */ +#define WM8994_AIF2DACL_DAT_INV_SHIFT 1 /* AIF2DACL_DAT_INV */ +#define WM8994_AIF2DACL_DAT_INV_WIDTH 1 /* AIF2DACL_DAT_INV */ +#define WM8994_AIF2DACR_DAT_INV 0x0001 /* AIF2DACR_DAT_INV */ +#define WM8994_AIF2DACR_DAT_INV_MASK 0x0001 /* AIF2DACR_DAT_INV */ +#define WM8994_AIF2DACR_DAT_INV_SHIFT 0 /* AIF2DACR_DAT_INV */ +#define WM8994_AIF2DACR_DAT_INV_WIDTH 1 /* AIF2DACR_DAT_INV */ + +/* + * R791 (0x317) - AIF2ADC Data + */ +#define WM8994_AIF2ADCL_DAT_INV 0x0002 /* AIF2ADCL_DAT_INV */ +#define WM8994_AIF2ADCL_DAT_INV_MASK 0x0002 /* AIF2ADCL_DAT_INV */ +#define WM8994_AIF2ADCL_DAT_INV_SHIFT 1 /* AIF2ADCL_DAT_INV */ +#define WM8994_AIF2ADCL_DAT_INV_WIDTH 1 /* AIF2ADCL_DAT_INV */ +#define WM8994_AIF2ADCR_DAT_INV 0x0001 /* AIF2ADCR_DAT_INV */ +#define WM8994_AIF2ADCR_DAT_INV_MASK 0x0001 /* AIF2ADCR_DAT_INV */ +#define WM8994_AIF2ADCR_DAT_INV_SHIFT 0 /* AIF2ADCR_DAT_INV */ +#define WM8994_AIF2ADCR_DAT_INV_WIDTH 1 /* AIF2ADCR_DAT_INV */ + +/* + * R800 (0x320) - AIF3 Control (1) + */ +#define WM8958_AIF3_LRCLK_INV 0x0080 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_LRCLK_INV_MASK 0x0080 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_LRCLK_INV_SHIFT 7 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_LRCLK_INV_WIDTH 1 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_WL_MASK 0x0060 /* AIF3_WL - [6:5] */ +#define WM8958_AIF3_WL_SHIFT 5 /* AIF3_WL - [6:5] */ +#define WM8958_AIF3_WL_WIDTH 2 /* AIF3_WL - [6:5] */ +#define WM8958_AIF3_FMT_MASK 0x0018 /* AIF3_FMT - [4:3] */ +#define WM8958_AIF3_FMT_SHIFT 3 /* AIF3_FMT - [4:3] */ +#define WM8958_AIF3_FMT_WIDTH 2 /* AIF3_FMT - [4:3] */ + +/* + * R801 (0x321) - AIF3 Control (2) + */ +#define WM8958_AIF3DAC_BOOST_MASK 0x0C00 /* AIF3DAC_BOOST - [11:10] */ +#define WM8958_AIF3DAC_BOOST_SHIFT 10 /* AIF3DAC_BOOST - [11:10] */ +#define WM8958_AIF3DAC_BOOST_WIDTH 2 /* AIF3DAC_BOOST - [11:10] */ +#define WM8958_AIF3DAC_COMP 0x0010 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMP_MASK 0x0010 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMP_SHIFT 4 /* AIF3DAC_COMP */ +#define 
WM8958_AIF3DAC_COMP_WIDTH 1 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMPMODE 0x0008 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3DAC_COMPMODE_MASK 0x0008 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3DAC_COMPMODE_SHIFT 3 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3DAC_COMPMODE_WIDTH 1 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3ADC_COMP 0x0004 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMP_MASK 0x0004 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMP_SHIFT 2 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMP_WIDTH 1 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMPMODE 0x0002 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3ADC_COMPMODE_MASK 0x0002 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3ADC_COMPMODE_SHIFT 1 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3ADC_COMPMODE_WIDTH 1 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3_LOOPBACK 0x0001 /* AIF3_LOOPBACK */ +#define WM8958_AIF3_LOOPBACK_MASK 0x0001 /* AIF3_LOOPBACK */ +#define WM8958_AIF3_LOOPBACK_SHIFT 0 /* AIF3_LOOPBACK */ +#define WM8958_AIF3_LOOPBACK_WIDTH 1 /* AIF3_LOOPBACK */ + +/* + * R802 (0x322) - AIF3DAC Data + */ +#define WM8958_AIF3DAC_DAT_INV 0x0001 /* AIF3DAC_DAT_INV */ +#define WM8958_AIF3DAC_DAT_INV_MASK 0x0001 /* AIF3DAC_DAT_INV */ +#define WM8958_AIF3DAC_DAT_INV_SHIFT 0 /* AIF3DAC_DAT_INV */ +#define WM8958_AIF3DAC_DAT_INV_WIDTH 1 /* AIF3DAC_DAT_INV */ + +/* + * R803 (0x323) - AIF3ADC Data + */ +#define WM8958_AIF3ADC_DAT_INV 0x0001 /* AIF3ADC_DAT_INV */ +#define WM8958_AIF3ADC_DAT_INV_MASK 0x0001 /* AIF3ADC_DAT_INV */ +#define WM8958_AIF3ADC_DAT_INV_SHIFT 0 /* AIF3ADC_DAT_INV */ +#define WM8958_AIF3ADC_DAT_INV_WIDTH 1 /* AIF3ADC_DAT_INV */ + +/* + * R1024 (0x400) - AIF1 ADC1 Left Volume + */ +#define WM8994_AIF1ADC1_VU 0x0100 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_MASK 0x0100 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_SHIFT 8 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_WIDTH 1 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1L_VOL_MASK 0x00FF /* AIF1ADC1L_VOL - [7:0] */ +#define WM8994_AIF1ADC1L_VOL_SHIFT 0 /* AIF1ADC1L_VOL - [7:0] */ +#define WM8994_AIF1ADC1L_VOL_WIDTH 8 /* AIF1ADC1L_VOL - [7:0] */ + +/* + * R1025 (0x401) - AIF1 ADC1 Right Volume + */ +#define WM8994_AIF1ADC1_VU 0x0100 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_MASK 0x0100 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_SHIFT 8 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_WIDTH 1 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1R_VOL_MASK 0x00FF /* AIF1ADC1R_VOL - [7:0] */ +#define WM8994_AIF1ADC1R_VOL_SHIFT 0 /* AIF1ADC1R_VOL - [7:0] */ +#define WM8994_AIF1ADC1R_VOL_WIDTH 8 /* AIF1ADC1R_VOL - [7:0] */ + +/* + * R1026 (0x402) - AIF1 DAC1 Left Volume + */ +#define WM8994_AIF1DAC1_VU 0x0100 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_MASK 0x0100 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_SHIFT 8 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_WIDTH 1 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1L_VOL_MASK 0x00FF /* AIF1DAC1L_VOL - [7:0] */ +#define WM8994_AIF1DAC1L_VOL_SHIFT 0 /* AIF1DAC1L_VOL - [7:0] */ +#define WM8994_AIF1DAC1L_VOL_WIDTH 8 /* AIF1DAC1L_VOL - [7:0] */ + +/* + * R1027 (0x403) - AIF1 DAC1 Right Volume + */ +#define WM8994_AIF1DAC1_VU 0x0100 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_MASK 0x0100 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_SHIFT 8 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_WIDTH 1 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1R_VOL_MASK 0x00FF /* AIF1DAC1R_VOL - [7:0] */ +#define WM8994_AIF1DAC1R_VOL_SHIFT 0 /* AIF1DAC1R_VOL - [7:0] */ +#define WM8994_AIF1DAC1R_VOL_WIDTH 8 /* AIF1DAC1R_VOL - [7:0] */ + +/* + * R1028 (0x404) - AIF1 ADC2 Left 
Volume + */ +#define WM8994_AIF1ADC2_VU 0x0100 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_MASK 0x0100 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_SHIFT 8 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_WIDTH 1 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2L_VOL_MASK 0x00FF /* AIF1ADC2L_VOL - [7:0] */ +#define WM8994_AIF1ADC2L_VOL_SHIFT 0 /* AIF1ADC2L_VOL - [7:0] */ +#define WM8994_AIF1ADC2L_VOL_WIDTH 8 /* AIF1ADC2L_VOL - [7:0] */ + +/* + * R1029 (0x405) - AIF1 ADC2 Right Volume + */ +#define WM8994_AIF1ADC2_VU 0x0100 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_MASK 0x0100 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_SHIFT 8 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_WIDTH 1 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2R_VOL_MASK 0x00FF /* AIF1ADC2R_VOL - [7:0] */ +#define WM8994_AIF1ADC2R_VOL_SHIFT 0 /* AIF1ADC2R_VOL - [7:0] */ +#define WM8994_AIF1ADC2R_VOL_WIDTH 8 /* AIF1ADC2R_VOL - [7:0] */ + +/* + * R1030 (0x406) - AIF1 DAC2 Left Volume + */ +#define WM8994_AIF1DAC2_VU 0x0100 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_MASK 0x0100 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_SHIFT 8 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_WIDTH 1 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2L_VOL_MASK 0x00FF /* AIF1DAC2L_VOL - [7:0] */ +#define WM8994_AIF1DAC2L_VOL_SHIFT 0 /* AIF1DAC2L_VOL - [7:0] */ +#define WM8994_AIF1DAC2L_VOL_WIDTH 8 /* AIF1DAC2L_VOL - [7:0] */ + +/* + * R1031 (0x407) - AIF1 DAC2 Right Volume + */ +#define WM8994_AIF1DAC2_VU 0x0100 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_MASK 0x0100 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_SHIFT 8 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_WIDTH 1 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2R_VOL_MASK 0x00FF /* AIF1DAC2R_VOL - [7:0] */ +#define WM8994_AIF1DAC2R_VOL_SHIFT 0 /* AIF1DAC2R_VOL - [7:0] */ +#define WM8994_AIF1DAC2R_VOL_WIDTH 8 /* AIF1DAC2R_VOL - [7:0] */ + +/* + * R1040 (0x410) - AIF1 ADC1 Filters + */ +#define WM8994_AIF1ADC_4FS 0x8000 /* AIF1ADC_4FS */ +#define WM8994_AIF1ADC_4FS_MASK 0x8000 /* AIF1ADC_4FS */ +#define WM8994_AIF1ADC_4FS_SHIFT 15 /* AIF1ADC_4FS */ +#define WM8994_AIF1ADC_4FS_WIDTH 1 /* AIF1ADC_4FS */ +#define WM8994_AIF1ADC1_HPF_CUT_MASK 0x6000 /* AIF1ADC1_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC1_HPF_CUT_SHIFT 13 /* AIF1ADC1_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC1_HPF_CUT_WIDTH 2 /* AIF1ADC1_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC1L_HPF 0x1000 /* AIF1ADC1L_HPF */ +#define WM8994_AIF1ADC1L_HPF_MASK 0x1000 /* AIF1ADC1L_HPF */ +#define WM8994_AIF1ADC1L_HPF_SHIFT 12 /* AIF1ADC1L_HPF */ +#define WM8994_AIF1ADC1L_HPF_WIDTH 1 /* AIF1ADC1L_HPF */ +#define WM8994_AIF1ADC1R_HPF 0x0800 /* AIF1ADC1R_HPF */ +#define WM8994_AIF1ADC1R_HPF_MASK 0x0800 /* AIF1ADC1R_HPF */ +#define WM8994_AIF1ADC1R_HPF_SHIFT 11 /* AIF1ADC1R_HPF */ +#define WM8994_AIF1ADC1R_HPF_WIDTH 1 /* AIF1ADC1R_HPF */ + +/* + * R1041 (0x411) - AIF1 ADC2 Filters + */ +#define WM8994_AIF1ADC2_HPF_CUT_MASK 0x6000 /* AIF1ADC2_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC2_HPF_CUT_SHIFT 13 /* AIF1ADC2_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC2_HPF_CUT_WIDTH 2 /* AIF1ADC2_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC2L_HPF 0x1000 /* AIF1ADC2L_HPF */ +#define WM8994_AIF1ADC2L_HPF_MASK 0x1000 /* AIF1ADC2L_HPF */ +#define WM8994_AIF1ADC2L_HPF_SHIFT 12 /* AIF1ADC2L_HPF */ +#define WM8994_AIF1ADC2L_HPF_WIDTH 1 /* AIF1ADC2L_HPF */ +#define WM8994_AIF1ADC2R_HPF 0x0800 /* AIF1ADC2R_HPF */ +#define WM8994_AIF1ADC2R_HPF_MASK 0x0800 /* AIF1ADC2R_HPF */ +#define WM8994_AIF1ADC2R_HPF_SHIFT 11 /* AIF1ADC2R_HPF */ +#define WM8994_AIF1ADC2R_HPF_WIDTH 1 
/* AIF1ADC2R_HPF */ + +/* + * R1056 (0x420) - AIF1 DAC1 Filters (1) + */ +#define WM8994_AIF1DAC1_MUTE 0x0200 /* AIF1DAC1_MUTE */ +#define WM8994_AIF1DAC1_MUTE_MASK 0x0200 /* AIF1DAC1_MUTE */ +#define WM8994_AIF1DAC1_MUTE_SHIFT 9 /* AIF1DAC1_MUTE */ +#define WM8994_AIF1DAC1_MUTE_WIDTH 1 /* AIF1DAC1_MUTE */ +#define WM8994_AIF1DAC1_MONO 0x0080 /* AIF1DAC1_MONO */ +#define WM8994_AIF1DAC1_MONO_MASK 0x0080 /* AIF1DAC1_MONO */ +#define WM8994_AIF1DAC1_MONO_SHIFT 7 /* AIF1DAC1_MONO */ +#define WM8994_AIF1DAC1_MONO_WIDTH 1 /* AIF1DAC1_MONO */ +#define WM8994_AIF1DAC1_MUTERATE 0x0020 /* AIF1DAC1_MUTERATE */ +#define WM8994_AIF1DAC1_MUTERATE_MASK 0x0020 /* AIF1DAC1_MUTERATE */ +#define WM8994_AIF1DAC1_MUTERATE_SHIFT 5 /* AIF1DAC1_MUTERATE */ +#define WM8994_AIF1DAC1_MUTERATE_WIDTH 1 /* AIF1DAC1_MUTERATE */ +#define WM8994_AIF1DAC1_UNMUTE_RAMP 0x0010 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8994_AIF1DAC1_UNMUTE_RAMP_MASK 0x0010 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8994_AIF1DAC1_UNMUTE_RAMP_SHIFT 4 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8994_AIF1DAC1_UNMUTE_RAMP_WIDTH 1 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8994_AIF1DAC1_DEEMP_MASK 0x0006 /* AIF1DAC1_DEEMP - [2:1] */ +#define WM8994_AIF1DAC1_DEEMP_SHIFT 1 /* AIF1DAC1_DEEMP - [2:1] */ +#define WM8994_AIF1DAC1_DEEMP_WIDTH 2 /* AIF1DAC1_DEEMP - [2:1] */ + +/* + * R1057 (0x421) - AIF1 DAC1 Filters (2) + */ +#define WM8994_AIF1DAC1_3D_GAIN_MASK 0x3E00 /* AIF1DAC1_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC1_3D_GAIN_SHIFT 9 /* AIF1DAC1_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC1_3D_GAIN_WIDTH 5 /* AIF1DAC1_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC1_3D_ENA 0x0100 /* AIF1DAC1_3D_ENA */ +#define WM8994_AIF1DAC1_3D_ENA_MASK 0x0100 /* AIF1DAC1_3D_ENA */ +#define WM8994_AIF1DAC1_3D_ENA_SHIFT 8 /* AIF1DAC1_3D_ENA */ +#define WM8994_AIF1DAC1_3D_ENA_WIDTH 1 /* AIF1DAC1_3D_ENA */ + +/* + * R1058 (0x422) - AIF1 DAC2 Filters (1) + */ +#define WM8994_AIF1DAC2_MUTE 0x0200 /* AIF1DAC2_MUTE */ +#define WM8994_AIF1DAC2_MUTE_MASK 0x0200 /* AIF1DAC2_MUTE */ +#define WM8994_AIF1DAC2_MUTE_SHIFT 9 /* AIF1DAC2_MUTE */ +#define WM8994_AIF1DAC2_MUTE_WIDTH 1 /* AIF1DAC2_MUTE */ +#define WM8994_AIF1DAC2_MONO 0x0080 /* AIF1DAC2_MONO */ +#define WM8994_AIF1DAC2_MONO_MASK 0x0080 /* AIF1DAC2_MONO */ +#define WM8994_AIF1DAC2_MONO_SHIFT 7 /* AIF1DAC2_MONO */ +#define WM8994_AIF1DAC2_MONO_WIDTH 1 /* AIF1DAC2_MONO */ +#define WM8994_AIF1DAC2_MUTERATE 0x0020 /* AIF1DAC2_MUTERATE */ +#define WM8994_AIF1DAC2_MUTERATE_MASK 0x0020 /* AIF1DAC2_MUTERATE */ +#define WM8994_AIF1DAC2_MUTERATE_SHIFT 5 /* AIF1DAC2_MUTERATE */ +#define WM8994_AIF1DAC2_MUTERATE_WIDTH 1 /* AIF1DAC2_MUTERATE */ +#define WM8994_AIF1DAC2_UNMUTE_RAMP 0x0010 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8994_AIF1DAC2_UNMUTE_RAMP_MASK 0x0010 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8994_AIF1DAC2_UNMUTE_RAMP_SHIFT 4 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8994_AIF1DAC2_UNMUTE_RAMP_WIDTH 1 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8994_AIF1DAC2_DEEMP_MASK 0x0006 /* AIF1DAC2_DEEMP - [2:1] */ +#define WM8994_AIF1DAC2_DEEMP_SHIFT 1 /* AIF1DAC2_DEEMP - [2:1] */ +#define WM8994_AIF1DAC2_DEEMP_WIDTH 2 /* AIF1DAC2_DEEMP - [2:1] */ + +/* + * R1059 (0x423) - AIF1 DAC2 Filters (2) + */ +#define WM8994_AIF1DAC2_3D_GAIN_MASK 0x3E00 /* AIF1DAC2_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC2_3D_GAIN_SHIFT 9 /* AIF1DAC2_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC2_3D_GAIN_WIDTH 5 /* AIF1DAC2_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC2_3D_ENA 0x0100 /* AIF1DAC2_3D_ENA */ +#define WM8994_AIF1DAC2_3D_ENA_MASK 0x0100 /* AIF1DAC2_3D_ENA */ +#define 
WM8994_AIF1DAC2_3D_ENA_SHIFT 8 /* AIF1DAC2_3D_ENA */ +#define WM8994_AIF1DAC2_3D_ENA_WIDTH 1 /* AIF1DAC2_3D_ENA */ + +/* + * R1072 (0x430) - AIF1 DAC1 Noise Gate + */ +#define WM8958_AIF1DAC1_NG_HLD_MASK 0x0060 /* AIF1DAC1_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC1_NG_HLD_SHIFT 5 /* AIF1DAC1_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC1_NG_HLD_WIDTH 2 /* AIF1DAC1_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC1_NG_THR_MASK 0x000E /* AIF1DAC1_NG_THR - [3:1] */ +#define WM8958_AIF1DAC1_NG_THR_SHIFT 1 /* AIF1DAC1_NG_THR - [3:1] */ +#define WM8958_AIF1DAC1_NG_THR_WIDTH 3 /* AIF1DAC1_NG_THR - [3:1] */ +#define WM8958_AIF1DAC1_NG_ENA 0x0001 /* AIF1DAC1_NG_ENA */ +#define WM8958_AIF1DAC1_NG_ENA_MASK 0x0001 /* AIF1DAC1_NG_ENA */ +#define WM8958_AIF1DAC1_NG_ENA_SHIFT 0 /* AIF1DAC1_NG_ENA */ +#define WM8958_AIF1DAC1_NG_ENA_WIDTH 1 /* AIF1DAC1_NG_ENA */ + +/* + * R1073 (0x431) - AIF1 DAC2 Noise Gate + */ +#define WM8958_AIF1DAC2_NG_HLD_MASK 0x0060 /* AIF1DAC2_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC2_NG_HLD_SHIFT 5 /* AIF1DAC2_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC2_NG_HLD_WIDTH 2 /* AIF1DAC2_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC2_NG_THR_MASK 0x000E /* AIF1DAC2_NG_THR - [3:1] */ +#define WM8958_AIF1DAC2_NG_THR_SHIFT 1 /* AIF1DAC2_NG_THR - [3:1] */ +#define WM8958_AIF1DAC2_NG_THR_WIDTH 3 /* AIF1DAC2_NG_THR - [3:1] */ +#define WM8958_AIF1DAC2_NG_ENA 0x0001 /* AIF1DAC2_NG_ENA */ +#define WM8958_AIF1DAC2_NG_ENA_MASK 0x0001 /* AIF1DAC2_NG_ENA */ +#define WM8958_AIF1DAC2_NG_ENA_SHIFT 0 /* AIF1DAC2_NG_ENA */ +#define WM8958_AIF1DAC2_NG_ENA_WIDTH 1 /* AIF1DAC2_NG_ENA */ + +/* + * R1088 (0x440) - AIF1 DRC1 (1) + */ +#define WM8994_AIF1DRC1_SIG_DET_RMS_MASK 0xF800 /* AIF1DRC1_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF1DRC1_SIG_DET_RMS_SHIFT 11 /* AIF1DRC1_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF1DRC1_SIG_DET_RMS_WIDTH 5 /* AIF1DRC1_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF1DRC1_SIG_DET_PK_MASK 0x0600 /* AIF1DRC1_SIG_DET_PK - [10:9] */ +#define WM8994_AIF1DRC1_SIG_DET_PK_SHIFT 9 /* AIF1DRC1_SIG_DET_PK - [10:9] */ +#define WM8994_AIF1DRC1_SIG_DET_PK_WIDTH 2 /* AIF1DRC1_SIG_DET_PK - [10:9] */ +#define WM8994_AIF1DRC1_NG_ENA 0x0100 /* AIF1DRC1_NG_ENA */ +#define WM8994_AIF1DRC1_NG_ENA_MASK 0x0100 /* AIF1DRC1_NG_ENA */ +#define WM8994_AIF1DRC1_NG_ENA_SHIFT 8 /* AIF1DRC1_NG_ENA */ +#define WM8994_AIF1DRC1_NG_ENA_WIDTH 1 /* AIF1DRC1_NG_ENA */ +#define WM8994_AIF1DRC1_SIG_DET_MODE 0x0080 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8994_AIF1DRC1_SIG_DET_MODE_MASK 0x0080 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8994_AIF1DRC1_SIG_DET_MODE_SHIFT 7 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8994_AIF1DRC1_SIG_DET_MODE_WIDTH 1 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8994_AIF1DRC1_SIG_DET 0x0040 /* AIF1DRC1_SIG_DET */ +#define WM8994_AIF1DRC1_SIG_DET_MASK 0x0040 /* AIF1DRC1_SIG_DET */ +#define WM8994_AIF1DRC1_SIG_DET_SHIFT 6 /* AIF1DRC1_SIG_DET */ +#define WM8994_AIF1DRC1_SIG_DET_WIDTH 1 /* AIF1DRC1_SIG_DET */ +#define WM8994_AIF1DRC1_KNEE2_OP_ENA 0x0020 /* AIF1DRC1_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC1_KNEE2_OP_ENA_MASK 0x0020 /* AIF1DRC1_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC1_KNEE2_OP_ENA_SHIFT 5 /* AIF1DRC1_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC1_KNEE2_OP_ENA_WIDTH 1 /* AIF1DRC1_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC1_QR 0x0010 /* AIF1DRC1_QR */ +#define WM8994_AIF1DRC1_QR_MASK 0x0010 /* AIF1DRC1_QR */ +#define WM8994_AIF1DRC1_QR_SHIFT 4 /* AIF1DRC1_QR */ +#define WM8994_AIF1DRC1_QR_WIDTH 1 /* AIF1DRC1_QR */ +#define WM8994_AIF1DRC1_ANTICLIP 0x0008 /* AIF1DRC1_ANTICLIP */ +#define WM8994_AIF1DRC1_ANTICLIP_MASK 0x0008 /* 
AIF1DRC1_ANTICLIP */ +#define WM8994_AIF1DRC1_ANTICLIP_SHIFT 3 /* AIF1DRC1_ANTICLIP */ +#define WM8994_AIF1DRC1_ANTICLIP_WIDTH 1 /* AIF1DRC1_ANTICLIP */ +#define WM8994_AIF1DAC1_DRC_ENA 0x0004 /* AIF1DAC1_DRC_ENA */ +#define WM8994_AIF1DAC1_DRC_ENA_MASK 0x0004 /* AIF1DAC1_DRC_ENA */ +#define WM8994_AIF1DAC1_DRC_ENA_SHIFT 2 /* AIF1DAC1_DRC_ENA */ +#define WM8994_AIF1DAC1_DRC_ENA_WIDTH 1 /* AIF1DAC1_DRC_ENA */ +#define WM8994_AIF1ADC1L_DRC_ENA 0x0002 /* AIF1ADC1L_DRC_ENA */ +#define WM8994_AIF1ADC1L_DRC_ENA_MASK 0x0002 /* AIF1ADC1L_DRC_ENA */ +#define WM8994_AIF1ADC1L_DRC_ENA_SHIFT 1 /* AIF1ADC1L_DRC_ENA */ +#define WM8994_AIF1ADC1L_DRC_ENA_WIDTH 1 /* AIF1ADC1L_DRC_ENA */ +#define WM8994_AIF1ADC1R_DRC_ENA 0x0001 /* AIF1ADC1R_DRC_ENA */ +#define WM8994_AIF1ADC1R_DRC_ENA_MASK 0x0001 /* AIF1ADC1R_DRC_ENA */ +#define WM8994_AIF1ADC1R_DRC_ENA_SHIFT 0 /* AIF1ADC1R_DRC_ENA */ +#define WM8994_AIF1ADC1R_DRC_ENA_WIDTH 1 /* AIF1ADC1R_DRC_ENA */ + +/* + * R1089 (0x441) - AIF1 DRC1 (2) + */ +#define WM8994_AIF1DRC1_ATK_MASK 0x1E00 /* AIF1DRC1_ATK - [12:9] */ +#define WM8994_AIF1DRC1_ATK_SHIFT 9 /* AIF1DRC1_ATK - [12:9] */ +#define WM8994_AIF1DRC1_ATK_WIDTH 4 /* AIF1DRC1_ATK - [12:9] */ +#define WM8994_AIF1DRC1_DCY_MASK 0x01E0 /* AIF1DRC1_DCY - [8:5] */ +#define WM8994_AIF1DRC1_DCY_SHIFT 5 /* AIF1DRC1_DCY - [8:5] */ +#define WM8994_AIF1DRC1_DCY_WIDTH 4 /* AIF1DRC1_DCY - [8:5] */ +#define WM8994_AIF1DRC1_MINGAIN_MASK 0x001C /* AIF1DRC1_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC1_MINGAIN_SHIFT 2 /* AIF1DRC1_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC1_MINGAIN_WIDTH 3 /* AIF1DRC1_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC1_MAXGAIN_MASK 0x0003 /* AIF1DRC1_MAXGAIN - [1:0] */ +#define WM8994_AIF1DRC1_MAXGAIN_SHIFT 0 /* AIF1DRC1_MAXGAIN - [1:0] */ +#define WM8994_AIF1DRC1_MAXGAIN_WIDTH 2 /* AIF1DRC1_MAXGAIN - [1:0] */ + +/* + * R1090 (0x442) - AIF1 DRC1 (3) + */ +#define WM8994_AIF1DRC1_NG_MINGAIN_MASK 0xF000 /* AIF1DRC1_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC1_NG_MINGAIN_SHIFT 12 /* AIF1DRC1_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC1_NG_MINGAIN_WIDTH 4 /* AIF1DRC1_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC1_NG_EXP_MASK 0x0C00 /* AIF1DRC1_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC1_NG_EXP_SHIFT 10 /* AIF1DRC1_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC1_NG_EXP_WIDTH 2 /* AIF1DRC1_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC1_QR_THR_MASK 0x0300 /* AIF1DRC1_QR_THR - [9:8] */ +#define WM8994_AIF1DRC1_QR_THR_SHIFT 8 /* AIF1DRC1_QR_THR - [9:8] */ +#define WM8994_AIF1DRC1_QR_THR_WIDTH 2 /* AIF1DRC1_QR_THR - [9:8] */ +#define WM8994_AIF1DRC1_QR_DCY_MASK 0x00C0 /* AIF1DRC1_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC1_QR_DCY_SHIFT 6 /* AIF1DRC1_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC1_QR_DCY_WIDTH 2 /* AIF1DRC1_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC1_HI_COMP_MASK 0x0038 /* AIF1DRC1_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC1_HI_COMP_SHIFT 3 /* AIF1DRC1_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC1_HI_COMP_WIDTH 3 /* AIF1DRC1_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC1_LO_COMP_MASK 0x0007 /* AIF1DRC1_LO_COMP - [2:0] */ +#define WM8994_AIF1DRC1_LO_COMP_SHIFT 0 /* AIF1DRC1_LO_COMP - [2:0] */ +#define WM8994_AIF1DRC1_LO_COMP_WIDTH 3 /* AIF1DRC1_LO_COMP - [2:0] */ + +/* + * R1091 (0x443) - AIF1 DRC1 (4) + */ +#define WM8994_AIF1DRC1_KNEE_IP_MASK 0x07E0 /* AIF1DRC1_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC1_KNEE_IP_SHIFT 5 /* AIF1DRC1_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC1_KNEE_IP_WIDTH 6 /* AIF1DRC1_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC1_KNEE_OP_MASK 0x001F /* AIF1DRC1_KNEE_OP - [4:0] */ 
+#define WM8994_AIF1DRC1_KNEE_OP_SHIFT 0 /* AIF1DRC1_KNEE_OP - [4:0] */
+#define WM8994_AIF1DRC1_KNEE_OP_WIDTH 5 /* AIF1DRC1_KNEE_OP - [4:0] */
+
+/*
+ * R1092 (0x444) - AIF1 DRC1 (5)
+ */
+#define WM8994_AIF1DRC1_KNEE2_IP_MASK 0x03E0 /* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8994_AIF1DRC1_KNEE2_IP_SHIFT 5 /* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8994_AIF1DRC1_KNEE2_IP_WIDTH 5 /* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8994_AIF1DRC1_KNEE2_OP_MASK 0x001F /* AIF1DRC1_KNEE2_OP - [4:0] */
+#define WM8994_AIF1DRC1_KNEE2_OP_SHIFT 0 /* AIF1DRC1_KNEE2_OP - [4:0] */
+#define WM8994_AIF1DRC1_KNEE2_OP_WIDTH 5 /* AIF1DRC1_KNEE2_OP - [4:0] */
+
+/*
+ * R1104 (0x450) - AIF1 DRC2 (1)
+ */
+#define WM8994_AIF1DRC2_SIG_DET_RMS_MASK 0xF800 /* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF1DRC2_SIG_DET_RMS_SHIFT 11 /* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF1DRC2_SIG_DET_RMS_WIDTH 5 /* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF1DRC2_SIG_DET_PK_MASK 0x0600 /* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8994_AIF1DRC2_SIG_DET_PK_SHIFT 9 /* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8994_AIF1DRC2_SIG_DET_PK_WIDTH 2 /* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8994_AIF1DRC2_NG_ENA 0x0100 /* AIF1DRC2_NG_ENA */
+#define WM8994_AIF1DRC2_NG_ENA_MASK 0x0100 /* AIF1DRC2_NG_ENA */
+#define WM8994_AIF1DRC2_NG_ENA_SHIFT 8 /* AIF1DRC2_NG_ENA */
+#define WM8994_AIF1DRC2_NG_ENA_WIDTH 1 /* AIF1DRC2_NG_ENA */
+#define WM8994_AIF1DRC2_SIG_DET_MODE 0x0080 /* AIF1DRC2_SIG_DET_MODE */
+#define WM8994_AIF1DRC2_SIG_DET_MODE_MASK 0x0080 /* AIF1DRC2_SIG_DET_MODE */
+#define WM8994_AIF1DRC2_SIG_DET_MODE_SHIFT 7 /* AIF1DRC2_SIG_DET_MODE */
+#define WM8994_AIF1DRC2_SIG_DET_MODE_WIDTH 1 /* AIF1DRC2_SIG_DET_MODE */
+#define WM8994_AIF1DRC2_SIG_DET 0x0040 /* AIF1DRC2_SIG_DET */
+#define WM8994_AIF1DRC2_SIG_DET_MASK 0x0040 /* AIF1DRC2_SIG_DET */
+#define WM8994_AIF1DRC2_SIG_DET_SHIFT 6 /* AIF1DRC2_SIG_DET */
+#define WM8994_AIF1DRC2_SIG_DET_WIDTH 1 /* AIF1DRC2_SIG_DET */
+#define WM8994_AIF1DRC2_KNEE2_OP_ENA 0x0020 /* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC2_KNEE2_OP_ENA_MASK 0x0020 /* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC2_KNEE2_OP_ENA_SHIFT 5 /* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC2_KNEE2_OP_ENA_WIDTH 1 /* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC2_QR 0x0010 /* AIF1DRC2_QR */
+#define WM8994_AIF1DRC2_QR_MASK 0x0010 /* AIF1DRC2_QR */
+#define WM8994_AIF1DRC2_QR_SHIFT 4 /* AIF1DRC2_QR */
+#define WM8994_AIF1DRC2_QR_WIDTH 1 /* AIF1DRC2_QR */
+#define WM8994_AIF1DRC2_ANTICLIP 0x0008 /* AIF1DRC2_ANTICLIP */
+#define WM8994_AIF1DRC2_ANTICLIP_MASK 0x0008 /* AIF1DRC2_ANTICLIP */
+#define WM8994_AIF1DRC2_ANTICLIP_SHIFT 3 /* AIF1DRC2_ANTICLIP */
+#define WM8994_AIF1DRC2_ANTICLIP_WIDTH 1 /* AIF1DRC2_ANTICLIP */
+#define WM8994_AIF1DAC2_DRC_ENA 0x0004 /* AIF1DAC2_DRC_ENA */
+#define WM8994_AIF1DAC2_DRC_ENA_MASK 0x0004 /* AIF1DAC2_DRC_ENA */
+#define WM8994_AIF1DAC2_DRC_ENA_SHIFT 2 /* AIF1DAC2_DRC_ENA */
+#define WM8994_AIF1DAC2_DRC_ENA_WIDTH 1 /* AIF1DAC2_DRC_ENA */
+#define WM8994_AIF1ADC2L_DRC_ENA 0x0002 /* AIF1ADC2L_DRC_ENA */
+#define WM8994_AIF1ADC2L_DRC_ENA_MASK 0x0002 /* AIF1ADC2L_DRC_ENA */
+#define WM8994_AIF1ADC2L_DRC_ENA_SHIFT 1 /* AIF1ADC2L_DRC_ENA */
+#define WM8994_AIF1ADC2L_DRC_ENA_WIDTH 1 /* AIF1ADC2L_DRC_ENA */
+#define WM8994_AIF1ADC2R_DRC_ENA 0x0001 /* AIF1ADC2R_DRC_ENA */
+#define WM8994_AIF1ADC2R_DRC_ENA_MASK 0x0001 /* AIF1ADC2R_DRC_ENA */
+#define WM8994_AIF1ADC2R_DRC_ENA_SHIFT 0 /* AIF1ADC2R_DRC_ENA */
+#define WM8994_AIF1ADC2R_DRC_ENA_WIDTH 1 /*
AIF1ADC2R_DRC_ENA */ + +/* + * R1105 (0x451) - AIF1 DRC2 (2) + */ +#define WM8994_AIF1DRC2_ATK_MASK 0x1E00 /* AIF1DRC2_ATK - [12:9] */ +#define WM8994_AIF1DRC2_ATK_SHIFT 9 /* AIF1DRC2_ATK - [12:9] */ +#define WM8994_AIF1DRC2_ATK_WIDTH 4 /* AIF1DRC2_ATK - [12:9] */ +#define WM8994_AIF1DRC2_DCY_MASK 0x01E0 /* AIF1DRC2_DCY - [8:5] */ +#define WM8994_AIF1DRC2_DCY_SHIFT 5 /* AIF1DRC2_DCY - [8:5] */ +#define WM8994_AIF1DRC2_DCY_WIDTH 4 /* AIF1DRC2_DCY - [8:5] */ +#define WM8994_AIF1DRC2_MINGAIN_MASK 0x001C /* AIF1DRC2_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC2_MINGAIN_SHIFT 2 /* AIF1DRC2_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC2_MINGAIN_WIDTH 3 /* AIF1DRC2_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC2_MAXGAIN_MASK 0x0003 /* AIF1DRC2_MAXGAIN - [1:0] */ +#define WM8994_AIF1DRC2_MAXGAIN_SHIFT 0 /* AIF1DRC2_MAXGAIN - [1:0] */ +#define WM8994_AIF1DRC2_MAXGAIN_WIDTH 2 /* AIF1DRC2_MAXGAIN - [1:0] */ + +/* + * R1106 (0x452) - AIF1 DRC2 (3) + */ +#define WM8994_AIF1DRC2_NG_MINGAIN_MASK 0xF000 /* AIF1DRC2_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC2_NG_MINGAIN_SHIFT 12 /* AIF1DRC2_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC2_NG_MINGAIN_WIDTH 4 /* AIF1DRC2_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC2_NG_EXP_MASK 0x0C00 /* AIF1DRC2_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC2_NG_EXP_SHIFT 10 /* AIF1DRC2_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC2_NG_EXP_WIDTH 2 /* AIF1DRC2_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC2_QR_THR_MASK 0x0300 /* AIF1DRC2_QR_THR - [9:8] */ +#define WM8994_AIF1DRC2_QR_THR_SHIFT 8 /* AIF1DRC2_QR_THR - [9:8] */ +#define WM8994_AIF1DRC2_QR_THR_WIDTH 2 /* AIF1DRC2_QR_THR - [9:8] */ +#define WM8994_AIF1DRC2_QR_DCY_MASK 0x00C0 /* AIF1DRC2_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC2_QR_DCY_SHIFT 6 /* AIF1DRC2_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC2_QR_DCY_WIDTH 2 /* AIF1DRC2_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC2_HI_COMP_MASK 0x0038 /* AIF1DRC2_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC2_HI_COMP_SHIFT 3 /* AIF1DRC2_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC2_HI_COMP_WIDTH 3 /* AIF1DRC2_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC2_LO_COMP_MASK 0x0007 /* AIF1DRC2_LO_COMP - [2:0] */ +#define WM8994_AIF1DRC2_LO_COMP_SHIFT 0 /* AIF1DRC2_LO_COMP - [2:0] */ +#define WM8994_AIF1DRC2_LO_COMP_WIDTH 3 /* AIF1DRC2_LO_COMP - [2:0] */ + +/* + * R1107 (0x453) - AIF1 DRC2 (4) + */ +#define WM8994_AIF1DRC2_KNEE_IP_MASK 0x07E0 /* AIF1DRC2_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC2_KNEE_IP_SHIFT 5 /* AIF1DRC2_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC2_KNEE_IP_WIDTH 6 /* AIF1DRC2_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC2_KNEE_OP_MASK 0x001F /* AIF1DRC2_KNEE_OP - [4:0] */ +#define WM8994_AIF1DRC2_KNEE_OP_SHIFT 0 /* AIF1DRC2_KNEE_OP - [4:0] */ +#define WM8994_AIF1DRC2_KNEE_OP_WIDTH 5 /* AIF1DRC2_KNEE_OP - [4:0] */ + +/* + * R1108 (0x454) - AIF1 DRC2 (5) + */ +#define WM8994_AIF1DRC2_KNEE2_IP_MASK 0x03E0 /* AIF1DRC2_KNEE2_IP - [9:5] */ +#define WM8994_AIF1DRC2_KNEE2_IP_SHIFT 5 /* AIF1DRC2_KNEE2_IP - [9:5] */ +#define WM8994_AIF1DRC2_KNEE2_IP_WIDTH 5 /* AIF1DRC2_KNEE2_IP - [9:5] */ +#define WM8994_AIF1DRC2_KNEE2_OP_MASK 0x001F /* AIF1DRC2_KNEE2_OP - [4:0] */ +#define WM8994_AIF1DRC2_KNEE2_OP_SHIFT 0 /* AIF1DRC2_KNEE2_OP - [4:0] */ +#define WM8994_AIF1DRC2_KNEE2_OP_WIDTH 5 /* AIF1DRC2_KNEE2_OP - [4:0] */ + +/* + * R1152 (0x480) - AIF1 DAC1 EQ Gains (1) + */ +#define WM8994_AIF1DAC1_EQ_B1_GAIN_MASK 0xF800 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B1_GAIN_SHIFT 11 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B1_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B1_GAIN - 
[15:11] */ +#define WM8994_AIF1DAC1_EQ_B2_GAIN_MASK 0x07C0 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC1_EQ_B2_GAIN_SHIFT 6 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC1_EQ_B2_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC1_EQ_B3_GAIN_MASK 0x003E /* AIF1DAC1_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC1_EQ_B3_GAIN_SHIFT 1 /* AIF1DAC1_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC1_EQ_B3_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC1_EQ_ENA 0x0001 /* AIF1DAC1_EQ_ENA */ +#define WM8994_AIF1DAC1_EQ_ENA_MASK 0x0001 /* AIF1DAC1_EQ_ENA */ +#define WM8994_AIF1DAC1_EQ_ENA_SHIFT 0 /* AIF1DAC1_EQ_ENA */ +#define WM8994_AIF1DAC1_EQ_ENA_WIDTH 1 /* AIF1DAC1_EQ_ENA */ + +/* + * R1153 (0x481) - AIF1 DAC1 EQ Gains (2) + */ +#define WM8994_AIF1DAC1_EQ_B4_GAIN_MASK 0xF800 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B4_GAIN_SHIFT 11 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B4_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B5_GAIN_MASK 0x07C0 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF1DAC1_EQ_B5_GAIN_SHIFT 6 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF1DAC1_EQ_B5_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */ + +/* + * R1154 (0x482) - AIF1 DAC1 EQ Band 1 A + */ +#define WM8994_AIF1DAC1_EQ_B1_A_MASK 0xFFFF /* AIF1DAC1_EQ_B1_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_A_SHIFT 0 /* AIF1DAC1_EQ_B1_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_A_WIDTH 16 /* AIF1DAC1_EQ_B1_A - [15:0] */ + +/* + * R1155 (0x483) - AIF1 DAC1 EQ Band 1 B + */ +#define WM8994_AIF1DAC1_EQ_B1_B_MASK 0xFFFF /* AIF1DAC1_EQ_B1_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_B_SHIFT 0 /* AIF1DAC1_EQ_B1_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_B_WIDTH 16 /* AIF1DAC1_EQ_B1_B - [15:0] */ + +/* + * R1156 (0x484) - AIF1 DAC1 EQ Band 1 PG + */ +#define WM8994_AIF1DAC1_EQ_B1_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B1_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_PG_SHIFT 0 /* AIF1DAC1_EQ_B1_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_PG_WIDTH 16 /* AIF1DAC1_EQ_B1_PG - [15:0] */ + +/* + * R1157 (0x485) - AIF1 DAC1 EQ Band 2 A + */ +#define WM8994_AIF1DAC1_EQ_B2_A_MASK 0xFFFF /* AIF1DAC1_EQ_B2_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_A_SHIFT 0 /* AIF1DAC1_EQ_B2_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_A_WIDTH 16 /* AIF1DAC1_EQ_B2_A - [15:0] */ + +/* + * R1158 (0x486) - AIF1 DAC1 EQ Band 2 B + */ +#define WM8994_AIF1DAC1_EQ_B2_B_MASK 0xFFFF /* AIF1DAC1_EQ_B2_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_B_SHIFT 0 /* AIF1DAC1_EQ_B2_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_B_WIDTH 16 /* AIF1DAC1_EQ_B2_B - [15:0] */ + +/* + * R1159 (0x487) - AIF1 DAC1 EQ Band 2 C + */ +#define WM8994_AIF1DAC1_EQ_B2_C_MASK 0xFFFF /* AIF1DAC1_EQ_B2_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_C_SHIFT 0 /* AIF1DAC1_EQ_B2_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_C_WIDTH 16 /* AIF1DAC1_EQ_B2_C - [15:0] */ + +/* + * R1160 (0x488) - AIF1 DAC1 EQ Band 2 PG + */ +#define WM8994_AIF1DAC1_EQ_B2_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B2_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_PG_SHIFT 0 /* AIF1DAC1_EQ_B2_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_PG_WIDTH 16 /* AIF1DAC1_EQ_B2_PG - [15:0] */ + +/* + * R1161 (0x489) - AIF1 DAC1 EQ Band 3 A + */ +#define WM8994_AIF1DAC1_EQ_B3_A_MASK 0xFFFF /* AIF1DAC1_EQ_B3_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_A_SHIFT 0 /* AIF1DAC1_EQ_B3_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_A_WIDTH 16 /* AIF1DAC1_EQ_B3_A - [15:0] */ + +/* + * R1162 (0x48A) - AIF1 DAC1 EQ Band 
3 B + */ +#define WM8994_AIF1DAC1_EQ_B3_B_MASK 0xFFFF /* AIF1DAC1_EQ_B3_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_B_SHIFT 0 /* AIF1DAC1_EQ_B3_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_B_WIDTH 16 /* AIF1DAC1_EQ_B3_B - [15:0] */ + +/* + * R1163 (0x48B) - AIF1 DAC1 EQ Band 3 C + */ +#define WM8994_AIF1DAC1_EQ_B3_C_MASK 0xFFFF /* AIF1DAC1_EQ_B3_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_C_SHIFT 0 /* AIF1DAC1_EQ_B3_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_C_WIDTH 16 /* AIF1DAC1_EQ_B3_C - [15:0] */ + +/* + * R1164 (0x48C) - AIF1 DAC1 EQ Band 3 PG + */ +#define WM8994_AIF1DAC1_EQ_B3_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B3_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_PG_SHIFT 0 /* AIF1DAC1_EQ_B3_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_PG_WIDTH 16 /* AIF1DAC1_EQ_B3_PG - [15:0] */ + +/* + * R1165 (0x48D) - AIF1 DAC1 EQ Band 4 A + */ +#define WM8994_AIF1DAC1_EQ_B4_A_MASK 0xFFFF /* AIF1DAC1_EQ_B4_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_A_SHIFT 0 /* AIF1DAC1_EQ_B4_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_A_WIDTH 16 /* AIF1DAC1_EQ_B4_A - [15:0] */ + +/* + * R1166 (0x48E) - AIF1 DAC1 EQ Band 4 B + */ +#define WM8994_AIF1DAC1_EQ_B4_B_MASK 0xFFFF /* AIF1DAC1_EQ_B4_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_B_SHIFT 0 /* AIF1DAC1_EQ_B4_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_B_WIDTH 16 /* AIF1DAC1_EQ_B4_B - [15:0] */ + +/* + * R1167 (0x48F) - AIF1 DAC1 EQ Band 4 C + */ +#define WM8994_AIF1DAC1_EQ_B4_C_MASK 0xFFFF /* AIF1DAC1_EQ_B4_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_C_SHIFT 0 /* AIF1DAC1_EQ_B4_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_C_WIDTH 16 /* AIF1DAC1_EQ_B4_C - [15:0] */ + +/* + * R1168 (0x490) - AIF1 DAC1 EQ Band 4 PG + */ +#define WM8994_AIF1DAC1_EQ_B4_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B4_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_PG_SHIFT 0 /* AIF1DAC1_EQ_B4_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_PG_WIDTH 16 /* AIF1DAC1_EQ_B4_PG - [15:0] */ + +/* + * R1169 (0x491) - AIF1 DAC1 EQ Band 5 A + */ +#define WM8994_AIF1DAC1_EQ_B5_A_MASK 0xFFFF /* AIF1DAC1_EQ_B5_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_A_SHIFT 0 /* AIF1DAC1_EQ_B5_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_A_WIDTH 16 /* AIF1DAC1_EQ_B5_A - [15:0] */ + +/* + * R1170 (0x492) - AIF1 DAC1 EQ Band 5 B + */ +#define WM8994_AIF1DAC1_EQ_B5_B_MASK 0xFFFF /* AIF1DAC1_EQ_B5_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_B_SHIFT 0 /* AIF1DAC1_EQ_B5_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_B_WIDTH 16 /* AIF1DAC1_EQ_B5_B - [15:0] */ + +/* + * R1171 (0x493) - AIF1 DAC1 EQ Band 5 PG + */ +#define WM8994_AIF1DAC1_EQ_B5_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B5_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_PG_SHIFT 0 /* AIF1DAC1_EQ_B5_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_PG_WIDTH 16 /* AIF1DAC1_EQ_B5_PG - [15:0] */ + +/* + * R1184 (0x4A0) - AIF1 DAC2 EQ Gains (1) + */ +#define WM8994_AIF1DAC2_EQ_B1_GAIN_MASK 0xF800 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B1_GAIN_SHIFT 11 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B1_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B2_GAIN_MASK 0x07C0 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC2_EQ_B2_GAIN_SHIFT 6 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC2_EQ_B2_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC2_EQ_B3_GAIN_MASK 0x003E /* AIF1DAC2_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC2_EQ_B3_GAIN_SHIFT 1 /* AIF1DAC2_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC2_EQ_B3_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B3_GAIN - [5:1] */ +#define 
WM8994_AIF1DAC2_EQ_ENA 0x0001 /* AIF1DAC2_EQ_ENA */ +#define WM8994_AIF1DAC2_EQ_ENA_MASK 0x0001 /* AIF1DAC2_EQ_ENA */ +#define WM8994_AIF1DAC2_EQ_ENA_SHIFT 0 /* AIF1DAC2_EQ_ENA */ +#define WM8994_AIF1DAC2_EQ_ENA_WIDTH 1 /* AIF1DAC2_EQ_ENA */ + +/* + * R1185 (0x4A1) - AIF1 DAC2 EQ Gains (2) + */ +#define WM8994_AIF1DAC2_EQ_B4_GAIN_MASK 0xF800 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B4_GAIN_SHIFT 11 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B4_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B5_GAIN_MASK 0x07C0 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF1DAC2_EQ_B5_GAIN_SHIFT 6 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF1DAC2_EQ_B5_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */ + +/* + * R1186 (0x4A2) - AIF1 DAC2 EQ Band 1 A + */ +#define WM8994_AIF1DAC2_EQ_B1_A_MASK 0xFFFF /* AIF1DAC2_EQ_B1_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_A_SHIFT 0 /* AIF1DAC2_EQ_B1_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_A_WIDTH 16 /* AIF1DAC2_EQ_B1_A - [15:0] */ + +/* + * R1187 (0x4A3) - AIF1 DAC2 EQ Band 1 B + */ +#define WM8994_AIF1DAC2_EQ_B1_B_MASK 0xFFFF /* AIF1DAC2_EQ_B1_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_B_SHIFT 0 /* AIF1DAC2_EQ_B1_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_B_WIDTH 16 /* AIF1DAC2_EQ_B1_B - [15:0] */ + +/* + * R1188 (0x4A4) - AIF1 DAC2 EQ Band 1 PG + */ +#define WM8994_AIF1DAC2_EQ_B1_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B1_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_PG_SHIFT 0 /* AIF1DAC2_EQ_B1_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_PG_WIDTH 16 /* AIF1DAC2_EQ_B1_PG - [15:0] */ + +/* + * R1189 (0x4A5) - AIF1 DAC2 EQ Band 2 A + */ +#define WM8994_AIF1DAC2_EQ_B2_A_MASK 0xFFFF /* AIF1DAC2_EQ_B2_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_A_SHIFT 0 /* AIF1DAC2_EQ_B2_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_A_WIDTH 16 /* AIF1DAC2_EQ_B2_A - [15:0] */ + +/* + * R1190 (0x4A6) - AIF1 DAC2 EQ Band 2 B + */ +#define WM8994_AIF1DAC2_EQ_B2_B_MASK 0xFFFF /* AIF1DAC2_EQ_B2_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_B_SHIFT 0 /* AIF1DAC2_EQ_B2_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_B_WIDTH 16 /* AIF1DAC2_EQ_B2_B - [15:0] */ + +/* + * R1191 (0x4A7) - AIF1 DAC2 EQ Band 2 C + */ +#define WM8994_AIF1DAC2_EQ_B2_C_MASK 0xFFFF /* AIF1DAC2_EQ_B2_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_C_SHIFT 0 /* AIF1DAC2_EQ_B2_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_C_WIDTH 16 /* AIF1DAC2_EQ_B2_C - [15:0] */ + +/* + * R1192 (0x4A8) - AIF1 DAC2 EQ Band 2 PG + */ +#define WM8994_AIF1DAC2_EQ_B2_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B2_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_PG_SHIFT 0 /* AIF1DAC2_EQ_B2_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_PG_WIDTH 16 /* AIF1DAC2_EQ_B2_PG - [15:0] */ + +/* + * R1193 (0x4A9) - AIF1 DAC2 EQ Band 3 A + */ +#define WM8994_AIF1DAC2_EQ_B3_A_MASK 0xFFFF /* AIF1DAC2_EQ_B3_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_A_SHIFT 0 /* AIF1DAC2_EQ_B3_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_A_WIDTH 16 /* AIF1DAC2_EQ_B3_A - [15:0] */ + +/* + * R1194 (0x4AA) - AIF1 DAC2 EQ Band 3 B + */ +#define WM8994_AIF1DAC2_EQ_B3_B_MASK 0xFFFF /* AIF1DAC2_EQ_B3_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_B_SHIFT 0 /* AIF1DAC2_EQ_B3_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_B_WIDTH 16 /* AIF1DAC2_EQ_B3_B - [15:0] */ + +/* + * R1195 (0x4AB) - AIF1 DAC2 EQ Band 3 C + */ +#define WM8994_AIF1DAC2_EQ_B3_C_MASK 0xFFFF /* AIF1DAC2_EQ_B3_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_C_SHIFT 0 /* AIF1DAC2_EQ_B3_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_C_WIDTH 16 /* AIF1DAC2_EQ_B3_C 
- [15:0] */ + +/* + * R1196 (0x4AC) - AIF1 DAC2 EQ Band 3 PG + */ +#define WM8994_AIF1DAC2_EQ_B3_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B3_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_PG_SHIFT 0 /* AIF1DAC2_EQ_B3_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_PG_WIDTH 16 /* AIF1DAC2_EQ_B3_PG - [15:0] */ + +/* + * R1197 (0x4AD) - AIF1 DAC2 EQ Band 4 A + */ +#define WM8994_AIF1DAC2_EQ_B4_A_MASK 0xFFFF /* AIF1DAC2_EQ_B4_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_A_SHIFT 0 /* AIF1DAC2_EQ_B4_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_A_WIDTH 16 /* AIF1DAC2_EQ_B4_A - [15:0] */ + +/* + * R1198 (0x4AE) - AIF1 DAC2 EQ Band 4 B + */ +#define WM8994_AIF1DAC2_EQ_B4_B_MASK 0xFFFF /* AIF1DAC2_EQ_B4_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_B_SHIFT 0 /* AIF1DAC2_EQ_B4_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_B_WIDTH 16 /* AIF1DAC2_EQ_B4_B - [15:0] */ + +/* + * R1199 (0x4AF) - AIF1 DAC2 EQ Band 4 C + */ +#define WM8994_AIF1DAC2_EQ_B4_C_MASK 0xFFFF /* AIF1DAC2_EQ_B4_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_C_SHIFT 0 /* AIF1DAC2_EQ_B4_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_C_WIDTH 16 /* AIF1DAC2_EQ_B4_C - [15:0] */ + +/* + * R1200 (0x4B0) - AIF1 DAC2 EQ Band 4 PG + */ +#define WM8994_AIF1DAC2_EQ_B4_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B4_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_PG_SHIFT 0 /* AIF1DAC2_EQ_B4_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_PG_WIDTH 16 /* AIF1DAC2_EQ_B4_PG - [15:0] */ + +/* + * R1201 (0x4B1) - AIF1 DAC2 EQ Band 5 A + */ +#define WM8994_AIF1DAC2_EQ_B5_A_MASK 0xFFFF /* AIF1DAC2_EQ_B5_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_A_SHIFT 0 /* AIF1DAC2_EQ_B5_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_A_WIDTH 16 /* AIF1DAC2_EQ_B5_A - [15:0] */ + +/* + * R1202 (0x4B2) - AIF1 DAC2 EQ Band 5 B + */ +#define WM8994_AIF1DAC2_EQ_B5_B_MASK 0xFFFF /* AIF1DAC2_EQ_B5_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_B_SHIFT 0 /* AIF1DAC2_EQ_B5_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_B_WIDTH 16 /* AIF1DAC2_EQ_B5_B - [15:0] */ + +/* + * R1203 (0x4B3) - AIF1 DAC2 EQ Band 5 PG + */ +#define WM8994_AIF1DAC2_EQ_B5_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B5_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_PG_SHIFT 0 /* AIF1DAC2_EQ_B5_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_PG_WIDTH 16 /* AIF1DAC2_EQ_B5_PG - [15:0] */ + +/* + * R1280 (0x500) - AIF2 ADC Left Volume + */ +#define WM8994_AIF2ADC_VU 0x0100 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_MASK 0x0100 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_SHIFT 8 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_WIDTH 1 /* AIF2ADC_VU */ +#define WM8994_AIF2ADCL_VOL_MASK 0x00FF /* AIF2ADCL_VOL - [7:0] */ +#define WM8994_AIF2ADCL_VOL_SHIFT 0 /* AIF2ADCL_VOL - [7:0] */ +#define WM8994_AIF2ADCL_VOL_WIDTH 8 /* AIF2ADCL_VOL - [7:0] */ + +/* + * R1281 (0x501) - AIF2 ADC Right Volume + */ +#define WM8994_AIF2ADC_VU 0x0100 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_MASK 0x0100 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_SHIFT 8 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_WIDTH 1 /* AIF2ADC_VU */ +#define WM8994_AIF2ADCR_VOL_MASK 0x00FF /* AIF2ADCR_VOL - [7:0] */ +#define WM8994_AIF2ADCR_VOL_SHIFT 0 /* AIF2ADCR_VOL - [7:0] */ +#define WM8994_AIF2ADCR_VOL_WIDTH 8 /* AIF2ADCR_VOL - [7:0] */ + +/* + * R1282 (0x502) - AIF2 DAC Left Volume + */ +#define WM8994_AIF2DAC_VU 0x0100 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_MASK 0x0100 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_SHIFT 8 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_WIDTH 1 /* AIF2DAC_VU */ +#define WM8994_AIF2DACL_VOL_MASK 0x00FF /* AIF2DACL_VOL - [7:0] */ +#define WM8994_AIF2DACL_VOL_SHIFT 0 /* AIF2DACL_VOL - 
[7:0] */ +#define WM8994_AIF2DACL_VOL_WIDTH 8 /* AIF2DACL_VOL - [7:0] */ + +/* + * R1283 (0x503) - AIF2 DAC Right Volume + */ +#define WM8994_AIF2DAC_VU 0x0100 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_MASK 0x0100 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_SHIFT 8 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_WIDTH 1 /* AIF2DAC_VU */ +#define WM8994_AIF2DACR_VOL_MASK 0x00FF /* AIF2DACR_VOL - [7:0] */ +#define WM8994_AIF2DACR_VOL_SHIFT 0 /* AIF2DACR_VOL - [7:0] */ +#define WM8994_AIF2DACR_VOL_WIDTH 8 /* AIF2DACR_VOL - [7:0] */ + +/* + * R1296 (0x510) - AIF2 ADC Filters + */ +#define WM8994_AIF2ADC_4FS 0x8000 /* AIF2ADC_4FS */ +#define WM8994_AIF2ADC_4FS_MASK 0x8000 /* AIF2ADC_4FS */ +#define WM8994_AIF2ADC_4FS_SHIFT 15 /* AIF2ADC_4FS */ +#define WM8994_AIF2ADC_4FS_WIDTH 1 /* AIF2ADC_4FS */ +#define WM8994_AIF2ADC_HPF_CUT_MASK 0x6000 /* AIF2ADC_HPF_CUT - [14:13] */ +#define WM8994_AIF2ADC_HPF_CUT_SHIFT 13 /* AIF2ADC_HPF_CUT - [14:13] */ +#define WM8994_AIF2ADC_HPF_CUT_WIDTH 2 /* AIF2ADC_HPF_CUT - [14:13] */ +#define WM8994_AIF2ADCL_HPF 0x1000 /* AIF2ADCL_HPF */ +#define WM8994_AIF2ADCL_HPF_MASK 0x1000 /* AIF2ADCL_HPF */ +#define WM8994_AIF2ADCL_HPF_SHIFT 12 /* AIF2ADCL_HPF */ +#define WM8994_AIF2ADCL_HPF_WIDTH 1 /* AIF2ADCL_HPF */ +#define WM8994_AIF2ADCR_HPF 0x0800 /* AIF2ADCR_HPF */ +#define WM8994_AIF2ADCR_HPF_MASK 0x0800 /* AIF2ADCR_HPF */ +#define WM8994_AIF2ADCR_HPF_SHIFT 11 /* AIF2ADCR_HPF */ +#define WM8994_AIF2ADCR_HPF_WIDTH 1 /* AIF2ADCR_HPF */ + +/* + * R1312 (0x520) - AIF2 DAC Filters (1) + */ +#define WM8994_AIF2DAC_MUTE 0x0200 /* AIF2DAC_MUTE */ +#define WM8994_AIF2DAC_MUTE_MASK 0x0200 /* AIF2DAC_MUTE */ +#define WM8994_AIF2DAC_MUTE_SHIFT 9 /* AIF2DAC_MUTE */ +#define WM8994_AIF2DAC_MUTE_WIDTH 1 /* AIF2DAC_MUTE */ +#define WM8994_AIF2DAC_MONO 0x0080 /* AIF2DAC_MONO */ +#define WM8994_AIF2DAC_MONO_MASK 0x0080 /* AIF2DAC_MONO */ +#define WM8994_AIF2DAC_MONO_SHIFT 7 /* AIF2DAC_MONO */ +#define WM8994_AIF2DAC_MONO_WIDTH 1 /* AIF2DAC_MONO */ +#define WM8994_AIF2DAC_MUTERATE 0x0020 /* AIF2DAC_MUTERATE */ +#define WM8994_AIF2DAC_MUTERATE_MASK 0x0020 /* AIF2DAC_MUTERATE */ +#define WM8994_AIF2DAC_MUTERATE_SHIFT 5 /* AIF2DAC_MUTERATE */ +#define WM8994_AIF2DAC_MUTERATE_WIDTH 1 /* AIF2DAC_MUTERATE */ +#define WM8994_AIF2DAC_UNMUTE_RAMP 0x0010 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8994_AIF2DAC_UNMUTE_RAMP_MASK 0x0010 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8994_AIF2DAC_UNMUTE_RAMP_SHIFT 4 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8994_AIF2DAC_UNMUTE_RAMP_WIDTH 1 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8994_AIF2DAC_DEEMP_MASK 0x0006 /* AIF2DAC_DEEMP - [2:1] */ +#define WM8994_AIF2DAC_DEEMP_SHIFT 1 /* AIF2DAC_DEEMP - [2:1] */ +#define WM8994_AIF2DAC_DEEMP_WIDTH 2 /* AIF2DAC_DEEMP - [2:1] */ + +/* + * R1313 (0x521) - AIF2 DAC Filters (2) + */ +#define WM8994_AIF2DAC_3D_GAIN_MASK 0x3E00 /* AIF2DAC_3D_GAIN - [13:9] */ +#define WM8994_AIF2DAC_3D_GAIN_SHIFT 9 /* AIF2DAC_3D_GAIN - [13:9] */ +#define WM8994_AIF2DAC_3D_GAIN_WIDTH 5 /* AIF2DAC_3D_GAIN - [13:9] */ +#define WM8994_AIF2DAC_3D_ENA 0x0100 /* AIF2DAC_3D_ENA */ +#define WM8994_AIF2DAC_3D_ENA_MASK 0x0100 /* AIF2DAC_3D_ENA */ +#define WM8994_AIF2DAC_3D_ENA_SHIFT 8 /* AIF2DAC_3D_ENA */ +#define WM8994_AIF2DAC_3D_ENA_WIDTH 1 /* AIF2DAC_3D_ENA */ + +/* + * R1328 (0x530) - AIF2 DAC Noise Gate + */ +#define WM8958_AIF2DAC_NG_HLD_MASK 0x0060 /* AIF2DAC_NG_HLD - [6:5] */ +#define WM8958_AIF2DAC_NG_HLD_SHIFT 5 /* AIF2DAC_NG_HLD - [6:5] */ +#define WM8958_AIF2DAC_NG_HLD_WIDTH 2 /* AIF2DAC_NG_HLD - [6:5] */ +#define WM8958_AIF2DAC_NG_THR_MASK 0x000E /* 
AIF2DAC_NG_THR - [3:1] */ +#define WM8958_AIF2DAC_NG_THR_SHIFT 1 /* AIF2DAC_NG_THR - [3:1] */ +#define WM8958_AIF2DAC_NG_THR_WIDTH 3 /* AIF2DAC_NG_THR - [3:1] */ +#define WM8958_AIF2DAC_NG_ENA 0x0001 /* AIF2DAC_NG_ENA */ +#define WM8958_AIF2DAC_NG_ENA_MASK 0x0001 /* AIF2DAC_NG_ENA */ +#define WM8958_AIF2DAC_NG_ENA_SHIFT 0 /* AIF2DAC_NG_ENA */ +#define WM8958_AIF2DAC_NG_ENA_WIDTH 1 /* AIF2DAC_NG_ENA */ + +/* + * R1344 (0x540) - AIF2 DRC (1) + */ +#define WM8994_AIF2DRC_SIG_DET_RMS_MASK 0xF800 /* AIF2DRC_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF2DRC_SIG_DET_RMS_SHIFT 11 /* AIF2DRC_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF2DRC_SIG_DET_RMS_WIDTH 5 /* AIF2DRC_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF2DRC_SIG_DET_PK_MASK 0x0600 /* AIF2DRC_SIG_DET_PK - [10:9] */ +#define WM8994_AIF2DRC_SIG_DET_PK_SHIFT 9 /* AIF2DRC_SIG_DET_PK - [10:9] */ +#define WM8994_AIF2DRC_SIG_DET_PK_WIDTH 2 /* AIF2DRC_SIG_DET_PK - [10:9] */ +#define WM8994_AIF2DRC_NG_ENA 0x0100 /* AIF2DRC_NG_ENA */ +#define WM8994_AIF2DRC_NG_ENA_MASK 0x0100 /* AIF2DRC_NG_ENA */ +#define WM8994_AIF2DRC_NG_ENA_SHIFT 8 /* AIF2DRC_NG_ENA */ +#define WM8994_AIF2DRC_NG_ENA_WIDTH 1 /* AIF2DRC_NG_ENA */ +#define WM8994_AIF2DRC_SIG_DET_MODE 0x0080 /* AIF2DRC_SIG_DET_MODE */ +#define WM8994_AIF2DRC_SIG_DET_MODE_MASK 0x0080 /* AIF2DRC_SIG_DET_MODE */ +#define WM8994_AIF2DRC_SIG_DET_MODE_SHIFT 7 /* AIF2DRC_SIG_DET_MODE */ +#define WM8994_AIF2DRC_SIG_DET_MODE_WIDTH 1 /* AIF2DRC_SIG_DET_MODE */ +#define WM8994_AIF2DRC_SIG_DET 0x0040 /* AIF2DRC_SIG_DET */ +#define WM8994_AIF2DRC_SIG_DET_MASK 0x0040 /* AIF2DRC_SIG_DET */ +#define WM8994_AIF2DRC_SIG_DET_SHIFT 6 /* AIF2DRC_SIG_DET */ +#define WM8994_AIF2DRC_SIG_DET_WIDTH 1 /* AIF2DRC_SIG_DET */ +#define WM8994_AIF2DRC_KNEE2_OP_ENA 0x0020 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8994_AIF2DRC_KNEE2_OP_ENA_MASK 0x0020 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8994_AIF2DRC_KNEE2_OP_ENA_SHIFT 5 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8994_AIF2DRC_KNEE2_OP_ENA_WIDTH 1 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8994_AIF2DRC_QR 0x0010 /* AIF2DRC_QR */ +#define WM8994_AIF2DRC_QR_MASK 0x0010 /* AIF2DRC_QR */ +#define WM8994_AIF2DRC_QR_SHIFT 4 /* AIF2DRC_QR */ +#define WM8994_AIF2DRC_QR_WIDTH 1 /* AIF2DRC_QR */ +#define WM8994_AIF2DRC_ANTICLIP 0x0008 /* AIF2DRC_ANTICLIP */ +#define WM8994_AIF2DRC_ANTICLIP_MASK 0x0008 /* AIF2DRC_ANTICLIP */ +#define WM8994_AIF2DRC_ANTICLIP_SHIFT 3 /* AIF2DRC_ANTICLIP */ +#define WM8994_AIF2DRC_ANTICLIP_WIDTH 1 /* AIF2DRC_ANTICLIP */ +#define WM8994_AIF2DAC_DRC_ENA 0x0004 /* AIF2DAC_DRC_ENA */ +#define WM8994_AIF2DAC_DRC_ENA_MASK 0x0004 /* AIF2DAC_DRC_ENA */ +#define WM8994_AIF2DAC_DRC_ENA_SHIFT 2 /* AIF2DAC_DRC_ENA */ +#define WM8994_AIF2DAC_DRC_ENA_WIDTH 1 /* AIF2DAC_DRC_ENA */ +#define WM8994_AIF2ADCL_DRC_ENA 0x0002 /* AIF2ADCL_DRC_ENA */ +#define WM8994_AIF2ADCL_DRC_ENA_MASK 0x0002 /* AIF2ADCL_DRC_ENA */ +#define WM8994_AIF2ADCL_DRC_ENA_SHIFT 1 /* AIF2ADCL_DRC_ENA */ +#define WM8994_AIF2ADCL_DRC_ENA_WIDTH 1 /* AIF2ADCL_DRC_ENA */ +#define WM8994_AIF2ADCR_DRC_ENA 0x0001 /* AIF2ADCR_DRC_ENA */ +#define WM8994_AIF2ADCR_DRC_ENA_MASK 0x0001 /* AIF2ADCR_DRC_ENA */ +#define WM8994_AIF2ADCR_DRC_ENA_SHIFT 0 /* AIF2ADCR_DRC_ENA */ +#define WM8994_AIF2ADCR_DRC_ENA_WIDTH 1 /* AIF2ADCR_DRC_ENA */ + +/* + * R1345 (0x541) - AIF2 DRC (2) + */ +#define WM8994_AIF2DRC_ATK_MASK 0x1E00 /* AIF2DRC_ATK - [12:9] */ +#define WM8994_AIF2DRC_ATK_SHIFT 9 /* AIF2DRC_ATK - [12:9] */ +#define WM8994_AIF2DRC_ATK_WIDTH 4 /* AIF2DRC_ATK - [12:9] */ +#define WM8994_AIF2DRC_DCY_MASK 0x01E0 /* AIF2DRC_DCY - [8:5] */ 
+#define WM8994_AIF2DRC_DCY_SHIFT 5 /* AIF2DRC_DCY - [8:5] */ +#define WM8994_AIF2DRC_DCY_WIDTH 4 /* AIF2DRC_DCY - [8:5] */ +#define WM8994_AIF2DRC_MINGAIN_MASK 0x001C /* AIF2DRC_MINGAIN - [4:2] */ +#define WM8994_AIF2DRC_MINGAIN_SHIFT 2 /* AIF2DRC_MINGAIN - [4:2] */ +#define WM8994_AIF2DRC_MINGAIN_WIDTH 3 /* AIF2DRC_MINGAIN - [4:2] */ +#define WM8994_AIF2DRC_MAXGAIN_MASK 0x0003 /* AIF2DRC_MAXGAIN - [1:0] */ +#define WM8994_AIF2DRC_MAXGAIN_SHIFT 0 /* AIF2DRC_MAXGAIN - [1:0] */ +#define WM8994_AIF2DRC_MAXGAIN_WIDTH 2 /* AIF2DRC_MAXGAIN - [1:0] */ + +/* + * R1346 (0x542) - AIF2 DRC (3) + */ +#define WM8994_AIF2DRC_NG_MINGAIN_MASK 0xF000 /* AIF2DRC_NG_MINGAIN - [15:12] */ +#define WM8994_AIF2DRC_NG_MINGAIN_SHIFT 12 /* AIF2DRC_NG_MINGAIN - [15:12] */ +#define WM8994_AIF2DRC_NG_MINGAIN_WIDTH 4 /* AIF2DRC_NG_MINGAIN - [15:12] */ +#define WM8994_AIF2DRC_NG_EXP_MASK 0x0C00 /* AIF2DRC_NG_EXP - [11:10] */ +#define WM8994_AIF2DRC_NG_EXP_SHIFT 10 /* AIF2DRC_NG_EXP - [11:10] */ +#define WM8994_AIF2DRC_NG_EXP_WIDTH 2 /* AIF2DRC_NG_EXP - [11:10] */ +#define WM8994_AIF2DRC_QR_THR_MASK 0x0300 /* AIF2DRC_QR_THR - [9:8] */ +#define WM8994_AIF2DRC_QR_THR_SHIFT 8 /* AIF2DRC_QR_THR - [9:8] */ +#define WM8994_AIF2DRC_QR_THR_WIDTH 2 /* AIF2DRC_QR_THR - [9:8] */ +#define WM8994_AIF2DRC_QR_DCY_MASK 0x00C0 /* AIF2DRC_QR_DCY - [7:6] */ +#define WM8994_AIF2DRC_QR_DCY_SHIFT 6 /* AIF2DRC_QR_DCY - [7:6] */ +#define WM8994_AIF2DRC_QR_DCY_WIDTH 2 /* AIF2DRC_QR_DCY - [7:6] */ +#define WM8994_AIF2DRC_HI_COMP_MASK 0x0038 /* AIF2DRC_HI_COMP - [5:3] */ +#define WM8994_AIF2DRC_HI_COMP_SHIFT 3 /* AIF2DRC_HI_COMP - [5:3] */ +#define WM8994_AIF2DRC_HI_COMP_WIDTH 3 /* AIF2DRC_HI_COMP - [5:3] */ +#define WM8994_AIF2DRC_LO_COMP_MASK 0x0007 /* AIF2DRC_LO_COMP - [2:0] */ +#define WM8994_AIF2DRC_LO_COMP_SHIFT 0 /* AIF2DRC_LO_COMP - [2:0] */ +#define WM8994_AIF2DRC_LO_COMP_WIDTH 3 /* AIF2DRC_LO_COMP - [2:0] */ + +/* + * R1347 (0x543) - AIF2 DRC (4) + */ +#define WM8994_AIF2DRC_KNEE_IP_MASK 0x07E0 /* AIF2DRC_KNEE_IP - [10:5] */ +#define WM8994_AIF2DRC_KNEE_IP_SHIFT 5 /* AIF2DRC_KNEE_IP - [10:5] */ +#define WM8994_AIF2DRC_KNEE_IP_WIDTH 6 /* AIF2DRC_KNEE_IP - [10:5] */ +#define WM8994_AIF2DRC_KNEE_OP_MASK 0x001F /* AIF2DRC_KNEE_OP - [4:0] */ +#define WM8994_AIF2DRC_KNEE_OP_SHIFT 0 /* AIF2DRC_KNEE_OP - [4:0] */ +#define WM8994_AIF2DRC_KNEE_OP_WIDTH 5 /* AIF2DRC_KNEE_OP - [4:0] */ + +/* + * R1348 (0x544) - AIF2 DRC (5) + */ +#define WM8994_AIF2DRC_KNEE2_IP_MASK 0x03E0 /* AIF2DRC_KNEE2_IP - [9:5] */ +#define WM8994_AIF2DRC_KNEE2_IP_SHIFT 5 /* AIF2DRC_KNEE2_IP - [9:5] */ +#define WM8994_AIF2DRC_KNEE2_IP_WIDTH 5 /* AIF2DRC_KNEE2_IP - [9:5] */ +#define WM8994_AIF2DRC_KNEE2_OP_MASK 0x001F /* AIF2DRC_KNEE2_OP - [4:0] */ +#define WM8994_AIF2DRC_KNEE2_OP_SHIFT 0 /* AIF2DRC_KNEE2_OP - [4:0] */ +#define WM8994_AIF2DRC_KNEE2_OP_WIDTH 5 /* AIF2DRC_KNEE2_OP - [4:0] */ + +/* + * R1408 (0x580) - AIF2 EQ Gains (1) + */ +#define WM8994_AIF2DAC_EQ_B1_GAIN_MASK 0xF800 /* AIF2DAC_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B1_GAIN_SHIFT 11 /* AIF2DAC_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B1_GAIN_WIDTH 5 /* AIF2DAC_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B2_GAIN_MASK 0x07C0 /* AIF2DAC_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF2DAC_EQ_B2_GAIN_SHIFT 6 /* AIF2DAC_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF2DAC_EQ_B2_GAIN_WIDTH 5 /* AIF2DAC_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF2DAC_EQ_B3_GAIN_MASK 0x003E /* AIF2DAC_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF2DAC_EQ_B3_GAIN_SHIFT 1 /* AIF2DAC_EQ_B3_GAIN - [5:1] */ +#define 
WM8994_AIF2DAC_EQ_B3_GAIN_WIDTH 5 /* AIF2DAC_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF2DAC_EQ_ENA 0x0001 /* AIF2DAC_EQ_ENA */ +#define WM8994_AIF2DAC_EQ_ENA_MASK 0x0001 /* AIF2DAC_EQ_ENA */ +#define WM8994_AIF2DAC_EQ_ENA_SHIFT 0 /* AIF2DAC_EQ_ENA */ +#define WM8994_AIF2DAC_EQ_ENA_WIDTH 1 /* AIF2DAC_EQ_ENA */ + +/* + * R1409 (0x581) - AIF2 EQ Gains (2) + */ +#define WM8994_AIF2DAC_EQ_B4_GAIN_MASK 0xF800 /* AIF2DAC_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B4_GAIN_SHIFT 11 /* AIF2DAC_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B4_GAIN_WIDTH 5 /* AIF2DAC_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B5_GAIN_MASK 0x07C0 /* AIF2DAC_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF2DAC_EQ_B5_GAIN_SHIFT 6 /* AIF2DAC_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF2DAC_EQ_B5_GAIN_WIDTH 5 /* AIF2DAC_EQ_B5_GAIN - [10:6] */ + +/* + * R1410 (0x582) - AIF2 EQ Band 1 A + */ +#define WM8994_AIF2DAC_EQ_B1_A_MASK 0xFFFF /* AIF2DAC_EQ_B1_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_A_SHIFT 0 /* AIF2DAC_EQ_B1_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_A_WIDTH 16 /* AIF2DAC_EQ_B1_A - [15:0] */ + +/* + * R1411 (0x583) - AIF2 EQ Band 1 B + */ +#define WM8994_AIF2DAC_EQ_B1_B_MASK 0xFFFF /* AIF2DAC_EQ_B1_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_B_SHIFT 0 /* AIF2DAC_EQ_B1_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_B_WIDTH 16 /* AIF2DAC_EQ_B1_B - [15:0] */ + +/* + * R1412 (0x584) - AIF2 EQ Band 1 PG + */ +#define WM8994_AIF2DAC_EQ_B1_PG_MASK 0xFFFF /* AIF2DAC_EQ_B1_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_PG_SHIFT 0 /* AIF2DAC_EQ_B1_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_PG_WIDTH 16 /* AIF2DAC_EQ_B1_PG - [15:0] */ + +/* + * R1413 (0x585) - AIF2 EQ Band 2 A + */ +#define WM8994_AIF2DAC_EQ_B2_A_MASK 0xFFFF /* AIF2DAC_EQ_B2_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_A_SHIFT 0 /* AIF2DAC_EQ_B2_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_A_WIDTH 16 /* AIF2DAC_EQ_B2_A - [15:0] */ + +/* + * R1414 (0x586) - AIF2 EQ Band 2 B + */ +#define WM8994_AIF2DAC_EQ_B2_B_MASK 0xFFFF /* AIF2DAC_EQ_B2_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_B_SHIFT 0 /* AIF2DAC_EQ_B2_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_B_WIDTH 16 /* AIF2DAC_EQ_B2_B - [15:0] */ + +/* + * R1415 (0x587) - AIF2 EQ Band 2 C + */ +#define WM8994_AIF2DAC_EQ_B2_C_MASK 0xFFFF /* AIF2DAC_EQ_B2_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_C_SHIFT 0 /* AIF2DAC_EQ_B2_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_C_WIDTH 16 /* AIF2DAC_EQ_B2_C - [15:0] */ + +/* + * R1416 (0x588) - AIF2 EQ Band 2 PG + */ +#define WM8994_AIF2DAC_EQ_B2_PG_MASK 0xFFFF /* AIF2DAC_EQ_B2_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_PG_SHIFT 0 /* AIF2DAC_EQ_B2_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_PG_WIDTH 16 /* AIF2DAC_EQ_B2_PG - [15:0] */ + +/* + * R1417 (0x589) - AIF2 EQ Band 3 A + */ +#define WM8994_AIF2DAC_EQ_B3_A_MASK 0xFFFF /* AIF2DAC_EQ_B3_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_A_SHIFT 0 /* AIF2DAC_EQ_B3_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_A_WIDTH 16 /* AIF2DAC_EQ_B3_A - [15:0] */ + +/* + * R1418 (0x58A) - AIF2 EQ Band 3 B + */ +#define WM8994_AIF2DAC_EQ_B3_B_MASK 0xFFFF /* AIF2DAC_EQ_B3_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_B_SHIFT 0 /* AIF2DAC_EQ_B3_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_B_WIDTH 16 /* AIF2DAC_EQ_B3_B - [15:0] */ + +/* + * R1419 (0x58B) - AIF2 EQ Band 3 C + */ +#define WM8994_AIF2DAC_EQ_B3_C_MASK 0xFFFF /* AIF2DAC_EQ_B3_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_C_SHIFT 0 /* AIF2DAC_EQ_B3_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_C_WIDTH 16 /* AIF2DAC_EQ_B3_C - [15:0] */ + +/* + * R1420 (0x58C) - AIF2 EQ Band 3 PG + 
*/ +#define WM8994_AIF2DAC_EQ_B3_PG_MASK 0xFFFF /* AIF2DAC_EQ_B3_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_PG_SHIFT 0 /* AIF2DAC_EQ_B3_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_PG_WIDTH 16 /* AIF2DAC_EQ_B3_PG - [15:0] */ + +/* + * R1421 (0x58D) - AIF2 EQ Band 4 A + */ +#define WM8994_AIF2DAC_EQ_B4_A_MASK 0xFFFF /* AIF2DAC_EQ_B4_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_A_SHIFT 0 /* AIF2DAC_EQ_B4_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_A_WIDTH 16 /* AIF2DAC_EQ_B4_A - [15:0] */ + +/* + * R1422 (0x58E) - AIF2 EQ Band 4 B + */ +#define WM8994_AIF2DAC_EQ_B4_B_MASK 0xFFFF /* AIF2DAC_EQ_B4_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_B_SHIFT 0 /* AIF2DAC_EQ_B4_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_B_WIDTH 16 /* AIF2DAC_EQ_B4_B - [15:0] */ + +/* + * R1423 (0x58F) - AIF2 EQ Band 4 C + */ +#define WM8994_AIF2DAC_EQ_B4_C_MASK 0xFFFF /* AIF2DAC_EQ_B4_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_C_SHIFT 0 /* AIF2DAC_EQ_B4_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_C_WIDTH 16 /* AIF2DAC_EQ_B4_C - [15:0] */ + +/* + * R1424 (0x590) - AIF2 EQ Band 4 PG + */ +#define WM8994_AIF2DAC_EQ_B4_PG_MASK 0xFFFF /* AIF2DAC_EQ_B4_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_PG_SHIFT 0 /* AIF2DAC_EQ_B4_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_PG_WIDTH 16 /* AIF2DAC_EQ_B4_PG - [15:0] */ + +/* + * R1425 (0x591) - AIF2 EQ Band 5 A + */ +#define WM8994_AIF2DAC_EQ_B5_A_MASK 0xFFFF /* AIF2DAC_EQ_B5_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_A_SHIFT 0 /* AIF2DAC_EQ_B5_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_A_WIDTH 16 /* AIF2DAC_EQ_B5_A - [15:0] */ + +/* + * R1426 (0x592) - AIF2 EQ Band 5 B + */ +#define WM8994_AIF2DAC_EQ_B5_B_MASK 0xFFFF /* AIF2DAC_EQ_B5_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_B_SHIFT 0 /* AIF2DAC_EQ_B5_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_B_WIDTH 16 /* AIF2DAC_EQ_B5_B - [15:0] */ + +/* + * R1427 (0x593) - AIF2 EQ Band 5 PG + */ +#define WM8994_AIF2DAC_EQ_B5_PG_MASK 0xFFFF /* AIF2DAC_EQ_B5_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_PG_SHIFT 0 /* AIF2DAC_EQ_B5_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_PG_WIDTH 16 /* AIF2DAC_EQ_B5_PG - [15:0] */ + +/* + * R1536 (0x600) - DAC1 Mixer Volumes + */ +#define WM8994_ADCR_DAC1_VOL_MASK 0x01E0 /* ADCR_DAC1_VOL - [8:5] */ +#define WM8994_ADCR_DAC1_VOL_SHIFT 5 /* ADCR_DAC1_VOL - [8:5] */ +#define WM8994_ADCR_DAC1_VOL_WIDTH 4 /* ADCR_DAC1_VOL - [8:5] */ +#define WM8994_ADCL_DAC1_VOL_MASK 0x000F /* ADCL_DAC1_VOL - [3:0] */ +#define WM8994_ADCL_DAC1_VOL_SHIFT 0 /* ADCL_DAC1_VOL - [3:0] */ +#define WM8994_ADCL_DAC1_VOL_WIDTH 4 /* ADCL_DAC1_VOL - [3:0] */ + +/* + * R1537 (0x601) - DAC1 Left Mixer Routing + */ +#define WM8994_ADCR_TO_DAC1L 0x0020 /* ADCR_TO_DAC1L */ +#define WM8994_ADCR_TO_DAC1L_MASK 0x0020 /* ADCR_TO_DAC1L */ +#define WM8994_ADCR_TO_DAC1L_SHIFT 5 /* ADCR_TO_DAC1L */ +#define WM8994_ADCR_TO_DAC1L_WIDTH 1 /* ADCR_TO_DAC1L */ +#define WM8994_ADCL_TO_DAC1L 0x0010 /* ADCL_TO_DAC1L */ +#define WM8994_ADCL_TO_DAC1L_MASK 0x0010 /* ADCL_TO_DAC1L */ +#define WM8994_ADCL_TO_DAC1L_SHIFT 4 /* ADCL_TO_DAC1L */ +#define WM8994_ADCL_TO_DAC1L_WIDTH 1 /* ADCL_TO_DAC1L */ +#define WM8994_AIF2DACL_TO_DAC1L 0x0004 /* AIF2DACL_TO_DAC1L */ +#define WM8994_AIF2DACL_TO_DAC1L_MASK 0x0004 /* AIF2DACL_TO_DAC1L */ +#define WM8994_AIF2DACL_TO_DAC1L_SHIFT 2 /* AIF2DACL_TO_DAC1L */ +#define WM8994_AIF2DACL_TO_DAC1L_WIDTH 1 /* AIF2DACL_TO_DAC1L */ +#define WM8994_AIF1DAC2L_TO_DAC1L 0x0002 /* AIF1DAC2L_TO_DAC1L */ +#define WM8994_AIF1DAC2L_TO_DAC1L_MASK 0x0002 /* AIF1DAC2L_TO_DAC1L */ +#define WM8994_AIF1DAC2L_TO_DAC1L_SHIFT 1 /* AIF1DAC2L_TO_DAC1L 
*/ +#define WM8994_AIF1DAC2L_TO_DAC1L_WIDTH 1 /* AIF1DAC2L_TO_DAC1L */ +#define WM8994_AIF1DAC1L_TO_DAC1L 0x0001 /* AIF1DAC1L_TO_DAC1L */ +#define WM8994_AIF1DAC1L_TO_DAC1L_MASK 0x0001 /* AIF1DAC1L_TO_DAC1L */ +#define WM8994_AIF1DAC1L_TO_DAC1L_SHIFT 0 /* AIF1DAC1L_TO_DAC1L */ +#define WM8994_AIF1DAC1L_TO_DAC1L_WIDTH 1 /* AIF1DAC1L_TO_DAC1L */ + +/* + * R1538 (0x602) - DAC1 Right Mixer Routing + */ +#define WM8994_ADCR_TO_DAC1R 0x0020 /* ADCR_TO_DAC1R */ +#define WM8994_ADCR_TO_DAC1R_MASK 0x0020 /* ADCR_TO_DAC1R */ +#define WM8994_ADCR_TO_DAC1R_SHIFT 5 /* ADCR_TO_DAC1R */ +#define WM8994_ADCR_TO_DAC1R_WIDTH 1 /* ADCR_TO_DAC1R */ +#define WM8994_ADCL_TO_DAC1R 0x0010 /* ADCL_TO_DAC1R */ +#define WM8994_ADCL_TO_DAC1R_MASK 0x0010 /* ADCL_TO_DAC1R */ +#define WM8994_ADCL_TO_DAC1R_SHIFT 4 /* ADCL_TO_DAC1R */ +#define WM8994_ADCL_TO_DAC1R_WIDTH 1 /* ADCL_TO_DAC1R */ +#define WM8994_AIF2DACR_TO_DAC1R 0x0004 /* AIF2DACR_TO_DAC1R */ +#define WM8994_AIF2DACR_TO_DAC1R_MASK 0x0004 /* AIF2DACR_TO_DAC1R */ +#define WM8994_AIF2DACR_TO_DAC1R_SHIFT 2 /* AIF2DACR_TO_DAC1R */ +#define WM8994_AIF2DACR_TO_DAC1R_WIDTH 1 /* AIF2DACR_TO_DAC1R */ +#define WM8994_AIF1DAC2R_TO_DAC1R 0x0002 /* AIF1DAC2R_TO_DAC1R */ +#define WM8994_AIF1DAC2R_TO_DAC1R_MASK 0x0002 /* AIF1DAC2R_TO_DAC1R */ +#define WM8994_AIF1DAC2R_TO_DAC1R_SHIFT 1 /* AIF1DAC2R_TO_DAC1R */ +#define WM8994_AIF1DAC2R_TO_DAC1R_WIDTH 1 /* AIF1DAC2R_TO_DAC1R */ +#define WM8994_AIF1DAC1R_TO_DAC1R 0x0001 /* AIF1DAC1R_TO_DAC1R */ +#define WM8994_AIF1DAC1R_TO_DAC1R_MASK 0x0001 /* AIF1DAC1R_TO_DAC1R */ +#define WM8994_AIF1DAC1R_TO_DAC1R_SHIFT 0 /* AIF1DAC1R_TO_DAC1R */ +#define WM8994_AIF1DAC1R_TO_DAC1R_WIDTH 1 /* AIF1DAC1R_TO_DAC1R */ + +/* + * R1539 (0x603) - DAC2 Mixer Volumes + */ +#define WM8994_ADCR_DAC2_VOL_MASK 0x01E0 /* ADCR_DAC2_VOL - [8:5] */ +#define WM8994_ADCR_DAC2_VOL_SHIFT 5 /* ADCR_DAC2_VOL - [8:5] */ +#define WM8994_ADCR_DAC2_VOL_WIDTH 4 /* ADCR_DAC2_VOL - [8:5] */ +#define WM8994_ADCL_DAC2_VOL_MASK 0x000F /* ADCL_DAC2_VOL - [3:0] */ +#define WM8994_ADCL_DAC2_VOL_SHIFT 0 /* ADCL_DAC2_VOL - [3:0] */ +#define WM8994_ADCL_DAC2_VOL_WIDTH 4 /* ADCL_DAC2_VOL - [3:0] */ + +/* + * R1540 (0x604) - DAC2 Left Mixer Routing + */ +#define WM8994_ADCR_TO_DAC2L 0x0020 /* ADCR_TO_DAC2L */ +#define WM8994_ADCR_TO_DAC2L_MASK 0x0020 /* ADCR_TO_DAC2L */ +#define WM8994_ADCR_TO_DAC2L_SHIFT 5 /* ADCR_TO_DAC2L */ +#define WM8994_ADCR_TO_DAC2L_WIDTH 1 /* ADCR_TO_DAC2L */ +#define WM8994_ADCL_TO_DAC2L 0x0010 /* ADCL_TO_DAC2L */ +#define WM8994_ADCL_TO_DAC2L_MASK 0x0010 /* ADCL_TO_DAC2L */ +#define WM8994_ADCL_TO_DAC2L_SHIFT 4 /* ADCL_TO_DAC2L */ +#define WM8994_ADCL_TO_DAC2L_WIDTH 1 /* ADCL_TO_DAC2L */ +#define WM8994_AIF2DACL_TO_DAC2L 0x0004 /* AIF2DACL_TO_DAC2L */ +#define WM8994_AIF2DACL_TO_DAC2L_MASK 0x0004 /* AIF2DACL_TO_DAC2L */ +#define WM8994_AIF2DACL_TO_DAC2L_SHIFT 2 /* AIF2DACL_TO_DAC2L */ +#define WM8994_AIF2DACL_TO_DAC2L_WIDTH 1 /* AIF2DACL_TO_DAC2L */ +#define WM8994_AIF1DAC2L_TO_DAC2L 0x0002 /* AIF1DAC2L_TO_DAC2L */ +#define WM8994_AIF1DAC2L_TO_DAC2L_MASK 0x0002 /* AIF1DAC2L_TO_DAC2L */ +#define WM8994_AIF1DAC2L_TO_DAC2L_SHIFT 1 /* AIF1DAC2L_TO_DAC2L */ +#define WM8994_AIF1DAC2L_TO_DAC2L_WIDTH 1 /* AIF1DAC2L_TO_DAC2L */ +#define WM8994_AIF1DAC1L_TO_DAC2L 0x0001 /* AIF1DAC1L_TO_DAC2L */ +#define WM8994_AIF1DAC1L_TO_DAC2L_MASK 0x0001 /* AIF1DAC1L_TO_DAC2L */ +#define WM8994_AIF1DAC1L_TO_DAC2L_SHIFT 0 /* AIF1DAC1L_TO_DAC2L */ +#define WM8994_AIF1DAC1L_TO_DAC2L_WIDTH 1 /* AIF1DAC1L_TO_DAC2L */ + +/* + * R1541 (0x605) - DAC2 Right Mixer Routing + */ +#define 
WM8994_ADCR_TO_DAC2R 0x0020 /* ADCR_TO_DAC2R */ +#define WM8994_ADCR_TO_DAC2R_MASK 0x0020 /* ADCR_TO_DAC2R */ +#define WM8994_ADCR_TO_DAC2R_SHIFT 5 /* ADCR_TO_DAC2R */ +#define WM8994_ADCR_TO_DAC2R_WIDTH 1 /* ADCR_TO_DAC2R */ +#define WM8994_ADCL_TO_DAC2R 0x0010 /* ADCL_TO_DAC2R */ +#define WM8994_ADCL_TO_DAC2R_MASK 0x0010 /* ADCL_TO_DAC2R */ +#define WM8994_ADCL_TO_DAC2R_SHIFT 4 /* ADCL_TO_DAC2R */ +#define WM8994_ADCL_TO_DAC2R_WIDTH 1 /* ADCL_TO_DAC2R */ +#define WM8994_AIF2DACR_TO_DAC2R 0x0004 /* AIF2DACR_TO_DAC2R */ +#define WM8994_AIF2DACR_TO_DAC2R_MASK 0x0004 /* AIF2DACR_TO_DAC2R */ +#define WM8994_AIF2DACR_TO_DAC2R_SHIFT 2 /* AIF2DACR_TO_DAC2R */ +#define WM8994_AIF2DACR_TO_DAC2R_WIDTH 1 /* AIF2DACR_TO_DAC2R */ +#define WM8994_AIF1DAC2R_TO_DAC2R 0x0002 /* AIF1DAC2R_TO_DAC2R */ +#define WM8994_AIF1DAC2R_TO_DAC2R_MASK 0x0002 /* AIF1DAC2R_TO_DAC2R */ +#define WM8994_AIF1DAC2R_TO_DAC2R_SHIFT 1 /* AIF1DAC2R_TO_DAC2R */ +#define WM8994_AIF1DAC2R_TO_DAC2R_WIDTH 1 /* AIF1DAC2R_TO_DAC2R */ +#define WM8994_AIF1DAC1R_TO_DAC2R 0x0001 /* AIF1DAC1R_TO_DAC2R */ +#define WM8994_AIF1DAC1R_TO_DAC2R_MASK 0x0001 /* AIF1DAC1R_TO_DAC2R */ +#define WM8994_AIF1DAC1R_TO_DAC2R_SHIFT 0 /* AIF1DAC1R_TO_DAC2R */ +#define WM8994_AIF1DAC1R_TO_DAC2R_WIDTH 1 /* AIF1DAC1R_TO_DAC2R */ + +/* + * R1542 (0x606) - AIF1 ADC1 Left Mixer Routing + */ +#define WM8994_ADC1L_TO_AIF1ADC1L 0x0002 /* ADC1L_TO_AIF1ADC1L */ +#define WM8994_ADC1L_TO_AIF1ADC1L_MASK 0x0002 /* ADC1L_TO_AIF1ADC1L */ +#define WM8994_ADC1L_TO_AIF1ADC1L_SHIFT 1 /* ADC1L_TO_AIF1ADC1L */ +#define WM8994_ADC1L_TO_AIF1ADC1L_WIDTH 1 /* ADC1L_TO_AIF1ADC1L */ +#define WM8994_AIF2DACL_TO_AIF1ADC1L 0x0001 /* AIF2DACL_TO_AIF1ADC1L */ +#define WM8994_AIF2DACL_TO_AIF1ADC1L_MASK 0x0001 /* AIF2DACL_TO_AIF1ADC1L */ +#define WM8994_AIF2DACL_TO_AIF1ADC1L_SHIFT 0 /* AIF2DACL_TO_AIF1ADC1L */ +#define WM8994_AIF2DACL_TO_AIF1ADC1L_WIDTH 1 /* AIF2DACL_TO_AIF1ADC1L */ + +/* + * R1543 (0x607) - AIF1 ADC1 Right Mixer Routing + */ +#define WM8994_ADC1R_TO_AIF1ADC1R 0x0002 /* ADC1R_TO_AIF1ADC1R */ +#define WM8994_ADC1R_TO_AIF1ADC1R_MASK 0x0002 /* ADC1R_TO_AIF1ADC1R */ +#define WM8994_ADC1R_TO_AIF1ADC1R_SHIFT 1 /* ADC1R_TO_AIF1ADC1R */ +#define WM8994_ADC1R_TO_AIF1ADC1R_WIDTH 1 /* ADC1R_TO_AIF1ADC1R */ +#define WM8994_AIF2DACR_TO_AIF1ADC1R 0x0001 /* AIF2DACR_TO_AIF1ADC1R */ +#define WM8994_AIF2DACR_TO_AIF1ADC1R_MASK 0x0001 /* AIF2DACR_TO_AIF1ADC1R */ +#define WM8994_AIF2DACR_TO_AIF1ADC1R_SHIFT 0 /* AIF2DACR_TO_AIF1ADC1R */ +#define WM8994_AIF2DACR_TO_AIF1ADC1R_WIDTH 1 /* AIF2DACR_TO_AIF1ADC1R */ + +/* + * R1544 (0x608) - AIF1 ADC2 Left Mixer Routing + */ +#define WM8994_ADC2L_TO_AIF1ADC2L 0x0002 /* ADC2L_TO_AIF1ADC2L */ +#define WM8994_ADC2L_TO_AIF1ADC2L_MASK 0x0002 /* ADC2L_TO_AIF1ADC2L */ +#define WM8994_ADC2L_TO_AIF1ADC2L_SHIFT 1 /* ADC2L_TO_AIF1ADC2L */ +#define WM8994_ADC2L_TO_AIF1ADC2L_WIDTH 1 /* ADC2L_TO_AIF1ADC2L */ +#define WM8994_AIF2DACL_TO_AIF1ADC2L 0x0001 /* AIF2DACL_TO_AIF1ADC2L */ +#define WM8994_AIF2DACL_TO_AIF1ADC2L_MASK 0x0001 /* AIF2DACL_TO_AIF1ADC2L */ +#define WM8994_AIF2DACL_TO_AIF1ADC2L_SHIFT 0 /* AIF2DACL_TO_AIF1ADC2L */ +#define WM8994_AIF2DACL_TO_AIF1ADC2L_WIDTH 1 /* AIF2DACL_TO_AIF1ADC2L */ + +/* + * R1545 (0x609) - AIF1 ADC2 Right mixer Routing + */ +#define WM8994_ADC2R_TO_AIF1ADC2R 0x0002 /* ADC2R_TO_AIF1ADC2R */ +#define WM8994_ADC2R_TO_AIF1ADC2R_MASK 0x0002 /* ADC2R_TO_AIF1ADC2R */ +#define WM8994_ADC2R_TO_AIF1ADC2R_SHIFT 1 /* ADC2R_TO_AIF1ADC2R */ +#define WM8994_ADC2R_TO_AIF1ADC2R_WIDTH 1 /* ADC2R_TO_AIF1ADC2R */ +#define WM8994_AIF2DACR_TO_AIF1ADC2R 
0x0001 /* AIF2DACR_TO_AIF1ADC2R */ +#define WM8994_AIF2DACR_TO_AIF1ADC2R_MASK 0x0001 /* AIF2DACR_TO_AIF1ADC2R */ +#define WM8994_AIF2DACR_TO_AIF1ADC2R_SHIFT 0 /* AIF2DACR_TO_AIF1ADC2R */ +#define WM8994_AIF2DACR_TO_AIF1ADC2R_WIDTH 1 /* AIF2DACR_TO_AIF1ADC2R */ + +/* + * R1552 (0x610) - DAC1 Left Volume + */ +#define WM8994_DAC1L_MUTE 0x0200 /* DAC1L_MUTE */ +#define WM8994_DAC1L_MUTE_MASK 0x0200 /* DAC1L_MUTE */ +#define WM8994_DAC1L_MUTE_SHIFT 9 /* DAC1L_MUTE */ +#define WM8994_DAC1L_MUTE_WIDTH 1 /* DAC1L_MUTE */ +#define WM8994_DAC1_VU 0x0100 /* DAC1_VU */ +#define WM8994_DAC1_VU_MASK 0x0100 /* DAC1_VU */ +#define WM8994_DAC1_VU_SHIFT 8 /* DAC1_VU */ +#define WM8994_DAC1_VU_WIDTH 1 /* DAC1_VU */ +#define WM8994_DAC1L_VOL_MASK 0x00FF /* DAC1L_VOL - [7:0] */ +#define WM8994_DAC1L_VOL_SHIFT 0 /* DAC1L_VOL - [7:0] */ +#define WM8994_DAC1L_VOL_WIDTH 8 /* DAC1L_VOL - [7:0] */ + +/* + * R1553 (0x611) - DAC1 Right Volume + */ +#define WM8994_DAC1R_MUTE 0x0200 /* DAC1R_MUTE */ +#define WM8994_DAC1R_MUTE_MASK 0x0200 /* DAC1R_MUTE */ +#define WM8994_DAC1R_MUTE_SHIFT 9 /* DAC1R_MUTE */ +#define WM8994_DAC1R_MUTE_WIDTH 1 /* DAC1R_MUTE */ +#define WM8994_DAC1_VU 0x0100 /* DAC1_VU */ +#define WM8994_DAC1_VU_MASK 0x0100 /* DAC1_VU */ +#define WM8994_DAC1_VU_SHIFT 8 /* DAC1_VU */ +#define WM8994_DAC1_VU_WIDTH 1 /* DAC1_VU */ +#define WM8994_DAC1R_VOL_MASK 0x00FF /* DAC1R_VOL - [7:0] */ +#define WM8994_DAC1R_VOL_SHIFT 0 /* DAC1R_VOL - [7:0] */ +#define WM8994_DAC1R_VOL_WIDTH 8 /* DAC1R_VOL - [7:0] */ + +/* + * R1554 (0x612) - DAC2 Left Volume + */ +#define WM8994_DAC2L_MUTE 0x0200 /* DAC2L_MUTE */ +#define WM8994_DAC2L_MUTE_MASK 0x0200 /* DAC2L_MUTE */ +#define WM8994_DAC2L_MUTE_SHIFT 9 /* DAC2L_MUTE */ +#define WM8994_DAC2L_MUTE_WIDTH 1 /* DAC2L_MUTE */ +#define WM8994_DAC2_VU 0x0100 /* DAC2_VU */ +#define WM8994_DAC2_VU_MASK 0x0100 /* DAC2_VU */ +#define WM8994_DAC2_VU_SHIFT 8 /* DAC2_VU */ +#define WM8994_DAC2_VU_WIDTH 1 /* DAC2_VU */ +#define WM8994_DAC2L_VOL_MASK 0x00FF /* DAC2L_VOL - [7:0] */ +#define WM8994_DAC2L_VOL_SHIFT 0 /* DAC2L_VOL - [7:0] */ +#define WM8994_DAC2L_VOL_WIDTH 8 /* DAC2L_VOL - [7:0] */ + +/* + * R1555 (0x613) - DAC2 Right Volume + */ +#define WM8994_DAC2R_MUTE 0x0200 /* DAC2R_MUTE */ +#define WM8994_DAC2R_MUTE_MASK 0x0200 /* DAC2R_MUTE */ +#define WM8994_DAC2R_MUTE_SHIFT 9 /* DAC2R_MUTE */ +#define WM8994_DAC2R_MUTE_WIDTH 1 /* DAC2R_MUTE */ +#define WM8994_DAC2_VU 0x0100 /* DAC2_VU */ +#define WM8994_DAC2_VU_MASK 0x0100 /* DAC2_VU */ +#define WM8994_DAC2_VU_SHIFT 8 /* DAC2_VU */ +#define WM8994_DAC2_VU_WIDTH 1 /* DAC2_VU */ +#define WM8994_DAC2R_VOL_MASK 0x00FF /* DAC2R_VOL - [7:0] */ +#define WM8994_DAC2R_VOL_SHIFT 0 /* DAC2R_VOL - [7:0] */ +#define WM8994_DAC2R_VOL_WIDTH 8 /* DAC2R_VOL - [7:0] */ + +/* + * R1556 (0x614) - DAC Softmute + */ +#define WM8994_DAC_SOFTMUTEMODE 0x0002 /* DAC_SOFTMUTEMODE */ +#define WM8994_DAC_SOFTMUTEMODE_MASK 0x0002 /* DAC_SOFTMUTEMODE */ +#define WM8994_DAC_SOFTMUTEMODE_SHIFT 1 /* DAC_SOFTMUTEMODE */ +#define WM8994_DAC_SOFTMUTEMODE_WIDTH 1 /* DAC_SOFTMUTEMODE */ +#define WM8994_DAC_MUTERATE 0x0001 /* DAC_MUTERATE */ +#define WM8994_DAC_MUTERATE_MASK 0x0001 /* DAC_MUTERATE */ +#define WM8994_DAC_MUTERATE_SHIFT 0 /* DAC_MUTERATE */ +#define WM8994_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */ + +/* + * R1568 (0x620) - Oversampling + */ +#define WM8994_ADC_OSR128 0x0002 /* ADC_OSR128 */ +#define WM8994_ADC_OSR128_MASK 0x0002 /* ADC_OSR128 */ +#define WM8994_ADC_OSR128_SHIFT 1 /* ADC_OSR128 */ +#define WM8994_ADC_OSR128_WIDTH 1 /* ADC_OSR128 */ +#define 
WM8994_DAC_OSR128 0x0001 /* DAC_OSR128 */ +#define WM8994_DAC_OSR128_MASK 0x0001 /* DAC_OSR128 */ +#define WM8994_DAC_OSR128_SHIFT 0 /* DAC_OSR128 */ +#define WM8994_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */ + +/* + * R1569 (0x621) - Sidetone + */ +#define WM8994_ST_HPF_CUT_MASK 0x0380 /* ST_HPF_CUT - [9:7] */ +#define WM8994_ST_HPF_CUT_SHIFT 7 /* ST_HPF_CUT - [9:7] */ +#define WM8994_ST_HPF_CUT_WIDTH 3 /* ST_HPF_CUT - [9:7] */ +#define WM8994_ST_HPF 0x0040 /* ST_HPF */ +#define WM8994_ST_HPF_MASK 0x0040 /* ST_HPF */ +#define WM8994_ST_HPF_SHIFT 6 /* ST_HPF */ +#define WM8994_ST_HPF_WIDTH 1 /* ST_HPF */ +#define WM8994_STR_SEL 0x0002 /* STR_SEL */ +#define WM8994_STR_SEL_MASK 0x0002 /* STR_SEL */ +#define WM8994_STR_SEL_SHIFT 1 /* STR_SEL */ +#define WM8994_STR_SEL_WIDTH 1 /* STR_SEL */ +#define WM8994_STL_SEL 0x0001 /* STL_SEL */ +#define WM8994_STL_SEL_MASK 0x0001 /* STL_SEL */ +#define WM8994_STL_SEL_SHIFT 0 /* STL_SEL */ +#define WM8994_STL_SEL_WIDTH 1 /* STL_SEL */ + +/* + * R1797 (0x705) - JACKDET Ctrl + */ +#define WM1811_JACKDET_DB 0x0100 /* JACKDET_DB */ +#define WM1811_JACKDET_DB_MASK 0x0100 /* JACKDET_DB */ +#define WM1811_JACKDET_DB_SHIFT 8 /* JACKDET_DB */ +#define WM1811_JACKDET_DB_WIDTH 1 /* JACKDET_DB */ +#define WM1811_JACKDET_LVL 0x0040 /* JACKDET_LVL */ +#define WM1811_JACKDET_LVL_MASK 0x0040 /* JACKDET_LVL */ +#define WM1811_JACKDET_LVL_SHIFT 6 /* JACKDET_LVL */ +#define WM1811_JACKDET_LVL_WIDTH 1 /* JACKDET_LVL */ + +/* + * R1824 (0x720) - Pull Control (1) + */ +#define WM8994_DMICDAT2_PU 0x0800 /* DMICDAT2_PU */ +#define WM8994_DMICDAT2_PU_MASK 0x0800 /* DMICDAT2_PU */ +#define WM8994_DMICDAT2_PU_SHIFT 11 /* DMICDAT2_PU */ +#define WM8994_DMICDAT2_PU_WIDTH 1 /* DMICDAT2_PU */ +#define WM8994_DMICDAT2_PD 0x0400 /* DMICDAT2_PD */ +#define WM8994_DMICDAT2_PD_MASK 0x0400 /* DMICDAT2_PD */ +#define WM8994_DMICDAT2_PD_SHIFT 10 /* DMICDAT2_PD */ +#define WM8994_DMICDAT2_PD_WIDTH 1 /* DMICDAT2_PD */ +#define WM8994_DMICDAT1_PU 0x0200 /* DMICDAT1_PU */ +#define WM8994_DMICDAT1_PU_MASK 0x0200 /* DMICDAT1_PU */ +#define WM8994_DMICDAT1_PU_SHIFT 9 /* DMICDAT1_PU */ +#define WM8994_DMICDAT1_PU_WIDTH 1 /* DMICDAT1_PU */ +#define WM8994_DMICDAT1_PD 0x0100 /* DMICDAT1_PD */ +#define WM8994_DMICDAT1_PD_MASK 0x0100 /* DMICDAT1_PD */ +#define WM8994_DMICDAT1_PD_SHIFT 8 /* DMICDAT1_PD */ +#define WM8994_DMICDAT1_PD_WIDTH 1 /* DMICDAT1_PD */ +#define WM8994_MCLK1_PU 0x0080 /* MCLK1_PU */ +#define WM8994_MCLK1_PU_MASK 0x0080 /* MCLK1_PU */ +#define WM8994_MCLK1_PU_SHIFT 7 /* MCLK1_PU */ +#define WM8994_MCLK1_PU_WIDTH 1 /* MCLK1_PU */ +#define WM8994_MCLK1_PD 0x0040 /* MCLK1_PD */ +#define WM8994_MCLK1_PD_MASK 0x0040 /* MCLK1_PD */ +#define WM8994_MCLK1_PD_SHIFT 6 /* MCLK1_PD */ +#define WM8994_MCLK1_PD_WIDTH 1 /* MCLK1_PD */ +#define WM8994_DACDAT1_PU 0x0020 /* DACDAT1_PU */ +#define WM8994_DACDAT1_PU_MASK 0x0020 /* DACDAT1_PU */ +#define WM8994_DACDAT1_PU_SHIFT 5 /* DACDAT1_PU */ +#define WM8994_DACDAT1_PU_WIDTH 1 /* DACDAT1_PU */ +#define WM8994_DACDAT1_PD 0x0010 /* DACDAT1_PD */ +#define WM8994_DACDAT1_PD_MASK 0x0010 /* DACDAT1_PD */ +#define WM8994_DACDAT1_PD_SHIFT 4 /* DACDAT1_PD */ +#define WM8994_DACDAT1_PD_WIDTH 1 /* DACDAT1_PD */ +#define WM8994_DACLRCLK1_PU 0x0008 /* DACLRCLK1_PU */ +#define WM8994_DACLRCLK1_PU_MASK 0x0008 /* DACLRCLK1_PU */ +#define WM8994_DACLRCLK1_PU_SHIFT 3 /* DACLRCLK1_PU */ +#define WM8994_DACLRCLK1_PU_WIDTH 1 /* DACLRCLK1_PU */ +#define WM8994_DACLRCLK1_PD 0x0004 /* DACLRCLK1_PD */ +#define WM8994_DACLRCLK1_PD_MASK 0x0004 /* DACLRCLK1_PD */ +#define 
WM8994_DACLRCLK1_PD_SHIFT 2 /* DACLRCLK1_PD */ +#define WM8994_DACLRCLK1_PD_WIDTH 1 /* DACLRCLK1_PD */ +#define WM8994_BCLK1_PU 0x0002 /* BCLK1_PU */ +#define WM8994_BCLK1_PU_MASK 0x0002 /* BCLK1_PU */ +#define WM8994_BCLK1_PU_SHIFT 1 /* BCLK1_PU */ +#define WM8994_BCLK1_PU_WIDTH 1 /* BCLK1_PU */ +#define WM8994_BCLK1_PD 0x0001 /* BCLK1_PD */ +#define WM8994_BCLK1_PD_MASK 0x0001 /* BCLK1_PD */ +#define WM8994_BCLK1_PD_SHIFT 0 /* BCLK1_PD */ +#define WM8994_BCLK1_PD_WIDTH 1 /* BCLK1_PD */ + +/* + * R1825 (0x721) - Pull Control (2) + */ +#define WM8994_CSNADDR_PD 0x0100 /* CSNADDR_PD */ +#define WM8994_CSNADDR_PD_MASK 0x0100 /* CSNADDR_PD */ +#define WM8994_CSNADDR_PD_SHIFT 8 /* CSNADDR_PD */ +#define WM8994_CSNADDR_PD_WIDTH 1 /* CSNADDR_PD */ +#define WM8994_LDO2ENA_PD 0x0040 /* LDO2ENA_PD */ +#define WM8994_LDO2ENA_PD_MASK 0x0040 /* LDO2ENA_PD */ +#define WM8994_LDO2ENA_PD_SHIFT 6 /* LDO2ENA_PD */ +#define WM8994_LDO2ENA_PD_WIDTH 1 /* LDO2ENA_PD */ +#define WM8994_LDO1ENA_PD 0x0010 /* LDO1ENA_PD */ +#define WM8994_LDO1ENA_PD_MASK 0x0010 /* LDO1ENA_PD */ +#define WM8994_LDO1ENA_PD_SHIFT 4 /* LDO1ENA_PD */ +#define WM8994_LDO1ENA_PD_WIDTH 1 /* LDO1ENA_PD */ +#define WM8994_CIFMODE_PD 0x0004 /* CIFMODE_PD */ +#define WM8994_CIFMODE_PD_MASK 0x0004 /* CIFMODE_PD */ +#define WM8994_CIFMODE_PD_SHIFT 2 /* CIFMODE_PD */ +#define WM8994_CIFMODE_PD_WIDTH 1 /* CIFMODE_PD */ +#define WM8994_SPKMODE_PU 0x0002 /* SPKMODE_PU */ +#define WM8994_SPKMODE_PU_MASK 0x0002 /* SPKMODE_PU */ +#define WM8994_SPKMODE_PU_SHIFT 1 /* SPKMODE_PU */ +#define WM8994_SPKMODE_PU_WIDTH 1 /* SPKMODE_PU */ + +/* + * R1840 (0x730) - Interrupt Status 1 + */ +#define WM8994_GP11_EINT 0x0400 /* GP11_EINT */ +#define WM8994_GP11_EINT_MASK 0x0400 /* GP11_EINT */ +#define WM8994_GP11_EINT_SHIFT 10 /* GP11_EINT */ +#define WM8994_GP11_EINT_WIDTH 1 /* GP11_EINT */ +#define WM8994_GP10_EINT 0x0200 /* GP10_EINT */ +#define WM8994_GP10_EINT_MASK 0x0200 /* GP10_EINT */ +#define WM8994_GP10_EINT_SHIFT 9 /* GP10_EINT */ +#define WM8994_GP10_EINT_WIDTH 1 /* GP10_EINT */ +#define WM8994_GP9_EINT 0x0100 /* GP9_EINT */ +#define WM8994_GP9_EINT_MASK 0x0100 /* GP9_EINT */ +#define WM8994_GP9_EINT_SHIFT 8 /* GP9_EINT */ +#define WM8994_GP9_EINT_WIDTH 1 /* GP9_EINT */ +#define WM8994_GP8_EINT 0x0080 /* GP8_EINT */ +#define WM8994_GP8_EINT_MASK 0x0080 /* GP8_EINT */ +#define WM8994_GP8_EINT_SHIFT 7 /* GP8_EINT */ +#define WM8994_GP8_EINT_WIDTH 1 /* GP8_EINT */ +#define WM8994_GP7_EINT 0x0040 /* GP7_EINT */ +#define WM8994_GP7_EINT_MASK 0x0040 /* GP7_EINT */ +#define WM8994_GP7_EINT_SHIFT 6 /* GP7_EINT */ +#define WM8994_GP7_EINT_WIDTH 1 /* GP7_EINT */ +#define WM8994_GP6_EINT 0x0020 /* GP6_EINT */ +#define WM8994_GP6_EINT_MASK 0x0020 /* GP6_EINT */ +#define WM8994_GP6_EINT_SHIFT 5 /* GP6_EINT */ +#define WM8994_GP6_EINT_WIDTH 1 /* GP6_EINT */ +#define WM8994_GP5_EINT 0x0010 /* GP5_EINT */ +#define WM8994_GP5_EINT_MASK 0x0010 /* GP5_EINT */ +#define WM8994_GP5_EINT_SHIFT 4 /* GP5_EINT */ +#define WM8994_GP5_EINT_WIDTH 1 /* GP5_EINT */ +#define WM8994_GP4_EINT 0x0008 /* GP4_EINT */ +#define WM8994_GP4_EINT_MASK 0x0008 /* GP4_EINT */ +#define WM8994_GP4_EINT_SHIFT 3 /* GP4_EINT */ +#define WM8994_GP4_EINT_WIDTH 1 /* GP4_EINT */ +#define WM8994_GP3_EINT 0x0004 /* GP3_EINT */ +#define WM8994_GP3_EINT_MASK 0x0004 /* GP3_EINT */ +#define WM8994_GP3_EINT_SHIFT 2 /* GP3_EINT */ +#define WM8994_GP3_EINT_WIDTH 1 /* GP3_EINT */ +#define WM8994_GP2_EINT 0x0002 /* GP2_EINT */ +#define WM8994_GP2_EINT_MASK 0x0002 /* GP2_EINT */ +#define WM8994_GP2_EINT_SHIFT 1 /* 
GP2_EINT */ +#define WM8994_GP2_EINT_WIDTH 1 /* GP2_EINT */ +#define WM8994_GP1_EINT 0x0001 /* GP1_EINT */ +#define WM8994_GP1_EINT_MASK 0x0001 /* GP1_EINT */ +#define WM8994_GP1_EINT_SHIFT 0 /* GP1_EINT */ +#define WM8994_GP1_EINT_WIDTH 1 /* GP1_EINT */ + +/* + * R1841 (0x731) - Interrupt Status 2 + */ +#define WM8994_TEMP_WARN_EINT 0x8000 /* TEMP_WARN_EINT */ +#define WM8994_TEMP_WARN_EINT_MASK 0x8000 /* TEMP_WARN_EINT */ +#define WM8994_TEMP_WARN_EINT_SHIFT 15 /* TEMP_WARN_EINT */ +#define WM8994_TEMP_WARN_EINT_WIDTH 1 /* TEMP_WARN_EINT */ +#define WM8994_DCS_DONE_EINT 0x4000 /* DCS_DONE_EINT */ +#define WM8994_DCS_DONE_EINT_MASK 0x4000 /* DCS_DONE_EINT */ +#define WM8994_DCS_DONE_EINT_SHIFT 14 /* DCS_DONE_EINT */ +#define WM8994_DCS_DONE_EINT_WIDTH 1 /* DCS_DONE_EINT */ +#define WM8994_WSEQ_DONE_EINT 0x2000 /* WSEQ_DONE_EINT */ +#define WM8994_WSEQ_DONE_EINT_MASK 0x2000 /* WSEQ_DONE_EINT */ +#define WM8994_WSEQ_DONE_EINT_SHIFT 13 /* WSEQ_DONE_EINT */ +#define WM8994_WSEQ_DONE_EINT_WIDTH 1 /* WSEQ_DONE_EINT */ +#define WM8994_FIFOS_ERR_EINT 0x1000 /* FIFOS_ERR_EINT */ +#define WM8994_FIFOS_ERR_EINT_MASK 0x1000 /* FIFOS_ERR_EINT */ +#define WM8994_FIFOS_ERR_EINT_SHIFT 12 /* FIFOS_ERR_EINT */ +#define WM8994_FIFOS_ERR_EINT_WIDTH 1 /* FIFOS_ERR_EINT */ +#define WM8994_AIF2DRC_SIG_DET_EINT 0x0800 /* AIF2DRC_SIG_DET_EINT */ +#define WM8994_AIF2DRC_SIG_DET_EINT_MASK 0x0800 /* AIF2DRC_SIG_DET_EINT */ +#define WM8994_AIF2DRC_SIG_DET_EINT_SHIFT 11 /* AIF2DRC_SIG_DET_EINT */ +#define WM8994_AIF2DRC_SIG_DET_EINT_WIDTH 1 /* AIF2DRC_SIG_DET_EINT */ +#define WM8994_AIF1DRC2_SIG_DET_EINT 0x0400 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8994_AIF1DRC2_SIG_DET_EINT_MASK 0x0400 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8994_AIF1DRC2_SIG_DET_EINT_SHIFT 10 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8994_AIF1DRC2_SIG_DET_EINT_WIDTH 1 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8994_AIF1DRC1_SIG_DET_EINT 0x0200 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8994_AIF1DRC1_SIG_DET_EINT_MASK 0x0200 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8994_AIF1DRC1_SIG_DET_EINT_SHIFT 9 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8994_AIF1DRC1_SIG_DET_EINT_WIDTH 1 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8994_SRC2_LOCK_EINT 0x0100 /* SRC2_LOCK_EINT */ +#define WM8994_SRC2_LOCK_EINT_MASK 0x0100 /* SRC2_LOCK_EINT */ +#define WM8994_SRC2_LOCK_EINT_SHIFT 8 /* SRC2_LOCK_EINT */ +#define WM8994_SRC2_LOCK_EINT_WIDTH 1 /* SRC2_LOCK_EINT */ +#define WM8994_SRC1_LOCK_EINT 0x0080 /* SRC1_LOCK_EINT */ +#define WM8994_SRC1_LOCK_EINT_MASK 0x0080 /* SRC1_LOCK_EINT */ +#define WM8994_SRC1_LOCK_EINT_SHIFT 7 /* SRC1_LOCK_EINT */ +#define WM8994_SRC1_LOCK_EINT_WIDTH 1 /* SRC1_LOCK_EINT */ +#define WM8994_FLL2_LOCK_EINT 0x0040 /* FLL2_LOCK_EINT */ +#define WM8994_FLL2_LOCK_EINT_MASK 0x0040 /* FLL2_LOCK_EINT */ +#define WM8994_FLL2_LOCK_EINT_SHIFT 6 /* FLL2_LOCK_EINT */ +#define WM8994_FLL2_LOCK_EINT_WIDTH 1 /* FLL2_LOCK_EINT */ +#define WM8994_FLL1_LOCK_EINT 0x0020 /* FLL1_LOCK_EINT */ +#define WM8994_FLL1_LOCK_EINT_MASK 0x0020 /* FLL1_LOCK_EINT */ +#define WM8994_FLL1_LOCK_EINT_SHIFT 5 /* FLL1_LOCK_EINT */ +#define WM8994_FLL1_LOCK_EINT_WIDTH 1 /* FLL1_LOCK_EINT */ +#define WM8994_MIC2_SHRT_EINT 0x0010 /* MIC2_SHRT_EINT */ +#define WM8994_MIC2_SHRT_EINT_MASK 0x0010 /* MIC2_SHRT_EINT */ +#define WM8994_MIC2_SHRT_EINT_SHIFT 4 /* MIC2_SHRT_EINT */ +#define WM8994_MIC2_SHRT_EINT_WIDTH 1 /* MIC2_SHRT_EINT */ +#define WM8994_MIC2_DET_EINT 0x0008 /* MIC2_DET_EINT */ +#define WM8994_MIC2_DET_EINT_MASK 0x0008 /* MIC2_DET_EINT */ +#define WM8994_MIC2_DET_EINT_SHIFT 3 /* MIC2_DET_EINT 
*/ +#define WM8994_MIC2_DET_EINT_WIDTH 1 /* MIC2_DET_EINT */ +#define WM8994_MIC1_SHRT_EINT 0x0004 /* MIC1_SHRT_EINT */ +#define WM8994_MIC1_SHRT_EINT_MASK 0x0004 /* MIC1_SHRT_EINT */ +#define WM8994_MIC1_SHRT_EINT_SHIFT 2 /* MIC1_SHRT_EINT */ +#define WM8994_MIC1_SHRT_EINT_WIDTH 1 /* MIC1_SHRT_EINT */ +#define WM8994_MIC1_DET_EINT 0x0002 /* MIC1_DET_EINT */ +#define WM8994_MIC1_DET_EINT_MASK 0x0002 /* MIC1_DET_EINT */ +#define WM8994_MIC1_DET_EINT_SHIFT 1 /* MIC1_DET_EINT */ +#define WM8994_MIC1_DET_EINT_WIDTH 1 /* MIC1_DET_EINT */ +#define WM8994_TEMP_SHUT_EINT 0x0001 /* TEMP_SHUT_EINT */ +#define WM8994_TEMP_SHUT_EINT_MASK 0x0001 /* TEMP_SHUT_EINT */ +#define WM8994_TEMP_SHUT_EINT_SHIFT 0 /* TEMP_SHUT_EINT */ +#define WM8994_TEMP_SHUT_EINT_WIDTH 1 /* TEMP_SHUT_EINT */ + +/* + * R1842 (0x732) - Interrupt Raw Status 2 + */ +#define WM8994_TEMP_WARN_STS 0x8000 /* TEMP_WARN_STS */ +#define WM8994_TEMP_WARN_STS_MASK 0x8000 /* TEMP_WARN_STS */ +#define WM8994_TEMP_WARN_STS_SHIFT 15 /* TEMP_WARN_STS */ +#define WM8994_TEMP_WARN_STS_WIDTH 1 /* TEMP_WARN_STS */ +#define WM8994_DCS_DONE_STS 0x4000 /* DCS_DONE_STS */ +#define WM8994_DCS_DONE_STS_MASK 0x4000 /* DCS_DONE_STS */ +#define WM8994_DCS_DONE_STS_SHIFT 14 /* DCS_DONE_STS */ +#define WM8994_DCS_DONE_STS_WIDTH 1 /* DCS_DONE_STS */ +#define WM8994_WSEQ_DONE_STS 0x2000 /* WSEQ_DONE_STS */ +#define WM8994_WSEQ_DONE_STS_MASK 0x2000 /* WSEQ_DONE_STS */ +#define WM8994_WSEQ_DONE_STS_SHIFT 13 /* WSEQ_DONE_STS */ +#define WM8994_WSEQ_DONE_STS_WIDTH 1 /* WSEQ_DONE_STS */ +#define WM8994_FIFOS_ERR_STS 0x1000 /* FIFOS_ERR_STS */ +#define WM8994_FIFOS_ERR_STS_MASK 0x1000 /* FIFOS_ERR_STS */ +#define WM8994_FIFOS_ERR_STS_SHIFT 12 /* FIFOS_ERR_STS */ +#define WM8994_FIFOS_ERR_STS_WIDTH 1 /* FIFOS_ERR_STS */ +#define WM8994_AIF2DRC_SIG_DET_STS 0x0800 /* AIF2DRC_SIG_DET_STS */ +#define WM8994_AIF2DRC_SIG_DET_STS_MASK 0x0800 /* AIF2DRC_SIG_DET_STS */ +#define WM8994_AIF2DRC_SIG_DET_STS_SHIFT 11 /* AIF2DRC_SIG_DET_STS */ +#define WM8994_AIF2DRC_SIG_DET_STS_WIDTH 1 /* AIF2DRC_SIG_DET_STS */ +#define WM8994_AIF1DRC2_SIG_DET_STS 0x0400 /* AIF1DRC2_SIG_DET_STS */ +#define WM8994_AIF1DRC2_SIG_DET_STS_MASK 0x0400 /* AIF1DRC2_SIG_DET_STS */ +#define WM8994_AIF1DRC2_SIG_DET_STS_SHIFT 10 /* AIF1DRC2_SIG_DET_STS */ +#define WM8994_AIF1DRC2_SIG_DET_STS_WIDTH 1 /* AIF1DRC2_SIG_DET_STS */ +#define WM8994_AIF1DRC1_SIG_DET_STS 0x0200 /* AIF1DRC1_SIG_DET_STS */ +#define WM8994_AIF1DRC1_SIG_DET_STS_MASK 0x0200 /* AIF1DRC1_SIG_DET_STS */ +#define WM8994_AIF1DRC1_SIG_DET_STS_SHIFT 9 /* AIF1DRC1_SIG_DET_STS */ +#define WM8994_AIF1DRC1_SIG_DET_STS_WIDTH 1 /* AIF1DRC1_SIG_DET_STS */ +#define WM8994_SRC2_LOCK_STS 0x0100 /* SRC2_LOCK_STS */ +#define WM8994_SRC2_LOCK_STS_MASK 0x0100 /* SRC2_LOCK_STS */ +#define WM8994_SRC2_LOCK_STS_SHIFT 8 /* SRC2_LOCK_STS */ +#define WM8994_SRC2_LOCK_STS_WIDTH 1 /* SRC2_LOCK_STS */ +#define WM8994_SRC1_LOCK_STS 0x0080 /* SRC1_LOCK_STS */ +#define WM8994_SRC1_LOCK_STS_MASK 0x0080 /* SRC1_LOCK_STS */ +#define WM8994_SRC1_LOCK_STS_SHIFT 7 /* SRC1_LOCK_STS */ +#define WM8994_SRC1_LOCK_STS_WIDTH 1 /* SRC1_LOCK_STS */ +#define WM8994_FLL2_LOCK_STS 0x0040 /* FLL2_LOCK_STS */ +#define WM8994_FLL2_LOCK_STS_MASK 0x0040 /* FLL2_LOCK_STS */ +#define WM8994_FLL2_LOCK_STS_SHIFT 6 /* FLL2_LOCK_STS */ +#define WM8994_FLL2_LOCK_STS_WIDTH 1 /* FLL2_LOCK_STS */ +#define WM8994_FLL1_LOCK_STS 0x0020 /* FLL1_LOCK_STS */ +#define WM8994_FLL1_LOCK_STS_MASK 0x0020 /* FLL1_LOCK_STS */ +#define WM8994_FLL1_LOCK_STS_SHIFT 5 /* FLL1_LOCK_STS */ +#define 
WM8994_FLL1_LOCK_STS_WIDTH 1 /* FLL1_LOCK_STS */ +#define WM8994_MIC2_SHRT_STS 0x0010 /* MIC2_SHRT_STS */ +#define WM8994_MIC2_SHRT_STS_MASK 0x0010 /* MIC2_SHRT_STS */ +#define WM8994_MIC2_SHRT_STS_SHIFT 4 /* MIC2_SHRT_STS */ +#define WM8994_MIC2_SHRT_STS_WIDTH 1 /* MIC2_SHRT_STS */ +#define WM8994_MIC2_DET_STS 0x0008 /* MIC2_DET_STS */ +#define WM8994_MIC2_DET_STS_MASK 0x0008 /* MIC2_DET_STS */ +#define WM8994_MIC2_DET_STS_SHIFT 3 /* MIC2_DET_STS */ +#define WM8994_MIC2_DET_STS_WIDTH 1 /* MIC2_DET_STS */ +#define WM8994_MIC1_SHRT_STS 0x0004 /* MIC1_SHRT_STS */ +#define WM8994_MIC1_SHRT_STS_MASK 0x0004 /* MIC1_SHRT_STS */ +#define WM8994_MIC1_SHRT_STS_SHIFT 2 /* MIC1_SHRT_STS */ +#define WM8994_MIC1_SHRT_STS_WIDTH 1 /* MIC1_SHRT_STS */ +#define WM8994_MIC1_DET_STS 0x0002 /* MIC1_DET_STS */ +#define WM8994_MIC1_DET_STS_MASK 0x0002 /* MIC1_DET_STS */ +#define WM8994_MIC1_DET_STS_SHIFT 1 /* MIC1_DET_STS */ +#define WM8994_MIC1_DET_STS_WIDTH 1 /* MIC1_DET_STS */ +#define WM8994_TEMP_SHUT_STS 0x0001 /* TEMP_SHUT_STS */ +#define WM8994_TEMP_SHUT_STS_MASK 0x0001 /* TEMP_SHUT_STS */ +#define WM8994_TEMP_SHUT_STS_SHIFT 0 /* TEMP_SHUT_STS */ +#define WM8994_TEMP_SHUT_STS_WIDTH 1 /* TEMP_SHUT_STS */ + +/* + * R1848 (0x738) - Interrupt Status 1 Mask + */ +#define WM8994_IM_GP11_EINT 0x0400 /* IM_GP11_EINT */ +#define WM8994_IM_GP11_EINT_MASK 0x0400 /* IM_GP11_EINT */ +#define WM8994_IM_GP11_EINT_SHIFT 10 /* IM_GP11_EINT */ +#define WM8994_IM_GP11_EINT_WIDTH 1 /* IM_GP11_EINT */ +#define WM8994_IM_GP10_EINT 0x0200 /* IM_GP10_EINT */ +#define WM8994_IM_GP10_EINT_MASK 0x0200 /* IM_GP10_EINT */ +#define WM8994_IM_GP10_EINT_SHIFT 9 /* IM_GP10_EINT */ +#define WM8994_IM_GP10_EINT_WIDTH 1 /* IM_GP10_EINT */ +#define WM8994_IM_GP9_EINT 0x0100 /* IM_GP9_EINT */ +#define WM8994_IM_GP9_EINT_MASK 0x0100 /* IM_GP9_EINT */ +#define WM8994_IM_GP9_EINT_SHIFT 8 /* IM_GP9_EINT */ +#define WM8994_IM_GP9_EINT_WIDTH 1 /* IM_GP9_EINT */ +#define WM8994_IM_GP8_EINT 0x0080 /* IM_GP8_EINT */ +#define WM8994_IM_GP8_EINT_MASK 0x0080 /* IM_GP8_EINT */ +#define WM8994_IM_GP8_EINT_SHIFT 7 /* IM_GP8_EINT */ +#define WM8994_IM_GP8_EINT_WIDTH 1 /* IM_GP8_EINT */ +#define WM8994_IM_GP7_EINT 0x0040 /* IM_GP7_EINT */ +#define WM8994_IM_GP7_EINT_MASK 0x0040 /* IM_GP7_EINT */ +#define WM8994_IM_GP7_EINT_SHIFT 6 /* IM_GP7_EINT */ +#define WM8994_IM_GP7_EINT_WIDTH 1 /* IM_GP7_EINT */ +#define WM8994_IM_GP6_EINT 0x0020 /* IM_GP6_EINT */ +#define WM8994_IM_GP6_EINT_MASK 0x0020 /* IM_GP6_EINT */ +#define WM8994_IM_GP6_EINT_SHIFT 5 /* IM_GP6_EINT */ +#define WM8994_IM_GP6_EINT_WIDTH 1 /* IM_GP6_EINT */ +#define WM8994_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */ +#define WM8994_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */ +#define WM8994_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */ +#define WM8994_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */ +#define WM8994_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */ +#define WM8994_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */ +#define WM8994_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */ +#define WM8994_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */ +#define WM8994_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */ +#define WM8994_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */ +#define WM8994_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */ +#define WM8994_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */ +#define WM8994_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */ +#define WM8994_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */ +#define WM8994_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */ +#define WM8994_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */ +#define WM8994_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */ +#define 
WM8994_IM_GP1_EINT_MASK 0x0001 /* IM_GP1_EINT */ +#define WM8994_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */ +#define WM8994_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */ + +/* + * R1849 (0x739) - Interrupt Status 2 Mask + */ +#define WM8994_IM_TEMP_WARN_EINT 0x8000 /* IM_TEMP_WARN_EINT */ +#define WM8994_IM_TEMP_WARN_EINT_MASK 0x8000 /* IM_TEMP_WARN_EINT */ +#define WM8994_IM_TEMP_WARN_EINT_SHIFT 15 /* IM_TEMP_WARN_EINT */ +#define WM8994_IM_TEMP_WARN_EINT_WIDTH 1 /* IM_TEMP_WARN_EINT */ +#define WM8994_IM_DCS_DONE_EINT 0x4000 /* IM_DCS_DONE_EINT */ +#define WM8994_IM_DCS_DONE_EINT_MASK 0x4000 /* IM_DCS_DONE_EINT */ +#define WM8994_IM_DCS_DONE_EINT_SHIFT 14 /* IM_DCS_DONE_EINT */ +#define WM8994_IM_DCS_DONE_EINT_WIDTH 1 /* IM_DCS_DONE_EINT */ +#define WM8994_IM_WSEQ_DONE_EINT 0x2000 /* IM_WSEQ_DONE_EINT */ +#define WM8994_IM_WSEQ_DONE_EINT_MASK 0x2000 /* IM_WSEQ_DONE_EINT */ +#define WM8994_IM_WSEQ_DONE_EINT_SHIFT 13 /* IM_WSEQ_DONE_EINT */ +#define WM8994_IM_WSEQ_DONE_EINT_WIDTH 1 /* IM_WSEQ_DONE_EINT */ +#define WM8994_IM_FIFOS_ERR_EINT 0x1000 /* IM_FIFOS_ERR_EINT */ +#define WM8994_IM_FIFOS_ERR_EINT_MASK 0x1000 /* IM_FIFOS_ERR_EINT */ +#define WM8994_IM_FIFOS_ERR_EINT_SHIFT 12 /* IM_FIFOS_ERR_EINT */ +#define WM8994_IM_FIFOS_ERR_EINT_WIDTH 1 /* IM_FIFOS_ERR_EINT */ +#define WM8994_IM_AIF2DRC_SIG_DET_EINT 0x0800 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8994_IM_AIF2DRC_SIG_DET_EINT_MASK 0x0800 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8994_IM_AIF2DRC_SIG_DET_EINT_SHIFT 11 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8994_IM_AIF2DRC_SIG_DET_EINT_WIDTH 1 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC2_SIG_DET_EINT 0x0400 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC2_SIG_DET_EINT_MASK 0x0400 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC2_SIG_DET_EINT_SHIFT 10 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC2_SIG_DET_EINT_WIDTH 1 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC1_SIG_DET_EINT 0x0200 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC1_SIG_DET_EINT_MASK 0x0200 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC1_SIG_DET_EINT_SHIFT 9 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC1_SIG_DET_EINT_WIDTH 1 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8994_IM_SRC2_LOCK_EINT 0x0100 /* IM_SRC2_LOCK_EINT */ +#define WM8994_IM_SRC2_LOCK_EINT_MASK 0x0100 /* IM_SRC2_LOCK_EINT */ +#define WM8994_IM_SRC2_LOCK_EINT_SHIFT 8 /* IM_SRC2_LOCK_EINT */ +#define WM8994_IM_SRC2_LOCK_EINT_WIDTH 1 /* IM_SRC2_LOCK_EINT */ +#define WM8994_IM_SRC1_LOCK_EINT 0x0080 /* IM_SRC1_LOCK_EINT */ +#define WM8994_IM_SRC1_LOCK_EINT_MASK 0x0080 /* IM_SRC1_LOCK_EINT */ +#define WM8994_IM_SRC1_LOCK_EINT_SHIFT 7 /* IM_SRC1_LOCK_EINT */ +#define WM8994_IM_SRC1_LOCK_EINT_WIDTH 1 /* IM_SRC1_LOCK_EINT */ +#define WM8994_IM_FLL2_LOCK_EINT 0x0040 /* IM_FLL2_LOCK_EINT */ +#define WM8994_IM_FLL2_LOCK_EINT_MASK 0x0040 /* IM_FLL2_LOCK_EINT */ +#define WM8994_IM_FLL2_LOCK_EINT_SHIFT 6 /* IM_FLL2_LOCK_EINT */ +#define WM8994_IM_FLL2_LOCK_EINT_WIDTH 1 /* IM_FLL2_LOCK_EINT */ +#define WM8994_IM_FLL1_LOCK_EINT 0x0020 /* IM_FLL1_LOCK_EINT */ +#define WM8994_IM_FLL1_LOCK_EINT_MASK 0x0020 /* IM_FLL1_LOCK_EINT */ +#define WM8994_IM_FLL1_LOCK_EINT_SHIFT 5 /* IM_FLL1_LOCK_EINT */ +#define WM8994_IM_FLL1_LOCK_EINT_WIDTH 1 /* IM_FLL1_LOCK_EINT */ +#define WM8994_IM_MIC2_SHRT_EINT 0x0010 /* IM_MIC2_SHRT_EINT */ +#define WM8994_IM_MIC2_SHRT_EINT_MASK 0x0010 /* IM_MIC2_SHRT_EINT */ +#define WM8994_IM_MIC2_SHRT_EINT_SHIFT 4 /* IM_MIC2_SHRT_EINT */ +#define WM8994_IM_MIC2_SHRT_EINT_WIDTH 1 /* 
IM_MIC2_SHRT_EINT */ +#define WM8994_IM_MIC2_DET_EINT 0x0008 /* IM_MIC2_DET_EINT */ +#define WM8994_IM_MIC2_DET_EINT_MASK 0x0008 /* IM_MIC2_DET_EINT */ +#define WM8994_IM_MIC2_DET_EINT_SHIFT 3 /* IM_MIC2_DET_EINT */ +#define WM8994_IM_MIC2_DET_EINT_WIDTH 1 /* IM_MIC2_DET_EINT */ +#define WM8994_IM_MIC1_SHRT_EINT 0x0004 /* IM_MIC1_SHRT_EINT */ +#define WM8994_IM_MIC1_SHRT_EINT_MASK 0x0004 /* IM_MIC1_SHRT_EINT */ +#define WM8994_IM_MIC1_SHRT_EINT_SHIFT 2 /* IM_MIC1_SHRT_EINT */ +#define WM8994_IM_MIC1_SHRT_EINT_WIDTH 1 /* IM_MIC1_SHRT_EINT */ +#define WM8994_IM_MIC1_DET_EINT 0x0002 /* IM_MIC1_DET_EINT */ +#define WM8994_IM_MIC1_DET_EINT_MASK 0x0002 /* IM_MIC1_DET_EINT */ +#define WM8994_IM_MIC1_DET_EINT_SHIFT 1 /* IM_MIC1_DET_EINT */ +#define WM8994_IM_MIC1_DET_EINT_WIDTH 1 /* IM_MIC1_DET_EINT */ +#define WM8994_IM_TEMP_SHUT_EINT 0x0001 /* IM_TEMP_SHUT_EINT */ +#define WM8994_IM_TEMP_SHUT_EINT_MASK 0x0001 /* IM_TEMP_SHUT_EINT */ +#define WM8994_IM_TEMP_SHUT_EINT_SHIFT 0 /* IM_TEMP_SHUT_EINT */ +#define WM8994_IM_TEMP_SHUT_EINT_WIDTH 1 /* IM_TEMP_SHUT_EINT */ + +/* + * R1856 (0x740) - Interrupt Control + */ +#define WM8994_IM_IRQ 0x0001 /* IM_IRQ */ +#define WM8994_IM_IRQ_MASK 0x0001 /* IM_IRQ */ +#define WM8994_IM_IRQ_SHIFT 0 /* IM_IRQ */ +#define WM8994_IM_IRQ_WIDTH 1 /* IM_IRQ */ + +/* + * R1864 (0x748) - IRQ Debounce + */ +#define WM8994_TEMP_WARN_DB 0x0020 /* TEMP_WARN_DB */ +#define WM8994_TEMP_WARN_DB_MASK 0x0020 /* TEMP_WARN_DB */ +#define WM8994_TEMP_WARN_DB_SHIFT 5 /* TEMP_WARN_DB */ +#define WM8994_TEMP_WARN_DB_WIDTH 1 /* TEMP_WARN_DB */ +#define WM8994_MIC2_SHRT_DB 0x0010 /* MIC2_SHRT_DB */ +#define WM8994_MIC2_SHRT_DB_MASK 0x0010 /* MIC2_SHRT_DB */ +#define WM8994_MIC2_SHRT_DB_SHIFT 4 /* MIC2_SHRT_DB */ +#define WM8994_MIC2_SHRT_DB_WIDTH 1 /* MIC2_SHRT_DB */ +#define WM8994_MIC2_DET_DB 0x0008 /* MIC2_DET_DB */ +#define WM8994_MIC2_DET_DB_MASK 0x0008 /* MIC2_DET_DB */ +#define WM8994_MIC2_DET_DB_SHIFT 3 /* MIC2_DET_DB */ +#define WM8994_MIC2_DET_DB_WIDTH 1 /* MIC2_DET_DB */ +#define WM8994_MIC1_SHRT_DB 0x0004 /* MIC1_SHRT_DB */ +#define WM8994_MIC1_SHRT_DB_MASK 0x0004 /* MIC1_SHRT_DB */ +#define WM8994_MIC1_SHRT_DB_SHIFT 2 /* MIC1_SHRT_DB */ +#define WM8994_MIC1_SHRT_DB_WIDTH 1 /* MIC1_SHRT_DB */ +#define WM8994_MIC1_DET_DB 0x0002 /* MIC1_DET_DB */ +#define WM8994_MIC1_DET_DB_MASK 0x0002 /* MIC1_DET_DB */ +#define WM8994_MIC1_DET_DB_SHIFT 1 /* MIC1_DET_DB */ +#define WM8994_MIC1_DET_DB_WIDTH 1 /* MIC1_DET_DB */ +#define WM8994_TEMP_SHUT_DB 0x0001 /* TEMP_SHUT_DB */ +#define WM8994_TEMP_SHUT_DB_MASK 0x0001 /* TEMP_SHUT_DB */ +#define WM8994_TEMP_SHUT_DB_SHIFT 0 /* TEMP_SHUT_DB */ +#define WM8994_TEMP_SHUT_DB_WIDTH 1 /* TEMP_SHUT_DB */ + +/* + * R2304 (0x900) - DSP2_Program + */ +#define WM8958_DSP2_ENA 0x0001 /* DSP2_ENA */ +#define WM8958_DSP2_ENA_MASK 0x0001 /* DSP2_ENA */ +#define WM8958_DSP2_ENA_SHIFT 0 /* DSP2_ENA */ +#define WM8958_DSP2_ENA_WIDTH 1 /* DSP2_ENA */ + +/* + * R2305 (0x901) - DSP2_Config + */ +#define WM8958_MBC_SEL_MASK 0x0030 /* MBC_SEL - [5:4] */ +#define WM8958_MBC_SEL_SHIFT 4 /* MBC_SEL - [5:4] */ +#define WM8958_MBC_SEL_WIDTH 2 /* MBC_SEL - [5:4] */ +#define WM8958_MBC_ENA 0x0001 /* MBC_ENA */ +#define WM8958_MBC_ENA_MASK 0x0001 /* MBC_ENA */ +#define WM8958_MBC_ENA_SHIFT 0 /* MBC_ENA */ +#define WM8958_MBC_ENA_WIDTH 1 /* MBC_ENA */ + +/* + * R2560 (0xA00) - DSP2_MagicNum + */ +#define WM8958_DSP2_MAGIC_NUM_MASK 0xFFFF /* DSP2_MAGIC_NUM - [15:0] */ +#define WM8958_DSP2_MAGIC_NUM_SHIFT 0 /* DSP2_MAGIC_NUM - [15:0] */ +#define WM8958_DSP2_MAGIC_NUM_WIDTH 16 
/* DSP2_MAGIC_NUM - [15:0] */ + +/* + * R2561 (0xA01) - DSP2_ReleaseYear + */ +#define WM8958_DSP2_RELEASE_YEAR_MASK 0xFFFF /* DSP2_RELEASE_YEAR - [15:0] */ +#define WM8958_DSP2_RELEASE_YEAR_SHIFT 0 /* DSP2_RELEASE_YEAR - [15:0] */ +#define WM8958_DSP2_RELEASE_YEAR_WIDTH 16 /* DSP2_RELEASE_YEAR - [15:0] */ + +/* + * R2562 (0xA02) - DSP2_ReleaseMonthDay + */ +#define WM8958_DSP2_RELEASE_MONTH_MASK 0xFF00 /* DSP2_RELEASE_MONTH - [15:8] */ +#define WM8958_DSP2_RELEASE_MONTH_SHIFT 8 /* DSP2_RELEASE_MONTH - [15:8] */ +#define WM8958_DSP2_RELEASE_MONTH_WIDTH 8 /* DSP2_RELEASE_MONTH - [15:8] */ +#define WM8958_DSP2_RELEASE_DAY_MASK 0x00FF /* DSP2_RELEASE_DAY - [7:0] */ +#define WM8958_DSP2_RELEASE_DAY_SHIFT 0 /* DSP2_RELEASE_DAY - [7:0] */ +#define WM8958_DSP2_RELEASE_DAY_WIDTH 8 /* DSP2_RELEASE_DAY - [7:0] */ + +/* + * R2563 (0xA03) - DSP2_ReleaseTime + */ +#define WM8958_DSP2_RELEASE_HOURS_MASK 0xFF00 /* DSP2_RELEASE_HOURS - [15:8] */ +#define WM8958_DSP2_RELEASE_HOURS_SHIFT 8 /* DSP2_RELEASE_HOURS - [15:8] */ +#define WM8958_DSP2_RELEASE_HOURS_WIDTH 8 /* DSP2_RELEASE_HOURS - [15:8] */ +#define WM8958_DSP2_RELEASE_MINS_MASK 0x00FF /* DSP2_RELEASE_MINS - [7:0] */ +#define WM8958_DSP2_RELEASE_MINS_SHIFT 0 /* DSP2_RELEASE_MINS - [7:0] */ +#define WM8958_DSP2_RELEASE_MINS_WIDTH 8 /* DSP2_RELEASE_MINS - [7:0] */ + +/* + * R2564 (0xA04) - DSP2_VerMajMin + */ +#define WM8958_DSP2_MAJOR_VER_MASK 0xFF00 /* DSP2_MAJOR_VER - [15:8] */ +#define WM8958_DSP2_MAJOR_VER_SHIFT 8 /* DSP2_MAJOR_VER - [15:8] */ +#define WM8958_DSP2_MAJOR_VER_WIDTH 8 /* DSP2_MAJOR_VER - [15:8] */ +#define WM8958_DSP2_MINOR_VER_MASK 0x00FF /* DSP2_MINOR_VER - [7:0] */ +#define WM8958_DSP2_MINOR_VER_SHIFT 0 /* DSP2_MINOR_VER - [7:0] */ +#define WM8958_DSP2_MINOR_VER_WIDTH 8 /* DSP2_MINOR_VER - [7:0] */ + +/* + * R2565 (0xA05) - DSP2_VerBuild + */ +#define WM8958_DSP2_BUILD_VER_MASK 0xFFFF /* DSP2_BUILD_VER - [15:0] */ +#define WM8958_DSP2_BUILD_VER_SHIFT 0 /* DSP2_BUILD_VER - [15:0] */ +#define WM8958_DSP2_BUILD_VER_WIDTH 16 /* DSP2_BUILD_VER - [15:0] */ + +/* + * R2573 (0xA0D) - DSP2_ExecControl + */ +#define WM8958_DSP2_STOPC 0x0020 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPC_MASK 0x0020 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPC_SHIFT 5 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPC_WIDTH 1 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPS 0x0010 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPS_MASK 0x0010 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPS_SHIFT 4 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPS_WIDTH 1 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPI 0x0008 /* DSP2_STOPI */ +#define WM8958_DSP2_STOPI_MASK 0x0008 /* DSP2_STOPI */ +#define WM8958_DSP2_STOPI_SHIFT 3 /* DSP2_STOPI */ +#define WM8958_DSP2_STOPI_WIDTH 1 /* DSP2_STOPI */ +#define WM8958_DSP2_STOP 0x0004 /* DSP2_STOP */ +#define WM8958_DSP2_STOP_MASK 0x0004 /* DSP2_STOP */ +#define WM8958_DSP2_STOP_SHIFT 2 /* DSP2_STOP */ +#define WM8958_DSP2_STOP_WIDTH 1 /* DSP2_STOP */ +#define WM8958_DSP2_RUNR 0x0002 /* DSP2_RUNR */ +#define WM8958_DSP2_RUNR_MASK 0x0002 /* DSP2_RUNR */ +#define WM8958_DSP2_RUNR_SHIFT 1 /* DSP2_RUNR */ +#define WM8958_DSP2_RUNR_WIDTH 1 /* DSP2_RUNR */ +#define WM8958_DSP2_RUN 0x0001 /* DSP2_RUN */ +#define WM8958_DSP2_RUN_MASK 0x0001 /* DSP2_RUN */ +#define WM8958_DSP2_RUN_SHIFT 0 /* DSP2_RUN */ +#define WM8958_DSP2_RUN_WIDTH 1 /* DSP2_RUN */ + +#endif diff --git a/include/linux/mfd/wm97xx.h b/include/linux/mfd/wm97xx.h new file mode 100644 index 000000000..45fb54f19 --- /dev/null +++ b/include/linux/mfd/wm97xx.h @@ -0,0 +1,25 @@ +/* + * wm97xx client interface + * 
+ * Copyright (C) 2017 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_MFD_WM97XX_H +#define __LINUX_MFD_WM97XX_H + +struct regmap; +struct wm97xx_batt_pdata; +struct snd_ac97; + +struct wm97xx_platform_data { + struct snd_ac97 *ac97; + struct regmap *regmap; + struct wm97xx_batt_pdata *batt_pdata; +}; + +#endif diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h new file mode 100644 index 000000000..504d54c71 --- /dev/null +++ b/include/linux/mic_bus.h @@ -0,0 +1,111 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Bus driver. + * + * This implementation is very similar to the the virtio bus driver + * implementation @ include/linux/virtio.h. + */ +#ifndef _MIC_BUS_H_ +#define _MIC_BUS_H_ +/* + * Everything a mbus driver needs to work with any particular mbus + * implementation. + */ +#include +#include + +struct mbus_device_id { + __u32 device; + __u32 vendor; +}; + +#define MBUS_DEV_DMA_HOST 2 +#define MBUS_DEV_DMA_MIC 3 +#define MBUS_DEV_ANY_ID 0xffffffff + +/** + * mbus_device - representation of a device using mbus + * @mmio_va: virtual address of mmio space + * @hw_ops: the hardware ops supported by this device. + * @id: the device type identification (used to match it with a driver). + * @dev: underlying device. + * be used to communicate with. + * @index: unique position on the mbus bus + */ +struct mbus_device { + void __iomem *mmio_va; + struct mbus_hw_ops *hw_ops; + struct mbus_device_id id; + struct device dev; + int index; +}; + +/** + * mbus_driver - operations for a mbus I/O driver + * @driver: underlying device driver (populate name and owner). + * @id_table: the ids serviced by this driver. + * @probe: the function to call when a device is found. Returns 0 or -errno. + * @remove: the function to call when a device is removed. + */ +struct mbus_driver { + struct device_driver driver; + const struct mbus_device_id *id_table; + int (*probe)(struct mbus_device *dev); + void (*scan)(struct mbus_device *dev); + void (*remove)(struct mbus_device *dev); +}; + +/** + * struct mic_irq - opaque pointer used as cookie + */ +struct mic_irq; + +/** + * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus. 
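For orientation, a client of the mbus_driver structure documented above would normally pair an id table with probe/remove callbacks and hand the whole thing to mbus_register_driver(). The following is only a sketch under the usual linux/module.h assumptions; every demo_ name is hypothetical and not part of this header:

static const struct mbus_device_id demo_mbus_ids[] = {
        { .device = MBUS_DEV_DMA_MIC, .vendor = MBUS_DEV_ANY_ID },
        { 0 },
};

static int demo_mbus_probe(struct mbus_device *mbdev)
{
        /* mbdev->mmio_va and mbdev->hw_ops are filled in by the bus core */
        return 0;
}

static void demo_mbus_remove(struct mbus_device *mbdev)
{
}

static struct mbus_driver demo_mbus_driver = {
        .driver   = { .name = "demo_mbus", .owner = THIS_MODULE },
        .id_table = demo_mbus_ids,
        .probe    = demo_mbus_probe,
        .remove   = demo_mbus_remove,
};

/* registered from module init: mbus_register_driver(&demo_mbus_driver); */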
+ */ +struct mbus_hw_ops { + struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev, + irq_handler_t handler, + irq_handler_t thread_fn, + const char *name, void *data, + int intr_src); + void (*free_irq)(struct mbus_device *mbdev, + struct mic_irq *cookie, void *data); + void (*ack_interrupt)(struct mbus_device *mbdev, int num); +}; + +struct mbus_device * +mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, + struct mbus_hw_ops *hw_ops, int index, + void __iomem *mmio_va); +void mbus_unregister_device(struct mbus_device *mbdev); + +int mbus_register_driver(struct mbus_driver *drv); +void mbus_unregister_driver(struct mbus_driver *drv); + +static inline struct mbus_device *dev_to_mbus(struct device *_dev) +{ + return container_of(_dev, struct mbus_device, dev); +} + +static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv) +{ + return container_of(drv, struct mbus_driver, driver); +} + +#endif /* _MIC_BUS_H */ diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h new file mode 100644 index 000000000..472fa4d4e --- /dev/null +++ b/include/linux/micrel_phy.h @@ -0,0 +1,51 @@ +/* + * include/linux/micrel_phy.h + * + * Micrel PHY IDs + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef _MICREL_PHY_H +#define _MICREL_PHY_H + +#define MICREL_PHY_ID_MASK 0x00fffff0 + +#define PHY_ID_KSZ8873MLL 0x000e7237 +#define PHY_ID_KSZ9021 0x00221610 +#define PHY_ID_KSZ9021RLRN 0x00221611 +#define PHY_ID_KS8737 0x00221720 +#define PHY_ID_KSZ8021 0x00221555 +#define PHY_ID_KSZ8031 0x00221556 +#define PHY_ID_KSZ8041 0x00221510 +/* undocumented */ +#define PHY_ID_KSZ8041RNLI 0x00221537 +#define PHY_ID_KSZ8051 0x00221550 +/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */ +#define PHY_ID_KSZ8001 0x0022161A +/* same id: KS8081, KS8091 */ +#define PHY_ID_KSZ8081 0x00221560 +#define PHY_ID_KSZ8061 0x00221570 +#define PHY_ID_KSZ9031 0x00221620 + +#define PHY_ID_KSZ886X 0x00221430 +#define PHY_ID_KSZ8863 0x00221435 + +#define PHY_ID_KSZ8795 0x00221550 + +#define PHY_ID_KSZ9477 0x00221631 + +/* struct phy_device dev_flags definitions */ +#define MICREL_PHY_50MHZ_CLK 0x00000001 +#define MICREL_PHY_FXEN 0x00000002 + +#define MICREL_KSZ9021_EXTREG_CTRL 0xB +#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC +#define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104 +#define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105 + +#endif /* _MICREL_PHY_H */ diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h new file mode 100644 index 000000000..8c40128af --- /dev/null +++ b/include/linux/microchipphy.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2015 Microchip Technology + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
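As a side note on the Micrel PHY IDs above: the low nibble masked off by MICREL_PHY_ID_MASK typically carries the silicon revision, so drivers compare identifiers under the mask rather than for exact equality. A minimal sketch, where phy_id is an assumed local holding the raw 32-bit ID register value:

/* illustrative only: phy_id is the value assembled from MII_PHYSID1/2 */
bool is_ksz9031 = (phy_id & MICREL_PHY_ID_MASK) ==
                  (PHY_ID_KSZ9031 & MICREL_PHY_ID_MASK);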
+ */ + +#ifndef _MICROCHIPPHY_H +#define _MICROCHIPPHY_H + +#define LAN88XX_INT_MASK (0x19) +#define LAN88XX_INT_MASK_MDINTPIN_EN_ (0x8000) +#define LAN88XX_INT_MASK_SPEED_CHANGE_ (0x4000) +#define LAN88XX_INT_MASK_LINK_CHANGE_ (0x2000) +#define LAN88XX_INT_MASK_FDX_CHANGE_ (0x1000) +#define LAN88XX_INT_MASK_AUTONEG_ERR_ (0x0800) +#define LAN88XX_INT_MASK_AUTONEG_DONE_ (0x0400) +#define LAN88XX_INT_MASK_POE_DETECT_ (0x0200) +#define LAN88XX_INT_MASK_SYMBOL_ERR_ (0x0100) +#define LAN88XX_INT_MASK_FAST_LINK_FAIL_ (0x0080) +#define LAN88XX_INT_MASK_WOL_EVENT_ (0x0040) +#define LAN88XX_INT_MASK_EXTENDED_INT_ (0x0020) +#define LAN88XX_INT_MASK_RESERVED_ (0x0010) +#define LAN88XX_INT_MASK_FALSE_CARRIER_ (0x0008) +#define LAN88XX_INT_MASK_LINK_SPEED_DS_ (0x0004) +#define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_ (0x0002) +#define LAN88XX_INT_MASK_RX__ER_ (0x0001) + +#define LAN88XX_INT_STS (0x1A) +#define LAN88XX_INT_STS_INT_ACTIVE_ (0x8000) +#define LAN88XX_INT_STS_SPEED_CHANGE_ (0x4000) +#define LAN88XX_INT_STS_LINK_CHANGE_ (0x2000) +#define LAN88XX_INT_STS_FDX_CHANGE_ (0x1000) +#define LAN88XX_INT_STS_AUTONEG_ERR_ (0x0800) +#define LAN88XX_INT_STS_AUTONEG_DONE_ (0x0400) +#define LAN88XX_INT_STS_POE_DETECT_ (0x0200) +#define LAN88XX_INT_STS_SYMBOL_ERR_ (0x0100) +#define LAN88XX_INT_STS_FAST_LINK_FAIL_ (0x0080) +#define LAN88XX_INT_STS_WOL_EVENT_ (0x0040) +#define LAN88XX_INT_STS_EXTENDED_INT_ (0x0020) +#define LAN88XX_INT_STS_RESERVED_ (0x0010) +#define LAN88XX_INT_STS_FALSE_CARRIER_ (0x0008) +#define LAN88XX_INT_STS_LINK_SPEED_DS_ (0x0004) +#define LAN88XX_INT_STS_MASTER_SLAVE_DONE_ (0x0002) +#define LAN88XX_INT_STS_RX_ER_ (0x0001) + +#define LAN88XX_EXT_PAGE_ACCESS (0x1F) +#define LAN88XX_EXT_PAGE_SPACE_0 (0x0000) +#define LAN88XX_EXT_PAGE_SPACE_1 (0x0001) +#define LAN88XX_EXT_PAGE_SPACE_2 (0x0002) + +/* Extended Register Page 1 space */ +#define LAN88XX_EXT_MODE_CTRL (0x13) +#define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_ (0x000C) +#define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000) +#define LAN88XX_EXT_MODE_CTRL_MDI_ (0x0008) +#define LAN88XX_EXT_MODE_CTRL_MDI_X_ (0x000C) + +/* MMD 3 Registers */ +#define LAN88XX_MMD3_CHIP_ID (32877) +#define LAN88XX_MMD3_CHIP_REV (32878) + +/* Registers specific to the LAN7800/LAN7850 embedded phy */ +#define LAN78XX_PHY_LED_MODE_SELECT (0x1D) + +/* DSP registers */ +#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A) +#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000) +#define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5) +#define LAN88XX_EXT_PAGE_TR_CR 16 +#define LAN88XX_EXT_PAGE_TR_LOW_DATA 17 +#define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18 + +#endif /* _MICROCHIPPHY_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h new file mode 100644 index 000000000..f2b4abbca --- /dev/null +++ b/include/linux/migrate.h @@ -0,0 +1,291 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MIGRATE_H +#define _LINUX_MIGRATE_H + +#include +#include +#include +#include + +typedef struct page *new_page_t(struct page *page, unsigned long private); +typedef void free_page_t(struct page *page, unsigned long private); + +/* + * Return values from addresss_space_operations.migratepage(): + * - negative errno on page migration failure; + * - zero on page migration success; + */ +#define MIGRATEPAGE_SUCCESS 0 + +enum migrate_reason { + MR_COMPACTION, + MR_MEMORY_FAILURE, + MR_MEMORY_HOTPLUG, + MR_SYSCALL, /* also applies to cpusets */ + MR_MEMPOLICY_MBIND, + MR_NUMA_MISPLACED, + MR_CONTIG_RANGE, + MR_TYPES +}; + +/* In mm/debug.c; also keep sync with include/trace/events/migrate.h 
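The new_page_t/free_page_t typedefs above are the allocation hooks handed to migrate_pages(), declared further down in this header. A hedged sketch of such a callback pair, with demo_ names purely illustrative and the target node passed through the 'private' argument:

/* allocate a destination page on a caller-chosen node */
static struct page *demo_new_page(struct page *page, unsigned long private)
{
        return new_page_nodemask(page, (int)private, NULL);
}

/* drop a destination page that ended up unused */
static void demo_free_page(struct page *page, unsigned long private)
{
        put_page(page);
}

/* typical call: migrate_pages(&pagelist, demo_new_page, demo_free_page,
 *               target_nid, MIGRATE_SYNC, MR_SYSCALL); */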
*/ +extern char *migrate_reason_names[MR_TYPES]; + +static inline struct page *new_page_nodemask(struct page *page, + int preferred_nid, nodemask_t *nodemask) +{ + gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; + unsigned int order = 0; + struct page *new_page = NULL; + + if (PageHuge(page)) + return alloc_huge_page_nodemask(page_hstate(compound_head(page)), + preferred_nid, nodemask); + + if (PageTransHuge(page)) { + gfp_mask |= GFP_TRANSHUGE; + order = HPAGE_PMD_ORDER; + } + + if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) + gfp_mask |= __GFP_HIGHMEM; + + new_page = __alloc_pages_nodemask(gfp_mask, order, + preferred_nid, nodemask); + + if (new_page && PageTransHuge(new_page)) + prep_transhuge_page(new_page); + + return new_page; +} + +#ifdef CONFIG_MIGRATION + +extern void putback_movable_pages(struct list_head *l); +extern int migrate_page(struct address_space *mapping, + struct page *newpage, struct page *page, + enum migrate_mode mode); +extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, + unsigned long private, enum migrate_mode mode, int reason); +extern int isolate_movable_page(struct page *page, isolate_mode_t mode); +extern void putback_movable_page(struct page *page); + +extern int migrate_prep(void); +extern int migrate_prep_local(void); +extern void migrate_page_states(struct page *newpage, struct page *page); +extern void migrate_page_copy(struct page *newpage, struct page *page); +extern int migrate_huge_page_move_mapping(struct address_space *mapping, + struct page *newpage, struct page *page); +extern int migrate_page_move_mapping(struct address_space *mapping, + struct page *newpage, struct page *page, + struct buffer_head *head, enum migrate_mode mode, + int extra_count); +#else + +static inline void putback_movable_pages(struct list_head *l) {} +static inline int migrate_pages(struct list_head *l, new_page_t new, + free_page_t free, unsigned long private, enum migrate_mode mode, + int reason) + { return -ENOSYS; } +static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) + { return -EBUSY; } + +static inline int migrate_prep(void) { return -ENOSYS; } +static inline int migrate_prep_local(void) { return -ENOSYS; } + +static inline void migrate_page_states(struct page *newpage, struct page *page) +{ +} + +static inline void migrate_page_copy(struct page *newpage, + struct page *page) {} + +static inline int migrate_huge_page_move_mapping(struct address_space *mapping, + struct page *newpage, struct page *page) +{ + return -ENOSYS; +} + +#endif /* CONFIG_MIGRATION */ + +#ifdef CONFIG_COMPACTION +extern int PageMovable(struct page *page); +extern void __SetPageMovable(struct page *page, struct address_space *mapping); +extern void __ClearPageMovable(struct page *page); +#else +static inline int PageMovable(struct page *page) { return 0; }; +static inline void __SetPageMovable(struct page *page, + struct address_space *mapping) +{ +} +static inline void __ClearPageMovable(struct page *page) +{ +} +#endif + +#ifdef CONFIG_NUMA_BALANCING +extern bool pmd_trans_migrating(pmd_t pmd); +extern int migrate_misplaced_page(struct page *page, + struct vm_area_struct *vma, int node); +#else +static inline bool pmd_trans_migrating(pmd_t pmd) +{ + return false; +} +static inline int migrate_misplaced_page(struct page *page, + struct vm_area_struct *vma, int node) +{ + return -EAGAIN; /* can't migrate now */ +} +#endif /* CONFIG_NUMA_BALANCING */ + +#if defined(CONFIG_NUMA_BALANCING) && 
defined(CONFIG_TRANSPARENT_HUGEPAGE) +extern int migrate_misplaced_transhuge_page(struct mm_struct *mm, + struct vm_area_struct *vma, + pmd_t *pmd, pmd_t entry, + unsigned long address, + struct page *page, int node); +#else +static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm, + struct vm_area_struct *vma, + pmd_t *pmd, pmd_t entry, + unsigned long address, + struct page *page, int node) +{ + return -EAGAIN; +} +#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/ + + +#ifdef CONFIG_MIGRATION + +/* + * Watch out for PAE architecture, which has an unsigned long, and might not + * have enough bits to store all physical address and flags. So far we have + * enough room for all our flags. + */ +#define MIGRATE_PFN_VALID (1UL << 0) +#define MIGRATE_PFN_MIGRATE (1UL << 1) +#define MIGRATE_PFN_LOCKED (1UL << 2) +#define MIGRATE_PFN_WRITE (1UL << 3) +#define MIGRATE_PFN_DEVICE (1UL << 4) +#define MIGRATE_PFN_ERROR (1UL << 5) +#define MIGRATE_PFN_SHIFT 6 + +static inline struct page *migrate_pfn_to_page(unsigned long mpfn) +{ + if (!(mpfn & MIGRATE_PFN_VALID)) + return NULL; + return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT); +} + +static inline unsigned long migrate_pfn(unsigned long pfn) +{ + return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID; +} + +/* + * struct migrate_vma_ops - migrate operation callback + * + * @alloc_and_copy: alloc destination memory and copy source memory to it + * @finalize_and_map: allow caller to map the successfully migrated pages + * + * + * The alloc_and_copy() callback happens once all source pages have been locked, + * unmapped and checked (checked whether pinned or not). All pages that can be + * migrated will have an entry in the src array set with the pfn value of the + * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set (other + * flags might be set but should be ignored by the callback). + * + * The alloc_and_copy() callback can then allocate destination memory and copy + * source memory to it for all those entries (ie with MIGRATE_PFN_VALID and + * MIGRATE_PFN_MIGRATE flag set). Once these are allocated and copied, the + * callback must update each corresponding entry in the dst array with the pfn + * value of the destination page and with the MIGRATE_PFN_VALID and + * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages + * locked, via lock_page()). + * + * At this point the alloc_and_copy() callback is done and returns. + * + * Note that the callback does not have to migrate all the pages that are + * marked with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration + * from device memory to system memory (ie the MIGRATE_PFN_DEVICE flag is also + * set in the src array entry). If the device driver cannot migrate a device + * page back to system memory, then it must set the corresponding dst array + * entry to MIGRATE_PFN_ERROR. This will trigger a SIGBUS if CPU tries to + * access any of the virtual addresses originally backed by this page. Because + * a SIGBUS is such a severe result for the userspace process, the device + * driver should avoid setting MIGRATE_PFN_ERROR unless it is really in an + * unrecoverable state. + * + * For empty entry inside CPU page table (pte_none() or pmd_none() is true) we + * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus + * allowing device driver to allocate device memory for those unback virtual + * address. 
For this the device driver simply have to allocate device memory + * and properly set the destination entry like for regular migration. Note that + * this can still fails and thus inside the device driver must check if the + * migration was successful for those entry inside the finalize_and_map() + * callback just like for regular migration. + * + * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES + * OR BAD THINGS WILL HAPPEN ! + * + * + * The finalize_and_map() callback happens after struct page migration from + * source to destination (destination struct pages are the struct pages for the + * memory allocated by the alloc_and_copy() callback). Migration can fail, and + * thus the finalize_and_map() allows the driver to inspect which pages were + * successfully migrated, and which were not. Successfully migrated pages will + * have the MIGRATE_PFN_MIGRATE flag set for their src array entry. + * + * It is safe to update device page table from within the finalize_and_map() + * callback because both destination and source page are still locked, and the + * mmap_sem is held in read mode (hence no one can unmap the range being + * migrated). + * + * Once callback is done cleaning up things and updating its page table (if it + * chose to do so, this is not an obligation) then it returns. At this point, + * the HMM core will finish up the final steps, and the migration is complete. + * + * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY + * ENTRIES OR BAD THINGS WILL HAPPEN ! + */ +struct migrate_vma_ops { + void (*alloc_and_copy)(struct vm_area_struct *vma, + const unsigned long *src, + unsigned long *dst, + unsigned long start, + unsigned long end, + void *private); + void (*finalize_and_map)(struct vm_area_struct *vma, + const unsigned long *src, + const unsigned long *dst, + unsigned long start, + unsigned long end, + void *private); +}; + +#if defined(CONFIG_MIGRATE_VMA_HELPER) +int migrate_vma(const struct migrate_vma_ops *ops, + struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + unsigned long *src, + unsigned long *dst, + void *private); +#else +static inline int migrate_vma(const struct migrate_vma_ops *ops, + struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + unsigned long *src, + unsigned long *dst, + void *private) +{ + return -EINVAL; +} +#endif /* IS_ENABLED(CONFIG_MIGRATE_VMA_HELPER) */ + +#endif /* CONFIG_MIGRATION */ + +#endif /* _LINUX_MIGRATE_H */ diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h new file mode 100644 index 000000000..883c99249 --- /dev/null +++ b/include/linux/migrate_mode.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef MIGRATE_MODE_H_INCLUDED +#define MIGRATE_MODE_H_INCLUDED +/* + * MIGRATE_ASYNC means never block + * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking + * on most operations but not ->writepage as the potential stall time + * is too significant + * MIGRATE_SYNC will block when migrating pages + * MIGRATE_SYNC_NO_COPY will block when migrating pages but will not copy pages + * with the CPU. Instead, page copy happens outside the migratepage() + * callback and is likely using a DMA engine. See migrate_vma() and HMM + * (mm/hmm.c) for users of this mode. 
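Since the MIGRATE_SYNC_NO_COPY note above points back at migrate_vma(), here is a rough skeleton of the two migrate_vma_ops callbacks described earlier in this hunk. It is purely illustrative: the demo_ names and the plain alloc_page() stand-in for real device memory are assumptions, not part of the header.

static void demo_alloc_and_copy(struct vm_area_struct *vma,
                                const unsigned long *src, unsigned long *dst,
                                unsigned long start, unsigned long end,
                                void *private)
{
        unsigned long addr;
        unsigned long i = 0;

        for (addr = start; addr < end; addr += PAGE_SIZE, i++) {
                struct page *dpage;

                if (!(src[i] & MIGRATE_PFN_MIGRATE))
                        continue;
                dpage = alloc_page(GFP_HIGHUSER_MOVABLE);
                if (!dpage)
                        continue;       /* skipping an entry is allowed */
                lock_page(dpage);
                /* ... copy from migrate_pfn_to_page(src[i]) into dpage ... */
                dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        }
}

static void demo_finalize_and_map(struct vm_area_struct *vma,
                                  const unsigned long *src, const unsigned long *dst,
                                  unsigned long start, unsigned long end,
                                  void *private)
{
        /* src[i] & MIGRATE_PFN_MIGRATE shows which entries actually migrated */
}

static const struct migrate_vma_ops demo_migrate_ops = {
        .alloc_and_copy   = demo_alloc_and_copy,
        .finalize_and_map = demo_finalize_and_map,
};

/* driven by: migrate_vma(&demo_migrate_ops, vma, start, end, src, dst, NULL); */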
+ */ +enum migrate_mode { + MIGRATE_ASYNC, + MIGRATE_SYNC_LIGHT, + MIGRATE_SYNC, + MIGRATE_SYNC_NO_COPY, +}; + +#endif /* MIGRATE_MODE_H_INCLUDED */ diff --git a/include/linux/mii.h b/include/linux/mii.h new file mode 100644 index 000000000..55000ee5c --- /dev/null +++ b/include/linux/mii.h @@ -0,0 +1,344 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/mii.h: definitions for MII-compatible transceivers + * Originally drivers/net/sunhme.h. + * + * Copyright (C) 1996, 1999, 2001 David S. Miller (davem@redhat.com) + */ +#ifndef __LINUX_MII_H__ +#define __LINUX_MII_H__ + + +#include +#include + +struct ethtool_cmd; + +struct mii_if_info { + int phy_id; + int advertising; + int phy_id_mask; + int reg_num_mask; + + unsigned int full_duplex : 1; /* is full duplex? */ + unsigned int force_media : 1; /* is autoneg. disabled? */ + unsigned int supports_gmii : 1; /* are GMII registers supported? */ + + struct net_device *dev; + int (*mdio_read) (struct net_device *dev, int phy_id, int location); + void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); +}; + +extern int mii_link_ok (struct mii_if_info *mii); +extern int mii_nway_restart (struct mii_if_info *mii); +extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); +extern void mii_ethtool_get_link_ksettings( + struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); +extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); +extern int mii_ethtool_set_link_ksettings( + struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd); +extern int mii_check_gmii_support(struct mii_if_info *mii); +extern void mii_check_link (struct mii_if_info *mii); +extern unsigned int mii_check_media (struct mii_if_info *mii, + unsigned int ok_to_print, + unsigned int init_media); +extern int generic_mii_ioctl(struct mii_if_info *mii_if, + struct mii_ioctl_data *mii_data, int cmd, + unsigned int *duplex_changed); + + +static inline struct mii_ioctl_data *if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +/** + * mii_nway_result + * @negotiated: value of MII ANAR and'd with ANLPAR + * + * Given a set of MII abilities, check each bit and returns the + * currently supported media, in the priority order defined by + * IEEE 802.3u. We use LPA_xxx constants but note this is not the + * value of LPA solely, as described above. + * + * The one exception to IEEE 802.3u is that 100baseT4 is placed + * between 100T-full and 100T-half. If your phy does not support + * 100T4 this is fine. If your phy places 100T4 elsewhere in the + * priority order, you will need to roll your own function. + */ +static inline unsigned int mii_nway_result (unsigned int negotiated) +{ + unsigned int ret; + + if (negotiated & LPA_100FULL) + ret = LPA_100FULL; + else if (negotiated & LPA_100BASE4) + ret = LPA_100BASE4; + else if (negotiated & LPA_100HALF) + ret = LPA_100HALF; + else if (negotiated & LPA_10FULL) + ret = LPA_10FULL; + else + ret = LPA_10HALF; + + return ret; +} + +/** + * mii_duplex + * @duplex_lock: Non-zero if duplex is locked at full + * @negotiated: value of MII ANAR and'd with ANLPAR + * + * A small helper function for a common case. Returns one + * if the media is operating or locked at full duplex, and + * returns zero otherwise. 
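As a worked example of how mii_nway_result() and the mii_duplex() helper documented here are normally fed, a driver ANDs its own advertisement with the link partner's ability. The sketch below assumes the standard MII_ADVERTISE/MII_LPA register indices and LPA_* bits from uapi/linux/mii.h; the demo_ name is illustrative only:

static void demo_update_link_mode(struct mii_if_info *mii)
{
        int advert = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
        int lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
        unsigned int negotiated = advert & lpa;

        /* highest common ability, in the 802.3u priority order described above */
        unsigned int media = mii_nway_result(negotiated);

        mii->full_duplex = (media & (LPA_100FULL | LPA_10FULL)) ? 1 : 0;
}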
+ */ +static inline unsigned int mii_duplex (unsigned int duplex_lock, + unsigned int negotiated) +{ + if (duplex_lock) + return 1; + if (mii_nway_result(negotiated) & LPA_DUPLEX) + return 1; + return 0; +} + +/** + * ethtool_adv_to_mii_adv_t + * @ethadv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement + * settings to phy autonegotiation advertisements for the + * MII_ADVERTISE register. + */ +static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_10baseT_Half) + result |= ADVERTISE_10HALF; + if (ethadv & ADVERTISED_10baseT_Full) + result |= ADVERTISE_10FULL; + if (ethadv & ADVERTISED_100baseT_Half) + result |= ADVERTISE_100HALF; + if (ethadv & ADVERTISED_100baseT_Full) + result |= ADVERTISE_100FULL; + if (ethadv & ADVERTISED_Pause) + result |= ADVERTISE_PAUSE_CAP; + if (ethadv & ADVERTISED_Asym_Pause) + result |= ADVERTISE_PAUSE_ASYM; + + return result; +} + +/** + * mii_adv_to_ethtool_adv_t + * @adv: value of the MII_ADVERTISE register + * + * A small helper function that translates MII_ADVERTISE bits + * to ethtool advertisement settings. + */ +static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_10HALF) + result |= ADVERTISED_10baseT_Half; + if (adv & ADVERTISE_10FULL) + result |= ADVERTISED_10baseT_Full; + if (adv & ADVERTISE_100HALF) + result |= ADVERTISED_100baseT_Half; + if (adv & ADVERTISE_100FULL) + result |= ADVERTISED_100baseT_Full; + if (adv & ADVERTISE_PAUSE_CAP) + result |= ADVERTISED_Pause; + if (adv & ADVERTISE_PAUSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; +} + +/** + * ethtool_adv_to_mii_ctrl1000_t + * @ethadv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement + * settings to phy autonegotiation advertisements for the + * MII_CTRL1000 register when in 1000T mode. + */ +static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_1000baseT_Half) + result |= ADVERTISE_1000HALF; + if (ethadv & ADVERTISED_1000baseT_Full) + result |= ADVERTISE_1000FULL; + + return result; +} + +/** + * mii_ctrl1000_to_ethtool_adv_t + * @adv: value of the MII_CTRL1000 register + * + * A small helper function that translates MII_CTRL1000 + * bits, when in 1000Base-T mode, to ethtool + * advertisement settings. + */ +static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (adv & ADVERTISE_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +/** + * mii_lpa_to_ethtool_lpa_t + * @adv: value of the MII_LPA register + * + * A small helper function that translates MII_LPA + * bits, when in 1000Base-T mode, to ethtool + * LP advertisement settings. + */ +static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_LPACK) + result |= ADVERTISED_Autoneg; + + return result | mii_adv_to_ethtool_adv_t(lpa); +} + +/** + * mii_stat1000_to_ethtool_lpa_t + * @adv: value of the MII_STAT1000 register + * + * A small helper function that translates MII_STAT1000 + * bits, when in 1000Base-T mode, to ethtool + * advertisement settings. 
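A typical consumer combines the base-page and 1000BASE-T halves of the link-partner ability into one ethtool bitmap. A small sketch, assuming lpa and stat1000 hold the MII_LPA and MII_STAT1000 register values:

static u32 demo_lp_advertising(u32 lpa, u32 stat1000)
{
        /* base page (10/100, pause bits) plus the 1000BASE-T status page */
        return mii_lpa_to_ethtool_lpa_t(lpa) |
               mii_stat1000_to_ethtool_lpa_t(stat1000);
}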
+ */ +static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (lpa & LPA_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +/** + * ethtool_adv_to_mii_adv_x + * @ethadv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement + * settings to phy autonegotiation advertisements for the + * MII_CTRL1000 register when in 1000Base-X mode. + */ +static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_1000baseT_Half) + result |= ADVERTISE_1000XHALF; + if (ethadv & ADVERTISED_1000baseT_Full) + result |= ADVERTISE_1000XFULL; + if (ethadv & ADVERTISED_Pause) + result |= ADVERTISE_1000XPAUSE; + if (ethadv & ADVERTISED_Asym_Pause) + result |= ADVERTISE_1000XPSE_ASYM; + + return result; +} + +/** + * mii_adv_to_ethtool_adv_x + * @adv: value of the MII_CTRL1000 register + * + * A small helper function that translates MII_CTRL1000 + * bits, when in 1000Base-X mode, to ethtool + * advertisement settings. + */ +static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_1000XHALF) + result |= ADVERTISED_1000baseT_Half; + if (adv & ADVERTISE_1000XFULL) + result |= ADVERTISED_1000baseT_Full; + if (adv & ADVERTISE_1000XPAUSE) + result |= ADVERTISED_Pause; + if (adv & ADVERTISE_1000XPSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; +} + +/** + * mii_lpa_to_ethtool_lpa_x + * @adv: value of the MII_LPA register + * + * A small helper function that translates MII_LPA + * bits, when in 1000Base-X mode, to ethtool + * LP advertisement settings. + */ +static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_LPACK) + result |= ADVERTISED_Autoneg; + + return result | mii_adv_to_ethtool_adv_x(lpa); +} + +/** + * mii_advertise_flowctrl - get flow control advertisement flags + * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) + */ +static inline u16 mii_advertise_flowctrl(int cap) +{ + u16 adv = 0; + + if (cap & FLOW_CTRL_RX) + adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + if (cap & FLOW_CTRL_TX) + adv ^= ADVERTISE_PAUSE_ASYM; + + return adv; +} + +/** + * mii_resolve_flowctrl_fdx + * @lcladv: value of MII ADVERTISE register + * @rmtadv: value of MII LPA register + * + * Resolve full duplex flow control as per IEEE 802.3-2005 table 28B-3 + */ +static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv) +{ + u8 cap = 0; + + if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) { + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) { + if (lcladv & ADVERTISE_PAUSE_CAP) + cap = FLOW_CTRL_RX; + else if (rmtadv & ADVERTISE_PAUSE_CAP) + cap = FLOW_CTRL_TX; + } + + return cap; +} + +#endif /* __LINUX_MII_H__ */ diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h new file mode 100644 index 000000000..b06b75776 --- /dev/null +++ b/include/linux/miscdevice.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MISCDEVICE_H +#define _LINUX_MISCDEVICE_H +#include +#include +#include +#include + +/* + * These allocations are managed by device@lanana.org. If you use an + * entry that is not in assigned your entry may well be moved and + * reassigned, or set dynamic if a fixed value is not justified. 
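For a new driver the usual answer to that note is simply to request a dynamic minor. A minimal, purely illustrative registration using the MISC_DYNAMIC_MINOR value and the module_misc_device() helper defined further down in this header (demo names assumed, linux/fs.h and linux/module.h included as usual):

static int demo_open(struct inode *inode, struct file *file)
{
        return 0;
}

static const struct file_operations demo_fops = {
        .owner = THIS_MODULE,
        .open  = demo_open,
};

static struct miscdevice demo_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "demo",        /* appears as /dev/demo */
        .fops  = &demo_fops,
};
module_misc_device(demo_miscdev);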
+ */ + +#define PSMOUSE_MINOR 1 +#define MS_BUSMOUSE_MINOR 2 /* unused */ +#define ATIXL_BUSMOUSE_MINOR 3 /* unused */ +/*#define AMIGAMOUSE_MINOR 4 FIXME OBSOLETE */ +#define ATARIMOUSE_MINOR 5 /* unused */ +#define SUN_MOUSE_MINOR 6 /* unused */ +#define APOLLO_MOUSE_MINOR 7 /* unused */ +#define PC110PAD_MINOR 9 /* unused */ +/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ +#define WATCHDOG_MINOR 130 /* Watchdog timer */ +#define TEMP_MINOR 131 /* Temperature Sensor */ +#define APM_MINOR_DEV 134 +#define RTC_MINOR 135 +#define EFI_RTC_MINOR 136 /* EFI Time services */ +#define VHCI_MINOR 137 +#define SUN_OPENPROM_MINOR 139 +#define DMAPI_MINOR 140 /* unused */ +#define NVRAM_MINOR 144 +#define SGI_MMTIMER 153 +#define STORE_QUEUE_MINOR 155 /* unused */ +#define I2O_MINOR 166 +#define HWRNG_MINOR 183 +#define MICROCODE_MINOR 184 +#define IRNET_MINOR 187 +#define D7S_MINOR 193 +#define VFIO_MINOR 196 +#define TUN_MINOR 200 +#define CUSE_MINOR 203 +#define MWAVE_MINOR 219 /* ACP/Mwave Modem */ +#define MPT_MINOR 220 +#define MPT2SAS_MINOR 221 +#define MPT3SAS_MINOR 222 +#define UINPUT_MINOR 223 +#define MISC_MCELOG_MINOR 227 +#define HPET_MINOR 228 +#define FUSE_MINOR 229 +#define KVM_MINOR 232 +#define BTRFS_MINOR 234 +#define AUTOFS_MINOR 235 +#define MAPPER_CTRL_MINOR 236 +#define LOOP_CTRL_MINOR 237 +#define VHOST_NET_MINOR 238 +#define UHID_MINOR 239 +#define USERIO_MINOR 240 +#define VHOST_VSOCK_MINOR 241 +#define RFKILL_MINOR 242 +#define MISC_DYNAMIC_MINOR 255 + +struct device; +struct attribute_group; + +struct miscdevice { + int minor; + const char *name; + const struct file_operations *fops; + struct list_head list; + struct device *parent; + struct device *this_device; + const struct attribute_group **groups; + const char *nodename; + umode_t mode; +}; + +extern int misc_register(struct miscdevice *misc); +extern void misc_deregister(struct miscdevice *misc); + +/* + * Helper macro for drivers that don't do anything special in the initcall. + * This helps in eleminating of boilerplate code. + */ +#define builtin_misc_device(__misc_device) \ + builtin_driver(__misc_device, misc_register) + +/* + * Helper macro for drivers that don't do anything special in module init / exit + * call. This helps in eleminating of boilerplate code. + */ +#define module_misc_device(__misc_device) \ + module_driver(__misc_device, misc_register, misc_deregister) + +#define MODULE_ALIAS_MISCDEV(minor) \ + MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \ + "-" __stringify(minor)) +#endif diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h new file mode 100644 index 000000000..7b74afcbb --- /dev/null +++ b/include/linux/mlx4/cmd.h @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2006 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_CMD_H +#define MLX4_CMD_H + +#include +#include +#include +#include + +enum { + /* initialization and general commands */ + MLX4_CMD_SYS_EN = 0x1, + MLX4_CMD_SYS_DIS = 0x2, + MLX4_CMD_MAP_FA = 0xfff, + MLX4_CMD_UNMAP_FA = 0xffe, + MLX4_CMD_RUN_FW = 0xff6, + MLX4_CMD_MOD_STAT_CFG = 0x34, + MLX4_CMD_QUERY_DEV_CAP = 0x3, + MLX4_CMD_QUERY_FW = 0x4, + MLX4_CMD_ENABLE_LAM = 0xff8, + MLX4_CMD_DISABLE_LAM = 0xff7, + MLX4_CMD_QUERY_DDR = 0x5, + MLX4_CMD_QUERY_ADAPTER = 0x6, + MLX4_CMD_INIT_HCA = 0x7, + MLX4_CMD_CLOSE_HCA = 0x8, + MLX4_CMD_INIT_PORT = 0x9, + MLX4_CMD_CLOSE_PORT = 0xa, + MLX4_CMD_QUERY_HCA = 0xb, + MLX4_CMD_QUERY_PORT = 0x43, + MLX4_CMD_SENSE_PORT = 0x4d, + MLX4_CMD_HW_HEALTH_CHECK = 0x50, + MLX4_CMD_SET_PORT = 0xc, + MLX4_CMD_SET_NODE = 0x5a, + MLX4_CMD_QUERY_FUNC = 0x56, + MLX4_CMD_ACCESS_DDR = 0x2e, + MLX4_CMD_MAP_ICM = 0xffa, + MLX4_CMD_UNMAP_ICM = 0xff9, + MLX4_CMD_MAP_ICM_AUX = 0xffc, + MLX4_CMD_UNMAP_ICM_AUX = 0xffb, + MLX4_CMD_SET_ICM_SIZE = 0xffd, + MLX4_CMD_ACCESS_REG = 0x3b, + MLX4_CMD_ALLOCATE_VPP = 0x80, + MLX4_CMD_SET_VPORT_QOS = 0x81, + + /*master notify fw on finish for slave's flr*/ + MLX4_CMD_INFORM_FLR_DONE = 0x5b, + MLX4_CMD_VIRT_PORT_MAP = 0x5c, + MLX4_CMD_GET_OP_REQ = 0x59, + + /* TPT commands */ + MLX4_CMD_SW2HW_MPT = 0xd, + MLX4_CMD_QUERY_MPT = 0xe, + MLX4_CMD_HW2SW_MPT = 0xf, + MLX4_CMD_READ_MTT = 0x10, + MLX4_CMD_WRITE_MTT = 0x11, + MLX4_CMD_SYNC_TPT = 0x2f, + + /* EQ commands */ + MLX4_CMD_MAP_EQ = 0x12, + MLX4_CMD_SW2HW_EQ = 0x13, + MLX4_CMD_HW2SW_EQ = 0x14, + MLX4_CMD_QUERY_EQ = 0x15, + + /* CQ commands */ + MLX4_CMD_SW2HW_CQ = 0x16, + MLX4_CMD_HW2SW_CQ = 0x17, + MLX4_CMD_QUERY_CQ = 0x18, + MLX4_CMD_MODIFY_CQ = 0x2c, + + /* SRQ commands */ + MLX4_CMD_SW2HW_SRQ = 0x35, + MLX4_CMD_HW2SW_SRQ = 0x36, + MLX4_CMD_QUERY_SRQ = 0x37, + MLX4_CMD_ARM_SRQ = 0x40, + + /* QP/EE commands */ + MLX4_CMD_RST2INIT_QP = 0x19, + MLX4_CMD_INIT2RTR_QP = 0x1a, + MLX4_CMD_RTR2RTS_QP = 0x1b, + MLX4_CMD_RTS2RTS_QP = 0x1c, + MLX4_CMD_SQERR2RTS_QP = 0x1d, + MLX4_CMD_2ERR_QP = 0x1e, + MLX4_CMD_RTS2SQD_QP = 0x1f, + MLX4_CMD_SQD2SQD_QP = 0x38, + MLX4_CMD_SQD2RTS_QP = 0x20, + MLX4_CMD_2RST_QP = 0x21, + MLX4_CMD_QUERY_QP = 0x22, + MLX4_CMD_INIT2INIT_QP = 0x2d, + MLX4_CMD_SUSPEND_QP = 0x32, + MLX4_CMD_UNSUSPEND_QP = 0x33, + MLX4_CMD_UPDATE_QP = 0x61, + /* special QP and management commands */ + MLX4_CMD_CONF_SPECIAL_QP = 0x23, + MLX4_CMD_MAD_IFC = 0x24, + MLX4_CMD_MAD_DEMUX = 0x203, + + /* multicast commands */ + MLX4_CMD_READ_MCG = 0x25, + MLX4_CMD_WRITE_MCG = 0x26, + MLX4_CMD_MGID_HASH = 0x27, + + /* miscellaneous commands */ + MLX4_CMD_DIAG_RPRT = 0x30, + MLX4_CMD_NOP = 0x31, + MLX4_CMD_CONFIG_DEV = 0x3a, + MLX4_CMD_ACCESS_MEM = 0x2e, + MLX4_CMD_SET_VEP = 0x52, + + /* Ethernet specific commands */ + MLX4_CMD_SET_VLAN_FLTR = 0x47, + MLX4_CMD_SET_MCAST_FLTR = 
0x48, + MLX4_CMD_DUMP_ETH_STATS = 0x49, + + /* Communication channel commands */ + MLX4_CMD_ARM_COMM_CHANNEL = 0x57, + MLX4_CMD_GEN_EQE = 0x58, + + /* virtual commands */ + MLX4_CMD_ALLOC_RES = 0xf00, + MLX4_CMD_FREE_RES = 0xf01, + MLX4_CMD_MCAST_ATTACH = 0xf05, + MLX4_CMD_UCAST_ATTACH = 0xf06, + MLX4_CMD_PROMISC = 0xf08, + MLX4_CMD_QUERY_FUNC_CAP = 0xf0a, + MLX4_CMD_QP_ATTACH = 0xf0b, + + /* debug commands */ + MLX4_CMD_QUERY_DEBUG_MSG = 0x2a, + MLX4_CMD_SET_DEBUG_MSG = 0x2b, + + /* statistics commands */ + MLX4_CMD_QUERY_IF_STAT = 0X54, + MLX4_CMD_SET_IF_STAT = 0X55, + + /* register/delete flow steering network rules */ + MLX4_QP_FLOW_STEERING_ATTACH = 0x65, + MLX4_QP_FLOW_STEERING_DETACH = 0x66, + MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64, + + /* Update and read QCN parameters */ + MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68, +}; + +enum { + MLX4_CMD_TIME_CLASS_A = 60000, + MLX4_CMD_TIME_CLASS_B = 60000, + MLX4_CMD_TIME_CLASS_C = 60000, +}; + +enum { + /* virtual to physical port mapping opcode modifiers */ + MLX4_GET_PORT_VIRT2PHY = 0x0, + MLX4_SET_PORT_VIRT2PHY = 0x1, +}; + +enum { + MLX4_MAILBOX_SIZE = 4096, + MLX4_ACCESS_MEM_ALIGN = 256, +}; + +enum { + /* Set port opcode modifiers */ + MLX4_SET_PORT_IB_OPCODE = 0x0, + MLX4_SET_PORT_ETH_OPCODE = 0x1, + MLX4_SET_PORT_BEACON_OPCODE = 0x4, +}; + +enum { + /* Set port Ethernet input modifiers */ + MLX4_SET_PORT_GENERAL = 0x0, + MLX4_SET_PORT_RQP_CALC = 0x1, + MLX4_SET_PORT_MAC_TABLE = 0x2, + MLX4_SET_PORT_VLAN_TABLE = 0x3, + MLX4_SET_PORT_PRIO_MAP = 0x4, + MLX4_SET_PORT_GID_TABLE = 0x5, + MLX4_SET_PORT_PRIO2TC = 0x8, + MLX4_SET_PORT_SCHEDULER = 0x9, + MLX4_SET_PORT_VXLAN = 0xB, + MLX4_SET_PORT_ROCE_ADDR = 0xD +}; + +enum { + MLX4_CMD_MAD_DEMUX_CONFIG = 0, + MLX4_CMD_MAD_DEMUX_QUERY_STATE = 1, + MLX4_CMD_MAD_DEMUX_QUERY_RESTR = 2, /* Query mad demux restrictions */ +}; + +enum { + MLX4_CMD_WRAPPED, + MLX4_CMD_NATIVE +}; + +/* + * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP - + * Receive checksum value is reported in CQE also for non TCP/UDP packets. + * + * MLX4_RX_CSUM_MODE_L4 - + * L4_CSUM bit in CQE, which indicates whether or not L4 checksum + * was validated correctly, is supported. + * + * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP - + * IP_OK CQE's field is supported also for non TCP/UDP IP packets. + * + * MLX4_RX_CSUM_MODE_MULTI_VLAN - + * Receive Checksum offload is supported for packets with more than 2 vlan headers. 
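Capabilities like these are reported by firmware, and queries of that kind typically follow one mailbox pattern built on the mlx4_cmd_box() wrapper declared below: allocate a DMA mailbox, issue the opcode, parse the reply buffer, free the mailbox. A hedged sketch, with MLX4_CMD_QUERY_PORT chosen only as a familiar example opcode and the demo_ name hypothetical:

static int demo_query_port(struct mlx4_dev *dev, int port)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        /* firmware writes its reply into the DMA mailbox buffer */
        err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
                           MLX4_CMD_WRAPPED);
        if (!err) {
                /* parse mailbox->buf here */
        }

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}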
+ */ +enum mlx4_rx_csum_mode { + MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0, + MLX4_RX_CSUM_MODE_L4 = 1UL << 1, + MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2, + MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3 +}; + +struct mlx4_config_dev_params { + u16 vxlan_udp_dport; + u8 rx_csum_flags_port_1; + u8 rx_csum_flags_port_2; +}; + +enum mlx4_en_congestion_control_algorithm { + MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0, +}; + +enum mlx4_en_congestion_control_opmod { + MLX4_CONGESTION_CONTROL_GET_PARAMS, + MLX4_CONGESTION_CONTROL_GET_STATISTICS, + MLX4_CONGESTION_CONTROL_SET_PARAMS = 4, +}; + +struct mlx4_dev; + +struct mlx4_cmd_mailbox { + void *buf; + dma_addr_t dma; +}; + +int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout, int native); + +/* Invoke a command with no output parameter */ +static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier, + u8 op_modifier, u16 op, unsigned long timeout, + int native) +{ + return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier, + op_modifier, op, timeout, native); +} + +/* Invoke a command with an output mailbox */ +static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param, + u32 in_modifier, u8 op_modifier, u16 op, + unsigned long timeout, int native) +{ + return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier, + op_modifier, op, timeout, native); +} + +/* + * Invoke a command with an immediate output parameter (and copy the + * output into the caller's out_param pointer after the command + * executes). + */ +static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + u32 in_modifier, u8 op_modifier, u16 op, + unsigned long timeout, int native) +{ + return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier, + op_modifier, op, timeout, native); +} + +struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); +void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); + +int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index, + struct mlx4_counter *counter_stats, int reset); +int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx, + struct ifla_vf_stats *vf_stats); +u32 mlx4_comm_get_version(void); +int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac); +int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, + u8 qos, __be16 proto); +int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate, + int max_tx_rate); +int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); +int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); +int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); +int mlx4_config_dev_retrieval(struct mlx4_dev *dev, + struct mlx4_config_dev_params *params); +void mlx4_cmd_wake_completions(struct mlx4_dev *dev); +void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev); +/* + * mlx4_get_slave_default_vlan - + * return true if VST ( default vlan) + * if VST, will return vlan & qos (if not NULL) + */ +bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, + u16 *vlan, u8 *qos); + +#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) +#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17) + +#endif /* MLX4_CMD_H */ diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h new file mode 100644 index 000000000..508e8cc5e --- /dev/null +++ 
b/include/linux/mlx4/cq.h @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_CQ_H +#define MLX4_CQ_H + +#include +#include + +#include +#include + +struct mlx4_cqe { + __be32 vlan_my_qpn; + __be32 immed_rss_invalid; + __be32 g_mlpath_rqpn; + __be16 sl_vid; + union { + struct { + __be16 rlid; + __be16 status; + u8 ipv6_ext_mask; + u8 badfcs_enc; + }; + u8 smac[ETH_ALEN]; + }; + __be32 byte_cnt; + __be16 wqe_index; + __be16 checksum; + u8 reserved[3]; + u8 owner_sr_opcode; +}; + +struct mlx4_err_cqe { + __be32 my_qpn; + u32 reserved1[5]; + __be16 wqe_index; + u8 vendor_err_syndrome; + u8 syndrome; + u8 reserved2[3]; + u8 owner_sr_opcode; +}; + +struct mlx4_ts_cqe { + __be32 vlan_my_qpn; + __be32 immed_rss_invalid; + __be32 g_mlpath_rqpn; + __be32 timestamp_hi; + __be16 status; + u8 ipv6_ext_mask; + u8 badfcs_enc; + __be32 byte_cnt; + __be16 wqe_index; + __be16 checksum; + u8 reserved; + __be16 timestamp_lo; + u8 owner_sr_opcode; +} __packed; + +enum { + MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31, + MLX4_CQE_CVLAN_PRESENT_MASK = 1 << 29, + MLX4_CQE_SVLAN_PRESENT_MASK = 1 << 30, + MLX4_CQE_L2_TUNNEL = 1 << 27, + MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26, + MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25, + + MLX4_CQE_QPN_MASK = 0xffffff, + MLX4_CQE_VID_MASK = 0xfff, +}; + +enum { + MLX4_CQE_OWNER_MASK = 0x80, + MLX4_CQE_IS_SEND_MASK = 0x40, + MLX4_CQE_OPCODE_MASK = 0x1f +}; + +enum { + MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01, + MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02, + MLX4_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04, + MLX4_CQE_SYNDROME_WR_FLUSH_ERR = 0x05, + MLX4_CQE_SYNDROME_MW_BIND_ERR = 0x06, + MLX4_CQE_SYNDROME_BAD_RESP_ERR = 0x10, + MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11, + MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, + MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13, + MLX4_CQE_SYNDROME_REMOTE_OP_ERR = 0x14, + MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15, + MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16, + MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, +}; + +enum { + MLX4_CQE_STATUS_IPV4 = 1 << 6, + MLX4_CQE_STATUS_IPV4F = 1 << 7, + MLX4_CQE_STATUS_IPV6 = 1 << 8, + MLX4_CQE_STATUS_IPV4OPT = 1 << 9, + MLX4_CQE_STATUS_TCP = 1 << 10, + MLX4_CQE_STATUS_UDP = 1 << 11, 
+ MLX4_CQE_STATUS_IPOK = 1 << 12, +}; + +enum { + MLX4_CQE_LLC = 1, + MLX4_CQE_SNAP = 1 << 1, + MLX4_CQE_BAD_FCS = 1 << 4, +}; + +#define MLX4_MAX_CQ_PERIOD (BIT(16) - 1) +#define MLX4_MAX_CQ_COUNT (BIT(16) - 1) + +static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, + void __iomem *uar_page, + spinlock_t *doorbell_lock) +{ + __be32 doorbell[2]; + u32 sn; + u32 ci; + + sn = cq->arm_sn & 3; + ci = cq->cons_index & 0xffffff; + + *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); + + /* + * Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. + */ + wmb(); + + doorbell[0] = cpu_to_be32(sn << 28 | cmd | cq->cqn); + doorbell[1] = cpu_to_be32(ci); + + mlx4_write64(doorbell, uar_page + MLX4_CQ_DOORBELL, doorbell_lock); +} + +static inline void mlx4_cq_set_ci(struct mlx4_cq *cq) +{ + *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); +} + +enum { + MLX4_CQ_DB_REQ_NOT_SOL = 1 << 24, + MLX4_CQ_DB_REQ_NOT = 2 << 24 +}; + +int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, + u16 count, u16 period); +int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, + int entries, struct mlx4_mtt *mtt); + +#endif /* MLX4_CQ_H */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h new file mode 100644 index 000000000..3ebdd384a --- /dev/null +++ b/include/linux/mlx4/device.h @@ -0,0 +1,1605 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_DEVICE_H +#define MLX4_DEVICE_H + +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define DEFAULT_UAR_PAGE_SHIFT 12 + +#define MAX_MSIX_P_PORT 17 +#define MAX_MSIX 64 +#define MIN_MSIX_P_PORT 5 +#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \ + (dev_cap).num_ports * MIN_MSIX_P_PORT) + +#define MLX4_MAX_100M_UNITS_VAL 255 /* + * work around: can't set values + * greater then this value when + * using 100 Mbps units. 
+ */ +#define MLX4_RATELIMIT_100M_UNITS 3 /* 100 Mbps */ +#define MLX4_RATELIMIT_1G_UNITS 4 /* 1 Gbps */ +#define MLX4_RATELIMIT_DEFAULT 0x00ff + +#define MLX4_ROCE_MAX_GIDS 128 +#define MLX4_ROCE_PF_GIDS 16 + +enum { + MLX4_FLAG_MSI_X = 1 << 0, + MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, + MLX4_FLAG_MASTER = 1 << 2, + MLX4_FLAG_SLAVE = 1 << 3, + MLX4_FLAG_SRIOV = 1 << 4, + MLX4_FLAG_OLD_REG_MAC = 1 << 6, + MLX4_FLAG_BONDED = 1 << 7, + MLX4_FLAG_SECURE_HOST = 1 << 8, +}; + +enum { + MLX4_PORT_CAP_IS_SM = 1 << 1, + MLX4_PORT_CAP_DEV_MGMT_SUP = 1 << 19, +}; + +enum { + MLX4_MAX_PORTS = 2, + MLX4_MAX_PORT_PKEYS = 128, + MLX4_MAX_PORT_GIDS = 128 +}; + +/* base qkey for use in sriov tunnel-qp/proxy-qp communication. + * These qkeys must not be allowed for general use. This is a 64k range, + * and to test for violation, we use the mask (protect against future chg). + */ +#define MLX4_RESERVED_QKEY_BASE (0xFFFF0000) +#define MLX4_RESERVED_QKEY_MASK (0xFFFF0000) + +enum { + MLX4_BOARD_ID_LEN = 64 +}; + +enum { + MLX4_MAX_NUM_PF = 16, + MLX4_MAX_NUM_VF = 126, + MLX4_MAX_NUM_VF_P_PORT = 64, + MLX4_MFUNC_MAX = 128, + MLX4_MAX_EQ_NUM = 1024, + MLX4_MFUNC_EQ_NUM = 4, + MLX4_MFUNC_MAX_EQES = 8, + MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) +}; + +/* Driver supports 3 different device methods to manage traffic steering: + * -device managed - High level API for ib and eth flow steering. FW is + * managing flow steering tables. + * - B0 steering mode - Common low level API for ib and (if supported) eth. + * - A0 steering mode - Limited low level API for eth. In case of IB, + * B0 mode is in use. + */ +enum { + MLX4_STEERING_MODE_A0, + MLX4_STEERING_MODE_B0, + MLX4_STEERING_MODE_DEVICE_MANAGED +}; + +enum { + MLX4_STEERING_DMFS_A0_DEFAULT, + MLX4_STEERING_DMFS_A0_DYNAMIC, + MLX4_STEERING_DMFS_A0_STATIC, + MLX4_STEERING_DMFS_A0_DISABLE, + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED +}; + +static inline const char *mlx4_steering_mode_str(int steering_mode) +{ + switch (steering_mode) { + case MLX4_STEERING_MODE_A0: + return "A0 steering"; + + case MLX4_STEERING_MODE_B0: + return "B0 steering"; + + case MLX4_STEERING_MODE_DEVICE_MANAGED: + return "Device managed flow steering"; + + default: + return "Unrecognize steering mode"; + } +} + +enum { + MLX4_TUNNEL_OFFLOAD_MODE_NONE, + MLX4_TUNNEL_OFFLOAD_MODE_VXLAN +}; + +enum { + MLX4_DEV_CAP_FLAG_RC = 1LL << 0, + MLX4_DEV_CAP_FLAG_UC = 1LL << 1, + MLX4_DEV_CAP_FLAG_UD = 1LL << 2, + MLX4_DEV_CAP_FLAG_XRC = 1LL << 3, + MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6, + MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7, + MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, + MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, + MLX4_DEV_CAP_FLAG_DPDP = 1LL << 12, + MLX4_DEV_CAP_FLAG_BLH = 1LL << 15, + MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1LL << 16, + MLX4_DEV_CAP_FLAG_APM = 1LL << 17, + MLX4_DEV_CAP_FLAG_ATOMIC = 1LL << 18, + MLX4_DEV_CAP_FLAG_RAW_MCAST = 1LL << 19, + MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1LL << 20, + MLX4_DEV_CAP_FLAG_UD_MCAST = 1LL << 21, + MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30, + MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32, + MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34, + MLX4_DEV_CAP_FLAG_WOL_PORT1 = 1LL << 37, + MLX4_DEV_CAP_FLAG_WOL_PORT2 = 1LL << 38, + MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40, + MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, + MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, + MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, + MLX4_DEV_CAP_FLAG_RSS_IP_FRAG = 1LL << 52, + MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53, + MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55, + MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, + 
MLX4_DEV_CAP_FLAG_64B_EQE = 1LL << 61, + MLX4_DEV_CAP_FLAG_64B_CQE = 1LL << 62 +}; + +enum { + MLX4_DEV_CAP_FLAG2_RSS = 1LL << 0, + MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1, + MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2, + MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3, + MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN = 1LL << 4, + MLX4_DEV_CAP_FLAG2_TS = 1LL << 5, + MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6, + MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7, + MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8, + MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9, + MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, + MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, + MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, + MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13, + MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14, + MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15, + MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, + MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, + MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, + MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19, + MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20, + MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21, + MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22, + MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23, + MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 24, + MLX4_DEV_CAP_FLAG2_QOS_VPP = 1LL << 25, + MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26, + MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27, + MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28, + MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29, + MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30, + MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31, + MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32, + MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33, + MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34, + MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35, + MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36, + MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37, + MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38, + MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39, +}; + +enum { + MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0, + MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 +}; + +enum { + MLX4_VF_CAP_FLAG_RESET = 1 << 0 +}; + +/* bit enums for an 8-bit flags field indicating special use + * QPs which require special handling in qp_reserve_range. + * Currently, this only includes QPs used by the ETH interface, + * where we expect to use blueflame. These QPs must not have + * bits 6 and 7 set in their qp number. + * + * This enum may use only bits 0..7. 
+ */ +enum { + MLX4_RESERVE_A0_QP = 1 << 6, + MLX4_RESERVE_ETH_BF_QP = 1 << 7, +}; + +enum { + MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0, + MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1, + MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2, + MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3 +}; + +enum { + MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, + MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, + MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2 +}; + + +#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) + +enum { + MLX4_BMME_FLAG_WIN_TYPE_2B = 1 << 1, + MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, + MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, + MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, + MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, + MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, + MLX4_BMME_FLAG_ROCE_V1_V2 = 1 << 19, + MLX4_BMME_FLAG_PORT_REMAP = 1 << 24, + MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28, +}; + +enum { + MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP, + MLX4_FLAG_ROCE_V1_V2 = MLX4_BMME_FLAG_ROCE_V1_V2 +}; + +enum mlx4_event { + MLX4_EVENT_TYPE_COMP = 0x00, + MLX4_EVENT_TYPE_PATH_MIG = 0x01, + MLX4_EVENT_TYPE_COMM_EST = 0x02, + MLX4_EVENT_TYPE_SQ_DRAINED = 0x03, + MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13, + MLX4_EVENT_TYPE_SRQ_LIMIT = 0x14, + MLX4_EVENT_TYPE_CQ_ERROR = 0x04, + MLX4_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + MLX4_EVENT_TYPE_EEC_CATAS_ERROR = 0x06, + MLX4_EVENT_TYPE_PATH_MIG_FAILED = 0x07, + MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, + MLX4_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, + MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08, + MLX4_EVENT_TYPE_PORT_CHANGE = 0x09, + MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f, + MLX4_EVENT_TYPE_ECC_DETECT = 0x0e, + MLX4_EVENT_TYPE_CMD = 0x0a, + MLX4_EVENT_TYPE_VEP_UPDATE = 0x19, + MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, + MLX4_EVENT_TYPE_OP_REQUIRED = 0x1a, + MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, + MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, + MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, + MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e, + MLX4_EVENT_TYPE_NONE = 0xff, +}; + +enum { + MLX4_PORT_CHANGE_SUBTYPE_DOWN = 1, + MLX4_PORT_CHANGE_SUBTYPE_ACTIVE = 4 +}; + +enum { + MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1, + MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2, +}; + +enum { + MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0, +}; + +enum slave_port_state { + SLAVE_PORT_DOWN = 0, + SLAVE_PENDING_UP, + SLAVE_PORT_UP, +}; + +enum slave_port_gen_event { + SLAVE_PORT_GEN_EVENT_DOWN = 0, + SLAVE_PORT_GEN_EVENT_UP, + SLAVE_PORT_GEN_EVENT_NONE, +}; + +enum slave_port_state_event { + MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, + MLX4_PORT_STATE_DEV_EVENT_PORT_UP, + MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID, + MLX4_PORT_STATE_IB_EVENT_GID_INVALID, +}; + +enum { + MLX4_PERM_LOCAL_READ = 1 << 10, + MLX4_PERM_LOCAL_WRITE = 1 << 11, + MLX4_PERM_REMOTE_READ = 1 << 12, + MLX4_PERM_REMOTE_WRITE = 1 << 13, + MLX4_PERM_ATOMIC = 1 << 14, + MLX4_PERM_BIND_MW = 1 << 15, + MLX4_PERM_MASK = 0xFC00 +}; + +enum { + MLX4_OPCODE_NOP = 0x00, + MLX4_OPCODE_SEND_INVAL = 0x01, + MLX4_OPCODE_RDMA_WRITE = 0x08, + MLX4_OPCODE_RDMA_WRITE_IMM = 0x09, + MLX4_OPCODE_SEND = 0x0a, + MLX4_OPCODE_SEND_IMM = 0x0b, + MLX4_OPCODE_LSO = 0x0e, + MLX4_OPCODE_RDMA_READ = 0x10, + MLX4_OPCODE_ATOMIC_CS = 0x11, + MLX4_OPCODE_ATOMIC_FA = 0x12, + MLX4_OPCODE_MASKED_ATOMIC_CS = 0x14, + MLX4_OPCODE_MASKED_ATOMIC_FA = 0x15, + MLX4_OPCODE_BIND_MW = 0x18, + MLX4_OPCODE_FMR = 0x19, + MLX4_OPCODE_LOCAL_INVAL = 0x1b, + MLX4_OPCODE_CONFIG_CMD = 0x1f, + + MLX4_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, + MLX4_RECV_OPCODE_SEND = 0x01, + MLX4_RECV_OPCODE_SEND_IMM = 0x02, 
+ MLX4_RECV_OPCODE_SEND_INVAL = 0x03, + + MLX4_CQE_OPCODE_ERROR = 0x1e, + MLX4_CQE_OPCODE_RESIZE = 0x16, +}; + +enum { + MLX4_STAT_RATE_OFFSET = 5 +}; + +enum mlx4_protocol { + MLX4_PROT_IB_IPV6 = 0, + MLX4_PROT_ETH, + MLX4_PROT_IB_IPV4, + MLX4_PROT_FCOE +}; + +enum { + MLX4_MTT_FLAG_PRESENT = 1 +}; + +enum mlx4_qp_region { + MLX4_QP_REGION_FW = 0, + MLX4_QP_REGION_RSS_RAW_ETH, + MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH, + MLX4_QP_REGION_ETH_ADDR, + MLX4_QP_REGION_FC_ADDR, + MLX4_QP_REGION_FC_EXCH, + MLX4_NUM_QP_REGION +}; + +enum mlx4_port_type { + MLX4_PORT_TYPE_NONE = 0, + MLX4_PORT_TYPE_IB = 1, + MLX4_PORT_TYPE_ETH = 2, + MLX4_PORT_TYPE_AUTO = 3 +}; + +enum mlx4_special_vlan_idx { + MLX4_NO_VLAN_IDX = 0, + MLX4_VLAN_MISS_IDX, + MLX4_VLAN_REGULAR +}; + +enum mlx4_steer_type { + MLX4_MC_STEER = 0, + MLX4_UC_STEER, + MLX4_NUM_STEERS +}; + +enum mlx4_resource_usage { + MLX4_RES_USAGE_NONE, + MLX4_RES_USAGE_DRIVER, + MLX4_RES_USAGE_USER_VERBS, +}; + +enum { + MLX4_NUM_FEXCH = 64 * 1024, +}; + +enum { + MLX4_MAX_FAST_REG_PAGES = 511, +}; + +enum { + /* + * Max wqe size for rdma read is 512 bytes, so this + * limits our max_sge_rd as the wqe needs to fit: + * - ctrl segment (16 bytes) + * - rdma segment (16 bytes) + * - scatter elements (16 bytes each) + */ + MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16 +}; + +enum { + MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14, + MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15, + MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16, + MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP = 0x17, +}; + +/* Port mgmt change event handling */ +enum { + MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK = 1 << 0, + MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK = 1 << 1, + MLX4_EQ_PORT_INFO_LID_CHANGE_MASK = 1 << 2, + MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK = 1 << 3, + MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4, +}; + +union sl2vl_tbl_to_u64 { + u8 sl8[8]; + u64 sl64; +}; + +enum { + MLX4_DEVICE_STATE_UP = 1 << 0, + MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1, +}; + +enum { + MLX4_INTERFACE_STATE_UP = 1 << 0, + MLX4_INTERFACE_STATE_DELETION = 1 << 1, + MLX4_INTERFACE_STATE_NOWAIT = 1 << 2, +}; + +#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ + MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) + +enum mlx4_module_id { + MLX4_MODULE_ID_SFP = 0x3, + MLX4_MODULE_ID_QSFP = 0xC, + MLX4_MODULE_ID_QSFP_PLUS = 0xD, + MLX4_MODULE_ID_QSFP28 = 0x11, +}; + +enum { /* rl */ + MLX4_QP_RATE_LIMIT_NONE = 0, + MLX4_QP_RATE_LIMIT_KBS = 1, + MLX4_QP_RATE_LIMIT_MBS = 2, + MLX4_QP_RATE_LIMIT_GBS = 3 +}; + +struct mlx4_rate_limit_caps { + u16 num_rates; /* Number of different rates */ + u8 min_unit; + u16 min_val; + u8 max_unit; + u16 max_val; +}; + +static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) +{ + return (major << 32) | (minor << 16) | subminor; +} + +struct mlx4_phys_caps { + u32 gid_phys_table_len[MLX4_MAX_PORTS + 1]; + u32 pkey_phys_table_len[MLX4_MAX_PORTS + 1]; + u32 num_phys_eqs; + u32 base_sqpn; + u32 base_proxy_sqpn; + u32 base_tunnel_sqpn; +}; + +struct mlx4_spec_qps { + u32 qp0_qkey; + u32 qp0_proxy; + u32 qp0_tunnel; + u32 qp1_proxy; + u32 qp1_tunnel; +}; + +struct mlx4_caps { + u64 fw_ver; + u32 function; + int num_ports; + int vl_cap[MLX4_MAX_PORTS + 1]; + int ib_mtu_cap[MLX4_MAX_PORTS + 1]; + __be32 ib_port_def_cap[MLX4_MAX_PORTS + 1]; + u64 def_mac[MLX4_MAX_PORTS + 1]; + int eth_mtu_cap[MLX4_MAX_PORTS + 1]; + int gid_table_len[MLX4_MAX_PORTS + 1]; + int pkey_table_len[MLX4_MAX_PORTS + 1]; + int trans_type[MLX4_MAX_PORTS + 1]; + int vendor_oui[MLX4_MAX_PORTS + 1]; + int 
wavelength[MLX4_MAX_PORTS + 1]; + u64 trans_code[MLX4_MAX_PORTS + 1]; + int local_ca_ack_delay; + int num_uars; + u32 uar_page_size; + int bf_reg_size; + int bf_regs_per_page; + int max_sq_sg; + int max_rq_sg; + int num_qps; + int max_wqes; + int max_sq_desc_sz; + int max_rq_desc_sz; + int max_qp_init_rdma; + int max_qp_dest_rdma; + int max_tc_eth; + struct mlx4_spec_qps *spec_qps; + int num_srqs; + int max_srq_wqes; + int max_srq_sge; + int reserved_srqs; + int num_cqs; + int max_cqes; + int reserved_cqs; + int num_sys_eqs; + int num_eqs; + int reserved_eqs; + int num_comp_vectors; + int num_mpts; + int max_fmr_maps; + int num_mtts; + int fmr_reserved_mtts; + int reserved_mtts; + int reserved_mrws; + int reserved_uars; + int num_mgms; + int num_amgms; + int reserved_mcgs; + int num_qp_per_mgm; + int steering_mode; + int dmfs_high_steer_mode; + int fs_log_max_ucast_qp_range_size; + int num_pds; + int reserved_pds; + int max_xrcds; + int reserved_xrcds; + int mtt_entry_sz; + u32 max_msg_sz; + u32 page_size_cap; + u64 flags; + u64 flags2; + u32 bmme_flags; + u32 reserved_lkey; + u16 stat_rate_support; + u8 port_width_cap[MLX4_MAX_PORTS + 1]; + int max_gso_sz; + int max_rss_tbl_sz; + int reserved_qps_cnt[MLX4_NUM_QP_REGION]; + int reserved_qps; + int reserved_qps_base[MLX4_NUM_QP_REGION]; + int log_num_macs; + int log_num_vlans; + enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; + u8 supported_type[MLX4_MAX_PORTS + 1]; + u8 suggested_type[MLX4_MAX_PORTS + 1]; + u8 default_sense[MLX4_MAX_PORTS + 1]; + u32 port_mask[MLX4_MAX_PORTS + 1]; + enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; + u32 max_counters; + u8 port_ib_mtu[MLX4_MAX_PORTS + 1]; + u16 sqp_demux; + u32 eqe_size; + u32 cqe_size; + u8 eqe_factor; + u32 userspace_caps; /* userspace must be aware of these */ + u32 function_caps; /* VFs must be aware of these */ + u16 hca_core_clock; + u64 phys_port_id[MLX4_MAX_PORTS + 1]; + int tunnel_offload_mode; + u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; + u8 phv_bit[MLX4_MAX_PORTS + 1]; + u8 alloc_res_qp_mask; + u32 dmfs_high_rate_qpn_base; + u32 dmfs_high_rate_qpn_range; + u32 vf_caps; + bool wol_port[MLX4_MAX_PORTS + 1]; + struct mlx4_rate_limit_caps rl_caps; + u32 health_buffer_addrs; + bool map_clock_to_user; +}; + +struct mlx4_buf_list { + void *buf; + dma_addr_t map; +}; + +struct mlx4_buf { + struct mlx4_buf_list direct; + struct mlx4_buf_list *page_list; + int nbufs; + int npages; + int page_shift; +}; + +struct mlx4_mtt { + u32 offset; + int order; + int page_shift; +}; + +enum { + MLX4_DB_PER_PAGE = PAGE_SIZE / 4 +}; + +struct mlx4_db_pgdir { + struct list_head list; + DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE); + DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2); + unsigned long *bits[2]; + __be32 *db_page; + dma_addr_t db_dma; +}; + +struct mlx4_ib_user_db_page; + +struct mlx4_db { + __be32 *db; + union { + struct mlx4_db_pgdir *pgdir; + struct mlx4_ib_user_db_page *user_page; + } u; + dma_addr_t dma; + int index; + int order; +}; + +struct mlx4_hwq_resources { + struct mlx4_db db; + struct mlx4_mtt mtt; + struct mlx4_buf buf; +}; + +struct mlx4_mr { + struct mlx4_mtt mtt; + u64 iova; + u64 size; + u32 key; + u32 pd; + u32 access; + int enabled; +}; + +enum mlx4_mw_type { + MLX4_MW_TYPE_1 = 1, + MLX4_MW_TYPE_2 = 2, +}; + +struct mlx4_mw { + u32 key; + u32 pd; + enum mlx4_mw_type type; + int enabled; +}; + +struct mlx4_fmr { + struct mlx4_mr mr; + struct mlx4_mpt_entry *mpt; + __be64 *mtts; + dma_addr_t dma_handle; + int max_pages; + int max_maps; + int maps; + u8 page_shift; +}; 
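A purely illustrative sketch, not part of the upstream patch: given a populated struct mlx4_caps (defined just above), a consumer could test the capability bitmasks and unpack the firmware version packed by mlx4_fw_ver(). The helper name example_dump_caps is hypothetical, and pr_info() from <linux/printk.h> is assumed to be available.

static void example_dump_caps(const struct mlx4_caps *caps)
{
	u64 fw = caps->fw_ver;

	/* mlx4_fw_ver() above packs major:minor:subminor as 32:16:16 bits */
	pr_info("mlx4 FW %d.%d.%d\n", (int)(fw >> 32),
		(int)(fw >> 16) & 0xffff, (int)fw & 0xffff);

	if (caps->flags & MLX4_DEV_CAP_FLAG_IBOE)
		pr_info("RoCE (IBoE) is supported\n");
	if (caps->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
		pr_info("VXLAN steering offload is supported\n");
}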
+ +struct mlx4_uar { + unsigned long pfn; + int index; + struct list_head bf_list; + unsigned free_bf_bmap; + void __iomem *map; + void __iomem *bf_map; +}; + +struct mlx4_bf { + unsigned int offset; + int buf_size; + struct mlx4_uar *uar; + void __iomem *reg; +}; + +struct mlx4_cq { + void (*comp) (struct mlx4_cq *); + void (*event) (struct mlx4_cq *, enum mlx4_event); + + struct mlx4_uar *uar; + + u32 cons_index; + + u16 irq; + __be32 *set_ci_db; + __be32 *arm_db; + int arm_sn; + + int cqn; + unsigned vector; + + refcount_t refcount; + struct completion free; + struct { + struct list_head list; + void (*comp)(struct mlx4_cq *); + void *priv; + } tasklet_ctx; + int reset_notify_added; + struct list_head reset_notify; + u8 usage; +}; + +struct mlx4_qp { + void (*event) (struct mlx4_qp *, enum mlx4_event); + + int qpn; + + refcount_t refcount; + struct completion free; + u8 usage; +}; + +struct mlx4_srq { + void (*event) (struct mlx4_srq *, enum mlx4_event); + + int srqn; + int max; + int max_gs; + int wqe_shift; + + refcount_t refcount; + struct completion free; +}; + +struct mlx4_av { + __be32 port_pd; + u8 reserved1; + u8 g_slid; + __be16 dlid; + u8 reserved2; + u8 gid_index; + u8 stat_rate; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + u8 dgid[16]; +}; + +struct mlx4_eth_av { + __be32 port_pd; + u8 reserved1; + u8 smac_idx; + u16 reserved2; + u8 reserved3; + u8 gid_index; + u8 stat_rate; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + u8 dgid[16]; + u8 s_mac[6]; + u8 reserved4[2]; + __be16 vlan; + u8 mac[ETH_ALEN]; +}; + +union mlx4_ext_av { + struct mlx4_av ib; + struct mlx4_eth_av eth; +}; + +/* Counters should be saturate once they reach their maximum value */ +#define ASSIGN_32BIT_COUNTER(counter, value) do { \ + if ((value) > U32_MAX) \ + counter = cpu_to_be32(U32_MAX); \ + else \ + counter = cpu_to_be32(value); \ +} while (0) + +struct mlx4_counter { + u8 reserved1[3]; + u8 counter_mode; + __be32 num_ifc; + u32 reserved2[2]; + __be64 rx_frames; + __be64 rx_bytes; + __be64 tx_frames; + __be64 tx_bytes; +}; + +struct mlx4_quotas { + int qp; + int cq; + int srq; + int mpt; + int mtt; + int counter; + int xrcd; +}; + +struct mlx4_vf_dev { + u8 min_port; + u8 n_ports; +}; + +struct mlx4_fw_crdump { + bool snapshot_enable; + struct devlink_region *region_crspace; + struct devlink_region *region_fw_health; +}; + +enum mlx4_pci_status { + MLX4_PCI_STATUS_DISABLED, + MLX4_PCI_STATUS_ENABLED, +}; + +struct mlx4_dev_persistent { + struct pci_dev *pdev; + struct mlx4_dev *dev; + int nvfs[MLX4_MAX_PORTS + 1]; + int num_vfs; + enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1]; + enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1]; + struct work_struct catas_work; + struct workqueue_struct *catas_wq; + struct mutex device_state_mutex; /* protect HW state */ + u8 state; + struct mutex interface_state_mutex; /* protect SW state */ + u8 interface_state; + struct mutex pci_status_mutex; /* sync pci state */ + enum mlx4_pci_status pci_status; + struct mlx4_fw_crdump crdump; +}; + +struct mlx4_dev { + struct mlx4_dev_persistent *persist; + unsigned long flags; + unsigned long num_slaves; + struct mlx4_caps caps; + struct mlx4_phys_caps phys_caps; + struct mlx4_quotas quotas; + struct radix_tree_root qp_table_tree; + u8 rev_id; + u8 port_random_macs; + char board_id[MLX4_BOARD_ID_LEN]; + int numa_node; + int oper_log_mgm_entry_size; + u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; + u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; + struct mlx4_vf_dev *dev_vfs; + u8 uar_page_shift; +}; + 
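struct mlx4_cq above delivers completions and asynchronous events through its comp/event function pointers, which the EQ handling code invokes; the consumer installs its own handlers once the CQ exists. A hedged sketch with hypothetical helper names (mlx4_cq_alloc() is declared later in this header; pr_warn() is assumed):

static void example_cq_comp(struct mlx4_cq *cq)
{
	/* A real consumer would typically schedule NAPI or a tasklet here,
	 * drain the CQ ring, then call mlx4_cq_set_ci() and mlx4_cq_arm(). */
}

static void example_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	if (type == MLX4_EVENT_TYPE_CQ_ERROR)
		pr_warn("CQ %d reported an error event\n", cq->cqn);
}

static void example_hook_cq(struct mlx4_cq *cq)
{
	/* install the handlers after mlx4_cq_alloc() has succeeded */
	cq->comp  = example_cq_comp;
	cq->event = example_cq_event;
}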
+struct mlx4_clock_params { + u64 offset; + u8 bar; + u8 size; +}; + +struct mlx4_eqe { + u8 reserved1; + u8 type; + u8 reserved2; + u8 subtype; + union { + u32 raw[6]; + struct { + __be32 cqn; + } __packed comp; + struct { + u16 reserved1; + __be16 token; + u32 reserved2; + u8 reserved3[3]; + u8 status; + __be64 out_param; + } __packed cmd; + struct { + __be32 qpn; + } __packed qp; + struct { + __be32 srqn; + } __packed srq; + struct { + __be32 cqn; + u32 reserved1; + u8 reserved2[3]; + u8 syndrome; + } __packed cq_err; + struct { + u32 reserved1[2]; + __be32 port; + } __packed port_change; + struct { + #define COMM_CHANNEL_BIT_ARRAY_SIZE 4 + u32 reserved; + u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE]; + } __packed comm_channel_arm; + struct { + u8 port; + u8 reserved[3]; + __be64 mac; + } __packed mac_update; + struct { + __be32 slave_id; + } __packed flr_event; + struct { + __be16 current_temperature; + __be16 warning_threshold; + } __packed warming; + struct { + u8 reserved[3]; + u8 port; + union { + struct { + __be16 mstr_sm_lid; + __be16 port_lid; + __be32 changed_attr; + u8 reserved[3]; + u8 mstr_sm_sl; + __be64 gid_prefix; + } __packed port_info; + struct { + __be32 block_ptr; + __be32 tbl_entries_mask; + } __packed tbl_change_info; + struct { + u8 sl2vl_table[8]; + } __packed sl2vl_tbl_change_info; + } params; + } __packed port_mgmt_change; + struct { + u8 reserved[3]; + u8 port; + u32 reserved1[5]; + } __packed bad_cable; + } event; + u8 slave_id; + u8 reserved3[2]; + u8 owner; +} __packed; + +struct mlx4_init_port_param { + int set_guid0; + int set_node_guid; + int set_si_guid; + u16 mtu; + int port_width_cap; + u16 vl_cap; + u16 max_gid; + u16 max_pkey; + u64 guid0; + u64 node_guid; + u64 si_guid; +}; + +#define MAD_IFC_DATA_SZ 192 +/* MAD IFC Mailbox */ +struct mlx4_mad_ifc { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + __be16 class_specific; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; + __be64 mkey; + __be16 dr_slid; + __be16 dr_dlid; + u8 reserved[28]; + u8 data[MAD_IFC_DATA_SZ]; +} __packed; + +#define mlx4_foreach_port(port, dev, type) \ + for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ + if ((type) == (dev)->caps.port_mask[(port)]) + +#define mlx4_foreach_ib_transport_port(port, dev) \ + for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ + if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ + ((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_ETH)) + +#define MLX4_INVALID_SLAVE_ID 0xFF +#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1) + +void handle_port_mgmt_change_event(struct work_struct *work); + +static inline int mlx4_master_func_num(struct mlx4_dev *dev) +{ + return dev->caps.function; +} + +static inline int mlx4_is_master(struct mlx4_dev *dev) +{ + return dev->flags & MLX4_FLAG_MASTER; +} + +static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) +{ + return dev->phys_caps.base_sqpn + 8 + + 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev); +} + +static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) +{ + return (qpn < dev->phys_caps.base_sqpn + 8 + + 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) && + qpn >= dev->phys_caps.base_sqpn) || + (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); +} + +static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) +{ + int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8; + + if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8) + return 1; + + return 0; +} + 
+static inline int mlx4_is_mfunc(struct mlx4_dev *dev) +{ + return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER); +} + +static inline int mlx4_is_slave(struct mlx4_dev *dev) +{ + return dev->flags & MLX4_FLAG_SLAVE; +} + +static inline int mlx4_is_eth(struct mlx4_dev *dev, int port) +{ + return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1; +} + +int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, + struct mlx4_buf *buf); +void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); +static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) +{ + if (buf->nbufs == 1) + return buf->direct.buf + offset; + else + return buf->page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn); +void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); +int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); +void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); + +int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); +void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); +int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node); +void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf); + +int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, + struct mlx4_mtt *mtt); +void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt); +u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt); + +int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, + int npages, int page_shift, struct mlx4_mr *mr); +int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr); +int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr); +int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, + struct mlx4_mw *mw); +void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw); +int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw); +int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list); +int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_buf *buf); + +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); +void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); + +int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, + int size); +void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, + int size); + +int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, + struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, + unsigned vector, int collapsed, int timestamp_en); +void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags, u8 usage); +void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); + +int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); +void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); + +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn, + struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq); +void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq); +int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark); +int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark); + +int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); +int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); + +int mlx4_unicast_attach(struct mlx4_dev 
*dev, struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol prot); +int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol prot); +int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + u8 port, int block_mcast_loopback, + enum mlx4_protocol protocol, u64 *reg_id); +int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol protocol, u64 reg_id); + +enum { + MLX4_DOMAIN_UVERBS = 0x1000, + MLX4_DOMAIN_ETHTOOL = 0x2000, + MLX4_DOMAIN_RFS = 0x3000, + MLX4_DOMAIN_NIC = 0x5000, +}; + +enum mlx4_net_trans_rule_id { + MLX4_NET_TRANS_RULE_ID_ETH = 0, + MLX4_NET_TRANS_RULE_ID_IB, + MLX4_NET_TRANS_RULE_ID_IPV6, + MLX4_NET_TRANS_RULE_ID_IPV4, + MLX4_NET_TRANS_RULE_ID_TCP, + MLX4_NET_TRANS_RULE_ID_UDP, + MLX4_NET_TRANS_RULE_ID_VXLAN, + MLX4_NET_TRANS_RULE_NUM, /* should be last */ +}; + +extern const u16 __sw_id_hw[]; + +static inline int map_hw_to_sw_id(u16 header_id) +{ + + int i; + for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) { + if (header_id == __sw_id_hw[i]) + return i; + } + return -EINVAL; +} + +enum mlx4_net_trans_promisc_mode { + MLX4_FS_REGULAR = 1, + MLX4_FS_ALL_DEFAULT, + MLX4_FS_MC_DEFAULT, + MLX4_FS_MIRROR_RX_PORT, + MLX4_FS_MIRROR_SX_PORT, + MLX4_FS_UC_SNIFFER, + MLX4_FS_MC_SNIFFER, + MLX4_FS_MODE_NUM, /* should be last */ +}; + +struct mlx4_spec_eth { + u8 dst_mac[ETH_ALEN]; + u8 dst_mac_msk[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + u8 src_mac_msk[ETH_ALEN]; + u8 ether_type_enable; + __be16 ether_type; + __be16 vlan_id_msk; + __be16 vlan_id; +}; + +struct mlx4_spec_tcp_udp { + __be16 dst_port; + __be16 dst_port_msk; + __be16 src_port; + __be16 src_port_msk; +}; + +struct mlx4_spec_ipv4 { + __be32 dst_ip; + __be32 dst_ip_msk; + __be32 src_ip; + __be32 src_ip_msk; +}; + +struct mlx4_spec_ib { + __be32 l3_qpn; + __be32 qpn_msk; + u8 dst_gid[16]; + u8 dst_gid_msk[16]; +}; + +struct mlx4_spec_vxlan { + __be32 vni; + __be32 vni_mask; + +}; + +struct mlx4_spec_list { + struct list_head list; + enum mlx4_net_trans_rule_id id; + union { + struct mlx4_spec_eth eth; + struct mlx4_spec_ib ib; + struct mlx4_spec_ipv4 ipv4; + struct mlx4_spec_tcp_udp tcp_udp; + struct mlx4_spec_vxlan vxlan; + }; +}; + +enum mlx4_net_trans_hw_rule_queue { + MLX4_NET_TRANS_Q_FIFO, + MLX4_NET_TRANS_Q_LIFO, +}; + +struct mlx4_net_trans_rule { + struct list_head list; + enum mlx4_net_trans_hw_rule_queue queue_mode; + bool exclusive; + bool allow_loopback; + enum mlx4_net_trans_promisc_mode promisc_mode; + u8 port; + u16 priority; + u32 qpn; +}; + +struct mlx4_net_trans_rule_hw_ctrl { + __be16 prio; + u8 type; + u8 flags; + u8 rsvd1; + u8 funcid; + u8 vep; + u8 port; + __be32 qpn; + __be32 rsvd2; +}; + +struct mlx4_net_trans_rule_hw_ib { + u8 size; + u8 rsvd1; + __be16 id; + u32 rsvd2; + __be32 l3_qpn; + __be32 qpn_mask; + u8 dst_gid[16]; + u8 dst_gid_msk[16]; +} __packed; + +struct mlx4_net_trans_rule_hw_eth { + u8 size; + u8 rsvd; + __be16 id; + u8 rsvd1[6]; + u8 dst_mac[6]; + u16 rsvd2; + u8 dst_mac_msk[6]; + u16 rsvd3; + u8 src_mac[6]; + u16 rsvd4; + u8 src_mac_msk[6]; + u8 rsvd5; + u8 ether_type_enable; + __be16 ether_type; + __be16 vlan_tag_msk; + __be16 vlan_tag; +} __packed; + +struct mlx4_net_trans_rule_hw_tcp_udp { + u8 size; + u8 rsvd; + __be16 id; + __be16 rsvd1[3]; + __be16 dst_port; + __be16 rsvd2; + __be16 dst_port_msk; + __be16 rsvd3; + __be16 src_port; + __be16 rsvd4; + __be16 src_port_msk; +} __packed; + +struct mlx4_net_trans_rule_hw_ipv4 { + u8 size; + u8 rsvd; + __be16 
id; + __be32 rsvd1; + __be32 dst_ip; + __be32 dst_ip_msk; + __be32 src_ip; + __be32 src_ip_msk; +} __packed; + +struct mlx4_net_trans_rule_hw_vxlan { + u8 size; + u8 rsvd; + __be16 id; + __be32 rsvd1; + __be32 vni; + __be32 vni_mask; +} __packed; + +struct _rule_hw { + union { + struct { + u8 size; + u8 rsvd; + __be16 id; + }; + struct mlx4_net_trans_rule_hw_eth eth; + struct mlx4_net_trans_rule_hw_ib ib; + struct mlx4_net_trans_rule_hw_ipv4 ipv4; + struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; + struct mlx4_net_trans_rule_hw_vxlan vxlan; + }; +}; + +enum { + VXLAN_STEER_BY_OUTER_MAC = 1 << 0, + VXLAN_STEER_BY_OUTER_VLAN = 1 << 1, + VXLAN_STEER_BY_VSID_VNI = 1 << 2, + VXLAN_STEER_BY_INNER_MAC = 1 << 3, + VXLAN_STEER_BY_INNER_VLAN = 1 << 4, +}; + +enum { + MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS = 0x2, +}; + +int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, + enum mlx4_net_trans_promisc_mode mode); +int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, + enum mlx4_net_trans_promisc_mode mode); +int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); +int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); +int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); +int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); +int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); + +int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); +void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); +int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port); +int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); +int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, + u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); +int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac); +int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu); +int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, + u8 promisc); +int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time); +int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, + u8 ignore_fcs_value); +int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable); +int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val); +int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv); +int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port, + bool *vlan_offload_disabled); +void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl, + struct _rule_hw *eth_header); +int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx); +int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); +int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); +void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); + +int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, + int npages, u64 iova, u32 *lkey, u32 *rkey); +int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, + int max_maps, u8 page_shift, struct mlx4_fmr *fmr); +int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr); +void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, + u32 *lkey, u32 *rkey); +int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); +int mlx4_SYNC_TPT(struct mlx4_dev *dev); +int mlx4_test_interrupt(struct mlx4_dev *dev, int vector); +int mlx4_test_async(struct mlx4_dev *dev); +int 
mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, + const u32 offset[], u32 value[], + size_t array_len, u8 port); +u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port); +bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector); +struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port); +int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector); +void mlx4_release_eq(struct mlx4_dev *dev, int vec); + +int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector); +int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec); + +int mlx4_get_phys_port_id(struct mlx4_dev *dev); +int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); +int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); + +int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage); +void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); +int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port); + +void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, + int port); +__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port); +void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port); +int mlx4_flow_attach(struct mlx4_dev *dev, + struct mlx4_net_trans_rule *rule, u64 *reg_id); +int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id); +int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, + enum mlx4_net_trans_promisc_mode flow_type); +int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id); +int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); + +int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, + int port, int qpn, u16 prio, u64 *reg_id); + +void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, + int i, int val); + +int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey); + +int mlx4_is_slave_active(struct mlx4_dev *dev, int slave); +int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port); +int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port); +int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr); +int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change); +enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port); +int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event); + +void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid); +__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave); + +int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, + int *slave_id); +int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, + u8 *gid); + +int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, + u32 max_range_qpn); + +u64 mlx4_read_clock(struct mlx4_dev *dev); + +struct mlx4_active_ports { + DECLARE_BITMAP(ports, MLX4_MAX_PORTS); +}; +/* Returns a bitmap of the physical ports which are assigned to slave */ +struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave); + +/* Returns the physical port that represents the virtual port of the slave, */ +/* or a value < 0 in case of an error. If a slave has 2 ports, the identity */ +/* mapping is returned. 
*/ +int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port); + +struct mlx4_slaves_pport { + DECLARE_BITMAP(slaves, MLX4_MFUNC_MAX); +}; +/* Returns a bitmap of all slaves that are assigned to port. */ +struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev, + int port); + +/* Returns a bitmap of all slaves that are assigned exactly to all the */ +/* the ports that are set in crit_ports. */ +struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( + struct mlx4_dev *dev, + const struct mlx4_active_ports *crit_ports); + +/* Returns the slave's virtual port that represents the physical port. */ +int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port); + +int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port); + +int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port); +int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis); +int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port); +int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2); +int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); +int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); +int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, + int enable); +int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, + struct mlx4_mpt_entry ***mpt_entry); +int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, + struct mlx4_mpt_entry **mpt_entry); +int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, + u32 pdn); +int mlx4_mr_hw_change_access(struct mlx4_dev *dev, + struct mlx4_mpt_entry *mpt_entry, + u32 access); +void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev, + struct mlx4_mpt_entry **mpt_entry); +void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr); +int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, + u64 iova, u64 size, int npages, + int page_shift, struct mlx4_mpt_entry *mpt_entry); + +int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + u16 offset, u16 size, u8 *data); +int mlx4_max_tc(struct mlx4_dev *dev); + +/* Returns true if running in low memory profile (kdump kernel) */ +static inline bool mlx4_low_memory_profile(void) +{ + return is_kdump_kernel(); +} + +/* ACCESS REG commands */ +enum mlx4_access_reg_method { + MLX4_ACCESS_REG_QUERY = 0x1, + MLX4_ACCESS_REG_WRITE = 0x2, +}; + +/* ACCESS PTYS Reg command */ +enum mlx4_ptys_proto { + MLX4_PTYS_IB = 1<<0, + MLX4_PTYS_EN = 1<<2, +}; + +enum mlx4_ptys_flags { + MLX4_PTYS_AN_DISABLE_CAP = 1 << 5, + MLX4_PTYS_AN_DISABLE_ADMIN = 1 << 6, +}; + +struct mlx4_ptys_reg { + u8 flags; + u8 local_port; + u8 resrvd2; + u8 proto_mask; + __be32 resrvd3[2]; + __be32 eth_proto_cap; + __be16 ib_width_cap; + __be16 ib_speed_cap; + __be32 resrvd4; + __be32 eth_proto_admin; + __be16 ib_width_admin; + __be16 ib_speed_admin; + __be32 resrvd5; + __be32 eth_proto_oper; + __be16 ib_width_oper; + __be16 ib_speed_oper; + __be32 resrvd6; + __be32 eth_proto_lp_adv; +} __packed; + +int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, + enum mlx4_access_reg_method method, + struct mlx4_ptys_reg *ptys_reg); + +int mlx4_get_internal_clock_params(struct mlx4_dev *dev, + struct mlx4_clock_params *params); + +static inline int mlx4_to_hw_uar_index(struct mlx4_dev *dev, int index) +{ + return (index << (PAGE_SHIFT - dev->uar_page_shift)); +} + +static inline int mlx4_get_num_reserved_uar(struct mlx4_dev *dev) +{ + /* The first 128 UARs are used for EQ doorbells */ + 
return (128 >> (PAGE_SHIFT - dev->uar_page_shift)); +} +#endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx4/doorbell.h b/include/linux/mlx4/doorbell.h new file mode 100644 index 000000000..f31bba270 --- /dev/null +++ b/include/linux/mlx4/doorbell.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_DOORBELL_H +#define MLX4_DOORBELL_H + +#include +#include + +#define MLX4_SEND_DOORBELL 0x14 +#define MLX4_CQ_DOORBELL 0x20 + +#if BITS_PER_LONG == 64 +/* + * Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. + */ + +#define MLX4_DECLARE_DOORBELL_LOCK(name) +#define MLX4_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define MLX4_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void mlx4_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + __raw_writeq(*(u64 *) val, dest); +} + +#else + +/* + * Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. + */ + +#define MLX4_DECLARE_DOORBELL_LOCK(name) spinlock_t name; +#define MLX4_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define MLX4_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void mlx4_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + unsigned long flags; + + spin_lock_irqsave(doorbell_lock, flags); + __raw_writel((__force u32) val[0], dest); + __raw_writel((__force u32) val[1], dest + 4); + spin_unlock_irqrestore(doorbell_lock, flags); +} + +#endif + +#endif /* MLX4_DOORBELL_H */ diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h new file mode 100644 index 000000000..a858bcb62 --- /dev/null +++ b/include/linux/mlx4/driver.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2006 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_DRIVER_H +#define MLX4_DRIVER_H + +#include +#include + +struct mlx4_dev; + +#define MLX4_MAC_MASK 0xffffffffffffULL + +enum mlx4_dev_event { + MLX4_DEV_EVENT_CATASTROPHIC_ERROR, + MLX4_DEV_EVENT_PORT_UP, + MLX4_DEV_EVENT_PORT_DOWN, + MLX4_DEV_EVENT_PORT_REINIT, + MLX4_DEV_EVENT_PORT_MGMT_CHANGE, + MLX4_DEV_EVENT_SLAVE_INIT, + MLX4_DEV_EVENT_SLAVE_SHUTDOWN, +}; + +enum { + MLX4_INTFF_BONDING = 1 << 0 +}; + +struct mlx4_interface { + void * (*add) (struct mlx4_dev *dev); + void (*remove)(struct mlx4_dev *dev, void *context); + void (*event) (struct mlx4_dev *dev, void *context, + enum mlx4_dev_event event, unsigned long param); + void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); + void (*activate)(struct mlx4_dev *dev, void *context); + struct list_head list; + enum mlx4_protocol protocol; + int flags; +}; + +int mlx4_register_interface(struct mlx4_interface *intf); +void mlx4_unregister_interface(struct mlx4_interface *intf); + +int mlx4_bond(struct mlx4_dev *dev); +int mlx4_unbond(struct mlx4_dev *dev); +static inline int mlx4_is_bonded(struct mlx4_dev *dev) +{ + return !!(dev->flags & MLX4_FLAG_BONDED); +} + +static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev) +{ + return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev)); +} + +struct mlx4_port_map { + u8 port1; + u8 port2; +}; + +int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p); + +void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); + +struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port); + +static inline u64 mlx4_mac_to_u64(u8 *addr) +{ + u64 mac = 0; + int i; + + for (i = 0; i < ETH_ALEN; i++) { + mac <<= 8; + mac |= addr[i]; + } + return mac; +} + +static inline void mlx4_u64_to_mac(u8 *addr, u64 mac) +{ + int i; + + for (i = ETH_ALEN; i > 0; i--) { + addr[i - 1] = mac & 0xFF; + mac >>= 8; + } +} + +#endif /* MLX4_DRIVER_H */ diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h new file mode 100644 index 000000000..8e2828d48 --- /dev/null +++ b/include/linux/mlx4/qp.h @@ -0,0 +1,506 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_QP_H +#define MLX4_QP_H + +#include +#include + +#include + +#define MLX4_INVALID_LKEY 0x100 + +enum mlx4_qp_optpar { + MLX4_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, + MLX4_QP_OPTPAR_RRE = 1 << 1, + MLX4_QP_OPTPAR_RAE = 1 << 2, + MLX4_QP_OPTPAR_RWE = 1 << 3, + MLX4_QP_OPTPAR_PKEY_INDEX = 1 << 4, + MLX4_QP_OPTPAR_Q_KEY = 1 << 5, + MLX4_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, + MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, + MLX4_QP_OPTPAR_SRA_MAX = 1 << 8, + MLX4_QP_OPTPAR_RRA_MAX = 1 << 9, + MLX4_QP_OPTPAR_PM_STATE = 1 << 10, + MLX4_QP_OPTPAR_RETRY_COUNT = 1 << 12, + MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13, + MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, + MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16, + MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20, + MLX4_QP_OPTPAR_VLAN_STRIPPING = 1 << 21, +}; + +enum mlx4_qp_state { + MLX4_QP_STATE_RST = 0, + MLX4_QP_STATE_INIT = 1, + MLX4_QP_STATE_RTR = 2, + MLX4_QP_STATE_RTS = 3, + MLX4_QP_STATE_SQER = 4, + MLX4_QP_STATE_SQD = 5, + MLX4_QP_STATE_ERR = 6, + MLX4_QP_STATE_SQ_DRAINING = 7, + MLX4_QP_NUM_STATE +}; + +enum { + MLX4_QP_ST_RC = 0x0, + MLX4_QP_ST_UC = 0x1, + MLX4_QP_ST_RD = 0x2, + MLX4_QP_ST_UD = 0x3, + MLX4_QP_ST_XRC = 0x6, + MLX4_QP_ST_MLX = 0x7 +}; + +enum { + MLX4_QP_PM_MIGRATED = 0x3, + MLX4_QP_PM_ARMED = 0x0, + MLX4_QP_PM_REARM = 0x1 +}; + +enum { + /* params1 */ + MLX4_QP_BIT_SRE = 1 << 15, + MLX4_QP_BIT_SWE = 1 << 14, + MLX4_QP_BIT_SAE = 1 << 13, + /* params2 */ + MLX4_QP_BIT_RRE = 1 << 15, + MLX4_QP_BIT_RWE = 1 << 14, + MLX4_QP_BIT_RAE = 1 << 13, + MLX4_QP_BIT_FPP = 1 << 3, + MLX4_QP_BIT_RIC = 1 << 4, +}; + +enum { + MLX4_RSS_HASH_XOR = 0, + MLX4_RSS_HASH_TOP = 1, + + MLX4_RSS_UDP_IPV6 = 1 << 0, + MLX4_RSS_UDP_IPV4 = 1 << 1, + MLX4_RSS_TCP_IPV6 = 1 << 2, + MLX4_RSS_IPV6 = 1 << 3, + MLX4_RSS_TCP_IPV4 = 1 << 4, + MLX4_RSS_IPV4 = 1 << 5, + + MLX4_RSS_BY_OUTER_HEADERS = 0 << 6, + MLX4_RSS_BY_INNER_HEADERS = 2 << 6, + MLX4_RSS_BY_INNER_HEADERS_IPONLY = 3 << 6, + + /* offset of mlx4_rss_context within mlx4_qp_context.pri_path */ + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24, + /* offset of being RSS indirection QP within mlx4_qp_context.flags */ + MLX4_RSS_QPC_FLAG_OFFSET = 13, +}; + +#define MLX4_EN_RSS_KEY_SIZE 40 + +struct mlx4_rss_context { + __be32 base_qpn; + __be32 default_qpn; + u16 reserved; + u8 hash_fn; + u8 flags; + __be32 
rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)]; + __be32 base_qpn_udp; +}; + +struct mlx4_qp_path { + u8 fl; + union { + u8 vlan_control; + u8 control; + }; + u8 disable_pkey_check; + u8 pkey_index; + u8 counter_index; + u8 grh_mylmc; + __be16 rlid; + u8 ackto; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + u8 rgid[16]; + u8 sched_queue; + u8 vlan_index; + u8 feup; + u8 fvl_rx; + u8 reserved4[2]; + u8 dmac[ETH_ALEN]; +}; + +enum { /* fl */ + MLX4_FL_CV = 1 << 6, + MLX4_FL_SV = 1 << 5, + MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2, + MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1, + MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0, +}; + +enum { /* control */ + MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7, +}; + +enum { /* vlan_control */ + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6, + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */ + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED = 1 << 4, + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2, + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */ + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0 +}; + +enum { /* feup */ + MLX4_FEUP_FORCE_ETH_UP = 1 << 6, /* force Eth UP */ + MLX4_FSM_FORCE_ETH_SRC_MAC = 1 << 5, /* force Source MAC */ + MLX4_FVL_FORCE_ETH_VLAN = 1 << 3 /* force Eth vlan */ +}; + +enum { /* fvl_rx */ + MLX4_FVL_RX_FORCE_ETH_VLAN = 1 << 0 /* enforce Eth rx vlan */ +}; + +struct mlx4_qp_context { + __be32 flags; + __be32 pd; + u8 mtu_msgmax; + u8 rq_size_stride; + u8 sq_size_stride; + u8 rlkey_roce_mode; + __be32 usr_page; + __be32 local_qpn; + __be32 remote_qpn; + struct mlx4_qp_path pri_path; + struct mlx4_qp_path alt_path; + __be32 params1; + u32 reserved1; + __be32 next_send_psn; + __be32 cqn_send; + __be16 roce_entropy; + __be16 reserved2[3]; + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 xrcd; + __be32 cqn_recv; + __be64 db_rec_addr; + __be32 qkey; + __be32 srqn; + __be32 msn; + __be16 rq_wqe_counter; + __be16 sq_wqe_counter; + u32 reserved3; + __be16 rate_limit_params; + u8 reserved4; + u8 qos_vport; + __be32 param3; + __be32 nummmcpeers_basemkey; + u8 log_page_size; + u8 reserved5[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + u32 reserved6[10]; +}; + +struct mlx4_update_qp_context { + __be64 qp_mask; + __be64 primary_addr_path_mask; + __be64 secondary_addr_path_mask; + u64 reserved1; + struct mlx4_qp_context qp_context; + u64 reserved2[58]; +}; + +enum { + MLX4_UPD_QP_MASK_PM_STATE = 32, + MLX4_UPD_QP_MASK_VSD = 33, + MLX4_UPD_QP_MASK_QOS_VPP = 34, + MLX4_UPD_QP_MASK_RATE_LIMIT = 35, +}; + +enum { + MLX4_UPD_QP_PATH_MASK_PKEY_INDEX = 0 + 32, + MLX4_UPD_QP_PATH_MASK_FSM = 1 + 32, + MLX4_UPD_QP_PATH_MASK_MAC_INDEX = 2 + 32, + MLX4_UPD_QP_PATH_MASK_FVL = 3 + 32, + MLX4_UPD_QP_PATH_MASK_CV = 4 + 32, + MLX4_UPD_QP_PATH_MASK_VLAN_INDEX = 5 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN = 6 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED = 7 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P = 8 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED = 9 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED = 10 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P = 11 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED = 12 + 32, + MLX4_UPD_QP_PATH_MASK_FEUP = 13 + 32, + MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32, + MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32, + MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32, + MLX4_UPD_QP_PATH_MASK_SV = 22 + 32, +}; 
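The MLX4_UPD_QP_MASK_* and MLX4_UPD_QP_PATH_MASK_* values above are bit positions within the big-endian 64-bit masks of struct mlx4_update_qp_context; the "+ 32" entries land in the upper 32 bits of primary_addr_path_mask. A hedged, hypothetical illustration of building such masks (cpu_to_be64() from the kernel byteorder helpers is assumed; the field value written is a placeholder, not taken from the driver):

static void example_fill_update_masks(struct mlx4_update_qp_context *ctx)
{
	/* request an update of the VSD state and of pri_path.sched_queue */
	ctx->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
	ctx->primary_addr_path_mask =
		cpu_to_be64(1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE);
	ctx->qp_context.pri_path.sched_queue = 0;	/* placeholder value */
}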
+ +enum { /* param3 */ + MLX4_STRIP_VLAN = 1 << 30 +}; + +/* Which firmware version adds support for NEC (NoErrorCompletion) bit */ +#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) + +enum { + MLX4_WQE_CTRL_NEC = 1 << 29, + MLX4_WQE_CTRL_IIP = 1 << 28, + MLX4_WQE_CTRL_ILP = 1 << 27, + MLX4_WQE_CTRL_FENCE = 1 << 6, + MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2, + MLX4_WQE_CTRL_SOLICITED = 1 << 1, + MLX4_WQE_CTRL_IP_CSUM = 1 << 4, + MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5, + MLX4_WQE_CTRL_INS_CVLAN = 1 << 6, + MLX4_WQE_CTRL_INS_SVLAN = 1 << 7, + MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7, + MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0, +}; + +union mlx4_wqe_qpn_vlan { + struct { + __be16 vlan_tag; + u8 ins_vlan; + u8 fence_size; + }; + __be32 bf_qpn; +}; + +struct mlx4_wqe_ctrl_seg { + __be32 owner_opcode; + union mlx4_wqe_qpn_vlan qpn_vlan; + /* + * High 24 bits are SRC remote buffer; low 8 bits are flags: + * [7] SO (strong ordering) + * [5] TCP/UDP checksum + * [4] IP checksum + * [3:2] C (generate completion queue entry) + * [1] SE (solicited event) + * [0] FL (force loopback) + */ + union { + __be32 srcrb_flags; + __be16 srcrb_flags16[2]; + }; + /* + * imm is immediate data for send/RDMA write w/ immediate; + * also invalidation key for send with invalidate; input + * modifier for WQEs on CCQs. + */ + __be32 imm; +}; + +enum { + MLX4_WQE_MLX_VL15 = 1 << 17, + MLX4_WQE_MLX_SLR = 1 << 16 +}; + +struct mlx4_wqe_mlx_seg { + u8 owner; + u8 reserved1[2]; + u8 opcode; + __be16 sched_prio; + u8 reserved2; + u8 size; + /* + * [17] VL15 + * [16] SLR + * [15:12] static rate + * [11:8] SL + * [4] ICRC + * [3:2] C + * [0] FL (force loopback) + */ + __be32 flags; + __be16 rlid; + u16 reserved3; +}; + +struct mlx4_wqe_datagram_seg { + __be32 av[8]; + __be32 dqpn; + __be32 qkey; + __be16 vlan; + u8 mac[ETH_ALEN]; +}; + +struct mlx4_wqe_lso_seg { + __be32 mss_hdr_size; + __be32 header[0]; +}; + +enum mlx4_wqe_bind_seg_flags2 { + MLX4_WQE_BIND_ZERO_BASED = (1 << 30), + MLX4_WQE_BIND_TYPE_2 = (1 << 31), +}; + +struct mlx4_wqe_bind_seg { + __be32 flags1; + __be32 flags2; + __be32 new_rkey; + __be32 lkey; + __be64 addr; + __be64 length; +}; + +enum { + MLX4_WQE_FMR_PERM_LOCAL_READ = 1 << 27, + MLX4_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, + MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ = 1 << 29, + MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE = 1 << 30, + MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC = 1 << 31 +}; + +struct mlx4_wqe_fmr_seg { + __be32 flags; + __be32 mem_key; + __be64 buf_list; + __be64 start_addr; + __be64 reg_len; + __be32 offset; + __be32 page_size; + u32 reserved[2]; +}; + +struct mlx4_wqe_fmr_ext_seg { + u8 flags; + u8 reserved; + __be16 app_mask; + __be16 wire_app_tag; + __be16 mem_app_tag; + __be32 wire_ref_tag_base; + __be32 mem_ref_tag_base; +}; + +struct mlx4_wqe_local_inval_seg { + u64 reserved1; + __be32 mem_key; + u32 reserved2; + u64 reserved3[2]; +}; + +struct mlx4_wqe_raddr_seg { + __be64 raddr; + __be32 rkey; + u32 reserved; +}; + +struct mlx4_wqe_atomic_seg { + __be64 swap_add; + __be64 compare; +}; + +struct mlx4_wqe_masked_atomic_seg { + __be64 swap_add; + __be64 compare; + __be64 swap_add_mask; + __be64 compare_mask; +}; + +struct mlx4_wqe_data_seg { + __be32 byte_count; + __be32 lkey; + __be64 addr; +}; + +enum { + MLX4_INLINE_ALIGN = 64, + MLX4_INLINE_SEG = 1 << 31, +}; + +struct mlx4_wqe_inline_seg { + __be32 byte_count; +}; + +enum mlx4_update_qp_attr { + MLX4_UPDATE_QP_SMAC = 1 << 0, + MLX4_UPDATE_QP_VSD = 1 << 1, + MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2, + MLX4_UPDATE_QP_QOS_VPORT = 1 << 3, + 
MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB = 1 << 4, + MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 5) - 1 +}; + +enum mlx4_update_qp_params_flags { + MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB = 1 << 0, + MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 1, +}; + +struct mlx4_update_qp_params { + u8 smac_index; + u8 qos_vport; + u32 flags; + u16 rate_unit; + u16 rate_val; +}; + +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn); +int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, + enum mlx4_update_qp_attr attr, + struct mlx4_update_qp_params *params); +int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp); + +int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, + struct mlx4_qp_context *context); + +int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_qp_context *context, + struct mlx4_qp *qp, enum mlx4_qp_state *qp_state); + +static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) +{ + return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); +} + +void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp); + +static inline u16 folded_qp(u32 q) +{ + u16 res; + + res = ((q & 0xff) ^ ((q & 0xff0000) >> 16)) | (q & 0xff00); + return res; +} + +u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn); + +#endif /* MLX4_QP_H */ diff --git a/include/linux/mlx4/srq.h b/include/linux/mlx4/srq.h new file mode 100644 index 000000000..192e0f778 --- /dev/null +++ b/include/linux/mlx4/srq.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_SRQ_H +#define MLX4_SRQ_H + +struct mlx4_wqe_srq_next_seg { + u16 reserved1; + __be16 next_wqe_index; + u32 reserved2[3]; +}; + +struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn); + +#endif /* MLX4_SRQ_H */ diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h new file mode 100644 index 000000000..70e7e5673 --- /dev/null +++ b/include/linux/mlx5/accel.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. 
All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __MLX5_ACCEL_H__ +#define __MLX5_ACCEL_H__ + +#include + +enum mlx5_accel_esp_aes_gcm_keymat_iv_algo { + MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ, +}; + +enum mlx5_accel_esp_flags { + MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */ + MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0, + MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1, + MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2, +}; + +enum mlx5_accel_esp_action { + MLX5_ACCEL_ESP_ACTION_DECRYPT, + MLX5_ACCEL_ESP_ACTION_ENCRYPT, +}; + +enum mlx5_accel_esp_keymats { + MLX5_ACCEL_ESP_KEYMAT_AES_NONE, + MLX5_ACCEL_ESP_KEYMAT_AES_GCM, +}; + +enum mlx5_accel_esp_replay { + MLX5_ACCEL_ESP_REPLAY_NONE, + MLX5_ACCEL_ESP_REPLAY_BMP, +}; + +struct aes_gcm_keymat { + u64 seq_iv; + enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo; + + u32 salt; + u32 icv_len; + + u32 key_len; + u32 aes_key[256 / 32]; +}; + +struct mlx5_accel_esp_xfrm_attrs { + enum mlx5_accel_esp_action action; + u32 esn; + u32 spi; + u32 seq; + u32 tfc_pad; + u32 flags; + u32 sa_handle; + enum mlx5_accel_esp_replay replay_type; + union { + struct { + u32 size; + + } bmp; + } replay; + enum mlx5_accel_esp_keymats keymat_type; + union { + struct aes_gcm_keymat aes_gcm; + } keymat; +}; + +struct mlx5_accel_esp_xfrm { + struct mlx5_core_dev *mdev; + struct mlx5_accel_esp_xfrm_attrs attrs; +}; + +enum { + MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0, +}; + +enum mlx5_accel_ipsec_cap { + MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, + MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1, + MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2, + MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3, + MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4, + MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5, + MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6, + MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, +}; + +#ifdef CONFIG_MLX5_ACCEL + +u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); + +struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags); +void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); +int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); + +#else + 
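/*
 * Illustrative sketch, not part of the original header: rough intended flow
 * for the mlx5_accel_esp_* API declared above.  The attribute values are
 * made-up examples:
 *
 *	struct mlx5_accel_esp_xfrm_attrs attrs = {
 *		.action      = MLX5_ACCEL_ESP_ACTION_ENCRYPT,
 *		.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM,
 *		.flags       = MLX5_ACCEL_ESP_FLAGS_TRANSPORT,
 *	};
 *	struct mlx5_accel_esp_xfrm *xfrm;
 *
 *	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP))
 *		return -EOPNOTSUPP;
 *
 *	xfrm = mlx5_accel_esp_create_xfrm(mdev, &attrs, 0);
 *	if (IS_ERR(xfrm))
 *		return PTR_ERR(xfrm);
 *	...
 *	mlx5_accel_esp_destroy_xfrm(xfrm);
 *
 * The inline stubs that follow this #else keep the same call sites
 * compiling when CONFIG_MLX5_ACCEL is disabled, returning -EOPNOTSUPP
 * (or ERR_PTR(-EOPNOTSUPP)) instead.
 */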
+static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } + +static inline struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags) { return ERR_PTR(-EOPNOTSUPP); } +static inline void +mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {} +static inline int +mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; } + +#endif +#endif diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h new file mode 100644 index 000000000..68cd08f02 --- /dev/null +++ b/include/linux/mlx5/cmd.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_CMD_H +#define MLX5_CMD_H + +#include + +struct manage_pages_layout { + u64 ptr; + u32 reserved; + u16 num_entries; + u16 func_id; +}; + + +struct mlx5_cmd_alloc_uar_imm_out { + u32 rsvd[3]; + u32 uarn; +}; + +#endif /* MLX5_CMD_H */ diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h new file mode 100644 index 000000000..0ef6138ec --- /dev/null +++ b/include/linux/mlx5/cq.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_CORE_CQ_H +#define MLX5_CORE_CQ_H + +#include +#include +#include + +struct mlx5_core_cq { + u32 cqn; + int cqe_sz; + __be32 *set_ci_db; + __be32 *arm_db; + struct mlx5_uars_page *uar; + refcount_t refcount; + struct completion free; + unsigned vector; + unsigned int irqn; + void (*comp) (struct mlx5_core_cq *); + void (*event) (struct mlx5_core_cq *, enum mlx5_event); + u32 cons_index; + unsigned arm_sn; + struct mlx5_rsc_debug *dbg; + int pid; + struct { + struct list_head list; + void (*comp)(struct mlx5_core_cq *); + void *priv; + } tasklet_ctx; + int reset_notify_added; + struct list_head reset_notify; + struct mlx5_eq *eq; +}; + + +enum { + MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01, + MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02, + MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04, + MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05, + MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06, + MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10, + MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11, + MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, + MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13, + MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14, + MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15, + MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16, + MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, +}; + +enum { + MLX5_CQE_OWNER_MASK = 1, + MLX5_CQE_REQ = 0, + MLX5_CQE_RESP_WR_IMM = 1, + MLX5_CQE_RESP_SEND = 2, + MLX5_CQE_RESP_SEND_IMM = 3, + MLX5_CQE_RESP_SEND_INV = 4, + MLX5_CQE_RESIZE_CQ = 5, + MLX5_CQE_SIG_ERR = 12, + MLX5_CQE_REQ_ERR = 13, + MLX5_CQE_RESP_ERR = 14, + MLX5_CQE_INVALID = 15, +}; + +enum { + MLX5_CQ_MODIFY_PERIOD = 1 << 0, + MLX5_CQ_MODIFY_COUNT = 1 << 1, + MLX5_CQ_MODIFY_OVERRUN = 1 << 2, +}; + +enum { + MLX5_CQ_OPMOD_RESIZE = 1, + MLX5_MODIFY_CQ_MASK_LOG_SIZE = 1 << 0, + MLX5_MODIFY_CQ_MASK_PG_OFFSET = 1 << 1, + MLX5_MODIFY_CQ_MASK_PG_SIZE = 1 << 2, +}; + +struct mlx5_cq_modify_params { + int type; + union { + struct { + u32 page_offset; + u8 log_cq_size; + } resize; + + struct { + } moder; + + struct { + } mapping; + } params; +}; + +enum { + CQE_SIZE_64 = 0, + CQE_SIZE_128 = 1, + CQE_SIZE_128_PAD = 2, +}; + +#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1) +#define MLX5_MAX_CQ_COUNT (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1) + +static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en) +{ + return padding_128_en ? CQE_SIZE_128_PAD : + size == 64 ? CQE_SIZE_64 : CQE_SIZE_128; +} + +static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) +{ + *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); +} + +enum { + MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24, + MLX5_CQ_DB_REQ_NOT = 0 << 24 +}; + +static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, + void __iomem *uar_page, + u32 cons_index) +{ + __be32 doorbell[2]; + u32 sn; + u32 ci; + + sn = cq->arm_sn & 3; + ci = cons_index & 0xffffff; + + *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); + + /* Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. 
+ */ + wmb(); + + doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci); + doorbell[1] = cpu_to_be32(cq->cqn); + + mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL); +} + +static inline void mlx5_cq_hold(struct mlx5_core_cq *cq) +{ + refcount_inc(&cq->refcount); +} + +static inline void mlx5_cq_put(struct mlx5_core_cq *cq) +{ + if (refcount_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u32 *in, int inlen); +int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); +int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u32 *out, int outlen); +int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u32 *in, int inlen); +int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, + struct mlx5_core_cq *cq, u16 cq_period, + u16 cq_max_count); +static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev, + struct mlx5_err_cqe *err_cqe) +{ + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, + sizeof(*err_cqe), false); +} +int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); +void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); + +#endif /* MLX5_CORE_CQ_H */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h new file mode 100644 index 000000000..11fa4e66a --- /dev/null +++ b/include/linux/mlx5/device.h @@ -0,0 +1,1262 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX5_DEVICE_H +#define MLX5_DEVICE_H + +#include +#include +#include + +#if defined(__LITTLE_ENDIAN) +#define MLX5_SET_HOST_ENDIANNESS 0 +#elif defined(__BIG_ENDIAN) +#define MLX5_SET_HOST_ENDIANNESS 0x80 +#else +#error Host endianness not defined +#endif + +/* helper macros */ +#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0) +#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld) +#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld)) +#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16) +#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32) +#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64) +#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf)) +#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f)) +#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1)) +#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld)) +#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1)) +#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld)) +#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits) + +#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) +#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) +#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) +#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64) +#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) +#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) +#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) +#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) + +/* insert a value to a struct */ +#define MLX5_SET(typ, p, fld, v) do { \ + u32 _v = v; \ + BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ + *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ + cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ + (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \ + << __mlx5_dw_bit_off(typ, fld))); \ +} while (0) + +#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \ + BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \ + MLX5_SET(typ, p, fld[idx], v); \ +} while (0) + +#define MLX5_SET_TO_ONES(typ, p, fld) do { \ + BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ + *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ + cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ + (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \ + << __mlx5_dw_bit_off(typ, fld))); \ +} while (0) + +#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ +__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ +__mlx5_mask(typ, fld)) + +#define MLX5_GET_PR(typ, p, fld) ({ \ + u32 ___t = MLX5_GET(typ, p, fld); \ + pr_debug(#fld " = 0x%x\n", ___t); \ + ___t; \ +}) + +#define __MLX5_SET64(typ, p, fld, v) do { \ + BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \ + *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \ +} while (0) + +#define MLX5_SET64(typ, p, fld, v) do { \ + BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \ + __MLX5_SET64(typ, p, fld, v); \ +} while (0) + +#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \ + BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \ + __MLX5_SET64(typ, p, fld[idx], v); \ +} while (0) + +#define MLX5_GET64(typ, p, fld) 
be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) + +#define MLX5_GET64_PR(typ, p, fld) ({ \ + u64 ___t = MLX5_GET64(typ, p, fld); \ + pr_debug(#fld " = 0x%llx\n", ___t); \ + ___t; \ +}) + +#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\ +__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \ +__mlx5_mask16(typ, fld)) + +#define MLX5_SET16(typ, p, fld, v) do { \ + u16 _v = v; \ + BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \ + *((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \ + cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \ + (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \ + << __mlx5_16_bit_off(typ, fld))); \ +} while (0) + +/* Big endian getters */ +#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\ + __mlx5_64_off(typ, fld))) + +#define MLX5_GET_BE(type_t, typ, p, fld) ({ \ + type_t tmp; \ + switch (sizeof(tmp)) { \ + case sizeof(u8): \ + tmp = (__force type_t)MLX5_GET(typ, p, fld); \ + break; \ + case sizeof(u16): \ + tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \ + break; \ + case sizeof(u32): \ + tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \ + break; \ + case sizeof(u64): \ + tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \ + break; \ + } \ + tmp; \ + }) + +enum mlx5_inline_modes { + MLX5_INLINE_MODE_NONE, + MLX5_INLINE_MODE_L2, + MLX5_INLINE_MODE_IP, + MLX5_INLINE_MODE_TCP_UDP, +}; + +enum { + MLX5_MAX_COMMANDS = 32, + MLX5_CMD_DATA_BLOCK_SIZE = 512, + MLX5_PCI_CMD_XPORT = 7, + MLX5_MKEY_BSF_OCTO_SIZE = 4, + MLX5_MAX_PSVS = 4, +}; + +enum { + MLX5_EXTENDED_UD_AV = 0x80000000, +}; + +enum { + MLX5_CQ_STATE_ARMED = 9, + MLX5_CQ_STATE_ALWAYS_ARMED = 0xb, + MLX5_CQ_STATE_FIRED = 0xa, +}; + +enum { + MLX5_STAT_RATE_OFFSET = 5, +}; + +enum { + MLX5_INLINE_SEG = 0x80000000, +}; + +enum { + MLX5_HW_START_PADDING = MLX5_INLINE_SEG, +}; + +enum { + MLX5_MIN_PKEY_TABLE_SIZE = 128, + MLX5_MAX_LOG_PKEY_TABLE = 5, +}; + +enum { + MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31 +}; + +enum { + MLX5_PFAULT_SUBTYPE_WQE = 0, + MLX5_PFAULT_SUBTYPE_RDMA = 1, +}; + +enum { + MLX5_PERM_LOCAL_READ = 1 << 2, + MLX5_PERM_LOCAL_WRITE = 1 << 3, + MLX5_PERM_REMOTE_READ = 1 << 4, + MLX5_PERM_REMOTE_WRITE = 1 << 5, + MLX5_PERM_ATOMIC = 1 << 6, + MLX5_PERM_UMR_EN = 1 << 7, +}; + +enum { + MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0, + MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2, + MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3, + MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6, + MLX5_PCIE_CTRL_TPH_MASK = 3 << 4, +}; + +enum { + MLX5_EN_RD = (u64)1, + MLX5_EN_WR = (u64)2 +}; + +enum { + MLX5_ADAPTER_PAGE_SHIFT = 12, + MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, +}; + +enum { + MLX5_BFREGS_PER_UAR = 4, + MLX5_MAX_UARS = 1 << 8, + MLX5_NON_FP_BFREGS_PER_UAR = 2, + MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR - + MLX5_NON_FP_BFREGS_PER_UAR, + MLX5_MAX_BFREGS = MLX5_MAX_UARS * + MLX5_NON_FP_BFREGS_PER_UAR, + MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, + MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE, + MLX5_MIN_DYN_BFREGS = 512, + MLX5_MAX_DYN_BFREGS = 1024, +}; + +enum { + MLX5_MKEY_MASK_LEN = 1ull << 0, + MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1, + MLX5_MKEY_MASK_START_ADDR = 1ull << 6, + MLX5_MKEY_MASK_PD = 1ull << 7, + MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8, + MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9, + MLX5_MKEY_MASK_BSF_EN = 1ull << 12, + MLX5_MKEY_MASK_KEY = 1ull << 13, + MLX5_MKEY_MASK_QPN = 1ull << 14, + MLX5_MKEY_MASK_LR = 1ull << 17, + MLX5_MKEY_MASK_LW = 1ull << 18, + MLX5_MKEY_MASK_RR = 1ull << 19, + 
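/*
 * Illustrative sketch, not part of the original header: the MLX5_SET /
 * MLX5_GET / MLX5_ADDR_OF macros defined earlier operate on the big-endian,
 * bit-packed command layouts described in mlx5_ifc.h (the create_cq layout
 * names below are assumed from that companion header).  Typical usage when
 * building a firmware command:
 *
 *	u32 in[MLX5_ST_SZ_DW(create_cq_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {};
 *	void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 *
 *	MLX5_SET(cqc, cqc, log_cq_size, ilog2(nent));
 *	MLX5_SET(cqc, cqc, uar_page, uar_index);
 *	(issue the command, e.g. mlx5_cmd_exec(dev, in, sizeof(in),
 *	 out, sizeof(out)))
 *	cqn = MLX5_GET(create_cq_out, out, cqn);
 *
 * Each MLX5_SET is a read-modify-write of a single big-endian 32-bit word,
 * which is why these layouts never let a field straddle a dword boundary.
 */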
MLX5_MKEY_MASK_RW = 1ull << 20, + MLX5_MKEY_MASK_A = 1ull << 21, + MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, + MLX5_MKEY_MASK_FREE = 1ull << 29, +}; + +enum { + MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4), + + MLX5_UMR_CHECK_NOT_FREE = (1 << 5), + MLX5_UMR_CHECK_FREE = (2 << 5), + + MLX5_UMR_INLINE = (1 << 7), +}; + +#define MLX5_UMR_MTT_ALIGNMENT 0x40 +#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1) +#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT + +#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8) + +enum { + MLX5_EVENT_QUEUE_TYPE_QP = 0, + MLX5_EVENT_QUEUE_TYPE_RQ = 1, + MLX5_EVENT_QUEUE_TYPE_SQ = 2, + MLX5_EVENT_QUEUE_TYPE_DCT = 6, +}; + +enum mlx5_event { + MLX5_EVENT_TYPE_COMP = 0x0, + + MLX5_EVENT_TYPE_PATH_MIG = 0x01, + MLX5_EVENT_TYPE_COMM_EST = 0x02, + MLX5_EVENT_TYPE_SQ_DRAINED = 0x03, + MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13, + MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14, + + MLX5_EVENT_TYPE_CQ_ERROR = 0x04, + MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07, + MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, + MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, + MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, + + MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08, + MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, + MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, + MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, + MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, + MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, + MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, + MLX5_EVENT_TYPE_PPS_EVENT = 0x25, + + MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, + MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, + + MLX5_EVENT_TYPE_CMD = 0x0a, + MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, + + MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, + MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, + + MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, + + MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, + MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, + + MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, +}; + +enum { + MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0, + MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1, +}; + +enum { + MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1, +}; + +enum { + MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1, + MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4, + MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5, + MLX5_PORT_CHANGE_SUBTYPE_LID = 6, + MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7, + MLX5_PORT_CHANGE_SUBTYPE_GUID = 8, + MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9, +}; + +enum { + MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, + MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, + MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, + MLX5_DEV_CAP_FLAG_APM = 1LL << 17, + MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, + MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, + MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, + MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, + MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, + MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, + MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, + MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, +}; + +enum { + MLX5_ROCE_VERSION_1 = 0, + MLX5_ROCE_VERSION_2 = 2, +}; + +enum { + MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1, + MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2, +}; + +enum { + MLX5_ROCE_L3_TYPE_IPV4 = 0, + MLX5_ROCE_L3_TYPE_IPV6 = 1, +}; + +enum { + MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1, + MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2, +}; + +enum { + MLX5_OPCODE_NOP = 0x00, + MLX5_OPCODE_SEND_INVAL = 0x01, + MLX5_OPCODE_RDMA_WRITE = 0x08, + MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, + MLX5_OPCODE_SEND = 0x0a, + MLX5_OPCODE_SEND_IMM = 0x0b, + MLX5_OPCODE_LSO = 0x0e, + MLX5_OPCODE_RDMA_READ = 0x10, + MLX5_OPCODE_ATOMIC_CS = 0x11, + MLX5_OPCODE_ATOMIC_FA = 0x12, + 
MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14, + MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15, + MLX5_OPCODE_BIND_MW = 0x18, + MLX5_OPCODE_CONFIG_CMD = 0x1f, + + MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, + MLX5_RECV_OPCODE_SEND = 0x01, + MLX5_RECV_OPCODE_SEND_IMM = 0x02, + MLX5_RECV_OPCODE_SEND_INVAL = 0x03, + + MLX5_CQE_OPCODE_ERROR = 0x1e, + MLX5_CQE_OPCODE_RESIZE = 0x16, + + MLX5_OPCODE_SET_PSV = 0x20, + MLX5_OPCODE_GET_PSV = 0x21, + MLX5_OPCODE_CHECK_PSV = 0x22, + MLX5_OPCODE_RGET_PSV = 0x26, + MLX5_OPCODE_RCHECK_PSV = 0x27, + + MLX5_OPCODE_UMR = 0x25, + +}; + +enum { + MLX5_SET_PORT_RESET_QKEY = 0, + MLX5_SET_PORT_GUID0 = 16, + MLX5_SET_PORT_NODE_GUID = 17, + MLX5_SET_PORT_SYS_GUID = 18, + MLX5_SET_PORT_GID_TABLE = 19, + MLX5_SET_PORT_PKEY_TABLE = 20, +}; + +enum { + MLX5_BW_NO_LIMIT = 0, + MLX5_100_MBPS_UNIT = 3, + MLX5_GBPS_UNIT = 4, +}; + +enum { + MLX5_MAX_PAGE_SHIFT = 31 +}; + +enum { + MLX5_CAP_OFF_CMDIF_CSUM = 46, +}; + +enum { + /* + * Max wqe size for rdma read is 512 bytes, so this + * limits our max_sge_rd as the wqe needs to fit: + * - ctrl segment (16 bytes) + * - rdma segment (16 bytes) + * - scatter elements (16 bytes each) + */ + MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 +}; + +enum mlx5_odp_transport_cap_bits { + MLX5_ODP_SUPPORT_SEND = 1 << 31, + MLX5_ODP_SUPPORT_RECV = 1 << 30, + MLX5_ODP_SUPPORT_WRITE = 1 << 29, + MLX5_ODP_SUPPORT_READ = 1 << 28, +}; + +struct mlx5_odp_caps { + char reserved[0x10]; + struct { + __be32 rc_odp_caps; + __be32 uc_odp_caps; + __be32 ud_odp_caps; + } per_transport_caps; + char reserved2[0xe4]; +}; + +struct mlx5_cmd_layout { + u8 type; + u8 rsvd0[3]; + __be32 inlen; + __be64 in_ptr; + __be32 in[4]; + __be32 out[4]; + __be64 out_ptr; + __be32 outlen; + u8 token; + u8 sig; + u8 rsvd1; + u8 status_own; +}; + +struct health_buffer { + __be32 assert_var[5]; + __be32 rsvd0[3]; + __be32 assert_exit_ptr; + __be32 assert_callra; + __be32 rsvd1[2]; + __be32 fw_ver; + __be32 hw_id; + __be32 rsvd2; + u8 irisc_index; + u8 synd; + __be16 ext_synd; +}; + +struct mlx5_init_seg { + __be32 fw_rev; + __be32 cmdif_rev_fw_sub; + __be32 rsvd0[2]; + __be32 cmdq_addr_h; + __be32 cmdq_addr_l_sz; + __be32 cmd_dbell; + __be32 rsvd1[120]; + __be32 initializing; + struct health_buffer health; + __be32 rsvd2[880]; + __be32 internal_timer_h; + __be32 internal_timer_l; + __be32 rsvd3[2]; + __be32 health_counter; + __be32 rsvd4[1019]; + __be64 ieee1588_clk; + __be32 ieee1588_clk_type; + __be32 clr_intx; +}; + +struct mlx5_eqe_comp { + __be32 reserved[6]; + __be32 cqn; +}; + +struct mlx5_eqe_qp_srq { + __be32 reserved1[5]; + u8 type; + u8 reserved2[3]; + __be32 qp_srq_n; +}; + +struct mlx5_eqe_cq_err { + __be32 cqn; + u8 reserved1[7]; + u8 syndrome; +}; + +struct mlx5_eqe_port_state { + u8 reserved0[8]; + u8 port; +}; + +struct mlx5_eqe_gpio { + __be32 reserved0[2]; + __be64 gpio_event; +}; + +struct mlx5_eqe_congestion { + u8 type; + u8 rsvd0; + u8 congestion_level; +}; + +struct mlx5_eqe_stall_vl { + u8 rsvd0[3]; + u8 port_vl; +}; + +struct mlx5_eqe_cmd { + __be32 vector; + __be32 rsvd[6]; +}; + +struct mlx5_eqe_page_req { + u8 rsvd0[2]; + __be16 func_id; + __be32 num_pages; + __be32 rsvd1[5]; +}; + +struct mlx5_eqe_page_fault { + __be32 bytes_committed; + union { + struct { + u16 reserved1; + __be16 wqe_index; + u16 reserved2; + __be16 packet_length; + __be32 token; + u8 reserved4[8]; + __be32 pftype_wq; + } __packed wqe; + struct { + __be32 r_key; + u16 reserved1; + __be16 packet_length; + __be32 rdma_op_len; + __be64 rdma_va; + __be32 pftype_token; + } __packed rdma; + } __packed; +} 
__packed; + +struct mlx5_eqe_vport_change { + u8 rsvd0[2]; + __be16 vport_num; + __be32 rsvd1[6]; +} __packed; + +struct mlx5_eqe_port_module { + u8 reserved_at_0[1]; + u8 module; + u8 reserved_at_2[1]; + u8 module_status; + u8 reserved_at_4[2]; + u8 error_type; +} __packed; + +struct mlx5_eqe_pps { + u8 rsvd0[3]; + u8 pin; + u8 rsvd1[4]; + union { + struct { + __be32 time_sec; + __be32 time_nsec; + }; + struct { + __be64 time_stamp; + }; + }; + u8 rsvd2[12]; +} __packed; + +struct mlx5_eqe_dct { + __be32 reserved[6]; + __be32 dctn; +}; + +struct mlx5_eqe_temp_warning { + __be64 sensor_warning_msb; + __be64 sensor_warning_lsb; +} __packed; + +union ev_data { + __be32 raw[7]; + struct mlx5_eqe_cmd cmd; + struct mlx5_eqe_comp comp; + struct mlx5_eqe_qp_srq qp_srq; + struct mlx5_eqe_cq_err cq_err; + struct mlx5_eqe_port_state port; + struct mlx5_eqe_gpio gpio; + struct mlx5_eqe_congestion cong; + struct mlx5_eqe_stall_vl stall_vl; + struct mlx5_eqe_page_req req_pages; + struct mlx5_eqe_page_fault page_fault; + struct mlx5_eqe_vport_change vport_change; + struct mlx5_eqe_port_module port_module; + struct mlx5_eqe_pps pps; + struct mlx5_eqe_dct dct; + struct mlx5_eqe_temp_warning temp_warning; +} __packed; + +struct mlx5_eqe { + u8 rsvd0; + u8 type; + u8 rsvd1; + u8 sub_type; + __be32 rsvd2[7]; + union ev_data data; + __be16 rsvd3; + u8 signature; + u8 owner; +} __packed; + +struct mlx5_cmd_prot_block { + u8 data[MLX5_CMD_DATA_BLOCK_SIZE]; + u8 rsvd0[48]; + __be64 next; + __be32 block_num; + u8 rsvd1; + u8 token; + u8 ctrl_sig; + u8 sig; +}; + +enum { + MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5, +}; + +struct mlx5_err_cqe { + u8 rsvd0[32]; + __be32 srqn; + u8 rsvd1[18]; + u8 vendor_err_synd; + u8 syndrome; + __be32 s_wqe_opcode_qpn; + __be16 wqe_counter; + u8 signature; + u8 op_own; +}; + +struct mlx5_cqe64 { + u8 outer_l3_tunneled; + u8 rsvd0; + __be16 wqe_id; + u8 lro_tcppsh_abort_dupack; + u8 lro_min_ttl; + __be16 lro_tcp_win; + __be32 lro_ack_seq_num; + __be32 rss_hash_result; + u8 rss_hash_type; + u8 ml_path; + u8 rsvd20[2]; + __be16 check_sum; + __be16 slid; + __be32 flags_rqpn; + u8 hds_ip_ext; + u8 l4_l3_hdr_type; + __be16 vlan_info; + __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ + __be32 imm_inval_pkey; + u8 rsvd40[4]; + __be32 byte_cnt; + __be32 timestamp_h; + __be32 timestamp_l; + __be32 sop_drop_qpn; + __be16 wqe_counter; + u8 signature; + u8 op_own; +}; + +struct mlx5_mini_cqe8 { + union { + __be32 rx_hash_result; + struct { + __be16 checksum; + __be16 rsvd; + }; + struct { + __be16 wqe_counter; + u8 s_wqe_opcode; + u8 reserved; + } s_wqe_info; + }; + __be32 byte_cnt; +}; + +enum { + MLX5_NO_INLINE_DATA, + MLX5_INLINE_DATA32_SEG, + MLX5_INLINE_DATA64_SEG, + MLX5_COMPRESSED, +}; + +enum { + MLX5_CQE_FORMAT_CSUM = 0x1, +}; + +#define MLX5_MINI_CQE_ARRAY_SIZE 8 + +static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) +{ + return (cqe->op_own >> 2) & 0x3; +} + +static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) +{ + return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; +} + +static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) +{ + return (cqe->l4_l3_hdr_type >> 4) & 0x7; +} + +static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) +{ + return (cqe->l4_l3_hdr_type >> 2) & 0x3; +} + +static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) +{ + return cqe->outer_l3_tunneled & 0x1; +} + +static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe) +{ + return cqe->l4_l3_hdr_type & 0x1; +} + +static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) +{ + u32 hi, lo; + + 
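/*
 * Illustrative sketch, not part of the original header: how the mlx5_cqe64
 * helpers above are commonly combined on the receive path.  Taking the
 * opcode from the high nibble of op_own follows usual mlx5 driver practice
 * rather than anything spelled out in this file:
 *
 *	u8  opcode = cqe->op_own >> 4;
 *	u32 len    = be32_to_cpu(cqe->byte_cnt);
 *
 *	if (opcode == MLX5_CQE_RESP_ERR) {
 *		mlx5_dump_err_cqe(mdev, (struct mlx5_err_cqe *)cqe);
 *		return;
 *	}
 *	if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
 *		(decompress the trailing struct mlx5_mini_cqe8 entries)
 *	if (cqe_has_vlan(cqe))
 *		vlan_tci = be16_to_cpu(cqe->vlan_info);
 */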
hi = be32_to_cpu(cqe->timestamp_h); + lo = be32_to_cpu(cqe->timestamp_l); + + return (u64)lo | ((u64)hi << 32); +} + +#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9) +#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6) + +struct mpwrq_cqe_bc { + __be16 filler_consumed_strides; + __be16 byte_cnt; +}; + +static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return be16_to_cpu(bc->byte_cnt); +} + +static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc) +{ + return 0x7fff & be16_to_cpu(bc->filler_consumed_strides); +} + +static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return mpwrq_get_cqe_bc_consumed_strides(bc); +} + +static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe) +{ + struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; + + return 0x8000 & be16_to_cpu(bc->filler_consumed_strides); +} + +static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe) +{ + return be16_to_cpu(cqe->wqe_counter); +} + +enum { + CQE_L4_HDR_TYPE_NONE = 0x0, + CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, + CQE_L4_HDR_TYPE_UDP = 0x2, + CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3, + CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4, +}; + +enum { + CQE_RSS_HTYPE_IP = 0x3 << 2, + /* cqe->rss_hash_type[3:2] - IP destination selected for hash + * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved) + */ + CQE_RSS_HTYPE_L4 = 0x3 << 6, + /* cqe->rss_hash_type[7:6] - L4 destination selected for hash + * (00 = none, 01 = TCP. 10 = UDP, 11 = IPSEC.SPI + */ +}; + +enum { + MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0, + MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1, + MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2, +}; + +enum { + CQE_L2_OK = 1 << 0, + CQE_L3_OK = 1 << 1, + CQE_L4_OK = 1 << 2, +}; + +struct mlx5_sig_err_cqe { + u8 rsvd0[16]; + __be32 expected_trans_sig; + __be32 actual_trans_sig; + __be32 expected_reftag; + __be32 actual_reftag; + __be16 syndrome; + u8 rsvd22[2]; + __be32 mkey; + __be64 err_offset; + u8 rsvd30[8]; + __be32 qpn; + u8 rsvd38[2]; + u8 signature; + u8 op_own; +}; + +struct mlx5_wqe_srq_next_seg { + u8 rsvd0[2]; + __be16 next_wqe_index; + u8 signature; + u8 rsvd1[11]; +}; + +union mlx5_ext_cqe { + struct ib_grh grh; + u8 inl[64]; +}; + +struct mlx5_cqe128 { + union mlx5_ext_cqe inl_grh; + struct mlx5_cqe64 cqe64; +}; + +enum { + MLX5_MKEY_STATUS_FREE = 1 << 6, +}; + +enum { + MLX5_MKEY_REMOTE_INVAL = 1 << 24, + MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, + MLX5_MKEY_BSF_EN = 1 << 30, + MLX5_MKEY_LEN64 = 1 << 31, +}; + +struct mlx5_mkey_seg { + /* This is a two bit field occupying bits 31-30. 
+ * bit 31 is always 0, + * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation + */ + u8 status; + u8 pcie_control; + u8 flags; + u8 version; + __be32 qpn_mkey7_0; + u8 rsvd1[4]; + __be32 flags_pd; + __be64 start_addr; + __be64 len; + __be32 bsfs_octo_size; + u8 rsvd2[16]; + __be32 xlt_oct_size; + u8 rsvd3[3]; + u8 log2_page_size; + u8 rsvd4[4]; +}; + +#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) + +enum { + MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 +}; + +enum { + VPORT_STATE_DOWN = 0x0, + VPORT_STATE_UP = 0x1, +}; + +enum { + MLX5_VPORT_ADMIN_STATE_DOWN = 0x0, + MLX5_VPORT_ADMIN_STATE_UP = 0x1, + MLX5_VPORT_ADMIN_STATE_AUTO = 0x2, +}; + +enum { + MLX5_L3_PROT_TYPE_IPV4 = 0, + MLX5_L3_PROT_TYPE_IPV6 = 1, +}; + +enum { + MLX5_L4_PROT_TYPE_TCP = 0, + MLX5_L4_PROT_TYPE_UDP = 1, +}; + +enum { + MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0, + MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1, + MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2, + MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3, + MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4, +}; + +enum { + MLX5_MATCH_OUTER_HEADERS = 1 << 0, + MLX5_MATCH_MISC_PARAMETERS = 1 << 1, + MLX5_MATCH_INNER_HEADERS = 1 << 2, + +}; + +enum { + MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0, + MLX5_FLOW_TABLE_TYPE_ESWITCH = 4, +}; + +enum { + MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0, + MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1, + MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2, +}; + +enum mlx5_list_type { + MLX5_NVPRT_LIST_TYPE_UC = 0x0, + MLX5_NVPRT_LIST_TYPE_MC = 0x1, + MLX5_NVPRT_LIST_TYPE_VLAN = 0x2, +}; + +enum { + MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, + MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, +}; + +enum mlx5_wol_mode { + MLX5_WOL_DISABLE = 0, + MLX5_WOL_SECURED_MAGIC = 1 << 1, + MLX5_WOL_MAGIC = 1 << 2, + MLX5_WOL_ARP = 1 << 3, + MLX5_WOL_BROADCAST = 1 << 4, + MLX5_WOL_MULTICAST = 1 << 5, + MLX5_WOL_UNICAST = 1 << 6, + MLX5_WOL_PHY_ACTIVITY = 1 << 7, +}; + +enum mlx5_mpls_supported_fields { + MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0, + MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1, + MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2, + MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3 +}; + +enum mlx5_flex_parser_protos { + MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4, + MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5, +}; + +/* MLX5 DEV CAPs */ + +/* TODO: EAT.ME */ +enum mlx5_cap_mode { + HCA_CAP_OPMOD_GET_MAX = 0, + HCA_CAP_OPMOD_GET_CUR = 1, +}; + +enum mlx5_cap_type { + MLX5_CAP_GENERAL = 0, + MLX5_CAP_ETHERNET_OFFLOADS, + MLX5_CAP_ODP, + MLX5_CAP_ATOMIC, + MLX5_CAP_ROCE, + MLX5_CAP_IPOIB_OFFLOADS, + MLX5_CAP_IPOIB_ENHANCED_OFFLOADS, + MLX5_CAP_FLOW_TABLE, + MLX5_CAP_ESWITCH_FLOW_TABLE, + MLX5_CAP_ESWITCH, + MLX5_CAP_RESERVED, + MLX5_CAP_VECTOR_CALC, + MLX5_CAP_QOS, + MLX5_CAP_DEBUG, + MLX5_CAP_RESERVED_14, + MLX5_CAP_DEV_MEM, + /* NUM OF CAP Types */ + MLX5_CAP_NUM +}; + +enum mlx5_pcam_reg_groups { + MLX5_PCAM_REGS_5000_TO_507F = 0x0, +}; + +enum mlx5_pcam_feature_groups { + MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0, +}; + +enum mlx5_mcam_reg_groups { + MLX5_MCAM_REGS_FIRST_128 = 0x0, +}; + +enum mlx5_mcam_feature_groups { + MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0, +}; + +enum mlx5_qcam_reg_groups { + MLX5_QCAM_REGS_FIRST_128 = 0x0, +}; + +enum mlx5_qcam_feature_groups { + MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0, +}; + +/* GET Dev Caps macros */ +#define MLX5_CAP_GEN(mdev, cap) \ + MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) + +#define MLX5_CAP_GEN_64(mdev, cap) \ + MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) + +#define MLX5_CAP_GEN_MAX(mdev, cap) \ + 
MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) + +#define MLX5_CAP_ETH(mdev, cap) \ + MLX5_GET(per_protocol_networking_offload_caps,\ + mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) + +#define MLX5_CAP_ETH_MAX(mdev, cap) \ + MLX5_GET(per_protocol_networking_offload_caps,\ + mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) + +#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \ + MLX5_GET(per_protocol_networking_offload_caps,\ + mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap) + +#define MLX5_CAP_ROCE(mdev, cap) \ + MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap) + +#define MLX5_CAP_ROCE_MAX(mdev, cap) \ + MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap) + +#define MLX5_CAP_ATOMIC(mdev, cap) \ + MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap) + +#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ + MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap) + +#define MLX5_CAP_FLOWTABLE(mdev, cap) \ + MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) + +#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ + MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) + +#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) + +#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) + +#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap) + +#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap) + +#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap) + +#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap) + +#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ + MLX5_GET(flow_table_eswitch_cap, \ + mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + +#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ + MLX5_GET(flow_table_eswitch_cap, \ + mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + +#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) + +#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap) + +#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap) + +#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap) + +#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap) + +#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap) + +#define MLX5_CAP_ESW(mdev, cap) \ + MLX5_GET(e_switch_cap, \ + mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) + +#define MLX5_CAP_ESW_MAX(mdev, cap) \ + MLX5_GET(e_switch_cap, \ + mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap) + +#define MLX5_CAP_ODP(mdev, cap)\ + MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) + +#define MLX5_CAP_VECTOR_CALC(mdev, cap) \ + MLX5_GET(vector_calc_cap, \ + mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) + +#define MLX5_CAP_QOS(mdev, cap)\ + MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap) + +#define MLX5_CAP_DEBUG(mdev, 
cap)\ + MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap) + +#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ + MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) + +#define MLX5_CAP_PCAM_REG(mdev, reg) \ + MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg) + +#define MLX5_CAP_MCAM_REG(mdev, reg) \ + MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg) + +#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ + MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) + +#define MLX5_CAP_QCAM_REG(mdev, fld) \ + MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld) + +#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \ + MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld) + +#define MLX5_CAP_FPGA(mdev, cap) \ + MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap) + +#define MLX5_CAP64_FPGA(mdev, cap) \ + MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) + +#define MLX5_CAP_DEV_MEM(mdev, cap)\ + MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + +#define MLX5_CAP64_DEV_MEM(mdev, cap)\ + MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + +enum { + MLX5_CMD_STAT_OK = 0x0, + MLX5_CMD_STAT_INT_ERR = 0x1, + MLX5_CMD_STAT_BAD_OP_ERR = 0x2, + MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, + MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + MLX5_CMD_STAT_BAD_RES_ERR = 0x5, + MLX5_CMD_STAT_RES_BUSY = 0x6, + MLX5_CMD_STAT_LIM_ERR = 0x8, + MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + MLX5_CMD_STAT_IX_ERR = 0xa, + MLX5_CMD_STAT_NO_RES_ERR = 0xf, + MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, + MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, + MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +}; + +enum { + MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0, + MLX5_RFC_2863_COUNTERS_GROUP = 0x1, + MLX5_RFC_2819_COUNTERS_GROUP = 0x2, + MLX5_RFC_3635_COUNTERS_GROUP = 0x3, + MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, + MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, + MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, + MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, + MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16, + MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, +}; + +enum { + MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0, +}; + +static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) +{ + if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) + return 0; + return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; +} + +#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16 +#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16 +#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 +#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\ + MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\ + MLX5_BY_PASS_NUM_MULTICAST_PRIOS) + +#endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h new file mode 100644 index 000000000..0787de28f --- /dev/null +++ b/include/linux/mlx5/doorbell.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_DOORBELL_H +#define MLX5_DOORBELL_H + +#define MLX5_BF_OFFSET 0x800 +#define MLX5_CQ_DOORBELL 0x20 + +#if BITS_PER_LONG == 64 +/* Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. + */ + +#define MLX5_DECLARE_DOORBELL_LOCK(name) +#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void mlx5_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + __raw_writeq(*(u64 *)val, dest); +} + +#else + +/* Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. + */ + +#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name; +#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void mlx5_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + unsigned long flags; + + if (doorbell_lock) + spin_lock_irqsave(doorbell_lock, flags); + __raw_writel((__force u32) val[0], dest); + __raw_writel((__force u32) val[1], dest + 4); + if (doorbell_lock) + spin_unlock_irqrestore(doorbell_lock, flags); +} + +#endif + +#endif /* MLX5_DOORBELL_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h new file mode 100644 index 000000000..dc89a964c --- /dev/null +++ b/include/linux/mlx5/driver.h @@ -0,0 +1,1327 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_DRIVER_H +#define MLX5_DRIVER_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +enum { + MLX5_BOARD_ID_LEN = 64, + MLX5_MAX_NAME_LEN = 16, +}; + +enum { + /* one minute for the sake of bringup. Generally, commands must always + * complete and we may need to increase this timeout value + */ + MLX5_CMD_TIMEOUT_MSEC = 60 * 1000, + MLX5_CMD_WQ_MAX_NAME = 32, +}; + +enum { + CMD_OWNER_SW = 0x0, + CMD_OWNER_HW = 0x1, + CMD_STATUS_SUCCESS = 0, +}; + +enum mlx5_sqp_t { + MLX5_SQP_SMI = 0, + MLX5_SQP_GSI = 1, + MLX5_SQP_IEEE_1588 = 2, + MLX5_SQP_SNIFFER = 3, + MLX5_SQP_SYNC_UMR = 4, +}; + +enum { + MLX5_MAX_PORTS = 2, +}; + +enum { + MLX5_EQ_VEC_PAGES = 0, + MLX5_EQ_VEC_CMD = 1, + MLX5_EQ_VEC_ASYNC = 2, + MLX5_EQ_VEC_PFAULT = 3, + MLX5_EQ_VEC_COMP_BASE, +}; + +enum { + MLX5_MAX_IRQ_NAME = 32 +}; + +enum { + MLX5_ATOMIC_MODE_IB_COMP = 1 << 16, + MLX5_ATOMIC_MODE_CX = 2 << 16, + MLX5_ATOMIC_MODE_8B = 3 << 16, + MLX5_ATOMIC_MODE_16B = 4 << 16, + MLX5_ATOMIC_MODE_32B = 5 << 16, + MLX5_ATOMIC_MODE_64B = 6 << 16, + MLX5_ATOMIC_MODE_128B = 7 << 16, + MLX5_ATOMIC_MODE_256B = 8 << 16, +}; + +enum { + MLX5_REG_QPTS = 0x4002, + MLX5_REG_QETCR = 0x4005, + MLX5_REG_QTCT = 0x400a, + MLX5_REG_QPDPM = 0x4013, + MLX5_REG_QCAM = 0x4019, + MLX5_REG_DCBX_PARAM = 0x4020, + MLX5_REG_DCBX_APP = 0x4021, + MLX5_REG_FPGA_CAP = 0x4022, + MLX5_REG_FPGA_CTRL = 0x4023, + MLX5_REG_FPGA_ACCESS_REG = 0x4024, + MLX5_REG_PCAP = 0x5001, + MLX5_REG_PMTU = 0x5003, + MLX5_REG_PTYS = 0x5004, + MLX5_REG_PAOS = 0x5006, + MLX5_REG_PFCC = 0x5007, + MLX5_REG_PPCNT = 0x5008, + MLX5_REG_PPTB = 0x500b, + MLX5_REG_PBMC = 0x500c, + MLX5_REG_PMAOS = 0x5012, + MLX5_REG_PUDE = 0x5009, + MLX5_REG_PMPE = 0x5010, + MLX5_REG_PELC = 0x500e, + MLX5_REG_PVLC = 0x500f, + MLX5_REG_PCMR = 0x5041, + MLX5_REG_PMLP = 0x5002, + MLX5_REG_PCAM = 0x507f, + MLX5_REG_NODE_DESC = 0x6001, + MLX5_REG_HOST_ENDIANNESS = 0x7004, + MLX5_REG_MCIA = 0x9014, + MLX5_REG_MLCR = 0x902b, + MLX5_REG_MTRC_CAP = 0x9040, + MLX5_REG_MTRC_CONF = 0x9041, + MLX5_REG_MTRC_STDB = 0x9042, + MLX5_REG_MTRC_CTRL = 0x9043, + MLX5_REG_MPCNT = 0x9051, + MLX5_REG_MTPPS = 0x9053, + MLX5_REG_MTPPSE = 0x9054, + MLX5_REG_MPEGC = 0x9056, + MLX5_REG_MCQI = 0x9061, + MLX5_REG_MCC = 0x9062, + MLX5_REG_MCDA = 0x9063, + MLX5_REG_MCAM = 0x907f, +}; + +enum mlx5_qpts_trust_state { + MLX5_QPTS_TRUST_PCP = 1, + MLX5_QPTS_TRUST_DSCP = 2, +}; + +enum mlx5_dcbx_oper_mode { + MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0, + MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, +}; + +enum mlx5_dct_atomic_mode { + MLX5_ATOMIC_MODE_DCT_CX = 2, +}; + +enum { + MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, + MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, +}; + +enum mlx5_page_fault_resume_flags { + 
MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0, + MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1, + MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2, + MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7, +}; + +enum dbg_rsc_type { + MLX5_DBG_RSC_QP, + MLX5_DBG_RSC_EQ, + MLX5_DBG_RSC_CQ, +}; + +enum port_state_policy { + MLX5_POLICY_DOWN = 0, + MLX5_POLICY_UP = 1, + MLX5_POLICY_FOLLOW = 2, + MLX5_POLICY_INVALID = 0xffffffff +}; + +struct mlx5_field_desc { + struct dentry *dent; + int i; +}; + +struct mlx5_rsc_debug { + struct mlx5_core_dev *dev; + void *object; + enum dbg_rsc_type type; + struct dentry *root; + struct mlx5_field_desc fields[0]; +}; + +enum mlx5_dev_event { + MLX5_DEV_EVENT_SYS_ERROR, + MLX5_DEV_EVENT_PORT_UP, + MLX5_DEV_EVENT_PORT_DOWN, + MLX5_DEV_EVENT_PORT_INITIALIZED, + MLX5_DEV_EVENT_LID_CHANGE, + MLX5_DEV_EVENT_PKEY_CHANGE, + MLX5_DEV_EVENT_GUID_CHANGE, + MLX5_DEV_EVENT_CLIENT_REREG, + MLX5_DEV_EVENT_PPS, + MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, +}; + +enum mlx5_port_status { + MLX5_PORT_UP = 1, + MLX5_PORT_DOWN = 2, +}; + +enum mlx5_eq_type { + MLX5_EQ_TYPE_COMP, + MLX5_EQ_TYPE_ASYNC, +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + MLX5_EQ_TYPE_PF, +#endif +}; + +struct mlx5_bfreg_info { + u32 *sys_pages; + int num_low_latency_bfregs; + unsigned int *count; + + /* + * protect bfreg allocation data structs + */ + struct mutex lock; + u32 ver; + bool lib_uar_4k; + u32 num_sys_pages; + u32 num_static_sys_pages; + u32 total_num_bfregs; + u32 num_dyn_bfregs; +}; + +struct mlx5_cmd_first { + __be32 data[4]; +}; + +struct mlx5_cmd_msg { + struct list_head list; + struct cmd_msg_cache *parent; + u32 len; + struct mlx5_cmd_first first; + struct mlx5_cmd_mailbox *next; +}; + +struct mlx5_cmd_debug { + struct dentry *dbg_root; + struct dentry *dbg_in; + struct dentry *dbg_out; + struct dentry *dbg_outlen; + struct dentry *dbg_status; + struct dentry *dbg_run; + void *in_msg; + void *out_msg; + u8 status; + u16 inlen; + u16 outlen; +}; + +struct cmd_msg_cache { + /* protect block chain allocations + */ + spinlock_t lock; + struct list_head head; + unsigned int max_inbox_size; + unsigned int num_ent; +}; + +enum { + MLX5_NUM_COMMAND_CACHES = 5, +}; + +struct mlx5_cmd_stats { + u64 sum; + u64 n; + struct dentry *root; + struct dentry *avg; + struct dentry *count; + /* protect command average calculations */ + spinlock_t lock; +}; + +struct mlx5_cmd { + void *cmd_alloc_buf; + dma_addr_t alloc_dma; + int alloc_size; + void *cmd_buf; + dma_addr_t dma; + u16 cmdif_rev; + u8 log_sz; + u8 log_stride; + int max_reg_cmds; + int events; + u32 __iomem *vector; + + /* protect command queue allocations + */ + spinlock_t alloc_lock; + + /* protect token allocations + */ + spinlock_t token_lock; + u8 token; + unsigned long bitmask; + char wq_name[MLX5_CMD_WQ_MAX_NAME]; + struct workqueue_struct *wq; + struct semaphore sem; + struct semaphore pages_sem; + int mode; + struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; + struct dma_pool *pool; + struct mlx5_cmd_debug dbg; + struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; + int checksum_disabled; + struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; +}; + +struct mlx5_port_caps { + int gid_table_len; + int pkey_table_len; + u8 ext_port_cap; + bool has_smi; +}; + +struct mlx5_cmd_mailbox { + void *buf; + dma_addr_t dma; + struct mlx5_cmd_mailbox *next; +}; + +struct mlx5_buf_list { + void *buf; + dma_addr_t map; +}; + +struct mlx5_frag_buf { + struct mlx5_buf_list *frags; + int npages; + int size; + u8 page_shift; +}; + +struct mlx5_frag_buf_ctrl { + struct mlx5_frag_buf frag_buf; + u32 sz_m1; 
+ u16 frag_sz_m1; + u16 strides_offset; + u8 log_sz; + u8 log_stride; + u8 log_frag_strides; +}; + +struct mlx5_eq_tasklet { + struct list_head list; + struct list_head process_list; + struct tasklet_struct task; + /* lock on completion tasklet list */ + spinlock_t lock; +}; + +struct mlx5_eq_pagefault { + struct work_struct work; + /* Pagefaults lock */ + spinlock_t lock; + struct workqueue_struct *wq; + mempool_t *pool; +}; + +struct mlx5_cq_table { + /* protect radix tree */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_eq { + struct mlx5_core_dev *dev; + struct mlx5_cq_table cq_table; + __be32 __iomem *doorbell; + u32 cons_index; + struct mlx5_frag_buf buf; + int size; + unsigned int irqn; + u8 eqn; + int nent; + u64 mask; + struct list_head list; + int index; + struct mlx5_rsc_debug *dbg; + enum mlx5_eq_type type; + union { + struct mlx5_eq_tasklet tasklet_ctx; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + struct mlx5_eq_pagefault pf_ctx; +#endif + }; +}; + +struct mlx5_core_psv { + u32 psv_idx; + struct psv_layout { + u32 pd; + u16 syndrome; + u16 reserved; + u16 bg; + u16 app_tag; + u32 ref_tag; + } psv; +}; + +struct mlx5_core_sig_ctx { + struct mlx5_core_psv psv_memory; + struct mlx5_core_psv psv_wire; + struct ib_sig_err err_item; + bool sig_status_checked; + bool sig_err_exists; + u32 sigerr_count; +}; + +enum { + MLX5_MKEY_MR = 1, + MLX5_MKEY_MW, +}; + +struct mlx5_core_mkey { + u64 iova; + u64 size; + u32 key; + u32 pd; + u32 type; +}; + +#define MLX5_24BIT_MASK ((1 << 24) - 1) + +enum mlx5_res_type { + MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, + MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, + MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ, + MLX5_RES_SRQ = 3, + MLX5_RES_XSRQ = 4, + MLX5_RES_XRQ = 5, + MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT, +}; + +struct mlx5_core_rsc_common { + enum mlx5_res_type res; + atomic_t refcount; + struct completion free; +}; + +struct mlx5_core_srq { + struct mlx5_core_rsc_common common; /* must be first */ + u32 srqn; + int max; + size_t max_gs; + size_t max_avail_gather; + int wqe_shift; + void (*event) (struct mlx5_core_srq *, enum mlx5_event); + + atomic_t refcount; + struct completion free; +}; + +struct mlx5_eq_table { + void __iomem *update_ci; + void __iomem *update_arm_ci; + struct list_head comp_eqs_list; + struct mlx5_eq pages_eq; + struct mlx5_eq async_eq; + struct mlx5_eq cmd_eq; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + struct mlx5_eq pfault_eq; +#endif + int num_comp_vectors; + /* protect EQs list + */ + spinlock_t lock; +}; + +struct mlx5_uars_page { + void __iomem *map; + bool wc; + u32 index; + struct list_head list; + unsigned int bfregs; + unsigned long *reg_bitmap; /* for non fast path bf regs */ + unsigned long *fp_bitmap; + unsigned int reg_avail; + unsigned int fp_avail; + struct kref ref_count; + struct mlx5_core_dev *mdev; +}; + +struct mlx5_bfreg_head { + /* protect blue flame registers allocations */ + struct mutex lock; + struct list_head list; +}; + +struct mlx5_bfreg_data { + struct mlx5_bfreg_head reg_head; + struct mlx5_bfreg_head wc_head; +}; + +struct mlx5_sq_bfreg { + void __iomem *map; + struct mlx5_uars_page *up; + bool wc; + u32 index; + unsigned int offset; +}; + +struct mlx5_core_health { + struct health_buffer __iomem *health; + __be32 __iomem *health_counter; + struct timer_list timer; + u32 prev; + int miss_counter; + bool sick; + /* wq spinlock to synchronize draining */ + spinlock_t wq_lock; + struct workqueue_struct *wq; + unsigned long flags; + struct work_struct work; + struct delayed_work 
recover_work; +}; + +struct mlx5_qp_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_srq_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_mkey_table { + /* protect radix tree + */ + rwlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_vf_context { + int enabled; + u64 port_guid; + u64 node_guid; + enum port_state_policy policy; +}; + +struct mlx5_core_sriov { + struct mlx5_vf_context *vfs_ctx; + int num_vfs; + int enabled_vfs; +}; + +struct mlx5_irq_info { + cpumask_var_t mask; + char name[MLX5_MAX_IRQ_NAME]; +}; + +struct mlx5_fc_stats { + struct rb_root counters; + struct list_head addlist; + /* protect addlist add/splice operations */ + spinlock_t addlist_lock; + + struct workqueue_struct *wq; + struct delayed_work work; + unsigned long next_query; + unsigned long sampling_interval; /* jiffies */ +}; + +struct mlx5_mpfs; +struct mlx5_eswitch; +struct mlx5_lag; +struct mlx5_pagefault; + +struct mlx5_rate_limit { + u32 rate; + u32 max_burst_sz; + u16 typical_pkt_sz; +}; + +struct mlx5_rl_entry { + struct mlx5_rate_limit rl; + u16 index; + u16 refcount; +}; + +struct mlx5_rl_table { + /* protect rate limit table */ + struct mutex rl_lock; + u16 max_size; + u32 max_rate; + u32 min_rate; + struct mlx5_rl_entry *rl_entry; +}; + +enum port_module_event_status_type { + MLX5_MODULE_STATUS_PLUGGED = 0x1, + MLX5_MODULE_STATUS_UNPLUGGED = 0x2, + MLX5_MODULE_STATUS_ERROR = 0x3, + MLX5_MODULE_STATUS_NUM = 0x3, +}; + +enum port_module_event_error_type { + MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED, + MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE, + MLX5_MODULE_EVENT_ERROR_BUS_STUCK, + MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT, + MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST, + MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER, + MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE, + MLX5_MODULE_EVENT_ERROR_BAD_CABLE, + MLX5_MODULE_EVENT_ERROR_UNKNOWN, + MLX5_MODULE_EVENT_ERROR_NUM, +}; + +struct mlx5_port_module_event_stats { + u64 status_counters[MLX5_MODULE_STATUS_NUM]; + u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM]; +}; + +struct mlx5_priv { + char name[MLX5_MAX_NAME_LEN]; + struct mlx5_eq_table eq_table; + struct mlx5_irq_info *irq_info; + + /* pages stuff */ + struct workqueue_struct *pg_wq; + struct rb_root page_root; + int fw_pages; + atomic_t reg_pages; + struct list_head free_list; + int vfs_pages; + + struct mlx5_core_health health; + + struct mlx5_srq_table srq_table; + + /* start: qp staff */ + struct mlx5_qp_table qp_table; + struct dentry *qp_debugfs; + struct dentry *eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; + /* end: qp staff */ + + /* start: mkey staff */ + struct mlx5_mkey_table mkey_table; + /* end: mkey staff */ + + /* start: alloc staff */ + /* protect buffer alocation according to numa node */ + struct mutex alloc_mutex; + int numa_node; + + struct mutex pgdir_mutex; + struct list_head pgdir_list; + /* end: alloc staff */ + struct dentry *dbg_root; + + /* protect mkey key part */ + spinlock_t mkey_lock; + u8 mkey_key; + + struct list_head dev_list; + struct list_head ctx_list; + spinlock_t ctx_lock; + + struct list_head waiting_events_list; + bool is_accum_events; + + struct mlx5_flow_steering *steering; + struct mlx5_mpfs *mpfs; + struct mlx5_eswitch *eswitch; + struct mlx5_core_sriov sriov; + struct mlx5_lag *lag; + unsigned long pci_dev_data; + struct mlx5_fc_stats fc_stats; + struct mlx5_rl_table rl_table; + + 
struct mlx5_port_module_event_stats pme_stats; + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + void (*pfault)(struct mlx5_core_dev *dev, + void *context, + struct mlx5_pagefault *pfault); + void *pfault_ctx; + struct srcu_struct pfault_srcu; +#endif + struct mlx5_bfreg_data bfregs; + struct mlx5_uars_page *uar; +}; + +enum mlx5_device_state { + MLX5_DEVICE_STATE_UP, + MLX5_DEVICE_STATE_INTERNAL_ERROR, +}; + +enum mlx5_interface_state { + MLX5_INTERFACE_STATE_UP = BIT(0), +}; + +enum mlx5_pci_status { + MLX5_PCI_STATUS_DISABLED, + MLX5_PCI_STATUS_ENABLED, +}; + +enum mlx5_pagefault_type_flags { + MLX5_PFAULT_REQUESTOR = 1 << 0, + MLX5_PFAULT_WRITE = 1 << 1, + MLX5_PFAULT_RDMA = 1 << 2, +}; + +/* Contains the details of a pagefault. */ +struct mlx5_pagefault { + u32 bytes_committed; + u32 token; + u8 event_subtype; + u8 type; + union { + /* Initiator or send message responder pagefault details. */ + struct { + /* Received packet size, only valid for responders. */ + u32 packet_size; + /* + * Number of resource holding WQE, depends on type. + */ + u32 wq_num; + /* + * WQE index. Refers to either the send queue or + * receive queue, according to event_subtype. + */ + u16 wqe_index; + } wqe; + /* RDMA responder pagefault details */ + struct { + u32 r_key; + /* + * Received packet size, minimal size page fault + * resolution required for forward progress. + */ + u32 packet_size; + u32 rdma_op_len; + u64 rdma_va; + } rdma; + }; + + struct mlx5_eq *eq; + struct work_struct work; +}; + +struct mlx5_td { + /* protects tirs list changes while tirs refresh */ + struct mutex list_lock; + struct list_head tirs_list; + u32 tdn; +}; + +struct mlx5e_resources { + u32 pdn; + struct mlx5_td td; + struct mlx5_core_mkey mkey; + struct mlx5_sq_bfreg bfreg; +}; + +#define MLX5_MAX_RESERVED_GIDS 8 + +struct mlx5_rsvd_gids { + unsigned int start; + unsigned int count; + struct ida ida; +}; + +#define MAX_PIN_NUM 8 +struct mlx5_pps { + u8 pin_caps[MAX_PIN_NUM]; + struct work_struct out_work; + u64 start[MAX_PIN_NUM]; + u8 enabled; +}; + +struct mlx5_clock { + rwlock_t lock; + struct cyclecounter cycles; + struct timecounter tc; + struct hwtstamp_config hwtstamp_config; + u32 nominal_c_mult; + unsigned long overflow_period; + struct delayed_work overflow_work; + struct mlx5_core_dev *mdev; + struct ptp_clock *ptp; + struct ptp_clock_info ptp_info; + struct mlx5_pps pps_info; +}; + +struct mlx5_fw_tracer; +struct mlx5_vxlan; + +struct mlx5_core_dev { + struct pci_dev *pdev; + /* sync pci state */ + struct mutex pci_status_mutex; + enum mlx5_pci_status pci_status; + u8 rev_id; + char board_id[MLX5_BOARD_ID_LEN]; + struct mlx5_cmd cmd; + struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; + struct { + u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; + u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; + u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; + u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; + } caps; + phys_addr_t iseg_base; + struct mlx5_init_seg __iomem *iseg; + enum mlx5_device_state state; + /* sync interface state */ + struct mutex intf_state_mutex; + unsigned long intf_state; + void (*event) (struct mlx5_core_dev *dev, + enum mlx5_dev_event event, + unsigned long param); + struct mlx5_priv priv; + struct mlx5_profile *profile; + atomic_t num_qps; + u32 issi; + struct mlx5e_resources mlx5e_res; + struct mlx5_vxlan *vxlan; + struct { + struct mlx5_rsvd_gids reserved_gids; + u32 roce_en; + } roce; +#ifdef CONFIG_MLX5_FPGA + struct mlx5_fpga_device 
*fpga; +#endif +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap; +#endif + struct mlx5_clock clock; + struct mlx5_ib_clock_info *clock_info; + struct page *clock_info_page; + struct mlx5_fw_tracer *tracer; +}; + +struct mlx5_db { + __be32 *db; + union { + struct mlx5_db_pgdir *pgdir; + struct mlx5_ib_user_db_page *user_page; + } u; + dma_addr_t dma; + int index; +}; + +enum { + MLX5_COMP_EQ_SIZE = 1024, +}; + +enum { + MLX5_PTYS_IB = 1 << 0, + MLX5_PTYS_EN = 1 << 2, +}; + +typedef void (*mlx5_cmd_cbk_t)(int status, void *context); + +enum { + MLX5_CMD_ENT_STATE_PENDING_COMP, +}; + +struct mlx5_cmd_work_ent { + unsigned long state; + struct mlx5_cmd_msg *in; + struct mlx5_cmd_msg *out; + void *uout; + int uout_size; + mlx5_cmd_cbk_t callback; + struct delayed_work cb_timeout_work; + void *context; + int idx; + struct completion handling; + struct completion done; + struct mlx5_cmd *cmd; + struct work_struct work; + struct mlx5_cmd_layout *lay; + int ret; + int page_queue; + u8 status; + u8 token; + u64 ts1; + u64 ts2; + u16 op; + bool polling; +}; + +struct mlx5_pas { + u64 pa; + u8 log_sz; +}; + +enum phy_port_state { + MLX5_AAA_111 +}; + +struct mlx5_hca_vport_context { + u32 field_select; + bool sm_virt_aware; + bool has_smi; + bool has_raw; + enum port_state_policy policy; + enum phy_port_state phys_state; + enum ib_port_state vport_state; + u8 port_physical_state; + u64 sys_image_guid; + u64 port_guid; + u64 node_guid; + u32 cap_mask1; + u32 cap_mask1_perm; + u32 cap_mask2; + u32 cap_mask2_perm; + u16 lid; + u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */ + u8 lmc; + u8 subnet_timeout; + u16 sm_lid; + u8 sm_sl; + u16 qkey_violation_counter; + u16 pkey_violation_counter; + bool grh_required; +}; + +static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset) +{ + return buf->frags->buf + offset; +} + +#define STRUCT_FIELD(header, field) \ + .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ + .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field + +static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) +{ + return pci_get_drvdata(pdev); +} + +extern struct dentry *mlx5_debugfs_root; + +static inline u16 fw_rev_maj(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->fw_rev) & 0xffff; +} + +static inline u16 fw_rev_min(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->fw_rev) >> 16; +} + +static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; +} + +static inline u16 cmdif_rev(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; +} + +static inline u32 mlx5_base_mkey(const u32 key) +{ + return key & 0xffffff00u; +} + +static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz, + u16 strides_offset, + struct mlx5_frag_buf_ctrl *fbc) +{ + fbc->log_stride = log_stride; + fbc->log_sz = log_sz; + fbc->sz_m1 = (1 << fbc->log_sz) - 1; + fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; + fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; + fbc->strides_offset = strides_offset; +} + +static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz, + struct mlx5_frag_buf_ctrl *fbc) +{ + mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc); +} + +static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc, + void *cqc) +{ + mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz), + MLX5_GET(cqc, cqc, log_cq_size), + fbc); +} + +static inline void *mlx5_frag_buf_get_wqe(struct 
mlx5_frag_buf_ctrl *fbc, + u32 ix) +{ + unsigned int frag; + + ix += fbc->strides_offset; + frag = ix >> fbc->log_frag_strides; + + return fbc->frag_buf.frags[frag].buf + + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); +} + +static inline u32 +mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) +{ + u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; + + return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); +} + +int mlx5_cmd_init(struct mlx5_core_dev *dev); +void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); +void mlx5_cmd_use_events(struct mlx5_core_dev *dev); +void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); + +int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size); +int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size, mlx5_cmd_cbk_t callback, + void *context); +int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size); +void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); + +int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); +int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); +int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); +void mlx5_health_cleanup(struct mlx5_core_dev *dev); +int mlx5_health_init(struct mlx5_core_dev *dev); +void mlx5_start_health_poll(struct mlx5_core_dev *dev); +void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); +void mlx5_drain_health_wq(struct mlx5_core_dev *dev); +void mlx5_trigger_health_work(struct mlx5_core_dev *dev); +void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); +int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, + struct mlx5_frag_buf *buf, int node); +int mlx5_buf_alloc(struct mlx5_core_dev *dev, + int size, struct mlx5_frag_buf *buf); +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); +int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, + struct mlx5_frag_buf *buf, int node); +void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); +struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, + gfp_t flags, int npages); +void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, + struct mlx5_cmd_mailbox *head); +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_srq_attr *in); +int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); +int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_srq_attr *out); +int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq); +void mlx5_init_mkey_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); +int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey, + u32 *in, int inlen, + u32 *out, int outlen, + mlx5_cmd_cbk_t callback, void *context); +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey, + u32 *in, int inlen); +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey); +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, + u32 *out, int outlen); +int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); +int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); +int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const 
void *inb, void *outb, + u16 opmod, u8 port); +void mlx5_pagealloc_init(struct mlx5_core_dev *dev); +void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); +int mlx5_pagealloc_start(struct mlx5_core_dev *dev); +void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); +void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, + s32 npages); +int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); +int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); +void mlx5_register_debugfs(void); +void mlx5_unregister_debugfs(void); + +void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); +void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); +void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); +void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); +struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, + unsigned int *irqn); +int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); +int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); + +int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write); + +int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); +int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, + int node); +void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); + +const char *mlx5_command_str(int command); +int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, + int npsvs, u32 *sig_index); +int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); +void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); +int mlx5_query_odp_caps(struct mlx5_core_dev *dev, + struct mlx5_odp_caps *odp_caps); +int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, + u8 port_num, void *out, size_t sz); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, + u32 wq_num, u8 type, int error); +#endif + +int mlx5_init_rl_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, + struct mlx5_rate_limit *rl); +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl); +bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); +bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, + struct mlx5_rate_limit *rl_1); +int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, + bool map_wc, bool fast_path); +void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); + +unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); +int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, + u8 roce_version, u8 roce_l3_type, const u8 *gid, + const u8 *mac, bool vlan, u16 vlan_id, u8 port_num); + +static inline int fw_initializing(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->initializing) >> 31; +} + +static inline u32 mlx5_mkey_to_idx(u32 mkey) +{ + return mkey >> 8; +} + +static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) +{ + return mkey_idx << 
8; +} + +static inline u8 mlx5_mkey_variant(u32 mkey) +{ + return mkey & 0xff; +} + +enum { + MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, + MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, +}; + +enum { + MR_CACHE_LAST_STD_ENTRY = 20, + MLX5_IMR_MTT_CACHE_ENTRY, + MLX5_IMR_KSM_CACHE_ENTRY, + MAX_MR_CACHE_ENTRIES +}; + +enum { + MLX5_INTERFACE_PROTOCOL_IB = 0, + MLX5_INTERFACE_PROTOCOL_ETH = 1, +}; + +struct mlx5_interface { + void * (*add)(struct mlx5_core_dev *dev); + void (*remove)(struct mlx5_core_dev *dev, void *context); + int (*attach)(struct mlx5_core_dev *dev, void *context); + void (*detach)(struct mlx5_core_dev *dev, void *context); + void (*event)(struct mlx5_core_dev *dev, void *context, + enum mlx5_dev_event event, unsigned long param); + void (*pfault)(struct mlx5_core_dev *dev, + void *context, + struct mlx5_pagefault *pfault); + void * (*get_dev)(void *context); + int protocol; + struct list_head list; +}; + +void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); +int mlx5_register_interface(struct mlx5_interface *intf); +void mlx5_unregister_interface(struct mlx5_interface *intf); +int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); + +int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); +int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); +bool mlx5_lag_is_active(struct mlx5_core_dev *dev); +struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); +int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, + u64 *values, + int num_counters, + size_t *offsets); +struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); +void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); + +#ifndef CONFIG_MLX5_CORE_IPOIB +static inline +struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, + struct ib_device *ibdev, + const char *name, + void (*setup)(struct net_device *)) +{ + return ERR_PTR(-EOPNOTSUPP); +} +#else +struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, + struct ib_device *ibdev, + const char *name, + void (*setup)(struct net_device *)); +#endif /* CONFIG_MLX5_CORE_IPOIB */ + +struct mlx5_profile { + u64 mask; + u8 log_max_qp; + struct { + int size; + int limit; + } mr_cache[MAX_MR_CACHE_ENTRIES]; +}; + +enum { + MLX5_PCI_DEV_IS_VF = 1 << 0, +}; + +static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) +{ + return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); +} + +#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev)) +#define MLX5_VPORT_MANAGER(mdev) \ + (MLX5_CAP_GEN(mdev, vport_group_manager) && \ + (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ + mlx5_core_is_pf(mdev)) + +static inline int mlx5_get_gid_table_len(u16 param) +{ + if (param > 4) { + pr_warn("gid table length is zero\n"); + return 0; + } + + return 8 * (1 << param); +} + +static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) +{ + return !!(dev->priv.rl_table.max_size); +} + +static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev) +{ + return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) && + MLX5_CAP_GEN(dev, num_vhca_ports) <= 1; +} + +static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev) +{ + return MLX5_CAP_GEN(dev, num_vhca_ports) > 1; +} + +static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev) +{ + return mlx5_core_is_mp_slave(dev) || + mlx5_core_is_mp_master(dev); +} + +static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev) +{ + if (!mlx5_core_mp_enabled(dev)) + 
return 1; + + return MLX5_CAP_GEN(dev, native_port_num); +} + +enum { + MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, +}; + +static inline const struct cpumask * +mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector) +{ + return dev->priv.irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask; +} + +#endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h new file mode 100644 index 000000000..fab5121ff --- /dev/null +++ b/include/linux/mlx5/eswitch.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + */ + +#ifndef _MLX5_ESWITCH_ +#define _MLX5_ESWITCH_ + +#include + +#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) + +enum { + SRIOV_NONE, + SRIOV_LEGACY, + SRIOV_OFFLOADS +}; + +enum { + REP_ETH, + REP_IB, + NUM_REP_TYPES, +}; + +struct mlx5_eswitch_rep; +struct mlx5_eswitch_rep_if { + int (*load)(struct mlx5_core_dev *dev, + struct mlx5_eswitch_rep *rep); + void (*unload)(struct mlx5_eswitch_rep *rep); + void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); + void *priv; + bool valid; +}; + +struct mlx5_eswitch_rep { + struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES]; + u16 vport; + u8 hw_id[ETH_ALEN]; + u16 vlan; + u32 vlan_refcount; +}; + +void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, + int vport_index, + struct mlx5_eswitch_rep_if *rep_if, + u8 rep_type); +void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, + int vport_index, + u8 rep_type); +void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, + int vport, + u8 rep_type); +struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, + int vport); +void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); +u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); +struct mlx5_flow_handle * +mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, + int vport, u32 sqn); +#endif diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h new file mode 100644 index 000000000..3386399fe --- /dev/null +++ b/include/linux/mlx5/fs.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _MLX5_FS_ +#define _MLX5_FS_ + +#include +#include + +#define MLX5_FS_DEFAULT_FLOW_TAG 0x0 + +enum { + MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, + MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, + MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18, +}; + +enum { + MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0), +}; + +#define LEFTOVERS_RULE_NUM 2 +static inline void build_leftovers_ft_param(int *priority, + int *n_ent, + int *n_grp) +{ + *priority = 0; /* Priority of leftovers_prio-0 */ + *n_ent = LEFTOVERS_RULE_NUM; + *n_grp = LEFTOVERS_RULE_NUM; +} + +enum mlx5_flow_namespace_type { + MLX5_FLOW_NAMESPACE_BYPASS, + MLX5_FLOW_NAMESPACE_LAG, + MLX5_FLOW_NAMESPACE_OFFLOADS, + MLX5_FLOW_NAMESPACE_ETHTOOL, + MLX5_FLOW_NAMESPACE_KERNEL, + MLX5_FLOW_NAMESPACE_LEFTOVERS, + MLX5_FLOW_NAMESPACE_ANCHOR, + MLX5_FLOW_NAMESPACE_FDB, + MLX5_FLOW_NAMESPACE_ESW_EGRESS, + MLX5_FLOW_NAMESPACE_ESW_INGRESS, + MLX5_FLOW_NAMESPACE_SNIFFER_RX, + MLX5_FLOW_NAMESPACE_SNIFFER_TX, + MLX5_FLOW_NAMESPACE_EGRESS, +}; + +struct mlx5_flow_table; +struct mlx5_flow_group; +struct mlx5_flow_namespace; +struct mlx5_flow_handle; + +struct mlx5_flow_spec { + u8 match_criteria_enable; + u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; + u32 match_value[MLX5_ST_SZ_DW(fte_match_param)]; +}; + +struct mlx5_flow_destination { + enum mlx5_flow_destination_type type; + union { + u32 tir_num; + u32 ft_num; + struct mlx5_flow_table *ft; + struct mlx5_fc *counter; + struct { + u16 num; + u16 vhca_id; + bool vhca_id_valid; + } vport; + }; +}; + +struct mlx5_flow_namespace * +mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type); +struct mlx5_flow_namespace * +mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type, + int vport); + +struct mlx5_flow_table * +mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + int max_num_groups, + u32 level, + u32 flags); + +struct mlx5_flow_table_attr { + int prio; + int max_fte; + u32 level; + u32 flags; +}; + +struct mlx5_flow_table * +mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + struct mlx5_flow_table_attr *ft_attr); + +struct mlx5_flow_table * +mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + u32 level, u16 vport); +struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( + struct mlx5_flow_namespace *ns, + int prio, u32 level); +int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); + +/* inbox should be set with the following values: + * start_flow_index + * end_flow_index + * match_criteria_enable + * match_criteria + */ +struct mlx5_flow_group * +mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); +void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); + +struct mlx5_fs_vlan { + u16 ethtype; + u16 vid; + u8 prio; +}; + +#define MLX5_FS_VLAN_DEPTH 2 + +struct mlx5_flow_act { + u32 action; + bool has_flow_tag; + u32 flow_tag; + u32 encap_id; + u32 modify_id; + uintptr_t esp_id; + struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; + struct ib_counters *counters; +}; + +#define MLX5_DECLARE_FLOW_ACT(name) \ + struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\ + MLX5_FS_DEFAULT_FLOW_TAG, 0, 0} + +/* 
Single destination per rule. + * Group ID is implied by the match criteria. + */ +struct mlx5_flow_handle * +mlx5_add_flow_rules(struct mlx5_flow_table *ft, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_destination *dest, + int num_dest); +void mlx5_del_flow_rules(struct mlx5_flow_handle *fr); + +int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, + struct mlx5_flow_destination *new_dest, + struct mlx5_flow_destination *old_dest); + +struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler); +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); +void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); +u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); +void mlx5_fc_query_cached(struct mlx5_fc *counter, + u64 *bytes, u64 *packets, u64 *lastuse); +int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, + u64 *packets, u64 *bytes); + +int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); +int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); + +#endif diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h new file mode 100644 index 000000000..9db21cd0e --- /dev/null +++ b/include/linux/mlx5/fs_helpers.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _MLX5_FS_HELPERS_ +#define _MLX5_FS_HELPERS_ + +#include + +#define MLX5_FS_IPV4_VERSION 4 +#define MLX5_FS_IPV6_VERSION 6 + +static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c) +{ + void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, + misc_parameters); + + return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); +} + +static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c, + const u32 *match_v, u8 match) +{ + const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers); + const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, + outer_headers); + + return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff && + MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match; +} + +static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c, + const u32 *match_v) +{ + return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP); +} + +static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c, + const u32 *match_v) +{ + return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP); +} + +static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c) +{ + void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, + misc_parameters); + + return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni); +} + +static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev, + const u32 *match_c, + const u32 *match_v, int version) +{ + int match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, + ft_field_support.outer_ip_version); + const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers); + const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, + outer_headers); + + if (!match_ipv) { + u16 ethertype; + + switch (version) { + case MLX5_FS_IPV4_VERSION: + ethertype = ETH_P_IP; + break; + case MLX5_FS_IPV6_VERSION: + ethertype = ETH_P_IPV6; + break; + default: + return false; + } + + return MLX5_GET(fte_match_set_lyr_2_4, headers_c, + ethertype) == 0xffff && + MLX5_GET(fte_match_set_lyr_2_4, headers_v, + ethertype) == ethertype; + } + + return MLX5_GET(fte_match_set_lyr_2_4, headers_c, + ip_version) == 0xf && + MLX5_GET(fte_match_set_lyr_2_4, headers_v, + ip_version) == version; +} + +static inline bool +mlx5_fs_is_outer_ipv4_flow(struct mlx5_core_dev *mdev, const u32 *match_c, + const u32 *match_v) +{ + return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v, + MLX5_FS_IPV4_VERSION); +} + +static inline bool +mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c, + const u32 *match_v) +{ + return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v, + MLX5_FS_IPV6_VERSION); +} + +static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c) +{ + void *misc_params_c = + MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); + + return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); +} + +#endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h new file mode 100644 index 000000000..af040fcd2 --- /dev/null +++ b/include/linux/mlx5/mlx5_ifc.h @@ -0,0 +1,9295 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. +*/ +#ifndef MLX5_IFC_H +#define MLX5_IFC_H + +#include "mlx5_ifc_fpga.h" + +enum { + MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0, + MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1, + MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2, + MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3, + MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13, + MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14, + MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c, + MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d, + MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4, + MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5, + MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7, + MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc, + MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10, + MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11, + MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12, + MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8, + MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9, + MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15, + MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19, + MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a, + MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b, + MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f, + MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa, + MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb, + MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20, + MLX5_EVENT_TYPE_CODING_FPGA_QP_ERROR = 0x21 +}; + +enum { + MLX5_MODIFY_TIR_BITMASK_LRO = 0x0, + MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1, + MLX5_MODIFY_TIR_BITMASK_HASH = 0x2, + MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3 +}; + +enum { + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, + MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, +}; + +enum { + MLX5_GENERAL_OBJ_TYPES_CAP_UCTX = (1ULL << 4), + MLX5_GENERAL_OBJ_TYPES_CAP_UMEM = (1ULL << 5), +}; + +enum { + MLX5_OBJ_TYPE_UCTX = 0x0004, + MLX5_OBJ_TYPE_UMEM = 0x0005, +}; + +enum { + MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, + MLX5_CMD_OP_QUERY_ADAPTER = 0x101, + MLX5_CMD_OP_INIT_HCA = 0x102, + MLX5_CMD_OP_TEARDOWN_HCA = 0x103, + MLX5_CMD_OP_ENABLE_HCA = 0x104, + MLX5_CMD_OP_DISABLE_HCA = 0x105, + MLX5_CMD_OP_QUERY_PAGES = 0x107, + MLX5_CMD_OP_MANAGE_PAGES = 0x108, + MLX5_CMD_OP_SET_HCA_CAP = 0x109, + 
MLX5_CMD_OP_QUERY_ISSI = 0x10a, + MLX5_CMD_OP_SET_ISSI = 0x10b, + MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, + MLX5_CMD_OP_CREATE_MKEY = 0x200, + MLX5_CMD_OP_QUERY_MKEY = 0x201, + MLX5_CMD_OP_DESTROY_MKEY = 0x202, + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, + MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, + MLX5_CMD_OP_ALLOC_MEMIC = 0x205, + MLX5_CMD_OP_DEALLOC_MEMIC = 0x206, + MLX5_CMD_OP_CREATE_EQ = 0x301, + MLX5_CMD_OP_DESTROY_EQ = 0x302, + MLX5_CMD_OP_QUERY_EQ = 0x303, + MLX5_CMD_OP_GEN_EQE = 0x304, + MLX5_CMD_OP_CREATE_CQ = 0x400, + MLX5_CMD_OP_DESTROY_CQ = 0x401, + MLX5_CMD_OP_QUERY_CQ = 0x402, + MLX5_CMD_OP_MODIFY_CQ = 0x403, + MLX5_CMD_OP_CREATE_QP = 0x500, + MLX5_CMD_OP_DESTROY_QP = 0x501, + MLX5_CMD_OP_RST2INIT_QP = 0x502, + MLX5_CMD_OP_INIT2RTR_QP = 0x503, + MLX5_CMD_OP_RTR2RTS_QP = 0x504, + MLX5_CMD_OP_RTS2RTS_QP = 0x505, + MLX5_CMD_OP_SQERR2RTS_QP = 0x506, + MLX5_CMD_OP_2ERR_QP = 0x507, + MLX5_CMD_OP_2RST_QP = 0x50a, + MLX5_CMD_OP_QUERY_QP = 0x50b, + MLX5_CMD_OP_SQD_RTS_QP = 0x50c, + MLX5_CMD_OP_INIT2INIT_QP = 0x50e, + MLX5_CMD_OP_CREATE_PSV = 0x600, + MLX5_CMD_OP_DESTROY_PSV = 0x601, + MLX5_CMD_OP_CREATE_SRQ = 0x700, + MLX5_CMD_OP_DESTROY_SRQ = 0x701, + MLX5_CMD_OP_QUERY_SRQ = 0x702, + MLX5_CMD_OP_ARM_RQ = 0x703, + MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705, + MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706, + MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707, + MLX5_CMD_OP_ARM_XRC_SRQ = 0x708, + MLX5_CMD_OP_CREATE_DCT = 0x710, + MLX5_CMD_OP_DESTROY_DCT = 0x711, + MLX5_CMD_OP_DRAIN_DCT = 0x712, + MLX5_CMD_OP_QUERY_DCT = 0x713, + MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, + MLX5_CMD_OP_CREATE_XRQ = 0x717, + MLX5_CMD_OP_DESTROY_XRQ = 0x718, + MLX5_CMD_OP_QUERY_XRQ = 0x719, + MLX5_CMD_OP_ARM_XRQ = 0x71a, + MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, + MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, + MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, + MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, + MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760, + MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, + MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762, + MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763, + MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, + MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, + MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f, + MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, + MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, + MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, + MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, + MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780, + MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, + MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, + MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, + MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784, + MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785, + MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786, + MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787, + MLX5_CMD_OP_ALLOC_PD = 0x800, + MLX5_CMD_OP_DEALLOC_PD = 0x801, + MLX5_CMD_OP_ALLOC_UAR = 0x802, + MLX5_CMD_OP_DEALLOC_UAR = 0x803, + MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, + MLX5_CMD_OP_ACCESS_REG = 0x805, + MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, + MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, + MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, + MLX5_CMD_OP_MAD_IFC = 0x50d, + MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, + MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c, + MLX5_CMD_OP_NOP = 0x80d, + MLX5_CMD_OP_ALLOC_XRCD = 0x80e, + MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, + MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816, + MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817, + MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822, + MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823, + 
MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824, + MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825, + MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826, + MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827, + MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828, + MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, + MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, + MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, + MLX5_CMD_OP_SET_WOL_ROL = 0x830, + MLX5_CMD_OP_QUERY_WOL_ROL = 0x831, + MLX5_CMD_OP_CREATE_LAG = 0x840, + MLX5_CMD_OP_MODIFY_LAG = 0x841, + MLX5_CMD_OP_QUERY_LAG = 0x842, + MLX5_CMD_OP_DESTROY_LAG = 0x843, + MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844, + MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845, + MLX5_CMD_OP_CREATE_TIR = 0x900, + MLX5_CMD_OP_MODIFY_TIR = 0x901, + MLX5_CMD_OP_DESTROY_TIR = 0x902, + MLX5_CMD_OP_QUERY_TIR = 0x903, + MLX5_CMD_OP_CREATE_SQ = 0x904, + MLX5_CMD_OP_MODIFY_SQ = 0x905, + MLX5_CMD_OP_DESTROY_SQ = 0x906, + MLX5_CMD_OP_QUERY_SQ = 0x907, + MLX5_CMD_OP_CREATE_RQ = 0x908, + MLX5_CMD_OP_MODIFY_RQ = 0x909, + MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910, + MLX5_CMD_OP_DESTROY_RQ = 0x90a, + MLX5_CMD_OP_QUERY_RQ = 0x90b, + MLX5_CMD_OP_CREATE_RMP = 0x90c, + MLX5_CMD_OP_MODIFY_RMP = 0x90d, + MLX5_CMD_OP_DESTROY_RMP = 0x90e, + MLX5_CMD_OP_QUERY_RMP = 0x90f, + MLX5_CMD_OP_CREATE_TIS = 0x912, + MLX5_CMD_OP_MODIFY_TIS = 0x913, + MLX5_CMD_OP_DESTROY_TIS = 0x914, + MLX5_CMD_OP_QUERY_TIS = 0x915, + MLX5_CMD_OP_CREATE_RQT = 0x916, + MLX5_CMD_OP_MODIFY_RQT = 0x917, + MLX5_CMD_OP_DESTROY_RQT = 0x918, + MLX5_CMD_OP_QUERY_RQT = 0x919, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT = 0x92f, + MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930, + MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931, + MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932, + MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933, + MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934, + MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935, + MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, + MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, + MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938, + MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, + MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, + MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, + MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, + MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d, + MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e, + MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, + MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, + MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, + MLX5_CMD_OP_FPGA_CREATE_QP = 0x960, + MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961, + MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, + MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963, + MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964, + MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00, + MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01, + MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02, + MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03, + MLX5_CMD_OP_MAX +}; + +struct mlx5_ifc_flow_table_fields_supported_bits { + u8 outer_dmac[0x1]; + u8 outer_smac[0x1]; + u8 outer_ether_type[0x1]; + u8 outer_ip_version[0x1]; + u8 outer_first_prio[0x1]; + u8 outer_first_cfi[0x1]; + u8 outer_first_vid[0x1]; + u8 outer_ipv4_ttl[0x1]; + u8 outer_second_prio[0x1]; + u8 outer_second_cfi[0x1]; + u8 outer_second_vid[0x1]; + u8 reserved_at_b[0x1]; + u8 outer_sip[0x1]; + u8 outer_dip[0x1]; + u8 outer_frag[0x1]; + u8 outer_ip_protocol[0x1]; + u8 outer_ip_ecn[0x1]; + u8 outer_ip_dscp[0x1]; + u8 outer_udp_sport[0x1]; + u8 outer_udp_dport[0x1]; + u8 outer_tcp_sport[0x1]; + u8 outer_tcp_dport[0x1]; + u8 outer_tcp_flags[0x1]; + u8 outer_gre_protocol[0x1]; + u8 outer_gre_key[0x1]; + u8 outer_vxlan_vni[0x1]; + u8 reserved_at_1a[0x5]; + u8 source_eswitch_port[0x1]; + + u8 inner_dmac[0x1]; + u8 
inner_smac[0x1]; + u8 inner_ether_type[0x1]; + u8 inner_ip_version[0x1]; + u8 inner_first_prio[0x1]; + u8 inner_first_cfi[0x1]; + u8 inner_first_vid[0x1]; + u8 reserved_at_27[0x1]; + u8 inner_second_prio[0x1]; + u8 inner_second_cfi[0x1]; + u8 inner_second_vid[0x1]; + u8 reserved_at_2b[0x1]; + u8 inner_sip[0x1]; + u8 inner_dip[0x1]; + u8 inner_frag[0x1]; + u8 inner_ip_protocol[0x1]; + u8 inner_ip_ecn[0x1]; + u8 inner_ip_dscp[0x1]; + u8 inner_udp_sport[0x1]; + u8 inner_udp_dport[0x1]; + u8 inner_tcp_sport[0x1]; + u8 inner_tcp_dport[0x1]; + u8 inner_tcp_flags[0x1]; + u8 reserved_at_37[0x9]; + + u8 reserved_at_40[0x5]; + u8 outer_first_mpls_over_udp[0x4]; + u8 outer_first_mpls_over_gre[0x4]; + u8 inner_first_mpls[0x4]; + u8 outer_first_mpls[0x4]; + u8 reserved_at_55[0x2]; + u8 outer_esp_spi[0x1]; + u8 reserved_at_58[0x2]; + u8 bth_dst_qp[0x1]; + + u8 reserved_at_5b[0x25]; +}; + +struct mlx5_ifc_flow_table_prop_layout_bits { + u8 ft_support[0x1]; + u8 reserved_at_1[0x1]; + u8 flow_counter[0x1]; + u8 flow_modify_en[0x1]; + u8 modify_root[0x1]; + u8 identified_miss_table_mode[0x1]; + u8 flow_table_modify[0x1]; + u8 encap[0x1]; + u8 decap[0x1]; + u8 reserved_at_9[0x1]; + u8 pop_vlan[0x1]; + u8 push_vlan[0x1]; + u8 reserved_at_c[0x1]; + u8 pop_vlan_2[0x1]; + u8 push_vlan_2[0x1]; + u8 reserved_at_f[0x11]; + + u8 reserved_at_20[0x2]; + u8 log_max_ft_size[0x6]; + u8 log_max_modify_header_context[0x8]; + u8 max_modify_header_actions[0x8]; + u8 max_ft_level[0x8]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x18]; + u8 log_max_ft_num[0x8]; + + u8 reserved_at_80[0x10]; + u8 log_max_flow_counter[0x8]; + u8 log_max_destination[0x8]; + + u8 reserved_at_a0[0x18]; + u8 log_max_flow[0x8]; + + u8 reserved_at_c0[0x40]; + + struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; + + struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support; +}; + +struct mlx5_ifc_odp_per_transport_service_cap_bits { + u8 send[0x1]; + u8 receive[0x1]; + u8 write[0x1]; + u8 read[0x1]; + u8 atomic[0x1]; + u8 srq_receive[0x1]; + u8 reserved_at_6[0x1a]; +}; + +struct mlx5_ifc_fte_match_set_lyr_2_4_bits { + u8 smac_47_16[0x20]; + + u8 smac_15_0[0x10]; + u8 ethertype[0x10]; + + u8 dmac_47_16[0x20]; + + u8 dmac_15_0[0x10]; + u8 first_prio[0x3]; + u8 first_cfi[0x1]; + u8 first_vid[0xc]; + + u8 ip_protocol[0x8]; + u8 ip_dscp[0x6]; + u8 ip_ecn[0x2]; + u8 cvlan_tag[0x1]; + u8 svlan_tag[0x1]; + u8 frag[0x1]; + u8 ip_version[0x4]; + u8 tcp_flags[0x9]; + + u8 tcp_sport[0x10]; + u8 tcp_dport[0x10]; + + u8 reserved_at_c0[0x18]; + u8 ttl_hoplimit[0x8]; + + u8 udp_sport[0x10]; + u8 udp_dport[0x10]; + + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; + + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; +}; + +struct mlx5_ifc_fte_match_set_misc_bits { + u8 reserved_at_0[0x8]; + u8 source_sqn[0x18]; + + u8 source_eswitch_owner_vhca_id[0x10]; + u8 source_port[0x10]; + + u8 outer_second_prio[0x3]; + u8 outer_second_cfi[0x1]; + u8 outer_second_vid[0xc]; + u8 inner_second_prio[0x3]; + u8 inner_second_cfi[0x1]; + u8 inner_second_vid[0xc]; + + u8 outer_second_cvlan_tag[0x1]; + u8 inner_second_cvlan_tag[0x1]; + u8 outer_second_svlan_tag[0x1]; + u8 inner_second_svlan_tag[0x1]; + u8 reserved_at_64[0xc]; + u8 gre_protocol[0x10]; + + u8 gre_key_h[0x18]; + u8 gre_key_l[0x8]; + + u8 vxlan_vni[0x18]; + u8 reserved_at_b8[0x8]; + + u8 reserved_at_c0[0x20]; + + u8 reserved_at_e0[0xc]; + u8 outer_ipv6_flow_label[0x14]; + + u8 reserved_at_100[0xc]; + u8 inner_ipv6_flow_label[0x14]; + + u8 
reserved_at_120[0x28]; + u8 bth_dst_qp[0x18]; + u8 reserved_at_160[0x20]; + u8 outer_esp_spi[0x20]; + u8 reserved_at_1a0[0x60]; +}; + +struct mlx5_ifc_fte_match_mpls_bits { + u8 mpls_label[0x14]; + u8 mpls_exp[0x3]; + u8 mpls_s_bos[0x1]; + u8 mpls_ttl[0x8]; +}; + +struct mlx5_ifc_fte_match_set_misc2_bits { + struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls; + + struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls; + + struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre; + + struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp; + + u8 reserved_at_80[0x100]; + + u8 metadata_reg_a[0x20]; + + u8 reserved_at_1a0[0x60]; +}; + +struct mlx5_ifc_cmd_pas_bits { + u8 pa_h[0x20]; + + u8 pa_l[0x14]; + u8 reserved_at_34[0xc]; +}; + +struct mlx5_ifc_uint64_bits { + u8 hi[0x20]; + + u8 lo[0x20]; +}; + +enum { + MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0, + MLX5_ADS_STAT_RATE_2_5GBPS = 0x7, + MLX5_ADS_STAT_RATE_10GBPS = 0x8, + MLX5_ADS_STAT_RATE_30GBPS = 0x9, + MLX5_ADS_STAT_RATE_5GBPS = 0xa, + MLX5_ADS_STAT_RATE_20GBPS = 0xb, + MLX5_ADS_STAT_RATE_40GBPS = 0xc, + MLX5_ADS_STAT_RATE_60GBPS = 0xd, + MLX5_ADS_STAT_RATE_80GBPS = 0xe, + MLX5_ADS_STAT_RATE_120GBPS = 0xf, +}; + +struct mlx5_ifc_ads_bits { + u8 fl[0x1]; + u8 free_ar[0x1]; + u8 reserved_at_2[0xe]; + u8 pkey_index[0x10]; + + u8 reserved_at_20[0x8]; + u8 grh[0x1]; + u8 mlid[0x7]; + u8 rlid[0x10]; + + u8 ack_timeout[0x5]; + u8 reserved_at_45[0x3]; + u8 src_addr_index[0x8]; + u8 reserved_at_50[0x4]; + u8 stat_rate[0x4]; + u8 hop_limit[0x8]; + + u8 reserved_at_60[0x4]; + u8 tclass[0x8]; + u8 flow_label[0x14]; + + u8 rgid_rip[16][0x8]; + + u8 reserved_at_100[0x4]; + u8 f_dscp[0x1]; + u8 f_ecn[0x1]; + u8 reserved_at_106[0x1]; + u8 f_eth_prio[0x1]; + u8 ecn[0x2]; + u8 dscp[0x6]; + u8 udp_sport[0x10]; + + u8 dei_cfi[0x1]; + u8 eth_prio[0x3]; + u8 sl[0x4]; + u8 vhca_port_num[0x8]; + u8 rmac_47_32[0x10]; + + u8 rmac_31_0[0x20]; +}; + +struct mlx5_ifc_flow_table_nic_cap_bits { + u8 nic_rx_multi_path_tirs[0x1]; + u8 nic_rx_multi_path_tirs_fts[0x1]; + u8 allow_sniffer_and_nic_rx_shared_tir[0x1]; + u8 reserved_at_3[0x1fd]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; + + u8 reserved_at_400[0x200]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; + + u8 reserved_at_a00[0x200]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; + + u8 reserved_at_e00[0x7200]; +}; + +struct mlx5_ifc_flow_table_eswitch_cap_bits { + u8 reserved_at_0[0x1c]; + u8 fdb_multi_path_to_table[0x1]; + u8 reserved_at_1d[0x1e3]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; + + u8 reserved_at_800[0x7800]; +}; + +struct mlx5_ifc_e_switch_cap_bits { + u8 vport_svlan_strip[0x1]; + u8 vport_cvlan_strip[0x1]; + u8 vport_svlan_insert[0x1]; + u8 vport_cvlan_insert_if_not_exist[0x1]; + u8 vport_cvlan_insert_overwrite[0x1]; + u8 reserved_at_5[0x18]; + u8 merged_eswitch[0x1]; + u8 nic_vport_node_guid_modify[0x1]; + u8 nic_vport_port_guid_modify[0x1]; + + u8 vxlan_encap_decap[0x1]; + u8 nvgre_encap_decap[0x1]; + u8 reserved_at_22[0x9]; + u8 log_max_encap_headers[0x5]; + u8 reserved_2b[0x6]; + u8 max_encap_header_size[0xa]; + + u8 reserved_40[0x7c0]; + +}; + +struct 
mlx5_ifc_qos_cap_bits { + u8 packet_pacing[0x1]; + u8 esw_scheduling[0x1]; + u8 esw_bw_share[0x1]; + u8 esw_rate_limit[0x1]; + u8 reserved_at_4[0x1]; + u8 packet_pacing_burst_bound[0x1]; + u8 packet_pacing_typical_size[0x1]; + u8 reserved_at_7[0x19]; + + u8 reserved_at_20[0x20]; + + u8 packet_pacing_max_rate[0x20]; + + u8 packet_pacing_min_rate[0x20]; + + u8 reserved_at_80[0x10]; + u8 packet_pacing_rate_table_size[0x10]; + + u8 esw_element_type[0x10]; + u8 esw_tsar_type[0x10]; + + u8 reserved_at_c0[0x10]; + u8 max_qos_para_vport[0x10]; + + u8 max_tsar_bw_share[0x20]; + + u8 reserved_at_100[0x700]; +}; + +struct mlx5_ifc_debug_cap_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x2]; + u8 stall_detect[0x1]; + u8 reserved_at_23[0x1d]; + + u8 reserved_at_40[0x7c0]; +}; + +struct mlx5_ifc_per_protocol_networking_offload_caps_bits { + u8 csum_cap[0x1]; + u8 vlan_cap[0x1]; + u8 lro_cap[0x1]; + u8 lro_psh_flag[0x1]; + u8 lro_time_stamp[0x1]; + u8 reserved_at_5[0x2]; + u8 wqe_vlan_insert[0x1]; + u8 self_lb_en_modifiable[0x1]; + u8 reserved_at_9[0x2]; + u8 max_lso_cap[0x5]; + u8 multi_pkt_send_wqe[0x2]; + u8 wqe_inline_mode[0x2]; + u8 rss_ind_tbl_cap[0x4]; + u8 reg_umr_sq[0x1]; + u8 scatter_fcs[0x1]; + u8 enhanced_multi_pkt_send_wqe[0x1]; + u8 tunnel_lso_const_out_ip_id[0x1]; + u8 reserved_at_1c[0x2]; + u8 tunnel_stateless_gre[0x1]; + u8 tunnel_stateless_vxlan[0x1]; + + u8 swp[0x1]; + u8 swp_csum[0x1]; + u8 swp_lso[0x1]; + u8 cqe_checksum_full[0x1]; + u8 tunnel_stateless_geneve_tx[0x1]; + u8 tunnel_stateless_mpls_over_udp[0x1]; + u8 tunnel_stateless_mpls_over_gre[0x1]; + u8 tunnel_stateless_vxlan_gpe[0x1]; + u8 tunnel_stateless_ipv4_over_vxlan[0x1]; + u8 tunnel_stateless_ip_over_ip[0x1]; + u8 reserved_at_2a[0x6]; + u8 max_vxlan_udp_ports[0x8]; + u8 reserved_at_38[0x6]; + u8 max_geneve_opt_len[0x1]; + u8 tunnel_stateless_geneve_rx[0x1]; + + u8 reserved_at_40[0x10]; + u8 lro_min_mss_size[0x10]; + + u8 reserved_at_60[0x120]; + + u8 lro_timer_supported_periods[4][0x20]; + + u8 reserved_at_200[0x600]; +}; + +struct mlx5_ifc_roce_cap_bits { + u8 roce_apm[0x1]; + u8 reserved_at_1[0x1f]; + + u8 reserved_at_20[0x60]; + + u8 reserved_at_80[0xc]; + u8 l3_type[0x4]; + u8 reserved_at_90[0x8]; + u8 roce_version[0x8]; + + u8 reserved_at_a0[0x10]; + u8 r_roce_dest_udp_port[0x10]; + + u8 r_roce_max_src_udp_port[0x10]; + u8 r_roce_min_src_udp_port[0x10]; + + u8 reserved_at_e0[0x10]; + u8 roce_address_table_size[0x10]; + + u8 reserved_at_100[0x700]; +}; + +struct mlx5_ifc_device_mem_cap_bits { + u8 memic[0x1]; + u8 reserved_at_1[0x1f]; + + u8 reserved_at_20[0xb]; + u8 log_min_memic_alloc_size[0x5]; + u8 reserved_at_30[0x8]; + u8 log_max_memic_addr_alignment[0x8]; + + u8 memic_bar_start_addr[0x40]; + + u8 memic_bar_size[0x20]; + + u8 max_memic_size[0x20]; + + u8 reserved_at_c0[0x740]; +}; + +enum { + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100, +}; + +enum { + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10, + 
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100, +}; + +struct mlx5_ifc_atomic_caps_bits { + u8 reserved_at_0[0x40]; + + u8 atomic_req_8B_endianness_mode[0x2]; + u8 reserved_at_42[0x4]; + u8 supported_atomic_req_8B_endianness_mode_1[0x1]; + + u8 reserved_at_47[0x19]; + + u8 reserved_at_60[0x20]; + + u8 reserved_at_80[0x10]; + u8 atomic_operations[0x10]; + + u8 reserved_at_a0[0x10]; + u8 atomic_size_qp[0x10]; + + u8 reserved_at_c0[0x10]; + u8 atomic_size_dc[0x10]; + + u8 reserved_at_e0[0x720]; +}; + +struct mlx5_ifc_odp_cap_bits { + u8 reserved_at_0[0x40]; + + u8 sig[0x1]; + u8 reserved_at_41[0x1f]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps; + + struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps; + + struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; + + u8 reserved_at_e0[0x720]; +}; + +struct mlx5_ifc_calc_op { + u8 reserved_at_0[0x10]; + u8 reserved_at_10[0x9]; + u8 op_swap_endianness[0x1]; + u8 op_min[0x1]; + u8 op_xor[0x1]; + u8 op_or[0x1]; + u8 op_and[0x1]; + u8 op_max[0x1]; + u8 op_add[0x1]; +}; + +struct mlx5_ifc_vector_calc_cap_bits { + u8 calc_matrix[0x1]; + u8 reserved_at_1[0x1f]; + u8 reserved_at_20[0x8]; + u8 max_vec_count[0x8]; + u8 reserved_at_30[0xd]; + u8 max_chunk_size[0x3]; + struct mlx5_ifc_calc_op calc0; + struct mlx5_ifc_calc_op calc1; + struct mlx5_ifc_calc_op calc2; + struct mlx5_ifc_calc_op calc3; + + u8 reserved_at_e0[0x720]; +}; + +enum { + MLX5_WQ_TYPE_LINKED_LIST = 0x0, + MLX5_WQ_TYPE_CYCLIC = 0x1, + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2, + MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3, +}; + +enum { + MLX5_WQ_END_PAD_MODE_NONE = 0x0, + MLX5_WQ_END_PAD_MODE_ALIGN = 0x1, +}; + +enum { + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4, +}; + +enum { + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5, +}; + +enum { + MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0, + MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1, +}; + +enum { + MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0, + MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1, + MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3, +}; + +enum { + MLX5_CAP_PORT_TYPE_IB = 0x0, + MLX5_CAP_PORT_TYPE_ETH = 0x1, +}; + +enum { + MLX5_CAP_UMR_FENCE_STRONG = 0x0, + MLX5_CAP_UMR_FENCE_SMALL = 0x1, + MLX5_CAP_UMR_FENCE_NONE = 0x2, +}; + +struct mlx5_ifc_cmd_hca_cap_bits { + u8 reserved_at_0[0x30]; + u8 vhca_id[0x10]; + + u8 reserved_at_40[0x40]; + + u8 log_max_srq_sz[0x8]; + u8 log_max_qp_sz[0x8]; + u8 reserved_at_90[0xb]; + u8 log_max_qp[0x5]; + + u8 reserved_at_a0[0xb]; + u8 log_max_srq[0x5]; + u8 reserved_at_b0[0x10]; + + u8 reserved_at_c0[0x8]; + u8 log_max_cq_sz[0x8]; + u8 reserved_at_d0[0xb]; + u8 log_max_cq[0x5]; + + u8 log_max_eq_sz[0x8]; + u8 reserved_at_e8[0x2]; + u8 log_max_mkey[0x6]; + u8 reserved_at_f0[0x8]; + u8 dump_fill_mkey[0x1]; + u8 reserved_at_f9[0x3]; + u8 log_max_eq[0x4]; + + u8 max_indirection[0x8]; + u8 
fixed_buffer_size[0x1]; + u8 log_max_mrw_sz[0x7]; + u8 force_teardown[0x1]; + u8 reserved_at_111[0x1]; + u8 log_max_bsf_list_size[0x6]; + u8 umr_extended_translation_offset[0x1]; + u8 null_mkey[0x1]; + u8 log_max_klm_list_size[0x6]; + + u8 reserved_at_120[0xa]; + u8 log_max_ra_req_dc[0x6]; + u8 reserved_at_130[0xa]; + u8 log_max_ra_res_dc[0x6]; + + u8 reserved_at_140[0xa]; + u8 log_max_ra_req_qp[0x6]; + u8 reserved_at_150[0xa]; + u8 log_max_ra_res_qp[0x6]; + + u8 end_pad[0x1]; + u8 cc_query_allowed[0x1]; + u8 cc_modify_allowed[0x1]; + u8 start_pad[0x1]; + u8 cache_line_128byte[0x1]; + u8 reserved_at_165[0xa]; + u8 qcam_reg[0x1]; + u8 gid_table_size[0x10]; + + u8 out_of_seq_cnt[0x1]; + u8 vport_counters[0x1]; + u8 retransmission_q_counters[0x1]; + u8 debug[0x1]; + u8 modify_rq_counter_set_id[0x1]; + u8 rq_delay_drop[0x1]; + u8 max_qp_cnt[0xa]; + u8 pkey_table_size[0x10]; + + u8 vport_group_manager[0x1]; + u8 vhca_group_manager[0x1]; + u8 ib_virt[0x1]; + u8 eth_virt[0x1]; + u8 vnic_env_queue_counters[0x1]; + u8 ets[0x1]; + u8 nic_flow_table[0x1]; + u8 eswitch_manager[0x1]; + u8 device_memory[0x1]; + u8 mcam_reg[0x1]; + u8 pcam_reg[0x1]; + u8 local_ca_ack_delay[0x5]; + u8 port_module_event[0x1]; + u8 enhanced_error_q_counters[0x1]; + u8 ports_check[0x1]; + u8 reserved_at_1b3[0x1]; + u8 disable_link_up[0x1]; + u8 beacon_led[0x1]; + u8 port_type[0x2]; + u8 num_ports[0x8]; + + u8 reserved_at_1c0[0x1]; + u8 pps[0x1]; + u8 pps_modify[0x1]; + u8 log_max_msg[0x5]; + u8 reserved_at_1c8[0x4]; + u8 max_tc[0x4]; + u8 temp_warn_event[0x1]; + u8 dcbx[0x1]; + u8 general_notification_event[0x1]; + u8 reserved_at_1d3[0x2]; + u8 fpga[0x1]; + u8 rol_s[0x1]; + u8 rol_g[0x1]; + u8 reserved_at_1d8[0x1]; + u8 wol_s[0x1]; + u8 wol_g[0x1]; + u8 wol_a[0x1]; + u8 wol_b[0x1]; + u8 wol_m[0x1]; + u8 wol_u[0x1]; + u8 wol_p[0x1]; + + u8 stat_rate_support[0x10]; + u8 reserved_at_1f0[0xc]; + u8 cqe_version[0x4]; + + u8 compact_address_vector[0x1]; + u8 striding_rq[0x1]; + u8 reserved_at_202[0x1]; + u8 ipoib_enhanced_offloads[0x1]; + u8 ipoib_basic_offloads[0x1]; + u8 reserved_at_205[0x1]; + u8 repeated_block_disabled[0x1]; + u8 umr_modify_entity_size_disabled[0x1]; + u8 umr_modify_atomic_disabled[0x1]; + u8 umr_indirect_mkey_disabled[0x1]; + u8 umr_fence[0x2]; + u8 reserved_at_20c[0x3]; + u8 drain_sigerr[0x1]; + u8 cmdif_checksum[0x2]; + u8 sigerr_cqe[0x1]; + u8 reserved_at_213[0x1]; + u8 wq_signature[0x1]; + u8 sctr_data_cqe[0x1]; + u8 reserved_at_216[0x1]; + u8 sho[0x1]; + u8 tph[0x1]; + u8 rf[0x1]; + u8 dct[0x1]; + u8 qos[0x1]; + u8 eth_net_offloads[0x1]; + u8 roce[0x1]; + u8 atomic[0x1]; + u8 reserved_at_21f[0x1]; + + u8 cq_oi[0x1]; + u8 cq_resize[0x1]; + u8 cq_moderation[0x1]; + u8 reserved_at_223[0x3]; + u8 cq_eq_remap[0x1]; + u8 pg[0x1]; + u8 block_lb_mc[0x1]; + u8 reserved_at_229[0x1]; + u8 scqe_break_moderation[0x1]; + u8 cq_period_start_from_cqe[0x1]; + u8 cd[0x1]; + u8 reserved_at_22d[0x1]; + u8 apm[0x1]; + u8 vector_calc[0x1]; + u8 umr_ptr_rlky[0x1]; + u8 imaicl[0x1]; + u8 reserved_at_232[0x4]; + u8 qkv[0x1]; + u8 pkv[0x1]; + u8 set_deth_sqpn[0x1]; + u8 reserved_at_239[0x3]; + u8 xrc[0x1]; + u8 ud[0x1]; + u8 uc[0x1]; + u8 rc[0x1]; + + u8 uar_4k[0x1]; + u8 reserved_at_241[0x9]; + u8 uar_sz[0x6]; + u8 reserved_at_250[0x8]; + u8 log_pg_sz[0x8]; + + u8 bf[0x1]; + u8 driver_version[0x1]; + u8 pad_tx_eth_packet[0x1]; + u8 reserved_at_263[0x8]; + u8 log_bf_reg_size[0x5]; + + u8 reserved_at_270[0xb]; + u8 lag_master[0x1]; + u8 num_lag_ports[0x4]; + + u8 reserved_at_280[0x10]; + u8 max_wqe_sz_sq[0x10]; + + u8 
reserved_at_2a0[0x10]; + u8 max_wqe_sz_rq[0x10]; + + u8 max_flow_counter_31_16[0x10]; + u8 max_wqe_sz_sq_dc[0x10]; + + u8 reserved_at_2e0[0x7]; + u8 max_qp_mcg[0x19]; + + u8 reserved_at_300[0x18]; + u8 log_max_mcg[0x8]; + + u8 reserved_at_320[0x3]; + u8 log_max_transport_domain[0x5]; + u8 reserved_at_328[0x3]; + u8 log_max_pd[0x5]; + u8 reserved_at_330[0xb]; + u8 log_max_xrcd[0x5]; + + u8 nic_receive_steering_discard[0x1]; + u8 receive_discard_vport_down[0x1]; + u8 transmit_discard_vport_down[0x1]; + u8 reserved_at_343[0x5]; + u8 log_max_flow_counter_bulk[0x8]; + u8 max_flow_counter_15_0[0x10]; + + + u8 reserved_at_360[0x3]; + u8 log_max_rq[0x5]; + u8 reserved_at_368[0x3]; + u8 log_max_sq[0x5]; + u8 reserved_at_370[0x3]; + u8 log_max_tir[0x5]; + u8 reserved_at_378[0x3]; + u8 log_max_tis[0x5]; + + u8 basic_cyclic_rcv_wqe[0x1]; + u8 reserved_at_381[0x2]; + u8 log_max_rmp[0x5]; + u8 reserved_at_388[0x3]; + u8 log_max_rqt[0x5]; + u8 reserved_at_390[0x3]; + u8 log_max_rqt_size[0x5]; + u8 reserved_at_398[0x3]; + u8 log_max_tis_per_sq[0x5]; + + u8 ext_stride_num_range[0x1]; + u8 reserved_at_3a1[0x2]; + u8 log_max_stride_sz_rq[0x5]; + u8 reserved_at_3a8[0x3]; + u8 log_min_stride_sz_rq[0x5]; + u8 reserved_at_3b0[0x3]; + u8 log_max_stride_sz_sq[0x5]; + u8 reserved_at_3b8[0x3]; + u8 log_min_stride_sz_sq[0x5]; + + u8 hairpin[0x1]; + u8 reserved_at_3c1[0x2]; + u8 log_max_hairpin_queues[0x5]; + u8 reserved_at_3c8[0x3]; + u8 log_max_hairpin_wq_data_sz[0x5]; + u8 reserved_at_3d0[0x3]; + u8 log_max_hairpin_num_packets[0x5]; + u8 reserved_at_3d8[0x3]; + u8 log_max_wq_sz[0x5]; + + u8 nic_vport_change_event[0x1]; + u8 disable_local_lb_uc[0x1]; + u8 disable_local_lb_mc[0x1]; + u8 log_min_hairpin_wq_data_sz[0x5]; + u8 reserved_at_3e8[0x3]; + u8 log_max_vlan_list[0x5]; + u8 reserved_at_3f0[0x3]; + u8 log_max_current_mc_list[0x5]; + u8 reserved_at_3f8[0x3]; + u8 log_max_current_uc_list[0x5]; + + u8 general_obj_types[0x40]; + + u8 reserved_at_440[0x20]; + + u8 reserved_at_460[0x10]; + u8 max_num_eqs[0x10]; + + u8 reserved_at_480[0x3]; + u8 log_max_l2_table[0x5]; + u8 reserved_at_488[0x8]; + u8 log_uar_page_sz[0x10]; + + u8 reserved_at_4a0[0x20]; + u8 device_frequency_mhz[0x20]; + u8 device_frequency_khz[0x20]; + + u8 reserved_at_500[0x20]; + u8 num_of_uars_per_page[0x20]; + + u8 flex_parser_protocols[0x20]; + u8 reserved_at_560[0x20]; + + u8 reserved_at_580[0x3c]; + u8 mini_cqe_resp_stride_index[0x1]; + u8 cqe_128_always[0x1]; + u8 cqe_compression_128[0x1]; + u8 cqe_compression[0x1]; + + u8 cqe_compression_timeout[0x10]; + u8 cqe_compression_max_num[0x10]; + + u8 reserved_at_5e0[0x10]; + u8 tag_matching[0x1]; + u8 rndv_offload_rc[0x1]; + u8 rndv_offload_dc[0x1]; + u8 log_tag_matching_list_sz[0x5]; + u8 reserved_at_5f8[0x3]; + u8 log_max_xrq[0x5]; + + u8 affiliate_nic_vport_criteria[0x8]; + u8 native_port_num[0x8]; + u8 num_vhca_ports[0x8]; + u8 reserved_at_618[0x6]; + u8 sw_owner_id[0x1]; + u8 reserved_at_61f[0x1e1]; +}; + +enum mlx5_flow_destination_type { + MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, + MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, + + MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, + MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101, +}; + +struct mlx5_ifc_dest_format_struct_bits { + u8 destination_type[0x8]; + u8 destination_id[0x18]; + u8 destination_eswitch_owner_vhca_id_valid[0x1]; + u8 reserved_at_21[0xf]; + u8 destination_eswitch_owner_vhca_id[0x10]; +}; + +struct mlx5_ifc_flow_counter_list_bits { + u8 
flow_counter_id[0x20]; + + u8 reserved_at_20[0x20]; +}; + +union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { + struct mlx5_ifc_dest_format_struct_bits dest_format_struct; + struct mlx5_ifc_flow_counter_list_bits flow_counter_list; + u8 reserved_at_0[0x40]; +}; + +struct mlx5_ifc_fte_match_param_bits { + struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; + + struct mlx5_ifc_fte_match_set_misc_bits misc_parameters; + + struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; + + struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2; + + u8 reserved_at_800[0x800]; +}; + +enum { + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4, +}; + +struct mlx5_ifc_rx_hash_field_select_bits { + u8 l3_prot_type[0x1]; + u8 l4_prot_type[0x1]; + u8 selected_fields[0x1e]; +}; + +enum { + MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0, + MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1, +}; + +enum { + MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0, + MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1, +}; + +struct mlx5_ifc_wq_bits { + u8 wq_type[0x4]; + u8 wq_signature[0x1]; + u8 end_padding_mode[0x2]; + u8 cd_slave[0x1]; + u8 reserved_at_8[0x18]; + + u8 hds_skip_first_sge[0x1]; + u8 log2_hds_buf_size[0x3]; + u8 reserved_at_24[0x7]; + u8 page_offset[0x5]; + u8 lwm[0x10]; + + u8 reserved_at_40[0x8]; + u8 pd[0x18]; + + u8 reserved_at_60[0x8]; + u8 uar_page[0x18]; + + u8 dbr_addr[0x40]; + + u8 hw_counter[0x20]; + + u8 sw_counter[0x20]; + + u8 reserved_at_100[0xc]; + u8 log_wq_stride[0x4]; + u8 reserved_at_110[0x3]; + u8 log_wq_pg_sz[0x5]; + u8 reserved_at_118[0x3]; + u8 log_wq_sz[0x5]; + + u8 reserved_at_120[0x3]; + u8 log_hairpin_num_packets[0x5]; + u8 reserved_at_128[0x3]; + u8 log_hairpin_data_sz[0x5]; + + u8 reserved_at_130[0x4]; + u8 log_wqe_num_of_strides[0x4]; + u8 two_byte_shift_en[0x1]; + u8 reserved_at_139[0x4]; + u8 log_wqe_stride_size[0x3]; + + u8 reserved_at_140[0x4c0]; + + struct mlx5_ifc_cmd_pas_bits pas[0]; +}; + +struct mlx5_ifc_rq_num_bits { + u8 reserved_at_0[0x8]; + u8 rq_num[0x18]; +}; + +struct mlx5_ifc_mac_address_layout_bits { + u8 reserved_at_0[0x10]; + u8 mac_addr_47_32[0x10]; + + u8 mac_addr_31_0[0x20]; +}; + +struct mlx5_ifc_vlan_layout_bits { + u8 reserved_at_0[0x14]; + u8 vlan[0x0c]; + + u8 reserved_at_20[0x20]; +}; + +struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { + u8 reserved_at_0[0xa0]; + + u8 min_time_between_cnps[0x20]; + + u8 reserved_at_c0[0x12]; + u8 cnp_dscp[0x6]; + u8 reserved_at_d8[0x4]; + u8 cnp_prio_mode[0x1]; + u8 cnp_802p_prio[0x3]; + + u8 reserved_at_e0[0x720]; +}; + +struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { + u8 reserved_at_0[0x60]; + + u8 reserved_at_60[0x4]; + u8 clamp_tgt_rate[0x1]; + u8 reserved_at_65[0x3]; + u8 clamp_tgt_rate_after_time_inc[0x1]; + u8 reserved_at_69[0x17]; + + u8 reserved_at_80[0x20]; + + u8 rpg_time_reset[0x20]; + + u8 rpg_byte_reset[0x20]; + + u8 rpg_threshold[0x20]; + + u8 rpg_max_rate[0x20]; + + u8 rpg_ai_rate[0x20]; + + u8 rpg_hai_rate[0x20]; + + u8 rpg_gd[0x20]; + + u8 rpg_min_dec_fac[0x20]; + + u8 rpg_min_rate[0x20]; + + u8 reserved_at_1c0[0xe0]; + + u8 rate_to_set_on_first_cnp[0x20]; + + u8 dce_tcp_g[0x20]; + + u8 dce_tcp_rtt[0x20]; + + u8 rate_reduce_monitor_period[0x20]; + + u8 reserved_at_320[0x20]; + + u8 initial_alpha_value[0x20]; + + u8 reserved_at_360[0x4a0]; +}; + +struct 
mlx5_ifc_cong_control_802_1qau_rp_bits { + u8 reserved_at_0[0x80]; + + u8 rppp_max_rps[0x20]; + + u8 rpg_time_reset[0x20]; + + u8 rpg_byte_reset[0x20]; + + u8 rpg_threshold[0x20]; + + u8 rpg_max_rate[0x20]; + + u8 rpg_ai_rate[0x20]; + + u8 rpg_hai_rate[0x20]; + + u8 rpg_gd[0x20]; + + u8 rpg_min_dec_fac[0x20]; + + u8 rpg_min_rate[0x20]; + + u8 reserved_at_1c0[0x640]; +}; + +enum { + MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1, + MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2, + MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4, +}; + +struct mlx5_ifc_resize_field_select_bits { + u8 resize_field_select[0x20]; +}; + +enum { + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1, + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2, + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4, + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8, +}; + +struct mlx5_ifc_modify_field_select_bits { + u8 modify_field_select[0x20]; +}; + +struct mlx5_ifc_field_select_r_roce_np_bits { + u8 field_select_r_roce_np[0x20]; +}; + +struct mlx5_ifc_field_select_r_roce_rp_bits { + u8 field_select_r_roce_rp[0x20]; +}; + +enum { + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800, +}; + +struct mlx5_ifc_field_select_802_1qau_rp_bits { + u8 field_select_8021qaurp[0x20]; +}; + +struct mlx5_ifc_phys_layer_cntrs_bits { + u8 time_since_last_clear_high[0x20]; + + u8 time_since_last_clear_low[0x20]; + + u8 symbol_errors_high[0x20]; + + u8 symbol_errors_low[0x20]; + + u8 sync_headers_errors_high[0x20]; + + u8 sync_headers_errors_low[0x20]; + + u8 edpl_bip_errors_lane0_high[0x20]; + + u8 edpl_bip_errors_lane0_low[0x20]; + + u8 edpl_bip_errors_lane1_high[0x20]; + + u8 edpl_bip_errors_lane1_low[0x20]; + + u8 edpl_bip_errors_lane2_high[0x20]; + + u8 edpl_bip_errors_lane2_low[0x20]; + + u8 edpl_bip_errors_lane3_high[0x20]; + + u8 edpl_bip_errors_lane3_low[0x20]; + + u8 fc_fec_corrected_blocks_lane0_high[0x20]; + + u8 fc_fec_corrected_blocks_lane0_low[0x20]; + + u8 fc_fec_corrected_blocks_lane1_high[0x20]; + + u8 fc_fec_corrected_blocks_lane1_low[0x20]; + + u8 fc_fec_corrected_blocks_lane2_high[0x20]; + + u8 fc_fec_corrected_blocks_lane2_low[0x20]; + + u8 fc_fec_corrected_blocks_lane3_high[0x20]; + + u8 fc_fec_corrected_blocks_lane3_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane0_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane0_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane1_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane1_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane2_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane2_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane3_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane3_low[0x20]; + + u8 rs_fec_corrected_blocks_high[0x20]; + + u8 rs_fec_corrected_blocks_low[0x20]; + + u8 
rs_fec_uncorrectable_blocks_high[0x20]; + + u8 rs_fec_uncorrectable_blocks_low[0x20]; + + u8 rs_fec_no_errors_blocks_high[0x20]; + + u8 rs_fec_no_errors_blocks_low[0x20]; + + u8 rs_fec_single_error_blocks_high[0x20]; + + u8 rs_fec_single_error_blocks_low[0x20]; + + u8 rs_fec_corrected_symbols_total_high[0x20]; + + u8 rs_fec_corrected_symbols_total_low[0x20]; + + u8 rs_fec_corrected_symbols_lane0_high[0x20]; + + u8 rs_fec_corrected_symbols_lane0_low[0x20]; + + u8 rs_fec_corrected_symbols_lane1_high[0x20]; + + u8 rs_fec_corrected_symbols_lane1_low[0x20]; + + u8 rs_fec_corrected_symbols_lane2_high[0x20]; + + u8 rs_fec_corrected_symbols_lane2_low[0x20]; + + u8 rs_fec_corrected_symbols_lane3_high[0x20]; + + u8 rs_fec_corrected_symbols_lane3_low[0x20]; + + u8 link_down_events[0x20]; + + u8 successful_recovery_events[0x20]; + + u8 reserved_at_640[0x180]; +}; + +struct mlx5_ifc_phys_layer_statistical_cntrs_bits { + u8 time_since_last_clear_high[0x20]; + + u8 time_since_last_clear_low[0x20]; + + u8 phy_received_bits_high[0x20]; + + u8 phy_received_bits_low[0x20]; + + u8 phy_symbol_errors_high[0x20]; + + u8 phy_symbol_errors_low[0x20]; + + u8 phy_corrected_bits_high[0x20]; + + u8 phy_corrected_bits_low[0x20]; + + u8 phy_corrected_bits_lane0_high[0x20]; + + u8 phy_corrected_bits_lane0_low[0x20]; + + u8 phy_corrected_bits_lane1_high[0x20]; + + u8 phy_corrected_bits_lane1_low[0x20]; + + u8 phy_corrected_bits_lane2_high[0x20]; + + u8 phy_corrected_bits_lane2_low[0x20]; + + u8 phy_corrected_bits_lane3_high[0x20]; + + u8 phy_corrected_bits_lane3_low[0x20]; + + u8 reserved_at_200[0x5c0]; +}; + +struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { + u8 symbol_error_counter[0x10]; + + u8 link_error_recovery_counter[0x8]; + + u8 link_downed_counter[0x8]; + + u8 port_rcv_errors[0x10]; + + u8 port_rcv_remote_physical_errors[0x10]; + + u8 port_rcv_switch_relay_errors[0x10]; + + u8 port_xmit_discards[0x10]; + + u8 port_xmit_constraint_errors[0x8]; + + u8 port_rcv_constraint_errors[0x8]; + + u8 reserved_at_70[0x8]; + + u8 link_overrun_errors[0x8]; + + u8 reserved_at_80[0x10]; + + u8 vl_15_dropped[0x10]; + + u8 reserved_at_a0[0x80]; + + u8 port_xmit_wait[0x20]; +}; + +struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { + u8 transmit_queue_high[0x20]; + + u8 transmit_queue_low[0x20]; + + u8 reserved_at_40[0x780]; +}; + +struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { + u8 rx_octets_high[0x20]; + + u8 rx_octets_low[0x20]; + + u8 reserved_at_40[0xc0]; + + u8 rx_frames_high[0x20]; + + u8 rx_frames_low[0x20]; + + u8 tx_octets_high[0x20]; + + u8 tx_octets_low[0x20]; + + u8 reserved_at_180[0xc0]; + + u8 tx_frames_high[0x20]; + + u8 tx_frames_low[0x20]; + + u8 rx_pause_high[0x20]; + + u8 rx_pause_low[0x20]; + + u8 rx_pause_duration_high[0x20]; + + u8 rx_pause_duration_low[0x20]; + + u8 tx_pause_high[0x20]; + + u8 tx_pause_low[0x20]; + + u8 tx_pause_duration_high[0x20]; + + u8 tx_pause_duration_low[0x20]; + + u8 rx_pause_transition_high[0x20]; + + u8 rx_pause_transition_low[0x20]; + + u8 reserved_at_3c0[0x40]; + + u8 device_stall_minor_watermark_cnt_high[0x20]; + + u8 device_stall_minor_watermark_cnt_low[0x20]; + + u8 device_stall_critical_watermark_cnt_high[0x20]; + + u8 device_stall_critical_watermark_cnt_low[0x20]; + + u8 reserved_at_480[0x340]; +}; + +struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { + u8 port_transmit_wait_high[0x20]; + + u8 port_transmit_wait_low[0x20]; + + u8 reserved_at_40[0x100]; + + u8 rx_buffer_almost_full_high[0x20]; + + u8 rx_buffer_almost_full_low[0x20]; + + u8 
rx_buffer_full_high[0x20]; + + u8 rx_buffer_full_low[0x20]; + + u8 rx_icrc_encapsulated_high[0x20]; + + u8 rx_icrc_encapsulated_low[0x20]; + + u8 reserved_at_200[0x5c0]; +}; + +struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { + u8 dot3stats_alignment_errors_high[0x20]; + + u8 dot3stats_alignment_errors_low[0x20]; + + u8 dot3stats_fcs_errors_high[0x20]; + + u8 dot3stats_fcs_errors_low[0x20]; + + u8 dot3stats_single_collision_frames_high[0x20]; + + u8 dot3stats_single_collision_frames_low[0x20]; + + u8 dot3stats_multiple_collision_frames_high[0x20]; + + u8 dot3stats_multiple_collision_frames_low[0x20]; + + u8 dot3stats_sqe_test_errors_high[0x20]; + + u8 dot3stats_sqe_test_errors_low[0x20]; + + u8 dot3stats_deferred_transmissions_high[0x20]; + + u8 dot3stats_deferred_transmissions_low[0x20]; + + u8 dot3stats_late_collisions_high[0x20]; + + u8 dot3stats_late_collisions_low[0x20]; + + u8 dot3stats_excessive_collisions_high[0x20]; + + u8 dot3stats_excessive_collisions_low[0x20]; + + u8 dot3stats_internal_mac_transmit_errors_high[0x20]; + + u8 dot3stats_internal_mac_transmit_errors_low[0x20]; + + u8 dot3stats_carrier_sense_errors_high[0x20]; + + u8 dot3stats_carrier_sense_errors_low[0x20]; + + u8 dot3stats_frame_too_longs_high[0x20]; + + u8 dot3stats_frame_too_longs_low[0x20]; + + u8 dot3stats_internal_mac_receive_errors_high[0x20]; + + u8 dot3stats_internal_mac_receive_errors_low[0x20]; + + u8 dot3stats_symbol_errors_high[0x20]; + + u8 dot3stats_symbol_errors_low[0x20]; + + u8 dot3control_in_unknown_opcodes_high[0x20]; + + u8 dot3control_in_unknown_opcodes_low[0x20]; + + u8 dot3in_pause_frames_high[0x20]; + + u8 dot3in_pause_frames_low[0x20]; + + u8 dot3out_pause_frames_high[0x20]; + + u8 dot3out_pause_frames_low[0x20]; + + u8 reserved_at_400[0x3c0]; +}; + +struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { + u8 ether_stats_drop_events_high[0x20]; + + u8 ether_stats_drop_events_low[0x20]; + + u8 ether_stats_octets_high[0x20]; + + u8 ether_stats_octets_low[0x20]; + + u8 ether_stats_pkts_high[0x20]; + + u8 ether_stats_pkts_low[0x20]; + + u8 ether_stats_broadcast_pkts_high[0x20]; + + u8 ether_stats_broadcast_pkts_low[0x20]; + + u8 ether_stats_multicast_pkts_high[0x20]; + + u8 ether_stats_multicast_pkts_low[0x20]; + + u8 ether_stats_crc_align_errors_high[0x20]; + + u8 ether_stats_crc_align_errors_low[0x20]; + + u8 ether_stats_undersize_pkts_high[0x20]; + + u8 ether_stats_undersize_pkts_low[0x20]; + + u8 ether_stats_oversize_pkts_high[0x20]; + + u8 ether_stats_oversize_pkts_low[0x20]; + + u8 ether_stats_fragments_high[0x20]; + + u8 ether_stats_fragments_low[0x20]; + + u8 ether_stats_jabbers_high[0x20]; + + u8 ether_stats_jabbers_low[0x20]; + + u8 ether_stats_collisions_high[0x20]; + + u8 ether_stats_collisions_low[0x20]; + + u8 ether_stats_pkts64octets_high[0x20]; + + u8 ether_stats_pkts64octets_low[0x20]; + + u8 ether_stats_pkts65to127octets_high[0x20]; + + u8 ether_stats_pkts65to127octets_low[0x20]; + + u8 ether_stats_pkts128to255octets_high[0x20]; + + u8 ether_stats_pkts128to255octets_low[0x20]; + + u8 ether_stats_pkts256to511octets_high[0x20]; + + u8 ether_stats_pkts256to511octets_low[0x20]; + + u8 ether_stats_pkts512to1023octets_high[0x20]; + + u8 ether_stats_pkts512to1023octets_low[0x20]; + + u8 ether_stats_pkts1024to1518octets_high[0x20]; + + u8 ether_stats_pkts1024to1518octets_low[0x20]; + + u8 ether_stats_pkts1519to2047octets_high[0x20]; + + u8 ether_stats_pkts1519to2047octets_low[0x20]; + + u8 ether_stats_pkts2048to4095octets_high[0x20]; + + u8 
ether_stats_pkts2048to4095octets_low[0x20]; + + u8 ether_stats_pkts4096to8191octets_high[0x20]; + + u8 ether_stats_pkts4096to8191octets_low[0x20]; + + u8 ether_stats_pkts8192to10239octets_high[0x20]; + + u8 ether_stats_pkts8192to10239octets_low[0x20]; + + u8 reserved_at_540[0x280]; +}; + +struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { + u8 if_in_octets_high[0x20]; + + u8 if_in_octets_low[0x20]; + + u8 if_in_ucast_pkts_high[0x20]; + + u8 if_in_ucast_pkts_low[0x20]; + + u8 if_in_discards_high[0x20]; + + u8 if_in_discards_low[0x20]; + + u8 if_in_errors_high[0x20]; + + u8 if_in_errors_low[0x20]; + + u8 if_in_unknown_protos_high[0x20]; + + u8 if_in_unknown_protos_low[0x20]; + + u8 if_out_octets_high[0x20]; + + u8 if_out_octets_low[0x20]; + + u8 if_out_ucast_pkts_high[0x20]; + + u8 if_out_ucast_pkts_low[0x20]; + + u8 if_out_discards_high[0x20]; + + u8 if_out_discards_low[0x20]; + + u8 if_out_errors_high[0x20]; + + u8 if_out_errors_low[0x20]; + + u8 if_in_multicast_pkts_high[0x20]; + + u8 if_in_multicast_pkts_low[0x20]; + + u8 if_in_broadcast_pkts_high[0x20]; + + u8 if_in_broadcast_pkts_low[0x20]; + + u8 if_out_multicast_pkts_high[0x20]; + + u8 if_out_multicast_pkts_low[0x20]; + + u8 if_out_broadcast_pkts_high[0x20]; + + u8 if_out_broadcast_pkts_low[0x20]; + + u8 reserved_at_340[0x480]; +}; + +struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { + u8 a_frames_transmitted_ok_high[0x20]; + + u8 a_frames_transmitted_ok_low[0x20]; + + u8 a_frames_received_ok_high[0x20]; + + u8 a_frames_received_ok_low[0x20]; + + u8 a_frame_check_sequence_errors_high[0x20]; + + u8 a_frame_check_sequence_errors_low[0x20]; + + u8 a_alignment_errors_high[0x20]; + + u8 a_alignment_errors_low[0x20]; + + u8 a_octets_transmitted_ok_high[0x20]; + + u8 a_octets_transmitted_ok_low[0x20]; + + u8 a_octets_received_ok_high[0x20]; + + u8 a_octets_received_ok_low[0x20]; + + u8 a_multicast_frames_xmitted_ok_high[0x20]; + + u8 a_multicast_frames_xmitted_ok_low[0x20]; + + u8 a_broadcast_frames_xmitted_ok_high[0x20]; + + u8 a_broadcast_frames_xmitted_ok_low[0x20]; + + u8 a_multicast_frames_received_ok_high[0x20]; + + u8 a_multicast_frames_received_ok_low[0x20]; + + u8 a_broadcast_frames_received_ok_high[0x20]; + + u8 a_broadcast_frames_received_ok_low[0x20]; + + u8 a_in_range_length_errors_high[0x20]; + + u8 a_in_range_length_errors_low[0x20]; + + u8 a_out_of_range_length_field_high[0x20]; + + u8 a_out_of_range_length_field_low[0x20]; + + u8 a_frame_too_long_errors_high[0x20]; + + u8 a_frame_too_long_errors_low[0x20]; + + u8 a_symbol_error_during_carrier_high[0x20]; + + u8 a_symbol_error_during_carrier_low[0x20]; + + u8 a_mac_control_frames_transmitted_high[0x20]; + + u8 a_mac_control_frames_transmitted_low[0x20]; + + u8 a_mac_control_frames_received_high[0x20]; + + u8 a_mac_control_frames_received_low[0x20]; + + u8 a_unsupported_opcodes_received_high[0x20]; + + u8 a_unsupported_opcodes_received_low[0x20]; + + u8 a_pause_mac_ctrl_frames_received_high[0x20]; + + u8 a_pause_mac_ctrl_frames_received_low[0x20]; + + u8 a_pause_mac_ctrl_frames_transmitted_high[0x20]; + + u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; + + u8 reserved_at_4c0[0x300]; +}; + +struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits { + u8 life_time_counter_high[0x20]; + + u8 life_time_counter_low[0x20]; + + u8 rx_errors[0x20]; + + u8 tx_errors[0x20]; + + u8 l0_to_recovery_eieos[0x20]; + + u8 l0_to_recovery_ts[0x20]; + + u8 l0_to_recovery_framing[0x20]; + + u8 l0_to_recovery_retrain[0x20]; + + u8 crc_error_dllp[0x20]; + + u8 crc_error_tlp[0x20]; + + u8 
tx_overflow_buffer_pkt_high[0x20]; + + u8 tx_overflow_buffer_pkt_low[0x20]; + + u8 outbound_stalled_reads[0x20]; + + u8 outbound_stalled_writes[0x20]; + + u8 outbound_stalled_reads_events[0x20]; + + u8 outbound_stalled_writes_events[0x20]; + + u8 reserved_at_200[0x5c0]; +}; + +struct mlx5_ifc_cmd_inter_comp_event_bits { + u8 command_completion_vector[0x20]; + + u8 reserved_at_20[0xc0]; +}; + +struct mlx5_ifc_stall_vl_event_bits { + u8 reserved_at_0[0x18]; + u8 port_num[0x1]; + u8 reserved_at_19[0x3]; + u8 vl[0x4]; + + u8 reserved_at_20[0xa0]; +}; + +struct mlx5_ifc_db_bf_congestion_event_bits { + u8 event_subtype[0x8]; + u8 reserved_at_8[0x8]; + u8 congestion_level[0x8]; + u8 reserved_at_18[0x8]; + + u8 reserved_at_20[0xa0]; +}; + +struct mlx5_ifc_gpio_event_bits { + u8 reserved_at_0[0x60]; + + u8 gpio_event_hi[0x20]; + + u8 gpio_event_lo[0x20]; + + u8 reserved_at_a0[0x40]; +}; + +struct mlx5_ifc_port_state_change_event_bits { + u8 reserved_at_0[0x40]; + + u8 port_num[0x4]; + u8 reserved_at_44[0x1c]; + + u8 reserved_at_60[0x80]; +}; + +struct mlx5_ifc_dropped_packet_logged_bits { + u8 reserved_at_0[0xe0]; +}; + +enum { + MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, + MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, +}; + +struct mlx5_ifc_cq_error_bits { + u8 reserved_at_0[0x8]; + u8 cqn[0x18]; + + u8 reserved_at_20[0x20]; + + u8 reserved_at_40[0x18]; + u8 syndrome[0x8]; + + u8 reserved_at_60[0x80]; +}; + +struct mlx5_ifc_rdma_page_fault_event_bits { + u8 bytes_committed[0x20]; + + u8 r_key[0x20]; + + u8 reserved_at_40[0x10]; + u8 packet_len[0x10]; + + u8 rdma_op_len[0x20]; + + u8 rdma_va[0x40]; + + u8 reserved_at_c0[0x5]; + u8 rdma[0x1]; + u8 write[0x1]; + u8 requestor[0x1]; + u8 qp_number[0x18]; +}; + +struct mlx5_ifc_wqe_associated_page_fault_event_bits { + u8 bytes_committed[0x20]; + + u8 reserved_at_20[0x10]; + u8 wqe_index[0x10]; + + u8 reserved_at_40[0x10]; + u8 len[0x10]; + + u8 reserved_at_60[0x60]; + + u8 reserved_at_c0[0x5]; + u8 rdma[0x1]; + u8 write_read[0x1]; + u8 requestor[0x1]; + u8 qpn[0x18]; +}; + +struct mlx5_ifc_qp_events_bits { + u8 reserved_at_0[0xa0]; + + u8 type[0x8]; + u8 reserved_at_a8[0x18]; + + u8 reserved_at_c0[0x8]; + u8 qpn_rqn_sqn[0x18]; +}; + +struct mlx5_ifc_dct_events_bits { + u8 reserved_at_0[0xc0]; + + u8 reserved_at_c0[0x8]; + u8 dct_number[0x18]; +}; + +struct mlx5_ifc_comp_event_bits { + u8 reserved_at_0[0xc0]; + + u8 reserved_at_c0[0x8]; + u8 cq_number[0x18]; +}; + +enum { + MLX5_QPC_STATE_RST = 0x0, + MLX5_QPC_STATE_INIT = 0x1, + MLX5_QPC_STATE_RTR = 0x2, + MLX5_QPC_STATE_RTS = 0x3, + MLX5_QPC_STATE_SQER = 0x4, + MLX5_QPC_STATE_ERR = 0x6, + MLX5_QPC_STATE_SQD = 0x7, + MLX5_QPC_STATE_SUSPENDED = 0x9, +}; + +enum { + MLX5_QPC_ST_RC = 0x0, + MLX5_QPC_ST_UC = 0x1, + MLX5_QPC_ST_UD = 0x2, + MLX5_QPC_ST_XRC = 0x3, + MLX5_QPC_ST_DCI = 0x5, + MLX5_QPC_ST_QP0 = 0x7, + MLX5_QPC_ST_QP1 = 0x8, + MLX5_QPC_ST_RAW_DATAGRAM = 0x9, + MLX5_QPC_ST_REG_UMR = 0xc, +}; + +enum { + MLX5_QPC_PM_STATE_ARMED = 0x0, + MLX5_QPC_PM_STATE_REARM = 0x1, + MLX5_QPC_PM_STATE_RESERVED = 0x2, + MLX5_QPC_PM_STATE_MIGRATED = 0x3, +}; + +enum { + MLX5_QPC_OFFLOAD_TYPE_RNDV = 0x1, +}; + +enum { + MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0, + MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1, +}; + +enum { + MLX5_QPC_MTU_256_BYTES = 0x1, + MLX5_QPC_MTU_512_BYTES = 0x2, + MLX5_QPC_MTU_1K_BYTES = 0x3, + MLX5_QPC_MTU_2K_BYTES = 0x4, + MLX5_QPC_MTU_4K_BYTES = 0x5, + MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7, +}; + +enum { + MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1, + 
MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2, + MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3, + MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4, + MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5, + MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6, + MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7, + MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8, +}; + +enum { + MLX5_QPC_CS_REQ_DISABLE = 0x0, + MLX5_QPC_CS_REQ_UP_TO_32B = 0x11, + MLX5_QPC_CS_REQ_UP_TO_64B = 0x22, +}; + +enum { + MLX5_QPC_CS_RES_DISABLE = 0x0, + MLX5_QPC_CS_RES_UP_TO_32B = 0x1, + MLX5_QPC_CS_RES_UP_TO_64B = 0x2, +}; + +struct mlx5_ifc_qpc_bits { + u8 state[0x4]; + u8 lag_tx_port_affinity[0x4]; + u8 st[0x8]; + u8 reserved_at_10[0x3]; + u8 pm_state[0x2]; + u8 reserved_at_15[0x3]; + u8 offload_type[0x4]; + u8 end_padding_mode[0x2]; + u8 reserved_at_1e[0x2]; + + u8 wq_signature[0x1]; + u8 block_lb_mc[0x1]; + u8 atomic_like_write_en[0x1]; + u8 latency_sensitive[0x1]; + u8 reserved_at_24[0x1]; + u8 drain_sigerr[0x1]; + u8 reserved_at_26[0x2]; + u8 pd[0x18]; + + u8 mtu[0x3]; + u8 log_msg_max[0x5]; + u8 reserved_at_48[0x1]; + u8 log_rq_size[0x4]; + u8 log_rq_stride[0x3]; + u8 no_sq[0x1]; + u8 log_sq_size[0x4]; + u8 reserved_at_55[0x6]; + u8 rlky[0x1]; + u8 ulp_stateless_offload_mode[0x4]; + + u8 counter_set_id[0x8]; + u8 uar_page[0x18]; + + u8 reserved_at_80[0x8]; + u8 user_index[0x18]; + + u8 reserved_at_a0[0x3]; + u8 log_page_size[0x5]; + u8 remote_qpn[0x18]; + + struct mlx5_ifc_ads_bits primary_address_path; + + struct mlx5_ifc_ads_bits secondary_address_path; + + u8 log_ack_req_freq[0x4]; + u8 reserved_at_384[0x4]; + u8 log_sra_max[0x3]; + u8 reserved_at_38b[0x2]; + u8 retry_count[0x3]; + u8 rnr_retry[0x3]; + u8 reserved_at_393[0x1]; + u8 fre[0x1]; + u8 cur_rnr_retry[0x3]; + u8 cur_retry_count[0x3]; + u8 reserved_at_39b[0x5]; + + u8 reserved_at_3a0[0x20]; + + u8 reserved_at_3c0[0x8]; + u8 next_send_psn[0x18]; + + u8 reserved_at_3e0[0x8]; + u8 cqn_snd[0x18]; + + u8 reserved_at_400[0x8]; + u8 deth_sqpn[0x18]; + + u8 reserved_at_420[0x20]; + + u8 reserved_at_440[0x8]; + u8 last_acked_psn[0x18]; + + u8 reserved_at_460[0x8]; + u8 ssn[0x18]; + + u8 reserved_at_480[0x8]; + u8 log_rra_max[0x3]; + u8 reserved_at_48b[0x1]; + u8 atomic_mode[0x4]; + u8 rre[0x1]; + u8 rwe[0x1]; + u8 rae[0x1]; + u8 reserved_at_493[0x1]; + u8 page_offset[0x6]; + u8 reserved_at_49a[0x3]; + u8 cd_slave_receive[0x1]; + u8 cd_slave_send[0x1]; + u8 cd_master[0x1]; + + u8 reserved_at_4a0[0x3]; + u8 min_rnr_nak[0x5]; + u8 next_rcv_psn[0x18]; + + u8 reserved_at_4c0[0x8]; + u8 xrcd[0x18]; + + u8 reserved_at_4e0[0x8]; + u8 cqn_rcv[0x18]; + + u8 dbr_addr[0x40]; + + u8 q_key[0x20]; + + u8 reserved_at_560[0x5]; + u8 rq_type[0x3]; + u8 srqn_rmpn_xrqn[0x18]; + + u8 reserved_at_580[0x8]; + u8 rmsn[0x18]; + + u8 hw_sq_wqebb_counter[0x10]; + u8 sw_sq_wqebb_counter[0x10]; + + u8 hw_rq_counter[0x20]; + + u8 sw_rq_counter[0x20]; + + u8 reserved_at_600[0x20]; + + u8 reserved_at_620[0xf]; + u8 cgs[0x1]; + u8 cs_req[0x8]; + u8 cs_res[0x8]; + + u8 dc_access_key[0x40]; + + u8 reserved_at_680[0xc0]; +}; + +struct mlx5_ifc_roce_addr_layout_bits { + u8 source_l3_address[16][0x8]; + + u8 reserved_at_80[0x3]; + u8 vlan_valid[0x1]; + u8 vlan_id[0xc]; + u8 source_mac_47_32[0x10]; + + u8 source_mac_31_0[0x20]; + + u8 reserved_at_c0[0x14]; + u8 roce_l3_type[0x4]; + u8 roce_version[0x8]; + + u8 reserved_at_e0[0x20]; +}; + +union mlx5_ifc_hca_cap_union_bits { + struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; + struct mlx5_ifc_odp_cap_bits odp_cap; + struct mlx5_ifc_atomic_caps_bits atomic_caps; + struct mlx5_ifc_roce_cap_bits roce_cap; + struct 
mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; + struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; + struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; + struct mlx5_ifc_e_switch_cap_bits e_switch_cap; + struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; + struct mlx5_ifc_qos_cap_bits qos_cap; + struct mlx5_ifc_fpga_cap_bits fpga_cap; + u8 reserved_at_0[0x8000]; +}; + +enum { + MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1, + MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, + MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, + MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10, + MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20, + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400, + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, +}; + +struct mlx5_ifc_vlan_bits { + u8 ethtype[0x10]; + u8 prio[0x3]; + u8 cfi[0x1]; + u8 vid[0xc]; +}; + +struct mlx5_ifc_flow_context_bits { + struct mlx5_ifc_vlan_bits push_vlan; + + u8 group_id[0x20]; + + u8 reserved_at_40[0x8]; + u8 flow_tag[0x18]; + + u8 reserved_at_60[0x10]; + u8 action[0x10]; + + u8 reserved_at_80[0x8]; + u8 destination_list_size[0x18]; + + u8 reserved_at_a0[0x8]; + u8 flow_counter_list_size[0x18]; + + u8 encap_id[0x20]; + + u8 modify_header_id[0x20]; + + struct mlx5_ifc_vlan_bits push_vlan_2; + + u8 reserved_at_120[0xe0]; + + struct mlx5_ifc_fte_match_param_bits match_value; + + u8 reserved_at_1200[0x600]; + + union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0]; +}; + +enum { + MLX5_XRC_SRQC_STATE_GOOD = 0x0, + MLX5_XRC_SRQC_STATE_ERROR = 0x1, +}; + +struct mlx5_ifc_xrc_srqc_bits { + u8 state[0x4]; + u8 log_xrc_srq_size[0x4]; + u8 reserved_at_8[0x18]; + + u8 wq_signature[0x1]; + u8 cont_srq[0x1]; + u8 reserved_at_22[0x1]; + u8 rlky[0x1]; + u8 basic_cyclic_rcv_wqe[0x1]; + u8 log_rq_stride[0x3]; + u8 xrcd[0x18]; + + u8 page_offset[0x6]; + u8 reserved_at_46[0x2]; + u8 cqn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 user_index_equal_xrc_srqn[0x1]; + u8 reserved_at_81[0x1]; + u8 log_page_size[0x6]; + u8 user_index[0x18]; + + u8 reserved_at_a0[0x20]; + + u8 reserved_at_c0[0x8]; + u8 pd[0x18]; + + u8 lwm[0x10]; + u8 wqe_cnt[0x10]; + + u8 reserved_at_100[0x40]; + + u8 db_record_addr_h[0x20]; + + u8 db_record_addr_l[0x1e]; + u8 reserved_at_17e[0x2]; + + u8 reserved_at_180[0x80]; +}; + +struct mlx5_ifc_vnic_diagnostic_statistics_bits { + u8 counter_error_queues[0x20]; + + u8 total_error_queues[0x20]; + + u8 send_queue_priority_update_flow[0x20]; + + u8 reserved_at_60[0x20]; + + u8 nic_receive_steering_discard[0x40]; + + u8 receive_discard_vport_down[0x40]; + + u8 transmit_discard_vport_down[0x40]; + + u8 reserved_at_140[0xec0]; +}; + +struct mlx5_ifc_traffic_counter_bits { + u8 packets[0x40]; + + u8 octets[0x40]; +}; + +struct mlx5_ifc_tisc_bits { + u8 strict_lag_tx_port_affinity[0x1]; + u8 reserved_at_1[0x3]; + u8 lag_tx_port_affinity[0x04]; + + u8 reserved_at_8[0x4]; + u8 prio[0x4]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x100]; + + u8 reserved_at_120[0x8]; + u8 transport_domain[0x18]; + + u8 reserved_at_140[0x8]; + u8 underlay_qpn[0x18]; + u8 reserved_at_160[0x3a0]; +}; + +enum { + MLX5_TIRC_DISP_TYPE_DIRECT = 0x0, + MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1, +}; + +enum { + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, +}; + +enum { + MLX5_RX_HASH_FN_NONE = 0x0, + MLX5_RX_HASH_FN_INVERTED_XOR8 = 
0x1, + MLX5_RX_HASH_FN_TOEPLITZ = 0x2, +}; + +enum { + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2, +}; + +struct mlx5_ifc_tirc_bits { + u8 reserved_at_0[0x20]; + + u8 disp_type[0x4]; + u8 reserved_at_24[0x1c]; + + u8 reserved_at_40[0x40]; + + u8 reserved_at_80[0x4]; + u8 lro_timeout_period_usecs[0x10]; + u8 lro_enable_mask[0x4]; + u8 lro_max_ip_payload_size[0x8]; + + u8 reserved_at_a0[0x40]; + + u8 reserved_at_e0[0x8]; + u8 inline_rqn[0x18]; + + u8 rx_hash_symmetric[0x1]; + u8 reserved_at_101[0x1]; + u8 tunneled_offload_en[0x1]; + u8 reserved_at_103[0x5]; + u8 indirect_table[0x18]; + + u8 rx_hash_fn[0x4]; + u8 reserved_at_124[0x2]; + u8 self_lb_block[0x2]; + u8 transport_domain[0x18]; + + u8 rx_hash_toeplitz_key[10][0x20]; + + struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer; + + struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; + + u8 reserved_at_2c0[0x4c0]; +}; + +enum { + MLX5_SRQC_STATE_GOOD = 0x0, + MLX5_SRQC_STATE_ERROR = 0x1, +}; + +struct mlx5_ifc_srqc_bits { + u8 state[0x4]; + u8 log_srq_size[0x4]; + u8 reserved_at_8[0x18]; + + u8 wq_signature[0x1]; + u8 cont_srq[0x1]; + u8 reserved_at_22[0x1]; + u8 rlky[0x1]; + u8 reserved_at_24[0x1]; + u8 log_rq_stride[0x3]; + u8 xrcd[0x18]; + + u8 page_offset[0x6]; + u8 reserved_at_46[0x2]; + u8 cqn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 reserved_at_80[0x2]; + u8 log_page_size[0x6]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x20]; + + u8 reserved_at_c0[0x8]; + u8 pd[0x18]; + + u8 lwm[0x10]; + u8 wqe_cnt[0x10]; + + u8 reserved_at_100[0x40]; + + u8 dbr_addr[0x40]; + + u8 reserved_at_180[0x80]; +}; + +enum { + MLX5_SQC_STATE_RST = 0x0, + MLX5_SQC_STATE_RDY = 0x1, + MLX5_SQC_STATE_ERR = 0x3, +}; + +struct mlx5_ifc_sqc_bits { + u8 rlky[0x1]; + u8 cd_master[0x1]; + u8 fre[0x1]; + u8 flush_in_error_en[0x1]; + u8 allow_multi_pkt_send_wqe[0x1]; + u8 min_wqe_inline_mode[0x3]; + u8 state[0x4]; + u8 reg_umr[0x1]; + u8 allow_swp[0x1]; + u8 hairpin[0x1]; + u8 reserved_at_f[0x11]; + + u8 reserved_at_20[0x8]; + u8 user_index[0x18]; + + u8 reserved_at_40[0x8]; + u8 cqn[0x18]; + + u8 reserved_at_60[0x8]; + u8 hairpin_peer_rq[0x18]; + + u8 reserved_at_80[0x10]; + u8 hairpin_peer_vhca[0x10]; + + u8 reserved_at_a0[0x50]; + + u8 packet_pacing_rate_limit_index[0x10]; + u8 tis_lst_sz[0x10]; + u8 reserved_at_110[0x10]; + + u8 reserved_at_120[0x40]; + + u8 reserved_at_160[0x8]; + u8 tis_num_0[0x18]; + + struct mlx5_ifc_wq_bits wq; +}; + +enum { + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2, + SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, +}; + +struct mlx5_ifc_scheduling_context_bits { + u8 element_type[0x8]; + u8 reserved_at_8[0x18]; + + u8 element_attributes[0x20]; + + u8 parent_element_id[0x20]; + + u8 reserved_at_60[0x40]; + + u8 bw_share[0x20]; + + u8 max_average_bw[0x20]; + + u8 reserved_at_e0[0x120]; +}; + +struct mlx5_ifc_rqtc_bits { + u8 reserved_at_0[0xa0]; + + u8 reserved_at_a0[0x10]; + u8 rqt_max_size[0x10]; + + u8 reserved_at_c0[0x10]; + u8 rqt_actual_size[0x10]; + + u8 reserved_at_e0[0x6a0]; + + struct mlx5_ifc_rq_num_bits rq_num[0]; +}; + +enum { + MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, + MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1, +}; + +enum { + MLX5_RQC_STATE_RST = 0x0, + MLX5_RQC_STATE_RDY = 0x1, + MLX5_RQC_STATE_ERR = 0x3, +}; + +struct mlx5_ifc_rqc_bits { + u8 rlky[0x1]; + u8 delay_drop_en[0x1]; + u8 scatter_fcs[0x1]; + u8 vsd[0x1]; + u8 
mem_rq_type[0x4]; + u8 state[0x4]; + u8 reserved_at_c[0x1]; + u8 flush_in_error_en[0x1]; + u8 hairpin[0x1]; + u8 reserved_at_f[0x11]; + + u8 reserved_at_20[0x8]; + u8 user_index[0x18]; + + u8 reserved_at_40[0x8]; + u8 cqn[0x18]; + + u8 counter_set_id[0x8]; + u8 reserved_at_68[0x18]; + + u8 reserved_at_80[0x8]; + u8 rmpn[0x18]; + + u8 reserved_at_a0[0x8]; + u8 hairpin_peer_sq[0x18]; + + u8 reserved_at_c0[0x10]; + u8 hairpin_peer_vhca[0x10]; + + u8 reserved_at_e0[0xa0]; + + struct mlx5_ifc_wq_bits wq; +}; + +enum { + MLX5_RMPC_STATE_RDY = 0x1, + MLX5_RMPC_STATE_ERR = 0x3, +}; + +struct mlx5_ifc_rmpc_bits { + u8 reserved_at_0[0x8]; + u8 state[0x4]; + u8 reserved_at_c[0x14]; + + u8 basic_cyclic_rcv_wqe[0x1]; + u8 reserved_at_21[0x1f]; + + u8 reserved_at_40[0x140]; + + struct mlx5_ifc_wq_bits wq; +}; + +struct mlx5_ifc_nic_vport_context_bits { + u8 reserved_at_0[0x5]; + u8 min_wqe_inline_mode[0x3]; + u8 reserved_at_8[0x15]; + u8 disable_mc_local_lb[0x1]; + u8 disable_uc_local_lb[0x1]; + u8 roce_en[0x1]; + + u8 arm_change_event[0x1]; + u8 reserved_at_21[0x1a]; + u8 event_on_mtu[0x1]; + u8 event_on_promisc_change[0x1]; + u8 event_on_vlan_change[0x1]; + u8 event_on_mc_address_change[0x1]; + u8 event_on_uc_address_change[0x1]; + + u8 reserved_at_40[0xc]; + + u8 affiliation_criteria[0x4]; + u8 affiliated_vhca_id[0x10]; + + u8 reserved_at_60[0xd0]; + + u8 mtu[0x10]; + + u8 system_image_guid[0x40]; + u8 port_guid[0x40]; + u8 node_guid[0x40]; + + u8 reserved_at_200[0x140]; + u8 qkey_violation_counter[0x10]; + u8 reserved_at_350[0x430]; + + u8 promisc_uc[0x1]; + u8 promisc_mc[0x1]; + u8 promisc_all[0x1]; + u8 reserved_at_783[0x2]; + u8 allowed_list_type[0x3]; + u8 reserved_at_788[0xc]; + u8 allowed_list_size[0xc]; + + struct mlx5_ifc_mac_address_layout_bits permanent_address; + + u8 reserved_at_7e0[0x20]; + + u8 current_uc_mac_address[0][0x40]; +}; + +enum { + MLX5_MKC_ACCESS_MODE_PA = 0x0, + MLX5_MKC_ACCESS_MODE_MTT = 0x1, + MLX5_MKC_ACCESS_MODE_KLMS = 0x2, + MLX5_MKC_ACCESS_MODE_KSM = 0x3, + MLX5_MKC_ACCESS_MODE_MEMIC = 0x5, +}; + +struct mlx5_ifc_mkc_bits { + u8 reserved_at_0[0x1]; + u8 free[0x1]; + u8 reserved_at_2[0x1]; + u8 access_mode_4_2[0x3]; + u8 reserved_at_6[0x7]; + u8 relaxed_ordering_write[0x1]; + u8 reserved_at_e[0x1]; + u8 small_fence_on_rdma_read_response[0x1]; + u8 umr_en[0x1]; + u8 a[0x1]; + u8 rw[0x1]; + u8 rr[0x1]; + u8 lw[0x1]; + u8 lr[0x1]; + u8 access_mode_1_0[0x2]; + u8 reserved_at_18[0x8]; + + u8 qpn[0x18]; + u8 mkey_7_0[0x8]; + + u8 reserved_at_40[0x20]; + + u8 length64[0x1]; + u8 bsf_en[0x1]; + u8 sync_umr[0x1]; + u8 reserved_at_63[0x2]; + u8 expected_sigerr_count[0x1]; + u8 reserved_at_66[0x1]; + u8 en_rinval[0x1]; + u8 pd[0x18]; + + u8 start_addr[0x40]; + + u8 len[0x40]; + + u8 bsf_octword_size[0x20]; + + u8 reserved_at_120[0x80]; + + u8 translations_octword_size[0x20]; + + u8 reserved_at_1c0[0x1b]; + u8 log_page_size[0x5]; + + u8 reserved_at_1e0[0x20]; +}; + +struct mlx5_ifc_pkey_bits { + u8 reserved_at_0[0x10]; + u8 pkey[0x10]; +}; + +struct mlx5_ifc_array128_auto_bits { + u8 array128_auto[16][0x8]; +}; + +struct mlx5_ifc_hca_vport_context_bits { + u8 field_select[0x20]; + + u8 reserved_at_20[0xe0]; + + u8 sm_virt_aware[0x1]; + u8 has_smi[0x1]; + u8 has_raw[0x1]; + u8 grh_required[0x1]; + u8 reserved_at_104[0xc]; + u8 port_physical_state[0x4]; + u8 vport_state_policy[0x4]; + u8 port_state[0x4]; + u8 vport_state[0x4]; + + u8 reserved_at_120[0x20]; + + u8 system_image_guid[0x40]; + + u8 port_guid[0x40]; + + u8 node_guid[0x40]; + + u8 cap_mask1[0x20]; + + u8 
cap_mask1_field_select[0x20]; + + u8 cap_mask2[0x20]; + + u8 cap_mask2_field_select[0x20]; + + u8 reserved_at_280[0x80]; + + u8 lid[0x10]; + u8 reserved_at_310[0x4]; + u8 init_type_reply[0x4]; + u8 lmc[0x3]; + u8 subnet_timeout[0x5]; + + u8 sm_lid[0x10]; + u8 sm_sl[0x4]; + u8 reserved_at_334[0xc]; + + u8 qkey_violation_counter[0x10]; + u8 pkey_violation_counter[0x10]; + + u8 reserved_at_360[0xca0]; +}; + +struct mlx5_ifc_esw_vport_context_bits { + u8 reserved_at_0[0x3]; + u8 vport_svlan_strip[0x1]; + u8 vport_cvlan_strip[0x1]; + u8 vport_svlan_insert[0x1]; + u8 vport_cvlan_insert[0x2]; + u8 reserved_at_8[0x18]; + + u8 reserved_at_20[0x20]; + + u8 svlan_cfi[0x1]; + u8 svlan_pcp[0x3]; + u8 svlan_id[0xc]; + u8 cvlan_cfi[0x1]; + u8 cvlan_pcp[0x3]; + u8 cvlan_id[0xc]; + + u8 reserved_at_60[0x7a0]; +}; + +enum { + MLX5_EQC_STATUS_OK = 0x0, + MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa, +}; + +enum { + MLX5_EQC_ST_ARMED = 0x9, + MLX5_EQC_ST_FIRED = 0xa, +}; + +struct mlx5_ifc_eqc_bits { + u8 status[0x4]; + u8 reserved_at_4[0x9]; + u8 ec[0x1]; + u8 oi[0x1]; + u8 reserved_at_f[0x5]; + u8 st[0x4]; + u8 reserved_at_18[0x8]; + + u8 reserved_at_20[0x20]; + + u8 reserved_at_40[0x14]; + u8 page_offset[0x6]; + u8 reserved_at_5a[0x6]; + + u8 reserved_at_60[0x3]; + u8 log_eq_size[0x5]; + u8 uar_page[0x18]; + + u8 reserved_at_80[0x20]; + + u8 reserved_at_a0[0x18]; + u8 intr[0x8]; + + u8 reserved_at_c0[0x3]; + u8 log_page_size[0x5]; + u8 reserved_at_c8[0x18]; + + u8 reserved_at_e0[0x60]; + + u8 reserved_at_140[0x8]; + u8 consumer_counter[0x18]; + + u8 reserved_at_160[0x8]; + u8 producer_counter[0x18]; + + u8 reserved_at_180[0x80]; +}; + +enum { + MLX5_DCTC_STATE_ACTIVE = 0x0, + MLX5_DCTC_STATE_DRAINING = 0x1, + MLX5_DCTC_STATE_DRAINED = 0x2, +}; + +enum { + MLX5_DCTC_CS_RES_DISABLE = 0x0, + MLX5_DCTC_CS_RES_NA = 0x1, + MLX5_DCTC_CS_RES_UP_TO_64B = 0x2, +}; + +enum { + MLX5_DCTC_MTU_256_BYTES = 0x1, + MLX5_DCTC_MTU_512_BYTES = 0x2, + MLX5_DCTC_MTU_1K_BYTES = 0x3, + MLX5_DCTC_MTU_2K_BYTES = 0x4, + MLX5_DCTC_MTU_4K_BYTES = 0x5, +}; + +struct mlx5_ifc_dctc_bits { + u8 reserved_at_0[0x4]; + u8 state[0x4]; + u8 reserved_at_8[0x18]; + + u8 reserved_at_20[0x8]; + u8 user_index[0x18]; + + u8 reserved_at_40[0x8]; + u8 cqn[0x18]; + + u8 counter_set_id[0x8]; + u8 atomic_mode[0x4]; + u8 rre[0x1]; + u8 rwe[0x1]; + u8 rae[0x1]; + u8 atomic_like_write_en[0x1]; + u8 latency_sensitive[0x1]; + u8 rlky[0x1]; + u8 free_ar[0x1]; + u8 reserved_at_73[0xd]; + + u8 reserved_at_80[0x8]; + u8 cs_res[0x8]; + u8 reserved_at_90[0x3]; + u8 min_rnr_nak[0x5]; + u8 reserved_at_98[0x8]; + + u8 reserved_at_a0[0x8]; + u8 srqn_xrqn[0x18]; + + u8 reserved_at_c0[0x8]; + u8 pd[0x18]; + + u8 tclass[0x8]; + u8 reserved_at_e8[0x4]; + u8 flow_label[0x14]; + + u8 dc_access_key[0x40]; + + u8 reserved_at_140[0x5]; + u8 mtu[0x3]; + u8 port[0x8]; + u8 pkey_index[0x10]; + + u8 reserved_at_160[0x8]; + u8 my_addr_index[0x8]; + u8 reserved_at_170[0x8]; + u8 hop_limit[0x8]; + + u8 dc_access_key_violation_count[0x20]; + + u8 reserved_at_1a0[0x14]; + u8 dei_cfi[0x1]; + u8 eth_prio[0x3]; + u8 ecn[0x2]; + u8 dscp[0x6]; + + u8 reserved_at_1c0[0x40]; +}; + +enum { + MLX5_CQC_STATUS_OK = 0x0, + MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9, + MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa, +}; + +enum { + MLX5_CQC_CQE_SZ_64_BYTES = 0x0, + MLX5_CQC_CQE_SZ_128_BYTES = 0x1, +}; + +enum { + MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6, + MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9, + MLX5_CQC_ST_FIRED = 0xa, +}; + +enum { + MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + 
MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + MLX5_CQ_PERIOD_NUM_MODES +}; + +struct mlx5_ifc_cqc_bits { + u8 status[0x4]; + u8 reserved_at_4[0x4]; + u8 cqe_sz[0x3]; + u8 cc[0x1]; + u8 reserved_at_c[0x1]; + u8 scqe_break_moderation_en[0x1]; + u8 oi[0x1]; + u8 cq_period_mode[0x2]; + u8 cqe_comp_en[0x1]; + u8 mini_cqe_res_format[0x2]; + u8 st[0x4]; + u8 reserved_at_18[0x8]; + + u8 reserved_at_20[0x20]; + + u8 reserved_at_40[0x14]; + u8 page_offset[0x6]; + u8 reserved_at_5a[0x6]; + + u8 reserved_at_60[0x3]; + u8 log_cq_size[0x5]; + u8 uar_page[0x18]; + + u8 reserved_at_80[0x4]; + u8 cq_period[0xc]; + u8 cq_max_count[0x10]; + + u8 reserved_at_a0[0x18]; + u8 c_eqn[0x8]; + + u8 reserved_at_c0[0x3]; + u8 log_page_size[0x5]; + u8 reserved_at_c8[0x18]; + + u8 reserved_at_e0[0x20]; + + u8 reserved_at_100[0x8]; + u8 last_notified_index[0x18]; + + u8 reserved_at_120[0x8]; + u8 last_solicit_index[0x18]; + + u8 reserved_at_140[0x8]; + u8 consumer_counter[0x18]; + + u8 reserved_at_160[0x8]; + u8 producer_counter[0x18]; + + u8 reserved_at_180[0x40]; + + u8 dbr_addr[0x40]; +}; + +union mlx5_ifc_cong_control_roce_ecn_auto_bits { + struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp; + struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp; + struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np; + u8 reserved_at_0[0x800]; +}; + +struct mlx5_ifc_query_adapter_param_block_bits { + u8 reserved_at_0[0xc0]; + + u8 reserved_at_c0[0x8]; + u8 ieee_vendor_id[0x18]; + + u8 reserved_at_e0[0x10]; + u8 vsd_vendor_id[0x10]; + + u8 vsd[208][0x8]; + + u8 vsd_contd_psid[16][0x8]; +}; + +enum { + MLX5_XRQC_STATE_GOOD = 0x0, + MLX5_XRQC_STATE_ERROR = 0x1, +}; + +enum { + MLX5_XRQC_TOPOLOGY_NO_SPECIAL_TOPOLOGY = 0x0, + MLX5_XRQC_TOPOLOGY_TAG_MATCHING = 0x1, +}; + +enum { + MLX5_XRQC_OFFLOAD_RNDV = 0x1, +}; + +struct mlx5_ifc_tag_matching_topology_context_bits { + u8 log_matching_list_sz[0x4]; + u8 reserved_at_4[0xc]; + u8 append_next_index[0x10]; + + u8 sw_phase_cnt[0x10]; + u8 hw_phase_cnt[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_xrqc_bits { + u8 state[0x4]; + u8 rlkey[0x1]; + u8 reserved_at_5[0xf]; + u8 topology[0x4]; + u8 reserved_at_18[0x4]; + u8 offload[0x4]; + + u8 reserved_at_20[0x8]; + u8 user_index[0x18]; + + u8 reserved_at_40[0x8]; + u8 cqn[0x18]; + + u8 reserved_at_60[0xa0]; + + struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; + + u8 reserved_at_180[0x280]; + + struct mlx5_ifc_wq_bits wq; +}; + +union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { + struct mlx5_ifc_modify_field_select_bits modify_field_select; + struct mlx5_ifc_resize_field_select_bits resize_field_select; + u8 reserved_at_0[0x20]; +}; + +union mlx5_ifc_field_select_802_1_r_roce_auto_bits { + struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp; + struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp; + struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np; + u8 reserved_at_0[0x20]; +}; + +union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { + struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; + struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; + struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; + struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; + struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; + struct 
mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; + struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; + struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; + struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs; + u8 reserved_at_0[0x7c0]; +}; + +union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits { + struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout; + u8 reserved_at_0[0x7c0]; +}; + +union mlx5_ifc_event_auto_bits { + struct mlx5_ifc_comp_event_bits comp_event; + struct mlx5_ifc_dct_events_bits dct_events; + struct mlx5_ifc_qp_events_bits qp_events; + struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event; + struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event; + struct mlx5_ifc_cq_error_bits cq_error; + struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged; + struct mlx5_ifc_port_state_change_event_bits port_state_change_event; + struct mlx5_ifc_gpio_event_bits gpio_event; + struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event; + struct mlx5_ifc_stall_vl_event_bits stall_vl_event; + struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event; + u8 reserved_at_0[0xe0]; +}; + +struct mlx5_ifc_health_buffer_bits { + u8 reserved_at_0[0x100]; + + u8 assert_existptr[0x20]; + + u8 assert_callra[0x20]; + + u8 reserved_at_140[0x40]; + + u8 fw_version[0x20]; + + u8 hw_id[0x20]; + + u8 reserved_at_1c0[0x20]; + + u8 irisc_index[0x8]; + u8 synd[0x8]; + u8 ext_synd[0x10]; +}; + +struct mlx5_ifc_register_loopback_control_bits { + u8 no_lb[0x1]; + u8 reserved_at_1[0x7]; + u8 port[0x8]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x60]; +}; + +struct mlx5_ifc_vport_tc_element_bits { + u8 traffic_class[0x4]; + u8 reserved_at_4[0xc]; + u8 vport_number[0x10]; +}; + +struct mlx5_ifc_vport_element_bits { + u8 reserved_at_0[0x10]; + u8 vport_number[0x10]; +}; + +enum { + TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, + TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, + TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, +}; + +struct mlx5_ifc_tsar_element_bits { + u8 reserved_at_0[0x8]; + u8 tsar_type[0x8]; + u8 reserved_at_10[0x10]; +}; + +enum { + MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0, + MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1, +}; + +struct mlx5_ifc_teardown_hca_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x3f]; + + u8 force_state[0x1]; +}; + +enum { + MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, + MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1, +}; + +struct mlx5_ifc_teardown_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 profile[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_sqerr2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_sqerr2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_at_a0[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_at_800[0x80]; +}; + +struct mlx5_ifc_sqd2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct 
mlx5_ifc_sqd2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_at_a0[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_at_800[0x80]; +}; + +struct mlx5_ifc_set_roce_address_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_roce_address_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 roce_address_index[0x10]; + u8 reserved_at_50[0xc]; + u8 vhca_port_num[0x4]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_roce_addr_layout_bits roce_address; +}; + +struct mlx5_ifc_set_mad_demux_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0, + MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2, +}; + +struct mlx5_ifc_set_mad_demux_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x6]; + u8 demux_mode[0x2]; + u8 reserved_at_68[0x18]; +}; + +struct mlx5_ifc_set_l2_table_entry_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_l2_table_entry_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x60]; + + u8 reserved_at_a0[0x8]; + u8 table_index[0x18]; + + u8 reserved_at_c0[0x20]; + + u8 reserved_at_e0[0x13]; + u8 vlan_valid[0x1]; + u8 vlan[0xc]; + + struct mlx5_ifc_mac_address_layout_bits mac_address; + + u8 reserved_at_140[0xc0]; +}; + +struct mlx5_ifc_set_issi_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_issi_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 current_issi[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_set_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + union mlx5_ifc_hca_cap_union_bits capability; +}; + +enum { + MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3 +}; + +struct mlx5_ifc_set_fte_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_fte_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + + u8 reserved_at_c0[0x18]; + u8 modify_enable_mask[0x8]; + + u8 reserved_at_e0[0x20]; + + u8 flow_index[0x20]; + + u8 reserved_at_120[0xe0]; + + struct mlx5_ifc_flow_context_bits flow_context; +}; + +struct mlx5_ifc_rts2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + 
u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_rts2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_at_a0[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_at_800[0x80]; +}; + +struct mlx5_ifc_rtr2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_rtr2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_at_a0[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_at_800[0x80]; +}; + +struct mlx5_ifc_rst2init_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_rst2init_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_at_a0[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_at_800[0x80]; +}; + +struct mlx5_ifc_query_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_xrqc_bits xrq_context; +}; + +struct mlx5_ifc_query_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; + + u8 reserved_at_280[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +enum { + MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0, + MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1, +}; + +struct mlx5_ifc_query_vport_state_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x18]; + u8 admin_state[0x4]; + u8 state[0x4]; +}; + +enum { + MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0, + MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, +}; + +struct mlx5_ifc_query_vport_state_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_vnic_env_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env; +}; + +enum { + MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0, +}; + +struct mlx5_ifc_query_vnic_env_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_vport_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + 
struct mlx5_ifc_traffic_counter_bits received_errors; + + struct mlx5_ifc_traffic_counter_bits transmit_errors; + + struct mlx5_ifc_traffic_counter_bits received_ib_unicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast; + + struct mlx5_ifc_traffic_counter_bits received_ib_multicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast; + + struct mlx5_ifc_traffic_counter_bits received_eth_broadcast; + + struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast; + + struct mlx5_ifc_traffic_counter_bits received_eth_unicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast; + + struct mlx5_ifc_traffic_counter_bits received_eth_multicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast; + + u8 reserved_at_680[0xa00]; +}; + +enum { + MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0, +}; + +struct mlx5_ifc_query_vport_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xb]; + u8 port_num[0x4]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x60]; + + u8 clear[0x1]; + u8 reserved_at_c1[0x1f]; + + u8 reserved_at_e0[0x20]; +}; + +struct mlx5_ifc_query_tis_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_tisc_bits tis_context; +}; + +struct mlx5_ifc_query_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 tisn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_tir_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_tirc_bits tir_context; +}; + +struct mlx5_ifc_query_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 tirn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_srq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_srqc_bits srq_context_entry; + + u8 reserved_at_280[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 srqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_sq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_sqc_bits sq_context; +}; + +struct mlx5_ifc_query_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 sqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_special_contexts_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 dump_fill_mkey[0x20]; + + u8 resd_lkey[0x20]; + + u8 null_mkey[0x20]; + + u8 reserved_at_a0[0x60]; +}; + +struct mlx5_ifc_query_special_contexts_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_query_scheduling_element_out_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_scheduling_context_bits scheduling_context; + + u8 reserved_at_300[0x100]; +}; + +enum { + 
SCHEDULING_HIERARCHY_E_SWITCH = 0x2, +}; + +struct mlx5_ifc_query_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_80[0x180]; +}; + +struct mlx5_ifc_query_rqt_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_rqtc_bits rqt_context; +}; + +struct mlx5_ifc_query_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 rqtn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_rq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_rqc_bits rq_context; +}; + +struct mlx5_ifc_query_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 rqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_roce_address_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_roce_addr_layout_bits roce_address; +}; + +struct mlx5_ifc_query_roce_address_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 roce_address_index[0x10]; + u8 reserved_at_50[0xc]; + u8 vhca_port_num[0x4]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_rmp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_rmpc_bits rmp_context; +}; + +struct mlx5_ifc_query_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 rmpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 opt_param_mask[0x20]; + + u8 reserved_at_a0[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_at_800[0x80]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_q_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 rx_write_requests[0x20]; + + u8 reserved_at_a0[0x20]; + + u8 rx_read_requests[0x20]; + + u8 reserved_at_e0[0x20]; + + u8 rx_atomic_requests[0x20]; + + u8 reserved_at_120[0x20]; + + u8 rx_dct_connect[0x20]; + + u8 reserved_at_160[0x20]; + + u8 out_of_buffer[0x20]; + + u8 reserved_at_1a0[0x20]; + + u8 out_of_sequence[0x20]; + + u8 reserved_at_1e0[0x20]; + + u8 duplicate_request[0x20]; + + u8 reserved_at_220[0x20]; + + u8 rnr_nak_retry_err[0x20]; + + u8 reserved_at_260[0x20]; + + u8 packet_seq_err[0x20]; + + u8 reserved_at_2a0[0x20]; + + u8 implied_nak_seq_err[0x20]; + + u8 reserved_at_2e0[0x20]; + + u8 local_ack_timeout_err[0x20]; + + u8 reserved_at_320[0xa0]; + + u8 resp_local_length_error[0x20]; + + u8 req_local_length_error[0x20]; + + u8 resp_local_qp_error[0x20]; + + u8 local_operation_error[0x20]; + + u8 resp_local_protection[0x20]; + + u8 req_local_protection[0x20]; + + u8 resp_cqe_error[0x20]; + + u8 req_cqe_error[0x20]; + + u8 
req_mw_binding[0x20]; + + u8 req_bad_response[0x20]; + + u8 req_remote_invalid_request[0x20]; + + u8 resp_remote_invalid_request[0x20]; + + u8 req_remote_access_errors[0x20]; + + u8 resp_remote_access_errors[0x20]; + + u8 req_remote_operation_errors[0x20]; + + u8 req_transport_retries_exceeded[0x20]; + + u8 cq_overflow[0x20]; + + u8 resp_cqe_flush_error[0x20]; + + u8 req_cqe_flush_error[0x20]; + + u8 reserved_at_620[0x1e0]; +}; + +struct mlx5_ifc_query_q_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x80]; + + u8 clear[0x1]; + u8 reserved_at_c1[0x1f]; + + u8 reserved_at_e0[0x18]; + u8 counter_set_id[0x8]; +}; + +struct mlx5_ifc_query_pages_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + + u8 num_pages[0x20]; +}; + +enum { + MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1, + MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2, + MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3, +}; + +struct mlx5_ifc_query_pages_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_nic_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_nic_vport_context_bits nic_vport_context; +}; + +struct mlx5_ifc_query_nic_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x5]; + u8 allowed_list_type[0x3]; + u8 reserved_at_68[0x18]; +}; + +struct mlx5_ifc_query_mkey_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_mkc_bits memory_key_mkey_entry; + + u8 reserved_at_280[0x600]; + + u8 bsf0_klm0_pas_mtt0_1[16][0x8]; + + u8 bsf1_klm1_pas_mtt2_3[16][0x8]; +}; + +struct mlx5_ifc_query_mkey_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 mkey_index[0x18]; + + u8 pg_access[0x1]; + u8 reserved_at_61[0x1f]; +}; + +struct mlx5_ifc_query_mad_demux_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 mad_dumux_parameters_block[0x20]; +}; + +struct mlx5_ifc_query_mad_demux_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_query_l2_table_entry_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0xa0]; + + u8 reserved_at_e0[0x13]; + u8 vlan_valid[0x1]; + u8 vlan[0xc]; + + struct mlx5_ifc_mac_address_layout_bits mac_address; + + u8 reserved_at_140[0xc0]; +}; + +struct mlx5_ifc_query_l2_table_entry_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x60]; + + u8 reserved_at_a0[0x8]; + u8 table_index[0x18]; + + u8 reserved_at_c0[0x140]; +}; + +struct mlx5_ifc_query_issi_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 current_issi[0x10]; + + u8 reserved_at_60[0xa0]; + + u8 reserved_at_100[76][0x8]; + u8 supported_issi_dw0[0x20]; +}; + +struct 
mlx5_ifc_query_issi_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_driver_version_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_driver_version_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + u8 driver_version[64][0x8]; +}; + +struct mlx5_ifc_query_hca_vport_pkey_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_pkey_bits pkey[0]; +}; + +struct mlx5_ifc_query_hca_vport_pkey_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xb]; + u8 port_num[0x4]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x10]; + u8 pkey_index[0x10]; +}; + +enum { + MLX5_HCA_VPORT_SEL_PORT_GUID = 1 << 0, + MLX5_HCA_VPORT_SEL_NODE_GUID = 1 << 1, + MLX5_HCA_VPORT_SEL_STATE_POLICY = 1 << 2, +}; + +struct mlx5_ifc_query_hca_vport_gid_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x20]; + + u8 gids_num[0x10]; + u8 reserved_at_70[0x10]; + + struct mlx5_ifc_array128_auto_bits gid[0]; +}; + +struct mlx5_ifc_query_hca_vport_gid_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xb]; + u8 port_num[0x4]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x10]; + u8 gid_index[0x10]; +}; + +struct mlx5_ifc_query_hca_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_hca_vport_context_bits hca_vport_context; +}; + +struct mlx5_ifc_query_hca_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xb]; + u8 port_num[0x4]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + union mlx5_ifc_hca_cap_union_bits capability; +}; + +struct mlx5_ifc_query_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_query_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x80]; + + u8 reserved_at_c0[0x8]; + u8 level[0x8]; + u8 reserved_at_d0[0x8]; + u8 log_size[0x8]; + + u8 reserved_at_e0[0x120]; +}; + +struct mlx5_ifc_query_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + + u8 reserved_at_c0[0x140]; +}; + +struct mlx5_ifc_query_fte_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x1c0]; + + struct mlx5_ifc_flow_context_bits flow_context; +}; + +struct mlx5_ifc_query_fte_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + + u8 
reserved_at_c0[0x40]; + + u8 flow_index[0x20]; + + u8 reserved_at_120[0xe0]; +}; + +enum { + MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, + MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, + MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, + MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3, +}; + +struct mlx5_ifc_query_flow_group_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0xa0]; + + u8 start_flow_index[0x20]; + + u8 reserved_at_100[0x20]; + + u8 end_flow_index[0x20]; + + u8 reserved_at_140[0xa0]; + + u8 reserved_at_1e0[0x18]; + u8 match_criteria_enable[0x8]; + + struct mlx5_ifc_fte_match_param_bits match_criteria; + + u8 reserved_at_1200[0xe00]; +}; + +struct mlx5_ifc_query_flow_group_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + + u8 group_id[0x20]; + + u8 reserved_at_e0[0x120]; +}; + +struct mlx5_ifc_query_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_traffic_counter_bits flow_statistics[0]; +}; + +struct mlx5_ifc_query_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x80]; + + u8 clear[0x1]; + u8 reserved_at_c1[0xf]; + u8 num_of_counters[0x10]; + + u8 flow_counter_id[0x20]; +}; + +struct mlx5_ifc_query_esw_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_esw_vport_context_bits esw_vport_context; +}; + +struct mlx5_ifc_query_esw_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_modify_esw_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_esw_vport_context_fields_select_bits { + u8 reserved_at_0[0x1c]; + u8 vport_cvlan_insert[0x1]; + u8 vport_svlan_insert[0x1]; + u8 vport_cvlan_strip[0x1]; + u8 vport_svlan_strip[0x1]; +}; + +struct mlx5_ifc_modify_esw_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + struct mlx5_ifc_esw_vport_context_fields_select_bits field_select; + + struct mlx5_ifc_esw_vport_context_bits esw_vport_context; +}; + +struct mlx5_ifc_query_eq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_eqc_bits eq_context_entry; + + u8 reserved_at_280[0x40]; + + u8 event_bitmask[0x40]; + + u8 reserved_at_300[0x580]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_eq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x18]; + u8 eq_number[0x8]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_encap_header_in_bits { + u8 reserved_at_0[0x5]; + u8 header_type[0x3]; + u8 reserved_at_8[0xe]; + u8 encap_header_size[0xa]; + + u8 reserved_at_20[0x10]; + u8 encap_header[2][0x8]; + + u8 
more_encap_header[0][0x8]; +}; + +struct mlx5_ifc_query_encap_header_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0xa0]; + + struct mlx5_ifc_encap_header_in_bits encap_header[0]; +}; + +struct mlx5_ifc_query_encap_header_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 encap_id[0x20]; + + u8 reserved_at_60[0xa0]; +}; + +struct mlx5_ifc_alloc_encap_header_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 encap_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_encap_header_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xa0]; + + struct mlx5_ifc_encap_header_in_bits encap_header; +}; + +struct mlx5_ifc_dealloc_encap_header_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_encap_header_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_20[0x10]; + u8 op_mod[0x10]; + + u8 encap_id[0x20]; + + u8 reserved_60[0x20]; +}; + +struct mlx5_ifc_set_action_in_bits { + u8 action_type[0x4]; + u8 field[0xc]; + u8 reserved_at_10[0x3]; + u8 offset[0x5]; + u8 reserved_at_18[0x3]; + u8 length[0x5]; + + u8 data[0x20]; +}; + +struct mlx5_ifc_add_action_in_bits { + u8 action_type[0x4]; + u8 field[0xc]; + u8 reserved_at_10[0x10]; + + u8 data[0x20]; +}; + +union mlx5_ifc_set_action_in_add_action_in_auto_bits { + struct mlx5_ifc_set_action_in_bits set_action_in; + struct mlx5_ifc_add_action_in_bits add_action_in; + u8 reserved_at_0[0x40]; +}; + +enum { + MLX5_ACTION_TYPE_SET = 0x1, + MLX5_ACTION_TYPE_ADD = 0x2, +}; + +enum { + MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16 = 0x1, + MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0 = 0x2, + MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE = 0x3, + MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16 = 0x4, + MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0 = 0x5, + MLX5_ACTION_IN_FIELD_OUT_IP_DSCP = 0x6, + MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS = 0x7, + MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT = 0x8, + MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT = 0x9, + MLX5_ACTION_IN_FIELD_OUT_IP_TTL = 0xa, + MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT = 0xb, + MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT = 0xc, + MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96 = 0xd, + MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64 = 0xe, + MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32 = 0xf, + MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0 = 0x10, + MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96 = 0x11, + MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64 = 0x12, + MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32 = 0x13, + MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14, + MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15, + MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16, + MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47, +}; + +struct mlx5_ifc_alloc_modify_header_context_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 modify_header_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_modify_header_context_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 table_type[0x8]; + u8 reserved_at_68[0x10]; + u8 num_of_actions[0x8]; + + union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0]; +}; + +struct mlx5_ifc_dealloc_modify_header_context_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_modify_header_context_in_bits { + 
u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 modify_header_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_dct_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_dctc_bits dct_context_entry; + + u8 reserved_at_280[0x180]; +}; + +struct mlx5_ifc_query_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 dctn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_cq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_cqc_bits cq_context; + + u8 reserved_at_280[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 cqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_cong_status_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x20]; + + u8 enable[0x1]; + u8 tag_enable[0x1]; + u8 reserved_at_62[0x1e]; +}; + +struct mlx5_ifc_query_cong_status_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x18]; + u8 priority[0x4]; + u8 cong_protocol[0x4]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_cong_statistics_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 rp_cur_flows[0x20]; + + u8 sum_flows[0x20]; + + u8 rp_cnp_ignored_high[0x20]; + + u8 rp_cnp_ignored_low[0x20]; + + u8 rp_cnp_handled_high[0x20]; + + u8 rp_cnp_handled_low[0x20]; + + u8 reserved_at_140[0x100]; + + u8 time_stamp_high[0x20]; + + u8 time_stamp_low[0x20]; + + u8 accumulators_period[0x20]; + + u8 np_ecn_marked_roce_packets_high[0x20]; + + u8 np_ecn_marked_roce_packets_low[0x20]; + + u8 np_cnp_sent_high[0x20]; + + u8 np_cnp_sent_low[0x20]; + + u8 reserved_at_320[0x560]; +}; + +struct mlx5_ifc_query_cong_statistics_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 clear[0x1]; + u8 reserved_at_41[0x1f]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_cong_params_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; +}; + +struct mlx5_ifc_query_cong_params_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x1c]; + u8 cong_protocol[0x4]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_adapter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct; +}; + +struct mlx5_ifc_query_adapter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_qp_2rst_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_qp_2rst_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct 
mlx5_ifc_qp_2err_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_qp_2err_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_page_fault_resume_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_page_fault_resume_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 error[0x1]; + u8 reserved_at_41[0x4]; + u8 page_fault_type[0x3]; + u8 wq_number[0x18]; + + u8 reserved_at_60[0x8]; + u8 token[0x18]; +}; + +struct mlx5_ifc_nop_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_nop_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_vport_state_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_vport_state_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x18]; + u8 admin_state[0x4]; + u8 reserved_at_7c[0x4]; +}; + +struct mlx5_ifc_modify_tis_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_tis_bitmask_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x1d]; + u8 lag_tx_port_affinity[0x1]; + u8 strict_lag_tx_port_affinity[0x1]; + u8 prio[0x1]; +}; + +struct mlx5_ifc_modify_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 tisn[0x18]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_modify_tis_bitmask_bits bitmask; + + u8 reserved_at_c0[0x40]; + + struct mlx5_ifc_tisc_bits ctx; +}; + +struct mlx5_ifc_modify_tir_bitmask_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x1b]; + u8 self_lb_en[0x1]; + u8 reserved_at_3c[0x1]; + u8 hash[0x1]; + u8 reserved_at_3e[0x1]; + u8 lro[0x1]; +}; + +struct mlx5_ifc_modify_tir_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 tirn[0x18]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_modify_tir_bitmask_bits bitmask; + + u8 reserved_at_c0[0x40]; + + struct mlx5_ifc_tirc_bits ctx; +}; + +struct mlx5_ifc_modify_sq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 sq_state[0x4]; + u8 reserved_at_44[0x4]; + u8 sqn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_at_c0[0x40]; + + struct mlx5_ifc_sqc_bits ctx; +}; + +struct mlx5_ifc_modify_scheduling_element_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x1c0]; +}; + +enum { + MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1, + 
MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2, +}; + +struct mlx5_ifc_modify_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_80[0x20]; + + u8 modify_bitmask[0x20]; + + u8 reserved_at_c0[0x40]; + + struct mlx5_ifc_scheduling_context_bits scheduling_context; + + u8 reserved_at_300[0x100]; +}; + +struct mlx5_ifc_modify_rqt_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_rqt_bitmask_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x1f]; + u8 rqn_list[0x1]; +}; + +struct mlx5_ifc_modify_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 rqtn[0x18]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_rqt_bitmask_bits bitmask; + + u8 reserved_at_c0[0x40]; + + struct mlx5_ifc_rqtc_bits ctx; +}; + +struct mlx5_ifc_modify_rq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3, +}; + +struct mlx5_ifc_modify_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 rq_state[0x4]; + u8 reserved_at_44[0x4]; + u8 rqn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_at_c0[0x40]; + + struct mlx5_ifc_rqc_bits ctx; +}; + +struct mlx5_ifc_modify_rmp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_rmp_bitmask_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x1f]; + u8 lwm[0x1]; +}; + +struct mlx5_ifc_modify_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 rmp_state[0x4]; + u8 reserved_at_44[0x4]; + u8 rmpn[0x18]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_rmp_bitmask_bits bitmask; + + u8 reserved_at_c0[0x40]; + + struct mlx5_ifc_rmpc_bits ctx; +}; + +struct mlx5_ifc_modify_nic_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_nic_vport_field_select_bits { + u8 reserved_at_0[0x12]; + u8 affiliation[0x1]; + u8 reserved_at_e[0x1]; + u8 disable_uc_local_lb[0x1]; + u8 disable_mc_local_lb[0x1]; + u8 node_guid[0x1]; + u8 port_guid[0x1]; + u8 min_inline[0x1]; + u8 mtu[0x1]; + u8 change_event[0x1]; + u8 promisc[0x1]; + u8 permanent_address[0x1]; + u8 addresses_list[0x1]; + u8 roce_en[0x1]; + u8 reserved_at_1f[0x1]; +}; + +struct mlx5_ifc_modify_nic_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + struct mlx5_ifc_modify_nic_vport_field_select_bits field_select; + + u8 reserved_at_80[0x780]; + + struct mlx5_ifc_nic_vport_context_bits nic_vport_context; +}; + +struct mlx5_ifc_modify_hca_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_hca_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + 
u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xb]; + u8 port_num[0x4]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_hca_vport_context_bits hca_vport_context; +}; + +struct mlx5_ifc_modify_cq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0, + MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1, +}; + +struct mlx5_ifc_modify_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 cqn[0x18]; + + union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select; + + struct mlx5_ifc_cqc_bits cq_context; + + u8 reserved_at_280[0x60]; + + u8 cq_umem_valid[0x1]; + u8 reserved_at_2e1[0x1f]; + + u8 reserved_at_300[0x580]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_modify_cong_status_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_cong_status_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x18]; + u8 priority[0x4]; + u8 cong_protocol[0x4]; + + u8 enable[0x1]; + u8 tag_enable[0x1]; + u8 reserved_at_62[0x1e]; +}; + +struct mlx5_ifc_modify_cong_params_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_cong_params_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x1c]; + u8 cong_protocol[0x4]; + + union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select; + + u8 reserved_at_80[0x80]; + + union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; +}; + +struct mlx5_ifc_manage_pages_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 output_num_entries[0x20]; + + u8 reserved_at_60[0x20]; + + u8 pas[0][0x40]; +}; + +enum { + MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0, + MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1, + MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2, +}; + +struct mlx5_ifc_manage_pages_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + + u8 input_num_entries[0x20]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_mad_ifc_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 response_mad_packet[256][0x8]; +}; + +struct mlx5_ifc_mad_ifc_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 remote_lid[0x10]; + u8 reserved_at_50[0x8]; + u8 port[0x8]; + + u8 reserved_at_60[0x20]; + + u8 mad[256][0x8]; +}; + +struct mlx5_ifc_init_hca_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_init_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + u8 sw_owner_id[4][0x20]; +}; + +struct mlx5_ifc_init2rtr_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_init2rtr_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 
op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_at_a0[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_at_800[0x80]; +}; + +struct mlx5_ifc_init2init_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_init2init_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_at_a0[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_at_800[0x80]; +}; + +struct mlx5_ifc_get_dropped_packet_log_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 packet_headers_log[128][0x8]; + + u8 packet_syndrome[64][0x8]; +}; + +struct mlx5_ifc_get_dropped_packet_log_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_gen_eqe_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x18]; + u8 eq_number[0x8]; + + u8 reserved_at_60[0x20]; + + u8 eqe[64][0x8]; +}; + +struct mlx5_ifc_gen_eq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_enable_hca_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x20]; +}; + +struct mlx5_ifc_enable_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_drain_dct_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_drain_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 dctn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_disable_hca_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x20]; +}; + +struct mlx5_ifc_disable_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_detach_from_mcg_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_detach_from_mcg_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 multicast_gid[16][0x8]; +}; + +struct mlx5_ifc_destroy_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; 
+ u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_tis_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 tisn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_tir_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 tirn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_srq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 srqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_sq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 sqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_scheduling_element_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x1c0]; +}; + +struct mlx5_ifc_destroy_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_80[0x180]; +}; + +struct mlx5_ifc_destroy_rqt_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 rqtn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_rq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 rqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_set_delay_drop_params_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x10]; + u8 delay_drop_timeout[0x10]; +}; + +struct mlx5_ifc_set_delay_drop_params_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_rmp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 rmpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + 
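(Illustrative aside, not part of the patch.) The *_in/*_out layouts above are not accessed directly; they are consumed through the MLX5_ST_SZ_DW()/MLX5_SET() accessors that ship alongside this header and driven through mlx5_cmd_exec(). A minimal sketch of the usual teardown pattern, assuming a caller-supplied mlx5_core_dev and RQT number, could look like this:

/*
 * Sketch only: uses the destroy_rqt_in/out layouts defined above.
 * MLX5_ST_SZ_DW(), MLX5_SET() and mlx5_cmd_exec() come from
 * linux/mlx5/device.h and linux/mlx5/driver.h; "dev" and "rqtn" are
 * assumed to be provided by the caller.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

static int example_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
        u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
        u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {};

        /* opcode and rqtn correspond to the destroy_rqt_in_bits fields above */
        MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
        MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);

        /* out receives only status/syndrome for destroy commands */
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

The same opcode/object-number pattern applies to the other destroy_* commands in this block, with only the field name (qpn, sqn, rqn, tirn, ...) changing.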
+struct mlx5_ifc_destroy_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_psv_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_psv_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 psvn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_mkey_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_mkey_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 mkey_index[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + + u8 reserved_at_c0[0x140]; +}; + +struct mlx5_ifc_destroy_flow_group_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_flow_group_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + + u8 group_id[0x20]; + + u8 reserved_at_e0[0x120]; +}; + +struct mlx5_ifc_destroy_eq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_eq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x18]; + u8 eq_number[0x8]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_dct_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 dctn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_cq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 cqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_delete_vxlan_udp_dport_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_delete_vxlan_udp_dport_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x10]; + u8 vxlan_udp_port[0x10]; +}; + +struct mlx5_ifc_delete_l2_table_entry_out_bits { + u8 status[0x8]; + u8 
reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_delete_l2_table_entry_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x60]; + + u8 reserved_at_a0[0x8]; + u8 table_index[0x18]; + + u8 reserved_at_c0[0x140]; +}; + +struct mlx5_ifc_delete_fte_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_delete_fte_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + + u8 reserved_at_c0[0x40]; + + u8 flow_index[0x20]; + + u8 reserved_at_120[0xe0]; +}; + +struct mlx5_ifc_dealloc_xrcd_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_xrcd_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrcd[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_dealloc_uar_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_uar_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 uar[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_dealloc_transport_domain_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_transport_domain_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 transport_domain[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_dealloc_q_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_q_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x18]; + u8 counter_set_id[0x8]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_dealloc_pd_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_pd_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 pd[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_dealloc_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 flow_counter_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_xrqc_bits xrq_context; +}; + +struct mlx5_ifc_create_xrc_srq_out_bits { + u8 status[0x8]; 
+ u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; + + u8 reserved_at_280[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_tis_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 tisn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_tisc_bits ctx; +}; + +struct mlx5_ifc_create_tir_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 tirn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_tirc_bits ctx; +}; + +struct mlx5_ifc_create_srq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 srqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_srqc_bits srq_context_entry; + + u8 reserved_at_280[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_sq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 sqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_sqc_bits ctx; +}; + +struct mlx5_ifc_create_scheduling_element_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 scheduling_element_id[0x20]; + + u8 reserved_at_a0[0x160]; +}; + +struct mlx5_ifc_create_scheduling_element_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 scheduling_hierarchy[0x8]; + u8 reserved_at_48[0x18]; + + u8 reserved_at_60[0xa0]; + + struct mlx5_ifc_scheduling_context_bits scheduling_context; + + u8 reserved_at_300[0x100]; +}; + +struct mlx5_ifc_create_rqt_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 rqtn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_rqtc_bits rqt_context; +}; + +struct mlx5_ifc_create_rq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 rqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_rqc_bits ctx; +}; + +struct mlx5_ifc_create_rmp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 rmpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + 
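(Illustrative aside, not part of the patch.) The create_* commands in this block follow a complementary pattern: the input carries the object context (here the tisc layout reached through the "ctx" field of create_tis_in), and the output returns the new object number. A hedged sketch, assuming "dev" and a transport domain "tdn" are caller-provided and that tisc_bits (defined earlier in this header) exposes a transport_domain field:

/*
 * Sketch only: compose a CREATE_TIS command from the create_tis_in/out
 * layouts above and read back the allocated tisn.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

static int example_create_tis(struct mlx5_core_dev *dev, u32 tdn, u32 *tisn)
{
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
        u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); /* tisc context */
        int err;

        MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
        MLX5_SET(tisc, tisc, transport_domain, tdn); /* assumed tisc field */

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (!err)
                *tisn = MLX5_GET(create_tis_out, out, tisn);

        return err;
}

create_tir, create_rqt, create_rq and create_rmp below differ only in which context struct is embedded and which object number comes back in the output.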
+struct mlx5_ifc_create_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; + + struct mlx5_ifc_rmpc_bits ctx; +}; + +struct mlx5_ifc_create_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + u8 opt_param_mask[0x20]; + + u8 reserved_at_a0[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_at_800[0x80]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_psv_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 reserved_at_80[0x8]; + u8 psv0_index[0x18]; + + u8 reserved_at_a0[0x8]; + u8 psv1_index[0x18]; + + u8 reserved_at_c0[0x8]; + u8 psv2_index[0x18]; + + u8 reserved_at_e0[0x8]; + u8 psv3_index[0x18]; +}; + +struct mlx5_ifc_create_psv_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 num_psv[0x4]; + u8 reserved_at_44[0x4]; + u8 pd[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_mkey_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 mkey_index[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_mkey_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 pg_access[0x1]; + u8 reserved_at_61[0x1f]; + + struct mlx5_ifc_mkc_bits memory_key_mkey_entry; + + u8 reserved_at_280[0x80]; + + u8 translations_octword_actual_size[0x20]; + + u8 reserved_at_320[0x560]; + + u8 klm_pas_mtt[0][0x20]; +}; + +struct mlx5_ifc_create_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 table_id[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_flow_table_context_bits { + u8 encap_en[0x1]; + u8 decap_en[0x1]; + u8 reserved_at_2[0x2]; + u8 table_miss_action[0x4]; + u8 level[0x8]; + u8 reserved_at_10[0x8]; + u8 log_size[0x8]; + + u8 reserved_at_20[0x8]; + u8 table_miss_id[0x18]; + + u8 reserved_at_40[0x8]; + u8 lag_master_next_table_id[0x18]; + + u8 reserved_at_60[0xe0]; +}; + +struct mlx5_ifc_create_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x20]; + + struct mlx5_ifc_flow_table_context_bits flow_table_context; +}; + +struct mlx5_ifc_create_flow_group_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 group_id[0x18]; + + u8 reserved_at_60[0x20]; +}; + +enum { + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, +}; + +struct mlx5_ifc_create_flow_group_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 
reserved_at_60[0x20]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + + u8 source_eswitch_owner_vhca_id_valid[0x1]; + + u8 reserved_at_c1[0x1f]; + + u8 start_flow_index[0x20]; + + u8 reserved_at_100[0x20]; + + u8 end_flow_index[0x20]; + + u8 reserved_at_140[0xa0]; + + u8 reserved_at_1e0[0x18]; + u8 match_criteria_enable[0x8]; + + struct mlx5_ifc_fte_match_param_bits match_criteria; + + u8 reserved_at_1200[0xe00]; +}; + +struct mlx5_ifc_create_eq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x18]; + u8 eq_number[0x8]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_eq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_eqc_bits eq_context_entry; + + u8 reserved_at_280[0x40]; + + u8 event_bitmask[0x40]; + + u8 reserved_at_300[0x580]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_dct_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 dctn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_dctc_bits dct_context_entry; + + u8 reserved_at_280[0x180]; +}; + +struct mlx5_ifc_create_cq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 cqn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_create_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_cqc_bits cq_context; + + u8 reserved_at_280[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_config_int_moderation_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x4]; + u8 min_delay[0xc]; + u8 int_vector[0x10]; + + u8 reserved_at_60[0x20]; +}; + +enum { + MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0, + MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1, +}; + +struct mlx5_ifc_config_int_moderation_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x4]; + u8 min_delay[0xc]; + u8 int_vector[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_attach_to_mcg_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_attach_to_mcg_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 qpn[0x18]; + + u8 reserved_at_60[0x20]; + + u8 multicast_gid[16][0x8]; +}; + +struct mlx5_ifc_arm_xrq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_arm_xrq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 xrqn[0x18]; + + u8 reserved_at_60[0x10]; + u8 lwm[0x10]; +}; + +struct mlx5_ifc_arm_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1, +}; + +struct mlx5_ifc_arm_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 
reserved_at_40[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_at_60[0x10]; + u8 lwm[0x10]; +}; + +struct mlx5_ifc_arm_rq_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1, + MLX5_ARM_RQ_IN_OP_MOD_XRQ = 0x2, +}; + +struct mlx5_ifc_arm_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 srq_number[0x18]; + + u8 reserved_at_60[0x10]; + u8 lwm[0x10]; +}; + +struct mlx5_ifc_arm_dct_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_arm_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 dct_number[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_xrcd_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 xrcd[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_xrcd_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_alloc_uar_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 uar[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_uar_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_alloc_transport_domain_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 transport_domain[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_transport_domain_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_alloc_q_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x18]; + u8 counter_set_id[0x8]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_q_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_alloc_pd_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 pd[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_pd_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_alloc_flow_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 flow_counter_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_flow_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_add_vxlan_udp_dport_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_add_vxlan_udp_dport_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x10]; + u8 vxlan_udp_port[0x10]; +}; + +struct mlx5_ifc_set_pp_rate_limit_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 
syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_pp_rate_limit_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 rate_limit_index[0x10]; + + u8 reserved_at_60[0x20]; + + u8 rate_limit[0x20]; + + u8 burst_upper_bound[0x20]; + + u8 reserved_at_c0[0x10]; + u8 typical_packet_size[0x10]; + + u8 reserved_at_e0[0x120]; +}; + +struct mlx5_ifc_access_register_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 register_data[0][0x20]; +}; + +enum { + MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0, + MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1, +}; + +struct mlx5_ifc_access_register_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 register_id[0x10]; + + u8 argument[0x20]; + + u8 register_data[0][0x20]; +}; + +struct mlx5_ifc_sltp_reg_bits { + u8 status[0x4]; + u8 version[0x4]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_at_12[0x2]; + u8 lane[0x4]; + u8 reserved_at_18[0x8]; + + u8 reserved_at_20[0x20]; + + u8 reserved_at_40[0x7]; + u8 polarity[0x1]; + u8 ob_tap0[0x8]; + u8 ob_tap1[0x8]; + u8 ob_tap2[0x8]; + + u8 reserved_at_60[0xc]; + u8 ob_preemp_mode[0x4]; + u8 ob_reg[0x8]; + u8 ob_bias[0x8]; + + u8 reserved_at_80[0x20]; +}; + +struct mlx5_ifc_slrg_reg_bits { + u8 status[0x4]; + u8 version[0x4]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_at_12[0x2]; + u8 lane[0x4]; + u8 reserved_at_18[0x8]; + + u8 time_to_link_up[0x10]; + u8 reserved_at_30[0xc]; + u8 grade_lane_speed[0x4]; + + u8 grade_version[0x8]; + u8 grade[0x18]; + + u8 reserved_at_60[0x4]; + u8 height_grade_type[0x4]; + u8 height_grade[0x18]; + + u8 height_dz[0x10]; + u8 height_dv[0x10]; + + u8 reserved_at_a0[0x10]; + u8 height_sigma[0x10]; + + u8 reserved_at_c0[0x20]; + + u8 reserved_at_e0[0x4]; + u8 phase_grade_type[0x4]; + u8 phase_grade[0x18]; + + u8 reserved_at_100[0x8]; + u8 phase_eo_pos[0x8]; + u8 reserved_at_110[0x8]; + u8 phase_eo_neg[0x8]; + + u8 ffe_set_tested[0x10]; + u8 test_errors_per_lane[0x10]; +}; + +struct mlx5_ifc_pvlc_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x1c]; + u8 vl_hw_cap[0x4]; + + u8 reserved_at_40[0x1c]; + u8 vl_admin[0x4]; + + u8 reserved_at_60[0x1c]; + u8 vl_operational[0x4]; +}; + +struct mlx5_ifc_pude_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x4]; + u8 admin_status[0x4]; + u8 reserved_at_18[0x4]; + u8 oper_status[0x4]; + + u8 reserved_at_20[0x60]; +}; + +struct mlx5_ifc_ptys_reg_bits { + u8 reserved_at_0[0x1]; + u8 an_disable_admin[0x1]; + u8 an_disable_cap[0x1]; + u8 reserved_at_3[0x5]; + u8 local_port[0x8]; + u8 reserved_at_10[0xd]; + u8 proto_mask[0x3]; + + u8 an_status[0x4]; + u8 reserved_at_24[0x3c]; + + u8 eth_proto_capability[0x20]; + + u8 ib_link_width_capability[0x10]; + u8 ib_proto_capability[0x10]; + + u8 reserved_at_a0[0x20]; + + u8 eth_proto_admin[0x20]; + + u8 ib_link_width_admin[0x10]; + u8 ib_proto_admin[0x10]; + + u8 reserved_at_100[0x20]; + + u8 eth_proto_oper[0x20]; + + u8 ib_link_width_oper[0x10]; + u8 ib_proto_oper[0x10]; + + u8 reserved_at_160[0x1c]; + u8 connector_type[0x4]; + + u8 eth_proto_lp_advertise[0x20]; + + u8 reserved_at_1a0[0x60]; +}; + +struct mlx5_ifc_mlcr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x20]; + + u8 beacon_duration[0x10]; + u8 reserved_at_40[0x10]; + + u8 
beacon_remain[0x10]; +}; + +struct mlx5_ifc_ptas_reg_bits { + u8 reserved_at_0[0x20]; + + u8 algorithm_options[0x10]; + u8 reserved_at_30[0x4]; + u8 repetitions_mode[0x4]; + u8 num_of_repetitions[0x8]; + + u8 grade_version[0x8]; + u8 height_grade_type[0x4]; + u8 phase_grade_type[0x4]; + u8 height_grade_weight[0x8]; + u8 phase_grade_weight[0x8]; + + u8 gisim_measure_bits[0x10]; + u8 adaptive_tap_measure_bits[0x10]; + + u8 ber_bath_high_error_threshold[0x10]; + u8 ber_bath_mid_error_threshold[0x10]; + + u8 ber_bath_low_error_threshold[0x10]; + u8 one_ratio_high_threshold[0x10]; + + u8 one_ratio_high_mid_threshold[0x10]; + u8 one_ratio_low_mid_threshold[0x10]; + + u8 one_ratio_low_threshold[0x10]; + u8 ndeo_error_threshold[0x10]; + + u8 mixer_offset_step_size[0x10]; + u8 reserved_at_110[0x8]; + u8 mix90_phase_for_voltage_bath[0x8]; + + u8 mixer_offset_start[0x10]; + u8 mixer_offset_end[0x10]; + + u8 reserved_at_140[0x15]; + u8 ber_test_time[0xb]; +}; + +struct mlx5_ifc_pspa_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 sub_port[0x8]; + u8 reserved_at_18[0x8]; + + u8 reserved_at_20[0x20]; +}; + +struct mlx5_ifc_pqdr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x5]; + u8 prio[0x3]; + u8 reserved_at_18[0x6]; + u8 mode[0x2]; + + u8 reserved_at_20[0x20]; + + u8 reserved_at_40[0x10]; + u8 min_threshold[0x10]; + + u8 reserved_at_60[0x10]; + u8 max_threshold[0x10]; + + u8 reserved_at_80[0x10]; + u8 mark_probability_denominator[0x10]; + + u8 reserved_at_a0[0x60]; +}; + +struct mlx5_ifc_ppsc_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x60]; + + u8 reserved_at_80[0x1c]; + u8 wrps_admin[0x4]; + + u8 reserved_at_a0[0x1c]; + u8 wrps_status[0x4]; + + u8 reserved_at_c0[0x8]; + u8 up_threshold[0x8]; + u8 reserved_at_d0[0x8]; + u8 down_threshold[0x8]; + + u8 reserved_at_e0[0x20]; + + u8 reserved_at_100[0x1c]; + u8 srps_admin[0x4]; + + u8 reserved_at_120[0x1c]; + u8 srps_status[0x4]; + + u8 reserved_at_140[0x40]; +}; + +struct mlx5_ifc_pplr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x8]; + u8 lb_cap[0x8]; + u8 reserved_at_30[0x8]; + u8 lb_en[0x8]; +}; + +struct mlx5_ifc_pplm_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x20]; + + u8 port_profile_mode[0x8]; + u8 static_port_profile[0x8]; + u8 active_port_profile[0x8]; + u8 reserved_at_58[0x8]; + + u8 retransmission_active[0x8]; + u8 fec_mode_active[0x18]; + + u8 reserved_at_80[0x20]; +}; + +struct mlx5_ifc_ppcnt_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_at_12[0x8]; + u8 grp[0x6]; + + u8 clr[0x1]; + u8 reserved_at_21[0x1c]; + u8 prio_tc[0x3]; + + union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; +}; + +struct mlx5_ifc_mpcnt_reg_bits { + u8 reserved_at_0[0x8]; + u8 pcie_index[0x8]; + u8 reserved_at_10[0xa]; + u8 grp[0x6]; + + u8 clr[0x1]; + u8 reserved_at_21[0x1f]; + + union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set; +}; + +struct mlx5_ifc_ppad_reg_bits { + u8 reserved_at_0[0x3]; + u8 single_mac[0x1]; + u8 reserved_at_4[0x4]; + u8 local_port[0x8]; + u8 mac_47_32[0x10]; + + u8 mac_31_0[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_pmtu_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 max_mtu[0x10]; + u8 reserved_at_30[0x10]; + + u8 admin_mtu[0x10]; + u8 reserved_at_50[0x10]; + + u8 oper_mtu[0x10]; + u8 
reserved_at_70[0x10]; +}; + +struct mlx5_ifc_pmpr_reg_bits { + u8 reserved_at_0[0x8]; + u8 module[0x8]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x18]; + u8 attenuation_5g[0x8]; + + u8 reserved_at_40[0x18]; + u8 attenuation_7g[0x8]; + + u8 reserved_at_60[0x18]; + u8 attenuation_12g[0x8]; +}; + +struct mlx5_ifc_pmpe_reg_bits { + u8 reserved_at_0[0x8]; + u8 module[0x8]; + u8 reserved_at_10[0xc]; + u8 module_status[0x4]; + + u8 reserved_at_20[0x60]; +}; + +struct mlx5_ifc_pmpc_reg_bits { + u8 module_state_updated[32][0x8]; +}; + +struct mlx5_ifc_pmlpn_reg_bits { + u8 reserved_at_0[0x4]; + u8 mlpn_status[0x4]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 e[0x1]; + u8 reserved_at_21[0x1f]; +}; + +struct mlx5_ifc_pmlp_reg_bits { + u8 rxtx[0x1]; + u8 reserved_at_1[0x7]; + u8 local_port[0x8]; + u8 reserved_at_10[0x8]; + u8 width[0x8]; + + u8 lane0_module_mapping[0x20]; + + u8 lane1_module_mapping[0x20]; + + u8 lane2_module_mapping[0x20]; + + u8 lane3_module_mapping[0x20]; + + u8 reserved_at_a0[0x160]; +}; + +struct mlx5_ifc_pmaos_reg_bits { + u8 reserved_at_0[0x8]; + u8 module[0x8]; + u8 reserved_at_10[0x4]; + u8 admin_status[0x4]; + u8 reserved_at_18[0x4]; + u8 oper_status[0x4]; + + u8 ase[0x1]; + u8 ee[0x1]; + u8 reserved_at_22[0x1c]; + u8 e[0x2]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_plpc_reg_bits { + u8 reserved_at_0[0x4]; + u8 profile_id[0xc]; + u8 reserved_at_10[0x4]; + u8 proto_mask[0x4]; + u8 reserved_at_18[0x8]; + + u8 reserved_at_20[0x10]; + u8 lane_speed[0x10]; + + u8 reserved_at_40[0x17]; + u8 lpbf[0x1]; + u8 fec_mode_policy[0x8]; + + u8 retransmission_capability[0x8]; + u8 fec_mode_capability[0x18]; + + u8 retransmission_support_admin[0x8]; + u8 fec_mode_support_admin[0x18]; + + u8 retransmission_request_admin[0x8]; + u8 fec_mode_request_admin[0x18]; + + u8 reserved_at_c0[0x80]; +}; + +struct mlx5_ifc_plib_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x8]; + u8 ib_port[0x8]; + + u8 reserved_at_20[0x60]; +}; + +struct mlx5_ifc_plbf_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0xd]; + u8 lbf_mode[0x3]; + + u8 reserved_at_20[0x20]; +}; + +struct mlx5_ifc_pipg_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 dic[0x1]; + u8 reserved_at_21[0x19]; + u8 ipg[0x4]; + u8 reserved_at_3e[0x2]; +}; + +struct mlx5_ifc_pifr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0xe0]; + + u8 port_filter[8][0x20]; + + u8 port_filter_update_en[8][0x20]; +}; + +struct mlx5_ifc_pfcc_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0xb]; + u8 ppan_mask_n[0x1]; + u8 minor_stall_mask[0x1]; + u8 critical_stall_mask[0x1]; + u8 reserved_at_1e[0x2]; + + u8 ppan[0x4]; + u8 reserved_at_24[0x4]; + u8 prio_mask_tx[0x8]; + u8 reserved_at_30[0x8]; + u8 prio_mask_rx[0x8]; + + u8 pptx[0x1]; + u8 aptx[0x1]; + u8 pptx_mask_n[0x1]; + u8 reserved_at_43[0x5]; + u8 pfctx[0x8]; + u8 reserved_at_50[0x10]; + + u8 pprx[0x1]; + u8 aprx[0x1]; + u8 pprx_mask_n[0x1]; + u8 reserved_at_63[0x5]; + u8 pfcrx[0x8]; + u8 reserved_at_70[0x10]; + + u8 device_stall_minor_watermark[0x10]; + u8 device_stall_critical_watermark[0x10]; + + u8 reserved_at_a0[0x60]; +}; + +struct mlx5_ifc_pelc_reg_bits { + u8 op[0x4]; + u8 reserved_at_4[0x4]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 op_admin[0x8]; + u8 op_capability[0x8]; + u8 op_request[0x8]; + u8 op_active[0x8]; + + u8 admin[0x40]; + + u8 capability[0x40]; + + u8 
request[0x40]; + + u8 active[0x40]; + + u8 reserved_at_140[0x80]; +}; + +struct mlx5_ifc_peir_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0xc]; + u8 error_count[0x4]; + u8 reserved_at_30[0x10]; + + u8 reserved_at_40[0xc]; + u8 lane[0x4]; + u8 reserved_at_50[0x8]; + u8 error_type[0x8]; +}; + +struct mlx5_ifc_mpegc_reg_bits { + u8 reserved_at_0[0x30]; + u8 field_select[0x10]; + + u8 tx_overflow_sense[0x1]; + u8 mark_cqe[0x1]; + u8 mark_cnp[0x1]; + u8 reserved_at_43[0x1b]; + u8 tx_lossy_overflow_oper[0x2]; + + u8 reserved_at_60[0x100]; +}; + +struct mlx5_ifc_pcam_enhanced_features_bits { + u8 reserved_at_0[0x6d]; + u8 rx_icrc_encapsulated_counter[0x1]; + u8 reserved_at_6e[0x8]; + u8 pfcc_mask[0x1]; + u8 reserved_at_77[0x4]; + u8 rx_buffer_fullness_counters[0x1]; + u8 ptys_connector_type[0x1]; + u8 reserved_at_7d[0x1]; + u8 ppcnt_discard_group[0x1]; + u8 ppcnt_statistical_group[0x1]; +}; + +struct mlx5_ifc_pcam_regs_5000_to_507f_bits { + u8 port_access_reg_cap_mask_127_to_96[0x20]; + u8 port_access_reg_cap_mask_95_to_64[0x20]; + u8 port_access_reg_cap_mask_63_to_32[0x20]; + + u8 port_access_reg_cap_mask_31_to_13[0x13]; + u8 pbmc[0x1]; + u8 pptb[0x1]; + u8 port_access_reg_cap_mask_10_to_0[0xb]; +}; + +struct mlx5_ifc_pcam_reg_bits { + u8 reserved_at_0[0x8]; + u8 feature_group[0x8]; + u8 reserved_at_10[0x8]; + u8 access_reg_group[0x8]; + + u8 reserved_at_20[0x20]; + + union { + struct mlx5_ifc_pcam_regs_5000_to_507f_bits regs_5000_to_507f; + u8 reserved_at_0[0x80]; + } port_access_reg_cap_mask; + + u8 reserved_at_c0[0x80]; + + union { + struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features; + u8 reserved_at_0[0x80]; + } feature_cap_mask; + + u8 reserved_at_1c0[0xc0]; +}; + +struct mlx5_ifc_mcam_enhanced_features_bits { + u8 reserved_at_0[0x74]; + u8 mark_tx_action_cnp[0x1]; + u8 mark_tx_action_cqe[0x1]; + u8 dynamic_tx_overflow[0x1]; + u8 reserved_at_77[0x4]; + u8 pcie_outbound_stalled[0x1]; + u8 tx_overflow_buffer_pkt[0x1]; + u8 mtpps_enh_out_per_adj[0x1]; + u8 mtpps_fs[0x1]; + u8 pcie_performance_group[0x1]; +}; + +struct mlx5_ifc_mcam_access_reg_bits { + u8 reserved_at_0[0x1c]; + u8 mcda[0x1]; + u8 mcc[0x1]; + u8 mcqi[0x1]; + u8 reserved_at_1f[0x1]; + + u8 regs_95_to_87[0x9]; + u8 mpegc[0x1]; + u8 regs_85_to_68[0x12]; + u8 tracer_registers[0x4]; + + u8 regs_63_to_32[0x20]; + u8 regs_31_to_0[0x20]; +}; + +struct mlx5_ifc_mcam_reg_bits { + u8 reserved_at_0[0x8]; + u8 feature_group[0x8]; + u8 reserved_at_10[0x8]; + u8 access_reg_group[0x8]; + + u8 reserved_at_20[0x20]; + + union { + struct mlx5_ifc_mcam_access_reg_bits access_regs; + u8 reserved_at_0[0x80]; + } mng_access_reg_cap_mask; + + u8 reserved_at_c0[0x80]; + + union { + struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features; + u8 reserved_at_0[0x80]; + } mng_feature_cap_mask; + + u8 reserved_at_1c0[0x80]; +}; + +struct mlx5_ifc_qcam_access_reg_cap_mask { + u8 qcam_access_reg_cap_mask_127_to_20[0x6C]; + u8 qpdpm[0x1]; + u8 qcam_access_reg_cap_mask_18_to_4[0x0F]; + u8 qdpm[0x1]; + u8 qpts[0x1]; + u8 qcap[0x1]; + u8 qcam_access_reg_cap_mask_0[0x1]; +}; + +struct mlx5_ifc_qcam_qos_feature_cap_mask { + u8 qcam_qos_feature_cap_mask_127_to_1[0x7F]; + u8 qpts_trust_both[0x1]; +}; + +struct mlx5_ifc_qcam_reg_bits { + u8 reserved_at_0[0x8]; + u8 feature_group[0x8]; + u8 reserved_at_10[0x8]; + u8 access_reg_group[0x8]; + u8 reserved_at_20[0x20]; + + union { + struct mlx5_ifc_qcam_access_reg_cap_mask reg_cap; + u8 reserved_at_0[0x80]; + } qos_access_reg_cap_mask; + + u8 
reserved_at_c0[0x80]; + + union { + struct mlx5_ifc_qcam_qos_feature_cap_mask feature_cap; + u8 reserved_at_0[0x80]; + } qos_feature_cap_mask; + + u8 reserved_at_1c0[0x80]; +}; + +struct mlx5_ifc_pcap_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 port_capability_mask[4][0x20]; +}; + +struct mlx5_ifc_paos_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x4]; + u8 admin_status[0x4]; + u8 reserved_at_18[0x4]; + u8 oper_status[0x4]; + + u8 ase[0x1]; + u8 ee[0x1]; + u8 reserved_at_22[0x1c]; + u8 e[0x2]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_pamp_reg_bits { + u8 reserved_at_0[0x8]; + u8 opamp_group[0x8]; + u8 reserved_at_10[0xc]; + u8 opamp_group_type[0x4]; + + u8 start_index[0x10]; + u8 reserved_at_30[0x4]; + u8 num_of_indices[0xc]; + + u8 index_data[18][0x10]; +}; + +struct mlx5_ifc_pcmr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x2e]; + u8 fcs_cap[0x1]; + u8 reserved_at_3f[0x1f]; + u8 fcs_chk[0x1]; + u8 reserved_at_5f[0x1]; +}; + +struct mlx5_ifc_lane_2_module_mapping_bits { + u8 reserved_at_0[0x6]; + u8 rx_lane[0x2]; + u8 reserved_at_8[0x6]; + u8 tx_lane[0x2]; + u8 reserved_at_10[0x8]; + u8 module[0x8]; +}; + +struct mlx5_ifc_bufferx_reg_bits { + u8 reserved_at_0[0x6]; + u8 lossy[0x1]; + u8 epsb[0x1]; + u8 reserved_at_8[0x8]; + u8 size[0x10]; + + u8 xoff_threshold[0x10]; + u8 xon_threshold[0x10]; +}; + +struct mlx5_ifc_set_node_in_bits { + u8 node_description[64][0x8]; +}; + +struct mlx5_ifc_register_power_settings_bits { + u8 reserved_at_0[0x18]; + u8 power_settings_level[0x8]; + + u8 reserved_at_20[0x60]; +}; + +struct mlx5_ifc_register_host_endianness_bits { + u8 he[0x1]; + u8 reserved_at_1[0x1f]; + + u8 reserved_at_20[0x60]; +}; + +struct mlx5_ifc_umr_pointer_desc_argument_bits { + u8 reserved_at_0[0x20]; + + u8 mkey[0x20]; + + u8 addressh_63_32[0x20]; + + u8 addressl_31_0[0x20]; +}; + +struct mlx5_ifc_ud_adrs_vector_bits { + u8 dc_key[0x40]; + + u8 ext[0x1]; + u8 reserved_at_41[0x7]; + u8 destination_qp_dct[0x18]; + + u8 static_rate[0x4]; + u8 sl_eth_prio[0x4]; + u8 fl[0x1]; + u8 mlid[0x7]; + u8 rlid_udp_sport[0x10]; + + u8 reserved_at_80[0x20]; + + u8 rmac_47_16[0x20]; + + u8 rmac_15_0[0x10]; + u8 tclass[0x8]; + u8 hop_limit[0x8]; + + u8 reserved_at_e0[0x1]; + u8 grh[0x1]; + u8 reserved_at_e2[0x2]; + u8 src_addr_index[0x8]; + u8 flow_label[0x14]; + + u8 rgid_rip[16][0x8]; +}; + +struct mlx5_ifc_pages_req_event_bits { + u8 reserved_at_0[0x10]; + u8 function_id[0x10]; + + u8 num_pages[0x20]; + + u8 reserved_at_40[0xa0]; +}; + +struct mlx5_ifc_eqe_bits { + u8 reserved_at_0[0x8]; + u8 event_type[0x8]; + u8 reserved_at_10[0x8]; + u8 event_sub_type[0x8]; + + u8 reserved_at_20[0xe0]; + + union mlx5_ifc_event_auto_bits event_data; + + u8 reserved_at_1e0[0x10]; + u8 signature[0x8]; + u8 reserved_at_1f8[0x7]; + u8 owner[0x1]; +}; + +enum { + MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7, +}; + +struct mlx5_ifc_cmd_queue_entry_bits { + u8 type[0x8]; + u8 reserved_at_8[0x18]; + + u8 input_length[0x20]; + + u8 input_mailbox_pointer_63_32[0x20]; + + u8 input_mailbox_pointer_31_9[0x17]; + u8 reserved_at_77[0x9]; + + u8 command_input_inline_data[16][0x8]; + + u8 command_output_inline_data[16][0x8]; + + u8 output_mailbox_pointer_63_32[0x20]; + + u8 output_mailbox_pointer_31_9[0x17]; + u8 reserved_at_1b7[0x9]; + + u8 output_length[0x20]; + + u8 token[0x8]; + u8 signature[0x8]; + u8 reserved_at_1f0[0x8]; + u8 status[0x7]; + u8 ownership[0x1]; +}; + +struct mlx5_ifc_cmd_out_bits { + 
u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 command_output[0x20]; +}; + +struct mlx5_ifc_cmd_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 command[0][0x20]; +}; + +struct mlx5_ifc_cmd_if_box_bits { + u8 mailbox_data[512][0x8]; + + u8 reserved_at_1000[0x180]; + + u8 next_pointer_63_32[0x20]; + + u8 next_pointer_31_10[0x16]; + u8 reserved_at_11b6[0xa]; + + u8 block_number[0x20]; + + u8 reserved_at_11e0[0x8]; + u8 token[0x8]; + u8 ctrl_signature[0x8]; + u8 signature[0x8]; +}; + +struct mlx5_ifc_mtt_bits { + u8 ptag_63_32[0x20]; + + u8 ptag_31_8[0x18]; + u8 reserved_at_38[0x6]; + u8 wr_en[0x1]; + u8 rd_en[0x1]; +}; + +struct mlx5_ifc_query_wol_rol_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 rol_mode[0x8]; + u8 wol_mode[0x8]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_wol_rol_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_wol_rol_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_wol_rol_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 rol_mode_valid[0x1]; + u8 wol_mode_valid[0x1]; + u8 reserved_at_42[0xe]; + u8 rol_mode[0x8]; + u8 wol_mode[0x8]; + + u8 reserved_at_60[0x20]; +}; + +enum { + MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, + MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, + MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2, +}; + +enum { + MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0, + MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1, + MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2, +}; + +enum { + MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10, +}; + +struct mlx5_ifc_initial_seg_bits { + u8 fw_rev_minor[0x10]; + u8 fw_rev_major[0x10]; + + u8 cmd_interface_rev[0x10]; + u8 fw_rev_subminor[0x10]; + + u8 reserved_at_40[0x40]; + + u8 cmdq_phy_addr_63_32[0x20]; + + u8 cmdq_phy_addr_31_12[0x14]; + u8 reserved_at_b4[0x2]; + u8 nic_interface[0x2]; + u8 log_cmdq_size[0x4]; + u8 log_cmdq_stride[0x4]; + + u8 command_doorbell_vector[0x20]; + + u8 reserved_at_e0[0xf00]; + + u8 initializing[0x1]; + u8 reserved_at_fe1[0x4]; + u8 nic_interface_supported[0x3]; + u8 reserved_at_fe8[0x18]; + + struct mlx5_ifc_health_buffer_bits health_buffer; + + u8 no_dram_nic_offset[0x20]; + + u8 reserved_at_1220[0x6e40]; + + u8 reserved_at_8060[0x1f]; + u8 clear_int[0x1]; + + u8 health_syndrome[0x8]; + u8 health_counter[0x18]; + + u8 reserved_at_80a0[0x17fc0]; +}; + +struct mlx5_ifc_mtpps_reg_bits { + u8 reserved_at_0[0xc]; + u8 cap_number_of_pps_pins[0x4]; + u8 reserved_at_10[0x4]; + u8 cap_max_num_of_pps_in_pins[0x4]; + u8 reserved_at_18[0x4]; + u8 cap_max_num_of_pps_out_pins[0x4]; + + u8 
reserved_at_20[0x24]; + u8 cap_pin_3_mode[0x4]; + u8 reserved_at_48[0x4]; + u8 cap_pin_2_mode[0x4]; + u8 reserved_at_50[0x4]; + u8 cap_pin_1_mode[0x4]; + u8 reserved_at_58[0x4]; + u8 cap_pin_0_mode[0x4]; + + u8 reserved_at_60[0x4]; + u8 cap_pin_7_mode[0x4]; + u8 reserved_at_68[0x4]; + u8 cap_pin_6_mode[0x4]; + u8 reserved_at_70[0x4]; + u8 cap_pin_5_mode[0x4]; + u8 reserved_at_78[0x4]; + u8 cap_pin_4_mode[0x4]; + + u8 field_select[0x20]; + u8 reserved_at_a0[0x60]; + + u8 enable[0x1]; + u8 reserved_at_101[0xb]; + u8 pattern[0x4]; + u8 reserved_at_110[0x4]; + u8 pin_mode[0x4]; + u8 pin[0x8]; + + u8 reserved_at_120[0x20]; + + u8 time_stamp[0x40]; + + u8 out_pulse_duration[0x10]; + u8 out_periodic_adjustment[0x10]; + u8 enhanced_out_periodic_adjustment[0x20]; + + u8 reserved_at_1c0[0x20]; +}; + +struct mlx5_ifc_mtppse_reg_bits { + u8 reserved_at_0[0x18]; + u8 pin[0x8]; + u8 event_arm[0x1]; + u8 reserved_at_21[0x1b]; + u8 event_generation_mode[0x4]; + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_mcqi_cap_bits { + u8 supported_info_bitmask[0x20]; + + u8 component_size[0x20]; + + u8 max_component_size[0x20]; + + u8 log_mcda_word_size[0x4]; + u8 reserved_at_64[0xc]; + u8 mcda_max_write_size[0x10]; + + u8 rd_en[0x1]; + u8 reserved_at_81[0x1]; + u8 match_chip_id[0x1]; + u8 match_psid[0x1]; + u8 check_user_timestamp[0x1]; + u8 match_base_guid_mac[0x1]; + u8 reserved_at_86[0x1a]; +}; + +struct mlx5_ifc_mcqi_reg_bits { + u8 read_pending_component[0x1]; + u8 reserved_at_1[0xf]; + u8 component_index[0x10]; + + u8 reserved_at_20[0x20]; + + u8 reserved_at_40[0x1b]; + u8 info_type[0x5]; + + u8 info_size[0x20]; + + u8 offset[0x20]; + + u8 reserved_at_a0[0x10]; + u8 data_size[0x10]; + + u8 data[0][0x20]; +}; + +struct mlx5_ifc_mcc_reg_bits { + u8 reserved_at_0[0x4]; + u8 time_elapsed_since_last_cmd[0xc]; + u8 reserved_at_10[0x8]; + u8 instruction[0x8]; + + u8 reserved_at_20[0x10]; + u8 component_index[0x10]; + + u8 reserved_at_40[0x8]; + u8 update_handle[0x18]; + + u8 handle_owner_type[0x4]; + u8 handle_owner_host_id[0x4]; + u8 reserved_at_68[0x1]; + u8 control_progress[0x7]; + u8 error_code[0x8]; + u8 reserved_at_78[0x4]; + u8 control_state[0x4]; + + u8 component_size[0x20]; + + u8 reserved_at_a0[0x60]; +}; + +struct mlx5_ifc_mcda_reg_bits { + u8 reserved_at_0[0x8]; + u8 update_handle[0x18]; + + u8 offset[0x20]; + + u8 reserved_at_40[0x10]; + u8 size[0x10]; + + u8 reserved_at_60[0x20]; + + u8 data[0][0x20]; +}; + +union mlx5_ifc_ports_control_registers_document_bits { + struct mlx5_ifc_bufferx_reg_bits bufferx_reg; + struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; + struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; + struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; + struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; + struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; + struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; + struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping; + struct mlx5_ifc_pamp_reg_bits pamp_reg; + struct mlx5_ifc_paos_reg_bits paos_reg; + struct mlx5_ifc_pcap_reg_bits pcap_reg; + struct mlx5_ifc_peir_reg_bits peir_reg; + struct mlx5_ifc_pelc_reg_bits pelc_reg; + struct mlx5_ifc_pfcc_reg_bits pfcc_reg; + struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits 
ib_port_cntrs_grp_data_layout; + struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; + struct mlx5_ifc_pifr_reg_bits pifr_reg; + struct mlx5_ifc_pipg_reg_bits pipg_reg; + struct mlx5_ifc_plbf_reg_bits plbf_reg; + struct mlx5_ifc_plib_reg_bits plib_reg; + struct mlx5_ifc_plpc_reg_bits plpc_reg; + struct mlx5_ifc_pmaos_reg_bits pmaos_reg; + struct mlx5_ifc_pmlp_reg_bits pmlp_reg; + struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg; + struct mlx5_ifc_pmpc_reg_bits pmpc_reg; + struct mlx5_ifc_pmpe_reg_bits pmpe_reg; + struct mlx5_ifc_pmpr_reg_bits pmpr_reg; + struct mlx5_ifc_pmtu_reg_bits pmtu_reg; + struct mlx5_ifc_ppad_reg_bits ppad_reg; + struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; + struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg; + struct mlx5_ifc_pplm_reg_bits pplm_reg; + struct mlx5_ifc_pplr_reg_bits pplr_reg; + struct mlx5_ifc_ppsc_reg_bits ppsc_reg; + struct mlx5_ifc_pqdr_reg_bits pqdr_reg; + struct mlx5_ifc_pspa_reg_bits pspa_reg; + struct mlx5_ifc_ptas_reg_bits ptas_reg; + struct mlx5_ifc_ptys_reg_bits ptys_reg; + struct mlx5_ifc_mlcr_reg_bits mlcr_reg; + struct mlx5_ifc_pude_reg_bits pude_reg; + struct mlx5_ifc_pvlc_reg_bits pvlc_reg; + struct mlx5_ifc_slrg_reg_bits slrg_reg; + struct mlx5_ifc_sltp_reg_bits sltp_reg; + struct mlx5_ifc_mtpps_reg_bits mtpps_reg; + struct mlx5_ifc_mtppse_reg_bits mtppse_reg; + struct mlx5_ifc_fpga_access_reg_bits fpga_access_reg; + struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits; + struct mlx5_ifc_fpga_cap_bits fpga_cap_bits; + struct mlx5_ifc_mcqi_reg_bits mcqi_reg; + struct mlx5_ifc_mcc_reg_bits mcc_reg; + struct mlx5_ifc_mcda_reg_bits mcda_reg; + u8 reserved_at_0[0x60e0]; +}; + +union mlx5_ifc_debug_enhancements_document_bits { + struct mlx5_ifc_health_buffer_bits health_buffer; + u8 reserved_at_0[0x200]; +}; + +union mlx5_ifc_uplink_pci_interface_document_bits { + struct mlx5_ifc_initial_seg_bits initial_seg; + u8 reserved_at_0[0x20060]; +}; + +struct mlx5_ifc_set_flow_table_root_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_flow_table_root_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + + u8 reserved_at_c0[0x8]; + u8 underlay_qpn[0x18]; + u8 reserved_at_e0[0x120]; +}; + +enum { + MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = (1UL << 0), + MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15), +}; + +struct mlx5_ifc_modify_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x10]; + u8 modify_field_select[0x10]; + + u8 table_type[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + + struct mlx5_ifc_flow_table_context_bits flow_table_context; +}; + +struct mlx5_ifc_ets_tcn_config_reg_bits { + u8 g[0x1]; + u8 b[0x1]; + u8 r[0x1]; + u8 reserved_at_3[0x9]; + u8 group[0x4]; + u8 reserved_at_10[0x9]; + u8 bw_allocation[0x7]; + + u8 reserved_at_20[0xc]; + u8 max_bw_units[0x4]; + u8 reserved_at_30[0x8]; + u8 max_bw_value[0x8]; +}; + +struct mlx5_ifc_ets_global_config_reg_bits { + u8 
reserved_at_0[0x2]; + u8 r[0x1]; + u8 reserved_at_3[0x1d]; + + u8 reserved_at_20[0xc]; + u8 max_bw_units[0x4]; + u8 reserved_at_30[0x8]; + u8 max_bw_value[0x8]; +}; + +struct mlx5_ifc_qetc_reg_bits { + u8 reserved_at_0[0x8]; + u8 port_number[0x8]; + u8 reserved_at_10[0x30]; + + struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8]; + struct mlx5_ifc_ets_global_config_reg_bits global_configuration; +}; + +struct mlx5_ifc_qpdpm_dscp_reg_bits { + u8 e[0x1]; + u8 reserved_at_01[0x0b]; + u8 prio[0x04]; +}; + +struct mlx5_ifc_qpdpm_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + struct mlx5_ifc_qpdpm_dscp_reg_bits dscp[64]; +}; + +struct mlx5_ifc_qpts_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x2d]; + u8 trust_state[0x3]; +}; + +struct mlx5_ifc_pptb_reg_bits { + u8 reserved_at_0[0x2]; + u8 mm[0x2]; + u8 reserved_at_4[0x4]; + u8 local_port[0x8]; + u8 reserved_at_10[0x6]; + u8 cm[0x1]; + u8 um[0x1]; + u8 pm[0x8]; + + u8 prio_x_buff[0x20]; + + u8 pm_msb[0x8]; + u8 reserved_at_48[0x10]; + u8 ctrl_buff[0x4]; + u8 untagged_buff[0x4]; +}; + +struct mlx5_ifc_pbmc_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + + u8 xoff_timer_value[0x10]; + u8 xoff_refresh[0x10]; + + u8 reserved_at_40[0x9]; + u8 fullness_threshold[0x7]; + u8 port_buffer_size[0x10]; + + struct mlx5_ifc_bufferx_reg_bits buffer[10]; + + u8 reserved_at_2e0[0x80]; +}; + +struct mlx5_ifc_qtct_reg_bits { + u8 reserved_at_0[0x8]; + u8 port_number[0x8]; + u8 reserved_at_10[0xd]; + u8 prio[0x3]; + + u8 reserved_at_20[0x1d]; + u8 tclass[0x3]; +}; + +struct mlx5_ifc_mcia_reg_bits { + u8 l[0x1]; + u8 reserved_at_1[0x7]; + u8 module[0x8]; + u8 reserved_at_10[0x8]; + u8 status[0x8]; + + u8 i2c_device_address[0x8]; + u8 page_number[0x8]; + u8 device_address[0x10]; + + u8 reserved_at_40[0x10]; + u8 size[0x10]; + + u8 reserved_at_60[0x20]; + + u8 dword_0[0x20]; + u8 dword_1[0x20]; + u8 dword_2[0x20]; + u8 dword_3[0x20]; + u8 dword_4[0x20]; + u8 dword_5[0x20]; + u8 dword_6[0x20]; + u8 dword_7[0x20]; + u8 dword_8[0x20]; + u8 dword_9[0x20]; + u8 dword_10[0x20]; + u8 dword_11[0x20]; +}; + +struct mlx5_ifc_dcbx_param_bits { + u8 dcbx_cee_cap[0x1]; + u8 dcbx_ieee_cap[0x1]; + u8 dcbx_standby_cap[0x1]; + u8 reserved_at_0[0x5]; + u8 port_number[0x8]; + u8 reserved_at_10[0xa]; + u8 max_application_table_size[6]; + u8 reserved_at_20[0x15]; + u8 version_oper[0x3]; + u8 reserved_at_38[5]; + u8 version_admin[0x3]; + u8 willing_admin[0x1]; + u8 reserved_at_41[0x3]; + u8 pfc_cap_oper[0x4]; + u8 reserved_at_48[0x4]; + u8 pfc_cap_admin[0x4]; + u8 reserved_at_50[0x4]; + u8 num_of_tc_oper[0x4]; + u8 reserved_at_58[0x4]; + u8 num_of_tc_admin[0x4]; + u8 remote_willing[0x1]; + u8 reserved_at_61[3]; + u8 remote_pfc_cap[4]; + u8 reserved_at_68[0x14]; + u8 remote_num_of_tc[0x4]; + u8 reserved_at_80[0x18]; + u8 error[0x8]; + u8 reserved_at_a0[0x160]; +}; + +struct mlx5_ifc_lagc_bits { + u8 reserved_at_0[0x1d]; + u8 lag_state[0x3]; + + u8 reserved_at_20[0x14]; + u8 tx_remap_affinity_2[0x4]; + u8 reserved_at_38[0x4]; + u8 tx_remap_affinity_1[0x4]; +}; + +struct mlx5_ifc_create_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_create_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + struct mlx5_ifc_lagc_bits ctx; +}; + +struct mlx5_ifc_modify_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 
syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_modify_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + u8 field_select[0x20]; + + struct mlx5_ifc_lagc_bits ctx; +}; + +struct mlx5_ifc_query_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + struct mlx5_ifc_lagc_bits ctx; +}; + +struct mlx5_ifc_query_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_create_vport_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_create_vport_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_vport_lag_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_destroy_vport_lag_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_alloc_memic_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_30[0x20]; + + u8 reserved_at_40[0x18]; + u8 log_memic_addr_alignment[0x8]; + + u8 range_start_addr[0x40]; + + u8 range_size[0x20]; + + u8 memic_size[0x20]; +}; + +struct mlx5_ifc_alloc_memic_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 memic_start_addr[0x40]; +}; + +struct mlx5_ifc_dealloc_memic_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + u8 memic_start_addr[0x40]; + + u8 memic_size[0x20]; + + u8 reserved_at_e0[0x20]; +}; + +struct mlx5_ifc_dealloc_memic_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_general_obj_in_cmd_hdr_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 obj_type[0x10]; + + u8 obj_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_general_obj_out_cmd_hdr_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 obj_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_umem_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x5b]; + u8 log_page_size[0x5]; + + u8 page_offset[0x20]; + + u8 num_of_mtt[0x40]; + + struct mlx5_ifc_mtt_bits mtt[0]; +}; + +struct mlx5_ifc_uctx_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x1c0]; +}; + +struct mlx5_ifc_create_umem_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_umem_bits umem; +}; + +struct mlx5_ifc_create_uctx_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_uctx_bits uctx; +}; + +struct mlx5_ifc_mtrc_string_db_param_bits { + u8 string_db_base_address[0x20]; + + u8 reserved_at_20[0x8]; + u8 string_db_size[0x18]; +}; + +struct mlx5_ifc_mtrc_cap_bits { + u8 trace_owner[0x1]; + u8 trace_to_memory[0x1]; + u8 
reserved_at_2[0x4]; + u8 trc_ver[0x2]; + u8 reserved_at_8[0x14]; + u8 num_string_db[0x4]; + + u8 first_string_trace[0x8]; + u8 num_string_trace[0x8]; + u8 reserved_at_30[0x28]; + + u8 log_max_trace_buffer_size[0x8]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8]; + + u8 reserved_at_280[0x180]; +}; + +struct mlx5_ifc_mtrc_conf_bits { + u8 reserved_at_0[0x1c]; + u8 trace_mode[0x4]; + u8 reserved_at_20[0x18]; + u8 log_trace_buffer_size[0x8]; + u8 trace_mkey[0x20]; + u8 reserved_at_60[0x3a0]; +}; + +struct mlx5_ifc_mtrc_stdb_bits { + u8 string_db_index[0x4]; + u8 reserved_at_4[0x4]; + u8 read_size[0x18]; + u8 start_offset[0x20]; + u8 string_db_data[0]; +}; + +struct mlx5_ifc_mtrc_ctrl_bits { + u8 trace_status[0x2]; + u8 reserved_at_2[0x2]; + u8 arm_event[0x1]; + u8 reserved_at_5[0xb]; + u8 modify_field_select[0x10]; + u8 reserved_at_20[0x2b]; + u8 current_timestamp52_32[0x15]; + u8 current_timestamp31_0[0x20]; + u8 reserved_at_80[0x180]; +}; + +#endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h new file mode 100644 index 000000000..37e065a80 --- /dev/null +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -0,0 +1,616 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef MLX5_IFC_FPGA_H +#define MLX5_IFC_FPGA_H + +struct mlx5_ifc_ipv4_layout_bits { + u8 reserved_at_0[0x60]; + + u8 ipv4[0x20]; +}; + +struct mlx5_ifc_ipv6_layout_bits { + u8 ipv6[16][0x8]; +}; + +union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { + struct mlx5_ifc_ipv6_layout_bits ipv6_layout; + struct mlx5_ifc_ipv4_layout_bits ipv4_layout; + u8 reserved_at_0[0x80]; +}; + +enum { + MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9, +}; + +enum { + MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2, + MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3, +}; + +struct mlx5_ifc_fpga_shell_caps_bits { + u8 max_num_qps[0x10]; + u8 reserved_at_10[0x8]; + u8 total_rcv_credits[0x8]; + + u8 reserved_at_20[0xe]; + u8 qp_type[0x2]; + u8 reserved_at_30[0x5]; + u8 rae[0x1]; + u8 rwe[0x1]; + u8 rre[0x1]; + u8 reserved_at_38[0x4]; + u8 dc[0x1]; + u8 ud[0x1]; + u8 uc[0x1]; + u8 rc[0x1]; + + u8 reserved_at_40[0x1a]; + u8 log_ddr_size[0x6]; + + u8 max_fpga_qp_msg_size[0x20]; + + u8 reserved_at_80[0x180]; +}; + +struct mlx5_ifc_fpga_cap_bits { + u8 fpga_id[0x8]; + u8 fpga_device[0x18]; + + u8 register_file_ver[0x20]; + + u8 fpga_ctrl_modify[0x1]; + u8 reserved_at_41[0x5]; + u8 access_reg_query_mode[0x2]; + u8 reserved_at_48[0x6]; + u8 access_reg_modify_mode[0x2]; + u8 reserved_at_50[0x10]; + + u8 reserved_at_60[0x20]; + + u8 image_version[0x20]; + + u8 image_date[0x20]; + + u8 image_time[0x20]; + + u8 shell_version[0x20]; + + u8 reserved_at_100[0x80]; + + struct mlx5_ifc_fpga_shell_caps_bits shell_caps; + + u8 reserved_at_380[0x8]; + u8 ieee_vendor_id[0x18]; + + u8 sandbox_product_version[0x10]; + u8 sandbox_product_id[0x10]; + + u8 sandbox_basic_caps[0x20]; + + u8 reserved_at_3e0[0x10]; + u8 sandbox_extended_caps_len[0x10]; + + u8 sandbox_extended_caps_addr[0x40]; + + u8 fpga_ddr_start_addr[0x40]; + + u8 fpga_cr_space_start_addr[0x40]; + + u8 fpga_ddr_size[0x20]; + + u8 fpga_cr_space_size[0x20]; + + u8 reserved_at_500[0x300]; +}; + +enum { + MLX5_FPGA_CTRL_OPERATION_LOAD = 0x1, + MLX5_FPGA_CTRL_OPERATION_RESET = 0x2, + MLX5_FPGA_CTRL_OPERATION_FLASH_SELECT = 0x3, + MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON = 0x4, + MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_OFF = 0x5, + MLX5_FPGA_CTRL_OPERATION_RESET_SANDBOX = 0x6, +}; + +struct mlx5_ifc_fpga_ctrl_bits { + u8 reserved_at_0[0x8]; + u8 operation[0x8]; + u8 reserved_at_10[0x8]; + u8 status[0x8]; + + u8 reserved_at_20[0x8]; + u8 flash_select_admin[0x8]; + u8 reserved_at_30[0x8]; + u8 flash_select_oper[0x8]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_FPGA_ERROR_EVENT_SYNDROME_CORRUPTED_DDR = 0x1, + MLX5_FPGA_ERROR_EVENT_SYNDROME_FLASH_TIMEOUT = 0x2, + MLX5_FPGA_ERROR_EVENT_SYNDROME_INTERNAL_LINK_ERROR = 0x3, + MLX5_FPGA_ERROR_EVENT_SYNDROME_WATCHDOG_FAILURE = 0x4, + MLX5_FPGA_ERROR_EVENT_SYNDROME_I2C_FAILURE = 0x5, + MLX5_FPGA_ERROR_EVENT_SYNDROME_IMAGE_CHANGED = 0x6, + MLX5_FPGA_ERROR_EVENT_SYNDROME_TEMPERATURE_CRITICAL = 0x7, +}; + +struct mlx5_ifc_fpga_error_event_bits { + u8 reserved_at_0[0x40]; + + u8 reserved_at_40[0x18]; + u8 syndrome[0x8]; + + u8 reserved_at_60[0x80]; +}; + +#define MLX5_FPGA_ACCESS_REG_SIZE_MAX 64 + +struct mlx5_ifc_fpga_access_reg_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x10]; + u8 size[0x10]; + + u8 address[0x40]; + + u8 data[0][0x8]; +}; + +enum mlx5_ifc_fpga_qp_state { + MLX5_FPGA_QPC_STATE_INIT = 0x0, + MLX5_FPGA_QPC_STATE_ACTIVE = 0x1, + MLX5_FPGA_QPC_STATE_ERROR = 0x2, +}; + +enum mlx5_ifc_fpga_qp_type { + MLX5_FPGA_QPC_QP_TYPE_SHELL_QP = 0x0, + MLX5_FPGA_QPC_QP_TYPE_SANDBOX_QP = 0x1, +}; + +enum 
mlx5_ifc_fpga_qp_service_type { + MLX5_FPGA_QPC_ST_RC = 0x0, +}; + +struct mlx5_ifc_fpga_qpc_bits { + u8 state[0x4]; + u8 reserved_at_4[0x1b]; + u8 qp_type[0x1]; + + u8 reserved_at_20[0x4]; + u8 st[0x4]; + u8 reserved_at_28[0x10]; + u8 traffic_class[0x8]; + + u8 ether_type[0x10]; + u8 prio[0x3]; + u8 dei[0x1]; + u8 vid[0xc]; + + u8 reserved_at_60[0x20]; + + u8 reserved_at_80[0x8]; + u8 next_rcv_psn[0x18]; + + u8 reserved_at_a0[0x8]; + u8 next_send_psn[0x18]; + + u8 reserved_at_c0[0x10]; + u8 pkey[0x10]; + + u8 reserved_at_e0[0x8]; + u8 remote_qpn[0x18]; + + u8 reserved_at_100[0x15]; + u8 rnr_retry[0x3]; + u8 reserved_at_118[0x5]; + u8 retry_count[0x3]; + + u8 reserved_at_120[0x20]; + + u8 reserved_at_140[0x10]; + u8 remote_mac_47_32[0x10]; + + u8 remote_mac_31_0[0x20]; + + u8 remote_ip[16][0x8]; + + u8 reserved_at_200[0x40]; + + u8 reserved_at_240[0x10]; + u8 fpga_mac_47_32[0x10]; + + u8 fpga_mac_31_0[0x20]; + + u8 fpga_ip[16][0x8]; +}; + +struct mlx5_ifc_fpga_create_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_fpga_qpc_bits fpga_qpc; +}; + +struct mlx5_ifc_fpga_create_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 fpga_qpn[0x18]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_fpga_qpc_bits fpga_qpc; +}; + +struct mlx5_ifc_fpga_modify_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 fpga_qpn[0x18]; + + u8 field_select[0x20]; + + struct mlx5_ifc_fpga_qpc_bits fpga_qpc; +}; + +struct mlx5_ifc_fpga_modify_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_fpga_query_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 fpga_qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_fpga_query_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_fpga_qpc_bits fpga_qpc; +}; + +struct mlx5_ifc_fpga_query_qp_counters_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 clear[0x1]; + u8 reserved_at_41[0x7]; + u8 fpga_qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_fpga_query_qp_counters_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 rx_ack_packets[0x40]; + + u8 rx_send_packets[0x40]; + + u8 tx_ack_packets[0x40]; + + u8 tx_send_packets[0x40]; + + u8 rx_total_drop[0x40]; + + u8 reserved_at_1c0[0x1c0]; +}; + +struct mlx5_ifc_fpga_destroy_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 fpga_qpn[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_fpga_destroy_qp_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_tls_extended_cap_bits { + u8 aes_gcm_128[0x1]; + u8 aes_gcm_256[0x1]; + u8 reserved_at_2[0x1e]; + u8 reserved_at_20[0x20]; + u8 context_capacity_total[0x20]; + u8 context_capacity_rx[0x20]; + u8 context_capacity_tx[0x20]; + u8 reserved_at_a0[0x10]; + u8 tls_counter_size[0x10]; + u8 tls_counters_addr_low[0x20]; + u8 tls_counters_addr_high[0x20]; + u8 rx[0x1]; + u8 tx[0x1]; 
+ u8 tls_v12[0x1]; + u8 tls_v13[0x1]; + u8 lro[0x1]; + u8 ipv6[0x1]; + u8 reserved_at_106[0x1a]; +}; + +struct mlx5_ifc_ipsec_extended_cap_bits { + u8 encapsulation[0x20]; + + u8 reserved_0[0x12]; + u8 v2_command[0x1]; + u8 udp_encap[0x1]; + u8 rx_no_trailer[0x1]; + u8 ipv4_fragment[0x1]; + u8 ipv6[0x1]; + u8 esn[0x1]; + u8 lso[0x1]; + u8 transport_and_tunnel_mode[0x1]; + u8 tunnel_mode[0x1]; + u8 transport_mode[0x1]; + u8 ah_esp[0x1]; + u8 esp[0x1]; + u8 ah[0x1]; + u8 ipv4_options[0x1]; + + u8 auth_alg[0x20]; + + u8 enc_alg[0x20]; + + u8 sa_cap[0x20]; + + u8 reserved_1[0x10]; + u8 number_of_ipsec_counters[0x10]; + + u8 ipsec_counters_addr_low[0x20]; + u8 ipsec_counters_addr_high[0x20]; +}; + +struct mlx5_ifc_ipsec_counters_bits { + u8 dec_in_packets[0x40]; + + u8 dec_out_packets[0x40]; + + u8 dec_bypass_packets[0x40]; + + u8 enc_in_packets[0x40]; + + u8 enc_out_packets[0x40]; + + u8 enc_bypass_packets[0x40]; + + u8 drop_dec_packets[0x40]; + + u8 failed_auth_dec_packets[0x40]; + + u8 drop_enc_packets[0x40]; + + u8 success_add_sa[0x40]; + + u8 fail_add_sa[0x40]; + + u8 success_delete_sa[0x40]; + + u8 fail_delete_sa[0x40]; + + u8 dropped_cmd[0x40]; +}; + +enum { + MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1, + MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2, +}; + +struct mlx5_ifc_fpga_qp_error_event_bits { + u8 reserved_at_0[0x40]; + + u8 reserved_at_40[0x18]; + u8 syndrome[0x8]; + + u8 reserved_at_60[0x60]; + + u8 reserved_at_c0[0x8]; + u8 fpga_qpn[0x18]; +}; +enum mlx5_ifc_fpga_ipsec_response_syndrome { + MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0, + MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1, + MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2, + MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3, +}; + +struct mlx5_ifc_fpga_ipsec_cmd_resp { + __be32 syndrome; + union { + __be32 sw_sa_handle; + __be32 flags; + }; + u8 reserved[24]; +} __packed; + +enum mlx5_ifc_fpga_ipsec_cmd_opcode { + MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0, + MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1, + MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2, + MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3, + MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4, + MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5, +}; + +enum mlx5_ifc_fpga_ipsec_cap { + MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0), +}; + +struct mlx5_ifc_fpga_ipsec_cmd_cap { + __be32 cmd; + __be32 flags; + u8 reserved[24]; +} __packed; + +enum mlx5_ifc_fpga_ipsec_sa_flags { + MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0), + MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1), + MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2), + MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3), + MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4), + MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5), + MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6), + MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7), +}; + +enum mlx5_ifc_fpga_ipsec_sa_enc_mode { + MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0, + MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1, + MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3, +}; + +struct mlx5_ifc_fpga_ipsec_sa_v1 { + __be32 cmd; + u8 key_enc[32]; + u8 key_auth[32]; + __be32 sip[4]; + __be32 dip[4]; + union { + struct { + __be32 reserved; + u8 salt_iv[8]; + __be32 salt; + } __packed gcm; + struct { + u8 salt[16]; + } __packed cbc; + }; + __be32 spi; + __be32 sw_sa_handle; + __be16 tfclen; + u8 enc_mode; + u8 reserved1[2]; + u8 flags; + u8 reserved2[2]; +}; + +struct mlx5_ifc_fpga_ipsec_sa { + struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1; + __be16 udp_sp; + __be16 udp_dp; + u8 reserved1[4]; + __be32 esn; + __be16 vid; /* only 12 bits, rest is reserved */ + __be16 reserved2; +} __packed; + +enum fpga_tls_cmds { + 
CMD_SETUP_STREAM = 0x1001, + CMD_TEARDOWN_STREAM = 0x1002, + CMD_RESYNC_RX = 0x1003, +}; + +#define MLX5_TLS_1_2 (0) + +#define MLX5_TLS_ALG_AES_GCM_128 (0) +#define MLX5_TLS_ALG_AES_GCM_256 (1) + +struct mlx5_ifc_tls_cmd_bits { + u8 command_type[0x20]; + u8 ipv6[0x1]; + u8 direction_sx[0x1]; + u8 tls_version[0x2]; + u8 reserved[0x1c]; + u8 swid[0x20]; + u8 src_port[0x10]; + u8 dst_port[0x10]; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; + u8 tls_rcd_sn[0x40]; + u8 tcp_sn[0x20]; + u8 tls_implicit_iv[0x20]; + u8 tls_xor_iv[0x40]; + u8 encryption_key[0x100]; + u8 alg[4]; + u8 reserved2[0x1c]; + u8 reserved3[0x4a0]; +}; + +struct mlx5_ifc_tls_resp_bits { + u8 syndrome[0x20]; + u8 stream_id[0x20]; + u8 reserverd[0x40]; +}; + +#define MLX5_TLS_COMMAND_SIZE (0x100) + +#endif /* MLX5_IFC_FPGA_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h new file mode 100644 index 000000000..34aed6032 --- /dev/null +++ b/include/linux/mlx5/port.h @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __MLX5_PORT_H__ +#define __MLX5_PORT_H__ + +#include + +enum mlx5_beacon_duration { + MLX5_BEACON_DURATION_OFF = 0x0, + MLX5_BEACON_DURATION_INF = 0xffff, +}; + +enum mlx5_module_id { + MLX5_MODULE_ID_SFP = 0x3, + MLX5_MODULE_ID_QSFP = 0xC, + MLX5_MODULE_ID_QSFP_PLUS = 0xD, + MLX5_MODULE_ID_QSFP28 = 0x11, +}; + +enum mlx5_an_status { + MLX5_AN_UNAVAILABLE = 0, + MLX5_AN_COMPLETE = 1, + MLX5_AN_FAILED = 2, + MLX5_AN_LINK_UP = 3, + MLX5_AN_LINK_DOWN = 4, +}; + +#define MLX5_EEPROM_MAX_BYTES 32 +#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff +#define MLX5_I2C_ADDR_LOW 0x50 +#define MLX5_I2C_ADDR_HIGH 0x51 +#define MLX5_EEPROM_PAGE_LENGTH 256 + +enum mlx5e_link_mode { + MLX5E_1000BASE_CX_SGMII = 0, + MLX5E_1000BASE_KX = 1, + MLX5E_10GBASE_CX4 = 2, + MLX5E_10GBASE_KX4 = 3, + MLX5E_10GBASE_KR = 4, + MLX5E_20GBASE_KR2 = 5, + MLX5E_40GBASE_CR4 = 6, + MLX5E_40GBASE_KR4 = 7, + MLX5E_56GBASE_R4 = 8, + MLX5E_10GBASE_CR = 12, + MLX5E_10GBASE_SR = 13, + MLX5E_10GBASE_ER = 14, + MLX5E_40GBASE_SR4 = 15, + MLX5E_40GBASE_LR4 = 16, + MLX5E_50GBASE_SR2 = 18, + MLX5E_100GBASE_CR4 = 20, + MLX5E_100GBASE_SR4 = 21, + MLX5E_100GBASE_KR4 = 22, + MLX5E_100GBASE_LR4 = 23, + MLX5E_100BASE_TX = 24, + MLX5E_1000BASE_T = 25, + MLX5E_10GBASE_T = 26, + MLX5E_25GBASE_CR = 27, + MLX5E_25GBASE_KR = 28, + MLX5E_25GBASE_SR = 29, + MLX5E_50GBASE_CR2 = 30, + MLX5E_50GBASE_KR2 = 31, + MLX5E_LINK_MODES_NUMBER, +}; + +enum mlx5e_connector_type { + MLX5E_PORT_UNKNOWN = 0, + MLX5E_PORT_NONE = 1, + MLX5E_PORT_TP = 2, + MLX5E_PORT_AUI = 3, + MLX5E_PORT_BNC = 4, + MLX5E_PORT_MII = 5, + MLX5E_PORT_FIBRE = 6, + MLX5E_PORT_DA = 7, + MLX5E_PORT_OTHER = 8, + MLX5E_CONNECTOR_TYPE_NUMBER, +}; + +#define MLX5E_PROT_MASK(link_mode) (1 << link_mode) + +#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF +#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF + +int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); +int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, + int ptys_size, int proto_mask, u8 local_port); +int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, + u32 *proto_cap, int proto_mask); +int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, + u32 *proto_admin, int proto_mask); +int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, + u8 *link_width_oper, u8 local_port); +int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, u8 local_port); +int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev, + u32 *proto_oper, u8 local_port); +int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, int proto_mask); +void mlx5_toggle_port_link(struct mlx5_core_dev *dev); +int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status); +int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status *status); +int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); +void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, + u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin); + +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, + u8 port); + +int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, + u8 *vl_hw_cap, u8 local_port); + +int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); +int mlx5_query_port_pause(struct mlx5_core_dev *dev, + u32 
*rx_pause, u32 *tx_pause); + +int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); +int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, + u8 *pfc_en_rx); + +int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev, + u16 stall_critical_watermark, + u16 stall_minor_watermark); +int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev, + u16 *stall_critical_watermark, u16 *stall_minor_watermark); + +int mlx5_max_tc(struct mlx5_core_dev *mdev); + +int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); +int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, + u8 prio, u8 *tc); +int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); +int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, + u8 tc, u8 *tc_group); +int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); +int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, + u8 tc, u8 *bw_pct); +int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_unit); +int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_unit); +int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); +int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); + +int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); +void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, + bool *enabled); +int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, + u16 offset, u16 size, u8 *data); + +int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); +int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); + +int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state); +int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state); +int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio); +int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio); +#endif /* __MLX5_PORT_H__ */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h new file mode 100644 index 000000000..4778d4108 --- /dev/null +++ b/include/linux/mlx5/qp.h @@ -0,0 +1,643 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_QP_H +#define MLX5_QP_H + +#include +#include + +#define MLX5_INVALID_LKEY 0x100 +#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5) +#define MLX5_DIF_SIZE 8 +#define MLX5_STRIDE_BLOCK_OP 0x400 +#define MLX5_CPY_GRD_MASK 0xc0 +#define MLX5_CPY_APP_MASK 0x30 +#define MLX5_CPY_REF_MASK 0x0f +#define MLX5_BSF_INC_REFTAG (1 << 6) +#define MLX5_BSF_INL_VALID (1 << 15) +#define MLX5_BSF_REFRESH_DIF (1 << 14) +#define MLX5_BSF_REPEAT_BLOCK (1 << 7) +#define MLX5_BSF_APPTAG_ESCAPE 0x1 +#define MLX5_BSF_APPREF_ESCAPE 0x2 + +enum mlx5_qp_optpar { + MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, + MLX5_QP_OPTPAR_RRE = 1 << 1, + MLX5_QP_OPTPAR_RAE = 1 << 2, + MLX5_QP_OPTPAR_RWE = 1 << 3, + MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4, + MLX5_QP_OPTPAR_Q_KEY = 1 << 5, + MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, + MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, + MLX5_QP_OPTPAR_SRA_MAX = 1 << 8, + MLX5_QP_OPTPAR_RRA_MAX = 1 << 9, + MLX5_QP_OPTPAR_PM_STATE = 1 << 10, + MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12, + MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13, + MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, + MLX5_QP_OPTPAR_PRI_PORT = 1 << 16, + MLX5_QP_OPTPAR_SRQN = 1 << 18, + MLX5_QP_OPTPAR_CQN_RCV = 1 << 19, + MLX5_QP_OPTPAR_DC_HS = 1 << 20, + MLX5_QP_OPTPAR_DC_KEY = 1 << 21, +}; + +enum mlx5_qp_state { + MLX5_QP_STATE_RST = 0, + MLX5_QP_STATE_INIT = 1, + MLX5_QP_STATE_RTR = 2, + MLX5_QP_STATE_RTS = 3, + MLX5_QP_STATE_SQER = 4, + MLX5_QP_STATE_SQD = 5, + MLX5_QP_STATE_ERR = 6, + MLX5_QP_STATE_SQ_DRAINING = 7, + MLX5_QP_STATE_SUSPENDED = 9, + MLX5_QP_NUM_STATE, + MLX5_QP_STATE, + MLX5_QP_STATE_BAD, +}; + +enum { + MLX5_SQ_STATE_NA = MLX5_SQC_STATE_ERR + 1, + MLX5_SQ_NUM_STATE = MLX5_SQ_STATE_NA + 1, + MLX5_RQ_STATE_NA = MLX5_RQC_STATE_ERR + 1, + MLX5_RQ_NUM_STATE = MLX5_RQ_STATE_NA + 1, +}; + +enum { + MLX5_QP_ST_RC = 0x0, + MLX5_QP_ST_UC = 0x1, + MLX5_QP_ST_UD = 0x2, + MLX5_QP_ST_XRC = 0x3, + MLX5_QP_ST_MLX = 0x4, + MLX5_QP_ST_DCI = 0x5, + MLX5_QP_ST_DCT = 0x6, + MLX5_QP_ST_QP0 = 0x7, + MLX5_QP_ST_QP1 = 0x8, + MLX5_QP_ST_RAW_ETHERTYPE = 0x9, + MLX5_QP_ST_RAW_IPV6 = 0xa, + MLX5_QP_ST_SNIFFER = 0xb, + MLX5_QP_ST_SYNC_UMR = 0xe, + MLX5_QP_ST_PTP_1588 = 0xd, + MLX5_QP_ST_REG_UMR = 0xc, + MLX5_QP_ST_MAX +}; + +enum { + MLX5_QP_PM_MIGRATED = 0x3, + MLX5_QP_PM_ARMED = 0x0, + MLX5_QP_PM_REARM = 0x1 +}; + +enum { + MLX5_NON_ZERO_RQ = 0x0, + MLX5_SRQ_RQ = 0x1, + MLX5_CRQ_RQ = 0x2, + MLX5_ZERO_LEN_RQ = 0x3 +}; + +/* TODO REM */ +enum { + /* params1 */ + MLX5_QP_BIT_SRE = 1 << 15, + MLX5_QP_BIT_SWE = 1 << 14, + MLX5_QP_BIT_SAE = 1 << 13, + /* params2 */ + MLX5_QP_BIT_RRE = 1 << 15, + MLX5_QP_BIT_RWE = 1 << 14, + MLX5_QP_BIT_RAE = 1 << 13, + MLX5_QP_BIT_RIC = 1 << 4, + MLX5_QP_BIT_CC_SLAVE_RECV = 1 << 2, + MLX5_QP_BIT_CC_SLAVE_SEND = 1 << 1, + MLX5_QP_BIT_CC_MASTER = 1 << 0 +}; + +enum { + MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, + MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2, + MLX5_WQE_CTRL_SOLICITED = 1 << 1, +}; + +enum { + MLX5_SEND_WQE_DS = 16, + MLX5_SEND_WQE_BB = 64, +}; + +#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS) + +enum { + MLX5_SEND_WQE_MAX_WQEBBS = 16, +}; + +enum { + MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27, + MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, + MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29, + MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30, + 
MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31 +}; + +enum { + MLX5_FENCE_MODE_NONE = 0 << 5, + MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, + MLX5_FENCE_MODE_FENCE = 2 << 5, + MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, + MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, +}; + +enum { + MLX5_RCV_DBR = 0, + MLX5_SND_DBR = 1, +}; + +enum { + MLX5_FLAGS_INLINE = 1<<7, + MLX5_FLAGS_CHECK_FREE = 1<<5, +}; + +struct mlx5_wqe_fmr_seg { + __be32 flags; + __be32 mem_key; + __be64 buf_list; + __be64 start_addr; + __be64 reg_len; + __be32 offset; + __be32 page_size; + u32 reserved[2]; +}; + +struct mlx5_wqe_ctrl_seg { + __be32 opmod_idx_opcode; + __be32 qpn_ds; + u8 signature; + u8 rsvd[2]; + u8 fm_ce_se; + __be32 imm; +}; + +#define MLX5_WQE_CTRL_DS_MASK 0x3f +#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00 +#define MLX5_WQE_CTRL_QPN_SHIFT 8 +#define MLX5_WQE_DS_UNITS 16 +#define MLX5_WQE_CTRL_OPCODE_MASK 0xff +#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 +#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 + +enum { + MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, + MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5, + MLX5_ETH_WQE_L3_CSUM = 1 << 6, + MLX5_ETH_WQE_L4_CSUM = 1 << 7, +}; + +enum { + MLX5_ETH_WQE_SVLAN = 1 << 0, + MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, +}; + +enum { + MLX5_ETH_WQE_SWP_INNER_L3_IPV6 = 1 << 0, + MLX5_ETH_WQE_SWP_INNER_L4_UDP = 1 << 1, + MLX5_ETH_WQE_SWP_OUTER_L3_IPV6 = 1 << 4, + MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5, +}; + +struct mlx5_wqe_eth_seg { + u8 swp_outer_l4_offset; + u8 swp_outer_l3_offset; + u8 swp_inner_l4_offset; + u8 swp_inner_l3_offset; + u8 cs_flags; + u8 swp_flags; + __be16 mss; + __be32 rsvd2; + union { + struct { + __be16 sz; + u8 start[2]; + } inline_hdr; + struct { + __be16 type; + __be16 vlan_tci; + } insert; + }; +}; + +struct mlx5_wqe_xrc_seg { + __be32 xrc_srqn; + u8 rsvd[12]; +}; + +struct mlx5_wqe_masked_atomic_seg { + __be64 swap_add; + __be64 compare; + __be64 swap_add_mask; + __be64 compare_mask; +}; + +struct mlx5_base_av { + union { + struct { + __be32 qkey; + __be32 reserved; + } qkey; + __be64 dc_key; + } key; + __be32 dqp_dct; + u8 stat_rate_sl; + u8 fl_mlid; + union { + __be16 rlid; + __be16 udp_sport; + }; +}; + +struct mlx5_av { + union { + struct { + __be32 qkey; + __be32 reserved; + } qkey; + __be64 dc_key; + } key; + __be32 dqp_dct; + u8 stat_rate_sl; + u8 fl_mlid; + union { + __be16 rlid; + __be16 udp_sport; + }; + u8 reserved0[4]; + u8 rmac[6]; + u8 tclass; + u8 hop_limit; + __be32 grh_gid_fl; + u8 rgid[16]; +}; + +struct mlx5_ib_ah { + struct ib_ah ibah; + struct mlx5_av av; +}; + +static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah) +{ + return container_of(ibah, struct mlx5_ib_ah, ibah); +} + +struct mlx5_wqe_datagram_seg { + struct mlx5_av av; +}; + +struct mlx5_wqe_raddr_seg { + __be64 raddr; + __be32 rkey; + u32 reserved; +}; + +struct mlx5_wqe_atomic_seg { + __be64 swap_add; + __be64 compare; +}; + +struct mlx5_wqe_data_seg { + __be32 byte_count; + __be32 lkey; + __be64 addr; +}; + +struct mlx5_wqe_umr_ctrl_seg { + u8 flags; + u8 rsvd0[3]; + __be16 xlt_octowords; + union { + __be16 xlt_offset; + __be16 bsf_octowords; + }; + __be64 mkey_mask; + __be32 xlt_offset_47_16; + u8 rsvd1[28]; +}; + +struct mlx5_seg_set_psv { + __be32 psv_num; + __be16 syndrome; + __be16 status; + __be32 transient_sig; + __be32 ref_tag; +}; + +struct mlx5_seg_get_psv { + u8 rsvd[19]; + u8 num_psv; + __be32 l_key; + __be64 va; + __be32 psv_index[4]; +}; + +struct mlx5_seg_check_psv { + u8 rsvd0[2]; + __be16 err_coalescing_op; + u8 rsvd1[2]; + __be16 xport_err_op; + u8 rsvd2[2]; + __be16 xport_err_mask; + 
u8 rsvd3[7]; + u8 num_psv; + __be32 l_key; + __be64 va; + __be32 psv_index[4]; +}; + +struct mlx5_rwqe_sig { + u8 rsvd0[4]; + u8 signature; + u8 rsvd1[11]; +}; + +struct mlx5_wqe_signature_seg { + u8 rsvd0[4]; + u8 signature; + u8 rsvd1[11]; +}; + +#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff + +struct mlx5_wqe_inline_seg { + __be32 byte_count; +}; + +enum mlx5_sig_type { + MLX5_DIF_CRC = 0x1, + MLX5_DIF_IPCS = 0x2, +}; + +struct mlx5_bsf_inl { + __be16 vld_refresh; + __be16 dif_apptag; + __be32 dif_reftag; + u8 sig_type; + u8 rp_inv_seed; + u8 rsvd[3]; + u8 dif_inc_ref_guard_check; + __be16 dif_app_bitmask_check; +}; + +struct mlx5_bsf { + struct mlx5_bsf_basic { + u8 bsf_size_sbs; + u8 check_byte_mask; + union { + u8 copy_byte_mask; + u8 bs_selector; + u8 rsvd_wflags; + } wire; + union { + u8 bs_selector; + u8 rsvd_mflags; + } mem; + __be32 raw_data_size; + __be32 w_bfs_psv; + __be32 m_bfs_psv; + } basic; + struct mlx5_bsf_ext { + __be32 t_init_gen_pro_size; + __be32 rsvd_epi_size; + __be32 w_tfs_psv; + __be32 m_tfs_psv; + } ext; + struct mlx5_bsf_inl w_inl; + struct mlx5_bsf_inl m_inl; +}; + +struct mlx5_mtt { + __be64 ptag; +}; + +struct mlx5_klm { + __be32 bcount; + __be32 key; + __be64 va; +}; + +struct mlx5_stride_block_entry { + __be16 stride; + __be16 bcount; + __be32 key; + __be64 va; +}; + +struct mlx5_stride_block_ctrl_seg { + __be32 bcount_per_cycle; + __be32 op; + __be32 repeat_count; + u16 rsvd; + __be16 num_entries; +}; + +struct mlx5_core_qp { + struct mlx5_core_rsc_common common; /* must be first */ + void (*event) (struct mlx5_core_qp *, int); + int qpn; + struct mlx5_rsc_debug *dbg; + int pid; +}; + +struct mlx5_core_dct { + struct mlx5_core_qp mqp; + struct completion drained; +}; + +struct mlx5_qp_path { + u8 fl_free_ar; + u8 rsvd3; + __be16 pkey_index; + u8 rsvd0; + u8 grh_mlid; + __be16 rlid; + u8 ackto_lt; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + union { + u8 rgid[16]; + u8 rip[16]; + }; + u8 f_dscp_ecn_prio; + u8 ecn_dscp; + __be16 udp_sport; + u8 dci_cfi_prio_sl; + u8 port; + u8 rmac[6]; +}; + +/* FIXME: use mlx5_ifc.h qpc */ +struct mlx5_qp_context { + __be32 flags; + __be32 flags_pd; + u8 mtu_msgmax; + u8 rq_size_stride; + __be16 sq_crq_size; + __be32 qp_counter_set_usr_page; + __be32 wire_qpn; + __be32 log_pg_sz_remote_qpn; + struct mlx5_qp_path pri_path; + struct mlx5_qp_path alt_path; + __be32 params1; + u8 reserved2[4]; + __be32 next_send_psn; + __be32 cqn_send; + __be32 deth_sqpn; + u8 reserved3[4]; + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 xrcd; + __be32 cqn_recv; + __be64 db_rec_addr; + __be32 qkey; + __be32 rq_type_srqn; + __be32 rmsn; + __be16 hw_sq_wqe_counter; + __be16 sw_sq_wqe_counter; + __be16 hw_rcyclic_byte_counter; + __be16 hw_rq_counter; + __be16 sw_rcyclic_byte_counter; + __be16 sw_rq_counter; + u8 rsvd0[5]; + u8 cgs; + u8 cs_req; + u8 cs_res; + __be64 dc_access_key; + u8 rsvd1[24]; +}; + +static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) +{ + return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); +} + +static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key) +{ + return radix_tree_lookup(&dev->priv.mkey_table.tree, key); +} + +int mlx5_core_create_dct(struct mlx5_core_dev *dev, + struct mlx5_core_dct *qp, + u32 *in, int inlen); +int mlx5_core_create_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp, + u32 *in, + int inlen); +int mlx5_core_qp_modify(struct mlx5_core_dev 
*dev, u16 opcode, + u32 opt_param_mask, void *qpc, + struct mlx5_core_qp *qp); +int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp); +int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, + struct mlx5_core_dct *dct); +int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + u32 *out, int outlen); +int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, + u32 *out, int outlen); + +int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, + u32 timeout_usec); + +int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); +int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); +void mlx5_init_qp_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); +int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); +void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); +int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, + struct mlx5_core_qp *rq); +void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, + struct mlx5_core_qp *rq); +int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, + struct mlx5_core_qp *sq); +void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, + struct mlx5_core_qp *sq); +int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id); +int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); +int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, + int reset, void *out, int out_size); + +static inline const char *mlx5_qp_type_str(int type) +{ + switch (type) { + case MLX5_QP_ST_RC: return "RC"; + case MLX5_QP_ST_UC: return "C"; + case MLX5_QP_ST_UD: return "UD"; + case MLX5_QP_ST_XRC: return "XRC"; + case MLX5_QP_ST_MLX: return "MLX"; + case MLX5_QP_ST_QP0: return "QP0"; + case MLX5_QP_ST_QP1: return "QP1"; + case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE"; + case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6"; + case MLX5_QP_ST_SNIFFER: return "SNIFFER"; + case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR"; + case MLX5_QP_ST_PTP_1588: return "PTP_1588"; + case MLX5_QP_ST_REG_UMR: return "REG_UMR"; + default: return "Invalid transport type"; + } +} + +static inline const char *mlx5_qp_state_str(int state) +{ + switch (state) { + case MLX5_QP_STATE_RST: + return "RST"; + case MLX5_QP_STATE_INIT: + return "INIT"; + case MLX5_QP_STATE_RTR: + return "RTR"; + case MLX5_QP_STATE_RTS: + return "RTS"; + case MLX5_QP_STATE_SQER: + return "SQER"; + case MLX5_QP_STATE_SQD: + return "SQD"; + case MLX5_QP_STATE_ERR: + return "ERR"; + case MLX5_QP_STATE_SQ_DRAINING: + return "SQ_DRAINING"; + case MLX5_QP_STATE_SUSPENDED: + return "SUSPENDED"; + default: return "Invalid QP state"; + } +} + +#endif /* MLX5_QP_H */ diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h new file mode 100644 index 000000000..24ff23e27 --- /dev/null +++ b/include/linux/mlx5/srq.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_SRQ_H +#define MLX5_SRQ_H + +#include + +enum { + MLX5_SRQ_FLAG_ERR = (1 << 0), + MLX5_SRQ_FLAG_WQ_SIG = (1 << 1), + MLX5_SRQ_FLAG_RNDV = (1 << 2), +}; + +struct mlx5_srq_attr { + u32 type; + u32 flags; + u32 log_size; + u32 wqe_shift; + u32 log_page_size; + u32 wqe_cnt; + u32 srqn; + u32 xrcd; + u32 page_offset; + u32 cqn; + u32 pd; + u32 lwm; + u32 user_index; + u64 db_record; + __be64 *pas; + u32 tm_log_list_size; + u32 tm_next_tag; + u32 tm_hw_phase_cnt; + u32 tm_sw_phase_cnt; +}; + +struct mlx5_core_dev; + +void mlx5_init_srq_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev); + +#endif /* MLX5_SRQ_H */ diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h new file mode 100644 index 000000000..7f5ca2cd3 --- /dev/null +++ b/include/linux/mlx5/transobj.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __TRANSOBJ_H__ +#define __TRANSOBJ_H__ + +#include + +int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn); +void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn); +int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rqn); +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); +void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); +int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out); +int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *sqn); +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); +void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); +int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out); +int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state); +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tirn); +int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, + int inlen); +void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tisn); +int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, + int inlen); +void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); +int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn); +int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen); +int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn); +int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); +int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); +int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn); +int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); +int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); + +int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rqtn); +int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, + int inlen); +void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); + +struct mlx5_hairpin_params { + u8 log_data_size; + u8 log_num_packets; + u16 q_counter; + int num_channels; +}; + +struct mlx5_hairpin { + struct mlx5_core_dev *func_mdev; + struct mlx5_core_dev *peer_mdev; + + int num_channels; + + u32 *rqn; + u32 *sqn; + + bool peer_gone; +}; + +struct mlx5_hairpin * +mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev, + struct mlx5_core_dev *peer_mdev, + struct mlx5_hairpin_params *params); + +void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair); +#endif /* __TRANSOBJ_H__ */ diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h new file mode 100644 index 000000000..7e7c6dfcf --- /dev/null +++ b/include/linux/mlx5/vport.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_VPORT_H__ +#define __MLX5_VPORT_H__ + +#include +#include + +enum { + MLX5_CAP_INLINE_MODE_L2, + MLX5_CAP_INLINE_MODE_VPORT_CONTEXT, + MLX5_CAP_INLINE_MODE_NOT_REQUIRED, +}; + +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); +int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, + u16 vport, u8 state); +int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, + u16 vport, u8 *addr); +int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u16 vport, u8 *min_inline); +void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); +int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u16 vport, u8 min_inline); +int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, + u16 vport, u8 *addr); +int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); +int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); +int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, + u64 *system_image_guid); +int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); +int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, + u32 vport, u64 node_guid); +int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, + u16 *qkey_viol_cntr); +int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid); +int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey); +int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct mlx5_hca_vport_context *rep); +int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, + u64 *sys_image_guid); +int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, + u64 *node_guid); +int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, + u32 vport, + enum mlx5_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size); +int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, + enum mlx5_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size); +int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, + u32 vport, + int *promisc_uc, + int 
*promisc_mc, + int *promisc_all); +int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, + int promisc_uc, + int promisc_mc, + int promisc_all); +int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, + u32 vport, + u16 vlans[], + int *size); +int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, + u16 vlans[], + int list_size); + +int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev); +int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev); +int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, + u64 *rx_discard_vport_down, + u64 *tx_discard_vport_down); +int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, + int vf, u8 port_num, void *out, + size_t out_sz); +int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, + u8 other_vport, u8 port_num, + int vf, + struct mlx5_hca_vport_context *req); +int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable); +int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status); + +int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, + struct mlx5_core_dev *port_mdev); +int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev); +#endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h new file mode 100644 index 000000000..4efc3f56e --- /dev/null +++ b/include/linux/mm-arch-hooks.h @@ -0,0 +1,25 @@ +/* + * Generic mm no-op hooks. + * + * Copyright (C) 2015, IBM Corporation + * Author: Laurent Dufour + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_MM_ARCH_HOOKS_H +#define _LINUX_MM_ARCH_HOOKS_H + +#include + +#ifndef arch_remap +static inline void arch_remap(struct mm_struct *mm, + unsigned long old_start, unsigned long old_end, + unsigned long new_start, unsigned long new_end) +{ +} +#define arch_remap arch_remap +#endif + +#endif /* _LINUX_MM_ARCH_HOOKS_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h new file mode 100644 index 000000000..26a5fba22 --- /dev/null +++ b/include/linux/mm.h @@ -0,0 +1,2842 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MM_H +#define _LINUX_MM_H + +#include + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct mempolicy; +struct anon_vma; +struct anon_vma_chain; +struct file_ra_state; +struct user_struct; +struct writeback_control; +struct bdi_writeback; + +void init_mm_internals(void); + +#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ +extern unsigned long max_mapnr; + +static inline void set_max_mapnr(unsigned long limit) +{ + max_mapnr = limit; +} +#else +static inline void set_max_mapnr(unsigned long limit) { } +#endif + +extern unsigned long totalram_pages; +extern void * high_memory; +extern int page_cluster; + +#ifdef CONFIG_SYSCTL +extern int sysctl_legacy_va_layout; +#else +#define sysctl_legacy_va_layout 0 +#endif + +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS +extern const int mmap_rnd_bits_min; +extern const int mmap_rnd_bits_max; +extern int mmap_rnd_bits __read_mostly; +#endif +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS +extern const int mmap_rnd_compat_bits_min; +extern const int mmap_rnd_compat_bits_max; +extern int mmap_rnd_compat_bits 
__read_mostly; +#endif + +#include +#include +#include + +#ifndef __pa_symbol +#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) +#endif + +#ifndef page_to_virt +#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) +#endif + +#ifndef lm_alias +#define lm_alias(x) __va(__pa_symbol(x)) +#endif + +/* + * To prevent common memory management code establishing + * a zero page mapping on a read fault. + * This macro should be defined within . + * s390 does this to prevent multiplexing of hardware bits + * related to the physical page in case of virtualization. + */ +#ifndef mm_forbids_zeropage +#define mm_forbids_zeropage(X) (0) +#endif + +/* + * On some architectures it is expensive to call memset() for small sizes. + * Those architectures should provide their own implementation of "struct page" + * zeroing by defining this macro in . + */ +#ifndef mm_zero_struct_page +#define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page))) +#endif + +/* + * Default maximum number of active map areas, this limits the number of vmas + * per mm struct. Users can overwrite this number by sysctl but there is a + * problem. + * + * When a program's coredump is generated as ELF format, a section is created + * per a vma. In ELF, the number of sections is represented in unsigned short. + * This means the number of sections should be smaller than 65535 at coredump. + * Because the kernel adds some informative sections to a image of program at + * generating coredump, we need some margin. The number of extra sections is + * 1-3 now and depends on arch. We use "5" as safe margin, here. + * + * ELF extended numbering allows more than 65535 sections, so 16-bit bound is + * not a hard limit any more. Although some userspace tools can be surprised by + * that. + */ +#define MAPCOUNT_ELF_CORE_MARGIN (5) +#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) + +extern int sysctl_max_map_count; + +extern unsigned long sysctl_user_reserve_kbytes; +extern unsigned long sysctl_admin_reserve_kbytes; + +extern int sysctl_overcommit_memory; +extern int sysctl_overcommit_ratio; +extern unsigned long sysctl_overcommit_kbytes; + +extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *, + size_t *, loff_t *); +extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, + size_t *, loff_t *); + +#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) + +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) + +/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ +#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) + +/* + * Linux kernel virtual memory manager primitives. + * The idea being to have a "virtual" mm in the same way + * we have a virtual fs - giving a cleaner interface to the + * mm details, and allowing different kinds of memory mappings + * (from shared memory to executable loading to arbitrary + * mmap() functions). + */ + +struct vm_area_struct *vm_area_alloc(struct mm_struct *); +struct vm_area_struct *vm_area_dup(struct vm_area_struct *); +void vm_area_free(struct vm_area_struct *); + +#ifndef CONFIG_MMU +extern struct rb_root nommu_region_tree; +extern struct rw_semaphore nommu_region_sem; + +extern unsigned int kobjsize(const void *objp); +#endif + +/* + * vm_flags in vm_area_struct, see mm_types.h. 
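+ * (The low four bits, VM_READ, VM_WRITE, VM_EXEC and VM_SHARED, also
+ * index the protection_map[] table declared later in this file.)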
+ * When changing, update also include/trace/events/mmflags.h + */ +#define VM_NONE 0x00000000 + +#define VM_READ 0x00000001 /* currently active flags */ +#define VM_WRITE 0x00000002 +#define VM_EXEC 0x00000004 +#define VM_SHARED 0x00000008 + +/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */ +#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */ +#define VM_MAYWRITE 0x00000020 +#define VM_MAYEXEC 0x00000040 +#define VM_MAYSHARE 0x00000080 + +#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ +#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ +#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ +#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ +#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ + +#define VM_LOCKED 0x00002000 +#define VM_IO 0x00004000 /* Memory mapped I/O or similar */ + + /* Used by sys_madvise() */ +#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */ +#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */ + +#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ +#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ +#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */ +#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ +#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ +#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ +#define VM_SYNC 0x00800000 /* Synchronous page faults */ +#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ +#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */ +#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ + +#ifdef CONFIG_MEM_SOFT_DIRTY +# define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */ +#else +# define VM_SOFTDIRTY 0 +#endif + +#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ +#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */ +#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */ +#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ + +#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS +#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */ +#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */ +#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */ +#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */ +#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */ +#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0) +#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1) +#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2) +#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3) +#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4) +#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */ + +#ifdef CONFIG_ARCH_HAS_PKEYS +# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 +# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */ +# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */ +# define VM_PKEY_BIT2 VM_HIGH_ARCH_2 +# define VM_PKEY_BIT3 VM_HIGH_ARCH_3 +#ifdef CONFIG_PPC +# define VM_PKEY_BIT4 VM_HIGH_ARCH_4 +#else +# define VM_PKEY_BIT4 0 +#endif +#endif /* CONFIG_ARCH_HAS_PKEYS */ + +#if defined(CONFIG_X86) +# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ +#elif defined(CONFIG_PPC) +# define VM_SAO VM_ARCH_1 /* Strong Access Ordering 
(powerpc) */ +#elif defined(CONFIG_PARISC) +# define VM_GROWSUP VM_ARCH_1 +#elif defined(CONFIG_IA64) +# define VM_GROWSUP VM_ARCH_1 +#elif defined(CONFIG_SPARC64) +# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ +# define VM_ARCH_CLEAR VM_SPARC_ADI +#elif !defined(CONFIG_MMU) +# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ +#endif + +#if defined(CONFIG_X86_INTEL_MPX) +/* MPX specific bounds table or bounds directory */ +# define VM_MPX VM_HIGH_ARCH_4 +#else +# define VM_MPX VM_NONE +#endif + +#ifndef VM_GROWSUP +# define VM_GROWSUP VM_NONE +#endif + +/* Bits set in the VMA until the stack is in its final location */ +#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) + +#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ +#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS +#endif + +#ifdef CONFIG_STACK_GROWSUP +#define VM_STACK VM_GROWSUP +#else +#define VM_STACK VM_GROWSDOWN +#endif + +#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) + +/* + * Special vmas that are non-mergable, non-mlock()able. + * Note: mm/huge_memory.c VM_NO_THP depends on this definition. + */ +#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) + +/* This mask defines which mm->def_flags a process can inherit its parent */ +#define VM_INIT_DEF_MASK VM_NOHUGEPAGE + +/* This mask is used to clear all the VMA flags used by mlock */ +#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) + +/* Arch-specific flags to clear when updating VM flags on protection change */ +#ifndef VM_ARCH_CLEAR +# define VM_ARCH_CLEAR VM_NONE +#endif +#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR) + +/* + * mapping from the currently active vm_flags protection bits (the + * low four bits) to a page protection mask.. + */ +extern pgprot_t protection_map[16]; + +#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ +#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */ +#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */ +#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */ +#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */ +#define FAULT_FLAG_TRIED 0x20 /* Second try */ +#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */ +#define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */ +#define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */ + +#define FAULT_FLAG_TRACE \ + { FAULT_FLAG_WRITE, "WRITE" }, \ + { FAULT_FLAG_MKWRITE, "MKWRITE" }, \ + { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \ + { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \ + { FAULT_FLAG_KILLABLE, "KILLABLE" }, \ + { FAULT_FLAG_TRIED, "TRIED" }, \ + { FAULT_FLAG_USER, "USER" }, \ + { FAULT_FLAG_REMOTE, "REMOTE" }, \ + { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" } + +/* + * vm_fault is filled by the the pagefault handler and passed to the vma's + * ->fault function. The vma's ->fault is responsible for returning a bitmask + * of VM_FAULT_xxx flags that give details about how the fault was handled. + * + * MM layer fills up gfp_mask for page allocations but fault handler might + * alter it if its implementation requires a different allocation context. + * + * pgoff should be used in favour of virtual_address, if possible. 
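+ *
+ * A minimal ->fault handler, as a rough illustration only
+ * (my_lookup_page() is a placeholder, not a real helper):
+ *
+ *	static vm_fault_t my_fault(struct vm_fault *vmf)
+ *	{
+ *		struct page *page = my_lookup_page(vmf->vma, vmf->pgoff);
+ *
+ *		if (!page)
+ *			return VM_FAULT_SIGBUS;
+ *		get_page(page);	/* returned page must hold a reference */
+ *		vmf->page = page;
+ *		return 0;
+ *	}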
+ */ +struct vm_fault { + struct vm_area_struct *vma; /* Target VMA */ + unsigned int flags; /* FAULT_FLAG_xxx flags */ + gfp_t gfp_mask; /* gfp mask to be used for allocations */ + pgoff_t pgoff; /* Logical page offset based on vma */ + unsigned long address; /* Faulting virtual address */ + pmd_t *pmd; /* Pointer to pmd entry matching + * the 'address' */ + pud_t *pud; /* Pointer to pud entry matching + * the 'address' + */ + pte_t orig_pte; /* Value of PTE at the time of fault */ + + struct page *cow_page; /* Page handler may use for COW fault */ + struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */ + struct page *page; /* ->fault handlers should return a + * page here, unless VM_FAULT_NOPAGE + * is set (which is also implied by + * VM_FAULT_ERROR). + */ + /* These three entries are valid only while holding ptl lock */ + pte_t *pte; /* Pointer to pte entry matching + * the 'address'. NULL if the page + * table hasn't been allocated. + */ + spinlock_t *ptl; /* Page table lock. + * Protects pte page table if 'pte' + * is not NULL, otherwise pmd. + */ + pgtable_t prealloc_pte; /* Pre-allocated pte page table. + * vm_ops->map_pages() calls + * alloc_set_pte() from atomic context. + * do_fault_around() pre-allocates + * page table to avoid allocation from + * atomic context. + */ +}; + +/* page entry size for vm->huge_fault() */ +enum page_entry_size { + PE_SIZE_PTE = 0, + PE_SIZE_PMD, + PE_SIZE_PUD, +}; + +/* + * These are the virtual MM functions - opening of an area, closing and + * unmapping it (needed to keep files on disk up-to-date etc), pointer + * to the functions called when a no-page or a wp-page exception occurs. + */ +struct vm_operations_struct { + void (*open)(struct vm_area_struct * area); + void (*close)(struct vm_area_struct * area); + int (*split)(struct vm_area_struct * area, unsigned long addr); + int (*mremap)(struct vm_area_struct * area); + vm_fault_t (*fault)(struct vm_fault *vmf); + vm_fault_t (*huge_fault)(struct vm_fault *vmf, + enum page_entry_size pe_size); + void (*map_pages)(struct vm_fault *vmf, + pgoff_t start_pgoff, pgoff_t end_pgoff); + unsigned long (*pagesize)(struct vm_area_struct * area); + + /* notification that a previously read-only page is about to become + * writable, if an error is returned it will cause a SIGBUS */ + vm_fault_t (*page_mkwrite)(struct vm_fault *vmf); + + /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ + vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); + + /* called by access_process_vm when get_user_pages() fails, typically + * for use by special VMAs that can switch between memory and hardware + */ + int (*access)(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); + + /* Called by the /proc/PID/maps code to ask the vma whether it + * has a special name. Returning non-NULL will also cause this + * vma to be dumped unconditionally. */ + const char *(*name)(struct vm_area_struct *vma); + +#ifdef CONFIG_NUMA + /* + * set_policy() op must add a reference to any non-NULL @new mempolicy + * to hold the policy upon return. Caller should pass NULL @new to + * remove a policy and fall back to surrounding context--i.e. do not + * install a MPOL_DEFAULT policy, nor the task or system default + * mempolicy. + */ + int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); + + /* + * get_policy() op must add reference [mpol_get()] to any policy at + * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure + * in mm/mempolicy.c will do this automatically. 
+ * get_policy() must NOT add a ref if the policy at (vma,addr) is not + * marked as MPOL_SHARED. vma policies are protected by the mmap_sem. + * If no [shared/vma] mempolicy exists at the addr, get_policy() op + * must return NULL--i.e., do not "fallback" to task or system default + * policy. + */ + struct mempolicy *(*get_policy)(struct vm_area_struct *vma, + unsigned long addr); +#endif + /* + * Called by vm_normal_page() for special PTEs to find the + * page for @addr. This is useful if the default behavior + * (using pte_page()) would not find the correct page. + */ + struct page *(*find_special_page)(struct vm_area_struct *vma, + unsigned long addr); +}; + +static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) +{ + static const struct vm_operations_struct dummy_vm_ops = {}; + + memset(vma, 0, sizeof(*vma)); + vma->vm_mm = mm; + vma->vm_ops = &dummy_vm_ops; + INIT_LIST_HEAD(&vma->anon_vma_chain); +} + +static inline void vma_set_anonymous(struct vm_area_struct *vma) +{ + vma->vm_ops = NULL; +} + +/* flush_tlb_range() takes a vma, not a mm, and can care about flags */ +#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } + +struct mmu_gather; +struct inode; + +#define page_private(page) ((page)->private) +#define set_page_private(page, v) ((page)->private = (v)) + +#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) +static inline int pmd_devmap(pmd_t pmd) +{ + return 0; +} +static inline int pud_devmap(pud_t pud) +{ + return 0; +} +static inline int pgd_devmap(pgd_t pgd) +{ + return 0; +} +#endif + +/* + * FIXME: take this include out, include page-flags.h in + * files which need it (119 of them) + */ +#include +#include + +/* + * Methods to modify the page usage count. + * + * What counts for a page usage: + * - cache mapping (page->mapping) + * - private data (page->private) + * - page mapped in a task's page tables, each mapping + * is counted separately + * + * Also, many kernel routines increase the page count before a critical + * routine so they can be sure the page doesn't go away from under them. + */ + +/* + * Drop a ref, return true if the refcount fell to zero (the page has no users) + */ +static inline int put_page_testzero(struct page *page) +{ + VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); + return page_ref_dec_and_test(page); +} + +/* + * Try to grab a ref unless the page has a refcount of zero, return false if + * that is the case. + * This can be called when MMU is off so it must not access + * any of the virtual mappings. + */ +static inline int get_page_unless_zero(struct page *page) +{ + return page_ref_add_unless(page, 1, 0); +} + +extern int page_is_ram(unsigned long pfn); + +enum { + REGION_INTERSECTS, + REGION_DISJOINT, + REGION_MIXED, +}; + +int region_intersects(resource_size_t offset, size_t size, unsigned long flags, + unsigned long desc); + +/* Support for virtually mapped pages */ +struct page *vmalloc_to_page(const void *addr); +unsigned long vmalloc_to_pfn(const void *addr); + +/* + * Determine if an address is within the vmalloc range + * + * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there + * is no special casing required. 
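+ *
+ * For example, a buffer that may have come from either allocator can be
+ * released with:
+ *
+ *	if (is_vmalloc_addr(ptr))
+ *		vfree(ptr);
+ *	else
+ *		kfree(ptr);
+ *
+ * which is essentially what kvfree(), declared below, does.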
+ */ +static inline bool is_vmalloc_addr(const void *x) +{ +#ifdef CONFIG_MMU + unsigned long addr = (unsigned long)x; + + return addr >= VMALLOC_START && addr < VMALLOC_END; +#else + return false; +#endif +} +#ifdef CONFIG_MMU +extern int is_vmalloc_or_module_addr(const void *x); +#else +static inline int is_vmalloc_or_module_addr(const void *x) +{ + return 0; +} +#endif + +extern void *kvmalloc_node(size_t size, gfp_t flags, int node); +static inline void *kvmalloc(size_t size, gfp_t flags) +{ + return kvmalloc_node(size, flags, NUMA_NO_NODE); +} +static inline void *kvzalloc_node(size_t size, gfp_t flags, int node) +{ + return kvmalloc_node(size, flags | __GFP_ZERO, node); +} +static inline void *kvzalloc(size_t size, gfp_t flags) +{ + return kvmalloc(size, flags | __GFP_ZERO); +} + +static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + + return kvmalloc(bytes, flags); +} + +static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) +{ + return kvmalloc_array(n, size, flags | __GFP_ZERO); +} + +extern void kvfree(const void *addr); +extern void kvfree_sensitive(const void *addr, size_t len); + +/* + * Mapcount of compound page as a whole, does not include mapped sub-pages. + * + * Must be called only for compound pages or any their tail sub-pages. + */ +static inline int compound_mapcount(struct page *page) +{ + VM_BUG_ON_PAGE(!PageCompound(page), page); + page = compound_head(page); + return atomic_read(compound_mapcount_ptr(page)) + 1; +} + +/* + * The atomic page->_mapcount, starts from -1: so that transitions + * both from it and to it can be tracked, using atomic_inc_and_test + * and atomic_add_negative(-1). + */ +static inline void page_mapcount_reset(struct page *page) +{ + atomic_set(&(page)->_mapcount, -1); +} + +int __page_mapcount(struct page *page); + +/* + * Mapcount of 0-order page; when compound sub-page, includes + * compound_mapcount(). + * + * Result is undefined for pages which cannot be mapped into userspace. + * For example SLAB or special types of pages. See function page_has_type(). + * They use this place in struct page differently. + */ +static inline int page_mapcount(struct page *page) +{ + if (unlikely(PageCompound(page))) + return __page_mapcount(page); + return atomic_read(&page->_mapcount) + 1; +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +int total_mapcount(struct page *page); +int page_trans_huge_mapcount(struct page *page, int *total_mapcount); +#else +static inline int total_mapcount(struct page *page) +{ + return page_mapcount(page); +} +static inline int page_trans_huge_mapcount(struct page *page, + int *total_mapcount) +{ + int mapcount = page_mapcount(page); + if (total_mapcount) + *total_mapcount = mapcount; + return mapcount; +} +#endif + +static inline struct page *virt_to_head_page(const void *x) +{ + struct page *page = virt_to_page(x); + + return compound_head(page); +} + +void __put_page(struct page *page); + +void put_pages_list(struct list_head *pages); + +void split_page(struct page *page, unsigned int order); + +/* + * Compound pages have a destructor function. Provide a + * prototype for that function and accessor functions. + * These are _only_ valid on the head of a compound page. 
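+ *
+ * A rough sketch of the intended usage: the allocator tags a head page
+ * with set_compound_page_dtor(page, COMPOUND_PAGE_DTOR) and the page is
+ * later torn down via (*get_compound_page_dtor(page))(page).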
+ */ +typedef void compound_page_dtor(struct page *); + +/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */ +enum compound_dtor_id { + NULL_COMPOUND_DTOR, + COMPOUND_PAGE_DTOR, +#ifdef CONFIG_HUGETLB_PAGE + HUGETLB_PAGE_DTOR, +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + TRANSHUGE_PAGE_DTOR, +#endif + NR_COMPOUND_DTORS, +}; +extern compound_page_dtor * const compound_page_dtors[]; + +static inline void set_compound_page_dtor(struct page *page, + enum compound_dtor_id compound_dtor) +{ + VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page); + page[1].compound_dtor = compound_dtor; +} + +static inline compound_page_dtor *get_compound_page_dtor(struct page *page) +{ + VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page); + return compound_page_dtors[page[1].compound_dtor]; +} + +static inline unsigned int compound_order(struct page *page) +{ + if (!PageHead(page)) + return 0; + return page[1].compound_order; +} + +static inline void set_compound_order(struct page *page, unsigned int order) +{ + page[1].compound_order = order; +} + +void free_compound_page(struct page *page); + +#ifdef CONFIG_MMU +/* + * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when + * servicing faults for write access. In the normal case, do always want + * pte_mkwrite. But get_user_pages can cause write faults for mappings + * that do not have writing enabled, when used by access_process_vm. + */ +static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) +{ + if (likely(vma->vm_flags & VM_WRITE)) + pte = pte_mkwrite(pte); + return pte; +} + +vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, + struct page *page); +vm_fault_t finish_fault(struct vm_fault *vmf); +vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); +#endif + +/* + * Multiple processes may "see" the same page. E.g. for untouched + * mappings of /dev/null, all processes see the same page full of + * zeroes, and text pages of executables and shared libraries have + * only one copy in memory, at most, normally. + * + * For the non-reserved pages, page_count(page) denotes a reference count. + * page_count() == 0 means the page is free. page->lru is then used for + * freelist management in the buddy allocator. + * page_count() > 0 means the page has been allocated. + * + * Pages are allocated by the slab allocator in order to provide memory + * to kmalloc and kmem_cache_alloc. In this case, the management of the + * page, and the fields in 'struct page' are the responsibility of mm/slab.c + * unless a particular usage is carefully commented. (the responsibility of + * freeing the kmalloc memory is the caller's, of course). + * + * A page may be used by anyone else who does a __get_free_page(). + * In this case, page_count still tracks the references, and should only + * be used through the normal accessor functions. The top bits of page->flags + * and page->virtual store page management information, but all other fields + * are unused and could be used privately, carefully. The management of this + * page is the responsibility of the one who allocated it, and those who have + * subsequently been given references to it. + * + * The other pages (we may call them "pagecache pages") are completely + * managed by the Linux memory manager: I/O, buffers, swapping etc. + * The following discussion applies only to them. + * + * A pagecache page contains an opaque `private' member, which belongs to the + * page's address_space. 
Usually, this is the address of a circular list of + * the page's disk buffers. PG_private must be set to tell the VM to call + * into the filesystem to release these pages. + * + * A page may belong to an inode's memory mapping. In this case, page->mapping + * is the pointer to the inode, and page->index is the file offset of the page, + * in units of PAGE_SIZE. + * + * If pagecache pages are not associated with an inode, they are said to be + * anonymous pages. These may become associated with the swapcache, and in that + * case PG_swapcache is set, and page->private is an offset into the swapcache. + * + * In either case (swapcache or inode backed), the pagecache itself holds one + * reference to the page. Setting PG_private should also increment the + * refcount. The each user mapping also has a reference to the page. + * + * The pagecache pages are stored in a per-mapping radix tree, which is + * rooted at mapping->i_pages, and indexed by offset. + * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space + * lists, we instead now tag pages as dirty/writeback in the radix tree. + * + * All pagecache pages may be subject to I/O: + * - inode pages may need to be read from disk, + * - inode pages which have been modified and are MAP_SHARED may need + * to be written back to the inode on disk, + * - anonymous pages (including MAP_PRIVATE file mappings) which have been + * modified may need to be swapped out to swap space and (later) to be read + * back into memory. + */ + +/* + * The zone field is never updated after free_area_init_core() + * sets it, so none of the operations on it need to be atomic. + */ + +/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ +#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) +#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) +#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) +#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) + +/* + * Define the bit shifts to access each section. For non-existent + * sections we define the shift as 0; that plus a 0 mask ensures + * the compiler will optimise away reference to them. + */ +#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) +#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) +#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) +#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) + +/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ +#ifdef NODE_NOT_IN_PAGE_FLAGS +#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) +#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \ + SECTIONS_PGOFF : ZONES_PGOFF) +#else +#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) +#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? 
\ + NODES_PGOFF : ZONES_PGOFF) +#endif + +#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) + +#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS +#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS +#endif + +#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) +#define NODES_MASK ((1UL << NODES_WIDTH) - 1) +#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) +#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) +#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) + +static inline enum zone_type page_zonenum(const struct page *page) +{ + return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; +} + +#ifdef CONFIG_ZONE_DEVICE +static inline bool is_zone_device_page(const struct page *page) +{ + return page_zonenum(page) == ZONE_DEVICE; +} +#else +static inline bool is_zone_device_page(const struct page *page) +{ + return false; +} +#endif + +#ifdef CONFIG_DEV_PAGEMAP_OPS +void dev_pagemap_get_ops(void); +void dev_pagemap_put_ops(void); +void __put_devmap_managed_page(struct page *page); +DECLARE_STATIC_KEY_FALSE(devmap_managed_key); +static inline bool put_devmap_managed_page(struct page *page) +{ + if (!static_branch_unlikely(&devmap_managed_key)) + return false; + if (!is_zone_device_page(page)) + return false; + switch (page->pgmap->type) { + case MEMORY_DEVICE_PRIVATE: + case MEMORY_DEVICE_PUBLIC: + case MEMORY_DEVICE_FS_DAX: + __put_devmap_managed_page(page); + return true; + default: + break; + } + return false; +} + +static inline bool is_device_private_page(const struct page *page) +{ + return is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PRIVATE; +} + +static inline bool is_device_public_page(const struct page *page) +{ + return is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PUBLIC; +} + +#else /* CONFIG_DEV_PAGEMAP_OPS */ +static inline void dev_pagemap_get_ops(void) +{ +} + +static inline void dev_pagemap_put_ops(void) +{ +} + +static inline bool put_devmap_managed_page(struct page *page) +{ + return false; +} + +static inline bool is_device_private_page(const struct page *page) +{ + return false; +} + +static inline bool is_device_public_page(const struct page *page) +{ + return false; +} +#endif /* CONFIG_DEV_PAGEMAP_OPS */ + +/* 127: arbitrary random number, small enough to assemble well */ +#define page_ref_zero_or_close_to_overflow(page) \ + ((unsigned int) page_ref_count(page) + 127u <= 127u) + +static inline void get_page(struct page *page) +{ + page = compound_head(page); + /* + * Getting a normal page or the head of a compound page + * requires to already have an elevated page->_refcount. + */ + VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page); + page_ref_inc(page); +} + +static inline __must_check bool try_get_page(struct page *page) +{ + page = compound_head(page); + if (WARN_ON_ONCE(page_ref_count(page) <= 0)) + return false; + page_ref_inc(page); + return true; +} + +static inline void put_page(struct page *page) +{ + page = compound_head(page); + + /* + * For devmap managed pages we need to catch refcount transition from + * 2 to 1, when refcount reach one it means the page is free and we + * need to inform the device driver through callback. See + * include/linux/memremap.h and HMM for details. 
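The reference-counting primitives here (get_page(), try_get_page(), and put_page(), whose body continues just below) follow a simple pattern: take a reference before touching a page you do not otherwise own, drop it when done. A hedged sketch with an invented name:

#include <linux/mm.h>
#include <linux/page-flags.h>

static bool sketch_page_currently_anon(struct page *page)
{
        bool anon;

        if (!try_get_page(page))        /* refuses pages whose refcount already hit zero */
                return false;
        anon = PageAnon(page);
        put_page(page);                 /* may free the page via __put_page() */
        return anon;
}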
+ */ + if (put_devmap_managed_page(page)) + return; + + if (put_page_testzero(page)) + __put_page(page); +} + +#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) +#define SECTION_IN_PAGE_FLAGS +#endif + +/* + * The identification function is mainly used by the buddy allocator for + * determining if two pages could be buddies. We are not really identifying + * the zone since we could be using the section number id if we do not have + * node id available in page flags. + * We only guarantee that it will return the same value for two combinable + * pages in a zone. + */ +static inline int page_zone_id(struct page *page) +{ + return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; +} + +#ifdef NODE_NOT_IN_PAGE_FLAGS +extern int page_to_nid(const struct page *page); +#else +static inline int page_to_nid(const struct page *page) +{ + struct page *p = (struct page *)page; + + return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK; +} +#endif + +#ifdef CONFIG_NUMA_BALANCING +static inline int cpu_pid_to_cpupid(int cpu, int pid) +{ + return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK); +} + +static inline int cpupid_to_pid(int cpupid) +{ + return cpupid & LAST__PID_MASK; +} + +static inline int cpupid_to_cpu(int cpupid) +{ + return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK; +} + +static inline int cpupid_to_nid(int cpupid) +{ + return cpu_to_node(cpupid_to_cpu(cpupid)); +} + +static inline bool cpupid_pid_unset(int cpupid) +{ + return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK); +} + +static inline bool cpupid_cpu_unset(int cpupid) +{ + return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK); +} + +static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) +{ + return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid); +} + +#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid) +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS +static inline int page_cpupid_xchg_last(struct page *page, int cpupid) +{ + return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); +} + +static inline int page_cpupid_last(struct page *page) +{ + return page->_last_cpupid; +} +static inline void page_cpupid_reset_last(struct page *page) +{ + page->_last_cpupid = -1 & LAST_CPUPID_MASK; +} +#else +static inline int page_cpupid_last(struct page *page) +{ + return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; +} + +extern int page_cpupid_xchg_last(struct page *page, int cpupid); + +static inline void page_cpupid_reset_last(struct page *page) +{ + page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; +} +#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ +#else /* !CONFIG_NUMA_BALANCING */ +static inline int page_cpupid_xchg_last(struct page *page, int cpupid) +{ + return page_to_nid(page); /* XXX */ +} + +static inline int page_cpupid_last(struct page *page) +{ + return page_to_nid(page); /* XXX */ +} + +static inline int cpupid_to_nid(int cpupid) +{ + return -1; +} + +static inline int cpupid_to_pid(int cpupid) +{ + return -1; +} + +static inline int cpupid_to_cpu(int cpupid) +{ + return -1; +} + +static inline int cpu_pid_to_cpupid(int nid, int pid) +{ + return -1; +} + +static inline bool cpupid_pid_unset(int cpupid) +{ + return 1; +} + +static inline void page_cpupid_reset_last(struct page *page) +{ +} + +static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) +{ + return false; +} +#endif /* CONFIG_NUMA_BALANCING */ + +static inline struct zone *page_zone(const struct page *page) +{ + return 
&NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; +} + +static inline pg_data_t *page_pgdat(const struct page *page) +{ + return NODE_DATA(page_to_nid(page)); +} + +#ifdef SECTION_IN_PAGE_FLAGS +static inline void set_page_section(struct page *page, unsigned long section) +{ + page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); + page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; +} + +static inline unsigned long page_to_section(const struct page *page) +{ + return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; +} +#endif + +static inline void set_page_zone(struct page *page, enum zone_type zone) +{ + page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); + page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; +} + +static inline void set_page_node(struct page *page, unsigned long node) +{ + page->flags &= ~(NODES_MASK << NODES_PGSHIFT); + page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; +} + +static inline void set_page_links(struct page *page, enum zone_type zone, + unsigned long node, unsigned long pfn) +{ + set_page_zone(page, zone); + set_page_node(page, node); +#ifdef SECTION_IN_PAGE_FLAGS + set_page_section(page, pfn_to_section_nr(pfn)); +#endif +} + +#ifdef CONFIG_MEMCG +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return page->mem_cgroup; +} +static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return READ_ONCE(page->mem_cgroup); +} +#else +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return NULL; +} +static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return NULL; +} +#endif + +/* + * Some inline functions in vmstat.h depend on page_zone() + */ +#include + +static __always_inline void *lowmem_page_address(const struct page *page) +{ + return page_to_virt(page); +} + +#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) +#define HASHED_PAGE_VIRTUAL +#endif + +#if defined(WANT_PAGE_VIRTUAL) +static inline void *page_address(const struct page *page) +{ + return page->virtual; +} +static inline void set_page_address(struct page *page, void *address) +{ + page->virtual = address; +} +#define page_address_init() do { } while(0) +#endif + +#if defined(HASHED_PAGE_VIRTUAL) +void *page_address(const struct page *page); +void set_page_address(struct page *page, void *virtual); +void page_address_init(void); +#endif + +#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL) +#define page_address(page) lowmem_page_address(page) +#define set_page_address(page, address) do { } while(0) +#define page_address_init() do { } while(0) +#endif + +extern void *page_rmapping(struct page *page); +extern struct anon_vma *page_anon_vma(struct page *page); +extern struct address_space *page_mapping(struct page *page); + +extern struct address_space *__page_file_mapping(struct page *); + +static inline +struct address_space *page_file_mapping(struct page *page) +{ + if (unlikely(PageSwapCache(page))) + return __page_file_mapping(page); + + return page->mapping; +} + +extern pgoff_t __page_file_index(struct page *page); + +/* + * Return the pagecache index of the passed page. 
Regular pagecache pages + * use ->index whereas swapcache pages use swp_offset(->private) + */ +static inline pgoff_t page_index(struct page *page) +{ + if (unlikely(PageSwapCache(page))) + return __page_file_index(page); + return page->index; +} + +bool page_mapped(struct page *page); +struct address_space *page_mapping(struct page *page); +struct address_space *page_mapping_file(struct page *page); + +/* + * Return true only if the page has been allocated with + * ALLOC_NO_WATERMARKS and the low watermark was not + * met implying that the system is under some pressure. + */ +static inline bool page_is_pfmemalloc(struct page *page) +{ + /* + * Page index cannot be this large so this must be + * a pfmemalloc page. + */ + return page->index == -1UL; +} + +/* + * Only to be called by the page allocator on a freshly allocated + * page. + */ +static inline void set_page_pfmemalloc(struct page *page) +{ + page->index = -1UL; +} + +static inline void clear_page_pfmemalloc(struct page *page) +{ + page->index = 0; +} + +/* + * Different kinds of faults, as returned by handle_mm_fault(). + * Used to decide whether a process gets delivered SIGBUS or + * just gets major/minor fault counters bumped up. + */ + +#define VM_FAULT_OOM 0x0001 +#define VM_FAULT_SIGBUS 0x0002 +#define VM_FAULT_MAJOR 0x0004 +#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ +#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ +#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ +#define VM_FAULT_SIGSEGV 0x0040 + +#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ +#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ +#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ +#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ +#define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */ +#define VM_FAULT_NEEDDSYNC 0x2000 /* ->fault did not modify page tables + * and needs fsync() to complete (for + * synchronous page faults in DAX) */ + +#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \ + VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ + VM_FAULT_FALLBACK) + +#define VM_FAULT_RESULT_TRACE \ + { VM_FAULT_OOM, "OOM" }, \ + { VM_FAULT_SIGBUS, "SIGBUS" }, \ + { VM_FAULT_MAJOR, "MAJOR" }, \ + { VM_FAULT_WRITE, "WRITE" }, \ + { VM_FAULT_HWPOISON, "HWPOISON" }, \ + { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \ + { VM_FAULT_SIGSEGV, "SIGSEGV" }, \ + { VM_FAULT_NOPAGE, "NOPAGE" }, \ + { VM_FAULT_LOCKED, "LOCKED" }, \ + { VM_FAULT_RETRY, "RETRY" }, \ + { VM_FAULT_FALLBACK, "FALLBACK" }, \ + { VM_FAULT_DONE_COW, "DONE_COW" }, \ + { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" } + +/* Encode hstate index for a hwpoisoned large page */ +#define VM_FAULT_SET_HINDEX(x) ((x) << 12) +#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf) + +/* + * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. + */ +extern void pagefault_out_of_memory(void); + +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) + +/* + * Flags passed to show_mem() and show_free_areas() to suppress output in + * various contexts. 
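The VM_FAULT_* codes above are what a vm_operations_struct ->fault() handler returns to the fault path. Below is a minimal illustrative handler (not from this header; sketch_lookup_page() is a hypothetical stand-in for a driver's own backing-store lookup):

#include <linux/mm.h>

static struct page *sketch_lookup_page(pgoff_t pgoff)
{
        return NULL;    /* hypothetical: a real driver would find its backing page here */
}

static vm_fault_t sketch_vm_fault(struct vm_fault *vmf)
{
        struct page *page = sketch_lookup_page(vmf->pgoff);

        if (!page)
                return VM_FAULT_SIGBUS; /* no backing page: deliver SIGBUS */

        get_page(page);
        vmf->page = page;               /* the fault path maps and releases it */
        return 0;
}

static const struct vm_operations_struct sketch_vm_ops = {
        .fault = sketch_vm_fault,
};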
+ */ +#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ + +extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); + +extern bool can_do_mlock(void); +extern int user_shm_lock(size_t, struct user_struct *); +extern void user_shm_unlock(size_t, struct user_struct *); + +/* + * Parameter block passed down to zap_pte_range in exceptional cases. + */ +struct zap_details { + struct address_space *check_mapping; /* Check page->mapping if set */ + pgoff_t first_index; /* Lowest page->index to unmap */ + pgoff_t last_index; /* Highest page->index to unmap */ + struct page *single_page; /* Locked page to be unmapped */ +}; + +struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte, bool with_public_device); +#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false) + +struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t pmd); + +void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, + unsigned long size); +void zap_page_range(struct vm_area_struct *vma, unsigned long address, + unsigned long size); +void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, + unsigned long start, unsigned long end); + +/** + * mm_walk - callbacks for walk_page_range + * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry + * this handler should only handle pud_trans_huge() puds. + * the pmd_entry or pte_entry callbacks will be used for + * regular PUDs. + * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry + * this handler is required to be able to handle + * pmd_trans_huge() pmds. They may simply choose to + * split_huge_page() instead of handling it explicitly. + * @pte_entry: if set, called for each non-empty PTE (4th-level) entry + * @pte_hole: if set, called for each hole at all levels + * @hugetlb_entry: if set, called for each hugetlb entry + * @test_walk: caller specific callback function to determine whether + * we walk over the current vma or not. Returning 0 + * value means "do page table walk over the current vma," + * and a negative one means "abort current page table walk + * right now." 1 means "skip the current vma." 
+ * @mm: mm_struct representing the target process of page table walk + * @vma: vma currently walked (NULL if walking outside vmas) + * @private: private data for callbacks' usage + * + * (see the comment on walk_page_range() for more details) + */ +struct mm_walk { + int (*pud_entry)(pud_t *pud, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pmd_entry)(pmd_t *pmd, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pte_entry)(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pte_hole)(unsigned long addr, unsigned long next, + struct mm_walk *walk); + int (*hugetlb_entry)(pte_t *pte, unsigned long hmask, + unsigned long addr, unsigned long next, + struct mm_walk *walk); + int (*test_walk)(unsigned long addr, unsigned long next, + struct mm_walk *walk); + struct mm_struct *mm; + struct vm_area_struct *vma; + void *private; +}; + +int walk_page_range(unsigned long addr, unsigned long end, + struct mm_walk *walk); +int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk); +void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, + unsigned long end, unsigned long floor, unsigned long ceiling); +int copy_page_range(struct mm_struct *dst, struct mm_struct *src, + struct vm_area_struct *vma); +int follow_pte_pmd(struct mm_struct *mm, unsigned long address, + unsigned long *start, unsigned long *end, + pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); +int follow_pfn(struct vm_area_struct *vma, unsigned long address, + unsigned long *pfn); +int follow_phys(struct vm_area_struct *vma, unsigned long address, + unsigned int flags, unsigned long *prot, resource_size_t *phys); +int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); + +extern void truncate_pagecache(struct inode *inode, loff_t new); +extern void truncate_setsize(struct inode *inode, loff_t newsize); +void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); +void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); +int truncate_inode_page(struct address_space *mapping, struct page *page); +int generic_error_remove_page(struct address_space *mapping, struct page *page); +int invalidate_inode_page(struct page *page); + +#ifdef CONFIG_MMU +extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, + unsigned long address, unsigned int flags); +extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, + unsigned long address, unsigned int fault_flags, + bool *unlocked); +void unmap_mapping_page(struct page *page); +void unmap_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t nr, bool even_cows); +void unmap_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen, int even_cows); +#else +static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, + unsigned long address, unsigned int flags) +{ + /* should never happen if there's no MMU */ + BUG(); + return VM_FAULT_SIGBUS; +} +static inline int fixup_user_fault(struct task_struct *tsk, + struct mm_struct *mm, unsigned long address, + unsigned int fault_flags, bool *unlocked) +{ + /* should never happen if there's no MMU */ + BUG(); + return -EFAULT; +} +static inline void unmap_mapping_page(struct page *page) { } +static inline void unmap_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t nr, bool even_cows) { } +static inline void unmap_mapping_range(struct address_space *mapping, + loff_t const 
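struct mm_walk and walk_page_range() above drive generic page-table walks: the caller fills in the callbacks it cares about and keeps private state in ->private. A hedged sketch (invented names) counting present PTEs in a user range, with mmap_sem held as this interface expects:

#include <linux/mm.h>
#include <linux/rwsem.h>

static int sketch_count_pte(pte_t *pte, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;                       /* keep walking */
}

static unsigned long sketch_count_present(struct mm_struct *mm,
                                          unsigned long start, unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry = sketch_count_pte,
                .mm = mm,
                .private = &count,
        };

        down_read(&mm->mmap_sem);       /* the walk requires mmap_sem */
        walk_page_range(start, end, &walk);
        up_read(&mm->mmap_sem);
        return count;
}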
holebegin, loff_t const holelen, int even_cows) { } +#endif + +static inline void unmap_shared_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen) +{ + unmap_mapping_range(mapping, holebegin, holelen, 0); +} + +extern int access_process_vm(struct task_struct *tsk, unsigned long addr, + void *buf, int len, unsigned int gup_flags); +extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, + void *buf, int len, unsigned int gup_flags); +extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, + unsigned long addr, void *buf, int len, unsigned int gup_flags); + +long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas, int *locked); +long get_user_pages(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas); +long get_user_pages_locked(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, int *locked); +long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, + struct page **pages, unsigned int gup_flags); +#ifdef CONFIG_FS_DAX +long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas); +#else +static inline long get_user_pages_longterm(unsigned long start, + unsigned long nr_pages, unsigned int gup_flags, + struct page **pages, struct vm_area_struct **vmas) +{ + return get_user_pages(start, nr_pages, gup_flags, pages, vmas); +} +#endif /* CONFIG_FS_DAX */ + +int get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages); + +/* Container for pinned pfns / pages */ +struct frame_vector { + unsigned int nr_allocated; /* Number of frames we have space for */ + unsigned int nr_frames; /* Number of frames stored in ptrs array */ + bool got_ref; /* Did we pin pages by getting page ref? */ + bool is_pfns; /* Does array contain pages or pfns? */ + void *ptrs[0]; /* Array of pinned pfns / pages. 
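get_user_pages_fast() above pins user pages without taking mmap_sem in the common case and returns the number of pages actually pinned. An illustrative sketch (invented name) that pins a small user buffer for writing and backs out cleanly on a partial pin:

#include <linux/mm.h>

static int sketch_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                  struct page **pages)
{
        int pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);

        if (pinned < 0)
                return pinned;                  /* negative errno */
        if (pinned < nr_pages) {
                while (pinned--)                /* partial pin: undo and report failure */
                        put_page(pages[pinned]);
                return -EFAULT;
        }
        return 0;
}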
Use + * pfns_vector_pages() or pfns_vector_pfns() + * for access */ +}; + +struct frame_vector *frame_vector_create(unsigned int nr_frames); +void frame_vector_destroy(struct frame_vector *vec); +int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, + unsigned int gup_flags, struct frame_vector *vec); +void put_vaddr_frames(struct frame_vector *vec); +int frame_vector_to_pages(struct frame_vector *vec); +void frame_vector_to_pfns(struct frame_vector *vec); + +static inline unsigned int frame_vector_count(struct frame_vector *vec) +{ + return vec->nr_frames; +} + +static inline struct page **frame_vector_pages(struct frame_vector *vec) +{ + if (vec->is_pfns) { + int err = frame_vector_to_pages(vec); + + if (err) + return ERR_PTR(err); + } + return (struct page **)(vec->ptrs); +} + +static inline unsigned long *frame_vector_pfns(struct frame_vector *vec) +{ + if (!vec->is_pfns) + frame_vector_to_pfns(vec); + return (unsigned long *)(vec->ptrs); +} + +struct kvec; +int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, + struct page **pages); +int get_kernel_page(unsigned long start, int write, struct page **pages); +struct page *get_dump_page(unsigned long addr); + +extern int try_to_release_page(struct page * page, gfp_t gfp_mask); +extern void do_invalidatepage(struct page *page, unsigned int offset, + unsigned int length); + +void __set_page_dirty(struct page *, struct address_space *, int warn); +int __set_page_dirty_nobuffers(struct page *page); +int __set_page_dirty_no_writeback(struct page *page); +int redirty_page_for_writepage(struct writeback_control *wbc, + struct page *page); +void account_page_dirtied(struct page *page, struct address_space *mapping); +void account_page_cleaned(struct page *page, struct address_space *mapping, + struct bdi_writeback *wb); +int set_page_dirty(struct page *page); +int set_page_dirty_lock(struct page *page); +void __cancel_dirty_page(struct page *page); +static inline void cancel_dirty_page(struct page *page) +{ + /* Avoid atomic ops, locking, etc. when not actually needed. */ + if (PageDirty(page)) + __cancel_dirty_page(page); +} +int clear_page_dirty_for_io(struct page *page); + +int get_cmdline(struct task_struct *task, char *buffer, int buflen); + +static inline bool vma_is_anonymous(struct vm_area_struct *vma) +{ + return !vma->vm_ops; +} + +#ifdef CONFIG_SHMEM +/* + * The vma_is_shmem is not inline because it is used only by slow + * paths in userfault. + */ +bool vma_is_shmem(struct vm_area_struct *vma); +#else +static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } +#endif + +int vma_is_stack_for_current(struct vm_area_struct *vma); + +extern unsigned long move_page_tables(struct vm_area_struct *vma, + unsigned long old_addr, struct vm_area_struct *new_vma, + unsigned long new_addr, unsigned long len, + bool need_rmap_locks); +extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, + unsigned long end, pgprot_t newprot, + int dirty_accountable, int prot_numa); +extern int mprotect_fixup(struct vm_area_struct *vma, + struct vm_area_struct **pprev, unsigned long start, + unsigned long end, unsigned long newflags); + +/* + * doesn't attempt to fault and will return short. + */ +int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages); +/* + * per-process(per-mm_struct) statistics. 
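The dirty-page helpers above matter when pages pinned with get_user_pages() are released: anything a device wrote must be marked dirty before the reference is dropped, or the data may be lost. A hedged sketch of that teardown (invented name):

#include <linux/mm.h>

static void sketch_unpin_pages(struct page **pages, int nr, bool dirtied)
{
        int i;

        for (i = 0; i < nr; i++) {
                if (dirtied)
                        set_page_dirty_lock(pages[i]); /* variant that takes the page lock */
                put_page(pages[i]);
        }
}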
+ */ +static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) +{ + long val = atomic_long_read(&mm->rss_stat.count[member]); + +#ifdef SPLIT_RSS_COUNTING + /* + * counter is updated in asynchronous manner and may go to minus. + * But it's never be expected number for users. + */ + if (val < 0) + val = 0; +#endif + return (unsigned long)val; +} + +static inline void add_mm_counter(struct mm_struct *mm, int member, long value) +{ + atomic_long_add(value, &mm->rss_stat.count[member]); +} + +static inline void inc_mm_counter(struct mm_struct *mm, int member) +{ + atomic_long_inc(&mm->rss_stat.count[member]); +} + +static inline void dec_mm_counter(struct mm_struct *mm, int member) +{ + atomic_long_dec(&mm->rss_stat.count[member]); +} + +/* Optimized variant when page is already known not to be PageAnon */ +static inline int mm_counter_file(struct page *page) +{ + if (PageSwapBacked(page)) + return MM_SHMEMPAGES; + return MM_FILEPAGES; +} + +static inline int mm_counter(struct page *page) +{ + if (PageAnon(page)) + return MM_ANONPAGES; + return mm_counter_file(page); +} + +static inline unsigned long get_mm_rss(struct mm_struct *mm) +{ + return get_mm_counter(mm, MM_FILEPAGES) + + get_mm_counter(mm, MM_ANONPAGES) + + get_mm_counter(mm, MM_SHMEMPAGES); +} + +static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) +{ + return max(mm->hiwater_rss, get_mm_rss(mm)); +} + +static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) +{ + return max(mm->hiwater_vm, mm->total_vm); +} + +static inline void update_hiwater_rss(struct mm_struct *mm) +{ + unsigned long _rss = get_mm_rss(mm); + + if ((mm)->hiwater_rss < _rss) + (mm)->hiwater_rss = _rss; +} + +static inline void update_hiwater_vm(struct mm_struct *mm) +{ + if (mm->hiwater_vm < mm->total_vm) + mm->hiwater_vm = mm->total_vm; +} + +static inline void reset_mm_hiwater_rss(struct mm_struct *mm) +{ + mm->hiwater_rss = get_mm_rss(mm); +} + +static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, + struct mm_struct *mm) +{ + unsigned long hiwater_rss = get_mm_hiwater_rss(mm); + + if (*maxrss < hiwater_rss) + *maxrss = hiwater_rss; +} + +#if defined(SPLIT_RSS_COUNTING) +void sync_mm_rss(struct mm_struct *mm); +#else +static inline void sync_mm_rss(struct mm_struct *mm) +{ +} +#endif + +#ifndef __HAVE_ARCH_PTE_DEVMAP +static inline int pte_devmap(pte_t pte) +{ + return 0; +} +#endif + +int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); + +extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl); +static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl) +{ + pte_t *ptep; + __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); + return ptep; +} + +#ifdef __PAGETABLE_P4D_FOLDED +static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, + unsigned long address) +{ + return 0; +} +#else +int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); +#endif + +#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU) +static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, + unsigned long address) +{ + return 0; +} +static inline void mm_inc_nr_puds(struct mm_struct *mm) {} +static inline void mm_dec_nr_puds(struct mm_struct *mm) {} + +#else +int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); + +static inline void mm_inc_nr_puds(struct mm_struct *mm) +{ + if (mm_pud_folded(mm)) + return; + atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), 
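The per-mm RSS counters above are read with get_mm_counter() and summarized by get_mm_rss() and get_mm_hiwater_rss(). Two tiny illustrative helpers (invented names) converting them to kilobytes, the unit procfs reports:

#include <linux/mm.h>

static unsigned long sketch_mm_rss_kb(struct mm_struct *mm)
{
        /* get_mm_rss() returns pages: file + anon + shmem counters */
        return get_mm_rss(mm) << (PAGE_SHIFT - 10);
}

static unsigned long sketch_mm_peak_rss_kb(struct mm_struct *mm)
{
        return get_mm_hiwater_rss(mm) << (PAGE_SHIFT - 10);
}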
&mm->pgtables_bytes); +} + +static inline void mm_dec_nr_puds(struct mm_struct *mm) +{ + if (mm_pud_folded(mm)) + return; + atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); +} +#endif + +#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) +static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, + unsigned long address) +{ + return 0; +} + +static inline void mm_inc_nr_pmds(struct mm_struct *mm) {} +static inline void mm_dec_nr_pmds(struct mm_struct *mm) {} + +#else +int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); + +static inline void mm_inc_nr_pmds(struct mm_struct *mm) +{ + if (mm_pmd_folded(mm)) + return; + atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); +} + +static inline void mm_dec_nr_pmds(struct mm_struct *mm) +{ + if (mm_pmd_folded(mm)) + return; + atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); +} +#endif + +#ifdef CONFIG_MMU +static inline void mm_pgtables_bytes_init(struct mm_struct *mm) +{ + atomic_long_set(&mm->pgtables_bytes, 0); +} + +static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) +{ + return atomic_long_read(&mm->pgtables_bytes); +} + +static inline void mm_inc_nr_ptes(struct mm_struct *mm) +{ + atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); +} + +static inline void mm_dec_nr_ptes(struct mm_struct *mm) +{ + atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); +} +#else + +static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {} +static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) +{ + return 0; +} + +static inline void mm_inc_nr_ptes(struct mm_struct *mm) {} +static inline void mm_dec_nr_ptes(struct mm_struct *mm) {} +#endif + +int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); +int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); + +/* + * The following ifdef needed to get the 4level-fixup.h header to work. + * Remove it when 4level-fixup.h has been removed. + */ +#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) + +#ifndef __ARCH_HAS_5LEVEL_HACK +static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, + unsigned long address) +{ + return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? + NULL : p4d_offset(pgd, address); +} + +static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, + unsigned long address) +{ + return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? + NULL : pud_offset(p4d, address); +} +#endif /* !__ARCH_HAS_5LEVEL_HACK */ + +static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) +{ + return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? 
+ NULL: pmd_offset(pud, address); +} +#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ + +#if USE_SPLIT_PTE_PTLOCKS +#if ALLOC_SPLIT_PTLOCKS +void __init ptlock_cache_init(void); +extern bool ptlock_alloc(struct page *page); +extern void ptlock_free(struct page *page); + +static inline spinlock_t *ptlock_ptr(struct page *page) +{ + return page->ptl; +} +#else /* ALLOC_SPLIT_PTLOCKS */ +static inline void ptlock_cache_init(void) +{ +} + +static inline bool ptlock_alloc(struct page *page) +{ + return true; +} + +static inline void ptlock_free(struct page *page) +{ +} + +static inline spinlock_t *ptlock_ptr(struct page *page) +{ + return &page->ptl; +} +#endif /* ALLOC_SPLIT_PTLOCKS */ + +static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) +{ + return ptlock_ptr(pmd_page(*pmd)); +} + +static inline bool ptlock_init(struct page *page) +{ + /* + * prep_new_page() initialize page->private (and therefore page->ptl) + * with 0. Make sure nobody took it in use in between. + * + * It can happen if arch try to use slab for page table allocation: + * slab code uses page->slab_cache, which share storage with page->ptl. + */ + VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); + if (!ptlock_alloc(page)) + return false; + spin_lock_init(ptlock_ptr(page)); + return true; +} + +/* Reset page->mapping so free_pages_check won't complain. */ +static inline void pte_lock_deinit(struct page *page) +{ + page->mapping = NULL; + ptlock_free(page); +} + +#else /* !USE_SPLIT_PTE_PTLOCKS */ +/* + * We use mm->page_table_lock to guard all pagetable pages of the mm. + */ +static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) +{ + return &mm->page_table_lock; +} +static inline void ptlock_cache_init(void) {} +static inline bool ptlock_init(struct page *page) { return true; } +static inline void pte_lock_deinit(struct page *page) {} +#endif /* USE_SPLIT_PTE_PTLOCKS */ + +static inline void pgtable_init(void) +{ + ptlock_cache_init(); + pgtable_cache_init(); +} + +static inline bool pgtable_page_ctor(struct page *page) +{ + if (!ptlock_init(page)) + return false; + __SetPageTable(page); + inc_zone_page_state(page, NR_PAGETABLE); + return true; +} + +static inline void pgtable_page_dtor(struct page *page) +{ + pte_lock_deinit(page); + __ClearPageTable(page); + dec_zone_page_state(page, NR_PAGETABLE); +} + +#define pte_offset_map_lock(mm, pmd, address, ptlp) \ +({ \ + spinlock_t *__ptl = pte_lockptr(mm, pmd); \ + pte_t *__pte = pte_offset_map(pmd, address); \ + *(ptlp) = __ptl; \ + spin_lock(__ptl); \ + __pte; \ +}) + +#define pte_unmap_unlock(pte, ptl) do { \ + spin_unlock(ptl); \ + pte_unmap(pte); \ +} while (0) + +#define pte_alloc(mm, pmd, address) \ + (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address)) + +#define pte_alloc_map(mm, pmd, address) \ + (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address)) + +#define pte_alloc_map_lock(mm, pmd, address, ptlp) \ + (pte_alloc(mm, pmd, address) ? \ + NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) + +#define pte_alloc_kernel(pmd, address) \ + ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? 
\ + NULL: pte_offset_kernel(pmd, address)) + +#if USE_SPLIT_PMD_PTLOCKS + +static struct page *pmd_to_page(pmd_t *pmd) +{ + unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); + return virt_to_page((void *)((unsigned long) pmd & mask)); +} + +static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) +{ + return ptlock_ptr(pmd_to_page(pmd)); +} + +static inline bool pgtable_pmd_page_ctor(struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + page->pmd_huge_pte = NULL; +#endif + return ptlock_init(page); +} + +static inline void pgtable_pmd_page_dtor(struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + VM_BUG_ON_PAGE(page->pmd_huge_pte, page); +#endif + ptlock_free(page); +} + +#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte) + +#else + +static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) +{ + return &mm->page_table_lock; +} + +static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; } +static inline void pgtable_pmd_page_dtor(struct page *page) {} + +#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) + +#endif + +static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) +{ + spinlock_t *ptl = pmd_lockptr(mm, pmd); + spin_lock(ptl); + return ptl; +} + +/* + * No scalability reason to split PUD locks yet, but follow the same pattern + * as the PMD locks to make it easier if we decide to. The VM should not be + * considered ready to switch to split PUD locks yet; there may be places + * which need to be converted from page_table_lock. + */ +static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) +{ + return &mm->page_table_lock; +} + +static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) +{ + spinlock_t *ptl = pud_lockptr(mm, pud); + + spin_lock(ptl); + return ptl; +} + +extern void __init pagecache_init(void); +extern void free_area_init(unsigned long * zones_size); +extern void __init free_area_init_node(int nid, unsigned long * zones_size, + unsigned long zone_start_pfn, unsigned long *zholes_size); +extern void free_initmem(void); + +/* + * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) + * into the buddy system. The freed pages will be poisoned with pattern + * "poison" if it's within range [0, UCHAR_MAX]. + * Return pages freed into the buddy system. + */ +extern unsigned long free_reserved_area(void *start, void *end, + int poison, char *s); + +#ifdef CONFIG_HIGHMEM +/* + * Free a highmem page into the buddy system, adjusting totalhigh_pages + * and totalram_pages. + */ +extern void free_highmem_page(struct page *page); +#endif + +extern void adjust_managed_page_count(struct page *page, long count); +extern void mem_init_print_info(const char *str); + +extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); + +/* Free the reserved page into the buddy system, so it gets managed. */ +static inline void __free_reserved_page(struct page *page) +{ + ClearPageReserved(page); + init_page_count(page); + __free_page(page); +} + +static inline void free_reserved_page(struct page *page) +{ + __free_reserved_page(page); + adjust_managed_page_count(page, 1); +} + +static inline void mark_page_reserved(struct page *page) +{ + SetPageReserved(page); + adjust_managed_page_count(page, -1); +} + +/* + * Default method to free all the __init memory into the buddy system. + * The freed pages will be poisoned with pattern "poison" if it's within + * range [0, UCHAR_MAX]. + * Return pages freed into the buddy system. 
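pte_offset_map_lock() and pte_unmap_unlock() above hide the split page-table locking rules from their callers. A hedged sketch (invented name) that inspects a single PTE under the proper lock, assuming the caller supplies a valid pmd for the address:

#include <linux/mm.h>

static bool sketch_pte_is_present(struct mm_struct *mm, pmd_t *pmd,
                                  unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *pte;
        bool present;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        present = pte_present(*pte);
        pte_unmap_unlock(pte, ptl);     /* drops the lock, then the mapping */
        return present;
}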
+ */ +static inline unsigned long free_initmem_default(int poison) +{ + extern char __init_begin[], __init_end[]; + + return free_reserved_area(&__init_begin, &__init_end, + poison, "unused kernel"); +} + +static inline unsigned long get_num_physpages(void) +{ + int nid; + unsigned long phys_pages = 0; + + for_each_online_node(nid) + phys_pages += node_present_pages(nid); + + return phys_pages; +} + +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +/* + * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its + * zones, allocate the backing mem_map and account for memory holes in a more + * architecture independent manner. This is a substitute for creating the + * zone_sizes[] and zholes_size[] arrays and passing them to + * free_area_init_node() + * + * An architecture is expected to register range of page frames backed by + * physical memory with memblock_add[_node]() before calling + * free_area_init_nodes() passing in the PFN each zone ends at. At a basic + * usage, an architecture is expected to do something like + * + * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, + * max_highmem_pfn}; + * for_each_valid_physical_page_range() + * memblock_add_node(base, size, nid) + * free_area_init_nodes(max_zone_pfns); + * + * free_bootmem_with_active_regions() calls free_bootmem_node() for each + * registered physical page range. Similarly + * sparse_memory_present_with_active_regions() calls memory_present() for + * each range when SPARSEMEM is enabled. + * + * See mm/page_alloc.c for more information on each function exposed by + * CONFIG_HAVE_MEMBLOCK_NODE_MAP. + */ +extern void free_area_init_nodes(unsigned long *max_zone_pfn); +unsigned long node_map_pfn_alignment(void); +unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, + unsigned long end_pfn); +extern unsigned long absent_pages_in_range(unsigned long start_pfn, + unsigned long end_pfn); +extern void get_pfn_range_for_nid(unsigned int nid, + unsigned long *start_pfn, unsigned long *end_pfn); +extern unsigned long find_min_pfn_with_active_regions(void); +extern void free_bootmem_with_active_regions(int nid, + unsigned long max_low_pfn); +extern void sparse_memory_present_with_active_regions(int nid); + +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ + !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) +static inline int __early_pfn_to_nid(unsigned long pfn, + struct mminit_pfnnid_cache *state) +{ + return 0; +} +#else +/* please see mm/page_alloc.c */ +extern int __meminit early_pfn_to_nid(unsigned long pfn); +/* there is a per-arch backend function. 
*/ +extern int __meminit __early_pfn_to_nid(unsigned long pfn, + struct mminit_pfnnid_cache *state); +#endif + +#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP) +void zero_resv_unavail(void); +#else +static inline void zero_resv_unavail(void) {} +#endif + +extern void set_dma_reserve(unsigned long new_dma_reserve); +extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, + enum meminit_context, struct vmem_altmap *); +extern void setup_per_zone_wmarks(void); +extern int __meminit init_per_zone_wmark_min(void); +extern void mem_init(void); +extern void __init mmap_init(void); +extern void show_mem(unsigned int flags, nodemask_t *nodemask); +extern long si_mem_available(void); +extern void si_meminfo(struct sysinfo * val); +extern void si_meminfo_node(struct sysinfo *val, int nid); +#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES +extern unsigned long arch_reserved_kernel_pages(void); +#endif + +extern __printf(3, 4) +void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); + +extern void setup_per_cpu_pageset(void); + +extern void zone_pcp_update(struct zone *zone); +extern void zone_pcp_reset(struct zone *zone); + +/* page_alloc.c */ +extern int min_free_kbytes; +extern int watermark_scale_factor; + +/* nommu.c */ +extern atomic_long_t mmap_pages_allocated; +extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); + +/* interval_tree.c */ +void vma_interval_tree_insert(struct vm_area_struct *node, + struct rb_root_cached *root); +void vma_interval_tree_insert_after(struct vm_area_struct *node, + struct vm_area_struct *prev, + struct rb_root_cached *root); +void vma_interval_tree_remove(struct vm_area_struct *node, + struct rb_root_cached *root); +struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root, + unsigned long start, unsigned long last); +struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, + unsigned long start, unsigned long last); + +#define vma_interval_tree_foreach(vma, root, start, last) \ + for (vma = vma_interval_tree_iter_first(root, start, last); \ + vma; vma = vma_interval_tree_iter_next(vma, start, last)) + +void anon_vma_interval_tree_insert(struct anon_vma_chain *node, + struct rb_root_cached *root); +void anon_vma_interval_tree_remove(struct anon_vma_chain *node, + struct rb_root_cached *root); +struct anon_vma_chain * +anon_vma_interval_tree_iter_first(struct rb_root_cached *root, + unsigned long start, unsigned long last); +struct anon_vma_chain *anon_vma_interval_tree_iter_next( + struct anon_vma_chain *node, unsigned long start, unsigned long last); +#ifdef CONFIG_DEBUG_VM_RB +void anon_vma_interval_tree_verify(struct anon_vma_chain *node); +#endif + +#define anon_vma_interval_tree_foreach(avc, root, start, last) \ + for (avc = anon_vma_interval_tree_iter_first(root, start, last); \ + avc; avc = anon_vma_interval_tree_iter_next(avc, start, last)) + +/* mmap.c */ +extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); +extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start, + unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, + struct vm_area_struct *expand); +static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, + unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) +{ + return __vma_adjust(vma, start, end, pgoff, insert, NULL); +} +extern struct vm_area_struct *vma_merge(struct mm_struct *, + struct vm_area_struct *prev, unsigned long addr, 
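The interval-tree iterators above are how the reverse-mapping code visits every VMA covering a range of a file. An illustrative sketch (invented name) walking all VMAs that map a given pagecache range, under the i_mmap lock:

#include <linux/fs.h>
#include <linux/mm.h>

static void sketch_for_each_mapping_vma(struct address_space *mapping,
                                        pgoff_t first, pgoff_t last)
{
        struct vm_area_struct *vma;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
                /* vma->vm_mm identifies the address space mapping this range */
        }
        i_mmap_unlock_read(mapping);
}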
unsigned long end, + unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, + struct mempolicy *, struct vm_userfaultfd_ctx); +extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); +extern int __split_vma(struct mm_struct *, struct vm_area_struct *, + unsigned long addr, int new_below); +extern int split_vma(struct mm_struct *, struct vm_area_struct *, + unsigned long addr, int new_below); +extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); +extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, + struct rb_node **, struct rb_node *); +extern void unlink_file_vma(struct vm_area_struct *); +extern struct vm_area_struct *copy_vma(struct vm_area_struct **, + unsigned long addr, unsigned long len, pgoff_t pgoff, + bool *need_rmap_locks); +extern void exit_mmap(struct mm_struct *); + +static inline int check_data_rlimit(unsigned long rlim, + unsigned long new, + unsigned long start, + unsigned long end_data, + unsigned long start_data) +{ + if (rlim < RLIM_INFINITY) { + if (((new - start) + (end_data - start_data)) > rlim) + return -ENOSPC; + } + + return 0; +} + +extern int mm_take_all_locks(struct mm_struct *mm); +extern void mm_drop_all_locks(struct mm_struct *mm); + +extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern struct file *get_mm_exe_file(struct mm_struct *mm); +extern struct file *get_task_exe_file(struct task_struct *task); + +extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); +extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); + +extern bool vma_is_special_mapping(const struct vm_area_struct *vma, + const struct vm_special_mapping *sm); +extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, + unsigned long addr, unsigned long len, + unsigned long flags, + const struct vm_special_mapping *spec); +/* This is an obsolete alternative to _install_special_mapping. 
*/ +extern int install_special_mapping(struct mm_struct *mm, + unsigned long addr, unsigned long len, + unsigned long flags, struct page **pages); + +unsigned long randomize_page(unsigned long start, unsigned long range); + +extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); + +extern unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, + struct list_head *uf); +extern unsigned long do_mmap(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, unsigned long flags, + vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, + struct list_head *uf); +extern int do_munmap(struct mm_struct *, unsigned long, size_t, + struct list_head *uf); + +static inline unsigned long +do_mmap_pgoff(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, unsigned long flags, + unsigned long pgoff, unsigned long *populate, + struct list_head *uf) +{ + return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf); +} + +#ifdef CONFIG_MMU +extern int __mm_populate(unsigned long addr, unsigned long len, + int ignore_errors); +static inline void mm_populate(unsigned long addr, unsigned long len) +{ + /* Ignore errors */ + (void) __mm_populate(addr, len, 1); +} +#else +static inline void mm_populate(unsigned long addr, unsigned long len) {} +#endif + +/* These take the mm semaphore themselves */ +extern int __must_check vm_brk(unsigned long, unsigned long); +extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long); +extern int vm_munmap(unsigned long, size_t); +extern unsigned long __must_check vm_mmap(struct file *, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); + +struct vm_unmapped_area_info { +#define VM_UNMAPPED_AREA_TOPDOWN 1 + unsigned long flags; + unsigned long length; + unsigned long low_limit; + unsigned long high_limit; + unsigned long align_mask; + unsigned long align_offset; +}; + +extern unsigned long unmapped_area(struct vm_unmapped_area_info *info); +extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); + +/* + * Search for an unmapped address range. + * + * We are looking for a range that: + * - does not intersect with any VMA; + * - is contained within the [low_limit, high_limit) interval; + * - is at least the desired size. 
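vm_mmap() and vm_munmap() above take the mmap semaphore themselves, so in-kernel callers can create and tear down mappings in the current process without extra locking. A hedged sketch (invented names):

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mman.h>

static unsigned long sketch_map_readonly(struct file *filp, unsigned long len)
{
        /*
         * Returns the new start address; on failure a negative errno is
         * encoded in the value, which callers test with IS_ERR_VALUE().
         */
        return vm_mmap(filp, 0, len, PROT_READ, MAP_PRIVATE, 0);
}

static int sketch_unmap(unsigned long addr, size_t len)
{
        return vm_munmap(addr, len);
}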
+ * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) + */ +static inline unsigned long +vm_unmapped_area(struct vm_unmapped_area_info *info) +{ + if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) + return unmapped_area_topdown(info); + else + return unmapped_area(info); +} + +/* truncate.c */ +extern void truncate_inode_pages(struct address_space *, loff_t); +extern void truncate_inode_pages_range(struct address_space *, + loff_t lstart, loff_t lend); +extern void truncate_inode_pages_final(struct address_space *); + +/* generic vm_area_ops exported for stackable file systems */ +extern vm_fault_t filemap_fault(struct vm_fault *vmf); +extern void filemap_map_pages(struct vm_fault *vmf, + pgoff_t start_pgoff, pgoff_t end_pgoff); +extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); + +/* mm/page-writeback.c */ +int __must_check write_one_page(struct page *page); +void task_dirty_inc(struct task_struct *tsk); + +/* readahead.c */ +#define VM_MAX_READAHEAD 128 /* kbytes */ +#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ + +int force_page_cache_readahead(struct address_space *mapping, struct file *filp, + pgoff_t offset, unsigned long nr_to_read); + +void page_cache_sync_readahead(struct address_space *mapping, + struct file_ra_state *ra, + struct file *filp, + pgoff_t offset, + unsigned long size); + +void page_cache_async_readahead(struct address_space *mapping, + struct file_ra_state *ra, + struct file *filp, + struct page *pg, + pgoff_t offset, + unsigned long size); + +extern unsigned long stack_guard_gap; +/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ +extern int expand_stack(struct vm_area_struct *vma, unsigned long address); + +/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */ +extern int expand_downwards(struct vm_area_struct *vma, + unsigned long address); +#if VM_GROWSUP +extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); +#else + #define expand_upwards(vma, address) (0) +#endif + +/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ +extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); +extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, + struct vm_area_struct **pprev); + +/* Look up the first VMA which intersects the interval start_addr..end_addr-1, + NULL if none. Assume start_addr < end_addr. */ +static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +{ + struct vm_area_struct * vma = find_vma(mm,start_addr); + + if (vma && end_addr <= vma->vm_start) + vma = NULL; + return vma; +} + +static inline unsigned long vm_start_gap(struct vm_area_struct *vma) +{ + unsigned long vm_start = vma->vm_start; + + if (vma->vm_flags & VM_GROWSDOWN) { + vm_start -= stack_guard_gap; + if (vm_start > vma->vm_start) + vm_start = 0; + } + return vm_start; +} + +static inline unsigned long vm_end_gap(struct vm_area_struct *vma) +{ + unsigned long vm_end = vma->vm_end; + + if (vma->vm_flags & VM_GROWSUP) { + vm_end += stack_guard_gap; + if (vm_end < vma->vm_end) + vm_end = -PAGE_SIZE; + } + return vm_end; +} + +static inline unsigned long vma_pages(struct vm_area_struct *vma) +{ + return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; +} + +/* Look up the first VMA which exactly match the interval vm_start ... 
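find_vma() above returns the first VMA that ends beyond the address, which is not necessarily one that contains it. A small illustrative helper (invented name) showing the usual containment check, performed under mmap_sem:

#include <linux/mm.h>
#include <linux/rwsem.h>

static bool sketch_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        bool mapped;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);                 /* first VMA with addr < vm_end */
        mapped = vma && vma->vm_start <= addr;    /* it may lie entirely above addr */
        up_read(&mm->mmap_sem);
        return mapped;
}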
vm_end */ +static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, + unsigned long vm_start, unsigned long vm_end) +{ + struct vm_area_struct *vma = find_vma(mm, vm_start); + + if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) + vma = NULL; + + return vma; +} + +static inline bool range_in_vma(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + return (vma && vma->vm_start <= start && end <= vma->vm_end); +} + +#ifdef CONFIG_MMU +pgprot_t vm_get_page_prot(unsigned long vm_flags); +void vma_set_page_prot(struct vm_area_struct *vma); +#else +static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) +{ + return __pgprot(0); +} +static inline void vma_set_page_prot(struct vm_area_struct *vma) +{ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); +} +#endif + +#ifdef CONFIG_NUMA_BALANCING +unsigned long change_prot_numa(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +#endif + +struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); +int remap_pfn_range(struct vm_area_struct *, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t); +int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); +int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn); +int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, pgprot_t pgprot); +int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, + pfn_t pfn); +vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, + unsigned long addr, pfn_t pfn); +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); + +static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, + unsigned long addr, struct page *page) +{ + int err = vm_insert_page(vma, addr, page); + + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, + unsigned long addr, pfn_t pfn) +{ + int err = vm_insert_mixed(vma, addr, pfn); + + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn) +{ + int err = vm_insert_pfn(vma, addr, pfn); + + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +#ifndef io_remap_pfn_range +static inline int io_remap_pfn_range(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, + unsigned long size, pgprot_t prot) +{ + return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); +} +#endif + +static inline vm_fault_t vmf_error(int err) +{ + if (err == -ENOMEM) + return VM_FAULT_OOM; + return VM_FAULT_SIGBUS; +} + +struct page *follow_page_mask(struct vm_area_struct *vma, + unsigned long address, unsigned int foll_flags, + unsigned int *page_mask); + +static inline struct page *follow_page(struct vm_area_struct *vma, + unsigned long address, unsigned int foll_flags) +{ + unsigned int unused_page_mask; + return follow_page_mask(vma, address, foll_flags, &unused_page_mask); +} + +#define FOLL_WRITE 0x01 /* check pte is writable */ +#define FOLL_TOUCH 0x02 /* mark page accessed */ +#define FOLL_GET 0x04 /* do get_page on page */ +#define FOLL_DUMP 0x08 /* give error on 
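remap_pfn_range() above is the classic way for a driver's ->mmap() method to expose a physical buffer to user space. A hedged sketch; sketch_buf_phys is a hypothetical physical base address the driver is assumed to own:

#include <linux/fs.h>
#include <linux/mm.h>

static phys_addr_t sketch_buf_phys;     /* hypothetical, set when the buffer is allocated */

static int sketch_drv_mmap(struct file *filp, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        return remap_pfn_range(vma, vma->vm_start,
                               sketch_buf_phys >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}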
hole if it would be zero */ +#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ +#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO + * and return without waiting upon it */ +#define FOLL_POPULATE 0x40 /* fault in page */ +#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ +#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ +#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ +#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ +#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ +#define FOLL_MLOCK 0x1000 /* lock present pages */ +#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ +#define FOLL_COW 0x4000 /* internal GUP flag */ +#define FOLL_ANON 0x8000 /* don't do file mappings */ + +static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) +{ + if (vm_fault & VM_FAULT_OOM) + return -ENOMEM; + if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) + return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT; + if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) + return -EFAULT; + return 0; +} + +typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, + void *data); +extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, + unsigned long size, pte_fn_t fn, void *data); + + +#ifdef CONFIG_PAGE_POISONING +extern bool page_poisoning_enabled(void); +extern void kernel_poison_pages(struct page *page, int numpages, int enable); +#else +static inline bool page_poisoning_enabled(void) { return false; } +static inline void kernel_poison_pages(struct page *page, int numpages, + int enable) { } +#endif + +#ifdef CONFIG_DEBUG_PAGEALLOC +extern bool _debug_pagealloc_enabled; +extern void __kernel_map_pages(struct page *page, int numpages, int enable); + +static inline bool debug_pagealloc_enabled(void) +{ + return _debug_pagealloc_enabled; +} + +static inline void +kernel_map_pages(struct page *page, int numpages, int enable) +{ + if (!debug_pagealloc_enabled()) + return; + + __kernel_map_pages(page, numpages, enable); +} +#ifdef CONFIG_HIBERNATION +extern bool kernel_page_present(struct page *page); +#endif /* CONFIG_HIBERNATION */ +#else /* CONFIG_DEBUG_PAGEALLOC */ +static inline void +kernel_map_pages(struct page *page, int numpages, int enable) {} +#ifdef CONFIG_HIBERNATION +static inline bool kernel_page_present(struct page *page) { return true; } +#endif /* CONFIG_HIBERNATION */ +static inline bool debug_pagealloc_enabled(void) +{ + return false; +} +#endif /* CONFIG_DEBUG_PAGEALLOC */ + +#ifdef __HAVE_ARCH_GATE_AREA +extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); +extern int in_gate_area_no_mm(unsigned long addr); +extern int in_gate_area(struct mm_struct *mm, unsigned long addr); +#else +static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm) +{ + return NULL; +} +static inline int in_gate_area_no_mm(unsigned long addr) { return 0; } +static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) +{ + return 0; +} +#endif /* __HAVE_ARCH_GATE_AREA */ + +extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); + +#ifdef CONFIG_SYSCTL +extern int sysctl_drop_caches; +int drop_caches_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +#endif + +void drop_slab(void); +void drop_slab_node(int nid); + +#ifndef CONFIG_MMU +#define randomize_va_space 0 +#else +extern int randomize_va_space; +#endif + +const char * 
arch_vma_name(struct vm_area_struct *vma); +void print_vma_addr(char *prefix, unsigned long rip); + +void *sparse_buffer_alloc(unsigned long size); +struct page *sparse_mem_map_populate(unsigned long pnum, int nid, + struct vmem_altmap *altmap); +pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); +p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); +pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); +pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); +pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); +void *vmemmap_alloc_block(unsigned long size, int node); +struct vmem_altmap; +void *vmemmap_alloc_block_buf(unsigned long size, int node); +void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap); +void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); +int vmemmap_populate_basepages(unsigned long start, unsigned long end, + int node); +int vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap); +void vmemmap_populate_print_last(void); +#ifdef CONFIG_MEMORY_HOTPLUG +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap); +#endif +void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, + unsigned long nr_pages); + +enum mf_flags { + MF_COUNT_INCREASED = 1 << 0, + MF_ACTION_REQUIRED = 1 << 1, + MF_MUST_KILL = 1 << 2, + MF_SOFT_OFFLINE = 1 << 3, +}; +extern int memory_failure(unsigned long pfn, int flags); +extern void memory_failure_queue(unsigned long pfn, int flags); +extern int unpoison_memory(unsigned long pfn); +extern int get_hwpoison_page(struct page *page); +#define put_hwpoison_page(page) put_page(page) +extern int sysctl_memory_failure_early_kill; +extern int sysctl_memory_failure_recovery; +extern void shake_page(struct page *p, int access); +extern atomic_long_t num_poisoned_pages __read_mostly; +extern int soft_offline_page(struct page *page, int flags); + + +/* + * Error handlers for various types of pages. 
+ */ +enum mf_result { + MF_IGNORED, /* Error: cannot be handled */ + MF_FAILED, /* Error: handling failed */ + MF_DELAYED, /* Will be handled later */ + MF_RECOVERED, /* Successfully recovered */ +}; + +enum mf_action_page_type { + MF_MSG_KERNEL, + MF_MSG_KERNEL_HIGH_ORDER, + MF_MSG_SLAB, + MF_MSG_DIFFERENT_COMPOUND, + MF_MSG_POISONED_HUGE, + MF_MSG_HUGE, + MF_MSG_FREE_HUGE, + MF_MSG_NON_PMD_HUGE, + MF_MSG_UNMAP_FAILED, + MF_MSG_DIRTY_SWAPCACHE, + MF_MSG_CLEAN_SWAPCACHE, + MF_MSG_DIRTY_MLOCKED_LRU, + MF_MSG_CLEAN_MLOCKED_LRU, + MF_MSG_DIRTY_UNEVICTABLE_LRU, + MF_MSG_CLEAN_UNEVICTABLE_LRU, + MF_MSG_DIRTY_LRU, + MF_MSG_CLEAN_LRU, + MF_MSG_TRUNCATED_LRU, + MF_MSG_BUDDY, + MF_MSG_BUDDY_2ND, + MF_MSG_DAX, + MF_MSG_UNKNOWN, +}; + +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) +extern void clear_huge_page(struct page *page, + unsigned long addr_hint, + unsigned int pages_per_huge_page); +extern void copy_user_huge_page(struct page *dst, struct page *src, + unsigned long addr_hint, + struct vm_area_struct *vma, + unsigned int pages_per_huge_page); +extern long copy_huge_page_from_user(struct page *dst_page, + const void __user *usr_src, + unsigned int pages_per_huge_page, + bool allow_pagefault); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ + +extern struct page_ext_operations debug_guardpage_ops; + +#ifdef CONFIG_DEBUG_PAGEALLOC +extern unsigned int _debug_guardpage_minorder; +extern bool _debug_guardpage_enabled; + +static inline unsigned int debug_guardpage_minorder(void) +{ + return _debug_guardpage_minorder; +} + +static inline bool debug_guardpage_enabled(void) +{ + return _debug_guardpage_enabled; +} + +static inline bool page_is_guard(struct page *page) +{ + struct page_ext *page_ext; + + if (!debug_guardpage_enabled()) + return false; + + page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); +} +#else +static inline unsigned int debug_guardpage_minorder(void) { return 0; } +static inline bool debug_guardpage_enabled(void) { return false; } +static inline bool page_is_guard(struct page *page) { return false; } +#endif /* CONFIG_DEBUG_PAGEALLOC */ + +#if MAX_NUMNODES > 1 +void __init setup_nr_node_ids(void); +#else +static inline void setup_nr_node_ids(void) {} +#endif + +#endif /* __KERNEL__ */ +#endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h new file mode 100644 index 000000000..10191c28f --- /dev/null +++ b/include/linux/mm_inline.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_MM_INLINE_H +#define LINUX_MM_INLINE_H + +#include +#include + +/** + * page_is_file_cache - should the page be on a file LRU or anon LRU? + * @page: the page to test + * + * Returns 1 if @page is page cache page backed by a regular filesystem, + * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed. + * Used by functions that manipulate the LRU lists, to sort a page + * onto the right LRU list. + * + * We would like to get this info without a page flag, but the state + * needs to survive until the page is last deleted from the LRU, which + * could be as far down as __page_cache_release. 
+ */ +static inline int page_is_file_cache(struct page *page) +{ + return !PageSwapBacked(page); +} + +static __always_inline void __update_lru_size(struct lruvec *lruvec, + enum lru_list lru, enum zone_type zid, + int nr_pages) +{ + struct pglist_data *pgdat = lruvec_pgdat(lruvec); + + __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages); + __mod_zone_page_state(&pgdat->node_zones[zid], + NR_ZONE_LRU_BASE + lru, nr_pages); +} + +static __always_inline void update_lru_size(struct lruvec *lruvec, + enum lru_list lru, enum zone_type zid, + int nr_pages) +{ + __update_lru_size(lruvec, lru, zid, nr_pages); +#ifdef CONFIG_MEMCG + mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); +#endif +} + +static __always_inline void add_page_to_lru_list(struct page *page, + struct lruvec *lruvec, enum lru_list lru) +{ + update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + list_add(&page->lru, &lruvec->lists[lru]); +} + +static __always_inline void add_page_to_lru_list_tail(struct page *page, + struct lruvec *lruvec, enum lru_list lru) +{ + update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + list_add_tail(&page->lru, &lruvec->lists[lru]); +} + +static __always_inline void del_page_from_lru_list(struct page *page, + struct lruvec *lruvec, enum lru_list lru) +{ + list_del(&page->lru); + update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); +} + +/** + * page_lru_base_type - which LRU list type should a page be on? + * @page: the page to test + * + * Used for LRU list index arithmetic. + * + * Returns the base LRU type - file or anon - @page should be on. + */ +static inline enum lru_list page_lru_base_type(struct page *page) +{ + if (page_is_file_cache(page)) + return LRU_INACTIVE_FILE; + return LRU_INACTIVE_ANON; +} + +/** + * page_off_lru - which LRU list was page on? clearing its lru flags. + * @page: the page to test + * + * Returns the LRU list a page was on, as an index into the array of LRU + * lists; and clears its Unevictable or Active flags, ready for freeing. + */ +static __always_inline enum lru_list page_off_lru(struct page *page) +{ + enum lru_list lru; + + if (PageUnevictable(page)) { + __ClearPageUnevictable(page); + lru = LRU_UNEVICTABLE; + } else { + lru = page_lru_base_type(page); + if (PageActive(page)) { + __ClearPageActive(page); + lru += LRU_ACTIVE; + } + } + return lru; +} + +/** + * page_lru - which LRU list should a page be on? + * @page: the page to test + * + * Returns the LRU list a page should be on, as an index + * into the array of LRU lists. 
+ */ +static __always_inline enum lru_list page_lru(struct page *page) +{ + enum lru_list lru; + + if (PageUnevictable(page)) + lru = LRU_UNEVICTABLE; + else { + lru = page_lru_base_type(page); + if (PageActive(page)) + lru += LRU_ACTIVE; + } + return lru; +} + +#define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) + +#endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h new file mode 100644 index 000000000..3a9a996af --- /dev/null +++ b/include/linux/mm_types.h @@ -0,0 +1,657 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MM_TYPES_H +#define _LINUX_MM_TYPES_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifndef AT_VECTOR_SIZE_ARCH +#define AT_VECTOR_SIZE_ARCH 0 +#endif +#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) + +typedef int vm_fault_t; + +struct address_space; +struct mem_cgroup; +struct hmm; + +/* + * Each physical page in the system has a struct page associated with + * it to keep track of whatever it is we are using the page for at the + * moment. Note that we have no way to track which tasks are using + * a page, though if it is a pagecache page, rmap structures can tell us + * who is mapping it. + * + * If you allocate the page using alloc_pages(), you can use some of the + * space in struct page for your own purposes. The five words in the main + * union are available, except for bit 0 of the first word which must be + * kept clear. Many users use this word to store a pointer to an object + * which is guaranteed to be aligned. If you use the same storage as + * page->mapping, you must restore it to NULL before freeing the page. + * + * If your page will not be mapped to userspace, you can also use the four + * bytes in the mapcount union, but you must call page_mapcount_reset() + * before freeing it. + * + * If you want to use the refcount field, it must be used in such a way + * that other CPUs temporarily incrementing and then decrementing the + * refcount does not cause problems. On receiving the page from + * alloc_pages(), the refcount will be positive. + * + * If you allocate pages of order > 0, you can use some of the fields + * in each subpage, but you may need to restore some of their values + * afterwards. + * + * SLUB uses cmpxchg_double() to atomically update its freelist and + * counters. That requires that freelist & counters be adjacent and + * double-word aligned. We align all struct pages to double-word + * boundaries, and ensure that 'freelist' is aligned within the + * struct. + */ +#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE +#define _struct_page_alignment __aligned(2 * sizeof(unsigned long)) +#else +#define _struct_page_alignment +#endif + +struct page { + unsigned long flags; /* Atomic flags, some possibly + * updated asynchronously */ + /* + * Five words (20/40 bytes) are available in this union. + * WARNING: bit 0 of the first word is used for PageTail(). That + * means the other users of this union MUST NOT use the bit to + * avoid collision and false-positive PageTail(). + */ + union { + struct { /* Page cache and anonymous pages */ + /** + * @lru: Pageout list, eg. active_list protected by + * zone_lru_lock. Sometimes used as a generic list + * by the page owner. + */ + struct list_head lru; + /* See page-flags.h for PAGE_MAPPING_FLAGS */ + struct address_space *mapping; + pgoff_t index; /* Our offset within mapping. */ + /** + * @private: Mapping-private opaque data. 
+ * Usually used for buffer_heads if PagePrivate. + * Used for swp_entry_t if PageSwapCache. + * Indicates order in the buddy system if PageBuddy. + */ + unsigned long private; + }; + struct { /* slab, slob and slub */ + union { + struct list_head slab_list; /* uses lru */ + struct { /* Partial pages */ + struct page *next; +#ifdef CONFIG_64BIT + int pages; /* Nr of pages left */ + int pobjects; /* Approximate count */ +#else + short int pages; + short int pobjects; +#endif + }; + }; + struct kmem_cache *slab_cache; /* not slob */ + /* Double-word boundary */ + void *freelist; /* first free object */ + union { + void *s_mem; /* slab: first object */ + unsigned long counters; /* SLUB */ + struct { /* SLUB */ + unsigned inuse:16; + unsigned objects:15; + unsigned frozen:1; + }; + }; + }; + struct { /* Tail pages of compound page */ + unsigned long compound_head; /* Bit zero is set */ + + /* First tail page only */ + unsigned char compound_dtor; + unsigned char compound_order; + atomic_t compound_mapcount; + }; + struct { /* Second tail page of compound page */ + unsigned long _compound_pad_1; /* compound_head */ + unsigned long _compound_pad_2; + struct list_head deferred_list; + }; + struct { /* Page table pages */ + unsigned long _pt_pad_1; /* compound_head */ + pgtable_t pmd_huge_pte; /* protected by page->ptl */ + unsigned long _pt_pad_2; /* mapping */ + union { + struct mm_struct *pt_mm; /* x86 pgds only */ + atomic_t pt_frag_refcount; /* powerpc */ + }; +#if ALLOC_SPLIT_PTLOCKS + spinlock_t *ptl; +#else + spinlock_t ptl; +#endif + }; + struct { /* ZONE_DEVICE pages */ + /** @pgmap: Points to the hosting device page map. */ + struct dev_pagemap *pgmap; + unsigned long hmm_data; + unsigned long _zd_pad_1; /* uses mapping */ + }; + + /** @rcu_head: You can use this to free a page by RCU. */ + struct rcu_head rcu_head; + }; + + union { /* This union is 4 bytes in size. */ + /* + * If the page can be mapped to userspace, encodes the number + * of times this page is referenced by a page table. + */ + atomic_t _mapcount; + + /* + * If the page is neither PageSlab nor mappable to userspace, + * the value stored here may help determine what this page + * is used for. See page-flags.h for a list of page types + * which are currently stored here. + */ + unsigned int page_type; + + unsigned int active; /* SLAB */ + int units; /* SLOB */ + }; + + /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */ + atomic_t _refcount; + +#ifdef CONFIG_MEMCG + struct mem_cgroup *mem_cgroup; +#endif + + /* + * On machines where all RAM is mapped into kernel address space, + * we can simply calculate the virtual address. On machines with + * highmem some memory is mapped into kernel virtual memory + * dynamically, so we need a place to store that address. + * Note that this field could be 16 bits on x86 ... ;) + * + * Architectures with slow multiplication can define + * WANT_PAGE_VIRTUAL in asm/page.h + */ +#if defined(WANT_PAGE_VIRTUAL) + void *virtual; /* Kernel virtual address (NULL if + not kmapped, ie. 
highmem) */ +#endif /* WANT_PAGE_VIRTUAL */ + +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS + int _last_cpupid; +#endif +} _struct_page_alignment; + +#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) +#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) + +struct page_frag_cache { + void * va; +#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) + __u16 offset; + __u16 size; +#else + __u32 offset; +#endif + /* we maintain a pagecount bias, so that we dont dirty cache line + * containing page->_refcount every time we allocate a fragment. + */ + unsigned int pagecnt_bias; + bool pfmemalloc; +}; + +typedef unsigned long vm_flags_t; + +static inline atomic_t *compound_mapcount_ptr(struct page *page) +{ + return &page[1].compound_mapcount; +} + +/* + * A region containing a mapping of a non-memory backed file under NOMMU + * conditions. These are held in a global tree and are pinned by the VMAs that + * map parts of them. + */ +struct vm_region { + struct rb_node vm_rb; /* link in global region tree */ + vm_flags_t vm_flags; /* VMA vm_flags */ + unsigned long vm_start; /* start address of region */ + unsigned long vm_end; /* region initialised to here */ + unsigned long vm_top; /* region allocated to here */ + unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ + struct file *vm_file; /* the backing file or NULL */ + + int vm_usage; /* region usage count (access under nommu_region_sem) */ + bool vm_icache_flushed : 1; /* true if the icache has been flushed for + * this region */ +}; + +#ifdef CONFIG_USERFAULTFD +#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, }) +struct vm_userfaultfd_ctx { + struct userfaultfd_ctx *ctx; +}; +#else /* CONFIG_USERFAULTFD */ +#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {}) +struct vm_userfaultfd_ctx {}; +#endif /* CONFIG_USERFAULTFD */ + +/* + * This struct defines a memory VMM memory area. There is one of these + * per VM-area/task. A VM area is any part of the process virtual memory + * space that has a special rule for the page-fault handlers (ie a shared + * library, the executable area etc). + */ +struct vm_area_struct { + /* The first cache line has the info for VMA tree walking. */ + + unsigned long vm_start; /* Our start address within vm_mm. */ + unsigned long vm_end; /* The first byte after our end address + within vm_mm. */ + + /* linked list of VM areas per task, sorted by address */ + struct vm_area_struct *vm_next, *vm_prev; + + struct rb_node vm_rb; + + /* + * Largest free memory gap in bytes to the left of this VMA. + * Either between this VMA and vma->vm_prev, or between one of the + * VMAs below us in the VMA rbtree and its ->vm_prev. This helps + * get_unmapped_area find a free area of the right size. + */ + unsigned long rb_subtree_gap; + + /* Second cache line starts here. */ + + struct mm_struct *vm_mm; /* The address space we belong to. */ + pgprot_t vm_page_prot; /* Access permissions of this VMA. */ + unsigned long vm_flags; /* Flags, see mm.h. */ + + /* + * For areas with an address space and backing store, + * linkage into the address_space->i_mmap interval tree. + */ + struct { + struct rb_node rb; + unsigned long rb_subtree_last; + } shared; + + /* + * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma + * list, after a COW of one of the file pages. A MAP_SHARED vma + * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack + * or brk vma (with NULL file) can only be in an anon_vma list. 
+ */ + struct list_head anon_vma_chain; /* Serialized by mmap_sem & + * page_table_lock */ + struct anon_vma *anon_vma; /* Serialized by page_table_lock */ + + /* Function pointers to deal with this struct. */ + const struct vm_operations_struct *vm_ops; + + /* Information about our backing store: */ + unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE + units */ + struct file * vm_file; /* File we map to (can be NULL). */ + void * vm_private_data; /* was vm_pte (shared mem) */ + + atomic_long_t swap_readahead_info; +#ifndef CONFIG_MMU + struct vm_region *vm_region; /* NOMMU mapping region */ +#endif +#ifdef CONFIG_NUMA + struct mempolicy *vm_policy; /* NUMA policy for the VMA */ +#endif + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +} __randomize_layout; + +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + +struct kioctx_table; +struct mm_struct { + struct { + struct vm_area_struct *mmap; /* list of VMAs */ + struct rb_root mm_rb; + u64 vmacache_seqnum; /* per-thread vmacache */ +#ifdef CONFIG_MMU + unsigned long (*get_unmapped_area) (struct file *filp, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags); +#endif + unsigned long mmap_base; /* base of mmap area */ + unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ +#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES + /* Base adresses for compatible mmap() */ + unsigned long mmap_compat_base; + unsigned long mmap_compat_legacy_base; +#endif + unsigned long task_size; /* size of task vm space */ + unsigned long highest_vm_end; /* highest vma end address */ + pgd_t * pgd; + + /** + * @mm_users: The number of users including userspace. + * + * Use mmget()/mmget_not_zero()/mmput() to modify. When this + * drops to 0 (i.e. when the task exits and there are no other + * temporary reference holders), we also release a reference on + * @mm_count (which may then free the &struct mm_struct if + * @mm_count also drops to 0). + */ + atomic_t mm_users; + + /** + * @mm_count: The number of references to &struct mm_struct + * (@mm_users count as 1). + * + * Use mmgrab()/mmdrop() to modify. When this drops to 0, the + * &struct mm_struct is freed. + */ + atomic_t mm_count; + +#ifdef CONFIG_MMU + atomic_long_t pgtables_bytes; /* PTE page table pages */ +#endif + int map_count; /* number of VMAs */ + + spinlock_t page_table_lock; /* Protects page tables and some + * counters + */ + struct rw_semaphore mmap_sem; + + struct list_head mmlist; /* List of maybe swapped mm's. 
These + * are globally strung together off + * init_mm.mmlist, and are protected + * by mmlist_lock + */ + + + unsigned long hiwater_rss; /* High-watermark of RSS usage */ + unsigned long hiwater_vm; /* High-water virtual memory usage */ + + unsigned long total_vm; /* Total pages mapped */ + unsigned long locked_vm; /* Pages that have PG_mlocked set */ + unsigned long pinned_vm; /* Refcount permanently increased */ + unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ + unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ + unsigned long stack_vm; /* VM_STACK */ + unsigned long def_flags; + + spinlock_t arg_lock; /* protect the below fields */ + unsigned long start_code, end_code, start_data, end_data; + unsigned long start_brk, brk, start_stack; + unsigned long arg_start, arg_end, env_start, env_end; + + unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ + + /* + * Special counters, in some configurations protected by the + * page_table_lock, in other configurations by being atomic. + */ + struct mm_rss_stat rss_stat; + + struct linux_binfmt *binfmt; + + /* Architecture-specific MM context */ + mm_context_t context; + + unsigned long flags; /* Must use atomic bitops to access */ + + struct core_state *core_state; /* coredumping support */ +#ifdef CONFIG_MEMBARRIER + atomic_t membarrier_state; +#endif +#ifdef CONFIG_AIO + spinlock_t ioctx_lock; + struct kioctx_table __rcu *ioctx_table; +#endif +#ifdef CONFIG_MEMCG + /* + * "owner" points to a task that is regarded as the canonical + * user/owner of this mm. All of the following must be true in + * order for it to be changed: + * + * current == mm->owner + * current->mm != mm + * new_owner->mm == mm + * new_owner->alloc_lock is held + */ + struct task_struct __rcu *owner; +#endif + struct user_namespace *user_ns; + + /* store ref to file /proc//exe symlink points to */ + struct file __rcu *exe_file; +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier_mm *mmu_notifier_mm; +#endif +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS + pgtable_t pmd_huge_pte; /* protected by page_table_lock */ +#endif +#ifdef CONFIG_NUMA_BALANCING + /* + * numa_next_scan is the next time that the PTEs will be marked + * pte_numa. NUMA hinting faults will gather statistics and + * migrate pages to new nodes if necessary. + */ + unsigned long numa_next_scan; + + /* Restart point for scanning and setting pte_numa */ + unsigned long numa_scan_offset; + + /* numa_scan_seq prevents two threads setting pte_numa */ + int numa_scan_seq; +#endif + /* + * An operation with batched TLB flushing is going on. Anything + * that can move process memory needs to flush the TLB when + * moving a PROT_NONE or PROT_NUMA mapped page. + */ + atomic_t tlb_flush_pending; +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH + /* See flush_tlb_batched_pending() */ + bool tlb_flush_batched; +#endif + struct uprobes_state uprobes_state; +#ifdef CONFIG_HUGETLB_PAGE + atomic_long_t hugetlb_usage; +#endif + struct work_struct async_put_work; + +#if IS_ENABLED(CONFIG_HMM) + /* HMM needs to track a few things per mm */ + struct hmm *hmm; +#endif + } __randomize_layout; + + /* + * The mm_cpumask needs to be at the end of mm_struct, because it + * is dynamically sized based on nr_cpu_ids. + */ + unsigned long cpu_bitmap[]; +}; + +extern struct mm_struct init_mm; + +/* Pointer magic because the dynamic array size confuses some compilers. 
*/ +static inline void mm_init_cpumask(struct mm_struct *mm) +{ + unsigned long cpu_bitmap = (unsigned long)mm; + + cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap); + cpumask_clear((struct cpumask *)cpu_bitmap); +} + +/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ +static inline cpumask_t *mm_cpumask(struct mm_struct *mm) +{ + return (struct cpumask *)&mm->cpu_bitmap; +} + +struct mmu_gather; +extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void tlb_finish_mmu(struct mmu_gather *tlb, + unsigned long start, unsigned long end); + +static inline void init_tlb_flush_pending(struct mm_struct *mm) +{ + atomic_set(&mm->tlb_flush_pending, 0); +} + +static inline void inc_tlb_flush_pending(struct mm_struct *mm) +{ + atomic_inc(&mm->tlb_flush_pending); + /* + * The only time this value is relevant is when there are indeed pages + * to flush. And we'll only flush pages after changing them, which + * requires the PTL. + * + * So the ordering here is: + * + * atomic_inc(&mm->tlb_flush_pending); + * spin_lock(&ptl); + * ... + * set_pte_at(); + * spin_unlock(&ptl); + * + * spin_lock(&ptl) + * mm_tlb_flush_pending(); + * .... + * spin_unlock(&ptl); + * + * flush_tlb_range(); + * atomic_dec(&mm->tlb_flush_pending); + * + * Where the increment if constrained by the PTL unlock, it thus + * ensures that the increment is visible if the PTE modification is + * visible. After all, if there is no PTE modification, nobody cares + * about TLB flushes either. + * + * This very much relies on users (mm_tlb_flush_pending() and + * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and + * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc + * locks (PPC) the unlock of one doesn't order against the lock of + * another PTL. + * + * The decrement is ordered by the flush_tlb_range(), such that + * mm_tlb_flush_pending() will not return false unless all flushes have + * completed. + */ +} + +static inline void dec_tlb_flush_pending(struct mm_struct *mm) +{ + /* + * See inc_tlb_flush_pending(). + * + * This cannot be smp_mb__before_atomic() because smp_mb() simply does + * not order against TLB invalidate completion, which is what we need. + * + * Therefore we must rely on tlb_flush_*() to guarantee order. + */ + atomic_dec(&mm->tlb_flush_pending); +} + +static inline bool mm_tlb_flush_pending(struct mm_struct *mm) +{ + /* + * Must be called after having acquired the PTL; orders against that + * PTLs release and therefore ensures that if we observe the modified + * PTE we must also observe the increment from inc_tlb_flush_pending(). + * + * That is, it only guarantees to return true if there is a flush + * pending for _this_ PTL. + */ + return atomic_read(&mm->tlb_flush_pending); +} + +static inline bool mm_tlb_flush_nested(struct mm_struct *mm) +{ + /* + * Similar to mm_tlb_flush_pending(), we must have acquired the PTL + * for which there is a TLB flush pending in order to guarantee + * we've seen both that PTE modification and the increment. + * + * (no requirement on actually still holding the PTL, that is irrelevant) + */ + return atomic_read(&mm->tlb_flush_pending) > 1; +} + +struct vm_fault; + +struct vm_special_mapping { + const char *name; /* The name, e.g. "[vdso]". */ + + /* + * If .fault is not provided, this points to a + * NULL-terminated array of pages that back the special mapping. + * + * This must not be NULL unless .fault is provided. 
+ */ + struct page **pages; + + /* + * If non-NULL, then this is called to resolve page faults + * on the special mapping. If used, .pages is not checked. + */ + vm_fault_t (*fault)(const struct vm_special_mapping *sm, + struct vm_area_struct *vma, + struct vm_fault *vmf); + + int (*mremap)(const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma); +}; + +enum tlb_flush_reason { + TLB_FLUSH_ON_TASK_SWITCH, + TLB_REMOTE_SHOOTDOWN, + TLB_LOCAL_SHOOTDOWN, + TLB_LOCAL_MM_SHOOTDOWN, + TLB_REMOTE_SEND_IPI, + NR_TLB_FLUSH_REASONS, +}; + + /* + * A swap entry has to fit into a "unsigned long", as the entry is hidden + * in the "index" field of the swapper address space. + */ +typedef struct { + unsigned long val; +} swp_entry_t; + +#endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h new file mode 100644 index 000000000..d7016dcb2 --- /dev/null +++ b/include/linux/mm_types_task.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MM_TYPES_TASK_H +#define _LINUX_MM_TYPES_TASK_H + +/* + * Here are the definitions of the MM data types that are embedded in 'struct task_struct'. + * + * (These are defined separately to decouple sched.h from mm_types.h as much as possible.) + */ + +#include +#include +#include +#include + +#include + +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH +#include +#endif + +#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) +#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ + IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) +#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) + +/* + * The per task VMA cache array: + */ +#define VMACACHE_BITS 2 +#define VMACACHE_SIZE (1U << VMACACHE_BITS) +#define VMACACHE_MASK (VMACACHE_SIZE - 1) + +struct vmacache { + u64 seqnum; + struct vm_area_struct *vmas[VMACACHE_SIZE]; +}; + +enum { + MM_FILEPAGES, /* Resident file mapping pages */ + MM_ANONPAGES, /* Resident anonymous pages */ + MM_SWAPENTS, /* Anonymous swap entries */ + MM_SHMEMPAGES, /* Resident shared memory pages */ + NR_MM_COUNTERS +}; + +#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) +#define SPLIT_RSS_COUNTING +/* per-thread cached information, */ +struct task_rss_stat { + int events; /* for synchronization threshold */ + int count[NR_MM_COUNTERS]; +}; +#endif /* USE_SPLIT_PTE_PTLOCKS */ + +struct mm_rss_stat { + atomic_long_t count[NR_MM_COUNTERS]; +}; + +struct page_frag { + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 offset; + __u32 size; +#else + __u16 offset; + __u16 size; +#endif +}; + +/* Track pages that require TLB flushes */ +struct tlbflush_unmap_batch { +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH + /* + * The arch code makes the following promise: generic code can modify a + * PTE, then call arch_tlbbatch_add_mm() (which internally provides all + * needed barriers), then call arch_tlbbatch_flush(), and the entries + * will be flushed on all CPUs by the time that arch_tlbbatch_flush() + * returns. + */ + struct arch_tlbflush_unmap_batch arch; + + /* True if a flush is needed. */ + bool flush_required; + + /* + * If true then the PTE was dirty when unmapped. The entry must be + * flushed before IO is initiated or a stale TLB entry potentially + * allows an update without redirtying the page. 
+ */ + bool writable; +#endif +}; + +#endif /* _LINUX_MM_TYPES_TASK_H */ diff --git a/include/linux/mman.h b/include/linux/mman.h new file mode 100644 index 000000000..4b08e9c9c --- /dev/null +++ b/include/linux/mman.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MMAN_H +#define _LINUX_MMAN_H + +#include +#include + +#include +#include + +/* + * Arrange for legacy / undefined architecture specific flags to be + * ignored by mmap handling code. + */ +#ifndef MAP_32BIT +#define MAP_32BIT 0 +#endif +#ifndef MAP_HUGE_2MB +#define MAP_HUGE_2MB 0 +#endif +#ifndef MAP_HUGE_1GB +#define MAP_HUGE_1GB 0 +#endif +#ifndef MAP_UNINITIALIZED +#define MAP_UNINITIALIZED 0 +#endif +#ifndef MAP_SYNC +#define MAP_SYNC 0 +#endif + +/* + * The historical set of flags that all mmap implementations implicitly + * support when a ->mmap_validate() op is not provided in file_operations. + */ +#define LEGACY_MAP_MASK (MAP_SHARED \ + | MAP_PRIVATE \ + | MAP_FIXED \ + | MAP_ANONYMOUS \ + | MAP_DENYWRITE \ + | MAP_EXECUTABLE \ + | MAP_UNINITIALIZED \ + | MAP_GROWSDOWN \ + | MAP_LOCKED \ + | MAP_NORESERVE \ + | MAP_POPULATE \ + | MAP_NONBLOCK \ + | MAP_STACK \ + | MAP_HUGETLB \ + | MAP_32BIT \ + | MAP_HUGE_2MB \ + | MAP_HUGE_1GB) + +extern int sysctl_overcommit_memory; +extern int sysctl_overcommit_ratio; +extern unsigned long sysctl_overcommit_kbytes; +extern struct percpu_counter vm_committed_as; + +#ifdef CONFIG_SMP +extern s32 vm_committed_as_batch; +#else +#define vm_committed_as_batch 0 +#endif + +unsigned long vm_memory_committed(void); + +static inline void vm_acct_memory(long pages) +{ + percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch); +} + +static inline void vm_unacct_memory(long pages) +{ + vm_acct_memory(-pages); +} + +/* + * Allow architectures to handle additional protection bits + */ + +#ifndef arch_calc_vm_prot_bits +#define arch_calc_vm_prot_bits(prot, pkey) 0 +#endif + +#ifndef arch_vm_get_page_prot +#define arch_vm_get_page_prot(vm_flags) __pgprot(0) +#endif + +#ifndef arch_validate_prot +/* + * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have + * already been masked out. + * + * Returns true if the prot flags are valid + */ +static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) +{ + return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; +} +#define arch_validate_prot arch_validate_prot +#endif + +/* + * Optimisation macro. It is equivalent to: + * (x & bit1) ? bit2 : 0 + * but this version is faster. + * ("bit1" and "bit2" must be single bits) + */ +#define _calc_vm_trans(x, bit1, bit2) \ + ((!(bit1) || !(bit2)) ? 0 : \ + ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \ + : ((x) & (bit1)) / ((bit1) / (bit2)))) + +/* + * Combine the mmap "prot" argument into "vm_flags" used internally. + */ +static inline unsigned long +calc_vm_prot_bits(unsigned long prot, unsigned long pkey) +{ + return _calc_vm_trans(prot, PROT_READ, VM_READ ) | + _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) | + _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) | + arch_calc_vm_prot_bits(prot, pkey); +} + +/* + * Combine the mmap "flags" argument into "vm_flags" used internally. 
+ */ +static inline unsigned long +calc_vm_flag_bits(unsigned long flags) +{ + return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | + _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | + _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | + _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ); +} + +unsigned long vm_commit_limit(void); +#endif /* _LINUX_MMAN_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h new file mode 100644 index 000000000..3f8e84a80 --- /dev/null +++ b/include/linux/mmc/card.h @@ -0,0 +1,325 @@ +/* + * linux/include/linux/mmc/card.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Card driver specific definitions. + */ +#ifndef LINUX_MMC_CARD_H +#define LINUX_MMC_CARD_H + +#include +#include + +struct mmc_cid { + unsigned int manfid; + char prod_name[8]; + unsigned char prv; + unsigned int serial; + unsigned short oemid; + unsigned short year; + unsigned char hwrev; + unsigned char fwrev; + unsigned char month; +}; + +struct mmc_csd { + unsigned char structure; + unsigned char mmca_vsn; + unsigned short cmdclass; + unsigned short taac_clks; + unsigned int taac_ns; + unsigned int c_size; + unsigned int r2w_factor; + unsigned int max_dtr; + unsigned int erase_size; /* In sectors */ + unsigned int read_blkbits; + unsigned int write_blkbits; + unsigned int capacity; + unsigned int read_partial:1, + read_misalign:1, + write_partial:1, + write_misalign:1, + dsr_imp:1; +}; + +struct mmc_ext_csd { + u8 rev; + u8 erase_group_def; + u8 sec_feature_support; + u8 rel_sectors; + u8 rel_param; + u8 part_config; + u8 cache_ctrl; + u8 rst_n_function; + u8 max_packed_writes; + u8 max_packed_reads; + u8 packed_event_en; + unsigned int part_time; /* Units: ms */ + unsigned int sa_timeout; /* Units: 100ns */ + unsigned int generic_cmd6_time; /* Units: 10ms */ + unsigned int power_off_longtime; /* Units: ms */ + u8 power_off_notification; /* state */ + unsigned int hs_max_dtr; + unsigned int hs200_max_dtr; +#define MMC_HIGH_26_MAX_DTR 26000000 +#define MMC_HIGH_52_MAX_DTR 52000000 +#define MMC_HIGH_DDR_MAX_DTR 52000000 +#define MMC_HS200_MAX_DTR 200000000 + unsigned int sectors; + unsigned int hc_erase_size; /* In sectors */ + unsigned int hc_erase_timeout; /* In milliseconds */ + unsigned int sec_trim_mult; /* Secure trim multiplier */ + unsigned int sec_erase_mult; /* Secure erase multiplier */ + unsigned int trim_timeout; /* In milliseconds */ + bool partition_setting_completed; /* enable bit */ + unsigned long long enhanced_area_offset; /* Units: Byte */ + unsigned int enhanced_area_size; /* Units: KB */ + unsigned int cache_size; /* Units: KB */ + bool hpi_en; /* HPI enablebit */ + bool hpi; /* HPI support bit */ + unsigned int hpi_cmd; /* cmd used as HPI */ + bool bkops; /* background support bit */ + bool man_bkops_en; /* manual bkops enable bit */ + bool auto_bkops_en; /* auto bkops enable bit */ + unsigned int data_sector_size; /* 512 bytes or 4KB */ + unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ + unsigned int boot_ro_lock; /* ro lock support */ + bool boot_ro_lockable; + bool ffu_capable; /* Firmware upgrade support */ + bool cmdq_en; /* Command Queue enabled */ + bool cmdq_support; /* Command Queue supported */ + unsigned int cmdq_depth; /* Command Queue depth */ +#define MMC_FIRMWARE_LEN 8 + u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */ + u8 raw_exception_status; /* 54 */ + u8 
raw_partition_support; /* 160 */ + u8 raw_rpmb_size_mult; /* 168 */ + u8 raw_erased_mem_count; /* 181 */ + u8 strobe_support; /* 184 */ + u8 raw_ext_csd_structure; /* 194 */ + u8 raw_card_type; /* 196 */ + u8 raw_driver_strength; /* 197 */ + u8 out_of_int_time; /* 198 */ + u8 raw_pwr_cl_52_195; /* 200 */ + u8 raw_pwr_cl_26_195; /* 201 */ + u8 raw_pwr_cl_52_360; /* 202 */ + u8 raw_pwr_cl_26_360; /* 203 */ + u8 raw_s_a_timeout; /* 217 */ + u8 raw_hc_erase_gap_size; /* 221 */ + u8 raw_erase_timeout_mult; /* 223 */ + u8 raw_hc_erase_grp_size; /* 224 */ + u8 raw_sec_trim_mult; /* 229 */ + u8 raw_sec_erase_mult; /* 230 */ + u8 raw_sec_feature_support;/* 231 */ + u8 raw_trim_mult; /* 232 */ + u8 raw_pwr_cl_200_195; /* 236 */ + u8 raw_pwr_cl_200_360; /* 237 */ + u8 raw_pwr_cl_ddr_52_195; /* 238 */ + u8 raw_pwr_cl_ddr_52_360; /* 239 */ + u8 raw_pwr_cl_ddr_200_360; /* 253 */ + u8 raw_bkops_status; /* 246 */ + u8 raw_sectors[4]; /* 212 - 4 bytes */ + u8 pre_eol_info; /* 267 */ + u8 device_life_time_est_typ_a; /* 268 */ + u8 device_life_time_est_typ_b; /* 269 */ + + unsigned int feature_support; +#define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */ +}; + +struct sd_scr { + unsigned char sda_vsn; + unsigned char sda_spec3; + unsigned char bus_widths; +#define SD_SCR_BUS_WIDTH_1 (1<<0) +#define SD_SCR_BUS_WIDTH_4 (1<<2) + unsigned char cmds; +#define SD_SCR_CMD20_SUPPORT (1<<0) +#define SD_SCR_CMD23_SUPPORT (1<<1) +}; + +struct sd_ssr { + unsigned int au; /* In sectors */ + unsigned int erase_timeout; /* In milliseconds */ + unsigned int erase_offset; /* In milliseconds */ +}; + +struct sd_switch_caps { + unsigned int hs_max_dtr; + unsigned int uhs_max_dtr; +#define HIGH_SPEED_MAX_DTR 50000000 +#define UHS_SDR104_MAX_DTR 208000000 +#define UHS_SDR50_MAX_DTR 100000000 +#define UHS_DDR50_MAX_DTR 50000000 +#define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR +#define UHS_SDR12_MAX_DTR 25000000 +#define DEFAULT_SPEED_MAX_DTR UHS_SDR12_MAX_DTR + unsigned int sd3_bus_mode; +#define UHS_SDR12_BUS_SPEED 0 +#define HIGH_SPEED_BUS_SPEED 1 +#define UHS_SDR25_BUS_SPEED 1 +#define UHS_SDR50_BUS_SPEED 2 +#define UHS_SDR104_BUS_SPEED 3 +#define UHS_DDR50_BUS_SPEED 4 + +#define SD_MODE_HIGH_SPEED (1 << HIGH_SPEED_BUS_SPEED) +#define SD_MODE_UHS_SDR12 (1 << UHS_SDR12_BUS_SPEED) +#define SD_MODE_UHS_SDR25 (1 << UHS_SDR25_BUS_SPEED) +#define SD_MODE_UHS_SDR50 (1 << UHS_SDR50_BUS_SPEED) +#define SD_MODE_UHS_SDR104 (1 << UHS_SDR104_BUS_SPEED) +#define SD_MODE_UHS_DDR50 (1 << UHS_DDR50_BUS_SPEED) + unsigned int sd3_drv_type; +#define SD_DRIVER_TYPE_B 0x01 +#define SD_DRIVER_TYPE_A 0x02 +#define SD_DRIVER_TYPE_C 0x04 +#define SD_DRIVER_TYPE_D 0x08 + unsigned int sd3_curr_limit; +#define SD_SET_CURRENT_LIMIT_200 0 +#define SD_SET_CURRENT_LIMIT_400 1 +#define SD_SET_CURRENT_LIMIT_600 2 +#define SD_SET_CURRENT_LIMIT_800 3 +#define SD_SET_CURRENT_NO_CHANGE (-1) + +#define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200) +#define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400) +#define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600) +#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800) +}; + +struct sdio_cccr { + unsigned int sdio_vsn; + unsigned int sd_vsn; + unsigned int multi_block:1, + low_speed:1, + wide_bus:1, + high_power:1, + high_speed:1, + disable_cd:1; +}; + +struct sdio_cis { + unsigned short vendor; + unsigned short device; + unsigned short blksize; + unsigned int max_dtr; +}; + +struct mmc_host; +struct sdio_func; +struct sdio_func_tuple; +struct mmc_queue_req; + +#define SDIO_MAX_FUNCS 7 + +/* The number of 
MMC physical partitions. These consist of: + * boot partitions (2), general purpose partitions (4) and + * RPMB partition (1) in MMC v4.4. + */ +#define MMC_NUM_BOOT_PARTITION 2 +#define MMC_NUM_GP_PARTITION 4 +#define MMC_NUM_PHY_PARTITION 7 +#define MAX_MMC_PART_NAME_LEN 20 + +/* + * MMC Physical partitions + */ +struct mmc_part { + u64 size; /* partition size (in bytes) */ + unsigned int part_cfg; /* partition type */ + char name[MAX_MMC_PART_NAME_LEN]; + bool force_ro; /* to make boot parts RO by default */ + unsigned int area_type; +#define MMC_BLK_DATA_AREA_MAIN (1<<0) +#define MMC_BLK_DATA_AREA_BOOT (1<<1) +#define MMC_BLK_DATA_AREA_GP (1<<2) +#define MMC_BLK_DATA_AREA_RPMB (1<<3) +}; + +/* + * MMC device + */ +struct mmc_card { + struct mmc_host *host; /* the host this device belongs to */ + struct device dev; /* the device */ + u32 ocr; /* the current OCR setting */ + unsigned int rca; /* relative card address of device */ + unsigned int type; /* card type */ +#define MMC_TYPE_MMC 0 /* MMC card */ +#define MMC_TYPE_SD 1 /* SD card */ +#define MMC_TYPE_SDIO 2 /* SDIO card */ +#define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */ + unsigned int state; /* (our) card state */ + unsigned int quirks; /* card quirks */ + unsigned int quirk_max_rate; /* max rate set by quirks */ +#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ +#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ + /* for byte mode */ +#define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ + /* (missing CIA registers) */ +#define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */ +#define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ +#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ +#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ +#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ + /* byte mode */ +#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ +#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ +#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ +#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */ +#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */ + + bool reenable_cmdq; /* Re-enable Command Queue */ + + unsigned int erase_size; /* erase size in sectors */ + unsigned int erase_shift; /* if erase unit is power 2 */ + unsigned int pref_erase; /* in sectors */ + unsigned int eg_boundary; /* don't cross erase-group boundaries */ + u8 erased_byte; /* value of erased bytes */ + + u32 raw_cid[4]; /* raw card CID */ + u32 raw_csd[4]; /* raw card CSD */ + u32 raw_scr[2]; /* raw card SCR */ + u32 raw_ssr[16]; /* raw card SSR */ + struct mmc_cid cid; /* card identification */ + struct mmc_csd csd; /* card specific */ + struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */ + struct sd_scr scr; /* extra SD information */ + struct sd_ssr ssr; /* yet more SD information */ + struct sd_switch_caps sw_caps; /* switch (CMD6) caps */ + + unsigned int sdio_funcs; /* number of SDIO functions */ + struct sdio_cccr cccr; /* common card info */ + struct sdio_cis cis; /* common tuple info */ + struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */ + struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */ + unsigned num_info; /* number of info 
strings */ + const char **info; /* info strings */ + struct sdio_func_tuple *tuples; /* unknown common tuples */ + + unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */ + unsigned int mmc_avail_type; /* supported device type by both host and card */ + unsigned int drive_strength; /* for UHS-I, HS200 or HS400 */ + + struct dentry *debugfs_root; + struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ + unsigned int nr_parts; + + unsigned int bouncesz; /* Bounce buffer size */ + struct workqueue_struct *complete_wq; /* Private workqueue */ +}; + +static inline bool mmc_large_sector(struct mmc_card *card) +{ + return card->ext_csd.data_sector_size == 4096; +} + +bool mmc_card_is_blockaddr(struct mmc_card *card); + +#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) +#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) +#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO) + +#endif /* LINUX_MMC_CARD_H */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h new file mode 100644 index 000000000..134a64833 --- /dev/null +++ b/include/linux/mmc/core.h @@ -0,0 +1,183 @@ +/* + * linux/include/linux/mmc/core.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef LINUX_MMC_CORE_H +#define LINUX_MMC_CORE_H + +#include +#include + +struct mmc_data; +struct mmc_request; + +enum mmc_blk_status { + MMC_BLK_SUCCESS = 0, + MMC_BLK_PARTIAL, + MMC_BLK_CMD_ERR, + MMC_BLK_RETRY, + MMC_BLK_ABORT, + MMC_BLK_DATA_ERR, + MMC_BLK_ECC_ERR, + MMC_BLK_NOMEDIUM, + MMC_BLK_NEW_REQUEST, +}; + +struct mmc_command { + u32 opcode; + u32 arg; +#define MMC_CMD23_ARG_REL_WR (1 << 31) +#define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30)) +#define MMC_CMD23_ARG_TAG_REQ (1 << 29) + u32 resp[4]; + unsigned int flags; /* expected response type */ +#define MMC_RSP_PRESENT (1 << 0) +#define MMC_RSP_136 (1 << 1) /* 136 bit response */ +#define MMC_RSP_CRC (1 << 2) /* expect valid crc */ +#define MMC_RSP_BUSY (1 << 3) /* card may send busy */ +#define MMC_RSP_OPCODE (1 << 4) /* response contains opcode */ + +#define MMC_CMD_MASK (3 << 5) /* non-SPI command type */ +#define MMC_CMD_AC (0 << 5) +#define MMC_CMD_ADTC (1 << 5) +#define MMC_CMD_BC (2 << 5) +#define MMC_CMD_BCR (3 << 5) + +#define MMC_RSP_SPI_S1 (1 << 7) /* one status byte */ +#define MMC_RSP_SPI_S2 (1 << 8) /* second byte */ +#define MMC_RSP_SPI_B4 (1 << 9) /* four data bytes */ +#define MMC_RSP_SPI_BUSY (1 << 10) /* card may send busy */ + +/* + * These are the native response types, and correspond to valid bit + * patterns of the above flags. One additional valid pattern + * is all zeros, which means we don't expect a response. 
+ */ +#define MMC_RSP_NONE (0) +#define MMC_RSP_R1 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) +#define MMC_RSP_R1B (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY) +#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC) +#define MMC_RSP_R3 (MMC_RSP_PRESENT) +#define MMC_RSP_R4 (MMC_RSP_PRESENT) +#define MMC_RSP_R5 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) +#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) +#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) + +/* Can be used by core to poll after switch to MMC HS mode */ +#define MMC_RSP_R1_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE) + +#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE)) + +/* + * These are the SPI response types for MMC, SD, and SDIO cards. + * Commands return R1, with maybe more info. Zero is an error type; + * callers must always provide the appropriate MMC_RSP_SPI_Rx flags. + */ +#define MMC_RSP_SPI_R1 (MMC_RSP_SPI_S1) +#define MMC_RSP_SPI_R1B (MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY) +#define MMC_RSP_SPI_R2 (MMC_RSP_SPI_S1|MMC_RSP_SPI_S2) +#define MMC_RSP_SPI_R3 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4) +#define MMC_RSP_SPI_R4 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4) +#define MMC_RSP_SPI_R5 (MMC_RSP_SPI_S1|MMC_RSP_SPI_S2) +#define MMC_RSP_SPI_R7 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4) + +#define mmc_spi_resp_type(cmd) ((cmd)->flags & \ + (MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY|MMC_RSP_SPI_S2|MMC_RSP_SPI_B4)) + +/* + * These are the command types. + */ +#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK) + + unsigned int retries; /* max number of retries */ + int error; /* command error */ + +/* + * Standard errno values are used for errors, but some have specific + * meaning in the MMC layer: + * + * ETIMEDOUT Card took too long to respond + * EILSEQ Basic format problem with the received or sent data + * (e.g. 
CRC check failed, incorrect opcode in response + * or bad end bit) + * EINVAL Request cannot be performed because of restrictions + * in hardware and/or the driver + * ENOMEDIUM Host can determine that the slot is empty and is + * actively failing requests + */ + + unsigned int busy_timeout; /* busy detect timeout in ms */ + /* Set this flag only for blocking sanitize request */ + bool sanitize_busy; + + struct mmc_data *data; /* data segment associated with cmd */ + struct mmc_request *mrq; /* associated request */ +}; + +struct mmc_data { + unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */ + unsigned int timeout_clks; /* data timeout (in clocks) */ + unsigned int blksz; /* data block size */ + unsigned int blocks; /* number of blocks */ + unsigned int blk_addr; /* block address */ + int error; /* data error */ + unsigned int flags; + +#define MMC_DATA_WRITE BIT(8) +#define MMC_DATA_READ BIT(9) +/* Extra flags used by CQE */ +#define MMC_DATA_QBR BIT(10) /* CQE queue barrier*/ +#define MMC_DATA_PRIO BIT(11) /* CQE high priority */ +#define MMC_DATA_REL_WR BIT(12) /* Reliable write */ +#define MMC_DATA_DAT_TAG BIT(13) /* Tag request */ +#define MMC_DATA_FORCED_PRG BIT(14) /* Forced programming */ + + unsigned int bytes_xfered; + + struct mmc_command *stop; /* stop command */ + struct mmc_request *mrq; /* associated request */ + + unsigned int sg_len; /* size of scatter list */ + int sg_count; /* mapped sg entries */ + struct scatterlist *sg; /* I/O scatter list */ + s32 host_cookie; /* host private data */ +}; + +struct mmc_host; +struct mmc_request { + struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */ + struct mmc_command *cmd; + struct mmc_data *data; + struct mmc_command *stop; + + struct completion completion; + struct completion cmd_completion; + void (*done)(struct mmc_request *);/* completion function */ + /* + * Notify uppers layers (e.g. mmc block driver) that recovery is needed + * due to an error associated with the mmc_request. Currently used only + * by CQE. + */ + void (*recovery_notifier)(struct mmc_request *); + struct mmc_host *host; + + /* Allow other commands during this ongoing data transfer or busy wait */ + bool cap_cmd_during_tfr; + + int tag; +}; + +struct mmc_card; + +void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq); +int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, + int retries); + +int mmc_hw_reset(struct mmc_host *host); +int mmc_sw_reset(struct mmc_host *host); +void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card); + +#endif /* LINUX_MMC_CORE_H */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h new file mode 100644 index 000000000..7e8e5b20e --- /dev/null +++ b/include/linux/mmc/host.h @@ -0,0 +1,591 @@ +/* + * linux/include/linux/mmc/host.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Host driver specific definitions. + */ +#ifndef LINUX_MMC_HOST_H +#define LINUX_MMC_HOST_H + +#include +#include +#include + +#include +#include +#include +#include + +struct mmc_ios { + unsigned int clock; /* clock rate */ + unsigned short vdd; + unsigned int power_delay_ms; /* waiting for stable power */ + +/* vdd stores the bit number of the selected voltage range from below. 
*/ + + unsigned char bus_mode; /* command output mode */ + +#define MMC_BUSMODE_OPENDRAIN 1 +#define MMC_BUSMODE_PUSHPULL 2 + + unsigned char chip_select; /* SPI chip select */ + +#define MMC_CS_DONTCARE 0 +#define MMC_CS_HIGH 1 +#define MMC_CS_LOW 2 + + unsigned char power_mode; /* power supply mode */ + +#define MMC_POWER_OFF 0 +#define MMC_POWER_UP 1 +#define MMC_POWER_ON 2 +#define MMC_POWER_UNDEFINED 3 + + unsigned char bus_width; /* data bus width */ + +#define MMC_BUS_WIDTH_1 0 +#define MMC_BUS_WIDTH_4 2 +#define MMC_BUS_WIDTH_8 3 + + unsigned char timing; /* timing specification used */ + +#define MMC_TIMING_LEGACY 0 +#define MMC_TIMING_MMC_HS 1 +#define MMC_TIMING_SD_HS 2 +#define MMC_TIMING_UHS_SDR12 3 +#define MMC_TIMING_UHS_SDR25 4 +#define MMC_TIMING_UHS_SDR50 5 +#define MMC_TIMING_UHS_SDR104 6 +#define MMC_TIMING_UHS_DDR50 7 +#define MMC_TIMING_MMC_DDR52 8 +#define MMC_TIMING_MMC_HS200 9 +#define MMC_TIMING_MMC_HS400 10 + + unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ + +#define MMC_SIGNAL_VOLTAGE_330 0 +#define MMC_SIGNAL_VOLTAGE_180 1 +#define MMC_SIGNAL_VOLTAGE_120 2 + + unsigned char drv_type; /* driver type (A, B, C, D) */ + +#define MMC_SET_DRIVER_TYPE_B 0 +#define MMC_SET_DRIVER_TYPE_A 1 +#define MMC_SET_DRIVER_TYPE_C 2 +#define MMC_SET_DRIVER_TYPE_D 3 + + bool enhanced_strobe; /* hs400es selection */ +}; + +struct mmc_host; + +struct mmc_host_ops { + /* + * It is optional for the host to implement pre_req and post_req in + * order to support double buffering of requests (prepare one + * request while another request is active). + * pre_req() must always be followed by a post_req(). + * To undo a call made to pre_req(), call post_req() with + * a nonzero err condition. + */ + void (*post_req)(struct mmc_host *host, struct mmc_request *req, + int err); + void (*pre_req)(struct mmc_host *host, struct mmc_request *req); + void (*request)(struct mmc_host *host, struct mmc_request *req); + + /* + * Avoid calling the next three functions too often or in a "fast + * path", since underlaying controller might implement them in an + * expensive and/or slow way. Also note that these functions might + * sleep, so don't call them in the atomic contexts! + */ + + /* + * Notes to the set_ios callback: + * ios->clock might be 0. For some controllers, setting 0Hz + * as any other frequency works. However, some controllers + * explicitly need to disable the clock. Otherwise e.g. voltage + * switching might fail because the SDCLK is not really quiet. 
+ */ + void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); + + /* + * Return values for the get_ro callback should be: + * 0 for a read/write card + * 1 for a read-only card + * -ENOSYS when not supported (equal to NULL callback) + * or a negative errno value when something bad happened + */ + int (*get_ro)(struct mmc_host *host); + + /* + * Return values for the get_cd callback should be: + * 0 for a absent card + * 1 for a present card + * -ENOSYS when not supported (equal to NULL callback) + * or a negative errno value when something bad happened + */ + int (*get_cd)(struct mmc_host *host); + + void (*enable_sdio_irq)(struct mmc_host *host, int enable); + void (*ack_sdio_irq)(struct mmc_host *host); + + /* optional callback for HC quirks */ + void (*init_card)(struct mmc_host *host, struct mmc_card *card); + + int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios); + + /* Check if the card is pulling dat[0:3] low */ + int (*card_busy)(struct mmc_host *host); + + /* The tuning command opcode value is different for SD and eMMC cards */ + int (*execute_tuning)(struct mmc_host *host, u32 opcode); + + /* Prepare HS400 target operating frequency depending host driver */ + int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios); + + /* Prepare for switching from HS400 to HS200 */ + void (*hs400_downgrade)(struct mmc_host *host); + + /* Complete selection of HS400 */ + void (*hs400_complete)(struct mmc_host *host); + + /* Prepare enhanced strobe depending host driver */ + void (*hs400_enhanced_strobe)(struct mmc_host *host, + struct mmc_ios *ios); + int (*select_drive_strength)(struct mmc_card *card, + unsigned int max_dtr, int host_drv, + int card_drv, int *drv_type); + void (*hw_reset)(struct mmc_host *host); + void (*card_event)(struct mmc_host *host); + + /* + * Optional callback to support controllers with HW issues for multiple + * I/O. Returns the number of supported blocks for the request. + */ + int (*multi_io_quirk)(struct mmc_card *card, + unsigned int direction, int blk_size); +}; + +struct mmc_cqe_ops { + /* Allocate resources, and make the CQE operational */ + int (*cqe_enable)(struct mmc_host *host, struct mmc_card *card); + /* Free resources, and make the CQE non-operational */ + void (*cqe_disable)(struct mmc_host *host); + /* + * Issue a read, write or DCMD request to the CQE. Also deal with the + * effect of ->cqe_off(). + */ + int (*cqe_request)(struct mmc_host *host, struct mmc_request *mrq); + /* Free resources (e.g. DMA mapping) associated with the request */ + void (*cqe_post_req)(struct mmc_host *host, struct mmc_request *mrq); + /* + * Prepare the CQE and host controller to accept non-CQ commands. There + * is no corresponding ->cqe_on(), instead ->cqe_request() is required + * to deal with that. + */ + void (*cqe_off)(struct mmc_host *host); + /* + * Wait for all CQE tasks to complete. Return an error if recovery + * becomes necessary. + */ + int (*cqe_wait_for_idle)(struct mmc_host *host); + /* + * Notify CQE that a request has timed out. Return false if the request + * completed or true if a timeout happened in which case indicate if + * recovery is needed. + */ + bool (*cqe_timeout)(struct mmc_host *host, struct mmc_request *mrq, + bool *recovery_needed); + /* + * Stop all CQE activity and prepare the CQE and host controller to + * accept recovery commands. + */ + void (*cqe_recovery_start)(struct mmc_host *host); + /* + * Clear the queue and call mmc_cqe_request_done() on all requests. 
+ * Requests that errored will have the error set on the mmc_request + * (data->error or cmd->error for DCMD). Requests that did not error + * will have zero data bytes transferred. + */ + void (*cqe_recovery_finish)(struct mmc_host *host); +}; + +struct mmc_async_req { + /* active mmc request */ + struct mmc_request *mrq; + /* + * Check error status of completed mmc request. + * Returns 0 if success otherwise non zero. + */ + enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *); +}; + +/** + * struct mmc_slot - MMC slot functions + * + * @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL + * @handler_priv: MMC/SD-card slot context + * + * Some MMC/SD host controllers implement slot-functions like card and + * write-protect detection natively. However, a large number of controllers + * leave these functions to the CPU. This struct provides a hook to attach + * such slot-function drivers. + */ +struct mmc_slot { + int cd_irq; + bool cd_wake_enabled; + void *handler_priv; +}; + +/** + * mmc_context_info - synchronization details for mmc context + * @is_done_rcv wake up reason was done request + * @is_new_req wake up reason was new request + * @is_waiting_last_req mmc context waiting for single running request + * @wait wait queue + */ +struct mmc_context_info { + bool is_done_rcv; + bool is_new_req; + bool is_waiting_last_req; + wait_queue_head_t wait; +}; + +struct regulator; +struct mmc_pwrseq; + +struct mmc_supply { + struct regulator *vmmc; /* Card power supply */ + struct regulator *vqmmc; /* Optional Vccq supply */ +}; + +struct mmc_ctx { + struct task_struct *task; +}; + +struct mmc_host { + struct device *parent; + struct device class_dev; + int index; + const struct mmc_host_ops *ops; + struct mmc_pwrseq *pwrseq; + unsigned int f_min; + unsigned int f_max; + unsigned int f_init; + u32 ocr_avail; + u32 ocr_avail_sdio; /* SDIO-specific OCR */ + u32 ocr_avail_sd; /* SD-specific OCR */ + u32 ocr_avail_mmc; /* MMC-specific OCR */ +#ifdef CONFIG_PM_SLEEP + struct notifier_block pm_notify; +#endif + u32 max_current_330; + u32 max_current_300; + u32 max_current_180; + +#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ +#define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ +#define MMC_VDD_21_22 0x00000200 /* VDD voltage 2.1 ~ 2.2 */ +#define MMC_VDD_22_23 0x00000400 /* VDD voltage 2.2 ~ 2.3 */ +#define MMC_VDD_23_24 0x00000800 /* VDD voltage 2.3 ~ 2.4 */ +#define MMC_VDD_24_25 0x00001000 /* VDD voltage 2.4 ~ 2.5 */ +#define MMC_VDD_25_26 0x00002000 /* VDD voltage 2.5 ~ 2.6 */ +#define MMC_VDD_26_27 0x00004000 /* VDD voltage 2.6 ~ 2.7 */ +#define MMC_VDD_27_28 0x00008000 /* VDD voltage 2.7 ~ 2.8 */ +#define MMC_VDD_28_29 0x00010000 /* VDD voltage 2.8 ~ 2.9 */ +#define MMC_VDD_29_30 0x00020000 /* VDD voltage 2.9 ~ 3.0 */ +#define MMC_VDD_30_31 0x00040000 /* VDD voltage 3.0 ~ 3.1 */ +#define MMC_VDD_31_32 0x00080000 /* VDD voltage 3.1 ~ 3.2 */ +#define MMC_VDD_32_33 0x00100000 /* VDD voltage 3.2 ~ 3.3 */ +#define MMC_VDD_33_34 0x00200000 /* VDD voltage 3.3 ~ 3.4 */ +#define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */ +#define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */ + + u32 caps; /* Host capabilities */ + +#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ +#define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */ +#define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */ +#define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */ +#define MMC_CAP_SPI (1 << 
4) /* Talks only SPI protocols */ +#define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ +#define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ +#define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */ +#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ +#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ +#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */ +#define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */ +#define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */ +#define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */ +#define MMC_CAP_POWER_OFF_CARD (1 << 14) /* Can power off after boot */ +#define MMC_CAP_BUS_WIDTH_TEST (1 << 15) /* CMD14/CMD19 bus width ok */ +#define MMC_CAP_UHS_SDR12 (1 << 16) /* Host supports UHS SDR12 mode */ +#define MMC_CAP_UHS_SDR25 (1 << 17) /* Host supports UHS SDR25 mode */ +#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */ +#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */ +#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */ +#define MMC_CAP_UHS (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | \ + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \ + MMC_CAP_UHS_DDR50) +/* (1 << 21) is free for reuse */ +#define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. */ +#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ +#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ +#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ +#define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */ +#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */ +#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */ +#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. 
*/ +#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */ + + u32 caps2; /* More host capabilities */ + +#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ +#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ +#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ +#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ +#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ + MMC_CAP2_HS200_1_2V_SDR) +#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */ +#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ +#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ +#define MMC_CAP2_HS400_1_8V (1 << 15) /* Can support HS400 1.8V */ +#define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ +#define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ + MMC_CAP2_HS400_1_2V) +#define MMC_CAP2_HSX00_1_8V (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V) +#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V) +#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) +#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */ +#define MMC_CAP2_NO_SDIO (1 << 19) /* Do not send SDIO commands during initialization */ +#define MMC_CAP2_HS400_ES (1 << 20) /* Host supports enhanced strobe */ +#define MMC_CAP2_NO_SD (1 << 21) /* Do not send SD commands during initialization */ +#define MMC_CAP2_NO_MMC (1 << 22) /* Do not send (e)MMC commands during initialization */ +#define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */ +#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */ +#define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */ + + int fixed_drv_type; /* fixed driver type for non-removable media */ + + mmc_pm_flag_t pm_caps; /* supported pm features */ + + /* host specific block data */ + unsigned int max_seg_size; /* see blk_queue_max_segment_size */ + unsigned short max_segs; /* see blk_queue_max_segments */ + unsigned short unused; + unsigned int max_req_size; /* maximum number of bytes in one req */ + unsigned int max_blk_size; /* maximum size of one mmc block */ + unsigned int max_blk_count; /* maximum number of blocks in one req */ + unsigned int max_busy_timeout; /* max busy timeout in ms */ + + /* private data */ + spinlock_t lock; /* lock for claim and bus ops */ + + struct mmc_ios ios; /* current io bus settings */ + + /* group bitfields together to minimize padding */ + unsigned int use_spi_crc:1; + unsigned int claimed:1; /* host exclusively claimed */ + unsigned int bus_dead:1; /* bus has been released */ + unsigned int can_retune:1; /* re-tuning can be used */ + unsigned int doing_retune:1; /* re-tuning in progress */ + unsigned int retune_now:1; /* do re-tuning at next req */ + unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ + unsigned int use_blk_mq:1; /* use blk-mq */ + unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ + + int rescan_disable; /* disable card detection */ + int rescan_entered; /* used with nonremovable devices */ + + int need_retune; /* re-tuning is needed */ + int hold_retune; /* hold off re-tuning */ + unsigned int retune_period; /* re-tuning period in secs */ + struct timer_list retune_timer; /* for periodic re-tuning */ + + bool trigger_card_event; /* card_event necessary */ + + struct mmc_card *card; /* device attached to this host */ + + wait_queue_head_t wq; + struct mmc_ctx 
*claimer; /* context that has host claimed */ + int claim_cnt; /* "claim" nesting count */ + struct mmc_ctx default_ctx; /* default context */ + + struct delayed_work detect; + int detect_change; /* card detect flag */ + struct mmc_slot slot; + + const struct mmc_bus_ops *bus_ops; /* current bus driver */ + unsigned int bus_refs; /* reference counter */ + + unsigned int sdio_irqs; + struct task_struct *sdio_irq_thread; + struct delayed_work sdio_irq_work; + bool sdio_irq_pending; + atomic_t sdio_irq_thread_abort; + + mmc_pm_flag_t pm_flags; /* requested pm features */ + + struct led_trigger *led; /* activity led */ + +#ifdef CONFIG_REGULATOR + bool regulator_enabled; /* regulator state */ +#endif + struct mmc_supply supply; + + struct dentry *debugfs_root; + + /* Ongoing data transfer that allows commands during transfer */ + struct mmc_request *ongoing_mrq; + +#ifdef CONFIG_FAIL_MMC_REQUEST + struct fault_attr fail_mmc_request; +#endif + + unsigned int actual_clock; /* Actual HC clock rate */ + + unsigned int slotno; /* used for sdio acpi binding */ + + int dsr_req; /* DSR value is valid */ + u32 dsr; /* optional driver stage (DSR) value */ + + /* Command Queue Engine (CQE) support */ + const struct mmc_cqe_ops *cqe_ops; + void *cqe_private; + int cqe_qdepth; + bool cqe_enabled; + bool cqe_on; + + unsigned long private[0] ____cacheline_aligned; +}; + +struct device_node; + +struct mmc_host *mmc_alloc_host(int extra, struct device *); +int mmc_add_host(struct mmc_host *); +void mmc_remove_host(struct mmc_host *); +void mmc_free_host(struct mmc_host *); +int mmc_of_parse(struct mmc_host *host); +int mmc_of_parse_voltage(struct device_node *np, u32 *mask); + +static inline void *mmc_priv(struct mmc_host *host) +{ + return (void *)host->private; +} + +#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) + +#define mmc_dev(x) ((x)->parent) +#define mmc_classdev(x) (&(x)->class_dev) +#define mmc_hostname(x) (dev_name(&(x)->class_dev)) + +void mmc_detect_change(struct mmc_host *, unsigned long delay); +void mmc_request_done(struct mmc_host *, struct mmc_request *); +void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq); + +void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq); + +/* + * May be called from host driver's system/runtime suspend/resume callbacks, + * to know if SDIO IRQs has been claimed. 
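+ *
+ * For instance (illustrative only, the foo_* helper is a placeholder),
+ * a host driver could use it in its suspend callback to decide whether
+ * the SDIO IRQ wakeup path must stay armed:
+ *
+ *	if (sdio_irq_claimed(host->mmc))
+ *		foo_enable_sdio_irq_wakeup(host);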
+ */ +static inline bool sdio_irq_claimed(struct mmc_host *host) +{ + return host->sdio_irqs > 0; +} + +static inline void mmc_signal_sdio_irq(struct mmc_host *host) +{ + host->ops->enable_sdio_irq(host, 0); + host->sdio_irq_pending = true; + if (host->sdio_irq_thread) + wake_up_process(host->sdio_irq_thread); +} + +void sdio_run_irqs(struct mmc_host *host); +void sdio_signal_irq(struct mmc_host *host); + +#ifdef CONFIG_REGULATOR +int mmc_regulator_get_ocrmask(struct regulator *supply); +int mmc_regulator_set_ocr(struct mmc_host *mmc, + struct regulator *supply, + unsigned short vdd_bit); +int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); +#else +static inline int mmc_regulator_get_ocrmask(struct regulator *supply) +{ + return 0; +} + +static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, + struct regulator *supply, + unsigned short vdd_bit) +{ + return 0; +} + +static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + return -EINVAL; +} +#endif + +u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); +int mmc_regulator_get_supply(struct mmc_host *mmc); + +static inline int mmc_card_is_removable(struct mmc_host *host) +{ + return !(host->caps & MMC_CAP_NONREMOVABLE); +} + +static inline int mmc_card_keep_power(struct mmc_host *host) +{ + return host->pm_flags & MMC_PM_KEEP_POWER; +} + +static inline int mmc_card_wake_sdio_irq(struct mmc_host *host) +{ + return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ; +} + +/* TODO: Move to private header */ +static inline int mmc_card_hs(struct mmc_card *card) +{ + return card->host->ios.timing == MMC_TIMING_SD_HS || + card->host->ios.timing == MMC_TIMING_MMC_HS; +} + +/* TODO: Move to private header */ +static inline int mmc_card_uhs(struct mmc_card *card) +{ + return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 && + card->host->ios.timing <= MMC_TIMING_UHS_DDR50; +} + +void mmc_retune_timer_stop(struct mmc_host *host); + +static inline void mmc_retune_needed(struct mmc_host *host) +{ + if (host->can_retune) + host->need_retune = 1; +} + +static inline bool mmc_can_retune(struct mmc_host *host) +{ + return host->can_retune == 1; +} + +static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE; +} + +int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); +int mmc_abort_tuning(struct mmc_host *host, u32 opcode); + +#endif /* LINUX_MMC_HOST_H */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h new file mode 100644 index 000000000..897a87c4c --- /dev/null +++ b/include/linux/mmc/mmc.h @@ -0,0 +1,441 @@ +/* + * Header for MultiMediaCard (MMC) + * + * Copyright 2002 Hewlett-Packard Company + * + * Use consistent with the GNU GPL is permitted, + * provided that this copyright notice is + * preserved in its entirety in all copies and derived works. + * + * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, + * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS + * FITNESS FOR ANY PARTICULAR PURPOSE. + * + * Many thanks to Alessandro Rubini and Jonathan Corbet! 
+ * + * Based strongly on code by: + * + * Author: Yong-iL Joh + * + * Author: Andrew Christian + * 15 May 2002 + */ + +#ifndef LINUX_MMC_MMC_H +#define LINUX_MMC_MMC_H + +#include + +/* Standard MMC commands (4.1) type argument response */ + /* class 1 */ +#define MMC_GO_IDLE_STATE 0 /* bc */ +#define MMC_SEND_OP_COND 1 /* bcr [31:0] OCR R3 */ +#define MMC_ALL_SEND_CID 2 /* bcr R2 */ +#define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */ +#define MMC_SET_DSR 4 /* bc [31:16] RCA */ +#define MMC_SLEEP_AWAKE 5 /* ac [31:16] RCA 15:flg R1b */ +#define MMC_SWITCH 6 /* ac [31:0] See below R1b */ +#define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */ +#define MMC_SEND_EXT_CSD 8 /* adtc R1 */ +#define MMC_SEND_CSD 9 /* ac [31:16] RCA R2 */ +#define MMC_SEND_CID 10 /* ac [31:16] RCA R2 */ +#define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */ +#define MMC_STOP_TRANSMISSION 12 /* ac R1b */ +#define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */ +#define MMC_BUS_TEST_R 14 /* adtc R1 */ +#define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */ +#define MMC_BUS_TEST_W 19 /* adtc R1 */ +#define MMC_SPI_READ_OCR 58 /* spi spi_R3 */ +#define MMC_SPI_CRC_ON_OFF 59 /* spi [0:0] flag spi_R1 */ + + /* class 2 */ +#define MMC_SET_BLOCKLEN 16 /* ac [31:0] block len R1 */ +#define MMC_READ_SINGLE_BLOCK 17 /* adtc [31:0] data addr R1 */ +#define MMC_READ_MULTIPLE_BLOCK 18 /* adtc [31:0] data addr R1 */ +#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */ +#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */ + + /* class 3 */ +#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ + + /* class 4 */ +#define MMC_SET_BLOCK_COUNT 23 /* adtc [31:0] data addr R1 */ +#define MMC_WRITE_BLOCK 24 /* adtc [31:0] data addr R1 */ +#define MMC_WRITE_MULTIPLE_BLOCK 25 /* adtc R1 */ +#define MMC_PROGRAM_CID 26 /* adtc R1 */ +#define MMC_PROGRAM_CSD 27 /* adtc R1 */ + + /* class 6 */ +#define MMC_SET_WRITE_PROT 28 /* ac [31:0] data addr R1b */ +#define MMC_CLR_WRITE_PROT 29 /* ac [31:0] data addr R1b */ +#define MMC_SEND_WRITE_PROT 30 /* adtc [31:0] wpdata addr R1 */ + + /* class 5 */ +#define MMC_ERASE_GROUP_START 35 /* ac [31:0] data addr R1 */ +#define MMC_ERASE_GROUP_END 36 /* ac [31:0] data addr R1 */ +#define MMC_ERASE 38 /* ac R1b */ + + /* class 9 */ +#define MMC_FAST_IO 39 /* ac R4 */ +#define MMC_GO_IRQ_STATE 40 /* bcr R5 */ + + /* class 7 */ +#define MMC_LOCK_UNLOCK 42 /* adtc R1b */ + + /* class 8 */ +#define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ +#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ + + /* class 11 */ +#define MMC_QUE_TASK_PARAMS 44 /* ac [20:16] task id R1 */ +#define MMC_QUE_TASK_ADDR 45 /* ac [31:0] data addr R1 */ +#define MMC_EXECUTE_READ_TASK 46 /* adtc [20:16] task id R1 */ +#define MMC_EXECUTE_WRITE_TASK 47 /* adtc [20:16] task id R1 */ +#define MMC_CMDQ_TASK_MGMT 48 /* ac [20:16] task id R1b */ + +static inline bool mmc_op_multi(u32 opcode) +{ + return opcode == MMC_WRITE_MULTIPLE_BLOCK || + opcode == MMC_READ_MULTIPLE_BLOCK; +} + +/* + * MMC_SWITCH argument format: + * + * [31:26] Always 0 + * [25:24] Access Mode + * [23:16] Location of target Byte in EXT_CSD + * [15:08] Value Byte + * [07:03] Always 0 + * [02:00] Command Set + */ + +/* + MMC status in R1, for native mode (SPI bits are different) + Type + e : error bit + s : status bit + r : detected and set for the actual command response + x : detected and set during command execution. the host must poll + the card by sending status command in order to read these bits. 
+ Clear condition + a : according to the card state + b : always related to the previous command. Reception of + a valid command will clear it (with a delay of one command) + c : clear by read + */ + +#define R1_OUT_OF_RANGE (1 << 31) /* er, c */ +#define R1_ADDRESS_ERROR (1 << 30) /* erx, c */ +#define R1_BLOCK_LEN_ERROR (1 << 29) /* er, c */ +#define R1_ERASE_SEQ_ERROR (1 << 28) /* er, c */ +#define R1_ERASE_PARAM (1 << 27) /* ex, c */ +#define R1_WP_VIOLATION (1 << 26) /* erx, c */ +#define R1_CARD_IS_LOCKED (1 << 25) /* sx, a */ +#define R1_LOCK_UNLOCK_FAILED (1 << 24) /* erx, c */ +#define R1_COM_CRC_ERROR (1 << 23) /* er, b */ +#define R1_ILLEGAL_COMMAND (1 << 22) /* er, b */ +#define R1_CARD_ECC_FAILED (1 << 21) /* ex, c */ +#define R1_CC_ERROR (1 << 20) /* erx, c */ +#define R1_ERROR (1 << 19) /* erx, c */ +#define R1_UNDERRUN (1 << 18) /* ex, c */ +#define R1_OVERRUN (1 << 17) /* ex, c */ +#define R1_CID_CSD_OVERWRITE (1 << 16) /* erx, c, CID/CSD overwrite */ +#define R1_WP_ERASE_SKIP (1 << 15) /* sx, c */ +#define R1_CARD_ECC_DISABLED (1 << 14) /* sx, a */ +#define R1_ERASE_RESET (1 << 13) /* sr, c */ +#define R1_STATUS(x) (x & 0xFFF9A000) +#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ +#define R1_READY_FOR_DATA (1 << 8) /* sx, a */ +#define R1_SWITCH_ERROR (1 << 7) /* sx, c */ +#define R1_EXCEPTION_EVENT (1 << 6) /* sr, a */ +#define R1_APP_CMD (1 << 5) /* sr, c */ + +#define R1_STATE_IDLE 0 +#define R1_STATE_READY 1 +#define R1_STATE_IDENT 2 +#define R1_STATE_STBY 3 +#define R1_STATE_TRAN 4 +#define R1_STATE_DATA 5 +#define R1_STATE_RCV 6 +#define R1_STATE_PRG 7 +#define R1_STATE_DIS 8 + +/* + * MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS + * R1 is the low order byte; R2 is the next highest byte, when present. 
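+ *
+ * A host that has read response byte b0 (R1) and, for SEND_STATUS,
+ * byte b1 (R2) can therefore build one status word as, for example,
+ *
+ *	u32 status = b0 | (b1 << 8);
+ *
+ * and test it against the R1_SPI_* / R2_SPI_* bits below.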
+ */ +#define R1_SPI_IDLE (1 << 0) +#define R1_SPI_ERASE_RESET (1 << 1) +#define R1_SPI_ILLEGAL_COMMAND (1 << 2) +#define R1_SPI_COM_CRC (1 << 3) +#define R1_SPI_ERASE_SEQ (1 << 4) +#define R1_SPI_ADDRESS (1 << 5) +#define R1_SPI_PARAMETER (1 << 6) +/* R1 bit 7 is always zero */ +#define R2_SPI_CARD_LOCKED (1 << 8) +#define R2_SPI_WP_ERASE_SKIP (1 << 9) /* or lock/unlock fail */ +#define R2_SPI_LOCK_UNLOCK_FAIL R2_SPI_WP_ERASE_SKIP +#define R2_SPI_ERROR (1 << 10) +#define R2_SPI_CC_ERROR (1 << 11) +#define R2_SPI_CARD_ECC_ERROR (1 << 12) +#define R2_SPI_WP_VIOLATION (1 << 13) +#define R2_SPI_ERASE_PARAM (1 << 14) +#define R2_SPI_OUT_OF_RANGE (1 << 15) /* or CSD overwrite */ +#define R2_SPI_CSD_OVERWRITE R2_SPI_OUT_OF_RANGE + +/* + * OCR bits are mostly in host.h + */ +#define MMC_CARD_BUSY 0x80000000 /* Card Power up status bit */ + +/* + * Card Command Classes (CCC) + */ +#define CCC_BASIC (1<<0) /* (0) Basic protocol functions */ + /* (CMD0,1,2,3,4,7,9,10,12,13,15) */ + /* (and for SPI, CMD58,59) */ +#define CCC_STREAM_READ (1<<1) /* (1) Stream read commands */ + /* (CMD11) */ +#define CCC_BLOCK_READ (1<<2) /* (2) Block read commands */ + /* (CMD16,17,18) */ +#define CCC_STREAM_WRITE (1<<3) /* (3) Stream write commands */ + /* (CMD20) */ +#define CCC_BLOCK_WRITE (1<<4) /* (4) Block write commands */ + /* (CMD16,24,25,26,27) */ +#define CCC_ERASE (1<<5) /* (5) Ability to erase blocks */ + /* (CMD32,33,34,35,36,37,38,39) */ +#define CCC_WRITE_PROT (1<<6) /* (6) Able to write protect blocks */ + /* (CMD28,29,30) */ +#define CCC_LOCK_CARD (1<<7) /* (7) Able to lock down card */ + /* (CMD16,CMD42) */ +#define CCC_APP_SPEC (1<<8) /* (8) Application specific */ + /* (CMD55,56,57,ACMD*) */ +#define CCC_IO_MODE (1<<9) /* (9) I/O mode */ + /* (CMD5,39,40,52,53) */ +#define CCC_SWITCH (1<<10) /* (10) High speed switch */ + /* (CMD6,34,35,36,37,50) */ + /* (11) Reserved */ + /* (CMD?) 
*/ + +/* + * CSD field definitions + */ + +#define CSD_STRUCT_VER_1_0 0 /* Valid for system specification 1.0 - 1.2 */ +#define CSD_STRUCT_VER_1_1 1 /* Valid for system specification 1.4 - 2.2 */ +#define CSD_STRUCT_VER_1_2 2 /* Valid for system specification 3.1 - 3.2 - 3.31 - 4.0 - 4.1 */ +#define CSD_STRUCT_EXT_CSD 3 /* Version is coded in CSD_STRUCTURE in EXT_CSD */ + +#define CSD_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.2 */ +#define CSD_SPEC_VER_1 1 /* Implements system specification 1.4 */ +#define CSD_SPEC_VER_2 2 /* Implements system specification 2.0 - 2.2 */ +#define CSD_SPEC_VER_3 3 /* Implements system specification 3.1 - 3.2 - 3.31 */ +#define CSD_SPEC_VER_4 4 /* Implements system specification 4.0 - 4.1 */ + +/* + * EXT_CSD fields + */ + +#define EXT_CSD_CMDQ_MODE_EN 15 /* R/W */ +#define EXT_CSD_FLUSH_CACHE 32 /* W */ +#define EXT_CSD_CACHE_CTRL 33 /* R/W */ +#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ +#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */ +#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */ +#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */ +#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */ +#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ +#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ +#define EXT_CSD_PARTITION_SETTING_COMPLETED 155 /* R/W */ +#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ +#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ +#define EXT_CSD_HPI_MGMT 161 /* R/W */ +#define EXT_CSD_RST_N_FUNCTION 162 /* R/W */ +#define EXT_CSD_BKOPS_EN 163 /* R/W */ +#define EXT_CSD_BKOPS_START 164 /* W */ +#define EXT_CSD_SANITIZE_START 165 /* W */ +#define EXT_CSD_WR_REL_PARAM 166 /* RO */ +#define EXT_CSD_RPMB_MULT 168 /* RO */ +#define EXT_CSD_FW_CONFIG 169 /* R/W */ +#define EXT_CSD_BOOT_WP 173 /* R/W */ +#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ +#define EXT_CSD_PART_CONFIG 179 /* R/W */ +#define EXT_CSD_ERASED_MEM_CONT 181 /* RO */ +#define EXT_CSD_BUS_WIDTH 183 /* R/W */ +#define EXT_CSD_STROBE_SUPPORT 184 /* RO */ +#define EXT_CSD_HS_TIMING 185 /* R/W */ +#define EXT_CSD_POWER_CLASS 187 /* R/W */ +#define EXT_CSD_REV 192 /* RO */ +#define EXT_CSD_STRUCTURE 194 /* RO */ +#define EXT_CSD_CARD_TYPE 196 /* RO */ +#define EXT_CSD_DRIVER_STRENGTH 197 /* RO */ +#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */ +#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */ +#define EXT_CSD_PWR_CL_52_195 200 /* RO */ +#define EXT_CSD_PWR_CL_26_195 201 /* RO */ +#define EXT_CSD_PWR_CL_52_360 202 /* RO */ +#define EXT_CSD_PWR_CL_26_360 203 /* RO */ +#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ +#define EXT_CSD_S_A_TIMEOUT 217 /* RO */ +#define EXT_CSD_REL_WR_SEC_C 222 /* RO */ +#define EXT_CSD_HC_WP_GRP_SIZE 221 /* RO */ +#define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */ +#define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */ +#define EXT_CSD_BOOT_MULT 226 /* RO */ +#define EXT_CSD_SEC_TRIM_MULT 229 /* RO */ +#define EXT_CSD_SEC_ERASE_MULT 230 /* RO */ +#define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */ +#define EXT_CSD_TRIM_MULT 232 /* RO */ +#define EXT_CSD_PWR_CL_200_195 236 /* RO */ +#define EXT_CSD_PWR_CL_200_360 237 /* RO */ +#define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */ +#define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */ +#define EXT_CSD_BKOPS_STATUS 246 /* RO */ +#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */ +#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ +#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ +#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ +#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ +#define EXT_CSD_PRE_EOL_INFO 267 /* RO */ +#define 
EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A 268 /* RO */ +#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B 269 /* RO */ +#define EXT_CSD_CMDQ_DEPTH 307 /* RO */ +#define EXT_CSD_CMDQ_SUPPORT 308 /* RO */ +#define EXT_CSD_SUPPORTED_MODE 493 /* RO */ +#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ +#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ +#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ +#define EXT_CSD_MAX_PACKED_READS 501 /* RO */ +#define EXT_CSD_BKOPS_SUPPORT 502 /* RO */ +#define EXT_CSD_HPI_FEATURES 503 /* RO */ + +/* + * EXT_CSD field definitions + */ + +#define EXT_CSD_WR_REL_PARAM_EN (1<<2) + +#define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40) +#define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10) +#define EXT_CSD_BOOT_WP_B_PERM_WP_EN (0x04) +#define EXT_CSD_BOOT_WP_B_PWR_WP_EN (0x01) + +#define EXT_CSD_PART_CONFIG_ACC_MASK (0x7) +#define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1) +#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3) +#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4) + +#define EXT_CSD_PART_SETTING_COMPLETED (0x1) +#define EXT_CSD_PART_SUPPORT_PART_EN (0x1) + +#define EXT_CSD_CMD_SET_NORMAL (1<<0) +#define EXT_CSD_CMD_SET_SECURE (1<<1) +#define EXT_CSD_CMD_SET_CPSECURE (1<<2) + +#define EXT_CSD_CARD_TYPE_HS_26 (1<<0) /* Card can run at 26MHz */ +#define EXT_CSD_CARD_TYPE_HS_52 (1<<1) /* Card can run at 52MHz */ +#define EXT_CSD_CARD_TYPE_HS (EXT_CSD_CARD_TYPE_HS_26 | \ + EXT_CSD_CARD_TYPE_HS_52) +#define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */ + /* DDR mode @1.8V or 3V I/O */ +#define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */ + /* DDR mode @1.2V I/O */ +#define EXT_CSD_CARD_TYPE_DDR_52 (EXT_CSD_CARD_TYPE_DDR_1_8V \ + | EXT_CSD_CARD_TYPE_DDR_1_2V) +#define EXT_CSD_CARD_TYPE_HS200_1_8V (1<<4) /* Card can run at 200MHz */ +#define EXT_CSD_CARD_TYPE_HS200_1_2V (1<<5) /* Card can run at 200MHz */ + /* SDR mode @1.2V I/O */ +#define EXT_CSD_CARD_TYPE_HS200 (EXT_CSD_CARD_TYPE_HS200_1_8V | \ + EXT_CSD_CARD_TYPE_HS200_1_2V) +#define EXT_CSD_CARD_TYPE_HS400_1_8V (1<<6) /* Card can run at 200MHz DDR, 1.8V */ +#define EXT_CSD_CARD_TYPE_HS400_1_2V (1<<7) /* Card can run at 200MHz DDR, 1.2V */ +#define EXT_CSD_CARD_TYPE_HS400 (EXT_CSD_CARD_TYPE_HS400_1_8V | \ + EXT_CSD_CARD_TYPE_HS400_1_2V) +#define EXT_CSD_CARD_TYPE_HS400ES (1<<8) /* Card can run at HS400ES */ + +#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ +#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ +#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */ +#define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */ +#define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */ +#define EXT_CSD_BUS_WIDTH_STROBE BIT(7) /* Enhanced strobe mode */ + +#define EXT_CSD_TIMING_BC 0 /* Backwards compatility */ +#define EXT_CSD_TIMING_HS 1 /* High speed */ +#define EXT_CSD_TIMING_HS200 2 /* HS200 */ +#define EXT_CSD_TIMING_HS400 3 /* HS400 */ +#define EXT_CSD_DRV_STR_SHIFT 4 /* Driver Strength shift */ + +#define EXT_CSD_SEC_ER_EN BIT(0) +#define EXT_CSD_SEC_BD_BLK_EN BIT(2) +#define EXT_CSD_SEC_GB_CL_EN BIT(4) +#define EXT_CSD_SEC_SANITIZE BIT(6) /* v4.5 only */ + +#define EXT_CSD_RST_N_EN_MASK 0x3 +#define EXT_CSD_RST_N_ENABLED 1 /* RST_n is enabled on card */ + +#define EXT_CSD_NO_POWER_NOTIFICATION 0 +#define EXT_CSD_POWER_ON 1 +#define EXT_CSD_POWER_OFF_SHORT 2 +#define EXT_CSD_POWER_OFF_LONG 3 + +#define EXT_CSD_PWR_CL_8BIT_MASK 0xF0 /* 8 bit PWR CLS */ +#define EXT_CSD_PWR_CL_4BIT_MASK 0x0F /* 8 bit PWR CLS */ +#define EXT_CSD_PWR_CL_8BIT_SHIFT 4 +#define EXT_CSD_PWR_CL_4BIT_SHIFT 0 + +#define 
EXT_CSD_PACKED_EVENT_EN BIT(3) + +/* + * EXCEPTION_EVENT_STATUS field + */ +#define EXT_CSD_URGENT_BKOPS BIT(0) +#define EXT_CSD_DYNCAP_NEEDED BIT(1) +#define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2) +#define EXT_CSD_PACKED_FAILURE BIT(3) + +#define EXT_CSD_PACKED_GENERIC_ERROR BIT(0) +#define EXT_CSD_PACKED_INDEXED_ERROR BIT(1) + +/* + * BKOPS status level + */ +#define EXT_CSD_BKOPS_LEVEL_2 0x2 + +/* + * BKOPS modes + */ +#define EXT_CSD_MANUAL_BKOPS_MASK 0x01 +#define EXT_CSD_AUTO_BKOPS_MASK 0x02 + +/* + * Command Queue + */ +#define EXT_CSD_CMDQ_MODE_ENABLED BIT(0) +#define EXT_CSD_CMDQ_DEPTH_MASK GENMASK(4, 0) +#define EXT_CSD_CMDQ_SUPPORTED BIT(0) + +/* + * MMC_SWITCH access modes + */ +#define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */ +#define MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */ +#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */ +#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */ + +/* + * Erase/trim/discard + */ +#define MMC_ERASE_ARG 0x00000000 +#define MMC_SECURE_ERASE_ARG 0x80000000 +#define MMC_TRIM_ARG 0x00000001 +#define MMC_DISCARD_ARG 0x00000003 +#define MMC_SECURE_TRIM1_ARG 0x80000001 +#define MMC_SECURE_TRIM2_ARG 0x80008000 +#define MMC_SECURE_ARGS 0x80000000 +#define MMC_TRIM_ARGS 0x00008001 + +#define mmc_driver_type_mask(n) (1 << (n)) + +#endif /* LINUX_MMC_MMC_H */ diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h new file mode 100644 index 000000000..4a139204c --- /dev/null +++ b/include/linux/mmc/pm.h @@ -0,0 +1,30 @@ +/* + * linux/include/linux/mmc/pm.h + * + * Author: Nicolas Pitre + * Copyright: (C) 2009 Marvell Technology Group Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef LINUX_MMC_PM_H +#define LINUX_MMC_PM_H + +/* + * These flags are used to describe power management features that + * some cards (typically SDIO cards) might wish to benefit from when + * the host system is being suspended. There are several layers of + * abstractions involved, from the host controller driver, to the MMC core + * code, to the SDIO core code, to finally get to the actual SDIO function + * driver. This file is therefore used for common definitions shared across + * all those layers. + */ + +typedef unsigned int mmc_pm_flag_t; + +#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */ +#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */ + +#endif /* LINUX_MMC_PM_H */ diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h new file mode 100644 index 000000000..1ebcf9ba1 --- /dev/null +++ b/include/linux/mmc/sd.h @@ -0,0 +1,94 @@ +/* + * include/linux/mmc/sd.h + * + * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef LINUX_MMC_SD_H +#define LINUX_MMC_SD_H + +/* SD commands type argument response */ + /* class 0 */ +/* This is basically the same command as for MMC with some quirks. 
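+ * In particular, for SD the card publishes its own RCA in bits [31:16]
+ * of the R6 response (e.g. rca = cmd.resp[0] >> 16), instead of the
+ * host assigning one as MMC SET_RELATIVE_ADDR does.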
*/ +#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */ +#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */ +#define SD_SWITCH_VOLTAGE 11 /* ac R1 */ + + /* class 10 */ +#define SD_SWITCH 6 /* adtc [31:0] See below R1 */ + + /* class 5 */ +#define SD_ERASE_WR_BLK_START 32 /* ac [31:0] data addr R1 */ +#define SD_ERASE_WR_BLK_END 33 /* ac [31:0] data addr R1 */ + + /* Application commands */ +#define SD_APP_SET_BUS_WIDTH 6 /* ac [1:0] bus width R1 */ +#define SD_APP_SD_STATUS 13 /* adtc R1 */ +#define SD_APP_SEND_NUM_WR_BLKS 22 /* adtc R1 */ +#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ +#define SD_APP_SEND_SCR 51 /* adtc R1 */ + +/* OCR bit definitions */ +#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */ +#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */ +#define SD_OCR_XPC (1 << 28) /* SDXC power control */ +#define SD_OCR_CCS (1 << 30) /* Card Capacity Status */ + +/* + * SD_SWITCH argument format: + * + * [31] Check (0) or switch (1) + * [30:24] Reserved (0) + * [23:20] Function group 6 + * [19:16] Function group 5 + * [15:12] Function group 4 + * [11:8] Function group 3 + * [7:4] Function group 2 + * [3:0] Function group 1 + */ + +/* + * SD_SEND_IF_COND argument format: + * + * [31:12] Reserved (0) + * [11:8] Host Voltage Supply Flags + * [7:0] Check Pattern (0xAA) + */ + +/* + * SCR field definitions + */ + +#define SCR_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.01 */ +#define SCR_SPEC_VER_1 1 /* Implements system specification 1.10 */ +#define SCR_SPEC_VER_2 2 /* Implements system specification 2.00-3.0X */ + +/* + * SD bus widths + */ +#define SD_BUS_WIDTH_1 0 +#define SD_BUS_WIDTH_4 2 + +/* + * SD_SWITCH mode + */ +#define SD_SWITCH_CHECK 0 +#define SD_SWITCH_SET 1 + +/* + * SD_SWITCH function groups + */ +#define SD_SWITCH_GRP_ACCESS 0 + +/* + * SD_SWITCH access modes + */ +#define SD_SWITCH_ACCESS_DEF 0 +#define SD_SWITCH_ACCESS_HS 1 + +#endif /* LINUX_MMC_SD_H */ diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h new file mode 100644 index 000000000..1d42872d2 --- /dev/null +++ b/include/linux/mmc/sdhci-pci-data.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_MMC_SDHCI_PCI_DATA_H +#define LINUX_MMC_SDHCI_PCI_DATA_H + +struct pci_dev; + +struct sdhci_pci_data { + struct pci_dev *pdev; + int slotno; + int rst_n_gpio; /* Set to -EINVAL if unused */ + int cd_gpio; /* Set to -EINVAL if unused */ + int (*setup)(struct sdhci_pci_data *data); + void (*cleanup)(struct sdhci_pci_data *data); +}; + +extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, + int slotno); +#endif diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h new file mode 100644 index 000000000..17446d3c3 --- /dev/null +++ b/include/linux/mmc/sdio.h @@ -0,0 +1,193 @@ +/* + * include/linux/mmc/sdio.h + * + * Copyright 2006-2007 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + +#ifndef LINUX_MMC_SDIO_H +#define LINUX_MMC_SDIO_H + +/* SDIO commands type argument response */ +#define SD_IO_SEND_OP_COND 5 /* bcr [23:0] OCR R4 */ +#define SD_IO_RW_DIRECT 52 /* ac [31:0] See below R5 */ +#define SD_IO_RW_EXTENDED 53 /* adtc [31:0] See below R5 */ + +/* + * SD_IO_RW_DIRECT argument format: + * + * [31] R/W flag + * [30:28] Function number + * [27] RAW flag + * [25:9] Register address + * [7:0] Data + */ + +/* + * SD_IO_RW_EXTENDED argument format: + * + * [31] R/W flag + * [30:28] Function number + * [27] Block mode + * [26] Increment address + * [25:9] Register address + * [8:0] Byte/block count + */ + +#define R4_18V_PRESENT (1<<24) +#define R4_MEMORY_PRESENT (1 << 27) + +/* + SDIO status in R5 + Type + e : error bit + s : status bit + r : detected and set for the actual command response + x : detected and set during command execution. the host must poll + the card by sending status command in order to read these bits. + Clear condition + a : according to the card state + b : always related to the previous command. Reception of + a valid command will clear it (with a delay of one command) + c : clear by read + */ + +#define R5_COM_CRC_ERROR (1 << 15) /* er, b */ +#define R5_ILLEGAL_COMMAND (1 << 14) /* er, b */ +#define R5_ERROR (1 << 11) /* erx, c */ +#define R5_FUNCTION_NUMBER (1 << 9) /* er, c */ +#define R5_OUT_OF_RANGE (1 << 8) /* er, c */ +#define R5_STATUS(x) (x & 0xCB00) +#define R5_IO_CURRENT_STATE(x) ((x & 0x3000) >> 12) /* s, b */ + +/* + * Card Common Control Registers (CCCR) + */ + +#define SDIO_CCCR_CCCR 0x00 + +#define SDIO_CCCR_REV_1_00 0 /* CCCR/FBR Version 1.00 */ +#define SDIO_CCCR_REV_1_10 1 /* CCCR/FBR Version 1.10 */ +#define SDIO_CCCR_REV_1_20 2 /* CCCR/FBR Version 1.20 */ +#define SDIO_CCCR_REV_3_00 3 /* CCCR/FBR Version 3.00 */ + +#define SDIO_SDIO_REV_1_00 0 /* SDIO Spec Version 1.00 */ +#define SDIO_SDIO_REV_1_10 1 /* SDIO Spec Version 1.10 */ +#define SDIO_SDIO_REV_1_20 2 /* SDIO Spec Version 1.20 */ +#define SDIO_SDIO_REV_2_00 3 /* SDIO Spec Version 2.00 */ +#define SDIO_SDIO_REV_3_00 4 /* SDIO Spec Version 3.00 */ + +#define SDIO_CCCR_SD 0x01 + +#define SDIO_SD_REV_1_01 0 /* SD Physical Spec Version 1.01 */ +#define SDIO_SD_REV_1_10 1 /* SD Physical Spec Version 1.10 */ +#define SDIO_SD_REV_2_00 2 /* SD Physical Spec Version 2.00 */ +#define SDIO_SD_REV_3_00 3 /* SD Physical Spev Version 3.00 */ + +#define SDIO_CCCR_IOEx 0x02 +#define SDIO_CCCR_IORx 0x03 + +#define SDIO_CCCR_IENx 0x04 /* Function/Master Interrupt Enable */ +#define SDIO_CCCR_INTx 0x05 /* Function Interrupt Pending */ + +#define SDIO_CCCR_ABORT 0x06 /* function abort/card reset */ + +#define SDIO_CCCR_IF 0x07 /* bus interface controls */ + +#define SDIO_BUS_WIDTH_MASK 0x03 /* data bus width setting */ +#define SDIO_BUS_WIDTH_1BIT 0x00 +#define SDIO_BUS_WIDTH_RESERVED 0x01 +#define SDIO_BUS_WIDTH_4BIT 0x02 +#define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */ +#define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */ + +#define SDIO_BUS_ASYNC_INT 0x20 + +#define SDIO_BUS_CD_DISABLE 0x80 /* disable pull-up on DAT3 (pin 1) */ + +#define SDIO_CCCR_CAPS 0x08 + +#define SDIO_CCCR_CAP_SDC 0x01 /* can do CMD52 while data transfer */ +#define SDIO_CCCR_CAP_SMB 0x02 /* can do multi-block xfers (CMD53) */ +#define SDIO_CCCR_CAP_SRW 0x04 /* supports read-wait protocol */ +#define SDIO_CCCR_CAP_SBS 0x08 /* supports suspend/resume */ +#define SDIO_CCCR_CAP_S4MI 0x10 /* interrupt during 4-bit CMD53 */ +#define SDIO_CCCR_CAP_E4MI 0x20 /* enable ints during 
4-bit CMD53 */ +#define SDIO_CCCR_CAP_LSC 0x40 /* low speed card */ +#define SDIO_CCCR_CAP_4BLS 0x80 /* 4 bit low speed card */ + +#define SDIO_CCCR_CIS 0x09 /* common CIS pointer (3 bytes) */ + +/* Following 4 regs are valid only if SBS is set */ +#define SDIO_CCCR_SUSPEND 0x0c +#define SDIO_CCCR_SELx 0x0d +#define SDIO_CCCR_EXECx 0x0e +#define SDIO_CCCR_READYx 0x0f + +#define SDIO_CCCR_BLKSIZE 0x10 + +#define SDIO_CCCR_POWER 0x12 + +#define SDIO_POWER_SMPC 0x01 /* Supports Master Power Control */ +#define SDIO_POWER_EMPC 0x02 /* Enable Master Power Control */ + +#define SDIO_CCCR_SPEED 0x13 + +#define SDIO_SPEED_SHS 0x01 /* Supports High-Speed mode */ +#define SDIO_SPEED_BSS_SHIFT 1 +#define SDIO_SPEED_BSS_MASK (7< +#include + +#include + +struct mmc_card; +struct sdio_func; + +typedef void (sdio_irq_handler_t)(struct sdio_func *); + +/* + * SDIO function CIS tuple (unknown to the core) + */ +struct sdio_func_tuple { + struct sdio_func_tuple *next; + unsigned char code; + unsigned char size; + unsigned char data[0]; +}; + +/* + * SDIO function devices + */ +struct sdio_func { + struct mmc_card *card; /* the card this device belongs to */ + struct device dev; /* the device */ + sdio_irq_handler_t *irq_handler; /* IRQ callback */ + unsigned int num; /* function number */ + + unsigned char class; /* standard interface class */ + unsigned short vendor; /* vendor id */ + unsigned short device; /* device id */ + + unsigned max_blksize; /* maximum block size */ + unsigned cur_blksize; /* current block size */ + + unsigned enable_timeout; /* max enable timeout in msec */ + + unsigned int state; /* function state */ +#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */ + + u8 *tmpbuf; /* DMA:able scratch buffer */ + + unsigned num_info; /* number of info strings */ + const char **info; /* info strings */ + + struct sdio_func_tuple *tuples; +}; + +#define sdio_func_present(f) ((f)->state & SDIO_STATE_PRESENT) + +#define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT) + +#define sdio_func_id(f) (dev_name(&(f)->dev)) + +#define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev) +#define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d) +#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) + +/* + * SDIO function device driver + */ +struct sdio_driver { + char *name; + const struct sdio_device_id *id_table; + + int (*probe)(struct sdio_func *, const struct sdio_device_id *); + void (*remove)(struct sdio_func *); + + struct device_driver drv; +}; + +/** + * SDIO_DEVICE - macro used to describe a specific SDIO device + * @vend: the 16 bit manufacturer code + * @dev: the 16 bit function id + * + * This macro is used to create a struct sdio_device_id that matches a + * specific device. The class field will be set to SDIO_ANY_ID. + */ +#define SDIO_DEVICE(vend,dev) \ + .class = SDIO_ANY_ID, \ + .vendor = (vend), .device = (dev) + +/** + * SDIO_DEVICE_CLASS - macro used to describe a specific SDIO device class + * @dev_class: the 8 bit standard interface code + * + * This macro is used to create a struct sdio_device_id that matches a + * specific standard SDIO function type. The vendor and device fields will + * be set to SDIO_ANY_ID. 
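+ *
+ * Illustrative use, combining both helpers (the IDs shown are only an
+ * example; a real driver would list its own devices):
+ *
+ *	static const struct sdio_device_id foo_ids[] = {
+ *		{ SDIO_DEVICE(0x02d0, 0x4329) },	// one specific function
+ *		{ SDIO_DEVICE_CLASS(SDIO_CLASS_WLAN) },	// any WLAN function
+ *		{ },					// all zeroes ends the table
+ *	};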
+ */ +#define SDIO_DEVICE_CLASS(dev_class) \ + .class = (dev_class), \ + .vendor = SDIO_ANY_ID, .device = SDIO_ANY_ID + +extern int sdio_register_driver(struct sdio_driver *); +extern void sdio_unregister_driver(struct sdio_driver *); + +/* + * SDIO I/O operations + */ +extern void sdio_claim_host(struct sdio_func *func); +extern void sdio_release_host(struct sdio_func *func); + +extern int sdio_enable_func(struct sdio_func *func); +extern int sdio_disable_func(struct sdio_func *func); + +extern int sdio_set_block_size(struct sdio_func *func, unsigned blksz); + +extern int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler); +extern int sdio_release_irq(struct sdio_func *func); + +extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz); + +extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret); +extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret); +extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret); + +extern int sdio_memcpy_fromio(struct sdio_func *func, void *dst, + unsigned int addr, int count); +extern int sdio_readsb(struct sdio_func *func, void *dst, + unsigned int addr, int count); + +extern void sdio_writeb(struct sdio_func *func, u8 b, + unsigned int addr, int *err_ret); +extern void sdio_writew(struct sdio_func *func, u16 b, + unsigned int addr, int *err_ret); +extern void sdio_writel(struct sdio_func *func, u32 b, + unsigned int addr, int *err_ret); + +extern u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte, + unsigned int addr, int *err_ret); + +extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr, + void *src, int count); +extern int sdio_writesb(struct sdio_func *func, unsigned int addr, + void *src, int count); + +extern unsigned char sdio_f0_readb(struct sdio_func *func, + unsigned int addr, int *err_ret); +extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b, + unsigned int addr, int *err_ret); + +extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func); +extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags); + +extern void sdio_retune_crc_disable(struct sdio_func *func); +extern void sdio_retune_crc_enable(struct sdio_func *func); + +extern void sdio_retune_hold_now(struct sdio_func *func); +extern void sdio_retune_release(struct sdio_func *func); + +#endif /* LINUX_MMC_SDIO_FUNC_H */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h new file mode 100644 index 000000000..358d6be35 --- /dev/null +++ b/include/linux/mmc/sdio_ids.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * SDIO Classes, Interface Types, Manufacturer IDs, etc. + */ + +#ifndef LINUX_MMC_SDIO_IDS_H +#define LINUX_MMC_SDIO_IDS_H + +/* + * Standard SDIO Function Interfaces + */ + +#define SDIO_CLASS_NONE 0x00 /* Not a SDIO standard interface */ +#define SDIO_CLASS_UART 0x01 /* standard UART interface */ +#define SDIO_CLASS_BT_A 0x02 /* Type-A BlueTooth std interface */ +#define SDIO_CLASS_BT_B 0x03 /* Type-B BlueTooth std interface */ +#define SDIO_CLASS_GPS 0x04 /* GPS standard interface */ +#define SDIO_CLASS_CAMERA 0x05 /* Camera standard interface */ +#define SDIO_CLASS_PHS 0x06 /* PHS standard interface */ +#define SDIO_CLASS_WLAN 0x07 /* WLAN interface */ +#define SDIO_CLASS_ATA 0x08 /* Embedded SDIO-ATA std interface */ +#define SDIO_CLASS_BT_AMP 0x09 /* Type-A Bluetooth AMP interface */ + +/* + * Vendors and devices. Sort key: vendor first, device next. 
+ */ +#define SDIO_VENDOR_ID_BROADCOM 0x02d0 +#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887 +#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 +#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 +#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 +#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 +#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c +#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d +#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 +#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339 +#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 +#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 +#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 +#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 +#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf +#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 +#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 +#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373 + +#define SDIO_VENDOR_ID_INTEL 0x0089 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403 +#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 +#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405 +#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407 + +#define SDIO_VENDOR_ID_MARVELL 0x02df +#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 +#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104 +#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105 +#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128 +#define SDIO_DEVICE_ID_MARVELL_8887WLAN 0x9134 + +#define SDIO_VENDOR_ID_SIANO 0x039a +#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 +#define SDIO_DEVICE_ID_SIANO_NICE 0x0202 +#define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300 +#define SDIO_DEVICE_ID_SIANO_VENICE 0x0301 +#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100 +#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347 + +#define SDIO_VENDOR_ID_TI 0x0097 +#define SDIO_DEVICE_ID_TI_WL1271 0x4076 +#define SDIO_VENDOR_ID_TI_WL1251 0x104c +#define SDIO_DEVICE_ID_TI_WL1251 0x9066 + +#define SDIO_VENDOR_ID_STE 0x0020 +#define SDIO_DEVICE_ID_STE_CW1200 0x2280 + +#endif /* LINUX_MMC_SDIO_IDS_H */ diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h new file mode 100644 index 000000000..a7baa2948 --- /dev/null +++ b/include/linux/mmc/sh_mmcif.h @@ -0,0 +1,213 @@ +/* + * include/linux/mmc/sh_mmcif.h + * + * platform data for eMMC driver + * + * Copyright (C) 2010 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
+ * + */ + +#ifndef LINUX_MMC_SH_MMCIF_H +#define LINUX_MMC_SH_MMCIF_H + +#include +#include + +/* + * MMCIF : CE_CLK_CTRL [19:16] + * 1000 : Peripheral clock / 512 + * 0111 : Peripheral clock / 256 + * 0110 : Peripheral clock / 128 + * 0101 : Peripheral clock / 64 + * 0100 : Peripheral clock / 32 + * 0011 : Peripheral clock / 16 + * 0010 : Peripheral clock / 8 + * 0001 : Peripheral clock / 4 + * 0000 : Peripheral clock / 2 + * 1111 : Peripheral clock (sup_pclk set '1') + */ + +struct sh_mmcif_plat_data { + unsigned int slave_id_tx; /* embedded slave_id_[tr]x */ + unsigned int slave_id_rx; + u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */ + unsigned long caps; + u32 ocr; +}; + +#define MMCIF_CE_CMD_SET 0x00000000 +#define MMCIF_CE_ARG 0x00000008 +#define MMCIF_CE_ARG_CMD12 0x0000000C +#define MMCIF_CE_CMD_CTRL 0x00000010 +#define MMCIF_CE_BLOCK_SET 0x00000014 +#define MMCIF_CE_CLK_CTRL 0x00000018 +#define MMCIF_CE_BUF_ACC 0x0000001C +#define MMCIF_CE_RESP3 0x00000020 +#define MMCIF_CE_RESP2 0x00000024 +#define MMCIF_CE_RESP1 0x00000028 +#define MMCIF_CE_RESP0 0x0000002C +#define MMCIF_CE_RESP_CMD12 0x00000030 +#define MMCIF_CE_DATA 0x00000034 +#define MMCIF_CE_INT 0x00000040 +#define MMCIF_CE_INT_MASK 0x00000044 +#define MMCIF_CE_HOST_STS1 0x00000048 +#define MMCIF_CE_HOST_STS2 0x0000004C +#define MMCIF_CE_CLK_CTRL2 0x00000070 +#define MMCIF_CE_VERSION 0x0000007C + +/* CE_BUF_ACC */ +#define BUF_ACC_DMAWEN (1 << 25) +#define BUF_ACC_DMAREN (1 << 24) +#define BUF_ACC_BUSW_32 (0 << 17) +#define BUF_ACC_BUSW_16 (1 << 17) +#define BUF_ACC_ATYP (1 << 16) + +/* CE_CLK_CTRL */ +#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */ +#define CLK_CLEAR (0xf << 16) +#define CLK_SUP_PCLK (0xf << 16) +#define CLKDIV_4 (1 << 16) /* mmc clock frequency. + * n: bus clock/(2^(n+1)) */ +#define CLKDIV_256 (7 << 16) /* mmc clock frequency. 
(see above) */ +#define SRSPTO_256 (2 << 12) /* resp timeout */ +#define SRBSYTO_29 (0xf << 8) /* resp busy timeout */ +#define SRWDTO_29 (0xf << 4) /* read/write timeout */ +#define SCCSTO_29 (0xf << 0) /* ccs timeout */ + +/* CE_VERSION */ +#define SOFT_RST_ON (1 << 31) +#define SOFT_RST_OFF 0 + +static inline u32 sh_mmcif_readl(void __iomem *addr, int reg) +{ + return __raw_readl(addr + reg); +} + +static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val) +{ + __raw_writel(val, addr + reg); +} + +#define SH_MMCIF_BBS 512 /* boot block size */ + +static inline void sh_mmcif_boot_cmd_send(void __iomem *base, + unsigned long cmd, unsigned long arg) +{ + sh_mmcif_writel(base, MMCIF_CE_INT, 0); + sh_mmcif_writel(base, MMCIF_CE_ARG, arg); + sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd); +} + +static inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask) +{ + unsigned long tmp; + int cnt; + + for (cnt = 0; cnt < 1000000; cnt++) { + tmp = sh_mmcif_readl(base, MMCIF_CE_INT); + if (tmp & mask) { + sh_mmcif_writel(base, MMCIF_CE_INT, tmp & ~mask); + return 0; + } + } + + return -1; +} + +static inline int sh_mmcif_boot_cmd(void __iomem *base, + unsigned long cmd, unsigned long arg) +{ + sh_mmcif_boot_cmd_send(base, cmd, arg); + return sh_mmcif_boot_cmd_poll(base, 0x00010000); +} + +static inline int sh_mmcif_boot_do_read_single(void __iomem *base, + unsigned int block_nr, + unsigned long *buf) +{ + int k; + + /* CMD13 - Status */ + sh_mmcif_boot_cmd(base, 0x0d400000, 0x00010000); + + if (sh_mmcif_readl(base, MMCIF_CE_RESP0) != 0x0900) + return -1; + + /* CMD17 - Read */ + sh_mmcif_boot_cmd(base, 0x11480000, block_nr * SH_MMCIF_BBS); + if (sh_mmcif_boot_cmd_poll(base, 0x00100000) < 0) + return -1; + + for (k = 0; k < (SH_MMCIF_BBS / 4); k++) + buf[k] = sh_mmcif_readl(base, MMCIF_CE_DATA); + + return 0; +} + +static inline int sh_mmcif_boot_do_read(void __iomem *base, + unsigned long first_block, + unsigned long nr_blocks, + void *buf) +{ + unsigned long k; + int ret = 0; + + /* In data transfer mode: Set clock to Bus clock/4 (about 20Mhz) */ + sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, + CLK_ENABLE | CLKDIV_4 | SRSPTO_256 | + SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); + + /* CMD9 - Get CSD */ + sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000); + + /* CMD7 - Select the card */ + sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000); + + /* CMD16 - Set the block size */ + sh_mmcif_boot_cmd(base, 0x10400000, SH_MMCIF_BBS); + + for (k = 0; !ret && k < nr_blocks; k++) + ret = sh_mmcif_boot_do_read_single(base, first_block + k, + buf + (k * SH_MMCIF_BBS)); + + return ret; +} + +static inline void sh_mmcif_boot_init(void __iomem *base) +{ + /* reset */ + sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_ON); + sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_OFF); + + /* byte swap */ + sh_mmcif_writel(base, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); + + /* Set block size in MMCIF hardware */ + sh_mmcif_writel(base, MMCIF_CE_BLOCK_SET, SH_MMCIF_BBS); + + /* Enable the clock, set it to Bus clock/256 (about 325Khz). 
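+	 * (CLKDIV_256 is (7 << 16): n = 7 in CE_CLK_CTRL, which per the
+	 * table above gives bus clock / 2^(7+1) = bus clock / 256.)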
*/ + sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, + CLK_ENABLE | CLKDIV_256 | SRSPTO_256 | + SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); + + /* CMD0 */ + sh_mmcif_boot_cmd(base, 0x00000040, 0); + + /* CMD1 - Get OCR */ + do { + sh_mmcif_boot_cmd(base, 0x01405040, 0x40300000); /* CMD1 */ + } while ((sh_mmcif_readl(base, MMCIF_CE_RESP0) & 0x80000000) + != 0x80000000); + + /* CMD2 - Get CID */ + sh_mmcif_boot_cmd(base, 0x02806040, 0); + + /* CMD3 - Set card relative address */ + sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000); +} + +#endif /* LINUX_MMC_SH_MMCIF_H */ diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h new file mode 100644 index 000000000..06607c59c --- /dev/null +++ b/include/linux/mmc/slot-gpio.h @@ -0,0 +1,39 @@ +/* + * Generic GPIO card-detect helper header + * + * Copyright (C) 2011, Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MMC_SLOT_GPIO_H +#define MMC_SLOT_GPIO_H + +#include +#include + +struct mmc_host; + +int mmc_gpio_get_ro(struct mmc_host *host); +int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio); + +int mmc_gpio_get_cd(struct mmc_host *host); +int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio, + unsigned int debounce); + +int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, + unsigned int idx, bool override_active_level, + unsigned int debounce, bool *gpio_invert); +int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, + unsigned int idx, bool override_active_level, + unsigned int debounce, bool *gpio_invert); +void mmc_gpio_set_cd_isr(struct mmc_host *host, + irqreturn_t (*isr)(int irq, void *dev_id)); +int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on); +void mmc_gpiod_request_cd_irq(struct mmc_host *host); +bool mmc_can_gpio_cd(struct mmc_host *host); +bool mmc_can_gpio_ro(struct mmc_host *host); + +#endif diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h new file mode 100644 index 000000000..5d0767cb4 --- /dev/null +++ b/include/linux/mmdebug.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_MM_DEBUG_H +#define LINUX_MM_DEBUG_H 1 + +#include +#include + +struct page; +struct vm_area_struct; +struct mm_struct; + +extern void dump_page(struct page *page, const char *reason); +extern void __dump_page(struct page *page, const char *reason); +void dump_vma(const struct vm_area_struct *vma); +void dump_mm(const struct mm_struct *mm); + +#ifdef CONFIG_DEBUG_VM +#define VM_BUG_ON(cond) BUG_ON(cond) +#define VM_BUG_ON_PAGE(cond, page) \ + do { \ + if (unlikely(cond)) { \ + dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\ + BUG(); \ + } \ + } while (0) +#define VM_BUG_ON_VMA(cond, vma) \ + do { \ + if (unlikely(cond)) { \ + dump_vma(vma); \ + BUG(); \ + } \ + } while (0) +#define VM_BUG_ON_MM(cond, mm) \ + do { \ + if (unlikely(cond)) { \ + dump_mm(mm); \ + BUG(); \ + } \ + } while (0) +#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \ + static bool __section(".data.once") __warned; \ + int __ret_warn_once = !!(cond); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) + +#define VM_WARN_ON(cond) (void)WARN_ON(cond) +#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) +#define VM_WARN_ONCE(cond, format...) 
(void)WARN_ONCE(cond, format) +#define VM_WARN(cond, format...) (void)WARN(cond, format) +#else +#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) +#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) +#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) +#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) +#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond) +#endif + +#ifdef CONFIG_DEBUG_VIRTUAL +#define VIRTUAL_BUG_ON(cond) BUG_ON(cond) +#else +#define VIRTUAL_BUG_ON(cond) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_VM_PGFLAGS +#define VM_BUG_ON_PGFLAGS(cond, page) VM_BUG_ON_PAGE(cond, page) +#else +#define VM_BUG_ON_PGFLAGS(cond, page) BUILD_BUG_ON_INVALID(cond) +#endif + +#endif diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h new file mode 100644 index 000000000..882368498 --- /dev/null +++ b/include/linux/mmiotrace.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MMIOTRACE_H +#define _LINUX_MMIOTRACE_H + +#include +#include + +struct kmmio_probe; +struct pt_regs; + +typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *, + struct pt_regs *, unsigned long addr); +typedef void (*kmmio_post_handler_t)(struct kmmio_probe *, + unsigned long condition, struct pt_regs *); + +struct kmmio_probe { + /* kmmio internal list: */ + struct list_head list; + /* start location of the probe point: */ + unsigned long addr; + /* length of the probe region: */ + unsigned long len; + /* Called before addr is executed: */ + kmmio_pre_handler_t pre_handler; + /* Called after addr is executed: */ + kmmio_post_handler_t post_handler; + void *private; +}; + +extern unsigned int kmmio_count; + +extern int register_kmmio_probe(struct kmmio_probe *p); +extern void unregister_kmmio_probe(struct kmmio_probe *p); +extern int kmmio_init(void); +extern void kmmio_cleanup(void); + +#ifdef CONFIG_MMIOTRACE +/* kmmio is active by some kmmio_probes? */ +static inline int is_kmmio_active(void) +{ + return kmmio_count; +} + +/* Called from page fault handler. */ +extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); + +/* Called from ioremap.c */ +extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, + void __iomem *addr); +extern void mmiotrace_iounmap(volatile void __iomem *addr); + +/* For anyone to insert markers. Remember trailing newline. */ +extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...); +#else /* !CONFIG_MMIOTRACE: */ +static inline int is_kmmio_active(void) +{ + return 0; +} + +static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr) +{ + return 0; +} + +static inline void mmiotrace_ioremap(resource_size_t offset, + unsigned long size, void __iomem *addr) +{ +} + +static inline void mmiotrace_iounmap(volatile void __iomem *addr) +{ +} + +static inline __printf(1, 2) int mmiotrace_printk(const char *fmt, ...) 
+{ + return 0; +} +#endif /* CONFIG_MMIOTRACE */ + +enum mm_io_opcode { + MMIO_READ = 0x1, /* struct mmiotrace_rw */ + MMIO_WRITE = 0x2, /* struct mmiotrace_rw */ + MMIO_PROBE = 0x3, /* struct mmiotrace_map */ + MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */ + MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */ +}; + +struct mmiotrace_rw { + resource_size_t phys; /* PCI address of register */ + unsigned long value; + unsigned long pc; /* optional program counter */ + int map_id; + unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */ + unsigned char width; /* size of register access in bytes */ +}; + +struct mmiotrace_map { + resource_size_t phys; /* base address in PCI space */ + unsigned long virt; /* base virtual address */ + unsigned long len; /* mapping size */ + int map_id; + unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */ +}; + +/* in kernel/trace/trace_mmiotrace.c */ +extern void enable_mmiotrace(void); +extern void disable_mmiotrace(void); +extern void mmio_trace_rw(struct mmiotrace_rw *rw); +extern void mmio_trace_mapping(struct mmiotrace_map *map); +extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args); + +#endif /* _LINUX_MMIOTRACE_H */ diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h new file mode 100644 index 000000000..d9a543a9e --- /dev/null +++ b/include/linux/mmu_context.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MMU_CONTEXT_H +#define _LINUX_MMU_CONTEXT_H + +#include + +struct mm_struct; + +void use_mm(struct mm_struct *mm); +void unuse_mm(struct mm_struct *mm); + +/* Architectures that care about IRQ state in switch_mm can override this. */ +#ifndef switch_mm_irqs_off +# define switch_mm_irqs_off switch_mm +#endif + +#endif diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h new file mode 100644 index 000000000..133ba7882 --- /dev/null +++ b/include/linux/mmu_notifier.h @@ -0,0 +1,520 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MMU_NOTIFIER_H +#define _LINUX_MMU_NOTIFIER_H + +#include +#include +#include +#include +#include + +struct mmu_notifier; +struct mmu_notifier_ops; + +/* mmu_notifier_ops flags */ +#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01) + +#ifdef CONFIG_MMU_NOTIFIER + +/* + * The mmu notifier_mm structure is allocated and installed in + * mm->mmu_notifier_mm inside the mm_take_all_locks() protected + * critical section and it's released only when mm_count reaches zero + * in mmdrop(). + */ +struct mmu_notifier_mm { + /* all mmu notifiers registerd in this mm are queued in this list */ + struct hlist_head list; + /* to serialize the list modifications and hlist_unhashed */ + spinlock_t lock; +}; + +struct mmu_notifier_ops { + /* + * Flags to specify behavior of callbacks for this MMU notifier. + * Used to determine which context an operation may be called. + * + * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not + * block + */ + int flags; + + /* + * Called either by mmu_notifier_unregister or when the mm is + * being destroyed by exit_mmap, always before all pages are + * freed. This can run concurrently with other mmu notifier + * methods (the ones invoked outside the mm context) and it + * should tear down all secondary mmu mappings and freeze the + * secondary mmu. If this method isn't implemented you've to + * be sure that nothing could possibly write to the pages + * through the secondary mmu by the time the last thread with + * tsk->mm == mm exits. 
+ * + * As side note: the pages freed after ->release returns could + * be immediately reallocated by the gart at an alias physical + * address with a different cache model, so if ->release isn't + * implemented because all _software_ driven memory accesses + * through the secondary mmu are terminated by the time the + * last thread of this mm quits, you've also to be sure that + * speculative _hardware_ operations can't allocate dirty + * cachelines in the cpu that could not be snooped and made + * coherent with the other read and write operations happening + * through the gart alias address, so leading to memory + * corruption. + */ + void (*release)(struct mmu_notifier *mn, + struct mm_struct *mm); + + /* + * clear_flush_young is called after the VM is + * test-and-clearing the young/accessed bitflag in the + * pte. This way the VM will provide proper aging to the + * accesses to the page through the secondary MMUs and not + * only to the ones through the Linux pte. + * Start-end is necessary in case the secondary MMU is mapping the page + * at a smaller granularity than the primary MMU. + */ + int (*clear_flush_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end); + + /* + * clear_young is a lightweight version of clear_flush_young. Like the + * latter, it is supposed to test-and-clear the young/accessed bitflag + * in the secondary pte, but it may omit flushing the secondary tlb. + */ + int (*clear_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end); + + /* + * test_young is called to check the young/accessed bitflag in + * the secondary pte. This is used to know if the page is + * frequently used without actually clearing the flag or tearing + * down the secondary mapping on the page. + */ + int (*test_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * change_pte is called in cases that pte mapping to page is changed: + * for example, when ksm remaps pte to point to a new shared page. + */ + void (*change_pte)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address, + pte_t pte); + + /* + * invalidate_range_start() and invalidate_range_end() must be + * paired and are called only when the mmap_sem and/or the + * locks protecting the reverse maps are held. If the subsystem + * can't guarantee that no additional references are taken to + * the pages in the range, it has to implement the + * invalidate_range() notifier to remove any references taken + * after invalidate_range_start(). + * + * Invalidation of multiple concurrent ranges may be + * optionally permitted by the driver. Either way the + * establishment of sptes is forbidden in the range passed to + * invalidate_range_begin/end for the whole duration of the + * invalidate_range_begin/end critical section. + * + * invalidate_range_start() is called when all pages in the + * range are still mapped and have at least a refcount of one. + * + * invalidate_range_end() is called when all pages in the + * range have been unmapped and the pages have been freed by + * the VM. + * + * The VM will remove the page table entries and potentially + * the page between invalidate_range_start() and + * invalidate_range_end(). If the page must not be freed + * because of pending I/O or other circumstances then the + * invalidate_range_start() callback (or the initial mapping + * by the driver) must make sure that the refcount is kept + * elevated. 
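As an illustrative aside (not part of the upstream header), a secondary-MMU driver might wire up the callbacks documented in this ops structure roughly as sketched below. All driver-side names (struct my_gpu_mm, my_gpu_*) are hypothetical; the blockable argument and mmu_notifier_register() used here are declared further down in this header.

#include <linux/mmu_notifier.h>

struct my_gpu_mm {			/* hypothetical per-mm driver state */
	struct mmu_notifier mn;
};

static void my_gpu_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* Tear down every secondary-MMU mapping and freeze the device MMU. */
}

static int my_gpu_invalidate_start(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end,
				   bool blockable)
{
	/* Unmap [start, end) from the device page tables. */
	return 0;
}

static const struct mmu_notifier_ops my_gpu_mn_ops = {
	.release		= my_gpu_release,
	.invalidate_range_start	= my_gpu_invalidate_start,
};

static int my_gpu_attach(struct my_gpu_mm *gmm, struct mm_struct *mm)
{
	gmm->mn.ops = &my_gpu_mn_ops;
	return mmu_notifier_register(&gmm->mn, mm);	/* takes mmap_sem */
}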
+ * + * If the driver increases the refcount when the pages are + * initially mapped into an address space then either + * invalidate_range_start() or invalidate_range_end() may + * decrease the refcount. If the refcount is decreased on + * invalidate_range_start() then the VM can free pages as page + * table entries are removed. If the refcount is only + * droppped on invalidate_range_end() then the driver itself + * will drop the last refcount but it must take care to flush + * any secondary tlb before doing the final free on the + * page. Pages will no longer be referenced by the linux + * address space but may still be referenced by sptes until + * the last refcount is dropped. + * + * If blockable argument is set to false then the callback cannot + * sleep and has to return with -EAGAIN. 0 should be returned + * otherwise. + * + */ + int (*invalidate_range_start)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end, + bool blockable); + void (*invalidate_range_end)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); + + /* + * invalidate_range() is either called between + * invalidate_range_start() and invalidate_range_end() when the + * VM has to free pages that where unmapped, but before the + * pages are actually freed, or outside of _start()/_end() when + * a (remote) TLB is necessary. + * + * If invalidate_range() is used to manage a non-CPU TLB with + * shared page-tables, it not necessary to implement the + * invalidate_range_start()/end() notifiers, as + * invalidate_range() alread catches the points in time when an + * external TLB range needs to be flushed. For more in depth + * discussion on this see Documentation/vm/mmu_notifier.rst + * + * Note that this function might be called with just a sub-range + * of what was passed to invalidate_range_start()/end(), if + * called between those functions. + * + * If this callback cannot block, and invalidate_range_{start,end} + * cannot block, mmu_notifier_ops.flags should have + * MMU_INVALIDATE_DOES_NOT_BLOCK set. + */ + void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, + unsigned long start, unsigned long end); +}; + +/* + * The notifier chains are protected by mmap_sem and/or the reverse map + * semaphores. Notifier chains are only changed when all reverse maps and + * the mmap_sem locks are taken. + * + * Therefore notifier chains can only be traversed when either + * + * 1. mmap_sem is held. + * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem). + * 3. 
No other concurrent thread can access the list (release) + */ +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; +}; + +static inline int mm_has_notifiers(struct mm_struct *mm) +{ + return unlikely(mm->mmu_notifier_mm); +} + +extern int mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern int __mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void mmu_notifier_unregister(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); +extern void __mmu_notifier_release(struct mm_struct *mm); +extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long start, + unsigned long end); +extern int __mmu_notifier_clear_young(struct mm_struct *mm, + unsigned long start, + unsigned long end); +extern int __mmu_notifier_test_young(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte); +extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end, + bool blockable); +extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end, + bool only_end); +extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm); + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_release(mm); +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_clear_flush_young(mm, start, end); + return 0; +} + +static inline int mmu_notifier_clear_young(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_clear_young(mm, start, end); + return 0; +} + +static inline int mmu_notifier_test_young(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_test_young(mm, address); + return 0; +} + +static inline void mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_change_pte(mm, address, pte); +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_start(mm, start, end, true); +} + +static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_invalidate_range_start(mm, start, end, false); + return 0; +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_end(mm, start, end, false); +} + +static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_end(mm, start, end, true); +} + +static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if 
(mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range(mm, start, end); +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ + mm->mmu_notifier_mm = NULL; +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_mm_destroy(mm); +} + +#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ + __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ + ___address, \ + ___address + \ + PAGE_SIZE); \ + __young; \ +}) + +#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \ + __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ + ___address, \ + ___address + \ + PMD_SIZE); \ + __young; \ +}) + +#define ptep_clear_young_notify(__vma, __address, __ptep) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\ + __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \ + ___address + PAGE_SIZE); \ + __young; \ +}) + +#define pmdp_clear_young_notify(__vma, __address, __pmdp) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\ + __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \ + ___address + PMD_SIZE); \ + __young; \ +}) + +#define ptep_clear_flush_notify(__vma, __address, __ptep) \ +({ \ + unsigned long ___addr = __address & PAGE_MASK; \ + struct mm_struct *___mm = (__vma)->vm_mm; \ + pte_t ___pte; \ + \ + ___pte = ptep_clear_flush(__vma, __address, __ptep); \ + mmu_notifier_invalidate_range(___mm, ___addr, \ + ___addr + PAGE_SIZE); \ + \ + ___pte; \ +}) + +#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \ +({ \ + unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ + struct mm_struct *___mm = (__vma)->vm_mm; \ + pmd_t ___pmd; \ + \ + ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \ + mmu_notifier_invalidate_range(___mm, ___haddr, \ + ___haddr + HPAGE_PMD_SIZE); \ + \ + ___pmd; \ +}) + +#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \ +({ \ + unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \ + struct mm_struct *___mm = (__vma)->vm_mm; \ + pud_t ___pud; \ + \ + ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \ + mmu_notifier_invalidate_range(___mm, ___haddr, \ + ___haddr + HPAGE_PUD_SIZE); \ + \ + ___pud; \ +}) + +/* + * set_pte_at_notify() sets the pte _after_ running the notifier. + * This is safe to start by updating the secondary MMUs, because the primary MMU + * pte invalidate must have already happened with a ptep_clear_flush() before + * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is + * required when we change both the protection of the mapping from read-only to + * read-write and the pfn (like during copy on write page faults). Otherwise the + * old page would remain mapped readonly in the secondary MMUs after the new + * page is already writable by some CPU through the primary MMU. 
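As an illustrative aside (not upstream code), the paired invalidate wrappers declared above are typically used to bracket a page-table teardown; my_unmap_range() is a hypothetical caller.

#include <linux/mmu_notifier.h>

static void my_unmap_range(struct mm_struct *mm, unsigned long start,
			   unsigned long end)
{
	mmu_notifier_invalidate_range_start(mm, start, end);
	/* ... clear the page table entries for [start, end) here ... */
	mmu_notifier_invalidate_range_end(mm, start, end);
}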
+ */ +#define set_pte_at_notify(__mm, __address, __ptep, __pte) \ +({ \ + struct mm_struct *___mm = __mm; \ + unsigned long ___address = __address; \ + pte_t ___pte = __pte; \ + \ + mmu_notifier_change_pte(___mm, ___address, ___pte); \ + set_pte_at(___mm, ___address, __ptep, ___pte); \ +}) + +extern void mmu_notifier_call_srcu(struct rcu_head *rcu, + void (*func)(struct rcu_head *rcu)); +extern void mmu_notifier_synchronize(void); + +#else /* CONFIG_MMU_NOTIFIER */ + +static inline int mm_has_notifiers(struct mm_struct *mm) +{ + return 0; +} + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + return 0; +} + +static inline int mmu_notifier_test_young(struct mm_struct *mm, + unsigned long address) +{ + return 0; +} + +static inline void mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte) +{ +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + return 0; +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm) +{ + return false; +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ +} + +#define ptep_clear_flush_young_notify ptep_clear_flush_young +#define pmdp_clear_flush_young_notify pmdp_clear_flush_young +#define ptep_clear_young_notify ptep_test_and_clear_young +#define pmdp_clear_young_notify pmdp_test_and_clear_young +#define ptep_clear_flush_notify ptep_clear_flush +#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush +#define pudp_huge_clear_flush_notify pudp_huge_clear_flush +#define set_pte_at_notify set_pte_at + +#endif /* CONFIG_MMU_NOTIFIER */ + +#endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h new file mode 100644 index 000000000..b4084b6ed --- /dev/null +++ b/include/linux/mmzone.h @@ -0,0 +1,1343 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MMZONE_H +#define _LINUX_MMZONE_H + +#ifndef __ASSEMBLY__ +#ifndef __GENERATING_BOUNDS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Free memory management - zoned buddy allocator. */ +#ifndef CONFIG_FORCE_MAX_ZONEORDER +#define MAX_ORDER 11 +#else +#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER +#endif +#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) + +/* + * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed + * costly to service. That is between allocation orders which should + * coalesce naturally under reasonable reclaim pressure and those which + * will not. 
+ */ +#define PAGE_ALLOC_COSTLY_ORDER 3 + +enum migratetype { + MIGRATE_UNMOVABLE, + MIGRATE_MOVABLE, + MIGRATE_RECLAIMABLE, + MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ + MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, +#ifdef CONFIG_CMA + /* + * MIGRATE_CMA migration type is designed to mimic the way + * ZONE_MOVABLE works. Only movable pages can be allocated + * from MIGRATE_CMA pageblocks and page allocator never + * implicitly change migration type of MIGRATE_CMA pageblock. + * + * The way to use it is to change migratetype of a range of + * pageblocks to MIGRATE_CMA which can be done by + * __free_pageblock_cma() function. What is important though + * is that a range of pageblocks must be aligned to + * MAX_ORDER_NR_PAGES should biggest page be bigger then + * a single pageblock. + */ + MIGRATE_CMA, +#endif +#ifdef CONFIG_MEMORY_ISOLATION + MIGRATE_ISOLATE, /* can't allocate from here */ +#endif + MIGRATE_TYPES +}; + +/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ +extern char * const migratetype_names[MIGRATE_TYPES]; + +#ifdef CONFIG_CMA +# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) +# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) +#else +# define is_migrate_cma(migratetype) false +# define is_migrate_cma_page(_page) false +#endif + +static inline bool is_migrate_movable(int mt) +{ + return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE; +} + +#define for_each_migratetype_order(order, type) \ + for (order = 0; order < MAX_ORDER; order++) \ + for (type = 0; type < MIGRATE_TYPES; type++) + +extern int page_group_by_mobility_disabled; + +#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) +#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) + +#define get_pageblock_migratetype(page) \ + get_pfnblock_flags_mask(page, page_to_pfn(page), \ + PB_migrate_end, MIGRATETYPE_MASK) + +struct free_area { + struct list_head free_list[MIGRATE_TYPES]; + unsigned long nr_free; +}; + +struct pglist_data; + +/* + * zone->lock and the zone lru_lock are two of the hottest locks in the kernel. + * So add a wild amount of padding here to ensure that they fall into separate + * cachelines. There are very few zone structures in the machine, so space + * consumption is not a concern here. 
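As an illustrative aside (not upstream code), each free_area entry's nr_free counts buddy blocks of 1 << order pages; my_report_order() is a hypothetical helper, and a real caller would have to hold the owning zone's lock while reading it.

#include <linux/printk.h>

static void my_report_order(const struct free_area *area, unsigned int order)
{
	pr_info("order %u: %lu free blocks (%lu pages)\n",
		order, area->nr_free, area->nr_free << order);
}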
+ */ +#if defined(CONFIG_SMP) +struct zone_padding { + char x[0]; +} ____cacheline_internodealigned_in_smp; +#define ZONE_PADDING(name) struct zone_padding name; +#else +#define ZONE_PADDING(name) +#endif + +#ifdef CONFIG_NUMA +enum numa_stat_item { + NUMA_HIT, /* allocated in intended node */ + NUMA_MISS, /* allocated in non intended node */ + NUMA_FOREIGN, /* was intended here, hit elsewhere */ + NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ + NUMA_LOCAL, /* allocation from local node */ + NUMA_OTHER, /* allocation from other node */ + NR_VM_NUMA_STAT_ITEMS +}; +#else +#define NR_VM_NUMA_STAT_ITEMS 0 +#endif + +enum zone_stat_item { + /* First 128 byte cacheline (assuming 64 bit words) */ + NR_FREE_PAGES, + NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ + NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, + NR_ZONE_ACTIVE_ANON, + NR_ZONE_INACTIVE_FILE, + NR_ZONE_ACTIVE_FILE, + NR_ZONE_UNEVICTABLE, + NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ + NR_MLOCK, /* mlock()ed pages found and moved off LRU */ + NR_PAGETABLE, /* used for pagetables */ + NR_KERNEL_STACK_KB, /* measured in KiB */ + /* Second 128 byte cacheline */ + NR_BOUNCE, +#if IS_ENABLED(CONFIG_ZSMALLOC) + NR_ZSPAGES, /* allocated in zsmalloc */ +#endif + NR_FREE_CMA_PAGES, + NR_VM_ZONE_STAT_ITEMS }; + +enum node_stat_item { + NR_LRU_BASE, + NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ + NR_ACTIVE_ANON, /* " " " " " */ + NR_INACTIVE_FILE, /* " " " " " */ + NR_ACTIVE_FILE, /* " " " " " */ + NR_UNEVICTABLE, /* " " " " " */ + NR_SLAB_RECLAIMABLE, + NR_SLAB_UNRECLAIMABLE, + NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ + NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ + WORKINGSET_REFAULT, + WORKINGSET_ACTIVATE, + WORKINGSET_NODERECLAIM, + NR_ANON_MAPPED, /* Mapped anonymous pages */ + NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. + only modified from process context */ + NR_FILE_PAGES, + NR_FILE_DIRTY, + NR_WRITEBACK, + NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ + NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ + NR_SHMEM_THPS, + NR_SHMEM_PMDMAPPED, + NR_ANON_THPS, + NR_UNSTABLE_NFS, /* NFS unstable pages */ + NR_VMSCAN_WRITE, + NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ + NR_DIRTIED, /* page dirtyings since bootup */ + NR_WRITTEN, /* page writings since bootup */ + NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */ + NR_VM_NODE_STAT_ITEMS +}; + +/* + * We do arithmetic on the LRU lists in various places in the code, + * so it is important to keep the active lists LRU_ACTIVE higher in + * the array than the corresponding inactive lists, and to keep + * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. 
+ * + * This has to be kept in sync with the statistics in zone_stat_item + * above and the descriptions in vmstat_text in mm/vmstat.c + */ +#define LRU_BASE 0 +#define LRU_ACTIVE 1 +#define LRU_FILE 2 + +enum lru_list { + LRU_INACTIVE_ANON = LRU_BASE, + LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, + LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, + LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, + LRU_UNEVICTABLE, + NR_LRU_LISTS +}; + +#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) + +#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) + +static inline int is_file_lru(enum lru_list lru) +{ + return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); +} + +static inline int is_active_lru(enum lru_list lru) +{ + return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); +} + +struct zone_reclaim_stat { + /* + * The pageout code in vmscan.c keeps track of how many of the + * mem/swap backed and file backed pages are referenced. + * The higher the rotated/scanned ratio, the more valuable + * that cache is. + * + * The anon LRU stats live in [0], file LRU stats in [1] + */ + unsigned long recent_rotated[2]; + unsigned long recent_scanned[2]; +}; + +struct lruvec { + struct list_head lists[NR_LRU_LISTS]; + struct zone_reclaim_stat reclaim_stat; + /* Evictions & activations on the inactive file list */ + atomic_long_t inactive_age; + /* Refaults at the time of last reclaim cycle */ + unsigned long refaults; +#ifdef CONFIG_MEMCG + struct pglist_data *pgdat; +#endif +}; + +/* Mask used at gathering information at once (see memcontrol.c) */ +#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) +#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) +#define LRU_ALL ((1 << NR_LRU_LISTS) - 1) + +/* Isolate unmapped file */ +#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) +/* Isolate for asynchronous migration */ +#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) +/* Isolate unevictable pages */ +#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) + +/* LRU Isolation modes. */ +typedef unsigned __bitwise isolate_mode_t; + +enum zone_watermarks { + WMARK_MIN, + WMARK_LOW, + WMARK_HIGH, + NR_WMARK +}; + +#define min_wmark_pages(z) (z->watermark[WMARK_MIN]) +#define low_wmark_pages(z) (z->watermark[WMARK_LOW]) +#define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) + +struct per_cpu_pages { + int count; /* number of pages in the list */ + int high; /* high watermark, emptying needed */ + int batch; /* chunk size for buddy add/remove */ + + /* Lists of pages, one per migrate type stored on the pcp-lists */ + struct list_head lists[MIGRATE_PCPTYPES]; +}; + +struct per_cpu_pageset { + struct per_cpu_pages pcp; +#ifdef CONFIG_NUMA + s8 expire; + u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS]; +#endif +#ifdef CONFIG_SMP + s8 stat_threshold; + s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; +#endif +}; + +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; +}; + +#endif /* !__GENERATING_BOUNDS.H */ + +enum zone_type { +#ifdef CONFIG_ZONE_DMA + /* + * ZONE_DMA is used when there are devices that are not able + * to do DMA to all of addressable memory (ZONE_NORMAL). Then we + * carve out the portion of memory that is needed for these devices. + * The range is arch specific. + * + * Some examples + * + * Architecture Limit + * --------------------------- + * parisc, ia64, sparc <4G + * s390 <2G + * arm Various + * alpha Unlimited or 0-16MB. 
+ * + * i386, x86_64 and multiple other arches + * <16M. + */ + ZONE_DMA, +#endif +#ifdef CONFIG_ZONE_DMA32 + /* + * x86_64 needs two ZONE_DMAs because it supports devices that are + * only able to do DMA to the lower 16M but also 32 bit devices that + * can only do DMA areas below 4G. + */ + ZONE_DMA32, +#endif + /* + * Normal addressable memory is in ZONE_NORMAL. DMA operations can be + * performed on pages in ZONE_NORMAL if the DMA devices support + * transfers to all addressable memory. + */ + ZONE_NORMAL, +#ifdef CONFIG_HIGHMEM + /* + * A memory area that is only addressable by the kernel through + * mapping portions into its own address space. This is for example + * used by i386 to allow the kernel to address the memory beyond + * 900MB. The kernel will set up special mappings (page + * table entries on i386) for each page that the kernel needs to + * access. + */ + ZONE_HIGHMEM, +#endif + ZONE_MOVABLE, +#ifdef CONFIG_ZONE_DEVICE + ZONE_DEVICE, +#endif + __MAX_NR_ZONES + +}; + +#ifndef __GENERATING_BOUNDS_H + +struct zone { + /* Read-mostly fields */ + + /* zone watermarks, access with *_wmark_pages(zone) macros */ + unsigned long watermark[NR_WMARK]; + + unsigned long nr_reserved_highatomic; + + /* + * We don't know if the memory that we're going to allocate will be + * freeable or/and it will be released eventually, so to avoid totally + * wasting several GB of ram we must reserve some of the lower zone + * memory (otherwise we risk to run OOM on the lower zones despite + * there being tons of freeable ram on the higher zones). This array is + * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl + * changes. + */ + long lowmem_reserve[MAX_NR_ZONES]; + +#ifdef CONFIG_NUMA + int node; +#endif + struct pglist_data *zone_pgdat; + struct per_cpu_pageset __percpu *pageset; + +#ifndef CONFIG_SPARSEMEM + /* + * Flags for a pageblock_nr_pages block. See pageblock-flags.h. + * In SPARSEMEM, this map is stored in struct mem_section + */ + unsigned long *pageblock_flags; +#endif /* CONFIG_SPARSEMEM */ + + /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ + unsigned long zone_start_pfn; + + /* + * spanned_pages is the total pages spanned by the zone, including + * holes, which is calculated as: + * spanned_pages = zone_end_pfn - zone_start_pfn; + * + * present_pages is physical pages existing within the zone, which + * is calculated as: + * present_pages = spanned_pages - absent_pages(pages in holes); + * + * managed_pages is present pages managed by the buddy system, which + * is calculated as (reserved_pages includes pages allocated by the + * bootmem allocator): + * managed_pages = present_pages - reserved_pages; + * + * So present_pages may be used by memory hotplug or memory power + * management logic to figure out unmanaged pages by checking + * (present_pages - managed_pages). And managed_pages should be used + * by page allocator and vm scanner to calculate all kinds of watermarks + * and thresholds. + * + * Locking rules: + * + * zone_start_pfn and spanned_pages are protected by span_seqlock. + * It is a seqlock because it has to be read outside of zone->lock, + * and it is done in the main allocator path. But, it is written + * quite infrequently. + * + * The span_seq lock is declared along with zone->lock because it is + * frequently read in proximity to zone->lock. It's good to + * give them a chance of being in the same cacheline. + * + * Write access to present_pages at runtime should be protected by + * mem_hotplug_begin/end(). 
Any reader who can't tolerant drift of + * present_pages should get_online_mems() to get a stable value. + * + * Read access to managed_pages should be safe because it's unsigned + * long. Write access to zone->managed_pages and totalram_pages are + * protected by managed_page_count_lock at runtime. Idealy only + * adjust_managed_page_count() should be used instead of directly + * touching zone->managed_pages and totalram_pages. + */ + unsigned long managed_pages; + unsigned long spanned_pages; + unsigned long present_pages; + + const char *name; + +#ifdef CONFIG_MEMORY_ISOLATION + /* + * Number of isolated pageblock. It is used to solve incorrect + * freepage counting problem due to racy retrieving migratetype + * of pageblock. Protected by zone->lock. + */ + unsigned long nr_isolate_pageblock; +#endif + +#ifdef CONFIG_MEMORY_HOTPLUG + /* see spanned/present_pages for more description */ + seqlock_t span_seqlock; +#endif + + int initialized; + + /* Write-intensive fields used from the page allocator */ + ZONE_PADDING(_pad1_) + + /* free areas of different sizes */ + struct free_area free_area[MAX_ORDER]; + + /* zone flags, see below */ + unsigned long flags; + + /* Primarily protects free_area */ + spinlock_t lock; + + /* Write-intensive fields used by compaction and vmstats. */ + ZONE_PADDING(_pad2_) + + /* + * When free pages are below this point, additional steps are taken + * when reading the number of free pages to avoid per-cpu counter + * drift allowing watermarks to be breached + */ + unsigned long percpu_drift_mark; + +#if defined CONFIG_COMPACTION || defined CONFIG_CMA + /* pfn where compaction free scanner should start */ + unsigned long compact_cached_free_pfn; + /* pfn where async and sync compaction migration scanner should start */ + unsigned long compact_cached_migrate_pfn[2]; +#endif + +#ifdef CONFIG_COMPACTION + /* + * On compaction failure, 1<zone_start_pfn + zone->spanned_pages; +} + +static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) +{ + return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); +} + +static inline bool zone_is_initialized(struct zone *zone) +{ + return zone->initialized; +} + +static inline bool zone_is_empty(struct zone *zone) +{ + return zone->spanned_pages == 0; +} + +/* + * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty + * intersection with the given zone + */ +static inline bool zone_intersects(struct zone *zone, + unsigned long start_pfn, unsigned long nr_pages) +{ + if (zone_is_empty(zone)) + return false; + if (start_pfn >= zone_end_pfn(zone) || + start_pfn + nr_pages <= zone->zone_start_pfn) + return false; + + return true; +} + +/* + * The "priority" of VM scanning is how much of the queues we will scan in one + * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the + * queues ("queue_length >> 12") during an aging round. + */ +#define DEF_PRIORITY 12 + +/* Maximum number of zones on a zonelist */ +#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) + +enum { + ZONELIST_FALLBACK, /* zonelist with fallback */ +#ifdef CONFIG_NUMA + /* + * The NUMA zonelists are doubled because we need zonelists that + * restrict the allocations to a single node for __GFP_THISNODE. + */ + ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */ +#endif + MAX_ZONELISTS +}; + +/* + * This struct contains information about a zone in a zonelist. 
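As an illustrative aside (not upstream code), the span helpers defined above can be combined as sketched below; my_zone_holds_range() is a hypothetical helper that asks whether a whole pfn range lies inside one initialized zone.

#include <linux/mmzone.h>

static bool my_zone_holds_range(struct zone *zone, unsigned long start_pfn,
				unsigned long nr_pages)
{
	if (!zone_is_initialized(zone) || zone_is_empty(zone))
		return false;

	/* Assumes nr_pages > 0; the spanned range may still contain holes. */
	return zone_spans_pfn(zone, start_pfn) &&
	       zone_spans_pfn(zone, start_pfn + nr_pages - 1);
}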
It is stored + * here to avoid dereferences into large structures and lookups of tables + */ +struct zoneref { + struct zone *zone; /* Pointer to actual zone */ + int zone_idx; /* zone_idx(zoneref->zone) */ +}; + +/* + * One allocation request operates on a zonelist. A zonelist + * is a list of zones, the first one is the 'goal' of the + * allocation, the other zones are fallback zones, in decreasing + * priority. + * + * To speed the reading of the zonelist, the zonerefs contain the zone index + * of the entry being read. Helper functions to access information given + * a struct zoneref are + * + * zonelist_zone() - Return the struct zone * for an entry in _zonerefs + * zonelist_zone_idx() - Return the index of the zone for an entry + * zonelist_node_idx() - Return the index of the node for an entry + */ +struct zonelist { + struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; +}; + +#ifndef CONFIG_DISCONTIGMEM +/* The array of struct pages - for discontigmem use pgdat->lmem_map */ +extern struct page *mem_map; +#endif + +/* + * On NUMA machines, each NUMA node would have a pg_data_t to describe + * it's memory layout. On UMA machines there is a single pglist_data which + * describes the whole memory. + * + * Memory statistics and page replacement data structures are maintained on a + * per-zone basis. + */ +struct bootmem_data; +typedef struct pglist_data { + struct zone node_zones[MAX_NR_ZONES]; + struct zonelist node_zonelists[MAX_ZONELISTS]; + int nr_zones; +#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ + struct page *node_mem_map; +#ifdef CONFIG_PAGE_EXTENSION + struct page_ext *node_page_ext; +#endif +#endif +#ifndef CONFIG_NO_BOOTMEM + struct bootmem_data *bdata; +#endif +#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) + /* + * Must be held any time you expect node_start_pfn, node_present_pages + * or node_spanned_pages stay constant. + * Also synchronizes pgdat->first_deferred_pfn during deferred page + * init. + * + * pgdat_resize_lock() and pgdat_resize_unlock() are provided to + * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG + * or CONFIG_DEFERRED_STRUCT_PAGE_INIT. + * + * Nests above zone->lock and zone->span_seqlock + */ + spinlock_t node_size_lock; +#endif + unsigned long node_start_pfn; + unsigned long node_present_pages; /* total number of physical pages */ + unsigned long node_spanned_pages; /* total size of physical page + range, including holes */ + int node_id; + wait_queue_head_t kswapd_wait; + wait_queue_head_t pfmemalloc_wait; + struct task_struct *kswapd; /* Protected by + mem_hotplug_begin/end() */ + int kswapd_order; + enum zone_type kswapd_classzone_idx; + + int kswapd_failures; /* Number of 'reclaimed == 0' runs */ + +#ifdef CONFIG_COMPACTION + int kcompactd_max_order; + enum zone_type kcompactd_classzone_idx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; +#endif + /* + * This is a per-node reserve of pages that are not available + * to userspace allocations. + */ + unsigned long totalreserve_pages; + +#ifdef CONFIG_NUMA + /* + * zone reclaim becomes active if more unmapped pages exist. + */ + unsigned long min_unmapped_pages; + unsigned long min_slab_pages; +#endif /* CONFIG_NUMA */ + + /* Write-intensive fields used by page reclaim */ + ZONE_PADDING(_pad1_) + spinlock_t lru_lock; + +#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT + /* + * If memory initialisation on large machines is deferred then this + * is the first PFN that needs to be initialised. 
+ */ + unsigned long first_deferred_pfn; + /* Number of non-deferred pages */ + unsigned long static_init_pgcnt; +#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + spinlock_t split_queue_lock; + struct list_head split_queue; + unsigned long split_queue_len; +#endif + + /* Fields commonly accessed by the page reclaim scanner */ + struct lruvec lruvec; + + unsigned long flags; + + ZONE_PADDING(_pad2_) + + /* Per-node vmstats */ + struct per_cpu_nodestat __percpu *per_cpu_nodestats; + atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; +} pg_data_t; + +#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) +#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) +#ifdef CONFIG_FLAT_NODE_MEM_MAP +#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) +#else +#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) +#endif +#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) + +#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) +#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) +static inline spinlock_t *zone_lru_lock(struct zone *zone) +{ + return &zone->zone_pgdat->lru_lock; +} + +static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) +{ + return &pgdat->lruvec; +} + +static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) +{ + return pgdat->node_start_pfn + pgdat->node_spanned_pages; +} + +static inline bool pgdat_is_empty(pg_data_t *pgdat) +{ + return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; +} + +#include + +void build_all_zonelists(pg_data_t *pgdat); +void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, + enum zone_type classzone_idx); +bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, + int classzone_idx, unsigned int alloc_flags, + long free_pages); +bool zone_watermark_ok(struct zone *z, unsigned int order, + unsigned long mark, int classzone_idx, + unsigned int alloc_flags); +bool zone_watermark_ok_safe(struct zone *z, unsigned int order, + unsigned long mark, int classzone_idx); +/* + * Memory initialization context, use to differentiate memory added by + * the platform statically or via memory hotplug interface. + */ +enum meminit_context { + MEMINIT_EARLY, + MEMINIT_HOTPLUG, +}; + +extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, + unsigned long size); + +extern void lruvec_init(struct lruvec *lruvec); + +static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) +{ +#ifdef CONFIG_MEMCG + return lruvec->pgdat; +#else + return container_of(lruvec, struct pglist_data, lruvec); +#endif +} + +extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); + +#ifdef CONFIG_HAVE_MEMORY_PRESENT +void memory_present(int nid, unsigned long start, unsigned long end); +#else +static inline void memory_present(int nid, unsigned long start, unsigned long end) {} +#endif + +#ifdef CONFIG_HAVE_MEMORYLESS_NODES +int local_memory_node(int node_id); +#else +static inline int local_memory_node(int node_id) { return node_id; }; +#endif + +/* + * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. 
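As an illustrative aside (not upstream code), the pgdat helpers above are used like this; my_node_spans_pfn() is a hypothetical helper.

#include <linux/mmzone.h>

static bool my_node_spans_pfn(pg_data_t *pgdat, unsigned long pfn)
{
	return !pgdat_is_empty(pgdat) &&
	       pfn >= pgdat->node_start_pfn && pfn < pgdat_end_pfn(pgdat);
}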
+ */ +#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) + +#ifdef CONFIG_ZONE_DEVICE +static inline bool is_dev_zone(const struct zone *zone) +{ + return zone_idx(zone) == ZONE_DEVICE; +} +#else +static inline bool is_dev_zone(const struct zone *zone) +{ + return false; +} +#endif + +/* + * Returns true if a zone has pages managed by the buddy allocator. + * All the reclaim decisions have to use this function rather than + * populated_zone(). If the whole zone is reserved then we can easily + * end up with populated_zone() && !managed_zone(). + */ +static inline bool managed_zone(struct zone *zone) +{ + return zone->managed_pages; +} + +/* Returns true if a zone has memory */ +static inline bool populated_zone(struct zone *zone) +{ + return zone->present_pages; +} + +#ifdef CONFIG_NUMA +static inline int zone_to_nid(struct zone *zone) +{ + return zone->node; +} + +static inline void zone_set_nid(struct zone *zone, int nid) +{ + zone->node = nid; +} +#else +static inline int zone_to_nid(struct zone *zone) +{ + return 0; +} + +static inline void zone_set_nid(struct zone *zone, int nid) {} +#endif + +extern int movable_zone; + +#ifdef CONFIG_HIGHMEM +static inline int zone_movable_is_highmem(void) +{ +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP + return movable_zone == ZONE_HIGHMEM; +#else + return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM; +#endif +} +#endif + +static inline int is_highmem_idx(enum zone_type idx) +{ +#ifdef CONFIG_HIGHMEM + return (idx == ZONE_HIGHMEM || + (idx == ZONE_MOVABLE && zone_movable_is_highmem())); +#else + return 0; +#endif +} + +/** + * is_highmem - helper function to quickly check if a struct zone is a + * highmem zone or not. This is an attempt to keep references + * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. 
+ * @zone - pointer to struct zone variable + */ +static inline int is_highmem(struct zone *zone) +{ +#ifdef CONFIG_HIGHMEM + return is_highmem_idx(zone_idx(zone)); +#else + return 0; +#endif +} + +/* These two functions are used to setup the per zone pages min values */ +struct ctl_table; +int min_free_kbytes_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; +int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); + +extern int numa_zonelist_order_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern char numa_zonelist_order[]; +#define NUMA_ZONELIST_ORDER_LEN 16 + +#ifndef CONFIG_NEED_MULTIPLE_NODES + +extern struct pglist_data contig_page_data; +#define NODE_DATA(nid) (&contig_page_data) +#define NODE_MEM_MAP(nid) mem_map + +#else /* CONFIG_NEED_MULTIPLE_NODES */ + +#include + +#endif /* !CONFIG_NEED_MULTIPLE_NODES */ + +extern struct pglist_data *first_online_pgdat(void); +extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); +extern struct zone *next_zone(struct zone *zone); + +/** + * for_each_online_pgdat - helper macro to iterate over all online nodes + * @pgdat - pointer to a pg_data_t variable + */ +#define for_each_online_pgdat(pgdat) \ + for (pgdat = first_online_pgdat(); \ + pgdat; \ + pgdat = next_online_pgdat(pgdat)) +/** + * for_each_zone - helper macro to iterate over all memory zones + * @zone - pointer to struct zone variable + * + * The user only needs to declare the zone variable, for_each_zone + * fills it in. + */ +#define for_each_zone(zone) \ + for (zone = (first_online_pgdat())->node_zones; \ + zone; \ + zone = next_zone(zone)) + +#define for_each_populated_zone(zone) \ + for (zone = (first_online_pgdat())->node_zones; \ + zone; \ + zone = next_zone(zone)) \ + if (!populated_zone(zone)) \ + ; /* do nothing */ \ + else + +static inline struct zone *zonelist_zone(struct zoneref *zoneref) +{ + return zoneref->zone; +} + +static inline int zonelist_zone_idx(struct zoneref *zoneref) +{ + return zoneref->zone_idx; +} + +static inline int zonelist_node_idx(struct zoneref *zoneref) +{ + return zone_to_nid(zoneref->zone); +} + +struct zoneref *__next_zones_zonelist(struct zoneref *z, + enum zone_type highest_zoneidx, + nodemask_t *nodes); + +/** + * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point + * @z - The cursor used as a starting point for the search + * @highest_zoneidx - The zone index of the highest zone to return + * @nodes - An optional nodemask to filter the zonelist with + * + * This function returns the next zone at or below a given zone index that is + * within the allowed nodemask using a cursor as the starting point for the + * search. The zoneref returned is a cursor that represents the current zone + * being examined. It should be advanced by one before calling + * next_zones_zonelist again. 
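As an illustrative aside (not upstream code), the iterators and watermark accessors declared above combine as sketched below; my_show_watermarks() is a hypothetical helper that logs the watermarks of every populated zone.

#include <linux/mmzone.h>
#include <linux/printk.h>

static void my_show_watermarks(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		pr_info("%s: min %lu low %lu high %lu\n", zone->name,
			min_wmark_pages(zone), low_wmark_pages(zone),
			high_wmark_pages(zone));
}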
+ */ +static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, + enum zone_type highest_zoneidx, + nodemask_t *nodes) +{ + if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx)) + return z; + return __next_zones_zonelist(z, highest_zoneidx, nodes); +} + +/** + * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist + * @zonelist - The zonelist to search for a suitable zone + * @highest_zoneidx - The zone index of the highest zone to return + * @nodes - An optional nodemask to filter the zonelist with + * @return - Zoneref pointer for the first suitable zone found (see below) + * + * This function returns the first zone at or below a given zone index that is + * within the allowed nodemask. The zoneref returned is a cursor that can be + * used to iterate the zonelist with next_zones_zonelist by advancing it by + * one before calling. + * + * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is + * never NULL). This may happen either genuinely, or due to concurrent nodemask + * update due to cpuset modification. + */ +static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, + enum zone_type highest_zoneidx, + nodemask_t *nodes) +{ + return next_zones_zonelist(zonelist->_zonerefs, + highest_zoneidx, nodes); +} + +/** + * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask + * @zone - The current zone in the iterator + * @z - The current pointer within zonelist->zones being iterated + * @zlist - The zonelist being iterated + * @highidx - The zone index of the highest zone to return + * @nodemask - Nodemask allowed by the allocator + * + * This iterator iterates though all zones at or below a given zone index and + * within a given nodemask + */ +#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ + for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \ + zone; \ + z = next_zones_zonelist(++z, highidx, nodemask), \ + zone = zonelist_zone(z)) + +#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ + for (zone = z->zone; \ + zone; \ + z = next_zones_zonelist(++z, highidx, nodemask), \ + zone = zonelist_zone(z)) + + +/** + * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index + * @zone - The current zone in the iterator + * @z - The current pointer within zonelist->zones being iterated + * @zlist - The zonelist being iterated + * @highidx - The zone index of the highest zone to return + * + * This iterator iterates though all zones at or below a given zone index. 
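As an illustrative aside (not upstream code), walking a node's fallback zonelist from the highest usable zone downwards looks roughly like this; my_walk_fallback_list() is a hypothetical helper and passes a NULL nodemask to consider every node.

#include <linux/mmzone.h>
#include <linux/printk.h>

static void my_walk_fallback_list(int nid, enum zone_type highidx)
{
	struct zonelist *zl = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zl, highidx, NULL)
		pr_info("candidate: %s on node %d\n",
			zone->name, zone_to_nid(zone));
}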
+ */ +#define for_each_zone_zonelist(zone, z, zlist, highidx) \ + for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) + +#ifdef CONFIG_SPARSEMEM +#include +#endif + +#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ + !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) +static inline unsigned long early_pfn_to_nid(unsigned long pfn) +{ + BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA)); + return 0; +} +#endif + +#ifdef CONFIG_FLATMEM +#define pfn_to_nid(pfn) (0) +#endif + +#ifdef CONFIG_SPARSEMEM + +/* + * SECTION_SHIFT #bits space required to store a section # + * + * PA_SECTION_SHIFT physical address to/from section number + * PFN_SECTION_SHIFT pfn to/from section number + */ +#define PA_SECTION_SHIFT (SECTION_SIZE_BITS) +#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) + +#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) + +#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) +#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) + +#define SECTION_BLOCKFLAGS_BITS \ + ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) + +#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS +#error Allocator MAX_ORDER exceeds SECTION_SIZE +#endif + +static inline unsigned long pfn_to_section_nr(unsigned long pfn) +{ + return pfn >> PFN_SECTION_SHIFT; +} +static inline unsigned long section_nr_to_pfn(unsigned long sec) +{ + return sec << PFN_SECTION_SHIFT; +} + +#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) +#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) + +struct page; +struct page_ext; +struct mem_section { + /* + * This is, logically, a pointer to an array of struct + * pages. However, it is stored with some other magic. + * (see sparse.c::sparse_init_one_section()) + * + * Additionally during early boot we encode node id of + * the location of the section here to guide allocation. + * (see sparse.c::memory_present()) + * + * Making it a UL at least makes someone do a cast + * before using it wrong. + */ + unsigned long section_mem_map; + + /* See declaration of similar field in struct zone */ + unsigned long *pageblock_flags; +#ifdef CONFIG_PAGE_EXTENSION + /* + * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use + * section. (see page_ext.h about this.) + */ + struct page_ext *page_ext; + unsigned long pad; +#endif + /* + * WARNING: mem_section must be a power-of-2 in size for the + * calculation and use of SECTION_ROOT_MASK to make sense. + */ +}; + +#ifdef CONFIG_SPARSEMEM_EXTREME +#define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section)) +#else +#define SECTIONS_PER_ROOT 1 +#endif + +#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT) +#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT) +#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) + +#ifdef CONFIG_SPARSEMEM_EXTREME +extern struct mem_section **mem_section; +#else +extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; +#endif + +static inline struct mem_section *__nr_to_section(unsigned long nr) +{ + unsigned long root = SECTION_NR_TO_ROOT(nr); + + if (unlikely(root >= NR_SECTION_ROOTS)) + return NULL; + +#ifdef CONFIG_SPARSEMEM_EXTREME + if (!mem_section || !mem_section[root]) + return NULL; +#endif + return &mem_section[root][nr & SECTION_ROOT_MASK]; +} +extern int __section_nr(struct mem_section* ms); +extern unsigned long usemap_size(void); + +/* + * We use the lower bits of the mem_map pointer to store + * a little bit of information. The pointer is calculated + * as mem_map - section_nr_to_pfn(pnum). 
The result is + * aligned to the minimum alignment of the two values: + * 1. All mem_map arrays are page-aligned. + * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT + * lowest bits. PFN_SECTION_SHIFT is arch-specific + * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the + * worst combination is powerpc with 256k pages, + * which results in PFN_SECTION_SHIFT equal 6. + * To sum it up, at least 6 bits are available. + */ +#define SECTION_MARKED_PRESENT (1UL<<0) +#define SECTION_HAS_MEM_MAP (1UL<<1) +#define SECTION_IS_ONLINE (1UL<<2) +#define SECTION_MAP_LAST_BIT (1UL<<3) +#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) +#define SECTION_NID_SHIFT 3 + +static inline struct page *__section_mem_map_addr(struct mem_section *section) +{ + unsigned long map = section->section_mem_map; + map &= SECTION_MAP_MASK; + return (struct page *)map; +} + +static inline int present_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); +} + +static inline int present_section_nr(unsigned long nr) +{ + return present_section(__nr_to_section(nr)); +} + +static inline int valid_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); +} + +static inline int valid_section_nr(unsigned long nr) +{ + return valid_section(__nr_to_section(nr)); +} + +static inline int online_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & SECTION_IS_ONLINE)); +} + +static inline int online_section_nr(unsigned long nr) +{ + return online_section(__nr_to_section(nr)); +} + +#ifdef CONFIG_MEMORY_HOTPLUG +void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn); +#ifdef CONFIG_MEMORY_HOTREMOVE +void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn); +#endif +#endif + +static inline struct mem_section *__pfn_to_section(unsigned long pfn) +{ + return __nr_to_section(pfn_to_section_nr(pfn)); +} + +extern int __highest_present_section_nr; + +#ifndef CONFIG_HAVE_ARCH_PFN_VALID +static inline int pfn_valid(unsigned long pfn) +{ + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + return 0; + return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); +} +#endif + +static inline int pfn_present(unsigned long pfn) +{ + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + return 0; + return present_section(__nr_to_section(pfn_to_section_nr(pfn))); +} + +/* + * These are _only_ used during initialisation, therefore they + * can use __initdata ... They could have names to indicate + * this restriction. + */ +#ifdef CONFIG_NUMA +#define pfn_to_nid(pfn) \ +({ \ + unsigned long __pfn_to_nid_pfn = (pfn); \ + page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \ +}) +#else +#define pfn_to_nid(pfn) (0) +#endif + +#define early_pfn_valid(pfn) pfn_valid(pfn) +void sparse_init(void); +#else +#define sparse_init() do {} while (0) +#define sparse_index_init(_sec, _nid) do {} while (0) +#endif /* CONFIG_SPARSEMEM */ + +/* + * During memory init memblocks map pfns to nids. The search is expensive and + * this caches recent lookups. The implementation of __early_pfn_to_nid + * may treat start/end as pfns or sections. 
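+ *
+ * Illustrative (editor-added) lookup sketch against this cache; the helper
+ * name is hypothetical:
+ *
+ *	static int cached_pfn_to_nid(unsigned long pfn,
+ *				     struct mminit_pfnnid_cache *cache)
+ *	{
+ *		if (cache->last_start <= pfn && pfn < cache->last_end)
+ *			return cache->last_nid;
+ *		return NUMA_NO_NODE;	// caller falls back to a memblock search
+ *	}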
+ */ +struct mminit_pfnnid_cache { + unsigned long last_start; + unsigned long last_end; + int last_nid; +}; + +#ifndef early_pfn_valid +#define early_pfn_valid(pfn) (1) +#endif + +void memory_present(int nid, unsigned long start, unsigned long end); + +/* + * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we + * need to check pfn validility within that MAX_ORDER_NR_PAGES block. + * pfn_valid_within() should be used in this case; we optimise this away + * when we have no holes within a MAX_ORDER_NR_PAGES block. + */ +#ifdef CONFIG_HOLES_IN_ZONE +#define pfn_valid_within(pfn) pfn_valid(pfn) +#else +#define pfn_valid_within(pfn) (1) +#endif + +#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL +/* + * pfn_valid() is meant to be able to tell if a given PFN has valid memmap + * associated with it or not. This means that a struct page exists for this + * pfn. The caller cannot assume the page is fully initialized in general. + * Hotplugable pages might not have been onlined yet. pfn_to_online_page() + * will ensure the struct page is fully online and initialized. Special pages + * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly. + * + * In FLATMEM, it is expected that holes always have valid memmap as long as + * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed + * that a valid section has a memmap for the entire section. + * + * However, an ARM, and maybe other embedded architectures in the future + * free memmap backing holes to save memory on the assumption the memmap is + * never used. The page_zone linkages are then broken even though pfn_valid() + * returns true. A walker of the full memmap must then do this additional + * check to ensure the memmap they are looking at is sane by making sure + * the zone and PFN linkages are still valid. This is expensive, but walkers + * of the full memmap are extremely rare. + */ +bool memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone); +#else +static inline bool memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone) +{ + return true; +} +#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ + +#endif /* !__GENERATING_BOUNDS.H */ +#endif /* !__ASSEMBLY__ */ +#endif /* _LINUX_MMZONE_H */ diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h new file mode 100644 index 000000000..35942084c --- /dev/null +++ b/include/linux/mnt_namespace.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NAMESPACE_H_ +#define _NAMESPACE_H_ +#ifdef __KERNEL__ + +struct mnt_namespace; +struct fs_struct; +struct user_namespace; + +extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, + struct user_namespace *, struct fs_struct *); +extern void put_mnt_ns(struct mnt_namespace *ns); + +extern const struct file_operations proc_mounts_operations; +extern const struct file_operations proc_mountinfo_operations; +extern const struct file_operations proc_mountstats_operations; + +#endif +#endif diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h new file mode 100644 index 000000000..610cdf808 --- /dev/null +++ b/include/linux/mod_devicetable.h @@ -0,0 +1,771 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Device tables which are exported to userspace via + * scripts/mod/file2alias.c. You must keep that file in sync with this + * header. 
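+ *
+ * Illustrative (editor-added) sketch of how a driver typically publishes
+ * such a table; the driver name, table name and IDs are hypothetical:
+ *
+ *	static const struct pci_device_id foo_pci_ids[] = {
+ *		{ PCI_DEVICE(0x1234, 0x5678) },
+ *		{ }	// all-zero terminator
+ *	};
+ *	MODULE_DEVICE_TABLE(pci, foo_pci_ids);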
+ */ + +#ifndef LINUX_MOD_DEVICETABLE_H +#define LINUX_MOD_DEVICETABLE_H + +#ifdef __KERNEL__ +#include +#include +typedef unsigned long kernel_ulong_t; +#endif + +#define PCI_ANY_ID (~0) + +struct pci_device_id { + __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ + __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ + __u32 class, class_mask; /* (class,subclass,prog-if) triplet */ + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + + +#define IEEE1394_MATCH_VENDOR_ID 0x0001 +#define IEEE1394_MATCH_MODEL_ID 0x0002 +#define IEEE1394_MATCH_SPECIFIER_ID 0x0004 +#define IEEE1394_MATCH_VERSION 0x0008 + +struct ieee1394_device_id { + __u32 match_flags; + __u32 vendor_id; + __u32 model_id; + __u32 specifier_id; + __u32 version; + kernel_ulong_t driver_data; +}; + + +/* + * Device table entry for "new style" table-driven USB drivers. + * User mode code can read these tables to choose which modules to load. + * Declare the table as a MODULE_DEVICE_TABLE. + * + * A probe() parameter will point to a matching entry from this table. + * Use the driver_info field for each match to hold information tied + * to that match: device quirks, etc. + * + * Terminate the driver's table with an all-zeroes entry. + * Use the flag values to control which fields are compared. + */ + +/** + * struct usb_device_id - identifies USB devices for probing and hotplugging + * @match_flags: Bit mask controlling which of the other fields are used to + * match against new devices. Any field except for driver_info may be + * used, although some only make sense in conjunction with other fields. + * This is usually set by a USB_DEVICE_*() macro, which sets all + * other fields in this structure except for driver_info. + * @idVendor: USB vendor ID for a device; numbers are assigned + * by the USB forum to its members. + * @idProduct: Vendor-assigned product ID. + * @bcdDevice_lo: Low end of range of vendor-assigned product version numbers. + * This is also used to identify individual product versions, for + * a range consisting of a single device. + * @bcdDevice_hi: High end of version number range. The range of product + * versions is inclusive. + * @bDeviceClass: Class of device; numbers are assigned + * by the USB forum. Products may choose to implement classes, + * or be vendor-specific. Device classes specify behavior of all + * the interfaces on a device. + * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. + * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. + * @bInterfaceClass: Class of interface; numbers are assigned + * by the USB forum. Products may choose to implement classes, + * or be vendor-specific. Interface classes specify behavior only + * of a given interface; other interfaces may support other classes. + * @bInterfaceSubClass: Subclass of interface; associated with bInterfaceClass. + * @bInterfaceProtocol: Protocol of interface; associated with bInterfaceClass. + * @bInterfaceNumber: Number of interface; composite devices may use + * fixed interface numbers to differentiate between vendor-specific + * interfaces. + * @driver_info: Holds information used by the driver. Usually it holds + * a pointer to a descriptor understood by the driver, or perhaps + * device flags. + * + * In most cases, drivers will create a table of device IDs by using + * USB_DEVICE(), or similar macros designed for that purpose. 
+ * They will then export it to userspace using MODULE_DEVICE_TABLE(), + * and provide it to the USB core through their usb_driver structure. + * + * See the usb_match_id() function for information about how matches are + * performed. Briefly, you will normally use one of several macros to help + * construct these entries. Each entry you provide will either identify + * one or more specific products, or will identify a class of products + * which have agreed to behave the same. You should put the more specific + * matches towards the beginning of your table, so that driver_info can + * record quirks of specific products. + */ +struct usb_device_id { + /* which fields to match against? */ + __u16 match_flags; + + /* Used for product specific matches; range is inclusive */ + __u16 idVendor; + __u16 idProduct; + __u16 bcdDevice_lo; + __u16 bcdDevice_hi; + + /* Used for device class matches */ + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + + /* Used for interface class matches */ + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + + /* Used for vendor-specific interface matches */ + __u8 bInterfaceNumber; + + /* not matched against */ + kernel_ulong_t driver_info + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + +/* Some useful macros to use to create struct usb_device_id */ +#define USB_DEVICE_ID_MATCH_VENDOR 0x0001 +#define USB_DEVICE_ID_MATCH_PRODUCT 0x0002 +#define USB_DEVICE_ID_MATCH_DEV_LO 0x0004 +#define USB_DEVICE_ID_MATCH_DEV_HI 0x0008 +#define USB_DEVICE_ID_MATCH_DEV_CLASS 0x0010 +#define USB_DEVICE_ID_MATCH_DEV_SUBCLASS 0x0020 +#define USB_DEVICE_ID_MATCH_DEV_PROTOCOL 0x0040 +#define USB_DEVICE_ID_MATCH_INT_CLASS 0x0080 +#define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100 +#define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200 +#define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400 + +#define HID_ANY_ID (~0) +#define HID_BUS_ANY 0xffff +#define HID_GROUP_ANY 0x0000 + +struct hid_device_id { + __u16 bus; + __u16 group; + __u32 vendor; + __u32 product; + kernel_ulong_t driver_data; +}; + +/* s390 CCW devices */ +struct ccw_device_id { + __u16 match_flags; /* which fields to match against */ + + __u16 cu_type; /* control unit type */ + __u16 dev_type; /* device type */ + __u8 cu_model; /* control unit model */ + __u8 dev_model; /* device model */ + + kernel_ulong_t driver_info; +}; + +#define CCW_DEVICE_ID_MATCH_CU_TYPE 0x01 +#define CCW_DEVICE_ID_MATCH_CU_MODEL 0x02 +#define CCW_DEVICE_ID_MATCH_DEVICE_TYPE 0x04 +#define CCW_DEVICE_ID_MATCH_DEVICE_MODEL 0x08 + +/* s390 AP bus devices */ +struct ap_device_id { + __u16 match_flags; /* which fields to match against */ + __u8 dev_type; /* device type */ + kernel_ulong_t driver_info; +}; + +#define AP_DEVICE_ID_MATCH_CARD_TYPE 0x01 +#define AP_DEVICE_ID_MATCH_QUEUE_TYPE 0x02 + +/* s390 css bus devices (subchannels) */ +struct css_device_id { + __u8 match_flags; + __u8 type; /* subchannel type */ + kernel_ulong_t driver_data; +}; + +#define ACPI_ID_LEN 9 + +struct acpi_device_id { + __u8 id[ACPI_ID_LEN]; + kernel_ulong_t driver_data; + __u32 cls; + __u32 cls_msk; +}; + +#define PNP_ID_LEN 8 +#define PNP_MAX_DEVICES 8 + +struct pnp_device_id { + __u8 id[PNP_ID_LEN]; + kernel_ulong_t driver_data; +}; + +struct pnp_card_device_id { + __u8 id[PNP_ID_LEN]; + kernel_ulong_t driver_data; + struct { + __u8 id[PNP_ID_LEN]; + } devs[PNP_MAX_DEVICES]; +}; + + +#define SERIO_ANY 0xff + +struct serio_device_id { + __u8 type; + __u8 extra; + __u8 id; + __u8 proto; +}; + +struct hda_device_id { + __u32 vendor_id; + 
__u32 rev_id; + __u8 api_version; + const char *name; + unsigned long driver_data; +}; + +struct sdw_device_id { + __u16 mfg_id; + __u16 part_id; + kernel_ulong_t driver_data; +}; + +/* + * Struct used for matching a device + */ +struct of_device_id { + char name[32]; + char type[32]; + char compatible[128]; + const void *data; +}; + +/* VIO */ +struct vio_device_id { + char type[32]; + char compat[32]; +}; + +/* PCMCIA */ + +struct pcmcia_device_id { + __u16 match_flags; + + __u16 manf_id; + __u16 card_id; + + __u8 func_id; + + /* for real multi-function devices */ + __u8 function; + + /* for pseudo multi-function devices */ + __u8 device_no; + + __u32 prod_id_hash[4]; + + /* not matched against in kernelspace */ + const char * prod_id[4]; + + /* not matched against */ + kernel_ulong_t driver_info; + char * cisfile; +}; + +#define PCMCIA_DEV_ID_MATCH_MANF_ID 0x0001 +#define PCMCIA_DEV_ID_MATCH_CARD_ID 0x0002 +#define PCMCIA_DEV_ID_MATCH_FUNC_ID 0x0004 +#define PCMCIA_DEV_ID_MATCH_FUNCTION 0x0008 +#define PCMCIA_DEV_ID_MATCH_PROD_ID1 0x0010 +#define PCMCIA_DEV_ID_MATCH_PROD_ID2 0x0020 +#define PCMCIA_DEV_ID_MATCH_PROD_ID3 0x0040 +#define PCMCIA_DEV_ID_MATCH_PROD_ID4 0x0080 +#define PCMCIA_DEV_ID_MATCH_DEVICE_NO 0x0100 +#define PCMCIA_DEV_ID_MATCH_FAKE_CIS 0x0200 +#define PCMCIA_DEV_ID_MATCH_ANONYMOUS 0x0400 + +/* Input */ +#define INPUT_DEVICE_ID_EV_MAX 0x1f +#define INPUT_DEVICE_ID_KEY_MIN_INTERESTING 0x71 +#define INPUT_DEVICE_ID_KEY_MAX 0x2ff +#define INPUT_DEVICE_ID_REL_MAX 0x0f +#define INPUT_DEVICE_ID_ABS_MAX 0x3f +#define INPUT_DEVICE_ID_MSC_MAX 0x07 +#define INPUT_DEVICE_ID_LED_MAX 0x0f +#define INPUT_DEVICE_ID_SND_MAX 0x07 +#define INPUT_DEVICE_ID_FF_MAX 0x7f +#define INPUT_DEVICE_ID_SW_MAX 0x10 +#define INPUT_DEVICE_ID_PROP_MAX 0x1f + +#define INPUT_DEVICE_ID_MATCH_BUS 1 +#define INPUT_DEVICE_ID_MATCH_VENDOR 2 +#define INPUT_DEVICE_ID_MATCH_PRODUCT 4 +#define INPUT_DEVICE_ID_MATCH_VERSION 8 + +#define INPUT_DEVICE_ID_MATCH_EVBIT 0x0010 +#define INPUT_DEVICE_ID_MATCH_KEYBIT 0x0020 +#define INPUT_DEVICE_ID_MATCH_RELBIT 0x0040 +#define INPUT_DEVICE_ID_MATCH_ABSBIT 0x0080 +#define INPUT_DEVICE_ID_MATCH_MSCIT 0x0100 +#define INPUT_DEVICE_ID_MATCH_LEDBIT 0x0200 +#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400 +#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800 +#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000 +#define INPUT_DEVICE_ID_MATCH_PROPBIT 0x2000 + +struct input_device_id { + + kernel_ulong_t flags; + + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; + + kernel_ulong_t evbit[INPUT_DEVICE_ID_EV_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t keybit[INPUT_DEVICE_ID_KEY_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t relbit[INPUT_DEVICE_ID_REL_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t absbit[INPUT_DEVICE_ID_ABS_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t mscbit[INPUT_DEVICE_ID_MSC_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t ledbit[INPUT_DEVICE_ID_LED_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1]; + + kernel_ulong_t driver_info; +}; + +/* EISA */ + +#define EISA_SIG_LEN 8 + +/* The EISA signature, in ASCII form, null terminated */ +struct eisa_device_id { + char sig[EISA_SIG_LEN]; + kernel_ulong_t driver_data; +}; + +#define EISA_DEVICE_MODALIAS_FMT "eisa:s%s" + +struct parisc_device_id { + __u8 hw_type; /* 5 bits used */ + __u8 
hversion_rev; /* 4 bits */ + __u16 hversion; /* 12 bits */ + __u32 sversion; /* 20 bits */ +}; + +#define PA_HWTYPE_ANY_ID 0xff +#define PA_HVERSION_REV_ANY_ID 0xff +#define PA_HVERSION_ANY_ID 0xffff +#define PA_SVERSION_ANY_ID 0xffffffff + +/* SDIO */ + +#define SDIO_ANY_ID (~0) + +struct sdio_device_id { + __u8 class; /* Standard interface or SDIO_ANY_ID */ + __u16 vendor; /* Vendor or SDIO_ANY_ID */ + __u16 device; /* Device ID or SDIO_ANY_ID */ + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + +/* SSB core, see drivers/ssb/ */ +struct ssb_device_id { + __u16 vendor; + __u16 coreid; + __u8 revision; + __u8 __pad; +} __attribute__((packed, aligned(2))); +#define SSB_DEVICE(_vendor, _coreid, _revision) \ + { .vendor = _vendor, .coreid = _coreid, .revision = _revision, } + +#define SSB_ANY_VENDOR 0xFFFF +#define SSB_ANY_ID 0xFFFF +#define SSB_ANY_REV 0xFF + +/* Broadcom's specific AMBA core, see drivers/bcma/ */ +struct bcma_device_id { + __u16 manuf; + __u16 id; + __u8 rev; + __u8 class; +} __attribute__((packed,aligned(2))); +#define BCMA_CORE(_manuf, _id, _rev, _class) \ + { .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, } + +#define BCMA_ANY_MANUF 0xFFFF +#define BCMA_ANY_ID 0xFFFF +#define BCMA_ANY_REV 0xFF +#define BCMA_ANY_CLASS 0xFF + +struct virtio_device_id { + __u32 device; + __u32 vendor; +}; +#define VIRTIO_DEV_ANY_ID 0xffffffff + +/* + * For Hyper-V devices we use the device guid as the id. + */ +struct hv_vmbus_device_id { + uuid_le guid; + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + +/* rpmsg */ + +#define RPMSG_NAME_SIZE 32 +#define RPMSG_DEVICE_MODALIAS_FMT "rpmsg:%s" + +struct rpmsg_device_id { + char name[RPMSG_NAME_SIZE]; +}; + +/* i2c */ + +#define I2C_NAME_SIZE 20 +#define I2C_MODULE_PREFIX "i2c:" + +struct i2c_device_id { + char name[I2C_NAME_SIZE]; + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + +/* pci_epf */ + +#define PCI_EPF_NAME_SIZE 20 +#define PCI_EPF_MODULE_PREFIX "pci_epf:" + +struct pci_epf_device_id { + char name[PCI_EPF_NAME_SIZE]; + kernel_ulong_t driver_data; +}; + +/* spi */ + +#define SPI_NAME_SIZE 32 +#define SPI_MODULE_PREFIX "spi:" + +struct spi_device_id { + char name[SPI_NAME_SIZE]; + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + +/* SLIMbus */ + +#define SLIMBUS_NAME_SIZE 32 +#define SLIMBUS_MODULE_PREFIX "slim:" + +struct slim_device_id { + __u16 manf_id, prod_code; + __u16 dev_index, instance; + + /* Data private to the driver */ + kernel_ulong_t driver_data; +}; + +#define APR_NAME_SIZE 32 +#define APR_MODULE_PREFIX "apr:" + +struct apr_device_id { + char name[APR_NAME_SIZE]; + __u32 domain_id; + __u32 svc_id; + __u32 svc_version; + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + +#define SPMI_NAME_SIZE 32 +#define SPMI_MODULE_PREFIX "spmi:" + +struct spmi_device_id { + char name[SPMI_NAME_SIZE]; + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + +/* dmi */ +enum dmi_field { + DMI_NONE, + DMI_BIOS_VENDOR, + DMI_BIOS_VERSION, + DMI_BIOS_DATE, + DMI_SYS_VENDOR, + DMI_PRODUCT_NAME, + DMI_PRODUCT_VERSION, + DMI_PRODUCT_SERIAL, + DMI_PRODUCT_UUID, + DMI_PRODUCT_SKU, + DMI_PRODUCT_FAMILY, + DMI_BOARD_VENDOR, + DMI_BOARD_NAME, + DMI_BOARD_VERSION, + DMI_BOARD_SERIAL, + DMI_BOARD_ASSET_TAG, + DMI_CHASSIS_VENDOR, + DMI_CHASSIS_TYPE, + DMI_CHASSIS_VERSION, + DMI_CHASSIS_SERIAL, + DMI_CHASSIS_ASSET_TAG, + DMI_STRING_MAX, + DMI_OEM_STRING, /* special case - will not be in dmi_ident */ +}; + +struct dmi_strmatch { + 
unsigned char slot:7; + unsigned char exact_match:1; + char substr[79]; +}; + +struct dmi_system_id { + int (*callback)(const struct dmi_system_id *); + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; +}; +/* + * struct dmi_device_id appears during expansion of + * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it + * but this is enough for gcc 3.4.6 to error out: + * error: storage size of '__mod_dmi_device_table' isn't known + */ +#define dmi_device_id dmi_system_id + +#define DMI_MATCH(a, b) { .slot = a, .substr = b } +#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 } + +#define PLATFORM_NAME_SIZE 20 +#define PLATFORM_MODULE_PREFIX "platform:" + +struct platform_device_id { + char name[PLATFORM_NAME_SIZE]; + kernel_ulong_t driver_data; +}; + +#define MDIO_NAME_SIZE 32 +#define MDIO_MODULE_PREFIX "mdio:" + +#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u" +#define MDIO_ID_ARGS(_id) \ + ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \ + ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \ + ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \ + ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \ + ((_id)>>15) & 1, ((_id)>>14) & 1, ((_id)>>13) & 1, ((_id)>>12) & 1, \ + ((_id)>>11) & 1, ((_id)>>10) & 1, ((_id)>>9) & 1, ((_id)>>8) & 1, \ + ((_id)>>7) & 1, ((_id)>>6) & 1, ((_id)>>5) & 1, ((_id)>>4) & 1, \ + ((_id)>>3) & 1, ((_id)>>2) & 1, ((_id)>>1) & 1, (_id) & 1 + +/** + * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus + * @phy_id: The result of + * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask + * for this PHY type + * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0 + * is used to terminate an array of struct mdio_device_id. + */ +struct mdio_device_id { + __u32 phy_id; + __u32 phy_id_mask; +}; + +struct zorro_device_id { + __u32 id; /* Device ID or ZORRO_WILDCARD */ + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + +#define ZORRO_WILDCARD (0xffffffff) /* not official */ + +#define ZORRO_DEVICE_MODALIAS_FMT "zorro:i%08X" + +#define ISAPNP_ANY_ID 0xffff +struct isapnp_device_id { + unsigned short card_vendor, card_device; + unsigned short vendor, function; + kernel_ulong_t driver_data; /* data private to the driver */ +}; + +/** + * struct amba_id - identifies a device on an AMBA bus + * @id: The significant bits if the hardware device ID + * @mask: Bitmask specifying which bits of the id field are significant when + * matching. A driver binds to a device when ((hardware device ID) & mask) + * == id. + * @data: Private data used by the driver. + */ +struct amba_id { + unsigned int id; + unsigned int mask; + void *data; +}; + +/** + * struct mips_cdmm_device_id - identifies devices in MIPS CDMM bus + * @type: Device type identifier. + */ +struct mips_cdmm_device_id { + __u8 type; +}; + +/* + * Match x86 CPUs for CPU specific drivers. + * See documentation of "x86_match_cpu" for details. + */ + +/* + * MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id. + * Although gcc seems to ignore this error, clang fails without this define. + * + * Note: The ordering of the struct is different from upstream because the + * static initializers in kernels < 5.7 still use C89 style while upstream + * has been converted to proper C99 initializers. 
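+ *
+ * Illustrative (editor-added) sketch of a feature-match table built on this
+ * struct; the table name and chosen feature are only examples:
+ *
+ *	static const struct x86_cpu_id foo_cpu_ids[] = {
+ *		X86_FEATURE_MATCH(X86_FEATURE_AES),
+ *		{ }
+ *	};
+ *	MODULE_DEVICE_TABLE(x86cpu, foo_cpu_ids);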
+ */ +#define x86cpu_device_id x86_cpu_id +struct x86_cpu_id { + __u16 vendor; + __u16 family; + __u16 model; + __u16 feature; /* bit index */ + kernel_ulong_t driver_data; + __u16 steppings; +}; + +#define X86_FEATURE_MATCH(x) \ + { X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x } + +#define X86_VENDOR_ANY 0xffff +#define X86_FAMILY_ANY 0 +#define X86_MODEL_ANY 0 +#define X86_STEPPING_ANY 0 +#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ + +/* + * Generic table type for matching CPU features. + * @feature: the bit number of the feature (0 - 65535) + */ + +struct cpu_feature { + __u16 feature; +}; + +#define IPACK_ANY_FORMAT 0xff +#define IPACK_ANY_ID (~0) +struct ipack_device_id { + __u8 format; /* Format version or IPACK_ANY_ID */ + __u32 vendor; /* Vendor ID or IPACK_ANY_ID */ + __u32 device; /* Device ID or IPACK_ANY_ID */ +}; + +#define MEI_CL_MODULE_PREFIX "mei:" +#define MEI_CL_NAME_SIZE 32 +#define MEI_CL_VERSION_ANY 0xff + +/** + * struct mei_cl_device_id - MEI client device identifier + * @name: helper name + * @uuid: client uuid + * @version: client protocol version + * @driver_info: information used by the driver. + * + * identifies mei client device by uuid and name + */ +struct mei_cl_device_id { + char name[MEI_CL_NAME_SIZE]; + uuid_le uuid; + __u8 version; + kernel_ulong_t driver_info; +}; + +/* RapidIO */ + +#define RIO_ANY_ID 0xffff + +/** + * struct rio_device_id - RIO device identifier + * @did: RapidIO device ID + * @vid: RapidIO vendor ID + * @asm_did: RapidIO assembly device ID + * @asm_vid: RapidIO assembly vendor ID + * + * Identifies a RapidIO device based on both the device/vendor IDs and + * the assembly device/vendor IDs. + */ +struct rio_device_id { + __u16 did, vid; + __u16 asm_did, asm_vid; +}; + +struct mcb_device_id { + __u16 device; + kernel_ulong_t driver_data; +}; + +struct ulpi_device_id { + __u16 vendor; + __u16 product; + kernel_ulong_t driver_data; +}; + +/** + * struct fsl_mc_device_id - MC object device identifier + * @vendor: vendor ID + * @obj_type: MC object type + * + * Type of entries in the "device Id" table for MC object devices supported by + * a MC object device driver. The last entry of the table has vendor set to 0x0 + */ +struct fsl_mc_device_id { + __u16 vendor; + const char obj_type[16]; +}; + +/** + * struct tb_service_id - Thunderbolt service identifiers + * @match_flags: Flags used to match the structure + * @protocol_key: Protocol key the service supports + * @protocol_id: Protocol id the service supports + * @protocol_version: Version of the protocol + * @protocol_revision: Revision of the protocol software + * @driver_data: Driver specific data + * + * Thunderbolt XDomain services are exposed as devices where each device + * carries the protocol information the service supports. Thunderbolt + * XDomain service drivers match against that information. 
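+ *
+ * Illustrative (editor-added) sketch of a match table; the key and id
+ * values below are only examples:
+ *
+ *	static const struct tb_service_id foo_service_ids[] = {
+ *		{ .match_flags = TBSVC_MATCH_PROTOCOL_KEY |
+ *				 TBSVC_MATCH_PROTOCOL_ID,
+ *		  .protocol_key = "network", .protocol_id = 1 },
+ *		{ }
+ *	};
+ *	MODULE_DEVICE_TABLE(tbsvc, foo_service_ids);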
+ */ +struct tb_service_id { + __u32 match_flags; + char protocol_key[8 + 1]; + __u32 protocol_id; + __u32 protocol_version; + __u32 protocol_revision; + kernel_ulong_t driver_data; +}; + +#define TBSVC_MATCH_PROTOCOL_KEY 0x0001 +#define TBSVC_MATCH_PROTOCOL_ID 0x0002 +#define TBSVC_MATCH_PROTOCOL_VERSION 0x0004 +#define TBSVC_MATCH_PROTOCOL_REVISION 0x0008 + +/* USB Type-C Alternate Modes */ + +#define TYPEC_ANY_MODE 0x7 + +/** + * struct typec_device_id - USB Type-C alternate mode identifiers + * @svid: Standard or Vendor ID + * @mode: Mode index + * @driver_data: Driver specific data + */ +struct typec_device_id { + __u16 svid; + __u8 mode; + kernel_ulong_t driver_data; +}; + +#endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module.h b/include/linux/module.h new file mode 100644 index 000000000..008cfc08a --- /dev/null +++ b/include/linux/module.h @@ -0,0 +1,822 @@ +#ifndef _LINUX_MODULE_H +#define _LINUX_MODULE_H +/* + * Dynamic loading of modules into the kernel. + * + * Rewritten by Richard Henderson Dec 1996 + * Rewritten again by Rusty Russell, 2002 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */ +#define MODULE_SIG_STRING "~Module signature appended~\n" + +/* Not Yet Implemented */ +#define MODULE_SUPPORTED_DEVICE(name) + +#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN + +struct modversion_info { + unsigned long crc; + char name[MODULE_NAME_LEN]; +}; + +struct module; +struct exception_table_entry; + +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; + struct completion *kobj_completion; +} __randomize_layout; + +struct module_attribute { + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, + char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, + const char *, size_t count); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); +}; + +struct module_version_attribute { + struct module_attribute mattr; + const char *module_name; + const char *version; +} __attribute__ ((__aligned__(sizeof(void *)))); + +extern ssize_t __modver_version_show(struct module_attribute *, + struct module_kobject *, char *); + +extern struct module_attribute module_uevent; + +/* These are either module local, or the kernel's dummy ones. */ +extern int init_module(void); +extern void cleanup_module(void); + +#ifndef MODULE +/** + * module_init() - driver initialization entry point + * @x: function to be run at kernel boot time or module insertion + * + * module_init() will either be called during do_initcalls() (if + * builtin) or at module insertion time (if a module). There can only + * be one per module. + */ +#define module_init(x) __initcall(x); + +/** + * module_exit() - driver exit entry point + * @x: function to be run when driver is removed + * + * module_exit() will wrap the driver clean-up code + * with cleanup_module() when used with rmmod when + * the driver is a module. If the driver is statically + * compiled into the kernel, module_exit() has no effect. + * There can only be one per module. + */ +#define module_exit(x) __exitcall(x); + +#else /* MODULE */ + +/* + * In most cases loadable modules do not need custom + * initcall levels. 
There are still some valid cases where + * a driver may be needed early if built in, and does not + * matter when built as a loadable module. Like bus + * snooping debug drivers. + */ +#define early_initcall(fn) module_init(fn) +#define core_initcall(fn) module_init(fn) +#define core_initcall_sync(fn) module_init(fn) +#define postcore_initcall(fn) module_init(fn) +#define postcore_initcall_sync(fn) module_init(fn) +#define arch_initcall(fn) module_init(fn) +#define subsys_initcall(fn) module_init(fn) +#define subsys_initcall_sync(fn) module_init(fn) +#define fs_initcall(fn) module_init(fn) +#define fs_initcall_sync(fn) module_init(fn) +#define rootfs_initcall(fn) module_init(fn) +#define device_initcall(fn) module_init(fn) +#define device_initcall_sync(fn) module_init(fn) +#define late_initcall(fn) module_init(fn) +#define late_initcall_sync(fn) module_init(fn) + +#define console_initcall(fn) module_init(fn) +#define security_initcall(fn) module_init(fn) + +/* Each module must use one module_init(). */ +#define module_init(initfn) \ + static inline initcall_t __maybe_unused __inittest(void) \ + { return initfn; } \ + int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); + +/* This is only required if you want to be unloadable. */ +#define module_exit(exitfn) \ + static inline exitcall_t __maybe_unused __exittest(void) \ + { return exitfn; } \ + void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); + +#endif + +/* This means "can be init if no module support, otherwise module load + may call it." */ +#ifdef CONFIG_MODULES +#define __init_or_module +#define __initdata_or_module +#define __initconst_or_module +#define __INIT_OR_MODULE .text +#define __INITDATA_OR_MODULE .data +#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits +#else +#define __init_or_module __init +#define __initdata_or_module __initdata +#define __initconst_or_module __initconst +#define __INIT_OR_MODULE __INIT +#define __INITDATA_OR_MODULE __INITDATA +#define __INITRODATA_OR_MODULE __INITRODATA +#endif /*CONFIG_MODULES*/ + +/* Generic info of form tag = "info" */ +#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info) + +/* For userspace: you can also call me... */ +#define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias) + +/* Soft module dependencies. See man modprobe.d for details. + * Example: MODULE_SOFTDEP("pre: module-foo module-bar post: module-baz") + */ +#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep) + +/* + * The following license idents are currently accepted as indicating free + * software modules + * + * "GPL" [GNU Public License v2 or later] + * "GPL v2" [GNU Public License v2] + * "GPL and additional rights" [GNU Public License v2 rights and more] + * "Dual BSD/GPL" [GNU Public License v2 + * or BSD license choice] + * "Dual MIT/GPL" [GNU Public License v2 + * or MIT license choice] + * "Dual MPL/GPL" [GNU Public License v2 + * or Mozilla license choice] + * + * The following other idents are available + * + * "Proprietary" [Non free products] + * + * There are dual licensed components, but when running with Linux it is the + * GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL + * is a GPL combined work. + * + * This exists for several reasons + * 1. So modinfo can show license info for users wanting to vet their setup + * is free + * 2. So the community can ignore bug reports including proprietary modules + * 3. 
So vendors can do likewise based on their own policies + */ +#define MODULE_LICENSE(_license) MODULE_INFO(license, _license) + +/* + * Author(s), use "Name " or just "Name", for multiple + * authors use multiple MODULE_AUTHOR() statements/lines. + */ +#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author) + +/* What your module does. */ +#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description) + +#ifdef MODULE +/* Creates an alias so file2alias.c can find device table. */ +#define MODULE_DEVICE_TABLE(type, name) \ +extern typeof(name) __mod_##type##__##name##_device_table \ + __attribute__ ((unused, alias(__stringify(name)))) +#else /* !MODULE */ +#define MODULE_DEVICE_TABLE(type, name) +#endif + +/* Version of form [:][-]. + * Or for CVS/RCS ID version, everything but the number is stripped. + * : A (small) unsigned integer which allows you to start versions + * anew. If not mentioned, it's zero. eg. "2:1.0" is after + * "1:2.0". + + * : The may contain only alphanumerics and the + * character `.'. Ordered by numeric sort for numeric parts, + * ascii sort for ascii parts (as per RPM or DEB algorithm). + + * : Like , but inserted for local + * customizations, eg "rh3" or "rusty1". + + * Using this automatically adds a checksum of the .c files and the + * local headers in "srcversion". + */ + +#if defined(MODULE) || !defined(CONFIG_SYSFS) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#else +#define MODULE_VERSION(_version) \ + static struct module_version_attribute ___modver_attr = { \ + .mattr = { \ + .attr = { \ + .name = "version", \ + .mode = S_IRUGO, \ + }, \ + .show = __modver_version_show, \ + }, \ + .module_name = KBUILD_MODNAME, \ + .version = _version, \ + }; \ + static const struct module_version_attribute \ + __used __attribute__ ((__section__ ("__modver"))) \ + * __moduleparam_const __modver_attr = &___modver_attr +#endif + +/* Optional firmware file (or files) needed by the module + * format is simply firmware file name. Multiple firmware + * files require multiple MODULE_FIRMWARE() specifiers */ +#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware) + +struct notifier_block; + +#ifdef CONFIG_MODULES + +extern int modules_disabled; /* for sysctl */ +/* Get/put a kernel symbol (calls must be symmetric) */ +void *__symbol_get(const char *symbol); +void *__symbol_get_gpl(const char *symbol); +#define symbol_get(x) ((typeof(&x))(__symbol_get(__stringify(x)))) + +/* modules using other modules: kdb wants to see this. */ +struct module_use { + struct list_head source_list; + struct list_head target_list; + struct module *source, *target; +}; + +enum module_state { + MODULE_STATE_LIVE, /* Normal state. */ + MODULE_STATE_COMING, /* Full formed, running module_init. */ + MODULE_STATE_GOING, /* Going away. */ + MODULE_STATE_UNFORMED, /* Still setting it up. */ +}; + +struct mod_tree_node { + struct module *mod; + struct latch_tree_node node; +}; + +struct module_layout { + /* The actual code + data. */ + void *base; + /* Total size. */ + unsigned int size; + /* The size of the executable code. */ + unsigned int text_size; + /* Size of RO section of the module (text+rodata) */ + unsigned int ro_size; + /* Size of RO after init section */ + unsigned int ro_after_init_size; + +#ifdef CONFIG_MODULES_TREE_LOOKUP + struct mod_tree_node mtn; +#endif +}; + +#ifdef CONFIG_MODULES_TREE_LOOKUP +/* Only touch one cacheline for common rbtree-for-core-layout case. 
*/ +#define __module_layout_align ____cacheline_aligned +#else +#define __module_layout_align +#endif + +struct mod_kallsyms { + Elf_Sym *symtab; + unsigned int num_symtab; + char *strtab; +}; + +#ifdef CONFIG_LIVEPATCH +struct klp_modinfo { + Elf_Ehdr hdr; + Elf_Shdr *sechdrs; + char *secstrings; + unsigned int symndx; +}; +#endif + +struct module { + enum module_state state; + + /* Member of list of modules */ + struct list_head list; + + /* Unique handle for this module */ + char name[MODULE_NAME_LEN]; + + /* Sysfs stuff. */ + struct module_kobject mkobj; + struct module_attribute *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; + + /* Exported symbols */ + const struct kernel_symbol *syms; + const s32 *crcs; + unsigned int num_syms; + + /* Kernel parameters. */ +#ifdef CONFIG_SYSFS + struct mutex param_lock; +#endif + struct kernel_param *kp; + unsigned int num_kp; + + /* GPL-only exported symbols. */ + unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; + const s32 *gpl_crcs; + bool using_gplonly_symbols; + +#ifdef CONFIG_UNUSED_SYMBOLS + /* unused exported symbols. */ + const struct kernel_symbol *unused_syms; + const s32 *unused_crcs; + unsigned int num_unused_syms; + + /* GPL-only, unused exported symbols. */ + unsigned int num_unused_gpl_syms; + const struct kernel_symbol *unused_gpl_syms; + const s32 *unused_gpl_crcs; +#endif + +#ifdef CONFIG_MODULE_SIG + /* Signature was verified. */ + bool sig_ok; +#endif + + bool async_probe_requested; + + /* symbols that will be GPL-only in the near future. */ + const struct kernel_symbol *gpl_future_syms; + const s32 *gpl_future_crcs; + unsigned int num_gpl_future_syms; + + /* Exception table */ + unsigned int num_exentries; + struct exception_table_entry *extable; + + /* Startup function. */ + int (*init)(void); + + /* Core layout: rbtree is accessed frequently, so keep together. */ + struct module_layout core_layout __module_layout_align; + struct module_layout init_layout; + + /* Arch-specific module values */ + struct mod_arch_specific arch; + + unsigned long taints; /* same bits as kernel:taint_flags */ + +#ifdef CONFIG_GENERIC_BUG + /* Support for BUG */ + unsigned num_bugs; + struct list_head bug_list; + struct bug_entry *bug_table; +#endif + +#ifdef CONFIG_KALLSYMS + /* Protected by RCU and/or module_mutex: use rcu_dereference() */ + struct mod_kallsyms *kallsyms; + struct mod_kallsyms core_kallsyms; + + /* Section attributes */ + struct module_sect_attrs *sect_attrs; + + /* Notes attributes */ + struct module_notes_attrs *notes_attrs; +#endif + + /* The command line arguments (may be mangled). People like + keeping pointers to this stuff */ + char *args; + +#ifdef CONFIG_SMP + /* Per-cpu data. */ + void __percpu *percpu; + unsigned int percpu_size; +#endif + +#ifdef CONFIG_TRACEPOINTS + unsigned int num_tracepoints; + tracepoint_ptr_t *tracepoints_ptrs; +#endif +#ifdef CONFIG_JUMP_LABEL + struct jump_entry *jump_entries; + unsigned int num_jump_entries; +#endif +#ifdef CONFIG_TRACING + unsigned int num_trace_bprintk_fmt; + const char **trace_bprintk_fmt_start; +#endif +#ifdef CONFIG_EVENT_TRACING + struct trace_event_call **trace_events; + unsigned int num_trace_events; + struct trace_eval_map **trace_evals; + unsigned int num_trace_evals; +#endif +#ifdef CONFIG_FTRACE_MCOUNT_RECORD + unsigned int num_ftrace_callsites; + unsigned long *ftrace_callsites; +#endif + +#ifdef CONFIG_LIVEPATCH + bool klp; /* Is this a livepatch module? 
*/ + bool klp_alive; + + /* Elf information */ + struct klp_modinfo *klp_info; +#endif + +#ifdef CONFIG_MODULE_UNLOAD + /* What modules depend on me? */ + struct list_head source_list; + /* What modules do I depend on? */ + struct list_head target_list; + + /* Destruction function. */ + void (*exit)(void); + + atomic_t refcnt; +#endif + +#ifdef CONFIG_CONSTRUCTORS + /* Constructor functions. */ + ctor_fn_t *ctors; + unsigned int num_ctors; +#endif + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION + struct error_injection_entry *ei_funcs; + unsigned int num_ei_funcs; +#endif +} ____cacheline_aligned __randomize_layout; +#ifndef MODULE_ARCH_INIT +#define MODULE_ARCH_INIT {} +#endif + +extern struct mutex module_mutex; + +/* FIXME: It'd be nice to isolate modules during init, too, so they + aren't used before they (may) fail. But presently too much code + (IDE & SCSI) require entry into the module during init.*/ +static inline bool module_is_live(struct module *mod) +{ + return mod->state != MODULE_STATE_GOING; +} + +struct module *__module_text_address(unsigned long addr); +struct module *__module_address(unsigned long addr); +bool is_module_address(unsigned long addr); +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr); +bool is_module_percpu_address(unsigned long addr); +bool is_module_text_address(unsigned long addr); + +static inline bool within_module_core(unsigned long addr, + const struct module *mod) +{ + return (unsigned long)mod->core_layout.base <= addr && + addr < (unsigned long)mod->core_layout.base + mod->core_layout.size; +} + +static inline bool within_module_init(unsigned long addr, + const struct module *mod) +{ + return (unsigned long)mod->init_layout.base <= addr && + addr < (unsigned long)mod->init_layout.base + mod->init_layout.size; +} + +static inline bool within_module(unsigned long addr, const struct module *mod) +{ + return within_module_init(addr, mod) || within_module_core(addr, mod); +} + +/* Search for module by name: must hold module_mutex. */ +struct module *find_module(const char *name); + +struct symsearch { + const struct kernel_symbol *start, *stop; + const s32 *crcs; + enum mod_license { + NOT_GPL_ONLY, + GPL_ONLY, + WILL_BE_GPL_ONLY, + } license; + bool unused; +}; + +/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if + symnum out of range. */ +int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *name, char *module_name, int *exported); + +/* Look for this name: can be of form module:name. */ +unsigned long module_kallsyms_lookup_name(const char *name); + +int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data); + +extern void __noreturn __module_put_and_exit(struct module *mod, + long code); +#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code) + +#ifdef CONFIG_MODULE_UNLOAD +int module_refcount(struct module *mod); +void __symbol_put(const char *symbol); +#define symbol_put(x) __symbol_put(__stringify(x)) +void symbol_put_addr(void *addr); + +/* Sometimes we know we already have a refcount, and it's easier not + to handle the error case (which only happens with rmmod --wait). */ +extern void __module_get(struct module *module); + +/* This is the Right Way to get a module: if it fails, it's being removed, + * so pretend it's not there. 
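+ *
+ * Illustrative (editor-added) usage pattern, assuming @owner points at the
+ * module providing a resource the caller is about to use:
+ *
+ *	if (!try_module_get(owner))
+ *		return -ENODEV;
+ *	// ... use the resource ...
+ *	module_put(owner);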
*/ +extern bool try_module_get(struct module *module); + +extern void module_put(struct module *module); + +#else /*!CONFIG_MODULE_UNLOAD*/ +static inline bool try_module_get(struct module *module) +{ + return !module || module_is_live(module); +} +static inline void module_put(struct module *module) +{ +} +static inline void __module_get(struct module *module) +{ +} +#define symbol_put(x) do { } while (0) +#define symbol_put_addr(p) do { } while (0) + +#endif /* CONFIG_MODULE_UNLOAD */ + +/* This is a #define so the string doesn't get put in every .o file */ +#define module_name(mod) \ +({ \ + struct module *__mod = (mod); \ + __mod ? __mod->name : "kernel"; \ +}) + +/* Dereference module function descriptor */ +void *dereference_module_function_descriptor(struct module *mod, void *ptr); + +/* For kallsyms to ask for address resolution. namebuf should be at + * least KSYM_NAME_LEN long: a pointer to namebuf is returned if + * found, otherwise NULL. */ +const char *module_address_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, + char *namebuf); +int lookup_module_symbol_name(unsigned long addr, char *symname); +int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); + +int register_module_notifier(struct notifier_block *nb); +int unregister_module_notifier(struct notifier_block *nb); + +extern void print_modules(void); + +static inline bool module_requested_async_probing(struct module *module) +{ + return module && module->async_probe_requested; +} + +#ifdef CONFIG_LIVEPATCH +static inline bool is_livepatch_module(struct module *mod) +{ + return mod->klp; +} +#else /* !CONFIG_LIVEPATCH */ +static inline bool is_livepatch_module(struct module *mod) +{ + return false; +} +#endif /* CONFIG_LIVEPATCH */ + +bool is_module_sig_enforced(void); + +#else /* !CONFIG_MODULES... */ + +static inline struct module *__module_address(unsigned long addr) +{ + return NULL; +} + +static inline struct module *__module_text_address(unsigned long addr) +{ + return NULL; +} + +static inline bool is_module_address(unsigned long addr) +{ + return false; +} + +static inline bool is_module_percpu_address(unsigned long addr) +{ + return false; +} + +static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) +{ + return false; +} + +static inline bool is_module_text_address(unsigned long addr) +{ + return false; +} + +/* Get/put a kernel symbol (calls should be symmetric) */ +#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); }) +#define symbol_put(x) do { } while (0) +#define symbol_put_addr(x) do { } while (0) + +static inline void __module_get(struct module *module) +{ +} + +static inline bool try_module_get(struct module *module) +{ + return true; +} + +static inline void module_put(struct module *module) +{ +} + +#define module_name(mod) "kernel" + +/* For kallsyms to ask for address resolution. NULL means not found. 
*/ +static inline const char *module_address_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, + char *namebuf) +{ + return NULL; +} + +static inline int lookup_module_symbol_name(unsigned long addr, char *symname) +{ + return -ERANGE; +} + +static inline int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name) +{ + return -ERANGE; +} + +static inline int module_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, + char *module_name, int *exported) +{ + return -ERANGE; +} + +static inline unsigned long module_kallsyms_lookup_name(const char *name) +{ + return 0; +} + +static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, + unsigned long), + void *data) +{ + return 0; +} + +static inline int register_module_notifier(struct notifier_block *nb) +{ + /* no events will happen anyway, so this can always succeed */ + return 0; +} + +static inline int unregister_module_notifier(struct notifier_block *nb) +{ + return 0; +} + +#define module_put_and_exit(code) do_exit(code) + +static inline void print_modules(void) +{ +} + +static inline bool module_requested_async_probing(struct module *module) +{ + return false; +} + +static inline bool is_module_sig_enforced(void) +{ + return false; +} + +/* Dereference module function descriptor */ +static inline +void *dereference_module_function_descriptor(struct module *mod, void *ptr) +{ + return ptr; +} + +#endif /* CONFIG_MODULES */ + +#ifdef CONFIG_SYSFS +extern struct kset *module_kset; +extern struct kobj_type module_ktype; +extern int module_sysfs_initialized; +#endif /* CONFIG_SYSFS */ + +#define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x) + +/* BELOW HERE ALL THESE ARE OBSOLETE AND WILL VANISH */ + +#define __MODULE_STRING(x) __stringify(x) + +#ifdef CONFIG_STRICT_MODULE_RWX +extern void set_all_modules_text_rw(void); +extern void set_all_modules_text_ro(void); +extern void module_enable_ro(const struct module *mod, bool after_init); +extern void module_disable_ro(const struct module *mod); +#else +static inline void set_all_modules_text_rw(void) { } +static inline void set_all_modules_text_ro(void) { } +static inline void module_enable_ro(const struct module *mod, bool after_init) { } +static inline void module_disable_ro(const struct module *mod) { } +#endif + +#ifdef CONFIG_GENERIC_BUG +void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, + struct module *); +void module_bug_cleanup(struct module *); + +#else /* !CONFIG_GENERIC_BUG */ + +static inline void module_bug_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *mod) +{ +} +static inline void module_bug_cleanup(struct module *mod) {} +#endif /* CONFIG_GENERIC_BUG */ + +#ifdef CONFIG_RETPOLINE +extern bool retpoline_module_ok(bool has_retpoline); +#else +static inline bool retpoline_module_ok(bool has_retpoline) +{ + return true; +} +#endif + +#ifdef CONFIG_MODULE_SIG +static inline bool module_sig_ok(struct module *module) +{ + return module->sig_ok; +} +#else /* !CONFIG_MODULE_SIG */ +static inline bool module_sig_ok(struct module *module) +{ + return true; +} +#endif /* CONFIG_MODULE_SIG */ + +#endif /* _LINUX_MODULE_H */ diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h new file mode 100644 index 000000000..31013c2ef --- /dev/null +++ b/include/linux/moduleloader.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ 
+#ifndef _LINUX_MODULELOADER_H +#define _LINUX_MODULELOADER_H +/* The stuff needed for archs to support modules. */ + +#include +#include + +/* These may be implemented by architectures that need to hook into the + * module loader code. Architectures that don't need to do anything special + * can just rely on the 'weak' default hooks defined in kernel/module.c. + * Note, however, that at least one of apply_relocate or apply_relocate_add + * must be implemented by each architecture. + */ + +/* Adjust arch-specific sections. Return 0 on success. */ +int module_frob_arch_sections(Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, + char *secstrings, + struct module *mod); + +/* Additional bytes needed by arch in front of individual sections */ +unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section); + +/* Allocator used for allocating struct module, core sections and init + sections. Returns NULL on failure. */ +void *module_alloc(unsigned long size); + +/* Free memory returned from module_alloc. */ +void module_memfree(void *module_region); + +/* + * Apply the given relocation to the (simplified) ELF. Return -error + * or 0. + */ +#ifdef CONFIG_MODULES_USE_ELF_REL +int apply_relocate(Elf_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *mod); +#else +static inline int apply_relocate(Elf_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + printk(KERN_ERR "module %s: REL relocation unsupported\n", + module_name(me)); + return -ENOEXEC; +} +#endif + +/* + * Apply the given add relocation to the (simplified) ELF. Return + * -error or 0 + */ +#ifdef CONFIG_MODULES_USE_ELF_RELA +int apply_relocate_add(Elf_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *mod); +#else +static inline int apply_relocate_add(Elf_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + printk(KERN_ERR "module %s: REL relocation unsupported\n", + module_name(me)); + return -ENOEXEC; +} +#endif + +/* Any final processing of module before access. Return -error or 0. */ +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *mod); + +/* Any cleanup needed when module leaves. */ +void module_arch_cleanup(struct module *mod); + +/* Any cleanup before freeing mod->module_init */ +void module_arch_freeing_init(struct module *mod); + +#ifdef CONFIG_KASAN +#include +#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) +#else +#define MODULE_ALIGN PAGE_SIZE +#endif + +#endif diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h new file mode 100644 index 000000000..ba36506db --- /dev/null +++ b/include/linux/moduleparam.h @@ -0,0 +1,538 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MODULE_PARAMS_H +#define _LINUX_MODULE_PARAMS_H +/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */ +#include +#include +#include + +/* You can override this manually, but generally this should match the + module name. */ +#ifdef MODULE +#define MODULE_PARAM_PREFIX /* empty */ +#else +#define MODULE_PARAM_PREFIX KBUILD_MODNAME "." +#endif + +/* Chosen so that structs with an unsigned long line up. 
*/ +#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long)) + +#ifdef MODULE +#define __MODULE_INFO(tag, name, info) \ +static const char __UNIQUE_ID(name)[] \ + __used __attribute__((section(".modinfo"), unused, aligned(1))) \ + = __stringify(tag) "=" info +#else /* !MODULE */ +/* This struct is here for syntactic coherency, it is not used */ +#define __MODULE_INFO(tag, name, info) \ + struct __UNIQUE_ID(name) {} +#endif +#define __MODULE_PARM_TYPE(name, _type) \ + __MODULE_INFO(parmtype, name##type, #name ":" _type) + +/* One for each parameter, describing how to use it. Some files do + multiple of these per line, so can't just use MODULE_INFO. */ +#define MODULE_PARM_DESC(_parm, desc) \ + __MODULE_INFO(parm, _parm, #_parm ":" desc) + +struct kernel_param; + +/* + * Flags available for kernel_param_ops + * + * NOARG - the parameter allows for no argument (foo instead of foo=1) + */ +enum { + KERNEL_PARAM_OPS_FL_NOARG = (1 << 0) +}; + +struct kernel_param_ops { + /* How the ops should behave */ + unsigned int flags; + /* Returns 0, or -errno. arg is in kp->arg. */ + int (*set)(const char *val, const struct kernel_param *kp); + /* Returns length written or -errno. Buffer is 4k (ie. be short!) */ + int (*get)(char *buffer, const struct kernel_param *kp); + /* Optional function to free kp->arg when module unloaded. */ + void (*free)(void *arg); +}; + +/* + * Flags available for kernel_param + * + * UNSAFE - the parameter is dangerous and setting it will taint the kernel + * HWPARAM - Hardware param not permitted in lockdown mode + */ +enum { + KERNEL_PARAM_FL_UNSAFE = (1 << 0), + KERNEL_PARAM_FL_HWPARAM = (1 << 1), +}; + +struct kernel_param { + const char *name; + struct module *mod; + const struct kernel_param_ops *ops; + const u16 perm; + s8 level; + u8 flags; + union { + void *arg; + const struct kparam_string *str; + const struct kparam_array *arr; + }; +}; + +extern const struct kernel_param __start___param[], __stop___param[]; + +/* Special one for strings we want to copy into */ +struct kparam_string { + unsigned int maxlen; + char *string; +}; + +/* Special one for arrays */ +struct kparam_array +{ + unsigned int max; + unsigned int elemsize; + unsigned int *num; + const struct kernel_param_ops *ops; + void *elem; +}; + +/** + * module_param - typesafe helper for a module/cmdline parameter + * @value: the variable to alter, and exposed parameter name. + * @type: the type of the parameter + * @perm: visibility in sysfs. + * + * @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a + * ".") the kernel commandline parameter. Note that - is changed to _, so + * the user can use "foo-bar=1" even for variable "foo_bar". + * + * @perm is 0 if the the variable is not to appear in sysfs, or 0444 + * for world-readable, 0644 for root-writable, etc. Note that if it + * is writable, you may need to use kernel_param_lock() around + * accesses (esp. charp, which can be kfreed when it changes). + * + * The @type is simply pasted to refer to a param_ops_##type and a + * param_check_##type: for convenience many standard types are provided but + * you can create your own by defining those variables. + * + * Standard types are: + * byte, short, ushort, int, uint, long, ulong + * charp: a character pointer + * bool: a bool, values 0/1, y/n, Y/N. + * invbool: the above, only sense-reversed (N = true). 
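+ *
+ * Illustrative (editor-added) usage, with a hypothetical parameter name:
+ *
+ *	static int debug_level;
+ *	module_param(debug_level, int, 0644);
+ *	MODULE_PARM_DESC(debug_level, "Verbosity of foo debug output");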
+ */ +#define module_param(name, type, perm) \ + module_param_named(name, name, type, perm) + +/** + * module_param_unsafe - same as module_param but taints kernel + */ +#define module_param_unsafe(name, type, perm) \ + module_param_named_unsafe(name, name, type, perm) + +/** + * module_param_named - typesafe helper for a renamed module/cmdline parameter + * @name: a valid C identifier which is the parameter name. + * @value: the actual lvalue to alter. + * @type: the type of the parameter + * @perm: visibility in sysfs. + * + * Usually it's a good idea to have variable names and user-exposed names the + * same, but that's harder if the variable must be non-static or is inside a + * structure. This allows exposure under a different name. + */ +#define module_param_named(name, value, type, perm) \ + param_check_##type(name, &(value)); \ + module_param_cb(name, ¶m_ops_##type, &value, perm); \ + __MODULE_PARM_TYPE(name, #type) + +/** + * module_param_named_unsafe - same as module_param_named but taints kernel + */ +#define module_param_named_unsafe(name, value, type, perm) \ + param_check_##type(name, &(value)); \ + module_param_cb_unsafe(name, ¶m_ops_##type, &value, perm); \ + __MODULE_PARM_TYPE(name, #type) + +/** + * module_param_cb - general callback for a module/cmdline parameter + * @name: a valid C identifier which is the parameter name. + * @ops: the set & get operations for this parameter. + * @perm: visibility in sysfs. + * + * The ops can have NULL set or get functions. + */ +#define module_param_cb(name, ops, arg, perm) \ + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, 0) + +#define module_param_cb_unsafe(name, ops, arg, perm) \ + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, \ + KERNEL_PARAM_FL_UNSAFE) + +/** + * _param_cb - general callback for a module/cmdline parameter + * to be evaluated before certain initcall level + * @name: a valid C identifier which is the parameter name. + * @ops: the set & get operations for this parameter. + * @perm: visibility in sysfs. + * + * The ops can have NULL set or get functions. + */ +#define __level_param_cb(name, ops, arg, perm, level) \ + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0) + +#define core_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 1) + +#define postcore_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 2) + +#define arch_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 3) + +#define subsys_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 4) + +#define fs_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 5) + +#define device_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 6) + +#define late_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 7) + +/* On alpha, ia64 and ppc64 relocations to global data cannot go into + read-only sections (which is part of respective UNIX ABI on these + platforms). So 'const' makes no sense and even causes compile failures + with some compilers. */ +#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) +#define __moduleparam_const +#else +#define __moduleparam_const const +#endif + +/* This is the fundamental function for registering boot/module + parameters. */ +#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \ + /* Default value instead of permissions? 
*/ \ + static const char __param_str_##name[] = prefix #name; \ + static struct kernel_param __moduleparam_const __param_##name \ + __used \ + __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ + = { __param_str_##name, THIS_MODULE, ops, \ + VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } } + +/* Obsolete - use module_param_cb() */ +#define module_param_call(name, _set, _get, arg, perm) \ + static const struct kernel_param_ops __param_ops_##name = \ + { .flags = 0, .set = _set, .get = _get }; \ + __module_param_call(MODULE_PARAM_PREFIX, \ + name, &__param_ops_##name, arg, perm, -1, 0) + +#ifdef CONFIG_SYSFS +extern void kernel_param_lock(struct module *mod); +extern void kernel_param_unlock(struct module *mod); +#else +static inline void kernel_param_lock(struct module *mod) +{ +} +static inline void kernel_param_unlock(struct module *mod) +{ +} +#endif + +#ifndef MODULE +/** + * core_param - define a historical core kernel parameter. + * @name: the name of the cmdline and sysfs parameter (often the same as var) + * @var: the variable + * @type: the type of the parameter + * @perm: visibility in sysfs + * + * core_param is just like module_param(), but cannot be modular and + * doesn't add a prefix (such as "printk."). This is for compatibility + * with __setup(), and it makes sense as truly core parameters aren't + * tied to the particular file they're in. + */ +#define core_param(name, var, type, perm) \ + param_check_##type(name, &(var)); \ + __module_param_call("", name, ¶m_ops_##type, &var, perm, -1, 0) + +/** + * core_param_unsafe - same as core_param but taints kernel + */ +#define core_param_unsafe(name, var, type, perm) \ + param_check_##type(name, &(var)); \ + __module_param_call("", name, ¶m_ops_##type, &var, perm, \ + -1, KERNEL_PARAM_FL_UNSAFE) + +#endif /* !MODULE */ + +/** + * module_param_string - a char array parameter + * @name: the name of the parameter + * @string: the string variable + * @len: the maximum length of the string, incl. terminator + * @perm: visibility in sysfs. + * + * This actually copies the string when it's set (unlike type charp). + * @len is usually just sizeof(string). + */ +#define module_param_string(name, string, len, perm) \ + static const struct kparam_string __param_string_##name \ + = { len, string }; \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + ¶m_ops_string, \ + .str = &__param_string_##name, perm, -1, 0);\ + __MODULE_PARM_TYPE(name, "string") + +/** + * parameq - checks if two parameter names match + * @name1: parameter name 1 + * @name2: parameter name 2 + * + * Returns true if the two parameter names are equal. + * Dashes (-) are considered equal to underscores (_). + */ +extern bool parameq(const char *name1, const char *name2); + +/** + * parameqn - checks if two parameter names match + * @name1: parameter name 1 + * @name2: parameter name 2 + * @n: the length to compare + * + * Similar to parameq(), except it compares @n characters. + */ +extern bool parameqn(const char *name1, const char *name2, size_t n); + +/* Called on module insert or kernel boot */ +extern char *parse_args(const char *name, + char *args, + const struct kernel_param *params, + unsigned num, + s16 level_min, + s16 level_max, + void *arg, + int (*unknown)(char *param, char *val, + const char *doing, void *arg)); + +/* Called by module remove. 
*/ +#ifdef CONFIG_SYSFS +extern void destroy_params(const struct kernel_param *params, unsigned num); +#else +static inline void destroy_params(const struct kernel_param *params, + unsigned num) +{ +} +#endif /* !CONFIG_SYSFS */ + +/* All the helper functions */ +/* The macros to do compile-time type checking stolen from Jakub + Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ +#define __param_check(name, p, type) \ + static inline type __always_unused *__check_##name(void) { return(p); } + +extern const struct kernel_param_ops param_ops_byte; +extern int param_set_byte(const char *val, const struct kernel_param *kp); +extern int param_get_byte(char *buffer, const struct kernel_param *kp); +#define param_check_byte(name, p) __param_check(name, p, unsigned char) + +extern const struct kernel_param_ops param_ops_short; +extern int param_set_short(const char *val, const struct kernel_param *kp); +extern int param_get_short(char *buffer, const struct kernel_param *kp); +#define param_check_short(name, p) __param_check(name, p, short) + +extern const struct kernel_param_ops param_ops_ushort; +extern int param_set_ushort(const char *val, const struct kernel_param *kp); +extern int param_get_ushort(char *buffer, const struct kernel_param *kp); +#define param_check_ushort(name, p) __param_check(name, p, unsigned short) + +extern const struct kernel_param_ops param_ops_int; +extern int param_set_int(const char *val, const struct kernel_param *kp); +extern int param_get_int(char *buffer, const struct kernel_param *kp); +#define param_check_int(name, p) __param_check(name, p, int) + +extern const struct kernel_param_ops param_ops_uint; +extern int param_set_uint(const char *val, const struct kernel_param *kp); +extern int param_get_uint(char *buffer, const struct kernel_param *kp); +#define param_check_uint(name, p) __param_check(name, p, unsigned int) + +extern const struct kernel_param_ops param_ops_long; +extern int param_set_long(const char *val, const struct kernel_param *kp); +extern int param_get_long(char *buffer, const struct kernel_param *kp); +#define param_check_long(name, p) __param_check(name, p, long) + +extern const struct kernel_param_ops param_ops_ulong; +extern int param_set_ulong(const char *val, const struct kernel_param *kp); +extern int param_get_ulong(char *buffer, const struct kernel_param *kp); +#define param_check_ulong(name, p) __param_check(name, p, unsigned long) + +extern const struct kernel_param_ops param_ops_ullong; +extern int param_set_ullong(const char *val, const struct kernel_param *kp); +extern int param_get_ullong(char *buffer, const struct kernel_param *kp); +#define param_check_ullong(name, p) __param_check(name, p, unsigned long long) + +extern const struct kernel_param_ops param_ops_charp; +extern int param_set_charp(const char *val, const struct kernel_param *kp); +extern int param_get_charp(char *buffer, const struct kernel_param *kp); +extern void param_free_charp(void *arg); +#define param_check_charp(name, p) __param_check(name, p, char *) + +/* We used to allow int as well as bool. We're taking that away! 
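+ * (Illustrative note, not in the original header: param_set_bool accepts
+ * 0/1, y/n and Y/N, and param_ops_bool is flagged KERNEL_PARAM_OPS_FL_NOARG,
+ * so for a hypothetical bool parameter "enable" a bare "modprobe foo enable"
+ * is treated the same as "enable=1".)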
*/ +extern const struct kernel_param_ops param_ops_bool; +extern int param_set_bool(const char *val, const struct kernel_param *kp); +extern int param_get_bool(char *buffer, const struct kernel_param *kp); +#define param_check_bool(name, p) __param_check(name, p, bool) + +extern const struct kernel_param_ops param_ops_bool_enable_only; +extern int param_set_bool_enable_only(const char *val, + const struct kernel_param *kp); +/* getter is the same as for the regular bool */ +#define param_check_bool_enable_only param_check_bool + +extern const struct kernel_param_ops param_ops_invbool; +extern int param_set_invbool(const char *val, const struct kernel_param *kp); +extern int param_get_invbool(char *buffer, const struct kernel_param *kp); +#define param_check_invbool(name, p) __param_check(name, p, bool) + +/* An int, which can only be set like a bool (though it shows as an int). */ +extern const struct kernel_param_ops param_ops_bint; +extern int param_set_bint(const char *val, const struct kernel_param *kp); +#define param_get_bint param_get_int +#define param_check_bint param_check_int + +/** + * module_param_array - a parameter which is an array of some type + * @name: the name of the array variable + * @type: the type, as per module_param() + * @nump: optional pointer filled in with the number written + * @perm: visibility in sysfs + * + * Input and output are as comma-separated values. Commas inside values + * don't work properly (eg. an array of charp). + * + * ARRAY_SIZE(@name) is used to determine the number of elements in the + * array, so the definition must be visible. + */ +#define module_param_array(name, type, nump, perm) \ + module_param_array_named(name, name, type, nump, perm) + +/** + * module_param_array_named - renamed parameter which is an array of some type + * @name: a valid C identifier which is the parameter name + * @array: the name of the array variable + * @type: the type, as per module_param() + * @nump: optional pointer filled in with the number written + * @perm: visibility in sysfs + * + * This exposes a different name than the actual variable name. See + * module_param_named() for why this might be necessary. + */ +#define module_param_array_named(name, array, type, nump, perm) \ + param_check_##type(name, &(array)[0]); \ + static const struct kparam_array __param_arr_##name \ + = { .max = ARRAY_SIZE(array), .num = nump, \ + .ops = ¶m_ops_##type, \ + .elemsize = sizeof(array[0]), .elem = array }; \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + ¶m_array_ops, \ + .arr = &__param_arr_##name, \ + perm, -1, 0); \ + __MODULE_PARM_TYPE(name, "array of " #type) + +enum hwparam_type { + hwparam_ioport, /* Module parameter configures an I/O port */ + hwparam_iomem, /* Module parameter configures an I/O mem address */ + hwparam_ioport_or_iomem, /* Module parameter could be either, depending on other option */ + hwparam_irq, /* Module parameter configures an IRQ */ + hwparam_dma, /* Module parameter configures a DMA channel */ + hwparam_dma_addr, /* Module parameter configures a DMA buffer address */ + hwparam_other, /* Module parameter configures some other value */ +}; + +/** + * module_param_hw_named - A parameter representing a hw parameters + * @name: a valid C identifier which is the parameter name. + * @value: the actual lvalue to alter. + * @type: the type of the parameter + * @hwtype: what the value represents (enum hwparam_type) + * @perm: visibility in sysfs. 
+ * + * Usually it's a good idea to have variable names and user-exposed names the + * same, but that's harder if the variable must be non-static or is inside a + * structure. This allows exposure under a different name. + */ +#define module_param_hw_named(name, value, type, hwtype, perm) \ + param_check_##type(name, &(value)); \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + ¶m_ops_##type, &value, \ + perm, -1, \ + KERNEL_PARAM_FL_HWPARAM | (hwparam_##hwtype & 0)); \ + __MODULE_PARM_TYPE(name, #type) + +#define module_param_hw(name, type, hwtype, perm) \ + module_param_hw_named(name, name, type, hwtype, perm) + +/** + * module_param_hw_array - A parameter representing an array of hw parameters + * @name: the name of the array variable + * @type: the type, as per module_param() + * @hwtype: what the value represents (enum hwparam_type) + * @nump: optional pointer filled in with the number written + * @perm: visibility in sysfs + * + * Input and output are as comma-separated values. Commas inside values + * don't work properly (eg. an array of charp). + * + * ARRAY_SIZE(@name) is used to determine the number of elements in the + * array, so the definition must be visible. + */ +#define module_param_hw_array(name, type, hwtype, nump, perm) \ + param_check_##type(name, &(name)[0]); \ + static const struct kparam_array __param_arr_##name \ + = { .max = ARRAY_SIZE(name), .num = nump, \ + .ops = ¶m_ops_##type, \ + .elemsize = sizeof(name[0]), .elem = name }; \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + ¶m_array_ops, \ + .arr = &__param_arr_##name, \ + perm, -1, \ + KERNEL_PARAM_FL_HWPARAM | (hwparam_##hwtype & 0)); \ + __MODULE_PARM_TYPE(name, "array of " #type) + + +extern const struct kernel_param_ops param_array_ops; + +extern const struct kernel_param_ops param_ops_string; +extern int param_set_copystring(const char *val, const struct kernel_param *); +extern int param_get_string(char *buffer, const struct kernel_param *kp); + +/* for exporting parameters in /sys/module/.../parameters */ + +struct module; + +#if defined(CONFIG_SYSFS) && defined(CONFIG_MODULES) +extern int module_param_sysfs_setup(struct module *mod, + const struct kernel_param *kparam, + unsigned int num_params); + +extern void module_param_sysfs_remove(struct module *mod); +#else +static inline int module_param_sysfs_setup(struct module *mod, + const struct kernel_param *kparam, + unsigned int num_params) +{ + return 0; +} + +static inline void module_param_sysfs_remove(struct module *mod) +{ } +#endif + +#endif /* _LINUX_MODULE_PARAMS_H */ diff --git a/include/linux/mount.h b/include/linux/mount.h new file mode 100644 index 000000000..4b0db4418 --- /dev/null +++ b/include/linux/mount.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * Definitions for mount interface. This describes the in the kernel build + * linkedlist with mounted filesystems. + * + * Author: Marco van Wieringen + * + */ +#ifndef _LINUX_MOUNT_H +#define _LINUX_MOUNT_H + +#include +#include +#include +#include +#include +#include + +struct super_block; +struct vfsmount; +struct dentry; +struct mnt_namespace; + +#define MNT_NOSUID 0x01 +#define MNT_NODEV 0x02 +#define MNT_NOEXEC 0x04 +#define MNT_NOATIME 0x08 +#define MNT_NODIRATIME 0x10 +#define MNT_RELATIME 0x20 +#define MNT_READONLY 0x40 /* does the user want this to be r/o? 
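+ * (Illustrative note, not in the original header: writers are
+ * expected to bracket modifications with the mnt_want_write() /
+ * mnt_drop_write() pair declared below; mnt_want_write() fails
+ * with -EROFS while this flag is set.)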
*/ + +#define MNT_SHRINKABLE 0x100 +#define MNT_WRITE_HOLD 0x200 + +#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ +#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */ +/* + * MNT_SHARED_MASK is the set of flags that should be cleared when a + * mount becomes shared. Currently, this is only the flag that says a + * mount cannot be bind mounted, since this is how we create a mount + * that shares events with another mount. If you add a new MNT_* + * flag, consider how it interacts with shared mounts. + */ +#define MNT_SHARED_MASK (MNT_UNBINDABLE) +#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \ + | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \ + | MNT_READONLY) +#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME ) + +#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \ + MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED) + +#define MNT_INTERNAL 0x4000 + +#define MNT_LOCK_ATIME 0x040000 +#define MNT_LOCK_NOEXEC 0x080000 +#define MNT_LOCK_NOSUID 0x100000 +#define MNT_LOCK_NODEV 0x200000 +#define MNT_LOCK_READONLY 0x400000 +#define MNT_LOCKED 0x800000 +#define MNT_DOOMED 0x1000000 +#define MNT_SYNC_UMOUNT 0x2000000 +#define MNT_MARKED 0x4000000 +#define MNT_UMOUNT 0x8000000 + +struct vfsmount { + struct dentry *mnt_root; /* root of the mounted tree */ + struct super_block *mnt_sb; /* pointer to superblock */ + int mnt_flags; +} __randomize_layout; + +struct file; /* forward dec */ +struct path; + +extern int mnt_want_write(struct vfsmount *mnt); +extern int mnt_want_write_file(struct file *file); +extern int mnt_clone_write(struct vfsmount *mnt); +extern void mnt_drop_write(struct vfsmount *mnt); +extern void mnt_drop_write_file(struct file *file); +extern void mntput(struct vfsmount *mnt); +extern struct vfsmount *mntget(struct vfsmount *mnt); +extern struct vfsmount *mnt_clone_internal(const struct path *path); +extern int __mnt_is_readonly(struct vfsmount *mnt); +extern bool mnt_may_suid(struct vfsmount *mnt); + +struct path; +extern struct vfsmount *clone_private_mount(const struct path *path); +extern int __mnt_want_write(struct vfsmount *); +extern void __mnt_drop_write(struct vfsmount *); + +struct file_system_type; +extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, + int flags, const char *name, + void *data); +extern struct vfsmount *vfs_submount(const struct dentry *mountpoint, + struct file_system_type *type, + const char *name, void *data); + +extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list); +extern void mark_mounts_for_expiry(struct list_head *mounts); + +extern dev_t name_to_dev_t(const char *name); + +extern unsigned int sysctl_mount_max; + +extern bool path_is_mountpoint(const struct path *path); + +#endif /* _LINUX_MOUNT_H */ diff --git a/include/linux/mpage.h b/include/linux/mpage.h new file mode 100644 index 000000000..001f1fcf9 --- /dev/null +++ b/include/linux/mpage.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/mpage.h + * + * Contains declarations related to preparing and submitting BIOS which contain + * multiple pagecache pages. + */ + +/* + * (And no, it doesn't do the #ifdef __MPAGE_H thing, and it doesn't do + * nested includes. Get it right in the .c file). 
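+ * (Illustrative note, not in the original header: a filesystem typically
+ * wraps these helpers with its own get_block_t and points its
+ * address_space_operations at the wrappers; e.g. ext2's ext2_readpage()
+ * is simply "return mpage_readpage(page, ext2_get_block);".)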
+ */ +#ifdef CONFIG_BLOCK + +struct writeback_control; + +int mpage_readpages(struct address_space *mapping, struct list_head *pages, + unsigned nr_pages, get_block_t get_block); +int mpage_readpage(struct page *page, get_block_t get_block); +int mpage_writepages(struct address_space *mapping, + struct writeback_control *wbc, get_block_t get_block); +int mpage_writepage(struct page *page, get_block_t *get_block, + struct writeback_control *wbc); + +#endif diff --git a/include/linux/mpi.h b/include/linux/mpi.h new file mode 100644 index 000000000..7cd1473c6 --- /dev/null +++ b/include/linux/mpi.h @@ -0,0 +1,96 @@ +/* mpi.h - Multi Precision Integers + * Copyright (C) 1994, 1996, 1998, 1999, + * 2000, 2001 Free Software Foundation, Inc. + * + * This file is part of GNUPG. + * + * GNUPG is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * GNUPG is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + * The GNU MP Library itself is published under the LGPL; + * however I decided to publish this code under the plain GPL. 
+ */ + +#ifndef G10_MPI_H +#define G10_MPI_H + +#include +#include + +#define BYTES_PER_MPI_LIMB (BITS_PER_LONG / 8) +#define BITS_PER_MPI_LIMB BITS_PER_LONG + +typedef unsigned long int mpi_limb_t; +typedef signed long int mpi_limb_signed_t; + +struct gcry_mpi { + int alloced; /* array size (# of allocated limbs) */ + int nlimbs; /* number of valid limbs */ + int nbits; /* the real number of valid bits (info only) */ + int sign; /* indicates a negative number */ + unsigned flags; /* bit 0: array must be allocated in secure memory space */ + /* bit 1: not used */ + /* bit 2: the limb is a pointer to some m_alloced data */ + mpi_limb_t *d; /* array with the limbs */ +}; + +typedef struct gcry_mpi *MPI; + +#define mpi_get_nlimbs(a) ((a)->nlimbs) + +/*-- mpiutil.c --*/ +MPI mpi_alloc(unsigned nlimbs); +void mpi_free(MPI a); +int mpi_resize(MPI a, unsigned nlimbs); + +/*-- mpicoder.c --*/ +MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes); +MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread); +MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len); +void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); +int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, + int *sign); +int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes, + int *sign); + +/*-- mpi-pow.c --*/ +int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); + +/*-- mpi-cmp.c --*/ +int mpi_cmp_ui(MPI u, ulong v); +int mpi_cmp(MPI u, MPI v); + +/*-- mpi-bit.c --*/ +void mpi_normalize(MPI a); +unsigned mpi_get_nbits(MPI a); + +/* inline functions */ + +/** + * mpi_get_size() - returns max size required to store the number + * + * @a: A multi precision integer for which we want to allocate a bufer + * + * Return: size required to store the number + */ +static inline unsigned int mpi_get_size(MPI a) +{ + return a->nlimbs * BYTES_PER_MPI_LIMB; +} +#endif /*G10_MPI_H */ diff --git a/include/linux/mpls.h b/include/linux/mpls.h new file mode 100644 index 000000000..ae1a188c0 --- /dev/null +++ b/include/linux/mpls.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MPLS_H +#define _LINUX_MPLS_H + +#include + +#define MPLS_TTL_MASK (MPLS_LS_TTL_MASK >> MPLS_LS_TTL_SHIFT) +#define MPLS_BOS_MASK (MPLS_LS_S_MASK >> MPLS_LS_S_SHIFT) +#define MPLS_TC_MASK (MPLS_LS_TC_MASK >> MPLS_LS_TC_SHIFT) +#define MPLS_LABEL_MASK (MPLS_LS_LABEL_MASK >> MPLS_LS_LABEL_SHIFT) + +#endif /* _LINUX_MPLS_H */ diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h new file mode 100644 index 000000000..140c56954 --- /dev/null +++ b/include/linux/mpls_iptunnel.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MPLS_IPTUNNEL_H +#define _LINUX_MPLS_IPTUNNEL_H + +#include + +#endif /* _LINUX_MPLS_IPTUNNEL_H */ diff --git a/include/linux/mroute.h b/include/linux/mroute.h new file mode 100644 index 000000000..9a36fad9e --- /dev/null +++ b/include/linux/mroute.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MROUTE_H +#define __LINUX_MROUTE_H + +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_IP_MROUTE +static inline int ip_mroute_opt(int opt) +{ + return opt >= MRT_BASE && opt <= MRT_MAX; +} + +int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); +int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); +int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); +int 
ip_mr_init(void); +bool ipmr_rule_default(const struct fib_rule *rule); +#else +static inline int ip_mroute_setsockopt(struct sock *sock, int optname, + char __user *optval, unsigned int optlen) +{ + return -ENOPROTOOPT; +} + +static inline int ip_mroute_getsockopt(struct sock *sock, int optname, + char __user *optval, int __user *optlen) +{ + return -ENOPROTOOPT; +} + +static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) +{ + return -ENOIOCTLCMD; +} + +static inline int ip_mr_init(void) +{ + return 0; +} + +static inline int ip_mroute_opt(int opt) +{ + return 0; +} + +static inline bool ipmr_rule_default(const struct fib_rule *rule) +{ + return true; +} +#endif + +#define VIFF_STATIC 0x8000 + +struct mfc_cache_cmp_arg { + __be32 mfc_mcastgrp; + __be32 mfc_origin; +}; + +/** + * struct mfc_cache - multicast routing entries + * @_c: Common multicast routing information; has to be first [for casting] + * @mfc_mcastgrp: destination multicast group address + * @mfc_origin: source address + * @cmparg: used for rhashtable comparisons + */ +struct mfc_cache { + struct mr_mfc _c; + union { + struct { + __be32 mfc_mcastgrp; + __be32 mfc_origin; + }; + struct mfc_cache_cmp_arg cmparg; + }; +}; + +struct rtmsg; +int ipmr_get_route(struct net *net, struct sk_buff *skb, + __be32 saddr, __be32 daddr, + struct rtmsg *rtm, u32 portid); +#endif diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h new file mode 100644 index 000000000..c4a45859f --- /dev/null +++ b/include/linux/mroute6.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MROUTE6_H +#define __LINUX_MROUTE6_H + + +#include +#include /* for struct sk_buff_head */ +#include +#include +#include +#include + +#ifdef CONFIG_IPV6_MROUTE +static inline int ip6_mroute_opt(int opt) +{ + return (opt >= MRT6_BASE) && (opt <= MRT6_MAX); +} +#else +static inline int ip6_mroute_opt(int opt) +{ + return 0; +} +#endif + +struct sock; + +#ifdef CONFIG_IPV6_MROUTE +extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); +extern int ip6_mr_input(struct sk_buff *skb); +extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); +extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); +extern int ip6_mr_init(void); +extern void ip6_mr_cleanup(void); +#else +static inline +int ip6_mroute_setsockopt(struct sock *sock, + int optname, char __user *optval, unsigned int optlen) +{ + return -ENOPROTOOPT; +} + +static inline +int ip6_mroute_getsockopt(struct sock *sock, + int optname, char __user *optval, int __user *optlen) +{ + return -ENOPROTOOPT; +} + +static inline +int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) +{ + return -ENOIOCTLCMD; +} + +static inline int ip6_mr_init(void) +{ + return 0; +} + +static inline void ip6_mr_cleanup(void) +{ + return; +} +#endif + +#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES +bool ip6mr_rule_default(const struct fib_rule *rule); +#else +static inline bool ip6mr_rule_default(const struct fib_rule *rule) +{ + return true; +} +#endif + +#define VIFF_STATIC 0x8000 + +struct mfc6_cache_cmp_arg { + struct in6_addr mf6c_mcastgrp; + struct in6_addr mf6c_origin; +}; + +struct mfc6_cache { + struct mr_mfc _c; + union { + struct { + struct in6_addr mf6c_mcastgrp; + struct in6_addr mf6c_origin; + }; + struct mfc6_cache_cmp_arg cmparg; + }; +}; + +#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. 
of asserts */ + +struct rtmsg; +extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, + struct rtmsg *rtm, u32 portid); + +#ifdef CONFIG_IPV6_MROUTE +bool mroute6_is_socket(struct net *net, struct sk_buff *skb); +extern int ip6mr_sk_done(struct sock *sk); +#else +static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb) +{ + return false; +} +static inline int ip6mr_sk_done(struct sock *sk) +{ + return 0; +} +#endif +#endif diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h new file mode 100644 index 000000000..6675b9f81 --- /dev/null +++ b/include/linux/mroute_base.h @@ -0,0 +1,465 @@ +#ifndef __LINUX_MROUTE_BASE_H +#define __LINUX_MROUTE_BASE_H + +#include +#include +#include +#include +#include +#include + +/** + * struct vif_device - interface representor for multicast routing + * @dev: network device being used + * @bytes_in: statistic; bytes ingressing + * @bytes_out: statistic; bytes egresing + * @pkt_in: statistic; packets ingressing + * @pkt_out: statistic; packets egressing + * @rate_limit: Traffic shaping (NI) + * @threshold: TTL threshold + * @flags: Control flags + * @link: Physical interface index + * @dev_parent_id: device parent id + * @local: Local address + * @remote: Remote address for tunnels + */ +struct vif_device { + struct net_device *dev; + unsigned long bytes_in, bytes_out; + unsigned long pkt_in, pkt_out; + unsigned long rate_limit; + unsigned char threshold; + unsigned short flags; + int link; + + /* Currently only used by ipmr */ + struct netdev_phys_item_id dev_parent_id; + __be32 local, remote; +}; + +struct vif_entry_notifier_info { + struct fib_notifier_info info; + struct net_device *dev; + unsigned short vif_index; + unsigned short vif_flags; + u32 tb_id; +}; + +static inline int mr_call_vif_notifier(struct notifier_block *nb, + struct net *net, + unsigned short family, + enum fib_event_type event_type, + struct vif_device *vif, + unsigned short vif_index, u32 tb_id) +{ + struct vif_entry_notifier_info info = { + .info = { + .family = family, + .net = net, + }, + .dev = vif->dev, + .vif_index = vif_index, + .vif_flags = vif->flags, + .tb_id = tb_id, + }; + + return call_fib_notifier(nb, net, event_type, &info.info); +} + +static inline int mr_call_vif_notifiers(struct net *net, + unsigned short family, + enum fib_event_type event_type, + struct vif_device *vif, + unsigned short vif_index, u32 tb_id, + unsigned int *ipmr_seq) +{ + struct vif_entry_notifier_info info = { + .info = { + .family = family, + .net = net, + }, + .dev = vif->dev, + .vif_index = vif_index, + .vif_flags = vif->flags, + .tb_id = tb_id, + }; + + ASSERT_RTNL(); + (*ipmr_seq)++; + return call_fib_notifiers(net, event_type, &info.info); +} + +#ifndef MAXVIFS +/* This one is nasty; value is defined in uapi using different symbols for + * mroute and morute6 but both map into same 32. 
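+ * (Clarifying note, based on the uapi headers: the two symbols are MAXVIFS
+ * in uapi/linux/mroute.h and MAXMIFS in uapi/linux/mroute6.h, both defined
+ * as 32, which is why a single fallback value of 32 is used here.)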
+ */ +#define MAXVIFS 32 +#endif + +#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev)) + +/* mfc_flags: + * MFC_STATIC - the entry was added statically (not by a routing daemon) + * MFC_OFFLOAD - the entry was offloaded to the hardware + */ +enum { + MFC_STATIC = BIT(0), + MFC_OFFLOAD = BIT(1), +}; + +/** + * struct mr_mfc - common multicast routing entries + * @mnode: rhashtable list + * @mfc_parent: source interface (iif) + * @mfc_flags: entry flags + * @expires: unresolved entry expire time + * @unresolved: unresolved cached skbs + * @last_assert: time of last assert + * @minvif: minimum VIF id + * @maxvif: maximum VIF id + * @bytes: bytes that have passed for this entry + * @pkt: packets that have passed for this entry + * @wrong_if: number of wrong source interface hits + * @lastuse: time of last use of the group (traffic or update) + * @ttls: OIF TTL threshold array + * @refcount: reference count for this entry + * @list: global entry list + * @rcu: used for entry destruction + * @free: Operation used for freeing an entry under RCU + */ +struct mr_mfc { + struct rhlist_head mnode; + unsigned short mfc_parent; + int mfc_flags; + + union { + struct { + unsigned long expires; + struct sk_buff_head unresolved; + } unres; + struct { + unsigned long last_assert; + int minvif; + int maxvif; + unsigned long bytes; + unsigned long pkt; + unsigned long wrong_if; + unsigned long lastuse; + unsigned char ttls[MAXVIFS]; + refcount_t refcount; + } res; + } mfc_un; + struct list_head list; + struct rcu_head rcu; + void (*free)(struct rcu_head *head); +}; + +static inline void mr_cache_put(struct mr_mfc *c) +{ + if (refcount_dec_and_test(&c->mfc_un.res.refcount)) + call_rcu(&c->rcu, c->free); +} + +static inline void mr_cache_hold(struct mr_mfc *c) +{ + refcount_inc(&c->mfc_un.res.refcount); +} + +struct mfc_entry_notifier_info { + struct fib_notifier_info info; + struct mr_mfc *mfc; + u32 tb_id; +}; + +static inline int mr_call_mfc_notifier(struct notifier_block *nb, + struct net *net, + unsigned short family, + enum fib_event_type event_type, + struct mr_mfc *mfc, u32 tb_id) +{ + struct mfc_entry_notifier_info info = { + .info = { + .family = family, + .net = net, + }, + .mfc = mfc, + .tb_id = tb_id + }; + + return call_fib_notifier(nb, net, event_type, &info.info); +} + +static inline int mr_call_mfc_notifiers(struct net *net, + unsigned short family, + enum fib_event_type event_type, + struct mr_mfc *mfc, u32 tb_id, + unsigned int *ipmr_seq) +{ + struct mfc_entry_notifier_info info = { + .info = { + .family = family, + .net = net, + }, + .mfc = mfc, + .tb_id = tb_id + }; + + ASSERT_RTNL(); + (*ipmr_seq)++; + return call_fib_notifiers(net, event_type, &info.info); +} + +struct mr_table; + +/** + * struct mr_table_ops - callbacks and info for protocol-specific ops + * @rht_params: parameters for accessing the MFC hash + * @cmparg_any: a hash key to be used for matching on (*,*) routes + */ +struct mr_table_ops { + const struct rhashtable_params *rht_params; + void *cmparg_any; +}; + +/** + * struct mr_table - a multicast routing table + * @list: entry within a list of multicast routing tables + * @net: net where this table belongs + * @ops: protocol specific operations + * @id: identifier of the table + * @mroute_sk: socket associated with the table + * @ipmr_expire_timer: timer for handling unresolved routes + * @mfc_unres_queue: list of unresolved MFC entries + * @vif_table: array containing all possible vifs + * @mfc_hash: Hash table of all resolved routes for easy lookup + * 
@mfc_cache_list: list of resovled routes for possible traversal + * @maxvif: Identifier of highest value vif currently in use + * @cache_resolve_queue_len: current size of unresolved queue + * @mroute_do_assert: Whether to inform userspace on wrong ingress + * @mroute_do_pim: Whether to receive IGMP PIMv1 + * @mroute_reg_vif_num: PIM-device vif index + */ +struct mr_table { + struct list_head list; + possible_net_t net; + struct mr_table_ops ops; + u32 id; + struct sock __rcu *mroute_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc_unres_queue; + struct vif_device vif_table[MAXVIFS]; + struct rhltable mfc_hash; + struct list_head mfc_cache_list; + int maxvif; + atomic_t cache_resolve_queue_len; + bool mroute_do_assert; + bool mroute_do_pim; + bool mroute_do_wrvifwhole; + int mroute_reg_vif_num; +}; + +#ifdef CONFIG_IP_MROUTE_COMMON +void vif_device_init(struct vif_device *v, + struct net_device *dev, + unsigned long rate_limit, + unsigned char threshold, + unsigned short flags, + unsigned short get_iflink_mask); + +struct mr_table * +mr_table_alloc(struct net *net, u32 id, + struct mr_table_ops *ops, + void (*expire_func)(struct timer_list *t), + void (*table_set)(struct mr_table *mrt, + struct net *net)); + +/* These actually return 'struct mr_mfc *', but to avoid need for explicit + * castings they simply return void. + */ +void *mr_mfc_find_parent(struct mr_table *mrt, + void *hasharg, int parent); +void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi); +void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg); + +int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, + struct mr_mfc *c, struct rtmsg *rtm); +int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, + struct mr_table *(*iter)(struct net *net, + struct mr_table *mrt), + int (*fill)(struct mr_table *mrt, + struct sk_buff *skb, + u32 portid, u32 seq, struct mr_mfc *c, + int cmd, int flags), + spinlock_t *lock); + +int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family, + int (*rules_dump)(struct net *net, + struct notifier_block *nb), + struct mr_table *(*mr_iter)(struct net *net, + struct mr_table *mrt), + rwlock_t *mrt_lock); +#else +static inline void vif_device_init(struct vif_device *v, + struct net_device *dev, + unsigned long rate_limit, + unsigned char threshold, + unsigned short flags, + unsigned short get_iflink_mask) +{ +} + +static inline void *mr_mfc_find_parent(struct mr_table *mrt, + void *hasharg, int parent) +{ + return NULL; +} + +static inline void *mr_mfc_find_any_parent(struct mr_table *mrt, + int vifi) +{ + return NULL; +} + +static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt, + int vifi, void *hasharg) +{ + return NULL; +} + +static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, + struct mr_mfc *c, struct rtmsg *rtm) +{ + return -EINVAL; +} + +static inline int +mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, + struct mr_table *(*iter)(struct net *net, + struct mr_table *mrt), + int (*fill)(struct mr_table *mrt, + struct sk_buff *skb, + u32 portid, u32 seq, struct mr_mfc *c, + int cmd, int flags), + spinlock_t *lock) +{ + return -EINVAL; +} + +static inline int mr_dump(struct net *net, struct notifier_block *nb, + unsigned short family, + int (*rules_dump)(struct net *net, + struct notifier_block *nb), + struct mr_table *(*mr_iter)(struct net *net, + struct mr_table *mrt), + rwlock_t *mrt_lock) +{ + return -EINVAL; +} +#endif + +static inline void *mr_mfc_find(struct 
mr_table *mrt, void *hasharg) +{ + return mr_mfc_find_parent(mrt, hasharg, -1); +} + +#ifdef CONFIG_PROC_FS +struct mr_vif_iter { + struct seq_net_private p; + struct mr_table *mrt; + int ct; +}; + +struct mr_mfc_iter { + struct seq_net_private p; + struct mr_table *mrt; + struct list_head *cache; + + /* Lock protecting the mr_table's unresolved queue */ + spinlock_t *lock; +}; + +#ifdef CONFIG_IP_MROUTE_COMMON +void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos); +void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos); + +static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? mr_vif_seq_idx(seq_file_net(seq), + seq->private, *pos - 1) + : SEQ_START_TOKEN; +} + +/* These actually return 'struct mr_mfc *', but to avoid need for explicit + * castings they simply return void. + */ +void *mr_mfc_seq_idx(struct net *net, + struct mr_mfc_iter *it, loff_t pos); +void *mr_mfc_seq_next(struct seq_file *seq, void *v, + loff_t *pos); + +static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos, + struct mr_table *mrt, spinlock_t *lock) +{ + struct mr_mfc_iter *it = seq->private; + + it->mrt = mrt; + it->cache = NULL; + it->lock = lock; + + return *pos ? mr_mfc_seq_idx(seq_file_net(seq), + seq->private, *pos - 1) + : SEQ_START_TOKEN; +} + +static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v) +{ + struct mr_mfc_iter *it = seq->private; + struct mr_table *mrt = it->mrt; + + if (it->cache == &mrt->mfc_unres_queue) + spin_unlock_bh(it->lock); + else if (it->cache == &mrt->mfc_cache_list) + rcu_read_unlock(); +} +#else +static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, + loff_t pos) +{ + return NULL; +} + +static inline void *mr_vif_seq_next(struct seq_file *seq, + void *v, loff_t *pos) +{ + return NULL; +} + +static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos) +{ + return NULL; +} + +static inline void *mr_mfc_seq_idx(struct net *net, + struct mr_mfc_iter *it, loff_t pos) +{ + return NULL; +} + +static inline void *mr_mfc_seq_next(struct seq_file *seq, void *v, + loff_t *pos) +{ + return NULL; +} + +static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos, + struct mr_table *mrt, spinlock_t *lock) +{ + return NULL; +} + +static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v) +{ +} +#endif +#endif +#endif diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h new file mode 100644 index 000000000..b7a5d4c72 --- /dev/null +++ b/include/linux/msdos_fs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MSDOS_FS_H +#define _LINUX_MSDOS_FS_H + +#include + +/* media of boot sector */ +static inline int fat_valid_media(u8 media) +{ + return 0xf8 <= media || media == 0xf0; +} +#endif /* !_LINUX_MSDOS_FS_H */ diff --git a/include/linux/msg.h b/include/linux/msg.h new file mode 100644 index 000000000..9a972a296 --- /dev/null +++ b/include/linux/msg.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MSG_H +#define _LINUX_MSG_H + +#include +#include + +/* one msg_msg structure for each message */ +struct msg_msg { + struct list_head m_list; + long m_type; + size_t m_ts; /* message text size */ + struct msg_msgseg *next; + void *security; + /* the actual message follows immediately */ +}; + +#endif /* _LINUX_MSG_H */ diff --git a/include/linux/msi.h b/include/linux/msi.h new file mode 100644 index 000000000..62982e6af --- /dev/null +++ b/include/linux/msi.h @@ -0,0 +1,358 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_MSI_H +#define LINUX_MSI_H + +#include +#include + +struct msi_msg { + u32 address_lo; /* low 32 bits of msi message address */ + u32 address_hi; /* high 32 bits of msi message address */ + u32 data; /* 16 bits of msi message data */ +}; + +extern int pci_msi_ignore_mask; +/* Helper functions */ +struct irq_data; +struct msi_desc; +struct pci_dev; +struct platform_msi_priv_data; +void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +#ifdef CONFIG_GENERIC_MSI_IRQ +void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); +#else +static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) +{ +} +#endif + +typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc, + struct msi_msg *msg); + +/** + * platform_msi_desc - Platform device specific msi descriptor data + * @msi_priv_data: Pointer to platform private data + * @msi_index: The index of the MSI descriptor for multi MSI + */ +struct platform_msi_desc { + struct platform_msi_priv_data *msi_priv_data; + u16 msi_index; +}; + +/** + * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data + * @msi_index: The index of the MSI descriptor + */ +struct fsl_mc_msi_desc { + u16 msi_index; +}; + +/** + * struct msi_desc - Descriptor structure for MSI based interrupts + * @list: List head for management + * @irq: The base interrupt number + * @nvec_used: The number of vectors used + * @dev: Pointer to the device which uses this descriptor + * @msg: The last set MSI message cached for reuse + * @affinity: Optional pointer to a cpu affinity mask for this descriptor + * + * @masked: [PCI MSI/X] Mask bits + * @is_msix: [PCI MSI/X] True if MSI-X + * @multiple: [PCI MSI/X] log2 num of messages allocated + * @multi_cap: [PCI MSI/X] log2 num of messages supported + * @maskbit: [PCI MSI/X] Mask-Pending bit supported? + * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit + * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor + * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq + * @mask_pos: [PCI MSI] Mask register position + * @mask_base: [PCI MSI-X] Mask register base address + * @platform: [platform] Platform device specific msi descriptor data + * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data + */ +struct msi_desc { + /* Shared device/bus type independent data */ + struct list_head list; + unsigned int irq; + unsigned int nvec_used; + struct device *dev; + struct msi_msg msg; + struct cpumask *affinity; + + union { + /* PCI MSI/X specific data */ + struct { + u32 masked; + struct { + __u8 is_msix : 1; + __u8 multiple : 3; + __u8 multi_cap : 3; + __u8 maskbit : 1; + __u8 is_64 : 1; + __u16 entry_nr; + unsigned default_irq; + } msi_attrib; + union { + u8 mask_pos; + void __iomem *mask_base; + }; + }; + + /* + * Non PCI variants add their data structure here. New + * entries need to use a named structure. We want + * proper name spaces for this. The PCI part is + * anonymous for now as it would require an immediate + * tree wide cleanup. 
+ */ + struct platform_msi_desc platform; + struct fsl_mc_msi_desc fsl_mc; + }; +}; + +/* Helpers to hide struct msi_desc implementation details */ +#define msi_desc_to_dev(desc) ((desc)->dev) +#define dev_to_msi_list(dev) (&(dev)->msi_list) +#define first_msi_entry(dev) \ + list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) +#define for_each_msi_entry(desc, dev) \ + list_for_each_entry((desc), dev_to_msi_list((dev)), list) +#define for_each_msi_entry_safe(desc, tmp, dev) \ + list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) +#define for_each_msi_vector(desc, __irq, dev) \ + for_each_msi_entry((desc), (dev)) \ + if ((desc)->irq) \ + for (__irq = (desc)->irq; \ + __irq < ((desc)->irq + (desc)->nvec_used); \ + __irq++) + +#ifdef CONFIG_PCI_MSI +#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) +#define for_each_pci_msi_entry(desc, pdev) \ + for_each_msi_entry((desc), &(pdev)->dev) + +struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); +void *msi_desc_to_pci_sysdata(struct msi_desc *desc); +void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); +#else /* CONFIG_PCI_MSI */ +static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc) +{ + return NULL; +} +static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) +{ +} +#endif /* CONFIG_PCI_MSI */ + +struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, + const struct cpumask *affinity); +void free_msi_entry(struct msi_desc *entry); +void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); + +u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); +void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); +void pci_msi_mask_irq(struct irq_data *data); +void pci_msi_unmask_irq(struct irq_data *data); + +/* Conversion helpers. Should be removed after merging */ +static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +{ + __pci_write_msi_msg(entry, msg); +} +static inline void write_msi_msg(int irq, struct msi_msg *msg) +{ + pci_write_msi_msg(irq, msg); +} +static inline void mask_msi_irq(struct irq_data *data) +{ + pci_msi_mask_irq(data); +} +static inline void unmask_msi_irq(struct irq_data *data) +{ + pci_msi_unmask_irq(data); +} + +/* + * The arch hooks to setup up msi irqs. Those functions are + * implemented as weak symbols so that they /can/ be overriden by + * architecture specific code if needed. 
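+ * (Illustrative note, assumption based on the generic implementation: the
+ * weak defaults live in drivers/pci/msi.c, so an architecture with special
+ * MSI wiring simply provides its own non-weak arch_setup_msi_irq() /
+ * arch_teardown_msi_irq() and the weak versions are overridden at link
+ * time.)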
+ */ +int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); +void arch_teardown_msi_irq(unsigned int irq); +int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); +void arch_teardown_msi_irqs(struct pci_dev *dev); +void arch_restore_msi_irqs(struct pci_dev *dev); + +void default_teardown_msi_irqs(struct pci_dev *dev); +void default_restore_msi_irqs(struct pci_dev *dev); + +struct msi_controller { + struct module *owner; + struct device *dev; + struct device_node *of_node; + struct list_head list; + + int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, + struct msi_desc *desc); + int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, + int nvec, int type); + void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); +}; + +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + +#include +#include + +struct irq_domain; +struct irq_domain_ops; +struct irq_chip; +struct device_node; +struct fwnode_handle; +struct msi_domain_info; + +/** + * struct msi_domain_ops - MSI interrupt domain callbacks + * @get_hwirq: Retrieve the resulting hw irq number + * @msi_init: Domain specific init function for MSI interrupts + * @msi_free: Domain specific function to free a MSI interrupts + * @msi_check: Callback for verification of the domain/info/dev data + * @msi_prepare: Prepare the allocation of the interrupts in the domain + * @msi_finish: Optional callback to finalize the allocation + * @set_desc: Set the msi descriptor for an interrupt + * @handle_error: Optional error handler if the allocation fails + * + * @get_hwirq, @msi_init and @msi_free are callbacks used by + * msi_create_irq_domain() and related interfaces + * + * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error + * are callbacks used by msi_domain_alloc_irqs() and related + * interfaces which are based on msi_desc. + */ +struct msi_domain_ops { + irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, + msi_alloc_info_t *arg); + int (*msi_init)(struct irq_domain *domain, + struct msi_domain_info *info, + unsigned int virq, irq_hw_number_t hwirq, + msi_alloc_info_t *arg); + void (*msi_free)(struct irq_domain *domain, + struct msi_domain_info *info, + unsigned int virq); + int (*msi_check)(struct irq_domain *domain, + struct msi_domain_info *info, + struct device *dev); + int (*msi_prepare)(struct irq_domain *domain, + struct device *dev, int nvec, + msi_alloc_info_t *arg); + void (*msi_finish)(msi_alloc_info_t *arg, int retval); + void (*set_desc)(msi_alloc_info_t *arg, + struct msi_desc *desc); + int (*handle_error)(struct irq_domain *domain, + struct msi_desc *desc, int error); +}; + +/** + * struct msi_domain_info - MSI interrupt domain data + * @flags: Flags to decribe features and capabilities + * @ops: The callback data structure + * @chip: Optional: associated interrupt chip + * @chip_data: Optional: associated interrupt chip data + * @handler: Optional: associated interrupt flow handler + * @handler_data: Optional: associated interrupt flow handler data + * @handler_name: Optional: associated interrupt flow handler name + * @data: Optional: domain specific data + */ +struct msi_domain_info { + u32 flags; + struct msi_domain_ops *ops; + struct irq_chip *chip; + void *chip_data; + irq_flow_handler_t handler; + void *handler_data; + const char *handler_name; + void *data; +}; + +/* Flags for msi_domain_info */ +enum { + /* + * Init non implemented ops callbacks with default MSI domain + * callbacks. 
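+	 * (Illustrative note, not in the original header: an irqchip driver
+	 * creating an MSI domain typically sets
+	 * .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS
+	 * in its msi_domain_info and only overrides the callbacks it
+	 * actually needs.)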
+ */ + MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0), + /* + * Init non implemented chip callbacks with default MSI chip + * callbacks. + */ + MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), + /* Support multiple PCI MSI interrupts */ + MSI_FLAG_MULTI_PCI_MSI = (1 << 2), + /* Support PCI MSIX interrupts */ + MSI_FLAG_PCI_MSIX = (1 << 3), + /* Needs early activate, required for PCI */ + MSI_FLAG_ACTIVATE_EARLY = (1 << 4), + /* + * Must reactivate when irq is started even when + * MSI_FLAG_ACTIVATE_EARLY has been set. + */ + MSI_FLAG_MUST_REACTIVATE = (1 << 5), + /* Is level-triggered capable, using two messages */ + MSI_FLAG_LEVEL_CAPABLE = (1 << 6), +}; + +int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force); + +struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, + struct msi_domain_info *info, + struct irq_domain *parent); +int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + int nvec); +void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); +struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); + +struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, + struct msi_domain_info *info, + struct irq_domain *parent); +int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg); +void platform_msi_domain_free_irqs(struct device *dev); + +/* When an MSI domain is used as an intermediate domain */ +int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *args); +int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, + int virq, int nvec, msi_alloc_info_t *args); +struct irq_domain * +platform_msi_create_device_domain(struct device *dev, + unsigned int nvec, + irq_write_msi_msg_t write_msi_msg, + const struct irq_domain_ops *ops, + void *host_data); +int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs); +void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nvec); +void *platform_msi_get_host_data(struct irq_domain *domain); +#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ + +#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN +void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); +struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, + struct msi_domain_info *info, + struct irq_domain *parent); +irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, + struct msi_desc *desc); +int pci_msi_domain_check_cap(struct irq_domain *domain, + struct msi_domain_info *info, struct device *dev); +u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); +struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); +#else +static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) +{ + return NULL; +} +#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ + +#endif /* LINUX_MSI_H */ diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h new file mode 100644 index 000000000..3102bd754 --- /dev/null +++ b/include/linux/mtd/bbm.h @@ -0,0 +1,169 @@ +/* + * NAND family Bad Block Management (BBM) header file + * - Bad Block Table (BBT) implementation + * + * Copyright © 2005 Samsung Electronics + * Kyungmin Park + * + * Copyright © 2000-2005 + * Thomas Gleixner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free 
Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ +#ifndef __LINUX_MTD_BBM_H +#define __LINUX_MTD_BBM_H + +/* The maximum number of NAND chips in an array */ +#define NAND_MAX_CHIPS 8 + +/** + * struct nand_bbt_descr - bad block table descriptor + * @options: options for this descriptor + * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE + * when bbt is searched, then we store the found bbts pages here. + * Its an array and supports up to 8 chips now + * @offs: offset of the pattern in the oob area of the page + * @veroffs: offset of the bbt version counter in the oob are of the page + * @version: version read from the bbt page during scan + * @len: length of the pattern, if 0 no pattern check is performed + * @maxblocks: maximum number of blocks to search for a bbt. This number of + * blocks is reserved at the end of the device where the tables are + * written. + * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than + * bad) block in the stored bbt + * @pattern: pattern to identify bad block table or factory marked good / + * bad blocks, can be NULL, if len = 0 + * + * Descriptor for the bad block table marker and the descriptor for the + * pattern which identifies good and bad blocks. The assumption is made + * that the pattern and the version count are always located in the oob area + * of the first block. + */ +struct nand_bbt_descr { + int options; + int pages[NAND_MAX_CHIPS]; + int offs; + int veroffs; + uint8_t version[NAND_MAX_CHIPS]; + int len; + int maxblocks; + int reserved_block_code; + uint8_t *pattern; +}; + +/* Options for the bad block table descriptors */ + +/* The number of bits used per block in the bbt on the device */ +#define NAND_BBT_NRBITS_MSK 0x0000000F +#define NAND_BBT_1BIT 0x00000001 +#define NAND_BBT_2BIT 0x00000002 +#define NAND_BBT_4BIT 0x00000004 +#define NAND_BBT_8BIT 0x00000008 +/* The bad block table is in the last good block of the device */ +#define NAND_BBT_LASTBLOCK 0x00000010 +/* The bbt is at the given page, else we must scan for the bbt */ +#define NAND_BBT_ABSPAGE 0x00000020 +/* bbt is stored per chip on multichip devices */ +#define NAND_BBT_PERCHIP 0x00000080 +/* bbt has a version counter at offset veroffs */ +#define NAND_BBT_VERSION 0x00000100 +/* Create a bbt if none exists */ +#define NAND_BBT_CREATE 0x00000200 +/* + * Create an empty BBT with no vendor information. Vendor's information may be + * unavailable, for example, if the NAND controller has a different data and OOB + * layout or if this information is already purged. Must be used in conjunction + * with NAND_BBT_CREATE. 
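+ * (Illustrative note, not in the original header: these NAND_BBT_* option
+ * bits are OR-ed into nand_chip.bbt_options by the controller driver; a
+ * hypothetical driver keeping an on-flash table with in-band markers would
+ * set chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB.)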
+ */ +#define NAND_BBT_CREATE_EMPTY 0x00000400 +/* Write bbt if neccecary */ +#define NAND_BBT_WRITE 0x00002000 +/* Read and write back block contents when writing bbt */ +#define NAND_BBT_SAVECONTENT 0x00004000 +/* Search good / bad pattern on the first and the second page */ +#define NAND_BBT_SCAN2NDPAGE 0x00008000 +/* Search good / bad pattern on the last page of the eraseblock */ +#define NAND_BBT_SCANLASTPAGE 0x00010000 +/* + * Use a flash based bad block table. By default, OOB identifier is saved in + * OOB area. This option is passed to the default bad block table function. + */ +#define NAND_BBT_USE_FLASH 0x00020000 +/* + * Do not store flash based bad block table marker in the OOB area; store it + * in-band. + */ +#define NAND_BBT_NO_OOB 0x00040000 +/* + * Do not write new bad block markers to OOB; useful, e.g., when ECC covers + * entire spare area. Must be used with NAND_BBT_USE_FLASH. + */ +#define NAND_BBT_NO_OOB_BBM 0x00080000 + +/* + * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr + * was allocated dynamicaly and must be freed in nand_release(). Has no meaning + * in nand_chip.bbt_options. + */ +#define NAND_BBT_DYNAMICSTRUCT 0x80000000 + +/* The maximum number of blocks to scan for a bbt */ +#define NAND_BBT_SCAN_MAXBLOCKS 4 + +/* + * Constants for oob configuration + */ +#define NAND_SMALL_BADBLOCK_POS 5 +#define NAND_LARGE_BADBLOCK_POS 0 +#define ONENAND_BADBLOCK_POS 0 + +/* + * Bad block scanning errors + */ +#define ONENAND_BBT_READ_ERROR 1 +#define ONENAND_BBT_READ_ECC_ERROR 2 +#define ONENAND_BBT_READ_FATAL_ERROR 4 + +/** + * struct bbm_info - [GENERIC] Bad Block Table data structure + * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry + * @badblockpos: [INTERN] position of the bad block marker in the oob area + * @options: options for this descriptor + * @bbt: [INTERN] bad block table pointer + * @isbad_bbt: function to determine if a block is bad + * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for + * initial bad block scan + * @priv: [OPTIONAL] pointer to private bbm date + */ +struct bbm_info { + int bbt_erase_shift; + int badblockpos; + int options; + + uint8_t *bbt; + + int (*isbad_bbt)(struct mtd_info *mtd, loff_t ofs, int allowbbt); + + /* TODO Add more NAND specific fileds */ + struct nand_bbt_descr *badblock_pattern; + + void *priv; +}; + +/* OneNAND BBT interface */ +extern int onenand_default_bbt(struct mtd_info *mtd); + +#endif /* __LINUX_MTD_BBM_H */ diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h new file mode 100644 index 000000000..e93837f64 --- /dev/null +++ b/include/linux/mtd/blktrans.h @@ -0,0 +1,96 @@ +/* + * Copyright © 2003-2010 David Woodhouse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_TRANS_H__ +#define __MTD_TRANS_H__ + +#include +#include +#include +#include + +struct hd_geometry; +struct mtd_info; +struct mtd_blktrans_ops; +struct file; +struct inode; + +struct mtd_blktrans_dev { + struct mtd_blktrans_ops *tr; + struct list_head list; + struct mtd_info *mtd; + struct mutex lock; + int devnum; + bool bg_stop; + unsigned long size; + int readonly; + int open; + struct kref ref; + struct gendisk *disk; + struct attribute_group *disk_attributes; + struct workqueue_struct *wq; + struct work_struct work; + struct request_queue *rq; + spinlock_t queue_lock; + void *priv; + fmode_t file_mode; +}; + +struct mtd_blktrans_ops { + char *name; + int major; + int part_bits; + int blksize; + int blkshift; + + /* Access functions */ + int (*readsect)(struct mtd_blktrans_dev *dev, + unsigned long block, char *buffer); + int (*writesect)(struct mtd_blktrans_dev *dev, + unsigned long block, char *buffer); + int (*discard)(struct mtd_blktrans_dev *dev, + unsigned long block, unsigned nr_blocks); + void (*background)(struct mtd_blktrans_dev *dev); + + /* Block layer ioctls */ + int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo); + int (*flush)(struct mtd_blktrans_dev *dev); + + /* Called with mtd_table_mutex held; no race with add/remove */ + int (*open)(struct mtd_blktrans_dev *dev); + void (*release)(struct mtd_blktrans_dev *dev); + + /* Called on {de,}registration and on subsequent addition/removal + of devices, with mtd_table_mutex held. */ + void (*add_mtd)(struct mtd_blktrans_ops *tr, struct mtd_info *mtd); + void (*remove_dev)(struct mtd_blktrans_dev *dev); + + struct list_head devs; + struct list_head list; + struct module *owner; +}; + +extern int register_mtd_blktrans(struct mtd_blktrans_ops *tr); +extern int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr); +extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev); +extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev); +extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev); + + +#endif /* __MTD_TRANS_H__ */ diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h new file mode 100644 index 000000000..4ead3d155 --- /dev/null +++ b/include/linux/mtd/cfi.h @@ -0,0 +1,393 @@ +/* + * Copyright © 2000-2010 David Woodhouse et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_CFI_H__ +#define __MTD_CFI_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_MTD_CFI_I1 +#define cfi_interleave(cfi) 1 +#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1) +#else +#define cfi_interleave_is_1(cfi) (0) +#endif + +#ifdef CONFIG_MTD_CFI_I2 +# ifdef cfi_interleave +# undef cfi_interleave +# define cfi_interleave(cfi) ((cfi)->interleave) +# else +# define cfi_interleave(cfi) 2 +# endif +#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2) +#else +#define cfi_interleave_is_2(cfi) (0) +#endif + +#ifdef CONFIG_MTD_CFI_I4 +# ifdef cfi_interleave +# undef cfi_interleave +# define cfi_interleave(cfi) ((cfi)->interleave) +# else +# define cfi_interleave(cfi) 4 +# endif +#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4) +#else +#define cfi_interleave_is_4(cfi) (0) +#endif + +#ifdef CONFIG_MTD_CFI_I8 +# ifdef cfi_interleave +# undef cfi_interleave +# define cfi_interleave(cfi) ((cfi)->interleave) +# else +# define cfi_interleave(cfi) 8 +# endif +#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8) +#else +#define cfi_interleave_is_8(cfi) (0) +#endif + +#ifndef cfi_interleave +#warning No CONFIG_MTD_CFI_Ix selected. No NOR chip support can work. +static inline int cfi_interleave(void *cfi) +{ + BUG(); + return 0; +} +#endif + +static inline int cfi_interleave_supported(int i) +{ + switch (i) { +#ifdef CONFIG_MTD_CFI_I1 + case 1: +#endif +#ifdef CONFIG_MTD_CFI_I2 + case 2: +#endif +#ifdef CONFIG_MTD_CFI_I4 + case 4: +#endif +#ifdef CONFIG_MTD_CFI_I8 + case 8: +#endif + return 1; + + default: + return 0; + } +} + + +/* NB: these values must represents the number of bytes needed to meet the + * device type (x8, x16, x32). Eg. a 32 bit device is 4 x 8 bytes. + * These numbers are used in calculations. + */ +#define CFI_DEVICETYPE_X8 (8 / 8) +#define CFI_DEVICETYPE_X16 (16 / 8) +#define CFI_DEVICETYPE_X32 (32 / 8) +#define CFI_DEVICETYPE_X64 (64 / 8) + + +/* Device Interface Code Assignments from the "Common Flash Memory Interface + * Publication 100" dated December 1, 2001. + */ +#define CFI_INTERFACE_X8_ASYNC 0x0000 +#define CFI_INTERFACE_X16_ASYNC 0x0001 +#define CFI_INTERFACE_X8_BY_X16_ASYNC 0x0002 +#define CFI_INTERFACE_X32_ASYNC 0x0003 +#define CFI_INTERFACE_X16_BY_X32_ASYNC 0x0005 +#define CFI_INTERFACE_NOT_ALLOWED 0xffff + + +/* NB: We keep these structures in memory in HOST byteorder, except + * where individually noted. 
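+ *
+ * For example (driver-side sketch, for illustration only): once any byte-order
+ * conversion noted below has been applied, one 32-bit EraseRegionInfo entry
+ * encodes, per the CFI specification, the erase-block count minus one in its
+ * low 16 bits and the erase-block size divided by 256 in its high 16 bits:
+ *
+ *	uint32_t info = cfi->cfiq->EraseRegionInfo[i];
+ *	int blocks = (info & 0xffff) + 1;
+ *	unsigned long blocksize = (info >> 16) * 256;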
+ */ + +/* Basic Query Structure */ +struct cfi_ident { + uint8_t qry[3]; + uint16_t P_ID; + uint16_t P_ADR; + uint16_t A_ID; + uint16_t A_ADR; + uint8_t VccMin; + uint8_t VccMax; + uint8_t VppMin; + uint8_t VppMax; + uint8_t WordWriteTimeoutTyp; + uint8_t BufWriteTimeoutTyp; + uint8_t BlockEraseTimeoutTyp; + uint8_t ChipEraseTimeoutTyp; + uint8_t WordWriteTimeoutMax; + uint8_t BufWriteTimeoutMax; + uint8_t BlockEraseTimeoutMax; + uint8_t ChipEraseTimeoutMax; + uint8_t DevSize; + uint16_t InterfaceDesc; + uint16_t MaxBufWriteSize; + uint8_t NumEraseRegions; + uint32_t EraseRegionInfo[0]; /* Not host ordered */ +} __packed; + +/* Extended Query Structure for both PRI and ALT */ + +struct cfi_extquery { + uint8_t pri[3]; + uint8_t MajorVersion; + uint8_t MinorVersion; +} __packed; + +/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */ + +struct cfi_pri_intelext { + uint8_t pri[3]; + uint8_t MajorVersion; + uint8_t MinorVersion; + uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature + block follows - FIXME - not currently supported */ + uint8_t SuspendCmdSupport; + uint16_t BlkStatusRegMask; + uint8_t VccOptimal; + uint8_t VppOptimal; + uint8_t NumProtectionFields; + uint16_t ProtRegAddr; + uint8_t FactProtRegSize; + uint8_t UserProtRegSize; + uint8_t extra[0]; +} __packed; + +struct cfi_intelext_otpinfo { + uint32_t ProtRegAddr; + uint16_t FactGroups; + uint8_t FactProtRegSize; + uint16_t UserGroups; + uint8_t UserProtRegSize; +} __packed; + +struct cfi_intelext_blockinfo { + uint16_t NumIdentBlocks; + uint16_t BlockSize; + uint16_t MinBlockEraseCycles; + uint8_t BitsPerCell; + uint8_t BlockCap; +} __packed; + +struct cfi_intelext_regioninfo { + uint16_t NumIdentPartitions; + uint8_t NumOpAllowed; + uint8_t NumOpAllowedSimProgMode; + uint8_t NumOpAllowedSimEraMode; + uint8_t NumBlockTypes; + struct cfi_intelext_blockinfo BlockTypes[1]; +} __packed; + +struct cfi_intelext_programming_regioninfo { + uint8_t ProgRegShift; + uint8_t Reserved1; + uint8_t ControlValid; + uint8_t Reserved2; + uint8_t ControlInvalid; + uint8_t Reserved3; +} __packed; + +/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */ + +struct cfi_pri_amdstd { + uint8_t pri[3]; + uint8_t MajorVersion; + uint8_t MinorVersion; + uint8_t SiliconRevision; /* bits 1-0: Address Sensitive Unlock */ + uint8_t EraseSuspend; + uint8_t BlkProt; + uint8_t TmpBlkUnprotect; + uint8_t BlkProtUnprot; + uint8_t SimultaneousOps; + uint8_t BurstMode; + uint8_t PageMode; + uint8_t VppMin; + uint8_t VppMax; + uint8_t TopBottom; +} __packed; + +/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */ + +struct cfi_pri_atmel { + uint8_t pri[3]; + uint8_t MajorVersion; + uint8_t MinorVersion; + uint8_t Features; + uint8_t BottomBoot; + uint8_t BurstMode; + uint8_t PageMode; +} __packed; + +struct cfi_pri_query { + uint8_t NumFields; + uint32_t ProtField[1]; /* Not host ordered */ +} __packed; + +struct cfi_bri_query { + uint8_t PageModeReadCap; + uint8_t NumFields; + uint32_t ConfField[1]; /* Not host ordered */ +} __packed; + +#define P_ID_NONE 0x0000 +#define P_ID_INTEL_EXT 0x0001 +#define P_ID_AMD_STD 0x0002 +#define P_ID_INTEL_STD 0x0003 +#define P_ID_AMD_EXT 0x0004 +#define P_ID_WINBOND 0x0006 +#define P_ID_ST_ADV 0x0020 +#define P_ID_MITSUBISHI_STD 0x0100 +#define P_ID_MITSUBISHI_EXT 0x0101 +#define P_ID_SST_PAGE 0x0102 +#define P_ID_SST_OLD 0x0701 +#define P_ID_INTEL_PERFORMANCE 0x0200 +#define P_ID_INTEL_DATA 0x0210 +#define P_ID_RESERVED 0xffff + + +#define CFI_MODE_CFI 
1 +#define CFI_MODE_JEDEC 0 + +struct cfi_private { + uint16_t cmdset; + void *cmdset_priv; + int interleave; + int device_type; + int cfi_mode; /* Are we a JEDEC device pretending to be CFI? */ + int addr_unlock1; + int addr_unlock2; + struct mtd_info *(*cmdset_setup)(struct map_info *); + struct cfi_ident *cfiq; /* For now only one. We insist that all devs + must be of the same type. */ + int mfr, id; + int numchips; + map_word sector_erase_cmd; + unsigned long chipshift; /* Because they're of the same type */ + const char *im_name; /* inter_module name for cmdset_setup */ + unsigned long quirks; + struct flchip chips[0]; /* per-chip data structure for each chip */ +}; + +uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, + struct map_info *map, struct cfi_private *cfi); + +map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi); +#define CMD(x) cfi_build_cmd((x), map, cfi) + +unsigned long cfi_merge_status(map_word val, struct map_info *map, + struct cfi_private *cfi); +#define MERGESTATUS(x) cfi_merge_status((x), map, cfi) + +uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base, + struct map_info *map, struct cfi_private *cfi, + int type, map_word *prev_val); + +static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) +{ + map_word val = map_read(map, addr); + + if (map_bankwidth_is_1(map)) { + return val.x[0]; + } else if (map_bankwidth_is_2(map)) { + return cfi16_to_cpu(map, val.x[0]); + } else { + /* No point in a 64-bit byteswap since that would just be + swapping the responses from different chips, and we are + only interested in one chip (a representative sample) */ + return cfi32_to_cpu(map, val.x[0]); + } +} + +static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr) +{ + map_word val = map_read(map, addr); + + if (map_bankwidth_is_1(map)) { + return val.x[0] & 0xff; + } else if (map_bankwidth_is_2(map)) { + return cfi16_to_cpu(map, val.x[0]); + } else { + /* No point in a 64-bit byteswap since that would just be + swapping the responses from different chips, and we are + only interested in one chip (a representative sample) */ + return cfi32_to_cpu(map, val.x[0]); + } +} + +void cfi_udelay(int us); + +int __xipram cfi_qry_present(struct map_info *map, __u32 base, + struct cfi_private *cfi); +int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map, + struct cfi_private *cfi); +void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map, + struct cfi_private *cfi); + +struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size, + const char* name); +struct cfi_fixup { + uint16_t mfr; + uint16_t id; + void (*fixup)(struct mtd_info *mtd); +}; + +#define CFI_MFR_ANY 0xFFFF +#define CFI_ID_ANY 0xFFFF +#define CFI_MFR_CONTINUATION 0x007F + +#define CFI_MFR_AMD 0x0001 +#define CFI_MFR_AMIC 0x0037 +#define CFI_MFR_ATMEL 0x001F +#define CFI_MFR_EON 0x001C +#define CFI_MFR_FUJITSU 0x0004 +#define CFI_MFR_HYUNDAI 0x00AD +#define CFI_MFR_INTEL 0x0089 +#define CFI_MFR_MACRONIX 0x00C2 +#define CFI_MFR_NEC 0x0010 +#define CFI_MFR_PMC 0x009D +#define CFI_MFR_SAMSUNG 0x00EC +#define CFI_MFR_SHARP 0x00B0 +#define CFI_MFR_SST 0x00BF +#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ +#define CFI_MFR_TOSHIBA 0x0098 +#define CFI_MFR_WINBOND 0x00DA + +void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); + +typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip, + unsigned long adr, int len, void *thunk); + +int cfi_varsize_frob(struct mtd_info *mtd, 
varsize_frob_t frob, + loff_t ofs, size_t len, void *thunk); + + +#endif /* __MTD_CFI_H__ */ diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h new file mode 100644 index 000000000..b97a62507 --- /dev/null +++ b/include/linux/mtd/cfi_endian.h @@ -0,0 +1,53 @@ +/* + * Copyright © 2001-2010 David Woodhouse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include + +#define CFI_HOST_ENDIAN 1 +#define CFI_LITTLE_ENDIAN 2 +#define CFI_BIG_ENDIAN 3 + +#if !defined(CONFIG_MTD_CFI_ADV_OPTIONS) || defined(CONFIG_MTD_CFI_NOSWAP) +#define CFI_DEFAULT_ENDIAN CFI_HOST_ENDIAN +#elif defined(CONFIG_MTD_CFI_LE_BYTE_SWAP) +#define CFI_DEFAULT_ENDIAN CFI_LITTLE_ENDIAN +#elif defined(CONFIG_MTD_CFI_BE_BYTE_SWAP) +#define CFI_DEFAULT_ENDIAN CFI_BIG_ENDIAN +#else +#error No CFI endianness defined +#endif + +#define cfi_default(s) ((s)?:CFI_DEFAULT_ENDIAN) +#define cfi_be(s) (cfi_default(s) == CFI_BIG_ENDIAN) +#define cfi_le(s) (cfi_default(s) == CFI_LITTLE_ENDIAN) +#define cfi_host(s) (cfi_default(s) == CFI_HOST_ENDIAN) + +#define cpu_to_cfi8(map, x) (x) +#define cfi8_to_cpu(map, x) (x) +#define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x)) +#define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x)) +#define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x)) +#define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x)) +#define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x)) +#define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x)) + +#define _cpu_to_cfi(w, s, x) (cfi_host(s)?(x):_swap_to_cfi(w, s, x)) +#define _cfi_to_cpu(w, s, x) (cfi_host(s)?(x):_swap_to_cpu(w, s, x)) +#define _swap_to_cfi(w, s, x) (cfi_be(s)?cpu_to_be##w(x):cpu_to_le##w(x)) +#define _swap_to_cpu(w, s, x) (cfi_be(s)?be##w##_to_cpu(x):le##w##_to_cpu(x)) diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h new file mode 100644 index 000000000..ccdbe93a9 --- /dev/null +++ b/include/linux/mtd/concat.h @@ -0,0 +1,34 @@ +/* + * MTD device concatenation layer definitions + * + * Copyright © 2002 Robert Kaiser + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef MTD_CONCAT_H +#define MTD_CONCAT_H + + +struct mtd_info *mtd_concat_create( + struct mtd_info *subdev[], /* subdevices to concatenate */ + int num_devs, /* number of subdevices */ + const char *name); /* name for the new device */ + +void mtd_concat_destroy(struct mtd_info *mtd); + +#endif + diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h new file mode 100644 index 000000000..407d1e556 --- /dev/null +++ b/include/linux/mtd/doc2000.h @@ -0,0 +1,220 @@ +/* + * Linux driver for Disk-On-Chip devices + * + * Copyright © 1999 Machine Vision Holdings, Inc. + * Copyright © 1999-2010 David Woodhouse + * Copyright © 2002-2003 Greg Ungerer + * Copyright © 2002-2003 SnapGear Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_DOC2000_H__ +#define __MTD_DOC2000_H__ + +#include +#include + +#define DoC_Sig1 0 +#define DoC_Sig2 1 + +#define DoC_ChipID 0x1000 +#define DoC_DOCStatus 0x1001 +#define DoC_DOCControl 0x1002 +#define DoC_FloorSelect 0x1003 +#define DoC_CDSNControl 0x1004 +#define DoC_CDSNDeviceSelect 0x1005 +#define DoC_ECCConf 0x1006 +#define DoC_2k_ECCStatus 0x1007 + +#define DoC_CDSNSlowIO 0x100d +#define DoC_ECCSyndrome0 0x1010 +#define DoC_ECCSyndrome1 0x1011 +#define DoC_ECCSyndrome2 0x1012 +#define DoC_ECCSyndrome3 0x1013 +#define DoC_ECCSyndrome4 0x1014 +#define DoC_ECCSyndrome5 0x1015 +#define DoC_AliasResolution 0x101b +#define DoC_ConfigInput 0x101c +#define DoC_ReadPipeInit 0x101d +#define DoC_WritePipeTerm 0x101e +#define DoC_LastDataRead 0x101f +#define DoC_NOP 0x1020 + +#define DoC_Mil_CDSN_IO 0x0800 +#define DoC_2k_CDSN_IO 0x1800 + +#define DoC_Mplus_NOP 0x1002 +#define DoC_Mplus_AliasResolution 0x1004 +#define DoC_Mplus_DOCControl 0x1006 +#define DoC_Mplus_AccessStatus 0x1008 +#define DoC_Mplus_DeviceSelect 0x1008 +#define DoC_Mplus_Configuration 0x100a +#define DoC_Mplus_OutputControl 0x100c +#define DoC_Mplus_FlashControl 0x1020 +#define DoC_Mplus_FlashSelect 0x1022 +#define DoC_Mplus_FlashCmd 0x1024 +#define DoC_Mplus_FlashAddress 0x1026 +#define DoC_Mplus_FlashData0 0x1028 +#define DoC_Mplus_FlashData1 0x1029 +#define DoC_Mplus_ReadPipeInit 0x102a +#define DoC_Mplus_LastDataRead 0x102c +#define DoC_Mplus_LastDataRead1 0x102d +#define DoC_Mplus_WritePipeTerm 0x102e +#define DoC_Mplus_ECCSyndrome0 0x1040 +#define DoC_Mplus_ECCSyndrome1 0x1041 +#define DoC_Mplus_ECCSyndrome2 0x1042 +#define DoC_Mplus_ECCSyndrome3 0x1043 +#define DoC_Mplus_ECCSyndrome4 0x1044 +#define DoC_Mplus_ECCSyndrome5 0x1045 +#define DoC_Mplus_ECCConf 0x1046 +#define DoC_Mplus_Toggle 0x1046 +#define DoC_Mplus_DownloadStatus 0x1074 +#define 
DoC_Mplus_CtrlConfirm 0x1076 +#define DoC_Mplus_Power 0x1fff + +/* How to access the device? + * On ARM, it'll be mmap'd directly with 32-bit wide accesses. + * On PPC, it's mmap'd and 16-bit wide. + * Others use readb/writeb + */ +#if defined(__arm__) +static inline u8 ReadDOC_(u32 __iomem *addr, unsigned long reg) +{ + return __raw_readl(addr + reg); +} +static inline void WriteDOC_(u8 data, u32 __iomem *addr, unsigned long reg) +{ + __raw_writel(data, addr + reg); + wmb(); +} +#define DOC_IOREMAP_LEN 0x8000 +#elif defined(__ppc__) +static inline u8 ReadDOC_(u16 __iomem *addr, unsigned long reg) +{ + return __raw_readw(addr + reg); +} +static inline void WriteDOC_(u8 data, u16 __iomem *addr, unsigned long reg) +{ + __raw_writew(data, addr + reg); + wmb(); +} +#define DOC_IOREMAP_LEN 0x4000 +#else +#define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg)) +#define WriteDOC_(d, adr, reg) writeb(d, (void __iomem *)(adr) + (reg)) +#define DOC_IOREMAP_LEN 0x2000 + +#endif + +#if defined(__i386__) || defined(__x86_64__) +#define USE_MEMCPY +#endif + +/* These are provided to directly use the DoC_xxx defines */ +#define ReadDOC(adr, reg) ReadDOC_(adr,DoC_##reg) +#define WriteDOC(d, adr, reg) WriteDOC_(d,adr,DoC_##reg) + +#define DOC_MODE_RESET 0 +#define DOC_MODE_NORMAL 1 +#define DOC_MODE_RESERVED1 2 +#define DOC_MODE_RESERVED2 3 + +#define DOC_MODE_CLR_ERR 0x80 +#define DOC_MODE_RST_LAT 0x10 +#define DOC_MODE_BDECT 0x08 +#define DOC_MODE_MDWREN 0x04 + +#define DOC_ChipID_Doc2k 0x20 +#define DOC_ChipID_Doc2kTSOP 0x21 /* internal number for MTD */ +#define DOC_ChipID_DocMil 0x30 +#define DOC_ChipID_DocMilPlus32 0x40 +#define DOC_ChipID_DocMilPlus16 0x41 + +#define CDSN_CTRL_FR_B 0x80 +#define CDSN_CTRL_FR_B0 0x40 +#define CDSN_CTRL_FR_B1 0x80 + +#define CDSN_CTRL_ECC_IO 0x20 +#define CDSN_CTRL_FLASH_IO 0x10 +#define CDSN_CTRL_WP 0x08 +#define CDSN_CTRL_ALE 0x04 +#define CDSN_CTRL_CLE 0x02 +#define CDSN_CTRL_CE 0x01 + +#define DOC_ECC_RESET 0 +#define DOC_ECC_ERROR 0x80 +#define DOC_ECC_RW 0x20 +#define DOC_ECC__EN 0x08 +#define DOC_TOGGLE_BIT 0x04 +#define DOC_ECC_RESV 0x02 +#define DOC_ECC_IGNORE 0x01 + +#define DOC_FLASH_CE 0x80 +#define DOC_FLASH_WP 0x40 +#define DOC_FLASH_BANK 0x02 + +/* We have to also set the reserved bit 1 for enable */ +#define DOC_ECC_EN (DOC_ECC__EN | DOC_ECC_RESV) +#define DOC_ECC_DIS (DOC_ECC_RESV) + +struct Nand { + char floor, chip; + unsigned long curadr; + unsigned char curmode; + /* Also some erase/write/pipeline info when we get that far */ +}; + +#define MAX_FLOORS 4 +#define MAX_CHIPS 4 + +#define MAX_FLOORS_MIL 1 +#define MAX_CHIPS_MIL 1 + +#define MAX_FLOORS_MPLUS 2 +#define MAX_CHIPS_MPLUS 1 + +#define ADDR_COLUMN 1 +#define ADDR_PAGE 2 +#define ADDR_COLUMN_PAGE 3 + +struct DiskOnChip { + unsigned long physadr; + void __iomem *virtadr; + unsigned long totlen; + unsigned char ChipID; /* Type of DiskOnChip */ + int ioreg; + + unsigned long mfr; /* Flash IDs - only one type of flash per device */ + unsigned long id; + int chipshift; + char page256; + char pageadrlen; + char interleave; /* Internal interleaving - Millennium Plus style */ + unsigned long erasesize; + + int curfloor; + int curchip; + + int numchips; + struct Nand *chips; + struct mtd_info *nextdoc; + struct mutex lock; +}; + +int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]); + +#endif /* __MTD_DOC2000_H__ */ diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h new file mode 100644 index 000000000..3529683f6 --- /dev/null +++ 
b/include/linux/mtd/flashchip.h @@ -0,0 +1,113 @@ +/* + * Copyright © 2000 Red Hat UK Limited + * Copyright © 2000-2010 David Woodhouse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_FLASHCHIP_H__ +#define __MTD_FLASHCHIP_H__ + +/* For spinlocks. sched.h includes spinlock.h from whichever directory it + * happens to be in - so we don't have to care whether we're on 2.2, which + * has asm/spinlock.h, or 2.4, which has linux/spinlock.h + */ +#include +#include + +typedef enum { + FL_READY, + FL_STATUS, + FL_CFI_QUERY, + FL_JEDEC_QUERY, + FL_ERASING, + FL_ERASE_SUSPENDING, + FL_ERASE_SUSPENDED, + FL_WRITING, + FL_WRITING_TO_BUFFER, + FL_OTP_WRITE, + FL_WRITE_SUSPENDING, + FL_WRITE_SUSPENDED, + FL_PM_SUSPENDED, + FL_SYNCING, + FL_UNLOADING, + FL_LOCKING, + FL_UNLOCKING, + FL_POINT, + FL_XIP_WHILE_ERASING, + FL_XIP_WHILE_WRITING, + FL_SHUTDOWN, + /* These 2 come from nand_state_t, which has been unified here */ + FL_READING, + FL_CACHEDPRG, + /* These 4 come from onenand_state_t, which has been unified here */ + FL_RESETING, + FL_OTPING, + FL_PREPARING_ERASE, + FL_VERIFYING_ERASE, + + FL_UNKNOWN +} flstate_t; + + + +/* NOTE: confusingly, this can be used to refer to more than one chip at a time, + if they're interleaved. This can even refer to individual partitions on + the same physical chip when present. */ + +struct flchip { + unsigned long start; /* Offset within the map */ + // unsigned long len; + /* We omit len for now, because when we group them together + we insist that they're all of the same size, and the chip size + is held in the next level up. If we get more versatile later, + it'll make it a damn sight harder to find which chip we want from + a given offset, and we'll want to add the per-chip length field + back in. + */ + int ref_point_counter; + flstate_t state; + flstate_t oldstate; + + unsigned int write_suspended:1; + unsigned int erase_suspended:1; + unsigned long in_progress_block_addr; + unsigned long in_progress_block_mask; + + struct mutex mutex; + wait_queue_head_t wq; /* Wait on here when we're waiting for the chip + to be ready */ + int word_write_time; + int buffer_write_time; + int erase_time; + + int word_write_time_max; + int buffer_write_time_max; + int erase_time_max; + + void *priv; +}; + +/* This is used to handle contention on write/erase operations + between partitions of the same physical chip. 
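+ *
+ * Hypothetical chip-driver sketch (for illustration only, assuming the
+ * driver stores its flchip_shared pointer in chip->priv): claiming the
+ * chip for a write could look roughly like this:
+ *
+ *	struct flchip_shared *shared = chip->priv;
+ *
+ *	mutex_lock(&shared->lock);
+ *	if (!shared->writing && !shared->erasing)
+ *		shared->writing = chip;
+ *	mutex_unlock(&shared->lock);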
*/ +struct flchip_shared { + struct mutex lock; + struct flchip *writing; + struct flchip *erasing; +}; + + +#endif /* __MTD_FLASHCHIP_H__ */ diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h new file mode 100644 index 000000000..0555f7a0b --- /dev/null +++ b/include/linux/mtd/ftl.h @@ -0,0 +1,74 @@ +/* + * Derived from (and probably identical to): + * ftl.h 1.7 1999/10/25 20:23:17 + * + * The contents of this file are subject to the Mozilla Public License + * Version 1.1 (the "License"); you may not use this file except in + * compliance with the License. You may obtain a copy of the License + * at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and + * limitations under the License. + * + * The initial developer of the original code is David A. Hinds + * . Portions created by David A. Hinds + * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU General Public License version 2 (the "GPL"), in + * which case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision by + * deleting the provisions above and replace them with the notice and + * other provisions required by the GPL. If you do not delete the + * provisions above, a recipient may use your version of this file + * under either the MPL or the GPL. + */ + +#ifndef _LINUX_FTL_H +#define _LINUX_FTL_H + +typedef struct erase_unit_header_t { + uint8_t LinkTargetTuple[5]; + uint8_t DataOrgTuple[10]; + uint8_t NumTransferUnits; + uint32_t EraseCount; + uint16_t LogicalEUN; + uint8_t BlockSize; + uint8_t EraseUnitSize; + uint16_t FirstPhysicalEUN; + uint16_t NumEraseUnits; + uint32_t FormattedSize; + uint32_t FirstVMAddress; + uint16_t NumVMPages; + uint8_t Flags; + uint8_t Code; + uint32_t SerialNumber; + uint32_t AltEUHOffset; + uint32_t BAMOffset; + uint8_t Reserved[12]; + uint8_t EndTuple[2]; +} erase_unit_header_t; + +/* Flags in erase_unit_header_t */ +#define HIDDEN_AREA 0x01 +#define REVERSE_POLARITY 0x02 +#define DOUBLE_BAI 0x04 + +/* Definitions for block allocation information */ + +#define BLOCK_FREE(b) ((b) == 0xffffffff) +#define BLOCK_DELETED(b) (((b) == 0) || ((b) == 0xfffffffe)) + +#define BLOCK_TYPE(b) ((b) & 0x7f) +#define BLOCK_ADDRESS(b) ((b) & ~0x7f) +#define BLOCK_NUMBER(b) ((b) >> 9) +#define BLOCK_CONTROL 0x30 +#define BLOCK_DATA 0x40 +#define BLOCK_REPLACEMENT 0x60 +#define BLOCK_BAD 0x70 + +#endif /* _LINUX_FTL_H */ diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h new file mode 100644 index 000000000..2c456054f --- /dev/null +++ b/include/linux/mtd/gen_probe.h @@ -0,0 +1,37 @@ +/* + * Copyright © 2001 Red Hat UK Limited + * Copyright © 2001-2010 David Woodhouse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __LINUX_MTD_GEN_PROBE_H__ +#define __LINUX_MTD_GEN_PROBE_H__ + +#include +#include +#include +#include + +struct chip_probe { + char *name; + int (*probe_chip)(struct map_info *map, __u32 base, + unsigned long *chip_map, struct cfi_private *cfi); +}; + +struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp); + +#endif /* __LINUX_MTD_GEN_PROBE_H__ */ diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h new file mode 100644 index 000000000..fdfff8706 --- /dev/null +++ b/include/linux/mtd/inftl.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * inftl.h -- defines to support the Inverse NAND Flash Translation Layer + * + * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) + */ + +#ifndef __MTD_INFTL_H__ +#define __MTD_INFTL_H__ + +#ifndef __KERNEL__ +#error This is a kernel header. Perhaps include nftl-user.h instead? +#endif + +#include +#include +#include + +#include + +#ifndef INFTL_MAJOR +#define INFTL_MAJOR 96 +#endif +#define INFTL_PARTN_BITS 4 + +#ifdef __KERNEL__ + +struct INFTLrecord { + struct mtd_blktrans_dev mbd; + __u16 MediaUnit; + __u32 EraseSize; + struct INFTLMediaHeader MediaHdr; + int usecount; + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + __u16 numvunits; + __u16 firstEUN; + __u16 lastEUN; + __u16 numfreeEUNs; + __u16 LastFreeEUN; /* To speed up finding a free EUN */ + int head,sect,cyl; + __u16 *PUtable; /* Physical Unit Table */ + __u16 *VUtable; /* Virtual Unit Table */ + unsigned int nb_blocks; /* number of physical blocks */ + unsigned int nb_boot_blocks; /* number of blocks used by the bios */ + struct erase_info instr; +}; + +int INFTL_mount(struct INFTLrecord *s); +int INFTL_formatblock(struct INFTLrecord *s, int block); + +void INFTL_dumptables(struct INFTLrecord *s); +void INFTL_dumpVUchains(struct INFTLrecord *s); + +int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, + size_t *retlen, uint8_t *buf); +int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, + size_t *retlen, uint8_t *buf); + +#endif /* __KERNEL__ */ + +#endif /* __MTD_INFTL_H__ */ diff --git a/include/linux/mtd/latch-addr-flash.h b/include/linux/mtd/latch-addr-flash.h new file mode 100644 index 000000000..e94b8e128 --- /dev/null +++ b/include/linux/mtd/latch-addr-flash.h @@ -0,0 +1,29 @@ +/* + * Interface for NOR flash driver whose high address lines are latched + * + * Copyright © 2008 MontaVista Software, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ +#ifndef __LATCH_ADDR_FLASH__ +#define __LATCH_ADDR_FLASH__ + +struct map_info; +struct mtd_partition; + +struct latch_addr_flash_data { + unsigned int width; + unsigned int size; + + int (*init)(void *data, int cs); + void (*done)(void *data); + void (*set_window)(unsigned long offset, void *data); + void *data; + + unsigned int nr_parts; + struct mtd_partition *parts; +}; + +#endif diff --git a/include/linux/mtd/lpc32xx_mlc.h b/include/linux/mtd/lpc32xx_mlc.h new file mode 100644 index 000000000..d91b1e356 --- /dev/null +++ b/include/linux/mtd/lpc32xx_mlc.h @@ -0,0 +1,20 @@ +/* + * Platform data for LPC32xx SoC MLC NAND controller + * + * Copyright © 2012 Roland Stigge + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MTD_LPC32XX_MLC_H +#define __LINUX_MTD_LPC32XX_MLC_H + +#include + +struct lpc32xx_mlc_platform_data { + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); +}; + +#endif /* __LINUX_MTD_LPC32XX_MLC_H */ diff --git a/include/linux/mtd/lpc32xx_slc.h b/include/linux/mtd/lpc32xx_slc.h new file mode 100644 index 000000000..1169548a1 --- /dev/null +++ b/include/linux/mtd/lpc32xx_slc.h @@ -0,0 +1,20 @@ +/* + * Platform data for LPC32xx SoC SLC NAND controller + * + * Copyright © 2012 Roland Stigge + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MTD_LPC32XX_SLC_H +#define __LINUX_MTD_LPC32XX_SLC_H + +#include + +struct lpc32xx_slc_platform_data { + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); +}; + +#endif /* __LINUX_MTD_LPC32XX_SLC_H */ diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h new file mode 100644 index 000000000..01b990e4b --- /dev/null +++ b/include/linux/mtd/map.h @@ -0,0 +1,478 @@ +/* + * Copyright © 2000-2010 David Woodhouse et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +/* Overhauled routines for dealing with different mmap regions of flash */ + +#ifndef __LINUX_MTD_MAP_H__ +#define __LINUX_MTD_MAP_H__ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 +#define map_bankwidth(map) 1 +#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1) +#define map_bankwidth_is_large(map) (0) +#define map_words(map) (1) +#define MAX_MAP_BANKWIDTH 1 +#else +#define map_bankwidth_is_1(map) (0) +#endif + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2 +# ifdef map_bankwidth +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# else +# define map_bankwidth(map) 2 +# define map_bankwidth_is_large(map) (0) +# define map_words(map) (1) +# endif +#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2) +#undef MAX_MAP_BANKWIDTH +#define MAX_MAP_BANKWIDTH 2 +#else +#define map_bankwidth_is_2(map) (0) +#endif + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4 +# ifdef map_bankwidth +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# else +# define map_bankwidth(map) 4 +# define map_bankwidth_is_large(map) (0) +# define map_words(map) (1) +# endif +#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4) +#undef MAX_MAP_BANKWIDTH +#define MAX_MAP_BANKWIDTH 4 +#else +#define map_bankwidth_is_4(map) (0) +#endif + +/* ensure we never evaluate anything shorted than an unsigned long + * to zero, and ensure we'll never miss the end of an comparison (bjd) */ + +#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long)) + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8 +# ifdef map_bankwidth +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# if BITS_PER_LONG < 64 +# undef map_bankwidth_is_large +# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) +# undef map_words +# define map_words(map) map_calc_words(map) +# endif +# else +# define map_bankwidth(map) 8 +# define map_bankwidth_is_large(map) (BITS_PER_LONG < 64) +# define map_words(map) map_calc_words(map) +# endif +#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8) +#undef MAX_MAP_BANKWIDTH +#define MAX_MAP_BANKWIDTH 8 +#else +#define map_bankwidth_is_8(map) (0) +#endif + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16 +# ifdef map_bankwidth +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# undef map_bankwidth_is_large +# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) +# undef map_words +# define map_words(map) map_calc_words(map) +# else +# define map_bankwidth(map) 16 +# define map_bankwidth_is_large(map) (1) +# define map_words(map) map_calc_words(map) +# endif +#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16) +#undef MAX_MAP_BANKWIDTH +#define MAX_MAP_BANKWIDTH 16 +#else +#define map_bankwidth_is_16(map) (0) +#endif + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 +/* always use indirect access for 256-bit to preserve kernel stack */ +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# undef map_bankwidth_is_large +# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) +# undef map_words +# define map_words(map) map_calc_words(map) +#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32) +#undef MAX_MAP_BANKWIDTH +#define MAX_MAP_BANKWIDTH 32 +#else 
+#define map_bankwidth_is_32(map) (0) +#endif + +#ifndef map_bankwidth +#ifdef CONFIG_MTD +#warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work" +#endif +static inline int map_bankwidth(void *map) +{ + BUG(); + return 0; +} +#define map_bankwidth_is_large(map) (0) +#define map_words(map) (0) +#define MAX_MAP_BANKWIDTH 1 +#endif + +static inline int map_bankwidth_supported(int w) +{ + switch (w) { +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 + case 1: +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2 + case 2: +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4 + case 4: +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8 + case 8: +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16 + case 16: +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 + case 32: +#endif + return 1; + + default: + return 0; + } +} + +#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG) + +typedef union { + unsigned long x[MAX_MAP_LONGS]; +} map_word; + +/* The map stuff is very simple. You fill in your struct map_info with + a handful of routines for accessing the device, making sure they handle + paging etc. correctly if your device needs it. Then you pass it off + to a chip probe routine -- either JEDEC or CFI probe or both -- via + do_map_probe(). If a chip is recognised, the probe code will invoke the + appropriate chip driver (if present) and return a struct mtd_info. + At which point, you fill in the mtd->module with your own module + address, and register it with the MTD core code. Or you could partition + it and register the partitions instead, or keep it for your own private + use; whatever. + + The mtd->priv field will point to the struct map_info, and any further + private data required by the chip driver is linked from the + mtd->priv->fldrv_priv field. This allows the map driver to get at + the destructor function map->fldrv_destroy() when it's tired + of living. +*/ + +struct map_info { + const char *name; + unsigned long size; + resource_size_t phys; +#define NO_XIP (-1UL) + + void __iomem *virt; + void *cached; + + int swap; /* this mapping's byte-swapping requirement */ + int bankwidth; /* in octets. This isn't necessarily the width + of actual bus cycles -- it's the repeat interval + in bytes, before you are talking to the first chip again. + */ + +#ifdef CONFIG_MTD_COMPLEX_MAPPINGS + map_word (*read)(struct map_info *, unsigned long); + void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t); + + void (*write)(struct map_info *, const map_word, unsigned long); + void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t); + + /* We can perhaps put in 'point' and 'unpoint' methods, if we really + want to enable XIP for non-linear mappings. Not yet though. */ +#endif + /* It's possible for the map driver to use cached memory in its + copy_from implementation (and _only_ with copy_from). However, + when the chip driver knows some flash area has changed contents, + it will signal it to the map driver through this routine to let + the map driver invalidate the corresponding cache as needed. + If there is no cache to care about this can be set to NULL. */ + void (*inval_cache)(struct map_info *, unsigned long, ssize_t); + + /* This will be called with 1 as parameter when the first map user + * needs VPP, and called with 0 when the last user exits. The map + * core maintains a reference counter, and assumes that VPP is a + * global resource applying to all mapped flash chips on the system. 
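+ *
+ * A minimal board-level sketch (hypothetical GPIO line, for illustration
+ * only); chip drivers toggle the hook through the ENABLE_VPP()/DISABLE_VPP()
+ * helpers defined further down in this header:
+ *
+ *	static void example_set_vpp(struct map_info *map, int on)
+ *	{
+ *		gpio_set_value(EXAMPLE_VPP_GPIO, on);
+ *	}
+ *
+ *	example_map.set_vpp = example_set_vpp;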
+ */ + void (*set_vpp)(struct map_info *, int); + + unsigned long pfow_base; + unsigned long map_priv_1; + unsigned long map_priv_2; + struct device_node *device_node; + void *fldrv_priv; + struct mtd_chip_driver *fldrv; +}; + +struct mtd_chip_driver { + struct mtd_info *(*probe)(struct map_info *map); + void (*destroy)(struct mtd_info *); + struct module *module; + char *name; + struct list_head list; +}; + +void register_mtd_chip_driver(struct mtd_chip_driver *); +void unregister_mtd_chip_driver(struct mtd_chip_driver *); + +struct mtd_info *do_map_probe(const char *name, struct map_info *map); +void map_destroy(struct mtd_info *mtd); + +#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0) +#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0) + +#define INVALIDATE_CACHED_RANGE(map, from, size) \ + do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) + +#define map_word_equal(map, val1, val2) \ +({ \ + int i, ret = 1; \ + for (i = 0; i < map_words(map); i++) \ + if ((val1).x[i] != (val2).x[i]) { \ + ret = 0; \ + break; \ + } \ + ret; \ +}) + +#define map_word_and(map, val1, val2) \ +({ \ + map_word r; \ + int i; \ + for (i = 0; i < map_words(map); i++) \ + r.x[i] = (val1).x[i] & (val2).x[i]; \ + r; \ +}) + +#define map_word_clr(map, val1, val2) \ +({ \ + map_word r; \ + int i; \ + for (i = 0; i < map_words(map); i++) \ + r.x[i] = (val1).x[i] & ~(val2).x[i]; \ + r; \ +}) + +#define map_word_or(map, val1, val2) \ +({ \ + map_word r; \ + int i; \ + for (i = 0; i < map_words(map); i++) \ + r.x[i] = (val1).x[i] | (val2).x[i]; \ + r; \ +}) + +#define map_word_andequal(map, val1, val2, val3) \ +({ \ + int i, ret = 1; \ + for (i = 0; i < map_words(map); i++) { \ + if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \ + ret = 0; \ + break; \ + } \ + } \ + ret; \ +}) + +#define map_word_bitsset(map, val1, val2) \ +({ \ + int i, ret = 0; \ + for (i = 0; i < map_words(map); i++) { \ + if ((val1).x[i] & (val2).x[i]) { \ + ret = 1; \ + break; \ + } \ + } \ + ret; \ +}) + +static inline map_word map_word_load(struct map_info *map, const void *ptr) +{ + map_word r; + + if (map_bankwidth_is_1(map)) + r.x[0] = *(unsigned char *)ptr; + else if (map_bankwidth_is_2(map)) + r.x[0] = get_unaligned((uint16_t *)ptr); + else if (map_bankwidth_is_4(map)) + r.x[0] = get_unaligned((uint32_t *)ptr); +#if BITS_PER_LONG >= 64 + else if (map_bankwidth_is_8(map)) + r.x[0] = get_unaligned((uint64_t *)ptr); +#endif + else if (map_bankwidth_is_large(map)) + memcpy(r.x, ptr, map->bankwidth); + else + BUG(); + + return r; +} + +static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len) +{ + int i; + + if (map_bankwidth_is_large(map)) { + char *dest = (char *)&orig; + + memcpy(dest+start, buf, len); + } else { + for (i = start; i < start+len; i++) { + int bitpos; + +#ifdef __LITTLE_ENDIAN + bitpos = i * 8; +#else /* __BIG_ENDIAN */ + bitpos = (map_bankwidth(map) - 1 - i) * 8; +#endif + orig.x[0] &= ~(0xff << bitpos); + orig.x[0] |= (unsigned long)buf[i-start] << bitpos; + } + } + return orig; +} + +#if BITS_PER_LONG < 64 +#define MAP_FF_LIMIT 4 +#else +#define MAP_FF_LIMIT 8 +#endif + +static inline map_word map_word_ff(struct map_info *map) +{ + map_word r; + int i; + + if (map_bankwidth(map) < MAP_FF_LIMIT) { + int bw = 8 * map_bankwidth(map); + + r.x[0] = (1UL << bw) - 1; + } else { + for (i = 0; i < map_words(map); i++) + r.x[i] = ~0UL; + } + return r; +} + +static inline map_word 
inline_map_read(struct map_info *map, unsigned long ofs) +{ + map_word r; + + if (map_bankwidth_is_1(map)) + r.x[0] = __raw_readb(map->virt + ofs); + else if (map_bankwidth_is_2(map)) + r.x[0] = __raw_readw(map->virt + ofs); + else if (map_bankwidth_is_4(map)) + r.x[0] = __raw_readl(map->virt + ofs); +#if BITS_PER_LONG >= 64 + else if (map_bankwidth_is_8(map)) + r.x[0] = __raw_readq(map->virt + ofs); +#endif + else if (map_bankwidth_is_large(map)) + memcpy_fromio(r.x, map->virt + ofs, map->bankwidth); + else + BUG(); + + return r; +} + +static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs) +{ + if (map_bankwidth_is_1(map)) + __raw_writeb(datum.x[0], map->virt + ofs); + else if (map_bankwidth_is_2(map)) + __raw_writew(datum.x[0], map->virt + ofs); + else if (map_bankwidth_is_4(map)) + __raw_writel(datum.x[0], map->virt + ofs); +#if BITS_PER_LONG >= 64 + else if (map_bankwidth_is_8(map)) + __raw_writeq(datum.x[0], map->virt + ofs); +#endif + else if (map_bankwidth_is_large(map)) + memcpy_toio(map->virt+ofs, datum.x, map->bankwidth); + else + BUG(); + mb(); +} + +static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) +{ + if (map->cached) + memcpy(to, (char *)map->cached + from, len); + else + memcpy_fromio(to, map->virt + from, len); +} + +static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) +{ + memcpy_toio(map->virt + to, from, len); +} + +#ifdef CONFIG_MTD_COMPLEX_MAPPINGS +#define map_read(map, ofs) (map)->read(map, ofs) +#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len) +#define map_write(map, datum, ofs) (map)->write(map, datum, ofs) +#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len) + +extern void simple_map_init(struct map_info *); +#define map_is_linear(map) (map->phys != NO_XIP) + +#else +#define map_read(map, ofs) inline_map_read(map, ofs) +#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len) +#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs) +#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len) + + +#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth)) +#define map_is_linear(map) ({ (void)(map); 1; }) + +#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */ + +#endif /* __LINUX_MTD_MAP_H__ */ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h new file mode 100644 index 000000000..035d641e8 --- /dev/null +++ b/include/linux/mtd/mtd.h @@ -0,0 +1,601 @@ +/* + * Copyright © 1999-2010 David Woodhouse et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_MTD_H__ +#define __MTD_MTD_H__ + +#include +#include +#include +#include +#include + +#include + +#include + +#define MTD_FAIL_ADDR_UNKNOWN -1LL + +struct mtd_info; + +/* + * If the erase fails, fail_addr might indicate exactly which block failed. If + * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level + * or was not specific to any particular block. + */ +struct erase_info { + uint64_t addr; + uint64_t len; + uint64_t fail_addr; +}; + +struct mtd_erase_region_info { + uint64_t offset; /* At which this region starts, from the beginning of the MTD */ + uint32_t erasesize; /* For this region */ + uint32_t numblocks; /* Number of blocks of erasesize in this region */ + unsigned long *lockmap; /* If keeping bitmap of locks */ +}; + +/** + * struct mtd_oob_ops - oob operation operands + * @mode: operation mode + * + * @len: number of data bytes to write/read + * + * @retlen: number of data bytes written/read + * + * @ooblen: number of oob bytes to write/read + * @oobretlen: number of oob bytes written/read + * @ooboffs: offset of oob data in the oob area (only relevant when + * mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW) + * @datbuf: data buffer - if NULL only oob data are read/written + * @oobbuf: oob data buffer + * + * Note, some MTD drivers do not allow you to write more than one OOB area at + * one go. If you try to do that on such an MTD device, -EINVAL will be + * returned. If you want to make your implementation portable on all kind of MTD + * devices you should split the write request into several sub-requests when the + * request crosses a page boundary. + */ +struct mtd_oob_ops { + unsigned int mode; + size_t len; + size_t retlen; + size_t ooblen; + size_t oobretlen; + uint32_t ooboffs; + uint8_t *datbuf; + uint8_t *oobbuf; +}; + +#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 +#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640 +/** + * struct mtd_oob_region - oob region definition + * @offset: region offset + * @length: region length + * + * This structure describes a region of the OOB area, and is used + * to retrieve ECC or free bytes sections. + * Each section is defined by an offset within the OOB area and a + * length. + */ +struct mtd_oob_region { + u32 offset; + u32 length; +}; + +/* + * struct mtd_ooblayout_ops - NAND OOB layout operations + * @ecc: function returning an ECC region in the OOB area. + * Should return -ERANGE if %section exceeds the total number of + * ECC sections. + * @free: function returning a free region in the OOB area. + * Should return -ERANGE if %section exceeds the total number of + * free sections. + */ +struct mtd_ooblayout_ops { + int (*ecc)(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc); + int (*free)(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree); +}; + +/** + * struct mtd_pairing_info - page pairing information + * + * @pair: pair id + * @group: group id + * + * The term "pair" is used here, even though TLC NANDs might group pages by 3 + * (3 bits in a single cell). A pair should regroup all pages that are sharing + * the same cell. Pairs are then indexed in ascending order. + * + * @group is defining the position of a page in a given pair. 
It can also be + * seen as the bit position in the cell: page attached to bit 0 belongs to + * group 0, page attached to bit 1 belongs to group 1, etc. + * + * Example: + * The H27UCG8T2BTR-BC datasheet describes the following pairing scheme: + * + * group-0 group-1 + * + * pair-0 page-0 page-4 + * pair-1 page-1 page-5 + * pair-2 page-2 page-8 + * ... + * pair-127 page-251 page-255 + * + * + * Note that the "group" and "pair" terms were extracted from Samsung and + * Hynix datasheets, and might be referenced under other names in other + * datasheets (Micron is describing this concept as "shared pages"). + */ +struct mtd_pairing_info { + int pair; + int group; +}; + +/** + * struct mtd_pairing_scheme - page pairing scheme description + * + * @ngroups: number of groups. Should be related to the number of bits + * per cell. + * @get_info: converts a write-unit (page number within an erase block) into + * mtd_pairing information (pair + group). This function should + * fill the info parameter based on the wunit index or return + * -EINVAL if the wunit parameter is invalid. + * @get_wunit: converts pairing information into a write-unit (page) number. + * This function should return the wunit index pointed by the + * pairing information described in the info argument. It should + * return -EINVAL, if there's no wunit corresponding to the + * passed pairing information. + * + * See mtd_pairing_info documentation for a detailed explanation of the + * pair and group concepts. + * + * The mtd_pairing_scheme structure provides a generic solution to represent + * NAND page pairing scheme. Instead of exposing two big tables to do the + * write-unit <-> (pair + group) conversions, we ask the MTD drivers to + * implement the ->get_info() and ->get_wunit() functions. + * + * MTD users will then be able to query these information by using the + * mtd_pairing_info_to_wunit() and mtd_wunit_to_pairing_info() helpers. + * + * @ngroups is here to help MTD users iterating over all the pages in a + * given pair. This value can be retrieved by MTD users using the + * mtd_pairing_groups() helper. + * + * Examples are given in the mtd_pairing_info_to_wunit() and + * mtd_wunit_to_pairing_info() documentation. + */ +struct mtd_pairing_scheme { + int ngroups; + int (*get_info)(struct mtd_info *mtd, int wunit, + struct mtd_pairing_info *info); + int (*get_wunit)(struct mtd_info *mtd, + const struct mtd_pairing_info *info); +}; + +struct module; /* only needed for owner field in mtd_info */ + +/** + * struct mtd_debug_info - debugging information for an MTD device. + * + * @dfs_dir: direntry object of the MTD device debugfs directory + */ +struct mtd_debug_info { + struct dentry *dfs_dir; +}; + +struct mtd_info { + u_char type; + uint32_t flags; + uint64_t size; // Total size of the MTD + + /* "Major" erase size for the device. Naïve users may take this + * to be the only erase size available, or may use the more detailed + * information below if they desire + */ + uint32_t erasesize; + /* Minimal writable flash unit size. In case of NOR flash it is 1 (even + * though individual bits can be cleared), in case of NAND flash it is + * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR + * it is of ECC block size, etc. It is illegal to have writesize = 0. + * Any driver registering a struct mtd_info must ensure a writesize of + * 1 or larger. + */ + uint32_t writesize; + + /* + * Size of the write buffer used by the MTD. MTD devices having a write + * buffer can write multiple writesize chunks at a time. 
E.g. while + * writing 4 * writesize bytes to a device with 2 * writesize bytes + * buffer the MTD driver can (but doesn't have to) do 2 writesize + * operations, but not 4. Currently, all NANDs have writebufsize + * equivalent to writesize (NAND page size). Some NOR flashes do have + * writebufsize greater than writesize. + */ + uint32_t writebufsize; + + uint32_t oobsize; // Amount of OOB data per block (e.g. 16) + uint32_t oobavail; // Available OOB bytes per block + + /* + * If erasesize is a power of 2 then the shift is stored in + * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize. + */ + unsigned int erasesize_shift; + unsigned int writesize_shift; + /* Masks based on erasesize_shift and writesize_shift */ + unsigned int erasesize_mask; + unsigned int writesize_mask; + + /* + * read ops return -EUCLEAN if max number of bitflips corrected on any + * one region comprising an ecc step equals or exceeds this value. + * Settable by driver, else defaults to ecc_strength. User can override + * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed; + * see Documentation/ABI/testing/sysfs-class-mtd for more detail. + */ + unsigned int bitflip_threshold; + + /* Kernel-only stuff starts here. */ + const char *name; + int index; + + /* OOB layout description */ + const struct mtd_ooblayout_ops *ooblayout; + + /* NAND pairing scheme, only provided for MLC/TLC NANDs */ + const struct mtd_pairing_scheme *pairing; + + /* the ecc step size. */ + unsigned int ecc_step_size; + + /* max number of correctible bit errors per ecc step */ + unsigned int ecc_strength; + + /* Data for variable erase regions. If numeraseregions is zero, + * it means that the whole device has erasesize as given above. + */ + int numeraseregions; + struct mtd_erase_region_info *eraseregions; + + /* + * Do not call via these pointers, use corresponding mtd_*() + * wrappers instead. 
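+ *
+ * Illustrative sketch (not part of the upstream comment): callers go
+ * through the wrappers declared further down in this header, e.g.
+ *
+ *	size_t retlen;
+ *	u_char buf[128];
+ *	int ret = mtd_read(mtd, 0, sizeof(buf), &retlen, buf);
+ *
+ *	if (ret && !mtd_is_bitflip(ret))
+ *		return ret;
+ *
+ * rather than dereferencing mtd->_read directly.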
+ */ + int (*_erase) (struct mtd_info *mtd, struct erase_info *instr); + int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, void **virt, resource_size_t *phys); + int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); + int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf); + int (*_write) (struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf); + int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf); + int (*_read_oob) (struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops); + int (*_write_oob) (struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops); + int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf); + int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, + size_t len, size_t *retlen, u_char *buf); + int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf); + int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from, + size_t len, size_t *retlen, u_char *buf); + int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to, + size_t len, size_t *retlen, u_char *buf); + int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, + size_t len); + int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t *retlen); + void (*_sync) (struct mtd_info *mtd); + int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); + int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); + int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len); + int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs); + int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); + int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); + int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len); + int (*_suspend) (struct mtd_info *mtd); + void (*_resume) (struct mtd_info *mtd); + void (*_reboot) (struct mtd_info *mtd); + /* + * If the driver is something smart, like UBI, it may need to maintain + * its own reference counting. The below functions are only for driver. 
+ */ + int (*_get_device) (struct mtd_info *mtd); + void (*_put_device) (struct mtd_info *mtd); + + struct notifier_block reboot_notifier; /* default mode before reboot */ + + /* ECC status information */ + struct mtd_ecc_stats ecc_stats; + /* Subpage shift (NAND) */ + int subpage_sft; + + void *priv; + + struct module *owner; + struct device dev; + int usecount; + struct mtd_debug_info dbg; +}; + +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc); +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, + int *section, + struct mtd_oob_region *oobregion); +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, + const u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, + u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree); +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, + const u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, + u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd); +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd); + +static inline void mtd_set_ooblayout(struct mtd_info *mtd, + const struct mtd_ooblayout_ops *ooblayout) +{ + mtd->ooblayout = ooblayout; +} + +static inline void mtd_set_pairing_scheme(struct mtd_info *mtd, + const struct mtd_pairing_scheme *pairing) +{ + mtd->pairing = pairing; +} + +static inline void mtd_set_of_node(struct mtd_info *mtd, + struct device_node *np) +{ + mtd->dev.of_node = np; + if (!mtd->name) + of_property_read_string(np, "label", &mtd->name); +} + +static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) +{ + return dev_of_node(&mtd->dev); +} + +static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) +{ + return ops->mode == MTD_OPS_AUTO_OOB ? 
mtd->oobavail : mtd->oobsize; +} + +static inline int mtd_max_bad_blocks(struct mtd_info *mtd, + loff_t ofs, size_t len) +{ + if (!mtd->_max_bad_blocks) + return -ENOTSUPP; + + if (mtd->size < (len + ofs) || ofs < 0) + return -EINVAL; + + return mtd->_max_bad_blocks(mtd, ofs, len); +} + +int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, + struct mtd_pairing_info *info); +int mtd_pairing_info_to_wunit(struct mtd_info *mtd, + const struct mtd_pairing_info *info); +int mtd_pairing_groups(struct mtd_info *mtd); +int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); +int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, + void **virt, resource_size_t *phys); +int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len); +unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, + unsigned long offset, unsigned long flags); +int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, + u_char *buf); +int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, + const u_char *buf); +int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, + const u_char *buf); + +int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops); +int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops); + +int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, + struct otp_info *buf); +int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf); +int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, + struct otp_info *buf); +int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf); +int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, u_char *buf); +int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len); + +int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t *retlen); + +static inline void mtd_sync(struct mtd_info *mtd) +{ + if (mtd->_sync) + mtd->_sync(mtd); +} + +int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); +int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs); +int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs); +int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs); + +static inline int mtd_suspend(struct mtd_info *mtd) +{ + return mtd->_suspend ? mtd->_suspend(mtd) : 0; +} + +static inline void mtd_resume(struct mtd_info *mtd) +{ + if (mtd->_resume) + mtd->_resume(mtd); +} + +static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->erasesize_shift) + return sz >> mtd->erasesize_shift; + do_div(sz, mtd->erasesize); + return sz; +} + +static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->erasesize_shift) + return sz & mtd->erasesize_mask; + return do_div(sz, mtd->erasesize); +} + +/** + * mtd_align_erase_req - Adjust an erase request to align things on eraseblock + * boundaries. + * @mtd: the MTD device this erase request applies on + * @req: the erase request to adjust + * + * This function will adjust @req->addr and @req->len to align them on + * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0. 
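+ *
+ * Worked example (illustration only, not from the upstream comment): with
+ * an erasesize of 0x20000, a request of { .addr = 0x21000, .len = 0x1000 }
+ * is first pulled back to addr = 0x20000 (len grows to 0x2000) and then
+ * rounded up so that len becomes 0x20000, i.e. exactly one eraseblock.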
+ */ +static inline void mtd_align_erase_req(struct mtd_info *mtd, + struct erase_info *req) +{ + u32 mod; + + if (WARN_ON(!mtd->erasesize)) + return; + + mod = mtd_mod_by_eb(req->addr, mtd); + if (mod) { + req->addr -= mod; + req->len += mod; + } + + mod = mtd_mod_by_eb(req->addr + req->len, mtd); + if (mod) + req->len += mtd->erasesize - mod; +} + +static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->writesize_shift) + return sz >> mtd->writesize_shift; + do_div(sz, mtd->writesize); + return sz; +} + +static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->writesize_shift) + return sz & mtd->writesize_mask; + return do_div(sz, mtd->writesize); +} + +static inline int mtd_wunit_per_eb(struct mtd_info *mtd) +{ + return mtd->erasesize / mtd->writesize; +} + +static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs) +{ + return mtd_div_by_ws(mtd_mod_by_eb(offs, mtd), mtd); +} + +static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base, + int wunit) +{ + return base + (wunit * mtd->writesize); +} + + +static inline int mtd_has_oob(const struct mtd_info *mtd) +{ + return mtd->_read_oob && mtd->_write_oob; +} + +static inline int mtd_type_is_nand(const struct mtd_info *mtd) +{ + return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH; +} + +static inline int mtd_can_have_bb(const struct mtd_info *mtd) +{ + return !!mtd->_block_isbad; +} + + /* Kernel-side ioctl definitions */ + +struct mtd_partition; +struct mtd_part_parser_data; + +extern int mtd_device_parse_register(struct mtd_info *mtd, + const char * const *part_probe_types, + struct mtd_part_parser_data *parser_data, + const struct mtd_partition *defparts, + int defnr_parts); +#define mtd_device_register(master, parts, nr_parts) \ + mtd_device_parse_register(master, NULL, NULL, parts, nr_parts) +extern int mtd_device_unregister(struct mtd_info *master); +extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num); +extern int __get_mtd_device(struct mtd_info *mtd); +extern void __put_mtd_device(struct mtd_info *mtd); +extern struct mtd_info *get_mtd_device_nm(const char *name); +extern void put_mtd_device(struct mtd_info *mtd); + + +struct mtd_notifier { + void (*add)(struct mtd_info *mtd); + void (*remove)(struct mtd_info *mtd); + struct list_head list; +}; + + +extern void register_mtd_user (struct mtd_notifier *new); +extern int unregister_mtd_user (struct mtd_notifier *old); +void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size); + +static inline int mtd_is_bitflip(int err) { + return err == -EUCLEAN; +} + +static inline int mtd_is_eccerr(int err) { + return err == -EBADMSG; +} + +static inline int mtd_is_bitflip_or_eccerr(int err) { + return mtd_is_bitflip(err) || mtd_is_eccerr(err); +} + +unsigned mtd_mmap_capabilities(struct mtd_info *mtd); + +#endif /* __MTD_MTD_H__ */ diff --git a/include/linux/mtd/mtdram.h b/include/linux/mtd/mtdram.h new file mode 100644 index 000000000..ee8f95643 --- /dev/null +++ b/include/linux/mtd/mtdram.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __MTD_MTDRAM_H__ +#define __MTD_MTDRAM_H__ + +#include +int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, + unsigned long size, const char *name); + +#endif /* __MTD_MTDRAM_H__ */ diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h new file mode 100644 index 000000000..7ab51bc4a --- /dev/null +++ b/include/linux/mtd/nand-gpio.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ +#ifndef __LINUX_MTD_NAND_GPIO_H +#define __LINUX_MTD_NAND_GPIO_H + +#include + +struct gpio_nand_platdata { + void (*adjust_parts)(struct gpio_nand_platdata *, size_t); + struct mtd_partition *parts; + unsigned int num_parts; + unsigned int options; + int chip_delay; +}; + +#endif diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h new file mode 100644 index 000000000..7f53ece2c --- /dev/null +++ b/include/linux/mtd/nand.h @@ -0,0 +1,733 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2017 - Free Electrons + * + * Authors: + * Boris Brezillon + * Peter Pan + */ + +#ifndef __LINUX_MTD_NAND_H +#define __LINUX_MTD_NAND_H + +#include + +/** + * struct nand_memory_organization - Memory organization structure + * @bits_per_cell: number of bits per NAND cell + * @pagesize: page size + * @oobsize: OOB area size + * @pages_per_eraseblock: number of pages per eraseblock + * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number) + * @planes_per_lun: number of planes per LUN + * @luns_per_target: number of LUN per target (target is a synonym for die) + * @ntargets: total number of targets exposed by the NAND device + */ +struct nand_memory_organization { + unsigned int bits_per_cell; + unsigned int pagesize; + unsigned int oobsize; + unsigned int pages_per_eraseblock; + unsigned int eraseblocks_per_lun; + unsigned int planes_per_lun; + unsigned int luns_per_target; + unsigned int ntargets; +}; + +#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \ + { \ + .bits_per_cell = (bpc), \ + .pagesize = (ps), \ + .oobsize = (os), \ + .pages_per_eraseblock = (ppe), \ + .eraseblocks_per_lun = (epl), \ + .planes_per_lun = (ppl), \ + .luns_per_target = (lpt), \ + .ntargets = (nt), \ + } + +/** + * struct nand_row_converter - Information needed to convert an absolute offset + * into a row address + * @lun_addr_shift: position of the LUN identifier in the row address + * @eraseblock_addr_shift: position of the eraseblock identifier in the row + * address + */ +struct nand_row_converter { + unsigned int lun_addr_shift; + unsigned int eraseblock_addr_shift; +}; + +/** + * struct nand_pos - NAND position object + * @target: the NAND target/die + * @lun: the LUN identifier + * @plane: the plane within the LUN + * @eraseblock: the eraseblock within the LUN + * @page: the page within the LUN + * + * These information are usually used by specific sub-layers to select the + * appropriate target/die and generate a row address to pass to the device. + */ +struct nand_pos { + unsigned int target; + unsigned int lun; + unsigned int plane; + unsigned int eraseblock; + unsigned int page; +}; + +/** + * struct nand_page_io_req - NAND I/O request object + * @pos: the position this I/O request is targeting + * @dataoffs: the offset within the page + * @datalen: number of data bytes to read from/write to this page + * @databuf: buffer to store data in or get data from + * @ooboffs: the OOB offset within the page + * @ooblen: the number of OOB bytes to read from/write to this page + * @oobbuf: buffer to store OOB data in or get OOB data from + * @mode: one of the %MTD_OPS_XXX mode + * + * This object is used to pass per-page I/O requests to NAND sub-layers. This + * way all useful information are already formatted in a useful way and + * specific NAND layers can focus on translating these information into + * specific commands/operations. 
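+ *
+ * Note (added for clarity, not upstream wording): instances of this
+ * structure are normally not built by hand; nanddev_io_iter_init() and
+ * nanddev_io_iter_next_page() below derive one request per page from a
+ * struct mtd_oob_ops passed in by the MTD layer.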
+ */ +struct nand_page_io_req { + struct nand_pos pos; + unsigned int dataoffs; + unsigned int datalen; + union { + const void *out; + void *in; + } databuf; + unsigned int ooboffs; + unsigned int ooblen; + union { + const void *out; + void *in; + } oobbuf; + int mode; +}; + +/** + * struct nand_ecc_req - NAND ECC requirements + * @strength: ECC strength + * @step_size: ECC step/block size + */ +struct nand_ecc_req { + unsigned int strength; + unsigned int step_size; +}; + +#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) } + +/** + * struct nand_bbt - bad block table object + * @cache: in memory BBT cache + */ +struct nand_bbt { + unsigned long *cache; +}; + +struct nand_device; + +/** + * struct nand_ops - NAND operations + * @erase: erase a specific block. No need to check if the block is bad before + * erasing, this has been taken care of by the generic NAND layer + * @markbad: mark a specific block bad. No need to check if the block is + * already marked bad, this has been taken care of by the generic + * NAND layer. This method should just write the BBM (Bad Block + * Marker) so that future call to struct_nand_ops->isbad() return + * true + * @isbad: check whether a block is bad or not. This method should just read + * the BBM and return whether the block is bad or not based on what it + * reads + * + * These are all low level operations that should be implemented by specialized + * NAND layers (SPI NAND, raw NAND, ...). + */ +struct nand_ops { + int (*erase)(struct nand_device *nand, const struct nand_pos *pos); + int (*markbad)(struct nand_device *nand, const struct nand_pos *pos); + bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos); +}; + +/** + * struct nand_device - NAND device + * @mtd: MTD instance attached to the NAND device + * @memorg: memory layout + * @eccreq: ECC requirements + * @rowconv: position to row address converter + * @bbt: bad block table info + * @ops: NAND operations attached to the NAND device + * + * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND) + * should declare their own NAND object embedding a nand_device struct (that's + * how inheritance is done). + * struct_nand_device->memorg and struct_nand_device->eccreq should be filled + * at device detection time to reflect the NAND device + * capabilities/requirements. Once this is done nanddev_init() can be called. + * It will take care of converting NAND information into MTD ones, which means + * the specialized NAND layers should never manually tweak + * struct_nand_device->mtd except for the ->_read/write() hooks. + */ +struct nand_device { + struct mtd_info mtd; + struct nand_memory_organization memorg; + struct nand_ecc_req eccreq; + struct nand_row_converter rowconv; + struct nand_bbt bbt; + const struct nand_ops *ops; +}; + +/** + * struct nand_io_iter - NAND I/O iterator + * @req: current I/O request + * @oobbytes_per_page: maximum number of OOB bytes per page + * @dataleft: remaining number of data bytes to read/write + * @oobleft: remaining number of OOB bytes to read/write + * + * Can be used by specialized NAND layers to iterate over all pages covered + * by an MTD I/O request, which should greatly simplifies the boiler-plate + * code needed to read/write data from/to a NAND device. 
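+ *
+ * A usage sketch is given with the nanddev_io_for_each_page() macro at the
+ * end of this file; the iterator is normally driven through that macro
+ * rather than by hand.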
+ */ +struct nand_io_iter { + struct nand_page_io_req req; + unsigned int oobbytes_per_page; + unsigned int dataleft; + unsigned int oobleft; +}; + +/** + * mtd_to_nanddev() - Get the NAND device attached to the MTD instance + * @mtd: MTD instance + * + * Return: the NAND device embedding @mtd. + */ +static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd) +{ + return container_of(mtd, struct nand_device, mtd); +} + +/** + * nanddev_to_mtd() - Get the MTD device attached to a NAND device + * @nand: NAND device + * + * Return: the MTD device embedded in @nand. + */ +static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand) +{ + return &nand->mtd; +} + +/* + * nanddev_bits_per_cell() - Get the number of bits per cell + * @nand: NAND device + * + * Return: the number of bits per cell. + */ +static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand) +{ + return nand->memorg.bits_per_cell; +} + +/** + * nanddev_page_size() - Get NAND page size + * @nand: NAND device + * + * Return: the page size. + */ +static inline size_t nanddev_page_size(const struct nand_device *nand) +{ + return nand->memorg.pagesize; +} + +/** + * nanddev_per_page_oobsize() - Get NAND OOB size + * @nand: NAND device + * + * Return: the OOB size. + */ +static inline unsigned int +nanddev_per_page_oobsize(const struct nand_device *nand) +{ + return nand->memorg.oobsize; +} + +/** + * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock + * @nand: NAND device + * + * Return: the number of pages per eraseblock. + */ +static inline unsigned int +nanddev_pages_per_eraseblock(const struct nand_device *nand) +{ + return nand->memorg.pages_per_eraseblock; +} + +/** + * nanddev_per_page_oobsize() - Get NAND erase block size + * @nand: NAND device + * + * Return: the eraseblock size. + */ +static inline size_t nanddev_eraseblock_size(const struct nand_device *nand) +{ + return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock; +} + +/** + * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN + * @nand: NAND device + * + * Return: the number of eraseblocks per LUN. + */ +static inline unsigned int +nanddev_eraseblocks_per_lun(const struct nand_device *nand) +{ + return nand->memorg.eraseblocks_per_lun; +} + +/** + * nanddev_target_size() - Get the total size provided by a single target/die + * @nand: NAND device + * + * Return: the total size exposed by a single target/die in bytes. + */ +static inline u64 nanddev_target_size(const struct nand_device *nand) +{ + return (u64)nand->memorg.luns_per_target * + nand->memorg.eraseblocks_per_lun * + nand->memorg.pages_per_eraseblock * + nand->memorg.pagesize; +} + +/** + * nanddev_ntarget() - Get the total of targets + * @nand: NAND device + * + * Return: the number of targets/dies exposed by @nand. + */ +static inline unsigned int nanddev_ntargets(const struct nand_device *nand) +{ + return nand->memorg.ntargets; +} + +/** + * nanddev_neraseblocks() - Get the total number of erasablocks + * @nand: NAND device + * + * Return: the total number of eraseblocks exposed by @nand. + */ +static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) +{ + return nand->memorg.ntargets * nand->memorg.luns_per_target * + nand->memorg.eraseblocks_per_lun; +} + +/** + * nanddev_size() - Get NAND size + * @nand: NAND device + * + * Return: the total size (in bytes) exposed by @nand. 
+ */ +static inline u64 nanddev_size(const struct nand_device *nand) +{ + return nanddev_target_size(nand) * nanddev_ntargets(nand); +} + +/** + * nanddev_get_memorg() - Extract memory organization info from a NAND device + * @nand: NAND device + * + * This can be used by the upper layer to fill the memorg info before calling + * nanddev_init(). + * + * Return: the memorg object embedded in the NAND device. + */ +static inline struct nand_memory_organization * +nanddev_get_memorg(struct nand_device *nand) +{ + return &nand->memorg; +} + +int nanddev_init(struct nand_device *nand, const struct nand_ops *ops, + struct module *owner); +void nanddev_cleanup(struct nand_device *nand); + +/** + * nanddev_register() - Register a NAND device + * @nand: NAND device + * + * Register a NAND device. + * This function is just a wrapper around mtd_device_register() + * registering the MTD device embedded in @nand. + * + * Return: 0 in case of success, a negative error code otherwise. + */ +static inline int nanddev_register(struct nand_device *nand) +{ + return mtd_device_register(&nand->mtd, NULL, 0); +} + +/** + * nanddev_unregister() - Unregister a NAND device + * @nand: NAND device + * + * Unregister a NAND device. + * This function is just a wrapper around mtd_device_unregister() + * unregistering the MTD device embedded in @nand. + * + * Return: 0 in case of success, a negative error code otherwise. + */ +static inline int nanddev_unregister(struct nand_device *nand) +{ + return mtd_device_unregister(&nand->mtd); +} + +/** + * nanddev_set_of_node() - Attach a DT node to a NAND device + * @nand: NAND device + * @np: DT node + * + * Attach a DT node to a NAND device. + */ +static inline void nanddev_set_of_node(struct nand_device *nand, + struct device_node *np) +{ + mtd_set_of_node(&nand->mtd, np); +} + +/** + * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device + * @nand: NAND device + * + * Return: the DT node attached to @nand. + */ +static inline struct device_node *nanddev_get_of_node(struct nand_device *nand) +{ + return mtd_get_of_node(&nand->mtd); +} + +/** + * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position + * @nand: NAND device + * @offs: absolute NAND offset (usually passed by the MTD layer) + * @pos: a NAND position object to fill in + * + * Converts @offs into a nand_pos representation. + * + * Return: the offset within the NAND page pointed by @pos. + */ +static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand, + loff_t offs, + struct nand_pos *pos) +{ + unsigned int pageoffs; + u64 tmp = offs; + + pageoffs = do_div(tmp, nand->memorg.pagesize); + pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock); + pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun); + pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; + pos->lun = do_div(tmp, nand->memorg.luns_per_target); + pos->target = tmp; + + return pageoffs; +} + +/** + * nanddev_pos_cmp() - Compare two NAND positions + * @a: First NAND position + * @b: Second NAND position + * + * Compares two NAND positions. + * + * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b. + */ +static inline int nanddev_pos_cmp(const struct nand_pos *a, + const struct nand_pos *b) +{ + if (a->target != b->target) + return a->target < b->target ? -1 : 1; + + if (a->lun != b->lun) + return a->lun < b->lun ? -1 : 1; + + if (a->eraseblock != b->eraseblock) + return a->eraseblock < b->eraseblock ? -1 : 1; + + if (a->page != b->page) + return a->page < b->page ? 
-1 : 1; + + return 0; +} + +/** + * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset + * @nand: NAND device + * @pos: the NAND position to convert + * + * Converts @pos NAND position into an absolute offset. + * + * Return: the absolute offset. Note that @pos points to the beginning of a + * page, if one wants to point to a specific offset within this page + * the returned offset has to be adjusted manually. + */ +static inline loff_t nanddev_pos_to_offs(struct nand_device *nand, + const struct nand_pos *pos) +{ + unsigned int npages; + + npages = pos->page + + ((pos->eraseblock + + (pos->lun + + (pos->target * nand->memorg.luns_per_target)) * + nand->memorg.eraseblocks_per_lun) * + nand->memorg.pages_per_eraseblock); + + return (loff_t)npages * nand->memorg.pagesize; +} + +/** + * nanddev_pos_to_row() - Extract a row address from a NAND position + * @nand: NAND device + * @pos: the position to convert + * + * Converts a NAND position into a row address that can then be passed to the + * device. + * + * Return: the row address extracted from @pos. + */ +static inline unsigned int nanddev_pos_to_row(struct nand_device *nand, + const struct nand_pos *pos) +{ + return (pos->lun << nand->rowconv.lun_addr_shift) | + (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) | + pos->page; +} + +/** + * nanddev_pos_next_target() - Move a position to the next target/die + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next target/die. Useful when you + * want to iterate over all targets/dies of a NAND device. + */ +static inline void nanddev_pos_next_target(struct nand_device *nand, + struct nand_pos *pos) +{ + pos->page = 0; + pos->plane = 0; + pos->eraseblock = 0; + pos->lun = 0; + pos->target++; +} + +/** + * nanddev_pos_next_lun() - Move a position to the next LUN + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next LUN. Useful when you want to + * iterate over all LUNs of a NAND device. + */ +static inline void nanddev_pos_next_lun(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->lun >= nand->memorg.luns_per_target - 1) + return nanddev_pos_next_target(nand, pos); + + pos->lun++; + pos->page = 0; + pos->plane = 0; + pos->eraseblock = 0; +} + +/** + * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next eraseblock. Useful when you + * want to iterate over all eraseblocks of a NAND device. + */ +static inline void nanddev_pos_next_eraseblock(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1) + return nanddev_pos_next_lun(nand, pos); + + pos->eraseblock++; + pos->page = 0; + pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; +} + +/** + * nanddev_pos_next_page() - Move a position to the next page + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next page. Useful when you want to + * iterate over all pages of a NAND device. 
+ */ +static inline void nanddev_pos_next_page(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->page >= nand->memorg.pages_per_eraseblock - 1) + return nanddev_pos_next_eraseblock(nand, pos); + + pos->page++; +} + +/** + * nand_io_iter_init - Initialize a NAND I/O iterator + * @nand: NAND device + * @offs: absolute offset + * @req: MTD request + * @iter: NAND I/O iterator + * + * Initializes a NAND iterator based on the information passed by the MTD + * layer. + */ +static inline void nanddev_io_iter_init(struct nand_device *nand, + loff_t offs, struct mtd_oob_ops *req, + struct nand_io_iter *iter) +{ + struct mtd_info *mtd = nanddev_to_mtd(nand); + + iter->req.mode = req->mode; + iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos); + iter->req.ooboffs = req->ooboffs; + iter->oobbytes_per_page = mtd_oobavail(mtd, req); + iter->dataleft = req->len; + iter->oobleft = req->ooblen; + iter->req.databuf.in = req->datbuf; + iter->req.datalen = min_t(unsigned int, + nand->memorg.pagesize - iter->req.dataoffs, + iter->dataleft); + iter->req.oobbuf.in = req->oobbuf; + iter->req.ooblen = min_t(unsigned int, + iter->oobbytes_per_page - iter->req.ooboffs, + iter->oobleft); +} + +/** + * nand_io_iter_next_page - Move to the next page + * @nand: NAND device + * @iter: NAND I/O iterator + * + * Updates the @iter to point to the next page. + */ +static inline void nanddev_io_iter_next_page(struct nand_device *nand, + struct nand_io_iter *iter) +{ + nanddev_pos_next_page(nand, &iter->req.pos); + iter->dataleft -= iter->req.datalen; + iter->req.databuf.in += iter->req.datalen; + iter->oobleft -= iter->req.ooblen; + iter->req.oobbuf.in += iter->req.ooblen; + iter->req.dataoffs = 0; + iter->req.ooboffs = 0; + iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize, + iter->dataleft); + iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page, + iter->oobleft); +} + +/** + * nand_io_iter_end - Should end iteration or not + * @nand: NAND device + * @iter: NAND I/O iterator + * + * Check whether @iter has reached the end of the NAND portion it was asked to + * iterate on or not. + * + * Return: true if @iter has reached the end of the iteration request, false + * otherwise. + */ +static inline bool nanddev_io_iter_end(struct nand_device *nand, + const struct nand_io_iter *iter) +{ + if (iter->dataleft || iter->oobleft) + return false; + + return true; +} + +/** + * nand_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O + * request + * @nand: NAND device + * @start: start address to read/write from + * @req: MTD I/O request + * @iter: NAND I/O iterator + * + * Should be used for iterate over pages that are contained in an MTD request. 
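+ *
+ * Illustrative sketch (not part of the upstream comment) of how a
+ * specialized layer might consume it; my_read_page() is a hypothetical
+ * driver helper and req is the struct mtd_oob_ops request received from
+ * the MTD layer:
+ *
+ *	struct nand_io_iter iter;
+ *	int ret = 0;
+ *
+ *	nanddev_io_for_each_page(nand, from, req, &iter) {
+ *		ret = my_read_page(nand, &iter.req);
+ *		if (ret)
+ *			break;
+ *	}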
+ */ +#define nanddev_io_for_each_page(nand, start, req, iter) \ + for (nanddev_io_iter_init(nand, start, req, iter); \ + !nanddev_io_iter_end(nand, iter); \ + nanddev_io_iter_next_page(nand, iter)) + +bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos); +bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos); +int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos); +int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos); + +/* BBT related functions */ +enum nand_bbt_block_status { + NAND_BBT_BLOCK_STATUS_UNKNOWN, + NAND_BBT_BLOCK_GOOD, + NAND_BBT_BLOCK_WORN, + NAND_BBT_BLOCK_RESERVED, + NAND_BBT_BLOCK_FACTORY_BAD, + NAND_BBT_BLOCK_NUM_STATUS, +}; + +int nanddev_bbt_init(struct nand_device *nand); +void nanddev_bbt_cleanup(struct nand_device *nand); +int nanddev_bbt_update(struct nand_device *nand); +int nanddev_bbt_get_block_status(const struct nand_device *nand, + unsigned int entry); +int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, + enum nand_bbt_block_status status); +int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block); + +/** + * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry + * @nand: NAND device + * @pos: the NAND position we want to get BBT entry for + * + * Return the BBT entry used to store information about the eraseblock pointed + * by @pos. + * + * Return: the BBT entry storing information about eraseblock pointed by @pos. + */ +static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand, + const struct nand_pos *pos) +{ + return pos->eraseblock + + ((pos->lun + (pos->target * nand->memorg.luns_per_target)) * + nand->memorg.eraseblocks_per_lun); +} + +/** + * nanddev_bbt_is_initialized() - Check if the BBT has been initialized + * @nand: NAND device + * + * Return: true if the BBT has been initialized, false otherwise. + */ +static inline bool nanddev_bbt_is_initialized(struct nand_device *nand) +{ + return !!nand->bbt.cache; +} + +/* MTD -> NAND helper functions. */ +int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo); + +#endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h new file mode 100644 index 000000000..98f20ef05 --- /dev/null +++ b/include/linux/mtd/nand_bch.h @@ -0,0 +1,68 @@ +/* + * Copyright © 2011 Ivan Djelic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file is the header for the NAND BCH ECC implementation. 
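+ *
+ * Rough lifetime sketch (added as illustration, not upstream text): a raw
+ * NAND driver calls nand_bch_init() once, uses nand_bch_calculate_ecc() and
+ * nand_bch_correct_data() for each ECC step, and releases the state with
+ * nand_bch_free().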
+ */ + +#ifndef __MTD_NAND_BCH_H__ +#define __MTD_NAND_BCH_H__ + +struct mtd_info; +struct nand_bch_control; + +#if defined(CONFIG_MTD_NAND_ECC_BCH) + +static inline int mtd_nand_has_bch(void) { return 1; } + +/* + * Calculate BCH ecc code + */ +int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, + u_char *ecc_code); + +/* + * Detect and correct bit errors + */ +int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, + u_char *calc_ecc); +/* + * Initialize BCH encoder/decoder + */ +struct nand_bch_control *nand_bch_init(struct mtd_info *mtd); +/* + * Release BCH encoder/decoder resources + */ +void nand_bch_free(struct nand_bch_control *nbc); + +#else /* !CONFIG_MTD_NAND_ECC_BCH */ + +static inline int mtd_nand_has_bch(void) { return 0; } + +static inline int +nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, + u_char *ecc_code) +{ + return -1; +} + +static inline int +nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc) +{ + return -ENOTSUPP; +} + +static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) +{ + return NULL; +} + +static inline void nand_bch_free(struct nand_bch_control *nbc) {} + +#endif /* CONFIG_MTD_NAND_ECC_BCH */ + +#endif /* __MTD_NAND_BCH_H__ */ diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h new file mode 100644 index 000000000..8a2decf74 --- /dev/null +++ b/include/linux/mtd/nand_ecc.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2000-2010 Steven J. Hill + * David Woodhouse + * Thomas Gleixner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file is the header for the ECC algorithm. + */ + +#ifndef __MTD_NAND_ECC_H__ +#define __MTD_NAND_ECC_H__ + +struct mtd_info; + +/* + * Calculate 3 byte ECC code for eccsize byte block + */ +void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize, + u_char *ecc_code); + +/* + * Calculate 3 byte ECC code for 256/512 byte block + */ +int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); + +/* + * Detect and correct a 1 bit error for eccsize byte block + */ +int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, + unsigned int eccsize); + +/* + * Detect and correct a 1 bit error for 256/512 byte block + */ +int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); + +#endif /* __MTD_NAND_ECC_H__ */ diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h new file mode 100644 index 000000000..357e88b32 --- /dev/null +++ b/include/linux/mtd/ndfc.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2006 Thomas Gleixner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Info: + * Contains defines, datastructures for ndfc nand controller + * + */ +#ifndef __LINUX_MTD_NDFC_H +#define __LINUX_MTD_NDFC_H + +/* NDFC Register definitions */ +#define NDFC_CMD 0x00 +#define NDFC_ALE 0x04 +#define NDFC_DATA 0x08 +#define NDFC_ECC 0x10 +#define NDFC_BCFG0 0x30 +#define NDFC_BCFG1 0x34 +#define NDFC_BCFG2 0x38 +#define NDFC_BCFG3 0x3c +#define NDFC_CCR 0x40 +#define NDFC_STAT 0x44 +#define NDFC_HWCTL 0x48 +#define NDFC_REVID 0x50 + +#define NDFC_STAT_IS_READY 0x01000000 + +#define NDFC_CCR_RESET_CE 0x80000000 /* CE Reset */ +#define NDFC_CCR_RESET_ECC 0x40000000 /* ECC Reset */ +#define NDFC_CCR_RIE 0x20000000 /* Interrupt Enable on Device Rdy */ +#define NDFC_CCR_REN 0x10000000 /* Enable wait for Rdy in LinearR */ +#define NDFC_CCR_ROMEN 0x08000000 /* Enable ROM In LinearR */ +#define NDFC_CCR_ARE 0x04000000 /* Auto-Read Enable */ +#define NDFC_CCR_BS(x) (((x) & 0x3) << 24) /* Select Bank on CE[x] */ +#define NDFC_CCR_BS_MASK 0x03000000 /* Select Bank */ +#define NDFC_CCR_ARAC0 0x00000000 /* 3 Addr, 1 Col 2 Row 512b page */ +#define NDFC_CCR_ARAC1 0x00001000 /* 4 Addr, 1 Col 3 Row 512b page */ +#define NDFC_CCR_ARAC2 0x00002000 /* 4 Addr, 2 Col 2 Row 2K page */ +#define NDFC_CCR_ARAC3 0x00003000 /* 5 Addr, 2 Col 3 Row 2K page */ +#define NDFC_CCR_ARAC_MASK 0x00003000 /* Auto-Read mode Addr Cycles */ +#define NDFC_CCR_RPG 0x0000C000 /* Auto-Read Page */ +#define NDFC_CCR_EBCC 0x00000004 /* EBC Configuration Completed */ +#define NDFC_CCR_DHC 0x00000002 /* Direct Hardware Control Enable */ + +#define NDFC_BxCFG_EN 0x80000000 /* Bank Enable */ +#define NDFC_BxCFG_CED 0x40000000 /* nCE Style */ +#define NDFC_BxCFG_SZ_MASK 0x08000000 /* Bank Size */ +#define NDFC_BxCFG_SZ_8BIT 0x00000000 /* 8bit */ +#define NDFC_BxCFG_SZ_16BIT 0x08000000 /* 16bit */ + +#define NDFC_MAX_BANKS 4 + +struct ndfc_controller_settings { + uint32_t ccr_settings; + uint64_t ndfc_erpn; +}; + +struct ndfc_chip_settings { + uint32_t bank_settings; +}; + +#endif diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h new file mode 100644 index 000000000..044daa02b --- /dev/null +++ b/include/linux/mtd/nftl.h @@ -0,0 +1,71 @@ +/* + * Copyright © 1999-2010 David Woodhouse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_NFTL_H__ +#define __MTD_NFTL_H__ + +#include +#include + +#include + +/* these info are used in ReplUnitTable */ +#define BLOCK_NIL 0xffff /* last block of a chain */ +#define BLOCK_FREE 0xfffe /* free block */ +#define BLOCK_NOTEXPLORED 0xfffd /* non explored block, only used during mounting */ +#define BLOCK_RESERVED 0xfffc /* bios block or bad block */ + +struct NFTLrecord { + struct mtd_blktrans_dev mbd; + __u16 MediaUnit, SpareMediaUnit; + __u32 EraseSize; + struct NFTLMediaHeader MediaHdr; + int usecount; + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + __u16 numvunits; + __u16 lastEUN; /* should be suppressed */ + __u16 numfreeEUNs; + __u16 LastFreeEUN; /* To speed up finding a free EUN */ + int head,sect,cyl; + __u16 *EUNtable; /* [numvunits]: First EUN for each virtual unit */ + __u16 *ReplUnitTable; /* [numEUNs]: ReplUnitNumber for each */ + unsigned int nb_blocks; /* number of physical blocks */ + unsigned int nb_boot_blocks; /* number of blocks used by the bios */ + struct erase_info instr; +}; + +int NFTL_mount(struct NFTLrecord *s); +int NFTL_formatblock(struct NFTLrecord *s, int block); + +int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, + size_t *retlen, uint8_t *buf); +int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, + size_t *retlen, uint8_t *buf); + +#ifndef NFTL_MAJOR +#define NFTL_MAJOR 93 +#endif + +#define MAX_NFTLS 16 +#define MAX_SECTORS_PER_UNIT 64 +#define NFTL_PARTN_BITS 4 + +#endif /* __MTD_NFTL_H__ */ diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h new file mode 100644 index 000000000..0aaa98b21 --- /dev/null +++ b/include/linux/mtd/onenand.h @@ -0,0 +1,240 @@ +/* + * linux/include/linux/mtd/onenand.h + * + * Copyright © 2005-2009 Samsung Electronics + * Kyungmin Park + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MTD_ONENAND_H +#define __LINUX_MTD_ONENAND_H + +#include +#include +#include +#include +#include + +#define MAX_DIES 2 +#define MAX_BUFFERRAM 2 + +/* Scan and identify a OneNAND device */ +extern int onenand_scan(struct mtd_info *mtd, int max_chips); +/* Free resources held by the OneNAND device */ +extern void onenand_release(struct mtd_info *mtd); + +/** + * struct onenand_bufferram - OneNAND BufferRAM Data + * @blockpage: block & page address in BufferRAM + */ +struct onenand_bufferram { + int blockpage; +}; + +/** + * struct onenand_chip - OneNAND Private Flash Chip Data + * @base: [BOARDSPECIFIC] address to access OneNAND + * @dies: [INTERN][FLEX-ONENAND] number of dies on chip + * @boundary: [INTERN][FLEX-ONENAND] Boundary of the dies + * @diesize: [INTERN][FLEX-ONENAND] Size of the dies + * @chipsize: [INTERN] the size of one chip for multichip arrays + * FIXME For Flex-OneNAND, chipsize holds maximum possible + * device size ie when all blocks are considered MLC + * @device_id: [INTERN] device ID + * @density_mask: chip density, used for DDP devices + * @verstion_id: [INTERN] version ID + * @options: [BOARDSPECIFIC] various chip options. 
They can + * partly be set to inform onenand_scan about + * @erase_shift: [INTERN] number of address bits in a block + * @page_shift: [INTERN] number of address bits in a page + * @page_mask: [INTERN] a page per block mask + * @writesize: [INTERN] a real page size + * @bufferram_index: [INTERN] BufferRAM index + * @bufferram: [INTERN] BufferRAM info + * @readw: [REPLACEABLE] hardware specific function for read short + * @writew: [REPLACEABLE] hardware specific function for write short + * @command: [REPLACEABLE] hardware specific function for writing + * commands to the chip + * @wait: [REPLACEABLE] hardware specific function for wait on ready + * @bbt_wait: [REPLACEABLE] hardware specific function for bbt wait on ready + * @unlock_all: [REPLACEABLE] hardware specific function for unlock all + * @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area + * @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area + * @read_word: [REPLACEABLE] hardware specific function for read + * register of OneNAND + * @write_word: [REPLACEABLE] hardware specific function for write + * register of OneNAND + * @mmcontrol: sync burst read function + * @chip_probe: [REPLACEABLE] hardware specific function for chip probe + * @block_markbad: function to mark a block as bad + * @scan_bbt: [REPLACEALBE] hardware specific function for scanning + * Bad block Table + * @chip_lock: [INTERN] spinlock used to protect access to this + * structure and the chip + * @wq: [INTERN] wait queue to sleep on if a OneNAND + * operation is in progress + * @state: [INTERN] the current state of the OneNAND device + * @page_buf: [INTERN] page main data buffer + * @oob_buf: [INTERN] page oob data buffer + * @subpagesize: [INTERN] holds the subpagesize + * @bbm: [REPLACEABLE] pointer to Bad Block Management + * @priv: [OPTIONAL] pointer to private chip date + */ +struct onenand_chip { + void __iomem *base; + unsigned dies; + unsigned boundary[MAX_DIES]; + loff_t diesize[MAX_DIES]; + unsigned int chipsize; + unsigned int device_id; + unsigned int version_id; + unsigned int technology; + unsigned int density_mask; + unsigned int options; + + unsigned int erase_shift; + unsigned int page_shift; + unsigned int page_mask; + unsigned int writesize; + + unsigned int bufferram_index; + struct onenand_bufferram bufferram[MAX_BUFFERRAM]; + + int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len); + int (*wait)(struct mtd_info *mtd, int state); + int (*bbt_wait)(struct mtd_info *mtd, int state); + void (*unlock_all)(struct mtd_info *mtd); + int (*read_bufferram)(struct mtd_info *mtd, int area, + unsigned char *buffer, int offset, size_t count); + int (*write_bufferram)(struct mtd_info *mtd, int area, + const unsigned char *buffer, int offset, size_t count); + unsigned short (*read_word)(void __iomem *addr); + void (*write_word)(unsigned short value, void __iomem *addr); + void (*mmcontrol)(struct mtd_info *mtd, int sync_read); + int (*chip_probe)(struct mtd_info *mtd); + int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); + int (*scan_bbt)(struct mtd_info *mtd); + int (*enable)(struct mtd_info *mtd); + int (*disable)(struct mtd_info *mtd); + + struct completion complete; + int irq; + + spinlock_t chip_lock; + wait_queue_head_t wq; + flstate_t state; + unsigned char *page_buf; + unsigned char *oob_buf; +#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE + unsigned char *verify_buf; +#endif + + int subpagesize; + + void *bbm; + + void *priv; + + /* + * Shows that the current operation is composed 
+ * of sequence of commands. For example, cache program. + * Such command status OnGo bit is checked at the end of + * sequence. + */ + unsigned int ongoing; +}; + +/* + * Helper macros + */ +#define ONENAND_PAGES_PER_BLOCK (1<<6) + +#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index) +#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1) +#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1) +#define ONENAND_SET_PREV_BUFFERRAM(this) (this->bufferram_index ^= 1) +#define ONENAND_SET_BUFFERRAM0(this) (this->bufferram_index = 0) +#define ONENAND_SET_BUFFERRAM1(this) (this->bufferram_index = 1) + +#define FLEXONENAND(this) \ + (this->device_id & DEVICE_IS_FLEXONENAND) +#define ONENAND_GET_SYS_CFG1(this) \ + (this->read_word(this->base + ONENAND_REG_SYS_CFG1)) +#define ONENAND_SET_SYS_CFG1(v, this) \ + (this->write_word(v, this->base + ONENAND_REG_SYS_CFG1)) + +#define ONENAND_IS_DDP(this) \ + (this->device_id & ONENAND_DEVICE_IS_DDP) + +#define ONENAND_IS_MLC(this) \ + (this->technology & ONENAND_TECHNOLOGY_IS_MLC) + +#ifdef CONFIG_MTD_ONENAND_2X_PROGRAM +#define ONENAND_IS_2PLANE(this) \ + (this->options & ONENAND_HAS_2PLANE) +#else +#define ONENAND_IS_2PLANE(this) (0) +#endif + +#define ONENAND_IS_CACHE_PROGRAM(this) \ + (this->options & ONENAND_HAS_CACHE_PROGRAM) + +#define ONENAND_IS_NOP_1(this) \ + (this->options & ONENAND_HAS_NOP_1) + +/* Check byte access in OneNAND */ +#define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1) + +/* + * Options bits + */ +#define ONENAND_HAS_CONT_LOCK (0x0001) +#define ONENAND_HAS_UNLOCK_ALL (0x0002) +#define ONENAND_HAS_2PLANE (0x0004) +#define ONENAND_HAS_4KB_PAGE (0x0008) +#define ONENAND_HAS_CACHE_PROGRAM (0x0010) +#define ONENAND_HAS_NOP_1 (0x0020) +#define ONENAND_SKIP_UNLOCK_CHECK (0x0100) +#define ONENAND_PAGEBUF_ALLOC (0x1000) +#define ONENAND_OOBBUF_ALLOC (0x2000) +#define ONENAND_SKIP_INITIAL_UNLOCKING (0x4000) + +#define ONENAND_IS_4KB_PAGE(this) \ + (this->options & ONENAND_HAS_4KB_PAGE) + +/* + * OneNAND Flash Manufacturer ID Codes + */ +#define ONENAND_MFR_SAMSUNG 0xec +#define ONENAND_MFR_NUMONYX 0x20 + +/** + * struct onenand_manufacturers - NAND Flash Manufacturer ID Structure + * @name: Manufacturer name + * @id: manufacturer ID code of device. +*/ +struct onenand_manufacturers { + int id; + char *name; +}; + +int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops); +unsigned onenand_block(struct onenand_chip *this, loff_t addr); +loff_t onenand_addr(struct onenand_chip *this, int block); +int flexonenand_region(struct mtd_info *mtd, loff_t addr); + +struct mtd_partition; + +struct onenand_platform_data { + void (*mmcontrol)(struct mtd_info *mtd, int sync_read); + int (*read_bufferram)(struct mtd_info *mtd, int area, + unsigned char *buffer, int offset, size_t count); + struct mtd_partition *parts; + unsigned int nr_parts; +}; + +#endif /* __LINUX_MTD_ONENAND_H */ diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h new file mode 100644 index 000000000..d60130f88 --- /dev/null +++ b/include/linux/mtd/onenand_regs.h @@ -0,0 +1,223 @@ +/* + * linux/include/linux/mtd/onenand_regs.h + * + * OneNAND Register header file + * + * Copyright (C) 2005-2007 Samsung Electronics + * Kyungmin Park + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ONENAND_REG_H +#define __ONENAND_REG_H + +/* Memory Address Map Translation (Word order) */ +#define ONENAND_MEMORY_MAP(x) ((x) << 1) + +/* + * External BufferRAM area + */ +#define ONENAND_BOOTRAM ONENAND_MEMORY_MAP(0x0000) +#define ONENAND_DATARAM ONENAND_MEMORY_MAP(0x0200) +#define ONENAND_SPARERAM ONENAND_MEMORY_MAP(0x8010) + +/* + * OneNAND Registers + */ +#define ONENAND_REG_MANUFACTURER_ID ONENAND_MEMORY_MAP(0xF000) +#define ONENAND_REG_DEVICE_ID ONENAND_MEMORY_MAP(0xF001) +#define ONENAND_REG_VERSION_ID ONENAND_MEMORY_MAP(0xF002) +#define ONENAND_REG_DATA_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF003) +#define ONENAND_REG_BOOT_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF004) +#define ONENAND_REG_NUM_BUFFERS ONENAND_MEMORY_MAP(0xF005) +#define ONENAND_REG_TECHNOLOGY ONENAND_MEMORY_MAP(0xF006) + +#define ONENAND_REG_START_ADDRESS1 ONENAND_MEMORY_MAP(0xF100) +#define ONENAND_REG_START_ADDRESS2 ONENAND_MEMORY_MAP(0xF101) +#define ONENAND_REG_START_ADDRESS3 ONENAND_MEMORY_MAP(0xF102) +#define ONENAND_REG_START_ADDRESS4 ONENAND_MEMORY_MAP(0xF103) +#define ONENAND_REG_START_ADDRESS5 ONENAND_MEMORY_MAP(0xF104) +#define ONENAND_REG_START_ADDRESS6 ONENAND_MEMORY_MAP(0xF105) +#define ONENAND_REG_START_ADDRESS7 ONENAND_MEMORY_MAP(0xF106) +#define ONENAND_REG_START_ADDRESS8 ONENAND_MEMORY_MAP(0xF107) + +#define ONENAND_REG_START_BUFFER ONENAND_MEMORY_MAP(0xF200) +#define ONENAND_REG_COMMAND ONENAND_MEMORY_MAP(0xF220) +#define ONENAND_REG_SYS_CFG1 ONENAND_MEMORY_MAP(0xF221) +#define ONENAND_REG_SYS_CFG2 ONENAND_MEMORY_MAP(0xF222) +#define ONENAND_REG_CTRL_STATUS ONENAND_MEMORY_MAP(0xF240) +#define ONENAND_REG_INTERRUPT ONENAND_MEMORY_MAP(0xF241) +#define ONENAND_REG_START_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24C) +#define ONENAND_REG_END_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24D) +#define ONENAND_REG_WP_STATUS ONENAND_MEMORY_MAP(0xF24E) + +#define ONENAND_REG_ECC_STATUS ONENAND_MEMORY_MAP(0xFF00) +#define ONENAND_REG_ECC_M0 ONENAND_MEMORY_MAP(0xFF01) +#define ONENAND_REG_ECC_S0 ONENAND_MEMORY_MAP(0xFF02) +#define ONENAND_REG_ECC_M1 ONENAND_MEMORY_MAP(0xFF03) +#define ONENAND_REG_ECC_S1 ONENAND_MEMORY_MAP(0xFF04) +#define ONENAND_REG_ECC_M2 ONENAND_MEMORY_MAP(0xFF05) +#define ONENAND_REG_ECC_S2 ONENAND_MEMORY_MAP(0xFF06) +#define ONENAND_REG_ECC_M3 ONENAND_MEMORY_MAP(0xFF07) +#define ONENAND_REG_ECC_S3 ONENAND_MEMORY_MAP(0xFF08) + +/* + * Device ID Register F001h (R) + */ +#define DEVICE_IS_FLEXONENAND (1 << 9) +#define FLEXONENAND_PI_MASK (0x3ff) +#define FLEXONENAND_PI_UNLOCK_SHIFT (14) +#define ONENAND_DEVICE_DENSITY_MASK (0xf) +#define ONENAND_DEVICE_DENSITY_SHIFT (4) +#define ONENAND_DEVICE_IS_DDP (1 << 3) +#define ONENAND_DEVICE_IS_DEMUX (1 << 2) +#define ONENAND_DEVICE_VCC_MASK (0x3) + +#define ONENAND_DEVICE_DENSITY_512Mb (0x002) +#define ONENAND_DEVICE_DENSITY_1Gb (0x003) +#define ONENAND_DEVICE_DENSITY_2Gb (0x004) +#define ONENAND_DEVICE_DENSITY_4Gb (0x005) + +/* + * Version ID Register F002h (R) + */ +#define ONENAND_VERSION_PROCESS_SHIFT (8) + +/* + * Technology Register F006h (R) + */ +#define ONENAND_TECHNOLOGY_IS_MLC (1 << 0) + +/* + * Start Address 1 F100h (R/W) & Start Address 2 F101h (R/W) + */ +#define ONENAND_DDP_SHIFT (15) +#define ONENAND_DDP_CHIP0 (0) +#define ONENAND_DDP_CHIP1 (1 << ONENAND_DDP_SHIFT) + +/* + * Start Address 8 F107h (R/W) + */ +/* Note: It's actually 0x3f in case of SLC */ +#define ONENAND_FPA_MASK (0x7f) +#define ONENAND_FPA_SHIFT (2) +#define ONENAND_FSA_MASK (0x03) + +/* + * Start Buffer Register F200h (R/W) + */ +#define ONENAND_BSA_MASK (0x03) +#define 
ONENAND_BSA_SHIFT (8) +#define ONENAND_BSA_BOOTRAM (0 << 2) +#define ONENAND_BSA_DATARAM0 (2 << 2) +#define ONENAND_BSA_DATARAM1 (3 << 2) +/* Note: It's actually 0x03 in case of SLC */ +#define ONENAND_BSC_MASK (0x07) + +/* + * Command Register F220h (R/W) + */ +#define ONENAND_CMD_READ (0x00) +#define ONENAND_CMD_READOOB (0x13) +#define ONENAND_CMD_PROG (0x80) +#define ONENAND_CMD_PROGOOB (0x1A) +#define ONENAND_CMD_2X_PROG (0x7D) +#define ONENAND_CMD_2X_CACHE_PROG (0x7F) +#define ONENAND_CMD_UNLOCK (0x23) +#define ONENAND_CMD_LOCK (0x2A) +#define ONENAND_CMD_LOCK_TIGHT (0x2C) +#define ONENAND_CMD_UNLOCK_ALL (0x27) +#define ONENAND_CMD_ERASE (0x94) +#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95) +#define ONENAND_CMD_ERASE_VERIFY (0x71) +#define ONENAND_CMD_RESET (0xF0) +#define ONENAND_CMD_OTP_ACCESS (0x65) +#define ONENAND_CMD_READID (0x90) +#define FLEXONENAND_CMD_PI_UPDATE (0x05) +#define FLEXONENAND_CMD_PI_ACCESS (0x66) +#define FLEXONENAND_CMD_RECOVER_LSB (0x05) + +/* NOTE: Those are not *REAL* commands */ +#define ONENAND_CMD_BUFFERRAM (0x1978) +#define FLEXONENAND_CMD_READ_PI (0x1985) + +/* + * System Configuration 1 Register F221h (R, R/W) + */ +#define ONENAND_SYS_CFG1_SYNC_READ (1 << 15) +#define ONENAND_SYS_CFG1_BRL_7 (7 << 12) +#define ONENAND_SYS_CFG1_BRL_6 (6 << 12) +#define ONENAND_SYS_CFG1_BRL_5 (5 << 12) +#define ONENAND_SYS_CFG1_BRL_4 (4 << 12) +#define ONENAND_SYS_CFG1_BRL_3 (3 << 12) +#define ONENAND_SYS_CFG1_BRL_10 (2 << 12) +#define ONENAND_SYS_CFG1_BRL_9 (1 << 12) +#define ONENAND_SYS_CFG1_BRL_8 (0 << 12) +#define ONENAND_SYS_CFG1_BRL_SHIFT (12) +#define ONENAND_SYS_CFG1_BL_32 (4 << 9) +#define ONENAND_SYS_CFG1_BL_16 (3 << 9) +#define ONENAND_SYS_CFG1_BL_8 (2 << 9) +#define ONENAND_SYS_CFG1_BL_4 (1 << 9) +#define ONENAND_SYS_CFG1_BL_CONT (0 << 9) +#define ONENAND_SYS_CFG1_BL_SHIFT (9) +#define ONENAND_SYS_CFG1_NO_ECC (1 << 8) +#define ONENAND_SYS_CFG1_RDY (1 << 7) +#define ONENAND_SYS_CFG1_INT (1 << 6) +#define ONENAND_SYS_CFG1_IOBE (1 << 5) +#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4) +#define ONENAND_SYS_CFG1_VHF (1 << 3) +#define ONENAND_SYS_CFG1_HF (1 << 2) +#define ONENAND_SYS_CFG1_SYNC_WRITE (1 << 1) + +/* + * Controller Status Register F240h (R) + */ +#define ONENAND_CTRL_ONGO (1 << 15) +#define ONENAND_CTRL_LOCK (1 << 14) +#define ONENAND_CTRL_LOAD (1 << 13) +#define ONENAND_CTRL_PROGRAM (1 << 12) +#define ONENAND_CTRL_ERASE (1 << 11) +#define ONENAND_CTRL_ERROR (1 << 10) +#define ONENAND_CTRL_RSTB (1 << 7) +#define ONENAND_CTRL_OTP_L (1 << 6) +#define ONENAND_CTRL_OTP_BL (1 << 5) + +/* + * Interrupt Status Register F241h (R) + */ +#define ONENAND_INT_MASTER (1 << 15) +#define ONENAND_INT_READ (1 << 7) +#define ONENAND_INT_WRITE (1 << 6) +#define ONENAND_INT_ERASE (1 << 5) +#define ONENAND_INT_RESET (1 << 4) +#define ONENAND_INT_CLEAR (0 << 0) + +/* + * NAND Flash Write Protection Status Register F24Eh (R) + */ +#define ONENAND_WP_US (1 << 2) +#define ONENAND_WP_LS (1 << 1) +#define ONENAND_WP_LTS (1 << 0) + +/* + * ECC Status Reigser FF00h (R) + */ +#define ONENAND_ECC_1BIT (1 << 0) +#define ONENAND_ECC_1BIT_ALL (0x5555) +#define ONENAND_ECC_2BIT (1 << 1) +#define ONENAND_ECC_2BIT_ALL (0xAAAA) +#define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010) +#define ONENAND_ECC_3BIT (1 << 2) +#define ONENAND_ECC_4BIT (1 << 3) +#define ONENAND_ECC_4BIT_UNCORRECTABLE (0x1010) + +/* + * One-Time Programmable (OTP) + */ +#define FLEXONENAND_OTP_LOCK_OFFSET (2048) +#define ONENAND_OTP_LOCK_OFFSET (14) + +#endif /* __ONENAND_REG_H */ diff --git a/include/linux/mtd/partitions.h 
b/include/linux/mtd/partitions.h new file mode 100644 index 000000000..11cb0c50c --- /dev/null +++ b/include/linux/mtd/partitions.h @@ -0,0 +1,114 @@ +/* + * MTD partitioning layer definitions + * + * (C) 2000 Nicolas Pitre + * + * This code is GPL + */ + +#ifndef MTD_PARTITIONS_H +#define MTD_PARTITIONS_H + +#include + + +/* + * Partition definition structure: + * + * An array of struct partition is passed along with a MTD object to + * mtd_device_register() to create them. + * + * For each partition, these fields are available: + * name: string that will be used to label the partition's MTD device. + * types: some partitions can be containers using specific format to describe + * embedded subpartitions / volumes. E.g. many home routers use "firmware" + * partition that contains at least kernel and rootfs. In such case an + * extra parser is needed that will detect these dynamic partitions and + * report them to the MTD subsystem. If set this property stores an array + * of parser names to use when looking for subpartitions. + * size: the partition size; if defined as MTDPART_SIZ_FULL, the partition + * will extend to the end of the master MTD device. + * offset: absolute starting position within the master MTD device; if + * defined as MTDPART_OFS_APPEND, the partition will start where the + * previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block; + * if MTDPART_OFS_RETAIN, consume as much as possible, leaving size + * after the end of partition. + * mask_flags: contains flags that have to be masked (removed) from the + * master MTD flag set for the corresponding MTD partition. + * For example, to force a read-only partition, simply adding + * MTD_WRITEABLE to the mask_flags will do the trick. + * + * Note: writeable partitions require their size and offset be + * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK). + */ + +struct mtd_partition { + const char *name; /* identifier string */ + const char *const *types; /* names of parsers to use if any */ + uint64_t size; /* partition size */ + uint64_t offset; /* offset within the master MTD space */ + uint32_t mask_flags; /* master MTD flags to mask out for this partition */ + struct device_node *of_node; +}; + +#define MTDPART_OFS_RETAIN (-3) +#define MTDPART_OFS_NXTBLK (-2) +#define MTDPART_OFS_APPEND (-1) +#define MTDPART_SIZ_FULL (0) + + +struct mtd_info; +struct device_node; + +/** + * struct mtd_part_parser_data - used to pass data to MTD partition parsers. + * @origin: for RedBoot, start address of MTD device + */ +struct mtd_part_parser_data { + unsigned long origin; +}; + + +/* + * Functions dealing with the various ways of partitioning the space + */ + +struct mtd_part_parser { + struct list_head list; + struct module *owner; + const char *name; + const struct of_device_id *of_match_table; + int (*parse_fn)(struct mtd_info *, const struct mtd_partition **, + struct mtd_part_parser_data *); + void (*cleanup)(const struct mtd_partition *pparts, int nr_parts); +}; + +/* Container for passing around a set of parsed partitions */ +struct mtd_partitions { + const struct mtd_partition *parts; + int nr_parts; + const struct mtd_part_parser *parser; +}; + +extern int __register_mtd_parser(struct mtd_part_parser *parser, + struct module *owner); +#define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE) + +extern void deregister_mtd_parser(struct mtd_part_parser *parser); + +/* + * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't + * do anything special in module init/exit. 
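For illustration, here is a minimal board-style partition table built from struct mtd_partition and the offset macros above; the partition names and sizes are hypothetical, and the table would be handed to mtd_device_register() together with the master MTD device, as the comment describes.

#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const struct mtd_partition board_parts[] = {
	{
		.name		= "bootloader",
		.offset		= 0,
		.size		= SZ_256K,
		.mask_flags	= MTD_WRITEABLE,	/* mask out MTD_WRITEABLE: read-only */
	}, {
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,	/* starts where "bootloader" ended */
		.size		= SZ_4M,
	}, {
		.name		= "rootfs",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,	/* runs to the end of the master device */
	},
};

/* Registered from board or driver code, e.g.:
 *	mtd_device_register(master, board_parts, ARRAY_SIZE(board_parts));
 */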
Each driver may only use this macro + * once, and calling it replaces module_init() and module_exit(). + */ +#define module_mtd_part_parser(__mtd_part_parser) \ + module_driver(__mtd_part_parser, register_mtd_parser, \ + deregister_mtd_parser) + +int mtd_is_partition(const struct mtd_info *mtd); +int mtd_add_partition(struct mtd_info *master, const char *name, + long long offset, long long length); +int mtd_del_partition(struct mtd_info *master, int partno); +uint64_t mtd_get_device_size(const struct mtd_info *mtd); + +#endif diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h new file mode 100644 index 000000000..c65d7a3be --- /dev/null +++ b/include/linux/mtd/pfow.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Primary function overlay window definitions + * and service functions used by LPDDR chips + */ +#ifndef __LINUX_MTD_PFOW_H +#define __LINUX_MTD_PFOW_H + +#include + +/* PFOW registers addressing */ +/* Address of symbol "P" */ +#define PFOW_QUERY_STRING_P 0x0000 +/* Address of symbol "F" */ +#define PFOW_QUERY_STRING_F 0x0002 +/* Address of symbol "O" */ +#define PFOW_QUERY_STRING_O 0x0004 +/* Address of symbol "W" */ +#define PFOW_QUERY_STRING_W 0x0006 +/* Identification info for LPDDR chip */ +#define PFOW_MANUFACTURER_ID 0x0020 +#define PFOW_DEVICE_ID 0x0022 +/* Address in PFOW where prog buffer can can be found */ +#define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 +/* Size of program buffer in words */ +#define PFOW_PROGRAM_BUFFER_SIZE 0x0042 +/* Address command code register */ +#define PFOW_COMMAND_CODE 0x0080 +/* command data register */ +#define PFOW_COMMAND_DATA 0x0084 +/* command address register lower address bits */ +#define PFOW_COMMAND_ADDRESS_L 0x0088 +/* command address register upper address bits */ +#define PFOW_COMMAND_ADDRESS_H 0x008a +/* number of bytes to be proggrammed lower address bits */ +#define PFOW_DATA_COUNT_L 0x0090 +/* number of bytes to be proggrammed higher address bits */ +#define PFOW_DATA_COUNT_H 0x0092 +/* command execution register, the only possible value is 0x01 */ +#define PFOW_COMMAND_EXECUTE 0x00c0 +/* 0x01 should be written at this address to clear buffer */ +#define PFOW_CLEAR_PROGRAM_BUFFER 0x00c4 +/* device program/erase suspend register */ +#define PFOW_PROGRAM_ERASE_SUSPEND 0x00c8 +/* device status register */ +#define PFOW_DSR 0x00cc + +/* LPDDR memory device command codes */ +/* They are possible values of PFOW command code register */ +#define LPDDR_WORD_PROGRAM 0x0041 +#define LPDDR_BUFF_PROGRAM 0x00E9 +#define LPDDR_BLOCK_ERASE 0x0020 +#define LPDDR_LOCK_BLOCK 0x0061 +#define LPDDR_UNLOCK_BLOCK 0x0062 +#define LPDDR_READ_BLOCK_LOCK_STATUS 0x0065 +#define LPDDR_INFO_QUERY 0x0098 +#define LPDDR_READ_OTP 0x0097 +#define LPDDR_PROG_OTP 0x00C0 +#define LPDDR_RESUME 0x00D0 + +/* Defines possible value of PFOW command execution register */ +#define LPDDR_START_EXECUTION 0x0001 + +/* Defines possible value of PFOW program/erase suspend register */ +#define LPDDR_SUSPEND 0x0001 + +/* Possible values of PFOW device status register */ +/* access R - read; RC read & clearable */ +#define DSR_DPS (1<<1) /* RC; device protect status + * 0 - not protected 1 - locked */ +#define DSR_PSS (1<<2) /* R; program suspend status; + * 0-prog in progress/completed, + * 1- prog suspended */ +#define DSR_VPPS (1<<3) /* RC; 0-Vpp OK, * 1-Vpp low */ +#define DSR_PROGRAM_STATUS (1<<4) /* RC; 0-successful, 1-error */ +#define DSR_ERASE_STATUS (1<<5) /* RC; erase or blank check status; + * 0-success erase/blank check, + * 1 blank check 
error */ +#define DSR_ESS (1<<6) /* R; erase suspend status; + * 0-erase in progress/complete, + * 1 erase suspended */ +#define DSR_READY_STATUS (1<<7) /* R; Device status + * 0-busy, + * 1-ready */ +#define DSR_RPS (0x3<<8) /* RC; region program status + * 00 - Success, + * 01-re-program attempt in region with + * object mode data, + * 10-object mode program w attempt in + * region with control mode data + * 11-attempt to program invalid half + * with 0x41 command */ +#define DSR_AOS (1<<12) /* RC; 1- AO related failure */ +#define DSR_AVAILABLE (1<<15) /* R; Device availability + * 1 - Device available + * 0 - not available */ + +/* The superset of all possible error bits in DSR */ +#define DSR_ERR 0x133A + +static inline void send_pfow_command(struct map_info *map, + unsigned long cmd_code, unsigned long adr, + unsigned long len, map_word *datum) +{ + int bits_per_chip = map_bankwidth(map) * 8; + + map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE); + map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)), + map->pfow_base + PFOW_COMMAND_ADDRESS_L); + map_write(map, CMD(adr>>bits_per_chip), + map->pfow_base + PFOW_COMMAND_ADDRESS_H); + if (len) { + map_write(map, CMD(len & ((1<<bits_per_chip) - 1)), + map->pfow_base + PFOW_DATA_COUNT_L); + map_write(map, CMD(len>>bits_per_chip), + map->pfow_base + PFOW_DATA_COUNT_H); + } + if (datum) + map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA); + + /* Command execution start */ + map_write(map, CMD(LPDDR_START_EXECUTION), + map->pfow_base + PFOW_COMMAND_EXECUTE); +} + +static inline void print_drs_error(unsigned dsr) +{ + int prog_status = (dsr & DSR_RPS) >> 8; + + if (!(dsr & DSR_AVAILABLE)) + printk(KERN_NOTICE"DSR.15: (0) Device not Available\n"); + if ((prog_status & 0x03) == 0x03) + printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid " + "half with 41h command\n"); + else if (prog_status & 0x02) + printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt " + "in region with Control Mode data\n"); + else if (prog_status & 0x01) + printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region " + "with Object Mode data\n"); + if (!(dsr & DSR_READY_STATUS)) + printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n"); + if (dsr & DSR_ESS) + printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n"); + if (dsr & DSR_ERASE_STATUS) + printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n"); + if (dsr & DSR_PROGRAM_STATUS) + printk(KERN_NOTICE"DSR.4: (1) Program Error\n"); + if (dsr & DSR_VPPS) + printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation " + "aborted\n"); + if (dsr & DSR_PSS) + printk(KERN_NOTICE"DSR.2: (1) Program suspended\n"); + if (dsr & DSR_DPS) + printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt " + "on locked block\n"); +} +#endif /* __LINUX_MTD_PFOW_H */ diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h new file mode 100644 index 000000000..aa6a2633c --- /dev/null +++ b/include/linux/mtd/physmap.h @@ -0,0 +1,36 @@ +/* + * For boards with physically mapped flash and using + * drivers/mtd/maps/physmap.c mapping driver. + * + * Copyright (C) 2003 MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version.
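Returning to the PFOW status register bits defined above: the short userspace demo below (masks repeated locally, sample value made up) shows how a DSR word is checked against the DSR_ERR superset, mirroring what print_drs_error() reports.

#include <stdio.h>

#define DSR_DPS            (1 << 1)
#define DSR_PSS            (1 << 2)
#define DSR_VPPS           (1 << 3)
#define DSR_PROGRAM_STATUS (1 << 4)
#define DSR_ERASE_STATUS   (1 << 5)
#define DSR_READY_STATUS   (1 << 7)
#define DSR_AVAILABLE      (1 << 15)
#define DSR_ERR            0x133A	/* superset of all error bits */

int main(void)
{
	/* Sample word: device available and ready, but Vpp was low */
	unsigned int dsr = DSR_AVAILABLE | DSR_READY_STATUS | DSR_VPPS;

	printf("available : %d\n", !!(dsr & DSR_AVAILABLE));
	printf("ready     : %d\n", !!(dsr & DSR_READY_STATUS));
	printf("any error : %d (error bits 0x%04x)\n",
	       !!(dsr & DSR_ERR), dsr & DSR_ERR);
	return 0;
}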
+ * + */ + +#ifndef __LINUX_MTD_PHYSMAP__ +#define __LINUX_MTD_PHYSMAP__ + +#include +#include + +struct map_info; +struct platform_device; + +struct physmap_flash_data { + unsigned int width; + int (*init)(struct platform_device *); + void (*exit)(struct platform_device *); + void (*set_vpp)(struct platform_device *, int); + unsigned int nr_parts; + unsigned int pfow_base; + char *probe_type; + struct mtd_partition *parts; + const char * const *part_probe_types; +}; + +#endif /* __LINUX_MTD_PHYSMAP__ */ diff --git a/include/linux/mtd/pismo.h b/include/linux/mtd/pismo.h new file mode 100644 index 000000000..8dfb7e142 --- /dev/null +++ b/include/linux/mtd/pismo.h @@ -0,0 +1,17 @@ +/* + * PISMO memory driver - http://www.pismoworld.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + */ +#ifndef __LINUX_MTD_PISMO_H +#define __LINUX_MTD_PISMO_H + +struct pismo_pdata { + void (*set_vpp)(void *, int); + void *vpp_data; + phys_addr_t cs_addrs[5]; +}; + +#endif diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h new file mode 100644 index 000000000..44212d65a --- /dev/null +++ b/include/linux/mtd/plat-ram.h @@ -0,0 +1,34 @@ +/* linux/include/linux/mtd/plat-ram.h + * + * (c) 2004 Simtec Electronics + * http://www.simtec.co.uk/products/SWLINUX/ + * Ben Dooks + * + * Generic platform device based RAM map + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __LINUX_MTD_PLATRAM_H +#define __LINUX_MTD_PLATRAM_H __FILE__ + +#define PLATRAM_RO (0) +#define PLATRAM_RW (1) + +struct platdata_mtd_ram { + const char *mapname; + const char * const *map_probes; + const char * const *probes; + struct mtd_partition *partitions; + int nr_partitions; + int bankwidth; + + /* control callbacks */ + + void (*set_rw)(struct device *dev, int to); +}; + +#endif /* __LINUX_MTD_PLATRAM_H */ diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h new file mode 100644 index 000000000..df5b9fdde --- /dev/null +++ b/include/linux/mtd/qinfo.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MTD_QINFO_H +#define __LINUX_MTD_QINFO_H + +#include +#include +#include +#include +#include +#include +#include + +/* lpddr_private describes lpddr flash chip in memory map + * @ManufactId - Chip Manufacture ID + * @DevId - Chip Device ID + * @qinfo - pointer to qinfo records describing the chip + * @numchips - number of chips including virual RWW partitions + * @chipshift - Chip/partition size 2^chipshift + * @chips - per-chip data structure + */ +struct lpddr_private { + uint16_t ManufactId; + uint16_t DevId; + struct qinfo_chip *qinfo; + int numchips; + unsigned long chipshift; + struct flchip chips[0]; +}; + +/* qinfo_query_info structure contains request information for + * each qinfo record + * @major - major number of qinfo record + * @major - minor number of qinfo record + * @id_str - descriptive string to access the record + * @desc - detailed description for the qinfo record + */ +struct qinfo_query_info { + uint8_t major; + uint8_t minor; + char *id_str; + char *desc; +}; + +/* + * qinfo_chip structure contains necessary qinfo records data + * @DevSizeShift - Device size 2^n bytes + * @BufSizeShift - Program buffer size 2^n bytes + * 
@TotalBlocksNum - Total number of blocks + * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes + * @HWPartsNum - Number of hardware partitions + * @SuspEraseSupp - Suspend erase supported + * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec + * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec + * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec + */ +struct qinfo_chip { + /* General device info */ + uint16_t DevSizeShift; + uint16_t BufSizeShift; + /* Erase block information */ + uint16_t TotalBlocksNum; + uint16_t UniformBlockSizeShift; + /* Partition information */ + uint16_t HWPartsNum; + /* Optional features */ + uint16_t SuspEraseSupp; + /* Operation typical time */ + uint16_t SingleWordProgTime; + uint16_t ProgBufferTime; + uint16_t BlockEraseTime; +}; + +/* defines for fixup usage */ +#define LPDDR_MFR_ANY 0xffff +#define LPDDR_ID_ANY 0xffff +#define NUMONYX_MFGR_ID 0x0089 +#define R18_DEVICE_ID_1G 0x893c + +static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map) +{ + map_word val = { {0} }; + val.x[0] = cmd; + return val; +} + +#define CMD(x) lpddr_build_cmd(x, map) +#define CMDVAL(cmd) cmd.x[0] + +struct mtd_info *lpddr_cmdset(struct map_info *); + +#endif + diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h new file mode 100644 index 000000000..a4be6b2bc --- /dev/null +++ b/include/linux/mtd/rawnand.h @@ -0,0 +1,1755 @@ +/* + * Copyright © 2000-2010 David Woodhouse + * Steven J. Hill + * Thomas Gleixner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Info: + * Contains standard defines and IDs for NAND flash devices + * + * Changelog: + * See git changelog. + */ +#ifndef __LINUX_MTD_RAWNAND_H +#define __LINUX_MTD_RAWNAND_H + +#include +#include +#include +#include +#include +#include +#include + +struct nand_chip; +struct nand_flash_dev; + +/* Scan and identify a NAND device */ +int nand_scan_with_ids(struct nand_chip *chip, int max_chips, + struct nand_flash_dev *ids); + +static inline int nand_scan(struct nand_chip *chip, int max_chips) +{ + return nand_scan_with_ids(chip, max_chips, NULL); +} + +/* Internal helper for board drivers which need to override command function */ +void nand_wait_ready(struct mtd_info *mtd); + +/* The maximum number of NAND chips in an array */ +#define NAND_MAX_CHIPS 8 + +/* + * Constants for hardware specific CLE/ALE/NCE function + * + * These are bits which can be or'ed to set/clear multiple + * bits in one go. 
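Going back to the qinfo_chip record documented above: its geometry and timing fields are stored as powers of two, so a driver derives the real values by shifting. A standalone sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

/* Shift-encoded geometry, as described in the qinfo_chip comment above.
 * The values below are made up purely for illustration. */
struct qinfo_example {
	uint16_t DevSizeShift;          /* device size = 2^n bytes */
	uint16_t UniformBlockSizeShift; /* block size  = 2^n bytes */
	uint16_t BlockEraseTime;        /* erase time  = 2^n ms    */
};

int main(void)
{
	struct qinfo_example q = { 27, 18, 10 };	/* 128 MiB, 256 KiB blocks */

	unsigned long long devsize = 1ULL << q.DevSizeShift;
	unsigned long blocksize = 1UL << q.UniformBlockSizeShift;

	printf("device size : %llu bytes\n", devsize);
	printf("block size  : %lu bytes\n", blocksize);
	printf("block count : %llu\n", devsize / blocksize);
	printf("erase time  : %lu ms (typical)\n", 1UL << q.BlockEraseTime);
	return 0;
}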
+ */ +/* Select the chip by setting nCE to low */ +#define NAND_NCE 0x01 +/* Select the command latch by setting CLE to high */ +#define NAND_CLE 0x02 +/* Select the address latch by setting ALE to high */ +#define NAND_ALE 0x04 + +#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE) +#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE) +#define NAND_CTRL_CHANGE 0x80 + +/* + * Standard NAND flash commands + */ +#define NAND_CMD_READ0 0 +#define NAND_CMD_READ1 1 +#define NAND_CMD_RNDOUT 5 +#define NAND_CMD_PAGEPROG 0x10 +#define NAND_CMD_READOOB 0x50 +#define NAND_CMD_ERASE1 0x60 +#define NAND_CMD_STATUS 0x70 +#define NAND_CMD_SEQIN 0x80 +#define NAND_CMD_RNDIN 0x85 +#define NAND_CMD_READID 0x90 +#define NAND_CMD_ERASE2 0xd0 +#define NAND_CMD_PARAM 0xec +#define NAND_CMD_GET_FEATURES 0xee +#define NAND_CMD_SET_FEATURES 0xef +#define NAND_CMD_RESET 0xff + +/* Extended commands for large page devices */ +#define NAND_CMD_READSTART 0x30 +#define NAND_CMD_RNDOUTSTART 0xE0 +#define NAND_CMD_CACHEDPROG 0x15 + +#define NAND_CMD_NONE -1 + +/* Status bits */ +#define NAND_STATUS_FAIL 0x01 +#define NAND_STATUS_FAIL_N1 0x02 +#define NAND_STATUS_TRUE_READY 0x20 +#define NAND_STATUS_READY 0x40 +#define NAND_STATUS_WP 0x80 + +#define NAND_DATA_IFACE_CHECK_ONLY -1 + +/* + * Constants for ECC_MODES + */ +typedef enum { + NAND_ECC_NONE, + NAND_ECC_SOFT, + NAND_ECC_HW, + NAND_ECC_HW_SYNDROME, + NAND_ECC_HW_OOB_FIRST, + NAND_ECC_ON_DIE, +} nand_ecc_modes_t; + +enum nand_ecc_algo { + NAND_ECC_UNKNOWN, + NAND_ECC_HAMMING, + NAND_ECC_BCH, + NAND_ECC_RS, +}; + +/* + * Constants for Hardware ECC + */ +/* Reset Hardware ECC for read */ +#define NAND_ECC_READ 0 +/* Reset Hardware ECC for write */ +#define NAND_ECC_WRITE 1 +/* Enable Hardware ECC before syndrome is read back from flash */ +#define NAND_ECC_READSYN 2 + +/* + * Enable generic NAND 'page erased' check. This check is only done when + * ecc.correct() returns -EBADMSG. + * Set this flag if your implementation does not fix bitflips in erased + * pages and you want to rely on the default implementation. + */ +#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) +#define NAND_ECC_MAXIMIZE BIT(1) + +/* Bit mask for flags passed to do_nand_read_ecc */ +#define NAND_GET_DEVICE 0x80 + + +/* + * Option constants for bizarre disfunctionality and real + * features. + */ +/* Buswidth is 16 bit */ +#define NAND_BUSWIDTH_16 0x00000002 +/* Chip has cache program function */ +#define NAND_CACHEPRG 0x00000008 +/* + * Chip requires ready check on read (for auto-incremented sequential read). + * True only for small page devices; large page devices do not support + * autoincrement. + */ +#define NAND_NEED_READRDY 0x00000100 + +/* Chip does not allow subpage writes */ +#define NAND_NO_SUBPAGE_WRITE 0x00000200 + +/* Device is one of 'new' xD cards that expose fake nand command set */ +#define NAND_BROKEN_XD 0x00000400 + +/* Device behaves just like nand, but is readonly */ +#define NAND_ROM 0x00000800 + +/* Device supports subpage reads */ +#define NAND_SUBPAGE_READ 0x00001000 + +/* + * Some MLC NANDs need data scrambling to limit bitflips caused by repeated + * patterns. 
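The NAND_STATUS_* bits above describe the byte returned after NAND_CMD_STATUS (0x70). A standalone sketch of how such a status byte is interpreted (masks repeated locally, sample values made up):

#include <stdio.h>

#define NAND_STATUS_FAIL       0x01
#define NAND_STATUS_TRUE_READY 0x20
#define NAND_STATUS_READY      0x40
#define NAND_STATUS_WP         0x80

/* Interpret the byte returned by NAND_CMD_STATUS */
static void decode_status(unsigned char status)
{
	printf("status 0x%02x: %s, %s, %s\n", status,
	       (status & NAND_STATUS_READY) ? "ready" : "busy",
	       (status & NAND_STATUS_WP) ? "writable" : "write-protected",
	       (status & NAND_STATUS_FAIL) ? "last op FAILED" : "last op passed");
}

int main(void)
{
	decode_status(0xe0);	/* ready, writable, previous operation passed */
	decode_status(0xc1);	/* ready, writable, previous program/erase failed */
	return 0;
}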
+ */ +#define NAND_NEED_SCRAMBLING 0x00002000 + +/* Device needs 3rd row address cycle */ +#define NAND_ROW_ADDR_3 0x00004000 + +/* Options valid for Samsung large page devices */ +#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG + +/* Macros to identify the above */ +#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) +#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) +#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE) + +/* Non chip related options */ +/* This option skips the bbt scan during initialization. */ +#define NAND_SKIP_BBTSCAN 0x00010000 +/* Chip may not exist, so silence any errors in scan */ +#define NAND_SCAN_SILENT_NODEV 0x00040000 +/* + * Autodetect nand buswidth with readid/onfi. + * This suppose the driver will configure the hardware in 8 bits mode + * when calling nand_scan_ident, and update its configuration + * before calling nand_scan_tail. + */ +#define NAND_BUSWIDTH_AUTO 0x00080000 +/* + * This option could be defined by controller drivers to protect against + * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers + */ +#define NAND_USE_BOUNCE_BUFFER 0x00100000 + +/* + * In case your controller is implementing ->cmd_ctrl() and is relying on the + * default ->cmdfunc() implementation, you may want to let the core handle the + * tCCS delay which is required when a column change (RNDIN or RNDOUT) is + * requested. + * If your controller already takes care of this delay, you don't need to set + * this flag. + */ +#define NAND_WAIT_TCCS 0x00200000 + +/* + * Whether the NAND chip is a boot medium. Drivers might use this information + * to select ECC algorithms supported by the boot ROM or similar restrictions. + */ +#define NAND_IS_BOOT_MEDIUM 0x00400000 + +/* Options set by nand scan */ +/* Nand scan has allocated controller struct */ +#define NAND_CONTROLLER_ALLOC 0x80000000 + +/* Cell info constants */ +#define NAND_CI_CHIPNR_MSK 0x03 +#define NAND_CI_CELLTYPE_MSK 0x0C +#define NAND_CI_CELLTYPE_SHIFT 2 + +/* Keep gcc happy */ +struct nand_chip; + +/* ONFI version bits */ +#define ONFI_VERSION_1_0 BIT(1) +#define ONFI_VERSION_2_0 BIT(2) +#define ONFI_VERSION_2_1 BIT(3) +#define ONFI_VERSION_2_2 BIT(4) +#define ONFI_VERSION_2_3 BIT(5) +#define ONFI_VERSION_3_0 BIT(6) +#define ONFI_VERSION_3_1 BIT(7) +#define ONFI_VERSION_3_2 BIT(8) +#define ONFI_VERSION_4_0 BIT(9) + +/* ONFI features */ +#define ONFI_FEATURE_16_BIT_BUS (1 << 0) +#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) + +/* ONFI timing mode, used in both asynchronous and synchronous mode */ +#define ONFI_TIMING_MODE_0 (1 << 0) +#define ONFI_TIMING_MODE_1 (1 << 1) +#define ONFI_TIMING_MODE_2 (1 << 2) +#define ONFI_TIMING_MODE_3 (1 << 3) +#define ONFI_TIMING_MODE_4 (1 << 4) +#define ONFI_TIMING_MODE_5 (1 << 5) +#define ONFI_TIMING_MODE_UNKNOWN (1 << 6) + +/* ONFI feature number/address */ +#define ONFI_FEATURE_NUMBER 256 +#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1 + +/* Vendor-specific feature address (Micron) */ +#define ONFI_FEATURE_ADDR_READ_RETRY 0x89 +#define ONFI_FEATURE_ON_DIE_ECC 0x90 +#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3) + +/* ONFI subfeature parameters length */ +#define ONFI_SUBFEATURE_PARAM_LEN 4 + +/* ONFI optional commands SET/GET FEATURES supported? 
*/ +#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2) + +struct nand_onfi_params { + /* rev info and features block */ + /* 'O' 'N' 'F' 'I' */ + u8 sig[4]; + __le16 revision; + __le16 features; + __le16 opt_cmd; + u8 reserved0[2]; + __le16 ext_param_page_length; /* since ONFI 2.1 */ + u8 num_of_param_pages; /* since ONFI 2.1 */ + u8 reserved1[17]; + + /* manufacturer information block */ + char manufacturer[12]; + char model[20]; + u8 jedec_id; + __le16 date_code; + u8 reserved2[13]; + + /* memory organization block */ + __le32 byte_per_page; + __le16 spare_bytes_per_page; + __le32 data_bytes_per_ppage; + __le16 spare_bytes_per_ppage; + __le32 pages_per_block; + __le32 blocks_per_lun; + u8 lun_count; + u8 addr_cycles; + u8 bits_per_cell; + __le16 bb_per_lun; + __le16 block_endurance; + u8 guaranteed_good_blocks; + __le16 guaranteed_block_endurance; + u8 programs_per_page; + u8 ppage_attr; + u8 ecc_bits; + u8 interleaved_bits; + u8 interleaved_ops; + u8 reserved3[13]; + + /* electrical parameter block */ + u8 io_pin_capacitance_max; + __le16 async_timing_mode; + __le16 program_cache_timing_mode; + __le16 t_prog; + __le16 t_bers; + __le16 t_r; + __le16 t_ccs; + __le16 src_sync_timing_mode; + u8 src_ssync_features; + __le16 clk_pin_capacitance_typ; + __le16 io_pin_capacitance_typ; + __le16 input_pin_capacitance_typ; + u8 input_pin_capacitance_max; + u8 driver_strength_support; + __le16 t_int_r; + __le16 t_adl; + u8 reserved4[8]; + + /* vendor */ + __le16 vendor_revision; + u8 vendor[88]; + + __le16 crc; +} __packed; + +#define ONFI_CRC_BASE 0x4F4E + +/* Extended ECC information Block Definition (since ONFI 2.1) */ +struct onfi_ext_ecc_info { + u8 ecc_bits; + u8 codeword_size; + __le16 bb_per_lun; + __le16 block_endurance; + u8 reserved[2]; +} __packed; + +#define ONFI_SECTION_TYPE_0 0 /* Unused section. */ +#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */ +#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */ +struct onfi_ext_section { + u8 type; + u8 length; +} __packed; + +#define ONFI_EXT_SECTION_MAX 8 + +/* Extended Parameter Page Definition (since ONFI 2.1) */ +struct onfi_ext_param_page { + __le16 crc; + u8 sig[4]; /* 'E' 'P' 'P' 'S' */ + u8 reserved0[10]; + struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX]; + + /* + * The actual size of the Extended Parameter Page is in + * @ext_param_page_length of nand_onfi_params{}. + * The following are the variable length sections. + * So we do not add any fields below. Please see the ONFI spec. 
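ONFI_CRC_BASE above is the seed for the CRC-16 that protects the parameter page (the crc field at the end of nand_onfi_params). A standalone sketch of that check, assuming the polynomial 0x8005 and the 254-byte coverage specified by ONFI:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define ONFI_CRC_BASE 0x4F4E

/* CRC-16, polynomial 0x8005, seeded with ONFI_CRC_BASE */
static uint16_t onfi_crc16(uint16_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)*p++ << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
	}
	return crc;
}

/* A parameter page is 256 bytes; the CRC covers the first 254 bytes and is
 * stored little-endian in the last two. */
static int onfi_param_page_valid(const uint8_t page[256])
{
	uint16_t crc = onfi_crc16(ONFI_CRC_BASE, page, 254);
	uint16_t stored = page[254] | (page[255] << 8);

	return crc == stored;
}

int main(void)
{
	uint8_t page[256] = { 'O', 'N', 'F', 'I' };
	uint16_t crc = onfi_crc16(ONFI_CRC_BASE, page, 254);

	page[254] = crc & 0xff;
	page[255] = crc >> 8;
	printf("parameter page CRC ok: %d\n", onfi_param_page_valid(page));
	return 0;
}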
+ */ +} __packed; + +struct jedec_ecc_info { + u8 ecc_bits; + u8 codeword_size; + __le16 bb_per_lun; + __le16 block_endurance; + u8 reserved[2]; +} __packed; + +/* JEDEC features */ +#define JEDEC_FEATURE_16_BIT_BUS (1 << 0) + +struct nand_jedec_params { + /* rev info and features block */ + /* 'J' 'E' 'S' 'D' */ + u8 sig[4]; + __le16 revision; + __le16 features; + u8 opt_cmd[3]; + __le16 sec_cmd; + u8 num_of_param_pages; + u8 reserved0[18]; + + /* manufacturer information block */ + char manufacturer[12]; + char model[20]; + u8 jedec_id[6]; + u8 reserved1[10]; + + /* memory organization block */ + __le32 byte_per_page; + __le16 spare_bytes_per_page; + u8 reserved2[6]; + __le32 pages_per_block; + __le32 blocks_per_lun; + u8 lun_count; + u8 addr_cycles; + u8 bits_per_cell; + u8 programs_per_page; + u8 multi_plane_addr; + u8 multi_plane_op_attr; + u8 reserved3[38]; + + /* electrical parameter block */ + __le16 async_sdr_speed_grade; + __le16 toggle_ddr_speed_grade; + __le16 sync_ddr_speed_grade; + u8 async_sdr_features; + u8 toggle_ddr_features; + u8 sync_ddr_features; + __le16 t_prog; + __le16 t_bers; + __le16 t_r; + __le16 t_r_multi_plane; + __le16 t_ccs; + __le16 io_pin_capacitance_typ; + __le16 input_pin_capacitance_typ; + __le16 clk_pin_capacitance_typ; + u8 driver_strength_support; + __le16 t_adl; + u8 reserved4[36]; + + /* ECC and endurance block */ + u8 guaranteed_good_blocks; + __le16 guaranteed_block_endurance; + struct jedec_ecc_info ecc_info[4]; + u8 reserved5[29]; + + /* reserved */ + u8 reserved6[148]; + + /* vendor */ + __le16 vendor_rev_num; + u8 reserved7[88]; + + /* CRC for Parameter Page */ + __le16 crc; +} __packed; + +/** + * struct onfi_params - ONFI specific parameters that will be reused + * @version: ONFI version (BCD encoded), 0 if ONFI is not supported + * @tPROG: Page program time + * @tBERS: Block erase time + * @tR: Page read time + * @tCCS: Change column setup time + * @async_timing_mode: Supported asynchronous timing mode + * @vendor_revision: Vendor specific revision number + * @vendor: Vendor specific data + */ +struct onfi_params { + int version; + u16 tPROG; + u16 tBERS; + u16 tR; + u16 tCCS; + u16 async_timing_mode; + u16 vendor_revision; + u8 vendor[88]; +}; + +/** + * struct nand_parameters - NAND generic parameters from the parameter page + * @model: Model name + * @supports_set_get_features: The NAND chip supports setting/getting features + * @set_feature_list: Bitmap of features that can be set + * @get_feature_list: Bitmap of features that can be get + * @onfi: ONFI specific parameters + */ +struct nand_parameters { + /* Generic parameters */ + const char *model; + bool supports_set_get_features; + DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER); + DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER); + + /* ONFI parameters */ + struct onfi_params *onfi; +}; + +/* The maximum expected count of bytes in the NAND ID sequence */ +#define NAND_MAX_ID_LEN 8 + +/** + * struct nand_id - NAND id structure + * @data: buffer containing the id bytes. + * @len: ID length. + */ +struct nand_id { + u8 data[NAND_MAX_ID_LEN]; + int len; +}; + +/** + * struct nand_controller_ops - Controller operations + * + * @attach_chip: this method is called after the NAND detection phase after + * flash ID and MTD fields such as erase size, page size and OOB + * size have been set up. ECC requirements are available if + * provided by the NAND chip or device tree. Typically used to + * choose the appropriate ECC configuration and allocate + * associated resources. 
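A hedged sketch of the ->attach_chip() hook described here: a hypothetical controller driver (foo_*) that, once the chip has been identified, checks the page size and fills in an ECC configuration; the ECC numbers are illustrative only.

static int foo_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Assume this controller only handles pages up to 4 KiB */
	if (mtd->writesize > SZ_4K)
		return -EINVAL;

	if (chip->ecc.mode != NAND_ECC_HW)
		return -EINVAL;

	chip->ecc.size = 512;	/* data bytes per ECC step */
	chip->ecc.strength = 8;	/* correctable bits per step */
	chip->ecc.bytes = 13;	/* ECC bytes per step: engine dependent */

	return 0;
}

static const struct nand_controller_ops foo_controller_ops = {
	.attach_chip = foo_attach_chip,
};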
+ * This hook is optional. + * @detach_chip: free all resources allocated/claimed in + * nand_controller_ops->attach_chip(). + * This hook is optional. + */ +struct nand_controller_ops { + int (*attach_chip)(struct nand_chip *chip); + void (*detach_chip)(struct nand_chip *chip); +}; + +/** + * struct nand_controller - Structure used to describe a NAND controller + * + * @lock: protection lock + * @active: the mtd device which holds the controller currently + * @wq: wait queue to sleep on if a NAND operation is in + * progress used instead of the per chip wait queue + * when a hw controller is available. + * @ops: NAND controller operations. + */ +struct nand_controller { + spinlock_t lock; + struct nand_chip *active; + wait_queue_head_t wq; + const struct nand_controller_ops *ops; +}; + +static inline void nand_controller_init(struct nand_controller *nfc) +{ + nfc->active = NULL; + spin_lock_init(&nfc->lock); + init_waitqueue_head(&nfc->wq); +} + +/** + * struct nand_ecc_step_info - ECC step information of ECC engine + * @stepsize: data bytes per ECC step + * @strengths: array of supported strengths + * @nstrengths: number of supported strengths + */ +struct nand_ecc_step_info { + int stepsize; + const int *strengths; + int nstrengths; +}; + +/** + * struct nand_ecc_caps - capability of ECC engine + * @stepinfos: array of ECC step information + * @nstepinfos: number of ECC step information + * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step + */ +struct nand_ecc_caps { + const struct nand_ecc_step_info *stepinfos; + int nstepinfos; + int (*calc_ecc_bytes)(int step_size, int strength); +}; + +/* a shorthand to generate struct nand_ecc_caps with only one ECC stepsize */ +#define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...) \ +static const int __name##_strengths[] = { __VA_ARGS__ }; \ +static const struct nand_ecc_step_info __name##_stepinfo = { \ + .stepsize = __step, \ + .strengths = __name##_strengths, \ + .nstrengths = ARRAY_SIZE(__name##_strengths), \ +}; \ +static const struct nand_ecc_caps __name = { \ + .stepinfos = &__name##_stepinfo, \ + .nstepinfos = 1, \ + .calc_ecc_bytes = __calc, \ +} + +/** + * struct nand_ecc_ctrl - Control structure for ECC + * @mode: ECC mode + * @algo: ECC algorithm + * @steps: number of ECC steps per page + * @size: data bytes per ECC step + * @bytes: ECC bytes per step + * @strength: max number of correctible bits per ECC step + * @total: total number of ECC bytes per page + * @prepad: padding information for syndrome based ECC generators + * @postpad: padding information for syndrome based ECC generators + * @options: ECC specific options (see NAND_ECC_XXX flags defined above) + * @priv: pointer to private ECC control data + * @calc_buf: buffer for calculated ECC, size is oobsize. + * @code_buf: buffer for ECC read from flash, size is oobsize. + * @hwctl: function to control hardware ECC generator. Must only + * be provided if an hardware ECC is available + * @calculate: function for ECC calculation or readback from ECC hardware + * @correct: function for ECC correction, matching to ECC generator (sw/hw). + * Should return a positive number representing the number of + * corrected bitflips, -EBADMSG if the number of bitflips exceed + * ECC strength, or any other error code if the error is not + * directly related to correction. + * If -EBADMSG is returned the input buffers should be left + * untouched. + * @read_page_raw: function to read a raw page without ECC. 
This function + * should hide the specific layout used by the ECC + * controller and always return contiguous in-band and + * out-of-band data even if they're not stored + * contiguously on the NAND chip (e.g. + * NAND_ECC_HW_SYNDROME interleaves in-band and + * out-of-band data). + * @write_page_raw: function to write a raw page without ECC. This function + * should hide the specific layout used by the ECC + * controller and consider the passed data as contiguous + * in-band and out-of-band data. ECC controller is + * responsible for doing the appropriate transformations + * to adapt to its specific layout (e.g. + * NAND_ECC_HW_SYNDROME interleaves in-band and + * out-of-band data). + * @read_page: function to read a page according to the ECC generator + * requirements; returns maximum number of bitflips corrected in + * any single ECC step, -EIO hw error + * @read_subpage: function to read parts of the page covered by ECC; + * returns same as read_page() + * @write_subpage: function to write parts of the page covered by ECC. + * @write_page: function to write a page according to the ECC generator + * requirements. + * @write_oob_raw: function to write chip OOB data without ECC + * @read_oob_raw: function to read chip OOB data without ECC + * @read_oob: function to read chip OOB data + * @write_oob: function to write chip OOB data + */ +struct nand_ecc_ctrl { + nand_ecc_modes_t mode; + enum nand_ecc_algo algo; + int steps; + int size; + int bytes; + int total; + int strength; + int prepad; + int postpad; + unsigned int options; + void *priv; + u8 *calc_buf; + u8 *code_buf; + void (*hwctl)(struct mtd_info *mtd, int mode); + int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, + uint8_t *ecc_code); + int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, + uint8_t *calc_ecc); + int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page); + int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page); + int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page); + int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offs, uint32_t len, uint8_t *buf, int page); + int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offset, uint32_t data_len, + const uint8_t *data_buf, int oob_required, int page); + int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page); + int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, + int page); + int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, + int page); + int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page); + int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip, + int page); +}; + +/** + * struct nand_sdr_timings - SDR NAND chip timings + * + * This struct defines the timing requirements of a SDR NAND chip. + * These information can be found in every NAND datasheets and the timings + * meaning are described in the ONFI specifications: + * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing + * Parameters) + * + * All these timings are expressed in picoseconds. 
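Since every value in nand_sdr_timings is expressed in picoseconds, controller drivers usually just convert the relevant timings into a number of controller clock cycles. A standalone sketch of that conversion, with a hypothetical 100 MHz clock and illustrative timing values:

#include <stdio.h>
#include <stdint.h>

/* Round-up division, analogous to the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Minimum clock cycles needed to satisfy a timing given in picoseconds:
 * cycles = ceil(t_ps * f_hz / 1e12) */
static unsigned int ps_to_cycles(uint64_t t_ps, uint64_t clk_hz)
{
	return (unsigned int)DIV_ROUND_UP(t_ps * clk_hz, 1000000000000ULL);
}

int main(void)
{
	uint64_t clk_hz = 100000000;	/* hypothetical 100 MHz controller clock */
	uint32_t tWP_min = 12000;	/* ps, illustrative fast async mode */
	uint32_t tWC_min = 25000;	/* ps */

	printf("tWP: %u cycle(s)\n", ps_to_cycles(tWP_min, clk_hz));
	printf("tWC: %u cycle(s)\n", ps_to_cycles(tWC_min, clk_hz));
	return 0;
}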
+ * + * @tBERS_max: Block erase time + * @tCCS_min: Change column setup time + * @tPROG_max: Page program time + * @tR_max: Page read time + * @tALH_min: ALE hold time + * @tADL_min: ALE to data loading time + * @tALS_min: ALE setup time + * @tAR_min: ALE to RE# delay + * @tCEA_max: CE# access time + * @tCEH_min: CE# high hold time + * @tCH_min: CE# hold time + * @tCHZ_max: CE# high to output hi-Z + * @tCLH_min: CLE hold time + * @tCLR_min: CLE to RE# delay + * @tCLS_min: CLE setup time + * @tCOH_min: CE# high to output hold + * @tCS_min: CE# setup time + * @tDH_min: Data hold time + * @tDS_min: Data setup time + * @tFEAT_max: Busy time for Set Features and Get Features + * @tIR_min: Output hi-Z to RE# low + * @tITC_max: Interface and Timing Mode Change time + * @tRC_min: RE# cycle time + * @tREA_max: RE# access time + * @tREH_min: RE# high hold time + * @tRHOH_min: RE# high to output hold + * @tRHW_min: RE# high to WE# low + * @tRHZ_max: RE# high to output hi-Z + * @tRLOH_min: RE# low to output hold + * @tRP_min: RE# pulse width + * @tRR_min: Ready to RE# low (data only) + * @tRST_max: Device reset time, measured from the falling edge of R/B# to the + * rising edge of R/B#. + * @tWB_max: WE# high to SR[6] low + * @tWC_min: WE# cycle time + * @tWH_min: WE# high hold time + * @tWHR_min: WE# high to RE# low + * @tWP_min: WE# pulse width + * @tWW_min: WP# transition to WE# low + */ +struct nand_sdr_timings { + u64 tBERS_max; + u32 tCCS_min; + u64 tPROG_max; + u64 tR_max; + u32 tALH_min; + u32 tADL_min; + u32 tALS_min; + u32 tAR_min; + u32 tCEA_max; + u32 tCEH_min; + u32 tCH_min; + u32 tCHZ_max; + u32 tCLH_min; + u32 tCLR_min; + u32 tCLS_min; + u32 tCOH_min; + u32 tCS_min; + u32 tDH_min; + u32 tDS_min; + u32 tFEAT_max; + u32 tIR_min; + u32 tITC_max; + u32 tRC_min; + u32 tREA_max; + u32 tREH_min; + u32 tRHOH_min; + u32 tRHW_min; + u32 tRHZ_max; + u32 tRLOH_min; + u32 tRP_min; + u32 tRR_min; + u64 tRST_max; + u32 tWB_max; + u32 tWC_min; + u32 tWH_min; + u32 tWHR_min; + u32 tWP_min; + u32 tWW_min; +}; + +/** + * enum nand_data_interface_type - NAND interface timing type + * @NAND_SDR_IFACE: Single Data Rate interface + */ +enum nand_data_interface_type { + NAND_SDR_IFACE, +}; + +/** + * struct nand_data_interface - NAND interface timing + * @type: type of the timing + * @timings: The timing, type according to @type + * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. + */ +struct nand_data_interface { + enum nand_data_interface_type type; + union { + struct nand_sdr_timings sdr; + } timings; +}; + +/** + * nand_get_sdr_timings - get SDR timing from data interface + * @conf: The data interface + */ +static inline const struct nand_sdr_timings * +nand_get_sdr_timings(const struct nand_data_interface *conf) +{ + if (conf->type != NAND_SDR_IFACE) + return ERR_PTR(-EINVAL); + + return &conf->timings.sdr; +} + +/** + * struct nand_manufacturer_ops - NAND Manufacturer operations + * @detect: detect the NAND memory organization and capabilities + * @init: initialize all vendor specific fields (like the ->read_retry() + * implementation) if any. + * @cleanup: the ->init() function may have allocated resources, ->cleanup() + * is here to let vendor specific code release those resources. + * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter + * page. This is called after the checksum is verified. 
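A hedged sketch of how a driver's ->setup_data_interface() hook (documented with struct nand_chip further below) might consume nand_get_sdr_timings(); foo_apply_timings() is a placeholder for programming the controller's timing registers.

/* Placeholder: program the controller timing registers from the SDR values */
static int foo_apply_timings(struct mtd_info *mtd,
			     const struct nand_sdr_timings *sdr);

static int foo_setup_data_interface(struct mtd_info *mtd, int chipnr,
				    const struct nand_data_interface *conf)
{
	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* Only validate the timings, do not touch the hardware yet */
	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	return foo_apply_timings(mtd, sdr);	/* uses sdr->tWP_min, sdr->tWC_min, ... */
}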
+ */ +struct nand_manufacturer_ops { + void (*detect)(struct nand_chip *chip); + int (*init)(struct nand_chip *chip); + void (*cleanup)(struct nand_chip *chip); + void (*fixup_onfi_param_page)(struct nand_chip *chip, + struct nand_onfi_params *p); +}; + +/** + * struct nand_op_cmd_instr - Definition of a command instruction + * @opcode: the command to issue in one cycle + */ +struct nand_op_cmd_instr { + u8 opcode; +}; + +/** + * struct nand_op_addr_instr - Definition of an address instruction + * @naddrs: length of the @addrs array + * @addrs: array containing the address cycles to issue + */ +struct nand_op_addr_instr { + unsigned int naddrs; + const u8 *addrs; +}; + +/** + * struct nand_op_data_instr - Definition of a data instruction + * @len: number of data bytes to move + * @buf: buffer to fill + * @buf.in: buffer to fill when reading from the NAND chip + * @buf.out: buffer to read from when writing to the NAND chip + * @force_8bit: force 8-bit access + * + * Please note that "in" and "out" are inverted from the ONFI specification + * and are from the controller perspective, so a "in" is a read from the NAND + * chip while a "out" is a write to the NAND chip. + */ +struct nand_op_data_instr { + unsigned int len; + union { + void *in; + const void *out; + } buf; + bool force_8bit; +}; + +/** + * struct nand_op_waitrdy_instr - Definition of a wait ready instruction + * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms + */ +struct nand_op_waitrdy_instr { + unsigned int timeout_ms; +}; + +/** + * enum nand_op_instr_type - Definition of all instruction types + * @NAND_OP_CMD_INSTR: command instruction + * @NAND_OP_ADDR_INSTR: address instruction + * @NAND_OP_DATA_IN_INSTR: data in instruction + * @NAND_OP_DATA_OUT_INSTR: data out instruction + * @NAND_OP_WAITRDY_INSTR: wait ready instruction + */ +enum nand_op_instr_type { + NAND_OP_CMD_INSTR, + NAND_OP_ADDR_INSTR, + NAND_OP_DATA_IN_INSTR, + NAND_OP_DATA_OUT_INSTR, + NAND_OP_WAITRDY_INSTR, +}; + +/** + * struct nand_op_instr - Instruction object + * @type: the instruction type + * @ctx: extra data associated to the instruction. You'll have to use the + * appropriate element depending on @type + * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR + * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR + * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR + * or %NAND_OP_DATA_OUT_INSTR + * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR + * @delay_ns: delay the controller should apply after the instruction has been + * issued on the bus. Most modern controllers have internal timings + * control logic, and in this case, the controller driver can ignore + * this field. + */ +struct nand_op_instr { + enum nand_op_instr_type type; + union { + struct nand_op_cmd_instr cmd; + struct nand_op_addr_instr addr; + struct nand_op_data_instr data; + struct nand_op_waitrdy_instr waitrdy; + } ctx; + unsigned int delay_ns; +}; + +/* + * Special handling must be done for the WAITRDY timeout parameter as it usually + * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or + * tBERS (during an erase) which all of them are u64 values that cannot be + * divided by usual kernel macros and must be handled with the special + * DIV_ROUND_UP_ULL() macro. 
+ * + * Cast to type of dividend is needed here to guarantee that the result won't + * be an unsigned long long when the dividend is an unsigned long (or smaller), + * which is what the compiler does when it sees ternary operator with 2 + * different return types (picks the largest type to make sure there's no + * loss). + */ +#define __DIVIDE(dividend, divisor) ({ \ + (__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \ + DIV_ROUND_UP(dividend, divisor) : \ + DIV_ROUND_UP_ULL(dividend, divisor)); \ + }) +#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000) +#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000) + +#define NAND_OP_CMD(id, ns) \ + { \ + .type = NAND_OP_CMD_INSTR, \ + .ctx.cmd.opcode = id, \ + .delay_ns = ns, \ + } + +#define NAND_OP_ADDR(ncycles, cycles, ns) \ + { \ + .type = NAND_OP_ADDR_INSTR, \ + .ctx.addr = { \ + .naddrs = ncycles, \ + .addrs = cycles, \ + }, \ + .delay_ns = ns, \ + } + +#define NAND_OP_DATA_IN(l, b, ns) \ + { \ + .type = NAND_OP_DATA_IN_INSTR, \ + .ctx.data = { \ + .len = l, \ + .buf.in = b, \ + .force_8bit = false, \ + }, \ + .delay_ns = ns, \ + } + +#define NAND_OP_DATA_OUT(l, b, ns) \ + { \ + .type = NAND_OP_DATA_OUT_INSTR, \ + .ctx.data = { \ + .len = l, \ + .buf.out = b, \ + .force_8bit = false, \ + }, \ + .delay_ns = ns, \ + } + +#define NAND_OP_8BIT_DATA_IN(l, b, ns) \ + { \ + .type = NAND_OP_DATA_IN_INSTR, \ + .ctx.data = { \ + .len = l, \ + .buf.in = b, \ + .force_8bit = true, \ + }, \ + .delay_ns = ns, \ + } + +#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \ + { \ + .type = NAND_OP_DATA_OUT_INSTR, \ + .ctx.data = { \ + .len = l, \ + .buf.out = b, \ + .force_8bit = true, \ + }, \ + .delay_ns = ns, \ + } + +#define NAND_OP_WAIT_RDY(tout_ms, ns) \ + { \ + .type = NAND_OP_WAITRDY_INSTR, \ + .ctx.waitrdy.timeout_ms = tout_ms, \ + .delay_ns = ns, \ + } + +/** + * struct nand_subop - a sub operation + * @instrs: array of instructions + * @ninstrs: length of the @instrs array + * @first_instr_start_off: offset to start from for the first instruction + * of the sub-operation + * @last_instr_end_off: offset to end at (excluded) for the last instruction + * of the sub-operation + * + * Both @first_instr_start_off and @last_instr_end_off only apply to data or + * address instructions. + * + * When an operation cannot be handled as is by the NAND controller, it will + * be split by the parser into sub-operations which will be passed to the + * controller driver. 
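As a hedged sketch of how the NAND_OP_* helpers above are combined: a hypothetical large-page read built as an instruction array and submitted through NAND_OPERATION()/nand_exec_op(), both of which appear further down in this header. Address cycles, timeout and buffer handling are illustrative.

static int foo_read_first_page(struct nand_chip *chip, u8 *buf, unsigned int len)
{
	const u8 addrs[5] = { 0 };		/* column 0, page 0 */
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(5, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, 0),
		NAND_OP_WAIT_RDY(200, 0),	/* ms; real code derives this from tR_max */
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);

	return nand_exec_op(chip, &op);
}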
+ */ +struct nand_subop { + const struct nand_op_instr *instrs; + unsigned int ninstrs; + unsigned int first_instr_start_off; + unsigned int last_instr_end_off; +}; + +unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop, + unsigned int op_id); +unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop, + unsigned int op_id); +unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop, + unsigned int op_id); +unsigned int nand_subop_get_data_len(const struct nand_subop *subop, + unsigned int op_id); + +/** + * struct nand_op_parser_addr_constraints - Constraints for address instructions + * @maxcycles: maximum number of address cycles the controller can issue in a + * single step + */ +struct nand_op_parser_addr_constraints { + unsigned int maxcycles; +}; + +/** + * struct nand_op_parser_data_constraints - Constraints for data instructions + * @maxlen: maximum data length that the controller can handle in a single step + */ +struct nand_op_parser_data_constraints { + unsigned int maxlen; +}; + +/** + * struct nand_op_parser_pattern_elem - One element of a pattern + * @type: the instructuction type + * @optional: whether this element of the pattern is optional or mandatory + * @ctx: address or data constraint + * @ctx.addr: address constraint (number of cycles) + * @ctx.data: data constraint (data length) + */ +struct nand_op_parser_pattern_elem { + enum nand_op_instr_type type; + bool optional; + union { + struct nand_op_parser_addr_constraints addr; + struct nand_op_parser_data_constraints data; + } ctx; +}; + +#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \ + { \ + .type = NAND_OP_CMD_INSTR, \ + .optional = _opt, \ + } + +#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \ + { \ + .type = NAND_OP_ADDR_INSTR, \ + .optional = _opt, \ + .ctx.addr.maxcycles = _maxcycles, \ + } + +#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \ + { \ + .type = NAND_OP_DATA_IN_INSTR, \ + .optional = _opt, \ + .ctx.data.maxlen = _maxlen, \ + } + +#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \ + { \ + .type = NAND_OP_DATA_OUT_INSTR, \ + .optional = _opt, \ + .ctx.data.maxlen = _maxlen, \ + } + +#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \ + { \ + .type = NAND_OP_WAITRDY_INSTR, \ + .optional = _opt, \ + } + +/** + * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor + * @elems: array of pattern elements + * @nelems: number of pattern elements in @elems array + * @exec: the function that will issue a sub-operation + * + * A pattern is a list of elements, each element reprensenting one instruction + * with its constraints. The pattern itself is used by the core to match NAND + * chip operation with NAND controller operations. + * Once a match between a NAND controller operation pattern and a NAND chip + * operation (or a sub-set of a NAND operation) is found, the pattern ->exec() + * hook is called so that the controller driver can issue the operation on the + * bus. + * + * Controller drivers should declare as many patterns as they support and pass + * this list of patterns (created with the help of the following macro) to + * the nand_op_parser_exec_op() helper. + */ +struct nand_op_parser_pattern { + const struct nand_op_parser_pattern_elem *elems; + unsigned int nelems; + int (*exec)(struct nand_chip *chip, const struct nand_subop *subop); +}; + +#define NAND_OP_PARSER_PATTERN(_exec, ...) 
\ + { \ + .exec = _exec, \ + .elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \ + .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \ + sizeof(struct nand_op_parser_pattern_elem), \ + } + +/** + * struct nand_op_parser - NAND controller operation parser descriptor + * @patterns: array of supported patterns + * @npatterns: length of the @patterns array + * + * The parser descriptor is just an array of supported patterns which will be + * iterated by nand_op_parser_exec_op() everytime it tries to execute an + * NAND operation (or tries to determine if a specific operation is supported). + * + * It is worth mentioning that patterns will be tested in their declaration + * order, and the first match will be taken, so it's important to order patterns + * appropriately so that simple/inefficient patterns are placed at the end of + * the list. Usually, this is where you put single instruction patterns. + */ +struct nand_op_parser { + const struct nand_op_parser_pattern *patterns; + unsigned int npatterns; +}; + +#define NAND_OP_PARSER(...) \ + { \ + .patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \ + .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \ + sizeof(struct nand_op_parser_pattern), \ + } + +/** + * struct nand_operation - NAND operation descriptor + * @instrs: array of instructions to execute + * @ninstrs: length of the @instrs array + * + * The actual operation structure that will be passed to chip->exec_op(). + */ +struct nand_operation { + const struct nand_op_instr *instrs; + unsigned int ninstrs; +}; + +#define NAND_OPERATION(_instrs) \ + { \ + .instrs = _instrs, \ + .ninstrs = ARRAY_SIZE(_instrs), \ + } + +int nand_op_parser_exec_op(struct nand_chip *chip, + const struct nand_op_parser *parser, + const struct nand_operation *op, bool check_only); + +/** + * struct nand_chip - NAND Private Flash Chip Data + * @mtd: MTD device registered to the MTD framework + * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the + * flash device + * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the + * flash device. + * @read_byte: [REPLACEABLE] read one byte from the chip + * @read_word: [REPLACEABLE] read one word from the chip + * @write_byte: [REPLACEABLE] write a single byte to the chip on the + * low 8 I/O lines + * @write_buf: [REPLACEABLE] write data from the buffer to the chip + * @read_buf: [REPLACEABLE] read data from the chip into the buffer + * @select_chip: [REPLACEABLE] select chip nr + * @block_bad: [REPLACEABLE] check if a block is bad, using OOB markers + * @block_markbad: [REPLACEABLE] mark a block bad + * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling + * ALE/CLE/nCE. Also used to write command and address + * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing + * device ready/busy line. If set to NULL no access to + * ready/busy is available and the ready/busy information + * is read from the chip status register. + * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing + * commands to the chip. + * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on + * ready. + * @exec_op: controller specific method to execute NAND operations. + * This method replaces ->cmdfunc(), + * ->{read,write}_{buf,byte,word}(), ->dev_ready() and + * ->waifunc(). + * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for + * setting the read-retry mode. Mostly needed for MLC NAND. 
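A hedged sketch of the parser helpers defined above, as a controller driver might use them: one pattern covering command/address/command/wait/data-in phases (at most 5 address cycles and 4 KiB per transfer), dispatched from ->exec_op(). The foo_* names are hypothetical and foo_exec_subop() is only declared here.

static int foo_exec_subop(struct nand_chip *chip, const struct nand_subop *subop);

static const struct nand_op_parser foo_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(foo_exec_subop,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4096)));

/* Wired to chip->exec_op by the controller driver */
static int foo_exec_op(struct nand_chip *chip, const struct nand_operation *op,
		       bool check_only)
{
	return nand_op_parser_exec_op(chip, &foo_op_parser, op, check_only);
}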
+ * @ecc: [BOARDSPECIFIC] ECC control structure + * @buf_align: minimum buffer alignment required by a platform + * @dummy_controller: dummy controller implementation for drivers that can + * only control a single chip + * @erase: [REPLACEABLE] erase function + * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring + * data from array to read regs (tR). + * @state: [INTERN] the current state of the NAND device + * @oob_poi: "poison value buffer," used for laying out OOB data + * before writing + * @page_shift: [INTERN] number of address bits in a page (column + * address bits). + * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock + * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry + * @chip_shift: [INTERN] number of address bits in one chip + * @options: [BOARDSPECIFIC] various chip options. They can partly + * be set to inform nand_scan about special functionality. + * See the defines for further explanation. + * @bbt_options: [INTERN] bad block specific options. All options used + * here must come from bbm.h. By default, these options + * will be copied to the appropriate nand_bbt_descr's. + * @badblockpos: [INTERN] position of the bad block marker in the oob + * area. + * @badblockbits: [INTERN] minimum number of set bits in a good block's + * bad block marker position; i.e., BBM == 11110111b is + * not bad when badblockbits == 7 + * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC. + * @ecc_strength_ds: [INTERN] ECC correctability from the datasheet. + * Minimum amount of bit errors per @ecc_step_ds guaranteed + * to be correctable. If unknown, set to zero. + * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds, + * also from the datasheet. It is the recommended ECC step + * size, if known; if unknown, set to zero. + * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is + * set to the actually used ONFI mode if the chip is + * ONFI compliant or deduced from the datasheet if + * the NAND chip is not ONFI compliant. + * @numchips: [INTERN] number of physical chips + * @chipsize: [INTERN] the size of one chip for multichip arrays + * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 + * @data_buf: [INTERN] buffer for data, size is (page size + oobsize). + * @pagebuf: [INTERN] holds the pagenumber which is currently in + * data_buf. + * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is + * currently in data_buf. + * @subpagesize: [INTERN] holds the subpagesize + * @id: [INTERN] holds NAND ID + * @parameters: [INTERN] holds generic parameters under an easily + * readable form. + * @max_bb_per_die: [INTERN] the max number of bad blocks each die of a + * this nand device will encounter their life times. + * @blocks_per_die: [INTERN] The number of PEBs in a die + * @data_interface: [INTERN] NAND interface timing information + * @read_retries: [INTERN] the number of read retry modes supported + * @set_features: [REPLACEABLE] set the NAND chip features + * @get_features: [REPLACEABLE] get the NAND chip features + * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If + * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this + * means the configuration should not be applied but + * only checked. + * @bbt: [INTERN] bad block table pointer + * @bbt_td: [REPLACEABLE] bad block table descriptor for flash + * lookup. 
+ * @bbt_md: [REPLACEABLE] bad block table mirror descriptor + * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial + * bad block scan. + * @controller: [REPLACEABLE] a pointer to a hardware controller + * structure which is shared among multiple independent + * devices. + * @priv: [OPTIONAL] pointer to private chip data + * @manufacturer: [INTERN] Contains manufacturer information + * @manufacturer.desc: [INTERN] Contains manufacturer's description + * @manufacturer.priv: [INTERN] Contains manufacturer private information + */ + +struct nand_chip { + struct mtd_info mtd; + void __iomem *IO_ADDR_R; + void __iomem *IO_ADDR_W; + + uint8_t (*read_byte)(struct mtd_info *mtd); + u16 (*read_word)(struct mtd_info *mtd); + void (*write_byte)(struct mtd_info *mtd, uint8_t byte); + void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); + void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); + void (*select_chip)(struct mtd_info *mtd, int chip); + int (*block_bad)(struct mtd_info *mtd, loff_t ofs); + int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); + void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); + int (*dev_ready)(struct mtd_info *mtd); + void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, + int page_addr); + int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); + int (*exec_op)(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only); + int (*erase)(struct mtd_info *mtd, int page); + int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); + int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); + int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode); + int (*setup_data_interface)(struct mtd_info *mtd, int chipnr, + const struct nand_data_interface *conf); + + int chip_delay; + unsigned int options; + unsigned int bbt_options; + + int page_shift; + int phys_erase_shift; + int bbt_erase_shift; + int chip_shift; + int numchips; + uint64_t chipsize; + int pagemask; + u8 *data_buf; + int pagebuf; + unsigned int pagebuf_bitflips; + int subpagesize; + uint8_t bits_per_cell; + uint16_t ecc_strength_ds; + uint16_t ecc_step_ds; + int onfi_timing_mode_default; + int badblockpos; + int badblockbits; + + struct nand_id id; + struct nand_parameters parameters; + u16 max_bb_per_die; + u32 blocks_per_die; + + struct nand_data_interface data_interface; + + int read_retries; + + flstate_t state; + + uint8_t *oob_poi; + struct nand_controller *controller; + + struct nand_ecc_ctrl ecc; + unsigned long buf_align; + struct nand_controller dummy_controller; + + uint8_t *bbt; + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; + + struct nand_bbt_descr *badblock_pattern; + + void *priv; + + struct { + const struct nand_manufacturer *desc; + void *priv; + } manufacturer; +}; + +static inline int nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op) +{ + if (!chip->exec_op) + return -ENOTSUPP; + + return chip->exec_op(chip, op, false); +} + +extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; +extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; + +static inline void nand_set_flash_node(struct nand_chip *chip, + struct device_node *np) +{ + mtd_set_of_node(&chip->mtd, np); +} + +static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) +{ + return mtd_get_of_node(&chip->mtd); +} + +static inline struct nand_chip 
*mtd_to_nand(struct mtd_info *mtd) +{ + return container_of(mtd, struct nand_chip, mtd); +} + +static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip) +{ + return &chip->mtd; +} + +static inline void *nand_get_controller_data(struct nand_chip *chip) +{ + return chip->priv; +} + +static inline void nand_set_controller_data(struct nand_chip *chip, void *priv) +{ + chip->priv = priv; +} + +static inline void nand_set_manufacturer_data(struct nand_chip *chip, + void *priv) +{ + chip->manufacturer.priv = priv; +} + +static inline void *nand_get_manufacturer_data(struct nand_chip *chip) +{ + return chip->manufacturer.priv; +} + +/* + * NAND Flash Manufacturer ID Codes + */ +#define NAND_MFR_TOSHIBA 0x98 +#define NAND_MFR_ESMT 0xc8 +#define NAND_MFR_SAMSUNG 0xec +#define NAND_MFR_FUJITSU 0x04 +#define NAND_MFR_NATIONAL 0x8f +#define NAND_MFR_RENESAS 0x07 +#define NAND_MFR_STMICRO 0x20 +#define NAND_MFR_HYNIX 0xad +#define NAND_MFR_MICRON 0x2c +#define NAND_MFR_AMD 0x01 +#define NAND_MFR_MACRONIX 0xc2 +#define NAND_MFR_EON 0x92 +#define NAND_MFR_SANDISK 0x45 +#define NAND_MFR_INTEL 0x89 +#define NAND_MFR_ATO 0x9b +#define NAND_MFR_WINBOND 0xef + + +/* + * A helper for defining older NAND chips where the second ID byte fully + * defined the chip, including the geometry (chip size, eraseblock size, page + * size). All these chips have 512 bytes NAND page size. + */ +#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \ + { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \ + .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) } + +/* + * A helper for defining newer chips which report their page size and + * eraseblock size via the extended ID bytes. + * + * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with + * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the + * device ID now only represented a particular total chip size (and voltage, + * buswidth), and the page size, eraseblock size, and OOB size could vary while + * using the same device ID. + */ +#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \ + { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \ + .options = (opts) } + +#define NAND_ECC_INFO(_strength, _step) \ + { .strength_ds = (_strength), .step_ds = (_step) } +#define NAND_ECC_STRENGTH(type) ((type)->ecc.strength_ds) +#define NAND_ECC_STEP(type) ((type)->ecc.step_ds) + +/** + * struct nand_flash_dev - NAND Flash Device ID Structure + * @name: a human-readable name of the NAND chip + * @dev_id: the device ID (the second byte of the full chip ID array) + * @mfr_id: manufecturer ID part of the full chip ID array (refers the same + * memory address as @id[0]) + * @dev_id: device ID part of the full chip ID array (refers the same memory + * address as @id[1]) + * @id: full device ID array + * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as + * well as the eraseblock size) is determined from the extended NAND + * chip ID array) + * @chipsize: total chip size in MiB + * @erasesize: eraseblock size in bytes (determined from the extended ID if 0) + * @options: stores various chip bit options + * @id_len: The valid length of the @id. + * @oobsize: OOB size + * @ecc: ECC correctability and step information from the datasheet. + * @ecc.strength_ds: The ECC correctability from the datasheet, same as the + * @ecc_strength_ds in nand_chip{}. + * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the + * @ecc_step_ds in nand_chip{}, also from the datasheet. 
+ * For example, the "4bit ECC for each 512Byte" can be set with + * NAND_ECC_INFO(4, 512). + * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND + * reset. Should be deduced from timings described + * in the datasheet. + * + */ +struct nand_flash_dev { + char *name; + union { + struct { + uint8_t mfr_id; + uint8_t dev_id; + }; + uint8_t id[NAND_MAX_ID_LEN]; + }; + unsigned int pagesize; + unsigned int chipsize; + unsigned int erasesize; + unsigned int options; + uint16_t id_len; + uint16_t oobsize; + struct { + uint16_t strength_ds; + uint16_t step_ds; + } ecc; + int onfi_timing_mode_default; +}; + +/** + * struct nand_manufacturer - NAND Flash Manufacturer structure + * @name: Manufacturer name + * @id: manufacturer ID code of device. + * @ops: manufacturer operations +*/ +struct nand_manufacturer { + int id; + char *name; + const struct nand_manufacturer_ops *ops; +}; + +const struct nand_manufacturer *nand_get_manufacturer(u8 id); + +static inline const char * +nand_manufacturer_name(const struct nand_manufacturer *manufacturer) +{ + return manufacturer ? manufacturer->name : "Unknown"; +} + +extern struct nand_flash_dev nand_flash_ids[]; + +extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; +extern const struct nand_manufacturer_ops samsung_nand_manuf_ops; +extern const struct nand_manufacturer_ops hynix_nand_manuf_ops; +extern const struct nand_manufacturer_ops micron_nand_manuf_ops; +extern const struct nand_manufacturer_ops amd_nand_manuf_ops; +extern const struct nand_manufacturer_ops macronix_nand_manuf_ops; + +int nand_create_bbt(struct nand_chip *chip); +int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); +int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs); +int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt); +int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, + int allowbbt); + +/** + * struct platform_nand_chip - chip level device structure + * @nr_chips: max. number of chips to scan for + * @chip_offset: chip number offset + * @nr_partitions: number of partitions pointed to by partitions (or zero) + * @partitions: mtd partition list + * @chip_delay: R/B delay value in us + * @options: Option flags, e.g. 16bit buswidth + * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH + * @part_probe_types: NULL-terminated array of probe types + */ +struct platform_nand_chip { + int nr_chips; + int chip_offset; + int nr_partitions; + struct mtd_partition *partitions; + int chip_delay; + unsigned int options; + unsigned int bbt_options; + const char **part_probe_types; +}; + +/* Keep gcc happy */ +struct platform_device; + +/** + * struct platform_nand_ctrl - controller level device structure + * @probe: platform specific function to probe/setup hardware + * @remove: platform specific function to remove/teardown hardware + * @dev_ready: platform specific function to read ready/busy pin + * @select_chip: platform specific chip select function + * @cmd_ctrl: platform specific function for controlling + * ALE/CLE/nCE. 
Also used to write command and address + * @write_buf: platform specific function for write buffer + * @read_buf: platform specific function for read buffer + * @priv: private data to transport driver specific settings + * + * All fields are optional and depend on the hardware driver requirements + */ +struct platform_nand_ctrl { + int (*probe)(struct platform_device *pdev); + void (*remove)(struct platform_device *pdev); + int (*dev_ready)(struct mtd_info *mtd); + void (*select_chip)(struct mtd_info *mtd, int chip); + void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); + void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); + void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); + void *priv; +}; + +/** + * struct platform_nand_data - container structure for platform-specific data + * @chip: chip level chip structure + * @ctrl: controller level device structure + */ +struct platform_nand_data { + struct platform_nand_chip chip; + struct platform_nand_ctrl ctrl; +}; + +/* return the supported asynchronous timing mode. */ +static inline int onfi_get_async_timing_mode(struct nand_chip *chip) +{ + if (!chip->parameters.onfi) + return ONFI_TIMING_MODE_UNKNOWN; + + return chip->parameters.onfi->async_timing_mode; +} + +int onfi_fill_data_interface(struct nand_chip *chip, + enum nand_data_interface_type type, + int timing_mode); + +/* + * Check if it is a SLC nand. + * The !nand_is_slc() can be used to check the MLC/TLC nand chips. + * We do not distinguish the MLC and TLC now. + */ +static inline bool nand_is_slc(struct nand_chip *chip) +{ + WARN(chip->bits_per_cell == 0, + "chip->bits_per_cell is used uninitialized\n"); + return chip->bits_per_cell == 1; +} + +/** + * Check if the opcode's address should be sent only on the lower 8 bits + * @command: opcode to check + */ +static inline int nand_opcode_8bits(unsigned int command) +{ + switch (command) { + case NAND_CMD_READID: + case NAND_CMD_PARAM: + case NAND_CMD_GET_FEATURES: + case NAND_CMD_SET_FEATURES: + return 1; + default: + break; + } + return 0; +} + +/* get timing characteristics from ONFI timing mode. 
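+ *
+ * A minimal usage sketch, assuming the mode number has already been
+ * validated (for instance via onfi_get_async_timing_mode() above):
+ *
+ *	const struct nand_sdr_timings *sdr;
+ *
+ *	sdr = onfi_async_timing_mode_to_sdr_timings(mode);
+ *	if (IS_ERR(sdr))
+ *		return PTR_ERR(sdr);
+ *	... program timings such as tR_max from *sdr into the controller ...
+ *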
*/ +const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); + +int nand_check_erased_ecc_chunk(void *data, int datalen, + void *ecc, int ecclen, + void *extraoob, int extraooblen, + int threshold); + +int nand_ecc_choose_conf(struct nand_chip *chip, + const struct nand_ecc_caps *caps, int oobavail); + +/* Default write_oob implementation */ +int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); + +/* Default write_oob syndrome implementation */ +int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page); + +/* Default read_oob implementation */ +int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); + +/* Default read_oob syndrome implementation */ +int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page); + +/* Wrapper to use in order for controllers/vendors to GET/SET FEATURES */ +int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); +int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param); +/* Stub used by drivers that do not support GET/SET FEATURES operations */ +int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip, + int addr, u8 *subfeature_param); + +/* Default read_page_raw implementation */ +int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page); +int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip, + u8 *buf, int oob_required, int page); + +/* Default write_page_raw implementation */ +int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page); +int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip, + const u8 *buf, int oob_required, int page); + +/* Reset and initialize a NAND device */ +int nand_reset(struct nand_chip *chip, int chipnr); + +/* NAND operation helpers */ +int nand_reset_op(struct nand_chip *chip); +int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, + unsigned int len); +int nand_status_op(struct nand_chip *chip, u8 *status); +int nand_exit_status_op(struct nand_chip *chip); +int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock); +int nand_read_page_op(struct nand_chip *chip, unsigned int page, + unsigned int offset_in_page, void *buf, unsigned int len); +int nand_change_read_column_op(struct nand_chip *chip, + unsigned int offset_in_page, void *buf, + unsigned int len, bool force_8bit); +int nand_read_oob_op(struct nand_chip *chip, unsigned int page, + unsigned int offset_in_page, void *buf, unsigned int len); +int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page, + unsigned int offset_in_page, const void *buf, + unsigned int len); +int nand_prog_page_end_op(struct nand_chip *chip); +int nand_prog_page_op(struct nand_chip *chip, unsigned int page, + unsigned int offset_in_page, const void *buf, + unsigned int len); +int nand_change_write_column_op(struct nand_chip *chip, + unsigned int offset_in_page, const void *buf, + unsigned int len, bool force_8bit); +int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, + bool force_8bit); +int nand_write_data_op(struct nand_chip *chip, const void *buf, + unsigned int len, bool force_8bit); + +/* + * Free resources held by the NAND device, must be called on error after a + * sucessful nand_scan(). 
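+ *
+ * A typical probe error path is sketched below; mtd_device_register() and the
+ * surrounding driver code are illustrative assumptions, not requirements of
+ * this header:
+ *
+ *	ret = nand_scan(mtd, 1);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = mtd_device_register(mtd, NULL, 0);
+ *	if (ret)
+ *		nand_cleanup(chip);
+ *	return ret;
+ *
+ * Once the MTD device has been registered, use nand_release() below instead,
+ * which unregisters the MTD device before freeing the chip's resources.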
+ */ +void nand_cleanup(struct nand_chip *chip); +/* Unregister the MTD device and calls nand_cleanup() */ +void nand_release(struct nand_chip *chip); + +/* Default extended ID decoding function */ +void nand_decode_ext_id(struct nand_chip *chip); + +/* + * External helper for controller drivers that have to implement the WAITRDY + * instruction and have no physical pin to check it. + */ +int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms); + +#endif /* __LINUX_MTD_RAWNAND_H */ diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h new file mode 100644 index 000000000..c759d403c --- /dev/null +++ b/include/linux/mtd/sh_flctl.h @@ -0,0 +1,192 @@ +/* + * SuperH FLCTL nand controller + * + * Copyright © 2008 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __SH_FLCTL_H__ +#define __SH_FLCTL_H__ + +#include +#include +#include +#include +#include + +/* FLCTL registers */ +#define FLCMNCR(f) (f->reg + 0x0) +#define FLCMDCR(f) (f->reg + 0x4) +#define FLCMCDR(f) (f->reg + 0x8) +#define FLADR(f) (f->reg + 0xC) +#define FLADR2(f) (f->reg + 0x3C) +#define FLDATAR(f) (f->reg + 0x10) +#define FLDTCNTR(f) (f->reg + 0x14) +#define FLINTDMACR(f) (f->reg + 0x18) +#define FLBSYTMR(f) (f->reg + 0x1C) +#define FLBSYCNT(f) (f->reg + 0x20) +#define FLDTFIFO(f) (f->reg + 0x24) +#define FLECFIFO(f) (f->reg + 0x28) +#define FLTRCR(f) (f->reg + 0x2C) +#define FLHOLDCR(f) (f->reg + 0x38) +#define FL4ECCRESULT0(f) (f->reg + 0x80) +#define FL4ECCRESULT1(f) (f->reg + 0x84) +#define FL4ECCRESULT2(f) (f->reg + 0x88) +#define FL4ECCRESULT3(f) (f->reg + 0x8C) +#define FL4ECCCR(f) (f->reg + 0x90) +#define FL4ECCCNT(f) (f->reg + 0x94) +#define FLERRADR(f) (f->reg + 0x98) + +/* FLCMNCR control bits */ +#define _4ECCCNTEN (0x1 << 24) +#define _4ECCEN (0x1 << 23) +#define _4ECCCORRECT (0x1 << 22) +#define SHBUSSEL (0x1 << 20) +#define SEL_16BIT (0x1 << 19) +#define SNAND_E (0x1 << 18) /* SNAND (0=512 1=2048)*/ +#define QTSEL_E (0x1 << 17) +#define ENDIAN (0x1 << 16) /* 1 = little endian */ +#define FCKSEL_E (0x1 << 15) +#define ACM_SACCES_MODE (0x01 << 10) +#define NANWF_E (0x1 << 9) +#define SE_D (0x1 << 8) /* Spare area disable */ +#define CE1_ENABLE (0x1 << 4) /* Chip Enable 1 */ +#define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */ +#define TYPESEL_SET (0x1 << 0) + +/* + * Clock settings using the PULSEx registers from FLCMNCR + * + * Some hardware uses bits called PULSEx instead of FCKSEL_E and QTSEL_E + * to control the clock divider used between the High-Speed Peripheral Clock + * and the FLCTL internal clock. If so, use CLK_8_BIT_xxx for connecting 8 bit + * and CLK_16_BIT_xxx for connecting 16 bit bus bandwith NAND chips. For the 16 + * bit version the divider is seperate for the pulse width of high and low + * signals. 
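+ *
+ * A board file would typically OR one of these CLK_* values into the
+ * flcmncr_val field of its struct sh_flctl_platform_data (declared at the end
+ * of this header), for example (an illustrative combination only):
+ *
+ *	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | NANWF_E | SEL_16BIT,
+ *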
+ */ +#define PULSE3 (0x1 << 27) +#define PULSE2 (0x1 << 17) +#define PULSE1 (0x1 << 15) +#define PULSE0 (0x1 << 9) +#define CLK_8B_0_5 PULSE1 +#define CLK_8B_1 0x0 +#define CLK_8B_1_5 (PULSE1 | PULSE2) +#define CLK_8B_2 PULSE0 +#define CLK_8B_3 (PULSE0 | PULSE1 | PULSE2) +#define CLK_8B_4 (PULSE0 | PULSE2) +#define CLK_16B_6L_2H PULSE0 +#define CLK_16B_9L_3H (PULSE0 | PULSE1 | PULSE2) +#define CLK_16B_12L_4H (PULSE0 | PULSE2) + +/* FLCMDCR control bits */ +#define ADRCNT2_E (0x1 << 31) /* 5byte address enable */ +#define ADRMD_E (0x1 << 26) /* Sector address access */ +#define CDSRC_E (0x1 << 25) /* Data buffer selection */ +#define DOSR_E (0x1 << 24) /* Status read check */ +#define SELRW (0x1 << 21) /* 0:read 1:write */ +#define DOADR_E (0x1 << 20) /* Address stage execute */ +#define ADRCNT_1 (0x00 << 18) /* Address data bytes: 1byte */ +#define ADRCNT_2 (0x01 << 18) /* Address data bytes: 2byte */ +#define ADRCNT_3 (0x02 << 18) /* Address data bytes: 3byte */ +#define ADRCNT_4 (0x03 << 18) /* Address data bytes: 4byte */ +#define DOCMD2_E (0x1 << 17) /* 2nd cmd stage execute */ +#define DOCMD1_E (0x1 << 16) /* 1st cmd stage execute */ + +/* FLINTDMACR control bits */ +#define ESTERINTE (0x1 << 24) /* ECC error interrupt enable */ +#define AC1CLR (0x1 << 19) /* ECC FIFO clear */ +#define AC0CLR (0x1 << 18) /* Data FIFO clear */ +#define DREQ0EN (0x1 << 16) /* FLDTFIFODMA Request Enable */ +#define ECERB (0x1 << 9) /* ECC error */ +#define STERB (0x1 << 8) /* Status error */ +#define STERINTE (0x1 << 4) /* Status error enable */ + +/* FLTRCR control bits */ +#define TRSTRT (0x1 << 0) /* translation start */ +#define TREND (0x1 << 1) /* translation end */ + +/* + * FLHOLDCR control bits + * + * HOLDEN: Bus Occupancy Enable (inverted) + * Enable this bit when the external bus might be used in between transfers. + * If not set and the bus gets used by other modules, a deadlock occurs. 
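+ *
+ * Boards that do share the external bus typically request this behaviour via
+ * the platform data declared at the end of this header (an illustrative
+ * fragment; the driver is then expected to program HOLDEN from it):
+ *
+ *	.flcmncr_val = ...,
+ *	.use_holden = 1,
+ *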
+ */ +#define HOLDEN (0x1 << 0) + +/* FL4ECCCR control bits */ +#define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */ +#define _4ECCEND (0x1 << 1) /* 4 symbols end */ +#define _4ECCEXST (0x1 << 0) /* 4 symbols exist */ + +#define LOOP_TIMEOUT_MAX 0x00010000 + +enum flctl_ecc_res_t { + FL_SUCCESS, + FL_REPAIRABLE, + FL_ERROR, + FL_TIMEOUT +}; + +struct dma_chan; + +struct sh_flctl { + struct nand_chip chip; + struct platform_device *pdev; + struct dev_pm_qos_request pm_qos; + void __iomem *reg; + resource_size_t fifo; + + uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ + int read_bytes; + unsigned int index; + int seqin_column; /* column in SEQIN cmd */ + int seqin_page_addr; /* page_addr in SEQIN cmd */ + uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */ + int erase1_page_addr; /* page_addr in ERASE1 cmd */ + uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */ + uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */ + uint32_t flcmncr_base; /* base value of FLCMNCR */ + uint32_t flintdmacr_base; /* irq enable bits */ + + unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */ + unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ + unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */ + unsigned qos_request:1; /* QoS request to prevent deep power shutdown */ + + /* DMA related objects */ + struct dma_chan *chan_fifo0_rx; + struct dma_chan *chan_fifo0_tx; + struct completion dma_complete; +}; + +struct sh_flctl_platform_data { + struct mtd_partition *parts; + int nr_parts; + unsigned long flcmncr_val; + + unsigned has_hwecc:1; + unsigned use_holden:1; + + unsigned int slave_id_fifo0_tx; + unsigned int slave_id_fifo0_rx; +}; + +static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) +{ + return container_of(mtd_to_nand(mtdinfo), struct sh_flctl, chip); +} + +#endif /* __SH_FLCTL_H__ */ diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h new file mode 100644 index 000000000..e1845fc4a --- /dev/null +++ b/include/linux/mtd/sharpsl.h @@ -0,0 +1,21 @@ +/* + * SharpSL NAND support + * + * Copyright (C) 2008 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +struct sharpsl_nand_platform_data { + struct nand_bbt_descr *badblock_pattern; + const struct mtd_ooblayout_ops *ecc_layout; + struct mtd_partition *partitions; + unsigned int nr_partitions; + const char *const *part_parsers; +}; diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h new file mode 100644 index 000000000..581603ac1 --- /dev/null +++ b/include/linux/mtd/spear_smi.h @@ -0,0 +1,65 @@ +/* + * Copyright © 2010 ST Microelectronics + * Shiraz Hashim + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __MTD_SPEAR_SMI_H +#define __MTD_SPEAR_SMI_H + +#include +#include +#include +#include +#include + +/* max possible slots for serial-nor flash chip in the SMI controller */ +#define MAX_NUM_FLASH_CHIP 4 + +/* macro to define partitions for flash devices */ +#define DEFINE_PARTS(n, of, s) \ +{ \ + .name = n, \ + .offset = of, \ + .size = s, \ +} + +/** + * struct spear_smi_flash_info - platform structure for passing flash + * information + * + * name: name of the serial nor flash for identification + * mem_base: the memory base on which the flash is mapped + * size: size of the flash in bytes + * partitions: parition details + * nr_partitions: number of partitions + * fast_mode: whether flash supports fast mode + */ + +struct spear_smi_flash_info { + char *name; + unsigned long mem_base; + unsigned long size; + struct mtd_partition *partitions; + int nr_partitions; + u8 fast_mode; +}; + +/** + * struct spear_smi_plat_data - platform structure for configuring smi + * + * clk_rate: clk rate at which SMI must operate + * num_flashes: number of flashes present on board + * board_flash_info: specific details of each flash present on board + */ +struct spear_smi_plat_data { + unsigned long clk_rate; + int num_flashes; + struct spear_smi_flash_info *board_flash_info; + struct device_node *np[MAX_NUM_FLASH_CHIP]; +}; + +#endif /* __MTD_SPEAR_SMI_H */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h new file mode 100644 index 000000000..c922e97f2 --- /dev/null +++ b/include/linux/mtd/spi-nor.h @@ -0,0 +1,415 @@ +/* + * Copyright (C) 2014 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_MTD_SPI_NOR_H +#define __LINUX_MTD_SPI_NOR_H + +#include +#include +#include + +/* + * Manufacturer IDs + * + * The first byte returned from the flash after sending opcode SPINOR_OP_RDID. + * Sometimes these are the same as CFI IDs, but sometimes they aren't. + */ +#define SNOR_MFR_ATMEL CFI_MFR_ATMEL +#define SNOR_MFR_GIGADEVICE 0xc8 +#define SNOR_MFR_INTEL CFI_MFR_INTEL +#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ +#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX +#define SNOR_MFR_SPANSION CFI_MFR_AMD +#define SNOR_MFR_SST CFI_MFR_SST +#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */ + +/* + * Note on opcode nomenclature: some opcodes have a format like + * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number + * of I/O lines used for the opcode, address, and data (respectively). The + * FUNCTION has an optional suffix of '4', to represent an opcode which + * requires a 4-byte (32-bit) address. + */ + +/* Flash opcodes. 
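+ *
+ * As an example of the x_y_z naming described above, SPINOR_OP_READ_1_4_4_4B
+ * below denotes a read that sends the opcode on 1 I/O line, the address on 4
+ * lines and the data on 4 lines, and that takes a 4-byte address;
+ * SPINOR_OP_PP_1_1_4 is a page program with a 1-line opcode, a 1-line address
+ * and a 4-line data phase.
+ *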
*/ +#define SPINOR_OP_WREN 0x06 /* Write enable */ +#define SPINOR_OP_RDSR 0x05 /* Read status register */ +#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ +#define SPINOR_OP_RDSR2 0x3f /* Read status register 2 */ +#define SPINOR_OP_WRSR2 0x3e /* Write status register 2 */ +#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */ +#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */ +#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */ +#define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */ +#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */ +#define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */ +#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ +#define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */ +#define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */ +#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ +#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ +#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ +#define SPINOR_OP_CHIP_ERASE 0xc7 /* Erase whole flash chip */ +#define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */ +#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */ +#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */ +#define SPINOR_OP_RDCR 0x35 /* Read configuration register */ +#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ +#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ +#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */ +#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */ + +/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ +#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ +#define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */ +#define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */ +#define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */ +#define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ +#define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ +#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ +#define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */ +#define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */ +#define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */ +#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */ +#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ + +/* Double Transfer Rate opcodes - defined in JEDEC JESD216B. */ +#define SPINOR_OP_READ_1_1_1_DTR 0x0d +#define SPINOR_OP_READ_1_2_2_DTR 0xbd +#define SPINOR_OP_READ_1_4_4_DTR 0xed + +#define SPINOR_OP_READ_1_1_1_DTR_4B 0x0e +#define SPINOR_OP_READ_1_2_2_DTR_4B 0xbe +#define SPINOR_OP_READ_1_4_4_DTR_4B 0xee + +/* Used for SST flashes only. */ +#define SPINOR_OP_BP 0x02 /* Byte program */ +#define SPINOR_OP_WRDI 0x04 /* Write disable */ +#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ + +/* Used for S3AN flashes only */ +#define SPINOR_OP_XSE 0x50 /* Sector erase */ +#define SPINOR_OP_XPP 0x82 /* Page program */ +#define SPINOR_OP_XRDSR 0xd7 /* Read status register */ + +#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */ +#define XSR_RDY BIT(7) /* Ready */ + + +/* Used for Macronix and Winbond flashes. */ +#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ +#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ + +/* Used for Spansion flashes only. 
*/ +#define SPINOR_OP_BRWR 0x17 /* Bank register write */ +#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ + +/* Used for Micron flashes only. */ +#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ +#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ + +/* Status Register bits. */ +#define SR_WIP BIT(0) /* Write in progress */ +#define SR_WEL BIT(1) /* Write enable latch */ +/* meaning of other SR_* bits may differ between vendors */ +#define SR_BP0 BIT(2) /* Block protect 0 */ +#define SR_BP1 BIT(3) /* Block protect 1 */ +#define SR_BP2 BIT(4) /* Block protect 2 */ +#define SR_TB BIT(5) /* Top/Bottom protect */ +#define SR_SRWD BIT(7) /* SR write protect */ +/* Spansion/Cypress specific status bits */ +#define SR_E_ERR BIT(5) +#define SR_P_ERR BIT(6) + +#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */ + +/* Enhanced Volatile Configuration Register bits */ +#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ + +/* Flag Status Register bits */ +#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */ +#define FSR_E_ERR BIT(5) /* Erase operation status */ +#define FSR_P_ERR BIT(4) /* Program operation status */ +#define FSR_PT_ERR BIT(1) /* Protection error bit */ + +/* Configuration Register bits. */ +#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ + +/* Status Register 2 bits. */ +#define SR2_QUAD_EN_BIT7 BIT(7) + +/* Supported SPI protocols */ +#define SNOR_PROTO_INST_MASK GENMASK(23, 16) +#define SNOR_PROTO_INST_SHIFT 16 +#define SNOR_PROTO_INST(_nbits) \ + ((((unsigned long)(_nbits)) << SNOR_PROTO_INST_SHIFT) & \ + SNOR_PROTO_INST_MASK) + +#define SNOR_PROTO_ADDR_MASK GENMASK(15, 8) +#define SNOR_PROTO_ADDR_SHIFT 8 +#define SNOR_PROTO_ADDR(_nbits) \ + ((((unsigned long)(_nbits)) << SNOR_PROTO_ADDR_SHIFT) & \ + SNOR_PROTO_ADDR_MASK) + +#define SNOR_PROTO_DATA_MASK GENMASK(7, 0) +#define SNOR_PROTO_DATA_SHIFT 0 +#define SNOR_PROTO_DATA(_nbits) \ + ((((unsigned long)(_nbits)) << SNOR_PROTO_DATA_SHIFT) & \ + SNOR_PROTO_DATA_MASK) + +#define SNOR_PROTO_IS_DTR BIT(24) /* Double Transfer Rate */ + +#define SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits) \ + (SNOR_PROTO_INST(_inst_nbits) | \ + SNOR_PROTO_ADDR(_addr_nbits) | \ + SNOR_PROTO_DATA(_data_nbits)) +#define SNOR_PROTO_DTR(_inst_nbits, _addr_nbits, _data_nbits) \ + (SNOR_PROTO_IS_DTR | \ + SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits)) + +enum spi_nor_protocol { + SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1), + SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2), + SNOR_PROTO_1_1_4 = SNOR_PROTO_STR(1, 1, 4), + SNOR_PROTO_1_1_8 = SNOR_PROTO_STR(1, 1, 8), + SNOR_PROTO_1_2_2 = SNOR_PROTO_STR(1, 2, 2), + SNOR_PROTO_1_4_4 = SNOR_PROTO_STR(1, 4, 4), + SNOR_PROTO_1_8_8 = SNOR_PROTO_STR(1, 8, 8), + SNOR_PROTO_2_2_2 = SNOR_PROTO_STR(2, 2, 2), + SNOR_PROTO_4_4_4 = SNOR_PROTO_STR(4, 4, 4), + SNOR_PROTO_8_8_8 = SNOR_PROTO_STR(8, 8, 8), + + SNOR_PROTO_1_1_1_DTR = SNOR_PROTO_DTR(1, 1, 1), + SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2), + SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4), + SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8), +}; + +static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto) +{ + return !!(proto & SNOR_PROTO_IS_DTR); +} + +static inline u8 spi_nor_get_protocol_inst_nbits(enum spi_nor_protocol proto) +{ + return ((unsigned long)(proto & SNOR_PROTO_INST_MASK)) >> + SNOR_PROTO_INST_SHIFT; +} + +static inline u8 spi_nor_get_protocol_addr_nbits(enum spi_nor_protocol proto) +{ + return ((unsigned long)(proto & SNOR_PROTO_ADDR_MASK)) >> + SNOR_PROTO_ADDR_SHIFT; +} + 
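+/*
+ * As an example, for SNOR_PROTO_1_4_4 the helpers above and below decompose
+ * the protocol as follows (a quick sanity sketch, not used by the core):
+ *
+ *	spi_nor_get_protocol_inst_nbits(SNOR_PROTO_1_4_4) == 1
+ *	spi_nor_get_protocol_addr_nbits(SNOR_PROTO_1_4_4) == 4
+ *	spi_nor_get_protocol_data_nbits(SNOR_PROTO_1_4_4) == 4
+ *	spi_nor_protocol_is_dtr(SNOR_PROTO_1_4_4) == false
+ */
+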
+static inline u8 spi_nor_get_protocol_data_nbits(enum spi_nor_protocol proto)
+{
+	return ((unsigned long)(proto & SNOR_PROTO_DATA_MASK)) >>
+	       SNOR_PROTO_DATA_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
+{
+	return spi_nor_get_protocol_data_nbits(proto);
+}
+
+#define SPI_NOR_MAX_CMD_SIZE 8
+enum spi_nor_ops {
+	SPI_NOR_OPS_READ = 0,
+	SPI_NOR_OPS_WRITE,
+	SPI_NOR_OPS_ERASE,
+	SPI_NOR_OPS_LOCK,
+	SPI_NOR_OPS_UNLOCK,
+};
+
+enum spi_nor_option_flags {
+	SNOR_F_USE_FSR = BIT(0),
+	SNOR_F_HAS_SR_TB = BIT(1),
+	SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
+	SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
+	SNOR_F_READY_XSR_RDY = BIT(4),
+	SNOR_F_USE_CLSR = BIT(5),
+	SNOR_F_BROKEN_RESET = BIT(6),
+};
+
+/**
+ * struct flash_info - Forward declaration of a structure used internally by
+ * spi_nor_scan()
+ */
+struct flash_info;
+
+/**
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd: pointer to an mtd_info structure
+ * @lock: the lock for the read/write/erase/lock/unlock operations
+ * @dev: pointer to a spi device, or a spi nor controller device.
+ * @info: spi-nor part JEDEC MFR id and other info
+ * @page_size: the page size of the SPI NOR
+ * @addr_width: number of address bytes
+ * @erase_opcode: the opcode for erasing a sector
+ * @read_opcode: the read opcode
+ * @read_dummy: the dummy needed by the read operation
+ * @program_opcode: the program opcode
+ * @sst_write_second: used by the SST write operation
+ * @flags: flag options for the current SPI-NOR (SNOR_F_*)
+ * @read_proto: the SPI protocol for read operations
+ * @write_proto: the SPI protocol for write operations
+ * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
+ * @cmd_buf: used by the write_reg
+ * @prepare: [OPTIONAL] do some preparations for the
+ * read/write/erase/lock/unlock operations
+ * @unprepare: [OPTIONAL] do some post work after the
+ * read/write/erase/lock/unlock operations
+ * @read_reg: [DRIVER-SPECIFIC] read out the register
+ * @write_reg: [DRIVER-SPECIFIC] write data to the register
+ * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
+ * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
+ * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
+ * at the offset @offs; if not provided by the driver,
+ * spi-nor will send the erase opcode via write_reg()
+ * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
+ * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
+ * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
+ * completely locked
+ * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode
+ * @priv: the private data
+ */
+struct spi_nor {
+	struct mtd_info mtd;
+	struct mutex lock;
+	struct device *dev;
+	const struct flash_info *info;
+	u32 page_size;
+	u8 addr_width;
+	u8 erase_opcode;
+	u8 read_opcode;
+	u8 read_dummy;
+	u8 program_opcode;
+	enum spi_nor_protocol read_proto;
+	enum spi_nor_protocol write_proto;
+	enum spi_nor_protocol reg_proto;
+	bool sst_write_second;
+	u32 flags;
+	u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+
+	int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+	void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+	int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
+	int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
+
+	ssize_t (*read)(struct spi_nor *nor, loff_t from,
+			size_t len, u_char *read_buf);
+	ssize_t (*write)(struct spi_nor *nor, loff_t to,
+			size_t len, const u_char *write_buf);
+	int (*erase)(struct spi_nor *nor,
loff_t offs); + + int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*quad_enable)(struct spi_nor *nor); + + void *priv; +}; + +static inline void spi_nor_set_flash_node(struct spi_nor *nor, + struct device_node *np) +{ + mtd_set_of_node(&nor->mtd, np); +} + +static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) +{ + return mtd_get_of_node(&nor->mtd); +} + +/** + * struct spi_nor_hwcaps - Structure for describing the hardware capabilies + * supported by the SPI controller (bus master). + * @mask: the bitmask listing all the supported hw capabilies + */ +struct spi_nor_hwcaps { + u32 mask; +}; + +/* + *(Fast) Read capabilities. + * MUST be ordered by priority: the higher bit position, the higher priority. + * As a matter of performances, it is relevant to use Octo SPI protocols first, + * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly + * (Slow) Read. + */ +#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0) +#define SNOR_HWCAPS_READ BIT(0) +#define SNOR_HWCAPS_READ_FAST BIT(1) +#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2) + +#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3) +#define SNOR_HWCAPS_READ_1_1_2 BIT(3) +#define SNOR_HWCAPS_READ_1_2_2 BIT(4) +#define SNOR_HWCAPS_READ_2_2_2 BIT(5) +#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6) + +#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7) +#define SNOR_HWCAPS_READ_1_1_4 BIT(7) +#define SNOR_HWCAPS_READ_1_4_4 BIT(8) +#define SNOR_HWCAPS_READ_4_4_4 BIT(9) +#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) + +#define SNOR_HWCPAS_READ_OCTO GENMASK(14, 11) +#define SNOR_HWCAPS_READ_1_1_8 BIT(11) +#define SNOR_HWCAPS_READ_1_8_8 BIT(12) +#define SNOR_HWCAPS_READ_8_8_8 BIT(13) +#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14) + +/* + * Page Program capabilities. + * MUST be ordered by priority: the higher bit position, the higher priority. + * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the + * legacy SPI 1-1-1 protocol. + * Note that Dual Page Programs are not supported because there is no existing + * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory + * implements such commands. + */ +#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16) +#define SNOR_HWCAPS_PP BIT(16) + +#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17) +#define SNOR_HWCAPS_PP_1_1_4 BIT(17) +#define SNOR_HWCAPS_PP_1_4_4 BIT(18) +#define SNOR_HWCAPS_PP_4_4_4 BIT(19) + +#define SNOR_HWCAPS_PP_OCTO GENMASK(22, 20) +#define SNOR_HWCAPS_PP_1_1_8 BIT(20) +#define SNOR_HWCAPS_PP_1_8_8 BIT(21) +#define SNOR_HWCAPS_PP_8_8_8 BIT(22) + +/** + * spi_nor_scan() - scan the SPI NOR + * @nor: the spi_nor structure + * @name: the chip type name + * @hwcaps: the hardware capabilities supported by the controller driver + * + * The drivers can use this fuction to scan the SPI NOR. + * In the scanning, it will try to get all the necessary information to + * fill the mtd_info{} and the spi_nor{}. + * + * The chip type name can be provided through the @name parameter. + * + * Return: 0 for success, others for failure. 
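+ *
+ * A minimal call sequence from a controller driver could look like the sketch
+ * below; the hwcaps mask is only an example of what a given controller might
+ * support:
+ *
+ *	const struct spi_nor_hwcaps hwcaps = {
+ *		.mask = SNOR_HWCAPS_READ |
+ *			SNOR_HWCAPS_READ_FAST |
+ *			SNOR_HWCAPS_READ_1_1_4 |
+ *			SNOR_HWCAPS_PP,
+ *	};
+ *
+ *	ret = spi_nor_scan(nor, NULL, &hwcaps);
+ *	if (ret)
+ *		return ret;
+ *	return mtd_device_register(&nor->mtd, NULL, 0);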
+ */ +int spi_nor_scan(struct spi_nor *nor, const char *name, + const struct spi_nor_hwcaps *hwcaps); + +/** + * spi_nor_restore_addr_mode() - restore the status of SPI NOR + * @nor: the spi_nor structure + */ +void spi_nor_restore(struct spi_nor *nor); + +#endif diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h new file mode 100644 index 000000000..088ff96c3 --- /dev/null +++ b/include/linux/mtd/spinand.h @@ -0,0 +1,421 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2017 Micron Technology, Inc. + * + * Authors: + * Peter Pan + */ +#ifndef __LINUX_MTD_SPINAND_H +#define __LINUX_MTD_SPINAND_H + +#include +#include +#include +#include +#include +#include +#include + +/** + * Standard SPI NAND flash operations + */ + +#define SPINAND_RESET_OP \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPINAND_WR_EN_DIS_OP(enable) \ + SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPINAND_READID_OP(ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 1)) + +#define SPINAND_SET_FEATURE_OP(reg, valptr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \ + SPI_MEM_OP_ADDR(1, reg, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(1, valptr, 1)) + +#define SPINAND_GET_FEATURE_OP(reg, valptr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \ + SPI_MEM_OP_ADDR(1, reg, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_IN(1, valptr, 1)) + +#define SPINAND_BLK_ERASE_OP(addr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPINAND_PAGE_READ_OP(addr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 1)) + +#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 2)) + +#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 4)) + +#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ + SPI_MEM_OP_ADDR(2, addr, 2), \ + SPI_MEM_OP_DUMMY(ndummy, 2), \ + SPI_MEM_OP_DATA_IN(len, buf, 2)) + +#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ + SPI_MEM_OP_ADDR(2, addr, 4), \ + SPI_MEM_OP_DUMMY(ndummy, 4), \ + SPI_MEM_OP_DATA_IN(len, buf, 4)) + +#define SPINAND_PROG_EXEC_OP(addr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPINAND_PROG_LOAD(reset, addr, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(len, buf, 1)) + +#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 
0x32 : 0x34, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(len, buf, 4)) + +/** + * Standard SPI NAND flash commands + */ +#define SPINAND_CMD_PROG_LOAD_X4 0x32 +#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34 + +/* feature register */ +#define REG_BLOCK_LOCK 0xa0 +#define BL_ALL_UNLOCKED 0x00 + +/* configuration register */ +#define REG_CFG 0xb0 +#define CFG_OTP_ENABLE BIT(6) +#define CFG_ECC_ENABLE BIT(4) +#define CFG_QUAD_ENABLE BIT(0) + +/* status register */ +#define REG_STATUS 0xc0 +#define STATUS_BUSY BIT(0) +#define STATUS_ERASE_FAILED BIT(2) +#define STATUS_PROG_FAILED BIT(3) +#define STATUS_ECC_MASK GENMASK(5, 4) +#define STATUS_ECC_NO_BITFLIPS (0 << 4) +#define STATUS_ECC_HAS_BITFLIPS (1 << 4) +#define STATUS_ECC_UNCOR_ERROR (2 << 4) + +struct spinand_op; +struct spinand_device; + +#define SPINAND_MAX_ID_LEN 4 + +/** + * struct spinand_id - SPI NAND id structure + * @data: buffer containing the id bytes. Currently 4 bytes large, but can + * be extended if required + * @len: ID length + * + * struct_spinand_id->data contains all bytes returned after a READ_ID command, + * including dummy bytes if the chip does not emit ID bytes right after the + * READ_ID command. The responsibility to extract real ID bytes is left to + * struct_manufacurer_ops->detect(). + */ +struct spinand_id { + u8 data[SPINAND_MAX_ID_LEN]; + int len; +}; + +/** + * struct manufacurer_ops - SPI NAND manufacturer specific operations + * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed + * the core calls the struct_manufacurer_ops->detect() hook of each + * registered manufacturer until one of them return 1. Note that + * the first thing to check in this hook is that the manufacturer ID + * in struct_spinand_device->id matches the manufacturer whose + * ->detect() hook has been called. Should return 1 if there's a + * match, 0 if the manufacturer ID does not match and a negative + * error code otherwise. When true is returned, the core assumes + * that properties of the NAND chip (spinand->base.memorg and + * spinand->base.eccreq) have been filled + * @init: initialize a SPI NAND device + * @cleanup: cleanup a SPI NAND device + * + * Each SPI NAND manufacturer driver should implement this interface so that + * NAND chips coming from this vendor can be detected and initialized properly. + */ +struct spinand_manufacturer_ops { + int (*detect)(struct spinand_device *spinand); + int (*init)(struct spinand_device *spinand); + void (*cleanup)(struct spinand_device *spinand); +}; + +/** + * struct spinand_manufacturer - SPI NAND manufacturer instance + * @id: manufacturer ID + * @name: manufacturer name + * @ops: manufacturer operations + */ +struct spinand_manufacturer { + u8 id; + char *name; + const struct spinand_manufacturer_ops *ops; +}; + +/* SPI NAND manufacturers */ +extern const struct spinand_manufacturer macronix_spinand_manufacturer; +extern const struct spinand_manufacturer micron_spinand_manufacturer; +extern const struct spinand_manufacturer winbond_spinand_manufacturer; + +/** + * struct spinand_op_variants - SPI NAND operation variants + * @ops: the list of variants for a given operation + * @nops: the number of variants + * + * Some operations like read-from-cache/write-to-cache have several variants + * depending on the number of IO lines you use to transfer data or address + * cycles. 
This structure is a way to describe the different variants supported + * by a chip and let the core pick the best one based on the SPI mem controller + * capabilities. + */ +struct spinand_op_variants { + const struct spi_mem_op *ops; + unsigned int nops; +}; + +#define SPINAND_OP_VARIANTS(name, ...) \ + const struct spinand_op_variants name = { \ + .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \ + .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \ + sizeof(struct spi_mem_op), \ + } + +/** + * spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND + * chip + * @get_status: get the ECC status. Should return a positive number encoding + * the number of corrected bitflips if correction was possible or + * -EBADMSG if there are uncorrectable errors. I can also return + * other negative error codes if the error is not caused by + * uncorrectable bitflips + * @ooblayout: the OOB layout used by the on-die ECC implementation + */ +struct spinand_ecc_info { + int (*get_status)(struct spinand_device *spinand, u8 status); + const struct mtd_ooblayout_ops *ooblayout; +}; + +#define SPINAND_HAS_QE_BIT BIT(0) + +/** + * struct spinand_info - Structure used to describe SPI NAND chips + * @model: model name + * @devid: device ID + * @flags: OR-ing of the SPINAND_XXX flags + * @memorg: memory organization + * @eccreq: ECC requirements + * @eccinfo: on-die ECC info + * @op_variants: operations variants + * @op_variants.read_cache: variants of the read-cache operation + * @op_variants.write_cache: variants of the write-cache operation + * @op_variants.update_cache: variants of the update-cache operation + * @select_target: function used to select a target/die. Required only for + * multi-die chips + * + * Each SPI NAND manufacturer driver should have a spinand_info table + * describing all the chips supported by the driver. + */ +struct spinand_info { + const char *model; + u8 devid; + u32 flags; + struct nand_memory_organization memorg; + struct nand_ecc_req eccreq; + struct spinand_ecc_info eccinfo; + struct { + const struct spinand_op_variants *read_cache; + const struct spinand_op_variants *write_cache; + const struct spinand_op_variants *update_cache; + } op_variants; + int (*select_target)(struct spinand_device *spinand, + unsigned int target); +}; + +#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \ + { \ + .read_cache = __read, \ + .write_cache = __write, \ + .update_cache = __update, \ + } + +#define SPINAND_ECCINFO(__ooblayout, __get_status) \ + .eccinfo = { \ + .ooblayout = __ooblayout, \ + .get_status = __get_status, \ + } + +#define SPINAND_SELECT_TARGET(__func) \ + .select_target = __func, + +#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \ + __flags, ...) \ + { \ + .model = __model, \ + .devid = __id, \ + .memorg = __memorg, \ + .eccreq = __eccreq, \ + .op_variants = __op_variants, \ + .flags = __flags, \ + __VA_ARGS__ \ + } + +/** + * struct spinand_device - SPI NAND device instance + * @base: NAND device instance + * @spimem: pointer to the SPI mem object + * @lock: lock used to serialize accesses to the NAND + * @id: NAND ID as returned by READ_ID + * @flags: NAND flags + * @op_templates: various SPI mem op templates + * @op_templates.read_cache: read cache op template + * @op_templates.write_cache: write cache op template + * @op_templates.update_cache: update cache op template + * @select_target: select a specific target/die. 
Usually called before sending + * a command addressing a page or an eraseblock embedded in + * this die. Only required if your chip exposes several dies + * @cur_target: currently selected target/die + * @eccinfo: on-die ECC information + * @cfg_cache: config register cache. One entry per die + * @databuf: bounce buffer for data + * @oobbuf: bounce buffer for OOB data + * @scratchbuf: buffer used for everything but page accesses. This is needed + * because the spi-mem interface explicitly requests that buffers + * passed in spi_mem_op be DMA-able, so we can't based the bufs on + * the stack + * @manufacturer: SPI NAND manufacturer information + * @priv: manufacturer private data + */ +struct spinand_device { + struct nand_device base; + struct spi_mem *spimem; + struct mutex lock; + struct spinand_id id; + u32 flags; + + struct { + const struct spi_mem_op *read_cache; + const struct spi_mem_op *write_cache; + const struct spi_mem_op *update_cache; + } op_templates; + + int (*select_target)(struct spinand_device *spinand, + unsigned int target); + unsigned int cur_target; + + struct spinand_ecc_info eccinfo; + + u8 *cfg_cache; + u8 *databuf; + u8 *oobbuf; + u8 *scratchbuf; + const struct spinand_manufacturer *manufacturer; + void *priv; +}; + +/** + * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance + * @mtd: MTD instance + * + * Return: the SPI NAND device attached to @mtd. + */ +static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd) +{ + return container_of(mtd_to_nanddev(mtd), struct spinand_device, base); +} + +/** + * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device + * @spinand: SPI NAND device + * + * Return: the MTD device embedded in @spinand. + */ +static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand) +{ + return nanddev_to_mtd(&spinand->base); +} + +/** + * nand_to_spinand() - Get the SPI NAND device embedding an NAND object + * @nand: NAND object + * + * Return: the SPI NAND device embedding @nand. + */ +static inline struct spinand_device *nand_to_spinand(struct nand_device *nand) +{ + return container_of(nand, struct spinand_device, base); +} + +/** + * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object + * @spinand: SPI NAND device + * + * Return: the NAND device embedded in @spinand. + */ +static inline struct nand_device * +spinand_to_nand(struct spinand_device *spinand) +{ + return &spinand->base; +} + +/** + * spinand_set_of_node - Attach a DT node to a SPI NAND device + * @spinand: SPI NAND device + * @np: DT node + * + * Attach a DT node to a SPI NAND device. + */ +static inline void spinand_set_of_node(struct spinand_device *spinand, + struct device_node *np) +{ + nanddev_set_of_node(&spinand->base, np); +} + +int spinand_match_and_init(struct spinand_device *dev, + const struct spinand_info *table, + unsigned int table_size, u8 devid); + +int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); +int spinand_select_target(struct spinand_device *spinand, unsigned int target); + +#endif /* __LINUX_MTD_SPINAND_H */ diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h new file mode 100644 index 000000000..f456230f9 --- /dev/null +++ b/include/linux/mtd/super.h @@ -0,0 +1,29 @@ +/* MTD-based superblock handling + * + * Copyright © 2006 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef __MTD_SUPER_H__ +#define __MTD_SUPER_H__ + +#ifdef __KERNEL__ + +#include +#include +#include + +extern struct dentry *mount_mtd(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, + int (*fill_super)(struct super_block *, void *, int)); +extern void kill_mtd_super(struct super_block *sb); + + +#endif /* __KERNEL__ */ + +#endif /* __MTD_SUPER_H__ */ diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h new file mode 100644 index 000000000..1e271cb55 --- /dev/null +++ b/include/linux/mtd/ubi.h @@ -0,0 +1,284 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __LINUX_UBI_H__ +#define __LINUX_UBI_H__ + +#include +#include +#include +#include + +/* All voumes/LEBs */ +#define UBI_ALL -1 + +/* + * Maximum number of scatter gather list entries, + * we use only 64 to have a lower memory foot print. + */ +#define UBI_MAX_SG_COUNT 64 + +/* + * enum ubi_open_mode - UBI volume open mode constants. + * + * UBI_READONLY: read-only mode + * UBI_READWRITE: read-write mode + * UBI_EXCLUSIVE: exclusive mode + * UBI_METAONLY: modify only the volume meta-data, + * i.e. the data stored in the volume table, but not in any of volume LEBs. + */ +enum { + UBI_READONLY = 1, + UBI_READWRITE, + UBI_EXCLUSIVE, + UBI_METAONLY +}; + +/** + * struct ubi_volume_info - UBI volume description data structure. + * @vol_id: volume ID + * @ubi_num: UBI device number this volume belongs to + * @size: how many physical eraseblocks are reserved for this volume + * @used_bytes: how many bytes of data this volume contains + * @used_ebs: how many physical eraseblocks of this volume actually contain any + * data + * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) + * @corrupted: non-zero if the volume is corrupted (static volumes only) + * @upd_marker: non-zero if the volume has update marker set + * @alignment: volume alignment + * @usable_leb_size: how many bytes are available in logical eraseblocks of + * this volume + * @name_len: volume name length + * @name: volume name + * @cdev: UBI volume character device major and minor numbers + * + * The @corrupted flag is only relevant to static volumes and is always zero + * for dynamic ones. This is because UBI does not care about dynamic volume + * data protection and only cares about protecting static volume data. + * + * The @upd_marker flag is set if the volume update operation was interrupted. 
+ * Before touching the volume data during the update operation, UBI first sets
+ * the update marker flag for this volume. If the volume update operation was
+ * further interrupted, the update marker indicates this. If the update marker
+ * is set, the contents of the volume are certainly damaged and a new volume
+ * update operation has to be started.
+ *
+ * To put it differently, @corrupted and @upd_marker fields have different
+ * semantics:
+ * o the @corrupted flag means that this static volume is corrupted for some
+ *   reason, but not because of an interrupted volume update
+ * o the @upd_marker field means that the volume is damaged because of an
+ *   interrupted update operation.
+ *
+ * I.e., the @corrupted flag is never set if the @upd_marker flag is set.
+ *
+ * The @used_bytes and @used_ebs fields are only really needed for static
+ * volumes and contain the number of bytes stored in this static volume and how
+ * many eraseblocks this data occupies. In case of dynamic volumes, the
+ * @used_bytes field is equivalent to @size*@usable_leb_size, and the @used_ebs
+ * field is equivalent to @size.
+ *
+ * In general, logical eraseblock size is a property of the UBI device, not
+ * of the UBI volume. Indeed, the logical eraseblock size depends on the
+ * physical eraseblock size and on how many bytes UBI headers consume. But
+ * because of the volume alignment (@alignment), the usable size of logical
+ * eraseblocks of a volume may be less. The following equation is true:
+ * @usable_leb_size = LEB size - (LEB size mod @alignment),
+ * where LEB size is the logical eraseblock size defined by the UBI device.
+ *
+ * The alignment is a multiple of the minimal flash input/output unit size, or
+ * %1 if all the available space is used.
+ *
+ * To put this differently, alignment may be considered as a way to change
+ * volume logical eraseblock sizes.
+ */
+struct ubi_volume_info {
+	int ubi_num;
+	int vol_id;
+	int size;
+	long long used_bytes;
+	int used_ebs;
+	int vol_type;
+	int corrupted;
+	int upd_marker;
+	int alignment;
+	int usable_leb_size;
+	int name_len;
+	const char *name;
+	dev_t cdev;
+};
+
+/**
+ * struct ubi_sgl - UBI scatter gather list data structure.
+ * @list_pos: current position in @sg[]
+ * @page_pos: current position in @sg[@list_pos]
+ * @sg: the scatter gather list itself
+ *
+ * ubi_sgl is a wrapper around a scatter list which keeps track of the
+ * current position in the list and the current list item such that
+ * it can be used across multiple ubi_leb_read_sg() calls.
+ */
+struct ubi_sgl {
+	int list_pos;
+	int page_pos;
+	struct scatterlist sg[UBI_MAX_SG_COUNT];
+};
+
+/**
+ * ubi_sgl_init - initialize an UBI scatter gather list data structure.
+ * @usgl: the UBI scatter gather struct itself
+ *
+ * Please note that you still have to use sg_init_table() or any adequate
+ * function to initialize the underlying struct scatterlist.
+ */
+static inline void ubi_sgl_init(struct ubi_sgl *usgl)
+{
+	usgl->list_pos = 0;
+	usgl->page_pos = 0;
+}
+
+/**
+ * struct ubi_device_info - UBI device description data structure.
+ * @ubi_num: ubi device number
+ * @leb_size: logical eraseblock size on this UBI device
+ * @leb_start: starting offset of logical eraseblocks within physical
+ * eraseblocks
+ * @min_io_size: minimal I/O unit size
+ * @max_write_size: maximum number of bytes the underlying flash can write at a
+ * time (MTD write buffer size)
+ * @ro_mode: if this device is in read-only mode
+ * @cdev: UBI character device major and minor numbers
+ *
+ * Note, @leb_size is the logical eraseblock size offered by the UBI device.
+ * Volumes of this UBI device may have a smaller logical eraseblock size if
+ * their alignment is not equivalent to %1.
+ *
+ * The @max_write_size field describes the maximum flash write unit. For
+ * example, NOR flash allows for changing individual bytes, so @min_io_size is
+ * %1. However, it does not mean that NOR flash has to write data byte-by-byte.
+ * Instead, CFI NOR flashes have a write-buffer of, e.g., 64 bytes, and when
+ * writing large chunks of data, they write 64 bytes at a time. Obviously, this
+ * improves write throughput.
+ *
+ * Also, the MTD device may have N interleaved (striped) flash chips
+ * underneath, in which case @min_io_size can be the physical min. I/O size of
+ * a single flash chip, while @max_write_size can be N * @min_io_size.
+ *
+ * The @max_write_size field is always greater than or equal to @min_io_size.
+ * E.g., some NOR flashes may have (@min_io_size = 1, @max_write_size = 64). In
+ * contrast, NAND flashes usually have @min_io_size = @max_write_size = NAND
+ * page size.
+ */
+struct ubi_device_info {
+ int ubi_num;
+ int leb_size;
+ int leb_start;
+ int min_io_size;
+ int max_write_size;
+ int ro_mode;
+ dev_t cdev;
+};
+
+/*
+ * Volume notification types.
+ * @UBI_VOLUME_ADDED: a volume has been added (a UBI device was attached or a
+ * volume was created)
+ * @UBI_VOLUME_REMOVED: a volume has been removed (a UBI device was detached
+ * or a volume was removed)
+ * @UBI_VOLUME_RESIZED: a volume has been re-sized
+ * @UBI_VOLUME_RENAMED: a volume has been re-named
+ * @UBI_VOLUME_UPDATED: data has been written to a volume
+ *
+ * These constants define which type of event has happened when a volume
+ * notification function is invoked.
+ */
+enum {
+ UBI_VOLUME_ADDED,
+ UBI_VOLUME_REMOVED,
+ UBI_VOLUME_RESIZED,
+ UBI_VOLUME_RENAMED,
+ UBI_VOLUME_UPDATED,
+};
+
+/*
+ * struct ubi_notification - UBI notification description structure.
+ * @di: UBI device description object
+ * @vi: UBI volume description object
+ *
+ * UBI notifiers are called with a pointer to an object of this type. The
+ * object describes the notification. Namely, it provides a description of the
+ * UBI device and UBI volume the notification informs about.
+ */
+struct ubi_notification {
+ struct ubi_device_info di;
+ struct ubi_volume_info vi;
+};
+
+/* UBI descriptor given to users when they open UBI volumes */
+struct ubi_volume_desc;
+
+int ubi_get_device_info(int ubi_num, struct ubi_device_info *di);
+void ubi_get_volume_info(struct ubi_volume_desc *desc,
+ struct ubi_volume_info *vi);
+struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode);
+struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
+ int mode);
+struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode);
+
+int ubi_register_volume_notifier(struct notifier_block *nb,
+ int ignore_existing);
+int ubi_unregister_volume_notifier(struct notifier_block *nb);
+
+void ubi_close_volume(struct ubi_volume_desc *desc);
+int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
+ int len, int check);
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+ int offset, int len, int check);
+int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int offset, int len);
+int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int len);
+int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum);
+int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum);
+int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
+int ubi_sync(int ubi_num);
+int ubi_flush(int ubi_num, int vol_id, int lnum);
+
+/*
+ * This function is the same as the 'ubi_leb_read()' function, but it does not
+ * provide the checking capability.
+ */
+static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf,
+ int offset, int len)
+{
+ return ubi_leb_read(desc, lnum, buf, offset, len, 0);
+}
+
+/*
+ * This function is the same as the 'ubi_leb_read_sg()' function, but it does
+ * not provide the checking capability.
+ */
+static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum,
+ struct ubi_sgl *sgl, int offset, int len)
+{
+ return ubi_leb_read_sg(desc, lnum, sgl, offset, len, 0);
+}
+#endif /* !__LINUX_UBI_H__ */
diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h
new file mode 100644
index 000000000..e373690cc
--- /dev/null
+++ b/include/linux/mtd/xip.h
@@ -0,0 +1,101 @@
+/*
+ * MTD primitives for XIP support
+ *
+ * Author: Nicolas Pitre
+ * Created: Nov 2, 2004
+ * Copyright: (C) 2004 MontaVista Software, Inc.
+ *
+ * This XIP support for MTD has been loosely inspired
+ * by an earlier patch authored by David Woodhouse.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MTD_XIP_H__
+#define __LINUX_MTD_XIP_H__
+
+
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * We really don't want gcc to guess anything.
+ * We absolutely _need_ proper inlining.
+ */
+#include
+
+/*
+ * Functions that modify the flash state away from array mode must
+ * obviously not run from flash. The __xipram annotation therefore marks
+ * those functions so they get relocated to RAM.
+ */
+#ifdef CONFIG_XIP_KERNEL
+#define __xipram noinline __attribute__ ((__section__ (".xiptext")))
+#endif
+
+/*
+ * Each architecture has to provide the following macros. They must access
+ * the hardware directly and not rely on any other (XIP) functions since they
+ * won't be available when used (flash not in array mode).
+ * + * xip_irqpending() + * + * return non zero when any hardware interrupt is pending. + * + * xip_currtime() + * + * return a platform specific time reference to be used with + * xip_elapsed_since(). + * + * xip_elapsed_since(x) + * + * return in usecs the elapsed timebetween now and the reference x as + * returned by xip_currtime(). + * + * note 1: conversion to usec can be approximated, as long as the + * returned value is <= the real elapsed time. + * note 2: this should be able to cope with a few seconds without + * overflowing. + * + * xip_iprefetch() + * + * Macro to fill instruction prefetch + * e.g. a series of nops: asm volatile (".rep 8; nop; .endr"); + */ + +#include + +#ifndef xip_irqpending + +#warning "missing IRQ and timer primitives for XIP MTD support" +#warning "some of the XIP MTD support code will be disabled" +#warning "your system will therefore be unresponsive when writing or erasing flash" + +#define xip_irqpending() (0) +#define xip_currtime() (0) +#define xip_elapsed_since(x) (0) + +#endif + +#ifndef xip_iprefetch +#define xip_iprefetch() do { } while (0) +#endif + +/* + * xip_cpu_idle() is used when waiting for a delay equal or larger than + * the system timer tick period. This should put the CPU into idle mode + * to save power and to be woken up only when some interrupts are pending. + * This should not rely upon standard kernel code. + */ +#ifndef xip_cpu_idle +#define xip_cpu_idle() do { } while (0) +#endif + +#endif /* CONFIG_MTD_XIP */ + +#ifndef __xipram +#define __xipram +#endif + +#endif /* __LINUX_MTD_XIP_H__ */ diff --git a/include/linux/mutex.h b/include/linux/mutex.h new file mode 100644 index 000000000..8f7cdf83f --- /dev/null +++ b/include/linux/mutex.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Mutexes: blocking mutual exclusion locks + * + * started by Ingo Molnar: + * + * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar + * + * This file contains the main data structure and API definitions. + */ +#ifndef __LINUX_MUTEX_H +#define __LINUX_MUTEX_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct ww_acquire_ctx; + +/* + * Simple, straightforward mutexes with strict semantics: + * + * - only one task can hold the mutex at a time + * - only the owner can unlock the mutex + * - multiple unlocks are not permitted + * - recursive locking is not permitted + * - a mutex object must be initialized via the API + * - a mutex object must not be initialized via memset or copying + * - task may not exit with mutex held + * - memory areas where held locks reside must not be freed + * - held mutexes must not be reinitialized + * - mutexes may not be used in hardware or software interrupt + * contexts such as tasklets and timers + * + * These semantics are fully enforced when DEBUG_MUTEXES is + * enabled. 
Furthermore, besides enforcing the above rules, the mutex + * debugging code also implements a number of additional features + * that make lock debugging easier and faster: + * + * - uses symbolic names of mutexes, whenever they are printed in debug output + * - point-of-acquire tracking, symbolic lookup of function names + * - list of all locks held in the system, printout of them + * - owner tracking + * - detects self-recursing locks and prints out all relevant info + * - detects multi-task circular deadlocks and prints out all affected + * locks and tasks (and only those tasks) + */ +struct mutex { + atomic_long_t owner; + spinlock_t wait_lock; +#ifdef CONFIG_MUTEX_SPIN_ON_OWNER + struct optimistic_spin_queue osq; /* Spinner MCS lock */ +#endif + struct list_head wait_list; +#ifdef CONFIG_DEBUG_MUTEXES + void *magic; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +/* + * Internal helper function; C doesn't allow us to hide it :/ + * + * DO NOT USE (outside of mutex code). + */ +static inline struct task_struct *__mutex_owner(struct mutex *lock) +{ + return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07); +} + +/* + * This is the control structure for tasks blocked on mutex, + * which resides on the blocked task's kernel stack: + */ +struct mutex_waiter { + struct list_head list; + struct task_struct *task; + struct ww_acquire_ctx *ww_ctx; +#ifdef CONFIG_DEBUG_MUTEXES + void *magic; +#endif +}; + +#ifdef CONFIG_DEBUG_MUTEXES + +#define __DEBUG_MUTEX_INITIALIZER(lockname) \ + , .magic = &lockname + +extern void mutex_destroy(struct mutex *lock); + +#else + +# define __DEBUG_MUTEX_INITIALIZER(lockname) + +static inline void mutex_destroy(struct mutex *lock) {} + +#endif + +/** + * mutex_init - initialize the mutex + * @mutex: the mutex to be initialized + * + * Initialize the mutex to unlocked state. + * + * It is not allowed to initialize an already locked mutex. + */ +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + +#define __MUTEX_INITIALIZER(lockname) \ + { .owner = ATOMIC_LONG_INIT(0) \ + , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ + , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ + __DEBUG_MUTEX_INITIALIZER(lockname) \ + __DEP_MAP_MUTEX_INITIALIZER(lockname) } + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void __mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key); + +/** + * mutex_is_locked - is the mutex locked + * @lock: the mutex to be queried + * + * Returns true if the mutex is locked, false if unlocked. + */ +static inline bool mutex_is_locked(struct mutex *lock) +{ + return __mutex_owner(lock) != NULL; +} + +/* + * See kernel/locking/mutex.c for detailed documentation of these APIs. + * Also see Documentation/locking/mutex-design.txt. 
+ */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); +extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); + +extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, + unsigned int subclass); +extern int __must_check mutex_lock_killable_nested(struct mutex *lock, + unsigned int subclass); +extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass); + +#define mutex_lock(lock) mutex_lock_nested(lock, 0) +#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0) +#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0) +#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0) + +#define mutex_lock_nest_lock(lock, nest_lock) \ +do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ +} while (0) + +#else +extern void mutex_lock(struct mutex *lock); +extern int __must_check mutex_lock_interruptible(struct mutex *lock); +extern int __must_check mutex_lock_killable(struct mutex *lock); +extern void mutex_lock_io(struct mutex *lock); + +# define mutex_lock_nested(lock, subclass) mutex_lock(lock) +# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) +# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) +# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) +# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock) +#endif + +/* + * NOTE: mutex_trylock() follows the spin_trylock() convention, + * not the down_trylock() convention! + * + * Returns 1 if the mutex has been acquired successfully, and 0 on contention. + */ +extern int mutex_trylock(struct mutex *lock); +extern void mutex_unlock(struct mutex *lock); + +extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + +/* + * These values are chosen such that FAIL and SUCCESS match the + * values of the regular mutex_trylock(). + */ +enum mutex_trylock_recursive_enum { + MUTEX_TRYLOCK_FAILED = 0, + MUTEX_TRYLOCK_SUCCESS = 1, + MUTEX_TRYLOCK_RECURSIVE, +}; + +/** + * mutex_trylock_recursive - trylock variant that allows recursive locking + * @lock: mutex to be locked + * + * This function should not be used, _ever_. It is purely for hysterical GEM + * raisins, and once those are gone this will be removed. + * + * Returns: + * - MUTEX_TRYLOCK_FAILED - trylock failed, + * - MUTEX_TRYLOCK_SUCCESS - lock acquired, + * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. 
+ */ +static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum +mutex_trylock_recursive(struct mutex *lock) +{ + if (unlikely(__mutex_owner(lock) == current)) + return MUTEX_TRYLOCK_RECURSIVE; + + return mutex_trylock(lock); +} + +#endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h new file mode 100644 index 000000000..5fc6bb2fe --- /dev/null +++ b/include/linux/mux/consumer.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mux/consumer.h - definitions for the multiplexer consumer interface + * + * Copyright (C) 2017 Axentia Technologies AB + * + * Author: Peter Rosin + */ + +#ifndef _LINUX_MUX_CONSUMER_H +#define _LINUX_MUX_CONSUMER_H + +#include + +struct device; +struct mux_control; + +unsigned int mux_control_states(struct mux_control *mux); +int __must_check mux_control_select(struct mux_control *mux, + unsigned int state); +int __must_check mux_control_try_select(struct mux_control *mux, + unsigned int state); +int mux_control_deselect(struct mux_control *mux); + +struct mux_control *mux_control_get(struct device *dev, const char *mux_name); +void mux_control_put(struct mux_control *mux); + +struct mux_control *devm_mux_control_get(struct device *dev, + const char *mux_name); + +#endif /* _LINUX_MUX_CONSUMER_H */ diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h new file mode 100644 index 000000000..627a2c6bc --- /dev/null +++ b/include/linux/mux/driver.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mux/driver.h - definitions for the multiplexer driver interface + * + * Copyright (C) 2017 Axentia Technologies AB + * + * Author: Peter Rosin + */ + +#ifndef _LINUX_MUX_DRIVER_H +#define _LINUX_MUX_DRIVER_H + +#include +#include +#include + +struct mux_chip; +struct mux_control; + +/** + * struct mux_control_ops - Mux controller operations for a mux chip. + * @set: Set the state of the given mux controller. + */ +struct mux_control_ops { + int (*set)(struct mux_control *mux, int state); +}; + +/** + * struct mux_control - Represents a mux controller. + * @lock: Protects the mux controller state. + * @chip: The mux chip that is handling this mux controller. + * @cached_state: The current mux controller state, or -1 if none. + * @states: The number of mux controller states. + * @idle_state: The mux controller state to use when inactive, or one + * of MUX_IDLE_AS_IS and MUX_IDLE_DISCONNECT. + * + * Mux drivers may only change @states and @idle_state, and may only do so + * between allocation and registration of the mux controller. Specifically, + * @cached_state is internal to the mux core and should never be written by + * mux drivers. + */ +struct mux_control { + struct semaphore lock; /* protects the state of the mux */ + + struct mux_chip *chip; + int cached_state; + + unsigned int states; + int idle_state; +}; + +/** + * struct mux_chip - Represents a chip holding mux controllers. + * @controllers: Number of mux controllers handled by the chip. + * @mux: Array of mux controllers that are handled. + * @dev: Device structure. + * @id: Used to identify the device internally. + * @ops: Mux controller operations. + */ +struct mux_chip { + unsigned int controllers; + struct mux_control *mux; + struct device dev; + int id; + + const struct mux_control_ops *ops; +}; + +#define to_mux_chip(x) container_of((x), struct mux_chip, dev) + +/** + * mux_chip_priv() - Get the extra memory reserved by mux_chip_alloc(). 
+ * @mux_chip: The mux-chip to get the private memory from. + * + * Return: Pointer to the private memory reserved by the allocator. + */ +static inline void *mux_chip_priv(struct mux_chip *mux_chip) +{ + return &mux_chip->mux[mux_chip->controllers]; +} + +struct mux_chip *mux_chip_alloc(struct device *dev, + unsigned int controllers, size_t sizeof_priv); +int mux_chip_register(struct mux_chip *mux_chip); +void mux_chip_unregister(struct mux_chip *mux_chip); +void mux_chip_free(struct mux_chip *mux_chip); + +struct mux_chip *devm_mux_chip_alloc(struct device *dev, + unsigned int controllers, + size_t sizeof_priv); +int devm_mux_chip_register(struct device *dev, struct mux_chip *mux_chip); + +/** + * mux_control_get_index() - Get the index of the given mux controller + * @mux: The mux-control to get the index for. + * + * Return: The index of the mux controller within the mux chip the mux + * controller is a part of. + */ +static inline unsigned int mux_control_get_index(struct mux_control *mux) +{ + return mux - mux->chip->mux; +} + +#endif /* _LINUX_MUX_DRIVER_H */ diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h new file mode 100644 index 000000000..69327b7b4 --- /dev/null +++ b/include/linux/mv643xx.h @@ -0,0 +1,979 @@ +/* + * mv643xx.h - MV-643XX Internal registers definition file. + * + * Copyright 2002 Momentum Computer, Inc. + * Author: Matthew Dharm + * Copyright 2002 GALILEO TECHNOLOGY, LTD. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __ASM_MV643XX_H +#define __ASM_MV643XX_H + +#include +#include +#include + +/****************************************/ +/* Processor Address Space */ +/****************************************/ + +/* DDR SDRAM BAR and size registers */ + +#define MV64340_CS_0_BASE_ADDR 0x008 +#define MV64340_CS_0_SIZE 0x010 +#define MV64340_CS_1_BASE_ADDR 0x208 +#define MV64340_CS_1_SIZE 0x210 +#define MV64340_CS_2_BASE_ADDR 0x018 +#define MV64340_CS_2_SIZE 0x020 +#define MV64340_CS_3_BASE_ADDR 0x218 +#define MV64340_CS_3_SIZE 0x220 + +/* Devices BAR and size registers */ + +#define MV64340_DEV_CS0_BASE_ADDR 0x028 +#define MV64340_DEV_CS0_SIZE 0x030 +#define MV64340_DEV_CS1_BASE_ADDR 0x228 +#define MV64340_DEV_CS1_SIZE 0x230 +#define MV64340_DEV_CS2_BASE_ADDR 0x248 +#define MV64340_DEV_CS2_SIZE 0x250 +#define MV64340_DEV_CS3_BASE_ADDR 0x038 +#define MV64340_DEV_CS3_SIZE 0x040 +#define MV64340_BOOTCS_BASE_ADDR 0x238 +#define MV64340_BOOTCS_SIZE 0x240 + +/* PCI 0 BAR and size registers */ + +#define MV64340_PCI_0_IO_BASE_ADDR 0x048 +#define MV64340_PCI_0_IO_SIZE 0x050 +#define MV64340_PCI_0_MEMORY0_BASE_ADDR 0x058 +#define MV64340_PCI_0_MEMORY0_SIZE 0x060 +#define MV64340_PCI_0_MEMORY1_BASE_ADDR 0x080 +#define MV64340_PCI_0_MEMORY1_SIZE 0x088 +#define MV64340_PCI_0_MEMORY2_BASE_ADDR 0x258 +#define MV64340_PCI_0_MEMORY2_SIZE 0x260 +#define MV64340_PCI_0_MEMORY3_BASE_ADDR 0x280 +#define MV64340_PCI_0_MEMORY3_SIZE 0x288 + +/* PCI 1 BAR and size registers */ +#define MV64340_PCI_1_IO_BASE_ADDR 0x090 +#define MV64340_PCI_1_IO_SIZE 0x098 +#define MV64340_PCI_1_MEMORY0_BASE_ADDR 0x0a0 +#define MV64340_PCI_1_MEMORY0_SIZE 0x0a8 +#define MV64340_PCI_1_MEMORY1_BASE_ADDR 0x0b0 +#define MV64340_PCI_1_MEMORY1_SIZE 0x0b8 +#define MV64340_PCI_1_MEMORY2_BASE_ADDR 0x2a0 +#define MV64340_PCI_1_MEMORY2_SIZE 0x2a8 +#define 
MV64340_PCI_1_MEMORY3_BASE_ADDR 0x2b0 +#define MV64340_PCI_1_MEMORY3_SIZE 0x2b8 + +/* SRAM base address */ +#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268 + +/* internal registers space base address */ +#define MV64340_INTERNAL_SPACE_BASE_ADDR 0x068 + +/* Enables the CS , DEV_CS , PCI 0 and PCI 1 + windows above */ +#define MV64340_BASE_ADDR_ENABLE 0x278 + +/****************************************/ +/* PCI remap registers */ +/****************************************/ + /* PCI 0 */ +#define MV64340_PCI_0_IO_ADDR_REMAP 0x0f0 +#define MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP 0x0f8 +#define MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP 0x320 +#define MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP 0x100 +#define MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP 0x328 +#define MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP 0x2f8 +#define MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP 0x330 +#define MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP 0x300 +#define MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP 0x338 + /* PCI 1 */ +#define MV64340_PCI_1_IO_ADDR_REMAP 0x108 +#define MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP 0x110 +#define MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP 0x340 +#define MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP 0x118 +#define MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP 0x348 +#define MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP 0x310 +#define MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP 0x350 +#define MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP 0x318 +#define MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP 0x358 + +#define MV64340_CPU_PCI_0_HEADERS_RETARGET_CONTROL 0x3b0 +#define MV64340_CPU_PCI_0_HEADERS_RETARGET_BASE 0x3b8 +#define MV64340_CPU_PCI_1_HEADERS_RETARGET_CONTROL 0x3c0 +#define MV64340_CPU_PCI_1_HEADERS_RETARGET_BASE 0x3c8 +#define MV64340_CPU_GE_HEADERS_RETARGET_CONTROL 0x3d0 +#define MV64340_CPU_GE_HEADERS_RETARGET_BASE 0x3d8 +#define MV64340_CPU_IDMA_HEADERS_RETARGET_CONTROL 0x3e0 +#define MV64340_CPU_IDMA_HEADERS_RETARGET_BASE 0x3e8 + +/****************************************/ +/* CPU Control Registers */ +/****************************************/ + +#define MV64340_CPU_CONFIG 0x000 +#define MV64340_CPU_MODE 0x120 +#define MV64340_CPU_MASTER_CONTROL 0x160 +#define MV64340_CPU_CROSS_BAR_CONTROL_LOW 0x150 +#define MV64340_CPU_CROSS_BAR_CONTROL_HIGH 0x158 +#define MV64340_CPU_CROSS_BAR_TIMEOUT 0x168 + +/****************************************/ +/* SMP RegisterS */ +/****************************************/ + +#define MV64340_SMP_WHO_AM_I 0x200 +#define MV64340_SMP_CPU0_DOORBELL 0x214 +#define MV64340_SMP_CPU0_DOORBELL_CLEAR 0x21C +#define MV64340_SMP_CPU1_DOORBELL 0x224 +#define MV64340_SMP_CPU1_DOORBELL_CLEAR 0x22C +#define MV64340_SMP_CPU0_DOORBELL_MASK 0x234 +#define MV64340_SMP_CPU1_DOORBELL_MASK 0x23C +#define MV64340_SMP_SEMAPHOR0 0x244 +#define MV64340_SMP_SEMAPHOR1 0x24c +#define MV64340_SMP_SEMAPHOR2 0x254 +#define MV64340_SMP_SEMAPHOR3 0x25c +#define MV64340_SMP_SEMAPHOR4 0x264 +#define MV64340_SMP_SEMAPHOR5 0x26c +#define MV64340_SMP_SEMAPHOR6 0x274 +#define MV64340_SMP_SEMAPHOR7 0x27c + +/****************************************/ +/* CPU Sync Barrier Register */ +/****************************************/ + +#define MV64340_CPU_0_SYNC_BARRIER_TRIGGER 0x0c0 +#define MV64340_CPU_0_SYNC_BARRIER_VIRTUAL 0x0c8 +#define MV64340_CPU_1_SYNC_BARRIER_TRIGGER 0x0d0 +#define MV64340_CPU_1_SYNC_BARRIER_VIRTUAL 0x0d8 + +/****************************************/ +/* CPU Access Protect */ +/****************************************/ + +#define MV64340_CPU_PROTECT_WINDOW_0_BASE_ADDR 0x180 +#define MV64340_CPU_PROTECT_WINDOW_0_SIZE 0x188 +#define MV64340_CPU_PROTECT_WINDOW_1_BASE_ADDR 
0x190 +#define MV64340_CPU_PROTECT_WINDOW_1_SIZE 0x198 +#define MV64340_CPU_PROTECT_WINDOW_2_BASE_ADDR 0x1a0 +#define MV64340_CPU_PROTECT_WINDOW_2_SIZE 0x1a8 +#define MV64340_CPU_PROTECT_WINDOW_3_BASE_ADDR 0x1b0 +#define MV64340_CPU_PROTECT_WINDOW_3_SIZE 0x1b8 + + +/****************************************/ +/* CPU Error Report */ +/****************************************/ + +#define MV64340_CPU_ERROR_ADDR_LOW 0x070 +#define MV64340_CPU_ERROR_ADDR_HIGH 0x078 +#define MV64340_CPU_ERROR_DATA_LOW 0x128 +#define MV64340_CPU_ERROR_DATA_HIGH 0x130 +#define MV64340_CPU_ERROR_PARITY 0x138 +#define MV64340_CPU_ERROR_CAUSE 0x140 +#define MV64340_CPU_ERROR_MASK 0x148 + +/****************************************/ +/* CPU Interface Debug Registers */ +/****************************************/ + +#define MV64340_PUNIT_SLAVE_DEBUG_LOW 0x360 +#define MV64340_PUNIT_SLAVE_DEBUG_HIGH 0x368 +#define MV64340_PUNIT_MASTER_DEBUG_LOW 0x370 +#define MV64340_PUNIT_MASTER_DEBUG_HIGH 0x378 +#define MV64340_PUNIT_MMASK 0x3e4 + +/****************************************/ +/* Integrated SRAM Registers */ +/****************************************/ + +#define MV64340_SRAM_CONFIG 0x380 +#define MV64340_SRAM_TEST_MODE 0X3F4 +#define MV64340_SRAM_ERROR_CAUSE 0x388 +#define MV64340_SRAM_ERROR_ADDR 0x390 +#define MV64340_SRAM_ERROR_ADDR_HIGH 0X3F8 +#define MV64340_SRAM_ERROR_DATA_LOW 0x398 +#define MV64340_SRAM_ERROR_DATA_HIGH 0x3a0 +#define MV64340_SRAM_ERROR_DATA_PARITY 0x3a8 + +/****************************************/ +/* SDRAM Configuration */ +/****************************************/ + +#define MV64340_SDRAM_CONFIG 0x1400 +#define MV64340_D_UNIT_CONTROL_LOW 0x1404 +#define MV64340_D_UNIT_CONTROL_HIGH 0x1424 +#define MV64340_SDRAM_TIMING_CONTROL_LOW 0x1408 +#define MV64340_SDRAM_TIMING_CONTROL_HIGH 0x140c +#define MV64340_SDRAM_ADDR_CONTROL 0x1410 +#define MV64340_SDRAM_OPEN_PAGES_CONTROL 0x1414 +#define MV64340_SDRAM_OPERATION 0x1418 +#define MV64340_SDRAM_MODE 0x141c +#define MV64340_EXTENDED_DRAM_MODE 0x1420 +#define MV64340_SDRAM_CROSS_BAR_CONTROL_LOW 0x1430 +#define MV64340_SDRAM_CROSS_BAR_CONTROL_HIGH 0x1434 +#define MV64340_SDRAM_CROSS_BAR_TIMEOUT 0x1438 +#define MV64340_SDRAM_ADDR_CTRL_PADS_CALIBRATION 0x14c0 +#define MV64340_SDRAM_DATA_PADS_CALIBRATION 0x14c4 + +/****************************************/ +/* SDRAM Error Report */ +/****************************************/ + +#define MV64340_SDRAM_ERROR_DATA_LOW 0x1444 +#define MV64340_SDRAM_ERROR_DATA_HIGH 0x1440 +#define MV64340_SDRAM_ERROR_ADDR 0x1450 +#define MV64340_SDRAM_RECEIVED_ECC 0x1448 +#define MV64340_SDRAM_CALCULATED_ECC 0x144c +#define MV64340_SDRAM_ECC_CONTROL 0x1454 +#define MV64340_SDRAM_ECC_ERROR_COUNTER 0x1458 + +/******************************************/ +/* Controlled Delay Line (CDL) Registers */ +/******************************************/ + +#define MV64340_DFCDL_CONFIG0 0x1480 +#define MV64340_DFCDL_CONFIG1 0x1484 +#define MV64340_DLL_WRITE 0x1488 +#define MV64340_DLL_READ 0x148c +#define MV64340_SRAM_ADDR 0x1490 +#define MV64340_SRAM_DATA0 0x1494 +#define MV64340_SRAM_DATA1 0x1498 +#define MV64340_SRAM_DATA2 0x149c +#define MV64340_DFCL_PROBE 0x14a0 + +/******************************************/ +/* Debug Registers */ +/******************************************/ + +#define MV64340_DUNIT_DEBUG_LOW 0x1460 +#define MV64340_DUNIT_DEBUG_HIGH 0x1464 +#define MV64340_DUNIT_MMASK 0X1b40 + +/****************************************/ +/* Device Parameters */ +/****************************************/ + +#define 
MV64340_DEVICE_BANK0_PARAMETERS 0x45c +#define MV64340_DEVICE_BANK1_PARAMETERS 0x460 +#define MV64340_DEVICE_BANK2_PARAMETERS 0x464 +#define MV64340_DEVICE_BANK3_PARAMETERS 0x468 +#define MV64340_DEVICE_BOOT_BANK_PARAMETERS 0x46c +#define MV64340_DEVICE_INTERFACE_CONTROL 0x4c0 +#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_LOW 0x4c8 +#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_HIGH 0x4cc +#define MV64340_DEVICE_INTERFACE_CROSS_BAR_TIMEOUT 0x4c4 + +/****************************************/ +/* Device interrupt registers */ +/****************************************/ + +#define MV64340_DEVICE_INTERRUPT_CAUSE 0x4d0 +#define MV64340_DEVICE_INTERRUPT_MASK 0x4d4 +#define MV64340_DEVICE_ERROR_ADDR 0x4d8 +#define MV64340_DEVICE_ERROR_DATA 0x4dc +#define MV64340_DEVICE_ERROR_PARITY 0x4e0 + +/****************************************/ +/* Device debug registers */ +/****************************************/ + +#define MV64340_DEVICE_DEBUG_LOW 0x4e4 +#define MV64340_DEVICE_DEBUG_HIGH 0x4e8 +#define MV64340_RUNIT_MMASK 0x4f0 + +/****************************************/ +/* PCI Slave Address Decoding registers */ +/****************************************/ + +#define MV64340_PCI_0_CS_0_BANK_SIZE 0xc08 +#define MV64340_PCI_1_CS_0_BANK_SIZE 0xc88 +#define MV64340_PCI_0_CS_1_BANK_SIZE 0xd08 +#define MV64340_PCI_1_CS_1_BANK_SIZE 0xd88 +#define MV64340_PCI_0_CS_2_BANK_SIZE 0xc0c +#define MV64340_PCI_1_CS_2_BANK_SIZE 0xc8c +#define MV64340_PCI_0_CS_3_BANK_SIZE 0xd0c +#define MV64340_PCI_1_CS_3_BANK_SIZE 0xd8c +#define MV64340_PCI_0_DEVCS_0_BANK_SIZE 0xc10 +#define MV64340_PCI_1_DEVCS_0_BANK_SIZE 0xc90 +#define MV64340_PCI_0_DEVCS_1_BANK_SIZE 0xd10 +#define MV64340_PCI_1_DEVCS_1_BANK_SIZE 0xd90 +#define MV64340_PCI_0_DEVCS_2_BANK_SIZE 0xd18 +#define MV64340_PCI_1_DEVCS_2_BANK_SIZE 0xd98 +#define MV64340_PCI_0_DEVCS_3_BANK_SIZE 0xc14 +#define MV64340_PCI_1_DEVCS_3_BANK_SIZE 0xc94 +#define MV64340_PCI_0_DEVCS_BOOT_BANK_SIZE 0xd14 +#define MV64340_PCI_1_DEVCS_BOOT_BANK_SIZE 0xd94 +#define MV64340_PCI_0_P2P_MEM0_BAR_SIZE 0xd1c +#define MV64340_PCI_1_P2P_MEM0_BAR_SIZE 0xd9c +#define MV64340_PCI_0_P2P_MEM1_BAR_SIZE 0xd20 +#define MV64340_PCI_1_P2P_MEM1_BAR_SIZE 0xda0 +#define MV64340_PCI_0_P2P_I_O_BAR_SIZE 0xd24 +#define MV64340_PCI_1_P2P_I_O_BAR_SIZE 0xda4 +#define MV64340_PCI_0_CPU_BAR_SIZE 0xd28 +#define MV64340_PCI_1_CPU_BAR_SIZE 0xda8 +#define MV64340_PCI_0_INTERNAL_SRAM_BAR_SIZE 0xe00 +#define MV64340_PCI_1_INTERNAL_SRAM_BAR_SIZE 0xe80 +#define MV64340_PCI_0_EXPANSION_ROM_BAR_SIZE 0xd2c +#define MV64340_PCI_1_EXPANSION_ROM_BAR_SIZE 0xd9c +#define MV64340_PCI_0_BASE_ADDR_REG_ENABLE 0xc3c +#define MV64340_PCI_1_BASE_ADDR_REG_ENABLE 0xcbc +#define MV64340_PCI_0_CS_0_BASE_ADDR_REMAP 0xc48 +#define MV64340_PCI_1_CS_0_BASE_ADDR_REMAP 0xcc8 +#define MV64340_PCI_0_CS_1_BASE_ADDR_REMAP 0xd48 +#define MV64340_PCI_1_CS_1_BASE_ADDR_REMAP 0xdc8 +#define MV64340_PCI_0_CS_2_BASE_ADDR_REMAP 0xc4c +#define MV64340_PCI_1_CS_2_BASE_ADDR_REMAP 0xccc +#define MV64340_PCI_0_CS_3_BASE_ADDR_REMAP 0xd4c +#define MV64340_PCI_1_CS_3_BASE_ADDR_REMAP 0xdcc +#define MV64340_PCI_0_CS_0_BASE_HIGH_ADDR_REMAP 0xF04 +#define MV64340_PCI_1_CS_0_BASE_HIGH_ADDR_REMAP 0xF84 +#define MV64340_PCI_0_CS_1_BASE_HIGH_ADDR_REMAP 0xF08 +#define MV64340_PCI_1_CS_1_BASE_HIGH_ADDR_REMAP 0xF88 +#define MV64340_PCI_0_CS_2_BASE_HIGH_ADDR_REMAP 0xF0C +#define MV64340_PCI_1_CS_2_BASE_HIGH_ADDR_REMAP 0xF8C +#define MV64340_PCI_0_CS_3_BASE_HIGH_ADDR_REMAP 0xF10 +#define MV64340_PCI_1_CS_3_BASE_HIGH_ADDR_REMAP 0xF90 +#define 
MV64340_PCI_0_DEVCS_0_BASE_ADDR_REMAP 0xc50 +#define MV64340_PCI_1_DEVCS_0_BASE_ADDR_REMAP 0xcd0 +#define MV64340_PCI_0_DEVCS_1_BASE_ADDR_REMAP 0xd50 +#define MV64340_PCI_1_DEVCS_1_BASE_ADDR_REMAP 0xdd0 +#define MV64340_PCI_0_DEVCS_2_BASE_ADDR_REMAP 0xd58 +#define MV64340_PCI_1_DEVCS_2_BASE_ADDR_REMAP 0xdd8 +#define MV64340_PCI_0_DEVCS_3_BASE_ADDR_REMAP 0xc54 +#define MV64340_PCI_1_DEVCS_3_BASE_ADDR_REMAP 0xcd4 +#define MV64340_PCI_0_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xd54 +#define MV64340_PCI_1_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xdd4 +#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xd5c +#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xddc +#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xd60 +#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xde0 +#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xd64 +#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xde4 +#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xd68 +#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xde8 +#define MV64340_PCI_0_P2P_I_O_BASE_ADDR_REMAP 0xd6c +#define MV64340_PCI_1_P2P_I_O_BASE_ADDR_REMAP 0xdec +#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_LOW 0xd70 +#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_LOW 0xdf0 +#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_HIGH 0xd74 +#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_HIGH 0xdf4 +#define MV64340_PCI_0_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf00 +#define MV64340_PCI_1_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf80 +#define MV64340_PCI_0_EXPANSION_ROM_BASE_ADDR_REMAP 0xf38 +#define MV64340_PCI_1_EXPANSION_ROM_BASE_ADDR_REMAP 0xfb8 +#define MV64340_PCI_0_ADDR_DECODE_CONTROL 0xd3c +#define MV64340_PCI_1_ADDR_DECODE_CONTROL 0xdbc +#define MV64340_PCI_0_HEADERS_RETARGET_CONTROL 0xF40 +#define MV64340_PCI_1_HEADERS_RETARGET_CONTROL 0xFc0 +#define MV64340_PCI_0_HEADERS_RETARGET_BASE 0xF44 +#define MV64340_PCI_1_HEADERS_RETARGET_BASE 0xFc4 +#define MV64340_PCI_0_HEADERS_RETARGET_HIGH 0xF48 +#define MV64340_PCI_1_HEADERS_RETARGET_HIGH 0xFc8 + +/***********************************/ +/* PCI Control Register Map */ +/***********************************/ + +#define MV64340_PCI_0_DLL_STATUS_AND_COMMAND 0x1d20 +#define MV64340_PCI_1_DLL_STATUS_AND_COMMAND 0x1da0 +#define MV64340_PCI_0_MPP_PADS_DRIVE_CONTROL 0x1d1C +#define MV64340_PCI_1_MPP_PADS_DRIVE_CONTROL 0x1d9C +#define MV64340_PCI_0_COMMAND 0xc00 +#define MV64340_PCI_1_COMMAND 0xc80 +#define MV64340_PCI_0_MODE 0xd00 +#define MV64340_PCI_1_MODE 0xd80 +#define MV64340_PCI_0_RETRY 0xc04 +#define MV64340_PCI_1_RETRY 0xc84 +#define MV64340_PCI_0_READ_BUFFER_DISCARD_TIMER 0xd04 +#define MV64340_PCI_1_READ_BUFFER_DISCARD_TIMER 0xd84 +#define MV64340_PCI_0_MSI_TRIGGER_TIMER 0xc38 +#define MV64340_PCI_1_MSI_TRIGGER_TIMER 0xcb8 +#define MV64340_PCI_0_ARBITER_CONTROL 0x1d00 +#define MV64340_PCI_1_ARBITER_CONTROL 0x1d80 +#define MV64340_PCI_0_CROSS_BAR_CONTROL_LOW 0x1d08 +#define MV64340_PCI_1_CROSS_BAR_CONTROL_LOW 0x1d88 +#define MV64340_PCI_0_CROSS_BAR_CONTROL_HIGH 0x1d0c +#define MV64340_PCI_1_CROSS_BAR_CONTROL_HIGH 0x1d8c +#define MV64340_PCI_0_CROSS_BAR_TIMEOUT 0x1d04 +#define MV64340_PCI_1_CROSS_BAR_TIMEOUT 0x1d84 +#define MV64340_PCI_0_SYNC_BARRIER_TRIGGER_REG 0x1D18 +#define MV64340_PCI_1_SYNC_BARRIER_TRIGGER_REG 0x1D98 +#define MV64340_PCI_0_SYNC_BARRIER_VIRTUAL_REG 0x1d10 +#define MV64340_PCI_1_SYNC_BARRIER_VIRTUAL_REG 0x1d90 +#define MV64340_PCI_0_P2P_CONFIG 0x1d14 +#define MV64340_PCI_1_P2P_CONFIG 0x1d94 + +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_LOW 0x1e00 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_HIGH 0x1e04 +#define 
MV64340_PCI_0_ACCESS_CONTROL_SIZE_0 0x1e08 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_LOW 0x1e10 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_HIGH 0x1e14 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_1 0x1e18 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_LOW 0x1e20 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_HIGH 0x1e24 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_2 0x1e28 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_LOW 0x1e30 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_HIGH 0x1e34 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_3 0x1e38 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_LOW 0x1e40 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_HIGH 0x1e44 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_4 0x1e48 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_LOW 0x1e50 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_HIGH 0x1e54 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_5 0x1e58 + +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_LOW 0x1e80 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_HIGH 0x1e84 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_0 0x1e88 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_LOW 0x1e90 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_HIGH 0x1e94 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_1 0x1e98 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_LOW 0x1ea0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_HIGH 0x1ea4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_2 0x1ea8 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_LOW 0x1eb0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_HIGH 0x1eb4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_3 0x1eb8 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_LOW 0x1ec0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_HIGH 0x1ec4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_4 0x1ec8 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_LOW 0x1ed0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_HIGH 0x1ed4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_5 0x1ed8 + +/****************************************/ +/* PCI Configuration Access Registers */ +/****************************************/ + +#define MV64340_PCI_0_CONFIG_ADDR 0xcf8 +#define MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG 0xcfc +#define MV64340_PCI_1_CONFIG_ADDR 0xc78 +#define MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG 0xc7c +#define MV64340_PCI_0_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xc34 +#define MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xcb4 + +/****************************************/ +/* PCI Error Report Registers */ +/****************************************/ + +#define MV64340_PCI_0_SERR_MASK 0xc28 +#define MV64340_PCI_1_SERR_MASK 0xca8 +#define MV64340_PCI_0_ERROR_ADDR_LOW 0x1d40 +#define MV64340_PCI_1_ERROR_ADDR_LOW 0x1dc0 +#define MV64340_PCI_0_ERROR_ADDR_HIGH 0x1d44 +#define MV64340_PCI_1_ERROR_ADDR_HIGH 0x1dc4 +#define MV64340_PCI_0_ERROR_ATTRIBUTE 0x1d48 +#define MV64340_PCI_1_ERROR_ATTRIBUTE 0x1dc8 +#define MV64340_PCI_0_ERROR_COMMAND 0x1d50 +#define MV64340_PCI_1_ERROR_COMMAND 0x1dd0 +#define MV64340_PCI_0_ERROR_CAUSE 0x1d58 +#define MV64340_PCI_1_ERROR_CAUSE 0x1dd8 +#define MV64340_PCI_0_ERROR_MASK 0x1d5c +#define MV64340_PCI_1_ERROR_MASK 0x1ddc + +/****************************************/ +/* PCI Debug Registers */ +/****************************************/ + +#define MV64340_PCI_0_MMASK 0X1D24 +#define MV64340_PCI_1_MMASK 0X1DA4 + +/*********************************************/ +/* PCI Configuration, Function 0, Registers */ +/*********************************************/ + +#define MV64340_PCI_DEVICE_AND_VENDOR_ID 0x000 +#define MV64340_PCI_STATUS_AND_COMMAND 0x004 +#define MV64340_PCI_CLASS_CODE_AND_REVISION_ID 0x008 +#define 
MV64340_PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE 0x00C + +#define MV64340_PCI_SCS_0_BASE_ADDR_LOW 0x010 +#define MV64340_PCI_SCS_0_BASE_ADDR_HIGH 0x014 +#define MV64340_PCI_SCS_1_BASE_ADDR_LOW 0x018 +#define MV64340_PCI_SCS_1_BASE_ADDR_HIGH 0x01C +#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_LOW 0x020 +#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_HIGH 0x024 +#define MV64340_PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID 0x02c +#define MV64340_PCI_EXPANSION_ROM_BASE_ADDR_REG 0x030 +#define MV64340_PCI_CAPABILTY_LIST_POINTER 0x034 +#define MV64340_PCI_INTERRUPT_PIN_AND_LINE 0x03C + /* capability list */ +#define MV64340_PCI_POWER_MANAGEMENT_CAPABILITY 0x040 +#define MV64340_PCI_POWER_MANAGEMENT_STATUS_AND_CONTROL 0x044 +#define MV64340_PCI_VPD_ADDR 0x048 +#define MV64340_PCI_VPD_DATA 0x04c +#define MV64340_PCI_MSI_MESSAGE_CONTROL 0x050 +#define MV64340_PCI_MSI_MESSAGE_ADDR 0x054 +#define MV64340_PCI_MSI_MESSAGE_UPPER_ADDR 0x058 +#define MV64340_PCI_MSI_MESSAGE_DATA 0x05c +#define MV64340_PCI_X_COMMAND 0x060 +#define MV64340_PCI_X_STATUS 0x064 +#define MV64340_PCI_COMPACT_PCI_HOT_SWAP 0x068 + +/***********************************************/ +/* PCI Configuration, Function 1, Registers */ +/***********************************************/ + +#define MV64340_PCI_SCS_2_BASE_ADDR_LOW 0x110 +#define MV64340_PCI_SCS_2_BASE_ADDR_HIGH 0x114 +#define MV64340_PCI_SCS_3_BASE_ADDR_LOW 0x118 +#define MV64340_PCI_SCS_3_BASE_ADDR_HIGH 0x11c +#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_LOW 0x120 +#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_HIGH 0x124 + +/***********************************************/ +/* PCI Configuration, Function 2, Registers */ +/***********************************************/ + +#define MV64340_PCI_DEVCS_0_BASE_ADDR_LOW 0x210 +#define MV64340_PCI_DEVCS_0_BASE_ADDR_HIGH 0x214 +#define MV64340_PCI_DEVCS_1_BASE_ADDR_LOW 0x218 +#define MV64340_PCI_DEVCS_1_BASE_ADDR_HIGH 0x21c +#define MV64340_PCI_DEVCS_2_BASE_ADDR_LOW 0x220 +#define MV64340_PCI_DEVCS_2_BASE_ADDR_HIGH 0x224 + +/***********************************************/ +/* PCI Configuration, Function 3, Registers */ +/***********************************************/ + +#define MV64340_PCI_DEVCS_3_BASE_ADDR_LOW 0x310 +#define MV64340_PCI_DEVCS_3_BASE_ADDR_HIGH 0x314 +#define MV64340_PCI_BOOT_CS_BASE_ADDR_LOW 0x318 +#define MV64340_PCI_BOOT_CS_BASE_ADDR_HIGH 0x31c +#define MV64340_PCI_CPU_BASE_ADDR_LOW 0x220 +#define MV64340_PCI_CPU_BASE_ADDR_HIGH 0x224 + +/***********************************************/ +/* PCI Configuration, Function 4, Registers */ +/***********************************************/ + +#define MV64340_PCI_P2P_MEM0_BASE_ADDR_LOW 0x410 +#define MV64340_PCI_P2P_MEM0_BASE_ADDR_HIGH 0x414 +#define MV64340_PCI_P2P_MEM1_BASE_ADDR_LOW 0x418 +#define MV64340_PCI_P2P_MEM1_BASE_ADDR_HIGH 0x41c +#define MV64340_PCI_P2P_I_O_BASE_ADDR 0x420 +#define MV64340_PCI_INTERNAL_REGS_I_O_MAPPED_BASE_ADDR 0x424 + +/****************************************/ +/* Messaging Unit Registers (I20) */ +/****************************************/ + +#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_0_SIDE 0x010 +#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_0_SIDE 0x014 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_0_SIDE 0x018 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_0_SIDE 0x01C +#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_0_SIDE 0x020 +#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x024 +#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x028 +#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_0_SIDE 0x02C 
+#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x030 +#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x034 +#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x040 +#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x044 +#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_0_SIDE 0x050 +#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_0_SIDE 0x054 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x060 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x064 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x068 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x06C +#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x070 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x074 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x0F8 +#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x0FC + +#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_1_SIDE 0x090 +#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_1_SIDE 0x094 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_1_SIDE 0x098 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_1_SIDE 0x09C +#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_1_SIDE 0x0A0 +#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0A4 +#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0A8 +#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_1_SIDE 0x0AC +#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0B0 +#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0B4 +#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C0 +#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C4 +#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_1_SIDE 0x0D0 +#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_1_SIDE 0x0D4 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0E0 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0E4 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x0E8 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x0EC +#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0F0 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0F4 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x078 +#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x07C + +#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C10 +#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C14 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C18 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C1C +#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU0_SIDE 0x1C20 +#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C24 +#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C28 +#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU0_SIDE 0x1C2C +#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C30 +#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C34 +#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C40 +#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C44 +#define MV64340_I2O_QUEUE_CONTROL_REG_CPU0_SIDE 0x1C50 +#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU0_SIDE 0x1C54 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C60 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C64 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1C68 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1C6C +#define 
MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C70 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C74 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1CF8 +#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1CFC +#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C90 +#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C94 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C98 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C9C +#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU1_SIDE 0x1CA0 +#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CA4 +#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CA8 +#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU1_SIDE 0x1CAC +#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CB0 +#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CB4 +#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC0 +#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC4 +#define MV64340_I2O_QUEUE_CONTROL_REG_CPU1_SIDE 0x1CD0 +#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU1_SIDE 0x1CD4 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CE0 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CE4 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1CE8 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1CEC +#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CF0 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CF4 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1C78 +#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1C7C + +/****************************************/ +/* Ethernet Unit Registers */ +/****************************************/ + +/*******************************************/ +/* CUNIT Registers */ +/*******************************************/ + + /* Address Decoding Register Map */ + +#define MV64340_CUNIT_BASE_ADDR_REG0 0xf200 +#define MV64340_CUNIT_BASE_ADDR_REG1 0xf208 +#define MV64340_CUNIT_BASE_ADDR_REG2 0xf210 +#define MV64340_CUNIT_BASE_ADDR_REG3 0xf218 +#define MV64340_CUNIT_SIZE0 0xf204 +#define MV64340_CUNIT_SIZE1 0xf20c +#define MV64340_CUNIT_SIZE2 0xf214 +#define MV64340_CUNIT_SIZE3 0xf21c +#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG0 0xf240 +#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG1 0xf244 +#define MV64340_CUNIT_BASE_ADDR_ENABLE_REG 0xf250 +#define MV64340_MPSC0_ACCESS_PROTECTION_REG 0xf254 +#define MV64340_MPSC1_ACCESS_PROTECTION_REG 0xf258 +#define MV64340_CUNIT_INTERNAL_SPACE_BASE_ADDR_REG 0xf25C + + /* Error Report Registers */ + +#define MV64340_CUNIT_INTERRUPT_CAUSE_REG 0xf310 +#define MV64340_CUNIT_INTERRUPT_MASK_REG 0xf314 +#define MV64340_CUNIT_ERROR_ADDR 0xf318 + + /* Cunit Control Registers */ + +#define MV64340_CUNIT_ARBITER_CONTROL_REG 0xf300 +#define MV64340_CUNIT_CONFIG_REG 0xb40c +#define MV64340_CUNIT_CRROSBAR_TIMEOUT_REG 0xf304 + + /* Cunit Debug Registers */ + +#define MV64340_CUNIT_DEBUG_LOW 0xf340 +#define MV64340_CUNIT_DEBUG_HIGH 0xf344 +#define MV64340_CUNIT_MMASK 0xf380 + + /* MPSCs Clocks Routing Registers */ + +#define MV64340_MPSC_ROUTING_REG 0xb400 +#define MV64340_MPSC_RX_CLOCK_ROUTING_REG 0xb404 +#define MV64340_MPSC_TX_CLOCK_ROUTING_REG 0xb408 + + /* MPSCs Interrupts Registers */ + +#define MV64340_MPSC_CAUSE_REG(port) (0xb804 + (port<<3)) +#define MV64340_MPSC_MASK_REG(port) (0xb884 + (port<<3)) + +#define MV64340_MPSC_MAIN_CONFIG_LOW(port) (0x8000 + (port<<12)) +#define 
MV64340_MPSC_MAIN_CONFIG_HIGH(port) (0x8004 + (port<<12)) +#define MV64340_MPSC_PROTOCOL_CONFIG(port) (0x8008 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG1(port) (0x800c + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG2(port) (0x8010 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG3(port) (0x8014 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG4(port) (0x8018 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG5(port) (0x801c + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG6(port) (0x8020 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG7(port) (0x8024 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG8(port) (0x8028 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG9(port) (0x802c + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG10(port) (0x8030 + (port<<12)) + + /* MPSC0 Registers */ + + +/***************************************/ +/* SDMA Registers */ +/***************************************/ + +#define MV64340_SDMA_CONFIG_REG(channel) (0x4000 + (channel<<13)) +#define MV64340_SDMA_COMMAND_REG(channel) (0x4008 + (channel<<13)) +#define MV64340_SDMA_CURRENT_RX_DESCRIPTOR_POINTER(channel) (0x4810 + (channel<<13)) +#define MV64340_SDMA_CURRENT_TX_DESCRIPTOR_POINTER(channel) (0x4c10 + (channel<<13)) +#define MV64340_SDMA_FIRST_TX_DESCRIPTOR_POINTER(channel) (0x4c14 + (channel<<13)) + +#define MV64340_SDMA_CAUSE_REG 0xb800 +#define MV64340_SDMA_MASK_REG 0xb880 + +/* BRG Interrupts */ + +#define MV64340_BRG_CONFIG_REG(brg) (0xb200 + (brg<<3)) +#define MV64340_BRG_BAUDE_TUNING_REG(brg) (0xb208 + (brg<<3)) +#define MV64340_BRG_CAUSE_REG 0xb834 +#define MV64340_BRG_MASK_REG 0xb8b4 + +/****************************************/ +/* DMA Channel Control */ +/****************************************/ + +#define MV64340_DMA_CHANNEL0_CONTROL 0x840 +#define MV64340_DMA_CHANNEL0_CONTROL_HIGH 0x880 +#define MV64340_DMA_CHANNEL1_CONTROL 0x844 +#define MV64340_DMA_CHANNEL1_CONTROL_HIGH 0x884 +#define MV64340_DMA_CHANNEL2_CONTROL 0x848 +#define MV64340_DMA_CHANNEL2_CONTROL_HIGH 0x888 +#define MV64340_DMA_CHANNEL3_CONTROL 0x84C +#define MV64340_DMA_CHANNEL3_CONTROL_HIGH 0x88C + + +/****************************************/ +/* IDMA Registers */ +/****************************************/ + +#define MV64340_DMA_CHANNEL0_BYTE_COUNT 0x800 +#define MV64340_DMA_CHANNEL1_BYTE_COUNT 0x804 +#define MV64340_DMA_CHANNEL2_BYTE_COUNT 0x808 +#define MV64340_DMA_CHANNEL3_BYTE_COUNT 0x80C +#define MV64340_DMA_CHANNEL0_SOURCE_ADDR 0x810 +#define MV64340_DMA_CHANNEL1_SOURCE_ADDR 0x814 +#define MV64340_DMA_CHANNEL2_SOURCE_ADDR 0x818 +#define MV64340_DMA_CHANNEL3_SOURCE_ADDR 0x81c +#define MV64340_DMA_CHANNEL0_DESTINATION_ADDR 0x820 +#define MV64340_DMA_CHANNEL1_DESTINATION_ADDR 0x824 +#define MV64340_DMA_CHANNEL2_DESTINATION_ADDR 0x828 +#define MV64340_DMA_CHANNEL3_DESTINATION_ADDR 0x82C +#define MV64340_DMA_CHANNEL0_NEXT_DESCRIPTOR_POINTER 0x830 +#define MV64340_DMA_CHANNEL1_NEXT_DESCRIPTOR_POINTER 0x834 +#define MV64340_DMA_CHANNEL2_NEXT_DESCRIPTOR_POINTER 0x838 +#define MV64340_DMA_CHANNEL3_NEXT_DESCRIPTOR_POINTER 0x83C +#define MV64340_DMA_CHANNEL0_CURRENT_DESCRIPTOR_POINTER 0x870 +#define MV64340_DMA_CHANNEL1_CURRENT_DESCRIPTOR_POINTER 0x874 +#define MV64340_DMA_CHANNEL2_CURRENT_DESCRIPTOR_POINTER 0x878 +#define MV64340_DMA_CHANNEL3_CURRENT_DESCRIPTOR_POINTER 0x87C + + /* IDMA Address Decoding Base Address Registers */ + +#define MV64340_DMA_BASE_ADDR_REG0 0xa00 +#define MV64340_DMA_BASE_ADDR_REG1 0xa08 +#define MV64340_DMA_BASE_ADDR_REG2 0xa10 +#define MV64340_DMA_BASE_ADDR_REG3 0xa18 +#define MV64340_DMA_BASE_ADDR_REG4 0xa20 +#define 
MV64340_DMA_BASE_ADDR_REG5 0xa28 +#define MV64340_DMA_BASE_ADDR_REG6 0xa30 +#define MV64340_DMA_BASE_ADDR_REG7 0xa38 + + /* IDMA Address Decoding Size Address Register */ + +#define MV64340_DMA_SIZE_REG0 0xa04 +#define MV64340_DMA_SIZE_REG1 0xa0c +#define MV64340_DMA_SIZE_REG2 0xa14 +#define MV64340_DMA_SIZE_REG3 0xa1c +#define MV64340_DMA_SIZE_REG4 0xa24 +#define MV64340_DMA_SIZE_REG5 0xa2c +#define MV64340_DMA_SIZE_REG6 0xa34 +#define MV64340_DMA_SIZE_REG7 0xa3C + + /* IDMA Address Decoding High Address Remap and Access + Protection Registers */ + +#define MV64340_DMA_HIGH_ADDR_REMAP_REG0 0xa60 +#define MV64340_DMA_HIGH_ADDR_REMAP_REG1 0xa64 +#define MV64340_DMA_HIGH_ADDR_REMAP_REG2 0xa68 +#define MV64340_DMA_HIGH_ADDR_REMAP_REG3 0xa6C +#define MV64340_DMA_BASE_ADDR_ENABLE_REG 0xa80 +#define MV64340_DMA_CHANNEL0_ACCESS_PROTECTION_REG 0xa70 +#define MV64340_DMA_CHANNEL1_ACCESS_PROTECTION_REG 0xa74 +#define MV64340_DMA_CHANNEL2_ACCESS_PROTECTION_REG 0xa78 +#define MV64340_DMA_CHANNEL3_ACCESS_PROTECTION_REG 0xa7c +#define MV64340_DMA_ARBITER_CONTROL 0x860 +#define MV64340_DMA_CROSS_BAR_TIMEOUT 0x8d0 + + /* IDMA Headers Retarget Registers */ + +#define MV64340_DMA_HEADERS_RETARGET_CONTROL 0xa84 +#define MV64340_DMA_HEADERS_RETARGET_BASE 0xa88 + + /* IDMA Interrupt Register */ + +#define MV64340_DMA_INTERRUPT_CAUSE_REG 0x8c0 +#define MV64340_DMA_INTERRUPT_CAUSE_MASK 0x8c4 +#define MV64340_DMA_ERROR_ADDR 0x8c8 +#define MV64340_DMA_ERROR_SELECT 0x8cc + + /* IDMA Debug Register ( for internal use ) */ + +#define MV64340_DMA_DEBUG_LOW 0x8e0 +#define MV64340_DMA_DEBUG_HIGH 0x8e4 +#define MV64340_DMA_SPARE 0xA8C + +/****************************************/ +/* Timer_Counter */ +/****************************************/ + +#define MV64340_TIMER_COUNTER0 0x850 +#define MV64340_TIMER_COUNTER1 0x854 +#define MV64340_TIMER_COUNTER2 0x858 +#define MV64340_TIMER_COUNTER3 0x85C +#define MV64340_TIMER_COUNTER_0_3_CONTROL 0x864 +#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_CAUSE 0x868 +#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_MASK 0x86c + +/****************************************/ +/* Watchdog registers */ +/****************************************/ + +#define MV64340_WATCHDOG_CONFIG_REG 0xb410 +#define MV64340_WATCHDOG_VALUE_REG 0xb414 + +/****************************************/ +/* I2C Registers */ +/****************************************/ + +#define MV64XXX_I2C_OFFSET 0xc000 +#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020 + +/****************************************/ +/* GPP Interface Registers */ +/****************************************/ + +#define MV64340_GPP_IO_CONTROL 0xf100 +#define MV64340_GPP_LEVEL_CONTROL 0xf110 +#define MV64340_GPP_VALUE 0xf104 +#define MV64340_GPP_INTERRUPT_CAUSE 0xf108 +#define MV64340_GPP_INTERRUPT_MASK0 0xf10c +#define MV64340_GPP_INTERRUPT_MASK1 0xf114 +#define MV64340_GPP_VALUE_SET 0xf118 +#define MV64340_GPP_VALUE_CLEAR 0xf11c + +/****************************************/ +/* Interrupt Controller Registers */ +/****************************************/ + +/****************************************/ +/* Interrupts */ +/****************************************/ + +#define MV64340_MAIN_INTERRUPT_CAUSE_LOW 0x004 +#define MV64340_MAIN_INTERRUPT_CAUSE_HIGH 0x00c +#define MV64340_CPU_INTERRUPT0_MASK_LOW 0x014 +#define MV64340_CPU_INTERRUPT0_MASK_HIGH 0x01c +#define MV64340_CPU_INTERRUPT0_SELECT_CAUSE 0x024 +#define MV64340_CPU_INTERRUPT1_MASK_LOW 0x034 +#define MV64340_CPU_INTERRUPT1_MASK_HIGH 0x03c +#define MV64340_CPU_INTERRUPT1_SELECT_CAUSE 0x044 +#define 
MV64340_INTERRUPT0_MASK_0_LOW 0x054 +#define MV64340_INTERRUPT0_MASK_0_HIGH 0x05c +#define MV64340_INTERRUPT0_SELECT_CAUSE 0x064 +#define MV64340_INTERRUPT1_MASK_0_LOW 0x074 +#define MV64340_INTERRUPT1_MASK_0_HIGH 0x07c +#define MV64340_INTERRUPT1_SELECT_CAUSE 0x084 + +/****************************************/ +/* MPP Interface Registers */ +/****************************************/ + +#define MV64340_MPP_CONTROL0 0xf000 +#define MV64340_MPP_CONTROL1 0xf004 +#define MV64340_MPP_CONTROL2 0xf008 +#define MV64340_MPP_CONTROL3 0xf00c + +/****************************************/ +/* Serial Initialization registers */ +/****************************************/ + +#define MV64340_SERIAL_INIT_LAST_DATA 0xf324 +#define MV64340_SERIAL_INIT_CONTROL 0xf328 +#define MV64340_SERIAL_INIT_STATUS 0xf32c + +extern void mv64340_irq_init(unsigned int base); + +/* MPSC Platform Device, Driver Data (Shared register regions) */ +#define MPSC_SHARED_NAME "mpsc_shared" + +#define MPSC_ROUTING_BASE_ORDER 0 +#define MPSC_SDMA_INTR_BASE_ORDER 1 + +#define MPSC_ROUTING_REG_BLOCK_SIZE 0x000c +#define MPSC_SDMA_INTR_REG_BLOCK_SIZE 0x0084 + +struct mpsc_shared_pdata { + u32 mrr_val; + u32 rcrr_val; + u32 tcrr_val; + u32 intr_cause_val; + u32 intr_mask_val; +}; + +/* MPSC Platform Device, Driver Data */ +#define MPSC_CTLR_NAME "mpsc" + +#define MPSC_BASE_ORDER 0 +#define MPSC_SDMA_BASE_ORDER 1 +#define MPSC_BRG_BASE_ORDER 2 + +#define MPSC_REG_BLOCK_SIZE 0x0038 +#define MPSC_SDMA_REG_BLOCK_SIZE 0x0c18 +#define MPSC_BRG_REG_BLOCK_SIZE 0x0008 + +struct mpsc_pdata { + u8 mirror_regs; + u8 cache_mgmt; + u8 max_idle; + int default_baud; + int default_bits; + int default_parity; + int default_flow; + u32 chr_1_val; + u32 chr_2_val; + u32 chr_10_val; + u32 mpcr_val; + u32 bcr_val; + u8 brg_can_tune; + u8 brg_clk_src; + u32 brg_clk_freq; +}; + +/* Watchdog Platform Device, Driver Data */ +#define MV64x60_WDT_NAME "mv64x60_wdt" + +struct mv64x60_wdt_pdata { + int timeout; /* watchdog expiry in seconds, default 10 */ + int bus_clk; /* bus clock in MHz, default 133 */ +}; + +#endif /* __ASM_MV643XX_H */ diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h new file mode 100644 index 000000000..3682ae75c --- /dev/null +++ b/include/linux/mv643xx_eth.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * MV-643XX ethernet platform device data definition file. + */ + +#ifndef __LINUX_MV643XX_ETH_H +#define __LINUX_MV643XX_ETH_H + +#include +#include + +#define MV643XX_ETH_SHARED_NAME "mv643xx_eth" +#define MV643XX_ETH_NAME "mv643xx_eth_port" +#define MV643XX_ETH_SHARED_REGS 0x2000 +#define MV643XX_ETH_SHARED_REGS_SIZE 0x2000 +#define MV643XX_ETH_BAR_4 0x2220 +#define MV643XX_ETH_SIZE_REG_4 0x2224 +#define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290 + +#define MV643XX_TX_CSUM_DEFAULT_LIMIT 0 + +struct mv643xx_eth_shared_platform_data { + struct mbus_dram_target_info *dram; + /* + * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default + * limit of 9KiB will be used. + */ + int tx_csum_limit; +}; + +#define MV643XX_ETH_PHY_ADDR_DEFAULT 0 +#define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x)) +#define MV643XX_ETH_PHY_NONE 0xff + +struct device_node; +struct mv643xx_eth_platform_data { + /* + * Pointer back to our parent instance, and our port number. + */ + struct platform_device *shared; + int port_number; + + /* + * Whether a PHY is present, and if yes, at which address. 
+ */ + int phy_addr; + struct device_node *phy_node; + + /* + * Use this MAC address if it is valid, overriding the + * address that is already in the hardware. + */ + u8 mac_addr[ETH_ALEN]; + + /* + * If speed is 0, autonegotiation is enabled. + * Valid values for speed: 0, SPEED_10, SPEED_100, SPEED_1000. + * Valid values for duplex: DUPLEX_HALF, DUPLEX_FULL. + */ + int speed; + int duplex; + + /* + * How many RX/TX queues to use. + */ + int rx_queue_count; + int tx_queue_count; + + /* + * Override default RX/TX queue sizes if nonzero. + */ + int rx_queue_size; + int tx_queue_size; + + /* + * Use on-chip SRAM for RX/TX descriptors if size is nonzero + * and sufficient to contain all descriptors for the requested + * ring sizes. + */ + unsigned long rx_sram_addr; + int rx_sram_size; + unsigned long tx_sram_addr; + int tx_sram_size; +}; + + +#endif diff --git a/include/linux/mv643xx_i2c.h b/include/linux/mv643xx_i2c.h new file mode 100644 index 000000000..5db5152e9 --- /dev/null +++ b/include/linux/mv643xx_i2c.h @@ -0,0 +1,22 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _MV64XXX_I2C_H_ +#define _MV64XXX_I2C_H_ + +#include + +#define MV64XXX_I2C_CTLR_NAME "mv64xxx_i2c" + +/* i2c Platform Device, Driver Data */ +struct mv64xxx_i2c_pdata { + u32 freq_m; + u32 freq_n; + u32 timeout; /* In milliseconds */ +}; + +#endif /*_MV64XXX_I2C_H_*/ diff --git a/include/linux/mvebu-pmsu.h b/include/linux/mvebu-pmsu.h new file mode 100644 index 000000000..b918d07ef --- /dev/null +++ b/include/linux/mvebu-pmsu.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2012 Marvell + * + * Thomas Petazzoni + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __MVEBU_PMSU_H__ +#define __MVEBU_PMSU_H__ + +#ifdef CONFIG_MACH_MVEBU_V7 +int mvebu_pmsu_dfs_request(int cpu); +#else +static inline int mvebu_pmsu_dfs_request(int cpu) { return -ENODEV; } +#endif + +#endif /* __MVEBU_PMSU_H__ */ diff --git a/include/linux/mxm-wmi.h b/include/linux/mxm-wmi.h new file mode 100644 index 000000000..617a29505 --- /dev/null +++ b/include/linux/mxm-wmi.h @@ -0,0 +1,33 @@ +/* + * MXM WMI driver + * + * Copyright(C) 2010 Red Hat. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
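The platform-data structures in mv643xx.h above (struct mv64x60_wdt_pdata, struct mpsc_pdata) are consumed by board-setup code rather than by a driver-facing API, so a usage sketch may help. The following is illustrative only, not part of the patch: a hypothetical board file registering the MV64x60 watchdog with its platform data. It assumes the header is included as <linux/mv643xx.h>, and it omits the IORESOURCE_MEM register window a real board would also attach.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mv643xx.h>

/* Hypothetical board defaults: 30 s timeout, 133 MHz bus clock. */
static struct mv64x60_wdt_pdata board_wdt_pdata = {
        .timeout = 30,
        .bus_clk = 133,
};

static struct platform_device board_wdt_device = {
        .name = MV64x60_WDT_NAME,
        .id   = -1,
        .dev  = {
                .platform_data = &board_wdt_pdata,
        },
};

static int __init board_wdt_init(void)
{
        /* A real board would also pass a memory resource covering
         * MV64340_WATCHDOG_CONFIG_REG / MV64340_WATCHDOG_VALUE_REG.
         */
        return platform_device_register(&board_wdt_device);
}
arch_initcall(board_wdt_init);

The MPSC and mv643xx_eth platform data are wired up the same way, with their respective MPSC_CTLR_NAME and MV643XX_ETH_NAME device names.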
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef MXM_WMI_H +#define MXM_WMI_H + +/* discrete adapters */ +#define MXM_MXDS_ADAPTER_0 0x0 +#define MXM_MXDS_ADAPTER_1 0x0 +/* integrated adapter */ +#define MXM_MXDS_ADAPTER_IGD 0x10 +int mxm_wmi_call_mxds(int adapter); +int mxm_wmi_call_mxmx(int adapter); +bool mxm_wmi_supported(void); + +#endif diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h new file mode 100644 index 000000000..90a803aa4 --- /dev/null +++ b/include/linux/n_r3964.h @@ -0,0 +1,175 @@ +/* r3964 linediscipline for linux + * + * ----------------------------------------------------------- + * Copyright by + * Philips Automation Projects + * Kassel (Germany) + * ----------------------------------------------------------- + * This software may be used and distributed according to the terms of + * the GNU General Public License, incorporated herein by reference. + * + * Author: + * L. Haag + * + * $Log: r3964.h,v $ + * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig + * Fixed HZ usage on 2.6 kernels + * Removed unnecessary include + * + * Revision 1.3 2001/03/18 13:02:24 dwmw2 + * Fix timer usage, use spinlocks properly. + * + * Revision 1.2 2001/03/18 12:53:15 dwmw2 + * Merge changes in 2.4.2 + * + * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2 + * This'll screw the version control + * + * Revision 1.6 1998/09/30 00:40:38 dwmw2 + * Updated to use kernel's N_R3964 if available + * + * Revision 1.4 1998/04/02 20:29:44 lhaag + * select, blocking, ... + * + * Revision 1.3 1998/02/12 18:58:43 root + * fixed some memory leaks + * calculation of checksum characters + * + * Revision 1.2 1998/02/07 13:03:17 root + * ioctl read_telegram + * + * Revision 1.1 1998/02/06 19:19:43 root + * Initial revision + * + * + */ +#ifndef __LINUX_N_R3964_H__ +#define __LINUX_N_R3964_H__ + + +#include +#include + +/* + * Common ascii handshake characters: + */ + +#define STX 0x02 +#define ETX 0x03 +#define DLE 0x10 +#define NAK 0x15 + +/* + * Timeouts (from milliseconds to jiffies) + */ + +#define R3964_TO_QVZ ((550)*HZ/1000) +#define R3964_TO_ZVZ ((220)*HZ/1000) +#define R3964_TO_NO_BUF ((400)*HZ/1000) +#define R3964_NO_TX_ROOM ((100)*HZ/1000) +#define R3964_TO_RX_PANIC ((4000)*HZ/1000) +#define R3964_MAX_RETRIES 5 + + +enum { R3964_IDLE, + R3964_TX_REQUEST, R3964_TRANSMITTING, + R3964_WAIT_ZVZ_BEFORE_TX_RETRY, R3964_WAIT_FOR_TX_ACK, + R3964_WAIT_FOR_RX_BUF, + R3964_RECEIVING, R3964_WAIT_FOR_BCC, R3964_WAIT_FOR_RX_REPEAT + }; + +/* + * All open file-handles are 'clients' and are stored in a linked list: + */ + +struct r3964_message; + +struct r3964_client_info { + spinlock_t lock; + struct pid *pid; + unsigned int sig_flags; + + struct r3964_client_info *next; + + struct r3964_message *first_msg; + struct r3964_message *last_msg; + struct r3964_block_header *next_block_to_read; + int msg_count; +}; + + + +struct r3964_block_header; + +/* internal version of client_message: */ +struct r3964_message { + int msg_id; + int arg; + int error_code; + struct r3964_block_header *block; + struct r3964_message *next; +}; + +/* + * Header of received block in rx_buf/tx_buf: + */ + +struct r3964_block_header +{ + unsigned int length; /* length in chars without header */ + unsigned char *data; /* usually data is located + immediately behind this struct */ + unsigned int locks; /* only used in rx_buffer */ + + struct 
r3964_block_header *next; + struct r3964_client_info *owner; /* =NULL in rx_buffer */ +}; + +/* + * If rx_buf hasn't enough space to store R3964_MTU chars, + * we will reject all incoming STX-requests by sending NAK. + */ + +#define RX_BUF_SIZE 4000 +#define TX_BUF_SIZE 4000 +#define R3964_MAX_BLOCKS_IN_RX_QUEUE 100 + +#define R3964_PARITY 0x0001 +#define R3964_FRAME 0x0002 +#define R3964_OVERRUN 0x0004 +#define R3964_UNKNOWN 0x0008 +#define R3964_BREAK 0x0010 +#define R3964_CHECKSUM 0x0020 +#define R3964_ERROR 0x003f +#define R3964_BCC 0x4000 +#define R3964_DEBUG 0x8000 + + +struct r3964_info { + spinlock_t lock; + struct tty_struct *tty; + unsigned char priority; + unsigned char *rx_buf; /* ring buffer */ + unsigned char *tx_buf; + + struct r3964_block_header *rx_first; + struct r3964_block_header *rx_last; + struct r3964_block_header *tx_first; + struct r3964_block_header *tx_last; + unsigned int tx_position; + unsigned int rx_position; + unsigned char last_rx; + unsigned char bcc; + unsigned int blocks_in_rx_queue; + + struct mutex read_lock; /* serialize r3964_read */ + + struct r3964_client_info *firstClient; + unsigned int state; + unsigned int flags; + + struct timer_list tmr; + int nRetry; +}; + +#endif diff --git a/include/linux/namei.h b/include/linux/namei.h new file mode 100644 index 000000000..a78606e8e --- /dev/null +++ b/include/linux/namei.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_NAMEI_H +#define _LINUX_NAMEI_H + +#include +#include +#include +#include + +enum { MAX_NESTED_LINKS = 8 }; + +#define MAXSYMLINKS 40 + +/* + * Type of the last component on LOOKUP_PARENT + */ +enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; + +/* + * The bitmask for a lookup event: + * - follow links at the end + * - require a directory + * - ending slashes ok even for nonexistent files + * - internal "there are more path components" flag + * - dentry cache is untrusted; force a real lookup + * - suppress terminal automount + */ +#define LOOKUP_FOLLOW 0x0001 +#define LOOKUP_DIRECTORY 0x0002 +#define LOOKUP_AUTOMOUNT 0x0004 + +#define LOOKUP_PARENT 0x0010 +#define LOOKUP_REVAL 0x0020 +#define LOOKUP_RCU 0x0040 +#define LOOKUP_NO_REVAL 0x0080 + +/* + * Intent data + */ +#define LOOKUP_OPEN 0x0100 +#define LOOKUP_CREATE 0x0200 +#define LOOKUP_EXCL 0x0400 +#define LOOKUP_RENAME_TARGET 0x0800 + +#define LOOKUP_JUMPED 0x1000 +#define LOOKUP_ROOT 0x2000 +#define LOOKUP_EMPTY 0x4000 +#define LOOKUP_DOWN 0x8000 + +extern int path_pts(struct path *path); + +extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); + +static inline int user_path_at(int dfd, const char __user *name, unsigned flags, + struct path *path) +{ + return user_path_at_empty(dfd, name, flags, path, NULL); +} + +static inline int user_path(const char __user *name, struct path *path) +{ + return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL); +} + +static inline int user_lpath(const char __user *name, struct path *path) +{ + return user_path_at_empty(AT_FDCWD, name, 0, path, NULL); +} + +static inline int user_path_dir(const char __user *name, struct path *path) +{ + return user_path_at_empty(AT_FDCWD, name, + LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL); +} + +extern int kern_path(const char *, unsigned, struct path *); + +extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int); +extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int); +extern void 
done_path_create(struct path *, struct dentry *); +extern struct dentry *kern_path_locked(const char *, struct path *); +extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int); + +extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int); +extern struct dentry *lookup_one_len(const char *, struct dentry *, int); +extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); + +extern int follow_down_one(struct path *); +extern int follow_down(struct path *); +extern int follow_up(struct path *); + +extern struct dentry *lock_rename(struct dentry *, struct dentry *); +extern void unlock_rename(struct dentry *, struct dentry *); + +extern void nd_jump_link(struct path *path); + +static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) +{ + ((char *) name)[min(len, maxlen)] = '\0'; +} + +/** + * retry_estale - determine whether the caller should retry an operation + * @error: the error that would currently be returned + * @flags: flags being used for next lookup attempt + * + * Check to see if the error code was -ESTALE, and then determine whether + * to retry the call based on whether "flags" already has LOOKUP_REVAL set. + * + * Returns true if the caller should try the operation again. + */ +static inline bool +retry_estale(const long error, const unsigned int flags) +{ + return error == -ESTALE && !(flags & LOOKUP_REVAL); +} + +#endif /* _LINUX_NAMEI_H */ diff --git a/include/linux/nd.h b/include/linux/nd.h new file mode 100644 index 000000000..43c181a6a --- /dev/null +++ b/include/linux/nd.h @@ -0,0 +1,191 @@ +/* + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef __LINUX_ND_H__ +#define __LINUX_ND_H__ +#include +#include +#include +#include + +enum nvdimm_event { + NVDIMM_REVALIDATE_POISON, +}; + +enum nvdimm_claim_class { + NVDIMM_CCLASS_NONE, + NVDIMM_CCLASS_BTT, + NVDIMM_CCLASS_BTT2, + NVDIMM_CCLASS_PFN, + NVDIMM_CCLASS_DAX, + NVDIMM_CCLASS_UNKNOWN, +}; + +struct nd_device_driver { + struct device_driver drv; + unsigned long type; + int (*probe)(struct device *dev); + int (*remove)(struct device *dev); + void (*shutdown)(struct device *dev); + void (*notify)(struct device *dev, enum nvdimm_event event); +}; + +static inline struct nd_device_driver *to_nd_device_driver( + struct device_driver *drv) +{ + return container_of(drv, struct nd_device_driver, drv); +}; + +/** + * struct nd_namespace_common - core infrastructure of a namespace + * @force_raw: ignore other personalities for the namespace (e.g. 
btt) + * @dev: device model node + * @claim: when set a another personality has taken ownership of the namespace + * @claim_class: restrict claim type to a given class + * @rw_bytes: access the raw namespace capacity with byte-aligned transfers + */ +struct nd_namespace_common { + int force_raw; + struct device dev; + struct device *claim; + enum nvdimm_claim_class claim_class; + int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset, + void *buf, size_t size, int rw, unsigned long flags); +}; + +static inline struct nd_namespace_common *to_ndns(struct device *dev) +{ + return container_of(dev, struct nd_namespace_common, dev); +} + +/** + * struct nd_namespace_io - device representation of a persistent memory range + * @dev: namespace device created by the nd region driver + * @res: struct resource conversion of a NFIT SPA table + * @size: cached resource_size(@res) for fast path size checks + * @addr: virtual address to access the namespace range + * @bb: badblocks list for the namespace range + */ +struct nd_namespace_io { + struct nd_namespace_common common; + struct resource res; + resource_size_t size; + void *addr; + struct badblocks bb; +}; + +/** + * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory + * @nsio: device and system physical address range to drive + * @lbasize: logical sector size for the namespace in block-device-mode + * @alt_name: namespace name supplied in the dimm label + * @uuid: namespace name supplied in the dimm label + * @id: ida allocated id + */ +struct nd_namespace_pmem { + struct nd_namespace_io nsio; + unsigned long lbasize; + char *alt_name; + u8 *uuid; + int id; +}; + +/** + * struct nd_namespace_blk - namespace for dimm-bounded persistent memory + * @alt_name: namespace name supplied in the dimm label + * @uuid: namespace name supplied in the dimm label + * @id: ida allocated id + * @lbasize: blk namespaces have a native sector size when btt not present + * @size: sum of all the resource ranges allocated to this namespace + * @num_resources: number of dpa extents to claim + * @res: discontiguous dpa extents for given dimm + */ +struct nd_namespace_blk { + struct nd_namespace_common common; + char *alt_name; + u8 *uuid; + int id; + unsigned long lbasize; + resource_size_t size; + int num_resources; + struct resource **res; +}; + +static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev) +{ + return container_of(dev, struct nd_namespace_io, common.dev); +} + +static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device *dev) +{ + struct nd_namespace_io *nsio = to_nd_namespace_io(dev); + + return container_of(nsio, struct nd_namespace_pmem, nsio); +} + +static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev) +{ + return container_of(dev, struct nd_namespace_blk, common.dev); +} + +/** + * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace + * @ndns: device to read + * @offset: namespace-relative starting offset + * @buf: buffer to fill + * @size: transfer length + * + * @buf is up-to-date upon return from this routine. 
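Returning briefly to retry_estale() in namei.h above: its kernel-doc describes a retry convention that is clearest as a loop. The sketch below is hypothetical and not part of the patch; user_path_at(), LOOKUP_FOLLOW and LOOKUP_REVAL come from namei.h, AT_FDCWD from <linux/fcntl.h>.

#include <linux/namei.h>
#include <linux/fcntl.h>

static int example_lookup_with_retry(const char __user *name,
                                     struct path *path)
{
        unsigned int lookup_flags = LOOKUP_FOLLOW;
        int error;

retry:
        error = user_path_at(AT_FDCWD, name, lookup_flags, path);
        if (retry_estale(error, lookup_flags)) {
                /* The dcache gave us a stale answer; force a real lookup. */
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
        /* On success the caller owns a reference on *path (path_put()). */
        return error;
}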
+ */ +static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns, + resource_size_t offset, void *buf, size_t size, + unsigned long flags) +{ + return ndns->rw_bytes(ndns, offset, buf, size, READ, flags); +} + +/** + * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace + * @ndns: device to read + * @offset: namespace-relative starting offset + * @buf: buffer to drain + * @size: transfer length + * + * NVDIMM Namepaces disks do not implement sectors internally. Depending on + * the @ndns, the contents of @buf may be in cpu cache, platform buffers, + * or on backing memory media upon return from this routine. Flushing + * to media is handled internal to the @ndns driver, if at all. + */ +static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns, + resource_size_t offset, void *buf, size_t size, + unsigned long flags) +{ + return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags); +} + +#define MODULE_ALIAS_ND_DEVICE(type) \ + MODULE_ALIAS("nd:t" __stringify(type) "*") +#define ND_DEVICE_MODALIAS_FMT "nd:t%d" + +struct nd_region; +void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event); +int __must_check __nd_driver_register(struct nd_device_driver *nd_drv, + struct module *module, const char *mod_name); +static inline void nd_driver_unregister(struct nd_device_driver *drv) +{ + driver_unregister(&drv->drv); +} +#define nd_driver_register(driver) \ + __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +#define module_nd_driver(driver) \ + module_driver(driver, nd_driver_register, nd_driver_unregister) +#endif /* __LINUX_ND_H__ */ diff --git a/include/linux/net.h b/include/linux/net.h new file mode 100644 index 000000000..41dc703b2 --- /dev/null +++ b/include/linux/net.h @@ -0,0 +1,333 @@ +/* + * NET An implementation of the SOCKET network access protocol. + * This is the master header file for the Linux NET layer, + * or, in plain English: the networking handling part of the + * kernel. + * + * Version: @(#)net.h 1.0.3 05/25/93 + * + * Authors: Orest Zborowski, + * Ross Biro + * Fred N. van Kempen, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_NET_H +#define _LINUX_NET_H + +#include +#include +#include +#include /* For O_CLOEXEC and O_NONBLOCK */ +#include +#include +#include + +#include + +struct poll_table_struct; +struct pipe_inode_info; +struct inode; +struct file; +struct net; + +/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located + * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected. + * Eventually all flags will be in sk->sk_wq->flags. + */ +#define SOCKWQ_ASYNC_NOSPACE 0 +#define SOCKWQ_ASYNC_WAITDATA 1 +#define SOCK_NOSPACE 2 +#define SOCK_PASSCRED 3 +#define SOCK_PASSSEC 4 + +#ifndef ARCH_HAS_SOCKET_TYPES +/** + * enum sock_type - Socket types + * @SOCK_STREAM: stream (connection) socket + * @SOCK_DGRAM: datagram (conn.less) socket + * @SOCK_RAW: raw socket + * @SOCK_RDM: reliably-delivered message + * @SOCK_SEQPACKET: sequential packet socket + * @SOCK_DCCP: Datagram Congestion Control Protocol socket + * @SOCK_PACKET: linux specific way of getting packets at the dev level. + * For writing rarp and other similar things on the user level. 
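The rw_bytes interface of nd.h is easiest to see from a small caller. The sketch below is illustrative only, not part of the patch; it uses nothing beyond nvdimm_read_bytes() and the claim field defined above, and the 512-byte probe size is an arbitrary example.

#include <linux/errno.h>
#include <linux/nd.h>

/* Read the first 512 bytes of a namespace, but only if no other
 * personality (btt/pfn/dax) has claimed it.
 */
static int example_peek_namespace(struct nd_namespace_common *ndns, void *buf)
{
        if (ndns->claim)
                return -EBUSY;

        return nvdimm_read_bytes(ndns, 0, buf, 512, 0);
}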
+ * + * When adding some new socket type please + * grep ARCH_HAS_SOCKET_TYPE include/asm-* /socket.h, at least MIPS + * overrides this enum for binary compat reasons. + */ +enum sock_type { + SOCK_STREAM = 1, + SOCK_DGRAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; + +#define SOCK_MAX (SOCK_PACKET + 1) +/* Mask which covers at least up to SOCK_MASK-1. The + * remaining bits are used as flags. */ +#define SOCK_TYPE_MASK 0xf + +/* Flags for socket, socketpair, accept4 */ +#define SOCK_CLOEXEC O_CLOEXEC +#ifndef SOCK_NONBLOCK +#define SOCK_NONBLOCK O_NONBLOCK +#endif + +#endif /* ARCH_HAS_SOCKET_TYPES */ + +/** + * enum sock_shutdown_cmd - Shutdown types + * @SHUT_RD: shutdown receptions + * @SHUT_WR: shutdown transmissions + * @SHUT_RDWR: shutdown receptions/transmissions + */ +enum sock_shutdown_cmd { + SHUT_RD, + SHUT_WR, + SHUT_RDWR, +}; + +struct socket_wq { + /* Note: wait MUST be first field of socket_wq */ + wait_queue_head_t wait; + struct fasync_struct *fasync_list; + unsigned long flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */ + struct rcu_head rcu; +} ____cacheline_aligned_in_smp; + +/** + * struct socket - general BSD socket + * @state: socket state (%SS_CONNECTED, etc) + * @type: socket type (%SOCK_STREAM, etc) + * @flags: socket flags (%SOCK_NOSPACE, etc) + * @ops: protocol specific socket operations + * @file: File back pointer for gc + * @sk: internal networking protocol agnostic socket representation + * @wq: wait queue for several uses + */ +struct socket { + socket_state state; + + short type; + + unsigned long flags; + + struct socket_wq *wq; + + struct file *file; + struct sock *sk; + const struct proto_ops *ops; +}; + +struct vm_area_struct; +struct page; +struct sockaddr; +struct msghdr; +struct module; +struct sk_buff; +typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, + unsigned int, size_t); + +struct proto_ops { + int family; + struct module *owner; + int (*release) (struct socket *sock); + int (*bind) (struct socket *sock, + struct sockaddr *myaddr, + int sockaddr_len); + int (*connect) (struct socket *sock, + struct sockaddr *vaddr, + int sockaddr_len, int flags); + int (*socketpair)(struct socket *sock1, + struct socket *sock2); + int (*accept) (struct socket *sock, + struct socket *newsock, int flags, bool kern); + int (*getname) (struct socket *sock, + struct sockaddr *addr, + int peer); + __poll_t (*poll) (struct file *file, struct socket *sock, + struct poll_table_struct *wait); + int (*ioctl) (struct socket *sock, unsigned int cmd, + unsigned long arg); +#ifdef CONFIG_COMPAT + int (*compat_ioctl) (struct socket *sock, unsigned int cmd, + unsigned long arg); +#endif + int (*listen) (struct socket *sock, int len); + int (*shutdown) (struct socket *sock, int flags); + int (*setsockopt)(struct socket *sock, int level, + int optname, char __user *optval, unsigned int optlen); + int (*getsockopt)(struct socket *sock, int level, + int optname, char __user *optval, int __user *optlen); +#ifdef CONFIG_COMPAT + int (*compat_setsockopt)(struct socket *sock, int level, + int optname, char __user *optval, unsigned int optlen); + int (*compat_getsockopt)(struct socket *sock, int level, + int optname, char __user *optval, int __user *optlen); +#endif + int (*sendmsg) (struct socket *sock, struct msghdr *m, + size_t total_len); + /* Notes for implementing recvmsg: + * =============================== + * msg->msg_namelen should get updated by the recvmsg handlers + * iff msg_name != NULL. 
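The recvmsg notes above spell out a contract that is easier to read in code. The handler below is a hypothetical illustration, not part of the patch: it touches msg_name/msg_namelen only when msg_name is non-NULL, relies on the sockaddr_storage size guarantee described here, and uses DECLARE_SOCKADDR(), defined a little further down in this header. The IPv4 address family is an arbitrary choice.

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/in.h>

static int example_recvmsg(struct socket *sock, struct msghdr *msg,
                           size_t total_len, int flags)
{
        int copied = 0;

        /* ... copy received payload into msg->msg_iter, set copied ... */

        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);

                sin->sin_family = AF_INET;
                sin->sin_port = 0;              /* peer's port goes here */
                sin->sin_addr.s_addr = 0;       /* and the peer's address */
                msg->msg_namelen = sizeof(*sin);
        }

        return copied;
}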
It is by default 0 to prevent + * returning uninitialized memory to user space. The recvfrom + * handlers can assume that msg.msg_name is either NULL or has + * a minimum size of sizeof(struct sockaddr_storage). + */ + int (*recvmsg) (struct socket *sock, struct msghdr *m, + size_t total_len, int flags); + int (*mmap) (struct file *file, struct socket *sock, + struct vm_area_struct * vma); + ssize_t (*sendpage) (struct socket *sock, struct page *page, + int offset, size_t size, int flags); + ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, unsigned int flags); + int (*set_peek_off)(struct sock *sk, int val); + int (*peek_len)(struct socket *sock); + + /* The following functions are called internally by kernel with + * sock lock already held. + */ + int (*read_sock)(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor); + int (*sendpage_locked)(struct sock *sk, struct page *page, + int offset, size_t size, int flags); + int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, + size_t size); + int (*set_rcvlowat)(struct sock *sk, int val); +}; + +#define DECLARE_SOCKADDR(type, dst, src) \ + type dst = ({ __sockaddr_check_size(sizeof(*dst)); (type) src; }) + +struct net_proto_family { + int family; + int (*create)(struct net *net, struct socket *sock, + int protocol, int kern); + struct module *owner; +}; + +struct iovec; +struct kvec; + +enum { + SOCK_WAKE_IO, + SOCK_WAKE_WAITD, + SOCK_WAKE_SPACE, + SOCK_WAKE_URG, +}; + +int sock_wake_async(struct socket_wq *sk_wq, int how, int band); +int sock_register(const struct net_proto_family *fam); +void sock_unregister(int family); +bool sock_is_registered(int family); +int __sock_create(struct net *net, int family, int type, int proto, + struct socket **res, int kern); +int sock_create(int family, int type, int proto, struct socket **res); +int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res); +int sock_create_lite(int family, int type, int proto, struct socket **res); +struct socket *sock_alloc(void); +void sock_release(struct socket *sock); +int sock_sendmsg(struct socket *sock, struct msghdr *msg); +int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags); +struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); +struct socket *sockfd_lookup(int fd, int *err); +struct socket *sock_from_file(struct file *file, int *err); +#define sockfd_put(sock) fput(sock->file) +int net_ratelimit(void); + +#define net_ratelimited_function(function, ...) \ +do { \ + if (net_ratelimit()) \ + function(__VA_ARGS__); \ +} while (0) + +#define net_emerg_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_emerg, fmt, ##__VA_ARGS__) +#define net_alert_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_alert, fmt, ##__VA_ARGS__) +#define net_crit_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_crit, fmt, ##__VA_ARGS__) +#define net_err_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__) +#define net_notice_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_notice, fmt, ##__VA_ARGS__) +#define net_warn_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) +#define net_info_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) +#if defined(CONFIG_DYNAMIC_DEBUG) +#define net_dbg_ratelimited(fmt, ...) 
\ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ + net_ratelimit()) \ + __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ + ##__VA_ARGS__); \ +} while (0) +#elif defined(DEBUG) +#define net_dbg_ratelimited(fmt, ...) \ + net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) +#else +#define net_dbg_ratelimited(fmt, ...) \ + do { \ + if (0) \ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \ + } while (0) +#endif + +#define net_get_random_once(buf, nbytes) \ + get_random_once((buf), (nbytes)) +#define net_get_random_once_wait(buf, nbytes) \ + get_random_once_wait((buf), (nbytes)) + +int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, + size_t num, size_t len); +int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, + struct kvec *vec, size_t num, size_t len); +int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, + size_t num, size_t len, int flags); + +int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen); +int kernel_listen(struct socket *sock, int backlog); +int kernel_accept(struct socket *sock, struct socket **newsock, int flags); +int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, + int flags); +int kernel_getsockname(struct socket *sock, struct sockaddr *addr); +int kernel_getpeername(struct socket *sock, struct sockaddr *addr); +int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, + int *optlen); +int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, + unsigned int optlen); +int kernel_sendpage(struct socket *sock, struct page *page, int offset, + size_t size, int flags); +int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, + size_t size, int flags); +int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); + +/* Routine returns the IP overhead imposed by a (caller-protected) socket. */ +u32 kernel_sock_ip_overhead(struct sock *sk); + +#define MODULE_ALIAS_NETPROTO(proto) \ + MODULE_ALIAS("net-pf-" __stringify(proto)) + +#define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \ + MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto)) + +#define MODULE_ALIAS_NET_PF_PROTO_TYPE(pf, proto, type) \ + MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \ + "-type-" __stringify(type)) + +#define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \ + MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \ + name) +#endif /* _LINUX_NET_H */ diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h new file mode 100644 index 000000000..fd458389f --- /dev/null +++ b/include/linux/net_dim.h @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef NET_DIM_H +#define NET_DIM_H + +#include + +struct net_dim_cq_moder { + u16 usec; + u16 pkts; + u8 cq_period_mode; +}; + +struct net_dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; +}; + +struct net_dim_stats { + int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ + int epms; /* events per msec */ +}; + +struct net_dim { /* Adaptive Moderation */ + u8 state; + struct net_dim_stats prev_stats; + struct net_dim_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +enum { + NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + NET_DIM_CQ_PERIOD_NUM_MODES +}; + +/* Adaptive moderation logic */ +enum { + NET_DIM_START_MEASURE, + NET_DIM_MEASURE_IN_PROGRESS, + NET_DIM_APPLY_NEW_PROFILE, +}; + +enum { + NET_DIM_PARKING_ON_TOP, + NET_DIM_PARKING_TIRED, + NET_DIM_GOING_RIGHT, + NET_DIM_GOING_LEFT, +}; + +enum { + NET_DIM_STATS_WORSE, + NET_DIM_STATS_SAME, + NET_DIM_STATS_BETTER, +}; + +enum { + NET_DIM_STEPPED, + NET_DIM_TOO_TIRED, + NET_DIM_ON_EDGE, +}; + +#define NET_DIM_PARAMS_NUM_PROFILES 5 +/* Adaptive moderation profiles */ +#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 +#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128 +#define NET_DIM_DEF_PROFILE_CQE 1 +#define NET_DIM_DEF_PROFILE_EQE 1 + +/* All profiles sizes must be NET_PARAMS_DIM_NUM_PROFILES */ +#define NET_DIM_RX_EQE_PROFILES { \ + {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ +} + +#define NET_DIM_RX_CQE_PROFILES { \ + {2, 256}, \ + {8, 128}, \ + {16, 64}, \ + {32, 64}, \ + {64, 64} \ +} + +#define NET_DIM_TX_EQE_PROFILES { \ + {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \ +} + +#define NET_DIM_TX_CQE_PROFILES { \ + {5, 128}, \ + {8, 64}, \ + {16, 32}, \ + {32, 32}, \ + {64, 32} \ +} + +static const struct net_dim_cq_moder +rx_profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { + NET_DIM_RX_EQE_PROFILES, + NET_DIM_RX_CQE_PROFILES, +}; + +static const struct net_dim_cq_moder +tx_profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { + NET_DIM_TX_EQE_PROFILES, + NET_DIM_TX_CQE_PROFILES, +}; + +static inline struct net_dim_cq_moder +net_dim_get_rx_moderation(u8 cq_period_mode, int ix) +{ + struct net_dim_cq_moder 
cq_moder = rx_profile[cq_period_mode][ix]; + + cq_moder.cq_period_mode = cq_period_mode; + return cq_moder; +} + +static inline struct net_dim_cq_moder +net_dim_get_def_rx_moderation(u8 cq_period_mode) +{ + u8 profile_ix = cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE ? + NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE; + + return net_dim_get_rx_moderation(cq_period_mode, profile_ix); +} + +static inline struct net_dim_cq_moder +net_dim_get_tx_moderation(u8 cq_period_mode, int ix) +{ + struct net_dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix]; + + cq_moder.cq_period_mode = cq_period_mode; + return cq_moder; +} + +static inline struct net_dim_cq_moder +net_dim_get_def_tx_moderation(u8 cq_period_mode) +{ + u8 profile_ix = cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE ? + NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE; + + return net_dim_get_tx_moderation(cq_period_mode, profile_ix); +} + +static inline bool net_dim_on_top(struct net_dim *dim) +{ + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + return true; + case NET_DIM_GOING_RIGHT: + return (dim->steps_left > 1) && (dim->steps_right == 1); + default: /* NET_DIM_GOING_LEFT */ + return (dim->steps_right > 1) && (dim->steps_left == 1); + } +} + +static inline void net_dim_turn(struct net_dim *dim) +{ + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + break; + case NET_DIM_GOING_RIGHT: + dim->tune_state = NET_DIM_GOING_LEFT; + dim->steps_left = 0; + break; + case NET_DIM_GOING_LEFT: + dim->tune_state = NET_DIM_GOING_RIGHT; + dim->steps_right = 0; + break; + } +} + +static inline int net_dim_step(struct net_dim *dim) +{ + if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) + return NET_DIM_TOO_TIRED; + + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + break; + case NET_DIM_GOING_RIGHT: + if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) + return NET_DIM_ON_EDGE; + dim->profile_ix++; + dim->steps_right++; + break; + case NET_DIM_GOING_LEFT: + if (dim->profile_ix == 0) + return NET_DIM_ON_EDGE; + dim->profile_ix--; + dim->steps_left++; + break; + } + + dim->tired++; + return NET_DIM_STEPPED; +} + +static inline void net_dim_park_on_top(struct net_dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tired = 0; + dim->tune_state = NET_DIM_PARKING_ON_TOP; +} + +static inline void net_dim_park_tired(struct net_dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tune_state = NET_DIM_PARKING_TIRED; +} + +static inline void net_dim_exit_parking(struct net_dim *dim) +{ + dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT : + NET_DIM_GOING_RIGHT; + net_dim_step(dim); +} + +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100UL * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + +static inline int net_dim_stats_compare(struct net_dim_stats *curr, + struct net_dim_stats *prev) +{ + if (!prev->bpms) + return curr->bpms ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + if (!prev->ppms) + return curr->ppms ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? 
NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + if (!prev->epms) + return NET_DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + return NET_DIM_STATS_SAME; +} + +static inline bool net_dim_decision(struct net_dim_stats *curr_stats, + struct net_dim *dim) +{ + int prev_state = dim->tune_state; + int prev_ix = dim->profile_ix; + int stats_res; + int step_res; + + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != NET_DIM_STATS_SAME) + net_dim_exit_parking(dim); + break; + + case NET_DIM_PARKING_TIRED: + dim->tired--; + if (!dim->tired) + net_dim_exit_parking(dim); + break; + + case NET_DIM_GOING_RIGHT: + case NET_DIM_GOING_LEFT: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != NET_DIM_STATS_BETTER) + net_dim_turn(dim); + + if (net_dim_on_top(dim)) { + net_dim_park_on_top(dim); + break; + } + + step_res = net_dim_step(dim); + switch (step_res) { + case NET_DIM_ON_EDGE: + net_dim_park_on_top(dim); + break; + case NET_DIM_TOO_TIRED: + net_dim_park_tired(dim); + break; + } + + break; + } + + if ((prev_state != NET_DIM_PARKING_ON_TOP) || + (dim->tune_state != NET_DIM_PARKING_ON_TOP)) + dim->prev_stats = *curr_stats; + + return dim->profile_ix != prev_ix; +} + +static inline void net_dim_sample(u16 event_ctr, + u64 packets, + u64 bytes, + struct net_dim_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = packets; + s->byte_ctr = bytes; + s->event_ctr = event_ctr; +} + +#define NET_DIM_NEVENTS 64 +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) + +static inline void net_dim_calc_stats(struct net_dim_sample *start, + struct net_dim_sample *end, + struct net_dim_stats *curr_stats) +{ + /* u32 holds up to 71 minutes, should be enough */ + u32 delta_us = ktime_us_delta(end->time, start->time); + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); + + if (!delta_us) + return; + + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC, + delta_us); +} + +static inline void net_dim(struct net_dim *dim, + struct net_dim_sample end_sample) +{ + struct net_dim_stats curr_stats; + u16 nevents; + + switch (dim->state) { + case NET_DIM_MEASURE_IN_PROGRESS: + nevents = BIT_GAP(BITS_PER_TYPE(u16), + end_sample.event_ctr, + dim->start_sample.event_ctr); + if (nevents < NET_DIM_NEVENTS) + break; + net_dim_calc_stats(&dim->start_sample, &end_sample, + &curr_stats); + if (net_dim_decision(&curr_stats, dim)) { + dim->state = NET_DIM_APPLY_NEW_PROFILE; + schedule_work(&dim->work); + break; + } + /* fall through */ + case NET_DIM_START_MEASURE: + net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr, + &dim->start_sample); + dim->state = NET_DIM_MEASURE_IN_PROGRESS; + break; + case NET_DIM_APPLY_NEW_PROFILE: + break; + } +} + +#endif /* NET_DIM_H */ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h new file mode 100644 index 000000000..78411dc4a --- /dev/null +++ b/include/linux/netdev_features.h @@ -0,0 +1,248 @@ +/* + * Network device features. 
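The net_dim state machine above is driven from two places in a driver: the NAPI poll path feeds it samples, and the work item it schedules applies whatever profile it settled on. The sketch below is not part of the patch; struct example_ring and its counters are made up, while the net_dim calls themselves follow the helpers defined above.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/net_dim.h>

/* Hypothetical per-ring bookkeeping. */
struct example_ring {
        u16 events;
        u64 packets;
        u64 bytes;
        struct net_dim dim;     /* dim.work must be INIT_WORK()ed at setup */
};

/* Work handler: apply the profile the algorithm picked. */
static void example_dim_work(struct work_struct *work)
{
        struct net_dim *dim = container_of(work, struct net_dim, work);
        struct net_dim_cq_moder cur;

        cur = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
        /* ... program cur.usec / cur.pkts into the NIC's RX coalescing ... */
        dim->state = NET_DIM_START_MEASURE;
}

/* Called at the end of the driver's NAPI poll. */
static void example_update_dim(struct example_ring *ring)
{
        struct net_dim_sample sample;

        net_dim_sample(ring->events, ring->packets, ring->bytes, &sample);
        net_dim(&ring->dim, sample);
}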
+ * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_NETDEV_FEATURES_H +#define _LINUX_NETDEV_FEATURES_H + +#include +#include +#include + +typedef u64 netdev_features_t; + +enum { + NETIF_F_SG_BIT, /* Scatter/gather IO. */ + NETIF_F_IP_CSUM_BIT, /* Can checksum TCP/UDP over IPv4. */ + __UNUSED_NETIF_F_1, + NETIF_F_HW_CSUM_BIT, /* Can checksum all the packets. */ + NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */ + NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */ + NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */ + NETIF_F_HW_VLAN_CTAG_TX_BIT, /* Transmit VLAN CTAG HW acceleration */ + NETIF_F_HW_VLAN_CTAG_RX_BIT, /* Receive VLAN CTAG HW acceleration */ + NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */ + NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */ + NETIF_F_GSO_BIT, /* Enable software GSO. */ + NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */ + /* do not use LLTX in new drivers */ + NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */ + NETIF_F_GRO_BIT, /* Generic receive offload */ + NETIF_F_LRO_BIT, /* large receive offload */ + + /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */ + NETIF_F_TSO_BIT /* ... TCPv4 segmentation */ + = NETIF_F_GSO_SHIFT, + NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ + NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ + NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */ + NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ + NETIF_F_FSO_BIT, /* ... FCoE segmentation */ + NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ + NETIF_F_GSO_GRE_CSUM_BIT, /* ... GRE with csum with TSO */ + NETIF_F_GSO_IPXIP4_BIT, /* ... IP4 or IP6 over IP4 with TSO */ + NETIF_F_GSO_IPXIP6_BIT, /* ... IP4 or IP6 over IP6 with TSO */ + NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ + NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ + NETIF_F_GSO_PARTIAL_BIT, /* ... Only segment inner-most L4 + * in hardware and all other + * headers in software. + */ + NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ + NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */ + NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */ + NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */ + NETIF_F_GSO_UDP_L4_BIT, /* ... 
UDP payload GSO (not UFO) */ + /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ + NETIF_F_GSO_UDP_L4_BIT, + + NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ + NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ + NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/ + NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ + NETIF_F_RXHASH_BIT, /* Receive hashing offload */ + NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */ + NETIF_F_NOCACHE_COPY_BIT, /* Use no-cache copyfromuser */ + NETIF_F_LOOPBACK_BIT, /* Enable loopback */ + NETIF_F_RXFCS_BIT, /* Append FCS to skb pkt data */ + NETIF_F_RXALL_BIT, /* Receive errored frames too */ + NETIF_F_HW_VLAN_STAG_TX_BIT, /* Transmit VLAN STAG HW acceleration */ + NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */ + NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ + NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ + + NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ + NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */ + NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ + NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ + NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */ + NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */ + + NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ + NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ + + /* + * Add your fresh new feature above and remember to update + * netdev_features_strings[] in net/ethtool/common.c and maybe + * some feature mask #defines below. Please also describe it + * in Documentation/networking/netdev-features.txt. + */ + + /**/NETDEV_FEATURE_COUNT +}; + +/* copy'n'paste compression ;) */ +#define __NETIF_F_BIT(bit) ((netdev_features_t)1 << (bit)) +#define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT) + +#define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC) +#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU) +#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) +#define NETIF_F_FSO __NETIF_F(FSO) +#define NETIF_F_GRO __NETIF_F(GRO) +#define NETIF_F_GRO_HW __NETIF_F(GRO_HW) +#define NETIF_F_GSO __NETIF_F(GSO) +#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) +#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) +#define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM) +#define NETIF_F_HW_VLAN_CTAG_FILTER __NETIF_F(HW_VLAN_CTAG_FILTER) +#define NETIF_F_HW_VLAN_CTAG_RX __NETIF_F(HW_VLAN_CTAG_RX) +#define NETIF_F_HW_VLAN_CTAG_TX __NETIF_F(HW_VLAN_CTAG_TX) +#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM) +#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM) +#define NETIF_F_LLTX __NETIF_F(LLTX) +#define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK) +#define NETIF_F_LRO __NETIF_F(LRO) +#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL) +#define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY) +#define NETIF_F_NTUPLE __NETIF_F(NTUPLE) +#define NETIF_F_RXCSUM __NETIF_F(RXCSUM) +#define NETIF_F_RXHASH __NETIF_F(RXHASH) +#define NETIF_F_SCTP_CRC __NETIF_F(SCTP_CRC) +#define NETIF_F_SG __NETIF_F(SG) +#define NETIF_F_TSO6 __NETIF_F(TSO6) +#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) +#define NETIF_F_TSO __NETIF_F(TSO) +#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) +#define NETIF_F_RXFCS __NETIF_F(RXFCS) +#define NETIF_F_RXALL __NETIF_F(RXALL) +#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) +#define NETIF_F_GSO_GRE_CSUM __NETIF_F(GSO_GRE_CSUM) +#define NETIF_F_GSO_IPXIP4 __NETIF_F(GSO_IPXIP4) +#define NETIF_F_GSO_IPXIP6 __NETIF_F(GSO_IPXIP6) +#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 
__NETIF_F(GSO_UDP_TUNNEL_CSUM) +#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID) +#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL) +#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) +#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP) +#define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP) +#define NETIF_F_GSO_UDP __NETIF_F(GSO_UDP) +#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) +#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) +#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) +#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) +#define NETIF_F_HW_TC __NETIF_F(HW_TC) +#define NETIF_F_HW_ESP __NETIF_F(HW_ESP) +#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM) +#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT) +#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD) +#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4) +#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) +#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) + +/* Finds the next feature with the highest number of the range of start-1 till 0. + */ +static inline int find_next_netdev_feature(u64 feature, unsigned long start) +{ + /* like BITMAP_LAST_WORD_MASK() for u64 + * this sets the most significant 64 - start to 0. + */ + feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); + + return fls64(feature) - 1; +} + +/* This goes for the MSB to the LSB through the set feature bits, + * mask_addr should be a u64 and bit an int + */ +#define for_each_netdev_feature(mask_addr, bit) \ + for ((bit) = find_next_netdev_feature((mask_addr), \ + NETDEV_FEATURE_COUNT); \ + (bit) >= 0; \ + (bit) = find_next_netdev_feature((mask_addr), (bit))) + +/* Features valid for ethtool to change */ +/* = all defined minus driver/device-class-related */ +#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ + NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) + +/* remember that ((t)1 << t_BITS) is undefined in C99 */ +#define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \ + (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \ + ~NETIF_F_NEVER_CHANGE) + +/* Segmentation offload feature mask */ +#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ + __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) + +/* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be + * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set-- + * this would be contradictory + */ +#define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_HW_CSUM) + +#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \ + NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID) + +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) + +/* List of features with software fallbacks. */ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \ + NETIF_F_GSO_SCTP) + +/* + * If one device supports one of these features, then enable them + * for all in netdev_increment_features. + */ +#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ + NETIF_F_SG | NETIF_F_HIGHDMA | \ + NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) + +/* + * If one device doesn't support one of these features, then disable it + * for all in netdev_increment_features. + */ +#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) + +/* + * If upper/master device has these features disabled, they must be disabled + * on all lower/slave devices as well. 
+ */ +#define NETIF_F_UPPER_DISABLES NETIF_F_LRO + +/* changeable features with no special hardware requirements */ +#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) + +#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_FILTER | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_STAG_TX) + +#define NETIF_F_GSO_ENCAP_ALL (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +#endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h new file mode 100644 index 000000000..8d48b352e --- /dev/null +++ b/include/linux/netdevice.h @@ -0,0 +1,4826 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Interfaces handler. + * + * Version: @(#)dev.h 1.0.10 08/12/93 + * + * Authors: Ross Biro + * Fred N. van Kempen, + * Corey Minyard + * Donald J. Becker, + * Alan Cox, + * Bjorn Ekwall. + * Pekka Riikonen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Moved to /usr/include/linux for NET3 + */ +#ifndef _LINUX_NETDEVICE_H +#define _LINUX_NETDEVICE_H + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#ifdef CONFIG_DCB +#include +#endif +#include +#include + +#include +#include +#include +#include +#include +#include + +struct netpoll_info; +struct device; +struct phy_device; +struct dsa_port; + +struct sfp_bus; +/* 802.11 specific */ +struct wireless_dev; +/* 802.15.4 specific */ +struct wpan_dev; +struct mpls_dev; +/* UDP Tunnel offloads */ +struct udp_tunnel_info; +struct bpf_prog; +struct xdp_buff; + +void netdev_set_default_ethtool_ops(struct net_device *dev, + const struct ethtool_ops *ops); + +/* Backlog congestion levels */ +#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ +#define NET_RX_DROP 1 /* packet dropped */ + +/* + * Transmit return codes: transmit return codes originate from three different + * namespaces: + * + * - qdisc return codes + * - driver transmit return codes + * - errno values + * + * Drivers are allowed to return any one of those in their hard_start_xmit() + * function. Real network devices commonly used with qdiscs should only return + * the driver transmit return codes though - when qdiscs are used, the actual + * transmission happens asynchronously, so the value is not propagated to + * higher layers. Virtual network devices transmit synchronously; in this case + * the driver transmit return codes are consumed by dev_queue_xmit(), and all + * others are propagated to higher layers. + */ + +/* qdisc ->enqueue() return codes. */ +#define NET_XMIT_SUCCESS 0x00 +#define NET_XMIT_DROP 0x01 /* skb dropped */ +#define NET_XMIT_CN 0x02 /* congestion notification */ +#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ + +/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. 
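The feature-mask helpers at the end of netdev_features.h are macro-heavy, so a tiny consumer may clarify them. The function below is illustrative only, not part of the patch: it composes a mask from the named bits and walks the set bits with for_each_netdev_feature(), which in this version takes the mask by value despite the mask_addr name.

#include <linux/netdev_features.h>
#include <linux/printk.h>

static void example_walk_features(void)
{
        netdev_features_t wanted = NETIF_F_SG | NETIF_F_HW_CSUM |
                                   NETIF_F_ALL_TSO | NETIF_F_GRO;
        int bit;

        /* NETIF_F_ALL_TSO expands to TSO | TSO6 | TSO_ECN | TSO_MANGLEID,
         * so each of those bits is reported individually here.
         */
        for_each_netdev_feature(wanted, bit)
                pr_debug("feature bit %d requested\n", bit);
}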
It + * indicates that the device will soon be dropping packets, or already drops + * some packets of the same priority; prompting us to send less aggressively. */ +#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) +#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) + +/* Driver transmit return codes */ +#define NETDEV_TX_MASK 0xf0 + +enum netdev_tx { + __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ + NETDEV_TX_OK = 0x00, /* driver took care of packet */ + NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ +}; +typedef enum netdev_tx netdev_tx_t; + +/* + * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; + * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. + */ +static inline bool dev_xmit_complete(int rc) +{ + /* + * Positive cases with an skb consumed by a driver: + * - successful transmission (rc == NETDEV_TX_OK) + * - error while transmitting (rc < 0) + * - error while queueing to a different device (rc & NET_XMIT_MASK) + */ + if (likely(rc < NET_XMIT_MASK)) + return true; + + return false; +} + +/* + * Compute the worst-case header length according to the protocols + * used. + */ + +#if defined(CONFIG_HYPERV_NET) +# define LL_MAX_HEADER 128 +#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) +# if defined(CONFIG_MAC80211_MESH) +# define LL_MAX_HEADER 128 +# else +# define LL_MAX_HEADER 96 +# endif +#else +# define LL_MAX_HEADER 32 +#endif + +#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ + !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) +#define MAX_HEADER LL_MAX_HEADER +#else +#define MAX_HEADER (LL_MAX_HEADER + 48) +#endif + +/* + * Old network device statistics. Fields are native words + * (unsigned long) so they can be read and written atomically. 
+ */ + +struct net_device_stats { + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long rx_errors; + unsigned long tx_errors; + unsigned long rx_dropped; + unsigned long tx_dropped; + unsigned long multicast; + unsigned long collisions; + unsigned long rx_length_errors; + unsigned long rx_over_errors; + unsigned long rx_crc_errors; + unsigned long rx_frame_errors; + unsigned long rx_fifo_errors; + unsigned long rx_missed_errors; + unsigned long tx_aborted_errors; + unsigned long tx_carrier_errors; + unsigned long tx_fifo_errors; + unsigned long tx_heartbeat_errors; + unsigned long tx_window_errors; + unsigned long rx_compressed; + unsigned long tx_compressed; +}; + + +#include +#include + +#ifdef CONFIG_RPS +#include +extern struct static_key rps_needed; +extern struct static_key rfs_needed; +#endif + +struct neighbour; +struct neigh_parms; +struct sk_buff; + +struct netdev_hw_addr { + struct list_head list; + unsigned char addr[MAX_ADDR_LEN]; + unsigned char type; +#define NETDEV_HW_ADDR_T_LAN 1 +#define NETDEV_HW_ADDR_T_SAN 2 +#define NETDEV_HW_ADDR_T_SLAVE 3 +#define NETDEV_HW_ADDR_T_UNICAST 4 +#define NETDEV_HW_ADDR_T_MULTICAST 5 + bool global_use; + int sync_cnt; + int refcount; + int synced; + struct rcu_head rcu_head; +}; + +struct netdev_hw_addr_list { + struct list_head list; + int count; +}; + +#define netdev_hw_addr_list_count(l) ((l)->count) +#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) +#define netdev_hw_addr_list_for_each(ha, l) \ + list_for_each_entry(ha, &(l)->list, list) + +#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) +#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) +#define netdev_for_each_uc_addr(ha, dev) \ + netdev_hw_addr_list_for_each(ha, &(dev)->uc) + +#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) +#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) +#define netdev_for_each_mc_addr(ha, dev) \ + netdev_hw_addr_list_for_each(ha, &(dev)->mc) + +struct hh_cache { + unsigned int hh_len; + seqlock_t hh_lock; + + /* cached hardware header; allow for machine alignment needs. */ +#define HH_DATA_MOD 16 +#define HH_DATA_OFF(__len) \ + (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) +#define HH_DATA_ALIGN(__len) \ + (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) + unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; +}; + +/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much. + * Alternative is: + * dev->hard_header_len ? (dev->hard_header_len + + * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 + * + * We could use other alignment values, but we must maintain the + * relationship HH alignment <= LL alignment. 
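[Editor's note] The hardware-address list helpers above are normally consumed from a driver's ndo_set_rx_mode() callback. A minimal sketch follows; example_write_mc_filter() and the 16-slot filter limit are hypothetical stand-ins for real hardware details.

#include <linux/netdevice.h>

/* Hypothetical hardware hook that programs one multicast filter slot. */
static void example_write_mc_filter(struct net_device *dev,
				    const unsigned char *addr)
{
	/* hardware-specific */
}

/* Sketch of an ndo_set_rx_mode() implementation walking the multicast
 * list that the core maintains for us.
 */
static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) || netdev_mc_count(dev) > 16) {
		/* too many entries: fall back to accepting all multicast */
		return;
	}

	netdev_for_each_mc_addr(ha, dev)
		example_write_mc_filter(dev, ha->addr);
}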
+ */ +#define LL_RESERVED_SPACE(dev) \ + ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) +#define LL_RESERVED_SPACE_EXTRA(dev,extra) \ + ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) + +struct header_ops { + int (*create) (struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned int len); + int (*parse)(const struct sk_buff *skb, unsigned char *haddr); + int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); + void (*cache_update)(struct hh_cache *hh, + const struct net_device *dev, + const unsigned char *haddr); + bool (*validate)(const char *ll_header, unsigned int len); + __be16 (*parse_protocol)(const struct sk_buff *skb); +}; + +/* These flag bits are private to the generic network queueing + * layer; they may not be explicitly referenced by any other + * code. + */ + +enum netdev_state_t { + __LINK_STATE_START, + __LINK_STATE_PRESENT, + __LINK_STATE_NOCARRIER, + __LINK_STATE_LINKWATCH_PENDING, + __LINK_STATE_DORMANT, +}; + + +/* + * This structure holds boot-time configured netdevice settings. They + * are then used in the device probing. + */ +struct netdev_boot_setup { + char name[IFNAMSIZ]; + struct ifmap map; +}; +#define NETDEV_BOOT_SETUP_MAX 8 + +int __init netdev_boot_setup(char *str); + +struct gro_list { + struct list_head list; + int count; +}; + +/* + * size of gro hash buckets, must less than bit number of + * napi_struct::gro_bitmask + */ +#define GRO_HASH_BUCKETS 8 + +/* + * Structure for NAPI scheduling similar to tasklet but with weighting + */ +struct napi_struct { + /* The poll_list must only be managed by the entity which + * changes the state of the NAPI_STATE_SCHED bit. This means + * whoever atomically sets that bit can add this napi_struct + * to the per-CPU poll_list, and whoever clears that bit + * can remove from the list right before clearing the bit. + */ + struct list_head poll_list; + + unsigned long state; + int weight; + unsigned long gro_bitmask; + int (*poll)(struct napi_struct *, int); +#ifdef CONFIG_NETPOLL + int poll_owner; +#endif + struct net_device *dev; + struct gro_list gro_hash[GRO_HASH_BUCKETS]; + struct sk_buff *skb; + struct hrtimer timer; + struct list_head dev_list; + struct hlist_node napi_hash_node; + unsigned int napi_id; +}; + +enum { + NAPI_STATE_SCHED, /* Poll is scheduled */ + NAPI_STATE_MISSED, /* reschedule a napi */ + NAPI_STATE_DISABLE, /* Disable pending */ + NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ + NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ + NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ + NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ +}; + +enum { + NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), + NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), + NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), + NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), + NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), + NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), + NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), +}; + +enum gro_result { + GRO_MERGED, + GRO_MERGED_FREE, + GRO_HELD, + GRO_NORMAL, + GRO_DROP, + GRO_CONSUMED, +}; +typedef enum gro_result gro_result_t; + +/* + * enum rx_handler_result - Possible return values for rx_handlers. + * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it + * further. + * @RX_HANDLER_ANOTHER: Do another round in receive path. 
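[Editor's note] LL_RESERVED_SPACE() is what callers use when they allocate an skb themselves and want the link-layer header (plus the device's needed_headroom) to fit without a reallocation. A small sketch, with a made-up helper name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Allocate an skb with enough headroom for dev's link-layer header and
 * any extra tailroom the device asked for.
 */
static struct sk_buff *example_alloc_xmit_skb(struct net_device *dev,
					      unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len +
			dev->needed_tailroom, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Reserve the headroom so a later skb_push() of the hard header
	 * cannot fail.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_put(skb, payload_len);
	return skb;
}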
This is indicated in + * case skb->dev was changed by rx_handler. + * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. + * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. + * + * rx_handlers are functions called from inside __netif_receive_skb(), to do + * special processing of the skb, prior to delivery to protocol handlers. + * + * Currently, a net_device can only have a single rx_handler registered. Trying + * to register a second rx_handler will return -EBUSY. + * + * To register a rx_handler on a net_device, use netdev_rx_handler_register(). + * To unregister a rx_handler on a net_device, use + * netdev_rx_handler_unregister(). + * + * Upon return, rx_handler is expected to tell __netif_receive_skb() what to + * do with the skb. + * + * If the rx_handler consumed the skb in some way, it should return + * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for + * the skb to be delivered in some other way. + * + * If the rx_handler changed skb->dev, to divert the skb to another + * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the + * new device will be called if it exists. + * + * If the rx_handler decides the skb should be ignored, it should return + * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that + * are registered on exact device (ptype->dev == skb->dev). + * + * If the rx_handler didn't change skb->dev, but wants the skb to be normally + * delivered, it should return RX_HANDLER_PASS. + * + * A device without a registered rx_handler will behave as if rx_handler + * returned RX_HANDLER_PASS. + */ + +enum rx_handler_result { + RX_HANDLER_CONSUMED, + RX_HANDLER_ANOTHER, + RX_HANDLER_EXACT, + RX_HANDLER_PASS, +}; +typedef enum rx_handler_result rx_handler_result_t; +typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); + +void __napi_schedule(struct napi_struct *n); +void __napi_schedule_irqoff(struct napi_struct *n); + +static inline bool napi_disable_pending(struct napi_struct *n) +{ + return test_bit(NAPI_STATE_DISABLE, &n->state); +} + +bool napi_schedule_prep(struct napi_struct *n); + +/** + * napi_schedule - schedule NAPI poll + * @n: NAPI context + * + * Schedule NAPI poll routine to be called if it is not already + * running. + */ +static inline void napi_schedule(struct napi_struct *n) +{ + if (napi_schedule_prep(n)) + __napi_schedule(n); +} + +/** + * napi_schedule_irqoff - schedule NAPI poll + * @n: NAPI context + * + * Variant of napi_schedule(), assuming hard irqs are masked. + */ +static inline void napi_schedule_irqoff(struct napi_struct *n) +{ + if (napi_schedule_prep(n)) + __napi_schedule_irqoff(n); +} + +/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ +static inline bool napi_reschedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) { + __napi_schedule(napi); + return true; + } + return false; +} + +bool napi_complete_done(struct napi_struct *n, int work_done); +/** + * napi_complete - NAPI processing complete + * @n: NAPI context + * + * Mark NAPI processing as complete. + * Consider using napi_complete_done() instead. + * Return false if device should avoid rearming interrupts. + */ +static inline bool napi_complete(struct napi_struct *n) +{ + return napi_complete_done(n, 0); +} + +/** + * napi_hash_del - remove a NAPI from global table + * @napi: NAPI context + * + * Warning: caller must observe RCU grace period + * before freeing memory containing @napi, if + * this function returns true. 
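[Editor's note] The contract spelled out above is easiest to see in a skeleton handler. The sketch below (hypothetical names; registration via netdev_rx_handler_register() is not shown) passes frames through unless an upper device was attached as rx_handler_data, in which case it diverts them the way bridge/bond/team-style handlers do.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* rx_handler_data would have been set to the upper device at
 * registration time:
 *   netdev_rx_handler_register(lower, example_handle_frame, upper);
 * The handler runs from __netif_receive_skb() under rcu_read_lock().
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *upper = rcu_dereference(skb->dev->rx_handler_data);

	if (!upper)
		return RX_HANDLER_PASS;	/* behave as if no handler existed */

	/* Divert the frame: change skb->dev and ask the core to rerun the
	 * receive path for the new device.
	 */
	skb->dev = upper;
	*pskb = skb;
	return RX_HANDLER_ANOTHER;
}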
+ * Note: core networking stack automatically calls it + * from netif_napi_del(). + * Drivers might want to call this helper to combine all + * the needed RCU grace periods into a single one. + */ +bool napi_hash_del(struct napi_struct *napi); + +/** + * napi_disable - prevent NAPI from scheduling + * @n: NAPI context + * + * Stop NAPI from being scheduled on this context. + * Waits till any outstanding processing completes. + */ +void napi_disable(struct napi_struct *n); + +/** + * napi_enable - enable NAPI scheduling + * @n: NAPI context + * + * Resume NAPI from being scheduled on this context. + * Must be paired with napi_disable. + */ +static inline void napi_enable(struct napi_struct *n) +{ + BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); + smp_mb__before_atomic(); + clear_bit(NAPI_STATE_SCHED, &n->state); + clear_bit(NAPI_STATE_NPSVC, &n->state); +} + +/** + * napi_synchronize - wait until NAPI is not running + * @n: NAPI context + * + * Wait until NAPI is done being scheduled on this context. + * Waits till any outstanding processing completes but + * does not disable future activations. + */ +static inline void napi_synchronize(const struct napi_struct *n) +{ + if (IS_ENABLED(CONFIG_SMP)) + while (test_bit(NAPI_STATE_SCHED, &n->state)) + msleep(1); + else + barrier(); +} + +enum netdev_queue_state_t { + __QUEUE_STATE_DRV_XOFF, + __QUEUE_STATE_STACK_XOFF, + __QUEUE_STATE_FROZEN, +}; + +#define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF) +#define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF) +#define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN) + +#define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF) +#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ + QUEUE_STATE_FROZEN) +#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \ + QUEUE_STATE_FROZEN) + +/* + * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The + * netif_tx_* functions below are used to manipulate this flag. The + * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit + * queue independently. The netif_xmit_*stopped functions below are called + * to check if the queue has been stopped by the driver or stack (either + * of the XOFF bits are set in the state). Drivers should not need to call + * netif_xmit*stopped functions, they should only be using netif_tx_*. 
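[Editor's note] Taken together, the scheduling and completion helpers give the usual driver shape: the interrupt handler masks device interrupts and schedules NAPI, and the poll routine re-enables interrupts only when it finished under budget and napi_complete_done() accepted the completion. A sketch with hypothetical device hooks (example_mask_irqs(), example_unmask_irqs(), example_rx_clean()); priv->napi is assumed to have been registered with netif_napi_add() and enabled with napi_enable().

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static void example_mask_irqs(struct example_priv *priv)   { /* hw-specific */ }
static void example_unmask_irqs(struct example_priv *priv) { /* hw-specific */ }
static int example_rx_clean(struct example_priv *priv, int budget)
{
	return 0;	/* would process up to @budget received packets */
}

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_priv *priv = data;

	example_mask_irqs(priv);	/* stop further RX interrupts */
	napi_schedule(&priv->napi);	/* defer the work to softirq context */
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done = example_rx_clean(priv, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		example_unmask_irqs(priv);	/* all caught up: re-arm IRQs */

	return work_done;
}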
+ */ + +struct netdev_queue { +/* + * read-mostly part + */ + struct net_device *dev; + struct Qdisc __rcu *qdisc; + struct Qdisc *qdisc_sleeping; +#ifdef CONFIG_SYSFS + struct kobject kobj; +#endif +#if defined(CONFIG_XPS) && defined(CONFIG_NUMA) + int numa_node; +#endif + unsigned long tx_maxrate; + /* + * Number of TX timeouts for this queue + * (/sys/class/net/DEV/Q/trans_timeout) + */ + unsigned long trans_timeout; + + /* Subordinate device that the queue has been assigned to */ + struct net_device *sb_dev; +/* + * write-mostly part + */ + spinlock_t _xmit_lock ____cacheline_aligned_in_smp; + int xmit_lock_owner; + /* + * Time (in jiffies) of last Tx + */ + unsigned long trans_start; + + unsigned long state; + +#ifdef CONFIG_BQL + struct dql dql; +#endif +} ____cacheline_aligned_in_smp; + +extern int sysctl_fb_tunnels_only_for_init_net; + +static inline bool net_has_fallback_tunnels(const struct net *net) +{ + return net == &init_net || + !IS_ENABLED(CONFIG_SYSCTL) || + !sysctl_fb_tunnels_only_for_init_net; +} + +static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) +{ +#if defined(CONFIG_XPS) && defined(CONFIG_NUMA) + return q->numa_node; +#else + return NUMA_NO_NODE; +#endif +} + +static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) +{ +#if defined(CONFIG_XPS) && defined(CONFIG_NUMA) + q->numa_node = node; +#endif +} + +#ifdef CONFIG_RPS +/* + * This structure holds an RPS map which can be of variable length. The + * map is an array of CPUs. + */ +struct rps_map { + unsigned int len; + struct rcu_head rcu; + u16 cpus[0]; +}; +#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) + +/* + * The rps_dev_flow structure contains the mapping of a flow to a CPU, the + * tail pointer for that CPU's input queue at the time of last enqueue, and + * a hardware filter index. + */ +struct rps_dev_flow { + u16 cpu; + u16 filter; + unsigned int last_qtail; +}; +#define RPS_NO_FILTER 0xffff + +/* + * The rps_dev_flow_table structure contains a table of flow mappings. + */ +struct rps_dev_flow_table { + unsigned int mask; + struct rcu_head rcu; + struct rps_dev_flow flows[0]; +}; +#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ + ((_num) * sizeof(struct rps_dev_flow))) + +/* + * The rps_sock_flow_table contains mappings of flows to the last CPU + * on which they were processed by the application (set in recvmsg). + * Each entry is a 32bit value. Upper part is the high-order bits + * of flow hash, lower part is CPU number. + * rps_cpu_mask is used to partition the space, depending on number of + * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 + * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f, + * meaning we use 32-6=26 bits for the hash. 
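[Editor's note] netdev_queue_numa_node_write() above is only a hint, typically recorded by a multiqueue driver while it sets up its TX rings. A short sketch, where example_ring_node() stands in for however the driver tracks the node of each descriptor ring:

#include <linux/netdevice.h>
#include <linux/numa.h>

/* Hypothetical: return the NUMA node the i-th TX ring was allocated on. */
static int example_ring_node(struct net_device *dev, unsigned int i)
{
	return NUMA_NO_NODE;	/* hardware/driver specific */
}

static void example_set_queue_nodes(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netdev_queue_numa_node_write(txq, example_ring_node(dev, i));
	}
}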
+ */ +struct rps_sock_flow_table { + u32 mask; + + u32 ents[0] ____cacheline_aligned_in_smp; +}; +#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) + +#define RPS_NO_CPU 0xffff + +extern u32 rps_cpu_mask; +extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; + +static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, + u32 hash) +{ + if (table && hash) { + unsigned int index = hash & table->mask; + u32 val = hash & ~rps_cpu_mask; + + /* We only give a hint, preemption can change CPU under us */ + val |= raw_smp_processor_id(); + + if (table->ents[index] != val) + table->ents[index] = val; + } +} + +#ifdef CONFIG_RFS_ACCEL +bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, + u16 filter_id); +#endif +#endif /* CONFIG_RPS */ + +/* This structure contains an instance of an RX queue. */ +struct netdev_rx_queue { +#ifdef CONFIG_RPS + struct rps_map __rcu *rps_map; + struct rps_dev_flow_table __rcu *rps_flow_table; +#endif + struct kobject kobj; + struct net_device *dev; + struct xdp_rxq_info xdp_rxq; +} ____cacheline_aligned_in_smp; + +/* + * RX queue sysfs structures and functions. + */ +struct rx_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_rx_queue *queue, char *buf); + ssize_t (*store)(struct netdev_rx_queue *queue, + const char *buf, size_t len); +}; + +#ifdef CONFIG_XPS +/* + * This structure holds an XPS map which can be of variable length. The + * map is an array of queues. + */ +struct xps_map { + unsigned int len; + unsigned int alloc_len; + struct rcu_head rcu; + u16 queues[0]; +}; +#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) +#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ + - sizeof(struct xps_map)) / sizeof(u16)) + +/* + * This structure holds all XPS maps for device. Maps are indexed by CPU. + */ +struct xps_dev_maps { + struct rcu_head rcu; + struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */ +}; + +#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ + (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) + +#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ + (_rxqs * (_tcs) * sizeof(struct xps_map *))) + +#endif /* CONFIG_XPS */ + +#define TC_MAX_QUEUE 16 +#define TC_BITMASK 15 +/* HW offloaded queuing disciplines txq count and offset maps */ +struct netdev_tc_txq { + u16 count; + u16 offset; +}; + +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +/* + * This structure is to hold information about the device + * configured to run FCoE protocol stack. + */ +struct netdev_fcoe_hbainfo { + char manufacturer[64]; + char serial_number[64]; + char hardware_version[64]; + char driver_version[64]; + char optionrom_version[64]; + char firmware_version[64]; + char model[256]; + char model_description[256]; +}; +#endif + +#define MAX_PHYS_ITEM_ID_LEN 32 + +/* This structure holds a unique identifier to identify some + * physical item (port for example) used by a netdevice. 
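[Editor's note] rps_may_expire_flow() is the reclamation half of accelerated RFS: a driver that installed hardware steering filters from ndo_rx_flow_steer() polls it later to learn which filters it may tear down. A sketch of such an expiry scan; the filter table layout and the actual hardware removal are hypothetical.

#include <linux/netdevice.h>

#ifdef CONFIG_RFS_ACCEL
struct example_filter {
	u32 flow_id;	/* value the stack passed to ndo_rx_flow_steer() */
	u16 rxq_index;	/* RX queue the filter steers to */
	bool in_use;
};

/* Periodically called (e.g. from a workqueue) to free stale filters. */
static void example_expire_filters(struct net_device *dev,
				   struct example_filter *tbl, u16 nfilters)
{
	u16 id;

	for (id = 0; id < nfilters; id++) {
		if (!tbl[id].in_use)
			continue;
		if (rps_may_expire_flow(dev, tbl[id].rxq_index,
					tbl[id].flow_id, id)) {
			/* remove the hardware filter here (hw-specific) */
			tbl[id].in_use = false;
		}
	}
}
#endif /* CONFIG_RFS_ACCEL */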
+ */ +struct netdev_phys_item_id { + unsigned char id[MAX_PHYS_ITEM_ID_LEN]; + unsigned char id_len; +}; + +static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, + struct netdev_phys_item_id *b) +{ + return a->id_len == b->id_len && + memcmp(a->id, b->id, a->id_len) == 0; +} + +typedef u16 (*select_queue_fallback_t)(struct net_device *dev, + struct sk_buff *skb, + struct net_device *sb_dev); + +enum tc_setup_type { + TC_SETUP_QDISC_MQPRIO, + TC_SETUP_CLSU32, + TC_SETUP_CLSFLOWER, + TC_SETUP_CLSMATCHALL, + TC_SETUP_CLSBPF, + TC_SETUP_BLOCK, + TC_SETUP_QDISC_CBS, + TC_SETUP_QDISC_RED, + TC_SETUP_QDISC_PRIO, + TC_SETUP_QDISC_MQ, + TC_SETUP_QDISC_ETF, +}; + +/* These structures hold the attributes of bpf state that are being passed + * to the netdevice through the bpf op. + */ +enum bpf_netdev_command { + /* Set or clear a bpf program used in the earliest stages of packet + * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee + * is responsible for calling bpf_prog_put on any old progs that are + * stored. In case of error, the callee need not release the new prog + * reference, but on success it takes ownership and must bpf_prog_put + * when it is no longer used. + */ + XDP_SETUP_PROG, + XDP_SETUP_PROG_HW, + XDP_QUERY_PROG, + XDP_QUERY_PROG_HW, + /* BPF program for offload callbacks, invoked at program load time. */ + BPF_OFFLOAD_VERIFIER_PREP, + BPF_OFFLOAD_TRANSLATE, + BPF_OFFLOAD_DESTROY, + BPF_OFFLOAD_MAP_ALLOC, + BPF_OFFLOAD_MAP_FREE, + XDP_QUERY_XSK_UMEM, + XDP_SETUP_XSK_UMEM, +}; + +struct bpf_prog_offload_ops; +struct netlink_ext_ack; +struct xdp_umem; + +struct netdev_bpf { + enum bpf_netdev_command command; + union { + /* XDP_SETUP_PROG */ + struct { + u32 flags; + struct bpf_prog *prog; + struct netlink_ext_ack *extack; + }; + /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */ + struct { + u32 prog_id; + /* flags with which program was installed */ + u32 prog_flags; + }; + /* BPF_OFFLOAD_VERIFIER_PREP */ + struct { + struct bpf_prog *prog; + const struct bpf_prog_offload_ops *ops; /* callee set */ + } verifier; + /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ + struct { + struct bpf_prog *prog; + } offload; + /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ + struct { + struct bpf_offloaded_map *offmap; + }; + /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */ + struct { + struct xdp_umem *umem; /* out for query*/ + u16 queue_id; /* in for query */ + } xsk; + }; +}; + +#ifdef CONFIG_XFRM_OFFLOAD +struct xfrmdev_ops { + int (*xdo_dev_state_add) (struct xfrm_state *x); + void (*xdo_dev_state_delete) (struct xfrm_state *x); + void (*xdo_dev_state_free) (struct xfrm_state *x); + bool (*xdo_dev_offload_ok) (struct sk_buff *skb, + struct xfrm_state *x); + void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); +}; +#endif + +#if IS_ENABLED(CONFIG_TLS_DEVICE) +enum tls_offload_ctx_dir { + TLS_OFFLOAD_CTX_DIR_RX, + TLS_OFFLOAD_CTX_DIR_TX, +}; + +struct tls_crypto_info; +struct tls_context; + +struct tlsdev_ops { + int (*tls_dev_add)(struct net_device *netdev, struct sock *sk, + enum tls_offload_ctx_dir direction, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn); + void (*tls_dev_del)(struct net_device *netdev, + struct tls_context *ctx, + enum tls_offload_ctx_dir direction); + void (*tls_dev_resync_rx)(struct net_device *netdev, + struct sock *sk, u32 seq, u64 rcd_sn); +}; +#endif + +struct dev_ifalias { + struct rcu_head rcuhead; + char ifalias[]; +}; + +/* + * This structure defines the management hooks for network devices. 
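[Editor's note] The command/union layout above maps onto a fairly uniform driver pattern for the two XDP commands most drivers implement. A sketch follows (the driver-private xdp_prog field and the ring reconfiguration step are hypothetical); note that, per the comment above, a previously installed program must be released with bpf_prog_put().

#include <linux/filter.h>
#include <linux/netdevice.h>

struct example_priv {
	struct bpf_prog *xdp_prog;
	/* ... */
};

static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct example_priv *priv = netdev_priv(dev);
	struct bpf_prog *old;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		old = priv->xdp_prog;
		priv->xdp_prog = bpf->prog;	/* may be NULL to uninstall */
		/* reconfigure RX rings for the new program here (hw-specific) */
		if (old)
			bpf_prog_put(old);	/* drop the old reference */
		return 0;
	case XDP_QUERY_PROG:
		bpf->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}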
+ * The following hooks can be defined; unless noted otherwise, they are + * optional and can be filled with a null pointer. + * + * int (*ndo_init)(struct net_device *dev); + * This function is called once when a network device is registered. + * The network device can use this for any late stage initialization + * or semantic validation. It can fail with an error code which will + * be propagated back to register_netdev. + * + * void (*ndo_uninit)(struct net_device *dev); + * This function is called when device is unregistered or when registration + * fails. It is not called if init fails. + * + * int (*ndo_open)(struct net_device *dev); + * This function is called when a network device transitions to the up + * state. + * + * int (*ndo_stop)(struct net_device *dev); + * This function is called when a network device transitions to the down + * state. + * + * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, + * struct net_device *dev); + * Called when a packet needs to be transmitted. + * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop + * the queue before that can happen; it's for obsolete devices and weird + * corner cases, but the stack really does a non-trivial amount + * of useless work if you return NETDEV_TX_BUSY. + * Required; cannot be NULL. + * + * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, + * struct net_device *dev + * netdev_features_t features); + * Called by core transmit path to determine if device is capable of + * performing offload operations on a given packet. This is to give + * the device an opportunity to implement any restrictions that cannot + * be otherwise expressed by feature flags. The check is called with + * the set of features that the stack has calculated and it returns + * those the driver believes to be appropriate. + * + * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, + * struct net_device *sb_dev, + * select_queue_fallback_t fallback); + * Called to decide which queue to use when device supports multiple + * transmit queues. + * + * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); + * This function is called to allow device receiver to make + * changes to configuration when multicast or promiscuous is enabled. + * + * void (*ndo_set_rx_mode)(struct net_device *dev); + * This function is called device changes address list filtering. + * If driver handles unicast address filtering, it should set + * IFF_UNICAST_FLT in its priv_flags. + * + * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); + * This function is called when the Media Access Control address + * needs to be changed. If this interface is not defined, the + * MAC address can not be changed. + * + * int (*ndo_validate_addr)(struct net_device *dev); + * Test if Media Access Control address is valid for the device. + * + * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); + * Called when a user requests an ioctl which can't be handled by + * the generic interface code. If not defined ioctls return + * not supported error code. + * + * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); + * Used to set network devices bus interface parameters. This interface + * is retained for legacy reasons; new devices should use the bus + * interface (PCI) for low level management. + * + * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); + * Called when a user wants to change the Maximum Transfer Unit + * of a device. 
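[Editor's note] The remark that ndo_start_xmit() should "stop the queue before that can happen" corresponds to the pattern below: the driver checks for ring space after queueing and stops the queue itself, so it normally never has to return NETDEV_TX_BUSY. All hardware details are placeholders; the TX completion path would call netif_wake_queue() once space frees up again.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool example_ring_full(struct net_device *dev)
{
	return false;	/* hw-specific descriptor-ring check */
}

static void example_queue_to_hw(struct net_device *dev, struct sk_buff *skb)
{
	/* hw-specific: post the skb to the descriptor ring */
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (unlikely(example_ring_full(dev))) {
		/* Should not normally happen: the queue is stopped below
		 * whenever the ring fills, so the stack stops calling us.
		 */
		return NETDEV_TX_BUSY;
	}

	example_queue_to_hw(dev, skb);

	if (example_ring_full(dev))
		netif_stop_queue(dev);	/* re-awakened from TX completion */

	return NETDEV_TX_OK;
}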
+ * + * void (*ndo_tx_timeout)(struct net_device *dev); + * Callback used when the transmitter has not made any progress + * for dev->watchdog ticks. + * + * void (*ndo_get_stats64)(struct net_device *dev, + * struct rtnl_link_stats64 *storage); + * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); + * Called when a user wants to get the network device usage + * statistics. Drivers must do one of the following: + * 1. Define @ndo_get_stats64 to fill in a zero-initialised + * rtnl_link_stats64 structure passed by the caller. + * 2. Define @ndo_get_stats to update a net_device_stats structure + * (which should normally be dev->stats) and return a pointer to + * it. The structure may be changed asynchronously only if each + * field is written atomically. + * 3. Update dev->stats asynchronously and atomically, and define + * neither operation. + * + * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) + * Return true if this device supports offload stats of this attr_id. + * + * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, + * void *attr_data) + * Get statistics for offload operations by attr_id. Write it into the + * attr_data pointer. + * + * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); + * If device supports VLAN filtering this function is called when a + * VLAN id is registered. + * + * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); + * If device supports VLAN filtering this function is called when a + * VLAN id is unregistered. + * + * void (*ndo_poll_controller)(struct net_device *dev); + * + * SR-IOV management functions. + * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); + * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, + * u8 qos, __be16 proto); + * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, + * int max_tx_rate); + * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); + * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); + * int (*ndo_get_vf_config)(struct net_device *dev, + * int vf, struct ifla_vf_info *ivf); + * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); + * int (*ndo_set_vf_port)(struct net_device *dev, int vf, + * struct nlattr *port[]); + * + * Enable or disable the VF ability to query its RSS Redirection Table and + * Hash Key. This is needed since on some devices VF share this information + * with PF and querying it may introduce a theoretical security risk. + * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); + * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); + * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, + * void *type_data); + * Called to setup any 'tc' scheduler, classifier or action on @dev. + * This is always called from the stack with the rtnl lock held and netif + * tx queues stopped. This allows the netdevice to perform queue + * management safely. + * + * Fiber Channel over Ethernet (FCoE) offload functions. + * int (*ndo_fcoe_enable)(struct net_device *dev); + * Called when the FCoE protocol stack wants to start using LLD for FCoE + * so the underlying device can perform whatever needed configuration or + * initialization to support acceleration of FCoE traffic. 
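[Editor's note] Option 1 above, in its simplest form, is a plain copy of driver-maintained counters into the zero-initialised structure supplied by the caller. The counters in example_priv are hypothetical.

#include <linux/if_link.h>
#include <linux/netdevice.h>

struct example_priv {
	u64 rx_packets, rx_bytes;
	u64 tx_packets, tx_bytes;
	u64 tx_dropped;
};

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *storage)
{
	const struct example_priv *priv = netdev_priv(dev);

	/* @storage arrives zeroed, so only the fields we track are filled */
	storage->rx_packets = priv->rx_packets;
	storage->rx_bytes   = priv->rx_bytes;
	storage->tx_packets = priv->tx_packets;
	storage->tx_bytes   = priv->tx_bytes;
	storage->tx_dropped = priv->tx_dropped;
}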
+ * + * int (*ndo_fcoe_disable)(struct net_device *dev); + * Called when the FCoE protocol stack wants to stop using LLD for FCoE + * so the underlying device can perform whatever needed clean-ups to + * stop supporting acceleration of FCoE traffic. + * + * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, + * struct scatterlist *sgl, unsigned int sgc); + * Called when the FCoE Initiator wants to initialize an I/O that + * is a possible candidate for Direct Data Placement (DDP). The LLD can + * perform necessary setup and returns 1 to indicate the device is set up + * successfully to perform DDP on this I/O, otherwise this returns 0. + * + * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); + * Called when the FCoE Initiator/Target is done with the DDPed I/O as + * indicated by the FC exchange id 'xid', so the underlying device can + * clean up and reuse resources for later DDP requests. + * + * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, + * struct scatterlist *sgl, unsigned int sgc); + * Called when the FCoE Target wants to initialize an I/O that + * is a possible candidate for Direct Data Placement (DDP). The LLD can + * perform necessary setup and returns 1 to indicate the device is set up + * successfully to perform DDP on this I/O, otherwise this returns 0. + * + * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, + * struct netdev_fcoe_hbainfo *hbainfo); + * Called when the FCoE Protocol stack wants information on the underlying + * device. This information is utilized by the FCoE protocol stack to + * register attributes with Fiber Channel management service as per the + * FC-GS Fabric Device Management Information(FDMI) specification. + * + * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); + * Called when the underlying device wants to override default World Wide + * Name (WWN) generation mechanism in FCoE protocol stack to pass its own + * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE + * protocol stack to use. + * + * RFS acceleration. + * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, + * u16 rxq_index, u32 flow_id); + * Set hardware filter for RFS. rxq_index is the target queue index; + * flow_id is a flow ID to be passed to rps_may_expire_flow() later. + * Return the filter ID on success, or a negative error code. + * + * Slave management functions (for bridge, bonding, etc). + * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); + * Called to make another netdev an underling. + * + * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); + * Called to release previously enslaved netdev. + * + * Feature/offload setting functions. + * netdev_features_t (*ndo_fix_features)(struct net_device *dev, + * netdev_features_t features); + * Adjusts the requested feature flags according to device-specific + * constraints, and returns the resulting flags. Must not modify + * the device state. + * + * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); + * Called to update device configuration to new features. Passed + * feature set might be less than what was returned by ndo_fix_features()). + * Must return >0 or -errno if it changed dev->features itself. + * + * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], + * struct net_device *dev, + * const unsigned char *addr, u16 vid, u16 flags) + * Adds an FDB entry to dev for addr. 
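[Editor's note] ndo_fix_features() only massages the requested flag set and must not touch the device. A sketch of the kind of dependency a driver expresses there; the particular constraint ("LRO requires RX checksum offload") is only an illustration.

#include <linux/netdevice.h>

static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* Illustrative constraint: this hardware cannot do LRO unless RX
	 * checksum offload is also enabled, so drop LRO from the request.
	 */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}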
+ * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], + * struct net_device *dev, + * const unsigned char *addr, u16 vid) + * Deletes the FDB entry from dev coresponding to addr. + * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, + * struct net_device *dev, struct net_device *filter_dev, + * int *idx) + * Used to add FDB entries to dump requests. Implementers should add + * entries to skb and update idx with the number of entries. + * + * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, + * u16 flags) + * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, + * struct net_device *dev, u32 filter_mask, + * int nlflags) + * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, + * u16 flags); + * + * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); + * Called to change device carrier. Soft-devices (like dummy, team, etc) + * which do not represent real hardware may define this to allow their + * userspace components to manage their virtual carrier state. Devices + * that determine carrier state from physical hardware properties (eg + * network cables) or protocol-dependent mechanisms (eg + * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. + * + * int (*ndo_get_phys_port_id)(struct net_device *dev, + * struct netdev_phys_item_id *ppid); + * Called to get ID of physical port of this device. If driver does + * not implement this, it is assumed that the hw is not able to have + * multiple net devices on single physical port. + * + * void (*ndo_udp_tunnel_add)(struct net_device *dev, + * struct udp_tunnel_info *ti); + * Called by UDP tunnel to notify a driver about the UDP port and socket + * address family that a UDP tunnel is listnening to. It is called only + * when a new port starts listening. The operation is protected by the + * RTNL. + * + * void (*ndo_udp_tunnel_del)(struct net_device *dev, + * struct udp_tunnel_info *ti); + * Called by UDP tunnel to notify the driver about a UDP port and socket + * address family that the UDP tunnel is not listening to anymore. The + * operation is protected by the RTNL. + * + * void* (*ndo_dfwd_add_station)(struct net_device *pdev, + * struct net_device *dev) + * Called by upper layer devices to accelerate switching or other + * station functionality into hardware. 'pdev is the lowerdev + * to use for the offload and 'dev' is the net device that will + * back the offload. Returns a pointer to the private structure + * the upper layer will maintain. + * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv) + * Called by upper layer device to delete the station created + * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing + * the station and priv is the structure returned by the add + * operation. + * int (*ndo_set_tx_maxrate)(struct net_device *dev, + * int queue_index, u32 maxrate); + * Called when a user wants to set a max-rate limitation of specific + * TX queue. + * int (*ndo_get_iflink)(const struct net_device *dev); + * Called to get the iflink value of this device. + * void (*ndo_change_proto_down)(struct net_device *dev, + * bool proto_down); + * This function is used to pass protocol port error state information + * to the switch driver. The switch driver can react to the proto_down + * by doing a phys down on the associated switch port. + * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); + * This function is used to get egress tunnel information for given skb. 
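[Editor's note] For the soft devices mentioned above, ndo_change_carrier() usually reduces to flipping the carrier state the stack already tracks. A minimal sketch:

#include <linux/netdevice.h>

/* Only meaningful for software devices whose carrier state is not
 * derived from real hardware (see the comment above).
 */
static int example_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}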
+ * This is useful for retrieving outer tunnel header parameters while + * sampling packet. + * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); + * This function is used to specify the headroom that the skb must + * consider when allocation skb during packet reception. Setting + * appropriate rx headroom value allows avoiding skb head copy on + * forward. Setting a negative value resets the rx headroom to the + * default value. + * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); + * This function is used to set or query state related to XDP on the + * netdevice and manage BPF offload. See definition of + * enum bpf_netdev_command for details. + * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, + * u32 flags); + * This function is used to submit @n XDP packets for transmit on a + * netdevice. Returns number of frames successfully transmitted, frames + * that got dropped are freed/returned via xdp_return_frame(). + * Returns negative number, means general error invoking ndo, meaning + * no frames were xmit'ed and core-caller will free all frames. + */ +struct net_device_ops { + int (*ndo_init)(struct net_device *dev); + void (*ndo_uninit)(struct net_device *dev); + int (*ndo_open)(struct net_device *dev); + int (*ndo_stop)(struct net_device *dev); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, + struct net_device *dev); + netdev_features_t (*ndo_features_check)(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); + u16 (*ndo_select_queue)(struct net_device *dev, + struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); + void (*ndo_change_rx_flags)(struct net_device *dev, + int flags); + void (*ndo_set_rx_mode)(struct net_device *dev); + int (*ndo_set_mac_address)(struct net_device *dev, + void *addr); + int (*ndo_validate_addr)(struct net_device *dev); + int (*ndo_do_ioctl)(struct net_device *dev, + struct ifreq *ifr, int cmd); + int (*ndo_set_config)(struct net_device *dev, + struct ifmap *map); + int (*ndo_change_mtu)(struct net_device *dev, + int new_mtu); + int (*ndo_neigh_setup)(struct net_device *dev, + struct neigh_parms *); + void (*ndo_tx_timeout) (struct net_device *dev); + + void (*ndo_get_stats64)(struct net_device *dev, + struct rtnl_link_stats64 *storage); + bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); + int (*ndo_get_offload_stats)(int attr_id, + const struct net_device *dev, + void *attr_data); + struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); + + int (*ndo_vlan_rx_add_vid)(struct net_device *dev, + __be16 proto, u16 vid); + int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, + __be16 proto, u16 vid); +#ifdef CONFIG_NET_POLL_CONTROLLER + void (*ndo_poll_controller)(struct net_device *dev); + int (*ndo_netpoll_setup)(struct net_device *dev, + struct netpoll_info *info); + void (*ndo_netpoll_cleanup)(struct net_device *dev); +#endif + int (*ndo_set_vf_mac)(struct net_device *dev, + int queue, u8 *mac); + int (*ndo_set_vf_vlan)(struct net_device *dev, + int queue, u16 vlan, + u8 qos, __be16 proto); + int (*ndo_set_vf_rate)(struct net_device *dev, + int vf, int min_tx_rate, + int max_tx_rate); + int (*ndo_set_vf_spoofchk)(struct net_device *dev, + int vf, bool setting); + int (*ndo_set_vf_trust)(struct net_device *dev, + int vf, bool setting); + int (*ndo_get_vf_config)(struct net_device *dev, + int vf, + struct ifla_vf_info *ivf); + int (*ndo_set_vf_link_state)(struct net_device *dev, + int vf, int 
link_state); + int (*ndo_get_vf_stats)(struct net_device *dev, + int vf, + struct ifla_vf_stats + *vf_stats); + int (*ndo_set_vf_port)(struct net_device *dev, + int vf, + struct nlattr *port[]); + int (*ndo_get_vf_port)(struct net_device *dev, + int vf, struct sk_buff *skb); + int (*ndo_set_vf_guid)(struct net_device *dev, + int vf, u64 guid, + int guid_type); + int (*ndo_set_vf_rss_query_en)( + struct net_device *dev, + int vf, bool setting); + int (*ndo_setup_tc)(struct net_device *dev, + enum tc_setup_type type, + void *type_data); +#if IS_ENABLED(CONFIG_FCOE) + int (*ndo_fcoe_enable)(struct net_device *dev); + int (*ndo_fcoe_disable)(struct net_device *dev); + int (*ndo_fcoe_ddp_setup)(struct net_device *dev, + u16 xid, + struct scatterlist *sgl, + unsigned int sgc); + int (*ndo_fcoe_ddp_done)(struct net_device *dev, + u16 xid); + int (*ndo_fcoe_ddp_target)(struct net_device *dev, + u16 xid, + struct scatterlist *sgl, + unsigned int sgc); + int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, + struct netdev_fcoe_hbainfo *hbainfo); +#endif + +#if IS_ENABLED(CONFIG_LIBFCOE) +#define NETDEV_FCOE_WWNN 0 +#define NETDEV_FCOE_WWPN 1 + int (*ndo_fcoe_get_wwn)(struct net_device *dev, + u64 *wwn, int type); +#endif + +#ifdef CONFIG_RFS_ACCEL + int (*ndo_rx_flow_steer)(struct net_device *dev, + const struct sk_buff *skb, + u16 rxq_index, + u32 flow_id); +#endif + int (*ndo_add_slave)(struct net_device *dev, + struct net_device *slave_dev, + struct netlink_ext_ack *extack); + int (*ndo_del_slave)(struct net_device *dev, + struct net_device *slave_dev); + netdev_features_t (*ndo_fix_features)(struct net_device *dev, + netdev_features_t features); + int (*ndo_set_features)(struct net_device *dev, + netdev_features_t features); + int (*ndo_neigh_construct)(struct net_device *dev, + struct neighbour *n); + void (*ndo_neigh_destroy)(struct net_device *dev, + struct neighbour *n); + + int (*ndo_fdb_add)(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, + u16 flags); + int (*ndo_fdb_del)(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid); + int (*ndo_fdb_dump)(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, + int *idx); + + int (*ndo_bridge_setlink)(struct net_device *dev, + struct nlmsghdr *nlh, + u16 flags); + int (*ndo_bridge_getlink)(struct sk_buff *skb, + u32 pid, u32 seq, + struct net_device *dev, + u32 filter_mask, + int nlflags); + int (*ndo_bridge_dellink)(struct net_device *dev, + struct nlmsghdr *nlh, + u16 flags); + int (*ndo_change_carrier)(struct net_device *dev, + bool new_carrier); + int (*ndo_get_phys_port_id)(struct net_device *dev, + struct netdev_phys_item_id *ppid); + int (*ndo_get_phys_port_name)(struct net_device *dev, + char *name, size_t len); + void (*ndo_udp_tunnel_add)(struct net_device *dev, + struct udp_tunnel_info *ti); + void (*ndo_udp_tunnel_del)(struct net_device *dev, + struct udp_tunnel_info *ti); + void* (*ndo_dfwd_add_station)(struct net_device *pdev, + struct net_device *dev); + void (*ndo_dfwd_del_station)(struct net_device *pdev, + void *priv); + + int (*ndo_get_lock_subclass)(struct net_device *dev); + int (*ndo_set_tx_maxrate)(struct net_device *dev, + int queue_index, + u32 maxrate); + int (*ndo_get_iflink)(const struct net_device *dev); + int (*ndo_change_proto_down)(struct net_device *dev, + bool proto_down); + int (*ndo_fill_metadata_dst)(struct net_device *dev, + struct sk_buff 
*skb); + void (*ndo_set_rx_headroom)(struct net_device *dev, + int needed_headroom); + int (*ndo_bpf)(struct net_device *dev, + struct netdev_bpf *bpf); + int (*ndo_xdp_xmit)(struct net_device *dev, int n, + struct xdp_frame **xdp, + u32 flags); + int (*ndo_xsk_async_xmit)(struct net_device *dev, + u32 queue_id); +}; + +/** + * enum net_device_priv_flags - &struct net_device priv_flags + * + * These are the &struct net_device, they are only set internally + * by drivers and used in the kernel. These flags are invisible to + * userspace; this means that the order of these flags can change + * during any kernel release. + * + * You should have a pretty good reason to be extending these flags. + * + * @IFF_802_1Q_VLAN: 802.1Q VLAN device + * @IFF_EBRIDGE: Ethernet bridging device + * @IFF_BONDING: bonding master or slave + * @IFF_ISATAP: ISATAP interface (RFC4214) + * @IFF_WAN_HDLC: WAN HDLC device + * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to + * release skb->dst + * @IFF_DONT_BRIDGE: disallow bridging this ether dev + * @IFF_DISABLE_NETPOLL: disable netpoll at run-time + * @IFF_MACVLAN_PORT: device used as macvlan port + * @IFF_BRIDGE_PORT: device used as bridge port + * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port + * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit + * @IFF_UNICAST_FLT: Supports unicast filtering + * @IFF_TEAM_PORT: device used as team port + * @IFF_SUPP_NOFCS: device supports sending custom FCS + * @IFF_LIVE_ADDR_CHANGE: device supports hardware address + * change when it's running + * @IFF_MACVLAN: Macvlan device + * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account + * underlying stacked devices + * @IFF_L3MDEV_MASTER: device is an L3 master device + * @IFF_NO_QUEUE: device can run without qdisc attached + * @IFF_OPENVSWITCH: device is a Open vSwitch master + * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device + * @IFF_TEAM: device is a team device + * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured + * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external + * entity (i.e. 
the master device for bridged veth) + * @IFF_MACSEC: device is a MACsec device + * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook + * @IFF_FAILOVER: device is a failover master device + * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device + * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device + * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running + */ +enum netdev_priv_flags { + IFF_802_1Q_VLAN = 1<<0, + IFF_EBRIDGE = 1<<1, + IFF_BONDING = 1<<2, + IFF_ISATAP = 1<<3, + IFF_WAN_HDLC = 1<<4, + IFF_XMIT_DST_RELEASE = 1<<5, + IFF_DONT_BRIDGE = 1<<6, + IFF_DISABLE_NETPOLL = 1<<7, + IFF_MACVLAN_PORT = 1<<8, + IFF_BRIDGE_PORT = 1<<9, + IFF_OVS_DATAPATH = 1<<10, + IFF_TX_SKB_SHARING = 1<<11, + IFF_UNICAST_FLT = 1<<12, + IFF_TEAM_PORT = 1<<13, + IFF_SUPP_NOFCS = 1<<14, + IFF_LIVE_ADDR_CHANGE = 1<<15, + IFF_MACVLAN = 1<<16, + IFF_XMIT_DST_RELEASE_PERM = 1<<17, + IFF_L3MDEV_MASTER = 1<<18, + IFF_NO_QUEUE = 1<<19, + IFF_OPENVSWITCH = 1<<20, + IFF_L3MDEV_SLAVE = 1<<21, + IFF_TEAM = 1<<22, + IFF_RXFH_CONFIGURED = 1<<23, + IFF_PHONY_HEADROOM = 1<<24, + IFF_MACSEC = 1<<25, + IFF_NO_RX_HANDLER = 1<<26, + IFF_FAILOVER = 1<<27, + IFF_FAILOVER_SLAVE = 1<<28, + IFF_L3MDEV_RX_HANDLER = 1<<29, + IFF_LIVE_RENAME_OK = 1<<30, +}; + +#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN +#define IFF_EBRIDGE IFF_EBRIDGE +#define IFF_BONDING IFF_BONDING +#define IFF_ISATAP IFF_ISATAP +#define IFF_WAN_HDLC IFF_WAN_HDLC +#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE +#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE +#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL +#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT +#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT +#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH +#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING +#define IFF_UNICAST_FLT IFF_UNICAST_FLT +#define IFF_TEAM_PORT IFF_TEAM_PORT +#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS +#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE +#define IFF_MACVLAN IFF_MACVLAN +#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM +#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER +#define IFF_NO_QUEUE IFF_NO_QUEUE +#define IFF_OPENVSWITCH IFF_OPENVSWITCH +#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE +#define IFF_TEAM IFF_TEAM +#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED +#define IFF_MACSEC IFF_MACSEC +#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER +#define IFF_FAILOVER IFF_FAILOVER +#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE +#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER +#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK + +/** + * struct net_device - The DEVICE structure. + * + * Actually, this whole structure is a big mistake. It mixes I/O + * data with strictly "high-level" data, and it has to know about + * almost every data structure used in the INET module. + * + * @name: This is the first field of the "visible" part of this structure + * (i.e. as seen by users in the "Space.c" file). It is the name + * of the interface. 
+ * + * @name_hlist: Device name hash chain, please keep it close to name[] + * @ifalias: SNMP alias + * @mem_end: Shared memory end + * @mem_start: Shared memory start + * @base_addr: Device I/O address + * @irq: Device IRQ number + * + * @state: Generic network queuing layer state, see netdev_state_t + * @dev_list: The global list of network devices + * @napi_list: List entry used for polling NAPI devices + * @unreg_list: List entry when we are unregistering the + * device; see the function unregister_netdev + * @close_list: List entry used when we are closing the device + * @ptype_all: Device-specific packet handlers for all protocols + * @ptype_specific: Device-specific, protocol-specific packet handlers + * + * @adj_list: Directly linked devices, like slaves for bonding + * @features: Currently active device features + * @hw_features: User-changeable features + * + * @wanted_features: User-requested features + * @vlan_features: Mask of features inheritable by VLAN devices + * + * @hw_enc_features: Mask of features inherited by encapsulating devices + * This field indicates what encapsulation + * offloads the hardware is capable of doing, + * and drivers will need to set them appropriately. + * + * @mpls_features: Mask of features inheritable by MPLS + * + * @ifindex: interface index + * @group: The group the device belongs to + * + * @stats: Statistics struct, which was left as a legacy, use + * rtnl_link_stats64 instead + * + * @rx_dropped: Dropped packets by core network, + * do not use this in drivers + * @tx_dropped: Dropped packets by core network, + * do not use this in drivers + * @rx_nohandler: nohandler dropped packets by core network on + * inactive devices, do not use this in drivers + * @carrier_up_count: Number of times the carrier has been up + * @carrier_down_count: Number of times the carrier has been down + * + * @wireless_handlers: List of functions to handle Wireless Extensions, + * instead of ioctl, + * see for details. + * @wireless_data: Instance data managed by the core of wireless extensions + * + * @netdev_ops: Includes several pointers to callbacks, + * if one wants to override the ndo_*() functions + * @ethtool_ops: Management operations + * @ndisc_ops: Includes callbacks for different IPv6 neighbour + * discovery handling. Necessary for e.g. 6LoWPAN. + * @header_ops: Includes callbacks for creating,parsing,caching,etc + * of Layer 2 headers. + * + * @flags: Interface flags (a la BSD) + * @priv_flags: Like 'flags' but invisible to userspace, + * see if.h for the definitions + * @gflags: Global flags ( kept as legacy ) + * @padded: How much padding added by alloc_netdev() + * @operstate: RFC2863 operstate + * @link_mode: Mapping policy to operstate + * @if_port: Selectable AUI, TP, ... + * @dma: DMA channel + * @mtu: Interface MTU value + * @min_mtu: Interface Minimum MTU value + * @max_mtu: Interface Maximum MTU value + * @type: Interface hardware type + * @hard_header_len: Maximum hardware header length. + * @min_header_len: Minimum hardware header length + * + * @needed_headroom: Extra headroom the hardware may need, but not in all + * cases can this be guaranteed + * @needed_tailroom: Extra tailroom the hardware may need, but not in all + * cases can this be guaranteed. Some cases also use + * LL_MAX_HEADER instead to allocate the skb + * + * interface address info: + * + * @perm_addr: Permanent hw address + * @addr_assign_type: Hw address assignment type + * @addr_len: Hardware address length + * @upper_level: Maximum depth level of upper devices. 
+ * @lower_level: Maximum depth level of lower devices. + * @neigh_priv_len: Used in neigh_alloc() + * @dev_id: Used to differentiate devices that share + * the same link layer address + * @dev_port: Used to differentiate devices that share + * the same function + * @addr_list_lock: XXX: need comments on this one + * @uc_promisc: Counter that indicates promiscuous mode + * has been enabled due to the need to listen to + * additional unicast addresses in a device that + * does not implement ndo_set_rx_mode() + * @uc: unicast mac addresses + * @mc: multicast mac addresses + * @dev_addrs: list of device hw addresses + * @queues_kset: Group of all Kobjects in the Tx and RX queues + * @promiscuity: Number of times the NIC is told to work in + * promiscuous mode; if it becomes 0 the NIC will + * exit promiscuous mode + * @allmulti: Counter, enables or disables allmulticast mode + * + * @vlan_info: VLAN info + * @dsa_ptr: dsa specific data + * @tipc_ptr: TIPC specific data + * @atalk_ptr: AppleTalk link + * @ip_ptr: IPv4 specific data + * @dn_ptr: DECnet specific data + * @ip6_ptr: IPv6 specific data + * @ax25_ptr: AX.25 specific data + * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering + * + * @dev_addr: Hw address (before bcast, + * because most packets are unicast) + * + * @_rx: Array of RX queues + * @num_rx_queues: Number of RX queues + * allocated at register_netdev() time + * @real_num_rx_queues: Number of RX queues currently active in device + * + * @rx_handler: handler for received packets + * @rx_handler_data: XXX: need comments on this one + * @miniq_ingress: ingress/clsact qdisc specific data for + * ingress processing + * @ingress_queue: XXX: need comments on this one + * @broadcast: hw bcast address + * + * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, + * indexed by RX queue number. Assigned by driver. + * This must only be set if the ndo_rx_flow_steer + * operation is defined + * @index_hlist: Device index hash chain + * + * @_tx: Array of TX queues + * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time + * @real_num_tx_queues: Number of TX queues currently active in device + * @qdisc: Root qdisc from userspace point of view + * @tx_queue_len: Max frames per queue allowed + * @tx_global_lock: XXX: need comments on this one + * + * @xps_maps: XXX: need comments on this one + * @miniq_egress: clsact qdisc specific data for + * egress processing + * @watchdog_timeo: Represents the timeout that is used by + * the watchdog (see dev_watchdog()) + * @watchdog_timer: List of timers + * + * @pcpu_refcnt: Number of references to this device + * @todo_list: Delayed register/unregister + * @link_watch_list: XXX: need comments on this one + * + * @reg_state: Register/unregister state machine + * @dismantle: Device is going to be freed + * @rtnl_link_state: This enum represents the phases of creating + * a new link + * + * @needs_free_netdev: Should unregister perform free_netdev? 
+ * @priv_destructor: Called from unregister + * @npinfo: XXX: need comments on this one + * @nd_net: Network namespace this network device is inside + * + * @ml_priv: Mid-layer private + * @lstats: Loopback statistics + * @tstats: Tunnel statistics + * @dstats: Dummy statistics + * @vstats: Virtual ethernet statistics + * + * @garp_port: GARP + * @mrp_port: MRP + * + * @dev: Class/net/name entry + * @sysfs_groups: Space for optional device, statistics and wireless + * sysfs groups + * + * @sysfs_rx_queue_group: Space for optional per-rx queue attributes + * @rtnl_link_ops: Rtnl_link_ops + * + * @gso_max_size: Maximum size of generic segmentation offload + * @gso_max_segs: Maximum number of segments that can be passed to the + * NIC for GSO + * + * @dcbnl_ops: Data Center Bridging netlink ops + * @num_tc: Number of traffic classes in the net device + * @tc_to_txq: XXX: need comments on this one + * @prio_tc_map: XXX: need comments on this one + * + * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp + * + * @priomap: XXX: need comments on this one + * @phydev: Physical device may attach itself + * for hardware timestamping + * @sfp_bus: attached &struct sfp_bus structure. + * + * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock + * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount + * + * @proto_down: protocol port state information can be sent to the + * switch driver and used to set the phys state of the + * switch port. + * + * @wol_enabled: Wake-on-LAN is enabled + * + * FIXME: cleanup struct net_device such that network protocol info + * moves out. + */ + +struct net_device { + char name[IFNAMSIZ]; + struct hlist_node name_hlist; + struct dev_ifalias __rcu *ifalias; + /* + * I/O specific fields + * FIXME: Merge these and struct ifmap into one + */ + unsigned long mem_end; + unsigned long mem_start; + unsigned long base_addr; + int irq; + + /* + * Some hardware also needs these fields (state,dev_list, + * napi_list,unreg_list,close_list) but they are not + * part of the usual set specified in Space.c. 
+ */ + + unsigned long state; + + struct list_head dev_list; + struct list_head napi_list; + struct list_head unreg_list; + struct list_head close_list; + struct list_head ptype_all; + struct list_head ptype_specific; + + struct { + struct list_head upper; + struct list_head lower; + } adj_list; + + netdev_features_t features; + netdev_features_t hw_features; + netdev_features_t wanted_features; + netdev_features_t vlan_features; + netdev_features_t hw_enc_features; + netdev_features_t mpls_features; + netdev_features_t gso_partial_features; + + int ifindex; + int group; + + struct net_device_stats stats; + + atomic_long_t rx_dropped; + atomic_long_t tx_dropped; + atomic_long_t rx_nohandler; + + /* Stats to monitor link on/off, flapping */ + atomic_t carrier_up_count; + atomic_t carrier_down_count; + +#ifdef CONFIG_WIRELESS_EXT + const struct iw_handler_def *wireless_handlers; + struct iw_public_data *wireless_data; +#endif + const struct net_device_ops *netdev_ops; + const struct ethtool_ops *ethtool_ops; +#ifdef CONFIG_NET_SWITCHDEV + const struct switchdev_ops *switchdev_ops; +#endif +#ifdef CONFIG_NET_L3_MASTER_DEV + const struct l3mdev_ops *l3mdev_ops; +#endif +#if IS_ENABLED(CONFIG_IPV6) + const struct ndisc_ops *ndisc_ops; +#endif + +#ifdef CONFIG_XFRM_OFFLOAD + const struct xfrmdev_ops *xfrmdev_ops; +#endif + +#if IS_ENABLED(CONFIG_TLS_DEVICE) + const struct tlsdev_ops *tlsdev_ops; +#endif + + const struct header_ops *header_ops; + + unsigned int flags; + unsigned int priv_flags; + + unsigned short gflags; + unsigned short padded; + + unsigned char operstate; + unsigned char link_mode; + + unsigned char if_port; + unsigned char dma; + + /* Note : dev->mtu is often read without holding a lock. + * Writers usually hold RTNL. + * It is recommended to use READ_ONCE() to annotate the reads, + * and to use WRITE_ONCE() to annotate the writes. + */ + unsigned int mtu; + unsigned int min_mtu; + unsigned int max_mtu; + unsigned short type; + unsigned short hard_header_len; + unsigned char min_header_len; + + unsigned short needed_headroom; + unsigned short needed_tailroom; + + /* Interface address info. 
*/ + unsigned char perm_addr[MAX_ADDR_LEN]; + unsigned char addr_assign_type; + unsigned char addr_len; + unsigned char upper_level; + unsigned char lower_level; + unsigned short neigh_priv_len; + unsigned short dev_id; + unsigned short dev_port; + spinlock_t addr_list_lock; + unsigned char name_assign_type; + bool uc_promisc; + struct netdev_hw_addr_list uc; + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + +#ifdef CONFIG_SYSFS + struct kset *queues_kset; +#endif + unsigned int promiscuity; + unsigned int allmulti; + + + /* Protocol-specific pointers */ + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + struct vlan_info __rcu *vlan_info; +#endif +#if IS_ENABLED(CONFIG_NET_DSA) + struct dsa_port *dsa_ptr; +#endif +#if IS_ENABLED(CONFIG_TIPC) + struct tipc_bearer __rcu *tipc_ptr; +#endif +#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK) + void *atalk_ptr; +#endif + struct in_device __rcu *ip_ptr; +#if IS_ENABLED(CONFIG_DECNET) + struct dn_dev __rcu *dn_ptr; +#endif + struct inet6_dev __rcu *ip6_ptr; +#if IS_ENABLED(CONFIG_AX25) + void *ax25_ptr; +#endif + struct wireless_dev *ieee80211_ptr; + struct wpan_dev *ieee802154_ptr; +#if IS_ENABLED(CONFIG_MPLS_ROUTING) + struct mpls_dev __rcu *mpls_ptr; +#endif + +/* + * Cache lines mostly used on receive path (including eth_type_trans()) + */ + /* Interface address info used in eth_type_trans() */ + unsigned char *dev_addr; + + struct netdev_rx_queue *_rx; + unsigned int num_rx_queues; + unsigned int real_num_rx_queues; + + struct bpf_prog __rcu *xdp_prog; + unsigned long gro_flush_timeout; + rx_handler_func_t __rcu *rx_handler; + void __rcu *rx_handler_data; + +#ifdef CONFIG_NET_CLS_ACT + struct mini_Qdisc __rcu *miniq_ingress; +#endif + struct netdev_queue __rcu *ingress_queue; +#ifdef CONFIG_NETFILTER_INGRESS + struct nf_hook_entries __rcu *nf_hooks_ingress; +#endif + + unsigned char broadcast[MAX_ADDR_LEN]; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rx_cpu_rmap; +#endif + struct hlist_node index_hlist; + +/* + * Cache lines mostly used on transmit path + */ + struct netdev_queue *_tx ____cacheline_aligned_in_smp; + unsigned int num_tx_queues; + unsigned int real_num_tx_queues; + struct Qdisc *qdisc; +#ifdef CONFIG_NET_SCHED + DECLARE_HASHTABLE (qdisc_hash, 4); +#endif + unsigned int tx_queue_len; + spinlock_t tx_global_lock; + int watchdog_timeo; + +#ifdef CONFIG_XPS + struct xps_dev_maps __rcu *xps_cpus_map; + struct xps_dev_maps __rcu *xps_rxqs_map; +#endif +#ifdef CONFIG_NET_CLS_ACT + struct mini_Qdisc __rcu *miniq_egress; +#endif + + /* These may be needed for future network-power-down code. 
*/ + struct timer_list watchdog_timer; + + int __percpu *pcpu_refcnt; + struct list_head todo_list; + + struct list_head link_watch_list; + + enum { NETREG_UNINITIALIZED=0, + NETREG_REGISTERED, /* completed register_netdevice */ + NETREG_UNREGISTERING, /* called unregister_netdevice */ + NETREG_UNREGISTERED, /* completed unregister todo */ + NETREG_RELEASED, /* called free_netdev */ + NETREG_DUMMY, /* dummy device for NAPI poll */ + } reg_state:8; + + bool dismantle; + + enum { + RTNL_LINK_INITIALIZED, + RTNL_LINK_INITIALIZING, + } rtnl_link_state:16; + + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *dev); + +#ifdef CONFIG_NETPOLL + struct netpoll_info __rcu *npinfo; +#endif + + possible_net_t nd_net; + + /* mid-layer private */ + union { + void *ml_priv; + struct pcpu_lstats __percpu *lstats; + struct pcpu_sw_netstats __percpu *tstats; + struct pcpu_dstats __percpu *dstats; + struct pcpu_vstats __percpu *vstats; + }; + +#if IS_ENABLED(CONFIG_GARP) + struct garp_port __rcu *garp_port; +#endif +#if IS_ENABLED(CONFIG_MRP) + struct mrp_port __rcu *mrp_port; +#endif + + struct device dev; + const struct attribute_group *sysfs_groups[4]; + const struct attribute_group *sysfs_rx_queue_group; + + const struct rtnl_link_ops *rtnl_link_ops; + + /* for setting kernel sock attribute on TCP connection setup */ +#define GSO_MAX_SIZE 65536 + unsigned int gso_max_size; +#define GSO_MAX_SEGS 65535 + u16 gso_max_segs; + +#ifdef CONFIG_DCB + const struct dcbnl_rtnl_ops *dcbnl_ops; +#endif + s16 num_tc; + struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; + u8 prio_tc_map[TC_BITMASK + 1]; + +#if IS_ENABLED(CONFIG_FCOE) + unsigned int fcoe_ddp_xid; +#endif +#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) + struct netprio_map __rcu *priomap; +#endif + struct phy_device *phydev; + struct sfp_bus *sfp_bus; + struct lock_class_key *qdisc_tx_busylock; + struct lock_class_key *qdisc_running_key; + bool proto_down; + unsigned wol_enabled:1; +}; +#define to_net_dev(d) container_of(d, struct net_device, dev) + +static inline bool netif_elide_gro(const struct net_device *dev) +{ + if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) + return true; + return false; +} + +#define NETDEV_ALIGN 32 + +static inline +int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) +{ + return dev->prio_tc_map[prio & TC_BITMASK]; +} + +static inline +int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) +{ + if (tc >= dev->num_tc) + return -EINVAL; + + dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; + return 0; +} + +int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); +void netdev_reset_tc(struct net_device *dev); +int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); +int netdev_set_num_tc(struct net_device *dev, u8 num_tc); + +static inline +int netdev_get_num_tc(struct net_device *dev) +{ + return dev->num_tc; +} + +void netdev_unbind_sb_channel(struct net_device *dev, + struct net_device *sb_dev); +int netdev_bind_sb_channel_queue(struct net_device *dev, + struct net_device *sb_dev, + u8 tc, u16 count, u16 offset); +int netdev_set_sb_channel(struct net_device *dev, u16 channel); +static inline int netdev_get_sb_channel(struct net_device *dev) +{ + return max_t(int, -dev->num_tc, 0); +} + +static inline +struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, + unsigned int index) +{ + return &dev->_tx[index]; +} + +static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, + const struct sk_buff *skb) +{ + return 
netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); +} + +static inline void netdev_for_each_tx_queue(struct net_device *dev, + void (*f)(struct net_device *, + struct netdev_queue *, + void *), + void *arg) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) + f(dev, &dev->_tx[i], arg); +} + +#define netdev_lockdep_set_classes(dev) \ +{ \ + static struct lock_class_key qdisc_tx_busylock_key; \ + static struct lock_class_key qdisc_running_key; \ + static struct lock_class_key qdisc_xmit_lock_key; \ + static struct lock_class_key dev_addr_list_lock_key; \ + unsigned int i; \ + \ + (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ + (dev)->qdisc_running_key = &qdisc_running_key; \ + lockdep_set_class(&(dev)->addr_list_lock, \ + &dev_addr_list_lock_key); \ + for (i = 0; i < (dev)->num_tx_queues; i++) \ + lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ + &qdisc_xmit_lock_key); \ +} + +struct netdev_queue *netdev_pick_tx(struct net_device *dev, + struct sk_buff *skb, + struct net_device *sb_dev); + +/* returns the headroom that the master device needs to take in account + * when forwarding to this dev + */ +static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) +{ + return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; +} + +static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) +{ + if (dev->netdev_ops->ndo_set_rx_headroom) + dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); +} + +/* set the device rx headroom to the dev's default */ +static inline void netdev_reset_rx_headroom(struct net_device *dev) +{ + netdev_set_rx_headroom(dev, -1); +} + +/* + * Net namespace inlines + */ +static inline +struct net *dev_net(const struct net_device *dev) +{ + return read_pnet(&dev->nd_net); +} + +static inline +void dev_net_set(struct net_device *dev, struct net *net) +{ + write_pnet(&dev->nd_net, net); +} + +/** + * netdev_priv - access network device private data + * @dev: network device + * + * Get network device private data + */ +static inline void *netdev_priv(const struct net_device *dev) +{ + return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); +} + +/* Set the sysfs physical device reference for the network logical device + * if set prior to registration will cause a symlink during initialization. + */ +#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) + +/* Set the sysfs device type for the network logical device to allow + * fine-grained identification of different network device types. For + * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc. + */ +#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) + +/* Default NAPI poll() weight + * Device drivers are strongly advised to not use bigger value + */ +#define NAPI_POLL_WEIGHT 64 + +/** + * netif_napi_add - initialize a NAPI context + * @dev: network device + * @napi: NAPI context + * @poll: polling function + * @weight: default weight + * + * netif_napi_add() must be used to initialize a NAPI context prior to calling + * *any* of the other NAPI-related functions. + */ +void netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight); + +/** + * netif_tx_napi_add - initialize a NAPI context + * @dev: network device + * @napi: NAPI context + * @poll: polling function + * @weight: default weight + * + * This variant of netif_napi_add() should be used from drivers using NAPI + * to exclusively poll a TX queue. 
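+ *
+ * A minimal sketch (hypothetical driver names, not part of this header):
+ * a driver keeping a dedicated TX NAPI context in its private data would
+ * typically do, from its probe or open path:
+ *
+ *	netif_tx_napi_add(dev, &priv->tx_napi, foo_tx_poll, NAPI_POLL_WEIGHT);
+ *	napi_enable(&priv->tx_napi);
+ *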
+ * This will avoid we add it into napi_hash[], thus polluting this hash table. + */ +static inline void netif_tx_napi_add(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight) +{ + set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); + netif_napi_add(dev, napi, poll, weight); +} + +/** + * netif_napi_del - remove a NAPI context + * @napi: NAPI context + * + * netif_napi_del() removes a NAPI context from the network device NAPI list + */ +void netif_napi_del(struct napi_struct *napi); + +struct napi_gro_cb { + /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ + void *frag0; + + /* Length of frag0. */ + unsigned int frag0_len; + + /* This indicates where we are processing relative to skb->data. */ + int data_offset; + + /* This is non-zero if the packet cannot be merged with the new skb. */ + u16 flush; + + /* Save the IP ID here and check when we get to the transport layer */ + u16 flush_id; + + /* Number of segments aggregated. */ + u16 count; + + /* Start offset for remote checksum offload */ + u16 gro_remcsum_start; + + /* jiffies when first packet was created/queued */ + unsigned long age; + + /* Used in ipv6_gro_receive() and foo-over-udp */ + u16 proto; + + /* This is non-zero if the packet may be of the same flow. */ + u8 same_flow:1; + + /* Used in tunnel GRO receive */ + u8 encap_mark:1; + + /* GRO checksum is valid */ + u8 csum_valid:1; + + /* Number of checksums via CHECKSUM_UNNECESSARY */ + u8 csum_cnt:3; + + /* Free the skb? */ + u8 free:2; +#define NAPI_GRO_FREE 1 +#define NAPI_GRO_FREE_STOLEN_HEAD 2 + + /* Used in foo-over-udp, set in udp[46]_gro_receive */ + u8 is_ipv6:1; + + /* Used in GRE, set in fou/gue_gro_receive */ + u8 is_fou:1; + + /* Used to determine if flush_id can be ignored */ + u8 is_atomic:1; + + /* Number of gro_receive callbacks this packet already went through */ + u8 recursion_counter:4; + + /* 1 bit hole */ + + /* used to support CHECKSUM_COMPLETE for tunneling protocols */ + __wsum csum; + + /* used in skb_gro_receive() slow path */ + struct sk_buff *last; +}; + +#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) + +#define GRO_RECURSION_LIMIT 15 +static inline int gro_recursion_inc_test(struct sk_buff *skb) +{ + return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; +} + +typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *); +static inline struct sk_buff *call_gro_receive(gro_receive_t cb, + struct list_head *head, + struct sk_buff *skb) +{ + if (unlikely(gro_recursion_inc_test(skb))) { + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; + } + + return cb(head, skb); +} + +typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *, + struct sk_buff *); +static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, + struct sock *sk, + struct list_head *head, + struct sk_buff *skb) +{ + if (unlikely(gro_recursion_inc_test(skb))) { + NAPI_GRO_CB(skb)->flush |= 1; + return NULL; + } + + return cb(sk, head, skb); +} + +struct packet_type { + __be16 type; /* This is really htons(ether_type). 
*/ + struct net_device *dev; /* NULL is wildcarded here */ + int (*func) (struct sk_buff *, + struct net_device *, + struct packet_type *, + struct net_device *); + void (*list_func) (struct list_head *, + struct packet_type *, + struct net_device *); + bool (*id_match)(struct packet_type *ptype, + struct sock *sk); + struct net *af_packet_net; + void *af_packet_priv; + struct list_head list; +}; + +struct offload_callbacks { + struct sk_buff *(*gso_segment)(struct sk_buff *skb, + netdev_features_t features); + struct sk_buff *(*gro_receive)(struct list_head *head, + struct sk_buff *skb); + int (*gro_complete)(struct sk_buff *skb, int nhoff); +}; + +struct packet_offload { + __be16 type; /* This is really htons(ether_type). */ + u16 priority; + struct offload_callbacks callbacks; + struct list_head list; +}; + +/* often modified stats are per-CPU, other are shared (netdev->stats) */ +struct pcpu_sw_netstats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; +}; + +#define __netdev_alloc_pcpu_stats(type, gfp) \ +({ \ + typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ + if (pcpu_stats) { \ + int __cpu; \ + for_each_possible_cpu(__cpu) { \ + typeof(type) *stat; \ + stat = per_cpu_ptr(pcpu_stats, __cpu); \ + u64_stats_init(&stat->syncp); \ + } \ + } \ + pcpu_stats; \ +}) + +#define netdev_alloc_pcpu_stats(type) \ + __netdev_alloc_pcpu_stats(type, GFP_KERNEL) + +enum netdev_lag_tx_type { + NETDEV_LAG_TX_TYPE_UNKNOWN, + NETDEV_LAG_TX_TYPE_RANDOM, + NETDEV_LAG_TX_TYPE_BROADCAST, + NETDEV_LAG_TX_TYPE_ROUNDROBIN, + NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, + NETDEV_LAG_TX_TYPE_HASH, +}; + +enum netdev_lag_hash { + NETDEV_LAG_HASH_NONE, + NETDEV_LAG_HASH_L2, + NETDEV_LAG_HASH_L34, + NETDEV_LAG_HASH_L23, + NETDEV_LAG_HASH_E23, + NETDEV_LAG_HASH_E34, + NETDEV_LAG_HASH_UNKNOWN, +}; + +struct netdev_lag_upper_info { + enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; +}; + +struct netdev_lag_lower_state_info { + u8 link_up : 1, + tx_enabled : 1; +}; + +#include + +/* netdevice notifier chain. Please remember to update netdev_cmd_to_name() + * and the rtnetlink notification exclusion list in rtnetlink_event() when + * adding new types. 
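+ *
+ * A minimal consumer sketch (hypothetical names): a callback registered
+ * with register_netdevice_notifier() receives one of these commands as
+ * its event argument:
+ *
+ *	static int foo_netdev_event(struct notifier_block *nb,
+ *				    unsigned long event, void *ptr)
+ *	{
+ *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ *
+ *		if (event == NETDEV_UP)
+ *			netdev_info(dev, "%s\n", netdev_cmd_to_name(event));
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block foo_nb = { .notifier_call = foo_netdev_event };
+ *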
+ */ +enum netdev_cmd { + NETDEV_UP = 1, /* For now you can't veto a device up/down */ + NETDEV_DOWN, + NETDEV_REBOOT, /* Tell a protocol stack a network interface + detected a hardware crash and restarted + - we can use this eg to kick tcp sessions + once done */ + NETDEV_CHANGE, /* Notify device state change */ + NETDEV_REGISTER, + NETDEV_UNREGISTER, + NETDEV_CHANGEMTU, /* notify after mtu change happened */ + NETDEV_CHANGEADDR, + NETDEV_GOING_DOWN, + NETDEV_CHANGENAME, + NETDEV_FEAT_CHANGE, + NETDEV_BONDING_FAILOVER, + NETDEV_PRE_UP, + NETDEV_PRE_TYPE_CHANGE, + NETDEV_POST_TYPE_CHANGE, + NETDEV_POST_INIT, + NETDEV_RELEASE, + NETDEV_NOTIFY_PEERS, + NETDEV_JOIN, + NETDEV_CHANGEUPPER, + NETDEV_RESEND_IGMP, + NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ + NETDEV_CHANGEINFODATA, + NETDEV_BONDING_INFO, + NETDEV_PRECHANGEUPPER, + NETDEV_CHANGELOWERSTATE, + NETDEV_UDP_TUNNEL_PUSH_INFO, + NETDEV_UDP_TUNNEL_DROP_INFO, + NETDEV_CHANGE_TX_QUEUE_LEN, + NETDEV_CVLAN_FILTER_PUSH_INFO, + NETDEV_CVLAN_FILTER_DROP_INFO, + NETDEV_SVLAN_FILTER_PUSH_INFO, + NETDEV_SVLAN_FILTER_DROP_INFO, +}; +const char *netdev_cmd_to_name(enum netdev_cmd cmd); + +int register_netdevice_notifier(struct notifier_block *nb); +int unregister_netdevice_notifier(struct notifier_block *nb); + +struct netdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; +}; + +struct netdev_notifier_info_ext { + struct netdev_notifier_info info; /* must be first */ + union { + u32 mtu; + } ext; +}; + +struct netdev_notifier_change_info { + struct netdev_notifier_info info; /* must be first */ + unsigned int flags_changed; +}; + +struct netdev_notifier_changeupper_info { + struct netdev_notifier_info info; /* must be first */ + struct net_device *upper_dev; /* new upper dev */ + bool master; /* is upper dev master */ + bool linking; /* is the notification for link or unlink */ + void *upper_info; /* upper dev info */ +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; /* must be first */ + void *lower_state_info; /* is lower dev state */ +}; + +static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, + struct net_device *dev) +{ + info->dev = dev; + info->extack = NULL; +} + +static inline struct net_device * +netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) +{ + return info->dev; +} + +static inline struct netlink_ext_ack * +netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) +{ + return info->extack; +} + +int call_netdevice_notifiers(unsigned long val, struct net_device *dev); + + +extern rwlock_t dev_base_lock; /* Device list lock */ + +#define for_each_netdev(net, d) \ + list_for_each_entry(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_reverse(net, d) \ + list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_rcu(net, d) \ + list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_safe(net, d, n) \ + list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) +#define for_each_netdev_continue(net, d) \ + list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_continue_rcu(net, d) \ + list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_in_bond_rcu(bond, slave) \ + for_each_netdev_rcu(&init_net, slave) \ + if (netdev_master_upper_dev_get_rcu(slave) == (bond)) +#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) + +static 
inline struct net_device *next_net_device(struct net_device *dev) +{ + struct list_head *lh; + struct net *net; + + net = dev_net(dev); + lh = dev->dev_list.next; + return lh == &net->dev_base_head ? NULL : net_device_entry(lh); +} + +static inline struct net_device *next_net_device_rcu(struct net_device *dev) +{ + struct list_head *lh; + struct net *net; + + net = dev_net(dev); + lh = rcu_dereference(list_next_rcu(&dev->dev_list)); + return lh == &net->dev_base_head ? NULL : net_device_entry(lh); +} + +static inline struct net_device *first_net_device(struct net *net) +{ + return list_empty(&net->dev_base_head) ? NULL : + net_device_entry(net->dev_base_head.next); +} + +static inline struct net_device *first_net_device_rcu(struct net *net) +{ + struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); + + return lh == &net->dev_base_head ? NULL : net_device_entry(lh); +} + +int netdev_boot_setup_check(struct net_device *dev); +unsigned long netdev_boot_base(const char *prefix, int unit); +struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, + const char *hwaddr); +struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); +struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); +void dev_add_pack(struct packet_type *pt); +void dev_remove_pack(struct packet_type *pt); +void __dev_remove_pack(struct packet_type *pt); +void dev_add_offload(struct packet_offload *po); +void dev_remove_offload(struct packet_offload *po); + +int dev_get_iflink(const struct net_device *dev); +int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); +struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, + unsigned short mask); +struct net_device *dev_get_by_name(struct net *net, const char *name); +struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); +struct net_device *__dev_get_by_name(struct net *net, const char *name); +int dev_alloc_name(struct net_device *dev, const char *name); +int dev_open(struct net_device *dev); +void dev_close(struct net_device *dev); +void dev_close_many(struct list_head *head, bool unlink); +void dev_disable_lro(struct net_device *dev); +int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); +u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); +u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); +int dev_queue_xmit(struct sk_buff *skb); +int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); +int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); +int register_netdevice(struct net_device *dev); +void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); +void unregister_netdevice_many(struct list_head *head); +static inline void unregister_netdevice(struct net_device *dev) +{ + unregister_netdevice_queue(dev, NULL); +} + +int netdev_refcnt_read(const struct net_device *dev); +void free_netdev(struct net_device *dev); +void netdev_freemem(struct net_device *dev); +void synchronize_net(void); +int init_dummy_netdev(struct net_device *dev); + +struct net_device *dev_get_by_index(struct net *net, int ifindex); +struct net_device *__dev_get_by_index(struct net *net, int ifindex); +struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); +struct net_device *dev_get_by_napi_id(unsigned int napi_id); +int 
netdev_get_name(struct net *net, char *name, int ifindex); +int dev_restart(struct net_device *dev); +int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); + +static inline unsigned int skb_gro_offset(const struct sk_buff *skb) +{ + return NAPI_GRO_CB(skb)->data_offset; +} + +static inline unsigned int skb_gro_len(const struct sk_buff *skb) +{ + return skb->len - NAPI_GRO_CB(skb)->data_offset; +} + +static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) +{ + NAPI_GRO_CB(skb)->data_offset += len; +} + +static inline void *skb_gro_header_fast(struct sk_buff *skb, + unsigned int offset) +{ + return NAPI_GRO_CB(skb)->frag0 + offset; +} + +static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) +{ + return NAPI_GRO_CB(skb)->frag0_len < hlen; +} + +static inline void skb_gro_frag0_invalidate(struct sk_buff *skb) +{ + NAPI_GRO_CB(skb)->frag0 = NULL; + NAPI_GRO_CB(skb)->frag0_len = 0; +} + +static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, + unsigned int offset) +{ + if (!pskb_may_pull(skb, hlen)) + return NULL; + + skb_gro_frag0_invalidate(skb); + return skb->data + offset; +} + +static inline void *skb_gro_network_header(struct sk_buff *skb) +{ + return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + + skb_network_offset(skb); +} + +static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) +{ + if (NAPI_GRO_CB(skb)->csum_valid) + NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, + csum_partial(start, len, 0)); +} + +/* GRO checksum functions. These are logical equivalents of the normal + * checksum functions (in skbuff.h) except that they operate on the GRO + * offsets and fields in sk_buff. + */ + +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb); + +static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb) +{ + return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb)); +} + +static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, + bool zero_okay, + __sum16 check) +{ + return ((skb->ip_summed != CHECKSUM_PARTIAL || + skb_checksum_start_offset(skb) < + skb_gro_offset(skb)) && + !skb_at_gro_remcsum_start(skb) && + NAPI_GRO_CB(skb)->csum_cnt == 0 && + (!zero_okay || check)); +} + +static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb, + __wsum psum) +{ + if (NAPI_GRO_CB(skb)->csum_valid && + !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) + return 0; + + NAPI_GRO_CB(skb)->csum = psum; + + return __skb_gro_checksum_complete(skb); +} + +static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) +{ + if (NAPI_GRO_CB(skb)->csum_cnt > 0) { + /* Consume a checksum from CHECKSUM_UNNECESSARY */ + NAPI_GRO_CB(skb)->csum_cnt--; + } else { + /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we + * verified a new top level checksum or an encapsulated one + * during GRO. This saves work if we fallback to normal path. 
+ */ + __skb_incr_checksum_unnecessary(skb); + } +} + +#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \ + compute_pseudo) \ +({ \ + __sum16 __ret = 0; \ + if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ + __ret = __skb_gro_checksum_validate_complete(skb, \ + compute_pseudo(skb, proto)); \ + if (!__ret) \ + skb_gro_incr_csum_unnecessary(skb); \ + __ret; \ +}) + +#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \ + __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo) + +#define skb_gro_checksum_validate_zero_check(skb, proto, check, \ + compute_pseudo) \ + __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo) + +#define skb_gro_checksum_simple_validate(skb) \ + __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo) + +static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb) +{ + return (NAPI_GRO_CB(skb)->csum_cnt == 0 && + !NAPI_GRO_CB(skb)->csum_valid); +} + +static inline void __skb_gro_checksum_convert(struct sk_buff *skb, + __sum16 check, __wsum pseudo) +{ + NAPI_GRO_CB(skb)->csum = ~pseudo; + NAPI_GRO_CB(skb)->csum_valid = 1; +} + +#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \ +do { \ + if (__skb_gro_checksum_convert_check(skb)) \ + __skb_gro_checksum_convert(skb, check, \ + compute_pseudo(skb, proto)); \ +} while (0) + +struct gro_remcsum { + int offset; + __wsum delta; +}; + +static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) +{ + grc->offset = 0; + grc->delta = 0; +} + +static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, + unsigned int off, size_t hdrlen, + int start, int offset, + struct gro_remcsum *grc, + bool nopartial) +{ + __wsum delta; + size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); + + BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); + + if (!nopartial) { + NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; + return ptr; + } + + ptr = skb_gro_header_fast(skb, off); + if (skb_gro_header_hard(skb, off + plen)) { + ptr = skb_gro_header_slow(skb, off + plen, off); + if (!ptr) + return NULL; + } + + delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum, + start, offset); + + /* Adjust skb->csum since we changed the packet */ + NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); + + grc->offset = off + hdrlen + offset; + grc->delta = delta; + + return ptr; +} + +static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, + struct gro_remcsum *grc) +{ + void *ptr; + size_t plen = grc->offset + sizeof(u16); + + if (!grc->delta) + return; + + ptr = skb_gro_header_fast(skb, grc->offset); + if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) { + ptr = skb_gro_header_slow(skb, plen, grc->offset); + if (!ptr) + return; + } + + remcsum_unadjust((__sum16 *)ptr, grc->delta); +} + +#ifdef CONFIG_XFRM_OFFLOAD +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) +{ + if (PTR_ERR(pp) != -EINPROGRESS) + NAPI_GRO_CB(skb)->flush |= flush; +} +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, + struct sk_buff *pp, + int flush, + struct gro_remcsum *grc) +{ + if (PTR_ERR(pp) != -EINPROGRESS) { + NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_remcsum_cleanup(skb, grc); + skb->remcsum_offload = 0; + } +} +#else +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) +{ + NAPI_GRO_CB(skb)->flush |= flush; +} +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, + struct sk_buff *pp, + 
int flush, + struct gro_remcsum *grc) +{ + NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_remcsum_cleanup(skb, grc); + skb->remcsum_offload = 0; +} +#endif + +static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, + unsigned int len) +{ + if (!dev->header_ops || !dev->header_ops->create) + return 0; + + return dev->header_ops->create(skb, dev, type, daddr, saddr, len); +} + +static inline int dev_parse_header(const struct sk_buff *skb, + unsigned char *haddr) +{ + const struct net_device *dev = skb->dev; + + if (!dev->header_ops || !dev->header_ops->parse) + return 0; + return dev->header_ops->parse(skb, haddr); +} + +static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) +{ + const struct net_device *dev = skb->dev; + + if (!dev->header_ops || !dev->header_ops->parse_protocol) + return 0; + return dev->header_ops->parse_protocol(skb); +} + +/* ll_header must have at least hard_header_len allocated */ +static inline bool dev_validate_header(const struct net_device *dev, + char *ll_header, int len) +{ + if (likely(len >= dev->hard_header_len)) + return true; + if (len < dev->min_header_len) + return false; + + if (capable(CAP_SYS_RAWIO)) { + memset(ll_header + len, 0, dev->hard_header_len - len); + return true; + } + + if (dev->header_ops && dev->header_ops->validate) + return dev->header_ops->validate(ll_header, len); + + return false; +} + +typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, + int len, int size); +int register_gifconf(unsigned int family, gifconf_func_t *gifconf); +static inline int unregister_gifconf(unsigned int family) +{ + return register_gifconf(family, NULL); +} + +#ifdef CONFIG_NET_FLOW_LIMIT +#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[FLOW_LIMIT_HISTORY]; + u8 buckets[]; +}; + +extern int netdev_flow_limit_table_len; +#endif /* CONFIG_NET_FLOW_LIMIT */ + +/* + * Incoming packets are placed on per-CPU queues + */ +struct softnet_data { + struct list_head poll_list; + struct sk_buff_head process_queue; + + /* stats */ + unsigned int processed; + unsigned int time_squeeze; + unsigned int received_rps; +#ifdef CONFIG_RPS + struct softnet_data *rps_ipi_list; +#endif +#ifdef CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit __rcu *flow_limit; +#endif + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; +#ifdef CONFIG_XFRM_OFFLOAD + struct sk_buff_head xfrm_backlog; +#endif + /* written and read only by owning cpu: */ + struct { + u16 recursion; + u8 more; + } xmit; +#ifdef CONFIG_RPS + /* input_queue_head should be written by cpu owning this struct, + * and only read by other cpus. Worth using a cache line. 
+ */ + unsigned int input_queue_head ____cacheline_aligned_in_smp; + + /* Elements below can be accessed between CPUs for RPS/RFS */ + call_single_data_t csd ____cacheline_aligned_in_smp; + struct softnet_data *rps_ipi_next; + unsigned int cpu; + unsigned int input_queue_tail; +#endif + unsigned int dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; + +}; + +static inline void input_queue_head_incr(struct softnet_data *sd) +{ +#ifdef CONFIG_RPS + sd->input_queue_head++; +#endif +} + +static inline void input_queue_tail_incr_save(struct softnet_data *sd, + unsigned int *qtail) +{ +#ifdef CONFIG_RPS + *qtail = ++sd->input_queue_tail; +#endif +} + +DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); + +static inline int dev_recursion_level(void) +{ + return this_cpu_read(softnet_data.xmit.recursion); +} + +#define XMIT_RECURSION_LIMIT 8 +static inline bool dev_xmit_recursion(void) +{ + return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > + XMIT_RECURSION_LIMIT); +} + +static inline void dev_xmit_recursion_inc(void) +{ + __this_cpu_inc(softnet_data.xmit.recursion); +} + +static inline void dev_xmit_recursion_dec(void) +{ + __this_cpu_dec(softnet_data.xmit.recursion); +} + +void __netif_schedule(struct Qdisc *q); +void netif_schedule_queue(struct netdev_queue *txq); + +static inline void netif_tx_schedule_all(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) + netif_schedule_queue(netdev_get_tx_queue(dev, i)); +} + +static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) +{ + clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); +} + +/** + * netif_start_queue - allow transmit + * @dev: network device + * + * Allow upper layers to call the device hard_start_xmit routine. + */ +static inline void netif_start_queue(struct net_device *dev) +{ + netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); +} + +static inline void netif_tx_start_all_queues(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + netif_tx_start_queue(txq); + } +} + +void netif_tx_wake_queue(struct netdev_queue *dev_queue); + +/** + * netif_wake_queue - restart transmit + * @dev: network device + * + * Allow upper layers to call the device hard_start_xmit routine. + * Used for flow control when transmit resources are available. + */ +static inline void netif_wake_queue(struct net_device *dev) +{ + netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); +} + +static inline void netif_tx_wake_all_queues(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + netif_tx_wake_queue(txq); + } +} + +static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) +{ + set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); +} + +/** + * netif_stop_queue - stop transmitted packets + * @dev: network device + * + * Stop upper layers calling the device hard_start_xmit routine. + * Used for flow control when transmit resources are unavailable. 
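+ *
+ * A minimal flow-control sketch (hypothetical driver names): the transmit
+ * handler stops the queue when its hardware ring is full, and the TX
+ * completion path calls netif_wake_queue() once descriptors are reclaimed:
+ *
+ *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
+ *					  struct net_device *dev)
+ *	{
+ *		foo_queue_to_ring(dev, skb);
+ *		if (foo_ring_full(dev))
+ *			netif_stop_queue(dev);
+ *		return NETDEV_TX_OK;
+ *	}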
+ */ +static inline void netif_stop_queue(struct net_device *dev) +{ + netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); +} + +void netif_tx_stop_all_queues(struct net_device *dev); + +static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) +{ + return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); +} + +/** + * netif_queue_stopped - test if transmit queue is flowblocked + * @dev: network device + * + * Test if transmit queue on device is currently unable to send. + */ +static inline bool netif_queue_stopped(const struct net_device *dev) +{ + return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); +} + +static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) +{ + return dev_queue->state & QUEUE_STATE_ANY_XOFF; +} + +static inline bool +netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) +{ + return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; +} + +static inline bool +netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) +{ + return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; +} + +/** + * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their ndo_start_xmit(), + * to give appropriate hint to the CPU. + */ +static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.num_queued); +#endif +} + +/** + * netdev_txq_bql_complete_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their TX completion path, + * to give appropriate hint to the CPU. + */ +static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.limit); +#endif +} + +static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes) +{ +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); + + if (likely(dql_avail(&dev_queue->dql) >= 0)) + return; + + set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); + + /* + * The XOFF flag must be set before checking the dql_avail below, + * because in netdev_tx_completed_queue we update the dql_completed + * before checking the XOFF flag. + */ + smp_mb(); + + /* check again in case another CPU has just made room avail */ + if (unlikely(dql_avail(&dev_queue->dql) >= 0)) + clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); +#endif +} + +/** + * netdev_sent_queue - report the number of bytes queued to hardware + * @dev: network device + * @bytes: number of bytes queued to the hardware device queue + * + * Report the number of bytes queued for sending/completion to the network + * device hardware queue. 
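+ *
+ * A minimal BQL sketch (illustrative): pair this call in the transmit path
+ * with netdev_completed_queue() in the TX completion handler:
+ *
+ *	netdev_sent_queue(dev, skb->len);
+ *	netdev_completed_queue(dev, done_pkts, done_bytes);
+ *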
@bytes should be a good approximation and should + * exactly match netdev_completed_queue() @bytes + */ +static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) +{ + netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); +} + +static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, + unsigned int pkts, unsigned int bytes) +{ +#ifdef CONFIG_BQL + if (unlikely(!bytes)) + return; + + dql_completed(&dev_queue->dql, bytes); + + /* + * Without the memory barrier there is a small possiblity that + * netdev_tx_sent_queue will miss the update and cause the queue to + * be stopped forever + */ + smp_mb(); + + if (dql_avail(&dev_queue->dql) < 0) + return; + + if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) + netif_schedule_queue(dev_queue); +#endif +} + +/** + * netdev_completed_queue - report bytes and packets completed by device + * @dev: network device + * @pkts: actual number of packets sent over the medium + * @bytes: actual number of bytes sent over the medium + * + * Report the number of bytes and packets transmitted by the network device + * hardware queue over the physical medium, @bytes must exactly match the + * @bytes amount passed to netdev_sent_queue() + */ +static inline void netdev_completed_queue(struct net_device *dev, + unsigned int pkts, unsigned int bytes) +{ + netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); +} + +static inline void netdev_tx_reset_queue(struct netdev_queue *q) +{ +#ifdef CONFIG_BQL + clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); + dql_reset(&q->dql); +#endif +} + +/** + * netdev_reset_queue - reset the packets and bytes count of a network device + * @dev_queue: network device + * + * Reset the bytes and packet count of a network device and clear the + * software flow control OFF bit for this network device + */ +static inline void netdev_reset_queue(struct net_device *dev_queue) +{ + netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); +} + +/** + * netdev_cap_txqueue - check if selected tx queue exceeds device queues + * @dev: network device + * @queue_index: given tx queue index + * + * Returns 0 if given tx queue index >= number of device tx queues, + * otherwise returns the originally passed tx queue index. + */ +static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) +{ + if (unlikely(queue_index >= dev->real_num_tx_queues)) { + net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", + dev->name, queue_index, + dev->real_num_tx_queues); + return 0; + } + + return queue_index; +} + +/** + * netif_running - test if up + * @dev: network device + * + * Test if the device has been brought up. + */ +static inline bool netif_running(const struct net_device *dev) +{ + return test_bit(__LINK_STATE_START, &dev->state); +} + +/* + * Routines to manage the subqueues on a device. We only need start, + * stop, and a check if it's stopped. All other device management is + * done at the overall netdevice level. + * Also test the device if we're multiqueue. + */ + +/** + * netif_start_subqueue - allow sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Start individual transmit queue of a device with multiple transmit queues. 
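+ *
+ * A minimal sketch (hypothetical foo_ring_full() helper): a multiqueue
+ * driver typically pairs netif_stop_subqueue() in its per-queue transmit
+ * path with netif_wake_subqueue() in that queue's TX completion handler:
+ *
+ *	u16 qid = skb_get_queue_mapping(skb);
+ *
+ *	if (foo_ring_full(dev, qid))
+ *		netif_stop_subqueue(dev, qid);
+ *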
+ */ +static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + + netif_tx_start_queue(txq); +} + +/** + * netif_stop_subqueue - stop sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Stop individual transmit queue of a device with multiple transmit queues. + */ +static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + netif_tx_stop_queue(txq); +} + +/** + * netif_subqueue_stopped - test status of subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Check individual transmit queue of a device with multiple transmit queues. + */ +static inline bool __netif_subqueue_stopped(const struct net_device *dev, + u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + + return netif_tx_queue_stopped(txq); +} + +static inline bool netif_subqueue_stopped(const struct net_device *dev, + struct sk_buff *skb) +{ + return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); +} + +/** + * netif_wake_subqueue - allow sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Resume individual transmit queue of a device with multiple transmit queues. + */ +static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + + netif_tx_wake_queue(txq); +} + +#ifdef CONFIG_XPS +int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index); +int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, + u16 index, bool is_rxqs_map); + +/** + * netif_attr_test_mask - Test a CPU or Rx queue set in a mask + * @j: CPU/Rx queue index + * @mask: bitmask of all cpus/rx queues + * @nr_bits: number of bits in the bitmask + * + * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. + */ +static inline bool netif_attr_test_mask(unsigned long j, + const unsigned long *mask, + unsigned int nr_bits) +{ + cpu_max_bits_warn(j, nr_bits); + return test_bit(j, mask); +} + +/** + * netif_attr_test_online - Test for online CPU/Rx queue + * @j: CPU/Rx queue index + * @online_mask: bitmask for CPUs/Rx queues that are online + * @nr_bits: number of bits in the bitmask + * + * Returns true if a CPU/Rx queue is online. + */ +static inline bool netif_attr_test_online(unsigned long j, + const unsigned long *online_mask, + unsigned int nr_bits) +{ + cpu_max_bits_warn(j, nr_bits); + + if (online_mask) + return test_bit(j, online_mask); + + return (j < nr_bits); +} + +/** + * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask + * @n: CPU/Rx queue index + * @srcp: the cpumask/Rx queue mask pointer + * @nr_bits: number of bits in the bitmask + * + * Returns >= nr_bits if no further CPUs/Rx queues set. + */ +static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, + unsigned int nr_bits) +{ + /* -1 is a legal arg here. 
*/ + if (n != -1) + cpu_max_bits_warn(n, nr_bits); + + if (srcp) + return find_next_bit(srcp, nr_bits, n + 1); + + return n + 1; +} + +/** + * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p + * @n: CPU/Rx queue index + * @src1p: the first CPUs/Rx queues mask pointer + * @src2p: the second CPUs/Rx queues mask pointer + * @nr_bits: number of bits in the bitmask + * + * Returns >= nr_bits if no further CPUs/Rx queues set in both. + */ +static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, + const unsigned long *src2p, + unsigned int nr_bits) +{ + /* -1 is a legal arg here. */ + if (n != -1) + cpu_max_bits_warn(n, nr_bits); + + if (src1p && src2p) + return find_next_and_bit(src1p, src2p, nr_bits, n + 1); + else if (src1p) + return find_next_bit(src1p, nr_bits, n + 1); + else if (src2p) + return find_next_bit(src2p, nr_bits, n + 1); + + return n + 1; +} +#else +static inline int netif_set_xps_queue(struct net_device *dev, + const struct cpumask *mask, + u16 index) +{ + return 0; +} + +static inline int __netif_set_xps_queue(struct net_device *dev, + const unsigned long *mask, + u16 index, bool is_rxqs_map) +{ + return 0; +} +#endif + +/** + * netif_is_multiqueue - test if device has multiple transmit queues + * @dev: network device + * + * Check if device has multiple transmit queues + */ +static inline bool netif_is_multiqueue(const struct net_device *dev) +{ + return dev->num_tx_queues > 1; +} + +int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); + +#ifdef CONFIG_SYSFS +int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); +#else +static inline int netif_set_real_num_rx_queues(struct net_device *dev, + unsigned int rxqs) +{ + dev->real_num_rx_queues = rxqs; + return 0; +} +#endif + +static inline struct netdev_rx_queue * +__netif_get_rx_queue(struct net_device *dev, unsigned int rxq) +{ + return dev->_rx + rxq; +} + +#ifdef CONFIG_SYSFS +static inline unsigned int get_netdev_rx_queue_index( + struct netdev_rx_queue *queue) +{ + struct net_device *dev = queue->dev; + int index = queue - dev->_rx; + + BUG_ON(index >= dev->num_rx_queues); + return index; +} +#endif + +#define DEFAULT_MAX_NUM_RSS_QUEUES (8) +int netif_get_num_default_rss_queues(void); + +enum skb_free_reason { + SKB_REASON_CONSUMED, + SKB_REASON_DROPPED, +}; + +void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason); +void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); + +/* + * It is not allowed to call kfree_skb() or consume_skb() from hardware + * interrupt context or with hardware interrupts being disabled. + * (in_irq() || irqs_disabled()) + * + * We provide four helpers that can be used in following contexts : + * + * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, + * replacing kfree_skb(skb) + * + * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. + * Typically used in place of consume_skb(skb) in TX completion path + * + * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, + * replacing kfree_skb(skb) + * + * dev_consume_skb_any(skb) when caller doesn't know its current irq context, + * and consumed a packet. 
Used in place of consume_skb(skb) + */ +static inline void dev_kfree_skb_irq(struct sk_buff *skb) +{ + __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED); +} + +static inline void dev_consume_skb_irq(struct sk_buff *skb) +{ + __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED); +} + +static inline void dev_kfree_skb_any(struct sk_buff *skb) +{ + __dev_kfree_skb_any(skb, SKB_REASON_DROPPED); +} + +static inline void dev_consume_skb_any(struct sk_buff *skb) +{ + __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); +} + +void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); +int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); +int netif_rx(struct sk_buff *skb); +int netif_rx_ni(struct sk_buff *skb); +int netif_receive_skb(struct sk_buff *skb); +int netif_receive_skb_core(struct sk_buff *skb); +void netif_receive_skb_list(struct list_head *head); +gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); +void napi_gro_flush(struct napi_struct *napi, bool flush_old); +struct sk_buff *napi_get_frags(struct napi_struct *napi); +gro_result_t napi_gro_frags(struct napi_struct *napi); +struct packet_offload *gro_find_receive_by_type(__be16 type); +struct packet_offload *gro_find_complete_by_type(__be16 type); + +static inline void napi_free_frags(struct napi_struct *napi) +{ + kfree_skb(napi->skb); + napi->skb = NULL; +} + +bool netdev_is_rx_handler_busy(struct net_device *dev); +int netdev_rx_handler_register(struct net_device *dev, + rx_handler_func_t *rx_handler, + void *rx_handler_data); +void netdev_rx_handler_unregister(struct net_device *dev); + +bool dev_valid_name(const char *name); +static inline bool is_socket_ioctl_cmd(unsigned int cmd) +{ + return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; +} +int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, + bool *need_copyout); +int dev_ifconf(struct net *net, struct ifconf *, int); +int dev_ethtool(struct net *net, struct ifreq *); +unsigned int dev_get_flags(const struct net_device *); +int __dev_change_flags(struct net_device *, unsigned int flags); +int dev_change_flags(struct net_device *, unsigned int); +void __dev_notify_flags(struct net_device *, unsigned int old_flags, + unsigned int gchanges); +int dev_change_name(struct net_device *, const char *); +int dev_set_alias(struct net_device *, const char *, size_t); +int dev_get_alias(const struct net_device *, char *, size_t); +int dev_change_net_namespace(struct net_device *, struct net *, const char *); +int __dev_set_mtu(struct net_device *, int); +int dev_validate_mtu(struct net_device *dev, int mtu, + struct netlink_ext_ack *extack); +int dev_set_mtu_ext(struct net_device *dev, int mtu, + struct netlink_ext_ack *extack); +int dev_set_mtu(struct net_device *, int); +int dev_change_tx_queue_len(struct net_device *, unsigned long); +void dev_set_group(struct net_device *, int); +int dev_set_mac_address(struct net_device *, struct sockaddr *); +int dev_change_carrier(struct net_device *, bool new_carrier); +int dev_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid); +int dev_get_phys_port_name(struct net_device *dev, + char *name, size_t len); +int dev_change_proto_down(struct net_device *dev, bool proto_down); +struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); +struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, int *ret); + +typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); +int 
dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, + int fd, u32 flags); +u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, + enum bpf_netdev_command cmd); +int xdp_umem_query(struct net_device *dev, u16 queue_id); + +int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); +int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); +bool is_skb_forwardable(const struct net_device *dev, + const struct sk_buff *skb); + +static __always_inline int ____dev_forward_skb(struct net_device *dev, + struct sk_buff *skb) +{ + if (skb_orphan_frags(skb, GFP_ATOMIC) || + unlikely(!is_skb_forwardable(dev, skb))) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + + skb_scrub_packet(skb, true); + skb->priority = 0; + return 0; +} + +void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); + +extern int netdev_budget; +extern unsigned int netdev_budget_usecs; + +/* Called by rtnetlink.c:rtnl_unlock() */ +void netdev_run_todo(void); + +/** + * dev_put - release reference to device + * @dev: network device + * + * Release reference to device to allow it to be freed. + */ +static inline void dev_put(struct net_device *dev) +{ + if (dev) + this_cpu_dec(*dev->pcpu_refcnt); +} + +/** + * dev_hold - get reference to device + * @dev: network device + * + * Hold reference to device to keep it from being freed. + */ +static inline void dev_hold(struct net_device *dev) +{ + if (dev) + this_cpu_inc(*dev->pcpu_refcnt); +} + +/* Carrier loss detection, dial on demand. The functions netif_carrier_on + * and _off may be called from IRQ context, but it is caller + * who is responsible for serialization of these calls. + * + * The name carrier is inappropriate, these functions should really be + * called netif_lowerlayer_*() because they represent the state of any + * kind of lower layer not just hardware media. + */ + +void linkwatch_init_dev(struct net_device *dev); +void linkwatch_fire_event(struct net_device *dev); +void linkwatch_forget_dev(struct net_device *dev); + +/** + * netif_carrier_ok - test if carrier present + * @dev: network device + * + * Check if carrier is present on device + */ +static inline bool netif_carrier_ok(const struct net_device *dev) +{ + return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); +} + +unsigned long dev_trans_start(struct net_device *dev); + +void __netdev_watchdog_up(struct net_device *dev); + +void netif_carrier_on(struct net_device *dev); + +void netif_carrier_off(struct net_device *dev); + +/** + * netif_dormant_on - mark device as dormant. + * @dev: network device + * + * Mark device as dormant (as per RFC2863). + * + * The dormant state indicates that the relevant interface is not + * actually in a condition to pass packets (i.e., it is not 'up') but is + * in a "pending" state, waiting for some external event. For "on- + * demand" interfaces, this new state identifies the situation where the + * interface is waiting for events to place it in the up state. + */ +static inline void netif_dormant_on(struct net_device *dev) +{ + if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) + linkwatch_fire_event(dev); +} + +/** + * netif_dormant_off - set device as not dormant. + * @dev: network device + * + * Device is not in dormant state. 
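+ *
+ * Typical pairing (illustrative): a driver that must wait for an external
+ * event, e.g. 802.1X authorization, calls netif_dormant_on(dev) while
+ * waiting and netif_dormant_off(dev) once the interface may pass packets.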
+ */ +static inline void netif_dormant_off(struct net_device *dev) +{ + if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) + linkwatch_fire_event(dev); +} + +/** + * netif_dormant - test if device is dormant + * @dev: network device + * + * Check if device is dormant. + */ +static inline bool netif_dormant(const struct net_device *dev) +{ + return test_bit(__LINK_STATE_DORMANT, &dev->state); +} + + +/** + * netif_oper_up - test if device is operational + * @dev: network device + * + * Check if carrier is operational + */ +static inline bool netif_oper_up(const struct net_device *dev) +{ + return (dev->operstate == IF_OPER_UP || + dev->operstate == IF_OPER_UNKNOWN /* backward compat */); +} + +/** + * netif_device_present - is device available or removed + * @dev: network device + * + * Check if device has not been removed from system. + */ +static inline bool netif_device_present(struct net_device *dev) +{ + return test_bit(__LINK_STATE_PRESENT, &dev->state); +} + +void netif_device_detach(struct net_device *dev); + +void netif_device_attach(struct net_device *dev); + +/* + * Network interface message level settings + */ + +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) + +static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1U << debug_value) - 1; +} + +static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) +{ + spin_lock(&txq->_xmit_lock); + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, cpu); +} + +static inline bool __netif_tx_acquire(struct netdev_queue *txq) +{ + __acquire(&txq->_xmit_lock); + return true; +} + +static inline void __netif_tx_release(struct netdev_queue *txq) +{ + __release(&txq->_xmit_lock); +} + +static inline void __netif_tx_lock_bh(struct netdev_queue *txq) +{ + spin_lock_bh(&txq->_xmit_lock); + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); +} + +static inline bool 
__netif_tx_trylock(struct netdev_queue *txq) +{ + bool ok = spin_trylock(&txq->_xmit_lock); + + if (likely(ok)) { + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); + } + return ok; +} + +static inline void __netif_tx_unlock(struct netdev_queue *txq) +{ + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, -1); + spin_unlock(&txq->_xmit_lock); +} + +static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) +{ + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, -1); + spin_unlock_bh(&txq->_xmit_lock); +} + +static inline void txq_trans_update(struct netdev_queue *txq) +{ + if (txq->xmit_lock_owner != -1) + txq->trans_start = jiffies; +} + +/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ +static inline void netif_trans_update(struct net_device *dev) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); + + if (txq->trans_start != jiffies) + txq->trans_start = jiffies; +} + +/** + * netif_tx_lock - grab network device transmit lock + * @dev: network device + * + * Get network device transmit lock + */ +static inline void netif_tx_lock(struct net_device *dev) +{ + unsigned int i; + int cpu; + + spin_lock(&dev->tx_global_lock); + cpu = smp_processor_id(); + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* We are the only thread of execution doing a + * freeze, but we have to grab the _xmit_lock in + * order to synchronize with threads which are in + * the ->hard_start_xmit() handler and already + * checked the frozen bit. + */ + __netif_tx_lock(txq, cpu); + set_bit(__QUEUE_STATE_FROZEN, &txq->state); + __netif_tx_unlock(txq); + } +} + +static inline void netif_tx_lock_bh(struct net_device *dev) +{ + local_bh_disable(); + netif_tx_lock(dev); +} + +static inline void netif_tx_unlock(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* No need to grab the _xmit_lock here. If the + * queue is not stopped for another reason, we + * force a schedule. + */ + clear_bit(__QUEUE_STATE_FROZEN, &txq->state); + netif_schedule_queue(txq); + } + spin_unlock(&dev->tx_global_lock); +} + +static inline void netif_tx_unlock_bh(struct net_device *dev) +{ + netif_tx_unlock(dev); + local_bh_enable(); +} + +#define HARD_TX_LOCK(dev, txq, cpu) { \ + if ((dev->features & NETIF_F_LLTX) == 0) { \ + __netif_tx_lock(txq, cpu); \ + } else { \ + __netif_tx_acquire(txq); \ + } \ +} + +#define HARD_TX_TRYLOCK(dev, txq) \ + (((dev->features & NETIF_F_LLTX) == 0) ? 
\ + __netif_tx_trylock(txq) : \ + __netif_tx_acquire(txq)) + +#define HARD_TX_UNLOCK(dev, txq) { \ + if ((dev->features & NETIF_F_LLTX) == 0) { \ + __netif_tx_unlock(txq); \ + } else { \ + __netif_tx_release(txq); \ + } \ +} + +static inline void netif_tx_disable(struct net_device *dev) +{ + unsigned int i; + int cpu; + + local_bh_disable(); + cpu = smp_processor_id(); + spin_lock(&dev->tx_global_lock); + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + __netif_tx_lock(txq, cpu); + netif_tx_stop_queue(txq); + __netif_tx_unlock(txq); + } + spin_unlock(&dev->tx_global_lock); + local_bh_enable(); +} + +static inline void netif_addr_lock(struct net_device *dev) +{ + spin_lock(&dev->addr_list_lock); +} + +static inline void netif_addr_lock_nested(struct net_device *dev) +{ + int subclass = SINGLE_DEPTH_NESTING; + + if (dev->netdev_ops->ndo_get_lock_subclass) + subclass = dev->netdev_ops->ndo_get_lock_subclass(dev); + + spin_lock_nested(&dev->addr_list_lock, subclass); +} + +static inline void netif_addr_lock_bh(struct net_device *dev) +{ + spin_lock_bh(&dev->addr_list_lock); +} + +static inline void netif_addr_unlock(struct net_device *dev) +{ + spin_unlock(&dev->addr_list_lock); +} + +static inline void netif_addr_unlock_bh(struct net_device *dev) +{ + spin_unlock_bh(&dev->addr_list_lock); +} + +/* + * dev_addrs walker. Should be used only for read access. Call with + * rcu_read_lock held. + */ +#define for_each_dev_addr(dev, ha) \ + list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) + +/* These functions live elsewhere (drivers/net/net_init.c, but related) */ + +void ether_setup(struct net_device *dev); + +/* Support for loadable net-drivers */ +struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, + unsigned char name_assign_type, + void (*setup)(struct net_device *), + unsigned int txqs, unsigned int rxqs); +int dev_get_valid_name(struct net *net, struct net_device *dev, + const char *name); + +#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ + alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) + +#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ + alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ + count) + +int register_netdev(struct net_device *dev); +void unregister_netdev(struct net_device *dev); + +/* General hardware address lists handling functions */ +int __hw_addr_sync(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, int addr_len); +void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, int addr_len); +int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)); +void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)); +void __hw_addr_init(struct netdev_hw_addr_list *list); + +/* Functions used for device addresses handling */ +int dev_addr_add(struct net_device *dev, const unsigned char *addr, + unsigned char addr_type); +int dev_addr_del(struct net_device *dev, const unsigned char *addr, + unsigned char addr_type); +void dev_addr_flush(struct net_device *dev); +int dev_addr_init(struct net_device *dev); + +/* Functions used for unicast addresses handling */ +int dev_uc_add(struct net_device *dev, const unsigned char 
*addr); +int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); +int dev_uc_del(struct net_device *dev, const unsigned char *addr); +int dev_uc_sync(struct net_device *to, struct net_device *from); +int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); +void dev_uc_unsync(struct net_device *to, struct net_device *from); +void dev_uc_flush(struct net_device *dev); +void dev_uc_init(struct net_device *dev); + +/** + * __dev_uc_sync - Synchonize device's unicast list + * @dev: device to sync + * @sync: function to call if address should be added + * @unsync: function to call if address should be removed + * + * Add newly added addresses to the interface, and release + * addresses that have been deleted. + */ +static inline int __dev_uc_sync(struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +} + +/** + * __dev_uc_unsync - Remove synchronized addresses from device + * @dev: device to sync + * @unsync: function to call if address should be removed + * + * Remove all addresses that were added to the device by dev_uc_sync(). + */ +static inline void __dev_uc_unsync(struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + __hw_addr_unsync_dev(&dev->uc, dev, unsync); +} + +/* Functions used for multicast addresses handling */ +int dev_mc_add(struct net_device *dev, const unsigned char *addr); +int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); +int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); +int dev_mc_del(struct net_device *dev, const unsigned char *addr); +int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); +int dev_mc_sync(struct net_device *to, struct net_device *from); +int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); +void dev_mc_unsync(struct net_device *to, struct net_device *from); +void dev_mc_flush(struct net_device *dev); +void dev_mc_init(struct net_device *dev); + +/** + * __dev_mc_sync - Synchonize device's multicast list + * @dev: device to sync + * @sync: function to call if address should be added + * @unsync: function to call if address should be removed + * + * Add newly added addresses to the interface, and release + * addresses that have been deleted. + */ +static inline int __dev_mc_sync(struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +} + +/** + * __dev_mc_unsync - Remove synchronized addresses from device + * @dev: device to sync + * @unsync: function to call if address should be removed + * + * Remove all addresses that were added to the device by dev_mc_sync(). 
+ */ +static inline void __dev_mc_unsync(struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) +{ + __hw_addr_unsync_dev(&dev->mc, dev, unsync); +} + +/* Functions used for secondary unicast and multicast support */ +void dev_set_rx_mode(struct net_device *dev); +void __dev_set_rx_mode(struct net_device *dev); +int dev_set_promiscuity(struct net_device *dev, int inc); +int dev_set_allmulti(struct net_device *dev, int inc); +void netdev_state_change(struct net_device *dev); +void netdev_notify_peers(struct net_device *dev); +void netdev_features_change(struct net_device *dev); +/* Load a device via the kmod */ +void dev_load(struct net *net, const char *name); +struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *storage); +void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, + const struct net_device_stats *netdev_stats); + +extern int netdev_max_backlog; +extern int netdev_tstamp_prequeue; +extern int weight_p; +extern int dev_weight_rx_bias; +extern int dev_weight_tx_bias; +extern int dev_rx_weight; +extern int dev_tx_weight; + +bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); +struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, + struct list_head **iter); +struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, + struct list_head **iter); + +/* iterate through upper list, must be called under RCU read lock */ +#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ + for (iter = &(dev)->adj_list.upper, \ + updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ + updev; \ + updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) + +int netdev_walk_all_upper_dev_rcu(struct net_device *dev, + int (*fn)(struct net_device *upper_dev, + void *data), + void *data); + +bool netdev_has_upper_dev_all_rcu(struct net_device *dev, + struct net_device *upper_dev); + +bool netdev_has_any_upper_dev(struct net_device *dev); + +void *netdev_lower_get_next_private(struct net_device *dev, + struct list_head **iter); +void *netdev_lower_get_next_private_rcu(struct net_device *dev, + struct list_head **iter); + +#define netdev_for_each_lower_private(dev, priv, iter) \ + for (iter = (dev)->adj_list.lower.next, \ + priv = netdev_lower_get_next_private(dev, &(iter)); \ + priv; \ + priv = netdev_lower_get_next_private(dev, &(iter))) + +#define netdev_for_each_lower_private_rcu(dev, priv, iter) \ + for (iter = &(dev)->adj_list.lower, \ + priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ + priv; \ + priv = netdev_lower_get_next_private_rcu(dev, &(iter))) + +void *netdev_lower_get_next(struct net_device *dev, + struct list_head **iter); + +#define netdev_for_each_lower_dev(dev, ldev, iter) \ + for (iter = (dev)->adj_list.lower.next, \ + ldev = netdev_lower_get_next(dev, &(iter)); \ + ldev; \ + ldev = netdev_lower_get_next(dev, &(iter))) + +struct net_device *netdev_all_lower_get_next(struct net_device *dev, + struct list_head **iter); +struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, + struct list_head **iter); + +int netdev_walk_all_lower_dev(struct net_device *dev, + int (*fn)(struct net_device *lower_dev, + void *data), + void *data); +int netdev_walk_all_lower_dev_rcu(struct net_device *dev, + int (*fn)(struct net_device *lower_dev, + void *data), + void *data); + +void *netdev_adjacent_get_private(struct list_head *adj_list); +void *netdev_lower_get_first_private_rcu(struct net_device *dev); +struct net_device 
*netdev_master_upper_dev_get(struct net_device *dev); +struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); +int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, + struct netlink_ext_ack *extack); +int netdev_master_upper_dev_link(struct net_device *dev, + struct net_device *upper_dev, + void *upper_priv, void *upper_info, + struct netlink_ext_ack *extack); +void netdev_upper_dev_unlink(struct net_device *dev, + struct net_device *upper_dev); +void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); +void *netdev_lower_dev_get_private(struct net_device *dev, + struct net_device *lower_dev); +void netdev_lower_state_changed(struct net_device *lower_dev, + void *lower_state_info); + +/* RSS keys are 40 or 52 bytes long */ +#define NETDEV_RSS_KEY_LEN 52 +extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; +void netdev_rss_key_fill(void *buffer, size_t len); + +int dev_get_nest_level(struct net_device *dev); +int skb_checksum_help(struct sk_buff *skb); +int skb_crc32c_csum_help(struct sk_buff *skb); +int skb_csum_hwoffload_help(struct sk_buff *skb, + const netdev_features_t features); + +struct sk_buff *__skb_gso_segment(struct sk_buff *skb, + netdev_features_t features, bool tx_path); +struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, + netdev_features_t features); + +struct netdev_bonding_info { + ifslave slave; + ifbond master; +}; + +struct netdev_notifier_bonding_info { + struct netdev_notifier_info info; /* must be first */ + struct netdev_bonding_info bonding_info; +}; + +void netdev_bonding_info_change(struct net_device *dev, + struct netdev_bonding_info *bonding_info); + +static inline +struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) +{ + return __skb_gso_segment(skb, features, true); +} +__be16 skb_network_protocol(struct sk_buff *skb, int *depth); + +static inline bool can_checksum_protocol(netdev_features_t features, + __be16 protocol) +{ + if (protocol == htons(ETH_P_FCOE)) + return !!(features & NETIF_F_FCOE_CRC); + + /* Assume this is an IP checksum (not SCTP CRC) */ + + if (features & NETIF_F_HW_CSUM) { + /* Can checksum everything */ + return true; + } + + switch (protocol) { + case htons(ETH_P_IP): + return !!(features & NETIF_F_IP_CSUM); + case htons(ETH_P_IPV6): + return !!(features & NETIF_F_IPV6_CSUM); + default: + return false; + } +} + +#ifdef CONFIG_BUG +void netdev_rx_csum_fault(struct net_device *dev); +#else +static inline void netdev_rx_csum_fault(struct net_device *dev) +{ +} +#endif +/* rx skb timestamps */ +void net_enable_timestamp(void); +void net_disable_timestamp(void); + +#ifdef CONFIG_PROC_FS +int __init dev_proc_init(void); +#else +#define dev_proc_init() 0 +#endif + +static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, + struct sk_buff *skb, struct net_device *dev, + bool more) +{ + skb->xmit_more = more ? 
1 : 0; + return ops->ndo_start_xmit(skb, dev); +} + +static inline bool netdev_xmit_more(void) +{ + return __this_cpu_read(softnet_data.xmit.more); +} + +static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, bool more) +{ + const struct net_device_ops *ops = dev->netdev_ops; + int rc; + + rc = __netdev_start_xmit(ops, skb, dev, more); + if (rc == NETDEV_TX_OK) + txq_trans_update(txq); + + return rc; +} + +int netdev_class_create_file_ns(const struct class_attribute *class_attr, + const void *ns); +void netdev_class_remove_file_ns(const struct class_attribute *class_attr, + const void *ns); + +static inline int netdev_class_create_file(const struct class_attribute *class_attr) +{ + return netdev_class_create_file_ns(class_attr, NULL); +} + +static inline void netdev_class_remove_file(const struct class_attribute *class_attr) +{ + netdev_class_remove_file_ns(class_attr, NULL); +} + +extern const struct kobj_ns_type_operations net_ns_type_operations; + +const char *netdev_drivername(const struct net_device *dev); + +void linkwatch_run_queue(void); + +static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, + netdev_features_t f2) +{ + if ((f1 ^ f2) & NETIF_F_HW_CSUM) { + if (f1 & NETIF_F_HW_CSUM) + f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); + else + f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); + } + + return f1 & f2; +} + +static inline netdev_features_t netdev_get_wanted_features( + struct net_device *dev) +{ + return (dev->features & ~dev->hw_features) | dev->wanted_features; +} +netdev_features_t netdev_increment_features(netdev_features_t all, + netdev_features_t one, netdev_features_t mask); + +/* Allow TSO being used on stacked device : + * Performing the GSO segmentation before last device + * is a performance improvement. 
+ */ +static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, + netdev_features_t mask) +{ + return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); +} + +int __netdev_update_features(struct net_device *dev); +void netdev_update_features(struct net_device *dev); +void netdev_change_features(struct net_device *dev); + +void netif_stacked_transfer_operstate(const struct net_device *rootdev, + struct net_device *dev); + +netdev_features_t passthru_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); +netdev_features_t netif_skb_features(struct sk_buff *skb); + +static inline bool net_gso_ok(netdev_features_t features, int gso_type) +{ + netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; + + /* check flags correspondence */ + BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); + + return (features & feature) == feature; +} + +static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) +{ + return net_gso_ok(features, skb_shinfo(skb)->gso_type) && + (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); +} + +static inline bool netif_needs_gso(struct sk_buff *skb, + netdev_features_t features) +{ + return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || + unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && + (skb->ip_summed != CHECKSUM_UNNECESSARY))); +} + +static inline void netif_set_gso_max_size(struct net_device *dev, + unsigned int size) +{ + dev->gso_max_size = size; +} + +static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, + int pulled_hlen, u16 mac_offset, + int mac_len) +{ + skb->protocol = protocol; + skb->encapsulation = 1; + skb_push(skb, pulled_hlen); + skb_reset_transport_header(skb); + skb->mac_header = mac_offset; + skb->network_header = skb->mac_header + mac_len; + skb->mac_len = mac_len; +} + +static inline bool netif_is_macsec(const struct net_device *dev) +{ + return dev->priv_flags & IFF_MACSEC; +} + +static inline bool netif_is_macvlan(const struct net_device *dev) +{ + return dev->priv_flags & IFF_MACVLAN; +} + +static inline bool netif_is_macvlan_port(const struct net_device *dev) +{ + return dev->priv_flags 
& IFF_MACVLAN_PORT; +} + +static inline bool netif_is_bond_master(const struct net_device *dev) +{ + return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; +} + +static inline bool netif_is_bond_slave(const struct net_device *dev) +{ + return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; +} + +static inline bool netif_supports_nofcs(struct net_device *dev) +{ + return dev->priv_flags & IFF_SUPP_NOFCS; +} + +static inline bool netif_has_l3_rx_handler(const struct net_device *dev) +{ + return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; +} + +static inline bool netif_is_l3_master(const struct net_device *dev) +{ + return dev->priv_flags & IFF_L3MDEV_MASTER; +} + +static inline bool netif_is_l3_slave(const struct net_device *dev) +{ + return dev->priv_flags & IFF_L3MDEV_SLAVE; +} + +static inline bool netif_is_bridge_master(const struct net_device *dev) +{ + return dev->priv_flags & IFF_EBRIDGE; +} + +static inline bool netif_is_bridge_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_BRIDGE_PORT; +} + +static inline bool netif_is_ovs_master(const struct net_device *dev) +{ + return dev->priv_flags & IFF_OPENVSWITCH; +} + +static inline bool netif_is_ovs_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_OVS_DATAPATH; +} + +static inline bool netif_is_team_master(const struct net_device *dev) +{ + return dev->priv_flags & IFF_TEAM; +} + +static inline bool netif_is_team_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_TEAM_PORT; +} + +static inline bool netif_is_lag_master(const struct net_device *dev) +{ + return netif_is_bond_master(dev) || netif_is_team_master(dev); +} + +static inline bool netif_is_lag_port(const struct net_device *dev) +{ + return netif_is_bond_slave(dev) || netif_is_team_port(dev); +} + +static inline bool netif_is_rxfh_configured(const struct net_device *dev) +{ + return dev->priv_flags & IFF_RXFH_CONFIGURED; +} + +static inline bool netif_is_failover(const struct net_device *dev) +{ + return dev->priv_flags & IFF_FAILOVER; +} + +static inline bool netif_is_failover_slave(const struct net_device *dev) +{ + return dev->priv_flags & IFF_FAILOVER_SLAVE; +} + +/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ +static inline void netif_keep_dst(struct net_device *dev) +{ + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); +} + +/* return true if dev can't cope with mtu frames that need vlan tag insertion */ +static inline bool netif_reduces_vlan_mtu(struct net_device *dev) +{ + /* TODO: reserve and use an additional IFF bit, if we get more users */ + return dev->priv_flags & IFF_MACSEC; +} + +extern struct pernet_operations __net_initdata loopback_net_ops; + +/* Logging, debugging and troubleshooting/diagnostic helpers. 
*/ + +/* netdev_printk helpers, similar to dev_printk */ + +static inline const char *netdev_name(const struct net_device *dev) +{ + if (!dev->name[0] || strchr(dev->name, '%')) + return "(unnamed net_device)"; + return dev->name; +} + +static inline bool netdev_unregistering(const struct net_device *dev) +{ + return dev->reg_state == NETREG_UNREGISTERING; +} + +static inline const char *netdev_reg_state(const struct net_device *dev) +{ + switch (dev->reg_state) { + case NETREG_UNINITIALIZED: return " (uninitialized)"; + case NETREG_REGISTERED: return ""; + case NETREG_UNREGISTERING: return " (unregistering)"; + case NETREG_UNREGISTERED: return " (unregistered)"; + case NETREG_RELEASED: return " (released)"; + case NETREG_DUMMY: return " (dummy)"; + } + + WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); + return " (unknown)"; +} + +__printf(3, 4) +void netdev_printk(const char *level, const struct net_device *dev, + const char *format, ...); +__printf(2, 3) +void netdev_emerg(const struct net_device *dev, const char *format, ...); +__printf(2, 3) +void netdev_alert(const struct net_device *dev, const char *format, ...); +__printf(2, 3) +void netdev_crit(const struct net_device *dev, const char *format, ...); +__printf(2, 3) +void netdev_err(const struct net_device *dev, const char *format, ...); +__printf(2, 3) +void netdev_warn(const struct net_device *dev, const char *format, ...); +__printf(2, 3) +void netdev_notice(const struct net_device *dev, const char *format, ...); +__printf(2, 3) +void netdev_info(const struct net_device *dev, const char *format, ...); + +#define netdev_level_once(level, dev, fmt, ...) \ +do { \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + netdev_printk(level, dev, fmt, ##__VA_ARGS__); \ + } \ +} while (0) + +#define netdev_emerg_once(dev, fmt, ...) \ + netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__) +#define netdev_alert_once(dev, fmt, ...) \ + netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__) +#define netdev_crit_once(dev, fmt, ...) \ + netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__) +#define netdev_err_once(dev, fmt, ...) \ + netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__) +#define netdev_warn_once(dev, fmt, ...) \ + netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__) +#define netdev_notice_once(dev, fmt, ...) \ + netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__) +#define netdev_info_once(dev, fmt, ...) \ + netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__) + +#define MODULE_ALIAS_NETDEV(device) \ + MODULE_ALIAS("netdev-" device) + +#if defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_netdev_dbg(__dev, format, ##args); \ +} while (0) +#elif defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#else +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ +}) +#endif + +#if defined(VERBOSE_DEBUG) +#define netdev_vdbg netdev_dbg +#else + +#define netdev_vdbg(dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + +/* + * netdev_WARN() acts like dev_printk(), but with the key difference + * of using a WARN/WARN_ON to get the message out, including the + * file/line information and a backtrace. + */ +#define netdev_WARN(dev, format, args...) 
\ + WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ + netdev_reg_state(dev), ##args) + +#define netdev_WARN_ONCE(dev, format, args...) \ + WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ + netdev_reg_state(dev), ##args) + +/* netif printk helpers, similar to netdev_printk */ + +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#define netif_level(level, priv, type, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) + +#if defined(CONFIG_DYNAMIC_DEBUG) +#define netif_dbg(priv, type, netdev, format, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + dynamic_netdev_dbg(netdev, format, ##args); \ +} while (0) +#elif defined(DEBUG) +#define netif_dbg(priv, type, dev, format, args...) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) +#else +#define netif_dbg(priv, type, dev, format, args...) \ +({ \ + if (0) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + +/* if @cond then downgrade to debug, else print at @level */ +#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \ + do { \ + if (cond) \ + netif_dbg(priv, type, netdev, fmt, ##args); \ + else \ + netif_ ## level(priv, type, netdev, fmt, ##args); \ + } while (0) + +#if defined(VERBOSE_DEBUG) +#define netif_vdbg netif_dbg +#else +#define netif_vdbg(priv, type, dev, format, args...) \ +({ \ + if (0) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + +/* + * The list of packet types we will receive (as opposed to discard) + * and the routines to invoke. + * + * Why 16. Because with 16 the only overlap we get on a hash of the + * low nibble of the protocol value is RARP/SNAP/X.25. 
+ * + * 0800 IP + * 0001 802.3 + * 0002 AX.25 + * 0004 802.2 + * 8035 RARP + * 0005 SNAP + * 0805 X.25 + * 0806 ARP + * 8137 IPX + * 0009 Localtalk + * 86DD IPv6 + */ +#define PTYPE_HASH_SIZE (16) +#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) + +#endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h new file mode 100644 index 000000000..9460a5635 --- /dev/null +++ b/include/linux/netfilter.h @@ -0,0 +1,471 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_NETFILTER_H +#define __LINUX_NETFILTER_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_NETFILTER +static inline int NF_DROP_GETERR(int verdict) +{ + return -(verdict >> NF_VERDICT_QBITS); +} + +static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1, + const union nf_inet_addr *a2) +{ + return a1->all[0] == a2->all[0] && + a1->all[1] == a2->all[1] && + a1->all[2] == a2->all[2] && + a1->all[3] == a2->all[3]; +} + +static inline void nf_inet_addr_mask(const union nf_inet_addr *a1, + union nf_inet_addr *result, + const union nf_inet_addr *mask) +{ + result->all[0] = a1->all[0] & mask->all[0]; + result->all[1] = a1->all[1] & mask->all[1]; + result->all[2] = a1->all[2] & mask->all[2]; + result->all[3] = a1->all[3] & mask->all[3]; +} + +int netfilter_init(void); + +struct sk_buff; + +struct nf_hook_ops; + +struct sock; + +struct nf_hook_state { + unsigned int hook; + u_int8_t pf; + struct net_device *in; + struct net_device *out; + struct sock *sk; + struct net *net; + int (*okfn)(struct net *, struct sock *, struct sk_buff *); +}; + +typedef unsigned int nf_hookfn(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state); +struct nf_hook_ops { + /* User fills in from here down. */ + nf_hookfn *hook; + struct net_device *dev; + void *priv; + u_int8_t pf; + unsigned int hooknum; + /* Hooks are ordered in ascending priority. */ + int priority; +}; + +struct nf_hook_entry { + nf_hookfn *hook; + void *priv; +}; + +struct nf_hook_entries_rcu_head { + struct rcu_head head; + void *allocation; +}; + +struct nf_hook_entries { + u16 num_hook_entries; + /* padding */ + struct nf_hook_entry hooks[]; + + /* trailer: pointers to original orig_ops of each hook, + * followed by rcu_head and scratch space used for freeing + * the structure via call_rcu. + * + * This is not part of struct nf_hook_entry since its only + * needed in slow path (hook register/unregister): + * const struct nf_hook_ops *orig_ops[] + * + * For the same reason, we store this at end -- its + * only needed when a hook is deleted, not during + * packet path processing: + * struct nf_hook_entries_rcu_head head + */ +}; + +static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e) +{ + unsigned int n = e->num_hook_entries; + const void *hook_end; + + hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! 
*/ + + return (struct nf_hook_ops **)hook_end; +} + +static inline int +nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb, + struct nf_hook_state *state) +{ + return entry->hook(entry->priv, skb, state); +} + +static inline void nf_hook_state_init(struct nf_hook_state *p, + unsigned int hook, + u_int8_t pf, + struct net_device *indev, + struct net_device *outdev, + struct sock *sk, + struct net *net, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + p->hook = hook; + p->pf = pf; + p->in = indev; + p->out = outdev; + p->sk = sk; + p->net = net; + p->okfn = okfn; +} + + + +struct nf_sockopt_ops { + struct list_head list; + + u_int8_t pf; + + /* Non-inclusive ranges: use 0/0/NULL to never get called. */ + int set_optmin; + int set_optmax; + int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); +#ifdef CONFIG_COMPAT + int (*compat_set)(struct sock *sk, int optval, + void __user *user, unsigned int len); +#endif + int get_optmin; + int get_optmax; + int (*get)(struct sock *sk, int optval, void __user *user, int *len); +#ifdef CONFIG_COMPAT + int (*compat_get)(struct sock *sk, int optval, + void __user *user, int *len); +#endif + /* Use the module struct to lock set/get code in place */ + struct module *owner; +}; + +/* Function to register/unregister hook points. */ +int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops); +void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops); +int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg, + unsigned int n); +void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, + unsigned int n); + +/* Functions to register get/setsockopt ranges (non-inclusive). You + need to check permissions yourself! */ +int nf_register_sockopt(struct nf_sockopt_ops *reg); +void nf_unregister_sockopt(struct nf_sockopt_ops *reg); + +#ifdef CONFIG_JUMP_LABEL +extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; +#endif + +int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, + const struct nf_hook_entries *e, unsigned int i); + +/** + * nf_hook - call a netfilter hook + * + * Returns 1 if the hook has allowed the packet to pass. The function + * okfn must be invoked by the caller in this case. Any other return + * value indicates the packet has been consumed by the hook. 
+ */ +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, + struct sock *sk, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + struct nf_hook_entries *hook_head = NULL; + int ret = 1; + +#ifdef CONFIG_JUMP_LABEL + if (__builtin_constant_p(pf) && + __builtin_constant_p(hook) && + !static_key_false(&nf_hooks_needed[pf][hook])) + return 1; +#endif + + rcu_read_lock(); + switch (pf) { + case NFPROTO_IPV4: + hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); + break; + case NFPROTO_IPV6: + hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); + break; + case NFPROTO_ARP: +#ifdef CONFIG_NETFILTER_FAMILY_ARP + if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp))) + break; + hook_head = rcu_dereference(net->nf.hooks_arp[hook]); +#endif + break; + case NFPROTO_BRIDGE: +#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE + hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); +#endif + break; +#if IS_ENABLED(CONFIG_DECNET) + case NFPROTO_DECNET: + hook_head = rcu_dereference(net->nf.hooks_decnet[hook]); + break; +#endif + default: + WARN_ON_ONCE(1); + break; + } + + if (hook_head) { + struct nf_hook_state state; + + nf_hook_state_init(&state, hook, pf, indev, outdev, + sk, net, okfn); + + ret = nf_hook_slow(skb, &state, hook_head, 0); + } + rcu_read_unlock(); + + return ret; +} + +/* Activate hook; either okfn or kfree_skb called, unless a hook + returns NF_STOLEN (in which case, it's up to the hook to deal with + the consequences). + + Returns -ERRNO if packet dropped. Zero means queued, stolen or + accepted. +*/ + +/* RR: + > I don't want nf_hook to return anything because people might forget + > about async and trust the return value to mean "packet was ok". 
+ + AK: + Just document it clearly, then you can expect some sense from kernel + coders :) +*/ + +static inline int +NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct sk_buff *skb, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *), + bool cond) +{ + int ret; + + if (!cond || + ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1)) + ret = okfn(net, sk, skb); + return ret; +} + +static inline int +NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, + struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn); + if (ret == 1) + ret = okfn(net, sk, skb); + return ret; +} + +static inline void +NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct list_head *head, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + struct sk_buff *skb, *next; + struct list_head sublist; + + INIT_LIST_HEAD(&sublist); + list_for_each_entry_safe(skb, next, head, list) { + skb_list_del_init(skb); + if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1) + list_add_tail(&skb->list, &sublist); + } + /* Put passed packets back on main list */ + list_splice(&sublist, head); +} + +/* Call setsockopt() */ +int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, + unsigned int len); +int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, + int *len); +#ifdef CONFIG_COMPAT +int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, + char __user *opt, unsigned int len); +int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, + char __user *opt, int *len); +#endif + +/* Call this before modifying an existing packet: ensures it is + modifiable and linear to the point you care about (writable_len). + Returns true or false. 
*/ +int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); + +struct flowi; +struct nf_queue_entry; + +__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol, + unsigned short family); + +__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol, unsigned short family); +int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict, unsigned short family); +int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry); + +#include + +struct nf_conn; +enum nf_nat_manip_type; +struct nlattr; +enum ip_conntrack_dir; + +struct nf_nat_hook { + int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip, + const struct nlattr *attr); + void (*decode_session)(struct sk_buff *skb, struct flowi *fl); + unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct, + enum nf_nat_manip_type mtype, + enum ip_conntrack_dir dir); +}; + +extern struct nf_nat_hook __rcu *nf_nat_hook; + +static inline void +nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) +{ +#ifdef CONFIG_NF_NAT_NEEDED + struct nf_nat_hook *nat_hook; + + rcu_read_lock(); + nat_hook = rcu_dereference(nf_nat_hook); + if (nat_hook && nat_hook->decode_session) + nat_hook->decode_session(skb, fl); + rcu_read_unlock(); +#endif +} + +#else /* !CONFIG_NETFILTER */ +static inline int +NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct sk_buff *skb, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *), + bool cond) +{ + return okfn(net, sk, skb); +} + +static inline int +NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct sk_buff *skb, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + return okfn(net, sk, skb); +} + +static inline void +NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct list_head *head, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + /* nothing to do */ +} + +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, + struct sock *sk, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + return 1; +} +struct flowi; +static inline void +nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) +{ +} +#endif /*CONFIG_NETFILTER*/ + +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +#include + +extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; +void nf_ct_attach(struct sk_buff *, const struct sk_buff *); +struct nf_conntrack_tuple; +bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb); +#else +static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} +struct nf_conntrack_tuple; +static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb) +{ + return false; +} +#endif + +struct nf_conn; +enum ip_conntrack_info; + +struct nf_ct_hook { + int (*update)(struct net *net, struct sk_buff *skb); + void (*destroy)(struct nf_conntrack *); + bool (*get_tuple_skb)(struct nf_conntrack_tuple *, + const struct sk_buff *); +}; +extern struct nf_ct_hook __rcu *nf_ct_hook; + +struct nlattr; + 
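/*
 * Illustrative sketch only, not part of this patch: it shows how the
 * nf_hookfn and nf_hook_ops types declared above are typically wired into
 * nf_register_net_hook()/nf_unregister_net_hook(). The names sample_hookfn,
 * sample_ops, sample_init and sample_exit are hypothetical; NF_ACCEPT,
 * NFPROTO_IPV4, NF_INET_PRE_ROUTING, NF_IP_PRI_FIRST and init_net come from
 * other kernel headers.
 */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

static unsigned int sample_hookfn(void *priv, struct sk_buff *skb,
				  const struct nf_hook_state *state)
{
	/* Inspect or mangle skb here; NF_ACCEPT lets the packet continue. */
	return NF_ACCEPT;
}

static const struct nf_hook_ops sample_ops = {
	.hook		= sample_hookfn,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_FIRST,
};

static int __init sample_init(void)
{
	/* Hooks are registered per network namespace. */
	return nf_register_net_hook(&init_net, &sample_ops);
}

static void __exit sample_exit(void)
{
	nf_unregister_net_hook(&init_net, &sample_ops);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");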
+struct nfnl_ct_hook { + struct nf_conn *(*get_ct)(const struct sk_buff *skb, + enum ip_conntrack_info *ctinfo); + size_t (*build_size)(const struct nf_conn *ct); + int (*build)(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + u_int16_t ct_attr, u_int16_t ct_info_attr); + int (*parse)(const struct nlattr *attr, struct nf_conn *ct); + int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, + u32 portid, u32 report); + void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, s32 off); +}; +extern struct nfnl_ct_hook __rcu *nfnl_ct_hook; + +/** + * nf_skb_duplicated - TEE target has sent a packet + * + * When a xtables target sends a packet, the OUTPUT and POSTROUTING + * hooks are traversed again, i.e. nft and xtables are invoked recursively. + * + * This is used by xtables TEE target to prevent the duplicated skb from + * being duplicated again. + */ +DECLARE_PER_CPU(bool, nf_skb_duplicated); + +#endif /*__LINUX_NETFILTER_H*/ diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h new file mode 100644 index 000000000..7e39049d2 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set.h @@ -0,0 +1,470 @@ +/* Copyright (C) 2000-2002 Joakim Axelsson + * Patrick Schaaf + * Martin Josefsson + * Copyright (C) 2003-2013 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _IP_SET_H +#define _IP_SET_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define _IP_SET_MODULE_DESC(a, b, c) \ + MODULE_DESCRIPTION(a " type of IP sets, revisions " b "-" c) +#define IP_SET_MODULE_DESC(a, b, c) \ + _IP_SET_MODULE_DESC(a, __stringify(b), __stringify(c)) + +/* Set features */ +enum ip_set_feature { + IPSET_TYPE_IP_FLAG = 0, + IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG), + IPSET_TYPE_PORT_FLAG = 1, + IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG), + IPSET_TYPE_MAC_FLAG = 2, + IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG), + IPSET_TYPE_IP2_FLAG = 3, + IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG), + IPSET_TYPE_NAME_FLAG = 4, + IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG), + IPSET_TYPE_IFACE_FLAG = 5, + IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG), + IPSET_TYPE_MARK_FLAG = 6, + IPSET_TYPE_MARK = (1 << IPSET_TYPE_MARK_FLAG), + IPSET_TYPE_NOMATCH_FLAG = 7, + IPSET_TYPE_NOMATCH = (1 << IPSET_TYPE_NOMATCH_FLAG), + /* Strictly speaking not a feature, but a flag for dumping: + * this settype must be dumped last */ + IPSET_DUMP_LAST_FLAG = 8, + IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG), +}; + +/* Set extensions */ +enum ip_set_extension { + IPSET_EXT_BIT_TIMEOUT = 0, + IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT), + IPSET_EXT_BIT_COUNTER = 1, + IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER), + IPSET_EXT_BIT_COMMENT = 2, + IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT), + IPSET_EXT_BIT_SKBINFO = 3, + IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO), + /* Mark set with an extension which needs to call destroy */ + IPSET_EXT_BIT_DESTROY = 7, + IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY), +}; + +#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT) +#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER) +#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT) +#define SET_WITH_SKBINFO(s) ((s)->extensions & IPSET_EXT_SKBINFO) +#define 
SET_WITH_FORCEADD(s) ((s)->flags & IPSET_CREATE_FLAG_FORCEADD) + +/* Extension id, in size order */ +enum ip_set_ext_id { + IPSET_EXT_ID_COUNTER = 0, + IPSET_EXT_ID_TIMEOUT, + IPSET_EXT_ID_SKBINFO, + IPSET_EXT_ID_COMMENT, + IPSET_EXT_ID_MAX, +}; + +struct ip_set; + +/* Extension type */ +struct ip_set_ext_type { + /* Destroy extension private data (can be NULL) */ + void (*destroy)(struct ip_set *set, void *ext); + enum ip_set_extension type; + enum ipset_cadt_flags flag; + /* Size and minimal alignment */ + u8 len; + u8 align; +}; + +extern const struct ip_set_ext_type ip_set_extensions[]; + +struct ip_set_counter { + atomic64_t bytes; + atomic64_t packets; +}; + +struct ip_set_comment_rcu { + struct rcu_head rcu; + char str[0]; +}; + +struct ip_set_comment { + struct ip_set_comment_rcu __rcu *c; +}; + +struct ip_set_skbinfo { + u32 skbmark; + u32 skbmarkmask; + u32 skbprio; + u16 skbqueue; + u16 __pad; +}; + +struct ip_set_ext { + struct ip_set_skbinfo skbinfo; + u64 packets; + u64 bytes; + char *comment; + u32 timeout; + u8 packets_op; + u8 bytes_op; +}; + +struct ip_set; + +#define ext_timeout(e, s) \ +((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])) +#define ext_counter(e, s) \ +((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])) +#define ext_comment(e, s) \ +((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])) +#define ext_skbinfo(e, s) \ +((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])) + +typedef int (*ipset_adtfn)(struct ip_set *set, void *value, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 cmdflags); + +/* Kernel API function options */ +struct ip_set_adt_opt { + u8 family; /* Actual protocol family */ + u8 dim; /* Dimension of match/target */ + u8 flags; /* Direction and negation flags */ + u32 cmdflags; /* Command-like flags */ + struct ip_set_ext ext; /* Extensions */ +}; + +/* Set type, variant-specific part */ +struct ip_set_type_variant { + /* Kernelspace: test/add/del entries + * returns negative error code, + * zero for no match/success to add/delete + * positive for matching element */ + int (*kadt)(struct ip_set *set, const struct sk_buff *skb, + const struct xt_action_param *par, + enum ipset_adt adt, struct ip_set_adt_opt *opt); + + /* Userspace: test/add/del entries + * returns negative error code, + * zero for no match/success to add/delete + * positive for matching element */ + int (*uadt)(struct ip_set *set, struct nlattr *tb[], + enum ipset_adt adt, u32 *lineno, u32 flags, bool retried); + + /* Low level add/del/test functions */ + ipset_adtfn adt[IPSET_ADT_MAX]; + + /* When adding entries and set is full, try to resize the set */ + int (*resize)(struct ip_set *set, bool retried); + /* Destroy the set */ + void (*destroy)(struct ip_set *set); + /* Flush the elements */ + void (*flush)(struct ip_set *set); + /* Expire entries before listing */ + void (*expire)(struct ip_set *set); + /* List set header data */ + int (*head)(struct ip_set *set, struct sk_buff *skb); + /* List elements */ + int (*list)(const struct ip_set *set, struct sk_buff *skb, + struct netlink_callback *cb); + /* Keep listing private when resizing runs parallel */ + void (*uref)(struct ip_set *set, struct netlink_callback *cb, + bool start); + + /* Return true if "b" set is the same as "a" + * according to the create set parameters */ + bool (*same_set)(const struct ip_set *a, const struct ip_set *b); +}; + +/* The core set type structure */ +struct ip_set_type { + struct 
list_head list; + + /* Typename */ + char name[IPSET_MAXNAMELEN]; + /* Protocol version */ + u8 protocol; + /* Set type dimension */ + u8 dimension; + /* + * Supported family: may be NFPROTO_UNSPEC for both + * NFPROTO_IPV4/NFPROTO_IPV6. + */ + u8 family; + /* Type revisions */ + u8 revision_min, revision_max; + /* Set features to control swapping */ + u16 features; + + /* Create set */ + int (*create)(struct net *net, struct ip_set *set, + struct nlattr *tb[], u32 flags); + + /* Attribute policies */ + const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1]; + const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1]; + + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; +}; + +/* register and unregister set type */ +extern int ip_set_type_register(struct ip_set_type *set_type); +extern void ip_set_type_unregister(struct ip_set_type *set_type); + +/* A generic IP set */ +struct ip_set { + /* The name of the set */ + char name[IPSET_MAXNAMELEN]; + /* Lock protecting the set data */ + spinlock_t lock; + /* References to the set */ + u32 ref; + /* References to the set for netlink events like dump, + * ref can be swapped out by ip_set_swap + */ + u32 ref_netlink; + /* The core set type */ + struct ip_set_type *type; + /* The type variant doing the real job */ + const struct ip_set_type_variant *variant; + /* The actual INET family of the set */ + u8 family; + /* The type revision */ + u8 revision; + /* Extensions */ + u8 extensions; + /* Create flags */ + u8 flags; + /* Default timeout value, if enabled */ + u32 timeout; + /* Number of elements (vs timeout) */ + u32 elements; + /* Size of the dynamic extensions (vs timeout) */ + size_t ext_size; + /* Element data size */ + size_t dsize; + /* Offsets to extensions in elements */ + size_t offset[IPSET_EXT_ID_MAX]; + /* The type specific data */ + void *data; +}; + +static inline void +ip_set_ext_destroy(struct ip_set *set, void *data) +{ + /* Check that the extension is enabled for the set and + * call it's destroy function for its extension part in data. 
+ */ + if (SET_WITH_COMMENT(set)) + ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy( + set, ext_comment(data, set)); +} + +static inline int +ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) +{ + u32 cadt_flags = 0; + + if (SET_WITH_TIMEOUT(set)) + if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(set->timeout)))) + return -EMSGSIZE; + if (SET_WITH_COUNTER(set)) + cadt_flags |= IPSET_FLAG_WITH_COUNTERS; + if (SET_WITH_COMMENT(set)) + cadt_flags |= IPSET_FLAG_WITH_COMMENT; + if (SET_WITH_SKBINFO(set)) + cadt_flags |= IPSET_FLAG_WITH_SKBINFO; + if (SET_WITH_FORCEADD(set)) + cadt_flags |= IPSET_FLAG_WITH_FORCEADD; + + if (!cadt_flags) + return 0; + return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags)); +} + +/* Netlink CB args */ +enum { + IPSET_CB_NET = 0, /* net namespace */ + IPSET_CB_DUMP, /* dump single set/all sets */ + IPSET_CB_INDEX, /* set index */ + IPSET_CB_PRIVATE, /* set private data */ + IPSET_CB_ARG0, /* type specific */ + IPSET_CB_ARG1, +}; + +/* register and unregister set references */ +extern ip_set_id_t ip_set_get_byname(struct net *net, + const char *name, struct ip_set **set); +extern void ip_set_put_byindex(struct net *net, ip_set_id_t index); +extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name); +extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index); +extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index); + +/* API for iptables set match, and SET target */ + +extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb, + const struct xt_action_param *par, + struct ip_set_adt_opt *opt); +extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb, + const struct xt_action_param *par, + struct ip_set_adt_opt *opt); +extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb, + const struct xt_action_param *par, + struct ip_set_adt_opt *opt); + +/* Utility functions */ +extern void *ip_set_alloc(size_t size); +extern void ip_set_free(void *members); +extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); +extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); +extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], + size_t len, size_t align); +extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], + struct ip_set_ext *ext); +extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, + const void *e, bool active); +extern bool ip_set_match_extensions(struct ip_set *set, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, + u32 flags, void *data); + +static inline int +ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) +{ + __be32 ip; + int ret = ip_set_get_ipaddr4(nla, &ip); + + if (ret) + return ret; + *ipaddr = ntohl(ip); + return 0; +} + +/* Ignore IPSET_ERR_EXIST errors if asked to do so? 
*/ +static inline bool +ip_set_eexist(int ret, u32 flags) +{ + return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST); +} + +/* Match elements marked with nomatch */ +static inline bool +ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set) +{ + return adt == IPSET_TEST && + (set->type->features & IPSET_TYPE_NOMATCH) && + ((flags >> 16) & IPSET_FLAG_NOMATCH) && + (ret > 0 || ret == -ENOTEMPTY); +} + +/* Check the NLA_F_NET_BYTEORDER flag */ +static inline bool +ip_set_attr_netorder(struct nlattr *tb[], int type) +{ + return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER); +} + +static inline bool +ip_set_optattr_netorder(struct nlattr *tb[], int type) +{ + return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER); +} + +/* Useful converters */ +static inline u32 +ip_set_get_h32(const struct nlattr *attr) +{ + return ntohl(nla_get_be32(attr)); +} + +static inline u16 +ip_set_get_h16(const struct nlattr *attr) +{ + return ntohs(nla_get_be16(attr)); +} + +#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED) +#define ipset_nest_end(skb, start) nla_nest_end(skb, start) + +static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr) +{ + struct nlattr *__nested = ipset_nest_start(skb, type); + int ret; + + if (!__nested) + return -EMSGSIZE; + ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); + if (!ret) + ipset_nest_end(skb, __nested); + return ret; +} + +static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, + const struct in6_addr *ipaddrptr) +{ + struct nlattr *__nested = ipset_nest_start(skb, type); + int ret; + + if (!__nested) + return -EMSGSIZE; + ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr); + if (!ret) + ipset_nest_end(skb, __nested); + return ret; +} + +/* Get address from skbuff */ +static inline __be32 +ip4addr(const struct sk_buff *skb, bool src) +{ + return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr; +} + +static inline void +ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr) +{ + *addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr; +} + +static inline void +ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr) +{ + memcpy(addr, src ? 
&ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr, + sizeof(*addr)); +} + +#include +#include +#include +#include + +#define IP_SET_INIT_KEXT(skb, opt, set) \ + { .bytes = (skb)->len, .packets = 1, \ + .timeout = ip_set_adt_opt_timeout(opt, set) } + +#define IP_SET_INIT_UEXT(set) \ + { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \ + .timeout = (set)->timeout } + +#define IPSET_CONCAT(a, b) a##b +#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b) + +#endif /*_IP_SET_H */ diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h new file mode 100644 index 000000000..2dddbc6dc --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_bitmap.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __IP_SET_BITMAP_H +#define __IP_SET_BITMAP_H + +#include + +#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF + +enum { + IPSET_ADD_STORE_PLAIN_TIMEOUT = -1, + IPSET_ADD_FAILED = 1, + IPSET_ADD_START_STORED_TIMEOUT, +}; + +/* Common functions */ + +static inline u32 +range_to_mask(u32 from, u32 to, u8 *bits) +{ + u32 mask = 0xFFFFFFFE; + + *bits = 32; + while (--(*bits) > 0 && mask && (to & mask) != from) + mask <<= 1; + + return mask; +} + +#endif /* __IP_SET_BITMAP_H */ diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h new file mode 100644 index 000000000..70877f8de --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_comment.h @@ -0,0 +1,76 @@ +#ifndef _IP_SET_COMMENT_H +#define _IP_SET_COMMENT_H + +/* Copyright (C) 2013 Oliver Smith + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifdef __KERNEL__ + +static inline char* +ip_set_comment_uget(struct nlattr *tb) +{ + return nla_data(tb); +} + +/* Called from uadd only, protected by the set spinlock. + * The kadt functions don't use the comment extensions in any way. + */ +static inline void +ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, + const struct ip_set_ext *ext) +{ + struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); + size_t len = ext->comment ? strlen(ext->comment) : 0; + + if (unlikely(c)) { + set->ext_size -= sizeof(*c) + strlen(c->str) + 1; + kfree_rcu(c, rcu); + rcu_assign_pointer(comment->c, NULL); + } + if (!len) + return; + if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) + len = IPSET_MAX_COMMENT_SIZE; + c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); + if (unlikely(!c)) + return; + strlcpy(c->str, ext->comment, len + 1); + set->ext_size += sizeof(*c) + strlen(c->str) + 1; + rcu_assign_pointer(comment->c, c); +} + +/* Used only when dumping a set, protected by rcu_read_lock() */ +static inline int +ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) +{ + struct ip_set_comment_rcu *c = rcu_dereference(comment->c); + + if (!c) + return 0; + return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str); +} + +/* Called from uadd/udel, flush or the garbage collectors protected + * by the set spinlock. + * Called when the set is destroyed and when there can't be any user + * of the set data anymore. 
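A stand-alone sketch of the range_to_mask() helper from ip_set_bitmap.h above: given an inclusive IPv4 range [from, to] in host byte order, it widens a netmask until the network that starts at "from" also covers "to", returning the mask and the remaining prefix length. Only range_to_mask() itself comes from the header; the driver code around it is local to the example.

#include <stdio.h>
#include <stdint.h>

static uint32_t range_to_mask(uint32_t from, uint32_t to, uint8_t *bits)
{
	uint32_t mask = 0xFFFFFFFE;

	*bits = 32;
	while (--(*bits) > 0 && mask && (to & mask) != from)
		mask <<= 1;

	return mask;
}

int main(void)
{
	uint8_t bits;
	/* 192.168.1.0 - 192.168.1.255 collapses to a /24 */
	uint32_t mask = range_to_mask(0xC0A80100, 0xC0A801FF, &bits);

	printf("mask = 0x%08x, prefix bits = %u\n", mask, bits);
	return 0;
}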
+ */ +static inline void +ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment) +{ + struct ip_set_comment_rcu *c; + + c = rcu_dereference_protected(comment->c, 1); + if (unlikely(!c)) + return; + set->ext_size -= sizeof(*c) + strlen(c->str) + 1; + kfree_rcu(c, rcu); + rcu_assign_pointer(comment->c, NULL); +} + +#endif +#endif diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h new file mode 100644 index 000000000..3d33a2c3f --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_counter.h @@ -0,0 +1,88 @@ +#ifndef _IP_SET_COUNTER_H +#define _IP_SET_COUNTER_H + +/* Copyright (C) 2015 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifdef __KERNEL__ + +static inline void +ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) +{ + atomic64_add((long long)bytes, &(counter)->bytes); +} + +static inline void +ip_set_add_packets(u64 packets, struct ip_set_counter *counter) +{ + atomic64_add((long long)packets, &(counter)->packets); +} + +static inline u64 +ip_set_get_bytes(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->bytes); +} + +static inline u64 +ip_set_get_packets(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->packets); +} + +static inline bool +ip_set_match_counter(u64 counter, u64 match, u8 op) +{ + switch (op) { + case IPSET_COUNTER_NONE: + return true; + case IPSET_COUNTER_EQ: + return counter == match; + case IPSET_COUNTER_NE: + return counter != match; + case IPSET_COUNTER_LT: + return counter < match; + case IPSET_COUNTER_GT: + return counter > match; + } + return false; +} + +static inline void +ip_set_update_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext, u32 flags) +{ + if (ext->packets != ULLONG_MAX && + !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { + ip_set_add_bytes(ext->bytes, counter); + ip_set_add_packets(ext->packets, counter); + } +} + +static inline bool +ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) +{ + return nla_put_net64(skb, IPSET_ATTR_BYTES, + cpu_to_be64(ip_set_get_bytes(counter)), + IPSET_ATTR_PAD) || + nla_put_net64(skb, IPSET_ATTR_PACKETS, + cpu_to_be64(ip_set_get_packets(counter)), + IPSET_ATTR_PAD); +} + +static inline void +ip_set_init_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext) +{ + if (ext->bytes != ULLONG_MAX) + atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); + if (ext->packets != ULLONG_MAX) + atomic64_set(&(counter)->packets, (long long)(ext->packets)); +} + +#endif /* __KERNEL__ */ +#endif /* _IP_SET_COUNTER_H */ diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h new file mode 100644 index 000000000..ac6a11d38 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_getport.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _IP_SET_GETPORT_H +#define _IP_SET_GETPORT_H + +extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, + __be16 *port, u8 *proto); + +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) +extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, + __be16 *port, u8 *proto); +#else +static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, + __be16 *port, u8 *proto) +{ + return false; +} 
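A user-space sketch of the comparison done by ip_set_match_counter() above: an element's byte or packet counter is matched against a user-supplied value with one of five operators. The enum below is a local stand-in for the IPSET_COUNTER_* constants (their exact numeric values are defined in the uapi header, not here).

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum demo_counter_op {		/* stand-in for IPSET_COUNTER_* */
	DEMO_COUNTER_NONE,
	DEMO_COUNTER_EQ,
	DEMO_COUNTER_NE,
	DEMO_COUNTER_LT,
	DEMO_COUNTER_GT,
};

static bool demo_match_counter(uint64_t counter, uint64_t match, int op)
{
	switch (op) {
	case DEMO_COUNTER_NONE:
		return true;			/* no counter condition given */
	case DEMO_COUNTER_EQ:
		return counter == match;
	case DEMO_COUNTER_NE:
		return counter != match;
	case DEMO_COUNTER_LT:
		return counter < match;
	case DEMO_COUNTER_GT:
		return counter > match;
	}
	return false;
}

int main(void)
{
	/* e.g. "match only elements that have moved more than 1000 bytes" */
	printf("%d\n", demo_match_counter(4096, 1000, DEMO_COUNTER_GT));
	printf("%d\n", demo_match_counter(512, 1000, DEMO_COUNTER_GT));
	return 0;
}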
+#endif + +extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, + __be16 *port); + +static inline bool ip_set_proto_with_ports(u8 proto) +{ + switch (proto) { + case IPPROTO_TCP: + case IPPROTO_SCTP: + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + return true; + } + return false; +} + +#endif /*_IP_SET_GETPORT_H*/ diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h new file mode 100644 index 000000000..838abab67 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_hash.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __IP_SET_HASH_H +#define __IP_SET_HASH_H + +#include + + +#define IPSET_DEFAULT_HASHSIZE 1024 +#define IPSET_MIMINAL_HASHSIZE 64 +#define IPSET_DEFAULT_MAXELEM 65536 +#define IPSET_DEFAULT_PROBES 4 +#define IPSET_DEFAULT_RESIZE 100 + +#endif /* __IP_SET_HASH_H */ diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h new file mode 100644 index 000000000..a61fe2a7e --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_list.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __IP_SET_LIST_H +#define __IP_SET_LIST_H + +#include + + +#define IP_SET_LIST_DEFAULT_SIZE 8 +#define IP_SET_LIST_MIN_SIZE 4 +#define IP_SET_LIST_MAX_SIZE 65536 + +#endif /* __IP_SET_LIST_H */ diff --git a/include/linux/netfilter/ipset/ip_set_skbinfo.h b/include/linux/netfilter/ipset/ip_set_skbinfo.h new file mode 100644 index 000000000..29d7ef2bc --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_skbinfo.h @@ -0,0 +1,46 @@ +#ifndef _IP_SET_SKBINFO_H +#define _IP_SET_SKBINFO_H + +/* Copyright (C) 2015 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifdef __KERNEL__ + +static inline void +ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + mext->skbinfo = *skbinfo; +} + +static inline bool +ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) +{ + /* Send nonzero parameters only */ + return ((skbinfo->skbmark || skbinfo->skbmarkmask) && + nla_put_net64(skb, IPSET_ATTR_SKBMARK, + cpu_to_be64((u64)skbinfo->skbmark << 32 | + skbinfo->skbmarkmask), + IPSET_ATTR_PAD)) || + (skbinfo->skbprio && + nla_put_net32(skb, IPSET_ATTR_SKBPRIO, + cpu_to_be32(skbinfo->skbprio))) || + (skbinfo->skbqueue && + nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, + cpu_to_be16(skbinfo->skbqueue))); +} + +static inline void +ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext) +{ + *skbinfo = ext->skbinfo; +} + +#endif /* __KERNEL__ */ +#endif /* _IP_SET_SKBINFO_H */ diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h new file mode 100644 index 000000000..8ce271e18 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -0,0 +1,81 @@ +#ifndef _IP_SET_TIMEOUT_H +#define _IP_SET_TIMEOUT_H + +/* Copyright (C) 2003-2013 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
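A quick user-space mirror of ip_set_proto_with_ports() above, showing which transport protocols the port-based set types treat as carrying 16-bit ports. IPPROTO_UDPLITE may be missing from older libc headers, so a fallback definition (protocol number 136) is added here purely for the example.

#include <stdio.h>
#include <stdbool.h>
#include <netinet/in.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE 136
#endif

static bool proto_with_ports(unsigned char proto)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_SCTP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		return true;	/* these carry src/dst ports a set can store */
	}
	return false;
}

int main(void)
{
	printf("tcp: %d, icmp: %d\n",
	       proto_with_ports(IPPROTO_TCP), proto_with_ports(IPPROTO_ICMP));
	return 0;
}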
+ */ + +#ifdef __KERNEL__ + +/* How often should the gc be run by default */ +#define IPSET_GC_TIME (3 * 60) + +/* Timeout period depending on the timeout value of the given set */ +#define IPSET_GC_PERIOD(timeout) \ + ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1) + +/* Entry is set with no timeout value */ +#define IPSET_ELEM_PERMANENT 0 + +/* Set is defined with timeout support: timeout value may be 0 */ +#define IPSET_NO_TIMEOUT UINT_MAX + +/* Max timeout value, see msecs_to_jiffies() in jiffies.h */ +#define IPSET_MAX_TIMEOUT (UINT_MAX >> 1)/MSEC_PER_SEC + +#define ip_set_adt_opt_timeout(opt, set) \ +((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout) + +static inline unsigned int +ip_set_timeout_uget(struct nlattr *tb) +{ + unsigned int timeout = ip_set_get_h32(tb); + + /* Normalize to fit into jiffies */ + if (timeout > IPSET_MAX_TIMEOUT) + timeout = IPSET_MAX_TIMEOUT; + + return timeout; +} + +static inline bool +ip_set_timeout_expired(const unsigned long *t) +{ + return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); +} + +static inline void +ip_set_timeout_set(unsigned long *timeout, u32 value) +{ + unsigned long t; + + if (!value) { + *timeout = IPSET_ELEM_PERMANENT; + return; + } + + t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies; + if (t == IPSET_ELEM_PERMANENT) + /* Bingo! :-) */ + t--; + *timeout = t; +} + +static inline u32 +ip_set_timeout_get(const unsigned long *timeout) +{ + u32 t; + + if (*timeout == IPSET_ELEM_PERMANENT) + return 0; + + t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; + /* Zero value in userspace means no timeout */ + return t == 0 ? 1 : t; +} + +#endif /* __KERNEL__ */ +#endif /* _IP_SET_TIMEOUT_H */ diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h new file mode 100644 index 000000000..f59094e61 --- /dev/null +++ b/include/linux/netfilter/ipset/pfxlen.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PFXLEN_H +#define _PFXLEN_H + +#include +#include +#include + +/* Prefixlen maps, by Jan Engelhardt */ +extern const union nf_inet_addr ip_set_netmask_map[]; +extern const union nf_inet_addr ip_set_hostmask_map[]; + +static inline __be32 +ip_set_netmask(u8 pfxlen) +{ + return ip_set_netmask_map[pfxlen].ip; +} + +static inline const __be32 * +ip_set_netmask6(u8 pfxlen) +{ + return &ip_set_netmask_map[pfxlen].ip6[0]; +} + +static inline u32 +ip_set_hostmask(u8 pfxlen) +{ + return (__force u32) ip_set_hostmask_map[pfxlen].ip; +} + +static inline const __be32 * +ip_set_hostmask6(u8 pfxlen) +{ + return &ip_set_hostmask_map[pfxlen].ip6[0]; +} + +extern u32 ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr); + +#define ip_set_mask_from_to(from, to, cidr) \ +do { \ + from &= ip_set_hostmask(cidr); \ + to = from | ~ip_set_hostmask(cidr); \ +} while (0) + +static inline void +ip6_netmask(union nf_inet_addr *ip, u8 prefix) +{ + ip->ip6[0] &= ip_set_netmask6(prefix)[0]; + ip->ip6[1] &= ip_set_netmask6(prefix)[1]; + ip->ip6[2] &= ip_set_netmask6(prefix)[2]; + ip->ip6[3] &= ip_set_netmask6(prefix)[3]; +} + +#endif /*_PFXLEN_H */ diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h new file mode 100644 index 000000000..34345e543 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_amanda.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_AMANDA_H +#define _NF_CONNTRACK_AMANDA_H +/* AMANDA tracking. 
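A stand-alone sketch of the timeout round trip implemented by ip_set_timeout_set()/ip_set_timeout_get() above, using a simulated clock: HZ is assumed to be 100 and "jiffies" is a plain variable, both local choices for this example only. It shows why 0 seconds means "permanent", why the value is clamped to the maximum, and why a stored timeout that would read back as 0 is reported as 1 instead.

#include <stdio.h>

#define HZ		100U			/* assumed tick rate for the sketch */
#define MSEC_PER_SEC	1000U
#define ELEM_PERMANENT	0UL			/* mirrors IPSET_ELEM_PERMANENT */
#define MAX_TIMEOUT	((~0U >> 1) / MSEC_PER_SEC) /* mirrors IPSET_MAX_TIMEOUT */

static unsigned long jiffies;			/* simulated clock */

static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return (unsigned long)((unsigned long long)ms * HZ / MSEC_PER_SEC);
}

static void timeout_set(unsigned long *timeout, unsigned int seconds)
{
	unsigned long t;

	if (seconds > MAX_TIMEOUT)		/* normalize, as ip_set_timeout_uget() does */
		seconds = MAX_TIMEOUT;
	if (!seconds) {
		*timeout = ELEM_PERMANENT;	/* entry never expires */
		return;
	}
	t = msecs_to_jiffies(seconds * MSEC_PER_SEC) + jiffies;
	if (t == ELEM_PERMANENT)
		t--;				/* avoid colliding with the "permanent" marker */
	*timeout = t;
}

static unsigned int timeout_get(const unsigned long *timeout)
{
	unsigned int t;

	if (*timeout == ELEM_PERMANENT)
		return 0;
	t = (unsigned int)((*timeout - jiffies) / HZ);	/* jiffies back to whole seconds */
	return t == 0 ? 1 : t;			/* 0 would mean "no timeout" to userspace */
}

int main(void)
{
	unsigned long stored;

	jiffies = 123456;			/* arbitrary starting point */
	timeout_set(&stored, 600);
	printf("stored %lu, reads back as %u s\n", stored, timeout_get(&stored));
	timeout_set(&stored, 0);
	printf("permanent entry reads back as %u s\n", timeout_get(&stored));
	return 0;
}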
*/ + +extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned int matchoff, + unsigned int matchlen, + struct nf_conntrack_expect *exp); +#endif /* _NF_CONNTRACK_AMANDA_H */ diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h new file mode 100644 index 000000000..03097fa70 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_COMMON_H +#define _NF_CONNTRACK_COMMON_H + +#include + +struct ip_conntrack_stat { + unsigned int found; + unsigned int invalid; + unsigned int ignore; + unsigned int insert; + unsigned int insert_failed; + unsigned int drop; + unsigned int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; +}; + +/* call to create an explicit dependency on nf_conntrack. */ +void need_conntrack(void); + +#endif /* _NF_CONNTRACK_COMMON_H */ diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h new file mode 100644 index 000000000..ace0f952d --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_dccp.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_DCCP_H +#define _NF_CONNTRACK_DCCP_H + +/* Exposed to userspace over nfnetlink */ +enum ct_dccp_states { + CT_DCCP_NONE, + CT_DCCP_REQUEST, + CT_DCCP_RESPOND, + CT_DCCP_PARTOPEN, + CT_DCCP_OPEN, + CT_DCCP_CLOSEREQ, + CT_DCCP_CLOSING, + CT_DCCP_TIMEWAIT, + CT_DCCP_IGNORE, + CT_DCCP_INVALID, + __CT_DCCP_MAX +}; +#define CT_DCCP_MAX (__CT_DCCP_MAX - 1) + +enum ct_dccp_roles { + CT_DCCP_ROLE_CLIENT, + CT_DCCP_ROLE_SERVER, + __CT_DCCP_ROLE_MAX +}; +#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) + +#ifdef __KERNEL__ +#include + +struct nf_ct_dccp { + u_int8_t role[IP_CT_DIR_MAX]; + u_int8_t state; + u_int8_t last_pkt; + u_int8_t last_dir; + u_int64_t handshake_seq; +}; + +#endif /* __KERNEL__ */ + +#endif /* _NF_CONNTRACK_DCCP_H */ diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h new file mode 100644 index 000000000..73a296dfd --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_ftp.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_FTP_H +#define _NF_CONNTRACK_FTP_H + +#include + + +#define FTP_PORT 21 + +#define NF_CT_FTP_SEQ_PICKUP (1 << 0) + +#define NUM_SEQ_TO_REMEMBER 2 +/* This structure exists only once per master */ +struct nf_ct_ftp_master { + /* Valid seq positions for cmd matching after newline */ + u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER]; + /* 0 means seq_match_aft_nl not set */ + u_int16_t seq_aft_nl_num[IP_CT_DIR_MAX]; + /* pickup sequence tracking, useful for conntrackd */ + u_int16_t flags[IP_CT_DIR_MAX]; +}; + +struct nf_conntrack_expect; + +/* For NAT to hook in when we find a packet which describes what other + * connection we should expect. 
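A small user-space table mirroring the ct_dccp_states enum above, the kind of mapping a debugging or logging tool might use when printing tracked DCCP connections as they move through the Request/Respond/PartOpen handshake towards Open. Only the state names come from the header; the lookup helper is illustrative.

#include <stdio.h>

static const char *const dccp_state_names[] = {
	"NONE", "REQUEST", "RESPOND", "PARTOPEN", "OPEN",
	"CLOSEREQ", "CLOSING", "TIMEWAIT", "IGNORE", "INVALID",
};

static const char *dccp_state_name(unsigned int state)
{
	if (state >= sizeof(dccp_state_names) / sizeof(dccp_state_names[0]))
		return "?";
	return dccp_state_names[state];
}

int main(void)
{
	/* e.g. the fourth state (index 3) is CT_DCCP_PARTOPEN */
	printf("state 3 = %s\n", dccp_state_name(3));
	return 0;
}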
*/ +extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, + enum ip_conntrack_info ctinfo, + enum nf_ct_ftp_type type, + unsigned int protoff, + unsigned int matchoff, + unsigned int matchlen, + struct nf_conntrack_expect *exp); +#endif /* _NF_CONNTRACK_FTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h new file mode 100644 index 000000000..f76ed373a --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_h323.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_H323_H +#define _NF_CONNTRACK_H323_H + +#ifdef __KERNEL__ + +#include + +#define RAS_PORT 1719 +#define Q931_PORT 1720 +#define H323_RTP_CHANNEL_MAX 4 /* Audio, video, FAX and other */ + +/* This structure exists only once per master */ +struct nf_ct_h323_master { + + /* Original and NATed Q.931 or H.245 signal ports */ + __be16 sig_port[IP_CT_DIR_MAX]; + + /* Original and NATed RTP ports */ + __be16 rtp_port[H323_RTP_CHANNEL_MAX][IP_CT_DIR_MAX]; + + union { + /* RAS connection timeout */ + u_int32_t timeout; + + /* Next TPKT length (for separate TPKT header and data) */ + u_int16_t tpkt_len[IP_CT_DIR_MAX]; + }; +}; + +struct nf_conn; + +int get_h225_addr(struct nf_conn *ct, unsigned char *data, + TransportAddress *taddr, union nf_inet_addr *addr, + __be16 *port); +void nf_conntrack_h245_expect(struct nf_conn *new, + struct nf_conntrack_expect *this); +void nf_conntrack_q931_expect(struct nf_conn *new, + struct nf_conntrack_expect *this); +extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff, + unsigned char **data, int dataoff, + H245_TransportAddress *taddr, + union nf_inet_addr *addr, + __be16 port); +extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff, + unsigned char **data, int dataoff, + TransportAddress *taddr, + union nf_inet_addr *addr, + __be16 port); +extern int (*set_sig_addr_hook) (struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, unsigned char **data, + TransportAddress *taddr, int count); +extern int (*set_ras_addr_hook) (struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, unsigned char **data, + TransportAddress *taddr, int count); +extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, unsigned char **data, + int dataoff, + H245_TransportAddress *taddr, + __be16 port, __be16 rtp_port, + struct nf_conntrack_expect *rtp_exp, + struct nf_conntrack_expect *rtcp_exp); +extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned char **data, int dataoff, + H245_TransportAddress *taddr, __be16 port, + struct nf_conntrack_expect *exp); +extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned char **data, int dataoff, + TransportAddress *taddr, __be16 port, + struct nf_conntrack_expect *exp); +extern int (*nat_callforwarding_hook) (struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned char **data, int dataoff, + TransportAddress *taddr, + __be16 port, + struct nf_conntrack_expect *exp); +extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned char **data, TransportAddress *taddr, + int idx, __be16 port, + struct 
nf_conntrack_expect *exp); + +#endif + +#endif diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h new file mode 100644 index 000000000..3176a277e --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h @@ -0,0 +1,98 @@ +/**************************************************************************** + * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323 + * conntrack/NAT module. + * + * Copyright (c) 2006 by Jing Min Zhao + * + * This source code is licensed under General Public License version 2. + * + * + * This library is based on H.225 version 4, H.235 version 2 and H.245 + * version 7. It is extremely optimized to decode only the absolutely + * necessary objects in a signal for Linux kernel NAT module use, so don't + * expect it to be a full ASN.1 library. + * + * Features: + * + * 1. Small. The total size of code plus data is less than 20 KB (IA32). + * 2. Fast. Decoding Netmeeting's Setup signal 1 million times on a PIII 866 + * takes only 3.9 seconds. + * 3. No memory allocation. It uses a static object. No need to initialize or + * cleanup. + * 4. Thread safe. + * 5. Support embedded architectures that has no misaligned memory access + * support. + * + * Limitations: + * + * 1. At most 30 faststart entries. Actually this is limited by ethernet's MTU. + * If a Setup signal contains more than 30 faststart, the packet size will + * very likely exceed the MTU size, then the TPKT will be fragmented. I + * don't know how to handle this in a Netfilter module. Anybody can help? + * Although I think 30 is enough for most of the cases. + * 2. IPv4 addresses only. + * + ****************************************************************************/ + +#ifndef _NF_CONNTRACK_HELPER_H323_ASN1_H_ +#define _NF_CONNTRACK_HELPER_H323_ASN1_H_ + +/***************************************************************************** + * H.323 Types + ****************************************************************************/ +#include + +typedef struct { + enum { + Q931_NationalEscape = 0x00, + Q931_Alerting = 0x01, + Q931_CallProceeding = 0x02, + Q931_Connect = 0x07, + Q931_ConnectAck = 0x0F, + Q931_Progress = 0x03, + Q931_Setup = 0x05, + Q931_SetupAck = 0x0D, + Q931_Resume = 0x26, + Q931_ResumeAck = 0x2E, + Q931_ResumeReject = 0x22, + Q931_Suspend = 0x25, + Q931_SuspendAck = 0x2D, + Q931_SuspendReject = 0x21, + Q931_UserInformation = 0x20, + Q931_Disconnect = 0x45, + Q931_Release = 0x4D, + Q931_ReleaseComplete = 0x5A, + Q931_Restart = 0x46, + Q931_RestartAck = 0x4E, + Q931_Segment = 0x60, + Q931_CongestionCtrl = 0x79, + Q931_Information = 0x7B, + Q931_Notify = 0x6E, + Q931_Status = 0x7D, + Q931_StatusEnquiry = 0x75, + Q931_Facility = 0x62 + } MessageType; + H323_UserInformation UUIE; +} Q931; + +/***************************************************************************** + * Decode Functions Return Codes + ****************************************************************************/ + +#define H323_ERROR_NONE 0 /* Decoded successfully */ +#define H323_ERROR_STOP 1 /* Decoding stopped, not really an error */ +#define H323_ERROR_BOUND -1 +#define H323_ERROR_RANGE -2 + + +/***************************************************************************** + * Decode Functions + ****************************************************************************/ + +int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage * ras); +int DecodeQ931(unsigned char *buf, size_t sz, Q931 * q931); +int 
DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz, + MultimediaSystemControlMessage * + mscm); + +#endif diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h new file mode 100644 index 000000000..b0821f45f --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_h323_types.h @@ -0,0 +1,934 @@ +/* Generated by Jing Min Zhao's ASN.1 parser, May 16 2007 + * + * Copyright (c) 2006 Jing Min Zhao + * + * This source code is licensed under General Public License version 2. + */ + +typedef struct TransportAddress_ipAddress { /* SEQUENCE */ + int options; /* No use */ + unsigned int ip; +} TransportAddress_ipAddress; + +typedef struct TransportAddress_ip6Address { /* SEQUENCE */ + int options; /* No use */ + unsigned int ip; +} TransportAddress_ip6Address; + +typedef struct TransportAddress { /* CHOICE */ + enum { + eTransportAddress_ipAddress, + eTransportAddress_ipSourceRoute, + eTransportAddress_ipxAddress, + eTransportAddress_ip6Address, + eTransportAddress_netBios, + eTransportAddress_nsap, + eTransportAddress_nonStandardAddress, + } choice; + union { + TransportAddress_ipAddress ipAddress; + TransportAddress_ip6Address ip6Address; + }; +} TransportAddress; + +typedef struct DataProtocolCapability { /* CHOICE */ + enum { + eDataProtocolCapability_nonStandard, + eDataProtocolCapability_v14buffered, + eDataProtocolCapability_v42lapm, + eDataProtocolCapability_hdlcFrameTunnelling, + eDataProtocolCapability_h310SeparateVCStack, + eDataProtocolCapability_h310SingleVCStack, + eDataProtocolCapability_transparent, + eDataProtocolCapability_segmentationAndReassembly, + eDataProtocolCapability_hdlcFrameTunnelingwSAR, + eDataProtocolCapability_v120, + eDataProtocolCapability_separateLANStack, + eDataProtocolCapability_v76wCompression, + eDataProtocolCapability_tcp, + eDataProtocolCapability_udp, + } choice; +} DataProtocolCapability; + +typedef struct DataApplicationCapability_application { /* CHOICE */ + enum { + eDataApplicationCapability_application_nonStandard, + eDataApplicationCapability_application_t120, + eDataApplicationCapability_application_dsm_cc, + eDataApplicationCapability_application_userData, + eDataApplicationCapability_application_t84, + eDataApplicationCapability_application_t434, + eDataApplicationCapability_application_h224, + eDataApplicationCapability_application_nlpid, + eDataApplicationCapability_application_dsvdControl, + eDataApplicationCapability_application_h222DataPartitioning, + eDataApplicationCapability_application_t30fax, + eDataApplicationCapability_application_t140, + eDataApplicationCapability_application_t38fax, + eDataApplicationCapability_application_genericDataCapability, + } choice; + union { + DataProtocolCapability t120; + }; +} DataApplicationCapability_application; + +typedef struct DataApplicationCapability { /* SEQUENCE */ + int options; /* No use */ + DataApplicationCapability_application application; +} DataApplicationCapability; + +typedef struct DataType { /* CHOICE */ + enum { + eDataType_nonStandard, + eDataType_nullData, + eDataType_videoData, + eDataType_audioData, + eDataType_data, + eDataType_encryptionData, + eDataType_h235Control, + eDataType_h235Media, + eDataType_multiplexedStream, + } choice; + union { + DataApplicationCapability data; + }; +} DataType; + +typedef struct UnicastAddress_iPAddress { /* SEQUENCE */ + int options; /* No use */ + unsigned int network; +} UnicastAddress_iPAddress; + +typedef struct UnicastAddress_iP6Address { /* SEQUENCE */ + int 
options; /* No use */ + unsigned int network; +} UnicastAddress_iP6Address; + +typedef struct UnicastAddress { /* CHOICE */ + enum { + eUnicastAddress_iPAddress, + eUnicastAddress_iPXAddress, + eUnicastAddress_iP6Address, + eUnicastAddress_netBios, + eUnicastAddress_iPSourceRouteAddress, + eUnicastAddress_nsap, + eUnicastAddress_nonStandardAddress, + } choice; + union { + UnicastAddress_iPAddress iPAddress; + UnicastAddress_iP6Address iP6Address; + }; +} UnicastAddress; + +typedef struct H245_TransportAddress { /* CHOICE */ + enum { + eH245_TransportAddress_unicastAddress, + eH245_TransportAddress_multicastAddress, + } choice; + union { + UnicastAddress unicastAddress; + }; +} H245_TransportAddress; + +typedef struct H2250LogicalChannelParameters { /* SEQUENCE */ + enum { + eH2250LogicalChannelParameters_nonStandard = (1 << 31), + eH2250LogicalChannelParameters_associatedSessionID = + (1 << 30), + eH2250LogicalChannelParameters_mediaChannel = (1 << 29), + eH2250LogicalChannelParameters_mediaGuaranteedDelivery = + (1 << 28), + eH2250LogicalChannelParameters_mediaControlChannel = + (1 << 27), + eH2250LogicalChannelParameters_mediaControlGuaranteedDelivery + = (1 << 26), + eH2250LogicalChannelParameters_silenceSuppression = (1 << 25), + eH2250LogicalChannelParameters_destination = (1 << 24), + eH2250LogicalChannelParameters_dynamicRTPPayloadType = + (1 << 23), + eH2250LogicalChannelParameters_mediaPacketization = (1 << 22), + eH2250LogicalChannelParameters_transportCapability = + (1 << 21), + eH2250LogicalChannelParameters_redundancyEncoding = (1 << 20), + eH2250LogicalChannelParameters_source = (1 << 19), + } options; + H245_TransportAddress mediaChannel; + H245_TransportAddress mediaControlChannel; +} H2250LogicalChannelParameters; + +typedef struct OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters { /* CHOICE */ + enum { + eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters, + eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters, + eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters, + eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters, + eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_none, + } choice; + union { + H2250LogicalChannelParameters h2250LogicalChannelParameters; + }; +} OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters; + +typedef struct OpenLogicalChannel_forwardLogicalChannelParameters { /* SEQUENCE */ + enum { + eOpenLogicalChannel_forwardLogicalChannelParameters_portNumber + = (1 << 31), + eOpenLogicalChannel_forwardLogicalChannelParameters_forwardLogicalChannelDependency + = (1 << 30), + eOpenLogicalChannel_forwardLogicalChannelParameters_replacementFor + = (1 << 29), + } options; + DataType dataType; + OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters + multiplexParameters; +} OpenLogicalChannel_forwardLogicalChannelParameters; + +typedef struct OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters { /* CHOICE */ + enum { + eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters, + eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters, + eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters, + } choice; + union { + 
H2250LogicalChannelParameters h2250LogicalChannelParameters; + }; +} OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters; + +typedef struct OpenLogicalChannel_reverseLogicalChannelParameters { /* SEQUENCE */ + enum { + eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters + = (1 << 31), + eOpenLogicalChannel_reverseLogicalChannelParameters_reverseLogicalChannelDependency + = (1 << 30), + eOpenLogicalChannel_reverseLogicalChannelParameters_replacementFor + = (1 << 29), + } options; + OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters + multiplexParameters; +} OpenLogicalChannel_reverseLogicalChannelParameters; + +typedef struct NetworkAccessParameters_networkAddress { /* CHOICE */ + enum { + eNetworkAccessParameters_networkAddress_q2931Address, + eNetworkAccessParameters_networkAddress_e164Address, + eNetworkAccessParameters_networkAddress_localAreaAddress, + } choice; + union { + H245_TransportAddress localAreaAddress; + }; +} NetworkAccessParameters_networkAddress; + +typedef struct NetworkAccessParameters { /* SEQUENCE */ + enum { + eNetworkAccessParameters_distribution = (1 << 31), + eNetworkAccessParameters_externalReference = (1 << 30), + eNetworkAccessParameters_t120SetupProcedure = (1 << 29), + } options; + NetworkAccessParameters_networkAddress networkAddress; +} NetworkAccessParameters; + +typedef struct OpenLogicalChannel { /* SEQUENCE */ + enum { + eOpenLogicalChannel_reverseLogicalChannelParameters = + (1 << 31), + eOpenLogicalChannel_separateStack = (1 << 30), + eOpenLogicalChannel_encryptionSync = (1 << 29), + } options; + OpenLogicalChannel_forwardLogicalChannelParameters + forwardLogicalChannelParameters; + OpenLogicalChannel_reverseLogicalChannelParameters + reverseLogicalChannelParameters; + NetworkAccessParameters separateStack; +} OpenLogicalChannel; + +typedef struct Setup_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} Setup_UUIE_fastStart; + +typedef struct Setup_UUIE { /* SEQUENCE */ + enum { + eSetup_UUIE_h245Address = (1 << 31), + eSetup_UUIE_sourceAddress = (1 << 30), + eSetup_UUIE_destinationAddress = (1 << 29), + eSetup_UUIE_destCallSignalAddress = (1 << 28), + eSetup_UUIE_destExtraCallInfo = (1 << 27), + eSetup_UUIE_destExtraCRV = (1 << 26), + eSetup_UUIE_callServices = (1 << 25), + eSetup_UUIE_sourceCallSignalAddress = (1 << 24), + eSetup_UUIE_remoteExtensionAddress = (1 << 23), + eSetup_UUIE_callIdentifier = (1 << 22), + eSetup_UUIE_h245SecurityCapability = (1 << 21), + eSetup_UUIE_tokens = (1 << 20), + eSetup_UUIE_cryptoTokens = (1 << 19), + eSetup_UUIE_fastStart = (1 << 18), + eSetup_UUIE_mediaWaitForConnect = (1 << 17), + eSetup_UUIE_canOverlapSend = (1 << 16), + eSetup_UUIE_endpointIdentifier = (1 << 15), + eSetup_UUIE_multipleCalls = (1 << 14), + eSetup_UUIE_maintainConnection = (1 << 13), + eSetup_UUIE_connectionParameters = (1 << 12), + eSetup_UUIE_language = (1 << 11), + eSetup_UUIE_presentationIndicator = (1 << 10), + eSetup_UUIE_screeningIndicator = (1 << 9), + eSetup_UUIE_serviceControl = (1 << 8), + eSetup_UUIE_symmetricOperationRequired = (1 << 7), + eSetup_UUIE_capacity = (1 << 6), + eSetup_UUIE_circuitInfo = (1 << 5), + eSetup_UUIE_desiredProtocols = (1 << 4), + eSetup_UUIE_neededFeatures = (1 << 3), + eSetup_UUIE_desiredFeatures = (1 << 2), + eSetup_UUIE_supportedFeatures = (1 << 1), + eSetup_UUIE_parallelH245Control = (1 << 0), + } options; + TransportAddress h245Address; + TransportAddress destCallSignalAddress; + TransportAddress 
sourceCallSignalAddress; + Setup_UUIE_fastStart fastStart; +} Setup_UUIE; + +typedef struct CallProceeding_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} CallProceeding_UUIE_fastStart; + +typedef struct CallProceeding_UUIE { /* SEQUENCE */ + enum { + eCallProceeding_UUIE_h245Address = (1 << 31), + eCallProceeding_UUIE_callIdentifier = (1 << 30), + eCallProceeding_UUIE_h245SecurityMode = (1 << 29), + eCallProceeding_UUIE_tokens = (1 << 28), + eCallProceeding_UUIE_cryptoTokens = (1 << 27), + eCallProceeding_UUIE_fastStart = (1 << 26), + eCallProceeding_UUIE_multipleCalls = (1 << 25), + eCallProceeding_UUIE_maintainConnection = (1 << 24), + eCallProceeding_UUIE_fastConnectRefused = (1 << 23), + eCallProceeding_UUIE_featureSet = (1 << 22), + } options; + TransportAddress h245Address; + CallProceeding_UUIE_fastStart fastStart; +} CallProceeding_UUIE; + +typedef struct Connect_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} Connect_UUIE_fastStart; + +typedef struct Connect_UUIE { /* SEQUENCE */ + enum { + eConnect_UUIE_h245Address = (1 << 31), + eConnect_UUIE_callIdentifier = (1 << 30), + eConnect_UUIE_h245SecurityMode = (1 << 29), + eConnect_UUIE_tokens = (1 << 28), + eConnect_UUIE_cryptoTokens = (1 << 27), + eConnect_UUIE_fastStart = (1 << 26), + eConnect_UUIE_multipleCalls = (1 << 25), + eConnect_UUIE_maintainConnection = (1 << 24), + eConnect_UUIE_language = (1 << 23), + eConnect_UUIE_connectedAddress = (1 << 22), + eConnect_UUIE_presentationIndicator = (1 << 21), + eConnect_UUIE_screeningIndicator = (1 << 20), + eConnect_UUIE_fastConnectRefused = (1 << 19), + eConnect_UUIE_serviceControl = (1 << 18), + eConnect_UUIE_capacity = (1 << 17), + eConnect_UUIE_featureSet = (1 << 16), + } options; + TransportAddress h245Address; + Connect_UUIE_fastStart fastStart; +} Connect_UUIE; + +typedef struct Alerting_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} Alerting_UUIE_fastStart; + +typedef struct Alerting_UUIE { /* SEQUENCE */ + enum { + eAlerting_UUIE_h245Address = (1 << 31), + eAlerting_UUIE_callIdentifier = (1 << 30), + eAlerting_UUIE_h245SecurityMode = (1 << 29), + eAlerting_UUIE_tokens = (1 << 28), + eAlerting_UUIE_cryptoTokens = (1 << 27), + eAlerting_UUIE_fastStart = (1 << 26), + eAlerting_UUIE_multipleCalls = (1 << 25), + eAlerting_UUIE_maintainConnection = (1 << 24), + eAlerting_UUIE_alertingAddress = (1 << 23), + eAlerting_UUIE_presentationIndicator = (1 << 22), + eAlerting_UUIE_screeningIndicator = (1 << 21), + eAlerting_UUIE_fastConnectRefused = (1 << 20), + eAlerting_UUIE_serviceControl = (1 << 19), + eAlerting_UUIE_capacity = (1 << 18), + eAlerting_UUIE_featureSet = (1 << 17), + } options; + TransportAddress h245Address; + Alerting_UUIE_fastStart fastStart; +} Alerting_UUIE; + +typedef struct FacilityReason { /* CHOICE */ + enum { + eFacilityReason_routeCallToGatekeeper, + eFacilityReason_callForwarded, + eFacilityReason_routeCallToMC, + eFacilityReason_undefinedReason, + eFacilityReason_conferenceListChoice, + eFacilityReason_startH245, + eFacilityReason_noH245, + eFacilityReason_newTokens, + eFacilityReason_featureSetUpdate, + eFacilityReason_forwardedElements, + eFacilityReason_transportedInformation, + } choice; +} FacilityReason; + +typedef struct Facility_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} Facility_UUIE_fastStart; + +typedef struct Facility_UUIE { /* SEQUENCE */ + enum { + eFacility_UUIE_alternativeAddress = (1 << 31), + 
eFacility_UUIE_alternativeAliasAddress = (1 << 30), + eFacility_UUIE_conferenceID = (1 << 29), + eFacility_UUIE_callIdentifier = (1 << 28), + eFacility_UUIE_destExtraCallInfo = (1 << 27), + eFacility_UUIE_remoteExtensionAddress = (1 << 26), + eFacility_UUIE_tokens = (1 << 25), + eFacility_UUIE_cryptoTokens = (1 << 24), + eFacility_UUIE_conferences = (1 << 23), + eFacility_UUIE_h245Address = (1 << 22), + eFacility_UUIE_fastStart = (1 << 21), + eFacility_UUIE_multipleCalls = (1 << 20), + eFacility_UUIE_maintainConnection = (1 << 19), + eFacility_UUIE_fastConnectRefused = (1 << 18), + eFacility_UUIE_serviceControl = (1 << 17), + eFacility_UUIE_circuitInfo = (1 << 16), + eFacility_UUIE_featureSet = (1 << 15), + eFacility_UUIE_destinationInfo = (1 << 14), + eFacility_UUIE_h245SecurityMode = (1 << 13), + } options; + TransportAddress alternativeAddress; + FacilityReason reason; + TransportAddress h245Address; + Facility_UUIE_fastStart fastStart; +} Facility_UUIE; + +typedef struct Progress_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} Progress_UUIE_fastStart; + +typedef struct Progress_UUIE { /* SEQUENCE */ + enum { + eProgress_UUIE_h245Address = (1 << 31), + eProgress_UUIE_h245SecurityMode = (1 << 30), + eProgress_UUIE_tokens = (1 << 29), + eProgress_UUIE_cryptoTokens = (1 << 28), + eProgress_UUIE_fastStart = (1 << 27), + eProgress_UUIE_multipleCalls = (1 << 26), + eProgress_UUIE_maintainConnection = (1 << 25), + eProgress_UUIE_fastConnectRefused = (1 << 24), + } options; + TransportAddress h245Address; + Progress_UUIE_fastStart fastStart; +} Progress_UUIE; + +typedef struct H323_UU_PDU_h323_message_body { /* CHOICE */ + enum { + eH323_UU_PDU_h323_message_body_setup, + eH323_UU_PDU_h323_message_body_callProceeding, + eH323_UU_PDU_h323_message_body_connect, + eH323_UU_PDU_h323_message_body_alerting, + eH323_UU_PDU_h323_message_body_information, + eH323_UU_PDU_h323_message_body_releaseComplete, + eH323_UU_PDU_h323_message_body_facility, + eH323_UU_PDU_h323_message_body_progress, + eH323_UU_PDU_h323_message_body_empty, + eH323_UU_PDU_h323_message_body_status, + eH323_UU_PDU_h323_message_body_statusInquiry, + eH323_UU_PDU_h323_message_body_setupAcknowledge, + eH323_UU_PDU_h323_message_body_notify, + } choice; + union { + Setup_UUIE setup; + CallProceeding_UUIE callProceeding; + Connect_UUIE connect; + Alerting_UUIE alerting; + Facility_UUIE facility; + Progress_UUIE progress; + }; +} H323_UU_PDU_h323_message_body; + +typedef struct RequestMessage { /* CHOICE */ + enum { + eRequestMessage_nonStandard, + eRequestMessage_masterSlaveDetermination, + eRequestMessage_terminalCapabilitySet, + eRequestMessage_openLogicalChannel, + eRequestMessage_closeLogicalChannel, + eRequestMessage_requestChannelClose, + eRequestMessage_multiplexEntrySend, + eRequestMessage_requestMultiplexEntry, + eRequestMessage_requestMode, + eRequestMessage_roundTripDelayRequest, + eRequestMessage_maintenanceLoopRequest, + eRequestMessage_communicationModeRequest, + eRequestMessage_conferenceRequest, + eRequestMessage_multilinkRequest, + eRequestMessage_logicalChannelRateRequest, + } choice; + union { + OpenLogicalChannel openLogicalChannel; + }; +} RequestMessage; + +typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters { /* CHOICE */ + enum { + eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters, + eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters, + } 
choice; + union { + H2250LogicalChannelParameters h2250LogicalChannelParameters; + }; +} OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters; + +typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters { /* SEQUENCE */ + enum { + eOpenLogicalChannelAck_reverseLogicalChannelParameters_portNumber + = (1 << 31), + eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters + = (1 << 30), + eOpenLogicalChannelAck_reverseLogicalChannelParameters_replacementFor + = (1 << 29), + } options; + OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters + multiplexParameters; +} OpenLogicalChannelAck_reverseLogicalChannelParameters; + +typedef struct H2250LogicalChannelAckParameters { /* SEQUENCE */ + enum { + eH2250LogicalChannelAckParameters_nonStandard = (1 << 31), + eH2250LogicalChannelAckParameters_sessionID = (1 << 30), + eH2250LogicalChannelAckParameters_mediaChannel = (1 << 29), + eH2250LogicalChannelAckParameters_mediaControlChannel = + (1 << 28), + eH2250LogicalChannelAckParameters_dynamicRTPPayloadType = + (1 << 27), + eH2250LogicalChannelAckParameters_flowControlToZero = + (1 << 26), + eH2250LogicalChannelAckParameters_portNumber = (1 << 25), + } options; + H245_TransportAddress mediaChannel; + H245_TransportAddress mediaControlChannel; +} H2250LogicalChannelAckParameters; + +typedef struct OpenLogicalChannelAck_forwardMultiplexAckParameters { /* CHOICE */ + enum { + eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters, + } choice; + union { + H2250LogicalChannelAckParameters + h2250LogicalChannelAckParameters; + }; +} OpenLogicalChannelAck_forwardMultiplexAckParameters; + +typedef struct OpenLogicalChannelAck { /* SEQUENCE */ + enum { + eOpenLogicalChannelAck_reverseLogicalChannelParameters = + (1 << 31), + eOpenLogicalChannelAck_separateStack = (1 << 30), + eOpenLogicalChannelAck_forwardMultiplexAckParameters = + (1 << 29), + eOpenLogicalChannelAck_encryptionSync = (1 << 28), + } options; + OpenLogicalChannelAck_reverseLogicalChannelParameters + reverseLogicalChannelParameters; + NetworkAccessParameters separateStack; + OpenLogicalChannelAck_forwardMultiplexAckParameters + forwardMultiplexAckParameters; +} OpenLogicalChannelAck; + +typedef struct ResponseMessage { /* CHOICE */ + enum { + eResponseMessage_nonStandard, + eResponseMessage_masterSlaveDeterminationAck, + eResponseMessage_masterSlaveDeterminationReject, + eResponseMessage_terminalCapabilitySetAck, + eResponseMessage_terminalCapabilitySetReject, + eResponseMessage_openLogicalChannelAck, + eResponseMessage_openLogicalChannelReject, + eResponseMessage_closeLogicalChannelAck, + eResponseMessage_requestChannelCloseAck, + eResponseMessage_requestChannelCloseReject, + eResponseMessage_multiplexEntrySendAck, + eResponseMessage_multiplexEntrySendReject, + eResponseMessage_requestMultiplexEntryAck, + eResponseMessage_requestMultiplexEntryReject, + eResponseMessage_requestModeAck, + eResponseMessage_requestModeReject, + eResponseMessage_roundTripDelayResponse, + eResponseMessage_maintenanceLoopAck, + eResponseMessage_maintenanceLoopReject, + eResponseMessage_communicationModeResponse, + eResponseMessage_conferenceResponse, + eResponseMessage_multilinkResponse, + eResponseMessage_logicalChannelRateAcknowledge, + eResponseMessage_logicalChannelRateReject, + } choice; + union { + OpenLogicalChannelAck openLogicalChannelAck; + }; +} ResponseMessage; + +typedef struct MultimediaSystemControlMessage { /* CHOICE */ + enum { + 
eMultimediaSystemControlMessage_request, + eMultimediaSystemControlMessage_response, + eMultimediaSystemControlMessage_command, + eMultimediaSystemControlMessage_indication, + } choice; + union { + RequestMessage request; + ResponseMessage response; + }; +} MultimediaSystemControlMessage; + +typedef struct H323_UU_PDU_h245Control { /* SEQUENCE OF */ + int count; + MultimediaSystemControlMessage item[4]; +} H323_UU_PDU_h245Control; + +typedef struct H323_UU_PDU { /* SEQUENCE */ + enum { + eH323_UU_PDU_nonStandardData = (1 << 31), + eH323_UU_PDU_h4501SupplementaryService = (1 << 30), + eH323_UU_PDU_h245Tunneling = (1 << 29), + eH323_UU_PDU_h245Control = (1 << 28), + eH323_UU_PDU_nonStandardControl = (1 << 27), + eH323_UU_PDU_callLinkage = (1 << 26), + eH323_UU_PDU_tunnelledSignallingMessage = (1 << 25), + eH323_UU_PDU_provisionalRespToH245Tunneling = (1 << 24), + eH323_UU_PDU_stimulusControl = (1 << 23), + eH323_UU_PDU_genericData = (1 << 22), + } options; + H323_UU_PDU_h323_message_body h323_message_body; + H323_UU_PDU_h245Control h245Control; +} H323_UU_PDU; + +typedef struct H323_UserInformation { /* SEQUENCE */ + enum { + eH323_UserInformation_user_data = (1 << 31), + } options; + H323_UU_PDU h323_uu_pdu; +} H323_UserInformation; + +typedef struct GatekeeperRequest { /* SEQUENCE */ + enum { + eGatekeeperRequest_nonStandardData = (1 << 31), + eGatekeeperRequest_gatekeeperIdentifier = (1 << 30), + eGatekeeperRequest_callServices = (1 << 29), + eGatekeeperRequest_endpointAlias = (1 << 28), + eGatekeeperRequest_alternateEndpoints = (1 << 27), + eGatekeeperRequest_tokens = (1 << 26), + eGatekeeperRequest_cryptoTokens = (1 << 25), + eGatekeeperRequest_authenticationCapability = (1 << 24), + eGatekeeperRequest_algorithmOIDs = (1 << 23), + eGatekeeperRequest_integrity = (1 << 22), + eGatekeeperRequest_integrityCheckValue = (1 << 21), + eGatekeeperRequest_supportsAltGK = (1 << 20), + eGatekeeperRequest_featureSet = (1 << 19), + eGatekeeperRequest_genericData = (1 << 18), + } options; + TransportAddress rasAddress; +} GatekeeperRequest; + +typedef struct GatekeeperConfirm { /* SEQUENCE */ + enum { + eGatekeeperConfirm_nonStandardData = (1 << 31), + eGatekeeperConfirm_gatekeeperIdentifier = (1 << 30), + eGatekeeperConfirm_alternateGatekeeper = (1 << 29), + eGatekeeperConfirm_authenticationMode = (1 << 28), + eGatekeeperConfirm_tokens = (1 << 27), + eGatekeeperConfirm_cryptoTokens = (1 << 26), + eGatekeeperConfirm_algorithmOID = (1 << 25), + eGatekeeperConfirm_integrity = (1 << 24), + eGatekeeperConfirm_integrityCheckValue = (1 << 23), + eGatekeeperConfirm_featureSet = (1 << 22), + eGatekeeperConfirm_genericData = (1 << 21), + } options; + TransportAddress rasAddress; +} GatekeeperConfirm; + +typedef struct RegistrationRequest_callSignalAddress { /* SEQUENCE OF */ + int count; + TransportAddress item[10]; +} RegistrationRequest_callSignalAddress; + +typedef struct RegistrationRequest_rasAddress { /* SEQUENCE OF */ + int count; + TransportAddress item[10]; +} RegistrationRequest_rasAddress; + +typedef struct RegistrationRequest { /* SEQUENCE */ + enum { + eRegistrationRequest_nonStandardData = (1 << 31), + eRegistrationRequest_terminalAlias = (1 << 30), + eRegistrationRequest_gatekeeperIdentifier = (1 << 29), + eRegistrationRequest_alternateEndpoints = (1 << 28), + eRegistrationRequest_timeToLive = (1 << 27), + eRegistrationRequest_tokens = (1 << 26), + eRegistrationRequest_cryptoTokens = (1 << 25), + eRegistrationRequest_integrityCheckValue = (1 << 24), + eRegistrationRequest_keepAlive = (1 << 23), 
+ eRegistrationRequest_endpointIdentifier = (1 << 22), + eRegistrationRequest_willSupplyUUIEs = (1 << 21), + eRegistrationRequest_maintainConnection = (1 << 20), + eRegistrationRequest_alternateTransportAddresses = (1 << 19), + eRegistrationRequest_additiveRegistration = (1 << 18), + eRegistrationRequest_terminalAliasPattern = (1 << 17), + eRegistrationRequest_supportsAltGK = (1 << 16), + eRegistrationRequest_usageReportingCapability = (1 << 15), + eRegistrationRequest_multipleCalls = (1 << 14), + eRegistrationRequest_supportedH248Packages = (1 << 13), + eRegistrationRequest_callCreditCapability = (1 << 12), + eRegistrationRequest_capacityReportingCapability = (1 << 11), + eRegistrationRequest_capacity = (1 << 10), + eRegistrationRequest_featureSet = (1 << 9), + eRegistrationRequest_genericData = (1 << 8), + } options; + RegistrationRequest_callSignalAddress callSignalAddress; + RegistrationRequest_rasAddress rasAddress; + unsigned int timeToLive; +} RegistrationRequest; + +typedef struct RegistrationConfirm_callSignalAddress { /* SEQUENCE OF */ + int count; + TransportAddress item[10]; +} RegistrationConfirm_callSignalAddress; + +typedef struct RegistrationConfirm { /* SEQUENCE */ + enum { + eRegistrationConfirm_nonStandardData = (1 << 31), + eRegistrationConfirm_terminalAlias = (1 << 30), + eRegistrationConfirm_gatekeeperIdentifier = (1 << 29), + eRegistrationConfirm_alternateGatekeeper = (1 << 28), + eRegistrationConfirm_timeToLive = (1 << 27), + eRegistrationConfirm_tokens = (1 << 26), + eRegistrationConfirm_cryptoTokens = (1 << 25), + eRegistrationConfirm_integrityCheckValue = (1 << 24), + eRegistrationConfirm_willRespondToIRR = (1 << 23), + eRegistrationConfirm_preGrantedARQ = (1 << 22), + eRegistrationConfirm_maintainConnection = (1 << 21), + eRegistrationConfirm_serviceControl = (1 << 20), + eRegistrationConfirm_supportsAdditiveRegistration = (1 << 19), + eRegistrationConfirm_terminalAliasPattern = (1 << 18), + eRegistrationConfirm_supportedPrefixes = (1 << 17), + eRegistrationConfirm_usageSpec = (1 << 16), + eRegistrationConfirm_featureServerAlias = (1 << 15), + eRegistrationConfirm_capacityReportingSpec = (1 << 14), + eRegistrationConfirm_featureSet = (1 << 13), + eRegistrationConfirm_genericData = (1 << 12), + } options; + RegistrationConfirm_callSignalAddress callSignalAddress; + unsigned int timeToLive; +} RegistrationConfirm; + +typedef struct UnregistrationRequest_callSignalAddress { /* SEQUENCE OF */ + int count; + TransportAddress item[10]; +} UnregistrationRequest_callSignalAddress; + +typedef struct UnregistrationRequest { /* SEQUENCE */ + enum { + eUnregistrationRequest_endpointAlias = (1 << 31), + eUnregistrationRequest_nonStandardData = (1 << 30), + eUnregistrationRequest_endpointIdentifier = (1 << 29), + eUnregistrationRequest_alternateEndpoints = (1 << 28), + eUnregistrationRequest_gatekeeperIdentifier = (1 << 27), + eUnregistrationRequest_tokens = (1 << 26), + eUnregistrationRequest_cryptoTokens = (1 << 25), + eUnregistrationRequest_integrityCheckValue = (1 << 24), + eUnregistrationRequest_reason = (1 << 23), + eUnregistrationRequest_endpointAliasPattern = (1 << 22), + eUnregistrationRequest_supportedPrefixes = (1 << 21), + eUnregistrationRequest_alternateGatekeeper = (1 << 20), + eUnregistrationRequest_genericData = (1 << 19), + } options; + UnregistrationRequest_callSignalAddress callSignalAddress; +} UnregistrationRequest; + +typedef struct AdmissionRequest { /* SEQUENCE */ + enum { + eAdmissionRequest_callModel = (1 << 31), + eAdmissionRequest_destinationInfo = 
(1 << 30), + eAdmissionRequest_destCallSignalAddress = (1 << 29), + eAdmissionRequest_destExtraCallInfo = (1 << 28), + eAdmissionRequest_srcCallSignalAddress = (1 << 27), + eAdmissionRequest_nonStandardData = (1 << 26), + eAdmissionRequest_callServices = (1 << 25), + eAdmissionRequest_canMapAlias = (1 << 24), + eAdmissionRequest_callIdentifier = (1 << 23), + eAdmissionRequest_srcAlternatives = (1 << 22), + eAdmissionRequest_destAlternatives = (1 << 21), + eAdmissionRequest_gatekeeperIdentifier = (1 << 20), + eAdmissionRequest_tokens = (1 << 19), + eAdmissionRequest_cryptoTokens = (1 << 18), + eAdmissionRequest_integrityCheckValue = (1 << 17), + eAdmissionRequest_transportQOS = (1 << 16), + eAdmissionRequest_willSupplyUUIEs = (1 << 15), + eAdmissionRequest_callLinkage = (1 << 14), + eAdmissionRequest_gatewayDataRate = (1 << 13), + eAdmissionRequest_capacity = (1 << 12), + eAdmissionRequest_circuitInfo = (1 << 11), + eAdmissionRequest_desiredProtocols = (1 << 10), + eAdmissionRequest_desiredTunnelledProtocol = (1 << 9), + eAdmissionRequest_featureSet = (1 << 8), + eAdmissionRequest_genericData = (1 << 7), + } options; + TransportAddress destCallSignalAddress; + TransportAddress srcCallSignalAddress; +} AdmissionRequest; + +typedef struct AdmissionConfirm { /* SEQUENCE */ + enum { + eAdmissionConfirm_irrFrequency = (1 << 31), + eAdmissionConfirm_nonStandardData = (1 << 30), + eAdmissionConfirm_destinationInfo = (1 << 29), + eAdmissionConfirm_destExtraCallInfo = (1 << 28), + eAdmissionConfirm_destinationType = (1 << 27), + eAdmissionConfirm_remoteExtensionAddress = (1 << 26), + eAdmissionConfirm_alternateEndpoints = (1 << 25), + eAdmissionConfirm_tokens = (1 << 24), + eAdmissionConfirm_cryptoTokens = (1 << 23), + eAdmissionConfirm_integrityCheckValue = (1 << 22), + eAdmissionConfirm_transportQOS = (1 << 21), + eAdmissionConfirm_willRespondToIRR = (1 << 20), + eAdmissionConfirm_uuiesRequested = (1 << 19), + eAdmissionConfirm_language = (1 << 18), + eAdmissionConfirm_alternateTransportAddresses = (1 << 17), + eAdmissionConfirm_useSpecifiedTransport = (1 << 16), + eAdmissionConfirm_circuitInfo = (1 << 15), + eAdmissionConfirm_usageSpec = (1 << 14), + eAdmissionConfirm_supportedProtocols = (1 << 13), + eAdmissionConfirm_serviceControl = (1 << 12), + eAdmissionConfirm_multipleCalls = (1 << 11), + eAdmissionConfirm_featureSet = (1 << 10), + eAdmissionConfirm_genericData = (1 << 9), + } options; + TransportAddress destCallSignalAddress; +} AdmissionConfirm; + +typedef struct LocationRequest { /* SEQUENCE */ + enum { + eLocationRequest_endpointIdentifier = (1 << 31), + eLocationRequest_nonStandardData = (1 << 30), + eLocationRequest_sourceInfo = (1 << 29), + eLocationRequest_canMapAlias = (1 << 28), + eLocationRequest_gatekeeperIdentifier = (1 << 27), + eLocationRequest_tokens = (1 << 26), + eLocationRequest_cryptoTokens = (1 << 25), + eLocationRequest_integrityCheckValue = (1 << 24), + eLocationRequest_desiredProtocols = (1 << 23), + eLocationRequest_desiredTunnelledProtocol = (1 << 22), + eLocationRequest_featureSet = (1 << 21), + eLocationRequest_genericData = (1 << 20), + eLocationRequest_hopCount = (1 << 19), + eLocationRequest_circuitInfo = (1 << 18), + } options; + TransportAddress replyAddress; +} LocationRequest; + +typedef struct LocationConfirm { /* SEQUENCE */ + enum { + eLocationConfirm_nonStandardData = (1 << 31), + eLocationConfirm_destinationInfo = (1 << 30), + eLocationConfirm_destExtraCallInfo = (1 << 29), + eLocationConfirm_destinationType = (1 << 28), + 
eLocationConfirm_remoteExtensionAddress = (1 << 27), + eLocationConfirm_alternateEndpoints = (1 << 26), + eLocationConfirm_tokens = (1 << 25), + eLocationConfirm_cryptoTokens = (1 << 24), + eLocationConfirm_integrityCheckValue = (1 << 23), + eLocationConfirm_alternateTransportAddresses = (1 << 22), + eLocationConfirm_supportedProtocols = (1 << 21), + eLocationConfirm_multipleCalls = (1 << 20), + eLocationConfirm_featureSet = (1 << 19), + eLocationConfirm_genericData = (1 << 18), + eLocationConfirm_circuitInfo = (1 << 17), + eLocationConfirm_serviceControl = (1 << 16), + } options; + TransportAddress callSignalAddress; + TransportAddress rasAddress; +} LocationConfirm; + +typedef struct InfoRequestResponse_callSignalAddress { /* SEQUENCE OF */ + int count; + TransportAddress item[10]; +} InfoRequestResponse_callSignalAddress; + +typedef struct InfoRequestResponse { /* SEQUENCE */ + enum { + eInfoRequestResponse_nonStandardData = (1 << 31), + eInfoRequestResponse_endpointAlias = (1 << 30), + eInfoRequestResponse_perCallInfo = (1 << 29), + eInfoRequestResponse_tokens = (1 << 28), + eInfoRequestResponse_cryptoTokens = (1 << 27), + eInfoRequestResponse_integrityCheckValue = (1 << 26), + eInfoRequestResponse_needResponse = (1 << 25), + eInfoRequestResponse_capacity = (1 << 24), + eInfoRequestResponse_irrStatus = (1 << 23), + eInfoRequestResponse_unsolicited = (1 << 22), + eInfoRequestResponse_genericData = (1 << 21), + } options; + TransportAddress rasAddress; + InfoRequestResponse_callSignalAddress callSignalAddress; +} InfoRequestResponse; + +typedef struct RasMessage { /* CHOICE */ + enum { + eRasMessage_gatekeeperRequest, + eRasMessage_gatekeeperConfirm, + eRasMessage_gatekeeperReject, + eRasMessage_registrationRequest, + eRasMessage_registrationConfirm, + eRasMessage_registrationReject, + eRasMessage_unregistrationRequest, + eRasMessage_unregistrationConfirm, + eRasMessage_unregistrationReject, + eRasMessage_admissionRequest, + eRasMessage_admissionConfirm, + eRasMessage_admissionReject, + eRasMessage_bandwidthRequest, + eRasMessage_bandwidthConfirm, + eRasMessage_bandwidthReject, + eRasMessage_disengageRequest, + eRasMessage_disengageConfirm, + eRasMessage_disengageReject, + eRasMessage_locationRequest, + eRasMessage_locationConfirm, + eRasMessage_locationReject, + eRasMessage_infoRequest, + eRasMessage_infoRequestResponse, + eRasMessage_nonStandardMessage, + eRasMessage_unknownMessageResponse, + eRasMessage_requestInProgress, + eRasMessage_resourcesAvailableIndicate, + eRasMessage_resourcesAvailableConfirm, + eRasMessage_infoRequestAck, + eRasMessage_infoRequestNak, + eRasMessage_serviceControlIndication, + eRasMessage_serviceControlResponse, + } choice; + union { + GatekeeperRequest gatekeeperRequest; + GatekeeperConfirm gatekeeperConfirm; + RegistrationRequest registrationRequest; + RegistrationConfirm registrationConfirm; + UnregistrationRequest unregistrationRequest; + AdmissionRequest admissionRequest; + AdmissionConfirm admissionConfirm; + LocationRequest locationRequest; + LocationConfirm locationConfirm; + InfoRequestResponse infoRequestResponse; + }; +} RasMessage; diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h new file mode 100644 index 000000000..00c2b7420 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_irc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_IRC_H +#define _NF_CONNTRACK_IRC_H + +#ifdef __KERNEL__ + +#define IRC_PORT 6667 + +extern unsigned int (*nf_nat_irc_hook)(struct 
sk_buff *skb, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned int matchoff, + unsigned int matchlen, + struct nf_conntrack_expect *exp); + +#endif /* __KERNEL__ */ +#endif /* _NF_CONNTRACK_IRC_H */ diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h new file mode 100644 index 000000000..ade993809 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* PPTP constants and structs */ +#ifndef _NF_CONNTRACK_PPTP_H +#define _NF_CONNTRACK_PPTP_H + +#include + +const char *pptp_msg_name(u_int16_t msg); + +/* state of the control session */ +enum pptp_ctrlsess_state { + PPTP_SESSION_NONE, /* no session present */ + PPTP_SESSION_ERROR, /* some session error */ + PPTP_SESSION_STOPREQ, /* stop_sess request seen */ + PPTP_SESSION_REQUESTED, /* start_sess request seen */ + PPTP_SESSION_CONFIRMED, /* session established */ +}; + +/* state of the call inside the control session */ +enum pptp_ctrlcall_state { + PPTP_CALL_NONE, + PPTP_CALL_ERROR, + PPTP_CALL_OUT_REQ, + PPTP_CALL_OUT_CONF, + PPTP_CALL_IN_REQ, + PPTP_CALL_IN_REP, + PPTP_CALL_IN_CONF, + PPTP_CALL_CLEAR_REQ, +}; + +/* conntrack private data */ +struct nf_ct_pptp_master { + enum pptp_ctrlsess_state sstate; /* session state */ + enum pptp_ctrlcall_state cstate; /* call state */ + __be16 pac_call_id; /* call id of PAC */ + __be16 pns_call_id; /* call id of PNS */ + + /* in pre-2.6.11 this used to be per-expect. Now it is per-conntrack + * and therefore imposes a fixed limit on the number of maps */ + struct nf_ct_gre_keymap *keymap[IP_CT_DIR_MAX]; +}; + +struct nf_nat_pptp { + __be16 pns_call_id; /* NAT'ed PNS call id */ + __be16 pac_call_id; /* NAT'ed PAC call id */ +}; + +#ifdef __KERNEL__ + +#define PPTP_CONTROL_PORT 1723 + +#define PPTP_PACKET_CONTROL 1 +#define PPTP_PACKET_MGMT 2 + +#define PPTP_MAGIC_COOKIE 0x1a2b3c4d + +struct pptp_pkt_hdr { + __u16 packetLength; + __be16 packetType; + __be32 magicCookie; +}; + +/* PptpControlMessageType values */ +#define PPTP_START_SESSION_REQUEST 1 +#define PPTP_START_SESSION_REPLY 2 +#define PPTP_STOP_SESSION_REQUEST 3 +#define PPTP_STOP_SESSION_REPLY 4 +#define PPTP_ECHO_REQUEST 5 +#define PPTP_ECHO_REPLY 6 +#define PPTP_OUT_CALL_REQUEST 7 +#define PPTP_OUT_CALL_REPLY 8 +#define PPTP_IN_CALL_REQUEST 9 +#define PPTP_IN_CALL_REPLY 10 +#define PPTP_IN_CALL_CONNECT 11 +#define PPTP_CALL_CLEAR_REQUEST 12 +#define PPTP_CALL_DISCONNECT_NOTIFY 13 +#define PPTP_WAN_ERROR_NOTIFY 14 +#define PPTP_SET_LINK_INFO 15 + +#define PPTP_MSG_MAX 15 + +/* PptpGeneralError values */ +#define PPTP_ERROR_CODE_NONE 0 +#define PPTP_NOT_CONNECTED 1 +#define PPTP_BAD_FORMAT 2 +#define PPTP_BAD_VALUE 3 +#define PPTP_NO_RESOURCE 4 +#define PPTP_BAD_CALLID 5 +#define PPTP_REMOVE_DEVICE_ERROR 6 + +struct PptpControlHeader { + __be16 messageType; + __u16 reserved; +}; + +/* FramingCapability Bitmap Values */ +#define PPTP_FRAME_CAP_ASYNC 0x1 +#define PPTP_FRAME_CAP_SYNC 0x2 + +/* BearerCapability Bitmap Values */ +#define PPTP_BEARER_CAP_ANALOG 0x1 +#define PPTP_BEARER_CAP_DIGITAL 0x2 + +struct PptpStartSessionRequest { + __be16 protocolVersion; + __u16 reserved1; + __be32 framingCapability; + __be32 bearerCapability; + __be16 maxChannels; + __be16 firmwareRevision; + __u8 hostName[64]; + __u8 vendorString[64]; +}; + +/* PptpStartSessionResultCode Values */ +#define PPTP_START_OK 1 +#define PPTP_START_GENERAL_ERROR 2 +#define PPTP_START_ALREADY_CONNECTED 3 +#define PPTP_START_NOT_AUTHORIZED 4 
+#define PPTP_START_UNKNOWN_PROTOCOL 5 + +struct PptpStartSessionReply { + __be16 protocolVersion; + __u8 resultCode; + __u8 generalErrorCode; + __be32 framingCapability; + __be32 bearerCapability; + __be16 maxChannels; + __be16 firmwareRevision; + __u8 hostName[64]; + __u8 vendorString[64]; +}; + +/* PptpStopReasons */ +#define PPTP_STOP_NONE 1 +#define PPTP_STOP_PROTOCOL 2 +#define PPTP_STOP_LOCAL_SHUTDOWN 3 + +struct PptpStopSessionRequest { + __u8 reason; + __u8 reserved1; + __u16 reserved2; +}; + +/* PptpStopSessionResultCode */ +#define PPTP_STOP_OK 1 +#define PPTP_STOP_GENERAL_ERROR 2 + +struct PptpStopSessionReply { + __u8 resultCode; + __u8 generalErrorCode; + __u16 reserved1; +}; + +struct PptpEchoRequest { + __be32 identNumber; +}; + +/* PptpEchoReplyResultCode */ +#define PPTP_ECHO_OK 1 +#define PPTP_ECHO_GENERAL_ERROR 2 + +struct PptpEchoReply { + __be32 identNumber; + __u8 resultCode; + __u8 generalErrorCode; + __u16 reserved; +}; + +/* PptpFramingType */ +#define PPTP_ASYNC_FRAMING 1 +#define PPTP_SYNC_FRAMING 2 +#define PPTP_DONT_CARE_FRAMING 3 + +/* PptpCallBearerType */ +#define PPTP_ANALOG_TYPE 1 +#define PPTP_DIGITAL_TYPE 2 +#define PPTP_DONT_CARE_BEARER_TYPE 3 + +struct PptpOutCallRequest { + __be16 callID; + __be16 callSerialNumber; + __be32 minBPS; + __be32 maxBPS; + __be32 bearerType; + __be32 framingType; + __be16 packetWindow; + __be16 packetProcDelay; + __be16 phoneNumberLength; + __u16 reserved1; + __u8 phoneNumber[64]; + __u8 subAddress[64]; +}; + +/* PptpCallResultCode */ +#define PPTP_OUTCALL_CONNECT 1 +#define PPTP_OUTCALL_GENERAL_ERROR 2 +#define PPTP_OUTCALL_NO_CARRIER 3 +#define PPTP_OUTCALL_BUSY 4 +#define PPTP_OUTCALL_NO_DIAL_TONE 5 +#define PPTP_OUTCALL_TIMEOUT 6 +#define PPTP_OUTCALL_DONT_ACCEPT 7 + +struct PptpOutCallReply { + __be16 callID; + __be16 peersCallID; + __u8 resultCode; + __u8 generalErrorCode; + __be16 causeCode; + __be32 connectSpeed; + __be16 packetWindow; + __be16 packetProcDelay; + __be32 physChannelID; +}; + +struct PptpInCallRequest { + __be16 callID; + __be16 callSerialNumber; + __be32 callBearerType; + __be32 physChannelID; + __be16 dialedNumberLength; + __be16 dialingNumberLength; + __u8 dialedNumber[64]; + __u8 dialingNumber[64]; + __u8 subAddress[64]; +}; + +/* PptpInCallResultCode */ +#define PPTP_INCALL_ACCEPT 1 +#define PPTP_INCALL_GENERAL_ERROR 2 +#define PPTP_INCALL_DONT_ACCEPT 3 + +struct PptpInCallReply { + __be16 callID; + __be16 peersCallID; + __u8 resultCode; + __u8 generalErrorCode; + __be16 packetWindow; + __be16 packetProcDelay; + __u16 reserved; +}; + +struct PptpInCallConnected { + __be16 peersCallID; + __u16 reserved; + __be32 connectSpeed; + __be16 packetWindow; + __be16 packetProcDelay; + __be32 callFramingType; +}; + +struct PptpClearCallRequest { + __be16 callID; + __u16 reserved; +}; + +struct PptpCallDisconnectNotify { + __be16 callID; + __u8 resultCode; + __u8 generalErrorCode; + __be16 causeCode; + __u16 reserved; + __u8 callStatistics[128]; +}; + +struct PptpWanErrorNotify { + __be16 peersCallID; + __u16 reserved; + __be32 crcErrors; + __be32 framingErrors; + __be32 hardwareOverRuns; + __be32 bufferOverRuns; + __be32 timeoutErrors; + __be32 alignmentErrors; +}; + +struct PptpSetLinkInfo { + __be16 peersCallID; + __u16 reserved; + __be32 sendAccm; + __be32 recvAccm; +}; + +union pptp_ctrl_union { + struct PptpStartSessionRequest sreq; + struct PptpStartSessionReply srep; + struct PptpStopSessionRequest streq; + struct PptpStopSessionReply strep; + struct PptpOutCallRequest ocreq; + struct 
PptpOutCallReply ocack; + struct PptpInCallRequest icreq; + struct PptpInCallReply icack; + struct PptpInCallConnected iccon; + struct PptpClearCallRequest clrreq; + struct PptpCallDisconnectNotify disc; + struct PptpWanErrorNotify wanerr; + struct PptpSetLinkInfo setlink; +}; + +/* crap needed for nf_conntrack_compat.h */ +struct nf_conn; +struct nf_conntrack_expect; + +extern int +(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + unsigned int protoff, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + +extern int +(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + unsigned int protoff, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + +extern void +(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig, + struct nf_conntrack_expect *exp_reply); + +extern void +(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, + struct nf_conntrack_expect *exp); + +#endif /* __KERNEL__ */ +#endif /* _NF_CONNTRACK_PPTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h new file mode 100644 index 000000000..14edb795a --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CONNTRACK_PROTO_GRE_H +#define _CONNTRACK_PROTO_GRE_H +#include +#include +#include + +struct nf_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +#ifdef __KERNEL__ +#include + +struct nf_conn; + +/* structure for original <-> reply keymap */ +struct nf_ct_gre_keymap { + struct list_head list; + struct nf_conntrack_tuple tuple; +}; + +enum grep_conntrack { + GRE_CT_UNREPLIED, + GRE_CT_REPLIED, + GRE_CT_MAX +}; + +struct netns_proto_gre { + struct nf_proto_net nf; + rwlock_t keymap_lock; + struct list_head keymap_list; + unsigned int gre_timeouts[GRE_CT_MAX]; +}; + +/* add new tuple->key_reply pair to keymap */ +int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, + struct nf_conntrack_tuple *t); + +/* delete keymap entries */ +void nf_ct_gre_keymap_destroy(struct nf_conn *ct); + +void nf_nat_need_gre(void); + +#endif /* __KERNEL__ */ +#endif /* _CONNTRACK_PROTO_GRE_H */ diff --git a/include/linux/netfilter/nf_conntrack_sane.h b/include/linux/netfilter/nf_conntrack_sane.h new file mode 100644 index 000000000..7d2de44ed --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_sane.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_SANE_H +#define _NF_CONNTRACK_SANE_H +/* SANE tracking. */ + +#ifdef __KERNEL__ + +#define SANE_PORT 6566 + +enum sane_state { + SANE_STATE_NORMAL, + SANE_STATE_START_REQUESTED, +}; + +/* This structure exists only once per master */ +struct nf_ct_sane_master { + enum sane_state state; +}; + +#endif /* __KERNEL__ */ + +#endif /* _NF_CONNTRACK_SANE_H */ diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h new file mode 100644 index 000000000..625f491b9 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_SCTP_H +#define _NF_CONNTRACK_SCTP_H +/* SCTP tracking. 
*/ + +#include + +struct ip_ct_sctp { + enum sctp_conntrack state; + + __be32 vtag[IP_CT_DIR_MAX]; + u8 last_dir; + u8 flags; +}; + +#endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h new file mode 100644 index 000000000..c7fc38807 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_sip.h @@ -0,0 +1,200 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NF_CONNTRACK_SIP_H__ +#define __NF_CONNTRACK_SIP_H__ +#ifdef __KERNEL__ + +#include + +#include + +#define SIP_PORT 5060 +#define SIP_TIMEOUT 3600 + +struct nf_ct_sip_master { + unsigned int register_cseq; + unsigned int invite_cseq; + __be16 forced_dport; +}; + +enum sip_expectation_classes { + SIP_EXPECT_SIGNALLING, + SIP_EXPECT_AUDIO, + SIP_EXPECT_VIDEO, + SIP_EXPECT_IMAGE, + __SIP_EXPECT_MAX +}; +#define SIP_EXPECT_MAX (__SIP_EXPECT_MAX - 1) + +struct sdp_media_type { + const char *name; + unsigned int len; + enum sip_expectation_classes class; +}; + +#define SDP_MEDIA_TYPE(__name, __class) \ +{ \ + .name = (__name), \ + .len = sizeof(__name) - 1, \ + .class = (__class), \ +} + +struct sip_handler { + const char *method; + unsigned int len; + int (*request)(struct sk_buff *skb, unsigned int protoff, + unsigned int dataoff, + const char **dptr, unsigned int *datalen, + unsigned int cseq); + int (*response)(struct sk_buff *skb, unsigned int protoff, + unsigned int dataoff, + const char **dptr, unsigned int *datalen, + unsigned int cseq, unsigned int code); +}; + +#define SIP_HANDLER(__method, __request, __response) \ +{ \ + .method = (__method), \ + .len = sizeof(__method) - 1, \ + .request = (__request), \ + .response = (__response), \ +} + +struct sip_header { + const char *name; + const char *cname; + const char *search; + unsigned int len; + unsigned int clen; + unsigned int slen; + int (*match_len)(const struct nf_conn *ct, + const char *dptr, const char *limit, + int *shift); +}; + +#define __SIP_HDR(__name, __cname, __search, __match) \ +{ \ + .name = (__name), \ + .len = sizeof(__name) - 1, \ + .cname = (__cname), \ + .clen = (__cname) ? sizeof(__cname) - 1 : 0, \ + .search = (__search), \ + .slen = (__search) ? 
sizeof(__search) - 1 : 0, \ + .match_len = (__match), \ +} + +#define SIP_HDR(__name, __cname, __search, __match) \ + __SIP_HDR(__name, __cname, __search, __match) + +#define SDP_HDR(__name, __search, __match) \ + __SIP_HDR(__name, NULL, __search, __match) + +enum sip_header_types { + SIP_HDR_CSEQ, + SIP_HDR_FROM, + SIP_HDR_TO, + SIP_HDR_CONTACT, + SIP_HDR_VIA_UDP, + SIP_HDR_VIA_TCP, + SIP_HDR_EXPIRES, + SIP_HDR_CONTENT_LENGTH, + SIP_HDR_CALL_ID, +}; + +enum sdp_header_types { + SDP_HDR_UNSPEC, + SDP_HDR_VERSION, + SDP_HDR_OWNER, + SDP_HDR_CONNECTION, + SDP_HDR_MEDIA, +}; + +struct nf_nat_sip_hooks { + unsigned int (*msg)(struct sk_buff *skb, + unsigned int protoff, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen); + + void (*seq_adjust)(struct sk_buff *skb, + unsigned int protoff, s16 off); + + unsigned int (*expect)(struct sk_buff *skb, + unsigned int protoff, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen, + struct nf_conntrack_expect *exp, + unsigned int matchoff, + unsigned int matchlen); + + unsigned int (*sdp_addr)(struct sk_buff *skb, + unsigned int protoff, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen, + unsigned int sdpoff, + enum sdp_header_types type, + enum sdp_header_types term, + const union nf_inet_addr *addr); + + unsigned int (*sdp_port)(struct sk_buff *skb, + unsigned int protoff, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen, + unsigned int matchoff, + unsigned int matchlen, + u_int16_t port); + + unsigned int (*sdp_session)(struct sk_buff *skb, + unsigned int protoff, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen, + unsigned int sdpoff, + const union nf_inet_addr *addr); + + unsigned int (*sdp_media)(struct sk_buff *skb, + unsigned int protoff, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen, + struct nf_conntrack_expect *rtp_exp, + struct nf_conntrack_expect *rtcp_exp, + unsigned int mediaoff, + unsigned int medialen, + union nf_inet_addr *rtp_addr); +}; +extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks; + +int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr, + unsigned int datalen, unsigned int *matchoff, + unsigned int *matchlen, union nf_inet_addr *addr, + __be16 *port); +int ct_sip_get_header(const struct nf_conn *ct, const char *dptr, + unsigned int dataoff, unsigned int datalen, + enum sip_header_types type, unsigned int *matchoff, + unsigned int *matchlen); +int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, + unsigned int *dataoff, unsigned int datalen, + enum sip_header_types type, int *in_header, + unsigned int *matchoff, unsigned int *matchlen, + union nf_inet_addr *addr, __be16 *port); +int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, + unsigned int dataoff, unsigned int datalen, + const char *name, unsigned int *matchoff, + unsigned int *matchlen, union nf_inet_addr *addr, + bool delim); +int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, + unsigned int off, unsigned int datalen, + const char *name, unsigned int *matchoff, + unsigned int *matchen, unsigned int *val); + +int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, + unsigned int dataoff, unsigned int datalen, + enum sdp_header_types type, + enum sdp_header_types term, + unsigned int *matchoff, unsigned int *matchlen); + +#endif /* __KERNEL__ */ +#endif /* __NF_CONNTRACK_SIP_H__ */ diff --git a/include/linux/netfilter/nf_conntrack_snmp.h 
b/include/linux/netfilter/nf_conntrack_snmp.h new file mode 100644 index 000000000..818088c47 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_snmp.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_SNMP_H +#define _NF_CONNTRACK_SNMP_H + +extern int (*nf_nat_snmp_hook)(struct sk_buff *skb, + unsigned int protoff, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo); + +#endif /* _NF_CONNTRACK_SNMP_H */ diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h new file mode 100644 index 000000000..f9e3a6630 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_tcp.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_TCP_H +#define _NF_CONNTRACK_TCP_H + +#include + + +struct ip_ct_tcp_state { + u_int32_t td_end; /* max of seq + len */ + u_int32_t td_maxend; /* max of ack + max(win, 1) */ + u_int32_t td_maxwin; /* max(win) */ + u_int32_t td_maxack; /* max of ack */ + u_int8_t td_scale; /* window scale factor */ + u_int8_t flags; /* per direction options */ +}; + +struct ip_ct_tcp { + struct ip_ct_tcp_state seen[2]; /* connection parameters per direction */ + u_int8_t state; /* state of the connection (enum tcp_conntrack) */ + /* For detecting stale connections */ + u_int8_t last_dir; /* Direction of the last packet (enum ip_conntrack_dir) */ + u_int8_t retrans; /* Number of retransmitted packets */ + u_int8_t last_index; /* Index of the last packet */ + u_int32_t last_seq; /* Last sequence number seen in dir */ + u_int32_t last_ack; /* Last sequence number seen in opposite dir */ + u_int32_t last_end; /* Last seq + len */ + u_int16_t last_win; /* Last window advertisement seen in dir */ + /* For SYN packets while we may be out-of-sync */ + u_int8_t last_wscale; /* Last window scaling factor seen */ + u_int8_t last_flags; /* Last flags set */ +}; + +#endif /* _NF_CONNTRACK_TCP_H */ diff --git a/include/linux/netfilter/nf_conntrack_tftp.h b/include/linux/netfilter/nf_conntrack_tftp.h new file mode 100644 index 000000000..5769e12dd --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_tftp.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_TFTP_H +#define _NF_CONNTRACK_TFTP_H + +#define TFTP_PORT 69 + +struct tftphdr { + __be16 opcode; +}; + +#define TFTP_OPCODE_READ 1 +#define TFTP_OPCODE_WRITE 2 +#define TFTP_OPCODE_DATA 3 +#define TFTP_OPCODE_ACK 4 +#define TFTP_OPCODE_ERROR 5 + +extern unsigned int (*nf_nat_tftp_hook)(struct sk_buff *skb, + enum ip_conntrack_info ctinfo, + struct nf_conntrack_expect *exp); + +#endif /* _NF_CONNTRACK_TFTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_zones_common.h b/include/linux/netfilter/nf_conntrack_zones_common.h new file mode 100644 index 000000000..8f3905e12 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_zones_common.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_CONNTRACK_ZONES_COMMON_H +#define _NF_CONNTRACK_ZONES_COMMON_H + +#include + +#define NF_CT_DEFAULT_ZONE_ID 0 + +#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL) +#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY) + +#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL) + +#define NF_CT_FLAG_MARK 1 + +struct nf_conntrack_zone { + u16 id; + u8 flags; + u8 dir; +}; + +extern const struct nf_conntrack_zone nf_ct_zone_dflt; + +#endif /* _NF_CONNTRACK_ZONES_COMMON_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h new file mode 100644 index 
000000000..e713476ff --- /dev/null +++ b/include/linux/netfilter/nfnetlink.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NFNETLINK_H +#define _NFNETLINK_H + +#include +#include +#include +#include + +struct nfnl_callback { + int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const cda[], + struct netlink_ext_ack *extack); + int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const cda[], + struct netlink_ext_ack *extack); + int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const cda[], + struct netlink_ext_ack *extack); + const struct nla_policy *policy; /* netlink attribute policy */ + const u_int16_t attr_count; /* number of nlattr's */ +}; + +struct nfnetlink_subsystem { + const char *name; + __u8 subsys_id; /* nfnetlink subsystem ID */ + __u8 cb_count; /* number of callbacks */ + const struct nfnl_callback *cb; /* callback for individual types */ + struct module *owner; + int (*commit)(struct net *net, struct sk_buff *skb); + int (*abort)(struct net *net, struct sk_buff *skb); + void (*cleanup)(struct net *net); + bool (*valid_genid)(struct net *net, u32 genid); +}; + +int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); +int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); + +int nfnetlink_has_listeners(struct net *net, unsigned int group); +int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, + unsigned int group, int echo, gfp_t flags); +int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid); + +static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) +{ + return subsys << 8 | msg_type; +} + +void nfnl_lock(__u8 subsys_id); +void nfnl_unlock(__u8 subsys_id); +#ifdef CONFIG_PROVE_LOCKING +bool lockdep_nfnl_is_held(__u8 subsys_id); +#else +static inline bool lockdep_nfnl_is_held(__u8 subsys_id) +{ + return true; +} +#endif /* CONFIG_PROVE_LOCKING */ + +#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ + MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) + +#endif /* _NFNETLINK_H */ diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h new file mode 100644 index 000000000..beee8bffe --- /dev/null +++ b/include/linux/netfilter/nfnetlink_acct.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NFNL_ACCT_H_ +#define _NFNL_ACCT_H_ + +#include +#include + +enum { + NFACCT_NO_QUOTA = -1, + NFACCT_UNDERQUOTA, + NFACCT_OVERQUOTA, +}; + +struct nf_acct; + +struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name); +void nfnl_acct_put(struct nf_acct *acct); +void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); +int nfnl_acct_overquota(struct net *net, struct nf_acct *nfacct); +#endif /* _NFNL_ACCT_H */ diff --git a/include/linux/netfilter/nfnetlink_osf.h b/include/linux/netfilter/nfnetlink_osf.h new file mode 100644 index 000000000..ecf7dab81 --- /dev/null +++ b/include/linux/netfilter/nfnetlink_osf.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NFOSF_H +#define _NFOSF_H + +#include + +enum osf_fmatch_states { + /* Packet does not match the fingerprint */ + FMATCH_WRONG = 0, + /* Packet matches the fingerprint */ + FMATCH_OK, + /* Options do not match the fingerprint, but header does */ + 
FMATCH_OPT_WRONG, +}; + +extern struct list_head nf_osf_fingers[2]; + +struct nf_osf_finger { + struct rcu_head rcu_head; + struct list_head finger_entry; + struct nf_osf_user_finger finger; +}; + +bool nf_osf_match(const struct sk_buff *skb, u_int8_t family, + int hooknum, struct net_device *in, struct net_device *out, + const struct nf_osf_info *info, struct net *net, + const struct list_head *nf_osf_fingers); + +const char *nf_osf_find(const struct sk_buff *skb, + const struct list_head *nf_osf_fingers); + +#endif /* _NFOSF_H */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h new file mode 100644 index 000000000..0ade4d1e4 --- /dev/null +++ b/include/linux/netfilter/x_tables.h @@ -0,0 +1,534 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _X_TABLES_H +#define _X_TABLES_H + + +#include +#include +#include +#include + +/* Test a struct->invflags and a boolean for inequality */ +#define NF_INVF(ptr, flag, boolean) \ + ((boolean) ^ !!((ptr)->invflags & (flag))) + +/** + * struct xt_action_param - parameters for matches/targets + * + * @match: the match extension + * @target: the target extension + * @matchinfo: per-match data + * @targetinfo: per-target data + * @state: pointer to hook state this packet came from + * @fragoff: packet is a fragment, this is the data offset + * @thoff: position of transport header relative to skb->data + * + * Fields written to by extensions: + * + * @hotdrop: drop packet if we had inspection problems + */ +struct xt_action_param { + union { + const struct xt_match *match; + const struct xt_target *target; + }; + union { + const void *matchinfo, *targinfo; + }; + const struct nf_hook_state *state; + int fragoff; + unsigned int thoff; + bool hotdrop; +}; + +static inline struct net *xt_net(const struct xt_action_param *par) +{ + return par->state->net; +} + +static inline struct net_device *xt_in(const struct xt_action_param *par) +{ + return par->state->in; +} + +static inline const char *xt_inname(const struct xt_action_param *par) +{ + return par->state->in->name; +} + +static inline struct net_device *xt_out(const struct xt_action_param *par) +{ + return par->state->out; +} + +static inline const char *xt_outname(const struct xt_action_param *par) +{ + return par->state->out->name; +} + +static inline unsigned int xt_hooknum(const struct xt_action_param *par) +{ + return par->state->hook; +} + +static inline u_int8_t xt_family(const struct xt_action_param *par) +{ + return par->state->pf; +} + +/** + * struct xt_mtchk_param - parameters for match extensions' + * checkentry functions + * + * @net: network namespace through which the check was invoked + * @table: table the rule is tried to be inserted into + * @entryinfo: the family-specific rule data + * (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry) + * @match: struct xt_match through which this function was invoked + * @matchinfo: per-match data + * @hook_mask: via which hooks the new rule is reachable + * Other fields as above. + */ +struct xt_mtchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const struct xt_match *match; + void *matchinfo; + unsigned int hook_mask; + u_int8_t family; + bool nft_compat; +}; + +/** + * struct xt_mdtor_param - match destructor parameters + * Fields as above. 
+ */ +struct xt_mtdtor_param { + struct net *net; + const struct xt_match *match; + void *matchinfo; + u_int8_t family; +}; + +/** + * struct xt_tgchk_param - parameters for target extensions' + * checkentry functions + * + * @entryinfo: the family-specific rule data + * (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry) + * + * Other fields see above. + */ +struct xt_tgchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const struct xt_target *target; + void *targinfo; + unsigned int hook_mask; + u_int8_t family; + bool nft_compat; +}; + +/* Target destructor parameters */ +struct xt_tgdtor_param { + struct net *net; + const struct xt_target *target; + void *targinfo; + u_int8_t family; +}; + +struct xt_match { + struct list_head list; + + const char name[XT_EXTENSION_MAXNAMELEN]; + u_int8_t revision; + + /* Return true or false: return FALSE and set *hotdrop = 1 to + force immediate packet drop. */ + /* Arguments changed since 2.6.9, as this must now handle + non-linear skb, using skb_header_pointer and + skb_ip_make_writable. */ + bool (*match)(const struct sk_buff *skb, + struct xt_action_param *); + + /* Called when user tries to insert an entry of this type. */ + int (*checkentry)(const struct xt_mtchk_param *); + + /* Called when entry of this type deleted. */ + void (*destroy)(const struct xt_mtdtor_param *); +#ifdef CONFIG_COMPAT + /* Called when userspace align differs from kernel space one */ + void (*compat_from_user)(void *dst, const void *src); + int (*compat_to_user)(void __user *dst, const void *src); +#endif + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; + + const char *table; + unsigned int matchsize; + unsigned int usersize; +#ifdef CONFIG_COMPAT + unsigned int compatsize; +#endif + unsigned int hooks; + unsigned short proto; + + unsigned short family; +}; + +/* Registration hooks for targets. */ +struct xt_target { + struct list_head list; + + const char name[XT_EXTENSION_MAXNAMELEN]; + u_int8_t revision; + + /* Returns verdict. Argument order changed since 2.6.9, as this + must now handle non-linear skbs, using skb_copy_bits and + skb_ip_make_writable. */ + unsigned int (*target)(struct sk_buff *skb, + const struct xt_action_param *); + + /* Called when user tries to insert an entry of this type: + hook_mask is a bitmask of hooks from which it can be + called. */ + /* Should return 0 on success or an error code otherwise (-Exxxx). */ + int (*checkentry)(const struct xt_tgchk_param *); + + /* Called when entry of this type deleted. */ + void (*destroy)(const struct xt_tgdtor_param *); +#ifdef CONFIG_COMPAT + /* Called when userspace align differs from kernel space one */ + void (*compat_from_user)(void *dst, const void *src); + int (*compat_to_user)(void __user *dst, const void *src); +#endif + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; + + const char *table; + unsigned int targetsize; + unsigned int usersize; +#ifdef CONFIG_COMPAT + unsigned int compatsize; +#endif + unsigned int hooks; + unsigned short proto; + + unsigned short family; +}; + +/* Furniture shopping... */ +struct xt_table { + struct list_head list; + + /* What hooks you will enter on */ + unsigned int valid_hooks; + + /* Man behind the curtain... 
*/ + struct xt_table_info *private; + + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; + + u_int8_t af; /* address/protocol family */ + int priority; /* hook order */ + + /* called when table is needed in the given netns */ + int (*table_init)(struct net *net); + + /* A unique name... */ + const char name[XT_TABLE_MAXNAMELEN]; +}; + +#include + +/* The table itself */ +struct xt_table_info { + /* Size per table */ + unsigned int size; + /* Number of entries: FIXME. --RR */ + unsigned int number; + /* Initial number of entries. Needed for module usage count */ + unsigned int initial_entries; + + /* Entry points and underflows */ + unsigned int hook_entry[NF_INET_NUMHOOKS]; + unsigned int underflow[NF_INET_NUMHOOKS]; + + /* + * Number of user chains. Since tables cannot have loops, at most + * @stacksize jumps (number of user chains) can possibly be made. + */ + unsigned int stacksize; + void ***jumpstack; + + unsigned char entries[0] __aligned(8); +}; + +int xt_register_target(struct xt_target *target); +void xt_unregister_target(struct xt_target *target); +int xt_register_targets(struct xt_target *target, unsigned int n); +void xt_unregister_targets(struct xt_target *target, unsigned int n); + +int xt_register_match(struct xt_match *target); +void xt_unregister_match(struct xt_match *target); +int xt_register_matches(struct xt_match *match, unsigned int n); +void xt_unregister_matches(struct xt_match *match, unsigned int n); + +int xt_check_entry_offsets(const void *base, const char *elems, + unsigned int target_offset, + unsigned int next_offset); + +int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks); + +unsigned int *xt_alloc_entry_offsets(unsigned int size); +bool xt_find_jump_offset(const unsigned int *offsets, + unsigned int target, unsigned int size); + +int xt_check_proc_name(const char *name, unsigned int size); + +int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, + bool inv_proto); +int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, + bool inv_proto); + +int xt_match_to_user(const struct xt_entry_match *m, + struct xt_entry_match __user *u); +int xt_target_to_user(const struct xt_entry_target *t, + struct xt_entry_target __user *u); +int xt_data_to_user(void __user *dst, const void *src, + int usersize, int size, int aligned_size); + +void *xt_copy_counters_from_user(const void __user *user, unsigned int len, + struct xt_counters_info *info, bool compat); +struct xt_counters *xt_counters_alloc(unsigned int counters); + +struct xt_table *xt_register_table(struct net *net, + const struct xt_table *table, + struct xt_table_info *bootstrap, + struct xt_table_info *newinfo); +void *xt_unregister_table(struct xt_table *table); + +struct xt_table_info *xt_replace_table(struct xt_table *table, + unsigned int num_counters, + struct xt_table_info *newinfo, + int *error); + +struct xt_match *xt_find_match(u8 af, const char *name, u8 revision); +struct xt_target *xt_find_target(u8 af, const char *name, u8 revision); +struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision); +struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision); +int xt_find_revision(u8 af, const char *name, u8 revision, int target, + int *err); + +struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, + const char *name); +struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af, + const char *name); +void 
xt_table_unlock(struct xt_table *t); + +int xt_proto_init(struct net *net, u_int8_t af); +void xt_proto_fini(struct net *net, u_int8_t af); + +struct xt_table_info *xt_alloc_table_info(unsigned int size); +void xt_free_table_info(struct xt_table_info *info); + +/** + * xt_recseq - recursive seqcount for netfilter use + * + * Packet processing changes the seqcount only if no recursion happened + * get_counters() can use read_seqcount_begin()/read_seqcount_retry(), + * because we use the normal seqcount convention : + * Low order bit set to 1 if a writer is active. + */ +DECLARE_PER_CPU(seqcount_t, xt_recseq); + +/* xt_tee_enabled - true if x_tables needs to handle reentrancy + * + * Enabled if current ip(6)tables ruleset has at least one -j TEE rule. + */ +extern struct static_key xt_tee_enabled; + +/** + * xt_write_recseq_begin - start of a write section + * + * Begin packet processing : all readers must wait the end + * 1) Must be called with preemption disabled + * 2) softirqs must be disabled too (or we should use this_cpu_add()) + * Returns : + * 1 if no recursion on this cpu + * 0 if recursion detected + */ +static inline unsigned int xt_write_recseq_begin(void) +{ + unsigned int addend; + + /* + * Low order bit of sequence is set if we already + * called xt_write_recseq_begin(). + */ + addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1; + + /* + * This is kind of a write_seqcount_begin(), but addend is 0 or 1 + * We dont check addend value to avoid a test and conditional jump, + * since addend is most likely 1 + */ + __this_cpu_add(xt_recseq.sequence, addend); + smp_mb(); + + return addend; +} + +/** + * xt_write_recseq_end - end of a write section + * @addend: return value from previous xt_write_recseq_begin() + * + * End packet processing : all readers can proceed + * 1) Must be called with preemption disabled + * 2) softirqs must be disabled too (or we should use this_cpu_add()) + */ +static inline void xt_write_recseq_end(unsigned int addend) +{ + /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ + smp_wmb(); + __this_cpu_add(xt_recseq.sequence, addend); +} + +/* + * This helper is performance critical and must be inlined + */ +static inline unsigned long ifname_compare_aligned(const char *_a, + const char *_b, + const char *_mask) +{ + const unsigned long *a = (const unsigned long *)_a; + const unsigned long *b = (const unsigned long *)_b; + const unsigned long *mask = (const unsigned long *)_mask; + unsigned long ret; + + ret = (a[0] ^ b[0]) & mask[0]; + if (IFNAMSIZ > sizeof(unsigned long)) + ret |= (a[1] ^ b[1]) & mask[1]; + if (IFNAMSIZ > 2 * sizeof(unsigned long)) + ret |= (a[2] ^ b[2]) & mask[2]; + if (IFNAMSIZ > 3 * sizeof(unsigned long)) + ret |= (a[3] ^ b[3]) & mask[3]; + BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); + return ret; +} + +struct xt_percpu_counter_alloc_state { + unsigned int off; + const char __percpu *mem; +}; + +bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, + struct xt_counters *counter); +void xt_percpu_counter_free(struct xt_counters *cnt); + +static inline struct xt_counters * +xt_get_this_cpu_counter(struct xt_counters *cnt) +{ + if (nr_cpu_ids > 1) + return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt); + + return cnt; +} + +static inline struct xt_counters * +xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) +{ + if (nr_cpu_ids > 1) + return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu); + + return cnt; +} + +struct nf_hook_ops 
*xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); + +#ifdef CONFIG_COMPAT +#include + +struct compat_xt_entry_match { + union { + struct { + u_int16_t match_size; + char name[XT_FUNCTION_MAXNAMELEN - 1]; + u_int8_t revision; + } user; + struct { + u_int16_t match_size; + compat_uptr_t match; + } kernel; + u_int16_t match_size; + } u; + unsigned char data[0]; +}; + +struct compat_xt_entry_target { + union { + struct { + u_int16_t target_size; + char name[XT_FUNCTION_MAXNAMELEN - 1]; + u_int8_t revision; + } user; + struct { + u_int16_t target_size; + compat_uptr_t target; + } kernel; + u_int16_t target_size; + } u; + unsigned char data[0]; +}; + +/* FIXME: this works only on 32 bit tasks + * need to change whole approach in order to calculate align as function of + * current task alignment */ + +struct compat_xt_counters { + compat_u64 pcnt, bcnt; /* Packet and byte counters */ +}; + +struct compat_xt_counters_info { + char name[XT_TABLE_MAXNAMELEN]; + compat_uint_t num_counters; + struct compat_xt_counters counters[0]; +}; + +struct _compat_xt_align { + __u8 u8; + __u16 u16; + __u32 u32; + compat_u64 u64; +}; + +#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align)) + +void xt_compat_lock(u_int8_t af); +void xt_compat_unlock(u_int8_t af); + +int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta); +void xt_compat_flush_offsets(u_int8_t af); +int xt_compat_init_offsets(u8 af, unsigned int number); +int xt_compat_calc_jump(u_int8_t af, unsigned int offset); + +int xt_compat_match_offset(const struct xt_match *match); +void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, + unsigned int *size); +int xt_compat_match_to_user(const struct xt_entry_match *m, + void __user **dstptr, unsigned int *size); + +int xt_compat_target_offset(const struct xt_target *target); +void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, + unsigned int *size); +int xt_compat_target_to_user(const struct xt_entry_target *t, + void __user **dstptr, unsigned int *size); +int xt_compat_check_entry_offsets(const void *base, const char *elems, + unsigned int target_offset, + unsigned int next_offset); + +#endif /* CONFIG_COMPAT */ +#endif /* _X_TABLES_H */ diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h new file mode 100644 index 000000000..169d03983 --- /dev/null +++ b/include/linux/netfilter/xt_hashlimit.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _XT_HASHLIMIT_H +#define _XT_HASHLIMIT_H + +#include + +#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \ + XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \ + XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES |\ + XT_HASHLIMIT_RATE_MATCH) +#endif /*_XT_HASHLIMIT_H*/ diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h new file mode 100644 index 000000000..4ca059394 --- /dev/null +++ b/include/linux/netfilter/xt_physdev.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _XT_PHYSDEV_H +#define _XT_PHYSDEV_H + +#include +#include + +#endif /*_XT_PHYSDEV_H*/ diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h new file mode 100644 index 000000000..e98028f00 --- /dev/null +++ b/include/linux/netfilter_arp/arp_tables.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Format of an ARP firewall descriptor + * + * src, tgt, src_mask, tgt_mask, arpop, arpop_mask are always stored in + * network byte 
order. + * flags are stored in host byte order (of course). + */ +#ifndef _ARPTABLES_H +#define _ARPTABLES_H + +#include +#include +#include +#include +#include + +/* Standard entry. */ +struct arpt_standard { + struct arpt_entry entry; + struct xt_standard_target target; +}; + +struct arpt_error { + struct arpt_entry entry; + struct xt_error_target target; +}; + +#define ARPT_ENTRY_INIT(__size) \ +{ \ + .target_offset = sizeof(struct arpt_entry), \ + .next_offset = (__size), \ +} + +#define ARPT_STANDARD_INIT(__verdict) \ +{ \ + .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \ + .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ + sizeof(struct xt_standard_target)), \ + .target.verdict = -(__verdict) - 1, \ +} + +#define ARPT_ERROR_INIT \ +{ \ + .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \ + .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ + sizeof(struct xt_error_target)), \ + .target.errorname = "ERROR", \ +} + +extern void *arpt_alloc_initial_table(const struct xt_table *); +int arpt_register_table(struct net *net, const struct xt_table *table, + const struct arpt_replace *repl, + const struct nf_hook_ops *ops, struct xt_table **res); +void arpt_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); +extern unsigned int arpt_do_table(struct sk_buff *skb, + const struct nf_hook_state *state, + struct xt_table *table); + +#ifdef CONFIG_COMPAT +#include + +struct compat_arpt_entry { + struct arpt_arp arp; + __u16 target_offset; + __u16 next_offset; + compat_uint_t comefrom; + struct compat_xt_counters counters; + unsigned char elems[0]; +}; + +static inline struct xt_entry_target * +compat_arpt_get_target(struct compat_arpt_entry *e) +{ + return (void *)e + e->target_offset; +} + +#endif /* CONFIG_COMPAT */ +#endif /* _ARPTABLES_H */ diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h new file mode 100644 index 000000000..fa0686500 --- /dev/null +++ b/include/linux/netfilter_bridge.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BRIDGE_NETFILTER_H +#define __LINUX_BRIDGE_NETFILTER_H + +#include +#include + +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + +int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); + +static inline void br_drop_fake_rtable(struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + + if (dst && (dst->flags & DST_FAKE_RTABLE)) + skb_dst_drop(skb); +} + +static inline int nf_bridge_get_physinif(const struct sk_buff *skb) +{ + struct nf_bridge_info *nf_bridge; + + if (skb->nf_bridge == NULL) + return 0; + + nf_bridge = skb->nf_bridge; + return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0; +} + +static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) +{ + struct nf_bridge_info *nf_bridge; + + if (skb->nf_bridge == NULL) + return 0; + + nf_bridge = skb->nf_bridge; + return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0; +} + +static inline struct net_device * +nf_bridge_get_physindev(const struct sk_buff *skb) +{ + return skb->nf_bridge ? skb->nf_bridge->physindev : NULL; +} + +static inline struct net_device * +nf_bridge_get_physoutdev(const struct sk_buff *skb) +{ + return skb->nf_bridge ? 
skb->nf_bridge->physoutdev : NULL; +} + +static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb) +{ + return skb->nf_bridge && skb->nf_bridge->in_prerouting; +} +#else +#define br_drop_fake_rtable(skb) do { } while (0) +static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb) +{ + return false; +} +#endif /* CONFIG_BRIDGE_NETFILTER */ + +#endif diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h new file mode 100644 index 000000000..c6147f9c0 --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_802_3.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BRIDGE_EBT_802_3_H +#define __LINUX_BRIDGE_EBT_802_3_H + +#include +#include + +static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb) +{ + return (struct ebt_802_3_hdr *)skb_mac_header(skb); +} +#endif diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h new file mode 100644 index 000000000..c6935be7c --- /dev/null +++ b/include/linux/netfilter_bridge/ebtables.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ebtables + * + * Authors: + * Bart De Schuymer + * + * ebtables.c,v 2.0, April, 2002 + * + * This code is strongly inspired by the iptables code which is + * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling + */ +#ifndef __LINUX_BRIDGE_EFF_H +#define __LINUX_BRIDGE_EFF_H + +#include +#include +#include + +struct ebt_match { + struct list_head list; + const char name[EBT_FUNCTION_MAXNAMELEN]; + bool (*match)(const struct sk_buff *skb, const struct net_device *in, + const struct net_device *out, const struct xt_match *match, + const void *matchinfo, int offset, unsigned int protoff, + bool *hotdrop); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_match *match, void *matchinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_match *match, void *matchinfo); + unsigned int matchsize; + u_int8_t revision; + u_int8_t family; + struct module *me; +}; + +struct ebt_watcher { + struct list_head list; + const char name[EBT_FUNCTION_MAXNAMELEN]; + unsigned int (*target)(struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int hook_num, const struct xt_target *target, + const void *targinfo); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_target *target, void *targinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_target *target, void *targinfo); + unsigned int targetsize; + u_int8_t revision; + u_int8_t family; + struct module *me; +}; + +struct ebt_target { + struct list_head list; + const char name[EBT_FUNCTION_MAXNAMELEN]; + /* returns one of the standard EBT_* verdicts */ + unsigned int (*target)(struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int hook_num, const struct xt_target *target, + const void *targinfo); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_target *target, void *targinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_target *target, void *targinfo); + unsigned int targetsize; + u_int8_t revision; + u_int8_t family; + struct module *me; +}; + +/* used for jumping from and into user defined chains (udc) */ +struct ebt_chainstack { + struct ebt_entries *chaininfo; /* pointer to chain data */ + struct ebt_entry *e; /* pointer to entry data */ + unsigned int n; /* n'th entry */ +}; + +struct ebt_table_info { + /* total 
size of the entries */ + unsigned int entries_size; + unsigned int nentries; + /* pointers to the start of the chains */ + struct ebt_entries *hook_entry[NF_BR_NUMHOOKS]; + /* room to maintain the stack used for jumping from and into udc */ + struct ebt_chainstack **chainstack; + char *entries; + struct ebt_counter counters[0] ____cacheline_aligned; +}; + +struct ebt_table { + struct list_head list; + char name[EBT_TABLE_MAXNAMELEN]; + struct ebt_replace_kernel *table; + unsigned int valid_hooks; + rwlock_t lock; + /* e.g. could be the table explicitly only allows certain + * matches, targets, ... 0 == let it in */ + int (*check)(const struct ebt_table_info *info, + unsigned int valid_hooks); + /* the data used by the kernel */ + struct ebt_table_info *private; + struct module *me; +}; + +#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ + ~(__alignof__(struct _xt_align)-1)) +extern int ebt_register_table(struct net *net, + const struct ebt_table *table, + const struct nf_hook_ops *ops, + struct ebt_table **res); +extern void ebt_unregister_table(struct net *net, struct ebt_table *table, + const struct nf_hook_ops *); +extern unsigned int ebt_do_table(struct sk_buff *skb, + const struct nf_hook_state *state, + struct ebt_table *table); + +/* True if the hook mask denotes that the rule is in a base chain, + * used in the check() functions */ +#define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS)) +/* Clear the bit in the hook mask that tells if the rule is on a base chain */ +#define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS)) + +static inline bool ebt_invalid_target(int target) +{ + return (target < -NUM_STANDARD_TARGETS || target >= 0); +} + +#endif diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h new file mode 100644 index 000000000..8dddfb151 --- /dev/null +++ b/include/linux/netfilter_defs.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_NETFILTER_CORE_H_ +#define __LINUX_NETFILTER_CORE_H_ + +#include + +/* in/out/forward only */ +#define NF_ARP_NUMHOOKS 3 + +/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */ +#define NF_DN_NUMHOOKS 7 + +#if IS_ENABLED(CONFIG_DECNET) +/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ +#define NF_MAX_HOOKS NF_DN_NUMHOOKS +#else +#define NF_MAX_HOOKS NF_INET_NUMHOOKS +#endif + +#endif diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h new file mode 100644 index 000000000..a13774be2 --- /dev/null +++ b/include/linux/netfilter_ingress.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NETFILTER_INGRESS_H_ +#define _NETFILTER_INGRESS_H_ + +#include +#include + +#ifdef CONFIG_NETFILTER_INGRESS +static inline bool nf_hook_ingress_active(const struct sk_buff *skb) +{ +#ifdef CONFIG_JUMP_LABEL + if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) + return false; +#endif + return rcu_access_pointer(skb->dev->nf_hooks_ingress); +} + +/* caller must hold rcu_read_lock */ +static inline int nf_hook_ingress(struct sk_buff *skb) +{ + struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress); + struct nf_hook_state state; + int ret; + + /* Must recheck the ingress hook head, in the event it became NULL + * after the check in nf_hook_ingress_active evaluated to true. 
+ */ + if (unlikely(!e)) + return 0; + + nf_hook_state_init(&state, NF_NETDEV_INGRESS, + NFPROTO_NETDEV, skb->dev, NULL, NULL, + dev_net(skb->dev), NULL); + ret = nf_hook_slow(skb, &state, e, 0); + if (ret == 0) + return -1; + + return ret; +} + +static inline void nf_hook_ingress_init(struct net_device *dev) +{ + RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL); +} +#else /* CONFIG_NETFILTER_INGRESS */ +static inline int nf_hook_ingress_active(struct sk_buff *skb) +{ + return 0; +} + +static inline int nf_hook_ingress(struct sk_buff *skb) +{ + return 0; +} + +static inline void nf_hook_ingress_init(struct net_device *dev) {} +#endif /* CONFIG_NETFILTER_INGRESS */ +#endif /* _NETFILTER_INGRESS_H_ */ diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h new file mode 100644 index 000000000..45ff1330b --- /dev/null +++ b/include/linux/netfilter_ipv4.h @@ -0,0 +1,47 @@ +/* IPv4-specific defines for netfilter. + * (C)1998 Rusty Russell -- This code is GPL. + */ +#ifndef __LINUX_IP_NETFILTER_H +#define __LINUX_IP_NETFILTER_H + +#include + +/* Extra routing may needed on local out, as the QUEUE target never returns + * control to the table. + */ +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + +int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type); + +struct nf_queue_entry; + +#ifdef CONFIG_INET +__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); +int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict); +int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry); +#else +static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol) +{ + return 0; +} +static inline int nf_ip_route(struct net *net, struct dst_entry **dst, + struct flowi *fl, bool strict) +{ + return -EOPNOTSUPP; +} +static inline int nf_ip_reroute(struct sk_buff *skb, + const struct nf_queue_entry *entry) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_INET */ + +#endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h new file mode 100644 index 000000000..d026e63a5 --- /dev/null +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * 25-Jul-1998 Major changes to allow for ip chain table + * + * 3-Jan-2000 Named tables to allow packet selection for different uses. + */ + +/* + * Format of an IP firewall descriptor + * + * src, dst, src_mask, dst_mask are always stored in network byte order. + * flags are stored in host byte order (of course). + * Port numbers are stored in HOST byte order. + */ +#ifndef _IPTABLES_H +#define _IPTABLES_H + +#include +#include +#include +#include + +#include +#include + +extern void ipt_init(void) __init; + +int ipt_register_table(struct net *net, const struct xt_table *table, + const struct ipt_replace *repl, + const struct nf_hook_ops *ops, struct xt_table **res); +void ipt_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); + +/* Standard entry. 
*/ +struct ipt_standard { + struct ipt_entry entry; + struct xt_standard_target target; +}; + +struct ipt_error { + struct ipt_entry entry; + struct xt_error_target target; +}; + +#define IPT_ENTRY_INIT(__size) \ +{ \ + .target_offset = sizeof(struct ipt_entry), \ + .next_offset = (__size), \ +} + +#define IPT_STANDARD_INIT(__verdict) \ +{ \ + .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \ + .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ + sizeof(struct xt_standard_target)), \ + .target.verdict = -(__verdict) - 1, \ +} + +#define IPT_ERROR_INIT \ +{ \ + .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \ + .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ + sizeof(struct xt_error_target)), \ + .target.errorname = "ERROR", \ +} + +extern void *ipt_alloc_initial_table(const struct xt_table *); +extern unsigned int ipt_do_table(struct sk_buff *skb, + const struct nf_hook_state *state, + struct xt_table *table); + +#ifdef CONFIG_COMPAT +#include + +struct compat_ipt_entry { + struct ipt_ip ip; + compat_uint_t nfcache; + __u16 target_offset; + __u16 next_offset; + compat_uint_t comefrom; + struct compat_xt_counters counters; + unsigned char elems[0]; +}; + +/* Helper functions */ +static inline struct xt_entry_target * +compat_ipt_get_target(struct compat_ipt_entry *e) +{ + return (void *)e + e->target_offset; +} + +#endif /* CONFIG_COMPAT */ +#endif /* _IPTABLES_H */ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h new file mode 100644 index 000000000..47a2de582 --- /dev/null +++ b/include/linux/netfilter_ipv6.h @@ -0,0 +1,58 @@ +/* IPv6-specific defines for netfilter. + * (C)1998 Rusty Russell -- This code is GPL. + * (C)1999 David Jeffery + * this header was blatantly ripped from netfilter_ipv4.h + * it's amazing what adding a bunch of 6s can do =8^) + */ +#ifndef __LINUX_IP6_NETFILTER_H +#define __LINUX_IP6_NETFILTER_H + +#include + +/* Extra routing may needed on local out, as the QUEUE target never returns + * control to the table. + */ +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + +struct nf_queue_entry; + +/* + * Hook functions for ipv6 to allow xt_* modules to be built-in even + * if IPv6 is a module. 
+ */ +struct nf_ipv6_ops { + int (*chk_addr)(struct net *net, const struct in6_addr *addr, + const struct net_device *dev, int strict); + void (*route_input)(struct sk_buff *skb); + int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, + int (*output)(struct net *, struct sock *, struct sk_buff *)); + int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict); + int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); +}; + +#ifdef CONFIG_NETFILTER +int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb); +__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); + +int ipv6_netfilter_init(void); +void ipv6_netfilter_fini(void); + +extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; +static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) +{ + return rcu_dereference(nf_ipv6_ops); +} + +#else /* CONFIG_NETFILTER */ +static inline int ipv6_netfilter_init(void) { return 0; } +static inline void ipv6_netfilter_fini(void) { return; } +static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; } +#endif /* CONFIG_NETFILTER */ + +#endif /*__LINUX_IP6_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h new file mode 100644 index 000000000..99cbfd3ad --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * 25-Jul-1998 Major changes to allow for ip chain table + * + * 3-Jan-2000 Named tables to allow packet selection for different uses. + */ + +/* + * Format of an IP6 firewall descriptor + * + * src, dst, src_mask, dst_mask are always stored in network byte order. + * flags are stored in host byte order (of course). + * Port numbers are stored in HOST byte order. 
+ */ +#ifndef _IP6_TABLES_H +#define _IP6_TABLES_H + +#include +#include +#include +#include + +#include +#include + +extern void ip6t_init(void) __init; + +extern void *ip6t_alloc_initial_table(const struct xt_table *); +int ip6t_register_table(struct net *net, const struct xt_table *table, + const struct ip6t_replace *repl, + const struct nf_hook_ops *ops, struct xt_table **res); +void ip6t_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); +extern unsigned int ip6t_do_table(struct sk_buff *skb, + const struct nf_hook_state *state, + struct xt_table *table); + +/* Check for an extension */ +static inline int +ip6t_ext_hdr(u8 nexthdr) +{ return (nexthdr == IPPROTO_HOPOPTS) || + (nexthdr == IPPROTO_ROUTING) || + (nexthdr == IPPROTO_FRAGMENT) || + (nexthdr == IPPROTO_ESP) || + (nexthdr == IPPROTO_AH) || + (nexthdr == IPPROTO_NONE) || + (nexthdr == IPPROTO_DSTOPTS); +} + +#ifdef CONFIG_COMPAT +#include + +struct compat_ip6t_entry { + struct ip6t_ip6 ipv6; + compat_uint_t nfcache; + __u16 target_offset; + __u16 next_offset; + compat_uint_t comefrom; + struct compat_xt_counters counters; + unsigned char elems[0]; +}; + +static inline struct xt_entry_target * +compat_ip6t_get_target(struct compat_ip6t_entry *e) +{ + return (void *)e + e->target_offset; +} + +#endif /* CONFIG_COMPAT */ +#endif /* _IP6_TABLES_H */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h new file mode 100644 index 000000000..71f121b66 --- /dev/null +++ b/include/linux/netlink.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_NETLINK_H +#define __LINUX_NETLINK_H + + +#include +#include +#include +#include +#include + +struct net; + +static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) +{ + return (struct nlmsghdr *)skb->data; +} + +enum netlink_skb_flags { + NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */ +}; + +struct netlink_skb_parms { + struct scm_creds creds; /* Skb credentials */ + __u32 portid; + __u32 dst_group; + __u32 flags; + struct sock *sk; + bool nsid_is_set; + int nsid; +}; + +#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) +#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) + + +extern void netlink_table_grab(void); +extern void netlink_table_ungrab(void); + +#define NL_CFG_F_NONROOT_RECV (1 << 0) +#define NL_CFG_F_NONROOT_SEND (1 << 1) + +/* optional Netlink kernel configuration parameters */ +struct netlink_kernel_cfg { + unsigned int groups; + unsigned int flags; + void (*input)(struct sk_buff *skb); + struct mutex *cb_mutex; + int (*bind)(struct net *net, int group); + void (*unbind)(struct net *net, int group); + bool (*compare)(struct net *net, struct sock *sk); +}; + +extern struct sock *__netlink_kernel_create(struct net *net, int unit, + struct module *module, + struct netlink_kernel_cfg *cfg); +static inline struct sock * +netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg) +{ + return __netlink_kernel_create(net, unit, THIS_MODULE, cfg); +} + +/* this can be increased when necessary - don't expose to userland */ +#define NETLINK_MAX_COOKIE_LEN 20 + +/** + * struct netlink_ext_ack - netlink extended ACK report struct + * @_msg: message string to report - don't access directly, use + * %NL_SET_ERR_MSG + * @bad_attr: attribute with error + * @cookie: cookie data to return to userspace (for success) + * @cookie_len: actual cookie data length + */ +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 
cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +/* Always use this macro, this allows later putting the + * message into a separate section or such for things + * like translation or listing all possible messages. + * Currently string formatting is not supported (due + * to the lack of an output buffer.) + */ +#define NL_SET_ERR_MSG(extack, msg) do { \ + static const char __msg[] = msg; \ + struct netlink_ext_ack *__extack = (extack); \ + \ + if (__extack) \ + __extack->_msg = __msg; \ +} while (0) + +#define NL_SET_ERR_MSG_MOD(extack, msg) \ + NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg) + +#define NL_SET_BAD_ATTR(extack, attr) do { \ + if ((extack)) \ + (extack)->bad_attr = (attr); \ +} while (0) + +#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \ + static const char __msg[] = msg; \ + struct netlink_ext_ack *__extack = (extack); \ + \ + if (__extack) { \ + __extack->_msg = __msg; \ + __extack->bad_attr = (attr); \ + } \ +} while (0) + +extern void netlink_kernel_release(struct sock *sk); +extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); +extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); +extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); +extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, + const struct netlink_ext_ack *extack); +extern int netlink_has_listeners(struct sock *sk, unsigned int group); + +extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); +extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, + __u32 group, gfp_t allocation); +extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, + __u32 portid, __u32 group, gfp_t allocation, + int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), + void *filter_data); +extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); +extern int netlink_register_notifier(struct notifier_block *nb); +extern int netlink_unregister_notifier(struct notifier_block *nb); + +/* finegrained unicast helpers: */ +struct sock *netlink_getsockbyfilp(struct file *filp); +int netlink_attachskb(struct sock *sk, struct sk_buff *skb, + long *timeo, struct sock *ssk); +void netlink_detachskb(struct sock *sk, struct sk_buff *skb); +int netlink_sendskb(struct sock *sk, struct sk_buff *skb); + +static inline struct sk_buff * +netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) +{ + struct sk_buff *nskb; + + nskb = skb_clone(skb, gfp_mask); + if (!nskb) + return NULL; + + /* This is a large skb, set destructor callback to release head */ + if (is_vmalloc_addr(skb->head)) + nskb->destructor = skb->destructor; + + return nskb; +} + +/* + * skb should fit one page. This choice is good for headerless malloc. + * But we should limit to 8K so that userspace does not have to + * use enormous buffer sizes on recvmsg() calls just to avoid + * MSG_TRUNC when PAGE_SIZE is very large. 
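// Illustrative sketch (not from the kernel sources): registering a kernel-side
// netlink socket with the netlink_kernel_cfg structure shown above. The
// protocol number (NETLINK_USERSOCK), the callback and the variable names are
// placeholders chosen for illustration only.
static struct sock *demo_nl_sock;

static void demo_nl_input(struct sk_buff *skb)
{
        struct nlmsghdr *nlh = nlmsg_hdr(skb);  // header sits at skb->data

        pr_info("demo: netlink type %u, len %u, portid %u\n",
                nlh->nlmsg_type, nlh->nlmsg_len, NETLINK_CB(skb).portid);
}

static int __init demo_nl_init(void)
{
        struct netlink_kernel_cfg cfg = {
                .groups = 1,
                .input  = demo_nl_input,
        };

        demo_nl_sock = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
        return demo_nl_sock ? 0 : -ENOMEM;
}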
+ */ +#if PAGE_SIZE < 8192UL +#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE) +#else +#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(8192UL) +#endif + +#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN) + + +struct netlink_callback { + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff * skb, + struct netlink_callback *cb); + int (*done)(struct netlink_callback *cb); + void *data; + /* the module that dump function belong to */ + struct module *module; + u16 family; + u16 min_dump_alloc; + unsigned int prev_seq, seq; + long args[6]; +}; + +struct netlink_notify { + struct net *net; + u32 portid; + int protocol; +}; + +struct nlmsghdr * +__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags); + +struct netlink_dump_control { + int (*start)(struct netlink_callback *); + int (*dump)(struct sk_buff *skb, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + u16 min_dump_alloc; +}; + +extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + const struct nlmsghdr *nlh, + struct netlink_dump_control *control); +static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + const struct nlmsghdr *nlh, + struct netlink_dump_control *control) +{ + if (!control->module) + control->module = THIS_MODULE; + + return __netlink_dump_start(ssk, skb, nlh, control); +} + +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +extern int netlink_add_tap(struct netlink_tap *nt); +extern int netlink_remove_tap(struct netlink_tap *nt); + +bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, + struct user_namespace *ns, int cap); +bool netlink_ns_capable(const struct sk_buff *skb, + struct user_namespace *ns, int cap); +bool netlink_capable(const struct sk_buff *skb, int cap); +bool netlink_net_capable(const struct sk_buff *skb, int cap); + +#endif /* __LINUX_NETLINK_H */ diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h new file mode 100644 index 000000000..3ef82d3a7 --- /dev/null +++ b/include/linux/netpoll.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common code for low-level network console, dump, and debugger code + * + * Derived from netconsole, kgdb-over-ethernet, and netdump patches + */ + +#ifndef _LINUX_NETPOLL_H +#define _LINUX_NETPOLL_H + +#include +#include +#include +#include +#include + +union inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +struct netpoll { + struct net_device *dev; + char dev_name[IFNAMSIZ]; + const char *name; + + union inet_addr local_ip, remote_ip; + bool ipv6; + u16 local_port, remote_port; + u8 remote_mac[ETH_ALEN]; + + struct work_struct cleanup_work; +}; + +struct netpoll_info { + refcount_t refcnt; + + struct semaphore dev_lock; + + struct sk_buff_head txq; + + struct delayed_work tx_work; + + struct netpoll *netpoll; + struct rcu_head rcu; +}; + +#ifdef CONFIG_NETPOLL +void netpoll_poll_dev(struct net_device *dev); +void netpoll_poll_disable(struct net_device *dev); +void netpoll_poll_enable(struct net_device *dev); +#else +static inline void netpoll_poll_disable(struct net_device *dev) { return; } +static inline void netpoll_poll_enable(struct net_device *dev) { return; } +#endif + +void netpoll_send_udp(struct netpoll *np, const char *msg, int len); +void netpoll_print_options(struct netpoll *np); +int netpoll_parse_options(struct netpoll *np, char *opt); +int 
__netpoll_setup(struct netpoll *np, struct net_device *ndev); +int netpoll_setup(struct netpoll *np); +void __netpoll_cleanup(struct netpoll *np); +void __netpoll_free_async(struct netpoll *np); +void netpoll_cleanup(struct netpoll *np); +void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, + struct net_device *dev); +static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) +{ + unsigned long flags; + local_irq_save(flags); + netpoll_send_skb_on_dev(np, skb, np->dev); + local_irq_restore(flags); +} + +#ifdef CONFIG_NETPOLL +static inline void *netpoll_poll_lock(struct napi_struct *napi) +{ + struct net_device *dev = napi->dev; + + if (dev && dev->npinfo) { + int owner = smp_processor_id(); + + while (cmpxchg(&napi->poll_owner, -1, owner) != -1) + cpu_relax(); + + return napi; + } + return NULL; +} + +static inline void netpoll_poll_unlock(void *have) +{ + struct napi_struct *napi = have; + + if (napi) + smp_store_release(&napi->poll_owner, -1); +} + +static inline bool netpoll_tx_running(struct net_device *dev) +{ + return irqs_disabled(); +} + +#else +static inline void *netpoll_poll_lock(struct napi_struct *napi) +{ + return NULL; +} +static inline void netpoll_poll_unlock(void *have) +{ +} +static inline void netpoll_netdev_init(struct net_device *dev) +{ +} +static inline bool netpoll_tx_running(struct net_device *dev) +{ + return false; +} +#endif + +#endif diff --git a/include/linux/nfs.h b/include/linux/nfs.h new file mode 100644 index 000000000..0dc7ad38a --- /dev/null +++ b/include/linux/nfs.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NFS protocol definitions + * + * This file contains constants mostly for Version 2 of the protocol, + * but also has a couple of NFSv3 bits in (notably the error codes). + */ +#ifndef _LINUX_NFS_H +#define _LINUX_NFS_H + +#include +#include +#include + +/* + * This is the kernel NFS client file handle representation + */ +#define NFS_MAXFHSIZE 128 +struct nfs_fh { + unsigned short size; + unsigned char data[NFS_MAXFHSIZE]; +}; + +/* + * Returns a zero iff the size and data fields match. + * Checks only "size" bytes in the data field. + */ +static inline int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b) +{ + return a->size != b->size || memcmp(a->data, b->data, a->size) != 0; +} + +static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source) +{ + target->size = source->size; + memcpy(target->data, source->data, source->size); +} + + +/* + * This is really a general kernel constant, but since nothing like + * this is defined in the kernel headers, I have to do it here. 
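// Illustrative sketch (not from the kernel sources): the nfs_copy_fh() and
// nfs_compare_fh() helpers above are meant to be used as a pair when a file
// handle has to be duplicated and later matched. The function name is
// hypothetical.
static bool demo_fh_roundtrip(const struct nfs_fh *src)
{
        struct nfs_fh copy;

        nfs_copy_fh(&copy, src);
        return nfs_compare_fh(&copy, src) == 0; // zero means the handles match
}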
+ */ +#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1)) + + +enum nfs3_stable_how { + NFS_UNSTABLE = 0, + NFS_DATA_SYNC = 1, + NFS_FILE_SYNC = 2, + + /* used by direct.c to mark verf as invalid */ + NFS_INVALID_STABLE_HOW = -1 +}; +#endif /* _LINUX_NFS_H */ diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h new file mode 100644 index 000000000..404b8f724 --- /dev/null +++ b/include/linux/nfs3.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NFSv3 protocol definitions + */ +#ifndef _LINUX_NFS3_H +#define _LINUX_NFS3_H + +#include + + +/* Number of 32bit words in post_op_attr */ +#define NFS3_POST_OP_ATTR_WORDS 22 + +#endif /* _LINUX_NFS3_H */ diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h new file mode 100644 index 000000000..1b06f0b28 --- /dev/null +++ b/include/linux/nfs4.h @@ -0,0 +1,675 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/nfs4.h + * + * NFSv4 protocol definitions. + * + * Copyright (c) 2002 The Regents of the University of Michigan. + * All rights reserved. + * + * Kendrick Smith + * Andy Adamson + */ +#ifndef _LINUX_NFS4_H +#define _LINUX_NFS4_H + +#include +#include +#include + +enum nfs4_acl_whotype { + NFS4_ACL_WHO_NAMED = 0, + NFS4_ACL_WHO_OWNER, + NFS4_ACL_WHO_GROUP, + NFS4_ACL_WHO_EVERYONE, +}; + +struct nfs4_ace { + uint32_t type; + uint32_t flag; + uint32_t access_mask; + int whotype; + union { + kuid_t who_uid; + kgid_t who_gid; + }; +}; + +struct nfs4_acl { + uint32_t naces; + struct nfs4_ace aces[0]; +}; + +#define NFS4_MAXLABELLEN 2048 + +struct nfs4_label { + uint32_t lfs; + uint32_t pi; + u32 len; + char *label; +}; + +typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; + +struct nfs4_stateid_struct { + union { + char data[NFS4_STATEID_SIZE]; + struct { + __be32 seqid; + char other[NFS4_STATEID_OTHER_SIZE]; + } __attribute__ ((packed)); + }; + + enum { + NFS4_INVALID_STATEID_TYPE = 0, + NFS4_SPECIAL_STATEID_TYPE, + NFS4_OPEN_STATEID_TYPE, + NFS4_LOCK_STATEID_TYPE, + NFS4_DELEGATION_STATEID_TYPE, + NFS4_LAYOUT_STATEID_TYPE, + NFS4_PNFS_DS_STATEID_TYPE, + NFS4_REVOKED_STATEID_TYPE, + } type; +}; + +typedef struct nfs4_stateid_struct nfs4_stateid; + +enum nfs_opnum4 { + OP_ACCESS = 3, + OP_CLOSE = 4, + OP_COMMIT = 5, + OP_CREATE = 6, + OP_DELEGPURGE = 7, + OP_DELEGRETURN = 8, + OP_GETATTR = 9, + OP_GETFH = 10, + OP_LINK = 11, + OP_LOCK = 12, + OP_LOCKT = 13, + OP_LOCKU = 14, + OP_LOOKUP = 15, + OP_LOOKUPP = 16, + OP_NVERIFY = 17, + OP_OPEN = 18, + OP_OPENATTR = 19, + OP_OPEN_CONFIRM = 20, + OP_OPEN_DOWNGRADE = 21, + OP_PUTFH = 22, + OP_PUTPUBFH = 23, + OP_PUTROOTFH = 24, + OP_READ = 25, + OP_READDIR = 26, + OP_READLINK = 27, + OP_REMOVE = 28, + OP_RENAME = 29, + OP_RENEW = 30, + OP_RESTOREFH = 31, + OP_SAVEFH = 32, + OP_SECINFO = 33, + OP_SETATTR = 34, + OP_SETCLIENTID = 35, + OP_SETCLIENTID_CONFIRM = 36, + OP_VERIFY = 37, + OP_WRITE = 38, + OP_RELEASE_LOCKOWNER = 39, + + /* nfs41 */ + OP_BACKCHANNEL_CTL = 40, + OP_BIND_CONN_TO_SESSION = 41, + OP_EXCHANGE_ID = 42, + OP_CREATE_SESSION = 43, + OP_DESTROY_SESSION = 44, + OP_FREE_STATEID = 45, + OP_GET_DIR_DELEGATION = 46, + OP_GETDEVICEINFO = 47, + OP_GETDEVICELIST = 48, + OP_LAYOUTCOMMIT = 49, + OP_LAYOUTGET = 50, + OP_LAYOUTRETURN = 51, + OP_SECINFO_NO_NAME = 52, + OP_SEQUENCE = 53, + OP_SET_SSV = 54, + OP_TEST_STATEID = 55, + OP_WANT_DELEGATION = 56, + OP_DESTROY_CLIENTID = 57, + OP_RECLAIM_COMPLETE = 58, + + /* nfs42 */ + OP_ALLOCATE = 59, + OP_COPY = 60, + OP_COPY_NOTIFY = 61, + OP_DEALLOCATE = 62, + OP_IO_ADVISE = 63, + OP_LAYOUTERROR = 64, + 
OP_LAYOUTSTATS = 65, + OP_OFFLOAD_CANCEL = 66, + OP_OFFLOAD_STATUS = 67, + OP_READ_PLUS = 68, + OP_SEEK = 69, + OP_WRITE_SAME = 70, + OP_CLONE = 71, + + OP_ILLEGAL = 10044, +}; + +/*Defining first and last NFS4 operations implemented. +Needs to be updated if more operations are defined in future.*/ + +#define FIRST_NFS4_OP OP_ACCESS +#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER +#define LAST_NFS41_OP OP_RECLAIM_COMPLETE +#define LAST_NFS42_OP OP_CLONE +#define LAST_NFS4_OP LAST_NFS42_OP + +enum nfsstat4 { + NFS4_OK = 0, + NFS4ERR_PERM = 1, + NFS4ERR_NOENT = 2, + NFS4ERR_IO = 5, + NFS4ERR_NXIO = 6, + NFS4ERR_ACCESS = 13, + NFS4ERR_EXIST = 17, + NFS4ERR_XDEV = 18, + /* Unused/reserved 19 */ + NFS4ERR_NOTDIR = 20, + NFS4ERR_ISDIR = 21, + NFS4ERR_INVAL = 22, + NFS4ERR_FBIG = 27, + NFS4ERR_NOSPC = 28, + NFS4ERR_ROFS = 30, + NFS4ERR_MLINK = 31, + NFS4ERR_NAMETOOLONG = 63, + NFS4ERR_NOTEMPTY = 66, + NFS4ERR_DQUOT = 69, + NFS4ERR_STALE = 70, + NFS4ERR_BADHANDLE = 10001, + NFS4ERR_BAD_COOKIE = 10003, + NFS4ERR_NOTSUPP = 10004, + NFS4ERR_TOOSMALL = 10005, + NFS4ERR_SERVERFAULT = 10006, + NFS4ERR_BADTYPE = 10007, + NFS4ERR_DELAY = 10008, + NFS4ERR_SAME = 10009, + NFS4ERR_DENIED = 10010, + NFS4ERR_EXPIRED = 10011, + NFS4ERR_LOCKED = 10012, + NFS4ERR_GRACE = 10013, + NFS4ERR_FHEXPIRED = 10014, + NFS4ERR_SHARE_DENIED = 10015, + NFS4ERR_WRONGSEC = 10016, + NFS4ERR_CLID_INUSE = 10017, + NFS4ERR_RESOURCE = 10018, + NFS4ERR_MOVED = 10019, + NFS4ERR_NOFILEHANDLE = 10020, + NFS4ERR_MINOR_VERS_MISMATCH = 10021, + NFS4ERR_STALE_CLIENTID = 10022, + NFS4ERR_STALE_STATEID = 10023, + NFS4ERR_OLD_STATEID = 10024, + NFS4ERR_BAD_STATEID = 10025, + NFS4ERR_BAD_SEQID = 10026, + NFS4ERR_NOT_SAME = 10027, + NFS4ERR_LOCK_RANGE = 10028, + NFS4ERR_SYMLINK = 10029, + NFS4ERR_RESTOREFH = 10030, + NFS4ERR_LEASE_MOVED = 10031, + NFS4ERR_ATTRNOTSUPP = 10032, + NFS4ERR_NO_GRACE = 10033, + NFS4ERR_RECLAIM_BAD = 10034, + NFS4ERR_RECLAIM_CONFLICT = 10035, + NFS4ERR_BADXDR = 10036, + NFS4ERR_LOCKS_HELD = 10037, + NFS4ERR_OPENMODE = 10038, + NFS4ERR_BADOWNER = 10039, + NFS4ERR_BADCHAR = 10040, + NFS4ERR_BADNAME = 10041, + NFS4ERR_BAD_RANGE = 10042, + NFS4ERR_LOCK_NOTSUPP = 10043, + NFS4ERR_OP_ILLEGAL = 10044, + NFS4ERR_DEADLOCK = 10045, + NFS4ERR_FILE_OPEN = 10046, + NFS4ERR_ADMIN_REVOKED = 10047, + NFS4ERR_CB_PATH_DOWN = 10048, + + /* nfs41 */ + NFS4ERR_BADIOMODE = 10049, + NFS4ERR_BADLAYOUT = 10050, + NFS4ERR_BAD_SESSION_DIGEST = 10051, + NFS4ERR_BADSESSION = 10052, + NFS4ERR_BADSLOT = 10053, + NFS4ERR_COMPLETE_ALREADY = 10054, + NFS4ERR_CONN_NOT_BOUND_TO_SESSION = 10055, + NFS4ERR_DELEG_ALREADY_WANTED = 10056, + NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */ + NFS4ERR_LAYOUTTRYLATER = 10058, + NFS4ERR_LAYOUTUNAVAILABLE = 10059, + NFS4ERR_NOMATCHING_LAYOUT = 10060, + NFS4ERR_RECALLCONFLICT = 10061, + NFS4ERR_UNKNOWN_LAYOUTTYPE = 10062, + NFS4ERR_SEQ_MISORDERED = 10063, /* unexpected seq.id in req */ + NFS4ERR_SEQUENCE_POS = 10064, /* [CB_]SEQ. op not 1st op */ + NFS4ERR_REQ_TOO_BIG = 10065, /* request too big */ + NFS4ERR_REP_TOO_BIG = 10066, /* reply too big */ + NFS4ERR_REP_TOO_BIG_TO_CACHE = 10067, /* rep. not all cached */ + NFS4ERR_RETRY_UNCACHED_REP = 10068, /* retry & rep. uncached */ + NFS4ERR_UNSAFE_COMPOUND = 10069, /* retry/recovery too hard */ + NFS4ERR_TOO_MANY_OPS = 10070, /* too many ops in [CB_]COMP */ + NFS4ERR_OP_NOT_IN_SESSION = 10071, /* op needs [CB_]SEQ. op */ + NFS4ERR_HASH_ALG_UNSUPP = 10072, /* hash alg. not supp. */ + /* Error 10073 is unused. 
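// Illustrative sketch (not from the kernel sources): the FIRST_NFS4_OP and
// LAST_NFS4*_OP bounds defined above delimit the operation numbers each minor
// version understands. A hypothetical helper making that range check explicit:
static inline bool demo_nfs4_op_in_range(u32 op, u32 minorversion)
{
        u32 last = (minorversion == 0) ? LAST_NFS40_OP :
                   (minorversion == 1) ? LAST_NFS41_OP : LAST_NFS42_OP;

        return op >= FIRST_NFS4_OP && op <= last;
}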
*/ + NFS4ERR_CLIENTID_BUSY = 10074, /* clientid has state */ + NFS4ERR_PNFS_IO_HOLE = 10075, /* IO to _SPARSE file hole */ + NFS4ERR_SEQ_FALSE_RETRY = 10076, /* retry not original */ + NFS4ERR_BAD_HIGH_SLOT = 10077, /* sequence arg bad */ + NFS4ERR_DEADSESSION = 10078, /* persistent session dead */ + NFS4ERR_ENCR_ALG_UNSUPP = 10079, /* SSV alg mismatch */ + NFS4ERR_PNFS_NO_LAYOUT = 10080, /* direct I/O with no layout */ + NFS4ERR_NOT_ONLY_OP = 10081, /* bad compound */ + NFS4ERR_WRONG_CRED = 10082, /* permissions:state change */ + NFS4ERR_WRONG_TYPE = 10083, /* current operation mismatch */ + NFS4ERR_DIRDELEG_UNAVAIL = 10084, /* no directory delegation */ + NFS4ERR_REJECT_DELEG = 10085, /* on callback */ + NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */ + NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */ + + /* nfs42 */ + NFS4ERR_PARTNER_NOTSUPP = 10088, + NFS4ERR_PARTNER_NO_AUTH = 10089, + NFS4ERR_UNION_NOTSUPP = 10090, + NFS4ERR_OFFLOAD_DENIED = 10091, + NFS4ERR_WRONG_LFS = 10092, + NFS4ERR_BADLABEL = 10093, + NFS4ERR_OFFLOAD_NO_REQS = 10094, +}; + +static inline bool seqid_mutating_err(u32 err) +{ + /* See RFC 7530, section 9.1.7 */ + switch (err) { + case NFS4ERR_STALE_CLIENTID: + case NFS4ERR_STALE_STATEID: + case NFS4ERR_BAD_STATEID: + case NFS4ERR_BAD_SEQID: + case NFS4ERR_BADXDR: + case NFS4ERR_RESOURCE: + case NFS4ERR_NOFILEHANDLE: + case NFS4ERR_MOVED: + return false; + }; + return true; +} + +/* + * Note: NF4BAD is not actually part of the protocol; it is just used + * internally by nfsd. + */ +enum nfs_ftype4 { + NF4BAD = 0, + NF4REG = 1, /* Regular File */ + NF4DIR = 2, /* Directory */ + NF4BLK = 3, /* Special File - block device */ + NF4CHR = 4, /* Special File - character device */ + NF4LNK = 5, /* Symbolic Link */ + NF4SOCK = 6, /* Special File - socket */ + NF4FIFO = 7, /* Special File - fifo */ + NF4ATTRDIR = 8, /* Attribute Directory */ + NF4NAMEDATTR = 9 /* Named Attribute */ +}; + +enum open_claim_type4 { + NFS4_OPEN_CLAIM_NULL = 0, + NFS4_OPEN_CLAIM_PREVIOUS = 1, + NFS4_OPEN_CLAIM_DELEGATE_CUR = 2, + NFS4_OPEN_CLAIM_DELEGATE_PREV = 3, + NFS4_OPEN_CLAIM_FH = 4, /* 4.1 */ + NFS4_OPEN_CLAIM_DELEG_CUR_FH = 5, /* 4.1 */ + NFS4_OPEN_CLAIM_DELEG_PREV_FH = 6, /* 4.1 */ +}; + +enum opentype4 { + NFS4_OPEN_NOCREATE = 0, + NFS4_OPEN_CREATE = 1 +}; + +enum createmode4 { + NFS4_CREATE_UNCHECKED = 0, + NFS4_CREATE_GUARDED = 1, + NFS4_CREATE_EXCLUSIVE = 2, + /* + * New to NFSv4.1. If session is persistent, + * GUARDED4 MUST be used. Otherwise, use + * EXCLUSIVE4_1 instead of EXCLUSIVE4. 
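// Illustrative sketch (not from the kernel sources): seqid_mutating_err()
// above returns true when a reply consumed an open/lock owner sequence slot
// (RFC 7530, section 9.1.7), so such replies must advance the sequence id.
// The helper name and exact policy below are assumptions for illustration.
static inline void demo_advance_seqid(u32 *seqid, u32 nfs_err)
{
        if (nfs_err == NFS4_OK || seqid_mutating_err(nfs_err))
                (*seqid)++;     // slot consumed: bump the sequence id
}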
+ */ + NFS4_CREATE_EXCLUSIVE4_1 = 3 +}; + +enum limit_by4 { + NFS4_LIMIT_SIZE = 1, + NFS4_LIMIT_BLOCKS = 2 +}; + +enum open_delegation_type4 { + NFS4_OPEN_DELEGATE_NONE = 0, + NFS4_OPEN_DELEGATE_READ = 1, + NFS4_OPEN_DELEGATE_WRITE = 2, + NFS4_OPEN_DELEGATE_NONE_EXT = 3, /* 4.1 */ +}; + +enum why_no_delegation4 { /* new to v4.1 */ + WND4_NOT_WANTED = 0, + WND4_CONTENTION = 1, + WND4_RESOURCE = 2, + WND4_NOT_SUPP_FTYPE = 3, + WND4_WRITE_DELEG_NOT_SUPP_FTYPE = 4, + WND4_NOT_SUPP_UPGRADE = 5, + WND4_NOT_SUPP_DOWNGRADE = 6, + WND4_CANCELLED = 7, + WND4_IS_DIR = 8, +}; + +enum lock_type4 { + NFS4_UNLOCK_LT = 0, + NFS4_READ_LT = 1, + NFS4_WRITE_LT = 2, + NFS4_READW_LT = 3, + NFS4_WRITEW_LT = 4 +}; + +enum change_attr_type4 { + NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0, + NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1, + NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2, + NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3, + NFS4_CHANGE_TYPE_IS_UNDEFINED = 4 +}; + +/* Mandatory Attributes */ +#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0) +#define FATTR4_WORD0_TYPE (1UL << 1) +#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2) +#define FATTR4_WORD0_CHANGE (1UL << 3) +#define FATTR4_WORD0_SIZE (1UL << 4) +#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5) +#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6) +#define FATTR4_WORD0_NAMED_ATTR (1UL << 7) +#define FATTR4_WORD0_FSID (1UL << 8) +#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) +#define FATTR4_WORD0_LEASE_TIME (1UL << 10) +#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) +/* Mandatory in NFSv4.1 */ +#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11) + +/* Recommended Attributes */ +#define FATTR4_WORD0_ACL (1UL << 12) +#define FATTR4_WORD0_ACLSUPPORT (1UL << 13) +#define FATTR4_WORD0_ARCHIVE (1UL << 14) +#define FATTR4_WORD0_CANSETTIME (1UL << 15) +#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16) +#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17) +#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18) +#define FATTR4_WORD0_FILEHANDLE (1UL << 19) +#define FATTR4_WORD0_FILEID (1UL << 20) +#define FATTR4_WORD0_FILES_AVAIL (1UL << 21) +#define FATTR4_WORD0_FILES_FREE (1UL << 22) +#define FATTR4_WORD0_FILES_TOTAL (1UL << 23) +#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24) +#define FATTR4_WORD0_HIDDEN (1UL << 25) +#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26) +#define FATTR4_WORD0_MAXFILESIZE (1UL << 27) +#define FATTR4_WORD0_MAXLINK (1UL << 28) +#define FATTR4_WORD0_MAXNAME (1UL << 29) +#define FATTR4_WORD0_MAXREAD (1UL << 30) +#define FATTR4_WORD0_MAXWRITE (1UL << 31) +#define FATTR4_WORD1_MIMETYPE (1UL << 0) +#define FATTR4_WORD1_MODE (1UL << 1) +#define FATTR4_WORD1_NO_TRUNC (1UL << 2) +#define FATTR4_WORD1_NUMLINKS (1UL << 3) +#define FATTR4_WORD1_OWNER (1UL << 4) +#define FATTR4_WORD1_OWNER_GROUP (1UL << 5) +#define FATTR4_WORD1_QUOTA_HARD (1UL << 6) +#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7) +#define FATTR4_WORD1_QUOTA_USED (1UL << 8) +#define FATTR4_WORD1_RAWDEV (1UL << 9) +#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10) +#define FATTR4_WORD1_SPACE_FREE (1UL << 11) +#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12) +#define FATTR4_WORD1_SPACE_USED (1UL << 13) +#define FATTR4_WORD1_SYSTEM (1UL << 14) +#define FATTR4_WORD1_TIME_ACCESS (1UL << 15) +#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16) +#define FATTR4_WORD1_TIME_BACKUP (1UL << 17) +#define FATTR4_WORD1_TIME_CREATE (1UL << 18) +#define FATTR4_WORD1_TIME_DELTA (1UL << 19) +#define FATTR4_WORD1_TIME_METADATA (1UL << 20) +#define FATTR4_WORD1_TIME_MODIFY (1UL << 21) +#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22) +#define 
FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23) +#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30) +#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0) +#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) +#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) +#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) +#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15) +#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) +#define FATTR4_WORD2_MODE_UMASK (1UL << 17) + +/* MDS threshold bitmap bits */ +#define THRESHOLD_RD (1UL << 0) +#define THRESHOLD_WR (1UL << 1) +#define THRESHOLD_RD_IO (1UL << 2) +#define THRESHOLD_WR_IO (1UL << 3) + +#define NFSPROC4_NULL 0 +#define NFSPROC4_COMPOUND 1 +#define NFS4_VERSION 4 +#define NFS4_MINOR_VERSION 0 + +#define NFS4_DEBUG 1 + +/* + * Index of predefined Linux client operations + * + * To ensure that /proc/net/rpc/nfs remains correctly ordered, please + * append only to this enum when adding new client operations. + */ + +enum { + NFSPROC4_CLNT_NULL = 0, /* Unused */ + NFSPROC4_CLNT_READ, + NFSPROC4_CLNT_WRITE, + NFSPROC4_CLNT_COMMIT, + NFSPROC4_CLNT_OPEN, + NFSPROC4_CLNT_OPEN_CONFIRM, + NFSPROC4_CLNT_OPEN_NOATTR, + NFSPROC4_CLNT_OPEN_DOWNGRADE, + NFSPROC4_CLNT_CLOSE, + NFSPROC4_CLNT_SETATTR, + NFSPROC4_CLNT_FSINFO, + NFSPROC4_CLNT_RENEW, + NFSPROC4_CLNT_SETCLIENTID, + NFSPROC4_CLNT_SETCLIENTID_CONFIRM, + NFSPROC4_CLNT_LOCK, + NFSPROC4_CLNT_LOCKT, + NFSPROC4_CLNT_LOCKU, + NFSPROC4_CLNT_ACCESS, + NFSPROC4_CLNT_GETATTR, + NFSPROC4_CLNT_LOOKUP, + NFSPROC4_CLNT_LOOKUP_ROOT, + NFSPROC4_CLNT_REMOVE, + NFSPROC4_CLNT_RENAME, + NFSPROC4_CLNT_LINK, + NFSPROC4_CLNT_SYMLINK, + NFSPROC4_CLNT_CREATE, + NFSPROC4_CLNT_PATHCONF, + NFSPROC4_CLNT_STATFS, + NFSPROC4_CLNT_READLINK, + NFSPROC4_CLNT_READDIR, + NFSPROC4_CLNT_SERVER_CAPS, + NFSPROC4_CLNT_DELEGRETURN, + NFSPROC4_CLNT_GETACL, + NFSPROC4_CLNT_SETACL, + NFSPROC4_CLNT_FS_LOCATIONS, + NFSPROC4_CLNT_RELEASE_LOCKOWNER, + NFSPROC4_CLNT_SECINFO, + NFSPROC4_CLNT_FSID_PRESENT, + + NFSPROC4_CLNT_EXCHANGE_ID, + NFSPROC4_CLNT_CREATE_SESSION, + NFSPROC4_CLNT_DESTROY_SESSION, + NFSPROC4_CLNT_SEQUENCE, + NFSPROC4_CLNT_GET_LEASE_TIME, + NFSPROC4_CLNT_RECLAIM_COMPLETE, + NFSPROC4_CLNT_LAYOUTGET, + NFSPROC4_CLNT_GETDEVICEINFO, + NFSPROC4_CLNT_LAYOUTCOMMIT, + NFSPROC4_CLNT_LAYOUTRETURN, + NFSPROC4_CLNT_SECINFO_NO_NAME, + NFSPROC4_CLNT_TEST_STATEID, + NFSPROC4_CLNT_FREE_STATEID, + NFSPROC4_CLNT_GETDEVICELIST, + NFSPROC4_CLNT_BIND_CONN_TO_SESSION, + NFSPROC4_CLNT_DESTROY_CLIENTID, + + NFSPROC4_CLNT_SEEK, + NFSPROC4_CLNT_ALLOCATE, + NFSPROC4_CLNT_DEALLOCATE, + NFSPROC4_CLNT_LAYOUTSTATS, + NFSPROC4_CLNT_CLONE, + NFSPROC4_CLNT_COPY, + NFSPROC4_CLNT_OFFLOAD_CANCEL, + + NFSPROC4_CLNT_LOOKUPP, +}; + +/* nfs41 types */ +struct nfs4_sessionid { + unsigned char data[NFS4_MAX_SESSIONID_LEN]; +}; + +/* Create Session Flags */ +#define SESSION4_PERSIST 0x001 +#define SESSION4_BACK_CHAN 0x002 +#define SESSION4_RDMA 0x004 + +#define SESSION4_FLAG_MASK_A 0x007 + +enum state_protect_how4 { + SP4_NONE = 0, + SP4_MACH_CRED = 1, + SP4_SSV = 2 +}; + +enum pnfs_layouttype { + LAYOUT_NFSV4_1_FILES = 1, + LAYOUT_OSD2_OBJECTS = 2, + LAYOUT_BLOCK_VOLUME = 3, + LAYOUT_FLEX_FILES = 4, + LAYOUT_SCSI = 5, + LAYOUT_TYPE_MAX +}; + +/* used for both layout return and recall */ +enum pnfs_layoutreturn_type { + RETURN_FILE = 1, + RETURN_FSID = 2, + RETURN_ALL = 3 +}; + +enum pnfs_iomode { + IOMODE_READ = 1, + IOMODE_RW = 2, + IOMODE_ANY = 3, +}; + +enum pnfs_notify_deviceid_type4 { + NOTIFY_DEVICEID4_CHANGE = 1 << 1, + NOTIFY_DEVICEID4_DELETE = 1 << 2, +}; + +enum pnfs_block_volume_type { + 
PNFS_BLOCK_VOLUME_SIMPLE = 0, + PNFS_BLOCK_VOLUME_SLICE = 1, + PNFS_BLOCK_VOLUME_CONCAT = 2, + PNFS_BLOCK_VOLUME_STRIPE = 3, + PNFS_BLOCK_VOLUME_SCSI = 4, +}; + +enum pnfs_block_extent_state { + PNFS_BLOCK_READWRITE_DATA = 0, + PNFS_BLOCK_READ_DATA = 1, + PNFS_BLOCK_INVALID_DATA = 2, + PNFS_BLOCK_NONE_DATA = 3, +}; + +/* on the wire size of a block layout extent */ +#define PNFS_BLOCK_EXTENT_SIZE \ + (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE) + +/* on the wire size of a scsi commit range */ +#define PNFS_SCSI_RANGE_SIZE \ + (4 * sizeof(__be32)) + +enum scsi_code_set { + PS_CODE_SET_BINARY = 1, + PS_CODE_SET_ASCII = 2, + PS_CODE_SET_UTF8 = 3 +}; + +enum scsi_designator_type { + PS_DESIGNATOR_T10 = 1, + PS_DESIGNATOR_EUI64 = 2, + PS_DESIGNATOR_NAA = 3, + PS_DESIGNATOR_NAME = 8 +}; + +#define NFL4_UFLG_MASK 0x0000003F +#define NFL4_UFLG_DENSE 0x00000001 +#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002 +#define NFL4_UFLG_STRIPE_UNIT_SIZE_MASK 0xFFFFFFC0 + +/* Encoded in the loh_body field of type layouthint4 */ +enum filelayout_hint_care4 { + NFLH4_CARE_DENSE = NFL4_UFLG_DENSE, + NFLH4_CARE_COMMIT_THRU_MDS = NFL4_UFLG_COMMIT_THRU_MDS, + NFLH4_CARE_STRIPE_UNIT_SIZE = 0x00000040, + NFLH4_CARE_STRIPE_COUNT = 0x00000080 +}; + +#define NFS4_DEVICEID4_SIZE 16 + +struct nfs4_deviceid { + char data[NFS4_DEVICEID4_SIZE]; +}; + +enum data_content4 { + NFS4_CONTENT_DATA = 0, + NFS4_CONTENT_HOLE = 1, +}; + +enum pnfs_update_layout_reason { + PNFS_UPDATE_LAYOUT_UNKNOWN = 0, + PNFS_UPDATE_LAYOUT_NO_PNFS, + PNFS_UPDATE_LAYOUT_RD_ZEROLEN, + PNFS_UPDATE_LAYOUT_MDSTHRESH, + PNFS_UPDATE_LAYOUT_NOMEM, + PNFS_UPDATE_LAYOUT_BULK_RECALL, + PNFS_UPDATE_LAYOUT_IO_TEST_FAIL, + PNFS_UPDATE_LAYOUT_FOUND_CACHED, + PNFS_UPDATE_LAYOUT_RETURN, + PNFS_UPDATE_LAYOUT_RETRY, + PNFS_UPDATE_LAYOUT_BLOCKED, + PNFS_UPDATE_LAYOUT_INVALID_OPEN, + PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, +}; + +#define NFS4_OP_MAP_NUM_LONGS \ + DIV_ROUND_UP(LAST_NFS4_OP, 8 * sizeof(unsigned long)) +#define NFS4_OP_MAP_NUM_WORDS \ + (NFS4_OP_MAP_NUM_LONGS * sizeof(unsigned long) / sizeof(u32)) +struct nfs4_op_map { + union { + unsigned long longs[NFS4_OP_MAP_NUM_LONGS]; + u32 words[NFS4_OP_MAP_NUM_WORDS]; + } u; +}; + +#endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h new file mode 100644 index 000000000..8ea7ceed8 --- /dev/null +++ b/include/linux/nfs_fs.h @@ -0,0 +1,589 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/nfs_fs.h + * + * Copyright (C) 1992 Rick Sladkey + * + * OS-specific nfs filesystem definitions and declarations + */ +#ifndef _LINUX_NFS_FS_H +#define _LINUX_NFS_FS_H + +#include + + +/* + * Enable dprintk() debugging support for nfs client. 
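// Illustrative sketch (not from the kernel sources): struct nfs4_op_map above
// is a bitmap with one bit per NFSv4 operation number; the union lets it be
// manipulated with the long-based bitops in the kernel while being encoded as
// 32-bit words on the wire. The function name is hypothetical.
static inline bool demo_op_map_usage(struct nfs4_op_map *map)
{
        set_bit(OP_CLOSE, map->u.longs);        // mark CLOSE as wanted
        set_bit(OP_LOCKU, map->u.longs);        // mark LOCKU as wanted

        return test_bit(OP_WRITE, map->u.longs);        // false: never set here
}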
+ */ +#ifdef CONFIG_NFS_DEBUG +# define NFS_DEBUG +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +/* + * These are the default flags for swap requests + */ +#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) + +/* + * NFSv3/v4 Access mode cache entry + */ +struct nfs_access_entry { + struct rb_node rb_node; + struct list_head lru; + struct rpc_cred * cred; + __u32 mask; + struct rcu_head rcu_head; +}; + +struct nfs_lock_context { + refcount_t count; + struct list_head list; + struct nfs_open_context *open_context; + fl_owner_t lockowner; + atomic_t io_count; +}; + +struct nfs4_state; +struct nfs_open_context { + struct nfs_lock_context lock_context; + fl_owner_t flock_owner; + struct dentry *dentry; + struct rpc_cred *cred; + struct nfs4_state *state; + fmode_t mode; + + unsigned long flags; +#define NFS_CONTEXT_ERROR_WRITE (0) +#define NFS_CONTEXT_RESEND_WRITES (1) +#define NFS_CONTEXT_BAD (2) +#define NFS_CONTEXT_UNLOCK (3) +#define NFS_CONTEXT_FILE_OPEN (4) + int error; + + struct list_head list; + struct nfs4_threshold *mdsthreshold; +}; + +struct nfs_open_dir_context { + struct list_head list; + struct rpc_cred *cred; + unsigned long attr_gencount; + __u64 dir_cookie; + __u64 dup_cookie; + signed char duped; +}; + +/* + * NFSv4 delegation + */ +struct nfs_delegation; + +struct posix_acl; + +/* + * nfs fs inode data in memory + */ +struct nfs_inode { + /* + * The 64bit 'inode number' + */ + __u64 fileid; + + /* + * NFS file handle + */ + struct nfs_fh fh; + + /* + * Various flags + */ + unsigned long flags; /* atomic bit ops */ + unsigned long cache_validity; /* bit mask */ + + /* + * read_cache_jiffies is when we started read-caching this inode. + * attrtimeo is for how long the cached information is assumed + * to be valid. A successful attribute revalidation doubles + * attrtimeo (up to acregmax/acdirmax), a failure resets it to + * acregmin/acdirmin. + * + * We need to revalidate the cached attrs for this inode if + * + * jiffies - read_cache_jiffies >= attrtimeo + * + * Please note the comparison is greater than or equal + * so that zero timeout values can be specified. + */ + unsigned long read_cache_jiffies; + unsigned long attrtimeo; + unsigned long attrtimeo_timestamp; + + unsigned long attr_gencount; + /* "Generation counter" for the attribute cache. This is + * bumped whenever we update the metadata on the + * server. 
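// Illustrative sketch (not from the kernel sources): the revalidation rule
// spelled out in the comment above ("jiffies - read_cache_jiffies >=
// attrtimeo") restated as a helper, using the wrap-safe jiffies comparison.
// The real client has its own version of this check; this one is only a
// restatement for illustration.
static inline bool demo_nfs_attrs_expired(const struct nfs_inode *nfsi)
{
        return time_after_eq(jiffies,
                             nfsi->read_cache_jiffies + nfsi->attrtimeo);
}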
+ */ + unsigned long cache_change_attribute; + + struct rb_root access_cache; + struct list_head access_cache_entry_lru; + struct list_head access_cache_inode_lru; + + /* + * This is the cookie verifier used for NFSv3 readdir + * operations + */ + __be32 cookieverf[2]; + + atomic_long_t nrequests; + struct nfs_mds_commit_info commit_info; + + /* Open contexts for shared mmap writes */ + struct list_head open_files; + + /* Readers: in-flight sillydelete RPC calls */ + /* Writers: rmdir */ + struct rw_semaphore rmdir_sem; + struct mutex commit_mutex; + +#if IS_ENABLED(CONFIG_NFS_V4) + struct nfs4_cached_acl *nfs4_acl; + /* NFSv4 state */ + struct list_head open_states; + struct nfs_delegation __rcu *delegation; + struct rw_semaphore rwsem; + + /* pNFS layout information */ + struct pnfs_layout_hdr *layout; +#endif /* CONFIG_NFS_V4*/ + /* how many bytes have been written/read and how many bytes queued up */ + __u64 write_io; + __u64 read_io; +#ifdef CONFIG_NFS_FSCACHE + struct fscache_cookie *fscache; +#endif + struct inode vfs_inode; +}; + +struct nfs4_copy_state { + struct list_head copies; + nfs4_stateid stateid; + struct completion completion; + uint64_t count; + struct nfs_writeverf verf; + int error; + int flags; + struct nfs4_state *parent_state; +}; + +/* + * Access bit flags + */ +#define NFS_ACCESS_READ 0x0001 +#define NFS_ACCESS_LOOKUP 0x0002 +#define NFS_ACCESS_MODIFY 0x0004 +#define NFS_ACCESS_EXTEND 0x0008 +#define NFS_ACCESS_DELETE 0x0010 +#define NFS_ACCESS_EXECUTE 0x0020 + +/* + * Cache validity bit flags + */ +#define NFS_INO_INVALID_DATA BIT(1) /* cached data is invalid */ +#define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */ +#define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */ +#define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */ +#define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */ +#define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */ +#define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */ +#define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */ +#define NFS_INO_INVALID_CTIME BIT(9) /* cached ctime is invalid */ +#define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */ +#define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */ +#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ + +#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ + | NFS_INO_INVALID_CTIME \ + | NFS_INO_INVALID_MTIME \ + | NFS_INO_INVALID_SIZE \ + | NFS_INO_INVALID_OTHER) /* inode metadata is invalid */ + +/* + * Bit offsets in flags field + */ +#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */ +#define NFS_INO_STALE (1) /* possible stale inode */ +#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ +#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ +#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ +#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ +#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ +#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ +#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ +#define NFS_INO_ODIRECT (12) /* I/O setting is O_DIRECT */ + +static inline struct nfs_inode *NFS_I(const struct inode *inode) +{ + return container_of(inode, struct nfs_inode, vfs_inode); +} + +static inline struct nfs_server *NFS_SB(const struct super_block *s) +{ + return (struct nfs_server *)(s->s_fs_info); +} + +static inline struct nfs_fh 
*NFS_FH(const struct inode *inode) +{ + return &NFS_I(inode)->fh; +} + +static inline struct nfs_server *NFS_SERVER(const struct inode *inode) +{ + return NFS_SB(inode->i_sb); +} + +static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode) +{ + return NFS_SERVER(inode)->client; +} + +static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode) +{ + return NFS_SERVER(inode)->nfs_client->rpc_ops; +} + +static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode) +{ + struct nfs_server *nfss = NFS_SERVER(inode); + return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin; +} + +static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode) +{ + struct nfs_server *nfss = NFS_SERVER(inode); + return S_ISDIR(inode->i_mode) ? nfss->acdirmax : nfss->acregmax; +} + +static inline int NFS_STALE(const struct inode *inode) +{ + return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); +} + +static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode) +{ +#ifdef CONFIG_NFS_FSCACHE + return NFS_I(inode)->fscache; +#else + return NULL; +#endif +} + +static inline __u64 NFS_FILEID(const struct inode *inode) +{ + return NFS_I(inode)->fileid; +} + +static inline void set_nfs_fileid(struct inode *inode, __u64 fileid) +{ + NFS_I(inode)->fileid = fileid; +} + +static inline void nfs_mark_for_revalidate(struct inode *inode) +{ + struct nfs_inode *nfsi = NFS_I(inode); + + spin_lock(&inode->i_lock); + nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE + | NFS_INO_INVALID_ACCESS + | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_CHANGE + | NFS_INO_INVALID_CTIME; + if (S_ISDIR(inode->i_mode)) + nfsi->cache_validity |= NFS_INO_INVALID_DATA; + spin_unlock(&inode->i_lock); +} + +static inline int nfs_server_capable(struct inode *inode, int cap) +{ + return NFS_SERVER(inode)->caps & cap; +} + +static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf) +{ + dentry->d_time = verf; +} + +/** + * nfs_save_change_attribute - Returns the inode attribute change cookie + * @dir - pointer to parent directory inode + * The "change attribute" is updated every time we finish an operation + * that will result in a metadata change on the server. + */ +static inline unsigned long nfs_save_change_attribute(struct inode *dir) +{ + return NFS_I(dir)->cache_change_attribute; +} + +/** + * nfs_verify_change_attribute - Detects NFS remote directory changes + * @dir - pointer to parent directory inode + * @chattr - previously saved change attribute + * Return "false" if the verifiers doesn't match the change attribute. + * This would usually indicate that the directory contents have changed on + * the server, and that any dentries need revalidating. 
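// Illustrative sketch (not from the kernel sources) of how the helpers above
// and below are intended to pair up: after a metadata-changing operation on
// 'dir', stamp the saved change attribute into the dentry, and compare it
// later to decide whether the directory may have changed on the server. The
// function names are hypothetical.
static void demo_stamp_dentry(struct inode *dir, struct dentry *dentry)
{
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
}

static bool demo_dentry_still_valid(struct inode *dir, struct dentry *dentry)
{
        return nfs_verify_change_attribute(dir, dentry->d_time);
}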
+ */ +static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr) +{ + return chattr == NFS_I(dir)->cache_change_attribute; +} + +/* + * linux/fs/nfs/inode.c + */ +extern int nfs_sync_mapping(struct address_space *mapping); +extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping); +extern void nfs_zap_caches(struct inode *); +extern void nfs_invalidate_atime(struct inode *); +extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, + struct nfs_fattr *, struct nfs4_label *); +struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *); +extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); +extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); +extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); +extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); +extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int); +extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); +extern void nfs_access_set_mask(struct nfs_access_entry *, u32); +extern int nfs_permission(struct inode *, int); +extern int nfs_open(struct inode *, struct file *); +extern int nfs_attribute_cache_expired(struct inode *inode); +extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); +extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); +extern bool nfs_mapping_need_revalidate_inode(struct inode *inode); +extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); +extern int nfs_revalidate_mapping_rcu(struct inode *inode); +extern int nfs_setattr(struct dentry *, struct iattr *); +extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); +extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, + struct nfs4_label *label); +extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); +extern void put_nfs_open_context(struct nfs_open_context *ctx); +extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); +extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp); +extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); +extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); +extern void nfs_file_clear_open_context(struct file *flip); +extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); +extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); +extern u64 nfs_compat_user_ino64(u64 fileid); +extern void nfs_fattr_init(struct nfs_fattr *fattr); +extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr); +extern unsigned long nfs_inc_attr_generation_counter(void); + +extern struct nfs_fattr *nfs_alloc_fattr(void); + +static inline void nfs_free_fattr(const struct nfs_fattr *fattr) +{ + kfree(fattr); +} + +extern struct nfs_fh *nfs_alloc_fhandle(void); + +static inline void nfs_free_fhandle(const struct nfs_fh *fh) +{ + kfree(fh); +} + +#ifdef NFS_DEBUG +extern u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh); +static inline u32 nfs_display_fhandle_hash(const struct nfs_fh *fh) +{ + return _nfs_display_fhandle_hash(fh); +} +extern void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption); +#define 
nfs_display_fhandle(fh, caption) \ + do { \ + if (unlikely(nfs_debug & NFSDBG_FACILITY)) \ + _nfs_display_fhandle(fh, caption); \ + } while (0) +#else +static inline u32 nfs_display_fhandle_hash(const struct nfs_fh *fh) +{ + return 0; +} +static inline void nfs_display_fhandle(const struct nfs_fh *fh, + const char *caption) +{ +} +#endif + +/* + * linux/fs/nfs/nfsroot.c + */ +extern int nfs_root_data(char **root_device, char **root_data); /*__init*/ +/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */ +extern __be32 root_nfs_parse_addr(char *name); /*__init*/ + +/* + * linux/fs/nfs/file.c + */ +extern const struct file_operations nfs_file_operations; +#if IS_ENABLED(CONFIG_NFS_V4) +extern const struct file_operations nfs4_file_operations; +#endif /* CONFIG_NFS_V4 */ +extern const struct address_space_operations nfs_file_aops; +extern const struct address_space_operations nfs_dir_aops; + +static inline struct nfs_open_context *nfs_file_open_context(struct file *filp) +{ + return filp->private_data; +} + +static inline struct rpc_cred *nfs_file_cred(struct file *file) +{ + if (file != NULL) { + struct nfs_open_context *ctx = + nfs_file_open_context(file); + if (ctx) + return ctx->cred; + } + return NULL; +} + +/* + * linux/fs/nfs/direct.c + */ +extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *); +ssize_t nfs_file_direct_read(struct kiocb *iocb, + struct iov_iter *iter, bool swap); +ssize_t nfs_file_direct_write(struct kiocb *iocb, + struct iov_iter *iter, bool swap); + +/* + * linux/fs/nfs/dir.c + */ +extern const struct file_operations nfs_dir_operations; +extern const struct dentry_operations nfs_dentry_operations; + +extern void nfs_force_lookup_revalidate(struct inode *dir); +extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, + struct nfs_fattr *fattr, struct nfs4_label *label); +extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags); +extern void nfs_access_zap_cache(struct inode *inode); + +/* + * linux/fs/nfs/symlink.c + */ +extern const struct inode_operations nfs_symlink_inode_operations; + +/* + * linux/fs/nfs/sysctl.c + */ +#ifdef CONFIG_SYSCTL +extern int nfs_register_sysctl(void); +extern void nfs_unregister_sysctl(void); +#else +#define nfs_register_sysctl() 0 +#define nfs_unregister_sysctl() do { } while(0) +#endif + +/* + * linux/fs/nfs/namespace.c + */ +extern const struct inode_operations nfs_mountpoint_inode_operations; +extern const struct inode_operations nfs_referral_inode_operations; +extern int nfs_mountpoint_expiry_timeout; +extern void nfs_release_automount_timer(void); + +/* + * linux/fs/nfs/unlink.c + */ +extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); + +/* + * linux/fs/nfs/write.c + */ +extern int nfs_congestion_kb; +extern int nfs_writepage(struct page *page, struct writeback_control *wbc); +extern int nfs_writepages(struct address_space *, struct writeback_control *); +extern int nfs_flush_incompatible(struct file *file, struct page *page); +extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); + +/* + * Try to write back everything synchronously (but check the + * return value!) 
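// Illustrative sketch (not from the kernel sources), following the advice in
// the comment above: flush everything synchronously and actually propagate
// the writeback error. The wrapper name is hypothetical; nfs_wb_all() is
// declared immediately below.
static int demo_flush_inode(struct inode *inode)
{
        int err = nfs_wb_all(inode);    // write back and wait for dirty pages

        if (err < 0)
                return err;             // surface the writeback error
        return 0;
}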
+ */ +extern int nfs_sync_inode(struct inode *inode); +extern int nfs_wb_all(struct inode *inode); +extern int nfs_wb_page(struct inode *inode, struct page *page); +extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); +extern int nfs_commit_inode(struct inode *, int); +extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); +extern void nfs_commit_free(struct nfs_commit_data *data); + +static inline int +nfs_have_writebacks(struct inode *inode) +{ + return atomic_long_read(&NFS_I(inode)->nrequests) != 0; +} + +/* + * linux/fs/nfs/read.c + */ +extern int nfs_readpage(struct file *, struct page *); +extern int nfs_readpages(struct file *, struct address_space *, + struct list_head *, unsigned); +extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, + struct page *); + +/* + * inline functions + */ + +static inline loff_t nfs_size_to_loff_t(__u64 size) +{ + return min_t(u64, size, OFFSET_MAX); +} + +static inline ino_t +nfs_fileid_to_ino_t(u64 fileid) +{ + ino_t ino = (ino_t) fileid; + if (sizeof(ino_t) < sizeof(u64)) + ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8; + return ino; +} + +#define NFS_JUKEBOX_RETRY_TIME (5 * HZ) + + +# undef ifdebug +# ifdef NFS_DEBUG +# define ifdebug(fac) if (unlikely(nfs_debug & NFSDBG_##fac)) +# define NFS_IFDEBUG(x) x +# else +# define ifdebug(fac) if (0) +# define NFS_IFDEBUG(x) +# endif +#endif diff --git a/include/linux/nfs_fs_i.h b/include/linux/nfs_fs_i.h new file mode 100644 index 000000000..98f9268fc --- /dev/null +++ b/include/linux/nfs_fs_i.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NFS_FS_I +#define _NFS_FS_I + +struct nlm_lockowner; + +/* + * NFS lock info + */ +struct nfs_lock_info { + u32 state; + struct nlm_lockowner *owner; + struct list_head list; +}; + +struct nfs4_lock_state; +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; + +#endif diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h new file mode 100644 index 000000000..bf39d9c92 --- /dev/null +++ b/include/linux/nfs_fs_sb.h @@ -0,0 +1,262 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NFS_FS_SB +#define _NFS_FS_SB + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct nfs4_session; +struct nfs_iostats; +struct nlm_host; +struct nfs4_sequence_args; +struct nfs4_sequence_res; +struct nfs_server; +struct nfs4_minor_version_ops; +struct nfs41_server_scope; +struct nfs41_impl_id; + +/* + * The nfs_client identifies our client state to the server. 
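// Illustrative worked example (not from the kernel sources) for
// nfs_fileid_to_ino_t() above on a platform with a 32-bit ino_t: the high and
// low halves of the 64-bit fileid are XOR-folded, so the value below maps to
// 0x9 ^ 0x5 == 0xc instead of silently truncating to 0x9. The function name
// is hypothetical.
static ino_t demo_fold_fileid(void)
{
        u64 fileid = 0x0000000500000009ULL;

        return nfs_fileid_to_ino_t(fileid);    // 0xc when sizeof(ino_t) == 4
}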
+ */ +struct nfs_client { + refcount_t cl_count; + atomic_t cl_mds_count; + int cl_cons_state; /* current construction state (-ve: init error) */ +#define NFS_CS_READY 0 /* ready to be used */ +#define NFS_CS_INITING 1 /* busy initialising */ +#define NFS_CS_SESSION_INITING 2 /* busy initialising session */ + unsigned long cl_res_state; /* NFS resources state */ +#define NFS_CS_CALLBACK 1 /* - callback started */ +#define NFS_CS_IDMAP 2 /* - idmap started */ +#define NFS_CS_RENEWD 3 /* - renewd started */ +#define NFS_CS_STOP_RENEW 4 /* no more state to renew */ +#define NFS_CS_CHECK_LEASE_TIME 5 /* need to check lease time */ + unsigned long cl_flags; /* behavior switches */ +#define NFS_CS_NORESVPORT 0 /* - use ephemeral src port */ +#define NFS_CS_DISCRTRY 1 /* - disconnect on RPC retry */ +#define NFS_CS_MIGRATION 2 /* - transparent state migr */ +#define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */ +#define NFS_CS_NO_RETRANS_TIMEOUT 4 /* - Disable retransmit timeouts */ +#define NFS_CS_TSM_POSSIBLE 5 /* - Maybe state migration */ + struct sockaddr_storage cl_addr; /* server identifier */ + size_t cl_addrlen; + char * cl_hostname; /* hostname of server */ + char * cl_acceptor; /* GSSAPI acceptor name */ + struct list_head cl_share_link; /* link in global client list */ + struct list_head cl_superblocks; /* List of nfs_server structs */ + + struct rpc_clnt * cl_rpcclient; + const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */ + int cl_proto; /* Network transport protocol */ + struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */ + + u32 cl_minorversion;/* NFSv4 minorversion */ + struct rpc_cred *cl_machine_cred; + +#if IS_ENABLED(CONFIG_NFS_V4) + struct list_head cl_ds_clients; /* auth flavor data servers */ + u64 cl_clientid; /* constant */ + nfs4_verifier cl_confirm; /* Clientid verifier */ + unsigned long cl_state; + + spinlock_t cl_lock; + + unsigned long cl_lease_time; + unsigned long cl_last_renewal; + struct delayed_work cl_renewd; + + struct rpc_wait_queue cl_rpcwaitq; + + /* idmapper */ + struct idmap * cl_idmap; + + /* Client owner identifier */ + const char * cl_owner_id; + + u32 cl_cb_ident; /* v4.0 callback identifier */ + const struct nfs4_minor_version_ops *cl_mvops; + unsigned long cl_mig_gen; + + /* NFSv4.0 transport blocking */ + struct nfs4_slot_table *cl_slot_tbl; + + /* The sequence id to use for the next CREATE_SESSION */ + u32 cl_seqid; + /* The flags used for obtaining the clientid during EXCHANGE_ID */ + u32 cl_exchange_flags; + struct nfs4_session *cl_session; /* shared session */ + bool cl_preserve_clid; + struct nfs41_server_owner *cl_serverowner; + struct nfs41_server_scope *cl_serverscope; + struct nfs41_impl_id *cl_implid; + /* nfs 4.1+ state protection modes: */ + unsigned long cl_sp4_flags; +#define NFS_SP4_MACH_CRED_MINIMAL 1 /* Minimal sp4_mach_cred - state ops + * must use machine cred */ +#define NFS_SP4_MACH_CRED_CLEANUP 2 /* CLOSE and LOCKU */ +#define NFS_SP4_MACH_CRED_SECINFO 3 /* SECINFO and SECINFO_NO_NAME */ +#define NFS_SP4_MACH_CRED_STATEID 4 /* TEST_STATEID and FREE_STATEID */ +#define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */ +#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */ +#define NFS_SP4_MACH_CRED_PNFS_CLEANUP 7 /* LAYOUTRETURN */ +#if IS_ENABLED(CONFIG_NFS_V4_1) + wait_queue_head_t cl_lock_waitq; +#endif /* CONFIG_NFS_V4_1 */ +#endif /* CONFIG_NFS_V4 */ + + /* Our own IP address, as a null-terminated string. + * This is used to generate the mv0 callback address. 
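// Illustrative sketch (not from the kernel sources): cl_flags above stores bit
// numbers (NFS_CS_NORESVPORT and friends) rather than masks, so it is meant to
// be queried and updated with the bitops. The helper name is hypothetical.
static inline bool demo_client_noresvport(const struct nfs_client *clp)
{
        return test_bit(NFS_CS_NORESVPORT, &clp->cl_flags);
}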
+ */ + char cl_ipaddr[48]; + +#ifdef CONFIG_NFS_FSCACHE + struct fscache_cookie *fscache; /* client index cache cookie */ +#endif + + struct net *cl_net; + struct list_head pending_cb_stateids; +}; + +/* + * NFS client parameters stored in the superblock. + */ +struct nfs_server { + struct nfs_client * nfs_client; /* shared client and NFS4 state */ + struct list_head client_link; /* List of other nfs_server structs + * that share the same client + */ + struct list_head master_link; /* link in master servers list */ + struct rpc_clnt * client; /* RPC client handle */ + struct rpc_clnt * client_acl; /* ACL RPC client handle */ + struct nlm_host *nlm_host; /* NLM client handle */ + struct nfs_iostats __percpu *io_stats; /* I/O statistics */ + atomic_long_t writeback; /* number of writeback pages */ + int flags; /* various flags */ + unsigned int caps; /* server capabilities */ + unsigned int rsize; /* read size */ + unsigned int rpages; /* read size (in pages) */ + unsigned int wsize; /* write size */ + unsigned int wpages; /* write size (in pages) */ + unsigned int wtmult; /* server disk block size */ + unsigned int dtsize; /* readdir size */ + unsigned short port; /* "port=" setting */ + unsigned int bsize; /* server block size */ + unsigned int acregmin; /* attr cache timeouts */ + unsigned int acregmax; + unsigned int acdirmin; + unsigned int acdirmax; + unsigned int namelen; + unsigned int options; /* extra options enabled by mount */ + unsigned int clone_blksize; /* granularity of a CLONE operation */ +#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ +#define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */ + + struct nfs_fsid fsid; + __u64 maxfilesize; /* maximum file size */ + struct timespec time_delta; /* smallest time granularity */ + unsigned long mount_time; /* when this fs was mounted */ + struct super_block *super; /* VFS super block */ + dev_t s_dev; /* superblock dev numbers */ + struct nfs_auth_info auth_info; /* parsed auth flavors */ + +#ifdef CONFIG_NFS_FSCACHE + struct nfs_fscache_key *fscache_key; /* unique key for superblock */ + struct fscache_cookie *fscache; /* superblock cookie */ +#endif + + u32 pnfs_blksize; /* layout_blksize attr */ +#if IS_ENABLED(CONFIG_NFS_V4) + u32 attr_bitmask[3];/* V4 bitmask representing the set + of attributes supported on this + filesystem */ + u32 attr_bitmask_nl[3]; + /* V4 bitmask representing the + set of attributes supported + on this filesystem excluding + the label support bit. */ + u32 exclcreat_bitmask[3]; + /* V4 bitmask representing the + set of attributes supported + on this filesystem for the + exclusive create. 
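// Illustrative sketch (not from the kernel sources): unlike the bit-number
// flag words above, the 'options' field carries mask-style mount options such
// as NFS_OPTION_FSCACHE. The helper name is hypothetical.
static inline bool demo_server_uses_fscache(const struct nfs_server *server)
{
        return (server->options & NFS_OPTION_FSCACHE) != 0;
}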
*/ + u32 cache_consistency_bitmask[3]; + /* V4 bitmask representing the subset + of change attribute, size, ctime + and mtime attributes supported by + the server */ + u32 acl_bitmask; /* V4 bitmask representing the ACEs + that are supported on this + filesystem */ + u32 fh_expire_type; /* V4 bitmask representing file + handle volatility type for + this filesystem */ + struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */ + struct rpc_wait_queue roc_rpcwaitq; + void *pnfs_ld_data; /* per mount point data */ + + /* the following fields are protected by nfs_client->cl_lock */ + struct rb_root state_owners; +#endif + struct ida openowner_id; + struct ida lockowner_id; + struct list_head state_owners_lru; + struct list_head layouts; + struct list_head delegations; + struct list_head ss_copies; + + unsigned long mig_gen; + unsigned long mig_status; +#define NFS_MIG_IN_TRANSITION (1) +#define NFS_MIG_FAILED (2) +#define NFS_MIG_TSM_POSSIBLE (3) + + void (*destroy)(struct nfs_server *); + + atomic_t active; /* Keep trace of any activity to this server */ + + /* mountd-related mount options */ + struct sockaddr_storage mountd_address; + size_t mountd_addrlen; + u32 mountd_version; + unsigned short mountd_port; + unsigned short mountd_protocol; + struct rpc_wait_queue uoc_rpcwaitq; +}; + +/* Server capabilities */ +#define NFS_CAP_READDIRPLUS (1U << 0) +#define NFS_CAP_HARDLINKS (1U << 1) +#define NFS_CAP_SYMLINKS (1U << 2) +#define NFS_CAP_ACLS (1U << 3) +#define NFS_CAP_ATOMIC_OPEN (1U << 4) +/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */ +#define NFS_CAP_LGOPEN (1U << 5) +#define NFS_CAP_FILEID (1U << 6) +#define NFS_CAP_MODE (1U << 7) +#define NFS_CAP_NLINK (1U << 8) +#define NFS_CAP_OWNER (1U << 9) +#define NFS_CAP_OWNER_GROUP (1U << 10) +#define NFS_CAP_ATIME (1U << 11) +#define NFS_CAP_CTIME (1U << 12) +#define NFS_CAP_MTIME (1U << 13) +#define NFS_CAP_POSIX_LOCK (1U << 14) +#define NFS_CAP_UIDGID_NOMAP (1U << 15) +#define NFS_CAP_STATEID_NFSV41 (1U << 16) +#define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) +#define NFS_CAP_SECURITY_LABEL (1U << 18) +#define NFS_CAP_SEEK (1U << 19) +#define NFS_CAP_ALLOCATE (1U << 20) +#define NFS_CAP_DEALLOCATE (1U << 21) +#define NFS_CAP_LAYOUTSTATS (1U << 22) +#define NFS_CAP_CLONE (1U << 23) +#define NFS_CAP_COPY (1U << 24) +#define NFS_CAP_OFFLOAD_CANCEL (1U << 25) + +#endif diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h new file mode 100644 index 000000000..027874c36 --- /dev/null +++ b/include/linux/nfs_iostat.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * User-space visible declarations for NFS client per-mount + * point statistics + * + * Copyright (C) 2005, 2006 Chuck Lever + * + * NFS client per-mount statistics provide information about the + * health of the NFS client and the health of each NFS mount point. + * Generally these are not for detailed problem diagnosis, but + * simply to indicate that there is a problem. + * + * These counters are not meant to be human-readable, but are meant + * to be integrated into system monitoring tools such as "sar" and + * "iostat". As such, the counters are sampled by the tools over + * time, and are never zeroed after a file system is mounted. + * Moving averages can be computed by the tools by taking the + * difference between two instantaneous samples and dividing that + * by the time between the samples. + */ + +#ifndef _LINUX_NFS_IOSTAT +#define _LINUX_NFS_IOSTAT + +#define NFS_IOSTAT_VERS "1.1" + +/* + * NFS byte counters + * + * 1. 
SERVER - the number of payload bytes read from or written + * to the server by the NFS client via an NFS READ or WRITE + * request. + * + * 2. NORMAL - the number of bytes read or written by applications + * via the read(2) and write(2) system call interfaces. + * + * 3. DIRECT - the number of bytes read or written from files + * opened with the O_DIRECT flag. + * + * These counters give a view of the data throughput into and out + * of the NFS client. Comparing the number of bytes requested by + * an application with the number of bytes the client requests from + * the server can provide an indication of client efficiency + * (per-op, cache hits, etc). + * + * These counters can also help characterize which access methods + * are in use. DIRECT by itself shows whether there is any O_DIRECT + * traffic. NORMAL + DIRECT shows how much data is going through + * the system call interface. A large amount of SERVER traffic + * without much NORMAL or DIRECT traffic shows that applications + * are using mapped files. + * + * NFS page counters + * + * These count the number of pages read or written via nfs_readpage(), + * nfs_readpages(), or their write equivalents. + * + * NB: When adding new byte counters, please include the measured + * units in the name of each byte counter to help users of this + * interface determine what exactly is being counted. + */ +enum nfs_stat_bytecounters { + NFSIOS_NORMALREADBYTES = 0, + NFSIOS_NORMALWRITTENBYTES, + NFSIOS_DIRECTREADBYTES, + NFSIOS_DIRECTWRITTENBYTES, + NFSIOS_SERVERREADBYTES, + NFSIOS_SERVERWRITTENBYTES, + NFSIOS_READPAGES, + NFSIOS_WRITEPAGES, + __NFSIOS_BYTESMAX, +}; + +/* + * NFS event counters + * + * These counters provide a low-overhead way of monitoring client + * activity without enabling NFS trace debugging. The counters + * show the rate at which VFS requests are made, and how often the + * client invalidates its data and attribute caches. This allows + * system administrators to monitor such things as how close-to-open + * is working, and answer questions such as "why are there so many + * GETATTR requests on the wire?" + * + * They also count anomalous events such as short reads and writes, + * silly renames due to close-after-delete, and operations that + * change the size of a file (such operations can often be the + * source of data corruption if applications aren't using file + * locking properly). 
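+ *
+ * For example, a monitoring tool can sample NFSIOS_VFSOPEN and
+ * NFSIOS_INODEREVALIDATE at two points in time; dividing each counter's
+ * difference by the sampling interval gives the average rate of opens
+ * and of attribute cache revalidations (and hence, roughly, of the
+ * GETATTR traffic they generate) over that window.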
+ */ +enum nfs_stat_eventcounters { + NFSIOS_INODEREVALIDATE = 0, + NFSIOS_DENTRYREVALIDATE, + NFSIOS_DATAINVALIDATE, + NFSIOS_ATTRINVALIDATE, + NFSIOS_VFSOPEN, + NFSIOS_VFSLOOKUP, + NFSIOS_VFSACCESS, + NFSIOS_VFSUPDATEPAGE, + NFSIOS_VFSREADPAGE, + NFSIOS_VFSREADPAGES, + NFSIOS_VFSWRITEPAGE, + NFSIOS_VFSWRITEPAGES, + NFSIOS_VFSGETDENTS, + NFSIOS_VFSSETATTR, + NFSIOS_VFSFLUSH, + NFSIOS_VFSFSYNC, + NFSIOS_VFSLOCK, + NFSIOS_VFSRELEASE, + NFSIOS_CONGESTIONWAIT, + NFSIOS_SETATTRTRUNC, + NFSIOS_EXTENDWRITE, + NFSIOS_SILLYRENAME, + NFSIOS_SHORTREAD, + NFSIOS_SHORTWRITE, + NFSIOS_DELAY, + NFSIOS_PNFS_READ, + NFSIOS_PNFS_WRITE, + __NFSIOS_COUNTSMAX, +}; + +/* + * NFS local caching servicing counters + */ +enum nfs_stat_fscachecounters { + NFSIOS_FSCACHE_PAGES_READ_OK, + NFSIOS_FSCACHE_PAGES_READ_FAIL, + NFSIOS_FSCACHE_PAGES_WRITTEN_OK, + NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, + NFSIOS_FSCACHE_PAGES_UNCACHED, + __NFSIOS_FSCACHEMAX, +}; + +#endif /* _LINUX_NFS_IOSTAT */ diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h new file mode 100644 index 000000000..5162fc153 --- /dev/null +++ b/include/linux/nfs_page.h @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/nfs_page.h + * + * Copyright (C) 2000 Trond Myklebust + * + * NFS page cache wrapper. + */ + +#ifndef _LINUX_NFS_PAGE_H +#define _LINUX_NFS_PAGE_H + + +#include +#include +#include +#include +#include + +#include + +/* + * Valid flags for a dirty buffer + */ +enum { + PG_BUSY = 0, /* nfs_{un}lock_request */ + PG_MAPPED, /* page private set for buffered io */ + PG_CLEAN, /* write succeeded */ + PG_COMMIT_TO_DS, /* used by pnfs layouts */ + PG_INODE_REF, /* extra ref held by inode when in writeback */ + PG_HEADLOCK, /* page group lock of wb_head */ + PG_TEARDOWN, /* page group sync for destroy */ + PG_UNLOCKPAGE, /* page group sync bit in read path */ + PG_UPTODATE, /* page group sync bit in read path */ + PG_WB_END, /* page group sync bit in write path */ + PG_REMOVE, /* page group sync bit in write path */ + PG_CONTENDED1, /* Is someone waiting for a lock? */ + PG_CONTENDED2, /* Is someone waiting for a lock? 
*/ +}; + +struct nfs_inode; +struct nfs_page { + struct list_head wb_list; /* Defines state of page: */ + struct page *wb_page; /* page to read in/write out */ + struct nfs_open_context *wb_context; /* File state context info */ + struct nfs_lock_context *wb_lock_context; /* lock context info */ + pgoff_t wb_index; /* Offset >> PAGE_SHIFT */ + unsigned int wb_offset, /* Offset & ~PAGE_MASK */ + wb_pgbase, /* Start of page data */ + wb_bytes; /* Length of request */ + struct kref wb_kref; /* reference count */ + unsigned long wb_flags; + struct nfs_write_verifier wb_verf; /* Commit cookie */ + struct nfs_page *wb_this_page; /* list of reqs for this page */ + struct nfs_page *wb_head; /* head pointer for req list */ +}; + +struct nfs_pageio_descriptor; +struct nfs_pageio_ops { + void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *); + size_t (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, + struct nfs_page *); + int (*pg_doio)(struct nfs_pageio_descriptor *); + unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *, + struct nfs_page *); + void (*pg_cleanup)(struct nfs_pageio_descriptor *); +}; + +struct nfs_rw_ops { + struct nfs_pgio_header *(*rw_alloc_header)(void); + void (*rw_free_header)(struct nfs_pgio_header *); + int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *, + struct inode *); + void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *); + void (*rw_initiate)(struct nfs_pgio_header *, struct rpc_message *, + const struct nfs_rpc_ops *, + struct rpc_task_setup *, int); +}; + +struct nfs_pgio_mirror { + struct list_head pg_list; + unsigned long pg_bytes_written; + size_t pg_count; + size_t pg_bsize; + unsigned int pg_base; + unsigned char pg_recoalesce : 1; +}; + +struct nfs_pageio_descriptor { + unsigned char pg_moreio : 1; + struct inode *pg_inode; + const struct nfs_pageio_ops *pg_ops; + const struct nfs_rw_ops *pg_rw_ops; + int pg_ioflags; + int pg_error; + const struct rpc_call_ops *pg_rpc_callops; + const struct nfs_pgio_completion_ops *pg_completion_ops; + struct pnfs_layout_segment *pg_lseg; + struct nfs_io_completion *pg_io_completion; + struct nfs_direct_req *pg_dreq; + unsigned int pg_bsize; /* default bsize for mirrors */ + + u32 pg_mirror_count; + struct nfs_pgio_mirror *pg_mirrors; + struct nfs_pgio_mirror pg_mirrors_static[1]; + struct nfs_pgio_mirror *pg_mirrors_dynamic; + u32 pg_mirror_idx; /* current mirror */ +}; + +/* arbitrarily selected limit to number of mirrors */ +#define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX 16 + +#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) + +extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, + struct page *page, + struct nfs_page *last, + unsigned int offset, + unsigned int count); +extern void nfs_release_request(struct nfs_page *); + + +extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, + struct inode *inode, + const struct nfs_pageio_ops *pg_ops, + const struct nfs_pgio_completion_ops *compl_ops, + const struct nfs_rw_ops *rw_ops, + size_t bsize, + int how); +extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, + struct nfs_page *); +extern int nfs_pageio_resend(struct nfs_pageio_descriptor *, + struct nfs_pgio_header *); +extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); +extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); +extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, + struct nfs_page *prev, + struct nfs_page *req); +extern int 
nfs_wait_on_request(struct nfs_page *); +extern void nfs_unlock_request(struct nfs_page *req); +extern void nfs_unlock_and_release_request(struct nfs_page *); +extern int nfs_page_group_lock(struct nfs_page *); +extern void nfs_page_group_unlock(struct nfs_page *); +extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); +extern int nfs_page_set_headlock(struct nfs_page *req); +extern void nfs_page_clear_headlock(struct nfs_page *req); +extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); + +/* + * Lock the page of an asynchronous request + */ +static inline int +nfs_lock_request(struct nfs_page *req) +{ + return !test_and_set_bit(PG_BUSY, &req->wb_flags); +} + +/** + * nfs_list_add_request - Insert a request into a list + * @req: request + * @head: head of list into which to insert the request. + */ +static inline void +nfs_list_add_request(struct nfs_page *req, struct list_head *head) +{ + list_add_tail(&req->wb_list, head); +} + +/** + * nfs_list_move_request - Move a request to a new list + * @req: request + * @head: head of list into which to insert the request. + */ +static inline void +nfs_list_move_request(struct nfs_page *req, struct list_head *head) +{ + list_move_tail(&req->wb_list, head); +} + +/** + * nfs_list_remove_request - Remove a request from its wb_list + * @req: request + */ +static inline void +nfs_list_remove_request(struct nfs_page *req) +{ + if (list_empty(&req->wb_list)) + return; + list_del_init(&req->wb_list); +} + +static inline struct nfs_page * +nfs_list_entry(struct list_head *head) +{ + return list_entry(head, struct nfs_page, wb_list); +} + +static inline +loff_t req_offset(struct nfs_page *req) +{ + return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset; +} + +#endif /* _LINUX_NFS_PAGE_H */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h new file mode 100644 index 000000000..cab24a127 --- /dev/null +++ b/include/linux/nfs_xdr.h @@ -0,0 +1,1689 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_NFS_XDR_H +#define _LINUX_NFS_XDR_H + +#include +#include + +/* + * To change the maximum rsize and wsize supported by the NFS client, adjust + * NFS_MAX_FILE_IO_SIZE. 64KB is a typical maximum, but some servers can + * support a megabyte or more. The default is left at 4096 bytes, which is + * reasonable for NFS over UDP. + */ +#define NFS_MAX_FILE_IO_SIZE (1048576U) +#define NFS_DEF_FILE_IO_SIZE (4096U) +#define NFS_MIN_FILE_IO_SIZE (1024U) + +struct nfs4_string { + unsigned int len; + char *data; +}; + +struct nfs_fsid { + uint64_t major; + uint64_t minor; +}; + +/* + * Helper for checking equality between 2 fsids. 
+ */ +static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid *b) +{ + return a->major == b->major && a->minor == b->minor; +} + +struct nfs4_threshold { + __u32 bm; + __u32 l_type; + __u64 rd_sz; + __u64 wr_sz; + __u64 rd_io_sz; + __u64 wr_io_sz; +}; + +struct nfs_fattr { + unsigned int valid; /* which fields are valid */ + umode_t mode; + __u32 nlink; + kuid_t uid; + kgid_t gid; + dev_t rdev; + __u64 size; + union { + struct { + __u32 blocksize; + __u32 blocks; + } nfs2; + struct { + __u64 used; + } nfs3; + } du; + struct nfs_fsid fsid; + __u64 fileid; + __u64 mounted_on_fileid; + struct timespec atime; + struct timespec mtime; + struct timespec ctime; + __u64 change_attr; /* NFSv4 change attribute */ + __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ + __u64 pre_size; /* pre_op_attr.size */ + struct timespec pre_mtime; /* pre_op_attr.mtime */ + struct timespec pre_ctime; /* pre_op_attr.ctime */ + unsigned long time_start; + unsigned long gencount; + struct nfs4_string *owner_name; + struct nfs4_string *group_name; + struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */ +}; + +#define NFS_ATTR_FATTR_TYPE (1U << 0) +#define NFS_ATTR_FATTR_MODE (1U << 1) +#define NFS_ATTR_FATTR_NLINK (1U << 2) +#define NFS_ATTR_FATTR_OWNER (1U << 3) +#define NFS_ATTR_FATTR_GROUP (1U << 4) +#define NFS_ATTR_FATTR_RDEV (1U << 5) +#define NFS_ATTR_FATTR_SIZE (1U << 6) +#define NFS_ATTR_FATTR_PRESIZE (1U << 7) +#define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8) +#define NFS_ATTR_FATTR_SPACE_USED (1U << 9) +#define NFS_ATTR_FATTR_FSID (1U << 10) +#define NFS_ATTR_FATTR_FILEID (1U << 11) +#define NFS_ATTR_FATTR_ATIME (1U << 12) +#define NFS_ATTR_FATTR_MTIME (1U << 13) +#define NFS_ATTR_FATTR_CTIME (1U << 14) +#define NFS_ATTR_FATTR_PREMTIME (1U << 15) +#define NFS_ATTR_FATTR_PRECTIME (1U << 16) +#define NFS_ATTR_FATTR_CHANGE (1U << 17) +#define NFS_ATTR_FATTR_PRECHANGE (1U << 18) +#define NFS_ATTR_FATTR_V4_LOCATIONS (1U << 19) +#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 20) +#define NFS_ATTR_FATTR_MOUNTPOINT (1U << 21) +#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 22) +#define NFS_ATTR_FATTR_OWNER_NAME (1U << 23) +#define NFS_ATTR_FATTR_GROUP_NAME (1U << 24) +#define NFS_ATTR_FATTR_V4_SECURITY_LABEL (1U << 25) + +#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \ + | NFS_ATTR_FATTR_MODE \ + | NFS_ATTR_FATTR_NLINK \ + | NFS_ATTR_FATTR_OWNER \ + | NFS_ATTR_FATTR_GROUP \ + | NFS_ATTR_FATTR_RDEV \ + | NFS_ATTR_FATTR_SIZE \ + | NFS_ATTR_FATTR_FSID \ + | NFS_ATTR_FATTR_FILEID \ + | NFS_ATTR_FATTR_ATIME \ + | NFS_ATTR_FATTR_MTIME \ + | NFS_ATTR_FATTR_CTIME \ + | NFS_ATTR_FATTR_CHANGE) +#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \ + | NFS_ATTR_FATTR_BLOCKS_USED) +#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \ + | NFS_ATTR_FATTR_SPACE_USED) +#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \ + | NFS_ATTR_FATTR_SPACE_USED \ + | NFS_ATTR_FATTR_V4_SECURITY_LABEL) + +/* + * Maximal number of supported layout drivers. + */ +#define NFS_MAX_LAYOUT_TYPES 8 + +/* + * Info on the file system + */ +struct nfs_fsinfo { + struct nfs_fattr *fattr; /* Post-op attributes */ + __u32 rtmax; /* max. read transfer size */ + __u32 rtpref; /* pref. read transfer size */ + __u32 rtmult; /* reads should be multiple of this */ + __u32 wtmax; /* max. write transfer size */ + __u32 wtpref; /* pref. write transfer size */ + __u32 wtmult; /* writes should be multiple of this */ + __u32 dtpref; /* pref. 
readdir transfer size */ + __u64 maxfilesize; + struct timespec time_delta; /* server time granularity */ + __u32 lease_time; /* in seconds */ + __u32 nlayouttypes; /* number of layouttypes */ + __u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */ + __u32 blksize; /* preferred pnfs io block size */ + __u32 clone_blksize; /* granularity of a CLONE operation */ +}; + +struct nfs_fsstat { + struct nfs_fattr *fattr; /* Post-op attributes */ + __u64 tbytes; /* total size in bytes */ + __u64 fbytes; /* # of free bytes */ + __u64 abytes; /* # of bytes available to user */ + __u64 tfiles; /* # of files */ + __u64 ffiles; /* # of free files */ + __u64 afiles; /* # of files available to user */ +}; + +struct nfs2_fsstat { + __u32 tsize; /* Server transfer size */ + __u32 bsize; /* Filesystem block size */ + __u32 blocks; /* No. of "bsize" blocks on filesystem */ + __u32 bfree; /* No. of free "bsize" blocks */ + __u32 bavail; /* No. of available "bsize" blocks */ +}; + +struct nfs_pathconf { + struct nfs_fattr *fattr; /* Post-op attributes */ + __u32 max_link; /* max # of hard links */ + __u32 max_namelen; /* max name length */ +}; + +struct nfs4_change_info { + u32 atomic; + u64 before; + u64 after; +}; + +struct nfs_seqid; + +/* nfs41 sessions channel attributes */ +struct nfs4_channel_attrs { + u32 max_rqst_sz; + u32 max_resp_sz; + u32 max_resp_sz_cached; + u32 max_ops; + u32 max_reqs; +}; + +struct nfs4_slot; +struct nfs4_sequence_args { + struct nfs4_slot *sa_slot; + u8 sa_cache_this : 1, + sa_privileged : 1; +}; + +struct nfs4_sequence_res { + struct nfs4_slot *sr_slot; /* slot used to send request */ + unsigned long sr_timestamp; + int sr_status; /* sequence operation status */ + u32 sr_status_flags; + u32 sr_highest_slotid; + u32 sr_target_highest_slotid; +}; + +struct nfs4_get_lease_time_args { + struct nfs4_sequence_args la_seq_args; +}; + +struct nfs4_get_lease_time_res { + struct nfs4_sequence_res lr_seq_res; + struct nfs_fsinfo *lr_fsinfo; +}; + +struct xdr_stream; +struct nfs4_xdr_opaque_data; + +struct nfs4_xdr_opaque_ops { + void (*encode)(struct xdr_stream *, const void *args, + const struct nfs4_xdr_opaque_data *); + void (*free)(struct nfs4_xdr_opaque_data *); +}; + +struct nfs4_xdr_opaque_data { + const struct nfs4_xdr_opaque_ops *ops; + void *data; +}; + +#define PNFS_LAYOUT_MAXSIZE 4096 + +struct nfs4_layoutdriver_data { + struct page **pages; + __u32 pglen; + __u32 len; +}; + +struct pnfs_layout_range { + u32 iomode; + u64 offset; + u64 length; +}; + +struct nfs4_layoutget_args { + struct nfs4_sequence_args seq_args; + __u32 type; + struct pnfs_layout_range range; + __u64 minlength; + __u32 maxcount; + struct inode *inode; + struct nfs_open_context *ctx; + nfs4_stateid stateid; + struct nfs4_layoutdriver_data layout; +}; + +struct nfs4_layoutget_res { + struct nfs4_sequence_res seq_res; + int status; + __u32 return_on_close; + struct pnfs_layout_range range; + __u32 type; + nfs4_stateid stateid; + struct nfs4_layoutdriver_data *layoutp; +}; + +struct nfs4_layoutget { + struct nfs4_layoutget_args args; + struct nfs4_layoutget_res res; + struct rpc_cred *cred; + gfp_t gfp_flags; +}; + +struct nfs4_getdeviceinfo_args { + struct nfs4_sequence_args seq_args; + struct pnfs_device *pdev; + __u32 notify_types; +}; + +struct nfs4_getdeviceinfo_res { + struct nfs4_sequence_res seq_res; + struct pnfs_device *pdev; + __u32 notification; +}; + +struct nfs4_layoutcommit_args { + struct nfs4_sequence_args seq_args; + nfs4_stateid stateid; + __u64 lastbytewritten; + 
struct inode *inode; + const u32 *bitmask; + size_t layoutupdate_len; + struct page *layoutupdate_page; + struct page **layoutupdate_pages; + __be32 *start_p; +}; + +struct nfs4_layoutcommit_res { + struct nfs4_sequence_res seq_res; + struct nfs_fattr *fattr; + const struct nfs_server *server; + int status; +}; + +struct nfs4_layoutcommit_data { + struct rpc_task task; + struct nfs_fattr fattr; + struct list_head lseg_list; + struct rpc_cred *cred; + struct inode *inode; + struct nfs4_layoutcommit_args args; + struct nfs4_layoutcommit_res res; +}; + +struct nfs4_layoutreturn_args { + struct nfs4_sequence_args seq_args; + struct pnfs_layout_hdr *layout; + struct inode *inode; + struct pnfs_layout_range range; + nfs4_stateid stateid; + __u32 layout_type; + struct nfs4_xdr_opaque_data *ld_private; +}; + +struct nfs4_layoutreturn_res { + struct nfs4_sequence_res seq_res; + u32 lrs_present; + nfs4_stateid stateid; +}; + +struct nfs4_layoutreturn { + struct nfs4_layoutreturn_args args; + struct nfs4_layoutreturn_res res; + struct rpc_cred *cred; + struct nfs_client *clp; + struct inode *inode; + int rpc_status; + struct nfs4_xdr_opaque_data ld_private; +}; + +#define PNFS_LAYOUTSTATS_MAXSIZE 256 + +struct nfs42_layoutstat_args; +struct nfs42_layoutstat_devinfo; +typedef void (*layoutstats_encode_t)(struct xdr_stream *, + struct nfs42_layoutstat_args *, + struct nfs42_layoutstat_devinfo *); + +/* Per file per deviceid layoutstats */ +struct nfs42_layoutstat_devinfo { + struct nfs4_deviceid dev_id; + __u64 offset; + __u64 length; + __u64 read_count; + __u64 read_bytes; + __u64 write_count; + __u64 write_bytes; + __u32 layout_type; + struct nfs4_xdr_opaque_data ld_private; +}; + +struct nfs42_layoutstat_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + struct inode *inode; + nfs4_stateid stateid; + int num_dev; + struct nfs42_layoutstat_devinfo *devinfo; +}; + +struct nfs42_layoutstat_res { + struct nfs4_sequence_res seq_res; + int num_dev; + int rpc_status; +}; + +struct nfs42_layoutstat_data { + struct inode *inode; + struct nfs42_layoutstat_args args; + struct nfs42_layoutstat_res res; +}; + +struct nfs42_clone_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh *src_fh; + struct nfs_fh *dst_fh; + nfs4_stateid src_stateid; + nfs4_stateid dst_stateid; + __u64 src_offset; + __u64 dst_offset; + __u64 count; + const u32 *dst_bitmask; +}; + +struct nfs42_clone_res { + struct nfs4_sequence_res seq_res; + unsigned int rpc_status; + struct nfs_fattr *dst_fattr; + const struct nfs_server *server; +}; + +struct stateowner_id { + __u64 create_time; + __u32 uniquifier; +}; + +/* + * Arguments to the open call. 
+ */ +struct nfs_openargs { + struct nfs4_sequence_args seq_args; + const struct nfs_fh * fh; + struct nfs_seqid * seqid; + int open_flags; + fmode_t fmode; + u32 share_access; + u32 access; + __u64 clientid; + struct stateowner_id id; + union { + struct { + struct iattr * attrs; /* UNCHECKED, GUARDED, EXCLUSIVE4_1 */ + nfs4_verifier verifier; /* EXCLUSIVE */ + }; + nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ + fmode_t delegation_type; /* CLAIM_PREVIOUS */ + } u; + const struct qstr * name; + const struct nfs_server *server; /* Needed for ID mapping */ + const u32 * bitmask; + const u32 * open_bitmap; + enum open_claim_type4 claim; + enum createmode4 createmode; + const struct nfs4_label *label; + umode_t umask; + struct nfs4_layoutget_args *lg_args; +}; + +struct nfs_openres { + struct nfs4_sequence_res seq_res; + nfs4_stateid stateid; + struct nfs_fh fh; + struct nfs4_change_info cinfo; + __u32 rflags; + struct nfs_fattr * f_attr; + struct nfs4_label *f_label; + struct nfs_seqid * seqid; + const struct nfs_server *server; + fmode_t delegation_type; + nfs4_stateid delegation; + unsigned long pagemod_limit; + __u32 do_recall; + __u32 attrset[NFS4_BITMAP_SIZE]; + struct nfs4_string *owner; + struct nfs4_string *group_owner; + __u32 access_request; + __u32 access_supported; + __u32 access_result; + struct nfs4_layoutget_res *lg_res; +}; + +/* + * Arguments to the open_confirm call. + */ +struct nfs_open_confirmargs { + struct nfs4_sequence_args seq_args; + const struct nfs_fh * fh; + nfs4_stateid * stateid; + struct nfs_seqid * seqid; +}; + +struct nfs_open_confirmres { + struct nfs4_sequence_res seq_res; + nfs4_stateid stateid; + struct nfs_seqid * seqid; +}; + +/* + * Arguments to the close call. + */ +struct nfs_closeargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh * fh; + nfs4_stateid stateid; + struct nfs_seqid * seqid; + fmode_t fmode; + u32 share_access; + const u32 * bitmask; + struct nfs4_layoutreturn_args *lr_args; +}; + +struct nfs_closeres { + struct nfs4_sequence_res seq_res; + nfs4_stateid stateid; + struct nfs_fattr * fattr; + struct nfs_seqid * seqid; + const struct nfs_server *server; + struct nfs4_layoutreturn_res *lr_res; + int lr_ret; +}; +/* + * * Arguments to the lock,lockt, and locku call. 
+ * */ +struct nfs_lowner { + __u64 clientid; + __u64 id; + dev_t s_dev; +}; + +struct nfs_lock_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh * fh; + struct file_lock * fl; + struct nfs_seqid * lock_seqid; + nfs4_stateid lock_stateid; + struct nfs_seqid * open_seqid; + nfs4_stateid open_stateid; + struct nfs_lowner lock_owner; + unsigned char block : 1; + unsigned char reclaim : 1; + unsigned char new_lock : 1; + unsigned char new_lock_owner : 1; +}; + +struct nfs_lock_res { + struct nfs4_sequence_res seq_res; + nfs4_stateid stateid; + struct nfs_seqid * lock_seqid; + struct nfs_seqid * open_seqid; +}; + +struct nfs_locku_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh * fh; + struct file_lock * fl; + struct nfs_seqid * seqid; + nfs4_stateid stateid; +}; + +struct nfs_locku_res { + struct nfs4_sequence_res seq_res; + nfs4_stateid stateid; + struct nfs_seqid * seqid; +}; + +struct nfs_lockt_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh * fh; + struct file_lock * fl; + struct nfs_lowner lock_owner; +}; + +struct nfs_lockt_res { + struct nfs4_sequence_res seq_res; + struct file_lock * denied; /* LOCK, LOCKT failed */ +}; + +struct nfs_release_lockowner_args { + struct nfs4_sequence_args seq_args; + struct nfs_lowner lock_owner; +}; + +struct nfs_release_lockowner_res { + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_delegreturnargs { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *fhandle; + const nfs4_stateid *stateid; + const u32 * bitmask; + struct nfs4_layoutreturn_args *lr_args; +}; + +struct nfs4_delegreturnres { + struct nfs4_sequence_res seq_res; + struct nfs_fattr * fattr; + struct nfs_server *server; + struct nfs4_layoutreturn_res *lr_res; + int lr_ret; +}; + +/* + * Arguments to the write call. + */ +struct nfs_write_verifier { + char data[8]; +}; + +struct nfs_writeverf { + struct nfs_write_verifier verifier; + enum nfs3_stable_how committed; +}; + +/* + * Arguments shared by the read and write call. + */ +struct nfs_pgio_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh * fh; + struct nfs_open_context *context; + struct nfs_lock_context *lock_context; + nfs4_stateid stateid; + __u64 offset; + __u32 count; + unsigned int pgbase; + struct page ** pages; + const u32 * bitmask; /* used by write */ + enum nfs3_stable_how stable; /* used by write */ +}; + +struct nfs_pgio_res { + struct nfs4_sequence_res seq_res; + struct nfs_fattr * fattr; + __u32 count; + __u32 op_status; + int eof; /* used by read */ + struct nfs_writeverf * verf; /* used by write */ + const struct nfs_server *server; /* used by write */ + +}; + +/* + * Arguments to the commit call. 
+ */ +struct nfs_commitargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + __u64 offset; + __u32 count; + const u32 *bitmask; +}; + +struct nfs_commitres { + struct nfs4_sequence_res seq_res; + __u32 op_status; + struct nfs_fattr *fattr; + struct nfs_writeverf *verf; + const struct nfs_server *server; +}; + +/* + * Common arguments to the unlink call + */ +struct nfs_removeargs { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *fh; + struct qstr name; +}; + +struct nfs_removeres { + struct nfs4_sequence_res seq_res; + struct nfs_server *server; + struct nfs_fattr *dir_attr; + struct nfs4_change_info cinfo; +}; + +/* + * Common arguments to the rename call + */ +struct nfs_renameargs { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *old_dir; + const struct nfs_fh *new_dir; + const struct qstr *old_name; + const struct qstr *new_name; +}; + +struct nfs_renameres { + struct nfs4_sequence_res seq_res; + struct nfs_server *server; + struct nfs4_change_info old_cinfo; + struct nfs_fattr *old_fattr; + struct nfs4_change_info new_cinfo; + struct nfs_fattr *new_fattr; +}; + +/* parsed sec= options */ +#define NFS_AUTH_INFO_MAX_FLAVORS 12 /* see fs/nfs/super.c */ +struct nfs_auth_info { + unsigned int flavor_len; + rpc_authflavor_t flavors[NFS_AUTH_INFO_MAX_FLAVORS]; +}; + +/* + * Argument struct for decode_entry function + */ +struct nfs_entry { + __u64 ino; + __u64 cookie, + prev_cookie; + const char * name; + unsigned int len; + int eof; + struct nfs_fh * fh; + struct nfs_fattr * fattr; + struct nfs4_label *label; + unsigned char d_type; + struct nfs_server * server; +}; + +/* + * The following types are for NFSv2 only. + */ +struct nfs_sattrargs { + struct nfs_fh * fh; + struct iattr * sattr; +}; + +struct nfs_diropargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; +}; + +struct nfs_createargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; + struct iattr * sattr; +}; + +struct nfs_setattrargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh * fh; + nfs4_stateid stateid; + struct iattr * iap; + const struct nfs_server * server; /* Needed for name mapping */ + const u32 * bitmask; + const struct nfs4_label *label; +}; + +struct nfs_setaclargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh * fh; + size_t acl_len; + struct page ** acl_pages; +}; + +struct nfs_setaclres { + struct nfs4_sequence_res seq_res; +}; + +struct nfs_getaclargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh * fh; + size_t acl_len; + struct page ** acl_pages; +}; + +/* getxattr ACL interface flags */ +#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */ +struct nfs_getaclres { + struct nfs4_sequence_res seq_res; + size_t acl_len; + size_t acl_data_offset; + int acl_flags; + struct page * acl_scratch; +}; + +struct nfs_setattrres { + struct nfs4_sequence_res seq_res; + struct nfs_fattr * fattr; + struct nfs4_label *label; + const struct nfs_server * server; +}; + +struct nfs_linkargs { + struct nfs_fh * fromfh; + struct nfs_fh * tofh; + const char * toname; + unsigned int tolen; +}; + +struct nfs_symlinkargs { + struct nfs_fh * fromfh; + const char * fromname; + unsigned int fromlen; + struct page ** pages; + unsigned int pathlen; + struct iattr * sattr; +}; + +struct nfs_readdirargs { + struct nfs_fh * fh; + __u32 cookie; + unsigned int count; + struct page ** pages; +}; + +struct nfs3_getaclargs { + struct nfs_fh * fh; + int mask; + struct page ** pages; +}; + +struct nfs3_setaclargs { + struct inode * inode; + int mask; + 
struct posix_acl * acl_access; + struct posix_acl * acl_default; + size_t len; + unsigned int npages; + struct page ** pages; +}; + +struct nfs_diropok { + struct nfs_fh * fh; + struct nfs_fattr * fattr; +}; + +struct nfs_readlinkargs { + struct nfs_fh * fh; + unsigned int pgbase; + unsigned int pglen; + struct page ** pages; +}; + +struct nfs3_sattrargs { + struct nfs_fh * fh; + struct iattr * sattr; + unsigned int guard; + struct timespec guardtime; +}; + +struct nfs3_diropargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; +}; + +struct nfs3_accessargs { + struct nfs_fh * fh; + __u32 access; +}; + +struct nfs3_createargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; + struct iattr * sattr; + enum nfs3_createmode createmode; + __be32 verifier[2]; +}; + +struct nfs3_mkdirargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; + struct iattr * sattr; +}; + +struct nfs3_symlinkargs { + struct nfs_fh * fromfh; + const char * fromname; + unsigned int fromlen; + struct page ** pages; + unsigned int pathlen; + struct iattr * sattr; +}; + +struct nfs3_mknodargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; + enum nfs3_ftype type; + struct iattr * sattr; + dev_t rdev; +}; + +struct nfs3_linkargs { + struct nfs_fh * fromfh; + struct nfs_fh * tofh; + const char * toname; + unsigned int tolen; +}; + +struct nfs3_readdirargs { + struct nfs_fh * fh; + __u64 cookie; + __be32 verf[2]; + bool plus; + unsigned int count; + struct page ** pages; +}; + +struct nfs3_diropres { + struct nfs_fattr * dir_attr; + struct nfs_fh * fh; + struct nfs_fattr * fattr; +}; + +struct nfs3_accessres { + struct nfs_fattr * fattr; + __u32 access; +}; + +struct nfs3_readlinkargs { + struct nfs_fh * fh; + unsigned int pgbase; + unsigned int pglen; + struct page ** pages; +}; + +struct nfs3_linkres { + struct nfs_fattr * dir_attr; + struct nfs_fattr * fattr; +}; + +struct nfs3_readdirres { + struct nfs_fattr * dir_attr; + __be32 * verf; + bool plus; +}; + +struct nfs3_getaclres { + struct nfs_fattr * fattr; + int mask; + unsigned int acl_access_count; + unsigned int acl_default_count; + struct posix_acl * acl_access; + struct posix_acl * acl_default; +}; + +#if IS_ENABLED(CONFIG_NFS_V4) + +typedef u64 clientid4; + +struct nfs4_accessargs { + struct nfs4_sequence_args seq_args; + const struct nfs_fh * fh; + const u32 * bitmask; + u32 access; +}; + +struct nfs4_accessres { + struct nfs4_sequence_res seq_res; + const struct nfs_server * server; + struct nfs_fattr * fattr; + u32 supported; + u32 access; +}; + +struct nfs4_create_arg { + struct nfs4_sequence_args seq_args; + u32 ftype; + union { + struct { + struct page ** pages; + unsigned int len; + } symlink; /* NF4LNK */ + struct { + u32 specdata1; + u32 specdata2; + } device; /* NF4BLK, NF4CHR */ + } u; + const struct qstr * name; + const struct nfs_server * server; + const struct iattr * attrs; + const struct nfs_fh * dir_fh; + const u32 * bitmask; + const struct nfs4_label *label; + umode_t umask; +}; + +struct nfs4_create_res { + struct nfs4_sequence_res seq_res; + const struct nfs_server * server; + struct nfs_fh * fh; + struct nfs_fattr * fattr; + struct nfs4_label *label; + struct nfs4_change_info dir_cinfo; +}; + +struct nfs4_fsinfo_arg { + struct nfs4_sequence_args seq_args; + const struct nfs_fh * fh; + const u32 * bitmask; +}; + +struct nfs4_fsinfo_res { + struct nfs4_sequence_res seq_res; + struct nfs_fsinfo *fsinfo; +}; + +struct nfs4_getattr_arg { + struct nfs4_sequence_args seq_args; + const 
struct nfs_fh * fh; + const u32 * bitmask; +}; + +struct nfs4_getattr_res { + struct nfs4_sequence_res seq_res; + const struct nfs_server * server; + struct nfs_fattr * fattr; + struct nfs4_label *label; +}; + +struct nfs4_link_arg { + struct nfs4_sequence_args seq_args; + const struct nfs_fh * fh; + const struct nfs_fh * dir_fh; + const struct qstr * name; + const u32 * bitmask; +}; + +struct nfs4_link_res { + struct nfs4_sequence_res seq_res; + const struct nfs_server * server; + struct nfs_fattr * fattr; + struct nfs4_label *label; + struct nfs4_change_info cinfo; + struct nfs_fattr * dir_attr; +}; + +struct nfs4_lookup_arg { + struct nfs4_sequence_args seq_args; + const struct nfs_fh * dir_fh; + const struct qstr * name; + const u32 * bitmask; +}; + +struct nfs4_lookup_res { + struct nfs4_sequence_res seq_res; + const struct nfs_server * server; + struct nfs_fattr * fattr; + struct nfs_fh * fh; + struct nfs4_label *label; +}; + +struct nfs4_lookupp_arg { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *fh; + const u32 *bitmask; +}; + +struct nfs4_lookupp_res { + struct nfs4_sequence_res seq_res; + const struct nfs_server *server; + struct nfs_fattr *fattr; + struct nfs_fh *fh; + struct nfs4_label *label; +}; + +struct nfs4_lookup_root_arg { + struct nfs4_sequence_args seq_args; + const u32 * bitmask; +}; + +struct nfs4_pathconf_arg { + struct nfs4_sequence_args seq_args; + const struct nfs_fh * fh; + const u32 * bitmask; +}; + +struct nfs4_pathconf_res { + struct nfs4_sequence_res seq_res; + struct nfs_pathconf *pathconf; +}; + +struct nfs4_readdir_arg { + struct nfs4_sequence_args seq_args; + const struct nfs_fh * fh; + u64 cookie; + nfs4_verifier verifier; + u32 count; + struct page ** pages; /* zero-copy data */ + unsigned int pgbase; /* zero-copy data */ + const u32 * bitmask; + bool plus; +}; + +struct nfs4_readdir_res { + struct nfs4_sequence_res seq_res; + nfs4_verifier verifier; + unsigned int pgbase; +}; + +struct nfs4_readlink { + struct nfs4_sequence_args seq_args; + const struct nfs_fh * fh; + unsigned int pgbase; + unsigned int pglen; /* zero-copy data */ + struct page ** pages; /* zero-copy data */ +}; + +struct nfs4_readlink_res { + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_setclientid { + const nfs4_verifier * sc_verifier; + u32 sc_prog; + unsigned int sc_netid_len; + char sc_netid[RPCBIND_MAXNETIDLEN + 1]; + unsigned int sc_uaddr_len; + char sc_uaddr[RPCBIND_MAXUADDRLEN + 1]; + struct nfs_client *sc_clnt; + struct rpc_cred *sc_cred; +}; + +struct nfs4_setclientid_res { + u64 clientid; + nfs4_verifier confirm; +}; + +struct nfs4_statfs_arg { + struct nfs4_sequence_args seq_args; + const struct nfs_fh * fh; + const u32 * bitmask; +}; + +struct nfs4_statfs_res { + struct nfs4_sequence_res seq_res; + struct nfs_fsstat *fsstat; +}; + +struct nfs4_server_caps_arg { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fhandle; + const u32 * bitmask; +}; + +struct nfs4_server_caps_res { + struct nfs4_sequence_res seq_res; + u32 attr_bitmask[3]; + u32 exclcreat_bitmask[3]; + u32 acl_bitmask; + u32 has_links; + u32 has_symlinks; + u32 fh_expire_type; +}; + +#define NFS4_PATHNAME_MAXCOMPONENTS 512 +struct nfs4_pathname { + unsigned int ncomponents; + struct nfs4_string components[NFS4_PATHNAME_MAXCOMPONENTS]; +}; + +#define NFS4_FS_LOCATION_MAXSERVERS 10 +struct nfs4_fs_location { + unsigned int nservers; + struct nfs4_string servers[NFS4_FS_LOCATION_MAXSERVERS]; + struct nfs4_pathname rootpath; +}; + +#define NFS4_FS_LOCATIONS_MAXENTRIES 10 +struct 
nfs4_fs_locations { + struct nfs_fattr fattr; + const struct nfs_server *server; + struct nfs4_pathname fs_path; + int nlocations; + struct nfs4_fs_location locations[NFS4_FS_LOCATIONS_MAXENTRIES]; +}; + +struct nfs4_fs_locations_arg { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *dir_fh; + const struct nfs_fh *fh; + const struct qstr *name; + struct page *page; + const u32 *bitmask; + clientid4 clientid; + unsigned char migration:1, renew:1; +}; + +struct nfs4_fs_locations_res { + struct nfs4_sequence_res seq_res; + struct nfs4_fs_locations *fs_locations; + unsigned char migration:1, renew:1; +}; + +struct nfs4_secinfo4 { + u32 flavor; + struct rpcsec_gss_info flavor_info; +}; + +struct nfs4_secinfo_flavors { + unsigned int num_flavors; + struct nfs4_secinfo4 flavors[0]; +}; + +struct nfs4_secinfo_arg { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *dir_fh; + const struct qstr *name; +}; + +struct nfs4_secinfo_res { + struct nfs4_sequence_res seq_res; + struct nfs4_secinfo_flavors *flavors; +}; + +struct nfs4_fsid_present_arg { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *fh; + clientid4 clientid; + unsigned char renew:1; +}; + +struct nfs4_fsid_present_res { + struct nfs4_sequence_res seq_res; + struct nfs_fh *fh; + unsigned char renew:1; +}; + +#endif /* CONFIG_NFS_V4 */ + +struct nfstime4 { + u64 seconds; + u32 nseconds; +}; + +#ifdef CONFIG_NFS_V4_1 + +struct pnfs_commit_bucket { + struct list_head written; + struct list_head committing; + struct pnfs_layout_segment *wlseg; + struct pnfs_layout_segment *clseg; + struct nfs_writeverf direct_verf; +}; + +struct pnfs_ds_commit_info { + int nwritten; + int ncommitting; + int nbuckets; + struct pnfs_commit_bucket *buckets; +}; + +struct nfs41_state_protection { + u32 how; + struct nfs4_op_map enforce; + struct nfs4_op_map allow; +}; + +struct nfs41_exchange_id_args { + struct nfs_client *client; + nfs4_verifier verifier; + u32 flags; + struct nfs41_state_protection state_protect; +}; + +struct nfs41_server_owner { + uint64_t minor_id; + uint32_t major_id_sz; + char major_id[NFS4_OPAQUE_LIMIT]; +}; + +struct nfs41_server_scope { + uint32_t server_scope_sz; + char server_scope[NFS4_OPAQUE_LIMIT]; +}; + +struct nfs41_impl_id { + char domain[NFS4_OPAQUE_LIMIT + 1]; + char name[NFS4_OPAQUE_LIMIT + 1]; + struct nfstime4 date; +}; + +struct nfs41_bind_conn_to_session_args { + struct nfs_client *client; + struct nfs4_sessionid sessionid; + u32 dir; + bool use_conn_in_rdma_mode; +}; + +struct nfs41_bind_conn_to_session_res { + struct nfs4_sessionid sessionid; + u32 dir; + bool use_conn_in_rdma_mode; +}; + +struct nfs41_exchange_id_res { + u64 clientid; + u32 seqid; + u32 flags; + struct nfs41_server_owner *server_owner; + struct nfs41_server_scope *server_scope; + struct nfs41_impl_id *impl_id; + struct nfs41_state_protection state_protect; +}; + +struct nfs41_create_session_args { + struct nfs_client *client; + u64 clientid; + uint32_t seqid; + uint32_t flags; + uint32_t cb_program; + struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ + struct nfs4_channel_attrs bc_attrs; /* Back Channel */ +}; + +struct nfs41_create_session_res { + struct nfs4_sessionid sessionid; + uint32_t seqid; + uint32_t flags; + struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ + struct nfs4_channel_attrs bc_attrs; /* Back Channel */ +}; + +struct nfs41_reclaim_complete_args { + struct nfs4_sequence_args seq_args; + /* In the future extend to include curr_fh for use with migration */ + unsigned char one_fs:1; +}; + 
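+/*
+ * The one_fs flag above corresponds to rca_one_fs in RECLAIM_COMPLETE4args
+ * (RFC 5661): when set, reclaim is complete only for the file system
+ * designated by the current filehandle rather than for all state held by
+ * the client.
+ */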
+struct nfs41_reclaim_complete_res { + struct nfs4_sequence_res seq_res; +}; + +#define SECINFO_STYLE_CURRENT_FH 0 +#define SECINFO_STYLE_PARENT 1 +struct nfs41_secinfo_no_name_args { + struct nfs4_sequence_args seq_args; + int style; +}; + +struct nfs41_test_stateid_args { + struct nfs4_sequence_args seq_args; + nfs4_stateid *stateid; +}; + +struct nfs41_test_stateid_res { + struct nfs4_sequence_res seq_res; + unsigned int status; +}; + +struct nfs41_free_stateid_args { + struct nfs4_sequence_args seq_args; + nfs4_stateid stateid; +}; + +struct nfs41_free_stateid_res { + struct nfs4_sequence_res seq_res; + unsigned int status; +}; + +static inline void +nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) +{ + kfree(cinfo->buckets); +} + +#else + +struct pnfs_ds_commit_info { +}; + +static inline void +nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) +{ +} + +#endif /* CONFIG_NFS_V4_1 */ + +#ifdef CONFIG_NFS_V4_2 +struct nfs42_falloc_args { + struct nfs4_sequence_args seq_args; + + struct nfs_fh *falloc_fh; + nfs4_stateid falloc_stateid; + u64 falloc_offset; + u64 falloc_length; + const u32 *falloc_bitmask; +}; + +struct nfs42_falloc_res { + struct nfs4_sequence_res seq_res; + unsigned int status; + + struct nfs_fattr *falloc_fattr; + const struct nfs_server *falloc_server; +}; + +struct nfs42_copy_args { + struct nfs4_sequence_args seq_args; + + struct nfs_fh *src_fh; + nfs4_stateid src_stateid; + u64 src_pos; + + struct nfs_fh *dst_fh; + nfs4_stateid dst_stateid; + u64 dst_pos; + + u64 count; + bool sync; +}; + +struct nfs42_write_res { + nfs4_stateid stateid; + u64 count; + struct nfs_writeverf verifier; +}; + +struct nfs42_copy_res { + struct nfs4_sequence_res seq_res; + struct nfs42_write_res write_res; + bool consecutive; + bool synchronous; + struct nfs_commitres commit_res; +}; + +struct nfs42_offload_status_args { + struct nfs4_sequence_args osa_seq_args; + struct nfs_fh *osa_src_fh; + nfs4_stateid osa_stateid; +}; + +struct nfs42_offload_status_res { + struct nfs4_sequence_res osr_seq_res; + uint64_t osr_count; + int osr_status; +}; + +struct nfs42_seek_args { + struct nfs4_sequence_args seq_args; + + struct nfs_fh *sa_fh; + nfs4_stateid sa_stateid; + u64 sa_offset; + u32 sa_what; +}; + +struct nfs42_seek_res { + struct nfs4_sequence_res seq_res; + unsigned int status; + + u32 sr_eof; + u64 sr_offset; +}; +#endif + +struct nfs_page; + +#define NFS_PAGEVEC_SIZE (8U) + +struct nfs_page_array { + struct page **pagevec; + unsigned int npages; /* Max length of pagevec */ + struct page *page_array[NFS_PAGEVEC_SIZE]; +}; + +/* used as flag bits in nfs_pgio_header */ +enum { + NFS_IOHDR_ERROR = 0, + NFS_IOHDR_EOF, + NFS_IOHDR_REDO, + NFS_IOHDR_STAT, + NFS_IOHDR_RESEND_PNFS, + NFS_IOHDR_RESEND_MDS, +}; + +struct nfs_io_completion; +struct nfs_pgio_header { + struct inode *inode; + struct rpc_cred *cred; + struct list_head pages; + struct nfs_page *req; + struct nfs_writeverf verf; /* Used for writes */ + fmode_t rw_mode; + struct pnfs_layout_segment *lseg; + loff_t io_start; + const struct rpc_call_ops *mds_ops; + void (*release) (struct nfs_pgio_header *hdr); + const struct nfs_pgio_completion_ops *completion_ops; + const struct nfs_rw_ops *rw_ops; + struct nfs_io_completion *io_completion; + struct nfs_direct_req *dreq; + spinlock_t lock; + /* fields protected by lock */ + int pnfs_error; + int error; /* merge with pnfs_error */ + unsigned long good_bytes; /* boundary of good data */ + unsigned long flags; + + /* + * rpc data + */ + struct rpc_task task; + struct 
nfs_fattr fattr; + struct nfs_pgio_args args; /* argument struct */ + struct nfs_pgio_res res; /* result struct */ + unsigned long timestamp; /* For lease renewal */ + int (*pgio_done_cb)(struct rpc_task *, struct nfs_pgio_header *); + __u64 mds_offset; /* Filelayout dense stripe */ + struct nfs_page_array page_array; + struct nfs_client *ds_clp; /* pNFS data server */ + int ds_commit_idx; /* ds index if ds_clp is set */ + int pgio_mirror_idx;/* mirror index in pgio layer */ +}; + +struct nfs_mds_commit_info { + atomic_t rpcs_out; + atomic_long_t ncommit; + struct list_head list; +}; + +struct nfs_commit_info; +struct nfs_commit_data; +struct nfs_inode; +struct nfs_commit_completion_ops { + void (*completion) (struct nfs_commit_data *data); + void (*resched_write) (struct nfs_commit_info *, struct nfs_page *); +}; + +struct nfs_commit_info { + struct inode *inode; /* Needed for inode->i_lock */ + struct nfs_mds_commit_info *mds; + struct pnfs_ds_commit_info *ds; + struct nfs_direct_req *dreq; /* O_DIRECT request */ + const struct nfs_commit_completion_ops *completion_ops; +}; + +struct nfs_commit_data { + struct rpc_task task; + struct inode *inode; + struct rpc_cred *cred; + struct nfs_fattr fattr; + struct nfs_writeverf verf; + struct list_head pages; /* Coalesced requests we wish to flush */ + struct list_head list; /* lists of struct nfs_write_data */ + struct nfs_direct_req *dreq; /* O_DIRECT request */ + struct nfs_commitargs args; /* argument struct */ + struct nfs_commitres res; /* result struct */ + struct nfs_open_context *context; + struct pnfs_layout_segment *lseg; + struct nfs_client *ds_clp; /* pNFS data server */ + int ds_commit_index; + loff_t lwb; + const struct rpc_call_ops *mds_ops; + const struct nfs_commit_completion_ops *completion_ops; + int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data); + unsigned long flags; +}; + +struct nfs_pgio_completion_ops { + void (*error_cleanup)(struct list_head *head, int); + void (*init_hdr)(struct nfs_pgio_header *hdr); + void (*completion)(struct nfs_pgio_header *hdr); + void (*reschedule_io)(struct nfs_pgio_header *hdr); +}; + +struct nfs_unlinkdata { + struct nfs_removeargs args; + struct nfs_removeres res; + struct dentry *dentry; + wait_queue_head_t wq; + struct rpc_cred *cred; + struct nfs_fattr dir_attr; + long timeout; +}; + +struct nfs_renamedata { + struct nfs_renameargs args; + struct nfs_renameres res; + struct rpc_cred *cred; + struct inode *old_dir; + struct dentry *old_dentry; + struct nfs_fattr old_fattr; + struct inode *new_dir; + struct dentry *new_dentry; + struct nfs_fattr new_fattr; + void (*complete)(struct rpc_task *, struct nfs_renamedata *); + long timeout; + bool cancelled; +}; + +struct nfs_access_entry; +struct nfs_client; +struct rpc_timeout; +struct nfs_subversion; +struct nfs_mount_info; +struct nfs_client_initdata; +struct nfs_pageio_descriptor; + +/* + * RPC procedure vector for NFSv2/NFSv3 demuxing + */ +struct nfs_rpc_ops { + u32 version; /* Protocol version */ + const struct dentry_operations *dentry_ops; + const struct inode_operations *dir_inode_ops; + const struct inode_operations *file_inode_ops; + const struct file_operations *file_ops; + const struct nlmclnt_operations *nlmclnt_ops; + + int (*getroot) (struct nfs_server *, struct nfs_fh *, + struct nfs_fsinfo *); + struct vfsmount *(*submount) (struct nfs_server *, struct dentry *, + struct nfs_fh *, struct nfs_fattr *); + struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *, + struct nfs_subversion 
*); + int (*getattr) (struct nfs_server *, struct nfs_fh *, + struct nfs_fattr *, struct nfs4_label *, + struct inode *); + int (*setattr) (struct dentry *, struct nfs_fattr *, + struct iattr *); + int (*lookup) (struct inode *, const struct qstr *, + struct nfs_fh *, struct nfs_fattr *, + struct nfs4_label *); + int (*lookupp) (struct inode *, struct nfs_fh *, + struct nfs_fattr *, struct nfs4_label *); + int (*access) (struct inode *, struct nfs_access_entry *); + int (*readlink)(struct inode *, struct page *, unsigned int, + unsigned int); + int (*create) (struct inode *, struct dentry *, + struct iattr *, int); + int (*remove) (struct inode *, struct dentry *); + void (*unlink_setup) (struct rpc_message *, struct dentry *, struct inode *); + void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *); + int (*unlink_done) (struct rpc_task *, struct inode *); + void (*rename_setup) (struct rpc_message *msg, + struct dentry *old_dentry, + struct dentry *new_dentry); + void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); + int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); + int (*link) (struct inode *, struct inode *, const struct qstr *); + int (*symlink) (struct inode *, struct dentry *, struct page *, + unsigned int, struct iattr *); + int (*mkdir) (struct inode *, struct dentry *, struct iattr *); + int (*rmdir) (struct inode *, const struct qstr *); + int (*readdir) (struct dentry *, struct rpc_cred *, + u64, struct page **, unsigned int, bool); + int (*mknod) (struct inode *, struct dentry *, struct iattr *, + dev_t); + int (*statfs) (struct nfs_server *, struct nfs_fh *, + struct nfs_fsstat *); + int (*fsinfo) (struct nfs_server *, struct nfs_fh *, + struct nfs_fsinfo *); + int (*pathconf) (struct nfs_server *, struct nfs_fh *, + struct nfs_pathconf *); + int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); + int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, bool); + int (*pgio_rpc_prepare)(struct rpc_task *, + struct nfs_pgio_header *); + void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *); + int (*read_done)(struct rpc_task *, struct nfs_pgio_header *); + void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *, + struct rpc_clnt **); + int (*write_done)(struct rpc_task *, struct nfs_pgio_header *); + void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *, + struct rpc_clnt **); + void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *); + int (*commit_done) (struct rpc_task *, struct nfs_commit_data *); + int (*lock)(struct file *, int, struct file_lock *); + int (*lock_check_bounds)(const struct file_lock *); + void (*clear_acl_cache)(struct inode *); + void (*close_context)(struct nfs_open_context *ctx, int); + struct inode * (*open_context) (struct inode *dir, + struct nfs_open_context *ctx, + int open_flags, + struct iattr *iattr, + int *); + int (*have_delegation)(struct inode *, fmode_t); + struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *); + struct nfs_client *(*init_client) (struct nfs_client *, + const struct nfs_client_initdata *); + void (*free_client) (struct nfs_client *); + struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *); + struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *, + struct nfs_fattr *, rpc_authflavor_t); +}; + +/* + * NFS_CALL(getattr, inode, (fattr)); + * into + * NFS_PROTO(inode)->getattr(fattr); + */ +#define NFS_CALL(op, 
inode, args) NFS_PROTO(inode)->op args + +/* + * Function vectors etc. for the NFS client + */ +extern const struct nfs_rpc_ops nfs_v2_clientops; +extern const struct nfs_rpc_ops nfs_v3_clientops; +extern const struct nfs_rpc_ops nfs_v4_clientops; +extern const struct rpc_version nfs_version2; +extern const struct rpc_version nfs_version3; +extern const struct rpc_version nfs_version4; + +extern const struct rpc_version nfsacl_version3; +extern const struct rpc_program nfsacl_program; + +#endif diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h new file mode 100644 index 000000000..103d44695 --- /dev/null +++ b/include/linux/nfsacl.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * File: linux/nfsacl.h + * + * (C) 2003 Andreas Gruenbacher + */ +#ifndef __LINUX_NFSACL_H +#define __LINUX_NFSACL_H + + +#include +#include +#include + +/* Maximum number of ACL entries over NFS */ +#define NFS_ACL_MAX_ENTRIES 1024 + +#define NFSACL_MAXWORDS (2*(2+3*NFS_ACL_MAX_ENTRIES)) +#define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \ + >> PAGE_SHIFT) + +#define NFS_ACL_MAX_ENTRIES_INLINE (5) +#define NFS_ACL_INLINE_BUFSIZE ((2*(2+3*NFS_ACL_MAX_ENTRIES_INLINE)) << 2) + +static inline unsigned int +nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default) +{ + unsigned int w = 16; + w += max(acl_access ? (int)acl_access->a_count : 3, 4) * 12; + if (acl_default) + w += max((int)acl_default->a_count, 4) * 12; + return w; +} + +extern int +nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, + struct posix_acl *acl, int encode_entries, int typeflag); +extern int +nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, + struct posix_acl **pacl); + +#endif /* __LINUX_NFSACL_H */ diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h new file mode 100644 index 000000000..0f6f6607f --- /dev/null +++ b/include/linux/nl802154.h @@ -0,0 +1,180 @@ +/* + * nl802154.h + * + * Copyright (C) 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef NL802154_H +#define NL802154_H + +#define IEEE802154_NL_NAME "802.15.4 MAC" +#define IEEE802154_MCAST_COORD_NAME "coordinator" +#define IEEE802154_MCAST_BEACON_NAME "beacon" + +enum { + __IEEE802154_ATTR_INVALID, + + IEEE802154_ATTR_DEV_NAME, + IEEE802154_ATTR_DEV_INDEX, + + IEEE802154_ATTR_STATUS, + + IEEE802154_ATTR_SHORT_ADDR, + IEEE802154_ATTR_HW_ADDR, + IEEE802154_ATTR_PAN_ID, + + IEEE802154_ATTR_CHANNEL, + + IEEE802154_ATTR_COORD_SHORT_ADDR, + IEEE802154_ATTR_COORD_HW_ADDR, + IEEE802154_ATTR_COORD_PAN_ID, + + IEEE802154_ATTR_SRC_SHORT_ADDR, + IEEE802154_ATTR_SRC_HW_ADDR, + IEEE802154_ATTR_SRC_PAN_ID, + + IEEE802154_ATTR_DEST_SHORT_ADDR, + IEEE802154_ATTR_DEST_HW_ADDR, + IEEE802154_ATTR_DEST_PAN_ID, + + IEEE802154_ATTR_CAPABILITY, + IEEE802154_ATTR_REASON, + IEEE802154_ATTR_SCAN_TYPE, + IEEE802154_ATTR_CHANNELS, + IEEE802154_ATTR_DURATION, + IEEE802154_ATTR_ED_LIST, + IEEE802154_ATTR_BCN_ORD, + IEEE802154_ATTR_SF_ORD, + IEEE802154_ATTR_PAN_COORD, + IEEE802154_ATTR_BAT_EXT, + IEEE802154_ATTR_COORD_REALIGN, + IEEE802154_ATTR_SEC, + + IEEE802154_ATTR_PAGE, + IEEE802154_ATTR_CHANNEL_PAGE_LIST, + + IEEE802154_ATTR_PHY_NAME, + IEEE802154_ATTR_DEV_TYPE, + + IEEE802154_ATTR_TXPOWER, + IEEE802154_ATTR_LBT_ENABLED, + IEEE802154_ATTR_CCA_MODE, + IEEE802154_ATTR_CCA_ED_LEVEL, + IEEE802154_ATTR_CSMA_RETRIES, + IEEE802154_ATTR_CSMA_MIN_BE, + IEEE802154_ATTR_CSMA_MAX_BE, + + IEEE802154_ATTR_FRAME_RETRIES, + + IEEE802154_ATTR_LLSEC_ENABLED, + IEEE802154_ATTR_LLSEC_SECLEVEL, + IEEE802154_ATTR_LLSEC_KEY_MODE, + IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT, + IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED, + IEEE802154_ATTR_LLSEC_KEY_ID, + IEEE802154_ATTR_LLSEC_FRAME_COUNTER, + IEEE802154_ATTR_LLSEC_KEY_BYTES, + IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES, + IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS, + IEEE802154_ATTR_LLSEC_FRAME_TYPE, + IEEE802154_ATTR_LLSEC_CMD_FRAME_ID, + IEEE802154_ATTR_LLSEC_SECLEVELS, + IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, + IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, + + IEEE802154_ATTR_PAD, + + __IEEE802154_ATTR_MAX, +}; + +#define IEEE802154_ATTR_MAX (__IEEE802154_ATTR_MAX - 1) + +extern const struct nla_policy ieee802154_policy[]; + +/* commands */ +/* REQ should be responded with CONF + * and INDIC with RESP + */ +enum { + __IEEE802154_COMMAND_INVALID, + + IEEE802154_ASSOCIATE_REQ, + IEEE802154_ASSOCIATE_CONF, + IEEE802154_DISASSOCIATE_REQ, + IEEE802154_DISASSOCIATE_CONF, + IEEE802154_GET_REQ, + IEEE802154_GET_CONF, + IEEE802154_RESET_REQ, + IEEE802154_RESET_CONF, + IEEE802154_SCAN_REQ, + IEEE802154_SCAN_CONF, + IEEE802154_SET_REQ, + IEEE802154_SET_CONF, + IEEE802154_START_REQ, + IEEE802154_START_CONF, + IEEE802154_SYNC_REQ, + IEEE802154_POLL_REQ, + IEEE802154_POLL_CONF, + + IEEE802154_ASSOCIATE_INDIC, + IEEE802154_ASSOCIATE_RESP, + IEEE802154_DISASSOCIATE_INDIC, + IEEE802154_BEACON_NOTIFY_INDIC, + IEEE802154_ORPHAN_INDIC, + IEEE802154_ORPHAN_RESP, + IEEE802154_COMM_STATUS_INDIC, + IEEE802154_SYNC_LOSS_INDIC, + + IEEE802154_GTS_REQ, /* Not supported yet */ + IEEE802154_GTS_INDIC, /* Not supported yet */ + IEEE802154_GTS_CONF, /* Not supported yet */ + IEEE802154_RX_ENABLE_REQ, /* Not supported yet */ + IEEE802154_RX_ENABLE_CONF, /* Not supported yet */ + + IEEE802154_LIST_IFACE, + IEEE802154_LIST_PHY, + IEEE802154_ADD_IFACE, + IEEE802154_DEL_IFACE, + + IEEE802154_SET_MACPARAMS, + + IEEE802154_LLSEC_GETPARAMS, + IEEE802154_LLSEC_SETPARAMS, + IEEE802154_LLSEC_LIST_KEY, + IEEE802154_LLSEC_ADD_KEY, + IEEE802154_LLSEC_DEL_KEY, + IEEE802154_LLSEC_LIST_DEV, + 
IEEE802154_LLSEC_ADD_DEV, + IEEE802154_LLSEC_DEL_DEV, + IEEE802154_LLSEC_LIST_DEVKEY, + IEEE802154_LLSEC_ADD_DEVKEY, + IEEE802154_LLSEC_DEL_DEVKEY, + IEEE802154_LLSEC_LIST_SECLEVEL, + IEEE802154_LLSEC_ADD_SECLEVEL, + IEEE802154_LLSEC_DEL_SECLEVEL, + + __IEEE802154_CMD_MAX, +}; + +#define IEEE802154_CMD_MAX (__IEEE802154_CMD_MAX - 1) + +enum { + __IEEE802154_DEV_INVALID = -1, + + IEEE802154_DEV_WPAN, + IEEE802154_DEV_MONITOR, + + __IEEE802154_DEV_MAX, +}; + +#endif diff --git a/include/linux/nls.h b/include/linux/nls.h new file mode 100644 index 000000000..499e486b3 --- /dev/null +++ b/include/linux/nls.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_NLS_H +#define _LINUX_NLS_H + +#include + +/* Unicode has changed over the years. Unicode code points no longer + * fit into 16 bits; as of Unicode 5 valid code points range from 0 + * to 0x10ffff (17 planes, where each plane holds 65536 code points). + * + * The original decision to represent Unicode characters as 16-bit + * wchar_t values is now outdated. But plane 0 still includes the + * most commonly used characters, so we will retain it. The newer + * 32-bit unicode_t type can be used when it is necessary to + * represent the full Unicode character set. + */ + +/* Plane-0 Unicode character */ +typedef u16 wchar_t; +#define MAX_WCHAR_T 0xffff + +/* Arbitrary Unicode character */ +typedef u32 unicode_t; + +struct nls_table { + const char *charset; + const char *alias; + int (*uni2char) (wchar_t uni, unsigned char *out, int boundlen); + int (*char2uni) (const unsigned char *rawstring, int boundlen, + wchar_t *uni); + const unsigned char *charset2lower; + const unsigned char *charset2upper; + struct module *owner; + struct nls_table *next; +}; + +/* this value hold the maximum octet of charset */ +#define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */ + +/* Byte order for UTF-16 strings */ +enum utf16_endian { + UTF16_HOST_ENDIAN, + UTF16_LITTLE_ENDIAN, + UTF16_BIG_ENDIAN +}; + +/* nls_base.c */ +extern int __register_nls(struct nls_table *, struct module *); +extern int unregister_nls(struct nls_table *); +extern struct nls_table *load_nls(char *); +extern void unload_nls(struct nls_table *); +extern struct nls_table *load_nls_default(void); +#define register_nls(nls) __register_nls((nls), THIS_MODULE) + +extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu); +extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen); +extern int utf8s_to_utf16s(const u8 *s, int len, + enum utf16_endian endian, wchar_t *pwcs, int maxlen); +extern int utf16s_to_utf8s(const wchar_t *pwcs, int len, + enum utf16_endian endian, u8 *s, int maxlen); + +static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c) +{ + unsigned char nc = t->charset2lower[c]; + + return nc ? nc : c; +} + +static inline unsigned char nls_toupper(struct nls_table *t, unsigned char c) +{ + unsigned char nc = t->charset2upper[c]; + + return nc ? nc : c; +} + +static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1, + const unsigned char *s2, int len) +{ + while (len--) { + if (nls_tolower(t, *s1++) != nls_tolower(t, *s2++)) + return 1; + } + + return 0; +} + +/* + * nls_nullsize - return length of null character for codepage + * @codepage - codepage for which to return length of NULL terminator + * + * Since we can't guarantee that the null terminator will be a particular + * length, we have to check against the codepage. If there's a problem + * determining it, assume a single-byte NULL terminator. 
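+ *
+ * For example, a codepage whose uni2char() encodes the NUL character in
+ * two octets needs two zero bytes as a terminator, whereas single-byte
+ * codepages (and UTF-8) need only one.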
+ */ +static inline int +nls_nullsize(const struct nls_table *codepage) +{ + int charlen; + char tmp[NLS_MAX_CHARSET_SIZE]; + + charlen = codepage->uni2char(0, tmp, NLS_MAX_CHARSET_SIZE); + + return charlen > 0 ? charlen : 1; +} + +#define MODULE_ALIAS_NLS(name) MODULE_ALIAS("nls_" __stringify(name)) + +#endif /* _LINUX_NLS_H */ + diff --git a/include/linux/nmi.h b/include/linux/nmi.h new file mode 100644 index 000000000..9003e29cd --- /dev/null +++ b/include/linux/nmi.h @@ -0,0 +1,220 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/nmi.h + */ +#ifndef LINUX_NMI_H +#define LINUX_NMI_H + +#include +#include +#if defined(CONFIG_HAVE_NMI_WATCHDOG) +#include +#endif + +#ifdef CONFIG_LOCKUP_DETECTOR +void lockup_detector_init(void); +void lockup_detector_soft_poweroff(void); +void lockup_detector_cleanup(void); +bool is_hardlockup(void); + +extern int watchdog_user_enabled; +extern int nmi_watchdog_user_enabled; +extern int soft_watchdog_user_enabled; +extern int watchdog_thresh; +extern unsigned long watchdog_enabled; + +extern struct cpumask watchdog_cpumask; +extern unsigned long *watchdog_cpumask_bits; +#ifdef CONFIG_SMP +extern int sysctl_softlockup_all_cpu_backtrace; +extern int sysctl_hardlockup_all_cpu_backtrace; +#else +#define sysctl_softlockup_all_cpu_backtrace 0 +#define sysctl_hardlockup_all_cpu_backtrace 0 +#endif /* !CONFIG_SMP */ + +#else /* CONFIG_LOCKUP_DETECTOR */ +static inline void lockup_detector_init(void) { } +static inline void lockup_detector_soft_poweroff(void) { } +static inline void lockup_detector_cleanup(void) { } +#endif /* !CONFIG_LOCKUP_DETECTOR */ + +#ifdef CONFIG_SOFTLOCKUP_DETECTOR +extern void touch_softlockup_watchdog_sched(void); +extern void touch_softlockup_watchdog(void); +extern void touch_softlockup_watchdog_sync(void); +extern void touch_all_softlockup_watchdogs(void); +extern unsigned int softlockup_panic; + +extern int lockup_detector_online_cpu(unsigned int cpu); +extern int lockup_detector_offline_cpu(unsigned int cpu); +#else /* CONFIG_SOFTLOCKUP_DETECTOR */ +static inline void touch_softlockup_watchdog_sched(void) { } +static inline void touch_softlockup_watchdog(void) { } +static inline void touch_softlockup_watchdog_sync(void) { } +static inline void touch_all_softlockup_watchdogs(void) { } + +#define lockup_detector_online_cpu NULL +#define lockup_detector_offline_cpu NULL +#endif /* CONFIG_SOFTLOCKUP_DETECTOR */ + +#ifdef CONFIG_DETECT_HUNG_TASK +void reset_hung_task_detector(void); +#else +static inline void reset_hung_task_detector(void) { } +#endif + +/* + * The run state of the lockup detectors is controlled by the content of the + * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - + * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. + * + * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and + * 'soft_watchdog_user_enabled' are variables that are only used as an + * 'interface' between the parameters in /proc/sys/kernel and the internal + * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is + * handled differently because its value is not boolean, and the lockup + * detectors are 'suspended' while 'watchdog_thresh' is equal zero. 
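+ *
+ * For example (illustrative only): with both detectors active,
+ * 'watchdog_enabled' == NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED == 0x3,
+ * and clearing SOFT_WATCHDOG_ENABLED_BIT leaves only the hard lockup
+ * detector enabled.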
+ */ +#define NMI_WATCHDOG_ENABLED_BIT 0 +#define SOFT_WATCHDOG_ENABLED_BIT 1 +#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) +#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) + +#if defined(CONFIG_HARDLOCKUP_DETECTOR) +extern void hardlockup_detector_disable(void); +extern unsigned int hardlockup_panic; +#else +static inline void hardlockup_detector_disable(void) {} +#endif + +#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) +# define NMI_WATCHDOG_SYSCTL_PERM 0644 +#else +# define NMI_WATCHDOG_SYSCTL_PERM 0444 +#endif + +#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) +extern void arch_touch_nmi_watchdog(void); +extern void hardlockup_detector_perf_stop(void); +extern void hardlockup_detector_perf_restart(void); +extern void hardlockup_detector_perf_disable(void); +extern void hardlockup_detector_perf_enable(void); +extern void hardlockup_detector_perf_cleanup(void); +extern int hardlockup_detector_perf_init(void); +#else +static inline void hardlockup_detector_perf_stop(void) { } +static inline void hardlockup_detector_perf_restart(void) { } +static inline void hardlockup_detector_perf_disable(void) { } +static inline void hardlockup_detector_perf_enable(void) { } +static inline void hardlockup_detector_perf_cleanup(void) { } +# if !defined(CONFIG_HAVE_NMI_WATCHDOG) +static inline int hardlockup_detector_perf_init(void) { return -ENODEV; } +static inline void arch_touch_nmi_watchdog(void) {} +# else +static inline int hardlockup_detector_perf_init(void) { return 0; } +# endif +#endif + +void watchdog_nmi_stop(void); +void watchdog_nmi_start(void); +int watchdog_nmi_probe(void); +int watchdog_nmi_enable(unsigned int cpu); +void watchdog_nmi_disable(unsigned int cpu); + +/** + * touch_nmi_watchdog - restart NMI watchdog timeout. + * + * If the architecture supports the NMI watchdog, touch_nmi_watchdog() + * may be used to reset the timeout - for code which intentionally + * disables interrupts for a long time. This call is stateless. + */ +static inline void touch_nmi_watchdog(void) +{ + arch_touch_nmi_watchdog(); + touch_softlockup_watchdog(); +} + +/* + * Create trigger_all_cpu_backtrace() out of the arch-provided + * base function. 
Return whether such support was available, + * to allow calling code to fall back to some other mechanism: + */ +#ifdef arch_trigger_cpumask_backtrace +static inline bool trigger_all_cpu_backtrace(void) +{ + arch_trigger_cpumask_backtrace(cpu_online_mask, false); + return true; +} + +static inline bool trigger_allbutself_cpu_backtrace(void) +{ + arch_trigger_cpumask_backtrace(cpu_online_mask, true); + return true; +} + +static inline bool trigger_cpumask_backtrace(struct cpumask *mask) +{ + arch_trigger_cpumask_backtrace(mask, false); + return true; +} + +static inline bool trigger_single_cpu_backtrace(int cpu) +{ + arch_trigger_cpumask_backtrace(cpumask_of(cpu), false); + return true; +} + +/* generic implementation */ +void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, + bool exclude_self, + void (*raise)(cpumask_t *mask)); +bool nmi_cpu_backtrace(struct pt_regs *regs); + +#else +static inline bool trigger_all_cpu_backtrace(void) +{ + return false; +} +static inline bool trigger_allbutself_cpu_backtrace(void) +{ + return false; +} +static inline bool trigger_cpumask_backtrace(struct cpumask *mask) +{ + return false; +} +static inline bool trigger_single_cpu_backtrace(int cpu) +{ + return false; +} +#endif + +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF +u64 hw_nmi_get_sample_period(int watchdog_thresh); +#endif + +#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ + defined(CONFIG_HARDLOCKUP_DETECTOR) +void watchdog_update_hrtimer_threshold(u64 period); +#else +static inline void watchdog_update_hrtimer_threshold(u64 period) { } +#endif + +struct ctl_table; +extern int proc_watchdog(struct ctl_table *, int , + void __user *, size_t *, loff_t *); +extern int proc_nmi_watchdog(struct ctl_table *, int , + void __user *, size_t *, loff_t *); +extern int proc_soft_watchdog(struct ctl_table *, int , + void __user *, size_t *, loff_t *); +extern int proc_watchdog_thresh(struct ctl_table *, int , + void __user *, size_t *, loff_t *); +extern int proc_watchdog_cpumask(struct ctl_table *, int, + void __user *, size_t *, loff_t *); + +#ifdef CONFIG_HAVE_ACPI_APEI_NMI +#include +#endif + +#endif diff --git a/include/linux/node.h b/include/linux/node.h new file mode 100644 index 000000000..a79ec4492 --- /dev/null +++ b/include/linux/node.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/node.h - generic node definition + * + * This is mainly for topological representation. We define the + * basic 'struct node' here, which can be embedded in per-arch + * definitions of processors. + * + * Basic handling of the devices is done in drivers/base/node.c + * and system devices are handled in drivers/base/sys.c. + * + * Nodes are exported via driverfs in the class/node/devices/ + * directory. 
+ */ +#ifndef _LINUX_NODE_H_ +#define _LINUX_NODE_H_ + +#include +#include +#include + +struct node { + struct device dev; + +#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) + struct work_struct node_work; +#endif +}; + +struct memory_block; +extern struct node *node_devices[]; +typedef void (*node_registration_func_t)(struct node *); + +#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) +int link_mem_sections(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context); +#else +static inline int link_mem_sections(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context) +{ + return 0; +} +#endif + +extern void unregister_node(struct node *node); +#ifdef CONFIG_NUMA +/* Core of the node registration - only memory hotplug should use this */ +extern int __register_one_node(int nid); + +/* Registers an online node */ +static inline int register_one_node(int nid) +{ + int error = 0; + + if (node_online(nid)) { + struct pglist_data *pgdat = NODE_DATA(nid); + unsigned long start_pfn = pgdat->node_start_pfn; + unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; + + error = __register_one_node(nid); + if (error) + return error; + /* link memory sections under this node */ + error = link_mem_sections(nid, start_pfn, end_pfn, + MEMINIT_EARLY); + } + + return error; +} + +extern void unregister_one_node(int nid); +extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); +extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); +extern int register_mem_sect_under_node(struct memory_block *mem_blk, + void *arg); +extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk); + +#ifdef CONFIG_HUGETLBFS +extern void register_hugetlbfs_with_node(node_registration_func_t doregister, + node_registration_func_t unregister); +#endif +#else +static inline int __register_one_node(int nid) +{ + return 0; +} +static inline int register_one_node(int nid) +{ + return 0; +} +static inline int unregister_one_node(int nid) +{ + return 0; +} +static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) +{ + return 0; +} +static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) +{ + return 0; +} +static inline int register_mem_sect_under_node(struct memory_block *mem_blk, + void *arg) +{ + return 0; +} +static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk) +{ +} + +static inline void register_hugetlbfs_with_node(node_registration_func_t reg, + node_registration_func_t unreg) +{ +} +#endif + +#define to_node(device) container_of(device, struct node, dev) + +#endif /* _LINUX_NODE_H_ */ diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h new file mode 100644 index 000000000..326744b7d --- /dev/null +++ b/include/linux/nodemask.h @@ -0,0 +1,541 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_NODEMASK_H +#define __LINUX_NODEMASK_H + +/* + * Nodemasks provide a bitmap suitable for representing the + * set of Node's in a system, one bit position per Node number. + * + * See detailed comments in the file linux/bitmap.h describing the + * data type on which these nodemasks are based. + * + * For details of nodemask_parse_user(), see bitmap_parse_user() in + * lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(), + * also in bitmap.c. For details of node_remap(), see bitmap_bitremap in + * lib/bitmap.c. 
For details of nodes_remap(), see bitmap_remap in + * lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in + * lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in + * lib/bitmap.c. + * + * The available nodemask operations are: + * + * void node_set(node, mask) turn on bit 'node' in mask + * void node_clear(node, mask) turn off bit 'node' in mask + * void nodes_setall(mask) set all bits + * void nodes_clear(mask) clear all bits + * int node_isset(node, mask) true iff bit 'node' set in mask + * int node_test_and_set(node, mask) test and set bit 'node' in mask + * + * void nodes_and(dst, src1, src2) dst = src1 & src2 [intersection] + * void nodes_or(dst, src1, src2) dst = src1 | src2 [union] + * void nodes_xor(dst, src1, src2) dst = src1 ^ src2 + * void nodes_andnot(dst, src1, src2) dst = src1 & ~src2 + * void nodes_complement(dst, src) dst = ~src + * + * int nodes_equal(mask1, mask2) Does mask1 == mask2? + * int nodes_intersects(mask1, mask2) Do mask1 and mask2 intersect? + * int nodes_subset(mask1, mask2) Is mask1 a subset of mask2? + * int nodes_empty(mask) Is mask empty (no bits sets)? + * int nodes_full(mask) Is mask full (all bits sets)? + * int nodes_weight(mask) Hamming weight - number of set bits + * + * void nodes_shift_right(dst, src, n) Shift right + * void nodes_shift_left(dst, src, n) Shift left + * + * unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES + * unsigend int next_node(node, mask) Next node past 'node', or MAX_NUMNODES + * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first, + * or MAX_NUMNODES + * unsigned int first_unset_node(mask) First node not set in mask, or + * MAX_NUMNODES + * + * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set + * NODE_MASK_ALL Initializer - all bits set + * NODE_MASK_NONE Initializer - no bits set + * unsigned long *nodes_addr(mask) Array of unsigned long's in mask + * + * int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask + * int nodelist_parse(buf, map) Parse ascii string as nodelist + * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit) + * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src) + * void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap + * void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz + * + * for_each_node_mask(node, mask) for-loop node over mask + * + * int num_online_nodes() Number of online Nodes + * int num_possible_nodes() Number of all possible Nodes + * + * int node_random(mask) Random node with set bit in mask + * + * int node_online(node) Is some node online? + * int node_possible(node) Is some node possible? + * + * node_set_online(node) set bit 'node' in node_online_map + * node_set_offline(node) clear bit 'node' in node_online_map + * + * for_each_node(node) for-loop node over node_possible_map + * for_each_online_node(node) for-loop node over node_online_map + * + * Subtlety: + * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway) + * to generate slightly worse code. So use a simple one-line #define + * for node_isset(), instead of wrapping an inline inside a macro, the + * way we do the other calls. + * + * NODEMASK_SCRATCH + * When doing above logical AND, OR, XOR, Remap operations the callers tend to + * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large, + * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper + * for such situations. See below and CPUMASK_ALLOC also. 
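+ *
+ * A short illustrative sketch combining a few of the operations above
+ * ('allowed' stands for a hypothetical caller-provided nodemask_t):
+ *
+ *     nodemask_t usable;
+ *     int nid;
+ *
+ *     nodes_and(usable, node_online_map, allowed);
+ *     for_each_node_mask(nid, usable)
+ *         pr_info("node %d is usable\n", nid);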
+ */ + +#include +#include +#include +#include + +typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; +extern nodemask_t _unused_nodemask_arg_; + +/** + * nodemask_pr_args - printf args to output a nodemask + * @maskp: nodemask to be printed + * + * Can be used to provide arguments for '%*pb[l]' when printing a nodemask. + */ +#define nodemask_pr_args(maskp) __nodemask_pr_numnodes(maskp), \ + __nodemask_pr_bits(maskp) +static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m) +{ + return m ? MAX_NUMNODES : 0; +} +static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m) +{ + return m ? m->bits : NULL; +} + +/* + * The inline keyword gives the compiler room to decide to inline, or + * not inline a function as it sees best. However, as these functions + * are called in both __init and non-__init functions, if they are not + * inlined we will end up with a section mis-match error (of the type of + * freeable items not being freed). So we must use __always_inline here + * to fix the problem. If other functions in the future also end up in + * this situation they will also need to be annotated as __always_inline + */ +#define node_set(node, dst) __node_set((node), &(dst)) +static __always_inline void __node_set(int node, volatile nodemask_t *dstp) +{ + set_bit(node, dstp->bits); +} + +#define node_clear(node, dst) __node_clear((node), &(dst)) +static inline void __node_clear(int node, volatile nodemask_t *dstp) +{ + clear_bit(node, dstp->bits); +} + +#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES) +static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits) +{ + bitmap_fill(dstp->bits, nbits); +} + +#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES) +static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits) +{ + bitmap_zero(dstp->bits, nbits); +} + +/* No static inline type checking - see Subtlety (1) above. 
*/ +#define node_isset(node, nodemask) test_bit((node), (nodemask).bits) + +#define node_test_and_set(node, nodemask) \ + __node_test_and_set((node), &(nodemask)) +static inline bool __node_test_and_set(int node, nodemask_t *addr) +{ + return test_and_set_bit(node, addr->bits); +} + +#define nodes_and(dst, src1, src2) \ + __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES) +static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define nodes_or(dst, src1, src2) \ + __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES) +static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define nodes_xor(dst, src1, src2) \ + __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES) +static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define nodes_andnot(dst, src1, src2) \ + __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES) +static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define nodes_complement(dst, src) \ + __nodes_complement(&(dst), &(src), MAX_NUMNODES) +static inline void __nodes_complement(nodemask_t *dstp, + const nodemask_t *srcp, unsigned int nbits) +{ + bitmap_complement(dstp->bits, srcp->bits, nbits); +} + +#define nodes_equal(src1, src2) \ + __nodes_equal(&(src1), &(src2), MAX_NUMNODES) +static inline bool __nodes_equal(const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + return bitmap_equal(src1p->bits, src2p->bits, nbits); +} + +#define nodes_intersects(src1, src2) \ + __nodes_intersects(&(src1), &(src2), MAX_NUMNODES) +static inline bool __nodes_intersects(const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + return bitmap_intersects(src1p->bits, src2p->bits, nbits); +} + +#define nodes_subset(src1, src2) \ + __nodes_subset(&(src1), &(src2), MAX_NUMNODES) +static inline bool __nodes_subset(const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + return bitmap_subset(src1p->bits, src2p->bits, nbits); +} + +#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES) +static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits) +{ + return bitmap_empty(srcp->bits, nbits); +} + +#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES) +static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits) +{ + return bitmap_full(srcp->bits, nbits); +} + +#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES) +static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits) +{ + return bitmap_weight(srcp->bits, nbits); +} + +#define nodes_shift_right(dst, src, n) \ + __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES) +static inline void __nodes_shift_right(nodemask_t *dstp, + const nodemask_t *srcp, int n, int nbits) +{ + bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); +} + +#define nodes_shift_left(dst, src, n) \ + __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES) +static inline void __nodes_shift_left(nodemask_t *dstp, + const nodemask_t *srcp, int n, int nbits) +{ + bitmap_shift_left(dstp->bits, srcp->bits, n, 
nbits); +} + +/* FIXME: better would be to fix all architectures to never return + > MAX_NUMNODES, then the silly min_ts could be dropped. */ + +#define first_node(src) __first_node(&(src)) +static inline unsigned int __first_node(const nodemask_t *srcp) +{ + return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); +} + +#define next_node(n, src) __next_node((n), &(src)) +static inline unsigned int __next_node(int n, const nodemask_t *srcp) +{ + return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); +} + +/* + * Find the next present node in src, starting after node n, wrapping around to + * the first node in src if needed. Returns MAX_NUMNODES if src is empty. + */ +#define next_node_in(n, src) __next_node_in((n), &(src)) +unsigned int __next_node_in(int node, const nodemask_t *srcp); + +static inline void init_nodemask_of_node(nodemask_t *mask, int node) +{ + nodes_clear(*mask); + node_set(node, *mask); +} + +#define nodemask_of_node(node) \ +({ \ + typeof(_unused_nodemask_arg_) m; \ + if (sizeof(m) == sizeof(unsigned long)) { \ + m.bits[0] = 1UL << (node); \ + } else { \ + init_nodemask_of_node(&m, (node)); \ + } \ + m; \ +}) + +#define first_unset_node(mask) __first_unset_node(&(mask)) +static inline unsigned int __first_unset_node(const nodemask_t *maskp) +{ + return min_t(unsigned int, MAX_NUMNODES, + find_first_zero_bit(maskp->bits, MAX_NUMNODES)); +} + +#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES) + +#if MAX_NUMNODES <= BITS_PER_LONG + +#define NODE_MASK_ALL \ +((nodemask_t) { { \ + [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ +} }) + +#else + +#define NODE_MASK_ALL \ +((nodemask_t) { { \ + [0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL, \ + [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ +} }) + +#endif + +#define NODE_MASK_NONE \ +((nodemask_t) { { \ + [0 ... 
BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL \ +} }) + +#define nodes_addr(src) ((src).bits) + +#define nodemask_parse_user(ubuf, ulen, dst) \ + __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES) +static inline int __nodemask_parse_user(const char __user *buf, int len, + nodemask_t *dstp, int nbits) +{ + return bitmap_parse_user(buf, len, dstp->bits, nbits); +} + +#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES) +static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) +{ + return bitmap_parselist(buf, dstp->bits, nbits); +} + +#define node_remap(oldbit, old, new) \ + __node_remap((oldbit), &(old), &(new), MAX_NUMNODES) +static inline int __node_remap(int oldbit, + const nodemask_t *oldp, const nodemask_t *newp, int nbits) +{ + return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); +} + +#define nodes_remap(dst, src, old, new) \ + __nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES) +static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, + const nodemask_t *oldp, const nodemask_t *newp, int nbits) +{ + bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); +} + +#define nodes_onto(dst, orig, relmap) \ + __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES) +static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, + const nodemask_t *relmapp, int nbits) +{ + bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); +} + +#define nodes_fold(dst, orig, sz) \ + __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES) +static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, + int sz, int nbits) +{ + bitmap_fold(dstp->bits, origp->bits, sz, nbits); +} + +#if MAX_NUMNODES > 1 +#define for_each_node_mask(node, mask) \ + for ((node) = first_node(mask); \ + (node >= 0) && (node) < MAX_NUMNODES; \ + (node) = next_node((node), (mask))) +#else /* MAX_NUMNODES == 1 */ +#define for_each_node_mask(node, mask) \ + for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++) +#endif /* MAX_NUMNODES */ + +/* + * Bitmasks that are kept for all the nodes. + */ +enum node_states { + N_POSSIBLE, /* The node could become online at some point */ + N_ONLINE, /* The node is online */ + N_NORMAL_MEMORY, /* The node has regular memory */ +#ifdef CONFIG_HIGHMEM + N_HIGH_MEMORY, /* The node has regular or high memory */ +#else + N_HIGH_MEMORY = N_NORMAL_MEMORY, +#endif + N_MEMORY, /* The node has memory(regular, high, movable) */ + N_CPU, /* The node has one or more cpus */ + NR_NODE_STATES +}; + +/* + * The following particular system nodemasks and operations + * on them manage all possible and online nodes. 
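+ *
+ * For example (illustrative only), the number of memory-bearing nodes could
+ * be computed from these masks as:
+ *
+ *     int nid, nr = 0;
+ *
+ *     for_each_node_state(nid, N_MEMORY)
+ *         nr++;
+ *
+ * which is equivalent to num_node_state(N_MEMORY).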
+ */ + +extern nodemask_t node_states[NR_NODE_STATES]; + +#if MAX_NUMNODES > 1 +static inline int node_state(int node, enum node_states state) +{ + return node_isset(node, node_states[state]); +} + +static inline void node_set_state(int node, enum node_states state) +{ + __node_set(node, &node_states[state]); +} + +static inline void node_clear_state(int node, enum node_states state) +{ + __node_clear(node, &node_states[state]); +} + +static inline int num_node_state(enum node_states state) +{ + return nodes_weight(node_states[state]); +} + +#define for_each_node_state(__node, __state) \ + for_each_node_mask((__node), node_states[__state]) + +#define first_online_node first_node(node_states[N_ONLINE]) +#define first_memory_node first_node(node_states[N_MEMORY]) +static inline unsigned int next_online_node(int nid) +{ + return next_node(nid, node_states[N_ONLINE]); +} +static inline unsigned int next_memory_node(int nid) +{ + return next_node(nid, node_states[N_MEMORY]); +} + +extern int nr_node_ids; +extern int nr_online_nodes; + +static inline void node_set_online(int nid) +{ + node_set_state(nid, N_ONLINE); + nr_online_nodes = num_node_state(N_ONLINE); +} + +static inline void node_set_offline(int nid) +{ + node_clear_state(nid, N_ONLINE); + nr_online_nodes = num_node_state(N_ONLINE); +} + +#else + +static inline int node_state(int node, enum node_states state) +{ + return node == 0; +} + +static inline void node_set_state(int node, enum node_states state) +{ +} + +static inline void node_clear_state(int node, enum node_states state) +{ +} + +static inline int num_node_state(enum node_states state) +{ + return 1; +} + +#define for_each_node_state(node, __state) \ + for ( (node) = 0; (node) == 0; (node) = 1) + +#define first_online_node 0 +#define first_memory_node 0 +#define next_online_node(nid) (MAX_NUMNODES) +#define nr_node_ids 1 +#define nr_online_nodes 1 + +#define node_set_online(node) node_set_state((node), N_ONLINE) +#define node_set_offline(node) node_clear_state((node), N_ONLINE) + +#endif + +#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1) +extern int node_random(const nodemask_t *maskp); +#else +static inline int node_random(const nodemask_t *mask) +{ + return 0; +} +#endif + +#define node_online_map node_states[N_ONLINE] +#define node_possible_map node_states[N_POSSIBLE] + +#define num_online_nodes() num_node_state(N_ONLINE) +#define num_possible_nodes() num_node_state(N_POSSIBLE) +#define node_online(node) node_state((node), N_ONLINE) +#define node_possible(node) node_state((node), N_POSSIBLE) + +#define for_each_node(node) for_each_node_state(node, N_POSSIBLE) +#define for_each_online_node(node) for_each_node_state(node, N_ONLINE) + +/* + * For nodemask scrach area. + * NODEMASK_ALLOC(type, name) allocates an object with a specified type and + * name. + */ +#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */ +#define NODEMASK_ALLOC(type, name, gfp_flags) \ + type *name = kmalloc(sizeof(*name), gfp_flags) +#define NODEMASK_FREE(m) kfree(m) +#else +#define NODEMASK_ALLOC(type, name, gfp_flags) type _##name, *name = &_##name +#define NODEMASK_FREE(m) do {} while (0) +#endif + +/* A example struture for using NODEMASK_ALLOC, used in mempolicy. 
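+ *
+ * A rough usage sketch (hypothetical caller; 'somemask' is a caller-provided
+ * nodemask_t, and NODEMASK_SCRATCH()/NODEMASK_SCRATCH_FREE() are defined
+ * below):
+ *
+ *     NODEMASK_SCRATCH(scratch);
+ *
+ *     if (!scratch)
+ *         return -ENOMEM;
+ *     nodes_and(scratch->mask1, node_online_map, somemask);
+ *     ...
+ *     NODEMASK_SCRATCH_FREE(scratch);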
*/ +struct nodemask_scratch { + nodemask_t mask1; + nodemask_t mask2; +}; + +#define NODEMASK_SCRATCH(x) \ + NODEMASK_ALLOC(struct nodemask_scratch, x, \ + GFP_KERNEL | __GFP_NORETRY) +#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) + + +#endif /* __LINUX_NODEMASK_H */ diff --git a/include/linux/nospec.h b/include/linux/nospec.h new file mode 100644 index 000000000..0c5ef54fd --- /dev/null +++ b/include/linux/nospec.h @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright(c) 2018 Linus Torvalds. All rights reserved. +// Copyright(c) 2018 Alexei Starovoitov. All rights reserved. +// Copyright(c) 2018 Intel Corporation. All rights reserved. + +#ifndef _LINUX_NOSPEC_H +#define _LINUX_NOSPEC_H +#include + +struct task_struct; + +/** + * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise + * @index: array element index + * @size: number of elements in array + * + * When @index is out of bounds (@index >= @size), the sign bit will be + * set. Extend the sign bit to all bits and invert, giving a result of + * zero for an out of bounds index, or ~0 if within bounds [0, @size). + */ +#ifndef array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. The compiler does not take + * into account the value of @index under speculation. + */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} +#endif + +/* + * array_index_nospec - sanitize an array index after a bounds check + * + * For a code sequence like: + * + * if (index < size) { + * index = array_index_nospec(index, size); + * val = array[index]; + * } + * + * ...if the CPU speculates past the bounds check then + * array_index_nospec() will clamp the index within the range of [0, + * size). + */ +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + (typeof(_i)) (_i & _mask); \ +}) + +/* Speculation control prctl */ +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which); +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl); +/* Speculation control for seccomp enforced mitigation */ +void arch_seccomp_spec_mitigate(struct task_struct *task); + +#endif /* _LINUX_NOSPEC_H */ diff --git a/include/linux/notifier.h b/include/linux/notifier.h new file mode 100644 index 000000000..0096a0539 --- /dev/null +++ b/include/linux/notifier.h @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Routines to manage notifier chains for passing status changes to any + * interested routines. We need this instead of hard coded call lists so + * that modules can poke their nose into the innards. The network devices + * needed them so here they are for the rest of you. + * + * Alan Cox + */ + +#ifndef _LINUX_NOTIFIER_H +#define _LINUX_NOTIFIER_H +#include +#include +#include +#include + +/* + * Notifier chains are of four types: + * + * Atomic notifier chains: Chain callbacks run in interrupt/atomic + * context. Callouts are not allowed to block. + * Blocking notifier chains: Chain callbacks run in process context. + * Callouts are allowed to block. 
+ * Raw notifier chains: There are no restrictions on callbacks, + * registration, or unregistration. All locking and protection + * must be provided by the caller. + * SRCU notifier chains: A variant of blocking notifier chains, with + * the same restrictions. + * + * atomic_notifier_chain_register() may be called from an atomic context, + * but blocking_notifier_chain_register() and srcu_notifier_chain_register() + * must be called from a process context. Ditto for the corresponding + * _unregister() routines. + * + * atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(), + * and srcu_notifier_chain_unregister() _must not_ be called from within + * the call chain. + * + * SRCU notifier chains are an alternative form of blocking notifier chains. + * They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for + * protection of the chain links. This means there is _very_ low overhead + * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. + * As compensation, srcu_notifier_chain_unregister() is rather expensive. + * SRCU notifier chains should be used when the chain will be called very + * often but notifier_blocks will seldom be removed. + */ + +struct notifier_block; + +typedef int (*notifier_fn_t)(struct notifier_block *nb, + unsigned long action, void *data); + +struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block __rcu *next; + int priority; +}; + +struct atomic_notifier_head { + spinlock_t lock; + struct notifier_block __rcu *head; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block __rcu *head; +}; + +struct raw_notifier_head { + struct notifier_block __rcu *head; +}; + +struct srcu_notifier_head { + struct mutex mutex; + struct srcu_struct srcu; + struct notifier_block __rcu *head; +}; + +#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ + spin_lock_init(&(name)->lock); \ + (name)->head = NULL; \ + } while (0) +#define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \ + init_rwsem(&(name)->rwsem); \ + (name)->head = NULL; \ + } while (0) +#define RAW_INIT_NOTIFIER_HEAD(name) do { \ + (name)->head = NULL; \ + } while (0) + +/* srcu_notifier_heads must be cleaned up dynamically */ +extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); +#define srcu_cleanup_notifier_head(name) \ + cleanup_srcu_struct(&(name)->srcu); + +#define ATOMIC_NOTIFIER_INIT(name) { \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .head = NULL } +#define BLOCKING_NOTIFIER_INIT(name) { \ + .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ + .head = NULL } +#define RAW_NOTIFIER_INIT(name) { \ + .head = NULL } + +#define SRCU_NOTIFIER_INIT(name, pcpu) \ + { \ + .mutex = __MUTEX_INITIALIZER(name.mutex), \ + .head = NULL, \ + .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ + } + +#define ATOMIC_NOTIFIER_HEAD(name) \ + struct atomic_notifier_head name = \ + ATOMIC_NOTIFIER_INIT(name) +#define BLOCKING_NOTIFIER_HEAD(name) \ + struct blocking_notifier_head name = \ + BLOCKING_NOTIFIER_INIT(name) +#define RAW_NOTIFIER_HEAD(name) \ + struct raw_notifier_head name = \ + RAW_NOTIFIER_INIT(name) + +#ifdef CONFIG_TREE_SRCU +#define _SRCU_NOTIFIER_HEAD(name, mod) \ + static DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \ + mod struct srcu_notifier_head name = \ + SRCU_NOTIFIER_INIT(name, name##_head_srcu_data) + +#else +#define _SRCU_NOTIFIER_HEAD(name, mod) \ + mod struct srcu_notifier_head name = \ + SRCU_NOTIFIER_INIT(name, name) + +#endif + +#define SRCU_NOTIFIER_HEAD(name) \ + 
_SRCU_NOTIFIER_HEAD(name, /* not static */) + +#define SRCU_NOTIFIER_HEAD_STATIC(name) \ + _SRCU_NOTIFIER_HEAD(name, static) + +#ifdef __KERNEL__ + +extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, + struct notifier_block *nb); +extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh, + struct notifier_block *nb); +extern int raw_notifier_chain_register(struct raw_notifier_head *nh, + struct notifier_block *nb); +extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, + struct notifier_block *nb); + +extern int blocking_notifier_chain_cond_register( + struct blocking_notifier_head *nh, + struct notifier_block *nb); + +extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, + struct notifier_block *nb); +extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, + struct notifier_block *nb); +extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh, + struct notifier_block *nb); +extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, + struct notifier_block *nb); + +extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v); +extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); +extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v); +extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); +extern int raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v); +extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); +extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v); +extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); + +#define NOTIFY_DONE 0x0000 /* Don't care */ +#define NOTIFY_OK 0x0001 /* Suits me */ +#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ +#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) + /* Bad/Veto action */ +/* + * Clean way to return from the notifier and stop further calls. + */ +#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK) + +/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */ +static inline int notifier_from_errno(int err) +{ + if (err) + return NOTIFY_STOP_MASK | (NOTIFY_OK - err); + + return NOTIFY_OK; +} + +/* Restore (negative) errno value from notify return value. */ +static inline int notifier_to_errno(int ret) +{ + ret &= ~NOTIFY_STOP_MASK; + return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0; +} + +/* + * Declared notifiers so far. I can imagine quite a few more chains + * over time (eg laptop power reset chains, reboot chain (to clean + * device units up), device [un]mount chain, module load/unload chain, + * low memory chain, screenblank chain (for plug in modular screenblankers) + * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... + */ + +/* CPU notfiers are defined in include/linux/cpu.h. */ + +/* netdevice notifiers are defined in include/linux/netdevice.h */ + +/* reboot notifiers are defined in include/linux/reboot.h. */ + +/* Hibernation and suspend events are defined in include/linux/suspend.h. */ + +/* Virtual Terminal events are defined in include/linux/vt.h. 
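+ *
+ * A rough sketch of hooking one of the chains mentioned above (the handler
+ * and notifier_block names are hypothetical; reboot_notifier_list is
+ * declared further below):
+ *
+ *     static int my_reboot_handler(struct notifier_block *nb,
+ *                                  unsigned long action, void *data)
+ *     {
+ *         return NOTIFY_DONE;
+ *     }
+ *
+ *     static struct notifier_block my_reboot_nb = {
+ *         .notifier_call = my_reboot_handler,
+ *     };
+ *
+ *     blocking_notifier_chain_register(&reboot_notifier_list, &my_reboot_nb);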
*/ + +#define NETLINK_URELEASE 0x0001 /* Unicast netlink socket released */ + +/* Console keyboard events. + * Note: KBD_KEYCODE is always sent before KBD_UNBOUND_KEYCODE, KBD_UNICODE and + * KBD_KEYSYM. */ +#define KBD_KEYCODE 0x0001 /* Keyboard keycode, called before any other */ +#define KBD_UNBOUND_KEYCODE 0x0002 /* Keyboard keycode which is not bound to any other */ +#define KBD_UNICODE 0x0003 /* Keyboard unicode */ +#define KBD_KEYSYM 0x0004 /* Keyboard keysym */ +#define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */ + +extern struct blocking_notifier_head reboot_notifier_list; + +#endif /* __KERNEL__ */ +#endif /* _LINUX_NOTIFIER_H */ diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h new file mode 100644 index 000000000..5fbc40003 --- /dev/null +++ b/include/linux/ns_common.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_NS_COMMON_H +#define _LINUX_NS_COMMON_H + +struct proc_ns_operations; + +struct ns_common { + atomic_long_t stashed; + const struct proc_ns_operations *ops; + unsigned int inum; +}; + +#endif diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h new file mode 100644 index 000000000..d7a04a6e3 --- /dev/null +++ b/include/linux/nsc_gpio.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + nsc_gpio.c + + National Semiconductor GPIO common access methods. + + struct nsc_gpio_ops abstracts the low-level access + operations for the GPIO units on 2 NSC chip families; the GEODE + integrated CPU, and the PC-8736[03456] integrated PC-peripheral + chips. + + The GPIO units on these chips have the same pin architecture, but + the access methods differ. Thus, scx200_gpio and pc8736x_gpio + implement their own versions of these routines; and use the common + file-operations routines implemented in nsc_gpio module. + + Copyright (c) 2005 Jim Cromie + + NB: this work was tested on the Geode SC-1100 and PC-87366 chips. + NSC sold the GEODE line to AMD, and the PC-8736x line to Winbond. +*/ + +struct nsc_gpio_ops { + struct module* owner; + u32 (*gpio_config) (unsigned iminor, u32 mask, u32 bits); + void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor); + int (*gpio_get) (unsigned iminor); + void (*gpio_set) (unsigned iminor, int state); + void (*gpio_change) (unsigned iminor); + int (*gpio_current) (unsigned iminor); + struct device* dev; /* for dev_dbg() support, set in init */ +}; + +extern ssize_t nsc_gpio_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos); + +extern ssize_t nsc_gpio_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); + +extern void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index); + diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h new file mode 100644 index 000000000..2ae1b1a4d --- /dev/null +++ b/include/linux/nsproxy.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_NSPROXY_H +#define _LINUX_NSPROXY_H + +#include +#include + +struct mnt_namespace; +struct uts_namespace; +struct ipc_namespace; +struct pid_namespace; +struct cgroup_namespace; +struct fs_struct; + +/* + * A structure to contain pointers to all per-process + * namespaces - fs (mount), uts, network, sysvipc, etc. + * + * The pid namespace is an exception -- it's accessed using + * task_active_pid_ns. The pid namespace here is the + * namespace that children will use. + * + * 'count' is the number of tasks holding a reference. 
+ * The count for each namespace, then, will be the number + * of nsproxies pointing to it, not the number of tasks. + * + * The nsproxy is shared by tasks which share all namespaces. + * As soon as a single namespace is cloned or unshared, the + * nsproxy is copied. + */ +struct nsproxy { + atomic_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns_for_children; + struct net *net_ns; + struct cgroup_namespace *cgroup_ns; +}; +extern struct nsproxy init_nsproxy; + +/* + * the namespaces access rules are: + * + * 1. only current task is allowed to change tsk->nsproxy pointer or + * any pointer on the nsproxy itself. Current must hold the task_lock + * when changing tsk->nsproxy. + * + * 2. when accessing (i.e. reading) current task's namespaces - no + * precautions should be taken - just dereference the pointers + * + * 3. the access to other task namespaces is performed like this + * task_lock(task); + * nsproxy = task->nsproxy; + * if (nsproxy != NULL) { + * / * + * * work with the namespaces here + * * e.g. get the reference on one of them + * * / + * } / * + * * NULL task->nsproxy means that this task is + * * almost dead (zombie) + * * / + * task_unlock(task); + * + */ + +int copy_namespaces(unsigned long flags, struct task_struct *tsk); +void exit_task_namespaces(struct task_struct *tsk); +void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new); +void free_nsproxy(struct nsproxy *ns); +int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **, + struct cred *, struct fs_struct *); +int __init nsproxy_cache_init(void); + +static inline void put_nsproxy(struct nsproxy *ns) +{ + if (atomic_dec_and_test(&ns->count)) { + free_nsproxy(ns); + } +} + +static inline void get_nsproxy(struct nsproxy *ns) +{ + atomic_inc(&ns->count); +} + +#endif diff --git a/include/linux/ntb.h b/include/linux/ntb.h new file mode 100644 index 000000000..181d16601 --- /dev/null +++ b/include/linux/ntb.h @@ -0,0 +1,1505 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2015 EMC Corporation. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2015 EMC Corporation. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copy + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * PCIe NTB Linux driver + * + * Contact Information: + * Allen Hubbe + */ + +#ifndef _NTB_H_ +#define _NTB_H_ + +#include +#include + +struct ntb_client; +struct ntb_dev; +struct pci_dev; + +/** + * enum ntb_topo - NTB connection topology + * @NTB_TOPO_NONE: Topology is unknown or invalid. + * @NTB_TOPO_PRI: On primary side of local ntb. + * @NTB_TOPO_SEC: On secondary side of remote ntb. + * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. + * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. + * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb. + * @NTB_TOPO_CROSSLINK: Connected via two symmetric switchecs + */ +enum ntb_topo { + NTB_TOPO_NONE = -1, + NTB_TOPO_PRI, + NTB_TOPO_SEC, + NTB_TOPO_B2B_USD, + NTB_TOPO_B2B_DSD, + NTB_TOPO_SWITCH, + NTB_TOPO_CROSSLINK, +}; + +static inline int ntb_topo_is_b2b(enum ntb_topo topo) +{ + switch ((int)topo) { + case NTB_TOPO_B2B_USD: + case NTB_TOPO_B2B_DSD: + return 1; + } + return 0; +} + +static inline char *ntb_topo_string(enum ntb_topo topo) +{ + switch (topo) { + case NTB_TOPO_NONE: return "NTB_TOPO_NONE"; + case NTB_TOPO_PRI: return "NTB_TOPO_PRI"; + case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; + case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; + case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; + case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH"; + case NTB_TOPO_CROSSLINK: return "NTB_TOPO_CROSSLINK"; + } + return "NTB_TOPO_INVALID"; +} + +/** + * enum ntb_speed - NTB link training speed + * @NTB_SPEED_AUTO: Request the max supported speed. + * @NTB_SPEED_NONE: Link is not trained to any speed. + * @NTB_SPEED_GEN1: Link is trained to gen1 speed. + * @NTB_SPEED_GEN2: Link is trained to gen2 speed. + * @NTB_SPEED_GEN3: Link is trained to gen3 speed. + * @NTB_SPEED_GEN4: Link is trained to gen4 speed. + */ +enum ntb_speed { + NTB_SPEED_AUTO = -1, + NTB_SPEED_NONE = 0, + NTB_SPEED_GEN1 = 1, + NTB_SPEED_GEN2 = 2, + NTB_SPEED_GEN3 = 3, + NTB_SPEED_GEN4 = 4 +}; + +/** + * enum ntb_width - NTB link training width + * @NTB_WIDTH_AUTO: Request the max supported width. + * @NTB_WIDTH_NONE: Link is not trained to any width. + * @NTB_WIDTH_1: Link is trained to 1 lane width. + * @NTB_WIDTH_2: Link is trained to 2 lane width. + * @NTB_WIDTH_4: Link is trained to 4 lane width. + * @NTB_WIDTH_8: Link is trained to 8 lane width. + * @NTB_WIDTH_12: Link is trained to 12 lane width. + * @NTB_WIDTH_16: Link is trained to 16 lane width. + * @NTB_WIDTH_32: Link is trained to 32 lane width. 
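+ *
+ * A minimal illustrative sketch (assumes 'ntb' is a bound struct ntb_dev and
+ * that the peer link has already been enabled elsewhere):
+ *
+ *     enum ntb_speed speed;
+ *     enum ntb_width width;
+ *
+ *     if (ntb_link_is_up(ntb, &speed, &width))
+ *         dev_dbg(&ntb->dev, "link is up: gen%d x%d\n", speed, width);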
+ */ +enum ntb_width { + NTB_WIDTH_AUTO = -1, + NTB_WIDTH_NONE = 0, + NTB_WIDTH_1 = 1, + NTB_WIDTH_2 = 2, + NTB_WIDTH_4 = 4, + NTB_WIDTH_8 = 8, + NTB_WIDTH_12 = 12, + NTB_WIDTH_16 = 16, + NTB_WIDTH_32 = 32, +}; + +/** + * enum ntb_default_port - NTB default port number + * @NTB_PORT_PRI_USD: Default port of the NTB_TOPO_PRI/NTB_TOPO_B2B_USD + * topologies + * @NTB_PORT_SEC_DSD: Default port of the NTB_TOPO_SEC/NTB_TOPO_B2B_DSD + * topologies + */ +enum ntb_default_port { + NTB_PORT_PRI_USD, + NTB_PORT_SEC_DSD +}; +#define NTB_DEF_PEER_CNT (1) +#define NTB_DEF_PEER_IDX (0) + +/** + * struct ntb_client_ops - ntb client operations + * @probe: Notify client of a new device. + * @remove: Notify client to remove a device. + */ +struct ntb_client_ops { + int (*probe)(struct ntb_client *client, struct ntb_dev *ntb); + void (*remove)(struct ntb_client *client, struct ntb_dev *ntb); +}; + +static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops) +{ + /* commented callbacks are not required: */ + return + ops->probe && + ops->remove && + 1; +} + +/** + * struct ntb_ctx_ops - ntb driver context operations + * @link_event: See ntb_link_event(). + * @db_event: See ntb_db_event(). + * @msg_event: See ntb_msg_event(). + */ +struct ntb_ctx_ops { + void (*link_event)(void *ctx); + void (*db_event)(void *ctx, int db_vector); + void (*msg_event)(void *ctx); +}; + +static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops) +{ + /* commented callbacks are not required: */ + return + /* ops->link_event && */ + /* ops->db_event && */ + /* ops->msg_event && */ + 1; +} + +/** + * struct ntb_ctx_ops - ntb device operations + * @port_number: See ntb_port_number(). + * @peer_port_count: See ntb_peer_port_count(). + * @peer_port_number: See ntb_peer_port_number(). + * @peer_port_idx: See ntb_peer_port_idx(). + * @link_is_up: See ntb_link_is_up(). + * @link_enable: See ntb_link_enable(). + * @link_disable: See ntb_link_disable(). + * @mw_count: See ntb_mw_count(). + * @mw_get_align: See ntb_mw_get_align(). + * @mw_set_trans: See ntb_mw_set_trans(). + * @mw_clear_trans: See ntb_mw_clear_trans(). + * @peer_mw_count: See ntb_peer_mw_count(). + * @peer_mw_get_addr: See ntb_peer_mw_get_addr(). + * @peer_mw_set_trans: See ntb_peer_mw_set_trans(). + * @peer_mw_clear_trans:See ntb_peer_mw_clear_trans(). + * @db_is_unsafe: See ntb_db_is_unsafe(). + * @db_valid_mask: See ntb_db_valid_mask(). + * @db_vector_count: See ntb_db_vector_count(). + * @db_vector_mask: See ntb_db_vector_mask(). + * @db_read: See ntb_db_read(). + * @db_set: See ntb_db_set(). + * @db_clear: See ntb_db_clear(). + * @db_read_mask: See ntb_db_read_mask(). + * @db_set_mask: See ntb_db_set_mask(). + * @db_clear_mask: See ntb_db_clear_mask(). + * @peer_db_addr: See ntb_peer_db_addr(). + * @peer_db_read: See ntb_peer_db_read(). + * @peer_db_set: See ntb_peer_db_set(). + * @peer_db_clear: See ntb_peer_db_clear(). + * @peer_db_read_mask: See ntb_peer_db_read_mask(). + * @peer_db_set_mask: See ntb_peer_db_set_mask(). + * @peer_db_clear_mask: See ntb_peer_db_clear_mask(). + * @spad_is_unsafe: See ntb_spad_is_unsafe(). + * @spad_count: See ntb_spad_count(). + * @spad_read: See ntb_spad_read(). + * @spad_write: See ntb_spad_write(). + * @peer_spad_addr: See ntb_peer_spad_addr(). + * @peer_spad_read: See ntb_peer_spad_read(). + * @peer_spad_write: See ntb_peer_spad_write(). + * @msg_count: See ntb_msg_count(). + * @msg_inbits: See ntb_msg_inbits(). + * @msg_outbits: See ntb_msg_outbits(). + * @msg_read_sts: See ntb_msg_read_sts(). 
+ * @msg_clear_sts: See ntb_msg_clear_sts(). + * @msg_set_mask: See ntb_msg_set_mask(). + * @msg_clear_mask: See ntb_msg_clear_mask(). + * @msg_read: See ntb_msg_read(). + * @peer_msg_write: See ntb_peer_msg_write(). + */ +struct ntb_dev_ops { + int (*port_number)(struct ntb_dev *ntb); + int (*peer_port_count)(struct ntb_dev *ntb); + int (*peer_port_number)(struct ntb_dev *ntb, int pidx); + int (*peer_port_idx)(struct ntb_dev *ntb, int port); + + u64 (*link_is_up)(struct ntb_dev *ntb, + enum ntb_speed *speed, enum ntb_width *width); + int (*link_enable)(struct ntb_dev *ntb, + enum ntb_speed max_speed, enum ntb_width max_width); + int (*link_disable)(struct ntb_dev *ntb); + + int (*mw_count)(struct ntb_dev *ntb, int pidx); + int (*mw_get_align)(struct ntb_dev *ntb, int pidx, int widx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max); + int (*mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx, + dma_addr_t addr, resource_size_t size); + int (*mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx); + int (*peer_mw_count)(struct ntb_dev *ntb); + int (*peer_mw_get_addr)(struct ntb_dev *ntb, int widx, + phys_addr_t *base, resource_size_t *size); + int (*peer_mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx, + u64 addr, resource_size_t size); + int (*peer_mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx); + + int (*db_is_unsafe)(struct ntb_dev *ntb); + u64 (*db_valid_mask)(struct ntb_dev *ntb); + int (*db_vector_count)(struct ntb_dev *ntb); + u64 (*db_vector_mask)(struct ntb_dev *ntb, int db_vector); + + u64 (*db_read)(struct ntb_dev *ntb); + int (*db_set)(struct ntb_dev *ntb, u64 db_bits); + int (*db_clear)(struct ntb_dev *ntb, u64 db_bits); + + u64 (*db_read_mask)(struct ntb_dev *ntb); + int (*db_set_mask)(struct ntb_dev *ntb, u64 db_bits); + int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits); + + int (*peer_db_addr)(struct ntb_dev *ntb, + phys_addr_t *db_addr, resource_size_t *db_size); + u64 (*peer_db_read)(struct ntb_dev *ntb); + int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits); + int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits); + + u64 (*peer_db_read_mask)(struct ntb_dev *ntb); + int (*peer_db_set_mask)(struct ntb_dev *ntb, u64 db_bits); + int (*peer_db_clear_mask)(struct ntb_dev *ntb, u64 db_bits); + + int (*spad_is_unsafe)(struct ntb_dev *ntb); + int (*spad_count)(struct ntb_dev *ntb); + + u32 (*spad_read)(struct ntb_dev *ntb, int sidx); + int (*spad_write)(struct ntb_dev *ntb, int sidx, u32 val); + + int (*peer_spad_addr)(struct ntb_dev *ntb, int pidx, int sidx, + phys_addr_t *spad_addr); + u32 (*peer_spad_read)(struct ntb_dev *ntb, int pidx, int sidx); + int (*peer_spad_write)(struct ntb_dev *ntb, int pidx, int sidx, + u32 val); + + int (*msg_count)(struct ntb_dev *ntb); + u64 (*msg_inbits)(struct ntb_dev *ntb); + u64 (*msg_outbits)(struct ntb_dev *ntb); + u64 (*msg_read_sts)(struct ntb_dev *ntb); + int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits); + int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits); + int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits); + u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx); + int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg); +}; + +static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) +{ + /* commented callbacks are not required: */ + return + /* Port operations are required for multiport devices */ + !ops->peer_port_count == !ops->port_number && + !ops->peer_port_number == !ops->port_number && + !ops->peer_port_idx 
== !ops->port_number && + + /* Link operations are required */ + ops->link_is_up && + ops->link_enable && + ops->link_disable && + + /* One or both MW interfaces should be developed */ + ops->mw_count && + ops->mw_get_align && + (ops->mw_set_trans || + ops->peer_mw_set_trans) && + /* ops->mw_clear_trans && */ + ops->peer_mw_count && + ops->peer_mw_get_addr && + /* ops->peer_mw_clear_trans && */ + + /* Doorbell operations are mostly required */ + /* ops->db_is_unsafe && */ + ops->db_valid_mask && + /* both set, or both unset */ + (!ops->db_vector_count == !ops->db_vector_mask) && + ops->db_read && + /* ops->db_set && */ + ops->db_clear && + /* ops->db_read_mask && */ + ops->db_set_mask && + ops->db_clear_mask && + /* ops->peer_db_addr && */ + /* ops->peer_db_read && */ + ops->peer_db_set && + /* ops->peer_db_clear && */ + /* ops->peer_db_read_mask && */ + /* ops->peer_db_set_mask && */ + /* ops->peer_db_clear_mask && */ + + /* Scrachpads interface is optional */ + /* !ops->spad_is_unsafe == !ops->spad_count && */ + !ops->spad_read == !ops->spad_count && + !ops->spad_write == !ops->spad_count && + /* !ops->peer_spad_addr == !ops->spad_count && */ + /* !ops->peer_spad_read == !ops->spad_count && */ + !ops->peer_spad_write == !ops->spad_count && + + /* Messaging interface is optional */ + !ops->msg_inbits == !ops->msg_count && + !ops->msg_outbits == !ops->msg_count && + !ops->msg_read_sts == !ops->msg_count && + !ops->msg_clear_sts == !ops->msg_count && + /* !ops->msg_set_mask == !ops->msg_count && */ + /* !ops->msg_clear_mask == !ops->msg_count && */ + !ops->msg_read == !ops->msg_count && + !ops->peer_msg_write == !ops->msg_count && + 1; +} + +/** + * struct ntb_client - client interested in ntb devices + * @drv: Linux driver object. + * @ops: See &ntb_client_ops. + */ +struct ntb_client { + struct device_driver drv; + const struct ntb_client_ops ops; +}; +#define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv) + +/** + * struct ntb_device - ntb device + * @dev: Linux device object. + * @pdev: PCI device entry of the ntb. + * @topo: Detected topology of the ntb. + * @ops: See &ntb_dev_ops. + * @ctx: See &ntb_ctx_ops. + * @ctx_ops: See &ntb_ctx_ops. + */ +struct ntb_dev { + struct device dev; + struct pci_dev *pdev; + enum ntb_topo topo; + const struct ntb_dev_ops *ops; + void *ctx; + const struct ntb_ctx_ops *ctx_ops; + + /* private: */ + + /* synchronize setting, clearing, and calling ctx_ops */ + spinlock_t ctx_lock; + /* block unregister until device is fully released */ + struct completion released; +}; +#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev) + +/** + * ntb_register_client() - register a client for interest in ntb devices + * @client: Client context. + * + * The client will be added to the list of clients interested in ntb devices. + * The client will be notified of any ntb devices that are not already + * associated with a client, or if ntb devices are registered later. + * + * Return: Zero if the client is registered, otherwise an error number. + */ +#define ntb_register_client(client) \ + __ntb_register_client((client), THIS_MODULE, KBUILD_MODNAME) + +int __ntb_register_client(struct ntb_client *client, struct module *mod, + const char *mod_name); + +/** + * ntb_unregister_client() - unregister a client for interest in ntb devices + * @client: Client context. + * + * The client will be removed from the list of clients interested in ntb + * devices. 
If any ntb devices are associated with the client, the client will + * be notified to remove those devices. + */ +void ntb_unregister_client(struct ntb_client *client); + +#define module_ntb_client(__ntb_client) \ + module_driver(__ntb_client, ntb_register_client, \ + ntb_unregister_client) + +/** + * ntb_register_device() - register a ntb device + * @ntb: NTB device context. + * + * The device will be added to the list of ntb devices. If any clients are + * interested in ntb devices, each client will be notified of the ntb device, + * until at most one client accepts the device. + * + * Return: Zero if the device is registered, otherwise an error number. + */ +int ntb_register_device(struct ntb_dev *ntb); + +/** + * ntb_unregister_device() - unregister a ntb device + * @ntb: NTB device context. + * + * The device will be removed from the list of ntb devices. If the ntb device + * is associated with a client, the client will be notified to remove the + * device. + */ +void ntb_unregister_device(struct ntb_dev *ntb); + +/** + * ntb_set_ctx() - associate a driver context with an ntb device + * @ntb: NTB device context. + * @ctx: Driver context. + * @ctx_ops: Driver context operations. + * + * Associate a driver context and operations with a ntb device. The context is + * provided by the client driver, and the driver may associate a different + * context with each ntb device. + * + * Return: Zero if the context is associated, otherwise an error number. + */ +int ntb_set_ctx(struct ntb_dev *ntb, void *ctx, + const struct ntb_ctx_ops *ctx_ops); + +/** + * ntb_clear_ctx() - disassociate any driver context from an ntb device + * @ntb: NTB device context. + * + * Clear any association that may exist between a driver context and the ntb + * device. + */ +void ntb_clear_ctx(struct ntb_dev *ntb); + +/** + * ntb_link_event() - notify driver context of a change in link status + * @ntb: NTB device context. + * + * Notify the driver context that the link status may have changed. The driver + * should call ntb_link_is_up() to get the current status. + */ +void ntb_link_event(struct ntb_dev *ntb); + +/** + * ntb_db_event() - notify driver context of a doorbell event + * @ntb: NTB device context. + * @vector: Interrupt vector number. + * + * Notify the driver context of a doorbell event. If hardware supports + * multiple interrupt vectors for doorbells, the vector number indicates which + * vector received the interrupt. The vector number is relative to the first + * vector used for doorbells, starting at zero, and must be less than + * ntb_db_vector_count(). The driver may call ntb_db_read() to check which + * doorbell bits need service, and ntb_db_vector_mask() to determine which of + * those bits are associated with the vector number. + */ +void ntb_db_event(struct ntb_dev *ntb, int vector); + +/** + * ntb_msg_event() - notify driver context of a message event + * @ntb: NTB device context. + * + * Notify the driver context of a message event. If hardware supports + * message registers, this event indicates that a new message arrived in + * some incoming message register or the last sent message couldn't be delivered. + * The events can be masked/unmasked by the methods ntb_msg_set_mask() and + * ntb_msg_clear_mask(). + */ +void ntb_msg_event(struct ntb_dev *ntb); + +/** + * ntb_default_port_number() - get the default local port number + * @ntb: NTB device context. + * + * If the hardware driver doesn't specify port_number() callback method, the NTB + * is considered to have just two ports.
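A hedged sketch of how a client module might use the registration and context calls above; the my_* names are illustrative, the ntb_ctx_ops callback signatures follow the definitions earlier in this header, and error handling is trimmed to the essentials.

struct my_ctx {
	struct ntb_dev *ntb;
};

static void my_link_work(void *ctx)
{
	/* re-check ntb_link_is_up() and (re)program memory windows here */
}

static void my_db_work(void *ctx, int vector)
{
	/* service the doorbell bits that belong to @vector here */
}

static const struct ntb_ctx_ops my_ctx_ops = {
	.link_event	= my_link_work,
	.db_event	= my_db_work,
};

static int my_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct my_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->ntb = ntb;

	rc = ntb_set_ctx(ntb, ctx, &my_ctx_ops);
	if (rc) {
		kfree(ctx);
		return rc;
	}

	return ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
}

static void my_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct my_ctx *ctx = ntb->ctx;

	ntb_link_disable(ntb);
	ntb_clear_ctx(ntb);
	kfree(ctx);
}

static struct ntb_client my_client = {
	.ops = {
		.probe	= my_probe,
		.remove	= my_remove,
	},
};
module_ntb_client(my_client);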
So this method returns default local + * port number in compliance with topology. + * + * NOTE Don't call this method directly. The ntb_port_number() function should + * be used instead. + * + * Return: the default local port number + */ +int ntb_default_port_number(struct ntb_dev *ntb); + +/** + * ntb_default_port_count() - get the default number of peer device ports + * @ntb: NTB device context. + * + * By default hardware driver supports just one peer device. + * + * NOTE Don't call this method directly. The ntb_peer_port_count() function + * should be used instead. + * + * Return: the default number of peer ports + */ +int ntb_default_peer_port_count(struct ntb_dev *ntb); + +/** + * ntb_default_peer_port_number() - get the default peer port by given index + * @ntb: NTB device context. + * @idx: Peer port index (should not differ from zero). + * + * By default hardware driver supports just one peer device, so this method + * shall return the corresponding value from enum ntb_default_port. + * + * NOTE Don't call this method directly. The ntb_peer_port_number() function + * should be used instead. + * + * Return: the peer device port or negative value indicating an error + */ +int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx); + +/** + * ntb_default_peer_port_idx() - get the default peer device port index by + * given port number + * @ntb: NTB device context. + * @port: Peer port number (should be one of enum ntb_default_port). + * + * By default hardware driver supports just one peer device, so while + * specified port-argument indicates peer port from enum ntb_default_port, + * the return value shall be zero. + * + * NOTE Don't call this method directly. The ntb_peer_port_idx() function + * should be used instead. + * + * Return: the peer port index or negative value indicating an error + */ +int ntb_default_peer_port_idx(struct ntb_dev *ntb, int port); + +/** + * ntb_port_number() - get the local port number + * @ntb: NTB device context. + * + * Hardware must support at least simple two-ports ntb connection + * + * Return: the local port number + */ +static inline int ntb_port_number(struct ntb_dev *ntb) +{ + if (!ntb->ops->port_number) + return ntb_default_port_number(ntb); + + return ntb->ops->port_number(ntb); +} + +/** + * ntb_peer_port_count() - get the number of peer device ports + * @ntb: NTB device context. + * + * Hardware may support an access to memory of several remote domains + * over multi-port NTB devices. This method returns the number of peers, + * local device can have shared memory with. + * + * Return: the number of peer ports + */ +static inline int ntb_peer_port_count(struct ntb_dev *ntb) +{ + if (!ntb->ops->peer_port_count) + return ntb_default_peer_port_count(ntb); + + return ntb->ops->peer_port_count(ntb); +} + +/** + * ntb_peer_port_number() - get the peer port by given index + * @ntb: NTB device context. + * @pidx: Peer port index. + * + * Peer ports are continuously enumerated by NTB API logic, so this method + * lets to retrieve port real number by its index. + * + * Return: the peer device port or negative value indicating an error + */ +static inline int ntb_peer_port_number(struct ntb_dev *ntb, int pidx) +{ + if (!ntb->ops->peer_port_number) + return ntb_default_peer_port_number(ntb, pidx); + + return ntb->ops->peer_port_number(ntb, pidx); +} + +/** + * ntb_peer_port_idx() - get the peer device port index by given port number + * @ntb: NTB device context. + * @port: Peer port number. 
+ * + * Inverse operation of ntb_peer_port_number(), so one can get port index + * by specified port number. + * + * Return: the peer port index or negative value indicating an error + */ +static inline int ntb_peer_port_idx(struct ntb_dev *ntb, int port) +{ + if (!ntb->ops->peer_port_idx) + return ntb_default_peer_port_idx(ntb, port); + + return ntb->ops->peer_port_idx(ntb, port); +} + +/** + * ntb_link_is_up() - get the current ntb link state + * @ntb: NTB device context. + * @speed: OUT - The link speed expressed as PCIe generation number. + * @width: OUT - The link width expressed as the number of PCIe lanes. + * + * Get the current state of the ntb link. It is recommended to query the link + * state once after every link event. It is safe to query the link state in + * the context of the link event callback. + * + * Return: bitfield of indexed ports link state: bit is set/cleared if the + * link is up/down respectively. + */ +static inline u64 ntb_link_is_up(struct ntb_dev *ntb, + enum ntb_speed *speed, enum ntb_width *width) +{ + return ntb->ops->link_is_up(ntb, speed, width); +} + +/** + * ntb_link_enable() - enable the local port ntb connection + * @ntb: NTB device context. + * @max_speed: The maximum link speed expressed as PCIe generation number. + * @max_width: The maximum link width expressed as the number of PCIe lanes. + * + * Enable the NTB/PCIe link on the local or remote (for bridge-to-bridge + * topology) side of the bridge. If it's supported the ntb device should train + * the link to its maximum speed and width, or the requested speed and width, + * whichever is smaller. Some hardware doesn't support PCIe link training, so + * the last two arguments will be ignored then. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_link_enable(struct ntb_dev *ntb, + enum ntb_speed max_speed, + enum ntb_width max_width) +{ + return ntb->ops->link_enable(ntb, max_speed, max_width); +} + +/** + * ntb_link_disable() - disable the local port ntb connection + * @ntb: NTB device context. + * + * Disable the link on the local or remote (for b2b topology) of the ntb. + * The ntb device should disable the link. Returning from this call must + * indicate that a barrier has passed, though with no more writes may pass in + * either direction across the link, except if this call returns an error + * number. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_link_disable(struct ntb_dev *ntb) +{ + return ntb->ops->link_disable(ntb); +} + +/** + * ntb_mw_count() - get the number of inbound memory windows, which could + * be created for a specified peer device + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * + * Hardware and topology may support a different number of memory windows. + * Moreover different peer devices can support different number of memory + * windows. Simply speaking this method returns the number of possible inbound + * memory windows to share with specified peer device. Note: this may return + * zero if the link is not up yet. + * + * Return: the number of memory windows. + */ +static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx) +{ + return ntb->ops->mw_count(ntb, pidx); +} + +/** + * ntb_mw_get_align() - get the restriction parameters of inbound memory window + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * @widx: Memory window index. 
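As a small usage sketch (my_show_link is illustrative), the per-port bitfield returned by ntb_link_is_up() is typically decoded with the port helpers above once the link has been enabled:

static void my_show_link(struct ntb_dev *ntb)
{
	enum ntb_speed speed;
	enum ntb_width width;
	u64 up = ntb_link_is_up(ntb, &speed, &width);
	int pidx;

	for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++) {
		if (!(up & BIT_ULL(pidx)))
			continue;
		dev_dbg(&ntb->dev, "link to port %d up: PCIe gen%d x%d\n",
			ntb_peer_port_number(ntb, pidx), speed, width);
	}
}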
+ * @addr_align: OUT - the base alignment for translating the memory window + * @size_align: OUT - the size alignment for translating the memory window + * @size_max: OUT - the maximum size of the memory window + * + * Get the alignments of an inbound memory window with specified index. + * NULL may be given for any output parameter if the value is not needed. + * The alignment and size parameters may be used for allocation of proper + * shared memory. Note: this must only be called when the link is up. + * + * Return: Zero on success, otherwise a negative error number. + */ +static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT_ULL(pidx))) + return -ENOTCONN; + + return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align, + size_max); +} + +/** + * ntb_mw_set_trans() - set the translation of an inbound memory window + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * @widx: Memory window index. + * @addr: The dma address of local memory to expose to the peer. + * @size: The size of the local memory to expose to the peer. + * + * Set the translation of a memory window. The peer may access local memory + * through the window starting at the address, up to the size. The address + * and size must be aligned in compliance with restrictions of + * ntb_mw_get_align(). The region size should not exceed the size_max parameter + * of that method. + * + * This method may not be implemented due to the hardware specific memory + * windows interface. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, + dma_addr_t addr, resource_size_t size) +{ + if (!ntb->ops->mw_set_trans) + return 0; + + return ntb->ops->mw_set_trans(ntb, pidx, widx, addr, size); +} + +/** + * ntb_mw_clear_trans() - clear the translation address of an inbound memory + * window + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * @widx: Memory window index. + * + * Clear the translation of an inbound memory window. The peer may no longer + * access local memory through the window. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int pidx, int widx) +{ + if (!ntb->ops->mw_clear_trans) + return ntb_mw_set_trans(ntb, pidx, widx, 0, 0); + + return ntb->ops->mw_clear_trans(ntb, pidx, widx); +} + +/** + * ntb_peer_mw_count() - get the number of outbound memory windows, which could + * be mapped to access a shared memory + * @ntb: NTB device context. + * + * Hardware and topology may support a different number of memory windows. + * This method returns the number of outbound memory windows supported by + * local device. + * + * Return: the number of memory windows. + */ +static inline int ntb_peer_mw_count(struct ntb_dev *ntb) +{ + return ntb->ops->peer_mw_count(ntb); +} + +/** + * ntb_peer_mw_get_addr() - get map address of an outbound memory window + * @ntb: NTB device context. + * @widx: Memory window index (within ntb_peer_mw_count() return value). + * @base: OUT - the base address of mapping region. + * @size: OUT - the size of mapping region. + * + * Get base and size of memory region to map. NULL may be given for any output + * parameter if the value is not needed. The base and size may be used for + * mapping the memory window, to access the peer memory. 
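The alignment contract above is typically consumed as in the hedged sketch below (my_setup_inbound_mw is illustrative, and the alignments are assumed to be powers of two, as is usual): clamp and round the requested size, allocate a DMA buffer, verify its alignment, then program the translation.

static int my_setup_inbound_mw(struct ntb_dev *ntb, int pidx, int widx,
			       resource_size_t size)
{
	resource_size_t addr_align, size_align, size_max;
	dma_addr_t dma_addr;
	void *buf;
	int rc;

	rc = ntb_mw_get_align(ntb, pidx, widx, &addr_align,
			      &size_align, &size_max);
	if (rc)
		return rc;

	size = min(size, size_max);
	size = round_up(size, size_align);

	buf = dma_alloc_coherent(&ntb->pdev->dev, size, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (!IS_ALIGNED(dma_addr, addr_align)) {
		dma_free_coherent(&ntb->pdev->dev, size, buf, dma_addr);
		return -ENOMEM;
	}

	/* expose the buffer to the peer through window @widx */
	return ntb_mw_set_trans(ntb, pidx, widx, dma_addr, size);
}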
+ * + * Return: Zero on success, otherwise a negative error number. + */ +static inline int ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx, + phys_addr_t *base, resource_size_t *size) +{ + return ntb->ops->peer_mw_get_addr(ntb, widx, base, size); +} + +/** + * ntb_peer_mw_set_trans() - set a translation address of a memory window + * retrieved from a peer device + * @ntb: NTB device context. + * @pidx: Port index of peer device the translation address received from. + * @widx: Memory window index. + * @addr: The dma address of the shared memory to access. + * @size: The size of the shared memory to access. + * + * Set the translation of an outbound memory window. The local device may + * access shared memory allocated by a peer device sent the address. + * + * This method may not be implemented due to the hardware specific memory + * windows interface, so a translation address can be only set on the side, + * where shared memory (inbound memory windows) is allocated. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, + u64 addr, resource_size_t size) +{ + if (!ntb->ops->peer_mw_set_trans) + return 0; + + return ntb->ops->peer_mw_set_trans(ntb, pidx, widx, addr, size); +} + +/** + * ntb_peer_mw_clear_trans() - clear the translation address of an outbound + * memory window + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * @widx: Memory window index. + * + * Clear the translation of a outbound memory window. The local device may no + * longer access a shared memory through the window. + * + * This method may not be implemented due to the hardware specific memory + * windows interface. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx, + int widx) +{ + if (!ntb->ops->peer_mw_clear_trans) + return ntb_peer_mw_set_trans(ntb, pidx, widx, 0, 0); + + return ntb->ops->peer_mw_clear_trans(ntb, pidx, widx); +} + +/** + * ntb_db_is_unsafe() - check if it is safe to use hardware doorbell + * @ntb: NTB device context. + * + * It is possible for some ntb hardware to be affected by errata. Hardware + * drivers can advise clients to avoid using doorbells. Clients may ignore + * this advice, though caution is recommended. + * + * Return: Zero if it is safe to use doorbells, or One if it is not safe. + */ +static inline int ntb_db_is_unsafe(struct ntb_dev *ntb) +{ + if (!ntb->ops->db_is_unsafe) + return 0; + + return ntb->ops->db_is_unsafe(ntb); +} + +/** + * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb + * @ntb: NTB device context. + * + * Hardware may support different number or arrangement of doorbell bits. + * + * Return: A mask of doorbell bits supported by the ntb. + */ +static inline u64 ntb_db_valid_mask(struct ntb_dev *ntb) +{ + return ntb->ops->db_valid_mask(ntb); +} + +/** + * ntb_db_vector_count() - get the number of doorbell interrupt vectors + * @ntb: NTB device context. + * + * Hardware may support different number of interrupt vectors. + * + * Return: The number of doorbell interrupt vectors. + */ +static inline int ntb_db_vector_count(struct ntb_dev *ntb) +{ + if (!ntb->ops->db_vector_count) + return 1; + + return ntb->ops->db_vector_count(ntb); +} + +/** + * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector + * @ntb: NTB device context. + * @vector: Doorbell vector number. 
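On the outbound side, the aperture reported by ntb_peer_mw_get_addr() is normally just ioremapped; a brief sketch, with my_map_peer_mw being illustrative:

static void __iomem *my_map_peer_mw(struct ntb_dev *ntb, int widx,
				    resource_size_t *size)
{
	phys_addr_t base;

	if (ntb_peer_mw_get_addr(ntb, widx, &base, size))
		return NULL;

	/* write-combining is the usual choice for bulk data apertures */
	return ioremap_wc(base, *size);
}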
+ * + * Each interrupt vector may have a different number or arrangement of bits. + * + * Return: A mask of doorbell bits serviced by a vector. + */ +static inline u64 ntb_db_vector_mask(struct ntb_dev *ntb, int vector) +{ + if (!ntb->ops->db_vector_mask) + return ntb_db_valid_mask(ntb); + + return ntb->ops->db_vector_mask(ntb, vector); +} + +/** + * ntb_db_read() - read the local doorbell register + * @ntb: NTB device context. + * + * Read the local doorbell register, and return the bits that are set. + * + * Return: The bits currently set in the local doorbell register. + */ +static inline u64 ntb_db_read(struct ntb_dev *ntb) +{ + return ntb->ops->db_read(ntb); +} + +/** + * ntb_db_set() - set bits in the local doorbell register + * @ntb: NTB device context. + * @db_bits: Doorbell bits to set. + * + * Set bits in the local doorbell register, which may generate a local doorbell + * interrupt. Bits that were already set must remain set. + * + * This is unusual, and hardware may not support it. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_db_set(struct ntb_dev *ntb, u64 db_bits) +{ + if (!ntb->ops->db_set) + return -EINVAL; + + return ntb->ops->db_set(ntb, db_bits); +} + +/** + * ntb_db_clear() - clear bits in the local doorbell register + * @ntb: NTB device context. + * @db_bits: Doorbell bits to clear. + * + * Clear bits in the local doorbell register, arming the bits for the next + * doorbell. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) +{ + return ntb->ops->db_clear(ntb, db_bits); +} + +/** + * ntb_db_read_mask() - read the local doorbell mask + * @ntb: NTB device context. + * + * Read the local doorbell mask register, and return the bits that are set. + * + * This is unusual, though hardware is likely to support it. + * + * Return: The bits currently set in the local doorbell mask register. + */ +static inline u64 ntb_db_read_mask(struct ntb_dev *ntb) +{ + if (!ntb->ops->db_read_mask) + return 0; + + return ntb->ops->db_read_mask(ntb); +} + +/** + * ntb_db_set_mask() - set bits in the local doorbell mask + * @ntb: NTB device context. + * @db_bits: Doorbell mask bits to set. + * + * Set bits in the local doorbell mask register, preventing doorbell interrupts + * from being generated for those doorbell bits. Bits that were already set + * must remain set. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) +{ + return ntb->ops->db_set_mask(ntb, db_bits); +} + +/** + * ntb_db_clear_mask() - clear bits in the local doorbell mask + * @ntb: NTB device context. + * @db_bits: Doorbell bits to clear. + * + * Clear bits in the local doorbell mask register, allowing doorbell interrupts + * from being generated for those doorbell bits. If a doorbell bit is already + * set at the time the mask is cleared, and the corresponding mask bit is + * changed from set to clear, then the ntb driver must ensure that + * ntb_db_event() is called. If the hardware does not generate the interrupt + * on clearing the mask bit, then the driver must call ntb_db_event() anyway. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) +{ + return ntb->ops->db_clear_mask(ntb, db_bits); +} + +/** + * ntb_peer_db_addr() - address and size of the peer doorbell register + * @ntb: NTB device context. 
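Putting the local doorbell accessors together, a doorbell handler might mask, collect and re-arm the bits owned by its vector roughly as in this sketch (my_service_db_vector is illustrative):

static void my_service_db_vector(struct ntb_dev *ntb, int vector)
{
	u64 db_bits;

	db_bits = ntb_db_read(ntb) & ntb_db_vector_mask(ntb, vector);

	ntb_db_set_mask(ntb, db_bits);	/* quiesce while we work */
	ntb_db_clear(ntb, db_bits);	/* arm for the next ring */

	/* ... process whatever work db_bits signals ... */

	ntb_db_clear_mask(ntb, db_bits); /* unmask again */
}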
+ * @db_addr: OUT - The address of the peer doorbell register. + * @db_size: OUT - The number of bytes to write the peer doorbell register. + * + * Return the address of the peer doorbell register. This may be used, for + * example, by drivers that offload memory copy operations to a dma engine. + * The drivers may wish to ring the peer doorbell at the completion of memory + * copy operations. For efficiency, and to simplify ordering of operations + * between the dma memory copies and the ringing doorbell, the driver may + * append one additional dma memory copy with the doorbell register as the + * destination, after the memory copy operations. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_peer_db_addr(struct ntb_dev *ntb, + phys_addr_t *db_addr, + resource_size_t *db_size) +{ + if (!ntb->ops->peer_db_addr) + return -EINVAL; + + return ntb->ops->peer_db_addr(ntb, db_addr, db_size); +} + +/** + * ntb_peer_db_read() - read the peer doorbell register + * @ntb: NTB device context. + * + * Read the peer doorbell register, and return the bits that are set. + * + * This is unusual, and hardware may not support it. + * + * Return: The bits currently set in the peer doorbell register. + */ +static inline u64 ntb_peer_db_read(struct ntb_dev *ntb) +{ + if (!ntb->ops->peer_db_read) + return 0; + + return ntb->ops->peer_db_read(ntb); +} + +/** + * ntb_peer_db_set() - set bits in the peer doorbell register + * @ntb: NTB device context. + * @db_bits: Doorbell bits to set. + * + * Set bits in the peer doorbell register, which may generate a peer doorbell + * interrupt. Bits that were already set must remain set. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) +{ + return ntb->ops->peer_db_set(ntb, db_bits); +} + +/** + * ntb_peer_db_clear() - clear bits in the peer doorbell register + * @ntb: NTB device context. + * @db_bits: Doorbell bits to clear. + * + * Clear bits in the peer doorbell register, arming the bits for the next + * doorbell. + * + * This is unusual, and hardware may not support it. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_peer_db_clear(struct ntb_dev *ntb, u64 db_bits) +{ + if (!ntb->ops->db_clear) + return -EINVAL; + + return ntb->ops->peer_db_clear(ntb, db_bits); +} + +/** + * ntb_peer_db_read_mask() - read the peer doorbell mask + * @ntb: NTB device context. + * + * Read the peer doorbell mask register, and return the bits that are set. + * + * This is unusual, and hardware may not support it. + * + * Return: The bits currently set in the peer doorbell mask register. + */ +static inline u64 ntb_peer_db_read_mask(struct ntb_dev *ntb) +{ + if (!ntb->ops->db_read_mask) + return 0; + + return ntb->ops->peer_db_read_mask(ntb); +} + +/** + * ntb_peer_db_set_mask() - set bits in the peer doorbell mask + * @ntb: NTB device context. + * @db_bits: Doorbell mask bits to set. + * + * Set bits in the peer doorbell mask register, preventing doorbell interrupts + * from being generated for those doorbell bits. Bits that were already set + * must remain set. + * + * This is unusual, and hardware may not support it. + * + * Return: Zero on success, otherwise an error number. 
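The DMA-offload pattern described above commonly ends with the peer doorbell being rung from the dmaengine completion callback; a hedged sketch, with struct my_tx and my_tx_done being illustrative:

struct my_tx {
	struct ntb_dev *ntb;
	u64 db_bit;
};

/* dmaengine callback: the copy into the peer window has landed */
static void my_tx_done(void *arg)
{
	struct my_tx *tx = arg;

	ntb_peer_db_set(tx->ntb, tx->db_bit);
}

A driver that instead appends the doorbell write to the DMA chain itself would first fetch the register location with ntb_peer_db_addr().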
+ */ +static inline int ntb_peer_db_set_mask(struct ntb_dev *ntb, u64 db_bits) +{ + if (!ntb->ops->db_set_mask) + return -EINVAL; + + return ntb->ops->peer_db_set_mask(ntb, db_bits); +} + +/** + * ntb_peer_db_clear_mask() - clear bits in the peer doorbell mask + * @ntb: NTB device context. + * @db_bits: Doorbell bits to clear. + * + * Clear bits in the peer doorbell mask register, allowing doorbell interrupts + * from being generated for those doorbell bits. If the hardware does not + * generate the interrupt on clearing the mask bit, then the driver should not + * implement this function! + * + * This is unusual, and hardware may not support it. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_peer_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) +{ + if (!ntb->ops->db_clear_mask) + return -EINVAL; + + return ntb->ops->peer_db_clear_mask(ntb, db_bits); +} + +/** + * ntb_spad_is_unsafe() - check if it is safe to use the hardware scratchpads + * @ntb: NTB device context. + * + * It is possible for some ntb hardware to be affected by errata. Hardware + * drivers can advise clients to avoid using scratchpads. Clients may ignore + * this advice, though caution is recommended. + * + * Return: Zero if it is safe to use scratchpads, or One if it is not safe. + */ +static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb) +{ + if (!ntb->ops->spad_is_unsafe) + return 0; + + return ntb->ops->spad_is_unsafe(ntb); +} + +/** + * ntb_spad_count() - get the number of scratchpads + * @ntb: NTB device context. + * + * Hardware and topology may support a different number of scratchpads. + * Although it must be the same for all ports per NTB device. + * + * Return: the number of scratchpads. + */ +static inline int ntb_spad_count(struct ntb_dev *ntb) +{ + if (!ntb->ops->spad_count) + return 0; + + return ntb->ops->spad_count(ntb); +} + +/** + * ntb_spad_read() - read the local scratchpad register + * @ntb: NTB device context. + * @sidx: Scratchpad index. + * + * Read the local scratchpad register, and return the value. + * + * Return: The value of the local scratchpad register. + */ +static inline u32 ntb_spad_read(struct ntb_dev *ntb, int sidx) +{ + if (!ntb->ops->spad_read) + return ~(u32)0; + + return ntb->ops->spad_read(ntb, sidx); +} + +/** + * ntb_spad_write() - write the local scratchpad register + * @ntb: NTB device context. + * @sidx: Scratchpad index. + * @val: Scratchpad value. + * + * Write the value to the local scratchpad register. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val) +{ + if (!ntb->ops->spad_write) + return -EINVAL; + + return ntb->ops->spad_write(ntb, sidx, val); +} + +/** + * ntb_peer_spad_addr() - address of the peer scratchpad register + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * @sidx: Scratchpad index. + * @spad_addr: OUT - The address of the peer scratchpad register. + * + * Return the address of the peer doorbell register. This may be used, for + * example, by drivers that offload memory copy operations to a dma engine. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, + phys_addr_t *spad_addr) +{ + if (!ntb->ops->peer_spad_addr) + return -EINVAL; + + return ntb->ops->peer_spad_addr(ntb, pidx, sidx, spad_addr); +} + +/** + * ntb_peer_spad_read() - read the peer scratchpad register + * @ntb: NTB device context. 
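A common scratchpad convention, sketched below with illustrative my_* helpers: each side writes into the peer's scratchpad with ntb_peer_spad_write() (defined just below) and reads what the peer wrote into its own with ntb_spad_read().

static void my_publish_version(struct ntb_dev *ntb, int pidx, u32 version)
{
	/* lands in scratchpad 0 on the peer side */
	ntb_peer_spad_write(ntb, pidx, 0, version);
}

static u32 my_read_peer_version(struct ntb_dev *ntb)
{
	/* valid once the peer has done its publish step */
	return ntb_spad_read(ntb, 0);
}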
+ * @pidx: Port index of peer device. + * @sidx: Scratchpad index. + * + * Read the peer scratchpad register, and return the value. + * + * Return: The value of the local scratchpad register. + */ +static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) +{ + if (!ntb->ops->peer_spad_read) + return ~(u32)0; + + return ntb->ops->peer_spad_read(ntb, pidx, sidx); +} + +/** + * ntb_peer_spad_write() - write the peer scratchpad register + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * @sidx: Scratchpad index. + * @val: Scratchpad value. + * + * Write the value to the peer scratchpad register. + * + * Return: Zero on success, otherwise an error number. + */ +static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx, + u32 val) +{ + if (!ntb->ops->peer_spad_write) + return -EINVAL; + + return ntb->ops->peer_spad_write(ntb, pidx, sidx, val); +} + +/** + * ntb_msg_count() - get the number of message registers + * @ntb: NTB device context. + * + * Hardware may support a different number of message registers. + * + * Return: the number of message registers. + */ +static inline int ntb_msg_count(struct ntb_dev *ntb) +{ + if (!ntb->ops->msg_count) + return 0; + + return ntb->ops->msg_count(ntb); +} + +/** + * ntb_msg_inbits() - get a bitfield of inbound message registers status + * @ntb: NTB device context. + * + * The method returns the bitfield of status and mask registers, which related + * to inbound message registers. + * + * Return: bitfield of inbound message registers. + */ +static inline u64 ntb_msg_inbits(struct ntb_dev *ntb) +{ + if (!ntb->ops->msg_inbits) + return 0; + + return ntb->ops->msg_inbits(ntb); +} + +/** + * ntb_msg_outbits() - get a bitfield of outbound message registers status + * @ntb: NTB device context. + * + * The method returns the bitfield of status and mask registers, which related + * to outbound message registers. + * + * Return: bitfield of outbound message registers. + */ +static inline u64 ntb_msg_outbits(struct ntb_dev *ntb) +{ + if (!ntb->ops->msg_outbits) + return 0; + + return ntb->ops->msg_outbits(ntb); +} + +/** + * ntb_msg_read_sts() - read the message registers status + * @ntb: NTB device context. + * + * Read the status of message register. Inbound and outbound message registers + * related bits can be filtered by masks retrieved from ntb_msg_inbits() and + * ntb_msg_outbits(). + * + * Return: status bits of message registers + */ +static inline u64 ntb_msg_read_sts(struct ntb_dev *ntb) +{ + if (!ntb->ops->msg_read_sts) + return 0; + + return ntb->ops->msg_read_sts(ntb); +} + +/** + * ntb_msg_clear_sts() - clear status bits of message registers + * @ntb: NTB device context. + * @sts_bits: Status bits to clear. + * + * Clear bits in the status register. + * + * Return: Zero on success, otherwise a negative error number. + */ +static inline int ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits) +{ + if (!ntb->ops->msg_clear_sts) + return -EINVAL; + + return ntb->ops->msg_clear_sts(ntb, sts_bits); +} + +/** + * ntb_msg_set_mask() - set mask of message register status bits + * @ntb: NTB device context. + * @mask_bits: Mask bits. + * + * Mask the message registers status bits from raising the message event. + * + * Return: Zero on success, otherwise a negative error number. 
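A hedged sketch of consuming the message interface. It assumes, purely for illustration, that inbound status bit i corresponds to message register i (true of some hardware but not guaranteed by this API), and it uses ntb_msg_read() and ntb_peer_msg_write(), which are defined just below.

static void my_drain_messages(struct ntb_dev *ntb)
{
	u64 sts = ntb_msg_read_sts(ntb) & ntb_msg_inbits(ntb);
	int midx, pidx;
	u32 data;

	for (midx = 0; midx < ntb_msg_count(ntb); midx++) {
		if (!(sts & BIT_ULL(midx)))
			continue;
		data = ntb_msg_read(ntb, &pidx, midx);
		ntb_msg_clear_sts(ntb, BIT_ULL(midx));
		/* ... handle @data from peer port index @pidx ... */
	}
}

static int my_send_message(struct ntb_dev *ntb, int pidx, u32 data)
{
	return ntb_peer_msg_write(ntb, pidx, 0, data);
}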
+ */ +static inline int ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits) +{ + if (!ntb->ops->msg_set_mask) + return -EINVAL; + + return ntb->ops->msg_set_mask(ntb, mask_bits); +} + +/** + * ntb_msg_clear_mask() - clear message registers mask + * @ntb: NTB device context. + * @mask_bits: Mask bits to clear. + * + * Clear bits in the message events mask register. + * + * Return: Zero on success, otherwise a negative error number. + */ +static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits) +{ + if (!ntb->ops->msg_clear_mask) + return -EINVAL; + + return ntb->ops->msg_clear_mask(ntb, mask_bits); +} + +/** + * ntb_msg_read() - read inbound message register with specified index + * @ntb: NTB device context. + * @pidx: OUT - Port index of peer device a message retrieved from + * @midx: Message register index + * + * Read data from the specified message register. Source port index of a + * message is retrieved as well. + * + * Return: The value of the inbound message register. + */ +static inline u32 ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx) +{ + if (!ntb->ops->msg_read) + return ~(u32)0; + + return ntb->ops->msg_read(ntb, pidx, midx); +} + +/** + * ntb_peer_msg_write() - write data to the specified peer message register + * @ntb: NTB device context. + * @pidx: Port index of peer device a message being sent to + * @midx: Message register index + * @msg: Data to send + * + * Send data to a specified peer device using the defined message register. + * Message event can be raised if the midx registers isn't empty while + * calling this method and the corresponding interrupt isn't masked. + * + * Return: Zero on success, otherwise a negative error number. + */ +static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx, + u32 msg) +{ + if (!ntb->ops->peer_msg_write) + return -EINVAL; + + return ntb->ops->peer_msg_write(ntb, pidx, midx, msg); +} + +#endif diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h new file mode 100644 index 000000000..7243eb98a --- /dev/null +++ b/include/linux/ntb_transport.h @@ -0,0 +1,86 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 Intel Corporation. All rights reserved. + * Copyright (C) 2015 EMC Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * BSD LICENSE + * + * Copyright(c) 2012 Intel Corporation. All rights reserved. + * Copyright (C) 2015 EMC Corporation. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copy + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * PCIe NTB Transport Linux driver + * + * Contact Information: + * Jon Mason + */ + +struct ntb_transport_qp; + +struct ntb_transport_client { + struct device_driver driver; + int (*probe)(struct device *client_dev); + void (*remove)(struct device *client_dev); +}; + +int ntb_transport_register_client(struct ntb_transport_client *drvr); +void ntb_transport_unregister_client(struct ntb_transport_client *drvr); +int ntb_transport_register_client_dev(char *device_name); +void ntb_transport_unregister_client_dev(char *device_name); + +struct ntb_queue_handlers { + void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, + void *data, int len); + void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, + void *data, int len); + void (*event_handler)(void *data, int status); +}; + +unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp); +unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); +struct ntb_transport_qp * +ntb_transport_create_queue(void *data, struct device *client_dev, + const struct ntb_queue_handlers *handlers); +void ntb_transport_free_queue(struct ntb_transport_qp *qp); +int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, + unsigned int len); +int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, + unsigned int len); +void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len); +void ntb_transport_link_up(struct ntb_transport_qp *qp); +void ntb_transport_link_down(struct ntb_transport_qp *qp); +bool ntb_transport_link_query(struct ntb_transport_qp *qp); +unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp); diff --git a/include/linux/nubus.h b/include/linux/nubus.h new file mode 100644 index 000000000..eba50b057 --- /dev/null +++ b/include/linux/nubus.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + nubus.h: various definitions and prototypes for NuBus drivers to use. + + Originally written by Alan Cox. + + Hacked to death by C. Scott Ananian and David Huggins-Daines. +*/ + +#ifndef LINUX_NUBUS_H +#define LINUX_NUBUS_H + +#include +#include +#include + +struct proc_dir_entry; +struct seq_file; + +struct nubus_dir { + unsigned char *base; + unsigned char *ptr; + int done; + int mask; + struct proc_dir_entry *procdir; +}; + +struct nubus_dirent { + unsigned char *base; + unsigned char type; + __u32 data; /* Actually 24 bits used */ + int mask; +}; + +struct nubus_board { + struct device dev; + + /* Only 9-E actually exist, though 0-8 are also theoretically + possible, and 0 is a special case which represents the + motherboard and onboard peripherals (Ethernet, video) */ + int slot; + /* For slot 0, this is bogus. 
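Returning to the ntb_transport API declared above, a minimal client queue setup might look like the sketch below; MY_BUF_SIZE and the my_* names are illustrative and the module/driver registration boilerplate is omitted.

#define MY_BUF_SIZE	2048

static void my_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
			  void *data, int len)
{
	/* @data is the cookie given to ntb_transport_rx_enqueue() */
	pr_info("rx %d bytes\n", len);

	/* hand the buffer straight back to the receive ring */
	ntb_transport_rx_enqueue(qp, data, data, MY_BUF_SIZE);
}

static void my_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
			  void *data, int len)
{
	kfree(data);			/* transmit buffer is done */
}

static void my_event_handler(void *data, int link_is_up)
{
	pr_info("queue link is %s\n", link_is_up ? "up" : "down");
}

static const struct ntb_queue_handlers my_handlers = {
	.rx_handler	= my_rx_handler,
	.tx_handler	= my_tx_handler,
	.event_handler	= my_event_handler,
};

static int my_transport_probe(struct device *client_dev)
{
	struct ntb_transport_qp *qp;
	void *buf;
	int i;

	qp = ntb_transport_create_queue(NULL, client_dev, &my_handlers);
	if (!qp)
		return -EIO;

	/* post receive buffers before announcing the link */
	for (i = 0; i < 8; i++) {
		buf = kmalloc(MY_BUF_SIZE, GFP_KERNEL);
		if (buf)
			ntb_transport_rx_enqueue(qp, buf, buf, MY_BUF_SIZE);
	}

	ntb_transport_link_up(qp);
	return 0;
}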
*/ + char name[64]; + + /* Format block */ + unsigned char *fblock; + /* Root directory (does *not* always equal fblock + doffset!) */ + unsigned char *directory; + + unsigned long slot_addr; + /* Offset to root directory (sometimes) */ + unsigned long doffset; + /* Length over which to compute the crc */ + unsigned long rom_length; + /* Completely useless most of the time */ + unsigned long crc; + unsigned char rev; + unsigned char format; + unsigned char lanes; + + /* Directory entry in /proc/bus/nubus */ + struct proc_dir_entry *procdir; +}; + +struct nubus_rsrc { + struct list_head list; + + /* The functional resource ID */ + unsigned char resid; + /* These are mostly here for convenience; we could always read + them from the ROMs if we wanted to */ + unsigned short category; + unsigned short type; + unsigned short dr_sw; + unsigned short dr_hw; + + /* Functional directory */ + unsigned char *directory; + /* Much of our info comes from here */ + struct nubus_board *board; +}; + +/* This is all NuBus functional resources (used to find devices later on) */ +extern struct list_head nubus_func_rsrcs; + +struct nubus_driver { + struct device_driver driver; + int (*probe)(struct nubus_board *board); + int (*remove)(struct nubus_board *board); +}; + +extern struct bus_type nubus_bus_type; + +/* Generic NuBus interface functions, modelled after the PCI interface */ +#ifdef CONFIG_PROC_FS +void nubus_proc_init(void); +struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board); +struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir, + const struct nubus_dirent *ent, + struct nubus_board *board); +void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, + const struct nubus_dirent *ent, + unsigned int size); +void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, + const struct nubus_dirent *ent); +#else +static inline void nubus_proc_init(void) {} +static inline +struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board) +{ return NULL; } +static inline +struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir, + const struct nubus_dirent *ent, + struct nubus_board *board) +{ return NULL; } +static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, + const struct nubus_dirent *ent, + unsigned int size) {} +static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, + const struct nubus_dirent *ent) {} +#endif + +struct nubus_rsrc *nubus_first_rsrc_or_null(void); +struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from); + +#define for_each_func_rsrc(f) \ + for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f)) + +#define for_each_board_func_rsrc(b, f) \ + for_each_func_rsrc(f) if (f->board != b) {} else + +/* These are somewhat more NuBus-specific. They all return 0 for + success and -1 for failure, as you'd expect. 
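A hedged sketch of a NuBus driver built on the structures above; the my_* names are illustrative. Registration would go through nubus_driver_register(), declared a little further below.

static int my_nubus_probe(struct nubus_board *board)
{
	struct nubus_rsrc *fres;

	/* enumerate the functional resources belonging to this board */
	for_each_board_func_rsrc(board, fres)
		dev_info(&board->dev, "slot %X: category %#x type %#x\n",
			 board->slot, fres->category, fres->type);

	return 0;
}

static int my_nubus_remove(struct nubus_board *board)
{
	return 0;
}

static struct nubus_driver my_nubus_driver = {
	.driver = {
		.name	= "my_nubus_driver",
	},
	.probe	= my_nubus_probe,
	.remove	= my_nubus_remove,
};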
*/ + +/* The root directory which contains the board and functional + directories */ +int nubus_get_root_dir(const struct nubus_board *board, + struct nubus_dir *dir); +/* The board directory */ +int nubus_get_board_dir(const struct nubus_board *board, + struct nubus_dir *dir); +/* The functional directory */ +int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir); + +/* These work on any directory gotten via the above */ +int nubus_readdir(struct nubus_dir *dir, + struct nubus_dirent *ent); +int nubus_find_rsrc(struct nubus_dir *dir, + unsigned char rsrc_type, + struct nubus_dirent *ent); +int nubus_rewinddir(struct nubus_dir *dir); + +/* Things to do with directory entries */ +int nubus_get_subdir(const struct nubus_dirent *ent, + struct nubus_dir *dir); +void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent, + unsigned int len); +unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent, + unsigned int len); +void nubus_seq_write_rsrc_mem(struct seq_file *m, + const struct nubus_dirent *dirent, + unsigned int len); +unsigned char *nubus_dirptr(const struct nubus_dirent *nd); + +/* Declarations relating to driver model objects */ +int nubus_parent_device_register(void); +int nubus_device_register(struct nubus_board *board); +int nubus_driver_register(struct nubus_driver *ndrv); +void nubus_driver_unregister(struct nubus_driver *ndrv); +int nubus_proc_show(struct seq_file *m, void *data); + +static inline void nubus_set_drvdata(struct nubus_board *board, void *data) +{ + dev_set_drvdata(&board->dev, data); +} + +static inline void *nubus_get_drvdata(struct nubus_board *board) +{ + return dev_get_drvdata(&board->dev); +} + +/* Returns a pointer to the "standard" slot space. */ +static inline void *nubus_slot_addr(int slot) +{ + return (void *)(0xF0000000 | (slot << 24)); +} + +#endif /* LINUX_NUBUS_H */ diff --git a/include/linux/numa.h b/include/linux/numa.h new file mode 100644 index 000000000..110b0e5d0 --- /dev/null +++ b/include/linux/numa.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_NUMA_H +#define _LINUX_NUMA_H + + +#ifdef CONFIG_NODES_SHIFT +#define NODES_SHIFT CONFIG_NODES_SHIFT +#else +#define NODES_SHIFT 0 +#endif + +#define MAX_NUMNODES (1 << NODES_SHIFT) + +#define NUMA_NO_NODE (-1) + +#endif /* _LINUX_NUMA_H */ diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h new file mode 100644 index 000000000..496ff759f --- /dev/null +++ b/include/linux/nvme-fc-driver.h @@ -0,0 +1,896 @@ +/* + * Copyright (c) 2016, Avago Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _NVME_FC_DRIVER_H +#define _NVME_FC_DRIVER_H 1 + + +/* + * ********************** LLDD FC-NVME Host API ******************** + * + * For FC LLDD's that are the NVME Host role. 
+ * + * ****************************************************************** + */ + + + +/* FC Port role bitmask - can merge with FC Port Roles in fc transport */ +#define FC_PORT_ROLE_NVME_INITIATOR 0x10 +#define FC_PORT_ROLE_NVME_TARGET 0x20 +#define FC_PORT_ROLE_NVME_DISCOVERY 0x40 + + +/** + * struct nvme_fc_port_info - port-specific ids and FC connection-specific + * data element used during NVME Host role + * registrations + * + * Static fields describing the port being registered: + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx) + * @dev_loss_tmo: maximum delay for reconnects to an association on + * this device. Used only on a remoteport. + * + * Initialization values for dynamic port fields: + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + */ +struct nvme_fc_port_info { + u64 node_name; + u64 port_name; + u32 port_role; + u32 port_id; + u32 dev_loss_tmo; +}; + + +/** + * struct nvmefc_ls_req - Request structure passed from NVME-FC transport + * to LLDD in order to perform a NVME FC-4 LS + * request and obtain a response. + * + * Values set by the NVME-FC layer prior to calling the LLDD ls_req + * entrypoint. + * @rqstaddr: pointer to request buffer + * @rqstdma: PCI DMA address of request buffer + * @rqstlen: Length, in bytes, of request buffer + * @rspaddr: pointer to response buffer + * @rspdma: PCI DMA address of response buffer + * @rsplen: Length, in bytes, of response buffer + * @timeout: Maximum amount of time, in seconds, to wait for the LS response. + * If timeout exceeded, LLDD to abort LS exchange and complete + * LS request with error status. + * @private: pointer to memory allocated alongside the ls request structure + * that is specifically for the LLDD to use while processing the + * request. The length of the buffer corresponds to the + * lsrqst_priv_sz value specified in the nvme_fc_port_template + * supplied by the LLDD. + * @done: The callback routine the LLDD is to invoke upon completion of + * the LS request. req argument is the pointer to the original LS + * request structure. Status argument must be 0 upon success, a + * negative errno on failure (example: -ENXIO). + */ +struct nvmefc_ls_req { + void *rqstaddr; + dma_addr_t rqstdma; + u32 rqstlen; + void *rspaddr; + dma_addr_t rspdma; + u32 rsplen; + u32 timeout; + + void *private; + + void (*done)(struct nvmefc_ls_req *req, int status); + +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +enum nvmefc_fcp_datadir { + NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */ + NVMEFC_FCP_WRITE, + NVMEFC_FCP_READ, +}; + + +/** + * struct nvmefc_fcp_req - Request structure passed from NVME-FC transport + * to LLDD in order to perform a NVME FCP IO operation. + * + * Values set by the NVME-FC layer prior to calling the LLDD fcp_io + * entrypoint. 
+ * @cmdaddr: pointer to the FCP CMD IU buffer + * @rspaddr: pointer to the FCP RSP IU buffer + * @cmddma: PCI DMA address of the FCP CMD IU buffer + * @rspdma: PCI DMA address of the FCP RSP IU buffer + * @cmdlen: Length, in bytes, of the FCP CMD IU buffer + * @rsplen: Length, in bytes, of the FCP RSP IU buffer + * @payload_length: Length of DATA_IN or DATA_OUT payload data to transfer + * @sg_table: scatter/gather structure for payload data + * @first_sgl: memory for 1st scatter/gather list segment for payload data + * @sg_cnt: number of elements in the scatter/gather list + * @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx) + * @sqid: The nvme SQID the command is being issued on + * @done: The callback routine the LLDD is to invoke upon completion of + * the FCP operation. req argument is the pointer to the original + * FCP IO operation. + * @private: pointer to memory allocated alongside the FCP operation + * request structure that is specifically for the LLDD to use + * while processing the operation. The length of the buffer + * corresponds to the fcprqst_priv_sz value specified in the + * nvme_fc_port_template supplied by the LLDD. + * + * Values set by the LLDD indicating completion status of the FCP operation. + * Must be set prior to calling the done() callback. + * @transferred_length: amount of payload data, in bytes, that were + * transferred. Should equal payload_length on success. + * @rcv_rsplen: length, in bytes, of the FCP RSP IU received. + * @status: Completion status of the FCP operation. must be 0 upon success, + * negative errno value upon failure (ex: -EIO). Note: this is + * NOT a reflection of the NVME CQE completion status. Only the + * status of the FCP operation at the NVME-FC level. + */ +struct nvmefc_fcp_req { + void *cmdaddr; + void *rspaddr; + dma_addr_t cmddma; + dma_addr_t rspdma; + u16 cmdlen; + u16 rsplen; + + u32 payload_length; + struct sg_table sg_table; + struct scatterlist *first_sgl; + int sg_cnt; + enum nvmefc_fcp_datadir io_dir; + + __le16 sqid; + + void (*done)(struct nvmefc_fcp_req *req); + + void *private; + + u32 transferred_length; + u16 rcv_rsplen; + u32 status; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/* + * Direct copy of fc_port_state enum. For later merging + */ +enum nvme_fc_obj_state { + FC_OBJSTATE_UNKNOWN, + FC_OBJSTATE_NOTPRESENT, + FC_OBJSTATE_ONLINE, + FC_OBJSTATE_OFFLINE, /* User has taken Port Offline */ + FC_OBJSTATE_BLOCKED, + FC_OBJSTATE_BYPASSED, + FC_OBJSTATE_DIAGNOSTICS, + FC_OBJSTATE_LINKDOWN, + FC_OBJSTATE_ERROR, + FC_OBJSTATE_LOOPBACK, + FC_OBJSTATE_DELETED, +}; + + +/** + * struct nvme_fc_local_port - structure used between NVME-FC transport and + * a LLDD to reference a local NVME host port. + * Allocated/created by the nvme_fc_register_localport() + * transport interface. + * + * Fields with static values for the port. Initialized by the + * port_info struct supplied to the registration call. + * @port_num: NVME-FC transport host port number + * @port_role: NVME roles are supported on the port (see FC_PORT_ROLE_xxx) + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @private: pointer to memory allocated alongside the local port + * structure that is specifically for the LLDD to use. + * The length of the buffer corresponds to the local_priv_sz + * value specified in the nvme_fc_port_template supplied by + * the LLDD. + * @dev_loss_tmo: maximum delay for reconnects to an association on + * this device. 
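To make the completion contract above concrete, a hedged sketch of the bookkeeping an LLDD might do before invoking done() on an FCP request (my_lldd_complete_fcp is illustrative):

static void my_lldd_complete_fcp(struct nvmefc_fcp_req *req,
				 u32 xfered, u16 rsp_iu_len, int err)
{
	/* completion fields must be filled in before done() is called */
	req->transferred_length = xfered;
	req->rcv_rsplen = rsp_iu_len;
	req->status = err;	/* 0 on success, negative errno otherwise */

	req->done(req);
}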
To modify, lldd must call + * nvme_fc_set_remoteport_devloss(). + * + * Fields with dynamic values. Values may change base on link state. LLDD + * may reference fields directly to change them. Initialized by the + * port_info struct supplied to the registration call. + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + * @port_state: Operational state of the port. + */ +struct nvme_fc_local_port { + /* static/read-only fields */ + u32 port_num; + u32 port_role; + u64 node_name; + u64 port_name; + + void *private; + + /* dynamic fields */ + u32 port_id; + enum nvme_fc_obj_state port_state; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/** + * struct nvme_fc_remote_port - structure used between NVME-FC transport and + * a LLDD to reference a remote NVME subsystem port. + * Allocated/created by the nvme_fc_register_remoteport() + * transport interface. + * + * Fields with static values for the port. Initialized by the + * port_info struct supplied to the registration call. + * @port_num: NVME-FC transport remote subsystem port number + * @port_role: NVME roles are supported on the port (see FC_PORT_ROLE_xxx) + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @localport: pointer to the NVME-FC local host port the subsystem is + * connected to. + * @private: pointer to memory allocated alongside the remote port + * structure that is specifically for the LLDD to use. + * The length of the buffer corresponds to the remote_priv_sz + * value specified in the nvme_fc_port_template supplied by + * the LLDD. + * + * Fields with dynamic values. Values may change base on link or login + * state. LLDD may reference fields directly to change them. Initialized by + * the port_info struct supplied to the registration call. + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + * @port_state: Operational state of the remote port. Valid values are + * ONLINE or UNKNOWN. + */ +struct nvme_fc_remote_port { + /* static fields */ + u32 port_num; + u32 port_role; + u64 node_name; + u64 port_name; + struct nvme_fc_local_port *localport; + void *private; + u32 dev_loss_tmo; + + /* dynamic fields */ + u32 port_id; + enum nvme_fc_obj_state port_state; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/** + * struct nvme_fc_port_template - structure containing static entrypoints and + * operational parameters for an LLDD that supports NVME host + * behavior. Passed by reference in port registrations. + * NVME-FC transport remembers template reference and may + * access it during runtime operation. + * + * Host/Initiator Transport Entrypoints/Parameters: + * + * @localport_delete: The LLDD initiates deletion of a localport via + * nvme_fc_deregister_localport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the + * teardown to inform the LLDD that the localport has been deleted. + * Entrypoint is Mandatory. + * + * @remoteport_delete: The LLDD initiates deletion of a remoteport via + * nvme_fc_deregister_remoteport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the + * teardown to inform the LLDD that the remoteport has been deleted. + * Entrypoint is Mandatory. + * + * @create_queue: Upon creating a host<->controller association, queues are + * created such that they can be affinitized to cpus/cores. 
This + * callback into the LLDD to notify that a controller queue is being + * created. The LLDD may choose to allocate an associated hw queue + * or map it onto a shared hw queue. Upon return from the call, the + * LLDD specifies a handle that will be given back to it for any + * command that is posted to the controller queue. The handle can + * be used by the LLDD to map quickly to the proper hw queue for + * command execution. The mask of cpu's that will map to this queue + * at the block-level is also passed in. The LLDD should use the + * queue id and/or cpu masks to ensure proper affinitization of the + * controller queue to the hw queue. + * Entrypoint is Optional. + * + * @delete_queue: This is the inverse of the crete_queue. During + * host<->controller association teardown, this routine is called + * when a controller queue is being terminated. Any association with + * a hw queue should be termined. If there is a unique hw queue, the + * hw queue should be torn down. + * Entrypoint is Optional. + * + * @poll_queue: Called to poll for the completion of an io on a blk queue. + * Entrypoint is Optional. + * + * @ls_req: Called to issue a FC-NVME FC-4 LS service request. + * The nvme_fc_ls_req structure will fully describe the buffers for + * the request payload and where to place the response payload. The + * LLDD is to allocate an exchange, issue the LS request, obtain the + * LS response, and call the "done" routine specified in the request + * structure (argument to done is the ls request structure itself). + * Entrypoint is Mandatory. + * + * @fcp_io: called to issue a FC-NVME I/O request. The I/O may be for + * an admin queue or an i/o queue. The nvmefc_fcp_req structure will + * fully describe the io: the buffer containing the FC-NVME CMD IU + * (which contains the SQE), the sg list for the payload if applicable, + * and the buffer to place the FC-NVME RSP IU into. The LLDD will + * complete the i/o, indicating the amount of data transferred or + * any transport error, and call the "done" routine specified in the + * request structure (argument to done is the fcp request structure + * itself). + * Entrypoint is Mandatory. + * + * @ls_abort: called to request the LLDD to abort the indicated ls request. + * The call may return before the abort has completed. After aborting + * the request, the LLDD must still call the ls request done routine + * indicating an FC transport Aborted status. + * Entrypoint is Mandatory. + * + * @fcp_abort: called to request the LLDD to abort the indicated fcp request. + * The call may return before the abort has completed. After aborting + * the request, the LLDD must still call the fcp request done routine + * indicating an FC transport Aborted status. + * Entrypoint is Mandatory. + * + * @max_hw_queues: indicates the maximum number of hw queues the LLDD + * supports for cpu affinitization. + * Value is Mandatory. Must be at least 1. + * + * @max_sgl_segments: indicates the maximum number of sgl segments supported + * by the LLDD + * Value is Mandatory. Must be at least 1. Recommend at least 256. + * + * @max_dif_sgl_segments: indicates the maximum number of sgl segments + * supported by the LLDD for DIF operations. + * Value is Mandatory. Must be at least 1. Recommend at least 256. + * + * @dma_boundary: indicates the dma address boundary where dma mappings + * will be split across. + * Value is Mandatory. 
Typical value is 0xFFFFFFFF to split across + * 4Gig address boundarys + * + * @local_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like fc nvme layer to allocate on the LLDD's + * behalf whenever a localport is allocated. The additional memory + * area solely for the of the LLDD and its location is specified by + * the localport->private pointer. + * Value is Mandatory. Allowed to be zero. + * + * @remote_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like fc nvme layer to allocate on the LLDD's + * behalf whenever a remoteport is allocated. The additional memory + * area solely for the of the LLDD and its location is specified by + * the remoteport->private pointer. + * Value is Mandatory. Allowed to be zero. + * + * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like fc nvme layer to allocate on the LLDD's + * behalf whenever a ls request structure is allocated. The additional + * memory area solely for the of the LLDD and its location is + * specified by the ls_request->private pointer. + * Value is Mandatory. Allowed to be zero. + * + * @fcprqst_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like fc nvme layer to allocate on the LLDD's + * behalf whenever a fcp request structure is allocated. The additional + * memory area solely for the of the LLDD and its location is + * specified by the fcp_request->private pointer. + * Value is Mandatory. Allowed to be zero. + */ +struct nvme_fc_port_template { + /* initiator-based functions */ + void (*localport_delete)(struct nvme_fc_local_port *); + void (*remoteport_delete)(struct nvme_fc_remote_port *); + int (*create_queue)(struct nvme_fc_local_port *, + unsigned int qidx, u16 qsize, + void **handle); + void (*delete_queue)(struct nvme_fc_local_port *, + unsigned int qidx, void *handle); + void (*poll_queue)(struct nvme_fc_local_port *, void *handle); + int (*ls_req)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + struct nvmefc_ls_req *); + int (*fcp_io)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + void *hw_queue_handle, + struct nvmefc_fcp_req *); + void (*ls_abort)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + struct nvmefc_ls_req *); + void (*fcp_abort)(struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + void *hw_queue_handle, + struct nvmefc_fcp_req *); + + u32 max_hw_queues; + u16 max_sgl_segments; + u16 max_dif_sgl_segments; + u64 dma_boundary; + + /* sizes of additional private data for data structures */ + u32 local_priv_sz; + u32 remote_priv_sz; + u32 lsrqst_priv_sz; + u32 fcprqst_priv_sz; +}; + + +/* + * Initiator/Host functions + */ + +int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, + struct nvme_fc_port_template *template, + struct device *dev, + struct nvme_fc_local_port **lport_p); + +int nvme_fc_unregister_localport(struct nvme_fc_local_port *localport); + +int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, + struct nvme_fc_port_info *pinfo, + struct nvme_fc_remote_port **rport_p); + +int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport); + +void nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport); + +int nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *remoteport, + u32 dev_loss_tmo); + + +/* + * *************** LLDD FC-NVME Target/Subsystem API *************** + * + * For FC LLDD's that are the NVME Subsystem role + * + * 
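Tying the template and the host-side registration calls above together, a hedged sketch of LLDD bring-up. The my_lldd_* callbacks are assumed to be stubs like the ones sketched earlier, the WWN and N_Port_ID values are placeholders, FC_PORT_ROLE_NVME_INITIATOR is assumed to be one of the FC_PORT_ROLE_xxx values referenced earlier in this header, and struct nvme_fc_port_info is assumed to carry the node/port name, role and port_id fields that the kernel-doc above describes.

static struct nvme_fc_port_template my_fc_tmpl = {
        .localport_delete       = my_lldd_localport_delete,    /* hypothetical stubs */
        .remoteport_delete      = my_lldd_remoteport_delete,
        .create_queue           = my_lldd_create_queue,
        .delete_queue           = my_lldd_delete_queue,
        .ls_req                 = my_lldd_ls_req,
        .fcp_io                 = my_lldd_fcp_io,
        .ls_abort               = my_lldd_ls_abort,
        .fcp_abort              = my_lldd_fcp_abort,
        .max_hw_queues          = 8,
        .max_sgl_segments       = 256,
        .max_dif_sgl_segments   = 256,
        .dma_boundary           = 0xFFFFFFFF,           /* split mappings at 4G */
        .local_priv_sz          = sizeof(struct my_lldd_lport),
};

static int my_lldd_nvme_attach(struct device *dev)
{
        struct nvme_fc_port_info pinfo = {
                .node_name = 0x20000090fa000001ULL,     /* placeholder WWNN */
                .port_name = 0x10000090fa000001ULL,     /* placeholder WWPN */
                .port_role = FC_PORT_ROLE_NVME_INITIATOR,
                .port_id   = 0x010200,                  /* upper 8 bits zero */
        };
        struct nvme_fc_local_port *lport;
        int ret;

        ret = nvme_fc_register_localport(&pinfo, &my_fc_tmpl, dev, &lport);
        if (ret)
                return ret;

        /* remote subsystem ports found by discovery are then registered with
         * nvme_fc_register_remoteport(lport, &rport_info, &rport)
         */
        return 0;
}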
****************************************************************** + */ + +/** + * struct nvmet_fc_port_info - port-specific ids and FC connection-specific + * data element used during NVME Subsystem role + * registrations + * + * Static fields describing the port being registered: + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * + * Initialization values for dynamic port fields: + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + */ +struct nvmet_fc_port_info { + u64 node_name; + u64 port_name; + u32 port_id; +}; + + +/** + * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC + * layer to represent the exchange context for + * a FC-NVME Link Service (LS). + * + * The structure is allocated by the LLDD whenever a LS Request is received + * from the FC link. The address of the structure is passed to the nvmet-fc + * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure + * will be passed back to the LLDD when the response is to be transmit. + * The LLDD is to use the address to map back to the LLDD exchange structure + * which maintains information such as the targetport the LS was received + * on, the remote FC NVME initiator that sent the LS, and any FC exchange + * context. Upon completion of the LS response transmit, the address of the + * structure will be passed back to the LS rsp done() routine, allowing the + * nvmet-fc layer to release dma resources. Upon completion of the done() + * routine, no further access will be made by the nvmet-fc layer and the + * LLDD can de-allocate the structure. + * + * Field initialization: + * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that + * is valid in the structure. + * + * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc + * layer will fully set the fields in order to specify the response + * payload buffer and its length as well as the done routine to be called + * upon compeletion of the transmit. The nvmet-fc layer will also set a + * private pointer for its own use in the done routine. + * + * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp + * entrypoint. + * @rspbuf: pointer to the LS response buffer + * @rspdma: PCI DMA address of the LS response buffer + * @rsplen: Length, in bytes, of the LS response buffer + * @done: The callback routine the LLDD is to invoke upon completion of + * transmitting the LS response. req argument is the pointer to + * the original ls request. + * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used + * as part of the NVMET-FC processing. The LLDD is not to access + * this pointer. + */ +struct nvmefc_tgt_ls_req { + void *rspbuf; + dma_addr_t rspdma; + u16 rsplen; + + void (*done)(struct nvmefc_tgt_ls_req *req); + void *nvmet_fc_private; /* LLDD is not to access !! */ +}; + +/* Operations that NVME-FC layer may request the LLDD to perform for FCP */ +enum { + NVMET_FCOP_READDATA = 1, /* xmt data to initiator */ + NVMET_FCOP_WRITEDATA = 2, /* xmt data from initiator */ + NVMET_FCOP_READDATA_RSP = 3, /* xmt data to initiator and send + * rsp as well + */ + NVMET_FCOP_RSP = 4, /* send rsp frame */ +}; + +/** + * struct nvmefc_tgt_fcp_req - Structure used between LLDD and NVMET-FC + * layer to represent the exchange context and + * the specific FC-NVME IU operation(s) to perform + * for a FC-NVME FCP IO. 
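A sketch of the LS receive path just described, assuming a hypothetical LLDD exchange wrapper that embeds the transport's LS context; nvmet_fc_rcv_ls_req() is the entrypoint declared further down in this header.

#include <linux/nvme-fc-driver.h>

/* Hypothetical LLDD exchange wrapper for a received FC-NVME LS */
struct my_lldd_ls_xchg {
        struct nvmefc_tgt_ls_req tgt_lsreq;     /* handed to the transport */
        void *payload;                          /* received LS request payload */
        u32 payload_len;
};

/* Called from the (hypothetical) LLDD unsolicited-frame handler */
static void my_lldd_handle_ls(struct nvmet_fc_target_port *tgtport,
                              struct my_lldd_ls_xchg *xchg)
{
        if (nvmet_fc_rcv_ls_req(tgtport, &xchg->tgt_lsreq,
                                xchg->payload, xchg->payload_len))
                my_lldd_free_ls_xchg(xchg);     /* hypothetical: transport refused */

        /* otherwise the transport later invokes xmt_ls_rsp(), whose
         * implementation transmits rspbuf/rsplen and then calls
         * tgt_lsreq.done(&xchg->tgt_lsreq)
         */
}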
+ * + * Structure used between LLDD and nvmet-fc layer to represent the exchange + * context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related + * memory transfers, and its assocated cqe transfer). + * + * The structure is allocated by the LLDD whenever a FCP CMD IU is received + * from the FC link. The address of the structure is passed to the nvmet-fc + * layer via the nvmet_fc_rcv_fcp_req() call. The address of the structure + * will be passed back to the LLDD for the data operations and transmit of + * the response. The LLDD is to use the address to map back to the LLDD + * exchange structure which maintains information such as the targetport + * the FCP I/O was received on, the remote FC NVME initiator that sent the + * FCP I/O, and any FC exchange context. Upon completion of the FCP target + * operation, the address of the structure will be passed back to the FCP + * op done() routine, allowing the nvmet-fc layer to release dma resources. + * Upon completion of the done() routine for either RSP or ABORT ops, no + * further access will be made by the nvmet-fc layer and the LLDD can + * de-allocate the structure. + * + * Field initialization: + * At the time of the nvmet_fc_rcv_fcp_req() call, there is no content that + * is valid in the structure. + * + * When the structure is used for an FCP target operation, the nvmet-fc + * layer will fully set the fields in order to specify the scattergather + * list, the transfer length, as well as the done routine to be called + * upon compeletion of the operation. The nvmet-fc layer will also set a + * private pointer for its own use in the done routine. + * + * Values set by the NVMET-FC layer prior to calling the LLDD fcp_op + * entrypoint. + * @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx) + * @hwqid: Specifies the hw queue index (0..N-1, where N is the + * max_hw_queues value from the LLD's nvmet_fc_target_template) + * that the operation is to use. + * @offset: Indicates the DATA_OUT/DATA_IN payload offset to be tranferred. + * Field is only valid on WRITEDATA, READDATA, or READDATA_RSP ops. + * @timeout: amount of time, in seconds, to wait for a response from the NVME + * host. A value of 0 is an infinite wait. + * Valid only for the following ops: + * WRITEDATA: caps the wait for data reception + * READDATA_RSP & RSP: caps wait for FCP_CONF reception (if used) + * @transfer_length: the length, in bytes, of the DATA_OUT or DATA_IN payload + * that is to be transferred. + * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. + * @ba_rjt: Contains the BA_RJT payload that is to be transferred. + * Valid only for the NVMET_FCOP_BA_RJT op. + * @sg: Scatter/gather list for the DATA_OUT/DATA_IN payload data. + * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. + * @sg_cnt: Number of valid entries in the scatter/gather list. + * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. + * @rspaddr: pointer to the FCP RSP IU buffer to be transmit + * Used by RSP and READDATA_RSP ops + * @rspdma: PCI DMA address of the FCP RSP IU buffer + * Used by RSP and READDATA_RSP ops + * @rsplen: Length, in bytes, of the FCP RSP IU buffer + * Used by RSP and READDATA_RSP ops + * @done: The callback routine the LLDD is to invoke upon completion of + * the operation. req argument is the pointer to the original + * FCP subsystem op request. + * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used + * as part of the NVMET-FC processing. The LLDD is not to + * reference this field. 
+ * + * Values set by the LLDD indicating completion status of the FCP operation. + * Must be set prior to calling the done() callback. + * @transferred_length: amount of DATA_OUT payload data received by a + * a WRITEDATA operation. If not a WRITEDATA operation, value must + * be set to 0. Should equal transfer_length on success. + * @fcp_error: status of the FCP operation. Must be 0 on success; on failure + * must be a NVME_SC_FC_xxxx value. + */ +struct nvmefc_tgt_fcp_req { + u8 op; + u16 hwqid; + u32 offset; + u32 timeout; + u32 transfer_length; + struct fc_ba_rjt ba_rjt; + struct scatterlist *sg; + int sg_cnt; + void *rspaddr; + dma_addr_t rspdma; + u16 rsplen; + + void (*done)(struct nvmefc_tgt_fcp_req *); + + void *nvmet_fc_private; /* LLDD is not to access !! */ + + u32 transferred_length; + int fcp_error; +}; + + +/* Target Features (Bit fields) LLDD supports */ +enum { + NVMET_FCTGTFEAT_READDATA_RSP = (1 << 0), + /* Bit 0: supports the NVMET_FCPOP_READDATA_RSP op, which + * sends (the last) Read Data sequence followed by the RSP + * sequence in one LLDD operation. Errors during Data + * sequence transmit must not allow RSP sequence to be sent. + */ + NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1), + /* Bit 2: When 0, the LLDD is calling the cmd rcv handler + * in a non-isr context, allowing the transport to finish + * op completion in the calling context. When 1, the LLDD + * is calling the cmd rcv handler in an ISR context, + * requiring the transport to transition to a workqueue + * for op completion. + */ + NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2), + /* Bit 3: When 0, the LLDD is calling the op done handler + * in a non-isr context, allowing the transport to finish + * op completion in the calling context. When 1, the LLDD + * is calling the op done handler in an ISR context, + * requiring the transport to transition to a workqueue + * for op completion. + */ +}; + + +/** + * struct nvmet_fc_target_port - structure used between NVME-FC transport and + * a LLDD to reference a local NVME subsystem port. + * Allocated/created by the nvme_fc_register_targetport() + * transport interface. + * + * Fields with static values for the port. Initialized by the + * port_info struct supplied to the registration call. + * @port_num: NVME-FC transport subsytem port number + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @private: pointer to memory allocated alongside the local port + * structure that is specifically for the LLDD to use. + * The length of the buffer corresponds to the target_priv_sz + * value specified in the nvme_fc_target_template supplied by + * the LLDD. + * + * Fields with dynamic values. Values may change base on link state. LLDD + * may reference fields directly to change them. Initialized by the + * port_info struct supplied to the registration call. + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + * @port_state: Operational state of the port. + */ +struct nvmet_fc_target_port { + /* static/read-only fields */ + u32 port_num; + u64 node_name; + u64 port_name; + + void *private; + + /* dynamic fields */ + u32 port_id; + enum nvme_fc_obj_state port_state; +} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ + + +/** + * struct nvmet_fc_target_template - structure containing static entrypoints + * and operational parameters for an LLDD that supports NVME + * subsystem behavior. Passed by reference in port + * registrations. 
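To illustrate the completion contract above (status fields filled in before done() is invoked), a sketch of a WRITEDATA completion for a hypothetical LLDD exchange wrapper; NVME_SC_INTERNAL is used purely as a placeholder error code.

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>

/* Hypothetical LLDD exchange wrapper embedding the transport's FCP request */
struct my_lldd_fcp_xchg {
        struct nvmefc_tgt_fcp_req tgt_fcpreq;
        u32 bytes_received;
};

/* Called by the (hypothetical) LLDD when the last DATA_OUT frame arrives */
static void my_lldd_writedata_complete(struct my_lldd_fcp_xchg *xchg, int hw_err)
{
        struct nvmefc_tgt_fcp_req *fcpreq = &xchg->tgt_fcpreq;

        /* completion status must be set before invoking done() */
        fcpreq->transferred_length = xchg->bytes_received;
        fcpreq->fcp_error = hw_err ? NVME_SC_INTERNAL : 0;      /* placeholder code */

        fcpreq->done(fcpreq);   /* transport may immediately start the next op */
}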
NVME-FC transport remembers template + * reference and may access it during runtime operation. + * + * Subsystem/Target Transport Entrypoints/Parameters: + * + * @targetport_delete: The LLDD initiates deletion of a targetport via + * nvmet_fc_unregister_targetport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the + * teardown to inform the LLDD that the targetport has been deleted. + * Entrypoint is Mandatory. + * + * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service. + * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange + * structure specified in the nvmet_fc_rcv_ls_req() call made when + * the LS request was received. The structure will fully describe + * the buffers for the response payload and the dma address of the + * payload. The LLDD is to transmit the response (or return a non-zero + * errno status), and upon completion of the transmit, call the + * "done" routine specified in the nvmefc_tgt_ls_req structure + * (argument to done is the ls reqwuest structure itself). + * After calling the done routine, the LLDD shall consider the + * LS handling complete and the nvmefc_tgt_ls_req structure may + * be freed/released. + * Entrypoint is Mandatory. + * + * @fcp_op: Called to perform a data transfer or transmit a response. + * The nvmefc_tgt_fcp_req structure is the same LLDD-supplied + * exchange structure specified in the nvmet_fc_rcv_fcp_req() call + * made when the FCP CMD IU was received. The op field in the + * structure shall indicate the operation for the LLDD to perform + * relative to the io. + * NVMET_FCOP_READDATA operation: the LLDD is to send the + * payload data (described by sglist) to the host in 1 or + * more FC sequences (preferrably 1). Note: the fc-nvme layer + * may call the READDATA operation multiple times for longer + * payloads. + * NVMET_FCOP_WRITEDATA operation: the LLDD is to receive the + * payload data (described by sglist) from the host via 1 or + * more FC sequences (preferrably 1). The LLDD is to generate + * the XFER_RDY IU(s) corresponding to the data being requested. + * Note: the FC-NVME layer may call the WRITEDATA operation + * multiple times for longer payloads. + * NVMET_FCOP_READDATA_RSP operation: the LLDD is to send the + * payload data (described by sglist) to the host in 1 or + * more FC sequences (preferrably 1). If an error occurs during + * payload data transmission, the LLDD is to set the + * nvmefc_tgt_fcp_req fcp_error and transferred_length field, then + * consider the operation complete. On error, the LLDD is to not + * transmit the FCP_RSP iu. If all payload data is transferred + * successfully, the LLDD is to update the nvmefc_tgt_fcp_req + * transferred_length field and may subsequently transmit the + * FCP_RSP iu payload (described by rspbuf, rspdma, rsplen). + * If FCP_CONF is supported, the LLDD is to await FCP_CONF + * reception to confirm the RSP reception by the host. The LLDD + * may retramsit the FCP_RSP iu if necessary per FC-NVME. Upon + * transmission of the FCP_RSP iu if FCP_CONF is not supported, + * or upon success/failure of FCP_CONF if it is supported, the + * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and + * consider the operation complete. + * NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload + * (described by rspbuf, rspdma, rsplen). If FCP_CONF is + * supported, the LLDD is to await FCP_CONF reception to confirm + * the RSP reception by the host. 
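A skeleton of the fcp_op() dispatch described above; the per-operation helpers are hypothetical, but the NVMET_FCOP_xxx values and the entrypoint signature are the ones defined in this header.

static int my_lldd_fcp_op(struct nvmet_fc_target_port *tgtport,
                          struct nvmefc_tgt_fcp_req *fcpreq)
{
        switch (fcpreq->op) {
        case NVMET_FCOP_READDATA:
                /* DMA fcpreq->sg to the host; may be called multiple times */
                return my_lldd_send_data(tgtport, fcpreq);      /* hypothetical */
        case NVMET_FCOP_WRITEDATA:
                /* send XFER_RDY, then receive fcpreq->transfer_length bytes */
                return my_lldd_recv_data(tgtport, fcpreq);      /* hypothetical */
        case NVMET_FCOP_READDATA_RSP:
                /* final read data followed by the FCP_RSP IU in one operation */
                return my_lldd_send_data_rsp(tgtport, fcpreq);  /* hypothetical */
        case NVMET_FCOP_RSP:
                /* transmit rspaddr/rsplen as the FCP_RSP IU */
                return my_lldd_send_rsp(tgtport, fcpreq);       /* hypothetical */
        default:
                return -EINVAL;
        }
}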
The LLDD may retramsit the + * FCP_RSP iu if FCP_CONF is not received per FC-NVME. Upon + * transmission of the FCP_RSP iu if FCP_CONF is not supported, + * or upon success/failure of FCP_CONF if it is supported, the + * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and + * consider the operation complete. + * Upon completing the indicated operation, the LLDD is to set the + * status fields for the operation (tranferred_length and fcp_error + * status) in the request, then call the "done" routine + * indicated in the fcp request. After the operation completes, + * regardless of whether the FCP_RSP iu was successfully transmit, + * the LLDD-supplied exchange structure must remain valid until the + * transport calls the fcp_req_release() callback to return ownership + * of the exchange structure back to the LLDD so that it may be used + * for another fcp command. + * Note: when calling the done routine for READDATA or WRITEDATA + * operations, the fc-nvme layer may immediate convert, in the same + * thread and before returning to the LLDD, the fcp operation to + * the next operation for the fcp io and call the LLDDs fcp_op + * call again. If fields in the fcp request are to be accessed post + * the done call, the LLDD should save their values prior to calling + * the done routine, and inspect the save values after the done + * routine. + * Returns 0 on success, - on failure (Ex: -EIO) + * Entrypoint is Mandatory. + * + * @fcp_abort: Called by the transport to abort an active command. + * The command may be in-between operations (nothing active in LLDD) + * or may have an active WRITEDATA operation pending. The LLDD is to + * initiate the ABTS process for the command and return from the + * callback. The ABTS does not need to be complete on the command. + * The fcp_abort callback inherently cannot fail. After the + * fcp_abort() callback completes, the transport will wait for any + * outstanding operation (if there was one) to complete, then will + * call the fcp_req_release() callback to return the command's + * exchange context back to the LLDD. + * Entrypoint is Mandatory. + * + * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req + * to the LLDD after all operations on the fcp operation are complete. + * This may be due to the command completing or upon completion of + * abort cleanup. + * Entrypoint is Mandatory. + * + * @defer_rcv: Called by the transport to signal the LLLD that it has + * begun processing of a previously received NVME CMD IU. The LLDD + * is now free to re-use the rcv buffer associated with the + * nvmefc_tgt_fcp_req. + * Entrypoint is Optional. + * + * @max_hw_queues: indicates the maximum number of hw queues the LLDD + * supports for cpu affinitization. + * Value is Mandatory. Must be at least 1. + * + * @max_sgl_segments: indicates the maximum number of sgl segments supported + * by the LLDD + * Value is Mandatory. Must be at least 1. Recommend at least 256. + * + * @max_dif_sgl_segments: indicates the maximum number of sgl segments + * supported by the LLDD for DIF operations. + * Value is Mandatory. Must be at least 1. Recommend at least 256. + * + * @dma_boundary: indicates the dma address boundary where dma mappings + * will be split across. + * Value is Mandatory. Typical value is 0xFFFFFFFF to split across + * 4Gig address boundarys + * + * @target_features: The LLDD sets bits in this field to correspond to + * optional features that are supported by the LLDD. + * Refer to the NVMET_FCTGTFEAT_xxx values. + * Value is Mandatory. 
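fcp_req_release() hands exchange ownership back to the LLDD, as described above; a brief sketch assuming the hypothetical my_lldd_fcp_xchg wrapper (which embeds the transport request) from the earlier example.

#include <linux/kernel.h>       /* container_of() */

static void my_lldd_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                                    struct nvmefc_tgt_fcp_req *fcpreq)
{
        struct my_lldd_fcp_xchg *xchg =
                container_of(fcpreq, struct my_lldd_fcp_xchg, tgt_fcpreq);

        /* the exchange (and its receive buffer) may now service a new command */
        my_lldd_free_fcp_xchg(xchg);    /* hypothetical pool return */
}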
Allowed to be zero. + * + * @target_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like fc nvme layer to allocate on the LLDD's + * behalf whenever a targetport is allocated. The additional memory + * area solely for the of the LLDD and its location is specified by + * the targetport->private pointer. + * Value is Mandatory. Allowed to be zero. + */ +struct nvmet_fc_target_template { + void (*targetport_delete)(struct nvmet_fc_target_port *tgtport); + int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_ls_req *tls_req); + int (*fcp_op)(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *fcpreq); + void (*fcp_abort)(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *fcpreq); + void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *fcpreq); + void (*defer_rcv)(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *fcpreq); + + u32 max_hw_queues; + u16 max_sgl_segments; + u16 max_dif_sgl_segments; + u64 dma_boundary; + + u32 target_features; + + u32 target_priv_sz; +}; + + +int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo, + struct nvmet_fc_target_template *template, + struct device *dev, + struct nvmet_fc_target_port **tgtport_p); + +int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport); + +int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_ls_req *lsreq, + void *lsreqbuf, u32 lsreqbuf_len); + +int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *fcpreq, + void *cmdiubuf, u32 cmdiubuf_len); + +void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *fcpreq); + +#endif /* _NVME_FC_DRIVER_H */ diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h new file mode 100644 index 000000000..36cca93a5 --- /dev/null +++ b/include/linux/nvme-fc.h @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2016 Avago Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful. + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, + * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO + * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. + * See the GNU General Public License for more details, a copy of which + * can be found in the file COPYING included with this package + * + */ + +/* + * This file contains definitions relative to FC-NVME r1.14 (16-020vB). + * The fcnvme_lsdesc_cr_assoc_cmd struct reflects expected r1.16 content. 
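A hedged sketch combining targetport registration with the FCP receive path declared above; my_tgt_tmpl is assumed to be a statically initialized nvmet_fc_target_template wiring up the entrypoints documented here, and the single-port globals and helpers are purely illustrative.

static struct nvmet_fc_target_port *my_tgtport; /* hypothetical single-port LLDD */

static int my_lldd_nvmet_attach(struct device *dev, u64 wwnn, u64 wwpn)
{
        struct nvmet_fc_port_info pinfo = {
                .node_name = wwnn,
                .port_name = wwpn,
                .port_id   = 0x010300,          /* upper 8 bits zero */
        };

        return nvmet_fc_register_targetport(&pinfo, &my_tgt_tmpl, dev,
                                            &my_tgtport);
}

/* Unsolicited FCP CMD IU handler: hand the IU to the transport */
static void my_lldd_handle_cmd_iu(struct my_lldd_fcp_xchg *xchg,
                                  void *cmdiu, u32 cmdiu_len)
{
        if (nvmet_fc_rcv_fcp_req(my_tgtport, &xchg->tgt_fcpreq,
                                 cmdiu, cmdiu_len))
                my_lldd_free_fcp_xchg(xchg);    /* transport rejected the IU */
}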
+ */ + +#ifndef _NVME_FC_H +#define _NVME_FC_H 1 + + +#define NVME_CMD_SCSI_ID 0xFD +#define NVME_CMD_FC_ID FC_TYPE_NVME + +/* FC-NVME Cmd IU Flags */ +#define FCNVME_CMD_FLAGS_DIRMASK 0x03 +#define FCNVME_CMD_FLAGS_WRITE 0x01 +#define FCNVME_CMD_FLAGS_READ 0x02 + +struct nvme_fc_cmd_iu { + __u8 scsi_id; + __u8 fc_id; + __be16 iu_len; + __u8 rsvd4[3]; + __u8 flags; + __be64 connection_id; + __be32 csn; + __be32 data_len; + struct nvme_command sqe; + __be32 rsvd88[2]; +}; + +#define NVME_FC_SIZEOF_ZEROS_RSP 12 + +enum { + FCNVME_SC_SUCCESS = 0, + FCNVME_SC_INVALID_FIELD = 1, + FCNVME_SC_INVALID_CONNID = 2, +}; + +struct nvme_fc_ersp_iu { + __u8 status_code; + __u8 rsvd1; + __be16 iu_len; + __be32 rsn; + __be32 xfrd_len; + __be32 rsvd12; + struct nvme_completion cqe; + /* for now - no additional payload */ +}; + + +/* FC-NVME Link Services */ +enum { + FCNVME_LS_RSVD = 0, + FCNVME_LS_RJT = 1, + FCNVME_LS_ACC = 2, + FCNVME_LS_CREATE_ASSOCIATION = 3, + FCNVME_LS_CREATE_CONNECTION = 4, + FCNVME_LS_DISCONNECT = 5, +}; + +/* FC-NVME Link Service Descriptors */ +enum { + FCNVME_LSDESC_RSVD = 0x0, + FCNVME_LSDESC_RQST = 0x1, + FCNVME_LSDESC_RJT = 0x2, + FCNVME_LSDESC_CREATE_ASSOC_CMD = 0x3, + FCNVME_LSDESC_CREATE_CONN_CMD = 0x4, + FCNVME_LSDESC_DISCONN_CMD = 0x5, + FCNVME_LSDESC_CONN_ID = 0x6, + FCNVME_LSDESC_ASSOC_ID = 0x7, +}; + + +/* ********** start of Link Service Descriptors ********** */ + + +/* + * fills in length of a descriptor. Struture minus descriptor header + */ +static inline __be32 fcnvme_lsdesc_len(size_t sz) +{ + return cpu_to_be32(sz - (2 * sizeof(u32))); +} + +struct fcnvme_ls_rqst_w0 { + u8 ls_cmd; /* FCNVME_LS_xxx */ + u8 zeros[3]; +}; + +/* FCNVME_LSDESC_RQST */ +struct fcnvme_lsdesc_rqst { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + struct fcnvme_ls_rqst_w0 w0; + __be32 rsvd12; +}; + +/* FC-NVME LS RJT reason_code values */ +enum fcnvme_ls_rjt_reason { + FCNVME_RJT_RC_NONE = 0, + /* no reason - not to be sent */ + + FCNVME_RJT_RC_INVAL = 0x01, + /* invalid NVMe_LS command code */ + + FCNVME_RJT_RC_LOGIC = 0x03, + /* logical error */ + + FCNVME_RJT_RC_UNAB = 0x09, + /* unable to perform command request */ + + FCNVME_RJT_RC_UNSUP = 0x0b, + /* command not supported */ + + FCNVME_RJT_RC_INPROG = 0x0e, + /* command already in progress */ + + FCNVME_RJT_RC_INV_ASSOC = 0x40, + /* Invalid Association ID*/ + + FCNVME_RJT_RC_INV_CONN = 0x41, + /* Invalid Connection ID*/ + + FCNVME_RJT_RC_VENDOR = 0xff, + /* vendor specific error */ +}; + +/* FC-NVME LS RJT reason_explanation values */ +enum fcnvme_ls_rjt_explan { + FCNVME_RJT_EXP_NONE = 0x00, + /* No additional explanation */ + + FCNVME_RJT_EXP_OXID_RXID = 0x17, + /* invalid OX_ID-RX_ID combination */ + + FCNVME_RJT_EXP_INSUF_RES = 0x29, + /* insufficient resources */ + + FCNVME_RJT_EXP_UNAB_DATA = 0x2a, + /* unable to supply requested data */ + + FCNVME_RJT_EXP_INV_LEN = 0x2d, + /* Invalid payload length */ +}; + +/* FCNVME_LSDESC_RJT */ +struct fcnvme_lsdesc_rjt { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + u8 rsvd8; + + /* + * Reject reason and explanaction codes are generic + * to ELs's from LS-3. 
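fcnvme_lsdesc_len() above computes desc_len as the structure size minus the two header words (desc_tag and desc_len themselves); a short sketch filling a request descriptor with it, using only definitions from this header.

#include <linux/string.h>
#include <linux/nvme-fc.h>

static void my_fill_rqst_desc(struct fcnvme_lsdesc_rqst *rqst, u8 ls_cmd)
{
        memset(rqst, 0, sizeof(*rqst));
        rqst->desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
        /* descriptor length excludes the desc_tag/desc_len header words */
        rqst->desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
        rqst->w0.ls_cmd = ls_cmd;       /* e.g. FCNVME_LS_CREATE_ASSOCIATION */
}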
+ */ + u8 reason_code; /* fcnvme_ls_rjt_reason */ + u8 reason_explanation; /* fcnvme_ls_rjt_explan */ + + u8 vendor; + __be32 rsvd12; +}; + + +#define FCNVME_ASSOC_HOSTNQN_LEN 256 +#define FCNVME_ASSOC_SUBNQN_LEN 256 + +/* FCNVME_LSDESC_CREATE_ASSOC_CMD */ +struct fcnvme_lsdesc_cr_assoc_cmd { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be16 ersp_ratio; + __be16 rsvd10; + __be32 rsvd12[9]; + __be16 cntlid; + __be16 sqsize; + __be32 rsvd52; + uuid_t hostid; + u8 hostnqn[FCNVME_ASSOC_HOSTNQN_LEN]; + u8 subnqn[FCNVME_ASSOC_SUBNQN_LEN]; + __be32 rsvd584[108]; /* pad to 1016 bytes, + * which makes overall LS rqst + * payload 1024 bytes + */ +}; + +#define FCNVME_LSDESC_CRA_CMD_DESC_MINLEN \ + offsetof(struct fcnvme_lsdesc_cr_assoc_cmd, rsvd584) + +#define FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN \ + (FCNVME_LSDESC_CRA_CMD_DESC_MINLEN - \ + offsetof(struct fcnvme_lsdesc_cr_assoc_cmd, ersp_ratio)) + + + +/* FCNVME_LSDESC_CREATE_CONN_CMD */ +struct fcnvme_lsdesc_cr_conn_cmd { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be16 ersp_ratio; + __be16 rsvd10; + __be32 rsvd12[9]; + __be16 qid; + __be16 sqsize; + __be32 rsvd52; +}; + +/* Disconnect Scope Values */ +enum { + FCNVME_DISCONN_ASSOCIATION = 0, + FCNVME_DISCONN_CONNECTION = 1, +}; + +/* FCNVME_LSDESC_DISCONN_CMD */ +struct fcnvme_lsdesc_disconn_cmd { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + u8 rsvd8[3]; + /* note: scope is really a 1 bit field */ + u8 scope; /* FCNVME_DISCONN_xxx */ + __be32 rsvd12; + __be64 id; +}; + +/* FCNVME_LSDESC_CONN_ID */ +struct fcnvme_lsdesc_conn_id { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be64 connection_id; +}; + +/* FCNVME_LSDESC_ASSOC_ID */ +struct fcnvme_lsdesc_assoc_id { + __be32 desc_tag; /* FCNVME_LSDESC_xxx */ + __be32 desc_len; + __be64 association_id; +}; + +/* r_ctl values */ +enum { + FCNVME_RS_RCTL_DATA = 1, + FCNVME_RS_RCTL_XFER_RDY = 5, + FCNVME_RS_RCTL_RSP = 8, +}; + + +/* ********** start of Link Services ********** */ + + +/* FCNVME_LS_RJT */ +struct fcnvme_ls_rjt { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_rqst rqst; + struct fcnvme_lsdesc_rjt rjt; +}; + +/* FCNVME_LS_ACC */ +struct fcnvme_ls_acc_hdr { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_rqst rqst; + /* Followed by cmd-specific ACC descriptors, see next definitions */ +}; + +/* FCNVME_LS_CREATE_ASSOCIATION */ +struct fcnvme_ls_cr_assoc_rqst { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_cr_assoc_cmd assoc_cmd; +}; + +#define FCNVME_LSDESC_CRA_RQST_MINLEN \ + (offsetof(struct fcnvme_ls_cr_assoc_rqst, assoc_cmd) + \ + FCNVME_LSDESC_CRA_CMD_DESC_MINLEN) + +#define FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN \ + FCNVME_LSDESC_CRA_CMD_DESC_MINLEN + + +struct fcnvme_ls_cr_assoc_acc { + struct fcnvme_ls_acc_hdr hdr; + struct fcnvme_lsdesc_assoc_id associd; + struct fcnvme_lsdesc_conn_id connectid; +}; + + +/* FCNVME_LS_CREATE_CONNECTION */ +struct fcnvme_ls_cr_conn_rqst { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_assoc_id associd; + struct fcnvme_lsdesc_cr_conn_cmd connect_cmd; +}; + +struct fcnvme_ls_cr_conn_acc { + struct fcnvme_ls_acc_hdr hdr; + struct fcnvme_lsdesc_conn_id connectid; +}; + +/* FCNVME_LS_DISCONNECT */ +struct fcnvme_ls_disconnect_rqst { + struct fcnvme_ls_rqst_w0 w0; + __be32 desc_list_len; + struct fcnvme_lsdesc_assoc_id associd; + struct fcnvme_lsdesc_disconn_cmd discon_cmd; +}; + +struct 
fcnvme_ls_disconnect_acc { + struct fcnvme_ls_acc_hdr hdr; +}; + + +/* + * Yet to be defined in FC-NVME: + */ +#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */ +#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */ +#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */ + +/* + * TRADDR string must be of form "nn-<16hexdigits>:pn-<16hexdigits>" + * the string is allowed to be specified with or without a "0x" prefix + * infront of the <16hexdigits>. Without is considered the "min" string + * and with is considered the "max" string. The hexdigits may be upper + * or lower case. + */ +#define NVME_FC_TRADDR_NNLEN 3 /* "?n-" */ +#define NVME_FC_TRADDR_OXNNLEN 5 /* "?n-0x" */ +#define NVME_FC_TRADDR_HEXNAMELEN 16 +#define NVME_FC_TRADDR_MINLENGTH \ + (2 * (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1) +#define NVME_FC_TRADDR_MAXLENGTH \ + (2 * (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1) +#define NVME_FC_TRADDR_MIN_PN_OFFSET \ + (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1) +#define NVME_FC_TRADDR_MAX_PN_OFFSET \ + (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1) + + +#endif /* _NVME_FC_H */ diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h new file mode 100644 index 000000000..a72fd04aa --- /dev/null +++ b/include/linux/nvme-rdma.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
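The traddr length macros above distinguish the bare and 0x-prefixed spellings; a simplified sketch of how a parser might use them to locate the port-name portion (real parsing, including mixed prefixes, needs more care).

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/nvme-fc.h>

/* returns the offset of the "pn-" portion, or -EINVAL for a bad length */
static int my_traddr_pn_offset(const char *traddr)
{
        size_t len = strlen(traddr);

        if (len == NVME_FC_TRADDR_MINLENGTH)    /* nn-<16hex>:pn-<16hex> */
                return NVME_FC_TRADDR_MIN_PN_OFFSET;
        if (len == NVME_FC_TRADDR_MAXLENGTH)    /* nn-0x<16hex>:pn-0x<16hex> */
                return NVME_FC_TRADDR_MAX_PN_OFFSET;
        return -EINVAL;
}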
+ */ + +#ifndef _LINUX_NVME_RDMA_H +#define _LINUX_NVME_RDMA_H + +enum nvme_rdma_cm_fmt { + NVME_RDMA_CM_FMT_1_0 = 0x0, +}; + +enum nvme_rdma_cm_status { + NVME_RDMA_CM_INVALID_LEN = 0x01, + NVME_RDMA_CM_INVALID_RECFMT = 0x02, + NVME_RDMA_CM_INVALID_QID = 0x03, + NVME_RDMA_CM_INVALID_HSQSIZE = 0x04, + NVME_RDMA_CM_INVALID_HRQSIZE = 0x05, + NVME_RDMA_CM_NO_RSC = 0x06, + NVME_RDMA_CM_INVALID_IRD = 0x07, + NVME_RDMA_CM_INVALID_ORD = 0x08, +}; + +static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status) +{ + switch (status) { + case NVME_RDMA_CM_INVALID_LEN: + return "invalid length"; + case NVME_RDMA_CM_INVALID_RECFMT: + return "invalid record format"; + case NVME_RDMA_CM_INVALID_QID: + return "invalid queue ID"; + case NVME_RDMA_CM_INVALID_HSQSIZE: + return "invalid host SQ size"; + case NVME_RDMA_CM_INVALID_HRQSIZE: + return "invalid host RQ size"; + case NVME_RDMA_CM_NO_RSC: + return "resource not found"; + case NVME_RDMA_CM_INVALID_IRD: + return "invalid IRD"; + case NVME_RDMA_CM_INVALID_ORD: + return "Invalid ORD"; + default: + return "unrecognized reason"; + } +} + +/** + * struct nvme_rdma_cm_req - rdma connect request + * + * @recfmt: format of the RDMA Private Data + * @qid: queue Identifier for the Admin or I/O Queue + * @hrqsize: host receive queue size to be created + * @hsqsize: host send queue size to be created + */ +struct nvme_rdma_cm_req { + __le16 recfmt; + __le16 qid; + __le16 hrqsize; + __le16 hsqsize; + u8 rsvd[24]; +}; + +/** + * struct nvme_rdma_cm_rep - rdma connect reply + * + * @recfmt: format of the RDMA Private Data + * @crqsize: controller receive queue size + */ +struct nvme_rdma_cm_rep { + __le16 recfmt; + __le16 crqsize; + u8 rsvd[28]; +}; + +/** + * struct nvme_rdma_cm_rej - rdma connect reject + * + * @recfmt: format of the RDMA Private Data + * @fsts: error status for the associated connect request + */ +struct nvme_rdma_cm_rej { + __le16 recfmt; + __le16 sts; +}; + +#endif /* _LINUX_NVME_RDMA_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h new file mode 100644 index 000000000..818dbe933 --- /dev/null +++ b/include/linux/nvme.h @@ -0,0 +1,1271 @@ +/* + * Definitions for the NVM Express interface + * Copyright (c) 2011-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
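A brief sketch of filling the RDMA CM private data defined above for a queue connect, and of mapping a rejection status to text; the queue-size encoding shown is a straight copy of the caller's values and is only illustrative.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nvme-rdma.h>

static void my_fill_rdma_cm_req(struct nvme_rdma_cm_req *req,
                                u16 qid, u16 sq_size, u16 rq_size)
{
        memset(req, 0, sizeof(*req));
        req->recfmt  = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
        req->qid     = cpu_to_le16(qid);        /* 0 for the admin queue */
        req->hsqsize = cpu_to_le16(sq_size);    /* illustrative encoding */
        req->hrqsize = cpu_to_le16(rq_size);
}

static void my_log_cm_reject(enum nvme_rdma_cm_status sts)
{
        pr_err("NVMe/RDMA connect rejected: %s\n", nvme_rdma_cm_msg(sts));
}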
+ */ + +#ifndef _LINUX_NVME_H +#define _LINUX_NVME_H + +#include +#include + +/* NQN names in commands fields specified one size */ +#define NVMF_NQN_FIELD_LEN 256 + +/* However the max length of a qualified name is another size */ +#define NVMF_NQN_SIZE 223 + +#define NVMF_TRSVCID_SIZE 32 +#define NVMF_TRADDR_SIZE 256 +#define NVMF_TSAS_SIZE 256 + +#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery" + +#define NVME_RDMA_IP_PORT 4420 + +#define NVME_NSID_ALL 0xffffffff + +enum nvme_subsys_type { + NVME_NQN_DISC = 1, /* Discovery type target subsystem */ + NVME_NQN_NVME = 2, /* NVME type target subsystem */ +}; + +/* Address Family codes for Discovery Log Page entry ADRFAM field */ +enum { + NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */ + NVMF_ADDR_FAMILY_IP4 = 1, /* IP4 */ + NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */ + NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */ + NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */ +}; + +/* Transport Type codes for Discovery Log Page entry TRTYPE field */ +enum { + NVMF_TRTYPE_RDMA = 1, /* RDMA */ + NVMF_TRTYPE_FC = 2, /* Fibre Channel */ + NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */ + NVMF_TRTYPE_MAX, +}; + +/* Transport Requirements codes for Discovery Log Page entry TREQ field */ +enum { + NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */ + NVMF_TREQ_REQUIRED = 1, /* Required */ + NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */ +}; + +/* RDMA QP Service Type codes for Discovery Log Page entry TSAS + * RDMA_QPTYPE field + */ +enum { + NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */ + NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */ +}; + +/* RDMA QP Service Type codes for Discovery Log Page entry TSAS + * RDMA_QPTYPE field + */ +enum { + NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */ + NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */ + NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */ + NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */ + NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */ +}; + +/* RDMA Connection Management Service Type codes for Discovery Log Page + * entry TSAS RDMA_CMS field + */ +enum { + NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */ +}; + +#define NVME_AQ_DEPTH 32 +#define NVME_NR_AEN_COMMANDS 1 +#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS) + +/* + * Subtract one to leave an empty queue entry for 'Full Queue' condition. See + * NVM-Express 1.2 specification, section 4.1.2. 
+ */ +#define NVME_AQ_MQ_TAG_DEPTH (NVME_AQ_BLK_MQ_DEPTH - 1) + +enum { + NVME_REG_CAP = 0x0000, /* Controller Capabilities */ + NVME_REG_VS = 0x0008, /* Version */ + NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */ + NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */ + NVME_REG_CC = 0x0014, /* Controller Configuration */ + NVME_REG_CSTS = 0x001c, /* Controller Status */ + NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */ + NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */ + NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */ + NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */ + NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */ + NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */ + NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */ +}; + +#define NVME_CAP_MQES(cap) ((cap) & 0xffff) +#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) +#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) +#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1) +#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) +#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) + +#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7) +#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff) + +enum { + NVME_CMBSZ_SQS = 1 << 0, + NVME_CMBSZ_CQS = 1 << 1, + NVME_CMBSZ_LISTS = 1 << 2, + NVME_CMBSZ_RDS = 1 << 3, + NVME_CMBSZ_WDS = 1 << 4, + + NVME_CMBSZ_SZ_SHIFT = 12, + NVME_CMBSZ_SZ_MASK = 0xfffff, + + NVME_CMBSZ_SZU_SHIFT = 8, + NVME_CMBSZ_SZU_MASK = 0xf, +}; + +/* + * Submission and Completion Queue Entry Sizes for the NVM command set. + * (In bytes and specified as a power of two (2^n)). + */ +#define NVME_NVM_IOSQES 6 +#define NVME_NVM_IOCQES 4 + +enum { + NVME_CC_ENABLE = 1 << 0, + NVME_CC_CSS_NVM = 0 << 4, + NVME_CC_EN_SHIFT = 0, + NVME_CC_CSS_SHIFT = 4, + NVME_CC_MPS_SHIFT = 7, + NVME_CC_AMS_SHIFT = 11, + NVME_CC_SHN_SHIFT = 14, + NVME_CC_IOSQES_SHIFT = 16, + NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT, + NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT, + NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT, + NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT, + NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT, + NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT, + NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT, + NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT, + NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT, + NVME_CSTS_RDY = 1 << 0, + NVME_CSTS_CFS = 1 << 1, + NVME_CSTS_NSSRO = 1 << 4, + NVME_CSTS_PP = 1 << 5, + NVME_CSTS_SHST_NORMAL = 0 << 2, + NVME_CSTS_SHST_OCCUR = 1 << 2, + NVME_CSTS_SHST_CMPLT = 2 << 2, + NVME_CSTS_SHST_MASK = 3 << 2, +}; + +struct nvme_id_power_state { + __le16 max_power; /* centiwatts */ + __u8 rsvd2; + __u8 flags; + __le32 entry_lat; /* microseconds */ + __le32 exit_lat; /* microseconds */ + __u8 read_tput; + __u8 read_lat; + __u8 write_tput; + __u8 write_lat; + __le16 idle_power; + __u8 idle_scale; + __u8 rsvd19; + __le16 active_power; + __u8 active_work_scale; + __u8 rsvd23[9]; +}; + +enum { + NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0, + NVME_PS_FLAGS_NON_OP_STATE = 1 << 1, +}; + +struct nvme_id_ctrl { + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char fr[8]; + __u8 rab; + __u8 ieee[3]; + __u8 cmic; + __u8 mdts; + __le16 cntlid; + __le32 ver; + __le32 rtd3r; + __le32 rtd3e; + __le32 oaes; + __le32 ctratt; + __u8 rsvd100[156]; + __le16 oacs; + __u8 acl; + __u8 aerl; + __u8 frmw; + __u8 lpa; + __u8 elpe; + __u8 npss; + __u8 avscc; + __u8 apsta; + __le16 wctemp; + __le16 cctemp; + __le16 mtfa; + __le32 hmpre; + __le32 hmmin; + __u8 tnvmcap[16]; + __u8 unvmcap[16]; 
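A short sketch decoding a Controller Capabilities value with the NVME_CAP_xxx macros above; cap would normally be read from the BAR at NVME_REG_CAP, and the unit conversions follow the register definitions (MQES is 0's based, TO is in 500 ms units, DSTRD selects the doorbell stride, MPSMIN is an exponent added to 12).

#include <linux/kernel.h>
#include <linux/nvme.h>

static void my_decode_cap(u64 cap)
{
        u32 max_queue_entries = NVME_CAP_MQES(cap) + 1;      /* MQES is 0's based */
        u32 timeout_ms        = NVME_CAP_TIMEOUT(cap) * 500; /* 500 ms units */
        u32 db_stride         = 1 << NVME_CAP_STRIDE(cap);   /* in 4-byte slots */
        u32 min_page_shift    = 12 + NVME_CAP_MPSMIN(cap);

        pr_info("MQES=%u TO=%ums stride=%u MPSMIN=2^%u\n",
                max_queue_entries, timeout_ms, db_stride, min_page_shift);
}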
+ __le32 rpmbs; + __le16 edstt; + __u8 dsto; + __u8 fwug; + __le16 kas; + __le16 hctma; + __le16 mntmt; + __le16 mxtmt; + __le32 sanicap; + __le32 hmminds; + __le16 hmmaxd; + __u8 rsvd338[4]; + __u8 anatt; + __u8 anacap; + __le32 anagrpmax; + __le32 nanagrpid; + __u8 rsvd352[160]; + __u8 sqes; + __u8 cqes; + __le16 maxcmd; + __le32 nn; + __le16 oncs; + __le16 fuses; + __u8 fna; + __u8 vwc; + __le16 awun; + __le16 awupf; + __u8 nvscc; + __u8 nwpc; + __le16 acwu; + __u8 rsvd534[2]; + __le32 sgls; + __le32 mnan; + __u8 rsvd544[224]; + char subnqn[256]; + __u8 rsvd1024[768]; + __le32 ioccsz; + __le32 iorcsz; + __le16 icdoff; + __u8 ctrattr; + __u8 msdbd; + __u8 rsvd1804[244]; + struct nvme_id_power_state psd[32]; + __u8 vs[1024]; +}; + +enum { + NVME_CTRL_ONCS_COMPARE = 1 << 0, + NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, + NVME_CTRL_ONCS_DSM = 1 << 2, + NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, + NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, + NVME_CTRL_VWC_PRESENT = 1 << 0, + NVME_CTRL_OACS_SEC_SUPP = 1 << 0, + NVME_CTRL_OACS_DIRECTIVES = 1 << 5, + NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8, + NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1, +}; + +struct nvme_lbaf { + __le16 ms; + __u8 ds; + __u8 rp; +}; + +struct nvme_id_ns { + __le64 nsze; + __le64 ncap; + __le64 nuse; + __u8 nsfeat; + __u8 nlbaf; + __u8 flbas; + __u8 mc; + __u8 dpc; + __u8 dps; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __u8 rsvd33; + __le16 nawun; + __le16 nawupf; + __le16 nacwu; + __le16 nabsn; + __le16 nabo; + __le16 nabspf; + __le16 noiob; + __u8 nvmcap[16]; + __u8 rsvd64[28]; + __le32 anagrpid; + __u8 rsvd96[3]; + __u8 nsattr; + __u8 rsvd100[4]; + __u8 nguid[16]; + __u8 eui64[8]; + struct nvme_lbaf lbaf[16]; + __u8 rsvd192[192]; + __u8 vs[3712]; +}; + +enum { + NVME_ID_CNS_NS = 0x00, + NVME_ID_CNS_CTRL = 0x01, + NVME_ID_CNS_NS_ACTIVE_LIST = 0x02, + NVME_ID_CNS_NS_DESC_LIST = 0x03, + NVME_ID_CNS_NS_PRESENT_LIST = 0x10, + NVME_ID_CNS_NS_PRESENT = 0x11, + NVME_ID_CNS_CTRL_NS_LIST = 0x12, + NVME_ID_CNS_CTRL_LIST = 0x13, +}; + +enum { + NVME_DIR_IDENTIFY = 0x00, + NVME_DIR_STREAMS = 0x01, + NVME_DIR_SND_ID_OP_ENABLE = 0x01, + NVME_DIR_SND_ST_OP_REL_ID = 0x01, + NVME_DIR_SND_ST_OP_REL_RSC = 0x02, + NVME_DIR_RCV_ID_OP_PARAM = 0x01, + NVME_DIR_RCV_ST_OP_PARAM = 0x01, + NVME_DIR_RCV_ST_OP_STATUS = 0x02, + NVME_DIR_RCV_ST_OP_RESOURCE = 0x03, + NVME_DIR_ENDIR = 0x01, +}; + +enum { + NVME_NS_FEAT_THIN = 1 << 0, + NVME_NS_FLBAS_LBA_MASK = 0xf, + NVME_NS_FLBAS_META_EXT = 0x10, + NVME_LBAF_RP_BEST = 0, + NVME_LBAF_RP_BETTER = 1, + NVME_LBAF_RP_GOOD = 2, + NVME_LBAF_RP_DEGRADED = 3, + NVME_NS_DPC_PI_LAST = 1 << 4, + NVME_NS_DPC_PI_FIRST = 1 << 3, + NVME_NS_DPC_PI_TYPE3 = 1 << 2, + NVME_NS_DPC_PI_TYPE2 = 1 << 1, + NVME_NS_DPC_PI_TYPE1 = 1 << 0, + NVME_NS_DPS_PI_FIRST = 1 << 3, + NVME_NS_DPS_PI_MASK = 0x7, + NVME_NS_DPS_PI_TYPE1 = 1, + NVME_NS_DPS_PI_TYPE2 = 2, + NVME_NS_DPS_PI_TYPE3 = 3, +}; + +struct nvme_ns_id_desc { + __u8 nidt; + __u8 nidl; + __le16 reserved; +}; + +#define NVME_NIDT_EUI64_LEN 8 +#define NVME_NIDT_NGUID_LEN 16 +#define NVME_NIDT_UUID_LEN 16 + +enum { + NVME_NIDT_EUI64 = 0x01, + NVME_NIDT_NGUID = 0x02, + NVME_NIDT_UUID = 0x03, +}; + +struct nvme_smart_log { + __u8 critical_warning; + __u8 temperature[2]; + __u8 avail_spare; + __u8 spare_thresh; + __u8 percent_used; + __u8 rsvd6[26]; + __u8 data_units_read[16]; + __u8 data_units_written[16]; + __u8 host_reads[16]; + __u8 host_writes[16]; + __u8 ctrl_busy_time[16]; + __u8 power_cycles[16]; + __u8 power_on_hours[16]; + __u8 unsafe_shutdowns[16]; + __u8 media_errors[16]; + __u8 
num_err_log_entries[16]; + __le32 warning_temp_time; + __le32 critical_comp_time; + __le16 temp_sensor[8]; + __u8 rsvd216[296]; +}; + +struct nvme_fw_slot_info_log { + __u8 afi; + __u8 rsvd1[7]; + __le64 frs[7]; + __u8 rsvd64[448]; +}; + +enum { + NVME_CMD_EFFECTS_CSUPP = 1 << 0, + NVME_CMD_EFFECTS_LBCC = 1 << 1, + NVME_CMD_EFFECTS_NCC = 1 << 2, + NVME_CMD_EFFECTS_NIC = 1 << 3, + NVME_CMD_EFFECTS_CCC = 1 << 4, + NVME_CMD_EFFECTS_CSE_MASK = 3 << 16, +}; + +struct nvme_effects_log { + __le32 acs[256]; + __le32 iocs[256]; + __u8 resv[2048]; +}; + +enum nvme_ana_state { + NVME_ANA_OPTIMIZED = 0x01, + NVME_ANA_NONOPTIMIZED = 0x02, + NVME_ANA_INACCESSIBLE = 0x03, + NVME_ANA_PERSISTENT_LOSS = 0x04, + NVME_ANA_CHANGE = 0x0f, +}; + +struct nvme_ana_group_desc { + __le32 grpid; + __le32 nnsids; + __le64 chgcnt; + __u8 state; + __u8 rsvd17[15]; + __le32 nsids[]; +}; + +/* flag for the log specific field of the ANA log */ +#define NVME_ANA_LOG_RGO (1 << 0) + +struct nvme_ana_rsp_hdr { + __le64 chgcnt; + __le16 ngrps; + __le16 rsvd10[3]; +}; + +enum { + NVME_SMART_CRIT_SPARE = 1 << 0, + NVME_SMART_CRIT_TEMPERATURE = 1 << 1, + NVME_SMART_CRIT_RELIABILITY = 1 << 2, + NVME_SMART_CRIT_MEDIA = 1 << 3, + NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, +}; + +enum { + NVME_AER_ERROR = 0, + NVME_AER_SMART = 1, + NVME_AER_NOTICE = 2, + NVME_AER_CSS = 6, + NVME_AER_VS = 7, +}; + +enum { + NVME_AER_NOTICE_NS_CHANGED = 0x00, + NVME_AER_NOTICE_FW_ACT_STARTING = 0x01, + NVME_AER_NOTICE_ANA = 0x03, +}; + +enum { + NVME_AEN_CFG_NS_ATTR = 1 << 8, + NVME_AEN_CFG_FW_ACT = 1 << 9, + NVME_AEN_CFG_ANA_CHANGE = 1 << 11, +}; + +struct nvme_lba_range_type { + __u8 type; + __u8 attributes; + __u8 rsvd2[14]; + __u64 slba; + __u64 nlb; + __u8 guid[16]; + __u8 rsvd48[16]; +}; + +enum { + NVME_LBART_TYPE_FS = 0x01, + NVME_LBART_TYPE_RAID = 0x02, + NVME_LBART_TYPE_CACHE = 0x03, + NVME_LBART_TYPE_SWAP = 0x04, + + NVME_LBART_ATTRIB_TEMP = 1 << 0, + NVME_LBART_ATTRIB_HIDE = 1 << 1, +}; + +struct nvme_reservation_status { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[13]; + struct { + __le16 cntlid; + __u8 rcsts; + __u8 resv3[5]; + __le64 hostid; + __le64 rkey; + } regctl_ds[]; +}; + +enum nvme_async_event_type { + NVME_AER_TYPE_ERROR = 0, + NVME_AER_TYPE_SMART = 1, + NVME_AER_TYPE_NOTICE = 2, +}; + +/* I/O commands */ + +enum nvme_opcode { + nvme_cmd_flush = 0x00, + nvme_cmd_write = 0x01, + nvme_cmd_read = 0x02, + nvme_cmd_write_uncor = 0x04, + nvme_cmd_compare = 0x05, + nvme_cmd_write_zeroes = 0x08, + nvme_cmd_dsm = 0x09, + nvme_cmd_resv_register = 0x0d, + nvme_cmd_resv_report = 0x0e, + nvme_cmd_resv_acquire = 0x11, + nvme_cmd_resv_release = 0x15, +}; + +/* + * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier + * + * @NVME_SGL_FMT_ADDRESS: absolute address of the data block + * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block + * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA + * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation + * request subtype + */ +enum { + NVME_SGL_FMT_ADDRESS = 0x00, + NVME_SGL_FMT_OFFSET = 0x01, + NVME_SGL_FMT_TRANSPORT_A = 0x0A, + NVME_SGL_FMT_INVALIDATE = 0x0f, +}; + +/* + * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier + * + * For struct nvme_sgl_desc: + * @NVME_SGL_FMT_DATA_DESC: data block descriptor + * @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor + * @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor + * + * For struct nvme_keyed_sgl_desc: + * 
@NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor + * + * Transport-specific SGL types: + * @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data dlock descriptor + */ +enum { + NVME_SGL_FMT_DATA_DESC = 0x00, + NVME_SGL_FMT_SEG_DESC = 0x02, + NVME_SGL_FMT_LAST_SEG_DESC = 0x03, + NVME_KEY_SGL_FMT_DATA_DESC = 0x04, + NVME_TRANSPORT_SGL_DATA_DESC = 0x05, +}; + +struct nvme_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +struct nvme_keyed_sgl_desc { + __le64 addr; + __u8 length[3]; + __u8 key[4]; + __u8 type; +}; + +union nvme_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct nvme_sgl_desc sgl; + struct nvme_keyed_sgl_desc ksgl; +}; + +/* + * Lowest two bits of our flags field (FUSE field in the spec): + * + * @NVME_CMD_FUSE_FIRST: Fused Operation, first command + * @NVME_CMD_FUSE_SECOND: Fused Operation, second command + * + * Highest two bits in our flags field (PSDT field in the spec): + * + * @NVME_CMD_PSDT_SGL_METABUF: Use SGLS for this transfer, + * If used, MPTR contains addr of single physical buffer (byte aligned). + * @NVME_CMD_PSDT_SGL_METASEG: Use SGLS for this transfer, + * If used, MPTR contains an address of an SGL segment containing + * exactly 1 SGL descriptor (qword aligned). + */ +enum { + NVME_CMD_FUSE_FIRST = (1 << 0), + NVME_CMD_FUSE_SECOND = (1 << 1), + + NVME_CMD_SGL_METABUF = (1 << 6), + NVME_CMD_SGL_METASEG = (1 << 7), + NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG, +}; + +struct nvme_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + __le32 cdw10[6]; +}; + +struct nvme_rw_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + +enum { + NVME_RW_LR = 1 << 15, + NVME_RW_FUA = 1 << 14, + NVME_RW_DSM_FREQ_UNSPEC = 0, + NVME_RW_DSM_FREQ_TYPICAL = 1, + NVME_RW_DSM_FREQ_RARE = 2, + NVME_RW_DSM_FREQ_READS = 3, + NVME_RW_DSM_FREQ_WRITES = 4, + NVME_RW_DSM_FREQ_RW = 5, + NVME_RW_DSM_FREQ_ONCE = 6, + NVME_RW_DSM_FREQ_PREFETCH = 7, + NVME_RW_DSM_FREQ_TEMP = 8, + NVME_RW_DSM_LATENCY_NONE = 0 << 4, + NVME_RW_DSM_LATENCY_IDLE = 1 << 4, + NVME_RW_DSM_LATENCY_NORM = 2 << 4, + NVME_RW_DSM_LATENCY_LOW = 3 << 4, + NVME_RW_DSM_SEQ_REQ = 1 << 6, + NVME_RW_DSM_COMPRESSED = 1 << 7, + NVME_RW_PRINFO_PRCHK_REF = 1 << 10, + NVME_RW_PRINFO_PRCHK_APP = 1 << 11, + NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, + NVME_RW_PRINFO_PRACT = 1 << 13, + NVME_RW_DTYPE_STREAMS = 1 << 4, +}; + +struct nvme_dsm_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 nr; + __le32 attributes; + __u32 rsvd12[4]; +}; + +enum { + NVME_DSMGMT_IDR = 1 << 0, + NVME_DSMGMT_IDW = 1 << 1, + NVME_DSMGMT_AD = 1 << 2, +}; + +#define NVME_DSM_MAX_RANGES 256 + +struct nvme_dsm_range { + __le32 cattr; + __le32 nlb; + __le64 slba; +}; + +struct nvme_write_zeroes_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + +/* Features */ + +struct nvme_feat_auto_pst { + __le64 entries[32]; +}; + +enum { + NVME_HOST_MEM_ENABLE = (1 << 0), + NVME_HOST_MEM_RETURN = (1 << 1), +}; + +/* Admin commands */ + +enum 
nvme_admin_opcode { + nvme_admin_delete_sq = 0x00, + nvme_admin_create_sq = 0x01, + nvme_admin_get_log_page = 0x02, + nvme_admin_delete_cq = 0x04, + nvme_admin_create_cq = 0x05, + nvme_admin_identify = 0x06, + nvme_admin_abort_cmd = 0x08, + nvme_admin_set_features = 0x09, + nvme_admin_get_features = 0x0a, + nvme_admin_async_event = 0x0c, + nvme_admin_ns_mgmt = 0x0d, + nvme_admin_activate_fw = 0x10, + nvme_admin_download_fw = 0x11, + nvme_admin_ns_attach = 0x15, + nvme_admin_keep_alive = 0x18, + nvme_admin_directive_send = 0x19, + nvme_admin_directive_recv = 0x1a, + nvme_admin_dbbuf = 0x7C, + nvme_admin_format_nvm = 0x80, + nvme_admin_security_send = 0x81, + nvme_admin_security_recv = 0x82, + nvme_admin_sanitize_nvm = 0x84, +}; + +enum { + NVME_QUEUE_PHYS_CONTIG = (1 << 0), + NVME_CQ_IRQ_ENABLED = (1 << 1), + NVME_SQ_PRIO_URGENT = (0 << 1), + NVME_SQ_PRIO_HIGH = (1 << 1), + NVME_SQ_PRIO_MEDIUM = (2 << 1), + NVME_SQ_PRIO_LOW = (3 << 1), + NVME_FEAT_ARBITRATION = 0x01, + NVME_FEAT_POWER_MGMT = 0x02, + NVME_FEAT_LBA_RANGE = 0x03, + NVME_FEAT_TEMP_THRESH = 0x04, + NVME_FEAT_ERR_RECOVERY = 0x05, + NVME_FEAT_VOLATILE_WC = 0x06, + NVME_FEAT_NUM_QUEUES = 0x07, + NVME_FEAT_IRQ_COALESCE = 0x08, + NVME_FEAT_IRQ_CONFIG = 0x09, + NVME_FEAT_WRITE_ATOMIC = 0x0a, + NVME_FEAT_ASYNC_EVENT = 0x0b, + NVME_FEAT_AUTO_PST = 0x0c, + NVME_FEAT_HOST_MEM_BUF = 0x0d, + NVME_FEAT_TIMESTAMP = 0x0e, + NVME_FEAT_KATO = 0x0f, + NVME_FEAT_HCTM = 0x10, + NVME_FEAT_NOPSC = 0x11, + NVME_FEAT_RRL = 0x12, + NVME_FEAT_PLM_CONFIG = 0x13, + NVME_FEAT_PLM_WINDOW = 0x14, + NVME_FEAT_SW_PROGRESS = 0x80, + NVME_FEAT_HOST_ID = 0x81, + NVME_FEAT_RESV_MASK = 0x82, + NVME_FEAT_RESV_PERSIST = 0x83, + NVME_FEAT_WRITE_PROTECT = 0x84, + NVME_LOG_ERROR = 0x01, + NVME_LOG_SMART = 0x02, + NVME_LOG_FW_SLOT = 0x03, + NVME_LOG_CHANGED_NS = 0x04, + NVME_LOG_CMD_EFFECTS = 0x05, + NVME_LOG_ANA = 0x0c, + NVME_LOG_DISC = 0x70, + NVME_LOG_RESERVATION = 0x80, + NVME_FWACT_REPL = (0 << 3), + NVME_FWACT_REPL_ACTV = (1 << 3), + NVME_FWACT_ACTV = (2 << 3), +}; + +/* NVMe Namespace Write Protect State */ +enum { + NVME_NS_NO_WRITE_PROTECT = 0, + NVME_NS_WRITE_PROTECT, + NVME_NS_WRITE_PROTECT_POWER_CYCLE, + NVME_NS_WRITE_PROTECT_PERMANENT, +}; + +#define NVME_MAX_CHANGED_NAMESPACES 1024 + +struct nvme_identify { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 cns; + __u8 rsvd3; + __le16 ctrlid; + __u32 rsvd11[5]; +}; + +#define NVME_IDENTIFY_DATA_SIZE 4096 + +struct nvme_features { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 fid; + __le32 dword11; + __le32 dword12; + __le32 dword13; + __le32 dword14; + __le32 dword15; +}; + +struct nvme_host_mem_buf_desc { + __le64 addr; + __le32 size; + __u32 rsvd; +}; + +struct nvme_create_cq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +struct nvme_create_sq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct nvme_delete_queue { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct nvme_abort_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 sqid; + __u16 cid; + __u32 rsvd11[5]; +}; + 
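As an illustration of the admin command layouts above, a minimal Identify Controller builder; the DMA address is a placeholder assumed to point at an NVME_IDENTIFY_DATA_SIZE buffer.

#include <linux/string.h>
#include <linux/types.h>
#include <linux/nvme.h>

static void my_build_identify_ctrl(struct nvme_identify *id, dma_addr_t buf_dma)
{
        memset(id, 0, sizeof(*id));
        id->opcode = nvme_admin_identify;
        id->cns = NVME_ID_CNS_CTRL;             /* controller data structure; NSID unused */
        id->dptr.prp1 = cpu_to_le64(buf_dma);   /* NVME_IDENTIFY_DATA_SIZE buffer */
}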
+struct nvme_download_firmware { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + union nvme_data_ptr dptr; + __le32 numd; + __le32 offset; + __u32 rsvd12[4]; +}; + +struct nvme_format_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[4]; + __le32 cdw10; + __u32 rsvd11[5]; +}; + +struct nvme_get_log_page_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 lid; + __u8 lsp; /* upper 4 bits reserved */ + __le16 numdl; + __le16 numdu; + __u16 rsvd11; + __le32 lpol; + __le32 lpou; + __u32 rsvd14[2]; +}; + +struct nvme_directive_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 numd; + __u8 doper; + __u8 dtype; + __le16 dspec; + __u8 endir; + __u8 tdtype; + __u16 rsvd15; + + __u32 rsvd16[3]; +}; + +/* + * Fabrics subcommands. + */ +enum nvmf_fabrics_opcode { + nvme_fabrics_command = 0x7f, +}; + +enum nvmf_capsule_command { + nvme_fabrics_type_property_set = 0x00, + nvme_fabrics_type_connect = 0x01, + nvme_fabrics_type_property_get = 0x04, +}; + +struct nvmf_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 ts[24]; +}; + +/* + * The legal cntlid range a NVMe Target will provide. + * Note that cntlid of value 0 is considered illegal in the fabrics world. + * Devices based on earlier specs did not have the subsystem concept; + * therefore, those devices had their cntlid value set to 0 as a result. + */ +#define NVME_CNTLID_MIN 1 +#define NVME_CNTLID_MAX 0xffef +#define NVME_CNTLID_DYNAMIC 0xffff + +#define MAX_DISC_LOGS 255 + +/* Discovery log page entry */ +struct nvmf_disc_rsp_page_entry { + __u8 trtype; + __u8 adrfam; + __u8 subtype; + __u8 treq; + __le16 portid; + __le16 cntlid; + __le16 asqsz; + __u8 resv8[22]; + char trsvcid[NVMF_TRSVCID_SIZE]; + __u8 resv64[192]; + char subnqn[NVMF_NQN_FIELD_LEN]; + char traddr[NVMF_TRADDR_SIZE]; + union tsas { + char common[NVMF_TSAS_SIZE]; + struct rdma { + __u8 qptype; + __u8 prtype; + __u8 cms; + __u8 resv3[5]; + __u16 pkey; + __u8 resv10[246]; + } rdma; + } tsas; +}; + +/* Discovery log page header */ +struct nvmf_disc_rsp_page_hdr { + __le64 genctr; + __le64 numrec; + __le16 recfmt; + __u8 resv14[1006]; + struct nvmf_disc_rsp_page_entry entries[0]; +}; + +struct nvmf_connect_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __le16 recfmt; + __le16 qid; + __le16 sqsize; + __u8 cattr; + __u8 resv3; + __le32 kato; + __u8 resv4[12]; +}; + +struct nvmf_connect_data { + uuid_t hostid; + __le16 cntlid; + char resv4[238]; + char subsysnqn[NVMF_NQN_FIELD_LEN]; + char hostnqn[NVMF_NQN_FIELD_LEN]; + char resv5[256]; +}; + +struct nvmf_property_set_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __le64 value; + __u8 resv4[8]; +}; + +struct nvmf_property_get_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __u8 resv4[16]; +}; + +struct nvme_dbbuf { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __le64 prp2; + __u32 rsvd12[6]; +}; + +struct streams_directive_params { + __le16 msl; + __le16 nssa; + __le16 nsso; + __u8 rsvd[10]; + __le32 sws; + __le16 sgs; + __le16 nsa; + __le16 nso; + __u8 rsvd2[6]; +}; + +struct 
nvme_command { + union { + struct nvme_common_command common; + struct nvme_rw_command rw; + struct nvme_identify identify; + struct nvme_features features; + struct nvme_create_cq create_cq; + struct nvme_create_sq create_sq; + struct nvme_delete_queue delete_queue; + struct nvme_download_firmware dlfw; + struct nvme_format_cmd format; + struct nvme_dsm_cmd dsm; + struct nvme_write_zeroes_cmd write_zeroes; + struct nvme_abort_cmd abort; + struct nvme_get_log_page_command get_log_page; + struct nvmf_common_command fabrics; + struct nvmf_connect_command connect; + struct nvmf_property_set_command prop_set; + struct nvmf_property_get_command prop_get; + struct nvme_dbbuf dbbuf; + struct nvme_directive_cmd directive; + }; +}; + +static inline bool nvme_is_write(struct nvme_command *cmd) +{ + /* + * What a mess... + * + * Why can't we simply have a Fabrics In and Fabrics out command? + */ + if (unlikely(cmd->common.opcode == nvme_fabrics_command)) + return cmd->fabrics.fctype & 1; + return cmd->common.opcode & 1; +} + +enum { + /* + * Generic Command Status: + */ + NVME_SC_SUCCESS = 0x0, + NVME_SC_INVALID_OPCODE = 0x1, + NVME_SC_INVALID_FIELD = 0x2, + NVME_SC_CMDID_CONFLICT = 0x3, + NVME_SC_DATA_XFER_ERROR = 0x4, + NVME_SC_POWER_LOSS = 0x5, + NVME_SC_INTERNAL = 0x6, + NVME_SC_ABORT_REQ = 0x7, + NVME_SC_ABORT_QUEUE = 0x8, + NVME_SC_FUSED_FAIL = 0x9, + NVME_SC_FUSED_MISSING = 0xa, + NVME_SC_INVALID_NS = 0xb, + NVME_SC_CMD_SEQ_ERROR = 0xc, + NVME_SC_SGL_INVALID_LAST = 0xd, + NVME_SC_SGL_INVALID_COUNT = 0xe, + NVME_SC_SGL_INVALID_DATA = 0xf, + NVME_SC_SGL_INVALID_METADATA = 0x10, + NVME_SC_SGL_INVALID_TYPE = 0x11, + + NVME_SC_SGL_INVALID_OFFSET = 0x16, + NVME_SC_SGL_INVALID_SUBTYPE = 0x17, + + NVME_SC_NS_WRITE_PROTECTED = 0x20, + + NVME_SC_LBA_RANGE = 0x80, + NVME_SC_CAP_EXCEEDED = 0x81, + NVME_SC_NS_NOT_READY = 0x82, + NVME_SC_RESERVATION_CONFLICT = 0x83, + + /* + * Command Specific Status: + */ + NVME_SC_CQ_INVALID = 0x100, + NVME_SC_QID_INVALID = 0x101, + NVME_SC_QUEUE_SIZE = 0x102, + NVME_SC_ABORT_LIMIT = 0x103, + NVME_SC_ABORT_MISSING = 0x104, + NVME_SC_ASYNC_LIMIT = 0x105, + NVME_SC_FIRMWARE_SLOT = 0x106, + NVME_SC_FIRMWARE_IMAGE = 0x107, + NVME_SC_INVALID_VECTOR = 0x108, + NVME_SC_INVALID_LOG_PAGE = 0x109, + NVME_SC_INVALID_FORMAT = 0x10a, + NVME_SC_FW_NEEDS_CONV_RESET = 0x10b, + NVME_SC_INVALID_QUEUE = 0x10c, + NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, + NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, + NVME_SC_FEATURE_NOT_PER_NS = 0x10f, + NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110, + NVME_SC_FW_NEEDS_RESET = 0x111, + NVME_SC_FW_NEEDS_MAX_TIME = 0x112, + NVME_SC_FW_ACIVATE_PROHIBITED = 0x113, + NVME_SC_OVERLAPPING_RANGE = 0x114, + NVME_SC_NS_INSUFFICENT_CAP = 0x115, + NVME_SC_NS_ID_UNAVAILABLE = 0x116, + NVME_SC_NS_ALREADY_ATTACHED = 0x118, + NVME_SC_NS_IS_PRIVATE = 0x119, + NVME_SC_NS_NOT_ATTACHED = 0x11a, + NVME_SC_THIN_PROV_NOT_SUPP = 0x11b, + NVME_SC_CTRL_LIST_INVALID = 0x11c, + + /* + * I/O Command Set Specific - NVM commands: + */ + NVME_SC_BAD_ATTRIBUTES = 0x180, + NVME_SC_INVALID_PI = 0x181, + NVME_SC_READ_ONLY = 0x182, + NVME_SC_ONCS_NOT_SUPPORTED = 0x183, + + /* + * I/O Command Set Specific - Fabrics commands: + */ + NVME_SC_CONNECT_FORMAT = 0x180, + NVME_SC_CONNECT_CTRL_BUSY = 0x181, + NVME_SC_CONNECT_INVALID_PARAM = 0x182, + NVME_SC_CONNECT_RESTART_DISC = 0x183, + NVME_SC_CONNECT_INVALID_HOST = 0x184, + + NVME_SC_DISCOVERY_RESTART = 0x190, + NVME_SC_AUTH_REQUIRED = 0x191, + + /* + * Media and Data Integrity Errors: + */ + NVME_SC_WRITE_FAULT = 0x280, + NVME_SC_READ_ERROR = 0x281, + 
NVME_SC_GUARD_CHECK = 0x282, + NVME_SC_APPTAG_CHECK = 0x283, + NVME_SC_REFTAG_CHECK = 0x284, + NVME_SC_COMPARE_FAILED = 0x285, + NVME_SC_ACCESS_DENIED = 0x286, + NVME_SC_UNWRITTEN_BLOCK = 0x287, + + /* + * Path-related Errors: + */ + NVME_SC_ANA_PERSISTENT_LOSS = 0x301, + NVME_SC_ANA_INACCESSIBLE = 0x302, + NVME_SC_ANA_TRANSITION = 0x303, + NVME_SC_HOST_PATH_ERROR = 0x370, + + NVME_SC_DNR = 0x4000, +}; + +struct nvme_completion { + /* + * Used by Admin and Fabrics commands to return data: + */ + union nvme_result { + __le16 u16; + __le32 u32; + __le64 u64; + } result; + __le16 sq_head; /* how much of this queue may be reclaimed */ + __le16 sq_id; /* submission queue that generated this entry */ + __u16 command_id; /* of the command which completed */ + __le16 status; /* did the command fail, and if so, why? */ +}; + +#define NVME_VS(major, minor, tertiary) \ + (((major) << 16) | ((minor) << 8) | (tertiary)) + +#define NVME_MAJOR(ver) ((ver) >> 16) +#define NVME_MINOR(ver) (((ver) >> 8) & 0xff) +#define NVME_TERTIARY(ver) ((ver) & 0xff) + +#endif /* _LINUX_NVME_H */ diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h new file mode 100644 index 000000000..4e85447f7 --- /dev/null +++ b/include/linux/nvmem-consumer.h @@ -0,0 +1,167 @@ +/* + * nvmem framework consumer. + * + * Copyright (C) 2015 Srinivas Kandagatla + * Copyright (C) 2013 Maxime Ripard + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _LINUX_NVMEM_CONSUMER_H +#define _LINUX_NVMEM_CONSUMER_H + +#include +#include + +struct device; +struct device_node; +/* consumer cookie */ +struct nvmem_cell; +struct nvmem_device; + +struct nvmem_cell_info { + const char *name; + unsigned int offset; + unsigned int bytes; + unsigned int bit_offset; + unsigned int nbits; +}; + +#if IS_ENABLED(CONFIG_NVMEM) + +/* Cell based interface */ +struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name); +struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name); +void nvmem_cell_put(struct nvmem_cell *cell); +void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); +void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); +int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len); +int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val); + +/* direct nvmem device read/write interface */ +struct nvmem_device *nvmem_device_get(struct device *dev, const char *name); +struct nvmem_device *devm_nvmem_device_get(struct device *dev, + const char *name); +void nvmem_device_put(struct nvmem_device *nvmem); +void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem); +int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset, + size_t bytes, void *buf); +int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset, + size_t bytes, void *buf); +ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf); +int nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf); + +#else + +static inline struct nvmem_cell *nvmem_cell_get(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void devm_nvmem_cell_put(struct device *dev, + 
struct nvmem_cell *cell) +{ + +} +static inline void nvmem_cell_put(struct nvmem_cell *cell) +{ +} + +static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +{ + return ERR_PTR(-ENOSYS); +} + +static inline int nvmem_cell_write(struct nvmem_cell *cell, + const char *buf, size_t len) +{ + return -ENOSYS; +} + +static inline int nvmem_cell_read_u32(struct device *dev, + const char *cell_id, u32 *val) +{ + return -ENOSYS; +} + +static inline struct nvmem_device *nvmem_device_get(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void nvmem_device_put(struct nvmem_device *nvmem) +{ +} + +static inline void devm_nvmem_device_put(struct device *dev, + struct nvmem_device *nvmem) +{ +} + +static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, + void *buf) +{ + return -ENOSYS; +} + +static inline int nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, + void *buf) +{ + return -ENOSYS; +} + +static inline int nvmem_device_read(struct nvmem_device *nvmem, + unsigned int offset, size_t bytes, + void *buf) +{ + return -ENOSYS; +} + +static inline int nvmem_device_write(struct nvmem_device *nvmem, + unsigned int offset, size_t bytes, + void *buf) +{ + return -ENOSYS; +} +#endif /* CONFIG_NVMEM */ + +#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) +struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, + const char *name); +struct nvmem_device *of_nvmem_device_get(struct device_node *np, + const char *name); +#else +static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} +#endif /* CONFIG_NVMEM && CONFIG_OF */ + +#endif /* ifndef _LINUX_NVMEM_CONSUMER_H */ diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h new file mode 100644 index 000000000..24def6ad0 --- /dev/null +++ b/include/linux/nvmem-provider.h @@ -0,0 +1,116 @@ +/* + * nvmem framework provider. + * + * Copyright (C) 2015 Srinivas Kandagatla + * Copyright (C) 2013 Maxime Ripard + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _LINUX_NVMEM_PROVIDER_H +#define _LINUX_NVMEM_PROVIDER_H + +#include +#include + +struct nvmem_device; +struct nvmem_cell_info; +typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset, + void *val, size_t bytes); +typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, + void *val, size_t bytes); + +/** + * struct nvmem_config - NVMEM device configuration + * + * @dev: Parent device. + * @name: Optional name. + * @id: Optional device ID used in full name. Ignored if name is NULL. + * @owner: Pointer to exporter module. Used for refcounting. + * @cells: Optional array of pre-defined NVMEM cells. + * @ncells: Number of elements in cells. + * @read_only: Device is read-only. + * @root_only: Device is accessibly to root only. + * @reg_read: Callback to read data. + * @reg_write: Callback to write data. + * @size: Device size. + * @word_size: Minimum read/write access granularity. + * @stride: Minimum read/write access stride. 
+ * @priv: User context passed to read/write callbacks. + * + * Note: A default "nvmem" name will be assigned to the device if + * no name is specified in its configuration. In such case "" is + * generated with ida_simple_get() and provided id field is ignored. + * + * Note: Specifying name and setting id to -1 implies a unique device + * whose name is provided as-is (kept unaltered). + */ +struct nvmem_config { + struct device *dev; + const char *name; + int id; + struct module *owner; + const struct nvmem_cell_info *cells; + int ncells; + bool read_only; + bool root_only; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; + /* To be only used by old driver/misc/eeprom drivers */ + bool compat; + struct device *base_dev; +}; + +#if IS_ENABLED(CONFIG_NVMEM) + +struct nvmem_device *nvmem_register(const struct nvmem_config *cfg); +int nvmem_unregister(struct nvmem_device *nvmem); + +struct nvmem_device *devm_nvmem_register(struct device *dev, + const struct nvmem_config *cfg); + +int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem); + +int nvmem_add_cells(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + int ncells); +#else + +static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c) +{ + return ERR_PTR(-ENOSYS); +} + +static inline int nvmem_unregister(struct nvmem_device *nvmem) +{ + return -ENOSYS; +} + +static inline struct nvmem_device * +devm_nvmem_register(struct device *dev, const struct nvmem_config *c) +{ + return nvmem_register(c); +} + +static inline int +devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) +{ + return nvmem_unregister(nvmem); + +} + +static inline int nvmem_add_cells(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + int ncells) +{ + return -ENOSYS; +} + +#endif /* CONFIG_NVMEM */ +#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ diff --git a/include/linux/nvram.h b/include/linux/nvram.h new file mode 100644 index 000000000..28bfb9ab9 --- /dev/null +++ b/include/linux/nvram.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_NVRAM_H +#define _LINUX_NVRAM_H + +#include + +/* __foo is foo without grabbing the rtc_lock - get it yourself */ +extern unsigned char __nvram_read_byte(int i); +extern unsigned char nvram_read_byte(int i); +extern void __nvram_write_byte(unsigned char c, int i); +extern void nvram_write_byte(unsigned char c, int i); +extern int __nvram_check_checksum(void); +extern int nvram_check_checksum(void); +#endif /* _LINUX_NVRAM_H */ diff --git a/include/linux/of.h b/include/linux/of.h new file mode 100644 index 000000000..6429f0034 --- /dev/null +++ b/include/linux/of.h @@ -0,0 +1,1457 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _LINUX_OF_H +#define _LINUX_OF_H +/* + * Definitions for talking to the Open Firmware PROM on + * Power Macintosh and other computers. + * + * Copyright (C) 1996-2005 Paul Mackerras. + * + * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. + * Updates for SPARC64 by David S. Miller + * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +typedef u32 phandle; +typedef u32 ihandle; + +struct property { + char *name; + int length; + void *value; + struct property *next; +#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC) + unsigned long _flags; +#endif +#if defined(CONFIG_OF_PROMTREE) + unsigned int unique_id; +#endif +#if defined(CONFIG_OF_KOBJ) + struct bin_attribute attr; +#endif +}; + +#if defined(CONFIG_SPARC) +struct of_irq_controller; +#endif + +struct device_node { + const char *name; + const char *type; + phandle phandle; + const char *full_name; + struct fwnode_handle fwnode; + + struct property *properties; + struct property *deadprops; /* removed properties */ + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; +#if defined(CONFIG_OF_KOBJ) + struct kobject kobj; +#endif + unsigned long _flags; + void *data; +#if defined(CONFIG_SPARC) + const char *path_component_name; + unsigned int unique_id; + struct of_irq_controller *irq_trans; +#endif +}; + +#define MAX_PHANDLE_ARGS 16 +struct of_phandle_args { + struct device_node *np; + int args_count; + uint32_t args[MAX_PHANDLE_ARGS]; +}; + +struct of_phandle_iterator { + /* Common iterator information */ + const char *cells_name; + int cell_count; + const struct device_node *parent; + + /* List size information */ + const __be32 *list_end; + const __be32 *phandle_end; + + /* Current position state */ + const __be32 *cur; + uint32_t cur_count; + phandle phandle; + struct device_node *node; +}; + +struct of_reconfig_data { + struct device_node *dn; + struct property *prop; + struct property *old_prop; +}; + +/* initialize a node */ +extern struct kobj_type of_node_ktype; +extern const struct fwnode_operations of_fwnode_ops; +static inline void of_node_init(struct device_node *node) +{ +#if defined(CONFIG_OF_KOBJ) + kobject_init(&node->kobj, &of_node_ktype); +#endif + node->fwnode.ops = &of_fwnode_ops; +} + +#if defined(CONFIG_OF_KOBJ) +#define of_node_kobj(n) (&(n)->kobj) +#else +#define of_node_kobj(n) NULL +#endif + +#ifdef CONFIG_OF_DYNAMIC +extern struct device_node *of_node_get(struct device_node *node); +extern void of_node_put(struct device_node *node); +#else /* CONFIG_OF_DYNAMIC */ +/* Dummy ref counting routines - to be implemented later */ +static inline struct device_node *of_node_get(struct device_node *node) +{ + return node; +} +static inline void of_node_put(struct device_node *node) { } +#endif /* !CONFIG_OF_DYNAMIC */ + +/* Pointer for first entry in chain of all nodes. 
*/ +extern struct device_node *of_root; +extern struct device_node *of_chosen; +extern struct device_node *of_aliases; +extern struct device_node *of_stdout; +extern raw_spinlock_t devtree_lock; + +/* + * struct device_node flag descriptions + * (need to be visible even when !CONFIG_OF) + */ +#define OF_DYNAMIC 1 /* (and properties) allocated via kmalloc */ +#define OF_DETACHED 2 /* detached from the device tree */ +#define OF_POPULATED 3 /* device already created */ +#define OF_POPULATED_BUS 4 /* platform bus created for children */ +#define OF_OVERLAY 5 /* allocated for an overlay */ +#define OF_OVERLAY_FREE_CSET 6 /* in overlay cset being freed */ + +#define OF_BAD_ADDR ((u64)-1) + +#ifdef CONFIG_OF +void of_core_init(void); + +static inline bool is_of_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &of_fwnode_ops; +} + +#define to_of_node(__fwnode) \ + ({ \ + typeof(__fwnode) __to_of_node_fwnode = (__fwnode); \ + \ + is_of_node(__to_of_node_fwnode) ? \ + container_of(__to_of_node_fwnode, \ + struct device_node, fwnode) : \ + NULL; \ + }) + +#define of_fwnode_handle(node) \ + ({ \ + typeof(node) __of_fwnode_handle_node = (node); \ + \ + __of_fwnode_handle_node ? \ + &__of_fwnode_handle_node->fwnode : NULL; \ + }) + +static inline bool of_have_populated_dt(void) +{ + return of_root != NULL; +} + +static inline bool of_node_is_root(const struct device_node *node) +{ + return node && (node->parent == NULL); +} + +static inline int of_node_check_flag(struct device_node *n, unsigned long flag) +{ + return test_bit(flag, &n->_flags); +} + +static inline int of_node_test_and_set_flag(struct device_node *n, + unsigned long flag) +{ + return test_and_set_bit(flag, &n->_flags); +} + +static inline void of_node_set_flag(struct device_node *n, unsigned long flag) +{ + set_bit(flag, &n->_flags); +} + +static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) +{ + clear_bit(flag, &n->_flags); +} + +#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC) +static inline int of_property_check_flag(struct property *p, unsigned long flag) +{ + return test_bit(flag, &p->_flags); +} + +static inline void of_property_set_flag(struct property *p, unsigned long flag) +{ + set_bit(flag, &p->_flags); +} + +static inline void of_property_clear_flag(struct property *p, unsigned long flag) +{ + clear_bit(flag, &p->_flags); +} +#endif + +extern struct device_node *__of_find_all_nodes(struct device_node *prev); +extern struct device_node *of_find_all_nodes(struct device_node *prev); + +/* + * OF address retrieval & translation + */ + +/* Helper to read a big number; size is in cells (not bytes) */ +static inline u64 of_read_number(const __be32 *cell, int size) +{ + u64 r = 0; + for (; size--; cell++) + r = (r << 32) | be32_to_cpu(*cell); + return r; +} + +/* Like of_read_number, but we want an unsigned long result */ +static inline unsigned long of_read_ulong(const __be32 *cell, int size) +{ + /* toss away upper bits if unsigned long is smaller than u64 */ + return of_read_number(cell, size); +} + +#if defined(CONFIG_SPARC) +#include +#endif + +/* Default #address and #size cells. 
Allow arch asm/prom.h to override */ +#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT) +#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1 +#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 +#endif + +#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) +#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) + +extern bool of_node_name_eq(const struct device_node *np, const char *name); +extern bool of_node_name_prefix(const struct device_node *np, const char *prefix); + +static inline const char *of_node_full_name(const struct device_node *np) +{ + return np ? np->full_name : ""; +} + +#define for_each_of_allnodes_from(from, dn) \ + for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) +#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) +extern struct device_node *of_find_node_by_name(struct device_node *from, + const char *name); +extern struct device_node *of_find_node_by_type(struct device_node *from, + const char *type); +extern struct device_node *of_find_compatible_node(struct device_node *from, + const char *type, const char *compat); +extern struct device_node *of_find_matching_node_and_match( + struct device_node *from, + const struct of_device_id *matches, + const struct of_device_id **match); + +extern struct device_node *of_find_node_opts_by_path(const char *path, + const char **opts); +static inline struct device_node *of_find_node_by_path(const char *path) +{ + return of_find_node_opts_by_path(path, NULL); +} + +extern struct device_node *of_find_node_by_phandle(phandle handle); +extern struct device_node *of_get_parent(const struct device_node *node); +extern struct device_node *of_get_next_parent(struct device_node *node); +extern struct device_node *of_get_next_child(const struct device_node *node, + struct device_node *prev); +extern struct device_node *of_get_next_available_child( + const struct device_node *node, struct device_node *prev); + +extern struct device_node *of_get_compatible_child(const struct device_node *parent, + const char *compatible); +extern struct device_node *of_get_child_by_name(const struct device_node *node, + const char *name); + +/* cache lookup */ +extern struct device_node *of_find_next_cache_node(const struct device_node *); +extern int of_find_last_cache_level(unsigned int cpu); +extern struct device_node *of_find_node_with_property( + struct device_node *from, const char *prop_name); + +extern struct property *of_find_property(const struct device_node *np, + const char *name, + int *lenp); +extern int of_property_count_elems_of_size(const struct device_node *np, + const char *propname, int elem_size); +extern int of_property_read_u32_index(const struct device_node *np, + const char *propname, + u32 index, u32 *out_value); +extern int of_property_read_u64_index(const struct device_node *np, + const char *propname, + u32 index, u64 *out_value); +extern int of_property_read_variable_u8_array(const struct device_node *np, + const char *propname, u8 *out_values, + size_t sz_min, size_t sz_max); +extern int of_property_read_variable_u16_array(const struct device_node *np, + const char *propname, u16 *out_values, + size_t sz_min, size_t sz_max); +extern int of_property_read_variable_u32_array(const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz_min, + size_t sz_max); +extern int of_property_read_u64(const struct device_node *np, + const char *propname, u64 *out_value); +extern int of_property_read_variable_u64_array(const struct device_node *np, + const char *propname, + u64 *out_values, + 
size_t sz_min, + size_t sz_max); + +extern int of_property_read_string(const struct device_node *np, + const char *propname, + const char **out_string); +extern int of_property_match_string(const struct device_node *np, + const char *propname, + const char *string); +extern int of_property_read_string_helper(const struct device_node *np, + const char *propname, + const char **out_strs, size_t sz, int index); +extern int of_device_is_compatible(const struct device_node *device, + const char *); +extern int of_device_compatible_match(struct device_node *device, + const char *const *compat); +extern bool of_device_is_available(const struct device_node *device); +extern bool of_device_is_big_endian(const struct device_node *device); +extern const void *of_get_property(const struct device_node *node, + const char *name, + int *lenp); +extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); +#define for_each_property_of_node(dn, pp) \ + for (pp = dn->properties; pp != NULL; pp = pp->next) + +extern int of_n_addr_cells(struct device_node *np); +extern int of_n_size_cells(struct device_node *np); +extern const struct of_device_id *of_match_node( + const struct of_device_id *matches, const struct device_node *node); +extern int of_modalias_node(struct device_node *node, char *modalias, int len); +extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args); +extern struct device_node *of_parse_phandle(const struct device_node *np, + const char *phandle_name, + int index); +extern int of_parse_phandle_with_args(const struct device_node *np, + const char *list_name, const char *cells_name, int index, + struct of_phandle_args *out_args); +extern int of_parse_phandle_with_args_map(const struct device_node *np, + const char *list_name, const char *stem_name, int index, + struct of_phandle_args *out_args); +extern int of_parse_phandle_with_fixed_args(const struct device_node *np, + const char *list_name, int cells_count, int index, + struct of_phandle_args *out_args); +extern int of_count_phandle_with_args(const struct device_node *np, + const char *list_name, const char *cells_name); + +/* phandle iterator functions */ +extern int of_phandle_iterator_init(struct of_phandle_iterator *it, + const struct device_node *np, + const char *list_name, + const char *cells_name, + int cell_count); + +extern int of_phandle_iterator_next(struct of_phandle_iterator *it); +extern int of_phandle_iterator_args(struct of_phandle_iterator *it, + uint32_t *args, + int size); + +extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); +extern int of_alias_get_id(struct device_node *np, const char *stem); +extern int of_alias_get_highest_id(const char *stem); + +extern int of_machine_is_compatible(const char *compat); + +extern int of_add_property(struct device_node *np, struct property *prop); +extern int of_remove_property(struct device_node *np, struct property *prop); +extern int of_update_property(struct device_node *np, struct property *newprop); + +/* For updating the device tree at runtime */ +#define OF_RECONFIG_ATTACH_NODE 0x0001 +#define OF_RECONFIG_DETACH_NODE 0x0002 +#define OF_RECONFIG_ADD_PROPERTY 0x0003 +#define OF_RECONFIG_REMOVE_PROPERTY 0x0004 +#define OF_RECONFIG_UPDATE_PROPERTY 0x0005 + +extern int of_attach_node(struct device_node *); +extern int of_detach_node(struct device_node *); + +#define of_match_ptr(_ptr) (_ptr) + +/** + * of_property_read_u8_array - Find and read an array of u8 from a property. 
+ * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 8-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * dts entry of array should be like: + * property = /bits/ 8 <0x50 0x60 0x70>; + * + * The out_values is modified only if a valid u8 value can be decoded. + */ +static inline int of_property_read_u8_array(const struct device_node *np, + const char *propname, + u8 *out_values, size_t sz) +{ + int ret = of_property_read_variable_u8_array(np, propname, out_values, + sz, 0); + if (ret >= 0) + return 0; + else + return ret; +} + +/** + * of_property_read_u16_array - Find and read an array of u16 from a property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 16-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * dts entry of array should be like: + * property = /bits/ 16 <0x5000 0x6000 0x7000>; + * + * The out_values is modified only if a valid u16 value can be decoded. + */ +static inline int of_property_read_u16_array(const struct device_node *np, + const char *propname, + u16 *out_values, size_t sz) +{ + int ret = of_property_read_variable_u16_array(np, propname, out_values, + sz, 0); + if (ret >= 0) + return 0; + else + return ret; +} + +/** + * of_property_read_u32_array - Find and read an array of 32 bit integers + * from a property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 32-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_values is modified only if a valid u32 value can be decoded. + */ +static inline int of_property_read_u32_array(const struct device_node *np, + const char *propname, + u32 *out_values, size_t sz) +{ + int ret = of_property_read_variable_u32_array(np, propname, out_values, + sz, 0); + if (ret >= 0) + return 0; + else + return ret; +} + +/** + * of_property_read_u64_array - Find and read an array of 64 bit integers + * from a property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 64-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. 
+ * + * The out_values is modified only if a valid u64 value can be decoded. + */ +static inline int of_property_read_u64_array(const struct device_node *np, + const char *propname, + u64 *out_values, size_t sz) +{ + int ret = of_property_read_variable_u64_array(np, propname, out_values, + sz, 0); + if (ret >= 0) + return 0; + else + return ret; +} + +/* + * struct property *prop; + * const __be32 *p; + * u32 u; + * + * of_property_for_each_u32(np, "propname", prop, p, u) + * printk("U32 value: %x\n", u); + */ +const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur, + u32 *pu); +/* + * struct property *prop; + * const char *s; + * + * of_property_for_each_string(np, "propname", prop, s) + * printk("String value: %s\n", s); + */ +const char *of_prop_next_string(struct property *prop, const char *cur); + +bool of_console_check(struct device_node *dn, char *name, int index); + +extern int of_cpu_node_to_id(struct device_node *np); + +#else /* CONFIG_OF */ + +static inline void of_core_init(void) +{ +} + +static inline bool is_of_node(const struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline bool of_node_name_eq(const struct device_node *np, const char *name) +{ + return false; +} + +static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix) +{ + return false; +} + +static inline const char* of_node_full_name(const struct device_node *np) +{ + return ""; +} + +static inline struct device_node *of_find_node_by_name(struct device_node *from, + const char *name) +{ + return NULL; +} + +static inline struct device_node *of_find_node_by_type(struct device_node *from, + const char *type) +{ + return NULL; +} + +static inline struct device_node *of_find_matching_node_and_match( + struct device_node *from, + const struct of_device_id *matches, + const struct of_device_id **match) +{ + return NULL; +} + +static inline struct device_node *of_find_node_by_path(const char *path) +{ + return NULL; +} + +static inline struct device_node *of_find_node_opts_by_path(const char *path, + const char **opts) +{ + return NULL; +} + +static inline struct device_node *of_find_node_by_phandle(phandle handle) +{ + return NULL; +} + +static inline struct device_node *of_get_parent(const struct device_node *node) +{ + return NULL; +} + +static inline struct device_node *of_get_next_child( + const struct device_node *node, struct device_node *prev) +{ + return NULL; +} + +static inline struct device_node *of_get_next_available_child( + const struct device_node *node, struct device_node *prev) +{ + return NULL; +} + +static inline struct device_node *of_find_node_with_property( + struct device_node *from, const char *prop_name) +{ + return NULL; +} + +#define of_fwnode_handle(node) NULL + +static inline bool of_have_populated_dt(void) +{ + return false; +} + +static inline struct device_node *of_get_compatible_child(const struct device_node *parent, + const char *compatible) +{ + return NULL; +} + +static inline struct device_node *of_get_child_by_name( + const struct device_node *node, + const char *name) +{ + return NULL; +} + +static inline int of_device_is_compatible(const struct device_node *device, + const char *name) +{ + return 0; +} + +static inline int of_device_compatible_match(struct device_node *device, + const char *const *compat) +{ + return 0; +} + +static inline bool of_device_is_available(const struct device_node *device) +{ + return 
false; +} + +static inline bool of_device_is_big_endian(const struct device_node *device) +{ + return false; +} + +static inline struct property *of_find_property(const struct device_node *np, + const char *name, + int *lenp) +{ + return NULL; +} + +static inline struct device_node *of_find_compatible_node( + struct device_node *from, + const char *type, + const char *compat) +{ + return NULL; +} + +static inline int of_property_count_elems_of_size(const struct device_node *np, + const char *propname, int elem_size) +{ + return -ENOSYS; +} + +static inline int of_property_read_u8_array(const struct device_node *np, + const char *propname, u8 *out_values, size_t sz) +{ + return -ENOSYS; +} + +static inline int of_property_read_u16_array(const struct device_node *np, + const char *propname, u16 *out_values, size_t sz) +{ + return -ENOSYS; +} + +static inline int of_property_read_u32_array(const struct device_node *np, + const char *propname, + u32 *out_values, size_t sz) +{ + return -ENOSYS; +} + +static inline int of_property_read_u64_array(const struct device_node *np, + const char *propname, + u64 *out_values, size_t sz) +{ + return -ENOSYS; +} + +static inline int of_property_read_u32_index(const struct device_node *np, + const char *propname, u32 index, u32 *out_value) +{ + return -ENOSYS; +} + +static inline int of_property_read_u64_index(const struct device_node *np, + const char *propname, u32 index, u64 *out_value) +{ + return -ENOSYS; +} + +static inline const void *of_get_property(const struct device_node *node, + const char *name, + int *lenp) +{ + return NULL; +} + +static inline struct device_node *of_get_cpu_node(int cpu, + unsigned int *thread) +{ + return NULL; +} + +static inline int of_n_addr_cells(struct device_node *np) +{ + return 0; + +} +static inline int of_n_size_cells(struct device_node *np) +{ + return 0; +} + +static inline int of_property_read_variable_u8_array(const struct device_node *np, + const char *propname, u8 *out_values, + size_t sz_min, size_t sz_max) +{ + return -ENOSYS; +} + +static inline int of_property_read_variable_u16_array(const struct device_node *np, + const char *propname, u16 *out_values, + size_t sz_min, size_t sz_max) +{ + return -ENOSYS; +} + +static inline int of_property_read_variable_u32_array(const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz_min, + size_t sz_max) +{ + return -ENOSYS; +} + +static inline int of_property_read_u64(const struct device_node *np, + const char *propname, u64 *out_value) +{ + return -ENOSYS; +} + +static inline int of_property_read_variable_u64_array(const struct device_node *np, + const char *propname, + u64 *out_values, + size_t sz_min, + size_t sz_max) +{ + return -ENOSYS; +} + +static inline int of_property_read_string(const struct device_node *np, + const char *propname, + const char **out_string) +{ + return -ENOSYS; +} + +static inline int of_property_match_string(const struct device_node *np, + const char *propname, + const char *string) +{ + return -ENOSYS; +} + +static inline int of_property_read_string_helper(const struct device_node *np, + const char *propname, + const char **out_strs, size_t sz, int index) +{ + return -ENOSYS; +} + +static inline struct device_node *of_parse_phandle(const struct device_node *np, + const char *phandle_name, + int index) +{ + return NULL; +} + +static inline int of_parse_phandle_with_args(const struct device_node *np, + const char *list_name, + const char *cells_name, + int index, + struct of_phandle_args *out_args) +{ + return 
-ENOSYS; +} + +static inline int of_parse_phandle_with_args_map(const struct device_node *np, + const char *list_name, + const char *stem_name, + int index, + struct of_phandle_args *out_args) +{ + return -ENOSYS; +} + +static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, + const char *list_name, int cells_count, int index, + struct of_phandle_args *out_args) +{ + return -ENOSYS; +} + +static inline int of_count_phandle_with_args(struct device_node *np, + const char *list_name, + const char *cells_name) +{ + return -ENOSYS; +} + +static inline int of_phandle_iterator_init(struct of_phandle_iterator *it, + const struct device_node *np, + const char *list_name, + const char *cells_name, + int cell_count) +{ + return -ENOSYS; +} + +static inline int of_phandle_iterator_next(struct of_phandle_iterator *it) +{ + return -ENOSYS; +} + +static inline int of_phandle_iterator_args(struct of_phandle_iterator *it, + uint32_t *args, + int size) +{ + return 0; +} + +static inline int of_alias_get_id(struct device_node *np, const char *stem) +{ + return -ENOSYS; +} + +static inline int of_alias_get_highest_id(const char *stem) +{ + return -ENOSYS; +} + +static inline int of_machine_is_compatible(const char *compat) +{ + return 0; +} + +static inline bool of_console_check(const struct device_node *dn, const char *name, int index) +{ + return false; +} + +static inline const __be32 *of_prop_next_u32(struct property *prop, + const __be32 *cur, u32 *pu) +{ + return NULL; +} + +static inline const char *of_prop_next_string(struct property *prop, + const char *cur) +{ + return NULL; +} + +static inline int of_node_check_flag(struct device_node *n, unsigned long flag) +{ + return 0; +} + +static inline int of_node_test_and_set_flag(struct device_node *n, + unsigned long flag) +{ + return 0; +} + +static inline void of_node_set_flag(struct device_node *n, unsigned long flag) +{ +} + +static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) +{ +} + +static inline int of_property_check_flag(struct property *p, unsigned long flag) +{ + return 0; +} + +static inline void of_property_set_flag(struct property *p, unsigned long flag) +{ +} + +static inline void of_property_clear_flag(struct property *p, unsigned long flag) +{ +} + +static inline int of_cpu_node_to_id(struct device_node *np) +{ + return -ENODEV; +} + +#define of_match_ptr(_ptr) NULL +#define of_match_node(_matches, _node) NULL +#endif /* CONFIG_OF */ + +/* Default string compare functions, Allow arch asm/prom.h to override */ +#if !defined(of_compat_cmp) +#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) +#define of_prop_cmp(s1, s2) strcmp((s1), (s2)) +#define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) +#endif + +static inline int of_prop_val_eq(struct property *p1, struct property *p2) +{ + return p1->length == p2->length && + !memcmp(p1->value, p2->value, (size_t)p1->length); +} + +#if defined(CONFIG_OF) && defined(CONFIG_NUMA) +extern int of_node_to_nid(struct device_node *np); +#else +static inline int of_node_to_nid(struct device_node *device) +{ + return NUMA_NO_NODE; +} +#endif + +#ifdef CONFIG_OF_NUMA +extern int of_numa_init(void); +#else +static inline int of_numa_init(void) +{ + return -ENOSYS; +} +#endif + +static inline struct device_node *of_find_matching_node( + struct device_node *from, + const struct of_device_id *matches) +{ + return of_find_matching_node_and_match(from, matches, NULL); +} + +static inline const char *of_node_get_device_type(const struct device_node *np) +{ + 
return of_get_property(np, "device_type", NULL); +} + +static inline bool of_node_is_type(const struct device_node *np, const char *type) +{ + const char *match = of_node_get_device_type(np); + + return np && match && type && !strcmp(match, type); +} + +/** + * of_property_count_u8_elems - Count the number of u8 elements in a property + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * + * Search for a property in a device node and count the number of u8 elements + * in it. Returns number of elements on success, -EINVAL if the property does + * not exist or its length does not match a multiple of u8 and -ENODATA if the + * property does not have a value. + */ +static inline int of_property_count_u8_elems(const struct device_node *np, + const char *propname) +{ + return of_property_count_elems_of_size(np, propname, sizeof(u8)); +} + +/** + * of_property_count_u16_elems - Count the number of u16 elements in a property + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * + * Search for a property in a device node and count the number of u16 elements + * in it. Returns number of elements on success, -EINVAL if the property does + * not exist or its length does not match a multiple of u16 and -ENODATA if the + * property does not have a value. + */ +static inline int of_property_count_u16_elems(const struct device_node *np, + const char *propname) +{ + return of_property_count_elems_of_size(np, propname, sizeof(u16)); +} + +/** + * of_property_count_u32_elems - Count the number of u32 elements in a property + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * + * Search for a property in a device node and count the number of u32 elements + * in it. Returns number of elements on success, -EINVAL if the property does + * not exist or its length does not match a multiple of u32 and -ENODATA if the + * property does not have a value. + */ +static inline int of_property_count_u32_elems(const struct device_node *np, + const char *propname) +{ + return of_property_count_elems_of_size(np, propname, sizeof(u32)); +} + +/** + * of_property_count_u64_elems - Count the number of u64 elements in a property + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * + * Search for a property in a device node and count the number of u64 elements + * in it. Returns number of elements on success, -EINVAL if the property does + * not exist or its length does not match a multiple of u64 and -ENODATA if the + * property does not have a value. + */ +static inline int of_property_count_u64_elems(const struct device_node *np, + const char *propname) +{ + return of_property_count_elems_of_size(np, propname, sizeof(u64)); +} + +/** + * of_property_read_string_array() - Read an array of strings from a multiple + * strings property. + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_strs: output array of string pointers. + * @sz: number of array elements to read. + * + * Search for a property in a device tree node and retrieve a list of + * terminated string values (pointer to data, not a copy) in that property. + * + * If @out_strs is NULL, the number of strings in the property is returned. 
+ */ +static inline int of_property_read_string_array(const struct device_node *np, + const char *propname, const char **out_strs, + size_t sz) +{ + return of_property_read_string_helper(np, propname, out_strs, sz, 0); +} + +/** + * of_property_count_strings() - Find and return the number of strings from a + * multiple strings property. + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * + * Search for a property in a device tree node and retrieve the number of null + * terminated strings contained in it. Returns the number of strings on + * success, -EINVAL if the property does not exist, -ENODATA if property + * does not have a value, and -EILSEQ if the string is not null-terminated + * within the length of the property data. + */ +static inline int of_property_count_strings(const struct device_node *np, + const char *propname) +{ + return of_property_read_string_helper(np, propname, NULL, 0, 0); +} + +/** + * of_property_read_string_index() - Find and read a string from a multiple + * strings property. + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @index: index of the string in the list of strings + * @out_string: pointer to null terminated return string, modified only if + * return value is 0. + * + * Search for a property in a device tree node and retrieve a null + * terminated string value (pointer to data, not a copy) in the list of strings + * contained in that property. + * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if + * property does not have a value, and -EILSEQ if the string is not + * null-terminated within the length of the property data. + * + * The out_string pointer is modified only if a valid string can be decoded. + */ +static inline int of_property_read_string_index(const struct device_node *np, + const char *propname, + int index, const char **output) +{ + int rc = of_property_read_string_helper(np, propname, output, 1, index); + return rc < 0 ? rc : 0; +} + +/** + * of_property_read_bool - Find a property + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * + * Search for a property in a device node. + * Returns true if the property exists, false otherwise. + */ +static inline bool of_property_read_bool(const struct device_node *np, + const char *propname) +{ + struct property *prop = of_find_property(np, propname, NULL); + + return prop ? 
true : false; +} + +static inline int of_property_read_u8(const struct device_node *np, + const char *propname, + u8 *out_value) +{ + return of_property_read_u8_array(np, propname, out_value, 1); +} + +static inline int of_property_read_u16(const struct device_node *np, + const char *propname, + u16 *out_value) +{ + return of_property_read_u16_array(np, propname, out_value, 1); +} + +static inline int of_property_read_u32(const struct device_node *np, + const char *propname, + u32 *out_value) +{ + return of_property_read_u32_array(np, propname, out_value, 1); +} + +static inline int of_property_read_s32(const struct device_node *np, + const char *propname, + s32 *out_value) +{ + return of_property_read_u32(np, propname, (u32*) out_value); +} + +#define of_for_each_phandle(it, err, np, ln, cn, cc) \ + for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)), \ + err = of_phandle_iterator_next(it); \ + err == 0; \ + err = of_phandle_iterator_next(it)) + +#define of_property_for_each_u32(np, propname, prop, p, u) \ + for (prop = of_find_property(np, propname, NULL), \ + p = of_prop_next_u32(prop, NULL, &u); \ + p; \ + p = of_prop_next_u32(prop, p, &u)) + +#define of_property_for_each_string(np, propname, prop, s) \ + for (prop = of_find_property(np, propname, NULL), \ + s = of_prop_next_string(prop, NULL); \ + s; \ + s = of_prop_next_string(prop, s)) + +#define for_each_node_by_name(dn, name) \ + for (dn = of_find_node_by_name(NULL, name); dn; \ + dn = of_find_node_by_name(dn, name)) +#define for_each_node_by_type(dn, type) \ + for (dn = of_find_node_by_type(NULL, type); dn; \ + dn = of_find_node_by_type(dn, type)) +#define for_each_compatible_node(dn, type, compatible) \ + for (dn = of_find_compatible_node(NULL, type, compatible); dn; \ + dn = of_find_compatible_node(dn, type, compatible)) +#define for_each_matching_node(dn, matches) \ + for (dn = of_find_matching_node(NULL, matches); dn; \ + dn = of_find_matching_node(dn, matches)) +#define for_each_matching_node_and_match(dn, matches, match) \ + for (dn = of_find_matching_node_and_match(NULL, matches, match); \ + dn; dn = of_find_matching_node_and_match(dn, matches, match)) + +#define for_each_child_of_node(parent, child) \ + for (child = of_get_next_child(parent, NULL); child != NULL; \ + child = of_get_next_child(parent, child)) +#define for_each_available_child_of_node(parent, child) \ + for (child = of_get_next_available_child(parent, NULL); child != NULL; \ + child = of_get_next_available_child(parent, child)) + +#define for_each_node_with_property(dn, prop_name) \ + for (dn = of_find_node_with_property(NULL, prop_name); dn; \ + dn = of_find_node_with_property(dn, prop_name)) + +static inline int of_get_child_count(const struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_child_of_node(np, child) + num++; + + return num; +} + +static inline int of_get_available_child_count(const struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_available_child_of_node(np, child) + num++; + + return num; +} + +#if defined(CONFIG_OF) && !defined(MODULE) +#define _OF_DECLARE(table, name, compat, fn, fn_type) \ + static const struct of_device_id __of_table_##name \ + __used __section(__##table##_of_table) \ + __aligned(__alignof__(struct of_device_id)) \ + = { .compatible = compat, \ + .data = (fn == (fn_type)NULL) ? 
fn : fn } +#else +#define _OF_DECLARE(table, name, compat, fn, fn_type) \ + static const struct of_device_id __of_table_##name \ + __attribute__((unused)) \ + = { .compatible = compat, \ + .data = (fn == (fn_type)NULL) ? fn : fn } +#endif + +typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); +typedef int (*of_init_fn_1_ret)(struct device_node *); +typedef void (*of_init_fn_1)(struct device_node *); + +#define OF_DECLARE_1(table, name, compat, fn) \ + _OF_DECLARE(table, name, compat, fn, of_init_fn_1) +#define OF_DECLARE_1_RET(table, name, compat, fn) \ + _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret) +#define OF_DECLARE_2(table, name, compat, fn) \ + _OF_DECLARE(table, name, compat, fn, of_init_fn_2) + +/** + * struct of_changeset_entry - Holds a changeset entry + * + * @node: list_head for the log list + * @action: notifier action + * @np: pointer to the device node affected + * @prop: pointer to the property affected + * @old_prop: hold a pointer to the original property + * + * Every modification of the device tree during a changeset + * is held in a list of of_changeset_entry structures. + * That way we can recover from a partial application, or we can + * revert the changeset + */ +struct of_changeset_entry { + struct list_head node; + unsigned long action; + struct device_node *np; + struct property *prop; + struct property *old_prop; +}; + +/** + * struct of_changeset - changeset tracker structure + * + * @entries: list_head for the changeset entries + * + * changesets are a convenient way to apply bulk changes to the + * live tree. In case of an error, changes are rolled-back. + * changesets live on after initial application, and if not + * destroyed after use, they can be reverted in one single call. + */ +struct of_changeset { + struct list_head entries; +}; + +enum of_reconfig_change { + OF_RECONFIG_NO_CHANGE = 0, + OF_RECONFIG_CHANGE_ADD, + OF_RECONFIG_CHANGE_REMOVE, +}; + +#ifdef CONFIG_OF_DYNAMIC +extern int of_reconfig_notifier_register(struct notifier_block *); +extern int of_reconfig_notifier_unregister(struct notifier_block *); +extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd); +extern int of_reconfig_get_state_change(unsigned long action, + struct of_reconfig_data *arg); + +extern void of_changeset_init(struct of_changeset *ocs); +extern void of_changeset_destroy(struct of_changeset *ocs); +extern int of_changeset_apply(struct of_changeset *ocs); +extern int of_changeset_revert(struct of_changeset *ocs); +extern int of_changeset_action(struct of_changeset *ocs, + unsigned long action, struct device_node *np, + struct property *prop); + +static inline int of_changeset_attach_node(struct of_changeset *ocs, + struct device_node *np) +{ + return of_changeset_action(ocs, OF_RECONFIG_ATTACH_NODE, np, NULL); +} + +static inline int of_changeset_detach_node(struct of_changeset *ocs, + struct device_node *np) +{ + return of_changeset_action(ocs, OF_RECONFIG_DETACH_NODE, np, NULL); +} + +static inline int of_changeset_add_property(struct of_changeset *ocs, + struct device_node *np, struct property *prop) +{ + return of_changeset_action(ocs, OF_RECONFIG_ADD_PROPERTY, np, prop); +} + +static inline int of_changeset_remove_property(struct of_changeset *ocs, + struct device_node *np, struct property *prop) +{ + return of_changeset_action(ocs, OF_RECONFIG_REMOVE_PROPERTY, np, prop); +} + +static inline int of_changeset_update_property(struct of_changeset *ocs, + struct device_node *np, struct property *prop) +{ + return 
of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); +} +#else /* CONFIG_OF_DYNAMIC */ +static inline int of_reconfig_notifier_register(struct notifier_block *nb) +{ + return -EINVAL; +} +static inline int of_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return -EINVAL; +} +static inline int of_reconfig_notify(unsigned long action, + struct of_reconfig_data *arg) +{ + return -EINVAL; +} +static inline int of_reconfig_get_state_change(unsigned long action, + struct of_reconfig_data *arg) +{ + return -EINVAL; +} +#endif /* CONFIG_OF_DYNAMIC */ + +/** + * of_device_is_system_power_controller - Tells if system-power-controller is found for device_node + * @np: Pointer to the given device_node + * + * return true if present false otherwise + */ +static inline bool of_device_is_system_power_controller(const struct device_node *np) +{ + return of_property_read_bool(np, "system-power-controller"); +} + +/** + * Overlay support + */ + +enum of_overlay_notify_action { + OF_OVERLAY_PRE_APPLY = 0, + OF_OVERLAY_POST_APPLY, + OF_OVERLAY_PRE_REMOVE, + OF_OVERLAY_POST_REMOVE, +}; + +struct of_overlay_notify_data { + struct device_node *overlay; + struct device_node *target; +}; + +#ifdef CONFIG_OF_OVERLAY + +int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, + int *ovcs_id); +int of_overlay_remove(int *ovcs_id); +int of_overlay_remove_all(void); + +int of_overlay_notifier_register(struct notifier_block *nb); +int of_overlay_notifier_unregister(struct notifier_block *nb); + +#else + +static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size, + int *ovcs_id) +{ + return -ENOTSUPP; +} + +static inline int of_overlay_remove(int *ovcs_id) +{ + return -ENOTSUPP; +} + +static inline int of_overlay_remove_all(void) +{ + return -ENOTSUPP; +} + +static inline int of_overlay_notifier_register(struct notifier_block *nb) +{ + return 0; +} + +static inline int of_overlay_notifier_unregister(struct notifier_block *nb) +{ + return 0; +} + +#endif + +#endif /* _LINUX_OF_H */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h new file mode 100644 index 000000000..30e40fb69 --- /dev/null +++ b/include/linux/of_address.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __OF_ADDRESS_H +#define __OF_ADDRESS_H +#include +#include +#include +#include + +struct of_pci_range_parser { + struct device_node *node; + const __be32 *range; + const __be32 *end; + int np; + int pna; +}; + +struct of_pci_range { + u32 pci_space; + u64 pci_addr; + u64 cpu_addr; + u64 size; + u32 flags; +}; + +#define for_each_of_pci_range(parser, range) \ + for (; of_pci_range_parser_one(parser, range);) + +/* Translate a DMA address from device space to CPU space */ +extern u64 of_translate_dma_address(struct device_node *dev, + const __be32 *in_addr); + +#ifdef CONFIG_OF_ADDRESS +extern u64 of_translate_address(struct device_node *np, const __be32 *addr); +extern int of_address_to_resource(struct device_node *dev, int index, + struct resource *r); +extern struct device_node *of_find_matching_node_by_address( + struct device_node *from, + const struct of_device_id *matches, + u64 base_address); +extern void __iomem *of_iomap(struct device_node *device, int index); +void __iomem *of_io_request_and_map(struct device_node *device, + int index, const char *name); + +/* Extract an address from a device, returns the region size and + * the address space flags too. 
The PCI version uses a BAR number + * instead of an absolute index + */ +extern const __be32 *of_get_address(struct device_node *dev, int index, + u64 *size, unsigned int *flags); + +extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node); +extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node); +extern struct of_pci_range *of_pci_range_parser_one( + struct of_pci_range_parser *parser, + struct of_pci_range *range); +extern int of_dma_get_range(struct device_node *np, u64 *dma_addr, + u64 *paddr, u64 *size); +extern bool of_dma_is_coherent(struct device_node *np); +#else /* CONFIG_OF_ADDRESS */ +static inline void __iomem *of_io_request_and_map(struct device_node *device, + int index, const char *name) +{ + return IOMEM_ERR_PTR(-EINVAL); +} + +static inline u64 of_translate_address(struct device_node *np, + const __be32 *addr) +{ + return OF_BAD_ADDR; +} + +static inline struct device_node *of_find_matching_node_by_address( + struct device_node *from, + const struct of_device_id *matches, + u64 base_address) +{ + return NULL; +} + +static inline const __be32 *of_get_address(struct device_node *dev, int index, + u64 *size, unsigned int *flags) +{ + return NULL; +} + +static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + return -ENOSYS; +} + +static inline int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + return -ENOSYS; +} + +static inline struct of_pci_range *of_pci_range_parser_one( + struct of_pci_range_parser *parser, + struct of_pci_range *range) +{ + return NULL; +} + +static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr, + u64 *paddr, u64 *size) +{ + return -ENODEV; +} + +static inline bool of_dma_is_coherent(struct device_node *np) +{ + return false; +} +#endif /* CONFIG_OF_ADDRESS */ + +#ifdef CONFIG_OF +extern int of_address_to_resource(struct device_node *dev, int index, + struct resource *r); +void __iomem *of_iomap(struct device_node *node, int index); +#else +static inline int of_address_to_resource(struct device_node *dev, int index, + struct resource *r) +{ + return -EINVAL; +} + +static inline void __iomem *of_iomap(struct device_node *device, int index) +{ + return NULL; +} +#endif + +#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) +extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, + u64 *size, unsigned int *flags); +extern int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r); +extern int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res); +#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */ +static inline int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r) +{ + return -ENOSYS; +} + +static inline const __be32 *of_get_pci_address(struct device_node *dev, + int bar_no, u64 *size, unsigned int *flags) +{ + return NULL; +} +static inline int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res) +{ + return -ENOSYS; +} +#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ + +#endif /* __OF_ADDRESS_H */ + diff --git a/include/linux/of_clk.h b/include/linux/of_clk.h new file mode 100644 index 000000000..c86fcad23 --- /dev/null +++ b/include/linux/of_clk.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * OF clock helpers + */ + +#ifndef __LINUX_OF_CLK_H +#define 
__LINUX_OF_CLK_H + +struct device_node; +struct of_device_id; + +#if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF) + +unsigned int of_clk_get_parent_count(struct device_node *np); +const char *of_clk_get_parent_name(struct device_node *np, int index); +void of_clk_init(const struct of_device_id *matches); + +#else /* !CONFIG_COMMON_CLK || !CONFIG_OF */ + +static inline unsigned int of_clk_get_parent_count(struct device_node *np) +{ + return 0; +} +static inline const char *of_clk_get_parent_name(struct device_node *np, + int index) +{ + return NULL; +} +static inline void of_clk_init(const struct of_device_id *matches) {} + +#endif /* !CONFIG_COMMON_CLK || !CONFIG_OF */ + +#endif /* __LINUX_OF_CLK_H */ diff --git a/include/linux/of_device.h b/include/linux/of_device.h new file mode 100644 index 000000000..165fd302b --- /dev/null +++ b/include/linux/of_device.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_OF_DEVICE_H +#define _LINUX_OF_DEVICE_H + +#include +#include +#include /* temporary until merge */ + +#include +#include + +struct device; + +#ifdef CONFIG_OF +extern const struct of_device_id *of_match_device( + const struct of_device_id *matches, const struct device *dev); + +/** + * of_driver_match_device - Tell if a driver's of_match_table matches a device. + * @drv: the device_driver structure to test + * @dev: the device structure to match against + */ +static inline int of_driver_match_device(struct device *dev, + const struct device_driver *drv) +{ + return of_match_device(drv->of_match_table, dev) != NULL; +} + +extern struct platform_device *of_dev_get(struct platform_device *dev); +extern void of_dev_put(struct platform_device *dev); + +extern int of_device_add(struct platform_device *pdev); +extern int of_device_register(struct platform_device *ofdev); +extern void of_device_unregister(struct platform_device *ofdev); + +extern const void *of_device_get_match_data(const struct device *dev); + +extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len); +extern int of_device_request_module(struct device *dev); + +extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env); +extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env); + +static inline void of_device_node_put(struct device *dev) +{ + of_node_put(dev->of_node); +} + +static inline struct device_node *of_cpu_device_node_get(int cpu) +{ + struct device *cpu_dev; + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + return of_get_cpu_node(cpu, NULL); + return of_node_get(cpu_dev->of_node); +} + +int of_dma_configure(struct device *dev, + struct device_node *np, + bool force_dma); +void of_dma_deconfigure(struct device *dev); +#else /* CONFIG_OF */ + +static inline int of_driver_match_device(struct device *dev, + const struct device_driver *drv) +{ + return 0; +} + +static inline void of_device_uevent(struct device *dev, + struct kobj_uevent_env *env) { } + +static inline const void *of_device_get_match_data(const struct device *dev) +{ + return NULL; +} + +static inline int of_device_modalias(struct device *dev, + char *str, ssize_t len) +{ + return -ENODEV; +} + +static inline int of_device_request_module(struct device *dev) +{ + return -ENODEV; +} + +static inline int of_device_uevent_modalias(struct device *dev, + struct kobj_uevent_env *env) +{ + return -ENODEV; +} + +static inline void of_device_node_put(struct device *dev) { } + +static inline const struct of_device_id *__of_match_device( + const struct of_device_id 
*matches, const struct device *dev) +{ + return NULL; +} +#define of_match_device(matches, dev) \ + __of_match_device(of_match_ptr(matches), (dev)) + +static inline struct device_node *of_cpu_device_node_get(int cpu) +{ + return NULL; +} + +static inline int of_dma_configure(struct device *dev, + struct device_node *np, + bool force_dma) +{ + return 0; +} +static inline void of_dma_deconfigure(struct device *dev) +{} +#endif /* CONFIG_OF */ + +#endif /* _LINUX_OF_DEVICE_H */ diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h new file mode 100644 index 000000000..fd706cdf2 --- /dev/null +++ b/include/linux/of_dma.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * OF helpers for DMA request / controller + * + * Based on of_gpio.h + * + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef __LINUX_OF_DMA_H +#define __LINUX_OF_DMA_H + +#include +#include + +struct device_node; + +struct of_dma { + struct list_head of_dma_controllers; + struct device_node *of_node; + struct dma_chan *(*of_dma_xlate) + (struct of_phandle_args *, struct of_dma *); + void *(*of_dma_route_allocate) + (struct of_phandle_args *, struct of_dma *); + struct dma_router *dma_router; + void *of_dma_data; +}; + +struct of_dma_filter_info { + dma_cap_mask_t dma_cap; + dma_filter_fn filter_fn; +}; + +#ifdef CONFIG_DMA_OF +extern int of_dma_controller_register(struct device_node *np, + struct dma_chan *(*of_dma_xlate) + (struct of_phandle_args *, struct of_dma *), + void *data); +extern void of_dma_controller_free(struct device_node *np); + +extern int of_dma_router_register(struct device_node *np, + void *(*of_dma_route_allocate) + (struct of_phandle_args *, struct of_dma *), + struct dma_router *dma_router); +#define of_dma_router_free of_dma_controller_free + +extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, + const char *name); +extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma); +extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, + struct of_dma *ofdma); + +#else +static inline int of_dma_controller_register(struct device_node *np, + struct dma_chan *(*of_dma_xlate) + (struct of_phandle_args *, struct of_dma *), + void *data) +{ + return -ENODEV; +} + +static inline void of_dma_controller_free(struct device_node *np) +{ +} + +static inline int of_dma_router_register(struct device_node *np, + void *(*of_dma_route_allocate) + (struct of_phandle_args *, struct of_dma *), + struct dma_router *dma_router) +{ + return -ENODEV; +} + +#define of_dma_router_free of_dma_controller_free + +static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, + const char *name) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + return NULL; +} + +#define of_dma_xlate_by_chan_id NULL + +#endif + +#endif /* __LINUX_OF_DMA_H */ diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h new file mode 100644 index 000000000..b9cd9ebdf --- /dev/null +++ b/include/linux/of_fdt.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for working with the Flattened Device Tree data format + * + * Copyright 2009 Benjamin Herrenschmidt, IBM Corp + * benh@kernel.crashing.org + */ + +#ifndef _LINUX_OF_FDT_H +#define _LINUX_OF_FDT_H + +#include +#include +#include + +/* Definitions used by the flattened device tree */ +#define OF_DT_HEADER 
0xd00dfeed /* marker */ + +#ifndef __ASSEMBLY__ + +#if defined(CONFIG_OF_FLATTREE) + +struct device_node; + +/* For scanning an arbitrary device-tree at any time */ +extern char *of_fdt_get_string(const void *blob, u32 offset); +extern void *of_fdt_get_property(const void *blob, + unsigned long node, + const char *name, + int *size); +extern bool of_fdt_is_big_endian(const void *blob, + unsigned long node); +extern int of_fdt_match(const void *blob, unsigned long node, + const char *const *compat); +extern void *of_fdt_unflatten_tree(const unsigned long *blob, + struct device_node *dad, + struct device_node **mynodes); + +/* TBD: Temporary export of fdt globals - remove when code fully merged */ +extern int __initdata dt_root_addr_cells; +extern int __initdata dt_root_size_cells; +extern void *initial_boot_params; + +extern char __dtb_start[]; +extern char __dtb_end[]; + +/* Other Prototypes */ +extern u64 of_flat_dt_translate_address(unsigned long node); +extern void of_fdt_limit_memory(int limit); +#endif /* CONFIG_OF_FLATTREE */ + +#ifdef CONFIG_OF_EARLY_FLATTREE +/* For scanning the flat device-tree at boot time */ +extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, + int depth, void *data), + void *data); +extern int of_scan_flat_dt_subnodes(unsigned long node, + int (*it)(unsigned long node, + const char *uname, + void *data), + void *data); +extern int of_get_flat_dt_subnode_by_name(unsigned long node, + const char *uname); +extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, + int *size); +extern int of_flat_dt_is_compatible(unsigned long node, const char *name); +extern int of_flat_dt_match(unsigned long node, const char *const *matches); +extern unsigned long of_get_flat_dt_root(void); +extern int of_get_flat_dt_size(void); +extern uint32_t of_get_flat_dt_phandle(unsigned long node); + +extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, + int depth, void *data); +extern int early_init_dt_scan_memory(unsigned long node, const char *uname, + int depth, void *data); +extern int early_init_dt_scan_chosen_stdout(void); +extern void early_init_fdt_scan_reserved_mem(void); +extern void early_init_fdt_reserve_self(void); +extern void early_init_dt_add_memory_arch(u64 base, u64 size); +extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); +extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, + bool no_map); +extern u64 dt_mem_next_cell(int s, const __be32 **cellp); + +/* Early flat tree scan hooks */ +extern int early_init_dt_scan_root(unsigned long node, const char *uname, + int depth, void *data); + +extern bool early_init_dt_scan(void *params); +extern bool early_init_dt_verify(void *params); +extern void early_init_dt_scan_nodes(void); + +extern const char *of_flat_dt_get_machine_name(void); +extern const void *of_flat_dt_match_machine(const void *default_match, + const void * (*get_next_compat)(const char * const**)); + +/* Other Prototypes */ +extern void unflatten_device_tree(void); +extern void unflatten_and_copy_device_tree(void); +extern void early_init_devtree(void *); +extern void early_get_first_memblock_info(void *, phys_addr_t *); +#else /* CONFIG_OF_EARLY_FLATTREE */ +static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; } +static inline void early_init_fdt_scan_reserved_mem(void) {} +static inline void early_init_fdt_reserve_self(void) {} +static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } +static inline 
void unflatten_device_tree(void) {} +static inline void unflatten_and_copy_device_tree(void) {} +#endif /* CONFIG_OF_EARLY_FLATTREE */ + +#endif /* __ASSEMBLY__ */ +#endif /* _LINUX_OF_FDT_H */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h new file mode 100644 index 000000000..163b79ecd --- /dev/null +++ b/include/linux/of_gpio.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * OF helpers for the GPIO API + * + * Copyright (c) 2007-2008 MontaVista Software, Inc. + * + * Author: Anton Vorontsov + */ + +#ifndef __LINUX_OF_GPIO_H +#define __LINUX_OF_GPIO_H + +#include +#include +#include +#include +#include + +struct device_node; + +/* + * This is Linux-specific flags. By default controllers' and Linux' mapping + * match, but GPIO controllers are free to translate their own flags to + * Linux-specific in their .xlate callback. Though, 1:1 mapping is recommended. + */ +enum of_gpio_flags { + OF_GPIO_ACTIVE_LOW = 0x1, + OF_GPIO_SINGLE_ENDED = 0x2, + OF_GPIO_OPEN_DRAIN = 0x4, + OF_GPIO_TRANSITORY = 0x8, +}; + +#ifdef CONFIG_OF_GPIO + +/* + * OF GPIO chip for memory mapped banks + */ +struct of_mm_gpio_chip { + struct gpio_chip gc; + void (*save_regs)(struct of_mm_gpio_chip *mm_gc); + void __iomem *regs; +}; + +static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) +{ + return container_of(gc, struct of_mm_gpio_chip, gc); +} + +extern int of_get_named_gpio_flags(struct device_node *np, + const char *list_name, int index, enum of_gpio_flags *flags); + +extern int of_mm_gpiochip_add_data(struct device_node *np, + struct of_mm_gpio_chip *mm_gc, + void *data); +static inline int of_mm_gpiochip_add(struct device_node *np, + struct of_mm_gpio_chip *mm_gc) +{ + return of_mm_gpiochip_add_data(np, mm_gc, NULL); +} +extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); + +extern int of_gpio_simple_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags); + +#else /* CONFIG_OF_GPIO */ + +/* Drivers may not strictly depend on the GPIO support, so let them link. */ +static inline int of_get_named_gpio_flags(struct device_node *np, + const char *list_name, int index, enum of_gpio_flags *flags) +{ + if (flags) + *flags = 0; + + return -ENOSYS; +} + +static inline int of_gpio_simple_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + return -ENOSYS; +} + +#endif /* CONFIG_OF_GPIO */ + +/** + * of_gpio_named_count() - Count GPIOs for a device + * @np: device node to count GPIOs for + * @propname: property name containing gpio specifier(s) + * + * The function returns the count of GPIOs specified for a node. + * Note that the empty GPIO specifiers count too. Returns either + * Number of gpios defined in property, + * -EINVAL for an incorrectly formed gpios property, or + * -ENOENT for a missing gpios property + * + * Example: + * gpios = <0 + * &gpio1 1 2 + * 0 + * &gpio2 3 4>; + * + * The above example defines four GPIOs, two of which are not specified. 
+ * This function will return '4' + */ +static inline int of_gpio_named_count(struct device_node *np, const char* propname) +{ + return of_count_phandle_with_args(np, propname, "#gpio-cells"); +} + +/** + * of_gpio_count() - Count GPIOs for a device + * @np: device node to count GPIOs for + * + * Same as of_gpio_named_count, but hard coded to use the 'gpios' property + */ +static inline int of_gpio_count(struct device_node *np) +{ + return of_gpio_named_count(np, "gpios"); +} + +static inline int of_get_gpio_flags(struct device_node *np, int index, + enum of_gpio_flags *flags) +{ + return of_get_named_gpio_flags(np, "gpios", index, flags); +} + +/** + * of_get_named_gpio() - Get a GPIO number to use with GPIO API + * @np: device node to get GPIO from + * @propname: Name of property containing gpio specifier(s) + * @index: index of the GPIO + * + * Returns GPIO number to use with Linux generic GPIO API, or one of the errno + * value on the error condition. + */ +static inline int of_get_named_gpio(struct device_node *np, + const char *propname, int index) +{ + return of_get_named_gpio_flags(np, propname, index, NULL); +} + +/** + * of_get_gpio() - Get a GPIO number to use with GPIO API + * @np: device node to get GPIO from + * @index: index of the GPIO + * + * Returns GPIO number to use with Linux generic GPIO API, or one of the errno + * value on the error condition. + */ +static inline int of_get_gpio(struct device_node *np, int index) +{ + return of_get_gpio_flags(np, index, NULL); +} + +#endif /* __LINUX_OF_GPIO_H */ diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h new file mode 100644 index 000000000..01038a6aa --- /dev/null +++ b/include/linux/of_graph.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * OF graph binding parsing helpers + * + * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd. + * Author: Sylwester Nawrocki + * + * Copyright (C) 2012 Renesas Electronics Corp. + * Author: Guennadi Liakhovetski + */ +#ifndef __LINUX_OF_GRAPH_H +#define __LINUX_OF_GRAPH_H + +#include +#include + +/** + * struct of_endpoint - the OF graph endpoint data structure + * @port: identifier (value of reg property) of a port this endpoint belongs to + * @id: identifier (value of reg property) of this endpoint + * @local_node: pointer to device_node of this endpoint + */ +struct of_endpoint { + unsigned int port; + unsigned int id; + const struct device_node *local_node; +}; + +/** + * for_each_endpoint_of_node - iterate over every endpoint in a device node + * @parent: parent device node containing ports and endpoints + * @child: loop variable pointing to the current endpoint node + * + * When breaking out of the loop, of_node_put(child) has to be called manually. 
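A minimal usage sketch (not part of the upstream header) for the of_gpio helpers declared above, assuming a hypothetical node whose "gpios" property looks like the four-entry example in the of_gpio_named_count() comment; the function name and the pr_debug() call are illustrative only.

static int example_get_enable_gpio(struct device_node *np)
{
	enum of_gpio_flags flags;
	int count, gpio;

	count = of_gpio_count(np);	/* counts "gpios" entries, empty specifiers included */
	if (count < 0)
		return count;		/* -EINVAL or -ENOENT, as documented above */
	if (count < 2)
		return -ENODEV;

	/* index 1 picks the <&gpio1 1 2> entry from the example above */
	gpio = of_get_gpio_flags(np, 1, &flags);
	if (gpio < 0)
		return gpio;

	if (flags & OF_GPIO_ACTIVE_LOW)
		pr_debug("GPIO %d is active low\n", gpio);

	return gpio;
}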
+ */ +#define for_each_endpoint_of_node(parent, child) \ + for (child = of_graph_get_next_endpoint(parent, NULL); child != NULL; \ + child = of_graph_get_next_endpoint(parent, child)) + +#ifdef CONFIG_OF +int of_graph_parse_endpoint(const struct device_node *node, + struct of_endpoint *endpoint); +int of_graph_get_endpoint_count(const struct device_node *np); +struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id); +struct device_node *of_graph_get_next_endpoint(const struct device_node *parent, + struct device_node *previous); +struct device_node *of_graph_get_endpoint_by_regs( + const struct device_node *parent, int port_reg, int reg); +struct device_node *of_graph_get_remote_endpoint( + const struct device_node *node); +struct device_node *of_graph_get_port_parent(struct device_node *node); +struct device_node *of_graph_get_remote_port_parent( + const struct device_node *node); +struct device_node *of_graph_get_remote_port(const struct device_node *node); +struct device_node *of_graph_get_remote_node(const struct device_node *node, + u32 port, u32 endpoint); +#else + +static inline int of_graph_parse_endpoint(const struct device_node *node, + struct of_endpoint *endpoint) +{ + return -ENOSYS; +} + +static inline int of_graph_get_endpoint_count(const struct device_node *np) +{ + return 0; +} + +static inline struct device_node *of_graph_get_port_by_id( + struct device_node *node, u32 id) +{ + return NULL; +} + +static inline struct device_node *of_graph_get_next_endpoint( + const struct device_node *parent, + struct device_node *previous) +{ + return NULL; +} + +static inline struct device_node *of_graph_get_endpoint_by_regs( + const struct device_node *parent, int port_reg, int reg) +{ + return NULL; +} + +static inline struct device_node *of_graph_get_remote_endpoint( + const struct device_node *node) +{ + return NULL; +} + +static inline struct device_node *of_graph_get_port_parent( + struct device_node *node) +{ + return NULL; +} + +static inline struct device_node *of_graph_get_remote_port_parent( + const struct device_node *node) +{ + return NULL; +} + +static inline struct device_node *of_graph_get_remote_port( + const struct device_node *node) +{ + return NULL; +} +static inline struct device_node *of_graph_get_remote_node( + const struct device_node *node, + u32 port, u32 endpoint) +{ + return NULL; +} + +#endif /* CONFIG_OF */ + +#endif /* __LINUX_OF_GRAPH_H */ diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h new file mode 100644 index 000000000..f3d40dd7b --- /dev/null +++ b/include/linux/of_iommu.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __OF_IOMMU_H +#define __OF_IOMMU_H + +#include +#include +#include + +#ifdef CONFIG_OF_IOMMU + +extern int of_get_dma_window(struct device_node *dn, const char *prefix, + int index, unsigned long *busno, dma_addr_t *addr, + size_t *size); + +extern const struct iommu_ops *of_iommu_configure(struct device *dev, + struct device_node *master_np); + +#else + +static inline int of_get_dma_window(struct device_node *dn, const char *prefix, + int index, unsigned long *busno, dma_addr_t *addr, + size_t *size) +{ + return -EINVAL; +} + +static inline const struct iommu_ops *of_iommu_configure(struct device *dev, + struct device_node *master_np) +{ + return NULL; +} + +#endif /* CONFIG_OF_IOMMU */ + +#endif /* __OF_IOMMU_H */ diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h new file mode 100644 index 000000000..1214cabb2 --- /dev/null +++ b/include/linux/of_irq.h @@ -0,0 
+1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __OF_IRQ_H +#define __OF_IRQ_H + +#include +#include +#include +#include +#include +#include + +typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); + +/* + * Workarounds only applied to 32bit powermac machines + */ +#define OF_IMAP_OLDWORLD_MAC 0x00000001 +#define OF_IMAP_NO_PHANDLE 0x00000002 + +#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC) +extern unsigned int of_irq_workarounds; +extern struct device_node *of_irq_dflt_pic; +extern int of_irq_parse_oldworld(struct device_node *device, int index, + struct of_phandle_args *out_irq); +#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */ +#define of_irq_workarounds (0) +#define of_irq_dflt_pic (NULL) +static inline int of_irq_parse_oldworld(struct device_node *device, int index, + struct of_phandle_args *out_irq) +{ + return -EINVAL; +} +#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */ + +extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq); +extern int of_irq_parse_one(struct device_node *device, int index, + struct of_phandle_args *out_irq); +extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data); +extern int of_irq_to_resource(struct device_node *dev, int index, + struct resource *r); + +extern void of_irq_init(const struct of_device_id *matches); + +#ifdef CONFIG_OF_IRQ +extern int of_irq_count(struct device_node *dev); +extern int of_irq_get(struct device_node *dev, int index); +extern int of_irq_get_byname(struct device_node *dev, const char *name); +extern int of_irq_to_resource_table(struct device_node *dev, + struct resource *res, int nr_irqs); +extern struct device_node *of_irq_find_parent(struct device_node *child); +extern struct irq_domain *of_msi_get_domain(struct device *dev, + struct device_node *np, + enum irq_domain_bus_token token); +extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, + u32 rid); +extern void of_msi_configure(struct device *dev, struct device_node *np); +u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); +#else +static inline int of_irq_count(struct device_node *dev) +{ + return 0; +} +static inline int of_irq_get(struct device_node *dev, int index) +{ + return 0; +} +static inline int of_irq_get_byname(struct device_node *dev, const char *name) +{ + return 0; +} +static inline int of_irq_to_resource_table(struct device_node *dev, + struct resource *res, int nr_irqs) +{ + return 0; +} +static inline void *of_irq_find_parent(struct device_node *child) +{ + return NULL; +} + +static inline struct irq_domain *of_msi_get_domain(struct device *dev, + struct device_node *np, + enum irq_domain_bus_token token) +{ + return NULL; +} +static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev, + u32 rid) +{ + return NULL; +} +static inline void of_msi_configure(struct device *dev, struct device_node *np) +{ +} +static inline u32 of_msi_map_rid(struct device *dev, + struct device_node *msi_np, u32 rid_in) +{ + return rid_in; +} +#endif + +#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC) +/* + * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC + * implements it differently. However, the prototype is the same for all, + * so declare it here regardless of the CONFIG_OF_IRQ setting. 
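A minimal sketch (not from the patch) of how a driver typically consumes the of_irq_get() helper declared above; the wrapper name and error handling are illustrative assumptions, not upstream code.

static int example_get_first_irq(struct device_node *np)
{
	/* index 0 selects the first entry of the node's "interrupts" property */
	int irq = of_irq_get(np, 0);

	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER while the irqchip is still probing */
	if (!irq)
		return -ENODEV;	/* 0 means no mapping could be created */

	return irq;
}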
+ */ +extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); + +#else /* !CONFIG_OF && !CONFIG_SPARC */ +static inline unsigned int irq_of_parse_and_map(struct device_node *dev, + int index) +{ + return 0; +} +#endif /* !CONFIG_OF */ + +#endif /* __OF_IRQ_H */ diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h new file mode 100644 index 000000000..f5db93bcd --- /dev/null +++ b/include/linux/of_mdio.h @@ -0,0 +1,119 @@ +/* + * OF helpers for the MDIO (Ethernet PHY) API + * + * Copyright (c) 2009 Secret Lab Technologies, Ltd. + * + * This file is released under the GPLv2 + */ + +#ifndef __LINUX_OF_MDIO_H +#define __LINUX_OF_MDIO_H + +#include +#include + +#if IS_ENABLED(CONFIG_OF_MDIO) +extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); +extern struct phy_device *of_phy_find_device(struct device_node *phy_np); +extern struct phy_device *of_phy_connect(struct net_device *dev, + struct device_node *phy_np, + void (*hndlr)(struct net_device *), + u32 flags, phy_interface_t iface); +extern struct phy_device * +of_phy_get_and_connect(struct net_device *dev, struct device_node *np, + void (*hndlr)(struct net_device *)); +struct phy_device *of_phy_attach(struct net_device *dev, + struct device_node *phy_np, u32 flags, + phy_interface_t iface); + +extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); +extern int of_phy_register_fixed_link(struct device_node *np); +extern void of_phy_deregister_fixed_link(struct device_node *np); +extern bool of_phy_is_fixed_link(struct device_node *np); + + +static inline int of_mdio_parse_addr(struct device *dev, + const struct device_node *np) +{ + u32 addr; + int ret; + + ret = of_property_read_u32(np, "reg", &addr); + if (ret < 0) { + dev_err(dev, "%s has invalid PHY address\n", np->full_name); + return ret; + } + + /* A PHY must have a reg property in the range [0-31] */ + if (addr >= PHY_MAX_ADDR) { + dev_err(dev, "%s PHY address %i is too large\n", + np->full_name, addr); + return -EINVAL; + } + + return addr; +} + +#else /* CONFIG_OF_MDIO */ +static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) +{ + /* + * Fall back to the non-DT function to register a bus. + * This way, we don't have to keep compat bits around in drivers. 
+ */ + + return mdiobus_register(mdio); +} + +static inline struct phy_device *of_phy_find_device(struct device_node *phy_np) +{ + return NULL; +} + +static inline struct phy_device *of_phy_connect(struct net_device *dev, + struct device_node *phy_np, + void (*hndlr)(struct net_device *), + u32 flags, phy_interface_t iface) +{ + return NULL; +} + +static inline struct phy_device * +of_phy_get_and_connect(struct net_device *dev, struct device_node *np, + void (*hndlr)(struct net_device *)) +{ + return NULL; +} + +static inline struct phy_device *of_phy_attach(struct net_device *dev, + struct device_node *phy_np, + u32 flags, phy_interface_t iface) +{ + return NULL; +} + +static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np) +{ + return NULL; +} + +static inline int of_mdio_parse_addr(struct device *dev, + const struct device_node *np) +{ + return -ENOSYS; +} +static inline int of_phy_register_fixed_link(struct device_node *np) +{ + return -ENOSYS; +} +static inline void of_phy_deregister_fixed_link(struct device_node *np) +{ +} +static inline bool of_phy_is_fixed_link(struct device_node *np) +{ + return false; +} +#endif + + +#endif /* __LINUX_OF_MDIO_H */ diff --git a/include/linux/of_net.h b/include/linux/of_net.h new file mode 100644 index 000000000..90d81ee9e --- /dev/null +++ b/include/linux/of_net.h @@ -0,0 +1,40 @@ +/* + * OF helpers for network devices. + * + * This file is released under the GPLv2 + */ + +#ifndef __LINUX_OF_NET_H +#define __LINUX_OF_NET_H + +#ifdef CONFIG_OF_NET +#include + +struct net_device; +extern int of_get_phy_mode(struct device_node *np); +extern const void *of_get_mac_address(struct device_node *np); +extern int of_get_nvmem_mac_address(struct device_node *np, void *addr); +extern struct net_device *of_find_net_device_by_node(struct device_node *np); +#else +static inline int of_get_phy_mode(struct device_node *np) +{ + return -ENODEV; +} + +static inline const void *of_get_mac_address(struct device_node *np) +{ + return NULL; +} + +static inline int of_get_nvmem_mac_address(struct device_node *np, void *addr) +{ + return -ENODEV; +} + +static inline struct net_device *of_find_net_device_by_node(struct device_node *np) +{ + return NULL; +} +#endif + +#endif /* __LINUX_OF_NET_H */ diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h new file mode 100644 index 000000000..e83d87fc5 --- /dev/null +++ b/include/linux/of_pci.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __OF_PCI_H +#define __OF_PCI_H + +#include +#include + +struct pci_dev; +struct of_phandle_args; +struct device_node; + +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI) +struct device_node *of_pci_find_child_device(struct device_node *parent, + unsigned int devfn); +int of_pci_get_devfn(struct device_node *np); +void of_pci_check_probe_only(void); +int of_pci_map_rid(struct device_node *np, u32 rid, + const char *map_name, const char *map_mask_name, + struct device_node **target, u32 *id_out); +#else +static inline struct device_node *of_pci_find_child_device(struct device_node *parent, + unsigned int devfn) +{ + return NULL; +} + +static inline int of_pci_get_devfn(struct device_node *np) +{ + return -EINVAL; +} + +static inline int of_pci_map_rid(struct device_node *np, u32 rid, + const char *map_name, const char *map_mask_name, + struct device_node **target, u32 *id_out) +{ + return -EINVAL; +} + +static inline void of_pci_check_probe_only(void) { } +#endif + +#if IS_ENABLED(CONFIG_OF_IRQ) +int of_irq_parse_and_map_pci(const struct pci_dev 
*dev, u8 slot, u8 pin); +#else +static inline int +of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return 0; +} +#endif + +#endif diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h new file mode 100644 index 000000000..d0b183ab6 --- /dev/null +++ b/include/linux/of_pdt.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Definitions for building a device tree by calling into the + * Open Firmware PROM. + * + * Copyright (C) 2010 Andres Salomon + */ + +#ifndef _LINUX_OF_PDT_H +#define _LINUX_OF_PDT_H + +/* overridable operations for calling into the PROM */ +struct of_pdt_ops { + /* + * buf should be 32 bytes; return 0 on success. + * If prev is NULL, the first property will be returned. + */ + int (*nextprop)(phandle node, char *prev, char *buf); + + /* for both functions, return proplen on success; -1 on error */ + int (*getproplen)(phandle node, const char *prop); + int (*getproperty)(phandle node, const char *prop, char *buf, + int bufsize); + + /* phandles are 0 if no child or sibling exists */ + phandle (*getchild)(phandle parent); + phandle (*getsibling)(phandle node); + + /* return 0 on success; fill in 'len' with number of bytes in path */ + int (*pkg2path)(phandle node, char *buf, const int buflen, int *len); +}; + +extern void *prom_early_alloc(unsigned long size); + +/* for building the device tree */ +extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); + +extern void (*of_pdt_build_more)(struct device_node *dp); + +#endif /* _LINUX_OF_PDT_H */ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h new file mode 100644 index 000000000..84a966623 --- /dev/null +++ b/include/linux/of_platform.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _LINUX_OF_PLATFORM_H +#define _LINUX_OF_PLATFORM_H +/* + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. + * + */ + +#include +#include +#include +#include +#include + +/** + * struct of_dev_auxdata - lookup table entry for device names & platform_data + * @compatible: compatible value of node to match against node + * @phys_addr: Start address of registers to match against node + * @name: Name to assign for matching nodes + * @platform_data: platform_data to assign for matching nodes + * + * This lookup table allows the caller of of_platform_populate() to override + * the names of devices when creating devices from the device tree. The table + * should be terminated with an empty entry. It also allows the platform_data + * pointer to be set. + * + * The reason for this functionality is that some Linux infrastructure uses + * the device name to look up a specific device, but the Linux-specific names + * are not encoded into the device tree, so the kernel needs to provide specific + * values. + * + * Note: Using an auxdata lookup table should be considered a last resort when + * converting a platform to use the DT. Normally the automatically generated + * device name will not matter, and drivers should obtain data from the device + * node instead of from an anonymous platform_data pointer. 
+ */ +struct of_dev_auxdata { + char *compatible; + resource_size_t phys_addr; + char *name; + void *platform_data; +}; + +/* Macro to simplify populating a lookup table */ +#define OF_DEV_AUXDATA(_compat,_phys,_name,_pdata) \ + { .compatible = _compat, .phys_addr = _phys, .name = _name, \ + .platform_data = _pdata } + +extern const struct of_device_id of_default_bus_match_table[]; + +/* Platform drivers register/unregister */ +extern struct platform_device *of_device_alloc(struct device_node *np, + const char *bus_id, + struct device *parent); +#ifdef CONFIG_OF +extern struct platform_device *of_find_device_by_node(struct device_node *np); +#else +static inline struct platform_device *of_find_device_by_node(struct device_node *np) +{ + return NULL; +} +#endif + +/* Platform devices and busses creation */ +extern struct platform_device *of_platform_device_create(struct device_node *np, + const char *bus_id, + struct device *parent); + +extern int of_platform_device_destroy(struct device *dev, void *data); +extern int of_platform_bus_probe(struct device_node *root, + const struct of_device_id *matches, + struct device *parent); +#ifdef CONFIG_OF_ADDRESS +extern int of_platform_populate(struct device_node *root, + const struct of_device_id *matches, + const struct of_dev_auxdata *lookup, + struct device *parent); +extern int of_platform_default_populate(struct device_node *root, + const struct of_dev_auxdata *lookup, + struct device *parent); +extern void of_platform_depopulate(struct device *parent); + +extern int devm_of_platform_populate(struct device *dev); + +extern void devm_of_platform_depopulate(struct device *dev); +#else +static inline int of_platform_populate(struct device_node *root, + const struct of_device_id *matches, + const struct of_dev_auxdata *lookup, + struct device *parent) +{ + return -ENODEV; +} +static inline int of_platform_default_populate(struct device_node *root, + const struct of_dev_auxdata *lookup, + struct device *parent) +{ + return -ENODEV; +} +static inline void of_platform_depopulate(struct device *parent) { } + +static inline int devm_of_platform_populate(struct device *dev) +{ + return -ENODEV; +} + +static inline void devm_of_platform_depopulate(struct device *dev) { } +#endif + +#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS) +extern void of_platform_register_reconfig_notifier(void); +#else +static inline void of_platform_register_reconfig_notifier(void) { } +#endif + +#endif /* _LINUX_OF_PLATFORM_H */ diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h new file mode 100644 index 000000000..67ab8d271 --- /dev/null +++ b/include/linux/of_reserved_mem.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __OF_RESERVED_MEM_H +#define __OF_RESERVED_MEM_H + +#include + +struct of_phandle_args; +struct reserved_mem_ops; + +struct reserved_mem { + const char *name; + unsigned long fdt_node; + unsigned long phandle; + const struct reserved_mem_ops *ops; + phys_addr_t base; + phys_addr_t size; + void *priv; +}; + +struct reserved_mem_ops { + int (*device_init)(struct reserved_mem *rmem, + struct device *dev); + void (*device_release)(struct reserved_mem *rmem, + struct device *dev); +}; + +typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); + +#define RESERVEDMEM_OF_DECLARE(name, compat, init) \ + _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) + +#ifdef CONFIG_OF_RESERVED_MEM + +int of_reserved_mem_device_init_by_idx(struct device *dev, + struct device_node *np, int idx); 
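A minimal sketch of the auxdata lookup table described in the of_platform.h section above, assuming a hypothetical "acme,uart" device; the compatible string, address, and device name are invented for illustration and the table is simply passed to of_platform_populate().

static struct of_dev_auxdata example_auxdata_lookup[] __initdata = {
	/* keep the legacy "acme-uart.0" name so existing clock/pdata lookups still match */
	OF_DEV_AUXDATA("acme,uart", 0x48020000, "acme-uart.0", NULL),
	{ /* sentinel: the table must end with an empty entry */ },
};

static int __init example_populate_bus(void)
{
	/* create platform devices for the whole tree, applying the overrides above */
	return of_platform_populate(NULL, of_default_bus_match_table,
				    example_auxdata_lookup, NULL);
}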
+void of_reserved_mem_device_release(struct device *dev); + +int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, + phys_addr_t align, + phys_addr_t start, + phys_addr_t end, + bool nomap, + phys_addr_t *res_base); + +void fdt_init_reserved_mem(void); +void fdt_reserved_mem_save_node(unsigned long node, const char *uname, + phys_addr_t base, phys_addr_t size); +struct reserved_mem *of_reserved_mem_lookup(struct device_node *np); +#else +static inline int of_reserved_mem_device_init_by_idx(struct device *dev, + struct device_node *np, int idx) +{ + return -ENOSYS; +} +static inline void of_reserved_mem_device_release(struct device *pdev) { } + +static inline void fdt_init_reserved_mem(void) { } +static inline void fdt_reserved_mem_save_node(unsigned long node, + const char *uname, phys_addr_t base, phys_addr_t size) { } +static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np) +{ + return NULL; +} +#endif + +/** + * of_reserved_mem_device_init() - assign reserved memory region to given device + * @dev: Pointer to the device to configure + * + * This function assigns respective DMA-mapping operations based on the first + * reserved memory region specified by 'memory-region' property in device tree + * node of the given device. + * + * Returns error code or zero on success. + */ +static inline int of_reserved_mem_device_init(struct device *dev) +{ + return of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0); +} + +#endif /* __OF_RESERVED_MEM_H */ diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h new file mode 100644 index 000000000..d2fa9ca42 --- /dev/null +++ b/include/linux/oid_registry.h @@ -0,0 +1,103 @@ +/* ASN.1 Object identifier (OID) registry + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_OID_REGISTRY_H +#define _LINUX_OID_REGISTRY_H + +#include + +/* + * OIDs are turned into these values if possible, or OID__NR if not held here. + * + * NOTE! Do not mess with the format of each line as this is read by + * build_OID_registry.pl to generate the data for look_up_OID(). 
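A minimal sketch of the init/release pairing documented for of_reserved_mem_device_init() above, assuming a hypothetical driver whose device node carries a "memory-region" phandle; the probe function name is illustrative.

static int example_probe(struct device *dev)
{
	int ret;

	/* attach the first "memory-region" phandle of dev->of_node to this device */
	ret = of_reserved_mem_device_init(dev);
	if (ret)
		return ret;	/* e.g. -ENODEV with no region, -ENOSYS when support is compiled out */

	/* ... set up DMA buffers backed by the reserved region here ... */

	of_reserved_mem_device_release(dev);	/* detach again on teardown */
	return 0;
}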
+ */ +enum OID { + OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */ + OID_id_dsa, /* 1.2.840.10040.4.1 */ + OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ + OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ + + /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ + OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ + OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */ + OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */ + OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */ + OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */ + OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */ + OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */ + OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */ + OID_sha224WithRSAEncryption, /* 1.2.840.113549.1.1.14 */ + /* PKCS#7 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-7(7)} */ + OID_data, /* 1.2.840.113549.1.7.1 */ + OID_signed_data, /* 1.2.840.113549.1.7.2 */ + /* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */ + OID_email_address, /* 1.2.840.113549.1.9.1 */ + OID_contentType, /* 1.2.840.113549.1.9.3 */ + OID_messageDigest, /* 1.2.840.113549.1.9.4 */ + OID_signingTime, /* 1.2.840.113549.1.9.5 */ + OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */ + OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */ + + /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */ + OID_md2, /* 1.2.840.113549.2.2 */ + OID_md4, /* 1.2.840.113549.2.4 */ + OID_md5, /* 1.2.840.113549.2.5 */ + + /* Microsoft Authenticode & Software Publishing */ + OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */ + OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */ + OID_msSpOpusInfo, /* 1.3.6.1.4.1.311.2.1.12 */ + OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */ + OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */ + OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ + + OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ + OID_sha1, /* 1.3.14.3.2.26 */ + OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ + OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ + OID_sha512, /* 2.16.840.1.101.3.4.2.3 */ + OID_sha224, /* 2.16.840.1.101.3.4.2.4 */ + + /* Distinguished Name attribute IDs [RFC 2256] */ + OID_commonName, /* 2.5.4.3 */ + OID_surname, /* 2.5.4.4 */ + OID_countryName, /* 2.5.4.6 */ + OID_locality, /* 2.5.4.7 */ + OID_stateOrProvinceName, /* 2.5.4.8 */ + OID_organizationName, /* 2.5.4.10 */ + OID_organizationUnitName, /* 2.5.4.11 */ + OID_title, /* 2.5.4.12 */ + OID_description, /* 2.5.4.13 */ + OID_name, /* 2.5.4.41 */ + OID_givenName, /* 2.5.4.42 */ + OID_initials, /* 2.5.4.43 */ + OID_generationalQualifier, /* 2.5.4.44 */ + + /* Certificate extension IDs */ + OID_subjectKeyIdentifier, /* 2.5.29.14 */ + OID_keyUsage, /* 2.5.29.15 */ + OID_subjectAltName, /* 2.5.29.17 */ + OID_issuerAltName, /* 2.5.29.18 */ + OID_basicConstraints, /* 2.5.29.19 */ + OID_crlDistributionPoints, /* 2.5.29.31 */ + OID_certPolicies, /* 2.5.29.32 */ + OID_authorityKeyIdentifier, /* 2.5.29.35 */ + OID_extKeyUsage, /* 2.5.29.37 */ + + OID__NR +}; + +extern enum OID look_up_OID(const void *data, size_t datasize); +extern int sprint_oid(const void *, size_t, char *, size_t); +extern int sprint_OID(enum OID, char *, size_t); + +#endif /* _LINUX_OID_REGISTRY_H */ diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h new file mode 100644 index 000000000..79bdc6328 --- /dev/null +++ b/include/linux/olpc-ec.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_OLPC_EC_H +#define _LINUX_OLPC_EC_H + +/* XO-1 EC commands 
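A minimal sketch, assuming a hypothetical caller holding a DER-encoded OID buffer, of how the registry declared above is typically consulted with look_up_OID() and sprint_oid(); the helper name is illustrative.

static bool example_oid_is_sha256(const void *oid_data, size_t oid_len)
{
	char buf[64];

	/* map the raw encoding to the enum; OID__NR means "not in the registry" */
	if (look_up_OID(oid_data, oid_len) == OID_sha256)
		return true;

	/* otherwise print the dotted-decimal form for debugging */
	if (sprint_oid(oid_data, oid_len, buf, sizeof(buf)) > 0)
		pr_debug("unexpected OID %s\n", buf);

	return false;
}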
*/ +#define EC_FIRMWARE_REV 0x08 +#define EC_WRITE_SCI_MASK 0x1b +#define EC_WAKE_UP_WLAN 0x24 +#define EC_WLAN_LEAVE_RESET 0x25 +#define EC_DCON_POWER_MODE 0x26 +#define EC_READ_EB_MODE 0x2a +#define EC_SET_SCI_INHIBIT 0x32 +#define EC_SET_SCI_INHIBIT_RELEASE 0x34 +#define EC_WLAN_ENTER_RESET 0x35 +#define EC_WRITE_EXT_SCI_MASK 0x38 +#define EC_SCI_QUERY 0x84 +#define EC_EXT_SCI_QUERY 0x85 + +struct platform_device; + +struct olpc_ec_driver { + int (*probe)(struct platform_device *); + int (*suspend)(struct platform_device *); + int (*resume)(struct platform_device *); + + int (*ec_cmd)(u8, u8 *, size_t, u8 *, size_t, void *); +}; + +#ifdef CONFIG_OLPC + +extern void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg); + +extern int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, + size_t outlen); + +#else + +static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, + size_t outlen) { return -ENODEV; } + +#endif /* CONFIG_OLPC */ + +#endif /* _LINUX_OLPC_EC_H */ diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h new file mode 100644 index 000000000..840ce551e --- /dev/null +++ b/include/linux/omap-dma.h @@ -0,0 +1,378 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_OMAP_DMA_H +#define __LINUX_OMAP_DMA_H +#include + +/* + * Legacy OMAP DMA handling defines and functions + * + * NOTE: Do not use these any longer. + * + * Use the generic dmaengine functions as defined in + * include/linux/dmaengine.h. + * + * Copyright (C) 2003 Nokia Corporation + * Author: Juha Yrjölä + * + */ + +#include + +#define INT_DMA_LCD (NR_IRQS_LEGACY + 25) + +#define OMAP1_DMA_TOUT_IRQ (1 << 0) +#define OMAP_DMA_DROP_IRQ (1 << 1) +#define OMAP_DMA_HALF_IRQ (1 << 2) +#define OMAP_DMA_FRAME_IRQ (1 << 3) +#define OMAP_DMA_LAST_IRQ (1 << 4) +#define OMAP_DMA_BLOCK_IRQ (1 << 5) +#define OMAP1_DMA_SYNC_IRQ (1 << 6) +#define OMAP2_DMA_PKT_IRQ (1 << 7) +#define OMAP2_DMA_TRANS_ERR_IRQ (1 << 8) +#define OMAP2_DMA_SECURE_ERR_IRQ (1 << 9) +#define OMAP2_DMA_SUPERVISOR_ERR_IRQ (1 << 10) +#define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11) + +#define OMAP_DMA_CCR_EN (1 << 7) +#define OMAP_DMA_CCR_RD_ACTIVE (1 << 9) +#define OMAP_DMA_CCR_WR_ACTIVE (1 << 10) +#define OMAP_DMA_CCR_SEL_SRC_DST_SYNC (1 << 24) +#define OMAP_DMA_CCR_BUFFERING_DISABLE (1 << 25) + +#define OMAP_DMA_DATA_TYPE_S8 0x00 +#define OMAP_DMA_DATA_TYPE_S16 0x01 +#define OMAP_DMA_DATA_TYPE_S32 0x02 + +#define OMAP_DMA_SYNC_ELEMENT 0x00 +#define OMAP_DMA_SYNC_FRAME 0x01 +#define OMAP_DMA_SYNC_BLOCK 0x02 +#define OMAP_DMA_SYNC_PACKET 0x03 + +#define OMAP_DMA_DST_SYNC_PREFETCH 0x02 +#define OMAP_DMA_SRC_SYNC 0x01 +#define OMAP_DMA_DST_SYNC 0x00 + +#define OMAP_DMA_PORT_EMIFF 0x00 +#define OMAP_DMA_PORT_EMIFS 0x01 +#define OMAP_DMA_PORT_OCP_T1 0x02 +#define OMAP_DMA_PORT_TIPB 0x03 +#define OMAP_DMA_PORT_OCP_T2 0x04 +#define OMAP_DMA_PORT_MPUI 0x05 + +#define OMAP_DMA_AMODE_CONSTANT 0x00 +#define OMAP_DMA_AMODE_POST_INC 0x01 +#define OMAP_DMA_AMODE_SINGLE_IDX 0x02 +#define OMAP_DMA_AMODE_DOUBLE_IDX 0x03 + +#define DMA_DEFAULT_FIFO_DEPTH 0x10 +#define DMA_DEFAULT_ARB_RATE 0x01 +/* Pass THREAD_RESERVE ORed with THREAD_FIFO for tparams */ +#define DMA_THREAD_RESERVE_NORM (0x00 << 12) /* Def */ +#define DMA_THREAD_RESERVE_ONET (0x01 << 12) +#define DMA_THREAD_RESERVE_TWOT (0x02 << 12) +#define DMA_THREAD_RESERVE_THREET (0x03 << 12) +#define DMA_THREAD_FIFO_NONE (0x00 << 14) /* Def */ +#define DMA_THREAD_FIFO_75 (0x01 << 14) +#define DMA_THREAD_FIFO_25 (0x02 << 14) +#define DMA_THREAD_FIFO_50 (0x03 << 14) + +/* 
DMA4_OCP_SYSCONFIG bits */ +#define DMA_SYSCONFIG_MIDLEMODE_MASK (3 << 12) +#define DMA_SYSCONFIG_CLOCKACTIVITY_MASK (3 << 8) +#define DMA_SYSCONFIG_EMUFREE (1 << 5) +#define DMA_SYSCONFIG_SIDLEMODE_MASK (3 << 3) +#define DMA_SYSCONFIG_SOFTRESET (1 << 2) +#define DMA_SYSCONFIG_AUTOIDLE (1 << 0) + +#define DMA_SYSCONFIG_MIDLEMODE(n) ((n) << 12) +#define DMA_SYSCONFIG_SIDLEMODE(n) ((n) << 3) + +#define DMA_IDLEMODE_SMARTIDLE 0x2 +#define DMA_IDLEMODE_NO_IDLE 0x1 +#define DMA_IDLEMODE_FORCE_IDLE 0x0 + +/* Chaining modes*/ +#ifndef CONFIG_ARCH_OMAP1 +#define OMAP_DMA_STATIC_CHAIN 0x1 +#define OMAP_DMA_DYNAMIC_CHAIN 0x2 +#define OMAP_DMA_CHAIN_ACTIVE 0x1 +#define OMAP_DMA_CHAIN_INACTIVE 0x0 +#endif + +#define DMA_CH_PRIO_HIGH 0x1 +#define DMA_CH_PRIO_LOW 0x0 /* Def */ + +/* Errata handling */ +#define IS_DMA_ERRATA(id) (errata & (id)) +#define SET_DMA_ERRATA(id) (errata |= (id)) + +#define DMA_ERRATA_IFRAME_BUFFERING BIT(0x0) +#define DMA_ERRATA_PARALLEL_CHANNELS BIT(0x1) +#define DMA_ERRATA_i378 BIT(0x2) +#define DMA_ERRATA_i541 BIT(0x3) +#define DMA_ERRATA_i88 BIT(0x4) +#define DMA_ERRATA_3_3 BIT(0x5) +#define DMA_ROMCODE_BUG BIT(0x6) + +/* Attributes for OMAP DMA Contrller */ +#define DMA_LINKED_LCH BIT(0x0) +#define GLOBAL_PRIORITY BIT(0x1) +#define RESERVE_CHANNEL BIT(0x2) +#define IS_CSSA_32 BIT(0x3) +#define IS_CDSA_32 BIT(0x4) +#define IS_RW_PRIORITY BIT(0x5) +#define ENABLE_1510_MODE BIT(0x6) +#define SRC_PORT BIT(0x7) +#define DST_PORT BIT(0x8) +#define SRC_INDEX BIT(0x9) +#define DST_INDEX BIT(0xa) +#define IS_BURST_ONLY4 BIT(0xb) +#define CLEAR_CSR_ON_READ BIT(0xc) +#define IS_WORD_16 BIT(0xd) +#define ENABLE_16XX_MODE BIT(0xe) +#define HS_CHANNELS_RESERVED BIT(0xf) +#define DMA_ENGINE_HANDLE_IRQ BIT(0x10) + +/* Defines for DMA Capabilities */ +#define DMA_HAS_TRANSPARENT_CAPS (0x1 << 18) +#define DMA_HAS_CONSTANT_FILL_CAPS (0x1 << 19) +#define DMA_HAS_DESCRIPTOR_CAPS (0x3 << 20) + +enum omap_reg_offsets { + +GCR, GSCR, GRST1, HW_ID, +PCH2_ID, PCH0_ID, PCH1_ID, PCHG_ID, +PCHD_ID, CAPS_0, CAPS_1, CAPS_2, +CAPS_3, CAPS_4, PCH2_SR, PCH0_SR, +PCH1_SR, PCHD_SR, REVISION, IRQSTATUS_L0, +IRQSTATUS_L1, IRQSTATUS_L2, IRQSTATUS_L3, IRQENABLE_L0, +IRQENABLE_L1, IRQENABLE_L2, IRQENABLE_L3, SYSSTATUS, +OCP_SYSCONFIG, + +/* omap1+ specific */ +CPC, CCR2, LCH_CTRL, + +/* Common registers for all omap's */ +CSDP, CCR, CICR, CSR, +CEN, CFN, CSFI, CSEI, +CSAC, CDAC, CDEI, +CDFI, CLNK_CTRL, + +/* Channel specific registers */ +CSSA, CDSA, COLOR, +CCEN, CCFN, + +/* omap3630 and omap4 specific */ +CDP, CNDP, CCDN, + +}; + +enum omap_dma_burst_mode { + OMAP_DMA_DATA_BURST_DIS = 0, + OMAP_DMA_DATA_BURST_4, + OMAP_DMA_DATA_BURST_8, + OMAP_DMA_DATA_BURST_16, +}; + +enum end_type { + OMAP_DMA_LITTLE_ENDIAN = 0, + OMAP_DMA_BIG_ENDIAN +}; + +enum omap_dma_color_mode { + OMAP_DMA_COLOR_DIS = 0, + OMAP_DMA_CONSTANT_FILL, + OMAP_DMA_TRANSPARENT_COPY +}; + +enum omap_dma_write_mode { + OMAP_DMA_WRITE_NON_POSTED = 0, + OMAP_DMA_WRITE_POSTED, + OMAP_DMA_WRITE_LAST_NON_POSTED +}; + +enum omap_dma_channel_mode { + OMAP_DMA_LCH_2D = 0, + OMAP_DMA_LCH_G, + OMAP_DMA_LCH_P, + OMAP_DMA_LCH_PD +}; + +struct omap_dma_channel_params { + int data_type; /* data type 8,16,32 */ + int elem_count; /* number of elements in a frame */ + int frame_count; /* number of frames in a element */ + + int src_port; /* Only on OMAP1 REVISIT: Is this needed? 
*/ + int src_amode; /* constant, post increment, indexed, + double indexed */ + unsigned long src_start; /* source address : physical */ + int src_ei; /* source element index */ + int src_fi; /* source frame index */ + + int dst_port; /* Only on OMAP1 REVISIT: Is this needed? */ + int dst_amode; /* constant, post increment, indexed, + double indexed */ + unsigned long dst_start; /* source address : physical */ + int dst_ei; /* source element index */ + int dst_fi; /* source frame index */ + + int trigger; /* trigger attached if the channel is + synchronized */ + int sync_mode; /* sycn on element, frame , block or packet */ + int src_or_dst_synch; /* source synch(1) or destination synch(0) */ + + int ie; /* interrupt enabled */ + + unsigned char read_prio;/* read priority */ + unsigned char write_prio;/* write priority */ + +#ifndef CONFIG_ARCH_OMAP1 + enum omap_dma_burst_mode burst_mode; /* Burst mode 4/8/16 words */ +#endif +}; + +struct omap_dma_lch { + int next_lch; + int dev_id; + u16 saved_csr; + u16 enabled_irqs; + const char *dev_name; + void (*callback)(int lch, u16 ch_status, void *data); + void *data; + long flags; + /* required for Dynamic chaining */ + int prev_linked_ch; + int next_linked_ch; + int state; + int chain_id; + int status; +}; + +struct omap_dma_dev_attr { + u32 dev_caps; + u16 lch_count; + u16 chan_count; +}; + +enum { + OMAP_DMA_REG_NONE, + OMAP_DMA_REG_16BIT, + OMAP_DMA_REG_2X16BIT, + OMAP_DMA_REG_32BIT, +}; + +struct omap_dma_reg { + u16 offset; + u8 stride; + u8 type; +}; + +#define SDMA_FILTER_PARAM(hw_req) ((int[]) { (hw_req) }) +struct dma_slave_map; + +/* System DMA platform data structure */ +struct omap_system_dma_plat_info { + const struct omap_dma_reg *reg_map; + unsigned channel_stride; + struct omap_dma_dev_attr *dma_attr; + u32 errata; + void (*show_dma_caps)(void); + void (*clear_lch_regs)(int lch); + void (*clear_dma)(int lch); + void (*dma_write)(u32 val, int reg, int lch); + u32 (*dma_read)(int reg, int lch); + + const struct dma_slave_map *slave_map; + int slavecnt; +}; + +#ifdef CONFIG_ARCH_OMAP2PLUS +#define dma_omap2plus() 1 +#else +#define dma_omap2plus() 0 +#endif +#define dma_omap1() (!dma_omap2plus()) +#define __dma_omap15xx(d) (dma_omap1() && (d)->dev_caps & ENABLE_1510_MODE) +#define __dma_omap16xx(d) (dma_omap1() && (d)->dev_caps & ENABLE_16XX_MODE) +#define dma_omap15xx() __dma_omap15xx(d) +#define dma_omap16xx() __dma_omap16xx(d) + +#if defined(CONFIG_ARCH_OMAP) +extern struct omap_system_dma_plat_info *omap_get_plat_info(void); + +extern void omap_set_dma_priority(int lch, int dst_port, int priority); +extern int omap_request_dma(int dev_id, const char *dev_name, + void (*callback)(int lch, u16 ch_status, void *data), + void *data, int *dma_ch); +extern void omap_enable_dma_irq(int ch, u16 irq_bits); +extern void omap_disable_dma_irq(int ch, u16 irq_bits); +extern void omap_free_dma(int ch); +extern void omap_start_dma(int lch); +extern void omap_stop_dma(int lch); +extern void omap_set_dma_transfer_params(int lch, int data_type, + int elem_count, int frame_count, + int sync_mode, + int dma_trigger, int src_or_dst_synch); +extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode); +extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode); + +extern void omap_set_dma_src_params(int lch, int src_port, int src_amode, + unsigned long src_start, + int src_ei, int src_fi); +extern void omap_set_dma_src_data_pack(int lch, int enable); +extern void omap_set_dma_src_burst_mode(int lch, + enum 
omap_dma_burst_mode burst_mode); + +extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode, + unsigned long dest_start, + int dst_ei, int dst_fi); +extern void omap_set_dma_dest_data_pack(int lch, int enable); +extern void omap_set_dma_dest_burst_mode(int lch, + enum omap_dma_burst_mode burst_mode); + +extern void omap_set_dma_params(int lch, + struct omap_dma_channel_params *params); + +extern void omap_dma_link_lch(int lch_head, int lch_queue); + +extern int omap_set_dma_callback(int lch, + void (*callback)(int lch, u16 ch_status, void *data), + void *data); +extern dma_addr_t omap_get_dma_src_pos(int lch); +extern dma_addr_t omap_get_dma_dst_pos(int lch); +extern int omap_get_dma_active_status(int lch); +extern int omap_dma_running(void); +extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth, + int tparams); +void omap_dma_global_context_save(void); +void omap_dma_global_context_restore(void); + +#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP) +#include +#else +static inline int omap_lcd_dma_running(void) +{ + return 0; +} +#endif + +#else /* CONFIG_ARCH_OMAP */ + +static inline struct omap_system_dma_plat_info *omap_get_plat_info(void) +{ + return NULL; +} + +static inline int omap_request_dma(int dev_id, const char *dev_name, + void (*callback)(int lch, u16 ch_status, void *data), + void *data, int *dma_ch) +{ + return -ENODEV; +} + +static inline void omap_free_dma(int ch) { } + +#endif /* CONFIG_ARCH_OMAP */ + +#endif /* __LINUX_OMAP_DMA_H */ diff --git a/include/linux/omap-dmaengine.h b/include/linux/omap-dmaengine.h new file mode 100644 index 000000000..8e6906c72 --- /dev/null +++ b/include/linux/omap-dmaengine.h @@ -0,0 +1,21 @@ +/* + * OMAP DMA Engine support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_OMAP_DMAENGINE_H +#define __LINUX_OMAP_DMAENGINE_H + +struct dma_chan; + +#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE)) +bool omap_dma_filter_fn(struct dma_chan *, void *); +#else +static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) +{ + return false; +} +#endif +#endif /* __LINUX_OMAP_DMAENGINE_H */ diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h new file mode 100644 index 000000000..053feb415 --- /dev/null +++ b/include/linux/omap-gpmc.h @@ -0,0 +1,103 @@ +/* + * OMAP GPMC (General Purpose Memory Controller) defines + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include + +#define GPMC_CONFIG_WP 0x00000005 + +/* IRQ numbers in GPMC IRQ domain for legacy boot use */ +#define GPMC_IRQ_FIFOEVENTENABLE 0 +#define GPMC_IRQ_COUNT_EVENT 1 + +/** + * gpmc_nand_ops - Interface between NAND and GPMC + * @nand_write_buffer_empty: get the NAND write buffer empty status. + */ +struct gpmc_nand_ops { + bool (*nand_writebuffer_empty)(void); +}; + +struct gpmc_nand_regs; + +struct gpmc_onenand_info { + bool sync_read; + bool sync_write; + int burst_len; +}; + +#if IS_ENABLED(CONFIG_OMAP_GPMC) +struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, + int cs); +/** + * gpmc_omap_onenand_set_timings - set optimized sync timings. 
+ * @cs: Chip Select Region + * @freq: Chip frequency + * @latency: Burst latency cycle count + * @info: Structure describing parameters used + * + * Sets optimized timings for the @cs region based on @freq and @latency. + * Updates the @info structure based on the GPMC settings. + */ +int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq, + int latency, + struct gpmc_onenand_info *info); + +#else +static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, + int cs) +{ + return NULL; +} + +static inline +int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq, + int latency, + struct gpmc_onenand_info *info) +{ + return -EINVAL; +} +#endif /* CONFIG_OMAP_GPMC */ + +extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, + struct gpmc_settings *gpmc_s, + struct gpmc_device_timings *dev_t); + +struct device_node; + +extern int gpmc_get_client_irq(unsigned irq_config); + +extern unsigned int gpmc_ticks_to_ns(unsigned int ticks); + +extern void gpmc_cs_write_reg(int cs, int idx, u32 val); +extern int gpmc_calc_divider(unsigned int sync_clk); +extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t, + const struct gpmc_settings *s); +extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p); +extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base); +extern void gpmc_cs_free(int cs); +extern int gpmc_configure(int cmd, int wval); +extern void gpmc_read_settings_dt(struct device_node *np, + struct gpmc_settings *p); + +extern void omap3_gpmc_save_context(void); +extern void omap3_gpmc_restore_context(void); + +struct gpmc_timings; +struct omap_nand_platform_data; +struct omap_onenand_platform_data; + +#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) +extern int gpmc_onenand_init(struct omap_onenand_platform_data *d); +#else +#define board_onenand_data NULL +static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d) +{ + return 0; +} +#endif diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h new file mode 100644 index 000000000..ce1b7c628 --- /dev/null +++ b/include/linux/omap-iommu.h @@ -0,0 +1,24 @@ +/* + * omap iommu: simple virtual address space management + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Written by Hiroshi DOYU + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
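/*
 * A minimal usage sketch of the legacy OMAP DMA request/free interface
 * declared in <linux/omap-dma.h> above. The device id, the "example"
 * name, the callback body and the example_* helpers are hypothetical; a
 * real driver would also program the channel with
 * omap_set_dma_transfer_params() and the src/dest helpers before
 * calling omap_start_dma().
 */
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/omap-dma.h>

static void example_dma_callback(int lch, u16 ch_status, void *data)
{
	pr_debug("example: channel %d done, status 0x%04x\n", lch, ch_status);
}

static int example_use_channel(int dev_id, void *drv_data)
{
	int lch, ret;

	ret = omap_request_dma(dev_id, "example", example_dma_callback,
			       drv_data, &lch);
	if (ret)
		return ret;	/* negative errno, e.g. no free channel */

	/*
	 * A real driver would now set up the transfer and start it with
	 * omap_start_dma(lch); this sketch only releases the channel.
	 */
	omap_free_dma(lch);
	return 0;
}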
+ */ + +#ifndef _OMAP_IOMMU_H_ +#define _OMAP_IOMMU_H_ + +#ifdef CONFIG_OMAP_IOMMU +extern void omap_iommu_save_ctx(struct device *dev); +extern void omap_iommu_restore_ctx(struct device *dev); +#else +static inline void omap_iommu_save_ctx(struct device *dev) {} +static inline void omap_iommu_restore_ctx(struct device *dev) {} +#endif + +#endif diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h new file mode 100644 index 000000000..6dbcd2da0 --- /dev/null +++ b/include/linux/omap-mailbox.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * omap-mailbox: interprocessor communication module for OMAP + */ + +#ifndef OMAP_MAILBOX_H +#define OMAP_MAILBOX_H + +typedef u32 mbox_msg_t; + +typedef int __bitwise omap_mbox_irq_t; +#define IRQ_TX ((__force omap_mbox_irq_t) 1) +#define IRQ_RX ((__force omap_mbox_irq_t) 2) + +struct mbox_chan; +struct mbox_client; + +struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, + const char *chan_name); + +void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); +void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); + +#endif /* OMAP_MAILBOX_H */ diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h new file mode 100644 index 000000000..d1f4dccae --- /dev/null +++ b/include/linux/omapfb.h @@ -0,0 +1,42 @@ +/* + * File: include/linux/omapfb.h + * + * Framebuffer driver for TI OMAP boards + * + * Copyright (C) 2004 Nokia Corporation + * Author: Imre Deak + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef __LINUX_OMAPFB_H__ +#define __LINUX_OMAPFB_H__ + +#include + + +struct omap_lcd_config { + char panel_name[16]; + char ctrl_name[16]; + s16 nreset_gpio; + u8 data_lines; +}; + +struct omapfb_platform_data { + struct omap_lcd_config lcd; +}; + +void __init omapfb_set_lcd_config(const struct omap_lcd_config *config); + +#endif /* __OMAPFB_H */ diff --git a/include/linux/once.h b/include/linux/once.h new file mode 100644 index 000000000..ae6f4eb41 --- /dev/null +++ b/include/linux/once.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ONCE_H +#define _LINUX_ONCE_H + +#include +#include + +bool __do_once_start(bool *done, unsigned long *flags); +void __do_once_done(bool *done, struct static_key_true *once_key, + unsigned long *flags, struct module *mod); + +/* Call a function exactly once. The idea of DO_ONCE() is to perform + * a function call such as initialization of random seeds, etc, only + * once, where DO_ONCE() can live in the fast-path. After @func has + * been called with the passed arguments, the static key will patch + * out the condition into a nop. DO_ONCE() guarantees type safety of + * arguments! + * + * Not that the following is not equivalent ... + * + * DO_ONCE(func, arg); + * DO_ONCE(func, arg); + * + * ... 
to this version: + * + * void foo(void) + * { + * DO_ONCE(func, arg); + * } + * + * foo(); + * foo(); + * + * In case the one-time invocation could be triggered from multiple + * places, then a common helper function must be defined, so that only + * a single static key will be placed there! + */ +#define DO_ONCE(func, ...) \ + ({ \ + bool ___ret = false; \ + static bool ___done = false; \ + static DEFINE_STATIC_KEY_TRUE(___once_key); \ + if (static_branch_unlikely(&___once_key)) { \ + unsigned long ___flags; \ + ___ret = __do_once_start(&___done, &___flags); \ + if (unlikely(___ret)) { \ + func(__VA_ARGS__); \ + __do_once_done(&___done, &___once_key, \ + &___flags, THIS_MODULE); \ + } \ + } \ + ___ret; \ + }) + +#define get_random_once(buf, nbytes) \ + DO_ONCE(get_random_bytes, (buf), (nbytes)) +#define get_random_once_wait(buf, nbytes) \ + DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \ + +#endif /* _LINUX_ONCE_H */ diff --git a/include/linux/oom.h b/include/linux/oom.h new file mode 100644 index 000000000..3f649be17 --- /dev/null +++ b/include/linux/oom.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __INCLUDE_LINUX_OOM_H +#define __INCLUDE_LINUX_OOM_H + + +#include +#include +#include +#include +#include /* MMF_* */ +#include /* VM_FAULT* */ + +struct zonelist; +struct notifier_block; +struct mem_cgroup; +struct task_struct; + +/* + * Details of the page allocation that triggered the oom killer that are used to + * determine what should be killed. + */ +struct oom_control { + /* Used to determine cpuset */ + struct zonelist *zonelist; + + /* Used to determine mempolicy */ + nodemask_t *nodemask; + + /* Memory cgroup in which oom is invoked, or NULL for global oom */ + struct mem_cgroup *memcg; + + /* Used to determine cpuset and node locality requirement */ + const gfp_t gfp_mask; + + /* + * order == -1 means the oom kill is required by sysrq, otherwise only + * for display purposes. + */ + const int order; + + /* Used by oom implementation, do not set */ + unsigned long totalpages; + struct task_struct *chosen; + unsigned long chosen_points; +}; + +extern struct mutex oom_lock; +extern struct mutex oom_adj_mutex; + +static inline void set_current_oom_origin(void) +{ + current->signal->oom_flag_origin = true; +} + +static inline void clear_current_oom_origin(void) +{ + current->signal->oom_flag_origin = false; +} + +static inline bool oom_task_origin(const struct task_struct *p) +{ + return p->signal->oom_flag_origin; +} + +static inline bool tsk_is_oom_victim(struct task_struct * tsk) +{ + return tsk->signal->oom_mm; +} + +/* + * Use this helper if tsk->mm != mm and the victim mm needs a special + * handling. This is guaranteed to stay true after once set. + */ +static inline bool mm_is_oom_victim(struct mm_struct *mm) +{ + return test_bit(MMF_OOM_VICTIM, &mm->flags); +} + +/* + * Checks whether a page fault on the given mm is still reliable. + * This is no longer true if the oom reaper started to reap the + * address space which is reflected by MMF_UNSTABLE flag set in + * the mm. At that moment any !shared mapping would lose the content + * and could cause a memory corruption (zero pages instead of the + * original content). + * + * User should call this before establishing a page table entry for + * a !shared mapping and under the proper page table lock. + * + * Return 0 when the PF is safe VM_FAULT_SIGBUS otherwise. 
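/*
 * A minimal sketch of the DO_ONCE()/get_random_once() pattern documented
 * in <linux/once.h> above: seed a per-boot secret exactly once, from the
 * fast path, without taking a lock on every call. The example_* names
 * are hypothetical.
 */
#include <linux/types.h>
#include <linux/once.h>
#include <linux/random.h>

static u32 example_secret;

static u32 example_obfuscate(u32 val)
{
	/*
	 * Only the very first caller pays for get_random_bytes(); after
	 * that the static key patches the check out of the fast path.
	 */
	get_random_once(&example_secret, sizeof(example_secret));

	return val ^ example_secret;
}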
+ */ +static inline vm_fault_t check_stable_address_space(struct mm_struct *mm) +{ + if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags))) + return VM_FAULT_SIGBUS; + return 0; +} + +bool __oom_reap_task_mm(struct mm_struct *mm); + +extern unsigned long oom_badness(struct task_struct *p, + struct mem_cgroup *memcg, const nodemask_t *nodemask, + unsigned long totalpages); + +extern bool out_of_memory(struct oom_control *oc); + +extern void exit_oom_victim(void); + +extern int register_oom_notifier(struct notifier_block *nb); +extern int unregister_oom_notifier(struct notifier_block *nb); + +extern bool oom_killer_disable(signed long timeout); +extern void oom_killer_enable(void); + +extern struct task_struct *find_lock_task_mm(struct task_struct *p); + +/* sysctls */ +extern int sysctl_oom_dump_tasks; +extern int sysctl_oom_kill_allocating_task; +extern int sysctl_panic_on_oom; +#endif /* _INCLUDE_LINUX_OOM_H */ diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h new file mode 100644 index 000000000..379affc63 --- /dev/null +++ b/include/linux/openvswitch.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2007-2011 Nicira Networks. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + */ + +#ifndef _LINUX_OPENVSWITCH_H +#define _LINUX_OPENVSWITCH_H 1 + +#include + +#define OVS_CLONE_ATTR_EXEC 0 /* Specify an u32 value. When nonzero, + * actions in clone will not change flow + * keys. False otherwise. + */ + +#endif /* _LINUX_OPENVSWITCH_H */ diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h new file mode 100644 index 000000000..b2a0f15f1 --- /dev/null +++ b/include/linux/oprofile.h @@ -0,0 +1,209 @@ +/** + * @file oprofile.h + * + * API for machine-specific interrupts to interface + * to oprofile. + * + * @remark Copyright 2002 OProfile authors + * @remark Read the file COPYING + * + * @author John Levon + */ + +#ifndef OPROFILE_H +#define OPROFILE_H + +#include +#include +#include +#include +#include +#include + +/* Each escaped entry is prefixed by ESCAPE_CODE + * then one of the following codes, then the + * relevant data. + * These #defines live in this file so that arch-specific + * buffer sync'ing code can access them. + */ +#define ESCAPE_CODE ~0UL +#define CTX_SWITCH_CODE 1 +#define CPU_SWITCH_CODE 2 +#define COOKIE_SWITCH_CODE 3 +#define KERNEL_ENTER_SWITCH_CODE 4 +#define KERNEL_EXIT_SWITCH_CODE 5 +#define MODULE_LOADED_CODE 6 +#define CTX_TGID_CODE 7 +#define TRACE_BEGIN_CODE 8 +#define TRACE_END_CODE 9 +#define XEN_ENTER_SWITCH_CODE 10 +#define SPU_PROFILING_CODE 11 +#define SPU_CTX_SWITCH_CODE 12 +#define IBS_FETCH_CODE 13 +#define IBS_OP_CODE 14 + +struct dentry; +struct file_operations; +struct pt_regs; + +/* Operations structure to be filled in */ +struct oprofile_operations { + /* create any necessary configuration files in the oprofile fs. + * Optional. */ + int (*create_files)(struct dentry * root); + /* Do any necessary interrupt setup. 
Optional. */ + int (*setup)(void); + /* Do any necessary interrupt shutdown. Optional. */ + void (*shutdown)(void); + /* Start delivering interrupts. */ + int (*start)(void); + /* Stop delivering interrupts. */ + void (*stop)(void); + /* Arch-specific buffer sync functions. + * Return value = 0: Success + * Return value = -1: Failure + * Return value = 1: Run generic sync function + */ + int (*sync_start)(void); + int (*sync_stop)(void); + + /* Initiate a stack backtrace. Optional. */ + void (*backtrace)(struct pt_regs * const regs, unsigned int depth); + + /* Multiplex between different events. Optional. */ + int (*switch_events)(void); + /* CPU identification string. */ + char * cpu_type; +}; + +/** + * One-time initialisation. *ops must be set to a filled-in + * operations structure. This is called even in timer interrupt + * mode so an arch can set a backtrace callback. + * + * If an error occurs, the fields should be left untouched. + */ +int oprofile_arch_init(struct oprofile_operations * ops); + +/** + * One-time exit/cleanup for the arch. + */ +void oprofile_arch_exit(void); + +/** + * Add a sample. This may be called from any context. + */ +void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); + +/** + * Add an extended sample. Use this when the PC is not from the regs, and + * we cannot determine if we're in kernel mode from the regs. + * + * This function does perform a backtrace. + * + */ +void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, + unsigned long event, int is_kernel); + +/** + * Add an hardware sample. + */ +void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs, + unsigned long event, int is_kernel, + struct task_struct *task); + +/* Use this instead when the PC value is not from the regs. Doesn't + * backtrace. */ +void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event); + +/* add a backtrace entry, to be called from the ->backtrace callback */ +void oprofile_add_trace(unsigned long eip); + + +/** + * Create a file of the given name as a child of the given root, with + * the specified file operations. + */ +int oprofilefs_create_file(struct dentry * root, + char const * name, const struct file_operations * fops); + +int oprofilefs_create_file_perm(struct dentry * root, + char const * name, const struct file_operations * fops, int perm); + +/** Create a file for read/write access to an unsigned long. */ +int oprofilefs_create_ulong(struct dentry * root, + char const * name, ulong * val); + +/** Create a file for read-only access to an unsigned long. */ +int oprofilefs_create_ro_ulong(struct dentry * root, + char const * name, ulong * val); + +/** Create a file for read-only access to an atomic_t. */ +int oprofilefs_create_ro_atomic(struct dentry * root, + char const * name, atomic_t * val); + +/** create a directory */ +struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name); + +/** + * Write the given asciz string to the given user buffer @buf, updating *offset + * appropriately. Returns bytes written or -EFAULT. + */ +ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset); + +/** + * Convert an unsigned long value into ASCII and copy it to the user buffer @buf, + * updating *offset appropriately. Returns bytes written or -EFAULT. 
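/*
 * A minimal sketch of how an architecture backend might satisfy
 * oprofile_arch_init() as documented above: fill in only the supported
 * oprofile_operations callbacks and leave the remaining fields untouched.
 * The example_* handlers and the cpu_type string are hypothetical stubs.
 */
#include <linux/oprofile.h>

static int example_pmu_start(void)
{
	/* Enable the performance counter interrupts here. */
	return 0;
}

static void example_pmu_stop(void)
{
	/* Disable the performance counter interrupts here. */
}

int oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->start	= example_pmu_start;
	ops->stop	= example_pmu_stop;
	ops->cpu_type	= "example/pmu";

	return 0;
}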
+ */ +ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset); + +/** + * Read an ASCII string for a number from a userspace buffer and fill *val on success. + * Returns 0 on success, < 0 on error. + */ +int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count); + +/** lock for read/write safety */ +extern raw_spinlock_t oprofilefs_lock; + +/** + * Add the contents of a circular buffer to the event buffer. + */ +void oprofile_put_buff(unsigned long *buf, unsigned int start, + unsigned int stop, unsigned int max); + +unsigned long oprofile_get_cpu_buffer_size(void); +void oprofile_cpu_buffer_inc_smpl_lost(void); + +/* cpu buffer functions */ + +struct op_sample; + +struct op_entry { + struct ring_buffer_event *event; + struct op_sample *sample; + unsigned long size; + unsigned long *data; +}; + +void oprofile_write_reserve(struct op_entry *entry, + struct pt_regs * const regs, + unsigned long pc, int code, int size); +int oprofile_add_data(struct op_entry *entry, unsigned long val); +int oprofile_add_data64(struct op_entry *entry, u64 val); +int oprofile_write_commit(struct op_entry *entry); + +#ifdef CONFIG_HW_PERF_EVENTS +int __init oprofile_perf_init(struct oprofile_operations *ops); +void oprofile_perf_exit(void); +char *op_name_from_perf_id(void); +#else +static inline int __init oprofile_perf_init(struct oprofile_operations *ops) +{ + pr_info("oprofile: hardware counters not available\n"); + return -ENODEV; +} +static inline void oprofile_perf_exit(void) { } +#endif /* CONFIG_HW_PERF_EVENTS */ + +#endif /* OPROFILE_H */ diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h new file mode 100644 index 000000000..5581dbd3b --- /dev/null +++ b/include/linux/osq_lock.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_OSQ_LOCK_H +#define __LINUX_OSQ_LOCK_H + +/* + * An MCS like lock especially tailored for optimistic spinning for sleeping + * lock implementations (mutex, rwsem, etc). + */ +struct optimistic_spin_node { + struct optimistic_spin_node *next, *prev; + int locked; /* 1 if lock acquired */ + int cpu; /* encoded CPU # + 1 value */ +}; + +struct optimistic_spin_queue { + /* + * Stores an encoded value of the CPU # of the tail node in the queue. + * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL. + */ + atomic_t tail; +}; + +#define OSQ_UNLOCKED_VAL (0) + +/* Init macro and function. */ +#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) } + +static inline void osq_lock_init(struct optimistic_spin_queue *lock) +{ + atomic_set(&lock->tail, OSQ_UNLOCKED_VAL); +} + +extern bool osq_lock(struct optimistic_spin_queue *lock); +extern void osq_unlock(struct optimistic_spin_queue *lock); + +static inline bool osq_is_locked(struct optimistic_spin_queue *lock) +{ + return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL; +} + +#endif diff --git a/include/linux/overflow.h b/include/linux/overflow.h new file mode 100644 index 000000000..4564a175e --- /dev/null +++ b/include/linux/overflow.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include +#include + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. 
+ * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. [The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. + */ +#define is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + +/* + * Avoids triggering -Wtype-limits compilation warning, + * while using unsigned data types to check a < 0. + */ +#define is_non_negative(a) ((a) > 0 || (a) == 0) +#define is_negative(a) (!(is_non_negative(a))) + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? 
\ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. + */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** check_shl_overflow() - Calculate a left-shifted value and check overflow + * + * @a: Value to be shifted + * @s: How many bits left to shift + * @d: Pointer to where to store the result + * + * Computes *@d = (@a << @s) + * + * Returns true if '*d' cannot hold the result or when 'a << s' doesn't + * make sense. 
Example conditions: + * - 'a << s' causes bits to be lost when stored in *d. + * - 's' is garbage (e.g. negative) or so large that the result of + * 'a << s' is guaranteed to be 0. + * - 'a' is negative. + * - 'a << s' sets the sign bit, if any, in '*d'. + * + * '*d' will hold the results of the attempted shift, but is not + * considered "safe for use" if false is returned. + */ +#define check_shl_overflow(a, s, d) ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = \ + is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \ + (*_d >> _to_shift) != _a); \ +}) + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/oxu210hp.h b/include/linux/oxu210hp.h new file mode 100644 index 000000000..94cd25165 --- /dev/null +++ b/include/linux/oxu210hp.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* platform data for the OXU210HP HCD */ + +struct oxu210hp_platform_data { + unsigned int bus16:1; + unsigned int use_hcd_otg:1; + unsigned int use_hcd_sph:1; +}; diff --git a/include/linux/padata.h b/include/linux/padata.h new file mode 100644 index 000000000..8c9827cc6 --- /dev/null +++ b/include/linux/padata.h @@ -0,0 +1,182 @@ +/* + * padata.h - header for the padata parallelization interface + * + * Copyright (C) 2008, 2009 secunet Security Networks AG + * Copyright (C) 2008, 2009 Steffen Klassert + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
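/*
 * A minimal sketch of the <linux/overflow.h> helpers defined above:
 * check_mul_overflow() reports an overflowing size calculation instead of
 * silently wrapping, and struct_size() bounds the allocation of a
 * structure with a trailing array. The example_* names are hypothetical.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_table {
	size_t	nr;
	u32	entry[];		/* trailing array sized at runtime */
};

static int example_nbytes(size_t nelem, size_t elem_size, size_t *bytes)
{
	/* Report the overflow instead of silently wrapping around. */
	if (check_mul_overflow(nelem, elem_size, bytes))
		return -EOVERFLOW;
	return 0;
}

static struct example_table *example_table_alloc(size_t nr)
{
	struct example_table *t;

	/* struct_size() returns SIZE_MAX on overflow, which kmalloc() rejects. */
	t = kmalloc(struct_size(t, entry, nr), GFP_KERNEL);
	if (t)
		t->nr = nr;
	return t;
}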
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef PADATA_H +#define PADATA_H + +#include +#include +#include +#include +#include + +#define PADATA_CPU_SERIAL 0x01 +#define PADATA_CPU_PARALLEL 0x02 + +/** + * struct padata_priv - Embedded to the users data structure. + * + * @list: List entry, to attach to the padata lists. + * @pd: Pointer to the internal control structure. + * @cb_cpu: Callback cpu for serializatioon. + * @cpu: Cpu for parallelization. + * @seq_nr: Sequence number of the parallelized data object. + * @info: Used to pass information from the parallel to the serial function. + * @parallel: Parallel execution function. + * @serial: Serial complete function. + */ +struct padata_priv { + struct list_head list; + struct parallel_data *pd; + int cb_cpu; + int cpu; + int info; + void (*parallel)(struct padata_priv *padata); + void (*serial)(struct padata_priv *padata); +}; + +/** + * struct padata_list + * + * @list: List head. + * @lock: List lock. + */ +struct padata_list { + struct list_head list; + spinlock_t lock; +}; + +/** +* struct padata_serial_queue - The percpu padata serial queue +* +* @serial: List to wait for serialization after reordering. +* @work: work struct for serialization. +* @pd: Backpointer to the internal control structure. +*/ +struct padata_serial_queue { + struct padata_list serial; + struct work_struct work; + struct parallel_data *pd; +}; + +/** + * struct padata_parallel_queue - The percpu padata parallel queue + * + * @parallel: List to wait for parallelization. + * @reorder: List to wait for reordering after parallel processing. + * @serial: List to wait for serialization after reordering. + * @pwork: work struct for parallelization. + * @swork: work struct for serialization. + * @work: work struct for parallelization. + * @num_obj: Number of objects that are processed by this cpu. + * @cpu_index: Index of the cpu. + */ +struct padata_parallel_queue { + struct padata_list parallel; + struct padata_list reorder; + struct work_struct work; + atomic_t num_obj; + int cpu_index; +}; + +/** + * struct padata_cpumask - The cpumasks for the parallel/serial workers + * + * @pcpu: cpumask for the parallel workers. + * @cbcpu: cpumask for the serial (callback) workers. + */ +struct padata_cpumask { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; +}; + +/** + * struct parallel_data - Internal control structure, covers everything + * that depends on the cpumask in use. + * + * @pinst: padata instance. + * @pqueue: percpu padata queues used for parallelization. + * @squeue: percpu padata queues used for serialuzation. + * @reorder_objects: Number of objects waiting in the reorder queues. + * @refcnt: Number of objects holding a reference on this parallel_data. + * @max_seq_nr: Maximal used sequence number. + * @cpu: Next CPU to be processed. + * @cpumask: The cpumasks in use for parallel and serial workers. + * @reorder_work: work struct for reordering. + * @lock: Reorder lock. 
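/*
 * A minimal sketch of the embedding scheme described for struct
 * padata_priv above: the caller wraps its job data around a padata_priv,
 * the parallel callback does the heavy work and hands the object back
 * with padata_do_serial() (declared further down in this header), and
 * the serial callback then sees objects in submission order. The
 * example_* names are hypothetical.
 */
#include <linux/kernel.h>	/* container_of() */
#include <linux/padata.h>

struct example_request {
	struct padata_priv	padata;		/* must be embedded */
	void			*payload;
};

static void example_parallel(struct padata_priv *padata)
{
	struct example_request *req =
		container_of(padata, struct example_request, padata);

	/* Heavy, parallelizable work on req->payload happens here. */
	(void)req;

	/* Hand the object back so completions stay in submission order. */
	padata_do_serial(padata);
}

static void example_serial(struct padata_priv *padata)
{
	/* Runs on the callback CPU, strictly in submission order. */
}

static int example_submit(struct padata_instance *pinst,
			  struct example_request *req, int cb_cpu)
{
	req->padata.parallel = example_parallel;
	req->padata.serial   = example_serial;

	return padata_do_parallel(pinst, &req->padata, cb_cpu);
}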
+ */ +struct parallel_data { + struct padata_instance *pinst; + struct padata_parallel_queue __percpu *pqueue; + struct padata_serial_queue __percpu *squeue; + atomic_t reorder_objects; + atomic_t refcnt; + atomic_t seq_nr; + int cpu; + struct padata_cpumask cpumask; + struct work_struct reorder_work; + spinlock_t lock ____cacheline_aligned; +}; + +/** + * struct padata_instance - The overall control structure. + * + * @cpu_online_node: Linkage for CPU online callback. + * @cpu_dead_node: Linkage for CPU offline callback. + * @wq: The workqueue in use. + * @pd: The internal control structure. + * @cpumask: User supplied cpumasks for parallel and serial works. + * @cpumask_change_notifier: Notifiers chain for user-defined notify + * callbacks that will be called when either @pcpu or @cbcpu + * or both cpumasks change. + * @kobj: padata instance kernel object. + * @lock: padata instance lock. + * @flags: padata flags. + */ +struct padata_instance { + struct hlist_node cpu_online_node; + struct hlist_node cpu_dead_node; + struct workqueue_struct *wq; + struct parallel_data *pd; + struct padata_cpumask cpumask; + struct blocking_notifier_head cpumask_change_notifier; + struct kobject kobj; + struct mutex lock; + u8 flags; +#define PADATA_INIT 1 +#define PADATA_RESET 2 +#define PADATA_INVALID 4 +}; + +extern struct padata_instance *padata_alloc_possible( + struct workqueue_struct *wq); +extern void padata_free(struct padata_instance *pinst); +extern int padata_do_parallel(struct padata_instance *pinst, + struct padata_priv *padata, int cb_cpu); +extern void padata_do_serial(struct padata_priv *padata); +extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, + cpumask_var_t cpumask); +extern int padata_start(struct padata_instance *pinst); +extern void padata_stop(struct padata_instance *pinst); +extern int padata_register_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); +extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); +#endif diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h new file mode 100644 index 000000000..7ec86bf31 --- /dev/null +++ b/include/linux/page-flags-layout.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef PAGE_FLAGS_LAYOUT_H +#define PAGE_FLAGS_LAYOUT_H + +#include +#include + +/* + * When a memory allocation must conform to specific limitations (such + * as being suitable for DMA) the caller will pass in hints to the + * allocator in the gfp_mask, in the zone modifier bits. These bits + * are used to select a priority ordered list of memory zones which + * match the requested limits. See gfp_zone() in include/linux/gfp.h + */ +#if MAX_NR_ZONES < 2 +#define ZONES_SHIFT 0 +#elif MAX_NR_ZONES <= 2 +#define ZONES_SHIFT 1 +#elif MAX_NR_ZONES <= 4 +#define ZONES_SHIFT 2 +#elif MAX_NR_ZONES <= 8 +#define ZONES_SHIFT 3 +#else +#error ZONES_SHIFT -- too many zones configured adjust calculation +#endif + +#ifdef CONFIG_SPARSEMEM +#include + +/* SECTION_SHIFT #bits space required to store a section # */ +#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) + +#endif /* CONFIG_SPARSEMEM */ + +/* + * page->flags layout: + * + * There are five possibilities for how page->flags get laid out. The first + * pair is for the normal case without sparsemem. The second pair is for + * sparsemem when there is plenty of space for node and section information. 
+ * The last is when there is insufficient space in page->flags and a separate + * lookup is necessary. + * + * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS | + * " plus space for last_cpupid: | NODE | ZONE | LAST_CPUPID ... | FLAGS | + * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS | + * " plus space for last_cpupid: | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS | + * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS | + */ +#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) +#define SECTIONS_WIDTH SECTIONS_SHIFT +#else +#define SECTIONS_WIDTH 0 +#endif + +#define ZONES_WIDTH ZONES_SHIFT + +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS +#define NODES_WIDTH NODES_SHIFT +#else +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#error "Vmemmap: No space for nodes field in page flags" +#endif +#define NODES_WIDTH 0 +#endif + +#ifdef CONFIG_NUMA_BALANCING +#define LAST__PID_SHIFT 8 +#define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1) + +#define LAST__CPU_SHIFT NR_CPUS_BITS +#define LAST__CPU_MASK ((1 << LAST__CPU_SHIFT)-1) + +#define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT) +#else +#define LAST_CPUPID_SHIFT 0 +#endif + +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS +#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT +#else +#define LAST_CPUPID_WIDTH 0 +#endif + +/* + * We are going to use the flags for the page to node mapping if its in + * there. This includes the case where there is no node, so it is implicit. + */ +#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0) +#define NODE_NOT_IN_PAGE_FLAGS +#endif + +#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0 +#define LAST_CPUPID_NOT_IN_PAGE_FLAGS +#endif + +#endif /* _LINUX_PAGE_FLAGS_LAYOUT */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h new file mode 100644 index 000000000..f6e943366 --- /dev/null +++ b/include/linux/page-flags.h @@ -0,0 +1,804 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Macros for manipulating and testing page->flags + */ + +#ifndef PAGE_FLAGS_H +#define PAGE_FLAGS_H + +#include +#include +#include +#ifndef __GENERATING_BOUNDS_H +#include +#include +#endif /* !__GENERATING_BOUNDS_H */ + +/* + * Various page->flags bits: + * + * PG_reserved is set for special pages, which can never be swapped out. Some + * of them might not even exist... + * + * The PG_private bitflag is set on pagecache pages if they contain filesystem + * specific data (which is normally at page->private). It can be used by + * private allocations for its own usage. + * + * During initiation of disk I/O, PG_locked is set. This bit is set before I/O + * and cleared when writeback _starts_ or when read _completes_. PG_writeback + * is set before writeback starts and cleared when it finishes. + * + * PG_locked also pins a page in pagecache, and blocks truncation of the file + * while it is held. + * + * page_waitqueue(page) is a wait queue of all tasks waiting for the page + * to become unlocked. + * + * PG_uptodate tells whether the page's contents is valid. When a read + * completes, the page becomes uptodate, unless a disk I/O error happened. + * + * PG_referenced, PG_reclaim are used for page reclaim for anonymous and + * file-backed pagecache (see mm/vmscan.c). + * + * PG_error is set to indicate that an I/O error occurred on this page. + * + * PG_arch_1 is an architecture specific page state bit. 
The generic code + * guarantees that this bit is cleared for a page when it first is entered into + * the page cache. + * + * PG_hwpoison indicates that a page got corrupted in hardware and contains + * data with incorrect ECC bits that triggered a machine check. Accessing is + * not safe since it may cause another machine check. Don't touch! + */ + +/* + * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break + * locked- and dirty-page accounting. + * + * The page flags field is split into two parts, the main flags area + * which extends from the low bits upwards, and the fields area which + * extends from the high bits downwards. + * + * | FIELD | ... | FLAGS | + * N-1 ^ 0 + * (NR_PAGEFLAGS) + * + * The fields area is reserved for fields mapping zone, node (for NUMA) and + * SPARSEMEM section (for variants of SPARSEMEM that require section ids like + * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP). + */ +enum pageflags { + PG_locked, /* Page is locked. Don't touch. */ + PG_error, + PG_referenced, + PG_uptodate, + PG_dirty, + PG_lru, + PG_active, + PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ + PG_slab, + PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ + PG_arch_1, + PG_reserved, + PG_private, /* If pagecache, has fs-private data */ + PG_private_2, /* If pagecache, has fs aux data */ + PG_writeback, /* Page is under writeback */ + PG_head, /* A head page */ + PG_mappedtodisk, /* Has blocks allocated on-disk */ + PG_reclaim, /* To be reclaimed asap */ + PG_swapbacked, /* Page is backed by RAM/swap */ + PG_unevictable, /* Page is "unevictable" */ +#ifdef CONFIG_MMU + PG_mlocked, /* Page is vma mlocked */ +#endif +#ifdef CONFIG_ARCH_USES_PG_UNCACHED + PG_uncached, /* Page has been mapped as uncached */ +#endif +#ifdef CONFIG_MEMORY_FAILURE + PG_hwpoison, /* hardware poisoned page. Don't touch */ +#endif +#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) + PG_young, + PG_idle, +#endif + __NR_PAGEFLAGS, + + /* Filesystems */ + PG_checked = PG_owner_priv_1, + + /* SwapBacked */ + PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */ + + /* Two page bits are conscripted by FS-Cache to maintain local caching + * state. These bits are set on pages belonging to the netfs's inodes + * when those inodes are being locally cached. + */ + PG_fscache = PG_private_2, /* page backed by cache */ + + /* XEN */ + /* Pinned in Xen as a read-only pagetable page. */ + PG_pinned = PG_owner_priv_1, + /* Pinned as part of domain save (see xen_mm_pin_all()). */ + PG_savepinned = PG_dirty, + /* Has a grant mapping of another (foreign) domain's page. */ + PG_foreign = PG_owner_priv_1, + + /* SLOB */ + PG_slob_free = PG_private, + + /* Compound pages. 
Stored in first tail page's flags */ + PG_double_map = PG_private_2, + + /* non-lru isolated movable page */ + PG_isolated = PG_reclaim, +}; + +#ifndef __GENERATING_BOUNDS_H + +struct page; /* forward declaration */ + +static inline struct page *compound_head(struct page *page) +{ + unsigned long head = READ_ONCE(page->compound_head); + + if (unlikely(head & 1)) + return (struct page *) (head - 1); + return page; +} + +static __always_inline int PageTail(struct page *page) +{ + return READ_ONCE(page->compound_head) & 1; +} + +static __always_inline int PageCompound(struct page *page) +{ + return test_bit(PG_head, &page->flags) || PageTail(page); +} + +#define PAGE_POISON_PATTERN -1l +static inline int PagePoisoned(const struct page *page) +{ + return page->flags == PAGE_POISON_PATTERN; +} + +/* + * Page flags policies wrt compound pages + * + * PF_POISONED_CHECK + * check if this struct page poisoned/uninitialized + * + * PF_ANY: + * the page flag is relevant for small, head and tail pages. + * + * PF_HEAD: + * for compound page all operations related to the page flag applied to + * head page. + * + * PF_ONLY_HEAD: + * for compound page, callers only ever operate on the head page. + * + * PF_NO_TAIL: + * modifications of the page flag must be done on small or head pages, + * checks can be done on tail pages too. + * + * PF_NO_COMPOUND: + * the page flag is not relevant for compound pages. + */ +#define PF_POISONED_CHECK(page) ({ \ + VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \ + page; }) +#define PF_ANY(page, enforce) PF_POISONED_CHECK(page) +#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page)) +#define PF_ONLY_HEAD(page, enforce) ({ \ + VM_BUG_ON_PGFLAGS(PageTail(page), page); \ + PF_POISONED_CHECK(page); }) +#define PF_NO_TAIL(page, enforce) ({ \ + VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ + PF_POISONED_CHECK(compound_head(page)); }) +#define PF_NO_COMPOUND(page, enforce) ({ \ + VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \ + PF_POISONED_CHECK(page); }) + +/* + * Macros to create function definitions for page flags + */ +#define TESTPAGEFLAG(uname, lname, policy) \ +static __always_inline int Page##uname(struct page *page) \ + { return test_bit(PG_##lname, &policy(page, 0)->flags); } + +#define SETPAGEFLAG(uname, lname, policy) \ +static __always_inline void SetPage##uname(struct page *page) \ + { set_bit(PG_##lname, &policy(page, 1)->flags); } + +#define CLEARPAGEFLAG(uname, lname, policy) \ +static __always_inline void ClearPage##uname(struct page *page) \ + { clear_bit(PG_##lname, &policy(page, 1)->flags); } + +#define __SETPAGEFLAG(uname, lname, policy) \ +static __always_inline void __SetPage##uname(struct page *page) \ + { __set_bit(PG_##lname, &policy(page, 1)->flags); } + +#define __CLEARPAGEFLAG(uname, lname, policy) \ +static __always_inline void __ClearPage##uname(struct page *page) \ + { __clear_bit(PG_##lname, &policy(page, 1)->flags); } + +#define TESTSETFLAG(uname, lname, policy) \ +static __always_inline int TestSetPage##uname(struct page *page) \ + { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } + +#define TESTCLEARFLAG(uname, lname, policy) \ +static __always_inline int TestClearPage##uname(struct page *page) \ + { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } + +#define PAGEFLAG(uname, lname, policy) \ + TESTPAGEFLAG(uname, lname, policy) \ + SETPAGEFLAG(uname, lname, policy) \ + CLEARPAGEFLAG(uname, lname, policy) + +#define __PAGEFLAG(uname, lname, policy) \ + TESTPAGEFLAG(uname, lname, 
policy) \ + __SETPAGEFLAG(uname, lname, policy) \ + __CLEARPAGEFLAG(uname, lname, policy) + +#define TESTSCFLAG(uname, lname, policy) \ + TESTSETFLAG(uname, lname, policy) \ + TESTCLEARFLAG(uname, lname, policy) + +#define TESTPAGEFLAG_FALSE(uname) \ +static inline int Page##uname(const struct page *page) { return 0; } + +#define SETPAGEFLAG_NOOP(uname) \ +static inline void SetPage##uname(struct page *page) { } + +#define CLEARPAGEFLAG_NOOP(uname) \ +static inline void ClearPage##uname(struct page *page) { } + +#define __CLEARPAGEFLAG_NOOP(uname) \ +static inline void __ClearPage##uname(struct page *page) { } + +#define TESTSETFLAG_FALSE(uname) \ +static inline int TestSetPage##uname(struct page *page) { return 0; } + +#define TESTCLEARFLAG_FALSE(uname) \ +static inline int TestClearPage##uname(struct page *page) { return 0; } + +#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \ + SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname) + +#define TESTSCFLAG_FALSE(uname) \ + TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname) + +__PAGEFLAG(Locked, locked, PF_NO_TAIL) +PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) +PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL) +PAGEFLAG(Referenced, referenced, PF_HEAD) + TESTCLEARFLAG(Referenced, referenced, PF_HEAD) + __SETPAGEFLAG(Referenced, referenced, PF_HEAD) +PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD) + __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD) +PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) +PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) + TESTCLEARFLAG(Active, active, PF_HEAD) +__PAGEFLAG(Slab, slab, PF_NO_TAIL) +__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL) +PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ + +/* Xen */ +PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) + TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) +PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); +PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); + +PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) + __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) +PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) + __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) + __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) + +/* + * Private page markings that may be used by the filesystem that owns the page + * for its own purposes. + * - PG_private and PG_private_2 cause releasepage() and co to be invoked + */ +PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY) + __CLEARPAGEFLAG(Private, private, PF_ANY) +PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) +PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) + TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY) + +/* + * Only test-and-set exist for PG_writeback. The unconditional operators are + * risky: they bypass page accounting. + */ +TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL) + TESTSCFLAG(Writeback, writeback, PF_NO_TAIL) +PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) + +/* PG_readahead is only used for reads; PG_reclaim is only for writes */ +PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) + TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL) +PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND) + TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND) + +#ifdef CONFIG_HIGHMEM +/* + * Must use a macro here due to header dependency issues. page_zone() is not + * available at this point. 
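/*
 * For reference, the generator macros above expand into ordinary bit
 * helpers. PAGEFLAG(Dirty, dirty, PF_HEAD), for instance, produces
 * approximately the following three accessors; the PF_HEAD policy
 * redirects every operation to the head page of a compound page.
 */
static __always_inline int PageDirty(struct page *page)
{
	return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags);
}

static __always_inline void SetPageDirty(struct page *page)
{
	set_bit(PG_dirty, &PF_HEAD(page, 1)->flags);
}

static __always_inline void ClearPageDirty(struct page *page)
{
	clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags);
}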
+ */ +#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p)) +#else +PAGEFLAG_FALSE(HighMem) +#endif + +#ifdef CONFIG_SWAP +static __always_inline int PageSwapCache(struct page *page) +{ +#ifdef CONFIG_THP_SWAP + page = compound_head(page); +#endif + return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags); + +} +SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) +CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) +#else +PAGEFLAG_FALSE(SwapCache) +#endif + +PAGEFLAG(Unevictable, unevictable, PF_HEAD) + __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD) + TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD) + +#ifdef CONFIG_MMU +PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL) + __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL) + TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL) +#else +PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked) + TESTSCFLAG_FALSE(Mlocked) +#endif + +#ifdef CONFIG_ARCH_USES_PG_UNCACHED +PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND) +#else +PAGEFLAG_FALSE(Uncached) +#endif + +#ifdef CONFIG_MEMORY_FAILURE +PAGEFLAG(HWPoison, hwpoison, PF_ANY) +TESTSCFLAG(HWPoison, hwpoison, PF_ANY) +#define __PG_HWPOISON (1UL << PG_hwpoison) +extern bool set_hwpoison_free_buddy_page(struct page *page); +#else +PAGEFLAG_FALSE(HWPoison) +static inline bool set_hwpoison_free_buddy_page(struct page *page) +{ + return 0; +} +#define __PG_HWPOISON 0 +#endif + +#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) +TESTPAGEFLAG(Young, young, PF_ANY) +SETPAGEFLAG(Young, young, PF_ANY) +TESTCLEARFLAG(Young, young, PF_ANY) +PAGEFLAG(Idle, idle, PF_ANY) +#endif + +/* + * On an anonymous page mapped into a user virtual memory area, + * page->mapping points to its anon_vma, not to a struct address_space; + * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. + * + * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, + * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON + * bit; and then page->mapping points, not to an anon_vma, but to a private + * structure which KSM associates with that merged page. See ksm.h. + * + * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable + * page and then page->mapping points a struct address_space. + * + * Please note that, confusingly, "page_mapping" refers to the inode + * address_space which maps the page from disk; whereas "page_mapped" + * refers to user virtual address space into which the page is mapped. + */ +#define PAGE_MAPPING_ANON 0x1 +#define PAGE_MAPPING_MOVABLE 0x2 +#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) +#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) + +static __always_inline int PageMappingFlags(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; +} + +static __always_inline int PageAnon(struct page *page) +{ + page = compound_head(page); + return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; +} + +static __always_inline int __PageMovable(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == + PAGE_MAPPING_MOVABLE; +} + +#ifdef CONFIG_KSM +/* + * A KSM page is one of those write-protected "shared pages" or "merged pages" + * which KSM maps into multiple mms, wherever identical anonymous page content + * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any + * anon_vma, but to that page's node of the stable tree. 
+ */ +static __always_inline int PageKsm(struct page *page) +{ + page = compound_head(page); + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == + PAGE_MAPPING_KSM; +} +#else +TESTPAGEFLAG_FALSE(Ksm) +#endif + +u64 stable_page_flags(struct page *page); + +static inline int PageUptodate(struct page *page) +{ + int ret; + page = compound_head(page); + ret = test_bit(PG_uptodate, &(page)->flags); + /* + * Must ensure that the data we read out of the page is loaded + * _after_ we've loaded page->flags to check for PageUptodate. + * We can skip the barrier if the page is not uptodate, because + * we wouldn't be reading anything from it. + * + * See SetPageUptodate() for the other side of the story. + */ + if (ret) + smp_rmb(); + + return ret; +} + +static __always_inline void __SetPageUptodate(struct page *page) +{ + VM_BUG_ON_PAGE(PageTail(page), page); + smp_wmb(); + __set_bit(PG_uptodate, &page->flags); +} + +static __always_inline void SetPageUptodate(struct page *page) +{ + VM_BUG_ON_PAGE(PageTail(page), page); + /* + * Memory barrier must be issued before setting the PG_uptodate bit, + * so that all previous stores issued in order to bring the page + * uptodate are actually visible before PageUptodate becomes true. + */ + smp_wmb(); + set_bit(PG_uptodate, &page->flags); +} + +CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL) + +int test_clear_page_writeback(struct page *page); +int __test_set_page_writeback(struct page *page, bool keep_write); + +#define test_set_page_writeback(page) \ + __test_set_page_writeback(page, false) +#define test_set_page_writeback_keepwrite(page) \ + __test_set_page_writeback(page, true) + +static inline void set_page_writeback(struct page *page) +{ + test_set_page_writeback(page); +} + +static inline void set_page_writeback_keepwrite(struct page *page) +{ + test_set_page_writeback_keepwrite(page); +} + +__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) + +static __always_inline void set_compound_head(struct page *page, struct page *head) +{ + WRITE_ONCE(page->compound_head, (unsigned long)head + 1); +} + +static __always_inline void clear_compound_head(struct page *page) +{ + WRITE_ONCE(page->compound_head, 0); +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void ClearPageCompound(struct page *page) +{ + BUG_ON(!PageHead(page)); + ClearPageHead(page); +} +#endif + +#define PG_head_mask ((1UL << PG_head)) + +#ifdef CONFIG_HUGETLB_PAGE +int PageHuge(struct page *page); +int PageHeadHuge(struct page *page); +bool page_huge_active(struct page *page); +#else +TESTPAGEFLAG_FALSE(Huge) +TESTPAGEFLAG_FALSE(HeadHuge) + +static inline bool page_huge_active(struct page *page) +{ + return 0; +} +#endif + + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * PageHuge() only returns true for hugetlbfs pages, but not for + * normal or transparent huge pages. + * + * PageTransHuge() returns true for both transparent huge and + * hugetlbfs pages, but not normal pages. PageTransHuge() can only be + * called only in the core VM paths where hugetlbfs pages can't exist. + */ +static inline int PageTransHuge(struct page *page) +{ + VM_BUG_ON_PAGE(PageTail(page), page); + return PageHead(page); +} + +/* + * PageTransCompound returns true for both transparent huge pages + * and hugetlbfs pages, so it should only be called when it's known + * that hugetlbfs pages aren't involved. 
+ */ +static inline int PageTransCompound(struct page *page) +{ + return PageCompound(page); +} + +/* + * PageTransCompoundMap is the same as PageTransCompound, but it also + * guarantees the primary MMU has the entire compound page mapped + * through pmd_trans_huge, which in turn guarantees the secondary MMUs + * can also map the entire compound page. This allows the secondary + * MMUs to call get_user_pages() only once for each compound page and + * to immediately map the entire compound page with a single secondary + * MMU fault. If there will be a pmd split later, the secondary MMUs + * will get an update through the MMU notifier invalidation through + * split_huge_pmd(). + * + * Unlike PageTransCompound, this is safe to be called only while + * split_huge_pmd() cannot run from under us, like if protected by the + * MMU notifier, otherwise it may result in page->_mapcount check false + * positives. + * + * We have to treat page cache THP differently since every subpage of it + * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE + * mapped in the current process so comparing subpage's _mapcount to + * compound_mapcount to filter out PTE mapped case. + */ +static inline int PageTransCompoundMap(struct page *page) +{ + struct page *head; + + if (!PageTransCompound(page)) + return 0; + + if (PageAnon(page)) + return atomic_read(&page->_mapcount) < 0; + + head = compound_head(page); + /* File THP is PMD mapped and not PTE mapped */ + return atomic_read(&page->_mapcount) == + atomic_read(compound_mapcount_ptr(head)); +} + +/* + * PageTransTail returns true for both transparent huge pages + * and hugetlbfs pages, so it should only be called when it's known + * that hugetlbfs pages aren't involved. + */ +static inline int PageTransTail(struct page *page) +{ + return PageTail(page); +} + +/* + * PageDoubleMap indicates that the compound page is mapped with PTEs as well + * as PMDs. + * + * This is required for optimization of rmap operations for THP: we can postpone + * per small page mapcount accounting (and its overhead from atomic operations) + * until the first PMD split. + * + * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up + * by one. This reference will go away with last compound_mapcount. + * + * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap(). + */ +static inline int PageDoubleMap(struct page *page) +{ + return PageHead(page) && test_bit(PG_double_map, &page[1].flags); +} + +static inline void SetPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + set_bit(PG_double_map, &page[1].flags); +} + +static inline void ClearPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + clear_bit(PG_double_map, &page[1].flags); +} +static inline int TestSetPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + return test_and_set_bit(PG_double_map, &page[1].flags); +} + +static inline int TestClearPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + return test_and_clear_bit(PG_double_map, &page[1].flags); +} + +#else +TESTPAGEFLAG_FALSE(TransHuge) +TESTPAGEFLAG_FALSE(TransCompound) +TESTPAGEFLAG_FALSE(TransCompoundMap) +TESTPAGEFLAG_FALSE(TransTail) +PAGEFLAG_FALSE(DoubleMap) + TESTSETFLAG_FALSE(DoubleMap) + TESTCLEARFLAG_FALSE(DoubleMap) +#endif + +/* + * For pages that are never mapped to userspace (and aren't PageSlab), + * page_type may be used. 
Because it is initialised to -1, we invert the + * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and + * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and + * low bits so that an underflow or overflow of page_mapcount() won't be + * mistaken for a page type value. + */ + +#define PAGE_TYPE_BASE 0xf0000000 +/* Reserve 0x0000007f to catch underflows of page_mapcount */ +#define PG_buddy 0x00000080 +#define PG_balloon 0x00000100 +#define PG_kmemcg 0x00000200 +#define PG_table 0x00000400 + +#define PageType(page, flag) \ + ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) + +#define PAGE_TYPE_OPS(uname, lname) \ +static __always_inline int Page##uname(struct page *page) \ +{ \ + return PageType(page, PG_##lname); \ +} \ +static __always_inline void __SetPage##uname(struct page *page) \ +{ \ + VM_BUG_ON_PAGE(!PageType(page, 0), page); \ + page->page_type &= ~PG_##lname; \ +} \ +static __always_inline void __ClearPage##uname(struct page *page) \ +{ \ + VM_BUG_ON_PAGE(!Page##uname(page), page); \ + page->page_type |= PG_##lname; \ +} + +/* + * PageBuddy() indicates that the page is free and in the buddy system + * (see mm/page_alloc.c). + */ +PAGE_TYPE_OPS(Buddy, buddy) + +/* + * PageBalloon() is true for pages that are on the balloon page list + * (see mm/balloon_compaction.c). + */ +PAGE_TYPE_OPS(Balloon, balloon) + +/* + * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on + * pages allocated with __GFP_ACCOUNT. It gets cleared on page free. + */ +PAGE_TYPE_OPS(Kmemcg, kmemcg) + +/* + * Marks pages in use as page tables. + */ +PAGE_TYPE_OPS(Table, table) + +extern bool is_free_buddy_page(struct page *page); + +__PAGEFLAG(Isolated, isolated, PF_ANY); + +/* + * If network-based swap is enabled, sl*b must keep track of whether pages + * were allocated from pfmemalloc reserves. + */ +static inline int PageSlabPfmemalloc(struct page *page) +{ + VM_BUG_ON_PAGE(!PageSlab(page), page); + return PageActive(page); +} + +static inline void SetPageSlabPfmemalloc(struct page *page) +{ + VM_BUG_ON_PAGE(!PageSlab(page), page); + SetPageActive(page); +} + +static inline void __ClearPageSlabPfmemalloc(struct page *page) +{ + VM_BUG_ON_PAGE(!PageSlab(page), page); + __ClearPageActive(page); +} + +static inline void ClearPageSlabPfmemalloc(struct page *page) +{ + VM_BUG_ON_PAGE(!PageSlab(page), page); + ClearPageActive(page); +} + +#ifdef CONFIG_MMU +#define __PG_MLOCKED (1UL << PG_mlocked) +#else +#define __PG_MLOCKED 0 +#endif + +/* + * Flags checked when a page is freed. Pages being freed should not have + * these flags set. It they are, there is a problem. + */ +#define PAGE_FLAGS_CHECK_AT_FREE \ + (1UL << PG_lru | 1UL << PG_locked | \ + 1UL << PG_private | 1UL << PG_private_2 | \ + 1UL << PG_writeback | 1UL << PG_reserved | \ + 1UL << PG_slab | 1UL << PG_active | \ + 1UL << PG_unevictable | __PG_MLOCKED) + +/* + * Flags checked when a page is prepped for return by the page allocator. + * Pages being prepped should not have these flags set. It they are set, + * there has been a kernel bug or struct page corruption. + * + * __PG_HWPOISON is exceptional because it needs to be kept beyond page's + * alloc-free cycle to prevent from reusing the page. 
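
[Editorial sketch, not part of the patch: the inverted page_type encoding defined above can be checked in isolation. Because page_type starts at -1 (all bits set), a type is "set" by clearing its bit so that the masked compare against PAGE_TYPE_BASE matches. The plain-integer demo and its macro name are illustrative only; they mirror what PAGE_TYPE_OPS() generates.]

#include <assert.h>

#define PAGE_TYPE_BASE	0xf0000000
#define PG_buddy	0x00000080
/* the same test PageType() performs on page->page_type */
#define page_type_has(val, flag) \
	(((val) & (PAGE_TYPE_BASE | (flag))) == PAGE_TYPE_BASE)

int main(void)
{
	unsigned int page_type = -1;			/* freshly initialised page */

	assert(!page_type_has(page_type, PG_buddy));	/* no type recorded yet */

	page_type &= ~PG_buddy;				/* what __SetPageBuddy() does */
	assert(page_type_has(page_type, PG_buddy));

	page_type |= PG_buddy;				/* what __ClearPageBuddy() does */
	assert(!page_type_has(page_type, PG_buddy));

	return 0;
}
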
+ */ +#define PAGE_FLAGS_CHECK_AT_PREP \ + (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) + +#define PAGE_FLAGS_PRIVATE \ + (1UL << PG_private | 1UL << PG_private_2) +/** + * page_has_private - Determine if page has private stuff + * @page: The page to be checked + * + * Determine if a page has private stuff, indicating that release routines + * should be invoked upon it. + */ +static inline int page_has_private(struct page *page) +{ + return !!(page->flags & PAGE_FLAGS_PRIVATE); +} + +#undef PF_ANY +#undef PF_HEAD +#undef PF_ONLY_HEAD +#undef PF_NO_TAIL +#undef PF_NO_COMPOUND +#endif /* !__GENERATING_BOUNDS_H */ + +#endif /* PAGE_FLAGS_H */ diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h new file mode 100644 index 000000000..4ae347cbc --- /dev/null +++ b/include/linux/page-isolation.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PAGEISOLATION_H +#define __LINUX_PAGEISOLATION_H + +#ifdef CONFIG_MEMORY_ISOLATION +static inline bool has_isolate_pageblock(struct zone *zone) +{ + return zone->nr_isolate_pageblock; +} +static inline bool is_migrate_isolate_page(struct page *page) +{ + return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; +} +static inline bool is_migrate_isolate(int migratetype) +{ + return migratetype == MIGRATE_ISOLATE; +} +#else +static inline bool has_isolate_pageblock(struct zone *zone) +{ + return false; +} +static inline bool is_migrate_isolate_page(struct page *page) +{ + return false; +} +static inline bool is_migrate_isolate(int migratetype) +{ + return false; +} +#endif + +bool has_unmovable_pages(struct zone *zone, struct page *page, int count, + int migratetype, bool skip_hwpoisoned_pages); +void set_pageblock_migratetype(struct page *page, int migratetype); +int move_freepages_block(struct zone *zone, struct page *page, + int migratetype, int *num_movable); + +/* + * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. + * If specified range includes migrate types other than MOVABLE or CMA, + * this will fail with -EBUSY. + * + * For isolating all pages in the range finally, the caller have to + * free all pages in the range. test_page_isolated() can be used for + * test it. + */ +int +start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, + unsigned migratetype, bool skip_hwpoisoned_pages); + +/* + * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. + * target range is [start_pfn, end_pfn) + */ +int +undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, + unsigned migratetype); + +/* + * Test all pages in [start_pfn, end_pfn) are isolated or not. 
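
[Editorial sketch, not part of the patch: the calling sequence implied by the isolation comments above, in the spirit of alloc_contig_range(). The function name and the reduced error handling are illustrative only.]

#include <linux/mmzone.h>
#include <linux/page-isolation.h>

static int grab_range_sketch(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	/*
	 * Flip every pageblock in the range to MIGRATE_ISOLATE; fails with
	 * -EBUSY if a block contains unmovable pages.
	 */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, false);
	if (ret)
		return ret;

	/* ... migrate or free everything in [start_pfn, end_pfn) ... */

	/* 0 only if the whole range is free and still isolated. */
	ret = test_pages_isolated(start_pfn, end_pfn, false);

	/* Always restore the original migratetype of the pageblocks. */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}
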
+ */ +int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, + bool skip_hwpoisoned_pages); + +struct page *alloc_migrate_target(struct page *page, unsigned long private); + +#endif diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h new file mode 100644 index 000000000..bab7e57f6 --- /dev/null +++ b/include/linux/page_counter.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PAGE_COUNTER_H +#define _LINUX_PAGE_COUNTER_H + +#include +#include +#include + +struct page_counter { + atomic_long_t usage; + unsigned long min; + unsigned long low; + unsigned long max; + struct page_counter *parent; + + /* effective memory.min and memory.min usage tracking */ + unsigned long emin; + atomic_long_t min_usage; + atomic_long_t children_min_usage; + + /* effective memory.low and memory.low usage tracking */ + unsigned long elow; + atomic_long_t low_usage; + atomic_long_t children_low_usage; + + /* legacy */ + unsigned long watermark; + unsigned long failcnt; +}; + +#if BITS_PER_LONG == 32 +#define PAGE_COUNTER_MAX LONG_MAX +#else +#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) +#endif + +static inline void page_counter_init(struct page_counter *counter, + struct page_counter *parent) +{ + atomic_long_set(&counter->usage, 0); + counter->max = PAGE_COUNTER_MAX; + counter->parent = parent; +} + +static inline unsigned long page_counter_read(struct page_counter *counter) +{ + return atomic_long_read(&counter->usage); +} + +void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); +void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); +bool page_counter_try_charge(struct page_counter *counter, + unsigned long nr_pages, + struct page_counter **fail); +void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); +void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages); +void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); +int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages); +int page_counter_memparse(const char *buf, const char *max, + unsigned long *nr_pages); + +static inline void page_counter_reset_watermark(struct page_counter *counter) +{ + counter->watermark = page_counter_read(counter); +} + +#endif /* _LINUX_PAGE_COUNTER_H */ diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h new file mode 100644 index 000000000..f84f167ec --- /dev/null +++ b/include/linux/page_ext.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PAGE_EXT_H +#define __LINUX_PAGE_EXT_H + +#include +#include +#include + +struct pglist_data; +struct page_ext_operations { + size_t offset; + size_t size; + bool (*need)(void); + void (*init)(void); +}; + +#ifdef CONFIG_PAGE_EXTENSION + +enum page_ext_flags { + PAGE_EXT_DEBUG_GUARD, + PAGE_EXT_OWNER, +#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) + PAGE_EXT_YOUNG, + PAGE_EXT_IDLE, +#endif +}; + +/* + * Page Extension can be considered as an extended mem_map. + * A page_ext page is associated with every page descriptor. The + * page_ext helps us add more information about the page. + * All page_ext are allocated at boot or memory hotplug event, + * then the page_ext for pfn always exists. 
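
[Editorial sketch, not part of the patch: the hierarchical charge/uncharge cycle the page_counter API above is built for, roughly the way memcg drives it. The on-stack counters, the limit value and the function name are illustrative only.]

#include <linux/page_counter.h>

static bool charge_sketch(unsigned long nr_pages)
{
	struct page_counter parent, child;
	struct page_counter *fail;

	page_counter_init(&parent, NULL);
	page_counter_init(&child, &parent);
	page_counter_set_max(&parent, 1024);	/* cap the whole hierarchy */

	/*
	 * try_charge walks child -> parent; if any level would exceed its
	 * max, the partial charges are rolled back and *fail points at the
	 * level that refused.
	 */
	if (!page_counter_try_charge(&child, nr_pages, &fail))
		return false;

	/* ... use the memory; both usages and watermarks now include nr_pages ... */

	/* uncharge also walks up, keeping parent and child consistent */
	page_counter_uncharge(&child, nr_pages);
	return true;
}
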
+ */ +struct page_ext { + unsigned long flags; +}; + +extern void pgdat_page_ext_init(struct pglist_data *pgdat); + +#ifdef CONFIG_SPARSEMEM +static inline void page_ext_init_flatmem(void) +{ +} +extern void page_ext_init(void); +#else +extern void page_ext_init_flatmem(void); +static inline void page_ext_init(void) +{ +} +#endif + +struct page_ext *lookup_page_ext(const struct page *page); + +#else /* !CONFIG_PAGE_EXTENSION */ +struct page_ext; + +static inline void pgdat_page_ext_init(struct pglist_data *pgdat) +{ +} + +static inline struct page_ext *lookup_page_ext(const struct page *page) +{ + return NULL; +} + +static inline void page_ext_init(void) +{ +} + +static inline void page_ext_init_flatmem(void) +{ +} +#endif /* CONFIG_PAGE_EXTENSION */ +#endif /* __LINUX_PAGE_EXT_H */ diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h new file mode 100644 index 000000000..1e894d34b --- /dev/null +++ b/include/linux/page_idle.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MM_PAGE_IDLE_H +#define _LINUX_MM_PAGE_IDLE_H + +#include +#include +#include + +#ifdef CONFIG_IDLE_PAGE_TRACKING + +#ifdef CONFIG_64BIT +static inline bool page_is_young(struct page *page) +{ + return PageYoung(page); +} + +static inline void set_page_young(struct page *page) +{ + SetPageYoung(page); +} + +static inline bool test_and_clear_page_young(struct page *page) +{ + return TestClearPageYoung(page); +} + +static inline bool page_is_idle(struct page *page) +{ + return PageIdle(page); +} + +static inline void set_page_idle(struct page *page) +{ + SetPageIdle(page); +} + +static inline void clear_page_idle(struct page *page) +{ + ClearPageIdle(page); +} +#else /* !CONFIG_64BIT */ +/* + * If there is not enough space to store Idle and Young bits in page flags, use + * page ext flags instead. 
+ */ +extern struct page_ext_operations page_idle_ops; + +static inline bool page_is_young(struct page *page) +{ + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_YOUNG, &page_ext->flags); +} + +static inline void set_page_young(struct page *page) +{ + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_YOUNG, &page_ext->flags); +} + +static inline bool test_and_clear_page_young(struct page *page) +{ + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); +} + +static inline bool page_is_idle(struct page *page) +{ + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_IDLE, &page_ext->flags); +} + +static inline void set_page_idle(struct page *page) +{ + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_IDLE, &page_ext->flags); +} + +static inline void clear_page_idle(struct page *page) +{ + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + clear_bit(PAGE_EXT_IDLE, &page_ext->flags); +} +#endif /* CONFIG_64BIT */ + +#else /* !CONFIG_IDLE_PAGE_TRACKING */ + +static inline bool page_is_young(struct page *page) +{ + return false; +} + +static inline void set_page_young(struct page *page) +{ +} + +static inline bool test_and_clear_page_young(struct page *page) +{ + return false; +} + +static inline bool page_is_idle(struct page *page) +{ + return false; +} + +static inline void set_page_idle(struct page *page) +{ +} + +static inline void clear_page_idle(struct page *page) +{ +} + +#endif /* CONFIG_IDLE_PAGE_TRACKING */ + +#endif /* _LINUX_MM_PAGE_IDLE_H */ diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h new file mode 100644 index 000000000..8679ccd72 --- /dev/null +++ b/include/linux/page_owner.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PAGE_OWNER_H +#define __LINUX_PAGE_OWNER_H + +#include + +#ifdef CONFIG_PAGE_OWNER +extern struct static_key_false page_owner_inited; +extern struct page_ext_operations page_owner_ops; + +extern void __reset_page_owner(struct page *page, unsigned int order); +extern void __set_page_owner(struct page *page, + unsigned int order, gfp_t gfp_mask); +extern void __split_page_owner(struct page *page, unsigned int order); +extern void __copy_page_owner(struct page *oldpage, struct page *newpage); +extern void __set_page_owner_migrate_reason(struct page *page, int reason); +extern void __dump_page_owner(struct page *page); +extern void pagetypeinfo_showmixedcount_print(struct seq_file *m, + pg_data_t *pgdat, struct zone *zone); + +static inline void reset_page_owner(struct page *page, unsigned int order) +{ + if (static_branch_unlikely(&page_owner_inited)) + __reset_page_owner(page, order); +} + +static inline void set_page_owner(struct page *page, + unsigned int order, gfp_t gfp_mask) +{ + if (static_branch_unlikely(&page_owner_inited)) + __set_page_owner(page, order, gfp_mask); +} + +static inline void split_page_owner(struct page *page, unsigned int order) +{ + if (static_branch_unlikely(&page_owner_inited)) + __split_page_owner(page, order); +} +static inline void copy_page_owner(struct page *oldpage, struct page *newpage) +{ + if (static_branch_unlikely(&page_owner_inited)) + 
__copy_page_owner(oldpage, newpage); +} +static inline void set_page_owner_migrate_reason(struct page *page, int reason) +{ + if (static_branch_unlikely(&page_owner_inited)) + __set_page_owner_migrate_reason(page, reason); +} +static inline void dump_page_owner(struct page *page) +{ + if (static_branch_unlikely(&page_owner_inited)) + __dump_page_owner(page); +} +#else +static inline void reset_page_owner(struct page *page, unsigned int order) +{ +} +static inline void set_page_owner(struct page *page, + unsigned int order, gfp_t gfp_mask) +{ +} +static inline void split_page_owner(struct page *page, + unsigned int order) +{ +} +static inline void copy_page_owner(struct page *oldpage, struct page *newpage) +{ +} +static inline void set_page_owner_migrate_reason(struct page *page, int reason) +{ +} +static inline void dump_page_owner(struct page *page) +{ +} +#endif /* CONFIG_PAGE_OWNER */ +#endif /* __LINUX_PAGE_OWNER_H */ diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h new file mode 100644 index 000000000..14d14beb1 --- /dev/null +++ b/include/linux/page_ref.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PAGE_REF_H +#define _LINUX_PAGE_REF_H + +#include +#include +#include +#include + +extern struct tracepoint __tracepoint_page_ref_set; +extern struct tracepoint __tracepoint_page_ref_mod; +extern struct tracepoint __tracepoint_page_ref_mod_and_test; +extern struct tracepoint __tracepoint_page_ref_mod_and_return; +extern struct tracepoint __tracepoint_page_ref_mod_unless; +extern struct tracepoint __tracepoint_page_ref_freeze; +extern struct tracepoint __tracepoint_page_ref_unfreeze; + +#ifdef CONFIG_DEBUG_PAGE_REF + +/* + * Ideally we would want to use the trace__enabled() helper + * functions. But due to include header file issues, that is not + * feasible. Instead we have to open code the static key functions. 
+ * + * See trace_##name##_enabled(void) in include/linux/tracepoint.h + */ +#define page_ref_tracepoint_active(t) static_key_false(&(t).key) + +extern void __page_ref_set(struct page *page, int v); +extern void __page_ref_mod(struct page *page, int v); +extern void __page_ref_mod_and_test(struct page *page, int v, int ret); +extern void __page_ref_mod_and_return(struct page *page, int v, int ret); +extern void __page_ref_mod_unless(struct page *page, int v, int u); +extern void __page_ref_freeze(struct page *page, int v, int ret); +extern void __page_ref_unfreeze(struct page *page, int v); + +#else + +#define page_ref_tracepoint_active(t) false + +static inline void __page_ref_set(struct page *page, int v) +{ +} +static inline void __page_ref_mod(struct page *page, int v) +{ +} +static inline void __page_ref_mod_and_test(struct page *page, int v, int ret) +{ +} +static inline void __page_ref_mod_and_return(struct page *page, int v, int ret) +{ +} +static inline void __page_ref_mod_unless(struct page *page, int v, int u) +{ +} +static inline void __page_ref_freeze(struct page *page, int v, int ret) +{ +} +static inline void __page_ref_unfreeze(struct page *page, int v) +{ +} + +#endif + +static inline int page_ref_count(struct page *page) +{ + return atomic_read(&page->_refcount); +} + +static inline int page_count(struct page *page) +{ + return atomic_read(&compound_head(page)->_refcount); +} + +static inline void set_page_count(struct page *page, int v) +{ + atomic_set(&page->_refcount, v); + if (page_ref_tracepoint_active(__tracepoint_page_ref_set)) + __page_ref_set(page, v); +} + +/* + * Setup the page count before being freed into the page allocator for + * the first time (boot or memory hotplug) + */ +static inline void init_page_count(struct page *page) +{ + set_page_count(page, 1); +} + +static inline void page_ref_add(struct page *page, int nr) +{ + atomic_add(nr, &page->_refcount); + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) + __page_ref_mod(page, nr); +} + +static inline void page_ref_sub(struct page *page, int nr) +{ + atomic_sub(nr, &page->_refcount); + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) + __page_ref_mod(page, -nr); +} + +static inline void page_ref_inc(struct page *page) +{ + atomic_inc(&page->_refcount); + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) + __page_ref_mod(page, 1); +} + +static inline void page_ref_dec(struct page *page) +{ + atomic_dec(&page->_refcount); + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) + __page_ref_mod(page, -1); +} + +static inline int page_ref_sub_and_test(struct page *page, int nr) +{ + int ret = atomic_sub_and_test(nr, &page->_refcount); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) + __page_ref_mod_and_test(page, -nr, ret); + return ret; +} + +static inline int page_ref_inc_return(struct page *page) +{ + int ret = atomic_inc_return(&page->_refcount); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) + __page_ref_mod_and_return(page, 1, ret); + return ret; +} + +static inline int page_ref_dec_and_test(struct page *page) +{ + int ret = atomic_dec_and_test(&page->_refcount); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) + __page_ref_mod_and_test(page, -1, ret); + return ret; +} + +static inline int page_ref_dec_return(struct page *page) +{ + int ret = atomic_dec_return(&page->_refcount); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) + __page_ref_mod_and_return(page, -1, ret); 
+ return ret; +} + +static inline int page_ref_add_unless(struct page *page, int nr, int u) +{ + int ret = atomic_add_unless(&page->_refcount, nr, u); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless)) + __page_ref_mod_unless(page, nr, ret); + return ret; +} + +static inline int page_ref_freeze(struct page *page, int count) +{ + int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze)) + __page_ref_freeze(page, count, ret); + return ret; +} + +static inline void page_ref_unfreeze(struct page *page, int count) +{ + VM_BUG_ON_PAGE(page_count(page) != 0, page); + VM_BUG_ON(count == 0); + + atomic_set_release(&page->_refcount, count); + if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) + __page_ref_unfreeze(page, count); +} + +#endif diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h new file mode 100644 index 000000000..9132c5cb4 --- /dev/null +++ b/include/linux/pageblock-flags.h @@ -0,0 +1,112 @@ +/* + * Macros for manipulating and testing flags related to a + * pageblock_nr_pages number of pages. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2006 + * + * Original author, Mel Gorman + * Major cleanups and reduction of bit operations, Andy Whitcroft + */ +#ifndef PAGEBLOCK_FLAGS_H +#define PAGEBLOCK_FLAGS_H + +#include + +/* Bit indices that affect a whole block of pages */ +enum pageblock_bits { + PB_migrate, + PB_migrate_end = PB_migrate + 3 - 1, + /* 3 bits required for migrate types */ + PB_migrate_skip,/* If set the block is skipped by compaction */ + + /* + * Assume the bits will always align on a word. If this assumption + * changes then get/set pageblock needs updating. + */ + NR_PAGEBLOCK_BITS +}; + +#ifdef CONFIG_HUGETLB_PAGE + +#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE + +/* Huge page sizes are variable */ +extern unsigned int pageblock_order; + +#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ + +/* Huge pages are a constant size */ +#define pageblock_order HUGETLB_PAGE_ORDER + +#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ + +#else /* CONFIG_HUGETLB_PAGE */ + +/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */ +#define pageblock_order (MAX_ORDER-1) + +#endif /* CONFIG_HUGETLB_PAGE */ + +#define pageblock_nr_pages (1UL << pageblock_order) + +/* Forward declaration */ +struct page; + +unsigned long get_pfnblock_flags_mask(struct page *page, + unsigned long pfn, + unsigned long end_bitidx, + unsigned long mask); + +void set_pfnblock_flags_mask(struct page *page, + unsigned long flags, + unsigned long pfn, + unsigned long end_bitidx, + unsigned long mask); + +/* Declarations for getting and setting flags. 
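
[Editorial sketch, not part of the patch: the freeze/unfreeze pairing defined just above, as used when a page must become temporarily unreachable (page migration is the classic caller). The function name and the "expected" bookkeeping are illustrative.]

#include <linux/page_ref.h>

static bool detach_page_sketch(struct page *page, int expected)
{
	/*
	 * Atomically swap the refcount from "expected" to 0.  If anyone else
	 * holds or is taking a reference this fails, and concurrent
	 * page_cache_get_speculative() callers see 0 and back off.
	 */
	if (!page_ref_freeze(page, expected))
		return false;

	/* ... the page is now invisible to speculative lookups ... */

	/* Hand the references back with release semantics. */
	page_ref_unfreeze(page, expected);
	return true;
}
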
See mm/page_alloc.c */ +#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \ + get_pfnblock_flags_mask(page, page_to_pfn(page), \ + end_bitidx, \ + (1 << (end_bitidx - start_bitidx + 1)) - 1) +#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \ + set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \ + end_bitidx, \ + (1 << (end_bitidx - start_bitidx + 1)) - 1) + +#ifdef CONFIG_COMPACTION +#define get_pageblock_skip(page) \ + get_pageblock_flags_group(page, PB_migrate_skip, \ + PB_migrate_skip) +#define clear_pageblock_skip(page) \ + set_pageblock_flags_group(page, 0, PB_migrate_skip, \ + PB_migrate_skip) +#define set_pageblock_skip(page) \ + set_pageblock_flags_group(page, 1, PB_migrate_skip, \ + PB_migrate_skip) +#else +static inline bool get_pageblock_skip(struct page *page) +{ + return false; +} +static inline void clear_pageblock_skip(struct page *page) +{ +} +static inline void set_pageblock_skip(struct page *page) +{ +} +#endif /* CONFIG_COMPACTION */ + +#endif /* PAGEBLOCK_FLAGS_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h new file mode 100644 index 000000000..33b63b2a1 --- /dev/null +++ b/include/linux/pagemap.h @@ -0,0 +1,649 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PAGEMAP_H +#define _LINUX_PAGEMAP_H + +/* + * Copyright 1995 Linus Torvalds + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for in_interrupt() */ +#include + +struct pagevec; + +/* + * Bits in mapping->flags. + */ +enum mapping_flags { + AS_EIO = 0, /* IO error on async write */ + AS_ENOSPC = 1, /* ENOSPC on async write */ + AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */ + AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */ + AS_EXITING = 4, /* final truncate in progress */ + /* writeback related tags are not used */ + AS_NO_WRITEBACK_TAGS = 5, +}; + +/** + * mapping_set_error - record a writeback error in the address_space + * @mapping - the mapping in which an error should be set + * @error - the error to set in the mapping + * + * When writeback fails in some way, we must record that error so that + * userspace can be informed when fsync and the like are called. We endeavor + * to report errors on any file that was open at the time of the error. Some + * internal callers also need to know when writeback errors have occurred. + * + * When a writeback error occurs, most filesystems will want to call + * mapping_set_error to record the error in the mapping so that it can be + * reported when the application calls fsync(2). 
+ */ +static inline void mapping_set_error(struct address_space *mapping, int error) +{ + if (likely(!error)) + return; + + /* Record in wb_err for checkers using errseq_t based tracking */ + filemap_set_wb_err(mapping, error); + + /* Record it in flags for now, for legacy callers */ + if (error == -ENOSPC) + set_bit(AS_ENOSPC, &mapping->flags); + else + set_bit(AS_EIO, &mapping->flags); +} + +static inline void mapping_set_unevictable(struct address_space *mapping) +{ + set_bit(AS_UNEVICTABLE, &mapping->flags); +} + +static inline void mapping_clear_unevictable(struct address_space *mapping) +{ + clear_bit(AS_UNEVICTABLE, &mapping->flags); +} + +static inline int mapping_unevictable(struct address_space *mapping) +{ + if (mapping) + return test_bit(AS_UNEVICTABLE, &mapping->flags); + return !!mapping; +} + +static inline void mapping_set_exiting(struct address_space *mapping) +{ + set_bit(AS_EXITING, &mapping->flags); +} + +static inline int mapping_exiting(struct address_space *mapping) +{ + return test_bit(AS_EXITING, &mapping->flags); +} + +static inline void mapping_set_no_writeback_tags(struct address_space *mapping) +{ + set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); +} + +static inline int mapping_use_writeback_tags(struct address_space *mapping) +{ + return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); +} + +static inline gfp_t mapping_gfp_mask(struct address_space * mapping) +{ + return mapping->gfp_mask; +} + +/* Restricts the given gfp_mask to what the mapping allows. */ +static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, + gfp_t gfp_mask) +{ + return mapping_gfp_mask(mapping) & gfp_mask; +} + +/* + * This is non-atomic. Only to be used before the mapping is activated. + * Probably needs a barrier... + */ +static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) +{ + m->gfp_mask = mask; +} + +void release_pages(struct page **pages, int nr); + +/* + * speculatively take a reference to a page. + * If the page is free (_refcount == 0), then _refcount is untouched, and 0 + * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned. + * + * This function must be called inside the same rcu_read_lock() section as has + * been used to lookup the page in the pagecache radix-tree (or page table): + * this allows allocators to use a synchronize_rcu() to stabilize _refcount. + * + * Unless an RCU grace period has passed, the count of all pages coming out + * of the allocator must be considered unstable. page_count may return higher + * than expected, and put_page must be able to do the right thing when the + * page has been finished with, no matter what it is subsequently allocated + * for (because put_page is what is used here to drop an invalid speculative + * reference). + * + * This is the interesting part of the lockless pagecache (and lockless + * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) + * has the following pattern: + * 1. find page in radix tree + * 2. conditionally increment refcount + * 3. check the page is still in pagecache (if no, goto 1) + * + * Remove-side that cares about stability of _refcount (eg. reclaim) has the + * following (with the i_pages lock held): + * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) + * B. remove page from pagecache + * C. 
free the page + * + * There are 2 critical interleavings that matter: + * - 2 runs before A: in this case, A sees elevated refcount and bails out + * - A runs before 2: in this case, 2 sees zero refcount and retries; + * subsequently, B will complete and 1 will find no page, causing the + * lookup to return NULL. + * + * It is possible that between 1 and 2, the page is removed then the exact same + * page is inserted into the same position in pagecache. That's OK: the + * old find_get_page using a lock could equally have run before or after + * such a re-insertion, depending on order that locks are granted. + * + * Lookups racing against pagecache insertion isn't a big problem: either 1 + * will find the page or it will not. Likewise, the old find_get_page could run + * either before the insertion or afterwards, depending on timing. + */ +static inline int page_cache_get_speculative(struct page *page) +{ +#ifdef CONFIG_TINY_RCU +# ifdef CONFIG_PREEMPT_COUNT + VM_BUG_ON(!in_atomic() && !irqs_disabled()); +# endif + /* + * Preempt must be disabled here - we rely on rcu_read_lock doing + * this for us. + * + * Pagecache won't be truncated from interrupt context, so if we have + * found a page in the radix tree here, we have pinned its refcount by + * disabling preempt, and hence no need for the "speculative get" that + * SMP requires. + */ + VM_BUG_ON_PAGE(page_count(page) == 0, page); + page_ref_inc(page); + +#else + if (unlikely(!get_page_unless_zero(page))) { + /* + * Either the page has been freed, or will be freed. + * In either case, retry here and the caller should + * do the right thing (see comments above). + */ + return 0; + } +#endif + VM_BUG_ON_PAGE(PageTail(page), page); + + return 1; +} + +/* + * Same as above, but add instead of inc (could just be merged) + */ +static inline int page_cache_add_speculative(struct page *page, int count) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) +# ifdef CONFIG_PREEMPT_COUNT + VM_BUG_ON(!in_atomic() && !irqs_disabled()); +# endif + VM_BUG_ON_PAGE(page_count(page) == 0, page); + page_ref_add(page, count); + +#else + if (unlikely(!page_ref_add_unless(page, count, 0))) + return 0; +#endif + VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); + + return 1; +} + +#ifdef CONFIG_NUMA +extern struct page *__page_cache_alloc(gfp_t gfp); +#else +static inline struct page *__page_cache_alloc(gfp_t gfp) +{ + return alloc_pages(gfp, 0); +} +#endif + +static inline struct page *page_cache_alloc(struct address_space *x) +{ + return __page_cache_alloc(mapping_gfp_mask(x)); +} + +static inline gfp_t readahead_gfp_mask(struct address_space *x) +{ + return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN; +} + +typedef int filler_t(void *, struct page *); + +pgoff_t page_cache_next_hole(struct address_space *mapping, + pgoff_t index, unsigned long max_scan); +pgoff_t page_cache_prev_hole(struct address_space *mapping, + pgoff_t index, unsigned long max_scan); + +#define FGP_ACCESSED 0x00000001 +#define FGP_LOCK 0x00000002 +#define FGP_CREAT 0x00000004 +#define FGP_WRITE 0x00000008 +#define FGP_NOFS 0x00000010 +#define FGP_NOWAIT 0x00000020 + +struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, + int fgp_flags, gfp_t cache_gfp_mask); + +/** + * find_get_page - find and get a page reference + * @mapping: the address_space to search + * @offset: the page index + * + * Looks up the page cache slot at @mapping & @offset. 
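
[Editorial sketch, not part of the patch: the three-step lookup protocol the comment above describes, in the shape filemap uses it. The direct radix_tree_lookup() calls stand in for the real slot handling, and shadow/exceptional entries are ignored for brevity.]

#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static struct page *lockless_lookup_sketch(struct address_space *mapping,
					   pgoff_t offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->i_pages, offset);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* step 2 */
			goto repeat;		/* page was being freed */

		/* step 3: recheck it wasn't truncated/replaced meanwhile */
		if (unlikely(page != radix_tree_lookup(&mapping->i_pages,
						       offset))) {
			put_page(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}
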
If there is a + * page cache page, it is returned with an increased refcount. + * + * Otherwise, %NULL is returned. + */ +static inline struct page *find_get_page(struct address_space *mapping, + pgoff_t offset) +{ + return pagecache_get_page(mapping, offset, 0, 0); +} + +static inline struct page *find_get_page_flags(struct address_space *mapping, + pgoff_t offset, int fgp_flags) +{ + return pagecache_get_page(mapping, offset, fgp_flags, 0); +} + +/** + * find_lock_page - locate, pin and lock a pagecache page + * @mapping: the address_space to search + * @offset: the page index + * + * Looks up the page cache slot at @mapping & @offset. If there is a + * page cache page, it is returned locked and with an increased + * refcount. + * + * Otherwise, %NULL is returned. + * + * find_lock_page() may sleep. + */ +static inline struct page *find_lock_page(struct address_space *mapping, + pgoff_t offset) +{ + return pagecache_get_page(mapping, offset, FGP_LOCK, 0); +} + +/** + * find_or_create_page - locate or add a pagecache page + * @mapping: the page's address_space + * @index: the page's index into the mapping + * @gfp_mask: page allocation mode + * + * Looks up the page cache slot at @mapping & @offset. If there is a + * page cache page, it is returned locked and with an increased + * refcount. + * + * If the page is not present, a new page is allocated using @gfp_mask + * and added to the page cache and the VM's LRU list. The page is + * returned locked and with an increased refcount. + * + * On memory exhaustion, %NULL is returned. + * + * find_or_create_page() may sleep, even if @gfp_flags specifies an + * atomic allocation! + */ +static inline struct page *find_or_create_page(struct address_space *mapping, + pgoff_t offset, gfp_t gfp_mask) +{ + return pagecache_get_page(mapping, offset, + FGP_LOCK|FGP_ACCESSED|FGP_CREAT, + gfp_mask); +} + +/** + * grab_cache_page_nowait - returns locked page at given index in given cache + * @mapping: target address_space + * @index: the page index + * + * Same as grab_cache_page(), but do not wait if the page is unavailable. + * This is intended for speculative data generators, where the data can + * be regenerated if the page couldn't be grabbed. This routine should + * be safe to call while holding the lock for another page. + * + * Clear __GFP_FS when allocating the page to avoid recursion into the fs + * and deadlock against the caller's locked page. 
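
[Editorial sketch, not part of the patch: a page returned by find_or_create_page() arrives locked and referenced, so the caller owes one unlock_page() and one put_page(). The function name and error code are illustrative.]

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int touch_index_sketch(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
	if (!page)
		return -ENOMEM;

	/* ... fill or update the page contents under the page lock ... */
	SetPageUptodate(page);

	unlock_page(page);	/* drop PG_locked taken via FGP_LOCK  */
	put_page(page);		/* drop the reference from the lookup */
	return 0;
}
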
+ */ +static inline struct page *grab_cache_page_nowait(struct address_space *mapping, + pgoff_t index) +{ + return pagecache_get_page(mapping, index, + FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, + mapping_gfp_mask(mapping)); +} + +struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); +struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); +unsigned find_get_entries(struct address_space *mapping, pgoff_t start, + unsigned int nr_entries, struct page **entries, + pgoff_t *indices); +unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, + pgoff_t end, unsigned int nr_pages, + struct page **pages); +static inline unsigned find_get_pages(struct address_space *mapping, + pgoff_t *start, unsigned int nr_pages, + struct page **pages) +{ + return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages, + pages); +} +unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, + unsigned int nr_pages, struct page **pages); +unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, + pgoff_t end, int tag, unsigned int nr_pages, + struct page **pages); +static inline unsigned find_get_pages_tag(struct address_space *mapping, + pgoff_t *index, int tag, unsigned int nr_pages, + struct page **pages) +{ + return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, + nr_pages, pages); +} +unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, + int tag, unsigned int nr_entries, + struct page **entries, pgoff_t *indices); + +struct page *grab_cache_page_write_begin(struct address_space *mapping, + pgoff_t index, unsigned flags); + +/* + * Returns locked page at given index in given cache, creating it if needed. + */ +static inline struct page *grab_cache_page(struct address_space *mapping, + pgoff_t index) +{ + return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); +} + +extern struct page * read_cache_page(struct address_space *mapping, + pgoff_t index, filler_t *filler, void *data); +extern struct page * read_cache_page_gfp(struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern int read_cache_pages(struct address_space *mapping, + struct list_head *pages, filler_t *filler, void *data); + +static inline struct page *read_mapping_page(struct address_space *mapping, + pgoff_t index, void *data) +{ + filler_t *filler = (filler_t *)mapping->a_ops->readpage; + return read_cache_page(mapping, index, filler, data); +} + +/* + * Get index of the page within radix-tree (but not for hugetlb pages). + * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) + */ +static inline pgoff_t page_to_index(struct page *page) +{ + pgoff_t pgoff; + + if (likely(!PageTransTail(page))) + return page->index; + + /* + * We don't initialize ->index for tail pages: calculate based on + * head page + */ + pgoff = compound_head(page)->index; + pgoff += page - compound_head(page); + return pgoff; +} + +extern pgoff_t hugetlb_basepage_index(struct page *page); + +/* + * Get the offset in PAGE_SIZE (even for hugetlb pages). + * (TODO: hugetlb pages should have ->index in PAGE_SIZE) + */ +static inline pgoff_t page_to_pgoff(struct page *page) +{ + if (unlikely(PageHuge(page))) + return hugetlb_basepage_index(page); + return page_to_index(page); +} + +/* + * Return byte-offset into filesystem object for page. 
+ */ +static inline loff_t page_offset(struct page *page) +{ + return ((loff_t)page->index) << PAGE_SHIFT; +} + +static inline loff_t page_file_offset(struct page *page) +{ + return ((loff_t)page_index(page)) << PAGE_SHIFT; +} + +extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, + unsigned long address); + +static inline pgoff_t linear_page_index(struct vm_area_struct *vma, + unsigned long address) +{ + pgoff_t pgoff; + if (unlikely(is_vm_hugetlb_page(vma))) + return linear_hugepage_index(vma, address); + pgoff = (address - vma->vm_start) >> PAGE_SHIFT; + pgoff += vma->vm_pgoff; + return pgoff; +} + +extern void __lock_page(struct page *page); +extern int __lock_page_killable(struct page *page); +extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags); +extern void unlock_page(struct page *page); + +static inline int trylock_page(struct page *page) +{ + page = compound_head(page); + return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); +} + +/* + * lock_page may only be called if we have the page's inode pinned. + */ +static inline void lock_page(struct page *page) +{ + might_sleep(); + if (!trylock_page(page)) + __lock_page(page); +} + +/* + * lock_page_killable is like lock_page but can be interrupted by fatal + * signals. It returns 0 if it locked the page and -EINTR if it was + * killed while waiting. + */ +static inline int lock_page_killable(struct page *page) +{ + might_sleep(); + if (!trylock_page(page)) + return __lock_page_killable(page); + return 0; +} + +/* + * lock_page_or_retry - Lock the page, unless this would block and the + * caller indicated that it can handle a retry. + * + * Return value and mmap_sem implications depend on flags; see + * __lock_page_or_retry(). + */ +static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags) +{ + might_sleep(); + return trylock_page(page) || __lock_page_or_retry(page, mm, flags); +} + +/* + * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc., + * and should not be used directly. + */ +extern void wait_on_page_bit(struct page *page, int bit_nr); +extern int wait_on_page_bit_killable(struct page *page, int bit_nr); + +/* + * Wait for a page to be unlocked. + * + * This must be called with the caller "holding" the page, + * ie with increased "page->count" so that the page won't + * go away during the wait.. + */ +static inline void wait_on_page_locked(struct page *page) +{ + if (PageLocked(page)) + wait_on_page_bit(compound_head(page), PG_locked); +} + +static inline int wait_on_page_locked_killable(struct page *page) +{ + if (!PageLocked(page)) + return 0; + return wait_on_page_bit_killable(compound_head(page), PG_locked); +} + +/* + * Wait for a page to complete writeback + */ +static inline void wait_on_page_writeback(struct page *page) +{ + if (PageWriteback(page)) + wait_on_page_bit(page, PG_writeback); +} + +extern void end_page_writeback(struct page *page); +void wait_for_stable_page(struct page *page); + +void page_endio(struct page *page, bool is_write, int err); + +/* + * Add an arbitrary waiter to a page's wait queue + */ +extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter); + +/* + * Fault everything in given userspace address range in. 
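
[Editorial sketch, not part of the patch: the usual lock-and-revalidate pattern built from the primitives above. lock_page() may sleep, so after waking the caller rechecks that truncation did not detach the page from the mapping. Names and return codes are illustrative.]

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int with_locked_page_sketch(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return -ENOENT;

	lock_page(page);
	if (page->mapping != mapping) {		/* truncated while we slept */
		unlock_page(page);
		put_page(page);
		return -EAGAIN;
	}

	wait_on_page_writeback(page);		/* stabilise before touching it */
	/* ... operate on the locked page ... */

	unlock_page(page);
	put_page(page);
	return 0;
}
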
+ */ +static inline int fault_in_pages_writeable(char __user *uaddr, int size) +{ + char __user *end = uaddr + size - 1; + + if (unlikely(size == 0)) + return 0; + + if (unlikely(uaddr > end)) + return -EFAULT; + /* + * Writing zeroes into userspace here is OK, because we know that if + * the zero gets there, we'll be overwriting it. + */ + do { + if (unlikely(__put_user(0, uaddr) != 0)) + return -EFAULT; + uaddr += PAGE_SIZE; + } while (uaddr <= end); + + /* Check whether the range spilled into the next page. */ + if (((unsigned long)uaddr & PAGE_MASK) == + ((unsigned long)end & PAGE_MASK)) + return __put_user(0, end); + + return 0; +} + +static inline int fault_in_pages_readable(const char __user *uaddr, int size) +{ + volatile char c; + const char __user *end = uaddr + size - 1; + + if (unlikely(size == 0)) + return 0; + + if (unlikely(uaddr > end)) + return -EFAULT; + + do { + if (unlikely(__get_user(c, uaddr) != 0)) + return -EFAULT; + uaddr += PAGE_SIZE; + } while (uaddr <= end); + + /* Check whether the range spilled into the next page. */ + if (((unsigned long)uaddr & PAGE_MASK) == + ((unsigned long)end & PAGE_MASK)) { + return __get_user(c, end); + } + + (void)c; + return 0; +} + +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +int add_to_page_cache_lru(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern void delete_from_page_cache(struct page *page); +extern void __delete_from_page_cache(struct page *page, void *shadow); +int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); +void delete_from_page_cache_batch(struct address_space *mapping, + struct pagevec *pvec); + +/* + * Like add_to_page_cache_locked, but used to add newly allocated pages: + * the page is new, so we can just run __SetPageLocked() against it. + */ +static inline int add_to_page_cache(struct page *page, + struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) +{ + int error; + + __SetPageLocked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (unlikely(error)) + __ClearPageLocked(page); + return error; +} + +static inline unsigned long dir_pages(struct inode *inode) +{ + return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> + PAGE_SHIFT; +} + +#endif /* _LINUX_PAGEMAP_H */ diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h new file mode 100644 index 000000000..6dc456ac6 --- /dev/null +++ b/include/linux/pagevec.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/pagevec.h + * + * In many places it is efficient to batch an operation up against multiple + * pages. A pagevec is a multipage container which is used for that. 
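
[Editorial sketch, not part of the patch: how fault_in_pages_readable() above is normally paired with an atomic user copy (compare the write(2) path in generic_perform_write()): the copy that must not sleep is retried after the source range has been faulted in. The wrapper name is illustrative.]

#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>

static int copy_in_retry_sketch(void *dst, const char __user *src, int len)
{
	for (;;) {
		unsigned long left;

		pagefault_disable();		/* the copy must not sleep */
		left = __copy_from_user_inatomic(dst, src, len);
		pagefault_enable();
		if (left == 0)
			return 0;

		/* Source not resident: fault it in outside the atomic section. */
		if (fault_in_pages_readable(src, len))
			return -EFAULT;
	}
}
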
+ */ + +#ifndef _LINUX_PAGEVEC_H +#define _LINUX_PAGEVEC_H + +/* 15 pointers + header align the pagevec structure to a power of two */ +#define PAGEVEC_SIZE 15 + +struct page; +struct address_space; + +struct pagevec { + unsigned char nr; + bool percpu_pvec_drained; + struct page *pages[PAGEVEC_SIZE]; +}; + +void __pagevec_release(struct pagevec *pvec); +void __pagevec_lru_add(struct pagevec *pvec); +unsigned pagevec_lookup_entries(struct pagevec *pvec, + struct address_space *mapping, + pgoff_t start, unsigned nr_entries, + pgoff_t *indices); +void pagevec_remove_exceptionals(struct pagevec *pvec); +unsigned pagevec_lookup_range(struct pagevec *pvec, + struct address_space *mapping, + pgoff_t *start, pgoff_t end); +static inline unsigned pagevec_lookup(struct pagevec *pvec, + struct address_space *mapping, + pgoff_t *start) +{ + return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1); +} + +unsigned pagevec_lookup_range_tag(struct pagevec *pvec, + struct address_space *mapping, pgoff_t *index, pgoff_t end, + int tag); +unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec, + struct address_space *mapping, pgoff_t *index, pgoff_t end, + int tag, unsigned max_pages); +static inline unsigned pagevec_lookup_tag(struct pagevec *pvec, + struct address_space *mapping, pgoff_t *index, int tag) +{ + return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag); +} + +static inline void pagevec_init(struct pagevec *pvec) +{ + pvec->nr = 0; + pvec->percpu_pvec_drained = false; +} + +static inline void pagevec_reinit(struct pagevec *pvec) +{ + pvec->nr = 0; +} + +static inline unsigned pagevec_count(struct pagevec *pvec) +{ + return pvec->nr; +} + +static inline unsigned pagevec_space(struct pagevec *pvec) +{ + return PAGEVEC_SIZE - pvec->nr; +} + +/* + * Add a page to a pagevec. Returns the number of slots still available. + */ +static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page) +{ + pvec->pages[pvec->nr++] = page; + return pagevec_space(pvec); +} + +static inline void pagevec_release(struct pagevec *pvec) +{ + if (pagevec_count(pvec)) + __pagevec_release(pvec); +} + +#endif /* _LINUX_PAGEVEC_H */ diff --git a/include/linux/parman.h b/include/linux/parman.h new file mode 100644 index 000000000..3c8cccc7d --- /dev/null +++ b/include/linux/parman.h @@ -0,0 +1,76 @@ +/* + * include/linux/parman.h - Manager for linear priority array areas + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
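
[Editorial sketch, not part of the patch: the batched mapping walk the pagevec above exists for, loosely following the filemap_fdatawait()-style loop. The function name and the per-page work are placeholders.]

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void walk_mapping_sketch(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned int i, nr;

	pagevec_init(&pvec);
	while ((nr = pagevec_lookup(&pvec, mapping, &index)) != 0) {
		for (i = 0; i < nr; i++) {
			struct page *page = pvec.pages[i];

			/* each page carries a reference from the lookup */
			wait_on_page_writeback(page);
		}
		/* drops all the references and resets the pagevec */
		pagevec_release(&pvec);
		cond_resched();
	}
}
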
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _PARMAN_H +#define _PARMAN_H + +#include + +enum parman_algo_type { + PARMAN_ALGO_TYPE_LSORT, +}; + +struct parman_item { + struct list_head list; + unsigned long index; +}; + +struct parman_prio { + struct list_head list; + struct list_head item_list; + unsigned long priority; +}; + +struct parman_ops { + unsigned long base_count; + unsigned long resize_step; + int (*resize)(void *priv, unsigned long new_count); + void (*move)(void *priv, unsigned long from_index, + unsigned long to_index, unsigned long count); + enum parman_algo_type algo; +}; + +struct parman; + +struct parman *parman_create(const struct parman_ops *ops, void *priv); +void parman_destroy(struct parman *parman); +void parman_prio_init(struct parman *parman, struct parman_prio *prio, + unsigned long priority); +void parman_prio_fini(struct parman_prio *prio); +int parman_item_add(struct parman *parman, struct parman_prio *prio, + struct parman_item *item); +void parman_item_remove(struct parman *parman, struct parman_prio *prio, + struct parman_item *item); + +#endif diff --git a/include/linux/parport.h b/include/linux/parport.h new file mode 100644 index 000000000..397607a0c --- /dev/null +++ b/include/linux/parport.h @@ -0,0 +1,521 @@ +/* + * Any part of this program may be used in documents licensed under + * the GNU Free Documentation License, Version 1.1 or any later version + * published by the Free Software Foundation. + */ +#ifndef _PARPORT_H_ +#define _PARPORT_H_ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Define this later. */ +struct parport; +struct pardevice; + +struct pc_parport_state { + unsigned int ctr; + unsigned int ecr; +}; + +struct ax_parport_state { + unsigned int ctr; + unsigned int ecr; + unsigned int dcsr; +}; + +/* used by both parport_amiga and parport_mfc3 */ +struct amiga_parport_state { + unsigned char data; /* ciaa.prb */ + unsigned char datadir; /* ciaa.ddrb */ + unsigned char status; /* ciab.pra & 7 */ + unsigned char statusdir;/* ciab.ddrb & 7 */ +}; + +struct ax88796_parport_state { + unsigned char cpr; +}; + +struct ip32_parport_state { + unsigned int dcr; + unsigned int ecr; +}; + +struct parport_state { + union { + struct pc_parport_state pc; + /* ARC has no state. */ + struct ax_parport_state ax; + struct amiga_parport_state amiga; + struct ax88796_parport_state ax88796; + /* Atari has not state. */ + struct ip32_parport_state ip32; + void *misc; + } u; +}; + +struct parport_operations { + /* IBM PC-style virtual registers. 
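
[Editorial sketch, not part of the patch: minimal wiring of the parman interface above. The caller supplies resize/move callbacks over its own linear region and lets parman hand out indices; all names and the base/step sizes are illustrative.]

#include <linux/errno.h>
#include <linux/parman.h>

static int sketch_resize(void *priv, unsigned long new_count)
{
	/* grow/shrink the backing region to hold new_count entries */
	return 0;
}

static void sketch_move(void *priv, unsigned long from_index,
			unsigned long to_index, unsigned long count)
{
	/* copy "count" entries from from_index to to_index in the region */
}

static const struct parman_ops sketch_parman_ops = {
	.base_count	= 16,
	.resize_step	= 16,
	.resize		= sketch_resize,
	.move		= sketch_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};

static int parman_usage_sketch(void *priv)
{
	struct parman *parman;
	struct parman_prio prio;
	struct parman_item item;
	int err;

	parman = parman_create(&sketch_parman_ops, priv);
	if (!parman)
		return -ENOMEM;

	parman_prio_init(parman, &prio, 10);
	err = parman_item_add(parman, &prio, &item);
	if (!err) {
		/* item.index now tells us which slot to program */
		parman_item_remove(parman, &prio, &item);
	}
	parman_prio_fini(&prio);
	parman_destroy(parman);
	return err;
}
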
*/ + void (*write_data)(struct parport *, unsigned char); + unsigned char (*read_data)(struct parport *); + + void (*write_control)(struct parport *, unsigned char); + unsigned char (*read_control)(struct parport *); + unsigned char (*frob_control)(struct parport *, unsigned char mask, + unsigned char val); + + unsigned char (*read_status)(struct parport *); + + /* IRQs. */ + void (*enable_irq)(struct parport *); + void (*disable_irq)(struct parport *); + + /* Data direction. */ + void (*data_forward) (struct parport *); + void (*data_reverse) (struct parport *); + + /* For core parport code. */ + void (*init_state)(struct pardevice *, struct parport_state *); + void (*save_state)(struct parport *, struct parport_state *); + void (*restore_state)(struct parport *, struct parport_state *); + + /* Block read/write */ + size_t (*epp_write_data) (struct parport *port, const void *buf, + size_t len, int flags); + size_t (*epp_read_data) (struct parport *port, void *buf, size_t len, + int flags); + size_t (*epp_write_addr) (struct parport *port, const void *buf, + size_t len, int flags); + size_t (*epp_read_addr) (struct parport *port, void *buf, size_t len, + int flags); + + size_t (*ecp_write_data) (struct parport *port, const void *buf, + size_t len, int flags); + size_t (*ecp_read_data) (struct parport *port, void *buf, size_t len, + int flags); + size_t (*ecp_write_addr) (struct parport *port, const void *buf, + size_t len, int flags); + + size_t (*compat_write_data) (struct parport *port, const void *buf, + size_t len, int flags); + size_t (*nibble_read_data) (struct parport *port, void *buf, + size_t len, int flags); + size_t (*byte_read_data) (struct parport *port, void *buf, + size_t len, int flags); + struct module *owner; +}; + +struct parport_device_info { + parport_device_class class; + const char *class_name; + const char *mfr; + const char *model; + const char *cmdset; + const char *description; +}; + +/* Each device can have two callback functions: + * 1) a preemption function, called by the resource manager to request + * that the driver relinquish control of the port. The driver should + * return zero if it agrees to release the port, and nonzero if it + * refuses. Do not call parport_release() - the kernel will do this + * implicitly. + * + * 2) a wake-up function, called by the resource manager to tell drivers + * that the port is available to be claimed. If a driver wants to use + * the port, it should call parport_claim() here. + */ + +/* A parallel port device */ +struct pardevice { + const char *name; + struct parport *port; + int daisy; + int (*preempt)(void *); + void (*wakeup)(void *); + void *private; + void (*irq_func)(void *); + unsigned int flags; + struct pardevice *next; + struct pardevice *prev; + struct device dev; + bool devmodel; + struct parport_state *state; /* saved status over preemption */ + wait_queue_head_t wait_q; + unsigned long int time; + unsigned long int timeslice; + volatile long int timeout; + unsigned long waiting; /* long req'd for set_bit --RR */ + struct pardevice *waitprev; + struct pardevice *waitnext; + void * sysctl_table; +}; + +#define to_pardevice(n) container_of(n, struct pardevice, dev) + +/* IEEE1284 information */ + +/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL + * PP[GS]ETPHASE, so do not change existing values. 
*/ +enum ieee1284_phase { + IEEE1284_PH_FWD_DATA, + IEEE1284_PH_FWD_IDLE, + IEEE1284_PH_TERMINATE, + IEEE1284_PH_NEGOTIATION, + IEEE1284_PH_HBUSY_DNA, + IEEE1284_PH_REV_IDLE, + IEEE1284_PH_HBUSY_DAVAIL, + IEEE1284_PH_REV_DATA, + IEEE1284_PH_ECP_SETUP, + IEEE1284_PH_ECP_FWD_TO_REV, + IEEE1284_PH_ECP_REV_TO_FWD, + IEEE1284_PH_ECP_DIR_UNKNOWN, +}; +struct ieee1284_info { + int mode; + volatile enum ieee1284_phase phase; + struct semaphore irq; +}; + +/* A parallel port */ +struct parport { + unsigned long base; /* base address */ + unsigned long base_hi; /* base address (hi - ECR) */ + unsigned int size; /* IO extent */ + const char *name; + unsigned int modes; + int irq; /* interrupt (or -1 for none) */ + int dma; + int muxport; /* which muxport (if any) this is */ + int portnum; /* which physical parallel port (not mux) */ + struct device *dev; /* Physical device associated with IO/DMA. + * This may unfortulately be null if the + * port has a legacy driver. + */ + struct device bus_dev; /* to link with the bus */ + struct parport *physport; + /* If this is a non-default mux + parport, i.e. we're a clone of a real + physical port, this is a pointer to that + port. The locking is only done in the + real port. For a clone port, the + following structure members are + meaningless: devices, cad, muxsel, + waithead, waittail, flags, pdir, + dev, ieee1284, *_lock. + + It this is a default mux parport, or + there is no mux involved, this points to + ourself. */ + + struct pardevice *devices; + struct pardevice *cad; /* port owner */ + int daisy; /* currently selected daisy addr */ + int muxsel; /* currently selected mux port */ + + struct pardevice *waithead; + struct pardevice *waittail; + + struct list_head list; + struct timer_list timer; + unsigned int flags; + + void *sysctl_table; + struct parport_device_info probe_info[5]; /* 0-3 + non-IEEE1284.3 */ + struct ieee1284_info ieee1284; + + struct parport_operations *ops; + void *private_data; /* for lowlevel driver */ + + int number; /* port index - the `n' in `parportn' */ + spinlock_t pardevice_lock; + spinlock_t waitlist_lock; + rwlock_t cad_lock; + + int spintime; + atomic_t ref_count; + + unsigned long devflags; +#define PARPORT_DEVPROC_REGISTERED 0 + struct pardevice *proc_device; /* Currently register proc device */ + + struct list_head full_list; + struct parport *slaves[3]; +}; + +#define to_parport_dev(n) container_of(n, struct parport, bus_dev) + +#define DEFAULT_SPIN_TIME 500 /* us */ + +struct parport_driver { + const char *name; + void (*attach) (struct parport *); + void (*detach) (struct parport *); + void (*match_port)(struct parport *); + int (*probe)(struct pardevice *); + struct device_driver driver; + bool devmodel; + struct list_head list; +}; + +#define to_parport_driver(n) container_of(n, struct parport_driver, driver) + +int parport_bus_init(void); +void parport_bus_exit(void); + +/* parport_register_port registers a new parallel port at the given + address (if one does not already exist) and returns a pointer to it. + This entails claiming the I/O region, IRQ and DMA. NULL is returned + if initialisation fails. */ +struct parport *parport_register_port(unsigned long base, int irq, int dma, + struct parport_operations *ops); + +/* Once a registered port is ready for high-level drivers to use, the + low-level driver that registered it should announce it. This will + call the high-level drivers' attach() functions (after things like + determining the IEEE 1284.3 topology of the port and collecting + DeviceIDs). 
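+
+   For illustration only: a minimal sketch of the other side of this
+   handshake, a high-level driver's attach() registering a device and
+   transferring one byte.  The example_* names are hypothetical.
+
+	static int example_preempt(void *handle)
+	{
+		return 0;		// hypothetical: always yields the port
+	}
+
+	static void example_attach(struct parport *port)
+	{
+		struct pardevice *dev;
+
+		dev = parport_register_device(port, "example",
+					      example_preempt, NULL, NULL,
+					      0, NULL);
+		if (!dev)
+			return;
+		if (parport_claim_or_block(dev) < 0)
+			return;
+		parport_write_data(port, 0x55);	// drive the data lines
+		parport_release(dev);
+	}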
*/ +void parport_announce_port (struct parport *port); + +/* Unregister a port. */ +extern void parport_remove_port(struct parport *port); + +/* Register a new high-level driver. */ + +int __must_check __parport_register_driver(struct parport_driver *, + struct module *, + const char *mod_name); +/* + * parport_register_driver must be a macro so that KBUILD_MODNAME can + * be expanded + */ +#define parport_register_driver(driver) \ + __parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) + +/* Unregister a high-level driver. */ +extern void parport_unregister_driver (struct parport_driver *); +void parport_unregister_driver(struct parport_driver *); + +/* If parport_register_driver doesn't fit your needs, perhaps + * parport_find_xxx does. */ +extern struct parport *parport_find_number (int); +extern struct parport *parport_find_base (unsigned long); + +/* generic irq handler, if it suits your needs */ +extern irqreturn_t parport_irq_handler(int irq, void *dev_id); + +/* Reference counting for ports. */ +extern struct parport *parport_get_port (struct parport *); +extern void parport_put_port (struct parport *); +void parport_del_port(struct parport *); + +struct pardev_cb { + int (*preempt)(void *); + void (*wakeup)(void *); + void *private; + void (*irq_func)(void *); + unsigned int flags; +}; + +/* parport_register_device declares that a device is connected to a + port, and tells the kernel all it needs to know. + - pf is the preemption function (may be NULL for no callback) + - kf is the wake-up function (may be NULL for no callback) + - irq_func is the interrupt handler (may be NULL for no interrupts) + - handle is a user pointer that gets handed to callback functions. */ +struct pardevice *parport_register_device(struct parport *port, + const char *name, + int (*pf)(void *), void (*kf)(void *), + void (*irq_func)(void *), + int flags, void *handle); + +struct pardevice * +parport_register_dev_model(struct parport *port, const char *name, + const struct pardev_cb *par_dev_cb, int cnt); + +/* parport_unregister unlinks a device from the chain. */ +extern void parport_unregister_device(struct pardevice *dev); + +/* parport_claim tries to gain ownership of the port for a particular + driver. This may fail (return non-zero) if another driver is busy. + If this driver has registered an interrupt handler, it will be + enabled. */ +extern int parport_claim(struct pardevice *dev); + +/* parport_claim_or_block is the same, but sleeps if the port cannot + be claimed. Return value is 1 if it slept, 0 normally and -errno + on error. */ +extern int parport_claim_or_block(struct pardevice *dev); + +/* parport_release reverses a previous parport_claim. This can never + fail, though the effects are undefined (except that they are bad) + if you didn't previously own the port. Once you have released the + port you should make sure that neither your code nor the hardware + on the port tries to initiate any communication without first + re-claiming the port. If you mess with the port state (enabling + ECP for example) you should clean up before releasing the port. */ + +extern void parport_release(struct pardevice *dev); + +/** + * parport_yield - relinquish a parallel port temporarily + * @dev: a device on the parallel port + * + * This function relinquishes the port if it would be helpful to other + * drivers to do so. Afterwards it tries to reclaim the port using + * parport_claim(), and the return value is the same as for + * parport_claim(). 
If it fails, the port is left unclaimed and it is + * the driver's responsibility to reclaim the port. + * + * The parport_yield() and parport_yield_blocking() functions are for + * marking points in the driver at which other drivers may claim the + * port and use their devices. Yielding the port is similar to + * releasing it and reclaiming it, but is more efficient because no + * action is taken if there are no other devices needing the port. In + * fact, nothing is done even if there are other devices waiting but + * the current device is still within its "timeslice". The default + * timeslice is half a second, but it can be adjusted via the /proc + * interface. + **/ +static __inline__ int parport_yield(struct pardevice *dev) +{ + unsigned long int timeslip = (jiffies - dev->time); + if ((dev->port->waithead == NULL) || (timeslip < dev->timeslice)) + return 0; + parport_release(dev); + return parport_claim(dev); +} + +/** + * parport_yield_blocking - relinquish a parallel port temporarily + * @dev: a device on the parallel port + * + * This function relinquishes the port if it would be helpful to other + * drivers to do so. Afterwards it tries to reclaim the port using + * parport_claim_or_block(), and the return value is the same as for + * parport_claim_or_block(). + **/ +static __inline__ int parport_yield_blocking(struct pardevice *dev) +{ + unsigned long int timeslip = (jiffies - dev->time); + if ((dev->port->waithead == NULL) || (timeslip < dev->timeslice)) + return 0; + parport_release(dev); + return parport_claim_or_block(dev); +} + +/* Flags used to identify what a device does. */ +#define PARPORT_DEV_TRAN 0 /* WARNING !! DEPRECATED !! */ +#define PARPORT_DEV_LURK (1<<0) /* WARNING !! DEPRECATED !! */ +#define PARPORT_DEV_EXCL (1<<1) /* Need exclusive access. */ + +#define PARPORT_FLAG_EXCL (1<<1) /* EXCL driver registered. 
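+					   (set on a port once a device has been
+					   registered on it with PARPORT_DEV_EXCL;
+					   while it is set, no further devices can
+					   be registered on that port)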
*/ + +/* IEEE1284 functions */ +extern void parport_ieee1284_interrupt (void *); +extern int parport_negotiate (struct parport *, int mode); +extern ssize_t parport_write (struct parport *, const void *buf, size_t len); +extern ssize_t parport_read (struct parport *, void *buf, size_t len); + +#define PARPORT_INACTIVITY_O_NONBLOCK 1 +extern long parport_set_timeout (struct pardevice *, long inactivity); + +extern int parport_wait_event (struct parport *, long timeout); +extern int parport_wait_peripheral (struct parport *port, + unsigned char mask, + unsigned char val); +extern int parport_poll_peripheral (struct parport *port, + unsigned char mask, + unsigned char val, + int usec); + +/* For architectural drivers */ +extern size_t parport_ieee1284_write_compat (struct parport *, + const void *, size_t, int); +extern size_t parport_ieee1284_read_nibble (struct parport *, + void *, size_t, int); +extern size_t parport_ieee1284_read_byte (struct parport *, + void *, size_t, int); +extern size_t parport_ieee1284_ecp_read_data (struct parport *, + void *, size_t, int); +extern size_t parport_ieee1284_ecp_write_data (struct parport *, + const void *, size_t, int); +extern size_t parport_ieee1284_ecp_write_addr (struct parport *, + const void *, size_t, int); +extern size_t parport_ieee1284_epp_write_data (struct parport *, + const void *, size_t, int); +extern size_t parport_ieee1284_epp_read_data (struct parport *, + void *, size_t, int); +extern size_t parport_ieee1284_epp_write_addr (struct parport *, + const void *, size_t, int); +extern size_t parport_ieee1284_epp_read_addr (struct parport *, + void *, size_t, int); + +/* IEEE1284.3 functions */ +extern int parport_daisy_init (struct parport *port); +extern void parport_daisy_fini (struct parport *port); +extern struct pardevice *parport_open (int devnum, const char *name); +extern void parport_close (struct pardevice *dev); +extern ssize_t parport_device_id (int devnum, char *buffer, size_t len); +extern void parport_daisy_deselect_all (struct parport *port); +extern int parport_daisy_select (struct parport *port, int daisy, int mode); + +/* Lowlevel drivers _can_ call this support function to handle irqs. */ +static inline void parport_generic_irq(struct parport *port) +{ + parport_ieee1284_interrupt (port); + read_lock(&port->cad_lock); + if (port->cad && port->cad->irq_func) + port->cad->irq_func(port->cad->private); + read_unlock(&port->cad_lock); +} + +/* Prototypes from parport_procfs */ +extern int parport_proc_register(struct parport *pp); +extern int parport_proc_unregister(struct parport *pp); +extern int parport_device_proc_register(struct pardevice *device); +extern int parport_device_proc_unregister(struct pardevice *device); + +/* If PC hardware is the only type supported, we can optimise a bit. 
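+   (with CONFIG_PARPORT_NOT_PC unset, the accessor macros below expand
+   directly to the inlined parport_pc_* register accessors rather than
+   going through the port's ops dispatch table)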
*/ +#if !defined(CONFIG_PARPORT_NOT_PC) + +#include +#define parport_write_data(p,x) parport_pc_write_data(p,x) +#define parport_read_data(p) parport_pc_read_data(p) +#define parport_write_control(p,x) parport_pc_write_control(p,x) +#define parport_read_control(p) parport_pc_read_control(p) +#define parport_frob_control(p,m,v) parport_pc_frob_control(p,m,v) +#define parport_read_status(p) parport_pc_read_status(p) +#define parport_enable_irq(p) parport_pc_enable_irq(p) +#define parport_disable_irq(p) parport_pc_disable_irq(p) +#define parport_data_forward(p) parport_pc_data_forward(p) +#define parport_data_reverse(p) parport_pc_data_reverse(p) + +#else /* !CONFIG_PARPORT_NOT_PC */ + +/* Generic operations vector through the dispatch table. */ +#define parport_write_data(p,x) (p)->ops->write_data(p,x) +#define parport_read_data(p) (p)->ops->read_data(p) +#define parport_write_control(p,x) (p)->ops->write_control(p,x) +#define parport_read_control(p) (p)->ops->read_control(p) +#define parport_frob_control(p,m,v) (p)->ops->frob_control(p,m,v) +#define parport_read_status(p) (p)->ops->read_status(p) +#define parport_enable_irq(p) (p)->ops->enable_irq(p) +#define parport_disable_irq(p) (p)->ops->disable_irq(p) +#define parport_data_forward(p) (p)->ops->data_forward(p) +#define parport_data_reverse(p) (p)->ops->data_reverse(p) + +#endif /* !CONFIG_PARPORT_NOT_PC */ + +extern unsigned long parport_default_timeslice; +extern int parport_default_spintime; + +#endif /* _PARPORT_H_ */ diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h new file mode 100644 index 000000000..3d6fc576d --- /dev/null +++ b/include/linux/parport_pc.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PARPORT_PC_H +#define __LINUX_PARPORT_PC_H + +#include + +/* --- register definitions ------------------------------- */ + +#define ECONTROL(p) ((p)->base_hi + 0x2) +#define CONFIGB(p) ((p)->base_hi + 0x1) +#define CONFIGA(p) ((p)->base_hi + 0x0) +#define FIFO(p) ((p)->base_hi + 0x0) +#define EPPDATA(p) ((p)->base + 0x4) +#define EPPADDR(p) ((p)->base + 0x3) +#define CONTROL(p) ((p)->base + 0x2) +#define STATUS(p) ((p)->base + 0x1) +#define DATA(p) ((p)->base + 0x0) + +struct parport_pc_private { + /* Contents of CTR. */ + unsigned char ctr; + + /* Bitmask of writable CTR bits. */ + unsigned char ctr_writable; + + /* Whether or not there's an ECR. */ + int ecr; + + /* Number of PWords that FIFO will hold. */ + int fifo_depth; + + /* Number of bytes per portword. */ + int pword; + + /* Not used yet. 
*/ + int readIntrThreshold; + int writeIntrThreshold; + + /* buffer suitable for DMA, if DMA enabled */ + char *dma_buf; + dma_addr_t dma_handle; + struct list_head list; + struct parport *port; +}; + +struct parport_pc_via_data +{ + /* ISA PnP IRQ routing register 1 */ + u8 via_pci_parport_irq_reg; + /* ISA PnP DMA request routing register */ + u8 via_pci_parport_dma_reg; + /* Register and value to enable SuperIO configuration access */ + u8 via_pci_superio_config_reg; + u8 via_pci_superio_config_data; + /* SuperIO function register number */ + u8 viacfg_function; + /* parallel port control register number */ + u8 viacfg_parport_control; + /* Parallel port base address register */ + u8 viacfg_parport_base; +}; + +static __inline__ void parport_pc_write_data(struct parport *p, unsigned char d) +{ +#ifdef DEBUG_PARPORT + printk (KERN_DEBUG "parport_pc_write_data(%p,0x%02x)\n", p, d); +#endif + outb(d, DATA(p)); +} + +static __inline__ unsigned char parport_pc_read_data(struct parport *p) +{ + unsigned char val = inb (DATA (p)); +#ifdef DEBUG_PARPORT + printk (KERN_DEBUG "parport_pc_read_data(%p) = 0x%02x\n", + p, val); +#endif + return val; +} + +#ifdef DEBUG_PARPORT +static inline void dump_parport_state (char *str, struct parport *p) +{ + /* here's hoping that reading these ports won't side-effect anything underneath */ + unsigned char ecr = inb (ECONTROL (p)); + unsigned char dcr = inb (CONTROL (p)); + unsigned char dsr = inb (STATUS (p)); + static const char *const ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"}; + const struct parport_pc_private *priv = p->physport->private_data; + int i; + + printk (KERN_DEBUG "*** parport state (%s): ecr=[%s", str, ecr_modes[(ecr & 0xe0) >> 5]); + if (ecr & 0x10) printk (",nErrIntrEn"); + if (ecr & 0x08) printk (",dmaEn"); + if (ecr & 0x04) printk (",serviceIntr"); + if (ecr & 0x02) printk (",f_full"); + if (ecr & 0x01) printk (",f_empty"); + for (i=0; i<2; i++) { + printk ("] dcr(%s)=[", i ? "soft" : "hard"); + dcr = i ? priv->ctr : inb (CONTROL (p)); + + if (dcr & 0x20) { + printk ("rev"); + } else { + printk ("fwd"); + } + if (dcr & 0x10) printk (",ackIntEn"); + if (!(dcr & 0x08)) printk (",N-SELECT-IN"); + if (dcr & 0x04) printk (",N-INIT"); + if (!(dcr & 0x02)) printk (",N-AUTOFD"); + if (!(dcr & 0x01)) printk (",N-STROBE"); + } + printk ("] dsr=["); + if (!(dsr & 0x80)) printk ("BUSY"); + if (dsr & 0x40) printk (",N-ACK"); + if (dsr & 0x20) printk (",PERROR"); + if (dsr & 0x10) printk (",SELECT"); + if (dsr & 0x08) printk (",N-FAULT"); + printk ("]\n"); + return; +} +#else /* !DEBUG_PARPORT */ +#define dump_parport_state(args...) +#endif /* !DEBUG_PARPORT */ + +/* __parport_pc_frob_control differs from parport_pc_frob_control in that + * it doesn't do any extra masking. */ +static __inline__ unsigned char __parport_pc_frob_control (struct parport *p, + unsigned char mask, + unsigned char val) +{ + struct parport_pc_private *priv = p->physport->private_data; + unsigned char ctr = priv->ctr; +#ifdef DEBUG_PARPORT + printk (KERN_DEBUG + "__parport_pc_frob_control(%02x,%02x): %02x -> %02x\n", + mask, val, ctr, ((ctr & ~mask) ^ val) & priv->ctr_writable); +#endif + ctr = (ctr & ~mask) ^ val; + ctr &= priv->ctr_writable; /* only write writable bits. 
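+					  (the function works on the soft copy in
+					  priv->ctr, masks it with ctr_writable,
+					  writes the result to the hardware and
+					  then refreshes the soft copy)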
*/ + outb (ctr, CONTROL (p)); + priv->ctr = ctr; /* Update soft copy */ + return ctr; +} + +static __inline__ void parport_pc_data_reverse (struct parport *p) +{ + __parport_pc_frob_control (p, 0x20, 0x20); +} + +static __inline__ void parport_pc_data_forward (struct parport *p) +{ + __parport_pc_frob_control (p, 0x20, 0x00); +} + +static __inline__ void parport_pc_write_control (struct parport *p, + unsigned char d) +{ + const unsigned char wm = (PARPORT_CONTROL_STROBE | + PARPORT_CONTROL_AUTOFD | + PARPORT_CONTROL_INIT | + PARPORT_CONTROL_SELECT); + + /* Take this out when drivers have adapted to newer interface. */ + if (d & 0x20) { + printk (KERN_DEBUG "%s (%s): use data_reverse for this!\n", + p->name, p->cad->name); + parport_pc_data_reverse (p); + } + + __parport_pc_frob_control (p, wm, d & wm); +} + +static __inline__ unsigned char parport_pc_read_control(struct parport *p) +{ + const unsigned char rm = (PARPORT_CONTROL_STROBE | + PARPORT_CONTROL_AUTOFD | + PARPORT_CONTROL_INIT | + PARPORT_CONTROL_SELECT); + const struct parport_pc_private *priv = p->physport->private_data; + return priv->ctr & rm; /* Use soft copy */ +} + +static __inline__ unsigned char parport_pc_frob_control (struct parport *p, + unsigned char mask, + unsigned char val) +{ + const unsigned char wm = (PARPORT_CONTROL_STROBE | + PARPORT_CONTROL_AUTOFD | + PARPORT_CONTROL_INIT | + PARPORT_CONTROL_SELECT); + + /* Take this out when drivers have adapted to newer interface. */ + if (mask & 0x20) { + printk (KERN_DEBUG "%s (%s): use data_%s for this!\n", + p->name, p->cad->name, + (val & 0x20) ? "reverse" : "forward"); + if (val & 0x20) + parport_pc_data_reverse (p); + else + parport_pc_data_forward (p); + } + + /* Restrict mask and val to control lines. */ + mask &= wm; + val &= wm; + + return __parport_pc_frob_control (p, mask, val); +} + +static __inline__ unsigned char parport_pc_read_status(struct parport *p) +{ + return inb(STATUS(p)); +} + + +static __inline__ void parport_pc_disable_irq(struct parport *p) +{ + __parport_pc_frob_control (p, 0x10, 0x00); +} + +static __inline__ void parport_pc_enable_irq(struct parport *p) +{ + __parport_pc_frob_control (p, 0x10, 0x10); +} + +extern void parport_pc_release_resources(struct parport *p); + +extern int parport_pc_claim_resources(struct parport *p); + +/* PCMCIA code will want to get us to look at a port. Provide a mechanism. */ +extern struct parport *parport_pc_probe_port(unsigned long base, + unsigned long base_hi, + int irq, int dma, + struct device *dev, + int irqflags); +extern void parport_pc_unregister_port(struct parport *p); + +#endif diff --git a/include/linux/parser.h b/include/linux/parser.h new file mode 100644 index 000000000..12fc3482f --- /dev/null +++ b/include/linux/parser.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/parser.h + * + * Header for lib/parser.c + * Intended use of these functions is parsing filesystem argument lists, + * but could potentially be used anywhere else that simple option=arg + * parsing is required. + */ + + +/* associates an integer enumerator with a pattern string. 
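+ *
+ * For illustration only, a minimal sketch of the intended use when
+ * parsing "key=value" option strings; the Opt_* names, the table and
+ * the option string p are hypothetical:
+ *
+ *	enum { Opt_uid, Opt_err };	// hypothetical option tokens
+ *
+ *	static const match_table_t tokens = {
+ *		{ Opt_uid, "uid=%u" },
+ *		{ Opt_err, NULL }
+ *	};
+ *
+ *	substring_t args[MAX_OPT_ARGS];
+ *	int option;
+ *
+ *	switch (match_token(p, tokens, args)) {
+ *	case Opt_uid:
+ *		if (match_int(&args[0], &option))
+ *			return -EINVAL;
+ *		break;			// option now holds the parsed uid
+ *	default:
+ *		return -EINVAL;
+ *	}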
*/ +struct match_token { + int token; + const char *pattern; +}; + +typedef struct match_token match_table_t[]; + +/* Maximum number of arguments that match_token will find in a pattern */ +enum {MAX_OPT_ARGS = 3}; + +/* Describe the location within a string of a substring */ +typedef struct { + char *from; + char *to; +} substring_t; + +int match_token(char *, const match_table_t table, substring_t args[]); +int match_int(substring_t *, int *result); +int match_u64(substring_t *, u64 *result); +int match_octal(substring_t *, int *result); +int match_hex(substring_t *, int *result); +bool match_wildcard(const char *pattern, const char *str); +size_t match_strlcpy(char *, const substring_t *, size_t); +char *match_strdup(const substring_t *); diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h new file mode 100644 index 000000000..9fade5dd2 --- /dev/null +++ b/include/linux/pata_arasan_cf_data.h @@ -0,0 +1,47 @@ +/* + * include/linux/pata_arasan_cf_data.h + * + * Arasan Compact Flash host controller platform data header file + * + * Copyright (C) 2011 ST Microelectronics + * Viresh Kumar + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _PATA_ARASAN_CF_DATA_H +#define _PATA_ARASAN_CF_DATA_H + +#include + +struct arasan_cf_pdata { + u8 cf_if_clk; + #define CF_IF_CLK_100M (0x0) + #define CF_IF_CLK_75M (0x1) + #define CF_IF_CLK_66M (0x2) + #define CF_IF_CLK_50M (0x3) + #define CF_IF_CLK_40M (0x4) + #define CF_IF_CLK_33M (0x5) + #define CF_IF_CLK_25M (0x6) + #define CF_IF_CLK_125M (0x7) + #define CF_IF_CLK_150M (0x8) + #define CF_IF_CLK_166M (0x9) + #define CF_IF_CLK_200M (0xA) + /* + * Platform specific incapabilities of CF controller is handled via + * quirks + */ + u32 quirk; + #define CF_BROKEN_PIO (1) + #define CF_BROKEN_MWDMA (1 << 1) + #define CF_BROKEN_UDMA (1 << 2) +}; + +static inline void +set_arasan_cf_pdata(struct platform_device *pdev, struct arasan_cf_pdata *data) +{ + pdev->dev.platform_data = data; +} +#endif /* _PATA_ARASAN_CF_DATA_H */ diff --git a/include/linux/patchkey.h b/include/linux/patchkey.h new file mode 100644 index 000000000..f581defb2 --- /dev/null +++ b/include/linux/patchkey.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * -- definition of _PATCHKEY macro + * + * Copyright (C) 2005 Stuart Brady + * + * This exists because awe_voice.h defined its own _PATCHKEY and it wasn't + * clear whether removing this would break anything in userspace. + * + * Do not include this file directly. Please use instead. 
+ * For kernel code, use + */ +#ifndef _LINUX_PATCHKEY_H +#define _LINUX_PATCHKEY_H + +# include +#include + +# if defined(__BIG_ENDIAN) +# define _PATCHKEY(id) (0xfd00|id) +# elif defined(__LITTLE_ENDIAN) +# define _PATCHKEY(id) ((id<<8)|0x00fd) +# else +# error "could not determine byte order" +# endif +#endif /* _LINUX_PATCHKEY_H */ diff --git a/include/linux/path.h b/include/linux/path.h new file mode 100644 index 000000000..475225a03 --- /dev/null +++ b/include/linux/path.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PATH_H +#define _LINUX_PATH_H + +struct dentry; +struct vfsmount; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +} __randomize_layout; + +extern void path_get(const struct path *); +extern void path_put(const struct path *); + +static inline int path_equal(const struct path *path1, const struct path *path2) +{ + return path1->mnt == path2->mnt && path1->dentry == path2->dentry; +} + +static inline void path_put_init(struct path *path) +{ + path_put(path); + *path = (struct path) { }; +} + +#endif /* _LINUX_PATH_H */ diff --git a/include/linux/pch_dma.h b/include/linux/pch_dma.h new file mode 100644 index 000000000..fdafe529e --- /dev/null +++ b/include/linux/pch_dma.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef PCH_DMA_H +#define PCH_DMA_H + +#include + +enum pch_dma_width { + PCH_DMA_WIDTH_1_BYTE, + PCH_DMA_WIDTH_2_BYTES, + PCH_DMA_WIDTH_4_BYTES, +}; + +struct pch_dma_slave { + struct device *dma_dev; + unsigned int chan_id; + dma_addr_t tx_reg; + dma_addr_t rx_reg; + enum pch_dma_width width; +}; + +#endif diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h new file mode 100644 index 000000000..8082b612f --- /dev/null +++ b/include/linux/pci-acpi.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * File pci-acpi.h + * + * Copyright (C) 2004 Intel + * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) + */ + +#ifndef _PCI_ACPI_H_ +#define _PCI_ACPI_H_ + +#include + +#ifdef CONFIG_ACPI +extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev); +static inline acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) +{ + return acpi_remove_pm_notifier(dev); +} +extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, + struct pci_dev *pci_dev); +static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) +{ + return acpi_remove_pm_notifier(dev); +} +extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); + +struct pci_ecam_ops; +extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres, + struct pci_ecam_ops **ecam_ops); + +static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) +{ + struct pci_bus *pbus = pdev->bus; + + /* Find a PCI root bus */ + while (!pci_is_root_bus(pbus)) + pbus = pbus->parent; + + return ACPI_HANDLE(pbus->bridge); +} + +static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) +{ + struct device *dev; + + if (pci_is_root_bus(pbus)) + dev = pbus->bridge; + else { + /* If pbus is a virtual bus, there is no bridge to it */ + if (!pbus->self) + return NULL; + + dev = &pbus->self->dev; + } + + return ACPI_HANDLE(dev); +} + +struct acpi_pci_root; +struct acpi_pci_root_ops; + +struct acpi_pci_root_info { + struct acpi_pci_root *root; + struct acpi_device *bridge; + struct acpi_pci_root_ops *ops; + struct list_head resources; + char name[16]; +}; + +struct acpi_pci_root_ops { + struct pci_ops *pci_ops; + int (*init_info)(struct acpi_pci_root_info *info); + void (*release_info)(struct acpi_pci_root_info *info); + int (*prepare_resources)(struct acpi_pci_root_info *info); +}; + +extern int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info); +extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, + struct acpi_pci_root_ops *ops, + struct acpi_pci_root_info *info, + void *sd); + +void acpi_pci_add_bus(struct pci_bus *bus); +void acpi_pci_remove_bus(struct pci_bus *bus); + +#ifdef CONFIG_ACPI_PCI_SLOT +void acpi_pci_slot_init(void); +void acpi_pci_slot_enumerate(struct pci_bus *bus); +void acpi_pci_slot_remove(struct pci_bus *bus); +#else +static inline void acpi_pci_slot_init(void) { } +static inline void acpi_pci_slot_enumerate(struct pci_bus *bus) { } +static inline void acpi_pci_slot_remove(struct pci_bus *bus) { } +#endif + +#ifdef CONFIG_HOTPLUG_PCI_ACPI +void acpiphp_init(void); +void acpiphp_enumerate_slots(struct pci_bus *bus); +void acpiphp_remove_slots(struct pci_bus *bus); +void acpiphp_check_host_bridge(struct acpi_device *adev); +#else +static inline void acpiphp_init(void) { } +static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { } +static inline void acpiphp_remove_slots(struct pci_bus *bus) { } +static inline void acpiphp_check_host_bridge(struct 
acpi_device *adev) { } +#endif + +extern const guid_t pci_acpi_dsm_guid; +#define DEVICE_LABEL_DSM 0x07 +#define RESET_DELAY_DSM 0x08 +#define FUNCTION_DELAY_DSM 0x09 + +#else /* CONFIG_ACPI */ +static inline void acpi_pci_add_bus(struct pci_bus *bus) { } +static inline void acpi_pci_remove_bus(struct pci_bus *bus) { } +#endif /* CONFIG_ACPI */ + +#ifdef CONFIG_ACPI_APEI +extern bool aer_acpi_firmware_first(void); +#else +static inline bool aer_acpi_firmware_first(void) { return false; } +#endif + +#endif /* _PCI_ACPI_H_ */ diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h new file mode 100644 index 000000000..df28af5ce --- /dev/null +++ b/include/linux/pci-aspm.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * aspm.h + * + * PCI Express ASPM defines and function prototypes + * + * Copyright (C) 2007 Intel Corp. + * Zhang Yanmin (yanmin.zhang@intel.com) + * Shaohua Li (shaohua.li@intel.com) + * + * For more information, please consult the following manuals (look at + * http://www.pcisig.com/ for how to get them): + * + * PCI Express Specification + */ + +#ifndef LINUX_ASPM_H +#define LINUX_ASPM_H + +#include + +#define PCIE_LINK_STATE_L0S 1 +#define PCIE_LINK_STATE_L1 2 +#define PCIE_LINK_STATE_CLKPM 4 + +#ifdef CONFIG_PCIEASPM +void pci_disable_link_state(struct pci_dev *pdev, int state); +void pci_disable_link_state_locked(struct pci_dev *pdev, int state); +void pcie_no_aspm(void); +#else +static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { } +static inline void pcie_no_aspm(void) { } +#endif + +#endif /* LINUX_ASPM_H */ diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h new file mode 100644 index 000000000..7c4b8e272 --- /dev/null +++ b/include/linux/pci-ats.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_PCI_ATS_H +#define LINUX_PCI_ATS_H + +#include + +#ifdef CONFIG_PCI_PRI + +int pci_enable_pri(struct pci_dev *pdev, u32 reqs); +void pci_disable_pri(struct pci_dev *pdev); +void pci_restore_pri_state(struct pci_dev *pdev); +int pci_reset_pri(struct pci_dev *pdev); + +#else /* CONFIG_PCI_PRI */ + +static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs) +{ + return -ENODEV; +} + +static inline void pci_disable_pri(struct pci_dev *pdev) +{ +} + +static inline void pci_restore_pri_state(struct pci_dev *pdev) +{ +} + +static inline int pci_reset_pri(struct pci_dev *pdev) +{ + return -ENODEV; +} + +#endif /* CONFIG_PCI_PRI */ + +#ifdef CONFIG_PCI_PASID + +int pci_enable_pasid(struct pci_dev *pdev, int features); +void pci_disable_pasid(struct pci_dev *pdev); +void pci_restore_pasid_state(struct pci_dev *pdev); +int pci_pasid_features(struct pci_dev *pdev); +int pci_max_pasids(struct pci_dev *pdev); + +#else /* CONFIG_PCI_PASID */ + +static inline int pci_enable_pasid(struct pci_dev *pdev, int features) +{ + return -EINVAL; +} + +static inline void pci_disable_pasid(struct pci_dev *pdev) +{ +} + +static inline void pci_restore_pasid_state(struct pci_dev *pdev) +{ +} + +static inline int pci_pasid_features(struct pci_dev *pdev) +{ + return -EINVAL; +} + +static inline int pci_max_pasids(struct pci_dev *pdev) +{ + return -EINVAL; +} + +#endif /* CONFIG_PCI_PASID */ + + +#endif /* LINUX_PCI_ATS_H*/ diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h new file mode 100644 index 000000000..c3f1b44ad --- /dev/null +++ b/include/linux/pci-dma-compat.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* include this file if the platform implements the dma_ DMA 
Mapping API + * and wants to provide the pci_ DMA Mapping API in terms of it */ + +#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H +#define _ASM_GENERIC_PCI_DMA_COMPAT_H + +#include + +/* This defines the direction arg to the DMA mapping routines. */ +#define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL +#define PCI_DMA_TODEVICE DMA_TO_DEVICE +#define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE +#define PCI_DMA_NONE DMA_NONE + +static inline void * +pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); +} + +static inline void * +pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); +} + +static inline void +pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle); +} + +static inline dma_addr_t +pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) +{ + return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction); +} + +static inline void +pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, + size_t size, int direction) +{ + dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); +} + +static inline dma_addr_t +pci_map_page(struct pci_dev *hwdev, struct page *page, + unsigned long offset, size_t size, int direction) +{ + return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction); +} + +static inline void +pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, + size_t size, int direction) +{ + dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction); +} + +static inline int +pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, + int nents, int direction) +{ + return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); +} + +static inline void +pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, + int nents, int direction) +{ + dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, + size_t size, int direction) +{ + dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, + size_t size, int direction) +{ + dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, + int nelems, int direction) +{ + dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, + int nelems, int direction) +{ + dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); +} + +static inline int +pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) +{ + return dma_mapping_error(&pdev->dev, dma_addr); +} + +#ifdef CONFIG_PCI +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_mask(&dev->dev, mask); +} + +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_coherent_mask(&dev->dev, mask); +} + +static inline int pci_set_dma_max_seg_size(struct 
pci_dev *dev, + unsigned int size) +{ + return dma_set_max_seg_size(&dev->dev, size); +} + +static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, + unsigned long mask) +{ + return dma_set_seg_boundary(&dev->dev, mask); +} +#else +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ return -EIO; } +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ return -EIO; } +static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, + unsigned int size) +{ return -EIO; } +static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, + unsigned long mask) +{ return -EIO; } +#endif + +#endif diff --git a/include/linux/pci-dma.h b/include/linux/pci-dma.h new file mode 100644 index 000000000..0f7aa7353 --- /dev/null +++ b/include/linux/pci-dma.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PCI_DMA_H +#define _LINUX_PCI_DMA_H + +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME); +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME); +#define pci_unmap_addr dma_unmap_addr +#define pci_unmap_addr_set dma_unmap_addr_set +#define pci_unmap_len dma_unmap_len +#define pci_unmap_len_set dma_unmap_len_set + +#endif diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h new file mode 100644 index 000000000..29efa09d6 --- /dev/null +++ b/include/linux/pci-ecam.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2016 Broadcom + */ +#ifndef DRIVERS_PCI_ECAM_H +#define DRIVERS_PCI_ECAM_H + +#include +#include +#include + +/* + * struct to hold pci ops and bus shift of the config window + * for a PCI controller. + */ +struct pci_config_window; +struct pci_ecam_ops { + unsigned int bus_shift; + struct pci_ops pci_ops; + int (*init)(struct pci_config_window *); +}; + +/* + * struct to hold the mappings of a config space window. This + * is expected to be used as sysdata for PCI controllers that + * use ECAM. 
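+ *
+ * For illustration only, a rough sketch of how an ECAM host bridge
+ * driver might use this; cfgres, busr and the pci_host_bridge pointer
+ * bridge are hypothetical and assumed to be set up by the caller:
+ *
+ *	struct pci_config_window *cfg;
+ *
+ *	cfg = pci_ecam_create(dev, cfgres, busr, &pci_generic_ecam_ops);
+ *	if (IS_ERR(cfg))
+ *		return PTR_ERR(cfg);
+ *	bridge->sysdata = cfg;		// cfg becomes the bus sysdata
+ *	bridge->ops = &cfg->ops->pci_ops;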
+ */ +struct pci_config_window { + struct resource res; + struct resource busr; + void *priv; + struct pci_ecam_ops *ops; + union { + void __iomem *win; /* 64-bit single mapping */ + void __iomem **winp; /* 32-bit per-bus mapping */ + }; + struct device *parent;/* ECAM res was from this dev */ +}; + +/* create and free pci_config_window */ +struct pci_config_window *pci_ecam_create(struct device *dev, + struct resource *cfgres, struct resource *busr, + struct pci_ecam_ops *ops); +void pci_ecam_free(struct pci_config_window *cfg); + +/* map_bus when ->sysdata is an instance of pci_config_window */ +void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, + int where); +/* default ECAM ops */ +extern struct pci_ecam_ops pci_generic_ecam_ops; + +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) +extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */ +extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */ +extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ +extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ +extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ +extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ +#endif + +#ifdef CONFIG_PCI_HOST_COMMON +/* for DT-based PCI controllers that support ECAM */ +int pci_host_common_probe(struct platform_device *pdev, + struct pci_ecam_ops *ops); +int pci_host_common_remove(struct platform_device *pdev); +#endif +#endif diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h new file mode 100644 index 000000000..f42b0fd4b --- /dev/null +++ b/include/linux/pci-ep-cfs.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/** + * PCI Endpoint ConfigFS header file + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I + */ + +#ifndef __LINUX_PCI_EP_CFS_H +#define __LINUX_PCI_EP_CFS_H + +#include + +#ifdef CONFIG_PCI_ENDPOINT_CONFIGFS +struct config_group *pci_ep_cfs_add_epc_group(const char *name); +void pci_ep_cfs_remove_epc_group(struct config_group *group); +struct config_group *pci_ep_cfs_add_epf_group(const char *name); +void pci_ep_cfs_remove_epf_group(struct config_group *group); +#else +static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name) +{ + return 0; +} + +static inline void pci_ep_cfs_remove_epc_group(struct config_group *group) +{ +} + +static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name) +{ + return 0; +} + +static inline void pci_ep_cfs_remove_epf_group(struct config_group *group) +{ +} +#endif +#endif /* __LINUX_PCI_EP_CFS_H */ diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h new file mode 100644 index 000000000..931fda3e5 --- /dev/null +++ b/include/linux/pci-epc.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * PCI Endpoint *Controller* (EPC) header file + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I + */ + +#ifndef __LINUX_PCI_EPC_H +#define __LINUX_PCI_EPC_H + +#include + +struct pci_epc; + +enum pci_epc_irq_type { + PCI_EPC_IRQ_UNKNOWN, + PCI_EPC_IRQ_LEGACY, + PCI_EPC_IRQ_MSI, + PCI_EPC_IRQ_MSIX, +}; + +/** + * struct pci_epc_ops - set of function pointers for performing EPC operations + * @write_header: ops to populate configuration space header + * @set_bar: ops to configure the BAR + * @clear_bar: ops to reset the BAR + * @map_addr: ops to map CPU address to PCI address + * @unmap_addr: ops to unmap CPU address and PCI address + * 
@set_msi: ops to set the requested number of MSI interrupts in the MSI + * capability register + * @get_msi: ops to get the number of MSI interrupts allocated by the RC from + * the MSI capability register + * @set_msix: ops to set the requested number of MSI-X interrupts in the + * MSI-X capability register + * @get_msix: ops to get the number of MSI-X interrupts allocated by the RC + * from the MSI-X capability register + * @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt + * @start: ops to start the PCI link + * @stop: ops to stop the PCI link + * @owner: the module owner containing the ops + */ +struct pci_epc_ops { + int (*write_header)(struct pci_epc *epc, u8 func_no, + struct pci_epf_header *hdr); + int (*set_bar)(struct pci_epc *epc, u8 func_no, + struct pci_epf_bar *epf_bar); + void (*clear_bar)(struct pci_epc *epc, u8 func_no, + struct pci_epf_bar *epf_bar); + int (*map_addr)(struct pci_epc *epc, u8 func_no, + phys_addr_t addr, u64 pci_addr, size_t size); + void (*unmap_addr)(struct pci_epc *epc, u8 func_no, + phys_addr_t addr); + int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts); + int (*get_msi)(struct pci_epc *epc, u8 func_no); + int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts); + int (*get_msix)(struct pci_epc *epc, u8 func_no); + int (*raise_irq)(struct pci_epc *epc, u8 func_no, + enum pci_epc_irq_type type, u16 interrupt_num); + int (*start)(struct pci_epc *epc); + void (*stop)(struct pci_epc *epc); + struct module *owner; +}; + +/** + * struct pci_epc_mem - address space of the endpoint controller + * @phys_base: physical base address of the PCI address space + * @size: the size of the PCI address space + * @bitmap: bitmap to manage the PCI address space + * @pages: number of bits representing the address region + * @page_size: size of each page + * @lock: mutex to protect bitmap + */ +struct pci_epc_mem { + phys_addr_t phys_base; + size_t size; + unsigned long *bitmap; + size_t page_size; + int pages; + /* mutex to protect against concurrent access for memory allocation*/ + struct mutex lock; +}; + +/** + * struct pci_epc - represents the PCI EPC device + * @dev: PCI EPC device + * @pci_epf: list of endpoint functions present in this EPC device + * @ops: function pointers for performing endpoint operations + * @mem: address space of the endpoint controller + * @max_functions: max number of functions that can be configured in this EPC + * @group: configfs group representing the PCI EPC device + * @lock: spinlock to protect pci_epc ops + */ +struct pci_epc { + struct device dev; + struct list_head pci_epf; + const struct pci_epc_ops *ops; + struct pci_epc_mem *mem; + u8 max_functions; + struct config_group *group; + /* spinlock to protect against concurrent access of EP controller */ + spinlock_t lock; + unsigned int features; +}; + +#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0) +#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3)) +#define EPC_FEATURE_MSIX_AVAILABLE BIT(4) +#define EPC_FEATURE_SET_BAR(features, bar) \ + (features |= (EPC_FEATURE_BAR_MASK & (bar << 1))) +#define EPC_FEATURE_GET_BAR(features) \ + ((features & EPC_FEATURE_BAR_MASK) >> 1) + +#define to_pci_epc(device) container_of((device), struct pci_epc, dev) + +#define pci_epc_create(dev, ops) \ + __pci_epc_create((dev), (ops), THIS_MODULE) +#define devm_pci_epc_create(dev, ops) \ + __devm_pci_epc_create((dev), (ops), THIS_MODULE) + +#define pci_epc_mem_init(epc, phys_addr, size) \ + __pci_epc_mem_init((epc), (phys_addr), (size), PAGE_SIZE) + +static inline void 
epc_set_drvdata(struct pci_epc *epc, void *data) +{ + dev_set_drvdata(&epc->dev, data); +} + +static inline void *epc_get_drvdata(struct pci_epc *epc) +{ + return dev_get_drvdata(&epc->dev); +} + +struct pci_epc * +__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, + struct module *owner); +struct pci_epc * +__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, + struct module *owner); +void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc); +void pci_epc_destroy(struct pci_epc *epc); +int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); +void pci_epc_linkup(struct pci_epc *epc); +void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); +int pci_epc_write_header(struct pci_epc *epc, u8 func_no, + struct pci_epf_header *hdr); +int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, + struct pci_epf_bar *epf_bar); +void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, + struct pci_epf_bar *epf_bar); +int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, + phys_addr_t phys_addr, + u64 pci_addr, size_t size); +void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, + phys_addr_t phys_addr); +int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts); +int pci_epc_get_msi(struct pci_epc *epc, u8 func_no); +int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts); +int pci_epc_get_msix(struct pci_epc *epc, u8 func_no); +int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, + enum pci_epc_irq_type type, u16 interrupt_num); +int pci_epc_start(struct pci_epc *epc); +void pci_epc_stop(struct pci_epc *epc); +struct pci_epc *pci_epc_get(const char *epc_name); +void pci_epc_put(struct pci_epc *epc); + +int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size, + size_t page_size); +void pci_epc_mem_exit(struct pci_epc *epc); +void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, + phys_addr_t *phys_addr, size_t size); +void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, + void __iomem *virt_addr, size_t size); +#endif /* __LINUX_PCI_EPC_H */ diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h new file mode 100644 index 000000000..ec02f5875 --- /dev/null +++ b/include/linux/pci-epf.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * PCI Endpoint *Function* (EPF) header file + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I + */ + +#ifndef __LINUX_PCI_EPF_H +#define __LINUX_PCI_EPF_H + +#include +#include +#include + +struct pci_epf; + +enum pci_barno { + BAR_0, + BAR_1, + BAR_2, + BAR_3, + BAR_4, + BAR_5, +}; + +/** + * struct pci_epf_header - represents standard configuration header + * @vendorid: identifies device manufacturer + * @deviceid: identifies a particular device + * @revid: specifies a device-specific revision identifier + * @progif_code: identifies a specific register-level programming interface + * @subclass_code: identifies more specifically the function of the device + * @baseclass_code: broadly classifies the type of function the device performs + * @cache_line_size: specifies the system cacheline size in units of DWORDs + * @subsys_vendor_id: vendor of the add-in card or subsystem + * @subsys_id: id specific to vendor + * @interrupt_pin: interrupt pin the device (or device function) uses + */ +struct pci_epf_header { + u16 vendorid; + u16 deviceid; + u8 revid; + u8 progif_code; + u8 subclass_code; + u8 baseclass_code; + u8 cache_line_size; + u16 subsys_vendor_id; + u16 subsys_id; + enum 
pci_interrupt_pin interrupt_pin; +}; + +/** + * struct pci_epf_ops - set of function pointers for performing EPF operations + * @bind: ops to perform when a EPC device has been bound to EPF device + * @unbind: ops to perform when a binding has been lost between a EPC device + * and EPF device + * @linkup: ops to perform when the EPC device has established a connection with + * a host system + */ +struct pci_epf_ops { + int (*bind)(struct pci_epf *epf); + void (*unbind)(struct pci_epf *epf); + void (*linkup)(struct pci_epf *epf); +}; + +/** + * struct pci_epf_driver - represents the PCI EPF driver + * @probe: ops to perform when a new EPF device has been bound to the EPF driver + * @remove: ops to perform when the binding between the EPF device and EPF + * driver is broken + * @driver: PCI EPF driver + * @ops: set of function pointers for performing EPF operations + * @owner: the owner of the module that registers the PCI EPF driver + * @epf_group: list of configfs group corresponding to the PCI EPF driver + * @id_table: identifies EPF devices for probing + */ +struct pci_epf_driver { + int (*probe)(struct pci_epf *epf); + int (*remove)(struct pci_epf *epf); + + struct device_driver driver; + struct pci_epf_ops *ops; + struct module *owner; + struct list_head epf_group; + const struct pci_epf_device_id *id_table; +}; + +#define to_pci_epf_driver(drv) (container_of((drv), struct pci_epf_driver, \ + driver)) + +/** + * struct pci_epf_bar - represents the BAR of EPF device + * @phys_addr: physical address that should be mapped to the BAR + * @size: the size of the address space present in BAR + */ +struct pci_epf_bar { + dma_addr_t phys_addr; + size_t size; + enum pci_barno barno; + int flags; +}; + +/** + * struct pci_epf - represents the PCI EPF device + * @dev: the PCI EPF device + * @name: the name of the PCI EPF device + * @header: represents standard configuration header + * @bar: represents the BAR of EPF device + * @msi_interrupts: number of MSI interrupts required by this function + * @func_no: unique function number within this endpoint device + * @epc: the EPC device to which this EPF device is bound + * @driver: the EPF driver to which this EPF device is bound + * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc + */ +struct pci_epf { + struct device dev; + const char *name; + struct pci_epf_header *header; + struct pci_epf_bar bar[6]; + u8 msi_interrupts; + u16 msix_interrupts; + u8 func_no; + + struct pci_epc *epc; + struct pci_epf_driver *driver; + struct list_head list; +}; + +#define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev) + +#define pci_epf_register_driver(driver) \ + __pci_epf_register_driver((driver), THIS_MODULE) + +static inline void epf_set_drvdata(struct pci_epf *epf, void *data) +{ + dev_set_drvdata(&epf->dev, data); +} + +static inline void *epf_get_drvdata(struct pci_epf *epf) +{ + return dev_get_drvdata(&epf->dev); +} + +const struct pci_epf_device_id * +pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf); +struct pci_epf *pci_epf_create(const char *name); +void pci_epf_destroy(struct pci_epf *epf); +int __pci_epf_register_driver(struct pci_epf_driver *driver, + struct module *owner); +void pci_epf_unregister_driver(struct pci_epf_driver *driver); +void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar); +void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar); +int pci_epf_bind(struct pci_epf *epf); +void pci_epf_unbind(struct pci_epf *epf); +void 
pci_epf_linkup(struct pci_epf *epf); +#endif /* __LINUX_PCI_EPF_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h new file mode 100644 index 000000000..3e06e9790 --- /dev/null +++ b/include/linux/pci.h @@ -0,0 +1,2347 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * pci.h + * + * PCI defines and function prototypes + * Copyright 1994, Drew Eckhardt + * Copyright 1997--1999 Martin Mares + * + * For more information, please consult the following manuals (look at + * http://www.pcisig.com/ for how to get them): + * + * PCI BIOS Specification + * PCI Local Bus Specification + * PCI to PCI Bridge Specification + * PCI System Design Guide + */ +#ifndef LINUX_PCI_H +#define LINUX_PCI_H + + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * The PCI interface treats multi-function devices as independent + * devices. The slot/function address of each device is encoded + * in a single byte as follows: + * + * 7:3 = slot + * 2:0 = function + * + * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h. + * In the interest of not exposing interfaces to user-space unnecessarily, + * the following kernel-only defines are being added here. + */ +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ +#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) + +/* pci_slot represents a physical slot */ +struct pci_slot { + struct pci_bus *bus; /* Bus this slot is on */ + struct list_head list; /* Node in list of slots */ + struct hotplug_slot *hotplug; /* Hotplug info (move here) */ + unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ + struct kobject kobj; +}; + +static inline const char *pci_slot_name(const struct pci_slot *slot) +{ + return kobject_name(&slot->kobj); +} + +/* File state for mmap()s on /proc/bus/pci/X/Y */ +enum pci_mmap_state { + pci_mmap_io, + pci_mmap_mem +}; + +/* For PCI devices, the region numbers are assigned this way: */ +enum { + /* #0-5: standard PCI resources */ + PCI_STD_RESOURCES, + PCI_STD_RESOURCE_END = 5, + + /* #6: expansion ROM resource */ + PCI_ROM_RESOURCE, + + /* Device-specific resources */ +#ifdef CONFIG_PCI_IOV + PCI_IOV_RESOURCES, + PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, +#endif + + /* Resources assigned to buses behind the bridge */ +#define PCI_BRIDGE_RESOURCE_NUM 4 + + PCI_BRIDGE_RESOURCES, + PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + + PCI_BRIDGE_RESOURCE_NUM - 1, + + /* Total resources associated with a PCI device */ + PCI_NUM_RESOURCES, + + /* Preserve this for compatibility */ + DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES, +}; + +/** + * enum pci_interrupt_pin - PCI INTx interrupt values + * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt + * @PCI_INTERRUPT_INTA: PCI INTA pin + * @PCI_INTERRUPT_INTB: PCI INTB pin + * @PCI_INTERRUPT_INTC: PCI INTC pin + * @PCI_INTERRUPT_INTD: PCI INTD pin + * + * Corresponds to values for legacy PCI INTx interrupts, as can be found in the + * PCI_INTERRUPT_PIN register. + */ +enum pci_interrupt_pin { + PCI_INTERRUPT_UNKNOWN, + PCI_INTERRUPT_INTA, + PCI_INTERRUPT_INTB, + PCI_INTERRUPT_INTC, + PCI_INTERRUPT_INTD, +}; + +/* The number of legacy PCI INTx interrupts */ +#define PCI_NUM_INTX 4 + +/* + * pci_power_t values must match the bits in the Capabilities PME_Support + * and Control/Status PowerState fields in the Power Management capability. 
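+ * (For example, bit PCI_D3hot (bit 3) being set in a device's
+ * pme_support mask means PME# can be asserted from D3hot.)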
+ */ +typedef int __bitwise pci_power_t; + +#define PCI_D0 ((pci_power_t __force) 0) +#define PCI_D1 ((pci_power_t __force) 1) +#define PCI_D2 ((pci_power_t __force) 2) +#define PCI_D3hot ((pci_power_t __force) 3) +#define PCI_D3cold ((pci_power_t __force) 4) +#define PCI_UNKNOWN ((pci_power_t __force) 5) +#define PCI_POWER_ERROR ((pci_power_t __force) -1) + +/* Remember to update this when the list above changes! */ +extern const char *pci_power_names[]; + +static inline const char *pci_power_name(pci_power_t state) +{ + return pci_power_names[1 + (__force int) state]; +} + +#define PCI_PM_D2_DELAY 200 +#define PCI_PM_D3_WAIT 10 +#define PCI_PM_D3COLD_WAIT 100 +#define PCI_PM_BUS_WAIT 50 + +/** + * The pci_channel state describes connectivity between the CPU and + * the PCI device. If some PCI bus between here and the PCI device + * has crashed or locked up, this info is reflected here. + */ +typedef unsigned int __bitwise pci_channel_state_t; + +enum pci_channel_state { + /* I/O channel is in normal state */ + pci_channel_io_normal = (__force pci_channel_state_t) 1, + + /* I/O to channel is blocked */ + pci_channel_io_frozen = (__force pci_channel_state_t) 2, + + /* PCI card is dead */ + pci_channel_io_perm_failure = (__force pci_channel_state_t) 3, +}; + +typedef unsigned int __bitwise pcie_reset_state_t; + +enum pcie_reset_state { + /* Reset is NOT asserted (Use to deassert reset) */ + pcie_deassert_reset = (__force pcie_reset_state_t) 1, + + /* Use #PERST to reset PCIe device */ + pcie_warm_reset = (__force pcie_reset_state_t) 2, + + /* Use PCIe Hot Reset to reset device */ + pcie_hot_reset = (__force pcie_reset_state_t) 3 +}; + +typedef unsigned short __bitwise pci_dev_flags_t; +enum pci_dev_flags { + /* INTX_DISABLE in PCI_COMMAND register disables MSI too */ + PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0), + /* Device configuration is irrevocably lost if disabled into D3 */ + PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1), + /* Provide indication device is assigned by a Virtual Machine Manager */ + PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2), + /* Flag for quirk use to store if quirk-specific ACS is enabled */ + PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3), + /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ + PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), + /* Do not use bus resets for device */ + PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6), + /* Do not use PM reset even if device advertises NoSoftRst- */ + PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), + /* Get VPD from function 0 VPD */ + PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), + /* A non-root bridge where translation occurs, stop alias search here */ + PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), + /* Do not use FLR even if device advertises PCI_AF_CAP */ + PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), + /* Don't use Relaxed Ordering for TLPs directed at this device */ + PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11), + /* Device does honor MSI masking despite saying otherwise */ + PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12), +}; + +enum pci_irq_reroute_variant { + INTEL_IRQ_REROUTE_VARIANT = 1, + MAX_IRQ_REROUTE_VARIANTS = 3 +}; + +typedef unsigned short __bitwise pci_bus_flags_t; +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, + 
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, + PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4, + PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8, +}; + +/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */ +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 0x08, + PCIE_LNK_X12 = 0x0c, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xff, +}; + +/* Based on the PCI Hotplug Spec, but some values are made up by us */ +enum pci_bus_speed { + PCI_SPEED_33MHz = 0x00, + PCI_SPEED_66MHz = 0x01, + PCI_SPEED_66MHz_PCIX = 0x02, + PCI_SPEED_100MHz_PCIX = 0x03, + PCI_SPEED_133MHz_PCIX = 0x04, + PCI_SPEED_66MHz_PCIX_ECC = 0x05, + PCI_SPEED_100MHz_PCIX_ECC = 0x06, + PCI_SPEED_133MHz_PCIX_ECC = 0x07, + PCI_SPEED_66MHz_PCIX_266 = 0x09, + PCI_SPEED_100MHz_PCIX_266 = 0x0a, + PCI_SPEED_133MHz_PCIX_266 = 0x0b, + AGP_UNKNOWN = 0x0c, + AGP_1X = 0x0d, + AGP_2X = 0x0e, + AGP_4X = 0x0f, + AGP_8X = 0x10, + PCI_SPEED_66MHz_PCIX_533 = 0x11, + PCI_SPEED_100MHz_PCIX_533 = 0x12, + PCI_SPEED_133MHz_PCIX_533 = 0x13, + PCIE_SPEED_2_5GT = 0x14, + PCIE_SPEED_5_0GT = 0x15, + PCIE_SPEED_8_0GT = 0x16, + PCIE_SPEED_16_0GT = 0x17, + PCI_SPEED_UNKNOWN = 0xff, +}; + +enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev); +enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev); + +struct pci_cap_saved_data { + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[0]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; +}; + +struct irq_affinity; +struct pcie_link_state; +struct pci_vpd; +struct pci_sriov; +struct pci_ats; + +/* The pci_dev structure describes PCI devices */ +struct pci_dev { + struct list_head bus_list; /* Node in per-bus list */ + struct pci_bus *bus; /* Bus this device is on */ + struct pci_bus *subordinate; /* Bus this device bridges to */ + + void *sysdata; /* Hook for sys-specific extension */ + struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */ + struct pci_slot *slot; /* Physical slot this device is in */ + + unsigned int devfn; /* Encoded device & function index */ + unsigned short vendor; + unsigned short device; + unsigned short subsystem_vendor; + unsigned short subsystem_device; + unsigned int class; /* 3 bytes: (base,sub,prog-if) */ + u8 revision; /* PCI revision, low byte of class word */ + u8 hdr_type; /* PCI header type (`multi' flag masked out) */ +#ifdef CONFIG_PCIEAER + u16 aer_cap; /* AER capability offset */ + struct aer_stats *aer_stats; /* AER stats for this device */ +#endif + u8 pcie_cap; /* PCIe capability offset */ + u8 msi_cap; /* MSI capability offset */ + u8 msix_cap; /* MSI-X capability offset */ + u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */ + u8 rom_base_reg; /* Config register controlling ROM */ + u8 pin; /* Interrupt pin this device uses */ + u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */ + unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */ + + struct pci_driver *driver; /* Driver bound to this device */ + u64 dma_mask; /* Mask of the bits of bus address this + device implements. Normally this is + 0xffffffff. You only need to change + this if your device has broken DMA + or supports 64-bit transfers. */ + + struct device_dma_parameters dma_parms; + + pci_power_t current_state; /* Current operating state. In ACPI, + this is D0-D3, D0 being fully + functional, and D3 being off. 
*/ + u8 pm_cap; /* PM capability offset */ + unsigned int pme_support:5; /* Bitmask of states from which PME# + can be generated */ + unsigned int pme_poll:1; /* Poll device's PME status bit */ + unsigned int d1_support:1; /* Low power state D1 is supported */ + unsigned int d2_support:1; /* Low power state D2 is supported */ + unsigned int no_d1d2:1; /* D1 and D2 are forbidden */ + unsigned int no_d3cold:1; /* D3cold is forbidden */ + unsigned int bridge_d3:1; /* Allow D3 for bridge */ + unsigned int d3cold_allowed:1; /* D3cold is allowed by user */ + unsigned int mmio_always_on:1; /* Disallow turning off io/mem + decoding during BAR sizing */ + unsigned int wakeup_prepared:1; + unsigned int runtime_d3cold:1; /* Whether go through runtime + D3cold, not set for devices + powered on/off by the + corresponding bridge */ + unsigned int ignore_hotplug:1; /* Ignore hotplug events */ + unsigned int hotplug_user_indicators:1; /* SlotCtl indicators + controlled exclusively by + user sysfs */ + unsigned int clear_retrain_link:1; /* Need to clear Retrain Link + bit manually */ + unsigned int d3_delay; /* D3->D0 transition time in ms */ + unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ + +#ifdef CONFIG_PCIEASPM + struct pcie_link_state *link_state; /* ASPM link state */ + unsigned int ltr_path:1; /* Latency Tolerance Reporting + supported from root to here */ +#endif + unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */ + + pci_channel_state_t error_state; /* Current connectivity state */ + struct device dev; /* Generic device interface */ + + int cfg_size; /* Size of config space */ + + /* + * Instead of touching interrupt line and base address registers + * directly, use the values stored here. They might be different! + */ + unsigned int irq; + struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ + + bool match_driver; /* Skip attaching driver */ + + unsigned int transparent:1; /* Subtractive decode bridge */ + unsigned int io_window:1; /* Bridge has I/O window */ + unsigned int pref_window:1; /* Bridge has pref mem window */ + unsigned int pref_64_window:1; /* Pref mem window is 64-bit */ + unsigned int multifunction:1; /* Multi-function device */ + + unsigned int is_busmaster:1; /* Is busmaster */ + unsigned int no_msi:1; /* May not use MSI */ + unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ + unsigned int block_cfg_access:1; /* Config space access blocked */ + unsigned int broken_parity_status:1; /* Generates false positive parity */ + unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */ + unsigned int msi_enabled:1; + unsigned int msix_enabled:1; + unsigned int ari_enabled:1; /* ARI forwarding */ + unsigned int ats_enabled:1; /* Address Translation Svc */ + unsigned int pasid_enabled:1; /* Process Address Space ID */ + unsigned int pri_enabled:1; /* Page Request Interface */ + unsigned int is_managed:1; + unsigned int needs_freset:1; /* Requires fundamental reset */ + unsigned int state_saved:1; + unsigned int is_physfn:1; + unsigned int is_virtfn:1; + unsigned int reset_fn:1; + unsigned int is_hotplug_bridge:1; + unsigned int shpc_managed:1; /* SHPC owned by shpchp */ + unsigned int is_thunderbolt:1; /* Thunderbolt controller */ + unsigned int __aer_firmware_first_valid:1; + unsigned int __aer_firmware_first:1; + unsigned int broken_intx_masking:1; /* INTx masking can't be used */ + unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ + unsigned int irq_managed:1; + unsigned int 
has_secondary_link:1; + unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ + unsigned int is_probed:1; /* Device probing in progress */ + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; /* pci_enable_device has been called */ + + u32 saved_config_space[16]; /* Config space saved at suspend time */ + struct hlist_head saved_cap_space; + struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */ + int rom_attr_enabled; /* Display of ROM attribute enabled? */ + struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ + struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ + +#ifdef CONFIG_HOTPLUG_PCI_PCIE + unsigned int broken_cmd_compl:1; /* No compl for some cmds */ +#endif +#ifdef CONFIG_PCIE_PTM + unsigned int ptm_root:1; + unsigned int ptm_enabled:1; + u8 ptm_granularity; +#endif +#ifdef CONFIG_PCI_MSI + const struct attribute_group **msi_irq_groups; +#endif + struct pci_vpd *vpd; +#ifdef CONFIG_PCI_ATS + union { + struct pci_sriov *sriov; /* PF: SR-IOV info */ + struct pci_dev *physfn; /* VF: related PF */ + }; + u16 ats_cap; /* ATS Capability offset */ + u8 ats_stu; /* ATS Smallest Translation Unit */ + atomic_t ats_ref_cnt; /* Number of VFs with ATS enabled */ +#endif +#ifdef CONFIG_PCI_PRI + u32 pri_reqs_alloc; /* Number of PRI requests allocated */ +#endif +#ifdef CONFIG_PCI_PASID + u16 pasid_features; +#endif + phys_addr_t rom; /* Physical address if not from BAR */ + size_t romlen; /* Length if not from BAR */ + char *driver_override; /* Driver name to force a match */ + + unsigned long priv_flags; /* Private flags for the PCI driver */ +}; + +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif + return dev; +} + +struct pci_dev *pci_alloc_dev(struct pci_bus *bus); + +#define to_pci_dev(n) container_of(n, struct pci_dev, dev) +#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) + +static inline int pci_channel_offline(struct pci_dev *pdev) +{ + return (pdev->error_state != pci_channel_io_normal); +} + +struct pci_host_bridge { + struct device dev; + struct pci_bus *bus; /* Root bus */ + struct pci_ops *ops; + void *sysdata; + int busnr; + struct list_head windows; /* resource_entry */ + u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ + int (*map_irq)(const struct pci_dev *, u8, u8); + void (*release_fn)(struct pci_host_bridge *); + void *release_data; + struct msi_controller *msi; + unsigned int ignore_reset_delay:1; /* For entire hierarchy */ + unsigned int no_ext_tags:1; /* No Extended Tags */ + unsigned int native_aer:1; /* OS may use PCIe AER */ + unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */ + unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */ + unsigned int native_pme:1; /* OS may use PCIe PME */ + unsigned int native_ltr:1; /* OS may use PCIe LTR */ + /* Resource alignment requirements */ + resource_size_t (*align_resource)(struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align); + unsigned long private[0] ____cacheline_aligned; +}; + +#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) + +static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge) +{ + return (void *)bridge->private; +} + +static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv) +{ + return 
container_of(priv, struct pci_host_bridge, private); +} + +struct pci_host_bridge *pci_alloc_host_bridge(size_t priv); +struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, + size_t priv); +void pci_free_host_bridge(struct pci_host_bridge *bridge); +struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); + +void pci_set_host_bridge_release(struct pci_host_bridge *bridge, + void (*release_fn)(struct pci_host_bridge *), + void *release_data); + +int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); + +/* + * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond + * to P2P or CardBus bridge windows) go in a table. Additional ones (for + * buses below host bridges or subtractive decode bridges) go in the list. + * Use pci_bus_for_each_resource() to iterate through all the resources. + */ + +/* + * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly + * and there's no way to program the bridge with the details of the window. + * This does not apply to ACPI _CRS windows, even with the _DEC subtractive- + * decode bit set, because they are explicit and can be programmed with _SRS. + */ +#define PCI_SUBTRACTIVE_DECODE 0x1 + +struct pci_bus_resource { + struct list_head list; + struct resource *res; + unsigned int flags; +}; + +#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ + +struct pci_bus { + struct list_head node; /* Node in list of buses */ + struct pci_bus *parent; /* Parent bus this bridge is on */ + struct list_head children; /* List of child buses */ + struct list_head devices; /* List of devices on this bus */ + struct pci_dev *self; /* Bridge device as seen by parent */ + struct list_head slots; /* List of slots on this bus; + protected by pci_slot_mutex */ + struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; + struct list_head resources; /* Address space routed to this bus */ + struct resource busn_res; /* Bus numbers routed to this bus */ + + struct pci_ops *ops; /* Configuration access functions */ + struct msi_controller *msi; /* MSI controller */ + void *sysdata; /* Hook for sys-specific extension */ + struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */ + + unsigned char number; /* Bus number */ + unsigned char primary; /* Number of primary bridge */ + unsigned char max_bus_speed; /* enum pci_bus_speed */ + unsigned char cur_bus_speed; /* enum pci_bus_speed */ +#ifdef CONFIG_PCI_DOMAINS_GENERIC + int domain_nr; +#endif + + char name[48]; + + unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */ + pci_bus_flags_t bus_flags; /* Inherited by child buses */ + struct device *bridge; + struct device dev; + struct bin_attribute *legacy_io; /* Legacy I/O for this bus */ + struct bin_attribute *legacy_mem; /* Legacy mem */ + unsigned int is_added:1; + unsigned int unsafe_warn:1; /* warned about RW1C config write */ +}; + +#define to_pci_bus(n) container_of(n, struct pci_bus, dev) + +/* + * Returns true if the PCI bus is root (behind host-PCI bridge), + * false otherwise + * + * Some code assumes that "bus->self == NULL" means that bus is a root bus. + * This is incorrect because "virtual" buses added for SR-IOV (via + * virtfn_add_bus()) have "bus->self == NULL" but are not root buses. 
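/*
 * Illustrative sketch of the host-bridge allocation helpers declared above,
 * as a native host controller driver might use them.  "my_pcie", "my_pcie_ops"
 * and the probe function are hypothetical; resource parsing and error paths
 * are trimmed.
 */
#include <linux/of_pci.h>
#include <linux/platform_device.h>

struct my_pcie {
	void __iomem *cfg_base;			/* driver-private state */
};

static struct pci_ops my_pcie_ops;		/* filled in, e.g., as in the pci_ops sketch below */

static int my_pcie_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *bridge;
	struct my_pcie *pcie;

	/* bridge and driver-private data come from one managed allocation */
	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;
	pcie = pci_host_bridge_priv(bridge);

	/* ... map registers, fill bridge->windows with host resources ... */

	bridge->sysdata = pcie;
	bridge->ops = &my_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	return pci_host_probe(bridge);		/* scan and add the root bus */
}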
+ */ +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} + +/** + * pci_is_bridge - check if the PCI device is a bridge + * @dev: PCI device + * + * Return true if the PCI device is bridge whether it has subordinate + * or not. + */ +static inline bool pci_is_bridge(struct pci_dev *dev) +{ + return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || + dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; +} + +#define for_each_pci_bridge(dev, bus) \ + list_for_each_entry(dev, &bus->devices, bus_list) \ + if (!pci_is_bridge(dev)) {} else + +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} + +struct device *pci_get_host_bridge_device(struct pci_dev *dev); +void pci_put_host_bridge_device(struct device *dev); + +#ifdef CONFIG_PCI_MSI +static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) +{ + return pci_dev->msi_enabled || pci_dev->msix_enabled; +} +#else +static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } +#endif + +/* Error values that may be returned by PCI functions */ +#define PCIBIOS_SUCCESSFUL 0x00 +#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 +#define PCIBIOS_BAD_VENDOR_ID 0x83 +#define PCIBIOS_DEVICE_NOT_FOUND 0x86 +#define PCIBIOS_BAD_REGISTER_NUMBER 0x87 +#define PCIBIOS_SET_FAILED 0x88 +#define PCIBIOS_BUFFER_TOO_SMALL 0x89 + +/* Translate above to generic errno for passing back through non-PCI code */ +static inline int pcibios_err_to_errno(int err) +{ + if (err <= PCIBIOS_SUCCESSFUL) + return err; /* Assume already errno */ + + switch (err) { + case PCIBIOS_FUNC_NOT_SUPPORTED: + return -ENOENT; + case PCIBIOS_BAD_VENDOR_ID: + return -ENOTTY; + case PCIBIOS_DEVICE_NOT_FOUND: + return -ENODEV; + case PCIBIOS_BAD_REGISTER_NUMBER: + return -EFAULT; + case PCIBIOS_SET_FAILED: + return -EIO; + case PCIBIOS_BUFFER_TOO_SMALL: + return -ENOSPC; + } + + return -ERANGE; +} + +/* Low-level architecture-dependent routines */ + +struct pci_ops { + int (*add_bus)(struct pci_bus *bus); + void (*remove_bus)(struct pci_bus *bus); + void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where); + int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); + int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); +}; + +/* + * ACPI needs to be able to access PCI config space before we've done a + * PCI bus scan and created pci_bus structures. + */ +int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val); +int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +typedef u64 pci_bus_addr_t; +#else +typedef u32 pci_bus_addr_t; +#endif + +struct pci_bus_region { + pci_bus_addr_t start; + pci_bus_addr_t end; +}; + +struct pci_dynids { + spinlock_t lock; /* Protects list, index */ + struct list_head list; /* For IDs added at runtime */ +}; + + +/* + * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides + * a set of callbacks in struct pci_error_handlers, that device driver + * will be notified of PCI bus errors, and will be driven to recovery + * when an error occurs. 
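/*
 * Sketch of a minimal struct pci_ops for an ECAM-like controller, using the
 * generic accessors declared later in this header.  The "my_ecam_*" names are
 * hypothetical; the 20/12-bit shifts follow the standard ECAM layout.
 */
struct my_ecam_pcie {
	void __iomem *cfg_base;			/* start of the config window */
};

static void __iomem *my_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
				     int where)
{
	struct my_ecam_pcie *pcie = bus->sysdata;

	/* ECAM: bus number at bit 20, devfn at bit 12, register offset below */
	return pcie->cfg_base + (bus->number << 20) + (devfn << 12) + where;
}

static struct pci_ops my_ecam_pci_ops = {
	.map_bus = my_ecam_map_bus,
	.read	 = pci_generic_config_read,
	.write	 = pci_generic_config_write,
};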
+ */ + +typedef unsigned int __bitwise pci_ers_result_t; + +enum pci_ers_result { + /* No result/none/not supported in device driver */ + PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, + + /* Device driver can recover without slot reset */ + PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, + + /* Device driver wants slot to be reset */ + PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, + + /* Device has completely failed, is unrecoverable */ + PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4, + + /* Device driver is fully recovered and operational */ + PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5, + + /* No AER capabilities registered for the driver */ + PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6, +}; + +/* PCI bus error event callbacks */ +struct pci_error_handlers { + /* PCI bus error detected on this device */ + pci_ers_result_t (*error_detected)(struct pci_dev *dev, + enum pci_channel_state error); + + /* MMIO has been re-enabled, but not DMA */ + pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); + + /* PCI slot has been reset */ + pci_ers_result_t (*slot_reset)(struct pci_dev *dev); + + /* PCI function reset prepare or completed */ + void (*reset_prepare)(struct pci_dev *dev); + void (*reset_done)(struct pci_dev *dev); + + /* Device driver may resume normal operations */ + void (*resume)(struct pci_dev *dev); +}; + + +struct module; +struct pci_driver { + struct list_head node; + const char *name; + const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */ + int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ + void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ + int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */ + int (*suspend_late)(struct pci_dev *dev, pm_message_t state); + int (*resume_early)(struct pci_dev *dev); + int (*resume) (struct pci_dev *dev); /* Device woken up */ + void (*shutdown) (struct pci_dev *dev); + int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */ + const struct pci_error_handlers *err_handler; + const struct attribute_group **groups; + struct device_driver driver; + struct pci_dynids dynids; +}; + +#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) + +/** + * PCI_DEVICE - macro used to describe a specific PCI device + * @vend: the 16 bit PCI Vendor ID + * @dev: the 16 bit PCI Device ID + * + * This macro is used to create a struct pci_device_id that matches a + * specific device. The subvendor and subdevice fields will be set to + * PCI_ANY_ID. + */ +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID + +/** + * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem + * @vend: the 16 bit PCI Vendor ID + * @dev: the 16 bit PCI Device ID + * @subvend: the 16 bit PCI Subvendor ID + * @subdev: the 16 bit PCI Subdevice ID + * + * This macro is used to create a struct pci_device_id that matches a + * specific device with subsystem information. 
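/*
 * Minimal driver skeleton (illustrative only) tying together PCI_DEVICE(),
 * struct pci_driver and the managed enable helper declared further down.
 * All "my_*" names and the 0x1234/0x5678 IDs are hypothetical placeholders.
 */
#include <linux/module.h>

static const struct pci_device_id my_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* one specific vendor/device */
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, my_pci_ids);

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);		/* managed: undone automatically */
	if (ret)
		return ret;

	pci_set_master(pdev);			/* let the device issue DMA */
	return 0;
}

static void my_pci_remove(struct pci_dev *pdev)
{
	/* resources taken through pcim_*() helpers are released by the core */
}

static struct pci_driver my_pci_driver = {
	.name		= "my_pci",
	.id_table	= my_pci_ids,
	.probe		= my_pci_probe,
	.remove		= my_pci_remove,
};
module_pci_driver(my_pci_driver);		/* helper macro defined below */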
+ */ +#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = (subvend), .subdevice = (subdev) + +/** + * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class + * @dev_class: the class, subclass, prog-if triple for this device + * @dev_class_mask: the class mask for this device + * + * This macro is used to create a struct pci_device_id that matches a + * specific PCI class. The vendor, device, subvendor, and subdevice + * fields will be set to PCI_ANY_ID. + */ +#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \ + .class = (dev_class), .class_mask = (dev_class_mask), \ + .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID + +/** + * PCI_VDEVICE - macro used to describe a specific PCI device in short form + * @vend: the vendor name + * @dev: the 16 bit PCI Device ID + * + * This macro is used to create a struct pci_device_id that matches a + * specific PCI device. The subvendor, and subdevice fields will be set + * to PCI_ANY_ID. The macro allows the next field to follow as the device + * private data. + */ +#define PCI_VDEVICE(vend, dev) \ + .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 + +/** + * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form + * @vend: the vendor name (without PCI_VENDOR_ID_ prefix) + * @dev: the device name (without PCI_DEVICE_ID__ prefix) + * @data: the driver data to be filled + * + * This macro is used to create a struct pci_device_id that matches a + * specific PCI device. The subvendor, and subdevice fields will be set + * to PCI_ANY_ID. + */ +#define PCI_DEVICE_DATA(vend, dev, data) \ + .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \ + .driver_data = (kernel_ulong_t)(data) + +enum { + PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */ + PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */ + PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */ + PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */ + PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */ + PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */ + PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */ +}; + +/* These external functions are only available when PCI support is enabled */ +#ifdef CONFIG_PCI + +extern unsigned int pci_flags; + +static inline void pci_set_flags(int flags) { pci_flags = flags; } +static inline void pci_add_flags(int flags) { pci_flags |= flags; } +static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; } +static inline int pci_has_flag(int flag) { return pci_flags & flag; } + +void pcie_bus_configure_settings(struct pci_bus *bus); + +enum pcie_bus_config_types { + PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */ + PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */ + PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */ + PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */ + PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */ +}; + +extern enum pcie_bus_config_types pcie_bus_config; + +extern struct bus_type pci_bus_type; + +/* Do NOT directly access these two variables, unless you are arch-specific PCI + * code, or PCI core code. 
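/*
 * Sketch of class-based matching combined with per-entry driver data.  The
 * 0xffffff mask matches base class, subclass and prog-if exactly;
 * PCI_CLASS_STORAGE_EXPRESS comes from linux/pci_ids.h.  The enum, the
 * 0x1234/0x0002 IDs and the function names are hypothetical.
 */
enum my_variant { MY_VARIANT_GENERIC, MY_VARIANT_QUIRKY };

static const struct pci_device_id my_class_ids[] = {
	/* any device advertising the NVMe programming interface */
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff),
	  .driver_data = MY_VARIANT_GENERIC },
	/* one specific device that needs special handling */
	{ PCI_DEVICE(0x1234, 0x0002), .driver_data = MY_VARIANT_QUIRKY },
	{ }
};

static int my_class_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned long variant = id->driver_data;	/* from the matching entry */

	dev_info(&pdev->dev, "probed as variant %lu\n", variant);
	return 0;
}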
*/ +extern struct list_head pci_root_buses; /* List of all known PCI buses */ +/* Some device drivers need know if PCI is initiated */ +int no_pci_devices(void); + +void pcibios_resource_survey_bus(struct pci_bus *bus); +void pcibios_bus_add_device(struct pci_dev *pdev); +void pcibios_add_bus(struct pci_bus *bus); +void pcibios_remove_bus(struct pci_bus *bus); +void pcibios_fixup_bus(struct pci_bus *); +int __must_check pcibios_enable_device(struct pci_dev *, int mask); +/* Architecture-specific versions may override this (weak) */ +char *pcibios_setup(char *str); + +/* Used only when drivers/pci/setup.c is used */ +resource_size_t pcibios_align_resource(void *, const struct resource *, + resource_size_t, + resource_size_t); + +/* Weak but can be overriden by arch */ +void pci_fixup_cardbus(struct pci_bus *); + +/* Generic PCI functions used internally */ + +void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region, + struct resource *res); +void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res, + struct pci_bus_region *region); +void pcibios_scan_specific_bus(int busn); +struct pci_bus *pci_find_bus(int domain, int busnr); +void pci_bus_add_devices(const struct pci_bus *bus); +struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata); +struct pci_bus *pci_create_root_bus(struct device *parent, int bus, + struct pci_ops *ops, void *sysdata, + struct list_head *resources); +int pci_host_probe(struct pci_host_bridge *bridge); +int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax); +int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax); +void pci_bus_release_busn_res(struct pci_bus *b); +struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, + struct pci_ops *ops, void *sysdata, + struct list_head *resources); +int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); +struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, + int busnr); +void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); +struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, + const char *name, + struct hotplug_slot *hotplug); +void pci_destroy_slot(struct pci_slot *slot); +#ifdef CONFIG_SYSFS +void pci_dev_assign_slot(struct pci_dev *dev); +#else +static inline void pci_dev_assign_slot(struct pci_dev *dev) { } +#endif +int pci_scan_slot(struct pci_bus *bus, int devfn); +struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); +void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); +unsigned int pci_scan_child_bus(struct pci_bus *bus); +void pci_bus_add_device(struct pci_dev *dev); +void pci_read_bridge_bases(struct pci_bus *child); +struct resource *pci_find_parent_resource(const struct pci_dev *dev, + struct resource *res); +struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev); +u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin); +int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); +u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); +struct pci_dev *pci_dev_get(struct pci_dev *dev); +void pci_dev_put(struct pci_dev *dev); +void pci_remove_bus(struct pci_bus *b); +void pci_stop_and_remove_bus_device(struct pci_dev *dev); +void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev); +void pci_stop_root_bus(struct pci_bus *bus); +void pci_remove_root_bus(struct pci_bus *bus); +void pci_setup_cardbus(struct pci_bus *bus); +void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type); +void 
pci_sort_breadthfirst(void); +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false)) + +/* Generic PCI functions exported to card drivers */ + +enum pci_lost_interrupt_reason { + PCI_LOST_IRQ_NO_INFORMATION = 0, + PCI_LOST_IRQ_DISABLE_MSI, + PCI_LOST_IRQ_DISABLE_MSIX, + PCI_LOST_IRQ_DISABLE_ACPI, +}; +enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev); +int pci_find_capability(struct pci_dev *dev, int cap); +int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); +int pci_find_ext_capability(struct pci_dev *dev, int cap); +int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap); +int pci_find_ht_capability(struct pci_dev *dev, int ht_cap); +int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); +struct pci_bus *pci_find_next_bus(const struct pci_bus *from); + +struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, + struct pci_dev *from); +struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, + unsigned int ss_vendor, unsigned int ss_device, + struct pci_dev *from); +struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); +struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, + unsigned int devfn); +struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); +int pci_dev_present(const struct pci_device_id *ids); + +int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, + int where, u8 *val); +int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn, + int where, u16 *val); +int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, u32 *val); +int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn, + int where, u8 val); +int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, + int where, u16 val); +int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, u32 val); + +int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val); +int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val); +int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val); +int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val); + +struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); + +int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val); +int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val); +int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val); +int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val); +int pci_write_config_word(const struct pci_dev *dev, int where, u16 val); +int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val); + +int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val); +int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos, + u32 clear, u32 set); + +static inline int pcie_capability_set_word(struct pci_dev *dev, int pos, + u16 set) +{ + 
return pcie_capability_clear_and_set_word(dev, pos, 0, set); +} + +static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos, + u32 set) +{ + return pcie_capability_clear_and_set_dword(dev, pos, 0, set); +} + +static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} + +static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos, + u32 clear) +{ + return pcie_capability_clear_and_set_dword(dev, pos, clear, 0); +} + +/* User-space driven config access */ +int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); +int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); +int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val); +int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val); +int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val); +int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val); + +int __must_check pci_enable_device(struct pci_dev *dev); +int __must_check pci_enable_device_io(struct pci_dev *dev); +int __must_check pci_enable_device_mem(struct pci_dev *dev); +int __must_check pci_reenable_device(struct pci_dev *); +int __must_check pcim_enable_device(struct pci_dev *pdev); +void pcim_pin_device(struct pci_dev *pdev); + +static inline bool pci_intx_mask_supported(struct pci_dev *pdev) +{ + /* + * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is + * writable and no quirk has marked the feature broken. + */ + return !pdev->broken_intx_masking; +} + +static inline int pci_is_enabled(struct pci_dev *pdev) +{ + return (atomic_read(&pdev->enable_cnt) > 0); +} + +static inline int pci_is_managed(struct pci_dev *pdev) +{ + return pdev->is_managed; +} + +void pci_disable_device(struct pci_dev *dev); + +extern unsigned int pcibios_max_latency; +void pci_set_master(struct pci_dev *dev); +void pci_clear_master(struct pci_dev *dev); + +int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); +int pci_set_cacheline_size(struct pci_dev *dev); +#define HAVE_PCI_SET_MWI +int __must_check pci_set_mwi(struct pci_dev *dev); +int __must_check pcim_set_mwi(struct pci_dev *dev); +int pci_try_set_mwi(struct pci_dev *dev); +void pci_clear_mwi(struct pci_dev *dev); +void pci_intx(struct pci_dev *dev, int enable); +bool pci_check_and_mask_intx(struct pci_dev *dev); +bool pci_check_and_unmask_intx(struct pci_dev *dev); +int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); +int pci_wait_for_pending_transaction(struct pci_dev *dev); +int pcix_get_max_mmrbc(struct pci_dev *dev); +int pcix_get_mmrbc(struct pci_dev *dev); +int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); +int pcie_get_readrq(struct pci_dev *dev); +int pcie_set_readrq(struct pci_dev *dev, int rq); +int pcie_get_mps(struct pci_dev *dev); +int pcie_set_mps(struct pci_dev *dev, int mps); +u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width); +void pcie_print_link_status(struct pci_dev *dev); +bool pcie_has_flr(struct pci_dev *dev); +int pcie_flr(struct pci_dev *dev); +int __pci_reset_function_locked(struct pci_dev *dev); +int pci_reset_function(struct pci_dev *dev); +int pci_reset_function_locked(struct pci_dev *dev); +int pci_try_reset_function(struct pci_dev *dev); +int pci_probe_reset_slot(struct pci_slot *slot); +int pci_probe_reset_bus(struct pci_bus *bus); +int pci_reset_bus(struct 
pci_dev *dev); +void pci_reset_secondary_bus(struct pci_dev *dev); +void pcibios_reset_secondary_bus(struct pci_dev *dev); +void pci_update_resource(struct pci_dev *dev, int resno); +int __must_check pci_assign_resource(struct pci_dev *dev, int i); +int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); +void pci_release_resource(struct pci_dev *dev, int resno); +int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size); +int pci_select_bars(struct pci_dev *dev, unsigned long flags); +bool pci_device_is_present(struct pci_dev *pdev); +void pci_ignore_hotplug(struct pci_dev *dev); + +int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr, + irq_handler_t handler, irq_handler_t thread_fn, void *dev_id, + const char *fmt, ...); +void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id); + +/* ROM control related routines */ +int pci_enable_rom(struct pci_dev *pdev); +void pci_disable_rom(struct pci_dev *pdev); +void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); +void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); + +/* Power management related routines */ +int pci_save_state(struct pci_dev *dev); +void pci_restore_state(struct pci_dev *dev); +struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); +int pci_load_saved_state(struct pci_dev *dev, + struct pci_saved_state *state); +int pci_load_and_free_saved_state(struct pci_dev *dev, + struct pci_saved_state **state); +struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); +struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, + u16 cap); +int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size); +int pci_add_ext_cap_save_buffer(struct pci_dev *dev, + u16 cap, unsigned int size); +int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); +int pci_set_power_state(struct pci_dev *dev, pci_power_t state); +pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); +bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); +void pci_pme_active(struct pci_dev *dev, bool enable); +int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable); +int pci_wake_from_d3(struct pci_dev *dev, bool enable); +int pci_prepare_to_sleep(struct pci_dev *dev); +int pci_back_from_sleep(struct pci_dev *dev); +bool pci_dev_run_wake(struct pci_dev *dev); +bool pci_check_pme_status(struct pci_dev *dev); +void pci_pme_wakeup_bus(struct pci_bus *bus); +void pci_d3cold_enable(struct pci_dev *dev); +void pci_d3cold_disable(struct pci_dev *dev); +bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); +void pci_wakeup_bus(struct pci_bus *bus); +void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state); + +/* PCI Virtual Channel */ +int pci_save_vc_state(struct pci_dev *dev); +void pci_restore_vc_state(struct pci_dev *dev); +void pci_allocate_vc_save_buffers(struct pci_dev *dev); + +/* For use by arch with custom probe code */ +void set_pcie_port_type(struct pci_dev *pdev); +void set_pcie_hotplug_bridge(struct pci_dev *pdev); + +/* Functions for PCI Hotplug drivers to use */ +int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); +unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge); +unsigned int pci_rescan_bus(struct pci_bus *bus); +void pci_lock_rescan_remove(void); +void pci_unlock_rescan_remove(void); + +/* Vital Product Data routines */ +ssize_t 
pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); +ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); +int pci_set_vpd_size(struct pci_dev *dev, size_t len); + +/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ +resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); +void pci_bus_assign_resources(const struct pci_bus *bus); +void pci_bus_claim_resources(struct pci_bus *bus); +void pci_bus_size_bridges(struct pci_bus *bus); +int pci_claim_resource(struct pci_dev *, int); +int pci_claim_bridge_resource(struct pci_dev *bridge, int i); +void pci_assign_unassigned_resources(void); +void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge); +void pci_assign_unassigned_bus_resources(struct pci_bus *bus); +void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus); +int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type); +void pdev_enable_device(struct pci_dev *); +int pci_enable_resources(struct pci_dev *, int mask); +void pci_assign_irq(struct pci_dev *dev); +struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res); +#define HAVE_PCI_REQ_REGIONS 2 +int __must_check pci_request_regions(struct pci_dev *, const char *); +int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); +void pci_release_regions(struct pci_dev *); +int __must_check pci_request_region(struct pci_dev *, int, const char *); +int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *); +void pci_release_region(struct pci_dev *, int); +int pci_request_selected_regions(struct pci_dev *, int, const char *); +int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); +void pci_release_selected_regions(struct pci_dev *, int); + +/* drivers/pci/bus.c */ +struct pci_bus *pci_bus_get(struct pci_bus *bus); +void pci_bus_put(struct pci_bus *bus); +void pci_add_resource(struct list_head *resources, struct resource *res); +void pci_add_resource_offset(struct list_head *resources, struct resource *res, + resource_size_t offset); +void pci_free_resource_list(struct list_head *resources); +void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, + unsigned int flags); +struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); +void pci_bus_remove_resources(struct pci_bus *bus); +int devm_request_pci_bus_resources(struct device *dev, + struct list_head *resources); + +/* Temporary until new and working PCI SBR API in place */ +int pci_bridge_secondary_bus_reset(struct pci_dev *dev); + +#define pci_bus_for_each_resource(bus, res, i) \ + for (i = 0; \ + (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \ + i++) + +int __must_check pci_bus_alloc_resource(struct pci_bus *bus, + struct resource *res, resource_size_t size, + resource_size_t align, resource_size_t min, + unsigned long type_mask, + resource_size_t (*alignf)(void *, + const struct resource *, + resource_size_t, + resource_size_t), + void *alignf_data); + + +int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, + resource_size_t size); +unsigned long pci_address_to_pio(phys_addr_t addr); +phys_addr_t pci_pio_to_address(unsigned long pio); +int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); +int devm_pci_remap_iospace(struct device *dev, const struct resource *res, + phys_addr_t phys_addr); +void pci_unmap_iospace(struct resource *res); +void __iomem 
*devm_pci_remap_cfgspace(struct device *dev, + resource_size_t offset, + resource_size_t size); +void __iomem *devm_pci_remap_cfg_resource(struct device *dev, + struct resource *res); + +static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) +{ + struct pci_bus_region region; + + pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]); + return region.start; +} + +/* Proper probing supporting hot-pluggable devices */ +int __must_check __pci_register_driver(struct pci_driver *, struct module *, + const char *mod_name); + +/* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */ +#define pci_register_driver(driver) \ + __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) + +void pci_unregister_driver(struct pci_driver *dev); + +/** + * module_pci_driver() - Helper macro for registering a PCI driver + * @__pci_driver: pci_driver struct + * + * Helper macro for PCI drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_pci_driver(__pci_driver) \ + module_driver(__pci_driver, pci_register_driver, pci_unregister_driver) + +/** + * builtin_pci_driver() - Helper macro for registering a PCI driver + * @__pci_driver: pci_driver struct + * + * Helper macro for PCI drivers which do not do anything special in their + * init code. This eliminates a lot of boilerplate. Each driver may only + * use this macro once, and calling it replaces device_initcall(...) + */ +#define builtin_pci_driver(__pci_driver) \ + builtin_driver(__pci_driver, pci_register_driver) + +struct pci_driver *pci_dev_driver(const struct pci_dev *dev); +int pci_add_dynid(struct pci_driver *drv, + unsigned int vendor, unsigned int device, + unsigned int subvendor, unsigned int subdevice, + unsigned int class, unsigned int class_mask, + unsigned long driver_data); +const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, + struct pci_dev *dev); +int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, + int pass); + +void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), + void *userdata); +int pci_cfg_space_size(struct pci_dev *dev); +unsigned char pci_bus_max_busnr(struct pci_bus *bus); +void pci_setup_bridge(struct pci_bus *bus); +resource_size_t pcibios_window_alignment(struct pci_bus *bus, + unsigned long type); + +#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) +#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) + +int pci_set_vga_state(struct pci_dev *pdev, bool decode, + unsigned int command_bits, u32 flags); + +#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ +#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ +#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ +#define PCI_IRQ_ALL_TYPES \ + (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) + +/* kmem_cache style wrapper around pci_alloc_consistent() */ + +#include +#include + +#define pci_pool dma_pool +#define pci_pool_create(name, pdev, size, align, allocation) \ + dma_pool_create(name, &pdev->dev, size, align, allocation) +#define pci_pool_destroy(pool) dma_pool_destroy(pool) +#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) +#define pci_pool_zalloc(pool, flags, handle) \ + dma_pool_zalloc(pool, flags, handle) +#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) + +struct
msix_entry { + u32 vector; /* Kernel uses to write allocated vector */ + u16 entry; /* Driver uses to specify entry, OS writes */ +}; + +#ifdef CONFIG_PCI_MSI +int pci_msi_vec_count(struct pci_dev *dev); +void pci_disable_msi(struct pci_dev *dev); +int pci_msix_vec_count(struct pci_dev *dev); +void pci_disable_msix(struct pci_dev *dev); +void pci_restore_msi_state(struct pci_dev *dev); +int pci_msi_enabled(void); +int pci_enable_msi(struct pci_dev *dev); +int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec); +static inline int pci_enable_msix_exact(struct pci_dev *dev, + struct msix_entry *entries, int nvec) +{ + int rc = pci_enable_msix_range(dev, entries, nvec, nvec); + if (rc < 0) + return rc; + return 0; +} +int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags, + const struct irq_affinity *affd); + +void pci_free_irq_vectors(struct pci_dev *dev); +int pci_irq_vector(struct pci_dev *dev, unsigned int nr); +const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); +int pci_irq_get_node(struct pci_dev *pdev, int vec); + +#else +static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } +static inline void pci_disable_msi(struct pci_dev *dev) { } +static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } +static inline void pci_disable_msix(struct pci_dev *dev) { } +static inline void pci_restore_msi_state(struct pci_dev *dev) { } +static inline int pci_msi_enabled(void) { return 0; } +static inline int pci_enable_msi(struct pci_dev *dev) +{ return -ENOSYS; } +static inline int pci_enable_msix_range(struct pci_dev *dev, + struct msix_entry *entries, int minvec, int maxvec) +{ return -ENOSYS; } +static inline int pci_enable_msix_exact(struct pci_dev *dev, + struct msix_entry *entries, int nvec) +{ return -ENOSYS; } + +static inline int +pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags, + const struct irq_affinity *aff_desc) +{ + if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) + return 1; + return -ENOSPC; +} + +static inline void pci_free_irq_vectors(struct pci_dev *dev) +{ +} + +static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + if (WARN_ON_ONCE(nr > 0)) + return -EINVAL; + return dev->irq; +} +static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, + int vec) +{ + return cpu_possible_mask; +} + +static inline int pci_irq_get_node(struct pci_dev *pdev, int vec) +{ + return first_online_node; +} +#endif + +static inline int +pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags, + NULL); +} + +/** + * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq + * @d: the INTx IRQ domain + * @node: the DT node for the device whose interrupt we're translating + * @intspec: the interrupt specifier data from the DT + * @intsize: the number of entries in @intspec + * @out_hwirq: pointer at which to write the hwirq number + * @out_type: pointer at which to write the interrupt type + * + * Translate a PCI INTx interrupt number from device tree in the range 1-4, as + * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range + * 0-3 suitable for use in a 4 entry IRQ domain. 
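/*
 * Sketch (illustrative, hypothetical names) of the vector allocation helpers
 * declared above: ask for up to eight vectors, preferring MSI-X, falling back
 * to MSI and finally to the legacy INTx line.
 */
#include <linux/interrupt.h>

static irqreturn_t my_irq_handler(int irq, void *data)
{
	/* "data" is the pci_dev passed to pci_request_irq() below */
	return IRQ_HANDLED;
}

static int my_setup_irqs(struct pci_dev *pdev)
{
	int nvec, i, ret;

	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		ret = pci_request_irq(pdev, i, my_irq_handler, NULL, pdev,
				      "my_pci[%d]", i);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		pci_free_irq(pdev, i, pdev);
	pci_free_irq_vectors(pdev);
	return ret;
}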
That is, subtract one from the + * INTx value to obtain the hwirq number. + * + * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range. + */ +static inline int pci_irqd_intx_xlate(struct irq_domain *d, + struct device_node *node, + const u32 *intspec, + unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + const u32 intx = intspec[0]; + + if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD) + return -EINVAL; + + *out_hwirq = intx - PCI_INTERRUPT_INTA; + return 0; +} + +#ifdef CONFIG_PCIEPORTBUS +extern bool pcie_ports_disabled; +extern bool pcie_ports_native; +#else +#define pcie_ports_disabled true +#define pcie_ports_native false +#endif + +#ifdef CONFIG_PCIEASPM +bool pcie_aspm_support_enabled(void); +#else +static inline bool pcie_aspm_support_enabled(void) { return false; } +#endif + +#ifdef CONFIG_PCIEAER +bool pci_aer_available(void); +#else +static inline bool pci_aer_available(void) { return false; } +#endif + +#ifdef CONFIG_PCIE_ECRC +void pcie_set_ecrc_checking(struct pci_dev *dev); +void pcie_ecrc_get_policy(char *str); +#else +static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } +static inline void pcie_ecrc_get_policy(char *str) { } +#endif + +bool pci_ats_disabled(void); + +#ifdef CONFIG_PCI_ATS +/* Address Translation Service */ +void pci_ats_init(struct pci_dev *dev); +int pci_enable_ats(struct pci_dev *dev, int ps); +void pci_disable_ats(struct pci_dev *dev); +int pci_ats_queue_depth(struct pci_dev *dev); +#else +static inline void pci_ats_init(struct pci_dev *d) { } +static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; } +static inline void pci_disable_ats(struct pci_dev *d) { } +static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; } +#endif + +#ifdef CONFIG_PCIE_PTM +int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); +#else +static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) +{ return -EINVAL; } +#endif + +void pci_cfg_access_lock(struct pci_dev *dev); +bool pci_cfg_access_trylock(struct pci_dev *dev); +void pci_cfg_access_unlock(struct pci_dev *dev); + +/* + * PCI domain support. Sometimes called PCI segment (eg by ACPI), + * a PCI domain is defined to be a set of PCI buses which share + * configuration space. + */ +#ifdef CONFIG_PCI_DOMAINS +extern int pci_domains_supported; +#else +enum { pci_domains_supported = 0 }; +static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } +static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } +#endif /* CONFIG_PCI_DOMAINS */ + +/* + * Generic implementation for PCI domain support. 
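/*
 * Sketch: composing the familiar dddd:bb:dd.f address by hand with
 * pci_domain_nr().  In practice pci_name()/dev_name() already provide this
 * string; the helper name is hypothetical.
 */
static inline void my_log_bdf(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "at %04x:%02x:%02x.%d\n",
		 pci_domain_nr(pdev->bus), pdev->bus->number,
		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
}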
If your + * architecture does not need custom management of PCI + * domains then this implementation will be used + */ +#ifdef CONFIG_PCI_DOMAINS_GENERIC +static inline int pci_domain_nr(struct pci_bus *bus) +{ + return bus->domain_nr; +} +#ifdef CONFIG_ACPI +int acpi_pci_bus_find_domain_nr(struct pci_bus *bus); +#else +static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +{ return 0; } +#endif +int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); +#endif + +/* Some architectures require additional setup to direct VGA traffic */ +typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, + unsigned int command_bits, u32 flags); +void pci_register_set_vga_state(arch_set_vga_state_t func); + +static inline int +pci_request_io_regions(struct pci_dev *pdev, const char *name) +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +pci_request_mem_regions(struct pci_dev *pdev, const char *name) +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), name); +} + +static inline void +pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +} + +#else /* CONFIG_PCI is not enabled */ + +static inline void pci_set_flags(int flags) { } +static inline void pci_add_flags(int flags) { } +static inline void pci_clear_flags(int flags) { } +static inline int pci_has_flag(int flag) { return 0; } + +/* + * If the system does not have PCI, clearly these return errors. Define + * these as simple inline functions to avoid hair in drivers. 
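/*
 * For readability: the token-pasting stubs generated just below expand, for
 * the byte accessors, to roughly the following pair (and likewise for word
 * and dword).  Shown for illustration only.
 */
static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val)
{ return PCIBIOS_FUNC_NOT_SUPPORTED; }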
+ */ +#define _PCI_NOP(o, s, t) \ + static inline int pci_##o##_config_##s(struct pci_dev *dev, \ + int where, t val) \ + { return PCIBIOS_FUNC_NOT_SUPPORTED; } + +#define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \ + _PCI_NOP(o, word, u16 x) \ + _PCI_NOP(o, dword, u32 x) +_PCI_NOP_ALL(read, *) +_PCI_NOP_ALL(write,) + +static inline struct pci_dev *pci_get_device(unsigned int vendor, + unsigned int device, + struct pci_dev *from) +{ return NULL; } + +static inline struct pci_dev *pci_get_subsys(unsigned int vendor, + unsigned int device, + unsigned int ss_vendor, + unsigned int ss_device, + struct pci_dev *from) +{ return NULL; } + +static inline struct pci_dev *pci_get_class(unsigned int class, + struct pci_dev *from) +{ return NULL; } + +#define pci_dev_present(ids) (0) +#define no_pci_devices() (1) +#define pci_dev_put(dev) do { } while (0) + +static inline void pci_set_master(struct pci_dev *dev) { } +static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } +static inline void pci_disable_device(struct pci_dev *dev) { } +static inline int pci_assign_resource(struct pci_dev *dev, int i) +{ return -EBUSY; } +static inline int __must_check __pci_register_driver(struct pci_driver *drv, + struct module *owner, + const char *mod_name) +{ return 0; } +static inline int pci_register_driver(struct pci_driver *drv) +{ return 0; } +static inline void pci_unregister_driver(struct pci_driver *drv) { } +static inline int pci_find_capability(struct pci_dev *dev, int cap) +{ return 0; } +static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, + int cap) +{ return 0; } +static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) +{ return 0; } + +/* Power management related routines */ +static inline int pci_save_state(struct pci_dev *dev) { return 0; } +static inline void pci_restore_state(struct pci_dev *dev) { } +static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) +{ return 0; } +static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ return 0; } +static inline pci_power_t pci_choose_state(struct pci_dev *dev, + pm_message_t state) +{ return PCI_D0; } +static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, + int enable) +{ return 0; } + +static inline struct resource *pci_find_resource(struct pci_dev *dev, + struct resource *res) +{ return NULL; } +static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) +{ return -EIO; } +static inline void pci_release_regions(struct pci_dev *dev) { } + +static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } + +static inline void pci_block_cfg_access(struct pci_dev *dev) { } +static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) +{ return 0; } +static inline void pci_unblock_cfg_access(struct pci_dev *dev) { } + +static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) +{ return NULL; } +static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, + unsigned int devfn) +{ return NULL; } +static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, + unsigned int bus, unsigned int devfn) +{ return NULL; } + +static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } +static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } + +#define dev_is_pci(d) (false) +#define dev_is_pf(d) (false) +static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) +{ return false; } +static inline int pci_irqd_intx_xlate(struct irq_domain *d, 
+ struct device_node *node, + const u32 *intspec, + unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ return -EINVAL; } +#endif /* CONFIG_PCI */ + +/* Include architecture-dependent settings and functions */ + +#include + +/* These two functions provide almost identical functionality. Depending * on the architecture, one will be implemented as a wrapper around the * other (in drivers/pci/mmap.c). * * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff * is expected to be an offset within that region. * * pci_mmap_page_range() is the legacy architecture-specific interface, * which accepts a "user visible" resource address converted by * pci_resource_to_user(), as used in the legacy mmap() interface in * /proc/bus/pci/. */ +int pci_mmap_resource_range(struct pci_dev *dev, int bar, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine); +int pci_mmap_page_range(struct pci_dev *pdev, int bar, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine); + +#ifndef arch_can_pci_mmap_wc +#define arch_can_pci_mmap_wc() 0 +#endif + +#ifndef arch_can_pci_mmap_io +#define arch_can_pci_mmap_io() 0 +#define pci_iobar_pfn(pdev, bar, vma) (-EINVAL) +#else +int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); +#endif + +#ifndef pci_root_bus_fwnode +#define pci_root_bus_fwnode(bus) NULL +#endif + +/* * These helpers provide future and backwards compatibility * for accessing popular PCI BAR info */ +#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) +#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) +#define pci_resource_len(dev,bar) \ + ((pci_resource_start((dev), (bar)) == 0 && \ + pci_resource_end((dev), (bar)) == \ + pci_resource_start((dev), (bar))) ? 0 : \ + \ + (pci_resource_end((dev), (bar)) - \ + pci_resource_start((dev), (bar)) + 1)) + +/* * Similar to the helpers above, these manipulate per-pci_dev * driver-specific data. They are really just a wrapper around * the generic device structure functions of these calls. */ +static inline void *pci_get_drvdata(struct pci_dev *pdev) +{ + return dev_get_drvdata(&pdev->dev); +} + +static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) +{ + dev_set_drvdata(&pdev->dev, data); +} + +static inline const char *pci_name(const struct pci_dev *pdev) +{ + return dev_name(&pdev->dev); +} + + +/* * Some archs don't want to expose struct resource to userland as-is * in sysfs and /proc */ +#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER +void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, + resource_size_t *start, resource_size_t *end); +#else +static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, resource_size_t *start, + resource_size_t *end) +{ + *start = rsrc->start; + *end = rsrc->end; +} +#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ + + +/* + * The world is not perfect and supplies us with broken PCI devices. + * For at least a part of these bugs we need a work-around, so both + * generic (drivers/pci/quirks.c) and per-architecture code can define + * fixup hooks to be called for particular buggy devices.
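/*
 * Sketch of a quirk using the fixup machinery declared below.  The
 * 0x1234/0x5678 IDs and the function name are hypothetical placeholders;
 * real quirks key on IDs from linux/pci_ids.h.
 */
static void quirk_my_no_bus_reset(struct pci_dev *dev)
{
	/* tell the core never to attempt a secondary bus reset on this device */
	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
	dev_info(&dev->dev, "bus reset disabled by quirk\n");
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_my_no_bus_reset);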
+ */ + +struct pci_fixup { + u16 vendor; /* Or PCI_ANY_ID */ + u16 device; /* Or PCI_ANY_ID */ + u32 class; /* Or PCI_ANY_ID */ + unsigned int class_shift; /* should be 0, 8, 16 */ +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + int hook_offset; +#else + void (*hook)(struct pci_dev *dev); +#endif +}; + +enum pci_fixup_pass { + pci_fixup_early, /* Before probing BARs */ + pci_fixup_header, /* After reading configuration header */ + pci_fixup_final, /* Final phase of device fixups */ + pci_fixup_enable, /* pci_enable_device() time */ + pci_fixup_resume, /* pci_device_resume() */ + pci_fixup_suspend, /* pci_device_suspend() */ + pci_fixup_resume_early, /* pci_device_resume_early() */ + pci_fixup_suspend_late, /* pci_device_suspend_late() */ +}; + +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ + class_shift, hook) \ + __ADDRESSABLE(hook) \ + asm(".section " #sec ", \"a\" \n" \ + ".balign 16 \n" \ + ".short " #vendor ", " #device " \n" \ + ".long " #class ", " #class_shift " \n" \ + ".long " #hook " - . \n" \ + ".previous \n"); +#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ + class_shift, hook) \ + __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ + class_shift, hook) +#else +/* Anonymous variables would be nice... */ +#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ + class_shift, hook) \ + static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \ + __attribute__((__section__(#section), aligned((sizeof(void *))))) \ + = { vendor, device, class, class_shift, hook }; +#endif + +#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ + hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ + hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ + hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ + hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ + resume##hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ + resume_early##hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ + suspend##hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ + suspend_late##hook, vendor, device, class, class_shift, hook) + +#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ + hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ + hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_FINAL(vendor, 
device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ + hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ + hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ + resume##hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ + resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ + suspend##hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ + suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook) + +#ifdef CONFIG_PCI_QUIRKS +void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); +#else +static inline void pci_fixup_device(enum pci_fixup_pass pass, + struct pci_dev *dev) { } +#endif + +void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); +void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); +void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); +int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name); +int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, + const char *name); +void pcim_iounmap_regions(struct pci_dev *pdev, int mask); + +extern int pci_pci_problems; +#define PCIPCI_FAIL 1 /* No PCI PCI DMA */ +#define PCIPCI_TRITON 2 +#define PCIPCI_NATOMA 4 +#define PCIPCI_VIAETBF 8 +#define PCIPCI_VSFX 16 +#define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ +#define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ + +extern unsigned long pci_cardbus_io_size; +extern unsigned long pci_cardbus_mem_size; +extern u8 pci_dfl_cache_line_size; +extern u8 pci_cache_line_size; + +extern unsigned long pci_hotplug_io_size; +extern unsigned long pci_hotplug_mem_size; +extern unsigned long pci_hotplug_bus_size; + +/* Architecture-specific versions may override these (weak) */ +void pcibios_disable_device(struct pci_dev *dev); +void pcibios_set_master(struct pci_dev *dev); +int pcibios_set_pcie_reset_state(struct pci_dev *dev, + enum pcie_reset_state state); +int pcibios_add_device(struct pci_dev *dev); +void pcibios_release_device(struct pci_dev *dev); +void pcibios_penalize_isa_irq(int irq, int active); +int pcibios_alloc_irq(struct pci_dev *dev); +void pcibios_free_irq(struct pci_dev *dev); +resource_size_t pcibios_default_alignment(void); + +#ifdef CONFIG_HIBERNATE_CALLBACKS +extern struct dev_pm_ops pcibios_pm_ops; +#endif + +#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG) +void __init pci_mmcfg_early_init(void); +void __init pci_mmcfg_late_init(void); +#else +static inline void pci_mmcfg_early_init(void) { } +static inline void pci_mmcfg_late_init(void) { } +#endif + +int pci_ext_cfg_avail(void); + +void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); +void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar); + +#ifdef CONFIG_PCI_IOV +int pci_iov_virtfn_bus(struct pci_dev *dev, int id); +int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); + +int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); +void pci_disable_sriov(struct pci_dev *dev); +int pci_iov_add_virtfn(struct pci_dev *dev, int id); +void pci_iov_remove_virtfn(struct pci_dev *dev, int id); 
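+/*
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * a PF driver typically wires the SR-IOV helpers declared above into its
+ * struct pci_driver .sriov_configure callback, which the PCI core invokes
+ * when userspace writes to the sriov_numvfs sysfs attribute.  All "foo"
+ * names below are hypothetical.
+ *
+ *	static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
+ *	{
+ *		int ret;
+ *
+ *		if (num_vfs == 0) {
+ *			if (pci_vfs_assigned(pdev))
+ *				return -EBUSY;
+ *			pci_disable_sriov(pdev);
+ *			return 0;
+ *		}
+ *
+ *		ret = pci_enable_sriov(pdev, num_vfs);
+ *		return ret ? ret : num_vfs;
+ *	}
+ *
+ *	static struct pci_driver foo_pf_driver = {
+ *		.name			= "foo_pf",
+ *		.id_table		= foo_pf_ids,
+ *		.probe			= foo_pf_probe,
+ *		.remove			= foo_pf_remove,
+ *		.sriov_configure	= foo_sriov_configure,
+ *	};
+ */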
+int pci_num_vf(struct pci_dev *dev); +int pci_vfs_assigned(struct pci_dev *dev); +int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); +int pci_sriov_get_totalvfs(struct pci_dev *dev); +int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn); +resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); +void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); + +/* Arch may override these (weak) */ +int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs); +int pcibios_sriov_disable(struct pci_dev *pdev); +resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); +#else +static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) +{ + return -ENOSYS; +} +static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id) +{ + return -ENOSYS; +} +static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) +{ return -ENODEV; } +static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id) +{ + return -ENOSYS; +} +static inline void pci_iov_remove_virtfn(struct pci_dev *dev, + int id) { } +static inline void pci_disable_sriov(struct pci_dev *dev) { } +static inline int pci_num_vf(struct pci_dev *dev) { return 0; } +static inline int pci_vfs_assigned(struct pci_dev *dev) +{ return 0; } +static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) +{ return 0; } +static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) +{ return 0; } +#define pci_sriov_configure_simple NULL +static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) +{ return 0; } +static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { } +#endif + +#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) +void pci_hp_create_module_link(struct pci_slot *pci_slot); +void pci_hp_remove_module_link(struct pci_slot *pci_slot); +#endif + +/** + * pci_pcie_cap - get the saved PCIe capability offset + * @dev: PCI device + * + * PCIe capability offset is calculated at PCI device initialization + * time and saved in the data structure. This function returns saved + * PCIe capability offset. Using this instead of pci_find_capability() + * reduces unnecessary search in the PCI configuration space. If you + * need to calculate PCIe capability offset from raw device for some + * reasons, please use pci_find_capability() instead. + */ +static inline int pci_pcie_cap(struct pci_dev *dev) +{ + return dev->pcie_cap; +} + +/** + * pci_is_pcie - check if the PCI device is PCI Express capable + * @dev: PCI device + * + * Returns: true if the PCI device is PCI Express capable, false otherwise. 
+ */ +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return pci_pcie_cap(dev); +} + +/** + * pcie_caps_reg - get the PCIe Capabilities Register + * @dev: PCI device + */ +static inline u16 pcie_caps_reg(const struct pci_dev *dev) +{ + return dev->pcie_flags_reg; +} + +/** + * pci_pcie_type - get the PCIe device/port type + * @dev: PCI device + */ +static inline int pci_pcie_type(const struct pci_dev *dev) +{ + return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; +} + +static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) +{ + while (1) { + if (!pci_is_pcie(dev)) + break; + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return dev; + if (!dev->bus->self) + break; + dev = dev->bus->self; + } + return NULL; +} + +void pci_request_acs(void); +bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); +bool pci_acs_path_enabled(struct pci_dev *start, + struct pci_dev *end, u16 acs_flags); +int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); + +#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ +#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) + +/* Large Resource Data Type Tag Item Names */ +#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ +#define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */ +#define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */ + +#define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING) +#define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA) +#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) + +/* Small Resource Data Type Tag Item Names */ +#define PCI_VPD_STIN_END 0x0f /* End */ + +#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3) + +#define PCI_VPD_SRDT_TIN_MASK 0x78 +#define PCI_VPD_SRDT_LEN_MASK 0x07 +#define PCI_VPD_LRDT_TIN_MASK 0x7f + +#define PCI_VPD_LRDT_TAG_SIZE 3 +#define PCI_VPD_SRDT_TAG_SIZE 1 + +#define PCI_VPD_INFO_FLD_HDR_SIZE 3 + +#define PCI_VPD_RO_KEYWORD_PARTNO "PN" +#define PCI_VPD_RO_KEYWORD_MFR_ID "MN" +#define PCI_VPD_RO_KEYWORD_VENDOR0 "V0" +#define PCI_VPD_RO_KEYWORD_CHKSUM "RV" + +/** + * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length + * @lrdt: Pointer to the beginning of the Large Resource Data Type tag + * + * Returns the extracted Large Resource Data Type length. + */ +static inline u16 pci_vpd_lrdt_size(const u8 *lrdt) +{ + return (u16)lrdt[1] + ((u16)lrdt[2] << 8); +} + +/** + * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item + * @lrdt: Pointer to the beginning of the Large Resource Data Type tag + * + * Returns the extracted Large Resource Data Type Tag item. + */ +static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt) +{ + return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); +} + +/** + * pci_vpd_srdt_size - Extracts the Small Resource Data Type length + * @srdt: Pointer to the beginning of the Small Resource Data Type tag + * + * Returns the extracted Small Resource Data Type length. + */ +static inline u8 pci_vpd_srdt_size(const u8 *srdt) +{ + return (*srdt) & PCI_VPD_SRDT_LEN_MASK; +} + +/** + * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item + * @srdt: Pointer to the beginning of the Small Resource Data Type tag + * + * Returns the extracted Small Resource Data Type Tag Item. + */ +static inline u8 pci_vpd_srdt_tag(const u8 *srdt) +{ + return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3; +} + +/** + * pci_vpd_info_field_size - Extracts the information field length + * @lrdt: Pointer to the beginning of an information field header + * + * Returns the extracted information field length. 
+ */ +static inline u8 pci_vpd_info_field_size(const u8 *info_field) +{ + return info_field[2]; +} + +/** + * pci_vpd_find_tag - Locates the Resource Data Type tag provided + * @buf: Pointer to buffered vpd data + * @off: The offset into the buffer at which to begin the search + * @len: The length of the vpd buffer + * @rdt: The Resource Data Type to search for + * + * Returns the index where the Resource Data Type was found or + * -ENOENT otherwise. + */ +int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt); + +/** + * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD + * @buf: Pointer to buffered vpd data + * @off: The offset into the buffer at which to begin the search + * @len: The length of the buffer area, relative to off, in which to search + * @kw: The keyword to search for + * + * Returns the index where the information field keyword was found or + * -ENOENT otherwise. + */ +int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, + unsigned int len, const char *kw); + +/* PCI <-> OF binding helpers */ +#ifdef CONFIG_OF +struct device_node; +struct irq_domain; +void pci_set_of_node(struct pci_dev *dev); +void pci_release_of_node(struct pci_dev *dev); +void pci_set_bus_of_node(struct pci_bus *bus); +void pci_release_bus_of_node(struct pci_bus *bus); +struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); +int pci_parse_request_of_pci_ranges(struct device *dev, + struct list_head *resources, + struct resource **bus_range); + +/* Arch may override this (weak) */ +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); + +#else /* CONFIG_OF */ +static inline void pci_set_of_node(struct pci_dev *dev) { } +static inline void pci_release_of_node(struct pci_dev *dev) { } +static inline void pci_set_bus_of_node(struct pci_bus *bus) { } +static inline void pci_release_bus_of_node(struct pci_bus *bus) { } +static inline struct irq_domain * +pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } +static inline int pci_parse_request_of_pci_ranges(struct device *dev, + struct list_head *resources, + struct resource **bus_range) +{ + return -EINVAL; +} +#endif /* CONFIG_OF */ + +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev *pdev) +{ + return pdev ? pdev->dev.of_node : NULL; +} + +static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) +{ + return bus ? 
bus->dev.of_node : NULL; +} + +#ifdef CONFIG_ACPI +struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); + +void +pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); +#else +static inline struct irq_domain * +pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } +#endif + +#ifdef CONFIG_EEH +static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) +{ + return pdev->dev.archdata.edev; +} +#endif + +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn); +bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2); +int pci_for_each_dma_alias(struct pci_dev *pdev, + int (*fn)(struct pci_dev *pdev, + u16 alias, void *data), void *data); + +/* Helper functions for operation of device flag */ +static inline void pci_set_dev_assigned(struct pci_dev *pdev) +{ + pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; +} +static inline void pci_clear_dev_assigned(struct pci_dev *pdev) +{ + pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; +} +static inline bool pci_is_dev_assigned(struct pci_dev *pdev) +{ + return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED; +} + +/** + * pci_ari_enabled - query ARI forwarding status + * @bus: the PCI bus + * + * Returns true if ARI forwarding is enabled. + */ +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} + +/** + * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain + * @pdev: PCI device to check + * + * Walk upwards from @pdev and check for each encountered bridge if it's part + * of a Thunderbolt controller. Reaching the host bridge means @pdev is not + * Thunderbolt-attached. (But rather soldered to the mainboard usually.) + */ +static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) +{ + struct pci_dev *parent = pdev; + + if (pdev->is_thunderbolt) + return true; + + while ((parent = pci_upstream_bridge(parent))) + if (parent->is_thunderbolt) + return true; + + return false; +} + +#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) +void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); +#endif + +/* Provide the legacy pci_dma_* API */ +#include <linux/pci-dma-compat.h> + +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) + +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) + +#endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h new file mode 100644 index 000000000..a6d6650a0 --- /dev/null +++ b/include/linux/pci_hotplug.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * PCI HotPlug Core Functions + * + * Copyright (C) 1995,2001 Compaq Computer Corporation + * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) + * Copyright (C) 2001 IBM Corp. + * + * All rights reserved.
+ * + * Send feedback to + * + */ +#ifndef _PCI_HOTPLUG_H +#define _PCI_HOTPLUG_H + +/** + * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use + * @owner: The module owner of this structure + * @mod_name: The module name (KBUILD_MODNAME) of this structure + * @enable_slot: Called when the user wants to enable a specific pci slot + * @disable_slot: Called when the user wants to disable a specific pci slot + * @set_attention_status: Called to set the specific slot's attention LED to + * the specified value + * @hardware_test: Called to run a specified hardware test on the specified + * slot. + * @get_power_status: Called to get the current power status of a slot. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. + * @get_attention_status: Called to get the current attention status of a slot. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. + * @get_latch_status: Called to get the current latch status of a slot. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. + * @get_adapter_status: Called to get see if an adapter is present in the slot or not. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. + * @reset_slot: Optional interface to allow override of a bus reset for the + * slot for cases where a secondary bus reset can result in spurious + * hotplug events or where a slot can be reset independent of the bus. + * + * The table of function pointers that is passed to the hotplug pci core by a + * hotplug pci driver. These functions are called by the hotplug pci core when + * the user wants to do something to a specific slot (query it for information, + * set an LED, enable / disable power, etc.) + */ +struct hotplug_slot_ops { + struct module *owner; + const char *mod_name; + int (*enable_slot) (struct hotplug_slot *slot); + int (*disable_slot) (struct hotplug_slot *slot); + int (*set_attention_status) (struct hotplug_slot *slot, u8 value); + int (*hardware_test) (struct hotplug_slot *slot, u32 value); + int (*get_power_status) (struct hotplug_slot *slot, u8 *value); + int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); + int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); + int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); + int (*reset_slot) (struct hotplug_slot *slot, int probe); +}; + +/** + * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot + * @power_status: if power is enabled or not (1/0) + * @attention_status: if the attention light is enabled or not (1/0) + * @latch_status: if the latch (if any) is open or closed (1/0) + * @adapter_status: if there is a pci board present in the slot or not (1/0) + * + * Used to notify the hotplug pci core of the status of a specific slot. + */ +struct hotplug_slot_info { + u8 power_status; + u8 attention_status; + u8 latch_status; + u8 adapter_status; +}; + +/** + * struct hotplug_slot - used to register a physical slot with the hotplug pci core + * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot + * @info: pointer to the &struct hotplug_slot_info for the initial values for + * this slot. + * @private: used by the hotplug pci controller driver to store whatever it + * needs. 
+ */ +struct hotplug_slot { + struct hotplug_slot_ops *ops; + struct hotplug_slot_info *info; + void *private; + + /* Variables below this are for use only by the hotplug pci core. */ + struct list_head slot_list; + struct pci_slot *pci_slot; +}; + +static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) +{ + return pci_slot_name(slot->pci_slot); +} + +int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus, int nr, + const char *name, struct module *owner, + const char *mod_name); +int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus, int nr, + const char *name, struct module *owner, + const char *mod_name); +int pci_hp_add(struct hotplug_slot *slot); + +void pci_hp_del(struct hotplug_slot *slot); +void pci_hp_destroy(struct hotplug_slot *slot); +void pci_hp_deregister(struct hotplug_slot *slot); + +int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot, + struct hotplug_slot_info *info); + +/* use a define to avoid include chaining to get THIS_MODULE & friends */ +#define pci_hp_register(slot, pbus, devnr, name) \ + __pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME) +#define pci_hp_initialize(slot, bus, nr, name) \ + __pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME) + +/* PCI Setting Record (Type 0) */ +struct hpp_type0 { + u32 revision; + u8 cache_line_size; + u8 latency_timer; + u8 enable_serr; + u8 enable_perr; +}; + +/* PCI-X Setting Record (Type 1) */ +struct hpp_type1 { + u32 revision; + u8 max_mem_read; + u8 avg_max_split; + u16 tot_max_split; +}; + +/* PCI Express Setting Record (Type 2) */ +struct hpp_type2 { + u32 revision; + u32 unc_err_mask_and; + u32 unc_err_mask_or; + u32 unc_err_sever_and; + u32 unc_err_sever_or; + u32 cor_err_mask_and; + u32 cor_err_mask_or; + u32 adv_err_cap_and; + u32 adv_err_cap_or; + u16 pci_exp_devctl_and; + u16 pci_exp_devctl_or; + u16 pci_exp_lnkctl_and; + u16 pci_exp_lnkctl_or; + u32 sec_unc_err_sever_and; + u32 sec_unc_err_sever_or; + u32 sec_unc_err_mask_and; + u32 sec_unc_err_mask_or; +}; + +struct hotplug_params { + struct hpp_type0 *t0; /* Type0: NULL if not available */ + struct hpp_type1 *t1; /* Type1: NULL if not available */ + struct hpp_type2 *t2; /* Type2: NULL if not available */ + struct hpp_type0 type0_data; + struct hpp_type1 type1_data; + struct hpp_type2 type2_data; +}; + +#ifdef CONFIG_ACPI +#include <linux/acpi.h> +int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp); +bool pciehp_is_native(struct pci_dev *bridge); +int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge); +bool shpchp_is_native(struct pci_dev *bridge); +int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); +int acpi_pci_detect_ejectable(acpi_handle handle); +#else +static inline int pci_get_hp_params(struct pci_dev *dev, + struct hotplug_params *hpp) +{ + return -ENODEV; +} + +static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge) +{ + return 0; +} +static inline bool pciehp_is_native(struct pci_dev *bridge) { return true; } +static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; } +#endif + +static inline bool hotplug_is_native(struct pci_dev *bridge) +{ + return pciehp_is_native(bridge) || shpchp_is_native(bridge); +} +#endif diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h new file mode 100644 index 000000000..c0dd2f749 --- /dev/null +++ b/include/linux/pci_ids.h @@ -0,0 +1,3121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PCI Class, Vendor and Device IDs + * + *
Please keep sorted. + * + * Do not add new entries to this file unless the definitions + * are shared between multiple drivers. + */ +#ifndef _LINUX_PCI_IDS_H +#define _LINUX_PCI_IDS_H + +/* Device classes and subclasses */ + +#define PCI_CLASS_NOT_DEFINED 0x0000 +#define PCI_CLASS_NOT_DEFINED_VGA 0x0001 + +#define PCI_BASE_CLASS_STORAGE 0x01 +#define PCI_CLASS_STORAGE_SCSI 0x0100 +#define PCI_CLASS_STORAGE_IDE 0x0101 +#define PCI_CLASS_STORAGE_FLOPPY 0x0102 +#define PCI_CLASS_STORAGE_IPI 0x0103 +#define PCI_CLASS_STORAGE_RAID 0x0104 +#define PCI_CLASS_STORAGE_SATA 0x0106 +#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 +#define PCI_CLASS_STORAGE_SAS 0x0107 +#define PCI_CLASS_STORAGE_EXPRESS 0x010802 +#define PCI_CLASS_STORAGE_OTHER 0x0180 + + +#define PCI_BASE_CLASS_NETWORK 0x02 +#define PCI_CLASS_NETWORK_ETHERNET 0x0200 +#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 +#define PCI_CLASS_NETWORK_FDDI 0x0202 +#define PCI_CLASS_NETWORK_ATM 0x0203 +#define PCI_CLASS_NETWORK_OTHER 0x0280 + +#define PCI_BASE_CLASS_DISPLAY 0x03 +#define PCI_CLASS_DISPLAY_VGA 0x0300 +#define PCI_CLASS_DISPLAY_XGA 0x0301 +#define PCI_CLASS_DISPLAY_3D 0x0302 +#define PCI_CLASS_DISPLAY_OTHER 0x0380 + +#define PCI_BASE_CLASS_MULTIMEDIA 0x04 +#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 +#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 +#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 +#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 +#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 + +#define PCI_BASE_CLASS_MEMORY 0x05 +#define PCI_CLASS_MEMORY_RAM 0x0500 +#define PCI_CLASS_MEMORY_FLASH 0x0501 +#define PCI_CLASS_MEMORY_OTHER 0x0580 + +#define PCI_BASE_CLASS_BRIDGE 0x06 +#define PCI_CLASS_BRIDGE_HOST 0x0600 +#define PCI_CLASS_BRIDGE_ISA 0x0601 +#define PCI_CLASS_BRIDGE_EISA 0x0602 +#define PCI_CLASS_BRIDGE_MC 0x0603 +#define PCI_CLASS_BRIDGE_PCI 0x0604 +#define PCI_CLASS_BRIDGE_PCMCIA 0x0605 +#define PCI_CLASS_BRIDGE_NUBUS 0x0606 +#define PCI_CLASS_BRIDGE_CARDBUS 0x0607 +#define PCI_CLASS_BRIDGE_RACEWAY 0x0608 +#define PCI_CLASS_BRIDGE_OTHER 0x0680 + +#define PCI_BASE_CLASS_COMMUNICATION 0x07 +#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700 +#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701 +#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702 +#define PCI_CLASS_COMMUNICATION_MODEM 0x0703 +#define PCI_CLASS_COMMUNICATION_OTHER 0x0780 + +#define PCI_BASE_CLASS_SYSTEM 0x08 +#define PCI_CLASS_SYSTEM_PIC 0x0800 +#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010 +#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020 +#define PCI_CLASS_SYSTEM_DMA 0x0801 +#define PCI_CLASS_SYSTEM_TIMER 0x0802 +#define PCI_CLASS_SYSTEM_RTC 0x0803 +#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 +#define PCI_CLASS_SYSTEM_SDHCI 0x0805 +#define PCI_CLASS_SYSTEM_OTHER 0x0880 + +#define PCI_BASE_CLASS_INPUT 0x09 +#define PCI_CLASS_INPUT_KEYBOARD 0x0900 +#define PCI_CLASS_INPUT_PEN 0x0901 +#define PCI_CLASS_INPUT_MOUSE 0x0902 +#define PCI_CLASS_INPUT_SCANNER 0x0903 +#define PCI_CLASS_INPUT_GAMEPORT 0x0904 +#define PCI_CLASS_INPUT_OTHER 0x0980 + +#define PCI_BASE_CLASS_DOCKING 0x0a +#define PCI_CLASS_DOCKING_GENERIC 0x0a00 +#define PCI_CLASS_DOCKING_OTHER 0x0a80 + +#define PCI_BASE_CLASS_PROCESSOR 0x0b +#define PCI_CLASS_PROCESSOR_386 0x0b00 +#define PCI_CLASS_PROCESSOR_486 0x0b01 +#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02 +#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10 +#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20 +#define PCI_CLASS_PROCESSOR_MIPS 0x0b30 +#define PCI_CLASS_PROCESSOR_CO 0x0b40 + +#define PCI_BASE_CLASS_SERIAL 0x0c +#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00 +#define PCI_CLASS_SERIAL_FIREWIRE_OHCI 
0x0c0010 +#define PCI_CLASS_SERIAL_ACCESS 0x0c01 +#define PCI_CLASS_SERIAL_SSA 0x0c02 +#define PCI_CLASS_SERIAL_USB 0x0c03 +#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300 +#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 +#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 +#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330 +#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe +#define PCI_CLASS_SERIAL_FIBER 0x0c04 +#define PCI_CLASS_SERIAL_SMBUS 0x0c05 +#define PCI_CLASS_SERIAL_IPMI 0x0c07 +#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700 +#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701 +#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702 + +#define PCI_BASE_CLASS_WIRELESS 0x0d +#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10 +#define PCI_CLASS_WIRELESS_WHCI 0x0d1010 + +#define PCI_BASE_CLASS_INTELLIGENT 0x0e +#define PCI_CLASS_INTELLIGENT_I2O 0x0e00 + +#define PCI_BASE_CLASS_SATELLITE 0x0f +#define PCI_CLASS_SATELLITE_TV 0x0f00 +#define PCI_CLASS_SATELLITE_AUDIO 0x0f01 +#define PCI_CLASS_SATELLITE_VOICE 0x0f03 +#define PCI_CLASS_SATELLITE_DATA 0x0f04 + +#define PCI_BASE_CLASS_CRYPT 0x10 +#define PCI_CLASS_CRYPT_NETWORK 0x1000 +#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001 +#define PCI_CLASS_CRYPT_OTHER 0x1080 + +#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11 +#define PCI_CLASS_SP_DPIO 0x1100 +#define PCI_CLASS_SP_OTHER 0x1180 + +#define PCI_CLASS_OTHERS 0xff + +/* Vendors and devices. Sort key: vendor first, device next. */ + +#define PCI_VENDOR_ID_LOONGSON 0x0014 + +#define PCI_VENDOR_ID_TTTECH 0x0357 +#define PCI_DEVICE_ID_TTTECH_MC322 0x000a + +#define PCI_VENDOR_ID_DYNALINK 0x0675 +#define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702 + +#define PCI_VENDOR_ID_UBIQUITI 0x0777 + +#define PCI_VENDOR_ID_BERKOM 0x0871 +#define PCI_DEVICE_ID_BERKOM_A1T 0xffa1 +#define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2 +#define PCI_DEVICE_ID_BERKOM_A4T 0xffa4 +#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8 + +#define PCI_VENDOR_ID_COMPAQ 0x0e11 +#define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508 +#define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc +#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10 +#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32 +#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34 +#define PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE 0xae33 +#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35 +#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40 +#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43 +#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011 +#define PCI_DEVICE_ID_COMPAQ_CISS 0xb060 +#define PCI_DEVICE_ID_COMPAQ_CISSB 0xb178 +#define PCI_DEVICE_ID_COMPAQ_CISSC 0x46 +#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130 +#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150 + +#define PCI_VENDOR_ID_NCR 0x1000 +#define PCI_VENDOR_ID_LSI_LOGIC 0x1000 +#define PCI_DEVICE_ID_NCR_53C810 0x0001 +#define PCI_DEVICE_ID_NCR_53C820 0x0002 +#define PCI_DEVICE_ID_NCR_53C825 0x0003 +#define PCI_DEVICE_ID_NCR_53C815 0x0004 +#define PCI_DEVICE_ID_LSI_53C810AP 0x0005 +#define PCI_DEVICE_ID_NCR_53C860 0x0006 +#define PCI_DEVICE_ID_LSI_53C1510 0x000a +#define PCI_DEVICE_ID_NCR_53C896 0x000b +#define PCI_DEVICE_ID_NCR_53C895 0x000c +#define PCI_DEVICE_ID_NCR_53C885 0x000d +#define PCI_DEVICE_ID_NCR_53C875 0x000f +#define PCI_DEVICE_ID_NCR_53C1510 0x0010 +#define PCI_DEVICE_ID_LSI_53C895A 0x0012 +#define PCI_DEVICE_ID_LSI_53C875A 0x0013 +#define PCI_DEVICE_ID_LSI_53C1010_33 0x0020 +#define PCI_DEVICE_ID_LSI_53C1010_66 0x0021 +#define PCI_DEVICE_ID_LSI_53C1030 0x0030 +#define PCI_DEVICE_ID_LSI_1030_53C1035 0x0032 +#define PCI_DEVICE_ID_LSI_53C1035 0x0040 +#define PCI_DEVICE_ID_NCR_53C875J 0x008f +#define PCI_DEVICE_ID_LSI_FC909 0x0621 +#define 
PCI_DEVICE_ID_LSI_FC929 0x0622 +#define PCI_DEVICE_ID_LSI_FC929_LAN 0x0623 +#define PCI_DEVICE_ID_LSI_FC919 0x0624 +#define PCI_DEVICE_ID_LSI_FC919_LAN 0x0625 +#define PCI_DEVICE_ID_LSI_FC929X 0x0626 +#define PCI_DEVICE_ID_LSI_FC939X 0x0642 +#define PCI_DEVICE_ID_LSI_FC949X 0x0640 +#define PCI_DEVICE_ID_LSI_FC949ES 0x0646 +#define PCI_DEVICE_ID_LSI_FC919X 0x0628 +#define PCI_DEVICE_ID_NCR_YELLOWFIN 0x0701 +#define PCI_DEVICE_ID_LSI_61C102 0x0901 +#define PCI_DEVICE_ID_LSI_63C815 0x1000 +#define PCI_DEVICE_ID_LSI_SAS1064 0x0050 +#define PCI_DEVICE_ID_LSI_SAS1064R 0x0411 +#define PCI_DEVICE_ID_LSI_SAS1066 0x005E +#define PCI_DEVICE_ID_LSI_SAS1068 0x0054 +#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C +#define PCI_DEVICE_ID_LSI_SAS1064E 0x0056 +#define PCI_DEVICE_ID_LSI_SAS1066E 0x005A +#define PCI_DEVICE_ID_LSI_SAS1068E 0x0058 +#define PCI_DEVICE_ID_LSI_SAS1078 0x0060 + +#define PCI_VENDOR_ID_ATI 0x1002 +/* Mach64 */ +#define PCI_DEVICE_ID_ATI_68800 0x4158 +#define PCI_DEVICE_ID_ATI_215CT222 0x4354 +#define PCI_DEVICE_ID_ATI_210888CX 0x4358 +#define PCI_DEVICE_ID_ATI_215ET222 0x4554 +/* Mach64 / Rage */ +#define PCI_DEVICE_ID_ATI_215GB 0x4742 +#define PCI_DEVICE_ID_ATI_215GD 0x4744 +#define PCI_DEVICE_ID_ATI_215GI 0x4749 +#define PCI_DEVICE_ID_ATI_215GP 0x4750 +#define PCI_DEVICE_ID_ATI_215GQ 0x4751 +#define PCI_DEVICE_ID_ATI_215XL 0x4752 +#define PCI_DEVICE_ID_ATI_215GT 0x4754 +#define PCI_DEVICE_ID_ATI_215GTB 0x4755 +#define PCI_DEVICE_ID_ATI_215_IV 0x4756 +#define PCI_DEVICE_ID_ATI_215_IW 0x4757 +#define PCI_DEVICE_ID_ATI_215_IZ 0x475A +#define PCI_DEVICE_ID_ATI_210888GX 0x4758 +#define PCI_DEVICE_ID_ATI_215_LB 0x4c42 +#define PCI_DEVICE_ID_ATI_215_LD 0x4c44 +#define PCI_DEVICE_ID_ATI_215_LG 0x4c47 +#define PCI_DEVICE_ID_ATI_215_LI 0x4c49 +#define PCI_DEVICE_ID_ATI_215_LM 0x4c4D +#define PCI_DEVICE_ID_ATI_215_LN 0x4c4E +#define PCI_DEVICE_ID_ATI_215_LR 0x4c52 +#define PCI_DEVICE_ID_ATI_215_LS 0x4c53 +#define PCI_DEVICE_ID_ATI_264_LT 0x4c54 +/* Mach64 VT */ +#define PCI_DEVICE_ID_ATI_264VT 0x5654 +#define PCI_DEVICE_ID_ATI_264VU 0x5655 +#define PCI_DEVICE_ID_ATI_264VV 0x5656 +/* Rage128 GL */ +#define PCI_DEVICE_ID_ATI_RAGE128_RE 0x5245 +#define PCI_DEVICE_ID_ATI_RAGE128_RF 0x5246 +#define PCI_DEVICE_ID_ATI_RAGE128_RG 0x5247 +/* Rage128 VR */ +#define PCI_DEVICE_ID_ATI_RAGE128_RK 0x524b +#define PCI_DEVICE_ID_ATI_RAGE128_RL 0x524c +#define PCI_DEVICE_ID_ATI_RAGE128_SE 0x5345 +#define PCI_DEVICE_ID_ATI_RAGE128_SF 0x5346 +#define PCI_DEVICE_ID_ATI_RAGE128_SG 0x5347 +#define PCI_DEVICE_ID_ATI_RAGE128_SH 0x5348 +#define PCI_DEVICE_ID_ATI_RAGE128_SK 0x534b +#define PCI_DEVICE_ID_ATI_RAGE128_SL 0x534c +#define PCI_DEVICE_ID_ATI_RAGE128_SM 0x534d +#define PCI_DEVICE_ID_ATI_RAGE128_SN 0x534e +/* Rage128 Ultra */ +#define PCI_DEVICE_ID_ATI_RAGE128_TF 0x5446 +#define PCI_DEVICE_ID_ATI_RAGE128_TL 0x544c +#define PCI_DEVICE_ID_ATI_RAGE128_TR 0x5452 +#define PCI_DEVICE_ID_ATI_RAGE128_TS 0x5453 +#define PCI_DEVICE_ID_ATI_RAGE128_TT 0x5454 +#define PCI_DEVICE_ID_ATI_RAGE128_TU 0x5455 +/* Rage128 M3 */ +#define PCI_DEVICE_ID_ATI_RAGE128_LE 0x4c45 +#define PCI_DEVICE_ID_ATI_RAGE128_LF 0x4c46 +/* Rage128 M4 */ +#define PCI_DEVICE_ID_ATI_RAGE128_MF 0x4d46 +#define PCI_DEVICE_ID_ATI_RAGE128_ML 0x4d4c +/* Rage128 Pro GL */ +#define PCI_DEVICE_ID_ATI_RAGE128_PA 0x5041 +#define PCI_DEVICE_ID_ATI_RAGE128_PB 0x5042 +#define PCI_DEVICE_ID_ATI_RAGE128_PC 0x5043 +#define PCI_DEVICE_ID_ATI_RAGE128_PD 0x5044 +#define PCI_DEVICE_ID_ATI_RAGE128_PE 0x5045 +#define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046 +/* Rage128 Pro VR */ 
+#define PCI_DEVICE_ID_ATI_RAGE128_PG 0x5047 +#define PCI_DEVICE_ID_ATI_RAGE128_PH 0x5048 +#define PCI_DEVICE_ID_ATI_RAGE128_PI 0x5049 +#define PCI_DEVICE_ID_ATI_RAGE128_PJ 0x504A +#define PCI_DEVICE_ID_ATI_RAGE128_PK 0x504B +#define PCI_DEVICE_ID_ATI_RAGE128_PL 0x504C +#define PCI_DEVICE_ID_ATI_RAGE128_PM 0x504D +#define PCI_DEVICE_ID_ATI_RAGE128_PN 0x504E +#define PCI_DEVICE_ID_ATI_RAGE128_PO 0x504F +#define PCI_DEVICE_ID_ATI_RAGE128_PP 0x5050 +#define PCI_DEVICE_ID_ATI_RAGE128_PQ 0x5051 +#define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052 +#define PCI_DEVICE_ID_ATI_RAGE128_PS 0x5053 +#define PCI_DEVICE_ID_ATI_RAGE128_PT 0x5054 +#define PCI_DEVICE_ID_ATI_RAGE128_PU 0x5055 +#define PCI_DEVICE_ID_ATI_RAGE128_PV 0x5056 +#define PCI_DEVICE_ID_ATI_RAGE128_PW 0x5057 +#define PCI_DEVICE_ID_ATI_RAGE128_PX 0x5058 +/* Rage128 M4 */ +/* Radeon R100 */ +#define PCI_DEVICE_ID_ATI_RADEON_QD 0x5144 +#define PCI_DEVICE_ID_ATI_RADEON_QE 0x5145 +#define PCI_DEVICE_ID_ATI_RADEON_QF 0x5146 +#define PCI_DEVICE_ID_ATI_RADEON_QG 0x5147 +/* Radeon RV100 (VE) */ +#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 +#define PCI_DEVICE_ID_ATI_RADEON_QZ 0x515a +/* Radeon R200 (8500) */ +#define PCI_DEVICE_ID_ATI_RADEON_QL 0x514c +#define PCI_DEVICE_ID_ATI_RADEON_QN 0x514e +#define PCI_DEVICE_ID_ATI_RADEON_QO 0x514f +#define PCI_DEVICE_ID_ATI_RADEON_Ql 0x516c +#define PCI_DEVICE_ID_ATI_RADEON_BB 0x4242 +/* Radeon R200 (9100) */ +#define PCI_DEVICE_ID_ATI_RADEON_QM 0x514d +/* Radeon RV200 (7500) */ +#define PCI_DEVICE_ID_ATI_RADEON_QW 0x5157 +#define PCI_DEVICE_ID_ATI_RADEON_QX 0x5158 +/* Radeon NV-100 */ +/* Radeon RV250 (9000) */ +#define PCI_DEVICE_ID_ATI_RADEON_Id 0x4964 +#define PCI_DEVICE_ID_ATI_RADEON_Ie 0x4965 +#define PCI_DEVICE_ID_ATI_RADEON_If 0x4966 +#define PCI_DEVICE_ID_ATI_RADEON_Ig 0x4967 +/* Radeon RV280 (9200) */ +#define PCI_DEVICE_ID_ATI_RADEON_Ya 0x5961 +#define PCI_DEVICE_ID_ATI_RADEON_Yd 0x5964 +/* Radeon R300 (9500) */ +/* Radeon R300 (9700) */ +#define PCI_DEVICE_ID_ATI_RADEON_ND 0x4e44 +#define PCI_DEVICE_ID_ATI_RADEON_NE 0x4e45 +#define PCI_DEVICE_ID_ATI_RADEON_NF 0x4e46 +#define PCI_DEVICE_ID_ATI_RADEON_NG 0x4e47 +/* Radeon R350 (9800) */ +/* Radeon RV350 (9600) */ +/* Radeon M6 */ +#define PCI_DEVICE_ID_ATI_RADEON_LY 0x4c59 +#define PCI_DEVICE_ID_ATI_RADEON_LZ 0x4c5a +/* Radeon M7 */ +#define PCI_DEVICE_ID_ATI_RADEON_LW 0x4c57 +#define PCI_DEVICE_ID_ATI_RADEON_LX 0x4c58 +/* Radeon M9 */ +#define PCI_DEVICE_ID_ATI_RADEON_Ld 0x4c64 +#define PCI_DEVICE_ID_ATI_RADEON_Le 0x4c65 +#define PCI_DEVICE_ID_ATI_RADEON_Lf 0x4c66 +#define PCI_DEVICE_ID_ATI_RADEON_Lg 0x4c67 +/* Radeon */ +/* RadeonIGP */ +#define PCI_DEVICE_ID_ATI_RS100 0xcab0 +#define PCI_DEVICE_ID_ATI_RS200 0xcab2 +#define PCI_DEVICE_ID_ATI_RS200_B 0xcbb2 +#define PCI_DEVICE_ID_ATI_RS250 0xcab3 +#define PCI_DEVICE_ID_ATI_RS300_100 0x5830 +#define PCI_DEVICE_ID_ATI_RS300_133 0x5831 +#define PCI_DEVICE_ID_ATI_RS300_166 0x5832 +#define PCI_DEVICE_ID_ATI_RS300_200 0x5833 +#define PCI_DEVICE_ID_ATI_RS350_100 0x7830 +#define PCI_DEVICE_ID_ATI_RS350_133 0x7831 +#define PCI_DEVICE_ID_ATI_RS350_166 0x7832 +#define PCI_DEVICE_ID_ATI_RS350_200 0x7833 +#define PCI_DEVICE_ID_ATI_RS400_100 0x5a30 +#define PCI_DEVICE_ID_ATI_RS400_133 0x5a31 +#define PCI_DEVICE_ID_ATI_RS400_166 0x5a32 +#define PCI_DEVICE_ID_ATI_RS400_200 0x5a33 +#define PCI_DEVICE_ID_ATI_RS480 0x5950 +/* ATI IXP Chipset */ +#define PCI_DEVICE_ID_ATI_IXP200_IDE 0x4349 +#define PCI_DEVICE_ID_ATI_IXP200_SMBUS 0x4353 +#define PCI_DEVICE_ID_ATI_IXP300_SMBUS 0x4363 +#define PCI_DEVICE_ID_ATI_IXP300_IDE 
0x4369 +#define PCI_DEVICE_ID_ATI_IXP300_SATA 0x436e +#define PCI_DEVICE_ID_ATI_IXP400_SMBUS 0x4372 +#define PCI_DEVICE_ID_ATI_IXP400_IDE 0x4376 +#define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379 +#define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a +#define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380 +#define PCI_DEVICE_ID_ATI_SBX00_SMBUS 0x4385 +#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c +#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390 +#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c + +#define PCI_VENDOR_ID_VLSI 0x1004 +#define PCI_DEVICE_ID_VLSI_82C592 0x0005 +#define PCI_DEVICE_ID_VLSI_82C593 0x0006 +#define PCI_DEVICE_ID_VLSI_82C594 0x0007 +#define PCI_DEVICE_ID_VLSI_82C597 0x0009 +#define PCI_DEVICE_ID_VLSI_82C541 0x000c +#define PCI_DEVICE_ID_VLSI_82C543 0x000d +#define PCI_DEVICE_ID_VLSI_82C532 0x0101 +#define PCI_DEVICE_ID_VLSI_82C534 0x0102 +#define PCI_DEVICE_ID_VLSI_82C535 0x0104 +#define PCI_DEVICE_ID_VLSI_82C147 0x0105 +#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 + +/* AMD RD890 Chipset */ +#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23 + +#define PCI_VENDOR_ID_ADL 0x1005 +#define PCI_DEVICE_ID_ADL_2301 0x2301 + +#define PCI_VENDOR_ID_NS 0x100b +#define PCI_DEVICE_ID_NS_87415 0x0002 +#define PCI_DEVICE_ID_NS_87560_LIO 0x000e +#define PCI_DEVICE_ID_NS_87560_USB 0x0012 +#define PCI_DEVICE_ID_NS_83815 0x0020 +#define PCI_DEVICE_ID_NS_83820 0x0022 +#define PCI_DEVICE_ID_NS_CS5535_ISA 0x002b +#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d +#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e +#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f +#define PCI_DEVICE_ID_NS_GX_VIDEO 0x0030 +#define PCI_DEVICE_ID_NS_SATURN 0x0035 +#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500 +#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501 +#define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502 +#define PCI_DEVICE_ID_NS_SCx200_AUDIO 0x0503 +#define PCI_DEVICE_ID_NS_SCx200_VIDEO 0x0504 +#define PCI_DEVICE_ID_NS_SCx200_XBUS 0x0505 +#define PCI_DEVICE_ID_NS_SC1100_BRIDGE 0x0510 +#define PCI_DEVICE_ID_NS_SC1100_SMI 0x0511 +#define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515 +#define PCI_DEVICE_ID_NS_87410 0xd001 + +#define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE 0x0028 + +#define PCI_VENDOR_ID_TSENG 0x100c +#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 +#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205 +#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206 +#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207 +#define PCI_DEVICE_ID_TSENG_ET6000 0x3208 + +#define PCI_VENDOR_ID_WEITEK 0x100e +#define PCI_DEVICE_ID_WEITEK_P9000 0x9001 +#define PCI_DEVICE_ID_WEITEK_P9100 0x9100 + +#define PCI_VENDOR_ID_DEC 0x1011 +#define PCI_DEVICE_ID_DEC_BRD 0x0001 +#define PCI_DEVICE_ID_DEC_TULIP 0x0002 +#define PCI_DEVICE_ID_DEC_TGA 0x0004 +#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009 +#define PCI_DEVICE_ID_DEC_TGA2 0x000D +#define PCI_DEVICE_ID_DEC_FDDI 0x000F +#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014 +#define PCI_DEVICE_ID_DEC_21142 0x0019 +#define PCI_DEVICE_ID_DEC_21052 0x0021 +#define PCI_DEVICE_ID_DEC_21150 0x0022 +#define PCI_DEVICE_ID_DEC_21152 0x0024 +#define PCI_DEVICE_ID_DEC_21153 0x0025 +#define PCI_DEVICE_ID_DEC_21154 0x0026 +#define PCI_DEVICE_ID_DEC_21285 0x1065 +#define PCI_DEVICE_ID_COMPAQ_42XX 0x0046 + +#define PCI_VENDOR_ID_CIRRUS 0x1013 +#define PCI_DEVICE_ID_CIRRUS_7548 0x0038 +#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0 +#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4 +#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8 +#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac +#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8 +#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc +#define PCI_DEVICE_ID_CIRRUS_5462 0x00d0 +#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4 
+#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6 +#define PCI_DEVICE_ID_CIRRUS_6729 0x1100 +#define PCI_DEVICE_ID_CIRRUS_6832 0x1110 +#define PCI_DEVICE_ID_CIRRUS_7543 0x1202 +#define PCI_DEVICE_ID_CIRRUS_4610 0x6001 +#define PCI_DEVICE_ID_CIRRUS_4612 0x6003 +#define PCI_DEVICE_ID_CIRRUS_4615 0x6004 + +#define PCI_VENDOR_ID_IBM 0x1014 +#define PCI_DEVICE_ID_IBM_TR 0x0018 +#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e +#define PCI_DEVICE_ID_IBM_CPC710_PCI64 0x00fc +#define PCI_DEVICE_ID_IBM_SNIPE 0x0180 +#define PCI_DEVICE_ID_IBM_CITRINE 0x028C +#define PCI_DEVICE_ID_IBM_GEMSTONE 0xB166 +#define PCI_DEVICE_ID_IBM_OBSIDIAN 0x02BD +#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1 0x0031 +#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219 +#define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A +#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251 +#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 +#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 + +#define PCI_SUBVENDOR_ID_IBM 0x1014 +#define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT 0x03d4 + +#define PCI_VENDOR_ID_UNISYS 0x1018 +#define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C + +#define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */ +#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005 + +#define PCI_VENDOR_ID_WD 0x101c +#define PCI_DEVICE_ID_WD_90C 0xc24a + +#define PCI_VENDOR_ID_AMI 0x101e +#define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960 +#define PCI_DEVICE_ID_AMI_MEGARAID 0x9010 +#define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060 + +#define PCI_VENDOR_ID_AMD 0x1022 +#define PCI_DEVICE_ID_AMD_K8_NB 0x1100 +#define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP 0x1101 +#define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL 0x1102 +#define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103 +#define PCI_DEVICE_ID_AMD_10H_NB_HT 0x1200 +#define PCI_DEVICE_ID_AMD_10H_NB_MAP 0x1201 +#define PCI_DEVICE_ID_AMD_10H_NB_DRAM 0x1202 +#define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203 +#define PCI_DEVICE_ID_AMD_10H_NB_LINK 0x1204 +#define PCI_DEVICE_ID_AMD_11H_NB_HT 0x1300 +#define PCI_DEVICE_ID_AMD_11H_NB_MAP 0x1301 +#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 +#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 +#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 +#define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 +#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d +#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e +#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573 +#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F4 0x1574 +#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 +#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 +#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 +#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603 +#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604 +#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605 +#define PCI_DEVICE_ID_AMD_16H_NB_F3 0x1533 +#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534 +#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583 +#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584 +#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb +#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 +#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 +#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 +#define PCI_DEVICE_ID_AMD_LANCE 0x2000 +#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 +#define PCI_DEVICE_ID_AMD_SCSI 0x2020 +#define PCI_DEVICE_ID_AMD_SERENADE 0x36c0 +#define PCI_DEVICE_ID_AMD_FE_GATE_7006 0x7006 +#define PCI_DEVICE_ID_AMD_FE_GATE_7007 0x7007 +#define PCI_DEVICE_ID_AMD_FE_GATE_700C 0x700C +#define PCI_DEVICE_ID_AMD_FE_GATE_700E 0x700E +#define PCI_DEVICE_ID_AMD_COBRA_7401 
0x7401 +#define PCI_DEVICE_ID_AMD_VIPER_7409 0x7409 +#define PCI_DEVICE_ID_AMD_VIPER_740B 0x740B +#define PCI_DEVICE_ID_AMD_VIPER_7410 0x7410 +#define PCI_DEVICE_ID_AMD_VIPER_7411 0x7411 +#define PCI_DEVICE_ID_AMD_VIPER_7413 0x7413 +#define PCI_DEVICE_ID_AMD_VIPER_7440 0x7440 +#define PCI_DEVICE_ID_AMD_OPUS_7441 0x7441 +#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 +#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 +#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 +#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 +#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 +#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 +#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 +#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a +#define PCI_DEVICE_ID_AMD_8111_SMBUS 0x746b +#define PCI_DEVICE_ID_AMD_8111_AUDIO 0x746d +#define PCI_DEVICE_ID_AMD_8151_0 0x7454 +#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 +#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 +#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 +#define PCI_DEVICE_ID_AMD_NL_USB 0x7912 +#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F +#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 +#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 +#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093 +#define PCI_DEVICE_ID_AMD_CS5536_OHC 0x2094 +#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095 +#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096 +#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097 +#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092 +#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A +#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081 +#define PCI_DEVICE_ID_AMD_LX_AES 0x2082 +#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800 +#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b +#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c +#define PCI_DEVICE_ID_AMD_KERNCZ_SMBUS 0x790b + +#define PCI_VENDOR_ID_TRIDENT 0x1023 +#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000 +#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001 +#define PCI_DEVICE_ID_TRIDENT_9320 0x9320 +#define PCI_DEVICE_ID_TRIDENT_9388 0x9388 +#define PCI_DEVICE_ID_TRIDENT_9397 0x9397 +#define PCI_DEVICE_ID_TRIDENT_939A 0x939A +#define PCI_DEVICE_ID_TRIDENT_9520 0x9520 +#define PCI_DEVICE_ID_TRIDENT_9525 0x9525 +#define PCI_DEVICE_ID_TRIDENT_9420 0x9420 +#define PCI_DEVICE_ID_TRIDENT_9440 0x9440 +#define PCI_DEVICE_ID_TRIDENT_9660 0x9660 +#define PCI_DEVICE_ID_TRIDENT_9750 0x9750 +#define PCI_DEVICE_ID_TRIDENT_9850 0x9850 +#define PCI_DEVICE_ID_TRIDENT_9880 0x9880 +#define PCI_DEVICE_ID_TRIDENT_8400 0x8400 +#define PCI_DEVICE_ID_TRIDENT_8420 0x8420 +#define PCI_DEVICE_ID_TRIDENT_8500 0x8500 + +#define PCI_VENDOR_ID_AI 0x1025 +#define PCI_DEVICE_ID_AI_M1435 0x1435 + +#define PCI_VENDOR_ID_DELL 0x1028 +#define PCI_DEVICE_ID_DELL_RACIII 0x0008 +#define PCI_DEVICE_ID_DELL_RAC4 0x0012 +#define PCI_DEVICE_ID_DELL_PERC5 0x0015 + +#define PCI_VENDOR_ID_MATROX 0x102B +#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 +#define PCI_DEVICE_ID_MATROX_MIL 0x0519 +#define PCI_DEVICE_ID_MATROX_MYS 0x051A +#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b +#define PCI_DEVICE_ID_MATROX_MYS_AGP 0x051e +#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f +#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10 +#define PCI_DEVICE_ID_MATROX_G100_MM 0x1000 +#define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001 +#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520 +#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521 +#define PCI_DEVICE_ID_MATROX_G400 0x0525 +#define PCI_DEVICE_ID_MATROX_G200EV_PCI 0x0530 +#define PCI_DEVICE_ID_MATROX_G550 0x2527 +#define PCI_DEVICE_ID_MATROX_VIA 0x4536 + +#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2 + +#define PCI_VENDOR_ID_CT 0x102c +#define PCI_DEVICE_ID_CT_69000 
0x00c0 +#define PCI_DEVICE_ID_CT_65545 0x00d8 +#define PCI_DEVICE_ID_CT_65548 0x00dc +#define PCI_DEVICE_ID_CT_65550 0x00e0 +#define PCI_DEVICE_ID_CT_65554 0x00e4 +#define PCI_DEVICE_ID_CT_65555 0x00e5 + +#define PCI_VENDOR_ID_MIRO 0x1031 +#define PCI_DEVICE_ID_MIRO_36050 0x5601 +#define PCI_DEVICE_ID_MIRO_DC10PLUS 0x7efe +#define PCI_DEVICE_ID_MIRO_DC30PLUS 0xd801 + +#define PCI_VENDOR_ID_NEC 0x1033 +#define PCI_DEVICE_ID_NEC_CBUS_1 0x0001 /* PCI-Cbus Bridge */ +#define PCI_DEVICE_ID_NEC_LOCAL 0x0002 /* Local Bridge */ +#define PCI_DEVICE_ID_NEC_ATM 0x0003 /* ATM LAN Controller */ +#define PCI_DEVICE_ID_NEC_R4000 0x0004 /* R4000 Bridge */ +#define PCI_DEVICE_ID_NEC_486 0x0005 /* 486 Like Peripheral Bus Bridge */ +#define PCI_DEVICE_ID_NEC_ACCEL_1 0x0006 /* Graphic Accelerator */ +#define PCI_DEVICE_ID_NEC_UXBUS 0x0007 /* UX-Bus Bridge */ +#define PCI_DEVICE_ID_NEC_ACCEL_2 0x0008 /* Graphic Accelerator */ +#define PCI_DEVICE_ID_NEC_GRAPH 0x0009 /* PCI-CoreGraph Bridge */ +#define PCI_DEVICE_ID_NEC_VL 0x0016 /* PCI-VL Bridge */ +#define PCI_DEVICE_ID_NEC_STARALPHA2 0x002c /* STAR ALPHA2 */ +#define PCI_DEVICE_ID_NEC_CBUS_2 0x002d /* PCI-Cbus Bridge */ +#define PCI_DEVICE_ID_NEC_USB 0x0035 /* PCI-USB Host */ +#define PCI_DEVICE_ID_NEC_CBUS_3 0x003b +#define PCI_DEVICE_ID_NEC_NAPCCARD 0x003e +#define PCI_DEVICE_ID_NEC_PCX2 0x0046 /* PowerVR */ +#define PCI_DEVICE_ID_NEC_VRC5476 0x009b +#define PCI_DEVICE_ID_NEC_VRC4173 0x00a5 +#define PCI_DEVICE_ID_NEC_VRC5477_AC97 0x00a6 +#define PCI_DEVICE_ID_NEC_PC9821CS01 0x800c /* PC-9821-CS01 */ +#define PCI_DEVICE_ID_NEC_PC9821NRB06 0x800d /* PC-9821NR-B06 */ + +#define PCI_VENDOR_ID_FD 0x1036 +#define PCI_DEVICE_ID_FD_36C70 0x0000 + +#define PCI_VENDOR_ID_SI 0x1039 +#define PCI_DEVICE_ID_SI_5591_AGP 0x0001 +#define PCI_DEVICE_ID_SI_6202 0x0002 +#define PCI_DEVICE_ID_SI_503 0x0008 +#define PCI_DEVICE_ID_SI_ACPI 0x0009 +#define PCI_DEVICE_ID_SI_SMBUS 0x0016 +#define PCI_DEVICE_ID_SI_LPC 0x0018 +#define PCI_DEVICE_ID_SI_5597_VGA 0x0200 +#define PCI_DEVICE_ID_SI_6205 0x0205 +#define PCI_DEVICE_ID_SI_501 0x0406 +#define PCI_DEVICE_ID_SI_496 0x0496 +#define PCI_DEVICE_ID_SI_300 0x0300 +#define PCI_DEVICE_ID_SI_315H 0x0310 +#define PCI_DEVICE_ID_SI_315 0x0315 +#define PCI_DEVICE_ID_SI_315PRO 0x0325 +#define PCI_DEVICE_ID_SI_530 0x0530 +#define PCI_DEVICE_ID_SI_540 0x0540 +#define PCI_DEVICE_ID_SI_550 0x0550 +#define PCI_DEVICE_ID_SI_540_VGA 0x5300 +#define PCI_DEVICE_ID_SI_550_VGA 0x5315 +#define PCI_DEVICE_ID_SI_620 0x0620 +#define PCI_DEVICE_ID_SI_630 0x0630 +#define PCI_DEVICE_ID_SI_633 0x0633 +#define PCI_DEVICE_ID_SI_635 0x0635 +#define PCI_DEVICE_ID_SI_640 0x0640 +#define PCI_DEVICE_ID_SI_645 0x0645 +#define PCI_DEVICE_ID_SI_646 0x0646 +#define PCI_DEVICE_ID_SI_648 0x0648 +#define PCI_DEVICE_ID_SI_650 0x0650 +#define PCI_DEVICE_ID_SI_651 0x0651 +#define PCI_DEVICE_ID_SI_655 0x0655 +#define PCI_DEVICE_ID_SI_661 0x0661 +#define PCI_DEVICE_ID_SI_730 0x0730 +#define PCI_DEVICE_ID_SI_733 0x0733 +#define PCI_DEVICE_ID_SI_630_VGA 0x6300 +#define PCI_DEVICE_ID_SI_735 0x0735 +#define PCI_DEVICE_ID_SI_740 0x0740 +#define PCI_DEVICE_ID_SI_741 0x0741 +#define PCI_DEVICE_ID_SI_745 0x0745 +#define PCI_DEVICE_ID_SI_746 0x0746 +#define PCI_DEVICE_ID_SI_755 0x0755 +#define PCI_DEVICE_ID_SI_760 0x0760 +#define PCI_DEVICE_ID_SI_900 0x0900 +#define PCI_DEVICE_ID_SI_961 0x0961 +#define PCI_DEVICE_ID_SI_962 0x0962 +#define PCI_DEVICE_ID_SI_963 0x0963 +#define PCI_DEVICE_ID_SI_965 0x0965 +#define PCI_DEVICE_ID_SI_966 0x0966 +#define PCI_DEVICE_ID_SI_968 0x0968 +#define 
PCI_DEVICE_ID_SI_1180 0x1180 +#define PCI_DEVICE_ID_SI_5511 0x5511 +#define PCI_DEVICE_ID_SI_5513 0x5513 +#define PCI_DEVICE_ID_SI_5517 0x5517 +#define PCI_DEVICE_ID_SI_5518 0x5518 +#define PCI_DEVICE_ID_SI_5571 0x5571 +#define PCI_DEVICE_ID_SI_5581 0x5581 +#define PCI_DEVICE_ID_SI_5582 0x5582 +#define PCI_DEVICE_ID_SI_5591 0x5591 +#define PCI_DEVICE_ID_SI_5596 0x5596 +#define PCI_DEVICE_ID_SI_5597 0x5597 +#define PCI_DEVICE_ID_SI_5598 0x5598 +#define PCI_DEVICE_ID_SI_5600 0x5600 +#define PCI_DEVICE_ID_SI_7012 0x7012 +#define PCI_DEVICE_ID_SI_7013 0x7013 +#define PCI_DEVICE_ID_SI_7016 0x7016 +#define PCI_DEVICE_ID_SI_7018 0x7018 + +#define PCI_VENDOR_ID_HP 0x103c +#define PCI_VENDOR_ID_HP_3PAR 0x1590 +#define PCI_DEVICE_ID_HP_VISUALIZE_EG 0x1005 +#define PCI_DEVICE_ID_HP_VISUALIZE_FX6 0x1006 +#define PCI_DEVICE_ID_HP_VISUALIZE_FX4 0x1008 +#define PCI_DEVICE_ID_HP_VISUALIZE_FX2 0x100a +#define PCI_DEVICE_ID_HP_TACHYON 0x1028 +#define PCI_DEVICE_ID_HP_TACHLITE 0x1029 +#define PCI_DEVICE_ID_HP_J2585A 0x1030 +#define PCI_DEVICE_ID_HP_J2585B 0x1031 +#define PCI_DEVICE_ID_HP_J2973A 0x1040 +#define PCI_DEVICE_ID_HP_J2970A 0x1042 +#define PCI_DEVICE_ID_HP_DIVA 0x1048 +#define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049 +#define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A +#define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B +#define PCI_DEVICE_ID_HP_REO_IOC 0x10f1 +#define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b +#define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223 +#define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226 +#define PCI_DEVICE_ID_HP_DIVA_POWERBAR 0x1227 +#define PCI_DEVICE_ID_HP_ZX1_IOC 0x122a +#define PCI_DEVICE_ID_HP_PCIX_LBA 0x122e +#define PCI_DEVICE_ID_HP_SX1000_IOC 0x127c +#define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282 +#define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290 +#define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301 +#define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a +#define PCI_DEVICE_ID_HP_CISSA 0x3220 +#define PCI_DEVICE_ID_HP_CISSC 0x3230 +#define PCI_DEVICE_ID_HP_CISSD 0x3238 +#define PCI_DEVICE_ID_HP_CISSE 0x323a +#define PCI_DEVICE_ID_HP_CISSF 0x323b +#define PCI_DEVICE_ID_HP_CISSH 0x323c +#define PCI_DEVICE_ID_HP_CISSI 0x3239 +#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 + +#define PCI_VENDOR_ID_PCTECH 0x1042 +#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000 +#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001 +#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020 + +#define PCI_VENDOR_ID_ASUSTEK 0x1043 +#define PCI_DEVICE_ID_ASUSTEK_0675 0x0675 + +#define PCI_VENDOR_ID_DPT 0x1044 +#define PCI_DEVICE_ID_DPT 0xa400 + +#define PCI_VENDOR_ID_OPTI 0x1045 +#define PCI_DEVICE_ID_OPTI_82C558 0xc558 +#define PCI_DEVICE_ID_OPTI_82C621 0xc621 +#define PCI_DEVICE_ID_OPTI_82C700 0xc700 +#define PCI_DEVICE_ID_OPTI_82C825 0xd568 + +#define PCI_VENDOR_ID_ELSA 0x1048 +#define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000 +#define PCI_DEVICE_ID_ELSA_QS3000 0x3000 + +#define PCI_VENDOR_ID_STMICRO 0x104A +#define PCI_DEVICE_ID_STMICRO_USB_HOST 0xCC00 +#define PCI_DEVICE_ID_STMICRO_USB_OHCI 0xCC01 +#define PCI_DEVICE_ID_STMICRO_USB_OTG 0xCC02 +#define PCI_DEVICE_ID_STMICRO_UART_HWFC 0xCC03 +#define PCI_DEVICE_ID_STMICRO_UART_NO_HWFC 0xCC04 +#define PCI_DEVICE_ID_STMICRO_SOC_DMA 0xCC05 +#define PCI_DEVICE_ID_STMICRO_SATA 0xCC06 +#define PCI_DEVICE_ID_STMICRO_I2C 0xCC07 +#define PCI_DEVICE_ID_STMICRO_SPI_HS 0xCC08 +#define PCI_DEVICE_ID_STMICRO_MAC 0xCC09 +#define PCI_DEVICE_ID_STMICRO_SDIO_EMMC 0xCC0A +#define PCI_DEVICE_ID_STMICRO_SDIO 0xCC0B +#define PCI_DEVICE_ID_STMICRO_GPIO 0xCC0C +#define PCI_DEVICE_ID_STMICRO_VIP 0xCC0D +#define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_DMA 0xCC0E 
+#define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_SRCS 0xCC0F +#define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_MSPS 0xCC10 +#define PCI_DEVICE_ID_STMICRO_CAN 0xCC11 +#define PCI_DEVICE_ID_STMICRO_MLB 0xCC12 +#define PCI_DEVICE_ID_STMICRO_DBP 0xCC13 +#define PCI_DEVICE_ID_STMICRO_SATA_PHY 0xCC14 +#define PCI_DEVICE_ID_STMICRO_ESRAM 0xCC15 +#define PCI_DEVICE_ID_STMICRO_VIC 0xCC16 + +#define PCI_VENDOR_ID_BUSLOGIC 0x104B +#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140 +#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040 +#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130 + +#define PCI_VENDOR_ID_TI 0x104c +#define PCI_DEVICE_ID_TI_TVP4020 0x3d07 +#define PCI_DEVICE_ID_TI_4450 0x8011 +#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 +#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033 +#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 +#define PCI_DEVICE_ID_TI_X515 0x8036 +#define PCI_DEVICE_ID_TI_XX12 0x8039 +#define PCI_DEVICE_ID_TI_XX12_FM 0x803b +#define PCI_DEVICE_ID_TI_XIO2000A 0x8231 +#define PCI_DEVICE_ID_TI_1130 0xac12 +#define PCI_DEVICE_ID_TI_1031 0xac13 +#define PCI_DEVICE_ID_TI_1131 0xac15 +#define PCI_DEVICE_ID_TI_1250 0xac16 +#define PCI_DEVICE_ID_TI_1220 0xac17 +#define PCI_DEVICE_ID_TI_1221 0xac19 +#define PCI_DEVICE_ID_TI_1210 0xac1a +#define PCI_DEVICE_ID_TI_1450 0xac1b +#define PCI_DEVICE_ID_TI_1225 0xac1c +#define PCI_DEVICE_ID_TI_1251A 0xac1d +#define PCI_DEVICE_ID_TI_1211 0xac1e +#define PCI_DEVICE_ID_TI_1251B 0xac1f +#define PCI_DEVICE_ID_TI_4410 0xac41 +#define PCI_DEVICE_ID_TI_4451 0xac42 +#define PCI_DEVICE_ID_TI_4510 0xac44 +#define PCI_DEVICE_ID_TI_4520 0xac46 +#define PCI_DEVICE_ID_TI_7510 0xac47 +#define PCI_DEVICE_ID_TI_7610 0xac48 +#define PCI_DEVICE_ID_TI_7410 0xac49 +#define PCI_DEVICE_ID_TI_1410 0xac50 +#define PCI_DEVICE_ID_TI_1420 0xac51 +#define PCI_DEVICE_ID_TI_1451A 0xac52 +#define PCI_DEVICE_ID_TI_1620 0xac54 +#define PCI_DEVICE_ID_TI_1520 0xac55 +#define PCI_DEVICE_ID_TI_1510 0xac56 +#define PCI_DEVICE_ID_TI_X620 0xac8d +#define PCI_DEVICE_ID_TI_X420 0xac8e +#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f +#define PCI_DEVICE_ID_TI_DRA74x 0xb500 +#define PCI_DEVICE_ID_TI_DRA72x 0xb501 + +#define PCI_VENDOR_ID_SONY 0x104d + +/* Winbond have two vendor IDs! 
See 0x10ad as well */ +#define PCI_VENDOR_ID_WINBOND2 0x1050 +#define PCI_DEVICE_ID_WINBOND2_89C940F 0x5a5a +#define PCI_DEVICE_ID_WINBOND2_6692 0x6692 + +#define PCI_VENDOR_ID_ANIGMA 0x1051 +#define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100 + +#define PCI_VENDOR_ID_EFAR 0x1055 +#define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130 +#define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463 + +#define PCI_VENDOR_ID_MOTOROLA 0x1057 +#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001 +#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002 +#define PCI_DEVICE_ID_MOTOROLA_MPC107 0x0004 +#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801 +#define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802 +#define PCI_DEVICE_ID_MOTOROLA_HAWK 0x4803 +#define PCI_DEVICE_ID_MOTOROLA_HARRIER 0x480b +#define PCI_DEVICE_ID_MOTOROLA_MPC5200 0x5803 +#define PCI_DEVICE_ID_MOTOROLA_MPC5200B 0x5809 + +#define PCI_VENDOR_ID_PROMISE 0x105a +#define PCI_DEVICE_ID_PROMISE_20265 0x0d30 +#define PCI_DEVICE_ID_PROMISE_20267 0x4d30 +#define PCI_DEVICE_ID_PROMISE_20246 0x4d33 +#define PCI_DEVICE_ID_PROMISE_20262 0x4d38 +#define PCI_DEVICE_ID_PROMISE_20263 0x0D38 +#define PCI_DEVICE_ID_PROMISE_20268 0x4d68 +#define PCI_DEVICE_ID_PROMISE_20269 0x4d69 +#define PCI_DEVICE_ID_PROMISE_20270 0x6268 +#define PCI_DEVICE_ID_PROMISE_20271 0x6269 +#define PCI_DEVICE_ID_PROMISE_20275 0x1275 +#define PCI_DEVICE_ID_PROMISE_20276 0x5275 +#define PCI_DEVICE_ID_PROMISE_20277 0x7275 + +#define PCI_VENDOR_ID_FOXCONN 0x105b + +#define PCI_VENDOR_ID_UMC 0x1060 +#define PCI_DEVICE_ID_UMC_UM8673F 0x0101 +#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a +#define PCI_DEVICE_ID_UMC_UM8886A 0x886a + +#define PCI_VENDOR_ID_PICOPOWER 0x1066 +#define PCI_DEVICE_ID_PICOPOWER_PT86C523 0x0002 +#define PCI_DEVICE_ID_PICOPOWER_PT86C523BBP 0x8002 + +#define PCI_VENDOR_ID_MYLEX 0x1069 +#define PCI_DEVICE_ID_MYLEX_DAC960_P 0x0001 +#define PCI_DEVICE_ID_MYLEX_DAC960_PD 0x0002 +#define PCI_DEVICE_ID_MYLEX_DAC960_PG 0x0010 +#define PCI_DEVICE_ID_MYLEX_DAC960_LA 0x0020 +#define PCI_DEVICE_ID_MYLEX_DAC960_LP 0x0050 +#define PCI_DEVICE_ID_MYLEX_DAC960_BA 0xBA56 +#define PCI_DEVICE_ID_MYLEX_DAC960_GEM 0xB166 + +#define PCI_VENDOR_ID_APPLE 0x106b +#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001 +#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e +#define PCI_DEVICE_ID_APPLE_UNI_N_FW 0x0018 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020 +#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021 +#define PCI_DEVICE_ID_APPLE_UNI_N_GMACP 0x0024 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d +#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e +#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032 +#define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034 +#define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b +#define PCI_DEVICE_ID_APPLE_K2_ATA100 0x0043 +#define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b +#define PCI_DEVICE_ID_APPLE_K2_GMAC 0x004c +#define PCI_DEVICE_ID_APPLE_SH_ATA 0x0050 +#define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051 +#define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058 +#define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059 +#define PCI_DEVICE_ID_APPLE_U4_PCIE 0x005b +#define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066 +#define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069 +#define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a +#define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b +#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645 + +#define PCI_VENDOR_ID_YAMAHA 0x1073 +#define PCI_DEVICE_ID_YAMAHA_724 0x0004 +#define PCI_DEVICE_ID_YAMAHA_724F 0x000d +#define PCI_DEVICE_ID_YAMAHA_740 0x000a +#define PCI_DEVICE_ID_YAMAHA_740C 0x000c +#define 
PCI_DEVICE_ID_YAMAHA_744 0x0010 +#define PCI_DEVICE_ID_YAMAHA_754 0x0012 + +#define PCI_VENDOR_ID_QLOGIC 0x1077 +#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016 +#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020 +#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080 +#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216 +#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240 +#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280 +#define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100 +#define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200 +#define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300 +#define PCI_DEVICE_ID_QLOGIC_ISP2312 0x2312 +#define PCI_DEVICE_ID_QLOGIC_ISP2322 0x2322 +#define PCI_DEVICE_ID_QLOGIC_ISP6312 0x6312 +#define PCI_DEVICE_ID_QLOGIC_ISP6322 0x6322 +#define PCI_DEVICE_ID_QLOGIC_ISP2422 0x2422 +#define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432 +#define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512 +#define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522 +#define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422 +#define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432 + +#define PCI_VENDOR_ID_CYRIX 0x1078 +#define PCI_DEVICE_ID_CYRIX_5510 0x0000 +#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001 +#define PCI_DEVICE_ID_CYRIX_5520 0x0002 +#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100 +#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102 +#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103 +#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104 + +#define PCI_VENDOR_ID_CONTAQ 0x1080 +#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693 + +#define PCI_VENDOR_ID_OLICOM 0x108d +#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012 +#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013 +#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 + +#define PCI_VENDOR_ID_SUN 0x108e +#define PCI_DEVICE_ID_SUN_EBUS 0x1000 +#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001 +#define PCI_DEVICE_ID_SUN_RIO_EBUS 0x1100 +#define PCI_DEVICE_ID_SUN_RIO_GEM 0x1101 +#define PCI_DEVICE_ID_SUN_RIO_1394 0x1102 +#define PCI_DEVICE_ID_SUN_RIO_USB 0x1103 +#define PCI_DEVICE_ID_SUN_GEM 0x2bad +#define PCI_DEVICE_ID_SUN_SIMBA 0x5000 +#define PCI_DEVICE_ID_SUN_PBM 0x8000 +#define PCI_DEVICE_ID_SUN_SCHIZO 0x8001 +#define PCI_DEVICE_ID_SUN_SABRE 0xa000 +#define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001 +#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 +#define PCI_DEVICE_ID_SUN_CASSINI 0xabba + +#define PCI_VENDOR_ID_NI 0x1093 +#define PCI_DEVICE_ID_NI_PCI2322 0xd130 +#define PCI_DEVICE_ID_NI_PCI2324 0xd140 +#define PCI_DEVICE_ID_NI_PCI2328 0xd150 +#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190 +#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0 +#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0 +#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0 +#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0 +#define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1 +#define PCI_DEVICE_ID_NI_PCI2322I 0xd250 +#define PCI_DEVICE_ID_NI_PCI2324I 0xd270 +#define PCI_DEVICE_ID_NI_PCI23216 0xd2b0 +#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080 +#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db +#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd +#define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df +#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2 +#define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4 +#define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6 +#define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7 +#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8 +#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea +#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec +#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee + +#define PCI_VENDOR_ID_CMD 0x1095 +#define PCI_DEVICE_ID_CMD_643 0x0643 +#define PCI_DEVICE_ID_CMD_646 0x0646 +#define PCI_DEVICE_ID_CMD_648 0x0648 +#define PCI_DEVICE_ID_CMD_649 0x0649 + +#define PCI_DEVICE_ID_SII_680 0x0680 +#define 
PCI_DEVICE_ID_SII_3112 0x3112 +#define PCI_DEVICE_ID_SII_1210SA 0x0240 + +#define PCI_VENDOR_ID_BROOKTREE 0x109e +#define PCI_DEVICE_ID_BROOKTREE_878 0x0878 +#define PCI_DEVICE_ID_BROOKTREE_879 0x0879 + +#define PCI_VENDOR_ID_SGI 0x10a9 +#define PCI_DEVICE_ID_SGI_IOC3 0x0003 +#define PCI_DEVICE_ID_SGI_LITHIUM 0x1002 +#define PCI_DEVICE_ID_SGI_IOC4 0x100a + +#define PCI_VENDOR_ID_WINBOND 0x10ad +#define PCI_DEVICE_ID_WINBOND_82C105 0x0105 +#define PCI_DEVICE_ID_WINBOND_83C553 0x0565 + +#define PCI_VENDOR_ID_PLX 0x10b5 +#define PCI_DEVICE_ID_PLX_R685 0x1030 +#define PCI_DEVICE_ID_PLX_ROMULUS 0x106a +#define PCI_DEVICE_ID_PLX_SPCOM800 0x1076 +#define PCI_DEVICE_ID_PLX_1077 0x1077 +#define PCI_DEVICE_ID_PLX_SPCOM200 0x1103 +#define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151 +#define PCI_DEVICE_ID_PLX_R753 0x1152 +#define PCI_DEVICE_ID_PLX_OLITEC 0x1187 +#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196 +#define PCI_DEVICE_ID_PLX_9030 0x9030 +#define PCI_DEVICE_ID_PLX_9050 0x9050 +#define PCI_DEVICE_ID_PLX_9056 0x9056 +#define PCI_DEVICE_ID_PLX_9080 0x9080 +#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 + +#define PCI_VENDOR_ID_MADGE 0x10b6 +#define PCI_DEVICE_ID_MADGE_MK2 0x0002 + +#define PCI_VENDOR_ID_3COM 0x10b7 +#define PCI_DEVICE_ID_3COM_3C985 0x0001 +#define PCI_DEVICE_ID_3COM_3C940 0x1700 +#define PCI_DEVICE_ID_3COM_3C339 0x3390 +#define PCI_DEVICE_ID_3COM_3C359 0x3590 +#define PCI_DEVICE_ID_3COM_3C940B 0x80eb +#define PCI_DEVICE_ID_3COM_3CR990 0x9900 +#define PCI_DEVICE_ID_3COM_3CR990_TX_95 0x9902 +#define PCI_DEVICE_ID_3COM_3CR990_TX_97 0x9903 +#define PCI_DEVICE_ID_3COM_3CR990B 0x9904 +#define PCI_DEVICE_ID_3COM_3CR990_FX 0x9905 +#define PCI_DEVICE_ID_3COM_3CR990SVR95 0x9908 +#define PCI_DEVICE_ID_3COM_3CR990SVR97 0x9909 +#define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a + +#define PCI_VENDOR_ID_AL 0x10b9 +#define PCI_DEVICE_ID_AL_M1533 0x1533 +#define PCI_DEVICE_ID_AL_M1535 0x1535 +#define PCI_DEVICE_ID_AL_M1541 0x1541 +#define PCI_DEVICE_ID_AL_M1563 0x1563 +#define PCI_DEVICE_ID_AL_M1621 0x1621 +#define PCI_DEVICE_ID_AL_M1631 0x1631 +#define PCI_DEVICE_ID_AL_M1632 0x1632 +#define PCI_DEVICE_ID_AL_M1641 0x1641 +#define PCI_DEVICE_ID_AL_M1644 0x1644 +#define PCI_DEVICE_ID_AL_M1647 0x1647 +#define PCI_DEVICE_ID_AL_M1651 0x1651 +#define PCI_DEVICE_ID_AL_M1671 0x1671 +#define PCI_DEVICE_ID_AL_M1681 0x1681 +#define PCI_DEVICE_ID_AL_M1683 0x1683 +#define PCI_DEVICE_ID_AL_M1689 0x1689 +#define PCI_DEVICE_ID_AL_M5219 0x5219 +#define PCI_DEVICE_ID_AL_M5228 0x5228 +#define PCI_DEVICE_ID_AL_M5229 0x5229 +#define PCI_DEVICE_ID_AL_M5451 0x5451 +#define PCI_DEVICE_ID_AL_M7101 0x7101 + +#define PCI_VENDOR_ID_NEOMAGIC 0x10c8 +#define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005 +#define PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO 0x8006 +#define PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO 0x8016 + +#define PCI_VENDOR_ID_TCONRAD 0x10da +#define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508 + +#define PCI_VENDOR_ID_ROHM 0x10db + +#define PCI_VENDOR_ID_NVIDIA 0x10de +#define PCI_DEVICE_ID_NVIDIA_TNT 0x0020 +#define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028 +#define PCI_DEVICE_ID_NVIDIA_UTNT2 0x0029 +#define PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN 0x002a +#define PCI_DEVICE_ID_NVIDIA_VTNT2 0x002C +#define PCI_DEVICE_ID_NVIDIA_UVTNT2 0x002D +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 
0x0041 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE 0x0042 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x0045 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000 0x004E +#define PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS 0x0052 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055 +#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059 +#define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d +#define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064 +#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065 +#define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069 +#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a +#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084 +#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085 +#define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089 +#define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a +#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800 0x0098 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX 0x0099 +#define PCI_DEVICE_ID_NVIDIA_ITNT2 0x00A0 +#define PCI_DEVICE_ID_GEFORCE_6800A 0x00c1 +#define PCI_DEVICE_ID_GEFORCE_6800A_LE 0x00c2 +#define PCI_DEVICE_ID_GEFORCE_GO_6800 0x00c8 +#define PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA 0x00c9 +#define PCI_DEVICE_ID_QUADRO_FX_GO1400 0x00cc +#define PCI_DEVICE_ID_QUADRO_FX_1400 0x00ce +#define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5 +#define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9 +#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5 +#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1 0x00f1 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9 +#define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 +#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX 0x0110 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2 0x0111 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO 0x0112 +#define PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR 0x0113 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT 0x0140 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600 0x0141 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL 0x0145 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540 0x014E +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200 0x014F +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS 0x0150 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2 0x0151 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA 0x0152 +#define PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO 0x0153 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE 0x0161 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200 0x0164 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250 0x0166 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1 0x0167 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1 0x0168 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460 0x0170 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440 0x0171 +#define 
PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420 0x0172 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE 0x0173 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO 0x0174 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO 0x0175 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32 0x0176 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO 0x0177 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL 0x0178 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64 0x0179 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_200 0x017A +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL 0x017B +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL 0x017C +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16 0x017D +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X 0x0181 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X 0x0182 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X 0x0183 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_4000 0x0185 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO 0x0186 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO 0x0187 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL 0x0188 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC 0x0189 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS 0x018A +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL 0x018B +#define PCI_DEVICE_ID_NVIDIA_IGEFORCE2 0x01a0 +#define PCI_DEVICE_ID_NVIDIA_NFORCE 0x01a4 +#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc +#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2 0x0202 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_DDC 0x0203 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B 0x0211 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE 0x0212 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT 0x0215 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600 0x0250 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400 0x0251 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200 0x0253 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL 0x0258 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL 0x0259 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL 0x025B +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS 0x0264 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE 0x0265 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS 0x0368 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO 0x0286 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL 0x0288 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL 0x0289 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL 0x028C +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA 0x0301 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800 0x0302 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000 0x0308 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000 0x0309 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA 0x0311 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600 0x0312 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE 0x0314 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600 0x031A +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650 0x031B +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700 0x031C +#define 
PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200 0x0320 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA 0x0321 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1 0x0322 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE 0x0323 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200 0x0324 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250 0x0325 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500 0x0326 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100 0x0327 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32 0x0328 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200 0x0329 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI 0x032A +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500 0x032B +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300 0x032C +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100 0x032D +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA 0x0330 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900 0x0331 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT 0x0332 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA 0x0333 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT 0x0334 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000 0x0338 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700 0x033F +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA 0x0341 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700 0x0342 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE 0x0343 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE 0x0344 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1 0x0347 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E +#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0 0x0360 +#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4 0x0364 +#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS 0x0446 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS 0x0542 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_320M 0x08A0 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 + +#define PCI_VENDOR_ID_IMS 0x10e0 +#define PCI_DEVICE_ID_IMS_TT128 0x9128 +#define PCI_DEVICE_ID_IMS_TT3D 0x9135 + +#define PCI_VENDOR_ID_AMCC 0x10e8 +#define PCI_VENDOR_ID_AMPERE 0x1def + +#define PCI_VENDOR_ID_INTERG 0x10ea +#define PCI_DEVICE_ID_INTERG_1682 0x1682 +#define PCI_DEVICE_ID_INTERG_2000 0x2000 +#define PCI_DEVICE_ID_INTERG_2010 0x2010 +#define PCI_DEVICE_ID_INTERG_5000 0x5000 +#define PCI_DEVICE_ID_INTERG_5050 0x5050 + +#define PCI_VENDOR_ID_REALTEK 0x10ec +#define PCI_DEVICE_ID_REALTEK_8139 0x8139 + +#define PCI_VENDOR_ID_XILINX 0x10ee +#define PCI_DEVICE_ID_RME_DIGI96 0x3fc0 +#define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1 +#define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2 +#define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3 +#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5 +#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6 + +#define PCI_VENDOR_ID_INIT 0x1101 + +#define PCI_VENDOR_ID_CREATIVE 0x1102 /* 
duplicate: ECTIVA */ +#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002 +#define PCI_DEVICE_ID_CREATIVE_20K1 0x0005 +#define PCI_DEVICE_ID_CREATIVE_20K2 0x000b +#define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043 +#define PCI_SUBDEVICE_ID_CREATIVE_SB1270 0x0062 +#define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000 + +#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */ +#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938 + +#define PCI_VENDOR_ID_TTI 0x1103 +#define PCI_DEVICE_ID_TTI_HPT343 0x0003 +#define PCI_DEVICE_ID_TTI_HPT366 0x0004 +#define PCI_DEVICE_ID_TTI_HPT372 0x0005 +#define PCI_DEVICE_ID_TTI_HPT302 0x0006 +#define PCI_DEVICE_ID_TTI_HPT371 0x0007 +#define PCI_DEVICE_ID_TTI_HPT374 0x0008 +#define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? */ + +#define PCI_VENDOR_ID_SIGMA 0x1105 + +#define PCI_VENDOR_ID_VIA 0x1106 +#define PCI_DEVICE_ID_VIA_8763_0 0x0198 +#define PCI_DEVICE_ID_VIA_8380_0 0x0204 +#define PCI_DEVICE_ID_VIA_3238_0 0x0238 +#define PCI_DEVICE_ID_VIA_PT880 0x0258 +#define PCI_DEVICE_ID_VIA_PT880ULTRA 0x0308 +#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259 +#define PCI_DEVICE_ID_VIA_3269_0 0x0269 +#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282 +#define PCI_DEVICE_ID_VIA_3296_0 0x0296 +#define PCI_DEVICE_ID_VIA_8363_0 0x0305 +#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314 +#define PCI_DEVICE_ID_VIA_P4M890 0x0327 +#define PCI_DEVICE_ID_VIA_VT3324 0x0324 +#define PCI_DEVICE_ID_VIA_VT3336 0x0336 +#define PCI_DEVICE_ID_VIA_VT3351 0x0351 +#define PCI_DEVICE_ID_VIA_VT3364 0x0364 +#define PCI_DEVICE_ID_VIA_8371_0 0x0391 +#define PCI_DEVICE_ID_VIA_6415 0x0415 +#define PCI_DEVICE_ID_VIA_8501_0 0x0501 +#define PCI_DEVICE_ID_VIA_82C561 0x0561 +#define PCI_DEVICE_ID_VIA_82C586_1 0x0571 +#define PCI_DEVICE_ID_VIA_82C576 0x0576 +#define PCI_DEVICE_ID_VIA_82C586_0 0x0586 +#define PCI_DEVICE_ID_VIA_82C596 0x0596 +#define PCI_DEVICE_ID_VIA_82C597_0 0x0597 +#define PCI_DEVICE_ID_VIA_82C598_0 0x0598 +#define PCI_DEVICE_ID_VIA_8601_0 0x0601 +#define PCI_DEVICE_ID_VIA_8605_0 0x0605 +#define PCI_DEVICE_ID_VIA_82C686 0x0686 +#define PCI_DEVICE_ID_VIA_82C691_0 0x0691 +#define PCI_DEVICE_ID_VIA_82C576_1 0x1571 +#define PCI_DEVICE_ID_VIA_82C586_2 0x3038 +#define PCI_DEVICE_ID_VIA_82C586_3 0x3040 +#define PCI_DEVICE_ID_VIA_82C596_3 0x3050 +#define PCI_DEVICE_ID_VIA_82C596B_3 0x3051 +#define PCI_DEVICE_ID_VIA_82C686_4 0x3057 +#define PCI_DEVICE_ID_VIA_82C686_5 0x3058 +#define PCI_DEVICE_ID_VIA_8233_5 0x3059 +#define PCI_DEVICE_ID_VIA_8233_0 0x3074 +#define PCI_DEVICE_ID_VIA_8633_0 0x3091 +#define PCI_DEVICE_ID_VIA_8367_0 0x3099 +#define PCI_DEVICE_ID_VIA_8653_0 0x3101 +#define PCI_DEVICE_ID_VIA_8622 0x3102 +#define PCI_DEVICE_ID_VIA_8235_USB_2 0x3104 +#define PCI_DEVICE_ID_VIA_8233C_0 0x3109 +#define PCI_DEVICE_ID_VIA_8361 0x3112 +#define PCI_DEVICE_ID_VIA_XM266 0x3116 +#define PCI_DEVICE_ID_VIA_612X 0x3119 +#define PCI_DEVICE_ID_VIA_862X_0 0x3123 +#define PCI_DEVICE_ID_VIA_8753_0 0x3128 +#define PCI_DEVICE_ID_VIA_8233A 0x3147 +#define PCI_DEVICE_ID_VIA_8703_51_0 0x3148 +#define PCI_DEVICE_ID_VIA_8237_SATA 0x3149 +#define PCI_DEVICE_ID_VIA_XN266 0x3156 +#define PCI_DEVICE_ID_VIA_6410 0x3164 +#define PCI_DEVICE_ID_VIA_8754C_0 0x3168 +#define PCI_DEVICE_ID_VIA_8235 0x3177 +#define PCI_DEVICE_ID_VIA_8385_0 0x3188 +#define PCI_DEVICE_ID_VIA_8377_0 0x3189 +#define PCI_DEVICE_ID_VIA_8378_0 0x3205 +#define PCI_DEVICE_ID_VIA_8783_0 0x3208 +#define 
PCI_DEVICE_ID_VIA_8237 0x3227 +#define PCI_DEVICE_ID_VIA_8251 0x3287 +#define PCI_DEVICE_ID_VIA_8261 0x3402 +#define PCI_DEVICE_ID_VIA_8237A 0x3337 +#define PCI_DEVICE_ID_VIA_8237S 0x3372 +#define PCI_DEVICE_ID_VIA_SATA_EIDE 0x5324 +#define PCI_DEVICE_ID_VIA_8231 0x8231 +#define PCI_DEVICE_ID_VIA_8231_4 0x8235 +#define PCI_DEVICE_ID_VIA_8365_1 0x8305 +#define PCI_DEVICE_ID_VIA_CX700 0x8324 +#define PCI_DEVICE_ID_VIA_CX700_IDE 0x0581 +#define PCI_DEVICE_ID_VIA_VX800 0x8353 +#define PCI_DEVICE_ID_VIA_VX855 0x8409 +#define PCI_DEVICE_ID_VIA_VX900 0x8410 +#define PCI_DEVICE_ID_VIA_8371_1 0x8391 +#define PCI_DEVICE_ID_VIA_82C598_1 0x8598 +#define PCI_DEVICE_ID_VIA_838X_1 0xB188 +#define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198 +#define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409 +#define PCI_DEVICE_ID_VIA_ANON 0xFFFF + +#define PCI_VENDOR_ID_SIEMENS 0x110A +#define PCI_DEVICE_ID_SIEMENS_DSCC4 0x2102 + +#define PCI_VENDOR_ID_VORTEX 0x1119 +#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000 +#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001 +#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002 +#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003 +#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004 +#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005 +#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006 +#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007 +#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008 +#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009 +#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a +#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b +#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c +#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d +#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100 +#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101 +#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102 +#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103 +#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104 +#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105 + +#define PCI_VENDOR_ID_EF 0x111a +#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000 +#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002 +#define PCI_DEVICE_ID_EF_ATM_LANAI2 0x0003 +#define PCI_DEVICE_ID_EF_ATM_LANAIHB 0x0005 + +#define PCI_VENDOR_ID_IDT 0x111d +#define PCI_DEVICE_ID_IDT_IDT77201 0x0001 + +#define PCI_VENDOR_ID_FORE 0x1127 +#define PCI_DEVICE_ID_FORE_PCA200E 0x0300 + +#define PCI_VENDOR_ID_PHILIPS 0x1131 +#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146 +#define PCI_DEVICE_ID_PHILIPS_SAA9730 0x9730 + +#define PCI_VENDOR_ID_EICON 0x1133 +#define PCI_DEVICE_ID_EICON_DIVA20 0xe002 +#define PCI_DEVICE_ID_EICON_DIVA20_U 0xe004 +#define PCI_DEVICE_ID_EICON_DIVA201 0xe005 +#define PCI_DEVICE_ID_EICON_DIVA202 0xe00b +#define PCI_DEVICE_ID_EICON_MAESTRA 0xe010 +#define PCI_DEVICE_ID_EICON_MAESTRAQ 0xe012 +#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013 +#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014 + +#define PCI_VENDOR_ID_CISCO 0x1137 + +#define PCI_VENDOR_ID_ZIATECH 0x1138 +#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 + + +#define PCI_VENDOR_ID_SYSKONNECT 0x1148 +#define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 +#define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300 +#define PCI_DEVICE_ID_SYSKONNECT_YU 0x4320 +#define PCI_DEVICE_ID_SYSKONNECT_9DXX 0x4400 +#define PCI_DEVICE_ID_SYSKONNECT_9MXX 0x4500 + +#define PCI_VENDOR_ID_DIGI 0x114f +#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_E 0x0070 +#define PCI_DEVICE_ID_DIGI_DF_M_E 0x0071 +#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A 0x0072 +#define PCI_DEVICE_ID_DIGI_DF_M_A 0x0073 +#define PCI_DEVICE_ID_DIGI_NEO_8 0x00B1 +#define PCI_DEVICE_ID_NEO_2DB9 0x00C8 +#define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9 +#define PCI_DEVICE_ID_NEO_2RJ45 0x00CA +#define PCI_DEVICE_ID_NEO_2RJ45PRI 0x00CB 
+#define PCIE_DEVICE_ID_NEO_4_IBM 0x00F4 + +#define PCI_VENDOR_ID_XIRCOM 0x115d +#define PCI_DEVICE_ID_XIRCOM_RBM56G 0x0101 +#define PCI_DEVICE_ID_XIRCOM_X3201_MDM 0x0103 + +#define PCI_VENDOR_ID_SERVERWORKS 0x1166 +#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008 +#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009 +#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017 +#define PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB 0x0036 +#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103 +#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132 +#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200 +#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203 +#define PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205 +#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211 +#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213 +#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227 +#define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408 + +#define PCI_VENDOR_ID_ALTERA 0x1172 + +#define PCI_VENDOR_ID_SBE 0x1176 +#define PCI_DEVICE_ID_SBE_WANXL100 0x0301 +#define PCI_DEVICE_ID_SBE_WANXL200 0x0302 +#define PCI_DEVICE_ID_SBE_WANXL400 0x0104 +#define PCI_SUBDEVICE_ID_SBE_T3E3 0x0009 +#define PCI_SUBDEVICE_ID_SBE_2T3E3_P0 0x0901 +#define PCI_SUBDEVICE_ID_SBE_2T3E3_P1 0x0902 + +#define PCI_VENDOR_ID_TOSHIBA 0x1179 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0101 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2 0x0102 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_3 0x0103 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_5 0x0105 +#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a +#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f +#define PCI_DEVICE_ID_TOSHIBA_TOPIC100 0x0617 + +#define PCI_VENDOR_ID_TOSHIBA_2 0x102f +#define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030 +#define PCI_DEVICE_ID_TOSHIBA_TC35815_NWU 0x0031 +#define PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939 0x0032 +#define PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE 0x0105 +#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108 +#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3 + +#define PCI_VENDOR_ID_ATTO 0x117c + +#define PCI_VENDOR_ID_RICOH 0x1180 +#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465 +#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466 +#define PCI_DEVICE_ID_RICOH_RL5C475 0x0475 +#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 +#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 +#define PCI_DEVICE_ID_RICOH_R5C822 0x0822 +#define PCI_DEVICE_ID_RICOH_R5CE822 0xe822 +#define PCI_DEVICE_ID_RICOH_R5CE823 0xe823 +#define PCI_DEVICE_ID_RICOH_R5C832 0x0832 +#define PCI_DEVICE_ID_RICOH_R5C843 0x0843 + +#define PCI_VENDOR_ID_DLINK 0x1186 +#define PCI_DEVICE_ID_DLINK_DGE510T 0x4c00 + +#define PCI_VENDOR_ID_ARTOP 0x1191 +#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005 +#define PCI_DEVICE_ID_ARTOP_ATP860 0x0006 +#define PCI_DEVICE_ID_ARTOP_ATP860R 0x0007 +#define PCI_DEVICE_ID_ARTOP_ATP865 0x0008 +#define PCI_DEVICE_ID_ARTOP_ATP865R 0x0009 +#define PCI_DEVICE_ID_ARTOP_ATP867A 0x000A +#define PCI_DEVICE_ID_ARTOP_ATP867B 0x000B +#define PCI_DEVICE_ID_ARTOP_AEC7610 0x8002 +#define PCI_DEVICE_ID_ARTOP_AEC7612UW 0x8010 +#define PCI_DEVICE_ID_ARTOP_AEC7612U 0x8020 +#define PCI_DEVICE_ID_ARTOP_AEC7612S 0x8030 +#define PCI_DEVICE_ID_ARTOP_AEC7612D 0x8040 +#define PCI_DEVICE_ID_ARTOP_AEC7612SUW 0x8050 +#define PCI_DEVICE_ID_ARTOP_8060 0x8060 + +#define PCI_VENDOR_ID_ZEITNET 0x1193 +#define PCI_DEVICE_ID_ZEITNET_1221 0x0001 +#define PCI_DEVICE_ID_ZEITNET_1225 0x0002 + +#define PCI_VENDOR_ID_FUJITSU_ME 0x119e +#define 
PCI_DEVICE_ID_FUJITSU_FS155 0x0001 +#define PCI_DEVICE_ID_FUJITSU_FS50 0x0003 + +#define PCI_SUBVENDOR_ID_KEYSPAN 0x11a9 +#define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334 + +#define PCI_VENDOR_ID_MARVELL 0x11ab +#define PCI_VENDOR_ID_MARVELL_EXT 0x1b4b +#define PCI_DEVICE_ID_MARVELL_GT64111 0x4146 +#define PCI_DEVICE_ID_MARVELL_GT64260 0x6430 +#define PCI_DEVICE_ID_MARVELL_MV64360 0x6460 +#define PCI_DEVICE_ID_MARVELL_MV64460 0x6480 +#define PCI_DEVICE_ID_MARVELL_88ALP01_NAND 0x4100 +#define PCI_DEVICE_ID_MARVELL_88ALP01_SD 0x4101 +#define PCI_DEVICE_ID_MARVELL_88ALP01_CCIC 0x4102 + +#define PCI_VENDOR_ID_V3 0x11b0 +#define PCI_DEVICE_ID_V3_V960 0x0001 +#define PCI_DEVICE_ID_V3_V351 0x0002 + +#define PCI_VENDOR_ID_ATT 0x11c1 +#define PCI_DEVICE_ID_ATT_VENUS_MODEM 0x480 + +#define PCI_VENDOR_ID_SPECIALIX 0x11cb +#define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004 + +#define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4 +#define PCI_DEVICE_ID_AD1889JS 0x1889 + +#define PCI_DEVICE_ID_SEGA_BBA 0x1234 + +#define PCI_VENDOR_ID_ZORAN 0x11de +#define PCI_DEVICE_ID_ZORAN_36057 0x6057 +#define PCI_DEVICE_ID_ZORAN_36120 0x6120 + +#define PCI_VENDOR_ID_COMPEX 0x11f6 +#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112 + +#define PCI_VENDOR_ID_PMC_Sierra 0x11f8 +#define PCI_VENDOR_ID_MICROSEMI 0x11f8 + +#define PCI_VENDOR_ID_RP 0x11fe +#define PCI_DEVICE_ID_RP32INTF 0x0001 +#define PCI_DEVICE_ID_RP8INTF 0x0002 +#define PCI_DEVICE_ID_RP16INTF 0x0003 +#define PCI_DEVICE_ID_RP4QUAD 0x0004 +#define PCI_DEVICE_ID_RP8OCTA 0x0005 +#define PCI_DEVICE_ID_RP8J 0x0006 +#define PCI_DEVICE_ID_RP4J 0x0007 +#define PCI_DEVICE_ID_RP8SNI 0x0008 +#define PCI_DEVICE_ID_RP16SNI 0x0009 +#define PCI_DEVICE_ID_RPP4 0x000A +#define PCI_DEVICE_ID_RPP8 0x000B +#define PCI_DEVICE_ID_RP4M 0x000D +#define PCI_DEVICE_ID_RP2_232 0x000E +#define PCI_DEVICE_ID_RP2_422 0x000F +#define PCI_DEVICE_ID_URP32INTF 0x0801 +#define PCI_DEVICE_ID_URP8INTF 0x0802 +#define PCI_DEVICE_ID_URP16INTF 0x0803 +#define PCI_DEVICE_ID_URP8OCTA 0x0805 +#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C +#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D +#define PCI_DEVICE_ID_CRP16INTF 0x0903 + +#define PCI_VENDOR_ID_CYCLADES 0x120e +#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 +#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101 +#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102 +#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103 +#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104 +#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105 +#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200 +#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201 +#define PCI_DEVICE_ID_PC300_RX_2 0x0300 +#define PCI_DEVICE_ID_PC300_RX_1 0x0301 +#define PCI_DEVICE_ID_PC300_TE_2 0x0310 +#define PCI_DEVICE_ID_PC300_TE_1 0x0311 +#define PCI_DEVICE_ID_PC300_TE_M_2 0x0320 +#define PCI_DEVICE_ID_PC300_TE_M_1 0x0321 + +#define PCI_VENDOR_ID_ESSENTIAL 0x120f +#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001 + +#define PCI_VENDOR_ID_O2 0x1217 +#define PCI_DEVICE_ID_O2_6729 0x6729 +#define PCI_DEVICE_ID_O2_6730 0x673a +#define PCI_DEVICE_ID_O2_6832 0x6832 +#define PCI_DEVICE_ID_O2_6836 0x6836 +#define PCI_DEVICE_ID_O2_6812 0x6872 +#define PCI_DEVICE_ID_O2_6933 0x6933 +#define PCI_DEVICE_ID_O2_8120 0x8120 +#define PCI_DEVICE_ID_O2_8220 0x8220 +#define PCI_DEVICE_ID_O2_8221 0x8221 +#define PCI_DEVICE_ID_O2_8320 0x8320 +#define PCI_DEVICE_ID_O2_8321 0x8321 + +#define PCI_VENDOR_ID_3DFX 0x121a +#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 +#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002 +#define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003 +#define PCI_DEVICE_ID_3DFX_VOODOO3 0x0005 +#define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009 
+ +#define PCI_VENDOR_ID_AVM 0x1244 +#define PCI_DEVICE_ID_AVM_B1 0x0700 +#define PCI_DEVICE_ID_AVM_C4 0x0800 +#define PCI_DEVICE_ID_AVM_A1 0x0a00 +#define PCI_DEVICE_ID_AVM_A1_V2 0x0e00 +#define PCI_DEVICE_ID_AVM_C2 0x1100 +#define PCI_DEVICE_ID_AVM_T1 0x1200 + +#define PCI_VENDOR_ID_STALLION 0x124d + +/* Allied Telesyn */ +#define PCI_VENDOR_ID_AT 0x1259 +#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701 +#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703 + +#define PCI_VENDOR_ID_ESS 0x125d +#define PCI_DEVICE_ID_ESS_ESS1968 0x1968 +#define PCI_DEVICE_ID_ESS_ESS1978 0x1978 +#define PCI_DEVICE_ID_ESS_ALLEGRO_1 0x1988 +#define PCI_DEVICE_ID_ESS_ALLEGRO 0x1989 +#define PCI_DEVICE_ID_ESS_CANYON3D_2LE 0x1990 +#define PCI_DEVICE_ID_ESS_CANYON3D_2 0x1992 +#define PCI_DEVICE_ID_ESS_MAESTRO3 0x1998 +#define PCI_DEVICE_ID_ESS_MAESTRO3_1 0x1999 +#define PCI_DEVICE_ID_ESS_MAESTRO3_HW 0x199a +#define PCI_DEVICE_ID_ESS_MAESTRO3_2 0x199b + +#define PCI_VENDOR_ID_SATSAGEM 0x1267 +#define PCI_DEVICE_ID_SATSAGEM_NICCY 0x1016 + +#define PCI_VENDOR_ID_ENSONIQ 0x1274 +#define PCI_DEVICE_ID_ENSONIQ_CT5880 0x5880 +#define PCI_DEVICE_ID_ENSONIQ_ES1370 0x5000 +#define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371 + +#define PCI_VENDOR_ID_TRANSMETA 0x1279 +#define PCI_DEVICE_ID_EFFICEON 0x0060 + +#define PCI_VENDOR_ID_ROCKWELL 0x127A + +#define PCI_VENDOR_ID_ITE 0x1283 +#define PCI_DEVICE_ID_ITE_8172 0x8172 +#define PCI_DEVICE_ID_ITE_8211 0x8211 +#define PCI_DEVICE_ID_ITE_8212 0x8212 +#define PCI_DEVICE_ID_ITE_8213 0x8213 +#define PCI_DEVICE_ID_ITE_8152 0x8152 +#define PCI_DEVICE_ID_ITE_8872 0x8872 +#define PCI_DEVICE_ID_ITE_IT8330G_0 0xe886 + +/* formerly Platform Tech */ +#define PCI_DEVICE_ID_ESS_ESS0100 0x0100 + +#define PCI_VENDOR_ID_ALTEON 0x12ae + +#define PCI_SUBVENDOR_ID_CONNECT_TECH 0x12c4 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232 0x0001 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232 0x0002 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232 0x0003 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485 0x0004 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4 0x0005 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485 0x0006 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2 0x0007 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485 0x0008 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6 0x0009 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1 0x000A +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1 0x000B +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ 0x000C +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_PTM 0x000D +#define PCI_SUBDEVICE_ID_CONNECT_TECH_NT960PCI 0x0100 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2 0x0201 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4 0x0202 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232 0x0300 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232 0x0301 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232 0x0302 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1 0x0310 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2 0x0311 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4 0x0312 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2 0x0320 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4 0x0321 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8 0x0322 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485 0x0330 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485 0x0331 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485 0x0332 + +#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 +#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 + +#define PCI_VENDOR_ID_PERICOM 0x12D8 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 
0x7951 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 + +#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16 0x0011 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC 0x0041 +#define PCI_SUBVENDOR_ID_CHASE_PCIRAS 0x124D +#define PCI_SUBDEVICE_ID_CHASE_PCIRAS4 0xF001 +#define PCI_SUBDEVICE_ID_CHASE_PCIRAS8 0xF010 + +#define PCI_VENDOR_ID_AUREAL 0x12eb +#define PCI_DEVICE_ID_AUREAL_VORTEX_1 0x0001 +#define PCI_DEVICE_ID_AUREAL_VORTEX_2 0x0002 +#define PCI_DEVICE_ID_AUREAL_ADVANTAGE 0x0003 + +#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8 +#define PCI_DEVICE_ID_LML_33R10 0x8a02 + +#define PCI_VENDOR_ID_ESDGMBH 0x12fe +#define PCI_DEVICE_ID_ESDGMBH_CPCIASIO4 0x0111 + +#define PCI_VENDOR_ID_CB 0x1307 /* Measurement Computing */ + +#define PCI_VENDOR_ID_SIIG 0x131f +#define PCI_SUBVENDOR_ID_SIIG 0x131f +#define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000 +#define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001 +#define PCI_DEVICE_ID_SIIG_1S_10x_850 0x1002 +#define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010 +#define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011 +#define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012 +#define PCI_DEVICE_ID_SIIG_1P_10x 0x1020 +#define PCI_DEVICE_ID_SIIG_2P_10x 0x1021 +#define PCI_DEVICE_ID_SIIG_2S_10x_550 0x1030 +#define PCI_DEVICE_ID_SIIG_2S_10x_650 0x1031 +#define PCI_DEVICE_ID_SIIG_2S_10x_850 0x1032 +#define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034 +#define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035 +#define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036 +#define PCI_DEVICE_ID_SIIG_4S_10x_550 0x1050 +#define PCI_DEVICE_ID_SIIG_4S_10x_650 0x1051 +#define PCI_DEVICE_ID_SIIG_4S_10x_850 0x1052 +#define PCI_DEVICE_ID_SIIG_1S_20x_550 0x2000 +#define PCI_DEVICE_ID_SIIG_1S_20x_650 0x2001 +#define PCI_DEVICE_ID_SIIG_1S_20x_850 0x2002 +#define PCI_DEVICE_ID_SIIG_1P_20x 0x2020 +#define PCI_DEVICE_ID_SIIG_2P_20x 0x2021 +#define PCI_DEVICE_ID_SIIG_2S_20x_550 0x2030 +#define PCI_DEVICE_ID_SIIG_2S_20x_650 0x2031 +#define PCI_DEVICE_ID_SIIG_2S_20x_850 0x2032 +#define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040 +#define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041 +#define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042 +#define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010 +#define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011 +#define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012 +#define PCI_DEVICE_ID_SIIG_4S_20x_550 0x2050 +#define PCI_DEVICE_ID_SIIG_4S_20x_650 0x2051 +#define PCI_DEVICE_ID_SIIG_4S_20x_850 0x2052 +#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060 +#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061 +#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062 +#define PCI_DEVICE_ID_SIIG_8S_20x_550 0x2080 +#define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081 +#define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082 +#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 + +#define PCI_VENDOR_ID_RADISYS 0x1331 + +#define PCI_VENDOR_ID_MICRO_MEMORY 0x1332 +#define PCI_DEVICE_ID_MICRO_MEMORY_5415CN 0x5415 +#define PCI_DEVICE_ID_MICRO_MEMORY_5425CN 0x5425 +#define PCI_DEVICE_ID_MICRO_MEMORY_6155 0x6155 + +#define PCI_VENDOR_ID_DOMEX 0x134a +#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001 + +#define PCI_VENDOR_ID_INTASHIELD 0x135a +#define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80 +#define PCI_DEVICE_ID_INTASHIELD_IS400 0x0dc0 + +#define PCI_VENDOR_ID_QUATECH 0x135C +#define PCI_DEVICE_ID_QUATECH_QSC100 0x0010 +#define PCI_DEVICE_ID_QUATECH_DSC100 0x0020 +#define 
PCI_DEVICE_ID_QUATECH_DSC200 0x0030 +#define PCI_DEVICE_ID_QUATECH_QSC200 0x0040 +#define PCI_DEVICE_ID_QUATECH_ESC100D 0x0050 +#define PCI_DEVICE_ID_QUATECH_ESC100M 0x0060 +#define PCI_DEVICE_ID_QUATECH_QSCP100 0x0120 +#define PCI_DEVICE_ID_QUATECH_DSCP100 0x0130 +#define PCI_DEVICE_ID_QUATECH_QSCP200 0x0140 +#define PCI_DEVICE_ID_QUATECH_DSCP200 0x0150 +#define PCI_DEVICE_ID_QUATECH_QSCLP100 0x0170 +#define PCI_DEVICE_ID_QUATECH_DSCLP100 0x0180 +#define PCI_DEVICE_ID_QUATECH_DSC100E 0x0181 +#define PCI_DEVICE_ID_QUATECH_SSCLP100 0x0190 +#define PCI_DEVICE_ID_QUATECH_QSCLP200 0x01A0 +#define PCI_DEVICE_ID_QUATECH_DSCLP200 0x01B0 +#define PCI_DEVICE_ID_QUATECH_DSC200E 0x01B1 +#define PCI_DEVICE_ID_QUATECH_SSCLP200 0x01C0 +#define PCI_DEVICE_ID_QUATECH_ESCLP100 0x01E0 +#define PCI_DEVICE_ID_QUATECH_SPPXP_100 0x0278 + +#define PCI_VENDOR_ID_SEALEVEL 0x135e +#define PCI_DEVICE_ID_SEALEVEL_U530 0x7101 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM2 0x7201 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM422 0x7402 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202 +#define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401 +#define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801 +#define PCI_DEVICE_ID_SEALEVEL_7803 0x7803 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804 + +#define PCI_VENDOR_ID_HYPERCOPE 0x1365 +#define PCI_DEVICE_ID_HYPERCOPE_PLX 0x9050 +#define PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO 0x0104 +#define PCI_SUBDEVICE_ID_HYPERCOPE_ERGO 0x0106 +#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107 +#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108 + +#define PCI_VENDOR_ID_DIGIGRAM 0x1369 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_SERIAL_SUBSYSTEM 0xc021 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_CAE_SERIAL_SUBSYSTEM 0xc022 + +#define PCI_VENDOR_ID_KAWASAKI 0x136b +#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 + +#define PCI_VENDOR_ID_CNET 0x1371 +#define PCI_DEVICE_ID_CNET_GIGACARD 0x434e + +#define PCI_VENDOR_ID_LMC 0x1376 +#define PCI_DEVICE_ID_LMC_HSSI 0x0003 +#define PCI_DEVICE_ID_LMC_DS3 0x0004 +#define PCI_DEVICE_ID_LMC_SSI 0x0005 +#define PCI_DEVICE_ID_LMC_T1 0x0006 + +#define PCI_VENDOR_ID_NETGEAR 0x1385 +#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a + +#define PCI_VENDOR_ID_APPLICOM 0x1389 +#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001 +#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002 +#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003 + +#define PCI_VENDOR_ID_MOXA 0x1393 +#define PCI_DEVICE_ID_MOXA_RC7000 0x0001 +#define PCI_DEVICE_ID_MOXA_CP102 0x1020 +#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021 +#define PCI_DEVICE_ID_MOXA_CP102U 0x1022 +#define PCI_DEVICE_ID_MOXA_C104 0x1040 +#define PCI_DEVICE_ID_MOXA_CP104U 0x1041 +#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042 +#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043 +#define PCI_DEVICE_ID_MOXA_CT114 0x1140 +#define PCI_DEVICE_ID_MOXA_CP114 0x1141 +#define PCI_DEVICE_ID_MOXA_CP118U 0x1180 +#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181 +#define PCI_DEVICE_ID_MOXA_CP132 0x1320 +#define PCI_DEVICE_ID_MOXA_CP132U 0x1321 +#define PCI_DEVICE_ID_MOXA_CP134U 0x1340 +#define PCI_DEVICE_ID_MOXA_C168 0x1680 +#define PCI_DEVICE_ID_MOXA_CP168U 0x1681 +#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682 +#define PCI_DEVICE_ID_MOXA_CP204J 0x2040 +#define PCI_DEVICE_ID_MOXA_C218 0x2180 +#define PCI_DEVICE_ID_MOXA_C320 0x3200 + +#define PCI_VENDOR_ID_CCD 0x1397 +#define PCI_DEVICE_ID_CCD_HFC4S 0x08B4 +#define PCI_SUBDEVICE_ID_CCD_PMX2S 0x1234 +#define PCI_DEVICE_ID_CCD_HFC8S 0x16B8 
+#define PCI_DEVICE_ID_CCD_2BD0 0x2bd0 +#define PCI_DEVICE_ID_CCD_HFCE1 0x30B1 +#define PCI_SUBDEVICE_ID_CCD_SPD4S 0x3136 +#define PCI_SUBDEVICE_ID_CCD_SPDE1 0x3137 +#define PCI_DEVICE_ID_CCD_B000 0xb000 +#define PCI_DEVICE_ID_CCD_B006 0xb006 +#define PCI_DEVICE_ID_CCD_B007 0xb007 +#define PCI_DEVICE_ID_CCD_B008 0xb008 +#define PCI_DEVICE_ID_CCD_B009 0xb009 +#define PCI_DEVICE_ID_CCD_B00A 0xb00a +#define PCI_DEVICE_ID_CCD_B00B 0xb00b +#define PCI_DEVICE_ID_CCD_B00C 0xb00c +#define PCI_DEVICE_ID_CCD_B100 0xb100 +#define PCI_SUBDEVICE_ID_CCD_IOB4ST 0xB520 +#define PCI_SUBDEVICE_ID_CCD_IOB8STR 0xB521 +#define PCI_SUBDEVICE_ID_CCD_IOB8ST 0xB522 +#define PCI_SUBDEVICE_ID_CCD_IOB1E1 0xB523 +#define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540 +#define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550 +#define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552 +#define PCI_SUBDEVICE_ID_CCD_JHSE1 0xB553 +#define PCI_SUBDEVICE_ID_CCD_JH8S 0xB55B +#define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560 +#define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562 +#define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563 +#define PCI_SUBDEVICE_ID_CCD_BNE1D 0xB564 +#define PCI_SUBDEVICE_ID_CCD_BNE1DP 0xB565 +#define PCI_SUBDEVICE_ID_CCD_BN2S 0xB566 +#define PCI_SUBDEVICE_ID_CCD_BN1SM 0xB567 +#define PCI_SUBDEVICE_ID_CCD_BN4SM 0xB568 +#define PCI_SUBDEVICE_ID_CCD_BN2SM 0xB569 +#define PCI_SUBDEVICE_ID_CCD_BNE1M 0xB56A +#define PCI_SUBDEVICE_ID_CCD_BN8SP 0xB56B +#define PCI_SUBDEVICE_ID_CCD_HFC4S 0xB620 +#define PCI_SUBDEVICE_ID_CCD_HFC8S 0xB622 +#define PCI_DEVICE_ID_CCD_B700 0xb700 +#define PCI_DEVICE_ID_CCD_B701 0xb701 +#define PCI_SUBDEVICE_ID_CCD_HFCE1 0xC523 +#define PCI_SUBDEVICE_ID_CCD_OV2S 0xE884 +#define PCI_SUBDEVICE_ID_CCD_OV4S 0xE888 +#define PCI_SUBDEVICE_ID_CCD_OV8S 0xE998 + +#define PCI_VENDOR_ID_EXAR 0x13a8 +#define PCI_DEVICE_ID_EXAR_XR17C152 0x0152 +#define PCI_DEVICE_ID_EXAR_XR17C154 0x0154 +#define PCI_DEVICE_ID_EXAR_XR17C158 0x0158 +#define PCI_DEVICE_ID_EXAR_XR17V352 0x0352 +#define PCI_DEVICE_ID_EXAR_XR17V354 0x0354 +#define PCI_DEVICE_ID_EXAR_XR17V358 0x0358 + +#define PCI_VENDOR_ID_MICROGATE 0x13c0 +#define PCI_DEVICE_ID_MICROGATE_USC 0x0010 +#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030 + +#define PCI_VENDOR_ID_3WARE 0x13C1 +#define PCI_DEVICE_ID_3WARE_1000 0x1000 +#define PCI_DEVICE_ID_3WARE_7000 0x1001 +#define PCI_DEVICE_ID_3WARE_9000 0x1002 + +#define PCI_VENDOR_ID_IOMEGA 0x13ca +#define PCI_DEVICE_ID_IOMEGA_BUZ 0x4231 + +#define PCI_VENDOR_ID_ABOCOM 0x13D1 +#define PCI_DEVICE_ID_ABOCOM_2BD1 0x2BD1 + +#define PCI_VENDOR_ID_SUNDANCE 0x13f0 + +#define PCI_VENDOR_ID_CMEDIA 0x13f6 +#define PCI_DEVICE_ID_CMEDIA_CM8338A 0x0100 +#define PCI_DEVICE_ID_CMEDIA_CM8338B 0x0101 +#define PCI_DEVICE_ID_CMEDIA_CM8738 0x0111 +#define PCI_DEVICE_ID_CMEDIA_CM8738B 0x0112 + +#define PCI_VENDOR_ID_ADVANTECH 0x13fe + +#define PCI_VENDOR_ID_MEILHAUS 0x1402 + +#define PCI_VENDOR_ID_LAVA 0x1407 +#define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */ +#define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATTRO_A 0x0120 /* 2x 16550A, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATTRO_B 0x0121 /* 2x 16550A, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */ +#define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */ +#define PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */ +#define PCI_DEVICE_ID_LAVA_QUAD_A 0x0201 /* 2x 16650, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUAD_B 0x0202 /* 2x 16650, half of 4 
port */ +#define PCI_DEVICE_ID_LAVA_SSERIAL 0x0500 /* 1x 16550 */ +#define PCI_DEVICE_ID_LAVA_PORT_650 0x0600 /* 1x 16650 */ +#define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000 +#define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8002 /* The Lava Dual Parallel is */ +#define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8003 /* two PCI devices on a card */ +#define PCI_DEVICE_ID_LAVA_BOCA_IOPPAR 0x8800 + +#define PCI_VENDOR_ID_TIMEDIA 0x1409 +#define PCI_DEVICE_ID_TIMEDIA_1889 0x7168 + +#define PCI_VENDOR_ID_ICE 0x1412 +#define PCI_DEVICE_ID_ICE_1712 0x1712 +#define PCI_DEVICE_ID_VT1724 0x1724 + +#define PCI_VENDOR_ID_OXSEMI 0x1415 +#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403 +#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000 +#define PCI_DEVICE_ID_OXSEMI_PCIe840_G 0xC004 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_0 0xC100 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G 0xC104 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1 0xC110 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G 0xC114 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C +#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 +#define PCI_DEVICE_ID_OXSEMI_C950 0x950B +#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 +#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 +#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 +#define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 +#define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001 + +#define PCI_VENDOR_ID_CHELSIO 0x1425 + +#define PCI_VENDOR_ID_ADLINK 0x144a + +#define PCI_VENDOR_ID_SAMSUNG 0x144d + +#define PCI_VENDOR_ID_GIGABYTE 0x1458 + +#define PCI_VENDOR_ID_AMBIT 0x1468 + +#define PCI_VENDOR_ID_MYRICOM 0x14c1 + +#define PCI_VENDOR_ID_MEDIATEK 0x14c3 +#define PCI_DEVICE_ID_MEDIATEK_7629 0x7629 + +#define PCI_VENDOR_ID_TITAN 0x14D2 +#define PCI_DEVICE_ID_TITAN_010L 0x8001 +#define PCI_DEVICE_ID_TITAN_100L 0x8010 +#define PCI_DEVICE_ID_TITAN_110L 0x8011 +#define PCI_DEVICE_ID_TITAN_200L 0x8020 +#define PCI_DEVICE_ID_TITAN_210L 0x8021 +#define PCI_DEVICE_ID_TITAN_400L 0x8040 +#define PCI_DEVICE_ID_TITAN_800L 0x8080 +#define PCI_DEVICE_ID_TITAN_100 0xA001 +#define PCI_DEVICE_ID_TITAN_200 0xA005 +#define PCI_DEVICE_ID_TITAN_400 0xA003 +#define PCI_DEVICE_ID_TITAN_800B 0xA004 + +#define PCI_VENDOR_ID_PANACOM 0x14d4 +#define PCI_DEVICE_ID_PANACOM_QUADMODEM 0x0400 +#define PCI_DEVICE_ID_PANACOM_DUALMODEM 0x0402 + +#define PCI_VENDOR_ID_SIPACKETS 0x14d9 +#define PCI_DEVICE_ID_SP1011 0x0010 + +#define PCI_VENDOR_ID_AFAVLAB 0x14db +#define PCI_DEVICE_ID_AFAVLAB_P028 0x2180 +#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182 +#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150 + +#define PCI_VENDOR_ID_AMPLICON 0x14dc + +#define PCI_VENDOR_ID_BCM_GVC 0x14a4 +#define PCI_VENDOR_ID_BROADCOM 0x14e4 +#define PCI_DEVICE_ID_TIGON3_5752 0x1600 +#define PCI_DEVICE_ID_TIGON3_5752M 0x1601 +#define PCI_DEVICE_ID_NX2_5709 0x1639 +#define PCI_DEVICE_ID_NX2_5709S 0x163a +#define PCI_DEVICE_ID_TIGON3_5700 0x1644 +#define PCI_DEVICE_ID_TIGON3_5701 0x1645 +#define PCI_DEVICE_ID_TIGON3_5702 0x1646 +#define PCI_DEVICE_ID_TIGON3_5703 0x1647 +#define PCI_DEVICE_ID_TIGON3_5704 0x1648 +#define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649 +#define PCI_DEVICE_ID_NX2_5706 0x164a +#define PCI_DEVICE_ID_NX2_5708 0x164c +#define PCI_DEVICE_ID_TIGON3_5702FE 0x164d +#define PCI_DEVICE_ID_NX2_57710 0x164e +#define PCI_DEVICE_ID_NX2_57711 0x164f +#define PCI_DEVICE_ID_NX2_57711E 0x1650 +#define PCI_DEVICE_ID_TIGON3_5705 0x1653 +#define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 +#define PCI_DEVICE_ID_TIGON3_5719 0x1657 +#define PCI_DEVICE_ID_TIGON3_5721 0x1659 +#define PCI_DEVICE_ID_TIGON3_5722 0x165a +#define 
PCI_DEVICE_ID_TIGON3_5723 0x165b +#define PCI_DEVICE_ID_TIGON3_5705M 0x165d +#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e +#define PCI_DEVICE_ID_NX2_57712 0x1662 +#define PCI_DEVICE_ID_NX2_57712E 0x1663 +#define PCI_DEVICE_ID_NX2_57712_MF 0x1663 +#define PCI_DEVICE_ID_TIGON3_5714 0x1668 +#define PCI_DEVICE_ID_TIGON3_5714S 0x1669 +#define PCI_DEVICE_ID_TIGON3_5780 0x166a +#define PCI_DEVICE_ID_TIGON3_5780S 0x166b +#define PCI_DEVICE_ID_TIGON3_5705F 0x166e +#define PCI_DEVICE_ID_NX2_57712_VF 0x166f +#define PCI_DEVICE_ID_TIGON3_5754M 0x1672 +#define PCI_DEVICE_ID_TIGON3_5755M 0x1673 +#define PCI_DEVICE_ID_TIGON3_5756 0x1674 +#define PCI_DEVICE_ID_TIGON3_5750 0x1676 +#define PCI_DEVICE_ID_TIGON3_5751 0x1677 +#define PCI_DEVICE_ID_TIGON3_5715 0x1678 +#define PCI_DEVICE_ID_TIGON3_5715S 0x1679 +#define PCI_DEVICE_ID_TIGON3_5754 0x167a +#define PCI_DEVICE_ID_TIGON3_5755 0x167b +#define PCI_DEVICE_ID_TIGON3_5751M 0x167d +#define PCI_DEVICE_ID_TIGON3_5751F 0x167e +#define PCI_DEVICE_ID_TIGON3_5787F 0x167f +#define PCI_DEVICE_ID_TIGON3_5761E 0x1680 +#define PCI_DEVICE_ID_TIGON3_5761 0x1681 +#define PCI_DEVICE_ID_TIGON3_5764 0x1684 +#define PCI_DEVICE_ID_NX2_57800 0x168a +#define PCI_DEVICE_ID_NX2_57840 0x168d +#define PCI_DEVICE_ID_NX2_57810 0x168e +#define PCI_DEVICE_ID_TIGON3_5787M 0x1693 +#define PCI_DEVICE_ID_TIGON3_5782 0x1696 +#define PCI_DEVICE_ID_TIGON3_5784 0x1698 +#define PCI_DEVICE_ID_TIGON3_5786 0x169a +#define PCI_DEVICE_ID_TIGON3_5787 0x169b +#define PCI_DEVICE_ID_TIGON3_5788 0x169c +#define PCI_DEVICE_ID_TIGON3_5789 0x169d +#define PCI_DEVICE_ID_NX2_57840_4_10 0x16a1 +#define PCI_DEVICE_ID_NX2_57840_2_20 0x16a2 +#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4 +#define PCI_DEVICE_ID_NX2_57800_MF 0x16a5 +#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 +#define PCI_DEVICE_ID_TIGON3_5703X 0x16a7 +#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 +#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 +#define PCI_DEVICE_ID_NX2_5706S 0x16aa +#define PCI_DEVICE_ID_NX2_5708S 0x16ac +#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad +#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae +#define PCI_DEVICE_ID_NX2_57810_VF 0x16af +#define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6 +#define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7 +#define PCI_DEVICE_ID_TIGON3_5781 0x16dd +#define PCI_DEVICE_ID_TIGON3_5753 0x16f7 +#define PCI_DEVICE_ID_TIGON3_5753M 0x16fd +#define PCI_DEVICE_ID_TIGON3_5753F 0x16fe +#define PCI_DEVICE_ID_TIGON3_5901 0x170d +#define PCI_DEVICE_ID_BCM4401B1 0x170c +#define PCI_DEVICE_ID_TIGON3_5901_2 0x170e +#define PCI_DEVICE_ID_TIGON3_5906 0x1712 +#define PCI_DEVICE_ID_TIGON3_5906M 0x1713 +#define PCI_DEVICE_ID_BCM4401 0x4401 +#define PCI_DEVICE_ID_BCM4401B0 0x4402 + +#define PCI_VENDOR_ID_TOPIC 0x151f +#define PCI_DEVICE_ID_TOPIC_TP560 0x0000 + +#define PCI_VENDOR_ID_MAINPINE 0x1522 +#define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100 +#define PCI_VENDOR_ID_ENE 0x1524 +#define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510 +#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550 +#define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551 +#define PCI_DEVICE_ID_ENE_CB714_SD 0x0750 +#define PCI_DEVICE_ID_ENE_CB714_SD_2 0x0751 +#define PCI_DEVICE_ID_ENE_1211 0x1211 +#define PCI_DEVICE_ID_ENE_1225 0x1225 +#define PCI_DEVICE_ID_ENE_1410 0x1410 +#define PCI_DEVICE_ID_ENE_710 0x1411 +#define PCI_DEVICE_ID_ENE_712 0x1412 +#define PCI_DEVICE_ID_ENE_1420 0x1420 +#define PCI_DEVICE_ID_ENE_720 0x1421 +#define PCI_DEVICE_ID_ENE_722 0x1422 + +#define PCI_SUBVENDOR_ID_PERLE 0x155f +#define PCI_SUBDEVICE_ID_PCI_RAS4 0xf001 +#define PCI_SUBDEVICE_ID_PCI_RAS8 0xf010 + +#define PCI_VENDOR_ID_SYBA 0x1592 
+#define PCI_DEVICE_ID_SYBA_2P_EPP 0x0782 +#define PCI_DEVICE_ID_SYBA_1P_ECP 0x0783 + +#define PCI_VENDOR_ID_MORETON 0x15aa +#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 + +#define PCI_VENDOR_ID_VMWARE 0x15ad +#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0 + +#define PCI_VENDOR_ID_ZOLTRIX 0x15b0 +#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 + +#define PCI_VENDOR_ID_MELLANOX 0x15b3 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX3 0x1003 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO 0x1007 +#define PCI_DEVICE_ID_MELLANOX_CONNECTIB 0x1011 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX4 0x1013 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX 0x1015 +#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 +#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 +#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c +#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 +#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 +#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 +#define PCI_DEVICE_ID_MELLANOX_HERMON_SDR 0x6340 +#define PCI_DEVICE_ID_MELLANOX_HERMON_DDR 0x634a +#define PCI_DEVICE_ID_MELLANOX_HERMON_QDR 0x6354 +#define PCI_DEVICE_ID_MELLANOX_HERMON_EN 0x6368 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN 0x6372 +#define PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2 0x6732 +#define PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2 0x673c +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2 0x6746 +#define PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2 0x6750 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2 0x675a +#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2 0x6764 +#define PCI_DEVICE_ID_MELLANOX_CONNECTX2 0x676e + +#define PCI_VENDOR_ID_DFI 0x15bd + +#define PCI_VENDOR_ID_QUICKNET 0x15e2 +#define PCI_DEVICE_ID_QUICKNET_XJ 0x0500 + +/* + * ADDI-DATA GmbH communication cards + */ +#define PCI_VENDOR_ID_ADDIDATA 0x15B8 +#define PCI_DEVICE_ID_ADDIDATA_APCI7500 0x7000 +#define PCI_DEVICE_ID_ADDIDATA_APCI7420 0x7001 +#define PCI_DEVICE_ID_ADDIDATA_APCI7300 0x7002 +#define PCI_DEVICE_ID_ADDIDATA_APCI7500_2 0x7009 +#define PCI_DEVICE_ID_ADDIDATA_APCI7420_2 0x700A +#define PCI_DEVICE_ID_ADDIDATA_APCI7300_2 0x700B +#define PCI_DEVICE_ID_ADDIDATA_APCI7500_3 0x700C +#define PCI_DEVICE_ID_ADDIDATA_APCI7420_3 0x700D +#define PCI_DEVICE_ID_ADDIDATA_APCI7300_3 0x700E +#define PCI_DEVICE_ID_ADDIDATA_APCI7800_3 0x700F +#define PCI_DEVICE_ID_ADDIDATA_APCIe7300 0x7010 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7420 0x7011 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7500 0x7012 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7800 0x7013 + +#define PCI_VENDOR_ID_PDC 0x15e9 + +#define PCI_VENDOR_ID_FARSITE 0x1619 +#define PCI_DEVICE_ID_FARSITE_T2P 0x0400 +#define PCI_DEVICE_ID_FARSITE_T4P 0x0440 +#define PCI_DEVICE_ID_FARSITE_T1U 0x0610 +#define PCI_DEVICE_ID_FARSITE_T2U 0x0620 +#define PCI_DEVICE_ID_FARSITE_T4U 0x0640 +#define PCI_DEVICE_ID_FARSITE_TE1 0x1610 +#define PCI_DEVICE_ID_FARSITE_TE1C 0x1612 + +#define PCI_VENDOR_ID_ARIMA 0x161f + +#define PCI_VENDOR_ID_BROCADE 0x1657 +#define PCI_DEVICE_ID_BROCADE_CT 0x0014 +#define PCI_DEVICE_ID_BROCADE_FC_8G1P 0x0017 +#define PCI_DEVICE_ID_BROCADE_CT_FC 0x0021 + +#define PCI_VENDOR_ID_SIBYTE 0x166d +#define PCI_DEVICE_ID_BCM1250_PCI 0x0001 +#define PCI_DEVICE_ID_BCM1250_HT 0x0002 + +#define PCI_VENDOR_ID_ATHEROS 0x168c + +#define PCI_VENDOR_ID_NETCELL 0x169c +#define PCI_DEVICE_ID_REVOLUTION 0x0044 + +#define PCI_VENDOR_ID_CENATEK 0x16CA +#define PCI_DEVICE_ID_CENATEK_IDE 0x0001 + +#define PCI_VENDOR_ID_SYNOPSYS 0x16c3 +#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd +#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce +#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf +#define 
PCI_DEVICE_ID_SYNOPSYS_EDDA 0xedda + +#define PCI_VENDOR_ID_USR 0x16ec + +#define PCI_VENDOR_ID_VITESSE 0x1725 +#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 + +#define PCI_VENDOR_ID_LINKSYS 0x1737 +#define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064 + +#define PCI_VENDOR_ID_ALTIMA 0x173b +#define PCI_DEVICE_ID_ALTIMA_AC1000 0x03e8 +#define PCI_DEVICE_ID_ALTIMA_AC1001 0x03e9 +#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea +#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb + +#define PCI_VENDOR_ID_CAVIUM 0x177d + +#define PCI_VENDOR_ID_TECHWELL 0x1797 +#define PCI_DEVICE_ID_TECHWELL_6800 0x6800 +#define PCI_DEVICE_ID_TECHWELL_6801 0x6801 +#define PCI_DEVICE_ID_TECHWELL_6804 0x6804 +#define PCI_DEVICE_ID_TECHWELL_6816_1 0x6810 +#define PCI_DEVICE_ID_TECHWELL_6816_2 0x6811 +#define PCI_DEVICE_ID_TECHWELL_6816_3 0x6812 +#define PCI_DEVICE_ID_TECHWELL_6816_4 0x6813 + +#define PCI_VENDOR_ID_BELKIN 0x1799 +#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f + +#define PCI_VENDOR_ID_RDC 0x17f3 +#define PCI_DEVICE_ID_RDC_R6020 0x6020 +#define PCI_DEVICE_ID_RDC_R6030 0x6030 +#define PCI_DEVICE_ID_RDC_R6040 0x6040 +#define PCI_DEVICE_ID_RDC_R6060 0x6060 +#define PCI_DEVICE_ID_RDC_R6061 0x6061 +#define PCI_DEVICE_ID_RDC_D1010 0x1010 + +#define PCI_VENDOR_ID_GLI 0x17a0 + +#define PCI_VENDOR_ID_LENOVO 0x17aa + +#define PCI_VENDOR_ID_QCOM 0x17cb + +#define PCI_VENDOR_ID_CDNS 0x17cd + +#define PCI_VENDOR_ID_ARECA 0x17d3 +#define PCI_DEVICE_ID_ARECA_1110 0x1110 +#define PCI_DEVICE_ID_ARECA_1120 0x1120 +#define PCI_DEVICE_ID_ARECA_1130 0x1130 +#define PCI_DEVICE_ID_ARECA_1160 0x1160 +#define PCI_DEVICE_ID_ARECA_1170 0x1170 +#define PCI_DEVICE_ID_ARECA_1200 0x1200 +#define PCI_DEVICE_ID_ARECA_1201 0x1201 +#define PCI_DEVICE_ID_ARECA_1202 0x1202 +#define PCI_DEVICE_ID_ARECA_1210 0x1210 +#define PCI_DEVICE_ID_ARECA_1220 0x1220 +#define PCI_DEVICE_ID_ARECA_1230 0x1230 +#define PCI_DEVICE_ID_ARECA_1260 0x1260 +#define PCI_DEVICE_ID_ARECA_1270 0x1270 +#define PCI_DEVICE_ID_ARECA_1280 0x1280 +#define PCI_DEVICE_ID_ARECA_1380 0x1380 +#define PCI_DEVICE_ID_ARECA_1381 0x1381 +#define PCI_DEVICE_ID_ARECA_1680 0x1680 +#define PCI_DEVICE_ID_ARECA_1681 0x1681 + +#define PCI_VENDOR_ID_S2IO 0x17d5 +#define PCI_DEVICE_ID_S2IO_WIN 0x5731 +#define PCI_DEVICE_ID_S2IO_UNI 0x5831 +#define PCI_DEVICE_ID_HERC_WIN 0x5732 +#define PCI_DEVICE_ID_HERC_UNI 0x5832 + +#define PCI_VENDOR_ID_SITECOM 0x182d +#define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 + +#define PCI_VENDOR_ID_TOPSPIN 0x1867 + +#define PCI_VENDOR_ID_COMMTECH 0x18f7 + +#define PCI_VENDOR_ID_SILAN 0x1904 + +#define PCI_VENDOR_ID_RENESAS 0x1912 +#define PCI_DEVICE_ID_RENESAS_SH7781 0x0001 +#define PCI_DEVICE_ID_RENESAS_SH7780 0x0002 +#define PCI_DEVICE_ID_RENESAS_SH7763 0x0004 +#define PCI_DEVICE_ID_RENESAS_SH7785 0x0007 +#define PCI_DEVICE_ID_RENESAS_SH7786 0x0010 + +#define PCI_VENDOR_ID_SOLARFLARE 0x1924 +#define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0 0x0703 +#define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1 0x6703 +#define PCI_DEVICE_ID_SOLARFLARE_SFC4000B 0x0710 + +#define PCI_VENDOR_ID_TDI 0x192E +#define PCI_DEVICE_ID_TDI_EHCI 0x0101 + +#define PCI_VENDOR_ID_FREESCALE 0x1957 +#define PCI_DEVICE_ID_MPC8308 0xc006 +#define PCI_DEVICE_ID_MPC8315E 0x00b4 +#define PCI_DEVICE_ID_MPC8315 0x00b5 +#define PCI_DEVICE_ID_MPC8314E 0x00b6 +#define PCI_DEVICE_ID_MPC8314 0x00b7 +#define PCI_DEVICE_ID_MPC8378E 0x00c4 +#define PCI_DEVICE_ID_MPC8378 0x00c5 +#define PCI_DEVICE_ID_MPC8377E 0x00c6 +#define PCI_DEVICE_ID_MPC8377 0x00c7 +#define PCI_DEVICE_ID_MPC8548E 0x0012 +#define PCI_DEVICE_ID_MPC8548 0x0013 +#define 
PCI_DEVICE_ID_MPC8543E 0x0014 +#define PCI_DEVICE_ID_MPC8543 0x0015 +#define PCI_DEVICE_ID_MPC8547E 0x0018 +#define PCI_DEVICE_ID_MPC8545E 0x0019 +#define PCI_DEVICE_ID_MPC8545 0x001a +#define PCI_DEVICE_ID_MPC8569E 0x0061 +#define PCI_DEVICE_ID_MPC8569 0x0060 +#define PCI_DEVICE_ID_MPC8568E 0x0020 +#define PCI_DEVICE_ID_MPC8568 0x0021 +#define PCI_DEVICE_ID_MPC8567E 0x0022 +#define PCI_DEVICE_ID_MPC8567 0x0023 +#define PCI_DEVICE_ID_MPC8533E 0x0030 +#define PCI_DEVICE_ID_MPC8533 0x0031 +#define PCI_DEVICE_ID_MPC8544E 0x0032 +#define PCI_DEVICE_ID_MPC8544 0x0033 +#define PCI_DEVICE_ID_MPC8572E 0x0040 +#define PCI_DEVICE_ID_MPC8572 0x0041 +#define PCI_DEVICE_ID_MPC8536E 0x0050 +#define PCI_DEVICE_ID_MPC8536 0x0051 +#define PCI_DEVICE_ID_P2020E 0x0070 +#define PCI_DEVICE_ID_P2020 0x0071 +#define PCI_DEVICE_ID_P2010E 0x0078 +#define PCI_DEVICE_ID_P2010 0x0079 +#define PCI_DEVICE_ID_P1020E 0x0100 +#define PCI_DEVICE_ID_P1020 0x0101 +#define PCI_DEVICE_ID_P1021E 0x0102 +#define PCI_DEVICE_ID_P1021 0x0103 +#define PCI_DEVICE_ID_P1011E 0x0108 +#define PCI_DEVICE_ID_P1011 0x0109 +#define PCI_DEVICE_ID_P1022E 0x0110 +#define PCI_DEVICE_ID_P1022 0x0111 +#define PCI_DEVICE_ID_P1013E 0x0118 +#define PCI_DEVICE_ID_P1013 0x0119 +#define PCI_DEVICE_ID_P4080E 0x0400 +#define PCI_DEVICE_ID_P4080 0x0401 +#define PCI_DEVICE_ID_P4040E 0x0408 +#define PCI_DEVICE_ID_P4040 0x0409 +#define PCI_DEVICE_ID_P2040E 0x0410 +#define PCI_DEVICE_ID_P2040 0x0411 +#define PCI_DEVICE_ID_P3041E 0x041E +#define PCI_DEVICE_ID_P3041 0x041F +#define PCI_DEVICE_ID_P5020E 0x0420 +#define PCI_DEVICE_ID_P5020 0x0421 +#define PCI_DEVICE_ID_P5010E 0x0428 +#define PCI_DEVICE_ID_P5010 0x0429 +#define PCI_DEVICE_ID_MPC8641 0x7010 +#define PCI_DEVICE_ID_MPC8641D 0x7011 +#define PCI_DEVICE_ID_MPC8610 0x7018 + +#define PCI_VENDOR_ID_PASEMI 0x1959 + +#define PCI_VENDOR_ID_ATTANSIC 0x1969 +#define PCI_DEVICE_ID_ATTANSIC_L1 0x1048 +#define PCI_DEVICE_ID_ATTANSIC_L2 0x2048 + +#define PCI_VENDOR_ID_JMICRON 0x197B +#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 +#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 +#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362 +#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 +#define PCI_DEVICE_ID_JMICRON_JMB364 0x2364 +#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 +#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 +#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 +#define PCI_DEVICE_ID_JMICRON_JMB369 0x2369 +#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 +#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 +#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 +#define PCI_DEVICE_ID_JMICRON_JMB385_MS 0x2388 +#define PCI_DEVICE_ID_JMICRON_JMB388_SD 0x2391 +#define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392 +#define PCI_DEVICE_ID_JMICRON_JMB390_MS 0x2393 + +#define PCI_VENDOR_ID_KORENIX 0x1982 +#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 +#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff +#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 +#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff + +#define PCI_VENDOR_ID_HUAWEI 0x19e5 + +#define PCI_VENDOR_ID_NETRONOME 0x19ee +#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 +#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000 +#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000 +#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003 + +#define PCI_VENDOR_ID_QMI 0x1a32 + +#define PCI_VENDOR_ID_AZWAVE 0x1a3b + +#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4 +#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4 +#define PCI_SUBDEVICE_ID_QEMU 0x1100 + +#define PCI_VENDOR_ID_ASMEDIA 0x1b21 + +#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 + 
+#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 +#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001 + +#define PCI_VENDOR_ID_AMAZON 0x1d0f + +#define PCI_VENDOR_ID_TEKRAM 0x1de1 +#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 + +#define PCI_VENDOR_ID_TEHUTI 0x1fc9 +#define PCI_DEVICE_ID_TEHUTI_3009 0x3009 +#define PCI_DEVICE_ID_TEHUTI_3010 0x3010 +#define PCI_DEVICE_ID_TEHUTI_3014 0x3014 + +#define PCI_VENDOR_ID_SUNIX 0x1fd4 +#define PCI_DEVICE_ID_SUNIX_1999 0x1999 + +#define PCI_VENDOR_ID_HINT 0x3388 +#define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013 + +#define PCI_VENDOR_ID_3DLABS 0x3d3d +#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 +#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 + +#define PCI_VENDOR_ID_NETXEN 0x4040 +#define PCI_DEVICE_ID_NX2031_10GXSR 0x0001 +#define PCI_DEVICE_ID_NX2031_10GCX4 0x0002 +#define PCI_DEVICE_ID_NX2031_4GCU 0x0003 +#define PCI_DEVICE_ID_NX2031_IMEZ 0x0004 +#define PCI_DEVICE_ID_NX2031_HMEZ 0x0005 +#define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024 +#define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025 +#define PCI_DEVICE_ID_NX3031 0x0100 + +#define PCI_VENDOR_ID_AKS 0x416c +#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 + +#define PCI_VENDOR_ID_ACCESSIO 0x494f +#define PCI_DEVICE_ID_ACCESSIO_WDG_CSM 0x22c0 + +#define PCI_VENDOR_ID_S3 0x5333 +#define PCI_DEVICE_ID_S3_TRIO 0x8811 +#define PCI_DEVICE_ID_S3_868 0x8880 +#define PCI_DEVICE_ID_S3_968 0x88f0 +#define PCI_DEVICE_ID_S3_SAVAGE4 0x8a25 +#define PCI_DEVICE_ID_S3_PROSAVAGE8 0x8d04 +#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00 + +#define PCI_VENDOR_ID_DUNORD 0x5544 +#define PCI_DEVICE_ID_DUNORD_I3000 0x0001 + +#define PCI_VENDOR_ID_DCI 0x6666 +#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001 +#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002 +#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004 + +#define PCI_VENDOR_ID_INTEL 0x8086 +#define PCI_DEVICE_ID_INTEL_EESSC 0x0008 +#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 +#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 +#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 +#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A +#define PCI_DEVICE_ID_INTEL_PXHV 0x032C +#define PCI_DEVICE_ID_INTEL_80332_0 0x0330 +#define PCI_DEVICE_ID_INTEL_80332_1 0x0332 +#define PCI_DEVICE_ID_INTEL_80333_0 0x0370 +#define PCI_DEVICE_ID_INTEL_80333_1 0x0372 +#define PCI_DEVICE_ID_INTEL_82375 0x0482 +#define PCI_DEVICE_ID_INTEL_82424 0x0483 +#define PCI_DEVICE_ID_INTEL_82378 0x0484 +#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807 +#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808 +#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820 +#define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821 +#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822 +#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823 +#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824 +#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F +#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E +#define PCI_DEVICE_ID_INTEL_I960 0x0960 +#define PCI_DEVICE_ID_INTEL_I960RM 0x0962 +#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60 +#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062 +#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085 +#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F +#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 +#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 +#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 +#define PCI_DEVICE_ID_INTEL_7505_0 0x2550 +#define PCI_DEVICE_ID_INTEL_7205_0 0x255d +#define PCI_DEVICE_ID_INTEL_82437 0x122d +#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e +#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230 +#define PCI_DEVICE_ID_INTEL_82371MX 0x1234 +#define PCI_DEVICE_ID_INTEL_82441 0x1237 +#define PCI_DEVICE_ID_INTEL_82380FB 0x124b +#define PCI_DEVICE_ID_INTEL_82439 
0x1250 +#define PCI_DEVICE_ID_INTEL_LIGHT_RIDGE 0x1513 /* Tbt 1 Gen 1 */ +#define PCI_DEVICE_ID_INTEL_EAGLE_RIDGE 0x151a +#define PCI_DEVICE_ID_INTEL_LIGHT_PEAK 0x151b +#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C 0x1547 /* Tbt 1 Gen 2 */ +#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C 0x1548 +#define PCI_DEVICE_ID_INTEL_PORT_RIDGE 0x1549 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_NHI 0x1566 /* Tbt 1 Gen 3 */ +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE 0x1567 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_NHI 0x1568 +#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE 0x1569 +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI 0x156a /* Thunderbolt 2 */ +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE 0x156b +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI 0x156c +#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE 0x156d +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI 0x1575 /* Thunderbolt 3 */ +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 +#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 +#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 +#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 +#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 +#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41 +#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f +#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40 +#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41 +#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31 +#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40 +#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f +#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d +#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310 +#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f +#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 +#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 +#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 +#define PCI_DEVICE_ID_INTEL_82801AA_5 0x2415 +#define PCI_DEVICE_ID_INTEL_82801AA_6 0x2416 +#define PCI_DEVICE_ID_INTEL_82801AA_8 0x2418 +#define PCI_DEVICE_ID_INTEL_82801AB_0 0x2420 +#define PCI_DEVICE_ID_INTEL_82801AB_1 0x2421 +#define PCI_DEVICE_ID_INTEL_82801AB_3 0x2423 +#define PCI_DEVICE_ID_INTEL_82801AB_5 0x2425 +#define PCI_DEVICE_ID_INTEL_82801AB_6 0x2426 +#define PCI_DEVICE_ID_INTEL_82801AB_8 0x2428 +#define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440 +#define PCI_DEVICE_ID_INTEL_82801BA_2 0x2443 +#define PCI_DEVICE_ID_INTEL_82801BA_4 0x2445 +#define PCI_DEVICE_ID_INTEL_82801BA_6 0x2448 +#define PCI_DEVICE_ID_INTEL_82801BA_8 0x244a +#define PCI_DEVICE_ID_INTEL_82801BA_9 0x244b +#define PCI_DEVICE_ID_INTEL_82801BA_10 0x244c +#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e +#define PCI_DEVICE_ID_INTEL_82801E_0 0x2450 +#define PCI_DEVICE_ID_INTEL_82801E_11 0x245b +#define PCI_DEVICE_ID_INTEL_82801CA_0 0x2480 +#define PCI_DEVICE_ID_INTEL_82801CA_3 0x2483 +#define PCI_DEVICE_ID_INTEL_82801CA_5 0x2485 +#define PCI_DEVICE_ID_INTEL_82801CA_6 0x2486 +#define PCI_DEVICE_ID_INTEL_82801CA_10 0x248a +#define PCI_DEVICE_ID_INTEL_82801CA_11 0x248b +#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c +#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 +#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 +#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2 +#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 +#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 +#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 +#define PCI_DEVICE_ID_INTEL_82801DB_9 0x24c9 +#define PCI_DEVICE_ID_INTEL_82801DB_10 0x24ca +#define PCI_DEVICE_ID_INTEL_82801DB_11 0x24cb 
+#define PCI_DEVICE_ID_INTEL_82801DB_12 0x24cc +#define PCI_DEVICE_ID_INTEL_82801EB_0 0x24d0 +#define PCI_DEVICE_ID_INTEL_82801EB_1 0x24d1 +#define PCI_DEVICE_ID_INTEL_82801EB_3 0x24d3 +#define PCI_DEVICE_ID_INTEL_82801EB_5 0x24d5 +#define PCI_DEVICE_ID_INTEL_82801EB_6 0x24d6 +#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db +#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc +#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd +#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1 +#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2 +#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4 +#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6 +#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab +#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac +#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500 +#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 +#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 +#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531 +#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c +#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 +#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562 +#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570 +#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572 +#define PCI_DEVICE_ID_INTEL_82875_HB 0x2578 +#define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580 +#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 +#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590 +#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 +#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0 +#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5 +#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6 +#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770 +#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 +#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778 +#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0 +#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 +#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 +#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641 +#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642 +#define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a +#define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d +#define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e +#define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f +#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670 +#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698 +#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b +#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e +#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8 +#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9 +#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0 +#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc +#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd +#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da +#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd +#define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de +#define PCI_DEVICE_ID_INTEL_ICH7_21 0x27df +#define PCI_DEVICE_ID_INTEL_ICH8_0 0x2810 +#define PCI_DEVICE_ID_INTEL_ICH8_1 0x2811 +#define PCI_DEVICE_ID_INTEL_ICH8_2 0x2812 +#define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814 +#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 +#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e +#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 +#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0 +#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910 +#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917 +#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912 +#define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913 +#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914 +#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919 +#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930 +#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 +#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 +#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18 +#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19 +#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a +#define PCI_DEVICE_ID_INTEL_I7_MC_TEST 0x2c1c +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL 0x2c20 +#define 
PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR 0x2c21 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK 0x2c22 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC 0x2c23 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL 0x2c28 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR 0x2c29 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK 0x2c2a +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC 0x2c2b +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL 0x2c30 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33 +#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41 +#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_SAD 0x2c81 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0 0x2c90 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC 0x2ca3 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL 0x2ca8 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR 0x2ca9 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK 0x2caa +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC 0x2cab +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2 0x2d98 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2 0x2d99 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2 0x2d9a +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2 0x2d9c +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2 0x2da0 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2 0x2da1 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2 0x2da2 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2 0x2da3 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2 0x2da8 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2 0x2da9 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2 0x2daa +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2 0x2dab +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2 0x2db0 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3 +#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a +#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b +#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c +#define PCI_DEVICE_ID_INTEL_X58_HUB_MGMT 0x342e +#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 +#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 +#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 +#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c +#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e +#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 +#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582 +#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590 +#define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592 +#define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595 +#define PCI_DEVICE_ID_INTEL_MCH_PA1 0x3596 +#define PCI_DEVICE_ID_INTEL_MCH_PB 0x3597 +#define 
PCI_DEVICE_ID_INTEL_MCH_PB1 0x3598 +#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 +#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a +#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e +#define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c +#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f +#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610 +#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b +#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c +#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 +#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 +#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 +#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 +#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a +#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 +#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 +#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00 +#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f +#define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB3 0x3c23 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB4 0x3c24 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB5 0x3c25 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB6 0x3c26 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e +#define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f +#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46 +#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0 +#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1 +#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4 +#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5 +#define PCI_DEVICE_ID_INTEL_UNC_QPI0 0x3c41 +#define PCI_DEVICE_ID_INTEL_UNC_QPI1 0x3c42 +#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43 +#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44 +#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45 +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */ +#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0 +#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */ +#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f +#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 +#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3 +#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 +#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6 +#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 +#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 +#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 +#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 
0x65ff +#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031 +#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032 +#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 +#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 +#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020 +#define PCI_DEVICE_ID_INTEL_82437VX 0x7030 +#define PCI_DEVICE_ID_INTEL_82439TX 0x7100 +#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110 +#define PCI_DEVICE_ID_INTEL_82371AB 0x7111 +#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112 +#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113 +#define PCI_DEVICE_ID_INTEL_82810_MC1 0x7120 +#define PCI_DEVICE_ID_INTEL_82810_IG1 0x7121 +#define PCI_DEVICE_ID_INTEL_82810_MC3 0x7122 +#define PCI_DEVICE_ID_INTEL_82810_IG3 0x7123 +#define PCI_DEVICE_ID_INTEL_82810E_MC 0x7124 +#define PCI_DEVICE_ID_INTEL_82810E_IG 0x7125 +#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180 +#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181 +#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190 +#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191 +#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 +#define PCI_DEVICE_ID_INTEL_440MX 0x7195 +#define PCI_DEVICE_ID_INTEL_440MX_6 0x7196 +#define PCI_DEVICE_ID_INTEL_82443MX_0 0x7198 +#define PCI_DEVICE_ID_INTEL_82443MX_1 0x7199 +#define PCI_DEVICE_ID_INTEL_82443MX_3 0x719b +#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 +#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2 +#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601 +#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119 +#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a +#define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183 +#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186 +#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4 +#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5 +#define PCI_DEVICE_ID_INTEL_82451NX 0x84ca +#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb +#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea +#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500 +#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004 +#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b +#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 + +#define PCI_VENDOR_ID_SCALEMP 0x8686 +#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010 + +#define PCI_VENDOR_ID_COMPUTONE 0x8e0e +#define PCI_DEVICE_ID_COMPUTONE_PG 0x0302 +#define PCI_SUBVENDOR_ID_COMPUTONE 0x8e0e +#define PCI_SUBDEVICE_ID_COMPUTONE_PG4 0x0001 +#define PCI_SUBDEVICE_ID_COMPUTONE_PG8 0x0002 +#define PCI_SUBDEVICE_ID_COMPUTONE_PG6 0x0003 + +#define PCI_VENDOR_ID_KTI 0x8e2e + +#define PCI_VENDOR_ID_ADAPTEC 0x9004 +#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078 +#define PCI_DEVICE_ID_ADAPTEC_7821 0x2178 +#define PCI_DEVICE_ID_ADAPTEC_38602 0x3860 +#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078 +#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578 +#define PCI_DEVICE_ID_ADAPTEC_3860 0x6038 +#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075 +#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078 +#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178 +#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078 +#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178 +#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278 +#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378 +#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478 +#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895 +#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078 +#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178 +#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278 +#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378 +#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478 +#define PCI_DEVICE_ID_ADAPTEC_7885 0x8578 +#define PCI_DEVICE_ID_ADAPTEC_7886 0x8678 +#define PCI_DEVICE_ID_ADAPTEC_7887 0x8778 +#define PCI_DEVICE_ID_ADAPTEC_7888 0x8878 + +#define PCI_VENDOR_ID_ADAPTEC2 0x9005 +#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010 +#define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011 
+#define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013 +#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f +#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050 +#define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051 +#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f +#define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080 +#define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081 +#define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083 +#define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f +#define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0 +#define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1 +#define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3 +#define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf +#define PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN 0x0500 +#define PCI_DEVICE_ID_ADAPTEC2_SCAMP 0x0503 + +#define PCI_VENDOR_ID_HOLTEK 0x9412 +#define PCI_DEVICE_ID_HOLTEK_6565 0x6565 + +#define PCI_VENDOR_ID_NETMOS 0x9710 +#define PCI_DEVICE_ID_NETMOS_9705 0x9705 +#define PCI_DEVICE_ID_NETMOS_9715 0x9715 +#define PCI_DEVICE_ID_NETMOS_9735 0x9735 +#define PCI_DEVICE_ID_NETMOS_9745 0x9745 +#define PCI_DEVICE_ID_NETMOS_9755 0x9755 +#define PCI_DEVICE_ID_NETMOS_9805 0x9805 +#define PCI_DEVICE_ID_NETMOS_9815 0x9815 +#define PCI_DEVICE_ID_NETMOS_9835 0x9835 +#define PCI_DEVICE_ID_NETMOS_9845 0x9845 +#define PCI_DEVICE_ID_NETMOS_9855 0x9855 +#define PCI_DEVICE_ID_NETMOS_9865 0x9865 +#define PCI_DEVICE_ID_NETMOS_9900 0x9900 +#define PCI_DEVICE_ID_NETMOS_9901 0x9901 +#define PCI_DEVICE_ID_NETMOS_9904 0x9904 +#define PCI_DEVICE_ID_NETMOS_9912 0x9912 +#define PCI_DEVICE_ID_NETMOS_9922 0x9922 + +#define PCI_VENDOR_ID_3COM_2 0xa727 + +#define PCI_VENDOR_ID_DIGIUM 0xd161 +#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410 + +#define PCI_SUBVENDOR_ID_EXSYS 0xd84d +#define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014 +#define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055 + +#define PCI_VENDOR_ID_TIGERJET 0xe159 +#define PCI_DEVICE_ID_TIGERJET_300 0x0001 +#define PCI_DEVICE_ID_TIGERJET_100 0x0002 + +#define PCI_VENDOR_ID_XILINX_RME 0xea60 +#define PCI_DEVICE_ID_RME_DIGI32 0x9896 +#define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897 +#define PCI_DEVICE_ID_RME_DIGI32_8 0x9898 + +#define PCI_VENDOR_ID_XEN 0x5853 +#define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 + +#define PCI_VENDOR_ID_OCZ 0x1b85 + +#define PCI_VENDOR_ID_NCUBE 0x10ff + +#endif /* _LINUX_PCI_IDS_H */ diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h new file mode 100644 index 000000000..2bb62bf29 --- /dev/null +++ b/include/linux/pda_power.h @@ -0,0 +1,42 @@ +/* + * Common power driver for PDAs and phones with one or two external + * power supplies (AC/USB) connected to main and backup batteries, + * and optional builtin charger. + * + * Copyright © 2007 Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __PDA_POWER_H__ +#define __PDA_POWER_H__ + +#define PDA_POWER_CHARGE_AC (1 << 0) +#define PDA_POWER_CHARGE_USB (1 << 1) + +struct device; + +struct pda_power_pdata { + int (*init)(struct device *dev); + int (*is_ac_online)(void); + int (*is_usb_online)(void); + void (*set_charge)(int flags); + void (*exit)(struct device *dev); + int (*suspend)(pm_message_t state); + int (*resume)(void); + + char **supplied_to; + size_t num_supplicants; + + unsigned int wait_for_status; /* msecs, default is 500 */ + unsigned int wait_for_charger; /* msecs, default is 500 */ + unsigned int polling_interval; /* msecs, default is 2000 */ + + unsigned long ac_max_uA; /* current to draw when on AC */ + + bool use_otg_notifier; +}; + +#endif /* __PDA_POWER_H__ */ diff --git a/include/linux/pe.h b/include/linux/pe.h new file mode 100644 index 000000000..143ce75be --- /dev/null +++ b/include/linux/pe.h @@ -0,0 +1,455 @@ +/* + * Copyright 2011 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Author(s): Peter Jones + */ +#ifndef __LINUX_PE_H +#define __LINUX_PE_H + +#include + +#define MZ_MAGIC 0x5a4d /* "MZ" */ + +#define PE_MAGIC 0x00004550 /* "PE\0\0" */ +#define PE_OPT_MAGIC_PE32 0x010b +#define PE_OPT_MAGIC_PE32_ROM 0x0107 +#define PE_OPT_MAGIC_PE32PLUS 0x020b + +/* machine type */ +#define IMAGE_FILE_MACHINE_UNKNOWN 0x0000 +#define IMAGE_FILE_MACHINE_AM33 0x01d3 +#define IMAGE_FILE_MACHINE_AMD64 0x8664 +#define IMAGE_FILE_MACHINE_ARM 0x01c0 +#define IMAGE_FILE_MACHINE_ARMV7 0x01c4 +#define IMAGE_FILE_MACHINE_ARM64 0xaa64 +#define IMAGE_FILE_MACHINE_EBC 0x0ebc +#define IMAGE_FILE_MACHINE_I386 0x014c +#define IMAGE_FILE_MACHINE_IA64 0x0200 +#define IMAGE_FILE_MACHINE_M32R 0x9041 +#define IMAGE_FILE_MACHINE_MIPS16 0x0266 +#define IMAGE_FILE_MACHINE_MIPSFPU 0x0366 +#define IMAGE_FILE_MACHINE_MIPSFPU16 0x0466 +#define IMAGE_FILE_MACHINE_POWERPC 0x01f0 +#define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1 +#define IMAGE_FILE_MACHINE_R4000 0x0166 +#define IMAGE_FILE_MACHINE_SH3 0x01a2 +#define IMAGE_FILE_MACHINE_SH3DSP 0x01a3 +#define IMAGE_FILE_MACHINE_SH3E 0x01a4 +#define IMAGE_FILE_MACHINE_SH4 0x01a6 +#define IMAGE_FILE_MACHINE_SH5 0x01a8 +#define IMAGE_FILE_MACHINE_THUMB 0x01c2 +#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169 + +/* flags */ +#define IMAGE_FILE_RELOCS_STRIPPED 0x0001 +#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 +#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004 +#define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008 +#define IMAGE_FILE_AGGRESSIVE_WS_TRIM 0x0010 +#define IMAGE_FILE_LARGE_ADDRESS_AWARE 0x0020 +#define IMAGE_FILE_16BIT_MACHINE 0x0040 +#define IMAGE_FILE_BYTES_REVERSED_LO 0x0080 +#define IMAGE_FILE_32BIT_MACHINE 0x0100 +#define IMAGE_FILE_DEBUG_STRIPPED 0x0200 +#define IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP 0x0400 +#define IMAGE_FILE_NET_RUN_FROM_SWAP 0x0800 +#define IMAGE_FILE_SYSTEM 0x1000 +#define IMAGE_FILE_DLL 0x2000 +#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000 +#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000 + +#define 
IMAGE_FILE_OPT_ROM_MAGIC 0x107 +#define IMAGE_FILE_OPT_PE32_MAGIC 0x10b +#define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b + +#define IMAGE_SUBSYSTEM_UNKNOWN 0 +#define IMAGE_SUBSYSTEM_NATIVE 1 +#define IMAGE_SUBSYSTEM_WINDOWS_GUI 2 +#define IMAGE_SUBSYSTEM_WINDOWS_CUI 3 +#define IMAGE_SUBSYSTEM_POSIX_CUI 7 +#define IMAGE_SUBSYSTEM_WINDOWS_CE_GUI 9 +#define IMAGE_SUBSYSTEM_EFI_APPLICATION 10 +#define IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER 11 +#define IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER 12 +#define IMAGE_SUBSYSTEM_EFI_ROM_IMAGE 13 +#define IMAGE_SUBSYSTEM_XBOX 14 + +#define IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE 0x0040 +#define IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY 0x0080 +#define IMAGE_DLL_CHARACTERISTICS_NX_COMPAT 0x0100 +#define IMAGE_DLLCHARACTERISTICS_NO_ISOLATION 0x0200 +#define IMAGE_DLLCHARACTERISTICS_NO_SEH 0x0400 +#define IMAGE_DLLCHARACTERISTICS_NO_BIND 0x0800 +#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000 +#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000 + +/* they actually defined 0x00000000 as well, but I think we'll skip that one. */ +#define IMAGE_SCN_RESERVED_0 0x00000001 +#define IMAGE_SCN_RESERVED_1 0x00000002 +#define IMAGE_SCN_RESERVED_2 0x00000004 +#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */ +#define IMAGE_SCN_RESERVED_3 0x00000010 +#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */ +#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */ +#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */ +#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */ +#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */ +#define IMAGE_SCN_RESERVED_4 0x00000400 +#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/ +#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */ +#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */ +#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */ +#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */ +/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */ +#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */ +#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */ +#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */ +#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */ +/* and here they just stuck a 1-byte integer in the middle of a bitfield */ +#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */ +#define IMAGE_SCN_ALIGN_2BYTES 0x00200000 +#define IMAGE_SCN_ALIGN_4BYTES 0x00300000 +#define IMAGE_SCN_ALIGN_8BYTES 0x00400000 +#define IMAGE_SCN_ALIGN_16BYTES 0x00500000 +#define IMAGE_SCN_ALIGN_32BYTES 0x00600000 +#define IMAGE_SCN_ALIGN_64BYTES 0x00700000 +#define IMAGE_SCN_ALIGN_128BYTES 0x00800000 +#define IMAGE_SCN_ALIGN_256BYTES 0x00900000 +#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000 +#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000 +#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000 +#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000 +#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000 +#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */ +#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */ +#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */ +#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */ +#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */ +#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */ +#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */ 
+#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */ + +#define IMAGE_DEBUG_TYPE_CODEVIEW 2 + +#ifndef __ASSEMBLY__ + +struct mz_hdr { + uint16_t magic; /* MZ_MAGIC */ + uint16_t lbsize; /* size of last used block */ + uint16_t blocks; /* pages in file, 0x3 */ + uint16_t relocs; /* relocations */ + uint16_t hdrsize; /* header size in "paragraphs" */ + uint16_t min_extra_pps; /* .bss */ + uint16_t max_extra_pps; /* runtime limit for the arena size */ + uint16_t ss; /* relative stack segment */ + uint16_t sp; /* initial %sp register */ + uint16_t checksum; /* word checksum */ + uint16_t ip; /* initial %ip register */ + uint16_t cs; /* initial %cs relative to load segment */ + uint16_t reloc_table_offset; /* offset of the first relocation */ + uint16_t overlay_num; /* overlay number. set to 0. */ + uint16_t reserved0[4]; /* reserved */ + uint16_t oem_id; /* oem identifier */ + uint16_t oem_info; /* oem specific */ + uint16_t reserved1[10]; /* reserved */ + uint32_t peaddr; /* address of pe header */ + char message[64]; /* message to print */ +}; + +struct mz_reloc { + uint16_t offset; + uint16_t segment; +}; + +struct pe_hdr { + uint32_t magic; /* PE magic */ + uint16_t machine; /* machine type */ + uint16_t sections; /* number of sections */ + uint32_t timestamp; /* time_t */ + uint32_t symbol_table; /* symbol table offset */ + uint32_t symbols; /* number of symbols */ + uint16_t opt_hdr_size; /* size of optional header */ + uint16_t flags; /* flags */ +}; + +/* the fact that pe32 isn't padded where pe32+ is 64-bit means union won't + * work right. vomit. */ +struct pe32_opt_hdr { + /* "standard" header */ + uint16_t magic; /* file type */ + uint8_t ld_major; /* linker major version */ + uint8_t ld_minor; /* linker minor version */ + uint32_t text_size; /* size of text section(s) */ + uint32_t data_size; /* size of data section(s) */ + uint32_t bss_size; /* size of bss section(s) */ + uint32_t entry_point; /* file offset of entry point */ + uint32_t code_base; /* relative code addr in ram */ + uint32_t data_base; /* relative data addr in ram */ + /* "windows" header */ + uint32_t image_base; /* preferred load address */ + uint32_t section_align; /* alignment in bytes */ + uint32_t file_align; /* file alignment in bytes */ + uint16_t os_major; /* major OS version */ + uint16_t os_minor; /* minor OS version */ + uint16_t image_major; /* major image version */ + uint16_t image_minor; /* minor image version */ + uint16_t subsys_major; /* major subsystem version */ + uint16_t subsys_minor; /* minor subsystem version */ + uint32_t win32_version; /* reserved, must be 0 */ + uint32_t image_size; /* image size */ + uint32_t header_size; /* header size rounded up to + file_align */ + uint32_t csum; /* checksum */ + uint16_t subsys; /* subsystem */ + uint16_t dll_flags; /* more flags! 
*/ + uint32_t stack_size_req;/* amt of stack requested */ + uint32_t stack_size; /* amt of stack required */ + uint32_t heap_size_req; /* amt of heap requested */ + uint32_t heap_size; /* amt of heap required */ + uint32_t loader_flags; /* reserved, must be 0 */ + uint32_t data_dirs; /* number of data dir entries */ +}; + +struct pe32plus_opt_hdr { + uint16_t magic; /* file type */ + uint8_t ld_major; /* linker major version */ + uint8_t ld_minor; /* linker minor version */ + uint32_t text_size; /* size of text section(s) */ + uint32_t data_size; /* size of data section(s) */ + uint32_t bss_size; /* size of bss section(s) */ + uint32_t entry_point; /* file offset of entry point */ + uint32_t code_base; /* relative code addr in ram */ + /* "windows" header */ + uint64_t image_base; /* preferred load address */ + uint32_t section_align; /* alignment in bytes */ + uint32_t file_align; /* file alignment in bytes */ + uint16_t os_major; /* major OS version */ + uint16_t os_minor; /* minor OS version */ + uint16_t image_major; /* major image version */ + uint16_t image_minor; /* minor image version */ + uint16_t subsys_major; /* major subsystem version */ + uint16_t subsys_minor; /* minor subsystem version */ + uint32_t win32_version; /* reserved, must be 0 */ + uint32_t image_size; /* image size */ + uint32_t header_size; /* header size rounded up to + file_align */ + uint32_t csum; /* checksum */ + uint16_t subsys; /* subsystem */ + uint16_t dll_flags; /* more flags! */ + uint64_t stack_size_req;/* amt of stack requested */ + uint64_t stack_size; /* amt of stack required */ + uint64_t heap_size_req; /* amt of heap requested */ + uint64_t heap_size; /* amt of heap required */ + uint32_t loader_flags; /* reserved, must be 0 */ + uint32_t data_dirs; /* number of data dir entries */ +}; + +struct data_dirent { + uint32_t virtual_address; /* relative to load address */ + uint32_t size; +}; + +struct data_directory { + struct data_dirent exports; /* .edata */ + struct data_dirent imports; /* .idata */ + struct data_dirent resources; /* .rsrc */ + struct data_dirent exceptions; /* .pdata */ + struct data_dirent certs; /* certs */ + struct data_dirent base_relocations; /* .reloc */ + struct data_dirent debug; /* .debug */ + struct data_dirent arch; /* reservered */ + struct data_dirent global_ptr; /* global pointer reg. Size=0 */ + struct data_dirent tls; /* .tls */ + struct data_dirent load_config; /* load configuration structure */ + struct data_dirent bound_imports; /* no idea */ + struct data_dirent import_addrs; /* import address table */ + struct data_dirent delay_imports; /* delay-load import table */ + struct data_dirent clr_runtime_hdr; /* .cor (object only) */ + struct data_dirent reserved; +}; + +struct section_header { + char name[8]; /* name or "/12\0" string tbl offset */ + uint32_t virtual_size; /* size of loaded section in ram */ + uint32_t virtual_address; /* relative virtual address */ + uint32_t raw_data_size; /* size of the section */ + uint32_t data_addr; /* file pointer to first page of sec */ + uint32_t relocs; /* file pointer to relocation entries */ + uint32_t line_numbers; /* line numbers! */ + uint16_t num_relocs; /* number of relocations */ + uint16_t num_lin_numbers; /* srsly. 
*/ + uint32_t flags; +}; + +enum x64_coff_reloc_type { + IMAGE_REL_AMD64_ABSOLUTE = 0, + IMAGE_REL_AMD64_ADDR64, + IMAGE_REL_AMD64_ADDR32, + IMAGE_REL_AMD64_ADDR32N, + IMAGE_REL_AMD64_REL32, + IMAGE_REL_AMD64_REL32_1, + IMAGE_REL_AMD64_REL32_2, + IMAGE_REL_AMD64_REL32_3, + IMAGE_REL_AMD64_REL32_4, + IMAGE_REL_AMD64_REL32_5, + IMAGE_REL_AMD64_SECTION, + IMAGE_REL_AMD64_SECREL, + IMAGE_REL_AMD64_SECREL7, + IMAGE_REL_AMD64_TOKEN, + IMAGE_REL_AMD64_SREL32, + IMAGE_REL_AMD64_PAIR, + IMAGE_REL_AMD64_SSPAN32, +}; + +enum arm_coff_reloc_type { + IMAGE_REL_ARM_ABSOLUTE, + IMAGE_REL_ARM_ADDR32, + IMAGE_REL_ARM_ADDR32N, + IMAGE_REL_ARM_BRANCH2, + IMAGE_REL_ARM_BRANCH1, + IMAGE_REL_ARM_SECTION, + IMAGE_REL_ARM_SECREL, +}; + +enum sh_coff_reloc_type { + IMAGE_REL_SH3_ABSOLUTE, + IMAGE_REL_SH3_DIRECT16, + IMAGE_REL_SH3_DIRECT32, + IMAGE_REL_SH3_DIRECT8, + IMAGE_REL_SH3_DIRECT8_WORD, + IMAGE_REL_SH3_DIRECT8_LONG, + IMAGE_REL_SH3_DIRECT4, + IMAGE_REL_SH3_DIRECT4_WORD, + IMAGE_REL_SH3_DIRECT4_LONG, + IMAGE_REL_SH3_PCREL8_WORD, + IMAGE_REL_SH3_PCREL8_LONG, + IMAGE_REL_SH3_PCREL12_WORD, + IMAGE_REL_SH3_STARTOF_SECTION, + IMAGE_REL_SH3_SIZEOF_SECTION, + IMAGE_REL_SH3_SECTION, + IMAGE_REL_SH3_SECREL, + IMAGE_REL_SH3_DIRECT32_NB, + IMAGE_REL_SH3_GPREL4_LONG, + IMAGE_REL_SH3_TOKEN, + IMAGE_REL_SHM_PCRELPT, + IMAGE_REL_SHM_REFLO, + IMAGE_REL_SHM_REFHALF, + IMAGE_REL_SHM_RELLO, + IMAGE_REL_SHM_RELHALF, + IMAGE_REL_SHM_PAIR, + IMAGE_REL_SHM_NOMODE, +}; + +enum ppc_coff_reloc_type { + IMAGE_REL_PPC_ABSOLUTE, + IMAGE_REL_PPC_ADDR64, + IMAGE_REL_PPC_ADDR32, + IMAGE_REL_PPC_ADDR24, + IMAGE_REL_PPC_ADDR16, + IMAGE_REL_PPC_ADDR14, + IMAGE_REL_PPC_REL24, + IMAGE_REL_PPC_REL14, + IMAGE_REL_PPC_ADDR32N, + IMAGE_REL_PPC_SECREL, + IMAGE_REL_PPC_SECTION, + IMAGE_REL_PPC_SECREL16, + IMAGE_REL_PPC_REFHI, + IMAGE_REL_PPC_REFLO, + IMAGE_REL_PPC_PAIR, + IMAGE_REL_PPC_SECRELLO, + IMAGE_REL_PPC_GPREL, + IMAGE_REL_PPC_TOKEN, +}; + +enum x86_coff_reloc_type { + IMAGE_REL_I386_ABSOLUTE, + IMAGE_REL_I386_DIR16, + IMAGE_REL_I386_REL16, + IMAGE_REL_I386_DIR32, + IMAGE_REL_I386_DIR32NB, + IMAGE_REL_I386_SEG12, + IMAGE_REL_I386_SECTION, + IMAGE_REL_I386_SECREL, + IMAGE_REL_I386_TOKEN, + IMAGE_REL_I386_SECREL7, + IMAGE_REL_I386_REL32, +}; + +enum ia64_coff_reloc_type { + IMAGE_REL_IA64_ABSOLUTE, + IMAGE_REL_IA64_IMM14, + IMAGE_REL_IA64_IMM22, + IMAGE_REL_IA64_IMM64, + IMAGE_REL_IA64_DIR32, + IMAGE_REL_IA64_DIR64, + IMAGE_REL_IA64_PCREL21B, + IMAGE_REL_IA64_PCREL21M, + IMAGE_REL_IA64_PCREL21F, + IMAGE_REL_IA64_GPREL22, + IMAGE_REL_IA64_LTOFF22, + IMAGE_REL_IA64_SECTION, + IMAGE_REL_IA64_SECREL22, + IMAGE_REL_IA64_SECREL64I, + IMAGE_REL_IA64_SECREL32, + IMAGE_REL_IA64_DIR32NB, + IMAGE_REL_IA64_SREL14, + IMAGE_REL_IA64_SREL22, + IMAGE_REL_IA64_SREL32, + IMAGE_REL_IA64_UREL32, + IMAGE_REL_IA64_PCREL60X, + IMAGE_REL_IA64_PCREL60B, + IMAGE_REL_IA64_PCREL60F, + IMAGE_REL_IA64_PCREL60I, + IMAGE_REL_IA64_PCREL60M, + IMAGE_REL_IA64_IMMGPREL6, + IMAGE_REL_IA64_TOKEN, + IMAGE_REL_IA64_GPREL32, + IMAGE_REL_IA64_ADDEND, +}; + +struct coff_reloc { + uint32_t virtual_address; + uint32_t symbol_table_index; + union { + enum x64_coff_reloc_type x64_type; + enum arm_coff_reloc_type arm_type; + enum sh_coff_reloc_type sh_type; + enum ppc_coff_reloc_type ppc_type; + enum x86_coff_reloc_type x86_type; + enum ia64_coff_reloc_type ia64_type; + uint16_t data; + }; +}; + +/* + * Definitions for the contents of the certs data block + */ +#define WIN_CERT_TYPE_PKCS_SIGNED_DATA 0x0002 +#define WIN_CERT_TYPE_EFI_OKCS115 0x0EF0 +#define WIN_CERT_TYPE_EFI_GUID 0x0EF1 + 
+#define WIN_CERT_REVISION_1_0 0x0100 +#define WIN_CERT_REVISION_2_0 0x0200 + +struct win_certificate { + uint32_t length; + uint16_t revision; + uint16_t cert_type; +}; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __LINUX_PE_H */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h new file mode 100644 index 000000000..90b8ce813 --- /dev/null +++ b/include/linux/percpu-defs.h @@ -0,0 +1,528 @@ +/* + * linux/percpu-defs.h - basic definitions for percpu areas + * + * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER. + * + * This file is separate from linux/percpu.h to avoid cyclic inclusion + * dependency from arch header files. Only to be included from + * asm/percpu.h. + * + * This file includes macros necessary to declare percpu sections and + * variables, and definitions of percpu accessors and operations. It + * should provide enough percpu features to arch header files even when + * they can only include asm/percpu.h to avoid cyclic inclusion dependency. + */ + +#ifndef _LINUX_PERCPU_DEFS_H +#define _LINUX_PERCPU_DEFS_H + +#ifdef CONFIG_SMP + +#ifdef MODULE +#define PER_CPU_SHARED_ALIGNED_SECTION "" +#define PER_CPU_ALIGNED_SECTION "" +#else +#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" +#define PER_CPU_ALIGNED_SECTION "..shared_aligned" +#endif +#define PER_CPU_FIRST_SECTION "..first" + +#else + +#define PER_CPU_SHARED_ALIGNED_SECTION "" +#define PER_CPU_ALIGNED_SECTION "..shared_aligned" +#define PER_CPU_FIRST_SECTION "" + +#endif + +/* + * Base implementations of per-CPU variable declarations and definitions, where + * the section in which the variable is to be placed is provided by the + * 'sec' argument. This may be used to affect the parameters governing the + * variable's storage. + * + * NOTE! The sections for the DECLARE and for the DEFINE must match, lest + * linkage errors occur due the compiler generating the wrong code to access + * that section. + */ +#define __PCPU_ATTRS(sec) \ + __percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \ + PER_CPU_ATTRIBUTES + +#define __PCPU_DUMMY_ATTRS \ + __attribute__((section(".discard"), unused)) + +/* + * s390 and alpha modules require percpu variables to be defined as + * weak to force the compiler to generate GOT based external + * references for them. This is necessary because percpu sections + * will be located outside of the usually addressable area. + * + * This definition puts the following two extra restrictions when + * defining percpu variables. + * + * 1. The symbol must be globally unique, even the static ones. + * 2. Static percpu variables cannot be defined inside a function. + * + * Archs which need weak percpu definitions should define + * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary. + * + * To ensure that the generic code observes the above two + * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak + * definition is used for all cases. + */ +#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU) +/* + * __pcpu_scope_* dummy variable is used to enforce scope. It + * receives the static modifier when it's used in front of + * DEFINE_PER_CPU() and will trigger build failure if + * DECLARE_PER_CPU() is used for the same variable. + * + * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness + * such that hidden weak symbol collision, which will cause unrelated + * variables to share the same address, can be detected during build. 
+ */ +#define DECLARE_PER_CPU_SECTION(type, name, sec) \ + extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ + extern __PCPU_ATTRS(sec) __typeof__(type) name + +#define DEFINE_PER_CPU_SECTION(type, name, sec) \ + __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ + extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ + __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ + extern __PCPU_ATTRS(sec) __typeof__(type) name; \ + __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ + __typeof__(type) name +#else +/* + * Normal declaration and definition macros. + */ +#define DECLARE_PER_CPU_SECTION(type, name, sec) \ + extern __PCPU_ATTRS(sec) __typeof__(type) name + +#define DEFINE_PER_CPU_SECTION(type, name, sec) \ + __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \ + __typeof__(type) name +#endif + +/* + * Variant on the per-CPU variable declaration/definition theme used for + * ordinary per-CPU variables. + */ +#define DECLARE_PER_CPU(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, "") + +#define DEFINE_PER_CPU(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, "") + +/* + * Declaration/definition used for per-CPU variables that must come first in + * the set of variables. + */ +#define DECLARE_PER_CPU_FIRST(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) + +#define DEFINE_PER_CPU_FIRST(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) + +/* + * Declaration/definition used for per-CPU variables that must be cacheline + * aligned under SMP conditions so that, whilst a particular instance of the + * data corresponds to a particular CPU, inefficiencies due to direct access by + * other CPUs are reduced by preventing the data from unnecessarily spanning + * cachelines. + * + * An example of this would be statistical data, where each CPU's set of data + * is updated by that CPU alone, but the data from across all CPUs is collated + * by a CPU processing a read from a proc file. + */ +#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ + ____cacheline_aligned_in_smp + +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ + ____cacheline_aligned_in_smp + +#define DECLARE_PER_CPU_ALIGNED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ + ____cacheline_aligned + +#define DEFINE_PER_CPU_ALIGNED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ + ____cacheline_aligned + +/* + * Declaration/definition used for per-CPU variables that must be page aligned. + */ +#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \ + __aligned(PAGE_SIZE) + +#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \ + __aligned(PAGE_SIZE) + +/* + * Declaration/definition used for per-CPU variables that must be read mostly. + */ +#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, "..read_mostly") + +#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, "..read_mostly") + +/* + * Declaration/definition used for per-CPU variables that should be accessed + * as decrypted when memory encryption is enabled in the guest. 
+ */ +#ifdef CONFIG_AMD_MEM_ENCRYPT +#define DECLARE_PER_CPU_DECRYPTED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, "..decrypted") + +#define DEFINE_PER_CPU_DECRYPTED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, "..decrypted") +#else +#define DEFINE_PER_CPU_DECRYPTED(type, name) DEFINE_PER_CPU(type, name) +#endif + +/* + * Intermodule exports for per-CPU variables. sparse forgets about + * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to + * noop if __CHECKER__. + */ +#ifndef __CHECKER__ +#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var) +#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var) +#else +#define EXPORT_PER_CPU_SYMBOL(var) +#define EXPORT_PER_CPU_SYMBOL_GPL(var) +#endif + +/* + * Accessors and operations. + */ +#ifndef __ASSEMBLY__ + +/* + * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating + * @ptr and is invoked once before a percpu area is accessed by all + * accessors and operations. This is performed in the generic part of + * percpu and arch overrides don't need to worry about it; however, if an + * arch wants to implement an arch-specific percpu accessor or operation, + * it may use __verify_pcpu_ptr() to verify the parameters. + * + * + 0 is required in order to convert the pointer type from a + * potential array type to a pointer to a single item of the array. + */ +#define __verify_pcpu_ptr(ptr) \ +do { \ + const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ + (void)__vpp_verify; \ +} while (0) + +#ifdef CONFIG_SMP + +/* + * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE() + * to prevent the compiler from making incorrect assumptions about the + * pointer value. The weird cast keeps both GCC and sparse happy. + */ +#define SHIFT_PERCPU_PTR(__p, __offset) \ + RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)) + +#define per_cpu_ptr(ptr, cpu) \ +({ \ + __verify_pcpu_ptr(ptr); \ + SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \ +}) + +#define raw_cpu_ptr(ptr) \ +({ \ + __verify_pcpu_ptr(ptr); \ + arch_raw_cpu_ptr(ptr); \ +}) + +#ifdef CONFIG_DEBUG_PREEMPT +#define this_cpu_ptr(ptr) \ +({ \ + __verify_pcpu_ptr(ptr); \ + SHIFT_PERCPU_PTR(ptr, my_cpu_offset); \ +}) +#else +#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) +#endif + +#else /* CONFIG_SMP */ + +#define VERIFY_PERCPU_PTR(__p) \ +({ \ + __verify_pcpu_ptr(__p); \ + (typeof(*(__p)) __kernel __force *)(__p); \ +}) + +#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) +#define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) +#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) + +#endif /* CONFIG_SMP */ + +#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) + +/* + * Must be an lvalue. Since @var must be a simple identifier, + * we force a syntax error here if it isn't. + */ +#define get_cpu_var(var) \ +(*({ \ + preempt_disable(); \ + this_cpu_ptr(&var); \ +})) + +/* + * The weird & is necessary because sparse considers (void)(var) to be + * a direct dereference of percpu variable (var). + */ +#define put_cpu_var(var) \ +do { \ + (void)&(var); \ + preempt_enable(); \ +} while (0) + +#define get_cpu_ptr(var) \ +({ \ + preempt_disable(); \ + this_cpu_ptr(var); \ +}) + +#define put_cpu_ptr(var) \ +do { \ + (void)(var); \ + preempt_enable(); \ +} while (0) + +/* + * Branching function to split up a function into a set of functions that + * are called for different scalar sizes of the objects handled. 
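+ *
+ * Editor's illustration (hypothetical variable): given
+ *
+ *	DEFINE_PER_CPU(u32, foo_count);
+ *
+ * raw_cpu_read(foo_count) goes through __pcpu_size_call_return() below,
+ * takes the "case 4:" branch and resolves to raw_cpu_read_4(), which an
+ * architecture may provide as an optimized implementation.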
+ */ + +extern void __bad_size_call_parameter(void); + +#ifdef CONFIG_DEBUG_PREEMPT +extern void __this_cpu_preempt_check(const char *op); +#else +static inline void __this_cpu_preempt_check(const char *op) { } +#endif + +#define __pcpu_size_call_return(stem, variable) \ +({ \ + typeof(variable) pscr_ret__; \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: pscr_ret__ = stem##1(variable); break; \ + case 2: pscr_ret__ = stem##2(variable); break; \ + case 4: pscr_ret__ = stem##4(variable); break; \ + case 8: pscr_ret__ = stem##8(variable); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pscr_ret__; \ +}) + +#define __pcpu_size_call_return2(stem, variable, ...) \ +({ \ + typeof(variable) pscr2_ret__; \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ + case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ + case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ + case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pscr2_ret__; \ +}) + +/* + * Special handling for cmpxchg_double. cmpxchg_double is passed two + * percpu variables. The first has to be aligned to a double word + * boundary and the second has to follow directly thereafter. + * We enforce this on all architectures even if they don't support + * a double cmpxchg instruction, since it's a cheap requirement, and it + * avoids breaking the requirement for architectures with the instruction. + */ +#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ +({ \ + bool pdcrb_ret__; \ + __verify_pcpu_ptr(&(pcp1)); \ + BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ + VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \ + VM_BUG_ON((unsigned long)(&(pcp2)) != \ + (unsigned long)(&(pcp1)) + sizeof(pcp1)); \ + switch(sizeof(pcp1)) { \ + case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ + case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ + case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ + case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pdcrb_ret__; \ +}) + +#define __pcpu_size_call(stem, variable, ...) \ +do { \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: stem##1(variable, __VA_ARGS__);break; \ + case 2: stem##2(variable, __VA_ARGS__);break; \ + case 4: stem##4(variable, __VA_ARGS__);break; \ + case 8: stem##8(variable, __VA_ARGS__);break; \ + default: \ + __bad_size_call_parameter();break; \ + } \ +} while (0) + +/* + * this_cpu operations (C) 2008-2013 Christoph Lameter + * + * Optimized manipulation for memory allocated through the per cpu + * allocator or for addresses of per cpu variables. + * + * These operation guarantee exclusivity of access for other operations + * on the *same* processor. The assumption is that per cpu data is only + * accessed by a single processor instance (the current one). + * + * The arch code can provide optimized implementation by defining macros + * for certain scalar sizes. F.e. provide this_cpu_add_2() to provide per + * cpu atomic operations for 2 byte sized RMW actions. If arch code does + * not provide operations for a scalar size then the fallback in the + * generic code will be used. + * + * cmpxchg_double replaces two adjacent scalars at once. 
The first two + * parameters are per cpu variables which have to be of the same size. A + * truth value is returned to indicate success or failure (since a double + * register result is difficult to handle). There is very limited hardware + * support for these operations, so only certain sizes may work. + */ + +/* + * Operations for contexts where we do not want to do any checks for + * preemptions. Unless strictly necessary, always use [__]this_cpu_*() + * instead. + * + * If there is no other protection through preempt disable and/or disabling + * interupts then one of these RMW operations can show unexpected behavior + * because the execution thread was rescheduled on another processor or an + * interrupt occurred and the same percpu variable was modified from the + * interrupt context. + */ +#define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, pcp) +#define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, pcp, val) +#define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, pcp, val) +#define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, pcp, val) +#define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, pcp, val) +#define raw_cpu_add_return(pcp, val) __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) +#define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval) +#define raw_cpu_cmpxchg(pcp, oval, nval) \ + __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) +#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) + +#define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val)) +#define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1) +#define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1) +#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) +#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) +#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) + +/* + * Operations for contexts that are safe from preemption/interrupts. These + * operations verify that preemption is disabled. 
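+ *
+ * Editor's sketch (hypothetical counter): the caller typically supplies
+ * the protection itself and then uses the cheaper __this_cpu variants:
+ *
+ *	preempt_disable();
+ *	__this_cpu_inc(foo_hits);
+ *	preempt_enable();
+ *
+ * With CONFIG_DEBUG_PREEMPT the __this_cpu_preempt_check() calls in the
+ * macros below warn if such an operation runs with preemption enabled.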
+ */ +#define __this_cpu_read(pcp) \ +({ \ + __this_cpu_preempt_check("read"); \ + raw_cpu_read(pcp); \ +}) + +#define __this_cpu_write(pcp, val) \ +({ \ + __this_cpu_preempt_check("write"); \ + raw_cpu_write(pcp, val); \ +}) + +#define __this_cpu_add(pcp, val) \ +({ \ + __this_cpu_preempt_check("add"); \ + raw_cpu_add(pcp, val); \ +}) + +#define __this_cpu_and(pcp, val) \ +({ \ + __this_cpu_preempt_check("and"); \ + raw_cpu_and(pcp, val); \ +}) + +#define __this_cpu_or(pcp, val) \ +({ \ + __this_cpu_preempt_check("or"); \ + raw_cpu_or(pcp, val); \ +}) + +#define __this_cpu_add_return(pcp, val) \ +({ \ + __this_cpu_preempt_check("add_return"); \ + raw_cpu_add_return(pcp, val); \ +}) + +#define __this_cpu_xchg(pcp, nval) \ +({ \ + __this_cpu_preempt_check("xchg"); \ + raw_cpu_xchg(pcp, nval); \ +}) + +#define __this_cpu_cmpxchg(pcp, oval, nval) \ +({ \ + __this_cpu_preempt_check("cmpxchg"); \ + raw_cpu_cmpxchg(pcp, oval, nval); \ +}) + +#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ __this_cpu_preempt_check("cmpxchg_double"); \ + raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \ +}) + +#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val)) +#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1) +#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1) +#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) +#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) +#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) + +/* + * Operations with implied preemption/interrupt protection. These + * operations can be used without worrying about preemption or interrupt. + */ +#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) +#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) +#define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, pcp, val) +#define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, pcp, val) +#define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, pcp, val) +#define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) +#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval) +#define this_cpu_cmpxchg(pcp, oval, nval) \ + __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) +#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) + +#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val)) +#define this_cpu_inc(pcp) this_cpu_add(pcp, 1) +#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1) +#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) +#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) +#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) + +#endif /* __ASSEMBLY__ */ +#endif /* _LINUX_PERCPU_DEFS_H */ diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h new file mode 100644 index 000000000..009cdf3d6 --- /dev/null +++ b/include/linux/percpu-refcount.h @@ -0,0 +1,334 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Percpu refcounts: + * (C) 2012 Google, Inc. + * Author: Kent Overstreet + * + * This implements a refcount with similar semantics to atomic_t - atomic_inc(), + * atomic_dec_and_test() - but percpu. 
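+ *
+ * Editor's condensed lifecycle sketch (the 'foo' object and its release
+ * callback are hypothetical, not taken from this file):
+ *
+ *	percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
+ *	...
+ *	percpu_ref_get(&foo->ref);	// one get/put pair per user
+ *	percpu_ref_put(&foo->ref);
+ *	...
+ *	percpu_ref_kill(&foo->ref);	// drops the initial ref
+ *	...				// foo_release() runs once count hits 0
+ *	percpu_ref_exit(&foo->ref);	// e.g. from foo_release()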
+ * + * There's one important difference between percpu refs and normal atomic_t + * refcounts; you have to keep track of your initial refcount, and then when you + * start shutting down you call percpu_ref_kill() _before_ dropping the initial + * refcount. + * + * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less + * than an atomic_t - this is because of the way shutdown works, see + * percpu_ref_kill()/PERCPU_COUNT_BIAS. + * + * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the + * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill() + * puts the ref back in single atomic_t mode, collecting the per cpu refs and + * issuing the appropriate barriers, and then marks the ref as shutting down so + * that percpu_ref_put() will check for the ref hitting 0. After it returns, + * it's safe to drop the initial ref. + * + * USAGE: + * + * See fs/aio.c for some example usage; it's used there for struct kioctx, which + * is created when userspaces calls io_setup(), and destroyed when userspace + * calls io_destroy() or the process exits. + * + * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it + * removes the kioctx from the proccess's table of kioctxs and kills percpu_ref. + * After that, there can't be any new users of the kioctx (from lookup_ioctx()) + * and it's then safe to drop the initial ref with percpu_ref_put(). + * + * Note that the free path, free_ioctx(), needs to go through explicit call_rcu() + * to synchronize with RCU protected lookup_ioctx(). percpu_ref operations don't + * imply RCU grace periods of any kind and if a user wants to combine percpu_ref + * with RCU protection, it must be done explicitly. + * + * Code that does a two stage shutdown like this often needs some kind of + * explicit synchronization to ensure the initial refcount can only be dropped + * once - percpu_ref_kill() does this for you, it returns true once and false if + * someone else already called it. The aio code uses it this way, but it's not + * necessary if the code has some other mechanism to synchronize teardown. + * around. + */ + +#ifndef _LINUX_PERCPU_REFCOUNT_H +#define _LINUX_PERCPU_REFCOUNT_H + +#include +#include +#include +#include +#include + +struct percpu_ref; +typedef void (percpu_ref_func_t)(struct percpu_ref *); + +/* flags set in the lower bits of percpu_ref->percpu_count_ptr */ +enum { + __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */ + __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */ + __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD, + + __PERCPU_REF_FLAG_BITS = 2, +}; + +/* @flags for percpu_ref_init() */ +enum { + /* + * Start w/ ref == 1 in atomic mode. Can be switched to percpu + * operation using percpu_ref_switch_to_percpu(). If initialized + * with this flag, the ref will stay in atomic mode until + * percpu_ref_switch_to_percpu() is invoked on it. + */ + PERCPU_REF_INIT_ATOMIC = 1 << 0, + + /* + * Start dead w/ ref == 0 in atomic mode. Must be revived with + * percpu_ref_reinit() before used. Implies INIT_ATOMIC. + */ + PERCPU_REF_INIT_DEAD = 1 << 1, +}; + +struct percpu_ref { + atomic_long_t count; + /* + * The low bit of the pointer indicates whether the ref is in percpu + * mode; if set, then get/put will manipulate the atomic_t. 
+ */ + unsigned long percpu_count_ptr; + percpu_ref_func_t *release; + percpu_ref_func_t *confirm_switch; + bool force_atomic:1; + struct rcu_head rcu; +}; + +int __must_check percpu_ref_init(struct percpu_ref *ref, + percpu_ref_func_t *release, unsigned int flags, + gfp_t gfp); +void percpu_ref_exit(struct percpu_ref *ref); +void percpu_ref_switch_to_atomic(struct percpu_ref *ref, + percpu_ref_func_t *confirm_switch); +void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref); +void percpu_ref_switch_to_percpu(struct percpu_ref *ref); +void percpu_ref_kill_and_confirm(struct percpu_ref *ref, + percpu_ref_func_t *confirm_kill); +void percpu_ref_reinit(struct percpu_ref *ref); + +/** + * percpu_ref_kill - drop the initial ref + * @ref: percpu_ref to kill + * + * Must be used to drop the initial ref on a percpu refcount; must be called + * precisely once before shutdown. + * + * Switches @ref into atomic mode before gathering up the percpu counters + * and dropping the initial ref. + * + * There are no implied RCU grace periods between kill and release. + */ +static inline void percpu_ref_kill(struct percpu_ref *ref) +{ + percpu_ref_kill_and_confirm(ref, NULL); +} + +/* + * Internal helper. Don't use outside percpu-refcount proper. The + * function doesn't return the pointer and let the caller test it for NULL + * because doing so forces the compiler to generate two conditional + * branches as it can't assume that @ref->percpu_count is not NULL. + */ +static inline bool __ref_is_percpu(struct percpu_ref *ref, + unsigned long __percpu **percpu_countp) +{ + unsigned long percpu_ptr; + + /* + * The value of @ref->percpu_count_ptr is tested for + * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then + * used as a pointer. If the compiler generates a separate fetch + * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in + * between contaminating the pointer value, meaning that + * READ_ONCE() is required when fetching it. + * + * The smp_read_barrier_depends() implied by READ_ONCE() pairs + * with smp_store_release() in __percpu_ref_switch_to_percpu(). + */ + percpu_ptr = READ_ONCE(ref->percpu_count_ptr); + + /* + * Theoretically, the following could test just ATOMIC; however, + * then we'd have to mask off DEAD separately as DEAD may be + * visible without ATOMIC if we race with percpu_ref_kill(). DEAD + * implies ATOMIC anyway. Test them together. + */ + if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD)) + return false; + + *percpu_countp = (unsigned long __percpu *)percpu_ptr; + return true; +} + +/** + * percpu_ref_get_many - increment a percpu refcount + * @ref: percpu_ref to get + * @nr: number of references to get + * + * Analogous to atomic_long_add(). + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) +{ + unsigned long __percpu *percpu_count; + + rcu_read_lock_sched(); + + if (__ref_is_percpu(ref, &percpu_count)) + this_cpu_add(*percpu_count, nr); + else + atomic_long_add(nr, &ref->count); + + rcu_read_unlock_sched(); +} + +/** + * percpu_ref_get - increment a percpu refcount + * @ref: percpu_ref to get + * + * Analagous to atomic_long_inc(). + * + * This function is safe to call as long as @ref is between init and exit. 
+ */ +static inline void percpu_ref_get(struct percpu_ref *ref) +{ + percpu_ref_get_many(ref, 1); +} + +/** + * percpu_ref_tryget - try to increment a percpu refcount + * @ref: percpu_ref to try-get + * + * Increment a percpu refcount unless its count already reached zero. + * Returns %true on success; %false on failure. + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline bool percpu_ref_tryget(struct percpu_ref *ref) +{ + unsigned long __percpu *percpu_count; + bool ret; + + rcu_read_lock_sched(); + + if (__ref_is_percpu(ref, &percpu_count)) { + this_cpu_inc(*percpu_count); + ret = true; + } else { + ret = atomic_long_inc_not_zero(&ref->count); + } + + rcu_read_unlock_sched(); + + return ret; +} + +/** + * percpu_ref_tryget_live - try to increment a live percpu refcount + * @ref: percpu_ref to try-get + * + * Increment a percpu refcount unless it has already been killed. Returns + * %true on success; %false on failure. + * + * Completion of percpu_ref_kill() in itself doesn't guarantee that this + * function will fail. For such guarantee, percpu_ref_kill_and_confirm() + * should be used. After the confirm_kill callback is invoked, it's + * guaranteed that no new reference will be given out by + * percpu_ref_tryget_live(). + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) +{ + unsigned long __percpu *percpu_count; + bool ret = false; + + rcu_read_lock_sched(); + + if (__ref_is_percpu(ref, &percpu_count)) { + this_cpu_inc(*percpu_count); + ret = true; + } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) { + ret = atomic_long_inc_not_zero(&ref->count); + } + + rcu_read_unlock_sched(); + + return ret; +} + +/** + * percpu_ref_put_many - decrement a percpu refcount + * @ref: percpu_ref to put + * @nr: number of references to put + * + * Decrement the refcount, and if 0, call the release function (which was passed + * to percpu_ref_init()) + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) +{ + unsigned long __percpu *percpu_count; + + rcu_read_lock_sched(); + + if (__ref_is_percpu(ref, &percpu_count)) + this_cpu_sub(*percpu_count, nr); + else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) + ref->release(ref); + + rcu_read_unlock_sched(); +} + +/** + * percpu_ref_put - decrement a percpu refcount + * @ref: percpu_ref to put + * + * Decrement the refcount, and if 0, call the release function (which was passed + * to percpu_ref_init()) + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline void percpu_ref_put(struct percpu_ref *ref) +{ + percpu_ref_put_many(ref, 1); +} + +/** + * percpu_ref_is_dying - test whether a percpu refcount is dying or dead + * @ref: percpu_ref to test + * + * Returns %true if @ref is dying or dead. + * + * This function is safe to call as long as @ref is between init and exit + * and the caller is responsible for synchronizing against state changes. + */ +static inline bool percpu_ref_is_dying(struct percpu_ref *ref) +{ + return ref->percpu_count_ptr & __PERCPU_REF_DEAD; +} + +/** + * percpu_ref_is_zero - test whether a percpu refcount reached zero + * @ref: percpu_ref to test + * + * Returns %true if @ref reached zero. + * + * This function is safe to call as long as @ref is between init and exit. 
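+ *
+ * Editor's note: while @ref is still in percpu mode the per-cpu counters
+ * are not summed here, so the helper below simply reports false in that
+ * case and only consults the atomic count once the ref has been switched
+ * to atomic mode.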
+ */ +static inline bool percpu_ref_is_zero(struct percpu_ref *ref) +{ + unsigned long __percpu *percpu_count; + + if (__ref_is_percpu(ref, &percpu_count)) + return false; + return !atomic_long_read(&ref->count); +} + +#endif diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h new file mode 100644 index 000000000..79b99d653 --- /dev/null +++ b/include/linux/percpu-rwsem.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PERCPU_RWSEM_H +#define _LINUX_PERCPU_RWSEM_H + +#include +#include +#include +#include +#include +#include + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int __percpu *read_count; + struct rw_semaphore rw_sem; /* slowpath */ + struct rcuwait writer; /* blocked writer */ + int readers_block; +}; + +#define DEFINE_STATIC_PERCPU_RWSEM(name) \ +static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \ +static struct percpu_rw_semaphore name = { \ + .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \ + .read_count = &__percpu_rwsem_rc_##name, \ + .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ + .writer = __RCUWAIT_INITIALIZER(name.writer), \ +} + +extern int __percpu_down_read(struct percpu_rw_semaphore *, int); +extern void __percpu_up_read(struct percpu_rw_semaphore *); + +static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem) +{ + might_sleep(); + + rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_); + + preempt_disable(); + /* + * We are in an RCU-sched read-side critical section, so the writer + * cannot both change sem->state from readers_fast and start checking + * counters while we are here. So if we see !sem->state, we know that + * the writer won't be checking until we're past the preempt_enable() + * and that one the synchronize_sched() is done, the writer will see + * anything we did within this RCU-sched read-size critical section. + */ + __this_cpu_inc(*sem->read_count); + if (unlikely(!rcu_sync_is_idle(&sem->rss))) + __percpu_down_read(sem, false); /* Unconditional memory barrier */ + barrier(); + /* + * The barrier() prevents the compiler from + * bleeding the critical section out. + */ +} + +static inline void percpu_down_read(struct percpu_rw_semaphore *sem) +{ + percpu_down_read_preempt_disable(sem); + preempt_enable(); +} + +static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) +{ + int ret = 1; + + preempt_disable(); + /* + * Same as in percpu_down_read(). + */ + __this_cpu_inc(*sem->read_count); + if (unlikely(!rcu_sync_is_idle(&sem->rss))) + ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */ + preempt_enable(); + /* + * The barrier() from preempt_enable() prevents the compiler from + * bleeding the critical section out. + */ + + if (ret) + rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_); + + return ret; +} + +static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem) +{ + /* + * The barrier() prevents the compiler from + * bleeding the critical section out. + */ + barrier(); + /* + * Same as in percpu_down_read(). 
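+ *
+ * Editor's clarification: as on the down_read() side, the fast path below
+ * is only a per-CPU decrement while rcu_sync_is_idle() reports no writer;
+ * otherwise __percpu_up_read() takes the slow path, which is also where a
+ * waiting writer gets woken.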
+ */ + if (likely(rcu_sync_is_idle(&sem->rss))) + __this_cpu_dec(*sem->read_count); + else + __percpu_up_read(sem); /* Unconditional memory barrier */ + preempt_enable(); + + rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); +} + +static inline void percpu_up_read(struct percpu_rw_semaphore *sem) +{ + preempt_disable(); + percpu_up_read_preempt_enable(sem); +} + +extern void percpu_down_write(struct percpu_rw_semaphore *); +extern void percpu_up_write(struct percpu_rw_semaphore *); + +extern int __percpu_init_rwsem(struct percpu_rw_semaphore *, + const char *, struct lock_class_key *); + +extern void percpu_free_rwsem(struct percpu_rw_semaphore *); + +#define percpu_init_rwsem(sem) \ +({ \ + static struct lock_class_key rwsem_key; \ + __percpu_init_rwsem(sem, #sem, &rwsem_key); \ +}) + +#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem) + +#define percpu_rwsem_assert_held(sem) \ + lockdep_assert_held(&(sem)->rw_sem) + +static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, + bool read, unsigned long ip) +{ + lock_release(&sem->rw_sem.dep_map, 1, ip); +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER + if (!read) + sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN; +#endif +} + +static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, + bool read, unsigned long ip) +{ + lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip); +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER + if (!read) + sem->rw_sem.owner = current; +#endif +} + +#endif diff --git a/include/linux/percpu.h b/include/linux/percpu.h new file mode 100644 index 000000000..70b7123f3 --- /dev/null +++ b/include/linux/percpu.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PERCPU_H +#define __LINUX_PERCPU_H + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* enough to cover all DEFINE_PER_CPUs in modules */ +#ifdef CONFIG_MODULES +#define PERCPU_MODULE_RESERVE (8 << 10) +#else +#define PERCPU_MODULE_RESERVE 0 +#endif + +/* minimum unit size, also is the maximum supported allocation size */ +#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) + +/* minimum allocation size and shift in bytes */ +#define PCPU_MIN_ALLOC_SHIFT 2 +#define PCPU_MIN_ALLOC_SIZE (1 << PCPU_MIN_ALLOC_SHIFT) + +/* number of bits per page, used to trigger a scan if blocks are > PAGE_SIZE */ +#define PCPU_BITS_PER_PAGE (PAGE_SIZE >> PCPU_MIN_ALLOC_SHIFT) + +/* + * This determines the size of each metadata block. There are several subtle + * constraints around this constant. The reserved region must be a multiple of + * PCPU_BITMAP_BLOCK_SIZE. Additionally, PCPU_BITMAP_BLOCK_SIZE must be a + * multiple of PAGE_SIZE or PAGE_SIZE must be a multiple of + * PCPU_BITMAP_BLOCK_SIZE to align with the populated page map. The unit_size + * also has to be a multiple of PCPU_BITMAP_BLOCK_SIZE to ensure full blocks. + */ +#define PCPU_BITMAP_BLOCK_SIZE PAGE_SIZE +#define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \ + PCPU_MIN_ALLOC_SHIFT) + +/* + * Percpu allocator can serve percpu allocations before slab is + * initialized which allows slab to depend on the percpu allocator. + * The following two parameters decide how much resource to + * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or + * larger than PERCPU_DYNAMIC_EARLY_SIZE. 
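+ *
+ * Editor's sketch of the dynamic allocation interface declared further
+ * below (the 'foo' pointer and its use are hypothetical):
+ *
+ *	u64 __percpu *foo = alloc_percpu(u64);
+ *	int cpu;
+ *
+ *	if (!foo)
+ *		return -ENOMEM;
+ *	for_each_possible_cpu(cpu)
+ *		*per_cpu_ptr(foo, cpu) = 0;
+ *	...
+ *	free_percpu(foo);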
+ */ +#define PERCPU_DYNAMIC_EARLY_SLOTS 128 +#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10) + +/* + * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy + * back on the first chunk for dynamic percpu allocation if arch is + * manually allocating and mapping it for faster access (as a part of + * large page mapping for example). + * + * The following values give between one and two pages of free space + * after typical minimal boot (2-way SMP, single disk and NIC) with + * both defconfig and a distro config on x86_64 and 32. More + * intelligent way to determine this would be nice. + */ +#if BITS_PER_LONG > 32 +#define PERCPU_DYNAMIC_RESERVE (28 << 10) +#else +#define PERCPU_DYNAMIC_RESERVE (20 << 10) +#endif + +extern void *pcpu_base_addr; +extern const unsigned long *pcpu_unit_offsets; + +struct pcpu_group_info { + int nr_units; /* aligned # of units */ + unsigned long base_offset; /* base address offset */ + unsigned int *cpu_map; /* unit->cpu map, empty + * entries contain NR_CPUS */ +}; + +struct pcpu_alloc_info { + size_t static_size; + size_t reserved_size; + size_t dyn_size; + size_t unit_size; + size_t atom_size; + size_t alloc_size; + size_t __ai_size; /* internal, don't use */ + int nr_groups; /* 0 if grouping unnecessary */ + struct pcpu_group_info groups[]; +}; + +enum pcpu_fc { + PCPU_FC_AUTO, + PCPU_FC_EMBED, + PCPU_FC_PAGE, + + PCPU_FC_NR, +}; +extern const char * const pcpu_fc_names[PCPU_FC_NR]; + +extern enum pcpu_fc pcpu_chosen_fc; + +typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size, + size_t align); +typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size); +typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr); +typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to); + +extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, + int nr_units); +extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai); + +extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, + void *base_addr); + +#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK +extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, + size_t atom_size, + pcpu_fc_cpu_distance_fn_t cpu_distance_fn, + pcpu_fc_alloc_fn_t alloc_fn, + pcpu_fc_free_fn_t free_fn); +#endif + +#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK +extern int __init pcpu_page_first_chunk(size_t reserved_size, + pcpu_fc_alloc_fn_t alloc_fn, + pcpu_fc_free_fn_t free_fn, + pcpu_fc_populate_pte_fn_t populate_pte_fn); +#endif + +extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); +extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr); +extern bool is_kernel_percpu_address(unsigned long addr); + +#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) +extern void __init setup_per_cpu_areas(void); +#endif + +extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp); +extern void __percpu *__alloc_percpu(size_t size, size_t align); +extern void free_percpu(void __percpu *__pdata); +extern phys_addr_t per_cpu_ptr_to_phys(void *addr); + +#define alloc_percpu_gfp(type, gfp) \ + (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \ + __alignof__(type), gfp) +#define alloc_percpu(type) \ + (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ + __alignof__(type)) + +extern unsigned long pcpu_nr_pages(void); + +#endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h new file mode 100644 index 
000000000..0a4f54dd4 --- /dev/null +++ b/include/linux/percpu_counter.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PERCPU_COUNTER_H +#define _LINUX_PERCPU_COUNTER_H +/* + * A simple "approximate counter" for use in ext2 and ext3 superblocks. + * + * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4. + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SMP + +struct percpu_counter { + raw_spinlock_t lock; + s64 count; +#ifdef CONFIG_HOTPLUG_CPU + struct list_head list; /* All percpu_counters are on a list */ +#endif + s32 __percpu *counters; +}; + +extern int percpu_counter_batch; + +int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp, + struct lock_class_key *key); + +#define percpu_counter_init(fbc, value, gfp) \ + ({ \ + static struct lock_class_key __key; \ + \ + __percpu_counter_init(fbc, value, gfp, &__key); \ + }) + +void percpu_counter_destroy(struct percpu_counter *fbc); +void percpu_counter_set(struct percpu_counter *fbc, s64 amount); +void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, + s32 batch); +s64 __percpu_counter_sum(struct percpu_counter *fbc); +int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); + +static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) +{ + return __percpu_counter_compare(fbc, rhs, percpu_counter_batch); +} + +static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) +{ + percpu_counter_add_batch(fbc, amount, percpu_counter_batch); +} + +static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) +{ + s64 ret = __percpu_counter_sum(fbc); + return ret < 0 ? 0 : ret; +} + +static inline s64 percpu_counter_sum(struct percpu_counter *fbc) +{ + return __percpu_counter_sum(fbc); +} + +static inline s64 percpu_counter_read(struct percpu_counter *fbc) +{ + return fbc->count; +} + +/* + * It is possible for the percpu_counter_read() to return a small negative + * number for some counter which should never be negative. 
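+ * That can happen because percpu_counter_add() batches updates in the
+ * per-CPU counters and only folds them into fbc->count once a batch
+ * threshold is crossed, so fbc->count is only an approximation of the
+ * true value (editor's clarification).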
+ * + */ +static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) +{ + /* Prevent reloads of fbc->count */ + s64 ret = READ_ONCE(fbc->count); + + if (ret >= 0) + return ret; + return 0; +} + +static inline bool percpu_counter_initialized(struct percpu_counter *fbc) +{ + return (fbc->counters != NULL); +} + +#else /* !CONFIG_SMP */ + +struct percpu_counter { + s64 count; +}; + +static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount, + gfp_t gfp) +{ + fbc->count = amount; + return 0; +} + +static inline void percpu_counter_destroy(struct percpu_counter *fbc) +{ +} + +static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount) +{ + fbc->count = amount; +} + +static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) +{ + if (fbc->count > rhs) + return 1; + else if (fbc->count < rhs) + return -1; + else + return 0; +} + +static inline int +__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) +{ + return percpu_counter_compare(fbc, rhs); +} + +static inline void +percpu_counter_add(struct percpu_counter *fbc, s64 amount) +{ + preempt_disable(); + fbc->count += amount; + preempt_enable(); +} + +static inline void +percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) +{ + percpu_counter_add(fbc, amount); +} + +static inline s64 percpu_counter_read(struct percpu_counter *fbc) +{ + return fbc->count; +} + +/* + * percpu_counter is intended to track positive numbers. In the UP case the + * number should never be negative. + */ +static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) +{ + return fbc->count; +} + +static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) +{ + return percpu_counter_read_positive(fbc); +} + +static inline s64 percpu_counter_sum(struct percpu_counter *fbc) +{ + return percpu_counter_read(fbc); +} + +static inline bool percpu_counter_initialized(struct percpu_counter *fbc) +{ + return true; +} + +#endif /* CONFIG_SMP */ + +static inline void percpu_counter_inc(struct percpu_counter *fbc) +{ + percpu_counter_add(fbc, 1); +} + +static inline void percpu_counter_dec(struct percpu_counter *fbc) +{ + percpu_counter_add(fbc, -1); +} + +static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) +{ + percpu_counter_add(fbc, -amount); +} + +#endif /* _LINUX_PERCPU_COUNTER_H */ diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h new file mode 100644 index 000000000..bf309ff6f --- /dev/null +++ b/include/linux/perf/arm_pmu.h @@ -0,0 +1,176 @@ +/* + * linux/arch/arm/include/asm/pmu.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ARM_PMU_H__ +#define __ARM_PMU_H__ + +#include +#include +#include +#include +#include + +#ifdef CONFIG_ARM_PMU + +/* + * The ARMv7 CPU PMU supports up to 32 event counters. + */ +#define ARMPMU_MAX_HWEVENTS 32 + +/* + * ARM PMU hw_event flags + */ +/* Event uses a 64bit counter */ +#define ARMPMU_EVT_64BIT 1 + +#define HW_OP_UNSUPPORTED 0xFFFF +#define C(_x) PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED 0xFFFF + +#define PERF_MAP_ALL_UNSUPPORTED \ + [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED + +#define PERF_CACHE_MAP_ALL_UNSUPPORTED \ +[0 ... C(MAX) - 1] = { \ + [0 ... C(OP_MAX) - 1] = { \ + [0 ... 
C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \ + }, \ +} + +/* The events for a given PMU register set. */ +struct pmu_hw_events { + /* + * The events that are active on the PMU for the given index. + */ + struct perf_event *events[ARMPMU_MAX_HWEVENTS]; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. + */ + DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS); + + /* + * Hardware lock to serialize accesses to PMU registers. Needed for the + * read/modify/write sequences. + */ + raw_spinlock_t pmu_lock; + + /* + * When using percpu IRQs, we need a percpu dev_id. Place it here as we + * already have to allocate this struct per cpu. + */ + struct arm_pmu *percpu_pmu; + + int irq; +}; + +enum armpmu_attr_groups { + ARMPMU_ATTR_GROUP_COMMON, + ARMPMU_ATTR_GROUP_EVENTS, + ARMPMU_ATTR_GROUP_FORMATS, + ARMPMU_NR_ATTR_GROUPS +}; + +struct arm_pmu { + struct pmu pmu; + cpumask_t supported_cpus; + char *name; + irqreturn_t (*handle_irq)(struct arm_pmu *pmu); + void (*enable)(struct perf_event *event); + void (*disable)(struct perf_event *event); + int (*get_event_idx)(struct pmu_hw_events *hw_events, + struct perf_event *event); + void (*clear_event_idx)(struct pmu_hw_events *hw_events, + struct perf_event *event); + int (*set_event_filter)(struct hw_perf_event *evt, + struct perf_event_attr *attr); + u64 (*read_counter)(struct perf_event *event); + void (*write_counter)(struct perf_event *event, u64 val); + void (*start)(struct arm_pmu *); + void (*stop)(struct arm_pmu *); + void (*reset)(void *); + int (*map_event)(struct perf_event *event); + int (*filter_match)(struct perf_event *event); + int num_events; + bool secure_access; /* 32-bit ARM only */ +#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 + DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); + struct platform_device *plat_device; + struct pmu_hw_events __percpu *hw_events; + struct hlist_node node; + struct notifier_block cpu_pm_nb; + /* the attr_groups array must be NULL-terminated */ + const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1]; + + /* Only to be used by ACPI probing code */ + unsigned long acpi_cpuid; +}; + +#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) + +u64 armpmu_event_update(struct perf_event *event); + +int armpmu_event_set_period(struct perf_event *event); + +int armpmu_map_event(struct perf_event *event, + const unsigned (*event_map)[PERF_COUNT_HW_MAX], + const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u32 raw_event_mask); + +typedef int (*armpmu_init_fn)(struct arm_pmu *); + +struct pmu_probe_info { + unsigned int cpuid; + unsigned int mask; + armpmu_init_fn init; +}; + +#define PMU_PROBE(_cpuid, _mask, _fn) \ +{ \ + .cpuid = (_cpuid), \ + .mask = (_mask), \ + .init = (_fn), \ +} + +#define ARM_PMU_PROBE(_cpuid, _fn) \ + PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn) + +#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK) + +#define XSCALE_PMU_PROBE(_version, _fn) \ + PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn) + +int arm_pmu_device_probe(struct platform_device *pdev, + const struct of_device_id *of_table, + const struct pmu_probe_info *probe_table); + +#ifdef CONFIG_ACPI +int arm_pmu_acpi_probe(armpmu_init_fn init_fn); +#else +static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; } +#endif + +/* Internal functions only for core arm_pmu code */ +struct arm_pmu 
*armpmu_alloc(void); +struct arm_pmu *armpmu_alloc_atomic(void); +void armpmu_free(struct arm_pmu *pmu); +int armpmu_register(struct arm_pmu *pmu); +int armpmu_request_irq(int irq, int cpu); +void armpmu_free_irq(int irq, int cpu); + +#define ARMV8_PMU_PDEV_NAME "armv8-pmu" + +#endif /* CONFIG_ARM_PMU */ + +#endif /* __ARM_PMU_H__ */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h new file mode 100644 index 000000000..efe30b9b1 --- /dev/null +++ b/include/linux/perf_event.h @@ -0,0 +1,1434 @@ +/* + * Performance events: + * + * Copyright (C) 2008-2009, Thomas Gleixner + * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra + * + * Data type definitions, declarations, prototypes. + * + * Started by: Thomas Gleixner and Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_PERF_EVENT_H +#define _LINUX_PERF_EVENT_H + +#include +#include + +/* + * Kernel-internal data types and definitions: + */ + +#ifdef CONFIG_PERF_EVENTS +# include +# include +#endif + +struct perf_guest_info_callbacks { + int (*is_in_guest)(void); + int (*is_user_mode)(void); + unsigned long (*get_guest_ip)(void); +}; + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */ +}; + +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short contexts; + bool contexts_maxed; +}; + +typedef unsigned long (*perf_copy_f)(void *dst, const void *src, + unsigned long off, unsigned long len); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + unsigned long pad; + }; + perf_copy_f copy; + void *data; + u32 size; +} __packed; + +struct perf_raw_record { + struct perf_raw_frag frag; + u32 size; +}; + +/* + * branch stack layout: + * nr: number of taken branches stored in entries[] + * + * Note that nr can vary from sample to sample + * branches (to, from) are stored from most recent + * to least recent, i.e., entries[0] contains the most + * recent branch. + */ +struct perf_branch_stack { + __u64 nr; + struct perf_branch_entry entries[0]; +}; + +struct task_struct; + +/* + * extra PMU register associated with an event + */ +struct hw_perf_event_extra { + u64 config; /* register value */ + unsigned int reg; /* register address or index */ + int alloc; /* extra register already allocated */ + int idx; /* index in shared_regs->regs[] */ +}; + +/** + * struct hw_perf_event - performance event hardware details: + */ +struct hw_perf_event { +#ifdef CONFIG_PERF_EVENTS + union { + struct { /* hardware */ + u64 config; + u64 last_tag; + unsigned long config_base; + unsigned long event_base; + int event_base_rdpmc; + int idx; + int last_cpu; + int flags; + + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; + }; + struct { /* software */ + struct hrtimer hrtimer; + }; + struct { /* tracepoint */ + /* for tp_event->class */ + struct list_head tp_list; + }; + struct { /* amd_power */ + u64 pwr_acc; + u64 ptsc; + }; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + struct { /* breakpoint */ + /* + * Crufty hack to avoid the chicken and egg + * problem hw_breakpoint has with context + * creation and event initalization. 
+ */ + struct arch_hw_breakpoint info; + struct list_head bp_list; + }; +#endif + struct { /* amd_iommu */ + u8 iommu_bank; + u8 iommu_cntr; + u16 padding; + u64 conf; + u64 conf1; + }; + }; + /* + * If the event is a per task event, this will point to the task in + * question. See the comment in perf_event_alloc(). + */ + struct task_struct *target; + + /* + * PMU would store hardware filter configuration + * here. + */ + void *addr_filters; + + /* Last sync'ed generation of filters */ + unsigned long addr_filters_gen; + +/* + * hw_perf_event::state flags; used to track the PERF_EF_* state. + */ +#define PERF_HES_STOPPED 0x01 /* the counter is stopped */ +#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ +#define PERF_HES_ARCH 0x04 + + int state; + + /* + * The last observed hardware counter value, updated with a + * local64_cmpxchg() such that pmu::read() can be called nested. + */ + local64_t prev_count; + + /* + * The period to start the next sample with. + */ + u64 sample_period; + + /* + * The period we started this sample with. + */ + u64 last_period; + + /* + * However much is left of the current period; note that this is + * a full 64bit value and allows for generation of periods longer + * than hardware might allow. + */ + local64_t period_left; + + /* + * State for throttling the event, see __perf_event_overflow() and + * perf_adjust_freq_unthr_context(). + */ + u64 interrupts_seq; + u64 interrupts; + + /* + * State for freq target events, see __perf_event_overflow() and + * perf_adjust_freq_unthr_context(). + */ + u64 freq_time_stamp; + u64 freq_count_stamp; +#endif +}; + +struct perf_event; + +/* + * Common implementation detail of pmu::{start,commit,cancel}_txn + */ +#define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */ +#define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */ + +/** + * pmu::capabilities flags + */ +#define PERF_PMU_CAP_NO_INTERRUPT 0x01 +#define PERF_PMU_CAP_NO_NMI 0x02 +#define PERF_PMU_CAP_AUX_NO_SG 0x04 +#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08 +#define PERF_PMU_CAP_EXCLUSIVE 0x10 +#define PERF_PMU_CAP_ITRACE 0x20 +#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 + +/** + * struct pmu - generic performance monitoring unit + */ +struct pmu { + struct list_head entry; + + struct module *module; + struct device *dev; + const struct attribute_group **attr_groups; + const char *name; + int type; + + /* + * various common per-pmu feature flags + */ + int capabilities; + + int * __percpu pmu_disable_count; + struct perf_cpu_context * __percpu pmu_cpu_context; + atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */ + int task_ctx_nr; + int hrtimer_interval_ms; + + /* number of address filters this PMU can do */ + unsigned int nr_addr_filters; + + /* + * Fully disable/enable this PMU, can be used to protect from the PMI + * as well as for lazy/batch writing of the MSRs. + */ + void (*pmu_enable) (struct pmu *pmu); /* optional */ + void (*pmu_disable) (struct pmu *pmu); /* optional */ + + /* + * Try and initialize the event for this PMU. + * + * Returns: + * -ENOENT -- @event is not for this PMU + * + * -ENODEV -- @event is for this PMU but PMU not present + * -EBUSY -- @event is for this PMU but PMU temporarily unavailable + * -EINVAL -- @event is for this PMU but @event is not valid + * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported + * -EACCESS -- @event is for this PMU, @event is valid, but no privilidges + * + * 0 -- @event is for this PMU and valid + * + * Other error return values are allowed. 
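+ *
+ * Editor's sketch of the usual shape of this callback (the foo_pmu
+ * driver is hypothetical):
+ *
+ *	static int foo_pmu_event_init(struct perf_event *event)
+ *	{
+ *		if (event->attr.type != event->pmu->type)
+ *			return -ENOENT;	// not an event for this PMU
+ *		// validate event->attr, fill in event->hw, ...
+ *		return 0;
+ *	}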
+ */ + int (*event_init) (struct perf_event *event); + + /* + * Notification that the event was mapped or unmapped. Called + * in the context of the mapping task. + */ + void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ + void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ + + /* + * Flags for ->add()/->del()/ ->start()/->stop(). There are + * matching hw_perf_event::state flags. + */ +#define PERF_EF_START 0x01 /* start the counter when adding */ +#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ +#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ + + /* + * Adds/Removes a counter to/from the PMU, can be done inside a + * transaction, see the ->*_txn() methods. + * + * The add/del callbacks will reserve all hardware resources required + * to service the event, this includes any counter constraint + * scheduling etc. + * + * Called with IRQs disabled and the PMU disabled on the CPU the event + * is on. + * + * ->add() called without PERF_EF_START should result in the same state + * as ->add() followed by ->stop(). + * + * ->del() must always PERF_EF_UPDATE stop an event. If it calls + * ->stop() that must deal with already being stopped without + * PERF_EF_UPDATE. + */ + int (*add) (struct perf_event *event, int flags); + void (*del) (struct perf_event *event, int flags); + + /* + * Starts/Stops a counter present on the PMU. + * + * The PMI handler should stop the counter when perf_event_overflow() + * returns !0. ->start() will be used to continue. + * + * Also used to change the sample period. + * + * Called with IRQs disabled and the PMU disabled on the CPU the event + * is on -- will be called from NMI context with the PMU generates + * NMIs. + * + * ->stop() with PERF_EF_UPDATE will read the counter and update + * period/count values like ->read() would. + * + * ->start() with PERF_EF_RELOAD will reprogram the the counter + * value, must be preceded by a ->stop() with PERF_EF_UPDATE. + */ + void (*start) (struct perf_event *event, int flags); + void (*stop) (struct perf_event *event, int flags); + + /* + * Updates the counter value of the event. + * + * For sampling capable PMUs this will also update the software period + * hw_perf_event::period_left field. + */ + void (*read) (struct perf_event *event); + + /* + * Group events scheduling is treated as a transaction, add + * group events as a whole and perform one schedulability test. + * If the test fails, roll back the whole group + * + * Start the transaction, after this ->add() doesn't need to + * do schedulability tests. + * + * Optional. + */ + void (*start_txn) (struct pmu *pmu, unsigned int txn_flags); + /* + * If ->start_txn() disabled the ->add() schedulability test + * then ->commit_txn() is required to perform one. On success + * the transaction is closed. On error the transaction is kept + * open until ->cancel_txn() is called. + * + * Optional. + */ + int (*commit_txn) (struct pmu *pmu); + /* + * Will cancel the transaction, assumes ->del() is called + * for each successful ->add() during the transaction. + * + * Optional. + */ + void (*cancel_txn) (struct pmu *pmu); + + /* + * Will return the value for perf_event_mmap_page::index for this event, + * if no implementation is provided it will default to: event->hw.idx + 1. 
+ */ + int (*event_idx) (struct perf_event *event); /*optional */ + + /* + * context-switches callback + */ + void (*sched_task) (struct perf_event_context *ctx, + bool sched_in); + /* + * PMU specific data size + */ + size_t task_ctx_size; + + + /* + * Set up pmu-private data structures for an AUX area + */ + void *(*setup_aux) (struct perf_event *event, void **pages, + int nr_pages, bool overwrite); + /* optional */ + + /* + * Free pmu-private AUX data structures + */ + void (*free_aux) (void *aux); /* optional */ + + /* + * Validate address range filters: make sure the HW supports the + * requested configuration and number of filters; return 0 if the + * supplied filters are valid, -errno otherwise. + * + * Runs in the context of the ioctl()ing process and is not serialized + * with the rest of the PMU callbacks. + */ + int (*addr_filters_validate) (struct list_head *filters); + /* optional */ + + /* + * Synchronize address range filter configuration: + * translate hw-agnostic filters into hardware configuration in + * event::hw::addr_filters. + * + * Runs as a part of filter sync sequence that is done in ->start() + * callback by calling perf_event_addr_filters_sync(). + * + * May (and should) traverse event::addr_filters::list, for which its + * caller provides necessary serialization. + */ + void (*addr_filters_sync) (struct perf_event *event); + /* optional */ + + /* + * Filter events for PMU-specific reasons. + */ + int (*filter_match) (struct perf_event *event); /* optional */ + + /* + * Check period value for PERF_EVENT_IOC_PERIOD ioctl. + */ + int (*check_period) (struct perf_event *event, u64 value); /* optional */ +}; + +enum perf_addr_filter_action_t { + PERF_ADDR_FILTER_ACTION_STOP = 0, + PERF_ADDR_FILTER_ACTION_START, + PERF_ADDR_FILTER_ACTION_FILTER, +}; + +/** + * struct perf_addr_filter - address range filter definition + * @entry: event's filter list linkage + * @inode: object file's inode for file-based filters + * @offset: filter range offset + * @size: filter range size (size==0 means single address trigger) + * @action: filter/start/stop + * + * This is a hardware-agnostic filter configuration as specified by the user. + */ +struct perf_addr_filter { + struct list_head entry; + struct path path; + unsigned long offset; + unsigned long size; + enum perf_addr_filter_action_t action; +}; + +/** + * struct perf_addr_filters_head - container for address range filters + * @list: list of filters for this event + * @lock: spinlock that serializes accesses to the @list and event's + * (and its children's) filter generations. + * @nr_file_filters: number of file-based filters + * + * A child event will use parent's @list (and therefore @lock), so they are + * bundled together; see perf_event_addr_filters(). + */ +struct perf_addr_filters_head { + struct list_head list; + raw_spinlock_t lock; + unsigned int nr_file_filters; +}; + +struct perf_addr_filter_range { + unsigned long start; + unsigned long size; +}; + +/** + * enum perf_event_state - the states of an event: + */ +enum perf_event_state { + PERF_EVENT_STATE_DEAD = -4, + PERF_EVENT_STATE_EXIT = -3, + PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_OFF = -1, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +struct file; +struct perf_sample_data; + +typedef void (*perf_overflow_handler_t)(struct perf_event *, + struct perf_sample_data *, + struct pt_regs *regs); + +/* + * Event capabilities. For event_caps and groups caps. + * + * PERF_EV_CAP_SOFTWARE: Is a software event. 
+ * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read + * from any CPU in the package where it is active. + */ +#define PERF_EV_CAP_SOFTWARE BIT(0) +#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1) + +#define SWEVENT_HLIST_BITS 8 +#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) + +struct swevent_hlist { + struct hlist_head heads[SWEVENT_HLIST_SIZE]; + struct rcu_head rcu_head; +}; + +#define PERF_ATTACH_CONTEXT 0x01 +#define PERF_ATTACH_GROUP 0x02 +#define PERF_ATTACH_TASK 0x04 +#define PERF_ATTACH_TASK_DATA 0x08 +#define PERF_ATTACH_ITRACE 0x10 + +struct perf_cgroup; +struct ring_buffer; + +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + +#define for_each_sibling_event(sibling, event) \ + if ((event)->group_leader == (event)) \ + list_for_each_entry((sibling), &(event)->sibling_list, sibling_list) + +/** + * struct perf_event - performance event kernel representation: + */ +struct perf_event { +#ifdef CONFIG_PERF_EVENTS + /* + * entry onto perf_event_context::event_list; + * modifications require ctx->lock + * RCU safe iterations. + */ + struct list_head event_entry; + + /* + * Locked for modification by both ctx->mutex and ctx->lock; holding + * either sufficies for read. + */ + struct list_head sibling_list; + struct list_head active_list; + /* + * Node on the pinned or flexible tree located at the event context; + */ + struct rb_node group_node; + u64 group_index; + /* + * We need storage to track the entries in perf_pmu_migrate_context; we + * cannot use the event_entry because of RCU and we want to keep the + * group in tact which avoids us using the other two entries. + */ + struct list_head migrate_entry; + + struct hlist_node hlist_entry; + struct list_head active_entry; + int nr_siblings; + + /* Not serialized. Only written during event initialization. */ + int event_caps; + /* The cumulative AND of all event_caps for events in this group. */ + int group_caps; + + struct perf_event *group_leader; + struct pmu *pmu; + void *pmu_private; + + enum perf_event_state state; + unsigned int attach_state; + local64_t count; + atomic64_t child_count; + + /* + * These are the total time in nanoseconds that the event + * has been enabled (i.e. eligible to run, and the task has + * been scheduled in, if this is a per-task event) + * and running (scheduled onto the CPU), respectively. + */ + u64 total_time_enabled; + u64 total_time_running; + u64 tstamp; + + /* + * timestamp shadows the actual context timing but it can + * be safely used in NMI interrupt context. It reflects the + * context time as it was when the event was last scheduled in. + * + * ctx_time already accounts for ctx->timestamp. Therefore to + * compute ctx_time for a sample, simply add perf_clock(). + */ + u64 shadow_ctx_time; + + struct perf_event_attr attr; + u16 header_size; + u16 id_header_size; + u16 read_size; + struct hw_perf_event hw; + + struct perf_event_context *ctx; + atomic_long_t refcount; + + /* + * These accumulate total time (in nanoseconds) that children + * events have been enabled and running, respectively. 
+ */ + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + + /* + * Protect attach/detach and child_list: + */ + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + + int oncpu; + int cpu; + + struct list_head owner_entry; + struct task_struct *owner; + + /* mmap bits */ + struct mutex mmap_mutex; + atomic_t mmap_count; + + struct ring_buffer *rb; + struct list_head rb_entry; + unsigned long rcu_batches; + int rcu_pending; + + /* poll related */ + wait_queue_head_t waitq; + struct fasync_struct *fasync; + + /* delayed work for NMIs and such */ + int pending_wakeup; + int pending_kill; + int pending_disable; + struct irq_work pending; + + atomic_t event_limit; + + /* address range filters */ + struct perf_addr_filters_head addr_filters; + /* vma address array for file-based filders */ + struct perf_addr_filter_range *addr_filter_ranges; + unsigned long addr_filters_gen; + + void (*destroy)(struct perf_event *); + struct rcu_head rcu_head; + + struct pid_namespace *ns; + u64 id; + + u64 (*clock)(void); + perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; +#ifdef CONFIG_BPF_SYSCALL + perf_overflow_handler_t orig_overflow_handler; + struct bpf_prog *prog; +#endif + +#ifdef CONFIG_EVENT_TRACING + struct trace_event_call *tp_event; + struct event_filter *filter; +#ifdef CONFIG_FUNCTION_TRACER + struct ftrace_ops ftrace_ops; +#endif +#endif + +#ifdef CONFIG_CGROUP_PERF + struct perf_cgroup *cgrp; /* cgroup event is attach to */ +#endif + + struct list_head sb_list; +#endif /* CONFIG_PERF_EVENTS */ +}; + + +struct perf_event_groups { + struct rb_root tree; + u64 index; +}; + +/** + * struct perf_event_context - event context structure + * + * Used as a container for task events and CPU events as well: + */ +struct perf_event_context { + struct pmu *pmu; + /* + * Protect the states of the events in the list, + * nr_active, and the list: + */ + raw_spinlock_t lock; + /* + * Protect the list of events. Locking either mutex or lock + * is sufficient to ensure the list doesn't change; to change + * the list you need to lock both the mutex and the spinlock. + */ + struct mutex mutex; + + struct list_head active_ctx_list; + struct perf_event_groups pinned_groups; + struct perf_event_groups flexible_groups; + struct list_head event_list; + + struct list_head pinned_active; + struct list_head flexible_active; + + int nr_events; + int nr_active; + int is_active; + int nr_stat; + int nr_freq; + int rotate_disable; + /* + * Set when nr_events != nr_active, except tolerant to events not + * necessary to be active due to scheduling constraints, such as cgroups. + */ + int rotate_necessary; + atomic_t refcount; + struct task_struct *task; + + /* + * Context clock, runs when context enabled. + */ + u64 time; + u64 timestamp; + + /* + * These fields let us detect when two contexts have both + * been cloned (inherited) from a common ancestor. + */ + struct perf_event_context *parent_ctx; + u64 parent_gen; + u64 generation; + int pin_count; +#ifdef CONFIG_CGROUP_PERF + int nr_cgroups; /* cgroup evts */ +#endif + void *task_ctx_data; /* pmu specific data */ + struct rcu_head rcu_head; +}; + +/* + * Number of contexts where an event can trigger: + * task, softirq, hardirq, nmi. 
+ */ +#define PERF_NR_CONTEXTS 4 + +/** + * struct perf_event_cpu_context - per cpu event context structure + */ +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int active_oncpu; + int exclusive; + + raw_spinlock_t hrtimer_lock; + struct hrtimer hrtimer; + ktime_t hrtimer_interval; + unsigned int hrtimer_active; + +#ifdef CONFIG_CGROUP_PERF + struct perf_cgroup *cgrp; + struct list_head cgrp_cpuctx_entry; +#endif + + struct list_head sched_cb_entry; + int sched_cb_usage; + + int online; +}; + +struct perf_output_handle { + struct perf_event *event; + struct ring_buffer *rb; + unsigned long wakeup; + unsigned long size; + u64 aux_flags; + union { + void *addr; + unsigned long head; + }; + int page; +}; + +struct bpf_perf_event_data_kern { + bpf_user_pt_regs_t *regs; + struct perf_sample_data *data; + struct perf_event *event; +}; + +#ifdef CONFIG_CGROUP_PERF + +/* + * perf_cgroup_info keeps track of time_enabled for a cgroup. + * This is a per-cpu dynamically allocated data structure. + */ +struct perf_cgroup_info { + u64 time; + u64 timestamp; +}; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info __percpu *info; +}; + +/* + * Must ensure cgroup is pinned (css_get) before calling + * this function. In other words, we cannot call this function + * if there is no cgroup event for the current CPU context. + */ +static inline struct perf_cgroup * +perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx) +{ + return container_of(task_css_check(task, perf_event_cgrp_id, + ctx ? lockdep_is_held(&ctx->lock) + : true), + struct perf_cgroup, css); +} +#endif /* CONFIG_CGROUP_PERF */ + +#ifdef CONFIG_PERF_EVENTS + +extern void *perf_aux_output_begin(struct perf_output_handle *handle, + struct perf_event *event); +extern void perf_aux_output_end(struct perf_output_handle *handle, + unsigned long size); +extern int perf_aux_output_skip(struct perf_output_handle *handle, + unsigned long size); +extern void *perf_get_aux(struct perf_output_handle *handle); +extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags); +extern void perf_event_itrace_started(struct perf_event *event); + +extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); +extern void perf_pmu_unregister(struct pmu *pmu); + +extern int perf_num_counters(void); +extern const char *perf_pmu_name(void); +extern void __perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task); +extern void __perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next); +extern int perf_event_init_task(struct task_struct *child); +extern void perf_event_exit_task(struct task_struct *child); +extern void perf_event_free_task(struct task_struct *task); +extern void perf_event_delayed_put(struct task_struct *task); +extern struct file *perf_event_get(unsigned int fd); +extern const struct perf_event *perf_get_event(struct file *file); +extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event); +extern void perf_event_print_debug(void); +extern void perf_pmu_disable(struct pmu *pmu); +extern void perf_pmu_enable(struct pmu *pmu); +extern void perf_sched_cb_dec(struct pmu *pmu); +extern void perf_sched_cb_inc(struct pmu *pmu); +extern int perf_event_task_disable(void); +extern int perf_event_task_enable(void); +extern int perf_event_refresh(struct perf_event *event, int refresh); +extern void perf_event_update_userpage(struct perf_event *event); +extern int 
perf_event_release_kernel(struct perf_event *event); +extern struct perf_event * +perf_event_create_kernel_counter(struct perf_event_attr *attr, + int cpu, + struct task_struct *task, + perf_overflow_handler_t callback, + void *context); +extern void perf_pmu_migrate_context(struct pmu *pmu, + int src_cpu, int dst_cpu); +int perf_event_read_local(struct perf_event *event, u64 *value, + u64 *enabled, u64 *running); +extern u64 perf_event_read_value(struct perf_event *event, + u64 *enabled, u64 *running); + + +struct perf_sample_data { + /* + * Fields set by perf_sample_data_init(), group so as to + * minimize the cachelines touched. + */ + u64 addr; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + u64 period; + u64 weight; + u64 txn; + union perf_mem_data_src data_src; + + /* + * The other fields, optionally {set,used} by + * perf_{prepare,output}_sample(). + */ + u64 type; + u64 ip; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 id; + u64 stream_id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + struct perf_callchain_entry *callchain; + + /* + * regs_user may point to task_pt_regs or to regs_user_copy, depending + * on arch details. + */ + struct perf_regs regs_user; + struct pt_regs regs_user_copy; + + struct perf_regs regs_intr; + u64 stack_user_size; + + u64 phys_addr; +} ____cacheline_aligned; + +/* default value for data source */ +#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ + PERF_MEM_S(LVL, NA) |\ + PERF_MEM_S(SNOOP, NA) |\ + PERF_MEM_S(LOCK, NA) |\ + PERF_MEM_S(TLB, NA)) + +static inline void perf_sample_data_init(struct perf_sample_data *data, + u64 addr, u64 period) +{ + /* remaining struct members initialized in perf_prepare_sample() */ + data->addr = addr; + data->raw = NULL; + data->br_stack = NULL; + data->period = period; + data->weight = 0; + data->data_src.val = PERF_MEM_NA; + data->txn = 0; +} + +extern void perf_output_sample(struct perf_output_handle *handle, + struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event); +extern void perf_prepare_sample(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event, + struct pt_regs *regs); + +extern int perf_event_overflow(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); + +extern void perf_event_output_forward(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); +extern void perf_event_output_backward(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); +extern void perf_event_output(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); + +static inline bool +is_default_overflow_handler(struct perf_event *event) +{ + if (likely(event->overflow_handler == perf_event_output_forward)) + return true; + if (unlikely(event->overflow_handler == perf_event_output_backward)) + return true; + return false; +} + +extern void +perf_event_header__init_id(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event); +extern void +perf_event__output_id_sample(struct perf_event *event, + struct perf_output_handle *handle, + struct perf_sample_data *sample); + +extern void +perf_log_lost_samples(struct perf_event *event, u64 lost); + +static inline bool is_sampling_event(struct perf_event *event) +{ + return event->attr.sample_period != 0; +} + +/* + * Return 1 for a software event, 0 for a hardware event + */ +static inline int is_software_event(struct perf_event 
*event) +{ + return event->event_caps & PERF_EV_CAP_SOFTWARE; +} + +/* + * Return 1 for event in sw context, 0 for event in hw context + */ +static inline int in_software_context(struct perf_event *event) +{ + return event->ctx->pmu->task_ctx_nr == perf_sw_context; +} + +static inline int is_exclusive_pmu(struct pmu *pmu) +{ + return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; +} + +extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; + +extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); +extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); + +#ifndef perf_arch_fetch_caller_regs +static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } +#endif + +/* + * Take a snapshot of the regs. Skip ip and frame pointer to + * the nth caller. We only need a few of the regs: + * - ip for PERF_SAMPLE_IP + * - cs for user_mode() tests + * - bp for callchains + * - eflags, for future purposes, just in case + */ +static inline void perf_fetch_caller_regs(struct pt_regs *regs) +{ + perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); +} + +static __always_inline void +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) +{ + if (static_key_false(&perf_swevent_enabled[event_id])) + __perf_sw_event(event_id, nr, regs, addr); +} + +DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]); + +/* + * 'Special' version for the scheduler, it hard assumes no recursion, + * which is guaranteed by us not actually scheduling inside other swevents + * because those disable preemption. + */ +static __always_inline void +perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) +{ + if (static_key_false(&perf_swevent_enabled[event_id])) { + struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); + + perf_fetch_caller_regs(regs); + ___perf_sw_event(event_id, nr, regs, addr); + } +} + +extern struct static_key_false perf_sched_events; + +static __always_inline bool +perf_sw_migrate_enabled(void) +{ + if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS])) + return true; + return false; +} + +static inline void perf_event_task_migrate(struct task_struct *task) +{ + if (perf_sw_migrate_enabled()) + task->sched_migrated = 1; +} + +static inline void perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task) +{ + if (static_branch_unlikely(&perf_sched_events)) + __perf_event_task_sched_in(prev, task); + + if (perf_sw_migrate_enabled() && task->sched_migrated) { + struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); + + perf_fetch_caller_regs(regs); + ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0); + task->sched_migrated = 0; + } +} + +static inline void perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next) +{ + perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); + + if (static_branch_unlikely(&perf_sched_events)) + __perf_event_task_sched_out(prev, next); +} + +extern void perf_event_mmap(struct vm_area_struct *vma); +extern struct perf_guest_info_callbacks *perf_guest_cbs; +extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); +extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); + +extern void perf_event_exec(void); +extern void perf_event_comm(struct task_struct *tsk, bool exec); +extern void perf_event_namespaces(struct task_struct *tsk); +extern void perf_event_fork(struct task_struct *tsk); + +/* Callchains */ +DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); + +extern void 
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); +extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); +extern struct perf_callchain_entry * +get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, + u32 max_stack, bool crosstask, bool add_mark); +extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); +extern int get_callchain_buffers(int max_stack); +extern void put_callchain_buffers(void); + +extern int sysctl_perf_event_max_stack; +extern int sysctl_perf_event_max_contexts_per_stack; + +static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip) +{ + if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { + struct perf_callchain_entry *entry = ctx->entry; + entry->ip[entry->nr++] = ip; + ++ctx->contexts; + return 0; + } else { + ctx->contexts_maxed = true; + return -1; /* no more room, stop walking the stack */ + } +} + +static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip) +{ + if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { + struct perf_callchain_entry *entry = ctx->entry; + entry->ip[entry->nr++] = ip; + ++ctx->nr; + return 0; + } else { + return -1; /* no more room, stop walking the stack */ + } +} + +extern int sysctl_perf_event_paranoid; +extern int sysctl_perf_event_mlock; +extern int sysctl_perf_event_sample_rate; +extern int sysctl_perf_cpu_time_max_percent; + +extern void perf_sample_event_took(u64 sample_len_ns); + +extern int perf_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +int perf_event_max_stack_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + +static inline bool perf_paranoid_tracepoint_raw(void) +{ + return sysctl_perf_event_paranoid > -1; +} + +static inline bool perf_paranoid_cpu(void) +{ + return sysctl_perf_event_paranoid > 0; +} + +static inline bool perf_paranoid_kernel(void) +{ + return sysctl_perf_event_paranoid > 1; +} + +extern void perf_event_init(void); +extern void perf_tp_event(u16 event_type, u64 count, void *record, + int entry_size, struct pt_regs *regs, + struct hlist_head *head, int rctx, + struct task_struct *task); +extern void perf_bp_event(struct perf_event *event, void *data); + +#ifndef perf_misc_flags +# define perf_misc_flags(regs) \ + (user_mode(regs) ? 
PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) +# define perf_instruction_pointer(regs) instruction_pointer(regs) +#endif +#ifndef perf_arch_bpf_user_pt_regs +# define perf_arch_bpf_user_pt_regs(regs) regs +#endif + +static inline bool has_branch_stack(struct perf_event *event) +{ + return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; +} + +static inline bool needs_branch_stack(struct perf_event *event) +{ + return event->attr.branch_sample_type != 0; +} + +static inline bool has_aux(struct perf_event *event) +{ + return event->pmu->setup_aux; +} + +static inline bool is_write_backward(struct perf_event *event) +{ + return !!event->attr.write_backward; +} + +static inline bool has_addr_filter(struct perf_event *event) +{ + return event->pmu->nr_addr_filters; +} + +/* + * An inherited event uses parent's filters + */ +static inline struct perf_addr_filters_head * +perf_event_addr_filters(struct perf_event *event) +{ + struct perf_addr_filters_head *ifh = &event->addr_filters; + + if (event->parent) + ifh = &event->parent->addr_filters; + + return ifh; +} + +extern void perf_event_addr_filters_sync(struct perf_event *event); + +extern int perf_output_begin(struct perf_output_handle *handle, + struct perf_event *event, unsigned int size); +extern int perf_output_begin_forward(struct perf_output_handle *handle, + struct perf_event *event, + unsigned int size); +extern int perf_output_begin_backward(struct perf_output_handle *handle, + struct perf_event *event, + unsigned int size); + +extern void perf_output_end(struct perf_output_handle *handle); +extern unsigned int perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len); +extern unsigned int perf_output_skip(struct perf_output_handle *handle, + unsigned int len); +extern int perf_swevent_get_recursion_context(void); +extern void perf_swevent_put_recursion_context(int rctx); +extern u64 perf_swevent_set_period(struct perf_event *event); +extern void perf_event_enable(struct perf_event *event); +extern void perf_event_disable(struct perf_event *event); +extern void perf_event_disable_local(struct perf_event *event); +extern void perf_event_disable_inatomic(struct perf_event *event); +extern void perf_event_task_tick(void); +extern int perf_event_account_interrupt(struct perf_event *event); +#else /* !CONFIG_PERF_EVENTS: */ +static inline void * +perf_aux_output_begin(struct perf_output_handle *handle, + struct perf_event *event) { return NULL; } +static inline void +perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) + { } +static inline int +perf_aux_output_skip(struct perf_output_handle *handle, + unsigned long size) { return -EINVAL; } +static inline void * +perf_get_aux(struct perf_output_handle *handle) { return NULL; } +static inline void +perf_event_task_migrate(struct task_struct *task) { } +static inline void +perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task) { } +static inline void +perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next) { } +static inline int perf_event_init_task(struct task_struct *child) { return 0; } +static inline void perf_event_exit_task(struct task_struct *child) { } +static inline void perf_event_free_task(struct task_struct *task) { } +static inline void perf_event_delayed_put(struct task_struct *task) { } +static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); } +static inline const struct perf_event *perf_get_event(struct file *file) +{ + return 
ERR_PTR(-EINVAL); +} +static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event) +{ + return ERR_PTR(-EINVAL); +} +static inline int perf_event_read_local(struct perf_event *event, u64 *value, + u64 *enabled, u64 *running) +{ + return -EINVAL; +} +static inline void perf_event_print_debug(void) { } +static inline int perf_event_task_disable(void) { return -EINVAL; } +static inline int perf_event_task_enable(void) { return -EINVAL; } +static inline int perf_event_refresh(struct perf_event *event, int refresh) +{ + return -EINVAL; +} + +static inline void +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } +static inline void +perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { } +static inline void +perf_bp_event(struct perf_event *event, void *data) { } + +static inline int perf_register_guest_info_callbacks +(struct perf_guest_info_callbacks *callbacks) { return 0; } +static inline int perf_unregister_guest_info_callbacks +(struct perf_guest_info_callbacks *callbacks) { return 0; } + +static inline void perf_event_mmap(struct vm_area_struct *vma) { } +static inline void perf_event_exec(void) { } +static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } +static inline void perf_event_namespaces(struct task_struct *tsk) { } +static inline void perf_event_fork(struct task_struct *tsk) { } +static inline void perf_event_init(void) { } +static inline int perf_swevent_get_recursion_context(void) { return -1; } +static inline void perf_swevent_put_recursion_context(int rctx) { } +static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; } +static inline void perf_event_enable(struct perf_event *event) { } +static inline void perf_event_disable(struct perf_event *event) { } +static inline int __perf_event_disable(void *info) { return -1; } +static inline void perf_event_task_tick(void) { } +static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } +#endif + +#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) +extern void perf_restore_debug_store(void); +#else +static inline void perf_restore_debug_store(void) { } +#endif + +static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag) +{ + return frag->pad < sizeof(u64); +} + +#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) + +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; + const char *event_str; +}; + +struct perf_pmu_events_ht_attr { + struct device_attribute attr; + u64 id; + const char *event_str_ht; + const char *event_str_noht; +}; + +ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, + char *page); + +#define PMU_EVENT_ATTR(_name, _var, _id, _show) \ +static struct perf_pmu_events_attr _var = { \ + .attr = __ATTR(_name, 0444, _show, NULL), \ + .id = _id, \ +}; + +#define PMU_EVENT_ATTR_STRING(_name, _var, _str) \ +static struct perf_pmu_events_attr _var = { \ + .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \ + .id = 0, \ + .event_str = _str, \ +}; + +#define PMU_FORMAT_ATTR(_name, _format) \ +static ssize_t \ +_name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ + \ +static struct device_attribute format_attr_##_name = __ATTR_RO(_name) + +/* Performance counter hotplug functions */ +#ifdef CONFIG_PERF_EVENTS +int perf_event_init_cpu(unsigned int cpu); +int 
perf_event_exit_cpu(unsigned int cpu); +#else +#define perf_event_init_cpu NULL +#define perf_event_exit_cpu NULL +#endif + +#endif /* _LINUX_PERF_EVENT_H */ diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h new file mode 100644 index 000000000..476747456 --- /dev/null +++ b/include/linux/perf_regs.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PERF_REGS_H +#define _LINUX_PERF_REGS_H + +#include + +struct perf_regs { + __u64 abi; + struct pt_regs *regs; +}; + +#ifdef CONFIG_HAVE_PERF_REGS +#include +u64 perf_reg_value(struct pt_regs *regs, int idx); +int perf_reg_validate(u64 mask); +u64 perf_reg_abi(struct task_struct *task); +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy); +#else +static inline u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + return 0; +} + +static inline int perf_reg_validate(u64 mask) +{ + return mask ? -ENOSYS : 0; +} + +static inline u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_NONE; +} + +static inline void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} +#endif /* CONFIG_HAVE_PERF_REGS */ +#endif /* _LINUX_PERF_REGS_H */ diff --git a/include/linux/personality.h b/include/linux/personality.h new file mode 100644 index 000000000..fc16fbc65 --- /dev/null +++ b/include/linux/personality.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PERSONALITY_H +#define _LINUX_PERSONALITY_H + +#include + +/* + * Return the base personality without flags. + */ +#define personality(pers) (pers & PER_MASK) + +/* + * Change personality of the currently running process. + */ +#define set_personality(pers) (current->personality = (pers)) + +#endif /* _LINUX_PERSONALITY_H */ diff --git a/include/linux/pfn.h b/include/linux/pfn.h new file mode 100644 index 000000000..14bc053c5 --- /dev/null +++ b/include/linux/pfn.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PFN_H_ +#define _LINUX_PFN_H_ + +#ifndef __ASSEMBLY__ +#include + +/* + * pfn_t: encapsulates a page-frame number that is optionally backed + * by memmap (struct page). Whether a pfn_t has a 'struct page' + * backing is indicated by flags in the high bits of the value. 
+ */ +typedef struct { + u64 val; +} pfn_t; +#endif + +#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) +#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) +#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT) +#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT)) + +#endif diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h new file mode 100644 index 000000000..673546ba7 --- /dev/null +++ b/include/linux/pfn_t.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PFN_T_H_ +#define _LINUX_PFN_T_H_ +#include + +/* + * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags + * PFN_SG_CHAIN - pfn is a pointer to the next scatterlist entry + * PFN_SG_LAST - pfn references a page and is the last scatterlist entry + * PFN_DEV - pfn is not covered by system memmap by default + * PFN_MAP - pfn has a dynamic page mapping established by a device driver + */ +#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) +#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1)) +#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) +#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) +#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) +#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5)) + +#define PFN_FLAGS_TRACE \ + { PFN_SPECIAL, "SPECIAL" }, \ + { PFN_SG_CHAIN, "SG_CHAIN" }, \ + { PFN_SG_LAST, "SG_LAST" }, \ + { PFN_DEV, "DEV" }, \ + { PFN_MAP, "MAP" } + +static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags) +{ + pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), }; + + return pfn_t; +} + +/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */ +static inline pfn_t pfn_to_pfn_t(unsigned long pfn) +{ + return __pfn_to_pfn_t(pfn, 0); +} + +static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags) +{ + return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags); +} + +static inline bool pfn_t_has_page(pfn_t pfn) +{ + return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0; +} + +static inline unsigned long pfn_t_to_pfn(pfn_t pfn) +{ + return pfn.val & ~PFN_FLAGS_MASK; +} + +static inline struct page *pfn_t_to_page(pfn_t pfn) +{ + if (pfn_t_has_page(pfn)) + return pfn_to_page(pfn_t_to_pfn(pfn)); + return NULL; +} + +static inline phys_addr_t pfn_t_to_phys(pfn_t pfn) +{ + return PFN_PHYS(pfn_t_to_pfn(pfn)); +} + +static inline void *pfn_t_to_virt(pfn_t pfn) +{ + if (pfn_t_has_page(pfn)) + return __va(pfn_t_to_phys(pfn)); + return NULL; +} + +static inline pfn_t page_to_pfn_t(struct page *page) +{ + return pfn_to_pfn_t(page_to_pfn(page)); +} + +static inline int pfn_t_valid(pfn_t pfn) +{ + return pfn_valid(pfn_t_to_pfn(pfn)); +} + +#ifdef CONFIG_MMU +static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot) +{ + return pfn_pte(pfn_t_to_pfn(pfn), pgprot); +} +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) +{ + return pfn_pmd(pfn_t_to_pfn(pfn), pgprot); +} + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) +{ + return pfn_pud(pfn_t_to_pfn(pfn), pgprot); +} +#endif +#endif + +#ifdef __HAVE_ARCH_PTE_DEVMAP +static inline bool pfn_t_devmap(pfn_t pfn) +{ + const u64 flags = PFN_DEV|PFN_MAP; + + return (pfn.val & flags) == flags; +} +#else +static inline bool pfn_t_devmap(pfn_t pfn) +{ + return false; +} +pte_t pte_mkdevmap(pte_t pte); +pmd_t pmd_mkdevmap(pmd_t pmd); +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + 
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) +pud_t pud_mkdevmap(pud_t pud); +#endif +#endif /* __HAVE_ARCH_PTE_DEVMAP */ + +#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL +static inline bool pfn_t_special(pfn_t pfn) +{ + return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL; +} +#else +static inline bool pfn_t_special(pfn_t pfn) +{ + return false; +} +#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ +#endif /* _LINUX_PFN_T_H_ */ diff --git a/include/linux/phonet.h b/include/linux/phonet.h new file mode 100644 index 000000000..f691b04fc --- /dev/null +++ b/include/linux/phonet.h @@ -0,0 +1,40 @@ +/** + * file phonet.h + * + * Phonet sockets kernel interface + * + * Copyright (C) 2008 Nokia Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ +#ifndef LINUX_PHONET_H +#define LINUX_PHONET_H + +#include + +#define SIOCPNGAUTOCONF (SIOCDEVPRIVATE + 0) + +struct if_phonet_autoconf { + uint8_t device; +}; + +struct if_phonet_req { + char ifr_phonet_name[16]; + union { + struct if_phonet_autoconf ifru_phonet_autoconf; + } ifr_ifru; +}; +#define ifr_phonet_autoconf ifr_ifru.ifru_phonet_autoconf +#endif diff --git a/include/linux/phy.h b/include/linux/phy.h new file mode 100644 index 000000000..42766e717 --- /dev/null +++ b/include/linux/phy.h @@ -0,0 +1,1174 @@ +/* + * Framework and drivers for configuring and reading different PHYs + * Based on code in sungem_phy.c and gianfar_phy.c + * + * Author: Andy Fleming + * + * Copyright (c) 2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __PHY_H +#define __PHY_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define PHY_DEFAULT_FEATURES (SUPPORTED_Autoneg | \ + SUPPORTED_TP | \ + SUPPORTED_MII) + +#define PHY_10BT_FEATURES (SUPPORTED_10baseT_Half | \ + SUPPORTED_10baseT_Full) + +#define PHY_100BT_FEATURES (SUPPORTED_100baseT_Half | \ + SUPPORTED_100baseT_Full) + +#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \ + SUPPORTED_1000baseT_Full) + +#define PHY_BASIC_FEATURES (PHY_10BT_FEATURES | \ + PHY_100BT_FEATURES | \ + PHY_DEFAULT_FEATURES) + +#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \ + PHY_1000BT_FEATURES) + + +/* + * Set phydev->irq to PHY_POLL if interrupts are not supported, + * or not desired for this PHY. 
Set to PHY_IGNORE_INTERRUPT if + * the attached driver handles the interrupt + */ +#define PHY_POLL -1 +#define PHY_IGNORE_INTERRUPT -2 + +#define PHY_HAS_INTERRUPT 0x00000001 +#define PHY_IS_INTERNAL 0x00000002 +#define PHY_RST_AFTER_CLK_EN 0x00000004 +#define MDIO_DEVICE_IS_PHY 0x80000000 + +/* Interface Mode definitions */ +typedef enum { + PHY_INTERFACE_MODE_NA, + PHY_INTERFACE_MODE_INTERNAL, + PHY_INTERFACE_MODE_MII, + PHY_INTERFACE_MODE_GMII, + PHY_INTERFACE_MODE_SGMII, + PHY_INTERFACE_MODE_TBI, + PHY_INTERFACE_MODE_REVMII, + PHY_INTERFACE_MODE_RMII, + PHY_INTERFACE_MODE_RGMII, + PHY_INTERFACE_MODE_RGMII_ID, + PHY_INTERFACE_MODE_RGMII_RXID, + PHY_INTERFACE_MODE_RGMII_TXID, + PHY_INTERFACE_MODE_RTBI, + PHY_INTERFACE_MODE_SMII, + PHY_INTERFACE_MODE_XGMII, + PHY_INTERFACE_MODE_MOCA, + PHY_INTERFACE_MODE_QSGMII, + PHY_INTERFACE_MODE_TRGMII, + PHY_INTERFACE_MODE_1000BASEX, + PHY_INTERFACE_MODE_2500BASEX, + PHY_INTERFACE_MODE_RXAUI, + PHY_INTERFACE_MODE_XAUI, + /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */ + PHY_INTERFACE_MODE_10GKR, + PHY_INTERFACE_MODE_MAX, +} phy_interface_t; + +/** + * phy_supported_speeds - return all speeds currently supported by a phy device + * @phy: The phy device to return supported speeds of. + * @speeds: buffer to store supported speeds in. + * @size: size of speeds buffer. + * + * Description: Returns the number of supported speeds, and + * fills the speeds * buffer with the supported speeds. If speeds buffer is + * too small to contain * all currently supported speeds, will return as + * many speeds as can fit. + */ +unsigned int phy_supported_speeds(struct phy_device *phy, + unsigned int *speeds, + unsigned int size); + +/** + * It maps 'enum phy_interface_t' found in include/linux/phy.h + * into the device tree binding of 'phy-mode', so that Ethernet + * device driver can get phy interface from device tree. 
+ */ +static inline const char *phy_modes(phy_interface_t interface) +{ + switch (interface) { + case PHY_INTERFACE_MODE_NA: + return ""; + case PHY_INTERFACE_MODE_INTERNAL: + return "internal"; + case PHY_INTERFACE_MODE_MII: + return "mii"; + case PHY_INTERFACE_MODE_GMII: + return "gmii"; + case PHY_INTERFACE_MODE_SGMII: + return "sgmii"; + case PHY_INTERFACE_MODE_TBI: + return "tbi"; + case PHY_INTERFACE_MODE_REVMII: + return "rev-mii"; + case PHY_INTERFACE_MODE_RMII: + return "rmii"; + case PHY_INTERFACE_MODE_RGMII: + return "rgmii"; + case PHY_INTERFACE_MODE_RGMII_ID: + return "rgmii-id"; + case PHY_INTERFACE_MODE_RGMII_RXID: + return "rgmii-rxid"; + case PHY_INTERFACE_MODE_RGMII_TXID: + return "rgmii-txid"; + case PHY_INTERFACE_MODE_RTBI: + return "rtbi"; + case PHY_INTERFACE_MODE_SMII: + return "smii"; + case PHY_INTERFACE_MODE_XGMII: + return "xgmii"; + case PHY_INTERFACE_MODE_MOCA: + return "moca"; + case PHY_INTERFACE_MODE_QSGMII: + return "qsgmii"; + case PHY_INTERFACE_MODE_TRGMII: + return "trgmii"; + case PHY_INTERFACE_MODE_1000BASEX: + return "1000base-x"; + case PHY_INTERFACE_MODE_2500BASEX: + return "2500base-x"; + case PHY_INTERFACE_MODE_RXAUI: + return "rxaui"; + case PHY_INTERFACE_MODE_XAUI: + return "xaui"; + case PHY_INTERFACE_MODE_10GKR: + return "10gbase-kr"; + default: + return "unknown"; + } +} + + +#define PHY_INIT_TIMEOUT 100000 +#define PHY_STATE_TIME 1 +#define PHY_FORCE_TIMEOUT 10 +#define PHY_AN_TIMEOUT 10 + +#define PHY_MAX_ADDR 32 + +/* Used when trying to connect to a specific phy (mii bus id:phy device id) */ +#define PHY_ID_FMT "%s:%02x" + +#define MII_BUS_ID_SIZE 61 + +/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit + IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */ +#define MII_ADDR_C45 (1<<30) + +struct device; +struct phylink; +struct sk_buff; + +/* + * The Bus class for PHYs. 
Devices which provide access to + * PHYs should register using this structure + */ +struct mii_bus { + struct module *owner; + const char *name; + char id[MII_BUS_ID_SIZE]; + void *priv; + int (*read)(struct mii_bus *bus, int addr, int regnum); + int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val); + int (*reset)(struct mii_bus *bus); + + /* + * A lock to ensure that only one thing can read/write + * the MDIO bus at a time + */ + struct mutex mdio_lock; + + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED, + MDIOBUS_UNREGISTERED, + MDIOBUS_RELEASED, + } state; + struct device dev; + + /* list of all PHYs on bus */ + struct mdio_device *mdio_map[PHY_MAX_ADDR]; + + /* PHY addresses to be ignored when probing */ + u32 phy_mask; + + /* PHY addresses to ignore the TA/read failure */ + u32 phy_ignore_ta_mask; + + /* + * An array of interrupts, each PHY's interrupt at the index + * matching its address + */ + int irq[PHY_MAX_ADDR]; + + /* GPIO reset pulse width in microseconds */ + int reset_delay_us; + /* RESET GPIO descriptor pointer */ + struct gpio_desc *reset_gpiod; +}; +#define to_mii_bus(d) container_of(d, struct mii_bus, dev) + +struct mii_bus *mdiobus_alloc_size(size_t); +static inline struct mii_bus *mdiobus_alloc(void) +{ + return mdiobus_alloc_size(0); +} + +int __mdiobus_register(struct mii_bus *bus, struct module *owner); +#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) +void mdiobus_unregister(struct mii_bus *bus); +void mdiobus_free(struct mii_bus *bus); +struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); +static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) +{ + return devm_mdiobus_alloc_size(dev, 0); +} + +void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); +struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); + +#define PHY_INTERRUPT_DISABLED 0x0 +#define PHY_INTERRUPT_ENABLED 0x80000000 + +/* PHY state machine states: + * + * DOWN: PHY device and driver are not ready for anything. probe + * should be called if and only if the PHY is in this state, + * given that the PHY device exists. + * - PHY driver probe function will, depending on the PHY, set + * the state to STARTING or READY + * + * STARTING: PHY device is coming up, and the ethernet driver is + * not ready. PHY drivers may set this in the probe function. + * If they do, they are responsible for making sure the state is + * eventually set to indicate whether the PHY is UP or READY, + * depending on the state when the PHY is done starting up. + * - PHY driver will set the state to READY + * - start will set the state to PENDING + * + * READY: PHY is ready to send and receive packets, but the + * controller is not. By default, PHYs which do not implement + * probe will be set to this state by phy_probe(). If the PHY + * driver knows the PHY is ready, and the PHY state is STARTING, + * then it sets this STATE. + * - start will set the state to UP + * + * PENDING: PHY device is coming up, but the ethernet driver is + * ready. phy_start will set this state if the PHY state is + * STARTING. + * - PHY driver will set the state to UP when the PHY is ready + * + * UP: The PHY and attached device are ready to do work. + * Interrupts should be started here. + * - timer moves to AN + * + * AN: The PHY is currently negotiating the link state. Link is + * therefore down for now. phy_timer will set this state when it + * detects the state is UP. 
config_aneg will set this state + * whenever called with phydev->autoneg set to AUTONEG_ENABLE. + * - If autonegotiation finishes, but there's no link, it sets + * the state to NOLINK. + * - If aneg finishes with link, it sets the state to RUNNING, + * and calls adjust_link + * - If autonegotiation did not finish after an arbitrary amount + * of time, autonegotiation should be tried again if the PHY + * supports "magic" autonegotiation (back to AN) + * - If it didn't finish, and no magic_aneg, move to FORCING. + * + * NOLINK: PHY is up, but not currently plugged in. + * - If the timer notes that the link comes back, we move to RUNNING + * - config_aneg moves to AN + * - phy_stop moves to HALTED + * + * FORCING: PHY is being configured with forced settings + * - if link is up, move to RUNNING + * - If link is down, we drop to the next highest setting, and + * retry (FORCING) after a timeout + * - phy_stop moves to HALTED + * + * RUNNING: PHY is currently up, running, and possibly sending + * and/or receiving packets + * - timer will set CHANGELINK if we're polling (this ensures the + * link state is polled every other cycle of this state machine, + * which makes it every other second) + * - irq will set CHANGELINK + * - config_aneg will set AN + * - phy_stop moves to HALTED + * + * CHANGELINK: PHY experienced a change in link state + * - timer moves to RUNNING if link + * - timer moves to NOLINK if the link is down + * - phy_stop moves to HALTED + * + * HALTED: PHY is up, but no polling or interrupts are done. Or + * PHY is in an error state. + * + * - phy_start moves to RESUMING + * + * RESUMING: PHY was halted, but now wants to run again. + * - If we are forcing, or aneg is done, timer moves to RUNNING + * - If aneg is not done, timer moves to AN + * - phy_stop moves to HALTED + */ +enum phy_state { + PHY_DOWN = 0, + PHY_STARTING, + PHY_READY, + PHY_PENDING, + PHY_UP, + PHY_AN, + PHY_RUNNING, + PHY_NOLINK, + PHY_FORCING, + PHY_CHANGELINK, + PHY_HALTED, + PHY_RESUMING +}; + +/** + * struct phy_c45_device_ids - 802.3-c45 Device Identifiers + * @devices_in_package: Bit vector of devices present. + * @device_ids: The device identifer for each present device. + */ +struct phy_c45_device_ids { + u32 devices_in_package; + u32 device_ids[8]; +}; + +/* phy_device: An instance of a PHY + * + * drv: Pointer to the driver for this PHY instance + * phy_id: UID for this device found during discovery + * c45_ids: 802.3-c45 Device Identifers if is_c45. + * is_c45: Set to true if this phy uses clause 45 addressing. + * is_internal: Set to true if this phy is internal to a MAC. + * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc. + * has_fixups: Set to true if this phy has fixups/quirks. + * suspended: Set to true if this phy has been suspended successfully. + * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus. + * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal. + * loopback_enabled: Set true if this phy has been loopbacked successfully. + * state: state of the PHY for management purposes + * dev_flags: Device-specific flags used by the PHY driver. 
+ * link_timeout: The number of timer firings to wait before the + * giving up on the current attempt at acquiring a link + * irq: IRQ number of the PHY's interrupt (-1 if none) + * phy_timer: The timer for handling the state machine + * phy_queue: A work_queue for the phy_mac_interrupt + * attached_dev: The attached enet driver's device instance ptr + * adjust_link: Callback for the enet controller to respond to + * changes in the link state. + * + * speed, duplex, pause, supported, advertising, lp_advertising, + * and autoneg are used like in mii_if_info + * + * interrupts currently only supports enabled or disabled, + * but could be changed in the future to support enabling + * and disabling specific interrupts + * + * Contains some infrastructure for polling and interrupt + * handling, as well as handling shifts in PHY hardware state + */ +struct phy_device { + struct mdio_device mdio; + + /* Information about the PHY type */ + /* And management functions */ + struct phy_driver *drv; + + u32 phy_id; + + struct phy_c45_device_ids c45_ids; + unsigned is_c45:1; + unsigned is_internal:1; + unsigned is_pseudo_fixed_link:1; + unsigned has_fixups:1; + unsigned suspended:1; + unsigned suspended_by_mdio_bus:1; + unsigned sysfs_links:1; + unsigned loopback_enabled:1; + + unsigned autoneg:1; + /* The most recently read link state */ + unsigned link:1; + + enum phy_state state; + + u32 dev_flags; + + phy_interface_t interface; + + /* + * forced speed & duplex (no autoneg) + * partner speed & duplex & pause (autoneg) + */ + int speed; + int duplex; + int pause; + int asym_pause; + + /* Enabled Interrupts */ + u32 interrupts; + + /* Union of PHY and Attached devices' supported modes */ + /* See mii.h for more info */ + u32 supported; + u32 advertising; + u32 lp_advertising; + + /* Energy efficient ethernet modes which should be prohibited */ + u32 eee_broken_modes; + + int link_timeout; + +#ifdef CONFIG_LED_TRIGGER_PHY + struct phy_led_trigger *phy_led_triggers; + unsigned int phy_num_led_triggers; + struct phy_led_trigger *last_triggered; + + struct phy_led_trigger *led_link_trigger; +#endif + + /* + * Interrupt number for this PHY + * -1 means no interrupt + */ + int irq; + + /* private data pointer */ + /* For use by PHYs to maintain extra state */ + void *priv; + + /* Interrupt and Polling infrastructure */ + struct work_struct phy_queue; + struct delayed_work state_queue; + + struct mutex lock; + + struct phylink *phylink; + struct net_device *attached_dev; + + u8 mdix; + u8 mdix_ctrl; + + void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier); + void (*adjust_link)(struct net_device *dev); +}; +#define to_phy_device(d) container_of(to_mdio_device(d), \ + struct phy_device, mdio) + +/* struct phy_driver: Driver structure for a particular PHY type + * + * driver_data: static driver data + * phy_id: The result of reading the UID registers of this PHY + * type, and ANDing them with the phy_id_mask. This driver + * only works for PHYs with IDs which match this field + * name: The friendly name of this PHY type + * phy_id_mask: Defines the important bits of the phy_id + * features: A list of features (speed, duplex, etc) supported + * by this PHY + * flags: A bitfield defining certain other features this PHY + * supports (like interrupts) + * + * All functions are optional. If config_aneg or read_status + * are not implemented, the phy core uses the genphy versions. + * Note that none of these functions should be called from + * interrupt time. 
The goal is for the bus read/write functions + * to be able to block when the bus transaction is happening, + * and be freed up by an interrupt (The MPC85xx has this ability, + * though it is not currently supported in the driver). + */ +struct phy_driver { + struct mdio_driver_common mdiodrv; + u32 phy_id; + char *name; + u32 phy_id_mask; + u32 features; + u32 flags; + const void *driver_data; + + /* + * Called to issue a PHY software reset + */ + int (*soft_reset)(struct phy_device *phydev); + + /* + * Called to initialize the PHY, + * including after a reset + */ + int (*config_init)(struct phy_device *phydev); + + /* + * Called during discovery. Used to set + * up device-specific structures, if any + */ + int (*probe)(struct phy_device *phydev); + + /* PHY Power Management */ + int (*suspend)(struct phy_device *phydev); + int (*resume)(struct phy_device *phydev); + + /* + * Configures the advertisement and resets + * autonegotiation if phydev->autoneg is on, + * forces the speed to the current settings in phydev + * if phydev->autoneg is off + */ + int (*config_aneg)(struct phy_device *phydev); + + /* Determines the auto negotiation result */ + int (*aneg_done)(struct phy_device *phydev); + + /* Determines the negotiated speed and duplex */ + int (*read_status)(struct phy_device *phydev); + + /* Clears any pending interrupts */ + int (*ack_interrupt)(struct phy_device *phydev); + + /* Enables or disables interrupts */ + int (*config_intr)(struct phy_device *phydev); + + /* + * Checks if the PHY generated an interrupt. + * For multi-PHY devices with shared PHY interrupt pin + */ + int (*did_interrupt)(struct phy_device *phydev); + + /* Clears up any memory if needed */ + void (*remove)(struct phy_device *phydev); + + /* Returns true if this is a suitable driver for the given + * phydev. If NULL, matching is based on phy_id and + * phy_id_mask. + */ + int (*match_phy_device)(struct phy_device *phydev); + + /* Handles ethtool queries for hardware time stamping. */ + int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti); + + /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */ + int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr); + + /* + * Requests a Rx timestamp for 'skb'. If the skb is accepted, + * the phy driver promises to deliver it using netif_rx() as + * soon as a timestamp becomes available. One of the + * PTP_CLASS_ values is passed in 'type'. The function must + * return true if the skb is accepted for delivery. + */ + bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + + /* + * Requests a Tx timestamp for 'skb'. The phy driver promises + * to deliver it using skb_complete_tx_timestamp() as soon as a + * timestamp becomes available. One of the PTP_CLASS_ values + * is passed in 'type'. + */ + void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + + /* Some devices (e.g. qnap TS-119P II) require PHY register changes to + * enable Wake on LAN, so set_wol is provided to be called in the + * ethernet driver's set_wol function. */ + int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); + + /* See set_wol, but for checking whether Wake on LAN is enabled. */ + void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); + + /* + * Called to inform a PHY device driver when the core is about to + * change the link state. This callback is supposed to be used as + * fixup hook for drivers that need to take action when the link + * state changes. 
Drivers are by no means allowed to mess with the + * PHY device structure in their implementations. + */ + void (*link_change_notify)(struct phy_device *dev); + + /* + * Phy specific driver override for reading a MMD register. + * This function is optional for PHY specific drivers. When + * not provided, the default MMD read function will be used + * by phy_read_mmd(), which will use either a direct read for + * Clause 45 PHYs or an indirect read for Clause 22 PHYs. + * devnum is the MMD device number within the PHY device, + * regnum is the register within the selected MMD device. + */ + int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum); + + /* + * Phy specific driver override for writing a MMD register. + * This function is optional for PHY specific drivers. When + * not provided, the default MMD write function will be used + * by phy_write_mmd(), which will use either a direct write for + * Clause 45 PHYs, or an indirect write for Clause 22 PHYs. + * devnum is the MMD device number within the PHY device, + * regnum is the register within the selected MMD device. + * val is the value to be written. + */ + int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum, + u16 val); + + int (*read_page)(struct phy_device *dev); + int (*write_page)(struct phy_device *dev, int page); + + /* Get the size and type of the eeprom contained within a plug-in + * module */ + int (*module_info)(struct phy_device *dev, + struct ethtool_modinfo *modinfo); + + /* Get the eeprom information from the plug-in module */ + int (*module_eeprom)(struct phy_device *dev, + struct ethtool_eeprom *ee, u8 *data); + + /* Get statistics from the phy using ethtool */ + int (*get_sset_count)(struct phy_device *dev); + void (*get_strings)(struct phy_device *dev, u8 *data); + void (*get_stats)(struct phy_device *dev, + struct ethtool_stats *stats, u64 *data); + + /* Get and Set PHY tunables */ + int (*get_tunable)(struct phy_device *dev, + struct ethtool_tunable *tuna, void *data); + int (*set_tunable)(struct phy_device *dev, + struct ethtool_tunable *tuna, + const void *data); + int (*set_loopback)(struct phy_device *dev, bool enable); +}; +#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ + struct phy_driver, mdiodrv) + +#define PHY_ANY_ID "MATCH ANY PHY" +#define PHY_ANY_UID 0xffffffff + +/* A Structure for boards to register fixups with the PHY Lib */ +struct phy_fixup { + struct list_head list; + char bus_id[MII_BUS_ID_SIZE + 3]; + u32 phy_uid; + u32 phy_uid_mask; + int (*run)(struct phy_device *phydev); +}; + +const char *phy_speed_to_str(int speed); +const char *phy_duplex_to_str(unsigned int duplex); + +/* A structure for mapping a particular speed and duplex + * combination to a particular SUPPORTED and ADVERTISED value + */ +struct phy_setting { + u32 speed; + u8 duplex; + u8 bit; +}; + +const struct phy_setting * +phy_lookup_setting(int speed, int duplex, const unsigned long *mask, + size_t maxbit, bool exact); +size_t phy_speeds(unsigned int *speeds, size_t size, + unsigned long *mask, size_t maxbit); + +void phy_resolve_aneg_linkmode(struct phy_device *phydev); + +/** + * phy_read_mmd - Convenience function for reading a register + * from an MMD on a given PHY. 
+ * @phydev: The phy_device struct + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * + * Same rules as for phy_read(); + */ +int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); + +/** + * phy_read - Convenience function for reading a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to read + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +static inline int phy_read(struct phy_device *phydev, u32 regnum) +{ + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); +} + +/** + * __phy_read - convenience function for reading a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to read + * + * The caller must have taken the MDIO bus lock. + */ +static inline int __phy_read(struct phy_device *phydev, u32 regnum) +{ + return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); +} + +/** + * phy_write - Convenience function for writing a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: value to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) +{ + return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); +} + +/** + * __phy_write - Convenience function for writing a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: value to write to @regnum + * + * The caller must have taken the MDIO bus lock. + */ +static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) +{ + return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, + val); +} + +int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); +int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); + +/** + * __phy_set_bits - Convenience function for setting bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to set + * + * The caller must have taken the MDIO bus lock. + */ +static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) +{ + return __phy_modify(phydev, regnum, 0, val); +} + +/** + * __phy_clear_bits - Convenience function for clearing bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to clear + * + * The caller must have taken the MDIO bus lock. 
+ */ +static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum, + u16 val) +{ + return __phy_modify(phydev, regnum, val, 0); +} + +/** + * phy_set_bits - Convenience function for setting bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to set + */ +static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) +{ + return phy_modify(phydev, regnum, 0, val); +} + +/** + * phy_clear_bits - Convenience function for clearing bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to clear + */ +static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val) +{ + return phy_modify(phydev, regnum, val, 0); +} + +/** + * phy_interrupt_is_valid - Convenience function for testing a given PHY irq + * @phydev: the phy_device struct + * + * NOTE: must be kept in sync with addition/removal of PHY_POLL and + * PHY_IGNORE_INTERRUPT + */ +static inline bool phy_interrupt_is_valid(struct phy_device *phydev) +{ + return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT; +} + +/** + * phy_polling_mode - Convenience function for testing whether polling is + * used to detect PHY status changes + * @phydev: the phy_device struct + */ +static inline bool phy_polling_mode(struct phy_device *phydev) +{ + return phydev->irq == PHY_POLL; +} + +/** + * phy_is_internal - Convenience function for testing if a PHY is internal + * @phydev: the phy_device struct + */ +static inline bool phy_is_internal(struct phy_device *phydev) +{ + return phydev->is_internal; +} + +/** + * phy_interface_mode_is_rgmii - Convenience function for testing if a + * PHY interface mode is RGMII (all variants) + * @mode: the phy_interface_t enum + */ +static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode) +{ + return mode >= PHY_INTERFACE_MODE_RGMII && + mode <= PHY_INTERFACE_MODE_RGMII_TXID; +}; + +/** + * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z + * negotiation + * @mode: one of &enum phy_interface_t + * + * Returns true if the phy interface mode uses the 16-bit negotiation + * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding) + */ +static inline bool phy_interface_mode_is_8023z(phy_interface_t mode) +{ + return mode == PHY_INTERFACE_MODE_1000BASEX || + mode == PHY_INTERFACE_MODE_2500BASEX; +} + +/** + * phy_interface_is_rgmii - Convenience function for testing if a PHY interface + * is RGMII (all variants) + * @phydev: the phy_device struct + */ +static inline bool phy_interface_is_rgmii(struct phy_device *phydev) +{ + return phy_interface_mode_is_rgmii(phydev->interface); +}; + +/* + * phy_is_pseudo_fixed_link - Convenience function for testing if this + * PHY is the CPU port facing side of an Ethernet switch, or similar. + * @phydev: the phy_device struct + */ +static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev) +{ + return phydev->is_pseudo_fixed_link; +} + +/** + * phy_write_mmd - Convenience function for writing a register + * on an MMD on a given PHY. 
+ * @phydev: The phy_device struct + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * @val: value to write to @regnum + * + * Same rules as for phy_write(); + */ +int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); + +int phy_save_page(struct phy_device *phydev); +int phy_select_page(struct phy_device *phydev, int page); +int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); +int phy_read_paged(struct phy_device *phydev, int page, u32 regnum); +int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val); +int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set); + +struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, + bool is_c45, + struct phy_c45_device_ids *c45_ids); +#if IS_ENABLED(CONFIG_PHYLIB) +struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); +int phy_device_register(struct phy_device *phy); +void phy_device_free(struct phy_device *phydev); +#else +static inline +struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) +{ + return NULL; +} + +static inline int phy_device_register(struct phy_device *phy) +{ + return 0; +} + +static inline void phy_device_free(struct phy_device *phydev) { } +#endif /* CONFIG_PHYLIB */ +void phy_device_remove(struct phy_device *phydev); +int phy_init_hw(struct phy_device *phydev); +int phy_suspend(struct phy_device *phydev); +int phy_resume(struct phy_device *phydev); +int __phy_resume(struct phy_device *phydev); +int phy_loopback(struct phy_device *phydev, bool enable); +struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, + phy_interface_t interface); +struct phy_device *phy_find_first(struct mii_bus *bus); +int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, + u32 flags, phy_interface_t interface); +int phy_connect_direct(struct net_device *dev, struct phy_device *phydev, + void (*handler)(struct net_device *), + phy_interface_t interface); +struct phy_device *phy_connect(struct net_device *dev, const char *bus_id, + void (*handler)(struct net_device *), + phy_interface_t interface); +void phy_disconnect(struct phy_device *phydev); +void phy_detach(struct phy_device *phydev); +void phy_start(struct phy_device *phydev); +void phy_stop(struct phy_device *phydev); +int phy_start_aneg(struct phy_device *phydev); +int phy_aneg_done(struct phy_device *phydev); +int phy_speed_down(struct phy_device *phydev, bool sync); +int phy_speed_up(struct phy_device *phydev); + +int phy_stop_interrupts(struct phy_device *phydev); +int phy_restart_aneg(struct phy_device *phydev); +int phy_reset_after_clk_enable(struct phy_device *phydev); + +static inline void phy_device_reset(struct phy_device *phydev, int value) +{ + mdio_device_reset(&phydev->mdio, value); +} + +#define phydev_err(_phydev, format, args...) \ + dev_err(&_phydev->mdio.dev, format, ##args) + +#define phydev_dbg(_phydev, format, args...) \ + dev_dbg(&_phydev->mdio.dev, format, ##args) + +static inline const char *phydev_name(const struct phy_device *phydev) +{ + return dev_name(&phydev->mdio.dev); +} + +void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) 
+ __printf(2, 3); +void phy_attached_info(struct phy_device *phydev); + +/* Clause 22 PHY */ +int genphy_config_init(struct phy_device *phydev); +int genphy_setup_forced(struct phy_device *phydev); +int genphy_restart_aneg(struct phy_device *phydev); +int genphy_config_aneg(struct phy_device *phydev); +int genphy_aneg_done(struct phy_device *phydev); +int genphy_update_link(struct phy_device *phydev); +int genphy_read_status(struct phy_device *phydev); +int genphy_suspend(struct phy_device *phydev); +int genphy_resume(struct phy_device *phydev); +int genphy_loopback(struct phy_device *phydev, bool enable); +int genphy_soft_reset(struct phy_device *phydev); +static inline int genphy_no_soft_reset(struct phy_device *phydev) +{ + return 0; +} +int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, + u16 regnum); +int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, + u16 regnum, u16 val); + +/* Clause 45 PHY */ +int genphy_c45_restart_aneg(struct phy_device *phydev); +int genphy_c45_aneg_done(struct phy_device *phydev); +int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask); +int genphy_c45_read_lpa(struct phy_device *phydev); +int genphy_c45_read_pma(struct phy_device *phydev); +int genphy_c45_pma_setup_forced(struct phy_device *phydev); +int genphy_c45_an_disable_aneg(struct phy_device *phydev); +int genphy_c45_read_mdix(struct phy_device *phydev); + +/* The gen10g_* functions are the old Clause 45 stub */ +int gen10g_config_aneg(struct phy_device *phydev); +int gen10g_read_status(struct phy_device *phydev); +int gen10g_no_soft_reset(struct phy_device *phydev); +int gen10g_config_init(struct phy_device *phydev); +int gen10g_suspend(struct phy_device *phydev); +int gen10g_resume(struct phy_device *phydev); + +static inline int phy_read_status(struct phy_device *phydev) +{ + if (!phydev->drv) + return -EIO; + + if (phydev->drv->read_status) + return phydev->drv->read_status(phydev); + else + return genphy_read_status(phydev); +} + +void phy_driver_unregister(struct phy_driver *drv); +void phy_drivers_unregister(struct phy_driver *drv, int n); +int phy_driver_register(struct phy_driver *new_driver, struct module *owner); +int phy_drivers_register(struct phy_driver *new_driver, int n, + struct module *owner); +void phy_state_machine(struct work_struct *work); +void phy_change_work(struct work_struct *work); +void phy_mac_interrupt(struct phy_device *phydev); +void phy_start_machine(struct phy_device *phydev); +void phy_stop_machine(struct phy_device *phydev); +void phy_trigger_machine(struct phy_device *phydev, bool sync); +int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); +void phy_ethtool_ksettings_get(struct phy_device *phydev, + struct ethtool_link_ksettings *cmd); +int phy_ethtool_ksettings_set(struct phy_device *phydev, + const struct ethtool_link_ksettings *cmd); +int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); +int phy_start_interrupts(struct phy_device *phydev); +void phy_print_status(struct phy_device *phydev); +int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); + +int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, + int (*run)(struct phy_device *)); +int phy_register_fixup_for_id(const char *bus_id, + int (*run)(struct phy_device *)); +int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, + int (*run)(struct phy_device *)); + +int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask); +int phy_unregister_fixup_for_id(const char 
*bus_id); +int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask); + +int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable); +int phy_get_eee_err(struct phy_device *phydev); +int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data); +int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data); +int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); +void phy_ethtool_get_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol); +int phy_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd); +int phy_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd); +int phy_ethtool_nway_reset(struct net_device *ndev); + +#if IS_ENABLED(CONFIG_PHYLIB) +int __init mdio_bus_init(void); +void mdio_bus_exit(void); +#endif + +/* Inline function for use within net/core/ethtool.c (built-in) */ +static inline int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) +{ + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + phydev->drv->get_strings(phydev, data); + mutex_unlock(&phydev->lock); + + return 0; +} + +static inline int phy_ethtool_get_sset_count(struct phy_device *phydev) +{ + int ret; + + if (!phydev->drv) + return -EIO; + + if (phydev->drv->get_sset_count && + phydev->drv->get_strings && + phydev->drv->get_stats) { + mutex_lock(&phydev->lock); + ret = phydev->drv->get_sset_count(phydev); + mutex_unlock(&phydev->lock); + + return ret; + } + + return -EOPNOTSUPP; +} + +static inline int phy_ethtool_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + phydev->drv->get_stats(phydev, stats, data); + mutex_unlock(&phydev->lock); + + return 0; +} + +extern struct bus_type mdio_bus_type; + +struct mdio_board_info { + const char *bus_id; + char modalias[MDIO_NAME_SIZE]; + int mdio_addr; + const void *platform_data; +}; + +#if IS_ENABLED(CONFIG_MDIO_DEVICE) +int mdiobus_register_board_info(const struct mdio_board_info *info, + unsigned int n); +#else +static inline int mdiobus_register_board_info(const struct mdio_board_info *i, + unsigned int n) +{ + return 0; +} +#endif + + +/** + * module_phy_driver() - Helper macro for registering PHY drivers + * @__phy_drivers: array of PHY drivers to register + * + * Helper macro for PHY drivers which do not do anything special in module + * init/exit. Each module may only use this macro once, and calling it + * replaces module_init() and module_exit(). + */ +#define phy_module_driver(__phy_drivers, __count) \ +static int __init phy_module_init(void) \ +{ \ + return phy_drivers_register(__phy_drivers, __count, THIS_MODULE); \ +} \ +module_init(phy_module_init); \ +static void __exit phy_module_exit(void) \ +{ \ + phy_drivers_unregister(__phy_drivers, __count); \ +} \ +module_exit(phy_module_exit) + +#define module_phy_driver(__phy_drivers) \ + phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) + +#endif /* __PHY_H */ diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h new file mode 100644 index 000000000..eb7d4a135 --- /dev/null +++ b/include/linux/phy/omap_control_phy.h @@ -0,0 +1,99 @@ +/* + * omap_control_phy.h - Header file for the PHY part of control module. 
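For illustration, a minimal phylib driver that leans on the genphy_* defaults and registers itself with the module_phy_driver() helper documented above; the PHY ID, the name and the usual struct phy_driver fields are assumptions of this sketch, not part of the header:

#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver example_phy_drivers[] = {
{
	.phy_id		= 0x00112233,		/* fictitious OUI/model */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example 10/100 PHY",
	.features	= PHY_BASIC_FEATURES,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.suspend	= genphy_suspend,
	.resume		= genphy_resume,
},
};

module_phy_driver(example_phy_drivers);

static struct mdio_device_id __maybe_unused example_phy_tbl[] = {
	{ 0x00112233, 0xfffffff0 },
	{ }
};
MODULE_DEVICE_TABLE(mdio, example_phy_tbl);
MODULE_LICENSE("GPL");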
+ * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Author: Kishon Vijay Abraham I + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __OMAP_CONTROL_PHY_H__ +#define __OMAP_CONTROL_PHY_H__ + +enum omap_control_phy_type { + OMAP_CTRL_TYPE_OTGHS = 1, /* Mailbox OTGHS_CONTROL */ + OMAP_CTRL_TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */ + OMAP_CTRL_TYPE_PIPE3, /* PIPE3 PHY, DPLL & seperate Rx/Tx power */ + OMAP_CTRL_TYPE_PCIE, /* RX TX control of ACSPCIE */ + OMAP_CTRL_TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */ + OMAP_CTRL_TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */ +}; + +struct omap_control_phy { + struct device *dev; + + u32 __iomem *otghs_control; + u32 __iomem *power; + u32 __iomem *power_aux; + u32 __iomem *pcie_pcs; + + struct clk *sys_clk; + + enum omap_control_phy_type type; +}; + +enum omap_control_usb_mode { + USB_MODE_UNDEFINED = 0, + USB_MODE_HOST, + USB_MODE_DEVICE, + USB_MODE_DISCONNECT, +}; + +#define OMAP_CTRL_DEV_PHY_PD BIT(0) + +#define OMAP_CTRL_DEV_AVALID BIT(0) +#define OMAP_CTRL_DEV_BVALID BIT(1) +#define OMAP_CTRL_DEV_VBUSVALID BIT(2) +#define OMAP_CTRL_DEV_SESSEND BIT(3) +#define OMAP_CTRL_DEV_IDDIG BIT(4) + +#define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK 0x003FC000 +#define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT 0xE + +#define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK 0xFFC00000 +#define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT 0x16 + +#define OMAP_CTRL_PIPE3_PHY_TX_RX_POWERON 0x3 +#define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0 + +#define OMAP_CTRL_PCIE_PCS_MASK 0xff +#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 16 + +#define OMAP_CTRL_USB2_PHY_PD BIT(28) + +#define AM437X_CTRL_USB2_PHY_PD BIT(0) +#define AM437X_CTRL_USB2_OTG_PD BIT(1) +#define AM437X_CTRL_USB2_OTGVDET_EN BIT(19) +#define AM437X_CTRL_USB2_OTGSESSEND_EN BIT(20) + +#if IS_ENABLED(CONFIG_OMAP_CONTROL_PHY) +void omap_control_phy_power(struct device *dev, int on); +void omap_control_usb_set_mode(struct device *dev, + enum omap_control_usb_mode mode); +void omap_control_pcie_pcs(struct device *dev, u8 delay); +#else + +static inline void omap_control_phy_power(struct device *dev, int on) +{ +} + +static inline void omap_control_usb_set_mode(struct device *dev, + enum omap_control_usb_mode mode) +{ +} + +static inline void omap_control_pcie_pcs(struct device *dev, u8 delay) +{ +} +#endif + +#endif /* __OMAP_CONTROL_PHY_H__ */ diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h new file mode 100644 index 000000000..2e5fb870e --- /dev/null +++ b/include/linux/phy/omap_usb.h @@ -0,0 +1,100 @@ +/* + * omap_usb.h -- omap usb2 phy header file + * + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
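A short usage sketch of the control-module hooks declared above; "control_dev" is assumed to have been resolved by the consuming PHY driver:

	/* Power the PHY and report device mode to the control module ... */
	omap_control_phy_power(control_dev, 1);
	omap_control_usb_set_mode(control_dev, USB_MODE_DEVICE);

	/* ... and the reverse on teardown. */
	omap_control_usb_set_mode(control_dev, USB_MODE_DISCONNECT);
	omap_control_phy_power(control_dev, 0);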
+ * + * Author: Kishon Vijay Abraham I + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __DRIVERS_OMAP_USB2_H +#define __DRIVERS_OMAP_USB2_H + +#include +#include + +struct usb_dpll_params { + u16 m; + u8 n; + u8 freq:3; + u8 sd; + u32 mf; +}; + +enum omap_usb_phy_type { + TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */ + TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */ + TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */ +}; + +struct omap_usb { + struct usb_phy phy; + struct phy_companion *comparator; + void __iomem *pll_ctrl_base; + void __iomem *phy_base; + struct device *dev; + struct device *control_dev; + struct clk *wkupclk; + struct clk *optclk; + u8 flags; + enum omap_usb_phy_type type; + struct regmap *syscon_phy_power; /* ctrl. reg. acces */ + unsigned int power_reg; /* power reg. index within syscon */ + u32 mask; + u32 power_on; + u32 power_off; +}; + +struct usb_phy_data { + const char *label; + u8 flags; + u32 mask; + u32 power_on; + u32 power_off; +}; + +/* Driver Flags */ +#define OMAP_USB2_HAS_START_SRP (1 << 0) +#define OMAP_USB2_HAS_SET_VBUS (1 << 1) +#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2) + +#define OMAP_DEV_PHY_PD BIT(0) +#define OMAP_USB2_PHY_PD BIT(28) + +#define AM437X_USB2_PHY_PD BIT(0) +#define AM437X_USB2_OTG_PD BIT(1) +#define AM437X_USB2_OTGVDET_EN BIT(19) +#define AM437X_USB2_OTGSESSEND_EN BIT(20) + +#define phy_to_omapusb(x) container_of((x), struct omap_usb, phy) + +#if defined(CONFIG_OMAP_USB2) || defined(CONFIG_OMAP_USB2_MODULE) +extern int omap_usb2_set_comparator(struct phy_companion *comparator); +#else +static inline int omap_usb2_set_comparator(struct phy_companion *comparator) +{ + return -ENODEV; +} +#endif + +static inline u32 omap_usb_readl(void __iomem *addr, unsigned offset) +{ + return __raw_readl(addr + offset); +} + +static inline void omap_usb_writel(void __iomem *addr, unsigned offset, + u32 data) +{ + __raw_writel(data, addr + offset); +} + +#endif /* __DRIVERS_OMAP_USB_H */ diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h new file mode 100644 index 000000000..0a2c18a97 --- /dev/null +++ b/include/linux/phy/phy-qcom-ufs.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef PHY_QCOM_UFS_H_ +#define PHY_QCOM_UFS_H_ + +#include "phy.h" + +/** + * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device + * ref clock. + * @phy: reference to a generic phy. + */ +void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy); + +/** + * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device + * ref clock. + * @phy: reference to a generic phy. 
+ */
+void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
+
+int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
+void ufs_qcom_phy_save_controller_version(struct phy *phy,
+			u8 major, u16 minor, u16 step);
+
+#endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h
new file mode 100644
index 000000000..50aed92ea
--- /dev/null
+++ b/include/linux/phy/phy-sun4i-usb.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015 Hans de Goede
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PHY_SUN4I_USB_H_
+#define PHY_SUN4I_USB_H_
+
+#include "phy.h"
+
+/**
+ * sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect
+ * @phy: reference to a sun4i usb phy
+ * @enabled: whether to enable or disable squelch detect
+ */
+void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled);
+
+#endif
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
new file mode 100644
index 000000000..9713aebdd
--- /dev/null
+++ b/include/linux/phy/phy.h
@@ -0,0 +1,425 @@
+/*
+ * phy.h -- generic phy header file
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Kishon Vijay Abraham I
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */ + +#ifndef __DRIVERS_PHY_H +#define __DRIVERS_PHY_H + +#include +#include +#include +#include +#include + +struct phy; + +enum phy_mode { + PHY_MODE_INVALID, + PHY_MODE_USB_HOST, + PHY_MODE_USB_HOST_LS, + PHY_MODE_USB_HOST_FS, + PHY_MODE_USB_HOST_HS, + PHY_MODE_USB_HOST_SS, + PHY_MODE_USB_DEVICE, + PHY_MODE_USB_DEVICE_LS, + PHY_MODE_USB_DEVICE_FS, + PHY_MODE_USB_DEVICE_HS, + PHY_MODE_USB_DEVICE_SS, + PHY_MODE_USB_OTG, + PHY_MODE_SGMII, + PHY_MODE_2500SGMII, + PHY_MODE_10GKR, + PHY_MODE_UFS_HS_A, + PHY_MODE_UFS_HS_B, +}; + +/** + * struct phy_ops - set of function pointers for performing phy operations + * @init: operation to be performed for initializing phy + * @exit: operation to be performed while exiting + * @power_on: powering on the phy + * @power_off: powering off the phy + * @set_mode: set the mode of the phy + * @reset: resetting the phy + * @calibrate: calibrate the phy + * @owner: the module owner containing the ops + */ +struct phy_ops { + int (*init)(struct phy *phy); + int (*exit)(struct phy *phy); + int (*power_on)(struct phy *phy); + int (*power_off)(struct phy *phy); + int (*set_mode)(struct phy *phy, enum phy_mode mode); + int (*reset)(struct phy *phy); + int (*calibrate)(struct phy *phy); + struct module *owner; +}; + +/** + * struct phy_attrs - represents phy attributes + * @bus_width: Data path width implemented by PHY + */ +struct phy_attrs { + u32 bus_width; + enum phy_mode mode; +}; + +/** + * struct phy - represents the phy device + * @dev: phy device + * @id: id of the phy device + * @ops: function pointers for performing phy operations + * @init_data: list of PHY consumers (non-dt only) + * @mutex: mutex to protect phy_ops + * @init_count: used to protect when the PHY is used by multiple consumers + * @power_count: used to protect when the PHY is used by multiple consumers + * @attrs: used to specify PHY specific attributes + * @pwr: power regulator associated with the phy + */ +struct phy { + struct device dev; + int id; + const struct phy_ops *ops; + struct mutex mutex; + int init_count; + int power_count; + struct phy_attrs attrs; + struct regulator *pwr; +}; + +/** + * struct phy_provider - represents the phy provider + * @dev: phy provider device + * @children: can be used to override the default (dev->of_node) child node + * @owner: the module owner having of_xlate + * @list: to maintain a linked list of PHY providers + * @of_xlate: function pointer to obtain phy instance from phy pointer + */ +struct phy_provider { + struct device *dev; + struct device_node *children; + struct module *owner; + struct list_head list; + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args); +}; + +/** + * struct phy_lookup - PHY association in list of phys managed by the phy driver + * @node: list node + * @dev_id: the device of the association + * @con_id: connection ID string on device + * @phy: the phy of the association + */ +struct phy_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct phy *phy; +}; + +#define to_phy(a) (container_of((a), struct phy, dev)) + +#define of_phy_provider_register(dev, xlate) \ + __of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate)) + +#define devm_of_phy_provider_register(dev, xlate) \ + __devm_of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate)) + +#define of_phy_provider_register_full(dev, children, xlate) \ + __of_phy_provider_register(dev, children, THIS_MODULE, xlate) + +#define devm_of_phy_provider_register_full(dev, children, xlate) \ + 
__devm_of_phy_provider_register(dev, children, THIS_MODULE, xlate) + +static inline void phy_set_drvdata(struct phy *phy, void *data) +{ + dev_set_drvdata(&phy->dev, data); +} + +static inline void *phy_get_drvdata(struct phy *phy) +{ + return dev_get_drvdata(&phy->dev); +} + +#if IS_ENABLED(CONFIG_GENERIC_PHY) +int phy_pm_runtime_get(struct phy *phy); +int phy_pm_runtime_get_sync(struct phy *phy); +int phy_pm_runtime_put(struct phy *phy); +int phy_pm_runtime_put_sync(struct phy *phy); +void phy_pm_runtime_allow(struct phy *phy); +void phy_pm_runtime_forbid(struct phy *phy); +int phy_init(struct phy *phy); +int phy_exit(struct phy *phy); +int phy_power_on(struct phy *phy); +int phy_power_off(struct phy *phy); +int phy_set_mode(struct phy *phy, enum phy_mode mode); +static inline enum phy_mode phy_get_mode(struct phy *phy) +{ + return phy->attrs.mode; +} +int phy_reset(struct phy *phy); +int phy_calibrate(struct phy *phy); +static inline int phy_get_bus_width(struct phy *phy) +{ + return phy->attrs.bus_width; +} +static inline void phy_set_bus_width(struct phy *phy, int bus_width) +{ + phy->attrs.bus_width = bus_width; +} +struct phy *phy_get(struct device *dev, const char *string); +struct phy *phy_optional_get(struct device *dev, const char *string); +struct phy *devm_phy_get(struct device *dev, const char *string); +struct phy *devm_phy_optional_get(struct device *dev, const char *string); +struct phy *devm_of_phy_get(struct device *dev, struct device_node *np, + const char *con_id); +struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np, + int index); +void phy_put(struct phy *phy); +void devm_phy_put(struct device *dev, struct phy *phy); +struct phy *of_phy_get(struct device_node *np, const char *con_id); +struct phy *of_phy_simple_xlate(struct device *dev, + struct of_phandle_args *args); +struct phy *phy_create(struct device *dev, struct device_node *node, + const struct phy_ops *ops); +struct phy *devm_phy_create(struct device *dev, struct device_node *node, + const struct phy_ops *ops); +void phy_destroy(struct phy *phy); +void devm_phy_destroy(struct device *dev, struct phy *phy); +struct phy_provider *__of_phy_provider_register(struct device *dev, + struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)); +struct phy_provider *__devm_of_phy_provider_register(struct device *dev, + struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)); +void of_phy_provider_unregister(struct phy_provider *phy_provider); +void devm_of_phy_provider_unregister(struct device *dev, + struct phy_provider *phy_provider); +int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id); +void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id); +#else +static inline int phy_pm_runtime_get(struct phy *phy) +{ + if (!phy) + return 0; + return -ENOSYS; +} + +static inline int phy_pm_runtime_get_sync(struct phy *phy) +{ + if (!phy) + return 0; + return -ENOSYS; +} + +static inline int phy_pm_runtime_put(struct phy *phy) +{ + if (!phy) + return 0; + return -ENOSYS; +} + +static inline int phy_pm_runtime_put_sync(struct phy *phy) +{ + if (!phy) + return 0; + return -ENOSYS; +} + +static inline void phy_pm_runtime_allow(struct phy *phy) +{ + return; +} + +static inline void phy_pm_runtime_forbid(struct phy *phy) +{ + return; +} + +static inline int phy_init(struct phy *phy) +{ + if (!phy) + return 
0; + return -ENOSYS; +} + +static inline int phy_exit(struct phy *phy) +{ + if (!phy) + return 0; + return -ENOSYS; +} + +static inline int phy_power_on(struct phy *phy) +{ + if (!phy) + return 0; + return -ENOSYS; +} + +static inline int phy_power_off(struct phy *phy) +{ + if (!phy) + return 0; + return -ENOSYS; +} + +static inline int phy_set_mode(struct phy *phy, enum phy_mode mode) +{ + if (!phy) + return 0; + return -ENOSYS; +} + +static inline enum phy_mode phy_get_mode(struct phy *phy) +{ + return PHY_MODE_INVALID; +} + +static inline int phy_reset(struct phy *phy) +{ + if (!phy) + return 0; + return -ENOSYS; +} + +static inline int phy_calibrate(struct phy *phy) +{ + if (!phy) + return 0; + return -ENOSYS; +} + +static inline int phy_get_bus_width(struct phy *phy) +{ + return -ENOSYS; +} + +static inline void phy_set_bus_width(struct phy *phy, int bus_width) +{ + return; +} + +static inline struct phy *phy_get(struct device *dev, const char *string) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct phy *phy_optional_get(struct device *dev, + const char *string) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct phy *devm_phy_get(struct device *dev, const char *string) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct phy *devm_phy_optional_get(struct device *dev, + const char *string) +{ + return NULL; +} + +static inline struct phy *devm_of_phy_get(struct device *dev, + struct device_node *np, + const char *con_id) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct phy *devm_of_phy_get_by_index(struct device *dev, + struct device_node *np, + int index) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void phy_put(struct phy *phy) +{ +} + +static inline void devm_phy_put(struct device *dev, struct phy *phy) +{ +} + +static inline struct phy *of_phy_get(struct device_node *np, const char *con_id) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct phy *of_phy_simple_xlate(struct device *dev, + struct of_phandle_args *args) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct phy *phy_create(struct device *dev, + struct device_node *node, + const struct phy_ops *ops) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct phy *devm_phy_create(struct device *dev, + struct device_node *node, + const struct phy_ops *ops) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void phy_destroy(struct phy *phy) +{ +} + +static inline void devm_phy_destroy(struct device *dev, struct phy *phy) +{ +} + +static inline struct phy_provider *__of_phy_provider_register( + struct device *dev, struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct phy_provider *__devm_of_phy_provider_register(struct device + *dev, struct device_node *children, struct module *owner, + struct phy * (*of_xlate)(struct device *dev, + struct of_phandle_args *args)) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void of_phy_provider_unregister(struct phy_provider *phy_provider) +{ +} + +static inline void devm_of_phy_provider_unregister(struct device *dev, + struct phy_provider *phy_provider) +{ +} +static inline int +phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id) +{ + return 0; +} +static inline void phy_remove_lookup(struct phy *phy, const char *con_id, + const char *dev_id) { } +#endif + +#endif /* __DRIVERS_PHY_H */ diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h new file mode 100644 index 
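A consumer-side sketch of the generic PHY API declared above; the platform-driver context and the "usb" con_id are assumptions. A missing optional PHY yields NULL, which phy_init() and the other calls accept:

static int example_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_optional_get(&pdev->dev, "usb");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);
	if (ret)
		return ret;

	ret = phy_power_on(phy);
	if (ret)
		phy_exit(phy);

	return ret;
}

Teardown runs in the opposite order: phy_power_off() followed by phy_exit().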
000000000..8e1a57a78
--- /dev/null
+++ b/include/linux/phy/tegra/xusb.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef PHY_TEGRA_XUSB_H
+#define PHY_TEGRA_XUSB_H
+
+struct tegra_xusb_padctl;
+struct device;
+
+struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev);
+void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl);
+
+int tegra_xusb_padctl_usb3_save_context(struct tegra_xusb_padctl *padctl,
+					unsigned int port);
+int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
+				    unsigned int port, bool idle);
+int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
+					   unsigned int port, bool enable);
+
+#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/phy/ulpi_phy.h b/include/linux/phy/ulpi_phy.h
new file mode 100644
index 000000000..7054b4403
--- /dev/null
+++ b/include/linux/phy/ulpi_phy.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include
+
+/**
+ * Helper that registers PHY for a ULPI device and adds a lookup for binding it
+ * and its controller, which is always the parent.
+ */
+static inline struct phy
+*ulpi_phy_create(struct ulpi *ulpi, const struct phy_ops *ops)
+{
+	struct phy *phy;
+	int ret;
+
+	phy = phy_create(&ulpi->dev, NULL, ops);
+	if (IS_ERR(phy))
+		return phy;
+
+	ret = phy_create_lookup(phy, "usb2-phy", dev_name(ulpi->dev.parent));
+	if (ret) {
+		phy_destroy(phy);
+		return ERR_PTR(ret);
+	}
+
+	return phy;
+}
+
+/* Remove a PHY that was created with ulpi_phy_create() and its lookup.
*/ +static inline void ulpi_phy_destroy(struct ulpi *ulpi, struct phy *phy) +{ + phy_remove_lookup(phy, "usb2-phy", dev_name(ulpi->dev.parent)); + phy_destroy(phy); +} diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h new file mode 100644 index 000000000..ee54453a4 --- /dev/null +++ b/include/linux/phy_fixed.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PHY_FIXED_H +#define __PHY_FIXED_H + +struct fixed_phy_status { + int link; + int speed; + int duplex; + int pause; + int asym_pause; +}; + +struct device_node; + +#if IS_ENABLED(CONFIG_FIXED_PHY) +extern int fixed_phy_add(unsigned int irq, int phy_id, + struct fixed_phy_status *status, + int link_gpio); +extern struct phy_device *fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + int link_gpio, + struct device_node *np); +extern void fixed_phy_unregister(struct phy_device *phydev); +extern int fixed_phy_set_link_update(struct phy_device *phydev, + int (*link_update)(struct net_device *, + struct fixed_phy_status *)); +#else +static inline int fixed_phy_add(unsigned int irq, int phy_id, + struct fixed_phy_status *status, + int link_gpio) +{ + return -ENODEV; +} +static inline struct phy_device *fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + int gpio_link, + struct device_node *np) +{ + return ERR_PTR(-ENODEV); +} +static inline void fixed_phy_unregister(struct phy_device *phydev) +{ +} +static inline int fixed_phy_set_link_update(struct phy_device *phydev, + int (*link_update)(struct net_device *, + struct fixed_phy_status *)) +{ + return -ENODEV; +} +#endif /* CONFIG_FIXED_PHY */ + +#endif /* __PHY_FIXED_H */ diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h new file mode 100644 index 000000000..b37b05bfd --- /dev/null +++ b/include/linux/phy_led_triggers.h @@ -0,0 +1,51 @@ +/* Copyright (C) 2016 National Instruments Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
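A sketch of how a ULPI PHY driver might use the two helpers above; "priv" and "example_ops" are placeholders for the driver's private state and its phy_ops:

	/* In the ULPI driver's probe path: */
	priv->phy = ulpi_phy_create(ulpi, &example_ops);
	if (IS_ERR(priv->phy))
		return PTR_ERR(priv->phy);

	/* ... and in its remove path: */
	ulpi_phy_destroy(ulpi, priv->phy);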
+ */ +#ifndef __PHY_LED_TRIGGERS +#define __PHY_LED_TRIGGERS + +struct phy_device; + +#ifdef CONFIG_LED_TRIGGER_PHY + +#include +#include + +#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 + +#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \ + FIELD_SIZEOF(struct mdio_device, addr)+\ + PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) + +struct phy_led_trigger { + struct led_trigger trigger; + char name[PHY_LINK_LED_TRIGGER_NAME_SIZE]; + unsigned int speed; +}; + + +extern int phy_led_triggers_register(struct phy_device *phy); +extern void phy_led_triggers_unregister(struct phy_device *phy); +extern void phy_led_trigger_change_speed(struct phy_device *phy); + +#else + +static inline int phy_led_triggers_register(struct phy_device *phy) +{ + return 0; +} +static inline void phy_led_triggers_unregister(struct phy_device *phy) { } +static inline void phy_led_trigger_change_speed(struct phy_device *phy) { } + +#endif + +#endif diff --git a/include/linux/phylink.h b/include/linux/phylink.h new file mode 100644 index 000000000..021fc6595 --- /dev/null +++ b/include/linux/phylink.h @@ -0,0 +1,239 @@ +#ifndef NETDEV_PCS_H +#define NETDEV_PCS_H + +#include +#include +#include + +struct device_node; +struct ethtool_cmd; +struct fwnode_handle; +struct net_device; + +enum { + MLO_PAUSE_NONE, + MLO_PAUSE_ASYM = BIT(0), + MLO_PAUSE_SYM = BIT(1), + MLO_PAUSE_RX = BIT(2), + MLO_PAUSE_TX = BIT(3), + MLO_PAUSE_TXRX_MASK = MLO_PAUSE_TX | MLO_PAUSE_RX, + MLO_PAUSE_AN = BIT(4), + + MLO_AN_PHY = 0, /* Conventional PHY */ + MLO_AN_FIXED, /* Fixed-link mode */ + MLO_AN_INBAND, /* In-band protocol */ +}; + +static inline bool phylink_autoneg_inband(unsigned int mode) +{ + return mode == MLO_AN_INBAND; +} + +/** + * struct phylink_link_state - link state structure + * @advertising: ethtool bitmask containing advertised link modes + * @lp_advertising: ethtool bitmask containing link partner advertised link + * modes + * @interface: link &typedef phy_interface_t mode + * @speed: link speed, one of the SPEED_* constants. + * @duplex: link duplex mode, one of DUPLEX_* constants. + * @pause: link pause state, described by MLO_PAUSE_* constants. + * @link: true if the link is up. + * @an_enabled: true if autonegotiation is enabled/desired. + * @an_complete: true if autonegotiation has completed. + */ +struct phylink_link_state { + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); + phy_interface_t interface; + int speed; + int duplex; + int pause; + unsigned int link:1; + unsigned int an_enabled:1; + unsigned int an_complete:1; +}; + +/** + * struct phylink_mac_ops - MAC operations structure. + * @validate: Validate and update the link configuration. + * @mac_link_state: Read the current link state from the hardware. + * @mac_config: configure the MAC for the selected mode and state. + * @mac_an_restart: restart 802.3z BaseX autonegotiation. + * @mac_link_down: take the link down. + * @mac_link_up: allow the link to come up. + * + * The individual methods are described more fully below. 
+ */ +struct phylink_mac_ops { + void (*validate)(struct net_device *ndev, unsigned long *supported, + struct phylink_link_state *state); + int (*mac_link_state)(struct net_device *ndev, + struct phylink_link_state *state); + void (*mac_config)(struct net_device *ndev, unsigned int mode, + const struct phylink_link_state *state); + void (*mac_an_restart)(struct net_device *ndev); + void (*mac_link_down)(struct net_device *ndev, unsigned int mode, + phy_interface_t interface); + void (*mac_link_up)(struct net_device *ndev, unsigned int mode, + phy_interface_t interface, + struct phy_device *phy); +}; + +#if 0 /* For kernel-doc purposes only. */ +/** + * validate - Validate and update the link configuration + * @ndev: a pointer to a &struct net_device for the MAC. + * @supported: ethtool bitmask for supported link modes. + * @state: a pointer to a &struct phylink_link_state. + * + * Clear bits in the @supported and @state->advertising masks that + * are not supportable by the MAC. + * + * Note that the PHY may be able to transform from one connection + * technology to another, so, eg, don't clear 1000BaseX just + * because the MAC is unable to BaseX mode. This is more about + * clearing unsupported speeds and duplex settings. + * + * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX + * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode + * based on @state->advertising and/or @state->speed and update + * @state->interface accordingly. + */ +void validate(struct net_device *ndev, unsigned long *supported, + struct phylink_link_state *state); + +/** + * mac_link_state() - Read the current link state from the hardware + * @ndev: a pointer to a &struct net_device for the MAC. + * @state: a pointer to a &struct phylink_link_state. + * + * Read the current link state from the MAC, reporting the current + * speed in @state->speed, duplex mode in @state->duplex, pause mode + * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits, + * negotiation completion state in @state->an_complete, and link + * up state in @state->link. + */ +int mac_link_state(struct net_device *ndev, + struct phylink_link_state *state); + +/** + * mac_config() - configure the MAC for the selected mode and state + * @ndev: a pointer to a &struct net_device for the MAC. + * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. + * @state: a pointer to a &struct phylink_link_state. + * + * The action performed depends on the currently selected mode: + * + * %MLO_AN_FIXED, %MLO_AN_PHY: + * Configure the specified @state->speed, @state->duplex and + * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode. + * + * %MLO_AN_INBAND: + * place the link in an inband negotiation mode (such as 802.3z + * 1000base-X or Cisco SGMII mode depending on the @state->interface + * mode). In both cases, link state management (whether the link + * is up or not) is performed by the MAC, and reported via the + * mac_link_state() callback. Changes in link state must be made + * by calling phylink_mac_change(). + * + * If in 802.3z mode, the link speed is fixed, dependent on the + * @state->interface. Duplex is negotiated, and pause is advertised + * according to @state->an_enabled, @state->pause and + * @state->advertising flags. Beware of MACs which only support full + * duplex at gigabit and higher speeds. 
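A sketch of a mac_config() implementation following the rules above; all example_* helpers are hypothetical MAC accessors:

static void example_mac_config(struct net_device *ndev, unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct example_priv *priv = netdev_priv(ndev);

	if (phylink_autoneg_inband(mode)) {
		/* Let the hardware run 802.3z/SGMII in-band negotiation;
		 * the result is reported back via mac_link_state(). */
		example_enable_inband_an(priv, state->interface);
		return;
	}

	/* MLO_AN_FIXED / MLO_AN_PHY: program the resolved parameters. */
	example_set_speed_duplex(priv, state->speed, state->duplex);
	example_set_pause(priv, !!(state->pause & MLO_PAUSE_TX),
			  !!(state->pause & MLO_PAUSE_RX));
}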
+ * + * If in Cisco SGMII mode, the link speed and duplex mode are passed + * in the serial bitstream 16-bit configuration word, and the MAC + * should be configured to read these bits and acknowledge the + * configuration word. Nothing is advertised by the MAC. The MAC is + * responsible for reading the configuration word and configuring + * itself accordingly. + */ +void mac_config(struct net_device *ndev, unsigned int mode, + const struct phylink_link_state *state); + +/** + * mac_an_restart() - restart 802.3z BaseX autonegotiation + * @ndev: a pointer to a &struct net_device for the MAC. + */ +void mac_an_restart(struct net_device *ndev); + +/** + * mac_link_down() - take the link down + * @ndev: a pointer to a &struct net_device for the MAC. + * @mode: link autonegotiation mode + * @interface: link &typedef phy_interface_t mode + * + * If @mode is not an in-band negotiation mode (as defined by + * phylink_autoneg_inband()), force the link down and disable any + * Energy Efficient Ethernet MAC configuration. Interface type + * selection must be done in mac_config(). + */ +void mac_link_down(struct net_device *ndev, unsigned int mode, + phy_interface_t interface); + +/** + * mac_link_up() - allow the link to come up + * @ndev: a pointer to a &struct net_device for the MAC. + * @mode: link autonegotiation mode + * @interface: link &typedef phy_interface_t mode + * @phy: any attached phy + * + * If @mode is not an in-band negotiation mode (as defined by + * phylink_autoneg_inband()), allow the link to come up. If @phy + * is non-%NULL, configure Energy Efficient Ethernet by calling + * phy_init_eee() and perform appropriate MAC configuration for EEE. + * Interface type selection must be done in mac_config(). + */ +void mac_link_up(struct net_device *ndev, unsigned int mode, + phy_interface_t interface, + struct phy_device *phy); +#endif + +struct phylink *phylink_create(struct net_device *, struct fwnode_handle *, + phy_interface_t iface, const struct phylink_mac_ops *ops); +void phylink_destroy(struct phylink *); + +int phylink_connect_phy(struct phylink *, struct phy_device *); +int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags); +void phylink_disconnect_phy(struct phylink *); +int phylink_fixed_state_cb(struct phylink *, + void (*cb)(struct net_device *dev, + struct phylink_link_state *)); + +void phylink_mac_change(struct phylink *, bool up); + +void phylink_start(struct phylink *); +void phylink_stop(struct phylink *); + +void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *); +int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *); + +int phylink_ethtool_ksettings_get(struct phylink *, + struct ethtool_link_ksettings *); +int phylink_ethtool_ksettings_set(struct phylink *, + const struct ethtool_link_ksettings *); +int phylink_ethtool_nway_reset(struct phylink *); +void phylink_ethtool_get_pauseparam(struct phylink *, + struct ethtool_pauseparam *); +int phylink_ethtool_set_pauseparam(struct phylink *, + struct ethtool_pauseparam *); +int phylink_get_eee_err(struct phylink *); +int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); +int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); +int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); + +#define phylink_zero(bm) \ + bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS) +#define __phylink_do_bit(op, bm, mode) \ + op(ETHTOOL_LINK_MODE_ ## mode ## _BIT, bm) + +#define phylink_set(bm, mode) __phylink_do_bit(__set_bit, bm, mode) +#define 
phylink_clear(bm, mode) __phylink_do_bit(__clear_bit, bm, mode) +#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode) + +void phylink_set_port_modes(unsigned long *bits); +void phylink_helper_basex_speed(struct phylink_link_state *state); + +#endif diff --git a/include/linux/pid.h b/include/linux/pid.h new file mode 100644 index 000000000..14a9a39da --- /dev/null +++ b/include/linux/pid.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PID_H +#define _LINUX_PID_H + +#include + +enum pid_type +{ + PIDTYPE_PID, + PIDTYPE_TGID, + PIDTYPE_PGID, + PIDTYPE_SID, + PIDTYPE_MAX, +}; + +/* + * What is struct pid? + * + * A struct pid is the kernel's internal notion of a process identifier. + * It refers to individual tasks, process groups, and sessions. While + * there are processes attached to it the struct pid lives in a hash + * table, so it and then the processes that it refers to can be found + * quickly from the numeric pid value. The attached processes may be + * quickly accessed by following pointers from struct pid. + * + * Storing pid_t values in the kernel and referring to them later has a + * problem. The process originally with that pid may have exited and the + * pid allocator wrapped, and another process could have come along + * and been assigned that pid. + * + * Referring to user space processes by holding a reference to struct + * task_struct has a problem. When the user space process exits + * the now useless task_struct is still kept. A task_struct plus a + * stack consumes around 10K of low kernel memory. More precisely + * this is THREAD_SIZE + sizeof(struct task_struct). By comparison + * a struct pid is about 64 bytes. + * + * Holding a reference to struct pid solves both of these problems. + * It is small so holding a reference does not consume a lot of + * resources, and since a new struct pid is allocated when the numeric pid + * value is reused (when pids wrap around) we don't mistakenly refer to new + * processes. + */ + + +/* + * struct upid is used to get the id of the struct pid, as it is + * seen in particular namespace. Later the struct pid is found with + * find_pid_ns() using the int nr and struct pid_namespace *ns. + */ + +struct upid { + int nr; + struct pid_namespace *ns; +}; + +struct pid +{ + atomic_t count; + unsigned int level; + /* lists of tasks that use this pid */ + struct hlist_head tasks[PIDTYPE_MAX]; + struct rcu_head rcu; + struct upid numbers[1]; +}; + +extern struct pid init_struct_pid; + +static inline struct pid *get_pid(struct pid *pid) +{ + if (pid) + atomic_inc(&pid->count); + return pid; +} + +extern void put_pid(struct pid *pid); +extern struct task_struct *pid_task(struct pid *pid, enum pid_type); +extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type); + +extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); + +/* + * these helpers must be called with the tasklist_lock write-held. + */ +extern void attach_pid(struct task_struct *task, enum pid_type); +extern void detach_pid(struct task_struct *task, enum pid_type); +extern void change_pid(struct task_struct *task, enum pid_type, + struct pid *pid); +extern void transfer_pid(struct task_struct *old, struct task_struct *new, + enum pid_type); + +struct pid_namespace; +extern struct pid_namespace init_pid_ns; + +/* + * look up a PID in the hash table. Must be called with the tasklist_lock + * or rcu_read_lock() held. 
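Typical driver wiring for the phylink entry points above, sketched with hypothetical example_* and priv names and an SGMII interface chosen arbitrarily:

	/* probe(): */
	priv->phylink = phylink_create(ndev, of_fwnode_handle(np),
				       PHY_INTERFACE_MODE_SGMII,
				       &example_mac_ops);
	if (IS_ERR(priv->phylink))
		return PTR_ERR(priv->phylink);

	/* ndo_open(): */
	err = phylink_of_phy_connect(priv->phylink, np, 0);
	if (err)
		return err;
	phylink_start(priv->phylink);

	/* ndo_stop(): */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	/* link interrupt handler ("link_is_up" read from the MAC): */
	phylink_mac_change(priv->phylink, link_is_up);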
+ * + * find_pid_ns() finds the pid in the namespace specified + * find_vpid() finds the pid by its virtual id, i.e. in the current namespace + * + * see also find_task_by_vpid() set in include/linux/sched.h + */ +extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns); +extern struct pid *find_vpid(int nr); + +/* + * Lookup a PID in the hash table, and return with it's count elevated. + */ +extern struct pid *find_get_pid(int nr); +extern struct pid *find_ge_pid(int nr, struct pid_namespace *); +int next_pidmap(struct pid_namespace *pid_ns, unsigned int last); + +extern struct pid *alloc_pid(struct pid_namespace *ns); +extern void free_pid(struct pid *pid); +extern void disable_pid_allocation(struct pid_namespace *ns); + +/* + * ns_of_pid() returns the pid namespace in which the specified pid was + * allocated. + * + * NOTE: + * ns_of_pid() is expected to be called for a process (task) that has + * an attached 'struct pid' (see attach_pid(), detach_pid()) i.e @pid + * is expected to be non-NULL. If @pid is NULL, caller should handle + * the resulting NULL pid-ns. + */ +static inline struct pid_namespace *ns_of_pid(struct pid *pid) +{ + struct pid_namespace *ns = NULL; + if (pid) + ns = pid->numbers[pid->level].ns; + return ns; +} + +/* + * is_child_reaper returns true if the pid is the init process + * of the current namespace. As this one could be checked before + * pid_ns->child_reaper is assigned in copy_process, we check + * with the pid number. + */ +static inline bool is_child_reaper(struct pid *pid) +{ + return pid->numbers[pid->level].nr == 1; +} + +/* + * the helpers to get the pid's id seen from different namespaces + * + * pid_nr() : global id, i.e. the id seen from the init namespace; + * pid_vnr() : virtual id, i.e. the id seen from the pid namespace of + * current. + * pid_nr_ns() : id seen from the ns specified. + * + * see also task_xid_nr() etc in include/linux/sched.h + */ + +static inline pid_t pid_nr(struct pid *pid) +{ + pid_t nr = 0; + if (pid) + nr = pid->numbers[0].nr; + return nr; +} + +pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns); +pid_t pid_vnr(struct pid *pid); + +#define do_each_pid_task(pid, type, task) \ + do { \ + if ((pid) != NULL) \ + hlist_for_each_entry_rcu((task), \ + &(pid)->tasks[type], pid_links[type]) { + + /* + * Both old and new leaders may be attached to + * the same pid in the middle of de_thread(). 
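A sketch of the lookup helpers above: translate a numeric id as seen by the current task into a pinned task_struct ("nr" is a placeholder):

	struct pid *pid = find_get_pid(nr);
	struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);

	if (task) {
		/* ... operate on the task ... */
		put_task_struct(task);
	}
	put_pid(pid);	/* both helpers tolerate a NULL pid */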
+ */ +#define while_each_pid_task(pid, type, task) \ + if (type == PIDTYPE_PID) \ + break; \ + } \ + } while (0) + +#define do_each_pid_thread(pid, type, task) \ + do_each_pid_task(pid, type, task) { \ + struct task_struct *tg___ = task; \ + for_each_thread(tg___, task) { + +#define while_each_pid_thread(pid, type, task) \ + } \ + task = tg___; \ + } while_each_pid_task(pid, type, task) +#endif /* _LINUX_PID_H */ diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h new file mode 100644 index 000000000..49538b172 --- /dev/null +++ b/include/linux/pid_namespace.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PID_NS_H +#define _LINUX_PID_NS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +struct fs_pin; + +enum { /* definitions for pid_namespace's hide_pid field */ + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, +}; + +struct pid_namespace { + struct kref kref; + struct idr idr; + struct rcu_head rcu; + unsigned int pid_allocated; + struct task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; +#ifdef CONFIG_PROC_FS + struct vfsmount *proc_mnt; + struct dentry *proc_self; + struct dentry *proc_thread_self; +#endif +#ifdef CONFIG_BSD_PROCESS_ACCT + struct fs_pin *bacct; +#endif + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct work_struct proc_work; + kgid_t pid_gid; + int hide_pid; + int reboot; /* group exit code if this pidns was rebooted */ + struct ns_common ns; +} __randomize_layout; + +extern struct pid_namespace init_pid_ns; + +#define PIDNS_ADDING (1U << 31) + +#ifdef CONFIG_PID_NS +static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) +{ + if (ns != &init_pid_ns) + kref_get(&ns->kref); + return ns; +} + +extern struct pid_namespace *copy_pid_ns(unsigned long flags, + struct user_namespace *user_ns, struct pid_namespace *ns); +extern void zap_pid_ns_processes(struct pid_namespace *pid_ns); +extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd); +extern void put_pid_ns(struct pid_namespace *ns); + +#else /* !CONFIG_PID_NS */ +#include + +static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) +{ + return ns; +} + +static inline struct pid_namespace *copy_pid_ns(unsigned long flags, + struct user_namespace *user_ns, struct pid_namespace *ns) +{ + if (flags & CLONE_NEWPID) + ns = ERR_PTR(-EINVAL); + return ns; +} + +static inline void put_pid_ns(struct pid_namespace *ns) +{ +} + +static inline void zap_pid_ns_processes(struct pid_namespace *ns) +{ + BUG(); +} + +static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) +{ + return 0; +} +#endif /* CONFIG_PID_NS */ + +extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); +void pidhash_init(void); +void pid_idr_init(void); + +#endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/pim.h b/include/linux/pim.h new file mode 100644 index 000000000..290d4d2ed --- /dev/null +++ b/include/linux/pim.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PIM_H +#define __LINUX_PIM_H + +#include +#include + +/* Message types - V1 */ +#define PIM_V1_VERSION cpu_to_be32(0x10000000) +#define PIM_V1_REGISTER 1 + +/* Message types - V2 */ +#define PIM_VERSION 2 + +/* RFC7761, sec 4.9: + * Type + * Types for specific PIM messages. 
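A small sketch of the pid-namespace accessors above; "some_pid" stands in for any struct pid the caller already holds:

	/* Pin the caller's active namespace and translate a pid into the
	 * number that tasks inside that namespace would see. */
	struct pid_namespace *ns = get_pid_ns(task_active_pid_ns(current));
	pid_t nr = pid_nr_ns(some_pid, ns);

	/* ... */
	put_pid_ns(ns);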
PIM Types are: + * + * Message Type Destination + * --------------------------------------------------------------------- + * 0 = Hello Multicast to ALL-PIM-ROUTERS + * 1 = Register Unicast to RP + * 2 = Register-Stop Unicast to source of Register + * packet + * 3 = Join/Prune Multicast to ALL-PIM-ROUTERS + * 4 = Bootstrap Multicast to ALL-PIM-ROUTERS + * 5 = Assert Multicast to ALL-PIM-ROUTERS + * 6 = Graft (used in PIM-DM only) Unicast to RPF'(S) + * 7 = Graft-Ack (used in PIM-DM only) Unicast to source of Graft + * packet + * 8 = Candidate-RP-Advertisement Unicast to Domain's BSR + */ +enum { + PIM_TYPE_HELLO, + PIM_TYPE_REGISTER, + PIM_TYPE_REGISTER_STOP, + PIM_TYPE_JOIN_PRUNE, + PIM_TYPE_BOOTSTRAP, + PIM_TYPE_ASSERT, + PIM_TYPE_GRAFT, + PIM_TYPE_GRAFT_ACK, + PIM_TYPE_CANDIDATE_RP_ADV +}; + +#define PIM_NULL_REGISTER cpu_to_be32(0x40000000) + +/* RFC7761, sec 4.9: + * The PIM header common to all PIM messages is: + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |PIM Ver| Type | Reserved | Checksum | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct pimhdr { + __u8 type; + __u8 reserved; + __be16 csum; +}; + +/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ +struct pimreghdr { + __u8 type; + __u8 reserved; + __be16 csum; + __be32 flags; +}; + +int pim_rcv_v1(struct sk_buff *skb); + +static inline bool ipmr_pimsm_enabled(void) +{ + return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2); +} + +static inline struct pimhdr *pim_hdr(const struct sk_buff *skb) +{ + return (struct pimhdr *)skb_transport_header(skb); +} + +static inline u8 pim_hdr_version(const struct pimhdr *pimhdr) +{ + return pimhdr->type >> 4; +} + +static inline u8 pim_hdr_type(const struct pimhdr *pimhdr) +{ + return pimhdr->type & 0xf; +} + +/* check if the address is 224.0.0.13, RFC7761 sec 4.3.1 */ +static inline bool pim_ipv4_all_pim_routers(__be32 addr) +{ + return addr == htonl(0xE000000D); +} +#endif diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h new file mode 100644 index 000000000..0412cc983 --- /dev/null +++ b/include/linux/pinctrl/consumer.h @@ -0,0 +1,201 @@ +/* + * Consumer interface the pin control subsystem + * + * Copyright (C) 2012 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * Based on bits of regulator core, gpio core and clk core + * + * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_CONSUMER_H +#define __LINUX_PINCTRL_CONSUMER_H + +#include +#include +#include +#include + +/* This struct is private to the core and should be regarded as a cookie */ +struct pinctrl; +struct pinctrl_state; +struct device; + +#ifdef CONFIG_PINCTRL + +/* External interface to pin control */ +extern int pinctrl_gpio_request(unsigned gpio); +extern void pinctrl_gpio_free(unsigned gpio); +extern int pinctrl_gpio_direction_input(unsigned gpio); +extern int pinctrl_gpio_direction_output(unsigned gpio); +extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config); + +extern struct pinctrl * __must_check pinctrl_get(struct device *dev); +extern void pinctrl_put(struct pinctrl *p); +extern struct pinctrl_state * __must_check pinctrl_lookup_state( + struct pinctrl *p, + const char *name); +extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s); + +extern struct pinctrl * __must_check 
devm_pinctrl_get(struct device *dev); +extern void devm_pinctrl_put(struct pinctrl *p); + +#ifdef CONFIG_PM +extern int pinctrl_pm_select_default_state(struct device *dev); +extern int pinctrl_pm_select_sleep_state(struct device *dev); +extern int pinctrl_pm_select_idle_state(struct device *dev); +#else +static inline int pinctrl_pm_select_default_state(struct device *dev) +{ + return 0; +} +static inline int pinctrl_pm_select_sleep_state(struct device *dev) +{ + return 0; +} +static inline int pinctrl_pm_select_idle_state(struct device *dev) +{ + return 0; +} +#endif + +#else /* !CONFIG_PINCTRL */ + +static inline int pinctrl_gpio_request(unsigned gpio) +{ + return 0; +} + +static inline void pinctrl_gpio_free(unsigned gpio) +{ +} + +static inline int pinctrl_gpio_direction_input(unsigned gpio) +{ + return 0; +} + +static inline int pinctrl_gpio_direction_output(unsigned gpio) +{ + return 0; +} + +static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config) +{ + return 0; +} + +static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) +{ + return NULL; +} + +static inline void pinctrl_put(struct pinctrl *p) +{ +} + +static inline struct pinctrl_state * __must_check pinctrl_lookup_state( + struct pinctrl *p, + const char *name) +{ + return NULL; +} + +static inline int pinctrl_select_state(struct pinctrl *p, + struct pinctrl_state *s) +{ + return 0; +} + +static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) +{ + return NULL; +} + +static inline void devm_pinctrl_put(struct pinctrl *p) +{ +} + +static inline int pinctrl_pm_select_default_state(struct device *dev) +{ + return 0; +} + +static inline int pinctrl_pm_select_sleep_state(struct device *dev) +{ + return 0; +} + +static inline int pinctrl_pm_select_idle_state(struct device *dev) +{ + return 0; +} + +#endif /* CONFIG_PINCTRL */ + +static inline struct pinctrl * __must_check pinctrl_get_select( + struct device *dev, const char *name) +{ + struct pinctrl *p; + struct pinctrl_state *s; + int ret; + + p = pinctrl_get(dev); + if (IS_ERR(p)) + return p; + + s = pinctrl_lookup_state(p, name); + if (IS_ERR(s)) { + pinctrl_put(p); + return ERR_CAST(s); + } + + ret = pinctrl_select_state(p, s); + if (ret < 0) { + pinctrl_put(p); + return ERR_PTR(ret); + } + + return p; +} + +static inline struct pinctrl * __must_check pinctrl_get_select_default( + struct device *dev) +{ + return pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT); +} + +static inline struct pinctrl * __must_check devm_pinctrl_get_select( + struct device *dev, const char *name) +{ + struct pinctrl *p; + struct pinctrl_state *s; + int ret; + + p = devm_pinctrl_get(dev); + if (IS_ERR(p)) + return p; + + s = pinctrl_lookup_state(p, name); + if (IS_ERR(s)) { + devm_pinctrl_put(p); + return ERR_CAST(s); + } + + ret = pinctrl_select_state(p, s); + if (ret < 0) { + devm_pinctrl_put(p); + return ERR_PTR(ret); + } + + return p; +} + +static inline struct pinctrl * __must_check devm_pinctrl_get_select_default( + struct device *dev) +{ + return devm_pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT); +} + +#endif /* __LINUX_PINCTRL_CONSUMER_H */ diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h new file mode 100644 index 000000000..d01a8638b --- /dev/null +++ b/include/linux/pinctrl/devinfo.h @@ -0,0 +1,61 @@ +/* + * Per-device information from the pin control system. + * This is the stuff that get included into the device + * core. 
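A sketch of how a driver might use the PM state helpers declared above from its dev_pm_ops; the example_* names are hypothetical:

static int example_suspend(struct device *dev)
{
	return pinctrl_pm_select_sleep_state(dev);
}

static int example_resume(struct device *dev)
{
	return pinctrl_pm_select_default_state(dev);
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);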
+ * + * Copyright (C) 2012 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * This interface is used in the core to keep track of pins. + * + * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef PINCTRL_DEVINFO_H +#define PINCTRL_DEVINFO_H + +#ifdef CONFIG_PINCTRL + +/* The device core acts as a consumer toward pinctrl */ +#include + +/** + * struct dev_pin_info - pin state container for devices + * @p: pinctrl handle for the containing device + * @default_state: the default state for the handle, if found + * @init_state: the state at probe time, if found + * @sleep_state: the state at suspend time, if found + * @idle_state: the state at idle (runtime suspend) time, if found + */ +struct dev_pin_info { + struct pinctrl *p; + struct pinctrl_state *default_state; + struct pinctrl_state *init_state; +#ifdef CONFIG_PM + struct pinctrl_state *sleep_state; + struct pinctrl_state *idle_state; +#endif +}; + +extern int pinctrl_bind_pins(struct device *dev); +extern int pinctrl_init_done(struct device *dev); + +#else + +struct device; + +/* Stubs if we're not using pinctrl */ + +static inline int pinctrl_bind_pins(struct device *dev) +{ + return 0; +} + +static inline int pinctrl_init_done(struct device *dev) +{ + return 0; +} + +#endif /* CONFIG_PINCTRL */ +#endif /* PINCTRL_DEVINFO_H */ diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h new file mode 100644 index 000000000..7fa5d8719 --- /dev/null +++ b/include/linux/pinctrl/machine.h @@ -0,0 +1,170 @@ +/* + * Machine interface for the pinctrl subsystem. + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * Based on bits of regulator core, gpio core and clk core + * + * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_MACHINE_H +#define __LINUX_PINCTRL_MACHINE_H + +#include + +#include + +enum pinctrl_map_type { + PIN_MAP_TYPE_INVALID, + PIN_MAP_TYPE_DUMMY_STATE, + PIN_MAP_TYPE_MUX_GROUP, + PIN_MAP_TYPE_CONFIGS_PIN, + PIN_MAP_TYPE_CONFIGS_GROUP, +}; + +/** + * struct pinctrl_map_mux - mapping table content for MAP_TYPE_MUX_GROUP + * @group: the name of the group whose mux function is to be configured. This + * field may be left NULL, and the first applicable group for the function + * will be used. + * @function: the mux function to select for the group + */ +struct pinctrl_map_mux { + const char *group; + const char *function; +}; + +/** + * struct pinctrl_map_configs - mapping table content for MAP_TYPE_CONFIGS_* + * @group_or_pin: the name of the pin or group whose configuration parameters + * are to be configured. + * @configs: a pointer to an array of config parameters/values to program into + * hardware. Each individual pin controller defines the format and meaning + * of config parameters. + * @num_configs: the number of entries in array @configs + */ +struct pinctrl_map_configs { + const char *group_or_pin; + unsigned long *configs; + unsigned num_configs; +}; + +/** + * struct pinctrl_map - boards/machines shall provide this map for devices + * @dev_name: the name of the device using this specific mapping, the name + * must be the same as in your struct device*. If this name is set to the + * same name as the pin controllers own dev_name(), the map entry will be + * hogged by the driver itself upon registration + * @name: the name of this specific map entry for the particular machine. 
+ * This is the parameter passed to pinmux_lookup_state() + * @type: the type of mapping table entry + * @ctrl_dev_name: the name of the device controlling this specific mapping, + * the name must be the same as in your struct device*. This field is not + * used for PIN_MAP_TYPE_DUMMY_STATE + * @data: Data specific to the mapping type + */ +struct pinctrl_map { + const char *dev_name; + const char *name; + enum pinctrl_map_type type; + const char *ctrl_dev_name; + union { + struct pinctrl_map_mux mux; + struct pinctrl_map_configs configs; + } data; +}; + +/* Convenience macros to create mapping table entries */ + +#define PIN_MAP_DUMMY_STATE(dev, state) \ + { \ + .dev_name = dev, \ + .name = state, \ + .type = PIN_MAP_TYPE_DUMMY_STATE, \ + } + +#define PIN_MAP_MUX_GROUP(dev, state, pinctrl, grp, func) \ + { \ + .dev_name = dev, \ + .name = state, \ + .type = PIN_MAP_TYPE_MUX_GROUP, \ + .ctrl_dev_name = pinctrl, \ + .data.mux = { \ + .group = grp, \ + .function = func, \ + }, \ + } + +#define PIN_MAP_MUX_GROUP_DEFAULT(dev, pinctrl, grp, func) \ + PIN_MAP_MUX_GROUP(dev, PINCTRL_STATE_DEFAULT, pinctrl, grp, func) + +#define PIN_MAP_MUX_GROUP_HOG(dev, state, grp, func) \ + PIN_MAP_MUX_GROUP(dev, state, dev, grp, func) + +#define PIN_MAP_MUX_GROUP_HOG_DEFAULT(dev, grp, func) \ + PIN_MAP_MUX_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, func) + +#define PIN_MAP_CONFIGS_PIN(dev, state, pinctrl, pin, cfgs) \ + { \ + .dev_name = dev, \ + .name = state, \ + .type = PIN_MAP_TYPE_CONFIGS_PIN, \ + .ctrl_dev_name = pinctrl, \ + .data.configs = { \ + .group_or_pin = pin, \ + .configs = cfgs, \ + .num_configs = ARRAY_SIZE(cfgs), \ + }, \ + } + +#define PIN_MAP_CONFIGS_PIN_DEFAULT(dev, pinctrl, pin, cfgs) \ + PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_DEFAULT, pinctrl, pin, cfgs) + +#define PIN_MAP_CONFIGS_PIN_HOG(dev, state, pin, cfgs) \ + PIN_MAP_CONFIGS_PIN(dev, state, dev, pin, cfgs) + +#define PIN_MAP_CONFIGS_PIN_HOG_DEFAULT(dev, pin, cfgs) \ + PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_DEFAULT, dev, pin, cfgs) + +#define PIN_MAP_CONFIGS_GROUP(dev, state, pinctrl, grp, cfgs) \ + { \ + .dev_name = dev, \ + .name = state, \ + .type = PIN_MAP_TYPE_CONFIGS_GROUP, \ + .ctrl_dev_name = pinctrl, \ + .data.configs = { \ + .group_or_pin = grp, \ + .configs = cfgs, \ + .num_configs = ARRAY_SIZE(cfgs), \ + }, \ + } + +#define PIN_MAP_CONFIGS_GROUP_DEFAULT(dev, pinctrl, grp, cfgs) \ + PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, pinctrl, grp, cfgs) + +#define PIN_MAP_CONFIGS_GROUP_HOG(dev, state, grp, cfgs) \ + PIN_MAP_CONFIGS_GROUP(dev, state, dev, grp, cfgs) + +#define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \ + PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs) + +#ifdef CONFIG_PINCTRL + +extern int pinctrl_register_mappings(const struct pinctrl_map *map, + unsigned num_maps); +extern void pinctrl_provide_dummies(void); +#else + +static inline int pinctrl_register_mappings(const struct pinctrl_map *map, + unsigned num_maps) +{ + return 0; +} + +static inline void pinctrl_provide_dummies(void) +{ +} +#endif /* !CONFIG_PINCTRL */ +#endif diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h new file mode 100644 index 000000000..6c0680641 --- /dev/null +++ b/include/linux/pinctrl/pinconf-generic.h @@ -0,0 +1,228 @@ +/* + * Interface the generic pinconfig portions of the pinctrl subsystem + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * This interface is used in the core to keep track of pins. 
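[Editorial aside, not part of the patch: a sketch of a board file feeding the mapping table above to the core with pinctrl_register_mappings(). The device name "foo-i2c.0", controller name "pinctrl-foo", group "i2c0_grp" and function "i2c0" are invented placeholders.]

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pinctrl/machine.h>

static const struct pinctrl_map foo_board_map[] __initconst = {
	/* Mux group "i2c0_grp" to function "i2c0" on controller "pinctrl-foo"
	 * whenever device "foo-i2c.0" selects its "default" state. */
	PIN_MAP_MUX_GROUP_DEFAULT("foo-i2c.0", "pinctrl-foo", "i2c0_grp", "i2c0"),
	/* Give "foo-spi.0" a dummy "default" state on this board */
	PIN_MAP_DUMMY_STATE("foo-spi.0", PINCTRL_STATE_DEFAULT),
};

/* Typically called from the machine's init hook; the core copies the table */
static int __init foo_board_pinmux_init(void)
{
	return pinctrl_register_mappings(foo_board_map,
					 ARRAY_SIZE(foo_board_map));
}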
+ * + * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H +#define __LINUX_PINCTRL_PINCONF_GENERIC_H + +/** + * enum pin_config_param - possible pin configuration parameters + * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it + * weakly drives the last value on a tristate bus, also known as a "bus + * holder", "bus keeper" or "repeater". This allows another device on the + * bus to change the value by driving the bus high or low and switching to + * tristate. The argument is ignored. + * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a + * transition from say pull-up to pull-down implies that you disable + * pull-up in the process, this setting disables all biasing. + * @PIN_CONFIG_BIAS_HIGH_IMPEDANCE: the pin will be set to a high impedance + * mode, also know as "third-state" (tristate) or "high-Z" or "floating". + * On output pins this effectively disconnects the pin, which is useful + * if for example some other pin is going to drive the signal connected + * to it for a while. Pins used for input are usually always high + * impedance. + * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high + * impedance to GROUND). If the argument is != 0 pull-down is enabled, + * if it is 0, pull-down is total, i.e. the pin is connected to GROUND. + * @PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: the pin will be pulled up or down based + * on embedded knowledge of the controller hardware, like current mux + * function. The pull direction and possibly strength too will normally + * be decided completely inside the hardware block and not be readable + * from the kernel side. + * If the argument is != 0 pull up/down is enabled, if it is 0, the + * configuration is ignored. The proper way to disable it is to use + * @PIN_CONFIG_BIAS_DISABLE. + * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high + * impedance to VDD). If the argument is != 0 pull-up is enabled, + * if it is 0, pull-up is total, i.e. the pin is connected to VDD. + * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open + * collector) which means it is usually wired with other output ports + * which are then pulled up with an external resistor. Setting this + * config will enable open drain mode, the argument is ignored. + * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source + * (open emitter). Setting this config will enable open source mode, the + * argument is ignored. + * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and + * low, this is the most typical case and is typically achieved with two + * active transistors on the output. Setting this config will enable + * push-pull mode, the argument is ignored. + * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current + * passed as argument. The argument is in mA. + * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, + * which means it will wait for signals to settle when reading inputs. The + * argument gives the debounce time in usecs. Setting the + * argument to zero turns debouncing off. + * @PIN_CONFIG_INPUT_ENABLE: enable the pin's input. Note that this does not + * affect the pin's ability to drive output. 1 enables input, 0 disables + * input. + * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in + * schmitt-trigger mode. 
If the schmitt-trigger has adjustable hysteresis, + * the threshold value is given on a custom format as argument when + * setting pins to this mode. + * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. + * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, + * schmitt-trigger mode is disabled. + * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power + * operation, if several modes of operation are supported these can be + * passed in the argument on a custom form, else just use argument 1 + * to indicate low power mode, argument 0 turns low power mode off. + * @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode + * without driving a value there. For most platforms this reduces to + * enable the output buffers and then let the pin controller current + * configuration (eg. the currently selected mux function) drive values on + * the line. Use argument 1 to enable output mode, argument 0 to disable + * it. + * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a + * value on the line. Use argument 1 to indicate high level, argument 0 to + * indicate low level. (Please see Documentation/driver-api/pinctl.rst, + * section "GPIO mode pitfalls" for a discussion around this parameter.) + * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power + * supplies, the argument to this parameter (on a custom format) tells + * the driver which alternative power source to use. + * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state. + * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to + * this parameter (on a custom format) tells the driver which alternative + * slew rate to use. + * @PIN_CONFIG_SKEW_DELAY: if the pin has programmable skew rate (on inputs) + * or latch delay (on outputs) this parameter (in a custom format) + * specifies the clock skew or latch delay. It typically controls how + * many double inverters are put in front of the line. + * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset + * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if + * you need to pass in custom configurations to the pin controller, use + * PIN_CONFIG_END+1 as the base offset. + * @PIN_CONFIG_MAX: this is the maximum configuration value that can be + * presented using the packed format. + */ +enum pin_config_param { + PIN_CONFIG_BIAS_BUS_HOLD, + PIN_CONFIG_BIAS_DISABLE, + PIN_CONFIG_BIAS_HIGH_IMPEDANCE, + PIN_CONFIG_BIAS_PULL_DOWN, + PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, + PIN_CONFIG_BIAS_PULL_UP, + PIN_CONFIG_DRIVE_OPEN_DRAIN, + PIN_CONFIG_DRIVE_OPEN_SOURCE, + PIN_CONFIG_DRIVE_PUSH_PULL, + PIN_CONFIG_DRIVE_STRENGTH, + PIN_CONFIG_INPUT_DEBOUNCE, + PIN_CONFIG_INPUT_ENABLE, + PIN_CONFIG_INPUT_SCHMITT, + PIN_CONFIG_INPUT_SCHMITT_ENABLE, + PIN_CONFIG_LOW_POWER_MODE, + PIN_CONFIG_OUTPUT_ENABLE, + PIN_CONFIG_OUTPUT, + PIN_CONFIG_POWER_SOURCE, + PIN_CONFIG_SLEEP_HARDWARE_STATE, + PIN_CONFIG_SLEW_RATE, + PIN_CONFIG_SKEW_DELAY, + PIN_CONFIG_PERSIST_STATE, + PIN_CONFIG_END = 0x7F, + PIN_CONFIG_MAX = 0xFF, +}; + +/* + * Helpful configuration macro to be used in tables etc. + */ +#define PIN_CONF_PACKED(p, a) ((a << 8) | ((unsigned long) p & 0xffUL)) + +/* + * The following inlines stuffs a configuration parameter and data value + * into and out of an unsigned long argument, as used by the generic pin config + * system. We put the parameter in the lower 8 bits and the argument in the + * upper 24 bits. 
+ */ + +static inline enum pin_config_param pinconf_to_config_param(unsigned long config) +{ + return (enum pin_config_param) (config & 0xffUL); +} + +static inline u32 pinconf_to_config_argument(unsigned long config) +{ + return (u32) ((config >> 8) & 0xffffffUL); +} + +static inline unsigned long pinconf_to_config_packed(enum pin_config_param param, + u32 argument) +{ + return PIN_CONF_PACKED(param, argument); +} + +#ifdef CONFIG_GENERIC_PINCONF + +#ifdef CONFIG_DEBUG_FS +#define PCONFDUMP(a, b, c, d) { \ + .param = a, .display = b, .format = c, .has_arg = d \ + } + +struct pin_config_item { + const enum pin_config_param param; + const char * const display; + const char * const format; + bool has_arg; +}; +#endif /* CONFIG_DEBUG_FS */ + +#ifdef CONFIG_OF + +#include +#include +struct pinctrl_dev; +struct pinctrl_map; + +struct pinconf_generic_params { + const char * const property; + enum pin_config_param param; + u32 default_value; +}; + +int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev, + struct device_node *np, struct pinctrl_map **map, + unsigned *reserved_maps, unsigned *num_maps, + enum pinctrl_map_type type); +int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev, + struct device_node *np_config, struct pinctrl_map **map, + unsigned *num_maps, enum pinctrl_map_type type); +void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, unsigned num_maps); + +static inline int pinconf_generic_dt_node_to_map_group( + struct pinctrl_dev *pctldev, struct device_node *np_config, + struct pinctrl_map **map, unsigned *num_maps) +{ + return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, + PIN_MAP_TYPE_CONFIGS_GROUP); +} + +static inline int pinconf_generic_dt_node_to_map_pin( + struct pinctrl_dev *pctldev, struct device_node *np_config, + struct pinctrl_map **map, unsigned *num_maps) +{ + return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, + PIN_MAP_TYPE_CONFIGS_PIN); +} + +static inline int pinconf_generic_dt_node_to_map_all( + struct pinctrl_dev *pctldev, struct device_node *np_config, + struct pinctrl_map **map, unsigned *num_maps) +{ + /* + * passing the type as PIN_MAP_TYPE_INVALID causes the underlying parser + * to infer the map type from the DT properties used. + */ + return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, + PIN_MAP_TYPE_INVALID); +} +#endif + +#endif /* CONFIG_GENERIC_PINCONF */ + +#endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */ diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h new file mode 100644 index 000000000..8dd85d302 --- /dev/null +++ b/include/linux/pinctrl/pinconf.h @@ -0,0 +1,76 @@ +/* + * Interface the pinconfig portions of the pinctrl subsystem + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * This interface is used in the core to keep track of pins. + * + * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_PINCONF_H +#define __LINUX_PINCTRL_PINCONF_H + +#ifdef CONFIG_PINCONF + +#include + +struct pinctrl_dev; +struct seq_file; + +/** + * struct pinconf_ops - pin config operations, to be implemented by + * pin configuration capable drivers. + * @is_generic: for pin controllers that want to use the generic interface, + * this flag tells the framework that it's generic. 
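[Editorial aside, not part of the patch: a small sketch of the packed-config format described above, parameter in the low 8 bits and argument in the 24 bits above it, using only the macro and inline helpers from this header. The foo_* function name is a placeholder.]

#include <linux/types.h>
#include <linux/pinctrl/pinconf-generic.h>

static void foo_pinconf_pack_demo(void)
{
	/* Pack "8 mA drive strength" into one generic config word */
	unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, 8);

	/* A driver's pin_config_set() implementation takes it apart again */
	enum pin_config_param param = pinconf_to_config_param(cfg);
	u32 strength_mA = pinconf_to_config_argument(cfg);

	(void)param;		/* PIN_CONFIG_DRIVE_STRENGTH */
	(void)strength_mA;	/* 8 */
}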
+ * @pin_config_get: get the config of a certain pin, if the requested config + * is not available on this controller this should return -ENOTSUPP + * and if it is available but disabled it should return -EINVAL + * @pin_config_set: configure an individual pin + * @pin_config_group_get: get configurations for an entire pin group; should + * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get. + * @pin_config_group_set: configure all pins in a group + * @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration + * @pin_config_dbg_show: optional debugfs display hook that will provide + * per-device info for a certain pin in debugfs + * @pin_config_group_dbg_show: optional debugfs display hook that will provide + * per-device info for a certain group in debugfs + * @pin_config_config_dbg_show: optional debugfs display hook that will decode + * and display a driver's pin configuration parameter + */ +struct pinconf_ops { +#ifdef CONFIG_GENERIC_PINCONF + bool is_generic; +#endif + int (*pin_config_get) (struct pinctrl_dev *pctldev, + unsigned pin, + unsigned long *config); + int (*pin_config_set) (struct pinctrl_dev *pctldev, + unsigned pin, + unsigned long *configs, + unsigned num_configs); + int (*pin_config_group_get) (struct pinctrl_dev *pctldev, + unsigned selector, + unsigned long *config); + int (*pin_config_group_set) (struct pinctrl_dev *pctldev, + unsigned selector, + unsigned long *configs, + unsigned num_configs); + int (*pin_config_dbg_parse_modify) (struct pinctrl_dev *pctldev, + const char *arg, + unsigned long *config); + void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev, + struct seq_file *s, + unsigned offset); + void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev, + struct seq_file *s, + unsigned selector); + void (*pin_config_config_dbg_show) (struct pinctrl_dev *pctldev, + struct seq_file *s, + unsigned long config); +}; + +#endif + +#endif /* __LINUX_PINCTRL_PINCONF_H */ diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h new file mode 100644 index 000000000..a0e785815 --- /dev/null +++ b/include/linux/pinctrl/pinctrl-state.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Standard pin control state definitions + */ + +/** + * @PINCTRL_STATE_DEFAULT: the state the pinctrl handle shall be put + * into as default, usually this means the pins are up and ready to + * be used by the device driver. This state is commonly used by + * hogs to configure muxing and pins at boot, and also as a state + * to go into when returning from sleep and idle in + * .pm_runtime_resume() or ordinary .resume() for example. + * @PINCTRL_STATE_INIT: normally the pinctrl will be set to "default" + * before the driver's probe() function is called. There are some + * drivers where that is not appropriate becausing doing so would + * glitch the pins. In those cases you can add an "init" pinctrl + * which is the state of the pins before drive probe. After probe + * if the pins are still in "init" state they'll be moved to + * "default". + * @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into + * when the pins are idle. This is a state where the system is relaxed + * but not fully sleeping - some power may be on but clocks gated for + * example. Could typically be set from a pm_runtime_suspend() or + * pm_runtime_idle() operation. + * @PINCTRL_STATE_SLEEP: the state the pinctrl handle shall be put into + * when the pins are sleeping. 
This is a state where the system is in + * its lowest sleep state. Could typically be set from an + * ordinary .suspend() function. + */ +#define PINCTRL_STATE_DEFAULT "default" +#define PINCTRL_STATE_INIT "init" +#define PINCTRL_STATE_IDLE "idle" +#define PINCTRL_STATE_SLEEP "sleep" diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h new file mode 100644 index 000000000..8f5dbb845 --- /dev/null +++ b/include/linux/pinctrl/pinctrl.h @@ -0,0 +1,213 @@ +/* + * Interface the pinctrl subsystem + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * This interface is used in the core to keep track of pins. + * + * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_PINCTRL_H +#define __LINUX_PINCTRL_PINCTRL_H + +#ifdef CONFIG_PINCTRL + +#include +#include +#include +#include +#include + +struct device; +struct pinctrl_dev; +struct pinctrl_map; +struct pinmux_ops; +struct pinconf_ops; +struct pin_config_item; +struct gpio_chip; +struct device_node; + +/** + * struct pinctrl_pin_desc - boards/machines provide information on their + * pins, pads or other muxable units in this struct + * @number: unique pin number from the global pin number space + * @name: a name for this pin + * @drv_data: driver-defined per-pin data. pinctrl core does not touch this + */ +struct pinctrl_pin_desc { + unsigned number; + const char *name; + void *drv_data; +}; + +/* Convenience macro to define a single named or anonymous pin descriptor */ +#define PINCTRL_PIN(a, b) { .number = a, .name = b } +#define PINCTRL_PIN_ANON(a) { .number = a } + +/** + * struct pinctrl_gpio_range - each pin controller can provide subranges of + * the GPIO number space to be handled by the controller + * @node: list node for internal use + * @name: a name for the chip in this range + * @id: an ID number for the chip in this range + * @base: base offset of the GPIO range + * @pin_base: base pin number of the GPIO range if pins == NULL + * @pins: enumeration of pins in GPIO range or NULL + * @npins: number of pins in the GPIO range, including the base number + * @gc: an optional pointer to a gpio_chip + */ +struct pinctrl_gpio_range { + struct list_head node; + const char *name; + unsigned int id; + unsigned int base; + unsigned int pin_base; + unsigned const *pins; + unsigned int npins; + struct gpio_chip *gc; +}; + +/** + * struct pinctrl_ops - global pin control operations, to be implemented by + * pin controller drivers. + * @get_groups_count: Returns the count of total number of groups registered. + * @get_group_name: return the group name of the pin group + * @get_group_pins: return an array of pins corresponding to a certain + * group selector @pins, and the size of the array in @num_pins + * @pin_dbg_show: optional debugfs display hook that will provide per-device + * info for a certain pin in debugfs + * @dt_node_to_map: parse a device tree "pin configuration node", and create + * mapping table entries for it. These are returned through the @map and + * @num_maps output parameters. This function is optional, and may be + * omitted for pinctrl drivers that do not support device tree. + * @dt_free_map: free mapping table entries created via @dt_node_to_map. The + * top-level @map pointer must be freed, along with any dynamically + * allocated members of the mapping table entries themselves. This + * function is optional, and may be omitted for pinctrl drivers that do + * not support device tree. 
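[Editorial aside, not part of the patch: a sketch of how a pin controller driver might describe its pins with PINCTRL_PIN() and publish a GPIO range using struct pinctrl_gpio_range, both defined above. The pin names, counts and GPIO base are assumptions; the registration call is shown in the next sketch.]

#include <linux/pinctrl/pinctrl.h>

static const struct pinctrl_pin_desc foo_pins[] = {
	PINCTRL_PIN(0, "A1"),
	PINCTRL_PIN(1, "A2"),
	PINCTRL_PIN(2, "B1"),
	PINCTRL_PIN(3, "B2"),
};

/* GPIOs 32..35 of an imaginary "foo-gpio" chip map onto pins 0..3 */
static struct pinctrl_gpio_range foo_gpio_range = {
	.name = "foo-gpio",
	.id = 0,
	.base = 32,
	.pin_base = 0,
	.npins = 4,
};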
+ */ +struct pinctrl_ops { + int (*get_groups_count) (struct pinctrl_dev *pctldev); + const char *(*get_group_name) (struct pinctrl_dev *pctldev, + unsigned selector); + int (*get_group_pins) (struct pinctrl_dev *pctldev, + unsigned selector, + const unsigned **pins, + unsigned *num_pins); + void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, + unsigned offset); + int (*dt_node_to_map) (struct pinctrl_dev *pctldev, + struct device_node *np_config, + struct pinctrl_map **map, unsigned *num_maps); + void (*dt_free_map) (struct pinctrl_dev *pctldev, + struct pinctrl_map *map, unsigned num_maps); +}; + +/** + * struct pinctrl_desc - pin controller descriptor, register this to pin + * control subsystem + * @name: name for the pin controller + * @pins: an array of pin descriptors describing all the pins handled by + * this pin controller + * @npins: number of descriptors in the array, usually just ARRAY_SIZE() + * of the pins field above + * @pctlops: pin control operation vtable, to support global concepts like + * grouping of pins, this is optional. + * @pmxops: pinmux operations vtable, if you support pinmuxing in your driver + * @confops: pin config operations vtable, if you support pin configuration in + * your driver + * @owner: module providing the pin controller, used for refcounting + * @num_custom_params: Number of driver-specific custom parameters to be parsed + * from the hardware description + * @custom_params: List of driver_specific custom parameters to be parsed from + * the hardware description + * @custom_conf_items: Information how to print @params in debugfs, must be + * the same size as the @custom_params, i.e. @num_custom_params + */ +struct pinctrl_desc { + const char *name; + const struct pinctrl_pin_desc *pins; + unsigned int npins; + const struct pinctrl_ops *pctlops; + const struct pinmux_ops *pmxops; + const struct pinconf_ops *confops; + struct module *owner; +#ifdef CONFIG_GENERIC_PINCONF + unsigned int num_custom_params; + const struct pinconf_generic_params *custom_params; + const struct pin_config_item *custom_conf_items; +#endif +}; + +/* External interface to pin controller */ + +extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, + struct device *dev, void *driver_data, + struct pinctrl_dev **pctldev); +extern int pinctrl_enable(struct pinctrl_dev *pctldev); + +/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */ +extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, + struct device *dev, void *driver_data); + +extern void pinctrl_unregister(struct pinctrl_dev *pctldev); + +extern int devm_pinctrl_register_and_init(struct device *dev, + struct pinctrl_desc *pctldesc, + void *driver_data, + struct pinctrl_dev **pctldev); + +/* Please use devm_pinctrl_register_and_init() instead */ +extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev, + struct pinctrl_desc *pctldesc, + void *driver_data); + +extern void devm_pinctrl_unregister(struct device *dev, + struct pinctrl_dev *pctldev); + +extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin); +extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range); +extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *ranges, + unsigned nranges); +extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range); + +extern struct pinctrl_dev *pinctrl_find_and_add_gpio_range(const char *devname, + struct 
pinctrl_gpio_range *range); +extern struct pinctrl_gpio_range * +pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev, + unsigned int pin); +extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev, + const char *pin_group, const unsigned **pins, + unsigned *num_pins); + +#ifdef CONFIG_OF +extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np); +#else +static inline +struct pinctrl_dev *of_pinctrl_get(struct device_node *np) +{ + return NULL; +} +#endif /* CONFIG_OF */ + +extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev); +extern const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev); +extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev); +#else + +struct pinctrl_dev; + +/* Sufficiently stupid default functions when pinctrl is not in use */ +static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin) +{ + return pin >= 0; +} + +#endif /* !CONFIG_PINCTRL */ + +#endif /* __LINUX_PINCTRL_PINCTRL_H */ diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h new file mode 100644 index 000000000..ace60d775 --- /dev/null +++ b/include/linux/pinctrl/pinmux.h @@ -0,0 +1,90 @@ +/* + * Interface the pinmux subsystem + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * Based on bits of regulator core, gpio core and clk core + * + * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_PINMUX_H +#define __LINUX_PINCTRL_PINMUX_H + +#include +#include +#include + +#ifdef CONFIG_PINMUX + +struct pinctrl_dev; + +/** + * struct pinmux_ops - pinmux operations, to be implemented by pin controller + * drivers that support pinmuxing + * @request: called by the core to see if a certain pin can be made + * available for muxing. This is called by the core to acquire the pins + * before selecting any actual mux setting across a function. The driver + * is allowed to answer "no" by returning a negative error code + * @free: the reverse function of the request() callback, frees a pin after + * being requested + * @get_functions_count: returns number of selectable named functions available + * in this pinmux driver + * @get_function_name: return the function name of the muxing selector, + * called by the core to figure out which mux setting it shall map a + * certain device to + * @get_function_groups: return an array of groups names (in turn + * referencing pins) connected to a certain function selector. The group + * name can be used with the generic @pinctrl_ops to retrieve the + * actual pins affected. The applicable groups will be returned in + * @groups and the number of groups in @num_groups + * @set_mux: enable a certain muxing function with a certain pin group. The + * driver does not need to figure out whether enabling this function + * conflicts some other use of the pins in that group, such collisions + * are handled by the pinmux subsystem. The @func_selector selects a + * certain function whereas @group_selector selects a certain set of pins + * to be used. On simple controllers the latter argument may be ignored + * @gpio_request_enable: requests and enables GPIO on a certain pin. + * Implement this only if you can mux every pin individually as GPIO. The + * affected GPIO range is passed along with an offset(pin number) into that + * specific GPIO range - function selectors and pin groups are orthogonal + * to this, the core will however make sure the pins do not collide. 
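[Editorial aside, not part of the patch: a sketch of filling in struct pinctrl_desc and using the two-step registration declared above, devm_pinctrl_register_and_init() followed by pinctrl_enable(). It reuses foo_pins and foo_gpio_range from the previous sketch; the pctlops/pmxops are left as empty placeholders that a real driver must implement (the pinmux side is sketched after the pinmux_ops definition below).]

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>

static const struct pinctrl_ops foo_pctlops;	/* group ops, elided here */
static const struct pinmux_ops foo_pmxops;	/* mux ops, see next sketch */

static struct pinctrl_desc foo_desc = {
	.name = "pinctrl-foo",
	.pins = foo_pins,			/* from the previous sketch */
	.npins = ARRAY_SIZE(foo_pins),
	.pctlops = &foo_pctlops,
	.pmxops = &foo_pmxops,
	.owner = THIS_MODULE,
};

static int foo_pinctrl_probe(struct platform_device *pdev)
{
	struct pinctrl_dev *pctldev;
	int ret;

	ret = devm_pinctrl_register_and_init(&pdev->dev, &foo_desc, NULL,
					     &pctldev);
	if (ret)
		return ret;

	pinctrl_add_gpio_range(pctldev, &foo_gpio_range);

	/* Advertise the controller only once it can service requests */
	return pinctrl_enable(pctldev);
}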
+ * @gpio_disable_free: free up GPIO muxing on a certain pin, the reverse of + * @gpio_request_enable + * @gpio_set_direction: Since controllers may need different configurations + * depending on whether the GPIO is configured as input or output, + * a direction selector function may be implemented as a backing + * to the GPIO controllers that need pin muxing. + * @strict: do not allow simultaneous use of the same pin for GPIO and another + * function. Check both gpio_owner and mux_owner strictly before approving + * the pin request. + */ +struct pinmux_ops { + int (*request) (struct pinctrl_dev *pctldev, unsigned offset); + int (*free) (struct pinctrl_dev *pctldev, unsigned offset); + int (*get_functions_count) (struct pinctrl_dev *pctldev); + const char *(*get_function_name) (struct pinctrl_dev *pctldev, + unsigned selector); + int (*get_function_groups) (struct pinctrl_dev *pctldev, + unsigned selector, + const char * const **groups, + unsigned *num_groups); + int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector, + unsigned group_selector); + int (*gpio_request_enable) (struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned offset); + void (*gpio_disable_free) (struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned offset); + int (*gpio_set_direction) (struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned offset, + bool input); + bool strict; +}; + +#endif /* CONFIG_PINMUX */ + +#endif /* __LINUX_PINCTRL_PINMUX_H */ diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h new file mode 100644 index 000000000..7897a3cc0 --- /dev/null +++ b/include/linux/pipe_fs_i.h @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PIPE_FS_I_H +#define _LINUX_PIPE_FS_I_H + +#define PIPE_DEF_BUFFERS 16 + +#define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */ +#define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */ +#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ +#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ + +/** + * struct pipe_buffer - a linux kernel pipe buffer + * @page: the page containing the data for the pipe buffer + * @offset: offset of data inside the @page + * @len: length of data inside the @page + * @ops: operations associated with this buffer. See @pipe_buf_operations. + * @flags: pipe buffer flags. See above. + * @private: private data owned by the ops. 
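[Editorial aside, not part of the patch: a minimal pinmux_ops sketch matching the callbacks documented above, with one invented function "i2c0" selectable on one invented group "i2c0_grp". The register poking is left as a comment; a real driver fills it in for its hardware.]

#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>

static const char * const foo_i2c0_groups[] = { "i2c0_grp" };

static int foo_get_functions_count(struct pinctrl_dev *pctldev)
{
	return 1;
}

static const char *foo_get_function_name(struct pinctrl_dev *pctldev,
					 unsigned selector)
{
	return "i2c0";
}

static int foo_get_function_groups(struct pinctrl_dev *pctldev,
				   unsigned selector,
				   const char * const **groups,
				   unsigned *num_groups)
{
	*groups = foo_i2c0_groups;
	*num_groups = ARRAY_SIZE(foo_i2c0_groups);
	return 0;
}

static int foo_set_mux(struct pinctrl_dev *pctldev, unsigned func_selector,
		       unsigned group_selector)
{
	/* Program the (imaginary) mux register for this group here */
	return 0;
}

static const struct pinmux_ops foo_pmxops = {
	.get_functions_count = foo_get_functions_count,
	.get_function_name = foo_get_function_name,
	.get_function_groups = foo_get_function_groups,
	.set_mux = foo_set_mux,
	.strict = true,
};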
+ **/ +struct pipe_buffer { + struct page *page; + unsigned int offset, len; + const struct pipe_buf_operations *ops; + unsigned int flags; + unsigned long private; +}; + +/** + * struct pipe_inode_info - a linux kernel pipe + * @mutex: mutex protecting the whole thing + * @wait: reader/writer wait point in case of empty/full pipe + * @nrbufs: the number of non-empty pipe buffers in this pipe + * @buffers: total number of buffers (should be a power of 2) + * @curbuf: the current pipe buffer entry + * @tmp_page: cached released page + * @readers: number of current readers of this pipe + * @writers: number of current writers of this pipe + * @files: number of struct file referring this pipe (protected by ->i_lock) + * @waiting_writers: number of writers blocked waiting for room + * @r_counter: reader counter + * @w_counter: writer counter + * @fasync_readers: reader side fasync + * @fasync_writers: writer side fasync + * @bufs: the circular array of pipe buffers + * @user: the user who created this pipe + **/ +struct pipe_inode_info { + struct mutex mutex; + wait_queue_head_t wait; + unsigned int nrbufs, curbuf, buffers; + unsigned int readers; + unsigned int writers; + unsigned int files; + unsigned int waiting_writers; + unsigned int r_counter; + unsigned int w_counter; + struct page *tmp_page; + struct fasync_struct *fasync_readers; + struct fasync_struct *fasync_writers; + struct pipe_buffer *bufs; + struct user_struct *user; +}; + +/* + * Note on the nesting of these functions: + * + * ->confirm() + * ->steal() + * + * That is, ->steal() must be called on a confirmed buffer. + * See below for the meaning of each operation. Also see kerneldoc + * in fs/pipe.c for the pipe and generic variants of these hooks. + */ +struct pipe_buf_operations { + /* + * This is set to 1, if the generic pipe read/write may coalesce + * data into an existing buffer. If this is set to 0, a new pipe + * page segment is always used for new data. + */ + int can_merge; + + /* + * ->confirm() verifies that the data in the pipe buffer is there + * and that the contents are good. If the pages in the pipe belong + * to a file system, we may need to wait for IO completion in this + * hook. Returns 0 for good, or a negative error value in case of + * error. + */ + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + + /* + * When the contents of this pipe buffer has been completely + * consumed by a reader, ->release() is called. + */ + void (*release)(struct pipe_inode_info *, struct pipe_buffer *); + + /* + * Attempt to take ownership of the pipe buffer and its contents. + * ->steal() returns 0 for success, in which case the contents + * of the pipe (the buf->page) is locked and now completely owned + * by the caller. The page may then be transferred to a different + * mapping, the most often used case is insertion into different + * file address space cache. + */ + int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); + + /* + * Get a reference to the pipe buffer. + */ + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + +/** + * pipe_buf_get - get a reference to a pipe_buffer + * @pipe: the pipe that the buffer belongs to + * @buf: the buffer to get a reference to + * + * Return: %true if the reference was successfully obtained. 
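[Editorial aside, not part of the patch: a sketch of the ->confirm() before ->steal() nesting described in the comment above, written against the raw pipe_buf_operations pointers. The foo_* name is a placeholder; real in-tree users go through the pipe_buf_confirm()/pipe_buf_steal() wrappers defined just below.]

#include <linux/pipe_fs_i.h>

static int foo_try_steal_page(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	int ret;

	/* Make sure the buffer's data is present and good (may wait for IO) */
	ret = buf->ops->confirm(pipe, buf);
	if (ret)
		return ret;

	/* Only a confirmed buffer may be stolen */
	ret = buf->ops->steal(pipe, buf);
	if (ret)
		return ret;

	/* On success buf->page is locked and now owned by the caller */
	return 0;
}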
+ */ +static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + return buf->ops->get(pipe, buf); +} + +/** + * pipe_buf_release - put a reference to a pipe_buffer + * @pipe: the pipe that the buffer belongs to + * @buf: the buffer to put a reference to + */ +static inline void pipe_buf_release(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + const struct pipe_buf_operations *ops = buf->ops; + + buf->ops = NULL; + ops->release(pipe, buf); +} + +/** + * pipe_buf_confirm - verify contents of the pipe buffer + * @pipe: the pipe that the buffer belongs to + * @buf: the buffer to confirm + */ +static inline int pipe_buf_confirm(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + return buf->ops->confirm(pipe, buf); +} + +/** + * pipe_buf_steal - attempt to take ownership of a pipe_buffer + * @pipe: the pipe that the buffer belongs to + * @buf: the buffer to attempt to steal + */ +static inline int pipe_buf_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + return buf->ops->steal(pipe, buf); +} + +/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual + memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ +#define PIPE_SIZE PAGE_SIZE + +/* Pipe lock and unlock operations */ +void pipe_lock(struct pipe_inode_info *); +void pipe_unlock(struct pipe_inode_info *); +void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); + +extern unsigned int pipe_max_size; +extern unsigned long pipe_user_pages_hard; +extern unsigned long pipe_user_pages_soft; + +/* Drop the inode semaphore and wait for a pipe event, atomically */ +void pipe_wait(struct pipe_inode_info *pipe); + +struct pipe_inode_info *alloc_pipe_info(void); +void free_pipe_info(struct pipe_inode_info *); + +/* Generic pipe buffer ops functions */ +bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); +int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); +int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); +int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *); +void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); +void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); + +extern const struct pipe_buf_operations nosteal_pipe_buf_ops; + +/* for F_SETPIPE_SZ and F_GETPIPE_SZ */ +long pipe_fcntl(struct file *, unsigned int, unsigned long arg); +struct pipe_inode_info *get_pipe_info(struct file *file); + +int create_pipe_files(struct file **, int); +unsigned int round_pipe_size(unsigned long size); + +#endif diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h new file mode 100644 index 000000000..2955ba976 --- /dev/null +++ b/include/linux/pkeys.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PKEYS_H +#define _LINUX_PKEYS_H + +#include + +#ifdef CONFIG_ARCH_HAS_PKEYS +#include +#else /* ! 
CONFIG_ARCH_HAS_PKEYS */ +#define arch_max_pkey() (1) +#define execute_only_pkey(mm) (0) +#define arch_override_mprotect_pkey(vma, prot, pkey) (0) +#define PKEY_DEDICATED_EXECUTE_ONLY 0 +#define ARCH_VM_PKEY_FLAGS 0 + +static inline int vma_pkey(struct vm_area_struct *vma) +{ + return 0; +} + +static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) +{ + return (pkey == 0); +} + +static inline int mm_pkey_alloc(struct mm_struct *mm) +{ + return -1; +} + +static inline int mm_pkey_free(struct mm_struct *mm, int pkey) +{ + return -EINVAL; +} + +static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, + unsigned long init_val) +{ + return 0; +} + +static inline bool arch_pkeys_enabled(void) +{ + return false; +} + +static inline void copy_init_pkru_to_fpregs(void) +{ +} + +#endif /* ! CONFIG_ARCH_HAS_PKEYS */ + +#endif /* _LINUX_PKEYS_H */ diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h new file mode 100644 index 000000000..174601554 --- /dev/null +++ b/include/linux/pktcdvd.h @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2000 Jens Axboe + * Copyright (C) 2001-2004 Peter Osterlund + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and + * DVD-RW devices. + * + */ +#ifndef __PKTCDVD_H +#define __PKTCDVD_H + +#include +#include +#include +#include +#include +#include +#include + +/* default bio write queue congestion marks */ +#define PKT_WRITE_CONGESTION_ON 10000 +#define PKT_WRITE_CONGESTION_OFF 9000 + + +struct packet_settings +{ + __u32 size; /* packet size in (512 byte) sectors */ + __u8 fp; /* fixed packets */ + __u8 link_loss; /* the rest is specified + * as per Mt Fuji */ + __u8 write_type; + __u8 track_mode; + __u8 block_mode; +}; + +/* + * Very crude stats for now + */ +struct packet_stats +{ + unsigned long pkt_started; + unsigned long pkt_ended; + unsigned long secs_w; + unsigned long secs_rg; + unsigned long secs_r; +}; + +struct packet_cdrw +{ + struct list_head pkt_free_list; + struct list_head pkt_active_list; + spinlock_t active_list_lock; /* Serialize access to pkt_active_list */ + struct task_struct *thread; + atomic_t pending_bios; +}; + +/* + * Switch to high speed reading after reading this many kilobytes + * with no interspersed writes. 
+ */ +#define HI_SPEED_SWITCH 512 + +struct packet_iosched +{ + atomic_t attention; /* Set to non-zero when queue processing is needed */ + int writing; /* Non-zero when writing, zero when reading */ + spinlock_t lock; /* Protecting read/write queue manipulations */ + struct bio_list read_queue; + struct bio_list write_queue; + sector_t last_write; /* The sector where the last write ended */ + int successive_reads; +}; + +/* + * 32 buffers of 2048 bytes + */ +#if (PAGE_SIZE % CD_FRAMESIZE) != 0 +#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE" +#endif +#define PACKET_MAX_SIZE 128 +#define FRAMES_PER_PAGE (PAGE_SIZE / CD_FRAMESIZE) +#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9) + +enum packet_data_state { + PACKET_IDLE_STATE, /* Not used at the moment */ + PACKET_WAITING_STATE, /* Waiting for more bios to arrive, so */ + /* we don't have to do as much */ + /* data gathering */ + PACKET_READ_WAIT_STATE, /* Waiting for reads to fill in holes */ + PACKET_WRITE_WAIT_STATE, /* Waiting for the write to complete */ + PACKET_RECOVERY_STATE, /* Recover after read/write errors */ + PACKET_FINISHED_STATE, /* After write has finished */ + + PACKET_NUM_STATES /* Number of possible states */ +}; + +/* + * Information needed for writing a single packet + */ +struct pktcdvd_device; + +struct packet_data +{ + struct list_head list; + + spinlock_t lock; /* Lock protecting state transitions and */ + /* orig_bios list */ + + struct bio_list orig_bios; /* Original bios passed to pkt_make_request */ + /* that will be handled by this packet */ + int write_size; /* Total size of all bios in the orig_bios */ + /* list, measured in number of frames */ + + struct bio *w_bio; /* The bio we will send to the real CD */ + /* device once we have all data for the */ + /* packet we are going to write */ + sector_t sector; /* First sector in this packet */ + int frames; /* Number of frames in this packet */ + + enum packet_data_state state; /* Current state */ + atomic_t run_sm; /* Incremented whenever the state */ + /* machine needs to be run */ + long sleep_time; /* Set this to non-zero to make the state */ + /* machine run after this many jiffies. */ + + atomic_t io_wait; /* Number of pending IO operations */ + atomic_t io_errors; /* Number of read/write errors during IO */ + + struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */ + struct page *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE]; + + int cache_valid; /* If non-zero, the data for the zone defined */ + /* by the sector variable is completely cached */ + /* in the pages[] vector. 
*/ + + int id; /* ID number for debugging */ + struct pktcdvd_device *pd; +}; + +struct pkt_rb_node { + struct rb_node rb_node; + struct bio *bio; +}; + +struct packet_stacked_data +{ + struct bio *bio; /* Original read request bio */ + struct pktcdvd_device *pd; +}; +#define PSD_POOL_SIZE 64 + +struct pktcdvd_kobj +{ + struct kobject kobj; + struct pktcdvd_device *pd; +}; +#define to_pktcdvdkobj(_k) \ + ((struct pktcdvd_kobj*)container_of(_k,struct pktcdvd_kobj,kobj)) + +struct pktcdvd_device +{ + struct block_device *bdev; /* dev attached */ + dev_t pkt_dev; /* our dev */ + char name[20]; + struct packet_settings settings; + struct packet_stats stats; + int refcnt; /* Open count */ + int write_speed; /* current write speed, kB/s */ + int read_speed; /* current read speed, kB/s */ + unsigned long offset; /* start offset */ + __u8 mode_offset; /* 0 / 8 */ + __u8 type; + unsigned long flags; + __u16 mmc3_profile; + __u32 nwa; /* next writable address */ + __u32 lra; /* last recorded address */ + struct packet_cdrw cdrw; + wait_queue_head_t wqueue; + + spinlock_t lock; /* Serialize access to bio_queue */ + struct rb_root bio_queue; /* Work queue of bios we need to handle */ + int bio_queue_size; /* Number of nodes in bio_queue */ + sector_t current_sector; /* Keep track of where the elevator is */ + atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */ + /* needs to be run. */ + mempool_t rb_pool; /* mempool for pkt_rb_node allocations */ + + struct packet_iosched iosched; + struct gendisk *disk; + + int write_congestion_off; + int write_congestion_on; + + struct device *dev; /* sysfs pktcdvd[0-7] dev */ + struct pktcdvd_kobj *kobj_stat; /* sysfs pktcdvd[0-7]/stat/ */ + struct pktcdvd_kobj *kobj_wqueue; /* sysfs pktcdvd[0-7]/write_queue/ */ + + struct dentry *dfs_d_root; /* debugfs: devname directory */ + struct dentry *dfs_f_info; /* debugfs: info file */ +}; + +#endif /* __PKTCDVD_H */ diff --git a/include/linux/pl320-ipc.h b/include/linux/pl320-ipc.h new file mode 100644 index 000000000..5161f63ec --- /dev/null +++ b/include/linux/pl320-ipc.h @@ -0,0 +1,17 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +int pl320_ipc_transmit(u32 *data); +int pl320_ipc_register_notifier(struct notifier_block *nb); +int pl320_ipc_unregister_notifier(struct notifier_block *nb); diff --git a/include/linux/platform_data/ad5449.h b/include/linux/platform_data/ad5449.h new file mode 100644 index 000000000..bd712bd4b --- /dev/null +++ b/include/linux/platform_data/ad5449.h @@ -0,0 +1,40 @@ +/* + * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog + * Converter driver. + * + * Copyright 2012 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. + */ + +#ifndef __LINUX_PLATFORM_DATA_AD5449_H__ +#define __LINUX_PLATFORM_DATA_AD5449_H__ + +/** + * enum ad5449_sdo_mode - AD5449 SDO pin configuration + * @AD5449_SDO_DRIVE_FULL: Drive the SDO pin with full strength. 
+ * @AD5449_SDO_DRIVE_WEAK: Drive the SDO pin with not full strength. + * @AD5449_SDO_OPEN_DRAIN: Operate the SDO pin in open-drain mode. + * @AD5449_SDO_DISABLED: Disable the SDO pin, in this mode it is not possible to + * read back from the device. + */ +enum ad5449_sdo_mode { + AD5449_SDO_DRIVE_FULL = 0x0, + AD5449_SDO_DRIVE_WEAK = 0x1, + AD5449_SDO_OPEN_DRAIN = 0x2, + AD5449_SDO_DISABLED = 0x3, +}; + +/** + * struct ad5449_platform_data - Platform data for the ad5449 DAC driver + * @sdo_mode: SDO pin mode + * @hardware_clear_to_midscale: Whether asserting the hardware CLR pin sets the + * outputs to midscale (true) or to zero scale(false). + */ +struct ad5449_platform_data { + enum ad5449_sdo_mode sdo_mode; + bool hardware_clear_to_midscale; +}; + +#endif diff --git a/include/linux/platform_data/ad5755.h b/include/linux/platform_data/ad5755.h new file mode 100644 index 000000000..a5a1cb751 --- /dev/null +++ b/include/linux/platform_data/ad5755.h @@ -0,0 +1,103 @@ +/* + * Copyright 2012 Analog Devices Inc. + * + * Licensed under the GPL-2. + */ +#ifndef __LINUX_PLATFORM_DATA_AD5755_H__ +#define __LINUX_PLATFORM_DATA_AD5755_H__ + +enum ad5755_mode { + AD5755_MODE_VOLTAGE_0V_5V = 0, + AD5755_MODE_VOLTAGE_0V_10V = 1, + AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2, + AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3, + AD5755_MODE_CURRENT_4mA_20mA = 4, + AD5755_MODE_CURRENT_0mA_20mA = 5, + AD5755_MODE_CURRENT_0mA_24mA = 6, +}; + +enum ad5755_dc_dc_phase { + AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0, + AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1, + AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2, + AD5755_DC_DC_PHASE_90_DEGREE = 3, +}; + +enum ad5755_dc_dc_freq { + AD5755_DC_DC_FREQ_250kHZ = 0, + AD5755_DC_DC_FREQ_410kHZ = 1, + AD5755_DC_DC_FREQ_650kHZ = 2, +}; + +enum ad5755_dc_dc_maxv { + AD5755_DC_DC_MAXV_23V = 0, + AD5755_DC_DC_MAXV_24V5 = 1, + AD5755_DC_DC_MAXV_27V = 2, + AD5755_DC_DC_MAXV_29V5 = 3, +}; + +enum ad5755_slew_rate { + AD5755_SLEW_RATE_64k = 0, + AD5755_SLEW_RATE_32k = 1, + AD5755_SLEW_RATE_16k = 2, + AD5755_SLEW_RATE_8k = 3, + AD5755_SLEW_RATE_4k = 4, + AD5755_SLEW_RATE_2k = 5, + AD5755_SLEW_RATE_1k = 6, + AD5755_SLEW_RATE_500 = 7, + AD5755_SLEW_RATE_250 = 8, + AD5755_SLEW_RATE_125 = 9, + AD5755_SLEW_RATE_64 = 10, + AD5755_SLEW_RATE_32 = 11, + AD5755_SLEW_RATE_16 = 12, + AD5755_SLEW_RATE_8 = 13, + AD5755_SLEW_RATE_4 = 14, + AD5755_SLEW_RATE_0_5 = 15, +}; + +enum ad5755_slew_step_size { + AD5755_SLEW_STEP_SIZE_1 = 0, + AD5755_SLEW_STEP_SIZE_2 = 1, + AD5755_SLEW_STEP_SIZE_4 = 2, + AD5755_SLEW_STEP_SIZE_8 = 3, + AD5755_SLEW_STEP_SIZE_16 = 4, + AD5755_SLEW_STEP_SIZE_32 = 5, + AD5755_SLEW_STEP_SIZE_64 = 6, + AD5755_SLEW_STEP_SIZE_128 = 7, + AD5755_SLEW_STEP_SIZE_256 = 8, +}; + +/** + * struct ad5755_platform_data - AD5755 DAC driver platform data + * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter + * compensation register is used. + * @dc_dc_phase: DC-DC converter phase. + * @dc_dc_freq: DC-DC converter frequency. + * @dc_dc_maxv: DC-DC maximum allowed boost voltage. + * @dac.mode: The mode to be used for the DAC output. + * @dac.ext_current_sense_resistor: Whether an external current sense resistor + * is used. + * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange. + * @dac.slew.enable: Whether to enable digital slew. + * @dac.slew.rate: Slew rate of the digital slew. + * @dac.slew.step_size: Slew step size of the digital slew. 
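[Editorial aside, not part of the patch: a board-file sketch wiring the ad5449 platform data above to an SPI device. The SPI bus number, chip select, clock rate and the "foo" naming are assumed board wiring, not anything mandated by the header.]

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/ad5449.h>

static const struct ad5449_platform_data foo_ad5449_pdata = {
	.sdo_mode = AD5449_SDO_DRIVE_FULL,
	.hardware_clear_to_midscale = true,
};

static struct spi_board_info foo_spi_devs[] __initdata = {
	{
		.modalias = "ad5449",
		.max_speed_hz = 1000000,	/* assumed board wiring */
		.bus_num = 0,			/* assumed board wiring */
		.chip_select = 1,		/* assumed board wiring */
		.platform_data = &foo_ad5449_pdata,
	},
};

/* Registered from board init, e.g.
 * spi_register_board_info(foo_spi_devs, ARRAY_SIZE(foo_spi_devs)); */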
+ **/ +struct ad5755_platform_data { + bool ext_dc_dc_compenstation_resistor; + enum ad5755_dc_dc_phase dc_dc_phase; + enum ad5755_dc_dc_freq dc_dc_freq; + enum ad5755_dc_dc_maxv dc_dc_maxv; + + struct { + enum ad5755_mode mode; + bool ext_current_sense_resistor; + bool enable_voltage_overrange; + struct { + bool enable; + enum ad5755_slew_rate rate; + enum ad5755_slew_step_size step_size; + } slew; + } dac[4]; +}; + +#endif diff --git a/include/linux/platform_data/ad5761.h b/include/linux/platform_data/ad5761.h new file mode 100644 index 000000000..7bd8ed7d9 --- /dev/null +++ b/include/linux/platform_data/ad5761.h @@ -0,0 +1,44 @@ +/* + * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter + * + * Copyright 2016 Qtechnology A/S + * 2016 Ricardo Ribalda + * + * Licensed under the GPL-2. + */ +#ifndef __LINUX_PLATFORM_DATA_AD5761_H__ +#define __LINUX_PLATFORM_DATA_AD5761_H__ + +/** + * enum ad5761_voltage_range - Voltage range the AD5761 is configured for. + * @AD5761_VOLTAGE_RANGE_M10V_10V: -10V to 10V + * @AD5761_VOLTAGE_RANGE_0V_10V: 0V to 10V + * @AD5761_VOLTAGE_RANGE_M5V_5V: -5V to 5V + * @AD5761_VOLTAGE_RANGE_0V_5V: 0V to 5V + * @AD5761_VOLTAGE_RANGE_M2V5_7V5: -2.5V to 7.5V + * @AD5761_VOLTAGE_RANGE_M3V_3V: -3V to 3V + * @AD5761_VOLTAGE_RANGE_0V_16V: 0V to 16V + * @AD5761_VOLTAGE_RANGE_0V_20V: 0V to 20V + */ + +enum ad5761_voltage_range { + AD5761_VOLTAGE_RANGE_M10V_10V, + AD5761_VOLTAGE_RANGE_0V_10V, + AD5761_VOLTAGE_RANGE_M5V_5V, + AD5761_VOLTAGE_RANGE_0V_5V, + AD5761_VOLTAGE_RANGE_M2V5_7V5, + AD5761_VOLTAGE_RANGE_M3V_3V, + AD5761_VOLTAGE_RANGE_0V_16V, + AD5761_VOLTAGE_RANGE_0V_20V, +}; + +/** + * struct ad5761_platform_data - AD5761 DAC driver platform data + * @voltage_range: Voltage range the AD5761 is configured for + */ + +struct ad5761_platform_data { + enum ad5761_voltage_range voltage_range; +}; + +#endif diff --git a/include/linux/platform_data/ad7266.h b/include/linux/platform_data/ad7266.h new file mode 100644 index 000000000..eabfdcb26 --- /dev/null +++ b/include/linux/platform_data/ad7266.h @@ -0,0 +1,54 @@ +/* + * AD7266/65 SPI ADC driver + * + * Copyright 2012 Analog Devices Inc. + * + * Licensed under the GPL-2. + */ + +#ifndef __IIO_ADC_AD7266_H__ +#define __IIO_ADC_AD7266_H__ + +/** + * enum ad7266_range - AD7266 reference voltage range + * @AD7266_RANGE_VREF: Device is configured for input range 0V - VREF + * (RANGE pin set to low) + * @AD7266_RANGE_2VREF: Device is configured for input range 0V - 2VREF + * (RANGE pin set to high) + */ +enum ad7266_range { + AD7266_RANGE_VREF, + AD7266_RANGE_2VREF, +}; + +/** + * enum ad7266_mode - AD7266 sample mode + * @AD7266_MODE_DIFF: Device is configured for full differential mode + * (SGL/DIFF pin set to low, AD0 pin set to low) + * @AD7266_MODE_PSEUDO_DIFF: Device is configured for pseudo differential mode + * (SGL/DIFF pin set to low, AD0 pin set to high) + * @AD7266_MODE_SINGLE_ENDED: Device is configured for single-ended mode + * (SGL/DIFF pin set to high) + */ +enum ad7266_mode { + AD7266_MODE_DIFF, + AD7266_MODE_PSEUDO_DIFF, + AD7266_MODE_SINGLE_ENDED, +}; + +/** + * struct ad7266_platform_data - Platform data for the AD7266 driver + * @range: Reference voltage range the device is configured for + * @mode: Sample mode the device is configured for + * @fixed_addr: Whether the address pins are hard-wired + * @addr_gpios: GPIOs used for controlling the address pins, only used if + * fixed_addr is set to false. 
+ */ +struct ad7266_platform_data { + enum ad7266_range range; + enum ad7266_mode mode; + bool fixed_addr; + unsigned int addr_gpios[3]; +}; + +#endif diff --git a/include/linux/platform_data/ad7291.h b/include/linux/platform_data/ad7291.h new file mode 100644 index 000000000..b1fd1530c --- /dev/null +++ b/include/linux/platform_data/ad7291.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __IIO_AD7291_H__ +#define __IIO_AD7291_H__ + +/** + * struct ad7291_platform_data - AD7291 platform data + * @use_external_ref: Whether to use an external or internal reference voltage + */ +struct ad7291_platform_data { + bool use_external_ref; +}; + +#endif diff --git a/include/linux/platform_data/ad7298.h b/include/linux/platform_data/ad7298.h new file mode 100644 index 000000000..fbf8adf13 --- /dev/null +++ b/include/linux/platform_data/ad7298.h @@ -0,0 +1,20 @@ +/* + * AD7298 SPI ADC driver + * + * Copyright 2011 Analog Devices Inc. + * + * Licensed under the GPL-2. + */ + +#ifndef __LINUX_PLATFORM_DATA_AD7298_H__ +#define __LINUX_PLATFORM_DATA_AD7298_H__ + +/** + * struct ad7298_platform_data - Platform data for the ad7298 ADC driver + * @ext_ref: Whether to use an external reference voltage. + **/ +struct ad7298_platform_data { + bool ext_ref; +}; + +#endif /* IIO_ADC_AD7298_H_ */ diff --git a/include/linux/platform_data/ad7303.h b/include/linux/platform_data/ad7303.h new file mode 100644 index 000000000..de6a7a6b4 --- /dev/null +++ b/include/linux/platform_data/ad7303.h @@ -0,0 +1,21 @@ +/* + * Analog Devices AD7303 DAC driver + * + * Copyright 2013 Analog Devices Inc. + * + * Licensed under the GPL-2. + */ + +#ifndef __IIO_ADC_AD7303_H__ +#define __IIO_ADC_AD7303_H__ + +/** + * struct ad7303_platform_data - AD7303 platform data + * @use_external_ref: If set to true use an external voltage reference connected + * to the REF pin, otherwise use the internal reference derived from Vdd. + */ +struct ad7303_platform_data { + bool use_external_ref; +}; + +#endif diff --git a/include/linux/platform_data/ad7791.h b/include/linux/platform_data/ad7791.h new file mode 100644 index 000000000..cc7533049 --- /dev/null +++ b/include/linux/platform_data/ad7791.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PLATFORM_DATA_AD7791__ +#define __LINUX_PLATFORM_DATA_AD7791__ + +/** + * struct ad7791_platform_data - AD7791 device platform data + * @buffered: If set to true configure the device for buffered input mode. + * @burnout_current: If set to true the 100mA burnout current is enabled. + * @unipolar: If set to true sample in unipolar mode, if set to false sample in + * bipolar mode. + */ +struct ad7791_platform_data { + bool buffered; + bool burnout_current; + bool unipolar; +}; + +#endif diff --git a/include/linux/platform_data/ad7793.h b/include/linux/platform_data/ad7793.h new file mode 100644 index 000000000..7ea6751aa --- /dev/null +++ b/include/linux/platform_data/ad7793.h @@ -0,0 +1,112 @@ +/* + * AD7792/AD7793 SPI ADC driver + * + * Copyright 2011 Analog Devices Inc. + * + * Licensed under the GPL-2. + */ +#ifndef __LINUX_PLATFORM_DATA_AD7793_H__ +#define __LINUX_PLATFORM_DATA_AD7793_H__ + +/** + * enum ad7793_clock_source - AD7793 clock source selection + * @AD7793_CLK_SRC_INT: Internal 64 kHz clock, not available at the CLK pin. + * @AD7793_CLK_SRC_INT_CO: Internal 64 kHz clock, available at the CLK pin. + * @AD7793_CLK_SRC_EXT: Use external clock. + * @AD7793_CLK_SRC_EXT_DIV2: Use external clock divided by 2. 
+ */ +enum ad7793_clock_source { + AD7793_CLK_SRC_INT, + AD7793_CLK_SRC_INT_CO, + AD7793_CLK_SRC_EXT, + AD7793_CLK_SRC_EXT_DIV2, +}; + +/** + * enum ad7793_bias_voltage - AD7793 bias voltage selection + * @AD7793_BIAS_VOLTAGE_DISABLED: Bias voltage generator disabled + * @AD7793_BIAS_VOLTAGE_AIN1: Bias voltage connected to AIN1(-). + * @AD7793_BIAS_VOLTAGE_AIN2: Bias voltage connected to AIN2(-). + * @AD7793_BIAS_VOLTAGE_AIN3: Bias voltage connected to AIN3(-). + * Only valid for AD7795/AD7796. + */ +enum ad7793_bias_voltage { + AD7793_BIAS_VOLTAGE_DISABLED, + AD7793_BIAS_VOLTAGE_AIN1, + AD7793_BIAS_VOLTAGE_AIN2, + AD7793_BIAS_VOLTAGE_AIN3, +}; + +/** + * enum ad7793_refsel - AD7793 reference voltage selection + * @AD7793_REFSEL_REFIN1: External reference applied between REFIN1(+) + * and REFIN1(-). + * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+) and + * REFIN2(-). Only valid for AD7795/AD7796. + * @AD7793_REFSEL_INTERNAL: Internal 1.17 V reference. + */ +enum ad7793_refsel { + AD7793_REFSEL_REFIN1 = 0, + AD7793_REFSEL_REFIN2 = 1, + AD7793_REFSEL_INTERNAL = 2, +}; + +/** + * enum ad7793_current_source_direction - AD7793 excitation current direction + * @AD7793_IEXEC1_IOUT1_IEXEC2_IOUT2: Current source IEXC1 connected to pin + * IOUT1, current source IEXC2 connected to pin IOUT2. + * @AD7793_IEXEC1_IOUT2_IEXEC2_IOUT1: Current source IEXC2 connected to pin + * IOUT1, current source IEXC1 connected to pin IOUT2. + * @AD7793_IEXEC1_IEXEC2_IOUT1: Both current sources connected to pin IOUT1. + * Only valid when the current sources are set to 10 uA or 210 uA. + * @AD7793_IEXEC1_IEXEC2_IOUT2: Both current sources connected to pin IOUT2. + * Only valid when the current sources are set to 10 uA or 210 uA. + */ +enum ad7793_current_source_direction { + AD7793_IEXEC1_IOUT1_IEXEC2_IOUT2 = 0, + AD7793_IEXEC1_IOUT2_IEXEC2_IOUT1 = 1, + AD7793_IEXEC1_IEXEC2_IOUT1 = 2, + AD7793_IEXEC1_IEXEC2_IOUT2 = 3, +}; + +/** + * enum ad7793_excitation_current - AD7793 excitation current selection + * @AD7793_IX_DISABLED: Excitation current disabled. + * @AD7793_IX_10uA: Enable 10 micro-ampere excitation current. + * @AD7793_IX_210uA: Enable 210 micro-ampere excitation current. + * @AD7793_IX_1mA: Enable 1 milli-ampere excitation current. + */ +enum ad7793_excitation_current { + AD7793_IX_DISABLED = 0, + AD7793_IX_10uA = 1, + AD7793_IX_210uA = 2, + AD7793_IX_1mA = 3, +}; + +/** + * struct ad7793_platform_data - AD7793 platform data + * @clock_src: Clock source selection + * @burnout_current: If set to true the 100nA burnout current is enabled. + * @boost_enable: Enable boost for the bias voltage generator. + * @buffered: If set to true configure the device for buffered input mode. + * @unipolar: If set to true sample in unipolar mode, if set to false sample in + * bipolar mode.
+ * @refsel: Reference voltage selection + * @bias_voltage: Bias voltage selection + * @exitation_current: Excitation current selection + * @current_source_direction: Excitation current direction selection + */ +struct ad7793_platform_data { + enum ad7793_clock_source clock_src; + bool burnout_current; + bool boost_enable; + bool buffered; + bool unipolar; + + enum ad7793_refsel refsel; + enum ad7793_bias_voltage bias_voltage; + enum ad7793_excitation_current exitation_current; + enum ad7793_current_source_direction current_source_direction; +}; + +#endif /* IIO_ADC_AD7793_H_ */ diff --git a/include/linux/platform_data/ad7879.h b/include/linux/platform_data/ad7879.h new file mode 100644 index 000000000..6655cc845 --- /dev/null +++ b/include/linux/platform_data/ad7879.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* linux/platform_data/ad7879.h */ + +/* Touchscreen characteristics vary between boards and models. The + * platform_data for the device's "struct device" holds this information. + * + * It's OK if the min/max values are zero. + */ +struct ad7879_platform_data { + u16 model; /* 7879 */ + u16 x_plate_ohms; + u16 x_min, x_max; + u16 y_min, y_max; + u16 pressure_min, pressure_max; + + bool swap_xy; /* swap x and y axes */ + + /* [0..255] 0=OFF Starts at 1=550us and goes + * all the way to 9.440ms in steps of 35us. + */ + u8 pen_down_acc_interval; + /* [0..15] Starts at 0=128us and goes all the + * way to 4.096ms in steps of 128us. + */ + u8 first_conversion_delay; + /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */ + u8 acquisition_time; + /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */ + u8 averaging; + /* [0..3] Perform X measurements 0 = OFF, + * 1 = 4, 2 = 8, 3 = 16 (median > averaging) + */ + u8 median; + /* 1 = AUX/VBAT/GPIO export GPIO to gpiolib + * requires CONFIG_GPIOLIB + */ + bool gpio_export; + /* identifies the first GPIO number handled by this chip; + * or, if negative, requests dynamic ID allocation. + */ + s32 gpio_base; +}; diff --git a/include/linux/platform_data/ad7887.h b/include/linux/platform_data/ad7887.h new file mode 100644 index 000000000..1e06eac31 --- /dev/null +++ b/include/linux/platform_data/ad7887.h @@ -0,0 +1,26 @@ +/* + * AD7887 SPI ADC driver + * + * Copyright 2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ +#ifndef IIO_ADC_AD7887_H_ +#define IIO_ADC_AD7887_H_ + +/** + * struct ad7887_platform_data - AD7887 ADC driver platform data + * @en_dual: Whether to use dual channel mode. If set to true AIN1 becomes the + * second input channel, and Vref is internally connected to Vdd. If set to + * false the device is used in single channel mode and AIN1/Vref is used as + * VREF input. + * @use_onchip_ref: Whether to use the onchip reference. If set to true the + * internal 2.5V reference is used. If set to false a external reference is + * used. + */ +struct ad7887_platform_data { + bool en_dual; + bool use_onchip_ref; +}; + +#endif /* IIO_ADC_AD7887_H_ */ diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h new file mode 100644 index 000000000..9db1b905d --- /dev/null +++ b/include/linux/platform_data/adau17x1.h @@ -0,0 +1,109 @@ +/* + * Driver for ADAU1361/ADAU1461/ADAU1761/ADAU1961/ADAU1381/ADAU1781 codecs + * + * Copyright 2011-2014 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef __LINUX_PLATFORM_DATA_ADAU17X1_H__ +#define __LINUX_PLATFORM_DATA_ADAU17X1_H__ + +/** + * enum adau17x1_micbias_voltage - Microphone bias voltage + * @ADAU17X1_MICBIAS_0_90_AVDD: 0.9 * AVDD + * @ADAU17X1_MICBIAS_0_65_AVDD: 0.65 * AVDD + */ +enum adau17x1_micbias_voltage { + ADAU17X1_MICBIAS_0_90_AVDD = 0, + ADAU17X1_MICBIAS_0_65_AVDD = 1, +}; + +/** + * enum adau1761_digmic_jackdet_pin_mode - Configuration of the JACKDET/MICIN pin + * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE: Disable the pin + * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC: Configure the pin for usage as + * digital microphone input. + * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT: Configure the pin for jack + * insertion detection. + */ +enum adau1761_digmic_jackdet_pin_mode { + ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE, + ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC, + ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT, +}; + +/** + * enum adau1761_jackdetect_debounce_time - Jack insertion detection debounce time + * @ADAU1761_JACKDETECT_DEBOUNCE_5MS: 5 milliseconds + * @ADAU1761_JACKDETECT_DEBOUNCE_10MS: 10 milliseconds + * @ADAU1761_JACKDETECT_DEBOUNCE_20MS: 20 milliseconds + * @ADAU1761_JACKDETECT_DEBOUNCE_40MS: 40 milliseconds + */ +enum adau1761_jackdetect_debounce_time { + ADAU1761_JACKDETECT_DEBOUNCE_5MS = 0, + ADAU1761_JACKDETECT_DEBOUNCE_10MS = 1, + ADAU1761_JACKDETECT_DEBOUNCE_20MS = 2, + ADAU1761_JACKDETECT_DEBOUNCE_40MS = 3, +}; + +/** + * enum adau1761_output_mode - Output mode configuration + * @ADAU1761_OUTPUT_MODE_HEADPHONE: Headphone output + * @ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS: Capless headphone output + * @ADAU1761_OUTPUT_MODE_LINE: Line output + */ +enum adau1761_output_mode { + ADAU1761_OUTPUT_MODE_HEADPHONE, + ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS, + ADAU1761_OUTPUT_MODE_LINE, +}; + +/** + * struct adau1761_platform_data - ADAU1761 Codec driver platform data + * @input_differential: If true the input pins will be configured in + * differential mode. + * @lineout_mode: Output mode for the LOUT/ROUT pins + * @headphone_mode: Output mode for the LHP/RHP pins + * @digmic_jackdetect_pin_mode: JACKDET/MICIN pin configuration + * @jackdetect_debounce_time: Jack insertion detection debounce time. + * Note: This value will only be used if the JACKDET/MICIN pin is configured + * for jack insertion detection. + * @jackdetect_active_low: If true the jack insertion detection is active low. + * Otherwise it will be active high. + * @micbias_voltage: Microphone voltage bias + */ +struct adau1761_platform_data { + bool input_differential; + enum adau1761_output_mode lineout_mode; + enum adau1761_output_mode headphone_mode; + + enum adau1761_digmic_jackdet_pin_mode digmic_jackdetect_pin_mode; + + enum adau1761_jackdetect_debounce_time jackdetect_debounce_time; + bool jackdetect_active_low; + + enum adau17x1_micbias_voltage micbias_voltage; +}; + +/** + * struct adau1781_platform_data - ADAU1781 Codec driver platform data + * @left_input_differential: If true configure the left input as + * differential input. + * @right_input_differential: If true configure the right input as differential + * input. + * @use_dmic: If true configure the MIC pins as digital microphone pins instead + * of analog microphone pins.
+ * @micbias_voltage: Microphone voltage bias + */ +struct adau1781_platform_data { + bool left_input_differential; + bool right_input_differential; + + bool use_dmic; + + enum adau17x1_micbias_voltage micbias_voltage; +}; + +#endif diff --git a/include/linux/platform_data/adau1977.h b/include/linux/platform_data/adau1977.h new file mode 100644 index 000000000..bed11d908 --- /dev/null +++ b/include/linux/platform_data/adau1977.h @@ -0,0 +1,45 @@ +/* + * ADAU1977/ADAU1978/ADAU1979 driver + * + * Copyright 2014 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. + */ + +#ifndef __LINUX_PLATFORM_DATA_ADAU1977_H__ +#define __LINUX_PLATFORM_DATA_ADAU1977_H__ + +/** + * enum adau1977_micbias - ADAU1977 MICBIAS pin voltage setting + * @ADAU1977_MICBIAS_5V0: MICBIAS is set to 5.0 V + * @ADAU1977_MICBIAS_5V5: MICBIAS is set to 5.5 V + * @ADAU1977_MICBIAS_6V0: MICBIAS is set to 6.0 V + * @ADAU1977_MICBIAS_6V5: MICBIAS is set to 6.5 V + * @ADAU1977_MICBIAS_7V0: MICBIAS is set to 7.0 V + * @ADAU1977_MICBIAS_7V5: MICBIAS is set to 7.5 V + * @ADAU1977_MICBIAS_8V0: MICBIAS is set to 8.0 V + * @ADAU1977_MICBIAS_8V5: MICBIAS is set to 8.5 V + * @ADAU1977_MICBIAS_9V0: MICBIAS is set to 9.0 V + */ +enum adau1977_micbias { + ADAU1977_MICBIAS_5V0 = 0x0, + ADAU1977_MICBIAS_5V5 = 0x1, + ADAU1977_MICBIAS_6V0 = 0x2, + ADAU1977_MICBIAS_6V5 = 0x3, + ADAU1977_MICBIAS_7V0 = 0x4, + ADAU1977_MICBIAS_7V5 = 0x5, + ADAU1977_MICBIAS_8V0 = 0x6, + ADAU1977_MICBIAS_8V5 = 0x7, + ADAU1977_MICBIAS_9V0 = 0x8, +}; + +/** + * struct adau1977_platform_data - Platform configuration data for the ADAU1977 + * @micbias: Specifies the voltage for the MICBIAS pin + */ +struct adau1977_platform_data { + enum adau1977_micbias micbias; +}; + +#endif diff --git a/include/linux/platform_data/adp5588.h b/include/linux/platform_data/adp5588.h new file mode 100644 index 000000000..c2153049c --- /dev/null +++ b/include/linux/platform_data/adp5588.h @@ -0,0 +1,172 @@ +/* + * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef _ADP5588_H +#define _ADP5588_H + +#define DEV_ID 0x00 /* Device ID */ +#define CFG 0x01 /* Configuration Register1 */ +#define INT_STAT 0x02 /* Interrupt Status Register */ +#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */ +#define Key_EVENTA 0x04 /* Key Event Register A */ +#define Key_EVENTB 0x05 /* Key Event Register B */ +#define Key_EVENTC 0x06 /* Key Event Register C */ +#define Key_EVENTD 0x07 /* Key Event Register D */ +#define Key_EVENTE 0x08 /* Key Event Register E */ +#define Key_EVENTF 0x09 /* Key Event Register F */ +#define Key_EVENTG 0x0A /* Key Event Register G */ +#define Key_EVENTH 0x0B /* Key Event Register H */ +#define Key_EVENTI 0x0C /* Key Event Register I */ +#define Key_EVENTJ 0x0D /* Key Event Register J */ +#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */ +#define UNLOCK1 0x0F /* Unlock Key1 */ +#define UNLOCK2 0x10 /* Unlock Key2 */ +#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */ +#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */ +#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */ +#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */ +#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */ +#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */ +#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */ +#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */ +#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */ +#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */ +#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */ +#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */ +#define GPI_EM1 0x20 /* GPI Event Mode 1 */ +#define GPI_EM2 0x21 /* GPI Event Mode 2 */ +#define GPI_EM3 0x22 /* GPI Event Mode 3 */ +#define GPIO_DIR1 0x23 /* GPIO Data Direction */ +#define GPIO_DIR2 0x24 /* GPIO Data Direction */ +#define GPIO_DIR3 0x25 /* GPIO Data Direction */ +#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */ +#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */ +#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */ +#define Debounce_DIS1 0x29 /* Debounce Disable */ +#define Debounce_DIS2 0x2A /* Debounce Disable */ +#define Debounce_DIS3 0x2B /* Debounce Disable */ +#define GPIO_PULL1 0x2C /* GPIO Pull Disable */ +#define GPIO_PULL2 0x2D /* GPIO Pull Disable */ +#define GPIO_PULL3 0x2E /* GPIO Pull Disable */ +#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */ +#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */ +#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */ +#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */ +#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */ +#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */ +#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */ +#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */ +#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */ +#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */ +#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For 
Sensor 2 */ +#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */ +#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */ +#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */ +#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */ + +#define ADP5588_DEVICE_ID_MASK 0xF + + /* Configuration Register1 */ +#define ADP5588_AUTO_INC (1 << 7) +#define ADP5588_GPIEM_CFG (1 << 6) +#define ADP5588_OVR_FLOW_M (1 << 5) +#define ADP5588_INT_CFG (1 << 4) +#define ADP5588_OVR_FLOW_IEN (1 << 3) +#define ADP5588_K_LCK_IM (1 << 2) +#define ADP5588_GPI_IEN (1 << 1) +#define ADP5588_KE_IEN (1 << 0) + +/* Interrupt Status Register */ +#define ADP5588_CMP2_INT (1 << 5) +#define ADP5588_CMP1_INT (1 << 4) +#define ADP5588_OVR_FLOW_INT (1 << 3) +#define ADP5588_K_LCK_INT (1 << 2) +#define ADP5588_GPI_INT (1 << 1) +#define ADP5588_KE_INT (1 << 0) + +/* Key Lock and Event Counter Register */ +#define ADP5588_K_LCK_EN (1 << 6) +#define ADP5588_LCK21 0x30 +#define ADP5588_KEC 0xF + +#define ADP5588_MAXGPIO 18 +#define ADP5588_BANK(offs) ((offs) >> 3) +#define ADP5588_BIT(offs) (1u << ((offs) & 0x7)) + +/* Put one of these structures in i2c_board_info platform_data */ + +#define ADP5588_KEYMAPSIZE 80 + +#define GPI_PIN_ROW0 97 +#define GPI_PIN_ROW1 98 +#define GPI_PIN_ROW2 99 +#define GPI_PIN_ROW3 100 +#define GPI_PIN_ROW4 101 +#define GPI_PIN_ROW5 102 +#define GPI_PIN_ROW6 103 +#define GPI_PIN_ROW7 104 +#define GPI_PIN_COL0 105 +#define GPI_PIN_COL1 106 +#define GPI_PIN_COL2 107 +#define GPI_PIN_COL3 108 +#define GPI_PIN_COL4 109 +#define GPI_PIN_COL5 110 +#define GPI_PIN_COL6 111 +#define GPI_PIN_COL7 112 +#define GPI_PIN_COL8 113 +#define GPI_PIN_COL9 114 + +#define GPI_PIN_ROW_BASE GPI_PIN_ROW0 +#define GPI_PIN_ROW_END GPI_PIN_ROW7 +#define GPI_PIN_COL_BASE GPI_PIN_COL0 +#define GPI_PIN_COL_END GPI_PIN_COL9 + +#define GPI_PIN_BASE GPI_PIN_ROW_BASE +#define GPI_PIN_END GPI_PIN_COL_END + +#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1) + +struct adp5588_gpi_map { + unsigned short pin; + unsigned short sw_evt; +}; + +struct adp5588_kpad_platform_data { + int rows; /* Number of rows */ + int cols; /* Number of columns */ + const unsigned short *keymap; /* Pointer to keymap */ + unsigned short keymapsize; /* Keymap size */ + unsigned repeat:1; /* Enable key repeat */ + unsigned en_keylock:1; /* Enable Key Lock feature */ + unsigned short unlock_key1; /* Unlock Key 1 */ + unsigned short unlock_key2; /* Unlock Key 2 */ + const struct adp5588_gpi_map *gpimap; + unsigned short gpimapsize; + const struct adp5588_gpio_platform_data *gpio_data; +}; + +struct i2c_client; /* forward declaration */ + +struct adp5588_gpio_platform_data { + int gpio_start; /* GPIO Chip base # */ + const char *const *names; + unsigned irq_base; /* interrupt base # */ + unsigned pullup_dis_mask; /* Pull-Up Disable Mask */ + int (*setup)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + void *context; +}; + +#endif diff --git a/include/linux/platform_data/adp8860.h b/include/linux/platform_data/adp8860.h new file mode 100644 index 000000000..0b4d39855 --- /dev/null +++ b/include/linux/platform_data/adp8860.h @@ -0,0 +1,154 @@ +/* + * Definitions and platform data for Analog Devices + * Backlight drivers ADP8860 + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef __LINUX_I2C_ADP8860_H +#define __LINUX_I2C_ADP8860_H + +#include +#include + +#define ID_ADP8860 8860 + +#define ADP8860_MAX_BRIGHTNESS 0x7F +#define FLAG_OFFT_SHIFT 8 + +/* + * LEDs subdevice platform data + */ + +#define ADP8860_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT) + +#define ADP8860_LED_ONT_200ms 0 +#define ADP8860_LED_ONT_600ms 1 +#define ADP8860_LED_ONT_800ms 2 +#define ADP8860_LED_ONT_1200ms 3 + +#define ADP8860_LED_D7 (7) +#define ADP8860_LED_D6 (6) +#define ADP8860_LED_D5 (5) +#define ADP8860_LED_D4 (4) +#define ADP8860_LED_D3 (3) +#define ADP8860_LED_D2 (2) +#define ADP8860_LED_D1 (1) + +/* + * Backlight subdevice platform data + */ + +#define ADP8860_BL_D7 (1 << 6) +#define ADP8860_BL_D6 (1 << 5) +#define ADP8860_BL_D5 (1 << 4) +#define ADP8860_BL_D4 (1 << 3) +#define ADP8860_BL_D3 (1 << 2) +#define ADP8860_BL_D2 (1 << 1) +#define ADP8860_BL_D1 (1 << 0) + +#define ADP8860_FADE_T_DIS 0 /* Fade Timer Disabled */ +#define ADP8860_FADE_T_300ms 1 /* 0.3 Sec */ +#define ADP8860_FADE_T_600ms 2 +#define ADP8860_FADE_T_900ms 3 +#define ADP8860_FADE_T_1200ms 4 +#define ADP8860_FADE_T_1500ms 5 +#define ADP8860_FADE_T_1800ms 6 +#define ADP8860_FADE_T_2100ms 7 +#define ADP8860_FADE_T_2400ms 8 +#define ADP8860_FADE_T_2700ms 9 +#define ADP8860_FADE_T_3000ms 10 +#define ADP8860_FADE_T_3500ms 11 +#define ADP8860_FADE_T_4000ms 12 +#define ADP8860_FADE_T_4500ms 13 +#define ADP8860_FADE_T_5000ms 14 +#define ADP8860_FADE_T_5500ms 15 /* 5.5 Sec */ + +#define ADP8860_FADE_LAW_LINEAR 0 +#define ADP8860_FADE_LAW_SQUARE 1 +#define ADP8860_FADE_LAW_CUBIC1 2 +#define ADP8860_FADE_LAW_CUBIC2 3 + +#define ADP8860_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ +#define ADP8860_BL_AMBL_FILT_160ms 1 +#define ADP8860_BL_AMBL_FILT_320ms 2 +#define ADP8860_BL_AMBL_FILT_640ms 3 +#define ADP8860_BL_AMBL_FILT_1280ms 4 +#define ADP8860_BL_AMBL_FILT_2560ms 5 +#define ADP8860_BL_AMBL_FILT_5120ms 6 +#define ADP8860_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ + +/* + * Blacklight current 0..30mA + */ +#define ADP8860_BL_CUR_mA(I) ((I * 127) / 30) + +/* + * L2 comparator current 0..1106uA + */ +#define ADP8860_L2_COMP_CURR_uA(I) ((I * 255) / 1106) + +/* + * L3 comparator current 0..138uA + */ +#define ADP8860_L3_COMP_CURR_uA(I) ((I * 255) / 138) + +struct adp8860_backlight_platform_data { + u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */ + + u8 bl_fade_in; /* Backlight Fade-In Timer */ + u8 bl_fade_out; /* Backlight Fade-Out Timer */ + u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */ + + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ + u8 abml_filt; /* Light sensor filter time */ + + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + + /** + * Independent Current Sinks / LEDS + * Sinks not assigned to the Backlight can be exposed to + * user space 
using the LEDS CLASS interface + */ + + int num_leds; + struct led_info *leds; + u8 led_fade_in; /* LED Fade-In Timer */ + u8 led_fade_out; /* LED Fade-Out Timer */ + u8 led_fade_law; /* fade-on/fade-off transfer characteristic */ + u8 led_on_time; + + /** + * Gain down disable. Setting this option does not allow the + * charge pump to switch to lower gains. NOT AVAILABLE on ADP8860 + * 1 = the charge pump doesn't switch down in gain until all LEDs are 0. + * The charge pump switches up in gain as needed. This feature is + * useful if the ADP8863 charge pump is used to drive an external load. + * This feature must be used when utilizing small fly capacitors + * (0402 or smaller). + * 0 = the charge pump automatically switches up and down in gain. + * This provides optimal efficiency, but is not suitable for driving + * loads that are not connected through the ADP8863 diode drivers. + * Additionally, the charge pump fly capacitors should be low ESR + * and sized 0603 or greater. + */ + + u8 gdwn_dis; +}; + +#endif /* __LINUX_I2C_ADP8860_H */ diff --git a/include/linux/platform_data/adp8870.h b/include/linux/platform_data/adp8870.h new file mode 100644 index 000000000..624dceccb --- /dev/null +++ b/include/linux/platform_data/adp8870.h @@ -0,0 +1,153 @@ +/* + * Definitions and platform data for Analog Devices + * Backlight drivers ADP8870 + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __LINUX_I2C_ADP8870_H +#define __LINUX_I2C_ADP8870_H + +#define ID_ADP8870 8870 + +#define ADP8870_MAX_BRIGHTNESS 0x7F +#define FLAG_OFFT_SHIFT 8 + +/* + * LEDs subdevice platform data + */ + +#define ADP8870_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT) +#define ADP8870_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT) +#define ADP8870_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT) +#define ADP8870_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT) + +#define ADP8870_LED_ONT_200ms 0 +#define ADP8870_LED_ONT_600ms 1 +#define ADP8870_LED_ONT_800ms 2 +#define ADP8870_LED_ONT_1200ms 3 + +#define ADP8870_LED_D7 (7) +#define ADP8870_LED_D6 (6) +#define ADP8870_LED_D5 (5) +#define ADP8870_LED_D4 (4) +#define ADP8870_LED_D3 (3) +#define ADP8870_LED_D2 (2) +#define ADP8870_LED_D1 (1) + +/* + * Backlight subdevice platform data + */ + +#define ADP8870_BL_D7 (1 << 6) +#define ADP8870_BL_D6 (1 << 5) +#define ADP8870_BL_D5 (1 << 4) +#define ADP8870_BL_D4 (1 << 3) +#define ADP8870_BL_D3 (1 << 2) +#define ADP8870_BL_D2 (1 << 1) +#define ADP8870_BL_D1 (1 << 0) + +#define ADP8870_FADE_T_DIS 0 /* Fade Timer Disabled */ +#define ADP8870_FADE_T_300ms 1 /* 0.3 Sec */ +#define ADP8870_FADE_T_600ms 2 +#define ADP8870_FADE_T_900ms 3 +#define ADP8870_FADE_T_1200ms 4 +#define ADP8870_FADE_T_1500ms 5 +#define ADP8870_FADE_T_1800ms 6 +#define ADP8870_FADE_T_2100ms 7 +#define ADP8870_FADE_T_2400ms 8 +#define ADP8870_FADE_T_2700ms 9 +#define ADP8870_FADE_T_3000ms 10 +#define ADP8870_FADE_T_3500ms 11 +#define ADP8870_FADE_T_4000ms 12 +#define ADP8870_FADE_T_4500ms 13 +#define ADP8870_FADE_T_5000ms 14 +#define ADP8870_FADE_T_5500ms 15 /* 5.5 Sec */ + +#define ADP8870_FADE_LAW_LINEAR 0 +#define ADP8870_FADE_LAW_SQUARE 1 +#define ADP8870_FADE_LAW_CUBIC1 2 +#define ADP8870_FADE_LAW_CUBIC2 3 + +#define ADP8870_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ +#define ADP8870_BL_AMBL_FILT_160ms 1 +#define ADP8870_BL_AMBL_FILT_320ms 2 +#define ADP8870_BL_AMBL_FILT_640ms 3 +#define ADP8870_BL_AMBL_FILT_1280ms 4 +#define ADP8870_BL_AMBL_FILT_2560ms 5 +#define ADP8870_BL_AMBL_FILT_5120ms 6 +#define ADP8870_BL_AMBL_FILT_10240ms 7 
/* 10.24 sec */ + +/* + * Blacklight current 0..30mA + */ +#define ADP8870_BL_CUR_mA(I) ((I * 127) / 30) + +/* + * L2 comparator current 0..1106uA + */ +#define ADP8870_L2_COMP_CURR_uA(I) ((I * 255) / 1106) + +/* + * L3 comparator current 0..551uA + */ +#define ADP8870_L3_COMP_CURR_uA(I) ((I * 255) / 551) + +/* + * L4 comparator current 0..275uA + */ +#define ADP8870_L4_COMP_CURR_uA(I) ((I * 255) / 275) + +/* + * L5 comparator current 0..138uA + */ +#define ADP8870_L5_COMP_CURR_uA(I) ((I * 255) / 138) + +struct adp8870_backlight_platform_data { + u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */ + u8 pwm_assign; /* 1 = Enables PWM mode */ + + u8 bl_fade_in; /* Backlight Fade-In Timer */ + u8 bl_fade_out; /* Backlight Fade-Out Timer */ + u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */ + + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ + u8 abml_filt; /* Light sensor filter time */ + + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_bright_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_bright_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l4_indoor_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l4_indor_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l5_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l5_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + u8 l4_trip; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ + u8 l4_hyst; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ + u8 l5_trip; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */ + u8 l5_hyst; /* use L6_COMP_CURR_uA(I) 0 <= I <= 138 uA */ + + /** + * Independent Current Sinks / LEDS + * Sinks not assigned to the Backlight can be exposed to + * user space using the LEDS CLASS interface + */ + + int num_leds; + struct led_info *leds; + u8 led_fade_in; /* LED Fade-In Timer */ + u8 led_fade_out; /* LED Fade-Out Timer */ + u8 led_fade_law; /* fade-on/fade-off transfer characteristic */ + u8 led_on_time; +}; + +#endif /* __LINUX_I2C_ADP8870_H */ diff --git a/include/linux/platform_data/ads1015.h b/include/linux/platform_data/ads1015.h new file mode 100644 index 000000000..d5aa2a045 --- /dev/null +++ b/include/linux/platform_data/ads1015.h @@ -0,0 +1,36 @@ +/* + * Platform Data for ADS1015 12-bit 4-input ADC + * (C) Copyright 2010 + * Dirk Eibach, Guntermann & Drunck GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef LINUX_ADS1015_H +#define LINUX_ADS1015_H + +#define ADS1015_CHANNELS 8 + +struct ads1015_channel_data { + bool enabled; + unsigned int pga; + unsigned int data_rate; +}; + +struct ads1015_platform_data { + struct ads1015_channel_data channel_data[ADS1015_CHANNELS]; +}; + +#endif /* LINUX_ADS1015_H */ diff --git a/include/linux/platform_data/ads7828.h b/include/linux/platform_data/ads7828.h new file mode 100644 index 000000000..3245f45f9 --- /dev/null +++ b/include/linux/platform_data/ads7828.h @@ -0,0 +1,29 @@ +/* + * TI ADS7828 A/D Converter platform data definition + * + * Copyright (c) 2012 Savoir-faire Linux Inc. + * Vivien Didelot + * + * For further information, see the Documentation/hwmon/ads7828 file. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _PDATA_ADS7828_H +#define _PDATA_ADS7828_H + +/** + * struct ads7828_platform_data - optional ADS7828 connectivity info + * @diff_input: Differential input mode. + * @ext_vref: Use an external voltage reference. + * @vref_mv: Voltage reference value, if external. + */ +struct ads7828_platform_data { + bool diff_input; + bool ext_vref; + unsigned int vref_mv; +}; + +#endif /* _PDATA_ADS7828_H */ diff --git a/include/linux/platform_data/ams-delta-fiq.h b/include/linux/platform_data/ams-delta-fiq.h new file mode 100644 index 000000000..cf4589ccb --- /dev/null +++ b/include/linux/platform_data/ams-delta-fiq.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * include/linux/platform_data/ams-delta-fiq.h + * + * Taken from the original Amstrad modifications to fiq.h + * + * Copyright (c) 2004 Amstrad Plc + * Copyright (c) 2006 Matt Callow + * Copyright (c) 2010 Janusz Krzysztofik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_PLATFORM_DATA_AMS_DELTA_FIQ_H +#define __LINUX_PLATFORM_DATA_AMS_DELTA_FIQ_H + +/* + * These are the offsets from the beginning of the fiq_buffer. They are put here + * since the buffer and header need to be accessed by drivers servicing devices + * which generate GPIO interrupts - e.g. keyboard, modem, hook switch. + */ +#define FIQ_MASK 0 +#define FIQ_STATE 1 +#define FIQ_KEYS_CNT 2 +#define FIQ_TAIL_OFFSET 3 +#define FIQ_HEAD_OFFSET 4 +#define FIQ_BUF_LEN 5 +#define FIQ_KEY 6 +#define FIQ_MISSED_KEYS 7 +#define FIQ_BUFFER_START 8 +#define FIQ_GPIO_INT_MASK 9 +#define FIQ_KEYS_HICNT 10 +#define FIQ_IRQ_PEND 11 +#define FIQ_SIR_CODE_L1 12 +#define IRQ_SIR_CODE_L2 13 + +#define FIQ_CNT_INT_00 14 +#define FIQ_CNT_INT_KEY 15 +#define FIQ_CNT_INT_MDM 16 +#define FIQ_CNT_INT_03 17 +#define FIQ_CNT_INT_HSW 18 +#define FIQ_CNT_INT_05 19 +#define FIQ_CNT_INT_06 20 +#define FIQ_CNT_INT_07 21 +#define FIQ_CNT_INT_08 22 +#define FIQ_CNT_INT_09 23 +#define FIQ_CNT_INT_10 24 +#define FIQ_CNT_INT_11 25 +#define FIQ_CNT_INT_12 26 +#define FIQ_CNT_INT_13 27 +#define FIQ_CNT_INT_14 28 +#define FIQ_CNT_INT_15 29 + +#define FIQ_CIRC_BUFF 30 /*Start of circular buffer */ + +#endif diff --git a/include/linux/platform_data/apds990x.h b/include/linux/platform_data/apds990x.h new file mode 100644 index 000000000..d186fcc5d --- /dev/null +++ b/include/linux/platform_data/apds990x.h @@ -0,0 +1,79 @@ +/* + * This file is part of the APDS990x sensor driver. 
+ * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __APDS990X_H__ +#define __APDS990X_H__ + + +#define APDS_IRLED_CURR_12mA 0x3 +#define APDS_IRLED_CURR_25mA 0x2 +#define APDS_IRLED_CURR_50mA 0x1 +#define APDS_IRLED_CURR_100mA 0x0 + +/** + * struct apds990x_chip_factors - defines effect of the cover window + * @ga: Total glass attenuation + * @cf1: clear channel factor 1 for raw to lux conversion + * @irf1: IR channel factor 1 for raw to lux conversion + * @cf2: clear channel factor 2 for raw to lux conversion + * @irf2: IR channel factor 2 for raw to lux conversion + * @df: device factor for conversion formulas + * + * Structure for tuning ALS calculation to match with environment. + * Values depend on the material above the sensor and the sensor + * itself. If the GA is zero, the driver will use uncovered sensor default values. + * Format: decimal value * APDS_PARAM_SCALE, except df which is a plain integer. + */ +#define APDS_PARAM_SCALE 4096 +struct apds990x_chip_factors { + int ga; + int cf1; + int irf1; + int cf2; + int irf2; + int df; +}; + +/** + * struct apds990x_platform_data - platform data for apds990x.c driver + * @cf: chip factor data + * @pdrive: IR-led driving current + * @ppcount: number of IR pulses used for proximity estimation + * @setup_resources: interrupt line setup callback function + * @release_resources: interrupt line release callback function + * + * Proximity detection result depends heavily on correct ppcount, pdrive + * and cover window. + * + */ + +struct apds990x_platform_data { + struct apds990x_chip_factors cf; + u8 pdrive; + u8 ppcount; + int (*setup_resources)(void); + int (*release_resources)(void); +}; + +#endif diff --git a/include/linux/platform_data/arm-ux500-pm.h b/include/linux/platform_data/arm-ux500-pm.h new file mode 100644 index 000000000..8dff64b29 --- /dev/null +++ b/include/linux/platform_data/arm-ux500-pm.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) ST-Ericsson SA 2010-2013 + * Author: Rickard Andersson for + * ST-Ericsson. + * Author: Daniel Lezcano for Linaro.
+ * License terms: GNU General Public License (GPL) version 2 + * + */ + +#ifndef ARM_UX500_PM_H +#define ARM_UX500_PM_H + +int prcmu_gic_decouple(void); +int prcmu_gic_recouple(void); +bool prcmu_gic_pending_irq(void); +bool prcmu_pending_irq(void); +bool prcmu_is_cpu_in_wfi(int cpu); +int prcmu_copy_gic_settings(void); +void ux500_pm_init(u32 phy_base, u32 size); + +#endif /* ARM_UX500_PM_H */ diff --git a/include/linux/platform_data/asoc-imx-ssi.h b/include/linux/platform_data/asoc-imx-ssi.h new file mode 100644 index 000000000..902851aeb --- /dev/null +++ b/include/linux/platform_data/asoc-imx-ssi.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __MACH_SSI_H +#define __MACH_SSI_H + +struct snd_ac97; + +extern unsigned char imx_ssi_fiq_start, imx_ssi_fiq_end; +extern unsigned long imx_ssi_fiq_base, imx_ssi_fiq_tx_buffer, imx_ssi_fiq_rx_buffer; + +struct imx_ssi_platform_data { + unsigned int flags; +#define IMX_SSI_DMA (1 << 0) +#define IMX_SSI_USE_AC97 (1 << 1) +#define IMX_SSI_NET (1 << 2) +#define IMX_SSI_SYN (1 << 3) +#define IMX_SSI_USE_I2S_SLAVE (1 << 4) + void (*ac97_reset) (struct snd_ac97 *ac97); + void (*ac97_warm_reset)(struct snd_ac97 *ac97); +}; + +extern int mxc_set_irq_fiq(unsigned int irq, unsigned int type); + +#endif /* __MACH_SSI_H */ + diff --git a/include/linux/platform_data/asoc-kirkwood.h b/include/linux/platform_data/asoc-kirkwood.h new file mode 100644 index 000000000..d442cefa3 --- /dev/null +++ b/include/linux/platform_data/asoc-kirkwood.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PLAT_AUDIO_H +#define __PLAT_AUDIO_H + +struct kirkwood_asoc_platform_data { + int burst; +}; +#endif diff --git a/include/linux/platform_data/asoc-mx27vis.h b/include/linux/platform_data/asoc-mx27vis.h new file mode 100644 index 000000000..2107d0d99 --- /dev/null +++ b/include/linux/platform_data/asoc-mx27vis.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PLATFORM_DATA_ASOC_MX27VIS_H +#define __PLATFORM_DATA_ASOC_MX27VIS_H + +struct snd_mx27vis_platform_data { + int amp_gain0_gpio; + int amp_gain1_gpio; + int amp_mutel_gpio; + int amp_muter_gpio; +}; + +#endif /* __PLATFORM_DATA_ASOC_MX27VIS_H */ diff --git a/include/linux/platform_data/asoc-palm27x.h b/include/linux/platform_data/asoc-palm27x.h new file mode 100644 index 000000000..22b69a393 --- /dev/null +++ b/include/linux/platform_data/asoc-palm27x.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _INCLUDE_PALMASOC_H_ +#define _INCLUDE_PALMASOC_H_ + +struct palm27x_asoc_info { + int jack_gpio; +}; + +#endif diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h new file mode 100644 index 000000000..90641a5da --- /dev/null +++ b/include/linux/platform_data/asoc-s3c.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2009 Samsung Electronics Co. Ltd + * Author: Jaswinder Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* The machine init code calls s3c*_ac97_setup_gpio with + * one of these defines in order to select appropriate bank + * of GPIO for AC97 pins + */ +#define S3C64XX_AC97_GPD 0 +#define S3C64XX_AC97_GPE 1 + +#include + +extern void s3c64xx_ac97_setup_gpio(int); + +struct samsung_i2s_type { +/* If the Primary DAI has 5.1 Channels */ +#define QUIRK_PRI_6CHAN (1 << 0) +/* If the I2S block has a Stereo Overlay Channel */ +#define QUIRK_SEC_DAI (1 << 1) +/* + * If the I2S block has no internal prescalar or MUX (I2SMOD[10] bit) + * The Machine driver must provide suitably set clock to the I2S block. + */ +#define QUIRK_NO_MUXPSR (1 << 2) +#define QUIRK_NEED_RSTCLR (1 << 3) +#define QUIRK_SUPPORTS_TDM (1 << 4) +#define QUIRK_SUPPORTS_IDMA (1 << 5) + /* Quirks of the I2S controller */ + u32 quirks; + dma_addr_t idma_addr; +}; + +/** + * struct s3c_audio_pdata - common platform data for audio device drivers + * @cfg_gpio: Callback function to setup mux'ed pins in I2S/PCM/AC97 mode + */ +struct s3c_audio_pdata { + int (*cfg_gpio)(struct platform_device *); + dma_filter_fn dma_filter; + void *dma_playback; + void *dma_capture; + void *dma_play_sec; + void *dma_capture_mic; + struct samsung_i2s_type type; +}; diff --git a/include/linux/platform_data/asoc-s3c24xx_simtec.h b/include/linux/platform_data/asoc-s3c24xx_simtec.h new file mode 100644 index 000000000..d220e5412 --- /dev/null +++ b/include/linux/platform_data/asoc-s3c24xx_simtec.h @@ -0,0 +1,33 @@ +/* + * Copyright 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Simtec Audio support. +*/ + +/** + * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio + * @use_mpllin: Select codec clock from MPLLin + * @output_cdclk: Need to output CDCLK to the codec + * @have_mic: Set if we have a MIC socket + * @have_lout: Set if we have a LineOut socket + * @amp_gpio: GPIO pin to enable the AMP + * @amp_gain: Option GPIO to control AMP gain + */ +struct s3c24xx_audio_simtec_pdata { + unsigned int use_mpllin:1; + unsigned int output_cdclk:1; + + unsigned int have_mic:1; + unsigned int have_lout:1; + + int amp_gpio; + int amp_gain[2]; + + void (*startup)(void); +}; diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h new file mode 100644 index 000000000..e319d0a2e --- /dev/null +++ b/include/linux/platform_data/asoc-ti-mcbsp.h @@ -0,0 +1,48 @@ +/* + * Defines for Multi-Channel Buffered Serial Port + * + * Copyright (C) 2002 RidgeRun, Inc. + * Author: Steve Johnson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#ifndef __ASOC_TI_MCBSP_H +#define __ASOC_TI_MCBSP_H + +#include +#include + +/* Platform specific configuration */ +struct omap_mcbsp_ops { + void (*request)(unsigned int); + void (*free)(unsigned int); +}; + +struct omap_mcbsp_platform_data { + struct omap_mcbsp_ops *ops; + u16 buffer_size; + u8 reg_size; + u8 reg_step; + + /* McBSP platform and instance specific features */ + bool has_wakeup; /* Wakeup capability */ + bool has_ccr; /* Transceiver has configuration control registers */ + int (*force_ick_on)(struct clk *clk, bool force_on); +}; + +void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata); + +#endif diff --git a/include/linux/platform_data/asoc-ux500-msp.h b/include/linux/platform_data/asoc-ux500-msp.h new file mode 100644 index 000000000..2f34bb98f --- /dev/null +++ b/include/linux/platform_data/asoc-ux500-msp.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Rabin Vincent for ST-Ericsson + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef __MSP_H +#define __MSP_H + +#include + +/* Platform data structure for a MSP I2S-device */ +struct msp_i2s_platform_data { + int id; + struct stedma40_chan_cfg *msp_i2s_dma_rx; + struct stedma40_chan_cfg *msp_i2s_dma_tx; +}; + +#endif diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h new file mode 100644 index 000000000..63507ff46 --- /dev/null +++ b/include/linux/platform_data/at24.h @@ -0,0 +1,60 @@ +/* + * at24.h - platform_data for the at24 (generic eeprom) driver + * (C) Copyright 2008 by Pengutronix + * (C) Copyright 2012 by Wolfram Sang + * same license as the driver + */ + +#ifndef _LINUX_AT24_H +#define _LINUX_AT24_H + +#include +#include +#include + +/** + * struct at24_platform_data - data to set up at24 (generic eeprom) driver + * @byte_len: size of eeprom in byte + * @page_size: number of byte which can be written in one go + * @flags: tunable options, check AT24_FLAG_* defines + * @setup: an optional callback invoked after eeprom is probed; enables kernel + code to access eeprom via nvmem, see example + * @context: optional parameter passed to setup() + * + * If you set up a custom eeprom type, please double-check the parameters. + * Especially page_size needs extra care, as you risk data loss if your value + * is bigger than what the chip actually supports! + * + * An example in pseudo code for a setup() callback: + * + * void get_mac_addr(struct nvmem_device *nvmem, void *context) + * { + * u8 *mac_addr = ethernet_pdata->mac_addr; + * off_t offset = context; + * + * // Read MAC addr from EEPROM + * if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) + * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); + * } + * + * This function pointer and context can now be set up in at24_platform_data. 
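+ *
+ * Building on that sketch, a board could then hand the callback to the
+ * driver through the platform data declared below. The sizes and the offset
+ * macro here are illustrative placeholders, not values for any particular
+ * EEPROM:
+ *
+ *     static struct at24_platform_data board_eeprom_pdata = {
+ *             .byte_len       = 256,
+ *             .page_size      = 8,
+ *             .setup          = get_mac_addr,
+ *             .context        = (void *)EEPROM_MAC_OFFSET,
+ *     };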
+ */ + +struct at24_platform_data { + u32 byte_len; /* size (sum of all addr) */ + u16 page_size; /* for writes */ + u8 flags; +#define AT24_FLAG_ADDR16 BIT(7) /* address pointer is 16 bit */ +#define AT24_FLAG_READONLY BIT(6) /* sysfs-entry will be read-only */ +#define AT24_FLAG_IRUGO BIT(5) /* sysfs-entry will be world-readable */ +#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */ +#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */ +#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */ +#define AT24_FLAG_NO_RDROL BIT(1) /* does not auto-rollover reads to */ + /* the next slave address */ + + void (*setup)(struct nvmem_device *nvmem, void *context); + void *context; +}; + +#endif /* _LINUX_AT24_H */ diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h new file mode 100644 index 000000000..7819fc787 --- /dev/null +++ b/include/linux/platform_data/at91_adc.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2011 Free Electrons + * + * Licensed under the GPLv2 or later. + */ + +#ifndef _AT91_ADC_H_ +#define _AT91_ADC_H_ + +enum atmel_adc_ts_type { + ATMEL_ADC_TOUCHSCREEN_NONE = 0, + ATMEL_ADC_TOUCHSCREEN_4WIRE = 4, + ATMEL_ADC_TOUCHSCREEN_5WIRE = 5, +}; + +/** + * struct at91_adc_trigger - description of triggers + * @name: name of the trigger advertised to the user + * @value: value to set in the ADC's trigger setup register + to enable the trigger + * @is_external: Does the trigger rely on an external pin? + */ +struct at91_adc_trigger { + const char *name; + u8 value; + bool is_external; +}; + +/** + * struct at91_adc_data - platform data for ADC driver + * @channels_used: channels in use on the board as a bitmask + * @startup_time: startup time of the ADC in microseconds + * @trigger_list: Triggers available in the ADC + * @trigger_number: Number of triggers available in the ADC + * @use_external_triggers: does the board has external triggers availables + * @vref: Reference voltage for the ADC in millivolts + * @touchscreen_type: If a touchscreen is connected, its type (4 or 5 wires) + */ +struct at91_adc_data { + unsigned long channels_used; + u8 startup_time; + struct at91_adc_trigger *trigger_list; + u8 trigger_number; + bool use_external_triggers; + u16 vref; + enum atmel_adc_ts_type touchscreen_type; +}; + +extern void __init at91_add_device_adc(struct at91_adc_data *data); +#endif diff --git a/include/linux/platform_data/ata-pxa.h b/include/linux/platform_data/ata-pxa.h new file mode 100644 index 000000000..6cf7df1d5 --- /dev/null +++ b/include/linux/platform_data/ata-pxa.h @@ -0,0 +1,33 @@ +/* + * Generic PXA PATA driver + * + * Copyright (C) 2010 Marek Vasut + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __MACH_PATA_PXA_H__ +#define __MACH_PATA_PXA_H__ + +struct pata_pxa_pdata { + /* PXA DMA DREQ<0:2> pin */ + uint32_t dma_dreq; + /* Register shift */ + uint32_t reg_shift; + /* IRQ flags */ + uint32_t irq_flags; +}; + +#endif /* __MACH_PATA_PXA_H__ */ diff --git a/include/linux/platform_data/ata-samsung_cf.h b/include/linux/platform_data/ata-samsung_cf.h new file mode 100644 index 000000000..748e71642 --- /dev/null +++ b/include/linux/platform_data/ata-samsung_cf.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Samsung CF-ATA platform_device info + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __ATA_SAMSUNG_CF_H +#define __ATA_SAMSUNG_CF_H __FILE__ + +/** + * struct s3c_ide_platdata - S3C IDE driver platform data. + * @setup_gpio: Setup the external GPIO pins to the right state for data + * transfer in true-ide mode. + */ +struct s3c_ide_platdata { + void (*setup_gpio)(void); +}; + +/* + * s3c_ide_set_platdata() - Setup the platform specifc data for IDE driver. + * @pdata: Platform data for IDE driver. + */ +extern void s3c_ide_set_platdata(struct s3c_ide_platdata *pdata); + +/* architecture-specific IDE configuration */ +extern void s3c64xx_ide_setup_gpio(void); +extern void s5pv210_ide_setup_gpio(void); + +#endif /*__ATA_SAMSUNG_CF_H */ diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h new file mode 100644 index 000000000..cdceb4d4e --- /dev/null +++ b/include/linux/platform_data/atmel.h @@ -0,0 +1,32 @@ +/* + * atmel platform data + * + * GPL v2 Only + */ + +#ifndef __ATMEL_H__ +#define __ATMEL_H__ + + /* Compact Flash */ +struct at91_cf_data { + int irq_pin; /* I/O IRQ */ + int det_pin; /* Card detect */ + int vcc_pin; /* power switching */ + int rst_pin; /* card reset */ + u8 chipselect; /* EBI Chip Select number */ + u8 flags; +#define AT91_CF_TRUE_IDE 0x01 +#define AT91_IDE_SWAP_A0_A2 0x02 +}; + +/* FIXME: this needs a better location, but gets stuff building again */ +#ifdef CONFIG_ATMEL_PM +extern int at91_suspend_entering_slow_clock(void); +#else +static inline int at91_suspend_entering_slow_clock(void) +{ + return 0; +} +#endif + +#endif /* __ATMEL_H__ */ diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h new file mode 100644 index 000000000..8eaef2f2b --- /dev/null +++ b/include/linux/platform_data/b53.h @@ -0,0 +1,37 @@ +/* + * B53 platform data + * + * Copyright (C) 2013 Jonas Gorski + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __B53_H +#define __B53_H + +#include +#include + +struct b53_platform_data { + /* Must be first such that dsa_register_switch() can access it */ + struct dsa_chip_data cd; + + u32 chip_id; + u16 enabled_ports; + + /* only used by MMAP'd driver */ + unsigned big_endian:1; + void __iomem *regs; +}; + +#endif diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h new file mode 100644 index 000000000..d8f873862 --- /dev/null +++ b/include/linux/platform_data/bcmgenet.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__ +#define __LINUX_PLATFORM_DATA_BCMGENET_H__ + +#include +#include +#include + +struct bcmgenet_platform_data { + bool mdio_enabled; + phy_interface_t phy_interface; + int phy_address; + int phy_speed; + int phy_duplex; + u8 mac_address[ETH_ALEN]; + int genet_version; +}; + +#endif diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h new file mode 100644 index 000000000..671d6502d --- /dev/null +++ b/include/linux/platform_data/bd6107.h @@ -0,0 +1,19 @@ +/* + * bd6107.h - Rohm BD6107 LEDs Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __BD6107_H__ +#define __BD6107_H__ + +struct device; + +struct bd6107_platform_data { + struct device *fbdev; + int reset; /* Reset GPIO */ + unsigned int def_value; +}; + +#endif diff --git a/include/linux/platform_data/bh1770glc.h b/include/linux/platform_data/bh1770glc.h new file mode 100644 index 000000000..8b5e2df36 --- /dev/null +++ b/include/linux/platform_data/bh1770glc.h @@ -0,0 +1,53 @@ +/* + * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver. + * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __BH1770_H__ +#define __BH1770_H__ + +/** + * struct bh1770_platform_data - platform data for bh1770glc driver + * @led_def_curr: IR led driving current. + * @glass_attenuation: Attenuation factor for covering window. + * @setup_resources: Call back for interrupt line setup function + * @release_resources: Call back for interrupte line release function + * + * Example of glass attenuation: 16384 * 385 / 100 means attenuation factor + * of 3.85. i.e. 
light_above_sensor = light_above_cover_window / 3.85 + */ + +struct bh1770_platform_data { +#define BH1770_LED_5mA 0 +#define BH1770_LED_10mA 1 +#define BH1770_LED_20mA 2 +#define BH1770_LED_50mA 3 +#define BH1770_LED_100mA 4 +#define BH1770_LED_150mA 5 +#define BH1770_LED_200mA 6 + __u8 led_def_curr; +#define BH1770_NEUTRAL_GA 16384 /* 16384 / 16384 = 1 */ + __u32 glass_attenuation; + int (*setup_resources)(void); + int (*release_resources)(void); +}; +#endif diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h new file mode 100644 index 000000000..1d30bf278 --- /dev/null +++ b/include/linux/platform_data/brcmfmac.h @@ -0,0 +1,185 @@ +/* + * Copyright (c) 201 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LINUX_BRCMFMAC_PLATFORM_H +#define _LINUX_BRCMFMAC_PLATFORM_H + + +#define BRCMFMAC_PDATA_NAME "brcmfmac" + +#define BRCMFMAC_COUNTRY_BUF_SZ 4 + + +/* + * Platform specific driver functions and data. Through the platform specific + * device data functions and data can be provided to help the brcmfmac driver to + * operate with the device in combination with the used platform. + */ + + +/** + * Note: the brcmfmac can be loaded as module or be statically built-in into + * the kernel. If built-in then do note that it uses module_init (and + * module_exit) routines which equal device_initcall. So if you intend to + * create a module with the platform specific data for the brcmfmac and have + * it built-in to the kernel then use a higher initcall then device_initcall + * (see init.h). If this is not done then brcmfmac will load without problems + * but will not pickup the platform data. + * + * When the driver does not "detect" platform driver data then it will continue + * without reporting anything and just assume there is no data needed. Which is + * probably true for most platforms. + */ + +/** + * enum brcmf_bus_type - Bus type identifier. Currently SDIO, USB and PCIE are + * supported. + */ +enum brcmf_bus_type { + BRCMF_BUSTYPE_SDIO, + BRCMF_BUSTYPE_USB, + BRCMF_BUSTYPE_PCIE +}; + + +/** + * struct brcmfmac_sdio_pd - SDIO Device specific platform data. + * + * @txglomsz: SDIO txglom size. Use 0 if default of driver is to be + * used. + * @drive_strength: is the preferred drive_strength to be used for the SDIO + * pins. If 0 then a default value will be used. This is + * the target drive strength, the exact drive strength + * which will be used depends on the capabilities of the + * device. + * @oob_irq_supported: does the board have support for OOB interrupts. SDIO + * in-band interrupts are relatively slow and for having + * less overhead on interrupt processing an out of band + * interrupt can be used. If the HW supports this then + * enable this by setting this field to true and configure + * the oob related fields. 
+ * @oob_irq_nr, + * @oob_irq_flags: the OOB interrupt information. The values are used for + * registering the irq using request_irq function. + * @broken_sg_support: flag for broken sg list support of SDIO host controller. + * Set this to true if the SDIO host controller has higher + * align requirement than 32 bytes for each scatterlist + * item. + * @sd_head_align: alignment requirement for start of data buffer. + * @sd_sgentry_align: length alignment requirement for each sg entry. + * @reset: This function can get called if the device communication + * broke down. This functionality is particularly useful in + * case of SDIO type devices. It is possible to reset a + * dongle via sdio data interface, but it requires that + * this is fully functional. This function is chip/module + * specific and this function should return only after the + * complete reset has completed. + */ +struct brcmfmac_sdio_pd { + int txglomsz; + unsigned int drive_strength; + bool oob_irq_supported; + unsigned int oob_irq_nr; + unsigned long oob_irq_flags; + bool broken_sg_support; + unsigned short sd_head_align; + unsigned short sd_sgentry_align; + void (*reset)(void); +}; + +/** + * struct brcmfmac_pd_cc_entry - Struct for translating user space country code + * (iso3166) to firmware country code and + * revision. + * + * @iso3166: iso3166 alpha 2 country code string. + * @cc: firmware country code string. + * @rev: firmware country code revision. + */ +struct brcmfmac_pd_cc_entry { + char iso3166[BRCMFMAC_COUNTRY_BUF_SZ]; + char cc[BRCMFMAC_COUNTRY_BUF_SZ]; + s32 rev; +}; + +/** + * struct brcmfmac_pd_cc - Struct for translating country codes as set by user + * space to a country code and rev which can be used by + * firmware. + * + * @table_size: number of entries in table (> 0) + * @table: array of 1 or more elements with translation information. + */ +struct brcmfmac_pd_cc { + int table_size; + struct brcmfmac_pd_cc_entry table[0]; +}; + +/** + * struct brcmfmac_pd_device - Device specific platform data. (id/rev/bus_type) + * is the unique identifier of the device. + * + * @id: ID of the device for which this data is. In case of SDIO + * or PCIE this is the chipid as identified by chip.c In + * case of USB this is the chipid as identified by the + * device query. + * @rev: chip revision, see id. + * @bus_type: The type of bus. Some chipid/rev exist for different bus + * types. Each bus type has its own set of settings. + * @feature_disable: Bitmask of features to disable (override), See feature.c + * in brcmfmac for details. + * @country_codes: If available, pointer to struct for translating country + * codes. + * @bus: Bus specific (union) device settings. Currently only + * SDIO. + */ +struct brcmfmac_pd_device { + unsigned int id; + unsigned int rev; + enum brcmf_bus_type bus_type; + unsigned int feature_disable; + struct brcmfmac_pd_cc *country_codes; + union { + struct brcmfmac_sdio_pd sdio; + } bus; +}; + +/** + * struct brcmfmac_platform_data - BRCMFMAC specific platform data. + * + * @power_on: This function is called by the brcmfmac driver when the module + * gets loaded. This can be particularly useful for low power + * devices. The platform spcific routine may for example decide to + * power up the complete device. If there is no use-case for this + * function then provide NULL. + * @power_off: This function is called by the brcmfmac when the module gets + * unloaded. At this point the devices can be powered down or + * otherwise be reset. 
So if an actual power_off is not supported + * but reset is supported by the devices then reset the devices + * when this function gets called. This can be particularly useful + * for low power devices. If there is no use-case for this + * function then provide NULL. + */ +struct brcmfmac_platform_data { + void (*power_on)(void); + void (*power_off)(void); + char *fw_alternative_path; + int device_count; + struct brcmfmac_pd_device devices[0]; +}; + + +#endif /* _LINUX_BRCMFMAC_PLATFORM_H */ diff --git a/include/linux/platform_data/clk-da8xx-cfgchip.h b/include/linux/platform_data/clk-da8xx-cfgchip.h new file mode 100644 index 000000000..de0f77d38 --- /dev/null +++ b/include/linux/platform_data/clk-da8xx-cfgchip.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * clk-da8xx-cfgchip - TI DaVinci DA8xx CFGCHIP clock driver + * + * Copyright (C) 2018 David Lechner + */ + +#ifndef __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ +#define __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ + +#include + +/** + * da8xx_cfgchip_clk_platform_data + * @cfgchip: CFGCHIP syscon regmap + */ +struct da8xx_cfgchip_clk_platform_data { + struct regmap *cfgchip; +}; + +#endif /* __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ */ diff --git a/include/linux/platform_data/clk-davinci-pll.h b/include/linux/platform_data/clk-davinci-pll.h new file mode 100644 index 000000000..e55dab1d5 --- /dev/null +++ b/include/linux/platform_data/clk-davinci-pll.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PLL clock driver for TI Davinci SoCs + * + * Copyright (C) 2018 David Lechner + */ + +#ifndef __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ +#define __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ + +#include + +/** + * davinci_pll_platform_data + * @cfgchip: CFGCHIP syscon regmap + */ +struct davinci_pll_platform_data { + struct regmap *cfgchip; +}; + +#endif /* __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ */ diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h new file mode 100644 index 000000000..addd48cac --- /dev/null +++ b/include/linux/platform_data/clk-integrator.h @@ -0,0 +1,2 @@ +void integrator_impd1_clk_init(void __iomem *base, unsigned int id); +void integrator_impd1_clk_exit(unsigned int id); diff --git a/include/linux/platform_data/clk-lpss.h b/include/linux/platform_data/clk-lpss.h new file mode 100644 index 000000000..23901992b --- /dev/null +++ b/include/linux/platform_data/clk-lpss.h @@ -0,0 +1,23 @@ +/* + * Intel Low Power Subsystem clocks. + * + * Copyright (C) 2013, Intel Corporation + * Authors: Mika Westerberg + * Rafael J. Wysocki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CLK_LPSS_H +#define __CLK_LPSS_H + +struct lpss_clk_data { + const char *name; + struct clk *clk; +}; + +extern int lpt_clk_init(void); + +#endif /* __CLK_LPSS_H */ diff --git a/include/linux/platform_data/clk-st.h b/include/linux/platform_data/clk-st.h new file mode 100644 index 000000000..7cdb6a402 --- /dev/null +++ b/include/linux/platform_data/clk-st.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ +/* + * clock framework for AMD Stoney based clock + * + * Copyright 2018 Advanced Micro Devices, Inc. 
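As a rough illustration of the initcall note above, a small board module could publish this platform data as follows; the power hooks are stubs, and the initcall level, device id and empty devices[] array are illustrative assumptions (per-device entries would normally be allocated at run time because of the flexible array member).

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac.h>

static void board_wifi_power_on(void)
{
        /* e.g. drive WL_REG_ON, enable the SDIO supply */
}

static void board_wifi_power_off(void)
{
        /* undo whatever power_on did */
}

static struct brcmfmac_platform_data board_brcmfmac_pdata = {
        .power_on = board_wifi_power_on,
        .power_off = board_wifi_power_off,
        .device_count = 0,      /* no per-chip overrides in this sketch */
};

static int __init board_brcmfmac_init(void)
{
        struct platform_device *pdev;

        pdev = platform_device_register_data(NULL, BRCMFMAC_PDATA_NAME,
                                             PLATFORM_DEVID_NONE,
                                             &board_brcmfmac_pdata,
                                             sizeof(board_brcmfmac_pdata));
        return PTR_ERR_OR_ZERO(pdev);
}
/* earlier than device_initcall, as the note above asks for the built-in case */
subsys_initcall(board_brcmfmac_init);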
+ */ + +#ifndef __CLK_ST_H +#define __CLK_ST_H + +#include + +struct st_clk_data { + void __iomem *base; +}; + +#endif /* __CLK_ST_H */ diff --git a/include/linux/platform_data/clk-u300.h b/include/linux/platform_data/clk-u300.h new file mode 100644 index 000000000..8429e7391 --- /dev/null +++ b/include/linux/platform_data/clk-u300.h @@ -0,0 +1 @@ +void __init u300_clk_init(void __iomem *base); diff --git a/include/linux/platform_data/cpuidle-exynos.h b/include/linux/platform_data/cpuidle-exynos.h new file mode 100644 index 000000000..bfa40e4c5 --- /dev/null +++ b/include/linux/platform_data/cpuidle-exynos.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __CPUIDLE_EXYNOS_H +#define __CPUIDLE_EXYNOS_H + +struct cpuidle_exynos_data { + int (*cpu0_enter_aftr)(void); + int (*cpu1_powerdown)(void); + void (*pre_enter_aftr)(void); + void (*post_enter_aftr)(void); +}; + +#endif diff --git a/include/linux/platform_data/crypto-atmel.h b/include/linux/platform_data/crypto-atmel.h new file mode 100644 index 000000000..0471aaf69 --- /dev/null +++ b/include/linux/platform_data/crypto-atmel.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_CRYPTO_ATMEL_H +#define __LINUX_CRYPTO_ATMEL_H + +#include + +/** + * struct crypto_dma_data - DMA data for AES/TDES/SHA + */ +struct crypto_dma_data { + struct at_dma_slave txdata; + struct at_dma_slave rxdata; +}; + +/** + * struct crypto_platform_data - board-specific AES/TDES/SHA configuration + * @dma_slave: DMA slave interface to use in data transfers. + */ +struct crypto_platform_data { + struct crypto_dma_data *dma_slave; +}; + +#endif /* __LINUX_CRYPTO_ATMEL_H */ diff --git a/include/linux/platform_data/crypto-ux500.h b/include/linux/platform_data/crypto-ux500.h new file mode 100644 index 000000000..94df96d9a --- /dev/null +++ b/include/linux/platform_data/crypto-ux500.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Joakim Bech for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef _CRYPTO_UX500_H +#define _CRYPTO_UX500_H +#include +#include + +struct hash_platform_data { + void *mem_to_engine; + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); +}; + +struct cryp_platform_data { + struct stedma40_chan_cfg mem_to_engine; + struct stedma40_chan_cfg engine_to_mem; +}; + +#endif diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h new file mode 100644 index 000000000..6eba54aff --- /dev/null +++ b/include/linux/platform_data/cyttsp4.h @@ -0,0 +1,76 @@ +/* + * Header file for: + * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. + * For use with Cypress Txx3xx parts. + * Supported parts include: + * CY8CTST341 + * CY8CTMA340 + * + * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. + * Copyright (C) 2012 Javier Martinez Canillas + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, and only version 2, as published by the + * Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com) + * + */ +#ifndef _CYTTSP4_H_ +#define _CYTTSP4_H_ + +#define CYTTSP4_MT_NAME "cyttsp4_mt" +#define CYTTSP4_I2C_NAME "cyttsp4_i2c_adapter" +#define CYTTSP4_SPI_NAME "cyttsp4_spi_adapter" + +#define CY_TOUCH_SETTINGS_MAX 32 + +struct touch_framework { + const uint16_t *abs; + uint8_t size; + uint8_t enable_vkeys; +} __packed; + +struct cyttsp4_mt_platform_data { + struct touch_framework *frmwrk; + unsigned short flags; + char const *inp_dev_name; +}; + +struct touch_settings { + const uint8_t *data; + uint32_t size; + uint8_t tag; +} __packed; + +struct cyttsp4_core_platform_data { + int irq_gpio; + int rst_gpio; + int level_irq_udelay; + int (*xres)(struct cyttsp4_core_platform_data *pdata, + struct device *dev); + int (*init)(struct cyttsp4_core_platform_data *pdata, + int on, struct device *dev); + int (*power)(struct cyttsp4_core_platform_data *pdata, + int on, struct device *dev, atomic_t *ignore_irq); + int (*irq_stat)(struct cyttsp4_core_platform_data *pdata, + struct device *dev); + struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX]; +}; + +struct cyttsp4_platform_data { + struct cyttsp4_core_platform_data *core_pdata; + struct cyttsp4_mt_platform_data *mt_pdata; +}; + +#endif /* _CYTTSP4_H_ */ diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h new file mode 100644 index 000000000..85ad68f92 --- /dev/null +++ b/include/linux/platform_data/davinci_asp.h @@ -0,0 +1,112 @@ +/* + * TI DaVinci Audio Serial Port support + * + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DAVINCI_ASP_H +#define __DAVINCI_ASP_H + +#include + +struct davinci_mcasp_pdata { + u32 tx_dma_offset; + u32 rx_dma_offset; + int asp_chan_q; /* event queue number for ASP channel */ + int ram_chan_q; /* event queue number for RAM channel */ + /* + * Allowing this is more efficient and eliminates left and right swaps + * caused by underruns, but will swap the left and right channels + * when compared to previous behavior. + */ + unsigned enable_channel_combine:1; + unsigned sram_size_playback; + unsigned sram_size_capture; + struct gen_pool *sram_pool; + + /* + * If McBSP peripheral gets the clock from an external pin, + * there are three chooses, that are MCBSP_CLKX, MCBSP_CLKR + * and MCBSP_CLKS. + * Depending on different hardware connections it is possible + * to use this setting to change the behaviour of McBSP + * driver. + */ + int clk_input_pin; + + /* + * This flag works when both clock and FS are outputs for the cpu + * and makes clock more accurate (FS is not symmetrical and the + * clock is very fast. 
+ * The clock becoming faster is named + * i2s continuous serial clock (I2S_SCK) and it is an externally + * visible bit clock. + * + * first line : WordSelect + * second line : ContinuousSerialClock + * third line: SerialData + * + * SYMMETRICAL APPROACH: + * _______________________ LEFT + * _| RIGHT |______________________| + * _ _ _ _ _ _ _ _ + * _| |_| |_ x16 _| |_| |_| |_| |_ x16 _| |_| |_ + * _ _ _ _ _ _ _ _ + * _/ \_/ \_ ... _/ \_/ \_/ \_/ \_ ... _/ \_/ \_ + * \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ + * + * ACCURATE CLOCK APPROACH: + * ______________ LEFT + * _| RIGHT |_______________________________| + * _ _ _ _ _ _ _ _ _ + * _| |_ x16 _| |_| |_ x16 _| |_| |_| |_| |_| |_| | + * _ _ _ _ dummy cycles + * _/ \_ ... _/ \_/ \_ ... _/ \__________________ + * \_/ \_/ \_/ \_/ + * + */ + bool i2s_accurate_sck; + + /* McASP specific fields */ + int tdm_slots; + u8 op_mode; + u8 num_serializer; + u8 *serial_dir; + u8 version; + u8 txnumevt; + u8 rxnumevt; + int tx_dma_channel; + int rx_dma_channel; +}; +/* TODO: Fix arch/arm/mach-davinci/ users and remove this define */ +#define snd_platform_data davinci_mcasp_pdata + +enum { + MCASP_VERSION_1 = 0, /* DM646x */ + MCASP_VERSION_2, /* DA8xx/OMAPL1x */ + MCASP_VERSION_3, /* TI81xx/AM33xx */ + MCASP_VERSION_4, /* DRA7xxx */ +}; + +enum mcbsp_clk_input_pin { + MCBSP_CLKR = 0, /* as in DM365 */ + MCBSP_CLKS, +}; + +#define INACTIVE_MODE 0 +#define TX_MODE 1 +#define RX_MODE 2 + +#define DAVINCI_MCASP_IIS_MODE 0 +#define DAVINCI_MCASP_DIT_MODE 1 + +#endif diff --git a/include/linux/platform_data/db8500_thermal.h b/include/linux/platform_data/db8500_thermal.h new file mode 100644 index 000000000..3bf60902e --- /dev/null +++ b/include/linux/platform_data/db8500_thermal.h @@ -0,0 +1,38 @@ +/* + * db8500_thermal.h - DB8500 Thermal Management Implementation + * + * Copyright (C) 2012 ST-Ericsson + * Copyright (C) 2012 Linaro Ltd. + * + * Author: Hongbo Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DB8500_THERMAL_H_ +#define _DB8500_THERMAL_H_ + +#include + +#define COOLING_DEV_MAX 8 + +struct db8500_trip_point { + unsigned long temp; + enum thermal_trip_type type; + char cdev_name[COOLING_DEV_MAX][THERMAL_NAME_LENGTH]; +}; + +struct db8500_thsens_platform_data { + struct db8500_trip_point trip_points[THERMAL_MAX_TRIPS]; + int num_trips; +}; + +#endif /* _DB8500_THERMAL_H_ */ diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h new file mode 100644 index 000000000..e95f19c65 --- /dev/null +++ b/include/linux/platform_data/dma-atmel.h @@ -0,0 +1,65 @@ +/* + * Header file for the Atmel AHB DMA Controller driver + * + * Copyright (C) 2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#ifndef AT_HDMAC_H +#define AT_HDMAC_H + +#include + +/** + * struct at_dma_platform_data - Controller configuration parameters + * @nr_channels: Number of channels supported by hardware (max 8) + * @cap_mask: dma_capability flags supported by the platform + */ +struct at_dma_platform_data { + unsigned int nr_channels; + dma_cap_mask_t cap_mask; +}; + +/** + * struct at_dma_slave - Controller-specific information about a slave + * @dma_dev: required DMA master device + * @cfg: Platform-specific initializer for the CFG register + */ +struct at_dma_slave { + struct device *dma_dev; + u32 cfg; +}; + + +/* Platform-configurable bits in CFG */ +#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ + +#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */ +#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */ +#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */ +#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */ +#define ATC_SRC_H2SEL_SW (0x0 << 9) +#define ATC_SRC_H2SEL_HW (0x1 << 9) +#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */ +#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */ +#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */ +#define ATC_DST_H2SEL_SW (0x0 << 13) +#define ATC_DST_H2SEL_HW (0x1 << 13) +#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */ +#define ATC_SOD (0x1 << 16) /* Stop On Done */ +#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */ +#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */ +#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */ +#define ATC_LOCK_IF_L_CHUNK (0x0 << 22) +#define ATC_LOCK_IF_L_BUFFER (0x1 << 22) +#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */ +#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */ +#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28) +#define ATC_FIFOCFG_HALFFIFO (0x1 << 28) +#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28) + + +#endif /* AT_HDMAC_H */ diff --git a/include/linux/platform_data/dma-coh901318.h b/include/linux/platform_data/dma-coh901318.h new file mode 100644 index 000000000..c4cb9590d --- /dev/null +++ b/include/linux/platform_data/dma-coh901318.h @@ -0,0 +1,72 @@ +/* + * Platform data for the COH901318 DMA controller + * Copyright (C) 2007-2013 ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef PLAT_COH901318_H +#define PLAT_COH901318_H + +#ifdef CONFIG_COH901318 + +/* We only support the U300 DMA channels */ +#define U300_DMA_MSL_TX_0 0 +#define U300_DMA_MSL_TX_1 1 +#define U300_DMA_MSL_TX_2 2 +#define U300_DMA_MSL_TX_3 3 +#define U300_DMA_MSL_TX_4 4 +#define U300_DMA_MSL_TX_5 5 +#define U300_DMA_MSL_TX_6 6 +#define U300_DMA_MSL_RX_0 7 +#define U300_DMA_MSL_RX_1 8 +#define U300_DMA_MSL_RX_2 9 +#define U300_DMA_MSL_RX_3 10 +#define U300_DMA_MSL_RX_4 11 +#define U300_DMA_MSL_RX_5 12 +#define U300_DMA_MSL_RX_6 13 +#define U300_DMA_MMCSD_RX_TX 14 +#define U300_DMA_MSPRO_TX 15 +#define U300_DMA_MSPRO_RX 16 +#define U300_DMA_UART0_TX 17 +#define U300_DMA_UART0_RX 18 +#define U300_DMA_APEX_TX 19 +#define U300_DMA_APEX_RX 20 +#define U300_DMA_PCM_I2S0_TX 21 +#define U300_DMA_PCM_I2S0_RX 22 +#define U300_DMA_PCM_I2S1_TX 23 +#define U300_DMA_PCM_I2S1_RX 24 +#define U300_DMA_XGAM_CDI 25 +#define U300_DMA_XGAM_PDI 26 +#define U300_DMA_SPI_TX 27 +#define U300_DMA_SPI_RX 28 
+#define U300_DMA_GENERAL_PURPOSE_0 29 +#define U300_DMA_GENERAL_PURPOSE_1 30 +#define U300_DMA_GENERAL_PURPOSE_2 31 +#define U300_DMA_GENERAL_PURPOSE_3 32 +#define U300_DMA_GENERAL_PURPOSE_4 33 +#define U300_DMA_GENERAL_PURPOSE_5 34 +#define U300_DMA_GENERAL_PURPOSE_6 35 +#define U300_DMA_GENERAL_PURPOSE_7 36 +#define U300_DMA_GENERAL_PURPOSE_8 37 +#define U300_DMA_UART1_TX 38 +#define U300_DMA_UART1_RX 39 + +#define U300_DMA_DEVICE_CHANNELS 32 +#define U300_DMA_CHANNELS 40 + +/** + * coh901318_filter_id() - DMA channel filter function + * @chan: dma channel handle + * @chan_id: id of dma channel to be filter out + * + * In dma_request_channel() it specifies what channel id to be requested + */ +bool coh901318_filter_id(struct dma_chan *chan, void *chan_id); +#else +static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) +{ + return false; +} +#endif + +#endif /* PLAT_COH901318_H */ diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h new file mode 100644 index 000000000..1a1d58ebf --- /dev/null +++ b/include/linux/platform_data/dma-dw.h @@ -0,0 +1,76 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2007 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _PLATFORM_DATA_DMA_DW_H +#define _PLATFORM_DATA_DMA_DW_H + +#include + +#define DW_DMA_MAX_NR_MASTERS 4 +#define DW_DMA_MAX_NR_CHANNELS 8 + +/** + * struct dw_dma_slave - Controller-specific information about a slave + * + * @dma_dev: required DMA master device + * @src_id: src request line + * @dst_id: dst request line + * @m_master: memory master for transfers on allocated channel + * @p_master: peripheral master for transfers on allocated channel + * @hs_polarity:set active low polarity of handshake interface + */ +struct dw_dma_slave { + struct device *dma_dev; + u8 src_id; + u8 dst_id; + u8 m_master; + u8 p_master; + bool hs_polarity; +}; + +/** + * struct dw_dma_platform_data - Controller configuration parameters + * @nr_channels: Number of channels supported by hardware (max 8) + * @is_private: The device channels should be marked as private and not for + * by the general purpose DMA channel allocator. + * @is_memcpy: The device channels do support memory-to-memory transfers. + * @is_idma32: The type of the DMA controller is iDMA32 + * @chan_allocation_order: Allocate channels starting from 0 or 7 + * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. + * @block_size: Maximum block size supported by the controller + * @nr_masters: Number of AHB masters supported by the controller + * @data_width: Maximum data width supported by hardware per AHB master + * (in bytes, power of 2) + * @multi_block: Multi block transfers supported by hardware per channel. + * @protctl: Protection control signals setting per channel. 
+ */ +struct dw_dma_platform_data { + unsigned int nr_channels; + bool is_private; + bool is_memcpy; + bool is_idma32; +#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ +#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ + unsigned char chan_allocation_order; +#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ +#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ + unsigned char chan_priority; + unsigned int block_size; + unsigned char nr_masters; + unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; + unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; +#define CHAN_PROTCTL_PRIVILEGED BIT(0) +#define CHAN_PROTCTL_BUFFERABLE BIT(1) +#define CHAN_PROTCTL_CACHEABLE BIT(2) +#define CHAN_PROTCTL_MASK GENMASK(2, 0) + unsigned char protctl; +}; + +#endif /* _PLATFORM_DATA_DMA_DW_H */ diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h new file mode 100644 index 000000000..eb9805bb3 --- /dev/null +++ b/include/linux/platform_data/dma-ep93xx.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ARCH_DMA_H +#define __ASM_ARCH_DMA_H + +#include +#include +#include + +/* + * M2P channels. + * + * Note that these values are also directly used for setting the PPALLOC + * register. + */ +#define EP93XX_DMA_I2S1 0 +#define EP93XX_DMA_I2S2 1 +#define EP93XX_DMA_AAC1 2 +#define EP93XX_DMA_AAC2 3 +#define EP93XX_DMA_AAC3 4 +#define EP93XX_DMA_I2S3 5 +#define EP93XX_DMA_UART1 6 +#define EP93XX_DMA_UART2 7 +#define EP93XX_DMA_UART3 8 +#define EP93XX_DMA_IRDA 9 +/* M2M channels */ +#define EP93XX_DMA_SSP 10 +#define EP93XX_DMA_IDE 11 + +/** + * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine + * @port: peripheral which is requesting the channel + * @direction: TX/RX channel + * @name: optional name for the channel, this is displayed in /proc/interrupts + * + * This information is passed as private channel parameter in a filter + * function. Note that this is only needed for slave/cyclic channels. For + * memcpy channels %NULL data should be passed. + */ +struct ep93xx_dma_data { + int port; + enum dma_transfer_direction direction; + const char *name; +}; + +/** + * struct ep93xx_dma_chan_data - platform specific data for a DMA channel + * @name: name of the channel, used for getting the right clock for the channel + * @base: mapped registers + * @irq: interrupt number used by this channel + */ +struct ep93xx_dma_chan_data { + const char *name; + void __iomem *base; + int irq; +}; + +/** + * struct ep93xx_dma_platform_data - platform data for the dmaengine driver + * @channels: array of channels which are passed to the driver + * @num_channels: number of channels in the array + * + * This structure is passed to the DMA engine driver via platform data. For + * M2P channels, contract is that even channels are for TX and odd for RX. + * There is no requirement for the M2M channels. + */ +struct ep93xx_dma_platform_data { + struct ep93xx_dma_chan_data *channels; + size_t num_channels; +}; + +static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan) +{ + return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p"); +} + +/** + * ep93xx_dma_chan_direction - returns direction the channel can be used + * @chan: channel + * + * This function can be used in filter functions to find out whether the + * channel supports given DMA direction. Only M2P channels have such + * limitation, for M2M channels the direction is configurable. 
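A sketch of the filter-function pattern these ep93xx declarations are written for, using the direction helper defined just below; the port, direction and channel name are placeholders.

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-ep93xx.h>

static struct ep93xx_dma_data board_i2s_tx_dma_data = {
        .port = EP93XX_DMA_I2S1,
        .direction = DMA_MEM_TO_DEV,
        .name = "i2s-tx",
};

static bool board_i2s_dma_filter(struct dma_chan *chan, void *filter_param)
{
        struct ep93xx_dma_data *data = filter_param;

        if (ep93xx_dma_chan_direction(chan) != data->direction)
                return false;

        /* the ep93xx dmaengine driver picks port/direction up from here */
        chan->private = data;
        return true;
}

/* then: dma_request_channel(mask, board_i2s_dma_filter, &board_i2s_tx_dma_data) */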
+ */ +static inline enum dma_transfer_direction +ep93xx_dma_chan_direction(struct dma_chan *chan) +{ + if (!ep93xx_dma_chan_is_m2p(chan)) + return DMA_TRANS_NONE; + + /* even channels are for TX, odd for RX */ + return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; +} + +#endif /* __ASM_ARCH_DMA_H */ diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h new file mode 100644 index 000000000..3453fa655 --- /dev/null +++ b/include/linux/platform_data/dma-hsu.h @@ -0,0 +1,21 @@ +/* + * Driver for the High Speed UART DMA + * + * Copyright (C) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _PLATFORM_DATA_DMA_HSU_H +#define _PLATFORM_DATA_DMA_HSU_H + +#include + +struct hsu_dma_slave { + struct device *dma_dev; + int chan_id; +}; + +#endif /* _PLATFORM_DATA_DMA_HSU_H */ diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h new file mode 100644 index 000000000..30e676b36 --- /dev/null +++ b/include/linux/platform_data/dma-imx-sdma.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __MACH_MXC_SDMA_H__ +#define __MACH_MXC_SDMA_H__ + +/** + * struct sdma_script_start_addrs - SDMA script start pointers + * + * start addresses of the different functions in the physical + * address space of the SDMA engine. + */ +struct sdma_script_start_addrs { + s32 ap_2_ap_addr; + s32 ap_2_bp_addr; + s32 ap_2_ap_fixed_addr; + s32 bp_2_ap_addr; + s32 loopback_on_dsp_side_addr; + s32 mcu_interrupt_only_addr; + s32 firi_2_per_addr; + s32 firi_2_mcu_addr; + s32 per_2_firi_addr; + s32 mcu_2_firi_addr; + s32 uart_2_per_addr; + s32 uart_2_mcu_addr; + s32 per_2_app_addr; + s32 mcu_2_app_addr; + s32 per_2_per_addr; + s32 uartsh_2_per_addr; + s32 uartsh_2_mcu_addr; + s32 per_2_shp_addr; + s32 mcu_2_shp_addr; + s32 ata_2_mcu_addr; + s32 mcu_2_ata_addr; + s32 app_2_per_addr; + s32 app_2_mcu_addr; + s32 shp_2_per_addr; + s32 shp_2_mcu_addr; + s32 mshc_2_mcu_addr; + s32 mcu_2_mshc_addr; + s32 spdif_2_mcu_addr; + s32 mcu_2_spdif_addr; + s32 asrc_2_mcu_addr; + s32 ext_mem_2_ipu_addr; + s32 descrambler_addr; + s32 dptc_dvfs_addr; + s32 utra_addr; + s32 ram_code_start_addr; + /* End of v1 array */ + s32 mcu_2_ssish_addr; + s32 ssish_2_mcu_addr; + s32 hdmi_dma_addr; + /* End of v2 array */ + s32 zcanfd_2_mcu_addr; + s32 zqspi_2_mcu_addr; + s32 mcu_2_ecspi_addr; + /* End of v3 array */ + s32 mcu_2_zqspi_addr; + /* End of v4 array */ +}; + +/** + * struct sdma_platform_data - platform specific data for SDMA engine + * + * @fw_name The firmware name + * @script_addrs SDMA scripts addresses in SDMA ROM + */ +struct sdma_platform_data { + char *fw_name; + struct sdma_script_start_addrs *script_addrs; +}; + +#endif /* __MACH_MXC_SDMA_H__ */ diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h new file mode 100644 index 000000000..7d964e787 --- /dev/null +++ b/include/linux/platform_data/dma-imx.h @@ -0,0 +1,71 @@ +/* + * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
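For the SDMA platform data above, a SoC file would typically provide the firmware name and ROM script entry points roughly as follows; the numbers and firmware name are illustrative, not authoritative.

#include <linux/platform_data/dma-imx-sdma.h>

/* ROM script entry points are SoC specific; the values below are examples */
static struct sdma_script_start_addrs board_sdma_script = {
        .ap_2_ap_addr = 729,
        .uart_2_mcu_addr = 904,
        .per_2_app_addr = 1255,
};

static struct sdma_platform_data board_sdma_pdata = {
        .fw_name = "sdma-imx25.bin",    /* optional RAM scripts firmware */
        .script_addrs = &board_sdma_script,
};

/* board_sdma_pdata is then attached to the SDMA platform device as
 * dev.platform_data when that device is registered.
 */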
+ */ + +#ifndef __ASM_ARCH_MXC_DMA_H__ +#define __ASM_ARCH_MXC_DMA_H__ + +#include +#include +#include + +/* + * This enumerates peripheral types. Used for SDMA. + */ +enum sdma_peripheral_type { + IMX_DMATYPE_SSI, /* MCU domain SSI */ + IMX_DMATYPE_SSI_SP, /* Shared SSI */ + IMX_DMATYPE_MMC, /* MMC */ + IMX_DMATYPE_SDHC, /* SDHC */ + IMX_DMATYPE_UART, /* MCU domain UART */ + IMX_DMATYPE_UART_SP, /* Shared UART */ + IMX_DMATYPE_FIRI, /* FIRI */ + IMX_DMATYPE_CSPI, /* MCU domain CSPI */ + IMX_DMATYPE_CSPI_SP, /* Shared CSPI */ + IMX_DMATYPE_SIM, /* SIM */ + IMX_DMATYPE_ATA, /* ATA */ + IMX_DMATYPE_CCM, /* CCM */ + IMX_DMATYPE_EXT, /* External peripheral */ + IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */ + IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */ + IMX_DMATYPE_DSP, /* DSP */ + IMX_DMATYPE_MEMORY, /* Memory */ + IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */ + IMX_DMATYPE_SPDIF, /* SPDIF */ + IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */ + IMX_DMATYPE_ASRC, /* ASRC */ + IMX_DMATYPE_ESAI, /* ESAI */ + IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ + IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ + IMX_DMATYPE_SAI, /* SAI */ +}; + +enum imx_dma_prio { + DMA_PRIO_HIGH = 0, + DMA_PRIO_MEDIUM = 1, + DMA_PRIO_LOW = 2 +}; + +struct imx_dma_data { + int dma_request; /* DMA request line */ + int dma_request2; /* secondary DMA request line */ + enum sdma_peripheral_type peripheral_type; + int priority; +}; + +static inline int imx_dma_is_ipu(struct dma_chan *chan) +{ + return !strcmp(dev_name(chan->device->dev), "ipu-core"); +} + +static inline int imx_dma_is_general_purpose(struct dma_chan *chan) +{ + return !strcmp(chan->device->dev->driver->name, "imx-sdma") || + !strcmp(chan->device->dev->driver->name, "imx-dma"); +} + +#endif diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h new file mode 100644 index 000000000..422d4504d --- /dev/null +++ b/include/linux/platform_data/dma-mmp_tdma.h @@ -0,0 +1,40 @@ +/* + * SRAM Memory Management + * + * Copyright (c) 2011 Marvell Semiconductors Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __DMA_MMP_TDMA_H +#define __DMA_MMP_TDMA_H + +#include + +/* ARBITRARY: SRAM allocations are multiples of this 2^N size */ +#define SRAM_GRANULARITY 512 + +enum sram_type { + MMP_SRAM_UNDEFINED = 0, + MMP_ASRAM, + MMP_ISRAM, +}; + +struct sram_platdata { + char *pool_name; + int granularity; +}; + +#ifdef CONFIG_MMP_SRAM +extern struct gen_pool *sram_get_gpool(char *pool_name); +#else +static inline struct gen_pool *sram_get_gpool(char *pool_name) +{ + return NULL; +} +#endif + +#endif /* __DMA_MMP_TDMA_H */ diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h new file mode 100644 index 000000000..6867a7ea3 --- /dev/null +++ b/include/linux/platform_data/dma-mv_xor.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Marvell XOR platform device data definition file. 
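The two helpers above are intended for dmaengine filter functions; requesting a general-purpose SDMA channel for a peripheral commonly looks roughly like this (the event number and peripheral type are placeholders).

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-imx.h>

static bool board_sdma_filter(struct dma_chan *chan, void *param)
{
        if (!imx_dma_is_general_purpose(chan))
                return false;

        /* the imx-sdma driver reads this in its channel setup */
        chan->private = param;
        return true;
}

static struct dma_chan *board_request_ssi_tx_chan(void)
{
        static struct imx_dma_data data = {
                .dma_request = 29,      /* SDMA event number, SoC specific */
                .peripheral_type = IMX_DMATYPE_SSI,
                .priority = DMA_PRIO_HIGH,
        };
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, board_sdma_filter, &data);
}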
+ */ + +#ifndef __DMA_MV_XOR_H +#define __DMA_MV_XOR_H + +#include +#include + +#define MV_XOR_NAME "mv_xor" + +struct mv_xor_channel_data { + dma_cap_mask_t cap_mask; +}; + +struct mv_xor_platform_data { + struct mv_xor_channel_data *channels; +}; + +#endif diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h new file mode 100644 index 000000000..4f9aba405 --- /dev/null +++ b/include/linux/platform_data/dma-s3c24xx.h @@ -0,0 +1,52 @@ +/* + * S3C24XX DMA handling + * + * Copyright (c) 2013 Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +/* Helper to encode the source selection constraints for early s3c socs. */ +#define S3C24XX_DMA_CHANREQ(src, chan) ((BIT(3) | src) << chan * 4) + +enum s3c24xx_dma_bus { + S3C24XX_DMA_APB, + S3C24XX_DMA_AHB, +}; + +/** + * @bus: on which bus does the peripheral reside - AHB or APB. + * @handshake: is a handshake with the peripheral necessary + * @chansel: channel selection information, depending on variant; reqsel for + * s3c2443 and later and channel-selection map for earlier SoCs + * see CHANSEL doc in s3c2443-dma.c + */ +struct s3c24xx_dma_channel { + enum s3c24xx_dma_bus bus; + bool handshake; + u16 chansel; +}; + +struct dma_slave_map; + +/** + * struct s3c24xx_dma_platdata - platform specific settings + * @num_phy_channels: number of physical channels + * @channels: array of virtual channel descriptions + * @num_channels: number of virtual channels + * @slave_map: dma slave map matching table + * @slavecnt: number of elements in slave_map + */ +struct s3c24xx_dma_platdata { + int num_phy_channels; + struct s3c24xx_dma_channel *channels; + int num_channels; + const struct dma_slave_map *slave_map; + int slavecnt; +}; + +struct dma_chan; +bool s3c24xx_dma_filter(struct dma_chan *chan, void *param); diff --git a/include/linux/platform_data/dma-ste-dma40.h b/include/linux/platform_data/dma-ste-dma40.h new file mode 100644 index 000000000..1bb9b1852 --- /dev/null +++ b/include/linux/platform_data/dma-ste-dma40.h @@ -0,0 +1,209 @@ +/* + * Copyright (C) ST-Ericsson SA 2007-2010 + * Author: Per Forlin for ST-Ericsson + * Author: Jonas Aaberg for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + */ + + +#ifndef STE_DMA40_H +#define STE_DMA40_H + +#include +#include +#include +#include + +/* + * Maxium size for a single dma descriptor + * Size is limited to 16 bits. 
+ * Size is in the units of addr-widths (1,2,4,8 bytes) + * Larger transfers will be split up to multiple linked desc + */ +#define STEDMA40_MAX_SEG_SIZE 0xFFFF + +/* dev types for memcpy */ +#define STEDMA40_DEV_DST_MEMORY (-1) +#define STEDMA40_DEV_SRC_MEMORY (-1) + +enum stedma40_mode { + STEDMA40_MODE_LOGICAL = 0, + STEDMA40_MODE_PHYSICAL, + STEDMA40_MODE_OPERATION, +}; + +enum stedma40_mode_opt { + STEDMA40_PCHAN_BASIC_MODE = 0, + STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0, + STEDMA40_PCHAN_MODULO_MODE, + STEDMA40_PCHAN_DOUBLE_DST_MODE, + STEDMA40_LCHAN_SRC_PHY_DST_LOG, + STEDMA40_LCHAN_SRC_LOG_DST_PHY, +}; + +#define STEDMA40_ESIZE_8_BIT 0x0 +#define STEDMA40_ESIZE_16_BIT 0x1 +#define STEDMA40_ESIZE_32_BIT 0x2 +#define STEDMA40_ESIZE_64_BIT 0x3 + +/* The value 4 indicates that PEN-reg shall be set to 0 */ +#define STEDMA40_PSIZE_PHY_1 0x4 +#define STEDMA40_PSIZE_PHY_2 0x0 +#define STEDMA40_PSIZE_PHY_4 0x1 +#define STEDMA40_PSIZE_PHY_8 0x2 +#define STEDMA40_PSIZE_PHY_16 0x3 + +/* + * The number of elements differ in logical and + * physical mode + */ +#define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2 +#define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4 +#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8 +#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16 + +/* Maximum number of possible physical channels */ +#define STEDMA40_MAX_PHYS 32 + +enum stedma40_flow_ctrl { + STEDMA40_NO_FLOW_CTRL, + STEDMA40_FLOW_CTRL, +}; + +/** + * struct stedma40_half_channel_info - dst/src channel configuration + * + * @big_endian: true if the src/dst should be read as big endian + * @data_width: Data width of the src/dst hardware + * @p_size: Burst size + * @flow_ctrl: Flow control on/off. + */ +struct stedma40_half_channel_info { + bool big_endian; + enum dma_slave_buswidth data_width; + int psize; + enum stedma40_flow_ctrl flow_ctrl; +}; + +/** + * struct stedma40_chan_cfg - Structure to be filled by client drivers. + * + * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH + * @high_priority: true if high-priority + * @realtime: true if realtime mode is to be enabled. Only available on DMA40 + * version 3+, i.e DB8500v2+ + * @mode: channel mode: physical, logical, or operation + * @mode_opt: options for the chosen channel mode + * @dev_type: src/dst device type (driver uses dir to figure out which) + * @src_info: Parameters for dst half channel + * @dst_info: Parameters for dst half channel + * @use_fixed_channel: if true, use physical channel specified by phy_channel + * @phy_channel: physical channel to use, only if use_fixed_channel is true + * + * This structure has to be filled by the client drivers. + * It is recommended to do all dma configurations for clients in the machine. + * + */ +struct stedma40_chan_cfg { + enum dma_transfer_direction dir; + bool high_priority; + bool realtime; + enum stedma40_mode mode; + enum stedma40_mode_opt mode_opt; + int dev_type; + struct stedma40_half_channel_info src_info; + struct stedma40_half_channel_info dst_info; + + bool use_fixed_channel; + int phy_channel; +}; + +/** + * struct stedma40_platform_data - Configuration struct for the dma device. + * + * @dev_tx: mapping between destination event line and io address + * @dev_rx: mapping between source event line and io address + * @disabled_channels: A vector, ending with -1, that marks physical channels + * that are for different reasons not available for the driver. 
+ * @soft_lli_chans: A vector, that marks physical channels will use LLI by SW + * which avoids HW bug that exists in some versions of the controller. + * SoftLLI introduces relink overhead that could impact performace for + * certain use cases. + * @num_of_soft_lli_chans: The number of channels that needs to be configured + * to use SoftLLI. + * @use_esram_lcla: flag for mapping the lcla into esram region + * @num_of_memcpy_chans: The number of channels reserved for memcpy. + * @num_of_phy_chans: The number of physical channels implemented in HW. + * 0 means reading the number of channels from DMA HW but this is only valid + * for 'multiple of 4' channels, like 8. + */ +struct stedma40_platform_data { + int disabled_channels[STEDMA40_MAX_PHYS]; + int *soft_lli_chans; + int num_of_soft_lli_chans; + bool use_esram_lcla; + int num_of_memcpy_chans; + int num_of_phy_chans; +}; + +#ifdef CONFIG_STE_DMA40 + +/** + * stedma40_filter() - Provides stedma40_chan_cfg to the + * ste_dma40 dma driver via the dmaengine framework. + * does some checking of what's provided. + * + * Never directly called by client. It used by dmaengine. + * @chan: dmaengine handle. + * @data: Must be of type: struct stedma40_chan_cfg and is + * the configuration of the framework. + * + * + */ + +bool stedma40_filter(struct dma_chan *chan, void *data); + +/** + * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave + * (=device) + * + * @chan: dmaengine handle + * @addr: source or destination physicall address. + * @size: bytes to transfer + * @direction: direction of transfer + * @flags: is actually enum dma_ctrl_flags. See dmaengine.h + */ + +static inline struct +dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan, + dma_addr_t addr, + unsigned int size, + enum dma_transfer_direction direction, + unsigned long flags) +{ + struct scatterlist sg; + sg_init_table(&sg, 1); + sg.dma_address = addr; + sg.length = size; + + return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags); +} + +#else +static inline bool stedma40_filter(struct dma_chan *chan, void *data) +{ + return false; +} + +static inline struct +dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan, + dma_addr_t addr, + unsigned int size, + enum dma_transfer_direction direction, + unsigned long flags) +{ + return NULL; +} +#endif + +#endif diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h new file mode 100644 index 000000000..757a0f9e2 --- /dev/null +++ b/include/linux/platform_data/dmtimer-omap.h @@ -0,0 +1,69 @@ +/* + * DMTIMER platform data for TI OMAP platforms + * + * Copyright (C) 2012 Texas Instruments + * Author: Jon Hunter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
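Putting the declarations above together, a DB8500-class client would normally build a stedma40_chan_cfg and pass it to dma_request_channel() through stedma40_filter(); the event line and bus widths below are placeholders.

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-ste-dma40.h>

static struct stedma40_chan_cfg board_uart_rx_cfg = {
        .dir = DMA_DEV_TO_MEM,
        .mode = STEDMA40_MODE_LOGICAL,
        .dev_type = 13,         /* platform-specific event line */
        .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
        .src_info.psize = STEDMA40_PSIZE_LOG_4,
        .dst_info.psize = STEDMA40_PSIZE_LOG_4,
};

static struct dma_chan *board_request_uart_rx_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, stedma40_filter, &board_uart_rx_cfg);
}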
+ */ + +#ifndef __PLATFORM_DATA_DMTIMER_OMAP_H__ +#define __PLATFORM_DATA_DMTIMER_OMAP_H__ + +struct omap_dm_timer_ops { + struct omap_dm_timer *(*request_by_node)(struct device_node *np); + struct omap_dm_timer *(*request_specific)(int timer_id); + struct omap_dm_timer *(*request)(void); + + int (*free)(struct omap_dm_timer *timer); + + void (*enable)(struct omap_dm_timer *timer); + void (*disable)(struct omap_dm_timer *timer); + + int (*get_irq)(struct omap_dm_timer *timer); + int (*set_int_enable)(struct omap_dm_timer *timer, + unsigned int value); + int (*set_int_disable)(struct omap_dm_timer *timer, u32 mask); + + struct clk *(*get_fclk)(struct omap_dm_timer *timer); + + int (*start)(struct omap_dm_timer *timer); + int (*stop)(struct omap_dm_timer *timer); + int (*set_source)(struct omap_dm_timer *timer, int source); + + int (*set_load)(struct omap_dm_timer *timer, int autoreload, + unsigned int value); + int (*set_match)(struct omap_dm_timer *timer, int enable, + unsigned int match); + int (*set_pwm)(struct omap_dm_timer *timer, int def_on, + int toggle, int trigger); + int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler); + + unsigned int (*read_counter)(struct omap_dm_timer *timer); + int (*write_counter)(struct omap_dm_timer *timer, + unsigned int value); + unsigned int (*read_status)(struct omap_dm_timer *timer); + int (*write_status)(struct omap_dm_timer *timer, + unsigned int value); +}; + +struct dmtimer_platform_data { + /* set_timer_src - Only used for OMAP1 devices */ + int (*set_timer_src)(struct platform_device *pdev, int source); + u32 timer_capability; + u32 timer_errata; + int (*get_context_loss_count)(struct device *); + const struct omap_dm_timer_ops *timer_ops; +}; + +#endif /* __PLATFORM_DATA_DMTIMER_OMAP_H__ */ diff --git a/include/linux/platform_data/ds620.h b/include/linux/platform_data/ds620.h new file mode 100644 index 000000000..6ef58bb77 --- /dev/null +++ b/include/linux/platform_data/ds620.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DS620_H +#define _LINUX_DS620_H + +#include +#include + +/* platform data for the DS620 temperature sensor and thermostat */ + +struct ds620_platform_data { + /* + * Thermostat output pin PO mode: + * 0 = always low (default) + * 1 = PO_LOW + * 2 = PO_HIGH + * + * (see Documentation/hwmon/ds620) + */ + int pomode; +}; + +#endif /* _LINUX_DS620_H */ diff --git a/include/linux/platform_data/dwc3-omap.h b/include/linux/platform_data/dwc3-omap.h new file mode 100644 index 000000000..1d36ca874 --- /dev/null +++ b/include/linux/platform_data/dwc3-omap.h @@ -0,0 +1,43 @@ +/** + * dwc3-omap.h - OMAP Specific Glue layer, header. + * + * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com + * All rights reserved. + * + * Author: Felipe Balbi + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the above-listed copyright holders may not be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2, as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +enum dwc3_omap_utmi_mode { + DWC3_OMAP_UTMI_MODE_UNKNOWN = 0, + DWC3_OMAP_UTMI_MODE_HW, + DWC3_OMAP_UTMI_MODE_SW, +}; diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h new file mode 100644 index 000000000..0a533f944 --- /dev/null +++ b/include/linux/platform_data/edma.h @@ -0,0 +1,88 @@ +/* + * TI EDMA definitions + * + * Copyright (C) 2006-2013 Texas Instruments. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/* + * This EDMA3 programming framework exposes two basic kinds of resource: + * + * Channel Triggers transfers, usually from a hardware event but + * also manually or by "chaining" from DMA completions. + * Each channel is coupled to a Parameter RAM (PaRAM) slot. + * + * Slot Each PaRAM slot holds a DMA transfer descriptor (PaRAM + * "set"), source and destination addresses, a link to a + * next PaRAM slot (if any), options for the transfer, and + * instructions for updating those addresses. There are + * more than twice as many slots as event channels. + * + * Each PaRAM set describes a sequence of transfers, either for one large + * buffer or for several discontiguous smaller buffers. An EDMA transfer + * is driven only from a channel, which performs the transfers specified + * in its PaRAM slot until there are no more transfers. When that last + * transfer completes, the "link" field may be used to reload the channel's + * PaRAM slot with a new transfer descriptor. + * + * The EDMA Channel Controller (CC) maps requests from channels into physical + * Transfer Controller (TC) requests when the channel triggers (by hardware + * or software events, or by chaining). The two physical DMA channels provided + * by the TCs are thus shared by many logical channels. + * + * DaVinci hardware also has a "QDMA" mechanism which is not currently + * supported through this interface. (DSP firmware uses it though.) 
+ */ + +#ifndef EDMA_H_ +#define EDMA_H_ + +enum dma_event_q { + EVENTQ_0 = 0, + EVENTQ_1 = 1, + EVENTQ_2 = 2, + EVENTQ_3 = 3, + EVENTQ_DEFAULT = -1 +}; + +#define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) +#define EDMA_CTLR(i) ((i) >> 16) +#define EDMA_CHAN_SLOT(i) ((i) & 0xffff) + +#define EDMA_FILTER_PARAM(ctlr, chan) ((int[]) { EDMA_CTLR_CHAN(ctlr, chan) }) + +struct edma_rsv_info { + + const s16 (*rsv_chans)[2]; + const s16 (*rsv_slots)[2]; +}; + +struct dma_slave_map; + +/* platform_data for EDMA driver */ +struct edma_soc_info { + /* + * Default queue is expected to be a low-priority queue. + * This way, long transfers on the default queue started + * by the codec engine will not cause audio defects. + */ + enum dma_event_q default_queue; + + /* Resource reservation for other cores */ + struct edma_rsv_info *rsv; + + /* List of channels allocated for memcpy, terminated with -1 */ + s32 *memcpy_channels; + + s8 (*queue_priority_mapping)[2]; + const s16 (*xbar_chans)[2]; + + const struct dma_slave_map *slave_map; + int slavecnt; +}; + +#endif diff --git a/include/linux/platform_data/efm32-spi.h b/include/linux/platform_data/efm32-spi.h new file mode 100644 index 000000000..a2c56fcd0 --- /dev/null +++ b/include/linux/platform_data/efm32-spi.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ +#define __LINUX_PLATFORM_DATA_EFM32_SPI_H__ + +#include + +/** + * struct efm32_spi_pdata + * @location: pinmux location for the I/O pins (to be written to the ROUTE + * register) + */ +struct efm32_spi_pdata { + u8 location; +}; +#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */ diff --git a/include/linux/platform_data/efm32-uart.h b/include/linux/platform_data/efm32-uart.h new file mode 100644 index 000000000..ccbb8f11d --- /dev/null +++ b/include/linux/platform_data/efm32-uart.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * + */ +#ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ +#define __LINUX_PLATFORM_DATA_EFM32_UART_H__ + +#include + +/** + * struct efm32_uart_pdata + * @location: pinmux location for the I/O pins (to be written to the ROUTE + * register) + */ +struct efm32_uart_pdata { + u8 location; +}; +#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ */ diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h new file mode 100644 index 000000000..5c15a738e --- /dev/null +++ b/include/linux/platform_data/ehci-sh.h @@ -0,0 +1,28 @@ +/* + * EHCI SuperH driver platform data + * + * Copyright (C) 2012 Nobuhiro Iwamatsu + * Copyright (C) 2012 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
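EDMA_FILTER_PARAM above is what usually ends up in a dma_slave_map table referenced from edma_soc_info; the device names, controller and channel numbers below are examples only.

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/platform_data/edma.h>

/* example: McASP0 served by EDMA controller 0, channels 0 and 1 */
static const struct dma_slave_map board_edma_map[] = {
        { "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 0) },
        { "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 1) },
};

static struct edma_soc_info board_edma_info = {
        .default_queue = EVENTQ_1,      /* assumed to be a low-priority queue */
        .slave_map = board_edma_map,
        .slavecnt = ARRAY_SIZE(board_edma_map),
};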
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __USB_EHCI_SH_H +#define __USB_EHCI_SH_H + +struct ehci_sh_platdata { + void (*phy_init)(void); /* Phy init function */ +}; + +#endif /* __USB_EHCI_SH_H */ diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h new file mode 100644 index 000000000..b8686c00f --- /dev/null +++ b/include/linux/platform_data/elm.h @@ -0,0 +1,65 @@ +/* + * BCH Error Location Module + * + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ELM_H +#define __ELM_H + +enum bch_ecc { + BCH4_ECC = 0, + BCH8_ECC, + BCH16_ECC, +}; + +/* ELM support 8 error syndrome process */ +#define ERROR_VECTOR_MAX 8 + +/** + * struct elm_errorvec - error vector for elm + * @error_reported: set true for vectors error is reported + * @error_uncorrectable: number of uncorrectable errors + * @error_count: number of correctable errors in the sector + * @error_loc: buffer for error location + * + */ +struct elm_errorvec { + bool error_reported; + bool error_uncorrectable; + int error_count; + int error_loc[16]; +}; + +#if IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH) +void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, + struct elm_errorvec *err_vec); +int elm_config(struct device *dev, enum bch_ecc bch_type, + int ecc_steps, int ecc_step_size, int ecc_syndrome_size); +#else +static inline void +elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, + struct elm_errorvec *err_vec) +{ +} + +static inline int elm_config(struct device *dev, enum bch_ecc bch_type, + int ecc_steps, int ecc_step_size, + int ecc_syndrome_size) +{ + return -ENOSYS; +} +#endif /* CONFIG_MTD_NAND_ECC_BCH */ + +#endif /* __ELM_H */ diff --git a/include/linux/platform_data/emif_plat.h b/include/linux/platform_data/emif_plat.h new file mode 100644 index 000000000..5c19a2a64 --- /dev/null +++ b/include/linux/platform_data/emif_plat.h @@ -0,0 +1,129 @@ +/* + * Definitions for TI EMIF device platform data + * + * Copyright (C) 2012 Texas Instruments, Inc. + * + * Aneesh V + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
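A rough sketch of the calling sequence the ELM interface above implies for a NAND driver; the step count, step size and syndrome size are illustrative and depend on the BCH scheme and page size actually used.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_data/elm.h>

static int board_elm_correct(struct device *elm_dev, u8 *ecc_calc)
{
        struct elm_errorvec err_vec[ERROR_VECTOR_MAX] = { };
        int ret;

        /* example geometry: 4 steps of 512 bytes, BCH8, 14 syndrome bytes */
        ret = elm_config(elm_dev, BCH8_ECC, 4, 512, 14);
        if (ret)
                return ret;

        elm_decode_bch_error_page(elm_dev, ecc_calc, err_vec);

        if (err_vec[0].error_uncorrectable)
                return -EBADMSG;

        return err_vec[0].error_reported ? err_vec[0].error_count : 0;
}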
+ */ +#ifndef __EMIF_PLAT_H +#define __EMIF_PLAT_H + +/* Low power modes - EMIF_PWR_MGMT_CTRL */ +#define EMIF_LP_MODE_DISABLE 0 +#define EMIF_LP_MODE_CLOCK_STOP 1 +#define EMIF_LP_MODE_SELF_REFRESH 2 +#define EMIF_LP_MODE_PWR_DN 4 + +/* Hardware capabilities */ +#define EMIF_HW_CAPS_LL_INTERFACE 0x00000001 + +/* + * EMIF IP Revisions + * EMIF4D - Used in OMAP4 + * EMIF4D5 - Used in OMAP5 + */ +#define EMIF_4D 1 +#define EMIF_4D5 2 + +/* + * PHY types + * ATTILAPHY - Used in OMAP4 + * INTELLIPHY - Used in OMAP5 + */ +#define EMIF_PHY_TYPE_ATTILAPHY 1 +#define EMIF_PHY_TYPE_INTELLIPHY 2 + +/* Custom config requests */ +#define EMIF_CUSTOM_CONFIG_LPMODE 0x00000001 +#define EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL 0x00000002 +#define EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART 0x00000004 + +#ifndef __ASSEMBLY__ +/** + * struct ddr_device_info - All information about the DDR device except AC + * timing parameters + * @type: Device type (LPDDR2-S4, LPDDR2-S2 etc) + * @density: Device density + * @io_width: Bus width + * @cs1_used: Whether there is a DDR device attached to the second + * chip-select(CS1) of this EMIF instance + * @cal_resistors_per_cs: Whether there is one calibration resistor per + * chip-select or whether it's a single one for both + * @manufacturer: Manufacturer name string + */ +struct ddr_device_info { + u32 type; + u32 density; + u32 io_width; + u32 cs1_used; + u32 cal_resistors_per_cs; + char manufacturer[10]; +}; + +/** + * struct emif_custom_configs - Custom configuration parameters/policies + * passed from the platform layer + * @mask: Mask to indicate which configs are requested + * @lpmode: LPMODE to be used in PWR_MGMT_CTRL register + * @lpmode_timeout_performance: Timeout before LPMODE entry when higher + * performance is desired at the cost of power (typically + * at higher OPPs) + * @lpmode_timeout_power: Timeout before LPMODE entry when better power + * savings is desired and performance is not important + * (typically at lower loads indicated by lower OPPs) + * @lpmode_freq_threshold: The DDR frequency threshold to identify between + * the above two cases: + * timeout = (freq >= lpmode_freq_threshold) ? + * lpmode_timeout_performance : + * lpmode_timeout_power; + * @temp_alert_poll_interval_ms: LPDDR2 MR4 polling interval at nominal + * temperature(in milliseconds). When temperature is high + * polling is done 4 times as frequently. + */ +struct emif_custom_configs { + u32 mask; + u32 lpmode; + u32 lpmode_timeout_performance; + u32 lpmode_timeout_power; + u32 lpmode_freq_threshold; + u32 temp_alert_poll_interval_ms; +}; + +/** + * struct emif_platform_data - Platform data passed on EMIF platform + * device creation. Used by the driver. + * @hw_caps: Hw capabilities of the EMIF IP in the respective SoC + * @device_info: Device info structure containing information such + * as type, bus width, density etc + * @timings: Timings information from device datasheet passed + * as an array of 'struct lpddr2_timings'. Can be NULL + * if if default timings are ok + * @timings_arr_size: Size of the timings array. Depends on the number + * of different frequencies for which timings data + * is provided + * @min_tck: Minimum value of some timing parameters in terms + * of number of cycles. Can be NULL if default values + * are ok + * @custom_configs: Custom configurations requested by SoC or board + * code and the data for them. Can be NULL if default + * configurations done by the driver are ok. 
See + * documentation for 'struct emif_custom_configs' for + * more details + */ +struct emif_platform_data { + u32 hw_caps; + struct ddr_device_info *device_info; + const struct lpddr2_timings *timings; + u32 timings_arr_size; + const struct lpddr2_min_tck *min_tck; + struct emif_custom_configs *custom_configs; + u32 ip_rev; + u32 phy_type; +}; +#endif /* __ASSEMBLY__ */ + +#endif /* __LINUX_EMIF_H */ diff --git a/include/linux/platform_data/eth-netx.h b/include/linux/platform_data/eth-netx.h new file mode 100644 index 000000000..a39515972 --- /dev/null +++ b/include/linux/platform_data/eth-netx.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2005 Sascha Hauer , Pengutronix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ETH_NETX_H +#define __ETH_NETX_H + +struct netxeth_platform_data { + unsigned int xcno; /* number of xmac/xpec engine this eth uses */ +}; + +#endif diff --git a/include/linux/platform_data/fsa9480.h b/include/linux/platform_data/fsa9480.h new file mode 100644 index 000000000..72dddcb4b --- /dev/null +++ b/include/linux/platform_data/fsa9480.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2010 Samsung Electronics + * Minkyu Kang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _FSA9480_H_ +#define _FSA9480_H_ + +#define FSA9480_ATTACHED 1 +#define FSA9480_DETACHED 0 + +struct fsa9480_platform_data { + void (*cfg_gpio) (void); + void (*usb_cb) (u8 attached); + void (*uart_cb) (u8 attached); + void (*charger_cb) (u8 attached); + void (*jig_cb) (u8 attached); + void (*reset_cb) (void); + void (*usb_power) (u8 on); + int wakeup; +}; + +#endif /* _FSA9480_H_ */ diff --git a/include/linux/platform_data/g762.h b/include/linux/platform_data/g762.h new file mode 100644 index 000000000..d3c512837 --- /dev/null +++ b/include/linux/platform_data/g762.h @@ -0,0 +1,37 @@ +/* + * Platform data structure for g762 fan controller driver + * + * Copyright (C) 2013, Arnaud EBALARD + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
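For illustration, a sketch of how board code might populate the EMIF platform data defined above. Every numeric value, the my_* names and the choice of self-refresh as the low-power mode are assumptions for the example; the timings and min_tck tables are left NULL so the driver falls back to its defaults.

/* Hypothetical board data for one EMIF instance; all values are examples */
static struct ddr_device_info my_ddr_info = {
	.type			= 0,		/* e.g. an LPDDR2-S4 type code */
	.density		= 0,		/* per-CS density code from the datasheet */
	.io_width		= 0,		/* bus width code */
	.cs1_used		= 1,		/* a second device sits on CS1 */
	.cal_resistors_per_cs	= 0,
	.manufacturer		= "EXAMPLE",
};

static struct emif_custom_configs my_emif_custom = {
	.mask	= EMIF_CUSTOM_CONFIG_LPMODE |
		  EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL,
	.lpmode				= EMIF_LP_MODE_SELF_REFRESH,
	/* per the comment above: the performance timeout applies at or above
	 * the threshold frequency, the power timeout below it */
	.lpmode_freq_threshold		= 400000000,	/* 400 MHz, example */
	.lpmode_timeout_performance	= 512,		/* example value */
	.lpmode_timeout_power		= 128,		/* example value */
	.temp_alert_poll_interval_ms	= 360,
};

static struct emif_platform_data my_emif_pdata = {
	.hw_caps	= EMIF_HW_CAPS_LL_INTERFACE,
	.device_info	= &my_ddr_info,
	.custom_configs	= &my_emif_custom,
	.ip_rev		= EMIF_4D5,
	.phy_type	= EMIF_PHY_TYPE_INTELLIPHY,
	/* .timings and .min_tck left NULL: default timings are used */
};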
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef __LINUX_PLATFORM_DATA_G762_H__ +#define __LINUX_PLATFORM_DATA_G762_H__ + +/* + * Following structure can be used to set g762 driver platform specific data + * during board init. Note that passing a sparse structure is possible but + * will result in non-specified attributes to be set to default value, hence + * overloading those installed during boot (e.g. by u-boot). + */ + +struct g762_platform_data { + u32 fan_startv; + u32 fan_gear_mode; + u32 pwm_polarity; + u32 clk_freq; +}; + +#endif /* __LINUX_PLATFORM_DATA_G762_H__ */ diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h new file mode 100644 index 000000000..88b0db7be --- /dev/null +++ b/include/linux/platform_data/gpio-ath79.h @@ -0,0 +1,19 @@ +/* + * Atheros AR7XXX/AR9XXX GPIO controller platform data + * + * Copyright (C) 2015 Alban Bedel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H +#define __LINUX_PLATFORM_DATA_GPIO_ATH79_H + +struct ath79_gpio_platform_data { + unsigned ngpios; + bool oe_inverted; +}; + +#endif diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h new file mode 100644 index 000000000..57a5a35e0 --- /dev/null +++ b/include/linux/platform_data/gpio-davinci.h @@ -0,0 +1,61 @@ +/* + * DaVinci GPIO Platform Related Defines + * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DAVINCI_GPIO_PLATFORM_H +#define __DAVINCI_GPIO_PLATFORM_H + +#include +#include + +#include + +#define MAX_REGS_BANKS 5 +#define MAX_INT_PER_BANK 32 + +struct davinci_gpio_platform_data { + u32 ngpio; + u32 gpio_unbanked; +}; + +struct davinci_gpio_irq_data { + void __iomem *regs; + struct davinci_gpio_controller *chip; + int bank_num; +}; + +struct davinci_gpio_controller { + struct gpio_chip chip; + struct irq_domain *irq_domain; + /* Serialize access to GPIO registers */ + spinlock_t lock; + void __iomem *regs[MAX_REGS_BANKS]; + int gpio_unbanked; + int irqs[MAX_INT_PER_BANK]; + unsigned int base; +}; + +/* + * basic gpio routines + */ +#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */ + +/* Convert GPIO signal to GPIO pin number */ +#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio)) + +static inline u32 __gpio_mask(unsigned gpio) +{ + return 1 << (gpio % 32); +} +#endif diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h new file mode 100644 index 000000000..419cfacb4 --- /dev/null +++ b/include/linux/platform_data/gpio-dwapb.h @@ -0,0 +1,32 @@ +/* + * Copyright(c) 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef GPIO_DW_APB_H +#define GPIO_DW_APB_H + +struct dwapb_port_property { + struct fwnode_handle *fwnode; + unsigned int idx; + unsigned int ngpio; + unsigned int gpio_base; + int irq[32]; + bool has_irq; + bool irq_shared; +}; + +struct dwapb_platform_data { + struct dwapb_port_property *properties; + unsigned int nports; +}; + +#endif diff --git a/include/linux/platform_data/gpio-htc-egpio.h b/include/linux/platform_data/gpio-htc-egpio.h new file mode 100644 index 000000000..9a3e78082 --- /dev/null +++ b/include/linux/platform_data/gpio-htc-egpio.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * HTC simple EGPIO irq and gpio extender + */ + +#ifndef __HTC_EGPIO_H__ +#define __HTC_EGPIO_H__ + +/* Descriptive values for all-in or all-out htc_egpio_chip descriptors. */ +#define HTC_EGPIO_OUTPUT (~0) +#define HTC_EGPIO_INPUT 0 + +/** + * struct htc_egpio_chip - descriptor to create gpio_chip for register range + * @reg_start: index of first register + * @gpio_base: gpio number of first pin in this register range + * @num_gpios: number of gpios in this register range, max BITS_PER_LONG + * (number of registers = DIV_ROUND_UP(num_gpios, reg_width)) + * @direction: bitfield, '0' = input, '1' = output, + */ +struct htc_egpio_chip { + int reg_start; + int gpio_base; + int num_gpios; + unsigned long direction; + unsigned long initial_values; +}; + +/** + * struct htc_egpio_platform_data - description provided by the arch + * @irq_base: beginning of available IRQs (eg, IRQ_BOARD_START) + * @num_irqs: number of irqs + * @reg_width: number of bits per register, either 8 or 16 bit + * @bus_width: alignment of the registers, either 16 or 32 bit + * @invert_acks: set if chip requires writing '0' to ack an irq, instead of '1' + * @ack_register: location of the irq/ack register + * @chip: pointer to array of htc_egpio_chip descriptors + * @num_chips: number of egpio chip descriptors + */ +struct htc_egpio_platform_data { + int bus_width; + int reg_width; + + int irq_base; + int num_irqs; + int invert_acks; + int ack_register; + + struct htc_egpio_chip *chip; + int num_chips; +}; + +/* Determine the wakeup irq, to be called during early resume */ +extern int htc_egpio_get_wakeup_irq(struct device *dev); + +#endif diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h new file mode 100644 index 000000000..861285569 --- /dev/null +++ b/include/linux/platform_data/gpio-omap.h @@ -0,0 +1,219 @@ +/* + * OMAP GPIO handling defines and functions + * + * Copyright (C) 2003-2005 Nokia Corporation + * + * Written by Juha Yrjölä + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
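As an illustrative aside to the htc-egpio structures above: with reg_width = 16 and num_gpios = 16, each descriptor spans DIV_ROUND_UP(16, 16) = 1 register. The register indices, GPIO bases and IRQ count below are invented, and ARRAY_SIZE() from <linux/kernel.h> is assumed.

/* Hypothetical egpio layout: one all-input and one all-output 16-bit range */
static struct htc_egpio_chip my_egpio_chips[] = {
	{
		.reg_start	= 0,
		.gpio_base	= 160,			/* example GPIO numbering */
		.num_gpios	= 16,
		.direction	= HTC_EGPIO_INPUT,
	},
	{
		.reg_start	= 1,
		.gpio_base	= 176,
		.num_gpios	= 16,
		.direction	= HTC_EGPIO_OUTPUT,
		.initial_values	= 1 << 3,		/* drive bit 3 high at probe */
	},
};

static struct htc_egpio_platform_data my_egpio_pdata = {
	.reg_width	= 16,
	.bus_width	= 16,
	.irq_base	= 0,		/* a real board would use its IRQ_BOARD_START */
	.num_irqs	= 16,
	.ack_register	= 2,
	.chip		= my_egpio_chips,
	.num_chips	= ARRAY_SIZE(my_egpio_chips),
};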
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef __ASM_ARCH_OMAP_GPIO_H +#define __ASM_ARCH_OMAP_GPIO_H + +#include +#include + +#define OMAP1_MPUIO_BASE 0xfffb5000 + +/* + * These are the omap15xx/16xx offsets. The omap7xx offset are + * OMAP_MPUIO_ / 2 offsets below. + */ +#define OMAP_MPUIO_INPUT_LATCH 0x00 +#define OMAP_MPUIO_OUTPUT 0x04 +#define OMAP_MPUIO_IO_CNTL 0x08 +#define OMAP_MPUIO_KBR_LATCH 0x10 +#define OMAP_MPUIO_KBC 0x14 +#define OMAP_MPUIO_GPIO_EVENT_MODE 0x18 +#define OMAP_MPUIO_GPIO_INT_EDGE 0x1c +#define OMAP_MPUIO_KBD_INT 0x20 +#define OMAP_MPUIO_GPIO_INT 0x24 +#define OMAP_MPUIO_KBD_MASKIT 0x28 +#define OMAP_MPUIO_GPIO_MASKIT 0x2c +#define OMAP_MPUIO_GPIO_DEBOUNCING 0x30 +#define OMAP_MPUIO_LATCH 0x34 + +#define OMAP34XX_NR_GPIOS 6 + +/* + * OMAP1510 GPIO registers + */ +#define OMAP1510_GPIO_DATA_INPUT 0x00 +#define OMAP1510_GPIO_DATA_OUTPUT 0x04 +#define OMAP1510_GPIO_DIR_CONTROL 0x08 +#define OMAP1510_GPIO_INT_CONTROL 0x0c +#define OMAP1510_GPIO_INT_MASK 0x10 +#define OMAP1510_GPIO_INT_STATUS 0x14 +#define OMAP1510_GPIO_PIN_CONTROL 0x18 + +#define OMAP1510_IH_GPIO_BASE 64 + +/* + * OMAP1610 specific GPIO registers + */ +#define OMAP1610_GPIO_REVISION 0x0000 +#define OMAP1610_GPIO_SYSCONFIG 0x0010 +#define OMAP1610_GPIO_SYSSTATUS 0x0014 +#define OMAP1610_GPIO_IRQSTATUS1 0x0018 +#define OMAP1610_GPIO_IRQENABLE1 0x001c +#define OMAP1610_GPIO_WAKEUPENABLE 0x0028 +#define OMAP1610_GPIO_DATAIN 0x002c +#define OMAP1610_GPIO_DATAOUT 0x0030 +#define OMAP1610_GPIO_DIRECTION 0x0034 +#define OMAP1610_GPIO_EDGE_CTRL1 0x0038 +#define OMAP1610_GPIO_EDGE_CTRL2 0x003c +#define OMAP1610_GPIO_CLEAR_IRQENABLE1 0x009c +#define OMAP1610_GPIO_CLEAR_WAKEUPENA 0x00a8 +#define OMAP1610_GPIO_CLEAR_DATAOUT 0x00b0 +#define OMAP1610_GPIO_SET_IRQENABLE1 0x00dc +#define OMAP1610_GPIO_SET_WAKEUPENA 0x00e8 +#define OMAP1610_GPIO_SET_DATAOUT 0x00f0 + +/* + * OMAP7XX specific GPIO registers + */ +#define OMAP7XX_GPIO_DATA_INPUT 0x00 +#define OMAP7XX_GPIO_DATA_OUTPUT 0x04 +#define OMAP7XX_GPIO_DIR_CONTROL 0x08 +#define OMAP7XX_GPIO_INT_CONTROL 0x0c +#define OMAP7XX_GPIO_INT_MASK 0x10 +#define OMAP7XX_GPIO_INT_STATUS 0x14 + +/* + * omap2+ specific GPIO registers + */ +#define OMAP24XX_GPIO_REVISION 0x0000 +#define OMAP24XX_GPIO_IRQSTATUS1 0x0018 +#define OMAP24XX_GPIO_IRQSTATUS2 0x0028 +#define OMAP24XX_GPIO_IRQENABLE2 0x002c +#define OMAP24XX_GPIO_IRQENABLE1 0x001c +#define OMAP24XX_GPIO_WAKE_EN 0x0020 +#define OMAP24XX_GPIO_CTRL 0x0030 +#define OMAP24XX_GPIO_OE 0x0034 +#define OMAP24XX_GPIO_DATAIN 0x0038 +#define OMAP24XX_GPIO_DATAOUT 0x003c +#define OMAP24XX_GPIO_LEVELDETECT0 0x0040 +#define OMAP24XX_GPIO_LEVELDETECT1 0x0044 +#define OMAP24XX_GPIO_RISINGDETECT 0x0048 +#define OMAP24XX_GPIO_FALLINGDETECT 0x004c +#define OMAP24XX_GPIO_DEBOUNCE_EN 0x0050 +#define OMAP24XX_GPIO_DEBOUNCE_VAL 0x0054 +#define OMAP24XX_GPIO_CLEARIRQENABLE1 0x0060 +#define OMAP24XX_GPIO_SETIRQENABLE1 0x0064 +#define OMAP24XX_GPIO_CLEARWKUENA 0x0080 +#define OMAP24XX_GPIO_SETWKUENA 0x0084 +#define OMAP24XX_GPIO_CLEARDATAOUT 0x0090 +#define OMAP24XX_GPIO_SETDATAOUT 0x0094 + +#define OMAP4_GPIO_REVISION 0x0000 +#define OMAP4_GPIO_EOI 0x0020 +#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024 +#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028 +#define OMAP4_GPIO_IRQSTATUS0 0x002c +#define OMAP4_GPIO_IRQSTATUS1 
0x0030 +#define OMAP4_GPIO_IRQSTATUSSET0 0x0034 +#define OMAP4_GPIO_IRQSTATUSSET1 0x0038 +#define OMAP4_GPIO_IRQSTATUSCLR0 0x003c +#define OMAP4_GPIO_IRQSTATUSCLR1 0x0040 +#define OMAP4_GPIO_IRQWAKEN0 0x0044 +#define OMAP4_GPIO_IRQWAKEN1 0x0048 +#define OMAP4_GPIO_IRQENABLE1 0x011c +#define OMAP4_GPIO_WAKE_EN 0x0120 +#define OMAP4_GPIO_IRQSTATUS2 0x0128 +#define OMAP4_GPIO_IRQENABLE2 0x012c +#define OMAP4_GPIO_CTRL 0x0130 +#define OMAP4_GPIO_OE 0x0134 +#define OMAP4_GPIO_DATAIN 0x0138 +#define OMAP4_GPIO_DATAOUT 0x013c +#define OMAP4_GPIO_LEVELDETECT0 0x0140 +#define OMAP4_GPIO_LEVELDETECT1 0x0144 +#define OMAP4_GPIO_RISINGDETECT 0x0148 +#define OMAP4_GPIO_FALLINGDETECT 0x014c +#define OMAP4_GPIO_DEBOUNCENABLE 0x0150 +#define OMAP4_GPIO_DEBOUNCINGTIME 0x0154 +#define OMAP4_GPIO_CLEARIRQENABLE1 0x0160 +#define OMAP4_GPIO_SETIRQENABLE1 0x0164 +#define OMAP4_GPIO_CLEARWKUENA 0x0180 +#define OMAP4_GPIO_SETWKUENA 0x0184 +#define OMAP4_GPIO_CLEARDATAOUT 0x0190 +#define OMAP4_GPIO_SETDATAOUT 0x0194 + +#define OMAP_MAX_GPIO_LINES 192 + +#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr)) +#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES) + +struct omap_gpio_reg_offs { + u16 revision; + u16 direction; + u16 datain; + u16 dataout; + u16 set_dataout; + u16 clr_dataout; + u16 irqstatus; + u16 irqstatus2; + u16 irqstatus_raw0; + u16 irqstatus_raw1; + u16 irqenable; + u16 irqenable2; + u16 set_irqenable; + u16 clr_irqenable; + u16 debounce; + u16 debounce_en; + u16 ctrl; + u16 wkup_en; + u16 leveldetect0; + u16 leveldetect1; + u16 risingdetect; + u16 fallingdetect; + u16 irqctrl; + u16 edgectrl1; + u16 edgectrl2; + u16 pinctrl; + + bool irqenable_inv; +}; + +struct omap_gpio_platform_data { + int bank_type; + int bank_width; /* GPIO bank width */ + int bank_stride; /* Only needed for omap1 MPUIO */ + bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ + bool loses_context; /* whether the bank would ever lose context */ + bool is_mpuio; /* whether the bank is of type MPUIO */ + u32 non_wakeup_gpios; + + struct omap_gpio_reg_offs *regs; + + /* Return context loss count due to PM states changing */ + int (*get_context_loss_count)(struct device *dev); +}; + +#if IS_BUILTIN(CONFIG_GPIO_OMAP) +extern void omap2_gpio_prepare_for_idle(int off_mode); +extern void omap2_gpio_resume_after_idle(void); +#else +static inline void omap2_gpio_prepare_for_idle(int off_mode) +{ +} + +static inline void omap2_gpio_resume_after_idle(void) +{ +} +#endif + +#endif diff --git a/include/linux/platform_data/gpio-ts5500.h b/include/linux/platform_data/gpio-ts5500.h new file mode 100644 index 000000000..b10d11c9b --- /dev/null +++ b/include/linux/platform_data/gpio-ts5500.h @@ -0,0 +1,27 @@ +/* + * GPIO (DIO) header for Technologic Systems TS-5500 + * + * Copyright (c) 2012 Savoir-faire Linux Inc. + * Vivien Didelot + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _PDATA_GPIO_TS5500_H +#define _PDATA_GPIO_TS5500_H + +/** + * struct ts5500_dio_platform_data - TS-5500 pin block configuration + * @base: The GPIO base number to use. + * @strap: The only pin connected to an interrupt in a block is input-only. + * If you need a bidirectional line which can trigger an IRQ, you + * may strap it with an in/out pin. This flag indicates this case. 
+ */ +struct ts5500_dio_platform_data { + int base; + bool strap; +}; + +#endif /* _PDATA_GPIO_TS5500_H */ diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h new file mode 100644 index 000000000..683d90453 --- /dev/null +++ b/include/linux/platform_data/gpio_backlight.h @@ -0,0 +1,20 @@ +/* + * gpio_backlight.h - Simple GPIO-controlled backlight + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __GPIO_BACKLIGHT_H__ +#define __GPIO_BACKLIGHT_H__ + +struct device; + +struct gpio_backlight_platform_data { + struct device *fbdev; + int gpio; + int def_value; + const char *name; +}; + +#endif diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h new file mode 100644 index 000000000..67ccdb0e1 --- /dev/null +++ b/include/linux/platform_data/gpmc-omap.h @@ -0,0 +1,172 @@ +/* + * OMAP GPMC Platform data + * + * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com + * Roger Quadros + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#ifndef _GPMC_OMAP_H_ +#define _GPMC_OMAP_H_ + +/* Maximum Number of Chip Selects */ +#define GPMC_CS_NUM 8 + +/* bool type time settings */ +struct gpmc_bool_timings { + bool cycle2cyclediffcsen; + bool cycle2cyclesamecsen; + bool we_extra_delay; + bool oe_extra_delay; + bool adv_extra_delay; + bool cs_extra_delay; + bool time_para_granularity; +}; + +/* + * Note that all values in this struct are in nanoseconds except sync_clk + * (which is in picoseconds), while the register values are in gpmc_fck cycles. 
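As an illustrative aside to the unit note above (nanosecond fields, picosecond sync_clk, cycle-based registers): a sketch of the rounding a driver has to perform. This is not the in-kernel GPMC helper; the function name and the 166 MHz example clock are assumptions, and DIV_ROUND_UP() from <linux/kernel.h> is assumed.

/* Illustrative only: round a nanosecond timing up to whole clock cycles,
 * given the clock period in picoseconds (the unit sync_clk uses above).
 */
static unsigned int example_ns_to_ticks(u32 time_ns, u32 period_ps)
{
	/* e.g. 18 ns at a 6000 ps period (~166 MHz): 18000 / 6000 = 3 cycles */
	return DIV_ROUND_UP(time_ns * 1000U, period_ps);
}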
+ */ +struct gpmc_timings { + /* Minimum clock period for synchronous mode (in picoseconds) */ + u32 sync_clk; + + /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ + u32 cs_on; /* Assertion time */ + u32 cs_rd_off; /* Read deassertion time */ + u32 cs_wr_off; /* Write deassertion time */ + + /* ADV signal timings corresponding to GPMC_CONFIG3 */ + u32 adv_on; /* Assertion time */ + u32 adv_rd_off; /* Read deassertion time */ + u32 adv_wr_off; /* Write deassertion time */ + u32 adv_aad_mux_on; /* ADV assertion time for AAD */ + u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */ + u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */ + + /* WE signals timings corresponding to GPMC_CONFIG4 */ + u32 we_on; /* WE assertion time */ + u32 we_off; /* WE deassertion time */ + + /* OE signals timings corresponding to GPMC_CONFIG4 */ + u32 oe_on; /* OE assertion time */ + u32 oe_off; /* OE deassertion time */ + u32 oe_aad_mux_on; /* OE assertion time for AAD */ + u32 oe_aad_mux_off; /* OE deassertion time for AAD */ + + /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ + u32 page_burst_access; /* Multiple access word delay */ + u32 access; /* Start-cycle to first data valid delay */ + u32 rd_cycle; /* Total read cycle time */ + u32 wr_cycle; /* Total write cycle time */ + + u32 bus_turnaround; + u32 cycle2cycle_delay; + + u32 wait_monitoring; + u32 clk_activation; + + /* The following are only on OMAP3430 */ + u32 wr_access; /* WRACCESSTIME */ + u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ + + struct gpmc_bool_timings bool_timings; +}; + +/* Device timings in picoseconds */ +struct gpmc_device_timings { + u32 t_ceasu; /* address setup to CS valid */ + u32 t_avdasu; /* address setup to ADV valid */ + /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is + * of tusb using these timings even for sync whilst + * ideally for adv_rd/(wr)_off it should have considered + * t_avdh instead. This indirectly necessitates r/w + * variations of t_avdp as it is possible to have one + * sync & other async + */ + u32 t_avdp_r; /* ADV low time (what about t_cer ?) 
*/ + u32 t_avdp_w; + u32 t_aavdh; /* address hold time */ + u32 t_oeasu; /* address setup to OE valid */ + u32 t_aa; /* access time from ADV assertion */ + u32 t_iaa; /* initial access time */ + u32 t_oe; /* access time from OE assertion */ + u32 t_ce; /* access time from CS asertion */ + u32 t_rd_cycle; /* read cycle time */ + u32 t_cez_r; /* read CS deassertion to high Z */ + u32 t_cez_w; /* write CS deassertion to high Z */ + u32 t_oez; /* OE deassertion to high Z */ + u32 t_weasu; /* address setup to WE valid */ + u32 t_wpl; /* write assertion time */ + u32 t_wph; /* write deassertion time */ + u32 t_wr_cycle; /* write cycle time */ + + u32 clk; + u32 t_bacc; /* burst access valid clock to output delay */ + u32 t_ces; /* CS setup time to clk */ + u32 t_avds; /* ADV setup time to clk */ + u32 t_avdh; /* ADV hold time from clk */ + u32 t_ach; /* address hold time from clk */ + u32 t_rdyo; /* clk to ready valid */ + + u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ + u32 t_ce_avd; /* CS on to ADV on delay */ + + /* XXX: check the possibility of combining + * cyc_aavhd_oe & cyc_aavdh_we + */ + u8 cyc_aavdh_oe;/* read address hold time in cycles */ + u8 cyc_aavdh_we;/* write address hold time in cycles */ + u8 cyc_oe; /* access time from OE assertion in cycles */ + u8 cyc_wpl; /* write deassertion time in cycles */ + u32 cyc_iaa; /* initial access time in cycles */ + + /* extra delays */ + bool ce_xdelay; + bool avd_xdelay; + bool oe_xdelay; + bool we_xdelay; +}; + +#define GPMC_BURST_4 4 /* 4 word burst */ +#define GPMC_BURST_8 8 /* 8 word burst */ +#define GPMC_BURST_16 16 /* 16 word burst */ +#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ +#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ +#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ +#define GPMC_MUX_AD 2 /* Addr-Data multiplex */ + +struct gpmc_settings { + bool burst_wrap; /* enables wrap bursting */ + bool burst_read; /* enables read page/burst mode */ + bool burst_write; /* enables write page/burst mode */ + bool device_nand; /* device is NAND */ + bool sync_read; /* enables synchronous reads */ + bool sync_write; /* enables synchronous writes */ + bool wait_on_read; /* monitor wait on reads */ + bool wait_on_write; /* monitor wait on writes */ + u32 burst_len; /* page/burst length */ + u32 device_width; /* device bus width (8 or 16 bit) */ + u32 mux_add_data; /* multiplex address & data */ + u32 wait_pin; /* wait-pin to be used */ +}; + +/* Data for each chip select */ +struct gpmc_omap_cs_data { + bool valid; /* data is valid */ + bool is_nand; /* device within this CS is NAND */ + struct gpmc_settings *settings; + struct gpmc_device_timings *device_timings; + struct gpmc_timings *gpmc_timings; + struct platform_device *pdev; /* device within this CS region */ + unsigned int pdata_size; +}; + +struct gpmc_omap_platform_data { + struct gpmc_omap_cs_data cs[GPMC_CS_NUM]; +}; + +#endif /* _GPMC_OMAP_H */ diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h new file mode 100644 index 000000000..73d9098ad --- /dev/null +++ b/include/linux/platform_data/hsmmc-omap.h @@ -0,0 +1,81 @@ +/* + * MMC definitions for OMAP2 + * + * Copyright (C) 2006 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* + * struct omap_hsmmc_dev_attr.flags possibilities + * + * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can + * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag + * should be set if this is the case. See for example Section 22.5.3 + * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia + * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R). + * + * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers + * don't work correctly on some MMC controller instances on some + * OMAP3 SoCs; this flag should be set if this is the case. See + * for example Advisory 2.1.1.128 "MMC: Multiple Block Read + * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_ + * Revision F (October 2010) (SPRZ278F). + */ +#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0) +#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1) +#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2) + +struct omap_hsmmc_dev_attr { + u8 flags; +}; + +struct mmc_card; + +struct omap_hsmmc_platform_data { + /* back-link to device */ + struct device *dev; + + /* set if your board has components or wiring that limits the + * maximum frequency on the MMC bus */ + unsigned int max_freq; + + /* Integrating attributes from the omap_hwmod layer */ + u8 controller_flags; + + /* Register offset deviation */ + u16 reg_offset; + + /* + * 4/8 wires and any additional host capabilities + * need to OR'd all capabilities (ref. linux/mmc/host.h) + */ + u32 caps; /* Used for the MMC driver on 2430 and later */ + u32 pm_caps; /* PM capabilities of the mmc */ + + /* nonremovable e.g. eMMC */ + unsigned nonremovable:1; + + /* eMMC does not handle power off when not in sleep state */ + unsigned no_regulator_off_init:1; + + /* we can put the features above into this variable */ +#define HSMMC_HAS_PBIAS (1 << 0) +#define HSMMC_HAS_UPDATED_RESET (1 << 1) +#define HSMMC_HAS_HSPE_SUPPORT (1 << 2) + unsigned features; + + /* string specifying a particular variant of hardware */ + char *version; + + int gpio_cd; /* gpio (card detect) */ + int gpio_cod; /* gpio (cover detect) */ + int gpio_wp; /* gpio (write protect) */ + /* if we have special card, init it using this callback */ + void (*init_card)(struct mmc_card *card); + + const char *name; + u32 ocr_mask; +}; diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h new file mode 100644 index 000000000..0e3cce130 --- /dev/null +++ b/include/linux/platform_data/hwmon-s3c.h @@ -0,0 +1,49 @@ +/* + * Copyright 2005 Simtec Electronics + * Ben Dooks + * http://armlinux.simtec.co.uk/ + * + * S3C - HWMon interface for ADC + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __HWMON_S3C_H__ +#define __HWMON_S3C_H__ + +/** + * s3c_hwmon_chcfg - channel configuration + * @name: The name to give this channel. + * @mult: Multiply the ADC value read by this. + * @div: Divide the value from the ADC by this. + * + * The value read from the ADC is converted to a value that + * hwmon expects (mV) by result = (value_read * @mult) / @div. + */ +struct s3c_hwmon_chcfg { + const char *name; + unsigned int mult; + unsigned int div; +}; + +/** + * s3c_hwmon_pdata - HWMON platform data + * @in: One configuration for each possible channel used. 
+ */ +struct s3c_hwmon_pdata { + struct s3c_hwmon_chcfg *in[8]; +}; + +/** + * s3c_hwmon_set_platdata - Set platform data for S3C HWMON device + * @pd: Platform data to register to device. + * + * Register the given platform data for use with the S3C HWMON device. + * The call will copy the platform data, so the board definitions can + * make the structure itself __initdata. + */ +extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd); + +#endif /* __HWMON_S3C_H__ */ diff --git a/include/linux/platform_data/i2c-cbus-gpio.h b/include/linux/platform_data/i2c-cbus-gpio.h new file mode 100644 index 000000000..6faa992a9 --- /dev/null +++ b/include/linux/platform_data/i2c-cbus-gpio.h @@ -0,0 +1,27 @@ +/* + * i2c-cbus-gpio.h - CBUS I2C platform_data definition + * + * Copyright (C) 2004-2009 Nokia Corporation + * + * Written by Felipe Balbi and Aaro Koskinen. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __INCLUDE_LINUX_I2C_CBUS_GPIO_H +#define __INCLUDE_LINUX_I2C_CBUS_GPIO_H + +struct i2c_cbus_platform_data { + int dat_gpio; + int clk_gpio; + int sel_gpio; +}; + +#endif /* __INCLUDE_LINUX_I2C_CBUS_GPIO_H */ diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h new file mode 100644 index 000000000..98967df07 --- /dev/null +++ b/include/linux/platform_data/i2c-davinci.h @@ -0,0 +1,26 @@ +/* + * DaVinci I2C controller platform_device info + * + * Author: Vladimir Barinov, MontaVista Software, Inc. + * + * 2007 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. +*/ + +#ifndef __ASM_ARCH_I2C_H +#define __ASM_ARCH_I2C_H + +/* All frequencies are expressed in kHz */ +struct davinci_i2c_platform_data { + unsigned int bus_freq; /* standard bus frequency (kHz) */ + unsigned int bus_delay; /* post-transaction delay (usec) */ + bool gpio_recovery; /* Use GPIO recovery method */ + bool has_pfunc; /* Chip has a ICPFUNC register */ +}; + +/* for board setup code */ +void davinci_init_i2c(struct davinci_i2c_platform_data *); + +#endif /* __ASM_ARCH_I2C_H */ diff --git a/include/linux/platform_data/i2c-designware.h b/include/linux/platform_data/i2c-designware.h new file mode 100644 index 000000000..7a61fb27c --- /dev/null +++ b/include/linux/platform_data/i2c-designware.h @@ -0,0 +1,21 @@ +/* + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
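As an illustrative aside to the S3C hwmon conversion rule above (result = value_read * mult / div, in mV): a sketch of one channel configuration, assuming a 10-bit ADC referenced to 3.3 V and a 2:1 resistor divider in front of the pin. The my_* names and the channel assignment are invented.

/* Hypothetical channel: full scale (1023) maps to 3300 mV, and the external
 * 2:1 divider means the real voltage is twice what the pin sees.
 */
static struct s3c_hwmon_chcfg my_vbat_cfg = {
	.name	= "vbat",
	.mult	= 3300 * 2,	/* reference (mV) times divider ratio */
	.div	= 1023,
};

static struct s3c_hwmon_pdata my_hwmon_pdata = {
	.in[0]	= &my_vbat_cfg,	/* only channel 0 is wired in this example */
};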
+ */ + +#ifndef I2C_DESIGNWARE_H +#define I2C_DESIGNWARE_H + +struct dw_i2c_platform_data { + unsigned int i2c_scl_freq; +}; + +#endif diff --git a/include/linux/platform_data/i2c-gpio.h b/include/linux/platform_data/i2c-gpio.h new file mode 100644 index 000000000..352c1426f --- /dev/null +++ b/include/linux/platform_data/i2c-gpio.h @@ -0,0 +1,34 @@ +/* + * i2c-gpio interface to platform code + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_I2C_GPIO_H +#define _LINUX_I2C_GPIO_H + +/** + * struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio + * @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz + * @timeout: clock stretching timeout in jiffies. If the slave keeps + * SCL low for longer than this, the transfer will time out. + * @sda_is_open_drain: SDA is configured as open drain, i.e. the pin + * isn't actively driven high when setting the output value high. + * gpio_get_value() must return the actual pin state even if the + * pin is configured as an output. + * @scl_is_open_drain: SCL is set up as open drain. Same requirements + * as for sda_is_open_drain apply. + * @scl_is_output_only: SCL output drivers cannot be turned off. + */ +struct i2c_gpio_platform_data { + int udelay; + int timeout; + unsigned int sda_is_open_drain:1; + unsigned int scl_is_open_drain:1; + unsigned int scl_is_output_only:1; +}; + +#endif /* _LINUX_I2C_GPIO_H */ diff --git a/include/linux/platform_data/i2c-hid.h b/include/linux/platform_data/i2c-hid.h new file mode 100644 index 000000000..c628bb5e1 --- /dev/null +++ b/include/linux/platform_data/i2c-hid.h @@ -0,0 +1,41 @@ +/* + * HID over I2C protocol implementation + * + * Copyright (c) 2012 Benjamin Tissoires + * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#ifndef __LINUX_I2C_HID_H +#define __LINUX_I2C_HID_H + +#include +#include + +/** + * struct i2chid_platform_data - used by hid over i2c implementation. + * @hid_descriptor_address: i2c register where the HID descriptor is stored. + * @supplies: regulators for powering on the device. + * @post_power_delay_ms: delay after powering on before device is usable. + * + * Note that it is the responsibility of the platform driver (or the acpi 5.0 + * driver, or the flattened device tree) to setup the irq related to the gpio in + * the struct i2c_board_info. 
+ * The platform driver should also setup the gpio according to the device: + * + * A typical example is the following: + * irq = gpio_to_irq(intr_gpio); + * hkdk4412_i2c_devs5[0].irq = irq; // store the irq in i2c_board_info + * gpio_request(intr_gpio, "elan-irq"); + * s3c_gpio_setpull(intr_gpio, S3C_GPIO_PULL_UP); + */ +struct i2c_hid_platform_data { + u16 hid_descriptor_address; + struct regulator_bulk_data supplies[2]; + int post_power_delay_ms; +}; + +#endif /* __LINUX_I2C_HID_H */ diff --git a/include/linux/platform_data/i2c-imx.h b/include/linux/platform_data/i2c-imx.h new file mode 100644 index 000000000..8289d915e --- /dev/null +++ b/include/linux/platform_data/i2c-imx.h @@ -0,0 +1,21 @@ +/* + * i2c.h - i.MX I2C driver header file + * + * Copyright (c) 2008, Darius Augulis + * + * This file is released under the GPLv2 + */ + +#ifndef __ASM_ARCH_I2C_H_ +#define __ASM_ARCH_I2C_H_ + +/** + * struct imxi2c_platform_data - structure of platform data for MXC I2C driver + * @bitrate: Bus speed measured in Hz + * + **/ +struct imxi2c_platform_data { + u32 bitrate; +}; + +#endif /* __ASM_ARCH_I2C_H_ */ diff --git a/include/linux/platform_data/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h new file mode 100644 index 000000000..440610820 --- /dev/null +++ b/include/linux/platform_data/i2c-mux-gpio.h @@ -0,0 +1,43 @@ +/* + * i2c-mux-gpio interface to platform code + * + * Peter Korsgaard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_I2C_MUX_GPIO_H +#define _LINUX_I2C_MUX_GPIO_H + +/* MUX has no specific idle mode */ +#define I2C_MUX_GPIO_NO_IDLE ((unsigned)-1) + +/** + * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio + * @parent: Parent I2C bus adapter number + * @base_nr: Base I2C bus number to number adapters from or zero for dynamic + * @values: Array of bitmasks of GPIO settings (low/high) for each + * position + * @n_values: Number of multiplexer positions (busses to instantiate) + * @classes: Optional I2C auto-detection classes + * @gpio_chip: Optional GPIO chip name; if set, GPIO pin numbers are given + * relative to the base GPIO number of that chip + * @gpios: Array of GPIO numbers used to control MUX + * @n_gpios: Number of GPIOs used to control MUX + * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used + */ +struct i2c_mux_gpio_platform_data { + int parent; + int base_nr; + const unsigned *values; + int n_values; + const unsigned *classes; + char *gpio_chip; + const unsigned *gpios; + int n_gpios; + unsigned idle; +}; + +#endif /* _LINUX_I2C_MUX_GPIO_H */ diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h new file mode 100644 index 000000000..c68712aad --- /dev/null +++ b/include/linux/platform_data/i2c-mux-reg.h @@ -0,0 +1,44 @@ +/* + * I2C multiplexer using a single register + * + * Copyright 2015 Freescale Semiconductor + * York Sun + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
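For illustration, a sketch of the i2c-mux-gpio platform data described above for a hypothetical four-channel mux driven by two GPIO lines. The GPIO numbers and parent adapter number are assumptions, and ARRAY_SIZE() is assumed available.

/* Hypothetical 4-way mux behind adapter 0, selected by two GPIO lines */
static const unsigned my_i2cmux_gpios[]  = { 58, 59 };		/* example GPIOs */
static const unsigned my_i2cmux_values[] = { 0, 1, 2, 3 };	/* bitmask per channel */

static struct i2c_mux_gpio_platform_data my_i2cmux_pdata = {
	.parent		= 0,			/* upstream adapter number */
	.base_nr	= 0,			/* let the core number the new busses */
	.values		= my_i2cmux_values,
	.n_values	= ARRAY_SIZE(my_i2cmux_values),
	.gpios		= my_i2cmux_gpios,
	.n_gpios	= ARRAY_SIZE(my_i2cmux_gpios),
	.idle		= I2C_MUX_GPIO_NO_IDLE,	/* no dedicated idle setting */
};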
+ */ + +#ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H +#define __LINUX_PLATFORM_DATA_I2C_MUX_REG_H + +/** + * struct i2c_mux_reg_platform_data - Platform-dependent data for i2c-mux-reg + * @parent: Parent I2C bus adapter number + * @base_nr: Base I2C bus number to number adapters from or zero for dynamic + * @values: Array of value for each channel + * @n_values: Number of multiplexer channels + * @little_endian: Indicating if the register is in little endian + * @write_only: Reading the register is not allowed by hardware + * @classes: Optional I2C auto-detection classes + * @idle: Value to write to mux when idle + * @idle_in_use: indicate if idle value is in use + * @reg: Virtual address of the register to switch channel + * @reg_size: register size in bytes + */ +struct i2c_mux_reg_platform_data { + int parent; + int base_nr; + const unsigned int *values; + int n_values; + bool little_endian; + bool write_only; + const unsigned int *classes; + u32 idle; + bool idle_in_use; + void __iomem *reg; + resource_size_t reg_size; +}; + +#endif /* __LINUX_PLATFORM_DATA_I2C_MUX_REG_H */ diff --git a/include/linux/platform_data/i2c-ocores.h b/include/linux/platform_data/i2c-ocores.h new file mode 100644 index 000000000..113d6b12f --- /dev/null +++ b/include/linux/platform_data/i2c-ocores.h @@ -0,0 +1,23 @@ +/* + * i2c-ocores.h - definitions for the i2c-ocores interface + * + * Peter Korsgaard + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef _LINUX_I2C_OCORES_H +#define _LINUX_I2C_OCORES_H + +struct ocores_i2c_platform_data { + u32 reg_shift; /* register offset shift value */ + u32 reg_io_width; /* register io read/write width */ + u32 clock_khz; /* input clock in kHz */ + bool big_endian; /* registers are big endian */ + u8 num_devices; /* number of devices in the devices list */ + struct i2c_board_info const *devices; /* devices connected to the bus */ +}; + +#endif /* _LINUX_I2C_OCORES_H */ diff --git a/include/linux/platform_data/i2c-omap.h b/include/linux/platform_data/i2c-omap.h new file mode 100644 index 000000000..3444265ee --- /dev/null +++ b/include/linux/platform_data/i2c-omap.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __I2C_OMAP_H__ +#define __I2C_OMAP_H__ + +#include + +/* + * Version 2 of the I2C peripheral unit has a different register + * layout and extra registers. The ID register in the V2 peripheral + * unit on the OMAP4430 reports the same ID as the V1 peripheral + * unit on the OMAP3530, so we must inform the driver which IP + * version we know it is running on from platform / cpu-specific + * code using these constants in the hwmod class definition. 
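As an illustrative aside to the i2c-ocores platform data above: a sketch that pre-declares one slave on the bus. The EEPROM device, its address, the register layout and the clock are assumptions, and <linux/i2c.h> is assumed for I2C_BOARD_INFO().

/* Hypothetical ocores instance with one device declared up front */
static struct i2c_board_info my_ocores_devices[] = {
	{ I2C_BOARD_INFO("24c02", 0x50) },	/* example slave */
};

static struct ocores_i2c_platform_data my_ocores_pdata = {
	.reg_shift	= 2,		/* 8-bit registers on 32-bit boundaries */
	.reg_io_width	= 1,		/* byte accesses */
	.clock_khz	= 20000,	/* 20 MHz input clock */
	.devices	= my_ocores_devices,
	.num_devices	= ARRAY_SIZE(my_ocores_devices),
};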
+ */ + +#define OMAP_I2C_IP_VERSION_1 1 +#define OMAP_I2C_IP_VERSION_2 2 + +/* struct omap_i2c_bus_platform_data .flags meanings */ + +#define OMAP_I2C_FLAG_NO_FIFO BIT(0) +#define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1) +#define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2) +#define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5) +#define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6) +/* how the CPU address bus must be translated for I2C unit access */ +#define OMAP_I2C_FLAG_BUS_SHIFT_NONE 0 +#define OMAP_I2C_FLAG_BUS_SHIFT_1 BIT(7) +#define OMAP_I2C_FLAG_BUS_SHIFT_2 BIT(8) +#define OMAP_I2C_FLAG_BUS_SHIFT__SHIFT 7 + +struct omap_i2c_bus_platform_data { + u32 clkrate; + u32 rev; + u32 flags; + void (*set_mpu_wkup_lat)(struct device *dev, long set); +}; + +#endif diff --git a/include/linux/platform_data/i2c-pca-platform.h b/include/linux/platform_data/i2c-pca-platform.h new file mode 100644 index 000000000..c37329432 --- /dev/null +++ b/include/linux/platform_data/i2c-pca-platform.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef I2C_PCA9564_PLATFORM_H +#define I2C_PCA9564_PLATFORM_H + +struct i2c_pca9564_pf_platform_data { + int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */ + int timeout; /* timeout in jiffies */ +}; + +#endif /* I2C_PCA9564_PLATFORM_H */ diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h new file mode 100644 index 000000000..5236f216d --- /dev/null +++ b/include/linux/platform_data/i2c-pxa.h @@ -0,0 +1,74 @@ +/* + * i2c_pxa.h + * + * Copyright (C) 2002 Intrinsyc Software Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef _I2C_PXA_H_ +#define _I2C_PXA_H_ + +#if 0 +#define DEF_TIMEOUT 3 +#else +/* need a longer timeout if we're dealing with the fact we may well be + * looking at a multi-master environment +*/ +#define DEF_TIMEOUT 32 +#endif + +#define BUS_ERROR (-EREMOTEIO) +#define XFER_NAKED (-ECONNREFUSED) +#define I2C_RETRY (-2000) /* an error has occurred retry transmit */ + +/* ICR initialize bit values +* +* 15. FM 0 (100 Khz operation) +* 14. UR 0 (No unit reset) +* 13. SADIE 0 (Disables the unit from interrupting on slave addresses +* matching its slave address) +* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration +* in master mode) +* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode) +* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent) +* 9. IRFIE 1 (Enable interrupts from full buffer received) +* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty) +* 7. GCD 1 (Disables i2c unit response to general call messages as a slave) +* 6. IUE 0 (Disable unit until we change settings) +* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL) +* 4. MA 0 (Only send stop with the ICR stop bit) +* 3. TB 0 (We are not transmitting a byte initially) +* 2. ACKNAK 0 (Send an ACK after the unit receives a byte) +* 1. STOP 0 (Do not send a STOP) +* 0. START 0 (Do not send a START) +* +*/ +#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE) + +/* I2C status register init values + * + * 10. BED 1 (Clear bus error detected) + * 9. SAD 1 (Clear slave address detected) + * 7. IRF 1 (Clear IDBR Receive Full) + * 6. ITE 1 (Clear IDBR Transmit Empty) + * 5. ALD 1 (Clear Arbitration Loss Detected) + * 4. 
SSD 1 (Clear Slave Stop Detected) + */ +#define I2C_ISR_INIT 0x7FF /* status register init */ + +struct i2c_slave_client; + +struct i2c_pxa_platform_data { + unsigned int slave_addr; + struct i2c_slave_client *slave; + unsigned int class; + unsigned int use_pio :1; + unsigned int fast_mode :1; + unsigned int high_mode:1; + unsigned char master_code; + unsigned long rate; +}; +#endif diff --git a/include/linux/platform_data/i2c-s3c2410.h b/include/linux/platform_data/i2c-s3c2410.h new file mode 100644 index 000000000..05af66b84 --- /dev/null +++ b/include/linux/platform_data/i2c-s3c2410.h @@ -0,0 +1,78 @@ +/* + * Copyright 2004-2009 Simtec Electronics + * Ben Dooks + * + * S3C - I2C Controller platform_device info + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __I2C_S3C2410_H +#define __I2C_S3C2410_H __FILE__ + +#define S3C_IICFLG_FILTER (1<<0) /* enable s3c2440 filter */ + +struct platform_device; + +/** + * struct s3c2410_platform_i2c - Platform data for s3c I2C. + * @bus_num: The bus number to use (if possible). + * @flags: Any flags for the I2C bus (E.g. S3C_IICFLK_FILTER). + * @slave_addr: The I2C address for the slave device (if enabled). + * @frequency: The desired frequency in Hz of the bus. This is + * guaranteed to not be exceeded. If the caller does + * not care, use zero and the driver will select a + * useful default. + * @sda_delay: The delay (in ns) applied to SDA edges. + * @cfg_gpio: A callback to configure the pins for I2C operation. + */ +struct s3c2410_platform_i2c { + int bus_num; + unsigned int flags; + unsigned int slave_addr; + unsigned long frequency; + unsigned int sda_delay; + + void (*cfg_gpio)(struct platform_device *dev); +}; + +/** + * s3c_i2c0_set_platdata - set platform data for i2c0 device + * @i2c: The platform data to set, or NULL for default data. + * + * Register the given platform data for use with the i2c0 device. This + * call copies the platform data, so the caller can use __initdata for + * their copy. + * + * This call will set cfg_gpio if is null to the default platform + * implementation. + * + * Any user of s3c_device_i2c0 should call this, even if it is with + * NULL to ensure that the device is given the default platform data + * as the driver will no longer carry defaults. 
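For illustration, a sketch of a board supplying the s3c2410 I2C platform data documented above. The frequency, sda_delay and slave address are example values; because s3c_i2c0_set_platdata() copies the structure, it can live in __initdata as the kernel-doc above notes.

/* Hypothetical board setup for the i2c0 controller */
static struct s3c2410_platform_i2c my_i2c0_pdata __initdata = {
	.bus_num	= 0,
	.flags		= 0,
	.slave_addr	= 0x10,
	.frequency	= 100000,	/* bus must not exceed 100 kHz */
	.sda_delay	= 100,		/* ns */
	/* .cfg_gpio left NULL: the set_platdata call fills in the default */
};

static void __init my_board_i2c_init(void)
{
	s3c_i2c0_set_platdata(&my_i2c0_pdata);
}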
+ */ +extern void s3c_i2c0_set_platdata(struct s3c2410_platform_i2c *i2c); +extern void s3c_i2c1_set_platdata(struct s3c2410_platform_i2c *i2c); +extern void s3c_i2c2_set_platdata(struct s3c2410_platform_i2c *i2c); +extern void s3c_i2c3_set_platdata(struct s3c2410_platform_i2c *i2c); +extern void s3c_i2c4_set_platdata(struct s3c2410_platform_i2c *i2c); +extern void s3c_i2c5_set_platdata(struct s3c2410_platform_i2c *i2c); +extern void s3c_i2c6_set_platdata(struct s3c2410_platform_i2c *i2c); +extern void s3c_i2c7_set_platdata(struct s3c2410_platform_i2c *i2c); +extern void s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *i2c); + +/* defined by architecture to configure gpio */ +extern void s3c_i2c0_cfg_gpio(struct platform_device *dev); +extern void s3c_i2c1_cfg_gpio(struct platform_device *dev); +extern void s3c_i2c2_cfg_gpio(struct platform_device *dev); +extern void s3c_i2c3_cfg_gpio(struct platform_device *dev); +extern void s3c_i2c4_cfg_gpio(struct platform_device *dev); +extern void s3c_i2c5_cfg_gpio(struct platform_device *dev); +extern void s3c_i2c6_cfg_gpio(struct platform_device *dev); +extern void s3c_i2c7_cfg_gpio(struct platform_device *dev); + +extern struct s3c2410_platform_i2c default_i2c_data; + +#endif /* __I2C_S3C2410_H */ diff --git a/include/linux/platform_data/i2c-xiic.h b/include/linux/platform_data/i2c-xiic.h new file mode 100644 index 000000000..4f9f2256a --- /dev/null +++ b/include/linux/platform_data/i2c-xiic.h @@ -0,0 +1,43 @@ +/* + * i2c-xiic.h + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* Supports: + * Xilinx IIC + */ + +#ifndef _LINUX_I2C_XIIC_H +#define _LINUX_I2C_XIIC_H + +/** + * struct xiic_i2c_platform_data - Platform data of the Xilinx I2C driver + * @num_devices: Number of devices that shall be added when the driver + * is probed. + * @devices: The actuall devices to add. + * + * This purpose of this platform data struct is to be able to provide a number + * of devices that should be added to the I2C bus. The reason is that sometimes + * the I2C board info is not enough, a new PCI board can for instance be + * plugged into a standard PC, and the bus number might be unknown at + * early init time. + */ +struct xiic_i2c_platform_data { + u8 num_devices; + struct i2c_board_info const *devices; +}; + +#endif /* _LINUX_I2C_XIIC_H */ diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h new file mode 100644 index 000000000..9f0aa1b48 --- /dev/null +++ b/include/linux/platform_data/ina2xx.h @@ -0,0 +1,19 @@ +/* + * Driver for Texas Instruments INA219, INA226 power monitor chips + * + * Copyright (C) 2012 Lothar Felten + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * For further information, see the Documentation/hwmon/ina2xx file. + */ + +/** + * struct ina2xx_platform_data - ina2xx info + * @shunt_uohms shunt resistance in microohms + */ +struct ina2xx_platform_data { + long shunt_uohms; +}; diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/intel-mid_wdt.h new file mode 100644 index 000000000..b98253466 --- /dev/null +++ b/include/linux/platform_data/intel-mid_wdt.h @@ -0,0 +1,22 @@ +/* + * intel-mid_wdt: generic Intel MID SCU watchdog driver + * + * Copyright (C) 2014 Intel Corporation. All rights reserved. + * Contact: David Cohen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General + * Public License as published by the Free Software Foundation. + */ + +#ifndef __INTEL_MID_WDT_H__ +#define __INTEL_MID_WDT_H__ + +#include + +struct intel_mid_wdt_pdata { + int irq; + int (*probe)(struct platform_device *pdev); +}; + +#endif /*__INTEL_MID_WDT_H__*/ diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h new file mode 100644 index 000000000..942b0c3f8 --- /dev/null +++ b/include/linux/platform_data/intel-spi.h @@ -0,0 +1,31 @@ +/* + * Intel PCH/PCU SPI flash driver. + * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef INTEL_SPI_PDATA_H +#define INTEL_SPI_PDATA_H + +enum intel_spi_type { + INTEL_SPI_BYT = 1, + INTEL_SPI_LPT, + INTEL_SPI_BXT, +}; + +/** + * struct intel_spi_boardinfo - Board specific data for Intel SPI driver + * @type: Type which this controller is compatible with + * @writeable: The chip is writeable + */ +struct intel_spi_boardinfo { + enum intel_spi_type type; + bool writeable; +}; + +#endif /* INTEL_SPI_PDATA_H */ diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h new file mode 100644 index 000000000..554b59801 --- /dev/null +++ b/include/linux/platform_data/invensense_mpu6050.h @@ -0,0 +1,34 @@ +/* +* Copyright (C) 2012 Invensense, Inc. +* +* This software is licensed under the terms of the GNU General Public +* License version 2, as published by the Free Software Foundation, and +* may be copied, distributed, and modified under those terms. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +*/ + +#ifndef __INV_MPU6050_PLATFORM_H_ +#define __INV_MPU6050_PLATFORM_H_ + +/** + * struct inv_mpu6050_platform_data - Platform data for the mpu driver + * @orientation: Orientation matrix of the chip (deprecated in favor of + * mounting matrix retrieved from device-tree) + * + * Contains platform specific information on how to configure the MPU6050 to + * work on this platform. The orientation matricies are 3x3 rotation matricies + * that are applied to the data to rotate from the mounting orientation to the + * platform orientation. The values must be one of 0, 1, or -1 and each row and + * column should have exactly 1 non-zero value. + * + * Deprecated in favor of mounting matrix retrieved from device-tree. 
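As an illustrative aside to the MPU6050 orientation rules above (entries of 0, 1 or -1, exactly one non-zero per row and column): a sketch for a chip mounted flat and aligned with the board axes, using the platform data structure defined immediately below. The my_mpu_pdata name is hypothetical.

/* Hypothetical pdata: identity orientation (chip axes == board axes) */
static struct inv_mpu6050_platform_data my_mpu_pdata = {
	.orientation = {  1,  0,  0,
			  0,  1,  0,
			  0,  0,  1 },
};

/* A part rotated 90 degrees around Z would instead swap the X/Y rows and
 * negate one of them, e.g. { 0, -1, 0,  1, 0, 0,  0, 0, 1 }; the exact signs
 * depend on the rotation direction and mounting.
 */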
+ */ +struct inv_mpu6050_platform_data { + __s8 orientation[9]; +}; + +#endif diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h new file mode 100644 index 000000000..e8b12dbf6 --- /dev/null +++ b/include/linux/platform_data/iommu-omap.h @@ -0,0 +1,19 @@ +/* + * omap iommu: main structures + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Written by Hiroshi DOYU + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +struct iommu_platform_data { + const char *reset_name; + int (*assert_reset)(struct platform_device *pdev, const char *name); + int (*deassert_reset)(struct platform_device *pdev, const char *name); +}; diff --git a/include/linux/platform_data/irda-pxaficp.h b/include/linux/platform_data/irda-pxaficp.h new file mode 100644 index 000000000..bd35ddcf3 --- /dev/null +++ b/include/linux/platform_data/irda-pxaficp.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ASMARM_ARCH_IRDA_H +#define ASMARM_ARCH_IRDA_H + +/* board specific transceiver capabilities */ + +#define IR_OFF 1 +#define IR_SIRMODE 2 +#define IR_FIRMODE 4 + +struct pxaficp_platform_data { + int transceiver_cap; + void (*transceiver_mode)(struct device *dev, int mode); + int (*startup)(struct device *dev); + void (*shutdown)(struct device *dev); + int gpio_pwdown; /* powerdown GPIO for the IrDA chip */ + bool gpio_pwdown_inverted; /* gpio_pwdown is inverted */ +}; + +extern void pxa_set_ficp_info(struct pxaficp_platform_data *info); + +#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x) +void pxa2xx_transceiver_mode(struct device *dev, int mode); +#endif + +#endif diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h new file mode 100644 index 000000000..38f77b5e5 --- /dev/null +++ b/include/linux/platform_data/irda-sa11x0.h @@ -0,0 +1,20 @@ +/* + * arch/arm/include/asm/mach/irda.h + * + * Copyright (C) 2004 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_ARM_MACH_IRDA_H +#define __ASM_ARM_MACH_IRDA_H + +struct irda_platform_data { + int (*startup)(struct device *); + void (*shutdown)(struct device *); + int (*set_power)(struct device *, unsigned int state); + void (*set_speed)(struct device *, unsigned int speed); +}; + +#endif diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h new file mode 100644 index 000000000..4ac1a070a --- /dev/null +++ b/include/linux/platform_data/isl9305.h @@ -0,0 +1,30 @@ +/* + * isl9305 - Intersil ISL9305 DCDC regulator + * + * Copyright 2014 Linaro Ltd + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __ISL9305_H +#define __ISL9305_H + +#define ISL9305_DCD1 0 +#define ISL9305_DCD2 1 +#define ISL9305_LDO1 2 +#define ISL9305_LDO2 3 + +#define ISL9305_MAX_REGULATOR ISL9305_LDO2 + +struct regulator_init_data; + +struct isl9305_pdata { + struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1]; +}; + +#endif diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h new file mode 100644 index 000000000..2ccdce6a4 --- /dev/null +++ b/include/linux/platform_data/itco_wdt.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Platform data for the Intel TCO Watchdog + */ + +#ifndef _ITCO_WDT_H_ +#define _ITCO_WDT_H_ + +/* Watchdog resources */ +#define ICH_RES_IO_TCO 0 +#define ICH_RES_IO_SMI 1 +#define ICH_RES_MEM_OFF 2 +#define ICH_RES_MEM_GCS_PMC 0 + +struct itco_wdt_platform_data { + char name[32]; + unsigned int version; + /* private data to be passed to update_no_reboot_bit API */ + void *no_reboot_priv; + /* pointer for platform specific no reboot update function */ + int (*update_no_reboot_bit)(void *priv, bool set); +}; + +#endif /* _ITCO_WDT_H_ */ diff --git a/include/linux/platform_data/jz4740/jz4740_nand.h b/include/linux/platform_data/jz4740/jz4740_nand.h new file mode 100644 index 000000000..bc571f6d5 --- /dev/null +++ b/include/linux/platform_data/jz4740/jz4740_nand.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen + * JZ4740 SoC NAND controller driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __JZ4740_NAND_H__ +#define __JZ4740_NAND_H__ + +#include +#include + +#define JZ_NAND_NUM_BANKS 4 + +struct jz_nand_platform_data { + int num_partitions; + struct mtd_partition *partitions; + + unsigned char banks[JZ_NAND_NUM_BANKS]; + + void (*ident_callback)(struct platform_device *, struct mtd_info *, + struct mtd_partition **, int *num_partitions); +}; + +#endif diff --git a/include/linux/platform_data/keyboard-pxa930_rotary.h b/include/linux/platform_data/keyboard-pxa930_rotary.h new file mode 100644 index 000000000..3271aa01c --- /dev/null +++ b/include/linux/platform_data/keyboard-pxa930_rotary.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ARCH_PXA930_ROTARY_H +#define __ASM_ARCH_PXA930_ROTARY_H + +/* NOTE: + * + * rotary can be either interpreted as a ralative input event (e.g. + * REL_WHEEL or REL_HWHEEL) or a specific key event (e.g. UP/DOWN + * or LEFT/RIGHT), depending on if up_key & down_key are assigned + * or rel_code is assigned a non-zero value. When all are non-zero, + * up_key and down_key will be preferred. 
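For illustration, a sketch of the pxa930 rotary platform data just described (the structure is defined immediately below), configured to report relative scroll-wheel events: leaving up_key and down_key at 0 is what selects rel_code, per the note above. The my_* names are hypothetical and <linux/input.h> is assumed for the event codes.

/* Hypothetical setup: report the rotary as a scroll wheel */
static struct pxa930_rotary_platform_data my_rotary_pdata = {
	.up_key		= 0,
	.down_key	= 0,
	.rel_code	= REL_WHEEL,
};

/* Setting .up_key = KEY_UP and .down_key = KEY_DOWN instead would deliver
 * key events, since assigned keys take precedence over rel_code.
 */

Board code would then hand this structure to pxa930_set_rotarykey_info(), as declared below.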
+ */ +struct pxa930_rotary_platform_data { + int up_key; + int down_key; + int rel_code; +}; + +void __init pxa930_set_rotarykey_info(struct pxa930_rotary_platform_data *info); + +#endif /* __ASM_ARCH_PXA930_ROTARY_H */ diff --git a/include/linux/platform_data/keyboard-spear.h b/include/linux/platform_data/keyboard-spear.h new file mode 100644 index 000000000..5e3ff6539 --- /dev/null +++ b/include/linux/platform_data/keyboard-spear.h @@ -0,0 +1,164 @@ +/* + * Copyright (C) 2010 ST Microelectronics + * Rajeev Kumar + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __PLAT_KEYBOARD_H +#define __PLAT_KEYBOARD_H + +#include +#include +#include +#include + +#define DECLARE_9x9_KEYMAP(_name) \ +int _name[] = { \ + KEY(0, 0, KEY_ESC), \ + KEY(0, 1, KEY_1), \ + KEY(0, 2, KEY_2), \ + KEY(0, 3, KEY_3), \ + KEY(0, 4, KEY_4), \ + KEY(0, 5, KEY_5), \ + KEY(0, 6, KEY_6), \ + KEY(0, 7, KEY_7), \ + KEY(0, 8, KEY_8), \ + KEY(1, 0, KEY_9), \ + KEY(1, 1, KEY_MINUS), \ + KEY(1, 2, KEY_EQUAL), \ + KEY(1, 3, KEY_BACKSPACE), \ + KEY(1, 4, KEY_TAB), \ + KEY(1, 5, KEY_Q), \ + KEY(1, 6, KEY_W), \ + KEY(1, 7, KEY_E), \ + KEY(1, 8, KEY_R), \ + KEY(2, 0, KEY_T), \ + KEY(2, 1, KEY_Y), \ + KEY(2, 2, KEY_U), \ + KEY(2, 3, KEY_I), \ + KEY(2, 4, KEY_O), \ + KEY(2, 5, KEY_P), \ + KEY(2, 6, KEY_LEFTBRACE), \ + KEY(2, 7, KEY_RIGHTBRACE), \ + KEY(2, 8, KEY_ENTER), \ + KEY(3, 0, KEY_LEFTCTRL), \ + KEY(3, 1, KEY_A), \ + KEY(3, 2, KEY_S), \ + KEY(3, 3, KEY_D), \ + KEY(3, 4, KEY_F), \ + KEY(3, 5, KEY_G), \ + KEY(3, 6, KEY_H), \ + KEY(3, 7, KEY_J), \ + KEY(3, 8, KEY_K), \ + KEY(4, 0, KEY_L), \ + KEY(4, 1, KEY_SEMICOLON), \ + KEY(4, 2, KEY_APOSTROPHE), \ + KEY(4, 3, KEY_GRAVE), \ + KEY(4, 4, KEY_LEFTSHIFT), \ + KEY(4, 5, KEY_BACKSLASH), \ + KEY(4, 6, KEY_Z), \ + KEY(4, 7, KEY_X), \ + KEY(4, 8, KEY_C), \ + KEY(5, 0, KEY_V), \ + KEY(5, 1, KEY_B), \ + KEY(5, 2, KEY_N), \ + KEY(5, 3, KEY_M), \ + KEY(5, 4, KEY_COMMA), \ + KEY(5, 5, KEY_DOT), \ + KEY(5, 6, KEY_SLASH), \ + KEY(5, 7, KEY_RIGHTSHIFT), \ + KEY(5, 8, KEY_KPASTERISK), \ + KEY(6, 0, KEY_LEFTALT), \ + KEY(6, 1, KEY_SPACE), \ + KEY(6, 2, KEY_CAPSLOCK), \ + KEY(6, 3, KEY_F1), \ + KEY(6, 4, KEY_F2), \ + KEY(6, 5, KEY_F3), \ + KEY(6, 6, KEY_F4), \ + KEY(6, 7, KEY_F5), \ + KEY(6, 8, KEY_F6), \ + KEY(7, 0, KEY_F7), \ + KEY(7, 1, KEY_F8), \ + KEY(7, 2, KEY_F9), \ + KEY(7, 3, KEY_F10), \ + KEY(7, 4, KEY_NUMLOCK), \ + KEY(7, 5, KEY_SCROLLLOCK), \ + KEY(7, 6, KEY_KP7), \ + KEY(7, 7, KEY_KP8), \ + KEY(7, 8, KEY_KP9), \ + KEY(8, 0, KEY_KPMINUS), \ + KEY(8, 1, KEY_KP4), \ + KEY(8, 2, KEY_KP5), \ + KEY(8, 3, KEY_KP6), \ + KEY(8, 4, KEY_KPPLUS), \ + KEY(8, 5, KEY_KP1), \ + KEY(8, 6, KEY_KP2), \ + KEY(8, 7, KEY_KP3), \ + KEY(8, 8, KEY_KP0), \ +} + +#define DECLARE_6x6_KEYMAP(_name) \ +int _name[] = { \ + KEY(0, 0, KEY_RESERVED), \ + KEY(0, 1, KEY_1), \ + KEY(0, 2, KEY_2), \ + KEY(0, 3, KEY_3), \ + KEY(0, 4, KEY_4), \ + KEY(0, 5, KEY_5), \ + KEY(1, 0, KEY_Q), \ + KEY(1, 1, KEY_W), \ + KEY(1, 2, KEY_E), \ + KEY(1, 3, KEY_R), \ + KEY(1, 4, KEY_T), \ + KEY(1, 5, KEY_Y), \ + KEY(2, 0, KEY_D), \ + KEY(2, 1, KEY_F), \ + KEY(2, 2, KEY_G), \ + KEY(2, 3, KEY_H), \ + KEY(2, 4, KEY_J), \ + KEY(2, 5, KEY_K), \ + KEY(3, 0, KEY_B), \ + KEY(3, 1, KEY_N), \ + KEY(3, 2, KEY_M), \ + KEY(3, 3, KEY_COMMA), \ + KEY(3, 4, KEY_DOT), \ + KEY(3, 5, KEY_SLASH), \ + KEY(4, 0, KEY_F6), \ + KEY(4, 1, KEY_F7), \ + KEY(4, 2, KEY_F8), \ + KEY(4, 3, KEY_F9), \ + KEY(4, 4, 
KEY_F10), \ + KEY(4, 5, KEY_NUMLOCK), \ + KEY(5, 0, KEY_KP2), \ + KEY(5, 1, KEY_KP3), \ + KEY(5, 2, KEY_KP0), \ + KEY(5, 3, KEY_KPDOT), \ + KEY(5, 4, KEY_RO), \ + KEY(5, 5, KEY_ZENKAKUHANKAKU), \ +} + +#define KEYPAD_9x9 0 +#define KEYPAD_6x6 1 +#define KEYPAD_2x2 2 + +/** + * struct kbd_platform_data - spear keyboard platform data + * keymap: pointer to keymap data (table and size) + * rep: enables key autorepeat + * mode: choose keyboard support(9x9, 6x6, 2x2) + * suspended_rate: rate at which keyboard would operate in suspended mode + * + * This structure is supposed to be used by platform code to supply + * keymaps to drivers that implement keyboards. + */ +struct kbd_platform_data { + const struct matrix_keymap_data *keymap; + bool rep; + unsigned int mode; + unsigned int suspended_rate; +}; + +#endif /* __PLAT_KEYBOARD_H */ diff --git a/include/linux/platform_data/keypad-ep93xx.h b/include/linux/platform_data/keypad-ep93xx.h new file mode 100644 index 000000000..0e36818e3 --- /dev/null +++ b/include/linux/platform_data/keypad-ep93xx.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KEYPAD_EP93XX_H +#define __KEYPAD_EP93XX_H + +struct matrix_keymap_data; + +/* flags for the ep93xx_keypad driver */ +#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */ +#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */ +#define EP93XX_KEYPAD_BACK_DRIVE (1<<2) /* back driving mode */ +#define EP93XX_KEYPAD_TEST_MODE (1<<3) /* scan only column 0 */ +#define EP93XX_KEYPAD_KDIV (1<<4) /* 1/4 clock or 1/16 clock */ +#define EP93XX_KEYPAD_AUTOREPEAT (1<<5) /* enable key autorepeat */ + +/** + * struct ep93xx_keypad_platform_data - platform specific device structure + * @keymap_data: pointer to &matrix_keymap_data + * @debounce: debounce start count; terminal count is 0xff + * @prescale: row/column counter pre-scaler load value + * @flags: see above + */ +struct ep93xx_keypad_platform_data { + struct matrix_keymap_data *keymap_data; + unsigned int debounce; + unsigned int prescale; + unsigned int flags; +}; + +#define EP93XX_MATRIX_ROWS (8) +#define EP93XX_MATRIX_COLS (8) + +#endif /* __KEYPAD_EP93XX_H */ diff --git a/include/linux/platform_data/keypad-nomadik-ske.h b/include/linux/platform_data/keypad-nomadik-ske.h new file mode 100644 index 000000000..31382fbc0 --- /dev/null +++ b/include/linux/platform_data/keypad-nomadik-ske.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Naveen Kumar Gaddipati + * + * ux500 Scroll key and Keypad Encoder (SKE) header + */ + +#ifndef __SKE_H +#define __SKE_H + +#include + +/* register definitions for SKE peripheral */ +#define SKE_CR 0x00 +#define SKE_VAL0 0x04 +#define SKE_VAL1 0x08 +#define SKE_DBCR 0x0C +#define SKE_IMSC 0x10 +#define SKE_RIS 0x14 +#define SKE_MIS 0x18 +#define SKE_ICR 0x1C + +/* + * Keypad module + */ + +/** + * struct keypad_platform_data - structure for platform specific data + * @init: pointer to keypad init function + * @exit: pointer to keypad deinitialisation function + * @keymap_data: matrix scan code table for keycodes + * @krow: maximum number of rows + * @kcol: maximum number of columns + * @debounce_ms: platform specific debounce time + * @no_autorepeat: flag for auto repetition + * @wakeup_enable: allow waking up the system + */ +struct ske_keypad_platform_data { + int (*init)(void); + int (*exit)(void); + const struct matrix_keymap_data *keymap_data; + u8 krow; + u8 kcol; + u8 debounce_ms; + bool no_autorepeat; + bool 
wakeup_enable; +}; +#endif /*__SKE_KPD_H*/ diff --git a/include/linux/platform_data/keypad-omap.h b/include/linux/platform_data/keypad-omap.h new file mode 100644 index 000000000..c3a3abae9 --- /dev/null +++ b/include/linux/platform_data/keypad-omap.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2006 Komal Shah + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __KEYPAD_OMAP_H +#define __KEYPAD_OMAP_H + +#ifndef CONFIG_ARCH_OMAP1 +#warning Please update the board to use matrix-keypad driver +#define omap_readw(reg) 0 +#define omap_writew(val, reg) do {} while (0) +#endif +#include + +struct omap_kp_platform_data { + int rows; + int cols; + const struct matrix_keymap_data *keymap_data; + bool rep; + unsigned long delay; + bool dbounce; + /* specific to OMAP242x*/ + unsigned int *row_gpios; + unsigned int *col_gpios; +}; + +/* Group (0..3) -- when multiple keys are pressed, only the + * keys pressed in the same group are considered as pressed. This is + * in order to workaround certain crappy HW designs that produce ghost + * keypresses. Two free bits, not used by neither row/col nor keynum, + * must be available for use as group bits. The below GROUP_SHIFT + * macro definition is based on some prior knowledge of the + * matrix_keypad defined KEY() macro internals. + */ +#define GROUP_SHIFT 14 +#define GROUP_0 (0 << GROUP_SHIFT) +#define GROUP_1 (1 << GROUP_SHIFT) +#define GROUP_2 (2 << GROUP_SHIFT) +#define GROUP_3 (3 << GROUP_SHIFT) +#define GROUP_MASK GROUP_3 +#if KEY_MAX & GROUP_MASK +#error Group bits in conflict with keynum bits +#endif + + +#endif + diff --git a/include/linux/platform_data/keypad-pxa27x.h b/include/linux/platform_data/keypad-pxa27x.h new file mode 100644 index 000000000..a376442b9 --- /dev/null +++ b/include/linux/platform_data/keypad-pxa27x.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ARCH_PXA27x_KEYPAD_H +#define __ASM_ARCH_PXA27x_KEYPAD_H + +#include +#include + +#define MAX_MATRIX_KEY_ROWS (8) +#define MAX_MATRIX_KEY_COLS (8) +#define MATRIX_ROW_SHIFT (3) +#define MAX_DIRECT_KEY_NUM (8) + +/* pxa3xx keypad platform specific parameters + * + * NOTE: + * 1. direct_key_num indicates the number of keys in the direct keypad + * _plus_ the number of rotary-encoder sensor inputs, this can be + * left as 0 if only rotary encoders are enabled, the driver will + * automatically calculate this + * + * 2. direct_key_map is the key code map for the direct keys, if rotary + * encoder(s) are enabled, direct key 0/1(2/3) will be ignored + * + * 3. rotary can be either interpreted as a relative input event (e.g. + * REL_WHEEL/REL_HWHEEL) or specific keys (e.g. UP/DOWN/LEFT/RIGHT) + * + * 4. matrix key and direct key will use the same debounce_interval by + * default, which should be sufficient in most cases + * + * pxa168 keypad platform specific parameter + * + * NOTE: + * clear_wakeup_event callback is a workaround required to clear the + * keypad interrupt. The keypad wake must be cleared in addition to + * reading the MI/DI bits in the KPC register. 
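[Editorial note, not part of the patch] To show how the EP93xx flags and keymap plumbing defined earlier fit together, a hypothetical machine file could look roughly like the sketch below; the keymap contents, debounce/prescale values and include paths are assumptions.

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/platform_data/keypad-ep93xx.h>

/* Hypothetical board keymap, not part of this patch. */
static const uint32_t example_keys[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_DOWN),
	KEY(1, 0, KEY_ENTER),
	KEY(1, 1, KEY_ESC),
};

static struct matrix_keymap_data example_keymap_data = {
	.keymap		= example_keys,
	.keymap_size	= ARRAY_SIZE(example_keys),
};

static struct ep93xx_keypad_platform_data example_keypad_data = {
	.keymap_data	= &example_keymap_data,
	.debounce	= 0xa0,
	.prescale	= 0x3ff,
	.flags		= EP93XX_KEYPAD_AUTOREPEAT,
};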
+ */ +struct pxa27x_keypad_platform_data { + + /* code map for the matrix keys */ + const struct matrix_keymap_data *matrix_keymap_data; + unsigned int matrix_key_rows; + unsigned int matrix_key_cols; + + /* direct keys */ + int direct_key_num; + unsigned int direct_key_map[MAX_DIRECT_KEY_NUM]; + /* the key output may be low active */ + int direct_key_low_active; + /* give board a chance to choose the start direct key */ + unsigned int direct_key_mask; + + /* rotary encoders 0 */ + int enable_rotary0; + int rotary0_rel_code; + int rotary0_up_key; + int rotary0_down_key; + + /* rotary encoders 1 */ + int enable_rotary1; + int rotary1_rel_code; + int rotary1_up_key; + int rotary1_down_key; + + /* key debounce interval */ + unsigned int debounce_interval; + + /* clear wakeup event requirement for pxa168 */ + void (*clear_wakeup_event)(void); +}; + +extern void pxa_set_keypad_info(struct pxa27x_keypad_platform_data *info); + +#endif /* __ASM_ARCH_PXA27x_KEYPAD_H */ diff --git a/include/linux/platform_data/keypad-w90p910.h b/include/linux/platform_data/keypad-w90p910.h new file mode 100644 index 000000000..206ca4ecd --- /dev/null +++ b/include/linux/platform_data/keypad-w90p910.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ARCH_W90P910_KEYPAD_H +#define __ASM_ARCH_W90P910_KEYPAD_H + +#include + +extern void mfp_set_groupi(struct device *dev); + +struct w90p910_keypad_platform_data { + const struct matrix_keymap_data *keymap_data; + + unsigned int prescale; + unsigned int debounce; +}; + +#endif /* __ASM_ARCH_W90P910_KEYPAD_H */ diff --git a/include/linux/platform_data/keyscan-davinci.h b/include/linux/platform_data/keyscan-davinci.h new file mode 100644 index 000000000..7a560e05b --- /dev/null +++ b/include/linux/platform_data/keyscan-davinci.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2009 Texas Instruments, Inc + * + * Author: Miguel Aguilar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
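[Editorial note, not part of the patch] The sketch below illustrates note 2 above in a hypothetical board file: with rotary encoder 0 enabled, direct keys 0 and 1 are ignored, so only entries 2 and 3 of direct_key_map carry key codes. The key choices and include paths are assumptions.

#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_data/keypad-pxa27x.h>

/* Hypothetical board configuration, not part of this patch. */
static struct pxa27x_keypad_platform_data example_keypad_info = {
	/* no matrix keys: two direct keys plus one rotary encoder */
	.direct_key_num		= 4,
	.direct_key_map		= { KEY_RESERVED, KEY_RESERVED,
				    KEY_CAMERA, KEY_POWER },
	.enable_rotary0		= 1,
	.rotary0_up_key		= KEY_UP,
	.rotary0_down_key	= KEY_DOWN,
	.debounce_interval	= 30,
};

static void __init example_board_init_keypad(void)
{
	pxa_set_keypad_info(&example_keypad_info);
}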
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef DAVINCI_KEYSCAN_H +#define DAVINCI_KEYSCAN_H + +#include + +enum davinci_matrix_types { + DAVINCI_KEYSCAN_MATRIX_4X4, + DAVINCI_KEYSCAN_MATRIX_5X3, +}; + +struct davinci_ks_platform_data { + int (*device_enable)(struct device *dev); + unsigned short *keymap; + u32 keymapsize; + u8 rep:1; + u8 strobe; + u8 interval; + u8 matrix_type; +}; + +#endif + diff --git a/include/linux/platform_data/lcd-mipid.h b/include/linux/platform_data/lcd-mipid.h new file mode 100644 index 000000000..63f05eb23 --- /dev/null +++ b/include/linux/platform_data/lcd-mipid.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LCD_MIPID_H +#define __LCD_MIPID_H + +enum mipid_test_num { + MIPID_TEST_RGB_LINES, +}; + +enum mipid_test_result { + MIPID_TEST_SUCCESS, + MIPID_TEST_INVALID, + MIPID_TEST_FAILED, +}; + +#ifdef __KERNEL__ + +struct mipid_platform_data { + int nreset_gpio; + int data_lines; + + void (*shutdown)(struct mipid_platform_data *pdata); + void (*set_bklight_level)(struct mipid_platform_data *pdata, + int level); + int (*get_bklight_level)(struct mipid_platform_data *pdata); + int (*get_bklight_max)(struct mipid_platform_data *pdata); +}; + +#endif + +#endif diff --git a/include/linux/platform_data/leds-kirkwood-netxbig.h b/include/linux/platform_data/leds-kirkwood-netxbig.h new file mode 100644 index 000000000..3c85a735c --- /dev/null +++ b/include/linux/platform_data/leds-kirkwood-netxbig.h @@ -0,0 +1,54 @@ +/* + * Platform data structure for netxbig LED driver + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __LEDS_KIRKWOOD_NETXBIG_H +#define __LEDS_KIRKWOOD_NETXBIG_H + +struct netxbig_gpio_ext { + unsigned *addr; + int num_addr; + unsigned *data; + int num_data; + unsigned enable; +}; + +enum netxbig_led_mode { + NETXBIG_LED_OFF, + NETXBIG_LED_ON, + NETXBIG_LED_SATA, + NETXBIG_LED_TIMER1, + NETXBIG_LED_TIMER2, + NETXBIG_LED_MODE_NUM, +}; + +#define NETXBIG_LED_INVALID_MODE NETXBIG_LED_MODE_NUM + +struct netxbig_led_timer { + unsigned long delay_on; + unsigned long delay_off; + enum netxbig_led_mode mode; +}; + +struct netxbig_led { + const char *name; + const char *default_trigger; + int mode_addr; + int *mode_val; + int bright_addr; + int bright_max; +}; + +struct netxbig_led_platform_data { + struct netxbig_gpio_ext *gpio_ext; + struct netxbig_led_timer *timer; + int num_timer; + struct netxbig_led *leds; + int num_leds; +}; + +#endif /* __LEDS_KIRKWOOD_NETXBIG_H */ diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h new file mode 100644 index 000000000..eb8a6860e --- /dev/null +++ b/include/linux/platform_data/leds-kirkwood-ns2.h @@ -0,0 +1,38 @@ +/* + * Platform data structure for Network Space v2 LED driver + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
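[Editorial note, not part of the patch] A hypothetical DaVinci board file might describe its key matrix as in the sketch below; the key codes, strobe/interval values and include paths are assumptions.

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/platform_data/keyscan-davinci.h>

/* Hypothetical board keymap, not part of this patch. */
static unsigned short example_davinci_keymap[] = {
	KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT,
	KEY_ENTER, KEY_MENU, KEY_BACK, KEY_HOME,
};

static struct davinci_ks_platform_data example_davinci_ks_data = {
	.keymap		= example_davinci_keymap,
	.keymapsize	= ARRAY_SIZE(example_davinci_keymap),
	.rep		= 1,
	.strobe		= 0x5,
	.interval	= 0x2,
	.matrix_type	= DAVINCI_KEYSCAN_MATRIX_4X4,
};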
+ */ + +#ifndef __LEDS_KIRKWOOD_NS2_H +#define __LEDS_KIRKWOOD_NS2_H + +enum ns2_led_modes { + NS_V2_LED_OFF, + NS_V2_LED_ON, + NS_V2_LED_SATA, +}; + +struct ns2_led_modval { + enum ns2_led_modes mode; + int cmd_level; + int slow_level; +}; + +struct ns2_led { + const char *name; + const char *default_trigger; + unsigned cmd; + unsigned slow; + int num_modes; + struct ns2_led_modval *modval; +}; + +struct ns2_led_platform_data { + int num_leds; + struct ns2_led *leds; +}; + +#endif /* __LEDS_KIRKWOOD_NS2_H */ diff --git a/include/linux/platform_data/leds-lm355x.h b/include/linux/platform_data/leds-lm355x.h new file mode 100644 index 000000000..b88724bb0 --- /dev/null +++ b/include/linux/platform_data/leds-lm355x.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2012 Texas Instruments + * + * License Terms: GNU General Public License v2 + * + * Simple driver for Texas Instruments LM355x LED driver chip + * + * Author: G.Shark Jeong + * Daniel Jeong + */ + +#define LM355x_NAME "leds-lm355x" +#define LM3554_NAME "leds-lm3554" +#define LM3556_NAME "leds-lm3556" + +/* lm3554 : strobe def. on */ +enum lm355x_strobe { + LM355x_PIN_STROBE_DISABLE = 0x00, + LM355x_PIN_STROBE_ENABLE = 0x01, +}; + +enum lm355x_torch { + LM355x_PIN_TORCH_DISABLE = 0, + LM3554_PIN_TORCH_ENABLE = 0x80, + LM3556_PIN_TORCH_ENABLE = 0x10, +}; + +enum lm355x_tx2 { + LM355x_PIN_TX_DISABLE = 0, + LM3554_PIN_TX_ENABLE = 0x20, + LM3556_PIN_TX_ENABLE = 0x40, +}; + +enum lm355x_ntc { + LM355x_PIN_NTC_DISABLE = 0, + LM3554_PIN_NTC_ENABLE = 0x08, + LM3556_PIN_NTC_ENABLE = 0x80, +}; + +enum lm355x_pmode { + LM355x_PMODE_DISABLE = 0, + LM355x_PMODE_ENABLE = 0x04, +}; + +/* + * struct lm3554_platform_data + * @pin_strobe: strobe input + * @pin_torch : input pin + * lm3554-tx1/torch/gpio1 + * lm3556-torch + * @pin_tx2 : input pin + * lm3554-envm/tx2/gpio2 + * lm3556-tx pin + * @ntc_pin : output pin + * lm3554-ledi/ntc + * lm3556-temp pin + * @pass_mode : pass mode + */ +struct lm355x_platform_data { + enum lm355x_strobe pin_strobe; + enum lm355x_torch pin_tx1; + enum lm355x_tx2 pin_tx2; + enum lm355x_ntc ntc_pin; + + enum lm355x_pmode pass_mode; +}; diff --git a/include/linux/platform_data/leds-lm3642.h b/include/linux/platform_data/leds-lm3642.h new file mode 100644 index 000000000..72d6ee6ad --- /dev/null +++ b/include/linux/platform_data/leds-lm3642.h @@ -0,0 +1,38 @@ +/* +* Copyright (C) 2012 Texas Instruments +* +* License Terms: GNU General Public License v2 +* +* Simple driver for Texas Instruments LM3642 LED driver chip +* +* Author: G.Shark Jeong +* Daniel Jeong +*/ + +#ifndef __LINUX_LM3642_H +#define __LINUX_LM3642_H + +#define LM3642_NAME "leds-lm3642" + +enum lm3642_torch_pin_enable { + LM3642_TORCH_PIN_DISABLE = 0x00, + LM3642_TORCH_PIN_ENABLE = 0x10, +}; + +enum lm3642_strobe_pin_enable { + LM3642_STROBE_PIN_DISABLE = 0x00, + LM3642_STROBE_PIN_ENABLE = 0x20, +}; + +enum lm3642_tx_pin_enable { + LM3642_TX_PIN_DISABLE = 0x00, + LM3642_TX_PIN_ENABLE = 0x40, +}; + +struct lm3642_platform_data { + enum lm3642_torch_pin_enable torch_pin; + enum lm3642_strobe_pin_enable strobe_pin; + enum lm3642_tx_pin_enable tx_pin; +}; + +#endif /* __LINUX_LM3642_H */ diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h new file mode 100644 index 000000000..624ff9eda --- /dev/null +++ b/include/linux/platform_data/leds-lp55xx.h @@ -0,0 +1,81 @@ +/* + * LP55XX Platform Data Header + * + * Copyright (C) 2012 Texas Instruments + * + * Author: Milo(Woogyom) Kim + * + * This program is free software; you can 
redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * Derived from leds-lp5521.h, leds-lp5523.h + */ + +#ifndef _LEDS_LP55XX_H +#define _LEDS_LP55XX_H + +/* Clock configuration */ +#define LP55XX_CLOCK_AUTO 0 +#define LP55XX_CLOCK_INT 1 +#define LP55XX_CLOCK_EXT 2 + +struct lp55xx_led_config { + const char *name; + const char *default_trigger; + u8 chan_nr; + u8 led_current; /* mA x10, 0 if led is not connected */ + u8 max_current; +}; + +struct lp55xx_predef_pattern { + const u8 *r; + const u8 *g; + const u8 *b; + u8 size_r; + u8 size_g; + u8 size_b; +}; + +enum lp8501_pwr_sel { + LP8501_ALL_VDD, /* D1~9 are connected to VDD */ + LP8501_6VDD_3VOUT, /* D1~6 with VDD, D7~9 with VOUT */ + LP8501_3VDD_6VOUT, /* D1~6 with VOUT, D7~9 with VDD */ + LP8501_ALL_VOUT, /* D1~9 are connected to VOUT */ +}; + +/* + * struct lp55xx_platform_data + * @led_config : Configurable led class device + * @num_channels : Number of LED channels + * @label : Used for naming LEDs + * @clock_mode : Input clock mode. LP55XX_CLOCK_AUTO or _INT or _EXT + * @setup_resources : Platform specific function before enabling the chip + * @release_resources : Platform specific function after disabling the chip + * @enable : EN pin control by platform side + * @patterns : Predefined pattern data for RGB channels + * @num_patterns : Number of patterns + * @update_config : Value of CONFIG register + */ +struct lp55xx_platform_data { + + /* LED channel configuration */ + struct lp55xx_led_config *led_config; + u8 num_channels; + const char *label; + + /* Clock configuration */ + u8 clock_mode; + + /* optional enable GPIO */ + int enable_gpio; + + /* Predefined pattern data */ + struct lp55xx_predef_pattern *patterns; + unsigned int num_patterns; + + /* LP8501 specific */ + enum lp8501_pwr_sel pwr_sel; +}; + +#endif /* _LEDS_LP55XX_H */ diff --git a/include/linux/platform_data/leds-omap.h b/include/linux/platform_data/leds-omap.h new file mode 100644 index 000000000..56c9b2a0a --- /dev/null +++ b/include/linux/platform_data/leds-omap.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2006 Samsung Electronics + * Kyungmin Park + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ASMARM_ARCH_LED_H +#define ASMARM_ARCH_LED_H + +struct omap_led_config { + struct led_classdev cdev; + s16 gpio; +}; + +struct omap_led_platform_data { + s16 nr_leds; + struct omap_led_config *leds; +}; + +#endif diff --git a/include/linux/platform_data/leds-pca963x.h b/include/linux/platform_data/leds-pca963x.h new file mode 100644 index 000000000..54e845ffb --- /dev/null +++ b/include/linux/platform_data/leds-pca963x.h @@ -0,0 +1,48 @@ +/* + * PCA963X LED chip driver. + * + * Copyright 2012 bct electronic GmbH + * Copyright 2013 Qtechnology A/S + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
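[Editorial note, not part of the patch] A hypothetical board might describe two LP55xx channels as in the sketch below; the LED names and current values are assumptions (led_current and max_current are in units of 0.1 mA, per the field comments above).

#include <linux/kernel.h>
#include <linux/platform_data/leds-lp55xx.h>

/* Hypothetical board data, not part of this patch. */
static struct lp55xx_led_config example_led_config[] = {
	{
		.name		= "example:red",
		.chan_nr	= 0,
		.led_current	= 50,	/* 5.0 mA */
		.max_current	= 130,	/* 13.0 mA */
	},
	{
		.name		= "example:green",
		.chan_nr	= 1,
		.led_current	= 50,
		.max_current	= 130,
	},
};

static struct lp55xx_platform_data example_lp55xx_pdata = {
	.led_config	= example_led_config,
	.num_channels	= ARRAY_SIZE(example_led_config),
	.clock_mode	= LP55XX_CLOCK_AUTO,
	.enable_gpio	= -1,	/* EN pin not wired to a GPIO */
};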
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_PCA963X_H +#define __LINUX_PCA963X_H +#include + +enum pca963x_outdrv { + PCA963X_OPEN_DRAIN, + PCA963X_TOTEM_POLE, /* aka push-pull */ +}; + +enum pca963x_blink_type { + PCA963X_SW_BLINK, + PCA963X_HW_BLINK, +}; + +enum pca963x_direction { + PCA963X_NORMAL, + PCA963X_INVERTED, +}; + +struct pca963x_platform_data { + struct led_platform_data leds; + enum pca963x_outdrv outdrv; + enum pca963x_blink_type blink_type; + enum pca963x_direction dir; +}; + +#endif /* __LINUX_PCA963X_H*/ diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h new file mode 100644 index 000000000..441a6f290 --- /dev/null +++ b/include/linux/platform_data/leds-s3c24xx.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2006 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks + * + * S3C24XX - LEDs GPIO connector + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __LEDS_S3C24XX_H +#define __LEDS_S3C24XX_H + +#define S3C24XX_LEDF_ACTLOW (1<<0) /* LED is on when GPIO low */ +#define S3C24XX_LEDF_TRISTATE (1<<1) /* tristate to turn off */ + +struct s3c24xx_led_platdata { + unsigned int gpio; + unsigned int flags; + + char *name; + char *def_trigger; +}; + +#endif /* __LEDS_S3C24XX_H */ diff --git a/include/linux/platform_data/lm3630a_bl.h b/include/linux/platform_data/lm3630a_bl.h new file mode 100644 index 000000000..7538e38e2 --- /dev/null +++ b/include/linux/platform_data/lm3630a_bl.h @@ -0,0 +1,65 @@ +/* +* Simple driver for Texas Instruments LM3630A LED Flash driver chip +* Copyright (C) 2012 Texas Instruments +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License version 2 as +* published by the Free Software Foundation. +* +*/ + +#ifndef __LINUX_LM3630A_H +#define __LINUX_LM3630A_H + +#define LM3630A_NAME "lm3630a_bl" + +enum lm3630a_pwm_ctrl { + LM3630A_PWM_DISABLE = 0x00, + LM3630A_PWM_BANK_A, + LM3630A_PWM_BANK_B, + LM3630A_PWM_BANK_ALL, + LM3630A_PWM_BANK_A_ACT_LOW = 0x05, + LM3630A_PWM_BANK_B_ACT_LOW, + LM3630A_PWM_BANK_ALL_ACT_LOW, +}; + +enum lm3630a_leda_ctrl { + LM3630A_LEDA_DISABLE = 0x00, + LM3630A_LEDA_ENABLE = 0x04, + LM3630A_LEDA_ENABLE_LINEAR = 0x14, +}; + +enum lm3630a_ledb_ctrl { + LM3630A_LEDB_DISABLE = 0x00, + LM3630A_LEDB_ON_A = 0x01, + LM3630A_LEDB_ENABLE = 0x02, + LM3630A_LEDB_ENABLE_LINEAR = 0x0A, +}; + +#define LM3630A_MAX_BRIGHTNESS 255 +/* + *@leda_init_brt : led a init brightness. 4~255 + *@leda_max_brt : led a max brightness. 4~255 + *@leda_ctrl : led a disable, enable linear, enable exponential + *@ledb_init_brt : led b init brightness. 4~255 + *@ledb_max_brt : led b max brightness. 4~255 + *@ledb_ctrl : led b disable, enable linear, enable exponential + *@pwm_period : pwm period + *@pwm_ctrl : pwm disable, bank a or b, active high or low + */ +struct lm3630a_platform_data { + + /* led a config. */ + int leda_init_brt; + int leda_max_brt; + enum lm3630a_leda_ctrl leda_ctrl; + /* led b config. */ + int ledb_init_brt; + int ledb_max_brt; + enum lm3630a_ledb_ctrl ledb_ctrl; + /* pwm config. 
*/ + unsigned int pwm_period; + enum lm3630a_pwm_ctrl pwm_ctrl; +}; + +#endif /* __LINUX_LM3630A_H */ diff --git a/include/linux/platform_data/lm3639_bl.h b/include/linux/platform_data/lm3639_bl.h new file mode 100644 index 000000000..5234cd5ed --- /dev/null +++ b/include/linux/platform_data/lm3639_bl.h @@ -0,0 +1,69 @@ +/* +* Simple driver for Texas Instruments LM3630 LED Flash driver chip +* Copyright (C) 2012 Texas Instruments +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License version 2 as +* published by the Free Software Foundation. +* +*/ + +#ifndef __LINUX_LM3639_H +#define __LINUX_LM3639_H + +#define LM3639_NAME "lm3639_bl" + +enum lm3639_pwm { + LM3639_PWM_DISABLE = 0x00, + LM3639_PWM_EN_ACTLOW = 0x48, + LM3639_PWM_EN_ACTHIGH = 0x40, +}; + +enum lm3639_strobe { + LM3639_STROBE_DISABLE = 0x00, + LM3639_STROBE_EN_ACTLOW = 0x10, + LM3639_STROBE_EN_ACTHIGH = 0x30, +}; + +enum lm3639_txpin { + LM3639_TXPIN_DISABLE = 0x00, + LM3639_TXPIN_EN_ACTLOW = 0x04, + LM3639_TXPIN_EN_ACTHIGH = 0x0C, +}; + +enum lm3639_fleds { + LM3639_FLED_DIASBLE_ALL = 0x00, + LM3639_FLED_EN_1 = 0x40, + LM3639_FLED_EN_2 = 0x20, + LM3639_FLED_EN_ALL = 0x60, +}; + +enum lm3639_bleds { + LM3639_BLED_DIASBLE_ALL = 0x00, + LM3639_BLED_EN_1 = 0x10, + LM3639_BLED_EN_2 = 0x08, + LM3639_BLED_EN_ALL = 0x18, +}; +enum lm3639_bled_mode { + LM3639_BLED_MODE_EXPONETIAL = 0x00, + LM3639_BLED_MODE_LINEAR = 0x10, +}; + +struct lm3639_platform_data { + unsigned int max_brt_led; + unsigned int init_brt_led; + + /* input pins */ + enum lm3639_pwm pin_pwm; + enum lm3639_strobe pin_strobe; + enum lm3639_txpin pin_tx; + + /* output pins */ + enum lm3639_fleds fled_pins; + enum lm3639_bleds bled_pins; + enum lm3639_bled_mode bled_mode; + + void (*pwm_set_intensity) (int brightness, int max_brightness); + int (*pwm_get_intensity) (void); +}; +#endif /* __LINUX_LM3639_H */ diff --git a/include/linux/platform_data/lm8323.h b/include/linux/platform_data/lm8323.h new file mode 100644 index 000000000..478d668bc --- /dev/null +++ b/include/linux/platform_data/lm8323.h @@ -0,0 +1,46 @@ +/* + * lm8323.h - Configuration for LM8323 keypad driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation (version 2 of the License only). + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_LM8323_H +#define __LINUX_LM8323_H + +#include + +/* + * Largest keycode that the chip can send, plus one, + * so keys can be mapped directly at the index of the + * LM8323 keycode instead of subtracting one. + */ +#define LM8323_KEYMAP_SIZE (0x7f + 1) + +#define LM8323_NUM_PWMS 3 + +struct lm8323_platform_data { + int debounce_time; /* Time to watch for key bouncing, in ms. */ + int active_time; /* Idle time until sleep, in ms. */ + + int size_x; + int size_y; + bool repeat; + const unsigned short *keymap; + + const char *pwm_names[LM8323_NUM_PWMS]; + + const char *name; /* Device name. 
*/ +}; + +#endif /* __LINUX_LM8323_H */ diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h new file mode 100644 index 000000000..1b2ba24e4 --- /dev/null +++ b/include/linux/platform_data/lp855x.h @@ -0,0 +1,149 @@ +/* + * LP855x Backlight Driver + * + * Copyright (C) 2011 Texas Instruments + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef _LP855X_H +#define _LP855X_H + +#define BL_CTL_SHFT (0) +#define BRT_MODE_SHFT (1) +#define BRT_MODE_MASK (0x06) + +/* Enable backlight. Only valid when BRT_MODE=10(I2C only) */ +#define ENABLE_BL (1) +#define DISABLE_BL (0) + +#define I2C_CONFIG(id) id ## _I2C_CONFIG +#define PWM_CONFIG(id) id ## _PWM_CONFIG + +/* DEVICE CONTROL register - LP8550 */ +#define LP8550_PWM_CONFIG (LP8550_PWM_ONLY << BRT_MODE_SHFT) +#define LP8550_I2C_CONFIG ((ENABLE_BL << BL_CTL_SHFT) | \ + (LP8550_I2C_ONLY << BRT_MODE_SHFT)) + +/* DEVICE CONTROL register - LP8551 */ +#define LP8551_PWM_CONFIG LP8550_PWM_CONFIG +#define LP8551_I2C_CONFIG LP8550_I2C_CONFIG + +/* DEVICE CONTROL register - LP8552 */ +#define LP8552_PWM_CONFIG LP8550_PWM_CONFIG +#define LP8552_I2C_CONFIG LP8550_I2C_CONFIG + +/* DEVICE CONTROL register - LP8553 */ +#define LP8553_PWM_CONFIG LP8550_PWM_CONFIG +#define LP8553_I2C_CONFIG LP8550_I2C_CONFIG + +/* CONFIG register - LP8555 */ +#define LP8555_PWM_STANDBY BIT(7) +#define LP8555_PWM_FILTER BIT(6) +#define LP8555_RELOAD_EPROM BIT(3) /* use it if EPROMs should be reset + when the backlight turns on */ +#define LP8555_OFF_OPENLEDS BIT(2) +#define LP8555_PWM_CONFIG LP8555_PWM_ONLY +#define LP8555_I2C_CONFIG LP8555_I2C_ONLY +#define LP8555_COMB1_CONFIG LP8555_COMBINED1 +#define LP8555_COMB2_CONFIG LP8555_COMBINED2 + +/* DEVICE CONTROL register - LP8556 */ +#define LP8556_PWM_CONFIG (LP8556_PWM_ONLY << BRT_MODE_SHFT) +#define LP8556_COMB1_CONFIG (LP8556_COMBINED1 << BRT_MODE_SHFT) +#define LP8556_I2C_CONFIG ((ENABLE_BL << BL_CTL_SHFT) | \ + (LP8556_I2C_ONLY << BRT_MODE_SHFT)) +#define LP8556_COMB2_CONFIG (LP8556_COMBINED2 << BRT_MODE_SHFT) +#define LP8556_FAST_CONFIG BIT(7) /* use it if EPROMs should be maintained + when exiting the low power mode */ + +/* CONFIG register - LP8557 */ +#define LP8557_PWM_STANDBY BIT(7) +#define LP8557_PWM_FILTER BIT(6) +#define LP8557_RELOAD_EPROM BIT(3) /* use it if EPROMs should be reset + when the backlight turns on */ +#define LP8557_OFF_OPENLEDS BIT(2) +#define LP8557_PWM_CONFIG LP8557_PWM_ONLY +#define LP8557_I2C_CONFIG LP8557_I2C_ONLY +#define LP8557_COMB1_CONFIG LP8557_COMBINED1 +#define LP8557_COMB2_CONFIG LP8557_COMBINED2 + +enum lp855x_chip_id { + LP8550, + LP8551, + LP8552, + LP8553, + LP8555, + LP8556, + LP8557, +}; + +enum lp8550_brighntess_source { + LP8550_PWM_ONLY, + LP8550_I2C_ONLY = 2, +}; + +enum lp8551_brighntess_source { + LP8551_PWM_ONLY = LP8550_PWM_ONLY, + LP8551_I2C_ONLY = LP8550_I2C_ONLY, +}; + +enum lp8552_brighntess_source { + LP8552_PWM_ONLY = LP8550_PWM_ONLY, + LP8552_I2C_ONLY = LP8550_I2C_ONLY, +}; + +enum lp8553_brighntess_source { + LP8553_PWM_ONLY = LP8550_PWM_ONLY, + LP8553_I2C_ONLY = LP8550_I2C_ONLY, +}; + +enum lp8555_brightness_source { + LP8555_PWM_ONLY, + LP8555_I2C_ONLY, + LP8555_COMBINED1, /* Brightness register with shaped PWM */ + LP8555_COMBINED2, /* PWM with shaped brightness register */ +}; + +enum lp8556_brightness_source { + LP8556_PWM_ONLY, + LP8556_COMBINED1, /* pwm + i2c before 
the shaper block */ + LP8556_I2C_ONLY, + LP8556_COMBINED2, /* pwm + i2c after the shaper block */ +}; + +enum lp8557_brightness_source { + LP8557_PWM_ONLY, + LP8557_I2C_ONLY, + LP8557_COMBINED1, /* pwm + i2c after the shaper block */ + LP8557_COMBINED2, /* pwm + i2c before the shaper block */ +}; + +struct lp855x_rom_data { + u8 addr; + u8 val; +}; + +/** + * struct lp855x_platform_data + * @name : Backlight driver name. If it is not defined, default name is set. + * @device_control : value of DEVICE CONTROL register + * @initial_brightness : initial value of backlight brightness + * @period_ns : platform specific pwm period value. unit is nano. + Only valid when mode is PWM_BASED. + * @size_program : total size of lp855x_rom_data + * @rom_data : list of new eeprom/eprom registers + */ +struct lp855x_platform_data { + const char *name; + u8 device_control; + u8 initial_brightness; + unsigned int period_ns; + int size_program; + struct lp855x_rom_data *rom_data; +}; + +#endif diff --git a/include/linux/platform_data/lp8727.h b/include/linux/platform_data/lp8727.h new file mode 100644 index 000000000..47128a50e --- /dev/null +++ b/include/linux/platform_data/lp8727.h @@ -0,0 +1,68 @@ +/* + * LP8727 Micro/Mini USB IC with integrated charger + * + * Copyright (C) 2011 Texas Instruments + * Copyright (C) 2011 National Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LP8727_H +#define _LP8727_H + +enum lp8727_eoc_level { + LP8727_EOC_5P, + LP8727_EOC_10P, + LP8727_EOC_16P, + LP8727_EOC_20P, + LP8727_EOC_25P, + LP8727_EOC_33P, + LP8727_EOC_50P, +}; + +enum lp8727_ichg { + LP8727_ICHG_90mA, + LP8727_ICHG_100mA, + LP8727_ICHG_400mA, + LP8727_ICHG_450mA, + LP8727_ICHG_500mA, + LP8727_ICHG_600mA, + LP8727_ICHG_700mA, + LP8727_ICHG_800mA, + LP8727_ICHG_900mA, + LP8727_ICHG_1000mA, +}; + +/** + * struct lp8727_chg_param + * @eoc_level : end of charge level setting + * @ichg : charging current + */ +struct lp8727_chg_param { + enum lp8727_eoc_level eoc_level; + enum lp8727_ichg ichg; +}; + +/** + * struct lp8727_platform_data + * @get_batt_present : check battery status - exists or not + * @get_batt_level : get battery voltage (mV) + * @get_batt_capacity : get battery capacity (%) + * @get_batt_temp : get battery temperature + * @ac : charging parameters for AC type charger + * @usb : charging parameters for USB type charger + * @debounce_msec : interrupt debounce time + */ +struct lp8727_platform_data { + u8 (*get_batt_present)(void); + u16 (*get_batt_level)(void); + u8 (*get_batt_capacity)(void); + u8 (*get_batt_temp)(void); + struct lp8727_chg_param *ac; + struct lp8727_chg_param *usb; + unsigned int debounce_msec; +}; + +#endif diff --git a/include/linux/platform_data/lp8755.h b/include/linux/platform_data/lp8755.h new file mode 100644 index 000000000..a7fd0776c --- /dev/null +++ b/include/linux/platform_data/lp8755.h @@ -0,0 +1,71 @@ +/* + * LP8755 High Performance Power Management Unit Driver:System Interface Driver + * + * Copyright (C) 2012 Texas Instruments + * + * Author: Daniel(Geon Si) Jeong + * G.Shark Jeong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
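[Editorial note, not part of the patch] A hypothetical LP8556 backlight setup could combine the configuration macros and ROM table above roughly as in this sketch; the register address/value pair, brightness and PWM period are assumptions.

#include <linux/kernel.h>
#include <linux/platform_data/lp855x.h>

/* Hypothetical board data, not part of this patch. */
static struct lp855x_rom_data example_rom_data[] = {
	{ .addr = 0xa5, .val = 0x4f },	/* assumed EEPROM override */
};

static struct lp855x_platform_data example_lp8556_pdata = {
	.name			= "lcd-backlight",
	.device_control		= LP8556_PWM_CONFIG,
	.initial_brightness	= 0x7f,
	.period_ns		= 5000000,	/* 200 Hz, PWM-based mode */
	.size_program		= ARRAY_SIZE(example_rom_data),
	.rom_data		= example_rom_data,
};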
+ * + */ + +#ifndef _LP8755_H +#define _LP8755_H + +#include + +#define LP8755_NAME "lp8755-regulator" +/* + *PWR FAULT : power fault detected + *OCP : over current protect activated + *OVP : over voltage protect activated + *TEMP_WARN : thermal warning + *TEMP_SHDN : thermal shutdonw detected + *I_LOAD : current measured + */ +#define LP8755_EVENT_PWR_FAULT REGULATOR_EVENT_FAIL +#define LP8755_EVENT_OCP REGULATOR_EVENT_OVER_CURRENT +#define LP8755_EVENT_OVP 0x10000 +#define LP8755_EVENT_TEMP_WARN 0x2000 +#define LP8755_EVENT_TEMP_SHDN REGULATOR_EVENT_OVER_TEMP +#define LP8755_EVENT_I_LOAD 0x40000 + +enum lp8755_bucks { + LP8755_BUCK0 = 0, + LP8755_BUCK1, + LP8755_BUCK2, + LP8755_BUCK3, + LP8755_BUCK4, + LP8755_BUCK5, + LP8755_BUCK_MAX, +}; + +/** + * multiphase configuration options + */ +enum lp8755_mphase_config { + MPHASE_CONF0, + MPHASE_CONF1, + MPHASE_CONF2, + MPHASE_CONF3, + MPHASE_CONF4, + MPHASE_CONF5, + MPHASE_CONF6, + MPHASE_CONF7, + MPHASE_CONF8, + MPHASE_CONF_MAX +}; + +/** + * struct lp8755_platform_data + * @mphase_type : Multiphase Switcher Configurations. + * @buck_data : buck0~6 init voltage in uV + */ +struct lp8755_platform_data { + int mphase; + struct regulator_init_data *buck_data[LP8755_BUCK_MAX]; +}; +#endif diff --git a/include/linux/platform_data/ltc4245.h b/include/linux/platform_data/ltc4245.h new file mode 100644 index 000000000..56bda4be0 --- /dev/null +++ b/include/linux/platform_data/ltc4245.h @@ -0,0 +1,21 @@ +/* + * Platform Data for LTC4245 hardware monitor chip + * + * Copyright (c) 2010 Ira W. Snyder + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef LINUX_LTC4245_H +#define LINUX_LTC4245_H + +#include + +struct ltc4245_platform_data { + bool use_extra_gpios; +}; + +#endif /* LINUX_LTC4245_H */ diff --git a/include/linux/platform_data/lv5207lp.h b/include/linux/platform_data/lv5207lp.h new file mode 100644 index 000000000..7dc4d9a21 --- /dev/null +++ b/include/linux/platform_data/lv5207lp.h @@ -0,0 +1,19 @@ +/* + * lv5207lp.h - Sanyo LV5207LP LEDs Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LV5207LP_H__ +#define __LV5207LP_H__ + +struct device; + +struct lv5207lp_platform_data { + struct device *fbdev; + unsigned int max_value; + unsigned int def_value; +}; + +#endif diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h new file mode 100644 index 000000000..7815d50c2 --- /dev/null +++ b/include/linux/platform_data/macb.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __MACB_PDATA_H__ +#define __MACB_PDATA_H__ + +#include + +/** + * struct macb_platform_data - platform data for MACB Ethernet + * @phy_mask: phy mask passed when register the MDIO bus + * within the driver + * @phy_irq_pin: PHY IRQ + * @is_rmii: using RMII interface? 
+ * @rev_eth_addr: reverse Ethernet address byte order + * @pclk: platform clock + * @hclk: AHB clock + */ +struct macb_platform_data { + u32 phy_mask; + int phy_irq_pin; + u8 is_rmii; + u8 rev_eth_addr; + struct clk *pclk; + struct clk *hclk; +}; + +#endif /* __MACB_PDATA_H__ */ diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h new file mode 100644 index 000000000..8da8f94ee --- /dev/null +++ b/include/linux/platform_data/max197.h @@ -0,0 +1,26 @@ +/* + * Maxim MAX197 A/D Converter Driver + * + * Copyright (c) 2012 Savoir-faire Linux Inc. + * Vivien Didelot + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * For further information, see the Documentation/hwmon/max197 file. + */ + +#ifndef _PDATA_MAX197_H +#define _PDATA_MAX197_H + +/** + * struct max197_platform_data - MAX197 connectivity info + * @convert: Function used to start a conversion with control byte ctrl. + * It must return the raw data, or a negative error code. + */ +struct max197_platform_data { + int (*convert)(u8 ctrl); +}; + +#endif /* _PDATA_MAX197_H */ diff --git a/include/linux/platform_data/max3421-hcd.h b/include/linux/platform_data/max3421-hcd.h new file mode 100644 index 000000000..5947a6f43 --- /dev/null +++ b/include/linux/platform_data/max3421-hcd.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2014 eGauge Systems LLC + * Contributed by David Mosberger-Tang + * + * Platform-data structure for MAX3421 USB HCD driver. + * + */ +#ifndef MAX3421_HCD_PLAT_H_INCLUDED +#define MAX3421_HCD_PLAT_H_INCLUDED + +/* + * This structure defines the mapping of certain auxiliary functions to the + * MAX3421E GPIO pins. The chip has eight GP inputs and eight GP outputs. + * A value of 0 indicates that the pin is not used/wired to anything. + * + * At this point, the only control the max3421-hcd driver cares about is + * to control Vbus (5V to the peripheral). + */ +struct max3421_hcd_platform_data { + u8 vbus_gpout; /* pin controlling Vbus */ + u8 vbus_active_level; /* level that turns on power */ +}; + +#endif /* MAX3421_HCD_PLAT_H_INCLUDED */ diff --git a/include/linux/platform_data/max6639.h b/include/linux/platform_data/max6639.h new file mode 100644 index 000000000..65bfdb4fd --- /dev/null +++ b/include/linux/platform_data/max6639.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MAX6639_H +#define _LINUX_MAX6639_H + +#include + +/* platform data for the MAX6639 temperature sensor and fan control */ + +struct max6639_platform_data { + bool pwm_polarity; /* Polarity low (0) or high (1, default) */ + int ppr; /* Pulses per rotation 1..4 (default == 2) */ + int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */ +}; + +#endif /* _LINUX_MAX6639_H */ diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h new file mode 100644 index 000000000..ed9d3b3da --- /dev/null +++ b/include/linux/platform_data/max6697.h @@ -0,0 +1,36 @@ +/* + * max6697.h + * Copyright (c) 2012 Guenter Roeck + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
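[Editorial note, not part of the patch] The MAX197 header above leaves the actual bus access to the board through the convert() hook; the assumed sketch below shows the shape of that glue (the hook body, device name and id are made up).

#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/platform_data/max197.h>

/* Hypothetical board glue, not part of this patch. */
static int example_max197_convert(u8 ctrl)
{
	/* write the control byte, wait for EOC, read back the raw sample */
	return 0;	/* placeholder raw value */
}

static struct max197_platform_data example_max197_pdata = {
	.convert = example_max197_convert,
};

static struct platform_device example_max197_device = {
	.name	= "max197",	/* assumed driver name */
	.id	= 0,
	.dev	= {
		.platform_data = &example_max197_pdata,
	},
};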
+ */ + +#ifndef MAX6697_H +#define MAX6697_H + +#include + +/* + * For all bit masks: + * bit 0: local temperature + * bit 1..7: remote temperatures + */ +struct max6697_platform_data { + bool smbus_timeout_disable; /* set to disable SMBus timeouts */ + bool extended_range_enable; /* set to enable extended temp range */ + bool beta_compensation; /* set to enable beta compensation */ + u8 alert_mask; /* set bit to 1 to disable alert */ + u8 over_temperature_mask; /* set bit to 1 to disable */ + u8 resistance_cancellation; /* set bit to 0 to disable + * bit mask for MAX6581, + * boolean for other chips + */ + u8 ideality_mask; /* set bit to 0 to disable */ + u8 ideality_value; /* transistor ideality as per + * MAX6581 datasheet + */ +}; + +#endif /* MAX6697_H */ diff --git a/include/linux/platform_data/max732x.h b/include/linux/platform_data/max732x.h new file mode 100644 index 000000000..f231c635f --- /dev/null +++ b/include/linux/platform_data/max732x.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_I2C_MAX732X_H +#define __LINUX_I2C_MAX732X_H + +/* platform data for the MAX732x 8/16-bit I/O expander driver */ + +struct max732x_platform_data { + /* number of the first GPIO */ + unsigned gpio_base; + + /* interrupt base */ + int irq_base; + + void *context; /* param to setup/teardown */ + + int (*setup)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); +}; +#endif /* __LINUX_I2C_MAX732X_H */ diff --git a/include/linux/platform_data/mcs.h b/include/linux/platform_data/mcs.h new file mode 100644 index 000000000..61bb18a4f --- /dev/null +++ b/include/linux/platform_data/mcs.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim + * Author: HeungJun Kim + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
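[Editorial note, not part of the patch] The MAX732x setup()/teardown() hooks above are called with the expander's GPIO range; the assumed sketch below shows a minimal board-side use (GPIO base and the hook body are made up).

#include <linux/i2c.h>
#include <linux/platform_data/max732x.h>

/* Hypothetical board hooks, not part of this patch. */
static int example_max732x_setup(struct i2c_client *client,
				 unsigned gpio, unsigned ngpio,
				 void *context)
{
	/* claim or preconfigure the freshly registered GPIOs here */
	return 0;
}

static struct max732x_platform_data example_max732x_pdata = {
	.gpio_base	= 200,	/* assumed number of the first GPIO */
	.setup		= example_max732x_setup,
};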
+ * + */ + +#ifndef __LINUX_MCS_H +#define __LINUX_MCS_H + +#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff)) +#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff) +#define MCS_KEY_CODE(v) ((v) & 0xffff) + +struct mcs_platform_data { + void (*poweron)(bool); + void (*cfg_pin)(void); + + /* touchscreen */ + unsigned int x_size; + unsigned int y_size; + + /* touchkey */ + const u32 *keymap; + unsigned int keymap_size; + unsigned int key_maxval; + bool no_autorepeat; +}; + +#endif /* __LINUX_MCS_H */ diff --git a/include/linux/platform_data/mdio-bcm-unimac.h b/include/linux/platform_data/mdio-bcm-unimac.h new file mode 100644 index 000000000..8a5f9f0b2 --- /dev/null +++ b/include/linux/platform_data/mdio-bcm-unimac.h @@ -0,0 +1,13 @@ +#ifndef __MDIO_BCM_UNIMAC_PDATA_H +#define __MDIO_BCM_UNIMAC_PDATA_H + +struct unimac_mdio_pdata { + u32 phy_mask; + int (*wait_func)(void *data); + void *wait_func_data; + const char *bus_name; +}; + +#define UNIMAC_MDIO_DRV_NAME "unimac-mdio" + +#endif /* __MDIO_BCM_UNIMAC_PDATA_H */ diff --git a/include/linux/platform_data/media/camera-mx2.h b/include/linux/platform_data/media/camera-mx2.h new file mode 100644 index 000000000..7ded6f1f7 --- /dev/null +++ b/include/linux/platform_data/media/camera-mx2.h @@ -0,0 +1,44 @@ +/* + * mx2-cam.h - i.MX27/i.MX25 camera driver header file + * + * Copyright (C) 2003, Intel Corporation + * Copyright (C) 2008, Sascha Hauer + * Copyright (C) 2010, Baruch Siach + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef __MACH_MX2_CAM_H_ +#define __MACH_MX2_CAM_H_ + +#define MX2_CAMERA_EXT_VSYNC (1 << 1) +#define MX2_CAMERA_CCIR (1 << 2) +#define MX2_CAMERA_CCIR_INTERLACE (1 << 3) +#define MX2_CAMERA_HSYNC_HIGH (1 << 4) +#define MX2_CAMERA_GATED_CLOCK (1 << 5) +#define MX2_CAMERA_INV_DATA (1 << 6) +#define MX2_CAMERA_PCLK_SAMPLE_RISING (1 << 7) + +/** + * struct mx2_camera_platform_data - optional platform data for mx2_camera + * @flags: any combination of MX2_CAMERA_* + * @clk: clock rate of the csi block / 2 + */ +struct mx2_camera_platform_data { + unsigned long flags; + unsigned long clk; +}; + +#endif /* __MACH_MX2_CAM_H_ */ diff --git a/include/linux/platform_data/media/camera-mx3.h b/include/linux/platform_data/media/camera-mx3.h new file mode 100644 index 000000000..a910dadc8 --- /dev/null +++ b/include/linux/platform_data/media/camera-mx3.h @@ -0,0 +1,52 @@ +/* + * mx3_camera.h - i.MX3x camera driver header file + * + * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
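[Editorial note, not part of the patch] MCS_KEY_MAP() packs a sensed value and a key code into one u32, so a hypothetical touchkey map looks like the sketch below; the values, key codes and include paths are assumptions.

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/platform_data/mcs.h>

/* Hypothetical touchkey map, not part of this patch. */
static const u32 example_touchkey_map[] = {
	MCS_KEY_MAP(1, KEY_MENU),	/* sensed value 1 -> KEY_MENU */
	MCS_KEY_MAP(2, KEY_BACK),	/* sensed value 2 -> KEY_BACK */
};

static struct mcs_platform_data example_mcs_pdata = {
	.keymap		= example_touchkey_map,
	.keymap_size	= ARRAY_SIZE(example_touchkey_map),
	.key_maxval	= 2,
	.no_autorepeat	= true,
};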
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _MX3_CAMERA_H_ +#define _MX3_CAMERA_H_ + +#include + +#define MX3_CAMERA_CLK_SRC 1 +#define MX3_CAMERA_EXT_VSYNC 2 +#define MX3_CAMERA_DP 4 +#define MX3_CAMERA_PCP 8 +#define MX3_CAMERA_HSP 0x10 +#define MX3_CAMERA_VSP 0x20 +#define MX3_CAMERA_DATAWIDTH_4 0x40 +#define MX3_CAMERA_DATAWIDTH_8 0x80 +#define MX3_CAMERA_DATAWIDTH_10 0x100 +#define MX3_CAMERA_DATAWIDTH_15 0x200 + +#define MX3_CAMERA_DATAWIDTH_MASK (MX3_CAMERA_DATAWIDTH_4 | MX3_CAMERA_DATAWIDTH_8 | \ + MX3_CAMERA_DATAWIDTH_10 | MX3_CAMERA_DATAWIDTH_15) + +struct v4l2_async_subdev; + +/** + * struct mx3_camera_pdata - i.MX3x camera platform data + * @flags: MX3_CAMERA_* flags + * @mclk_10khz: master clock frequency in 10kHz units + * @dma_dev: IPU DMA device to match against in channel allocation + */ +struct mx3_camera_pdata { + unsigned long flags; + unsigned long mclk_10khz; + struct device *dma_dev; + struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */ + int *asd_sizes; /* 0-terminated array of asd group sizes */ +}; + +#endif diff --git a/include/linux/platform_data/media/camera-pxa.h b/include/linux/platform_data/media/camera-pxa.h new file mode 100644 index 000000000..ce5d90e1a --- /dev/null +++ b/include/linux/platform_data/media/camera-pxa.h @@ -0,0 +1,46 @@ +/* + camera.h - PXA camera driver header file + + Copyright (C) 2003, Intel Corporation + Copyright (C) 2008, Guennadi Liakhovetski + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#ifndef __ASM_ARCH_CAMERA_H_ +#define __ASM_ARCH_CAMERA_H_ + +#define PXA_CAMERA_MASTER 1 +#define PXA_CAMERA_DATAWIDTH_4 2 +#define PXA_CAMERA_DATAWIDTH_5 4 +#define PXA_CAMERA_DATAWIDTH_8 8 +#define PXA_CAMERA_DATAWIDTH_9 0x10 +#define PXA_CAMERA_DATAWIDTH_10 0x20 +#define PXA_CAMERA_PCLK_EN 0x40 +#define PXA_CAMERA_MCLK_EN 0x80 +#define PXA_CAMERA_PCP 0x100 +#define PXA_CAMERA_HSP 0x200 +#define PXA_CAMERA_VSP 0x400 + +struct pxacamera_platform_data { + unsigned long flags; + unsigned long mclk_10khz; + int sensor_i2c_adapter_id; + int sensor_i2c_address; +}; + +extern void pxa_set_camera_info(struct pxacamera_platform_data *); + +#endif /* __ASM_ARCH_CAMERA_H_ */ diff --git a/include/linux/platform_data/media/coda.h b/include/linux/platform_data/media/coda.h new file mode 100644 index 000000000..6ad4410d9 --- /dev/null +++ b/include/linux/platform_data/media/coda.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2013 Philipp Zabel, Pengutronix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef PLATFORM_CODA_H +#define PLATFORM_CODA_H + +struct device; + +struct coda_platform_data { + struct device *iram_dev; +}; + +#endif diff --git a/include/linux/platform_data/media/mmp-camera.h b/include/linux/platform_data/media/mmp-camera.h new file mode 100644 index 000000000..d2d3a443e --- /dev/null +++ b/include/linux/platform_data/media/mmp-camera.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Information for the Marvell Armada MMP camera + */ + +#include + +enum dphy3_algo { + DPHY3_ALGO_DEFAULT = 0, + DPHY3_ALGO_PXA910, + DPHY3_ALGO_PXA2128 +}; + +struct mmp_camera_platform_data { + struct platform_device *i2c_device; + int sensor_power_gpio; + int sensor_reset_gpio; + enum v4l2_mbus_type bus_type; + int mclk_min; /* The minimal value of MCLK */ + int mclk_src; /* which clock source the MCLK derives from */ + int mclk_div; /* Clock Divider Value for MCLK */ + /* + * MIPI support + */ + int dphy[3]; /* DPHY: CSI2_DPHY3, CSI2_DPHY5, CSI2_DPHY6 */ + enum dphy3_algo dphy3_algo; /* algos for calculate CSI2_DPHY3 */ + int lane; /* ccic used lane number; 0 means DVP mode */ + int lane_clk; +}; diff --git a/include/linux/platform_data/media/omap1_camera.h b/include/linux/platform_data/media/omap1_camera.h new file mode 100644 index 000000000..819767cf0 --- /dev/null +++ b/include/linux/platform_data/media/omap1_camera.h @@ -0,0 +1,35 @@ +/* + * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface + * + * Copyright (C) 2010, Janusz Krzysztofik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MEDIA_OMAP1_CAMERA_H_ +#define __MEDIA_OMAP1_CAMERA_H_ + +#include + +#define OMAP1_CAMERA_IOSIZE 0x1c + +enum omap1_cam_vb_mode { + OMAP1_CAM_DMA_CONTIG = 0, + OMAP1_CAM_DMA_SG, +}; + +#define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 
3 : 2) + +struct omap1_cam_platform_data { + unsigned long camexclk_khz; + unsigned long lclk_khz_max; + unsigned long flags; +}; + +#define OMAP1_CAMERA_LCLK_RISING BIT(0) +#define OMAP1_CAMERA_RST_LOW BIT(1) +#define OMAP1_CAMERA_RST_HIGH BIT(2) + +#endif /* __MEDIA_OMAP1_CAMERA_H_ */ diff --git a/include/linux/platform_data/media/omap4iss.h b/include/linux/platform_data/media/omap4iss.h new file mode 100644 index 000000000..2a511a8fc --- /dev/null +++ b/include/linux/platform_data/media/omap4iss.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_ARM_PLAT_OMAP4_ISS_H +#define ARCH_ARM_PLAT_OMAP4_ISS_H + +#include + +struct iss_device; + +enum iss_interface_type { + ISS_INTERFACE_CSI2A_PHY1, + ISS_INTERFACE_CSI2B_PHY2, +}; + +/** + * struct iss_csiphy_lane: CSI2 lane position and polarity + * @pos: position of the lane + * @pol: polarity of the lane + */ +struct iss_csiphy_lane { + u8 pos; + u8 pol; +}; + +#define ISS_CSIPHY1_NUM_DATA_LANES 4 +#define ISS_CSIPHY2_NUM_DATA_LANES 1 + +/** + * struct iss_csiphy_lanes_cfg - CSI2 lane configuration + * @data: Configuration of one or two data lanes + * @clk: Clock lane configuration + */ +struct iss_csiphy_lanes_cfg { + struct iss_csiphy_lane data[ISS_CSIPHY1_NUM_DATA_LANES]; + struct iss_csiphy_lane clk; +}; + +/** + * struct iss_csi2_platform_data - CSI2 interface platform data + * @crc: Enable the cyclic redundancy check + * @vpclk_div: Video port output clock control + */ +struct iss_csi2_platform_data { + unsigned crc:1; + unsigned vpclk_div:2; + struct iss_csiphy_lanes_cfg lanecfg; +}; + +struct iss_subdev_i2c_board_info { + struct i2c_board_info *board_info; + int i2c_adapter_id; +}; + +struct iss_v4l2_subdevs_group { + struct iss_subdev_i2c_board_info *subdevs; + enum iss_interface_type interface; + union { + struct iss_csi2_platform_data csi2; + } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */ +}; + +struct iss_platform_data { + struct iss_v4l2_subdevs_group *subdevs; + void (*set_constraints)(struct iss_device *iss, bool enable); +}; + +#endif diff --git a/include/linux/platform_data/media/s5p_hdmi.h b/include/linux/platform_data/media/s5p_hdmi.h new file mode 100644 index 000000000..bb9cacb0c --- /dev/null +++ b/include/linux/platform_data/media/s5p_hdmi.h @@ -0,0 +1,36 @@ +/* + * Driver header for S5P HDMI chip. + * + * Copyright (c) 2011 Samsung Electronics, Co. Ltd + * Contact: Tomasz Stanislawski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
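[Editorial note, not part of the patch] The nested OMAP4 ISS CSI-2 structures above compose as in the assumed sketch below, here wiring the clock lane to position 1 and a single data lane to position 2 on the CSI2A/PHY1 interface; the lane positions are assumptions.

#include <linux/platform_data/media/omap4iss.h>

/* Hypothetical CSI-2 wiring, not part of this patch. */
static struct iss_v4l2_subdevs_group example_iss_subdevs[] = {
	{
		.interface = ISS_INTERFACE_CSI2A_PHY1,
		.bus = { .csi2 = {
			.lanecfg = {
				.clk	 = { .pos = 1, .pol = 0 },
				.data[0] = { .pos = 2, .pol = 0 },
			},
		} },
	},
	{ /* sentinel */ },
};

static struct iss_platform_data example_iss_pdata = {
	.subdevs = example_iss_subdevs,
};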
+ */ + +#ifndef S5P_HDMI_H +#define S5P_HDMI_H + +struct i2c_board_info; + +/** + * struct s5p_hdmi_platform_data - S5P HDMI driver platform data + * @hdmiphy_bus: controller id for HDMIPHY bus + * @hdmiphy_info: template for HDMIPHY I2C device + * @mhl_bus: controller id for MHL control bus + * @mhl_info: template for MHL I2C device + * @hpd_gpio: GPIO for Hot-Plug-Detect pin + * + * A NULL pointer for the *_info fields indicates that + * the corresponding chip is not present + */ +struct s5p_hdmi_platform_data { + int hdmiphy_bus; + struct i2c_board_info *hdmiphy_info; + int mhl_bus; + struct i2c_board_info *mhl_info; + int hpd_gpio; +}; + +#endif /* S5P_HDMI_H */ diff --git a/include/linux/platform_data/media/si4713.h b/include/linux/platform_data/media/si4713.h new file mode 100644 index 000000000..932668ad5 --- /dev/null +++ b/include/linux/platform_data/media/si4713.h @@ -0,0 +1,48 @@ +/* + * include/linux/platform_data/media/si4713.h + * + * Board-related data definitions for the Si4713 I2C device driver. + * + * Copyright (c) 2009 Nokia Corporation + * Contact: Eduardo Valentin + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + * + */ + +#ifndef SI4713_H +#define SI4713_H + +/* + * The SI4713 I2C chip has a fixed slave address of 0xc6 or 0x22, + * i.e. 0x63 or 0x11 in the 7-bit notation used below. + */ +#define SI4713_I2C_ADDR_BUSEN_HIGH 0x63 +#define SI4713_I2C_ADDR_BUSEN_LOW 0x11 + +/* + * Platform dependent definition + */ +struct si4713_platform_data { + bool is_platform_device; +}; + +/* + * Structure to query for Received Noise Level (RNL). + */ +struct si4713_rnl { + __u32 index; /* modulator index */ + __u32 frequency; /* frequency to perform rnl measurement */ + __s32 rnl; /* result of measurement in dBuV */ + __u32 reserved[4]; /* drivers and apps must init this to 0 */ +}; + +/* + * This is the ioctl number to query for rnl. Users must pass a + * struct si4713_rnl pointer specifying the desired frequency in the 'frequency' + * field, following driver capabilities (i.e. V4L2_TUNER_CAP_LOW). + * The driver must return the measured value in the same structure, filling the 'rnl' field. + */ +#define SI4713_IOC_MEASURE_RNL _IOWR('V', BASE_VIDIOC_PRIVATE + 0, \ + struct si4713_rnl) + +#endif /* SI4713_H */ diff --git a/include/linux/platform_data/media/soc_camera_platform.h b/include/linux/platform_data/media/soc_camera_platform.h new file mode 100644 index 000000000..1e5065dab --- /dev/null +++ b/include/linux/platform_data/media/soc_camera_platform.h @@ -0,0 +1,83 @@ +/* + * Generic Platform Camera Driver Header + * + * Copyright (C) 2008 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation.
+ */ + +#ifndef __SOC_CAMERA_H__ +#define __SOC_CAMERA_H__ + +#include +#include +#include + +struct device; + +struct soc_camera_platform_info { + const char *format_name; + unsigned long format_depth; + struct v4l2_mbus_framefmt format; + unsigned long mbus_param; + enum v4l2_mbus_type mbus_type; + struct soc_camera_device *icd; + int (*set_capture)(struct soc_camera_platform_info *info, int enable); +}; + +static inline void soc_camera_platform_release(struct platform_device **pdev) +{ + *pdev = NULL; +} + +static inline int soc_camera_platform_add(struct soc_camera_device *icd, + struct platform_device **pdev, + struct soc_camera_link *plink, + void (*release)(struct device *dev), + int id) +{ + struct soc_camera_subdev_desc *ssdd = + (struct soc_camera_subdev_desc *)plink; + struct soc_camera_platform_info *info = ssdd->drv_priv; + int ret; + + if (&icd->sdesc->subdev_desc != ssdd) + return -ENODEV; + + if (*pdev) + return -EBUSY; + + *pdev = platform_device_alloc("soc_camera_platform", id); + if (!*pdev) + return -ENOMEM; + + info->icd = icd; + + (*pdev)->dev.platform_data = info; + (*pdev)->dev.release = release; + + ret = platform_device_add(*pdev); + if (ret < 0) { + platform_device_put(*pdev); + *pdev = NULL; + info->icd = NULL; + } + + return ret; +} + +static inline void soc_camera_platform_del(const struct soc_camera_device *icd, + struct platform_device *pdev, + const struct soc_camera_link *plink) +{ + const struct soc_camera_subdev_desc *ssdd = + (const struct soc_camera_subdev_desc *)plink; + if (&icd->sdesc->subdev_desc != ssdd || !pdev) + return; + + platform_device_unregister(pdev); +} + +#endif /* __SOC_CAMERA_H__ */ diff --git a/include/linux/platform_data/media/timb_radio.h b/include/linux/platform_data/media/timb_radio.h new file mode 100644 index 000000000..a40a6a348 --- /dev/null +++ b/include/linux/platform_data/media/timb_radio.h @@ -0,0 +1,30 @@ +/* + * timb_radio.h Platform struct for the Timberdale radio driver + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _TIMB_RADIO_ +#define _TIMB_RADIO_ 1 + +#include + +struct timb_radio_platform_data { + int i2c_adapter; /* I2C adapter where the tuner and dsp are attached */ + struct i2c_board_info *tuner; + struct i2c_board_info *dsp; +}; + +#endif diff --git a/include/linux/platform_data/media/timb_video.h b/include/linux/platform_data/media/timb_video.h new file mode 100644 index 000000000..70ae43970 --- /dev/null +++ b/include/linux/platform_data/media/timb_video.h @@ -0,0 +1,33 @@ +/* + * timb_video.h Platform struct for the Timberdale video driver + * Copyright (c) 2009-2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _TIMB_VIDEO_ +#define _TIMB_VIDEO_ 1 + +#include + +struct timb_video_platform_data { + int dma_channel; + int i2c_adapter; /* The I2C adapter where the encoder is attached */ + struct { + const char *module_name; + struct i2c_board_info *info; + } encoder; +}; + +#endif diff --git a/include/linux/platform_data/mfd-mcp-sa11x0.h b/include/linux/platform_data/mfd-mcp-sa11x0.h new file mode 100644 index 000000000..747cd6baf --- /dev/null +++ b/include/linux/platform_data/mfd-mcp-sa11x0.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2005 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __MFD_MCP_SA11X0_H +#define __MFD_MCP_SA11X0_H + +#include + +struct mcp_plat_data { + u32 mccr0; + u32 mccr1; + unsigned int sclk_rate; + void *codec_pdata; +}; + +#endif diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h new file mode 100644 index 000000000..84789ca63 --- /dev/null +++ b/include/linux/platform_data/microchip-ksz.h @@ -0,0 +1,29 @@ +/* + * Microchip KSZ series switch platform data + * + * Copyright (C) 2017 + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MICROCHIP_KSZ_H +#define __MICROCHIP_KSZ_H + +#include + +struct ksz_platform_data { + u32 chip_id; + u16 enabled_ports; +}; + +#endif diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h new file mode 100644 index 000000000..19f5cb618 --- /dev/null +++ b/include/linux/platform_data/mlxreg.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Vadim Pasternak + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __LINUX_PLATFORM_DATA_MLXREG_H +#define __LINUX_PLATFORM_DATA_MLXREG_H + +#define MLXREG_CORE_LABEL_MAX_SIZE 32 + +/** + * struct mlxreg_hotplug_device - I2C device data: + * + * @adapter: I2C device adapter; + * @client: I2C device client; + * @brdinfo: device board information; + * @nr: I2C adapter number to which the device is to be attached; + * + * Structure represents I2C hotplug device static data (board topology) and + * dynamic data (related kernel object handles). + */ +struct mlxreg_hotplug_device { + struct i2c_adapter *adapter; + struct i2c_client *client; + struct i2c_board_info *brdinfo; + int nr; +}; + +/** + * struct mlxreg_core_data - attributes control data: + * + * @label: attribute label; + * @reg: attribute register; + * @mask: attribute access mask; + * @bit: attribute effective bit; + * @mode: access mode; + * @np: pointer to the platform node associated with the attribute; + * @hpdev: hotplug device data; + * @health_cntr: dynamic device health indication counter; + * @attached: true if device has been attached after good health indication; + */ +struct mlxreg_core_data { + char label[MLXREG_CORE_LABEL_MAX_SIZE]; + u32 reg; + u32 mask; + u32 bit; + umode_t mode; + struct device_node *np; + struct mlxreg_hotplug_device hpdev; + u8 health_cntr; + bool attached; +}; + +/** + * struct mlxreg_core_item - same type components controlled by the driver: + * + * @data: component data; + * @aggr_mask: group aggregation mask; + * @reg: group interrupt status register; + * @mask: group interrupt mask; + * @cache: last status value for elements from the same group; + * @count: number of available elements in the group; + * @ind: element's index inside the group; + * @inversed: if 0, a signal status of 0 is OK; if 1, a signal status of 1 is OK; + * @health: true if device has health indication, false otherwise; + */ +struct mlxreg_core_item { + struct mlxreg_core_data *data; + u32 aggr_mask; + u32 reg; + u32 mask; + u32 cache; + u8 count; + u8 ind; + u8 inversed; + u8 health; +}; + +/** + * struct mlxreg_core_platform_data - platform data: + * + * @data: instance private data; + * @regmap: register map of parent device; + * @counter: number of instances; + */ +struct mlxreg_core_platform_data { + struct mlxreg_core_data *data; + void *regmap; + int counter; +}; + +/** + * struct mlxreg_core_hotplug_platform_data - 
hotplug platform data: + * + * @items: components of the same type with the hotplug capability; + * @irq: platform interrupt number; + * @regmap: register map of parent device; + * @counter: number of components with the hotplug capability; + * @cell: location of top aggregation interrupt register; + * @mask: top aggregation interrupt common mask; + * @cell_low: location of low aggregation interrupt register; + * @mask_low: low aggregation interrupt common mask; + * @deferred_nr: I2C adapter number that must exist prior to probing; + * @shift_nr: value by which I2C adapter numbers must be incremented; + */ +struct mlxreg_core_hotplug_platform_data { + struct mlxreg_core_item *items; + int irq; + void *regmap; + int counter; + u32 cell; + u32 mask; + u32 cell_low; + u32 mask_low; + int deferred_nr; + int shift_nr; +}; + +#endif /* __LINUX_PLATFORM_DATA_MLXREG_H */ diff --git a/include/linux/platform_data/mmc-davinci.h b/include/linux/platform_data/mmc-davinci.h new file mode 100644 index 000000000..87a8bed3b --- /dev/null +++ b/include/linux/platform_data/mmc-davinci.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Board-specific MMC configuration + */ + +#ifndef _DAVINCI_MMC_H +#define _DAVINCI_MMC_H + +#include +#include + +struct davinci_mmc_config { + /* get_cd()/get_ro() may sleep */ + int (*get_cd)(int module); + int (*get_ro)(int module); + + void (*set_power)(int module, bool on); + + /* wires == 0 is equivalent to wires == 4 (4-bit parallel) */ + u8 wires; + + u32 max_freq; + + /* any additional host capabilities: OR'd into the MMC host capabilities */ + u32 caps; + + /* Number of sg segments */ + u8 nr_sg; +}; +void davinci_setup_mmc(int module, struct davinci_mmc_config *config); + +enum { + MMC_CTLR_VERSION_1 = 0, /* DM644x and DM355 */ + MMC_CTLR_VERSION_2, /* DA830 */ +}; + +#endif diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h new file mode 100644 index 000000000..640dec8b5 --- /dev/null +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -0,0 +1,49 @@ +/* + * Copyright 2010 Wolfram Sang + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#ifndef __ASM_ARCH_IMX_ESDHC_H +#define __ASM_ARCH_IMX_ESDHC_H + +#include + +enum wp_types { + ESDHC_WP_NONE, /* no WP, neither controller nor gpio */ + ESDHC_WP_CONTROLLER, /* mmc controller internal WP */ + ESDHC_WP_GPIO, /* external gpio pin for WP */ +}; + +enum cd_types { + ESDHC_CD_NONE, /* no CD, neither controller nor gpio */ + ESDHC_CD_CONTROLLER, /* mmc controller internal CD */ + ESDHC_CD_GPIO, /* external gpio pin for CD */ + ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */ +}; + +/** + * struct esdhc_platform_data - platform data for esdhc on i.MX + * + * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
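+ *
+ * A minimal, purely illustrative configuration sketch for a slot with a
+ * GPIO card-detect line and no write-protect switch (the GPIO number is
+ * a placeholder, not taken from any real board):
+ *
+ *	static struct esdhc_platform_data my_esdhc_pdata = {
+ *		.cd_gpio	= MY_BOARD_SD_CD_GPIO,	/* placeholder */
+ *		.cd_type	= ESDHC_CD_GPIO,
+ *		.wp_type	= ESDHC_WP_NONE,
+ *		.max_bus_width	= 4,
+ *	};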
+ * + * @wp_gpio: gpio for write_protect + * @cd_gpio: gpio for card_detect interrupt + * @wp_type: type of write_protect method (see wp_types enum above) + * @cd_type: type of card_detect method (see cd_types enum above) + */ + +struct esdhc_platform_data { + unsigned int wp_gpio; + unsigned int cd_gpio; + enum wp_types wp_type; + enum cd_types cd_type; + int max_bus_width; + unsigned int delay_line; + unsigned int tuning_step; /* The delay cell steps in tuning procedure */ + unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */ +}; +#endif /* __ASM_ARCH_IMX_ESDHC_H */ diff --git a/include/linux/platform_data/mmc-mxcmmc.h b/include/linux/platform_data/mmc-mxcmmc.h new file mode 100644 index 000000000..ac6773513 --- /dev/null +++ b/include/linux/platform_data/mmc-mxcmmc.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ASMARM_ARCH_MMC_H +#define ASMARM_ARCH_MMC_H + +#include +#include + +struct device; + +/* board specific SDHC data, optional. + * If not present, a writable card with 3,3V is assumed. + */ +struct imxmmc_platform_data { + /* Return values for the get_ro callback should be: + * 0 for a read/write card + * 1 for a read-only card + * -ENOSYS when not supported (equal to NULL callback) + * or a negative errno value when something bad happened + */ + int (*get_ro)(struct device *); + + /* board specific hook to (de)initialize the SD slot. + * The board code can call 'handler' on a card detection + * change giving data as argument. + */ + int (*init)(struct device *dev, irq_handler_t handler, void *data); + void (*exit)(struct device *dev, void *data); + + /* available voltages. If not given, assume + * MMC_VDD_32_33 | MMC_VDD_33_34 + */ + unsigned int ocr_avail; + + /* adjust slot voltage */ + void (*setpower)(struct device *, unsigned int vdd); + + /* enable card detect using DAT3 */ + int dat3_card_detect; +}; + +#endif diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h new file mode 100644 index 000000000..929469291 --- /dev/null +++ b/include/linux/platform_data/mmc-omap.h @@ -0,0 +1,121 @@ +/* + * MMC definitions for OMAP2 + * + * Copyright (C) 2006 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define OMAP_MMC_MAX_SLOTS 2 + +struct mmc_card; + +struct omap_mmc_platform_data { + /* back-link to device */ + struct device *dev; + + /* number of slots per controller */ + unsigned nr_slots:2; + + /* set if your board has components or wiring that limits the + * maximum frequency on the MMC bus */ + unsigned int max_freq; + + /* switch the bus to a new slot */ + int (*switch_slot)(struct device *dev, int slot); + /* initialize board-specific MMC functionality, can be NULL if + * not supported */ + int (*init)(struct device *dev); + void (*cleanup)(struct device *dev); + void (*shutdown)(struct device *dev); + + /* Return context loss count due to PM states changing */ + int (*get_context_loss_count)(struct device *dev); + + /* Integrating attributes from the omap_hwmod layer */ + u8 controller_flags; + + /* Register offset deviation */ + u16 reg_offset; + + struct omap_mmc_slot_data { + + /* + * 4/8 wires and any additional host capabilities + * need to OR'd all capabilities (ref. 
linux/mmc/host.h) + */ + u8 wires; /* Used for the MMC driver on omap1 and 2420 */ + u32 caps; /* Used for the MMC driver on 2430 and later */ + u32 pm_caps; /* PM capabilities of the mmc */ + + /* + * nomux means "standard" muxing is wrong on this board, and + * that board-specific code handled it before common init logic. + */ + unsigned nomux:1; + + /* switch pin can be for card detect (default) or card cover */ + unsigned cover:1; + + /* use the internal clock */ + unsigned internal_clock:1; + + /* nonremovable e.g. eMMC */ + unsigned nonremovable:1; + + /* Try to sleep or power off when possible */ + unsigned power_saving:1; + + /* If using power_saving and the MMC power is not to go off */ + unsigned no_off:1; + + /* eMMC does not handle power off when not in sleep state */ + unsigned no_regulator_off_init:1; + + /* Regulator off remapped to sleep */ + unsigned vcc_aux_disable_is_sleep:1; + + /* we can put the features above into this variable */ +#define MMC_OMAP7XX (1 << 3) +#define MMC_OMAP15XX (1 << 4) +#define MMC_OMAP16XX (1 << 5) + unsigned features; + + int switch_pin; /* gpio (card detect) */ + int gpio_wp; /* gpio (write protect) */ + + int (*set_bus_mode)(struct device *dev, int slot, int bus_mode); + int (*set_power)(struct device *dev, int slot, + int power_on, int vdd); + int (*get_ro)(struct device *dev, int slot); + void (*remux)(struct device *dev, int slot, int power_on); + /* Call back before enabling / disabling regulators */ + void (*before_set_reg)(struct device *dev, int slot, + int power_on, int vdd); + /* Call back after enabling / disabling regulators */ + void (*after_set_reg)(struct device *dev, int slot, + int power_on, int vdd); + /* if we have special card, init it using this callback */ + void (*init_card)(struct mmc_card *card); + + /* return MMC cover switch state, can be NULL if not supported. 
+ * + * possible return values: + * 0 - closed + * 1 - open + */ + int (*get_cover_state)(struct device *dev, int slot); + + const char *name; + u32 ocr_mask; + + /* Card detection IRQs */ + int card_detect_irq; + int (*card_detect)(struct device *dev, int slot); + + unsigned int ban_openended:1; + + } slots[OMAP_MMC_MAX_SLOTS]; +}; diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h new file mode 100644 index 000000000..752f97c62 --- /dev/null +++ b/include/linux/platform_data/mmc-pxamci.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ASMARM_ARCH_MMC_H +#define ASMARM_ARCH_MMC_H + +#include +#include + +struct device; +struct mmc_host; + +struct pxamci_platform_data { + unsigned int ocr_mask; /* available voltages */ + unsigned long detect_delay_ms; /* delay in milliseconds before detecting cards after interrupt */ + int (*init)(struct device *, irq_handler_t, void *); + int (*get_ro)(struct device *); + int (*setpower)(struct device *, unsigned int); + void (*exit)(struct device *, void *); + int gpio_card_detect; /* gpio detecting card insertion */ + int gpio_card_ro; /* gpio detecting read only toggle */ + bool gpio_card_ro_invert; /* gpio ro is inverted */ + int gpio_power; /* gpio powering up MMC bus */ + bool gpio_power_invert; /* gpio power is inverted */ +}; + +extern void pxa_set_mci_info(struct pxamci_platform_data *info); +extern void pxa3xx_set_mci2_info(struct pxamci_platform_data *info); +extern void pxa3xx_set_mci3_info(struct pxamci_platform_data *info); + +#endif diff --git a/include/linux/platform_data/mmc-s3cmci.h b/include/linux/platform_data/mmc-s3cmci.h new file mode 100644 index 000000000..b68d9f0bd --- /dev/null +++ b/include/linux/platform_data/mmc-s3cmci.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ARCH_MCI_H +#define _ARCH_MCI_H + +/** + * struct s3c24xx_mci_pdata - sd/mmc controller platform data + * @no_wprotect: Set this to indicate there is no write-protect switch. + * @no_detect: Set this if there is no detect switch. + * @wprotect_invert: Invert the default sense of the write protect switch. + * @detect_invert: Invert the default sense of the card detect switch. + * @use_dma: Set to allow the use of DMA. + * @gpio_detect: GPIO number for the card detect line. + * @gpio_wprotect: GPIO number for the write protect line. + * @ocr_avail: The mask of the available power states, non-zero to use. + * @set_power: Callback to control the power mode. + * + * The @gpio_detect is used for card detection when @no_detect is unset, + * and the default sense is that 0 returned from gpio_get_value() means + * that a card is inserted. If @detect_invert is set, then the value from + * gpio_get_value() is inverted, which makes 1 mean card inserted. + * + * The driver will use @gpio_wprotect to signal whether the card is write + * protected if @no_wprotect is not set. A 0 returned from gpio_get_value() + * means the card is read/write, and 1 means read-only. The @wprotect_invert + * will invert the value returned from gpio_get_value(). + * + * Card power is set by @ocr_avail, using MMC_VDD_ constants if it is set + * to a non-zero value, otherwise the default of 3.2-3.4V is used.
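+ *
+ * A board-code sketch, illustrative only (the GPIO numbers and the
+ * set_power() implementation are placeholders, not taken from any real
+ * machine):
+ *
+ *	static struct s3c24xx_mci_pdata my_mci_pdata __initdata = {
+ *		.gpio_detect	= MY_BOARD_SD_CD_GPIO,		/* placeholder */
+ *		.gpio_wprotect	= MY_BOARD_SD_WP_GPIO,		/* placeholder */
+ *		.ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34,
+ *		.set_power	= my_board_mci_set_power,	/* placeholder */
+ *	};
+ *
+ *	s3c24xx_mci_set_platdata(&my_mci_pdata);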
+ */ +struct s3c24xx_mci_pdata { + unsigned int no_wprotect:1; + unsigned int no_detect:1; + unsigned int wprotect_invert:1; + unsigned int detect_invert:1; /* set => detect active high */ + unsigned int use_dma:1; + + unsigned int gpio_detect; + unsigned int gpio_wprotect; + unsigned long ocr_avail; + void (*set_power)(unsigned char power_mode, + unsigned short vdd); +}; + +/** + * s3c24xx_mci_set_platdata - set platform data for mmc/sdi device + * @pdata: The platform data + * + * Copy the platform data supplied by @pdata so that this can be marked + * __initdata. + */ +extern void s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata); + +#endif /* _ARCH_MCI_H */ diff --git a/include/linux/platform_data/mmc-sdhci-s3c.h b/include/linux/platform_data/mmc-sdhci-s3c.h new file mode 100644 index 000000000..74a54eeb2 --- /dev/null +++ b/include/linux/platform_data/mmc-sdhci-s3c.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PLATFORM_DATA_SDHCI_S3C_H +#define __PLATFORM_DATA_SDHCI_S3C_H + +struct platform_device; + +enum cd_types { + S3C_SDHCI_CD_INTERNAL, /* use mmc internal CD line */ + S3C_SDHCI_CD_EXTERNAL, /* use external callback */ + S3C_SDHCI_CD_GPIO, /* use external gpio pin for CD line */ + S3C_SDHCI_CD_NONE, /* no CD line, use polling to detect card */ + S3C_SDHCI_CD_PERMANENT, /* no CD line, card permanently wired to host */ +}; + +/** + * struct s3c_sdhci_platdata - Platform device data for Samsung SDHCI + * @max_width: The maximum number of data bits supported. + * @host_caps: Standard MMC host capabilities bit field. + * @host_caps2: The second standard MMC host capabilities bit field. + * @pm_caps: Standard MMC host PM capabilities bit field. + * @cd_type: Type of Card Detection method (see cd_types enum above) + * @ext_cd_init: Initialize external card detect subsystem. Called on + * sdhci-s3c driver probe when cd_type == S3C_SDHCI_CD_EXTERNAL. + * notify_func argument is a callback to the sdhci-s3c driver + * that triggers the card detection event. Callback arguments: + * dev is pointer to platform device of the host controller, + * state is new state of the card (0 - removed, 1 - inserted). + * @ext_cd_cleanup: Cleanup external card detect subsystem. Called on + * sdhci-s3c driver remove when cd_type == S3C_SDHCI_CD_EXTERNAL. + * notify_func argument is the same callback as for ext_cd_init. + * @ext_cd_gpio: gpio pin used for external CD line, valid only if + * cd_type == S3C_SDHCI_CD_GPIO + * @ext_cd_gpio_invert: invert values for external CD gpio line + * @cfg_gpio: Configure the GPIO for a specific card bit-width + * + * Initialisation data, specific to either the machine or the platform, + * for the device driver to use or call back into when configuring GPIO + * lines or card speed information. + */ +struct s3c_sdhci_platdata { + unsigned int max_width; + unsigned int host_caps; + unsigned int host_caps2; + unsigned int pm_caps; + enum cd_types cd_type; + + int ext_cd_gpio; + bool ext_cd_gpio_invert; + int (*ext_cd_init)(void (*notify_func)(struct platform_device *, + int state)); + int (*ext_cd_cleanup)(void (*notify_func)(struct platform_device *, + int state)); + + void (*cfg_gpio)(struct platform_device *dev, int width); +}; + + +#endif /* __PLATFORM_DATA_SDHCI_S3C_H */ diff --git a/include/linux/platform_data/mmp_audio.h b/include/linux/platform_data/mmp_audio.h new file mode 100644 index 000000000..0f25d165a --- /dev/null +++ b/include/linux/platform_data/mmp_audio.h @@ -0,0 +1,22 @@ +/* + * MMP Platform AUDIO Management + * + * Copyright (c) 2011 Marvell Semiconductors Inc.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef MMP_AUDIO_H +#define MMP_AUDIO_H + +struct mmp_audio_platdata { + u32 period_max_capture; + u32 buffer_max_capture; + u32 period_max_playback; + u32 buffer_max_playback; +}; + +#endif /* MMP_AUDIO_H */ diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h new file mode 100644 index 000000000..6397b9c81 --- /dev/null +++ b/include/linux/platform_data/mmp_dma.h @@ -0,0 +1,24 @@ +/* + * MMP Platform DMA Management + * + * Copyright (c) 2011 Marvell Semiconductors Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef MMP_DMA_H +#define MMP_DMA_H + +struct dma_slave_map; + +struct mmp_dma_platdata { + int dma_channels; + int nb_requestors; + int slave_map_cnt; + const struct dma_slave_map *slave_map; +}; + +#endif /* MMP_DMA_H */ diff --git a/include/linux/platform_data/mouse-pxa930_trkball.h b/include/linux/platform_data/mouse-pxa930_trkball.h new file mode 100644 index 000000000..ba0ac7a30 --- /dev/null +++ b/include/linux/platform_data/mouse-pxa930_trkball.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ARCH_PXA930_TRKBALL_H +#define __ASM_ARCH_PXA930_TRKBALL_H + +struct pxa930_trkball_platform_data { + int x_filter; + int y_filter; +}; + +#endif /* __ASM_ARCH_PXA930_TRKBALL_H */ + diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h new file mode 100644 index 000000000..a403dd51d --- /dev/null +++ b/include/linux/platform_data/mtd-davinci-aemif.h @@ -0,0 +1,36 @@ +/* + * TI DaVinci AEMIF support + * + * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/ + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ +#ifndef _MACH_DAVINCI_AEMIF_H +#define _MACH_DAVINCI_AEMIF_H + +#include + +#define NRCSR_OFFSET 0x00 +#define AWCCR_OFFSET 0x04 +#define A1CR_OFFSET 0x10 + +#define ACR_ASIZE_MASK 0x3 +#define ACR_EW_MASK BIT(30) +#define ACR_SS_MASK BIT(31) + +/* All timings in nanoseconds */ +struct davinci_aemif_timing { + u8 wsetup; + u8 wstrobe; + u8 whold; + + u8 rsetup; + u8 rstrobe; + u8 rhold; + + u8 ta; +}; + +#endif diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h new file mode 100644 index 000000000..1bbfa27cc --- /dev/null +++ b/include/linux/platform_data/mtd-davinci.h @@ -0,0 +1,100 @@ +/* + * mach-davinci/nand.h + * + * Copyright © 2006 Texas Instruments. + * + * Ported to 2.6.23 Copyright © 2008 by + * Sander Huijsen + * Troy Kisky + * Dirk Behme + * + * -------------------------------------------------------------------------- + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __ARCH_ARM_DAVINCI_NAND_H +#define __ARCH_ARM_DAVINCI_NAND_H + +#include + +#define NANDFCR_OFFSET 0x60 +#define NANDFSR_OFFSET 0x64 +#define NANDF1ECC_OFFSET 0x70 + +/* 4-bit ECC syndrome registers */ +#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc +#define NAND_4BIT_ECC1_OFFSET 0xc0 +#define NAND_4BIT_ECC2_OFFSET 0xc4 +#define NAND_4BIT_ECC3_OFFSET 0xc8 +#define NAND_4BIT_ECC4_OFFSET 0xcc +#define NAND_ERR_ADD1_OFFSET 0xd0 +#define NAND_ERR_ADD2_OFFSET 0xd4 +#define NAND_ERR_ERRVAL1_OFFSET 0xd8 +#define NAND_ERR_ERRVAL2_OFFSET 0xdc + +/* NOTE: boards don't need to use these address bits + * for ALE/CLE unless they support booting from NAND. + * They're used unless platform data overrides them. + */ +#define MASK_ALE 0x08 +#define MASK_CLE 0x10 + +struct davinci_nand_pdata { /* platform_data */ + uint32_t mask_ale; + uint32_t mask_cle; + + /* + * 0-indexed chip-select number of the asynchronous + * interface to which the NAND device has been connected. + * + * So, if you have NAND connected to CS3 of DA850, you + * will pass '1' here. Since the asynchronous interface + * on DA850 starts from CS2. + */ + uint32_t core_chipsel; + + /* for packages using two chipselects */ + uint32_t mask_chipsel; + + /* board's default static partition info */ + struct mtd_partition *parts; + unsigned nr_parts; + + /* none == NAND_ECC_NONE (strongly *not* advised!!) + * soft == NAND_ECC_SOFT + * else == NAND_ECC_HW, according to ecc_bits + * + * All DaVinci-family chips support 1-bit hardware ECC. + * Newer ones also support 4-bit ECC, but are awkward + * using it with large page chips. + */ + nand_ecc_modes_t ecc_mode; + u8 ecc_bits; + + /* e.g. NAND_BUSWIDTH_16 */ + unsigned options; + /* e.g. NAND_BBT_USE_FLASH */ + unsigned bbt_options; + + /* Main and mirror bbt descriptor overrides */ + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; + + /* Access timings */ + struct davinci_aemif_timing *timing; +}; + +#endif /* __ARCH_ARM_DAVINCI_NAND_H */ diff --git a/include/linux/platform_data/mtd-mxc_nand.h b/include/linux/platform_data/mtd-mxc_nand.h new file mode 100644 index 000000000..6bb96ef16 --- /dev/null +++ b/include/linux/platform_data/mtd-mxc_nand.h @@ -0,0 +1,32 @@ +/* + * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2008 Sascha Hauer, kernel@pengutronix.de + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. 
+ */ + +#ifndef __ASM_ARCH_NAND_H +#define __ASM_ARCH_NAND_H + +#include + +struct mxc_nand_platform_data { + unsigned int width; /* data bus width in bytes */ + unsigned int hw_ecc:1; /* 0 if suppress hardware ECC */ + unsigned int flash_bbt:1; /* set to 1 to use a flash based bbt */ + struct mtd_partition *parts; /* partition table */ + int nr_parts; /* size of parts */ +}; +#endif /* __ASM_ARCH_NAND_H */ diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h new file mode 100644 index 000000000..619df2431 --- /dev/null +++ b/include/linux/platform_data/mtd-nand-omap2.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2006 Micron Technology Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _MTD_NAND_OMAP2_H +#define _MTD_NAND_OMAP2_H + +#include + +#define GPMC_BCH_NUM_REMAINDER 8 + +enum nand_io { + NAND_OMAP_PREFETCH_POLLED = 0, /* prefetch polled mode, default */ + NAND_OMAP_POLLED, /* polled mode, without prefetch */ + NAND_OMAP_PREFETCH_DMA, /* prefetch enabled sDMA mode */ + NAND_OMAP_PREFETCH_IRQ /* prefetch enabled irq mode */ +}; + +enum omap_ecc { + /* + * 1-bit ECC: calculation and correction by SW + * ECC stored at end of spare area + */ + OMAP_ECC_HAM1_CODE_SW = 0, + + /* + * 1-bit ECC: calculation by GPMC, Error detection by Software + * ECC layout compatible with ROM code layout + */ + OMAP_ECC_HAM1_CODE_HW, + /* 4-bit ECC calculation by GPMC, Error detection by Software */ + OMAP_ECC_BCH4_CODE_HW_DETECTION_SW, + /* 4-bit ECC calculation by GPMC, Error detection by ELM */ + OMAP_ECC_BCH4_CODE_HW, + /* 8-bit ECC calculation by GPMC, Error detection by Software */ + OMAP_ECC_BCH8_CODE_HW_DETECTION_SW, + /* 8-bit ECC calculation by GPMC, Error detection by ELM */ + OMAP_ECC_BCH8_CODE_HW, + /* 16-bit ECC calculation by GPMC, Error detection by ELM */ + OMAP_ECC_BCH16_CODE_HW, +}; + +struct gpmc_nand_regs { + void __iomem *gpmc_nand_command; + void __iomem *gpmc_nand_address; + void __iomem *gpmc_nand_data; + void __iomem *gpmc_prefetch_config1; + void __iomem *gpmc_prefetch_config2; + void __iomem *gpmc_prefetch_control; + void __iomem *gpmc_prefetch_status; + void __iomem *gpmc_ecc_config; + void __iomem *gpmc_ecc_control; + void __iomem *gpmc_ecc_size_config; + void __iomem *gpmc_ecc1_result; + void __iomem *gpmc_bch_result0[GPMC_BCH_NUM_REMAINDER]; + void __iomem *gpmc_bch_result1[GPMC_BCH_NUM_REMAINDER]; + void __iomem *gpmc_bch_result2[GPMC_BCH_NUM_REMAINDER]; + void __iomem *gpmc_bch_result3[GPMC_BCH_NUM_REMAINDER]; + void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER]; + void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; + void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; +}; +#endif diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h new file mode 100644 index 000000000..4fd0f592a --- /dev/null +++ b/include/linux/platform_data/mtd-nand-pxa3xx.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ARCH_PXA3XX_NAND_H +#define __ASM_ARCH_PXA3XX_NAND_H + +#include +#include + +/* + * Current pxa3xx_nand controller has two chip select which both be workable but + * historically all platforms remaining on platform data used only one. Switch + * to device tree if you need more. 
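+ *
+ * A board-code sketch, illustrative only (my_nand_partitions is a
+ * placeholder partition table, and keeping the bootloader timings is just
+ * an assumption for the example):
+ *
+ *	static struct pxa3xx_nand_platform_data my_nand_info = {
+ *		.keep_config	= true,
+ *		.flash_bbt	= true,
+ *		.parts		= my_nand_partitions,
+ *		.nr_parts	= ARRAY_SIZE(my_nand_partitions),
+ *	};
+ *
+ *	pxa3xx_set_nand_info(&my_nand_info);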
+ */ +struct pxa3xx_nand_platform_data { + /* Keep OBM/bootloader NFC timing configuration */ + bool keep_config; + /* Use a flash-based bad block table */ + bool flash_bbt; + /* Requested ECC strength and ECC step size */ + int ecc_strength, ecc_step_size; + /* Partitions */ + const struct mtd_partition *parts; + unsigned int nr_parts; +}; + +extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); + +#endif /* __ASM_ARCH_PXA3XX_NAND_H */ diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h new file mode 100644 index 000000000..f8c553f92 --- /dev/null +++ b/include/linux/platform_data/mtd-nand-s3c2410.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2004 Simtec Electronics + * Ben Dooks + * + * S3C2410 - NAND device controller platform_device info + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __MTD_NAND_S3C2410_H +#define __MTD_NAND_S3C2410_H + +#include + +/** + * struct s3c2410_nand_set - define a set of one or more nand chips + * @flash_bbt: Openmoko u-boot can create a Bad Block Table + * Setting this flag will allow the kernel to + * look for it at boot time and also skip the NAND + * scan. + * @options: Default value to set into 'struct nand_chip' options. + * @nr_chips: Number of chips in this set + * @nr_partitions: Number of partitions pointed to by @partitions + * @name: Name of set (optional) + * @nr_map: Map for low-layer logical to physical chip numbers (option) + * @partitions: The mtd partition list + * + * define a set of one or more nand chips registered with an unique mtd. Also + * allows to pass flag to the underlying NAND layer. 'disable_ecc' will trigger + * a warning at boot time. + */ +struct s3c2410_nand_set { + unsigned int flash_bbt:1; + + unsigned int options; + int nr_chips; + int nr_partitions; + char *name; + int *nr_map; + struct mtd_partition *partitions; + struct device_node *of_node; +}; + +struct s3c2410_platform_nand { + /* timing information for controller, all times in nanoseconds */ + + int tacls; /* time for active CLE/ALE to nWE/nOE */ + int twrph0; /* active time for nWE/nOE */ + int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */ + + unsigned int ignore_unset_ecc:1; + + nand_ecc_modes_t ecc_mode; + + int nr_sets; + struct s3c2410_nand_set *sets; + + void (*select_chip)(struct s3c2410_nand_set *, + int chip); +}; + +/** + * s3c_nand_set_platdata() - register NAND platform data. + * @nand: The NAND platform data to register with s3c_device_nand. + * + * This function copies the given NAND platform data, @nand and registers + * it with the s3c_device_nand. This allows @nand to be __initdata. +*/ +extern void s3c_nand_set_platdata(struct s3c2410_platform_nand *nand); + +#endif /*__MTD_NAND_S3C2410_H */ diff --git a/include/linux/platform_data/mtd-orion_nand.h b/include/linux/platform_data/mtd-orion_nand.h new file mode 100644 index 000000000..34828eb85 --- /dev/null +++ b/include/linux/platform_data/mtd-orion_nand.h @@ -0,0 +1,23 @@ +/* + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __MTD_ORION_NAND_H +#define __MTD_ORION_NAND_H + +/* + * Device bus NAND private data + */ +struct orion_nand_data { + struct mtd_partition *parts; + u32 nr_parts; + u8 ale; /* address line number connected to ALE */ + u8 cle; /* address line number connected to CLE */ + u8 width; /* buswidth */ + u8 chip_delay; +}; + + +#endif diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h new file mode 100644 index 000000000..f63af2955 --- /dev/null +++ b/include/linux/platform_data/mv88e6xxx.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DSA_MV88E6XXX_H +#define __DSA_MV88E6XXX_H + +#include + +struct dsa_mv88e6xxx_pdata { + /* Must be first, such that dsa_register_switch() can access this + * without gory pointer manipulations + */ + struct dsa_chip_data cd; + const char *compatible; + unsigned int enabled_ports; + struct net_device *netdev; + u32 eeprom_len; +}; + +#endif diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h new file mode 100644 index 000000000..98b7925f1 --- /dev/null +++ b/include/linux/platform_data/mv_usb.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2011 Marvell International Ltd. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __MV_PLATFORM_USB_H +#define __MV_PLATFORM_USB_H + +enum pxa_ehci_type { + EHCI_UNDEFINED = 0, + PXA_U2OEHCI, /* pxa 168, 9xx */ + PXA_SPH, /* pxa 168, 9xx SPH */ + MMP3_HSIC, /* mmp3 hsic */ + MMP3_FSIC, /* mmp3 fsic */ +}; + +enum { + MV_USB_MODE_OTG, + MV_USB_MODE_HOST, +}; + +enum { + VBUS_LOW = 0, + VBUS_HIGH = 1 << 0, +}; + +struct mv_usb_addon_irq { + unsigned int irq; + int (*poll)(void); +}; + +struct mv_usb_platform_data { + struct mv_usb_addon_irq *id; /* Only valid for OTG. ID pin change*/ + struct mv_usb_addon_irq *vbus; /* valid for OTG/UDC. VBUS change*/ + + /* only valid for HCD. 
OTG or Host only*/ + unsigned int mode; + + /* This flag is used for that needs id pin checked by otg */ + unsigned int disable_otg_clock_gating:1; + /* Force a_bus_req to be asserted */ + unsigned int otg_force_a_bus_req:1; + + int (*phy_init)(void __iomem *regbase); + void (*phy_deinit)(void __iomem *regbase); + int (*set_vbus)(unsigned int vbus); + int (*private_init)(void __iomem *opregs, void __iomem *phyregs); +}; +#endif diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h new file mode 100644 index 000000000..c6fbc3ce4 --- /dev/null +++ b/include/linux/platform_data/net-cw1200.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Dmitry Tarnyagin + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CW1200_PLAT_H_INCLUDED +#define CW1200_PLAT_H_INCLUDED + +struct cw1200_platform_data_spi { + u8 spi_bits_per_word; /* REQUIRED */ + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + bool have_5ghz; + int reset; /* GPIO to RSTn signal (0 disables) */ + int powerup; /* GPIO to POWERUP signal (0 disables) */ + int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + +struct cw1200_platform_data_sdio { + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + bool have_5ghz; + bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */ + int reset; /* GPIO to RSTn signal (0 disables) */ + int powerup; /* GPIO to POWERUP signal (0 disables) */ + int irq; /* IRQ line or 0 to use SDIO IRQ */ + int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + + +/* An example of SPI support in your board setup file: + + static struct cw1200_platform_data_spi cw1200_platform_data = { + .ref_clk = 38400, + .spi_bits_per_word = 16, + .reset = GPIO_RF_RESET, + .powerup = GPIO_RF_POWERUP, + .macaddr = wifi_mac_addr, + .sdd_file = "sdd_sagrad_1091_1098.bin", + }; + static struct spi_board_info myboard_spi_devices[] __initdata = { + { + .modalias = "cw1200_wlan_spi", + .max_speed_hz = 52000000, + .bus_num = 0, + .irq = WIFI_IRQ, + .platform_data = &cw1200_platform_data, + .chip_select = 0, + }, + }; + + */ + +/* An example of SDIO support in your board setup file: + + static struct cw1200_platform_data_sdio my_cw1200_platform_data = { + .ref_clk = 38400, + .have_5ghz = false, + .sdd_file = "sdd_myplatform.bin", + }; + cw1200_sdio_set_platform_data(&my_cw1200_platform_data); + + */ + +void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata); + +#endif /* CW1200_PLAT_H_INCLUDED */ diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h new file mode 100644 index 000000000..9e75ac8d1 --- /dev/null +++ b/include/linux/platform_data/nfcmrvl.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2015, Marvell International Ltd. 
+ * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available on the worldwide web at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + */ + +#ifndef _NFCMRVL_PTF_H_ +#define _NFCMRVL_PTF_H_ + +struct nfcmrvl_platform_data { + /* + * Generic + */ + + /* GPIO that is wired to RESET_N signal */ + int reset_n_io; + /* Tell if transport is muxed in HCI one */ + unsigned int hci_muxed; + + /* + * UART specific + */ + + /* Tell if UART needs flow control at init */ + unsigned int flow_control; + /* Tell if firmware supports break control for power management */ + unsigned int break_control; + + + /* + * I2C specific + */ + + unsigned int irq; + unsigned int irq_polarity; +}; + +#endif /* _NFCMRVL_PTF_H_ */ diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h new file mode 100644 index 000000000..698d0d59d --- /dev/null +++ b/include/linux/platform_data/ntc_thermistor.h @@ -0,0 +1,62 @@ +/* + * ntc_thermistor.h - NTC Thermistors + * + * Copyright (C) 2010 Samsung Electronics + * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _LINUX_NTC_H +#define _LINUX_NTC_H + +struct iio_channel; + +enum ntc_thermistor_type { + TYPE_NCPXXWB473, + TYPE_NCPXXWL333, + TYPE_B57330V2103, + TYPE_NCPXXWF104, + TYPE_NCPXXXH103, +}; + +struct ntc_thermistor_platform_data { + /* + * One (not both) of read_uV and read_ohm should be provided and only + * one of the two should be provided. + * Both functions should return negative value for an error case. + * + * pullup_uV, pullup_ohm, pulldown_ohm, and connect are required to use + * read_uV() + * + * How to setup pullup_ohm, pulldown_ohm, and connect is + * described at Documentation/hwmon/ntc_thermistor + * + * pullup/down_ohm: 0 for infinite / not-connected + * + * chan: iio_channel pointer to communicate with the ADC which the + * thermistor is using for conversion of the analog values. 
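+ *
+ * Purely illustrative wiring example (the values are assumptions, not a
+ * reference design): an NCPXXWB473-family thermistor connected between
+ * the sense node and ground, pulled up to 1.8 V through 47 kOhm, would
+ * use pullup_uv = 1800000, pullup_ohm = 47000, pulldown_ohm = 0 and
+ * connect = NTC_CONNECTED_GROUND together with read_uv(); the divider
+ * then gives R_ntc = pullup_ohm * uv / (pullup_uv - uv).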
+ */ + int (*read_uv)(struct ntc_thermistor_platform_data *); + unsigned int pullup_uv; + + unsigned int pullup_ohm; + unsigned int pulldown_ohm; + enum { NTC_CONNECTED_POSITIVE, NTC_CONNECTED_GROUND } connect; + struct iio_channel *chan; + + int (*read_ohm)(void); +}; + +#endif /* _LINUX_NTC_H */ diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h new file mode 100644 index 000000000..d6ed28679 --- /dev/null +++ b/include/linux/platform_data/nxp-nci.h @@ -0,0 +1,27 @@ +/* + * Generic platform data for the NXP NCI NFC chips. + * + * Copyright (C) 2014 NXP Semiconductors All rights reserved. + * + * Authors: Clément Perrochaud + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _NXP_NCI_H_ +#define _NXP_NCI_H_ + +struct nxp_nci_nfc_platform_data { + unsigned int gpio_en; + unsigned int gpio_fw; + unsigned int irq; +}; + +#endif /* _NXP_NCI_H_ */ diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h new file mode 100644 index 000000000..ee60ef79d --- /dev/null +++ b/include/linux/platform_data/omap-twl4030.h @@ -0,0 +1,58 @@ +/** + * omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030 + * codec, header. + * + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * All rights reserved. + * + * Author: Peter Ujfalusi + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef _OMAP_TWL4030_H_ +#define _OMAP_TWL4030_H_ + +/* To select if only one channel is connected in a stereo port */ +#define OMAP_TWL4030_LEFT (1 << 0) +#define OMAP_TWL4030_RIGHT (1 << 1) + +struct omap_tw4030_pdata { + const char *card_name; + /* Voice port is connected to McBSP3 */ + bool voice_connected; + + /* The driver will parse the connection flags if this flag is set */ + bool custom_routing; + /* Flags to indicate connected audio ports. 
*/ + u8 has_hs; + u8 has_hf; + u8 has_predriv; + u8 has_carkit; + bool has_ear; + + bool has_mainmic; + bool has_submic; + bool has_hsmic; + bool has_carkitmic; + bool has_digimic0; + bool has_digimic1; + u8 has_linein; + + /* Jack detect GPIO or <= 0 if it is not implemented */ + int jack_detect; +}; + +#endif /* _OMAP_TWL4030_H_ */ diff --git a/include/linux/platform_data/omap-wd-timer.h b/include/linux/platform_data/omap-wd-timer.h new file mode 100644 index 000000000..d75f5f802 --- /dev/null +++ b/include/linux/platform_data/omap-wd-timer.h @@ -0,0 +1,38 @@ +/* + * OMAP2+ WDTIMER-specific function prototypes + * + * Copyright (C) 2012 Texas Instruments, Inc. + * Paul Walmsley + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_PLATFORM_DATA_OMAP_WD_TIMER_H +#define __LINUX_PLATFORM_DATA_OMAP_WD_TIMER_H + +#include + +/* + * Standardized OMAP reset source bits + * + * This is a subset of the ones listed in arch/arm/mach-omap2/prm.h + * and are the only ones needed in the watchdog driver. + */ +#define OMAP_MPU_WD_RST_SRC_ID_SHIFT 3 + +/** + * struct omap_wd_timer_platform_data - WDTIMER integration to the host SoC + * @read_reset_sources - fn ptr for the SoC to indicate the last reset cause + * + * The function pointed to by @read_reset_sources must return its data + * in a standard format - search for RST_SRC_ID_SHIFT in + * arch/arm/mach-omap2 + */ +struct omap_wd_timer_platform_data { + u32 (*read_reset_sources)(void); +}; + +#endif diff --git a/include/linux/platform_data/omap1_bl.h b/include/linux/platform_data/omap1_bl.h new file mode 100644 index 000000000..5e8b17d77 --- /dev/null +++ b/include/linux/platform_data/omap1_bl.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __OMAP1_BL_H__ +#define __OMAP1_BL_H__ + +#include + +struct omap_backlight_config { + int default_intensity; + int (*set_power)(struct device *dev, int state); +}; + +#endif diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h new file mode 100644 index 000000000..7feb011ed --- /dev/null +++ b/include/linux/platform_data/omapdss.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2016 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __OMAPDSS_PDATA_H +#define __OMAPDSS_PDATA_H + +enum omapdss_version { + OMAPDSS_VER_UNKNOWN = 0, + OMAPDSS_VER_OMAP24xx, + OMAPDSS_VER_OMAP34xx_ES1, /* OMAP3430 ES1.0, 2.0 */ + OMAPDSS_VER_OMAP34xx_ES3, /* OMAP3430 ES3.0+ */ + OMAPDSS_VER_OMAP3630, + OMAPDSS_VER_AM35xx, + OMAPDSS_VER_OMAP4430_ES1, /* OMAP4430 ES1.0 */ + OMAPDSS_VER_OMAP4430_ES2, /* OMAP4430 ES2.0, 2.1, 2.2 */ + OMAPDSS_VER_OMAP4, /* All other OMAP4s */ + OMAPDSS_VER_OMAP5, + OMAPDSS_VER_AM43xx, + OMAPDSS_VER_DRA7xx, +}; + +/* Board specific data */ +struct omap_dss_board_info { + int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask); + void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask); + int (*set_min_bus_tput)(struct device *dev, unsigned long r); + enum omapdss_version version; +}; + +#endif /* __OMAPDSS_PDATA_H */ diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h new file mode 100644 index 000000000..4eb53e023 --- /dev/null +++ b/include/linux/platform_data/pca953x.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PCA953X_H +#define _LINUX_PCA953X_H + +#include +#include + +/* platform data for the PCA9539 16-bit I/O expander driver */ + +struct pca953x_platform_data { + /* number of the first GPIO */ + unsigned gpio_base; + + /* initial polarity inversion setting */ + u32 invert; + + /* interrupt base */ + int irq_base; + + void *context; /* param to setup/teardown */ + + int (*setup)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + const char *const *names; +}; + +#endif /* _LINUX_PCA953X_H */ diff --git a/include/linux/platform_data/pca954x.h b/include/linux/platform_data/pca954x.h new file mode 100644 index 000000000..1712677d5 --- /dev/null +++ b/include/linux/platform_data/pca954x.h @@ -0,0 +1,48 @@ +/* + * + * pca954x.h - I2C multiplexer/switch support + * + * Copyright (c) 2008-2009 Rodolfo Giometti + * Copyright (c) 2008-2009 Eurotech S.p.A. + * Michael Lawnick + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#ifndef _LINUX_I2C_PCA954X_H +#define _LINUX_I2C_PCA954X_H + +/* Platform data for the PCA954x I2C multiplexers */ + +/* Per channel initialisation data: + * @adap_id: bus number for the adapter. 0 = don't care + * @deselect_on_exit: set this entry to 1, if your H/W needs deselection + * of this channel after transaction. 
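+ *
+ * Illustrative only (the adapter numbers are made up): a board that wants
+ * the first two downstream buses behind the mux to get fixed numbers could
+ * describe them as
+ *
+ *	static struct pca954x_platform_mode mux_modes[] = {
+ *		{ .adap_id = 3 },
+ *		{ .adap_id = 4, .deselect_on_exit = 1 },
+ *	};
+ *
+ *	static struct pca954x_platform_data mux_data = {
+ *		.modes		= mux_modes,
+ *		.num_modes	= ARRAY_SIZE(mux_modes),
+ *	};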
+ * + */ +struct pca954x_platform_mode { + int adap_id; + unsigned int deselect_on_exit:1; + unsigned int class; +}; + +/* Per mux/switch data, used with i2c_register_board_info */ +struct pca954x_platform_data { + struct pca954x_platform_mode *modes; + int num_modes; +}; + +#endif /* _LINUX_I2C_PCA954X_H */ diff --git a/include/linux/platform_data/pcf857x.h b/include/linux/platform_data/pcf857x.h new file mode 100644 index 000000000..11d4ed78c --- /dev/null +++ b/include/linux/platform_data/pcf857x.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PCF857X_H +#define __LINUX_PCF857X_H + +/** + * struct pcf857x_platform_data - data to set up pcf857x driver + * @gpio_base: number of the chip's first GPIO + * @n_latch: optional bit-inverse of initial register value; if + * you leave this initialized to zero the driver will act + * like the chip was just reset + * @setup: optional callback issued once the GPIOs are valid + * @teardown: optional callback issued before the GPIOs are invalidated + * @context: optional parameter passed to setup() and teardown() + * + * In addition to the I2C_BOARD_INFO() state appropriate to each chip, + * the i2c_board_info used with the pcf857x driver must provide its + * platform_data (pointer to one of these structures) with at least + * the gpio_base value initialized. + * + * The @setup callback may be used with the kind of board-specific glue + * which hands the (now-valid) GPIOs to other drivers, or which puts + * devices in their initial states using these GPIOs. + * + * These GPIO chips are only "quasi-bidirectional"; read the chip specs + * to understand the behavior. They don't have separate registers to + * record which pins are used for input or output, record which output + * values are driven, or provide access to input values. That must be + * inferred by reading the chip's value and knowing the last value written + * to it. If you leave n_latch initialized to zero, that last written + * value is presumed to be all ones (as if the chip were just reset).
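+ *
+ * For illustration only (the board_pcf8574_* names and the GPIO base are
+ * arbitrary, not defined by this header), board code might pass something
+ * like:
+ *
+ *	static struct pcf857x_platform_data board_pcf8574_data = {
+ *		.gpio_base = 200,
+ *		.n_latch = 0,
+ *		.setup = board_pcf8574_setup,
+ *	};
+ *
+ * where board_pcf8574_setup() matches the setup() prototype below and hands
+ * the now-valid GPIOs to the rest of the board code.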
+ */ +struct pcf857x_platform_data { + unsigned gpio_base; + unsigned n_latch; + + int (*setup)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + void *context; +}; + +#endif /* __LINUX_PCF857X_H */ diff --git a/include/linux/platform_data/pcmcia-pxa2xx_viper.h b/include/linux/platform_data/pcmcia-pxa2xx_viper.h new file mode 100644 index 000000000..a23b58aff --- /dev/null +++ b/include/linux/platform_data/pcmcia-pxa2xx_viper.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ARCOM_PCMCIA_H +#define __ARCOM_PCMCIA_H + +struct arcom_pcmcia_pdata { + int cd_gpio; + int rdy_gpio; + int pwr_gpio; + void (*reset)(int state); +}; + +#endif diff --git a/include/linux/platform_data/phy-da8xx-usb.h b/include/linux/platform_data/phy-da8xx-usb.h new file mode 100644 index 000000000..85c2b9938 --- /dev/null +++ b/include/linux/platform_data/phy-da8xx-usb.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver + * + * Copyright (C) 2018 David Lechner + */ + +#ifndef __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ +#define __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ + +#include + +/** + * da8xx_usb_phy_platform_data + * @cfgchip: CFGCHIP syscon regmap + */ +struct da8xx_usb_phy_platform_data { + struct regmap *cfgchip; +}; + +#endif /* __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ */ diff --git a/include/linux/platform_data/pinctrl-single.h b/include/linux/platform_data/pinctrl-single.h new file mode 100644 index 000000000..1cf36fdf9 --- /dev/null +++ b/include/linux/platform_data/pinctrl-single.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * irq: optional wake-up interrupt + * rearm: optional soc specific rearm function + * + * Note that the irq and rearm setup should come from device + * tree except for omap where there are still some dependencies + * to the legacy PRM code. + */ +struct pcs_pdata { + int irq; + void (*rearm)(void); +}; diff --git a/include/linux/platform_data/pixcir_i2c_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h new file mode 100644 index 000000000..4ab3cd6f1 --- /dev/null +++ b/include/linux/platform_data/pixcir_i2c_ts.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PIXCIR_I2C_TS_H +#define _PIXCIR_I2C_TS_H + +/* + * Register map + */ +#define PIXCIR_REG_POWER_MODE 51 +#define PIXCIR_REG_INT_MODE 52 + +/* + * Power modes: + * active: max scan speed + * idle: lower scan speed with automatic transition to active on touch + * halt: datasheet says sleep but this is more like halt as the chip + * clocks are cut and it can only be brought out of this mode + * using the RESET pin. 
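+ *
+ * For illustration (a sketch, not mandated by this header): a driver would
+ * typically program the mode with a read-modify-write of
+ * PIXCIR_REG_POWER_MODE along the lines of
+ *
+ *	val = (val & ~PIXCIR_POWER_MODE_MASK) | mode | PIXCIR_POWER_ALLOW_IDLE;
+ *
+ * using the mask and flag bits defined below.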
+ */ +enum pixcir_power_mode { + PIXCIR_POWER_ACTIVE, + PIXCIR_POWER_IDLE, + PIXCIR_POWER_HALT, +}; + +#define PIXCIR_POWER_MODE_MASK 0x03 +#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2) + +/* + * Interrupt modes: + * periodical: interrupt is asserted periodically + * diff coordinates: interrupt is asserted when coordinates change + * level on touch: interrupt level asserted during touch + * pulse on touch: interrupt pulse asserted during touch + * + */ +enum pixcir_int_mode { + PIXCIR_INT_PERIODICAL, + PIXCIR_INT_DIFF_COORD, + PIXCIR_INT_LEVEL_TOUCH, + PIXCIR_INT_PULSE_TOUCH, +}; + +#define PIXCIR_INT_MODE_MASK 0x03 +#define PIXCIR_INT_ENABLE (1UL << 3) +#define PIXCIR_INT_POL_HIGH (1UL << 2) + +/** + * struct pixcir_i2c_chip_data - chip related data + * @max_fingers: Max number of fingers reported simultaneously by h/w + * @has_hw_ids: Hardware supports finger tracking IDs + * + */ +struct pixcir_i2c_chip_data { + u8 max_fingers; + bool has_hw_ids; +}; + +struct pixcir_ts_platform_data { + int x_max; + int y_max; + struct pixcir_i2c_chip_data chip; +}; + +#endif diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h new file mode 100644 index 000000000..fbf5ed73c --- /dev/null +++ b/include/linux/platform_data/pm33xx.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * TI pm33xx platform data + * + * Copyright (C) 2016-2018 Texas Instruments, Inc. + * Dave Gerlach + */ + +#ifndef _LINUX_PLATFORM_DATA_PM33XX_H +#define _LINUX_PLATFORM_DATA_PM33XX_H + +#include +#include + +/* + * WFI Flags for sleep code control + * + * These flags allow PM code to exclude certain operations from happening + * in the low level ASM code found in sleep33xx.S and sleep43xx.S + * + * WFI_FLAG_FLUSH_CACHE: Flush the ARM caches and disable caching. Only + * needed when MPU will lose context. + * WFI_FLAG_SELF_REFRESH: Let EMIF place DDR memory into self-refresh and + * disable EMIF. + * WFI_FLAG_SAVE_EMIF: Save context of all EMIF registers and restore in + * resume path. Only needed if PER domain loses context + * and must also have WFI_FLAG_SELF_REFRESH set. + * WFI_FLAG_WAKE_M3: Disable MPU clock or clockdomain to cause wkup_m3 to + * execute when WFI instruction executes. + * WFI_FLAG_RTC_ONLY: Configure the RTC to enter RTC+DDR mode.
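+ *
+ * As an illustration (this particular combination is an assumption, not
+ * defined here): a suspend state where both the MPU and PER domains lose
+ * context would typically pass something like
+ *
+ *	WFI_FLAG_FLUSH_CACHE | WFI_FLAG_SELF_REFRESH |
+ *	WFI_FLAG_SAVE_EMIF | WFI_FLAG_WAKE_M3
+ *
+ * to the low level sleep code.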
+ */ +#define WFI_FLAG_FLUSH_CACHE BIT(0) +#define WFI_FLAG_SELF_REFRESH BIT(1) +#define WFI_FLAG_SAVE_EMIF BIT(2) +#define WFI_FLAG_WAKE_M3 BIT(3) +#define WFI_FLAG_RTC_ONLY BIT(4) + +#ifndef __ASSEMBLER__ +struct am33xx_pm_sram_addr { + void (*do_wfi)(void); + unsigned long *do_wfi_sz; + unsigned long *resume_offset; + unsigned long *emif_sram_table; + unsigned long *ro_sram_data; + unsigned long resume_address; +}; + +struct am33xx_pm_platform_data { + int (*init)(void); + int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long), + unsigned long args); + struct am33xx_pm_sram_addr *(*get_sram_addrs)(void); + void __iomem *(*get_rtc_base_addr)(void); +}; + +struct am33xx_pm_sram_data { + u32 wfi_flags; + u32 l2_aux_ctrl_val; + u32 l2_prefetch_ctrl_val; +} __packed __aligned(8); + +struct am33xx_pm_ro_sram_data { + u32 amx3_pm_sram_data_virt; + u32 amx3_pm_sram_data_phys; + void __iomem *rtc_base_virt; +} __packed __aligned(8); + +#endif /* __ASSEMBLER__ */ +#endif /* _LINUX_PLATFORM_DATA_PM33XX_H */ diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h new file mode 100644 index 000000000..e7d521e48 --- /dev/null +++ b/include/linux/platform_data/pwm_omap_dmtimer.h @@ -0,0 +1,90 @@ +/* + * include/linux/platform_data/pwm_omap_dmtimer.h + * + * OMAP Dual-Mode Timer PWM platform data + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ + * Tarun Kanti DebBarma + * Thara Gopinath + * + * Platform device conversion and hwmod support. + * + * Copyright (C) 2005 Nokia Corporation + * Author: Lauri Leukkunen + * PWM and clock framework support by Timo Teras. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __PWM_OMAP_DMTIMER_PDATA_H +#define __PWM_OMAP_DMTIMER_PDATA_H + +/* clock sources */ +#define PWM_OMAP_DMTIMER_SRC_SYS_CLK 0x00 +#define PWM_OMAP_DMTIMER_SRC_32_KHZ 0x01 +#define PWM_OMAP_DMTIMER_SRC_EXT_CLK 0x02 + +/* timer interrupt enable bits */ +#define PWM_OMAP_DMTIMER_INT_CAPTURE (1 << 2) +#define PWM_OMAP_DMTIMER_INT_OVERFLOW (1 << 1) +#define PWM_OMAP_DMTIMER_INT_MATCH (1 << 0) + +/* trigger types */ +#define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00 +#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01 +#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02 + +struct omap_dm_timer; +typedef struct omap_dm_timer pwm_omap_dmtimer; + +struct pwm_omap_dmtimer_pdata { + pwm_omap_dmtimer *(*request_by_node)(struct device_node *np); + pwm_omap_dmtimer *(*request_specific)(int timer_id); + pwm_omap_dmtimer *(*request)(void); + + int (*free)(pwm_omap_dmtimer *timer); + + void (*enable)(pwm_omap_dmtimer *timer); + void (*disable)(pwm_omap_dmtimer *timer); + + int (*get_irq)(pwm_omap_dmtimer *timer); + int (*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value); + int (*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask); + + struct clk *(*get_fclk)(pwm_omap_dmtimer *timer); + + int (*start)(pwm_omap_dmtimer *timer); + int (*stop)(pwm_omap_dmtimer *timer); + int (*set_source)(pwm_omap_dmtimer *timer, int source); + + int (*set_load)(pwm_omap_dmtimer *timer, int autoreload, + unsigned int value); + int (*set_match)(pwm_omap_dmtimer *timer, int enable, + unsigned int match); + int (*set_pwm)(pwm_omap_dmtimer *timer, int def_on, + int toggle, int trigger); + int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler); + + unsigned int (*read_counter)(pwm_omap_dmtimer *timer); + int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value); + unsigned int (*read_status)(pwm_omap_dmtimer *timer); + int (*write_status)(pwm_omap_dmtimer *timer, unsigned int value); +}; + +#endif /* __PWM_OMAP_DMTIMER_PDATA_H */ diff --git a/include/linux/platform_data/pxa2xx_udc.h b/include/linux/platform_data/pxa2xx_udc.h new file mode 100644 index 000000000..ff9c35dca --- /dev/null +++ b/include/linux/platform_data/pxa2xx_udc.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This supports machine-specific differences in how the PXA2xx + * USB Device Controller (UDC) is wired. + * + * It is set in linux/arch/arm/mach-pxa/.c or in + * linux/arch/mach-ixp4xx/.c and used in + * the probe routine of linux/drivers/usb/gadget/pxa2xx_udc.c + */ +#ifndef PXA2XX_UDC_H +#define PXA2XX_UDC_H + +struct pxa2xx_udc_mach_info { + int (*udc_is_connected)(void); /* do we see host? */ + void (*udc_command)(int cmd); +#define PXA2XX_UDC_CMD_CONNECT 0 /* let host see us */ +#define PXA2XX_UDC_CMD_DISCONNECT 1 /* so host won't see us */ + + /* Boards following the design guidelines in the developer's manual, + * with on-chip GPIOs not Lubbock's weird hardware, can have a sane + * VBUS IRQ and omit the methods above. Store the GPIO number + * here. Note that sometimes the signals go through inverters... 
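+ *
+ * For illustration only (the GPIO number is arbitrary): a board with a sane
+ * VBUS/pullup wiring might simply declare
+ *
+ *	static struct pxa2xx_udc_mach_info board_udc_info = {
+ *		.gpio_pullup = 36,
+ *	};
+ *
+ * and leave udc_is_connected()/udc_command() unset.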
+ */ + bool gpio_pullup_inverted; + int gpio_pullup; /* high == pullup activated */ +}; + +#endif diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h new file mode 100644 index 000000000..9e20c2fb4 --- /dev/null +++ b/include/linux/platform_data/pxa_sdhci.h @@ -0,0 +1,58 @@ +/* + * include/linux/platform_data/pxa_sdhci.h + * + * Copyright 2010 Marvell + * Zhangfei Gao + * + * PXA Platform - SDHCI platform data definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _PXA_SDHCI_H_ +#define _PXA_SDHCI_H_ + +/* pxa specific flag */ +/* Require clock free running */ +#define PXA_FLAG_ENABLE_CLOCK_GATING (1<<0) +/* card always wired to host, like on-chip emmc */ +#define PXA_FLAG_CARD_PERMANENT (1<<1) +/* Board design supports 8-bit data on SD/SDIO BUS */ +#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2) + +/* + * struct sdhci_pxa_platdata - Platform device data for PXA SDHCI + * @flags: flags for platform requirement + * @clk_delay_cycles: + * mmp2: each step is roughly 100ps, 5bits width + * pxa910: each step is 1ns, 4bits width + * @clk_delay_sel: select clk_delay, used on pxa910 + * 0: choose feedback clk + * 1: choose feedback clk + delay value + * 2: choose internal clk + * @clk_delay_enable: enable clk_delay or not, used on pxa910 + * @ext_cd_gpio: gpio pin used for external CD line + * @ext_cd_gpio_invert: invert values for external CD gpio line + * @max_speed: the maximum speed supported + * @host_caps: Standard MMC host capabilities bit field. + * @quirks: quirks of platform + * @quirks2: quirks2 of platform + * @pm_caps: pm_caps of platform + */ +struct sdhci_pxa_platdata { + unsigned int flags; + unsigned int clk_delay_cycles; + unsigned int clk_delay_sel; + bool clk_delay_enable; + unsigned int ext_cd_gpio; + bool ext_cd_gpio_invert; + unsigned int max_speed; + u32 host_caps; + u32 host_caps2; + unsigned int quirks; + unsigned int quirks2; + unsigned int pm_caps; +}; +#endif /* _PXA_SDHCI_H_ */ diff --git a/include/linux/platform_data/regulator-haptic.h b/include/linux/platform_data/regulator-haptic.h new file mode 100644 index 000000000..5658e58e0 --- /dev/null +++ b/include/linux/platform_data/regulator-haptic.h @@ -0,0 +1,29 @@ +/* + * Regulator Haptic Platform Data + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Jaewon Kim + * Author: Hyunhee Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _REGULATOR_HAPTIC_H +#define _REGULATOR_HAPTIC_H + +/* + * struct regulator_haptic_data - Platform device data + * + * @max_volt: maximum voltage value supplied to the haptic motor. + * + * @min_volt: minimum voltage value supplied to the haptic motor. + * + */ +struct regulator_haptic_data { + unsigned int max_volt; + unsigned int min_volt; +}; + +#endif /* _REGULATOR_HAPTIC_H */ diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h new file mode 100644 index 000000000..71a1b2399 --- /dev/null +++ b/include/linux/platform_data/remoteproc-omap.h @@ -0,0 +1,59 @@ +/* + * Remote Processor - omap-specific bits + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2011 Google, Inc.
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _PLAT_REMOTEPROC_H +#define _PLAT_REMOTEPROC_H + +struct rproc_ops; +struct platform_device; + +/* + * struct omap_rproc_pdata - omap remoteproc's platform data + * @name: the remoteproc's name + * @oh_name: omap hwmod device + * @oh_name_opt: optional, secondary omap hwmod device + * @firmware: name of firmware file to load + * @mbox_name: name of omap mailbox device to use with this rproc + * @ops: start/stop rproc handlers + * @device_enable: omap-specific handler for enabling a device + * @device_shutdown: omap-specific handler for shutting down a device + * @set_bootaddr: omap-specific handler for setting the rproc boot address + */ +struct omap_rproc_pdata { + const char *name; + const char *oh_name; + const char *oh_name_opt; + const char *firmware; + const char *mbox_name; + const struct rproc_ops *ops; + int (*device_enable)(struct platform_device *pdev); + int (*device_shutdown)(struct platform_device *pdev); + void (*set_bootaddr)(u32); +}; + +#if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE) + +void __init omap_rproc_reserve_cma(void); + +#else + +static inline void __init omap_rproc_reserve_cma(void) +{ +} + +#endif + +#endif /* _PLAT_REMOTEPROC_H */ diff --git a/include/linux/platform_data/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h new file mode 100644 index 000000000..22c538255 --- /dev/null +++ b/include/linux/platform_data/rtc-ds2404.h @@ -0,0 +1,20 @@ +/* + * ds2404.h - platform data structure for the DS2404 RTC. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 Sven Schnelle + */ + +#ifndef __LINUX_DS2404_H +#define __LINUX_DS2404_H + +struct ds2404_platform_data { + + unsigned int gpio_rst; + unsigned int gpio_clk; + unsigned int gpio_dq; +}; +#endif diff --git a/include/linux/platform_data/rtc-v3020.h b/include/linux/platform_data/rtc-v3020.h new file mode 100644 index 000000000..e55d82ceb --- /dev/null +++ b/include/linux/platform_data/rtc-v3020.h @@ -0,0 +1,41 @@ +/* + * v3020.h - Registers definition and platform data structure for the v3020 RTC. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2006, 8D Technologies inc. + */ +#ifndef __LINUX_V3020_H +#define __LINUX_V3020_H + +/* The v3020 has only one data pin but which one + * is used depends on the board. 
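+ *
+ * Purely as an illustration (the GPIO numbers are made up): a board wiring
+ * the chip to GPIOs could pass
+ *
+ *	static struct v3020_platform_data board_v3020_data = {
+ *		.use_gpio = 1,
+ *		.gpio_cs = 10,
+ *		.gpio_wr = 11,
+ *		.gpio_rd = 12,
+ *		.gpio_io = 13,
+ *	};
+ *
+ * while a memory-mapped board would instead set only .leftshift.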
*/ +struct v3020_platform_data { + int leftshift; /* (1<<(leftshift)) & readl() */ + + unsigned int use_gpio:1; + unsigned int gpio_cs; + unsigned int gpio_wr; + unsigned int gpio_rd; + unsigned int gpio_io; +}; + +#define V3020_STATUS_0 0x00 +#define V3020_STATUS_1 0x01 +#define V3020_SECONDS 0x02 +#define V3020_MINUTES 0x03 +#define V3020_HOURS 0x04 +#define V3020_MONTH_DAY 0x05 +#define V3020_MONTH 0x06 +#define V3020_YEAR 0x07 +#define V3020_WEEK_DAY 0x08 +#define V3020_WEEK 0x09 + +#define V3020_IS_COMMAND(val) ((val)>=0x0E) + +#define V3020_CMD_RAM2CLOCK 0x0E +#define V3020_CMD_CLOCK2RAM 0x0F + +#endif /* __LINUX_V3020_H */ diff --git a/include/linux/platform_data/s3c-hsotg.h b/include/linux/platform_data/s3c-hsotg.h new file mode 100644 index 000000000..3982586ba --- /dev/null +++ b/include/linux/platform_data/s3c-hsotg.h @@ -0,0 +1,42 @@ +/* include/linux/platform_data/s3c-hsotg.h + * + * Copyright 2008 Openmoko, Inc. + * Copyright 2008 Simtec Electronics + * Ben Dooks + * http://armlinux.simtec.co.uk/ + * + * S3C USB2.0 High-speed / OtG platform information + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __LINUX_USB_S3C_HSOTG_H +#define __LINUX_USB_S3C_HSOTG_H + +struct platform_device; + +enum dwc2_hsotg_dmamode { + S3C_HSOTG_DMA_NONE, /* do not use DMA at-all */ + S3C_HSOTG_DMA_ONLY, /* always use DMA */ + S3C_HSOTG_DMA_DRV, /* DMA is chosen by driver */ +}; + +/** + * struct dwc2_hsotg_plat - platform data for high-speed otg/udc + * @dma: Whether to use DMA or not. + * @is_osc: The clock source is an oscillator, not a crystal + */ +struct dwc2_hsotg_plat { + enum dwc2_hsotg_dmamode dma; + unsigned int is_osc:1; + int phy_type; + + int (*phy_init)(struct platform_device *pdev, int type); + int (*phy_exit)(struct platform_device *pdev, int type); +}; + +extern void dwc2_hsotg_set_platdata(struct dwc2_hsotg_plat *pd); + +#endif /* __LINUX_USB_S3C_HSOTG_H */ diff --git a/include/linux/platform_data/s3c-hsudc.h b/include/linux/platform_data/s3c-hsudc.h new file mode 100644 index 000000000..6fa109339 --- /dev/null +++ b/include/linux/platform_data/s3c-hsudc.h @@ -0,0 +1,34 @@ +/* + * S3C24XX USB 2.0 High-speed USB controller gadget driver + * + * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints. + * Each endpoint can be configured as either in or out endpoint. Endpoints + * can be configured for Bulk or Interrupt transfer mode. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __LINUX_USB_S3C_HSUDC_H +#define __LINUX_USB_S3C_HSUDC_H + +/** + * s3c24xx_hsudc_platdata - Platform data for USB High-Speed gadget controller. + * @epnum: Number of endpoints to be instantiated by the controller driver. + * @gpio_init: Platform specific USB related GPIO initialization. + * @gpio_uninit: Platform specific USB related GPIO uninitialization. + * + * Representation of platform data for the S3C24XX USB 2.0 High Speed gadget + * controllers.
+ */ +struct s3c24xx_hsudc_platdata { + unsigned int epnum; + void (*gpio_init)(void); + void (*gpio_uninit)(void); +}; + +#endif /* __LINUX_USB_S3C_HSUDC_H */ diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h new file mode 100644 index 000000000..8b79ab08a --- /dev/null +++ b/include/linux/platform_data/sa11x0-serial.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Author: Nicolas Pitre + * + * Moved and changed lots, Russell King + * + * Low level machine dependent UART functions. + */ +#ifndef SA11X0_SERIAL_H +#define SA11X0_SERIAL_H + +struct uart_port; +struct uart_info; + +/* + * This is a temporary structure for registering these + * functions; it is intended to be discarded after boot. + */ +struct sa1100_port_fns { + void (*set_mctrl)(struct uart_port *, u_int); + u_int (*get_mctrl)(struct uart_port *); + void (*pm)(struct uart_port *, u_int, u_int); + int (*set_wake)(struct uart_port *, u_int); +}; + +#ifdef CONFIG_SERIAL_SA1100 +void sa1100_register_uart_fns(struct sa1100_port_fns *fns); +void sa1100_register_uart(int idx, int port); +#else +static inline void sa1100_register_uart_fns(struct sa1100_port_fns *fns) +{ +} +static inline void sa1100_register_uart(int idx, int port) +{ +} +#endif + +#endif diff --git a/include/linux/platform_data/sc18is602.h b/include/linux/platform_data/sc18is602.h new file mode 100644 index 000000000..18602cab7 --- /dev/null +++ b/include/linux/platform_data/sc18is602.h @@ -0,0 +1,19 @@ +/* + * Platform data for NXP SC18IS602/603 + * + * Copyright (C) 2012 Guenter Roeck + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * For further information, see the Documentation/spi/spi-sc18is602 file. + */ + +/** + * struct sc18is602_platform_data - sc18is602 info + * @clock_frequency SC18IS603 oscillator frequency + */ +struct sc18is602_platform_data { + u32 clock_frequency; +}; diff --git a/include/linux/platform_data/sdhci-pic32.h b/include/linux/platform_data/sdhci-pic32.h new file mode 100644 index 000000000..7e0efe64c --- /dev/null +++ b/include/linux/platform_data/sdhci-pic32.h @@ -0,0 +1,22 @@ +/* + * Purna Chandra Mandal, purna.mandal@microchip.com + * Copyright (C) 2015 Microchip Technology Inc. All rights reserved. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ +#ifndef __PIC32_SDHCI_PDATA_H__ +#define __PIC32_SDHCI_PDATA_H__ + +struct pic32_sdhci_platform_data { + /* read & write fifo threshold */ + int (*setup_dma)(u32 rfifo, u32 wfifo); +}; + +#endif diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h new file mode 100644 index 000000000..a938eba2f --- /dev/null +++ b/include/linux/platform_data/serial-imx.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2008 by Sascha Hauer + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. + */ + +#ifndef ASMARM_ARCH_UART_H +#define ASMARM_ARCH_UART_H + +#define IMXUART_HAVE_RTSCTS (1<<0) + +struct imxuart_platform_data { + unsigned int flags; +}; + +#endif diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h new file mode 100644 index 000000000..2ba2c34ca --- /dev/null +++ b/include/linux/platform_data/serial-omap.h @@ -0,0 +1,46 @@ +/* + * Driver for OMAP-UART controller. + * Based on drivers/serial/8250.c + * + * Copyright (C) 2010 Texas Instruments. + * + * Authors: + * Govindraj R + * Thara Gopinath + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __OMAP_SERIAL_H__ +#define __OMAP_SERIAL_H__ + +#include +#include +#include + +#define OMAP_SERIAL_DRIVER_NAME "omap_uart" + +/* + * Use tty device name as ttyO, [O -> OMAP] + * in bootargs we specify as console=ttyO0 if uart1 + * is used as console uart. + */ +#define OMAP_SERIAL_NAME "ttyO" + +struct omap_uart_port_info { + bool dma_enabled; /* To specify DMA Mode */ + unsigned int uartclk; /* UART clock rate */ + upf_t flags; /* UPF_* flags */ + unsigned int dma_rx_buf_size; + unsigned int dma_rx_timeout; + unsigned int autosuspend_timeout; + unsigned int dma_rx_poll_rate; + + int (*get_context_loss_count)(struct device *); + void (*enable_wakeup)(struct device *, bool); +}; + +#endif /* __OMAP_SERIAL_H__ */ diff --git a/include/linux/platform_data/serial-sccnxp.h b/include/linux/platform_data/serial-sccnxp.h new file mode 100644 index 000000000..af0c8c3b8 --- /dev/null +++ b/include/linux/platform_data/serial-sccnxp.h @@ -0,0 +1,88 @@ +/* + * NXP (Philips) SCC+++(SCN+++) serial driver + * + * Copyright (C) 2012 Alexander Shiyan + * + * Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _PLATFORM_DATA_SERIAL_SCCNXP_H_ +#define _PLATFORM_DATA_SERIAL_SCCNXP_H_ + +#define SCCNXP_MAX_UARTS 2 + +/* Output lines */ +#define LINE_OP0 1 +#define LINE_OP1 2 +#define LINE_OP2 3 +#define LINE_OP3 4 +#define LINE_OP4 5 +#define LINE_OP5 6 +#define LINE_OP6 7 +#define LINE_OP7 8 + +/* Input lines */ +#define LINE_IP0 9 +#define LINE_IP1 10 +#define LINE_IP2 11 +#define LINE_IP3 12 +#define LINE_IP4 13 +#define LINE_IP5 14 +#define LINE_IP6 15 + +/* Signals */ +#define DTR_OP 0 /* DTR */ +#define RTS_OP 4 /* RTS */ +#define DSR_IP 8 /* DSR */ +#define CTS_IP 12 /* CTS */ +#define DCD_IP 16 /* DCD */ +#define RNG_IP 20 /* RNG */ + +#define DIR_OP 24 /* Special signal for control RS-485. + * Goes high when transmit, + * then goes low. + */ + +/* Routing control signal 'sig' to line 'line' */ +#define MCTRL_SIG(sig, line) ((line) << (sig)) + +/* + * Example board initialization data: + * + * static struct resource sc2892_resources[] = { + * DEFINE_RES_MEM(UART_PHYS_START, 0x10), + * DEFINE_RES_IRQ(IRQ_EXT2), + * }; + * + * static struct sccnxp_pdata sc2892_info = { + * .mctrl_cfg[0] = MCTRL_SIG(DIR_OP, LINE_OP0), + * .mctrl_cfg[1] = MCTRL_SIG(DIR_OP, LINE_OP1), + * }; + * + * static struct platform_device sc2892 = { + * .name = "sc2892", + * .id = -1, + * .resource = sc2892_resources, + * .num_resources = ARRAY_SIZE(sc2892_resources), + * .dev = { + * .platform_data = &sc2892_info, + * }, + * }; + */ + +/* SCCNXP platform data structure */ +struct sccnxp_pdata { + /* Shift for A0 line */ + const u8 reg_shift; + /* Modem control lines configuration */ + const u32 mctrl_cfg[SCCNXP_MAX_UARTS]; + /* Timer value for polling mode (usecs) */ + const unsigned int poll_time_us; +}; + +#endif diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h new file mode 100644 index 000000000..ee495d707 --- /dev/null +++ b/include/linux/platform_data/shmob_drm.h @@ -0,0 +1,95 @@ +/* + * shmob_drm.h -- SH Mobile DRM driver + * + * Copyright (C) 2012 Renesas Corporation + * + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __SHMOB_DRM_H__ +#define __SHMOB_DRM_H__ + +#include + +#include + +enum shmob_drm_clk_source { + SHMOB_DRM_CLK_BUS, + SHMOB_DRM_CLK_PERIPHERAL, + SHMOB_DRM_CLK_EXTERNAL, +}; + +enum shmob_drm_interface { + SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */ + SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */ + SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */ + SHMOB_DRM_IFACE_RGB12B, /* 12bpp */ + SHMOB_DRM_IFACE_RGB16, /* 16bpp */ + SHMOB_DRM_IFACE_RGB18, /* 18bpp */ + SHMOB_DRM_IFACE_RGB24, /* 24bpp */ + SHMOB_DRM_IFACE_YUV422, /* 16bpp */ + SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */ + SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */ + SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */ + SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */ + SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */ + SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */ + SHMOB_DRM_IFACE_SYS16A, /* 16bpp */ + SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */ + SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */ + SHMOB_DRM_IFACE_SYS18, /* 18bpp */ + SHMOB_DRM_IFACE_SYS24, /* 24bpp */ +}; + +struct shmob_drm_backlight_data { + const char *name; + int max_brightness; + int (*get_brightness)(void); + int (*set_brightness)(int brightness); +}; + +struct shmob_drm_panel_data { + unsigned int width_mm; /* Panel width in mm */ + unsigned int height_mm; /* Panel height in mm */ + struct drm_mode_modeinfo mode; +}; + +struct shmob_drm_sys_interface_data { + unsigned int read_latch:6; + unsigned int read_setup:8; + unsigned int read_cycle:8; + unsigned int read_strobe:8; + unsigned int write_setup:8; + unsigned int write_cycle:8; + unsigned int write_strobe:8; + unsigned int cs_setup:3; + unsigned int vsync_active_high:1; + unsigned int vsync_dir_input:1; +}; + +#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */ +#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */ +#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */ +#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */ +#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */ + +struct shmob_drm_interface_data { + enum shmob_drm_interface interface; + struct shmob_drm_sys_interface_data sys; + unsigned int clk_div; + unsigned int flags; +}; + +struct shmob_drm_platform_data { + enum shmob_drm_clk_source clk_source; + struct shmob_drm_interface_data iface; + struct shmob_drm_panel_data panel; + struct shmob_drm_backlight_data backlight; +}; + +#endif /* __SHMOB_DRM_H__ */ diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h new file mode 100644 index 000000000..2e5eea358 --- /dev/null +++ b/include/linux/platform_data/sht3x.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2016 Sensirion AG, Switzerland + * Author: David Frey + * Author: Pascal Sachs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __SHT3X_H_ +#define __SHT3X_H_ + +struct sht3x_platform_data { + bool blocking_io; + bool high_precision; +}; +#endif /* __SHT3X_H_ */ diff --git a/include/linux/platform_data/shtc1.h b/include/linux/platform_data/shtc1.h new file mode 100644 index 000000000..7b8c353f7 --- /dev/null +++ b/include/linux/platform_data/shtc1.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Sensirion AG, Switzerland + * Author: Johannes Winkelmann + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __SHTC1_H_ +#define __SHTC1_H_ + +struct shtc1_platform_data { + bool blocking_io; + bool high_precision; +}; +#endif /* __SHTC1_H_ */ diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h new file mode 100644 index 000000000..c71a2dd66 --- /dev/null +++ b/include/linux/platform_data/si5351.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Si5351A/B/C programmable clock generator platform_data. + */ + +#ifndef __LINUX_PLATFORM_DATA_SI5351_H__ +#define __LINUX_PLATFORM_DATA_SI5351_H__ + +/** + * enum si5351_pll_src - Si5351 pll clock source + * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config + * @SI5351_PLL_SRC_XTAL: pll source clock is XTAL input + * @SI5351_PLL_SRC_CLKIN: pll source clock is CLKIN input (Si5351C only) + */ +enum si5351_pll_src { + SI5351_PLL_SRC_DEFAULT = 0, + SI5351_PLL_SRC_XTAL = 1, + SI5351_PLL_SRC_CLKIN = 2, +}; + +/** + * enum si5351_multisynth_src - Si5351 multisynth clock source + * @SI5351_MULTISYNTH_SRC_DEFAULT: default, do not change eeprom config + * @SI5351_MULTISYNTH_SRC_VCO0: multisynth source clock is VCO0 + * @SI5351_MULTISYNTH_SRC_VCO1: multisynth source clock is VCO1/VXCO + */ +enum si5351_multisynth_src { + SI5351_MULTISYNTH_SRC_DEFAULT = 0, + SI5351_MULTISYNTH_SRC_VCO0 = 1, + SI5351_MULTISYNTH_SRC_VCO1 = 2, +}; + +/** + * enum si5351_clkout_src - Si5351 clock output clock source + * @SI5351_CLKOUT_SRC_DEFAULT: default, do not change eeprom config + * @SI5351_CLKOUT_SRC_MSYNTH_N: clkout N source clock is multisynth N + * @SI5351_CLKOUT_SRC_MSYNTH_0_4: clkout N source clock is multisynth 0 (N<4) + * or 4 (N>=4) + * @SI5351_CLKOUT_SRC_XTAL: clkout N source clock is XTAL + * @SI5351_CLKOUT_SRC_CLKIN: clkout N source clock is CLKIN (Si5351C only) + */ +enum si5351_clkout_src { + SI5351_CLKOUT_SRC_DEFAULT = 0, + SI5351_CLKOUT_SRC_MSYNTH_N = 1, + SI5351_CLKOUT_SRC_MSYNTH_0_4 = 2, + SI5351_CLKOUT_SRC_XTAL = 3, + SI5351_CLKOUT_SRC_CLKIN = 4, +}; + +/** + * enum si5351_drive_strength - Si5351 clock output drive strength + * @SI5351_DRIVE_DEFAULT: default, do not change eeprom config + * @SI5351_DRIVE_2MA: 2mA clock output drive strength + * @SI5351_DRIVE_4MA: 4mA clock output drive strength + * @SI5351_DRIVE_6MA: 6mA clock output drive strength + * @SI5351_DRIVE_8MA: 8mA clock output drive strength + */ +enum si5351_drive_strength { + SI5351_DRIVE_DEFAULT = 0, + SI5351_DRIVE_2MA = 2, + SI5351_DRIVE_4MA = 4, + SI5351_DRIVE_6MA = 6, + SI5351_DRIVE_8MA = 8, +}; + +/** + * enum si5351_disable_state - Si5351 clock output disable state + * @SI5351_DISABLE_DEFAULT: default, do not change 
eeprom config + * @SI5351_DISABLE_LOW: CLKx is set to a LOW state when disabled + * @SI5351_DISABLE_HIGH: CLKx is set to a HIGH state when disabled + * @SI5351_DISABLE_FLOATING: CLKx is set to a FLOATING state when + * disabled + * @SI5351_DISABLE_NEVER: CLKx is NEVER disabled + */ +enum si5351_disable_state { + SI5351_DISABLE_DEFAULT = 0, + SI5351_DISABLE_LOW, + SI5351_DISABLE_HIGH, + SI5351_DISABLE_FLOATING, + SI5351_DISABLE_NEVER, +}; + +/** + * struct si5351_clkout_config - Si5351 clock output configuration + * @clkout: clkout number + * @multisynth_src: multisynth source clock + * @clkout_src: clkout source clock + * @pll_master: if true, clkout can also change pll rate + * @pll_reset: if true, clkout can reset its pll + * @drive: output drive strength + * @rate: initial clkout rate, or default if 0 + */ +struct si5351_clkout_config { + enum si5351_multisynth_src multisynth_src; + enum si5351_clkout_src clkout_src; + enum si5351_drive_strength drive; + enum si5351_disable_state disable_state; + bool pll_master; + bool pll_reset; + unsigned long rate; +}; + +/** + * struct si5351_platform_data - Platform data for the Si5351 clock driver + * @clk_xtal: xtal input clock + * @clk_clkin: clkin input clock + * @pll_src: array of pll source clock setting + * @clkout: array of clkout configuration + */ +struct si5351_platform_data { + enum si5351_pll_src pll_src[2]; + struct si5351_clkout_config clkout[8]; +}; + +#endif diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h new file mode 100644 index 000000000..077303ced --- /dev/null +++ b/include/linux/platform_data/simplefb.h @@ -0,0 +1,64 @@ +/* + * simplefb.h - Simple Framebuffer Device + * + * Copyright (C) 2013 David Herrmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __PLATFORM_DATA_SIMPLEFB_H__ +#define __PLATFORM_DATA_SIMPLEFB_H__ + +#include +#include +#include + +/* format array, use it to initialize a "struct simplefb_format" array */ +#define SIMPLEFB_FORMATS \ +{ \ + { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \ + { "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \ + { "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \ + { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \ + { "x8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_XRGB8888 }, \ + { "a8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {24, 8}, DRM_FORMAT_ARGB8888 }, \ + { "a8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {24, 8}, DRM_FORMAT_ABGR8888 }, \ + { "x2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {0, 0}, DRM_FORMAT_XRGB2101010 }, \ + { "a2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {30, 2}, DRM_FORMAT_ARGB2101010 }, \ +} + +/* + * Data-Format for Simple-Framebuffers + * @name: unique 0-terminated name that can be used to identify the mode + * @red,green,blue: Offsets and sizes of the single RGB parts + * @transp: Offset and size of the alpha bits. 
length=0 means no alpha + * @fourcc: 32bit DRM four-CC code (see drm_fourcc.h) + */ +struct simplefb_format { + const char *name; + u32 bits_per_pixel; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + u32 fourcc; +}; + +/* + * Simple-Framebuffer description + * If the arch-boot code creates simple-framebuffers without DT support, it + * can pass the width, height, stride and format via this platform-data object. + * The framebuffer location must be given as IORESOURCE_MEM resource. + * @format must be a format as described in "struct simplefb_format" above. + */ +struct simplefb_platform_data { + u32 width; + u32 height; + u32 stride; + const char *format; +}; + +#endif /* __PLATFORM_DATA_SIMPLEFB_H__ */ diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h new file mode 100644 index 000000000..1231e9bb0 --- /dev/null +++ b/include/linux/platform_data/sky81452-backlight.h @@ -0,0 +1,46 @@ +/* + * sky81452.h SKY81452 backlight driver + * + * Copyright 2014 Skyworks Solutions Inc. + * Author : Gyungoh Yoo + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#ifndef _SKY81452_BACKLIGHT_H +#define _SKY81452_BACKLIGHT_H + +/** + * struct sky81452_bl_platform_data + * @name: backlight driver name. + * If it is not defined, default name is lcd-backlight. + * @gpio_enable: GPIO number which controls the EN pin + * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. + * @ignore_pwm: true if DPWMI should be ignored. + * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. + * @phase_shift: true is phase shift mode. + * @short_detection_threshold: It should be one of 4, 5, 6 and 7V. + * @boost_current_limit: It should be one of 2300, 2750mA. + */ +struct sky81452_bl_platform_data { + const char *name; + int gpio_enable; + unsigned int enable; + bool ignore_pwm; + bool dpwm_mode; + bool phase_shift; + unsigned int short_detection_threshold; + unsigned int boost_current_limit; +}; + +#endif diff --git a/include/linux/platform_data/spi-clps711x.h b/include/linux/platform_data/spi-clps711x.h new file mode 100644 index 000000000..301956e63 --- /dev/null +++ b/include/linux/platform_data/spi-clps711x.h @@ -0,0 +1,21 @@ +/* + * CLPS711X SPI bus driver definitions + * + * Copyright (C) 2012 Alexander Shiyan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version.
+ */ + +#ifndef ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H +#define ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H + +/* Board specific platform_data */ +struct spi_clps711x_pdata { + int *chipselect; /* Array of GPIO-numbers */ + int num_chipselect; /* Total count of GPIOs */ +}; + +#endif diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h new file mode 100644 index 000000000..f4edcb03c --- /dev/null +++ b/include/linux/platform_data/spi-davinci.h @@ -0,0 +1,90 @@ +/* + * Copyright 2009 Texas Instruments. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __ARCH_ARM_DAVINCI_SPI_H +#define __ARCH_ARM_DAVINCI_SPI_H + +#include + +#define SPI_INTERN_CS 0xFF + +enum { + SPI_VERSION_1, /* For DM355/DM365/DM6467 */ + SPI_VERSION_2, /* For DA8xx */ +}; + +/** + * davinci_spi_platform_data - Platform data for SPI master device on DaVinci + * + * @version: version of the SPI IP. Different DaVinci devices have slightly + * varying versions of the same IP. + * @num_chipselect: number of chipselects supported by this SPI master + * @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt + * controller within the SoC. Possible values are 0 and 1. + * @chip_sel: list of GPIOs which can act as chip-selects for the SPI. + * SPI_INTERN_CS denotes internal SPI chip-select. Not necessary + * to populate if all chip-selects are internal. + * @cshold_bug: set this to true if the SPI controller on your chip requires + * a write to CSHOLD bit in between transfers (like in DM355). + * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any + * device on the bus. + */ +struct davinci_spi_platform_data { + u8 version; + u8 num_chipselect; + u8 intr_line; + u8 *chip_sel; + u8 prescaler_limit; + bool cshold_bug; + enum dma_event_q dma_event_q; +}; + +/** + * davinci_spi_config - Per-chip-select configuration for SPI slave devices + * + * @wdelay: amount of delay between transmissions. Measured in number of + * SPI module clocks. + * @odd_parity: polarity of parity flag at the end of transmit data stream. + * 0 - odd parity, 1 - even parity. + * @parity_enable: enable transmission of parity at end of each transmit + * data stream. + * @io_type: type of IO transfer. Choose between polled, interrupt and DMA. + * @timer_disable: disable chip-select timers (setup and hold) + * @c2tdelay: chip-select setup time. Measured in number of SPI module clocks. + * @t2cdelay: chip-select hold time. Measured in number of SPI module clocks. + * @t2edelay: transmit data finished to SPI ENAn pin inactive time. Measured + * in number of SPI clocks. + * @c2edelay: chip-select active to SPI ENAn signal active time. Measured in + * number of SPI clocks.
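+ *
+ * For illustration only (the values are arbitrary): a slave device needing
+ * DMA transfers and some extra chip-select setup/hold time might use
+ *
+ *	static struct davinci_spi_config board_flash_cfg = {
+ *		.io_type = SPI_IO_TYPE_DMA,
+ *		.c2tdelay = 8,
+ *		.t2cdelay = 8,
+ *	};
+ *
+ * passed through the spi_board_info controller_data pointer.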
+ */ +struct davinci_spi_config { + u8 wdelay; + u8 odd_parity; + u8 parity_enable; +#define SPI_IO_TYPE_INTR 0 +#define SPI_IO_TYPE_POLL 1 +#define SPI_IO_TYPE_DMA 2 + u8 io_type; + u8 timer_disable; + u8 c2tdelay; + u8 t2cdelay; + u8 t2edelay; + u8 c2edelay; +}; + +#endif /* __ARCH_ARM_DAVINCI_SPI_H */ diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h new file mode 100644 index 000000000..eb16c6739 --- /dev/null +++ b/include/linux/platform_data/spi-ep93xx.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MACH_EP93XX_SPI_H +#define __ASM_MACH_EP93XX_SPI_H + +struct spi_device; + +/** + * struct ep93xx_spi_info - EP93xx specific SPI descriptor + * @chipselect: array of gpio numbers to use as chip selects + * @num_chipselect: ARRAY_SIZE(chipselect) + * @use_dma: use DMA for the transfers + */ +struct ep93xx_spi_info { + int *chipselect; + int num_chipselect; + bool use_dma; +}; + +#endif /* __ASM_MACH_EP93XX_SPI_H */ diff --git a/include/linux/platform_data/spi-imx.h b/include/linux/platform_data/spi-imx.h new file mode 100644 index 000000000..328f670d1 --- /dev/null +++ b/include/linux/platform_data/spi-imx.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __MACH_SPI_H_ +#define __MACH_SPI_H_ + +/* + * struct spi_imx_master - device.platform_data for SPI controller devices. + * @chipselect: Array of chipselects for this master or NULL. Numbers >= 0 + * mean GPIO pins, -ENOENT means internal CSPI chipselect + * matching the position in the array. E.g., if chipselect[1] = + * -ENOENT then a SPI slave using chip select 1 will use the + * native SS1 line of the CSPI. Omitting the array will use + * all native chip selects. + + * Normally you want to use gpio based chip selects as the CSPI + * module tries to be intelligent about when to assert the + * chipselect: The CSPI module deasserts the chipselect once it + * runs out of input data. The other problem is that it is not + * possible to mix between high active and low active chipselects + * on one single bus using the internal chipselects. + * Unfortunately, on some SoCs, Freescale decided to put some + * chipselects on dedicated pins which are not usable as gpios, + * so we have to support the internal chipselects. + * + * @num_chipselect: If @chipselect is specified, ARRAY_SIZE(chipselect), + * otherwise the number of native chip selects. + */ +struct spi_imx_master { + int *chipselect; + int num_chipselect; +}; + +#endif /* __MACH_SPI_H_*/ diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h new file mode 100644 index 000000000..ba4e4bb70 --- /dev/null +++ b/include/linux/platform_data/spi-mt65xx.h @@ -0,0 +1,22 @@ +/* + * MTK SPI bus driver definitions + * + * Copyright (c) 2015 MediaTek Inc. + * Author: Leilk Liu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef ____LINUX_PLATFORM_DATA_SPI_MTK_H +#define ____LINUX_PLATFORM_DATA_SPI_MTK_H + +/* Board specific platform_data */ +struct mtk_chip_config { + u32 tx_mlsb; + u32 rx_mlsb; + u32 cs_pol; + u32 sample_sel; +}; +#endif diff --git a/include/linux/platform_data/spi-nuc900.h b/include/linux/platform_data/spi-nuc900.h new file mode 100644 index 000000000..4b3f46832 --- /dev/null +++ b/include/linux/platform_data/spi-nuc900.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2009 Nuvoton technology corporation. + * + * Wan ZongShun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation;version 2 of the License. + * + */ + +#ifndef __SPI_NUC900_H +#define __SPI_NUC900_H + +extern void mfp_set_groupg(struct device *dev, const char *subname); + +struct nuc900_spi_info { + unsigned int num_cs; + unsigned int lsb; + unsigned int txneg; + unsigned int rxneg; + unsigned int divider; + unsigned int sleep; + unsigned int txnum; + unsigned int txbitlen; + int bus_num; +}; + +struct nuc900_spi_chip { + unsigned char bits_per_word; +}; + +#endif /* __SPI_NUC900_H */ diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h new file mode 100644 index 000000000..0bf9fddb8 --- /dev/null +++ b/include/linux/platform_data/spi-omap2-mcspi.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _OMAP2_MCSPI_H +#define _OMAP2_MCSPI_H + +#define OMAP4_MCSPI_REG_OFFSET 0x100 + +#define MCSPI_PINDIR_D0_IN_D1_OUT 0 +#define MCSPI_PINDIR_D0_OUT_D1_IN 1 + +struct omap2_mcspi_platform_config { + unsigned short num_cs; + unsigned int regs_offset; + unsigned int pin_dir:1; +}; + +struct omap2_mcspi_device_config { + unsigned turbo_mode:1; + + /* toggle chip select after every word */ + unsigned cs_per_word:1; +}; + +#endif diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h new file mode 100644 index 000000000..773daf791 --- /dev/null +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (C) 2009 Samsung Electronics Ltd. + * Jaswinder Singh + */ + +#ifndef __SPI_S3C64XX_H +#define __SPI_S3C64XX_H + +#include + +struct platform_device; + +/** + * struct s3c64xx_spi_csinfo - ChipSelect description + * @fb_delay: Slave specific feedback delay. + * Refer to FB_CLK_SEL register definition in SPI chapter. + * @line: Custom 'identity' of the CS line. + * + * This is per SPI-Slave Chipselect information. + * Allocate and initialize one in machine init code and make the + * spi_board_info.controller_data point to it. + */ +struct s3c64xx_spi_csinfo { + u8 fb_delay; + unsigned line; +}; + +/** + * struct s3c64xx_spi_info - SPI Controller defining structure + * @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field. + * @num_cs: Number of CS this controller emulates. + * @cfg_gpio: Configure pins for this SPI controller. + */ +struct s3c64xx_spi_info { + int src_clk_nr; + int num_cs; + bool no_cs; + int (*cfg_gpio)(void); +}; + +/** + * s3c64xx_spi_set_platdata - SPI Controller configure callback by the board + * initialization code. + * @cfg_gpio: Pointer to gpio setup function. + * @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks. + * @num_cs: Number of elements in the 'cs' array. + * + * Call this from machine init code for each SPI Controller that + * has some chips attached to it. 
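+ *
+ * For example (the controller, clock source and chip-select count are
+ * illustrative only), machine code that needs no special GPIO setup could
+ * simply call
+ *
+ *	s3c64xx_spi0_set_platdata(NULL, 0, 1);
+ *
+ * from its machine init code.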
+ */ +extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, + int num_cs); +extern void s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, + int num_cs); +extern void s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, + int num_cs); + +/* defined by architecture to configure gpio */ +extern int s3c64xx_spi0_cfg_gpio(void); +extern int s3c64xx_spi1_cfg_gpio(void); +extern int s3c64xx_spi2_cfg_gpio(void); + +extern struct s3c64xx_spi_info s3c64xx_spi0_pdata; +extern struct s3c64xx_spi_info s3c64xx_spi1_pdata; +extern struct s3c64xx_spi_info s3c64xx_spi2_pdata; +#endif /*__SPI_S3C64XX_H */ diff --git a/include/linux/platform_data/ssm2518.h b/include/linux/platform_data/ssm2518.h new file mode 100644 index 000000000..9a8e3ea28 --- /dev/null +++ b/include/linux/platform_data/ssm2518.h @@ -0,0 +1,22 @@ +/* + * SSM2518 amplifier audio driver + * + * Copyright 2013 Analog Devices Inc. + * Author: Lars-Peter Clausen + * + * Licensed under the GPL-2. + */ + +#ifndef __LINUX_PLATFORM_DATA_SSM2518_H__ +#define __LINUX_PLATFORM_DATA_SSM2518_H__ + +/** + * struct ssm2518_platform_data - Platform data for the ssm2518 driver + * @enable_gpio: GPIO connected to the nSD pin. Set to -1 if the nSD pin is + * hardwired. + */ +struct ssm2518_platform_data { + int enable_gpio; +}; + +#endif diff --git a/include/linux/platform_data/st33zp24.h b/include/linux/platform_data/st33zp24.h new file mode 100644 index 000000000..6f0fb6ebd --- /dev/null +++ b/include/linux/platform_data/st33zp24.h @@ -0,0 +1,28 @@ +/* + * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24 + * Copyright (C) 2009 - 2016 STMicroelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ +#ifndef __ST33ZP24_H__ +#define __ST33ZP24_H__ + +#define TPM_ST33_I2C "st33zp24-i2c" +#define TPM_ST33_SPI "st33zp24-spi" + +struct st33zp24_platform_data { + int io_lpcpd; +}; + +#endif /* __ST33ZP24_H__ */ diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h new file mode 100644 index 000000000..f8274b0c6 --- /dev/null +++ b/include/linux/platform_data/st_sensors_pdata.h @@ -0,0 +1,28 @@ +/* + * STMicroelectronics sensors platform-data driver + * + * Copyright 2013 STMicroelectronics Inc. + * + * Denis Ciocca + * + * Licensed under the GPL-2. + */ + +#ifndef ST_SENSORS_PDATA_H +#define ST_SENSORS_PDATA_H + +/** + * struct st_sensors_platform_data - Platform data for the ST sensors + * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). + * Available only for accelerometer and pressure sensors. + * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). + * @open_drain: set the interrupt line to be open drain if possible. + * @spi_3wire: enable spi-3wire mode. 
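+ *
+ * For illustration only: a board routing the accelerometer DRDY signal to
+ * interrupt pin 1 might pass
+ *
+ *	static struct st_sensors_platform_data board_accel_pdata = {
+ *		.drdy_int_pin = 1,
+ *	};
+ *
+ * via the device's platform_data.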
+ */ +struct st_sensors_platform_data { + u8 drdy_int_pin; + bool open_drain; + bool spi_3wire; +}; + +#endif /* ST_SENSORS_PDATA_H */ diff --git a/include/linux/platform_data/syscon.h b/include/linux/platform_data/syscon.h new file mode 100644 index 000000000..2c089dd3e --- /dev/null +++ b/include/linux/platform_data/syscon.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef PLATFORM_DATA_SYSCON_H +#define PLATFORM_DATA_SYSCON_H + +struct syscon_platform_data { + const char *label; +}; + +#endif diff --git a/include/linux/platform_data/tc35876x.h b/include/linux/platform_data/tc35876x.h new file mode 100644 index 000000000..cd6a51c71 --- /dev/null +++ b/include/linux/platform_data/tc35876x.h @@ -0,0 +1,11 @@ + +#ifndef _TC35876X_H +#define _TC35876X_H + +struct tc35876x_platform_data { + int gpio_bridge_reset; + int gpio_panel_bl_en; + int gpio_panel_vadd; +}; + +#endif /* _TC35876X_H */ diff --git a/include/linux/platform_data/tda9950.h b/include/linux/platform_data/tda9950.h new file mode 100644 index 000000000..c65efd461 --- /dev/null +++ b/include/linux/platform_data/tda9950.h @@ -0,0 +1,16 @@ +#ifndef LINUX_PLATFORM_DATA_TDA9950_H +#define LINUX_PLATFORM_DATA_TDA9950_H + +struct device; + +struct tda9950_glue { + struct device *parent; + unsigned long irq_flags; + void *data; + int (*init)(void *); + void (*exit)(void *); + int (*open)(void *); + void (*release)(void *); +}; + +#endif diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h new file mode 100644 index 000000000..e6407bafc --- /dev/null +++ b/include/linux/platform_data/ti-aemif.h @@ -0,0 +1,48 @@ +/* + * TI DaVinci AEMIF platform glue. + * + * Copyright (C) 2017 BayLibre SAS + * + * Author: + * Bartosz Golaszewski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __TI_DAVINCI_AEMIF_DATA_H__ +#define __TI_DAVINCI_AEMIF_DATA_H__ + +#include + +/** + * struct aemif_abus_data - Async bus configuration parameters. + * + * @cs - Chip-select number. + */ +struct aemif_abus_data { + u32 cs; +}; + +/** + * struct aemif_platform_data - Data to set up the TI aemif driver. + * + * @dev_lookup: of_dev_auxdata passed to of_platform_populate() for aemif + * subdevices. + * @cs_offset: Lowest allowed chip-select number. + * @abus_data: Array of async bus configuration entries. + * @num_abus_data: Number of abus entries. + * @sub_devices: Array of platform subdevices. + * @num_sub_devices: Number of subdevices. 
+ */ +struct aemif_platform_data { + struct of_dev_auxdata *dev_lookup; + u32 cs_offset; + struct aemif_abus_data *abus_data; + size_t num_abus_data; + struct platform_device *sub_devices; + size_t num_sub_devices; +}; + +#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */ diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h new file mode 100644 index 000000000..2efa3470a --- /dev/null +++ b/include/linux/platform_data/ti-sysc.h @@ -0,0 +1,138 @@ +#ifndef __TI_SYSC_DATA_H__ +#define __TI_SYSC_DATA_H__ + +enum ti_sysc_module_type { + TI_SYSC_OMAP2, + TI_SYSC_OMAP2_TIMER, + TI_SYSC_OMAP3_SHAM, + TI_SYSC_OMAP3_AES, + TI_SYSC_OMAP4, + TI_SYSC_OMAP4_TIMER, + TI_SYSC_OMAP4_SIMPLE, + TI_SYSC_OMAP34XX_SR, + TI_SYSC_OMAP36XX_SR, + TI_SYSC_OMAP4_SR, + TI_SYSC_OMAP4_MCASP, + TI_SYSC_OMAP4_USB_HOST_FS, + TI_SYSC_DRA7_MCAN, +}; + +struct ti_sysc_cookie { + void *data; +}; + +/** + * struct sysc_regbits - TI OCP_SYSCONFIG register field offsets + * @midle_shift: Offset of the midle bit + * @clkact_shift: Offset of the clockactivity bit + * @sidle_shift: Offset of the sidle bit + * @enwkup_shift: Offset of the enawakeup bit + * @srst_shift: Offset of the softreset bit + * @autoidle_shift: Offset of the autoidle bit + * @dmadisable_shift: Offset of the dmadisable bit + * @emufree_shift; Offset of the emufree bit + * + * Note that 0 is a valid shift, and for ti-sysc.c -ENODEV can be used if a + * feature is not available. + */ +struct sysc_regbits { + s8 midle_shift; + s8 clkact_shift; + s8 sidle_shift; + s8 enwkup_shift; + s8 srst_shift; + s8 autoidle_shift; + s8 dmadisable_shift; + s8 emufree_shift; +}; + +#define SYSC_QUIRK_RESOURCE_PROVIDER BIT(9) +#define SYSC_QUIRK_LEGACY_IDLE BIT(8) +#define SYSC_QUIRK_RESET_STATUS BIT(7) +#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6) +#define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5) +#define SYSC_QUIRK_OPT_CLKS_NEEDED BIT(4) +#define SYSC_QUIRK_OPT_CLKS_IN_RESET BIT(3) +#define SYSC_QUIRK_16BIT BIT(2) +#define SYSC_QUIRK_UNCACHED BIT(1) +#define SYSC_QUIRK_USE_CLOCKACT BIT(0) + +#define SYSC_NR_IDLEMODES 4 + +/** + * struct sysc_capabilities - capabilities for an interconnect target module + * + * @sysc_mask: bitmask of supported SYSCONFIG register bits + * @regbits: bitmask of SYSCONFIG register bits + * @mod_quirks: bitmask of module specific quirks + */ +struct sysc_capabilities { + const enum ti_sysc_module_type type; + const u32 sysc_mask; + const struct sysc_regbits *regbits; + const u32 mod_quirks; +}; + +/** + * struct sysc_config - configuration for an interconnect target module + * @sysc_val: configured value for sysc register + * @midlemodes: bitmask of supported master idle modes + * @sidlemodes: bitmask of supported master idle modes + * @srst_udelay: optional delay needed after OCP soft reset + * @quirks: bitmask of enabled quirks + */ +struct sysc_config { + u32 sysc_val; + u32 syss_mask; + u8 midlemodes; + u8 sidlemodes; + u8 srst_udelay; + u32 quirks; +}; + +enum sysc_registers { + SYSC_REVISION, + SYSC_SYSCONFIG, + SYSC_SYSSTATUS, + SYSC_MAX_REGS, +}; + +/** + * struct ti_sysc_module_data - ti-sysc to hwmod translation data for a module + * @name: legacy "ti,hwmods" module name + * @module_pa: physical address of the interconnect target module + * @module_size: size of the interconnect target module + * @offsets: array of register offsets as listed in enum sysc_registers + * @nr_offsets: number of registers + * @cap: interconnect target module capabilities + * @cfg: interconnect target module configuration + * + * This data is 
enough to allocate a new struct omap_hwmod_class_sysconfig + * based on device tree data parsed by ti-sysc driver. + */ +struct ti_sysc_module_data { + const char *name; + u64 module_pa; + u32 module_size; + int *offsets; + int nr_offsets; + const struct sysc_capabilities *cap; + struct sysc_config *cfg; +}; + +struct device; + +struct ti_sysc_platform_data { + struct of_dev_auxdata *auxdata; + int (*init_module)(struct device *dev, + const struct ti_sysc_module_data *data, + struct ti_sysc_cookie *cookie); + int (*enable_module)(struct device *dev, + const struct ti_sysc_cookie *cookie); + int (*idle_module)(struct device *dev, + const struct ti_sysc_cookie *cookie); + int (*shutdown_module)(struct device *dev, + const struct ti_sysc_cookie *cookie); +}; + +#endif /* __TI_SYSC_DATA_H__ */ diff --git a/include/linux/platform_data/touchscreen-s3c2410.h b/include/linux/platform_data/touchscreen-s3c2410.h new file mode 100644 index 000000000..71eccaa98 --- /dev/null +++ b/include/linux/platform_data/touchscreen-s3c2410.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2005 Arnaud Patard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __TOUCHSCREEN_S3C2410_H +#define __TOUCHSCREEN_S3C2410_H + +struct s3c2410_ts_mach_info { + int delay; + int presc; + int oversampling_shift; + void (*cfg_gpio)(struct platform_device *dev); +}; + +extern void s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *); +extern void s3c64xx_ts_set_platdata(struct s3c2410_ts_mach_info *); + +/* defined by architecture to configure gpio */ +extern void s3c24xx_ts_cfg_gpio(struct platform_device *dev); + +#endif /*__TOUCHSCREEN_S3C2410_H */ diff --git a/include/linux/platform_data/tsc2007.h b/include/linux/platform_data/tsc2007.h new file mode 100644 index 000000000..a0ca52c41 --- /dev/null +++ b/include/linux/platform_data/tsc2007.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_I2C_TSC2007_H +#define __LINUX_I2C_TSC2007_H + +/* linux/platform_data/tsc2007.h */ + +struct tsc2007_platform_data { + u16 model; /* 2007. */ + u16 x_plate_ohms; /* must be non-zero value */ + u16 max_rt; /* max. resistance above which samples are ignored */ + unsigned long poll_period; /* time (in ms) between samples */ + int fuzzx; /* fuzz factor for X, Y and pressure axes */ + int fuzzy; + int fuzzz; + + int (*get_pendown_state)(struct device *); + /* If needed, clear 2nd level interrupt source */ + void (*clear_penirq)(void); + int (*init_platform_hw)(void); + void (*exit_platform_hw)(void); +}; + +#endif diff --git a/include/linux/platform_data/tsl2563.h b/include/linux/platform_data/tsl2563.h new file mode 100644 index 000000000..9cf9309c3 --- /dev/null +++ b/include/linux/platform_data/tsl2563.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_TSL2563_H +#define __LINUX_TSL2563_H + +struct tsl2563_platform_data { + int cover_comp_gain; +}; + +#endif /* __LINUX_TSL2563_H */ diff --git a/include/linux/platform_data/tsl2772.h b/include/linux/platform_data/tsl2772.h new file mode 100644 index 000000000..f8ade15a3 --- /dev/null +++ b/include/linux/platform_data/tsl2772.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Device driver for monitoring ambient light intensity (lux) + * and proximity (prox) within the TAOS TSL2772 family of devices. + * + * Copyright (c) 2012, TAOS Corporation. 
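As an illustration of the tsc2007 platform data above, a board-file sketch that wires in a GPIO-based pen-down callback and registers the controller as an I2C device; the GPIO number, I2C address and resistance values are assumptions for the example only.

#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/platform_data/tsc2007.h>

#define EXAMPLE_PENDOWN_GPIO    42      /* hypothetical pen-down GPIO */

static int example_get_pendown_state(struct device *dev)
{
        /* pen-down line assumed active low on this (imaginary) board */
        return !gpio_get_value(EXAMPLE_PENDOWN_GPIO);
}

static struct tsc2007_platform_data example_tsc2007_pdata = {
        .model                  = 2007,
        .x_plate_ohms           = 180,  /* board-specific plate resistance */
        .max_rt                 = 100,
        .fuzzx                  = 4,
        .fuzzy                  = 4,
        .fuzzz                  = 4,
        .get_pendown_state      = example_get_pendown_state,
};

static struct i2c_board_info example_touch_info = {
        I2C_BOARD_INFO("tsc2007", 0x48),        /* address is board specific */
        .platform_data  = &example_tsc2007_pdata,
        /* .irq would normally be set to the pen-down interrupt line */
};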
+ * Copyright (c) 2017-2018 Brian Masney + */ + +#ifndef __TSL2772_H +#define __TSL2772_H + +struct tsl2772_lux { + unsigned int ch0; + unsigned int ch1; +}; + +/* Max number of segments allowable in LUX table */ +#define TSL2772_MAX_LUX_TABLE_SIZE 6 +/* The default LUX tables all have 3 elements. */ +#define TSL2772_DEF_LUX_TABLE_SZ 3 +#define TSL2772_DEFAULT_TABLE_BYTES (sizeof(struct tsl2772_lux) * \ + TSL2772_DEF_LUX_TABLE_SZ) + +/* Proximity diode to use */ +#define TSL2772_DIODE0 0x01 +#define TSL2772_DIODE1 0x02 +#define TSL2772_DIODE_BOTH 0x03 + +/* LED Power */ +#define TSL2772_100_mA 0x00 +#define TSL2772_50_mA 0x01 +#define TSL2772_25_mA 0x02 +#define TSL2772_13_mA 0x03 + +/** + * struct tsl2772_settings - Settings for the tsl2772 driver + * @als_time: Integration time of the ALS channel ADCs in 2.73 ms + * increments. Total integration time is + * (256 - als_time) * 2.73. + * @als_gain: Index into the tsl2772_als_gain array. + * @als_gain_trim: Default gain trim to account for aperture effects. + * @wait_time: Time between proximity and ALS cycles in 2.73 + * periods. + * @prox_time: Integration time of the proximity ADC in 2.73 ms + * increments. Total integration time is + * (256 - prx_time) * 2.73. + * @prox_gain: Index into the tsl2772_prx_gain array. + * @als_prox_config: The value of the ALS / Proximity configuration + * register. + * @als_cal_target: Known external ALS reading for calibration. + * @als_persistence: H/W Filters, Number of 'out of limits' ALS readings. + * @als_interrupt_en: Enable/Disable ALS interrupts + * @als_thresh_low: CH0 'low' count to trigger interrupt. + * @als_thresh_high: CH0 'high' count to trigger interrupt. + * @prox_persistence: H/W Filters, Number of 'out of limits' proximity + * readings. + * @prox_interrupt_en: Enable/Disable proximity interrupts. + * @prox_thres_low: Low threshold proximity detection. + * @prox_thres_high: High threshold proximity detection. + * @prox_pulse_count: Number if proximity emitter pulses. + * @prox_max_samples_cal: The number of samples that are taken when performing + * a proximity calibration. + * @prox_diode Which diode(s) to use for driving the external + * LED(s) for proximity sensing. + * @prox_power The amount of power to use for the external LED(s). + */ +struct tsl2772_settings { + int als_time; + int als_gain; + int als_gain_trim; + int wait_time; + int prox_time; + int prox_gain; + int als_prox_config; + int als_cal_target; + u8 als_persistence; + bool als_interrupt_en; + int als_thresh_low; + int als_thresh_high; + u8 prox_persistence; + bool prox_interrupt_en; + int prox_thres_low; + int prox_thres_high; + int prox_pulse_count; + int prox_max_samples_cal; + int prox_diode; + int prox_power; +}; + +/** + * struct tsl2772_platform_data - Platform callback, glass and defaults + * @platform_lux_table: Device specific glass coefficents + * @platform_default_settings: Device specific power on defaults + */ +struct tsl2772_platform_data { + struct tsl2772_lux platform_lux_table[TSL2772_MAX_LUX_TABLE_SIZE]; + struct tsl2772_settings *platform_default_settings; +}; + +#endif /* __TSL2772_H */ diff --git a/include/linux/platform_data/txx9/ndfmc.h b/include/linux/platform_data/txx9/ndfmc.h new file mode 100644 index 000000000..fc172627d --- /dev/null +++ b/include/linux/platform_data/txx9/ndfmc.h @@ -0,0 +1,30 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
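A hedged sketch of how the tsl2772 platform data above could be supplied from board code: a partial settings block, a glass-specific lux table and the i2c_board_info glue. All coefficients and the I2C address are placeholders, not calibration data.

#include <linux/i2c.h>
#include <linux/platform_data/tsl2772.h>

/* Placeholder power-on settings; real values come from device calibration. */
static struct tsl2772_settings example_tsl2772_settings = {
        .als_time               = 219,  /* (256 - 219) * 2.73 ms integration */
        .als_gain               = 1,
        .als_gain_trim          = 1000,
        .prox_gain              = 0,
        .prox_diode             = TSL2772_DIODE1,
        .prox_power             = TSL2772_100_mA,
        .prox_max_samples_cal   = 30,
};

static struct tsl2772_platform_data example_tsl2772_pdata = {
        /* glass coefficients; placeholder numbers, remaining entries stay zero */
        .platform_lux_table = {
                { .ch0 = 60000, .ch1 = 28633 },
                { .ch0 = 26000, .ch1 = 10000 },
        },
        .platform_default_settings = &example_tsl2772_settings,
};

static struct i2c_board_info example_light_sensor = {
        I2C_BOARD_INFO("tsl2772", 0x39),        /* address is board specific */
        .platform_data = &example_tsl2772_pdata,
};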
+ * + * (C) Copyright TOSHIBA CORPORATION 2007 + */ +#ifndef __TXX9_NDFMC_H +#define __TXX9_NDFMC_H + +#define NDFMC_PLAT_FLAG_USE_BSPRT 0x01 +#define NDFMC_PLAT_FLAG_NO_RSTR 0x02 +#define NDFMC_PLAT_FLAG_HOLDADD 0x04 +#define NDFMC_PLAT_FLAG_DUMMYWRITE 0x08 + +struct txx9ndfmc_platform_data { + unsigned int shift; + unsigned int gbus_clock; + unsigned int hold; /* hold time in nanosecond */ + unsigned int spw; /* strobe pulse width in nanosecond */ + unsigned int flags; + unsigned char ch_mask; /* available channel bitmask */ + unsigned char wp_mask; /* write-protect bitmask */ + unsigned char wide_mask; /* 16bit-nand bitmask */ +}; + +void txx9_ndfmc_init(unsigned long baseaddr, + const struct txx9ndfmc_platform_data *plat_data); + +#endif /* __TXX9_NDFMC_H */ diff --git a/include/linux/platform_data/uio_dmem_genirq.h b/include/linux/platform_data/uio_dmem_genirq.h new file mode 100644 index 000000000..973c1bb32 --- /dev/null +++ b/include/linux/platform_data/uio_dmem_genirq.h @@ -0,0 +1,26 @@ +/* + * include/linux/platform_data/uio_dmem_genirq.h + * + * Copyright (C) 2012 Damian Hobson-Garcia + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _UIO_DMEM_GENIRQ_H +#define _UIO_DMEM_GENIRQ_H + +#include + +struct uio_dmem_genirq_pdata { + struct uio_info uioinfo; + unsigned int *dynamic_region_sizes; + unsigned int num_dynamic_regions; +}; +#endif /* _UIO_DMEM_GENIRQ_H */ diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h new file mode 100644 index 000000000..3d47d2198 --- /dev/null +++ b/include/linux/platform_data/uio_pruss.h @@ -0,0 +1,26 @@ +/* + * include/linux/platform_data/uio_pruss.h + * + * Platform data for uio_pruss driver + * + * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _UIO_PRUSS_H_ +#define _UIO_PRUSS_H_ + +/* To configure the PRUSS INTC base offset for UIO driver */ +struct uio_pruss_pdata { + u32 pintc_base; + struct gen_pool *sram_pool; +}; +#endif /* _UIO_PRUSS_H_ */ diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h new file mode 100644 index 000000000..0926e99f2 --- /dev/null +++ b/include/linux/platform_data/usb-davinci.h @@ -0,0 +1,36 @@ +/* + * USB related definitions + * + * Copyright (C) 2009 MontaVista Software, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#ifndef __ASM_ARCH_USB_H +#define __ASM_ARCH_USB_H + +struct da8xx_ohci_root_hub; + +typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub, + unsigned port); + +/* Passed as the platform data to the OHCI driver */ +struct da8xx_ohci_root_hub { + /* Switch the port power on/off */ + int (*set_power)(unsigned port, int on); + /* Read the port power status */ + int (*get_power)(unsigned port); + /* Read the port over-current indicator */ + int (*get_oci)(unsigned port); + /* Over-current indicator change notification (pass NULL to disable) */ + int (*ocic_notify)(da8xx_ocic_handler_t handler); + + /* Time from power on to power good (in 2 ms units) */ + u8 potpgt; +}; + +void davinci_setup_usb(unsigned mA, unsigned potpgt_ms); + +#endif /* ifndef __ASM_ARCH_USB_H */ diff --git a/include/linux/platform_data/usb-ehci-mxc.h b/include/linux/platform_data/usb-ehci-mxc.h new file mode 100644 index 000000000..ad9794d09 --- /dev/null +++ b/include/linux/platform_data/usb-ehci-mxc.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H +#define __INCLUDE_ASM_ARCH_MXC_EHCI_H + +struct mxc_usbh_platform_data { + int (*init)(struct platform_device *pdev); + int (*exit)(struct platform_device *pdev); + + unsigned int portsc; + struct usb_phy *otg; +}; + +#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */ + diff --git a/include/linux/platform_data/usb-ehci-orion.h b/include/linux/platform_data/usb-ehci-orion.h new file mode 100644 index 000000000..52b0acb35 --- /dev/null +++ b/include/linux/platform_data/usb-ehci-orion.h @@ -0,0 +1,24 @@ +/* + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __USB_EHCI_ORION_H +#define __USB_EHCI_ORION_H + +#include + +enum orion_ehci_phy_ver { + EHCI_PHY_ORION, + EHCI_PHY_DD, + EHCI_PHY_KW, + EHCI_PHY_NA, +}; + +struct orion_ehci_data { + enum orion_ehci_phy_ver phy_version; +}; + + +#endif diff --git a/include/linux/platform_data/usb-musb-ux500.h b/include/linux/platform_data/usb-musb-ux500.h new file mode 100644 index 000000000..dd9c83ac7 --- /dev/null +++ b/include/linux/platform_data/usb-musb-ux500.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Mian Yousaf Kaukab + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __ASM_ARCH_USB_H +#define __ASM_ARCH_USB_H + +#include + +#define UX500_MUSB_DMA_NUM_RX_TX_CHANNELS 8 + +struct ux500_musb_board_data { + void **dma_rx_param_array; + void **dma_tx_param_array; + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); +}; + +void ux500_add_usb(struct device *parent, resource_size_t base, + int irq, int *dma_rx_cfg, int *dma_tx_cfg); +#endif diff --git a/include/linux/platform_data/usb-mx2.h b/include/linux/platform_data/usb-mx2.h new file mode 100644 index 000000000..22d0b5962 --- /dev/null +++ b/include/linux/platform_data/usb-mx2.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2009 Martin Fuzzey + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARCH_MX21_USBH +#define __ASM_ARCH_MX21_USBH + +enum mx21_usbh_xcvr { + /* Values below as used by hardware (HWMODE register) */ + MX21_USBXCVR_TXDIF_RXDIF = 0, + MX21_USBXCVR_TXDIF_RXSE = 1, + MX21_USBXCVR_TXSE_RXDIF = 2, + MX21_USBXCVR_TXSE_RXSE = 3, +}; + +struct mx21_usbh_platform_data { + enum mx21_usbh_xcvr host_xcvr; /* tranceiver mode host 1,2 ports */ + enum mx21_usbh_xcvr otg_xcvr; /* tranceiver mode otg (as host) port */ + u16 enable_host1:1, + enable_host2:1, + enable_otg_host:1, /* enable "OTG" port (as host) */ + host1_xcverless:1, /* traceiverless host1 port */ + host1_txenoe:1, /* output enable host1 transmit enable */ + otg_ext_xcvr:1, /* external tranceiver for OTG port */ + unused:10; +}; + +#endif /* __ASM_ARCH_MX21_USBH */ diff --git a/include/linux/platform_data/usb-ohci-pxa27x.h b/include/linux/platform_data/usb-ohci-pxa27x.h new file mode 100644 index 000000000..69adea769 --- /dev/null +++ b/include/linux/platform_data/usb-ohci-pxa27x.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ASMARM_ARCH_OHCI_H +#define ASMARM_ARCH_OHCI_H + +struct device; + +struct pxaohci_platform_data { + int (*init)(struct device *); + void (*exit)(struct device *); + + unsigned long flags; +#define ENABLE_PORT1 (1 << 0) +#define ENABLE_PORT2 (1 << 1) +#define ENABLE_PORT3 (1 << 2) +#define ENABLE_PORT_ALL (ENABLE_PORT1 | ENABLE_PORT2 | ENABLE_PORT3) + +#define POWER_SENSE_LOW (1 << 3) +#define POWER_CONTROL_LOW (1 << 4) +#define NO_OC_PROTECTION (1 << 5) +#define OC_MODE_GLOBAL (0 << 6) +#define OC_MODE_PERPORT (1 << 6) + + int power_on_delay; /* Power On to Power Good time - in ms + * HCD must wait for this duration before + * accessing a powered on port + */ + int port_mode; +#define PMM_NPS_MODE 1 +#define PMM_GLOBAL_MODE 2 +#define PMM_PERPORT_MODE 3 + + int power_budget; +}; + +extern void pxa_set_ohci_info(struct pxaohci_platform_data *info); + +#endif diff --git a/include/linux/platform_data/usb-ohci-s3c2410.h b/include/linux/platform_data/usb-ohci-s3c2410.h new file mode 100644 index 000000000..cc7554ae6 --- /dev/null +++ b/include/linux/platform_data/usb-ohci-s3c2410.h @@ -0,0 +1,43 @@ +/* arch/arm/plat-samsung/include/plat/usb-control.h + * + * Copyright (c) 2004 Simtec Electronics + * Ben Dooks + * + * S3C - USB host port information + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
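To illustrate the PXA27x OHCI platform data above: a minimal sketch that enables two ports with per-port power management and hands the structure to pxa_set_ohci_info() from board init code; the flag combination and delay are illustrative.

#include <linux/init.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>

/* Board-specific OHCI setup; the flag mix and delay are illustrative. */
static struct pxaohci_platform_data example_ohci_info = {
        .port_mode      = PMM_PERPORT_MODE,
        .flags          = ENABLE_PORT1 | ENABLE_PORT2 | POWER_CONTROL_LOW,
        .power_on_delay = 10,   /* ms before a powered-on port may be accessed */
        .power_budget   = 0,    /* 0: no per-port power budget enforced */
};

static void __init example_board_usb_init(void)
{
        pxa_set_ohci_info(&example_ohci_info);
}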
+*/ + +#ifndef __ASM_ARCH_USBCONTROL_H +#define __ASM_ARCH_USBCONTROL_H + +#define S3C_HCDFLG_USED (1) + +struct s3c2410_hcd_port { + unsigned char flags; + unsigned char power; + unsigned char oc_status; + unsigned char oc_changed; +}; + +struct s3c2410_hcd_info { + struct usb_hcd *hcd; + struct s3c2410_hcd_port port[2]; + + void (*power_control)(int port, int to); + void (*enable_oc)(struct s3c2410_hcd_info *, int on); + void (*report_oc)(struct s3c2410_hcd_info *, int ports); +}; + +static inline void s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports) +{ + if (info->report_oc != NULL) { + (info->report_oc)(info, ports); + } +} + +extern void s3c_ohci_set_platdata(struct s3c2410_hcd_info *info); + +#endif /*__ASM_ARCH_USBCONTROL_H */ diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h new file mode 100644 index 000000000..fa579b4c6 --- /dev/null +++ b/include/linux/platform_data/usb-omap.h @@ -0,0 +1,88 @@ +/* + * usb-omap.h - Platform data for the various OMAP USB IPs + * + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * + * This software is distributed under the terms of the GNU General Public + * License ("GPL") version 2, as published by the Free Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define OMAP3_HS_USB_PORTS 3 + +enum usbhs_omap_port_mode { + OMAP_USBHS_PORT_MODE_UNUSED, + OMAP_EHCI_PORT_MODE_PHY, + OMAP_EHCI_PORT_MODE_TLL, + OMAP_EHCI_PORT_MODE_HSIC, + OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0, + OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM, + OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0, + OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM, + OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0, + OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM, + OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0, + OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM, + OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0, + OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM +}; + +struct usbtll_omap_platform_data { + enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; +}; + +struct ehci_hcd_omap_platform_data { + enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; + int reset_gpio_port[OMAP3_HS_USB_PORTS]; + struct regulator *regulator[OMAP3_HS_USB_PORTS]; + unsigned phy_reset:1; +}; + +struct ohci_hcd_omap_platform_data { + enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; + unsigned es2_compatibility:1; +}; + +struct usbhs_omap_platform_data { + int nports; + enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; + int reset_gpio_port[OMAP3_HS_USB_PORTS]; + struct regulator *regulator[OMAP3_HS_USB_PORTS]; + + struct ehci_hcd_omap_platform_data *ehci_data; + struct ohci_hcd_omap_platform_data *ohci_data; + + /* OMAP3 <= ES2.1 have a single ulpi bypass control bit */ + unsigned single_ulpi_bypass:1; + unsigned es2_compatibility:1; + unsigned phy_reset:1; +}; + +/*-------------------------------------------------------------------------*/ + +struct omap_musb_board_data { + u8 interface_type; + u8 mode; + u16 power; + unsigned extvbus:1; + void (*set_phy_power)(u8 on); + void (*clear_irq)(void); + void (*set_mode)(u8 mode); + void (*reset)(void); +}; + +enum musb_interface { + MUSB_INTERFACE_ULPI, + MUSB_INTERFACE_UTMI +}; diff --git a/include/linux/platform_data/usb-omap1.h b/include/linux/platform_data/usb-omap1.h new file mode 100644 index 000000000..43b5ce139 --- /dev/null +++ b/include/linux/platform_data/usb-omap1.h @@ -0,0 +1,53 @@ +/* + * Platform data for OMAP1 USB + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive for + * more details. + */ +#ifndef __LINUX_USB_OMAP1_H +#define __LINUX_USB_OMAP1_H + +#include + +struct omap_usb_config { + /* Configure drivers according to the connectors on your board: + * - "A" connector (rectagular) + * ... for host/OHCI use, set "register_host". + * - "B" connector (squarish) or "Mini-B" + * ... for device/gadget use, set "register_dev". + * - "Mini-AB" connector (very similar to Mini-B) + * ... for OTG use as device OR host, initialize "otg" + */ + unsigned register_host:1; + unsigned register_dev:1; + u8 otg; /* port number, 1-based: usb1 == 2 */ + + const char *extcon; /* extcon device for OTG */ + + u8 hmc_mode; + + /* implicitly true if otg: host supports remote wakeup? 
*/ + u8 rwc; + + /* signaling pins used to talk to transceiver on usbN: + * 0 == usbN unused + * 2 == usb0-only, using internal transceiver + * 3 == 3 wire bidirectional + * 4 == 4 wire bidirectional + * 6 == 6 wire unidirectional (or TLL) + */ + u8 pins[3]; + + struct platform_device *udc_device; + struct platform_device *ohci_device; + struct platform_device *otg_device; + + u32 (*usb0_init)(unsigned nwires, unsigned is_device); + u32 (*usb1_init)(unsigned nwires); + u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup); + + int (*ocpi_enable)(void); +}; + +#endif /* __LINUX_USB_OMAP1_H */ diff --git a/include/linux/platform_data/usb-pxa3xx-ulpi.h b/include/linux/platform_data/usb-pxa3xx-ulpi.h new file mode 100644 index 000000000..9d82cb65e --- /dev/null +++ b/include/linux/platform_data/usb-pxa3xx-ulpi.h @@ -0,0 +1,35 @@ +/* + * PXA3xx U2D header + * + * Copyright (C) 2010 CompuLab Ltd. + * + * Igor Grinberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __PXA310_U2D__ +#define __PXA310_U2D__ + +#include + +struct pxa3xx_u2d_platform_data { + +#define ULPI_SER_6PIN (1 << 0) +#define ULPI_SER_3PIN (1 << 1) + unsigned int ulpi_mode; + + int (*init)(struct device *); + void (*exit)(struct device *); +}; + + +/* Start PXA3xx U2D host */ +int pxa3xx_u2d_start_hc(struct usb_bus *host); +/* Stop PXA3xx U2D host */ +void pxa3xx_u2d_stop_hc(struct usb_bus *host); + +extern void pxa3xx_set_u2d_info(struct pxa3xx_u2d_platform_data *info); + +#endif /* __PXA310_U2D__ */ diff --git a/include/linux/platform_data/usb-s3c2410_udc.h b/include/linux/platform_data/usb-s3c2410_udc.h new file mode 100644 index 000000000..de8e2288a --- /dev/null +++ b/include/linux/platform_data/usb-s3c2410_udc.h @@ -0,0 +1,44 @@ +/* arch/arm/plat-samsung/include/plat/udc.h + * + * Copyright (c) 2005 Arnaud Patard + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * + * Changelog: + * 14-Mar-2005 RTP Created file + * 02-Aug-2005 RTP File rename + * 07-Sep-2005 BJD Minor cleanups, changed cmd to enum + * 18-Jan-2007 HMW Add per-platform vbus_draw function +*/ + +#ifndef __ASM_ARM_ARCH_UDC_H +#define __ASM_ARM_ARCH_UDC_H + +enum s3c2410_udc_cmd_e { + S3C2410_UDC_P_ENABLE = 1, /* Pull-up enable */ + S3C2410_UDC_P_DISABLE = 2, /* Pull-up disable */ + S3C2410_UDC_P_RESET = 3, /* UDC reset, in case of */ +}; + +struct s3c2410_udc_mach_info { + void (*udc_command)(enum s3c2410_udc_cmd_e); + void (*vbus_draw)(unsigned int ma); + + unsigned int pullup_pin; + unsigned int pullup_pin_inverted; + + unsigned int vbus_pin; + unsigned char vbus_pin_inverted; +}; + +extern void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *); + +struct s3c24xx_hsudc_platdata; + +extern void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd); + +#endif /* __ASM_ARM_ARCH_UDC_H */ diff --git a/include/linux/platform_data/usb3503.h b/include/linux/platform_data/usb3503.h new file mode 100644 index 000000000..e049d51c1 --- /dev/null +++ b/include/linux/platform_data/usb3503.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __USB3503_H__ +#define __USB3503_H__ + +#define USB3503_I2C_NAME "usb3503" + +#define USB3503_OFF_PORT1 (1 << 1) +#define USB3503_OFF_PORT2 (1 << 2) +#define USB3503_OFF_PORT3 (1 << 3) + +enum usb3503_mode { + USB3503_MODE_UNKNOWN, + USB3503_MODE_HUB, + USB3503_MODE_STANDBY, +}; + +struct usb3503_platform_data { + enum usb3503_mode initial_mode; + u8 port_off_mask; + int gpio_intn; + int gpio_connect; + int gpio_reset; +}; + +#endif diff --git a/include/linux/platform_data/ux500_wdt.h b/include/linux/platform_data/ux500_wdt.h new file mode 100644 index 000000000..1689ff4c3 --- /dev/null +++ b/include/linux/platform_data/ux500_wdt.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) ST Ericsson SA 2011 + * + * License Terms: GNU General Public License v2 + * + * STE Ux500 Watchdog platform data + */ +#ifndef __UX500_WDT_H +#define __UX500_WDT_H + +/** + * struct ux500_wdt_data + */ +struct ux500_wdt_data { + unsigned int timeout; + bool has_28_bits_resolution; +}; + +#endif /* __UX500_WDT_H */ diff --git a/include/linux/platform_data/video-clcd-versatile.h b/include/linux/platform_data/video-clcd-versatile.h new file mode 100644 index 000000000..305ebaec3 --- /dev/null +++ b/include/linux/platform_data/video-clcd-versatile.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef PLAT_CLCD_H +#define PLAT_CLCD_H + +#ifdef CONFIG_PLAT_VERSATILE_CLCD +struct clcd_panel *versatile_clcd_get_panel(const char *); +int versatile_clcd_setup_dma(struct clcd_fb *, unsigned long); +int versatile_clcd_mmap_dma(struct clcd_fb *, struct vm_area_struct *); +void versatile_clcd_remove_dma(struct clcd_fb *); +#else +static inline struct clcd_panel *versatile_clcd_get_panel(const char *s) +{ + return NULL; +} +static inline int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize) +{ + return -ENODEV; +} +static inline int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vm) +{ + return -ENODEV; +} +static inline void versatile_clcd_remove_dma(struct clcd_fb *fb) +{ +} +#endif + +#endif diff --git a/include/linux/platform_data/video-ep93xx.h b/include/linux/platform_data/video-ep93xx.h new file mode 100644 index 000000000..a6f3ccdec --- /dev/null +++ b/include/linux/platform_data/video-ep93xx.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VIDEO_EP93XX_H +#define 
__VIDEO_EP93XX_H + +struct platform_device; +struct fb_info; + +/* VideoAttributes flags */ +#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0) +#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1) +#define EP93XXFB_VSYNC_ENABLE (1 << 2) +#define EP93XXFB_PIXEL_DATA_ENABLE (1 << 3) +#define EP93XXFB_COMPOSITE_SYNC (1 << 4) +#define EP93XXFB_SYNC_VERT_HIGH (1 << 5) +#define EP93XXFB_SYNC_HORIZ_HIGH (1 << 6) +#define EP93XXFB_SYNC_BLANK_HIGH (1 << 7) +#define EP93XXFB_PCLK_FALLING (1 << 8) +#define EP93XXFB_ENABLE_AC (1 << 9) +#define EP93XXFB_ENABLE_LCD (1 << 10) +#define EP93XXFB_ENABLE_CCIR (1 << 12) +#define EP93XXFB_USE_PARALLEL_INTERFACE (1 << 13) +#define EP93XXFB_ENABLE_INTERRUPT (1 << 14) +#define EP93XXFB_USB_INTERLACE (1 << 16) +#define EP93XXFB_USE_EQUALIZATION (1 << 17) +#define EP93XXFB_USE_DOUBLE_HORZ (1 << 18) +#define EP93XXFB_USE_DOUBLE_VERT (1 << 19) +#define EP93XXFB_USE_BLANK_PIXEL (1 << 20) +#define EP93XXFB_USE_SDCSN0 (0 << 21) +#define EP93XXFB_USE_SDCSN1 (1 << 21) +#define EP93XXFB_USE_SDCSN2 (2 << 21) +#define EP93XXFB_USE_SDCSN3 (3 << 21) + +#define EP93XXFB_ENABLE (EP93XXFB_STATE_MACHINE_ENABLE | \ + EP93XXFB_PIXEL_CLOCK_ENABLE | \ + EP93XXFB_VSYNC_ENABLE | \ + EP93XXFB_PIXEL_DATA_ENABLE) + +struct ep93xxfb_mach_info { + unsigned int flags; + int (*setup)(struct platform_device *pdev); + void (*teardown)(struct platform_device *pdev); + void (*blank)(int blank_mode, struct fb_info *info); +}; + +#endif /* __VIDEO_EP93XX_H */ diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h new file mode 100644 index 000000000..02812651a --- /dev/null +++ b/include/linux/platform_data/video-imxfb.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This structure describes the machine which we are running on. 
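A short sketch of how the usb3503 platform data above is commonly passed to the hub driver through i2c_board_info; the GPIO numbers and I2C address are assumptions for illustration.

#include <linux/i2c.h>
#include <linux/platform_data/usb3503.h>

/* GPIO numbers below describe hypothetical board wiring only. */
static struct usb3503_platform_data example_usb3503_pdata = {
        .initial_mode   = USB3503_MODE_HUB,
        .port_off_mask  = USB3503_OFF_PORT3,    /* keep unused port 3 off */
        .gpio_intn      = 100,
        .gpio_connect   = 101,
        .gpio_reset     = 102,
};

static struct i2c_board_info example_hub_info = {
        I2C_BOARD_INFO(USB3503_I2C_NAME, 0x08), /* address is board specific */
        .platform_data = &example_usb3503_pdata,
};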
+ */ +#ifndef __MACH_IMXFB_H__ +#define __MACH_IMXFB_H__ + +#include + +#define PCR_TFT (1 << 31) +#define PCR_COLOR (1 << 30) +#define PCR_PBSIZ_1 (0 << 28) +#define PCR_PBSIZ_2 (1 << 28) +#define PCR_PBSIZ_4 (2 << 28) +#define PCR_PBSIZ_8 (3 << 28) +#define PCR_BPIX_1 (0 << 25) +#define PCR_BPIX_2 (1 << 25) +#define PCR_BPIX_4 (2 << 25) +#define PCR_BPIX_8 (3 << 25) +#define PCR_BPIX_12 (4 << 25) +#define PCR_BPIX_16 (5 << 25) +#define PCR_BPIX_18 (6 << 25) +#define PCR_PIXPOL (1 << 24) +#define PCR_FLMPOL (1 << 23) +#define PCR_LPPOL (1 << 22) +#define PCR_CLKPOL (1 << 21) +#define PCR_OEPOL (1 << 20) +#define PCR_SCLKIDLE (1 << 19) +#define PCR_END_SEL (1 << 18) +#define PCR_END_BYTE_SWAP (1 << 17) +#define PCR_REV_VS (1 << 16) +#define PCR_ACD_SEL (1 << 15) +#define PCR_ACD(x) (((x) & 0x7f) << 8) +#define PCR_SCLK_SEL (1 << 7) +#define PCR_SHARP (1 << 6) +#define PCR_PCD(x) ((x) & 0x3f) + +#define PWMR_CLS(x) (((x) & 0x1ff) << 16) +#define PWMR_LDMSK (1 << 15) +#define PWMR_SCR1 (1 << 10) +#define PWMR_SCR0 (1 << 9) +#define PWMR_CC_EN (1 << 8) +#define PWMR_PW(x) ((x) & 0xff) + +#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26) +#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16) +#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8) +#define LSCR1_GRAY2(x) (((x) & 0xf) << 4) +#define LSCR1_GRAY1(x) (((x) & 0xf)) + +struct imx_fb_videomode { + struct fb_videomode mode; + u32 pcr; + bool aus_mode; + unsigned char bpp; +}; + +struct imx_fb_platform_data { + struct imx_fb_videomode *mode; + int num_modes; + + u_int pwmr; + u_int lscr1; + u_int dmacr; + + int (*init)(struct platform_device *); + void (*exit)(struct platform_device *); +}; + +#endif /* ifndef __MACH_IMXFB_H__ */ diff --git a/include/linux/platform_data/video-mx3fb.h b/include/linux/platform_data/video-mx3fb.h new file mode 100644 index 000000000..fdbe60001 --- /dev/null +++ b/include/linux/platform_data/video-mx3fb.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2008 + * Guennadi Liakhovetski, DENX Software Engineering, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARCH_MX3FB_H__ +#define __ASM_ARCH_MX3FB_H__ + +#include +#include + +/* Proprietary FB_SYNC_ flags */ +#define FB_SYNC_OE_ACT_HIGH 0x80000000 +#define FB_SYNC_CLK_INVERT 0x40000000 +#define FB_SYNC_DATA_INVERT 0x20000000 +#define FB_SYNC_CLK_IDLE_EN 0x10000000 +#define FB_SYNC_SHARP_MODE 0x08000000 +#define FB_SYNC_SWAP_RGB 0x04000000 +#define FB_SYNC_CLK_SEL_EN 0x02000000 + +/* + * Specify the way your display is connected. The IPU can arbitrarily + * map the internal colors to the external data lines. We only support + * the following mappings at the moment. 
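To show the imxfb platform data above in use: a sketch defining one hypothetical 640x480 TFT mode with its PCR flags and wrapping it in struct imx_fb_platform_data; the timings and PWM contrast value are placeholders.

#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/platform_data/video-imxfb.h>

/* One hypothetical 640x480 TFT mode; timings are placeholders. */
static struct imx_fb_videomode example_imxfb_modes[] = {
        {
                .mode = {
                        .name           = "example-vga",
                        .xres           = 640,
                        .yres           = 480,
                        .pixclock       = 39722,        /* picoseconds per pixel */
                        .left_margin    = 48,
                        .right_margin   = 16,
                        .upper_margin   = 33,
                        .lower_margin   = 10,
                        .hsync_len      = 96,
                        .vsync_len      = 2,
                },
                .pcr    = PCR_TFT | PCR_COLOR | PCR_PBSIZ_8 | PCR_BPIX_16,
                .bpp    = 16,
        },
};

static struct imx_fb_platform_data example_imxfb_pdata = {
        .mode           = example_imxfb_modes,
        .num_modes      = ARRAY_SIZE(example_imxfb_modes),
        .pwmr           = PWMR_CC_EN | PWMR_PW(0x80),   /* contrast PWM */
};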
+ */ +enum disp_data_mapping { + /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */ + IPU_DISP_DATA_MAPPING_RGB666, + /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */ + IPU_DISP_DATA_MAPPING_RGB565, + /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */ + IPU_DISP_DATA_MAPPING_RGB888, +}; + +/** + * struct mx3fb_platform_data - mx3fb platform data + * + * @dma_dev: pointer to the dma-device, used for dma-slave connection + * @mode: pointer to a platform-provided per mxc_register_fb() videomode + */ +struct mx3fb_platform_data { + struct device *dma_dev; + const char *name; + const struct fb_videomode *mode; + int num_modes; + enum disp_data_mapping disp_data_fmt; +}; + +#endif diff --git a/include/linux/platform_data/video-nuc900fb.h b/include/linux/platform_data/video-nuc900fb.h new file mode 100644 index 000000000..cec5ece76 --- /dev/null +++ b/include/linux/platform_data/video-nuc900fb.h @@ -0,0 +1,83 @@ +/* linux/include/asm/arch-nuc900/fb.h + * + * Copyright (c) 2008 Nuvoton technology corporation + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Changelog: + * + * 2008/08/26 vincen.zswan modify this file for LCD. + */ + +#ifndef __ASM_ARM_FB_H +#define __ASM_ARM_FB_H + + + +/* LCD Controller Hardware Desc */ +struct nuc900fb_hw { + unsigned int lcd_dccs; + unsigned int lcd_device_ctrl; + unsigned int lcd_mpulcd_cmd; + unsigned int lcd_int_cs; + unsigned int lcd_crtc_size; + unsigned int lcd_crtc_dend; + unsigned int lcd_crtc_hr; + unsigned int lcd_crtc_hsync; + unsigned int lcd_crtc_vr; + unsigned int lcd_va_baddr0; + unsigned int lcd_va_baddr1; + unsigned int lcd_va_fbctrl; + unsigned int lcd_va_scale; + unsigned int lcd_va_test; + unsigned int lcd_va_win; + unsigned int lcd_va_stuff; +}; + +/* LCD Display Description */ +struct nuc900fb_display { + /* LCD Image type */ + unsigned type; + + /* LCD Screen Size */ + unsigned short width; + unsigned short height; + + /* LCD Screen Info */ + unsigned short xres; + unsigned short yres; + unsigned short bpp; + + unsigned long pixclock; + unsigned short left_margin; + unsigned short right_margin; + unsigned short hsync_len; + unsigned short upper_margin; + unsigned short lower_margin; + unsigned short vsync_len; + + /* hardware special register value */ + unsigned int dccs; + unsigned int devctl; + unsigned int fbctrl; + unsigned int scale; +}; + +struct nuc900fb_mach_info { + struct nuc900fb_display *displays; + unsigned num_displays; + unsigned default_display; + /* GPIO Setting Info */ + unsigned gpio_dir; + unsigned gpio_dir_mask; + unsigned gpio_data; + unsigned gpio_data_mask; +}; + +extern void __init nuc900_fb_set_platdata(struct nuc900fb_mach_info *); + +#endif /* __ASM_ARM_FB_H */ diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h new file mode 100644 index 000000000..07c6c1e15 --- /dev/null +++ b/include/linux/platform_data/video-pxafb.h @@ -0,0 +1,173 @@ +/* + * Support for the xscale frame buffer. + * + * Author: Jean-Frederic Clere + * Created: Sep 22, 2003 + * Copyright: jfclere@sinix.net + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include + +/* + * Supported LCD connections + * + * bits 0 - 3: for LCD panel type: + * + * STN - for passive matrix + * DSTN - for dual scan passive matrix + * TFT - for active matrix + * + * bits 4 - 9 : for bus width + * bits 10-17 : for AC Bias Pin Frequency + * bit 18 : for output enable polarity + * bit 19 : for pixel clock edge + * bit 20 : for output pixel format when base is RGBT16 + */ +#define LCD_CONN_TYPE(_x) ((_x) & 0x0f) +#define LCD_CONN_WIDTH(_x) (((_x) >> 4) & 0x1f) + +#define LCD_TYPE_MASK 0xf +#define LCD_TYPE_UNKNOWN 0 +#define LCD_TYPE_MONO_STN 1 +#define LCD_TYPE_MONO_DSTN 2 +#define LCD_TYPE_COLOR_STN 3 +#define LCD_TYPE_COLOR_DSTN 4 +#define LCD_TYPE_COLOR_TFT 5 +#define LCD_TYPE_SMART_PANEL 6 +#define LCD_TYPE_MAX 7 + +#define LCD_MONO_STN_4BPP ((4 << 4) | LCD_TYPE_MONO_STN) +#define LCD_MONO_STN_8BPP ((8 << 4) | LCD_TYPE_MONO_STN) +#define LCD_MONO_DSTN_8BPP ((8 << 4) | LCD_TYPE_MONO_DSTN) +#define LCD_COLOR_STN_8BPP ((8 << 4) | LCD_TYPE_COLOR_STN) +#define LCD_COLOR_DSTN_16BPP ((16 << 4) | LCD_TYPE_COLOR_DSTN) +#define LCD_COLOR_TFT_8BPP ((8 << 4) | LCD_TYPE_COLOR_TFT) +#define LCD_COLOR_TFT_16BPP ((16 << 4) | LCD_TYPE_COLOR_TFT) +#define LCD_COLOR_TFT_18BPP ((18 << 4) | LCD_TYPE_COLOR_TFT) +#define LCD_SMART_PANEL_8BPP ((8 << 4) | LCD_TYPE_SMART_PANEL) +#define LCD_SMART_PANEL_16BPP ((16 << 4) | LCD_TYPE_SMART_PANEL) +#define LCD_SMART_PANEL_18BPP ((18 << 4) | LCD_TYPE_SMART_PANEL) + +#define LCD_AC_BIAS_FREQ(x) (((x) & 0xff) << 10) +#define LCD_BIAS_ACTIVE_HIGH (0 << 18) +#define LCD_BIAS_ACTIVE_LOW (1 << 18) +#define LCD_PCLK_EDGE_RISE (0 << 19) +#define LCD_PCLK_EDGE_FALL (1 << 19) +#define LCD_ALTERNATE_MAPPING (1 << 20) + +/* + * This structure describes the machine which we are running on. + * It is set in linux/arch/arm/mach-pxa/machine_name.c and used in the probe routine + * of linux/drivers/video/pxafb.c + */ +struct pxafb_mode_info { + u_long pixclock; + + u_short xres; + u_short yres; + + u_char bpp; + u_int cmap_greyscale:1, + depth:8, + transparency:1, + unused:22; + + /* Parallel Mode Timing */ + u_char hsync_len; + u_char left_margin; + u_char right_margin; + + u_char vsync_len; + u_char upper_margin; + u_char lower_margin; + u_char sync; + + /* Smart Panel Mode Timing - see PXA27x DM 7.4.15.0.3 for details + * Note: + * 1. all parameters in nanosecond (ns) + * 2. a0cs{rd,wr}_set_hld are controlled by the same register bits + * in pxa27x and pxa3xx, initialize them to the same value or + * the larger one will be used + * 3. same to {rd,wr}_pulse_width + * + * 4. LCD_PCLK_EDGE_{RISE,FALL} controls the L_PCLK_WR polarity + * 5. sync & FB_SYNC_HOR_HIGH_ACT controls the L_LCLK_A0 + * 6. 
sync & FB_SYNC_VERT_HIGH_ACT controls the L_LCLK_RD + */ + unsigned a0csrd_set_hld; /* A0 and CS Setup/Hold Time before/after L_FCLK_RD */ + unsigned a0cswr_set_hld; /* A0 and CS Setup/Hold Time before/after L_PCLK_WR */ + unsigned wr_pulse_width; /* L_PCLK_WR pulse width */ + unsigned rd_pulse_width; /* L_FCLK_RD pulse width */ + unsigned cmd_inh_time; /* Command Inhibit time between two writes */ + unsigned op_hold_time; /* Output Hold time from L_FCLK_RD negation */ +}; + +struct pxafb_mach_info { + struct pxafb_mode_info *modes; + unsigned int num_modes; + + unsigned int lcd_conn; + unsigned long video_mem_size; + + u_int fixed_modes:1, + cmap_inverse:1, + cmap_static:1, + acceleration_enabled:1, + unused:28; + + /* The following should be defined in LCCR0 + * LCCR0_Act or LCCR0_Pas Active or Passive + * LCCR0_Sngl or LCCR0_Dual Single/Dual panel + * LCCR0_Mono or LCCR0_Color Mono/Color + * LCCR0_4PixMono or LCCR0_8PixMono (in mono single mode) + * LCCR0_DMADel(Tcpu) (optional) DMA request delay + * + * The following should not be defined in LCCR0: + * LCCR0_OUM, LCCR0_BM, LCCR0_QDM, LCCR0_DIS, LCCR0_EFM + * LCCR0_IUM, LCCR0_SFM, LCCR0_LDM, LCCR0_ENB + */ + u_int lccr0; + /* The following should be defined in LCCR3 + * LCCR3_OutEnH or LCCR3_OutEnL Output enable polarity + * LCCR3_PixRsEdg or LCCR3_PixFlEdg Pixel clock edge type + * LCCR3_Acb(X) AB Bias pin frequency + * LCCR3_DPC (optional) Double Pixel Clock mode (untested) + * + * The following should not be defined in LCCR3 + * LCCR3_HSP, LCCR3_VSP, LCCR0_Pcd(x), LCCR3_Bpp + */ + u_int lccr3; + /* The following should be defined in LCCR4 + * LCCR4_PAL_FOR_0 or LCCR4_PAL_FOR_1 or LCCR4_PAL_FOR_2 + * + * All other bits in LCCR4 should be left alone. + */ + u_int lccr4; + void (*pxafb_backlight_power)(int); + void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *); + void (*smart_update)(struct fb_info *); +}; + +void pxa_set_fb_info(struct device *, struct pxafb_mach_info *); +unsigned long pxafb_get_hsync_time(struct device *dev); + +#ifdef CONFIG_FB_PXA_SMARTPANEL +extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int); +extern int pxafb_smart_flush(struct fb_info *info); +#else +static inline int pxafb_smart_queue(struct fb_info *info, + uint16_t *cmds, int n) +{ + return 0; +} + +static inline int pxafb_smart_flush(struct fb_info *info) +{ + return 0; +} +#endif diff --git a/include/linux/platform_data/video_s3c.h b/include/linux/platform_data/video_s3c.h new file mode 100644 index 000000000..dd7747ba3 --- /dev/null +++ b/include/linux/platform_data/video_s3c.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PLATFORM_DATA_VIDEO_S3C +#define __PLATFORM_DATA_VIDEO_S3C + +/* S3C_FB_MAX_WIN + * Set to the maximum number of windows that any of the supported hardware + * can use. Since the platform data uses this for an array size, having it + * set to the maximum of any version of the hardware can do is safe. + */ +#define S3C_FB_MAX_WIN (5) + +/** + * struct s3c_fb_pd_win - per window setup data + * @xres : The window X size. + * @yres : The window Y size. + * @virtual_x: The virtual X size. + * @virtual_y: The virtual Y size. 
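A minimal sketch of the pxafb machine info above for a hypothetical 480x272 TFT panel, registered from board code with pxa_set_fb_info(); all timing numbers are placeholders rather than panel datasheet values.

#include <linux/fb.h>
#include <linux/platform_data/video-pxafb.h>

/* A hypothetical 480x272 TFT panel; timing numbers are placeholders. */
static struct pxafb_mode_info example_pxafb_mode = {
        .pixclock       = 111000,       /* picoseconds per pixel */
        .xres           = 480,
        .yres           = 272,
        .bpp            = 16,
        .hsync_len      = 41,
        .left_margin    = 2,
        .right_margin   = 2,
        .vsync_len      = 10,
        .upper_margin   = 2,
        .lower_margin   = 2,
        .sync           = 0,    /* both syncs active low */
};

static struct pxafb_mach_info example_pxafb_info = {
        .modes          = &example_pxafb_mode,
        .num_modes      = 1,
        .lcd_conn       = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
};

/* from board init code: pxa_set_fb_info(NULL, &example_pxafb_info); */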
+ */ +struct s3c_fb_pd_win { + unsigned short default_bpp; + unsigned short max_bpp; + unsigned short xres; + unsigned short yres; + unsigned short virtual_x; + unsigned short virtual_y; +}; + +/** + * struct s3c_fb_platdata - S3C driver platform specific information + * @setup_gpio: Setup the external GPIO pins to the right state to transfer + * the data from the display system to the connected display + * device. + * @vidcon0: The base vidcon0 values to control the panel data format. + * @vidcon1: The base vidcon1 values to control the panel data output. + * @vtiming: Video timing when connected to a RGB type panel. + * @win: The setup data for each hardware window, or NULL for unused. + * @display_mode: The LCD output display mode. + * + * The platform data supplies the video driver with all the information + * it requires to work with the display(s) attached to the machine. It + * controls the initial mode, the number of display windows (0 is always + * the base framebuffer) that are initialised etc. + * + */ +struct s3c_fb_platdata { + void (*setup_gpio)(void); + + struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN]; + struct fb_videomode *vtiming; + + u32 vidcon0; + u32 vidcon1; +}; + +#endif diff --git a/include/linux/platform_data/voltage-omap.h b/include/linux/platform_data/voltage-omap.h new file mode 100644 index 000000000..5be4d5def --- /dev/null +++ b/include/linux/platform_data/voltage-omap.h @@ -0,0 +1,39 @@ +/* + * OMAP Voltage Management Routines + * + * Copyright (C) 2011, Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ARCH_ARM_OMAP_VOLTAGE_H +#define __ARCH_ARM_OMAP_VOLTAGE_H + +/** + * struct omap_volt_data - Omap voltage specific data. + * @voltage_nominal: The possible voltage value in uV + * @sr_efuse_offs: The offset of the efuse register(from system + * control module base address) from where to read + * the n-target value for the smartreflex module. + * @sr_errminlimit: Error min limit value for smartreflex. This value + * differs at differnet opp and thus is linked + * with voltage. + * @vp_errorgain: Error gain value for the voltage processor. This + * field also differs according to the voltage/opp. + */ +struct omap_volt_data { + u32 volt_nominal; + u32 sr_efuse_offs; + u8 sr_errminlimit; + u8 vp_errgain; +}; +struct voltagedomain; + +struct voltagedomain *voltdm_lookup(const char *name); +int voltdm_scale(struct voltagedomain *voltdm, unsigned long target_volt); +unsigned long voltdm_get_voltage(struct voltagedomain *voltdm); +struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm, + unsigned long volt); +#endif diff --git a/include/linux/platform_data/wiznet.h b/include/linux/platform_data/wiznet.h new file mode 100644 index 000000000..b5d8c192d --- /dev/null +++ b/include/linux/platform_data/wiznet.h @@ -0,0 +1,24 @@ +/* + * Ethernet driver for the WIZnet W5x00 chip. + * + * Licensed under the GPL-2 or later. 
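To illustrate the OMAP voltage-layer calls declared above: a small helper that looks up a voltage domain by name and scales it to a target voltage. The "mpu" domain name is an assumption for the example; error handling follows the usual negative-errno convention.

#include <linux/errno.h>
#include <linux/platform_data/voltage-omap.h>

/* Scale the (assumed) "mpu" voltage domain to a new nominal voltage in uV. */
static int example_scale_mpu(unsigned long target_uv)
{
        struct voltagedomain *voltdm;

        voltdm = voltdm_lookup("mpu");  /* domain name is platform specific */
        if (!voltdm)
                return -ENODEV;

        if (voltdm_get_voltage(voltdm) == target_uv)
                return 0;       /* nothing to do */

        return voltdm_scale(voltdm, target_uv);
}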
+ */ + +#ifndef PLATFORM_DATA_WIZNET_H +#define PLATFORM_DATA_WIZNET_H + +#include + +struct wiznet_platform_data { + int link_gpio; + u8 mac_addr[ETH_ALEN]; +}; + +#ifndef CONFIG_WIZNET_BUS_SHIFT +#define CONFIG_WIZNET_BUS_SHIFT 0 +#endif + +#define W5100_BUS_DIRECT_SIZE (0x8000 << CONFIG_WIZNET_BUS_SHIFT) +#define W5300_BUS_DIRECT_SIZE (0x0400 << CONFIG_WIZNET_BUS_SHIFT) + +#endif /* PLATFORM_DATA_WIZNET_H */ diff --git a/include/linux/platform_data/wkup_m3.h b/include/linux/platform_data/wkup_m3.h new file mode 100644 index 000000000..3f1d77eff --- /dev/null +++ b/include/linux/platform_data/wkup_m3.h @@ -0,0 +1,30 @@ +/* + * TI Wakeup M3 remote processor platform data + * + * Copyright (C) 2014-2015 Texas Instruments, Inc. + * + * Dave Gerlach + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_PLATFORM_DATA_WKUP_M3_H +#define _LINUX_PLATFORM_DATA_WKUP_M3_H + +struct platform_device; + +struct wkup_m3_platform_data { + const char *reset_name; + + int (*assert_reset)(struct platform_device *pdev, const char *name); + int (*deassert_reset)(struct platform_device *pdev, const char *name); +}; + +#endif /* _LINUX_PLATFORM_DATA_WKUP_M3_H */ diff --git a/include/linux/platform_data/x86/apple.h b/include/linux/platform_data/x86/apple.h new file mode 100644 index 000000000..079e816c3 --- /dev/null +++ b/include/linux/platform_data/x86/apple.h @@ -0,0 +1,13 @@ +#ifndef PLATFORM_DATA_X86_APPLE_H +#define PLATFORM_DATA_X86_APPLE_H + +#ifdef CONFIG_X86 +/** + * x86_apple_machine - whether the machine is an x86 Apple Macintosh + */ +extern bool x86_apple_machine; +#else +#define x86_apple_machine false +#endif + +#endif diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h new file mode 100644 index 000000000..7a37ac27d --- /dev/null +++ b/include/linux/platform_data/x86/clk-pmc-atom.h @@ -0,0 +1,47 @@ +/* + * Intel Atom platform clocks for BayTrail and CherryTrail SoC. + * + * Copyright (C) 2016, Intel Corporation + * Author: Irina Tirdea + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __PLATFORM_DATA_X86_CLK_PMC_ATOM_H +#define __PLATFORM_DATA_X86_CLK_PMC_ATOM_H + +/** + * struct pmc_clk - PMC platform clock configuration + * + * @name: identified, typically pmc_plt_clk_, x=[0..5] + * @freq: in Hz, 19.2MHz and 25MHz (Baytrail only) supported + * @parent_name: one of 'xtal' or 'osc' + */ +struct pmc_clk { + const char *name; + unsigned long freq; + const char *parent_name; +}; + +/** + * struct pmc_clk_data - common PMC clock configuration + * + * @base: PMC clock register base offset + * @clks: pointer to set of registered clocks, typically 0..5 + * @critical: flag to indicate if firmware enabled pmc_plt_clks + * should be marked as critial or not + */ +struct pmc_clk_data { + void __iomem *base; + const struct pmc_clk *clks; + bool critical; +}; + +#endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ diff --git a/include/linux/platform_data/x86/mlxcpld.h b/include/linux/platform_data/x86/mlxcpld.h new file mode 100644 index 000000000..b08dcb183 --- /dev/null +++ b/include/linux/platform_data/x86/mlxcpld.h @@ -0,0 +1,52 @@ +/* + * mlxcpld.h - Mellanox I2C multiplexer support in CPLD + * + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Michael Shych + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _LINUX_I2C_MLXCPLD_H +#define _LINUX_I2C_MLXCPLD_H + +/* Platform data for the CPLD I2C multiplexers */ + +/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info + * @adap_ids - adapter array + * @num_adaps - number of adapters + * @sel_reg_addr - mux select register offset in CPLD space + */ +struct mlxcpld_mux_plat_data { + int *adap_ids; + int num_adaps; + int sel_reg_addr; +}; + +#endif /* _LINUX_I2C_MLXCPLD_H */ diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h new file mode 100644 index 000000000..e4905fe69 --- /dev/null +++ b/include/linux/platform_data/x86/pmc_atom.h @@ -0,0 +1,158 @@ +/* + * Intel Atom SOC Power Management Controller Header File + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef PMC_ATOM_H +#define PMC_ATOM_H + +/* ValleyView Power Control Unit PCI Device ID */ +#define PCI_DEVICE_ID_VLV_PMC 0x0F1C +/* CherryTrail Power Control Unit PCI Device ID */ +#define PCI_DEVICE_ID_CHT_PMC 0x229C + +/* PMC Memory mapped IO registers */ +#define PMC_BASE_ADDR_OFFSET 0x44 +#define PMC_BASE_ADDR_MASK 0xFFFFFE00 +#define PMC_MMIO_REG_LEN 0x100 +#define PMC_REG_BIT_WIDTH 32 + +/* BIOS uses FUNC_DIS to disable specific function */ +#define PMC_FUNC_DIS 0x34 +#define PMC_FUNC_DIS_2 0x38 + +/* CHT specific bits in FUNC_DIS2 register */ +#define BIT_FD_GMM BIT(3) +#define BIT_FD_ISH BIT(4) + +/* S0ix wake event control */ +#define PMC_S0IX_WAKE_EN 0x3C + +#define BIT_LPC_CLOCK_RUN BIT(4) +#define BIT_SHARED_IRQ_GPSC BIT(5) +#define BIT_ORED_DEDICATED_IRQ_GPSS BIT(18) +#define BIT_ORED_DEDICATED_IRQ_GPSC BIT(19) +#define BIT_SHARED_IRQ_GPSS BIT(20) + +#define PMC_WAKE_EN_SETTING ~(BIT_LPC_CLOCK_RUN | \ + BIT_SHARED_IRQ_GPSC | \ + BIT_ORED_DEDICATED_IRQ_GPSS | \ + BIT_ORED_DEDICATED_IRQ_GPSC | \ + BIT_SHARED_IRQ_GPSS) + +/* The timers accumulate time spent in sleep state */ +#define PMC_S0IR_TMR 0x80 +#define PMC_S0I1_TMR 0x84 +#define PMC_S0I2_TMR 0x88 +#define PMC_S0I3_TMR 0x8C +#define PMC_S0_TMR 0x90 +/* Sleep state counter is in units of of 32us */ +#define PMC_TMR_SHIFT 5 + +/* Power status of power islands */ +#define PMC_PSS 0x98 + +#define PMC_PSS_BIT_GBE BIT(0) +#define PMC_PSS_BIT_SATA BIT(1) +#define PMC_PSS_BIT_HDA BIT(2) +#define PMC_PSS_BIT_SEC BIT(3) +#define PMC_PSS_BIT_PCIE BIT(4) +#define PMC_PSS_BIT_LPSS BIT(5) +#define PMC_PSS_BIT_LPE BIT(6) +#define PMC_PSS_BIT_DFX BIT(7) +#define PMC_PSS_BIT_USH_CTRL BIT(8) +#define PMC_PSS_BIT_USH_SUS BIT(9) +#define PMC_PSS_BIT_USH_VCCS BIT(10) +#define PMC_PSS_BIT_USH_VCCA BIT(11) +#define PMC_PSS_BIT_OTG_CTRL BIT(12) +#define PMC_PSS_BIT_OTG_VCCS BIT(13) +#define PMC_PSS_BIT_OTG_VCCA_CLK BIT(14) +#define PMC_PSS_BIT_OTG_VCCA BIT(15) +#define PMC_PSS_BIT_USB BIT(16) +#define PMC_PSS_BIT_USB_SUS BIT(17) + +/* CHT specific bits in PSS register */ +#define PMC_PSS_BIT_CHT_UFS BIT(7) +#define PMC_PSS_BIT_CHT_UXD BIT(11) +#define PMC_PSS_BIT_CHT_UXD_FD BIT(12) +#define PMC_PSS_BIT_CHT_UX_ENG BIT(15) +#define PMC_PSS_BIT_CHT_USB_SUS BIT(16) +#define PMC_PSS_BIT_CHT_GMM BIT(17) 
+#define PMC_PSS_BIT_CHT_ISH BIT(18) +#define PMC_PSS_BIT_CHT_DFX_MASTER BIT(26) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER1 BIT(27) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER2 BIT(28) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER3 BIT(29) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER4 BIT(30) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER5 BIT(31) + +/* These registers reflect D3 status of functions */ +#define PMC_D3_STS_0 0xA0 + +#define BIT_LPSS1_F0_DMA BIT(0) +#define BIT_LPSS1_F1_PWM1 BIT(1) +#define BIT_LPSS1_F2_PWM2 BIT(2) +#define BIT_LPSS1_F3_HSUART1 BIT(3) +#define BIT_LPSS1_F4_HSUART2 BIT(4) +#define BIT_LPSS1_F5_SPI BIT(5) +#define BIT_LPSS1_F6_XXX BIT(6) +#define BIT_LPSS1_F7_XXX BIT(7) +#define BIT_SCC_EMMC BIT(8) +#define BIT_SCC_SDIO BIT(9) +#define BIT_SCC_SDCARD BIT(10) +#define BIT_SCC_MIPI BIT(11) +#define BIT_HDA BIT(12) +#define BIT_LPE BIT(13) +#define BIT_OTG BIT(14) +#define BIT_USH BIT(15) +#define BIT_GBE BIT(16) +#define BIT_SATA BIT(17) +#define BIT_USB_EHCI BIT(18) +#define BIT_SEC BIT(19) +#define BIT_PCIE_PORT0 BIT(20) +#define BIT_PCIE_PORT1 BIT(21) +#define BIT_PCIE_PORT2 BIT(22) +#define BIT_PCIE_PORT3 BIT(23) +#define BIT_LPSS2_F0_DMA BIT(24) +#define BIT_LPSS2_F1_I2C1 BIT(25) +#define BIT_LPSS2_F2_I2C2 BIT(26) +#define BIT_LPSS2_F3_I2C3 BIT(27) +#define BIT_LPSS2_F4_I2C4 BIT(28) +#define BIT_LPSS2_F5_I2C5 BIT(29) +#define BIT_LPSS2_F6_I2C6 BIT(30) +#define BIT_LPSS2_F7_I2C7 BIT(31) + +#define PMC_D3_STS_1 0xA4 +#define BIT_SMB BIT(0) +#define BIT_OTG_SS_PHY BIT(1) +#define BIT_USH_SS_PHY BIT(2) +#define BIT_DFX BIT(3) + +/* CHT specific bits in PMC_D3_STS_1 register */ +#define BIT_STS_GMM BIT(1) +#define BIT_STS_ISH BIT(2) + +/* PMC I/O Registers */ +#define ACPI_BASE_ADDR_OFFSET 0x40 +#define ACPI_BASE_ADDR_MASK 0xFFFFFE00 +#define ACPI_MMIO_REG_LEN 0x100 + +#define PM1_CNT 0x4 +#define SLEEP_TYPE_MASK 0xFFFFECFF +#define SLEEP_TYPE_S5 0x1C00 +#define SLEEP_ENABLE 0x2000 + +extern int pmc_atom_read(int offset, u32 *value); +extern int pmc_atom_write(int offset, u32 value); + +#endif /* PMC_ATOM_H */ diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h new file mode 100644 index 000000000..7bdece8ef --- /dev/null +++ b/include/linux/platform_data/zforce_ts.h @@ -0,0 +1,23 @@ +/* drivers/input/touchscreen/zforce.c + * + * Copyright (C) 2012-2013 MundoReader S.L. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_INPUT_ZFORCE_TS_H +#define _LINUX_INPUT_ZFORCE_TS_H + +struct zforce_ts_platdata { + unsigned int x_max; + unsigned int y_max; +}; + +#endif /* _LINUX_INPUT_ZFORCE_TS_H */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h new file mode 100644 index 000000000..1a9f38f27 --- /dev/null +++ b/include/linux/platform_device.h @@ -0,0 +1,373 @@ +/* + * platform_device.h - generic, centralized driver model + * + * Copyright (c) 2001-2003 Patrick Mochel + * + * This file is released under the GPLv2 + * + * See Documentation/driver-model/ for more information. 
+ */ + +#ifndef _PLATFORM_DEVICE_H_ +#define _PLATFORM_DEVICE_H_ + +#include + +#define PLATFORM_DEVID_NONE (-1) +#define PLATFORM_DEVID_AUTO (-2) + +struct mfd_cell; +struct property_entry; +struct platform_device_id; + +struct platform_device { + const char *name; + int id; + bool id_auto; + struct device dev; + u32 num_resources; + struct resource *resource; + + const struct platform_device_id *id_entry; + char *driver_override; /* Driver name to force a match */ + + /* MFD cell pointer */ + struct mfd_cell *mfd_cell; + + /* arch specific additions */ + struct pdev_archdata archdata; +}; + +#define platform_get_device_id(pdev) ((pdev)->id_entry) + +#define to_platform_device(x) container_of((x), struct platform_device, dev) + +extern int platform_device_register(struct platform_device *); +extern void platform_device_unregister(struct platform_device *); + +extern struct bus_type platform_bus_type; +extern struct device platform_bus; + +extern void arch_setup_pdev_archdata(struct platform_device *); +extern struct resource *platform_get_resource(struct platform_device *, + unsigned int, unsigned int); +extern int platform_get_irq(struct platform_device *, unsigned int); +extern int platform_irq_count(struct platform_device *); +extern struct resource *platform_get_resource_byname(struct platform_device *, + unsigned int, + const char *); +extern int platform_get_irq_byname(struct platform_device *, const char *); +extern int platform_add_devices(struct platform_device **, int); + +struct platform_device_info { + struct device *parent; + struct fwnode_handle *fwnode; + + const char *name; + int id; + + const struct resource *res; + unsigned int num_res; + + const void *data; + size_t size_data; + u64 dma_mask; + + struct property_entry *properties; +}; +extern struct platform_device *platform_device_register_full( + const struct platform_device_info *pdevinfo); + +/** + * platform_device_register_resndata - add a platform-level device with + * resources and platform-specific data + * + * @parent: parent device for the device we're adding + * @name: base name of the device we're adding + * @id: instance id + * @res: set of resources that needs to be allocated for the device + * @num: number of resources + * @data: platform specific data for this platform device + * @size: size of platform specific data + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. + */ +static inline struct platform_device *platform_device_register_resndata( + struct device *parent, const char *name, int id, + const struct resource *res, unsigned int num, + const void *data, size_t size) { + + struct platform_device_info pdevinfo = { + .parent = parent, + .name = name, + .id = id, + .res = res, + .num_res = num, + .data = data, + .size_data = size, + .dma_mask = 0, + }; + + return platform_device_register_full(&pdevinfo); +} + +/** + * platform_device_register_simple - add a platform-level device and its resources + * @name: base name of the device we're adding + * @id: instance id + * @res: set of resources that needs to be allocated for the device + * @num: number of resources + * + * This function creates a simple platform device that requires minimal + * resource and memory management. Canned release function freeing memory + * allocated for the device allows drivers using such devices to be + * unloaded without waiting for the last reference to the device to be + * dropped. + * + * This interface is primarily intended for use with legacy drivers which + * probe hardware directly. 
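
To illustrate the registration helpers introduced above, here is a minimal sketch that creates a platform device from board or MFD code; the "example-uart" name, the resources, and the platform data type are invented for this example.

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* Hypothetical memory/IRQ resources and platform data for "example-uart". */
static const struct resource example_uart_res[] = {
	DEFINE_RES_MEM(0xfe001000, 0x100),
	DEFINE_RES_IRQ(42),
};

struct example_uart_pdata {
	unsigned int clock_hz;
};

static int example_uart_add(struct device *parent)
{
	static const struct example_uart_pdata pdata = { .clock_hz = 48000000 };
	struct platform_device *pdev;

	/* Copies the resources and platform data, then adds the device. */
	pdev = platform_device_register_resndata(parent, "example-uart",
						 PLATFORM_DEVID_AUTO,
						 example_uart_res,
						 ARRAY_SIZE(example_uart_res),
						 &pdata, sizeof(pdata));
	return PTR_ERR_OR_ZERO(pdev);
}

For devices that need neither resources nor platform data, platform_device_register_simple() reduces the call above to a one-liner.
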
Because such drivers create sysfs device nodes + * themselves, rather than letting system infrastructure handle such device + * enumeration tasks, they don't fully conform to the Linux driver model. + * In particular, when such drivers are built as modules, they can't be + * "hotplugged". + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. + */ +static inline struct platform_device *platform_device_register_simple( + const char *name, int id, + const struct resource *res, unsigned int num) +{ + return platform_device_register_resndata(NULL, name, id, + res, num, NULL, 0); +} + +/** + * platform_device_register_data - add a platform-level device with platform-specific data + * @parent: parent device for the device we're adding + * @name: base name of the device we're adding + * @id: instance id + * @data: platform specific data for this platform device + * @size: size of platform specific data + * + * This function creates a simple platform device that requires minimal + * resource and memory management. Canned release function freeing memory + * allocated for the device allows drivers using such devices to be + * unloaded without waiting for the last reference to the device to be + * dropped. + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. + */ +static inline struct platform_device *platform_device_register_data( + struct device *parent, const char *name, int id, + const void *data, size_t size) +{ + return platform_device_register_resndata(parent, name, id, + NULL, 0, data, size); +} + +extern struct platform_device *platform_device_alloc(const char *name, int id); +extern int platform_device_add_resources(struct platform_device *pdev, + const struct resource *res, + unsigned int num); +extern int platform_device_add_data(struct platform_device *pdev, + const void *data, size_t size); +extern int platform_device_add_properties(struct platform_device *pdev, + const struct property_entry *properties); +extern int platform_device_add(struct platform_device *pdev); +extern void platform_device_del(struct platform_device *pdev); +extern void platform_device_put(struct platform_device *pdev); + +struct platform_driver { + int (*probe)(struct platform_device *); + int (*remove)(struct platform_device *); + void (*shutdown)(struct platform_device *); + int (*suspend)(struct platform_device *, pm_message_t state); + int (*resume)(struct platform_device *); + struct device_driver driver; + const struct platform_device_id *id_table; + bool prevent_deferred_probe; +}; + +#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \ + driver)) + +/* + * use a macro to avoid include chaining to get THIS_MODULE + */ +#define platform_driver_register(drv) \ + __platform_driver_register(drv, THIS_MODULE) +extern int __platform_driver_register(struct platform_driver *, + struct module *); +extern void platform_driver_unregister(struct platform_driver *); + +/* non-hotpluggable platform devices may use this so that probe() and + * its support may live in __init sections, conserving runtime memory. 
+ */ +#define platform_driver_probe(drv, probe) \ + __platform_driver_probe(drv, probe, THIS_MODULE) +extern int __platform_driver_probe(struct platform_driver *driver, + int (*probe)(struct platform_device *), struct module *module); + +static inline void *platform_get_drvdata(const struct platform_device *pdev) +{ + return dev_get_drvdata(&pdev->dev); +} + +static inline void platform_set_drvdata(struct platform_device *pdev, + void *data) +{ + dev_set_drvdata(&pdev->dev, data); +} + +/* module_platform_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_platform_driver(__platform_driver) \ + module_driver(__platform_driver, platform_driver_register, \ + platform_driver_unregister) + +/* builtin_platform_driver() - Helper macro for builtin drivers that + * don't do anything special in driver init. This eliminates some + * boilerplate. Each driver may only use this macro once, and + * calling it replaces device_initcall(). Note this is meant to be + * a parallel of module_platform_driver() above, but w/o _exit stuff. + */ +#define builtin_platform_driver(__platform_driver) \ + builtin_driver(__platform_driver, platform_driver_register) + +/* module_platform_driver_probe() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_platform_driver_probe(__platform_driver, __platform_probe) \ +static int __init __platform_driver##_init(void) \ +{ \ + return platform_driver_probe(&(__platform_driver), \ + __platform_probe); \ +} \ +module_init(__platform_driver##_init); \ +static void __exit __platform_driver##_exit(void) \ +{ \ + platform_driver_unregister(&(__platform_driver)); \ +} \ +module_exit(__platform_driver##_exit); + +/* builtin_platform_driver_probe() - Helper macro for drivers that don't do + * anything special in device init. This eliminates some boilerplate. Each + * driver may only use this macro once, and using it replaces device_initcall. + * This is meant to be a parallel of module_platform_driver_probe above, but + * without the __exit parts. 
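
Pulling the driver-side pieces above together, a minimal platform driver might look like the sketch below; the device name, private structure, and resource layout are placeholders rather than anything defined by this header.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_uart_priv {
	void __iomem *regs;
	int irq;
};

static int example_uart_probe(struct platform_device *pdev)
{
	struct example_uart_priv *priv;
	struct resource *res;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Look up the MMIO window and interrupt declared for this device. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int example_uart_remove(struct platform_device *pdev)
{
	/* Resources were claimed with devm_*, so nothing to release here. */
	return 0;
}

static struct platform_driver example_uart_driver = {
	.probe	= example_uart_probe,
	.remove	= example_uart_remove,
	.driver	= {
		.name = "example-uart",
	},
};
module_platform_driver(example_uart_driver);

MODULE_LICENSE("GPL");

Here module_platform_driver() expands to the usual module_init()/module_exit() pair that registers and unregisters the driver, as described above.
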
+ */ +#define builtin_platform_driver_probe(__platform_driver, __platform_probe) \ +static int __init __platform_driver##_init(void) \ +{ \ + return platform_driver_probe(&(__platform_driver), \ + __platform_probe); \ +} \ +device_initcall(__platform_driver##_init); \ + +#define platform_create_bundle(driver, probe, res, n_res, data, size) \ + __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE) +extern struct platform_device *__platform_create_bundle( + struct platform_driver *driver, int (*probe)(struct platform_device *), + struct resource *res, unsigned int n_res, + const void *data, size_t size, struct module *module); + +int __platform_register_drivers(struct platform_driver * const *drivers, + unsigned int count, struct module *owner); +void platform_unregister_drivers(struct platform_driver * const *drivers, + unsigned int count); + +#define platform_register_drivers(drivers, count) \ + __platform_register_drivers(drivers, count, THIS_MODULE) + +/* early platform driver interface */ +struct early_platform_driver { + const char *class_str; + struct platform_driver *pdrv; + struct list_head list; + int requested_id; + char *buffer; + int bufsize; +}; + +#define EARLY_PLATFORM_ID_UNSET -2 +#define EARLY_PLATFORM_ID_ERROR -3 + +extern int early_platform_driver_register(struct early_platform_driver *epdrv, + char *buf); +extern void early_platform_add_devices(struct platform_device **devs, int num); + +static inline int is_early_platform_device(struct platform_device *pdev) +{ + return !pdev->dev.driver; +} + +extern void early_platform_driver_register_all(char *class_str); +extern int early_platform_driver_probe(char *class_str, + int nr_probe, int user_only); +extern void early_platform_cleanup(void); + +#define early_platform_init(class_string, platdrv) \ + early_platform_init_buffer(class_string, platdrv, NULL, 0) + +#ifndef MODULE +#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \ +static __initdata struct early_platform_driver early_driver = { \ + .class_str = class_string, \ + .buffer = buf, \ + .bufsize = bufsiz, \ + .pdrv = platdrv, \ + .requested_id = EARLY_PLATFORM_ID_UNSET, \ +}; \ +static int __init early_platform_driver_setup_func(char *buffer) \ +{ \ + return early_platform_driver_register(&early_driver, buffer); \ +} \ +early_param(class_string, early_platform_driver_setup_func) +#else /* MODULE */ +#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \ +static inline char *early_platform_driver_setup_func(void) \ +{ \ + return bufsiz ? 
buf : NULL; \ +} +#endif /* MODULE */ + +#ifdef CONFIG_SUSPEND +extern int platform_pm_suspend(struct device *dev); +extern int platform_pm_resume(struct device *dev); +#else +#define platform_pm_suspend NULL +#define platform_pm_resume NULL +#endif + +#ifdef CONFIG_HIBERNATE_CALLBACKS +extern int platform_pm_freeze(struct device *dev); +extern int platform_pm_thaw(struct device *dev); +extern int platform_pm_poweroff(struct device *dev); +extern int platform_pm_restore(struct device *dev); +#else +#define platform_pm_freeze NULL +#define platform_pm_thaw NULL +#define platform_pm_poweroff NULL +#define platform_pm_restore NULL +#endif + +extern int platform_dma_configure(struct device *dev); + +#ifdef CONFIG_PM_SLEEP +#define USE_PLATFORM_PM_SLEEP_OPS \ + .suspend = platform_pm_suspend, \ + .resume = platform_pm_resume, \ + .freeze = platform_pm_freeze, \ + .thaw = platform_pm_thaw, \ + .poweroff = platform_pm_poweroff, \ + .restore = platform_pm_restore, +#else +#define USE_PLATFORM_PM_SLEEP_OPS +#endif + +#endif /* _PLATFORM_DEVICE_H_ */ diff --git a/include/linux/plist.h b/include/linux/plist.h new file mode 100644 index 000000000..97883604a --- /dev/null +++ b/include/linux/plist.h @@ -0,0 +1,300 @@ +/* + * Descending-priority-sorted double-linked list + * + * (C) 2002-2003 Intel Corp + * Inaky Perez-Gonzalez . + * + * 2001-2005 (c) MontaVista Software, Inc. + * Daniel Walker + * + * (C) 2005 Thomas Gleixner + * + * Simplifications of the original code by + * Oleg Nesterov + * + * Licensed under the FSF's GNU Public License v2 or later. + * + * Based on simple lists (include/linux/list.h). + * + * This is a priority-sorted list of nodes; each node has a + * priority from INT_MIN (highest) to INT_MAX (lowest). + * + * Addition is O(K), removal is O(1), change of priority of a node is + * O(K) and K is the number of RT priority levels used in the system. + * (1 <= K <= 99) + * + * This list is really a list of lists: + * + * - The tier 1 list is the prio_list, different priority nodes. + * + * - The tier 2 list is the node_list, serialized nodes. + * + * Simple ASCII art explanation: + * + * pl:prio_list (only for plist_node) + * nl:node_list + * HEAD| NODE(S) + * | + * ||------------------------------------| + * ||->|pl|<->|pl|<--------------->|pl|<-| + * | |10| |21| |21| |21| |40| (prio) + * | | | | | | | | | | | + * | | | | | | | | | | | + * |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-| + * |-------------------------------------------| + * + * The nodes on the prio_list list are sorted by priority to simplify + * the insertion of new nodes. There are no nodes with duplicate + * priorites on the list. + * + * The nodes on the node_list are ordered by priority and can contain + * entries which have the same priority. Those entries are ordered + * FIFO + * + * Addition means: look for the prio_list node in the prio_list + * for the priority of the node and insert it before the node_list + * entry of the next prio_list node. If it is the first node of + * that priority, add it to the prio_list in the right position and + * insert it into the serialized node_list list + * + * Removal means remove it from the node_list and remove it from + * the prio_list if the node_list list_head is non empty. In case + * of removal from the prio_list it must be checked whether other + * entries of the same priority are on the list or not. If there + * is another entry of the same priority then this entry has to + * replace the removed entry on the prio_list. 
If the entry which + * is removed is the only entry of this priority then a simple + * remove from both list is sufficient. + * + * INT_MIN is the highest priority, 0 is the medium highest, INT_MAX + * is lowest priority. + * + * No locking is done, up to the caller. + * + */ +#ifndef _LINUX_PLIST_H_ +#define _LINUX_PLIST_H_ + +#include +#include + +struct plist_head { + struct list_head node_list; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +/** + * PLIST_HEAD_INIT - static struct plist_head initializer + * @head: struct plist_head variable name + */ +#define PLIST_HEAD_INIT(head) \ +{ \ + .node_list = LIST_HEAD_INIT((head).node_list) \ +} + +/** + * PLIST_HEAD - declare and init plist_head + * @head: name for struct plist_head variable + */ +#define PLIST_HEAD(head) \ + struct plist_head head = PLIST_HEAD_INIT(head) + +/** + * PLIST_NODE_INIT - static struct plist_node initializer + * @node: struct plist_node variable name + * @__prio: initial node priority + */ +#define PLIST_NODE_INIT(node, __prio) \ +{ \ + .prio = (__prio), \ + .prio_list = LIST_HEAD_INIT((node).prio_list), \ + .node_list = LIST_HEAD_INIT((node).node_list), \ +} + +/** + * plist_head_init - dynamic struct plist_head initializer + * @head: &struct plist_head pointer + */ +static inline void +plist_head_init(struct plist_head *head) +{ + INIT_LIST_HEAD(&head->node_list); +} + +/** + * plist_node_init - Dynamic struct plist_node initializer + * @node: &struct plist_node pointer + * @prio: initial node priority + */ +static inline void plist_node_init(struct plist_node *node, int prio) +{ + node->prio = prio; + INIT_LIST_HEAD(&node->prio_list); + INIT_LIST_HEAD(&node->node_list); +} + +extern void plist_add(struct plist_node *node, struct plist_head *head); +extern void plist_del(struct plist_node *node, struct plist_head *head); + +extern void plist_requeue(struct plist_node *node, struct plist_head *head); + +/** + * plist_for_each - iterate over the plist + * @pos: the type * to use as a loop counter + * @head: the head for your list + */ +#define plist_for_each(pos, head) \ + list_for_each_entry(pos, &(head)->node_list, node_list) + +/** + * plist_for_each_continue - continue iteration over the plist + * @pos: the type * to use as a loop cursor + * @head: the head for your list + * + * Continue to iterate over plist, continuing after the current position. + */ +#define plist_for_each_continue(pos, head) \ + list_for_each_entry_continue(pos, &(head)->node_list, node_list) + +/** + * plist_for_each_safe - iterate safely over a plist of given type + * @pos: the type * to use as a loop counter + * @n: another type * to use as temporary storage + * @head: the head for your list + * + * Iterate over a plist of given type, safe against removal of list entry. 
+ */ +#define plist_for_each_safe(pos, n, head) \ + list_for_each_entry_safe(pos, n, &(head)->node_list, node_list) + +/** + * plist_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop counter + * @head: the head for your list + * @mem: the name of the list_head within the struct + */ +#define plist_for_each_entry(pos, head, mem) \ + list_for_each_entry(pos, &(head)->node_list, mem.node_list) + +/** + * plist_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor + * @head: the head for your list + * @m: the name of the list_head within the struct + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define plist_for_each_entry_continue(pos, head, m) \ + list_for_each_entry_continue(pos, &(head)->node_list, m.node_list) + +/** + * plist_for_each_entry_safe - iterate safely over list of given type + * @pos: the type * to use as a loop counter + * @n: another type * to use as temporary storage + * @head: the head for your list + * @m: the name of the list_head within the struct + * + * Iterate over list of given type, safe against removal of list entry. + */ +#define plist_for_each_entry_safe(pos, n, head, m) \ + list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list) + +/** + * plist_head_empty - return !0 if a plist_head is empty + * @head: &struct plist_head pointer + */ +static inline int plist_head_empty(const struct plist_head *head) +{ + return list_empty(&head->node_list); +} + +/** + * plist_node_empty - return !0 if plist_node is not on a list + * @node: &struct plist_node pointer + */ +static inline int plist_node_empty(const struct plist_node *node) +{ + return list_empty(&node->node_list); +} + +/* All functions below assume the plist_head is not empty. */ + +/** + * plist_first_entry - get the struct for the first entry + * @head: the &struct plist_head pointer + * @type: the type of the struct this is embedded in + * @member: the name of the list_head within the struct + */ +#ifdef CONFIG_DEBUG_PI_LIST +# define plist_first_entry(head, type, member) \ +({ \ + WARN_ON(plist_head_empty(head)); \ + container_of(plist_first(head), type, member); \ +}) +#else +# define plist_first_entry(head, type, member) \ + container_of(plist_first(head), type, member) +#endif + +/** + * plist_last_entry - get the struct for the last entry + * @head: the &struct plist_head pointer + * @type: the type of the struct this is embedded in + * @member: the name of the list_head within the struct + */ +#ifdef CONFIG_DEBUG_PI_LIST +# define plist_last_entry(head, type, member) \ +({ \ + WARN_ON(plist_head_empty(head)); \ + container_of(plist_last(head), type, member); \ +}) +#else +# define plist_last_entry(head, type, member) \ + container_of(plist_last(head), type, member) +#endif + +/** + * plist_next - get the next entry in list + * @pos: the type * to cursor + */ +#define plist_next(pos) \ + list_next_entry(pos, node_list) + +/** + * plist_prev - get the prev entry in list + * @pos: the type * to cursor + */ +#define plist_prev(pos) \ + list_prev_entry(pos, node_list) + +/** + * plist_first - return the first node (and thus, highest priority) + * @head: the &struct plist_head pointer + * + * Assumes the plist is _not_ empty. 
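
A small usage sketch for the API above: embed a plist_node in your own structure, add entries with a priority, and consume them highest-priority first. The job structure and priorities are made up for illustration, and the caller is responsible for locking, as the header notes.

#include <linux/plist.h>

struct example_job {
	struct plist_node node;
	const char *name;
};

/* Declares and statically initializes the list head. */
static PLIST_HEAD(example_jobs);

static void example_queue_job(struct example_job *job, int prio)
{
	/* Numerically lower prio means higher priority (INT_MIN is highest). */
	plist_node_init(&job->node, prio);
	plist_add(&job->node, &example_jobs);
}

static struct example_job *example_pop_highest(void)
{
	struct example_job *job;

	if (plist_head_empty(&example_jobs))
		return NULL;

	/* plist_first() yields the node with the lowest prio value. */
	job = plist_first_entry(&example_jobs, struct example_job, node);
	plist_del(&job->node, &example_jobs);
	return job;
}
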
+ */ +static inline struct plist_node *plist_first(const struct plist_head *head) +{ + return list_entry(head->node_list.next, + struct plist_node, node_list); +} + +/** + * plist_last - return the last node (and thus, lowest priority) + * @head: the &struct plist_head pointer + * + * Assumes the plist is _not_ empty. + */ +static inline struct plist_node *plist_last(const struct plist_head *head) +{ + return list_entry(head->node_list.prev, + struct plist_node, node_list); +} + +#endif diff --git a/include/linux/pm-trace.h b/include/linux/pm-trace.h new file mode 100644 index 000000000..b8604f884 --- /dev/null +++ b/include/linux/pm-trace.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef PM_TRACE_H +#define PM_TRACE_H + +#include +#ifdef CONFIG_PM_TRACE +#include + +extern int pm_trace_enabled; +extern bool pm_trace_rtc_abused; + +static inline bool pm_trace_rtc_valid(void) +{ + return !pm_trace_rtc_abused; +} + +static inline int pm_trace_is_enabled(void) +{ + return pm_trace_enabled; +} + +struct device; +extern void set_trace_device(struct device *); +extern void generate_pm_trace(const void *tracedata, unsigned int user); +extern int show_trace_dev_match(char *buf, size_t size); + +#define TRACE_DEVICE(dev) do { \ + if (pm_trace_enabled) \ + set_trace_device(dev); \ + } while(0) + +#else + +static inline bool pm_trace_rtc_valid(void) { return true; } +static inline int pm_trace_is_enabled(void) { return 0; } + +#define TRACE_DEVICE(dev) do { } while (0) +#define TRACE_RESUME(dev) do { } while (0) +#define TRACE_SUSPEND(dev) do { } while (0) + +#endif + +#endif diff --git a/include/linux/pm.h b/include/linux/pm.h new file mode 100644 index 000000000..e723b78d8 --- /dev/null +++ b/include/linux/pm.h @@ -0,0 +1,830 @@ +/* + * pm.h - Power management interface + * + * Copyright (C) 2000 Andrew Henroid + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_PM_H +#define _LINUX_PM_H + +#include +#include +#include +#include +#include +#include + +/* + * Callbacks for platform drivers to implement. + */ +extern void (*pm_power_off)(void); +extern void (*pm_power_off_prepare)(void); + +struct device; /* we have a circular dep with device.h */ +#ifdef CONFIG_VT_CONSOLE_SLEEP +extern void pm_vt_switch_required(struct device *dev, bool required); +extern void pm_vt_switch_unregister(struct device *dev); +#else +static inline void pm_vt_switch_required(struct device *dev, bool required) +{ +} +static inline void pm_vt_switch_unregister(struct device *dev) +{ +} +#endif /* CONFIG_VT_CONSOLE_SLEEP */ + +/* + * Device power management + */ + +struct device; + +#ifdef CONFIG_PM +extern const char power_group_name[]; /* = "power" */ +#else +#define power_group_name NULL +#endif + +typedef struct pm_message { + int event; +} pm_message_t; + +/** + * struct dev_pm_ops - device PM callbacks. 
+ * + * @prepare: The principal role of this callback is to prevent new children of + * the device from being registered after it has returned (the driver's + * subsystem and generally the rest of the kernel is supposed to prevent + * new calls to the probe method from being made too once @prepare() has + * succeeded). If @prepare() detects a situation it cannot handle (e.g. + * registration of a child already in progress), it may return -EAGAIN, so + * that the PM core can execute it once again (e.g. after a new child has + * been registered) to recover from the race condition. + * This method is executed for all kinds of suspend transitions and is + * followed by one of the suspend callbacks: @suspend(), @freeze(), or + * @poweroff(). If the transition is a suspend to memory or standby (that + * is, not related to hibernation), the return value of @prepare() may be + * used to indicate to the PM core to leave the device in runtime suspend + * if applicable. Namely, if @prepare() returns a positive number, the PM + * core will understand that as a declaration that the device appears to be + * runtime-suspended and it may be left in that state during the entire + * transition and during the subsequent resume if all of its descendants + * are left in runtime suspend too. If that happens, @complete() will be + * executed directly after @prepare() and it must ensure the proper + * functioning of the device after the system resume. + * The PM core executes subsystem-level @prepare() for all devices before + * starting to invoke suspend callbacks for any of them, so generally + * devices may be assumed to be functional or to respond to runtime resume + * requests while @prepare() is being executed. However, device drivers + * may NOT assume anything about the availability of user space at that + * time and it is NOT valid to request firmware from within @prepare() + * (it's too late to do that). It also is NOT valid to allocate + * substantial amounts of memory from @prepare() in the GFP_KERNEL mode. + * [To work around these limitations, drivers may register suspend and + * hibernation notifiers to be executed before the freezing of tasks.] + * + * @complete: Undo the changes made by @prepare(). This method is executed for + * all kinds of resume transitions, following one of the resume callbacks: + * @resume(), @thaw(), @restore(). Also called if the state transition + * fails before the driver's suspend callback: @suspend(), @freeze() or + * @poweroff(), can be executed (e.g. if the suspend callback fails for one + * of the other devices that the PM core has unsuccessfully attempted to + * suspend earlier). + * The PM core executes subsystem-level @complete() after it has executed + * the appropriate resume callbacks for all devices. If the corresponding + * @prepare() at the beginning of the suspend transition returned a + * positive number and the device was left in runtime suspend (without + * executing any suspend and resume callbacks for it), @complete() will be + * the only callback executed for the device during resume. In that case, + * @complete() must be prepared to do whatever is necessary to ensure the + * proper functioning of the device after the system resume. To this end, + * @complete() can check the power.direct_complete flag of the device to + * learn whether (unset) or not (set) the previous suspend and resume + * callbacks have been executed for it. 
+ * + * @suspend: Executed before putting the system into a sleep state in which the + * contents of main memory are preserved. The exact action to perform + * depends on the device's subsystem (PM domain, device type, class or bus + * type), but generally the device must be quiescent after subsystem-level + * @suspend() has returned, so that it doesn't do any I/O or DMA. + * Subsystem-level @suspend() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. + * + * @suspend_late: Continue operations started by @suspend(). For a number of + * devices @suspend_late() may point to the same callback routine as the + * runtime suspend callback. + * + * @resume: Executed after waking the system up from a sleep state in which the + * contents of main memory were preserved. The exact action to perform + * depends on the device's subsystem, but generally the driver is expected + * to start working again, responding to hardware events and software + * requests (the device itself may be left in a low-power state, waiting + * for a runtime resume to occur). The state of the device at the time its + * driver's @resume() callback is run depends on the platform and subsystem + * the device belongs to. On most platforms, there are no restrictions on + * availability of resources like clocks during @resume(). + * Subsystem-level @resume() is executed for all devices after invoking + * subsystem-level @resume_noirq() for all of them. + * + * @resume_early: Prepare to execute @resume(). For a number of devices + * @resume_early() may point to the same callback routine as the runtime + * resume callback. + * + * @freeze: Hibernation-specific, executed before creating a hibernation image. + * Analogous to @suspend(), but it should not enable the device to signal + * wakeup events or change its power state. The majority of subsystems + * (with the notable exception of the PCI bus type) expect the driver-level + * @freeze() to save the device settings in memory to be used by @restore() + * during the subsequent resume from hibernation. + * Subsystem-level @freeze() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. + * + * @freeze_late: Continue operations started by @freeze(). Analogous to + * @suspend_late(), but it should not enable the device to signal wakeup + * events or change its power state. + * + * @thaw: Hibernation-specific, executed after creating a hibernation image OR + * if the creation of an image has failed. Also executed after a failing + * attempt to restore the contents of main memory from such an image. + * Undo the changes made by the preceding @freeze(), so the device can be + * operated in the same way as immediately before the call to @freeze(). + * Subsystem-level @thaw() is executed for all devices after invoking + * subsystem-level @thaw_noirq() for all of them. It also may be executed + * directly after @freeze() in case of a transition error. + * + * @thaw_early: Prepare to execute @thaw(). Undo the changes made by the + * preceding @freeze_late(). + * + * @poweroff: Hibernation-specific, executed after saving a hibernation image. + * Analogous to @suspend(), but it need not save the device's settings in + * memory. + * Subsystem-level @poweroff() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. + * + * @poweroff_late: Continue operations started by @poweroff(). Analogous to + * @suspend_late(), but it need not save the device's settings in memory. 
+ * + * @restore: Hibernation-specific, executed after restoring the contents of main + * memory from a hibernation image, analogous to @resume(). + * + * @restore_early: Prepare to execute @restore(), analogous to @resume_early(). + * + * @suspend_noirq: Complete the actions started by @suspend(). Carry out any + * additional operations required for suspending the device that might be + * racing with its driver's interrupt handler, which is guaranteed not to + * run while @suspend_noirq() is being executed. + * It generally is expected that the device will be in a low-power state + * (appropriate for the target system sleep state) after subsystem-level + * @suspend_noirq() has returned successfully. If the device can generate + * system wakeup signals and is enabled to wake up the system, it should be + * configured to do so at that time. However, depending on the platform + * and device's subsystem, @suspend() or @suspend_late() may be allowed to + * put the device into the low-power state and configure it to generate + * wakeup signals, in which case it generally is not necessary to define + * @suspend_noirq(). + * + * @resume_noirq: Prepare for the execution of @resume() by carrying out any + * operations required for resuming the device that might be racing with + * its driver's interrupt handler, which is guaranteed not to run while + * @resume_noirq() is being executed. + * + * @freeze_noirq: Complete the actions started by @freeze(). Carry out any + * additional operations required for freezing the device that might be + * racing with its driver's interrupt handler, which is guaranteed not to + * run while @freeze_noirq() is being executed. + * The power state of the device should not be changed by either @freeze(), + * or @freeze_late(), or @freeze_noirq() and it should not be configured to + * signal system wakeup by any of these callbacks. + * + * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any + * operations required for thawing the device that might be racing with its + * driver's interrupt handler, which is guaranteed not to run while + * @thaw_noirq() is being executed. + * + * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to + * @suspend_noirq(), but it need not save the device's settings in memory. + * + * @restore_noirq: Prepare for the execution of @restore() by carrying out any + * operations required for thawing the device that might be racing with its + * driver's interrupt handler, which is guaranteed not to run while + * @restore_noirq() is being executed. Analogous to @resume_noirq(). + * + * @runtime_suspend: Prepare the device for a condition in which it won't be + * able to communicate with the CPU(s) and RAM due to power management. + * This need not mean that the device should be put into a low-power state. + * For example, if the device is behind a link which is about to be turned + * off, the device may remain at full power. If the device does go to low + * power and is capable of generating runtime wakeup events, remote wakeup + * (i.e., a hardware mechanism allowing the device to request a change of + * its power state via an interrupt) should be enabled for it. + * + * @runtime_resume: Put the device into the fully active state in response to a + * wakeup event generated by hardware or at the request of software. If + * necessary, put the device into the full-power state and restore its + * registers, so that it is fully operational. 
+ * + * @runtime_idle: Device appears to be inactive and it might be put into a + * low-power state if all of the necessary conditions are satisfied. + * Check these conditions, and return 0 if it's appropriate to let the PM + * core queue a suspend request for the device. + * + * Several device power state transitions are externally visible, affecting + * the state of pending I/O queues and (for drivers that touch hardware) + * interrupts, wakeups, DMA, and other hardware state. There may also be + * internal transitions to various low-power modes which are transparent + * to the rest of the driver stack (such as a driver that's ON gating off + * clocks which are not in active use). + * + * The externally visible transitions are handled with the help of callbacks + * included in this structure in such a way that, typically, two levels of + * callbacks are involved. First, the PM core executes callbacks provided by PM + * domains, device types, classes and bus types. They are the subsystem-level + * callbacks expected to execute callbacks provided by device drivers, although + * they may choose not to do that. If the driver callbacks are executed, they + * have to collaborate with the subsystem-level callbacks to achieve the goals + * appropriate for the given system transition, given transition phase and the + * subsystem the device belongs to. + * + * All of the above callbacks, except for @complete(), return error codes. + * However, the error codes returned by @resume(), @thaw(), @restore(), + * @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do not cause the PM + * core to abort the resume transition during which they are returned. The + * error codes returned in those cases are only printed to the system logs for + * debugging purposes. Still, it is recommended that drivers only return error + * codes from their resume methods in case of an unrecoverable failure (i.e. + * when the device being handled refuses to resume and becomes unusable) to + * allow the PM core to be modified in the future, so that it can avoid + * attempting to handle devices that failed to resume and their children. + * + * It is allowed to unregister devices while the above callbacks are being + * executed. However, a callback routine MUST NOT try to unregister the device + * it was called for, although it may unregister children of that device (for + * example, if it detects that a child was unplugged while the system was + * asleep). + * + * There also are callbacks related to runtime power management of devices. + * Again, as a rule these callbacks are executed by the PM core for subsystems + * (PM domains, device types, classes and bus types) and the subsystem-level + * callbacks are expected to invoke the driver callbacks. Moreover, the exact + * actions to be performed by a device driver's callbacks generally depend on + * the platform and subsystem the device belongs to. + * + * Refer to Documentation/power/runtime_pm.txt for more information about the + * role of the @runtime_suspend(), @runtime_resume() and @runtime_idle() + * callbacks in device runtime power management. 
+ */ +struct dev_pm_ops { + int (*prepare)(struct device *dev); + void (*complete)(struct device *dev); + int (*suspend)(struct device *dev); + int (*resume)(struct device *dev); + int (*freeze)(struct device *dev); + int (*thaw)(struct device *dev); + int (*poweroff)(struct device *dev); + int (*restore)(struct device *dev); + int (*suspend_late)(struct device *dev); + int (*resume_early)(struct device *dev); + int (*freeze_late)(struct device *dev); + int (*thaw_early)(struct device *dev); + int (*poweroff_late)(struct device *dev); + int (*restore_early)(struct device *dev); + int (*suspend_noirq)(struct device *dev); + int (*resume_noirq)(struct device *dev); + int (*freeze_noirq)(struct device *dev); + int (*thaw_noirq)(struct device *dev); + int (*poweroff_noirq)(struct device *dev); + int (*restore_noirq)(struct device *dev); + int (*runtime_suspend)(struct device *dev); + int (*runtime_resume)(struct device *dev); + int (*runtime_idle)(struct device *dev); +}; + +#ifdef CONFIG_PM_SLEEP +#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + .suspend = suspend_fn, \ + .resume = resume_fn, \ + .freeze = suspend_fn, \ + .thaw = resume_fn, \ + .poweroff = suspend_fn, \ + .restore = resume_fn, +#else +#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) +#endif + +#ifdef CONFIG_PM_SLEEP +#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + .suspend_late = suspend_fn, \ + .resume_early = resume_fn, \ + .freeze_late = suspend_fn, \ + .thaw_early = resume_fn, \ + .poweroff_late = suspend_fn, \ + .restore_early = resume_fn, +#else +#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) +#endif + +#ifdef CONFIG_PM_SLEEP +#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + .suspend_noirq = suspend_fn, \ + .resume_noirq = resume_fn, \ + .freeze_noirq = suspend_fn, \ + .thaw_noirq = resume_fn, \ + .poweroff_noirq = suspend_fn, \ + .restore_noirq = resume_fn, +#else +#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) +#endif + +#ifdef CONFIG_PM +#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ + .runtime_suspend = suspend_fn, \ + .runtime_resume = resume_fn, \ + .runtime_idle = idle_fn, +#else +#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) +#endif + +/* + * Use this if you want to use the same suspend and resume callbacks for suspend + * to RAM and hibernation. + */ +#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ +const struct dev_pm_ops name = { \ + SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ +} + +/* + * Use this for defining a set of PM operations to be used in all situations + * (system suspend, hibernation or runtime PM). + * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should + * be different from the corresponding runtime PM callbacks, .runtime_suspend(), + * and .runtime_resume(), because .runtime_suspend() always works on an already + * quiescent device, while .suspend() should assume that the device may be doing + * something when it is called (it should ensure that the device will be + * quiescent after it has returned). Therefore it's better to point the "late" + * suspend and "early" resume callback pointers, .suspend_late() and + * .resume_early(), to the same routines as .runtime_suspend() and + * .runtime_resume(), respectively (and analogously for hibernation). 
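
As a sketch of how a driver typically fills in the structure documented above, the fragment below combines system-sleep and runtime PM callbacks into one dev_pm_ops, reusing the runtime callbacks for the late/early phases as the preceding comment recommends; the callback bodies are placeholders.

#include <linux/pm.h>

/* Placeholder callbacks for a hypothetical device. */
static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev) { return 0; }
static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	/* Point the late/early phases at the runtime callbacks, as advised. */
	SET_LATE_SYSTEM_SLEEP_PM_OPS(example_runtime_suspend,
				     example_runtime_resume)
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
};

A bus-specific driver (for example a platform_driver) would then reference this table through its .driver.pm pointer.
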
+ */ +#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ +const struct dev_pm_ops name = { \ + SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ +} + +/* + * PM_EVENT_ messages + * + * The following PM_EVENT_ messages are defined for the internal use of the PM + * core, in order to provide a mechanism allowing the high level suspend and + * hibernation code to convey the necessary information to the device PM core + * code: + * + * ON No transition. + * + * FREEZE System is going to hibernate, call ->prepare() and ->freeze() + * for all devices. + * + * SUSPEND System is going to suspend, call ->prepare() and ->suspend() + * for all devices. + * + * HIBERNATE Hibernation image has been saved, call ->prepare() and + * ->poweroff() for all devices. + * + * QUIESCE Contents of main memory are going to be restored from a (loaded) + * hibernation image, call ->prepare() and ->freeze() for all + * devices. + * + * RESUME System is resuming, call ->resume() and ->complete() for all + * devices. + * + * THAW Hibernation image has been created, call ->thaw() and + * ->complete() for all devices. + * + * RESTORE Contents of main memory have been restored from a hibernation + * image, call ->restore() and ->complete() for all devices. + * + * RECOVER Creation of a hibernation image or restoration of the main + * memory contents from a hibernation image has failed, call + * ->thaw() and ->complete() for all devices. + * + * The following PM_EVENT_ messages are defined for internal use by + * kernel subsystems. They are never issued by the PM core. + * + * USER_SUSPEND Manual selective suspend was issued by userspace. + * + * USER_RESUME Manual selective resume was issued by userspace. + * + * REMOTE_WAKEUP Remote-wakeup request was received from the device. + * + * AUTO_SUSPEND Automatic (device idle) runtime suspend was + * initiated by the subsystem. + * + * AUTO_RESUME Automatic (device needed) runtime resume was + * requested by a driver. 
+ */ + +#define PM_EVENT_INVALID (-1) +#define PM_EVENT_ON 0x0000 +#define PM_EVENT_FREEZE 0x0001 +#define PM_EVENT_SUSPEND 0x0002 +#define PM_EVENT_HIBERNATE 0x0004 +#define PM_EVENT_QUIESCE 0x0008 +#define PM_EVENT_RESUME 0x0010 +#define PM_EVENT_THAW 0x0020 +#define PM_EVENT_RESTORE 0x0040 +#define PM_EVENT_RECOVER 0x0080 +#define PM_EVENT_USER 0x0100 +#define PM_EVENT_REMOTE 0x0200 +#define PM_EVENT_AUTO 0x0400 + +#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) +#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) +#define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME) +#define PM_EVENT_REMOTE_RESUME (PM_EVENT_REMOTE | PM_EVENT_RESUME) +#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) +#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) + +#define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, }) +#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) +#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) +#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) +#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) +#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) +#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) +#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) +#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) +#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) +#define PMSG_USER_SUSPEND ((struct pm_message) \ + { .event = PM_EVENT_USER_SUSPEND, }) +#define PMSG_USER_RESUME ((struct pm_message) \ + { .event = PM_EVENT_USER_RESUME, }) +#define PMSG_REMOTE_RESUME ((struct pm_message) \ + { .event = PM_EVENT_REMOTE_RESUME, }) +#define PMSG_AUTO_SUSPEND ((struct pm_message) \ + { .event = PM_EVENT_AUTO_SUSPEND, }) +#define PMSG_AUTO_RESUME ((struct pm_message) \ + { .event = PM_EVENT_AUTO_RESUME, }) + +#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) + +/* + * Device run-time power management status. + * + * These status labels are used internally by the PM core to indicate the + * current status of a device with respect to the PM core operations. They do + * not reflect the actual power state of the device or its status as seen by the + * driver. + * + * RPM_ACTIVE Device is fully operational. Indicates that the device + * bus type's ->runtime_resume() callback has completed + * successfully. + * + * RPM_SUSPENDED Device bus type's ->runtime_suspend() callback has + * completed successfully. The device is regarded as + * suspended. + * + * RPM_RESUMING Device bus type's ->runtime_resume() callback is being + * executed. + * + * RPM_SUSPENDING Device bus type's ->runtime_suspend() callback is being + * executed. + */ + +enum rpm_status { + RPM_ACTIVE = 0, + RPM_RESUMING, + RPM_SUSPENDED, + RPM_SUSPENDING, +}; + +/* + * Device run-time power management request types. + * + * RPM_REQ_NONE Do nothing. 
+ * + * RPM_REQ_IDLE Run the device bus type's ->runtime_idle() callback + * + * RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback + * + * RPM_REQ_AUTOSUSPEND Same as RPM_REQ_SUSPEND, but not until the device has + * been inactive for as long as power.autosuspend_delay + * + * RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback + */ + +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE, + RPM_REQ_SUSPEND, + RPM_REQ_AUTOSUSPEND, + RPM_REQ_RESUME, +}; + +struct wakeup_source; +struct wake_irq; +struct pm_domain_data; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; +#ifdef CONFIG_PM_CLK + struct list_head clock_list; +#endif +#ifdef CONFIG_PM_GENERIC_DOMAINS + struct pm_domain_data *domain_data; +#endif +}; + +/* + * Driver flags to control system suspend/resume behavior. + * + * These flags can be set by device drivers at the probe time. They need not be + * cleared by the drivers as the driver core will take care of that. + * + * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device. + * SMART_PREPARE: Check the return value of the driver's ->prepare callback. + * SMART_SUSPEND: No need to resume the device from runtime suspend. + * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible. + * + * Setting SMART_PREPARE instructs bus types and PM domains which may want + * system suspend/resume callbacks to be skipped for the device to return 0 from + * their ->prepare callbacks if the driver's ->prepare callback returns 0 (in + * other words, the system suspend/resume callbacks can only be skipped for the + * device if its driver doesn't object against that). This flag has no effect + * if NEVER_SKIP is set. + * + * Setting SMART_SUSPEND instructs bus types and PM domains which may want to + * runtime resume the device upfront during system suspend that doing so is not + * necessary from the driver's perspective. It also may cause them to skip + * invocations of the ->suspend_late and ->suspend_noirq callbacks provided by + * the driver if they decide to leave the device in runtime suspend. + * + * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the + * driver prefers the device to be left in suspend after system resume. 
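
To make the flags described above concrete, a driver would typically set them once at probe time. The sketch below assumes the dev_pm_set_driver_flags() helper (provided by <linux/pm_runtime.h> in this kernel series); the function and device names are invented.

#include <linux/pm_runtime.h>

static void example_setup_pm_flags(struct device *dev)
{
	/*
	 * Tell the PM core that this device need not be runtime resumed
	 * before system suspend and may be left suspended across resume.
	 */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_LEAVE_SUSPENDED);
}
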
+ */ +#define DPM_FLAG_NEVER_SKIP BIT(0) +#define DPM_FLAG_SMART_PREPARE BIT(1) +#define DPM_FLAG_SMART_SUSPEND BIT(2) +#define DPM_FLAG_LEAVE_SUSPENDED BIT(3) + +struct dev_pm_info { + pm_message_t power_state; + unsigned int can_wakeup:1; + unsigned int async_suspend:1; + bool in_dpm_list:1; /* Owned by the PM core */ + bool is_prepared:1; /* Owned by the PM core */ + bool is_suspended:1; /* Ditto */ + bool is_noirq_suspended:1; + bool is_late_suspended:1; + bool early_init:1; /* Owned by the PM core */ + bool direct_complete:1; /* Owned by the PM core */ + u32 driver_flags; + spinlock_t lock; +#ifdef CONFIG_PM_SLEEP + struct list_head entry; + struct completion completion; + struct wakeup_source *wakeup; + bool wakeup_path:1; + bool syscore:1; + bool no_pm_callbacks:1; /* Owned by the PM core */ + unsigned int must_resume:1; /* Owned by the PM core */ + unsigned int may_skip_resume:1; /* Set by subsystems */ +#else + unsigned int should_wakeup:1; +#endif +#ifdef CONFIG_PM + struct timer_list suspend_timer; + unsigned long timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth:3; + unsigned int idle_notification:1; + unsigned int request_pending:1; + unsigned int deferred_resume:1; + unsigned int runtime_auto:1; + bool ignore_children:1; + unsigned int no_callbacks:1; + unsigned int irq_safe:1; + unsigned int use_autosuspend:1; + unsigned int timer_autosuspends:1; + unsigned int memalloc_noio:1; + unsigned int links_count; + enum rpm_request request; + enum rpm_status runtime_status; + int runtime_error; + int autosuspend_delay; + unsigned long last_busy; + unsigned long active_jiffies; + unsigned long suspended_jiffies; + unsigned long accounting_timestamp; +#endif + struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; +}; + +extern void update_pm_runtime_accounting(struct device *dev); +extern int dev_pm_get_subsys_data(struct device *dev); +extern void dev_pm_put_subsys_data(struct device *dev); + +/** + * struct dev_pm_domain - power management domain representation. + * + * @ops: Power management operations associated with this domain. + * @detach: Called when removing a device from the domain. + * @activate: Called before executing probe routines for bus types and drivers. + * @sync: Called after successful driver probe. + * @dismiss: Called after unsuccessful driver probe and after driver removal. + * + * Power domains provide callbacks that are executed during system suspend, + * hibernation, system resume and during runtime PM transitions instead of + * subsystem-level and driver-level callbacks. + */ +struct dev_pm_domain { + struct dev_pm_ops ops; + void (*detach)(struct device *dev, bool power_off); + int (*activate)(struct device *dev); + void (*sync)(struct device *dev); + void (*dismiss)(struct device *dev); +}; + +/* + * The PM_EVENT_ messages are also used by drivers implementing the legacy + * suspend framework, based on the ->suspend() and ->resume() callbacks common + * for suspend and hibernation transitions, according to the rules below. + */ + +/* Necessary, because several drivers use PM_EVENT_PRETHAW */ +#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE + +/* + * One transition is triggered by resume(), after a suspend() call; the + * message is implicit: + * + * ON Driver starts working again, responding to hardware events + * and software requests. 
The hardware may have gone through + * a power-off reset, or it may have maintained state from the + * previous suspend() which the driver will rely on while + * resuming. On most platforms, there are no restrictions on + * availability of resources like clocks during resume(). + * + * Other transitions are triggered by messages sent using suspend(). All + * these transitions quiesce the driver, so that I/O queues are inactive. + * That commonly entails turning off IRQs and DMA; there may be rules + * about how to quiesce that are specific to the bus or the device's type. + * (For example, network drivers mark the link state.) Other details may + * differ according to the message: + * + * SUSPEND Quiesce, enter a low power device state appropriate for + * the upcoming system state (such as PCI_D3hot), and enable + * wakeup events as appropriate. + * + * HIBERNATE Enter a low power device state appropriate for the hibernation + * state (eg. ACPI S4) and enable wakeup events as appropriate. + * + * FREEZE Quiesce operations so that a consistent image can be saved; + * but do NOT otherwise enter a low power device state, and do + * NOT emit system wakeup events. + * + * PRETHAW Quiesce as if for FREEZE; additionally, prepare for restoring + * the system from a snapshot taken after an earlier FREEZE. + * Some drivers will need to reset their hardware state instead + * of preserving it, to ensure that it's never mistaken for the + * state which that earlier snapshot had set up. + * + * A minimally power-aware driver treats all messages as SUSPEND, fully + * reinitializes its device during resume() -- whether or not it was reset + * during the suspend/resume cycle -- and can't issue wakeup events. + * + * More power-aware drivers may also use low power states at runtime as + * well as during system sleep states like PM_SUSPEND_STANDBY. They may + * be able to use wakeup events to exit from runtime low-power states, + * or from system low-power states such as standby or suspend-to-RAM. 
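
To make the message semantics above concrete, here is a sketch of a legacy-style suspend callback dispatching on the event code; it is purely illustrative, and new drivers are expected to provide a struct dev_pm_ops instead.

static int example_legacy_suspend(struct platform_device *pdev,
				  pm_message_t state)
{
	switch (state.event) {
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		/* Quiesce, enter a low-power state, arm wakeup if desired. */
		break;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* Quiesce only: stay at full power, emit no wakeup events. */
		break;
	default:
		break;
	}
	return 0;
}
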
+ */ + +#ifdef CONFIG_PM_SLEEP +extern void device_pm_lock(void); +extern void dpm_resume_start(pm_message_t state); +extern void dpm_resume_end(pm_message_t state); +extern void dpm_noirq_resume_devices(pm_message_t state); +extern void dpm_noirq_end(void); +extern void dpm_resume_noirq(pm_message_t state); +extern void dpm_resume_early(pm_message_t state); +extern void dpm_resume(pm_message_t state); +extern void dpm_complete(pm_message_t state); + +extern void device_pm_unlock(void); +extern int dpm_suspend_end(pm_message_t state); +extern int dpm_suspend_start(pm_message_t state); +extern void dpm_noirq_begin(void); +extern int dpm_noirq_suspend_devices(pm_message_t state); +extern int dpm_suspend_noirq(pm_message_t state); +extern int dpm_suspend_late(pm_message_t state); +extern int dpm_suspend(pm_message_t state); +extern int dpm_prepare(pm_message_t state); + +extern void __suspend_report_result(const char *function, void *fn, int ret); + +#define suspend_report_result(fn, ret) \ + do { \ + __suspend_report_result(__func__, fn, ret); \ + } while (0) + +extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); +extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)); + +extern int pm_generic_prepare(struct device *dev); +extern int pm_generic_suspend_late(struct device *dev); +extern int pm_generic_suspend_noirq(struct device *dev); +extern int pm_generic_suspend(struct device *dev); +extern int pm_generic_resume_early(struct device *dev); +extern int pm_generic_resume_noirq(struct device *dev); +extern int pm_generic_resume(struct device *dev); +extern int pm_generic_freeze_noirq(struct device *dev); +extern int pm_generic_freeze_late(struct device *dev); +extern int pm_generic_freeze(struct device *dev); +extern int pm_generic_thaw_noirq(struct device *dev); +extern int pm_generic_thaw_early(struct device *dev); +extern int pm_generic_thaw(struct device *dev); +extern int pm_generic_restore_noirq(struct device *dev); +extern int pm_generic_restore_early(struct device *dev); +extern int pm_generic_restore(struct device *dev); +extern int pm_generic_poweroff_noirq(struct device *dev); +extern int pm_generic_poweroff_late(struct device *dev); +extern int pm_generic_poweroff(struct device *dev); +extern void pm_generic_complete(struct device *dev); + +extern void dev_pm_skip_next_resume_phases(struct device *dev); +extern bool dev_pm_may_skip_resume(struct device *dev); +extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); + +#else /* !CONFIG_PM_SLEEP */ + +#define device_pm_lock() do {} while (0) +#define device_pm_unlock() do {} while (0) + +static inline int dpm_suspend_start(pm_message_t state) +{ + return 0; +} + +#define suspend_report_result(fn, ret) do {} while (0) + +static inline int device_pm_wait_for_dev(struct device *a, struct device *b) +{ + return 0; +} + +static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) +{ +} + +#define pm_generic_prepare NULL +#define pm_generic_suspend_late NULL +#define pm_generic_suspend_noirq NULL +#define pm_generic_suspend NULL +#define pm_generic_resume_early NULL +#define pm_generic_resume_noirq NULL +#define pm_generic_resume NULL +#define pm_generic_freeze_noirq NULL +#define pm_generic_freeze_late NULL +#define pm_generic_freeze NULL +#define pm_generic_thaw_noirq NULL +#define pm_generic_thaw_early NULL +#define pm_generic_thaw NULL +#define pm_generic_restore_noirq NULL +#define pm_generic_restore_early NULL +#define pm_generic_restore NULL +#define 
pm_generic_poweroff_noirq NULL +#define pm_generic_poweroff_late NULL +#define pm_generic_poweroff NULL +#define pm_generic_complete NULL +#endif /* !CONFIG_PM_SLEEP */ + +/* How to reorder dpm_list after device_move() */ +enum dpm_order { + DPM_ORDER_NONE, + DPM_ORDER_DEV_AFTER_PARENT, + DPM_ORDER_PARENT_BEFORE_DEV, + DPM_ORDER_DEV_LAST, +}; + +#endif /* _LINUX_PM_H */ diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h new file mode 100644 index 000000000..85c16defe --- /dev/null +++ b/include/linux/pm2301_charger.h @@ -0,0 +1,61 @@ +/* + * PM2301 charger driver. + * + * Copyright (C) 2012 ST Ericsson Corporation + * + * Contact: Olivier LAUNAY (olivier.launay@stericsson.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_PM2301_H +#define __LINUX_PM2301_H + +/** + * struct pm2xxx_bm_charger_parameters - Charger specific parameters + * @ac_volt_max: maximum allowed AC charger voltage in mV + * @ac_curr_max: maximum allowed AC charger current in mA + */ +struct pm2xxx_bm_charger_parameters { + int ac_volt_max; + int ac_curr_max; +}; + +/** + * struct pm2xxx_bm_data - pm2xxx battery management data + * @enable_overshoot flag to enable VBAT overshoot control + * @chg_params charger parameters + */ +struct pm2xxx_bm_data { + bool enable_overshoot; + const struct pm2xxx_bm_charger_parameters *chg_params; +}; + +struct pm2xxx_charger_platform_data { + char **supplied_to; + size_t num_supplicants; + int i2c_bus; + const char *label; + int gpio_irq_number; + unsigned int lpn_gpio; + int irq_type; +}; + +struct pm2xxx_platform_data { + struct pm2xxx_charger_platform_data *wall_charger; + struct pm2xxx_bm_data *battery; +}; + +#endif /* __LINUX_PM2301_H */ diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h new file mode 100644 index 000000000..09779b0ae --- /dev/null +++ b/include/linux/pm_clock.h @@ -0,0 +1,99 @@ +/* + * pm_clock.h - Definitions and headers related to device clocks. + * + * Copyright (C) 2011 Rafael J. Wysocki , Renesas Electronics Corp. + * + * This file is released under the GPLv2. 
+ */ + +#ifndef _LINUX_PM_CLOCK_H +#define _LINUX_PM_CLOCK_H + +#include +#include + +struct pm_clk_notifier_block { + struct notifier_block nb; + struct dev_pm_domain *pm_domain; + char *con_ids[]; +}; + +struct clk; + +#ifdef CONFIG_PM +extern int pm_clk_runtime_suspend(struct device *dev); +extern int pm_clk_runtime_resume(struct device *dev); +#define USE_PM_CLK_RUNTIME_OPS \ + .runtime_suspend = pm_clk_runtime_suspend, \ + .runtime_resume = pm_clk_runtime_resume, +#else +#define USE_PM_CLK_RUNTIME_OPS +#endif + +#ifdef CONFIG_PM_CLK +static inline bool pm_clk_no_clocks(struct device *dev) +{ + return dev && dev->power.subsys_data + && list_empty(&dev->power.subsys_data->clock_list); +} + +extern void pm_clk_init(struct device *dev); +extern int pm_clk_create(struct device *dev); +extern void pm_clk_destroy(struct device *dev); +extern int pm_clk_add(struct device *dev, const char *con_id); +extern int pm_clk_add_clk(struct device *dev, struct clk *clk); +extern int of_pm_clk_add_clk(struct device *dev, const char *name); +extern int of_pm_clk_add_clks(struct device *dev); +extern void pm_clk_remove(struct device *dev, const char *con_id); +extern void pm_clk_remove_clk(struct device *dev, struct clk *clk); +extern int pm_clk_suspend(struct device *dev); +extern int pm_clk_resume(struct device *dev); +#else +static inline bool pm_clk_no_clocks(struct device *dev) +{ + return true; +} +static inline void pm_clk_init(struct device *dev) +{ +} +static inline int pm_clk_create(struct device *dev) +{ + return -EINVAL; +} +static inline void pm_clk_destroy(struct device *dev) +{ +} +static inline int pm_clk_add(struct device *dev, const char *con_id) +{ + return -EINVAL; +} + +static inline int pm_clk_add_clk(struct device *dev, struct clk *clk) +{ + return -EINVAL; +} +static inline int of_pm_clk_add_clks(struct device *dev) +{ + return -EINVAL; +} +static inline void pm_clk_remove(struct device *dev, const char *con_id) +{ +} +#define pm_clk_suspend NULL +#define pm_clk_resume NULL +static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk) +{ +} +#endif + +#ifdef CONFIG_HAVE_CLK +extern void pm_clk_add_notifier(struct bus_type *bus, + struct pm_clk_notifier_block *clknb); +#else +static inline void pm_clk_add_notifier(struct bus_type *bus, + struct pm_clk_notifier_block *clknb) +{ +} +#endif + +#endif diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h new file mode 100644 index 000000000..776c546d5 --- /dev/null +++ b/include/linux/pm_domain.h @@ -0,0 +1,336 @@ +/* + * pm_domain.h - Definitions and headers related to device power domains. + * + * Copyright (C) 2011 Rafael J. Wysocki , Renesas Electronics Corp. + * + * This file is released under the GPLv2. 
+ */ + +#ifndef _LINUX_PM_DOMAIN_H +#define _LINUX_PM_DOMAIN_H + +#include +#include +#include +#include +#include +#include +#include + +/* Defines used for the flags field in the struct generic_pm_domain */ +#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ +#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */ +#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */ +#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) /* Keep devices active if wakeup */ + +enum gpd_status { + GPD_STATE_ACTIVE = 0, /* PM domain is active */ + GPD_STATE_POWER_OFF, /* PM domain is off */ +}; + +struct dev_power_governor { + bool (*power_down_ok)(struct dev_pm_domain *domain); + bool (*suspend_ok)(struct device *dev); +}; + +struct gpd_dev_ops { + int (*start)(struct device *dev); + int (*stop)(struct device *dev); +}; + +struct genpd_power_state { + s64 power_off_latency_ns; + s64 power_on_latency_ns; + s64 residency_ns; + struct fwnode_handle *fwnode; + ktime_t idle_time; +}; + +struct genpd_lock_ops; +struct dev_pm_opp; + +struct generic_pm_domain { + struct device dev; + struct dev_pm_domain domain; /* PM domain operations */ + struct list_head gpd_list_node; /* Node in the global PM domains list */ + struct list_head master_links; /* Links with PM domain as a master */ + struct list_head slave_links; /* Links with PM domain as a slave */ + struct list_head dev_list; /* List of devices */ + struct dev_power_governor *gov; + struct work_struct power_off_work; + struct fwnode_handle *provider; /* Identity of the domain provider */ + bool has_provider; + const char *name; + atomic_t sd_count; /* Number of subdomains with power "on" */ + enum gpd_status status; /* Current state of the domain */ + unsigned int device_count; /* Number of devices */ + unsigned int suspended_count; /* System suspend device counter */ + unsigned int prepared_count; /* Suspend counter of prepared devices */ + unsigned int performance_state; /* Aggregated max performance state */ + int (*power_off)(struct generic_pm_domain *domain); + int (*power_on)(struct generic_pm_domain *domain); + unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd, + struct dev_pm_opp *opp); + int (*set_performance_state)(struct generic_pm_domain *genpd, + unsigned int state); + struct gpd_dev_ops dev_ops; + s64 max_off_time_ns; /* Maximum allowed "suspended" time. 
*/ + bool max_off_time_changed; + bool cached_power_down_ok; + int (*attach_dev)(struct generic_pm_domain *domain, + struct device *dev); + void (*detach_dev)(struct generic_pm_domain *domain, + struct device *dev); + unsigned int flags; /* Bit field of configs for genpd */ + struct genpd_power_state *states; + unsigned int state_count; /* number of states */ + unsigned int state_idx; /* state that genpd will go to when off */ + void *free; /* Free the state that was allocated for default */ + ktime_t on_time; + ktime_t accounting_time; + const struct genpd_lock_ops *lock_ops; + union { + struct mutex mlock; + struct { + spinlock_t slock; + unsigned long lock_flags; + }; + }; + +}; + +static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) +{ + return container_of(pd, struct generic_pm_domain, domain); +} + +struct gpd_link { + struct generic_pm_domain *master; + struct list_head master_node; + struct generic_pm_domain *slave; + struct list_head slave_node; +}; + +struct gpd_timing_data { + s64 suspend_latency_ns; + s64 resume_latency_ns; + s64 effective_constraint_ns; + bool constraint_changed; + bool cached_suspend_ok; +}; + +struct pm_domain_data { + struct list_head list_node; + struct device *dev; +}; + +struct generic_pm_domain_data { + struct pm_domain_data base; + struct gpd_timing_data td; + struct notifier_block nb; + unsigned int performance_state; + void *data; +}; + +#ifdef CONFIG_PM_GENERIC_DOMAINS +static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd) +{ + return container_of(pdd, struct generic_pm_domain_data, base); +} + +static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) +{ + return to_gpd_data(dev->power.subsys_data->domain_data); +} + +int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev); +int pm_genpd_remove_device(struct device *dev); +int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *new_subdomain); +int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *target); +int pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off); +int pm_genpd_remove(struct generic_pm_domain *genpd); +int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state); + +extern struct dev_power_governor simple_qos_governor; +extern struct dev_power_governor pm_domain_always_on_gov; +#else + +static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) +{ + return ERR_PTR(-ENOSYS); +} +static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + return -ENOSYS; +} +static inline int pm_genpd_remove_device(struct device *dev) +{ + return -ENOSYS; +} +static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *new_sd) +{ + return -ENOSYS; +} +static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *target) +{ + return -ENOSYS; +} +static inline int pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off) +{ + return -ENOSYS; +} +static inline int pm_genpd_remove(struct generic_pm_domain *genpd) +{ + return -ENOTSUPP; +} + +static inline int dev_pm_genpd_set_performance_state(struct device *dev, + unsigned int state) +{ + return -ENOTSUPP; +} + +#define simple_qos_governor (*(struct dev_power_governor *)(NULL)) +#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL)) +#endif + 
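As a minimal sketch of how a SoC power-controller driver might consume the genpd declarations above (assuming CONFIG_PM_GENERIC_DOMAINS and this header; the my_domain_* names are hypothetical and the register accesses they stand in for are omitted):

static int my_domain_power_on(struct generic_pm_domain *genpd)
{
        /* Ungate the domain in the (hypothetical) power controller. */
        return 0;
}

static int my_domain_power_off(struct generic_pm_domain *genpd)
{
        /* Gate the domain; the core only calls this once no attached device needs it. */
        return 0;
}

static struct generic_pm_domain my_domain = {
        .name           = "my_domain",
        .power_on       = my_domain_power_on,
        .power_off      = my_domain_power_off,
};

static int my_domain_setup(struct device *consumer)
{
        int ret;

        /* Register the domain as initially off, governed by the QoS-based governor. */
        ret = pm_genpd_init(&my_domain, &simple_qos_governor, true);
        if (ret)
                return ret;

        /* Attach a device so its runtime-PM transitions drive power_on/power_off. */
        return pm_genpd_add_device(&my_domain, consumer);
}

Passing true for is_off records the domain as starting powered down, and simple_qos_governor then weighs the attached devices' resume-latency QoS constraints before allowing power_off to run.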
+#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP +void pm_genpd_syscore_poweroff(struct device *dev); +void pm_genpd_syscore_poweron(struct device *dev); +#else +static inline void pm_genpd_syscore_poweroff(struct device *dev) {} +static inline void pm_genpd_syscore_poweron(struct device *dev) {} +#endif + +/* OF PM domain providers */ +struct of_device_id; + +typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args, + void *data); + +struct genpd_onecell_data { + struct generic_pm_domain **domains; + unsigned int num_domains; + genpd_xlate_t xlate; +}; + +#ifdef CONFIG_PM_GENERIC_DOMAINS_OF +int of_genpd_add_provider_simple(struct device_node *np, + struct generic_pm_domain *genpd); +int of_genpd_add_provider_onecell(struct device_node *np, + struct genpd_onecell_data *data); +void of_genpd_del_provider(struct device_node *np); +int of_genpd_add_device(struct of_phandle_args *args, struct device *dev); +int of_genpd_add_subdomain(struct of_phandle_args *parent, + struct of_phandle_args *new_subdomain); +struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); +int of_genpd_parse_idle_states(struct device_node *dn, + struct genpd_power_state **states, int *n); +unsigned int of_genpd_opp_to_performance_state(struct device *dev, + struct device_node *np); + +int genpd_dev_pm_attach(struct device *dev); +struct device *genpd_dev_pm_attach_by_id(struct device *dev, + unsigned int index); +struct device *genpd_dev_pm_attach_by_name(struct device *dev, + char *name); +#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ +static inline int of_genpd_add_provider_simple(struct device_node *np, + struct generic_pm_domain *genpd) +{ + return -ENOTSUPP; +} + +static inline int of_genpd_add_provider_onecell(struct device_node *np, + struct genpd_onecell_data *data) +{ + return -ENOTSUPP; +} + +static inline void of_genpd_del_provider(struct device_node *np) {} + +static inline int of_genpd_add_device(struct of_phandle_args *args, + struct device *dev) +{ + return -ENODEV; +} + +static inline int of_genpd_add_subdomain(struct of_phandle_args *parent, + struct of_phandle_args *new_subdomain) +{ + return -ENODEV; +} + +static inline int of_genpd_parse_idle_states(struct device_node *dn, + struct genpd_power_state **states, int *n) +{ + return -ENODEV; +} + +static inline unsigned int +of_genpd_opp_to_performance_state(struct device *dev, + struct device_node *np) +{ + return 0; +} + +static inline int genpd_dev_pm_attach(struct device *dev) +{ + return 0; +} + +static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev, + unsigned int index) +{ + return NULL; +} + +static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev, + char *name) +{ + return NULL; +} + +static inline +struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) +{ + return ERR_PTR(-ENOTSUPP); +} +#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ + +#ifdef CONFIG_PM +int dev_pm_domain_attach(struct device *dev, bool power_on); +struct device *dev_pm_domain_attach_by_id(struct device *dev, + unsigned int index); +struct device *dev_pm_domain_attach_by_name(struct device *dev, + char *name); +void dev_pm_domain_detach(struct device *dev, bool power_off); +void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd); +#else +static inline int dev_pm_domain_attach(struct device *dev, bool power_on) +{ + return 0; +} +static inline struct device *dev_pm_domain_attach_by_id(struct device *dev, + unsigned int index) +{ + return NULL; +} +static inline struct device 
*dev_pm_domain_attach_by_name(struct device *dev, + char *name) +{ + return NULL; +} +static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {} +static inline void dev_pm_domain_set(struct device *dev, + struct dev_pm_domain *pd) {} +#endif + +#endif /* _LINUX_PM_DOMAIN_H */ diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h new file mode 100644 index 000000000..099b31960 --- /dev/null +++ b/include/linux/pm_opp.h @@ -0,0 +1,348 @@ +/* + * Generic OPP Interface + * + * Copyright (C) 2009-2010 Texas Instruments Incorporated. + * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_OPP_H__ +#define __LINUX_OPP_H__ + +#include +#include + +struct clk; +struct regulator; +struct dev_pm_opp; +struct device; +struct opp_table; + +enum dev_pm_opp_event { + OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, +}; + +/** + * struct dev_pm_opp_supply - Power supply voltage/current values + * @u_volt: Target voltage in microvolts corresponding to this OPP + * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP + * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP + * @u_amp: Maximum current drawn by the device in microamperes + * + * This structure stores the voltage/current values for a single power supply. + */ +struct dev_pm_opp_supply { + unsigned long u_volt; + unsigned long u_volt_min; + unsigned long u_volt_max; + unsigned long u_amp; +}; + +/** + * struct dev_pm_opp_info - OPP freq/voltage/current values + * @rate: Target clk rate in hz + * @supplies: Array of voltage/current values for all power supplies + * + * This structure stores the freq/voltage/current values for a single OPP. + */ +struct dev_pm_opp_info { + unsigned long rate; + struct dev_pm_opp_supply *supplies; +}; + +/** + * struct dev_pm_set_opp_data - Set OPP data + * @old_opp: Old OPP info + * @new_opp: New OPP info + * @regulators: Array of regulator pointers + * @regulator_count: Number of regulators + * @clk: Pointer to clk + * @dev: Pointer to the struct device + * + * This structure contains all information required for setting an OPP. 
+ */ +struct dev_pm_set_opp_data { + struct dev_pm_opp_info old_opp; + struct dev_pm_opp_info new_opp; + + struct regulator **regulators; + unsigned int regulator_count; + struct clk *clk; + struct device *dev; +}; + +#if defined(CONFIG_PM_OPP) + +struct opp_table *dev_pm_opp_get_opp_table(struct device *dev); +void dev_pm_opp_put_opp_table(struct opp_table *opp_table); + +unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); + +unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); + +bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); + +int dev_pm_opp_get_opp_count(struct device *dev); +unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); +unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev); +unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev); +unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev); + +struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, + unsigned long freq, + bool available); + +struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, + unsigned long *freq); + +struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, + unsigned long *freq); +void dev_pm_opp_put(struct dev_pm_opp *opp); + +int dev_pm_opp_add(struct device *dev, unsigned long freq, + unsigned long u_volt); +void dev_pm_opp_remove(struct device *dev, unsigned long freq); + +int dev_pm_opp_enable(struct device *dev, unsigned long freq); + +int dev_pm_opp_disable(struct device *dev, unsigned long freq); + +int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb); +int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb); + +struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count); +void dev_pm_opp_put_supported_hw(struct opp_table *opp_table); +struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name); +void dev_pm_opp_put_prop_name(struct opp_table *opp_table); +struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); +void dev_pm_opp_put_regulators(struct opp_table *opp_table); +struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); +void dev_pm_opp_put_clkname(struct opp_table *opp_table); +struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); +void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); +int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); +int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); +int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); +void dev_pm_opp_remove_table(struct device *dev); +void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); +#else +static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {} + +static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) +{ + return 0; +} + +static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) +{ + return 0; +} + +static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) +{ + return false; +} + +static inline int dev_pm_opp_get_opp_count(struct device *dev) +{ + return 0; +} + +static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) +{ + return 0; +} + +static 
inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) +{ + return 0; +} + +static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) +{ + return 0; +} + +static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) +{ + return 0; +} + +static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, + unsigned long freq, bool available) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, + unsigned long *freq) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, + unsigned long *freq) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} + +static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, + unsigned long u_volt) +{ + return -ENOTSUPP; +} + +static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) +{ +} + +static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) +{ + return 0; +} + +static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq) +{ + return 0; +} + +static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) +{ + return -ENOTSUPP; +} + +static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb) +{ + return -ENOTSUPP; +} + +static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, + const u32 *versions, + unsigned int count) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {} + +static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, + int (*set_opp)(struct dev_pm_set_opp_data *data)) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {} + +static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {} + +static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} + +static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} + +static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) +{ + return -ENOTSUPP; +} + +static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) +{ + return -ENOTSUPP; +} + +static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) +{ + return -EINVAL; +} + +static inline void dev_pm_opp_remove_table(struct device *dev) +{ +} + +static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) +{ +} + +#endif /* CONFIG_PM_OPP */ + +#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) +int dev_pm_opp_of_add_table(struct device *dev); +int dev_pm_opp_of_add_table_indexed(struct device *dev, int index); +void dev_pm_opp_of_remove_table(struct device *dev); +int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); +void dev_pm_opp_of_cpumask_remove_table(const struct cpumask 
*cpumask); +int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); +struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); +struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np); +struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); +#else +static inline int dev_pm_opp_of_add_table(struct device *dev) +{ + return -ENOTSUPP; +} + +static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index) +{ + return -ENOTSUPP; +} + +static inline void dev_pm_opp_of_remove_table(struct device *dev) +{ +} + +static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) +{ + return -ENOTSUPP; +} + +static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) +{ +} + +static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) +{ + return -ENOTSUPP; +} + +static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) +{ + return NULL; +} + +static inline struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np) +{ + return NULL; +} +static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) +{ + return NULL; +} +#endif + +#endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h new file mode 100644 index 000000000..6ea1ae373 --- /dev/null +++ b/include/linux/pm_qos.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PM_QOS_H +#define _LINUX_PM_QOS_H +/* interface for the pm_qos_power infrastructure of the linux kernel. + * + * Mark Gross + */ +#include +#include +#include +#include + +enum { + PM_QOS_RESERVED = 0, + PM_QOS_CPU_DMA_LATENCY, + PM_QOS_NETWORK_LATENCY, + PM_QOS_NETWORK_THROUGHPUT, + PM_QOS_MEMORY_BANDWIDTH, + + /* insert new class ID */ + PM_QOS_NUM_CLASSES, +}; + +enum pm_qos_flags_status { + PM_QOS_FLAGS_UNDEFINED = -1, + PM_QOS_FLAGS_NONE, + PM_QOS_FLAGS_SOME, + PM_QOS_FLAGS_ALL, +}; + +#define PM_QOS_DEFAULT_VALUE (-1) +#define PM_QOS_LATENCY_ANY S32_MAX +#define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC) + +#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) +#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) +#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 +#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0 +#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY +#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY +#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS +#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 +#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) + +#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) + +struct pm_qos_request { + struct plist_node node; + int pm_qos_class; + struct delayed_work work; /* for pm_qos_update_request_timeout */ +}; + +struct pm_qos_flags_request { + struct list_head node; + s32 flags; /* Do not change to 64 bit */ +}; + +enum dev_pm_qos_req_type { + DEV_PM_QOS_RESUME_LATENCY = 1, + DEV_PM_QOS_LATENCY_TOLERANCE, + DEV_PM_QOS_FLAGS, +}; + +struct dev_pm_qos_request { + enum dev_pm_qos_req_type type; + union { + struct plist_node pnode; + struct pm_qos_flags_request flr; + } data; + struct device *dev; +}; + +enum pm_qos_type { + PM_QOS_UNITIALIZED, + PM_QOS_MAX, /* return the largest value */ + PM_QOS_MIN, /* return the smallest value */ + PM_QOS_SUM /* return the sum */ +}; + +/* + * Note: The lockless read path depends on the CPU 
accessing target_value + * or effective_flags atomically. Atomic access is only guaranteed on all CPU + * types linux supports for 32 bit quantites + */ +struct pm_qos_constraints { + struct plist_head list; + s32 target_value; /* Do not change to 64 bit */ + s32 default_value; + s32 no_constraint_value; + enum pm_qos_type type; + struct blocking_notifier_head *notifiers; +}; + +struct pm_qos_flags { + struct list_head list; + s32 effective_flags; /* Do not change to 64 bit */ +}; + +struct dev_pm_qos { + struct pm_qos_constraints resume_latency; + struct pm_qos_constraints latency_tolerance; + struct pm_qos_flags flags; + struct dev_pm_qos_request *resume_latency_req; + struct dev_pm_qos_request *latency_tolerance_req; + struct dev_pm_qos_request *flags_req; +}; + +/* Action requested to pm_qos_update_target */ +enum pm_qos_req_action { + PM_QOS_ADD_REQ, /* Add a new request */ + PM_QOS_UPDATE_REQ, /* Update an existing request */ + PM_QOS_REMOVE_REQ /* Remove an existing request */ +}; + +static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) +{ + return req->dev != NULL; +} + +int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, + enum pm_qos_req_action action, int value); +bool pm_qos_update_flags(struct pm_qos_flags *pqf, + struct pm_qos_flags_request *req, + enum pm_qos_req_action action, s32 val); +void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, + s32 value); +void pm_qos_update_request(struct pm_qos_request *req, + s32 new_value); +void pm_qos_update_request_timeout(struct pm_qos_request *req, + s32 new_value, unsigned long timeout_us); +void pm_qos_remove_request(struct pm_qos_request *req); + +int pm_qos_request(int pm_qos_class); +int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); +int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); +int pm_qos_request_active(struct pm_qos_request *req); +s32 pm_qos_read_value(struct pm_qos_constraints *c); + +#ifdef CONFIG_PM +enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); +enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); +s32 __dev_pm_qos_read_value(struct device *dev); +s32 dev_pm_qos_read_value(struct device *dev); +int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value); +int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); +int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); +int dev_pm_qos_add_notifier(struct device *dev, + struct notifier_block *notifier); +int dev_pm_qos_remove_notifier(struct device *dev, + struct notifier_block *notifier); +void dev_pm_qos_constraints_init(struct device *dev); +void dev_pm_qos_constraints_destroy(struct device *dev); +int dev_pm_qos_add_ancestor_request(struct device *dev, + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value); +int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); +void dev_pm_qos_hide_latency_limit(struct device *dev); +int dev_pm_qos_expose_flags(struct device *dev, s32 value); +void dev_pm_qos_hide_flags(struct device *dev); +int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); +int dev_pm_qos_expose_latency_tolerance(struct device *dev); +void dev_pm_qos_hide_latency_tolerance(struct device *dev); + +static inline s32 
dev_pm_qos_requested_resume_latency(struct device *dev) +{ + return dev->power.qos->resume_latency_req->data.pnode.prio; +} + +static inline s32 dev_pm_qos_requested_flags(struct device *dev) +{ + return dev->power.qos->flags_req->data.flr.flags; +} + +static inline s32 dev_pm_qos_raw_read_value(struct device *dev) +{ + return IS_ERR_OR_NULL(dev->power.qos) ? + PM_QOS_RESUME_LATENCY_NO_CONSTRAINT : + pm_qos_read_value(&dev->power.qos->resume_latency); +} +#else +static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, + s32 mask) + { return PM_QOS_FLAGS_UNDEFINED; } +static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, + s32 mask) + { return PM_QOS_FLAGS_UNDEFINED; } +static inline s32 __dev_pm_qos_read_value(struct device *dev) + { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } +static inline s32 dev_pm_qos_read_value(struct device *dev) + { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } +static inline int dev_pm_qos_add_request(struct device *dev, + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, + s32 value) + { return 0; } +static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, + s32 new_value) + { return 0; } +static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) + { return 0; } +static inline int dev_pm_qos_add_notifier(struct device *dev, + struct notifier_block *notifier) + { return 0; } +static inline int dev_pm_qos_remove_notifier(struct device *dev, + struct notifier_block *notifier) + { return 0; } +static inline void dev_pm_qos_constraints_init(struct device *dev) +{ + dev->power.power_state = PMSG_ON; +} +static inline void dev_pm_qos_constraints_destroy(struct device *dev) +{ + dev->power.power_state = PMSG_INVALID; +} +static inline int dev_pm_qos_add_ancestor_request(struct device *dev, + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, + s32 value) + { return 0; } +static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) + { return 0; } +static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} +static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value) + { return 0; } +static inline void dev_pm_qos_hide_flags(struct device *dev) {} +static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) + { return 0; } +static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) + { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } +static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) + { return 0; } +static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev) + { return 0; } +static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} + +static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) +{ + return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; +} +static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } +static inline s32 dev_pm_qos_raw_read_value(struct device *dev) +{ + return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; +} +#endif + +#endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h new file mode 100644 index 000000000..f0fc4700b --- /dev/null +++ b/include/linux/pm_runtime.h @@ -0,0 +1,279 @@ +/* + * pm_runtime.h - Device run-time power management helper functions. + * + * Copyright (C) 2009 Rafael J. Wysocki + * + * This file is released under the GPLv2. 
+ */ + +#ifndef _LINUX_PM_RUNTIME_H +#define _LINUX_PM_RUNTIME_H + +#include +#include +#include + +#include + +/* Runtime PM flag argument bits */ +#define RPM_ASYNC 0x01 /* Request is asynchronous */ +#define RPM_NOWAIT 0x02 /* Don't wait for concurrent + state change */ +#define RPM_GET_PUT 0x04 /* Increment/decrement the + usage_count */ +#define RPM_AUTO 0x08 /* Use autosuspend_delay */ + +#ifdef CONFIG_PM +extern struct workqueue_struct *pm_wq; + +static inline bool queue_pm_work(struct work_struct *work) +{ + return queue_work(pm_wq, work); +} + +extern int pm_generic_runtime_suspend(struct device *dev); +extern int pm_generic_runtime_resume(struct device *dev); +extern int pm_runtime_force_suspend(struct device *dev); +extern int pm_runtime_force_resume(struct device *dev); + +extern int __pm_runtime_idle(struct device *dev, int rpmflags); +extern int __pm_runtime_suspend(struct device *dev, int rpmflags); +extern int __pm_runtime_resume(struct device *dev, int rpmflags); +extern int pm_runtime_get_if_in_use(struct device *dev); +extern int pm_schedule_suspend(struct device *dev, unsigned int delay); +extern int __pm_runtime_set_status(struct device *dev, unsigned int status); +extern int pm_runtime_barrier(struct device *dev); +extern void pm_runtime_enable(struct device *dev); +extern void __pm_runtime_disable(struct device *dev, bool check_resume); +extern void pm_runtime_allow(struct device *dev); +extern void pm_runtime_forbid(struct device *dev); +extern void pm_runtime_no_callbacks(struct device *dev); +extern void pm_runtime_irq_safe(struct device *dev); +extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); +extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); +extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); +extern void pm_runtime_update_max_time_suspended(struct device *dev, + s64 delta_ns); +extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); +extern void pm_runtime_clean_up_links(struct device *dev); +extern void pm_runtime_get_suppliers(struct device *dev); +extern void pm_runtime_put_suppliers(struct device *dev); +extern void pm_runtime_new_link(struct device *dev); +extern void pm_runtime_drop_link(struct device *dev); + +static inline void pm_suspend_ignore_children(struct device *dev, bool enable) +{ + dev->power.ignore_children = enable; +} + +static inline void pm_runtime_get_noresume(struct device *dev) +{ + atomic_inc(&dev->power.usage_count); +} + +static inline void pm_runtime_put_noidle(struct device *dev) +{ + atomic_add_unless(&dev->power.usage_count, -1, 0); +} + +static inline bool pm_runtime_suspended(struct device *dev) +{ + return dev->power.runtime_status == RPM_SUSPENDED + && !dev->power.disable_depth; +} + +static inline bool pm_runtime_active(struct device *dev) +{ + return dev->power.runtime_status == RPM_ACTIVE + || dev->power.disable_depth; +} + +static inline bool pm_runtime_status_suspended(struct device *dev) +{ + return dev->power.runtime_status == RPM_SUSPENDED; +} + +static inline bool pm_runtime_enabled(struct device *dev) +{ + return !dev->power.disable_depth; +} + +static inline bool pm_runtime_callbacks_present(struct device *dev) +{ + return !dev->power.no_callbacks; +} + +static inline void pm_runtime_mark_last_busy(struct device *dev) +{ + WRITE_ONCE(dev->power.last_busy, jiffies); +} + +static inline bool pm_runtime_is_irq_safe(struct device *dev) +{ + return dev->power.irq_safe; +} + +#else /* !CONFIG_PM */ + +static inline bool 
queue_pm_work(struct work_struct *work) { return false; } + +static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } +static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } +static inline int pm_runtime_force_resume(struct device *dev) { return 0; } + +static inline int __pm_runtime_idle(struct device *dev, int rpmflags) +{ + return -ENOSYS; +} +static inline int __pm_runtime_suspend(struct device *dev, int rpmflags) +{ + return -ENOSYS; +} +static inline int __pm_runtime_resume(struct device *dev, int rpmflags) +{ + return 1; +} +static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) +{ + return -ENOSYS; +} +static inline int pm_runtime_get_if_in_use(struct device *dev) +{ + return -EINVAL; +} +static inline int __pm_runtime_set_status(struct device *dev, + unsigned int status) { return 0; } +static inline int pm_runtime_barrier(struct device *dev) { return 0; } +static inline void pm_runtime_enable(struct device *dev) {} +static inline void __pm_runtime_disable(struct device *dev, bool c) {} +static inline void pm_runtime_allow(struct device *dev) {} +static inline void pm_runtime_forbid(struct device *dev) {} + +static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} +static inline void pm_runtime_get_noresume(struct device *dev) {} +static inline void pm_runtime_put_noidle(struct device *dev) {} +static inline bool pm_runtime_suspended(struct device *dev) { return false; } +static inline bool pm_runtime_active(struct device *dev) { return true; } +static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } +static inline bool pm_runtime_enabled(struct device *dev) { return false; } + +static inline void pm_runtime_no_callbacks(struct device *dev) {} +static inline void pm_runtime_irq_safe(struct device *dev) {} +static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } + +static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } +static inline void pm_runtime_mark_last_busy(struct device *dev) {} +static inline void __pm_runtime_use_autosuspend(struct device *dev, + bool use) {} +static inline void pm_runtime_set_autosuspend_delay(struct device *dev, + int delay) {} +static inline unsigned long pm_runtime_autosuspend_expiration( + struct device *dev) { return 0; } +static inline void pm_runtime_set_memalloc_noio(struct device *dev, + bool enable){} +static inline void pm_runtime_clean_up_links(struct device *dev) {} +static inline void pm_runtime_get_suppliers(struct device *dev) {} +static inline void pm_runtime_put_suppliers(struct device *dev) {} +static inline void pm_runtime_new_link(struct device *dev) {} +static inline void pm_runtime_drop_link(struct device *dev) {} + +#endif /* !CONFIG_PM */ + +static inline int pm_runtime_idle(struct device *dev) +{ + return __pm_runtime_idle(dev, 0); +} + +static inline int pm_runtime_suspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, 0); +} + +static inline int pm_runtime_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_AUTO); +} + +static inline int pm_runtime_resume(struct device *dev) +{ + return __pm_runtime_resume(dev, 0); +} + +static inline int pm_request_idle(struct device *dev) +{ + return __pm_runtime_idle(dev, RPM_ASYNC); +} + +static inline int pm_request_resume(struct device *dev) +{ + return __pm_runtime_resume(dev, RPM_ASYNC); +} + +static 
inline int pm_request_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); +} + +static inline int pm_runtime_get(struct device *dev) +{ + return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); +} + +static inline int pm_runtime_get_sync(struct device *dev) +{ + return __pm_runtime_resume(dev, RPM_GET_PUT); +} + +static inline int pm_runtime_put(struct device *dev) +{ + return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); +} + +static inline int pm_runtime_put_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, + RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); +} + +static inline int pm_runtime_put_sync(struct device *dev) +{ + return __pm_runtime_idle(dev, RPM_GET_PUT); +} + +static inline int pm_runtime_put_sync_suspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_GET_PUT); +} + +static inline int pm_runtime_put_sync_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); +} + +static inline int pm_runtime_set_active(struct device *dev) +{ + return __pm_runtime_set_status(dev, RPM_ACTIVE); +} + +static inline int pm_runtime_set_suspended(struct device *dev) +{ + return __pm_runtime_set_status(dev, RPM_SUSPENDED); +} + +static inline void pm_runtime_disable(struct device *dev) +{ + __pm_runtime_disable(dev, true); +} + +static inline void pm_runtime_use_autosuspend(struct device *dev) +{ + __pm_runtime_use_autosuspend(dev, true); +} + +static inline void pm_runtime_dont_use_autosuspend(struct device *dev) +{ + __pm_runtime_use_autosuspend(dev, false); +} + +#endif diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h new file mode 100644 index 000000000..cd5b62db9 --- /dev/null +++ b/include/linux/pm_wakeirq.h @@ -0,0 +1,51 @@ +/* + * pm_wakeirq.h - Device wakeirq helper functions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_PM_WAKEIRQ_H +#define _LINUX_PM_WAKEIRQ_H + +#ifdef CONFIG_PM + +extern int dev_pm_set_wake_irq(struct device *dev, int irq); +extern int dev_pm_set_dedicated_wake_irq(struct device *dev, + int irq); +extern void dev_pm_clear_wake_irq(struct device *dev); +extern void dev_pm_enable_wake_irq(struct device *dev); +extern void dev_pm_disable_wake_irq(struct device *dev); + +#else /* !CONFIG_PM */ + +static inline int dev_pm_set_wake_irq(struct device *dev, int irq) +{ + return 0; +} + +static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) +{ + return 0; +} + +static inline void dev_pm_clear_wake_irq(struct device *dev) +{ +} + +static inline void dev_pm_enable_wake_irq(struct device *dev) +{ +} + +static inline void dev_pm_disable_wake_irq(struct device *dev) +{ +} + +#endif /* CONFIG_PM */ +#endif /* _LINUX_PM_WAKEIRQ_H */ diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h new file mode 100644 index 000000000..4238dde0a --- /dev/null +++ b/include/linux/pm_wakeup.h @@ -0,0 +1,228 @@ +/* + * pm_wakeup.h - Power management wakeup interface + * + * Copyright (C) 2008 Alan Stern + * Copyright (C) 2010 Rafael J. Wysocki, Novell Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_PM_WAKEUP_H +#define _LINUX_PM_WAKEUP_H + +#ifndef _DEVICE_H_ +# error "please don't include this file directly" +#endif + +#include + +struct wake_irq; + +/** + * struct wakeup_source - Representation of wakeup sources + * + * @name: Name of the wakeup source + * @entry: Wakeup source list entry + * @lock: Wakeup source lock + * @wakeirq: Optional device specific wakeirq + * @timer: Wakeup timer list + * @timer_expires: Wakeup timer expiration + * @total_time: Total time this wakeup source has been active. + * @max_time: Maximum time this wakeup source has been continuously active. + * @last_time: Monotonic clock when the wakeup source's was touched last time. + * @prevent_sleep_time: Total time this source has been preventing autosleep. + * @event_count: Number of signaled wakeup events. + * @active_count: Number of times the wakeup source was activated. + * @relax_count: Number of times the wakeup source was deactivated. + * @expire_count: Number of times the wakeup source's timeout has expired. + * @wakeup_count: Number of times the wakeup source might abort suspend. + * @active: Status of the wakeup source. + * @has_timeout: The wakeup source has been activated with a timeout. + */ +struct wakeup_source { + const char *name; + struct list_head entry; + spinlock_t lock; + struct wake_irq *wakeirq; + struct timer_list timer; + unsigned long timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + unsigned long event_count; + unsigned long active_count; + unsigned long relax_count; + unsigned long expire_count; + unsigned long wakeup_count; + bool active:1; + bool autosleep_enabled:1; +}; + +#ifdef CONFIG_PM_SLEEP + +/* + * Changes to device_may_wakeup take effect on the next pm state change. 
+ */ + +static inline bool device_can_wakeup(struct device *dev) +{ + return dev->power.can_wakeup; +} + +static inline bool device_may_wakeup(struct device *dev) +{ + return dev->power.can_wakeup && !!dev->power.wakeup; +} + +static inline void device_set_wakeup_path(struct device *dev) +{ + dev->power.wakeup_path = true; +} + +/* drivers/base/power/wakeup.c */ +extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); +extern struct wakeup_source *wakeup_source_create(const char *name); +extern void wakeup_source_drop(struct wakeup_source *ws); +extern void wakeup_source_destroy(struct wakeup_source *ws); +extern void wakeup_source_add(struct wakeup_source *ws); +extern void wakeup_source_remove(struct wakeup_source *ws); +extern struct wakeup_source *wakeup_source_register(const char *name); +extern void wakeup_source_unregister(struct wakeup_source *ws); +extern int device_wakeup_enable(struct device *dev); +extern int device_wakeup_disable(struct device *dev); +extern void device_set_wakeup_capable(struct device *dev, bool capable); +extern int device_init_wakeup(struct device *dev, bool val); +extern int device_set_wakeup_enable(struct device *dev, bool enable); +extern void __pm_stay_awake(struct wakeup_source *ws); +extern void pm_stay_awake(struct device *dev); +extern void __pm_relax(struct wakeup_source *ws); +extern void pm_relax(struct device *dev); +extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard); +extern void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard); + +#else /* !CONFIG_PM_SLEEP */ + +static inline void device_set_wakeup_capable(struct device *dev, bool capable) +{ + dev->power.can_wakeup = capable; +} + +static inline bool device_can_wakeup(struct device *dev) +{ + return dev->power.can_wakeup; +} + +static inline void wakeup_source_prepare(struct wakeup_source *ws, + const char *name) {} + +static inline struct wakeup_source *wakeup_source_create(const char *name) +{ + return NULL; +} + +static inline void wakeup_source_drop(struct wakeup_source *ws) {} + +static inline void wakeup_source_destroy(struct wakeup_source *ws) {} + +static inline void wakeup_source_add(struct wakeup_source *ws) {} + +static inline void wakeup_source_remove(struct wakeup_source *ws) {} + +static inline struct wakeup_source *wakeup_source_register(const char *name) +{ + return NULL; +} + +static inline void wakeup_source_unregister(struct wakeup_source *ws) {} + +static inline int device_wakeup_enable(struct device *dev) +{ + dev->power.should_wakeup = true; + return 0; +} + +static inline int device_wakeup_disable(struct device *dev) +{ + dev->power.should_wakeup = false; + return 0; +} + +static inline int device_set_wakeup_enable(struct device *dev, bool enable) +{ + dev->power.should_wakeup = enable; + return 0; +} + +static inline int device_init_wakeup(struct device *dev, bool val) +{ + device_set_wakeup_capable(dev, val); + device_set_wakeup_enable(dev, val); + return 0; +} + +static inline bool device_may_wakeup(struct device *dev) +{ + return dev->power.can_wakeup && dev->power.should_wakeup; +} + +static inline void device_set_wakeup_path(struct device *dev) {} + +static inline void __pm_stay_awake(struct wakeup_source *ws) {} + +static inline void pm_stay_awake(struct device *dev) {} + +static inline void __pm_relax(struct wakeup_source *ws) {} + +static inline void pm_relax(struct device *dev) {} + +static inline void pm_wakeup_ws_event(struct wakeup_source *ws, + unsigned int msec, bool hard) {} 
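A minimal sketch of the usual driver-side use of the wakeup interface declared earlier in this header (the my_dev_* helpers are hypothetical, and <linux/interrupt.h> is assumed for the IRQ types):

static int my_dev_setup_wakeup(struct device *dev)
{
        /* Mark the device wakeup-capable and enable wakeup by default. */
        return device_init_wakeup(dev, true);
}

static irqreturn_t my_dev_wake_irq(int irq, void *data)
{
        struct device *dev = data;

        /* Report a wakeup event; this aborts a system suspend still in progress. */
        pm_wakeup_event(dev, 0);
        return IRQ_HANDLED;
}

static int my_dev_suspend(struct device *dev)
{
        if (device_may_wakeup(dev)) {
                /* Arm the device-specific hardware wakeup source here. */
        }
        return 0;
}

With !CONFIG_PM_SLEEP the calls above collapse to the stubs in this block, so the same driver code builds whether or not system sleep support is configured.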
+ +static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec, + bool hard) {} + +#endif /* !CONFIG_PM_SLEEP */ + +static inline void wakeup_source_init(struct wakeup_source *ws, + const char *name) +{ + wakeup_source_prepare(ws, name); + wakeup_source_add(ws); +} + +static inline void wakeup_source_trash(struct wakeup_source *ws) +{ + wakeup_source_remove(ws); + wakeup_source_drop(ws); +} + +static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) +{ + return pm_wakeup_ws_event(ws, msec, false); +} + +static inline void pm_wakeup_event(struct device *dev, unsigned int msec) +{ + return pm_wakeup_dev_event(dev, msec, false); +} + +static inline void pm_wakeup_hard_event(struct device *dev) +{ + return pm_wakeup_dev_event(dev, 0, true); +} + +#endif /* _LINUX_PM_WAKEUP_H */ diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h new file mode 100644 index 000000000..ee3c2aba2 --- /dev/null +++ b/include/linux/pmbus.h @@ -0,0 +1,49 @@ +/* + * Hardware monitoring driver for PMBus devices + * + * Copyright (c) 2010, 2011 Ericsson AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _PMBUS_H_ +#define _PMBUS_H_ + +/* flags */ + +/* + * PMBUS_SKIP_STATUS_CHECK + * + * During register detection, skip checking the status register for + * communication or command errors. + * + * Some PMBus chips respond with valid data when trying to read an unsupported + * register. For such chips, checking the status register is mandatory when + * trying to determine if a chip register exists or not. + * Other PMBus chips don't support the STATUS_CML register, or report + * communication errors for no explicable reason. For such chips, checking + * the status register must be disabled. + */ +#define PMBUS_SKIP_STATUS_CHECK (1 << 0) + +struct pmbus_platform_data { + u32 flags; /* Device specific flags */ + + /* regulator support */ + int num_regulators; + struct regulator_init_data *reg_init_data; +}; + +#endif /* _PMBUS_H_ */ diff --git a/include/linux/pmu.h b/include/linux/pmu.h new file mode 100644 index 000000000..9ac8fc60a --- /dev/null +++ b/include/linux/pmu.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for talking to the PMU. The PMU is a microcontroller + * which controls battery charging and system power on PowerBook 3400 + * and 2400 models as well as the RTC and various other things. + * + * Copyright (C) 1998 Paul Mackerras. 
+ */ +#ifndef _LINUX_PMU_H +#define _LINUX_PMU_H + +#include + + +extern int find_via_pmu(void); + +extern int pmu_request(struct adb_request *req, + void (*done)(struct adb_request *), int nbytes, ...); +extern int pmu_queue_request(struct adb_request *req); +extern void pmu_poll(void); +extern void pmu_poll_adb(void); /* For use by xmon */ +extern void pmu_wait_complete(struct adb_request *req); + +/* For use before switching interrupts off for a long time; + * warning: not stackable + */ +#if defined(CONFIG_ADB_PMU) +extern void pmu_suspend(void); +extern void pmu_resume(void); +#else +static inline void pmu_suspend(void) +{} +static inline void pmu_resume(void) +{} +#endif + +extern void pmu_enable_irled(int on); + +extern void pmu_restart(void); +extern void pmu_shutdown(void); +extern void pmu_unlock(void); + +extern int pmu_present(void); +extern int pmu_get_model(void); + +extern void pmu_backlight_set_sleep(int sleep); + +#define PMU_MAX_BATTERIES 2 + +/* values for pmu_power_flags */ +#define PMU_PWR_AC_PRESENT 0x00000001 + +/* values for pmu_battery_info.flags */ +#define PMU_BATT_PRESENT 0x00000001 +#define PMU_BATT_CHARGING 0x00000002 +#define PMU_BATT_TYPE_MASK 0x000000f0 +#define PMU_BATT_TYPE_SMART 0x00000010 /* Smart battery */ +#define PMU_BATT_TYPE_HOOPER 0x00000020 /* 3400/3500 */ +#define PMU_BATT_TYPE_COMET 0x00000030 /* 2400 */ + +struct pmu_battery_info +{ + unsigned int flags; + unsigned int charge; /* current charge */ + unsigned int max_charge; /* maximum charge */ + signed int amperage; /* current, positive if charging */ + unsigned int voltage; /* voltage */ + unsigned int time_remaining; /* remaining time */ +}; + +extern int pmu_battery_count; +extern struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES]; +extern unsigned int pmu_power_flags; + +/* Backlight */ +extern void pmu_backlight_init(void); + +/* some code needs to know if the PMU was suspended for hibernation */ +#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) +extern int pmu_sys_suspended; +#else +/* if power management is not configured it can't be suspended */ +#define pmu_sys_suspended 0 +#endif + +#endif /* _LINUX_PMU_H */ diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h new file mode 100644 index 000000000..17d7d0d20 --- /dev/null +++ b/include/linux/pnfs_osd_xdr.h @@ -0,0 +1,317 @@ +/* + * pNFS-osd on-the-wire data structures + * + * Copyright (C) 2007 Panasas Inc. [year of first publication] + * All rights reserved. + * + * Benny Halevy + * Boaz Harrosh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * See the file COPYING included with this distribution for more details. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Panasas company nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __PNFS_OSD_XDR_H__ +#define __PNFS_OSD_XDR_H__ + +#include + +/* + * draft-ietf-nfsv4-minorversion-22 + * draft-ietf-nfsv4-pnfs-obj-12 + */ + +/* Layout Structure */ + +enum pnfs_osd_raid_algorithm4 { + PNFS_OSD_RAID_0 = 1, + PNFS_OSD_RAID_4 = 2, + PNFS_OSD_RAID_5 = 3, + PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */ +}; + +/* struct pnfs_osd_data_map4 { + * uint32_t odm_num_comps; + * length4 odm_stripe_unit; + * uint32_t odm_group_width; + * uint32_t odm_group_depth; + * uint32_t odm_mirror_cnt; + * pnfs_osd_raid_algorithm4 odm_raid_algorithm; + * }; + */ +struct pnfs_osd_data_map { + u32 odm_num_comps; + u64 odm_stripe_unit; + u32 odm_group_width; + u32 odm_group_depth; + u32 odm_mirror_cnt; + u32 odm_raid_algorithm; +}; + +/* struct pnfs_osd_objid4 { + * deviceid4 oid_device_id; + * uint64_t oid_partition_id; + * uint64_t oid_object_id; + * }; + */ +struct pnfs_osd_objid { + struct nfs4_deviceid oid_device_id; + u64 oid_partition_id; + u64 oid_object_id; +}; + +/* For printout. I use: + * kprint("dev(%llx:%llx)", _DEVID_LO(pointer), _DEVID_HI(pointer)); + * BE style + */ +#define _DEVID_LO(oid_device_id) \ + (unsigned long long)be64_to_cpup((__be64 *)(oid_device_id)->data) + +#define _DEVID_HI(oid_device_id) \ + (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1) + +enum pnfs_osd_version { + PNFS_OSD_MISSING = 0, + PNFS_OSD_VERSION_1 = 1, + PNFS_OSD_VERSION_2 = 2 +}; + +struct pnfs_osd_opaque_cred { + u32 cred_len; + void *cred; +}; + +enum pnfs_osd_cap_key_sec { + PNFS_OSD_CAP_KEY_SEC_NONE = 0, + PNFS_OSD_CAP_KEY_SEC_SSV = 1, +}; + +/* struct pnfs_osd_object_cred4 { + * pnfs_osd_objid4 oc_object_id; + * pnfs_osd_version4 oc_osd_version; + * pnfs_osd_cap_key_sec4 oc_cap_key_sec; + * opaque oc_capability_key<>; + * opaque oc_capability<>; + * }; + */ +struct pnfs_osd_object_cred { + struct pnfs_osd_objid oc_object_id; + u32 oc_osd_version; + u32 oc_cap_key_sec; + struct pnfs_osd_opaque_cred oc_cap_key; + struct pnfs_osd_opaque_cred oc_cap; +}; + +/* struct pnfs_osd_layout4 { + * pnfs_osd_data_map4 olo_map; + * uint32_t olo_comps_index; + * pnfs_osd_object_cred4 olo_components<>; + * }; + */ +struct pnfs_osd_layout { + struct pnfs_osd_data_map olo_map; + u32 olo_comps_index; + u32 olo_num_comps; + struct pnfs_osd_object_cred *olo_comps; +}; + +/* Device Address */ +enum pnfs_osd_targetid_type { + OBJ_TARGET_ANON = 1, + OBJ_TARGET_SCSI_NAME = 2, + OBJ_TARGET_SCSI_DEVICE_ID = 3, +}; + +/* union pnfs_osd_targetid4 switch (pnfs_osd_targetid_type4 oti_type) { + * case OBJ_TARGET_SCSI_NAME: + * string oti_scsi_name<>; + * + * case OBJ_TARGET_SCSI_DEVICE_ID: + * opaque oti_scsi_device_id<>; + * + * default: + * void; + * }; + * + * union pnfs_osd_targetaddr4 switch (bool ota_available) { + * case TRUE: + * netaddr4 
ota_netaddr; + * case FALSE: + * void; + * }; + * + * struct pnfs_osd_deviceaddr4 { + * pnfs_osd_targetid4 oda_targetid; + * pnfs_osd_targetaddr4 oda_targetaddr; + * uint64_t oda_lun; + * opaque oda_systemid<>; + * pnfs_osd_object_cred4 oda_root_obj_cred; + * opaque oda_osdname<>; + * }; + */ +struct pnfs_osd_targetid { + u32 oti_type; + struct nfs4_string oti_scsi_device_id; +}; + +/* struct netaddr4 { + * // see struct rpcb in RFC1833 + * string r_netid<>; // network id + * string r_addr<>; // universal address + * }; + */ +struct pnfs_osd_net_addr { + struct nfs4_string r_netid; + struct nfs4_string r_addr; +}; + +struct pnfs_osd_targetaddr { + u32 ota_available; + struct pnfs_osd_net_addr ota_netaddr; +}; + +struct pnfs_osd_deviceaddr { + struct pnfs_osd_targetid oda_targetid; + struct pnfs_osd_targetaddr oda_targetaddr; + u8 oda_lun[8]; + struct nfs4_string oda_systemid; + struct pnfs_osd_object_cred oda_root_obj_cred; + struct nfs4_string oda_osdname; +}; + +/* LAYOUTCOMMIT: layoutupdate */ + +/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) { + * case TRUE: + * int64_t dsu_delta; + * case FALSE: + * void; + * }; + * + * struct pnfs_osd_layoutupdate4 { + * pnfs_osd_deltaspaceused4 olu_delta_space_used; + * bool olu_ioerr_flag; + * }; + */ +struct pnfs_osd_layoutupdate { + u32 dsu_valid; + s64 dsu_delta; + u32 olu_ioerr_flag; +}; + +/* LAYOUTRETURN: I/O Error Report */ + +enum pnfs_osd_errno { + PNFS_OSD_ERR_EIO = 1, + PNFS_OSD_ERR_NOT_FOUND = 2, + PNFS_OSD_ERR_NO_SPACE = 3, + PNFS_OSD_ERR_BAD_CRED = 4, + PNFS_OSD_ERR_NO_ACCESS = 5, + PNFS_OSD_ERR_UNREACHABLE = 6, + PNFS_OSD_ERR_RESOURCE = 7 +}; + +/* struct pnfs_osd_ioerr4 { + * pnfs_osd_objid4 oer_component; + * length4 oer_comp_offset; + * length4 oer_comp_length; + * bool oer_iswrite; + * pnfs_osd_errno4 oer_errno; + * }; + */ +struct pnfs_osd_ioerr { + struct pnfs_osd_objid oer_component; + u64 oer_comp_offset; + u64 oer_comp_length; + u32 oer_iswrite; + u32 oer_errno; +}; + +/* OSD XDR Client API */ +/* Layout helpers */ +/* Layout decoding is done in two parts: + * 1. First call pnfs_osd_xdr_decode_layout_map to read in only the header part + * of the layout. @iter members need not be initialized. + * Returned: + * @layout members are set. (@layout->olo_comps set to NULL). + * + * Zero on success, or negative error if passed xdr is broken. + * + * 2. Then call pnfs_osd_xdr_decode_layout_comp() in a loop until it returns + * false, to decode the next component. + * Returned: + * true if there is more to decode or false if we are done or error. + * + * Example: + * struct pnfs_osd_xdr_decode_layout_iter iter; + * struct pnfs_osd_layout layout; + * struct pnfs_osd_object_cred comp; + * int status; + * + * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr); + * if (unlikely(status)) + * goto err; + * while(pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) { + * // All of @comp strings point to inside the xdr_buffer + * // or scratch buffer. Copy them out to user memory, e.g.
+ * copy_single_comp(dest_comp++, &comp); + * } + * if (unlikely(status)) + * goto err; + */ + +struct pnfs_osd_xdr_decode_layout_iter { + unsigned total_comps; + unsigned decoded_comps; +}; + +extern int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout, + struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr); + +extern bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp, + struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr, + int *err); + +/* Device Info helpers */ + +/* Note: All strings inside @deviceaddr point to space inside @p. + * @p should stay valid while @deviceaddr is in use. + */ +extern void pnfs_osd_xdr_decode_deviceaddr( + struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p); + +/* layoutupdate (layout_commit) xdr helpers */ +extern int +pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr, + struct pnfs_osd_layoutupdate *lou); + +/* osd_ioerror encoding (layout_return) */ +extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr); +extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr); + +#endif /* __PNFS_OSD_XDR_H__ */ diff --git a/include/linux/pnp.h b/include/linux/pnp.h new file mode 100644 index 000000000..fc4df3cce --- /dev/null +++ b/include/linux/pnp.h @@ -0,0 +1,517 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Plug and Play Support + * Copyright by Adam Belay + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas + */ + +#ifndef _LINUX_PNP_H +#define _LINUX_PNP_H + +#include +#include +#include +#include +#include + +#define PNP_NAME_LEN 50 + +struct pnp_protocol; +struct pnp_dev; + +/* + * Resource Management + */ +#ifdef CONFIG_PNP +struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned long type, + unsigned int num); +#else +static inline struct resource *pnp_get_resource(struct pnp_dev *dev, + unsigned long type, unsigned int num) +{ + return NULL; +} +#endif + +static inline int pnp_resource_valid(struct resource *res) +{ + if (res) + return 1; + return 0; +} + +static inline int pnp_resource_enabled(struct resource *res) +{ + if (res && !(res->flags & IORESOURCE_DISABLED)) + return 1; + return 0; +} + +static inline resource_size_t pnp_resource_len(struct resource *res) +{ + if (res->start == 0 && res->end == 0) + return 0; + return resource_size(res); +} + + +static inline resource_size_t pnp_port_start(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return res->start; + return 0; +} + +static inline resource_size_t pnp_port_end(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return res->end; + return 0; +} + +static inline unsigned long pnp_port_flags(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_IO | IORESOURCE_AUTO; +} + +static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar) +{ + return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_IO, bar)); +} + +static inline resource_size_t pnp_port_len(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return pnp_resource_len(res); + return 0; +} + + +static inline resource_size_t pnp_mem_start(struct pnp_dev *dev, 
+ unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return res->start; + return 0; +} + +static inline resource_size_t pnp_mem_end(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return res->end; + return 0; +} + +static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_MEM | IORESOURCE_AUTO; +} + +static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar) +{ + return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_MEM, bar)); +} + +static inline resource_size_t pnp_mem_len(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return pnp_resource_len(res); + return 0; +} + + +static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar); + + if (pnp_resource_valid(res)) + return res->start; + return -1; +} + +static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_IRQ | IORESOURCE_AUTO; +} + +static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar) +{ + return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_IRQ, bar)); +} + + +static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar); + + if (pnp_resource_valid(res)) + return res->start; + return -1; +} + +static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_DMA | IORESOURCE_AUTO; +} + +static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar) +{ + return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_DMA, bar)); +} + + +/* + * Device Management + */ + +struct pnp_card { + struct device dev; /* Driver Model device interface */ + unsigned char number; /* used as an index, must be unique */ + struct list_head global_list; /* node in global list of cards */ + struct list_head protocol_list; /* node in protocol's list of cards */ + struct list_head devices; /* devices attached to the card */ + + struct pnp_protocol *protocol; + struct pnp_id *id; /* contains supported EISA IDs */ + + char name[PNP_NAME_LEN]; /* contains a human-readable name */ + unsigned char pnpver; /* Plug & Play version */ + unsigned char productver; /* product version */ + unsigned int serial; /* serial number */ + unsigned char checksum; /* if zero - checksum passed */ + struct proc_dir_entry *procdir; /* directory entry in /proc/bus/isapnp */ +}; + +#define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list) +#define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list) +#define to_pnp_card(n) container_of(n, struct pnp_card, dev) +#define pnp_for_each_card(card) \ + list_for_each_entry(card, &pnp_cards, global_list) + +struct pnp_card_link { + struct pnp_card *card; + struct pnp_card_driver *driver; + void *driver_data; + pm_message_t pm_state; 
+}; + +static inline void *pnp_get_card_drvdata(struct pnp_card_link *pcard) +{ + return pcard->driver_data; +} + +static inline void pnp_set_card_drvdata(struct pnp_card_link *pcard, void *data) +{ + pcard->driver_data = data; +} + +struct pnp_dev { + struct device dev; /* Driver Model device interface */ + u64 dma_mask; + unsigned int number; /* used as an index, must be unique */ + int status; + + struct list_head global_list; /* node in global list of devices */ + struct list_head protocol_list; /* node in list of device's protocol */ + struct list_head card_list; /* node in card's list of devices */ + struct list_head rdev_list; /* node in cards list of requested devices */ + + struct pnp_protocol *protocol; + struct pnp_card *card; /* card the device is attached to, none if NULL */ + struct pnp_driver *driver; + struct pnp_card_link *card_link; + + struct pnp_id *id; /* supported EISA IDs */ + + int active; + int capabilities; + unsigned int num_dependent_sets; + struct list_head resources; + struct list_head options; + + char name[PNP_NAME_LEN]; /* contains a human-readable name */ + int flags; /* used by protocols */ + struct proc_dir_entry *procent; /* device entry in /proc/bus/isapnp */ + void *data; +}; + +#define global_to_pnp_dev(n) list_entry(n, struct pnp_dev, global_list) +#define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list) +#define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list) +#define to_pnp_dev(n) container_of(n, struct pnp_dev, dev) +#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list) +#define card_for_each_dev(card, dev) \ + list_for_each_entry(dev, &(card)->devices, card_list) +#define pnp_dev_name(dev) (dev)->name + +static inline void *pnp_get_drvdata(struct pnp_dev *pdev) +{ + return dev_get_drvdata(&pdev->dev); +} + +static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data) +{ + dev_set_drvdata(&pdev->dev, data); +} + +struct pnp_fixup { + char id[7]; + void (*quirk_function) (struct pnp_dev * dev); /* fixup function */ +}; + +/* config parameters */ +#define PNP_CONFIG_NORMAL 0x0001 +#define PNP_CONFIG_FORCE 0x0002 /* disables validity checking */ + +/* capabilities */ +#define PNP_READ 0x0001 +#define PNP_WRITE 0x0002 +#define PNP_DISABLE 0x0004 +#define PNP_CONFIGURABLE 0x0008 +#define PNP_REMOVABLE 0x0010 +#define PNP_CONSOLE 0x0020 + +#define pnp_can_read(dev) (((dev)->protocol->get) && \ + ((dev)->capabilities & PNP_READ)) +#define pnp_can_write(dev) (((dev)->protocol->set) && \ + ((dev)->capabilities & PNP_WRITE)) +#define pnp_can_disable(dev) (((dev)->protocol->disable) && \ + ((dev)->capabilities & PNP_DISABLE) && \ + (!((dev)->capabilities & PNP_CONSOLE) || \ + console_suspend_enabled)) +#define pnp_can_configure(dev) ((!(dev)->active) && \ + ((dev)->capabilities & PNP_CONFIGURABLE)) +#define pnp_can_suspend(dev) (((dev)->protocol->suspend) && \ + (!((dev)->capabilities & PNP_CONSOLE) || \ + console_suspend_enabled)) + + +#ifdef CONFIG_ISAPNP +extern struct pnp_protocol isapnp_protocol; +#define pnp_device_is_isapnp(dev) ((dev)->protocol == (&isapnp_protocol)) +#else +#define pnp_device_is_isapnp(dev) 0 +#endif +extern struct mutex pnp_res_mutex; + +#ifdef CONFIG_PNPBIOS +extern struct pnp_protocol pnpbios_protocol; +extern bool arch_pnpbios_disabled(void); +#define pnp_device_is_pnpbios(dev) ((dev)->protocol == (&pnpbios_protocol)) +#else +#define pnp_device_is_pnpbios(dev) 0 +#define arch_pnpbios_disabled() false +#endif + +#ifdef CONFIG_PNPACPI +extern struct pnp_protocol 
pnpacpi_protocol; + +static inline struct acpi_device *pnp_acpi_device(struct pnp_dev *dev) +{ + if (dev->protocol == &pnpacpi_protocol) + return dev->data; + return NULL; +} +#else +#define pnp_acpi_device(dev) 0 +#endif + +/* status */ +#define PNP_READY 0x0000 +#define PNP_ATTACHED 0x0001 +#define PNP_BUSY 0x0002 +#define PNP_FAULTY 0x0004 + +/* isapnp specific macros */ + +#define isapnp_card_number(dev) ((dev)->card ? (dev)->card->number : -1) +#define isapnp_csn_number(dev) ((dev)->number) + +/* + * Driver Management + */ + +struct pnp_id { + char id[PNP_ID_LEN]; + struct pnp_id *next; +}; + +struct pnp_driver { + char *name; + const struct pnp_device_id *id_table; + unsigned int flags; + int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id); + void (*remove) (struct pnp_dev *dev); + void (*shutdown) (struct pnp_dev *dev); + int (*suspend) (struct pnp_dev *dev, pm_message_t state); + int (*resume) (struct pnp_dev *dev); + struct device_driver driver; +}; + +#define to_pnp_driver(drv) container_of(drv, struct pnp_driver, driver) + +struct pnp_card_driver { + struct list_head global_list; + char *name; + const struct pnp_card_device_id *id_table; + unsigned int flags; + int (*probe) (struct pnp_card_link *card, + const struct pnp_card_device_id *card_id); + void (*remove) (struct pnp_card_link *card); + int (*suspend) (struct pnp_card_link *card, pm_message_t state); + int (*resume) (struct pnp_card_link *card); + struct pnp_driver link; +}; + +#define to_pnp_card_driver(drv) container_of(drv, struct pnp_card_driver, link) + +/* pnp driver flags */ +#define PNP_DRIVER_RES_DO_NOT_CHANGE 0x0001 /* do not change the state of the device */ +#define PNP_DRIVER_RES_DISABLE 0x0003 /* ensure the device is disabled */ + +/* + * Protocol Management + */ + +struct pnp_protocol { + struct list_head protocol_list; + char *name; + + /* resource control functions */ + int (*get) (struct pnp_dev *dev); + int (*set) (struct pnp_dev *dev); + int (*disable) (struct pnp_dev *dev); + + /* protocol specific suspend/resume */ + bool (*can_wakeup) (struct pnp_dev *dev); + int (*suspend) (struct pnp_dev * dev, pm_message_t state); + int (*resume) (struct pnp_dev * dev); + + /* used by pnp layer only (look but don't touch) */ + unsigned char number; /* protocol number */ + struct device dev; /* link to driver model */ + struct list_head cards; + struct list_head devices; +}; + +#define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list) +#define protocol_for_each_card(protocol, card) \ + list_for_each_entry(card, &(protocol)->cards, protocol_list) +#define protocol_for_each_dev(protocol, dev) \ + list_for_each_entry(dev, &(protocol)->devices, protocol_list) + +extern struct bus_type pnp_bus_type; + +#if defined(CONFIG_PNP) + +/* device management */ +int pnp_device_attach(struct pnp_dev *pnp_dev); +void pnp_device_detach(struct pnp_dev *pnp_dev); +extern struct list_head pnp_global; +extern int pnp_platform_devices; + +/* multidevice card support */ +struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink, + const char *id, struct pnp_dev *from); +void pnp_release_card_device(struct pnp_dev *dev); +int pnp_register_card_driver(struct pnp_card_driver *drv); +void pnp_unregister_card_driver(struct pnp_card_driver *drv); +extern struct list_head pnp_cards; + +/* resource management */ +int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t base, + resource_size_t size); +int pnp_auto_config_dev(struct pnp_dev *dev); +int pnp_start_dev(struct pnp_dev 
*dev); +int pnp_stop_dev(struct pnp_dev *dev); +int pnp_activate_dev(struct pnp_dev *dev); +int pnp_disable_dev(struct pnp_dev *dev); +int pnp_range_reserved(resource_size_t start, resource_size_t end); + +/* protocol helpers */ +int pnp_is_active(struct pnp_dev *dev); +int compare_pnp_id(struct pnp_id *pos, const char *id); +int pnp_register_driver(struct pnp_driver *drv); +void pnp_unregister_driver(struct pnp_driver *drv); + +#else + +/* device management */ +static inline int pnp_device_attach(struct pnp_dev *pnp_dev) { return -ENODEV; } +static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { } + +#define pnp_platform_devices 0 + +/* multidevice card support */ +static inline struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink, const char *id, struct pnp_dev *from) { return NULL; } +static inline void pnp_release_card_device(struct pnp_dev *dev) { } +static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return -ENODEV; } +static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { } + +/* resource management */ +static inline int pnp_possible_config(struct pnp_dev *dev, int type, + resource_size_t base, + resource_size_t size) { return 0; } +static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;} + +/* protocol helpers */ +static inline int pnp_is_active(struct pnp_dev *dev) { return 0; } +static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -ENODEV; } +static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; } +static inline void pnp_unregister_driver(struct pnp_driver *drv) { } + +#endif /* CONFIG_PNP */ + +/** + * module_pnp_driver() - Helper macro for registering a PnP driver + * @__pnp_driver: pnp_driver struct + * + * Helper macro for PnP drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_pnp_driver(__pnp_driver) \ + module_driver(__pnp_driver, pnp_register_driver, \ + pnp_unregister_driver) + +#endif /* _LINUX_PNP_H */ diff --git a/include/linux/poison.h b/include/linux/poison.h new file mode 100644 index 000000000..15927ebc2 --- /dev/null +++ b/include/linux/poison.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_POISON_H +#define _LINUX_POISON_H + +/********** include/linux/list.h **********/ + +/* + * Architectures might want to move the poison pointer offset + * into some well-recognized area such as 0xdead000000000000, + * that is also not mappable by user-space exploits: + */ +#ifdef CONFIG_ILLEGAL_POINTER_VALUE +# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL) +#else +# define POISON_POINTER_DELTA 0 +#endif + +/* + * These are non-NULL pointers that will result in page faults + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. 
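+ *
+ * For example, list_del() in <linux/list.h> stores these values into the
+ * entry it just unlinked, so a stale user of that entry faults instead of
+ * silently corrupting another list (illustrative sketch; "node" is
+ * hypothetical):
+ *
+ *	list_del(&node->list);
+ *	BUG_ON(node->list.next != LIST_POISON1);
+ *	BUG_ON(node->list.prev != LIST_POISON2);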
+ */ +#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) +#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) + +/********** include/linux/timer.h **********/ +/* + * Magic number "tsta" to indicate a static timer initializer + * for the object debugging code. + */ +#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) + +/********** mm/debug-pagealloc.c **********/ +#ifdef CONFIG_PAGE_POISONING_ZERO +#define PAGE_POISON 0x00 +#else +#define PAGE_POISON 0xaa +#endif + +/********** mm/page_alloc.c ************/ + +#define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA) + +/********** mm/slab.c **********/ +/* + * Magic nums for obj red zoning. + * Placed in the first word before and the first word after an obj. + */ +#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ +#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ + +#define SLUB_RED_INACTIVE 0xbb +#define SLUB_RED_ACTIVE 0xcc + +/* ...and for poisoning */ +#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ +#define POISON_FREE 0x6b /* for use-after-free poisoning */ +#define POISON_END 0xa5 /* end-byte of poisoning */ + +/********** arch/$ARCH/mm/init.c **********/ +#define POISON_FREE_INITMEM 0xcc + +/********** arch/ia64/hp/common/sba_iommu.c **********/ +/* + * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a + * value of "SBAIOMMU POISON\0" for spill-over poisoning. + */ + +/********** fs/jbd/journal.c **********/ +#define JBD_POISON_FREE 0x5b +#define JBD2_POISON_FREE 0x5c + +/********** drivers/base/dmapool.c **********/ +#define POOL_POISON_FREED 0xa7 /* !inuse */ +#define POOL_POISON_ALLOCATED 0xa9 /* !initted */ + +/********** drivers/atm/ **********/ +#define ATM_POISON_FREE 0x12 +#define ATM_POISON 0xdeadbeef + +/********** kernel/mutexes **********/ +#define MUTEX_DEBUG_INIT 0x11 +#define MUTEX_DEBUG_FREE 0x22 +#define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA) + +/********** lib/flex_array.c **********/ +#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ + +/********** security/ **********/ +#define KEY_DESTROY 0xbd + +#endif diff --git a/include/linux/poll.h b/include/linux/poll.h new file mode 100644 index 000000000..1cdc32b1f --- /dev/null +++ b/include/linux/poll.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_POLL_H +#define _LINUX_POLL_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern struct ctl_table epoll_table[]; /* for sysctl */ +/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating + additional memory. */ +#ifdef __clang__ +#define MAX_STACK_ALLOC 768 +#else +#define MAX_STACK_ALLOC 832 +#endif +#define FRONTEND_STACK_ALLOC 256 +#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC +#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC +#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC) +#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry)) + +#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM) + +struct poll_table_struct; + +/* + * structures and helpers for f_op->poll implementations + */ +typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + +/* + * Do not touch the structure directly, use the access functions + * poll_does_not_wait() and poll_requested_events() instead. 
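+ *
+ * A minimal ->poll() method might look like this (illustrative sketch; the
+ * foo_dev structure, its wait queue and foo_data_ready() are hypothetical):
+ *
+ *	static __poll_t foo_poll(struct file *file, poll_table *wait)
+ *	{
+ *		struct foo_dev *foo = file->private_data;
+ *		__poll_t mask = 0;
+ *
+ *		poll_wait(file, &foo->waitq, wait);
+ *		if (foo_data_ready(foo))
+ *			mask |= EPOLLIN | EPOLLRDNORM;
+ *		return mask;
+ *	}
+ *
+ * Such a method can also consult poll_requested_events(wait) if, say, a
+ * receive DMA should only be kicked off when EPOLLIN is actually wanted.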
+ */ +typedef struct poll_table_struct { + poll_queue_proc _qproc; + __poll_t _key; +} poll_table; + +static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) +{ + if (p && p->_qproc && wait_address) + p->_qproc(filp, wait_address, p); +} + +/* + * Return true if it is guaranteed that poll will not wait. This is the case + * if the poll() of another file descriptor in the set got an event, so there + * is no need for waiting. + */ +static inline bool poll_does_not_wait(const poll_table *p) +{ + return p == NULL || p->_qproc == NULL; +} + +/* + * Return the set of events that the application wants to poll for. + * This is useful for drivers that need to know whether a DMA transfer has + * to be started implicitly on poll(). You typically only want to do that + * if the application is actually polling for POLLIN and/or POLLOUT. + */ +static inline __poll_t poll_requested_events(const poll_table *p) +{ + return p ? p->_key : ~(__poll_t)0; +} + +static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) +{ + pt->_qproc = qproc; + pt->_key = ~(__poll_t)0; /* all events enabled */ +} + +static inline bool file_can_poll(struct file *file) +{ + return file->f_op->poll; +} + +static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) +{ + if (unlikely(!file->f_op->poll)) + return DEFAULT_POLLMASK; + return file->f_op->poll(file, pt); +} + +struct poll_table_entry { + struct file *filp; + __poll_t key; + wait_queue_entry_t wait; + wait_queue_head_t *wait_address; +}; + +/* + * Structures and helpers for select/poll syscall + */ +struct poll_wqueues { + poll_table pt; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; + int error; + int inline_index; + struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES]; +}; + +extern void poll_initwait(struct poll_wqueues *pwq); +extern void poll_freewait(struct poll_wqueues *pwq); +extern u64 select_estimate_accuracy(struct timespec64 *tv); + +#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) + +extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, + fd_set __user *exp, struct timespec64 *end_time); + +extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec, + long nsec); + +#define __MAP(v, from, to) \ + (from < to ? 
(v & from) * (to/from) : (v & from) / (from/to)) + +static inline __u16 mangle_poll(__poll_t val) +{ + __u16 v = (__force __u16)val; +#define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X) + return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) | + M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) | + M(HUP) | M(RDHUP) | M(MSG); +#undef M +} + +static inline __poll_t demangle_poll(u16 val) +{ +#define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X) + return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) | + M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) | + M(HUP) | M(RDHUP) | M(MSG); +#undef M +} +#undef __MAP + + +#endif /* _LINUX_POLL_H */ diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h new file mode 100644 index 000000000..03cb1f21b --- /dev/null +++ b/include/linux/posix-clock.h @@ -0,0 +1,132 @@ +/* + * posix-clock.h - support for dynamic clock devices + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#ifndef _LINUX_POSIX_CLOCK_H_ +#define _LINUX_POSIX_CLOCK_H_ + +#include +#include +#include +#include +#include + +struct posix_clock; + +/** + * struct posix_clock_operations - functional interface to the clock + * + * Every posix clock is represented by a character device. Drivers may + * optionally offer extended capabilities by implementing the + * character device methods. The character device file operations are + * first handled by the clock device layer, then passed on to the + * driver by calling these functions. 
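+ *
+ * A driver usually fills in only the methods it supports, for example
+ * (illustrative sketch; the my_* handlers are hypothetical):
+ *
+ *	static struct posix_clock_operations my_clock_ops = {
+ *		.owner		= THIS_MODULE,
+ *		.clock_gettime	= my_clock_gettime,
+ *		.clock_getres	= my_clock_getres,
+ *		.clock_settime	= my_clock_settime,
+ *		.clock_adjtime	= my_clock_adjtime,
+ *	};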
+ * + * @owner: The clock driver should set this to THIS_MODULE + * @clock_adjtime: Adjust the clock + * @clock_gettime: Read the current time + * @clock_getres: Get the clock resolution + * @clock_settime: Set the current time value + * @open: Optional character device open method + * @release: Optional character device release method + * @ioctl: Optional character device ioctl method + * @read: Optional character device read method + * @poll: Optional character device poll method + */ +struct posix_clock_operations { + struct module *owner; + + int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx); + + int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts); + + int (*clock_getres) (struct posix_clock *pc, struct timespec64 *ts); + + int (*clock_settime)(struct posix_clock *pc, + const struct timespec64 *ts); + + /* + * Optional character device methods: + */ + long (*ioctl) (struct posix_clock *pc, + unsigned int cmd, unsigned long arg); + + int (*open) (struct posix_clock *pc, fmode_t f_mode); + + __poll_t (*poll) (struct posix_clock *pc, + struct file *file, poll_table *wait); + + int (*release) (struct posix_clock *pc); + + ssize_t (*read) (struct posix_clock *pc, + uint flags, char __user *buf, size_t cnt); +}; + +/** + * struct posix_clock - represents a dynamic posix clock + * + * @ops: Functional interface to the clock + * @cdev: Character device instance for this clock + * @dev: Pointer to the clock's device. + * @rwsem: Protects the 'zombie' field from concurrent access. + * @zombie: If 'zombie' is true, then the hardware has disappeared. + * + * Drivers should embed their struct posix_clock within a private + * structure, obtaining a reference to it during callbacks using + * container_of(). + * + * Drivers should supply an initialized but not exposed struct device + * to posix_clock_register(). It is used to manage the lifetime of the + * driver's private structure. Its 'release' field should be set to + * a release function for this private structure. + */ +struct posix_clock { + struct posix_clock_operations ops; + struct cdev cdev; + struct device *dev; + struct rw_semaphore rwsem; + bool zombie; +}; + +/** + * posix_clock_register() - register a new clock + * @clk: Pointer to the clock. Caller must provide 'ops' field + * @dev: Pointer to the initialized device. Caller must provide + * 'release' field + * + * A clock driver calls this function to register itself with the + * clock device subsystem. If 'clk' points to dynamically allocated + * memory, then the caller must provide a 'release' function to free + * that memory. + * + * Returns zero on success, non-zero otherwise. + */ +int posix_clock_register(struct posix_clock *clk, struct device *dev); + +/** + * posix_clock_unregister() - unregister a clock + * @clk: Clock instance previously registered via posix_clock_register() + * + * A clock driver calls this function to remove itself from the clock + * device subsystem. The posix_clock itself will remain (in an + * inactive state) until its reference count drops to zero, at which + * point it will be deallocated with its 'release' method.
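+ *
+ * A minimal register/unregister flow, where a hypothetical my_clk object
+ * embeds both the posix_clock and its struct device and my_clk_release()
+ * frees that object, might look like this (illustrative sketch):
+ *
+ *	my_clk->clock.ops = my_clock_ops;
+ *	device_initialize(&my_clk->dev);
+ *	my_clk->dev.release = my_clk_release;
+ *	err = posix_clock_register(&my_clk->clock, &my_clk->dev);
+ *	...
+ *	posix_clock_unregister(&my_clk->clock);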
+ */ +void posix_clock_unregister(struct posix_clock *clk); + +#endif diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h new file mode 100644 index 000000000..ee7e987ea --- /dev/null +++ b/include/linux/posix-timers.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _linux_POSIX_TIMERS_H +#define _linux_POSIX_TIMERS_H + +#include +#include +#include +#include +#include + +struct siginfo; + +struct cpu_timer_list { + struct list_head entry; + u64 expires, incr; + struct task_struct *task; + int firing; +}; + +/* + * Bit fields within a clockid: + * + * The most significant 29 bits hold either a pid or a file descriptor. + * + * Bit 2 indicates whether a cpu clock refers to a thread or a process. + * + * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3. + * + * A clockid is invalid if bits 2, 1, and 0 are all set. + */ +#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3)) +#define CPUCLOCK_PERTHREAD(clock) \ + (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0) + +#define CPUCLOCK_PERTHREAD_MASK 4 +#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK) +#define CPUCLOCK_CLOCK_MASK 3 +#define CPUCLOCK_PROF 0 +#define CPUCLOCK_VIRT 1 +#define CPUCLOCK_SCHED 2 +#define CPUCLOCK_MAX 3 +#define CLOCKFD CPUCLOCK_MAX +#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) + +static inline clockid_t make_process_cpuclock(const unsigned int pid, + const clockid_t clock) +{ + return ((~pid) << 3) | clock; +} +static inline clockid_t make_thread_cpuclock(const unsigned int tid, + const clockid_t clock) +{ + return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK); +} + +static inline clockid_t fd_to_clockid(const int fd) +{ + return make_process_cpuclock((unsigned int) fd, CLOCKFD); +} + +static inline int clockid_to_fd(const clockid_t clk) +{ + return ~(clk >> 3); +} + +#define REQUEUE_PENDING 1 + +/** + * struct k_itimer - POSIX.1b interval timer structure. + * @list: List head for binding the timer to signals->posix_timers + * @t_hash: Entry in the posix timer hash table + * @it_lock: Lock protecting the timer + * @kclock: Pointer to the k_clock struct handling this timer + * @it_clock: The posix timer clock id + * @it_id: The posix timer id for identifying the timer + * @it_active: Marker that timer is active + * @it_overrun: The overrun counter for pending signals + * @it_overrun_last: The overrun at the time of the last delivered signal + * @it_requeue_pending: Indicator that timer waits for being requeued on + * signal delivery + * @it_sigev_notify: The notify word of sigevent struct for signal delivery + * @it_interval: The interval for periodic timers + * @it_signal: Pointer to the creators signal struct + * @it_pid: The pid of the process/task targeted by the signal + * @it_process: The task to wakeup on clock_nanosleep (CPU timers) + * @sigq: Pointer to preallocated sigqueue + * @it: Union representing the various posix timer type + * internals. Also used for rcu freeing the timer. 
+ */ +struct k_itimer { + struct list_head list; + struct hlist_node t_hash; + spinlock_t it_lock; + const struct k_clock *kclock; + clockid_t it_clock; + timer_t it_id; + int it_active; + s64 it_overrun; + s64 it_overrun_last; + int it_requeue_pending; + int it_sigev_notify; + ktime_t it_interval; + struct signal_struct *it_signal; + union { + struct pid *it_pid; + struct task_struct *it_process; + }; + struct sigqueue *sigq; + union { + struct { + struct hrtimer timer; + } real; + struct cpu_timer_list cpu; + struct { + struct alarm alarmtimer; + } alarm; + struct rcu_head rcu; + } it; +}; + +void run_posix_cpu_timers(struct task_struct *task); +void posix_cpu_timers_exit(struct task_struct *task); +void posix_cpu_timers_exit_group(struct task_struct *task); +void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, + u64 *newval, u64 *oldval); + +void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); + +void posixtimer_rearm(struct siginfo *info); +#endif diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h new file mode 100644 index 000000000..540595a32 --- /dev/null +++ b/include/linux/posix_acl.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + File: linux/posix_acl.h + + (C) 2002 Andreas Gruenbacher, +*/ + + +#ifndef __LINUX_POSIX_ACL_H +#define __LINUX_POSIX_ACL_H + +#include +#include +#include +#include +#include + +struct posix_acl_entry { + short e_tag; + unsigned short e_perm; + union { + kuid_t e_uid; + kgid_t e_gid; + }; +}; + +struct posix_acl { + refcount_t a_refcount; + struct rcu_head a_rcu; + unsigned int a_count; + struct posix_acl_entry a_entries[0]; +}; + +#define FOREACH_ACL_ENTRY(pa, acl, pe) \ + for(pa=(acl)->a_entries, pe=pa+(acl)->a_count; pa<pe; pa++) + + +/* + * Duplicate an ACL handle. + */ +static inline struct posix_acl * +posix_acl_dup(struct posix_acl *acl) +{ + if (acl) + refcount_inc(&acl->a_refcount); + return acl; +} + +/* + * Free an ACL handle.
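+ *
+ * A typical get/use/put pairing (illustrative sketch; error handling
+ * trimmed):
+ *
+ *	struct posix_acl *acl = get_acl(inode, ACL_TYPE_ACCESS);
+ *
+ *	if (IS_ERR(acl))
+ *		return PTR_ERR(acl);
+ *	if (acl) {
+ *		error = posix_acl_permission(inode, acl, MAY_READ);
+ *		posix_acl_release(acl);
+ *	}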
+ */ +static inline void +posix_acl_release(struct posix_acl *acl) +{ + if (acl && refcount_dec_and_test(&acl->a_refcount)) + kfree_rcu(acl, a_rcu); +} + + +/* posix_acl.c */ + +extern void posix_acl_init(struct posix_acl *, int); +extern struct posix_acl *posix_acl_alloc(int, gfp_t); +extern int posix_acl_valid(struct user_namespace *, const struct posix_acl *); +extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); +extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); +extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); +extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *); +extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t); + +extern struct posix_acl *get_posix_acl(struct inode *, int); +extern int set_posix_acl(struct inode *, int, struct posix_acl *); + +#ifdef CONFIG_FS_POSIX_ACL +extern int posix_acl_chmod(struct inode *, umode_t); +extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, + struct posix_acl **); +extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **); + +extern int simple_set_acl(struct inode *, struct posix_acl *, int); +extern int simple_acl_create(struct inode *, struct inode *); + +struct posix_acl *get_cached_acl(struct inode *inode, int type); +struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); +void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl); +void forget_cached_acl(struct inode *inode, int type); +void forget_all_cached_acls(struct inode *inode); + +static inline void cache_no_acl(struct inode *inode) +{ + inode->i_acl = NULL; + inode->i_default_acl = NULL; +} +#else +static inline int posix_acl_chmod(struct inode *inode, umode_t mode) +{ + return 0; +} + +#define simple_set_acl NULL + +static inline int simple_acl_create(struct inode *dir, struct inode *inode) +{ + return 0; +} +static inline void cache_no_acl(struct inode *inode) +{ +} + +static inline int posix_acl_create(struct inode *inode, umode_t *mode, + struct posix_acl **default_acl, struct posix_acl **acl) +{ + *default_acl = *acl = NULL; + return 0; +} + +static inline void forget_all_cached_acls(struct inode *inode) +{ +} +#endif /* CONFIG_FS_POSIX_ACL */ + +struct posix_acl *get_acl(struct inode *inode, int type); + +#endif /* __LINUX_POSIX_ACL_H */ diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h new file mode 100644 index 000000000..238770999 --- /dev/null +++ b/include/linux/posix_acl_xattr.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + File: linux/posix_acl_xattr.h + + Extended attribute system call representation of Access Control Lists. 
+ + Copyright (C) 2000 by Andreas Gruenbacher + Copyright (C) 2002 SGI - Silicon Graphics, Inc + */ +#ifndef _POSIX_ACL_XATTR_H +#define _POSIX_ACL_XATTR_H + +#include +#include +#include + +static inline size_t +posix_acl_xattr_size(int count) +{ + return (sizeof(struct posix_acl_xattr_header) + + (count * sizeof(struct posix_acl_xattr_entry))); +} + +static inline int +posix_acl_xattr_count(size_t size) +{ + if (size < sizeof(struct posix_acl_xattr_header)) + return -1; + size -= sizeof(struct posix_acl_xattr_header); + if (size % sizeof(struct posix_acl_xattr_entry)) + return -1; + return size / sizeof(struct posix_acl_xattr_entry); +} + +#ifdef CONFIG_FS_POSIX_ACL +void posix_acl_fix_xattr_from_user(void *value, size_t size); +void posix_acl_fix_xattr_to_user(void *value, size_t size); +#else +static inline void posix_acl_fix_xattr_from_user(void *value, size_t size) +{ +} +static inline void posix_acl_fix_xattr_to_user(void *value, size_t size) +{ +} +#endif + +struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns, + const void *value, size_t size); +int posix_acl_to_xattr(struct user_namespace *user_ns, + const struct posix_acl *acl, void *buffer, size_t size); + +extern const struct xattr_handler posix_acl_access_xattr_handler; +extern const struct xattr_handler posix_acl_default_xattr_handler; + +#endif /* _POSIX_ACL_XATTR_H */ diff --git a/include/linux/power/ab8500.h b/include/linux/power/ab8500.h new file mode 100644 index 000000000..cdbb6c2a8 --- /dev/null +++ b/include/linux/power/ab8500.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) ST-Ericsson 2013 + * Author: Hongbo Zhang + * License terms: GNU General Public License v2 + */ + +#ifndef PWR_AB8500_H +#define PWR_AB8500_H + +extern const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[]; +extern const int ab8500_temp_tbl_a_size; + +extern const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[]; +extern const int ab8500_temp_tbl_b_size; + +#endif /* PWR_AB8500_H */ diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h new file mode 100644 index 000000000..50762af8b --- /dev/null +++ b/include/linux/power/bq2415x_charger.h @@ -0,0 +1,58 @@ +/* + * bq2415x charger driver + * + * Copyright (C) 2011-2013 Pali Rohár + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef BQ2415X_CHARGER_H +#define BQ2415X_CHARGER_H + +/* + * This is the platform data for the bq2415x chip. It contains default board + * voltages and currents which can also be configured later via sysfs. If + * a value is -1 then the default chip value (specified in the datasheet) will + * be used. + * + * The resistor_sense value is needed for configuring the charge and + * termination current. If it is less than or equal to zero, configuring the + * charge and termination current will not be possible.
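+ *
+ * An illustrative board-file initializer (the values below are made-up
+ * examples in the units noted in struct bq2415x_platform_data, not
+ * recommendations):
+ *
+ *	static struct bq2415x_platform_data bq2415x_pdata = {
+ *		.current_limit			= 100,
+ *		.weak_battery_voltage		= 3400,
+ *		.battery_regulation_voltage	= 4200,
+ *		.charge_current			= 650,
+ *		.termination_current		= 100,
+ *		.resistor_sense			= 68,
+ *		.notify_device			= "usb",
+ *	};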
+ * + * For automode support, the name of the power supply device must be provided + * in the notify_device value. The device driver must immediately report the + * property POWER_SUPPLY_PROP_CURRENT_MAX when the current changes. + */ + +/* Supported modes with maximal current limit */ +enum bq2415x_mode { + BQ2415X_MODE_OFF, /* offline mode (charger disabled) */ + BQ2415X_MODE_NONE, /* unknown charger (100mA) */ + BQ2415X_MODE_HOST_CHARGER, /* usb host/hub charger (500mA) */ + BQ2415X_MODE_DEDICATED_CHARGER, /* dedicated charger (unlimited) */ + BQ2415X_MODE_BOOST, /* boost mode (charging disabled) */ +}; + +struct bq2415x_platform_data { + int current_limit; /* mA */ + int weak_battery_voltage; /* mV */ + int battery_regulation_voltage; /* mV */ + int charge_current; /* mA */ + int termination_current; /* mA */ + int resistor_sense; /* m ohm */ + const char *notify_device; /* name */ +}; + +#endif diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h new file mode 100644 index 000000000..45ce7f116 --- /dev/null +++ b/include/linux/power/bq24190_charger.h @@ -0,0 +1,18 @@ +/* + * Platform data for the TI bq24190 battery charger driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _BQ24190_CHARGER_H_ +#define _BQ24190_CHARGER_H_ + +#include + +struct bq24190_platform_data { + const struct regulator_init_data *regulator_init_data; +}; + +#endif diff --git a/include/linux/power/bq24735-charger.h b/include/linux/power/bq24735-charger.h new file mode 100644 index 000000000..b04be59f9 --- /dev/null +++ b/include/linux/power/bq24735-charger.h @@ -0,0 +1,37 @@ +/* + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __CHARGER_BQ24735_H_ +#define __CHARGER_BQ24735_H_ + +#include +#include + +struct bq24735_platform { + uint32_t charge_current; + uint32_t charge_voltage; + uint32_t input_current; + + const char *name; + + bool ext_control; + + char **supplied_to; + size_t num_supplicants; +}; + +#endif /* __CHARGER_BQ24735_H_ */ diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h new file mode 100644 index 000000000..13d5dd4eb --- /dev/null +++ b/include/linux/power/bq27xxx_battery.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BQ27X00_BATTERY_H__ +#define __LINUX_BQ27X00_BATTERY_H__ + +enum bq27xxx_chip { + BQ27000 = 1, /* bq27000, bq27200 */ + BQ27010, /* bq27010, bq27210 */ + BQ2750X, /* bq27500 deprecated alias */ + BQ2751X, /* bq27510, bq27520 deprecated alias */ + BQ2752X, + BQ27500, /* bq27500/1 */ + BQ27510G1, /* bq27510G1 */ + BQ27510G2, /* bq27510G2 */ + BQ27510G3, /* bq27510G3 */ + BQ27520G1, /* bq27520G1 */ + BQ27520G2, /* bq27520G2 */ + BQ27520G3, /* bq27520G3 */ + BQ27520G4, /* bq27520G4 */ + BQ27521, /* bq27521 */ + BQ27530, /* bq27530, bq27531 */ + BQ27531, + BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ + BQ27542, + BQ27546, + BQ27742, + BQ27545, /* bq27545 */ + BQ27421, /* bq27421, bq27441, bq27621 */ + BQ27425, + BQ27426, + BQ27441, + BQ27621, +}; + +struct bq27xxx_device_info; +struct bq27xxx_access_methods { + int (*read)(struct bq27xxx_device_info *di, u8 reg, bool single); + int (*write)(struct bq27xxx_device_info *di, u8 reg, int value, bool single); + int (*read_bulk)(struct bq27xxx_device_info *di, u8 reg, u8 *data, int len); + int (*write_bulk)(struct bq27xxx_device_info *di, u8 reg, u8 *data, int len); +}; + +struct bq27xxx_reg_cache { + int temperature; + int time_to_empty; + int time_to_empty_avg; + int time_to_full; + int charge_full; + int cycle_count; + int capacity; + int energy; + int flags; + int health; +}; + +struct bq27xxx_device_info { + struct device *dev; + int id; + enum bq27xxx_chip chip; + u32 opts; + const char *name; + struct bq27xxx_dm_reg *dm_regs; + u32 unseal_key; + struct bq27xxx_access_methods bus; + struct bq27xxx_reg_cache cache; + int charge_design_full; + unsigned long last_update; + struct delayed_work work; + struct power_supply *bat; + struct list_head list; + struct mutex lock; + u8 *regs; +}; + +void bq27xxx_battery_update(struct bq27xxx_device_info *di); +int bq27xxx_battery_setup(struct bq27xxx_device_info *di); +void bq27xxx_battery_teardown(struct bq27xxx_device_info *di); + +#endif diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h new file mode 100644 index 000000000..c4fa907c8 --- /dev/null +++ b/include/linux/power/charger-manager.h @@ -0,0 +1,259 @@ +/* + * Copyright (C) 2011 Samsung Electronics Co., Ltd. + * MyungJoo.Ham + * + * Charger Manager. + * This framework makes it possible to control multiple chargers and to + * monitor charging even in the context of suspend-to-RAM, with an + * interface combining the chargers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation.
+**/ + +#ifndef _CHARGER_MANAGER_H +#define _CHARGER_MANAGER_H + +#include +#include +#include + +enum data_source { + CM_BATTERY_PRESENT, + CM_NO_BATTERY, + CM_FUEL_GAUGE, + CM_CHARGER_STAT, +}; + +enum polling_modes { + CM_POLL_DISABLE = 0, + CM_POLL_ALWAYS, + CM_POLL_EXTERNAL_POWER_ONLY, + CM_POLL_CHARGING_ONLY, +}; + +enum cm_event_types { + CM_EVENT_UNKNOWN = 0, + CM_EVENT_BATT_FULL, + CM_EVENT_BATT_IN, + CM_EVENT_BATT_OUT, + CM_EVENT_BATT_OVERHEAT, + CM_EVENT_BATT_COLD, + CM_EVENT_EXT_PWR_IN_OUT, + CM_EVENT_CHG_START_STOP, + CM_EVENT_OTHERS, +}; + +/** + * struct charger_cable + * @extcon_name: the name of extcon device. + * @name: the name of charger cable(external connector). + * @extcon_dev: the extcon device. + * @wq: the workqueue to control charger according to the state of + * charger cable. If charger cable is attached, enable charger. + * But if charger cable is detached, disable charger. + * @nb: the notifier block to receive changed state from EXTCON + * (External Connector) when charger cable is attached/detached. + * @attached: the state of charger cable. + * true: the charger cable is attached + * false: the charger cable is detached + * @charger: the instance of struct charger_regulator. + * @cm: the Charger Manager representing the battery. + */ +struct charger_cable { + const char *extcon_name; + const char *name; + + /* The charger-manager use Extcon framework */ + struct extcon_specific_cable_nb extcon_dev; + struct work_struct wq; + struct notifier_block nb; + + /* The state of charger cable */ + bool attached; + + struct charger_regulator *charger; + + /* + * Set min/max current of regulator to protect over-current issue + * according to a kind of charger cable when cable is attached. + */ + int min_uA; + int max_uA; + + struct charger_manager *cm; +}; + +/** + * struct charger_regulator + * @regulator_name: the name of regulator for using charger. + * @consumer: the regulator consumer for the charger. + * @externally_control: + * Set if the charger-manager cannot control charger, + * the charger will be maintained with disabled state. + * @cables: + * the array of charger cables to enable/disable charger + * and set current limit according to constraint data of + * struct charger_cable if only charger cable included + * in the array of charger cables is attached/detached. + * @num_cables: the number of charger cables. + * @attr_g: Attribute group for the charger(regulator) + * @attr_name: "name" sysfs entry + * @attr_state: "state" sysfs entry + * @attr_externally_control: "externally_control" sysfs entry + * @attrs: Arrays pointing to attr_name/state/externally_control for attr_g + */ +struct charger_regulator { + /* The name of regulator for charging */ + const char *regulator_name; + struct regulator *consumer; + + /* charger never on when system is on */ + int externally_control; + + /* + * Store constraint information related to current limit, + * each cable have different condition for charging. + */ + struct charger_cable *cables; + int num_cables; + + struct attribute_group attr_g; + struct device_attribute attr_name; + struct device_attribute attr_state; + struct device_attribute attr_externally_control; + struct attribute *attrs[4]; + + struct charger_manager *cm; +}; + +/** + * struct charger_desc + * @psy_name: the name of power-supply-class for charger manager + * @polling_mode: + * Determine which polling mode will be used + * @fullbatt_vchkdrop_ms: + * @fullbatt_vchkdrop_uV: + * Check voltage drop after the battery is fully charged. 
+ * If it has dropped more than fullbatt_vchkdrop_uV after + * fullbatt_vchkdrop_ms, CM will restart charging. + * @fullbatt_uV: voltage in microvolt + * If VBATT >= fullbatt_uV, it is assumed to be full. + * @fullbatt_soc: state of Charge in % + * If state of Charge >= fullbatt_soc, it is assumed to be full. + * @fullbatt_full_capacity: full capacity measure + * If full capacity of battery >= fullbatt_full_capacity, + * it is assumed to be full. + * @polling_interval_ms: interval in millisecond at which + * charger manager will monitor battery health + * @battery_present: + * Specify where information for existence of battery can be obtained + * @psy_charger_stat: the names of power-supply for chargers + * @num_charger_regulator: the number of entries in charger_regulators + * @charger_regulators: array of charger regulators + * @psy_fuel_gauge: the name of power-supply for fuel gauge + * @thermal_zone : the name of thermal zone for battery + * @temp_min : Minimum battery temperature for charging. + * @temp_max : Maximum battery temperature for charging. + * @temp_diff : Temperature difference to restart charging. + * @measure_battery_temp: + * true: measure battery temperature + * false: measure ambient temperature + * @charging_max_duration_ms: Maximum possible duration for charging + * If whole charging duration exceed 'charging_max_duration_ms', + * cm stop charging. + * @discharging_max_duration_ms: + * Maximum possible duration for discharging with charger cable + * after full-batt. If discharging duration exceed 'discharging + * max_duration_ms', cm start charging. + */ +struct charger_desc { + const char *psy_name; + + enum polling_modes polling_mode; + unsigned int polling_interval_ms; + + unsigned int fullbatt_vchkdrop_ms; + unsigned int fullbatt_vchkdrop_uV; + unsigned int fullbatt_uV; + unsigned int fullbatt_soc; + unsigned int fullbatt_full_capacity; + + enum data_source battery_present; + + const char **psy_charger_stat; + + int num_charger_regulators; + struct charger_regulator *charger_regulators; + + const char *psy_fuel_gauge; + + const char *thermal_zone; + + int temp_min; + int temp_max; + int temp_diff; + + bool measure_battery_temp; + + u32 charging_max_duration_ms; + u32 discharging_max_duration_ms; +}; + +#define PSY_NAME_MAX 30 + +/** + * struct charger_manager + * @entry: entry for list + * @dev: device pointer + * @desc: instance of charger_desc + * @fuel_gauge: power_supply for fuel gauge + * @charger_stat: array of power_supply for chargers + * @tzd_batt : thermal zone device for battery + * @charger_enabled: the state of charger + * @fullbatt_vchk_jiffies_at: + * jiffies at the time full battery check will occur. 
+ * @fullbatt_vchk_work: work queue for full battery check + * @emergency_stop: + * When setting true, stop charging + * @psy_name_buf: the name of power-supply-class for charger manager + * @charger_psy: power_supply for charger manager + * @status_save_ext_pwr_inserted: + * saved status of external power before entering suspend-to-RAM + * @status_save_batt: + * saved status of battery before entering suspend-to-RAM + * @charging_start_time: saved start time of enabling charging + * @charging_end_time: saved end time of disabling charging + */ +struct charger_manager { + struct list_head entry; + struct device *dev; + struct charger_desc *desc; + +#ifdef CONFIG_THERMAL + struct thermal_zone_device *tzd_batt; +#endif + bool charger_enabled; + + unsigned long fullbatt_vchk_jiffies_at; + struct delayed_work fullbatt_vchk_work; + + int emergency_stop; + + char psy_name_buf[PSY_NAME_MAX + 1]; + struct power_supply_desc charger_psy_desc; + struct power_supply *charger_psy; + + u64 charging_start_time; + u64 charging_end_time; +}; + +#ifdef CONFIG_CHARGER_MANAGER +extern void cm_notify_event(struct power_supply *psy, + enum cm_event_types type, char *msg); +#else +static inline void cm_notify_event(struct power_supply *psy, + enum cm_event_types type, char *msg) { } +#endif +#endif /* _CHARGER_MANAGER_H */ diff --git a/include/linux/power/generic-adc-battery.h b/include/linux/power/generic-adc-battery.h new file mode 100644 index 000000000..b1ebe0853 --- /dev/null +++ b/include/linux/power/generic-adc-battery.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2012, Anish Kumar + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef GENERIC_ADC_BATTERY_H +#define GENERIC_ADC_BATTERY_H + +/** + * struct gab_platform_data - platform_data for generic adc iio battery driver. + * @battery_info: recommended structure to specify static power supply + * parameters + * @cal_charge: calculate charge level. + * @gpio_charge_finished: gpio for the charger. + * @gpio_inverted: Should be 1 if the GPIO is active low otherwise 0 + * @jitter_delay: delay required after the interrupt to check battery + * status.Default set is 10ms. + */ +struct gab_platform_data { + struct power_supply_info battery_info; + int (*cal_charge)(long value); + int gpio_charge_finished; + bool gpio_inverted; + int jitter_delay; +}; + +#endif /* GENERIC_ADC_BATTERY_H */ diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h new file mode 100644 index 000000000..de1dfe09a --- /dev/null +++ b/include/linux/power/gpio-charger.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2010, Lars-Peter Clausen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
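To make the charger_desc fields above concrete, here is a minimal, hypothetical description a platform might feed to the charger-manager core (a sketch, not from the patch itself); every name and threshold is invented. Such a desc is typically handed to the core as platform data of a "charger-manager" platform device.

#include <linux/kernel.h>
#include <linux/power/charger-manager.h>

static struct charger_regulator example_regulators[] = {
	{
		.regulator_name = "vbus-charger",	/* hypothetical regulator */
	},
};

static const char *example_charger_stats[] = { "usb-charger", NULL };

static struct charger_desc example_charger_desc = {
	.psy_name		= "battery",
	.polling_mode		= CM_POLL_EXTERNAL_POWER_ONLY,
	.polling_interval_ms	= 30000,
	.fullbatt_uV		= 4200000,	/* 4.2 V */
	.fullbatt_vchkdrop_ms	= 30000,
	.fullbatt_vchkdrop_uV	= 150000,	/* restart charging on a 150 mV drop */
	.battery_present	= CM_FUEL_GAUGE,
	.psy_charger_stat	= example_charger_stats,
	.num_charger_regulators	= ARRAY_SIZE(example_regulators),
	.charger_regulators	= example_regulators,
	.psy_fuel_gauge		= "fuel-gauge",
	.temp_min		= 0,	/* tenths of a degree Celsius (assumed) */
	.temp_max		= 450,
	.temp_diff		= 30,
	.measure_battery_temp	= true,
	.charging_max_duration_ms    = 6 * 60 * 60 * 1000,
	.discharging_max_duration_ms = 2 * 60 * 60 * 1000,
};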
+ * + */ + +#ifndef __LINUX_POWER_GPIO_CHARGER_H__ +#define __LINUX_POWER_GPIO_CHARGER_H__ + +#include +#include + +/** + * struct gpio_charger_platform_data - platform_data for gpio_charger devices + * @name: Name for the chargers power_supply device + * @type: Type of the charger + * @gpio: GPIO which is used to indicate the chargers status + * @gpio_active_low: Should be set to 1 if the GPIO is active low otherwise 0 + * @supplied_to: Array of battery names to which this chargers supplies power + * @num_supplicants: Number of entries in the supplied_to array + */ +struct gpio_charger_platform_data { + const char *name; + enum power_supply_type type; + + int gpio; + int gpio_active_low; + + char **supplied_to; + size_t num_supplicants; +}; + +#endif diff --git a/include/linux/power/isp1704_charger.h b/include/linux/power/isp1704_charger.h new file mode 100644 index 000000000..0105d9e7a --- /dev/null +++ b/include/linux/power/isp1704_charger.h @@ -0,0 +1,30 @@ +/* + * ISP1704 USB Charger Detection driver + * + * Copyright (C) 2011 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#ifndef __ISP1704_CHARGER_H +#define __ISP1704_CHARGER_H + +struct isp1704_charger_data { + void (*set_power)(bool on); + int enable_gpio; +}; + +#endif diff --git a/include/linux/power/jz4740-battery.h b/include/linux/power/jz4740-battery.h new file mode 100644 index 000000000..19c9610c7 --- /dev/null +++ b/include/linux/power/jz4740-battery.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2009, Jiejing Zhang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __JZ4740_BATTERY_H +#define __JZ4740_BATTERY_H + +struct jz_battery_platform_data { + struct power_supply_info info; + int gpio_charge; /* GPIO port of Charger state */ + int gpio_charge_active_low; +}; + +#endif diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h new file mode 100644 index 000000000..86e5ad8ae --- /dev/null +++ b/include/linux/power/max17042_battery.h @@ -0,0 +1,231 @@ +/* + * Fuel gauge driver for Maxim 17042 / 8966 / 8997 + * Note that Maxim 8966 and 8997 are mfd and this is its subdevice. 
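The gpio_charger_platform_data just defined is normally supplied from board code. The following snippet is a hypothetical sketch (not from the patch itself) of an AC adapter whose presence is signalled on a GPIO; the GPIO number, the names and the "gpio-charger" device-name binding are assumptions.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/power/gpio-charger.h>

static char *example_batteries[] = { "main-battery" };

static struct gpio_charger_platform_data example_gpio_charger_pdata = {
	.name			= "ac",
	.type			= POWER_SUPPLY_TYPE_MAINS,
	.gpio			= 28,	/* hypothetical GPIO number */
	.gpio_active_low	= 1,	/* charger presence pulls the line low */
	.supplied_to		= example_batteries,
	.num_supplicants	= ARRAY_SIZE(example_batteries),
};

static struct platform_device example_gpio_charger_device = {
	.name	= "gpio-charger",	/* assumed driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &example_gpio_charger_pdata,
	},
};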
+ * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __MAX17042_BATTERY_H_ +#define __MAX17042_BATTERY_H_ + +#define MAX17042_STATUS_BattAbsent (1 << 3) +#define MAX17042_BATTERY_FULL (95) /* Recommend. FullSOCThr value */ +#define MAX17042_DEFAULT_SNS_RESISTOR (10000) +#define MAX17042_DEFAULT_VMIN (3000) +#define MAX17042_DEFAULT_VMAX (4500) /* LiHV cell max */ +#define MAX17042_DEFAULT_TEMP_MIN (0) /* For sys without temp sensor */ +#define MAX17042_DEFAULT_TEMP_MAX (700) /* 70 degrees Celcius */ + +/* Consider RepCap which is less then 10 units below FullCAP full */ +#define MAX17042_FULL_THRESHOLD 10 + +#define MAX17042_CHARACTERIZATION_DATA_SIZE 48 + +enum max17042_register { + MAX17042_STATUS = 0x00, + MAX17042_VALRT_Th = 0x01, + MAX17042_TALRT_Th = 0x02, + MAX17042_SALRT_Th = 0x03, + MAX17042_AtRate = 0x04, + MAX17042_RepCap = 0x05, + MAX17042_RepSOC = 0x06, + MAX17042_Age = 0x07, + MAX17042_TEMP = 0x08, + MAX17042_VCELL = 0x09, + MAX17042_Current = 0x0A, + MAX17042_AvgCurrent = 0x0B, + + MAX17042_SOC = 0x0D, + MAX17042_AvSOC = 0x0E, + MAX17042_RemCap = 0x0F, + MAX17042_FullCAP = 0x10, + MAX17042_TTE = 0x11, + MAX17042_V_empty = 0x12, + + MAX17042_RSLOW = 0x14, + + MAX17042_AvgTA = 0x16, + MAX17042_Cycles = 0x17, + MAX17042_DesignCap = 0x18, + MAX17042_AvgVCELL = 0x19, + MAX17042_MinMaxTemp = 0x1A, + MAX17042_MinMaxVolt = 0x1B, + MAX17042_MinMaxCurr = 0x1C, + MAX17042_CONFIG = 0x1D, + MAX17042_ICHGTerm = 0x1E, + MAX17042_AvCap = 0x1F, + MAX17042_ManName = 0x20, + MAX17042_DevName = 0x21, + + MAX17042_FullCAPNom = 0x23, + MAX17042_TempNom = 0x24, + MAX17042_TempLim = 0x25, + MAX17042_TempHot = 0x26, + MAX17042_AIN = 0x27, + MAX17042_LearnCFG = 0x28, + MAX17042_FilterCFG = 0x29, + MAX17042_RelaxCFG = 0x2A, + MAX17042_MiscCFG = 0x2B, + MAX17042_TGAIN = 0x2C, + MAX17042_TOFF = 0x2D, + MAX17042_CGAIN = 0x2E, + MAX17042_COFF = 0x2F, + + MAX17042_MaskSOC = 0x32, + MAX17042_SOC_empty = 0x33, + MAX17042_T_empty = 0x34, + + MAX17042_FullCAP0 = 0x35, + MAX17042_LAvg_empty = 0x36, + MAX17042_FCTC = 0x37, + MAX17042_RCOMP0 = 0x38, + MAX17042_TempCo = 0x39, + MAX17042_EmptyTempCo = 0x3A, + MAX17042_K_empty0 = 0x3B, + MAX17042_TaskPeriod = 0x3C, + MAX17042_FSTAT = 0x3D, + + MAX17042_SHDNTIMER = 0x3F, + + MAX17042_dQacc = 0x45, + MAX17042_dPacc = 0x46, + + MAX17042_VFSOC0 = 0x48, + + MAX17042_QH = 0x4D, + MAX17042_QL = 0x4E, + + MAX17042_VFSOC0Enable = 0x60, + MAX17042_MLOCKReg1 = 0x62, + MAX17042_MLOCKReg2 = 0x63, + + MAX17042_MODELChrTbl = 0x80, + + MAX17042_OCV = 0xEE, + + MAX17042_OCVInternal = 0xFB, + + MAX17042_VFSOC = 0xFF, +}; + +/* Registers specific to max17047/50 */ +enum max17047_register { + MAX17047_QRTbl00 = 0x12, + MAX17047_FullSOCThr = 0x13, + MAX17047_QRTbl10 = 0x22, + MAX17047_QRTbl20 = 0x32, + MAX17047_V_empty = 0x3A, + MAX17047_QRTbl30 = 
0x42, +}; + +enum max170xx_chip_type { + MAXIM_DEVICE_TYPE_UNKNOWN = 0, + MAXIM_DEVICE_TYPE_MAX17042, + MAXIM_DEVICE_TYPE_MAX17047, + MAXIM_DEVICE_TYPE_MAX17050, + + MAXIM_DEVICE_TYPE_NUM +}; + +/* + * used for setting a register to a desired value + * addr : address for a register + * data : setting value for the register + */ +struct max17042_reg_data { + u8 addr; + u16 data; +}; + +struct max17042_config_data { + /* External current sense resistor value in milli-ohms */ + u32 cur_sense_val; + + /* A/D measurement */ + u16 tgain; /* 0x2C */ + u16 toff; /* 0x2D */ + u16 cgain; /* 0x2E */ + u16 coff; /* 0x2F */ + + /* Alert / Status */ + u16 valrt_thresh; /* 0x01 */ + u16 talrt_thresh; /* 0x02 */ + u16 soc_alrt_thresh; /* 0x03 */ + u16 config; /* 0x01D */ + u16 shdntimer; /* 0x03F */ + + /* App data */ + u16 full_soc_thresh; /* 0x13 */ + u16 design_cap; /* 0x18 */ + u16 ichgt_term; /* 0x1E */ + + /* MG3 config */ + u16 at_rate; /* 0x04 */ + u16 learn_cfg; /* 0x28 */ + u16 filter_cfg; /* 0x29 */ + u16 relax_cfg; /* 0x2A */ + u16 misc_cfg; /* 0x2B */ + u16 masksoc; /* 0x32 */ + + /* MG3 save and restore */ + u16 fullcap; /* 0x10 */ + u16 fullcapnom; /* 0x23 */ + u16 socempty; /* 0x33 */ + u16 lavg_empty; /* 0x36 */ + u16 dqacc; /* 0x45 */ + u16 dpacc; /* 0x46 */ + u16 qrtbl00; /* 0x12 */ + u16 qrtbl10; /* 0x22 */ + u16 qrtbl20; /* 0x32 */ + u16 qrtbl30; /* 0x42 */ + + /* Cell technology from power_supply.h */ + u16 cell_technology; + + /* Cell Data */ + u16 vempty; /* 0x12 */ + u16 temp_nom; /* 0x24 */ + u16 temp_lim; /* 0x25 */ + u16 fctc; /* 0x37 */ + u16 rcomp0; /* 0x38 */ + u16 tcompc0; /* 0x39 */ + u16 empty_tempco; /* 0x3A */ + u16 kempty0; /* 0x3B */ + u16 cell_char_tbl[MAX17042_CHARACTERIZATION_DATA_SIZE]; +} __packed; + +struct max17042_platform_data { + struct max17042_reg_data *init_data; + struct max17042_config_data *config_data; + int num_init_data; /* Number of enties in init_data array */ + bool enable_current_sense; + bool enable_por_init; /* Use POR init from Maxim appnote */ + + /* + * R_sns in micro-ohms. + * default 10000 (if r_sns = 0) as it is the recommended value by + * the datasheet although it can be changed by board designers. + */ + unsigned int r_sns; + int vmin; /* in millivolts */ + int vmax; /* in millivolts */ + int temp_min; /* in tenths of degree Celsius */ + int temp_max; /* in tenths of degree Celsius */ +}; + +#endif /* __MAX17042_BATTERY_H_ */ diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h new file mode 100644 index 000000000..89d3f1cb3 --- /dev/null +++ b/include/linux/power/max8903_charger.h @@ -0,0 +1,57 @@ +/* + * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver + * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
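A hypothetical max17042_platform_data, only to show which knobs board code normally sets (a sketch, not from the patch itself); per the structure comment above, leaving r_sns at 0 would instead select the driver's default sense-resistor value.

#include <linux/power/max17042_battery.h>

static struct max17042_platform_data example_max17042_pdata = {
	.enable_current_sense	= true,
	.r_sns		= 10000,	/* 10 mOhm sense resistor, in micro-ohms */
	.vmin		= 3000,		/* millivolts */
	.vmax		= 4200,		/* millivolts */
	.temp_min	= 0,		/* tenths of a degree Celsius */
	.temp_max	= 500,		/* i.e. 50.0 degrees Celsius */
};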
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef __MAX8903_CHARGER_H__ +#define __MAX8903_CHARGER_H__ + +struct max8903_pdata { + /* + * GPIOs + * cen, chg, flt, dcm and usus are optional. + * dok and uok are not optional depending on the status of + * dc_valid and usb_valid. + */ + int cen; /* Charger Enable input */ + int dok; /* DC(Adapter) Power OK output */ + int uok; /* USB Power OK output */ + int chg; /* Charger status output */ + int flt; /* Fault output */ + int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */ + int usus; /* USB Suspend Input (1: suspended) */ + + /* + * DC(Adapter/TA) is wired + * When dc_valid is true, + * dok should be valid. + * + * At least one of dc_valid or usb_valid should be true. + */ + bool dc_valid; + /* + * USB is wired + * When usb_valid is true, + * uok should be valid. + */ + bool usb_valid; +}; + +#endif /* __MAX8903_CHARGER_H__ */ diff --git a/include/linux/power/sbs-battery.h b/include/linux/power/sbs-battery.h new file mode 100644 index 000000000..519b8b432 --- /dev/null +++ b/include/linux/power/sbs-battery.h @@ -0,0 +1,38 @@ +/* + * Gas Gauge driver for SBS Compliant Gas Gauges + * + * Copyright (c) 2010, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __LINUX_POWER_SBS_BATTERY_H_ +#define __LINUX_POWER_SBS_BATTERY_H_ + +#include +#include + +/** + * struct sbs_platform_data - platform data for sbs devices + * @i2c_retry_count: # of times to retry on i2c IO failure + * @poll_retry_count: # of times to retry looking for new status after + * external change notification + */ +struct sbs_platform_data { + u32 i2c_retry_count; + u32 poll_retry_count; +}; + +#endif diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h new file mode 100644 index 000000000..37d9b70ed --- /dev/null +++ b/include/linux/power/smartreflex.h @@ -0,0 +1,329 @@ +/* + * OMAP Smartreflex Defines and Routines + * + * Author: Thara Gopinath + * + * Copyright (C) 2010 Texas Instruments, Inc. + * Thara Gopinath + * + * Copyright (C) 2008 Nokia Corporation + * Kalle Jokiniemi + * + * Copyright (C) 2007 Texas Instruments, Inc. + * Lesly A M + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __POWER_SMARTREFLEX_H +#define __POWER_SMARTREFLEX_H + +#include +#include +#include +#include + +/* + * Different Smartreflex IPs version. The v1 is the 65nm version used in + * OMAP3430. 
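Tying the max8903_pdata comments together, here is a hypothetical board wiring only the DC adapter path (a sketch, not from the patch itself): with dc_valid set, dok must be valid, and the optional pins are left at -1 on the assumption that negative values are treated as "not wired".

#include <linux/power/max8903_charger.h>

static struct max8903_pdata example_max8903_pdata = {
	.dc_valid	= true,		/* adapter path wired, so dok must be valid */
	.usb_valid	= false,	/* USB path not wired, uok unused */

	.dok		= 42,		/* hypothetical DC power-OK GPIO */
	.cen		= 43,		/* hypothetical charger-enable GPIO */

	/* Optional pins left unconnected (assumed "invalid GPIO" convention). */
	.uok		= -1,
	.chg		= -1,
	.flt		= -1,
	.dcm		= -1,
	.usus		= -1,
};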
The v2 is the update for the 45nm version of the IP + * used in OMAP3630 and OMAP4430 + */ +#define SR_TYPE_V1 1 +#define SR_TYPE_V2 2 + +/* SMART REFLEX REG ADDRESS OFFSET */ +#define SRCONFIG 0x00 +#define SRSTATUS 0x04 +#define SENVAL 0x08 +#define SENMIN 0x0C +#define SENMAX 0x10 +#define SENAVG 0x14 +#define AVGWEIGHT 0x18 +#define NVALUERECIPROCAL 0x1c +#define SENERROR_V1 0x20 +#define ERRCONFIG_V1 0x24 +#define IRQ_EOI 0x20 +#define IRQSTATUS_RAW 0x24 +#define IRQSTATUS 0x28 +#define IRQENABLE_SET 0x2C +#define IRQENABLE_CLR 0x30 +#define SENERROR_V2 0x34 +#define ERRCONFIG_V2 0x38 + +/* Bit/Shift Positions */ + +/* SRCONFIG */ +#define SRCONFIG_ACCUMDATA_SHIFT 22 +#define SRCONFIG_SRCLKLENGTH_SHIFT 12 +#define SRCONFIG_SENNENABLE_V1_SHIFT 5 +#define SRCONFIG_SENPENABLE_V1_SHIFT 3 +#define SRCONFIG_SENNENABLE_V2_SHIFT 1 +#define SRCONFIG_SENPENABLE_V2_SHIFT 0 +#define SRCONFIG_CLKCTRL_SHIFT 0 + +#define SRCONFIG_ACCUMDATA_MASK (0x3ff << 22) + +#define SRCONFIG_SRENABLE BIT(11) +#define SRCONFIG_SENENABLE BIT(10) +#define SRCONFIG_ERRGEN_EN BIT(9) +#define SRCONFIG_MINMAXAVG_EN BIT(8) +#define SRCONFIG_DELAYCTRL BIT(2) + +/* AVGWEIGHT */ +#define AVGWEIGHT_SENPAVGWEIGHT_SHIFT 2 +#define AVGWEIGHT_SENNAVGWEIGHT_SHIFT 0 + +/* NVALUERECIPROCAL */ +#define NVALUERECIPROCAL_SENPGAIN_SHIFT 20 +#define NVALUERECIPROCAL_SENNGAIN_SHIFT 16 +#define NVALUERECIPROCAL_RNSENP_SHIFT 8 +#define NVALUERECIPROCAL_RNSENN_SHIFT 0 + +/* ERRCONFIG */ +#define ERRCONFIG_ERRWEIGHT_SHIFT 16 +#define ERRCONFIG_ERRMAXLIMIT_SHIFT 8 +#define ERRCONFIG_ERRMINLIMIT_SHIFT 0 + +#define SR_ERRWEIGHT_MASK (0x07 << 16) +#define SR_ERRMAXLIMIT_MASK (0xff << 8) +#define SR_ERRMINLIMIT_MASK (0xff << 0) + +#define ERRCONFIG_VPBOUNDINTEN_V1 BIT(31) +#define ERRCONFIG_VPBOUNDINTST_V1 BIT(30) +#define ERRCONFIG_MCUACCUMINTEN BIT(29) +#define ERRCONFIG_MCUACCUMINTST BIT(28) +#define ERRCONFIG_MCUVALIDINTEN BIT(27) +#define ERRCONFIG_MCUVALIDINTST BIT(26) +#define ERRCONFIG_MCUBOUNDINTEN BIT(25) +#define ERRCONFIG_MCUBOUNDINTST BIT(24) +#define ERRCONFIG_MCUDISACKINTEN BIT(23) +#define ERRCONFIG_VPBOUNDINTST_V2 BIT(23) +#define ERRCONFIG_MCUDISACKINTST BIT(22) +#define ERRCONFIG_VPBOUNDINTEN_V2 BIT(22) + +#define ERRCONFIG_STATUS_V1_MASK (ERRCONFIG_VPBOUNDINTST_V1 | \ + ERRCONFIG_MCUACCUMINTST | \ + ERRCONFIG_MCUVALIDINTST | \ + ERRCONFIG_MCUBOUNDINTST | \ + ERRCONFIG_MCUDISACKINTST) +/* IRQSTATUS */ +#define IRQSTATUS_MCUACCUMINT BIT(3) +#define IRQSTATUS_MCVALIDINT BIT(2) +#define IRQSTATUS_MCBOUNDSINT BIT(1) +#define IRQSTATUS_MCUDISABLEACKINT BIT(0) + +/* IRQENABLE_SET and IRQENABLE_CLEAR */ +#define IRQENABLE_MCUACCUMINT BIT(3) +#define IRQENABLE_MCUVALIDINT BIT(2) +#define IRQENABLE_MCUBOUNDSINT BIT(1) +#define IRQENABLE_MCUDISABLEACKINT BIT(0) + +/* Common Bit values */ + +#define SRCLKLENGTH_12MHZ_SYSCLK 0x3c +#define SRCLKLENGTH_13MHZ_SYSCLK 0x41 +#define SRCLKLENGTH_19MHZ_SYSCLK 0x60 +#define SRCLKLENGTH_26MHZ_SYSCLK 0x82 +#define SRCLKLENGTH_38MHZ_SYSCLK 0xC0 + +/* + * 3430 specific values. Maybe these should be passed from board file or + * pmic structures. 
+ */ +#define OMAP3430_SR_ACCUMDATA 0x1f4 + +#define OMAP3430_SR1_SENPAVGWEIGHT 0x03 +#define OMAP3430_SR1_SENNAVGWEIGHT 0x03 + +#define OMAP3430_SR2_SENPAVGWEIGHT 0x01 +#define OMAP3430_SR2_SENNAVGWEIGHT 0x01 + +#define OMAP3430_SR_ERRWEIGHT 0x04 +#define OMAP3430_SR_ERRMAXLIMIT 0x02 + +enum sr_instance { + OMAP_SR_MPU, /* shared with iva on omap3 */ + OMAP_SR_CORE, + OMAP_SR_IVA, + OMAP_SR_NR, +}; + +struct omap_sr { + char *name; + struct list_head node; + struct platform_device *pdev; + struct omap_sr_nvalue_table *nvalue_table; + struct voltagedomain *voltdm; + struct dentry *dbg_dir; + unsigned int irq; + int srid; + int ip_type; + int nvalue_count; + bool autocomp_active; + u32 clk_length; + u32 err_weight; + u32 err_minlimit; + u32 err_maxlimit; + u32 accum_data; + u32 senn_avgweight; + u32 senp_avgweight; + u32 senp_mod; + u32 senn_mod; + void __iomem *base; +}; + +/** + * test_cond_timeout - busy-loop, testing a condition + * @cond: condition to test until it evaluates to true + * @timeout: maximum number of microseconds in the timeout + * @index: loop index (integer) + * + * Loop waiting for @cond to become true or until at least @timeout + * microseconds have passed. To use, define some integer @index in the + * calling code. After running, if @index == @timeout, then the loop has + * timed out. + * + * Copied from omap_test_timeout */ +#define sr_test_cond_timeout(cond, timeout, index) \ +({ \ + for (index = 0; index < timeout; index++) { \ + if (cond) \ + break; \ + udelay(1); \ + } \ +}) + +/** + * struct omap_sr_pmic_data - Strucutre to be populated by pmic code to pass + * pmic specific info to smartreflex driver + * + * @sr_pmic_init: API to initialize smartreflex on the PMIC side. + */ +struct omap_sr_pmic_data { + void (*sr_pmic_init) (void); +}; + +/** + * struct omap_smartreflex_dev_attr - Smartreflex Device attribute. + * + * @sensor_voltdm_name: Name of voltdomain of SR instance + */ +struct omap_smartreflex_dev_attr { + const char *sensor_voltdm_name; +}; + +/* + * The smart reflex driver supports CLASS1 CLASS2 and CLASS3 SR. + * The smartreflex class driver should pass the class type. + * Should be used to populate the class_type field of the + * omap_smartreflex_class_data structure. + */ +#define SR_CLASS1 0x1 +#define SR_CLASS2 0x2 +#define SR_CLASS3 0x3 + +/** + * struct omap_sr_class_data - Smartreflex class driver info + * + * @enable: API to enable a particular class smaartreflex. + * @disable: API to disable a particular class smartreflex. + * @configure: API to configure a particular class smartreflex. + * @notify: API to notify the class driver about an event in SR. + * Not needed for class3. + * @notify_flags: specify the events to be notified to the class driver + * @class_type: specify which smartreflex class. + * Can be used by the SR driver to take any class + * based decisions. + */ +struct omap_sr_class_data { + int (*enable)(struct omap_sr *sr); + int (*disable)(struct omap_sr *sr, int is_volt_reset); + int (*configure)(struct omap_sr *sr); + int (*notify)(struct omap_sr *sr, u32 status); + u8 notify_flags; + u8 class_type; +}; + +/** + * struct omap_sr_nvalue_table - Smartreflex n-target value info + * + * @efuse_offs: The offset of the efuse where n-target values are stored. + * @nvalue: The n-target value. 
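The sr_test_cond_timeout() kernel-doc above describes its calling convention; a small hypothetical helper (a sketch, not from the patch itself) makes it explicit: the caller supplies the loop index and compares it against the timeout afterwards.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/power/smartreflex.h>

/* Hypothetical readiness test; a real caller would poll a status register. */
static bool example_sr_is_ready(struct omap_sr *sr)
{
	return true;
}

static int example_wait_for_sr_ready(struct omap_sr *sr)
{
	int i;

	/* Busy-wait up to 1000 us for the (hypothetical) condition. */
	sr_test_cond_timeout(example_sr_is_ready(sr), 1000, i);

	return (i == 1000) ? -ETIMEDOUT : 0;
}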
+ * @errminlimit: The value of the ERRMINLIMIT bitfield for this n-target + * @volt_nominal: microvolts DC that the VDD is initially programmed to + */ +struct omap_sr_nvalue_table { + u32 efuse_offs; + u32 nvalue; + u32 errminlimit; + unsigned long volt_nominal; +}; + +/** + * struct omap_sr_data - Smartreflex platform data. + * + * @name: instance name + * @ip_type: Smartreflex IP type. + * @senp_mod: SENPENABLE value of the sr CONFIG register + * @senn_mod: SENNENABLE value for sr CONFIG register + * @err_weight ERRWEIGHT value of the sr ERRCONFIG register + * @err_maxlimit ERRMAXLIMIT value of the sr ERRCONFIG register + * @accum_data ACCUMDATA value of the sr CONFIG register + * @senn_avgweight SENNAVGWEIGHT value of the sr AVGWEIGHT register + * @senp_avgweight SENPAVGWEIGHT value of the sr AVGWEIGHT register + * @nvalue_count: Number of distinct nvalues in the nvalue table + * @enable_on_init: whether this sr module needs to enabled at + * boot up or not. + * @nvalue_table: table containing the efuse offsets and nvalues + * corresponding to them. + * @voltdm: Pointer to the voltage domain associated with the SR + */ +struct omap_sr_data { + const char *name; + int ip_type; + u32 senp_mod; + u32 senn_mod; + u32 err_weight; + u32 err_maxlimit; + u32 accum_data; + u32 senn_avgweight; + u32 senp_avgweight; + int nvalue_count; + bool enable_on_init; + struct omap_sr_nvalue_table *nvalue_table; + struct voltagedomain *voltdm; +}; + + +extern struct omap_sr_data omap_sr_pdata[OMAP_SR_NR]; + +#ifdef CONFIG_POWER_AVS_OMAP + +/* Smartreflex module enable/disable interface */ +void omap_sr_enable(struct voltagedomain *voltdm); +void omap_sr_disable(struct voltagedomain *voltdm); +void omap_sr_disable_reset_volt(struct voltagedomain *voltdm); + +/* API to register the pmic specific data with the smartreflex driver. */ +void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data); + +/* Smartreflex driver hooks to be called from Smartreflex class driver */ +int sr_enable(struct omap_sr *sr, unsigned long volt); +void sr_disable(struct omap_sr *sr); +int sr_configure_errgen(struct omap_sr *sr); +int sr_disable_errgen(struct omap_sr *sr); +int sr_configure_minmax(struct omap_sr *sr); + +/* API to register the smartreflex class driver with the smartreflex driver */ +int sr_register_class(struct omap_sr_class_data *class_data); +#else +static inline void omap_sr_enable(struct voltagedomain *voltdm) {} +static inline void omap_sr_disable(struct voltagedomain *voltdm) {} +static inline void omap_sr_disable_reset_volt( + struct voltagedomain *voltdm) {} +static inline void omap_sr_register_pmic( + struct omap_sr_pmic_data *pmic_data) {} +#endif +#endif diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h new file mode 100644 index 000000000..b3cb20dab --- /dev/null +++ b/include/linux/power/smb347-charger.h @@ -0,0 +1,117 @@ +/* + * Summit Microelectronics SMB347 Battery Charger Driver + * + * Copyright (C) 2011, Intel Corporation + * + * Authors: Bruce E. Robertson + * Mika Westerberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
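The class-driver hooks and registration entry points above combine roughly as follows; this is a hypothetical sketch (not from the patch itself), and the callback bodies are simplified placeholders: a real class-3 driver also coordinates with the voltage layer.

#include <linux/init.h>
#include <linux/power/smartreflex.h>

static int example_class3_enable(struct omap_sr *sr)
{
	return sr_enable(sr, 1200000);	/* hypothetical nominal voltage, in uV */
}

static int example_class3_disable(struct omap_sr *sr, int is_volt_reset)
{
	sr_disable_errgen(sr);
	sr_disable(sr);
	return 0;
}

static int example_class3_configure(struct omap_sr *sr)
{
	return sr_configure_errgen(sr);
}

static struct omap_sr_class_data example_class3_data = {
	.enable		= example_class3_enable,
	.disable	= example_class3_disable,
	.configure	= example_class3_configure,
	.class_type	= SR_CLASS3,
};

static int __init example_sr_class_init(void)
{
	return sr_register_class(&example_class3_data);
}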
+ */ + +#ifndef SMB347_CHARGER_H +#define SMB347_CHARGER_H + +#include +#include + +enum { + /* use the default compensation method */ + SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1, + + SMB347_SOFT_TEMP_COMPENSATE_NONE, + SMB347_SOFT_TEMP_COMPENSATE_CURRENT, + SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE, +}; + +/* Use default factory programmed value for hard/soft temperature limit */ +#define SMB347_TEMP_USE_DEFAULT -273 + +/* + * Charging enable can be controlled by software (via i2c) by + * smb347-charger driver or by EN pin (active low/high). + */ +enum smb347_chg_enable { + SMB347_CHG_ENABLE_SW, + SMB347_CHG_ENABLE_PIN_ACTIVE_LOW, + SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH, +}; + +/** + * struct smb347_charger_platform_data - platform data for SMB347 charger + * @battery_info: Information about the battery + * @max_charge_current: maximum current (in uA) the battery can be charged + * @max_charge_voltage: maximum voltage (in uV) the battery can be charged + * @pre_charge_current: current (in uA) to use in pre-charging phase + * @termination_current: current (in uA) used to determine when the + * charging cycle terminates + * @pre_to_fast_voltage: voltage (in uV) treshold used for transitioning to + * pre-charge to fast charge mode + * @mains_current_limit: maximum input current drawn from AC/DC input (in uA) + * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB + * input + * @chip_temp_threshold: die temperature where device starts limiting charge + * current [%100 - %130] (in degree C) + * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C), + * granularity is 5 deg C. + * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C), + * granularity is 5 deg C. + * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C), + * granularity is 5 deg C. + * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C), + * granularity is 5 deg C. + * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit + * @soft_temp_limit_compensation: compensation method when soft temperature + * limit is hit + * @charge_current_compensation: current (in uA) for charging compensation + * current when temperature hits soft limits + * @use_mains: AC/DC input can be used + * @use_usb: USB input can be used + * @use_usb_otg: USB OTG output can be used (not implemented yet) + * @irq_gpio: GPIO number used for interrupts (%-1 if not used) + * @enable_control: how charging enable/disable is controlled + * (driver/pin controls) + * + * @use_main, @use_usb, and @use_usb_otg are means to enable/disable + * hardware support for these. This is useful when we want to have for + * example OTG charging controlled via OTG transceiver driver and not by + * the SMB347 hardware. + * + * Hard and soft temperature limit values are given as described in the + * device data sheet and assuming NTC beta value is %3750. Even if this is + * not the case, these values should be used. They can be mapped to the + * corresponding NTC beta values with the help of table %2 in the data + * sheet. So for example if NTC beta is %3375 and we want to program hard + * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50. + * + * If zero value is given in any of the current and voltage values, the + * factory programmed default will be used. For soft/hard temperature + * values, pass in %SMB347_TEMP_USE_DEFAULT instead. 
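Putting the smb347 kernel-doc above into practice, a hypothetical platform data block might look like this (a sketch, not from the patch itself): zero current/voltage fields and SMB347_TEMP_USE_DEFAULT keep the factory-programmed values, exactly as the comment describes.

#include <linux/power/smb347-charger.h>

static struct smb347_charger_platform_data example_smb347_pdata = {
	.use_mains		= true,
	.use_usb		= true,
	.max_charge_current	= 1800000,	/* 1.8 A, in uA */
	.mains_current_limit	= 2000000,	/* 2.0 A, in uA */
	.termination_current	= 0,		/* 0: keep the factory default */
	.soft_cold_temp_limit	= SMB347_TEMP_USE_DEFAULT,
	.soft_hot_temp_limit	= SMB347_TEMP_USE_DEFAULT,
	.hard_cold_temp_limit	= SMB347_TEMP_USE_DEFAULT,
	.hard_hot_temp_limit	= SMB347_TEMP_USE_DEFAULT,
	.irq_gpio		= -1,		/* interrupt line not wired */
	.enable_control		= SMB347_CHG_ENABLE_SW,
};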
+ */ +struct smb347_charger_platform_data { + struct power_supply_info battery_info; + unsigned int max_charge_current; + unsigned int max_charge_voltage; + unsigned int pre_charge_current; + unsigned int termination_current; + unsigned int pre_to_fast_voltage; + unsigned int mains_current_limit; + unsigned int usb_hc_current_limit; + unsigned int chip_temp_threshold; + int soft_cold_temp_limit; + int soft_hot_temp_limit; + int hard_cold_temp_limit; + int hard_hot_temp_limit; + bool suspend_on_hard_temp_limit; + unsigned int soft_temp_limit_compensation; + unsigned int charge_current_compensation; + bool use_mains; + bool use_usb; + bool use_usb_otg; + int irq_gpio; + enum smb347_chg_enable enable_control; +}; + +#endif /* SMB347_CHARGER_H */ diff --git a/include/linux/power/twl4030_madc_battery.h b/include/linux/power/twl4030_madc_battery.h new file mode 100644 index 000000000..23110dc77 --- /dev/null +++ b/include/linux/power/twl4030_madc_battery.h @@ -0,0 +1,39 @@ +/* + * Dumb driver for LiIon batteries using TWL4030 madc. + * + * Copyright 2013 Golden Delicious Computers + * Nikolaus Schaller + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __TWL4030_MADC_BATTERY_H +#define __TWL4030_MADC_BATTERY_H + +/* + * Usually we can assume 100% @ 4.15V and 0% @ 3.3V but curves differ for + * charging and discharging! + */ + +struct twl4030_madc_bat_calibration { + short voltage; /* in mV - specify -1 for end of list */ + short level; /* in percent (0 .. 100%) */ +}; + +struct twl4030_madc_bat_platform_data { + unsigned int capacity; /* total capacity in uAh */ + struct twl4030_madc_bat_calibration *charging; + int charging_size; + struct twl4030_madc_bat_calibration *discharging; + int discharging_size; +}; + +#endif diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h new file mode 100644 index 000000000..f80769175 --- /dev/null +++ b/include/linux/power_supply.h @@ -0,0 +1,453 @@ +/* + * Universal power supply monitor class + * + * Copyright © 2007 Anton Vorontsov + * Copyright © 2004 Szabolcs Gyurko + * Copyright © 2003 Ian Molton + * + * Modified: 2004, Oct Szabolcs Gyurko + * + * You may use this code as per GPL version 2 + */ + +#ifndef __LINUX_POWER_SUPPLY_H__ +#define __LINUX_POWER_SUPPLY_H__ + +#include +#include +#include +#include +#include + +/* + * All voltages, currents, charges, energies, time and temperatures in uV, + * µA, µAh, µWh, seconds and tenths of degree Celsius unless otherwise + * stated. It's driver's job to convert its raw values to units in which + * this class operates. + */ + +/* + * For systems where the charger determines the maximum battery capacity + * the min and max fields should be used to present these values to user + * space. Unused/unknown fields will not appear in sysfs. 
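The twl4030_madc_battery calibration format above is easiest to see with an example (a hypothetical sketch, not from the patch itself); the curve points and capacity are invented, and the table ends with voltage = -1 as the header requires. A real platform would provide separate charging and discharging curves; one table is reused here only for brevity.

#include <linux/kernel.h>
#include <linux/power/twl4030_madc_battery.h>

static struct twl4030_madc_bat_calibration example_bat_curve[] = {
	{ .voltage = 4150, .level = 100 },
	{ .voltage = 3950, .level = 80 },
	{ .voltage = 3750, .level = 50 },
	{ .voltage = 3550, .level = 20 },
	{ .voltage = 3300, .level = 0 },
	{ .voltage = -1 },			/* end of list */
};

static struct twl4030_madc_bat_platform_data example_twl4030_bat_pdata = {
	.capacity		= 1200000,	/* 1200 mAh, in uAh */
	.charging		= example_bat_curve,
	.charging_size		= ARRAY_SIZE(example_bat_curve),
	.discharging		= example_bat_curve,
	.discharging_size	= ARRAY_SIZE(example_bat_curve),
};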
+ */ + +enum { + POWER_SUPPLY_STATUS_UNKNOWN = 0, + POWER_SUPPLY_STATUS_CHARGING, + POWER_SUPPLY_STATUS_DISCHARGING, + POWER_SUPPLY_STATUS_NOT_CHARGING, + POWER_SUPPLY_STATUS_FULL, +}; + +enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE, + POWER_SUPPLY_CHARGE_TYPE_TRICKLE, + POWER_SUPPLY_CHARGE_TYPE_FAST, +}; + +enum { + POWER_SUPPLY_HEALTH_UNKNOWN = 0, + POWER_SUPPLY_HEALTH_GOOD, + POWER_SUPPLY_HEALTH_OVERHEAT, + POWER_SUPPLY_HEALTH_DEAD, + POWER_SUPPLY_HEALTH_OVERVOLTAGE, + POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, + POWER_SUPPLY_HEALTH_COLD, + POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE, + POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE, +}; + +enum { + POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, + POWER_SUPPLY_TECHNOLOGY_NiMH, + POWER_SUPPLY_TECHNOLOGY_LION, + POWER_SUPPLY_TECHNOLOGY_LIPO, + POWER_SUPPLY_TECHNOLOGY_LiFe, + POWER_SUPPLY_TECHNOLOGY_NiCd, + POWER_SUPPLY_TECHNOLOGY_LiMn, +}; + +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL, + POWER_SUPPLY_CAPACITY_LEVEL_LOW, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH, + POWER_SUPPLY_CAPACITY_LEVEL_FULL, +}; + +enum { + POWER_SUPPLY_SCOPE_UNKNOWN = 0, + POWER_SUPPLY_SCOPE_SYSTEM, + POWER_SUPPLY_SCOPE_DEVICE, +}; + +enum power_supply_property { + /* Properties of type `int' */ + POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_AUTHENTIC, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_VOLTAGE_MIN, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_VOLTAGE_AVG, + POWER_SUPPLY_PROP_VOLTAGE_OCV, + POWER_SUPPLY_PROP_VOLTAGE_BOOT, + POWER_SUPPLY_PROP_CURRENT_MAX, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_CURRENT_BOOT, + POWER_SUPPLY_PROP_POWER_NOW, + POWER_SUPPLY_PROP_POWER_AVG, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_EMPTY, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_CHARGE_AVG, + POWER_SUPPLY_PROP_CHARGE_COUNTER, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, + POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, + POWER_SUPPLY_PROP_ENERGY_FULL, + POWER_SUPPLY_PROP_ENERGY_EMPTY, + POWER_SUPPLY_PROP_ENERGY_NOW, + POWER_SUPPLY_PROP_ENERGY_AVG, + POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ + POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */ + POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! 
*/ + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TEMP_MAX, + POWER_SUPPLY_PROP_TEMP_MIN, + POWER_SUPPLY_PROP_TEMP_ALERT_MIN, + POWER_SUPPLY_PROP_TEMP_ALERT_MAX, + POWER_SUPPLY_PROP_TEMP_AMBIENT, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, + POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */ + POWER_SUPPLY_PROP_USB_TYPE, + POWER_SUPPLY_PROP_SCOPE, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, + POWER_SUPPLY_PROP_CALIBRATE, + /* Properties of type `const char *' */ + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_SERIAL_NUMBER, +}; + +enum power_supply_type { + POWER_SUPPLY_TYPE_UNKNOWN = 0, + POWER_SUPPLY_TYPE_BATTERY, + POWER_SUPPLY_TYPE_UPS, + POWER_SUPPLY_TYPE_MAINS, + POWER_SUPPLY_TYPE_USB, /* Standard Downstream Port */ + POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */ + POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */ + POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */ + POWER_SUPPLY_TYPE_USB_TYPE_C, /* Type C Port */ + POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */ + POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */ + POWER_SUPPLY_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */ +}; + +enum power_supply_usb_type { + POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, + POWER_SUPPLY_USB_TYPE_SDP, /* Standard Downstream Port */ + POWER_SUPPLY_USB_TYPE_DCP, /* Dedicated Charging Port */ + POWER_SUPPLY_USB_TYPE_CDP, /* Charging Downstream Port */ + POWER_SUPPLY_USB_TYPE_ACA, /* Accessory Charger Adapters */ + POWER_SUPPLY_USB_TYPE_C, /* Type C Port */ + POWER_SUPPLY_USB_TYPE_PD, /* Power Delivery Port */ + POWER_SUPPLY_USB_TYPE_PD_DRP, /* PD Dual Role Port */ + POWER_SUPPLY_USB_TYPE_PD_PPS, /* PD Programmable Power Supply */ + POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */ +}; + +enum power_supply_notifier_events { + PSY_EVENT_PROP_CHANGED, +}; + +union power_supply_propval { + int intval; + const char *strval; +}; + +struct device_node; +struct power_supply; + +/* Run-time specific power supply configuration */ +struct power_supply_config { + struct device_node *of_node; + struct fwnode_handle *fwnode; + + /* Driver private data */ + void *drv_data; + + char **supplied_to; + size_t num_supplicants; +}; + +/* Description of power supply */ +struct power_supply_desc { + const char *name; + enum power_supply_type type; + enum power_supply_usb_type *usb_types; + size_t num_usb_types; + enum power_supply_property *properties; + size_t num_properties; + + /* + * Functions for drivers implementing power supply class. + * These shouldn't be called directly by other drivers for accessing + * this power supply. Instead use power_supply_*() functions (for + * example power_supply_get_property()). + */ + int (*get_property)(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); + int (*set_property)(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val); + /* + * property_is_writeable() will be called during registration + * of power supply. If this happens during device probe then it must + * not access internal data of device (because probe did not end). 
+ */ + int (*property_is_writeable)(struct power_supply *psy, + enum power_supply_property psp); + void (*external_power_changed)(struct power_supply *psy); + void (*set_charged)(struct power_supply *psy); + + /* + * Set if thermal zone should not be created for this power supply. + * For example for virtual supplies forwarding calls to actual + * sensors or other supplies. + */ + bool no_thermal; + /* For APM emulation, think legacy userspace. */ + int use_for_apm; +}; + +struct power_supply { + const struct power_supply_desc *desc; + + char **supplied_to; + size_t num_supplicants; + + char **supplied_from; + size_t num_supplies; + struct device_node *of_node; + + /* Driver private data */ + void *drv_data; + + /* private */ + struct device dev; + struct work_struct changed_work; + struct delayed_work deferred_register_work; + spinlock_t changed_lock; + bool changed; + bool initialized; + bool removing; + atomic_t use_cnt; +#ifdef CONFIG_THERMAL + struct thermal_zone_device *tzd; + struct thermal_cooling_device *tcd; +#endif + +#ifdef CONFIG_LEDS_TRIGGERS + struct led_trigger *charging_full_trig; + char *charging_full_trig_name; + struct led_trigger *charging_trig; + char *charging_trig_name; + struct led_trigger *full_trig; + char *full_trig_name; + struct led_trigger *online_trig; + char *online_trig_name; + struct led_trigger *charging_blink_full_solid_trig; + char *charging_blink_full_solid_trig_name; +#endif +}; + +/* + * This is recommended structure to specify static power supply parameters. + * Generic one, parametrizable for different power supplies. Power supply + * class itself does not use it, but that's what implementing most platform + * drivers, should try reuse for consistency. + */ + +struct power_supply_info { + const char *name; + int technology; + int voltage_max_design; + int voltage_min_design; + int charge_full_design; + int charge_empty_design; + int energy_full_design; + int energy_empty_design; + int use_for_apm; +}; + +/* + * This is the recommended struct to manage static battery parameters, + * populated by power_supply_get_battery_info(). Most platform drivers should + * use these for consistency. + * Its field names must correspond to elements in enum power_supply_property. + * The default field value is -EINVAL. + * Power supply class itself doesn't use this. 
+ */ + +struct power_supply_battery_info { + int energy_full_design_uwh; /* microWatt-hours */ + int charge_full_design_uah; /* microAmp-hours */ + int voltage_min_design_uv; /* microVolts */ + int precharge_current_ua; /* microAmps */ + int charge_term_current_ua; /* microAmps */ + int constant_charge_current_max_ua; /* microAmps */ + int constant_charge_voltage_max_uv; /* microVolts */ +}; + +extern struct atomic_notifier_head power_supply_notifier; +extern int power_supply_reg_notifier(struct notifier_block *nb); +extern void power_supply_unreg_notifier(struct notifier_block *nb); +extern struct power_supply *power_supply_get_by_name(const char *name); +extern void power_supply_put(struct power_supply *psy); +#ifdef CONFIG_OF +extern struct power_supply *power_supply_get_by_phandle(struct device_node *np, + const char *property); +extern struct power_supply *devm_power_supply_get_by_phandle( + struct device *dev, const char *property); +#else /* !CONFIG_OF */ +static inline struct power_supply * +power_supply_get_by_phandle(struct device_node *np, const char *property) +{ return NULL; } +static inline struct power_supply * +devm_power_supply_get_by_phandle(struct device *dev, const char *property) +{ return NULL; } +#endif /* CONFIG_OF */ + +extern int power_supply_get_battery_info(struct power_supply *psy, + struct power_supply_battery_info *info); +extern void power_supply_changed(struct power_supply *psy); +extern int power_supply_am_i_supplied(struct power_supply *psy); +extern int power_supply_set_input_current_limit_from_supplier( + struct power_supply *psy); +extern int power_supply_set_battery_charged(struct power_supply *psy); + +#ifdef CONFIG_POWER_SUPPLY +extern int power_supply_is_system_supplied(void); +#else +static inline int power_supply_is_system_supplied(void) { return -ENOSYS; } +#endif + +extern int power_supply_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); +extern int power_supply_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val); +extern int power_supply_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp); +extern void power_supply_external_power_changed(struct power_supply *psy); + +extern struct power_supply *__must_check +power_supply_register(struct device *parent, + const struct power_supply_desc *desc, + const struct power_supply_config *cfg); +extern struct power_supply *__must_check +power_supply_register_no_ws(struct device *parent, + const struct power_supply_desc *desc, + const struct power_supply_config *cfg); +extern struct power_supply *__must_check +devm_power_supply_register(struct device *parent, + const struct power_supply_desc *desc, + const struct power_supply_config *cfg); +extern struct power_supply *__must_check +devm_power_supply_register_no_ws(struct device *parent, + const struct power_supply_desc *desc, + const struct power_supply_config *cfg); +extern void power_supply_unregister(struct power_supply *psy); +extern int power_supply_powers(struct power_supply *psy, struct device *dev); + +#define to_power_supply(device) container_of(device, struct power_supply, dev) + +extern void *power_supply_get_drvdata(struct power_supply *psy); +/* For APM emulation, think legacy userspace. 
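The registration and get_property interfaces declared above fit together as in the following skeleton (a hypothetical sketch, not from the patch itself); the device, property values and driver shape are invented.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>

static enum power_supply_property example_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_CAPACITY,
};

static int example_get_property(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
		return 0;
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = 50;	/* percent */
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct power_supply_desc example_psy_desc = {
	.name		= "example-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= example_props,
	.num_properties	= ARRAY_SIZE(example_props),
	.get_property	= example_get_property,
};

static int example_psy_probe(struct platform_device *pdev)
{
	struct power_supply_config cfg = { .drv_data = NULL };
	struct power_supply *psy;

	psy = devm_power_supply_register(&pdev->dev, &example_psy_desc, &cfg);
	return PTR_ERR_OR_ZERO(psy);
}

Values returned through get_property() use the units stated at the top of the header: microvolts, microamps, tenths of a degree Celsius, and percent for capacity.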
*/ +extern struct class *power_supply_class; + +static inline bool power_supply_is_amp_property(enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + case POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN: + case POWER_SUPPLY_PROP_CHARGE_FULL: + case POWER_SUPPLY_PROP_CHARGE_EMPTY: + case POWER_SUPPLY_PROP_CHARGE_NOW: + case POWER_SUPPLY_PROP_CHARGE_AVG: + case POWER_SUPPLY_PROP_CHARGE_COUNTER: + case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: + case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + case POWER_SUPPLY_PROP_CURRENT_MAX: + case POWER_SUPPLY_PROP_CURRENT_NOW: + case POWER_SUPPLY_PROP_CURRENT_AVG: + case POWER_SUPPLY_PROP_CURRENT_BOOT: + return 1; + default: + break; + } + + return 0; +} + +static inline bool power_supply_is_watt_property(enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: + case POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN: + case POWER_SUPPLY_PROP_ENERGY_FULL: + case POWER_SUPPLY_PROP_ENERGY_EMPTY: + case POWER_SUPPLY_PROP_ENERGY_NOW: + case POWER_SUPPLY_PROP_ENERGY_AVG: + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + case POWER_SUPPLY_PROP_VOLTAGE_AVG: + case POWER_SUPPLY_PROP_VOLTAGE_OCV: + case POWER_SUPPLY_PROP_VOLTAGE_BOOT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: + case POWER_SUPPLY_PROP_POWER_NOW: + return 1; + default: + break; + } + + return 0; +} + +#endif /* __LINUX_POWER_SUPPLY_H__ */ diff --git a/include/linux/powercap.h b/include/linux/powercap.h new file mode 100644 index 000000000..f0a4e6257 --- /dev/null +++ b/include/linux/powercap.h @@ -0,0 +1,325 @@ +/* + * powercap.h: Data types and headers for sysfs power capping interface + * Copyright (c) 2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc. + * + */ + +#ifndef __POWERCAP_H__ +#define __POWERCAP_H__ + +#include +#include + +/* + * A power cap class device can contain multiple powercap control_types. + * Each control_type can have multiple power zones, which can be independently + * controlled. Each power zone can have one or more constraints. + */ + +struct powercap_control_type; +struct powercap_zone; +struct powercap_zone_constraint; + +/** + * struct powercap_control_type_ops - Define control type callbacks + * @set_enable: Enable/Disable whole control type. + * Default is enabled. But this callback allows all zones + * to be in disable state and remove any applied power + * limits. If disabled power zone can only be monitored + * not controlled. + * @get_enable: get Enable/Disable status. + * @release: Callback to inform that last reference to this + * control type is closed. 
So it is safe to free data + * structure associated with this control type. + * This callback is mandatory if the client own memory + * for the control type. + * + * This structure defines control type callbacks to be implemented by client + * drivers + */ +struct powercap_control_type_ops { + int (*set_enable) (struct powercap_control_type *, bool mode); + int (*get_enable) (struct powercap_control_type *, bool *mode); + int (*release) (struct powercap_control_type *); +}; + +/** + * struct powercap_control_type- Defines a powercap control_type + * @name: name of control_type + * @dev: device for this control_type + * @idr: idr to have unique id for its child + * @root_node: Root holding power zones for this control_type + * @ops: Pointer to callback struct + * @node_lock: mutex for control type + * @allocated: This is possible that client owns the memory + * used by this structure. In this case + * this flag is set to false by framework to + * prevent deallocation during release process. + * Otherwise this flag is set to true. + * @ctrl_inst: link to the control_type list + * + * Defines powercap control_type. This acts as a container for power + * zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc. + * All fields are private and should not be used by client drivers. + */ +struct powercap_control_type { + struct device dev; + struct idr idr; + int nr_zones; + const struct powercap_control_type_ops *ops; + struct mutex lock; + bool allocated; + struct list_head node; +}; + +/** + * struct powercap_zone_ops - Define power zone callbacks + * @get_max_energy_range_uj: Get maximum range of energy counter in + * micro-joules. + * @get_energy_uj: Get current energy counter in micro-joules. + * @reset_energy_uj: Reset micro-joules energy counter. + * @get_max_power_range_uw: Get maximum range of power counter in + * micro-watts. + * @get_power_uw: Get current power counter in micro-watts. + * @set_enable: Enable/Disable power zone controls. + * Default is enabled. + * @get_enable: get Enable/Disable status. + * @release: Callback to inform that last reference to this + * control type is closed. So it is safe to free + * data structure associated with this + * control type. Mandatory, if client driver owns + * the power_zone memory. + * + * This structure defines zone callbacks to be implemented by client drivers. + * Client drives can define both energy and power related callbacks. But at + * the least one type (either power or energy) is mandatory. Client drivers + * should handle mutual exclusion, if required in callbacks. + */ +struct powercap_zone_ops { + int (*get_max_energy_range_uj) (struct powercap_zone *, u64 *); + int (*get_energy_uj) (struct powercap_zone *, u64 *); + int (*reset_energy_uj) (struct powercap_zone *); + int (*get_max_power_range_uw) (struct powercap_zone *, u64 *); + int (*get_power_uw) (struct powercap_zone *, u64 *); + int (*set_enable) (struct powercap_zone *, bool mode); + int (*get_enable) (struct powercap_zone *, bool *mode); + int (*release) (struct powercap_zone *); +}; + +#define POWERCAP_ZONE_MAX_ATTRS 6 +#define POWERCAP_CONSTRAINTS_ATTRS 8 +#define MAX_CONSTRAINTS_PER_ZONE 10 +/** + * struct powercap_zone- Defines instance of a power cap zone + * @id: Unique id + * @name: Power zone name. + * @control_type_inst: Control type instance for this zone. + * @ops: Pointer to the zone operation structure. + * @dev: Instance of a device. + * @const_id_cnt: Number of constraint defined. + * @idr: Instance to an idr entry for children zones. 
+ * @parent_idr: To remove reference from the parent idr. + * @private_data: Private data pointer if any for this zone. + * @zone_dev_attrs: Attributes associated with this device. + * @zone_attr_count: Attribute count. + * @dev_zone_attr_group: Attribute group for attributes. + * @dev_attr_groups: Attribute group store to register with device. + * @allocated: This is possible that client owns the memory + * used by this structure. In this case + * this flag is set to false by framework to + * prevent deallocation during release process. + * Otherwise this flag is set to true. + * @constraint_ptr: List of constraints for this zone. + * + * This defines a power zone instance. The fields of this structure are + * private, and should not be used by client drivers. + */ +struct powercap_zone { + int id; + char *name; + void *control_type_inst; + const struct powercap_zone_ops *ops; + struct device dev; + int const_id_cnt; + struct idr idr; + struct idr *parent_idr; + void *private_data; + struct attribute **zone_dev_attrs; + int zone_attr_count; + struct attribute_group dev_zone_attr_group; + const struct attribute_group *dev_attr_groups[2]; /* 1 group + NULL */ + bool allocated; + struct powercap_zone_constraint *constraints; +}; + +/** + * struct powercap_zone_constraint_ops - Define constraint callbacks + * @set_power_limit_uw: Set power limit in micro-watts. + * @get_power_limit_uw: Get power limit in micro-watts. + * @set_time_window_us: Set time window in micro-seconds. + * @get_time_window_us: Get time window in micro-seconds. + * @get_max_power_uw: Get max power allowed in micro-watts. + * @get_min_power_uw: Get min power allowed in micro-watts. + * @get_max_time_window_us: Get max time window allowed in micro-seconds. + * @get_min_time_window_us: Get min time window allowed in micro-seconds. + * @get_name: Get the name of constraint + * + * This structure is used to define the constraint callbacks for the client + * drivers. The following callbacks are mandatory and can't be NULL: + * set_power_limit_uw + * get_power_limit_uw + * set_time_window_us + * get_time_window_us + * get_name + * Client drivers should handle mutual exclusion, if required in callbacks. + */ +struct powercap_zone_constraint_ops { + int (*set_power_limit_uw) (struct powercap_zone *, int, u64); + int (*get_power_limit_uw) (struct powercap_zone *, int, u64 *); + int (*set_time_window_us) (struct powercap_zone *, int, u64); + int (*get_time_window_us) (struct powercap_zone *, int, u64 *); + int (*get_max_power_uw) (struct powercap_zone *, int, u64 *); + int (*get_min_power_uw) (struct powercap_zone *, int, u64 *); + int (*get_max_time_window_us) (struct powercap_zone *, int, u64 *); + int (*get_min_time_window_us) (struct powercap_zone *, int, u64 *); + const char *(*get_name) (struct powercap_zone *, int); +}; + +/** + * struct powercap_zone_constraint- Defines instance of a constraint + * @id: Instance Id of this constraint. + * @power_zone: Pointer to the power zone for this constraint. + * @ops: Pointer to the constraint callbacks. + * + * This defines a constraint instance. + */ +struct powercap_zone_constraint { + int id; + struct powercap_zone *power_zone; + const struct powercap_zone_constraint_ops *ops; +}; + + +/* For clients to get their device pointer, may be used for dev_dbgs */ +#define POWERCAP_GET_DEV(power_zone) (&power_zone->dev) + +/** +* powercap_set_zone_data() - Set private data for a zone +* @power_zone: A pointer to the valid zone instance. +* @pdata: A pointer to the user private data. 
+* +* Allows client drivers to associate some private data to zone instance. +*/ +static inline void powercap_set_zone_data(struct powercap_zone *power_zone, + void *pdata) +{ + if (power_zone) + power_zone->private_data = pdata; +} + +/** +* powercap_get_zone_data() - Get private data for a zone +* @power_zone: A pointer to the valid zone instance. +* +* Allows client drivers to get private data associate with a zone, +* using call to powercap_set_zone_data. +*/ +static inline void *powercap_get_zone_data(struct powercap_zone *power_zone) +{ + if (power_zone) + return power_zone->private_data; + return NULL; +} + +/** +* powercap_register_control_type() - Register a control_type with framework +* @control_type: Pointer to client allocated memory for the control type +* structure storage. If this is NULL, powercap framework +* will allocate memory and own it. +* Advantage of this parameter is that client can embed +* this data in its data structures and allocate in a +* single call, preventing multiple allocations. +* @control_type_name: The Name of this control_type, which will be shown +* in the sysfs Interface. +* @ops: Callbacks for control type. This parameter is optional. +* +* Used to create a control_type with the power capping class. Here control_type +* can represent a type of technology, which can control a range of power zones. +* For example a control_type can be RAPL (Running Average Power Limit) +* Intel® 64 and IA-32 Processor Architectures. The name can be any string +* which must be unique, otherwise this function returns NULL. +* A pointer to the control_type instance is returned on success. +*/ +struct powercap_control_type *powercap_register_control_type( + struct powercap_control_type *control_type, + const char *name, + const struct powercap_control_type_ops *ops); + +/** +* powercap_unregister_control_type() - Unregister a control_type from framework +* @instance: A pointer to the valid control_type instance. +* +* Used to unregister a control_type with the power capping class. +* All power zones registered under this control type have to be unregistered +* before calling this function, or it will fail with an error code. +*/ +int powercap_unregister_control_type(struct powercap_control_type *instance); + +/* Zone register/unregister API */ + +/** +* powercap_register_zone() - Register a power zone +* @power_zone: Pointer to client allocated memory for the power zone structure +* storage. If this is NULL, powercap framework will allocate +* memory and own it. Advantage of this parameter is that client +* can embed this data in its data structures and allocate in a +* single call, preventing multiple allocations. +* @control_type: A control_type instance under which this zone operates. +* @name: A name for this zone. +* @parent: A pointer to the parent power zone instance if any or NULL +* @ops: Pointer to zone operation callback structure. +* @no_constraints: Number of constraints for this zone +* @const_ops: Pointer to constraint callback structure +* +* Register a power zone under a given control type. A power zone must register +* a pointer to a structure representing zone callbacks. +* A power zone can be located under a parent power zone, in which case @parent +* should point to it. Otherwise, if @parent is NULL, the new power zone will +* be located directly under the given control type +* For each power zone there may be a number of constraints that appear in the +* sysfs under that zone as attributes with unique numeric IDs. 
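To make the registration flow just described concrete, here is a minimal, hypothetical sketch of a client driver creating a "demo" control type and one zone with a single constraint. All demo_* names are invented for illustration, the callbacks are stubs, and the error handling checks both ERR_PTR and NULL returns because the kernel-doc above mentions a NULL return for duplicate names.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/powercap.h>

static int demo_get_power_uw(struct powercap_zone *zone, u64 *uw)
{
	*uw = 1000000;		/* pretend the hardware reports 1 W */
	return 0;
}

static int demo_get_max_power_range_uw(struct powercap_zone *zone, u64 *uw)
{
	*uw = 100000000;	/* 100 W counter range */
	return 0;
}

static const struct powercap_zone_ops demo_zone_ops = {
	.get_power_uw		= demo_get_power_uw,
	.get_max_power_range_uw	= demo_get_max_power_range_uw,
	/* no .release: in this sketch the framework owns the zone memory */
};

static u64 demo_limit_uw, demo_window_us;

/* The five constraint callbacks listed as mandatory above, stubbed out: */
static int demo_set_limit(struct powercap_zone *z, int id, u64 uw)
{ demo_limit_uw = uw; return 0; }
static int demo_get_limit(struct powercap_zone *z, int id, u64 *uw)
{ *uw = demo_limit_uw; return 0; }
static int demo_set_window(struct powercap_zone *z, int id, u64 us)
{ demo_window_us = us; return 0; }
static int demo_get_window(struct powercap_zone *z, int id, u64 *us)
{ *us = demo_window_us; return 0; }
static const char *demo_constraint_name(struct powercap_zone *z, int id)
{ return "long_term"; }

static const struct powercap_zone_constraint_ops demo_constraint_ops = {
	.set_power_limit_uw	= demo_set_limit,
	.get_power_limit_uw	= demo_get_limit,
	.set_time_window_us	= demo_set_window,
	.get_time_window_us	= demo_get_window,
	.get_name		= demo_constraint_name,
};

static struct powercap_control_type *demo_ct;
static struct powercap_zone *demo_zone;

static int __init demo_powercap_init(void)
{
	/* NULL first argument: let the framework allocate and own the memory */
	demo_ct = powercap_register_control_type(NULL, "demo", NULL);
	if (IS_ERR_OR_NULL(demo_ct))
		return demo_ct ? PTR_ERR(demo_ct) : -EINVAL;

	demo_zone = powercap_register_zone(NULL, demo_ct, "zone0", NULL,
					   &demo_zone_ops, 1,
					   &demo_constraint_ops);
	if (IS_ERR_OR_NULL(demo_zone)) {
		powercap_unregister_control_type(demo_ct);
		return demo_zone ? PTR_ERR(demo_zone) : -EINVAL;
	}
	return 0;
}

On teardown such a driver would call powercap_unregister_zone() for the zone first and then powercap_unregister_control_type(), matching the ordering requirement documented above.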
+* Returns pointer to the power_zone on success. +*/ +struct powercap_zone *powercap_register_zone( + struct powercap_zone *power_zone, + struct powercap_control_type *control_type, + const char *name, + struct powercap_zone *parent, + const struct powercap_zone_ops *ops, + int nr_constraints, + const struct powercap_zone_constraint_ops *const_ops); + +/** +* powercap_unregister_zone() - Unregister a zone device +* @control_type: A pointer to the valid instance of a control_type. +* @power_zone: A pointer to the valid zone instance for a control_type +* +* Used to unregister a zone device for a control_type. Caller should +* make sure that children for this zone are unregistered first. +*/ +int powercap_unregister_zone(struct powercap_control_type *control_type, + struct powercap_zone *power_zone); + +#endif diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h new file mode 100644 index 000000000..4ea1d377e --- /dev/null +++ b/include/linux/ppp-comp.h @@ -0,0 +1,106 @@ +/* + * ppp-comp.h - Definitions for doing PPP packet compression. + * + * Copyright 1994-1998 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ +#ifndef _NET_PPP_COMP_H +#define _NET_PPP_COMP_H + +#include + + +struct module; + +/* + * The following symbols control whether we include code for + * various compression methods. + */ + +#ifndef DO_BSD_COMPRESS +#define DO_BSD_COMPRESS 1 /* by default, include BSD-Compress */ +#endif +#ifndef DO_DEFLATE +#define DO_DEFLATE 1 /* by default, include Deflate */ +#endif +#define DO_PREDICTOR_1 0 +#define DO_PREDICTOR_2 0 + +/* + * Structure giving methods for compression/decompression. + */ + +struct compressor { + int compress_proto; /* CCP compression protocol number */ + + /* Allocate space for a compressor (transmit side) */ + void *(*comp_alloc) (unsigned char *options, int opt_len); + + /* Free space used by a compressor */ + void (*comp_free) (void *state); + + /* Initialize a compressor */ + int (*comp_init) (void *state, unsigned char *options, + int opt_len, int unit, int opthdr, int debug); + + /* Reset a compressor */ + void (*comp_reset) (void *state); + + /* Compress a packet */ + int (*compress) (void *state, unsigned char *rptr, + unsigned char *obuf, int isize, int osize); + + /* Return compression statistics */ + void (*comp_stat) (void *state, struct compstat *stats); + + /* Allocate space for a decompressor (receive side) */ + void *(*decomp_alloc) (unsigned char *options, int opt_len); + + /* Free space used by a decompressor */ + void (*decomp_free) (void *state); + + /* Initialize a decompressor */ + int (*decomp_init) (void *state, unsigned char *options, + int opt_len, int unit, int opthdr, int mru, + int debug); + + /* Reset a decompressor */ + void (*decomp_reset) (void *state); + + /* Decompress a packet. 
*/ + int (*decompress) (void *state, unsigned char *ibuf, int isize, + unsigned char *obuf, int osize); + + /* Update state for an incompressible packet received */ + void (*incomp) (void *state, unsigned char *ibuf, int icnt); + + /* Return decompression statistics */ + void (*decomp_stat) (void *state, struct compstat *stats); + + /* Used in locking compressor modules */ + struct module *owner; + /* Extra skb space needed by the compressor algorithm */ + unsigned int comp_extra; +}; + +/* + * The return value from decompress routine is the length of the + * decompressed packet if successful, otherwise DECOMP_ERROR + * or DECOMP_FATALERROR if an error occurred. + * + * We need to make this distinction so that we can disable certain + * useful functionality, namely sending a CCP reset-request as a result + * of an error detected after decompression. This is to avoid infringing + * a patent held by Motorola. + * Don't you just lurve software patents. + */ + +#define DECOMP_ERROR -1 /* error detected before decomp. */ +#define DECOMP_FATALERROR -2 /* error detected after decomp. */ + +extern int ppp_register_compressor(struct compressor *); +extern void ppp_unregister_compressor(struct compressor *); +#endif /* _NET_PPP_COMP_H */ diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h new file mode 100644 index 000000000..5d87f810a --- /dev/null +++ b/include/linux/ppp_channel.h @@ -0,0 +1,88 @@ +#ifndef _PPP_CHANNEL_H_ +#define _PPP_CHANNEL_H_ +/* + * Definitions for the interface between the generic PPP code + * and a PPP channel. + * + * A PPP channel provides a way for the generic PPP code to send + * and receive packets over some sort of communications medium. + * Packets are stored in sk_buffs and have the 2-byte PPP protocol + * number at the start, but not the address and control bytes. + * + * Copyright 1999 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * ==FILEVERSION 20000322== + */ + +#include +#include +#include +#include + +struct ppp_channel; + +struct ppp_channel_ops { + /* Send a packet (or multilink fragment) on this channel. + Returns 1 if it was accepted, 0 if not. */ + int (*start_xmit)(struct ppp_channel *, struct sk_buff *); + /* Handle an ioctl call that has come in via /dev/ppp. */ + int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long); +}; + +struct ppp_channel { + void *private; /* channel private data */ + const struct ppp_channel_ops *ops; /* operations for this channel */ + int mtu; /* max transmit packet size */ + int hdrlen; /* amount of headroom channel needs */ + void *ppp; /* opaque to channel */ + int speed; /* transfer rate (bytes/second) */ + /* the following is not used at present */ + int latency; /* overhead time in milliseconds */ +}; + +#ifdef __KERNEL__ +/* Called by the channel when it can send some more data. */ +extern void ppp_output_wakeup(struct ppp_channel *); + +/* Called by the channel to process a received PPP packet. + The packet should have just the 2-byte PPP protocol header. */ +extern void ppp_input(struct ppp_channel *, struct sk_buff *); + +/* Called by the channel when an input error occurs, indicating + that we may have missed a packet. */ +extern void ppp_input_error(struct ppp_channel *, int code); + +/* Attach a channel to a given PPP unit in specified net. 
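As a usage illustration for the channel interface being defined here, the following hypothetical sketch shows the minimal glue a channel driver provides: a start_xmit/ioctl ops table, a ppp_channel descriptor, and the registration and input calls. The demo_* names are invented; a real driver would queue the skb on its medium instead of dropping it, and would call ppp_output_wakeup() once the medium can accept more data.

#include <linux/errno.h>
#include <linux/ppp_channel.h>
#include <linux/skbuff.h>

static int demo_chan_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	/* 1 = packet accepted (and consumed); 0 = busy, try again later */
	dev_kfree_skb(skb);
	return 1;
}

static int demo_chan_ioctl(struct ppp_channel *chan, unsigned int cmd,
			   unsigned long arg)
{
	return -ENOTTY;		/* no channel-specific ioctls */
}

static const struct ppp_channel_ops demo_chan_ops = {
	.start_xmit	= demo_chan_xmit,
	.ioctl		= demo_chan_ioctl,
};

static struct ppp_channel demo_chan = {
	.ops	= &demo_chan_ops,
	.mtu	= 1500,
	.hdrlen	= 0,		/* extra headroom needed for the channel's own framing */
};

/*
 * On link-up:   ppp_register_channel(&demo_chan);
 * Per RX frame (2-byte protocol field first, no address/control bytes):
 *               ppp_input(&demo_chan, skb);
 * On hangup:    ppp_unregister_channel(&demo_chan);
 */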
*/ +extern int ppp_register_net_channel(struct net *, struct ppp_channel *); + +/* Attach a channel to a given PPP unit. */ +extern int ppp_register_channel(struct ppp_channel *); + +/* Detach a channel from its PPP unit (e.g. on hangup). */ +extern void ppp_unregister_channel(struct ppp_channel *); + +/* Get the channel number for a channel */ +extern int ppp_channel_index(struct ppp_channel *); + +/* Get the unit number associated with a channel, or -1 if none */ +extern int ppp_unit_number(struct ppp_channel *); + +/* Get the device name associated with a channel, or NULL if none */ +extern char *ppp_dev_name(struct ppp_channel *); + +/* + * SMP locking notes: + * The channel code must ensure that when it calls ppp_unregister_channel, + * nothing is executing in any of the procedures above, for that + * channel. The generic layer will ensure that nothing is executing + * in the start_xmit and ioctl routines for the channel by the time + * that ppp_unregister_channel returns. + */ + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h new file mode 100644 index 000000000..28aa0237c --- /dev/null +++ b/include/linux/ppp_defs.h @@ -0,0 +1,17 @@ +/* + * ppp_defs.h - PPP definitions. + * + * Copyright 1994-2000 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ +#ifndef _PPP_DEFS_H_ +#define _PPP_DEFS_H_ + +#include +#include + +#define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c) +#endif /* _PPP_DEFS_H_ */ diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h new file mode 100644 index 000000000..56f35dd3d --- /dev/null +++ b/include/linux/pps-gpio.h @@ -0,0 +1,32 @@ +/* + * pps-gpio.h -- PPS client for GPIOs + * + * + * Copyright (C) 2011 James Nuss + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _PPS_GPIO_H +#define _PPS_GPIO_H + +struct pps_gpio_platform_data { + bool assert_falling_edge; + bool capture_clear; + unsigned int gpio_pin; + const char *gpio_label; +}; + +#endif /* _PPS_GPIO_H */ diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h new file mode 100644 index 000000000..80a980cc8 --- /dev/null +++ b/include/linux/pps_kernel.h @@ -0,0 +1,133 @@ +/* + * PPS API kernel header + * + * Copyright (C) 2009 Rodolfo Giometti + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef LINUX_PPS_KERNEL_H +#define LINUX_PPS_KERNEL_H + +#include +#include +#include +#include + +/* + * Global defines + */ + +struct pps_device; + +/* The specific PPS source info */ +struct pps_source_info { + char name[PPS_MAX_NAME_LEN]; /* symbolic name */ + char path[PPS_MAX_NAME_LEN]; /* path of connected device */ + int mode; /* PPS allowed mode */ + + void (*echo)(struct pps_device *pps, + int event, void *data); /* PPS echo function */ + + struct module *owner; + struct device *dev; /* Parent device for device_create */ +}; + +struct pps_event_time { +#ifdef CONFIG_NTP_PPS + struct timespec64 ts_raw; +#endif /* CONFIG_NTP_PPS */ + struct timespec64 ts_real; +}; + +/* The main struct */ +struct pps_device { + struct pps_source_info info; /* PPS source info */ + + struct pps_kparams params; /* PPS current params */ + + __u32 assert_sequence; /* PPS assert event seq # */ + __u32 clear_sequence; /* PPS clear event seq # */ + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; /* PPS mode at event time */ + + unsigned int last_ev; /* last PPS event id */ + wait_queue_head_t queue; /* PPS event queue */ + + unsigned int id; /* PPS source unique ID */ + void const *lookup_cookie; /* For pps_lookup_dev() only */ + struct cdev cdev; + struct device *dev; + struct fasync_struct *async_queue; /* fasync method */ + spinlock_t lock; +}; + +/* + * Global variables + */ + +extern const struct attribute_group *pps_groups[]; + +/* + * Internal functions. + * + * These are not actually part of the exported API, but this is a + * convenient header file to put them in.
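For orientation, here is a hedged sketch of how a client driver typically uses the exported PPS API declared next: register a source describing its capabilities, then report each pulse from the interrupt handler that observes it. The demo_* names are invented and the chosen mode flags are only an example, not something prescribed by this header.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pps_kernel.h>

static struct pps_device *demo_pps;

static struct pps_source_info demo_info = {
	.name	= "demo-pps",
	.path	= "",
	.mode	= PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
		  PPS_CANWAIT | PPS_TSFMT_TSPEC,
	.owner	= THIS_MODULE,
};

static int demo_pps_register(void)
{
	demo_pps = pps_register_source(&demo_info,
				       PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
	return demo_pps ? 0 : -ENOMEM;
}

/* Typically called from the interrupt handler that sees the pulse edge: */
static void demo_pps_edge(void)
{
	struct pps_event_time ts;

	pps_get_ts(&ts);	/* timestamp as early as possible */
	pps_event(demo_pps, &ts, PPS_CAPTUREASSERT, NULL);
}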
+ */ + +extern int pps_register_cdev(struct pps_device *pps); +extern void pps_unregister_cdev(struct pps_device *pps); + +/* + * Exported functions + */ + +extern struct pps_device *pps_register_source( + struct pps_source_info *info, int default_params); +extern void pps_unregister_source(struct pps_device *pps); +extern void pps_event(struct pps_device *pps, + struct pps_event_time *ts, int event, void *data); +/* Look up a pps_device by magic cookie */ +struct pps_device *pps_lookup_dev(void const *cookie); + +static inline void timespec_to_pps_ktime(struct pps_ktime *kt, + struct timespec64 ts) +{ + kt->sec = ts.tv_sec; + kt->nsec = ts.tv_nsec; +} + +static inline void pps_get_ts(struct pps_event_time *ts) +{ + struct system_time_snapshot snap; + + ktime_get_snapshot(&snap); + ts->ts_real = ktime_to_timespec64(snap.real); +#ifdef CONFIG_NTP_PPS + ts->ts_raw = ktime_to_timespec64(snap.raw); +#endif +} + +/* Subtract known time delay from PPS event time(s) */ +static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta) +{ + ts->ts_real = timespec64_sub(ts->ts_real, delta); +#ifdef CONFIG_NTP_PPS + ts->ts_raw = timespec64_sub(ts->ts_raw, delta); +#endif +} + +#endif /* LINUX_PPS_KERNEL_H */ diff --git a/include/linux/pr.h b/include/linux/pr.h new file mode 100644 index 000000000..94ceec713 --- /dev/null +++ b/include/linux/pr.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_PR_H +#define LINUX_PR_H + +#include + +struct pr_ops { + int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key, + u32 flags); + int (*pr_reserve)(struct block_device *bdev, u64 key, + enum pr_type type, u32 flags); + int (*pr_release)(struct block_device *bdev, u64 key, + enum pr_type type); + int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key, + enum pr_type type, bool abort); + int (*pr_clear)(struct block_device *bdev, u64 key); +}; + +#endif /* LINUX_PR_H */ diff --git a/include/linux/prandom.h b/include/linux/prandom.h new file mode 100644 index 000000000..709e8e69f --- /dev/null +++ b/include/linux/prandom.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/prandom.h + * + * Include file for the fast pseudo-random 32-bit + * generation. + */ +#ifndef _LINUX_PRANDOM_H +#define _LINUX_PRANDOM_H + +#include +#include +#include + +u32 prandom_u32(void); +void prandom_bytes(void *buf, size_t nbytes); +void prandom_seed(u32 seed); +void prandom_reseed_late(void); + +#if BITS_PER_LONG == 64 +/* + * The core SipHash round function. Each line can be executed in + * parallel given enough CPU resources. + */ +#define PRND_SIPROUND(v0, v1, v2, v3) SIPHASH_PERMUTATION(v0, v1, v2, v3) + +#define PRND_K0 (SIPHASH_CONST_0 ^ SIPHASH_CONST_2) +#define PRND_K1 (SIPHASH_CONST_1 ^ SIPHASH_CONST_3) + +#elif BITS_PER_LONG == 32 +/* + * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash. + * This is weaker, but 32-bit machines are not used for high-traffic + * applications, so there is less output for an attacker to analyze. 
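A small illustrative sketch of the two usage styles offered by the prandom interfaces in this header: the globally seeded generator, and a private rnd_state (declared a little further down) that yields a reproducible sequence from a fixed seed. The values are arbitrary.

#include <linux/prandom.h>

static void demo_prandom(void)
{
	struct rnd_state rs;
	u32 idx, r;

	/* Globally seeded generator: pick a random index in [0, 10). */
	idx = prandom_u32_max(10);

	/* Private state: reproducible sequence from a fixed 64-bit seed. */
	prandom_seed_state(&rs, 0x0123456789abcdefULL);
	r = prandom_u32_state(&rs);

	(void)idx;
	(void)r;
}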
+ */ +#define PRND_SIPROUND(v0, v1, v2, v3) HSIPHASH_PERMUTATION(v0, v1, v2, v3) +#define PRND_K0 (HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2) +#define PRND_K1 (HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3) + +#else +#error Unsupported BITS_PER_LONG +#endif + +struct rnd_state { + __u32 s1, s2, s3, s4; +}; + +u32 prandom_u32_state(struct rnd_state *state); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + +#define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) + +/** + * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) + * @ep_ro: right open interval endpoint + * + * Returns a pseudo-random number that is in interval [0, ep_ro). Note + * that the result depends on PRNG being well distributed in [0, ~0U] + * u32 space. Here we use maximally equidistributed combined Tausworthe + * generator, that is, prandom_u32(). This is useful when requesting a + * random index of an array containing ep_ro elements, for example. + * + * Returns: pseudo-random number in interval [0, ep_ro) + */ +static inline u32 prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64) prandom_u32() * ep_ro) >> 32); +} + +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + +/** + * prandom_seed_state - set seed for prandom_u32_state(). + * @state: pointer to state structure to receive the seed. + * @seed: arbitrary 64-bit value to use as a seed. + */ +static inline void prandom_seed_state(struct rnd_state *state, u64 seed) +{ + u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL; + + state->s1 = __seed(i, 2U); + state->s2 = __seed(i, 8U); + state->s3 = __seed(i, 16U); + state->s4 = __seed(i, 128U); +} + +/* Pseudo random number generator from numerical recipes. */ +static inline u32 next_pseudo_random32(u32 seed) +{ + return seed * 1664525 + 1013904223; +} + +#endif diff --git a/include/linux/preempt.h b/include/linux/preempt.h new file mode 100644 index 000000000..c01813c3f --- /dev/null +++ b/include/linux/preempt.h @@ -0,0 +1,328 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PREEMPT_H +#define __LINUX_PREEMPT_H + +/* + * include/linux/preempt.h - macros for accessing and manipulating + * preempt_count (used for kernel preemption, interrupt count, etc.) + */ + +#include +#include + +/* + * We put the hardirq and softirq counter into the preemption + * counter. The bitmask has the following meaning: + * + * - bits 0-7 are the preemption count (max preemption depth: 256) + * - bits 8-15 are the softirq count (max # of softirqs: 256) + * + * The hardirq count could in theory be the same as the number of + * interrupts in the system, but we run all interrupt handlers with + * interrupts disabled, so we cannot have nesting interrupts. Though + * there are a few palaeontologic drivers which reenable interrupts in + * the handler, so we need more than one bit here. 
+ * + * PREEMPT_MASK: 0x000000ff + * SOFTIRQ_MASK: 0x0000ff00 + * HARDIRQ_MASK: 0x000f0000 + * NMI_MASK: 0x00100000 + * PREEMPT_NEED_RESCHED: 0x80000000 + */ +#define PREEMPT_BITS 8 +#define SOFTIRQ_BITS 8 +#define HARDIRQ_BITS 4 +#define NMI_BITS 1 + +#define PREEMPT_SHIFT 0 +#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) +#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) +#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) + +#define __IRQ_MASK(x) ((1UL << (x))-1) + +#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) +#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) +#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) +#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) + +#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) +#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) +#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) +#define NMI_OFFSET (1UL << NMI_SHIFT) + +#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) + +/* We use the MSB mostly because its available */ +#define PREEMPT_NEED_RESCHED 0x80000000 + +#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + +/* + * Disable preemption until the scheduler is running -- use an unconditional + * value so that it also works on !PREEMPT_COUNT kernels. + * + * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). + */ +#define INIT_PREEMPT_COUNT PREEMPT_OFFSET + +/* + * Initial preempt_count value; reflects the preempt_count schedule invariant + * which states that during context switches: + * + * preempt_count() == 2*PREEMPT_DISABLE_OFFSET + * + * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. + * Note: See finish_task_switch(). + */ +#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + +/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ +#include + +#define hardirq_count() (preempt_count() & HARDIRQ_MASK) +#define softirq_count() (preempt_count() & SOFTIRQ_MASK) +#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) + +/* + * Are we doing bottom half or hardware interrupt processing? + * + * in_irq() - We're in (hard) IRQ context + * in_softirq() - We have BH disabled, or are processing softirqs + * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled + * in_serving_softirq() - We're in softirq context + * in_nmi() - We're in NMI context + * in_task() - We're in task context + * + * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really + * should not be used in new code. + */ +#define in_irq() (hardirq_count()) +#define in_softirq() (softirq_count()) +#define in_interrupt() (irq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) +#define in_nmi() (preempt_count() & NMI_MASK) +#define in_task() (!(preempt_count() & \ + (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) + +/* + * The preempt_count offset after preempt_disable(); + */ +#if defined(CONFIG_PREEMPT_COUNT) +# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET +#else +# define PREEMPT_DISABLE_OFFSET 0 +#endif + +/* + * The preempt_count offset after spin_lock() + */ +#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET + +/* + * The preempt_count offset needed for things like: + * + * spin_lock_bh() + * + * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and + * softirqs, such that unlock sequences of: + * + * spin_unlock(); + * local_bh_enable(); + * + * Work as expected. 
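The mask and offset macros above are what the in_irq()/in_softirq()/in_task() helpers defined in this header are built from. As a rough illustration (the function is hypothetical, and note that in_task() by itself does not prove sleeping is safe), such checks are often used to pick an allocation mode:

#include <linux/preempt.h>
#include <linux/slab.h>

static void *demo_alloc(size_t size)
{
	/*
	 * in_task() rules out hard IRQ, softirq and NMI context, but does
	 * not by itself guarantee that sleeping is allowed (a spinlock may
	 * still be held), so treat this as a coarse illustration only.
	 */
	if (in_task())
		return kmalloc(size, GFP_KERNEL);

	return kmalloc(size, GFP_ATOMIC);
}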
+ */ +#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET) + +/* + * Are we running in atomic context? WARNING: this macro cannot + * always detect atomic context; in particular, it cannot know about + * held spinlocks in non-preemptible kernels. Thus it should not be + * used in the general case to determine whether sleeping is possible. + * Do not use in_atomic() in driver code. + */ +#define in_atomic() (preempt_count() != 0) + +/* + * Check whether we were atomic before we did preempt_disable(): + * (used by the scheduler) + */ +#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET) + +#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE) +extern void preempt_count_add(int val); +extern void preempt_count_sub(int val); +#define preempt_count_dec_and_test() \ + ({ preempt_count_sub(1); should_resched(0); }) +#else +#define preempt_count_add(val) __preempt_count_add(val) +#define preempt_count_sub(val) __preempt_count_sub(val) +#define preempt_count_dec_and_test() __preempt_count_dec_and_test() +#endif + +#define __preempt_count_inc() __preempt_count_add(1) +#define __preempt_count_dec() __preempt_count_sub(1) + +#define preempt_count_inc() preempt_count_add(1) +#define preempt_count_dec() preempt_count_sub(1) + +#ifdef CONFIG_PREEMPT_COUNT + +#define preempt_disable() \ +do { \ + preempt_count_inc(); \ + barrier(); \ +} while (0) + +#define sched_preempt_enable_no_resched() \ +do { \ + barrier(); \ + preempt_count_dec(); \ +} while (0) + +#define preempt_enable_no_resched() sched_preempt_enable_no_resched() + +#define preemptible() (preempt_count() == 0 && !irqs_disabled()) + +#ifdef CONFIG_PREEMPT +#define preempt_enable() \ +do { \ + barrier(); \ + if (unlikely(preempt_count_dec_and_test())) \ + __preempt_schedule(); \ +} while (0) + +#define preempt_enable_notrace() \ +do { \ + barrier(); \ + if (unlikely(__preempt_count_dec_and_test())) \ + __preempt_schedule_notrace(); \ +} while (0) + +#define preempt_check_resched() \ +do { \ + if (should_resched(0)) \ + __preempt_schedule(); \ +} while (0) + +#else /* !CONFIG_PREEMPT */ +#define preempt_enable() \ +do { \ + barrier(); \ + preempt_count_dec(); \ +} while (0) + +#define preempt_enable_notrace() \ +do { \ + barrier(); \ + __preempt_count_dec(); \ +} while (0) + +#define preempt_check_resched() do { } while (0) +#endif /* CONFIG_PREEMPT */ + +#define preempt_disable_notrace() \ +do { \ + __preempt_count_inc(); \ + barrier(); \ +} while (0) + +#define preempt_enable_no_resched_notrace() \ +do { \ + barrier(); \ + __preempt_count_dec(); \ +} while (0) + +#else /* !CONFIG_PREEMPT_COUNT */ + +/* + * Even if we don't have any preemption, we need preempt disable/enable + * to be barriers, so that we don't have things like get_user/put_user + * that can cause faults and scheduling migrate into our preempt-protected + * region. + */ +#define preempt_disable() barrier() +#define sched_preempt_enable_no_resched() barrier() +#define preempt_enable_no_resched() barrier() +#define preempt_enable() barrier() +#define preempt_check_resched() do { } while (0) + +#define preempt_disable_notrace() barrier() +#define preempt_enable_no_resched_notrace() barrier() +#define preempt_enable_notrace() barrier() +#define preemptible() 0 + +#endif /* CONFIG_PREEMPT_COUNT */ + +#ifdef MODULE +/* + * Modules have no business playing preemption tricks. 
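A canonical pairing of the preempt_disable()/preempt_enable() macros defined above is protecting per-CPU data from task migration; the per-CPU counter here is invented for the example.

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_bump_counter(void)
{
	preempt_disable();		/* no migration past this point */
	__this_cpu_inc(demo_counter);	/* safe: we stay on this CPU */
	preempt_enable();		/* may reschedule if preemption became pending */
}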
+ */ +#undef sched_preempt_enable_no_resched +#undef preempt_enable_no_resched +#undef preempt_enable_no_resched_notrace +#undef preempt_check_resched +#endif + +#define preempt_set_need_resched() \ +do { \ + set_preempt_need_resched(); \ +} while (0) +#define preempt_fold_need_resched() \ +do { \ + if (tif_need_resched()) \ + set_preempt_need_resched(); \ +} while (0) + +#ifdef CONFIG_PREEMPT_NOTIFIERS + +struct preempt_notifier; + +/** + * preempt_ops - notifiers called when a task is preempted and rescheduled + * @sched_in: we're about to be rescheduled: + * notifier: struct preempt_notifier for the task being scheduled + * cpu: cpu we're scheduled on + * @sched_out: we've just been preempted + * notifier: struct preempt_notifier for the task being preempted + * next: the task that's kicking us out + * + * Please note that sched_in and out are called under different + * contexts. sched_out is called with rq lock held and irq disabled + * while sched_in is called without rq lock and irq enabled. This + * difference is intentional and depended upon by its users. + */ +struct preempt_ops { + void (*sched_in)(struct preempt_notifier *notifier, int cpu); + void (*sched_out)(struct preempt_notifier *notifier, + struct task_struct *next); +}; + +/** + * preempt_notifier - key for installing preemption notifiers + * @link: internal use + * @ops: defines the notifier functions to be called + * + * Usually used in conjunction with container_of(). + */ +struct preempt_notifier { + struct hlist_node link; + struct preempt_ops *ops; +}; + +void preempt_notifier_inc(void); +void preempt_notifier_dec(void); +void preempt_notifier_register(struct preempt_notifier *notifier); +void preempt_notifier_unregister(struct preempt_notifier *notifier); + +static inline void preempt_notifier_init(struct preempt_notifier *notifier, + struct preempt_ops *ops) +{ + INIT_HLIST_NODE(¬ifier->link); + notifier->ops = ops; +} + +#endif + +#endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h new file mode 100644 index 000000000..13eafebf3 --- /dev/null +++ b/include/linux/prefetch.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Generic cache management functions. Everything is arch-specific, + * but this header exists to make sure the defines/functions can be + * used in a generic way. + * + * 2000-11-13 Arjan van de Ven + * + */ + +#ifndef _LINUX_PREFETCH_H +#define _LINUX_PREFETCH_H + +#include +#include +#include + +/* + prefetch(x) attempts to pre-emptively get the memory pointed to + by address "x" into the CPU L1 cache. + prefetch(x) should not cause any kind of exception, prefetch(0) is + specifically ok. + + prefetch() should be defined by the architecture, if not, the + #define below provides a no-op define. + + There are 3 prefetch() macros: + + prefetch(x) - prefetches the cacheline at "x" for read + prefetchw(x) - prefetches the cacheline at "x" for write + spin_lock_prefetch(x) - prefetches the spinlock *x for taking + + there is also PREFETCH_STRIDE which is the architecure-preferred + "lookahead" size for prefetching streamed operations. 
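A typical use of the prefetch() hint declared below is to start pulling the next node of a linked structure into cache while the current one is processed; the demo list walker is hypothetical.

#include <linux/list.h>
#include <linux/prefetch.h>

struct demo_node {
	struct list_head link;
	int payload;
};

static int demo_sum(struct list_head *head)
{
	struct demo_node *n;
	int sum = 0;

	list_for_each_entry(n, head, link) {
		prefetch(n->link.next);	/* hint: next node wanted soon */
		sum += n->payload;
	}
	return sum;
}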
+ +*/ + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(x) __builtin_prefetch(x) +#endif + +#ifndef ARCH_HAS_PREFETCHW +#define prefetchw(x) __builtin_prefetch(x,1) +#endif + +#ifndef ARCH_HAS_SPINLOCK_PREFETCH +#define spin_lock_prefetch(x) prefetchw(x) +#endif + +#ifndef PREFETCH_STRIDE +#define PREFETCH_STRIDE (4*L1_CACHE_BYTES) +#endif + +static inline void prefetch_range(void *addr, size_t len) +{ +#ifdef ARCH_HAS_PREFETCH + char *cp; + char *end = addr + len; + + for (cp = addr; cp < end; cp += PREFETCH_STRIDE) + prefetch(cp); +#endif +} + +#endif diff --git a/include/linux/prime_numbers.h b/include/linux/prime_numbers.h new file mode 100644 index 000000000..2b8e99c94 --- /dev/null +++ b/include/linux/prime_numbers.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PRIME_NUMBERS_H +#define __LINUX_PRIME_NUMBERS_H + +#include + +bool is_prime_number(unsigned long x); +unsigned long next_prime_number(unsigned long x); + +/** + * for_each_prime_number - iterate over each prime upto a value + * @prime: the current prime number in this iteration + * @max: the upper limit + * + * Starting from the first prime number 2 iterate over each prime number up to + * the @max value. On each iteration, @prime is set to the current prime number. + * @max should be less than ULONG_MAX to ensure termination. To begin with + * @prime set to 1 on the first iteration use for_each_prime_number_from() + * instead. + */ +#define for_each_prime_number(prime, max) \ + for_each_prime_number_from((prime), 2, (max)) + +/** + * for_each_prime_number_from - iterate over each prime upto a value + * @prime: the current prime number in this iteration + * @from: the initial value + * @max: the upper limit + * + * Starting from @from iterate over each successive prime number up to the + * @max value. On each iteration, @prime is set to the current prime number. + * @max should be less than ULONG_MAX, and @from less than @max, to ensure + * termination. + */ +#define for_each_prime_number_from(prime, from, max) \ + for (prime = (from); prime <= (max); prime = next_prime_number(prime)) + +#endif /* !__LINUX_PRIME_NUMBERS_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h new file mode 100644 index 000000000..6dd867e39 --- /dev/null +++ b/include/linux/printk.h @@ -0,0 +1,528 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KERNEL_PRINTK__ +#define __KERNEL_PRINTK__ + +#include +#include +#include +#include +#include + +extern const char linux_banner[]; +extern const char linux_proc_banner[]; + +#define PRINTK_MAX_SINGLE_HEADER_LEN 2 + +static inline int printk_get_level(const char *buffer) +{ + if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { + switch (buffer[1]) { + case '0' ... '7': + case 'd': /* KERN_DEFAULT */ + case 'c': /* KERN_CONT */ + return buffer[1]; + } + } + return 0; +} + +static inline const char *printk_skip_level(const char *buffer) +{ + if (printk_get_level(buffer)) + return buffer + 2; + + return buffer; +} + +static inline const char *printk_skip_headers(const char *buffer) +{ + while (printk_get_level(buffer)) + buffer = printk_skip_level(buffer); + + return buffer; +} + +#define CONSOLE_EXT_LOG_MAX 8192 + +/* printk's without a loglevel use this.. */ +#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT + +/* We show everything that is MORE important than this.. 
*/ +#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ +#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ +#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ +#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ + +/* + * Default used to be hard-coded at 7, quiet used to be hardcoded at 4, + * we're now allowing both to be set from kernel config. + */ +#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT +#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET + +extern int console_printk[]; + +#define console_loglevel (console_printk[0]) +#define default_message_loglevel (console_printk[1]) +#define minimum_console_loglevel (console_printk[2]) +#define default_console_loglevel (console_printk[3]) + +static inline void console_silent(void) +{ + console_loglevel = CONSOLE_LOGLEVEL_SILENT; +} + +static inline void console_verbose(void) +{ + if (console_loglevel) + console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; +} + +/* strlen("ratelimit") + 1 */ +#define DEVKMSG_STR_MAX_SIZE 10 +extern char devkmsg_log_str[]; +struct ctl_table; + +struct va_format { + const char *fmt; + va_list *va; +}; + +/* + * FW_BUG + * Add this to a message where you are sure the firmware is buggy or behaves + * really stupid or out of spec. Be aware that the responsible BIOS developer + * should be able to fix this issue or at least get a concrete idea of the + * problem by reading your message without the need of looking at the kernel + * code. + * + * Use it for definite and high priority BIOS bugs. + * + * FW_WARN + * Use it for not that clear (e.g. could the kernel messed up things already?) + * and medium priority BIOS bugs. + * + * FW_INFO + * Use this one if you want to tell the user or vendor about something + * suspicious, but generally harmless related to the firmware. + * + * Use it for information or very low priority BIOS bugs. + */ +#define FW_BUG "[Firmware Bug]: " +#define FW_WARN "[Firmware Warn]: " +#define FW_INFO "[Firmware Info]: " + +/* + * HW_ERR + * Add this to a message for hardware errors, so that user can report + * it to hardware vendor instead of LKML or software vendor. + */ +#define HW_ERR "[Hardware Error]: " + +/* + * DEPRECATED + * Add this to a message whenever you want to warn user space about the use + * of a deprecated aspect of an API so they can stop using it + */ +#define DEPRECATED "[Deprecated]: " + +/* + * Dummy printk for disabled debugging statements to use whilst maintaining + * gcc's format checking. + */ +#define no_printk(fmt, ...) \ +({ \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ + 0; \ +}) + +#ifdef CONFIG_EARLY_PRINTK +extern asmlinkage __printf(1, 2) +void early_printk(const char *fmt, ...); +#else +static inline __printf(1, 2) __cold +void early_printk(const char *s, ...) 
{ } +#endif + +#ifdef CONFIG_PRINTK_NMI +extern void printk_nmi_enter(void); +extern void printk_nmi_exit(void); +extern void printk_nmi_direct_enter(void); +extern void printk_nmi_direct_exit(void); +#else +static inline void printk_nmi_enter(void) { } +static inline void printk_nmi_exit(void) { } +static inline void printk_nmi_direct_enter(void) { } +static inline void printk_nmi_direct_exit(void) { } +#endif /* PRINTK_NMI */ + +#ifdef CONFIG_PRINTK +asmlinkage __printf(5, 0) +int vprintk_emit(int facility, int level, + const char *dict, size_t dictlen, + const char *fmt, va_list args); + +asmlinkage __printf(1, 0) +int vprintk(const char *fmt, va_list args); + +asmlinkage __printf(5, 6) __cold +int printk_emit(int facility, int level, + const char *dict, size_t dictlen, + const char *fmt, ...); + +asmlinkage __printf(1, 2) __cold +int printk(const char *fmt, ...); + +/* + * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ ! + */ +__printf(1, 2) __cold int printk_deferred(const char *fmt, ...); + +/* + * Please don't use printk_ratelimit(), because it shares ratelimiting state + * with all other unrelated printk_ratelimit() callsites. Instead use + * printk_ratelimited() or plain old __ratelimit(). + */ +extern int __printk_ratelimit(const char *func); +#define printk_ratelimit() __printk_ratelimit(__func__) +extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msec); + +extern int printk_delay_msec; +extern int dmesg_restrict; + +extern int +devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf, + size_t *lenp, loff_t *ppos); + +extern void wake_up_klogd(void); + +char *log_buf_addr_get(void); +u32 log_buf_len_get(void); +void log_buf_vmcoreinfo_setup(void); +void __init setup_log_buf(int early); +__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); +void dump_stack_print_info(const char *log_lvl); +void show_regs_print_info(const char *log_lvl); +extern asmlinkage void dump_stack(void) __cold; +extern void printk_safe_flush(void); +extern void printk_safe_flush_on_panic(void); +#else +static inline __printf(1, 0) +int vprintk(const char *s, va_list args) +{ + return 0; +} +static inline __printf(1, 2) __cold +int printk(const char *s, ...) +{ + return 0; +} +static inline __printf(1, 2) __cold +int printk_deferred(const char *s, ...) +{ + return 0; +} +static inline int printk_ratelimit(void) +{ + return 0; +} +static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msec) +{ + return false; +} + +static inline void wake_up_klogd(void) +{ +} + +static inline char *log_buf_addr_get(void) +{ + return NULL; +} + +static inline u32 log_buf_len_get(void) +{ + return 0; +} + +static inline void log_buf_vmcoreinfo_setup(void) +{ +} + +static inline void setup_log_buf(int early) +{ +} + +static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...) +{ +} + +static inline void dump_stack_print_info(const char *log_lvl) +{ +} + +static inline void show_regs_print_info(const char *log_lvl) +{ +} + +static inline asmlinkage void dump_stack(void) +{ +} + +static inline void printk_safe_flush(void) +{ +} + +static inline void printk_safe_flush_on_panic(void) +{ +} +#endif + +extern int kptr_restrict; + +#ifndef pr_fmt +#define pr_fmt(fmt) fmt +#endif + +/* + * These can be used to print at the various log levels. 
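Before the level-specific helpers are defined, it is worth showing the conventional pattern they are meant for: a driver defines pr_fmt() ahead of its includes so that every pr_*() call gets a common prefix. The "demo: " prefix and the function are invented for illustration.

/* Hypothetical driver prefix; define pr_fmt() before any includes. */
#define pr_fmt(fmt) "demo: " fmt

#include <linux/printk.h>

static void demo_report(int err)
{
	if (err)
		pr_err("initialisation failed: %d\n", err);	/* prints "demo: initialisation failed: ..." */
	else
		pr_info("ready\n");

	/* compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is in effect */
	pr_debug("extra detail\n");
}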
+ * All of these will print unconditionally, although note that pr_debug() + * and other debug macros are compiled out unless either DEBUG is defined + * or CONFIG_DYNAMIC_DEBUG is set. + */ +#define pr_emerg(fmt, ...) \ + printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert(fmt, ...) \ + printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit(fmt, ...) \ + printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err(fmt, ...) \ + printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warning(fmt, ...) \ + printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warn pr_warning +#define pr_notice(fmt, ...) \ + printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info(fmt, ...) \ + printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +/* + * Like KERN_CONT, pr_cont() should only be used when continuing + * a line with no newline ('\n') enclosed. Otherwise it defaults + * back to KERN_DEFAULT. + */ +#define pr_cont(fmt, ...) \ + printk(KERN_CONT fmt, ##__VA_ARGS__) + +/* pr_devel() should produce zero code unless DEBUG is defined */ +#ifdef DEBUG +#define pr_devel(fmt, ...) \ + printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_devel(fmt, ...) \ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + + +/* If you are writing a driver, please use dev_dbg instead */ +#if defined(CONFIG_DYNAMIC_DEBUG) +#include + +/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ +#define pr_debug(fmt, ...) \ + dynamic_pr_debug(fmt, ##__VA_ARGS__) +#elif defined(DEBUG) +#define pr_debug(fmt, ...) \ + printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_debug(fmt, ...) \ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +/* + * Print a one-time message (analogous to WARN_ONCE() et al): + */ + +#ifdef CONFIG_PRINTK +#define printk_once(fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + bool __ret_print_once = !__print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + printk(fmt, ##__VA_ARGS__); \ + } \ + unlikely(__ret_print_once); \ +}) +#define printk_deferred_once(fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + bool __ret_print_once = !__print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + printk_deferred(fmt, ##__VA_ARGS__); \ + } \ + unlikely(__ret_print_once); \ +}) +#else +#define printk_once(fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) +#define printk_deferred_once(fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) +#endif + +#define pr_emerg_once(fmt, ...) \ + printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert_once(fmt, ...) \ + printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit_once(fmt, ...) \ + printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err_once(fmt, ...) \ + printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warn_once(fmt, ...) \ + printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +#define pr_notice_once(fmt, ...) \ + printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info_once(fmt, ...) \ + printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +#define pr_cont_once(fmt, ...) \ + printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__) + +#if defined(DEBUG) +#define pr_devel_once(fmt, ...) \ + printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_devel_once(fmt, ...) \ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +/* If you are writing a driver, please use dev_dbg instead */ +#if defined(DEBUG) +#define pr_debug_once(fmt, ...) 
\ + printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_debug_once(fmt, ...) \ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +/* + * ratelimited messages with local ratelimit_state, + * no local ratelimit_state used in the !PRINTK case + */ +#ifdef CONFIG_PRINTK +#define printk_ratelimited(fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + printk(fmt, ##__VA_ARGS__); \ +}) +#else +#define printk_ratelimited(fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) +#endif + +#define pr_emerg_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warn_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +#define pr_notice_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +/* no pr_cont_ratelimited, don't do that... */ + +#if defined(DEBUG) +#define pr_devel_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_devel_ratelimited(fmt, ...) \ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +/* If you are writing a driver, please use dev_dbg instead */ +#if defined(CONFIG_DYNAMIC_DEBUG) +/* descriptor check is first to prevent flooding with "callbacks suppressed" */ +#define pr_debug_ratelimited(fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ + __ratelimit(&_rs)) \ + __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \ +} while (0) +#elif defined(DEBUG) +#define pr_debug_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_debug_ratelimited(fmt, ...) 
\ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +extern const struct file_operations kmsg_fops; + +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, char *linebuf, size_t linebuflen, + bool ascii); +#ifdef CONFIG_PRINTK +extern void print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#if defined(CONFIG_DYNAMIC_DEBUG) +#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \ + dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true) +#else +extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, + const void *buf, size_t len); +#endif /* defined(CONFIG_DYNAMIC_DEBUG) */ +#else +static inline void print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ +} +static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type, + const void *buf, size_t len) +{ +} + +#endif + +#if defined(CONFIG_DYNAMIC_DEBUG) +#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) \ + dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) +#elif defined(DEBUG) +#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) \ + print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) +#else +static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ +} +#endif + +#endif diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h new file mode 100644 index 000000000..5141657a0 --- /dev/null +++ b/include/linux/proc_fs.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * The proc filesystem constants/structures + */ +#ifndef _LINUX_PROC_FS_H +#define _LINUX_PROC_FS_H + +#include +#include + +struct proc_dir_entry; +struct seq_file; +struct seq_operations; + +#ifdef CONFIG_PROC_FS + +typedef int (*proc_write_t)(struct file *, char *, size_t); + +extern void proc_root_init(void); +extern void proc_flush_task(struct task_struct *); + +extern struct proc_dir_entry *proc_symlink(const char *, + struct proc_dir_entry *, const char *); +struct proc_dir_entry *_proc_mkdir(const char *, umode_t, struct proc_dir_entry *, void *, bool); +extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *); +extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t, + struct proc_dir_entry *, void *); +extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t, + struct proc_dir_entry *); +struct proc_dir_entry *proc_create_mount_point(const char *name); + +struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode, + struct proc_dir_entry *parent, const struct seq_operations *ops, + unsigned int state_size, void *data); +#define proc_create_seq_data(name, mode, parent, ops, data) \ + proc_create_seq_private(name, mode, parent, ops, 0, data) +#define proc_create_seq(name, mode, parent, ops) \ + proc_create_seq_private(name, mode, parent, ops, 0, NULL) +struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode, + struct proc_dir_entry *parent, + int (*show)(struct seq_file *, void *), void *data); +#define proc_create_single(name, mode, parent, 
show) \ + proc_create_single_data(name, mode, parent, show, NULL) + +extern struct proc_dir_entry *proc_create_data(const char *, umode_t, + struct proc_dir_entry *, + const struct file_operations *, + void *); + +struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct file_operations *proc_fops); +extern void proc_set_size(struct proc_dir_entry *, loff_t); +extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t); +extern void *PDE_DATA(const struct inode *); +extern void *proc_get_parent_data(const struct inode *); +extern void proc_remove(struct proc_dir_entry *); +extern void remove_proc_entry(const char *, struct proc_dir_entry *); +extern int remove_proc_subtree(const char *, struct proc_dir_entry *); + +struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode, + struct proc_dir_entry *parent, const struct seq_operations *ops, + unsigned int state_size, void *data); +#define proc_create_net(name, mode, parent, state_size, ops) \ + proc_create_net_data(name, mode, parent, state_size, ops, NULL) +struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode, + struct proc_dir_entry *parent, + int (*show)(struct seq_file *, void *), void *data); +struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode, + struct proc_dir_entry *parent, + const struct seq_operations *ops, + proc_write_t write, + unsigned int state_size, void *data); +struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode, + struct proc_dir_entry *parent, + int (*show)(struct seq_file *, void *), + proc_write_t write, + void *data); + +#else /* CONFIG_PROC_FS */ + +static inline void proc_root_init(void) +{ +} + +static inline void proc_flush_task(struct task_struct *task) +{ +} + +static inline struct proc_dir_entry *proc_symlink(const char *name, + struct proc_dir_entry *parent,const char *dest) { return NULL;} +static inline struct proc_dir_entry *proc_mkdir(const char *name, + struct proc_dir_entry *parent) {return NULL;} +static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; } +static inline struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode, + struct proc_dir_entry *parent, void *data, bool force_lookup) +{ + return NULL; +} +static inline struct proc_dir_entry *proc_mkdir_data(const char *name, + umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } +static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, + umode_t mode, struct proc_dir_entry *parent) { return NULL; } +#define proc_create_seq_private(name, mode, parent, ops, size, data) ({NULL;}) +#define proc_create_seq_data(name, mode, parent, ops, data) ({NULL;}) +#define proc_create_seq(name, mode, parent, ops) ({NULL;}) +#define proc_create_single(name, mode, parent, show) ({NULL;}) +#define proc_create_single_data(name, mode, parent, show, data) ({NULL;}) +#define proc_create(name, mode, parent, proc_fops) ({NULL;}) +#define proc_create_data(name, mode, parent, proc_fops, data) ({NULL;}) + +static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {} +static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {} +static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;} +static inline void *proc_get_parent_data(const struct inode *inode) { BUG(); return NULL; } + +static inline void proc_remove(struct proc_dir_entry *de) {} +#define remove_proc_entry(name, parent) 
do {} while (0) +static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; } + +#define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;}) +#define proc_create_net(name, mode, parent, state_size, ops) ({NULL;}) +#define proc_create_net_single(name, mode, parent, show, data) ({NULL;}) + +#endif /* CONFIG_PROC_FS */ + +struct net; + +static inline struct proc_dir_entry *proc_net_mkdir( + struct net *net, const char *name, struct proc_dir_entry *parent) +{ + return _proc_mkdir(name, 0, parent, net, true); +} + +struct ns_common; +int open_related_ns(struct ns_common *ns, + struct ns_common *(*get_ns)(struct ns_common *ns)); + +/* get the associated pid namespace for a file in procfs */ +static inline struct pid_namespace *proc_pid_ns(const struct inode *inode) +{ + return inode->i_sb->s_fs_info; +} + +#endif /* _LINUX_PROC_FS_H */ diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h new file mode 100644 index 000000000..d31cb6215 --- /dev/null +++ b/include/linux/proc_ns.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * procfs namespace bits + */ +#ifndef _LINUX_PROC_NS_H +#define _LINUX_PROC_NS_H + +#include + +struct pid_namespace; +struct nsproxy; +struct path; +struct task_struct; +struct inode; + +struct proc_ns_operations { + const char *name; + const char *real_ns_name; + int type; + struct ns_common *(*get)(struct task_struct *task); + void (*put)(struct ns_common *ns); + int (*install)(struct nsproxy *nsproxy, struct ns_common *ns); + struct user_namespace *(*owner)(struct ns_common *ns); + struct ns_common *(*get_parent)(struct ns_common *ns); +} __randomize_layout; + +extern const struct proc_ns_operations netns_operations; +extern const struct proc_ns_operations utsns_operations; +extern const struct proc_ns_operations ipcns_operations; +extern const struct proc_ns_operations pidns_operations; +extern const struct proc_ns_operations pidns_for_children_operations; +extern const struct proc_ns_operations userns_operations; +extern const struct proc_ns_operations mntns_operations; +extern const struct proc_ns_operations cgroupns_operations; + +/* + * We always define these enumerators + */ +enum { + PROC_ROOT_INO = 1, + PROC_IPC_INIT_INO = 0xEFFFFFFFU, + PROC_UTS_INIT_INO = 0xEFFFFFFEU, + PROC_USER_INIT_INO = 0xEFFFFFFDU, + PROC_PID_INIT_INO = 0xEFFFFFFCU, + PROC_CGROUP_INIT_INO = 0xEFFFFFFBU, +}; + +#ifdef CONFIG_PROC_FS + +extern int pid_ns_prepare_proc(struct pid_namespace *ns); +extern void pid_ns_release_proc(struct pid_namespace *ns); +extern int proc_alloc_inum(unsigned int *pino); +extern void proc_free_inum(unsigned int inum); + +#else /* CONFIG_PROC_FS */ + +static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; } +static inline void pid_ns_release_proc(struct pid_namespace *ns) {} + +static inline int proc_alloc_inum(unsigned int *inum) +{ + *inum = 1; + return 0; +} +static inline void proc_free_inum(unsigned int inum) {} + +#endif /* CONFIG_PROC_FS */ + +static inline int ns_alloc_inum(struct ns_common *ns) +{ + atomic_long_set(&ns->stashed, 0); + return proc_alloc_inum(&ns->inum); +} + +#define ns_free_inum(ns) proc_free_inum((ns)->inum) + +extern struct file *proc_ns_fget(int fd); +#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) +extern void *ns_get_path(struct path *path, struct task_struct *task, + const struct proc_ns_operations *ns_ops); +typedef struct ns_common *ns_get_path_helper_t(void *); +extern void *ns_get_path_cb(struct path 
*path, ns_get_path_helper_t ns_get_cb, + void *private_data); + +extern int ns_get_name(char *buf, size_t size, struct task_struct *task, + const struct proc_ns_operations *ns_ops); +extern void nsfs_init(void); + +#endif /* _LINUX_PROC_NS_H */ diff --git a/include/linux/processor.h b/include/linux/processor.h new file mode 100644 index 000000000..dbc952eec --- /dev/null +++ b/include/linux/processor.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Misc low level processor primitives */ +#ifndef _LINUX_PROCESSOR_H +#define _LINUX_PROCESSOR_H + +#include + +/* + * spin_begin is used before beginning a busy-wait loop, and must be paired + * with spin_end when the loop is exited. spin_cpu_relax must be called + * within the loop. + * + * The loop body should be as small and fast as possible, on the order of + * tens of instructions/cycles as a guide. It should avoid calling + * cpu_relax, or any "spin" or sleep type of primitive including nested uses + * of these primitives. It should not lock or take any other resource. + * Violations of these guidelines will not cause a bug, but may cause + * suboptimal performance. + * + * These loops are optimized to be used where wait times are expected to be + * less than the cost of a context switch (and associated overhead). + * + * Detection of resource owner and decision to spin or sleep or guest-yield + * (e.g., spin lock holder vcpu preempted, or mutex owner not on CPU) can be + * tested within the loop body. + */ +#ifndef spin_begin +#define spin_begin() +#endif + +#ifndef spin_cpu_relax +#define spin_cpu_relax() cpu_relax() +#endif + +/* + * spin_cpu_yield may be called to yield (undirected) to the hypervisor if + * necessary. This should be used if the wait is expected to take longer + * than context switch overhead, but we can't sleep or do a directed yield. + */ +#ifndef spin_cpu_yield +#define spin_cpu_yield() cpu_relax_yield() +#endif + +#ifndef spin_end +#define spin_end() +#endif + +/* + * spin_until_cond can be used to wait for a condition to become true. It + * may be expected that the first iteration will be true in the common case + * (no spinning), so that callers should not require a first "likely" test + * for the uncontended case before using this primitive. + * + * Usage and implementation guidelines are the same as for the spin_begin + * primitives, above.
+ */ +#ifndef spin_until_cond +#define spin_until_cond(cond) \ +do { \ + if (unlikely(!(cond))) { \ + spin_begin(); \ + do { \ + spin_cpu_relax(); \ + } while (!(cond)); \ + spin_end(); \ + } \ +} while (0) + +#endif + +#endif /* _LINUX_PROCESSOR_H */ diff --git a/include/linux/profile.h b/include/linux/profile.h new file mode 100644 index 000000000..bad18ca43 --- /dev/null +++ b/include/linux/profile.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PROFILE_H +#define _LINUX_PROFILE_H + +#include +#include +#include +#include + +#include + +#define CPU_PROFILING 1 +#define SCHED_PROFILING 2 +#define SLEEP_PROFILING 3 +#define KVM_PROFILING 4 + +struct proc_dir_entry; +struct pt_regs; +struct notifier_block; + +#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) +void create_prof_cpu_mask(void); +int create_proc_profile(void); +#else +static inline void create_prof_cpu_mask(void) +{ +} + +static inline int create_proc_profile(void) +{ + return 0; +} +#endif + +enum profile_type { + PROFILE_TASK_EXIT, + PROFILE_MUNMAP +}; + +#ifdef CONFIG_PROFILING + +extern int prof_on __read_mostly; + +/* init basic kernel profiler */ +int profile_init(void); +int profile_setup(char *str); +void profile_tick(int type); +int setup_profiling_timer(unsigned int multiplier); + +/* + * Add multiple profiler hits to a given address: + */ +void profile_hits(int type, void *ip, unsigned int nr_hits); + +/* + * Single profiler hit: + */ +static inline void profile_hit(int type, void *ip) +{ + /* + * Speedup for the common (no profiling enabled) case: + */ + if (unlikely(prof_on == type)) + profile_hits(type, ip, 1); +} + +struct task_struct; +struct mm_struct; + +/* task is in do_exit() */ +void profile_task_exit(struct task_struct * task); + +/* task is dead, free task struct ? Returns 1 if + * the task was taken, 0 if the task should be freed. 
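A short sketch of the intended calling pattern for the spin primitives above (the ack flag and callers are hypothetical): either rely on spin_until_cond(), or open-code the loop when extra per-iteration work is needed.

#include <linux/compiler.h>
#include <linux/processor.h>

/* Simple case: wait until another CPU sets *ack. */
static void wait_for_ack(int *ack)
{
	spin_until_cond(READ_ONCE(*ack));
}

/* Open-coded equivalent, useful when the loop body must also check a
 * timeout or an ownership condition. */
static void wait_for_ack_open_coded(int *ack)
{
	if (!READ_ONCE(*ack)) {
		spin_begin();
		do {
			spin_cpu_relax();
		} while (!READ_ONCE(*ack));
		spin_end();
	}
}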
+ */ +int profile_handoff_task(struct task_struct * task); + +/* sys_munmap */ +void profile_munmap(unsigned long addr); + +int task_handoff_register(struct notifier_block * n); +int task_handoff_unregister(struct notifier_block * n); + +int profile_event_register(enum profile_type, struct notifier_block * n); +int profile_event_unregister(enum profile_type, struct notifier_block * n); + +struct pt_regs; + +#else + +#define prof_on 0 + +static inline int profile_init(void) +{ + return 0; +} + +static inline void profile_tick(int type) +{ + return; +} + +static inline void profile_hits(int type, void *ip, unsigned int nr_hits) +{ + return; +} + +static inline void profile_hit(int type, void *ip) +{ + return; +} + +static inline int task_handoff_register(struct notifier_block * n) +{ + return -ENOSYS; +} + +static inline int task_handoff_unregister(struct notifier_block * n) +{ + return -ENOSYS; +} + +static inline int profile_event_register(enum profile_type t, struct notifier_block * n) +{ + return -ENOSYS; +} + +static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n) +{ + return -ENOSYS; +} + +#define profile_task_exit(a) do { } while (0) +#define profile_handoff_task(a) (0) +#define profile_munmap(a) do { } while (0) + +#endif /* CONFIG_PROFILING */ + +#endif /* _LINUX_PROFILE_H */ diff --git a/include/linux/projid.h b/include/linux/projid.h new file mode 100644 index 000000000..613730622 --- /dev/null +++ b/include/linux/projid.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PROJID_H +#define _LINUX_PROJID_H + +/* + * A set of types for the internal kernel types representing project ids. + * + * The types defined in this header allow distinguishing which project ids in + * the kernel are values used by userspace and which project id values are + * the internal kernel values. With the addition of user namespaces the values + * can be different. Using the type system makes it possible for the compiler + * to detect when we overlook these differences. 
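As a usage sketch for the notifier hooks above (hypothetical module code): register a callback on the PROFILE_TASK_EXIT chain and unregister it on teardown.

#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/sched.h>

static int demo_exit_notify(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct task_struct *task = data;	/* the exiting task */

	pr_debug("task %d is exiting\n", task->pid);
	return NOTIFY_OK;
}

static struct notifier_block demo_exit_nb = {
	.notifier_call = demo_exit_notify,
};

/* In init:     profile_event_register(PROFILE_TASK_EXIT, &demo_exit_nb);
 * In teardown: profile_event_unregister(PROFILE_TASK_EXIT, &demo_exit_nb); */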
+ * + */ +#include + +struct user_namespace; +extern struct user_namespace init_user_ns; + +typedef __kernel_uid32_t projid_t; + +typedef struct { + projid_t val; +} kprojid_t; + +static inline projid_t __kprojid_val(kprojid_t projid) +{ + return projid.val; +} + +#define KPROJIDT_INIT(value) (kprojid_t){ value } + +#define INVALID_PROJID KPROJIDT_INIT(-1) +#define OVERFLOW_PROJID 65534 + +static inline bool projid_eq(kprojid_t left, kprojid_t right) +{ + return __kprojid_val(left) == __kprojid_val(right); +} + +static inline bool projid_lt(kprojid_t left, kprojid_t right) +{ + return __kprojid_val(left) < __kprojid_val(right); +} + +static inline bool projid_valid(kprojid_t projid) +{ + return !projid_eq(projid, INVALID_PROJID); +} + +#ifdef CONFIG_USER_NS + +extern kprojid_t make_kprojid(struct user_namespace *from, projid_t projid); + +extern projid_t from_kprojid(struct user_namespace *to, kprojid_t projid); +extern projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t projid); + +static inline bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid) +{ + return from_kprojid(ns, projid) != (projid_t)-1; +} + +#else + +static inline kprojid_t make_kprojid(struct user_namespace *from, projid_t projid) +{ + return KPROJIDT_INIT(projid); +} + +static inline projid_t from_kprojid(struct user_namespace *to, kprojid_t kprojid) +{ + return __kprojid_val(kprojid); +} + +static inline projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t kprojid) +{ + projid_t projid = from_kprojid(to, kprojid); + if (projid == (projid_t)-1) + projid = OVERFLOW_PROJID; + return projid; +} + +static inline bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid) +{ + return true; +} + +#endif /* CONFIG_USER_NS */ + +#endif /* _LINUX_PROJID_H */ diff --git a/include/linux/property.h b/include/linux/property.h new file mode 100644 index 000000000..1a1236405 --- /dev/null +++ b/include/linux/property.h @@ -0,0 +1,314 @@ +/* + * property.h - Unified device property interface. + * + * Copyright (C) 2014, Intel Corporation + * Authors: Rafael J. Wysocki + * Mika Westerberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
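A small sketch of the conversion flow these types enforce (the surrounding quota-style code is hypothetical): userspace values enter as projid_t, are mapped through the caller's user namespace into kprojid_t for internal storage, and are mapped back on the way out.

#include <linux/projid.h>

static int demo_store_projid(struct user_namespace *ns, projid_t user_value,
			     kprojid_t *stored)
{
	kprojid_t kprojid = make_kprojid(ns, user_value);

	if (!projid_valid(kprojid))
		return -EINVAL;		/* no mapping in this namespace */

	*stored = kprojid;
	return 0;
}

static projid_t demo_report_projid(struct user_namespace *ns, kprojid_t stored)
{
	/* The _munged variant falls back to OVERFLOW_PROJID when the id
	 * has no mapping in the target namespace. */
	return from_kprojid_munged(ns, stored);
}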
+ */ + +#ifndef _LINUX_PROPERTY_H_ +#define _LINUX_PROPERTY_H_ + +#include +#include + +struct device; + +enum dev_prop_type { + DEV_PROP_U8, + DEV_PROP_U16, + DEV_PROP_U32, + DEV_PROP_U64, + DEV_PROP_STRING, + DEV_PROP_MAX, +}; + +enum dev_dma_attr { + DEV_DMA_NOT_SUPPORTED, + DEV_DMA_NON_COHERENT, + DEV_DMA_COHERENT, +}; + +struct fwnode_handle *dev_fwnode(struct device *dev); + +bool device_property_present(struct device *dev, const char *propname); +int device_property_read_u8_array(struct device *dev, const char *propname, + u8 *val, size_t nval); +int device_property_read_u16_array(struct device *dev, const char *propname, + u16 *val, size_t nval); +int device_property_read_u32_array(struct device *dev, const char *propname, + u32 *val, size_t nval); +int device_property_read_u64_array(struct device *dev, const char *propname, + u64 *val, size_t nval); +int device_property_read_string_array(struct device *dev, const char *propname, + const char **val, size_t nval); +int device_property_read_string(struct device *dev, const char *propname, + const char **val); +int device_property_match_string(struct device *dev, + const char *propname, const char *string); + +bool fwnode_device_is_available(const struct fwnode_handle *fwnode); +bool fwnode_property_present(const struct fwnode_handle *fwnode, + const char *propname); +int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode, + const char *propname, u8 *val, + size_t nval); +int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode, + const char *propname, u16 *val, + size_t nval); +int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode, + const char *propname, u32 *val, + size_t nval); +int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode, + const char *propname, u64 *val, + size_t nval); +int fwnode_property_read_string_array(const struct fwnode_handle *fwnode, + const char *propname, const char **val, + size_t nval); +int fwnode_property_read_string(const struct fwnode_handle *fwnode, + const char *propname, const char **val); +int fwnode_property_match_string(const struct fwnode_handle *fwnode, + const char *propname, const char *string); +int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, + const char *prop, const char *nargs_prop, + unsigned int nargs, unsigned int index, + struct fwnode_reference_args *args); + +struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode); +struct fwnode_handle *fwnode_get_next_parent( + struct fwnode_handle *fwnode); +struct fwnode_handle *fwnode_get_next_child_node( + const struct fwnode_handle *fwnode, struct fwnode_handle *child); +struct fwnode_handle *fwnode_get_next_available_child_node( + const struct fwnode_handle *fwnode, struct fwnode_handle *child); + +#define fwnode_for_each_child_node(fwnode, child) \ + for (child = fwnode_get_next_child_node(fwnode, NULL); child; \ + child = fwnode_get_next_child_node(fwnode, child)) + +#define fwnode_for_each_available_child_node(fwnode, child) \ + for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\ + child = fwnode_get_next_available_child_node(fwnode, child)) + +struct fwnode_handle *device_get_next_child_node( + struct device *dev, struct fwnode_handle *child); + +#define device_for_each_child_node(dev, child) \ + for (child = device_get_next_child_node(dev, NULL); child; \ + child = device_get_next_child_node(dev, child)) + +struct fwnode_handle *fwnode_get_named_child_node( + const struct fwnode_handle *fwnode, const char 
*childname); +struct fwnode_handle *device_get_named_child_node(struct device *dev, + const char *childname); + +struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); +void fwnode_handle_put(struct fwnode_handle *fwnode); + +int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index); + +unsigned int device_get_child_node_count(struct device *dev); + +static inline bool device_property_read_bool(struct device *dev, + const char *propname) +{ + return device_property_present(dev, propname); +} + +static inline int device_property_read_u8(struct device *dev, + const char *propname, u8 *val) +{ + return device_property_read_u8_array(dev, propname, val, 1); +} + +static inline int device_property_read_u16(struct device *dev, + const char *propname, u16 *val) +{ + return device_property_read_u16_array(dev, propname, val, 1); +} + +static inline int device_property_read_u32(struct device *dev, + const char *propname, u32 *val) +{ + return device_property_read_u32_array(dev, propname, val, 1); +} + +static inline int device_property_read_u64(struct device *dev, + const char *propname, u64 *val) +{ + return device_property_read_u64_array(dev, propname, val, 1); +} + +static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_present(fwnode, propname); +} + +static inline int fwnode_property_read_u8(const struct fwnode_handle *fwnode, + const char *propname, u8 *val) +{ + return fwnode_property_read_u8_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u16(const struct fwnode_handle *fwnode, + const char *propname, u16 *val) +{ + return fwnode_property_read_u16_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u32(const struct fwnode_handle *fwnode, + const char *propname, u32 *val) +{ + return fwnode_property_read_u32_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u64(const struct fwnode_handle *fwnode, + const char *propname, u64 *val) +{ + return fwnode_property_read_u64_array(fwnode, propname, val, 1); +} + +/** + * struct property_entry - "Built-in" device property representation. + * @name: Name of the property. + * @length: Length of data making up the value. + * @is_array: True when the property is an array. + * @type: Type of the data in unions. + * @pointer: Pointer to the property (an array of items of the given type). + * @value: Value of the property (when it is a single item of the given type). + */ +struct property_entry { + const char *name; + size_t length; + bool is_array; + enum dev_prop_type type; + union { + union { + const u8 *u8_data; + const u16 *u16_data; + const u32 *u32_data; + const u64 *u64_data; + const char * const *str; + } pointer; + union { + u8 u8_data; + u16 u16_data; + u32 u32_data; + u64 u64_data; + const char *str; + } value; + }; +}; + +/* + * Note: the below four initializers for the anonymous union are carefully + * crafted to avoid gcc-4.4.4's problems with initialization of anon unions + * and structs. 
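To make the consumer side of this API concrete, a hedged sketch of a driver probe reading firmware-described properties (the property names and the surrounding driver are made up):

#include <linux/device.h>
#include <linux/property.h>

static int demo_parse_properties(struct device *dev)
{
	u32 clock_hz;
	const char *label;
	int ret;

	/* The same calls work whether the properties come from DT, ACPI
	 * or a built-in property_entry table. */
	ret = device_property_read_u32(dev, "clock-frequency", &clock_hz);
	if (ret)
		return ret;

	if (device_property_read_string(dev, "label", &label))
		label = "unnamed";

	if (device_property_read_bool(dev, "wakeup-source"))
		dev_info(dev, "%s: wakeup-capable, %u Hz\n", label, clock_hz);

	return 0;
}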
+ */ + +#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _Type_, _val_) \ +(struct property_entry) { \ + .name = _name_, \ + .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ + .is_array = true, \ + .type = DEV_PROP_##_Type_, \ + { .pointer = { ._type_##_data = _val_ } }, \ +} + +#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, U8, _val_) +#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, U16, _val_) +#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, U32, _val_) +#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, U64, _val_) + +#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ +(struct property_entry) { \ + .name = _name_, \ + .length = ARRAY_SIZE(_val_) * sizeof(const char *), \ + .is_array = true, \ + .type = DEV_PROP_STRING, \ + { .pointer = { .str = _val_ } }, \ +} + +#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _Type_, _val_) \ +(struct property_entry) { \ + .name = _name_, \ + .length = sizeof(_type_), \ + .type = DEV_PROP_##_Type_, \ + { .value = { ._type_##_data = _val_ } }, \ +} + +#define PROPERTY_ENTRY_U8(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u8, U8, _val_) +#define PROPERTY_ENTRY_U16(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u16, U16, _val_) +#define PROPERTY_ENTRY_U32(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u32, U32, _val_) +#define PROPERTY_ENTRY_U64(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u64, U64, _val_) + +#define PROPERTY_ENTRY_STRING(_name_, _val_) \ +(struct property_entry) { \ + .name = _name_, \ + .length = sizeof(const char *), \ + .type = DEV_PROP_STRING, \ + { .value = { .str = _val_ } }, \ +} + +#define PROPERTY_ENTRY_BOOL(_name_) \ +(struct property_entry) { \ + .name = _name_, \ +} + +struct property_entry * +property_entries_dup(const struct property_entry *properties); + +void property_entries_free(const struct property_entry *properties); + +int device_add_properties(struct device *dev, + const struct property_entry *properties); +void device_remove_properties(struct device *dev); + +bool device_dma_supported(struct device *dev); + +enum dev_dma_attr device_get_dma_attr(struct device *dev); + +const void *device_get_match_data(struct device *dev); + +int device_get_phy_mode(struct device *dev); + +void *device_get_mac_address(struct device *dev, char *addr, int alen); + +int fwnode_get_phy_mode(struct fwnode_handle *fwnode); +void *fwnode_get_mac_address(struct fwnode_handle *fwnode, + char *addr, int alen); +struct fwnode_handle *fwnode_graph_get_next_endpoint( + const struct fwnode_handle *fwnode, struct fwnode_handle *prev); +struct fwnode_handle * +fwnode_graph_get_port_parent(const struct fwnode_handle *fwnode); +struct fwnode_handle *fwnode_graph_get_remote_port_parent( + const struct fwnode_handle *fwnode); +struct fwnode_handle *fwnode_graph_get_remote_port( + const struct fwnode_handle *fwnode); +struct fwnode_handle *fwnode_graph_get_remote_endpoint( + const struct fwnode_handle *fwnode); +struct fwnode_handle * +fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port, + u32 endpoint); + +#define fwnode_graph_for_each_endpoint(fwnode, child) \ + for (child = NULL; \ + (child = fwnode_graph_get_next_endpoint(fwnode, child)); ) + +int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, + struct fwnode_endpoint *endpoint); + +#endif /* _LINUX_PROPERTY_H_ */ diff --git a/include/linux/psci.h 
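And the producer side, sketched with the PROPERTY_ENTRY_* initializers just defined (names and values are illustrative): board or bridge code builds a terminated table and attaches it before the consumer driver probes.

#include <linux/property.h>

static const u32 demo_rates[] = { 100000, 400000 };

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	PROPERTY_ENTRY_U32_ARRAY("supported-rates", demo_rates),
	PROPERTY_ENTRY_STRING("label", "demo-device"),
	PROPERTY_ENTRY_BOOL("wakeup-source"),
	{ }	/* terminating entry */
};

/* Before the consumer probes:  device_add_properties(dev, demo_props);
 * balanced by device_remove_properties(dev) on teardown. */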
b/include/linux/psci.h new file mode 100644 index 000000000..8b1b3b593 --- /dev/null +++ b/include/linux/psci.h @@ -0,0 +1,70 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2015 ARM Limited + */ + +#ifndef __LINUX_PSCI_H +#define __LINUX_PSCI_H + +#include +#include + +#define PSCI_POWER_STATE_TYPE_STANDBY 0 +#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 + +bool psci_tos_resident_on(int cpu); + +int psci_cpu_init_idle(unsigned int cpu); +int psci_cpu_suspend_enter(unsigned long index); + +enum psci_conduit { + PSCI_CONDUIT_NONE, + PSCI_CONDUIT_SMC, + PSCI_CONDUIT_HVC, +}; + +enum smccc_version { + SMCCC_VERSION_1_0, + SMCCC_VERSION_1_1, +}; + +struct psci_operations { + u32 (*get_version)(void); + int (*cpu_suspend)(u32 state, unsigned long entry_point); + int (*cpu_off)(u32 state); + int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); + int (*migrate)(unsigned long cpuid); + int (*affinity_info)(unsigned long target_affinity, + unsigned long lowest_affinity_level); + int (*migrate_info_type)(void); + enum psci_conduit conduit; + enum smccc_version smccc_version; +}; + +extern struct psci_operations psci_ops; + +#if defined(CONFIG_ARM_PSCI_FW) +int __init psci_dt_init(void); +#else +static inline int psci_dt_init(void) { return 0; } +#endif + +#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI) +int __init psci_acpi_init(void); +bool __init acpi_psci_present(void); +bool acpi_psci_use_hvc(void); +#else +static inline int psci_acpi_init(void) { return 0; } +static inline bool acpi_psci_present(void) { return false; } +static inline bool acpi_psci_use_hvc(void) {return false; } +#endif + +#endif /* __LINUX_PSCI_H */ diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h new file mode 100644 index 000000000..827c60184 --- /dev/null +++ b/include/linux/psp-sev.h @@ -0,0 +1,629 @@ +/* + * AMD Secure Encrypted Virtualization (SEV) driver interface + * + * Copyright (C) 2016-2017 Advanced Micro Devices, Inc. + * + * Author: Brijesh Singh + * + * SEV spec 0.14 is available at: + * http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __PSP_SEV_H__ +#define __PSP_SEV_H__ + +#include + +#ifdef CONFIG_X86 +#include + +#define __psp_pa(x) __sme_pa(x) +#else +#define __psp_pa(x) __pa(x) +#endif + +#define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ + +/** + * SEV platform state + */ +enum sev_state { + SEV_STATE_UNINIT = 0x0, + SEV_STATE_INIT = 0x1, + SEV_STATE_WORKING = 0x2, + + SEV_STATE_MAX +}; + +/** + * SEV platform and guest management commands + */ +enum sev_cmd { + /* platform commands */ + SEV_CMD_INIT = 0x001, + SEV_CMD_SHUTDOWN = 0x002, + SEV_CMD_FACTORY_RESET = 0x003, + SEV_CMD_PLATFORM_STATUS = 0x004, + SEV_CMD_PEK_GEN = 0x005, + SEV_CMD_PEK_CSR = 0x006, + SEV_CMD_PEK_CERT_IMPORT = 0x007, + SEV_CMD_PDH_CERT_EXPORT = 0x008, + SEV_CMD_PDH_GEN = 0x009, + SEV_CMD_DF_FLUSH = 0x00A, + SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B, + SEV_CMD_GET_ID = 0x00C, + + /* Guest commands */ + SEV_CMD_DECOMMISSION = 0x020, + SEV_CMD_ACTIVATE = 0x021, + SEV_CMD_DEACTIVATE = 0x022, + SEV_CMD_GUEST_STATUS = 0x023, + + /* Guest launch commands */ + SEV_CMD_LAUNCH_START = 0x030, + SEV_CMD_LAUNCH_UPDATE_DATA = 0x031, + SEV_CMD_LAUNCH_UPDATE_VMSA = 0x032, + SEV_CMD_LAUNCH_MEASURE = 0x033, + SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034, + SEV_CMD_LAUNCH_FINISH = 0x035, + + /* Guest migration commands (outgoing) */ + SEV_CMD_SEND_START = 0x040, + SEV_CMD_SEND_UPDATE_DATA = 0x041, + SEV_CMD_SEND_UPDATE_VMSA = 0x042, + SEV_CMD_SEND_FINISH = 0x043, + + /* Guest migration commands (incoming) */ + SEV_CMD_RECEIVE_START = 0x050, + SEV_CMD_RECEIVE_UPDATE_DATA = 0x051, + SEV_CMD_RECEIVE_UPDATE_VMSA = 0x052, + SEV_CMD_RECEIVE_FINISH = 0x053, + + /* Guest debug commands */ + SEV_CMD_DBG_DECRYPT = 0x060, + SEV_CMD_DBG_ENCRYPT = 0x061, + + SEV_CMD_MAX, +}; + +/** + * struct sev_data_init - INIT command parameters + * + * @flags: processing flags + * @tmr_address: system physical address used for SEV-ES + * @tmr_len: len of tmr_address + */ +struct sev_data_init { + u32 flags; /* In */ + u32 reserved; /* In */ + u64 tmr_address; /* In */ + u32 tmr_len; /* In */ +} __packed; + +/** + * struct sev_data_pek_csr - PEK_CSR command parameters + * + * @address: PEK certificate chain + * @len: len of certificate + */ +struct sev_data_pek_csr { + u64 address; /* In */ + u32 len; /* In/Out */ +} __packed; + +/** + * struct sev_data_cert_import - PEK_CERT_IMPORT command parameters + * + * @pek_address: PEK certificate chain + * @pek_len: len of PEK certificate + * @oca_address: OCA certificate chain + * @oca_len: len of OCA certificate + */ +struct sev_data_pek_cert_import { + u64 pek_cert_address; /* In */ + u32 pek_cert_len; /* In */ + u32 reserved; /* In */ + u64 oca_cert_address; /* In */ + u32 oca_cert_len; /* In */ +} __packed; + +/** + * struct sev_data_download_firmware - DOWNLOAD_FIRMWARE command parameters + * + * @address: physical address of firmware image + * @len: len of the firmware image + */ +struct sev_data_download_firmware { + u64 address; /* In */ + u32 len; /* In */ +} __packed; + +/** + * struct sev_data_get_id - GET_ID command parameters + * + * @address: physical address of region to place unique CPU ID(s) + * @len: len of the region + */ +struct sev_data_get_id { + u64 address; /* In */ + u32 len; /* In/Out */ +} __packed; +/** + * struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters + * + * @pdh_address: PDH certificate address + * @pdh_len: len of PDH certificate + * @cert_chain_address: PDH certificate chain + * @cert_chain_len: len of PDH certificate chain + */ +struct sev_data_pdh_cert_export { + u64 pdh_cert_address; /* 
In */ + u32 pdh_cert_len; /* In/Out */ + u32 reserved; /* In */ + u64 cert_chain_address; /* In */ + u32 cert_chain_len; /* In/Out */ +} __packed; + +/** + * struct sev_data_decommission - DECOMMISSION command parameters + * + * @handle: handle of the VM to decommission + */ +struct sev_data_decommission { + u32 handle; /* In */ +} __packed; + +/** + * struct sev_data_activate - ACTIVATE command parameters + * + * @handle: handle of the VM to activate + * @asid: asid assigned to the VM + */ +struct sev_data_activate { + u32 handle; /* In */ + u32 asid; /* In */ +} __packed; + +/** + * struct sev_data_deactivate - DEACTIVATE command parameters + * + * @handle: handle of the VM to deactivate + */ +struct sev_data_deactivate { + u32 handle; /* In */ +} __packed; + +/** + * struct sev_data_guest_status - SEV GUEST_STATUS command parameters + * + * @handle: handle of the VM to retrieve status + * @policy: policy information for the VM + * @asid: current ASID of the VM + * @state: current state of the VM + */ +struct sev_data_guest_status { + u32 handle; /* In */ + u32 policy; /* Out */ + u32 asid; /* Out */ + u8 state; /* Out */ +} __packed; + +/** + * struct sev_data_launch_start - LAUNCH_START command parameters + * + * @handle: handle assigned to the VM + * @policy: guest launch policy + * @dh_cert_address: physical address of DH certificate blob + * @dh_cert_len: len of DH certificate blob + * @session_address: physical address of session parameters + * @session_len: len of session parameters + */ +struct sev_data_launch_start { + u32 handle; /* In/Out */ + u32 policy; /* In */ + u64 dh_cert_address; /* In */ + u32 dh_cert_len; /* In */ + u32 reserved; /* In */ + u64 session_address; /* In */ + u32 session_len; /* In */ +} __packed; + +/** + * struct sev_data_launch_update_data - LAUNCH_UPDATE_DATA command parameter + * + * @handle: handle of the VM to update + * @len: len of memory to be encrypted + * @address: physical address of memory region to encrypt + */ +struct sev_data_launch_update_data { + u32 handle; /* In */ + u32 reserved; + u64 address; /* In */ + u32 len; /* In */ +} __packed; + +/** + * struct sev_data_launch_update_vmsa - LAUNCH_UPDATE_VMSA command + * + * @handle: handle of the VM + * @address: physical address of memory region to encrypt + * @len: len of memory region to encrypt + */ +struct sev_data_launch_update_vmsa { + u32 handle; /* In */ + u32 reserved; + u64 address; /* In */ + u32 len; /* In */ +} __packed; + +/** + * struct sev_data_launch_measure - LAUNCH_MEASURE command parameters + * + * @handle: handle of the VM to process + * @address: physical address containing the measurement blob + * @len: len of measurement blob + */ +struct sev_data_launch_measure { + u32 handle; /* In */ + u32 reserved; + u64 address; /* In */ + u32 len; /* In/Out */ +} __packed; + +/** + * struct sev_data_launch_secret - LAUNCH_SECRET command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing the packet header + * @hdr_len: len of packet header + * @guest_address: system physical address of guest memory region + * @guest_len: len of guest_paddr + * @trans_address: physical address of transport memory buffer + * @trans_len: len of transport memory buffer + */ +struct sev_data_launch_secret { + u32 handle; /* In */ + u32 reserved1; + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved2; + u64 guest_address; /* In */ + u32 guest_len; /* In */ + u32 reserved3; + u64 trans_address; /* In */ + u32 trans_len; /* In */ +} 
__packed; + +/** + * struct sev_data_launch_finish - LAUNCH_FINISH command parameters + * + * @handle: handle of the VM to process + */ +struct sev_data_launch_finish { + u32 handle; /* In */ +} __packed; + +/** + * struct sev_data_send_start - SEND_START command parameters + * + * @handle: handle of the VM to process + * @policy: policy information for the VM + * @pdh_cert_address: physical address containing PDH certificate + * @pdh_cert_len: len of PDH certificate + * @plat_certs_address: physical address containing platform certificate + * @plat_certs_len: len of platform certificate + * @amd_certs_address: physical address containing AMD certificate + * @amd_certs_len: len of AMD certificate + * @session_address: physical address containing Session data + * @session_len: len of session data + */ +struct sev_data_send_start { + u32 handle; /* In */ + u32 policy; /* Out */ + u64 pdh_cert_address; /* In */ + u32 pdh_cert_len; /* In */ + u32 reserved1; + u64 plat_cert_address; /* In */ + u32 plat_cert_len; /* In */ + u32 reserved2; + u64 amd_cert_address; /* In */ + u32 amd_cert_len; /* In */ + u32 reserved3; + u64 session_address; /* In */ + u32 session_len; /* In/Out */ +} __packed; + +/** + * struct sev_data_send_update - SEND_UPDATE_DATA command + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @guest_address: physical address of guest memory region to send + * @guest_len: len of guest memory region to send + * @trans_address: physical address of host memory region + * @trans_len: len of host memory region + */ +struct sev_data_send_update_data { + u32 handle; /* In */ + u32 reserved1; + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved2; + u64 guest_address; /* In */ + u32 guest_len; /* In */ + u32 reserved3; + u64 trans_address; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct sev_data_send_update - SEND_UPDATE_VMSA command + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @guest_address: physical address of guest memory region to send + * @guest_len: len of guest memory region to send + * @trans_address: physical address of host memory region + * @trans_len: len of host memory region + */ +struct sev_data_send_update_vmsa { + u32 handle; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved2; + u64 guest_address; /* In */ + u32 guest_len; /* In */ + u32 reserved3; + u64 trans_address; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct sev_data_send_finish - SEND_FINISH command parameters + * + * @handle: handle of the VM to process + */ +struct sev_data_send_finish { + u32 handle; /* In */ +} __packed; + +/** + * struct sev_data_receive_start - RECEIVE_START command parameters + * + * @handle: handle of the VM to perform receive operation + * @pdh_cert_address: system physical address containing PDH certificate blob + * @pdh_cert_len: len of PDH certificate blob + * @session_address: system physical address containing session blob + * @session_len: len of session blob + */ +struct sev_data_receive_start { + u32 handle; /* In/Out */ + u32 policy; /* In */ + u64 pdh_cert_address; /* In */ + u32 pdh_cert_len; /* In */ + u32 reserved1; + u64 session_address; /* In */ + u32 session_len; /* In */ +} __packed; + +/** + * struct sev_data_receive_update_data - RECEIVE_UPDATE_DATA command parameters + * + * @handle: handle of the 
VM to update + * @hdr_address: physical address containing packet header blob + * @hdr_len: len of packet header + * @guest_address: system physical address of guest memory region + * @guest_len: len of guest memory region + * @trans_address: system physical address of transport buffer + * @trans_len: len of transport buffer + */ +struct sev_data_receive_update_data { + u32 handle; /* In */ + u32 reserved1; + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved2; + u64 guest_address; /* In */ + u32 guest_len; /* In */ + u32 reserved3; + u64 trans_address; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct sev_data_receive_update_vmsa - RECEIVE_UPDATE_VMSA command parameters + * + * @handle: handle of the VM to update + * @hdr_address: physical address containing packet header blob + * @hdr_len: len of packet header + * @guest_address: system physical address of guest memory region + * @guest_len: len of guest memory region + * @trans_address: system physical address of transport buffer + * @trans_len: len of transport buffer + */ +struct sev_data_receive_update_vmsa { + u32 handle; /* In */ + u32 reserved1; + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved2; + u64 guest_address; /* In */ + u32 guest_len; /* In */ + u32 reserved3; + u64 trans_address; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct sev_data_receive_finish - RECEIVE_FINISH command parameters + * + * @handle: handle of the VM to finish + */ +struct sev_data_receive_finish { + u32 handle; /* In */ +} __packed; + +/** + * struct sev_data_dbg - DBG_ENCRYPT/DBG_DECRYPT command parameters + * + * @handle: handle of the VM to perform debug operation + * @src_addr: source address of data to operate on + * @dst_addr: destination address of data to operate on + * @len: len of data to operate on + */ +struct sev_data_dbg { + u32 handle; /* In */ + u32 reserved; + u64 src_addr; /* In */ + u64 dst_addr; /* In */ + u32 len; /* In */ +} __packed; + +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + +/** + * sev_platform_init - perform SEV INIT command + * + * @error: SEV command return code + * + * Returns: + * 0 if the SEV successfully processed the command + * -%ENODEV if the SEV device is not available + * -%ENOTSUPP if the SEV does not support SEV + * -%ETIMEDOUT if the SEV command timed out + * -%EIO if the SEV returned a non-zero return code + */ +int sev_platform_init(int *error); + +/** + * sev_platform_status - perform SEV PLATFORM_STATUS command + * + * @status: sev_user_data_status structure to be processed + * @error: SEV command return code + * + * Returns: + * 0 if the SEV successfully processed the command + * -%ENODEV if the SEV device is not available + * -%ENOTSUPP if the SEV does not support SEV + * -%ETIMEDOUT if the SEV command timed out + * -%EIO if the SEV returned a non-zero return code + */ +int sev_platform_status(struct sev_user_data_status *status, int *error); + +/** + * sev_issue_cmd_external_user - issue SEV command by other driver with a file + * handle. + * + * This function can be used by other drivers to issue a SEV command on + * behalf of userspace. The caller must pass a valid SEV file descriptor + * so that we know that it has access to SEV device. 
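A usage sketch for the helpers documented here (the calling code is hypothetical; in-tree, KVM's SEV support is the main user). The convention is to check both the errno-style return value and the firmware status reported through the *error argument:

#include <linux/psp-sev.h>

static int demo_query_sev(void)
{
	struct sev_user_data_status status;
	int rc, fw_err = 0;

	rc = sev_platform_init(&fw_err);
	if (rc) {
		pr_err("SEV INIT failed: rc=%d fw_error=%d\n", rc, fw_err);
		return rc;
	}

	rc = sev_platform_status(&status, &fw_err);
	if (rc)
		pr_err("SEV PLATFORM_STATUS failed: fw_error=%d\n", fw_err);
	else
		pr_info("SEV API %d.%d, platform state %d\n",
			status.api_major, status.api_minor, status.state);

	return rc;
}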
+ * + * @filep - SEV device file pointer + * @cmd - command to issue + * @data - command buffer + * @error: SEV command return code + * + * Returns: + * 0 if the SEV successfully processed the command + * -%ENODEV if the SEV device is not available + * -%ENOTSUPP if the SEV does not support SEV + * -%ETIMEDOUT if the SEV command timed out + * -%EIO if the SEV returned a non-zero return code + * -%EINVAL if the SEV file descriptor is not valid + */ +int sev_issue_cmd_external_user(struct file *filep, unsigned int id, + void *data, int *error); + +/** + * sev_guest_deactivate - perform SEV DEACTIVATE command + * + * @deactivate: sev_data_deactivate structure to be processed + * @sev_ret: sev command return code + * + * Returns: + * 0 if the sev successfully processed the command + * -%ENODEV if the sev device is not available + * -%ENOTSUPP if the sev does not support SEV + * -%ETIMEDOUT if the sev command timed out + * -%EIO if the sev returned a non-zero return code + */ +int sev_guest_deactivate(struct sev_data_deactivate *data, int *error); + +/** + * sev_guest_activate - perform SEV ACTIVATE command + * + * @activate: sev_data_activate structure to be processed + * @sev_ret: sev command return code + * + * Returns: + * 0 if the sev successfully processed the command + * -%ENODEV if the sev device is not available + * -%ENOTSUPP if the sev does not support SEV + * -%ETIMEDOUT if the sev command timed out + * -%EIO if the sev returned a non-zero return code + */ +int sev_guest_activate(struct sev_data_activate *data, int *error); + +/** + * sev_guest_df_flush - perform SEV DF_FLUSH command + * + * @sev_ret: sev command return code + * + * Returns: + * 0 if the sev successfully processed the command + * -%ENODEV if the sev device is not available + * -%ENOTSUPP if the sev does not support SEV + * -%ETIMEDOUT if the sev command timed out + * -%EIO if the sev returned a non-zero return code + */ +int sev_guest_df_flush(int *error); + +/** + * sev_guest_decommission - perform SEV DECOMMISSION command + * + * @decommission: sev_data_decommission structure to be processed + * @sev_ret: sev command return code + * + * Returns: + * 0 if the sev successfully processed the command + * -%ENODEV if the sev device is not available + * -%ENOTSUPP if the sev does not support SEV + * -%ETIMEDOUT if the sev command timed out + * -%EIO if the sev returned a non-zero return code + */ +int sev_guest_decommission(struct sev_data_decommission *data, int *error); + +void *psp_copy_user_blob(u64 __user uaddr, u32 len); + +#else /* !CONFIG_CRYPTO_DEV_SP_PSP */ + +static inline int +sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; } + +static inline int sev_platform_init(int *error) { return -ENODEV; } + +static inline int +sev_guest_deactivate(struct sev_data_deactivate *data, int *error) { return -ENODEV; } + +static inline int +sev_guest_decommission(struct sev_data_decommission *data, int *error) { return -ENODEV; } + +static inline int +sev_guest_activate(struct sev_data_activate *data, int *error) { return -ENODEV; } + +static inline int sev_guest_df_flush(int *error) { return -ENODEV; } + +static inline int +sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int *error) { return -ENODEV; } + +static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); } + +#endif /* CONFIG_CRYPTO_DEV_SP_PSP */ + +#endif /* __PSP_SEV_H__ */ diff --git a/include/linux/pstore.h b/include/linux/pstore.h new file mode 100644 index 
000000000..de9093d6e --- /dev/null +++ b/include/linux/pstore.h @@ -0,0 +1,279 @@ +/* + * Persistent Storage - pstore.h + * + * Copyright (C) 2010 Intel Corporation + * + * This code is the generic layer to export data records from platform + * level persistent storage via a file system. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _LINUX_PSTORE_H +#define _LINUX_PSTORE_H + +#include +#include +#include +#include +#include +#include +#include + +struct module; + +/* pstore record types (see fs/pstore/inode.c for filename templates) */ +enum pstore_type_id { + PSTORE_TYPE_DMESG = 0, + PSTORE_TYPE_MCE = 1, + PSTORE_TYPE_CONSOLE = 2, + PSTORE_TYPE_FTRACE = 3, + /* PPC64 partition types */ + PSTORE_TYPE_PPC_RTAS = 4, + PSTORE_TYPE_PPC_OF = 5, + PSTORE_TYPE_PPC_COMMON = 6, + PSTORE_TYPE_PMSG = 7, + PSTORE_TYPE_PPC_OPAL = 8, + PSTORE_TYPE_UNKNOWN = 255 +}; + +struct pstore_info; +/** + * struct pstore_record - details of a pstore record entry + * @psi: pstore backend driver information + * @type: pstore record type + * @id: per-type unique identifier for record + * @time: timestamp of the record + * @buf: pointer to record contents + * @size: size of @buf + * @ecc_notice_size: + * ECC information for @buf + * + * Valid for PSTORE_TYPE_DMESG @type: + * + * @count: Oops count since boot + * @reason: kdump reason for notification + * @part: position in a multipart record + * @compressed: whether the buffer is compressed + * + */ +struct pstore_record { + struct pstore_info *psi; + enum pstore_type_id type; + u64 id; + struct timespec64 time; + char *buf; + ssize_t size; + ssize_t ecc_notice_size; + + int count; + enum kmsg_dump_reason reason; + unsigned int part; + bool compressed; +}; + +/** + * struct pstore_info - backend pstore driver structure + * + * @owner: module which is repsonsible for this backend driver + * @name: name of the backend driver + * + * @buf_lock: semaphore to serialize access to @buf + * @buf: preallocated crash dump buffer + * @bufsize: size of @buf available for crash dump bytes (must match + * smallest number of bytes available for writing to a + * backend entry, since compressed bytes don't take kindly + * to being truncated) + * + * @read_mutex: serializes @open, @read, @close, and @erase callbacks + * @flags: bitfield of frontends the backend can accept writes for + * @data: backend-private pointer passed back during callbacks + * + * Callbacks: + * + * @open: + * Notify backend that pstore is starting a full read of backend + * records. Followed by one or more @read calls, and a final @close. + * + * @psi: in: pointer to the struct pstore_info for the backend + * + * Returns 0 on success, and non-zero on error. + * + * @close: + * Notify backend that pstore has finished a full read of backend + * records. Always preceded by an @open call and one or more @read + * calls. 
+ * + * @psi: in: pointer to the struct pstore_info for the backend + * + * Returns 0 on success, and non-zero on error. (Though pstore will + * ignore the error.) + * + * @read: + * Read next available backend record. Called after a successful + * @open. + * + * @record: + * pointer to record to populate. @buf should be allocated + * by the backend and filled. At least @type and @id should + * be populated, since these are used when creating pstorefs + * file names. + * + * Returns record size on success, zero when no more records are + * available, or negative on error. + * + * @write: + * A newly generated record needs to be written to backend storage. + * + * @record: + * pointer to record metadata. When @type is PSTORE_TYPE_DMESG, + * @buf will be pointing to the preallocated @psi.buf, since + * memory allocation may be broken during an Oops. Regardless, + * @buf must be proccesed or copied before returning. The + * backend is also expected to write @id with something that + * can help identify this record to a future @erase callback. + * The @time field will be prepopulated with the current time, + * when available. The @size field will have the size of data + * in @buf. + * + * Returns 0 on success, and non-zero on error. + * + * @write_user: + * Perform a frontend write to a backend record, using a specified + * buffer that is coming directly from userspace, instead of the + * @record @buf. + * + * @record: pointer to record metadata. + * @buf: pointer to userspace contents to write to backend + * + * Returns 0 on success, and non-zero on error. + * + * @erase: + * Delete a record from backend storage. Different backends + * identify records differently, so entire original record is + * passed back to assist in identification of what the backend + * should remove from storage. + * + * @record: pointer to record metadata. + * + * Returns 0 on success, and non-zero on error. + * + */ +struct pstore_info { + struct module *owner; + char *name; + + struct semaphore buf_lock; + char *buf; + size_t bufsize; + + struct mutex read_mutex; + + int flags; + void *data; + + int (*open)(struct pstore_info *psi); + int (*close)(struct pstore_info *psi); + ssize_t (*read)(struct pstore_record *record); + int (*write)(struct pstore_record *record); + int (*write_user)(struct pstore_record *record, + const char __user *buf); + int (*erase)(struct pstore_record *record); +}; + +/* Supported frontends */ +#define PSTORE_FLAGS_DMESG (1 << 0) +#define PSTORE_FLAGS_CONSOLE (1 << 1) +#define PSTORE_FLAGS_FTRACE (1 << 2) +#define PSTORE_FLAGS_PMSG (1 << 3) + +extern int pstore_register(struct pstore_info *); +extern void pstore_unregister(struct pstore_info *); + +struct pstore_ftrace_record { + unsigned long ip; + unsigned long parent_ip; + u64 ts; +}; + +/* + * ftrace related stuff: Both backends and frontends need these so expose + * them here. + */ + +#if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB) +#define PSTORE_CPU_IN_IP 0x1 +#elif NR_CPUS <= 4 && defined(CONFIG_ARM) +#define PSTORE_CPU_IN_IP 0x3 +#endif + +#define TS_CPU_SHIFT 8 +#define TS_CPU_MASK (BIT(TS_CPU_SHIFT) - 1) + +/* + * If CPU number can be stored in IP, store it there, otherwise store it in + * the time stamp. This means more timestamp resolution is available when + * the CPU can be stored in the IP. 
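A heavily trimmed sketch of the backend side (all names and storage details are placeholders; real backends such as ramoops or efi-pstore implement these callbacks against actual storage):

#include <linux/module.h>
#include <linux/pstore.h>

static ssize_t demo_pstore_read(struct pstore_record *record)
{
	return 0;			/* no more records to enumerate */
}

static int demo_pstore_write(struct pstore_record *record)
{
	/* Persist record->buf (record->size bytes) and set record->id to
	 * something the erase callback can use to find it again. */
	record->id = 1;			/* placeholder */
	return 0;
}

static int demo_pstore_erase(struct pstore_record *record)
{
	return 0;			/* drop the record named by record->id */
}

static struct pstore_info demo_pstore_info = {
	.owner	= THIS_MODULE,
	.name	= "demo",
	.flags	= PSTORE_FLAGS_DMESG,
	.read	= demo_pstore_read,
	.write	= demo_pstore_write,
	.erase	= demo_pstore_erase,
};

/* The backend must point .buf/.bufsize at a preallocated crash buffer
 * before calling pstore_register(&demo_pstore_info). */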
+ */ +#ifdef PSTORE_CPU_IN_IP +static inline void +pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) +{ + rec->ip |= cpu; +} + +static inline unsigned int +pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec) +{ + return rec->ip & PSTORE_CPU_IN_IP; +} + +static inline u64 +pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec) +{ + return rec->ts; +} + +static inline void +pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) +{ + rec->ts = val; +} +#else +static inline void +pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) +{ + rec->ts &= ~(TS_CPU_MASK); + rec->ts |= cpu; +} + +static inline unsigned int +pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec) +{ + return rec->ts & TS_CPU_MASK; +} + +static inline u64 +pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec) +{ + return rec->ts >> TS_CPU_SHIFT; +} + +static inline void +pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) +{ + rec->ts = (rec->ts & TS_CPU_MASK) | (val << TS_CPU_SHIFT); +} +#endif + +#endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h new file mode 100644 index 000000000..e6d226464 --- /dev/null +++ b/include/linux/pstore_ram.h @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2010 Marco Stornelli + * Copyright (C) 2011 Kees Cook + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_PSTORE_RAM_H__ +#define __LINUX_PSTORE_RAM_H__ + +#include +#include +#include +#include +#include +#include + +/* + * Choose whether access to the RAM zone requires locking or not. If a zone + * can be written to from different CPUs like with ftrace for example, then + * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required. 
+ */ +#define PRZ_FLAG_NO_LOCK BIT(0) + +struct persistent_ram_buffer; +struct rs_control; + +struct persistent_ram_ecc_info { + int block_size; + int ecc_size; + int symsize; + int poly; + uint16_t *par; +}; + +struct persistent_ram_zone { + phys_addr_t paddr; + size_t size; + void *vaddr; + struct persistent_ram_buffer *buffer; + size_t buffer_size; + u32 flags; + raw_spinlock_t buffer_lock; + + /* ECC correction */ + char *par_buffer; + char *par_header; + struct rs_control *rs_decoder; + int corrected_bytes; + int bad_blocks; + struct persistent_ram_ecc_info ecc_info; + + char *old_log; + size_t old_log_size; +}; + +struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, + u32 sig, struct persistent_ram_ecc_info *ecc_info, + unsigned int memtype, u32 flags); +void persistent_ram_free(struct persistent_ram_zone *prz); +void persistent_ram_zap(struct persistent_ram_zone *prz); + +int persistent_ram_write(struct persistent_ram_zone *prz, const void *s, + unsigned int count); +int persistent_ram_write_user(struct persistent_ram_zone *prz, + const void __user *s, unsigned int count); + +void persistent_ram_save_old(struct persistent_ram_zone *prz); +size_t persistent_ram_old_size(struct persistent_ram_zone *prz); +void *persistent_ram_old(struct persistent_ram_zone *prz); +void persistent_ram_free_old(struct persistent_ram_zone *prz); +ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, + char *str, size_t len); + +/* + * Ramoops platform data + * @mem_size memory size for ramoops + * @mem_address physical memory address to contain ramoops + */ + +#define RAMOOPS_FLAG_FTRACE_PER_CPU BIT(0) + +struct ramoops_platform_data { + unsigned long mem_size; + phys_addr_t mem_address; + unsigned int mem_type; + unsigned long record_size; + unsigned long console_size; + unsigned long ftrace_size; + unsigned long pmsg_size; + int dump_oops; + u32 flags; + struct persistent_ram_ecc_info ecc_info; +}; + +#endif diff --git a/include/linux/pti.h b/include/linux/pti.h new file mode 100644 index 000000000..1a941efca --- /dev/null +++ b/include/linux/pti.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _INCLUDE_PTI_H +#define _INCLUDE_PTI_H + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +#include +#else +static inline void pti_init(void) { } +static inline void pti_finalize(void) { } +#endif + +#endif diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h new file mode 100644 index 000000000..059242030 --- /dev/null +++ b/include/linux/ptp_classify.h @@ -0,0 +1,83 @@ +/* + * PTP 1588 support + * + * This file implements a BPF that recognizes PTP event messages. + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
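For the ramoops user of this machinery, a hedged sketch of how board code might describe the reserved region (addresses and sizes are placeholders):

#include <linux/platform_device.h>
#include <linux/pstore_ram.h>

static struct ramoops_platform_data demo_ramoops_data = {
	.mem_address	= 0x8f000000,		/* carved-out RAM, placeholder */
	.mem_size	= 0x100000,
	.record_size	= 0x4000,		/* per dmesg dump record */
	.console_size	= 0x4000,
	.dump_oops	= 1,
	.ecc_info	= { .ecc_size = 16 },
};

static struct platform_device demo_ramoops_dev = {
	.name	= "ramoops",
	.id	= -1,
	.dev	= { .platform_data = &demo_ramoops_data },
};

/* Registered early in board init with platform_device_register(). */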
+ */ + +#ifndef _PTP_CLASSIFY_H_ +#define _PTP_CLASSIFY_H_ + +#include +#include + +#define PTP_CLASS_NONE 0x00 /* not a PTP event message */ +#define PTP_CLASS_V1 0x01 /* protocol version 1 */ +#define PTP_CLASS_V2 0x02 /* protocol version 2 */ +#define PTP_CLASS_VMASK 0x0f /* max protocol version is 15 */ +#define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */ +#define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */ +#define PTP_CLASS_L2 0x40 /* event in a L2 packet */ +#define PTP_CLASS_PMASK 0x70 /* mask for the packet type field */ +#define PTP_CLASS_VLAN 0x80 /* event in a VLAN tagged packet */ + +#define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4) +#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */ +#define PTP_CLASS_V2_IPV4 (PTP_CLASS_V2 | PTP_CLASS_IPV4) +#define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6) +#define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2) +#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) +#define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6) + +#define PTP_EV_PORT 319 +#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ + +#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */ +#define OFF_PTP_SEQUENCE_ID 30 +#define OFF_PTP_CONTROL 32 /* PTPv1 only */ + +/* Below defines should actually be removed at some point in time. */ +#define IP6_HLEN 40 +#define UDP_HLEN 8 +#define OFF_IHL 14 +#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2) + +#if defined(CONFIG_NET_PTP_CLASSIFY) +/** + * ptp_classify_raw - classify a PTP packet + * @skb: buffer + * + * Runs a minimal BPF dissector to classify a network packet to + * determine the PTP class. In case the skb does not contain any + * PTP protocol data, PTP_CLASS_NONE will be returned, otherwise + * PTP_CLASS_V1_IPV{4,6}, PTP_CLASS_V2_IPV{4,6} or + * PTP_CLASS_V2_{L2,VLAN}, depending on the packet content. + */ +unsigned int ptp_classify_raw(const struct sk_buff *skb); + +void __init ptp_classifier_init(void); +#else +static inline void ptp_classifier_init(void) +{ +} +static inline unsigned int ptp_classify_raw(struct sk_buff *skb) +{ + return PTP_CLASS_NONE; +} +#endif +#endif /* _PTP_CLASSIFY_H_ */ diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h new file mode 100644 index 000000000..99c3f4ee9 --- /dev/null +++ b/include/linux/ptp_clock_kernel.h @@ -0,0 +1,258 @@ +/* + * PTP 1588 clock support + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
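A small sketch of the intended use in a NIC driver's timestamping path (the surrounding driver logic is hypothetical):

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

static bool demo_skb_wants_hw_timestamp(const struct sk_buff *skb)
{
	unsigned int type = ptp_classify_raw(skb);

	if (type == PTP_CLASS_NONE)
		return false;			/* not a PTP event message */

	/* Example policy: only timestamp PTPv2 events (UDP or L2). */
	return (type & PTP_CLASS_VMASK) == PTP_CLASS_V2;
}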
+ */ + +#ifndef _PTP_CLOCK_KERNEL_H_ +#define _PTP_CLOCK_KERNEL_H_ + +#include +#include +#include + + +struct ptp_clock_request { + enum { + PTP_CLK_REQ_EXTTS, + PTP_CLK_REQ_PEROUT, + PTP_CLK_REQ_PPS, + } type; + union { + struct ptp_extts_request extts; + struct ptp_perout_request perout; + }; +}; + +struct system_device_crosststamp; +/** + * struct ptp_clock_info - decribes a PTP hardware clock + * + * @owner: The clock driver should set to THIS_MODULE. + * @name: A short "friendly name" to identify the clock and to + * help distinguish PHY based devices from MAC based ones. + * The string is not meant to be a unique id. + * @max_adj: The maximum possible frequency adjustment, in parts per billon. + * @n_alarm: The number of programmable alarms. + * @n_ext_ts: The number of external time stamp channels. + * @n_per_out: The number of programmable periodic signals. + * @n_pins: The number of programmable pins. + * @pps: Indicates whether the clock supports a PPS callback. + * @pin_config: Array of length 'n_pins'. If the number of + * programmable pins is nonzero, then drivers must + * allocate and initialize this array. + * + * clock operations + * + * @adjfine: Adjusts the frequency of the hardware clock. + * parameter scaled_ppm: Desired frequency offset from + * nominal frequency in parts per million, but with a + * 16 bit binary fractional field. + * + * @adjfreq: Adjusts the frequency of the hardware clock. + * This method is deprecated. New drivers should implement + * the @adjfine method instead. + * parameter delta: Desired frequency offset from nominal frequency + * in parts per billion + * + * @adjtime: Shifts the time of the hardware clock. + * parameter delta: Desired change in nanoseconds. + * + * @gettime64: Reads the current time from the hardware clock. + * parameter ts: Holds the result. + * + * @getcrosststamp: Reads the current time from the hardware clock and + * system clock simultaneously. + * parameter cts: Contains timestamp (device,system) pair, + * where system time is realtime and monotonic. + * + * @settime64: Set the current time on the hardware clock. + * parameter ts: Time value to set. + * + * @enable: Request driver to enable or disable an ancillary feature. + * parameter request: Desired resource to enable or disable. + * parameter on: Caller passes one to enable or zero to disable. + * + * @verify: Confirm that a pin can perform a given function. The PTP + * Hardware Clock subsystem maintains the 'pin_config' + * array on behalf of the drivers, but the PHC subsystem + * assumes that every pin can perform every function. This + * hook gives drivers a way of telling the core about + * limitations on specific pins. This function must return + * zero if the function can be assigned to this pin, and + * nonzero otherwise. + * parameter pin: index of the pin in question. + * parameter func: the desired function to use. + * parameter chan: the function channel index to use. + * + * @do_work: Request driver to perform auxiliary (periodic) operations + * Driver should return delay of the next auxiliary work scheduling + * time (>=0) or negative value in case further scheduling + * is not required. + * + * Drivers should embed their ptp_clock_info within a private + * structure, obtaining a reference to it using container_of(). + * + * The callbacks must all return zero on success, non-zero otherwise. 
+ */ + +struct ptp_clock_info { + struct module *owner; + char name[16]; + s32 max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int n_pins; + int pps; + struct ptp_pin_desc *pin_config; + int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm); + int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); + int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); + int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); + int (*getcrosststamp)(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts); + int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); + int (*enable)(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on); + int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan); + long (*do_aux_work)(struct ptp_clock_info *ptp); +}; + +struct ptp_clock; + +enum ptp_clock_events { + PTP_CLOCK_ALARM, + PTP_CLOCK_EXTTS, + PTP_CLOCK_PPS, + PTP_CLOCK_PPSUSR, +}; + +/** + * struct ptp_clock_event - decribes a PTP hardware clock event + * + * @type: One of the ptp_clock_events enumeration values. + * @index: Identifies the source of the event. + * @timestamp: When the event occurred (%PTP_CLOCK_EXTTS only). + * @pps_times: When the event occurred (%PTP_CLOCK_PPSUSR only). + */ + +struct ptp_clock_event { + int type; + int index; + union { + u64 timestamp; + struct pps_event_time pps_times; + }; +}; + +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + +/** + * ptp_clock_register() - register a PTP hardware clock driver + * + * @info: Structure describing the new clock. + * @parent: Pointer to the parent device of the new clock. + * + * Returns a valid pointer on success or PTR_ERR on failure. If PHC + * support is missing at the configuration level, this function + * returns NULL, and drivers are expected to gracefully handle that + * case separately. + */ + +extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, + struct device *parent); + +/** + * ptp_clock_unregister() - unregister a PTP hardware clock driver + * + * @ptp: The clock to remove from service. + */ + +extern int ptp_clock_unregister(struct ptp_clock *ptp); + +/** + * ptp_clock_event() - notify the PTP layer about an event + * + * @ptp: The clock obtained from ptp_clock_register(). + * @event: Message structure describing the event. + */ + +extern void ptp_clock_event(struct ptp_clock *ptp, + struct ptp_clock_event *event); + +/** + * ptp_clock_index() - obtain the device index of a PTP clock + * + * @ptp: The clock obtained from ptp_clock_register(). + */ + +extern int ptp_clock_index(struct ptp_clock *ptp); + +/** + * scaled_ppm_to_ppb() - convert scaled ppm to ppb + * + * @ppm: Parts per million, but with a 16 bit binary fractional field + */ + +extern long scaled_ppm_to_ppb(long ppm); + +/** + * ptp_find_pin() - obtain the pin index of a given auxiliary function + * + * @ptp: The clock obtained from ptp_clock_register(). + * @func: One of the ptp_pin_function enumerated values. + * @chan: The particular functional channel to find. + * Return: Pin index in the range of zero to ptp_clock_caps.n_pins - 1, + * or -1 if the auxiliary function cannot be found. + */ + +int ptp_find_pin(struct ptp_clock *ptp, + enum ptp_pin_function func, unsigned int chan); + +/** + * ptp_schedule_worker() - schedule ptp auxiliary work + * + * @ptp: The clock obtained from ptp_clock_register(). + * @delay: number of jiffies to wait before queuing + * See kthread_queue_delayed_work() for more info. 
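As an aside, a sketch of the minimal driver shape implied by struct ptp_clock_info and ptp_clock_register() above; every my_* name is hypothetical, only three callbacks are stubbed, and the snippet is not part of the patch.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

/* Hypothetical driver-private wrapper around the capabilities struct. */
struct my_phc {
	struct ptp_clock_info caps;
	struct ptp_clock *clock;
};

static int my_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	/* struct my_phc *phc = container_of(ptp, struct my_phc, caps); */
	/* shift the hardware clock by 'delta' nanoseconds here */
	return 0;
}

static int my_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	*ts = ns_to_timespec64(0);	/* read the hardware counter here */
	return 0;
}

static int my_settime64(struct ptp_clock_info *ptp,
			const struct timespec64 *ts)
{
	return 0;			/* load the hardware counter here */
}

static int my_phc_register(struct device *dev, struct my_phc *phc)
{
	phc->caps = (struct ptp_clock_info) {
		.owner	   = THIS_MODULE,
		.name	   = "my_phc",
		.max_adj   = 500000,	/* device specific, in parts per billion */
		.adjtime   = my_adjtime,
		.gettime64 = my_gettime64,
		.settime64 = my_settime64,
	};

	phc->clock = ptp_clock_register(&phc->caps, dev);
	if (IS_ERR(phc->clock))
		return PTR_ERR(phc->clock);
	/* NULL means PHC support is not configured; see the comment above. */
	return 0;
}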
+ */ + +int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay); + +#else +static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, + struct device *parent) +{ return NULL; } +static inline int ptp_clock_unregister(struct ptp_clock *ptp) +{ return 0; } +static inline void ptp_clock_event(struct ptp_clock *ptp, + struct ptp_clock_event *event) +{ } +static inline int ptp_clock_index(struct ptp_clock *ptp) +{ return -1; } +static inline int ptp_find_pin(struct ptp_clock *ptp, + enum ptp_pin_function func, unsigned int chan) +{ return -1; } +static inline int ptp_schedule_worker(struct ptp_clock *ptp, + unsigned long delay) +{ return -EOPNOTSUPP; } + +#endif + +#endif diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h new file mode 100644 index 000000000..186cd8e97 --- /dev/null +++ b/include/linux/ptr_ring.h @@ -0,0 +1,678 @@ +/* + * Definitions for the 'struct ptr_ring' datastructure. + * + * Author: + * Michael S. Tsirkin + * + * Copyright (C) 2016 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This is a limited-size FIFO maintaining pointers in FIFO order, with + * one CPU producing entries and another consuming entries from a FIFO. + * + * This implementation tries to minimize cache-contention when there is a + * single producer and a single consumer CPU. + */ + +#ifndef _LINUX_PTR_RING_H +#define _LINUX_PTR_RING_H 1 + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include +#include +#endif + +struct ptr_ring { + int producer ____cacheline_aligned_in_smp; + spinlock_t producer_lock; + int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */ + int consumer_tail; /* next entry to invalidate */ + spinlock_t consumer_lock; + /* Shared consumer/producer data */ + /* Read-only by both the producer and the consumer */ + int size ____cacheline_aligned_in_smp; /* max entries in queue */ + int batch; /* number of entries to consume in a batch */ + void **queue; +}; + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). + * + * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock: + * see e.g. ptr_ring_full. + */ +static inline bool __ptr_ring_full(struct ptr_ring *r) +{ + return r->queue[r->producer]; +} + +static inline bool ptr_ring_full(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline bool ptr_ring_full_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_full(r); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_full_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_full(r); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +/* Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). Callers must hold producer_lock. 
+ * Callers are responsible for making sure pointer that is being queued + * points to a valid data. + */ +static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + if (unlikely(!r->size) || r->queue[r->producer]) + return -ENOSPC; + + /* Make sure the pointer we are storing points to a valid data. */ + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ + smp_wmb(); + + WRITE_ONCE(r->queue[r->producer++], ptr); + if (unlikely(r->producer >= r->size)) + r->producer = 0; + return 0; +} + +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * consume in interrupt or BH context, you must disable interrupts/BH when + * calling this. + */ +static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_irq(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irq(&r->producer_lock); + + return ret; +} + +static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->producer_lock, flags); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_irqrestore(&r->producer_lock, flags); + + return ret; +} + +static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) +{ + int ret; + + spin_lock_bh(&r->producer_lock); + ret = __ptr_ring_produce(r, ptr); + spin_unlock_bh(&r->producer_lock); + + return ret; +} + +static inline void *__ptr_ring_peek(struct ptr_ring *r) +{ + if (likely(r->size)) + return READ_ONCE(r->queue[r->consumer_head]); + return NULL; +} + +/* + * Test ring empty status without taking any locks. + * + * NB: This is only safe to call if ring is never resized. + * + * However, if some other CPU consumes ring entries at the same time, the value + * returned is not guaranteed to be correct. + * + * In this case - to avoid incorrectly detecting the ring + * as empty - the CPU consuming the ring entries is responsible + * for either consuming all ring entries until the ring is empty, + * or synchronizing with some other CPU and causing it to + * re-test __ptr_ring_empty and/or consume the ring enteries + * after the synchronization point. + * + * Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). 
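A small usage sketch for the produce side above, assuming a single producer that simply drops on a full ring; my_item_free() is hypothetical and the snippet is not part of the patch.

/* Hypothetical destructor for whatever objects the ring carries. */
static void my_item_free(void *item);

static void my_produce_or_drop(struct ptr_ring *r, void *item)
{
	/* ptr_ring_produce() returns -ENOSPC when the slot is still in use */
	if (ptr_ring_produce(r, item))
		my_item_free(item);
}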
+ */ +static inline bool __ptr_ring_empty(struct ptr_ring *r) +{ + if (likely(r->size)) + return !r->queue[READ_ONCE(r->consumer_head)]; + return true; +} + +static inline bool ptr_ring_empty(struct ptr_ring *r) +{ + bool ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_irq(struct ptr_ring *r) +{ + bool ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline bool ptr_ring_empty_any(struct ptr_ring *r) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_empty(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline bool ptr_ring_empty_bh(struct ptr_ring *r) +{ + bool ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_empty(r); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + +/* Must only be called after __ptr_ring_peek returned !NULL */ +static inline void __ptr_ring_discard_one(struct ptr_ring *r) +{ + /* Fundamentally, what we want to do is update consumer + * index and zero out the entry so producer can reuse it. + * Doing it naively at each consume would be as simple as: + * consumer = r->consumer; + * r->queue[consumer++] = NULL; + * if (unlikely(consumer >= r->size)) + * consumer = 0; + * r->consumer = consumer; + * but that is suboptimal when the ring is full as producer is writing + * out new entries in the same cache line. Defer these updates until a + * batch of entries has been consumed. + */ + /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty + * to work correctly. + */ + int consumer_head = r->consumer_head; + int head = consumer_head++; + + /* Once we have processed enough entries invalidate them in + * the ring all at once so producer can reuse their space in the ring. + * We also do this when we reach end of the ring - not mandatory + * but helps keep the implementation simple. + */ + if (unlikely(consumer_head - r->consumer_tail >= r->batch || + consumer_head >= r->size)) { + /* Zero out entries in the reverse order: this way we touch the + * cache line that producer might currently be reading the last; + * producer won't make progress and touch other cache lines + * besides the first one until we write out all entries. + */ + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = consumer_head; + } + if (unlikely(consumer_head >= r->size)) { + consumer_head = 0; + r->consumer_tail = 0; + } + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, consumer_head); +} + +static inline void *__ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + /* The READ_ONCE in __ptr_ring_peek guarantees that anyone + * accessing data through the pointer is up to date. Pairs + * with smp_wmb in __ptr_ring_produce. + */ + ptr = __ptr_ring_peek(r); + if (ptr) + __ptr_ring_discard_one(r); + + return ptr; +} + +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + void *ptr; + int i; + + for (i = 0; i < n; i++) { + ptr = __ptr_ring_consume(r); + if (!ptr) + break; + array[i] = ptr; + } + + return i; +} + +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * call this in interrupt or BH context, you must disable interrupts/BH when + * producing. 
+ */ +static inline void *ptr_ring_consume(struct ptr_ring *r) +{ + void *ptr; + + spin_lock(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_irq(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_irq(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_irq(&r->consumer_lock); + + return ptr; +} + +static inline void *ptr_ring_consume_any(struct ptr_ring *r) +{ + unsigned long flags; + void *ptr; + + spin_lock_irqsave(&r->consumer_lock, flags); + ptr = __ptr_ring_consume(r); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ptr; +} + +static inline void *ptr_ring_consume_bh(struct ptr_ring *r) +{ + void *ptr; + + spin_lock_bh(&r->consumer_lock); + ptr = __ptr_ring_consume(r); + spin_unlock_bh(&r->consumer_lock); + + return ptr; +} + +static inline int ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, + void **array, int n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + +/* Cast to structure type and call a function without discarding from FIFO. + * Function must return a value. + * Callers must take consumer_lock. + */ +#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r))) + +#define PTR_RING_PEEK_CALL(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irq(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_BH(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + \ + spin_lock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_bh(&(r)->consumer_lock); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \ + typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ + unsigned long __PTR_RING_PEEK_CALL_f;\ + \ + spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ + spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ + __PTR_RING_PEEK_CALL_v; \ +}) + +/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See + * documentation for vmalloc for which of them are legal. 
+ */ +static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) +{ + if (size > KMALLOC_MAX_SIZE / sizeof(void *)) + return NULL; + return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); +} + +static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) +{ + r->size = size; + r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue)); + /* We need to set batch at least to 1 to make logic + * in __ptr_ring_discard_one work correctly. + * Batching too much (because ring is small) would cause a lot of + * burstiness. Needs tuning, for now disable batching. + */ + if (r->batch > r->size / 2 || !r->batch) + r->batch = 1; +} + +static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) +{ + r->queue = __ptr_ring_init_queue_alloc(size, gfp); + if (!r->queue) + return -ENOMEM; + + __ptr_ring_set_size(r, size); + r->producer = r->consumer_head = r->consumer_tail = 0; + spin_lock_init(&r->producer_lock); + spin_lock_init(&r->consumer_lock); + + return 0; +} + +/* + * Return entries into ring. Destroy entries that don't fit. + * + * Note: this is expected to be a rare slow path operation. + * + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, + void (*destroy)(void *)) +{ + unsigned long flags; + int head; + + spin_lock_irqsave(&r->consumer_lock, flags); + spin_lock(&r->producer_lock); + + if (!r->size) + goto done; + + /* + * Clean out buffered entries (for simplicity). This way following code + * can test entries for NULL and if not assume they are valid. + */ + head = r->consumer_head - 1; + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = r->consumer_head; + + /* + * Go over entries in batch, start moving head back and copy entries. + * Stop when we run into previously unconsumed entries. + */ + while (n) { + head = r->consumer_head - 1; + if (head < 0) + head = r->size - 1; + if (r->queue[head]) { + /* This batch entry will have to be destroyed. */ + goto done; + } + r->queue[head] = batch[--n]; + r->consumer_tail = head; + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, head); + } + +done: + /* Destroy all entries left in the batch. */ + while (n) + destroy(batch[--n]); + spin_unlock(&r->producer_lock); + spin_unlock_irqrestore(&r->consumer_lock, flags); +} + +static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, + int size, gfp_t gfp, + void (*destroy)(void *)) +{ + int producer = 0; + void **old; + void *ptr; + + while ((ptr = __ptr_ring_consume(r))) + if (producer < size) + queue[producer++] = ptr; + else if (destroy) + destroy(ptr); + + if (producer >= size) + producer = 0; + __ptr_ring_set_size(r, size); + r->producer = producer; + r->consumer_head = 0; + r->consumer_tail = 0; + old = r->queue; + r->queue = queue; + + return old; +} + +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. 
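A consumer-side sketch tying together ptr_ring_init() above with ptr_ring_consume() and ptr_ring_cleanup() (the latter is defined further below); my_item_handle() and my_item_free() are hypothetical, and the snippet is not part of the patch.

/* Hypothetical per-item callbacks. */
static void my_item_handle(void *item);
static void my_item_free(void *item);

static int my_ring_demo(struct ptr_ring *r, int size)
{
	void *ptr;
	int err;

	err = ptr_ring_init(r, size, GFP_KERNEL);
	if (err)
		return err;			/* -ENOMEM */

	/* ... a producer on another CPU calls ptr_ring_produce() ... */

	while ((ptr = ptr_ring_consume(r)))	/* NULL when the ring is empty */
		my_item_handle(ptr);

	ptr_ring_cleanup(r, my_item_free);
	return 0;
}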
+ */ +static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, + void (*destroy)(void *)) +{ + unsigned long flags; + void **queue = __ptr_ring_init_queue_alloc(size, gfp); + void **old; + + if (!queue) + return -ENOMEM; + + spin_lock_irqsave(&(r)->consumer_lock, flags); + spin_lock(&(r)->producer_lock); + + old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); + + spin_unlock(&(r)->producer_lock); + spin_unlock_irqrestore(&(r)->consumer_lock, flags); + + kvfree(old); + + return 0; +} + +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, + unsigned int nrings, + int size, + gfp_t gfp, void (*destroy)(void *)) +{ + unsigned long flags; + void ***queues; + int i; + + queues = kmalloc_array(nrings, sizeof(*queues), gfp); + if (!queues) + goto noqueues; + + for (i = 0; i < nrings; ++i) { + queues[i] = __ptr_ring_init_queue_alloc(size, gfp); + if (!queues[i]) + goto nomem; + } + + for (i = 0; i < nrings; ++i) { + spin_lock_irqsave(&(rings[i])->consumer_lock, flags); + spin_lock(&(rings[i])->producer_lock); + queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], + size, gfp, destroy); + spin_unlock(&(rings[i])->producer_lock); + spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); + } + + for (i = 0; i < nrings; ++i) + kvfree(queues[i]); + + kfree(queues); + + return 0; + +nomem: + while (--i >= 0) + kvfree(queues[i]); + + kfree(queues); + +noqueues: + return -ENOMEM; +} + +static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) +{ + void *ptr; + + if (destroy) + while ((ptr = ptr_ring_consume(r))) + destroy(ptr); + kvfree(r->queue); +} + +#endif /* _LINUX_PTR_RING_H */ diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h new file mode 100644 index 000000000..d41de55cd --- /dev/null +++ b/include/linux/ptrace.h @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PTRACE_H +#define _LINUX_PTRACE_H + +#include /* For unlikely. */ +#include /* For struct task_struct. */ +#include /* For send_sig(), same_thread_group(), etc. */ +#include /* for IS_ERR_VALUE */ +#include /* For BUG_ON. */ +#include /* For task_active_pid_ns. */ +#include + +extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, + void *buf, int len, unsigned int gup_flags); + +/* + * Ptrace flags + * + * The owner ship rules for task->ptrace which holds the ptrace + * flags is simple. When a task is running it owns it's task->ptrace + * flags. When the a task is stopped the ptracer owns task->ptrace. 
+ */ + +#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ +#define PT_PTRACED 0x00000001 +#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */ + +#define PT_OPT_FLAG_SHIFT 3 +/* PT_TRACE_* event enable flags */ +#define PT_EVENT_FLAG(event) (1 << (PT_OPT_FLAG_SHIFT + (event))) +#define PT_TRACESYSGOOD PT_EVENT_FLAG(0) +#define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK) +#define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK) +#define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE) +#define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC) +#define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE) +#define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT) +#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP) + +#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT) +#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT) + +extern long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data); +extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); +extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); +extern void ptrace_disable(struct task_struct *); +extern int ptrace_request(struct task_struct *child, long request, + unsigned long addr, unsigned long data); +extern void ptrace_notify(int exit_code); +extern void __ptrace_link(struct task_struct *child, + struct task_struct *new_parent, + const struct cred *ptracer_cred); +extern void __ptrace_unlink(struct task_struct *child); +extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); +#define PTRACE_MODE_READ 0x01 +#define PTRACE_MODE_ATTACH 0x02 +#define PTRACE_MODE_NOAUDIT 0x04 +#define PTRACE_MODE_FSCREDS 0x08 +#define PTRACE_MODE_REALCREDS 0x10 + +/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ +#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) +#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) +#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) +#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) + +/** + * ptrace_may_access - check whether the caller is permitted to access + * a target task. + * @task: target task + * @mode: selects type of access and caller credentials + * + * Returns true on success, false on denial. + * + * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must + * be set in @mode to specify whether the access was requested through + * a filesystem syscall (should use effective capabilities and fsuid + * of the caller) or through an explicit syscall such as + * process_vm_writev or ptrace (and should use the real credentials). 
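To illustrate the mode flags above, a sketch of a filesystem-style check (e.g. a procfs handler) using the FSCREDS variant documented for ptrace_may_access(); my_show_task_info() is hypothetical and the snippet is not part of the patch.

static int my_show_task_info(struct task_struct *task)
{
	/* filesystem path: effective capabilities and fsuid of the caller */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		return -EACCES;

	/* safe to expose details about @task to the caller here */
	return 0;
}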
+ */ +extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); + +static inline int ptrace_reparented(struct task_struct *child) +{ + return !same_thread_group(child->real_parent, child->parent); +} + +static inline void ptrace_unlink(struct task_struct *child) +{ + if (unlikely(child->ptrace)) + __ptrace_unlink(child); +} + +int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, + unsigned long data); +int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, + unsigned long data); + +/** + * ptrace_parent - return the task that is tracing the given task + * @task: task to consider + * + * Returns %NULL if no one is tracing @task, or the &struct task_struct + * pointer to its tracer. + * + * Must called under rcu_read_lock(). The pointer returned might be kept + * live only by RCU. During exec, this may be called with task_lock() held + * on @task, still held from when check_unsafe_exec() was called. + */ +static inline struct task_struct *ptrace_parent(struct task_struct *task) +{ + if (unlikely(task->ptrace)) + return rcu_dereference(task->parent); + return NULL; +} + +/** + * ptrace_event_enabled - test whether a ptrace event is enabled + * @task: ptracee of interest + * @event: %PTRACE_EVENT_* to test + * + * Test whether @event is enabled for ptracee @task. + * + * Returns %true if @event is enabled, %false otherwise. + */ +static inline bool ptrace_event_enabled(struct task_struct *task, int event) +{ + return task->ptrace & PT_EVENT_FLAG(event); +} + +/** + * ptrace_event - possibly stop for a ptrace event notification + * @event: %PTRACE_EVENT_* value to report + * @message: value for %PTRACE_GETEVENTMSG to return + * + * Check whether @event is enabled and, if so, report @event and @message + * to the ptrace parent. + * + * Called without locks. + */ +static inline void ptrace_event(int event, unsigned long message) +{ + if (unlikely(ptrace_event_enabled(current, event))) { + current->ptrace_message = message; + ptrace_notify((event << 8) | SIGTRAP); + } else if (event == PTRACE_EVENT_EXEC) { + /* legacy EXEC report via SIGTRAP */ + if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED) + send_sig(SIGTRAP, current, 0); + } +} + +/** + * ptrace_event_pid - possibly stop for a ptrace event notification + * @event: %PTRACE_EVENT_* value to report + * @pid: process identifier for %PTRACE_GETEVENTMSG to return + * + * Check whether @event is enabled and, if so, report @event and @pid + * to the ptrace parent. @pid is reported as the pid_t seen from the + * the ptrace parent's pid namespace. + * + * Called without locks. + */ +static inline void ptrace_event_pid(int event, struct pid *pid) +{ + /* + * FIXME: There's a potential race if a ptracer in a different pid + * namespace than parent attaches between computing message below and + * when we acquire tasklist_lock in ptrace_stop(). If this happens, + * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG. + */ + unsigned long message = 0; + struct pid_namespace *ns; + + rcu_read_lock(); + ns = task_active_pid_ns(rcu_dereference(current->parent)); + if (ns) + message = pid_nr_ns(pid, ns); + rcu_read_unlock(); + + ptrace_event(event, message); +} + +/** + * ptrace_init_task - initialize ptrace state for a new child + * @child: new child task + * @ptrace: true if child should be ptrace'd by parent's tracer + * + * This is called immediately after adding @child to its parent's children + * list. @ptrace is false in the normal case, and true to ptrace @child. 
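For orientation, a sketch of how a core-kernel path reports an event through ptrace_event_pid() above, mirroring what the fork path does with PTRACE_EVENT_FORK; my_report_fork() is hypothetical and the snippet is not part of the patch.

static void my_report_fork(struct pid *child_pid)
{
	/* ptrace_event() checks ptrace_event_enabled() before stopping */
	ptrace_event_pid(PTRACE_EVENT_FORK, child_pid);
}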
+ * + * Called with current's siglock and write_lock_irq(&tasklist_lock) held. + */ +static inline void ptrace_init_task(struct task_struct *child, bool ptrace) +{ + INIT_LIST_HEAD(&child->ptrace_entry); + INIT_LIST_HEAD(&child->ptraced); + child->jobctl = 0; + child->ptrace = 0; + child->parent = child->real_parent; + + if (unlikely(ptrace) && current->ptrace) { + child->ptrace = current->ptrace; + __ptrace_link(child, current->parent, current->ptracer_cred); + + if (child->ptrace & PT_SEIZED) + task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); + else + sigaddset(&child->pending.signal, SIGSTOP); + } + else + child->ptracer_cred = NULL; +} + +/** + * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped + * @task: task in %EXIT_DEAD state + * + * Called with write_lock(&tasklist_lock) held. + */ +static inline void ptrace_release_task(struct task_struct *task) +{ + BUG_ON(!list_empty(&task->ptraced)); + ptrace_unlink(task); + BUG_ON(!list_empty(&task->ptrace_entry)); +} + +#ifndef force_successful_syscall_return +/* + * System call handlers that, upon successful completion, need to return a + * negative value should call force_successful_syscall_return() right before + * returning. On architectures where the syscall convention provides for a + * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly + * others), this macro can be used to ensure that the error flag will not get + * set. On architectures which do not support a separate error flag, the macro + * is a no-op and the spurious error condition needs to be filtered out by some + * other means (e.g., in user-level, by passing an extra argument to the + * syscall handler, or something along those lines). + */ +#define force_successful_syscall_return() do { } while (0) +#endif + +#ifndef is_syscall_success +/* + * On most systems we can tell if a syscall is a success based on if the retval + * is an error value. On some systems like ia64 and powerpc they have different + * indicators of success/failure and must define their own. + */ +#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs)))) +#endif + +/* + * should define the following things inside #ifdef __KERNEL__. + * + * These do-nothing inlines are used when the arch does not + * implement single-step. The kerneldoc comments are here + * to document the interface for all arch definitions. + */ + +#ifndef arch_has_single_step +/** + * arch_has_single_step - does this CPU support user-mode single-step? + * + * If this is defined, then there must be function declarations or + * inlines for user_enable_single_step() and user_disable_single_step(). + * arch_has_single_step() should evaluate to nonzero iff the machine + * supports instruction single-step for user mode. + * It can be a constant or it can test a CPU feature bit. + */ +#define arch_has_single_step() (0) + +/** + * user_enable_single_step - single-step in user-mode task + * @task: either current or a task stopped in %TASK_TRACED + * + * This can only be called when arch_has_single_step() has returned nonzero. + * Set @task so that when it returns to user mode, it will trap after the + * next single instruction executes. If arch_has_block_step() is defined, + * this must clear the effects of user_enable_block_step() too. + */ +static inline void user_enable_single_step(struct task_struct *task) +{ + BUG(); /* This can never be called. 
*/ +} + +/** + * user_disable_single_step - cancel user-mode single-step + * @task: either current or a task stopped in %TASK_TRACED + * + * Clear @task of the effects of user_enable_single_step() and + * user_enable_block_step(). This can be called whether or not either + * of those was ever called on @task, and even if arch_has_single_step() + * returned zero. + */ +static inline void user_disable_single_step(struct task_struct *task) +{ +} +#else +extern void user_enable_single_step(struct task_struct *); +extern void user_disable_single_step(struct task_struct *); +#endif /* arch_has_single_step */ + +#ifndef arch_has_block_step +/** + * arch_has_block_step - does this CPU support user-mode block-step? + * + * If this is defined, then there must be a function declaration or inline + * for user_enable_block_step(), and arch_has_single_step() must be defined + * too. arch_has_block_step() should evaluate to nonzero iff the machine + * supports step-until-branch for user mode. It can be a constant or it + * can test a CPU feature bit. + */ +#define arch_has_block_step() (0) + +/** + * user_enable_block_step - step until branch in user-mode task + * @task: either current or a task stopped in %TASK_TRACED + * + * This can only be called when arch_has_block_step() has returned nonzero, + * and will never be called when single-instruction stepping is being used. + * Set @task so that when it returns to user mode, it will trap after the + * next branch or trap taken. + */ +static inline void user_enable_block_step(struct task_struct *task) +{ + BUG(); /* This can never be called. */ +} +#else +extern void user_enable_block_step(struct task_struct *); +#endif /* arch_has_block_step */ + +#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO +extern void user_single_step_siginfo(struct task_struct *tsk, + struct pt_regs *regs, siginfo_t *info); +#else +static inline void user_single_step_siginfo(struct task_struct *tsk, + struct pt_regs *regs, siginfo_t *info) +{ + info->si_signo = SIGTRAP; +} +#endif + +#ifndef arch_ptrace_stop_needed +/** + * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called + * @code: current->exit_code value ptrace will stop with + * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with + * + * This is called with the siglock held, to decide whether or not it's + * necessary to release the siglock and call arch_ptrace_stop() with the + * same @code and @info arguments. It can be defined to a constant if + * arch_ptrace_stop() is never required, or always is. On machines where + * this makes sense, it should be defined to a quick test to optimize out + * calling arch_ptrace_stop() when it would be superfluous. For example, + * if the thread has not been back to user mode since the last stop, the + * thread state might indicate that nothing needs to be done. + * + * This is guaranteed to be invoked once before a task stops for ptrace and + * may include arch-specific operations necessary prior to a ptrace stop. + */ +#define arch_ptrace_stop_needed(code, info) (0) +#endif + +#ifndef arch_ptrace_stop +/** + * arch_ptrace_stop - Do machine-specific work before stopping for ptrace + * @code: current->exit_code value ptrace will stop with + * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with + * + * This is called with no locks held when arch_ptrace_stop_needed() has + * just returned nonzero. It is allowed to block, e.g. for user memory + * access. The arch can have machine-specific work to be done before + * ptrace stops. 
On ia64, register backing store gets written back to user + * memory here. Since this can be costly (requires dropping the siglock), + * we only do it when the arch requires it for this particular stop, as + * indicated by arch_ptrace_stop_needed(). + */ +#define arch_ptrace_stop(code, info) do { } while (0) +#endif + +#ifndef current_pt_regs +#define current_pt_regs() task_pt_regs(current) +#endif + +/* + * unlike current_pt_regs(), this one is equal to task_pt_regs(current) + * on *all* architectures; the only reason to have a per-arch definition + * is optimisation. + */ +#ifndef signal_pt_regs +#define signal_pt_regs() task_pt_regs(current) +#endif + +#ifndef current_user_stack_pointer +#define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) +#endif + +extern int task_current_syscall(struct task_struct *target, long *callno, + unsigned long args[6], unsigned int maxargs, + unsigned long *sp, unsigned long *pc); + +#endif diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h new file mode 100644 index 000000000..b950e961c --- /dev/null +++ b/include/linux/purgatory.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PURGATORY_H +#define _LINUX_PURGATORY_H + +#include +#include +#include + +struct kexec_sha_region { + unsigned long start; + unsigned long len; +}; + +/* + * These forward declarations serve two purposes: + * + * 1) Make sparse happy when checking arch/purgatory + * 2) Document that these are required to be global so the symbol + * lookup in kexec works + */ +extern struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX]; +extern u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE]; + +#endif diff --git a/include/linux/pvclock_gtod.h b/include/linux/pvclock_gtod.h new file mode 100644 index 000000000..f63549581 --- /dev/null +++ b/include/linux/pvclock_gtod.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PVCLOCK_GTOD_H +#define _PVCLOCK_GTOD_H + +#include + +/* + * The pvclock gtod notifier is called when the system time is updated + * and is used to keep guest time synchronized with host time. + * + * The 'action' parameter in the notifier function is false (0), or + * true (non-zero) if system time was stepped. + */ +extern int pvclock_gtod_register_notifier(struct notifier_block *nb); +extern int pvclock_gtod_unregister_notifier(struct notifier_block *nb); + +#endif /* _PVCLOCK_GTOD_H */ diff --git a/include/linux/pwm.h b/include/linux/pwm.h new file mode 100644 index 000000000..bd7d611d6 --- /dev/null +++ b/include/linux/pwm.h @@ -0,0 +1,652 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PWM_H +#define __LINUX_PWM_H + +#include +#include +#include + +struct pwm_capture; +struct seq_file; + +struct pwm_chip; + +/** + * enum pwm_polarity - polarity of a PWM signal + * @PWM_POLARITY_NORMAL: a high signal for the duration of the duty- + * cycle, followed by a low signal for the remainder of the pulse + * period + * @PWM_POLARITY_INVERSED: a low signal for the duration of the duty- + * cycle, followed by a high signal for the remainder of the pulse + * period + */ +enum pwm_polarity { + PWM_POLARITY_NORMAL, + PWM_POLARITY_INVERSED, +}; + +/** + * struct pwm_args - board-dependent PWM arguments + * @period: reference period + * @polarity: reference polarity + * + * This structure describes board-dependent arguments attached to a PWM + * device. These arguments are usually retrieved from the PWM lookup table or + * device tree. 
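Stepping back to the pvclock_gtod interface introduced earlier in this hunk, a registration sketch assuming a paravirtual time source; all my_* names are hypothetical and the snippet is not part of the patch.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/pvclock_gtod.h>

static int my_gtod_notify(struct notifier_block *nb, unsigned long action,
			  void *priv)
{
	/* resynchronize guest/host time here; action != 0 means a step */
	return NOTIFY_OK;
}

static struct notifier_block my_gtod_nb = {
	.notifier_call = my_gtod_notify,
};

static int __init my_time_init(void)
{
	return pvclock_gtod_register_notifier(&my_gtod_nb);
}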
+ * + * Do not confuse this with the PWM state: PWM arguments represent the initial + * configuration that users want to use on this PWM device rather than the + * current PWM hardware state. + */ +struct pwm_args { + unsigned int period; + enum pwm_polarity polarity; +}; + +enum { + PWMF_REQUESTED = 1 << 0, + PWMF_EXPORTED = 1 << 1, +}; + +/* + * struct pwm_state - state of a PWM channel + * @period: PWM period (in nanoseconds) + * @duty_cycle: PWM duty cycle (in nanoseconds) + * @polarity: PWM polarity + * @enabled: PWM enabled status + */ +struct pwm_state { + unsigned int period; + unsigned int duty_cycle; + enum pwm_polarity polarity; + bool enabled; +}; + +/** + * struct pwm_device - PWM channel object + * @label: name of the PWM device + * @flags: flags associated with the PWM device + * @hwpwm: per-chip relative index of the PWM device + * @pwm: global index of the PWM device + * @chip: PWM chip providing this PWM device + * @chip_data: chip-private data associated with the PWM device + * @args: PWM arguments + * @state: curent PWM channel state + */ +struct pwm_device { + const char *label; + unsigned long flags; + unsigned int hwpwm; + unsigned int pwm; + struct pwm_chip *chip; + void *chip_data; + + struct pwm_args args; + struct pwm_state state; +}; + +/** + * pwm_get_state() - retrieve the current PWM state + * @pwm: PWM device + * @state: state to fill with the current PWM state + */ +static inline void pwm_get_state(const struct pwm_device *pwm, + struct pwm_state *state) +{ + *state = pwm->state; +} + +static inline bool pwm_is_enabled(const struct pwm_device *pwm) +{ + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.enabled; +} + +static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) +{ + if (pwm) + pwm->state.period = period; +} + +static inline unsigned int pwm_get_period(const struct pwm_device *pwm) +{ + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.period; +} + +static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) +{ + if (pwm) + pwm->state.duty_cycle = duty; +} + +static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) +{ + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.duty_cycle; +} + +static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm) +{ + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return state.polarity; +} + +static inline void pwm_get_args(const struct pwm_device *pwm, + struct pwm_args *args) +{ + *args = pwm->args; +} + +/** + * pwm_init_state() - prepare a new state to be applied with pwm_apply_state() + * @pwm: PWM device + * @state: state to fill with the prepared PWM state + * + * This functions prepares a state that can later be tweaked and applied + * to the PWM device with pwm_apply_state(). This is a convenient function + * that first retrieves the current PWM state and the replaces the period + * and polarity fields with the reference values defined in pwm->args. + * Once the function returns, you can adjust the ->enabled and ->duty_cycle + * fields according to your needs before calling pwm_apply_state(). + * + * ->duty_cycle is initially set to zero to avoid cases where the current + * ->duty_cycle value exceed the pwm_args->period one, which would trigger + * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle + * first. 
+ */ +static inline void pwm_init_state(const struct pwm_device *pwm, + struct pwm_state *state) +{ + struct pwm_args args; + + /* First get the current state. */ + pwm_get_state(pwm, state); + + /* Then fill it with the reference config */ + pwm_get_args(pwm, &args); + + state->period = args.period; + state->polarity = args.polarity; + state->duty_cycle = 0; +} + +/** + * pwm_get_relative_duty_cycle() - Get a relative duty cycle value + * @state: PWM state to extract the duty cycle from + * @scale: target scale of the relative duty cycle + * + * This functions converts the absolute duty cycle stored in @state (expressed + * in nanosecond) into a value relative to the period. + * + * For example if you want to get the duty_cycle expressed in percent, call: + * + * pwm_get_state(pwm, &state); + * duty = pwm_get_relative_duty_cycle(&state, 100); + */ +static inline unsigned int +pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale) +{ + if (!state->period) + return 0; + + return DIV_ROUND_CLOSEST_ULL((u64)state->duty_cycle * scale, + state->period); +} + +/** + * pwm_set_relative_duty_cycle() - Set a relative duty cycle value + * @state: PWM state to fill + * @duty_cycle: relative duty cycle value + * @scale: scale in which @duty_cycle is expressed + * + * This functions converts a relative into an absolute duty cycle (expressed + * in nanoseconds), and puts the result in state->duty_cycle. + * + * For example if you want to configure a 50% duty cycle, call: + * + * pwm_init_state(pwm, &state); + * pwm_set_relative_duty_cycle(&state, 50, 100); + * pwm_apply_state(pwm, &state); + * + * This functions returns -EINVAL if @duty_cycle and/or @scale are + * inconsistent (@scale == 0 or @duty_cycle > @scale). + */ +static inline int +pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, + unsigned int scale) +{ + if (!scale || duty_cycle > scale) + return -EINVAL; + + state->duty_cycle = DIV_ROUND_CLOSEST_ULL((u64)duty_cycle * + state->period, + scale); + + return 0; +} + +/** + * struct pwm_ops - PWM controller operations + * @request: optional hook for requesting a PWM + * @free: optional hook for freeing a PWM + * @config: configure duty cycles and period length for this PWM + * @set_polarity: configure the polarity of this PWM + * @capture: capture and report PWM signal + * @enable: enable PWM output toggling + * @disable: disable PWM output toggling + * @apply: atomically apply a new PWM config. The state argument + * should be adjusted with the real hardware config (if the + * approximate the period or duty_cycle value, state should + * reflect it) + * @get_state: get the current PWM state. This function is only + * called once per PWM device when the PWM chip is + * registered. 
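A consumer-side sketch of the atomic pattern the comments above describe: start from pwm_init_state(), set a 50% relative duty cycle, then hand the state to pwm_apply_state() (declared further below); my_pwm_half_duty() is hypothetical and the snippet is not part of the patch.

static int my_pwm_half_duty(struct pwm_device *pwm)
{
	struct pwm_state state;
	int err;

	pwm_init_state(pwm, &state);	/* period/polarity from pwm->args */

	err = pwm_set_relative_duty_cycle(&state, 50, 100);
	if (err)
		return err;		/* -EINVAL on a bad scale */

	state.enabled = true;
	return pwm_apply_state(pwm, &state);
}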
+ * @dbg_show: optional routine to show contents in debugfs + * @owner: helps prevent removal of modules exporting active PWMs + */ +struct pwm_ops { + int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); + void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); + int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns); + int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, + enum pwm_polarity polarity); + int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_capture *result, unsigned long timeout); + int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); + void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); + int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); + void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); +#ifdef CONFIG_DEBUG_FS + void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s); +#endif + struct module *owner; +}; + +/** + * struct pwm_chip - abstract a PWM controller + * @dev: device providing the PWMs + * @list: list node for internal use + * @ops: callbacks for this PWM controller + * @base: number of first PWM controlled by this chip + * @npwm: number of PWMs controlled by this chip + * @pwms: array of PWM devices allocated by the framework + * @of_xlate: request a PWM device given a device tree PWM specifier + * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier + */ +struct pwm_chip { + struct device *dev; + struct list_head list; + const struct pwm_ops *ops; + int base; + unsigned int npwm; + + struct pwm_device *pwms; + + struct pwm_device * (*of_xlate)(struct pwm_chip *pc, + const struct of_phandle_args *args); + unsigned int of_pwm_n_cells; +}; + +/** + * struct pwm_capture - PWM capture data + * @period: period of the PWM signal (in nanoseconds) + * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) + */ +struct pwm_capture { + unsigned int period; + unsigned int duty_cycle; +}; + +#if IS_ENABLED(CONFIG_PWM) +/* PWM user APIs */ +struct pwm_device *pwm_request(int pwm_id, const char *label); +void pwm_free(struct pwm_device *pwm); +int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state); +int pwm_adjust_config(struct pwm_device *pwm); + +/** + * pwm_config() - change a PWM device configuration + * @pwm: PWM device + * @duty_ns: "on" time (in nanoseconds) + * @period_ns: duration (in nanoseconds) of one cycle + * + * Returns: 0 on success or a negative error code on failure. + */ +static inline int pwm_config(struct pwm_device *pwm, int duty_ns, + int period_ns) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + if (duty_ns < 0 || period_ns < 0) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.duty_cycle == duty_ns && state.period == period_ns) + return 0; + + state.duty_cycle = duty_ns; + state.period = period_ns; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_set_polarity() - configure the polarity of a PWM signal + * @pwm: PWM device + * @polarity: new polarity of the PWM signal + * + * Note that the polarity cannot be configured while the PWM device is + * enabled. + * + * Returns: 0 on success or a negative error code on failure. 
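A provider-side sketch of the ops structure above with only the atomic ->apply hook wired up; pwmchip_add() is declared further below, every my_* name is hypothetical, and the snippet is not part of the patch.

static int my_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			struct pwm_state *state)
{
	/* translate state->period/duty_cycle/polarity/enabled to registers */
	return 0;
}

static const struct pwm_ops my_pwm_ops = {
	.apply = my_pwm_apply,
	.owner = THIS_MODULE,
};

static int my_pwm_probe(struct device *dev, struct pwm_chip *chip)
{
	chip->dev  = dev;
	chip->ops  = &my_pwm_ops;
	chip->base = -1;		/* let the framework assign a base */
	chip->npwm = 1;

	return pwmchip_add(chip);
}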
+ */ +static inline int pwm_set_polarity(struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.polarity == polarity) + return 0; + + /* + * Changing the polarity of a running PWM without adjusting the + * dutycycle/period value is a bit risky (can introduce glitches). + * Return -EBUSY in this case. + * Note that this is allowed when using pwm_apply_state() because + * the user specifies all the parameters. + */ + if (state.enabled) + return -EBUSY; + + state.polarity = polarity; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_enable() - start a PWM output toggling + * @pwm: PWM device + * + * Returns: 0 on success or a negative error code on failure. + */ +static inline int pwm_enable(struct pwm_device *pwm) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.enabled) + return 0; + + state.enabled = true; + return pwm_apply_state(pwm, &state); +} + +/** + * pwm_disable() - stop a PWM output toggling + * @pwm: PWM device + */ +static inline void pwm_disable(struct pwm_device *pwm) +{ + struct pwm_state state; + + if (!pwm) + return; + + pwm_get_state(pwm, &state); + if (!state.enabled) + return; + + state.enabled = false; + pwm_apply_state(pwm, &state); +} + +/* PWM provider APIs */ +int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, + unsigned long timeout); +int pwm_set_chip_data(struct pwm_device *pwm, void *data); +void *pwm_get_chip_data(struct pwm_device *pwm); + +int pwmchip_add_with_polarity(struct pwm_chip *chip, + enum pwm_polarity polarity); +int pwmchip_add(struct pwm_chip *chip); +int pwmchip_remove(struct pwm_chip *chip); +struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, + unsigned int index, + const char *label); + +struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc, + const struct of_phandle_args *args); + +struct pwm_device *pwm_get(struct device *dev, const char *con_id); +struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id); +void pwm_put(struct pwm_device *pwm); + +struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id); +struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, + const char *con_id); +void devm_pwm_put(struct device *dev, struct pwm_device *pwm); +#else +static inline struct pwm_device *pwm_request(int pwm_id, const char *label) +{ + return ERR_PTR(-ENODEV); +} + +static inline void pwm_free(struct pwm_device *pwm) +{ +} + +static inline int pwm_apply_state(struct pwm_device *pwm, + const struct pwm_state *state) +{ + return -ENOTSUPP; +} + +static inline int pwm_adjust_config(struct pwm_device *pwm) +{ + return -ENOTSUPP; +} + +static inline int pwm_config(struct pwm_device *pwm, int duty_ns, + int period_ns) +{ + return -EINVAL; +} + +static inline int pwm_capture(struct pwm_device *pwm, + struct pwm_capture *result, + unsigned long timeout) +{ + return -EINVAL; +} + +static inline int pwm_set_polarity(struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + return -ENOTSUPP; +} + +static inline int pwm_enable(struct pwm_device *pwm) +{ + return -EINVAL; +} + +static inline void pwm_disable(struct pwm_device *pwm) +{ +} + +static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data) +{ + return -EINVAL; +} + +static inline void *pwm_get_chip_data(struct pwm_device *pwm) +{ + return NULL; +} + +static inline int pwmchip_add(struct pwm_chip *chip) +{ + return -EINVAL; +} + 
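A legacy consumer sketch built from the helpers above: look the PWM up by connection id, program duty/period in nanoseconds, then enable it; the "backlight" con_id and my_backlight_on() are hypothetical, and the snippet is not part of the patch.

static int my_backlight_on(struct device *dev)
{
	struct pwm_device *pwm;
	int err;

	pwm = devm_pwm_get(dev, "backlight");
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	err = pwm_config(pwm, 500000, 1000000);	/* 50% of a 1 ms period */
	if (err)
		return err;

	return pwm_enable(pwm);
}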
+static inline int pwmchip_add_inversed(struct pwm_chip *chip) +{ + return -EINVAL; +} + +static inline int pwmchip_remove(struct pwm_chip *chip) +{ + return -EINVAL; +} + +static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, + unsigned int index, + const char *label) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct pwm_device *pwm_get(struct device *dev, + const char *consumer) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct pwm_device *of_pwm_get(struct device_node *np, + const char *con_id) +{ + return ERR_PTR(-ENODEV); +} + +static inline void pwm_put(struct pwm_device *pwm) +{ +} + +static inline struct pwm_device *devm_pwm_get(struct device *dev, + const char *consumer) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct pwm_device *devm_of_pwm_get(struct device *dev, + struct device_node *np, + const char *con_id) +{ + return ERR_PTR(-ENODEV); +} + +static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) +{ +} +#endif + +static inline void pwm_apply_args(struct pwm_device *pwm) +{ + struct pwm_state state = { }; + + /* + * PWM users calling pwm_apply_args() expect to have a fresh config + * where the polarity and period are set according to pwm_args info. + * The problem is, polarity can only be changed when the PWM is + * disabled. + * + * PWM drivers supporting hardware readout may declare the PWM device + * as enabled, and prevent polarity setting, which changes from the + * existing behavior, where all PWM devices are declared as disabled + * at startup (even if they are actually enabled), thus authorizing + * polarity setting. + * + * To fulfill this requirement, we apply a new state which disables + * the PWM device and set the reference period and polarity config. + * + * Note that PWM users requiring a smooth handover between the + * bootloader and the kernel (like critical regulators controlled by + * PWM devices) will have to switch to the atomic API and avoid calling + * pwm_apply_args(). 
+ */ + + state.enabled = false; + state.polarity = pwm->args.polarity; + state.period = pwm->args.period; + + pwm_apply_state(pwm, &state); +} + +struct pwm_lookup { + struct list_head list; + const char *provider; + unsigned int index; + const char *dev_id; + const char *con_id; + unsigned int period; + enum pwm_polarity polarity; + const char *module; /* optional, may be NULL */ +}; + +#define PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, \ + _period, _polarity, _module) \ + { \ + .provider = _provider, \ + .index = _index, \ + .dev_id = _dev_id, \ + .con_id = _con_id, \ + .period = _period, \ + .polarity = _polarity, \ + .module = _module, \ + } + +#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \ + PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, _period, \ + _polarity, NULL) + +#if IS_ENABLED(CONFIG_PWM) +void pwm_add_table(struct pwm_lookup *table, size_t num); +void pwm_remove_table(struct pwm_lookup *table, size_t num); +#else +static inline void pwm_add_table(struct pwm_lookup *table, size_t num) +{ +} + +static inline void pwm_remove_table(struct pwm_lookup *table, size_t num) +{ +} +#endif + +#ifdef CONFIG_PWM_SYSFS +void pwmchip_sysfs_export(struct pwm_chip *chip); +void pwmchip_sysfs_unexport(struct pwm_chip *chip); +#else +static inline void pwmchip_sysfs_export(struct pwm_chip *chip) +{ +} + +static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip) +{ +} +#endif /* CONFIG_PWM_SYSFS */ + +#endif /* __LINUX_PWM_H */ diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h new file mode 100644 index 000000000..8ea265a02 --- /dev/null +++ b/include/linux/pwm_backlight.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Generic PWM backlight driver data - see drivers/video/backlight/pwm_bl.c + */ +#ifndef __LINUX_PWM_BACKLIGHT_H +#define __LINUX_PWM_BACKLIGHT_H + +#include + +struct platform_pwm_backlight_data { + int pwm_id; + unsigned int max_brightness; + unsigned int dft_brightness; + unsigned int lth_brightness; + unsigned int pwm_period_ns; + unsigned int *levels; + unsigned int post_pwm_on_delay; + unsigned int pwm_off_delay; + /* TODO remove once all users are switched to gpiod_* API */ + int enable_gpio; + int (*init)(struct device *dev); + int (*notify)(struct device *dev, int brightness); + void (*notify_after)(struct device *dev, int brightness); + void (*exit)(struct device *dev); + int (*check_fb)(struct device *dev, struct fb_info *info); +}; + +#endif diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h new file mode 100644 index 000000000..fb09c2c7c --- /dev/null +++ b/include/linux/pxa168_eth.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + *pxa168 ethernet platform device data definition file. + */ +#ifndef __LINUX_PXA168_ETH_H +#define __LINUX_PXA168_ETH_H + +#include + +struct pxa168_eth_platform_data { + int port_number; + int phy_addr; + + /* + * If speed is 0, then speed and duplex are autonegotiated. + */ + int speed; /* 0, SPEED_10, SPEED_100 */ + int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ + phy_interface_t intf; + + /* + * Override default RX/TX queue sizes if nonzero. + */ + int rx_queue_size; + int tx_queue_size; + + /* + * init callback is used for board specific initialization + * e.g on Aspenite its used to initialize the PHY transceiver. 
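A board-file sketch for the PWM lookup table API shown above, registering one entry with PWM_LOOKUP() and pwm_add_table(); the provider, device and connection names are hypothetical, and the snippet is not part of the patch.

static struct pwm_lookup my_board_pwm_lookup[] = {
	/* 1 ms reference period, normal polarity */
	PWM_LOOKUP("pwm-chip.0", 0, "pwm-backlight.0", "backlight",
		   1000000, PWM_POLARITY_NORMAL),
};

static void __init my_board_init_pwm(void)
{
	pwm_add_table(my_board_pwm_lookup, ARRAY_SIZE(my_board_pwm_lookup));
}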
+ */
+ int (*init)(void);
+};
+
+#endif /* __LINUX_PXA168_ETH_H */
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
new file mode 100644
index 000000000..13b4244d4
--- /dev/null
+++ b/include/linux/pxa2xx_ssp.h
@@ -0,0 +1,268 @@
+/*
+ * pxa2xx_ssp.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver supports the following PXA CPU/SSP ports:-
+ *
+ * PXA250 SSP
+ * PXA255 SSP, NSSP
+ * PXA26x SSP, NSSP, ASSP
+ * PXA27x SSP1, SSP2, SSP3
+ * PXA3xx SSP1, SSP2, SSP3, SSP4
+ */
+
+#ifndef __LINUX_SSP_H
+#define __LINUX_SSP_H
+
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+
+/*
+ * SSP Serial Port Registers
+ * PXA250, PXA255, PXA26x and PXA27x SSP controllers are all slightly different.
+ * PXA255, PXA26x and PXA27x have extra ports, registers and bits.
+ */
+
+#define SSCR0 (0x00) /* SSP Control Register 0 */
+#define SSCR1 (0x04) /* SSP Control Register 1 */
+#define SSSR (0x08) /* SSP Status Register */
+#define SSITR (0x0C) /* SSP Interrupt Test Register */
+#define SSDR (0x10) /* SSP Data Write/Data Read Register */
+
+#define SSTO (0x28) /* SSP Time Out Register */
+#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
+#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
+#define SSTSA (0x30) /* SSP Tx Timeslot Active */
+#define SSRSA (0x34) /* SSP Rx Timeslot Active */
+#define SSTSS (0x38) /* SSP Timeslot Status */
+#define SSACD (0x3C) /* SSP Audio Clock Divider */
+#define SSACDD (0x40) /* SSP Audio Clock Dither Divider */
+
+/* Common PXA2xx bits first */
+#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
+#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */
+#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */
+#define SSCR0_Motorola (0x0 << 4) /* Motorola's Serial Peripheral Interface (SPI) */
+#define SSCR0_TI (0x1 << 4) /* Texas Instruments' Synchronous Serial Protocol (SSP) */
+#define SSCR0_National (0x2 << 4) /* National Microwire */
+#define SSCR0_ECS (1 << 6) /* External clock select */
+#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
+#define SSCR0_SCR(x) ((x) << 8) /* Serial Clock Rate (mask) */
+
+/* PXA27x, PXA3xx */
+#define SSCR0_EDSS (1 << 20) /* Extended data size select */
+#define SSCR0_NCS (1 << 21) /* Network clock select */
+#define SSCR0_RIM (1 << 22) /* Receive FIFO overrun interrupt mask */
+#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */
+#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */
+#define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */
+#define SSCR0_ACS (1 << 30) /* Audio clock select */
+#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
+
+
+#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */
+#define SSCR1_SPO (1 << 3) /* Motorola SPI SSPSCLK polarity setting */
+#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */
+
+#define SSSR_ALT_FRM_MASK 3 /* Masks the SFRM signal number */
+#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */
+#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */
+#define SSSR_BSY (1 <<
4) /* SSP Busy */ +#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */ +#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */ +#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */ + +#define RX_THRESH_DFLT 8 +#define TX_THRESH_DFLT 8 + +#define SSSR_TFL_MASK (0xf << 8) /* Transmit FIFO Level mask */ +#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */ + +#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */ +#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */ +#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */ +#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */ + +#define RX_THRESH_CE4100_DFLT 2 +#define TX_THRESH_CE4100_DFLT 2 + +#define CE4100_SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */ +#define CE4100_SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */ + +#define CE4100_SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */ +#define CE4100_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */ +#define CE4100_SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */ +#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ + +/* QUARK_X1000 SSCR0 bit definition */ +#define QUARK_X1000_SSCR0_DSS (0x1F << 0) /* Data Size Select (mask) */ +#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ +#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ +#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ + +#define RX_THRESH_QUARK_X1000_DFLT 1 +#define TX_THRESH_QUARK_X1000_DFLT 16 + +#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */ +#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */ + +#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */ +#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ +#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ +#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ +#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ +#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ + +/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ +#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ +#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ +#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */ +#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */ +#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */ +#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */ +#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */ +#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */ +#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */ +#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */ +#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */ +#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */ +#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */ +#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */ +#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */ +#define SSCR1_PINTE (1 << 18) /* Peripheral Trailing Byte Interrupt Enable */ +#define SSCR1_IFS (1 << 16) /* Invert Frame Signal */ +#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */ +#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */ + +#define SSSR_BCE (1 << 23) /* Bit Count Error */ +#define SSSR_CSS (1 << 22) /* Clock 
Synchronisation Status */ +#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */ +#define SSSR_EOC (1 << 20) /* End Of Chain */ +#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */ +#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */ + + +#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */ +#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */ +#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */ +#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */ +#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */ +#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */ +#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */ +#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */ +#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */ + +/* PXA3xx */ +#define SSPSP_EDMYSTRT(x) ((x) << 26) /* Extended Dummy Start */ +#define SSPSP_EDMYSTOP(x) ((x) << 28) /* Extended Dummy Stop */ +#define SSPSP_TIMING_MASK (0x7f8001f0) + +#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */ +#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ +#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ +#define SSACD_ACDS_1 (0) +#define SSACD_ACDS_2 (1) +#define SSACD_ACDS_4 (2) +#define SSACD_ACDS_8 (3) +#define SSACD_ACDS_16 (4) +#define SSACD_ACDS_32 (5) +#define SSACD_SCDB_4X (0) +#define SSACD_SCDB_1X (1) +#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */ + +/* LPSS SSP */ +#define SSITF 0x44 /* TX FIFO trigger level */ +#define SSITF_TxLoThresh(x) (((x) - 1) << 8) +#define SSITF_TxHiThresh(x) ((x) - 1) + +#define SSIRF 0x48 /* RX FIFO trigger level */ +#define SSIRF_RxThresh(x) ((x) - 1) + +enum pxa_ssp_type { + SSP_UNDEFINED = 0, + PXA25x_SSP, /* pxa 210, 250, 255, 26x */ + PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */ + PXA27x_SSP, + PXA3xx_SSP, + PXA168_SSP, + PXA910_SSP, + CE4100_SSP, + QUARK_X1000_SSP, + LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ + LPSS_BYT_SSP, + LPSS_BSW_SSP, + LPSS_SPT_SSP, + LPSS_BXT_SSP, + LPSS_CNL_SSP, +}; + +struct ssp_device { + struct platform_device *pdev; + struct list_head node; + + struct clk *clk; + void __iomem *mmio_base; + unsigned long phys_base; + + const char *label; + int port_id; + int type; + int use_count; + int irq; + + struct device_node *of_node; +}; + +/** + * pxa_ssp_write_reg - Write to a SSP register + * + * @dev: SSP device to access + * @reg: Register to write to + * @val: Value to be written. 
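+ *
+ * Purely illustrative use (not part of the original header), assuming
+ * @dev was obtained from pxa_ssp_request() and "status" is a caller-side
+ * u32:
+ *
+ *   pxa_ssp_write_reg(dev, SSCR0, SSCR0_Motorola | SSCR0_DataSize(8));
+ *   status = pxa_ssp_read_reg(dev, SSSR);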
+ */ +static inline void pxa_ssp_write_reg(struct ssp_device *dev, u32 reg, u32 val) +{ + __raw_writel(val, dev->mmio_base + reg); +} + +/** + * pxa_ssp_read_reg - Read from a SSP register + * + * @dev: SSP device to access + * @reg: Register to read from + */ +static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg) +{ + return __raw_readl(dev->mmio_base + reg); +} + +#if IS_ENABLED(CONFIG_PXA_SSP) +struct ssp_device *pxa_ssp_request(int port, const char *label); +void pxa_ssp_free(struct ssp_device *); +struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node, + const char *label); +#else +static inline struct ssp_device *pxa_ssp_request(int port, const char *label) +{ + return NULL; +} +static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n, + const char *name) +{ + return NULL; +} +static inline void pxa_ssp_free(struct ssp_device *ssp) {} +#endif + +#endif diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h new file mode 100644 index 000000000..5d6144977 --- /dev/null +++ b/include/linux/qcom-geni-se.h @@ -0,0 +1,425 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _LINUX_QCOM_GENI_SE +#define _LINUX_QCOM_GENI_SE + +/* Transfer mode supported by GENI Serial Engines */ +enum geni_se_xfer_mode { + GENI_SE_INVALID, + GENI_SE_FIFO, + GENI_SE_DMA, +}; + +/* Protocols supported by GENI Serial Engines */ +enum geni_se_protocol_type { + GENI_SE_NONE, + GENI_SE_SPI, + GENI_SE_UART, + GENI_SE_I2C, + GENI_SE_I3C, +}; + +struct geni_wrapper; +struct clk; + +/** + * struct geni_se - GENI Serial Engine + * @base: Base Address of the Serial Engine's register block + * @dev: Pointer to the Serial Engine device + * @wrapper: Pointer to the parent QUP Wrapper core + * @clk: Handle to the core serial engine clock + * @num_clk_levels: Number of valid clock levels in clk_perf_tbl + * @clk_perf_tbl: Table of clock frequency input to serial engine clock + */ +struct geni_se { + void __iomem *base; + struct device *dev; + struct geni_wrapper *wrapper; + struct clk *clk; + unsigned int num_clk_levels; + unsigned long *clk_perf_tbl; +}; + +/* Common SE registers */ +#define GENI_FORCE_DEFAULT_REG 0x20 +#define SE_GENI_STATUS 0x40 +#define GENI_SER_M_CLK_CFG 0x48 +#define GENI_SER_S_CLK_CFG 0x4c +#define GENI_FW_REVISION_RO 0x68 +#define SE_GENI_CLK_SEL 0x7c +#define SE_GENI_DMA_MODE_EN 0x258 +#define SE_GENI_M_CMD0 0x600 +#define SE_GENI_M_CMD_CTRL_REG 0x604 +#define SE_GENI_M_IRQ_STATUS 0x610 +#define SE_GENI_M_IRQ_EN 0x614 +#define SE_GENI_M_IRQ_CLEAR 0x618 +#define SE_GENI_S_CMD0 0x630 +#define SE_GENI_S_CMD_CTRL_REG 0x634 +#define SE_GENI_S_IRQ_STATUS 0x640 +#define SE_GENI_S_IRQ_EN 0x644 +#define SE_GENI_S_IRQ_CLEAR 0x648 +#define SE_GENI_TX_FIFOn 0x700 +#define SE_GENI_RX_FIFOn 0x780 +#define SE_GENI_TX_FIFO_STATUS 0x800 +#define SE_GENI_RX_FIFO_STATUS 0x804 +#define SE_GENI_TX_WATERMARK_REG 0x80c +#define SE_GENI_RX_WATERMARK_REG 0x810 +#define SE_GENI_RX_RFR_WATERMARK_REG 0x814 +#define SE_GENI_IOS 0x908 +#define SE_DMA_TX_IRQ_STAT 0xc40 +#define SE_DMA_TX_IRQ_CLR 0xc44 +#define SE_DMA_TX_FSM_RST 0xc58 +#define SE_DMA_RX_IRQ_STAT 0xd40 +#define SE_DMA_RX_IRQ_CLR 0xd44 +#define SE_DMA_RX_FSM_RST 0xd58 +#define SE_HW_PARAM_0 0xe24 +#define SE_HW_PARAM_1 0xe28 + +/* GENI_FORCE_DEFAULT_REG fields */ +#define FORCE_DEFAULT BIT(0) + +/* GENI_STATUS fields */ +#define M_GENI_CMD_ACTIVE BIT(0) +#define S_GENI_CMD_ACTIVE BIT(12) + 
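+/*
+ * Purely illustrative (not part of the original header): protocol drivers
+ * typically poll the GENI_STATUS bits above to wait for a sequencer
+ * command to finish, e.g.
+ *
+ *   status = readl_relaxed(se->base + SE_GENI_STATUS);
+ *   busy = status & (M_GENI_CMD_ACTIVE | S_GENI_CMD_ACTIVE);
+ *
+ * where "se" is a struct geni_se and "status"/"busy" are caller-side u32s.
+ */
+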
+/* GENI_SER_M_CLK_CFG/GENI_SER_S_CLK_CFG */ +#define SER_CLK_EN BIT(0) +#define CLK_DIV_MSK GENMASK(15, 4) +#define CLK_DIV_SHFT 4 + +/* GENI_FW_REVISION_RO fields */ +#define FW_REV_PROTOCOL_MSK GENMASK(15, 8) +#define FW_REV_PROTOCOL_SHFT 8 + +/* GENI_CLK_SEL fields */ +#define CLK_SEL_MSK GENMASK(2, 0) + +/* SE_GENI_DMA_MODE_EN */ +#define GENI_DMA_MODE_EN BIT(0) + +/* GENI_M_CMD0 fields */ +#define M_OPCODE_MSK GENMASK(31, 27) +#define M_OPCODE_SHFT 27 +#define M_PARAMS_MSK GENMASK(26, 0) + +/* GENI_M_CMD_CTRL_REG */ +#define M_GENI_CMD_CANCEL BIT(2) +#define M_GENI_CMD_ABORT BIT(1) +#define M_GENI_DISABLE BIT(0) + +/* GENI_S_CMD0 fields */ +#define S_OPCODE_MSK GENMASK(31, 27) +#define S_OPCODE_SHFT 27 +#define S_PARAMS_MSK GENMASK(26, 0) + +/* GENI_S_CMD_CTRL_REG */ +#define S_GENI_CMD_CANCEL BIT(2) +#define S_GENI_CMD_ABORT BIT(1) +#define S_GENI_DISABLE BIT(0) + +/* GENI_M_IRQ_EN fields */ +#define M_CMD_DONE_EN BIT(0) +#define M_CMD_OVERRUN_EN BIT(1) +#define M_ILLEGAL_CMD_EN BIT(2) +#define M_CMD_FAILURE_EN BIT(3) +#define M_CMD_CANCEL_EN BIT(4) +#define M_CMD_ABORT_EN BIT(5) +#define M_TIMESTAMP_EN BIT(6) +#define M_RX_IRQ_EN BIT(7) +#define M_GP_SYNC_IRQ_0_EN BIT(8) +#define M_GP_IRQ_0_EN BIT(9) +#define M_GP_IRQ_1_EN BIT(10) +#define M_GP_IRQ_2_EN BIT(11) +#define M_GP_IRQ_3_EN BIT(12) +#define M_GP_IRQ_4_EN BIT(13) +#define M_GP_IRQ_5_EN BIT(14) +#define M_IO_DATA_DEASSERT_EN BIT(22) +#define M_IO_DATA_ASSERT_EN BIT(23) +#define M_RX_FIFO_RD_ERR_EN BIT(24) +#define M_RX_FIFO_WR_ERR_EN BIT(25) +#define M_RX_FIFO_WATERMARK_EN BIT(26) +#define M_RX_FIFO_LAST_EN BIT(27) +#define M_TX_FIFO_RD_ERR_EN BIT(28) +#define M_TX_FIFO_WR_ERR_EN BIT(29) +#define M_TX_FIFO_WATERMARK_EN BIT(30) +#define M_SEC_IRQ_EN BIT(31) +#define M_COMMON_GENI_M_IRQ_EN (GENMASK(6, 1) | \ + M_IO_DATA_DEASSERT_EN | \ + M_IO_DATA_ASSERT_EN | M_RX_FIFO_RD_ERR_EN | \ + M_RX_FIFO_WR_ERR_EN | M_TX_FIFO_RD_ERR_EN | \ + M_TX_FIFO_WR_ERR_EN) + +/* GENI_S_IRQ_EN fields */ +#define S_CMD_DONE_EN BIT(0) +#define S_CMD_OVERRUN_EN BIT(1) +#define S_ILLEGAL_CMD_EN BIT(2) +#define S_CMD_FAILURE_EN BIT(3) +#define S_CMD_CANCEL_EN BIT(4) +#define S_CMD_ABORT_EN BIT(5) +#define S_GP_SYNC_IRQ_0_EN BIT(8) +#define S_GP_IRQ_0_EN BIT(9) +#define S_GP_IRQ_1_EN BIT(10) +#define S_GP_IRQ_2_EN BIT(11) +#define S_GP_IRQ_3_EN BIT(12) +#define S_GP_IRQ_4_EN BIT(13) +#define S_GP_IRQ_5_EN BIT(14) +#define S_IO_DATA_DEASSERT_EN BIT(22) +#define S_IO_DATA_ASSERT_EN BIT(23) +#define S_RX_FIFO_RD_ERR_EN BIT(24) +#define S_RX_FIFO_WR_ERR_EN BIT(25) +#define S_RX_FIFO_WATERMARK_EN BIT(26) +#define S_RX_FIFO_LAST_EN BIT(27) +#define S_COMMON_GENI_S_IRQ_EN (GENMASK(5, 1) | GENMASK(13, 9) | \ + S_RX_FIFO_RD_ERR_EN | S_RX_FIFO_WR_ERR_EN) + +/* GENI_/TX/RX/RX_RFR/_WATERMARK_REG fields */ +#define WATERMARK_MSK GENMASK(5, 0) + +/* GENI_TX_FIFO_STATUS fields */ +#define TX_FIFO_WC GENMASK(27, 0) + +/* GENI_RX_FIFO_STATUS fields */ +#define RX_LAST BIT(31) +#define RX_LAST_BYTE_VALID_MSK GENMASK(30, 28) +#define RX_LAST_BYTE_VALID_SHFT 28 +#define RX_FIFO_WC_MSK GENMASK(24, 0) + +/* SE_GENI_IOS fields */ +#define IO2_DATA_IN BIT(1) +#define RX_DATA_IN BIT(0) + +/* SE_DMA_TX_IRQ_STAT Register fields */ +#define TX_DMA_DONE BIT(0) +#define TX_EOT BIT(1) +#define TX_SBE BIT(2) +#define TX_RESET_DONE BIT(3) + +/* SE_DMA_RX_IRQ_STAT Register fields */ +#define RX_DMA_DONE BIT(0) +#define RX_EOT BIT(1) +#define RX_SBE BIT(2) +#define RX_RESET_DONE BIT(3) +#define RX_FLUSH_DONE BIT(4) +#define RX_GENI_GP_IRQ GENMASK(10, 5) +#define RX_GENI_CANCEL_IRQ 
BIT(11)
+#define RX_GENI_GP_IRQ_EXT GENMASK(13, 12)
+
+/* SE_HW_PARAM_0 fields */
+#define TX_FIFO_WIDTH_MSK GENMASK(29, 24)
+#define TX_FIFO_WIDTH_SHFT 24
+#define TX_FIFO_DEPTH_MSK GENMASK(21, 16)
+#define TX_FIFO_DEPTH_SHFT 16
+
+/* SE_HW_PARAM_1 fields */
+#define RX_FIFO_WIDTH_MSK GENMASK(29, 24)
+#define RX_FIFO_WIDTH_SHFT 24
+#define RX_FIFO_DEPTH_MSK GENMASK(21, 16)
+#define RX_FIFO_DEPTH_SHFT 16
+
+#define HW_VER_MAJOR_MASK GENMASK(31, 28)
+#define HW_VER_MAJOR_SHFT 28
+#define HW_VER_MINOR_MASK GENMASK(27, 16)
+#define HW_VER_MINOR_SHFT 16
+#define HW_VER_STEP_MASK GENMASK(15, 0)
+
+#if IS_ENABLED(CONFIG_QCOM_GENI_SE)
+
+u32 geni_se_get_qup_hw_version(struct geni_se *se);
+
+#define geni_se_get_wrapper_version(se, major, minor, step) do { \
+ u32 ver; \
+\
+ ver = geni_se_get_qup_hw_version(se); \
+ major = (ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT; \
+ minor = (ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT; \
+ step = ver & HW_VER_STEP_MASK; \
+} while (0)
+
+/**
+ * geni_se_read_proto() - Read the protocol configured for a serial engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Return: Protocol value as configured in the serial engine.
+ */
+static inline u32 geni_se_read_proto(struct geni_se *se)
+{
+ u32 val;
+
+ val = readl_relaxed(se->base + GENI_FW_REVISION_RO);
+
+ return (val & FW_REV_PROTOCOL_MSK) >> FW_REV_PROTOCOL_SHFT;
+}
+
+/**
+ * geni_se_setup_m_cmd() - Setup the primary sequencer
+ * @se: Pointer to the concerned serial engine.
+ * @cmd: Command/Operation to setup in the primary sequencer.
+ * @params: Parameter for the sequencer command.
+ *
+ * This function is used to configure the primary sequencer with the
+ * command and its associated parameters.
+ */
+static inline void geni_se_setup_m_cmd(struct geni_se *se, u32 cmd, u32 params)
+{
+ u32 m_cmd;
+
+ m_cmd = (cmd << M_OPCODE_SHFT) | (params & M_PARAMS_MSK);
+ writel_relaxed(m_cmd, se->base + SE_GENI_M_CMD0);
+}
+
+/**
+ * geni_se_setup_s_cmd() - Setup the secondary sequencer
+ * @se: Pointer to the concerned serial engine.
+ * @cmd: Command/Operation to setup in the secondary sequencer.
+ * @params: Parameter for the sequencer command.
+ *
+ * This function is used to configure the secondary sequencer with the
+ * command and its associated parameters.
+ */
+static inline void geni_se_setup_s_cmd(struct geni_se *se, u32 cmd, u32 params)
+{
+ u32 s_cmd;
+
+ s_cmd = readl_relaxed(se->base + SE_GENI_S_CMD0);
+ s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK);
+ s_cmd |= (cmd << S_OPCODE_SHFT);
+ s_cmd |= (params & S_PARAMS_MSK);
+ writel_relaxed(s_cmd, se->base + SE_GENI_S_CMD0);
+}
+
+/**
+ * geni_se_cancel_m_cmd() - Cancel the command configured in the primary
+ * sequencer
+ * @se: Pointer to the concerned serial engine.
+ *
+ * This function is used to cancel the currently configured command in the
+ * primary sequencer.
+ */
+static inline void geni_se_cancel_m_cmd(struct geni_se *se)
+{
+ writel_relaxed(M_GENI_CMD_CANCEL, se->base + SE_GENI_M_CMD_CTRL_REG);
+}
+
+/**
+ * geni_se_cancel_s_cmd() - Cancel the command configured in the secondary
+ * sequencer
+ * @se: Pointer to the concerned serial engine.
+ *
+ * This function is used to cancel the currently configured command in the
+ * secondary sequencer.
+ */
+static inline void geni_se_cancel_s_cmd(struct geni_se *se)
+{
+ writel_relaxed(S_GENI_CMD_CANCEL, se->base + SE_GENI_S_CMD_CTRL_REG);
+}
+
+/**
+ * geni_se_abort_m_cmd() - Abort the command configured in the primary sequencer
+ * @se: Pointer to the concerned serial engine.
+ * + * This function is used to force abort the currently configured command in the + * primary sequencer. + */ +static inline void geni_se_abort_m_cmd(struct geni_se *se) +{ + writel_relaxed(M_GENI_CMD_ABORT, se->base + SE_GENI_M_CMD_CTRL_REG); +} + +/** + * geni_se_abort_s_cmd() - Abort the command configured in the secondary + * sequencer + * @se: Pointer to the concerned serial engine. + * + * This function is used to force abort the currently configured command in the + * secondary sequencer. + */ +static inline void geni_se_abort_s_cmd(struct geni_se *se) +{ + writel_relaxed(S_GENI_CMD_ABORT, se->base + SE_GENI_S_CMD_CTRL_REG); +} + +/** + * geni_se_get_tx_fifo_depth() - Get the TX fifo depth of the serial engine + * @se: Pointer to the concerned serial engine. + * + * This function is used to get the depth i.e. number of elements in the + * TX fifo of the serial engine. + * + * Return: TX fifo depth in units of FIFO words. + */ +static inline u32 geni_se_get_tx_fifo_depth(struct geni_se *se) +{ + u32 val; + + val = readl_relaxed(se->base + SE_HW_PARAM_0); + + return (val & TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT; +} + +/** + * geni_se_get_tx_fifo_width() - Get the TX fifo width of the serial engine + * @se: Pointer to the concerned serial engine. + * + * This function is used to get the width i.e. word size per element in the + * TX fifo of the serial engine. + * + * Return: TX fifo width in bits + */ +static inline u32 geni_se_get_tx_fifo_width(struct geni_se *se) +{ + u32 val; + + val = readl_relaxed(se->base + SE_HW_PARAM_0); + + return (val & TX_FIFO_WIDTH_MSK) >> TX_FIFO_WIDTH_SHFT; +} + +/** + * geni_se_get_rx_fifo_depth() - Get the RX fifo depth of the serial engine + * @se: Pointer to the concerned serial engine. + * + * This function is used to get the depth i.e. number of elements in the + * RX fifo of the serial engine. + * + * Return: RX fifo depth in units of FIFO words + */ +static inline u32 geni_se_get_rx_fifo_depth(struct geni_se *se) +{ + u32 val; + + val = readl_relaxed(se->base + SE_HW_PARAM_1); + + return (val & RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT; +} + +void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr); + +void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode); + +void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words, + bool msb_to_lsb, bool tx_cfg, bool rx_cfg); + +int geni_se_resources_off(struct geni_se *se); + +int geni_se_resources_on(struct geni_se *se); + +int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl); + +int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq, + unsigned int *index, unsigned long *res_freq, + bool exact); + +int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len, + dma_addr_t *iova); + +int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, + dma_addr_t *iova); + +void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); + +void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); +#endif +#endif diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h new file mode 100644 index 000000000..116b81ac4 --- /dev/null +++ b/include/linux/qcom_scm.h @@ -0,0 +1,107 @@ +/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __QCOM_SCM_H +#define __QCOM_SCM_H + +#include +#include + +#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) +#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 +#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 +#define QCOM_SCM_HDCP_MAX_REQ_CNT 5 + +struct qcom_scm_hdcp_req { + u32 addr; + u32 val; +}; + +struct qcom_scm_vmperm { + int vmid; + int perm; +}; + +#define QCOM_SCM_VMID_HLOS 0x3 +#define QCOM_SCM_VMID_MSS_MSA 0xF +#define QCOM_SCM_PERM_READ 0x4 +#define QCOM_SCM_PERM_WRITE 0x2 +#define QCOM_SCM_PERM_EXEC 0x1 +#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE) +#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC) + +#if IS_ENABLED(CONFIG_QCOM_SCM) +extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); +extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); +extern bool qcom_scm_is_available(void); +extern bool qcom_scm_hdcp_available(void); +extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, + u32 *resp); +extern bool qcom_scm_pas_supported(u32 peripheral); +extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, + size_t size); +extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, + phys_addr_t size); +extern int qcom_scm_pas_auth_and_reset(u32 peripheral); +extern int qcom_scm_pas_shutdown(u32 peripheral); +extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, + unsigned int *src, struct qcom_scm_vmperm *newvm, + int dest_cnt); +extern void qcom_scm_cpu_power_down(u32 flags); +extern u32 qcom_scm_get_version(void); +extern int qcom_scm_set_remote_state(u32 state, u32 id); +extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); +extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); +extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); +extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); +extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); +#else + +#include + +static inline +int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) +{ + return -ENODEV; +} +static inline +int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +{ + return -ENODEV; +} +static inline bool qcom_scm_is_available(void) { return false; } +static inline bool qcom_scm_hdcp_available(void) { return false; } +static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, + u32 *resp) { return -ENODEV; } +static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; } +static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, + size_t size) { return -ENODEV; } +static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, + phys_addr_t size) { return -ENODEV; } +static inline int +qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; } +static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; } +static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, + unsigned int *src, + struct qcom_scm_vmperm *newvm, + int dest_cnt) { return -ENODEV; } +static inline void qcom_scm_cpu_power_down(u32 flags) {} +static inline u32 qcom_scm_get_version(void) { return 0; } +static inline u32 
+qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; } +static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; } +static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; } +static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; } +static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; } +static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; } +#endif +#endif diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h new file mode 100644 index 000000000..0081fa6d1 --- /dev/null +++ b/include/linux/qed/common_hsi.h @@ -0,0 +1,1391 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _COMMON_HSI_H +#define _COMMON_HSI_H + +#include +#include +#include +#include + +/* dma_addr_t manip */ +#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) +#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) +#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) +#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) +#define DMA_REGPAIR_LE(x, val) do { \ + (x).hi = DMA_HI_LE((val)); \ + (x).lo = DMA_LO_LE((val)); \ + } while (0) + +#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) +#define HILO_64(hi, lo) \ + HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64) +#define HILO_64_REGPAIR(regpair) ({ \ + typeof(regpair) __regpair = (regpair); \ + HILO_64(__regpair.hi, __regpair.lo); }) +#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) + +#ifndef __COMMON_HSI__ +#define __COMMON_HSI__ + +/********************************/ +/* PROTOCOL COMMON FW CONSTANTS */ +/********************************/ + +#define X_FINAL_CLEANUP_AGG_INT 1 + +#define EVENT_RING_PAGE_SIZE_BYTES 4096 + +#define NUM_OF_GLOBAL_QUEUES 128 +#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 + +#define ISCSI_CDU_TASK_SEG_TYPE 0 +#define FCOE_CDU_TASK_SEG_TYPE 0 +#define RDMA_CDU_TASK_SEG_TYPE 1 + +#define FW_ASSERT_GENERAL_ATTN_IDX 32 + +#define MAX_PINNED_CCFC 32 + +/* Queue Zone sizes in bytes */ +#define TSTORM_QZONE_SIZE 8 +#define MSTORM_QZONE_SIZE 16 +#define USTORM_QZONE_SIZE 8 +#define XSTORM_QZONE_SIZE 8 +#define YSTORM_QZONE_SIZE 0 +#define PSTORM_QZONE_SIZE 0 + +#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 +#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 +#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 +#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 + +/********************************/ +/* CORE (LIGHT L2) FW CONSTANTS */ +/********************************/ + +#define CORE_LL2_MAX_RAMROD_PER_CON 8 +#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096 +#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096 +#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096 +#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1 + +#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12 + +#define CORE_SPQE_PAGE_SIZE_BYTES 4096 + +#define MAX_NUM_LL2_RX_QUEUES 48 +#define MAX_NUM_LL2_TX_STATS_COUNTERS 48 + +#define FW_MAJOR_VERSION 8 +#define FW_MINOR_VERSION 37 +#define FW_REVISION_VERSION 2 +#define FW_ENGINEERING_VERSION 0 + +/***********************/ +/* COMMON HW CONSTANTS */ +/***********************/ + +/* PCI functions */ +#define MAX_NUM_PORTS_K2 (4) +#define MAX_NUM_PORTS_BB (2) +#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) + +#define MAX_NUM_PFS_K2 (16) +#define MAX_NUM_PFS_BB (8) +#define MAX_NUM_PFS (MAX_NUM_PFS_K2) +#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ + +#define MAX_NUM_VFS_K2 (192) +#define MAX_NUM_VFS_BB (120) +#define MAX_NUM_VFS (MAX_NUM_VFS_K2) + +#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB) +#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS) + +#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB) +#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS) + +#define MAX_NUM_VPORTS_K2 (208) +#define MAX_NUM_VPORTS_BB (160) +#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2) + +#define MAX_NUM_L2_QUEUES_K2 (320) +#define MAX_NUM_L2_QUEUES_BB (256) +#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2) + +/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ +#define NUM_PHYS_TCS_4PORT_K2 (4) +#define NUM_OF_PHYS_TCS (8) +#define PURE_LB_TC NUM_OF_PHYS_TCS +#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) +#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) + +/* CIDs */ +#define 
NUM_OF_CONNECTION_TYPES_E4 (8) +#define NUM_OF_LCIDS (320) +#define NUM_OF_LTIDS (320) + +/* Global PXP windows (GTT) */ +#define NUM_OF_GTT 19 +#define GTT_DWORD_SIZE_BITS 10 +#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2) +#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) + +/* Tools Version */ +#define TOOLS_VERSION 10 + +/*****************/ +/* CDU CONSTANTS */ +/*****************/ + +#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) +#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) + +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) + +#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) +#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) +#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2) +#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3) +#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4) +#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5) + +/*****************/ +/* DQ CONSTANTS */ +/*****************/ + +/* DEMS */ +#define DQ_DEMS_LEGACY 0 +#define DQ_DEMS_TOE_MORE_TO_SEND 3 +#define DQ_DEMS_TOE_LOCAL_ADV_WND 4 +#define DQ_DEMS_ROCE_CQ_CONS 7 + +/* XCM agg val selection (HW) */ +#define DQ_XCM_AGG_VAL_SEL_WORD2 0 +#define DQ_XCM_AGG_VAL_SEL_WORD3 1 +#define DQ_XCM_AGG_VAL_SEL_WORD4 2 +#define DQ_XCM_AGG_VAL_SEL_WORD5 3 +#define DQ_XCM_AGG_VAL_SEL_REG3 4 +#define DQ_XCM_AGG_VAL_SEL_REG4 5 +#define DQ_XCM_AGG_VAL_SEL_REG5 6 +#define DQ_XCM_AGG_VAL_SEL_REG6 7 + +/* XCM agg val selection (FW) */ +#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 +#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 + +/* UCM agg val selection (HW) */ +#define DQ_UCM_AGG_VAL_SEL_WORD0 0 +#define DQ_UCM_AGG_VAL_SEL_WORD1 1 +#define DQ_UCM_AGG_VAL_SEL_WORD2 2 +#define DQ_UCM_AGG_VAL_SEL_WORD3 3 +#define DQ_UCM_AGG_VAL_SEL_REG0 4 +#define DQ_UCM_AGG_VAL_SEL_REG1 5 +#define DQ_UCM_AGG_VAL_SEL_REG2 6 +#define DQ_UCM_AGG_VAL_SEL_REG3 7 + +/* UCM agg val selection (FW) */ +#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 +#define DQ_UCM_ETH_PMD_RX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD3 +#define DQ_UCM_ROCE_CQ_CONS_CMD DQ_UCM_AGG_VAL_SEL_REG0 +#define DQ_UCM_ROCE_CQ_PROD_CMD DQ_UCM_AGG_VAL_SEL_REG2 + +/* TCM agg val selection (HW) */ +#define DQ_TCM_AGG_VAL_SEL_WORD0 0 +#define DQ_TCM_AGG_VAL_SEL_WORD1 1 +#define DQ_TCM_AGG_VAL_SEL_WORD2 2 +#define DQ_TCM_AGG_VAL_SEL_WORD3 3 +#define DQ_TCM_AGG_VAL_SEL_REG1 4 +#define DQ_TCM_AGG_VAL_SEL_REG2 5 +#define DQ_TCM_AGG_VAL_SEL_REG6 6 +#define DQ_TCM_AGG_VAL_SEL_REG9 7 + +/* TCM agg val selection (FW) */ +#define 
DQ_TCM_L2B_BD_PROD_CMD \ + DQ_TCM_AGG_VAL_SEL_WORD1 +#define DQ_TCM_ROCE_RQ_PROD_CMD \ + DQ_TCM_AGG_VAL_SEL_WORD0 + +/* XCM agg counter flag selection (HW) */ +#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 +#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 +#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 +#define DQ_XCM_AGG_FLG_SHIFT_CF13 3 +#define DQ_XCM_AGG_FLG_SHIFT_CF18 4 +#define DQ_XCM_AGG_FLG_SHIFT_CF19 5 +#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 +#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 + +/* XCM agg counter flag selection (FW) */ +#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) + +/* UCM agg counter flag selection (HW) */ +#define DQ_UCM_AGG_FLG_SHIFT_CF0 0 +#define DQ_UCM_AGG_FLG_SHIFT_CF1 1 +#define DQ_UCM_AGG_FLG_SHIFT_CF3 2 +#define DQ_UCM_AGG_FLG_SHIFT_CF4 3 +#define DQ_UCM_AGG_FLG_SHIFT_CF5 4 +#define DQ_UCM_AGG_FLG_SHIFT_CF6 5 +#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN 6 +#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7 + +/* UCM agg counter flag selection (FW) */ +#define DQ_UCM_ETH_PMD_TX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) +#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) +#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF3) +#define DQ_UCM_TOE_SLOW_PATH_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_TOE_DQ_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) + +/* TCM agg counter flag selection (HW) */ +#define DQ_TCM_AGG_FLG_SHIFT_CF0 0 +#define DQ_TCM_AGG_FLG_SHIFT_CF1 1 +#define DQ_TCM_AGG_FLG_SHIFT_CF2 2 +#define DQ_TCM_AGG_FLG_SHIFT_CF3 3 +#define DQ_TCM_AGG_FLG_SHIFT_CF4 4 +#define DQ_TCM_AGG_FLG_SHIFT_CF5 5 +#define DQ_TCM_AGG_FLG_SHIFT_CF6 6 +#define DQ_TCM_AGG_FLG_SHIFT_CF7 7 +/* TCM agg counter flag selection (FW) */ +#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) +#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) + +/* PWM address mapping */ +#define DQ_PWM_OFFSET_DPM_BASE 0x0 +#define DQ_PWM_OFFSET_DPM_END 0x27 +#define DQ_PWM_OFFSET_XCM16_BASE 0x40 +#define DQ_PWM_OFFSET_XCM32_BASE 0x44 +#define DQ_PWM_OFFSET_UCM16_BASE 0x48 +#define DQ_PWM_OFFSET_UCM32_BASE 0x4C +#define DQ_PWM_OFFSET_UCM16_4 0x50 +#define DQ_PWM_OFFSET_TCM16_BASE 0x58 +#define DQ_PWM_OFFSET_TCM32_BASE 0x5C +#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 +#define DQ_PWM_OFFSET_UCM_FLAGS 
0x69 +#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B + +#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) +#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) +#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4) +#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2) +#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) +#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) +#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) + +#define DQ_REGION_SHIFT (12) + +/* DPM */ +#define DQ_DPM_WQE_BUFF_SIZE (320) + +/* Conn type ranges */ +#define DQ_CONN_TYPE_RANGE_SHIFT (4) + +/*****************/ +/* QM CONSTANTS */ +/*****************/ + +/* Number of TX queues in the QM */ +#define MAX_QM_TX_QUEUES_K2 512 +#define MAX_QM_TX_QUEUES_BB 448 +#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 + +/* Number of Other queues in the QM */ +#define MAX_QM_OTHER_QUEUES_BB 64 +#define MAX_QM_OTHER_QUEUES_K2 128 +#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 + +/* Number of queues in a PF queue group */ +#define QM_PF_QUEUE_GROUP_SIZE 8 + +/* The size of a single queue element in bytes */ +#define QM_PQ_ELEMENT_SIZE 4 + +/* Base number of Tx PQs in the CM PQ representation. + * Should be used when storing PQ IDs in CM PQ registers and context. + */ +#define CM_TX_PQ_BASE 0x200 + +/* Number of global Vport/QCN rate limiters */ +#define MAX_QM_GLOBAL_RLS 256 + +/* QM registers data */ +#define QM_LINE_CRD_REG_WIDTH 16 +#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) +#define QM_BYTE_CRD_REG_WIDTH 24 +#define QM_BYTE_CRD_REG_SIGN_BIT BIT((QM_BYTE_CRD_REG_WIDTH - 1)) +#define QM_WFQ_CRD_REG_WIDTH 32 +#define QM_WFQ_CRD_REG_SIGN_BIT BIT((QM_WFQ_CRD_REG_WIDTH - 1)) +#define QM_RL_CRD_REG_WIDTH 32 +#define QM_RL_CRD_REG_SIGN_BIT BIT((QM_RL_CRD_REG_WIDTH - 1)) + +/*****************/ +/* CAU CONSTANTS */ +/*****************/ + +#define CAU_FSM_ETH_RX 0 +#define CAU_FSM_ETH_TX 1 + +/* Number of Protocol Indices per Status Block */ +#define PIS_PER_SB_E4 12 + +#define CAU_HC_STOPPED_STATE 3 +#define CAU_HC_DISABLE_STATE 4 +#define CAU_HC_ENABLE_STATE 0 + +/*****************/ +/* IGU CONSTANTS */ +/*****************/ + +#define MAX_SB_PER_PATH_K2 (368) +#define MAX_SB_PER_PATH_BB (288) +#define MAX_TOT_SB_PER_PATH \ + MAX_SB_PER_PATH_K2 + +#define MAX_SB_PER_PF_MIMD 129 +#define MAX_SB_PER_PF_SIMD 64 +#define MAX_SB_PER_VF 64 + +/* Memory addresses on the BAR for the IGU Sub Block */ +#define IGU_MEM_BASE 0x0000 + +#define IGU_MEM_MSIX_BASE 0x0000 +#define IGU_MEM_MSIX_UPPER 0x0101 +#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff + +#define IGU_MEM_PBA_MSIX_BASE 0x0200 +#define IGU_MEM_PBA_MSIX_UPPER 0x0202 +#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff + +#define IGU_CMD_INT_ACK_BASE 0x0400 +#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ + MAX_TOT_SB_PER_PATH - 1) +#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff + +#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 +#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1 +#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2 + +#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3 +#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4 +#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5 +#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6 + +#define IGU_CMD_PROD_UPD_BASE 0x0600 +#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ + MAX_TOT_SB_PER_PATH - 1) +#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff + +/*****************/ +/* PXP CONSTANTS */ +/*****************/ + +/* Bars for 
Blocks */ +#define PXP_BAR_GRC 0 +#define PXP_BAR_TSDM 0 +#define PXP_BAR_USDM 0 +#define PXP_BAR_XSDM 0 +#define PXP_BAR_MSDM 0 +#define PXP_BAR_YSDM 0 +#define PXP_BAR_PSDM 0 +#define PXP_BAR_IGU 0 +#define PXP_BAR_DQ 1 + +/* PTT and GTT */ +#define PXP_PER_PF_ENTRY_SIZE 8 +#define PXP_NUM_GLOBAL_WINDOWS 243 +#define PXP_GLOBAL_ENTRY_SIZE 4 +#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4 +#define PXP_PF_WINDOW_ADMIN_START 0 +#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000 +#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \ + PXP_PF_WINDOW_ADMIN_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0 +#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \ + PXP_PER_PF_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \ + PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200 +#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \ + PXP_GLOBAL_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \ + (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \ + PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1) +#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0 +#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4 +#define PXP_PF_ME_OPAQUE_ADDR 0x1f8 +#define PXP_PF_ME_CONCRETE_ADDR 0x1fc + +#define PXP_NUM_PF_WINDOWS 12 +#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS +#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_PF_WINDOW_END \ + (PXP_EXTERNAL_BAR_PF_WINDOW_START + \ + PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1) + +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \ + (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) + +/* PF BAR */ +#define PXP_BAR0_START_GRC 0x0000 +#define PXP_BAR0_GRC_LENGTH 0x1C00000 +#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ + PXP_BAR0_GRC_LENGTH - 1) + +#define PXP_BAR0_START_IGU 0x1C00000 +#define PXP_BAR0_IGU_LENGTH 0x10000 +#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ + PXP_BAR0_IGU_LENGTH - 1) + +#define PXP_BAR0_START_TSDM 0x1C80000 +#define PXP_BAR0_SDM_LENGTH 0x40000 +#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 +#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_MSDM 0x1D00000 +#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_USDM 0x1D80000 +#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_XSDM 0x1E00000 +#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_YSDM 0x1E80000 +#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_PSDM 0x1F00000 +#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1) + +/* VF BAR */ +#define PXP_VF_BAR0 0 + +#define PXP_VF_BAR0_START_IGU 0 +#define PXP_VF_BAR0_IGU_LENGTH 0x3000 +#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ + 
PXP_VF_BAR0_IGU_LENGTH - 1) + +#define PXP_VF_BAR0_START_DQ 0x3000 +#define PXP_VF_BAR0_DQ_LENGTH 0x200 +#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 +#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET) +#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ + + 4) +#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_LENGTH - 1) + +#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 +#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 +#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 +#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 +#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 +#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 +#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 +#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_GRC 0x3E00 +#define PXP_VF_BAR0_GRC_LENGTH 0x200 +#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ + PXP_VF_BAR0_GRC_LENGTH - 1) + +#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 +#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 + +#define PXP_VF_BAR0_START_IGU2 0x10000 +#define PXP_VF_BAR0_IGU2_LENGTH 0xD000 +#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \ + PXP_VF_BAR0_IGU2_LENGTH - 1) + +#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 + +/* ILT Records */ +#define PXP_NUM_ILT_RECORDS_BB 7600 +#define PXP_NUM_ILT_RECORDS_K2 11000 +#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) + +/* Host Interface */ +#define PXP_QUEUES_ZONE_MAX_NUM 320 + +/*****************/ +/* PRM CONSTANTS */ +/*****************/ +#define PRM_DMA_PAD_BYTES_NUM 2 + +/*****************/ +/* SDMs CONSTANTS */ +/*****************/ + +#define SDM_OP_GEN_TRIG_NONE 0 +#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 +#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 + +/********************/ +/* Completion types */ +/********************/ + +#define SDM_COMP_TYPE_NONE 0 +#define SDM_COMP_TYPE_WAKE_THREAD 1 +#define SDM_COMP_TYPE_AGG_INT 2 +#define SDM_COMP_TYPE_CM 3 +#define SDM_COMP_TYPE_LOADER 4 +#define SDM_COMP_TYPE_PXP 5 +#define SDM_COMP_TYPE_INDICATE_ERROR 6 +#define SDM_COMP_TYPE_RELEASE_THREAD 7 +#define SDM_COMP_TYPE_RAM 8 +#define SDM_COMP_TYPE_INC_ORDER_CNT 9 + +/*****************/ +/* PBF CONSTANTS */ +/*****************/ + +/* Number of PBF command queue lines. Each line is 32B. */ +#define PBF_MAX_CMD_LINES 3328 + +/* Number of BTB blocks. Each block is 256B. 
*/ +#define BTB_MAX_BLOCKS 1440 + +/*****************/ +/* PRS CONSTANTS */ +/*****************/ + +#define PRS_GFT_CAM_LINES_NO_MATCH 31 + +/* Interrupt coalescing TimeSet */ +struct coalescing_timeset { + u8 value; +#define COALESCING_TIMESET_TIMESET_MASK 0x7F +#define COALESCING_TIMESET_TIMESET_SHIFT 0 +#define COALESCING_TIMESET_VALID_MASK 0x1 +#define COALESCING_TIMESET_VALID_SHIFT 7 +}; + +struct common_queue_zone { + __le16 ring_drv_data_consumer; + __le16 reserved; +}; + +/* ETH Rx producers data */ +struct eth_rx_prod_data { + __le16 bd_prod; + __le16 cqe_prod; +}; + +struct tcp_ulp_connect_done_params { + __le16 mss; + u8 snd_wnd_scale; + u8 flags; +#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1 +#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0 +#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F +#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1 +}; + +struct iscsi_connect_done_results { + __le16 icid; + __le16 conn_id; + struct tcp_ulp_connect_done_params params; +}; + +struct iscsi_eqe_data { + __le16 icid; + __le16 conn_id; + __le16 reserved; + u8 error_code; + u8 error_pdu_opcode_reserved; +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0 +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1 +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6 +#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1 +#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 +}; + +/* Multi function mode */ +enum mf_mode { + ERROR_MODE /* Unsupported mode */, + MF_OVLAN, + MF_NPAR, + MAX_MF_MODE +}; + +/* Per-protocol connection types */ +enum protocol_type { + PROTOCOLID_ISCSI, + PROTOCOLID_FCOE, + PROTOCOLID_ROCE, + PROTOCOLID_CORE, + PROTOCOLID_ETH, + PROTOCOLID_IWARP, + PROTOCOLID_RESERVED0, + PROTOCOLID_PREROCE, + PROTOCOLID_COMMON, + PROTOCOLID_RESERVED1, + MAX_PROTOCOL_TYPE +}; + +struct regpair { + __le32 lo; + __le32 hi; +}; + +/* RoCE Destroy Event Data */ +struct rdma_eqe_destroy_qp { + __le32 cid; + u8 reserved[4]; +}; + +/* RDMA Event Data Union */ +union rdma_eqe_data { + struct regpair async_handle; + struct rdma_eqe_destroy_qp rdma_destroy_qp_data; +}; + +/* Ustorm Queue Zone */ +struct ustorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + u8 reserved[3]; +}; + +struct ustorm_queue_zone { + struct ustorm_eth_queue_zone eth; + struct common_queue_zone common; +}; + +/* Status block structure */ +struct cau_pi_entry { + __le32 prod; +#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF +#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 +#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F +#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 +#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 +#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 +#define CAU_PI_ENTRY_RESERVED_MASK 0xFF +#define CAU_PI_ENTRY_RESERVED_SHIFT 24 +}; + +/* Status block structure */ +struct cau_sb_entry { + __le32 data; +#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF +#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 +#define CAU_SB_ENTRY_STATE0_MASK 0xF +#define CAU_SB_ENTRY_STATE0_SHIFT 24 +#define CAU_SB_ENTRY_STATE1_MASK 0xF +#define CAU_SB_ENTRY_STATE1_SHIFT 28 + __le32 params; +#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 +#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 +#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 +#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 +#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF +#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 +#define CAU_SB_ENTRY_VF_VALID_MASK 
0x1 +#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 +#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF +#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 +#define CAU_SB_ENTRY_TPH_MASK 0x1 +#define CAU_SB_ENTRY_TPH_SHIFT 31 +}; + +/* Igu cleanup bit values to distinguish between clean or producer consumer + * update. + */ +enum command_type_bit { + IGU_COMMAND_TYPE_NOP = 0, + IGU_COMMAND_TYPE_SET = 1, + MAX_COMMAND_TYPE_BIT +}; + +/* Core doorbell data */ +struct core_db_data { + u8 params; +#define CORE_DB_DATA_DEST_MASK 0x3 +#define CORE_DB_DATA_DEST_SHIFT 0 +#define CORE_DB_DATA_AGG_CMD_MASK 0x3 +#define CORE_DB_DATA_AGG_CMD_SHIFT 2 +#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 +#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 +#define CORE_DB_DATA_RESERVED_MASK 0x1 +#define CORE_DB_DATA_RESERVED_SHIFT 5 +#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 spq_prod; +}; + +/* Enum of doorbell aggregative command selection */ +enum db_agg_cmd_sel { + DB_AGG_CMD_NOP, + DB_AGG_CMD_SET, + DB_AGG_CMD_ADD, + DB_AGG_CMD_MAX, + MAX_DB_AGG_CMD_SEL +}; + +/* Enum of doorbell destination */ +enum db_dest { + DB_DEST_XCM, + DB_DEST_UCM, + DB_DEST_TCM, + DB_NUM_DESTINATIONS, + MAX_DB_DEST +}; + +/* Enum of doorbell DPM types */ +enum db_dpm_type { + DPM_LEGACY, + DPM_RDMA, + DPM_L2_INLINE, + DPM_L2_BD, + MAX_DB_DPM_TYPE +}; + +/* Structure for doorbell data, in L2 DPM mode, for 1st db in a DPM burst */ +struct db_l2_dpm_data { + __le16 icid; + __le16 bd_prod; + __le32 params; +#define DB_L2_DPM_DATA_SIZE_MASK 0x3F +#define DB_L2_DPM_DATA_SIZE_SHIFT 0 +#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3 +#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6 +#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF +#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8 +#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF +#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16 +#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1 +#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27 +#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7 +#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28 +#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1 +#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31 +}; + +/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */ +struct db_l2_dpm_sge { + struct regpair addr; + __le16 nbytes; + __le16 bitfields; +#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF +#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 +#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 +#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 +#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 +#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 +#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF +#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 + __le32 reserved2; +}; + +/* Structure for doorbell address, in legacy mode */ +struct db_legacy_addr { + __le32 addr; +#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_ADDR_DEMS_MASK 0x7 +#define DB_LEGACY_ADDR_DEMS_SHIFT 2 +#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF +#define DB_LEGACY_ADDR_ICID_SHIFT 5 +}; + +/* Structure for doorbell address, in PWM mode */ +struct db_pwm_addr { + __le32 addr; +#define DB_PWM_ADDR_RESERVED0_MASK 0x7 +#define DB_PWM_ADDR_RESERVED0_SHIFT 0 +#define DB_PWM_ADDR_OFFSET_MASK 0x7F +#define DB_PWM_ADDR_OFFSET_SHIFT 3 +#define DB_PWM_ADDR_WID_MASK 0x3 +#define DB_PWM_ADDR_WID_SHIFT 10 +#define DB_PWM_ADDR_DPI_MASK 0xFFFF +#define DB_PWM_ADDR_DPI_SHIFT 12 +#define DB_PWM_ADDR_RESERVED1_MASK 0xF +#define DB_PWM_ADDR_RESERVED1_SHIFT 28 +}; + +/* Parameters to RDMA firmware, passed in EDPM doorbell */ +struct db_rdma_dpm_params { + __le32 params; +#define 
DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 +}; + +/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a + * DPM burst. + */ +struct db_rdma_dpm_data { + __le16 icid; + __le16 prod_val; + struct db_rdma_dpm_params params; +}; + +/* Igu interrupt command */ +enum igu_int_cmd { + IGU_INT_ENABLE = 0, + IGU_INT_DISABLE = 1, + IGU_INT_NOP = 2, + IGU_INT_NOP2 = 3, + MAX_IGU_INT_CMD +}; + +/* IGU producer or consumer update command */ +struct igu_prod_cons_update { + __le32 sb_id_and_flags; +#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF +#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 +#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 + __le32 reserved1; +}; + +/* Igu segments access for default status block only */ +enum igu_seg_access { + IGU_SEG_ACCESS_REG = 0, + IGU_SEG_ACCESS_ATTN = 1, + MAX_IGU_SEG_ACCESS +}; + +/* Enumeration for L3 type field of parsing_and_err_flags. + * L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6 + * (This field can be filled according to the last-ethertype) + */ +enum l3_type { + e_l3_type_unknown, + e_l3_type_ipv4, + e_l3_type_ipv6, + MAX_L3_TYPE +}; + +/* Enumeration for l4Protocol field of parsing_and_err_flags. + * L4-protocol: 0 - none, 1 - TCP, 2 - UDP. + * If the packet is IPv4 fragment, and its not the first fragment, the + * protocol-type should be set to none. 
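+ *
+ * As a rough illustration only (assuming a GET_FIELD(value, name) helper
+ * of the usual (((value) >> name##_SHIFT) & name##_MASK) form, as the qed
+ * headers provide elsewhere), an Rx handler could decode the L3/L4 type
+ * and checksum status from the parsing_and_err_flags word defined below;
+ * "pars" is a hypothetical struct parsing_and_err_flags taken from an Rx
+ * CQE:
+ *
+ *	u16 flags = le16_to_cpu(pars.flags);
+ *	u8 l3 = GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L3TYPE);
+ *	u8 l4 = GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L4PROTOCOL);
+ *	bool csum_ok =
+ *		GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
+ *		!GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR);
+ *
+ *	if (l3 == e_l3_type_ipv4 && l4 == e_l4_protocol_tcp && csum_ok)
+ *		... treat the frame as a checksum-verified TCP/IPv4 packet ...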
+ */ +enum l4_protocol { + e_l4_protocol_none, + e_l4_protocol_tcp, + e_l4_protocol_udp, + MAX_L4_PROTOCOL +}; + +/* Parsing and error flags field */ +struct parsing_and_err_flags { + __le16 flags; +#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 +}; + +/* Parsing error flags bitmap */ +struct parsing_err_flags { + __le16 flags; +#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14 
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 +}; + +/* Pb context */ +struct pb_context { + __le32 crc[4]; +}; + +/* Concrete Function ID */ +struct pxp_concrete_fid { + __le16 fid; +#define PXP_CONCRETE_FID_PFID_MASK 0xF +#define PXP_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_CONCRETE_FID_PORT_MASK 0x3 +#define PXP_CONCRETE_FID_PORT_SHIFT 4 +#define PXP_CONCRETE_FID_PATH_MASK 0x1 +#define PXP_CONCRETE_FID_PATH_SHIFT 6 +#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_CONCRETE_FID_VFID_SHIFT 8 +}; + +/* Concrete Function ID */ +struct pxp_pretend_concrete_fid { + __le16 fid; +#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF +#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 +}; + +/* Function ID */ +union pxp_pretend_fid { + struct pxp_pretend_concrete_fid concrete_fid; + __le16 opaque_fid; +}; + +/* Pxp Pretend Command Register */ +struct pxp_pretend_cmd { + union pxp_pretend_fid fid; + __le16 control; +#define PXP_PRETEND_CMD_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PATH_SHIFT 0 +#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 +#define PXP_PRETEND_CMD_PORT_MASK 0x3 +#define PXP_PRETEND_CMD_PORT_SHIFT 2 +#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 +#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 +#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 +#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 +#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 +#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 +}; + +/* PTT Record in PXP Admin Window */ +struct pxp_ptt_entry { + __le32 offset; +#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF +#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 +#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF +#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 + struct pxp_pretend_cmd pretend; +}; + +/* VF Zone A Permission Register */ +struct pxp_vf_zone_a_permission { + __le32 control; +#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF +#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 +#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 +#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 +}; + +/* Rdif context */ +struct rdif_task_context { + __le32 initial_ref_tag; + __le16 app_tag_value; + __le16 app_tag_mask; + u8 flags0; +#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 +#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 +#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 +#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 +#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 +#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 +#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 +#define 
RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 +#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 +#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 +#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7 + u8 partial_dif_data[7]; + __le16 partial_crc_value; + __le16 partial_checksum_value; + __le32 offset_in_io; + __le16 flags1; +#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 +#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 +#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 +#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 +#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 +#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 +#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 +#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 +#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 +#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 +#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 +#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 +#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 +#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 + __le16 state; +#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0 +#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4 +#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1 +#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8 +#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1 +#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9 +#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF +#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10 +#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 +#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 + __le32 reserved2; +}; + +/* Status block structure */ +struct status_block_e4 { + __le16 pi_array[PIS_PER_SB_E4]; + __le32 sb_num; +#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16 + __le32 prod_index; +#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24 +}; + +/* Tdif context */ +struct tdif_task_context { + __le32 initial_ref_tag; + __le16 app_tag_value; + __le16 app_tag_mask; + __le16 partial_crc_value_b; + __le16 partial_checksum_value_b; + __le16 stateB; +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0 +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1 +#define 
TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8 +#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9 +#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F +#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 + u8 reserved1; + u8 flags0; +#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 +#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 +#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 +#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 +#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 +#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 +#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 +#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 +#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 +#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 + __le32 flags1; +#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 +#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 +#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 +#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 +#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 +#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 +#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 +#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 +#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 +#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 +#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 +#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 +#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14 +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22 +#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23 +#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF +#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29 +#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 +#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30 +#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 + __le32 offset_in_io_b; + __le16 partial_crc_value_a; + __le16 partial_checksum_value_a; + __le32 offset_in_io_a; + u8 partial_dif_data_a[8]; + u8 partial_dif_data_b[8]; +}; + +/* Timers context */ +struct timers_context { + __le32 logical_client_0; +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED0_MASK 0x1 +#define 
TIMERS_CONTEXT_RESERVED0_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 + __le32 logical_client_1; +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED2_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED2_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED3_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED3_SHIFT 30 + __le32 logical_client_2; +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED4_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED4_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED5_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED5_SHIFT 30 + __le32 host_expiration_fields; +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED6_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED6_SHIFT 27 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 +#define TIMERS_CONTEXT_RESERVED7_MASK 0x7 +#define TIMERS_CONTEXT_RESERVED7_SHIFT 29 +}; + +/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */ +enum tunnel_next_protocol { + e_unknown = 0, + e_l2 = 1, + e_ipv4 = 2, + e_ipv6 = 3, + MAX_TUNNEL_NEXT_PROTOCOL +}; + +#endif /* __COMMON_HSI__ */ +#endif diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h new file mode 100644 index 000000000..d9416ad5e --- /dev/null +++ b/include/linux/qed/eth_common.h @@ -0,0 +1,481 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __ETH_COMMON__ +#define __ETH_COMMON__ + +/********************/ +/* ETH FW CONSTANTS */ +/********************/ + +#define ETH_HSI_VER_MAJOR 3 +#define ETH_HSI_VER_MINOR 10 + +#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 + +#define ETH_CACHE_LINE_SIZE 64 +#define ETH_RX_CQE_GAP 32 +#define ETH_MAX_RAMROD_PER_CON 8 +#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_NUM_NEXT_PAGE_BDS 2 + +#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 +#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 + +#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 +#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 +#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 +#define ETH_TX_MAX_LSO_HDR_NBD 4 +#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 +#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 +#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 +#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 +#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) +#define ETH_TX_MAX_LSO_HDR_BYTES 510 +#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) +#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 +#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 +#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 +#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF + +#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ + (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) +#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ + (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) + +/* Maximum number of buffers, used for RX packet placement */ +#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_BD_THRESHOLD 12 + +/* Num of MAC/VLAN filters */ +#define ETH_NUM_MAC_FILTERS 512 +#define ETH_NUM_VLAN_FILTERS 512 + +/* Approx. multicast constants */ +#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 +#define ETH_MULTICAST_MAC_BINS 256 +#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) + +/* Ethernet vport update constants */ +#define ETH_FILTER_RULES_COUNT 10 +#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 +#define ETH_RSS_KEY_SIZE_REGS 10 +#define ETH_RSS_ENGINE_NUM_K2 207 +#define ETH_RSS_ENGINE_NUM_BB 127 + +/* TPA constants */ +#define ETH_TPA_MAX_AGGS_NUM 64 +#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT +#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 +#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 + +/* Control frame check constants */ +#define ETH_CTL_FRAME_ETH_TYPE_NUM 4 + +/* GFS constants */ +#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */ + +/* Destination port mode */ +enum dest_port_mode { + DEST_PORT_PHY, + DEST_PORT_LOOPBACK, + DEST_PORT_PHY_LOOPBACK, + DEST_PORT_DROP, + MAX_DEST_PORT_MODE +}; + +/* Ethernet address type */ +enum eth_addr_type { + BROADCAST_ADDRESS, + MULTICAST_ADDRESS, + UNICAST_ADDRESS, + UNKNOWN_ADDRESS, + MAX_ETH_ADDR_TYPE +}; + +struct eth_tx_1st_bd_flags { + u8 bitfields; +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 +#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 +#define 
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 +}; + +/* The parsing information data fo rthe first tx bd of a given packet */ +struct eth_tx_data_1st_bd { + __le16 vlan; + u8 nbds; + struct eth_tx_1st_bd_flags bd_flags; + __le16 bitfields; +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 +#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 +}; + +/* The parsing information data for the second tx bd of a given packet */ +struct eth_tx_data_2nd_bd { + __le16 tunn_ip_size; + __le16 bitfields1; +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 + __le16 bitfields2; +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 +}; + +/* Firmware data for L2-EDPM packet */ +struct eth_edpm_fw_data { + struct eth_tx_data_1st_bd data_1st_bd; + struct eth_tx_data_2nd_bd data_2nd_bd; + __le32 reserved; +}; + +/* Tunneling parsing flags */ +struct eth_tunnel_parsing_flags { + u8 flags; +#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 +#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 +#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 +#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 +#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 +#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 +}; + +/* PMD flow control bits */ +struct eth_pmd_flow_flags { + u8 flags; +#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 +#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F +#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 +}; + +/* Regular ETH Rx FP CQE */ +struct eth_fast_path_rx_reg_cqe { + u8 type; + u8 bitfields; +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF 
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 + __le16 pkt_len; + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_first_bd; + u8 placement_offset; + struct eth_tunnel_parsing_flags tunnel_pars_flags; + u8 bd_num; + u8 reserved; + __le16 flow_id; + u8 reserved1[11]; + struct eth_pmd_flow_flags pmd_flags; +}; + +/* TPA-continue ETH Rx FP CQE */ +struct eth_fast_path_rx_tpa_cont_cqe { + u8 type; + u8 tpa_agg_index; + __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; + u8 reserved; + u8 reserved1; + __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; + u8 reserved3[3]; + struct eth_pmd_flow_flags pmd_flags; +}; + +/* TPA-end ETH Rx FP CQE */ +struct eth_fast_path_rx_tpa_end_cqe { + u8 type; + u8 tpa_agg_index; + __le16 total_packet_len; + u8 num_of_bds; + u8 end_reason; + __le16 num_of_coalesced_segs; + __le32 ts_delta; + __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; + __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE]; + __le16 reserved1; + u8 reserved2; + struct eth_pmd_flow_flags pmd_flags; +}; + +/* TPA-start ETH Rx FP CQE */ +struct eth_fast_path_rx_tpa_start_cqe { + u8 type; + u8 bitfields; +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 + __le16 seg_len; + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_first_bd; + u8 placement_offset; + struct eth_tunnel_parsing_flags tunnel_pars_flags; + u8 tpa_agg_index; + u8 header_len; + __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; + __le16 flow_id; + u8 reserved; + struct eth_pmd_flow_flags pmd_flags; +}; + +/* The L4 pseudo checksum mode for Ethernet */ +enum eth_l4_pseudo_checksum_mode { + ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH, + ETH_L4_PSEUDO_CSUM_ZERO_LENGTH, + MAX_ETH_L4_PSEUDO_CHECKSUM_MODE +}; + +struct eth_rx_bd { + struct regpair addr; +}; + +/* Regular ETH Rx SP CQE */ +struct eth_slow_path_rx_cqe { + u8 type; + u8 ramrod_cmd_id; + u8 error_flag; + u8 reserved[25]; + __le16 echo; + u8 reserved1; + struct eth_pmd_flow_flags pmd_flags; +}; + +/* Union for all ETH Rx CQE types */ +union eth_rx_cqe { + struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; + struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; + struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; + struct eth_slow_path_rx_cqe slow_path; +}; + +/* ETH Rx CQE type */ +enum eth_rx_cqe_type { + ETH_RX_CQE_TYPE_UNUSED, + ETH_RX_CQE_TYPE_REGULAR, + ETH_RX_CQE_TYPE_SLOW_PATH, + ETH_RX_CQE_TYPE_TPA_START, + ETH_RX_CQE_TYPE_TPA_CONT, + ETH_RX_CQE_TYPE_TPA_END, + MAX_ETH_RX_CQE_TYPE +}; + +struct eth_rx_pmd_cqe { + union eth_rx_cqe cqe; + u8 reserved[ETH_RX_CQE_GAP]; +}; + +enum eth_rx_tunn_type { + ETH_RX_NO_TUNN, + ETH_RX_TUNN_GENEVE, + ETH_RX_TUNN_GRE, + ETH_RX_TUNN_VXLAN, + MAX_ETH_RX_TUNN_TYPE +}; + +/* Aggregation end reason. 
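+ * Reported by the firmware in the end_reason field of the TPA-end CQE
+ * (struct eth_fast_path_rx_tpa_end_cqe above).
+ *
+ * As an illustrative sketch only, a driver walking the Rx CQE ring might
+ * pick these values up roughly as follows, "cqe" being a hypothetical
+ * pointer to a union eth_rx_cqe whose leading type byte equals
+ * ETH_RX_CQE_TYPE_TPA_END:
+ *
+ *	struct eth_fast_path_rx_tpa_end_cqe *end = &cqe->fast_path_tpa_end;
+ *	u16 total_len = le16_to_cpu(end->total_packet_len);
+ *	u16 segs = le16_to_cpu(end->num_of_coalesced_segs);
+ *	u8 agg_idx = end->tpa_agg_index;
+ *	enum eth_tpa_end_reason reason = end->end_reason;
+ *
+ *	... hand the completed aggregation for agg_idx (total_len bytes,
+ *	    segs coalesced segments) up the stack, or drop it if reason
+ *	    indicates an inconsistent or out-of-order termination ...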
*/ +enum eth_tpa_end_reason { + ETH_AGG_END_UNUSED, + ETH_AGG_END_SP_UPDATE, + ETH_AGG_END_MAX_LEN, + ETH_AGG_END_LAST_SEG, + ETH_AGG_END_TIMEOUT, + ETH_AGG_END_NOT_CONSISTENT, + ETH_AGG_END_OUT_OF_ORDER, + ETH_AGG_END_NON_TPA_SEG, + MAX_ETH_TPA_END_REASON +}; + +/* The first tx bd of a given packet */ +struct eth_tx_1st_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_1st_bd data; +}; + +/* The second tx bd of a given packet */ +struct eth_tx_2nd_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_2nd_bd data; +}; + +/* The parsing information data for the third tx bd of a given packet */ +struct eth_tx_data_3rd_bd { + __le16 lso_mss; + __le16 bitfields; +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 +#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF +#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 +#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F +#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 + u8 tunn_l4_hdr_start_offset_w; + u8 tunn_hdr_size_w; +}; + +/* The third tx bd of a given packet */ +struct eth_tx_3rd_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_3rd_bd data; +}; + +/* Complementary information for the regular tx bd of a given packet */ +struct eth_tx_data_bd { + __le16 reserved0; + __le16 bitfields; +#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF +#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 +#define ETH_TX_DATA_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 + __le16 reserved3; +}; + +/* The common non-special TX BD ring element */ +struct eth_tx_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_bd data; +}; + +union eth_tx_bd_types { + struct eth_tx_1st_bd first_bd; + struct eth_tx_2nd_bd second_bd; + struct eth_tx_3rd_bd third_bd; + struct eth_tx_bd reg_bd; +}; + +/* Mstorm Queue Zone */ +enum eth_tx_tunn_type { + ETH_TX_TUNN_GENEVE, + ETH_TX_TUNN_TTAG, + ETH_TX_TUNN_GRE, + ETH_TX_TUNN_VXLAN, + MAX_ETH_TX_TUNN_TYPE +}; + +/* Ystorm Queue Zone */ +struct xstorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + u8 reserved[7]; +}; + +/* ETH doorbell data */ +struct eth_db_data { + u8 params; +#define ETH_DB_DATA_DEST_MASK 0x3 +#define ETH_DB_DATA_DEST_SHIFT 0 +#define ETH_DB_DATA_AGG_CMD_MASK 0x3 +#define ETH_DB_DATA_AGG_CMD_SHIFT 2 +#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 +#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 +#define ETH_DB_DATA_RESERVED_MASK 0x1 +#define ETH_DB_DATA_RESERVED_SHIFT 5 +#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 bd_prod; +}; + +/* RSS hash type */ +enum rss_hash_type { + RSS_HASH_TYPE_DEFAULT = 0, + RSS_HASH_TYPE_IPV4 = 1, + RSS_HASH_TYPE_TCP_IPV4 = 2, + RSS_HASH_TYPE_IPV6 = 3, + RSS_HASH_TYPE_TCP_IPV6 = 4, + RSS_HASH_TYPE_UDP_IPV4 = 5, + RSS_HASH_TYPE_UDP_IPV6 = 6, + MAX_RSS_HASH_TYPE +}; + +#endif /* __ETH_COMMON__ */ diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h new file mode 100644 index 000000000..22077c586 --- /dev/null +++ b/include/linux/qed/fcoe_common.h @@ -0,0 +1,744 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef __FCOE_COMMON__ +#define __FCOE_COMMON__ + +/*********************/ +/* FCOE FW CONSTANTS */ +/*********************/ + +#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 + +/* The fcoe storm task context protection-information of Ystorm */ +struct protection_info_ctx { + __le16 flags; +#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 +#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 +#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F +#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 + u8 dix_block_size; + u8 dst_size; +}; + +/* The fcoe storm task context protection-information of Ystorm */ +union protection_info_union_ctx { + struct protection_info_ctx info; + __le32 value; +}; + +/* FCP CMD payload */ +struct fcoe_fcp_cmd_payload { + __le32 opaque[8]; +}; + +/* FCP RSP payload */ +struct fcoe_fcp_rsp_payload { + __le32 opaque[6]; +}; + +/* FCP RSP payload */ +struct fcp_rsp_payload_padded { + struct fcoe_fcp_rsp_payload rsp_payload; + __le32 reserved[2]; +}; + +/* FCP RSP payload */ +struct fcoe_fcp_xfer_payload { + __le32 opaque[3]; +}; + +/* FCP RSP payload */ +struct fcp_xfer_payload_padded { + struct fcoe_fcp_xfer_payload xfer_payload; + __le32 reserved[5]; +}; + +/* Task params */ +struct fcoe_tx_data_params { + __le32 data_offset; + __le32 offset_in_io; + u8 flags; +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 +#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F +#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 + u8 dif_residual; + __le16 seq_cnt; + __le16 single_sge_saved_offset; + __le16 next_dif_offset; + __le16 seq_id; + __le16 reserved3; +}; + +/* Middle path parameters: FC header fields provided by the driver */ +struct fcoe_tx_mid_path_params { + __le32 parameter; + u8 r_ctl; + u8 type; + u8 cs_ctl; + u8 df_ctl; + __le16 rx_id; + __le16 ox_id; +}; + +/* Task params */ +struct fcoe_tx_params { + struct fcoe_tx_data_params data; + struct fcoe_tx_mid_path_params mid_path; +}; + +/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */ +union fcoe_tx_info_union_ctx { + struct fcoe_fcp_cmd_payload fcp_cmd_payload; + struct fcp_rsp_payload_padded fcp_rsp_payload; + struct fcp_xfer_payload_padded fcp_xfer_payload; + struct fcoe_tx_params tx_params; +}; + +/* Data sgl */ +struct fcoe_slow_sgl_ctx { + struct regpair base_sgl_addr; + __le16 curr_sge_off; + __le16 remainder_num_sges; + __le16 curr_sgl_index; + __le16 reserved; +}; + +/* Union of DIX SGL \ cached DIX sges */ +union fcoe_dix_desc_ctx { + struct fcoe_slow_sgl_ctx dix_sgl; + struct scsi_sge cached_dix_sge; +}; + +/* The fcoe storm task context of Ystorm */ +struct ystorm_fcoe_task_st_ctx { + u8 task_type; + u8 sgl_mode; +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 + u8 
cached_dix_sge; + u8 expect_first_xfer; + __le32 num_pbf_zero_write; + union protection_info_union_ctx protection_info_union; + __le32 data_2_trns_rem; + struct scsi_sgl_params sgl_params; + u8 reserved1[12]; + union fcoe_tx_info_union_ctx tx_info_union; + union fcoe_dix_desc_ctx dix_desc; + struct scsi_cached_sges data_desc; + __le16 ox_id; + __le16 rx_id; + __le32 task_rety_identifier; + u8 reserved2[8]; +}; + +struct e4_ystorm_fcoe_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 word0; + u8 flags0; +#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 rx_id; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le32 reg1; + __le32 reg2; +}; + +struct e4_tstorm_fcoe_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 + u8 flags1; +#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 +#define 
E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 + u8 flags2; +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 + u8 flags3; +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 + u8 flags4; +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 + u8 cleanup_state; + __le16 last_sent_tid; + __le32 rec_rr_tov_exp_timeout; + u8 byte3; + u8 byte4; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 data_offset_end_of_seq; + __le32 data_offset_next; +}; + +/* Cached data sges */ +struct fcoe_exp_ro { + __le32 data_offset; + __le32 reserved; +}; + +/* Union of Cleanup address \ expected relative offsets */ +union fcoe_cleanup_addr_exp_ro_union { + struct regpair abts_rsp_fc_payload_hi; + struct fcoe_exp_ro exp_ro; +}; + +/* Fields coppied from ABTSrsp pckt */ +struct fcoe_abts_pkt { + __le32 abts_rsp_fc_payload_lo; + __le16 abts_rsp_rx_id; + u8 abts_rsp_rctl; + u8 reserved2; +}; + +/* FW read- write (modifyable) part The fcoe task storm context of Tstorm */ +struct fcoe_tstorm_fcoe_task_st_ctx_read_write { + union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; + __le16 flags; +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 + __le16 seq_cnt; + u8 seq_id; + u8 ooo_rx_seq_id; + __le16 rx_id; + struct fcoe_abts_pkt abts_data; + __le32 e_d_tov_exp_timeout_val; + __le16 ooo_rx_seq_cnt; + __le16 reserved1; +}; + +/* FW read only part The fcoe task storm context of Tstorm */ +struct fcoe_tstorm_fcoe_task_st_ctx_read_only { + u8 task_type; + u8 dev_type; + u8 conf_supported; + u8 glbl_q_num; + __le32 cid; + __le32 fcp_cmd_trns_size; + __le32 rsrv; +}; + +/** The fcoe task storm context of Tstorm */ +struct tstorm_fcoe_task_st_ctx { + struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; + struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; +}; + +struct e4_mstorm_fcoe_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 icid; + u8 flags0; +#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 cleanup_state; + __le32 
received_bytes; + u8 byte3; + u8 glbl_q_num; + __le16 word1; + __le16 tid_to_xfer; + __le16 word3; + __le16 word4; + __le16 word5; + __le32 expected_bytes; + __le32 reg2; +}; + +/* The fcoe task storm context of Mstorm */ +struct mstorm_fcoe_task_st_ctx { + struct regpair rsp_buf_addr; + __le32 rsrv[2]; + struct scsi_sgl_params sgl_params; + __le32 data_2_trns_rem; + __le32 data_buffer_offset; + __le16 parent_id; + __le16 flags; +#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 +#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 +#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 +#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 +#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 +#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 + struct scsi_cached_sges data_desc; +}; + +struct e4_ustorm_fcoe_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 + u8 flags3; +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define 
E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 global_cq_num; + __le32 reg3; + __le32 reg4; + __le32 reg5; +}; + +/* FCoE task context */ +struct e4_fcoe_task_context { + struct ystorm_fcoe_task_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct tdif_task_context tdif_context; + struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context; + struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context; + struct timers_context timer_context; + struct tstorm_fcoe_task_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_task_st_ctx mstorm_st_context; + struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context; + struct rdif_task_context rdif_context; +}; + +/* FCoE additional WQE (Sq/XferQ) information */ +union fcoe_additional_info_union { + __le32 previous_tid; + __le32 parent_tid; + __le32 burst_length; + __le32 seq_rec_updated_offset; +}; + +/* FCoE Ramrod Command IDs */ +enum fcoe_completion_status { + FCOE_COMPLETION_STATUS_SUCCESS, + FCOE_COMPLETION_STATUS_FCOE_VER_ERR, + FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, + MAX_FCOE_COMPLETION_STATUS +}; + +/* FC address (SID/DID) network presentation */ +struct fc_addr_nw { + u8 addr_lo; + u8 addr_mid; + u8 addr_hi; +}; + +/* FCoE connection offload */ +struct fcoe_conn_offload_ramrod_data { + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 dst_mac_addr_lo; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_hi; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + __le16 src_mac_addr_hi; + __le16 tx_max_fc_pay_len; + __le16 e_d_tov_timer_val; + __le16 rx_max_fc_pay_len; + __le16 vlan_tag; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 + __le16 physical_q0; + __le16 rec_rr_tov_timer_val; + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 +#define 
FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7 + __le16 conn_id; + u8 def_q_idx; + u8 reserved[5]; +}; + +/* FCoE terminate connection request */ +struct fcoe_conn_terminate_ramrod_data { + struct regpair terminate_params_addr; +}; + +/* FCoE device type */ +enum fcoe_device_type { + FCOE_TASK_DEV_TYPE_DISK, + FCOE_TASK_DEV_TYPE_TAPE, + MAX_FCOE_DEVICE_TYPE +}; + +/* Data sgl */ +struct fcoe_fast_sgl_ctx { + struct regpair sgl_start_addr; + __le32 sgl_byte_offset; + __le16 task_reuse_cnt; + __le16 init_offset_in_first_sge; +}; + +/* FCoE firmware function init */ +struct fcoe_init_func_ramrod_data { + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; + __le16 mtu; + __le16 sq_num_pages_in_pbl; + __le32 reserved[3]; +}; + +/* FCoE: Mode of the connection: Target or Initiator or both */ +enum fcoe_mode_type { + FCOE_INITIATOR_MODE = 0x0, + FCOE_TARGET_MODE = 0x1, + FCOE_BOTH_OR_NOT_CHOSEN = 0x3, + MAX_FCOE_MODE_TYPE +}; + +/* Per PF FCoE receive path statistics - tStorm RAM structure */ +struct fcoe_rx_stat { + struct regpair fcoe_rx_byte_cnt; + struct regpair fcoe_rx_data_pkt_cnt; + struct regpair fcoe_rx_xfer_pkt_cnt; + struct regpair fcoe_rx_other_pkt_cnt; + __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; + __le32 fcoe_silent_drop_pkt_rq_full_cnt; + __le32 fcoe_silent_drop_pkt_crc_error_cnt; + __le32 fcoe_silent_drop_pkt_task_invalid_cnt; + __le32 fcoe_silent_drop_total_pkt_cnt; + __le32 rsrv; +}; + +/* FCoE SQE request type */ +enum fcoe_sqe_request_type { + SEND_FCOE_CMD, + SEND_FCOE_MIDPATH, + SEND_FCOE_ABTS_REQUEST, + FCOE_EXCHANGE_CLEANUP, + FCOE_SEQUENCE_RECOVERY, + SEND_FCOE_XFER_RDY, + SEND_FCOE_RSP, + SEND_FCOE_RSP_WITH_SENSE_DATA, + SEND_FCOE_TARGET_DATA, + SEND_FCOE_INITIATOR_DATA, + SEND_FCOE_XFER_CONTINUATION_RDY, + SEND_FCOE_TARGET_ABTS_RSP, + MAX_FCOE_SQE_REQUEST_TYPE +}; + +/* FCoe statistics request */ +struct fcoe_stat_ramrod_data { + struct regpair stat_params_addr; +}; + +/* FCoE task type */ +enum fcoe_task_type { + FCOE_TASK_TYPE_WRITE_INITIATOR, + FCOE_TASK_TYPE_READ_INITIATOR, + FCOE_TASK_TYPE_MIDPATH, + FCOE_TASK_TYPE_UNSOLICITED, + FCOE_TASK_TYPE_ABTS, + FCOE_TASK_TYPE_EXCHANGE_CLEANUP, + FCOE_TASK_TYPE_SEQUENCE_CLEANUP, + FCOE_TASK_TYPE_WRITE_TARGET, + FCOE_TASK_TYPE_READ_TARGET, + FCOE_TASK_TYPE_RSP, + FCOE_TASK_TYPE_RSP_SENSE_DATA, + FCOE_TASK_TYPE_ABTS_TARGET, + FCOE_TASK_TYPE_ENUM_SIZE, + MAX_FCOE_TASK_TYPE +}; + +/* Per PF FCoE transmit path statistics - pStorm RAM structure */ +struct fcoe_tx_stat { + struct regpair fcoe_tx_byte_cnt; + struct regpair fcoe_tx_data_pkt_cnt; + struct regpair fcoe_tx_xfer_pkt_cnt; + struct regpair fcoe_tx_other_pkt_cnt; +}; + +/* FCoE SQ/XferQ element */ +struct fcoe_wqe { + __le16 task_id; + __le16 flags; +#define FCOE_WQE_REQ_TYPE_MASK 0xF +#define FCOE_WQE_REQ_TYPE_SHIFT 0 +#define FCOE_WQE_SGL_MODE_MASK 0x1 +#define FCOE_WQE_SGL_MODE_SHIFT 4 +#define FCOE_WQE_CONTINUATION_MASK 0x1 +#define FCOE_WQE_CONTINUATION_SHIFT 5 +#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 +#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 +#define FCOE_WQE_RESERVED_MASK 0x1 +#define FCOE_WQE_RESERVED_SHIFT 7 +#define FCOE_WQE_NUM_SGES_MASK 0xF +#define FCOE_WQE_NUM_SGES_SHIFT 8 +#define FCOE_WQE_RESERVED1_MASK 0xF +#define FCOE_WQE_RESERVED1_SHIFT 12 + union fcoe_additional_info_union additional_info_union; +}; + +/* FCoE XFRQ element */ +struct xfrqe_prot_flags { + u8 flags; +#define 
XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 +#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 +#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 +}; + +/* FCoE doorbell data */ +struct fcoe_db_data { + u8 params; +#define FCOE_DB_DATA_DEST_MASK 0x3 +#define FCOE_DB_DATA_DEST_SHIFT 0 +#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 +#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 +#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define FCOE_DB_DATA_RESERVED_MASK 0x1 +#define FCOE_DB_DATA_RESERVED_SHIFT 5 +#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; + +#endif /* __FCOE_COMMON__ */ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h new file mode 100644 index 000000000..b34c573f2 --- /dev/null +++ b/include/linux/qed/iscsi_common.h @@ -0,0 +1,1572 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __ISCSI_COMMON__ +#define __ISCSI_COMMON__ + +/**********************/ +/* ISCSI FW CONSTANTS */ +/**********************/ + +/* iSCSI HSI constants */ +#define ISCSI_DEFAULT_MTU (1500) + +/* KWQ (kernel work queue) layer codes */ +#define ISCSI_SLOW_PATH_LAYER_CODE (6) + +/* iSCSI parameter defaults */ +#define ISCSI_DEFAULT_HEADER_DIGEST (0) +#define ISCSI_DEFAULT_DATA_DIGEST (0) +#define ISCSI_DEFAULT_INITIAL_R2T (1) +#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) +#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) +#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) +#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) +#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) + +/* iSCSI parameter limits */ +#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) +#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) +#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) +#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) + +#define ISCSI_AHS_CNTL_SIZE 4 + +#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) + +/* iSCSI reserved params */ +#define ISCSI_ITT_ALL_ONES (0xffffffff) +#define ISCSI_TTT_ALL_ONES (0xffffffff) + +#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 +#define ISCSI_OPTION_2_ON_CHIP_TCP 2 + +#define ISCSI_INITIATOR_MODE 0 +#define ISCSI_TARGET_MODE 1 + +/* iSCSI request op codes */ +#define ISCSI_OPCODE_NOP_OUT (0) +#define ISCSI_OPCODE_SCSI_CMD (1) +#define ISCSI_OPCODE_TMF_REQUEST (2) +#define ISCSI_OPCODE_LOGIN_REQUEST (3) +#define ISCSI_OPCODE_TEXT_REQUEST (4) +#define ISCSI_OPCODE_DATA_OUT (5) +#define ISCSI_OPCODE_LOGOUT_REQUEST (6) + +/* iSCSI response/messages op codes */ +#define ISCSI_OPCODE_NOP_IN (0x20) +#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) +#define ISCSI_OPCODE_TMF_RESPONSE (0x22) +#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) +#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) +#define ISCSI_OPCODE_DATA_IN (0x25) +#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) +#define ISCSI_OPCODE_R2T (0x31) +#define ISCSI_OPCODE_ASYNC_MSG (0x32) +#define ISCSI_OPCODE_REJECT (0x3f) + +/* iSCSI stages */ +#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) +#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) +#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) + +/* iSCSI CQE errors */ +#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) +#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) + +/* Union of data bd_opaque/ tq_tid */ +union bd_opaque_tq_union { + __le16 bd_opaque; + __le16 tq_tid; +}; + +/* ISCSI SGL entry */ +struct cqe_error_bitmap { + u8 cqe_error_status_bits; +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 +#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 +#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 +}; + +union cqe_error_status { + u8 error_status; + struct cqe_error_bitmap error_bits; +}; + +/* iSCSI Login Response PDU header */ +struct data_hdr { + __le32 data[12]; +}; + +struct lun_mapper_addr_reserved { + struct regpair lun_mapper_addr; + u8 reserved0[8]; +}; + +/* rdif conetxt for dif on immediate */ +struct dif_on_immediate_params { + __le32 
initial_ref_tag; + __le16 application_tag; + __le16 application_tag_mask; + __le16 flags1; +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5 +#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6 +#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8 +#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF +#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 + u8 flags0; +#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0 +#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1 +#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3 +#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3 +#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4 +#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6 +#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7 + u8 reserved_zero[5]; +}; + +/* iSCSI dif on immediate mode attributes union */ +union dif_configuration_params { + struct lun_mapper_addr_reserved lun_mapper_address; + struct dif_on_immediate_params def_dif_conf; +}; + +/* Union of data/r2t sequence number */ +union iscsi_seq_num { + __le16 data_sn; + __le16 r2t_sn; +}; + +/* iSCSI DIF flags */ +struct iscsi_dif_flags { + u8 flags; +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 +}; + +/* The iscsi storm task context of Ystorm */ +struct ystorm_iscsi_task_state { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 exp_r2t_sn; + __le32 buffer_offset; + union iscsi_seq_num seq_num; + struct iscsi_dif_flags dif_flags; + u8 flags; +#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 +#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 +#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1 
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2 +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3 +}; + +/* The iscsi storm task context of Ystorm */ +struct ystorm_iscsi_task_rxmit_opt { + __le32 fast_rxmit_sge_offset; + __le32 scan_start_buffer_offset; + __le32 fast_rxmit_buffer_offset; + u8 scan_start_sgl_index; + u8 fast_rxmit_sgl_index; + __le16 reserved; +}; + +/* iSCSI Common PDU header */ +struct iscsi_common_hdr { + u8 hdr_status; + u8 hdr_response; + u8 hdr_flags; + u8 hdr_first_byte; +#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F +#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 +#define ISCSI_COMMON_HDR_IMM_MASK 0x1 +#define ISCSI_COMMON_HDR_IMM_SHIFT 6 +#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 +#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 + __le32 hdr_second_dword; +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun_reserved; + __le32 itt; + __le32 ttt; + __le32 cmdstat_sn; + __le32 exp_statcmd_sn; + __le32 max_cmd_sn; + __le32 data[3]; +}; + +/* iSCSI Command PDU header */ +struct iscsi_cmd_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 hdr_first_byte; +#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F +#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 +#define ISCSI_CMD_HDR_IMM_MASK 0x1 +#define ISCSI_CMD_HDR_IMM_SHIFT 6 +#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 +#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 + __le32 hdr_second_dword; +#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 expected_transfer_length; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 cdb[4]; +}; + +/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */ +struct iscsi_ext_cdb_cmd_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 expected_transfer_length; + __le32 cmd_sn; + __le32 exp_stat_sn; + struct scsi_sge cdb_sge; +}; + +/* iSCSI login request PDU header */ +struct iscsi_login_req_hdr { + u8 version_min; + u8 version_max; + u8 flags_attr; +#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 +#define 
ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 isid_tabc; + __le16 tsih; + __le16 isid_d; + __le32 itt; + __le16 reserved1; + __le16 cid; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved2[4]; +}; + +/* iSCSI logout request PDU header */ +struct iscsi_logout_req_hdr { + __le16 reserved0; + u8 reason_code; + u8 opcode; + __le32 reserved1; + __le32 reserved2[2]; + __le32 itt; + __le16 reserved3; + __le16 cid; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved4[4]; +}; + +/* iSCSI Data-out PDU header */ +struct iscsi_data_out_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 reserved3; + __le32 exp_stat_sn; + __le32 reserved4; + __le32 data_sn; + __le32 buffer_offset; + __le32 reserved5; +}; + +/* iSCSI Data-in PDU header */ +struct iscsi_data_in_hdr { + u8 status_rsvd; + u8 reserved1; + u8 flags; +#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 +#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 +#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 +#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 +#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 +#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 +#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 data_sn; + __le32 buffer_offset; + __le32 residual_count; +}; + +/* iSCSI R2T PDU header */ +struct iscsi_r2t_hdr { + u8 reserved0[3]; + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 r2t_sn; + __le32 buffer_offset; + __le32 desired_data_trns_len; +}; + +/* iSCSI NOP-out PDU header */ +struct iscsi_nop_out_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 reserved2; + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved3; + __le32 reserved4; + __le32 reserved5; + __le32 reserved6; +}; + +/* iSCSI NOP-in PDU header */ +struct iscsi_nop_in_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + 
__le32 max_cmd_sn; + __le32 reserved5; + __le32 reserved6; + __le32 reserved7; +}; + +/* iSCSI Login Response PDU header */ +struct iscsi_login_response_hdr { + u8 version_active; + u8 version_max; + u8 flags_attr; +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 isid_tabc; + __le16 tsih; + __le16 isid_d; + __le32 itt; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le16 reserved2; + u8 status_detail; + u8 status_class; + __le32 reserved4[2]; +}; + +/* iSCSI Logout Response PDU header */ +struct iscsi_logout_response_hdr { + u8 reserved1; + u8 response; + u8 flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + __le32 reserved2[2]; + __le32 itt; + __le32 reserved3; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 reserved4; + __le16 time_2_retain; + __le16 time_2_wait; + __le32 reserved5[1]; +}; + +/* iSCSI Text Request PDU header */ +struct iscsi_text_request_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 +#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 reserved4[4]; +}; + +/* iSCSI Text Response PDU header */ +struct iscsi_text_response_hdr { + __le16 reserved1; + u8 flags; +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 ttt; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 reserved4[3]; +}; + +/* iSCSI TMF Request PDU header */ +struct iscsi_tmf_request_hdr { + __le16 reserved0; + u8 function; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define 
ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 rtt; + __le32 cmd_sn; + __le32 exp_stat_sn; + __le32 ref_cmd_sn; + __le32 exp_data_sn; + __le32 reserved4[2]; +}; + +struct iscsi_tmf_response_hdr { + u8 reserved2; + u8 hdr_response; + u8 hdr_flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair reserved0; + __le32 itt; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 reserved4[3]; +}; + +/* iSCSI Response PDU header */ +struct iscsi_response_hdr { + u8 hdr_status; + u8 hdr_response; + u8 hdr_flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 snack_tag; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 exp_data_sn; + __le32 bi_residual_count; + __le32 residual_count; +}; + +/* iSCSI Reject PDU header */ +struct iscsi_reject_hdr { + u8 reserved4; + u8 hdr_reason; + u8 hdr_flags; + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair reserved0; + __le32 all_ones; + __le32 reserved2; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le32 data_sn; + __le32 reserved3[2]; +}; + +/* iSCSI Asynchronous Message PDU header */ +struct iscsi_async_msg_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F +#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 +#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 all_ones; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le16 param1_rsrv; + u8 async_vcode; + u8 async_event; + __le16 param3_rsrv; + __le16 param2_rsrv; + __le32 reserved7; +}; + +/* PDU header part of Ystorm task context */ +union iscsi_task_hdr { + struct iscsi_common_hdr common; + struct data_hdr data; + struct iscsi_cmd_hdr cmd; + struct iscsi_ext_cdb_cmd_hdr ext_cdb_cmd; + struct iscsi_login_req_hdr login_req; + struct iscsi_logout_req_hdr logout_req; + struct iscsi_data_out_hdr data_out; + struct iscsi_data_in_hdr data_in; + struct iscsi_r2t_hdr r2t; + struct iscsi_nop_out_hdr nop_out; + struct iscsi_nop_in_hdr nop_in; + struct iscsi_login_response_hdr login_response; + struct iscsi_logout_response_hdr logout_response; + struct iscsi_text_request_hdr text_request; + struct iscsi_text_response_hdr text_response; + struct iscsi_tmf_request_hdr tmf_request; + struct iscsi_tmf_response_hdr tmf_response; + struct iscsi_response_hdr response; + struct iscsi_reject_hdr reject; + struct iscsi_async_msg_hdr async_msg; +}; + +/* The iscsi storm 
task context of Ystorm */ +struct ystorm_iscsi_task_st_ctx { + struct ystorm_iscsi_task_state state; + struct ystorm_iscsi_task_rxmit_opt rxmit_opt; + union iscsi_task_hdr pdu_hdr; +}; + +struct e4_ystorm_iscsi_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 word0; + u8 flags0; +#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */ +#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7 + u8 flags1; +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 TTT; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct e4_mstorm_iscsi_task_ag_ctx { + u8 cdu_validation; + u8 byte1; + __le16 task_cid; + u8 flags0; +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 + u8 flags1; +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 
flags2; +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct e4_ustorm_iscsi_task_ag_ctx { + u8 reserved; + u8 state; + __le16 icid; + u8 flags0; +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 + u8 flags1; +#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 + u8 flags3; +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 
dif_error_1st_interval; + __le32 rcv_cont_len; + __le32 exp_cont_len; + __le32 total_data_acked; + __le32 exp_data_acked; + u8 next_tid_valid; + u8 byte3; + __le16 word1; + __le16 next_tid; + __le16 word3; + __le32 hdr_residual_count; + __le32 exp_r2t_sn; +}; + +/* The iscsi storm task context of Mstorm */ +struct mstorm_iscsi_task_st_ctx { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 rem_task_size; + __le32 data_buffer_offset; + u8 task_type; + struct iscsi_dif_flags dif_flags; + __le16 dif_task_icid; + struct regpair sense_db; + __le32 expected_itt; + __le32 reserved1; +}; + +struct iscsi_reg1 { + __le32 reg1_map; +#define ISCSI_REG1_NUM_SGES_MASK 0xF +#define ISCSI_REG1_NUM_SGES_SHIFT 0 +#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF +#define ISCSI_REG1_RESERVED1_SHIFT 4 +}; + +struct tqe_opaque { + __le16 opaque[2]; +}; + +/* The iscsi storm task context of Ustorm */ +struct ustorm_iscsi_task_st_ctx { + __le32 rem_rcv_len; + __le32 exp_data_transfer_len; + __le32 exp_data_sn; + struct regpair lun; + struct iscsi_reg1 reg1; + u8 flags2; +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 + struct iscsi_dif_flags dif_flags; + __le16 reserved3; + struct tqe_opaque tqe_opaque_list; + __le32 reserved5; + __le32 reserved6; + __le32 reserved7; + u8 task_type; + u8 error_flags; +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 + u8 flags; +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 +#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 +#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 + u8 cq_rss_number; +}; + +/* iscsi task context */ +struct e4_iscsi_task_context { + struct ystorm_iscsi_task_st_ctx ystorm_st_context; + struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context; + struct regpair ystorm_ag_padding[2]; + struct tdif_task_context tdif_context; + struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context; + struct regpair mstorm_ag_padding[2]; + struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context; + struct mstorm_iscsi_task_st_ctx mstorm_st_context; + struct ustorm_iscsi_task_st_ctx ustorm_st_context; + struct rdif_task_context rdif_context; +}; + +/* iSCSI connection offload params passed by driver to FW in ISCSI offload + * ramrod. 
+ */ +struct iscsi_conn_offload_params { + struct regpair sq_pbl_addr; + struct regpair r2tq_pbl_addr; + struct regpair xhq_pbl_addr; + struct regpair uhq_pbl_addr; + __le32 initial_ack; + __le16 physical_q0; + __le16 physical_q1; + u8 flags; +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 + u8 pbl_page_size_log; + u8 pbe_page_size_log; + u8 default_cq; + __le32 stat_sn; +}; + +/* iSCSI connection statistics */ +struct iscsi_conn_stats_params { + struct regpair iscsi_tcp_tx_packets_cnt; + struct regpair iscsi_tcp_tx_bytes_cnt; + struct regpair iscsi_tcp_tx_rxmit_cnt; + struct regpair iscsi_tcp_rx_packets_cnt; + struct regpair iscsi_tcp_rx_bytes_cnt; + struct regpair iscsi_tcp_rx_dup_ack_cnt; + __le32 iscsi_tcp_rx_chksum_err_cnt; + __le32 reserved; +}; + +/* spe message header */ +struct iscsi_slow_path_hdr { + u8 op_code; + u8 flags; +#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF +#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 +}; + +/* iSCSI connection update params passed by driver to FW in ISCSI update + *ramrod. + */ +struct iscsi_conn_update_ramrod_params { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 flags; +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7 + u8 reserved0[3]; + __le32 max_seq_size; + __le32 max_send_pdu_length; + __le32 max_recv_pdu_length; + __le32 first_seq_length; + __le32 exp_stat_sn; + union dif_configuration_params dif_on_imme_params; +}; + +/* iSCSI CQ element */ +struct iscsi_cqe_common { + __le16 conn_id; + u8 cqe_type; + union cqe_error_status error_bitmap; + __le32 reserved[3]; + union iscsi_task_hdr iscsi_hdr; +}; + +/* iSCSI CQ element */ +struct iscsi_cqe_solicited { + __le16 conn_id; + u8 cqe_type; + union cqe_error_status error_bitmap; + __le16 itid; + u8 task_type; + u8 fw_dbg_field; + u8 caused_conn_err; + u8 reserved0[3]; + __le32 data_truncated_bytes; + union iscsi_task_hdr iscsi_hdr; +}; + +/* iSCSI CQ element */ +struct iscsi_cqe_unsolicited { + __le16 conn_id; + u8 cqe_type; + union cqe_error_status 
error_bitmap; + __le16 reserved0; + u8 reserved1; + u8 unsol_cqe_type; + __le16 rqe_opaque; + __le16 reserved2[3]; + union iscsi_task_hdr iscsi_hdr; +}; + +/* iSCSI CQ element */ +union iscsi_cqe { + struct iscsi_cqe_common cqe_common; + struct iscsi_cqe_solicited cqe_solicited; + struct iscsi_cqe_unsolicited cqe_unsolicited; +}; + +/* iSCSI CQE type */ +enum iscsi_cqes_type { + ISCSI_CQE_TYPE_SOLICITED = 1, + ISCSI_CQE_TYPE_UNSOLICITED, + ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE, + ISCSI_CQE_TYPE_TASK_CLEANUP, + ISCSI_CQE_TYPE_DUMMY, + MAX_ISCSI_CQES_TYPE +}; + +/* iSCSI CQE type */ +enum iscsi_cqe_unsolicited_type { + ISCSI_CQE_UNSOLICITED_NONE, + ISCSI_CQE_UNSOLICITED_SINGLE, + ISCSI_CQE_UNSOLICITED_FIRST, + ISCSI_CQE_UNSOLICITED_MIDDLE, + ISCSI_CQE_UNSOLICITED_LAST, + MAX_ISCSI_CQE_UNSOLICITED_TYPE +}; + +/* iscsi debug modes */ +struct iscsi_debug_modes { + u8 flags; +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6 +#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7 +}; + +/* iSCSI kernel completion queue IDs */ +enum iscsi_eqe_opcode { + ISCSI_EVENT_TYPE_INIT_FUNC = 0, + ISCSI_EVENT_TYPE_DESTROY_FUNC, + ISCSI_EVENT_TYPE_OFFLOAD_CONN, + ISCSI_EVENT_TYPE_UPDATE_CONN, + ISCSI_EVENT_TYPE_CLEAR_SQ, + ISCSI_EVENT_TYPE_TERMINATE_CONN, + ISCSI_EVENT_TYPE_MAC_UPDATE_CONN, + ISCSI_EVENT_TYPE_COLLECT_STATS_CONN, + ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, + ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, + ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, + ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, + ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, + ISCSI_EVENT_TYPE_ASYN_SYN_RCVD, + ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME, + ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT, + ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, + ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2, + ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR, + ISCSI_EVENT_TYPE_TCP_CONN_ERROR, + MAX_ISCSI_EQE_OPCODE +}; + +/* iSCSI EQE and CQE completion status */ +enum iscsi_error_types { + ISCSI_STATUS_NONE = 0, + ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, + ISCSI_CONN_ERROR_TASK_CID_MISMATCH, + ISCSI_CONN_ERROR_TASK_NOT_VALID, + ISCSI_CONN_ERROR_RQ_RING_IS_FULL, + ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL, + ISCSI_CONN_ERROR_HQE_CACHING_FAILED, + ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR, + ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, + ISCSI_CONN_ERROR_DATA_OVERRUN, + ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, + ISCSI_CONN_ERROR_IP_OPTIONS_ERROR, + ISCSI_CONN_ERROR_PRS_ERRORS, + ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION, + ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, + ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE, 
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE, + ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET, + ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO, + ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2, + ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO, + ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE, + ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO, + ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG, + ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT, + ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX, + ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, + ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR, + ISCSI_CONN_ERROR_INVALID_ITT, + ISCSI_ERROR_UNKNOWN, + MAX_ISCSI_ERROR_TYPES +}; + +/* iSCSI Ramrod Command IDs */ +enum iscsi_ramrod_cmd_id { + ISCSI_RAMROD_CMD_ID_UNUSED = 0, + ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, + ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2, + ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3, + ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4, + ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, + ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, + ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7, + ISCSI_RAMROD_CMD_ID_CONN_STATS = 8, + MAX_ISCSI_RAMROD_CMD_ID +}; + +/* iSCSI connection termination request */ +struct iscsi_spe_conn_mac_update { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + u8 reserved0[2]; +}; + +/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in + * iSCSI offload ramrod. + */ +struct iscsi_spe_conn_offload { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + struct iscsi_conn_offload_params iscsi; + struct tcp_offload_params tcp; +}; + +/* iSCSI and TCP connection(Option 2) offload params passed by driver to FW in + * iSCSI offload ramrod. 
+ */ +struct iscsi_spe_conn_offload_option2 { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + struct iscsi_conn_offload_params iscsi; + struct tcp_offload_params_opt2 tcp; +}; + +/* iSCSI collect connection statistics request */ +struct iscsi_spe_conn_statistics { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 reset_stats; + u8 reserved0[7]; + struct regpair stats_cnts_addr; +}; + +/* iSCSI connection termination request */ +struct iscsi_spe_conn_termination { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 abortive; + u8 reserved0[7]; + struct regpair queue_cnts_addr; + struct regpair query_params_addr; +}; + +/* iSCSI firmware function destroy parameters */ +struct iscsi_spe_func_dstry { + struct iscsi_slow_path_hdr hdr; + __le16 reserved0; + __le32 reserved1; +}; + +/* iSCSI firmware function init parameters */ +struct iscsi_spe_func_init { + struct iscsi_slow_path_hdr hdr; + __le16 half_way_close_timeout; + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 ll2_rx_queue_id; + u8 flags; +#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1 +#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0 +#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F +#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1 + struct iscsi_debug_modes debug_mode; + __le16 reserved1; + __le32 reserved2; + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; +}; + +/* iSCSI task type */ +enum iscsi_task_type { + ISCSI_TASK_TYPE_INITIATOR_WRITE, + ISCSI_TASK_TYPE_INITIATOR_READ, + ISCSI_TASK_TYPE_MIDPATH, + ISCSI_TASK_TYPE_UNSOLIC, + ISCSI_TASK_TYPE_EXCHCLEANUP, + ISCSI_TASK_TYPE_IRRELEVANT, + ISCSI_TASK_TYPE_TARGET_WRITE, + ISCSI_TASK_TYPE_TARGET_READ, + ISCSI_TASK_TYPE_TARGET_RESPONSE, + ISCSI_TASK_TYPE_LOGIN_RESPONSE, + ISCSI_TASK_TYPE_TARGET_IMM_W_DIF, + MAX_ISCSI_TASK_TYPE +}; + +/* iSCSI DesiredDataTransferLength/ttt union */ +union iscsi_ttt_txlen_union { + __le32 desired_tx_len; + __le32 ttt; +}; + +/* iSCSI uHQ element */ +struct iscsi_uhqe { + __le32 reg1; +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 +#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 +#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 +#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 +#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 +#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 +#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 +#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 + __le32 reg2; +#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF +#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 +#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 +}; + +/* iSCSI WQ element */ +struct iscsi_wqe { + __le16 task_id; + u8 flags; +#define ISCSI_WQE_WQE_TYPE_MASK 0x7 +#define ISCSI_WQE_WQE_TYPE_SHIFT 0 +#define ISCSI_WQE_NUM_SGES_MASK 0xF +#define ISCSI_WQE_NUM_SGES_SHIFT 3 +#define ISCSI_WQE_RESPONSE_MASK 0x1 +#define ISCSI_WQE_RESPONSE_SHIFT 7 + struct iscsi_dif_flags prot_flags; + __le32 contlen_cdbsize; +#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF +#define ISCSI_WQE_CONT_LEN_SHIFT 0 +#define ISCSI_WQE_CDB_SIZE_MASK 0xFF +#define ISCSI_WQE_CDB_SIZE_SHIFT 24 +}; + +/* iSCSI wqe type */ +enum iscsi_wqe_type { + ISCSI_WQE_TYPE_NORMAL, + ISCSI_WQE_TYPE_TASK_CLEANUP, + ISCSI_WQE_TYPE_MIDDLE_PATH, + ISCSI_WQE_TYPE_LOGIN, + ISCSI_WQE_TYPE_FIRST_R2T_CONT, + ISCSI_WQE_TYPE_NONFIRST_R2T_CONT, + 
ISCSI_WQE_TYPE_RESPONSE, + MAX_ISCSI_WQE_TYPE +}; + +/* iSCSI xHQ element */ +struct iscsi_xhqe { + union iscsi_ttt_txlen_union ttt_or_txlen; + __le32 exp_stat_sn; + struct iscsi_dif_flags prot_flags; + u8 total_ahs_length; + u8 opcode; + u8 flags; +#define ISCSI_XHQE_FINAL_MASK 0x1 +#define ISCSI_XHQE_FINAL_SHIFT 0 +#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 +#define ISCSI_XHQE_STATUS_BIT_SHIFT 1 +#define ISCSI_XHQE_NUM_SGES_MASK 0xF +#define ISCSI_XHQE_NUM_SGES_SHIFT 2 +#define ISCSI_XHQE_RESERVED0_MASK 0x3 +#define ISCSI_XHQE_RESERVED0_SHIFT 6 + union iscsi_seq_num seq_num; + __le16 reserved1; +}; + +/* Per PF iSCSI receive path statistics - mStorm RAM structure */ +struct mstorm_iscsi_stats_drv { + struct regpair iscsi_rx_dropped_pdus_task_not_valid; + struct regpair iscsi_rx_dup_ack_cnt; +}; + +/* Per PF iSCSI transmit path statistics - pStorm RAM structure */ +struct pstorm_iscsi_stats_drv { + struct regpair iscsi_tx_bytes_cnt; + struct regpair iscsi_tx_packet_cnt; +}; + +/* Per PF iSCSI receive path statistics - tStorm RAM structure */ +struct tstorm_iscsi_stats_drv { + struct regpair iscsi_rx_bytes_cnt; + struct regpair iscsi_rx_packet_cnt; + struct regpair iscsi_rx_new_ooo_isle_events_cnt; + struct regpair iscsi_rx_tcp_payload_bytes_cnt; + struct regpair iscsi_rx_tcp_pkt_cnt; + struct regpair iscsi_rx_pure_ack_cnt; + __le32 iscsi_cmdq_threshold_cnt; + __le32 iscsi_rq_threshold_cnt; + __le32 iscsi_immq_threshold_cnt; +}; + +/* Per PF iSCSI receive path statistics - uStorm RAM structure */ +struct ustorm_iscsi_stats_drv { + struct regpair iscsi_rx_data_pdu_cnt; + struct regpair iscsi_rx_r2t_pdu_cnt; + struct regpair iscsi_rx_total_pdu_cnt; +}; + +/* Per PF iSCSI transmit path statistics - xStorm RAM structure */ +struct xstorm_iscsi_stats_drv { + struct regpair iscsi_tx_go_to_slow_start_event_cnt; + struct regpair iscsi_tx_fast_retransmit_event_cnt; + struct regpair iscsi_tx_pure_ack_cnt; + struct regpair iscsi_tx_delayed_ack_cnt; +}; + +/* Per PF iSCSI transmit path statistics - yStorm RAM structure */ +struct ystorm_iscsi_stats_drv { + struct regpair iscsi_tx_data_pdu_cnt; + struct regpair iscsi_tx_r2t_pdu_cnt; + struct regpair iscsi_tx_total_pdu_cnt; + struct regpair iscsi_tx_tcp_payload_bytes_cnt; + struct regpair iscsi_tx_tcp_pkt_cnt; +}; + +struct e4_tstorm_iscsi_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 word0; + u8 flags0; +#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 + u8 flags2; +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 +#define 
E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 + u8 flags3; +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 + u8 flags4; +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 + u8 byte2; + __le16 word1; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg1; + __le32 reg2; +}; + +/* iSCSI doorbell data */ +struct iscsi_db_data { + u8 params; +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 +#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; + +#endif /* __ISCSI_COMMON__ */ diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h new file mode 100644 index 000000000..c6cfd39cd --- /dev/null +++ b/include/linux/qed/iwarp_common.h @@ -0,0 +1,56 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __IWARP_COMMON__ +#define __IWARP_COMMON__ + +#include + +/************************/ +/* IWARP FW CONSTANTS */ +/************************/ + +#define IWARP_ACTIVE_MODE 0 +#define IWARP_PASSIVE_MODE 1 + +#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000) +#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) +#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) +#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) +#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) + +#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) +#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) + +#define IWARP_MAX_QPS (64 * 1024) + +#endif /* __IWARP_COMMON__ */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h new file mode 100644 index 000000000..6d15040c6 --- /dev/null +++ b/include/linux/qed/qed_chain.h @@ -0,0 +1,735 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _QED_CHAIN_H +#define _QED_CHAIN_H + +#include +#include +#include +#include +#include +#include + +enum qed_chain_mode { + /* Each Page contains a next pointer at its end */ + QED_CHAIN_MODE_NEXT_PTR, + + /* Chain is a single page (next ptr) is unrequired */ + QED_CHAIN_MODE_SINGLE, + + /* Page pointers are located in a side list */ + QED_CHAIN_MODE_PBL, +}; + +enum qed_chain_use_mode { + QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ + QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ +}; + +enum qed_chain_cnt_type { + /* The chain's size/prod/cons are kept in 16-bit variables */ + QED_CHAIN_CNT_TYPE_U16, + + /* The chain's size/prod/cons are kept in 32-bit variables */ + QED_CHAIN_CNT_TYPE_U32, +}; + +struct qed_chain_next { + struct regpair next_phys; + void *next_virt; +}; + +struct qed_chain_pbl_u16 { + u16 prod_page_idx; + u16 cons_page_idx; +}; + +struct qed_chain_pbl_u32 { + u32 prod_page_idx; + u32 cons_page_idx; +}; + +struct qed_chain_ext_pbl { + dma_addr_t p_pbl_phys; + void *p_pbl_virt; +}; + +struct qed_chain_u16 { + /* Cyclic index of next element to produce/consme */ + u16 prod_idx; + u16 cons_idx; +}; + +struct qed_chain_u32 { + /* Cyclic index of next element to produce/consme */ + u32 prod_idx; + u32 cons_idx; +}; + +struct addr_tbl_entry { + void *virt_addr; + dma_addr_t dma_map; +}; + +struct qed_chain { + /* fastpath portion of the chain - required for commands such + * as produce / consume. + */ + /* Point to next element to produce/consume */ + void *p_prod_elem; + void *p_cons_elem; + + /* Fastpath portions of the PBL [if exists] */ + struct { + /* Table for keeping the virtual and physical addresses of the + * chain pages, respectively to the physical addresses + * in the pbl table. + */ + struct addr_tbl_entry *pp_addr_tbl; + + union { + struct qed_chain_pbl_u16 u16; + struct qed_chain_pbl_u32 u32; + } c; + } pbl; + + union { + struct qed_chain_u16 chain16; + struct qed_chain_u32 chain32; + } u; + + /* Capacity counts only usable elements */ + u32 capacity; + u32 page_cnt; + + enum qed_chain_mode mode; + + /* Elements information for fast calculations */ + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_size; + u16 next_page_mask; + u16 usable_per_page; + u8 elem_unusable; + + u8 cnt_type; + + /* Slowpath of the chain - required for initialization and destruction, + * but isn't involved in regular functionality. + */ + + /* Base address of a pre-allocated buffer for pbl */ + struct { + dma_addr_t p_phys_table; + void *p_virt_table; + } pbl_sp; + + /* Address of first page of the chain - the address is required + * for fastpath operation [consume/produce] but only for the the SINGLE + * flavour which isn't considered fastpath [== SPQ]. + */ + void *p_virt_addr; + dma_addr_t p_phys_addr; + + /* Total number of elements [for entire chain] */ + u32 size; + + u8 intended_use; + + bool b_external_pbl; +}; + +#define QED_CHAIN_PBL_ENTRY_SIZE (8) +#define QED_CHAIN_PAGE_SIZE (0x1000) +#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) + +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? 
\ + (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \ + (elem_size))) : 0) + +#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((u32)(ELEMS_PER_PAGE(elem_size) - \ + UNUSABLE_ELEMS_PER_PAGE(elem_size, mode))) + +#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ + DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) + +#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) +#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) + +/* Accessors */ +static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) +{ + return p_chain->u.chain16.prod_idx; +} + +static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) +{ + return p_chain->u.chain16.cons_idx; +} + +static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) +{ + return p_chain->u.chain32.cons_idx; +} + +static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) +{ + u16 elem_per_page = p_chain->elem_per_page; + u32 prod = p_chain->u.chain16.prod_idx; + u32 cons = p_chain->u.chain16.cons_idx; + u16 used; + + if (prod < cons) + prod += (u32)U16_MAX + 1; + + used = (u16)(prod - cons); + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= prod / elem_per_page - cons / elem_per_page; + + return (u16)(p_chain->capacity - used); +} + +static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) +{ + u16 elem_per_page = p_chain->elem_per_page; + u64 prod = p_chain->u.chain32.prod_idx; + u64 cons = p_chain->u.chain32.cons_idx; + u32 used; + + if (prod < cons) + prod += (u64)U32_MAX + 1; + + used = (u32)(prod - cons); + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= (u32)(prod / elem_per_page - cons / elem_per_page); + + return p_chain->capacity - used; +} + +static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) +{ + return p_chain->usable_per_page; +} + +static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) +{ + return p_chain->elem_unusable; +} + +static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) +{ + return p_chain->page_cnt; +} + +static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) +{ + return p_chain->pbl_sp.p_phys_table; +} + +/** + * @brief qed_chain_advance_page - + * + * Advance the next element accros pages for a linked chain + * + * @param p_chain + * @param p_next_elem + * @param idx_to_inc + * @param page_to_inc + */ +static inline void +qed_chain_advance_page(struct qed_chain *p_chain, + void **p_next_elem, void *idx_to_inc, void *page_to_inc) +{ + struct qed_chain_next *p_next = NULL; + u32 page_index = 0; + + switch (p_chain->mode) { + case QED_CHAIN_MODE_NEXT_PTR: + p_next = *p_next_elem; + *p_next_elem = p_next->next_virt; + if (is_chain_u16(p_chain)) + *(u16 *)idx_to_inc += p_chain->elem_unusable; + else + *(u32 *)idx_to_inc += p_chain->elem_unusable; + break; + case QED_CHAIN_MODE_SINGLE: + *p_next_elem = p_chain->p_virt_addr; + break; + + case QED_CHAIN_MODE_PBL: + if (is_chain_u16(p_chain)) { + if (++(*(u16 *)page_to_inc) == p_chain->page_cnt) + *(u16 *)page_to_inc = 0; + page_index = *(u16 *)page_to_inc; + } else { + if (++(*(u32 *)page_to_inc) == p_chain->page_cnt) + *(u32 *)page_to_inc = 0; + page_index = *(u32 *)page_to_inc; + } + *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr; + } +} + +#define is_unusable_idx(p, idx) \ + (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define is_unusable_idx_u32(p, idx) \ + (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) +#define 
is_unusable_next_idx(p, idx) \
+	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
+	 (p)->usable_per_page)
+
+#define is_unusable_next_idx_u32(p, idx) \
+	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
+	 (p)->usable_per_page)
+
+#define test_and_skip(p, idx) \
+	do { \
+		if (is_chain_u16(p)) { \
+			if (is_unusable_idx(p, idx)) \
+				(p)->u.chain16.idx += (p)->elem_unusable; \
+		} else { \
+			if (is_unusable_idx_u32(p, idx)) \
+				(p)->u.chain32.idx += (p)->elem_unusable; \
+		} \
+	} while (0)
+
+/**
+ * @brief qed_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_return_produced(struct qed_chain *p_chain)
+{
+	if (is_chain_u16(p_chain))
+		p_chain->u.chain16.cons_idx++;
+	else
+		p_chain->u.chain32.cons_idx++;
+	test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It is the driver's
+ * responsibility to validate that the chain has room for a new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next element
+ */
+static inline void *qed_chain_produce(struct qed_chain *p_chain)
+{
+	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;
+
+	if (is_chain_u16(p_chain)) {
+		if ((p_chain->u.chain16.prod_idx &
+		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+			p_prod_idx = &p_chain->u.chain16.prod_idx;
+			p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
+			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+					       p_prod_idx, p_prod_page_idx);
+		}
+		p_chain->u.chain16.prod_idx++;
+	} else {
+		if ((p_chain->u.chain32.prod_idx &
+		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+			p_prod_idx = &p_chain->u.chain32.prod_idx;
+			p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
+			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+					       p_prod_idx, p_prod_page_idx);
+		}
+		p_chain->u.chain32.prod_idx++;
+	}
+
+	p_ret = p_chain->p_prod_elem;
+	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+					p_chain->elem_size);
+
+	return p_ret;
+}
+
+/**
+ * @brief qed_chain_get_capacity -
+ *
+ * Get the maximum number of BDs in the chain
+ *
+ * @param p_chain
+ *
+ * @return number of usable BDs [the chain's capacity]
+ */
+static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
+{
+	return p_chain->capacity;
+}
+
+/**
+ * @brief qed_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * Increments producers so they can be written to FW.
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
+{
+	test_and_skip(p_chain, prod_idx);
+	if (is_chain_u16(p_chain))
+		p_chain->u.chain16.prod_idx++;
+	else
+		p_chain->u.chain32.prod_idx++;
+}
+
+/**
+ * @brief qed_chain_consume -
+ *
+ * A chain in which the driver utilizes data written by a different source
+ * (i.e., FW) should use this to access passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static inline void *qed_chain_consume(struct qed_chain *p_chain)
+{
+	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;
+
+	if (is_chain_u16(p_chain)) {
+		if ((p_chain->u.chain16.cons_idx &
+		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+			p_cons_idx = &p_chain->u.chain16.cons_idx;
+			p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
+			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+					       p_cons_idx, p_cons_page_idx);
+		}
+		p_chain->u.chain16.cons_idx++;
+	} else {
+		if ((p_chain->u.chain32.cons_idx &
+		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+			p_cons_idx = &p_chain->u.chain32.cons_idx;
+			p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
+			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+					       p_cons_idx, p_cons_page_idx);
+		}
+		p_chain->u.chain32.cons_idx++;
+	}
+
+	p_ret = p_chain->p_cons_elem;
+	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+					p_chain->elem_size);
+
+	return p_ret;
+}
+
+/**
+ * @brief qed_chain_reset - Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static inline void qed_chain_reset(struct qed_chain *p_chain)
+{
+	u32 i;
+
+	if (is_chain_u16(p_chain)) {
+		p_chain->u.chain16.prod_idx = 0;
+		p_chain->u.chain16.cons_idx = 0;
+	} else {
+		p_chain->u.chain32.prod_idx = 0;
+		p_chain->u.chain32.cons_idx = 0;
+	}
+	p_chain->p_cons_elem = p_chain->p_virt_addr;
+	p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+		/* Use (page_cnt - 1) as a reset value for the prod/cons page's
+		 * indices, to avoid unnecessary page advancing on the first
+		 * call to qed_chain_produce/consume. Instead, the indices
+		 * will be advanced to page_cnt and then will be wrapped to 0.
+		 */
+		u32 reset_val = p_chain->page_cnt - 1;
+
+		if (is_chain_u16(p_chain)) {
+			p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
+			p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
+		} else {
+			p_chain->pbl.c.u32.prod_page_idx = reset_val;
+			p_chain->pbl.c.u32.cons_page_idx = reset_val;
+		}
+	}
+
+	switch (p_chain->intended_use) {
+	case QED_CHAIN_USE_TO_CONSUME:
+		/* produce empty elements */
+		for (i = 0; i < p_chain->capacity; i++)
+			qed_chain_recycle_consumed(p_chain);
+		break;
+
+	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+	case QED_CHAIN_USE_TO_PRODUCE:
+	default:
+		/* Do nothing */
+		break;
+	}
+}
+
+/**
+ * @brief qed_chain_init_params - Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param p_virt_addr
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ * @param page_cnt number of pages in the allocated buffer
+ * @param elem_size size of each element in the chain
+ * @param intended_use
+ * @param mode
+ */
+static inline void qed_chain_init_params(struct qed_chain *p_chain,
+					 u32 page_cnt,
+					 u8 elem_size,
+					 enum qed_chain_use_mode intended_use,
+					 enum qed_chain_mode mode,
+					 enum qed_chain_cnt_type cnt_type)
+{
+	/* chain fixed parameters */
+	p_chain->p_virt_addr = NULL;
+	p_chain->p_phys_addr = 0;
+	p_chain->elem_size = elem_size;
+	p_chain->intended_use = (u8)intended_use;
+	p_chain->mode = mode;
+	p_chain->cnt_type = (u8)cnt_type;
+
+	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+	p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
+	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+	p_chain->next_page_mask = (p_chain->usable_per_page &
+				   p_chain->elem_per_page_mask);
+
+	p_chain->page_cnt = page_cnt;
+	p_chain->capacity = p_chain->usable_per_page * page_cnt;
+	p_chain->size = p_chain->elem_per_page * page_cnt;
+
+	p_chain->pbl_sp.p_phys_table = 0;
+	p_chain->pbl_sp.p_virt_table = NULL;
+	p_chain->pbl.pp_addr_tbl = NULL;
+}
+
+/**
+ * @brief qed_chain_init_mem -
+ *
+ * Initializes a basic chain struct with its chain buffers
+ *
+ * @param p_chain
+ * @param p_virt_addr virtual address of allocated buffer's beginning
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ *
+ */
+static inline void qed_chain_init_mem(struct qed_chain *p_chain,
+				      void *p_virt_addr, dma_addr_t p_phys_addr)
+{
+	p_chain->p_virt_addr = p_virt_addr;
+	p_chain->p_phys_addr = p_phys_addr;
+}
+
+/**
+ * @brief qed_chain_init_pbl_mem -
+ *
+ * Initializes a basic chain struct with its pbl buffers
+ *
+ * @param p_chain
+ * @param p_virt_pbl pointer to a pre-allocated side table which will hold
+ *        virtual page addresses.
+ * @param p_phys_pbl pointer to a pre-allocated side table which will hold
+ *        physical page addresses.
+ * @param pp_addr_tbl
+ *        pointer to a pre-allocated side table which will hold
+ *        the virtual addresses of the chain pages.
+ * + */ +static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, + void *p_virt_pbl, + dma_addr_t p_phys_pbl, + struct addr_tbl_entry *pp_addr_tbl) +{ + p_chain->pbl_sp.p_phys_table = p_phys_pbl; + p_chain->pbl_sp.p_virt_table = p_virt_pbl; + p_chain->pbl.pp_addr_tbl = pp_addr_tbl; +} + +/** + * @brief qed_chain_init_next_ptr_elem - + * + * Initalizes a next pointer element + * + * @param p_chain + * @param p_virt_curr virtual address of a chain page of which the next + * pointer element is initialized + * @param p_virt_next virtual address of the next chain page + * @param p_phys_next physical address of the next chain page + * + */ +static inline void +qed_chain_init_next_ptr_elem(struct qed_chain *p_chain, + void *p_virt_curr, + void *p_virt_next, dma_addr_t p_phys_next) +{ + struct qed_chain_next *p_next; + u32 size; + + size = p_chain->elem_size * p_chain->usable_per_page; + p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size); + + DMA_REGPAIR_LE(p_next->next_phys, p_phys_next); + + p_next->next_virt = p_virt_next; +} + +/** + * @brief qed_chain_get_last_elem - + * + * Returns a pointer to the last element of the chain + * + * @param p_chain + * + * @return void* + */ +static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) +{ + struct qed_chain_next *p_next = NULL; + void *p_virt_addr = NULL; + u32 size, last_page_idx; + + if (!p_chain->p_virt_addr) + goto out; + + switch (p_chain->mode) { + case QED_CHAIN_MODE_NEXT_PTR: + size = p_chain->elem_size * p_chain->usable_per_page; + p_virt_addr = p_chain->p_virt_addr; + p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size); + while (p_next->next_virt != p_chain->p_virt_addr) { + p_virt_addr = p_next->next_virt; + p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + + size); + } + break; + case QED_CHAIN_MODE_SINGLE: + p_virt_addr = p_chain->p_virt_addr; + break; + case QED_CHAIN_MODE_PBL: + last_page_idx = p_chain->page_cnt - 1; + p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr; + break; + } + /* p_virt_addr points at this stage to the last page of the chain */ + size = p_chain->elem_size * (p_chain->usable_per_page - 1); + p_virt_addr = (u8 *)p_virt_addr + size; +out: + return p_virt_addr; +} + +/** + * @brief qed_chain_set_prod - sets the prod to the given value + * + * @param prod_idx + * @param p_prod_elem + */ +static inline void qed_chain_set_prod(struct qed_chain *p_chain, + u32 prod_idx, void *p_prod_elem) +{ + if (p_chain->mode == QED_CHAIN_MODE_PBL) { + u32 cur_prod, page_mask, page_cnt, page_diff; + + cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx : + p_chain->u.chain32.prod_idx; + + /* Assume that number of elements in a page is power of 2 */ + page_mask = ~p_chain->elem_per_page_mask; + + /* Use "cur_prod - 1" and "prod_idx - 1" since producer index + * reaches the first element of next page before the page index + * is incremented. See qed_chain_produce(). + * Index wrap around is not a problem because the difference + * between current and given producer indices is always + * positive and lower than the chain's capacity. 
+ */ + page_diff = (((cur_prod - 1) & page_mask) - + ((prod_idx - 1) & page_mask)) / + p_chain->elem_per_page; + + page_cnt = qed_chain_get_page_cnt(p_chain); + if (is_chain_u16(p_chain)) + p_chain->pbl.c.u16.prod_page_idx = + (p_chain->pbl.c.u16.prod_page_idx - + page_diff + page_cnt) % page_cnt; + else + p_chain->pbl.c.u32.prod_page_idx = + (p_chain->pbl.c.u32.prod_page_idx - + page_diff + page_cnt) % page_cnt; + } + + if (is_chain_u16(p_chain)) + p_chain->u.chain16.prod_idx = (u16) prod_idx; + else + p_chain->u.chain32.prod_idx = prod_idx; + p_chain->p_prod_elem = p_prod_elem; +} + +/** + * @brief qed_chain_pbl_zero_mem - set chain memory to 0 + * + * @param p_chain + */ +static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) +{ + u32 i, page_cnt; + + if (p_chain->mode != QED_CHAIN_MODE_PBL) + return; + + page_cnt = qed_chain_get_page_cnt(p_chain); + + for (i = 0; i < page_cnt; i++) + memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0, + QED_CHAIN_PAGE_SIZE); +} + +#endif diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h new file mode 100644 index 000000000..a1310482c --- /dev/null +++ b/include/linux/qed/qed_eth_if.h @@ -0,0 +1,371 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _QED_ETH_IF_H +#define _QED_ETH_IF_H + +#include +#include +#include +#include +#include + +/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */ +#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2) +#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS)) + +struct qed_queue_start_common_params { + /* Should always be relative to entity sending this. 
*/ + u8 vport_id; + u16 queue_id; + + /* Relative, but relevant only for PFs */ + u8 stats_id; + + struct qed_sb_info *p_sb; + u8 sb_idx; + + u8 tc; +}; + +struct qed_rxq_start_ret_params { + void __iomem *p_prod; + void *p_handle; +}; + +struct qed_txq_start_ret_params { + void __iomem *p_doorbell; + void *p_handle; +}; + +enum qed_filter_config_mode { + QED_FILTER_CONFIG_MODE_DISABLE, + QED_FILTER_CONFIG_MODE_5_TUPLE, + QED_FILTER_CONFIG_MODE_L4_PORT, + QED_FILTER_CONFIG_MODE_IP_DEST, + QED_FILTER_CONFIG_MODE_IP_SRC, +}; + +struct qed_ntuple_filter_params { + /* Physically mapped address containing header of buffer to be used + * as filter. + */ + dma_addr_t addr; + + /* Length of header in bytes */ + u16 length; + + /* Relative queue-id to receive classified packet */ +#define QED_RFS_NTUPLE_QID_RSS ((u16)-1) + u16 qid; + + /* Identifier can either be according to vport-id or vfid */ + bool b_is_vf; + u8 vport_id; + u8 vf_id; + + /* true iff this filter is to be added. Else to be removed */ + bool b_is_add; + + /* If flow needs to be dropped */ + bool b_is_drop; +}; + +struct qed_dev_eth_info { + struct qed_dev_info common; + + u8 num_queues; + u8 num_tc; + + u8 port_mac[ETH_ALEN]; + u16 num_vlan_filters; + u16 num_mac_filters; + + /* Legacy VF - this affects the datapath, so qede has to know */ + bool is_legacy; + + /* Might depend on available resources [in case of VF] */ + bool xdp_supported; +}; + +struct qed_update_vport_rss_params { + void *rss_ind_table[128]; + u32 rss_key[10]; + u8 rss_caps; +}; + +struct qed_update_vport_params { + u8 vport_id; + u8 update_vport_active_flg; + u8 vport_active_flg; + u8 update_tx_switching_flg; + u8 tx_switching_flg; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + u8 update_rss_flg; + struct qed_update_vport_rss_params rss_params; +}; + +struct qed_start_vport_params { + bool remove_inner_vlan; + bool handle_ptp_pkts; + bool gro_enable; + bool drop_ttl0; + u8 vport_id; + u16 mtu; + bool clear_stats; +}; + +enum qed_filter_rx_mode_type { + QED_FILTER_RX_MODE_TYPE_REGULAR, + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, + QED_FILTER_RX_MODE_TYPE_PROMISC, +}; + +enum qed_filter_xcast_params_type { + QED_FILTER_XCAST_TYPE_ADD, + QED_FILTER_XCAST_TYPE_DEL, + QED_FILTER_XCAST_TYPE_REPLACE, +}; + +struct qed_filter_ucast_params { + enum qed_filter_xcast_params_type type; + u8 vlan_valid; + u16 vlan; + u8 mac_valid; + unsigned char mac[ETH_ALEN]; +}; + +struct qed_filter_mcast_params { + enum qed_filter_xcast_params_type type; + u8 num; + unsigned char mac[64][ETH_ALEN]; +}; + +union qed_filter_type_params { + enum qed_filter_rx_mode_type accept_flags; + struct qed_filter_ucast_params ucast; + struct qed_filter_mcast_params mcast; +}; + +enum qed_filter_type { + QED_FILTER_TYPE_UCAST, + QED_FILTER_TYPE_MCAST, + QED_FILTER_TYPE_RX_MODE, + QED_MAX_FILTER_TYPES, +}; + +struct qed_filter_params { + enum qed_filter_type type; + union qed_filter_type_params filter; +}; + +struct qed_tunn_params { + u16 vxlan_port; + u8 update_vxlan_port; + u16 geneve_port; + u8 update_geneve_port; +}; + +struct qed_eth_cb_ops { + struct qed_common_cb_ops common; + void (*force_mac) (void *dev, u8 *mac, bool forced); + void (*ports_update)(void *dev, u16 vxlan_port, u16 geneve_port); +}; + +#define QED_MAX_PHC_DRIFT_PPB 291666666 + +enum qed_ptp_filter_type { + QED_PTP_FILTER_NONE, + QED_PTP_FILTER_ALL, + QED_PTP_FILTER_V1_L4_EVENT, + QED_PTP_FILTER_V1_L4_GEN, + QED_PTP_FILTER_V2_L4_EVENT, + QED_PTP_FILTER_V2_L4_GEN, + QED_PTP_FILTER_V2_L2_EVENT, + QED_PTP_FILTER_V2_L2_GEN, + 
QED_PTP_FILTER_V2_EVENT, + QED_PTP_FILTER_V2_GEN +}; + +enum qed_ptp_hwtstamp_tx_type { + QED_PTP_HWTSTAMP_TX_OFF, + QED_PTP_HWTSTAMP_TX_ON, +}; + +#ifdef CONFIG_DCB +/* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration + * of dcbnl_rtnl_ops structure. + */ +struct qed_eth_dcbnl_ops { + /* IEEE 802.1Qaz std */ + int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app); + int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app); + + /* CEE std */ + u8 (*getstate)(struct qed_dev *cdev); + u8 (*setstate)(struct qed_dev *cdev, u8 state); + void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map); + void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); + void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map); + void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); + void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting); + void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting); + u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap); + int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num); + u8 (*getpfcstate)(struct qed_dev *cdev); + int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id); + u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags); + + /* DCBX configuration */ + u8 (*getdcbx)(struct qed_dev *cdev); + void (*setpgtccfgtx)(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); + void (*setpgtccfgrx)(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); + void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct); + void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct); + u8 (*setall)(struct qed_dev *cdev); + int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num); + void (*setpfcstate)(struct qed_dev *cdev, u8 state); + int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up); + u8 (*setdcbx)(struct qed_dev *cdev, u8 state); + u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags); + + /* Peer apps */ + int (*peer_getappinfo)(struct qed_dev *cdev, + struct dcb_peer_app_info *info, + u16 *app_count); + int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table); + + /* CEE peer */ + int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc); + int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg); +}; +#endif + +struct qed_eth_ptp_ops { + int (*cfg_filters)(struct qed_dev *, enum qed_ptp_filter_type, + enum qed_ptp_hwtstamp_tx_type); + int (*read_rx_ts)(struct qed_dev *, u64 *); + int (*read_tx_ts)(struct qed_dev *, u64 *); + int (*read_cc)(struct qed_dev *, u64 *); + int (*disable)(struct qed_dev *); + int (*adjfreq)(struct qed_dev *, s32); + int (*enable)(struct qed_dev *); +}; + +struct qed_eth_ops { + const struct qed_common_ops *common; +#ifdef CONFIG_QED_SRIOV + const struct qed_iov_hv_ops *iov; +#endif +#ifdef CONFIG_DCB + const struct qed_eth_dcbnl_ops *dcb; +#endif + const struct qed_eth_ptp_ops *ptp; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_eth_info *info); + + void 
(*register_ops)(struct qed_dev *cdev, + struct qed_eth_cb_ops *ops, + void *cookie); + + bool(*check_mac) (struct qed_dev *cdev, u8 *mac); + + int (*vport_start)(struct qed_dev *cdev, + struct qed_start_vport_params *params); + + int (*vport_stop)(struct qed_dev *cdev, + u8 vport_id); + + int (*vport_update)(struct qed_dev *cdev, + struct qed_update_vport_params *params); + + int (*q_rx_start)(struct qed_dev *cdev, + u8 rss_num, + struct qed_queue_start_common_params *params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + struct qed_rxq_start_ret_params *ret_params); + + int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); + + int (*q_tx_start)(struct qed_dev *cdev, + u8 rss_num, + struct qed_queue_start_common_params *params, + dma_addr_t pbl_addr, + u16 pbl_size, + struct qed_txq_start_ret_params *ret_params); + + int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); + + int (*filter_config)(struct qed_dev *cdev, + struct qed_filter_params *params); + + int (*fastpath_stop)(struct qed_dev *cdev); + + int (*eth_cqe_completion)(struct qed_dev *cdev, + u8 rss_id, + struct eth_slow_path_rx_cqe *cqe); + + void (*get_vport_stats)(struct qed_dev *cdev, + struct qed_eth_stats *stats); + + int (*tunn_config)(struct qed_dev *cdev, + struct qed_tunn_params *params); + + int (*ntuple_filter_config)(struct qed_dev *cdev, + void *cookie, + struct qed_ntuple_filter_params *params); + + int (*configure_arfs_searcher)(struct qed_dev *cdev, + enum qed_filter_config_mode mode); + int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); + int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac); +}; + +const struct qed_eth_ops *qed_get_eth_ops(void); +void qed_put_eth_ops(void); + +#endif diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h new file mode 100644 index 000000000..46082480a --- /dev/null +++ b/include/linux/qed/qed_fcoe_if.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _QED_FCOE_IF_H +#define _QED_FCOE_IF_H +#include +#include +struct qed_fcoe_stats { + u64 fcoe_rx_byte_cnt; + u64 fcoe_rx_data_pkt_cnt; + u64 fcoe_rx_xfer_pkt_cnt; + u64 fcoe_rx_other_pkt_cnt; + u32 fcoe_silent_drop_pkt_cmdq_full_cnt; + u32 fcoe_silent_drop_pkt_rq_full_cnt; + u32 fcoe_silent_drop_pkt_crc_error_cnt; + u32 fcoe_silent_drop_pkt_task_invalid_cnt; + u32 fcoe_silent_drop_total_pkt_cnt; + + u64 fcoe_tx_byte_cnt; + u64 fcoe_tx_data_pkt_cnt; + u64 fcoe_tx_xfer_pkt_cnt; + u64 fcoe_tx_other_pkt_cnt; +}; + +struct qed_dev_fcoe_info { + struct qed_dev_info common; + + void __iomem *primary_dbq_rq_addr; + void __iomem *secondary_bdq_rq_addr; + + u64 wwpn; + u64 wwnn; + + u8 num_cqs; +}; + +struct qed_fcoe_params_offload { + dma_addr_t sq_pbl_addr; + dma_addr_t sq_curr_page_addr; + dma_addr_t sq_next_page_addr; + + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + + u16 tx_max_fc_pay_len; + u16 e_d_tov_timer_val; + u16 rec_tov_timer_val; + u16 rx_max_fc_pay_len; + u16 vlan_tag; + + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; + u8 def_q_idx; +}; + +#define MAX_TID_BLOCKS_FCOE (512) +struct qed_fcoe_tid { + u32 size; /* In bytes per task */ + u32 num_tids_per_block; + u8 *blocks[MAX_TID_BLOCKS_FCOE]; +}; + +struct qed_fcoe_cb_ops { + struct qed_common_cb_ops common; + u32 (*get_login_failures)(void *cookie); +}; + +void qed_fcoe_set_pf_params(struct qed_dev *cdev, + struct qed_fcoe_pf_params *params); + +/** + * struct qed_fcoe_ops - qed FCoE 
operations. + * @common: common operations pointer + * @fill_dev_info: fills FCoE specific information + * @param cdev + * @param info + * @return 0 on sucesss, otherwise error value. + * @register_ops: register FCoE operations + * @param cdev + * @param ops - specified using qed_iscsi_cb_ops + * @param cookie - driver private + * @ll2: light L2 operations pointer + * @start: fcoe in FW + * @param cdev + * @param tasks - qed will fill information about tasks + * return 0 on success, otherwise error value. + * @stop: stops fcoe in FW + * @param cdev + * return 0 on success, otherwise error value. + * @acquire_conn: acquire a new fcoe connection + * @param cdev + * @param handle - qed will fill handle that should be + * used henceforth as identifier of the + * connection. + * @param p_doorbell - qed will fill the address of the + * doorbell. + * return 0 on sucesss, otherwise error value. + * @release_conn: release a previously acquired fcoe connection + * @param cdev + * @param handle - the connection handle. + * return 0 on success, otherwise error value. + * @offload_conn: configures an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * return 0 on success, otherwise error value. + * @destroy_conn: stops an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param terminate_params + * return 0 on success, otherwise error value. + * @get_stats: gets FCoE related statistics + * @param cdev + * @param stats - pointer to struck that would be filled + * we stats + * return 0 on success, error otherwise. + */ +struct qed_fcoe_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_fcoe_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_fcoe_cb_ops *ops, void *cookie); + + const struct qed_ll2_ops *ll2; + + int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks); + + int (*stop)(struct qed_dev *cdev); + + int (*acquire_conn)(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell); + + int (*release_conn)(struct qed_dev *cdev, u32 handle); + + int (*offload_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_fcoe_params_offload *conn_info); + int (*destroy_conn)(struct qed_dev *cdev, + u32 handle, dma_addr_t terminate_params); + + int (*get_stats)(struct qed_dev *cdev, struct qed_fcoe_stats *stats); +}; + +const struct qed_fcoe_ops *qed_get_fcoe_ops(void); +void qed_put_fcoe_ops(void); +#endif diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h new file mode 100644 index 000000000..8cd34645e --- /dev/null +++ b/include/linux/qed/qed_if.h @@ -0,0 +1,1329 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _QED_IF_H +#define _QED_IF_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum dcbx_protocol_type { + DCBX_PROTOCOL_ISCSI, + DCBX_PROTOCOL_FCOE, + DCBX_PROTOCOL_ROCE, + DCBX_PROTOCOL_ROCE_V2, + DCBX_PROTOCOL_ETH, + DCBX_MAX_PROTOCOL_TYPE +}; + +#define QED_ROCE_PROTOCOL_INDEX (3) + +#define QED_LLDP_CHASSIS_ID_STAT_LEN 4 +#define QED_LLDP_PORT_ID_STAT_LEN 4 +#define QED_DCBX_MAX_APP_PROTOCOL 32 +#define QED_MAX_PFC_PRIORITIES 8 +#define QED_DCBX_DSCP_SIZE 64 + +struct qed_dcbx_lldp_remote { + u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN]; + u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN]; + bool enable_rx; + bool enable_tx; + u32 tx_interval; + u32 max_credit; +}; + +struct qed_dcbx_lldp_local { + u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN]; +}; + +struct qed_dcbx_app_prio { + u8 roce; + u8 roce_v2; + u8 fcoe; + u8 iscsi; + u8 eth; +}; + +struct qed_dbcx_pfc_params { + bool willing; + bool enabled; + u8 prio[QED_MAX_PFC_PRIORITIES]; + u8 max_tc; +}; + +enum qed_dcbx_sf_ieee_type { + QED_DCBX_SF_IEEE_ETHTYPE, + QED_DCBX_SF_IEEE_TCP_PORT, + QED_DCBX_SF_IEEE_UDP_PORT, + QED_DCBX_SF_IEEE_TCP_UDP_PORT +}; + +struct qed_app_entry { + bool ethtype; + enum qed_dcbx_sf_ieee_type sf_ieee; + bool enabled; + u8 prio; + u16 proto_id; + enum dcbx_protocol_type proto_type; +}; + +struct qed_dcbx_params { + struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL]; + u16 num_app_entries; + bool app_willing; + bool app_valid; + bool app_error; + bool ets_willing; + bool ets_enabled; + bool ets_cbs; + bool valid; + u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES]; + u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES]; + u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES]; + struct qed_dbcx_pfc_params pfc; + u8 max_ets_tc; +}; + +struct qed_dcbx_admin_params { + struct qed_dcbx_params params; + bool valid; +}; + +struct qed_dcbx_remote_params { + struct qed_dcbx_params params; + bool valid; +}; + +struct qed_dcbx_operational_params { + struct qed_dcbx_app_prio app_prio; + struct qed_dcbx_params params; + bool valid; + bool enabled; + bool ieee; + bool cee; + bool local; + u32 err; +}; + +struct qed_dcbx_get { + struct qed_dcbx_operational_params operational; + struct qed_dcbx_lldp_remote lldp_remote; + struct qed_dcbx_lldp_local lldp_local; + struct qed_dcbx_remote_params remote; + struct qed_dcbx_admin_params local; +}; + +enum qed_nvm_images { + QED_NVM_IMAGE_ISCSI_CFG, + QED_NVM_IMAGE_FCOE_CFG, + QED_NVM_IMAGE_NVM_CFG1, + QED_NVM_IMAGE_DEFAULT_CFG, + QED_NVM_IMAGE_NVM_META, +}; + +struct qed_link_eee_params { + u32 tx_lpi_timer; +#define QED_EEE_1G_ADV BIT(0) +#define QED_EEE_10G_ADV BIT(1) + + /* Capabilities are represented using QED_EEE_*_ADV values */ 
+ u8 adv_caps; + u8 lp_adv_caps; + bool enable; + bool tx_lpi_enable; +}; + +enum qed_led_mode { + QED_LED_MODE_OFF, + QED_LED_MODE_ON, + QED_LED_MODE_RESTORE +}; + +struct qed_mfw_tlv_eth { + u16 lso_maxoff_size; + bool lso_maxoff_size_set; + u16 lso_minseg_size; + bool lso_minseg_size_set; + u8 prom_mode; + bool prom_mode_set; + u16 tx_descr_size; + bool tx_descr_size_set; + u16 rx_descr_size; + bool rx_descr_size_set; + u16 netq_count; + bool netq_count_set; + u32 tcp4_offloads; + bool tcp4_offloads_set; + u32 tcp6_offloads; + bool tcp6_offloads_set; + u16 tx_descr_qdepth; + bool tx_descr_qdepth_set; + u16 rx_descr_qdepth; + bool rx_descr_qdepth_set; + u8 iov_offload; +#define QED_MFW_TLV_IOV_OFFLOAD_NONE (0) +#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1) +#define QED_MFW_TLV_IOV_OFFLOAD_VEB (2) +#define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3) + bool iov_offload_set; + u8 txqs_empty; + bool txqs_empty_set; + u8 rxqs_empty; + bool rxqs_empty_set; + u8 num_txqs_full; + bool num_txqs_full_set; + u8 num_rxqs_full; + bool num_rxqs_full_set; +}; + +#define QED_MFW_TLV_TIME_SIZE 14 +struct qed_mfw_tlv_time { + bool b_set; + u8 month; + u8 day; + u8 hour; + u8 min; + u16 msec; + u16 usec; +}; + +struct qed_mfw_tlv_fcoe { + u8 scsi_timeout; + bool scsi_timeout_set; + u32 rt_tov; + bool rt_tov_set; + u32 ra_tov; + bool ra_tov_set; + u32 ed_tov; + bool ed_tov_set; + u32 cr_tov; + bool cr_tov_set; + u8 boot_type; + bool boot_type_set; + u8 npiv_state; + bool npiv_state_set; + u32 num_npiv_ids; + bool num_npiv_ids_set; + u8 switch_name[8]; + bool switch_name_set; + u16 switch_portnum; + bool switch_portnum_set; + u8 switch_portid[3]; + bool switch_portid_set; + u8 vendor_name[8]; + bool vendor_name_set; + u8 switch_model[8]; + bool switch_model_set; + u8 switch_fw_version[8]; + bool switch_fw_version_set; + u8 qos_pri; + bool qos_pri_set; + u8 port_alias[3]; + bool port_alias_set; + u8 port_state; +#define QED_MFW_TLV_PORT_STATE_OFFLINE (0) +#define QED_MFW_TLV_PORT_STATE_LOOP (1) +#define QED_MFW_TLV_PORT_STATE_P2P (2) +#define QED_MFW_TLV_PORT_STATE_FABRIC (3) + bool port_state_set; + u16 fip_tx_descr_size; + bool fip_tx_descr_size_set; + u16 fip_rx_descr_size; + bool fip_rx_descr_size_set; + u16 link_failures; + bool link_failures_set; + u8 fcoe_boot_progress; + bool fcoe_boot_progress_set; + u64 rx_bcast; + bool rx_bcast_set; + u64 tx_bcast; + bool tx_bcast_set; + u16 fcoe_txq_depth; + bool fcoe_txq_depth_set; + u16 fcoe_rxq_depth; + bool fcoe_rxq_depth_set; + u64 fcoe_rx_frames; + bool fcoe_rx_frames_set; + u64 fcoe_rx_bytes; + bool fcoe_rx_bytes_set; + u64 fcoe_tx_frames; + bool fcoe_tx_frames_set; + u64 fcoe_tx_bytes; + bool fcoe_tx_bytes_set; + u16 crc_count; + bool crc_count_set; + u32 crc_err_src_fcid[5]; + bool crc_err_src_fcid_set[5]; + struct qed_mfw_tlv_time crc_err[5]; + u16 losync_err; + bool losync_err_set; + u16 losig_err; + bool losig_err_set; + u16 primtive_err; + bool primtive_err_set; + u16 disparity_err; + bool disparity_err_set; + u16 code_violation_err; + bool code_violation_err_set; + u32 flogi_param[4]; + bool flogi_param_set[4]; + struct qed_mfw_tlv_time flogi_tstamp; + u32 flogi_acc_param[4]; + bool flogi_acc_param_set[4]; + struct qed_mfw_tlv_time flogi_acc_tstamp; + u32 flogi_rjt; + bool flogi_rjt_set; + struct qed_mfw_tlv_time flogi_rjt_tstamp; + u32 fdiscs; + bool fdiscs_set; + u8 fdisc_acc; + bool fdisc_acc_set; + u8 fdisc_rjt; + bool fdisc_rjt_set; + u8 plogi; + bool plogi_set; + u8 plogi_acc; + bool plogi_acc_set; + u8 plogi_rjt; + bool plogi_rjt_set; + u32 
plogi_dst_fcid[5]; + bool plogi_dst_fcid_set[5]; + struct qed_mfw_tlv_time plogi_tstamp[5]; + u32 plogi_acc_src_fcid[5]; + bool plogi_acc_src_fcid_set[5]; + struct qed_mfw_tlv_time plogi_acc_tstamp[5]; + u8 tx_plogos; + bool tx_plogos_set; + u8 plogo_acc; + bool plogo_acc_set; + u8 plogo_rjt; + bool plogo_rjt_set; + u32 plogo_src_fcid[5]; + bool plogo_src_fcid_set[5]; + struct qed_mfw_tlv_time plogo_tstamp[5]; + u8 rx_logos; + bool rx_logos_set; + u8 tx_accs; + bool tx_accs_set; + u8 tx_prlis; + bool tx_prlis_set; + u8 rx_accs; + bool rx_accs_set; + u8 tx_abts; + bool tx_abts_set; + u8 rx_abts_acc; + bool rx_abts_acc_set; + u8 rx_abts_rjt; + bool rx_abts_rjt_set; + u32 abts_dst_fcid[5]; + bool abts_dst_fcid_set[5]; + struct qed_mfw_tlv_time abts_tstamp[5]; + u8 rx_rscn; + bool rx_rscn_set; + u32 rx_rscn_nport[4]; + bool rx_rscn_nport_set[4]; + u8 tx_lun_rst; + bool tx_lun_rst_set; + u8 abort_task_sets; + bool abort_task_sets_set; + u8 tx_tprlos; + bool tx_tprlos_set; + u8 tx_nos; + bool tx_nos_set; + u8 rx_nos; + bool rx_nos_set; + u8 ols; + bool ols_set; + u8 lr; + bool lr_set; + u8 lrr; + bool lrr_set; + u8 tx_lip; + bool tx_lip_set; + u8 rx_lip; + bool rx_lip_set; + u8 eofa; + bool eofa_set; + u8 eofni; + bool eofni_set; + u8 scsi_chks; + bool scsi_chks_set; + u8 scsi_cond_met; + bool scsi_cond_met_set; + u8 scsi_busy; + bool scsi_busy_set; + u8 scsi_inter; + bool scsi_inter_set; + u8 scsi_inter_cond_met; + bool scsi_inter_cond_met_set; + u8 scsi_rsv_conflicts; + bool scsi_rsv_conflicts_set; + u8 scsi_tsk_full; + bool scsi_tsk_full_set; + u8 scsi_aca_active; + bool scsi_aca_active_set; + u8 scsi_tsk_abort; + bool scsi_tsk_abort_set; + u32 scsi_rx_chk[5]; + bool scsi_rx_chk_set[5]; + struct qed_mfw_tlv_time scsi_chk_tstamp[5]; +}; + +struct qed_mfw_tlv_iscsi { + u8 target_llmnr; + bool target_llmnr_set; + u8 header_digest; + bool header_digest_set; + u8 data_digest; + bool data_digest_set; + u8 auth_method; +#define QED_MFW_TLV_AUTH_METHOD_NONE (1) +#define QED_MFW_TLV_AUTH_METHOD_CHAP (2) +#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3) + bool auth_method_set; + u16 boot_taget_portal; + bool boot_taget_portal_set; + u16 frame_size; + bool frame_size_set; + u16 tx_desc_size; + bool tx_desc_size_set; + u16 rx_desc_size; + bool rx_desc_size_set; + u8 boot_progress; + bool boot_progress_set; + u16 tx_desc_qdepth; + bool tx_desc_qdepth_set; + u16 rx_desc_qdepth; + bool rx_desc_qdepth_set; + u64 rx_frames; + bool rx_frames_set; + u64 rx_bytes; + bool rx_bytes_set; + u64 tx_frames; + bool tx_frames_set; + u64 tx_bytes; + bool tx_bytes_set; +}; + +#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ + (void __iomem *)(reg_addr)) + +#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) + +#define QED_COALESCE_MAX 0x1FF +#define QED_DEFAULT_RX_USECS 12 +#define QED_DEFAULT_TX_USECS 48 + +/* forward */ +struct qed_dev; + +struct qed_eth_pf_params { + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; + + /* per-VF number of CIDs */ + u8 num_vf_cons; +#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32) + + /* To enable arfs, previous to HW-init a positive number needs to be + * set [as filters require allocated searcher ILT memory]. + * This will set the maximal number of configured steering-filters. 
+ */ + u32 num_arfs_filters; +}; + +struct qed_fcoe_pf_params { + /* The following parameters are used during protocol-init */ + u64 glbl_q_params_addr; + u64 bdq_pbl_base_addr[2]; + + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; + u16 num_tasks; + + /* The following parameters are used during protocol-init */ + u16 sq_num_pbl_pages; + + u16 cq_num_entries; + u16 cmdq_num_entries; + u16 rq_buffer_log_size; + u16 mtu; + u16 dummy_icid; + u16 bdq_xoff_threshold[2]; + u16 bdq_xon_threshold[2]; + u16 rq_buffer_size; + u8 num_cqs; /* num of global CQs */ + u8 log_page_size; + u8 gl_rq_pi; + u8 gl_cmd_pi; + u8 debug_mode; + u8 is_target; + u8 bdq_pbl_num_entries[2]; +}; + +/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ +struct qed_iscsi_pf_params { + u64 glbl_q_params_addr; + u64 bdq_pbl_base_addr[3]; + u16 cq_num_entries; + u16 cmdq_num_entries; + u32 two_msl_timer; + u16 tx_sws_timer; + + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; + u16 num_tasks; + + /* The following parameters are used during protocol-init */ + u16 half_way_close_timeout; + u16 bdq_xoff_threshold[3]; + u16 bdq_xon_threshold[3]; + u16 cmdq_xoff_threshold; + u16 cmdq_xon_threshold; + u16 rq_buffer_size; + + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 num_queues; + u8 log_page_size; + u8 rqe_log_size; + u8 max_fin_rt; + u8 gl_rq_pi; + u8 gl_cmd_pi; + u8 debug_mode; + u8 ll2_ooo_queue_id; + + u8 is_target; + u8 is_soc_en; + u8 soc_num_of_blocks_log; + u8 bdq_pbl_num_entries[3]; +}; + +struct qed_rdma_pf_params { + /* Supplied to QED during resource allocation (may affect the ILT and + * the doorbell BAR). 
+ */ + u32 min_dpis; /* number of requested DPIs */ + u32 num_qps; /* number of requested Queue Pairs */ + u32 num_srqs; /* number of requested SRQ */ + u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */ + u8 gl_pi; /* protocol index */ + + /* Will allocate rate limiters to be used with QPs */ + u8 enable_dcqcn; +}; + +struct qed_pf_params { + struct qed_eth_pf_params eth_pf_params; + struct qed_fcoe_pf_params fcoe_pf_params; + struct qed_iscsi_pf_params iscsi_pf_params; + struct qed_rdma_pf_params rdma_pf_params; +}; + +enum qed_int_mode { + QED_INT_MODE_INTA, + QED_INT_MODE_MSIX, + QED_INT_MODE_MSI, + QED_INT_MODE_POLL, +}; + +struct qed_sb_info { + struct status_block_e4 *sb_virt; + dma_addr_t sb_phys; + u32 sb_ack; /* Last given ack */ + u16 igu_sb_id; + void __iomem *igu_addr; + u8 flags; +#define QED_SB_INFO_INIT 0x1 +#define QED_SB_INFO_SETUP 0x2 + + struct qed_dev *cdev; +}; + +enum qed_dev_type { + QED_DEV_TYPE_BB, + QED_DEV_TYPE_AH, +}; + +struct qed_dev_info { + unsigned long pci_mem_start; + unsigned long pci_mem_end; + unsigned int pci_irq; + u8 num_hwfns; + + u8 hw_mac[ETH_ALEN]; + + /* FW version */ + u16 fw_major; + u16 fw_minor; + u16 fw_rev; + u16 fw_eng; + + /* MFW version */ + u32 mfw_rev; +#define QED_MFW_VERSION_0_MASK 0x000000FF +#define QED_MFW_VERSION_0_OFFSET 0 +#define QED_MFW_VERSION_1_MASK 0x0000FF00 +#define QED_MFW_VERSION_1_OFFSET 8 +#define QED_MFW_VERSION_2_MASK 0x00FF0000 +#define QED_MFW_VERSION_2_OFFSET 16 +#define QED_MFW_VERSION_3_MASK 0xFF000000 +#define QED_MFW_VERSION_3_OFFSET 24 + + u32 flash_size; + bool b_inter_pf_switch; + bool tx_switching; + bool rdma_supported; + u16 mtu; + + bool wol_support; + + /* MBI version */ + u32 mbi_version; +#define QED_MBI_VERSION_0_MASK 0x000000FF +#define QED_MBI_VERSION_0_OFFSET 0 +#define QED_MBI_VERSION_1_MASK 0x0000FF00 +#define QED_MBI_VERSION_1_OFFSET 8 +#define QED_MBI_VERSION_2_MASK 0x00FF0000 +#define QED_MBI_VERSION_2_OFFSET 16 + + enum qed_dev_type dev_type; + + /* Output parameters for qede */ + bool vxlan_enable; + bool gre_enable; + bool geneve_enable; + + u8 abs_pf_id; +}; + +enum qed_sb_type { + QED_SB_TYPE_L2_QUEUE, + QED_SB_TYPE_CNQ, + QED_SB_TYPE_STORAGE, +}; + +enum qed_protocol { + QED_PROTOCOL_ETH, + QED_PROTOCOL_ISCSI, + QED_PROTOCOL_FCOE, +}; + +enum qed_link_mode_bits { + QED_LM_FIBRE_BIT = BIT(0), + QED_LM_Autoneg_BIT = BIT(1), + QED_LM_Asym_Pause_BIT = BIT(2), + QED_LM_Pause_BIT = BIT(3), + QED_LM_1000baseT_Half_BIT = BIT(4), + QED_LM_1000baseT_Full_BIT = BIT(5), + QED_LM_10000baseKR_Full_BIT = BIT(6), + QED_LM_25000baseKR_Full_BIT = BIT(7), + QED_LM_40000baseLR4_Full_BIT = BIT(8), + QED_LM_50000baseKR2_Full_BIT = BIT(9), + QED_LM_100000baseKR4_Full_BIT = BIT(10), + QED_LM_COUNT = 11 +}; + +struct qed_link_params { + bool link_up; + +#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) +#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) +#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) +#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) +#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) +#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) + u32 override_flags; + bool autoneg; + u32 adv_speeds; + u32 forced_speed; +#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) +#define QED_LINK_PAUSE_RX_ENABLE BIT(1) +#define QED_LINK_PAUSE_TX_ENABLE BIT(2) + u32 pause_config; +#define QED_LINK_LOOPBACK_NONE BIT(0) +#define QED_LINK_LOOPBACK_INT_PHY BIT(1) +#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) +#define QED_LINK_LOOPBACK_EXT BIT(3) +#define QED_LINK_LOOPBACK_MAC BIT(4) + u32 loopback_mode; + struct 
qed_link_eee_params eee; +}; + +struct qed_link_output { + bool link_up; + + /* In QED_LM_* defs */ + u32 supported_caps; + u32 advertised_caps; + u32 lp_caps; + + u32 speed; /* In Mb/s */ + u8 duplex; /* In DUPLEX defs */ + u8 port; /* In PORT defs */ + bool autoneg; + u32 pause_config; + + /* EEE - capability & param */ + bool eee_supported; + bool eee_active; + u8 sup_caps; + struct qed_link_eee_params eee; +}; + +struct qed_probe_params { + enum qed_protocol protocol; + u32 dp_module; + u8 dp_level; + bool is_vf; +}; + +#define QED_DRV_VER_STR_SIZE 12 +struct qed_slowpath_params { + u32 int_mode; + u8 drv_major; + u8 drv_minor; + u8 drv_rev; + u8 drv_eng; + u8 name[QED_DRV_VER_STR_SIZE]; +}; + +#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */ + +struct qed_int_info { + struct msix_entry *msix; + u8 msix_cnt; + + /* This should be updated by the protocol driver */ + u8 used_cnt; +}; + +struct qed_generic_tlvs { +#define QED_TLV_IP_CSUM BIT(0) +#define QED_TLV_LSO BIT(1) + u16 feat_flags; +#define QED_TLV_MAC_COUNT 3 + u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN]; +}; + +#define QED_I2C_DEV_ADDR_A0 0xA0 +#define QED_I2C_DEV_ADDR_A2 0xA2 + +#define QED_NVM_SIGNATURE 0x12435687 + +enum qed_nvm_flash_cmd { + QED_NVM_FLASH_CMD_FILE_DATA = 0x2, + QED_NVM_FLASH_CMD_FILE_START = 0x3, + QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4, + QED_NVM_FLASH_CMD_NVM_MAX, +}; + +struct qed_common_cb_ops { + void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); + void (*link_update)(void *dev, + struct qed_link_output *link); + void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); + void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); + void (*get_protocol_tlv_data)(void *dev, void *data); +}; + +struct qed_selftest_ops { +/** + * @brief selftest_interrupt - Perform interrupt test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_interrupt)(struct qed_dev *cdev); + +/** + * @brief selftest_memory - Perform memory test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_memory)(struct qed_dev *cdev); + +/** + * @brief selftest_register - Perform register test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_register)(struct qed_dev *cdev); + +/** + * @brief selftest_clock - Perform clock test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_clock)(struct qed_dev *cdev); + +/** + * @brief selftest_nvram - Perform nvram test + * + * @param cdev + * + * @return 0 on success, error otherwise. + */ + int (*selftest_nvram) (struct qed_dev *cdev); +}; + +struct qed_common_ops { + struct qed_selftest_ops *selftest; + + struct qed_dev* (*probe)(struct pci_dev *dev, + struct qed_probe_params *params); + + void (*remove)(struct qed_dev *cdev); + + int (*set_power_state)(struct qed_dev *cdev, + pci_power_t state); + + void (*set_name) (struct qed_dev *cdev, char name[]); + + /* Client drivers need to make this call before slowpath_start. + * PF params required for the call before slowpath_start is + * documented within the qed_pf_params structure definition. + */ + void (*update_pf_params)(struct qed_dev *cdev, + struct qed_pf_params *params); + int (*slowpath_start)(struct qed_dev *cdev, + struct qed_slowpath_params *params); + + int (*slowpath_stop)(struct qed_dev *cdev); + + /* Requests to use `cnt' interrupts for fastpath. + * upon success, returns number of interrupts allocated for fastpath. 
+ */ + int (*set_fp_int)(struct qed_dev *cdev, + u16 cnt); + + /* Fills `info' with pointers required for utilizing interrupts */ + int (*get_fp_int)(struct qed_dev *cdev, + struct qed_int_info *info); + + u32 (*sb_init)(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, + u16 sb_id, + enum qed_sb_type type); + + u32 (*sb_release)(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + u16 sb_id); + + void (*simd_handler_config)(struct qed_dev *cdev, + void *token, + int index, + void (*handler)(void *)); + + void (*simd_handler_clean)(struct qed_dev *cdev, + int index); + int (*dbg_grc)(struct qed_dev *cdev, + void *buffer, u32 *num_dumped_bytes); + + int (*dbg_grc_size)(struct qed_dev *cdev); + + int (*dbg_all_data) (struct qed_dev *cdev, void *buffer); + + int (*dbg_all_data_size) (struct qed_dev *cdev); + +/** + * @brief can_link_change - can the instance change the link or not + * + * @param cdev + * + * @return true if link-change is allowed, false otherwise. + */ + bool (*can_link_change)(struct qed_dev *cdev); + +/** + * @brief set_link - set links according to params + * + * @param cdev + * @param params - values used to override the default link configuration + * + * @return 0 on success, error otherwise. + */ + int (*set_link)(struct qed_dev *cdev, + struct qed_link_params *params); + +/** + * @brief get_link - returns the current link state. + * + * @param cdev + * @param if_link - structure to be filled with current link configuration. + */ + void (*get_link)(struct qed_dev *cdev, + struct qed_link_output *if_link); + +/** + * @brief - drains chip in case Tx completions fail to arrive due to pause. + * + * @param cdev + */ + int (*drain)(struct qed_dev *cdev); + +/** + * @brief update_msglvl - update module debug level + * + * @param cdev + * @param dp_module + * @param dp_level + */ + void (*update_msglvl)(struct qed_dev *cdev, + u32 dp_module, + u8 dp_level); + + int (*chain_alloc)(struct qed_dev *cdev, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + enum qed_chain_cnt_type cnt_type, + u32 num_elems, + size_t elem_size, + struct qed_chain *p_chain, + struct qed_chain_ext_pbl *ext_pbl); + + void (*chain_free)(struct qed_dev *cdev, + struct qed_chain *p_chain); + +/** + * @brief nvm_flash - Flash nvm data. + * + * @param cdev + * @param name - file containing the data + * + * @return 0 on success, error otherwise. + */ + int (*nvm_flash)(struct qed_dev *cdev, const char *name); + +/** + * @brief nvm_get_image - reads an entire image from nvram + * + * @param cdev + * @param type - type of the request nvram image + * @param buf - preallocated buffer to fill with the image + * @param len - length of the allocated buffer + * + * @return 0 on success, error otherwise + */ + int (*nvm_get_image)(struct qed_dev *cdev, + enum qed_nvm_images type, u8 *buf, u16 len); + +/** + * @brief set_coalesce - Configure Rx coalesce value in usec + * + * @param cdev + * @param rx_coal - Rx coalesce value in usec + * @param tx_coal - Tx coalesce value in usec + * @param qid - Queue index + * @param sb_id - Status Block Id + * + * @return 0 on success, error otherwise. + */ + int (*set_coalesce)(struct qed_dev *cdev, + u16 rx_coal, u16 tx_coal, void *handle); + +/** + * @brief set_led - Configure LED mode + * + * @param cdev + * @param mode - LED mode + * + * @return 0 on success, error otherwise. 
+ */ + int (*set_led)(struct qed_dev *cdev, + enum qed_led_mode mode); + +/** + * @brief update_drv_state - API to inform the change in the driver state. + * + * @param cdev + * @param active + * + */ + int (*update_drv_state)(struct qed_dev *cdev, bool active); + +/** + * @brief update_mac - API to inform the change in the mac address + * + * @param cdev + * @param mac + * + */ + int (*update_mac)(struct qed_dev *cdev, u8 *mac); + +/** + * @brief update_mtu - API to inform the change in the mtu + * + * @param cdev + * @param mtu + * + */ + int (*update_mtu)(struct qed_dev *cdev, u16 mtu); + +/** + * @brief update_wol - update of changes in the WoL configuration + * + * @param cdev + * @param enabled - true iff WoL should be enabled. + */ + int (*update_wol) (struct qed_dev *cdev, bool enabled); + +/** + * @brief read_module_eeprom + * + * @param cdev + * @param buf - buffer + * @param dev_addr - PHY device memory region + * @param offset - offset into eeprom contents to be read + * @param len - buffer length, i.e., max bytes to be read + */ + int (*read_module_eeprom)(struct qed_dev *cdev, + char *buf, u8 dev_addr, u32 offset, u32 len); +}; + +#define MASK_FIELD(_name, _value) \ + ((_value) &= (_name ## _MASK)) + +#define FIELD_VALUE(_name, _value) \ + ((_value & _name ## _MASK) << _name ## _SHIFT) + +#define SET_FIELD(value, name, flag) \ + do { \ + (value) &= ~(name ## _MASK << name ## _SHIFT); \ + (value) |= (((u64)flag) << (name ## _SHIFT)); \ + } while (0) + +#define GET_FIELD(value, name) \ + (((value) >> (name ## _SHIFT)) & name ## _MASK) + +/* Debug print definitions */ +#define DP_ERR(cdev, fmt, ...) \ + do { \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } while (0) + +#define DP_NOTICE(cdev, fmt, ...) \ + do { \ + if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + \ + } \ + } while (0) + +#define DP_INFO(cdev, fmt, ...) \ + do { \ + if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define DP_VERBOSE(cdev, module, fmt, ...) \ + do { \ + if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \ + ((cdev)->dp_module & module))) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? 
DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } \ + } while (0) + +enum DP_LEVEL { + QED_LEVEL_VERBOSE = 0x0, + QED_LEVEL_INFO = 0x1, + QED_LEVEL_NOTICE = 0x2, + QED_LEVEL_ERR = 0x3, +}; + +#define QED_LOG_LEVEL_SHIFT (30) +#define QED_LOG_VERBOSE_MASK (0x3fffffff) +#define QED_LOG_INFO_MASK (0x40000000) +#define QED_LOG_NOTICE_MASK (0x80000000) + +enum DP_MODULE { + QED_MSG_SPQ = 0x10000, + QED_MSG_STATS = 0x20000, + QED_MSG_DCB = 0x40000, + QED_MSG_IOV = 0x80000, + QED_MSG_SP = 0x100000, + QED_MSG_STORAGE = 0x200000, + QED_MSG_CXT = 0x800000, + QED_MSG_LL2 = 0x1000000, + QED_MSG_ILT = 0x2000000, + QED_MSG_RDMA = 0x4000000, + QED_MSG_DEBUG = 0x8000000, + /* to be added...up to 0x8000000 */ +}; + +enum qed_mf_mode { + QED_MF_DEFAULT, + QED_MF_OVLAN, + QED_MF_NPAR, +}; + +struct qed_eth_stats_common { + u64 no_buff_discards; + u64 packet_too_big_discard; + u64 ttl0_discard; + u64 rx_ucast_bytes; + u64 rx_mcast_bytes; + u64 rx_bcast_bytes; + u64 rx_ucast_pkts; + u64 rx_mcast_pkts; + u64 rx_bcast_pkts; + u64 mftag_filter_discards; + u64 mac_filter_discards; + u64 gft_filter_drop; + u64 tx_ucast_bytes; + u64 tx_mcast_bytes; + u64 tx_bcast_bytes; + u64 tx_ucast_pkts; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_err_drop_pkts; + u64 tpa_coalesced_pkts; + u64 tpa_coalesced_events; + u64 tpa_aborts_num; + u64 tpa_not_coalesced_pkts; + u64 tpa_coalesced_bytes; + + /* port */ + u64 rx_64_byte_packets; + u64 rx_65_to_127_byte_packets; + u64 rx_128_to_255_byte_packets; + u64 rx_256_to_511_byte_packets; + u64 rx_512_to_1023_byte_packets; + u64 rx_1024_to_1518_byte_packets; + u64 rx_crc_errors; + u64 rx_mac_crtl_frames; + u64 rx_pause_frames; + u64 rx_pfc_frames; + u64 rx_align_errors; + u64 rx_carrier_errors; + u64 rx_oversize_packets; + u64 rx_jabbers; + u64 rx_undersize_packets; + u64 rx_fragments; + u64 tx_64_byte_packets; + u64 tx_65_to_127_byte_packets; + u64 tx_128_to_255_byte_packets; + u64 tx_256_to_511_byte_packets; + u64 tx_512_to_1023_byte_packets; + u64 tx_1024_to_1518_byte_packets; + u64 tx_pause_frames; + u64 tx_pfc_frames; + u64 brb_truncates; + u64 brb_discards; + u64 rx_mac_bytes; + u64 rx_mac_uc_packets; + u64 rx_mac_mc_packets; + u64 rx_mac_bc_packets; + u64 rx_mac_frames_ok; + u64 tx_mac_bytes; + u64 tx_mac_uc_packets; + u64 tx_mac_mc_packets; + u64 tx_mac_bc_packets; + u64 tx_mac_ctrl_frames; + u64 link_change_count; +}; + +struct qed_eth_stats_bb { + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; + u64 tx_1519_to_2047_byte_packets; + u64 tx_2048_to_4095_byte_packets; + u64 tx_4096_to_9216_byte_packets; + u64 tx_9217_to_16383_byte_packets; + u64 tx_lpi_entry_count; + u64 tx_total_collisions; +}; + +struct qed_eth_stats_ah { + u64 rx_1519_to_max_byte_packets; + u64 tx_1519_to_max_byte_packets; +}; + +struct qed_eth_stats { + struct qed_eth_stats_common common; + + union { + struct qed_eth_stats_bb bb; + struct qed_eth_stats_ah ah; + }; +}; + +#define QED_SB_IDX 0x0002 + +#define RX_PI 0 +#define TX_PI(tc) (RX_PI + 1 + tc) + +struct qed_sb_cnt_info { + /* Original, current, and free SBs for PF */ + int orig; + int cnt; + int free_cnt; + + /* Original, current and free SBS for child VFs */ + int iov_orig; + int iov_cnt; + int free_cnt_iov; +}; + +static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) +{ + u32 prod = 0; + u16 rc = 0; + + prod = le32_to_cpu(sb_info->sb_virt->prod_index) & + STATUS_BLOCK_E4_PROD_INDEX_MASK; + if 
(sb_info->sb_ack != prod) { + sb_info->sb_ack = prod; + rc |= QED_SB_IDX; + } + + /* Let SB update */ + mmiowb(); + return rc; +} + +/** + * + * @brief This function creates an update command for interrupts that is + * written to the IGU. + * + * @param sb_info - This is the structure allocated and + * initialized per status block. Assumption is + * that it was initialized using qed_sb_init + * @param int_cmd - Enable/Disable/Nop + * @param upd_flg - whether igu consumer should be + * updated. + * + * @return inline void + */ +static inline void qed_sb_ack(struct qed_sb_info *sb_info, + enum igu_int_cmd int_cmd, + u8 upd_flg) +{ + struct igu_prod_cons_update igu_ack = { 0 }; + + igu_ack.sb_id_and_flags = + ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_REG << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + + DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags); + + /* Both segments (interrupts & acks) are written to same place address; + * Need to guarantee all commands will be received (in-order) by HW. + */ + mmiowb(); + barrier(); +} + +static inline void __internal_ram_wr(void *p_hwfn, + void __iomem *addr, + int size, + u32 *data) + +{ + unsigned int i; + + for (i = 0; i < size / sizeof(*data); i++) + DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]); +} + +static inline void internal_ram_wr(void __iomem *addr, + int size, + u32 *data) +{ + __internal_ram_wr(NULL, addr, size, data); +} + +enum qed_rss_caps { + QED_RSS_IPV4 = 0x1, + QED_RSS_IPV6 = 0x2, + QED_RSS_IPV4_TCP = 0x4, + QED_RSS_IPV6_TCP = 0x8, + QED_RSS_IPV4_UDP = 0x10, + QED_RSS_IPV6_UDP = 0x20, +}; + +#define QED_RSS_IND_TABLE_SIZE 128 +#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */ +#endif diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h new file mode 100644 index 000000000..ac2e6a319 --- /dev/null +++ b/include/linux/qed/qed_iov_if.h @@ -0,0 +1,60 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _QED_IOV_IF_H +#define _QED_IOV_IF_H + +#include + +/* Structs used by PF to control and manipulate child VFs */ +struct qed_iov_hv_ops { + int (*configure)(struct qed_dev *cdev, int num_vfs_param); + + int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid); + + int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid); + + int (*get_config) (struct qed_dev *cdev, int vf_id, + struct ifla_vf_info *ivi); + + int (*set_link_state) (struct qed_dev *cdev, int vf_id, + int link_state); + + int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val); + + int (*set_rate) (struct qed_dev *cdev, int vfid, + u32 min_rate, u32 max_rate); + + int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust); +}; + +#endif diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h new file mode 100644 index 000000000..d0df1bec5 --- /dev/null +++ b/include/linux/qed/qed_iscsi_if.h @@ -0,0 +1,260 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _QED_ISCSI_IF_H +#define _QED_ISCSI_IF_H +#include +#include + +typedef int (*iscsi_event_cb_t) (void *context, + u8 fw_event_code, void *fw_handle); +struct qed_iscsi_stats { + u64 iscsi_rx_bytes_cnt; + u64 iscsi_rx_packet_cnt; + u64 iscsi_rx_new_ooo_isle_events_cnt; + u32 iscsi_cmdq_threshold_cnt; + u32 iscsi_rq_threshold_cnt; + u32 iscsi_immq_threshold_cnt; + + u64 iscsi_rx_dropped_pdus_task_not_valid; + + u64 iscsi_rx_data_pdu_cnt; + u64 iscsi_rx_r2t_pdu_cnt; + u64 iscsi_rx_total_pdu_cnt; + + u64 iscsi_tx_go_to_slow_start_event_cnt; + u64 iscsi_tx_fast_retransmit_event_cnt; + + u64 iscsi_tx_data_pdu_cnt; + u64 iscsi_tx_r2t_pdu_cnt; + u64 iscsi_tx_total_pdu_cnt; + + u64 iscsi_tx_bytes_cnt; + u64 iscsi_tx_packet_cnt; +}; + +struct qed_dev_iscsi_info { + struct qed_dev_info common; + + void __iomem *primary_dbq_rq_addr; + void __iomem *secondary_bdq_rq_addr; + + u8 num_cqs; +}; + +struct qed_iscsi_id_params { + u8 mac[ETH_ALEN]; + u32 ip[4]; + u16 port; +}; + +struct qed_iscsi_params_offload { + u8 layer_code; + dma_addr_t sq_pbl_addr; + u32 initial_ack; + + struct qed_iscsi_id_params src; + struct qed_iscsi_id_params dst; + u16 vlan_id; + u8 tcp_flags; + u8 ip_version; + u8 default_cq; + + u8 ka_max_probe_cnt; + u8 dup_ack_theshold; + u32 rcv_next; + u32 snd_una; + u32 snd_next; + u32 snd_max; + u32 snd_wnd; + u32 rcv_wnd; + u32 snd_wl1; + u32 cwnd; + u32 ss_thresh; + u16 srtt; + u16 rtt_var; + u32 ts_recent; + u32 ts_recent_age; + u32 total_rt; + u32 ka_timeout_delta; + u32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + u32 flow_label; + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; + u32 initial_rcv_wnd; + u8 ttl; + u8 tos_or_tc; + u16 remote_port; + u16 local_port; + u16 mss; + u8 snd_wnd_scale; + u8 rcv_wnd_scale; + u16 da_timeout_value; + u8 ack_frequency; +}; + +struct qed_iscsi_params_update { + u8 update_flag; +#define QED_ISCSI_CONN_HD_EN BIT(0) +#define QED_ISCSI_CONN_DD_EN BIT(1) +#define QED_ISCSI_CONN_INITIAL_R2T BIT(2) +#define QED_ISCSI_CONN_IMMEDIATE_DATA BIT(3) + + u32 max_seq_size; + u32 max_recv_pdu_length; + u32 max_send_pdu_length; + u32 first_seq_length; + u32 exp_stat_sn; +}; + +#define MAX_TID_BLOCKS_ISCSI (512) +struct qed_iscsi_tid { + u32 size; /* In bytes per task */ + u32 num_tids_per_block; + u8 *blocks[MAX_TID_BLOCKS_ISCSI]; +}; + +struct qed_iscsi_cb_ops { + struct qed_common_cb_ops common; +}; + +/** + * struct qed_iscsi_ops - qed iSCSI operations. + * @common: common operations pointer + * @ll2: light L2 operations pointer + * @fill_dev_info: fills iSCSI specific information + * @param cdev + * @param info + * @return 0 on sucesss, otherwise error value. + * @register_ops: register iscsi operations + * @param cdev + * @param ops - specified using qed_iscsi_cb_ops + * @param cookie - driver private + * @start: iscsi in FW + * @param cdev + * @param tasks - qed will fill information about tasks + * return 0 on success, otherwise error value. + * @stop: iscsi in FW + * @param cdev + * return 0 on success, otherwise error value. + * @acquire_conn: acquire a new iscsi connection + * @param cdev + * @param handle - qed will fill handle that should be + * used henceforth as identifier of the + * connection. + * @param p_doorbell - qed will fill the address of the + * doorbell. + * @return 0 on sucesss, otherwise error value. + * @release_conn: release a previously acquired iscsi connection + * @param cdev + * @param handle - the connection handle. 
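 *	(Typical call order for a single connection, as an illustrative
 *	 note: acquire_conn, then offload_conn, update_conn as needed,
 *	 and on teardown clear_sq may be used to flush outstanding tasks
 *	 before destroy_conn and the final release_conn.)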
+ * @return 0 on success, otherwise error value. + * @offload_conn: configures an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * @return 0 on success, otherwise error value. + * @update_conn: updates an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * @return 0 on success, otherwise error value. + * @destroy_conn: stops an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @clear_sq: clear all task in sq + * @param cdev + * @param handle - the connection handle. + * @return 0 on success, otherwise error value. + * @get_stats: iSCSI related statistics + * @param cdev + * @param stats - pointer to struck that would be filled + * we stats + * @return 0 on success, error otherwise. + * @change_mac Change MAC of interface + * @param cdev + * @param handle - the connection handle. + * @param mac - new MAC to configure. + * @return 0 on success, otherwise error value. + */ +struct qed_iscsi_ops { + const struct qed_common_ops *common; + + const struct qed_ll2_ops *ll2; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_iscsi_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_iscsi_cb_ops *ops, void *cookie); + + int (*start)(struct qed_dev *cdev, + struct qed_iscsi_tid *tasks, + void *event_context, iscsi_event_cb_t async_event_cb); + + int (*stop)(struct qed_dev *cdev); + + int (*acquire_conn)(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell); + + int (*release_conn)(struct qed_dev *cdev, u32 handle); + + int (*offload_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_offload *conn_info); + + int (*update_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_update *conn_info); + + int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn); + + int (*clear_sq)(struct qed_dev *cdev, u32 handle); + + int (*get_stats)(struct qed_dev *cdev, + struct qed_iscsi_stats *stats); + + int (*change_mac)(struct qed_dev *cdev, u32 handle, const u8 *mac); +}; + +const struct qed_iscsi_ops *qed_get_iscsi_ops(void); +void qed_put_iscsi_ops(void); +#endif diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h new file mode 100644 index 000000000..5eb022953 --- /dev/null +++ b/include/linux/qed/qed_ll2_if.h @@ -0,0 +1,307 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _QED_LL2_IF_H +#define _QED_LL2_IF_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum qed_ll2_conn_type { + QED_LL2_TYPE_FCOE, + QED_LL2_TYPE_ISCSI, + QED_LL2_TYPE_TEST, + QED_LL2_TYPE_OOO, + QED_LL2_TYPE_RESERVED2, + QED_LL2_TYPE_ROCE, + QED_LL2_TYPE_IWARP, + QED_LL2_TYPE_RESERVED3, + MAX_QED_LL2_RX_CONN_TYPE +}; + +enum qed_ll2_roce_flavor_type { + QED_LL2_ROCE, + QED_LL2_RROCE, + MAX_QED_LL2_ROCE_FLAVOR_TYPE +}; + +enum qed_ll2_tx_dest { + QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ + QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ + QED_LL2_TX_DEST_DROP, /* Light L2 Drop the TX packet */ + QED_LL2_TX_DEST_MAX +}; + +enum qed_ll2_error_handle { + QED_LL2_DROP_PACKET, + QED_LL2_DO_NOTHING, + QED_LL2_ASSERT, +}; + +struct qed_ll2_stats { + u64 gsi_invalid_hdr; + u64 gsi_invalid_pkt_length; + u64 gsi_unsupported_pkt_typ; + u64 gsi_crcchksm_error; + + u64 packet_too_big_discard; + u64 no_buff_discard; + + u64 rcv_ucast_bytes; + u64 rcv_mcast_bytes; + u64 rcv_bcast_bytes; + u64 rcv_ucast_pkts; + u64 rcv_mcast_pkts; + u64 rcv_bcast_pkts; + + u64 sent_ucast_bytes; + u64 sent_mcast_bytes; + u64 sent_bcast_bytes; + u64 sent_ucast_pkts; + u64 sent_mcast_pkts; + u64 sent_bcast_pkts; +}; + +struct qed_ll2_comp_rx_data { + void *cookie; + dma_addr_t rx_buf_addr; + u16 parse_flags; + u16 err_flags; + u16 vlan; + bool b_last_packet; + u8 connection_handle; + + union { + u16 packet_length; + u16 data_length; + } length; + + u32 opaque_data_0; + u32 opaque_data_1; + + /* GSI only */ + u32 src_qp; + u16 qp_id; + + union { + u8 placement_offset; + u8 data_length_error; + } u; +}; + +typedef +void (*qed_ll2_complete_rx_packet_cb)(void *cxt, + struct qed_ll2_comp_rx_data *data); + +typedef +void (*qed_ll2_release_rx_packet_cb)(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, + bool b_last_packet); + +typedef +void (*qed_ll2_complete_tx_packet_cb)(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, + bool b_last_packet); + +typedef +void (*qed_ll2_release_tx_packet_cb)(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet); + +typedef +void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle, + u32 opaque_data_0, u32 opaque_data_1); + +struct qed_ll2_cbs { + qed_ll2_complete_rx_packet_cb rx_comp_cb; + qed_ll2_release_rx_packet_cb rx_release_cb; + qed_ll2_complete_tx_packet_cb tx_comp_cb; + qed_ll2_release_tx_packet_cb tx_release_cb; + qed_ll2_slowpath_cb slowpath_cb; + void *cookie; +}; + +struct qed_ll2_acquire_data_inputs { + enum qed_ll2_conn_type conn_type; + u16 mtu; + u16 rx_num_desc; + u16 rx_num_ooo_buffers; + u8 rx_drop_ttl0_flg; + u8 rx_vlan_removal_en; + u16 tx_num_desc; + u8 tx_max_bds_per_packet; + u8 tx_tc; + enum qed_ll2_tx_dest tx_dest; + enum qed_ll2_error_handle ai_err_packet_too_big; + enum qed_ll2_error_handle ai_err_no_buf; + bool 
secondary_queue; + u8 gsi_enable; +}; + +struct qed_ll2_acquire_data { + struct qed_ll2_acquire_data_inputs input; + const struct qed_ll2_cbs *cbs; + + /* Output container for LL2 connection's handle */ + u8 *p_connection_handle; +}; + +struct qed_ll2_tx_pkt_info { + void *cookie; + dma_addr_t first_frag; + enum qed_ll2_tx_dest tx_dest; + enum qed_ll2_roce_flavor_type qed_roce_flavor; + u16 vlan; + u16 l4_hdr_offset_w; /* from start of packet */ + u16 first_frag_len; + u8 num_of_bds; + u8 bd_flags; + bool enable_ip_cksum; + bool enable_l4_cksum; + bool calc_ip_len; + bool remove_stag; +}; + +#define QED_LL2_UNUSED_HANDLE (0xff) + +struct qed_ll2_cb_ops { + int (*rx_cb)(void *, struct sk_buff *, u32, u32); + int (*tx_cb)(void *, struct sk_buff *, bool); +}; + +struct qed_ll2_params { + u16 mtu; + bool drop_ttl0_packets; + bool rx_vlan_stripping; + u8 tx_tc; + bool frags_mapped; + u8 ll2_mac_address[ETH_ALEN]; +}; + +enum qed_ll2_xmit_flags { + /* FIP discovery packet */ + QED_LL2_XMIT_FLAGS_FIP_DISCOVERY +}; + +struct qed_ll2_ops { +/** + * @brief start - initializes ll2 + * + * @param cdev + * @param params - protocol driver configuration for the ll2. + * + * @return 0 on success, otherwise error value. + */ + int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params); + +/** + * @brief stop - stops the ll2 + * + * @param cdev + * + * @return 0 on success, otherwise error value. + */ + int (*stop)(struct qed_dev *cdev); + +/** + * @brief start_xmit - transmits an skb over the ll2 interface + * + * @param cdev + * @param skb + * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags. + * + * @return 0 on success, otherwise error value. + */ + int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb, + unsigned long xmit_flags); + +/** + * @brief register_cb_ops - protocol driver register the callback for Rx/Tx + * packets. Should be called before `start'. + * + * @param cdev + * @param cookie - to be passed to the callback functions. + * @param ops - the callback functions to register for Rx / Tx. + * + * @return 0 on success, otherwise error value. + */ + void (*register_cb_ops)(struct qed_dev *cdev, + const struct qed_ll2_cb_ops *ops, + void *cookie); + +/** + * @brief get LL2 related statistics + * + * @param cdev + * @param stats - pointer to struct that would be filled with stats + * + * @return 0 on success, error otherwise. + */ + int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats); +}; + +#ifdef CONFIG_QED_LL2 +int qed_ll2_alloc_if(struct qed_dev *); +void qed_ll2_dealloc_if(struct qed_dev *); +#else +static const struct qed_ll2_ops qed_ll2_ops_pass = { + .start = NULL, + .stop = NULL, + .start_xmit = NULL, + .register_cb_ops = NULL, + .get_stats = NULL, +}; + +static inline int qed_ll2_alloc_if(struct qed_dev *cdev) +{ + return 0; +} + +static inline void qed_ll2_dealloc_if(struct qed_dev *cdev) +{ +} +#endif +#endif diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h new file mode 100644 index 000000000..df4d13f7e --- /dev/null +++ b/include/linux/qed/qed_rdma_if.h @@ -0,0 +1,704 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _QED_RDMA_IF_H +#define _QED_RDMA_IF_H +#include +#include +#include +#include +#include +#include +#include + +enum qed_roce_ll2_tx_dest { + /* Light L2 TX Destination to the Network */ + QED_ROCE_LL2_TX_DEST_NW, + + /* Light L2 TX Destination to the Loopback */ + QED_ROCE_LL2_TX_DEST_LB, + QED_ROCE_LL2_TX_DEST_MAX +}; + +#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF) + +/* rdma interface */ + +enum qed_roce_qp_state { + QED_ROCE_QP_STATE_RESET, + QED_ROCE_QP_STATE_INIT, + QED_ROCE_QP_STATE_RTR, + QED_ROCE_QP_STATE_RTS, + QED_ROCE_QP_STATE_SQD, + QED_ROCE_QP_STATE_ERR, + QED_ROCE_QP_STATE_SQE +}; + +enum qed_rdma_tid_type { + QED_RDMA_TID_REGISTERED_MR, + QED_RDMA_TID_FMR, + QED_RDMA_TID_MW +}; + +struct qed_rdma_events { + void *context; + void (*affiliated_event)(void *context, u8 fw_event_code, + void *fw_handle); + void (*unaffiliated_event)(void *context, u8 event_code); +}; + +struct qed_rdma_device { + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + u64 fw_ver; + + u64 node_guid; + u64 sys_image_guid; + + u8 max_cnq; + u8 max_sge; + u8 max_srq_sge; + u16 max_inline; + u32 max_wqe; + u32 max_srq_wqe; + u8 max_qp_resp_rd_atomic_resc; + u8 max_qp_req_rd_atomic_resc; + u64 max_dev_resp_rd_atomic_resc; + u32 max_cq; + u32 max_qp; + u32 max_srq; + u32 max_mr; + u64 max_mr_size; + u32 max_cqe; + u32 max_mw; + u32 max_fmr; + u32 max_mr_mw_fmr_pbl; + u64 max_mr_mw_fmr_size; + u32 max_pd; + u32 max_ah; + u8 max_pkey; + u16 max_srq_wr; + u8 max_stats_queues; + u32 dev_caps; + + /* Abilty to support RNR-NAK generation */ + +#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1 +#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0 + /* Abilty to support shutdown port */ +#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1 +#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1 + /* Abilty to support port active event */ +#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1 +#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2 + /* Abilty to support port change event */ +#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1 +#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3 + /* Abilty to support system image GUID */ +#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1 +#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4 + /* Abilty to support bad P_Key counter support */ +#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1 
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5 + /* Abilty to support atomic operations */ +#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1 +#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6 +#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1 +#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7 + /* Abilty to support modifying the maximum number of + * outstanding work requests per QP + */ +#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1 +#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8 + /* Abilty to support automatic path migration */ +#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1 +#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9 + /* Abilty to support the base memory management extensions */ +#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1 +#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10 +#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1 +#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11 + /* Abilty to support multipile page sizes per memory region */ +#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1 +#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12 + /* Abilty to support block list physical buffer list */ +#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1 +#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13 + /* Abilty to support zero based virtual addresses */ +#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1 +#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14 + /* Abilty to support local invalidate fencing */ +#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1 +#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15 + /* Abilty to support Loopback on QP */ +#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1 +#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16 + u64 page_size_caps; + u8 dev_ack_delay; + u32 reserved_lkey; + u32 bad_pkey_counter; + struct qed_rdma_events events; +}; + +enum qed_port_state { + QED_RDMA_PORT_UP, + QED_RDMA_PORT_DOWN, +}; + +enum qed_roce_capability { + QED_ROCE_V1 = 1 << 0, + QED_ROCE_V2 = 1 << 1, +}; + +struct qed_rdma_port { + enum qed_port_state port_state; + int link_speed; + u64 max_msg_size; + u8 source_gid_table_len; + void *source_gid_table_ptr; + u8 pkey_table_len; + void *pkey_table_ptr; + u32 pkey_bad_counter; + enum qed_roce_capability capability; +}; + +struct qed_rdma_cnq_params { + u8 num_pbl_pages; + u64 pbl_ptr; +}; + +/* The CQ Mode affects the CQ doorbell transaction size. + * 64/32 bit machines should configure to 32/16 bits respectively. 
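 *
 * A minimal sketch of the intent; the `in_params' local is assumed,
 * while struct qed_rdma_start_in_params below carries the cq_mode field:
 *
 *	struct qed_rdma_start_in_params in_params = {};
 *
 *	in_params.cq_mode = IS_ENABLED(CONFIG_64BIT) ?
 *			    QED_RDMA_CQ_MODE_32_BITS :
 *			    QED_RDMA_CQ_MODE_16_BITS;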
+ */ +enum qed_rdma_cq_mode { + QED_RDMA_CQ_MODE_16_BITS, + QED_RDMA_CQ_MODE_32_BITS, +}; + +struct qed_roce_dcqcn_params { + u8 notification_point; + u8 reaction_point; + + /* fields for notification point */ + u32 cnp_send_timeout; + + /* fields for reaction point */ + u32 rl_bc_rate; + u16 rl_max_rate; + u16 rl_r_ai; + u16 rl_r_hai; + u16 dcqcn_g; + u32 dcqcn_k_us; + u32 dcqcn_timeout_us; +}; + +struct qed_rdma_start_in_params { + struct qed_rdma_events *events; + struct qed_rdma_cnq_params cnq_pbl_list[128]; + u8 desired_cnq; + enum qed_rdma_cq_mode cq_mode; + struct qed_roce_dcqcn_params dcqcn_params; + u16 max_mtu; + u8 mac_addr[ETH_ALEN]; + u8 iwarp_flags; +}; + +struct qed_rdma_add_user_out_params { + u16 dpi; + u64 dpi_addr; + u64 dpi_phys_addr; + u32 dpi_size; + u16 wid_count; +}; + +enum roce_mode { + ROCE_V1, + ROCE_V2_IPV4, + ROCE_V2_IPV6, + MAX_ROCE_MODE +}; + +union qed_gid { + u8 bytes[16]; + u16 words[8]; + u32 dwords[4]; + u64 qwords[2]; + u32 ipv4_addr; +}; + +struct qed_rdma_register_tid_in_params { + u32 itid; + enum qed_rdma_tid_type tid_type; + u8 key; + u16 pd; + bool local_read; + bool local_write; + bool remote_read; + bool remote_write; + bool remote_atomic; + bool mw_bind; + u64 pbl_ptr; + bool pbl_two_level; + u8 pbl_page_size_log; + u8 page_size_log; + u32 fbo; + u64 length; + u64 vaddr; + bool zbva; + bool phy_mr; + bool dma_mr; + + bool dif_enabled; + u64 dif_error_addr; +}; + +struct qed_rdma_create_cq_in_params { + u32 cq_handle_lo; + u32 cq_handle_hi; + u32 cq_size; + u16 dpi; + bool pbl_two_level; + u64 pbl_ptr; + u16 pbl_num_pages; + u8 pbl_page_size_log; + u8 cnq_id; + u16 int_timeout; +}; + +struct qed_rdma_create_srq_in_params { + u64 pbl_base_addr; + u64 prod_pair_addr; + u16 num_pages; + u16 pd_id; + u16 page_size; +}; + +struct qed_rdma_destroy_cq_in_params { + u16 icid; +}; + +struct qed_rdma_destroy_cq_out_params { + u16 num_cq_notif; +}; + +struct qed_rdma_create_qp_in_params { + u32 qp_handle_lo; + u32 qp_handle_hi; + u32 qp_handle_async_lo; + u32 qp_handle_async_hi; + bool use_srq; + bool signal_all; + bool fmr_and_reserved_lkey; + u16 pd; + u16 dpi; + u16 sq_cq_id; + u16 sq_num_pages; + u64 sq_pbl_ptr; + u8 max_sq_sges; + u16 rq_cq_id; + u16 rq_num_pages; + u64 rq_pbl_ptr; + u16 srq_id; + u8 stats_queue; +}; + +struct qed_rdma_create_qp_out_params { + u32 qp_id; + u16 icid; + void *rq_pbl_virt; + dma_addr_t rq_pbl_phys; + void *sq_pbl_virt; + dma_addr_t sq_pbl_phys; +}; + +struct qed_rdma_modify_qp_in_params { + u32 modify_flags; +#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0 +#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1 +#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2 +#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3 +#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4 +#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5 +#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8 +#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1 +#define 
QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9 +#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10 +#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11 +#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12 +#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13 +#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14 + + enum qed_roce_qp_state new_state; + u16 pkey; + bool incoming_rdma_read_en; + bool incoming_rdma_write_en; + bool incoming_atomic_en; + bool e2e_flow_control_en; + u32 dest_qp; + bool lb_indication; + u16 mtu; + u8 traffic_class_tos; + u8 hop_limit_ttl; + u32 flow_label; + union qed_gid sgid; + union qed_gid dgid; + u16 udp_src_port; + + u16 vlan_id; + + u32 rq_psn; + u32 sq_psn; + u8 max_rd_atomic_resp; + u8 max_rd_atomic_req; + u32 ack_timeout; + u8 retry_cnt; + u8 rnr_retry_cnt; + u8 min_rnr_nak_timer; + bool sqd_async; + u8 remote_mac_addr[6]; + u8 local_mac_addr[6]; + bool use_local_mac; + enum roce_mode roce_mode; +}; + +struct qed_rdma_query_qp_out_params { + enum qed_roce_qp_state state; + u32 rq_psn; + u32 sq_psn; + bool draining; + u16 mtu; + u32 dest_qp; + bool incoming_rdma_read_en; + bool incoming_rdma_write_en; + bool incoming_atomic_en; + bool e2e_flow_control_en; + union qed_gid sgid; + union qed_gid dgid; + u32 flow_label; + u8 hop_limit_ttl; + u8 traffic_class_tos; + u32 timeout; + u8 rnr_retry; + u8 retry_cnt; + u8 min_rnr_nak_timer; + u16 pkey_index; + u8 max_rd_atomic; + u8 max_dest_rd_atomic; + bool sqd_async; +}; + +struct qed_rdma_create_srq_out_params { + u16 srq_id; +}; + +struct qed_rdma_destroy_srq_in_params { + u16 srq_id; +}; + +struct qed_rdma_modify_srq_in_params { + u32 wqe_limit; + u16 srq_id; +}; + +struct qed_rdma_stats_out_params { + u64 sent_bytes; + u64 sent_pkts; + u64 rcv_bytes; + u64 rcv_pkts; +}; + +struct qed_rdma_counters_out_params { + u64 pd_count; + u64 max_pd; + u64 dpi_count; + u64 max_dpi; + u64 cq_count; + u64 max_cq; + u64 qp_count; + u64 max_qp; + u64 tid_count; + u64 max_tid; +}; + +#define QED_ROCE_TX_HEAD_FAILURE (1) +#define QED_ROCE_TX_FRAG_FAILURE (2) + +enum qed_iwarp_event_type { + QED_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */ + QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */ + QED_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */ + QED_IWARP_EVENT_DISCONNECT, + QED_IWARP_EVENT_CLOSE, + QED_IWARP_EVENT_IRQ_FULL, + QED_IWARP_EVENT_RQ_EMPTY, + QED_IWARP_EVENT_LLP_TIMEOUT, + QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR, + QED_IWARP_EVENT_CQ_OVERFLOW, + QED_IWARP_EVENT_QP_CATASTROPHIC, + QED_IWARP_EVENT_ACTIVE_MPA_REPLY, + QED_IWARP_EVENT_LOCAL_ACCESS_ERROR, + QED_IWARP_EVENT_REMOTE_OPERATION_ERROR, + QED_IWARP_EVENT_TERMINATE_RECEIVED, + QED_IWARP_EVENT_SRQ_LIMIT, + QED_IWARP_EVENT_SRQ_EMPTY, +}; + +enum qed_tcp_ip_version { + QED_TCP_IPV4, + QED_TCP_IPV6, +}; + +struct qed_iwarp_cm_info { + enum qed_tcp_ip_version ip_version; + u32 remote_ip[4]; + u32 local_ip[4]; + u16 remote_port; + u16 local_port; + u16 vlan; + u8 ord; + u8 ird; + u16 private_data_len; + const void *private_data; +}; + +struct qed_iwarp_cm_event_params { + enum qed_iwarp_event_type event; + const struct qed_iwarp_cm_info *cm_info; + void *ep_context; /* To be passed to accept call */ + int status; +}; + +typedef 
int (*iwarp_event_handler) (void *context, + struct qed_iwarp_cm_event_params *event); + +struct qed_iwarp_connect_in { + iwarp_event_handler event_cb; + void *cb_context; + struct qed_rdma_qp *qp; + struct qed_iwarp_cm_info cm_info; + u16 mss; + u8 remote_mac_addr[ETH_ALEN]; + u8 local_mac_addr[ETH_ALEN]; +}; + +struct qed_iwarp_connect_out { + void *ep_context; +}; + +struct qed_iwarp_listen_in { + iwarp_event_handler event_cb; + void *cb_context; /* passed to event_cb */ + u32 max_backlog; + enum qed_tcp_ip_version ip_version; + u32 ip_addr[4]; + u16 port; + u16 vlan; +}; + +struct qed_iwarp_listen_out { + void *handle; +}; + +struct qed_iwarp_accept_in { + void *ep_context; + void *cb_context; + struct qed_rdma_qp *qp; + const void *private_data; + u16 private_data_len; + u8 ord; + u8 ird; +}; + +struct qed_iwarp_reject_in { + void *ep_context; + void *cb_context; + const void *private_data; + u16 private_data_len; +}; + +struct qed_iwarp_send_rtr_in { + void *ep_context; +}; + +struct qed_roce_ll2_header { + void *vaddr; + dma_addr_t baddr; + size_t len; +}; + +struct qed_roce_ll2_buffer { + dma_addr_t baddr; + size_t len; +}; + +struct qed_roce_ll2_packet { + struct qed_roce_ll2_header header; + int n_seg; + struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE]; + int roce_mode; + enum qed_roce_ll2_tx_dest tx_dest; +}; + +enum qed_rdma_type { + QED_RDMA_TYPE_ROCE, + QED_RDMA_TYPE_IWARP +}; + +struct qed_dev_rdma_info { + struct qed_dev_info common; + enum qed_rdma_type rdma_type; + u8 user_dpm_enabled; +}; + +struct qed_rdma_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_rdma_info *info); + void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev); + + int (*rdma_init)(struct qed_dev *dev, + struct qed_rdma_start_in_params *iparams); + + int (*rdma_add_user)(void *rdma_cxt, + struct qed_rdma_add_user_out_params *oparams); + + void (*rdma_remove_user)(void *rdma_cxt, u16 dpi); + int (*rdma_stop)(void *rdma_cxt); + struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt); + struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt); + int (*rdma_get_start_sb)(struct qed_dev *cdev); + int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev); + void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod); + int (*rdma_get_rdma_int)(struct qed_dev *cdev, + struct qed_int_info *info); + int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt); + int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd); + void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd); + int (*rdma_create_cq)(void *rdma_cxt, + struct qed_rdma_create_cq_in_params *params, + u16 *icid); + int (*rdma_destroy_cq)(void *rdma_cxt, + struct qed_rdma_destroy_cq_in_params *iparams, + struct qed_rdma_destroy_cq_out_params *oparams); + struct qed_rdma_qp * + (*rdma_create_qp)(void *rdma_cxt, + struct qed_rdma_create_qp_in_params *iparams, + struct qed_rdma_create_qp_out_params *oparams); + + int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp, + struct qed_rdma_modify_qp_in_params *iparams); + + int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *oparams); + int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp); + + int + (*rdma_register_tid)(void *rdma_cxt, + struct qed_rdma_register_tid_in_params *iparams); + + int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid); + int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid); + void (*rdma_free_tid)(void *rdma_cxt, u32 itid); + + int (*rdma_create_srq)(void *rdma_cxt, + struct 
qed_rdma_create_srq_in_params *iparams, + struct qed_rdma_create_srq_out_params *oparams); + int (*rdma_destroy_srq)(void *rdma_cxt, + struct qed_rdma_destroy_srq_in_params *iparams); + int (*rdma_modify_srq)(void *rdma_cxt, + struct qed_rdma_modify_srq_in_params *iparams); + + int (*ll2_acquire_connection)(void *rdma_cxt, + struct qed_ll2_acquire_data *data); + + int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle); + int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle); + void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle); + + int (*ll2_prepare_tx_packet)(void *rdma_cxt, + u8 connection_handle, + struct qed_ll2_tx_pkt_info *pkt, + bool notify_fw); + + int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt, + u8 connection_handle, + dma_addr_t addr, + u16 nbytes); + int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle, + dma_addr_t addr, u16 buf_len, void *cookie, + u8 notify_fw); + int (*ll2_get_stats)(void *rdma_cxt, + u8 connection_handle, + struct qed_ll2_stats *p_stats); + int (*ll2_set_mac_filter)(struct qed_dev *cdev, + u8 *old_mac_address, u8 *new_mac_address); + + int (*iwarp_connect)(void *rdma_cxt, + struct qed_iwarp_connect_in *iparams, + struct qed_iwarp_connect_out *oparams); + + int (*iwarp_create_listen)(void *rdma_cxt, + struct qed_iwarp_listen_in *iparams, + struct qed_iwarp_listen_out *oparams); + + int (*iwarp_accept)(void *rdma_cxt, + struct qed_iwarp_accept_in *iparams); + + int (*iwarp_reject)(void *rdma_cxt, + struct qed_iwarp_reject_in *iparams); + + int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle); + + int (*iwarp_send_rtr)(void *rdma_cxt, + struct qed_iwarp_send_rtr_in *iparams); +}; + +const struct qed_rdma_ops *qed_get_rdma_ops(void); + +#endif diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h new file mode 100644 index 000000000..9904617a9 --- /dev/null +++ b/include/linux/qed/qede_rdma.h @@ -0,0 +1,94 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef QEDE_ROCE_H +#define QEDE_ROCE_H + +#include +#include +#include +#include + +struct qedr_dev; +struct qed_dev; +struct qede_dev; + +enum qede_rdma_event { + QEDE_UP, + QEDE_DOWN, + QEDE_CHANGE_ADDR, + QEDE_CLOSE +}; + +struct qede_rdma_event_work { + struct list_head list; + struct work_struct work; + void *ptr; + enum qede_rdma_event event; +}; + +struct qedr_driver { + unsigned char name[32]; + + struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *, + struct net_device *); + + void (*remove)(struct qedr_dev *); + void (*notify)(struct qedr_dev *, enum qede_rdma_event); +}; + +/* APIs for RDMA driver to register callback handlers, + * which will be invoked when device is added, removed, ifup, ifdown + */ +int qede_rdma_register_driver(struct qedr_driver *drv); +void qede_rdma_unregister_driver(struct qedr_driver *drv); + +bool qede_rdma_supported(struct qede_dev *dev); + +#if IS_ENABLED(CONFIG_QED_RDMA) +int qede_rdma_dev_add(struct qede_dev *dev); +void qede_rdma_dev_event_open(struct qede_dev *dev); +void qede_rdma_dev_event_close(struct qede_dev *dev); +void qede_rdma_dev_remove(struct qede_dev *dev); +void qede_rdma_event_changeaddr(struct qede_dev *edr); + +#else +static inline int qede_rdma_dev_add(struct qede_dev *dev) +{ + return 0; +} + +static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {} +static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {} +static inline void qede_rdma_dev_remove(struct qede_dev *dev) {} +static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {} +#endif +#endif diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h new file mode 100644 index 000000000..480a57eb3 --- /dev/null +++ b/include/linux/qed/rdma_common.h @@ -0,0 +1,73 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __RDMA_COMMON__ +#define __RDMA_COMMON__ + +/************************/ +/* RDMA FW CONSTANTS */ +/************************/ + +#define RDMA_RESERVED_LKEY (0) +#define RDMA_RING_PAGE_SIZE (0x1000) + +#define RDMA_MAX_SGE_PER_SQ_WQE (4) +#define RDMA_MAX_SGE_PER_RQ_WQE (4) + +#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) + +#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) +#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) + +#define RDMA_MAX_CQS (64 * 1024) +#define RDMA_MAX_TIDS (128 * 1024 - 1) +#define RDMA_MAX_PDS (64 * 1024) +#define RDMA_MAX_XRC_SRQS (1024) +#define RDMA_MAX_SRQS (32 * 1024) + +#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 +#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB + +#define RDMA_TASK_TYPE (PROTOCOLID_ROCE) + +struct rdma_srq_id { + __le16 srq_idx; + __le16 opaque_fid; +}; + +struct rdma_srq_producers { + __le32 sge_prod; + __le32 wqe_prod; +}; + +#endif /* __RDMA_COMMON__ */ diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h new file mode 100644 index 000000000..473fba76a --- /dev/null +++ b/include/linux/qed/roce_common.h @@ -0,0 +1,69 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __ROCE_COMMON__ +#define __ROCE_COMMON__ + +/************************/ +/* ROCE FW CONSTANTS */ +/************************/ + +#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) +#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) + +#define ROCE_MAX_QPS (32 * 1024) +#define ROCE_DCQCN_NP_MAX_QPS (64) +#define ROCE_DCQCN_RP_MAX_QPS (64) +#define ROCE_LKEY_MW_DIF_EN_BIT (28) + +/* Affiliated asynchronous events / errors enumeration */ +enum roce_async_events_type { + ROCE_ASYNC_EVENT_NONE = 0, + ROCE_ASYNC_EVENT_COMM_EST = 1, + ROCE_ASYNC_EVENT_SQ_DRAINED, + ROCE_ASYNC_EVENT_SRQ_LIMIT, + ROCE_ASYNC_EVENT_LAST_WQE_REACHED, + ROCE_ASYNC_EVENT_CQ_ERR, + ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR, + ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR, + ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR, + ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR, + ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR, + ROCE_ASYNC_EVENT_SRQ_EMPTY, + ROCE_ASYNC_EVENT_DESTROY_QP_DONE, + ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR, + ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR, + ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR, + MAX_ROCE_ASYNC_EVENTS_TYPE +}; + +#endif /* __ROCE_COMMON__ */ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h new file mode 100644 index 000000000..505c0b48a --- /dev/null +++ b/include/linux/qed/storage_common.h @@ -0,0 +1,182 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __STORAGE_COMMON__ +#define __STORAGE_COMMON__ + +/*********************/ +/* SCSI CONSTANTS */ +/*********************/ + +#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2) +#define BDQ_NUM_RESOURCES (4) + +#define BDQ_ID_RQ (0) +#define BDQ_ID_IMM_DATA (1) +#define BDQ_ID_TQ (2) +#define BDQ_NUM_IDS (3) + +#define SCSI_NUM_SGES_SLOW_SGL_THR 8 + +#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15) + +/* SCSI op codes */ +#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89) +#define SCSI_OPCODE_READ_10 (0x28) +#define SCSI_OPCODE_WRITE_6 (0x0A) +#define SCSI_OPCODE_WRITE_10 (0x2A) +#define SCSI_OPCODE_WRITE_12 (0xAA) +#define SCSI_OPCODE_WRITE_16 (0x8A) +#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E) +#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE) +#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E) + +/* iSCSI Drv opaque */ +struct iscsi_drv_opaque { + __le16 reserved_zero[3]; + __le16 opaque; +}; + +/* Scsi 2B/8B opaque union */ +union scsi_opaque { + struct regpair fcoe_opaque; + struct iscsi_drv_opaque iscsi_opaque; +}; + +/* SCSI buffer descriptor */ +struct scsi_bd { + struct regpair address; + union scsi_opaque opaque; +}; + +/* Scsi Drv BDQ struct */ +struct scsi_bdq_ram_drv_data { + __le16 external_producer; + __le16 reserved0[3]; +}; + +/* SCSI SGE entry */ +struct scsi_sge { + struct regpair sge_addr; + __le32 sge_len; + __le32 reserved; +}; + +/* Cached SGEs section */ +struct scsi_cached_sges { + struct scsi_sge sge[4]; +}; + +/* Scsi Drv CMDQ struct */ +struct scsi_drv_cmdq { + __le16 cmdq_cons; + __le16 reserved0; + __le32 reserved1; +}; + +/* Common SCSI init params passed by driver to FW in function init ramrod */ +struct scsi_init_func_params { + __le16 num_tasks; + u8 log_page_size; + u8 debug_mode; + u8 reserved2[12]; +}; + +/* SCSI RQ/CQ/CMDQ firmware function init parameters */ +struct scsi_init_func_queues { + struct regpair glbl_q_params_addr; + __le16 rq_buffer_size; + __le16 cq_num_entries; + __le16 cmdq_num_entries; + u8 bdq_resource_id; + u8 q_validity; +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 +#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3 +#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4 +#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7 +#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5 + __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS]; + u8 num_queues; + u8 queue_relative_offset; + u8 cq_sb_pi; + u8 cmdq_sb_pi; + u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; + u8 reserved1; + struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; + __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; + __le16 cmdq_xoff_threshold; + __le16 bdq_xon_threshold[BDQ_NUM_IDS]; + __le16 cmdq_xon_threshold; +}; + +/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */ +struct scsi_ram_per_bdq_resource_drv_data { + struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; +}; + +/* SCSI SGL types */ +enum scsi_sgl_mode { + SCSI_TX_SLOW_SGL, + SCSI_FAST_SGL, + MAX_SCSI_SGL_MODE +}; + +/* SCSI SGL parameters */ +struct scsi_sgl_params { + struct regpair sgl_addr; + __le32 sgl_total_length; + __le32 sge_offset; + __le16 sgl_num_sges; + u8 sgl_index; + u8 reserved; +}; + +/* SCSI terminate connection params */ +struct 
scsi_terminate_extra_params { + __le16 unsolicited_cq_count; + __le16 cmdq_count; + u8 reserved[4]; +}; + +/* SCSI Task Queue Element */ +struct scsi_tqe { + __le16 itid; +}; + +#endif /* __STORAGE_COMMON__ */ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h new file mode 100644 index 000000000..4a4845193 --- /dev/null +++ b/include/linux/qed/tcp_common.h @@ -0,0 +1,281 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __TCP_COMMON__ +#define __TCP_COMMON__ + +/********************/ +/* TCP FW CONSTANTS */ +/********************/ + +#define TCP_INVALID_TIMEOUT_VAL -1 + +/* OOO opaque data received from LL2 */ +struct ooo_opaque { + __le32 cid; + u8 drop_isle; + u8 drop_size; + u8 ooo_opcode; + u8 ooo_isle; +}; + +/* tcp connect mode enum */ +enum tcp_connect_mode { + TCP_CONNECT_ACTIVE, + TCP_CONNECT_PASSIVE, + MAX_TCP_CONNECT_MODE +}; + +/* tcp function init parameters */ +struct tcp_init_params { + __le32 two_msl_timer; + __le16 tx_sws_timer; + u8 max_fin_rt; + u8 reserved[9]; +}; + +/* tcp IPv4/IPv6 enum */ +enum tcp_ip_version { + TCP_IPV4, + TCP_IPV6, + MAX_TCP_IP_VERSION +}; + +/* tcp offload parameters */ +struct tcp_offload_params { + __le16 local_mac_addr_lo; + __le16 local_mac_addr_mid; + __le16 local_mac_addr_hi; + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + __le16 vlan_id; + __le16 flags; +#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8 +#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F +#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9 + u8 ip_version; + u8 reserved0[3]; + __le32 remote_ip[4]; + __le32 local_ip[4]; + __le32 flow_label; + u8 ttl; + u8 tos_or_tc; + __le16 remote_port; + __le16 local_port; + __le16 mss; + u8 rcv_wnd_scale; + u8 connect_mode; + __le16 srtt; + __le32 ss_thresh; + __le32 rcv_wnd; + __le32 cwnd; + u8 ka_max_probe_cnt; + u8 dup_ack_theshold; + __le16 reserved1; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 initial_rcv_wnd; + __le32 rcv_next; + __le32 snd_una; + __le32 snd_next; + __le32 snd_max; + __le32 snd_wnd; + __le32 snd_wl1; + __le32 ts_recent; + __le32 ts_recent_age; + __le32 total_rt; + __le32 ka_timeout_delta; + __le32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + __le16 rtt_var; + __le16 fw_internal; + u8 snd_wnd_scale; + u8 ack_frequency; + __le16 da_timeout_value; + __le32 reserved3; +}; + +/* tcp offload parameters */ +struct tcp_offload_params_opt2 { + __le16 local_mac_addr_lo; + __le16 local_mac_addr_mid; + __le16 local_mac_addr_hi; + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + __le16 vlan_id; + __le16 flags; +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4 + u8 ip_version; + u8 
reserved1[3]; + __le32 remote_ip[4]; + __le32 local_ip[4]; + __le32 flow_label; + u8 ttl; + u8 tos_or_tc; + __le16 remote_port; + __le16 local_port; + __le16 mss; + u8 rcv_wnd_scale; + u8 connect_mode; + __le16 syn_ip_payload_length; + __le32 syn_phy_addr_lo; + __le32 syn_phy_addr_hi; + __le32 cwnd; + u8 ka_max_probe_cnt; + u8 reserved2[3]; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 reserved3[16]; +}; + +/* tcp IPv4/IPv6 enum */ +enum tcp_seg_placement_event { + TCP_EVENT_ADD_PEN, + TCP_EVENT_ADD_NEW_ISLE, + TCP_EVENT_ADD_ISLE_RIGHT, + TCP_EVENT_ADD_ISLE_LEFT, + TCP_EVENT_JOIN, + TCP_EVENT_DELETE_ISLES, + TCP_EVENT_NOP, + MAX_TCP_SEG_PLACEMENT_EVENT +}; + +/* tcp init parameters */ +struct tcp_update_params { + __le16 flags; +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 +#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 +#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 +#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 + __le16 remote_mac_addr_lo; + __le16 remote_mac_addr_mid; + __le16 remote_mac_addr_hi; + __le16 mss; + u8 ttl; + u8 tos_or_tc; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 flow_label; + __le32 initial_rcv_wnd; + u8 ka_max_probe_cnt; + u8 reserved1[7]; +}; + +/* toe upload parameters */ +struct tcp_upload_params { + __le32 rcv_next; + __le32 snd_una; + __le32 snd_next; + __le32 snd_max; + __le32 snd_wnd; + __le32 rcv_wnd; + __le32 snd_wl1; + __le32 cwnd; + __le32 ss_thresh; + __le16 srtt; + __le16 rtt_var; + __le32 ts_time; + __le32 ts_recent; + __le32 ts_recent_age; + __le32 total_rt; + __le32 ka_timeout_delta; + __le32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + __le32 reserved; +}; + +#endif /* __TCP_COMMON__ */ diff --git a/include/linux/qnx6_fs.h b/include/linux/qnx6_fs.h new file mode 100644 index 000000000..13373d437 --- /dev/null +++ b/include/linux/qnx6_fs.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Name : qnx6_fs.h + * Author : Kai Bankett + * Function : qnx6 global filesystem 
definitions + * History : 17-01-2012 created + */ +#ifndef _LINUX_QNX6_FS_H +#define _LINUX_QNX6_FS_H + +#include +#include + +#define QNX6_ROOT_INO 1 + +/* for di_status */ +#define QNX6_FILE_DIRECTORY 0x01 +#define QNX6_FILE_DELETED 0x02 +#define QNX6_FILE_NORMAL 0x03 + +#define QNX6_SUPERBLOCK_SIZE 0x200 /* superblock always is 512 bytes */ +#define QNX6_SUPERBLOCK_AREA 0x1000 /* area reserved for superblock */ +#define QNX6_BOOTBLOCK_SIZE 0x2000 /* heading bootblock area */ +#define QNX6_DIR_ENTRY_SIZE 0x20 /* dir entry size of 32 bytes */ +#define QNX6_INODE_SIZE 0x80 /* each inode is 128 bytes */ +#define QNX6_INODE_SIZE_BITS 7 /* inode entry size shift */ + +#define QNX6_NO_DIRECT_POINTERS 16 /* 16 blockptrs in sbl/inode */ +#define QNX6_PTR_MAX_LEVELS 5 /* maximum indirect levels */ + +/* for filenames */ +#define QNX6_SHORT_NAME_MAX 27 +#define QNX6_LONG_NAME_MAX 510 + +/* list of mount options */ +#define QNX6_MOUNT_MMI_FS 0x010000 /* mount as Audi MMI 3G fs */ + +/* + * This is the original qnx6 inode layout on disk. + * Each inode is 128 byte long. + */ +struct qnx6_inode_entry { + __fs64 di_size; + __fs32 di_uid; + __fs32 di_gid; + __fs32 di_ftime; + __fs32 di_mtime; + __fs32 di_atime; + __fs32 di_ctime; + __fs16 di_mode; + __fs16 di_ext_mode; + __fs32 di_block_ptr[QNX6_NO_DIRECT_POINTERS]; + __u8 di_filelevels; + __u8 di_status; + __u8 di_unknown2[2]; + __fs32 di_zero2[6]; +}; + +/* + * Each directory entry is maximum 32 bytes long. + * If more characters or special characters required it is stored + * in the longfilenames structure. + */ +struct qnx6_dir_entry { + __fs32 de_inode; + __u8 de_size; + char de_fname[QNX6_SHORT_NAME_MAX]; +}; + +/* + * Longfilename direntries have a different structure + */ +struct qnx6_long_dir_entry { + __fs32 de_inode; + __u8 de_size; + __u8 de_unknown[3]; + __fs32 de_long_inode; + __fs32 de_checksum; +}; + +struct qnx6_long_filename { + __fs16 lf_size; + __u8 lf_fname[QNX6_LONG_NAME_MAX]; +}; + +struct qnx6_root_node { + __fs64 size; + __fs32 ptr[QNX6_NO_DIRECT_POINTERS]; + __u8 levels; + __u8 mode; + __u8 spare[6]; +}; + +struct qnx6_super_block { + __fs32 sb_magic; + __fs32 sb_checksum; + __fs64 sb_serial; + __fs32 sb_ctime; /* time the fs was created */ + __fs32 sb_atime; /* last access time */ + __fs32 sb_flags; + __fs16 sb_version1; /* filesystem version information */ + __fs16 sb_version2; /* filesystem version information */ + __u8 sb_volumeid[16]; + __fs32 sb_blocksize; + __fs32 sb_num_inodes; + __fs32 sb_free_inodes; + __fs32 sb_num_blocks; + __fs32 sb_free_blocks; + __fs32 sb_allocgroup; + struct qnx6_root_node Inode; + struct qnx6_root_node Bitmap; + struct qnx6_root_node Longfile; + struct qnx6_root_node Unknown; +}; + +/* Audi MMI 3G superblock layout is different to plain qnx6 */ +struct qnx6_mmi_super_block { + __fs32 sb_magic; + __fs32 sb_checksum; + __fs64 sb_serial; + __u8 sb_spare0[12]; + __u8 sb_id[12]; + __fs32 sb_blocksize; + __fs32 sb_num_inodes; + __fs32 sb_free_inodes; + __fs32 sb_num_blocks; + __fs32 sb_free_blocks; + __u8 sb_spare1[4]; + struct qnx6_root_node Inode; + struct qnx6_root_node Bitmap; + struct qnx6_root_node Longfile; + struct qnx6_root_node Unknown; +}; + +#endif diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h new file mode 100644 index 000000000..034982c98 --- /dev/null +++ b/include/linux/quicklist.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_QUICKLIST_H +#define LINUX_QUICKLIST_H +/* + * Fast allocations and disposal of pages. 
Pages must be in the condition + * as needed after allocation when they are freed. Per cpu lists of pages + * are kept that only contain node local pages. + * + * (C) 2007, SGI. Christoph Lameter + */ +#include +#include +#include + +#ifdef CONFIG_QUICKLIST + +struct quicklist { + void *page; + int nr_pages; +}; + +DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; + +/* + * The two key functions quicklist_alloc and quicklist_free are inline so + * that they may be custom compiled for the platform. + * Specifying a NULL ctor can remove constructor support. Specifying + * a constant quicklist allows the determination of the exact address + * in the per cpu area. + * + * The fast patch in quicklist_alloc touched only a per cpu cacheline and + * the first cacheline of the page itself. There is minmal overhead involved. + */ +static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) +{ + struct quicklist *q; + void **p = NULL; + + q =&get_cpu_var(quicklist)[nr]; + p = q->page; + if (likely(p)) { + q->page = p[0]; + p[0] = NULL; + q->nr_pages--; + } + put_cpu_var(quicklist); + if (likely(p)) + return p; + + p = (void *)__get_free_page(flags | __GFP_ZERO); + if (ctor && p) + ctor(p); + return p; +} + +static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p, + struct page *page) +{ + struct quicklist *q; + + q = &get_cpu_var(quicklist)[nr]; + *(void **)p = q->page; + q->page = p; + q->nr_pages++; + put_cpu_var(quicklist); +} + +static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp) +{ + __quicklist_free(nr, dtor, pp, virt_to_page(pp)); +} + +static inline void quicklist_free_page(int nr, void (*dtor)(void *), + struct page *page) +{ + __quicklist_free(nr, dtor, page_address(page), page); +} + +void quicklist_trim(int nr, void (*dtor)(void *), + unsigned long min_pages, unsigned long max_free); + +unsigned long quicklist_total_size(void); + +#else + +static inline unsigned long quicklist_total_size(void) +{ + return 0; +} + +#endif + +#endif /* LINUX_QUICKLIST_H */ + diff --git a/include/linux/quota.h b/include/linux/quota.h new file mode 100644 index 000000000..27aab84fc --- /dev/null +++ b/include/linux/quota.h @@ -0,0 +1,540 @@ +/* + * Copyright (c) 1982, 1986 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Robert Elz at The University of Melbourne. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#ifndef _LINUX_QUOTA_ +#define _LINUX_QUOTA_ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#undef USRQUOTA +#undef GRPQUOTA +#undef PRJQUOTA +enum quota_type { + USRQUOTA = 0, /* element used for user quotas */ + GRPQUOTA = 1, /* element used for group quotas */ + PRJQUOTA = 2, /* element used for project quotas */ +}; + +/* Masks for quota types when used as a bitmask */ +#define QTYPE_MASK_USR (1 << USRQUOTA) +#define QTYPE_MASK_GRP (1 << GRPQUOTA) +#define QTYPE_MASK_PRJ (1 << PRJQUOTA) + +typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ +typedef long long qsize_t; /* Type in which we store sizes */ + +struct kqid { /* Type in which we store the quota identifier */ + union { + kuid_t uid; + kgid_t gid; + kprojid_t projid; + }; + enum quota_type type; /* USRQUOTA (uid) or GRPQUOTA (gid) or PRJQUOTA (projid) */ +}; + +extern bool qid_eq(struct kqid left, struct kqid right); +extern bool qid_lt(struct kqid left, struct kqid right); +extern qid_t from_kqid(struct user_namespace *to, struct kqid qid); +extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid); +extern bool qid_valid(struct kqid qid); + +/** + * make_kqid - Map a user-namespace, type, qid tuple into a kqid. + * @from: User namespace that the qid is in + * @type: The type of quota + * @qid: Quota identifier + * + * Maps a user-namespace, type qid tuple into a kernel internal + * kqid, and returns that kqid. + * + * When there is no mapping defined for the user-namespace, type, + * qid tuple an invalid kqid is returned. Callers are expected to + * test for and handle handle invalid kqids being returned. + * Invalid kqids may be tested for using qid_valid(). + */ +static inline struct kqid make_kqid(struct user_namespace *from, + enum quota_type type, qid_t qid) +{ + struct kqid kqid; + + kqid.type = type; + switch (type) { + case USRQUOTA: + kqid.uid = make_kuid(from, qid); + break; + case GRPQUOTA: + kqid.gid = make_kgid(from, qid); + break; + case PRJQUOTA: + kqid.projid = make_kprojid(from, qid); + break; + default: + BUG(); + } + return kqid; +} + +/** + * make_kqid_invalid - Explicitly make an invalid kqid + * @type: The type of quota identifier + * + * Returns an invalid kqid with the specified type. 
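+ *
+ * Editorial note, not part of the upstream header: a minimal usage sketch
+ * showing how a caller is expected to test for an invalid kqid, assuming a
+ * raw uid-based qid_t received from userspace:
+ *
+ *	struct kqid kqid = make_kqid(current_user_ns(), USRQUOTA, id);
+ *
+ *	if (!qid_valid(kqid))
+ *		return -EINVAL;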
+ */ +static inline struct kqid make_kqid_invalid(enum quota_type type) +{ + struct kqid kqid; + + kqid.type = type; + switch (type) { + case USRQUOTA: + kqid.uid = INVALID_UID; + break; + case GRPQUOTA: + kqid.gid = INVALID_GID; + break; + case PRJQUOTA: + kqid.projid = INVALID_PROJID; + break; + default: + BUG(); + } + return kqid; +} + +/** + * make_kqid_uid - Make a kqid from a kuid + * @uid: The kuid to make the quota identifier from + */ +static inline struct kqid make_kqid_uid(kuid_t uid) +{ + struct kqid kqid; + kqid.type = USRQUOTA; + kqid.uid = uid; + return kqid; +} + +/** + * make_kqid_gid - Make a kqid from a kgid + * @gid: The kgid to make the quota identifier from + */ +static inline struct kqid make_kqid_gid(kgid_t gid) +{ + struct kqid kqid; + kqid.type = GRPQUOTA; + kqid.gid = gid; + return kqid; +} + +/** + * make_kqid_projid - Make a kqid from a projid + * @projid: The kprojid to make the quota identifier from + */ +static inline struct kqid make_kqid_projid(kprojid_t projid) +{ + struct kqid kqid; + kqid.type = PRJQUOTA; + kqid.projid = projid; + return kqid; +} + +/** + * qid_has_mapping - Report if a qid maps into a user namespace. + * @ns: The user namespace to see if a value maps into. + * @qid: The kernel internal quota identifier to test. + */ +static inline bool qid_has_mapping(struct user_namespace *ns, struct kqid qid) +{ + return from_kqid(ns, qid) != (qid_t) -1; +} + + +extern spinlock_t dq_data_lock; + +/* Maximal numbers of writes for quota operation (insert/delete/update) + * (over VFS all formats) */ +#define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC) +#define DQUOT_INIT_REWRITE max(V1_INIT_REWRITE, V2_INIT_REWRITE) +#define DQUOT_DEL_ALLOC max(V1_DEL_ALLOC, V2_DEL_ALLOC) +#define DQUOT_DEL_REWRITE max(V1_DEL_REWRITE, V2_DEL_REWRITE) + +/* + * Data for one user/group kept in memory + */ +struct mem_dqblk { + qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */ + qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */ + qsize_t dqb_curspace; /* current used space */ + qsize_t dqb_rsvspace; /* current reserved space for delalloc*/ + qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */ + qsize_t dqb_isoftlimit; /* preferred inode limit */ + qsize_t dqb_curinodes; /* current # allocated inodes */ + time64_t dqb_btime; /* time limit for excessive disk use */ + time64_t dqb_itime; /* time limit for excessive inode use */ +}; + +/* + * Data for one quotafile kept in memory + */ +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; /* Id of the dqi_format - used when turning + * quotas on after remount RW */ + struct list_head dqi_dirty_list; /* List of dirty dquots [dq_list_lock] */ + unsigned long dqi_flags; /* DFQ_ flags [dq_data_lock] */ + unsigned int dqi_bgrace; /* Space grace time [dq_data_lock] */ + unsigned int dqi_igrace; /* Inode grace time [dq_data_lock] */ + qsize_t dqi_max_spc_limit; /* Maximum space limit [static] */ + qsize_t dqi_max_ino_limit; /* Maximum inode limit [static] */ + void *dqi_priv; +}; + +struct super_block; + +/* Mask for flags passed to userspace */ +#define DQF_GETINFO_MASK (DQF_ROOT_SQUASH | DQF_SYS_FILE) +/* Mask for flags modifiable from userspace */ +#define DQF_SETINFO_MASK DQF_ROOT_SQUASH + +enum { + DQF_INFO_DIRTY_B = DQF_PRIVATE, +}; +#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? 
*/ + +extern void mark_info_dirty(struct super_block *sb, int type); +static inline int info_dirty(struct mem_dqinfo *info) +{ + return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags); +} + +enum { + DQST_LOOKUPS, + DQST_DROPS, + DQST_READS, + DQST_WRITES, + DQST_CACHE_HITS, + DQST_ALLOC_DQUOTS, + DQST_FREE_DQUOTS, + DQST_SYNCS, + _DQST_DQSTAT_LAST +}; + +struct dqstats { + unsigned long stat[_DQST_DQSTAT_LAST]; + struct percpu_counter counter[_DQST_DQSTAT_LAST]; +}; + +extern struct dqstats dqstats; + +static inline void dqstats_inc(unsigned int type) +{ + percpu_counter_inc(&dqstats.counter[type]); +} + +static inline void dqstats_dec(unsigned int type) +{ + percpu_counter_dec(&dqstats.counter[type]); +} + +#define DQ_MOD_B 0 /* dquot modified since read */ +#define DQ_BLKS_B 1 /* uid/gid has been warned about blk limit */ +#define DQ_INODES_B 2 /* uid/gid has been warned about inode limit */ +#define DQ_FAKE_B 3 /* no limits only usage */ +#define DQ_READ_B 4 /* dquot was read into memory */ +#define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */ +#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\ + * for the mask of entries set via SETQUOTA\ + * quotactl. They are set under dq_data_lock\ + * and the quota format handling dquot can\ + * clear them when it sees fit. */ + +struct dquot { + struct hlist_node dq_hash; /* Hash list in memory [dq_list_lock] */ + struct list_head dq_inuse; /* List of all quotas [dq_list_lock] */ + struct list_head dq_free; /* Free list element [dq_list_lock] */ + struct list_head dq_dirty; /* List of dirty dquots [dq_list_lock] */ + struct mutex dq_lock; /* dquot IO lock */ + spinlock_t dq_dqb_lock; /* Lock protecting dq_dqb changes */ + atomic_t dq_count; /* Use count */ + struct super_block *dq_sb; /* superblock this applies to */ + struct kqid dq_id; /* ID this applies to (uid, gid, projid) */ + loff_t dq_off; /* Offset of dquot on disk [dq_lock, stable once set] */ + unsigned long dq_flags; /* See DQ_* */ + struct mem_dqblk dq_dqb; /* Diskquota usage [dq_dqb_lock] */ +}; + +/* Operations which must be implemented by each quota format */ +struct quota_format_ops { + int (*check_quota_file)(struct super_block *sb, int type); /* Detect whether file is in our format */ + int (*read_file_info)(struct super_block *sb, int type); /* Read main info about file - called on quotaon() */ + int (*write_file_info)(struct super_block *sb, int type); /* Write main info about file */ + int (*free_file_info)(struct super_block *sb, int type); /* Called on quotaoff() */ + int (*read_dqblk)(struct dquot *dquot); /* Read structure for one user */ + int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */ + int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */ + int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structure in the quota file */ +}; + +/* Operations working with dquots */ +struct dquot_operations { + int (*write_dquot) (struct dquot *); /* Ordinary dquot write */ + struct dquot *(*alloc_dquot)(struct super_block *, int); /* Allocate memory for new dquot */ + void (*destroy_dquot)(struct dquot *); /* Free memory for dquot */ + int (*acquire_dquot) (struct dquot *); /* Quota is going to be created on disk */ + int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */ + int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */ + int (*write_info) (struct super_block *, int); /* Write of quota 
"superblock" */ + /* get reserved quota for delayed alloc, value returned is managed by + * quota code only */ + qsize_t *(*get_reserved_space) (struct inode *); + int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */ + /* Get number of inodes that were charged for a given inode */ + int (*get_inode_usage) (struct inode *, qsize_t *); + /* Get next ID with active quota structure */ + int (*get_next_id) (struct super_block *sb, struct kqid *qid); +}; + +struct path; + +/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */ +struct qc_dqblk { + int d_fieldmask; /* mask of fields to change in ->set_dqblk() */ + u64 d_spc_hardlimit; /* absolute limit on used space */ + u64 d_spc_softlimit; /* preferred limit on used space */ + u64 d_ino_hardlimit; /* maximum # allocated inodes */ + u64 d_ino_softlimit; /* preferred inode limit */ + u64 d_space; /* Space owned by the user */ + u64 d_ino_count; /* # inodes owned by the user */ + s64 d_ino_timer; /* zero if within inode limits */ + /* if not, we refuse service */ + s64 d_spc_timer; /* similar to above; for space */ + int d_ino_warns; /* # warnings issued wrt num inodes */ + int d_spc_warns; /* # warnings issued wrt used space */ + u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */ + u64 d_rt_spc_softlimit; /* preferred limit on RT space */ + u64 d_rt_space; /* realtime space owned */ + s64 d_rt_spc_timer; /* similar to above; for RT space */ + int d_rt_spc_warns; /* # warnings issued wrt RT space */ +}; + +/* + * Field specifiers for ->set_dqblk() in struct qc_dqblk and also for + * ->set_info() in struct qc_info + */ +#define QC_INO_SOFT (1<<0) +#define QC_INO_HARD (1<<1) +#define QC_SPC_SOFT (1<<2) +#define QC_SPC_HARD (1<<3) +#define QC_RT_SPC_SOFT (1<<4) +#define QC_RT_SPC_HARD (1<<5) +#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \ + QC_RT_SPC_SOFT | QC_RT_SPC_HARD) +#define QC_SPC_TIMER (1<<6) +#define QC_INO_TIMER (1<<7) +#define QC_RT_SPC_TIMER (1<<8) +#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER) +#define QC_SPC_WARNS (1<<9) +#define QC_INO_WARNS (1<<10) +#define QC_RT_SPC_WARNS (1<<11) +#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS) +#define QC_SPACE (1<<12) +#define QC_INO_COUNT (1<<13) +#define QC_RT_SPACE (1<<14) +#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE) +#define QC_FLAGS (1<<15) + +#define QCI_SYSFILE (1 << 0) /* Quota file is hidden from userspace */ +#define QCI_ROOT_SQUASH (1 << 1) /* Root squash turned on */ +#define QCI_ACCT_ENABLED (1 << 2) /* Quota accounting enabled */ +#define QCI_LIMITS_ENFORCED (1 << 3) /* Quota limits enforced */ + +/* Structures for communicating via ->get_state */ +struct qc_type_state { + unsigned int flags; /* Flags QCI_* */ + unsigned int spc_timelimit; /* Time after which space softlimit is + * enforced */ + unsigned int ino_timelimit; /* Ditto for inode softlimit */ + unsigned int rt_spc_timelimit; /* Ditto for real-time space */ + unsigned int spc_warnlimit; /* Limit for number of space warnings */ + unsigned int ino_warnlimit; /* Ditto for inodes */ + unsigned int rt_spc_warnlimit; /* Ditto for real-time space */ + unsigned long long ino; /* Inode number of quota file */ + blkcnt_t blocks; /* Number of 512-byte blocks in the file */ + blkcnt_t nextents; /* Number of extents in the file */ +}; + +struct qc_state { + unsigned int s_incoredqs; /* Number of dquots in core */ + struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */ +}; + +/* 
Structure for communicating via ->set_info */ +struct qc_info { + int i_fieldmask; /* mask of fields to change in ->set_info() */ + unsigned int i_flags; /* Flags QCI_* */ + unsigned int i_spc_timelimit; /* Time after which space softlimit is + * enforced */ + unsigned int i_ino_timelimit; /* Ditto for inode softlimit */ + unsigned int i_rt_spc_timelimit;/* Ditto for real-time space */ + unsigned int i_spc_warnlimit; /* Limit for number of space warnings */ + unsigned int i_ino_warnlimit; /* Limit for number of inode warnings */ + unsigned int i_rt_spc_warnlimit; /* Ditto for real-time space */ +}; + +/* Operations handling requests from userspace */ +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_off)(struct super_block *, int); + int (*quota_enable)(struct super_block *, unsigned int); + int (*quota_disable)(struct super_block *, unsigned int); + int (*quota_sync)(struct super_block *, int); + int (*set_info)(struct super_block *, int, struct qc_info *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_nextdqblk)(struct super_block *, struct kqid *, + struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_state)(struct super_block *, struct qc_state *); + int (*rm_xquota)(struct super_block *, unsigned int); +}; + +struct quota_format_type { + int qf_fmt_id; /* Quota format id */ + const struct quota_format_ops *qf_ops; /* Operations of format */ + struct module *qf_owner; /* Module implementing quota format */ + struct quota_format_type *qf_next; +}; + +/** + * Quota state flags - they actually come in two flavors - for users and groups. + * + * Actual typed flags layout: + * USRQUOTA GRPQUOTA + * DQUOT_USAGE_ENABLED 0x0001 0x0002 + * DQUOT_LIMITS_ENABLED 0x0004 0x0008 + * DQUOT_SUSPENDED 0x0010 0x0020 + * + * Following bits are used for non-typed flags: + * DQUOT_QUOTA_SYS_FILE 0x0040 + * DQUOT_NEGATIVE_USAGE 0x0080 + */ +enum { + _DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */ + _DQUOT_LIMITS_ENABLED, /* Enforce quota limits for users */ + _DQUOT_SUSPENDED, /* User diskquotas are off, but + * we have necessary info in + * memory to turn them on */ + _DQUOT_STATE_FLAGS +}; +#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED * MAXQUOTAS) +#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED * MAXQUOTAS) +#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED * MAXQUOTAS) +#define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \ + DQUOT_SUSPENDED) +/* Other quota flags */ +#define DQUOT_STATE_LAST (_DQUOT_STATE_FLAGS * MAXQUOTAS) +#define DQUOT_QUOTA_SYS_FILE (1 << DQUOT_STATE_LAST) + /* Quota file is a special + * system file and user cannot + * touch it. 
Filesystem is + * responsible for setting + * S_NOQUOTA, S_NOATIME flags + */ +#define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1)) + /* Allow negative quota usage */ +/* Do not track dirty dquots in a list */ +#define DQUOT_NOLIST_DIRTY (1 << (DQUOT_STATE_LAST + 2)) + +static inline unsigned int dquot_state_flag(unsigned int flags, int type) +{ + return flags << type; +} + +static inline unsigned int dquot_generic_flag(unsigned int flags, int type) +{ + return (flags >> type) & DQUOT_STATE_FLAGS; +} + +/* Bitmap of quota types where flag is set in flags */ +static __always_inline unsigned dquot_state_types(unsigned flags, unsigned flag) +{ + BUILD_BUG_ON_NOT_POWER_OF_2(flag); + return (flags / flag) & ((1 << MAXQUOTAS) - 1); +} + +#ifdef CONFIG_QUOTA_NETLINK_INTERFACE +extern void quota_send_warning(struct kqid qid, dev_t dev, + const char warntype); +#else +static inline void quota_send_warning(struct kqid qid, dev_t dev, + const char warntype) +{ + return; +} +#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */ + +struct quota_info { + unsigned int flags; /* Flags for diskquotas on this device */ + struct rw_semaphore dqio_sem; /* Lock quota file while I/O in progress */ + struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ + struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ + const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ +}; + +int register_quota_format(struct quota_format_type *fmt); +void unregister_quota_format(struct quota_format_type *fmt); + +struct quota_module_name { + int qm_fmt_id; + char *qm_mod_name; +}; + +#define INIT_QUOTA_MODULE_NAMES {\ + {QFMT_VFS_OLD, "quota_v1"},\ + {QFMT_VFS_V0, "quota_v2"},\ + {QFMT_VFS_V1, "quota_v2"},\ + {0, NULL}} + +#endif /* _QUOTA_ */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h new file mode 100644 index 000000000..91e0b7624 --- /dev/null +++ b/include/linux/quotaops.h @@ -0,0 +1,404 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for diskquota-operations. When diskquota is configured these + * macros expand to the right source-code. + * + * Author: Marco van Wieringen + */ +#ifndef _LINUX_QUOTAOPS_ +#define _LINUX_QUOTAOPS_ + +#include + +#define DQUOT_SPACE_WARN 0x1 +#define DQUOT_SPACE_RESERVE 0x2 +#define DQUOT_SPACE_NOFAIL 0x4 + +static inline struct quota_info *sb_dqopt(struct super_block *sb) +{ + return &sb->s_dquot; +} + +/* i_mutex must being held */ +static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) +{ + return (ia->ia_valid & ATTR_SIZE) || + (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) || + (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid)); +} + +int kernel_quotactl(unsigned int cmd, const char __user *special, + qid_t id, void __user *addr); + +#if defined(CONFIG_QUOTA) + +#define quota_error(sb, fmt, args...) \ + __quota_error((sb), __func__, fmt , ## args) + +extern __printf(3, 4) +void __quota_error(struct super_block *sb, const char *func, + const char *fmt, ...); + +/* + * declaration of quota_function calls in kernel. 
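+ *
+ * Editorial note, not part of the upstream header: a hedged sketch of the
+ * usual reference pattern around dqget() and dqput(), assuming quotas are
+ * active on @sb for the quota type carried in @qid:
+ *
+ *	struct dquot *dquot = dqget(sb, qid);
+ *	qsize_t used;
+ *
+ *	if (IS_ERR(dquot))
+ *		return PTR_ERR(dquot);
+ *	spin_lock(&dquot->dq_dqb_lock);
+ *	used = dquot->dq_dqb.dqb_curspace;
+ *	spin_unlock(&dquot->dq_dqb_lock);
+ *	dqput(dquot);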
+ */ +int dquot_initialize(struct inode *inode); +bool dquot_initialize_needed(struct inode *inode); +void dquot_drop(struct inode *inode); +struct dquot *dqget(struct super_block *sb, struct kqid qid); +static inline struct dquot *dqgrab(struct dquot *dquot) +{ + /* Make sure someone else has active reference to dquot */ + WARN_ON_ONCE(!atomic_read(&dquot->dq_count)); + WARN_ON_ONCE(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)); + atomic_inc(&dquot->dq_count); + return dquot; +} + +static inline bool dquot_is_busy(struct dquot *dquot) +{ + if (test_bit(DQ_MOD_B, &dquot->dq_flags)) + return true; + if (atomic_read(&dquot->dq_count) > 1) + return true; + return false; +} + +void dqput(struct dquot *dquot); +int dquot_scan_active(struct super_block *sb, + int (*fn)(struct dquot *dquot, unsigned long priv), + unsigned long priv); +struct dquot *dquot_alloc(struct super_block *sb, int type); +void dquot_destroy(struct dquot *dquot); + +int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); +void __dquot_free_space(struct inode *inode, qsize_t number, int flags); + +int dquot_alloc_inode(struct inode *inode); + +int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); +void dquot_free_inode(struct inode *inode); +void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); + +int dquot_disable(struct super_block *sb, int type, unsigned int flags); +/* Suspend quotas on remount RO */ +static inline int dquot_suspend(struct super_block *sb, int type) +{ + return dquot_disable(sb, type, DQUOT_SUSPENDED); +} +int dquot_resume(struct super_block *sb, int type); + +int dquot_commit(struct dquot *dquot); +int dquot_acquire(struct dquot *dquot); +int dquot_release(struct dquot *dquot); +int dquot_commit_info(struct super_block *sb, int type); +int dquot_get_next_id(struct super_block *sb, struct kqid *qid); +int dquot_mark_dquot_dirty(struct dquot *dquot); + +int dquot_file_open(struct inode *inode, struct file *file); + +int dquot_enable(struct inode *inode, int type, int format_id, + unsigned int flags); +int dquot_quota_on(struct super_block *sb, int type, int format_id, + const struct path *path); +int dquot_quota_on_mount(struct super_block *sb, char *qf_name, + int format_id, int type); +int dquot_quota_off(struct super_block *sb, int type); +int dquot_writeback_dquots(struct super_block *sb, int type); +int dquot_quota_sync(struct super_block *sb, int type); +int dquot_get_state(struct super_block *sb, struct qc_state *state); +int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii); +int dquot_get_dqblk(struct super_block *sb, struct kqid id, + struct qc_dqblk *di); +int dquot_get_next_dqblk(struct super_block *sb, struct kqid *id, + struct qc_dqblk *di); +int dquot_set_dqblk(struct super_block *sb, struct kqid id, + struct qc_dqblk *di); + +int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); +int dquot_transfer(struct inode *inode, struct iattr *iattr); + +static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type) +{ + return sb_dqopt(sb)->info + type; +} + +/* + * Functions for checking status of quota + */ + +static inline bool sb_has_quota_usage_enabled(struct super_block *sb, int type) +{ + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_USAGE_ENABLED, type); +} + +static inline bool sb_has_quota_limits_enabled(struct super_block *sb, int type) +{ + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_LIMITS_ENABLED, type); +} + +static inline bool sb_has_quota_suspended(struct super_block 
*sb, int type) +{ + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_SUSPENDED, type); +} + +static inline unsigned sb_any_quota_suspended(struct super_block *sb) +{ + return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_SUSPENDED); +} + +/* Does kernel know about any quota information for given sb + type? */ +static inline bool sb_has_quota_loaded(struct super_block *sb, int type) +{ + /* Currently if anything is on, then quota usage is on as well */ + return sb_has_quota_usage_enabled(sb, type); +} + +static inline unsigned sb_any_quota_loaded(struct super_block *sb) +{ + return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_USAGE_ENABLED); +} + +static inline bool sb_has_quota_active(struct super_block *sb, int type) +{ + return sb_has_quota_loaded(sb, type) && + !sb_has_quota_suspended(sb, type); +} + +/* + * Operations supported for diskquotas. + */ +extern const struct dquot_operations dquot_operations; +extern const struct quotactl_ops dquot_quotactl_sysfile_ops; + +#else + +static inline int sb_has_quota_usage_enabled(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_has_quota_limits_enabled(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_has_quota_suspended(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_any_quota_suspended(struct super_block *sb) +{ + return 0; +} + +/* Does kernel know about any quota information for given sb + type? */ +static inline int sb_has_quota_loaded(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_any_quota_loaded(struct super_block *sb) +{ + return 0; +} + +static inline int sb_has_quota_active(struct super_block *sb, int type) +{ + return 0; +} + +static inline int dquot_initialize(struct inode *inode) +{ + return 0; +} + +static inline bool dquot_initialize_needed(struct inode *inode) +{ + return false; +} + +static inline void dquot_drop(struct inode *inode) +{ +} + +static inline int dquot_alloc_inode(struct inode *inode) +{ + return 0; +} + +static inline void dquot_free_inode(struct inode *inode) +{ +} + +static inline int dquot_transfer(struct inode *inode, struct iattr *iattr) +{ + return 0; +} + +static inline int __dquot_alloc_space(struct inode *inode, qsize_t number, + int flags) +{ + if (!(flags & DQUOT_SPACE_RESERVE)) + inode_add_bytes(inode, number); + return 0; +} + +static inline void __dquot_free_space(struct inode *inode, qsize_t number, + int flags) +{ + if (!(flags & DQUOT_SPACE_RESERVE)) + inode_sub_bytes(inode, number); +} + +static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) +{ + inode_add_bytes(inode, number); + return 0; +} + +static inline int dquot_reclaim_space_nodirty(struct inode *inode, + qsize_t number) +{ + inode_sub_bytes(inode, number); + return 0; +} + +static inline int dquot_disable(struct super_block *sb, int type, + unsigned int flags) +{ + return 0; +} + +static inline int dquot_suspend(struct super_block *sb, int type) +{ + return 0; +} + +static inline int dquot_resume(struct super_block *sb, int type) +{ + return 0; +} + +#define dquot_file_open generic_file_open + +static inline int dquot_writeback_dquots(struct super_block *sb, int type) +{ + return 0; +} + +#endif /* CONFIG_QUOTA */ + +static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr) +{ + return __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN); +} + +static inline void dquot_alloc_space_nofail(struct inode *inode, qsize_t nr) +{ + __dquot_alloc_space(inode, nr, 
DQUOT_SPACE_WARN|DQUOT_SPACE_NOFAIL); + mark_inode_dirty_sync(inode); +} + +static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) +{ + int ret; + + ret = dquot_alloc_space_nodirty(inode, nr); + if (!ret) { + /* + * Mark inode fully dirty. Since we are allocating blocks, inode + * would become fully dirty soon anyway and it reportedly + * reduces lock contention. + */ + mark_inode_dirty(inode); + } + return ret; +} + +static inline int dquot_alloc_block_nodirty(struct inode *inode, qsize_t nr) +{ + return dquot_alloc_space_nodirty(inode, nr << inode->i_blkbits); +} + +static inline void dquot_alloc_block_nofail(struct inode *inode, qsize_t nr) +{ + dquot_alloc_space_nofail(inode, nr << inode->i_blkbits); +} + +static inline int dquot_alloc_block(struct inode *inode, qsize_t nr) +{ + return dquot_alloc_space(inode, nr << inode->i_blkbits); +} + +static inline int dquot_prealloc_block_nodirty(struct inode *inode, qsize_t nr) +{ + return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0); +} + +static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr) +{ + int ret; + + ret = dquot_prealloc_block_nodirty(inode, nr); + if (!ret) + mark_inode_dirty_sync(inode); + return ret; +} + +static inline int dquot_reserve_block(struct inode *inode, qsize_t nr) +{ + return __dquot_alloc_space(inode, nr << inode->i_blkbits, + DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE); +} + +static inline int dquot_claim_block(struct inode *inode, qsize_t nr) +{ + int ret; + + ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits); + if (!ret) + mark_inode_dirty_sync(inode); + return ret; +} + +static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr) +{ + dquot_reclaim_space_nodirty(inode, nr << inode->i_blkbits); + mark_inode_dirty_sync(inode); +} + +static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr) +{ + __dquot_free_space(inode, nr, 0); +} + +static inline void dquot_free_space(struct inode *inode, qsize_t nr) +{ + dquot_free_space_nodirty(inode, nr); + mark_inode_dirty_sync(inode); +} + +static inline void dquot_free_block_nodirty(struct inode *inode, qsize_t nr) +{ + dquot_free_space_nodirty(inode, nr << inode->i_blkbits); +} + +static inline void dquot_free_block(struct inode *inode, qsize_t nr) +{ + dquot_free_space(inode, nr << inode->i_blkbits); +} + +static inline void dquot_release_reservation_block(struct inode *inode, + qsize_t nr) +{ + __dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE); +} + +unsigned int qtype_enforce_flag(int type); + +#endif /* _LINUX_QUOTAOPS_ */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h new file mode 100644 index 000000000..34149e8b5 --- /dev/null +++ b/include/linux/radix-tree.h @@ -0,0 +1,621 @@ +/* + * Copyright (C) 2001 Momchil Velikov + * Portions Copyright (C) 2001 Christoph Hellwig + * Copyright (C) 2006 Nick Piggin + * Copyright (C) 2012 Konstantin Khlebnikov + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#ifndef _LINUX_RADIX_TREE_H +#define _LINUX_RADIX_TREE_H + +#include +#include +#include +#include +#include +#include +#include + +/* + * The bottom two bits of the slot determine how the remaining bits in the + * slot are interpreted: + * + * 00 - data pointer + * 01 - internal entry + * 10 - exceptional entry + * 11 - this bit combination is currently unused/reserved + * + * The internal entry may be a pointer to the next level in the tree, a + * sibling entry, or an indicator that the entry in this slot has been moved + * to another location in the tree and the lookup should be restarted. While + * NULL fits the 'data pointer' pattern, it means that there is no entry in + * the tree for this index (no matter what level of the tree it is found at). + * This means that you cannot store NULL in the tree as a value for the index. + */ +#define RADIX_TREE_ENTRY_MASK 3UL +#define RADIX_TREE_INTERNAL_NODE 1UL + +/* + * Most users of the radix tree store pointers but shmem/tmpfs stores swap + * entries in the same tree. They are marked as exceptional entries to + * distinguish them from pointers to struct page. + * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it. + */ +#define RADIX_TREE_EXCEPTIONAL_ENTRY 2 +#define RADIX_TREE_EXCEPTIONAL_SHIFT 2 + +static inline bool radix_tree_is_internal_node(void *ptr) +{ + return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) == + RADIX_TREE_INTERNAL_NODE; +} + +/*** radix-tree API starts here ***/ + +#define RADIX_TREE_MAX_TAGS 3 + +#ifndef RADIX_TREE_MAP_SHIFT +#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) +#endif + +#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) +#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) + +#define RADIX_TREE_TAG_LONGS \ + ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) + +#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) +#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ + RADIX_TREE_MAP_SHIFT)) + +/* + * @count is the count of every non-NULL element in the ->slots array + * whether that is an exceptional entry, a retry entry, a user pointer, + * a sibling entry or a pointer to the next level of the tree. + * @exceptional is the count of every element in ->slots which is + * either radix_tree_exceptional_entry() or is a sibling entry for an + * exceptional entry. 
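+ *
+ * Editorial note, not part of the upstream header: a small sketch of how a
+ * user such as shmem encodes a non-pointer value as an exceptional entry;
+ * @swp_val is a placeholder unsigned long small enough to fit above the
+ * shift:
+ *
+ *	void *entry = (void *)((swp_val << RADIX_TREE_EXCEPTIONAL_SHIFT) |
+ *			       RADIX_TREE_EXCEPTIONAL_ENTRY);
+ *
+ *	if (radix_tree_exceptional_entry(entry))
+ *		swp_val = (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;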
+ */ +struct radix_tree_node { + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ + unsigned char count; /* Total entry count */ + unsigned char exceptional; /* Exceptional entry count */ + struct radix_tree_node *parent; /* Used when ascending tree */ + struct radix_tree_root *root; /* The tree we belong to */ + union { + struct list_head private_list; /* For tree user */ + struct rcu_head rcu_head; /* Used when freeing node */ + }; + void __rcu *slots[RADIX_TREE_MAP_SIZE]; + unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; +}; + +/* The IDR tag is stored in the low bits of the GFP flags */ +#define ROOT_IS_IDR ((__force gfp_t)4) +/* The top bits of gfp_mask are used to store the root tags */ +#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT) + +struct radix_tree_root { + spinlock_t xa_lock; + gfp_t gfp_mask; + struct radix_tree_node __rcu *rnode; +}; + +#define RADIX_TREE_INIT(name, mask) { \ + .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \ + .gfp_mask = (mask), \ + .rnode = NULL, \ +} + +#define RADIX_TREE(name, mask) \ + struct radix_tree_root name = RADIX_TREE_INIT(name, mask) + +#define INIT_RADIX_TREE(root, mask) \ +do { \ + spin_lock_init(&(root)->xa_lock); \ + (root)->gfp_mask = (mask); \ + (root)->rnode = NULL; \ +} while (0) + +static inline bool radix_tree_empty(const struct radix_tree_root *root) +{ + return root->rnode == NULL; +} + +/** + * struct radix_tree_iter - radix tree iterator state + * + * @index: index of current slot + * @next_index: one beyond the last index for this chunk + * @tags: bit-mask for tag-iterating + * @node: node that contains current slot + * @shift: shift for the node that holds our slots + * + * This radix tree iterator works in terms of "chunks" of slots. A chunk is a + * subinterval of slots contained within one radix tree leaf node. It is + * described by a pointer to its first slot and a struct radix_tree_iter + * which holds the chunk's position in the tree and its size. For tagged + * iteration radix_tree_iter also holds the slots' bit-mask for one chosen + * radix tree tag. + */ +struct radix_tree_iter { + unsigned long index; + unsigned long next_index; + unsigned long tags; + struct radix_tree_node *node; +#ifdef CONFIG_RADIX_TREE_MULTIORDER + unsigned int shift; +#endif +}; + +static inline unsigned int iter_shift(const struct radix_tree_iter *iter) +{ +#ifdef CONFIG_RADIX_TREE_MULTIORDER + return iter->shift; +#else + return 0; +#endif +} + +/** + * Radix-tree synchronization + * + * The radix-tree API requires that users provide all synchronisation (with + * specific exceptions, noted below). + * + * Synchronization of access to the data items being stored in the tree, and + * management of their lifetimes must be completely managed by API users. + * + * For API usage, in general, + * - any function _modifying_ the tree or tags (inserting or deleting + * items, setting or clearing tags) must exclude other modifications, and + * exclude any functions reading the tree. + * - any function _reading_ the tree or tags (looking up items or tags, + * gang lookups) must exclude modifications to the tree, but may occur + * concurrently with other readers. 
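+ *
+ * Editorial note, not part of the upstream header: a hedged illustration of
+ * the RCU read-side pattern used with the lockless lookup functions listed
+ * below; @my_tree, @index and use_item() are placeholders:
+ *
+ *	void *item;
+ *
+ *	rcu_read_lock();
+ *	item = radix_tree_lookup(&my_tree, index);
+ *	if (item)
+ *		use_item(item);
+ *	rcu_read_unlock();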
+ * + * The notable exceptions to this rule are the following functions: + * __radix_tree_lookup + * radix_tree_lookup + * radix_tree_lookup_slot + * radix_tree_tag_get + * radix_tree_gang_lookup + * radix_tree_gang_lookup_slot + * radix_tree_gang_lookup_tag + * radix_tree_gang_lookup_tag_slot + * radix_tree_tagged + * + * The first 8 functions are able to be called locklessly, using RCU. The + * caller must ensure calls to these functions are made within rcu_read_lock() + * regions. Other readers (lock-free or otherwise) and modifications may be + * running concurrently. + * + * It is still required that the caller manage the synchronization and lifetimes + * of the items. So if RCU lock-free lookups are used, typically this would mean + * that the items have their own locks, or are amenable to lock-free access; and + * that the items are freed by RCU (or only freed after having been deleted from + * the radix tree *and* a synchronize_rcu() grace period). + * + * (Note, rcu_assign_pointer and rcu_dereference are not needed to control + * access to data items when inserting into or looking up from the radix tree) + * + * Note that the value returned by radix_tree_tag_get() may not be relied upon + * if only the RCU read lock is held. Functions to set/clear tags and to + * delete nodes running concurrently with it may affect its result such that + * two consecutive reads in the same locked section may return different + * values. If reliability is required, modification functions must also be + * excluded from concurrency. + * + * radix_tree_tagged is able to be called without locking or RCU. + */ + +/** + * radix_tree_deref_slot - dereference a slot + * @slot: slot pointer, returned by radix_tree_lookup_slot + * + * For use with radix_tree_lookup_slot(). Caller must hold tree at least read + * locked across slot lookup and dereference. Not required if write lock is + * held (ie. items cannot be concurrently inserted). + * + * radix_tree_deref_retry must be used to confirm validity of the pointer if + * only the read lock is held. + * + * Return: entry stored in that slot. + */ +static inline void *radix_tree_deref_slot(void __rcu **slot) +{ + return rcu_dereference(*slot); +} + +/** + * radix_tree_deref_slot_protected - dereference a slot with tree lock held + * @slot: slot pointer, returned by radix_tree_lookup_slot + * + * Similar to radix_tree_deref_slot. The caller does not hold the RCU read + * lock but it must hold the tree lock to prevent parallel updates. + * + * Return: entry stored in that slot. + */ +static inline void *radix_tree_deref_slot_protected(void __rcu **slot, + spinlock_t *treelock) +{ + return rcu_dereference_protected(*slot, lockdep_is_held(treelock)); +} + +/** + * radix_tree_deref_retry - check radix_tree_deref_slot + * @arg: pointer returned by radix_tree_deref_slot + * Returns: 0 if retry is not required, otherwise retry is required + * + * radix_tree_deref_retry must be used with radix_tree_deref_slot. + */ +static inline int radix_tree_deref_retry(void *arg) +{ + return unlikely(radix_tree_is_internal_node(arg)); +} + +/** + * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry? + * @arg: value returned by radix_tree_deref_slot + * Returns: 0 if well-aligned pointer, non-0 if exceptional entry. 
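+ *
+ * Editorial note, not part of the upstream header: a sketch of the usual
+ * slot dereference under rcu_read_lock(), combining radix_tree_deref_slot()
+ * with the retry and exceptional-entry checks, inside a lookup that can
+ * restart; @my_tree, @index, the restart label and handle_value() are
+ * placeholders:
+ *
+ *	void __rcu **slot = radix_tree_lookup_slot(&my_tree, index);
+ *	void *entry;
+ *
+ *	if (slot) {
+ *		entry = radix_tree_deref_slot(slot);
+ *		if (radix_tree_deref_retry(entry))
+ *			goto restart;
+ *		if (radix_tree_exceptional_entry(entry))
+ *			handle_value(entry);
+ *	}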
+ */ +static inline int radix_tree_exceptional_entry(void *arg) +{ + /* Not unlikely because radix_tree_exception often tested first */ + return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY; +} + +/** + * radix_tree_exception - radix_tree_deref_slot returned either exception? + * @arg: value returned by radix_tree_deref_slot + * Returns: 0 if well-aligned pointer, non-0 if either kind of exception. + */ +static inline int radix_tree_exception(void *arg) +{ + return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); +} + +int __radix_tree_create(struct radix_tree_root *, unsigned long index, + unsigned order, struct radix_tree_node **nodep, + void __rcu ***slotp); +int __radix_tree_insert(struct radix_tree_root *, unsigned long index, + unsigned order, void *); +static inline int radix_tree_insert(struct radix_tree_root *root, + unsigned long index, void *entry) +{ + return __radix_tree_insert(root, index, 0, entry); +} +void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, + struct radix_tree_node **nodep, void __rcu ***slotp); +void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); +void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, + unsigned long index); +typedef void (*radix_tree_update_node_t)(struct radix_tree_node *); +void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, + void __rcu **slot, void *entry, + radix_tree_update_node_t update_node); +void radix_tree_iter_replace(struct radix_tree_root *, + const struct radix_tree_iter *, void __rcu **slot, void *entry); +void radix_tree_replace_slot(struct radix_tree_root *, + void __rcu **slot, void *entry); +void __radix_tree_delete_node(struct radix_tree_root *, + struct radix_tree_node *, + radix_tree_update_node_t update_node); +void radix_tree_iter_delete(struct radix_tree_root *, + struct radix_tree_iter *iter, void __rcu **slot); +void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); +void *radix_tree_delete(struct radix_tree_root *, unsigned long); +void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *, + void __rcu **slot); +unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, + void **results, unsigned long first_index, + unsigned int max_items); +unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, + void __rcu ***results, unsigned long *indices, + unsigned long first_index, unsigned int max_items); +int radix_tree_preload(gfp_t gfp_mask); +int radix_tree_maybe_preload(gfp_t gfp_mask); +int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); +void radix_tree_init(void); +void *radix_tree_tag_set(struct radix_tree_root *, + unsigned long index, unsigned int tag); +void *radix_tree_tag_clear(struct radix_tree_root *, + unsigned long index, unsigned int tag); +int radix_tree_tag_get(const struct radix_tree_root *, + unsigned long index, unsigned int tag); +void radix_tree_iter_tag_set(struct radix_tree_root *, + const struct radix_tree_iter *iter, unsigned int tag); +void radix_tree_iter_tag_clear(struct radix_tree_root *, + const struct radix_tree_iter *iter, unsigned int tag); +unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, + void **results, unsigned long first_index, + unsigned int max_items, unsigned int tag); +unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, + void __rcu ***results, unsigned long first_index, + unsigned int max_items, unsigned int tag); +int radix_tree_tagged(const struct 
radix_tree_root *, unsigned int tag); + +static inline void radix_tree_preload_end(void) +{ + preempt_enable(); +} + +int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); +int radix_tree_split(struct radix_tree_root *, unsigned long index, + unsigned new_order); +int radix_tree_join(struct radix_tree_root *, unsigned long index, + unsigned new_order, void *); + +void __rcu **idr_get_free(struct radix_tree_root *root, + struct radix_tree_iter *iter, gfp_t gfp, + unsigned long max); + +enum { + RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ + RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */ + RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */ +}; + +/** + * radix_tree_iter_init - initialize radix tree iterator + * + * @iter: pointer to iterator state + * @start: iteration starting index + * Returns: NULL + */ +static __always_inline void __rcu ** +radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) +{ + /* + * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it + * in the case of a successful tagged chunk lookup. If the lookup was + * unsuccessful or non-tagged then nobody cares about ->tags. + * + * Set index to zero to bypass next_index overflow protection. + * See the comment in radix_tree_next_chunk() for details. + */ + iter->index = 0; + iter->next_index = start; + return NULL; +} + +/** + * radix_tree_next_chunk - find next chunk of slots for iteration + * + * @root: radix tree root + * @iter: iterator state + * @flags: RADIX_TREE_ITER_* flags and tag index + * Returns: pointer to chunk first slot, or NULL if there no more left + * + * This function looks up the next chunk in the radix tree starting from + * @iter->next_index. It returns a pointer to the chunk's first slot. + * Also it fills @iter with data about chunk: position in the tree (index), + * its end (next_index), and constructs a bit mask for tagged iterating (tags). + */ +void __rcu **radix_tree_next_chunk(const struct radix_tree_root *, + struct radix_tree_iter *iter, unsigned flags); + +/** + * radix_tree_iter_lookup - look up an index in the radix tree + * @root: radix tree root + * @iter: iterator state + * @index: key to look up + * + * If @index is present in the radix tree, this function returns the slot + * containing it and updates @iter to describe the entry. If @index is not + * present, it returns NULL. + */ +static inline void __rcu ** +radix_tree_iter_lookup(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG); +} + +/** + * radix_tree_iter_find - find a present entry + * @root: radix tree root + * @iter: iterator state + * @index: start location + * + * This function returns the slot containing the entry with the lowest index + * which is at least @index. If @index is larger than any present entry, this + * function returns NULL. The @iter is updated to describe the entry found. 
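As a sketch of how radix_tree_preload(), radix_tree_insert() and radix_tree_preload_end() declared above are normally combined: preload in sleepable context, then insert under the caller's own lock with preemption disabled by the preload. my_tree, my_lock and struct my_item are illustrative names, not part of this header.

        /* Hypothetical sketch of the preload/insert pattern. */
        static int my_insert(struct radix_tree_root *my_tree, spinlock_t *my_lock,
                             unsigned long index, struct my_item *item)
        {
                int err;

                err = radix_tree_preload(GFP_KERNEL);   /* may sleep */
                if (err)
                        return err;

                spin_lock(my_lock);                     /* serialize updates */
                err = radix_tree_insert(my_tree, index, item);
                spin_unlock(my_lock);

                radix_tree_preload_end();               /* re-enables preemption */
                return err;
        }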
+ */ +static inline void __rcu ** +radix_tree_iter_find(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, 0); +} + +/** + * radix_tree_iter_retry - retry this chunk of the iteration + * @iter: iterator state + * + * If we iterate over a tree protected only by the RCU lock, a race + * against deletion or creation may result in seeing a slot for which + * radix_tree_deref_retry() returns true. If so, call this function + * and continue the iteration. + */ +static inline __must_check +void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) +{ + iter->next_index = iter->index; + iter->tags = 0; + return NULL; +} + +static inline unsigned long +__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) +{ + return iter->index + (slots << iter_shift(iter)); +} + +/** + * radix_tree_iter_resume - resume iterating when the chunk may be invalid + * @slot: pointer to current slot + * @iter: iterator state + * Returns: New slot pointer + * + * If the iterator needs to release then reacquire a lock, the chunk may + * have been invalidated by an insertion or deletion. Call this function + * before releasing the lock to continue the iteration from the next index. + */ +void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, + struct radix_tree_iter *iter); + +/** + * radix_tree_chunk_size - get current chunk size + * + * @iter: pointer to radix tree iterator + * Returns: current chunk size + */ +static __always_inline long +radix_tree_chunk_size(struct radix_tree_iter *iter) +{ + return (iter->next_index - iter->index) >> iter_shift(iter); +} + +#ifdef CONFIG_RADIX_TREE_MULTIORDER +void __rcu **__radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags); +#else +/* Can't happen without sibling entries, but the compiler can't tell that */ +static inline void __rcu **__radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags) +{ + return slot; +} +#endif + +/** + * radix_tree_next_slot - find next slot in chunk + * + * @slot: pointer to current slot + * @iter: pointer to interator state + * @flags: RADIX_TREE_ITER_*, should be constant + * Returns: pointer to next slot, or NULL if there no more left + * + * This function updates @iter->index in the case of a successful lookup. + * For tagged lookup it also eats @iter->tags. + * + * There are several cases where 'slot' can be passed in as NULL to this + * function. These cases result from the use of radix_tree_iter_resume() or + * radix_tree_iter_retry(). In these cases we don't end up dereferencing + * 'slot' because either: + * a) we are doing tagged iteration and iter->tags has been set to 0, or + * b) we are doing non-tagged iteration, and iter->index and iter->next_index + * have been set up so that radix_tree_chunk_size() returns 1 or 0. 
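A sketch of an RCU-side walk using the radix_tree_for_each_slot() macro defined just below, handling slot movement with radix_tree_deref_retry() and radix_tree_iter_retry() as the comments above describe. my_walk, my_tree and struct my_item are hypothetical names.

        /* Hypothetical sketch: iterate all present entries under RCU. */
        static void my_walk(struct radix_tree_root *my_tree)
        {
                struct radix_tree_iter iter;
                void __rcu **slot;
                struct my_item *item;

                rcu_read_lock();
                radix_tree_for_each_slot(slot, my_tree, &iter, 0) {
                        item = radix_tree_deref_slot(slot);
                        if (unlikely(radix_tree_deref_retry(item))) {
                                slot = radix_tree_iter_retry(&iter);
                                continue;       /* re-lookup this chunk */
                        }
                        /* use item under RCU protection only */
                }
                rcu_read_unlock();
        }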
+ */ +static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags) +{ + if (flags & RADIX_TREE_ITER_TAGGED) { + iter->tags >>= 1; + if (unlikely(!iter->tags)) + return NULL; + if (likely(iter->tags & 1ul)) { + iter->index = __radix_tree_iter_add(iter, 1); + slot++; + goto found; + } + if (!(flags & RADIX_TREE_ITER_CONTIG)) { + unsigned offset = __ffs(iter->tags); + + iter->tags >>= offset++; + iter->index = __radix_tree_iter_add(iter, offset); + slot += offset; + goto found; + } + } else { + long count = radix_tree_chunk_size(iter); + + while (--count > 0) { + slot++; + iter->index = __radix_tree_iter_add(iter, 1); + + if (likely(*slot)) + goto found; + if (flags & RADIX_TREE_ITER_CONTIG) { + /* forbid switching to the next chunk */ + iter->next_index = 0; + break; + } + } + } + return NULL; + + found: + if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot)))) + return __radix_tree_next_slot(slot, iter, flags); + return slot; +} + +/** + * radix_tree_for_each_slot - iterate over non-empty slots + * + * @slot: the void** variable for pointer to slot + * @root: the struct radix_tree_root pointer + * @iter: the struct radix_tree_iter pointer + * @start: iteration starting index + * + * @slot points to radix tree slot, @iter->index contains its index. + */ +#define radix_tree_for_each_slot(slot, root, iter, start) \ + for (slot = radix_tree_iter_init(iter, start) ; \ + slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \ + slot = radix_tree_next_slot(slot, iter, 0)) + +/** + * radix_tree_for_each_contig - iterate over contiguous slots + * + * @slot: the void** variable for pointer to slot + * @root: the struct radix_tree_root pointer + * @iter: the struct radix_tree_iter pointer + * @start: iteration starting index + * + * @slot points to radix tree slot, @iter->index contains its index. + */ +#define radix_tree_for_each_contig(slot, root, iter, start) \ + for (slot = radix_tree_iter_init(iter, start) ; \ + slot || (slot = radix_tree_next_chunk(root, iter, \ + RADIX_TREE_ITER_CONTIG)) ; \ + slot = radix_tree_next_slot(slot, iter, \ + RADIX_TREE_ITER_CONTIG)) + +/** + * radix_tree_for_each_tagged - iterate over tagged slots + * + * @slot: the void** variable for pointer to slot + * @root: the struct radix_tree_root pointer + * @iter: the struct radix_tree_iter pointer + * @start: iteration starting index + * @tag: tag index + * + * @slot points to radix tree slot, @iter->index contains its index. + */ +#define radix_tree_for_each_tagged(slot, root, iter, start, tag) \ + for (slot = radix_tree_iter_init(iter, start) ; \ + slot || (slot = radix_tree_next_chunk(root, iter, \ + RADIX_TREE_ITER_TAGGED | tag)) ; \ + slot = radix_tree_next_slot(slot, iter, \ + RADIX_TREE_ITER_TAGGED | tag)) + +#endif /* _LINUX_RADIX_TREE_H */ diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h new file mode 100644 index 000000000..358c04bfb --- /dev/null +++ b/include/linux/raid/md_u.h @@ -0,0 +1,20 @@ +/* + md_u.h : user <=> kernel API between Linux raidtools and RAID drivers + Copyright (C) 1998 Ingo Molnar + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. 
+ + You should have received a copy of the GNU General Public License + (for example /usr/src/linux/COPYING); if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ +#ifndef _MD_U_H +#define _MD_U_H + +#include + +extern int mdp_major; +#endif diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h new file mode 100644 index 000000000..ea8505204 --- /dev/null +++ b/include/linux/raid/pq.h @@ -0,0 +1,191 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2003 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Boston MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + +#ifndef LINUX_RAID_RAID6_H +#define LINUX_RAID_RAID6_H + +#ifdef __KERNEL__ + +/* Set to 1 to use kernel-wide empty_zero_page */ +#define RAID6_USE_EMPTY_ZERO_PAGE 0 +#include + +/* We need a pre-zeroed page... if we don't want to use the kernel-provided + one define it here */ +#if RAID6_USE_EMPTY_ZERO_PAGE +# define raid6_empty_zero_page empty_zero_page +#else +extern const char raid6_empty_zero_page[PAGE_SIZE]; +#endif + +#else /* ! __KERNEL__ */ +/* Used for testing in user space */ + +#include +#include +#include +#include +#include +#include + +/* Not standard, but glibc defines it */ +#define BITS_PER_LONG __WORDSIZE + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +#ifndef PAGE_SIZE +# define PAGE_SIZE 4096 +#endif +extern const char raid6_empty_zero_page[PAGE_SIZE]; + +#define __init +#define __exit +#define __attribute_const__ __attribute__((const)) +#define noinline __attribute__((noinline)) + +#define preempt_enable() +#define preempt_disable() +#define cpu_has_feature(x) 1 +#define enable_kernel_altivec() +#define disable_kernel_altivec() + +#define EXPORT_SYMBOL(sym) +#define EXPORT_SYMBOL_GPL(sym) +#define MODULE_LICENSE(licence) +#define MODULE_DESCRIPTION(desc) +#define subsys_initcall(x) +#define module_exit(x) +#endif /* __KERNEL__ */ + +/* Routine choices */ +struct raid6_calls { + void (*gen_syndrome)(int, size_t, void **); + void (*xor_syndrome)(int, int, int, size_t, void **); + int (*valid)(void); /* Returns 1 if this routine set is usable */ + const char *name; /* Name of this routine set */ + int prefer; /* Has special performance attribute */ +}; + +/* Selected algorithm */ +extern struct raid6_calls raid6_call; + +/* Various routine sets */ +extern const struct raid6_calls raid6_intx1; +extern const struct raid6_calls raid6_intx2; +extern const struct raid6_calls raid6_intx4; +extern const struct raid6_calls raid6_intx8; +extern const struct raid6_calls raid6_intx16; +extern const struct raid6_calls raid6_intx32; +extern const struct raid6_calls raid6_mmxx1; +extern const struct raid6_calls raid6_mmxx2; +extern const struct raid6_calls raid6_sse1x1; +extern const struct raid6_calls raid6_sse1x2; +extern const struct raid6_calls raid6_sse2x1; +extern const struct raid6_calls raid6_sse2x2; +extern const struct raid6_calls raid6_sse2x4; +extern const struct raid6_calls raid6_altivec1; +extern const struct raid6_calls raid6_altivec2; +extern const struct raid6_calls raid6_altivec4; +extern const 
struct raid6_calls raid6_altivec8; +extern const struct raid6_calls raid6_avx2x1; +extern const struct raid6_calls raid6_avx2x2; +extern const struct raid6_calls raid6_avx2x4; +extern const struct raid6_calls raid6_avx512x1; +extern const struct raid6_calls raid6_avx512x2; +extern const struct raid6_calls raid6_avx512x4; +extern const struct raid6_calls raid6_s390vx8; +extern const struct raid6_calls raid6_vpermxor1; +extern const struct raid6_calls raid6_vpermxor2; +extern const struct raid6_calls raid6_vpermxor4; +extern const struct raid6_calls raid6_vpermxor8; + +struct raid6_recov_calls { + void (*data2)(int, size_t, int, int, void **); + void (*datap)(int, size_t, int, void **); + int (*valid)(void); + const char *name; + int priority; +}; + +extern const struct raid6_recov_calls raid6_recov_intx1; +extern const struct raid6_recov_calls raid6_recov_ssse3; +extern const struct raid6_recov_calls raid6_recov_avx2; +extern const struct raid6_recov_calls raid6_recov_avx512; +extern const struct raid6_recov_calls raid6_recov_s390xc; +extern const struct raid6_recov_calls raid6_recov_neon; + +extern const struct raid6_calls raid6_neonx1; +extern const struct raid6_calls raid6_neonx2; +extern const struct raid6_calls raid6_neonx4; +extern const struct raid6_calls raid6_neonx8; + +/* Algorithm list */ +extern const struct raid6_calls * const raid6_algos[]; +extern const struct raid6_recov_calls *const raid6_recov_algos[]; +int raid6_select_algo(void); + +/* Return values from chk_syndrome */ +#define RAID6_OK 0 +#define RAID6_P_BAD 1 +#define RAID6_Q_BAD 2 +#define RAID6_PQ_BAD 3 + +/* Galois field tables */ +extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256))); +extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256))); +extern const u8 raid6_gfexp[256] __attribute__((aligned(256))); +extern const u8 raid6_gflog[256] __attribute__((aligned(256))); +extern const u8 raid6_gfinv[256] __attribute__((aligned(256))); +extern const u8 raid6_gfexi[256] __attribute__((aligned(256))); + +/* Recovery routines */ +extern void (*raid6_2data_recov)(int disks, size_t bytes, int faila, int failb, + void **ptrs); +extern void (*raid6_datap_recov)(int disks, size_t bytes, int faila, + void **ptrs); +void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, + void **ptrs); + +/* Some definitions to allow code to be compiled for testing in userspace */ +#ifndef __KERNEL__ + +# define jiffies raid6_jiffies() +# define printk printf +# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__) +# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__) +# define GFP_KERNEL 0 +# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \ + PROT_READ|PROT_WRITE, \ + MAP_PRIVATE|MAP_ANONYMOUS,\ + 0, 0)) +# define free_pages(x, y) munmap((void *)(x), PAGE_SIZE << (y)) + +static inline void cpu_relax(void) +{ + /* Nothing */ +} + +#undef HZ +#define HZ 1000 +static inline uint32_t raid6_jiffies(void) +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_sec*1000 + tv.tv_usec/1000; +} + +#endif /* ! 
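A sketch of the gen_syndrome() calling convention, as used by the raid6 library and its callers: ptrs[] carries the data blocks first, followed by the P and Q destination blocks, all of the same length. The names my_gen_pq and MY_DATA_DISKS and the fixed-size array are illustrative assumptions, not part of this header.

        /* Hypothetical sketch: compute P/Q with the selected routine set. */
        #define MY_DATA_DISKS 4

        static void my_gen_pq(void *data[MY_DATA_DISKS], void *p, void *q,
                              size_t bytes)
        {
                void *ptrs[MY_DATA_DISKS + 2];
                int i;

                for (i = 0; i < MY_DATA_DISKS; i++)
                        ptrs[i] = data[i];
                ptrs[MY_DATA_DISKS] = p;        /* P parity destination */
                ptrs[MY_DATA_DISKS + 1] = q;    /* Q syndrome destination */

                /* raid6_call was chosen by raid6_select_algo() at init time. */
                raid6_call.gen_syndrome(MY_DATA_DISKS + 2, bytes, ptrs);
        }

Recovery after failures follows the same ptrs[] layout, going through the raid6_2data_recov()/raid6_datap_recov() function pointers declared above.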
__KERNEL__ */ + +#endif /* LINUX_RAID_RAID6_H */ diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h new file mode 100644 index 000000000..2a9fee8dd --- /dev/null +++ b/include/linux/raid/xor.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _XOR_H +#define _XOR_H + +#define MAX_XOR_BLOCKS 4 + +extern void xor_blocks(unsigned int count, unsigned int bytes, + void *dest, void **srcs); + +struct xor_block_template { + struct xor_block_template *next; + const char *name; + int speed; + void (*do_2)(unsigned long, unsigned long *, unsigned long *); + void (*do_3)(unsigned long, unsigned long *, unsigned long *, + unsigned long *); + void (*do_4)(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *); + void (*do_5)(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *, unsigned long *); +}; + +#endif diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h new file mode 100644 index 000000000..ec8655514 --- /dev/null +++ b/include/linux/raid_class.h @@ -0,0 +1,84 @@ +/* + * raid_class.h - a generic raid visualisation class + * + * Copyright (c) 2005 - James Bottomley + * + * This file is licensed under GPLv2 + */ +#include + +struct raid_template { + struct transport_container raid_attrs; +}; + +struct raid_function_template { + void *cookie; + int (*is_raid)(struct device *); + void (*get_resync)(struct device *); + void (*get_state)(struct device *); +}; + +enum raid_state { + RAID_STATE_UNKNOWN = 0, + RAID_STATE_ACTIVE, + RAID_STATE_DEGRADED, + RAID_STATE_RESYNCING, + RAID_STATE_OFFLINE, +}; + +enum raid_level { + RAID_LEVEL_UNKNOWN = 0, + RAID_LEVEL_LINEAR, + RAID_LEVEL_0, + RAID_LEVEL_1, + RAID_LEVEL_10, + RAID_LEVEL_1E, + RAID_LEVEL_3, + RAID_LEVEL_4, + RAID_LEVEL_5, + RAID_LEVEL_50, + RAID_LEVEL_6, + RAID_LEVEL_JBOD, +}; + +struct raid_data { + struct list_head component_list; + int component_count; + enum raid_level level; + enum raid_state state; + int resync; +}; + +/* resync complete goes from 0 to this */ +#define RAID_MAX_RESYNC (10000) + +#define DEFINE_RAID_ATTRIBUTE(type, attr) \ +static inline void \ +raid_set_##attr(struct raid_template *r, struct device *dev, type value) { \ + struct device *device = \ + attribute_container_find_class_device(&r->raid_attrs.ac, dev);\ + struct raid_data *rd; \ + BUG_ON(!device); \ + rd = dev_get_drvdata(device); \ + rd->attr = value; \ +} \ +static inline type \ +raid_get_##attr(struct raid_template *r, struct device *dev) { \ + struct device *device = \ + attribute_container_find_class_device(&r->raid_attrs.ac, dev);\ + struct raid_data *rd; \ + BUG_ON(!device); \ + rd = dev_get_drvdata(device); \ + return rd->attr; \ +} + +DEFINE_RAID_ATTRIBUTE(enum raid_level, level) +DEFINE_RAID_ATTRIBUTE(int, resync) +DEFINE_RAID_ATTRIBUTE(enum raid_state, state) + +struct raid_template *raid_class_attach(struct raid_function_template *); +void raid_class_release(struct raid_template *); + +int __must_check raid_component_add(struct raid_template *, struct device *, + struct device *); + diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h new file mode 100644 index 000000000..5ef7d54ca --- /dev/null +++ b/include/linux/ramfs.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RAMFS_H +#define _LINUX_RAMFS_H + +struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir, + umode_t mode, dev_t dev); +extern struct dentry *ramfs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void 
*data); + +#ifdef CONFIG_MMU +static inline int +ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) +{ + return 0; +} +#else +extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); +#endif + +extern const struct file_operations ramfs_file_operations; +extern const struct vm_operations_struct generic_file_vm_ops; +extern int __init init_ramfs_fs(void); + +int ramfs_fill_super(struct super_block *sb, void *data, int silent); + +#endif diff --git a/include/linux/random.h b/include/linux/random.h new file mode 100644 index 000000000..3feafab49 --- /dev/null +++ b/include/linux/random.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_RANDOM_H +#define _LINUX_RANDOM_H + +#include +#include +#include +#include + +#include + +struct notifier_block; + +void add_device_randomness(const void *buf, size_t len); +void __init add_bootloader_randomness(const void *buf, size_t len); +void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value) __latent_entropy; +void add_interrupt_randomness(int irq) __latent_entropy; +void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy); + +#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) +static inline void add_latent_entropy(void) +{ + add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy)); +} +#else +static inline void add_latent_entropy(void) { } +#endif + +void get_random_bytes(void *buf, size_t len); +size_t __must_check get_random_bytes_arch(void *buf, size_t len); +u32 get_random_u32(void); +u64 get_random_u64(void); +static inline unsigned int get_random_int(void) +{ + return get_random_u32(); +} +static inline unsigned long get_random_long(void) +{ +#if BITS_PER_LONG == 64 + return get_random_u64(); +#else + return get_random_u32(); +#endif +} + +/* + * On 64-bit architectures, protect against non-terminated C string overflows + * by zeroing out the first byte of the canary; this leaves 56 bits of entropy. + */ +#ifdef CONFIG_64BIT +# ifdef __LITTLE_ENDIAN +# define CANARY_MASK 0xffffffffffffff00UL +# else /* big endian, 64 bits: */ +# define CANARY_MASK 0x00ffffffffffffffUL +# endif +#else /* 32 bits: */ +# define CANARY_MASK 0xffffffffUL +#endif + +static inline unsigned long get_random_canary(void) +{ + return get_random_long() & CANARY_MASK; +} + +int __init random_init(const char *command_line); +bool rng_is_initialized(void); +int wait_for_random_bytes(void); +int register_random_ready_notifier(struct notifier_block *nb); +int unregister_random_ready_notifier(struct notifier_block *nb); + +/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). + * Returns the result of the call to wait_for_random_bytes. */ +static inline int get_random_bytes_wait(void *buf, size_t nbytes) +{ + int ret = wait_for_random_bytes(); + get_random_bytes(buf, nbytes); + return ret; +} + +#define declare_get_random_var_wait(name, ret_type) \ + static inline int get_random_ ## name ## _wait(ret_type *out) { \ + int ret = wait_for_random_bytes(); \ + if (unlikely(ret)) \ + return ret; \ + *out = get_random_ ## name(); \ + return 0; \ + } +declare_get_random_var_wait(u32, u32) +declare_get_random_var_wait(u64, u32) +declare_get_random_var_wait(int, unsigned int) +declare_get_random_var_wait(long, unsigned long) +#undef declare_get_random_var + +/* + * This is designed to be standalone for just prandom + * users, but for now we include it from + * for legacy reasons. 
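A small sketch of the wait variants declared above, for callers that must not consume output before the RNG is initialized. my_init_cookie is a hypothetical name; the choice of a u64 cookie is purely illustrative.

        /* Hypothetical sketch: fill a cookie only once the RNG is ready. */
        static int my_init_cookie(u64 *cookie)
        {
                /*
                 * Returns non-zero if the wait was interrupted before the RNG
                 * became ready; the buffer is filled either way, mirroring
                 * get_random_bytes_wait() above.
                 */
                return get_random_bytes_wait(cookie, sizeof(*cookie));
        }

For callers that do not need to wait, get_random_u32()/get_random_u64() above are the usual interfaces.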
+ */ +#include + +#ifdef CONFIG_ARCH_RANDOM +# include +#else +static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; } +static inline bool __must_check arch_get_random_int(unsigned int *v) { return false; } +static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { return false; } +static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { return false; } +#endif + +/* + * Called from the boot CPU during startup; not valid to call once + * secondary CPUs are up and preemption is possible. + */ +#ifndef arch_get_random_seed_long_early +static inline bool __init arch_get_random_seed_long_early(unsigned long *v) +{ + WARN_ON(system_state != SYSTEM_BOOTING); + return arch_get_random_seed_long(v); +} +#endif + +#ifndef arch_get_random_long_early +static inline bool __init arch_get_random_long_early(unsigned long *v) +{ + WARN_ON(system_state != SYSTEM_BOOTING); + return arch_get_random_long(v); +} +#endif + +#ifdef CONFIG_SMP +int random_prepare_cpu(unsigned int cpu); +int random_online_cpu(unsigned int cpu); +#endif + +#ifndef MODULE +extern const struct file_operations random_fops, urandom_fops; +#endif + +#endif /* _LINUX_RANDOM_H */ diff --git a/include/linux/range.h b/include/linux/range.h new file mode 100644 index 000000000..d1fbeb664 --- /dev/null +++ b/include/linux/range.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RANGE_H +#define _LINUX_RANGE_H + +struct range { + u64 start; + u64 end; +}; + +int add_range(struct range *range, int az, int nr_range, + u64 start, u64 end); + + +int add_range_with_merge(struct range *range, int az, int nr_range, + u64 start, u64 end); + +void subtract_range(struct range *range, int az, u64 start, u64 end); + +int clean_sort_range(struct range *range, int az); + +void sort_range(struct range *range, int nr_range); + +#define MAX_RESOURCE ((resource_size_t)~0) +static inline resource_size_t cap_resource(u64 val) +{ + if (val > MAX_RESOURCE) + return MAX_RESOURCE; + + return val; +} +#endif diff --git a/include/linux/ras.h b/include/linux/ras.h new file mode 100644 index 000000000..7c3debb47 --- /dev/null +++ b/include/linux/ras.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __RAS_H__ +#define __RAS_H__ + +#include +#include +#include + +#ifdef CONFIG_DEBUG_FS +int ras_userspace_consumers(void); +void ras_debugfs_init(void); +int ras_add_daemon_trace(void); +#else +static inline int ras_userspace_consumers(void) { return 0; } +static inline void ras_debugfs_init(void) { } +static inline int ras_add_daemon_trace(void) { return 0; } +#endif + +#ifdef CONFIG_RAS_CEC +void __init cec_init(void); +int __init parse_cec_param(char *str); +int cec_add_elem(u64 pfn); +#else +static inline void __init cec_init(void) { } +static inline int cec_add_elem(u64 pfn) { return -ENODEV; } +#endif + +#ifdef CONFIG_RAS +void log_non_standard_event(const guid_t *sec_type, + const guid_t *fru_id, const char *fru_text, + const u8 sev, const u8 *err, const u32 len); +void log_arm_hw_error(struct cper_sec_proc_arm *err); +#else +static inline void +log_non_standard_event(const guid_t *sec_type, + const guid_t *fru_id, const char *fru_text, + const u8 sev, const u8 *err, const u32 len) +{ return; } +static inline void +log_arm_hw_error(struct cper_sec_proc_arm *err) { return; } +#endif + +#endif /* __RAS_H__ */ diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h new file mode 100644 index 000000000..8ddf79e92 --- /dev/null +++ 
b/include/linux/ratelimit.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RATELIMIT_H +#define _LINUX_RATELIMIT_H + +#include +#include +#include + +#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) +#define DEFAULT_RATELIMIT_BURST 10 + +/* issue num suppressed message on exit */ +#define RATELIMIT_MSG_ON_RELEASE BIT(0) + +struct ratelimit_state { + raw_spinlock_t lock; /* protect the state */ + + int interval; + int burst; + int printed; + int missed; + unsigned long begin; + unsigned long flags; +}; + +#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .interval = interval_init, \ + .burst = burst_init, \ + } + +#define RATELIMIT_STATE_INIT_DISABLED \ + RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) + +#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ + \ + struct ratelimit_state name = \ + RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ + +static inline void ratelimit_state_init(struct ratelimit_state *rs, + int interval, int burst) +{ + memset(rs, 0, sizeof(*rs)); + + raw_spin_lock_init(&rs->lock); + rs->interval = interval; + rs->burst = burst; +} + +static inline void ratelimit_default_init(struct ratelimit_state *rs) +{ + return ratelimit_state_init(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); +} + +static inline void ratelimit_state_exit(struct ratelimit_state *rs) +{ + if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) + return; + + if (rs->missed) { + pr_warn("%s: %d output lines suppressed due to ratelimiting\n", + current->comm, rs->missed); + rs->missed = 0; + } +} + +static inline void +ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags) +{ + rs->flags = flags; +} + +extern struct ratelimit_state printk_ratelimit_state; + +extern int ___ratelimit(struct ratelimit_state *rs, const char *func); +#define __ratelimit(state) ___ratelimit(state, __func__) + +#ifdef CONFIG_PRINTK + +#define WARN_ON_RATELIMIT(condition, state) ({ \ + bool __rtn_cond = !!(condition); \ + WARN_ON(__rtn_cond && __ratelimit(state)); \ + __rtn_cond; \ +}) + +#define WARN_RATELIMIT(condition, format, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + int rtn = !!(condition); \ + \ + if (unlikely(rtn && __ratelimit(&_rs))) \ + WARN(rtn, format, ##__VA_ARGS__); \ + \ + rtn; \ +}) + +#else + +#define WARN_ON_RATELIMIT(condition, state) \ + WARN_ON(condition) + +#define WARN_RATELIMIT(condition, format, ...) \ +({ \ + int rtn = WARN(condition, format, ##__VA_ARGS__); \ + rtn; \ +}) + +#endif + +#endif /* _LINUX_RATELIMIT_H */ diff --git a/include/linux/rational.h b/include/linux/rational.h new file mode 100644 index 000000000..33f5f5fc3 --- /dev/null +++ b/include/linux/rational.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * rational fractions + * + * Copyright (C) 2009 emlix GmbH, Oskar Schirmer + * + * helper functions when coping with rational numbers, + * e.g. 
when calculating optimum numerator/denominator pairs for + * pll configuration taking into account restricted register size + */ + +#ifndef _LINUX_RATIONAL_H +#define _LINUX_RATIONAL_H + +void rational_best_approximation( + unsigned long given_numerator, unsigned long given_denominator, + unsigned long max_numerator, unsigned long max_denominator, + unsigned long *best_numerator, unsigned long *best_denominator); + +#endif /* _LINUX_RATIONAL_H */ diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h new file mode 100644 index 000000000..fcbeed405 --- /dev/null +++ b/include/linux/rbtree.h @@ -0,0 +1,151 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/include/linux/rbtree.h + + To use rbtrees you'll have to implement your own insert and search cores. + This will avoid us to use callbacks and to drop drammatically performances. + I know it's not the cleaner way, but in C (not in C++) to get + performances and genericity... + + See Documentation/rbtree.txt for documentation and samples. +*/ + +#ifndef _LINUX_RBTREE_H +#define _LINUX_RBTREE_H + +#include +#include +#include + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); + /* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + +/* + * Leftmost-cached rbtrees. + * + * We do not cache the rightmost node based on footprint + * size vs number of potential users that could benefit + * from O(1) rb_last(). Just not worth it, users that want + * this feature can always implement the logic explicitly. + * Furthermore, users that want to cache both pointers may + * find it a bit asymmetric, but that's ok. 
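Since the header comment says callers implement their own insert and search cores, here is a minimal sketch of those cores in the style of Documentation/rbtree.txt, using rb_entry(), rb_link_node() and rb_insert_color() declared just below. struct my_node and the unsigned long key are illustrative assumptions.

        /* Hypothetical sketch of hand-rolled search/insert cores. */
        struct my_node {
                struct rb_node node;
                unsigned long key;
        };

        static struct my_node *my_rb_search(struct rb_root *root, unsigned long key)
        {
                struct rb_node *n = root->rb_node;

                while (n) {
                        struct my_node *data = rb_entry(n, struct my_node, node);

                        if (key < data->key)
                                n = n->rb_left;
                        else if (key > data->key)
                                n = n->rb_right;
                        else
                                return data;
                }
                return NULL;
        }

        static void my_rb_insert(struct rb_root *root, struct my_node *new)
        {
                struct rb_node **link = &root->rb_node, *parent = NULL;

                while (*link) {
                        struct my_node *this = rb_entry(*link, struct my_node, node);

                        parent = *link;
                        if (new->key < this->key)
                                link = &(*link)->rb_left;
                        else
                                link = &(*link)->rb_right;
                }

                rb_link_node(&new->node, parent, link);
                rb_insert_color(&new->node, root);
        }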
+ */ +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) + +#define RB_ROOT (struct rb_root) { NULL, } +#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } +#define rb_entry(ptr, type, member) container_of(ptr, type, member) + +#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) + +/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ +#define RB_EMPTY_NODE(node) \ + ((node)->__rb_parent_color == (unsigned long)(node)) +#define RB_CLEAR_NODE(node) \ + ((node)->__rb_parent_color = (unsigned long)(node)) + + +extern void rb_insert_color(struct rb_node *, struct rb_root *); +extern void rb_erase(struct rb_node *, struct rb_root *); + + +/* Find logical next and previous nodes in a tree */ +extern struct rb_node *rb_next(const struct rb_node *); +extern struct rb_node *rb_prev(const struct rb_node *); +extern struct rb_node *rb_first(const struct rb_root *); +extern struct rb_node *rb_last(const struct rb_root *); + +extern void rb_insert_color_cached(struct rb_node *, + struct rb_root_cached *, bool); +extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *); +/* Same as rb_first(), but O(1) */ +#define rb_first_cached(root) (root)->rb_leftmost + +/* Postorder iteration - always visit the parent after its children */ +extern struct rb_node *rb_first_postorder(const struct rb_root *); +extern struct rb_node *rb_next_postorder(const struct rb_node *); + +/* Fast replacement of a single node without remove/rebalance/add/rebalance */ +extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); +extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); +extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, + struct rb_root_cached *root); + +static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = NULL; + + *rb_link = node; +} + +static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = NULL; + + rcu_assign_pointer(*rb_link, node); +} + +#define rb_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? rb_entry(____ptr, type, member) : NULL; \ + }) + +/** + * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of + * given type allowing the backing memory of @pos to be invalidated + * + * @pos: the 'type *' to use as a loop cursor. + * @n: another 'type *' to use as temporary storage + * @root: 'rb_root *' of the rbtree. + * @field: the name of the rb_node field within 'type'. + * + * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as + * list_for_each_entry_safe() and allows the iteration to continue independent + * of changes to @pos by the body of the loop. + * + * Note, however, that it cannot handle other modifications that re-order the + * rbtree it is iterating over. This includes calling rb_erase() on @pos, as + * rb_erase() may rebalance the tree, causing us to miss some nodes. 
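For the leftmost-cached variant, the caller tells rb_insert_color_cached() whether the new node became the leftmost one; a sketch, reusing the hypothetical struct my_node from the earlier example:

        /* Hypothetical sketch: insertion into an rb_root_cached. */
        static void my_rb_insert_cached(struct rb_root_cached *root,
                                        struct my_node *new)
        {
                struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
                bool leftmost = true;

                while (*link) {
                        struct my_node *this = rb_entry(*link, struct my_node, node);

                        parent = *link;
                        if (new->key < this->key) {
                                link = &(*link)->rb_left;
                        } else {
                                link = &(*link)->rb_right;
                                leftmost = false;       /* went right at least once */
                        }
                }

                rb_link_node(&new->node, parent, link);
                rb_insert_color_cached(&new->node, root, leftmost);
        }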
+ */ +#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ + for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ + pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \ + typeof(*pos), field); 1; }); \ + pos = n) + +#endif /* _LINUX_RBTREE_H */ diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h new file mode 100644 index 000000000..af8a61be2 --- /dev/null +++ b/include/linux/rbtree_augmented.h @@ -0,0 +1,292 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + (C) 2002 David Woodhouse + (C) 2012 Michel Lespinasse + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/include/linux/rbtree_augmented.h +*/ + +#ifndef _LINUX_RBTREE_AUGMENTED_H +#define _LINUX_RBTREE_AUGMENTED_H + +#include +#include +#include + +/* + * Please note - only struct rb_augment_callbacks and the prototypes for + * rb_insert_augmented() and rb_erase_augmented() are intended to be public. + * The rest are implementation details you are not expected to depend on. + * + * See Documentation/rbtree.txt for documentation and samples. + */ + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *node, struct rb_node *stop); + void (*copy)(struct rb_node *old, struct rb_node *new); + void (*rotate)(struct rb_node *old, struct rb_node *new); +}; + +extern void __rb_insert_augmented(struct rb_node *node, + struct rb_root *root, + bool newleft, struct rb_node **leftmost, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); +/* + * Fixup the rbtree and update the augmented information when rebalancing. + * + * On insertion, the user must update the augmented information on the path + * leading to the inserted node, then call rb_link_node() as usual and + * rb_augment_inserted() instead of the usual rb_insert_color() call. + * If rb_augment_inserted() rebalances the rbtree, it will callback into + * a user provided function to update the augmented information on the + * affected subtrees. 
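A sketch of an augmented rbtree in the style of the kernel's interval trees: each node caches the maximum end value of its subtree, the RB_DECLARE_CALLBACKS() helper defined a little further down generates the three callbacks from one compute function, and insertion updates the cached values on the way down before calling rb_insert_augmented(). struct my_interval and all names here are illustrative assumptions.

        /* Hypothetical sketch of a subtree-max augmentation. */
        struct my_interval {
                struct rb_node rb;
                unsigned long start, last;
                unsigned long subtree_last;     /* max ->last over the subtree */
        };

        static unsigned long my_compute_subtree_last(struct my_interval *node)
        {
                unsigned long max = node->last, child_last;

                if (node->rb.rb_left) {
                        child_last = rb_entry(node->rb.rb_left,
                                              struct my_interval, rb)->subtree_last;
                        if (child_last > max)
                                max = child_last;
                }
                if (node->rb.rb_right) {
                        child_last = rb_entry(node->rb.rb_right,
                                              struct my_interval, rb)->subtree_last;
                        if (child_last > max)
                                max = child_last;
                }
                return max;
        }

        RB_DECLARE_CALLBACKS(static, my_augment_cb, struct my_interval, rb,
                             unsigned long, subtree_last, my_compute_subtree_last)

        static void my_interval_insert(struct my_interval *node, struct rb_root *root)
        {
                struct rb_node **link = &root->rb_node, *rb_parent = NULL;
                unsigned long start = node->start, last = node->last;
                struct my_interval *parent;

                while (*link) {
                        rb_parent = *link;
                        parent = rb_entry(rb_parent, struct my_interval, rb);
                        if (parent->subtree_last < last)
                                parent->subtree_last = last;    /* augment the path */
                        if (start < parent->start)
                                link = &parent->rb.rb_left;
                        else
                                link = &parent->rb.rb_right;
                }

                node->subtree_last = last;
                rb_link_node(&node->rb, rb_parent, link);
                rb_insert_augmented(&node->rb, root, &my_augment_cb);
        }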
+ */ +static inline void +rb_insert_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + __rb_insert_augmented(node, root, false, NULL, augment->rotate); +} + +static inline void +rb_insert_augmented_cached(struct rb_node *node, + struct rb_root_cached *root, bool newleft, + const struct rb_augment_callbacks *augment) +{ + __rb_insert_augmented(node, &root->rb_root, + newleft, &root->rb_leftmost, augment->rotate); +} + +#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ + rbtype, rbaugmented, rbcompute) \ +static inline void \ +rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \ +{ \ + while (rb != stop) { \ + rbstruct *node = rb_entry(rb, rbstruct, rbfield); \ + rbtype augmented = rbcompute(node); \ + if (node->rbaugmented == augmented) \ + break; \ + node->rbaugmented = augmented; \ + rb = rb_parent(&node->rbfield); \ + } \ +} \ +static inline void \ +rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ +{ \ + rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ + rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ + new->rbaugmented = old->rbaugmented; \ +} \ +static void \ +rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ +{ \ + rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ + rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ + new->rbaugmented = old->rbaugmented; \ + old->rbaugmented = rbcompute(old); \ +} \ +rbstatic const struct rb_augment_callbacks rbname = { \ + .propagate = rbname ## _propagate, \ + .copy = rbname ## _copy, \ + .rotate = rbname ## _rotate \ +}; + + +#define RB_RED 0 +#define RB_BLACK 1 + +#define __rb_parent(pc) ((struct rb_node *)(pc & ~3)) + +#define __rb_color(pc) ((pc) & 1) +#define __rb_is_black(pc) __rb_color(pc) +#define __rb_is_red(pc) (!__rb_color(pc)) +#define rb_color(rb) __rb_color((rb)->__rb_parent_color) +#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color) +#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color) + +static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) +{ + rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; +} + +static inline void rb_set_parent_color(struct rb_node *rb, + struct rb_node *p, int color) +{ + rb->__rb_parent_color = (unsigned long)p | color; +} + +static inline void +__rb_change_child(struct rb_node *old, struct rb_node *new, + struct rb_node *parent, struct rb_root *root) +{ + if (parent) { + if (parent->rb_left == old) + WRITE_ONCE(parent->rb_left, new); + else + WRITE_ONCE(parent->rb_right, new); + } else + WRITE_ONCE(root->rb_node, new); +} + +static inline void +__rb_change_child_rcu(struct rb_node *old, struct rb_node *new, + struct rb_node *parent, struct rb_root *root) +{ + if (parent) { + if (parent->rb_left == old) + rcu_assign_pointer(parent->rb_left, new); + else + rcu_assign_pointer(parent->rb_right, new); + } else + rcu_assign_pointer(root->rb_node, new); +} + +extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); + +static __always_inline struct rb_node * +__rb_erase_augmented(struct rb_node *node, struct rb_root *root, + struct rb_node **leftmost, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *child = node->rb_right; + struct rb_node *tmp = node->rb_left; + struct rb_node *parent, *rebalance; + unsigned long pc; + + if (leftmost && node == *leftmost) + *leftmost = rb_next(node); + + if (!tmp) { + /* + * Case 1: node to erase has 
no more than 1 child (easy!) + * + * Note that if there is one child it must be red due to 5) + * and node must be black due to 4). We adjust colors locally + * so as to bypass __rb_erase_color() later on. + */ + pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, child, parent, root); + if (child) { + child->__rb_parent_color = pc; + rebalance = NULL; + } else + rebalance = __rb_is_black(pc) ? parent : NULL; + tmp = parent; + } else if (!child) { + /* Still case 1, but this time the child is node->rb_left */ + tmp->__rb_parent_color = pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, tmp, parent, root); + rebalance = NULL; + tmp = parent; + } else { + struct rb_node *successor = child, *child2; + + tmp = child->rb_left; + if (!tmp) { + /* + * Case 2: node's successor is its right child + * + * (n) (s) + * / \ / \ + * (x) (s) -> (x) (c) + * \ + * (c) + */ + parent = successor; + child2 = successor->rb_right; + + augment->copy(node, successor); + } else { + /* + * Case 3: node's successor is leftmost under + * node's right child subtree + * + * (n) (s) + * / \ / \ + * (x) (y) -> (x) (y) + * / / + * (p) (p) + * / / + * (s) (c) + * \ + * (c) + */ + do { + parent = successor; + successor = tmp; + tmp = tmp->rb_left; + } while (tmp); + child2 = successor->rb_right; + WRITE_ONCE(parent->rb_left, child2); + WRITE_ONCE(successor->rb_right, child); + rb_set_parent(child, successor); + + augment->copy(node, successor); + augment->propagate(parent, successor); + } + + tmp = node->rb_left; + WRITE_ONCE(successor->rb_left, tmp); + rb_set_parent(tmp, successor); + + pc = node->__rb_parent_color; + tmp = __rb_parent(pc); + __rb_change_child(node, successor, tmp, root); + + if (child2) { + successor->__rb_parent_color = pc; + rb_set_parent_color(child2, parent, RB_BLACK); + rebalance = NULL; + } else { + unsigned long pc2 = successor->__rb_parent_color; + successor->__rb_parent_color = pc; + rebalance = __rb_is_black(pc2) ? parent : NULL; + } + tmp = successor; + } + + augment->propagate(tmp, NULL); + return rebalance; +} + +static __always_inline void +rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *rebalance = __rb_erase_augmented(node, root, + NULL, augment); + if (rebalance) + __rb_erase_color(rebalance, root, augment->rotate); +} + +static __always_inline void +rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root, + &root->rb_leftmost, + augment); + if (rebalance) + __rb_erase_color(rebalance, &root->rb_root, augment->rotate); +} + +#endif /* _LINUX_RBTREE_AUGMENTED_H */ diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h new file mode 100644 index 000000000..7d012faa5 --- /dev/null +++ b/include/linux/rbtree_latch.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Latched RB-trees + * + * Copyright (C) 2015 Intel Corp., Peter Zijlstra + * + * Since RB-trees have non-atomic modifications they're not immediately suited + * for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for + * lockless lookups; we cannot guarantee they return a correct result. + * + * The simplest solution is a seqlock + RB-tree, this will allow lockless + * lookups; but has the constraint (inherent to the seqlock) that read sides + * cannot nest in write sides. 
+ * + * If we need to allow unconditional lookups (say as required for NMI context + * usage) we need a more complex setup; this data structure provides this by + * employing the latch technique -- see @raw_write_seqcount_latch -- to + * implement a latched RB-tree which does allow for unconditional lookups by + * virtue of always having (at least) one stable copy of the tree. + * + * However, while we have the guarantee that there is at all times one stable + * copy, this does not guarantee an iteration will not observe modifications. + * What might have been a stable copy at the start of the iteration, need not + * remain so for the duration of the iteration. + * + * Therefore, this does require a lockless RB-tree iteration to be non-fatal; + * see the comment in lib/rbtree.c. Note however that we only require the first + * condition -- not seeing partial stores -- because the latch thing isolates + * us from loops. If we were to interrupt a modification the lookup would be + * pointed at the stable tree and complete while the modification was halted. + */ + +#ifndef RB_TREE_LATCH_H +#define RB_TREE_LATCH_H + +#include +#include +#include + +struct latch_tree_node { + struct rb_node node[2]; +}; + +struct latch_tree_root { + seqcount_t seq; + struct rb_root tree[2]; +}; + +/** + * latch_tree_ops - operators to define the tree order + * @less: used for insertion; provides the (partial) order between two elements. + * @comp: used for lookups; provides the order between the search key and an element. + * + * The operators are related like: + * + * comp(a->key,b) < 0 := less(a,b) + * comp(a->key,b) > 0 := less(b,a) + * comp(a->key,b) == 0 := !less(a,b) && !less(b,a) + * + * If these operators define a partial order on the elements we make no + * guarantee on which of the elements matching the key is found. See + * latch_tree_find(). 
+ */ +struct latch_tree_ops { + bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b); + int (*comp)(void *key, struct latch_tree_node *b); +}; + +static __always_inline struct latch_tree_node * +__lt_from_rb(struct rb_node *node, int idx) +{ + return container_of(node, struct latch_tree_node, node[idx]); +} + +static __always_inline void +__lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx, + bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b)) +{ + struct rb_root *root = <r->tree[idx]; + struct rb_node **link = &root->rb_node; + struct rb_node *node = <n->node[idx]; + struct rb_node *parent = NULL; + struct latch_tree_node *ltp; + + while (*link) { + parent = *link; + ltp = __lt_from_rb(parent, idx); + + if (less(ltn, ltp)) + link = &parent->rb_left; + else + link = &parent->rb_right; + } + + rb_link_node_rcu(node, parent, link); + rb_insert_color(node, root); +} + +static __always_inline void +__lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx) +{ + rb_erase(<n->node[idx], <r->tree[idx]); +} + +static __always_inline struct latch_tree_node * +__lt_find(void *key, struct latch_tree_root *ltr, int idx, + int (*comp)(void *key, struct latch_tree_node *node)) +{ + struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node); + struct latch_tree_node *ltn; + int c; + + while (node) { + ltn = __lt_from_rb(node, idx); + c = comp(key, ltn); + + if (c < 0) + node = rcu_dereference_raw(node->rb_left); + else if (c > 0) + node = rcu_dereference_raw(node->rb_right); + else + return ltn; + } + + return NULL; +} + +/** + * latch_tree_insert() - insert @node into the trees @root + * @node: nodes to insert + * @root: trees to insert @node into + * @ops: operators defining the node order + * + * It inserts @node into @root in an ordered fashion such that we can always + * observe one complete tree. See the comment for raw_write_seqcount_latch(). + * + * The inserts use rcu_assign_pointer() to publish the element such that the + * tree structure is stored before we can observe the new @node. + * + * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be + * serialized. + */ +static __always_inline void +latch_tree_insert(struct latch_tree_node *node, + struct latch_tree_root *root, + const struct latch_tree_ops *ops) +{ + raw_write_seqcount_latch(&root->seq); + __lt_insert(node, root, 0, ops->less); + raw_write_seqcount_latch(&root->seq); + __lt_insert(node, root, 1, ops->less); +} + +/** + * latch_tree_erase() - removes @node from the trees @root + * @node: nodes to remote + * @root: trees to remove @node from + * @ops: operators defining the node order + * + * Removes @node from the trees @root in an ordered fashion such that we can + * always observe one complete tree. See the comment for + * raw_write_seqcount_latch(). + * + * It is assumed that @node will observe one RCU quiescent state before being + * reused of freed. + * + * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be + * serialized. 
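A sketch of wiring up the two operators and the lookup, modelled loosely on existing users such as kernel/module.c: regions ordered by start address and looked up by a contained address, with latch_tree_find() defined just below. struct my_region and the other names are illustrative assumptions; all updates are assumed serialized by the caller.

        /* Hypothetical sketch of latch-tree operators and usage. */
        struct my_region {
                unsigned long start, size;
                struct latch_tree_node ltn;
        };

        static __always_inline struct my_region *my_region_of(struct latch_tree_node *n)
        {
                return container_of(n, struct my_region, ltn);
        }

        static __always_inline bool my_less(struct latch_tree_node *a,
                                            struct latch_tree_node *b)
        {
                return my_region_of(a)->start < my_region_of(b)->start;
        }

        static __always_inline int my_comp(void *key, struct latch_tree_node *n)
        {
                unsigned long addr = (unsigned long)key;
                struct my_region *r = my_region_of(n);

                if (addr < r->start)
                        return -1;
                if (addr >= r->start + r->size)
                        return 1;
                return 0;
        }

        static const struct latch_tree_ops my_ops = { .less = my_less, .comp = my_comp };
        static struct latch_tree_root my_root;

        /* Writer side (serialized): latch_tree_insert(&r->ltn, &my_root, &my_ops); */
        /* Reader side (RCU/NMI):    latch_tree_find((void *)addr, &my_root, &my_ops); */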
+ */ +static __always_inline void +latch_tree_erase(struct latch_tree_node *node, + struct latch_tree_root *root, + const struct latch_tree_ops *ops) +{ + raw_write_seqcount_latch(&root->seq); + __lt_erase(node, root, 0); + raw_write_seqcount_latch(&root->seq); + __lt_erase(node, root, 1); +} + +/** + * latch_tree_find() - find the node matching @key in the trees @root + * @key: search key + * @root: trees to search for @key + * @ops: operators defining the node order + * + * Does a lockless lookup in the trees @root for the node matching @key. + * + * It is assumed that this is called while holding the appropriate RCU read + * side lock. + * + * If the operators define a partial order on the elements (there are multiple + * elements which have the same key value) it is undefined which of these + * elements will be found. Nor is it possible to iterate the tree to find + * further elements with the same key value. + * + * Returns: a pointer to the node matching @key or NULL. + */ +static __always_inline struct latch_tree_node * +latch_tree_find(void *key, struct latch_tree_root *root, + const struct latch_tree_ops *ops) +{ + struct latch_tree_node *node; + unsigned int seq; + + do { + seq = raw_read_seqcount_latch(&root->seq); + node = __lt_find(key, root, seq & 1, ops->comp); + } while (read_seqcount_retry(&root->seq, seq)); + + return node; +} + +#endif /* RB_TREE_LATCH_H */ diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h new file mode 100644 index 000000000..426cee67f --- /dev/null +++ b/include/linux/rcu_node_tree.h @@ -0,0 +1,103 @@ +/* + * RCU node combining tree definitions. These are used to compute + * global attributes while avoiding common-case global contention. A key + * property that these computations rely on is a tournament-style approach + * where only one of the tasks contending a lower level in the tree need + * advance to the next higher level. If properly configured, this allows + * unlimited scalability while maintaining a constant level of contention + * on the root node. + * + * This seemingly RCU-private file must be available to SRCU users + * because the size of the TREE SRCU srcu_struct structure depends + * on these definitions. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright IBM Corporation, 2017 + * + * Author: Paul E. McKenney + */ + +#ifndef __LINUX_RCU_NODE_TREE_H +#define __LINUX_RCU_NODE_TREE_H + +/* + * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and + * CONFIG_RCU_FANOUT_LEAF. + * In theory, it should be possible to add more levels straightforwardly. + * In practice, this did work well going from three levels to four. + * Of course, your mileage may vary. 
+ */ + +#ifdef CONFIG_RCU_FANOUT +#define RCU_FANOUT CONFIG_RCU_FANOUT +#else /* #ifdef CONFIG_RCU_FANOUT */ +# ifdef CONFIG_64BIT +# define RCU_FANOUT 64 +# else +# define RCU_FANOUT 32 +# endif +#endif /* #else #ifdef CONFIG_RCU_FANOUT */ + +#ifdef CONFIG_RCU_FANOUT_LEAF +#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF +#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */ +#define RCU_FANOUT_LEAF 16 +#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */ + +#define RCU_FANOUT_1 (RCU_FANOUT_LEAF) +#define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT) +#define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT) +#define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT) + +#if NR_CPUS <= RCU_FANOUT_1 +# define RCU_NUM_LVLS 1 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_NODES NUM_RCU_LVL_0 +# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0 } +# define RCU_NODE_NAME_INIT { "rcu_node_0" } +# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" } +#elif NR_CPUS <= RCU_FANOUT_2 +# define RCU_NUM_LVLS 2 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) +# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1) +# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1 } +# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" } +# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" } +#elif NR_CPUS <= RCU_FANOUT_3 +# define RCU_NUM_LVLS 3 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) +# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) +# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2) +# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 } +# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" } +# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" } +#elif NR_CPUS <= RCU_FANOUT_4 +# define RCU_NUM_LVLS 4 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3) +# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) +# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) +# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) +# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 } +# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" } +# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" } +#else +# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" +#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */ + +#endif /* __LINUX_RCU_NODE_TREE_H */ diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h new file mode 100644 index 000000000..c3ad00e63 --- /dev/null +++ b/include/linux/rcu_segcblist.h @@ -0,0 +1,94 @@ +/* + * RCU segmented callback lists + * + * This seemingly RCU-private file must be available to SRCU users + * because the size of the TREE SRCU srcu_struct structure depends + * on these definitions. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
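As a worked example of these macros (illustrative numbers, not taken from the patch): with the 64-bit defaults RCU_FANOUT = 64 and RCU_FANOUT_LEAF = 16, RCU_FANOUT_1 = 16, RCU_FANOUT_2 = 1024 and RCU_FANOUT_3 = 65536. A kernel built with NR_CPUS = 4096 therefore falls into the three-level case: NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 1024) = 4, NUM_RCU_LVL_2 = DIV_ROUND_UP(4096, 16) = 256, and NUM_RCU_NODES = 1 + 4 + 256 = 261.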
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright IBM Corporation, 2017 + * + * Authors: Paul E. McKenney + */ + +#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H +#define __INCLUDE_LINUX_RCU_SEGCBLIST_H + +/* Simple unsegmented callback lists. */ +struct rcu_cblist { + struct rcu_head *head; + struct rcu_head **tail; + long len; + long len_lazy; +}; + +#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head } + +/* Complicated segmented callback lists. ;-) */ + +/* + * Index values for segments in rcu_segcblist structure. + * + * The segments are as follows: + * + * [head, *tails[RCU_DONE_TAIL]): + * Callbacks whose grace period has elapsed, and thus can be invoked. + * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]): + * Callbacks waiting for the current GP from the current CPU's viewpoint. + * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]): + * Callbacks that arrived before the next GP started, again from + * the current CPU's viewpoint. These can be handled by the next GP. + * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]): + * Callbacks that might have arrived after the next GP started. + * There is some uncertainty as to when a given GP starts and + * ends, but a CPU knows the exact times if it is the one starting + * or ending the GP. Other CPUs know that the previous GP ends + * before the next one starts. + * + * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also + * empty. + * + * The ->gp_seq[] array contains the grace-period number at which the + * corresponding segment of callbacks will be ready to invoke. A given + * element of this array is meaningful only when the corresponding segment + * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks + * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have + * not yet been assigned a grace-period number). + */ +#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ +#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ +#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */ +#define RCU_NEXT_TAIL 3 +#define RCU_CBLIST_NSEGS 4 + +struct rcu_segcblist { + struct rcu_head *head; + struct rcu_head **tails[RCU_CBLIST_NSEGS]; + unsigned long gp_seq[RCU_CBLIST_NSEGS]; + long len; + long len_lazy; +}; + +#define RCU_SEGCBLIST_INITIALIZER(n) \ +{ \ + .head = NULL, \ + .tails[RCU_DONE_TAIL] = &n.head, \ + .tails[RCU_WAIT_TAIL] = &n.head, \ + .tails[RCU_NEXT_READY_TAIL] = &n.head, \ + .tails[RCU_NEXT_TAIL] = &n.head, \ +} + +#endif /* __INCLUDE_LINUX_RCU_SEGCBLIST_H */ diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h new file mode 100644 index 000000000..ece7ed9a4 --- /dev/null +++ b/include/linux/rcu_sync.h @@ -0,0 +1,87 @@ +/* + * RCU-based infrastructure for lightweight reader-writer locking + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
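/*
 * Illustrative sketch (not from the upstream header): a statically
 * initialized rcu_segcblist is empty, with ->head == NULL and all four
 * ->tails[] slots pointing at ->head, so every segment is the empty range
 * [&my_cbs.head, &my_cbs.head). The helper below, which counts the
 * callbacks in the RCU_DONE_TAIL segment, is hypothetical and assumes the
 * caller holds whatever lock protects the list.
 */
#include <linux/types.h>
#include <linux/rcu_segcblist.h>

static struct rcu_segcblist my_cbs = RCU_SEGCBLIST_INITIALIZER(my_cbs);

static long my_count_ready(struct rcu_segcblist *rsclp)
{
	struct rcu_head **cur;
	long n = 0;

	/* Callbacks in [head, *tails[RCU_DONE_TAIL]) are ready to invoke. */
	for (cur = &rsclp->head; cur != rsclp->tails[RCU_DONE_TAIL];
	     cur = &(*cur)->next)
		n++;
	return n;
}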
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright (c) 2015, Red Hat, Inc. + * + * Author: Oleg Nesterov + */ + +#ifndef _LINUX_RCU_SYNC_H_ +#define _LINUX_RCU_SYNC_H_ + +#include +#include + +enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC }; + +/* Structure to mediate between updaters and fastpath-using readers. */ +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + + int cb_state; + struct rcu_head cb_head; + + enum rcu_sync_type gp_type; +}; + +extern void rcu_sync_lockdep_assert(struct rcu_sync *); + +/** + * rcu_sync_is_idle() - Are readers permitted to use their fastpaths? + * @rsp: Pointer to rcu_sync structure to use for synchronization + * + * Returns true if readers are permitted to use their fastpaths. + * Must be invoked within an RCU read-side critical section whose + * flavor matches that of the rcu_sync struture. + */ +static inline bool rcu_sync_is_idle(struct rcu_sync *rsp) +{ +#ifdef CONFIG_PROVE_RCU + rcu_sync_lockdep_assert(rsp); +#endif + return !rsp->gp_state; /* GP_IDLE */ +} + +extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type); +extern void rcu_sync_enter_start(struct rcu_sync *); +extern void rcu_sync_enter(struct rcu_sync *); +extern void rcu_sync_exit(struct rcu_sync *); +extern void rcu_sync_dtor(struct rcu_sync *); + +#define __RCU_SYNC_INITIALIZER(name, type) { \ + .gp_state = 0, \ + .gp_count = 0, \ + .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \ + .cb_state = 0, \ + .gp_type = type, \ + } + +#define __DEFINE_RCU_SYNC(name, type) \ + struct rcu_sync_struct name = __RCU_SYNC_INITIALIZER(name, type) + +#define DEFINE_RCU_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_SYNC) + +#define DEFINE_RCU_SCHED_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC) + +#define DEFINE_RCU_BH_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_BH_SYNC) + +#endif /* _LINUX_RCU_SYNC_H_ */ diff --git a/include/linux/rculist.h b/include/linux/rculist.h new file mode 100644 index 000000000..4786c2235 --- /dev/null +++ b/include/linux/rculist.h @@ -0,0 +1,705 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RCULIST_H +#define _LINUX_RCULIST_H + +#ifdef __KERNEL__ + +/* + * RCU-protected list version + */ +#include +#include + +/* + * Why is there no list_empty_rcu()? Because list_empty() serves this + * purpose. The list_empty() function fetches the RCU-protected pointer + * and compares it to the address of the list head, but neither dereferences + * this pointer itself nor provides this pointer to the caller. Therefore, + * it is not necessary to use rcu_dereference(), so that list_empty() can + * be used anywhere you would want to use a list_empty_rcu(). + */ + +/* + * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers + * @list: list to be initialized + * + * You should instead use INIT_LIST_HEAD() for normal initialization and + * cleanup tasks, when readers have no access to the list being initialized. + * However, if the list being initialized is visible to readers, you + * need to keep the compiler from being too mischievous. 
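/*
 * Illustrative sketch (not from the upstream header) of the rcu_sync
 * pattern, as used for example by percpu_rw_semaphore: readers normally
 * take a fast path, and a writer uses rcu_sync_enter()/rcu_sync_exit() to
 * force them onto a slow path for the duration of the update. "my_rss",
 * "my_fast_path" and "my_slow_path" are hypothetical; my_rss must have
 * been set up with rcu_sync_init(&my_rss, RCU_SYNC).
 */
#include <linux/rcu_sync.h>
#include <linux/rcupdate.h>

static struct rcu_sync my_rss;

static void my_fast_path(void) { }
static void my_slow_path(void) { }

static void my_reader(void)
{
	rcu_read_lock();			/* must match my_rss's flavor */
	if (rcu_sync_is_idle(&my_rss))
		my_fast_path();			/* no writer active or pending */
	else
		my_slow_path();			/* writer present: take locks, etc. */
	rcu_read_unlock();
}

static void my_writer(void)
{
	rcu_sync_enter(&my_rss);	/* flip readers to the slow path, wait a GP */
	/* ... exclusive update ... */
	rcu_sync_exit(&my_rss);		/* let readers drift back to the fast path */
}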
+ */ +static inline void INIT_LIST_HEAD_RCU(struct list_head *list) +{ + WRITE_ONCE(list->next, list); + WRITE_ONCE(list->prev, list); +} + +/* + * return the ->next pointer of a list_head in an rcu safe + * way, we must not access it directly + */ +#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add_rcu(struct list_head *new, + struct list_head *prev, struct list_head *next) +{ + if (!__list_add_valid(new, prev, next)) + return; + + new->next = next; + new->prev = prev; + rcu_assign_pointer(list_next_rcu(prev), new); + next->prev = new; +} + +/** + * list_add_rcu - add a new entry to rcu-protected list + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_add_rcu() + * or list_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + */ +static inline void list_add_rcu(struct list_head *new, struct list_head *head) +{ + __list_add_rcu(new, head, head->next); +} + +/** + * list_add_tail_rcu - add a new entry to rcu-protected list + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_add_tail_rcu() + * or list_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + */ +static inline void list_add_tail_rcu(struct list_head *new, + struct list_head *head) +{ + __list_add_rcu(new, head->prev, head); +} + +/** + * list_del_rcu - deletes entry from list without re-initialization + * @entry: the element to delete from the list. + * + * Note: list_empty() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_del_rcu() + * or list_add_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + * + * Note that the caller is not permitted to immediately free + * the newly deleted entry. Instead, either synchronize_rcu() + * or call_rcu() must be used to defer freeing until an RCU + * grace period has elapsed. + */ +static inline void list_del_rcu(struct list_head *entry) +{ + __list_del_entry(entry); + entry->prev = LIST_POISON2; +} + +/** + * hlist_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. 
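/*
 * Illustrative sketch (not from the upstream header): the canonical
 * RCU-protected list. A writer adds and removes entries under a spinlock;
 * readers traverse under rcu_read_lock(); removed entries are freed only
 * after a grace period via call_rcu(). "my_entry", "my_head", "my_lock"
 * and friends are hypothetical; list_for_each_entry_rcu() is defined
 * further down in this file.
 */
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_entry {
	int key;
	struct list_head node;
	struct rcu_head rcu;
};

static LIST_HEAD(my_head);
static DEFINE_SPINLOCK(my_lock);

static void my_add(struct my_entry *e)
{
	spin_lock(&my_lock);
	list_add_rcu(&e->node, &my_head);	/* readers may see it at once */
	spin_unlock(&my_lock);
}

static void my_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_entry, rcu));
}

static void my_del(struct my_entry *e)
{
	spin_lock(&my_lock);
	list_del_rcu(&e->node);			/* unlink, but do not free yet */
	spin_unlock(&my_lock);
	call_rcu(&e->rcu, my_free_rcu);		/* free after a grace period */
}

static bool my_find(int key)
{
	struct my_entry *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &my_head, node) {
		if (e->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}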
+ * + * Note: list_unhashed() on the node return true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_add_head_rcu() or + * hlist_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_for_each_entry_rcu(). + */ +static inline void hlist_del_init_rcu(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + n->pprev = NULL; + } +} + +/** + * list_replace_rcu - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * The @old entry will be replaced with the @new entry atomically. + * Note: @old should not be empty. + */ +static inline void list_replace_rcu(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->prev = old->prev; + rcu_assign_pointer(list_next_rcu(new->prev), new); + new->next->prev = new; + old->prev = LIST_POISON2; +} + +/** + * __list_splice_init_rcu - join an RCU-protected list into an existing list. + * @list: the RCU-protected list to splice + * @prev: points to the last element of the existing list + * @next: points to the first element of the existing list + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + * + * The list pointed to by @prev and @next can be RCU-read traversed + * concurrently with this function. + * + * Note that this function blocks. + * + * Important note: the caller must take whatever action is necessary to prevent + * any other updates to the existing list. In principle, it is possible to + * modify the list as soon as sync() begins execution. If this sort of thing + * becomes necessary, an alternative version based on call_rcu() could be + * created. But only if -really- needed -- there is no shortage of RCU API + * members. + */ +static inline void __list_splice_init_rcu(struct list_head *list, + struct list_head *prev, + struct list_head *next, + void (*sync)(void)) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + + /* + * "first" and "last" tracking list, so initialize it. RCU readers + * have access to this list, so we must use INIT_LIST_HEAD_RCU() + * instead of INIT_LIST_HEAD(). + */ + + INIT_LIST_HEAD_RCU(list); + + /* + * At this point, the list body still points to the source list. + * Wait for any readers to finish using the list before splicing + * the list body into the new list. Any new readers will see + * an empty list. + */ + + sync(); + + /* + * Readers are finished with the source list, so perform splice. + * The order is important if the new list is global and accessible + * to concurrent RCU readers. Note that RCU readers are not + * permitted to traverse the prev pointers without excluding + * this function. + */ + + last->next = next; + rcu_assign_pointer(list_next_rcu(prev), first); + first->prev = prev; + next->prev = last; +} + +/** + * list_splice_init_rcu - splice an RCU-protected list into an existing list, + * designed for stacks. 
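/*
 * Illustrative sketch (not from the upstream header) of the classic
 * read-copy-update idiom with list_replace_rcu(): copy the element,
 * modify the copy, publish it in place of the original, and free the
 * original only after pre-existing readers are done. "my_cfg" and
 * "my_cfg_lock" are hypothetical.
 */
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct my_cfg {
	int value;
	struct list_head node;
};

static DEFINE_SPINLOCK(my_cfg_lock);

static int my_cfg_update(struct my_cfg *old, int value)
{
	struct my_cfg *new = kmalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	*new = *old;				/* copy ... */
	new->value = value;			/* ... update the copy ... */

	spin_lock(&my_cfg_lock);
	list_replace_rcu(&old->node, &new->node); /* ... publish atomically */
	spin_unlock(&my_cfg_lock);

	synchronize_rcu();			/* wait for readers still on old */
	kfree(old);
	return 0;
}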
+ * @list: the RCU-protected list to splice + * @head: the place in the existing list to splice the first list into + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + */ +static inline void list_splice_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + if (!list_empty(list)) + __list_splice_init_rcu(list, head, head->next, sync); +} + +/** + * list_splice_tail_init_rcu - splice an RCU-protected list into an existing + * list, designed for queues. + * @list: the RCU-protected list to splice + * @head: the place in the existing list to splice the first list into + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + */ +static inline void list_splice_tail_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + if (!list_empty(list)) + __list_splice_init_rcu(list, head->prev, head, sync); +} + +/** + * list_entry_rcu - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). + */ +#define list_entry_rcu(ptr, type, member) \ + container_of(READ_ONCE(ptr), type, member) + +/* + * Where are list_empty_rcu() and list_first_entry_rcu()? + * + * Implementing those functions following their counterparts list_empty() and + * list_first_entry() is not advisable because they lead to subtle race + * conditions as the following snippet shows: + * + * if (!list_empty_rcu(mylist)) { + * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); + * do_something(bar); + * } + * + * The list may not be empty when list_empty_rcu checks it, but it may be when + * list_first_entry_rcu rereads the ->next pointer. + * + * Rereading the ->next pointer is not a problem for list_empty() and + * list_first_entry() because they would be protected by a lock that blocks + * writers. + * + * See list_first_or_null_rcu for an alternative. + */ + +/** + * list_first_or_null_rcu - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * Note that if the list is empty, it returns NULL. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). + */ +#define list_first_or_null_rcu(ptr, type, member) \ +({ \ + struct list_head *__ptr = (ptr); \ + struct list_head *__next = READ_ONCE(__ptr->next); \ + likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ +}) + +/** + * list_next_or_null_rcu - get the first element from a list + * @head: the head for the list. + * @ptr: the list head to take the next element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * Note that if the ptr is at the end of the list, NULL is returned. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). 
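/*
 * Illustrative sketch (not from the upstream header) of the pattern the
 * comment above recommends: fetch the first entry exactly once, treating
 * NULL as "list is empty", instead of a racy list_empty() + first-entry
 * pair. Reuses the hypothetical my_entry/my_head from the earlier list
 * sketch; "my_consume" is likewise hypothetical, and the entry may only
 * be used inside the read-side critical section.
 */
static void my_consume(struct my_entry *e) { }

static void my_handle_first(void)
{
	struct my_entry *e;

	rcu_read_lock();
	e = list_first_or_null_rcu(&my_head, struct my_entry, node);
	if (e)
		my_consume(e);
	rcu_read_unlock();
}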
+ */ +#define list_next_or_null_rcu(head, ptr, type, member) \ +({ \ + struct list_head *__head = (head); \ + struct list_head *__ptr = (ptr); \ + struct list_head *__next = READ_ONCE(__ptr->next); \ + likely(__next != __head) ? list_entry_rcu(__next, type, \ + member) : NULL; \ +}) + +/** + * list_for_each_entry_rcu - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define list_for_each_entry_rcu(pos, head, member) \ + for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) + +/** + * list_entry_lockless - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu(), but requires some implicit RCU + * read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where + * lockdep cannot be invoked (in which case updaters must use RCU-sched, + * as in synchronize_sched(), call_rcu_sched(), and friends). Another + * example is when items are added to the list, but never deleted. + */ +#define list_entry_lockless(ptr, type, member) \ + container_of((typeof(ptr))READ_ONCE(ptr), type, member) + +/** + * list_for_each_entry_lockless - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu(), but requires some implicit RCU + * read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where + * lockdep cannot be invoked (in which case updaters must use RCU-sched, + * as in synchronize_sched(), call_rcu_sched(), and friends). Another + * example is when items are added to the list, but never deleted. + */ +#define list_for_each_entry_lockless(pos, head, member) \ + for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry_lockless(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_continue_rcu - continue iteration over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Continue to iterate over list of given type, continuing after + * the current position which must have been in the list when the RCU read + * lock was taken. + * This would typically require either that you obtained the node from a + * previous walk of the list in the same RCU read-side critical section, or + * that you held some sort of non-RCU reference (such as a reference count) + * to keep the node alive *and* in the list. + * + * This iterator is similar to list_for_each_entry_from_rcu() except + * this starts after the given position and that one starts at the given + * position. 
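/*
 * Illustrative sketch (not from the upstream header) for
 * list_for_each_entry_lockless(): a list to which entries are only ever
 * added (never deleted), one of the cases the comment above permits, can
 * be walked without an enclosing rcu_read_lock(). "my_notifier" and
 * "my_notifiers" are hypothetical.
 */
struct my_notifier {
	void (*fn)(void);
	struct list_head node;		/* added with list_add_tail_rcu() */
};

static LIST_HEAD(my_notifiers);		/* add-only: entries never removed */

static void my_call_notifiers(void)
{
	struct my_notifier *n;

	list_for_each_entry_lockless(n, &my_notifiers, node)
		n->fn();
}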
+ */ +#define list_for_each_entry_continue_rcu(pos, head, member) \ + for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_from_rcu - iterate over a list from current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_node within the struct. + * + * Iterate over the tail of a list starting from a given position, + * which must have been in the list when the RCU read lock was taken. + * This would typically require either that you obtained the node from a + * previous walk of the list in the same RCU read-side critical section, or + * that you held some sort of non-RCU reference (such as a reference count) + * to keep the node alive *and* in the list. + * + * This iterator is similar to list_for_each_entry_continue_rcu() except + * this starts from the given position and that one starts from the position + * after the given position. + */ +#define list_for_each_entry_from_rcu(pos, head, member) \ + for (; &(pos)->member != (head); \ + pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member)) + +/** + * hlist_del_rcu - deletes entry from hash list without re-initialization + * @n: the element to delete from the hash list. + * + * Note: list_unhashed() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the hash list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry(). + */ +static inline void hlist_del_rcu(struct hlist_node *n) +{ + __hlist_del(n); + n->pprev = LIST_POISON2; +} + +/** + * hlist_replace_rcu - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * The @old entry will be replaced with the @new entry atomically. + */ +static inline void hlist_replace_rcu(struct hlist_node *old, + struct hlist_node *new) +{ + struct hlist_node *next = old->next; + + new->next = next; + new->pprev = old->pprev; + rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); + if (next) + new->next->pprev = &new->next; + old->pprev = LIST_POISON2; +} + +/* + * return the first or the next element in an RCU protected hlist + */ +#define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) +#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next))) +#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) + +/** + * hlist_add_head_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. 
+ * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_add_head_rcu(struct hlist_node *n, + struct hlist_head *h) +{ + struct hlist_node *first = h->first; + + n->next = first; + n->pprev = &h->first; + rcu_assign_pointer(hlist_first_rcu(h), n); + if (first) + first->pprev = &n->next; +} + +/** + * hlist_add_tail_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_add_tail_rcu(struct hlist_node *n, + struct hlist_head *h) +{ + struct hlist_node *i, *last = NULL; + + /* Note: write side code, so rcu accessors are not needed. */ + for (i = h->first; i; i = i->next) + last = i; + + if (last) { + n->next = last->next; + n->pprev = &last->next; + rcu_assign_pointer(hlist_next_rcu(last), n); + } else { + hlist_add_head_rcu(n, h); + } +} + +/** + * hlist_add_before_rcu + * @n: the new element to add to the hash list. + * @next: the existing element to add the new element before. + * + * Description: + * Adds the specified element to the specified hlist + * before the specified node while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. + */ +static inline void hlist_add_before_rcu(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + rcu_assign_pointer(hlist_pprev_rcu(n), n); + next->pprev = &n->next; +} + +/** + * hlist_add_behind_rcu + * @n: the new element to add to the hash list. + * @prev: the existing element to add the new element after. + * + * Description: + * Adds the specified element to the specified hlist + * after the specified node while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. 
+ */ +static inline void hlist_add_behind_rcu(struct hlist_node *n, + struct hlist_node *prev) +{ + n->next = prev->next; + n->pprev = &prev->next; + rcu_assign_pointer(hlist_next_rcu(prev), n); + if (n->next) + n->next->pprev = &n->next; +} + +#define __hlist_for_each_rcu(pos, head) \ + for (pos = rcu_dereference(hlist_first_rcu(head)); \ + pos; \ + pos = rcu_dereference(hlist_next_rcu(pos))) + +/** + * hlist_for_each_entry_rcu - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define hlist_for_each_entry_rcu(pos, head, member) \ + for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ + typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ + &(pos)->member)), typeof(*(pos)), member)) + +/** + * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing) + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + * + * This is the same as hlist_for_each_entry_rcu() except that it does + * not do any RCU debugging or tracing. + */ +#define hlist_for_each_entry_rcu_notrace(pos, head, member) \ + for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\ + typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\ + &(pos)->member)), typeof(*(pos)), member)) + +/** + * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define hlist_for_each_entry_rcu_bh(pos, head, member) \ + for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\ + typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\ + &(pos)->member)), typeof(*(pos)), member)) + +/** + * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_continue_rcu(pos, member) \ + for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member)) + +/** + * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. 
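/*
 * Illustrative sketch (not from the upstream header): a small
 * RCU-protected hash table built from the hlist primitives above.
 * "my_obj", "my_table" and "my_table_lock" are hypothetical. Insertion
 * and removal are serialized by the spinlock; lookups run under RCU only.
 */
#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hash.h>

#define MY_HASH_BITS	6

struct my_obj {
	u32 key;
	struct hlist_node node;
	struct rcu_head rcu;
};

static struct hlist_head my_table[1 << MY_HASH_BITS];
static DEFINE_SPINLOCK(my_table_lock);

static void my_obj_insert(struct my_obj *obj)
{
	spin_lock(&my_table_lock);
	hlist_add_head_rcu(&obj->node,
			   &my_table[hash_32(obj->key, MY_HASH_BITS)]);
	spin_unlock(&my_table_lock);
}

/* Caller must hold rcu_read_lock(); result is valid only under it. */
static struct my_obj *my_obj_lookup(u32 key)
{
	struct my_obj *obj;

	hlist_for_each_entry_rcu(obj, &my_table[hash_32(key, MY_HASH_BITS)],
				 node) {
		if (obj->key == key)
			return obj;
	}
	return NULL;
}

static void my_obj_remove(struct my_obj *obj)
{
	spin_lock(&my_table_lock);
	hlist_del_rcu(&obj->node);
	spin_unlock(&my_table_lock);
	kfree_rcu(obj, rcu);		/* free once current readers are done */
}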
+ */ +#define hlist_for_each_entry_continue_rcu_bh(pos, member) \ + for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member)) + +/** + * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_from_rcu(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member)) + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h new file mode 100644 index 000000000..66e73ec1a --- /dev/null +++ b/include/linux/rculist_bl.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RCULIST_BL_H +#define _LINUX_RCULIST_BL_H + +/* + * RCU-protected bl list version. See include/linux/list_bl.h. + */ +#include +#include + +static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h, + struct hlist_bl_node *n) +{ + LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); + LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != + LIST_BL_LOCKMASK); + rcu_assign_pointer(h->first, + (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK)); +} + +static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h) +{ + return (struct hlist_bl_node *) + ((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK); +} + +/** + * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_bl_unhashed() on the node returns true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_bl_add_head_rcu() or + * hlist_bl_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_bl_for_each_entry_rcu(). + */ +static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n) +{ + if (!hlist_bl_unhashed(n)) { + __hlist_bl_del(n); + n->pprev = NULL; + } +} + +/** + * hlist_bl_del_rcu - deletes entry from hash list without re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_bl_unhashed() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the hash list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_bl_add_head_rcu() + * or hlist_bl_del_rcu(), running on this same list. 
+ * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_bl_for_each_entry(). + */ +static inline void hlist_bl_del_rcu(struct hlist_bl_node *n) +{ + __hlist_bl_del(n); + n->pprev = LIST_POISON2; +} + +/** + * hlist_bl_add_head_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist_bl, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_bl_add_head_rcu() + * or hlist_bl_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n, + struct hlist_bl_head *h) +{ + struct hlist_bl_node *first; + + /* don't need hlist_bl_first_rcu because we're under lock */ + first = hlist_bl_first(h); + + n->next = first; + if (first) + first->pprev = &n->next; + n->pprev = &h->first; + + /* need _rcu because we can have concurrent lock free readers */ + hlist_bl_set_first_rcu(h, n); +} +/** + * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_bl_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_bl_node within the struct. + * + */ +#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member) \ + for (pos = hlist_bl_first_rcu(head); \ + pos && \ + ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference_raw(pos->next)) + +#endif diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h new file mode 100644 index 000000000..90f2e2232 --- /dev/null +++ b/include/linux/rculist_nulls.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RCULIST_NULLS_H +#define _LINUX_RCULIST_NULLS_H + +#ifdef __KERNEL__ + +/* + * RCU-protected list version + */ +#include +#include + +/** + * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_nulls_unhashed() on the node return true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_nulls_add_head_rcu() or + * hlist_nulls_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_nulls_for_each_entry_rcu(). 
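/*
 * Illustrative sketch (not from the upstream header): hlist_bl heads keep
 * a lock in bit 0 of ->first, so every hash bucket can be locked on its
 * own (much as the dentry cache does) while lookups remain lockless.
 * "my_bl_obj" and "my_bl_table" are hypothetical.
 */
#include <linux/list_bl.h>
#include <linux/rculist_bl.h>

#define MY_BL_BUCKETS	64

struct my_bl_obj {
	unsigned long key;
	struct hlist_bl_node node;
};

static struct hlist_bl_head my_bl_table[MY_BL_BUCKETS];

static void my_bl_insert(struct my_bl_obj *obj)
{
	struct hlist_bl_head *b = &my_bl_table[obj->key % MY_BL_BUCKETS];

	hlist_bl_lock(b);			/* per-bucket bit spinlock */
	hlist_bl_add_head_rcu(&obj->node, b);
	hlist_bl_unlock(b);
}

/* Caller must hold rcu_read_lock(); result is valid only under it. */
static struct my_bl_obj *my_bl_lookup(unsigned long key)
{
	struct hlist_bl_node *pos;
	struct my_bl_obj *obj;

	hlist_bl_for_each_entry_rcu(obj, pos,
				    &my_bl_table[key % MY_BL_BUCKETS], node) {
		if (obj->key == key)
			return obj;
	}
	return NULL;
}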
+ */ +static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) +{ + if (!hlist_nulls_unhashed(n)) { + __hlist_nulls_del(n); + WRITE_ONCE(n->pprev, NULL); + } +} + +#define hlist_nulls_first_rcu(head) \ + (*((struct hlist_nulls_node __rcu __force **)&(head)->first)) + +#define hlist_nulls_next_rcu(node) \ + (*((struct hlist_nulls_node __rcu __force **)&(node)->next)) + +/** + * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_nulls_unhashed() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the hash list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() + * or hlist_nulls_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_nulls_for_each_entry(). + */ +static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n) +{ + __hlist_nulls_del(n); + WRITE_ONCE(n->pprev, LIST_POISON2); +} + +/** + * hlist_nulls_add_head_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist_nulls, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() + * or hlist_nulls_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *first = h->first; + + n->next = first; + WRITE_ONCE(n->pprev, &h->first); + rcu_assign_pointer(hlist_nulls_first_rcu(h), n); + if (!is_a_nulls(first)) + WRITE_ONCE(first->pprev, &n->next); +} + +/** + * hlist_nulls_add_tail_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist_nulls, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() + * or hlist_nulls_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *i, *last = NULL; + + /* Note: write side code, so rcu accessors are not needed. 
*/ + for (i = h->first; !is_a_nulls(i); i = i->next) + last = i; + + if (last) { + n->next = last->next; + n->pprev = &last->next; + rcu_assign_pointer(hlist_next_rcu(last), n); + } else { + hlist_nulls_add_head_rcu(n, h); + } +} + +/** + * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_nulls_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_nulls_node within the struct. + * + * The barrier() is needed to make sure compiler doesn't cache first element [1], + * as this loop can be restarted [2] + * [1] Documentation/core-api/atomic_ops.rst around line 114 + * [2] Documentation/RCU/rculist_nulls.txt around line 146 + */ +#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ + for (({barrier();}), \ + pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ + (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) + +/** + * hlist_nulls_for_each_entry_safe - + * iterate over list of given type safe against removal of list entry + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_nulls_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_nulls_node within the struct. + */ +#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \ + for (({barrier();}), \ + pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ + (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \ + pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });) +#endif +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h new file mode 100644 index 000000000..68cbe1114 --- /dev/null +++ b/include/linux/rcupdate.h @@ -0,0 +1,886 @@ +/* + * Read-Copy Update mechanism for mutual exclusion + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright IBM Corporation, 2001 + * + * Author: Dipankar Sarma + * + * Based on the original work by Paul McKenney + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
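/*
 * Illustrative sketch (not from the upstream header), following the
 * pattern described in Documentation/RCU/rculist_nulls.txt: every chain
 * ends in a "nulls" marker encoding its bucket number, so a lockless
 * reader can detect that the object it was following has been freed and
 * reused on another chain (SLAB_TYPESAFE_BY_RCU) and restart the search.
 * "my_nobj" and "my_nulls_table" are hypothetical; each head is assumed
 * to have been set up with INIT_HLIST_NULLS_HEAD(&head, bucket).
 */
#include <linux/types.h>
#include <linux/list_nulls.h>
#include <linux/rculist_nulls.h>

#define MY_NULLS_BUCKETS	128

struct my_nobj {
	u32 key;
	struct hlist_nulls_node node;
};

static struct hlist_nulls_head my_nulls_table[MY_NULLS_BUCKETS];

/* Caller must hold rcu_read_lock() and must revalidate/refcount the result. */
static struct my_nobj *my_nulls_lookup(u32 key)
{
	u32 bucket = key % MY_NULLS_BUCKETS;
	struct hlist_nulls_node *pos;
	struct my_nobj *obj;

begin:
	hlist_nulls_for_each_entry_rcu(obj, pos, &my_nulls_table[bucket], node) {
		if (obj->key == key)
			return obj;
	}
	/*
	 * Landed on a nulls marker belonging to another bucket: the object
	 * we were following moved chains under us, so restart the walk.
	 */
	if (get_nulls_value(pos) != bucket)
		goto begin;
	return NULL;
}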
+ * Papers: + * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf + * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) + * + * For detailed explanation of Read-Copy Update mechanism see - + * http://lse.sourceforge.net/locking/rcupdate.html + * + */ + +#ifndef __LINUX_RCUPDATE_H +#define __LINUX_RCUPDATE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) +#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) +#define ulong2long(a) (*(long *)(&(a))) + +/* Exported common interfaces */ + +#ifdef CONFIG_PREEMPT_RCU +void call_rcu(struct rcu_head *head, rcu_callback_t func); +#else /* #ifdef CONFIG_PREEMPT_RCU */ +#define call_rcu call_rcu_sched +#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + +void call_rcu_bh(struct rcu_head *head, rcu_callback_t func); +void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); +void synchronize_sched(void); +void rcu_barrier_tasks(void); + +#ifdef CONFIG_PREEMPT_RCU + +void __rcu_read_lock(void); +void __rcu_read_unlock(void); +void synchronize_rcu(void); + +/* + * Defined as a macro as it is a very low level header included from + * areas that don't even know about current. This gives the rcu_read_lock() + * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other + * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. + */ +#define rcu_preempt_depth() (current->rcu_read_lock_nesting) + +#else /* #ifdef CONFIG_PREEMPT_RCU */ + +static inline void __rcu_read_lock(void) +{ + preempt_disable(); +} + +static inline void __rcu_read_unlock(void) +{ + preempt_enable(); +} + +static inline void synchronize_rcu(void) +{ + synchronize_sched(); +} + +static inline int rcu_preempt_depth(void) +{ + return 0; +} + +#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + +/* Internal to kernel */ +void rcu_init(void); +extern int rcu_scheduler_active __read_mostly; +void rcu_sched_qs(void); +void rcu_bh_qs(void); +void rcu_check_callbacks(int user); +void rcu_report_dead(unsigned int cpu); +void rcutree_migrate_callbacks(int cpu); + +#ifdef CONFIG_RCU_STALL_COMMON +void rcu_sysrq_start(void); +void rcu_sysrq_end(void); +#else /* #ifdef CONFIG_RCU_STALL_COMMON */ +static inline void rcu_sysrq_start(void) { } +static inline void rcu_sysrq_end(void) { } +#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ + +#ifdef CONFIG_NO_HZ_FULL +void rcu_user_enter(void); +void rcu_user_exit(void); +#else +static inline void rcu_user_enter(void) { } +static inline void rcu_user_exit(void) { } +#endif /* CONFIG_NO_HZ_FULL */ + +#ifdef CONFIG_RCU_NOCB_CPU +void rcu_init_nohz(void); +#else /* #ifdef CONFIG_RCU_NOCB_CPU */ +static inline void rcu_init_nohz(void) { } +#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ + +/** + * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers + * @a: Code that RCU needs to pay attention to. + * + * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden + * in the inner idle loop, that is, between the rcu_idle_enter() and + * the rcu_idle_exit() -- RCU will happily ignore any such read-side + * critical sections. However, things like powertop need tracepoints + * in the inner idle loop. + * + * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) + * will tell RCU that it needs to pay attention, invoke its argument + * (in this example, calling the do_something_with_RCU() function), + * and then tell RCU to go back to ignoring this CPU. 
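/*
 * Illustrative sketch (not from the upstream header): RCU_NONIDLE(), as
 * described above, wraps a single RCU-using statement executed from the
 * inner idle loop, e.g. a tracepoint. "my_idle_trace_event" is a
 * hypothetical stand-in for such RCU-using code.
 */
static void my_idle_trace_event(void) { }

static void my_idle_step(void)
{
	/* RCU is ignoring this CPU while it idles ... */
	RCU_NONIDLE(my_idle_trace_event());	/* ... except for this statement */
}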
It is permissible + * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is + * on the order of a million or so, even on 32-bit systems). It is + * not legal to block within RCU_NONIDLE(), nor is it permissible to + * transfer control either into or out of RCU_NONIDLE()'s statement. + */ +#define RCU_NONIDLE(a) \ + do { \ + rcu_irq_enter_irqson(); \ + do { a; } while (0); \ + rcu_irq_exit_irqson(); \ + } while (0) + +/* + * Note a quasi-voluntary context switch for RCU-tasks's benefit. + * This is a macro rather than an inline function to avoid #include hell. + */ +#ifdef CONFIG_TASKS_RCU +#define rcu_tasks_qs(t) \ + do { \ + if (READ_ONCE((t)->rcu_tasks_holdout)) \ + WRITE_ONCE((t)->rcu_tasks_holdout, false); \ + } while (0) +#define rcu_note_voluntary_context_switch(t) \ + do { \ + rcu_all_qs(); \ + rcu_tasks_qs(t); \ + } while (0) +void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); +void synchronize_rcu_tasks(void); +void exit_tasks_rcu_start(void); +void exit_tasks_rcu_finish(void); +#else /* #ifdef CONFIG_TASKS_RCU */ +#define rcu_tasks_qs(t) do { } while (0) +#define rcu_note_voluntary_context_switch(t) rcu_all_qs() +#define call_rcu_tasks call_rcu_sched +#define synchronize_rcu_tasks synchronize_sched +static inline void exit_tasks_rcu_start(void) { } +static inline void exit_tasks_rcu_finish(void) { } +#endif /* #else #ifdef CONFIG_TASKS_RCU */ + +/** + * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU + * + * This macro resembles cond_resched(), except that it is defined to + * report potential quiescent states to RCU-tasks even if the cond_resched() + * machinery were to be shut off, as some advocate for PREEMPT kernels. + */ +#define cond_resched_tasks_rcu_qs() \ +do { \ + rcu_tasks_qs(current); \ + cond_resched(); \ +} while (0) + +/* + * Infrastructure to implement the synchronize_() primitives in + * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. + */ + +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) +#include +#elif defined(CONFIG_TINY_RCU) +#include +#else +#error "Unknown RCU implementation specified to kernel configuration" +#endif + +/* + * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls + * are needed for dynamic initialization and destruction of rcu_head + * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for + * dynamic initialization and destruction of statically allocated rcu_head + * structures. However, rcu_head structures allocated dynamically in the + * heap don't need any initialization. 
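/*
 * Illustrative sketch (not from the upstream header): an rcu_head that
 * lives on the stack must be announced to debug-objects with
 * init_rcu_head_on_stack() and retired with destroy_rcu_head_on_stack()
 * (both become no-ops without CONFIG_DEBUG_OBJECTS_RCU_HEAD). "my_waiter"
 * and "my_wakeup" are hypothetical.
 */
#include <linux/completion.h>

struct my_waiter {
	struct rcu_head head;
	struct completion done;
};

static void my_wakeup(struct rcu_head *head)
{
	complete(&container_of(head, struct my_waiter, head)->done);
}

static void my_wait_for_grace_period(void)
{
	struct my_waiter w;

	init_completion(&w.done);
	init_rcu_head_on_stack(&w.head);	/* on-stack: tell debug-objects */
	call_rcu(&w.head, my_wakeup);
	wait_for_completion(&w.done);		/* a grace period has elapsed */
	destroy_rcu_head_on_stack(&w.head);
}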
+ */ +#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD +void init_rcu_head(struct rcu_head *head); +void destroy_rcu_head(struct rcu_head *head); +void init_rcu_head_on_stack(struct rcu_head *head); +void destroy_rcu_head_on_stack(struct rcu_head *head); +#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ +static inline void init_rcu_head(struct rcu_head *head) { } +static inline void destroy_rcu_head(struct rcu_head *head) { } +static inline void init_rcu_head_on_stack(struct rcu_head *head) { } +static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { } +#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ + +#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) +bool rcu_lockdep_current_cpu_online(void); +#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ +static inline bool rcu_lockdep_current_cpu_online(void) { return true; } +#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +static inline void rcu_lock_acquire(struct lockdep_map *map) +{ + lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); +} + +static inline void rcu_lock_release(struct lockdep_map *map) +{ + lock_release(map, 1, _THIS_IP_); +} + +extern struct lockdep_map rcu_lock_map; +extern struct lockdep_map rcu_bh_lock_map; +extern struct lockdep_map rcu_sched_lock_map; +extern struct lockdep_map rcu_callback_map; +int debug_lockdep_rcu_enabled(void); +int rcu_read_lock_held(void); +int rcu_read_lock_bh_held(void); +int rcu_read_lock_sched_held(void); + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +# define rcu_lock_acquire(a) do { } while (0) +# define rcu_lock_release(a) do { } while (0) + +static inline int rcu_read_lock_held(void) +{ + return 1; +} + +static inline int rcu_read_lock_bh_held(void) +{ + return 1; +} + +static inline int rcu_read_lock_sched_held(void) +{ + return !preemptible(); +} +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +#ifdef CONFIG_PROVE_RCU + +/** + * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met + * @c: condition to check + * @s: informative message + */ +#define RCU_LOCKDEP_WARN(c, s) \ + do { \ + static bool __section(.data.unlikely) __warned; \ + if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \ + __warned = true; \ + lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ + } \ + } while (0) + +#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) +static inline void rcu_preempt_sleep_check(void) +{ + RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map), + "Illegal context switch in RCU read-side critical section"); +} +#else /* #ifdef CONFIG_PROVE_RCU */ +static inline void rcu_preempt_sleep_check(void) { } +#endif /* #else #ifdef CONFIG_PROVE_RCU */ + +#define rcu_sleep_check() \ + do { \ + rcu_preempt_sleep_check(); \ + RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ + "Illegal context switch in RCU-bh read-side critical section"); \ + RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \ + "Illegal context switch in RCU-sched read-side critical section"); \ + } while (0) + +#else /* #ifdef CONFIG_PROVE_RCU */ + +#define RCU_LOCKDEP_WARN(c, s) do { } while (0) +#define rcu_sleep_check() do { } while (0) + +#endif /* #else #ifdef CONFIG_PROVE_RCU */ + +/* + * Helper functions for rcu_dereference_check(), rcu_dereference_protected() + * and rcu_assign_pointer(). 
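/*
 * Illustrative sketch (not from the upstream header): a helper that must
 * only be called from an RCU read-side critical section can document and
 * enforce that with RCU_LOCKDEP_WARN(), which checks only when
 * CONFIG_PROVE_RCU is enabled. "my_item" and "my_peek_key" are
 * hypothetical.
 */
struct my_item {
	int key;
};

static int my_peek_key(struct my_item *item)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "my_peek_key() used outside of RCU read-side critical section");
	return item->key;
}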
Some of these could be folded into their + * callers, but they are left separate in order to ease introduction of + * multiple flavors of pointers to match the multiple flavors of RCU + * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in + * the future. + */ + +#ifdef __CHECKER__ +#define rcu_dereference_sparse(p, space) \ + ((void)(((typeof(*p) space *)p) == p)) +#else /* #ifdef __CHECKER__ */ +#define rcu_dereference_sparse(p, space) +#endif /* #else #ifdef __CHECKER__ */ + +#define __rcu_access_pointer(p, space) \ +({ \ + typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ + rcu_dereference_sparse(p, space); \ + ((typeof(*p) __force __kernel *)(_________p1)); \ +}) +#define __rcu_dereference_check(p, c, space) \ +({ \ + /* Dependency order vs. p above. */ \ + typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ + RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ + rcu_dereference_sparse(p, space); \ + ((typeof(*p) __force __kernel *)(________p1)); \ +}) +#define __rcu_dereference_protected(p, c, space) \ +({ \ + RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ + rcu_dereference_sparse(p, space); \ + ((typeof(*p) __force __kernel *)(p)); \ +}) +#define rcu_dereference_raw(p) \ +({ \ + /* Dependency order vs. p above. */ \ + typeof(p) ________p1 = READ_ONCE(p); \ + ((typeof(*p) __force __kernel *)(________p1)); \ +}) + +/** + * RCU_INITIALIZER() - statically initialize an RCU-protected global variable + * @v: The value to statically initialize with. + */ +#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) + +/** + * rcu_assign_pointer() - assign to RCU-protected pointer + * @p: pointer to assign to + * @v: value to assign (publish) + * + * Assigns the specified value to the specified RCU-protected + * pointer, ensuring that any concurrent RCU readers will see + * any prior initialization. + * + * Inserts memory barriers on architectures that require them + * (which is most of them), and also prevents the compiler from + * reordering the code that initializes the structure after the pointer + * assignment. More importantly, this call documents which pointers + * will be dereferenced by RCU read-side code. + * + * In some special cases, you may use RCU_INIT_POINTER() instead + * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due + * to the fact that it does not constrain either the CPU or the compiler. + * That said, using RCU_INIT_POINTER() when you should have used + * rcu_assign_pointer() is a very bad thing that results in + * impossible-to-diagnose memory corruption. So please be careful. + * See the RCU_INIT_POINTER() comment header for details. + * + * Note that rcu_assign_pointer() evaluates each of its arguments only + * once, appearances notwithstanding. One of the "extra" evaluations + * is in typeof() and the other visible only to sparse (__CHECKER__), + * neither of which actually execute the argument. As with most cpp + * macros, this execute-arguments-only-once property is important, so + * please be careful when making changes to rcu_assign_pointer() and the + * other macros that it invokes. 
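/*
 * Illustrative sketch (not from the upstream header) of the
 * publish/subscribe pattern rcu_assign_pointer() documents: the updater
 * fully initializes the object before publishing it, and readers pick it
 * up with rcu_dereference() (defined further down in this file).
 * "my_state" and "my_gp" are hypothetical.
 */
#include <linux/slab.h>
#include <linux/errno.h>

struct my_state {
	int a;
	int b;
};

static struct my_state __rcu *my_gp;	/* __rcu: checked by sparse */

static int my_publish(int a, int b)
{
	struct my_state *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	p->a = a;			/* initialize first ... */
	p->b = b;
	rcu_assign_pointer(my_gp, p);	/* ... then publish */
	return 0;
}

static int my_read_a(void)
{
	struct my_state *p;
	int a = -1;

	rcu_read_lock();
	p = rcu_dereference(my_gp);	/* subscribe */
	if (p)
		a = p->a;
	rcu_read_unlock();
	return a;
}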
+ */ +#define rcu_assign_pointer(p, v) \ +({ \ + uintptr_t _r_a_p__v = (uintptr_t)(v); \ + \ + if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ + WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ + else \ + smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ + _r_a_p__v; \ +}) + +/** + * rcu_swap_protected() - swap an RCU and a regular pointer + * @rcu_ptr: RCU pointer + * @ptr: regular pointer + * @c: the conditions under which the dereference will take place + * + * Perform swap(@rcu_ptr, @ptr) where @rcu_ptr is an RCU-annotated pointer and + * @c is the argument that is passed to the rcu_dereference_protected() call + * used to read that pointer. + */ +#define rcu_swap_protected(rcu_ptr, ptr, c) do { \ + typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ + rcu_assign_pointer((rcu_ptr), (ptr)); \ + (ptr) = __tmp; \ +} while (0) + +/** + * rcu_access_pointer() - fetch RCU pointer with no dereferencing + * @p: The pointer to read + * + * Return the value of the specified RCU-protected pointer, but omit the + * lockdep checks for being in an RCU read-side critical section. This is + * useful when the value of this pointer is accessed, but the pointer is + * not dereferenced, for example, when testing an RCU-protected pointer + * against NULL. Although rcu_access_pointer() may also be used in cases + * where update-side locks prevent the value of the pointer from changing, + * you should instead use rcu_dereference_protected() for this use case. + * + * It is also permissible to use rcu_access_pointer() when read-side + * access to the pointer was removed at least one grace period ago, as + * is the case in the context of the RCU callback that is freeing up + * the data, or after a synchronize_rcu() returns. This can be useful + * when tearing down multi-linked structures after a grace period + * has elapsed. + */ +#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) + +/** + * rcu_dereference_check() - rcu_dereference with debug checking + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * Do an rcu_dereference(), but check that the conditions under which the + * dereference will take place are correct. Typically the conditions + * indicate the various locking conditions that should be held at that + * point. The check should return true if the conditions are satisfied. + * An implicit check for being in an RCU read-side critical section + * (rcu_read_lock()) is included. + * + * For example: + * + * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); + * + * could be used to indicate to lockdep that foo->bar may only be dereferenced + * if either rcu_read_lock() is held, or that the lock required to replace + * the bar struct at foo->bar is held. + * + * Note that the list of conditions may also include indications of when a lock + * need not be held, for example during initialisation or destruction of the + * target struct: + * + * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || + * atomic_read(&foo->usage) == 0); + * + * Inserts memory barriers on architectures that require them + * (currently only the Alpha), prevents the compiler from refetching + * (and from merging fetches), and, more importantly, documents exactly + * which pointers are protected by RCU and checks that the pointer is + * annotated as __rcu. 
+ */ +#define rcu_dereference_check(p, c) \ + __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu) + +/** + * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * This is the RCU-bh counterpart to rcu_dereference_check(). + */ +#define rcu_dereference_bh_check(p, c) \ + __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu) + +/** + * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * This is the RCU-sched counterpart to rcu_dereference_check(). + */ +#define rcu_dereference_sched_check(p, c) \ + __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \ + __rcu) + +/* + * The tracing infrastructure traces RCU (we want that), but unfortunately + * some of the RCU checks causes tracing to lock up the system. + * + * The no-tracing version of rcu_dereference_raw() must not call + * rcu_read_lock_held(). + */ +#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) + +/** + * rcu_dereference_protected() - fetch RCU pointer when updates prevented + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * Return the value of the specified RCU-protected pointer, but omit + * the READ_ONCE(). This is useful in cases where update-side locks + * prevent the value of the pointer from changing. Please note that this + * primitive does *not* prevent the compiler from repeating this reference + * or combining it with other references, so it should not be used without + * protection of appropriate locks. + * + * This function is only for update-side use. Using this function + * when protected only by rcu_read_lock() will result in infrequent + * but very ugly failures. + */ +#define rcu_dereference_protected(p, c) \ + __rcu_dereference_protected((p), (c), __rcu) + + +/** + * rcu_dereference() - fetch RCU-protected pointer for dereferencing + * @p: The pointer to read, prior to dereferencing + * + * This is a simple wrapper around rcu_dereference_check(). + */ +#define rcu_dereference(p) rcu_dereference_check(p, 0) + +/** + * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing + * @p: The pointer to read, prior to dereferencing + * + * Makes rcu_dereference_check() do the dirty work. + */ +#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) + +/** + * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing + * @p: The pointer to read, prior to dereferencing + * + * Makes rcu_dereference_check() do the dirty work. + */ +#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) + +/** + * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism + * @p: The pointer to hand off + * + * This is simply an identity function, but it documents where a pointer + * is handed off from RCU to some other synchronization mechanism, for + * example, reference counting or locking. In C11, it would map to + * kill_dependency(). 
It could be used as follows:: + * + * rcu_read_lock(); + * p = rcu_dereference(gp); + * long_lived = is_long_lived(p); + * if (long_lived) { + * if (!atomic_inc_not_zero(p->refcnt)) + * long_lived = false; + * else + * p = rcu_pointer_handoff(p); + * } + * rcu_read_unlock(); + */ +#define rcu_pointer_handoff(p) (p) + +/** + * rcu_read_lock() - mark the beginning of an RCU read-side critical section + * + * When synchronize_rcu() is invoked on one CPU while other CPUs + * are within RCU read-side critical sections, then the + * synchronize_rcu() is guaranteed to block until after all the other + * CPUs exit their critical sections. Similarly, if call_rcu() is invoked + * on one CPU while other CPUs are within RCU read-side critical + * sections, invocation of the corresponding RCU callback is deferred + * until after the all the other CPUs exit their critical sections. + * + * Note, however, that RCU callbacks are permitted to run concurrently + * with new RCU read-side critical sections. One way that this can happen + * is via the following sequence of events: (1) CPU 0 enters an RCU + * read-side critical section, (2) CPU 1 invokes call_rcu() to register + * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, + * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU + * callback is invoked. This is legal, because the RCU read-side critical + * section that was running concurrently with the call_rcu() (and which + * therefore might be referencing something that the corresponding RCU + * callback would free up) has completed before the corresponding + * RCU callback is invoked. + * + * RCU read-side critical sections may be nested. Any deferred actions + * will be deferred until the outermost RCU read-side critical section + * completes. + * + * You can avoid reading and understanding the next paragraph by + * following this rule: don't put anything in an rcu_read_lock() RCU + * read-side critical section that would block in a !PREEMPT kernel. + * But if you want the full story, read on! + * + * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), + * it is illegal to block while in an RCU read-side critical section. + * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT + * kernel builds, RCU read-side critical sections may be preempted, + * but explicit blocking is illegal. Finally, in preemptible RCU + * implementations in real-time (with -rt patchset) kernel builds, RCU + * read-side critical sections may be preempted and they may also block, but + * only when acquiring spinlocks that are subject to priority inheritance. + */ +static __always_inline void rcu_read_lock(void) +{ + __rcu_read_lock(); + __acquire(RCU); + rcu_lock_acquire(&rcu_lock_map); + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_lock() used illegally while idle"); +} + +/* + * So where is rcu_write_lock()? It does not exist, as there is no + * way for writers to lock out RCU readers. This is a feature, not + * a bug -- this property is what provides RCU's performance benefits. + * Of course, writers must coordinate with each other. The normal + * spinlock primitives work well for this, but any other technique may be + * used as well. RCU does not care how the writers keep out of each + * others' way, as long as they do so. + */ + +/** + * rcu_read_unlock() - marks the end of an RCU read-side critical section. + * + * In most situations, rcu_read_unlock() is immune from deadlock. 
+ * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock() + * is responsible for deboosting, which it does via rt_mutex_unlock(). + * Unfortunately, this function acquires the scheduler's runqueue and + * priority-inheritance spinlocks. This means that deadlock could result + * if the caller of rcu_read_unlock() already holds one of these locks or + * any lock that is ever acquired while holding them. + * + * That said, RCU readers are never priority boosted unless they were + * preempted. Therefore, one way to avoid deadlock is to make sure + * that preemption never happens within any RCU read-side critical + * section whose outermost rcu_read_unlock() is called with one of + * rt_mutex_unlock()'s locks held. Such preemption can be avoided in + * a number of ways, for example, by invoking preempt_disable() before + * critical section's outermost rcu_read_lock(). + * + * Given that the set of locks acquired by rt_mutex_unlock() might change + * at any time, a somewhat more future-proofed approach is to make sure + * that that preemption never happens within any RCU read-side critical + * section whose outermost rcu_read_unlock() is called with irqs disabled. + * This approach relies on the fact that rt_mutex_unlock() currently only + * acquires irq-disabled locks. + * + * The second of these two approaches is best in most situations, + * however, the first approach can also be useful, at least to those + * developers willing to keep abreast of the set of locks acquired by + * rt_mutex_unlock(). + * + * See rcu_read_lock() for more information. + */ +static inline void rcu_read_unlock(void) +{ + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_unlock() used illegally while idle"); + __release(RCU); + __rcu_read_unlock(); + rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ +} + +/** + * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section + * + * This is equivalent of rcu_read_lock(), but to be used when updates + * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since + * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a + * softirq handler to be a quiescent state, a process in RCU read-side + * critical section must be protected by disabling softirqs. Read-side + * critical sections in interrupt context can use just rcu_read_lock(), + * though this should at least be commented to avoid confusing people + * reading the code. + * + * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() + * must occur in the same context, for example, it is illegal to invoke + * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() + * was invoked from some other task. + */ +static inline void rcu_read_lock_bh(void) +{ + local_bh_disable(); + __acquire(RCU_BH); + rcu_lock_acquire(&rcu_bh_lock_map); + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_lock_bh() used illegally while idle"); +} + +/* + * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section + * + * See rcu_read_lock_bh() for more information. + */ +static inline void rcu_read_unlock_bh(void) +{ + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_unlock_bh() used illegally while idle"); + rcu_lock_release(&rcu_bh_lock_map); + __release(RCU_BH); + local_bh_enable(); +} + +/** + * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section + * + * This is equivalent of rcu_read_lock(), but to be used when updates + * are being done using call_rcu_sched() or synchronize_rcu_sched(). 
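+ *
+ * A minimal reader, sketched for illustration only (gp is a hypothetical
+ * RCU-sched-protected pointer, do_something() a hypothetical helper):
+ *
+ *	rcu_read_lock_sched();
+ *	p = rcu_dereference_sched(gp);
+ *	if (p)
+ *		do_something(p);
+ *	rcu_read_unlock_sched();
+ *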
+ * Read-side critical sections can also be introduced by anything that + * disables preemption, including local_irq_disable() and friends. + * + * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() + * must occur in the same context, for example, it is illegal to invoke + * rcu_read_unlock_sched() from process context if the matching + * rcu_read_lock_sched() was invoked from an NMI handler. + */ +static inline void rcu_read_lock_sched(void) +{ + preempt_disable(); + __acquire(RCU_SCHED); + rcu_lock_acquire(&rcu_sched_lock_map); + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_lock_sched() used illegally while idle"); +} + +/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ +static inline notrace void rcu_read_lock_sched_notrace(void) +{ + preempt_disable_notrace(); + __acquire(RCU_SCHED); +} + +/* + * rcu_read_unlock_sched - marks the end of a RCU-classic critical section + * + * See rcu_read_lock_sched for more information. + */ +static inline void rcu_read_unlock_sched(void) +{ + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_unlock_sched() used illegally while idle"); + rcu_lock_release(&rcu_sched_lock_map); + __release(RCU_SCHED); + preempt_enable(); +} + +/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ +static inline notrace void rcu_read_unlock_sched_notrace(void) +{ + __release(RCU_SCHED); + preempt_enable_notrace(); +} + +/** + * RCU_INIT_POINTER() - initialize an RCU protected pointer + * @p: The pointer to be initialized. + * @v: The value to initialized the pointer to. + * + * Initialize an RCU-protected pointer in special cases where readers + * do not need ordering constraints on the CPU or the compiler. These + * special cases are: + * + * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or* + * 2. The caller has taken whatever steps are required to prevent + * RCU readers from concurrently accessing this pointer *or* + * 3. The referenced data structure has already been exposed to + * readers either at compile time or via rcu_assign_pointer() *and* + * + * a. You have not made *any* reader-visible changes to + * this structure since then *or* + * b. It is OK for readers accessing this structure from its + * new location to see the old state of the structure. (For + * example, the changes were to statistical counters or to + * other state where exact synchronization is not required.) + * + * Failure to follow these rules governing use of RCU_INIT_POINTER() will + * result in impossible-to-diagnose memory corruption. As in the structures + * will look OK in crash dumps, but any concurrent RCU readers might + * see pre-initialized values of the referenced data structure. So + * please be very careful how you use RCU_INIT_POINTER()!!! + * + * If you are creating an RCU-protected linked structure that is accessed + * by a single external-to-structure RCU-protected pointer, then you may + * use RCU_INIT_POINTER() to initialize the internal RCU-protected + * pointers, but you must use rcu_assign_pointer() to initialize the + * external-to-structure pointer *after* you have completely initialized + * the reader-accessible portions of the linked structure. + * + * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no + * ordering guarantees for either the CPU or the compiler. 
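+ *
+ * For illustration only (gp, p and p->a are hypothetical), case 1 above
+ * is simply:
+ *
+ *	RCU_INIT_POINTER(gp, NULL);
+ *
+ * and case 2 covers initializing a not-yet-published structure's internal
+ * __rcu pointers before the external pointer is published with
+ * rcu_assign_pointer():
+ *
+ *	RCU_INIT_POINTER(p->next, NULL);
+ *	p->a = 1;
+ *	rcu_assign_pointer(gp, p);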
+ */ +#define RCU_INIT_POINTER(p, v) \ + do { \ + rcu_dereference_sparse(p, __rcu); \ + WRITE_ONCE(p, RCU_INITIALIZER(v)); \ + } while (0) + +/** + * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer + * @p: The pointer to be initialized. + * @v: The value to initialized the pointer to. + * + * GCC-style initialization for an RCU-protected pointer in a structure field. + */ +#define RCU_POINTER_INITIALIZER(p, v) \ + .p = RCU_INITIALIZER(v) + +/* + * Does the specified offset indicate that the corresponding rcu_head + * structure can be handled by kfree_rcu()? + */ +#define __is_kfree_rcu_offset(offset) ((offset) < 4096) + +/* + * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain. + */ +#define __kfree_rcu(head, offset) \ + do { \ + BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ + kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ + } while (0) + +/** + * kfree_rcu() - kfree an object after a grace period. + * @ptr: pointer to kfree + * @rcu_head: the name of the struct rcu_head within the type of @ptr. + * + * Many rcu callbacks functions just call kfree() on the base structure. + * These functions are trivial, but their size adds up, and furthermore + * when they are used in a kernel module, that module must invoke the + * high-latency rcu_barrier() function at module-unload time. + * + * The kfree_rcu() function handles this issue. Rather than encoding a + * function address in the embedded rcu_head structure, kfree_rcu() instead + * encodes the offset of the rcu_head structure within the base structure. + * Because the functions are not allowed in the low-order 4096 bytes of + * kernel virtual memory, offsets up to 4095 bytes can be accommodated. + * If the offset is larger than 4095 bytes, a compile-time error will + * be generated in __kfree_rcu(). If this error is triggered, you can + * either fall back to use of call_rcu() or rearrange the structure to + * position the rcu_head structure into the first 4096 bytes. + * + * Note that the allowable offset might decrease in the future, for example, + * to allow something like kmem_cache_free_rcu(). + * + * The BUILD_BUG_ON check must not involve any function calls, hence the + * checks are done in macros here. + */ +#define kfree_rcu(ptr, rcu_head) \ + __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) + + +/* + * Place this after a lock-acquisition primitive to guarantee that + * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies + * if the UNLOCK and LOCK are executed by the same CPU or if the + * UNLOCK and LOCK operate on the same lock variable. + */ +#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE +#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ +#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ +#define smp_mb__after_unlock_lock() do { } while (0) +#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ + + +#endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h new file mode 100644 index 000000000..57f371344 --- /dev/null +++ b/include/linux/rcupdate_wait.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_RCUPDATE_WAIT_H +#define _LINUX_SCHED_RCUPDATE_WAIT_H + +/* + * RCU synchronization types and methods: + */ + +#include +#include + +/* + * Structure allowing asynchronous waiting on RCU. 
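+ *
+ * A hedged sketch of the usual pattern, roughly what __wait_rcu_gp()
+ * below does internally:
+ *
+ *	struct rcu_synchronize rs;
+ *
+ *	init_rcu_head_on_stack(&rs.head);
+ *	init_completion(&rs.completion);
+ *	call_rcu(&rs.head, wakeme_after_rcu);
+ *	wait_for_completion(&rs.completion);
+ *	destroy_rcu_head_on_stack(&rs.head);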
+ */ +struct rcu_synchronize { + struct rcu_head head; + struct completion completion; +}; +void wakeme_after_rcu(struct rcu_head *head); + +void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, + struct rcu_synchronize *rs_array); + +#define _wait_rcu_gp(checktiny, ...) \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ +} while (0) + +#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) + +/** + * synchronize_rcu_mult - Wait concurrently for multiple grace periods + * @...: List of call_rcu() functions for the flavors to wait on. + * + * This macro waits concurrently for multiple flavors of RCU grace periods. + * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait + * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU + * domain requires you to write a wrapper function for that SRCU domain's + * call_srcu() function, supplying the corresponding srcu_struct. + * + * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU + * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called + * is automatically a grace period. + */ +#define synchronize_rcu_mult(...) \ + _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) + +#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h new file mode 100644 index 000000000..8d9a0ea8f --- /dev/null +++ b/include/linux/rcutiny.h @@ -0,0 +1,137 @@ +/* + * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright IBM Corporation, 2008 + * + * Author: Paul E. McKenney + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU + */ +#ifndef __LINUX_TINY_H +#define __LINUX_TINY_H + +#include + +struct rcu_dynticks; +static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp) +{ + return 0; +} + +/* Never flag non-existent other CPUs! */ +static inline bool rcu_eqs_special_set(int cpu) { return false; } + +static inline unsigned long get_state_synchronize_rcu(void) +{ + return 0; +} + +static inline void cond_synchronize_rcu(unsigned long oldstate) +{ + might_sleep(); +} + +static inline unsigned long get_state_synchronize_sched(void) +{ + return 0; +} + +static inline void cond_synchronize_sched(unsigned long oldstate) +{ + might_sleep(); +} + +extern void rcu_barrier_bh(void); +extern void rcu_barrier_sched(void); + +static inline void synchronize_rcu_expedited(void) +{ + synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ +} + +static inline void rcu_barrier(void) +{ + rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! 
*/ +} + +static inline void synchronize_rcu_bh(void) +{ + synchronize_sched(); +} + +static inline void synchronize_rcu_bh_expedited(void) +{ + synchronize_sched(); +} + +static inline void synchronize_sched_expedited(void) +{ + synchronize_sched(); +} + +static inline void kfree_call_rcu(struct rcu_head *head, + rcu_callback_t func) +{ + call_rcu(head, func); +} + +#define rcu_note_context_switch(preempt) \ + do { \ + rcu_sched_qs(); \ + rcu_tasks_qs(current); \ + } while (0) + +static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) +{ + *nextevt = KTIME_MAX; + return 0; +} + +/* + * Take advantage of the fact that there is only one CPU, which + * allows us to ignore virtualization-based context switches. + */ +static inline void rcu_virt_note_context_switch(int cpu) { } +static inline void rcu_cpu_stall_reset(void) { } +static inline void rcu_idle_enter(void) { } +static inline void rcu_idle_exit(void) { } +static inline void rcu_irq_enter(void) { } +static inline void rcu_irq_exit_irqson(void) { } +static inline void rcu_irq_enter_irqson(void) { } +static inline void rcu_irq_exit(void) { } +static inline void exit_rcu(void) { } +#ifdef CONFIG_SRCU +void rcu_scheduler_starting(void); +#else /* #ifndef CONFIG_SRCU */ +static inline void rcu_scheduler_starting(void) { } +#endif /* #else #ifndef CONFIG_SRCU */ +static inline void rcu_end_inkernel_boot(void) { } +static inline bool rcu_is_watching(void) { return true; } + +/* Avoid RCU read-side critical sections leaking across. */ +static inline void rcu_all_qs(void) { barrier(); } + +/* RCUtree hotplug events */ +#define rcutree_prepare_cpu NULL +#define rcutree_online_cpu NULL +#define rcutree_offline_cpu NULL +#define rcutree_dead_cpu NULL +#define rcutree_dying_cpu NULL +static inline void rcu_cpu_starting(unsigned int cpu) { } + +#endif /* __LINUX_RCUTINY_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h new file mode 100644 index 000000000..914655848 --- /dev/null +++ b/include/linux/rcutree.h @@ -0,0 +1,106 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (tree-based version) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright IBM Corporation, 2008 + * + * Author: Dipankar Sarma + * Paul E. McKenney Hierarchical algorithm + * + * Based on the original work by Paul McKenney + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU + */ + +#ifndef __LINUX_RCUTREE_H +#define __LINUX_RCUTREE_H + +void rcu_note_context_switch(bool preempt); +int rcu_needs_cpu(u64 basem, u64 *nextevt); +void rcu_cpu_stall_reset(void); + +/* + * Note a virtualization-based context switch. This is simply a + * wrapper around rcu_note_context_switch(), which allows TINY_RCU + * to save a few bytes. The caller must have disabled interrupts. 
+ */ +static inline void rcu_virt_note_context_switch(int cpu) +{ + rcu_note_context_switch(false); +} + +void synchronize_rcu_bh(void); +void synchronize_sched_expedited(void); +void synchronize_rcu_expedited(void); + +void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); + +/** + * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period + * + * Wait for an RCU-bh grace period to elapse, but use a "big hammer" + * approach to force the grace period to end quickly. This consumes + * significant time on all CPUs and is unfriendly to real-time workloads, + * so is thus not recommended for any sort of common-case code. In fact, + * if you are using synchronize_rcu_bh_expedited() in a loop, please + * restructure your code to batch your updates, and then use a single + * synchronize_rcu_bh() instead. + * + * Note that it is illegal to call this function while holding any lock + * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal + * to call this function from a CPU-hotplug notifier. Failing to observe + * these restriction will result in deadlock. + */ +static inline void synchronize_rcu_bh_expedited(void) +{ + synchronize_sched_expedited(); +} + +void rcu_barrier(void); +void rcu_barrier_bh(void); +void rcu_barrier_sched(void); +bool rcu_eqs_special_set(int cpu); +unsigned long get_state_synchronize_rcu(void); +void cond_synchronize_rcu(unsigned long oldstate); +unsigned long get_state_synchronize_sched(void); +void cond_synchronize_sched(unsigned long oldstate); + +void rcu_idle_enter(void); +void rcu_idle_exit(void); +void rcu_irq_enter(void); +void rcu_irq_exit(void); +void rcu_irq_enter_irqson(void); +void rcu_irq_exit_irqson(void); + +void exit_rcu(void); + +void rcu_scheduler_starting(void); +extern int rcu_scheduler_active __read_mostly; +void rcu_end_inkernel_boot(void); +bool rcu_is_watching(void); +void rcu_all_qs(void); + +/* RCUtree hotplug events */ +int rcutree_prepare_cpu(unsigned int cpu); +int rcutree_online_cpu(unsigned int cpu); +int rcutree_offline_cpu(unsigned int cpu); +int rcutree_dead_cpu(unsigned int cpu); +int rcutree_dying_cpu(unsigned int cpu); +void rcu_cpu_starting(unsigned int cpu); + +#endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h new file mode 100644 index 000000000..90bfa3279 --- /dev/null +++ b/include/linux/rcuwait.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RCUWAIT_H_ +#define _LINUX_RCUWAIT_H_ + +#include + +/* + * rcuwait provides a way of blocking and waking up a single + * task in an rcu-safe manner; where it is forbidden to use + * after exit_notify(). task_struct is not properly rcu protected, + * unless dealing with rcu-aware lists, ie: find_task_by_*(). + * + * Alternatively we have task_rcu_dereference(), but the return + * semantics have different implications which would break the + * wakeup side. The only time @task is non-nil is when a user is + * blocked (or checking if it needs to) on a condition, and reset + * as soon as we know that the condition has succeeded and are + * awoken. + */ +struct rcuwait { + struct task_struct *task; +}; + +#define __RCUWAIT_INITIALIZER(name) \ + { .task = NULL, } + +static inline void rcuwait_init(struct rcuwait *w) +{ + w->task = NULL; +} + +extern void rcuwait_wake_up(struct rcuwait *w); + +/* + * The caller is responsible for locking around rcuwait_wait_event(), + * such that writes to @task are properly serialized. 
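+ *
+ * A hedged usage sketch ("done" is an illustrative condition, not part of
+ * this header):
+ *
+ *	waiting side (a single, serialized waiter):
+ *		rcuwait_wait_event(&w, READ_ONCE(done));
+ *
+ *	waking side:
+ *		WRITE_ONCE(done, true);
+ *		rcuwait_wake_up(&w);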
+ */ +#define rcuwait_wait_event(w, condition) \ +({ \ + /* \ + * Complain if we are called after do_exit()/exit_notify(), \ + * as we cannot rely on the rcu critical region for the \ + * wakeup side. \ + */ \ + WARN_ON(current->exit_state); \ + \ + rcu_assign_pointer((w)->task, current); \ + for (;;) { \ + /* \ + * Implicit barrier (A) pairs with (B) in \ + * rcuwait_wake_up(). \ + */ \ + set_current_state(TASK_UNINTERRUPTIBLE); \ + if (condition) \ + break; \ + \ + schedule(); \ + } \ + \ + WRITE_ONCE((w)->task, NULL); \ + __set_current_state(TASK_RUNNING); \ +}) + +#endif /* _LINUX_RCUWAIT_H_ */ diff --git a/include/linux/reboot-mode.h b/include/linux/reboot-mode.h new file mode 100644 index 000000000..4a2abb38d --- /dev/null +++ b/include/linux/reboot-mode.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __REBOOT_MODE_H__ +#define __REBOOT_MODE_H__ + +struct reboot_mode_driver { + struct device *dev; + struct list_head head; + int (*write)(struct reboot_mode_driver *reboot, unsigned int magic); + struct notifier_block reboot_notifier; +}; + +int reboot_mode_register(struct reboot_mode_driver *reboot); +int reboot_mode_unregister(struct reboot_mode_driver *reboot); +int devm_reboot_mode_register(struct device *dev, + struct reboot_mode_driver *reboot); +void devm_reboot_mode_unregister(struct device *dev, + struct reboot_mode_driver *reboot); + +#endif diff --git a/include/linux/reboot.h b/include/linux/reboot.h new file mode 100644 index 000000000..e63799a6e --- /dev/null +++ b/include/linux/reboot.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_REBOOT_H +#define _LINUX_REBOOT_H + + +#include +#include + +struct device; + +#define SYS_DOWN 0x0001 /* Notify of system down */ +#define SYS_RESTART SYS_DOWN +#define SYS_HALT 0x0002 /* Notify of system halt */ +#define SYS_POWER_OFF 0x0003 /* Notify of system power off */ + +enum reboot_mode { + REBOOT_COLD = 0, + REBOOT_WARM, + REBOOT_HARD, + REBOOT_SOFT, + REBOOT_GPIO, +}; +extern enum reboot_mode reboot_mode; + +enum reboot_type { + BOOT_TRIPLE = 't', + BOOT_KBD = 'k', + BOOT_BIOS = 'b', + BOOT_ACPI = 'a', + BOOT_EFI = 'e', + BOOT_CF9_FORCE = 'p', + BOOT_CF9_SAFE = 'q', +}; +extern enum reboot_type reboot_type; + +extern int reboot_default; +extern int reboot_cpu; +extern int reboot_force; + + +extern int register_reboot_notifier(struct notifier_block *); +extern int unregister_reboot_notifier(struct notifier_block *); + +extern int devm_register_reboot_notifier(struct device *, struct notifier_block *); + +extern int register_restart_handler(struct notifier_block *); +extern int unregister_restart_handler(struct notifier_block *); +extern void do_kernel_restart(char *cmd); + +/* + * Architecture-specific implementations of sys_reboot commands. + */ + +extern void migrate_to_reboot_cpu(void); +extern void machine_restart(char *cmd); +extern void machine_halt(void); +extern void machine_power_off(void); + +extern void machine_shutdown(void); +struct pt_regs; +extern void machine_crash_shutdown(struct pt_regs *); + +/* + * Architecture independent implemenations of sys_reboot commands. 
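+ *
+ * (Illustrative, hedged sketch, not part of this header: code that must
+ * react to these transitions usually registers a notifier, whose action
+ * argument is SYS_DOWN, SYS_HALT or SYS_POWER_OFF, rather than calling
+ * the functions below directly.)
+ *
+ *	static int my_reboot_cb(struct notifier_block *nb,
+ *				unsigned long action, void *data)
+ *	{
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block my_reboot_nb = {
+ *		.notifier_call = my_reboot_cb,
+ *	};
+ *
+ *	register_reboot_notifier(&my_reboot_nb);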
+ */ + +extern void kernel_restart_prepare(char *cmd); +extern void kernel_restart(char *cmd); +extern void kernel_halt(void); +extern void kernel_power_off(void); + +extern int C_A_D; /* for sysctl */ +void ctrl_alt_del(void); + +#define POWEROFF_CMD_PATH_LEN 256 +extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN]; + +extern void orderly_poweroff(bool force); +extern void orderly_reboot(void); + +/* + * Emergency restart, callable from an interrupt handler. + */ + +extern void emergency_restart(void); +#include + +#endif /* _LINUX_REBOOT_H */ diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h new file mode 100644 index 000000000..585ce89c0 --- /dev/null +++ b/include/linux/reciprocal_div.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RECIPROCAL_DIV_H +#define _LINUX_RECIPROCAL_DIV_H + +#include + +/* + * This algorithm is based on the paper "Division by Invariant + * Integers Using Multiplication" by Torbjörn Granlund and Peter + * L. Montgomery. + * + * The assembler implementation from Agner Fog, which this code is + * based on, can be found here: + * http://www.agner.org/optimize/asmlib.zip + * + * This optimization for A/B is helpful if the divisor B is mostly + * runtime invariant. The reciprocal of B is calculated in the + * slow-path with reciprocal_value(). The fast-path can then just use + * a much faster multiplication operation with a variable dividend A + * to calculate the division A/B. + */ + +struct reciprocal_value { + u32 m; + u8 sh1, sh2; +}; + +/* "reciprocal_value" and "reciprocal_divide" together implement the basic + * version of the algorithm described in Figure 4.1 of the paper. + */ +struct reciprocal_value reciprocal_value(u32 d); + +static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R) +{ + u32 t = (u32)(((u64)a * R.m) >> 32); + return (t + ((a - t) >> R.sh1)) >> R.sh2; +} + +struct reciprocal_value_adv { + u32 m; + u8 sh, exp; + bool is_wide_m; +}; + +/* "reciprocal_value_adv" implements the advanced version of the algorithm + * described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose + * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The + * exception case could be easily handled before calling "reciprocal_value_adv". + * + * The advanced version requires more complex calculation to get the reciprocal + * multiplier and other control variables, but then could reduce the required + * emulation operations. + * + * It makes no sense to use this advanced version for host divide emulation, + * those extra complexities for calculating multiplier etc could completely + * waive our saving on emulation operations. + * + * However, it makes sense to use it for JIT divide code generation for which + * we are willing to trade performance of JITed code with that of host. As shown + * by the following pseudo code, the required emulation operations could go down + * from 6 (the basic version) to 3 or 4. + * + * To use the result of "reciprocal_value_adv", suppose we want to calculate + * n/d, the pseudo C code will be: + * + * struct reciprocal_value_adv rvalue; + * u8 pre_shift, exp; + * + * // handle exception case. 
+ * if (d >= (1U << 31)) { + * result = n >= d; + * return; + * } + * + * rvalue = reciprocal_value_adv(d, 32) + * exp = rvalue.exp; + * if (rvalue.is_wide_m && !(d & 1)) { + * // floor(log2(d & (2^32 -d))) + * pre_shift = fls(d & -d) - 1; + * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift); + * } else { + * pre_shift = 0; + * } + * + * // code generation starts. + * if (imm == 1U << exp) { + * result = n >> exp; + * } else if (rvalue.is_wide_m) { + * // pre_shift must be zero when reached here. + * t = (n * rvalue.m) >> 32; + * result = n - t; + * result >>= 1; + * result += t; + * result >>= rvalue.sh - 1; + * } else { + * if (pre_shift) + * result = n >> pre_shift; + * result = ((u64)result * rvalue.m) >> 32; + * result >>= rvalue.sh; + * } + */ +struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec); + +#endif /* _LINUX_RECIPROCAL_DIV_H */ diff --git a/include/linux/refcount.h b/include/linux/refcount.h new file mode 100644 index 000000000..e28cce21b --- /dev/null +++ b/include/linux/refcount.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_REFCOUNT_H +#define _LINUX_REFCOUNT_H + +#include +#include +#include + +struct mutex; + +/** + * struct refcount_t - variant of atomic_t specialized for reference counts + * @refs: atomic_t counter field + * + * The counter saturates at UINT_MAX and will not move once + * there. This avoids wrapping the counter and causing 'spurious' + * use-after-free bugs. + */ +typedef struct refcount_struct { + atomic_t refs; +} refcount_t; + +#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), } + +/** + * refcount_set - set a refcount's value + * @r: the refcount + * @n: value to which the refcount will be set + */ +static inline void refcount_set(refcount_t *r, unsigned int n) +{ + atomic_set(&r->refs, n); +} + +/** + * refcount_read - get a refcount's value + * @r: the refcount + * + * Return: the refcount's value + */ +static inline unsigned int refcount_read(const refcount_t *r) +{ + return atomic_read(&r->refs); +} + +extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r); +extern void refcount_add_checked(unsigned int i, refcount_t *r); + +extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r); +extern void refcount_inc_checked(refcount_t *r); + +extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r); + +extern __must_check bool refcount_dec_and_test_checked(refcount_t *r); +extern void refcount_dec_checked(refcount_t *r); + +#ifdef CONFIG_REFCOUNT_FULL + +#define refcount_add_not_zero refcount_add_not_zero_checked +#define refcount_add refcount_add_checked + +#define refcount_inc_not_zero refcount_inc_not_zero_checked +#define refcount_inc refcount_inc_checked + +#define refcount_sub_and_test refcount_sub_and_test_checked + +#define refcount_dec_and_test refcount_dec_and_test_checked +#define refcount_dec refcount_dec_checked + +#else +# ifdef CONFIG_ARCH_HAS_REFCOUNT +# include +# else +static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r) +{ + return atomic_add_unless(&r->refs, i, 0); +} + +static inline void refcount_add(unsigned int i, refcount_t *r) +{ + atomic_add(i, &r->refs); +} + +static inline __must_check bool refcount_inc_not_zero(refcount_t *r) +{ + return atomic_add_unless(&r->refs, 1, 0); +} + +static inline void refcount_inc(refcount_t *r) +{ + atomic_inc(&r->refs); +} + +static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r) +{ + return 
atomic_sub_and_test(i, &r->refs); +} + +static inline __must_check bool refcount_dec_and_test(refcount_t *r) +{ + return atomic_dec_and_test(&r->refs); +} + +static inline void refcount_dec(refcount_t *r) +{ + atomic_dec(&r->refs); +} +# endif /* !CONFIG_ARCH_HAS_REFCOUNT */ +#endif /* CONFIG_REFCOUNT_FULL */ + +extern __must_check bool refcount_dec_if_one(refcount_t *r); +extern __must_check bool refcount_dec_not_one(refcount_t *r); +extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); +extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); +extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r, + spinlock_t *lock, + unsigned long *flags); +#endif /* _LINUX_REFCOUNT_H */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h new file mode 100644 index 000000000..379505a53 --- /dev/null +++ b/include/linux/regmap.h @@ -0,0 +1,1372 @@ +#ifndef __LINUX_REGMAP_H +#define __LINUX_REGMAP_H + +/* + * Register map access API + * + * Copyright 2011 Wolfson Microelectronics plc + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +struct module; +struct clk; +struct device; +struct i2c_client; +struct irq_domain; +struct slim_device; +struct spi_device; +struct spmi_device; +struct regmap; +struct regmap_range_cfg; +struct regmap_field; +struct snd_ac97; +struct sdw_slave; + +/* An enum of all the supported cache types */ +enum regcache_type { + REGCACHE_NONE, + REGCACHE_RBTREE, + REGCACHE_COMPRESSED, + REGCACHE_FLAT, +}; + +/** + * struct reg_default - Default value for a register. + * + * @reg: Register address. + * @def: Register default value. + * + * We use an array of structs rather than a simple array as many modern devices + * have very sparse register maps. + */ +struct reg_default { + unsigned int reg; + unsigned int def; +}; + +/** + * struct reg_sequence - An individual write from a sequence of writes. + * + * @reg: Register address. + * @def: Register value. + * @delay_us: Delay to be applied after the register write in microseconds + * + * Register/value pairs for sequences of writes with an optional delay in + * microseconds to be applied after each write. 
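+ *
+ * For illustration only (the register addresses, values and delay are
+ * driver-specific assumptions):
+ *
+ *	static const struct reg_sequence my_init_seq[] = {
+ *		{ .reg = 0x00, .def = 0x01 },
+ *		{ .reg = 0x01, .def = 0x40, .delay_us = 100 },
+ *	};
+ *
+ * Such a table is typically handed to a multi-register write helper such
+ * as regmap_multi_reg_write().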
+ */ +struct reg_sequence { + unsigned int reg; + unsigned int def; + unsigned int delay_us; +}; + +#define regmap_update_bits(map, reg, mask, val) \ + regmap_update_bits_base(map, reg, mask, val, NULL, false, false) +#define regmap_update_bits_async(map, reg, mask, val)\ + regmap_update_bits_base(map, reg, mask, val, NULL, true, false) +#define regmap_update_bits_check(map, reg, mask, val, change)\ + regmap_update_bits_base(map, reg, mask, val, change, false, false) +#define regmap_update_bits_check_async(map, reg, mask, val, change)\ + regmap_update_bits_base(map, reg, mask, val, change, true, false) + +#define regmap_write_bits(map, reg, mask, val) \ + regmap_update_bits_base(map, reg, mask, val, NULL, false, true) + +#define regmap_field_write(field, val) \ + regmap_field_update_bits_base(field, ~0, val, NULL, false, false) +#define regmap_field_force_write(field, val) \ + regmap_field_update_bits_base(field, ~0, val, NULL, false, true) +#define regmap_field_update_bits(field, mask, val)\ + regmap_field_update_bits_base(field, mask, val, NULL, false, false) +#define regmap_field_force_update_bits(field, mask, val) \ + regmap_field_update_bits_base(field, mask, val, NULL, false, true) + +#define regmap_fields_write(field, id, val) \ + regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false) +#define regmap_fields_force_write(field, id, val) \ + regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true) +#define regmap_fields_update_bits(field, id, mask, val)\ + regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false) +#define regmap_fields_force_update_bits(field, id, mask, val) \ + regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true) + +/** + * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs + * + * @map: Regmap to read from + * @addr: Address to poll + * @val: Unsigned integer variable to read the value into + * @cond: Break condition (usually involving @val) + * @sleep_us: Maximum time to sleep between reads in us (0 + * tight-loops). Should be less than ~20ms since usleep_range + * is used (see Documentation/timers/timers-howto.txt). + * @timeout_us: Timeout in us, 0 means never timeout + * + * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read + * error return value in case of a error read. In the two former cases, + * the last read value at @addr is stored in @val. Must not be called + * from atomic context if sleep_us or timeout_us are used. + * + * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. + */ +#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + int __ret; \ + might_sleep_if(__sleep_us); \ + for (;;) { \ + __ret = regmap_read((map), (addr), &(val)); \ + if (__ret) \ + break; \ + if (cond) \ + break; \ + if ((__timeout_us) && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + __ret = regmap_read((map), (addr), &(val)); \ + break; \ + } \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + } \ + __ret ?: ((cond) ? 
0 : -ETIMEDOUT); \ +}) + +/** + * regmap_field_read_poll_timeout - Poll until a condition is met or timeout + * + * @field: Regmap field to read from + * @val: Unsigned integer variable to read the value into + * @cond: Break condition (usually involving @val) + * @sleep_us: Maximum time to sleep between reads in us (0 + * tight-loops). Should be less than ~20ms since usleep_range + * is used (see Documentation/timers/timers-howto.txt). + * @timeout_us: Timeout in us, 0 means never timeout + * + * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read + * error return value in case of a error read. In the two former cases, + * the last read value at @addr is stored in @val. Must not be called + * from atomic context if sleep_us or timeout_us are used. + * + * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. + */ +#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t timeout = ktime_add_us(ktime_get(), __timeout_us); \ + int pollret; \ + might_sleep_if(__sleep_us); \ + for (;;) { \ + pollret = regmap_field_read((field), &(val)); \ + if (pollret) \ + break; \ + if (cond) \ + break; \ + if (__timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ + pollret = regmap_field_read((field), &(val)); \ + break; \ + } \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + } \ + pollret ?: ((cond) ? 0 : -ETIMEDOUT); \ +}) + +#ifdef CONFIG_REGMAP + +enum regmap_endian { + /* Unspecified -> 0 -> Backwards compatible default */ + REGMAP_ENDIAN_DEFAULT = 0, + REGMAP_ENDIAN_BIG, + REGMAP_ENDIAN_LITTLE, + REGMAP_ENDIAN_NATIVE, +}; + +/** + * struct regmap_range - A register range, used for access related checks + * (readable/writeable/volatile/precious checks) + * + * @range_min: address of first register + * @range_max: address of last register + */ +struct regmap_range { + unsigned int range_min; + unsigned int range_max; +}; + +#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, } + +/** + * struct regmap_access_table - A table of register ranges for access checks + * + * @yes_ranges : pointer to an array of regmap ranges used as "yes ranges" + * @n_yes_ranges: size of the above array + * @no_ranges: pointer to an array of regmap ranges used as "no ranges" + * @n_no_ranges: size of the above array + * + * A table of ranges including some yes ranges and some no ranges. + * If a register belongs to a no_range, the corresponding check function + * will return false. If a register belongs to a yes range, the corresponding + * check function will return true. "no_ranges" are searched first. + */ +struct regmap_access_table { + const struct regmap_range *yes_ranges; + unsigned int n_yes_ranges; + const struct regmap_range *no_ranges; + unsigned int n_no_ranges; +}; + +typedef void (*regmap_lock)(void *); +typedef void (*regmap_unlock)(void *); + +/** + * struct regmap_config - Configuration for the register map of a device. + * + * @name: Optional name of the regmap. Useful when a device has multiple + * register regions. + * + * @reg_bits: Number of bits in a register address, mandatory. + * @reg_stride: The register address stride. Valid register addresses are a + * multiple of this value. If set to 0, a value of 1 will be + * used. + * @pad_bits: Number of bits of padding between register and value. + * @val_bits: Number of bits in a register value, mandatory. 
+ * + * @writeable_reg: Optional callback returning true if the register + * can be written to. If this field is NULL but wr_table + * (see below) is not, the check is performed on such table + * (a register is writeable if it belongs to one of the ranges + * specified by wr_table). + * @readable_reg: Optional callback returning true if the register + * can be read from. If this field is NULL but rd_table + * (see below) is not, the check is performed on such table + * (a register is readable if it belongs to one of the ranges + * specified by rd_table). + * @volatile_reg: Optional callback returning true if the register + * value can't be cached. If this field is NULL but + * volatile_table (see below) is not, the check is performed on + * such table (a register is volatile if it belongs to one of + * the ranges specified by volatile_table). + * @precious_reg: Optional callback returning true if the register + * should not be read outside of a call from the driver + * (e.g., a clear on read interrupt status register). If this + * field is NULL but precious_table (see below) is not, the + * check is performed on such table (a register is precious if + * it belongs to one of the ranges specified by precious_table). + * @readable_noinc_reg: Optional callback returning true if the register + * supports multiple read operations without incrementing + * the register number. If this field is NULL but + * rd_noinc_table (see below) is not, the check is + * performed on such table (a register is no increment + * readable if it belongs to one of the ranges specified + * by rd_noinc_table). + * @disable_locking: This regmap is either protected by external means or + * is guaranteed not be be accessed from multiple threads. + * Don't use any locking mechanisms. + * @lock: Optional lock callback (overrides regmap's default lock + * function, based on spinlock or mutex). + * @unlock: As above for unlocking. + * @lock_arg: this field is passed as the only argument of lock/unlock + * functions (ignored in case regular lock/unlock functions + * are not overridden). + * @reg_read: Optional callback that if filled will be used to perform + * all the reads from the registers. Should only be provided for + * devices whose read operation cannot be represented as a simple + * read operation on a bus such as SPI, I2C, etc. Most of the + * devices do not need this. + * @reg_write: Same as above for writing. + * @fast_io: Register IO is fast. Use a spinlock instead of a mutex + * to perform locking. This field is ignored if custom lock/unlock + * functions are used (see fields lock/unlock of struct regmap_config). + * This field is a duplicate of a similar file in + * 'struct regmap_bus' and serves exact same purpose. + * Use it only for "no-bus" cases. + * @max_register: Optional, specifies the maximum valid register address. + * @wr_table: Optional, points to a struct regmap_access_table specifying + * valid ranges for write access. + * @rd_table: As above, for read access. + * @volatile_table: As above, for volatile registers. + * @precious_table: As above, for precious registers. + * @rd_noinc_table: As above, for no increment readable registers. + * @reg_defaults: Power on reset values for registers (for use with + * register cache support). + * @num_reg_defaults: Number of elements in reg_defaults. + * + * @read_flag_mask: Mask to be set in the top bytes of the register when doing + * a read. + * @write_flag_mask: Mask to be set in the top bytes of the register when doing + * a write. 
If both read_flag_mask and write_flag_mask are + * empty and zero_flag_mask is not set the regmap_bus default + * masks are used. + * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even + * if they are both empty. + * @use_single_rw: If set, converts the bulk read and write operations into + * a series of single read and write operations. This is useful + * for device that does not support bulk read and write. + * @can_multi_write: If set, the device supports the multi write mode of bulk + * write operations, if clear multi write requests will be + * split into individual write operations + * + * @cache_type: The actual cache type. + * @reg_defaults_raw: Power on reset values for registers (for use with + * register cache support). + * @num_reg_defaults_raw: Number of elements in reg_defaults_raw. + * @reg_format_endian: Endianness for formatted register addresses. If this is + * DEFAULT, the @reg_format_endian_default value from the + * regmap bus is used. + * @val_format_endian: Endianness for formatted register values. If this is + * DEFAULT, the @reg_format_endian_default value from the + * regmap bus is used. + * + * @ranges: Array of configuration entries for virtual address ranges. + * @num_ranges: Number of range configuration entries. + * @use_hwlock: Indicate if a hardware spinlock should be used. + * @hwlock_id: Specify the hardware spinlock id. + * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, + * HWLOCK_IRQ or 0. + */ +struct regmap_config { + const char *name; + + int reg_bits; + int reg_stride; + int pad_bits; + int val_bits; + + bool (*writeable_reg)(struct device *dev, unsigned int reg); + bool (*readable_reg)(struct device *dev, unsigned int reg); + bool (*volatile_reg)(struct device *dev, unsigned int reg); + bool (*precious_reg)(struct device *dev, unsigned int reg); + bool (*readable_noinc_reg)(struct device *dev, unsigned int reg); + + bool disable_locking; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; + + int (*reg_read)(void *context, unsigned int reg, unsigned int *val); + int (*reg_write)(void *context, unsigned int reg, unsigned int val); + + bool fast_io; + + unsigned int max_register; + const struct regmap_access_table *wr_table; + const struct regmap_access_table *rd_table; + const struct regmap_access_table *volatile_table; + const struct regmap_access_table *precious_table; + const struct regmap_access_table *rd_noinc_table; + const struct reg_default *reg_defaults; + unsigned int num_reg_defaults; + enum regcache_type cache_type; + const void *reg_defaults_raw; + unsigned int num_reg_defaults_raw; + + unsigned long read_flag_mask; + unsigned long write_flag_mask; + bool zero_flag_mask; + + bool use_single_rw; + bool can_multi_write; + + enum regmap_endian reg_format_endian; + enum regmap_endian val_format_endian; + + const struct regmap_range_cfg *ranges; + unsigned int num_ranges; + + bool use_hwlock; + unsigned int hwlock_id; + unsigned int hwlock_mode; +}; + +/** + * struct regmap_range_cfg - Configuration for indirectly accessed or paged + * registers. + * + * @name: Descriptive name for diagnostics + * + * @range_min: Address of the lowest register address in virtual range. + * @range_max: Address of the highest register in virtual range. + * + * @selector_reg: Register with selector field. + * @selector_mask: Bit shift for selector value. + * @selector_shift: Bit mask for selector value. + * + * @window_start: Address of first (lowest) register in data window. 
+ * @window_len: Number of registers in data window. + * + * Registers, mapped to this virtual range, are accessed in two steps: + * 1. page selector register update; + * 2. access through data window registers. + */ +struct regmap_range_cfg { + const char *name; + + /* Registers of virtual address range */ + unsigned int range_min; + unsigned int range_max; + + /* Page selector for indirect addressing */ + unsigned int selector_reg; + unsigned int selector_mask; + int selector_shift; + + /* Data window (per each page) */ + unsigned int window_start; + unsigned int window_len; +}; + +struct regmap_async; + +typedef int (*regmap_hw_write)(void *context, const void *data, + size_t count); +typedef int (*regmap_hw_gather_write)(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len); +typedef int (*regmap_hw_async_write)(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len, + struct regmap_async *async); +typedef int (*regmap_hw_read)(void *context, + const void *reg_buf, size_t reg_size, + void *val_buf, size_t val_size); +typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg, + unsigned int *val); +typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg, + unsigned int val); +typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg, + unsigned int mask, unsigned int val); +typedef struct regmap_async *(*regmap_hw_async_alloc)(void); +typedef void (*regmap_hw_free_context)(void *context); + +/** + * struct regmap_bus - Description of a hardware bus for the register map + * infrastructure. + * + * @fast_io: Register IO is fast. Use a spinlock instead of a mutex + * to perform locking. This field is ignored if custom lock/unlock + * functions are used (see fields lock/unlock of + * struct regmap_config). + * @write: Write operation. + * @gather_write: Write operation with split register/value, return -ENOTSUPP + * if not implemented on a given device. + * @async_write: Write operation which completes asynchronously, optional and + * must serialise with respect to non-async I/O. + * @reg_write: Write a single register value to the given register address. This + * write operation has to complete when returning from the function. + * @reg_update_bits: Update bits operation to be used against volatile + * registers, intended for devices supporting some mechanism + * for setting clearing bits without having to + * read/modify/write. + * @read: Read operation. Data is returned in the buffer used to transmit + * data. + * @reg_read: Read a single register value from a given register address. + * @free_context: Free context. + * @async_alloc: Allocate a regmap_async() structure. + * @read_flag_mask: Mask to be set in the top byte of the register when doing + * a read. + * @reg_format_endian_default: Default endianness for formatted register + * addresses. Used when the regmap_config specifies DEFAULT. If this is + * DEFAULT, BIG is assumed. + * @val_format_endian_default: Default endianness for formatted register + * values. Used when the regmap_config specifies DEFAULT. If this is + * DEFAULT, BIG is assumed. + * @max_raw_read: Max raw read size that can be used on the bus. + * @max_raw_write: Max raw write size that can be used on the bus. 
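+ *
+ * A minimal, hedged sketch of a "register at a time" bus (my_reg_read()
+ * and my_reg_write() are assumed driver-provided helpers matching the
+ * regmap_hw_reg_read and regmap_hw_reg_write signatures above):
+ *
+ *	static const struct regmap_bus my_bus = {
+ *		.reg_read = my_reg_read,
+ *		.reg_write = my_reg_write,
+ *	};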
+ */ +struct regmap_bus { + bool fast_io; + regmap_hw_write write; + regmap_hw_gather_write gather_write; + regmap_hw_async_write async_write; + regmap_hw_reg_write reg_write; + regmap_hw_reg_update_bits reg_update_bits; + regmap_hw_read read; + regmap_hw_reg_read reg_read; + regmap_hw_free_context free_context; + regmap_hw_async_alloc async_alloc; + u8 read_flag_mask; + enum regmap_endian reg_format_endian_default; + enum regmap_endian val_format_endian_default; + size_t max_raw_read; + size_t max_raw_write; +}; + +/* + * __regmap_init functions. + * + * These functions take a lock key and name parameter, and should not be called + * directly. Instead, use the regmap_init macros that generate a key and name + * for each call. + */ +struct regmap *__regmap_init(struct device *dev, + const struct regmap_bus *bus, + void *bus_context, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__regmap_init_i2c(struct i2c_client *i2c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__regmap_init_sccb(struct i2c_client *i2c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__regmap_init_slimbus(struct slim_device *slimbus, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__regmap_init_spi(struct spi_device *dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__regmap_init_spmi_base(struct spmi_device *dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__regmap_init_w1(struct device *w1_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id, + void __iomem *regs, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__regmap_init_sdw(struct sdw_slave *sdw, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); + +struct regmap *__devm_regmap_init(struct device *dev, + const struct regmap_bus *bus, + void *bus_context, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__devm_regmap_init_spi(struct spi_device *dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev, + const struct regmap_config *config, + 
struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__devm_regmap_init_w1(struct device *w1_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__devm_regmap_init_mmio_clk(struct device *dev, + const char *clk_id, + void __iomem *regs, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); +/* + * Wrapper for regmap_init macros to include a unique lockdep key and name + * for each call. No-op if CONFIG_LOCKDEP is not set. + * + * @fn: Real function to call (in the form __[*_]regmap_init[_*]) + * @name: Config variable name (#config in the calling macro) + **/ +#ifdef CONFIG_LOCKDEP +#define __regmap_lockdep_wrapper(fn, name, ...) \ +( \ + ({ \ + static struct lock_class_key _key; \ + fn(__VA_ARGS__, &_key, \ + KBUILD_BASENAME ":" \ + __stringify(__LINE__) ":" \ + "(" name ")->lock"); \ + }) \ +) +#else +#define __regmap_lockdep_wrapper(fn, name, ...) fn(__VA_ARGS__, NULL, NULL) +#endif + +/** + * regmap_init() - Initialise register map + * + * @dev: Device that will be interacted with + * @bus: Bus-specific callbacks to use with device + * @bus_context: Data passed to bus-specific callbacks + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. This function should generally not be called + * directly, it should be called by bus-specific init functions. + */ +#define regmap_init(dev, bus, bus_context, config) \ + __regmap_lockdep_wrapper(__regmap_init, #config, \ + dev, bus, bus_context, config) +int regmap_attach_dev(struct device *dev, struct regmap *map, + const struct regmap_config *config); + +/** + * regmap_init_i2c() - Initialise register map + * + * @i2c: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_i2c(i2c, config) \ + __regmap_lockdep_wrapper(__regmap_init_i2c, #config, \ + i2c, config) + +/** + * regmap_init_sccb() - Initialise register map + * + * @i2c: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_sccb(i2c, config) \ + __regmap_lockdep_wrapper(__regmap_init_sccb, #config, \ + i2c, config) + +/** + * regmap_init_slimbus() - Initialise register map + * + * @slimbus: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. 
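[Editorial sketch, not part of the upstream header: the regmap_init() kernel-doc above notes that it is normally called from bus-specific init code. A minimal custom bus using the single-register callbacks might look like the following, assuming <linux/regmap.h> is included; foo_ctx and its accessors are invented for illustration.]

struct foo_ctx {
	int (*hw_read)(unsigned int reg, unsigned int *val);
	int (*hw_write)(unsigned int reg, unsigned int val);
};

static int foo_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct foo_ctx *ctx = context;

	return ctx->hw_read(reg, val);
}

static int foo_reg_write(void *context, unsigned int reg, unsigned int val)
{
	struct foo_ctx *ctx = context;

	return ctx->hw_write(reg, val);
}

static const struct regmap_bus foo_bus = {
	.fast_io = true,	/* spinlock rather than mutex locking */
	.reg_read = foo_reg_read,
	.reg_write = foo_reg_write,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};

/* In the device's init path, with "ctx" a struct foo_ctx and "foo_config"
 * a struct regmap_config filled in as usual:
 *
 *	map = regmap_init(dev, &foo_bus, ctx, &foo_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */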
+ */ +#define regmap_init_slimbus(slimbus, config) \ + __regmap_lockdep_wrapper(__regmap_init_slimbus, #config, \ + slimbus, config) + +/** + * regmap_init_spi() - Initialise register map + * + * @dev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_spi(dev, config) \ + __regmap_lockdep_wrapper(__regmap_init_spi, #config, \ + dev, config) + +/** + * regmap_init_spmi_base() - Create regmap for the Base register space + * + * @dev: SPMI device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_spmi_base(dev, config) \ + __regmap_lockdep_wrapper(__regmap_init_spmi_base, #config, \ + dev, config) + +/** + * regmap_init_spmi_ext() - Create regmap for Ext register space + * + * @dev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_spmi_ext(dev, config) \ + __regmap_lockdep_wrapper(__regmap_init_spmi_ext, #config, \ + dev, config) + +/** + * regmap_init_w1() - Initialise register map + * + * @w1_dev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_w1(w1_dev, config) \ + __regmap_lockdep_wrapper(__regmap_init_w1, #config, \ + w1_dev, config) + +/** + * regmap_init_mmio_clk() - Initialise register map with register clock + * + * @dev: Device that will be interacted with + * @clk_id: register clock consumer ID + * @regs: Pointer to memory-mapped IO region + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_mmio_clk(dev, clk_id, regs, config) \ + __regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \ + dev, clk_id, regs, config) + +/** + * regmap_init_mmio() - Initialise register map + * + * @dev: Device that will be interacted with + * @regs: Pointer to memory-mapped IO region + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_mmio(dev, regs, config) \ + regmap_init_mmio_clk(dev, NULL, regs, config) + +/** + * regmap_init_ac97() - Initialise AC'97 register map + * + * @ac97: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_ac97(ac97, config) \ + __regmap_lockdep_wrapper(__regmap_init_ac97, #config, \ + ac97, config) +bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); + +/** + * regmap_init_sdw() - Initialise register map + * + * @sdw: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. 
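[Editorial sketch, not part of the upstream header: a typical use of regmap_init_mmio() defined above for a memory-mapped register block. The register widths and addresses are illustrative only.]

/* Illustrative 32-bit MMIO block with one register every 4 bytes */
static const struct regmap_config foo_mmio_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0xfc,
};

/* In the device's init path, with "base" a void __iomem * obtained from
 * ioremap()/devm_ioremap_resource():
 *
 *	map = regmap_init_mmio(dev, base, &foo_mmio_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */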
+ */ +#define regmap_init_sdw(sdw, config) \ + __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \ + sdw, config) + + +/** + * devm_regmap_init() - Initialise managed register map + * + * @dev: Device that will be interacted with + * @bus: Bus-specific callbacks to use with device + * @bus_context: Data passed to bus-specific callbacks + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. This function should generally not be called + * directly, it should be called by bus-specific init functions. The + * map will be automatically freed by the device management code. + */ +#define devm_regmap_init(dev, bus, bus_context, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init, #config, \ + dev, bus, bus_context, config) + +/** + * devm_regmap_init_i2c() - Initialise managed register map + * + * @i2c: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_i2c(i2c, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config, \ + i2c, config) + +/** + * devm_regmap_init_sccb() - Initialise managed register map + * + * @i2c: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_sccb(i2c, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_sccb, #config, \ + i2c, config) + +/** + * devm_regmap_init_spi() - Initialise register map + * + * @dev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The map will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_spi(dev, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_spi, #config, \ + dev, config) + +/** + * devm_regmap_init_spmi_base() - Create managed regmap for Base register space + * + * @dev: SPMI device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_spmi_base(dev, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_spmi_base, #config, \ + dev, config) + +/** + * devm_regmap_init_spmi_ext() - Create managed regmap for Ext register space + * + * @dev: SPMI device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_spmi_ext(dev, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_spmi_ext, #config, \ + dev, config) + +/** + * devm_regmap_init_w1() - Initialise managed register map + * + * @w1_dev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. 
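[Editorial sketch, not part of the upstream header: the common I2C driver pattern using the managed variant documented above, so the regmap is released automatically on driver detach. Assumes <linux/i2c.h> and <linux/regmap.h>; the "foo" names and register layout are hypothetical.]

static const struct regmap_config foo_i2c_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7f,
};

static int foo_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(i2c, &foo_i2c_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* The regmap is freed by the device management code on detach */
	return 0;
}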
+ */ +#define devm_regmap_init_w1(w1_dev, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_w1, #config, \ + w1_dev, config) +/** + * devm_regmap_init_mmio_clk() - Initialise managed register map with clock + * + * @dev: Device that will be interacted with + * @clk_id: register clock consumer ID + * @regs: Pointer to memory-mapped IO region + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \ + dev, clk_id, regs, config) + +/** + * devm_regmap_init_mmio() - Initialise managed register map + * + * @dev: Device that will be interacted with + * @regs: Pointer to memory-mapped IO region + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_mmio(dev, regs, config) \ + devm_regmap_init_mmio_clk(dev, NULL, regs, config) + +/** + * devm_regmap_init_ac97() - Initialise AC'97 register map + * + * @ac97: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_ac97(ac97, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \ + ac97, config) + +/** + * devm_regmap_init_sdw() - Initialise managed register map + * + * @sdw: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_sdw(sdw, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \ + sdw, config) + +/** + * devm_regmap_init_slimbus() - Initialise managed register map + * + * @slimbus: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. 
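[Editorial sketch, not part of the upstream header: once a map has been obtained with one of the initialisers above, consumers use the access functions declared further below. FOO_REG_CTRL/FOO_CTRL_EN are invented for illustration; the final call is what the regmap_update_bits() convenience wrapper defined earlier in this header expands to.]

#define FOO_REG_CTRL	0x04
#define FOO_CTRL_EN	BIT(0)

static int foo_enable(struct regmap *map)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, FOO_REG_CTRL, &val);
	if (ret)
		return ret;

	if (val & FOO_CTRL_EN)
		return 0;	/* already enabled */

	/* Read/modify/write of a single bit, cache-aware */
	return regmap_update_bits_base(map, FOO_REG_CTRL, FOO_CTRL_EN,
				       FOO_CTRL_EN, NULL, false, false);
}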
+ */ +#define devm_regmap_init_slimbus(slimbus, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_slimbus, #config, \ + slimbus, config) +int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk); +void regmap_mmio_detach_clk(struct regmap *map); +void regmap_exit(struct regmap *map); +int regmap_reinit_cache(struct regmap *map, + const struct regmap_config *config); +struct regmap *dev_get_regmap(struct device *dev, const char *name); +struct device *regmap_get_device(struct regmap *map); +int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); +int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val); +int regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len); +int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, + size_t val_count); +int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, + int num_regs); +int regmap_multi_reg_write_bypassed(struct regmap *map, + const struct reg_sequence *regs, + int num_regs); +int regmap_raw_write_async(struct regmap *map, unsigned int reg, + const void *val, size_t val_len); +int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); +int regmap_raw_read(struct regmap *map, unsigned int reg, + void *val, size_t val_len); +int regmap_noinc_read(struct regmap *map, unsigned int reg, + void *val, size_t val_len); +int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, + size_t val_count); +int regmap_update_bits_base(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force); +int regmap_get_val_bytes(struct regmap *map); +int regmap_get_max_register(struct regmap *map); +int regmap_get_reg_stride(struct regmap *map); +int regmap_async_complete(struct regmap *map); +bool regmap_can_raw_write(struct regmap *map); +size_t regmap_get_raw_read_max(struct regmap *map); +size_t regmap_get_raw_write_max(struct regmap *map); + +int regcache_sync(struct regmap *map); +int regcache_sync_region(struct regmap *map, unsigned int min, + unsigned int max); +int regcache_drop_region(struct regmap *map, unsigned int min, + unsigned int max); +void regcache_cache_only(struct regmap *map, bool enable); +void regcache_cache_bypass(struct regmap *map, bool enable); +void regcache_mark_dirty(struct regmap *map); + +bool regmap_check_range_table(struct regmap *map, unsigned int reg, + const struct regmap_access_table *table); + +int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs, + int num_regs); +int regmap_parse_val(struct regmap *map, const void *buf, + unsigned int *val); + +static inline bool regmap_reg_in_range(unsigned int reg, + const struct regmap_range *range) +{ + return reg >= range->range_min && reg <= range->range_max; +} + +bool regmap_reg_in_ranges(unsigned int reg, + const struct regmap_range *ranges, + unsigned int nranges); + +/** + * struct reg_field - Description of an register field + * + * @reg: Offset of the register within the regmap bank + * @lsb: lsb of the register field. + * @msb: msb of the register field. 
+ * @id_size: port size if it has some ports + * @id_offset: address offset for each ports + */ +struct reg_field { + unsigned int reg; + unsigned int lsb; + unsigned int msb; + unsigned int id_size; + unsigned int id_offset; +}; + +#define REG_FIELD(_reg, _lsb, _msb) { \ + .reg = _reg, \ + .lsb = _lsb, \ + .msb = _msb, \ + } + +struct regmap_field *regmap_field_alloc(struct regmap *regmap, + struct reg_field reg_field); +void regmap_field_free(struct regmap_field *field); + +struct regmap_field *devm_regmap_field_alloc(struct device *dev, + struct regmap *regmap, struct reg_field reg_field); +void devm_regmap_field_free(struct device *dev, struct regmap_field *field); + +int regmap_field_read(struct regmap_field *field, unsigned int *val); +int regmap_field_update_bits_base(struct regmap_field *field, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force); +int regmap_fields_read(struct regmap_field *field, unsigned int id, + unsigned int *val); +int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force); + +/** + * struct regmap_irq - Description of an IRQ for the generic regmap irq_chip. + * + * @reg_offset: Offset of the status/mask register within the bank + * @mask: Mask used to flag/control the register. + * @type_reg_offset: Offset register for the irq type setting. + * @type_rising_mask: Mask bit to configure RISING type irq. + * @type_falling_mask: Mask bit to configure FALLING type irq. + */ +struct regmap_irq { + unsigned int reg_offset; + unsigned int mask; + unsigned int type_reg_offset; + unsigned int type_rising_mask; + unsigned int type_falling_mask; +}; + +#define REGMAP_IRQ_REG(_irq, _off, _mask) \ + [_irq] = { .reg_offset = (_off), .mask = (_mask) } + +/** + * struct regmap_irq_chip - Description of a generic regmap irq_chip. + * + * @name: Descriptive name for IRQ controller. + * + * @status_base: Base status register address. + * @mask_base: Base mask register address. + * @mask_writeonly: Base mask register is write only. + * @unmask_base: Base unmask register address. for chips who have + * separate mask and unmask registers + * @ack_base: Base ack address. If zero then the chip is clear on read. + * Using zero value is possible with @use_ack bit. + * @wake_base: Base address for wake enables. If zero unsupported. + * @type_base: Base address for irq type. If zero unsupported. + * @irq_reg_stride: Stride to use for chips where registers are not contiguous. + * @init_ack_masked: Ack all masked interrupts once during initalization. + * @mask_invert: Inverted mask register: cleared bits are masked out. + * @use_ack: Use @ack register even if it is zero. + * @ack_invert: Inverted ack register: cleared bits for ack. + * @wake_invert: Inverted wake register: cleared bits are wake enabled. + * @type_invert: Invert the type flags. + * @runtime_pm: Hold a runtime PM lock on the device when accessing it. + * + * @num_regs: Number of registers in each control bank. + * @irqs: Descriptors for individual IRQs. Interrupt numbers are + * assigned based on the index in the array of the interrupt. + * @num_irqs: Number of descriptors. + * @num_type_reg: Number of type registers. + * @type_reg_stride: Stride to use for chips where type registers are not + * contiguous. + * @handle_pre_irq: Driver specific callback to handle interrupt from device + * before regmap_irq_handler process the interrupts. 
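[Editorial sketch, not part of the upstream header: how the REG_FIELD() helper and the regmap_field accessors declared above are normally used. The field layout and names are invented for illustration.]

/* Hypothetical field: bits [6:4] of register 0x10 */
static const struct reg_field foo_mode_field = REG_FIELD(0x10, 4, 6);

static int foo_get_mode(struct device *dev, struct regmap *map,
			unsigned int *mode)
{
	struct regmap_field *field;

	field = devm_regmap_field_alloc(dev, map, foo_mode_field);
	if (IS_ERR(field))
		return PTR_ERR(field);

	/* Reads register 0x10 and returns only bits [6:4], shifted down */
	return regmap_field_read(field, mode);
}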
+ * @handle_post_irq: Driver specific callback to handle interrupt from device + * after handling the interrupts in regmap_irq_handler(). + * @irq_drv_data: Driver specific IRQ data which is passed as parameter when + * driver specific pre/post interrupt handler is called. + * + * This is not intended to handle every possible interrupt controller, but + * it should handle a substantial proportion of those that are found in the + * wild. + */ +struct regmap_irq_chip { + const char *name; + + unsigned int status_base; + unsigned int mask_base; + unsigned int unmask_base; + unsigned int ack_base; + unsigned int wake_base; + unsigned int type_base; + unsigned int irq_reg_stride; + bool mask_writeonly:1; + bool init_ack_masked:1; + bool mask_invert:1; + bool use_ack:1; + bool ack_invert:1; + bool wake_invert:1; + bool runtime_pm:1; + bool type_invert:1; + + int num_regs; + + const struct regmap_irq *irqs; + int num_irqs; + + int num_type_reg; + unsigned int type_reg_stride; + + int (*handle_pre_irq)(void *irq_drv_data); + int (*handle_post_irq)(void *irq_drv_data); + void *irq_drv_data; +}; + +struct regmap_irq_chip_data; + +int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, + int irq_base, const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); +void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); + +int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); +void devm_regmap_del_irq_chip(struct device *dev, int irq, + struct regmap_irq_chip_data *data); + +int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data); +int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq); +struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data); + +#else + +/* + * These stubs should only ever be called by generic code which has + * regmap based facilities, if they ever get called at runtime + * something is going wrong and something probably needs to select + * REGMAP. 
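[Editorial sketch, not part of the upstream header: a minimal regmap_irq_chip for a hypothetical device with one status/mask/ack register set and two interrupt sources, using the REGMAP_IRQ_REG() helper and the registration functions declared above. All addresses and names are illustrative.]

enum { FOO_IRQ_ALARM, FOO_IRQ_ERROR };

static const struct regmap_irq foo_irqs[] = {
	REGMAP_IRQ_REG(FOO_IRQ_ALARM, 0, BIT(0)),
	REGMAP_IRQ_REG(FOO_IRQ_ERROR, 0, BIT(1)),
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name = "foo",
	.status_base = 0x20,
	.mask_base = 0x21,
	.ack_base = 0x22,
	.num_regs = 1,
	.irqs = foo_irqs,
	.num_irqs = ARRAY_SIZE(foo_irqs),
};

/* In probe, with "irq" the chip's interrupt line:
 *
 *	ret = devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT, 0,
 *				       &foo_irq_chip, &irq_data);
 *	if (ret)
 *		return ret;
 *	virq = regmap_irq_get_virq(irq_data, FOO_IRQ_ALARM);
 */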
+ */ + +static inline int regmap_write(struct regmap *map, unsigned int reg, + unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_write_async(struct regmap *map, unsigned int reg, + unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg, + const void *val, size_t val_len) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_bulk_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_count) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_read(struct regmap *map, unsigned int reg, + unsigned int *val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_raw_read(struct regmap *map, unsigned int reg, + void *val, size_t val_len) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_noinc_read(struct regmap *map, unsigned int reg, + void *val, size_t val_len) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_bulk_read(struct regmap *map, unsigned int reg, + void *val, size_t val_count) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_update_bits_base(struct regmap_field *field, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_fields_update_bits_base(struct regmap_field *field, + unsigned int id, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_get_val_bytes(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_get_max_register(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_get_reg_stride(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regcache_sync(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regcache_sync_region(struct regmap *map, unsigned int min, + unsigned int max) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regcache_drop_region(struct regmap *map, unsigned int min, + unsigned int max) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline void regcache_cache_only(struct regmap *map, bool enable) +{ + WARN_ONCE(1, "regmap API is disabled"); +} + +static inline void regcache_cache_bypass(struct regmap *map, bool enable) +{ + WARN_ONCE(1, "regmap API is disabled"); +} + +static inline void regcache_mark_dirty(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); +} + +static inline void regmap_async_complete(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); +} + 
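[Editorial sketch, not part of the upstream header: the cache management calls declared before this stub section (regcache_cache_only(), regcache_mark_dirty(), regcache_sync()) are typically used around a power-off/power-on cycle, roughly as below; the suspend/resume wrappers are hypothetical.]

static int foo_suspend(struct regmap *map)
{
	regcache_cache_only(map, true);	/* writes go to the cache only  */
	regcache_mark_dirty(map);	/* everything must be replayed  */
	/* ... power the device off ... */
	return 0;
}

static int foo_resume(struct regmap *map)
{
	/* ... power the device back on ... */
	regcache_cache_only(map, false);
	return regcache_sync(map);	/* replay cached register values */
}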
+static inline int regmap_register_patch(struct regmap *map, + const struct reg_sequence *regs, + int num_regs) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_parse_val(struct regmap *map, const void *buf, + unsigned int *val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline struct regmap *dev_get_regmap(struct device *dev, + const char *name) +{ + return NULL; +} + +static inline struct device *regmap_get_device(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); + return NULL; +} + +#endif + +#endif diff --git a/include/linux/regset.h b/include/linux/regset.h new file mode 100644 index 000000000..494cedaaf --- /dev/null +++ b/include/linux/regset.h @@ -0,0 +1,428 @@ +/* + * User-mode machine state access + * + * Copyright (C) 2007 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * Red Hat Author: Roland McGrath. + */ + +#ifndef _LINUX_REGSET_H +#define _LINUX_REGSET_H 1 + +#include +#include +#include +#include +struct task_struct; +struct user_regset; + + +/** + * user_regset_active_fn - type of @active function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * + * Return -%ENODEV if not available on the hardware found. + * Return %0 if no interesting state in this thread. + * Return >%0 number of @size units of interesting state. + * Any get call fetching state beyond that number will + * see the default initialization state for this data, + * so a caller that knows what the default state is need + * not copy it all out. + * This call is optional; the pointer is %NULL if there + * is no inexpensive check to yield a value < @n. + */ +typedef int user_regset_active_fn(struct task_struct *target, + const struct user_regset *regset); + +/** + * user_regset_get_fn - type of @get function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * @pos: offset into the regset data to access, in bytes + * @count: amount of data to copy, in bytes + * @kbuf: if not %NULL, a kernel-space pointer to copy into + * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into + * + * Fetch register values. Return %0 on success; -%EIO or -%ENODEV + * are usual failure returns. The @pos and @count values are in + * bytes, but must be properly aligned. If @kbuf is non-null, that + * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then + * ubuf gives a userland pointer to access directly, and an -%EFAULT + * return value is possible. + */ +typedef int user_regset_get_fn(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf); + +/** + * user_regset_set_fn - type of @set function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * @pos: offset into the regset data to access, in bytes + * @count: amount of data to copy, in bytes + * @kbuf: if not %NULL, a kernel-space pointer to copy from + * @ubuf: if @kbuf is %NULL, a user-space pointer to copy from + * + * Store register values. Return %0 on success; -%EIO or -%ENODEV + * are usual failure returns. The @pos and @count values are in + * bytes, but must be properly aligned. If @kbuf is non-null, that + * buffer is used and @ubuf is ignored. 
If @kbuf is %NULL, then + * ubuf gives a userland pointer to access directly, and an -%EFAULT + * return value is possible. + */ +typedef int user_regset_set_fn(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf); + +/** + * user_regset_writeback_fn - type of @writeback function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * @immediate: zero if writeback at completion of next context switch is OK + * + * This call is optional; usually the pointer is %NULL. When + * provided, there is some user memory associated with this regset's + * hardware, such as memory backing cached register data on register + * window machines; the regset's data controls what user memory is + * used (e.g. via the stack pointer value). + * + * Write register data back to user memory. If the @immediate flag + * is nonzero, it must be written to the user memory so uaccess or + * access_process_vm() can see it when this call returns; if zero, + * then it must be written back by the time the task completes a + * context switch (as synchronized with wait_task_inactive()). + * Return %0 on success or if there was nothing to do, -%EFAULT for + * a memory problem (bad stack pointer or whatever), or -%EIO for a + * hardware problem. + */ +typedef int user_regset_writeback_fn(struct task_struct *target, + const struct user_regset *regset, + int immediate); + +/** + * user_regset_get_size_fn - type of @get_size function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * + * This call is optional; usually the pointer is %NULL. + * + * When provided, this function must return the current size of regset + * data, as observed by the @get function in &struct user_regset. The + * value returned must be a multiple of @size. The returned size is + * required to be valid only until the next time (if any) @regset is + * modified for @target. + * + * This function is intended for dynamically sized regsets. A regset + * that is statically sized does not need to implement it. + * + * This function should not be called directly: instead, callers should + * call regset_size() to determine the current size of a regset. + */ +typedef unsigned int user_regset_get_size_fn(struct task_struct *target, + const struct user_regset *regset); + +/** + * struct user_regset - accessible thread CPU state + * @n: Number of slots (registers). + * @size: Size in bytes of a slot (register). + * @align: Required alignment, in bytes. + * @bias: Bias from natural indexing. + * @core_note_type: ELF note @n_type value used in core dumps. + * @get: Function to fetch values. + * @set: Function to store values. + * @active: Function to report if regset is active, or %NULL. + * @writeback: Function to write data back to user memory, or %NULL. + * @get_size: Function to return the regset's size, or %NULL. + * + * This data structure describes a machine resource we call a register set. + * This is part of the state of an individual thread, not necessarily + * actual CPU registers per se. A register set consists of a number of + * similar slots, given by @n. Each slot is @size bytes, and aligned to + * @align bytes (which is at least @size). For dynamically-sized + * regsets, @n must contain the maximum possible number of slots for the + * regset, and @get_size must point to a function that returns the + * current regset size. 
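[Editorial sketch, not part of the upstream header: an architecture's ->get() routine for the contract described above usually just forwards to the user_regset_copyout() helper defined later in this file. The function name is hypothetical; task_pt_regs() is the usual per-architecture accessor for the saved register block.]

static int foo_gpr_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);

	/* Copy the fixed-size register block, honouring pos/count and
	 * whichever of kbuf/ubuf the caller supplied. */
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				   0, sizeof(*regs));
}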
+ * + * Callers that need to know only the current size of the regset and do + * not care about its internal structure should call regset_size() + * instead of inspecting @n or calling @get_size. + * + * For backward compatibility, the @get and @set methods must pad to, or + * accept, @n * @size bytes, even if the current regset size is smaller. + * The precise semantics of these operations depend on the regset being + * accessed. + * + * The functions to which &struct user_regset members point must be + * called only on the current thread or on a thread that is in + * %TASK_STOPPED or %TASK_TRACED state, that we are guaranteed will not + * be woken up and return to user mode, and that we have called + * wait_task_inactive() on. (The target thread always might wake up for + * SIGKILL while these functions are working, in which case that + * thread's user_regset state might be scrambled.) + * + * The @pos argument must be aligned according to @align; the @count + * argument must be a multiple of @size. These functions are not + * responsible for checking for invalid arguments. + * + * When there is a natural value to use as an index, @bias gives the + * difference between the natural index and the slot index for the + * register set. For example, x86 GDT segment descriptors form a regset; + * the segment selector produces a natural index, but only a subset of + * that index space is available as a regset (the TLS slots); subtracting + * @bias from a segment selector index value computes the regset slot. + * + * If nonzero, @core_note_type gives the n_type field (NT_* value) + * of the core file note in which this regset's data appears. + * NT_PRSTATUS is a special case in that the regset data starts at + * offsetof(struct elf_prstatus, pr_reg) into the note data; that is + * part of the per-machine ELF formats userland knows about. In + * other cases, the core file note contains exactly the whole regset + * (@n * @size) and nothing else. The core file note is normally + * omitted when there is an @active function and it returns zero. + */ +struct user_regset { + user_regset_get_fn *get; + user_regset_set_fn *set; + user_regset_active_fn *active; + user_regset_writeback_fn *writeback; + user_regset_get_size_fn *get_size; + unsigned int n; + unsigned int size; + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +}; + +/** + * struct user_regset_view - available regsets + * @name: Identifier, e.g. UTS_MACHINE string. + * @regsets: Array of @n regsets available in this view. + * @n: Number of elements in @regsets. + * @e_machine: ELF header @e_machine %EM_* value written in core dumps. + * @e_flags: ELF header @e_flags value written in core dumps. + * @ei_osabi: ELF header @e_ident[%EI_OSABI] value written in core dumps. + * + * A regset view is a collection of regsets (&struct user_regset, + * above). This describes all the state of a thread that can be seen + * from a given architecture/ABI environment. More than one view might + * refer to the same &struct user_regset, or more than one regset + * might refer to the same machine-specific state in the thread. For + * example, a 32-bit thread's state could be examined from the 32-bit + * view or from the 64-bit view. Either method reaches the same thread + * register state, doing appropriate widening or truncation. 
+ */ +struct user_regset_view { + const char *name; + const struct user_regset *regsets; + unsigned int n; + u32 e_flags; + u16 e_machine; + u8 ei_osabi; +}; + +/* + * This is documented here rather than at the definition sites because its + * implementation is machine-dependent but its interface is universal. + */ +/** + * task_user_regset_view - Return the process's native regset view. + * @tsk: a thread of the process in question + * + * Return the &struct user_regset_view that is native for the given process. + * For example, what it would access when it called ptrace(). + * Throughout the life of the process, this only changes at exec. + */ +const struct user_regset_view *task_user_regset_view(struct task_struct *tsk); + + +/* + * These are helpers for writing regset get/set functions in arch code. + * Because @start_pos and @end_pos are always compile-time constants, + * these are inlined into very little code though they look large. + * + * Use one or more calls sequentially for each chunk of regset data stored + * contiguously in memory. Call with constants for @start_pos and @end_pos, + * giving the range of byte positions in the regset that data corresponds + * to; @end_pos can be -1 if this chunk is at the end of the regset layout. + * Each call updates the arguments to point past its chunk. + */ + +static inline int user_regset_copyout(unsigned int *pos, unsigned int *count, + void **kbuf, + void __user **ubuf, const void *data, + const int start_pos, const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + data += *pos - start_pos; + if (*kbuf) { + memcpy(*kbuf, data, copy); + *kbuf += copy; + } else if (__copy_to_user(*ubuf, data, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, + const void **kbuf, + const void __user **ubuf, void *data, + const int start_pos, const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + data += *pos - start_pos; + if (*kbuf) { + memcpy(data, *kbuf, copy); + *kbuf += copy; + } else if (__copy_from_user(data, *ubuf, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +/* + * These two parallel the two above, but for portions of a regset layout + * that always read as all-zero or for which writes are ignored. + */ +static inline int user_regset_copyout_zero(unsigned int *pos, + unsigned int *count, + void **kbuf, void __user **ubuf, + const int start_pos, + const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + if (*kbuf) { + memset(*kbuf, 0, copy); + *kbuf += copy; + } else if (__clear_user(*ubuf, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +static inline int user_regset_copyin_ignore(unsigned int *pos, + unsigned int *count, + const void **kbuf, + const void __user **ubuf, + const int start_pos, + const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? 
*count + : min(*count, end_pos - *pos)); + if (*kbuf) + *kbuf += copy; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +/** + * copy_regset_to_user - fetch a thread's user_regset data into user memory + * @target: thread to be examined + * @view: &struct user_regset_view describing user thread machine state + * @setno: index in @view->regsets + * @offset: offset into the regset data, in bytes + * @size: amount of data to copy, in bytes + * @data: user-mode pointer to copy into + */ +static inline int copy_regset_to_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, + unsigned int offset, unsigned int size, + void __user *data) +{ + const struct user_regset *regset = &view->regsets[setno]; + + if (!regset->get) + return -EOPNOTSUPP; + + if (!access_ok(VERIFY_WRITE, data, size)) + return -EFAULT; + + return regset->get(target, regset, offset, size, NULL, data); +} + +/** + * copy_regset_from_user - store into thread's user_regset data from user memory + * @target: thread to be examined + * @view: &struct user_regset_view describing user thread machine state + * @setno: index in @view->regsets + * @offset: offset into the regset data, in bytes + * @size: amount of data to copy, in bytes + * @data: user-mode pointer to copy from + */ +static inline int copy_regset_from_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, + unsigned int offset, unsigned int size, + const void __user *data) +{ + const struct user_regset *regset = &view->regsets[setno]; + + if (!regset->set) + return -EOPNOTSUPP; + + if (!access_ok(VERIFY_READ, data, size)) + return -EFAULT; + + return regset->set(target, regset, offset, size, NULL, data); +} + +/** + * regset_size - determine the current size of a regset + * @target: thread to be examined + * @regset: regset to be examined + * + * Note that the returned size is valid only until the next time + * (if any) @regset is modified for @target. 
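[Editorial sketch, not part of the upstream header: how the structures above fit together in architecture code. A single general-purpose regset is wired into a user_regset_view, which task_user_regset_view() then returns. Names are hypothetical, foo_gpr_get refers to the earlier sketch, EM_NONE stands in for the real ELF machine value, and the ->set() method is omitted for brevity.]

static const struct user_regset foo_regsets[] = {
	[0] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct pt_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = foo_gpr_get,	/* e.g. the sketch shown earlier */
	},
};

static const struct user_regset_view foo_view = {
	.name = "foo",
	.e_machine = EM_NONE,		/* placeholder ELF machine value */
	.regsets = foo_regsets,
	.n = ARRAY_SIZE(foo_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &foo_view;
}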
+ */ +static inline unsigned int regset_size(struct task_struct *target, + const struct user_regset *regset) +{ + if (!regset->get_size) + return regset->n * regset->size; + else + return regset->get_size(target, regset); +} + +#endif /* */ diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h new file mode 100644 index 000000000..d48ab3e66 --- /dev/null +++ b/include/linux/regulator/ab8500.h @@ -0,0 +1,167 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * + * Authors: Sundar Iyer for ST-Ericsson + * Bengt Jonsson for ST-Ericsson + * Daniel Willerud for ST-Ericsson + */ + +#ifndef __LINUX_MFD_AB8500_REGULATOR_H +#define __LINUX_MFD_AB8500_REGULATOR_H + +#include + +/* AB8500 regulators */ +enum ab8500_regulator_id { + AB8500_LDO_AUX1, + AB8500_LDO_AUX2, + AB8500_LDO_AUX3, + AB8500_LDO_INTCORE, + AB8500_LDO_TVOUT, + AB8500_LDO_AUDIO, + AB8500_LDO_ANAMIC1, + AB8500_LDO_ANAMIC2, + AB8500_LDO_DMIC, + AB8500_LDO_ANA, + AB8500_NUM_REGULATORS, +}; + +/* AB8505 regulators */ +enum ab8505_regulator_id { + AB8505_LDO_AUX1, + AB8505_LDO_AUX2, + AB8505_LDO_AUX3, + AB8505_LDO_AUX4, + AB8505_LDO_AUX5, + AB8505_LDO_AUX6, + AB8505_LDO_INTCORE, + AB8505_LDO_ADC, + AB8505_LDO_AUDIO, + AB8505_LDO_ANAMIC1, + AB8505_LDO_ANAMIC2, + AB8505_LDO_AUX8, + AB8505_LDO_ANA, + AB8505_NUM_REGULATORS, +}; + +/* AB8500 and AB8505 register initialization */ +struct ab8500_regulator_reg_init { + int id; + u8 mask; + u8 value; +}; + +#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \ + { \ + .id = _id, \ + .mask = _mask, \ + .value = _value, \ + } + +/* AB8500 registers */ +enum ab8500_regulator_reg { + AB8500_REGUREQUESTCTRL2, + AB8500_REGUREQUESTCTRL3, + AB8500_REGUREQUESTCTRL4, + AB8500_REGUSYSCLKREQ1HPVALID1, + AB8500_REGUSYSCLKREQ1HPVALID2, + AB8500_REGUHWHPREQ1VALID1, + AB8500_REGUHWHPREQ1VALID2, + AB8500_REGUHWHPREQ2VALID1, + AB8500_REGUHWHPREQ2VALID2, + AB8500_REGUSWHPREQVALID1, + AB8500_REGUSWHPREQVALID2, + AB8500_REGUSYSCLKREQVALID1, + AB8500_REGUSYSCLKREQVALID2, + AB8500_REGUMISC1, + AB8500_VAUDIOSUPPLY, + AB8500_REGUCTRL1VAMIC, + AB8500_VPLLVANAREGU, + AB8500_VREFDDR, + AB8500_EXTSUPPLYREGU, + AB8500_VAUX12REGU, + AB8500_VRF1VAUX3REGU, + AB8500_VAUX1SEL, + AB8500_VAUX2SEL, + AB8500_VRF1VAUX3SEL, + AB8500_REGUCTRL2SPARE, + AB8500_REGUCTRLDISCH, + AB8500_REGUCTRLDISCH2, + AB8500_NUM_REGULATOR_REGISTERS, +}; + +/* AB8505 registers */ +enum ab8505_regulator_reg { + AB8505_REGUREQUESTCTRL1, + AB8505_REGUREQUESTCTRL2, + AB8505_REGUREQUESTCTRL3, + AB8505_REGUREQUESTCTRL4, + AB8505_REGUSYSCLKREQ1HPVALID1, + AB8505_REGUSYSCLKREQ1HPVALID2, + AB8505_REGUHWHPREQ1VALID1, + AB8505_REGUHWHPREQ1VALID2, + AB8505_REGUHWHPREQ2VALID1, + AB8505_REGUHWHPREQ2VALID2, + AB8505_REGUSWHPREQVALID1, + AB8505_REGUSWHPREQVALID2, + AB8505_REGUSYSCLKREQVALID1, + AB8505_REGUSYSCLKREQVALID2, + AB8505_REGUVAUX4REQVALID, + AB8505_REGUMISC1, + AB8505_VAUDIOSUPPLY, + AB8505_REGUCTRL1VAMIC, + AB8505_VSMPSAREGU, + AB8505_VSMPSBREGU, + AB8505_VSAFEREGU, /* NOTE! PRCMU register */ + AB8505_VPLLVANAREGU, + AB8505_EXTSUPPLYREGU, + AB8505_VAUX12REGU, + AB8505_VRF1VAUX3REGU, + AB8505_VSMPSASEL1, + AB8505_VSMPSASEL2, + AB8505_VSMPSASEL3, + AB8505_VSMPSBSEL1, + AB8505_VSMPSBSEL2, + AB8505_VSMPSBSEL3, + AB8505_VSAFESEL1, /* NOTE! PRCMU register */ + AB8505_VSAFESEL2, /* NOTE! PRCMU register */ + AB8505_VSAFESEL3, /* NOTE! 
PRCMU register */ + AB8505_VAUX1SEL, + AB8505_VAUX2SEL, + AB8505_VRF1VAUX3SEL, + AB8505_VAUX4REQCTRL, + AB8505_VAUX4REGU, + AB8505_VAUX4SEL, + AB8505_REGUCTRLDISCH, + AB8505_REGUCTRLDISCH2, + AB8505_REGUCTRLDISCH3, + AB8505_CTRLVAUX5, + AB8505_CTRLVAUX6, + AB8505_NUM_REGULATOR_REGISTERS, +}; + +/* AB8500 external regulators */ +struct ab8500_ext_regulator_cfg { + bool hwreq; /* requires hw mode or high power mode */ +}; + +enum ab8500_ext_regulator_id { + AB8500_EXT_SUPPLY1, + AB8500_EXT_SUPPLY2, + AB8500_EXT_SUPPLY3, + AB8500_NUM_EXT_REGULATORS, +}; + +/* AB8500 regulator platform data */ +struct ab8500_regulator_platform_data { + int num_reg_init; + struct ab8500_regulator_reg_init *reg_init; + int num_regulator; + struct regulator_init_data *regulator; + int num_ext_regulator; + struct regulator_init_data *ext_regulator; +}; + +#endif diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h new file mode 100644 index 000000000..113d861a1 --- /dev/null +++ b/include/linux/regulator/act8865.h @@ -0,0 +1,90 @@ +/* + * act8865.h -- Voltage regulation for active-semi act88xx PMUs + * + * Copyright (C) 2013 Atmel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_REGULATOR_ACT8865_H +#define __LINUX_REGULATOR_ACT8865_H + +#include + +enum { + ACT8600_ID_DCDC1, + ACT8600_ID_DCDC2, + ACT8600_ID_DCDC3, + ACT8600_ID_SUDCDC4, + ACT8600_ID_LDO5, + ACT8600_ID_LDO6, + ACT8600_ID_LDO7, + ACT8600_ID_LDO8, + ACT8600_ID_LDO9, + ACT8600_ID_LDO10, +}; + +enum { + ACT8865_ID_DCDC1, + ACT8865_ID_DCDC2, + ACT8865_ID_DCDC3, + ACT8865_ID_LDO1, + ACT8865_ID_LDO2, + ACT8865_ID_LDO3, + ACT8865_ID_LDO4, + ACT8865_REG_NUM, +}; + +enum { + ACT8846_ID_REG1, + ACT8846_ID_REG2, + ACT8846_ID_REG3, + ACT8846_ID_REG4, + ACT8846_ID_REG5, + ACT8846_ID_REG6, + ACT8846_ID_REG7, + ACT8846_ID_REG8, + ACT8846_ID_REG9, + ACT8846_ID_REG10, + ACT8846_ID_REG11, + ACT8846_ID_REG12, + ACT8846_REG_NUM, +}; + +enum { + ACT8600, + ACT8865, + ACT8846, +}; + +/** + * act8865_regulator_data - regulator data + * @id: regulator id + * @name: regulator name + * @init_data: regulator init data + * @of_node: device tree node (optional) + */ +struct act8865_regulator_data { + int id; + const char *name; + struct regulator_init_data *init_data; + struct device_node *of_node; +}; + +/** + * act8865_platform_data - platform data for act8865 + * @num_regulators: number of regulators used + * @regulators: pointer to regulators used + */ +struct act8865_platform_data { + int num_regulators; + struct act8865_regulator_data *regulators; +}; +#endif diff --git a/include/linux/regulator/arizona-ldo1.h b/include/linux/regulator/arizona-ldo1.h new file mode 100644 index 000000000..fe74ab999 --- /dev/null +++ b/include/linux/regulator/arizona-ldo1.h @@ -0,0 +1,21 @@ +/* + * Platform data for Arizona LDO1 regulator + * + * Copyright 2017 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef ARIZONA_LDO1_H +#define ARIZONA_LDO1_H + +struct regulator_init_data; + +struct arizona_ldo1_pdata { + /** Regulator configuration for LDO1 */ + const struct regulator_init_data *init_data; +}; + +#endif diff --git a/include/linux/regulator/arizona-micsupp.h b/include/linux/regulator/arizona-micsupp.h new file mode 100644 index 000000000..616842619 --- /dev/null +++ b/include/linux/regulator/arizona-micsupp.h @@ -0,0 +1,21 @@ +/* + * Platform data for Arizona micsupp regulator + * + * Copyright 2017 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef ARIZONA_MICSUPP_H +#define ARIZONA_MICSUPP_H + +struct regulator_init_data; + +struct arizona_micsupp_pdata { + /** Regulator configuration for micsupp */ + const struct regulator_init_data *init_data; +}; + +#endif diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h new file mode 100644 index 000000000..f3f76051e --- /dev/null +++ b/include/linux/regulator/consumer.h @@ -0,0 +1,612 @@ +/* + * consumer.h -- SoC Regulator consumer support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Consumer Interface. + * + * A Power Management Regulator framework for SoC based devices. + * Features:- + * o Voltage and current level control. + * o Operating mode control. + * o Regulator status. + * o sysfs entries for showing client devices and status + * + * EXPERIMENTAL FEATURES: + * Dynamic Regulator operating Mode Switching (DRMS) - allows regulators + * to use most efficient operating mode depending upon voltage and load and + * is transparent to client drivers. + * + * e.g. Devices x,y,z share regulator r. Device x and y draw 20mA each during + * IO and 1mA at idle. Device z draws 100mA when under load and 5mA when + * idling. Regulator r has > 90% efficiency in NORMAL mode at loads > 100mA + * but this drops rapidly to 60% when below 100mA. Regulator r has > 90% + * efficiency in IDLE mode at loads < 10mA. Thus regulator r will operate + * in normal mode for loads > 10mA and in IDLE mode for load <= 10mA. + * + */ + +#ifndef __LINUX_REGULATOR_CONSUMER_H_ +#define __LINUX_REGULATOR_CONSUMER_H_ + +#include + +struct device; +struct notifier_block; +struct regmap; + +/* + * Regulator operating modes. + * + * Regulators can run in a variety of different operating modes depending on + * output load. This allows further system power savings by selecting the + * best (and most efficient) regulator mode for a desired load. + * + * Most drivers will only care about NORMAL. The modes below are generic and + * will probably not match the naming convention of your regulator data sheet + * but should match the use cases in the datasheet. + * + * In order of power efficiency (least efficient at top). + * + * Mode Description + * FAST Regulator can handle fast changes in it's load. + * e.g. useful in CPU voltage & frequency scaling where + * load can quickly increase with CPU frequency increases. + * + * NORMAL Normal regulator power supply mode. Most drivers will + * use this mode. + * + * IDLE Regulator runs in a more efficient mode for light + * loads. 
Can be used for devices that have a low power + * requirement during periods of inactivity. This mode + * may be more noisy than NORMAL and may not be able + * to handle fast load switching. + * + * STANDBY Regulator runs in the most efficient mode for very + * light loads. Can be used by devices when they are + * in a sleep/standby state. This mode is likely to be + * the most noisy and may not be able to handle fast load + * switching. + * + * NOTE: Most regulators will only support a subset of these modes. Some + * will only just support NORMAL. + * + * These modes can be OR'ed together to make up a mask of valid register modes. + */ + +#define REGULATOR_MODE_INVALID 0x0 +#define REGULATOR_MODE_FAST 0x1 +#define REGULATOR_MODE_NORMAL 0x2 +#define REGULATOR_MODE_IDLE 0x4 +#define REGULATOR_MODE_STANDBY 0x8 + +/* + * Regulator notifier events. + * + * UNDER_VOLTAGE Regulator output is under voltage. + * OVER_CURRENT Regulator output current is too high. + * REGULATION_OUT Regulator output is out of regulation. + * FAIL Regulator output has failed. + * OVER_TEMP Regulator over temp. + * FORCE_DISABLE Regulator forcibly shut down by software. + * VOLTAGE_CHANGE Regulator voltage changed. + * Data passed is old voltage cast to (void *). + * DISABLE Regulator was disabled. + * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed. + * Data passed is "struct pre_voltage_change_data" + * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason. + * Data passed is old voltage cast to (void *). + * PRE_DISABLE Regulator is about to be disabled + * ABORT_DISABLE Regulator disable failed for some reason + * + * NOTE: These events can be OR'ed together when passed into handler. + */ + +#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01 +#define REGULATOR_EVENT_OVER_CURRENT 0x02 +#define REGULATOR_EVENT_REGULATION_OUT 0x04 +#define REGULATOR_EVENT_FAIL 0x08 +#define REGULATOR_EVENT_OVER_TEMP 0x10 +#define REGULATOR_EVENT_FORCE_DISABLE 0x20 +#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 +#define REGULATOR_EVENT_DISABLE 0x80 +#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100 +#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 +#define REGULATOR_EVENT_PRE_DISABLE 0x400 +#define REGULATOR_EVENT_ABORT_DISABLE 0x800 +#define REGULATOR_EVENT_ENABLE 0x1000 + +/* + * Regulator errors that can be queried using regulator_get_error_flags + * + * UNDER_VOLTAGE Regulator output is under voltage. + * OVER_CURRENT Regulator output current is too high. + * REGULATION_OUT Regulator output is out of regulation. + * FAIL Regulator output has failed. + * OVER_TEMP Regulator over temp. + * + * NOTE: These errors can be OR'ed together. + */ + +#define REGULATOR_ERROR_UNDER_VOLTAGE BIT(1) +#define REGULATOR_ERROR_OVER_CURRENT BIT(2) +#define REGULATOR_ERROR_REGULATION_OUT BIT(3) +#define REGULATOR_ERROR_FAIL BIT(4) +#define REGULATOR_ERROR_OVER_TEMP BIT(5) + + +/** + * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event + * + * @old_uV: Current voltage before change. + * @min_uV: Min voltage we'll change to. + * @max_uV: Max voltage we'll change to. + */ +struct pre_voltage_change_data { + unsigned long old_uV; + unsigned long min_uV; + unsigned long max_uV; +}; + +struct regulator; + +/** + * struct regulator_bulk_data - Data used for bulk regulator operations. + * + * @supply: The name of the supply. Initialised by the user before + * using the bulk regulator APIs. + * @consumer: The regulator consumer for the supply. This will be managed + * by the bulk API. 
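[Editorial sketch, not part of the upstream header: the regulator_bulk_() usage pattern that the struct regulator_bulk_data kernel-doc above describes. The supply names and the consumer function are hypothetical; the managed get keeps a reference to the array, so it lives at file scope here.]

/* Supply names as used in the DT/board bindings (illustrative only) */
static struct regulator_bulk_data foo_supplies[] = {
	{ .supply = "vdd-core" },
	{ .supply = "vdd-io" },
};

static int foo_power_on(struct device *dev)
{
	int ret;

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(foo_supplies),
				      foo_supplies);
	if (ret)
		return ret;

	/* Enables every supply in the array, rolling back on failure */
	return regulator_bulk_enable(ARRAY_SIZE(foo_supplies), foo_supplies);
}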
+ * + * The regulator APIs provide a series of regulator_bulk_() API calls as + * a convenience to consumers which require multiple supplies. This + * structure is used to manage data for these calls. + */ +struct regulator_bulk_data { + const char *supply; + struct regulator *consumer; + + /* private: Internal use */ + int ret; +}; + +#if defined(CONFIG_REGULATOR) + +/* regulator get and put */ +struct regulator *__must_check regulator_get(struct device *dev, + const char *id); +struct regulator *__must_check devm_regulator_get(struct device *dev, + const char *id); +struct regulator *__must_check regulator_get_exclusive(struct device *dev, + const char *id); +struct regulator *__must_check devm_regulator_get_exclusive(struct device *dev, + const char *id); +struct regulator *__must_check regulator_get_optional(struct device *dev, + const char *id); +struct regulator *__must_check devm_regulator_get_optional(struct device *dev, + const char *id); +void regulator_put(struct regulator *regulator); +void devm_regulator_put(struct regulator *regulator); + +int regulator_register_supply_alias(struct device *dev, const char *id, + struct device *alias_dev, + const char *alias_id); +void regulator_unregister_supply_alias(struct device *dev, const char *id); + +int regulator_bulk_register_supply_alias(struct device *dev, + const char *const *id, + struct device *alias_dev, + const char *const *alias_id, + int num_id); +void regulator_bulk_unregister_supply_alias(struct device *dev, + const char * const *id, int num_id); + +int devm_regulator_register_supply_alias(struct device *dev, const char *id, + struct device *alias_dev, + const char *alias_id); +void devm_regulator_unregister_supply_alias(struct device *dev, + const char *id); + +int devm_regulator_bulk_register_supply_alias(struct device *dev, + const char *const *id, + struct device *alias_dev, + const char *const *alias_id, + int num_id); +void devm_regulator_bulk_unregister_supply_alias(struct device *dev, + const char *const *id, + int num_id); + +/* regulator output control and status */ +int __must_check regulator_enable(struct regulator *regulator); +int regulator_disable(struct regulator *regulator); +int regulator_force_disable(struct regulator *regulator); +int regulator_is_enabled(struct regulator *regulator); +int regulator_disable_deferred(struct regulator *regulator, int ms); + +int __must_check regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers); +int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers); +int __must_check regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_force_disable(int num_consumers, + struct regulator_bulk_data *consumers); +void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers); + +int regulator_count_voltages(struct regulator *regulator); +int regulator_list_voltage(struct regulator *regulator, unsigned selector); +int regulator_is_supported_voltage(struct regulator *regulator, + int min_uV, int max_uV); +unsigned int regulator_get_linear_step(struct regulator *regulator); +int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); +int regulator_set_voltage_time(struct regulator *regulator, + int old_uV, int new_uV); +int regulator_get_voltage(struct regulator *regulator); +int 
regulator_sync_voltage(struct regulator *regulator); +int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA); +int regulator_get_current_limit(struct regulator *regulator); + +int regulator_set_mode(struct regulator *regulator, unsigned int mode); +unsigned int regulator_get_mode(struct regulator *regulator); +int regulator_get_error_flags(struct regulator *regulator, + unsigned int *flags); +int regulator_set_load(struct regulator *regulator, int load_uA); + +int regulator_allow_bypass(struct regulator *regulator, bool allow); + +struct regmap *regulator_get_regmap(struct regulator *regulator); +int regulator_get_hardware_vsel_register(struct regulator *regulator, + unsigned *vsel_reg, + unsigned *vsel_mask); +int regulator_list_hardware_vsel(struct regulator *regulator, + unsigned selector); + +/* regulator notifier block */ +int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb); +int devm_regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb); +int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb); +void devm_regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb); + +/* driver data - core doesn't touch */ +void *regulator_get_drvdata(struct regulator *regulator); +void regulator_set_drvdata(struct regulator *regulator, void *data); + +#else + +/* + * Make sure client drivers will still build on systems with no software + * controllable voltage or current regulators. + */ +static inline struct regulator *__must_check regulator_get(struct device *dev, + const char *id) +{ + /* Nothing except the stubbed out regulator API should be + * looking at the value except to check if it is an error + * value. Drivers are free to handle NULL specifically by + * skipping all regulator API calls, but they don't have to. + * Drivers which don't, should make sure they properly handle + * corner cases of the API, such as regulator_get_voltage() + * returning 0. 
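Taken together, the calls declared above give the usual consumer sequence: get the supply, constrain its voltage, enable it, and balance the enable with a disable when finished. The sketch below is illustrative only; the device, the "vdd"/"vddio" supply ids and the voltages are invented, and the second function shows the regulator_bulk_*() convenience calls for drivers with several supplies:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static int foo_power_up(struct device *dev)
{
        struct regulator *vdd;
        int ret;

        vdd = devm_regulator_get(dev, "vdd");   /* hypothetical supply id */
        if (IS_ERR(vdd))
                return PTR_ERR(vdd);

        ret = regulator_set_voltage(vdd, 1800000, 1800000); /* example value */
        if (ret)
                return ret;

        ret = regulator_enable(vdd);
        if (ret)
                return ret;

        /* ... program the hardware ... */

        return regulator_disable(vdd);          /* must balance the enable */
}

/*
 * The bulk calls wrap the same pattern for several supplies; in a real
 * driver the array would live in driver data so the supplies can be
 * disabled again later.
 */
static int foo_power_up_bulk(struct device *dev)
{
        struct regulator_bulk_data supplies[] = {
                { .supply = "vdd" },
                { .supply = "vddio" },
        };
        int ret;

        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supplies), supplies);
        if (ret)
                return ret;

        return regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
}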
+ */ + return NULL; +} + +static inline struct regulator *__must_check +devm_regulator_get(struct device *dev, const char *id) +{ + return NULL; +} + +static inline struct regulator *__must_check +regulator_get_exclusive(struct device *dev, const char *id) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct regulator *__must_check +regulator_get_optional(struct device *dev, const char *id) +{ + return ERR_PTR(-ENODEV); +} + + +static inline struct regulator *__must_check +devm_regulator_get_optional(struct device *dev, const char *id) +{ + return ERR_PTR(-ENODEV); +} + +static inline void regulator_put(struct regulator *regulator) +{ +} + +static inline void devm_regulator_put(struct regulator *regulator) +{ +} + +static inline int regulator_register_supply_alias(struct device *dev, + const char *id, + struct device *alias_dev, + const char *alias_id) +{ + return 0; +} + +static inline void regulator_unregister_supply_alias(struct device *dev, + const char *id) +{ +} + +static inline int regulator_bulk_register_supply_alias(struct device *dev, + const char *const *id, + struct device *alias_dev, + const char * const *alias_id, + int num_id) +{ + return 0; +} + +static inline void regulator_bulk_unregister_supply_alias(struct device *dev, + const char * const *id, + int num_id) +{ +} + +static inline int devm_regulator_register_supply_alias(struct device *dev, + const char *id, + struct device *alias_dev, + const char *alias_id) +{ + return 0; +} + +static inline void devm_regulator_unregister_supply_alias(struct device *dev, + const char *id) +{ +} + +static inline int devm_regulator_bulk_register_supply_alias(struct device *dev, + const char *const *id, + struct device *alias_dev, + const char *const *alias_id, + int num_id) +{ + return 0; +} + +static inline void devm_regulator_bulk_unregister_supply_alias( + struct device *dev, const char *const *id, int num_id) +{ +} + +static inline int regulator_enable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_disable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_force_disable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_disable_deferred(struct regulator *regulator, + int ms) +{ + return 0; +} + +static inline int regulator_is_enabled(struct regulator *regulator) +{ + return 1; +} + +static inline int regulator_bulk_get(struct device *dev, + int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int devm_regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_force_disable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers) +{ +} + +static inline int regulator_set_voltage(struct regulator *regulator, + int min_uV, int max_uV) +{ + return 0; +} + +static inline int regulator_set_voltage_time(struct regulator *regulator, + int old_uV, int new_uV) +{ + return 0; +} + +static inline int regulator_get_voltage(struct regulator *regulator) +{ + return -EINVAL; +} + +static inline int regulator_is_supported_voltage(struct regulator *regulator, + int 
min_uV, int max_uV) +{ + return 0; +} + +static inline int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA) +{ + return 0; +} + +static inline int regulator_get_current_limit(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_mode(struct regulator *regulator, + unsigned int mode) +{ + return 0; +} + +static inline unsigned int regulator_get_mode(struct regulator *regulator) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_get_error_flags(struct regulator *regulator, + unsigned int *flags) +{ + return -EINVAL; +} + +static inline int regulator_set_load(struct regulator *regulator, int load_uA) +{ + return 0; +} + +static inline int regulator_allow_bypass(struct regulator *regulator, + bool allow) +{ + return 0; +} + +static inline struct regmap *regulator_get_regmap(struct regulator *regulator) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int regulator_get_hardware_vsel_register(struct regulator *regulator, + unsigned *vsel_reg, + unsigned *vsel_mask) +{ + return -EOPNOTSUPP; +} + +static inline int regulator_list_hardware_vsel(struct regulator *regulator, + unsigned selector) +{ + return -EOPNOTSUPP; +} + +static inline int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline int devm_regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline int devm_regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline void *regulator_get_drvdata(struct regulator *regulator) +{ + return NULL; +} + +static inline void regulator_set_drvdata(struct regulator *regulator, + void *data) +{ +} + +static inline int regulator_count_voltages(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_list_voltage(struct regulator *regulator, unsigned selector) +{ + return -EINVAL; +} + +#endif + +static inline int regulator_set_voltage_triplet(struct regulator *regulator, + int min_uV, int target_uV, + int max_uV) +{ + if (regulator_set_voltage(regulator, target_uV, max_uV) == 0) + return 0; + + return regulator_set_voltage(regulator, min_uV, max_uV); +} + +static inline int regulator_set_voltage_tol(struct regulator *regulator, + int new_uV, int tol_uV) +{ + if (regulator_set_voltage(regulator, new_uV, new_uV + tol_uV) == 0) + return 0; + else + return regulator_set_voltage(regulator, + new_uV - tol_uV, new_uV + tol_uV); +} + +static inline int regulator_is_supported_voltage_tol(struct regulator *regulator, + int target_uV, int tol_uV) +{ + return regulator_is_supported_voltage(regulator, + target_uV - tol_uV, + target_uV + tol_uV); +} + +#endif diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h new file mode 100644 index 000000000..d1f2073e4 --- /dev/null +++ b/include/linux/regulator/da9211.h @@ -0,0 +1,48 @@ +/* + * da9211.h - Regulator device driver for DA9211/DA9212 + * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 + * Copyright (C) 2015 Dialog Semiconductor Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
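The three inline helpers above simply retry regulator_set_voltage()/regulator_is_supported_voltage() with a preferred target and a wider fallback window, which suits operating-point tables that carry a nominal voltage plus a tolerance. A small sketch with made-up numbers:

#include <linux/regulator/consumer.h>

/*
 * Hypothetical operating point: target 1.10 V, anything within
 * 1.05 V..1.15 V keeps the part in spec.  The helper tries
 * [target, max] first and only then widens to [min, max].
 */
static int foo_set_opp_voltage(struct regulator *vdd)
{
        return regulator_set_voltage_triplet(vdd, 1050000, 1100000, 1150000);
}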
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_REGULATOR_DA9211_H +#define __LINUX_REGULATOR_DA9211_H + +#include + +#define DA9211_MAX_REGULATORS 2 + +struct gpio_desc; + +enum da9211_chip_id { + DA9211, + DA9212, + DA9213, + DA9223, + DA9214, + DA9224, + DA9215, + DA9225, +}; + +struct da9211_pdata { + /* + * Number of buck + * 1 : 4 phase 1 buck + * 2 : 2 phase 2 buck + */ + int num_buck; + struct gpio_desc *gpiod_ren[DA9211_MAX_REGULATORS]; + struct device_node *reg_node[DA9211_MAX_REGULATORS]; + struct regulator_init_data *init_data[DA9211_MAX_REGULATORS]; +}; +#endif diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h new file mode 100644 index 000000000..612062313 --- /dev/null +++ b/include/linux/regulator/db8500-prcmu.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * + * Author: Bengt Jonsson for ST-Ericsson + * + * Interface to power domain regulators on DB8500 + */ + +#ifndef __REGULATOR_H__ +#define __REGULATOR_H__ + +/* Number of DB8500 regulators and regulator enumeration */ +enum db8500_regulator_id { + DB8500_REGULATOR_VAPE, + DB8500_REGULATOR_VARM, + DB8500_REGULATOR_VMODEM, + DB8500_REGULATOR_VPLL, + DB8500_REGULATOR_VSMPS1, + DB8500_REGULATOR_VSMPS2, + DB8500_REGULATOR_VSMPS3, + DB8500_REGULATOR_VRF1, + DB8500_REGULATOR_SWITCH_SVAMMDSP, + DB8500_REGULATOR_SWITCH_SVAMMDSPRET, + DB8500_REGULATOR_SWITCH_SVAPIPE, + DB8500_REGULATOR_SWITCH_SIAMMDSP, + DB8500_REGULATOR_SWITCH_SIAMMDSPRET, + DB8500_REGULATOR_SWITCH_SIAPIPE, + DB8500_REGULATOR_SWITCH_SGA, + DB8500_REGULATOR_SWITCH_B2R2_MCDE, + DB8500_REGULATOR_SWITCH_ESRAM12, + DB8500_REGULATOR_SWITCH_ESRAM12RET, + DB8500_REGULATOR_SWITCH_ESRAM34, + DB8500_REGULATOR_SWITCH_ESRAM34RET, + DB8500_NUM_REGULATORS +}; + +/* + * Exported interface for CPUIdle only. This function is called with all + * interrupts turned off. + */ +int power_state_active_is_enabled(void); + +#endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h new file mode 100644 index 000000000..0fd8fbb74 --- /dev/null +++ b/include/linux/regulator/driver.h @@ -0,0 +1,530 @@ +/* + * driver.h -- SoC Regulator driver support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Driver Interface. 
+ */ + +#ifndef __LINUX_REGULATOR_DRIVER_H_ +#define __LINUX_REGULATOR_DRIVER_H_ + +#define MAX_COUPLED 4 + +#include +#include +#include + +struct gpio_desc; +struct regmap; +struct regulator_dev; +struct regulator_config; +struct regulator_init_data; +struct regulator_enable_gpio; + +enum regulator_status { + REGULATOR_STATUS_OFF, + REGULATOR_STATUS_ON, + REGULATOR_STATUS_ERROR, + /* fast/normal/idle/standby are flavors of "on" */ + REGULATOR_STATUS_FAST, + REGULATOR_STATUS_NORMAL, + REGULATOR_STATUS_IDLE, + REGULATOR_STATUS_STANDBY, + /* The regulator is enabled but not regulating */ + REGULATOR_STATUS_BYPASS, + /* in case that any other status doesn't apply */ + REGULATOR_STATUS_UNDEFINED, +}; + +/** + * struct regulator_linear_range - specify linear voltage ranges + * + * Specify a range of voltages for regulator_map_linear_range() and + * regulator_list_linear_range(). + * + * @min_uV: Lowest voltage in range + * @min_sel: Lowest selector for range + * @max_sel: Highest selector for range + * @uV_step: Step size + */ +struct regulator_linear_range { + unsigned int min_uV; + unsigned int min_sel; + unsigned int max_sel; + unsigned int uV_step; +}; + +/* Initialize struct regulator_linear_range */ +#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \ +{ \ + .min_uV = _min_uV, \ + .min_sel = _min_sel, \ + .max_sel = _max_sel, \ + .uV_step = _step_uV, \ +} + +/** + * struct regulator_ops - regulator operations. + * + * @enable: Configure the regulator as enabled. + * @disable: Configure the regulator as disabled. + * @is_enabled: Return 1 if the regulator is enabled, 0 if not. + * May also return negative errno. + * + * @set_voltage: Set the voltage for the regulator within the range specified. + * The driver should select the voltage closest to min_uV. + * @set_voltage_sel: Set the voltage for the regulator using the specified + * selector. + * @map_voltage: Convert a voltage into a selector + * @get_voltage: Return the currently configured voltage for the regulator; + * return -ENOTRECOVERABLE if regulator can't be read at + * bootup and hasn't been set yet. + * @get_voltage_sel: Return the currently configured voltage selector for the + * regulator; return -ENOTRECOVERABLE if regulator can't + * be read at bootup and hasn't been set yet. + * @list_voltage: Return one of the supported voltages, in microvolts; zero + * if the selector indicates a voltage that is unusable on this system; + * or negative errno. Selectors range from zero to one less than + * regulator_desc.n_voltages. Voltages may be reported in any order. + * + * @set_current_limit: Configure a limit for a current-limited regulator. + * The driver should select the current closest to max_uA. + * @get_current_limit: Get the configured limit for a current-limited regulator. + * @set_input_current_limit: Configure an input limit. + * + * @set_over_current_protection: Support capability of automatically shutting + * down when detecting an over current event. + * + * @set_active_discharge: Set active discharge enable/disable of regulators. + * + * @set_mode: Set the configured operating mode for the regulator. + * @get_mode: Get the configured operating mode for the regulator. + * @get_error_flags: Get the current error(s) for the regulator. + * @get_status: Return actual (not as-configured) status of regulator, as a + * REGULATOR_STATUS value (or negative errno) + * @get_optimum_mode: Get the most efficient operating mode for the regulator + * when running with the specified parameters. 
+ * @set_load: Set the load for the regulator. + * + * @set_bypass: Set the regulator in bypass mode. + * @get_bypass: Get the regulator bypass mode state. + * + * @enable_time: Time taken for the regulator voltage output voltage to + * stabilise after being enabled, in microseconds. + * @set_ramp_delay: Set the ramp delay for the regulator. The driver should + * select ramp delay equal to or less than(closest) ramp_delay. + * @set_voltage_time: Time taken for the regulator voltage output voltage + * to stabilise after being set to a new value, in microseconds. + * The function receives the from and to voltage as input, it + * should return the worst case. + * @set_voltage_time_sel: Time taken for the regulator voltage output voltage + * to stabilise after being set to a new value, in microseconds. + * The function receives the from and to voltage selector as + * input, it should return the worst case. + * @set_soft_start: Enable soft start for the regulator. + * + * @set_suspend_voltage: Set the voltage for the regulator when the system + * is suspended. + * @set_suspend_enable: Mark the regulator as enabled when the system is + * suspended. + * @set_suspend_disable: Mark the regulator as disabled when the system is + * suspended. + * @set_suspend_mode: Set the operating mode for the regulator when the + * system is suspended. + * + * @set_pull_down: Configure the regulator to pull down when the regulator + * is disabled. + * + * This struct describes regulator operations which can be implemented by + * regulator chip drivers. + */ +struct regulator_ops { + + /* enumerate supported voltages */ + int (*list_voltage) (struct regulator_dev *, unsigned selector); + + /* get/set regulator voltage */ + int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV, + unsigned *selector); + int (*map_voltage)(struct regulator_dev *, int min_uV, int max_uV); + int (*set_voltage_sel) (struct regulator_dev *, unsigned selector); + int (*get_voltage) (struct regulator_dev *); + int (*get_voltage_sel) (struct regulator_dev *); + + /* get/set regulator current */ + int (*set_current_limit) (struct regulator_dev *, + int min_uA, int max_uA); + int (*get_current_limit) (struct regulator_dev *); + + int (*set_input_current_limit) (struct regulator_dev *, int lim_uA); + int (*set_over_current_protection) (struct regulator_dev *); + int (*set_active_discharge) (struct regulator_dev *, bool enable); + + /* enable/disable regulator */ + int (*enable) (struct regulator_dev *); + int (*disable) (struct regulator_dev *); + int (*is_enabled) (struct regulator_dev *); + + /* get/set regulator operating mode (defined in consumer.h) */ + int (*set_mode) (struct regulator_dev *, unsigned int mode); + unsigned int (*get_mode) (struct regulator_dev *); + + /* retrieve current error flags on the regulator */ + int (*get_error_flags)(struct regulator_dev *, unsigned int *flags); + + /* Time taken to enable or set voltage on the regulator */ + int (*enable_time) (struct regulator_dev *); + int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay); + int (*set_voltage_time) (struct regulator_dev *, int old_uV, + int new_uV); + int (*set_voltage_time_sel) (struct regulator_dev *, + unsigned int old_selector, + unsigned int new_selector); + + int (*set_soft_start) (struct regulator_dev *); + + /* report regulator status ... most other accessors report + * control inputs, this reports results of combining inputs + * from Linux (and other sources) with the actual load. 
+ * returns REGULATOR_STATUS_* or negative errno. + */ + int (*get_status)(struct regulator_dev *); + + /* get most efficient regulator operating mode for load */ + unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, + int output_uV, int load_uA); + /* set the load on the regulator */ + int (*set_load)(struct regulator_dev *, int load_uA); + + /* control and report on bypass mode */ + int (*set_bypass)(struct regulator_dev *dev, bool enable); + int (*get_bypass)(struct regulator_dev *dev, bool *enable); + + /* the operations below are for configuration of regulator state when + * its parent PMIC enters a global STANDBY/HIBERNATE state */ + + /* set regulator suspend voltage */ + int (*set_suspend_voltage) (struct regulator_dev *, int uV); + + /* enable/disable regulator in suspend state */ + int (*set_suspend_enable) (struct regulator_dev *); + int (*set_suspend_disable) (struct regulator_dev *); + + /* set regulator suspend operating mode (defined in consumer.h) */ + int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); + + int (*resume)(struct regulator_dev *rdev); + + int (*set_pull_down) (struct regulator_dev *); +}; + +/* + * Regulators can either control voltage or current. + */ +enum regulator_type { + REGULATOR_VOLTAGE, + REGULATOR_CURRENT, +}; + +/** + * struct regulator_desc - Static regulator descriptor + * + * Each regulator registered with the core is described with a + * structure of this type and a struct regulator_config. This + * structure contains the non-varying parts of the regulator + * description. + * + * @name: Identifying name for the regulator. + * @supply_name: Identifying the regulator supply + * @of_match: Name used to identify regulator in DT. + * @regulators_node: Name of node containing regulator definitions in DT. + * @of_parse_cb: Optional callback called only if of_match is present. + * Will be called for each regulator parsed from DT, during + * init_data parsing. + * The regulator_config passed as argument to the callback will + * be a copy of config passed to regulator_register, valid only + * for this particular call. Callback may freely change the + * config but it cannot store it for later usage. + * Callback should return 0 on success or negative ERRNO + * indicating failure. + * @id: Numerical identifier for the regulator. + * @ops: Regulator operations table. + * @irq: Interrupt number for the regulator. + * @type: Indicates if the regulator is a voltage or current regulator. + * @owner: Module providing the regulator, used for refcounting. + * + * @continuous_voltage_range: Indicates if the regulator can set any + * voltage within constrains range. + * @n_voltages: Number of selectors available for ops.list_voltage(). + * + * @min_uV: Voltage given by the lowest selector (if linear mapping) + * @uV_step: Voltage increase with each selector (if linear mapping) + * @linear_min_sel: Minimal selector for starting linear mapping + * @fixed_uV: Fixed voltage of rails. + * @ramp_delay: Time to settle down after voltage change (unit: uV/us) + * @min_dropout_uV: The minimum dropout voltage this regulator can handle + * @linear_ranges: A constant table of possible voltage ranges. + * @n_linear_ranges: Number of entries in the @linear_ranges table. 
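As a concrete illustration of @linear_ranges/@n_linear_ranges and the REGULATOR_LINEAR_RANGE() initialiser defined earlier in this header, a hypothetical LDO whose selector space splits into two linear segments could be described as below; such a driver would point .linear_ranges/.n_linear_ranges at the table, set .n_voltages to 34 and use regulator_list_voltage_linear_range() (declared near the end of this header) as its .list_voltage op:

#include <linux/regulator/driver.h>

/* hypothetical LDO: selectors 0..16 give 0.80-1.60 V in 50 mV steps,
 * selectors 17..33 give 1.70-3.30 V in 100 mV steps */
static const struct regulator_linear_range foo_ldo_ranges[] = {
        REGULATOR_LINEAR_RANGE(800000,   0, 16,  50000),
        REGULATOR_LINEAR_RANGE(1700000, 17, 33, 100000),
};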
+ * @volt_table: Voltage mapping table (if table based mapping) + * + * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ + * @vsel_mask: Mask for register bitfield used for selector + * @csel_reg: Register for TPS65218 LS3 current regulator + * @csel_mask: Mask for TPS65218 LS3 current regulator + * @apply_reg: Register for initiate voltage change on the output when + * using regulator_set_voltage_sel_regmap + * @apply_bit: Register bitfield used for initiate voltage change on the + * output when using regulator_set_voltage_sel_regmap + * @enable_reg: Register for control when using regmap enable/disable ops + * @enable_mask: Mask for control when using regmap enable/disable ops + * @enable_val: Enabling value for control when using regmap enable/disable ops + * @disable_val: Disabling value for control when using regmap enable/disable ops + * @enable_is_inverted: A flag to indicate set enable_mask bits to disable + * when using regulator_enable_regmap and friends APIs. + * @bypass_reg: Register for control when using regmap set_bypass + * @bypass_mask: Mask for control when using regmap set_bypass + * @bypass_val_on: Enabling value for control when using regmap set_bypass + * @bypass_val_off: Disabling value for control when using regmap set_bypass + * @active_discharge_off: Enabling value for control when using regmap + * set_active_discharge + * @active_discharge_on: Disabling value for control when using regmap + * set_active_discharge + * @active_discharge_mask: Mask for control when using regmap + * set_active_discharge + * @active_discharge_reg: Register for control when using regmap + * set_active_discharge + * @soft_start_reg: Register for control when using regmap set_soft_start + * @soft_start_mask: Mask for control when using regmap set_soft_start + * @soft_start_val_on: Enabling value for control when using regmap + * set_soft_start + * @pull_down_reg: Register for control when using regmap set_pull_down + * @pull_down_mask: Mask for control when using regmap set_pull_down + * @pull_down_val_on: Enabling value for control when using regmap + * set_pull_down + * + * @enable_time: Time taken for initial enable of regulator (in uS). 
+ * @off_on_delay: guard time (in uS), before re-enabling a regulator + * + * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode + */ +struct regulator_desc { + const char *name; + const char *supply_name; + const char *of_match; + const char *regulators_node; + int (*of_parse_cb)(struct device_node *, + const struct regulator_desc *, + struct regulator_config *); + int id; + unsigned int continuous_voltage_range:1; + unsigned n_voltages; + const struct regulator_ops *ops; + int irq; + enum regulator_type type; + struct module *owner; + + unsigned int min_uV; + unsigned int uV_step; + unsigned int linear_min_sel; + int fixed_uV; + unsigned int ramp_delay; + int min_dropout_uV; + + const struct regulator_linear_range *linear_ranges; + int n_linear_ranges; + + const unsigned int *volt_table; + + unsigned int vsel_reg; + unsigned int vsel_mask; + unsigned int csel_reg; + unsigned int csel_mask; + unsigned int apply_reg; + unsigned int apply_bit; + unsigned int enable_reg; + unsigned int enable_mask; + unsigned int enable_val; + unsigned int disable_val; + bool enable_is_inverted; + unsigned int bypass_reg; + unsigned int bypass_mask; + unsigned int bypass_val_on; + unsigned int bypass_val_off; + unsigned int active_discharge_on; + unsigned int active_discharge_off; + unsigned int active_discharge_mask; + unsigned int active_discharge_reg; + unsigned int soft_start_reg; + unsigned int soft_start_mask; + unsigned int soft_start_val_on; + unsigned int pull_down_reg; + unsigned int pull_down_mask; + unsigned int pull_down_val_on; + + unsigned int enable_time; + + unsigned int off_on_delay; + + unsigned int (*of_map_mode)(unsigned int mode); +}; + +/** + * struct regulator_config - Dynamic regulator descriptor + * + * Each regulator registered with the core is described with a + * structure of this type and a struct regulator_desc. This structure + * contains the runtime variable parts of the regulator description. + * + * @dev: struct device for the regulator + * @init_data: platform provided init data, passed through by driver + * @driver_data: private regulator data + * @of_node: OpenFirmware node to parse for device tree bindings (may be + * NULL). + * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is + * insufficient. + * @ena_gpio_initialized: GPIO controlling regulator enable was properly + * initialized, meaning that >= 0 is a valid gpio + * identifier and < 0 is a non existent gpio. + * @ena_gpio: GPIO controlling regulator enable. + * @ena_gpiod: GPIO descriptor controlling regulator enable. + * @ena_gpio_invert: Sense for GPIO enable control. + * @ena_gpio_flags: Flags to use when calling gpio_request_one() + */ +struct regulator_config { + struct device *dev; + const struct regulator_init_data *init_data; + void *driver_data; + struct device_node *of_node; + struct regmap *regmap; + + bool ena_gpio_initialized; + int ena_gpio; + struct gpio_desc *ena_gpiod; + unsigned int ena_gpio_invert:1; + unsigned int ena_gpio_flags; +}; + +/* + * struct coupling_desc + * + * Describes coupling of regulators. Each regulator should have + * at least a pointer to itself in coupled_rdevs array. + * When a new coupled regulator is resolved, n_resolved is + * incremented. + */ +struct coupling_desc { + struct regulator_dev *coupled_rdevs[MAX_COUPLED]; + int n_resolved; + int n_coupled; +}; + +/* + * struct regulator_dev + * + * Voltage / Current regulator class device. One for each + * regulator. 
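Pulling the driver-side pieces together: a regmap-backed regulator typically fills a struct regulator_ops with the generic *_regmap and linear-mapping helpers declared towards the end of this header, describes itself in a struct regulator_desc, and registers from probe with devm_regulator_register(). The sketch below is only indicative; the FOO_* registers, the foo names and every numeric value are invented, and it assumes a parent MFD has already registered a regmap:

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>

#define FOO_BUCK_VSEL 0x02              /* hypothetical register map */
#define FOO_BUCK_CTRL 0x03

static const struct regulator_ops foo_buck_ops = {
        .list_voltage    = regulator_list_voltage_linear,
        .map_voltage     = regulator_map_voltage_linear,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .enable          = regulator_enable_regmap,
        .disable         = regulator_disable_regmap,
        .is_enabled      = regulator_is_enabled_regmap,
};

static const struct regulator_desc foo_buck_desc = {
        .name        = "foo-buck",
        .ops         = &foo_buck_ops,
        .type        = REGULATOR_VOLTAGE,
        .owner       = THIS_MODULE,
        .n_voltages  = 64,              /* 0.6 V + 63 * 12.5 mV = 1.3875 V */
        .min_uV      = 600000,
        .uV_step     = 12500,
        .vsel_reg    = FOO_BUCK_VSEL,
        .vsel_mask   = 0x3f,
        .enable_reg  = FOO_BUCK_CTRL,
        .enable_mask = BIT(0),
};

static int foo_buck_probe(struct platform_device *pdev)
{
        struct regulator_config cfg = { };
        struct regulator_dev *rdev;

        cfg.dev = &pdev->dev;
        /* assuming the parent MFD already registered a regmap */
        cfg.regmap = dev_get_regmap(pdev->dev.parent, NULL);

        rdev = devm_regulator_register(&pdev->dev, &foo_buck_desc, &cfg);
        return PTR_ERR_OR_ZERO(rdev);
}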
+ * + * This should *not* be used directly by anything except the regulator + * core and notification injection (which should take the mutex and do + * no other direct access). + */ +struct regulator_dev { + const struct regulator_desc *desc; + int exclusive; + u32 use_count; + u32 open_count; + u32 bypass_count; + + /* lists we belong to */ + struct list_head list; /* list of all regulators */ + + /* lists we own */ + struct list_head consumer_list; /* consumers we supply */ + + struct coupling_desc coupling_desc; + + struct blocking_notifier_head notifier; + struct mutex mutex; /* consumer lock */ + struct task_struct *mutex_owner; + int ref_cnt; + struct module *owner; + struct device dev; + struct regulation_constraints *constraints; + struct regulator *supply; /* for tree */ + const char *supply_name; + struct regmap *regmap; + + struct delayed_work disable_work; + int deferred_disables; + + void *reg_data; /* regulator_dev data */ + + struct dentry *debugfs; + + struct regulator_enable_gpio *ena_pin; + unsigned int ena_gpio_state:1; + + unsigned int is_switch:1; + + /* time when this regulator was disabled last time */ + unsigned long last_off_jiffy; +}; + +struct regulator_dev * +regulator_register(const struct regulator_desc *regulator_desc, + const struct regulator_config *config); +struct regulator_dev * +devm_regulator_register(struct device *dev, + const struct regulator_desc *regulator_desc, + const struct regulator_config *config); +void regulator_unregister(struct regulator_dev *rdev); +void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev); + +int regulator_notifier_call_chain(struct regulator_dev *rdev, + unsigned long event, void *data); + +void *rdev_get_drvdata(struct regulator_dev *rdev); +struct device *rdev_get_dev(struct regulator_dev *rdev); +int rdev_get_id(struct regulator_dev *rdev); + +int regulator_mode_to_status(unsigned int); + +int regulator_list_voltage_linear(struct regulator_dev *rdev, + unsigned int selector); +int regulator_list_voltage_linear_range(struct regulator_dev *rdev, + unsigned int selector); +int regulator_list_voltage_table(struct regulator_dev *rdev, + unsigned int selector); +int regulator_map_voltage_linear(struct regulator_dev *rdev, + int min_uV, int max_uV); +int regulator_map_voltage_linear_range(struct regulator_dev *rdev, + int min_uV, int max_uV); +int regulator_map_voltage_iterate(struct regulator_dev *rdev, + int min_uV, int max_uV); +int regulator_map_voltage_ascend(struct regulator_dev *rdev, + int min_uV, int max_uV); +int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev); +int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel); +int regulator_is_enabled_regmap(struct regulator_dev *rdev); +int regulator_enable_regmap(struct regulator_dev *rdev); +int regulator_disable_regmap(struct regulator_dev *rdev); +int regulator_set_voltage_time_sel(struct regulator_dev *rdev, + unsigned int old_selector, + unsigned int new_selector); +int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable); +int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable); +int regulator_set_soft_start_regmap(struct regulator_dev *rdev); +int regulator_set_pull_down_regmap(struct regulator_dev *rdev); + +int regulator_set_active_discharge_regmap(struct regulator_dev *rdev, + bool enable); +void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); + +#endif diff --git a/include/linux/regulator/fan53555.h b/include/linux/regulator/fan53555.h new file 
mode 100644 index 000000000..f13880e84 --- /dev/null +++ b/include/linux/regulator/fan53555.h @@ -0,0 +1,61 @@ +/* + * fan53555.h - Fairchild Regulator FAN53555 Driver + * + * Copyright (C) 2012 Marvell Technology Ltd. + * Yunfan Zhang + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __FAN53555_H__ +#define __FAN53555_H__ + +/* VSEL ID */ +enum { + FAN53555_VSEL_ID_0 = 0, + FAN53555_VSEL_ID_1, +}; + +/* Transition slew rate limiting from a low to high voltage. + * ----------------------- + * Bin |Slew Rate(mV/uS) + * ------|---------------- + * 000 | 64.00 + * ------|---------------- + * 001 | 32.00 + * ------|---------------- + * 010 | 16.00 + * ------|---------------- + * 011 | 8.00 + * ------|---------------- + * 100 | 4.00 + * ------|---------------- + * 101 | 2.00 + * ------|---------------- + * 110 | 1.00 + * ------|---------------- + * 111 | 0.50 + * ----------------------- + */ +enum { + FAN53555_SLEW_RATE_64MV = 0, + FAN53555_SLEW_RATE_32MV, + FAN53555_SLEW_RATE_16MV, + FAN53555_SLEW_RATE_8MV, + FAN53555_SLEW_RATE_4MV, + FAN53555_SLEW_RATE_2MV, + FAN53555_SLEW_RATE_1MV, + FAN53555_SLEW_RATE_0_5MV, +}; + +struct fan53555_platform_data { + struct regulator_init_data *regulator; + unsigned int slew_rate; + /* Sleep VSEL ID */ + unsigned int sleep_vsel_id; +}; + +#endif /* __FAN53555_H__ */ diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h new file mode 100644 index 000000000..48918be64 --- /dev/null +++ b/include/linux/regulator/fixed.h @@ -0,0 +1,76 @@ +/* + * fixed.h + * + * Copyright 2008 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * Copyright (c) 2009 Nokia Corporation + * Roger Quadros + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + */ + +#ifndef __REGULATOR_FIXED_H +#define __REGULATOR_FIXED_H + +struct regulator_init_data; + +/** + * struct fixed_voltage_config - fixed_voltage_config structure + * @supply_name: Name of the regulator supply + * @input_supply: Name of the input regulator supply + * @microvolts: Output voltage of regulator + * @gpio: GPIO to use for enable control + * set to -EINVAL if not used + * @startup_delay: Start-up time in microseconds + * @gpio_is_open_drain: Gpio pin is open drain or normal type. + * If it is open drain type then HIGH will be set + * through PULL-UP with setting gpio as input + * and low will be set as gpio-output with driven + * to low. For non-open-drain case, the gpio will + * will be in output and drive to low/high accordingly. + * @enable_high: Polarity of enable GPIO + * 1 = Active high, 0 = Active low + * @enabled_at_boot: Whether regulator has been enabled at + * boot or not. 1 = Yes, 0 = No + * This is used to keep the regulator at + * the default state + * @init_data: regulator_init_data + * + * This structure contains fixed voltage regulator configuration + * information that must be passed by platform code to the fixed + * voltage regulator driver. 
+ */ +struct fixed_voltage_config { + const char *supply_name; + const char *input_supply; + int microvolts; + int gpio; + unsigned startup_delay; + unsigned gpio_is_open_drain:1; + unsigned enable_high:1; + unsigned enabled_at_boot:1; + struct regulator_init_data *init_data; +}; + +struct regulator_consumer_supply; + +#if IS_ENABLED(CONFIG_REGULATOR) +struct platform_device *regulator_register_always_on(int id, const char *name, + struct regulator_consumer_supply *supplies, int num_supplies, int uv); +#else +static inline struct platform_device *regulator_register_always_on(int id, const char *name, + struct regulator_consumer_supply *supplies, int num_supplies, int uv) +{ + return NULL; +} +#endif + +#define regulator_register_fixed(id, s, ns) regulator_register_always_on(id, \ + "fixed-dummy", s, ns, 0) + +#endif diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h new file mode 100644 index 000000000..19fbd2674 --- /dev/null +++ b/include/linux/regulator/gpio-regulator.h @@ -0,0 +1,87 @@ +/* + * gpio-regulator.h + * + * Copyright 2011 Heiko Stuebner + * + * based on fixed.h + * + * Copyright 2008 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * Copyright (c) 2009 Nokia Corporation + * Roger Quadros + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + */ + +#ifndef __REGULATOR_GPIO_H +#define __REGULATOR_GPIO_H + +struct regulator_init_data; + +enum regulator_type; + +/** + * struct gpio_regulator_state - state description + * @value: microvolts or microamps + * @gpios: bitfield of gpio target-states for the value + * + * This structure describes a supported setting of the regulator + * and the necessary gpio-state to achieve it. + * + * The n-th bit in the bitfield describes the state of the n-th GPIO + * from the gpios-array defined in gpio_regulator_config below. + */ +struct gpio_regulator_state { + int value; + int gpios; +}; + +/** + * struct gpio_regulator_config - config structure + * @supply_name: Name of the regulator supply + * @enable_gpio: GPIO to use for enable control + * set to -EINVAL if not used + * @enable_high: Polarity of enable GPIO + * 1 = Active high, 0 = Active low + * @enabled_at_boot: Whether regulator has been enabled at + * boot or not. 1 = Yes, 0 = No + * This is used to keep the regulator at + * the default state + * @startup_delay: Start-up time in microseconds + * @gpios: Array containing the gpios needed to control + * the setting of the regulator + * @nr_gpios: Number of gpios + * @states: Array of gpio_regulator_state entries describing + * the gpio state for specific voltages + * @nr_states: Number of states available + * @regulator_type: either REGULATOR_CURRENT or REGULATOR_VOLTAGE + * @init_data: regulator_init_data + * + * This structure contains gpio-voltage regulator configuration + * information that must be passed by platform code to the + * gpio-voltage regulator driver. 
+ */ +struct gpio_regulator_config { + const char *supply_name; + + int enable_gpio; + unsigned enable_high:1; + unsigned enabled_at_boot:1; + unsigned startup_delay; + + struct gpio *gpios; + int nr_gpios; + + struct gpio_regulator_state *states; + int nr_states; + + enum regulator_type type; + struct regulator_init_data *init_data; +}; + +#endif diff --git a/include/linux/regulator/lp3971.h b/include/linux/regulator/lp3971.h new file mode 100644 index 000000000..61401649f --- /dev/null +++ b/include/linux/regulator/lp3971.h @@ -0,0 +1,51 @@ +/* + * National Semiconductors LP3971 PMIC chip client interface + * + * Copyright (C) 2009 Samsung Electronics + * Author: Marek Szyprowski + * + * Based on wm8400.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_REGULATOR_LP3971_H +#define __LINUX_REGULATOR_LP3971_H + +#include + +#define LP3971_LDO1 0 +#define LP3971_LDO2 1 +#define LP3971_LDO3 2 +#define LP3971_LDO4 3 +#define LP3971_LDO5 4 + +#define LP3971_DCDC1 5 +#define LP3971_DCDC2 6 +#define LP3971_DCDC3 7 + +#define LP3971_NUM_REGULATORS 8 + +struct lp3971_regulator_subdev { + int id; + struct regulator_init_data *initdata; +}; + +struct lp3971_platform_data { + int num_regulators; + struct lp3971_regulator_subdev *regulators; +}; + +#endif diff --git a/include/linux/regulator/lp3972.h b/include/linux/regulator/lp3972.h new file mode 100644 index 000000000..9bb7389b7 --- /dev/null +++ b/include/linux/regulator/lp3972.h @@ -0,0 +1,48 @@ +/* + * National Semiconductors LP3972 PMIC chip client interface + * + * Based on lp3971.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
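To make the gpio-regulator tables above concrete: each gpio_regulator_state entry pairs an output value with a bitfield in which bit n selects the target state of the n-th GPIO from the config's gpios array (see the states/nr_states members of struct gpio_regulator_config). A hypothetical supply switched between four voltages by two GPIOs might be described like this:

#include <linux/regulator/gpio-regulator.h>

/* hypothetical SD/MMC supply switched by two GPIOs; bit n of .gpios is
 * the requested state of the n-th descriptor in the config's gpios array */
static struct gpio_regulator_state foo_vmmc_states[] = {
        { .value = 1800000, .gpios = 0x0 },
        { .value = 2500000, .gpios = 0x1 },
        { .value = 2800000, .gpios = 0x2 },
        { .value = 3300000, .gpios = 0x3 },
};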
+ */ + +#ifndef __LINUX_REGULATOR_LP3972_H +#define __LINUX_REGULATOR_LP3972_H + +#include + +#define LP3972_LDO1 0 +#define LP3972_LDO2 1 +#define LP3972_LDO3 2 +#define LP3972_LDO4 3 +#define LP3972_LDO5 4 + +#define LP3972_DCDC1 5 +#define LP3972_DCDC2 6 +#define LP3972_DCDC3 7 + +#define LP3972_NUM_REGULATORS 8 + +struct lp3972_regulator_subdev { + int id; + struct regulator_init_data *initdata; +}; + +struct lp3972_platform_data { + int num_regulators; + struct lp3972_regulator_subdev *regulators; +}; + +#endif diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h new file mode 100644 index 000000000..6029279f4 --- /dev/null +++ b/include/linux/regulator/lp872x.h @@ -0,0 +1,95 @@ +/* + * Copyright 2012 Texas Instruments + * + * Author: Milo(Woogyom) Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __LP872X_REGULATOR_H__ +#define __LP872X_REGULATOR_H__ + +#include +#include +#include + +#define LP872X_MAX_REGULATORS 9 + +#define LP8720_ENABLE_DELAY 200 +#define LP8725_ENABLE_DELAY 30000 + +enum lp872x_regulator_id { + LP8720_ID_BASE, + LP8720_ID_LDO1 = LP8720_ID_BASE, + LP8720_ID_LDO2, + LP8720_ID_LDO3, + LP8720_ID_LDO4, + LP8720_ID_LDO5, + LP8720_ID_BUCK, + + LP8725_ID_BASE, + LP8725_ID_LDO1 = LP8725_ID_BASE, + LP8725_ID_LDO2, + LP8725_ID_LDO3, + LP8725_ID_LDO4, + LP8725_ID_LDO5, + LP8725_ID_LILO1, + LP8725_ID_LILO2, + LP8725_ID_BUCK1, + LP8725_ID_BUCK2, + + LP872X_ID_MAX, +}; + +enum lp872x_dvs_state { + DVS_LOW = GPIOF_OUT_INIT_LOW, + DVS_HIGH = GPIOF_OUT_INIT_HIGH, +}; + +enum lp872x_dvs_sel { + SEL_V1, + SEL_V2, +}; + +/** + * lp872x_dvs + * @gpio : gpio pin number for dvs control + * @vsel : dvs selector for buck v1 or buck v2 register + * @init_state : initial dvs pin state + */ +struct lp872x_dvs { + int gpio; + enum lp872x_dvs_sel vsel; + enum lp872x_dvs_state init_state; +}; + +/** + * lp872x_regdata + * @id : regulator id + * @init_data : init data for each regulator + */ +struct lp872x_regulator_data { + enum lp872x_regulator_id id; + struct regulator_init_data *init_data; +}; + +/** + * lp872x_platform_data + * @general_config : the value of LP872X_GENERAL_CFG register + * @update_config : if LP872X_GENERAL_CFG register is updated, set true + * @regulator_data : platform regulator id and init data + * @dvs : dvs data for buck voltage control + * @enable_gpio : gpio pin number for enable control + */ +struct lp872x_platform_data { + u8 general_config; + bool update_config; + struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS]; + struct lp872x_dvs *dvs; + int enable_gpio; +}; + +#endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h new file mode 100644 index 000000000..a459a5e97 --- /dev/null +++ b/include/linux/regulator/machine.h @@ -0,0 +1,265 @@ +/* + * machine.h -- SoC Regulator support, machine/board driver API. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Machine/Board Interface. + */ + +#ifndef __LINUX_REGULATOR_MACHINE_H_ +#define __LINUX_REGULATOR_MACHINE_H_ + +#include +#include + +struct regulator; + +/* + * Regulator operation constraint flags. 
These flags are used to enable + * certain regulator operations and can be OR'ed together. + * + * VOLTAGE: Regulator output voltage can be changed by software on this + * board/machine. + * CURRENT: Regulator output current can be changed by software on this + * board/machine. + * MODE: Regulator operating mode can be changed by software on this + * board/machine. + * STATUS: Regulator can be enabled and disabled. + * DRMS: Dynamic Regulator Mode Switching is enabled for this regulator. + * BYPASS: Regulator can be put into bypass mode + */ + +#define REGULATOR_CHANGE_VOLTAGE 0x1 +#define REGULATOR_CHANGE_CURRENT 0x2 +#define REGULATOR_CHANGE_MODE 0x4 +#define REGULATOR_CHANGE_STATUS 0x8 +#define REGULATOR_CHANGE_DRMS 0x10 +#define REGULATOR_CHANGE_BYPASS 0x20 + +/* + * operations in suspend mode + * DO_NOTHING_IN_SUSPEND - the default value + * DISABLE_IN_SUSPEND - turn off regulator in suspend states + * ENABLE_IN_SUSPEND - keep regulator on in suspend states + */ +#define DO_NOTHING_IN_SUSPEND 0 +#define DISABLE_IN_SUSPEND 1 +#define ENABLE_IN_SUSPEND 2 + +/* Regulator active discharge flags */ +enum regulator_active_discharge { + REGULATOR_ACTIVE_DISCHARGE_DEFAULT, + REGULATOR_ACTIVE_DISCHARGE_DISABLE, + REGULATOR_ACTIVE_DISCHARGE_ENABLE, +}; + +/** + * struct regulator_state - regulator state during low power system states + * + * This describes a regulators state during a system wide low power + * state. One of enabled or disabled must be set for the + * configuration to be applied. + * + * @uV: Default operating voltage during suspend, it can be adjusted + * among . + * @min_uV: Minimum suspend voltage may be set. + * @max_uV: Maximum suspend voltage may be set. + * @mode: Operating mode during suspend. + * @enabled: operations during suspend. + * - DO_NOTHING_IN_SUSPEND + * - DISABLE_IN_SUSPEND + * - ENABLE_IN_SUSPEND + * @changeable: Is this state can be switched between enabled/disabled, + */ +struct regulator_state { + int uV; + int min_uV; + int max_uV; + unsigned int mode; + int enabled; + bool changeable; +}; + +/** + * struct regulation_constraints - regulator operating constraints. + * + * This struct describes regulator and board/machine specific constraints. + * + * @name: Descriptive name for the constraints, used for display purposes. + * + * @min_uV: Smallest voltage consumers may set. + * @max_uV: Largest voltage consumers may set. + * @uV_offset: Offset applied to voltages from consumer to compensate for + * voltage drops. + * + * @min_uA: Smallest current consumers may set. + * @max_uA: Largest current consumers may set. + * @ilim_uA: Maximum input current. + * @system_load: Load that isn't captured by any consumer requests. + * + * @max_spread: Max possible spread between coupled regulators + * @valid_modes_mask: Mask of modes which may be configured by consumers. + * @valid_ops_mask: Operations which may be performed by consumers. + * + * @always_on: Set if the regulator should never be disabled. + * @boot_on: Set if the regulator is enabled when the system is initially + * started. If the regulator is not enabled by the hardware or + * bootloader then it will be enabled when the constraints are + * applied. + * @apply_uV: Apply the voltage constraint when initialising. + * @ramp_disable: Disable ramp delay when initialising or when setting voltage. + * @soft_start: Enable soft start so that voltage ramps slowly. + * @pull_down: Enable pull down when regulator is disabled. + * @over_current_protection: Auto disable on over current event. 
+ * + * @input_uV: Input voltage for regulator when supplied by another regulator. + * + * @state_disk: State for regulator when system is suspended in disk mode. + * @state_mem: State for regulator when system is suspended in mem mode. + * @state_standby: State for regulator when system is suspended in standby + * mode. + * @initial_state: Suspend state to set by default. + * @initial_mode: Mode to set at startup. + * @ramp_delay: Time to settle down after voltage change (unit: uV/us) + * @settling_time: Time to settle down after voltage change when voltage + * change is non-linear (unit: microseconds). + * @settling_time_up: Time to settle down after voltage increase when voltage + * change is non-linear (unit: microseconds). + * @settling_time_down : Time to settle down after voltage decrease when + * voltage change is non-linear (unit: microseconds). + * @active_discharge: Enable/disable active discharge. The enum + * regulator_active_discharge values are used for + * initialisation. + * @enable_time: Turn-on time of the rails (unit: microseconds) + */ +struct regulation_constraints { + + const char *name; + + /* voltage output range (inclusive) - for voltage control */ + int min_uV; + int max_uV; + + int uV_offset; + + /* current output range (inclusive) - for current control */ + int min_uA; + int max_uA; + int ilim_uA; + + int system_load; + + /* used for coupled regulators */ + int max_spread; + + /* valid regulator operating modes for this machine */ + unsigned int valid_modes_mask; + + /* valid operations for regulator on this machine */ + unsigned int valid_ops_mask; + + /* regulator input voltage - only if supply is another regulator */ + int input_uV; + + /* regulator suspend states for global PMIC STANDBY/HIBERNATE */ + struct regulator_state state_disk; + struct regulator_state state_mem; + struct regulator_state state_standby; + suspend_state_t initial_state; /* suspend state to set at init */ + + /* mode to set on startup */ + unsigned int initial_mode; + + unsigned int ramp_delay; + unsigned int settling_time; + unsigned int settling_time_up; + unsigned int settling_time_down; + unsigned int enable_time; + + unsigned int active_discharge; + + /* constraint flags */ + unsigned always_on:1; /* regulator never off when system is on */ + unsigned boot_on:1; /* bootloader/firmware enabled regulator */ + unsigned apply_uV:1; /* apply uV constraint if min == max */ + unsigned ramp_disable:1; /* disable ramp delay */ + unsigned soft_start:1; /* ramp voltage slowly */ + unsigned pull_down:1; /* pull down resistor when regulator off */ + unsigned over_current_protection:1; /* auto disable on over current */ +}; + +/** + * struct regulator_consumer_supply - supply -> device mapping + * + * This maps a supply name to a device. Use of dev_name allows support for + * buses which make struct device available late such as I2C. + * + * @dev_name: Result of dev_name() for the consumer. + * @supply: Name for the supply. + */ +struct regulator_consumer_supply { + const char *dev_name; /* dev_name() for consumer */ + const char *supply; /* consumer supply - e.g. "vcc" */ +}; + +/* Initialize struct regulator_consumer_supply */ +#define REGULATOR_SUPPLY(_name, _dev_name) \ +{ \ + .supply = _name, \ + .dev_name = _dev_name, \ +} + +/** + * struct regulator_init_data - regulator platform initialisation data. + * + * Initialisation constraints, our supply and consumers supplies. + * + * @supply_regulator: Parent regulator. 
Specified using the regulator name + * as it appears in the name field in sysfs, which can + * be explicitly set using the constraints field 'name'. + * + * @constraints: Constraints. These must be specified for the regulator to + * be usable. + * @num_consumer_supplies: Number of consumer device supplies. + * @consumer_supplies: Consumer device supply configuration. + * + * @regulator_init: Callback invoked when the regulator has been registered. + * @driver_data: Data passed to regulator_init. + */ +struct regulator_init_data { + const char *supply_regulator; /* or NULL for system supply */ + + struct regulation_constraints constraints; + + int num_consumer_supplies; + struct regulator_consumer_supply *consumer_supplies; + + /* optional regulator machine specific init */ + int (*regulator_init)(void *driver_data); + void *driver_data; /* core does not touch this */ +}; + +#ifdef CONFIG_REGULATOR +void regulator_has_full_constraints(void); +#else +static inline void regulator_has_full_constraints(void) +{ +} +#endif + +static inline int regulator_suspend_prepare(suspend_state_t state) +{ + return 0; +} +static inline int regulator_suspend_finish(void) +{ + return 0; +} + +#endif diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h new file mode 100644 index 000000000..cedd0febe --- /dev/null +++ b/include/linux/regulator/max1586.h @@ -0,0 +1,63 @@ +/* + * max1586.h -- Voltage regulation for the Maxim 1586 + * + * Copyright (C) 2008 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef REGULATOR_MAX1586 +#define REGULATOR_MAX1586 + +#include + +#define MAX1586_V3 0 +#define MAX1586_V6 1 + +/* precalculated values for v3_gain */ +#define MAX1586_GAIN_NO_R24 1000000 /* 700000 .. 1475000 mV */ +#define MAX1586_GAIN_R24_3k32 1051098 /* 735768 .. 1550369 mV */ +#define MAX1586_GAIN_R24_5k11 1078648 /* 755053 .. 1591005 mV */ +#define MAX1586_GAIN_R24_7k5 1115432 /* 780802 .. 1645262 mV */ + +/** + * max1586_subdev_data - regulator data + * @id: regulator Id (either MAX1586_V3 or MAX1586_V6) + * @name: regulator cute name (example for V3: "vcc_core") + * @platform_data: regulator init data (constraints, supplies, ...) + */ +struct max1586_subdev_data { + int id; + const char *name; + struct regulator_init_data *platform_data; +}; + +/** + * max1586_platform_data - platform data for max1586 + * @num_subdevs: number of regulators used (may be 1 or 2) + * @subdevs: regulator used + * At most, there will be a regulator for V3 and one for V6 voltages. + * @v3_gain: gain on the V3 voltage output multiplied by 1e6. + * This can be calculated as ((1 + R24/R25 + R24/185.5kOhm) * 1e6) + * for an external resistor configuration as described in the + * data sheet (R25=100kOhm). 
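Putting the machine interface together: a board file names each consumer with REGULATOR_SUPPLY(), wraps the constraints in a struct regulator_init_data, and hands that to whichever driver supplies the rail, for example through the struct fixed_voltage_config shown earlier, which is then attached as platform_data of the fixed-voltage regulator device. Everything in the sketch below (device names, rail name, voltage) is invented for illustration:

#include <linux/kernel.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>

/* hypothetical board: a fixed, always-on 3.3 V rail feeding the MMC slot */
static struct regulator_consumer_supply board_vmmc_consumers[] = {
        REGULATOR_SUPPLY("vmmc", "mmci.0"), /* dev_name() of the consumer */
};

static struct regulator_init_data board_vmmc_init_data = {
        .constraints = {
                .name      = "vmmc-3v3",
                .always_on = 1,
        },
        .num_consumer_supplies = ARRAY_SIZE(board_vmmc_consumers),
        .consumer_supplies     = board_vmmc_consumers,
};

static struct fixed_voltage_config board_vmmc_config = {
        .supply_name     = "vmmc-3v3",
        .microvolts      = 3300000,
        .gpio            = -EINVAL,     /* no software enable control */
        .enabled_at_boot = 1,
        .init_data       = &board_vmmc_init_data,
};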
+ */ +struct max1586_platform_data { + int num_subdevs; + struct max1586_subdev_data *subdevs; + int v3_gain; +}; + +#endif diff --git a/include/linux/regulator/max8649.h b/include/linux/regulator/max8649.h new file mode 100644 index 000000000..417d14ecd --- /dev/null +++ b/include/linux/regulator/max8649.h @@ -0,0 +1,44 @@ +/* + * Interface of Maxim max8649 + * + * Copyright (C) 2009-2010 Marvell International Ltd. + * Haojian Zhuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_REGULATOR_MAX8649_H +#define __LINUX_REGULATOR_MAX8649_H + +#include + +enum { + MAX8649_EXTCLK_26MHZ = 0, + MAX8649_EXTCLK_13MHZ, + MAX8649_EXTCLK_19MHZ, /* 19.2MHz */ +}; + +enum { + MAX8649_RAMP_32MV = 0, + MAX8649_RAMP_16MV, + MAX8649_RAMP_8MV, + MAX8649_RAMP_4MV, + MAX8649_RAMP_2MV, + MAX8649_RAMP_1MV, + MAX8649_RAMP_0_5MV, + MAX8649_RAMP_0_25MV, +}; + +struct max8649_platform_data { + struct regulator_init_data *regulator; + + unsigned mode:2; /* bit[1:0] = VID1,VID0 */ + unsigned extclk_freq:2; + unsigned extclk:1; + unsigned ramp_timing:3; + unsigned ramp_down:1; +}; + +#endif /* __LINUX_REGULATOR_MAX8649_H */ diff --git a/include/linux/regulator/max8660.h b/include/linux/regulator/max8660.h new file mode 100644 index 000000000..f8a6a4844 --- /dev/null +++ b/include/linux/regulator/max8660.h @@ -0,0 +1,57 @@ +/* + * max8660.h -- Voltage regulation for the Maxim 8660/8661 + * + * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_REGULATOR_MAX8660_H +#define __LINUX_REGULATOR_MAX8660_H + +#include + +enum { + MAX8660_V3, + MAX8660_V4, + MAX8660_V5, + MAX8660_V6, + MAX8660_V7, + MAX8660_V_END, +}; + +/** + * max8660_subdev_data - regulator subdev data + * @id: regulator id + * @name: regulator name + * @platform_data: regulator init data + */ +struct max8660_subdev_data { + int id; + const char *name; + struct regulator_init_data *platform_data; +}; + +/** + * max8660_platform_data - platform data for max8660 + * @num_subdevs: number of regulators used + * @subdevs: pointer to regulators used + * @en34_is_high: if EN34 is driven high, regulators cannot be en-/disabled. 
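For the MAX1586 platform data above, the only non-obvious field is v3_gain, which encodes the external feedback network; with no R24 fitted the precalculated MAX1586_GAIN_NO_R24 value applies and V3 covers the documented 0.7 V..1.475 V range. A minimal, purely illustrative sketch:

#include <linux/kernel.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/max1586.h>

/* hypothetical wiring: V3 only, no external R24 fitted */
static struct regulator_init_data vcc_core_init_data = {
        .constraints = {
                .min_uV         = 700000,
                .max_uV         = 1475000,
                .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
        },
};

static struct max1586_subdev_data board_max1586_subdevs[] = {
        {
                .id            = MAX1586_V3,
                .name          = "vcc_core",
                .platform_data = &vcc_core_init_data,
        },
};

static struct max1586_platform_data board_max1586_pdata = {
        .num_subdevs = ARRAY_SIZE(board_max1586_subdevs),
        .subdevs     = board_max1586_subdevs,
        .v3_gain     = MAX1586_GAIN_NO_R24,
};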
+ */ +struct max8660_platform_data { + int num_subdevs; + struct max8660_subdev_data *subdevs; + unsigned en34_is_high:1; +}; +#endif diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h new file mode 100644 index 000000000..686c42c04 --- /dev/null +++ b/include/linux/regulator/max8952.h @@ -0,0 +1,134 @@ +/* + * max8952.h - Voltage regulation for the Maxim 8952 + * + * Copyright (C) 2010 Samsung Electrnoics + * MyungJoo Ham + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef REGULATOR_MAX8952 +#define REGULATOR_MAX8952 + +#include + +enum { + MAX8952_DVS_MODE0, + MAX8952_DVS_MODE1, + MAX8952_DVS_MODE2, + MAX8952_DVS_MODE3, +}; + +enum { + MAX8952_DVS_770mV = 0, + MAX8952_DVS_780mV, + MAX8952_DVS_790mV, + MAX8952_DVS_800mV, + MAX8952_DVS_810mV, + MAX8952_DVS_820mV, + MAX8952_DVS_830mV, + MAX8952_DVS_840mV, + MAX8952_DVS_850mV, + MAX8952_DVS_860mV, + MAX8952_DVS_870mV, + MAX8952_DVS_880mV, + MAX8952_DVS_890mV, + MAX8952_DVS_900mV, + MAX8952_DVS_910mV, + MAX8952_DVS_920mV, + MAX8952_DVS_930mV, + MAX8952_DVS_940mV, + MAX8952_DVS_950mV, + MAX8952_DVS_960mV, + MAX8952_DVS_970mV, + MAX8952_DVS_980mV, + MAX8952_DVS_990mV, + MAX8952_DVS_1000mV, + MAX8952_DVS_1010mV, + MAX8952_DVS_1020mV, + MAX8952_DVS_1030mV, + MAX8952_DVS_1040mV, + MAX8952_DVS_1050mV, + MAX8952_DVS_1060mV, + MAX8952_DVS_1070mV, + MAX8952_DVS_1080mV, + MAX8952_DVS_1090mV, + MAX8952_DVS_1100mV, + MAX8952_DVS_1110mV, + MAX8952_DVS_1120mV, + MAX8952_DVS_1130mV, + MAX8952_DVS_1140mV, + MAX8952_DVS_1150mV, + MAX8952_DVS_1160mV, + MAX8952_DVS_1170mV, + MAX8952_DVS_1180mV, + MAX8952_DVS_1190mV, + MAX8952_DVS_1200mV, + MAX8952_DVS_1210mV, + MAX8952_DVS_1220mV, + MAX8952_DVS_1230mV, + MAX8952_DVS_1240mV, + MAX8952_DVS_1250mV, + MAX8952_DVS_1260mV, + MAX8952_DVS_1270mV, + MAX8952_DVS_1280mV, + MAX8952_DVS_1290mV, + MAX8952_DVS_1300mV, + MAX8952_DVS_1310mV, + MAX8952_DVS_1320mV, + MAX8952_DVS_1330mV, + MAX8952_DVS_1340mV, + MAX8952_DVS_1350mV, + MAX8952_DVS_1360mV, + MAX8952_DVS_1370mV, + MAX8952_DVS_1380mV, + MAX8952_DVS_1390mV, + MAX8952_DVS_1400mV, +}; + +enum { + MAX8952_SYNC_FREQ_26MHZ, /* Default */ + MAX8952_SYNC_FREQ_13MHZ, + MAX8952_SYNC_FREQ_19_2MHZ, +}; + +enum { + MAX8952_RAMP_32mV_us = 0, /* Default */ + MAX8952_RAMP_16mV_us, + MAX8952_RAMP_8mV_us, + MAX8952_RAMP_4mV_us, + MAX8952_RAMP_2mV_us, + MAX8952_RAMP_1mV_us, + MAX8952_RAMP_0_5mV_us, + MAX8952_RAMP_0_25mV_us, +}; + +#define MAX8952_NUM_DVS_MODE 4 + +struct max8952_platform_data { + int gpio_vid0; + int gpio_vid1; + + u32 default_mode; + u32 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */ + + u32 sync_freq; + u32 ramp_speed; + + struct regulator_init_data *reg_data; +}; + + +#endif /* REGULATOR_MAX8952 */ diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h new file mode 100644 index 
000000000..2fcb99802 --- /dev/null +++ b/include/linux/regulator/max8973-regulator.h @@ -0,0 +1,81 @@ +/* + * max8973-regulator.h -- MAXIM 8973 regulator + * + * Interface for regulator driver for MAXIM 8973 DC-DC step-down + * switching regulator. + * + * Copyright (C) 2012 NVIDIA Corporation + + * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef __LINUX_REGULATOR_MAX8973_H +#define __LINUX_REGULATOR_MAX8973_H + +/* + * Control flags for configuration of the device. + * Client need to pass this information with ORed + */ +#define MAX8973_CONTROL_REMOTE_SENSE_ENABLE 0x00000001 +#define MAX8973_CONTROL_FALLING_SLEW_RATE_ENABLE 0x00000002 +#define MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE 0x00000004 +#define MAX8973_CONTROL_BIAS_ENABLE 0x00000008 +#define MAX8973_CONTROL_PULL_DOWN_ENABLE 0x00000010 +#define MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE 0x00000020 + +#define MAX8973_CONTROL_CLKADV_TRIP_DISABLED 0x00000000 +#define MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US 0x00010000 +#define MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US 0x00020000 +#define MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US_HIST_DIS 0x00030000 + +#define MAX8973_CONTROL_INDUCTOR_VALUE_NOMINAL 0x00000000 +#define MAX8973_CONTROL_INDUCTOR_VALUE_MINUS_30_PER 0x00100000 +#define MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_30_PER 0x00200000 +#define MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_60_PER 0x00300000 + +/* + * struct max8973_regulator_platform_data - max8973 regulator platform data. + * + * @reg_init_data: The regulator init data. + * @control_flags: Control flags which are ORed value of above flags to + * configure device. + * @junction_temp_warning: Junction temp in millicelcius on which warning need + * to be set. Thermal functionality is only supported on + * MAX77621. The threshold warning supported by MAX77621 + * are 120C and 140C. + * @enable_ext_control: Enable the voltage enable/disable through external + * control signal from EN input pin. If it is false then + * voltage output will be enabled/disabled through EN bit of + * device register. + * @enable_gpio: Enable GPIO. If EN pin is controlled through GPIO from host + * then GPIO number can be provided. If no GPIO controlled then + * it should be -1. + * @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic. + * @dvs_def_state: Default state of dvs. 1 if it is high else 0. 
+ */ +struct max8973_regulator_platform_data { + struct regulator_init_data *reg_init_data; + unsigned long control_flags; + unsigned long junction_temp_warning; + bool enable_ext_control; + int enable_gpio; + int dvs_gpio; + unsigned dvs_def_state:1; +}; + +#endif /* __LINUX_REGULATOR_MAX8973_H */ diff --git a/include/linux/regulator/mt6311.h b/include/linux/regulator/mt6311.h new file mode 100644 index 000000000..847325939 --- /dev/null +++ b/include/linux/regulator/mt6311.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2015 MediaTek Inc. + * Author: Henry Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_REGULATOR_MT6311_H +#define __LINUX_REGULATOR_MT6311_H + +#define MT6311_MAX_REGULATORS 2 + +enum { + MT6311_ID_VDVFS = 0, + MT6311_ID_VBIASN, +}; + +#define MT6311_E1_CID_CODE 0x10 +#define MT6311_E2_CID_CODE 0x20 +#define MT6311_E3_CID_CODE 0x30 + +#endif /* __LINUX_REGULATOR_MT6311_H */ diff --git a/include/linux/regulator/mt6323-regulator.h b/include/linux/regulator/mt6323-regulator.h new file mode 100644 index 000000000..67011cd1c --- /dev/null +++ b/include/linux/regulator/mt6323-regulator.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2016 MediaTek Inc. + * Author: Chen Zhong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_REGULATOR_MT6323_H +#define __LINUX_REGULATOR_MT6323_H + +enum { + MT6323_ID_VPROC = 0, + MT6323_ID_VSYS, + MT6323_ID_VPA, + MT6323_ID_VTCXO, + MT6323_ID_VCN28, + MT6323_ID_VCN33_BT, + MT6323_ID_VCN33_WIFI, + MT6323_ID_VA, + MT6323_ID_VCAMA, + MT6323_ID_VIO28 = 9, + MT6323_ID_VUSB, + MT6323_ID_VMC, + MT6323_ID_VMCH, + MT6323_ID_VEMC3V3, + MT6323_ID_VGP1, + MT6323_ID_VGP2, + MT6323_ID_VGP3, + MT6323_ID_VCN18, + MT6323_ID_VSIM1, + MT6323_ID_VSIM2, + MT6323_ID_VRTC, + MT6323_ID_VCAMAF, + MT6323_ID_VIBR, + MT6323_ID_VRF18, + MT6323_ID_VM, + MT6323_ID_VIO18, + MT6323_ID_VCAMD, + MT6323_ID_VCAMIO, + MT6323_ID_RG_MAX, +}; + +#define MT6323_MAX_REGULATOR MT6323_ID_RG_MAX + +#endif /* __LINUX_REGULATOR_MT6323_H */ diff --git a/include/linux/regulator/mt6380-regulator.h b/include/linux/regulator/mt6380-regulator.h new file mode 100644 index 000000000..465182da6 --- /dev/null +++ b/include/linux/regulator/mt6380-regulator.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Chenglin Xu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
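The MAX8973 platform data above expects the MAX8973_CONTROL_* flags to be ORed together into @control_flags; a minimal hedged sketch, with purely illustrative flag choices and reusing the hypothetical init data from the earlier sketch:

static struct max8973_regulator_platform_data board_max8973_pdata = {
	.reg_init_data	= &board_vcore_init_data,	/* hypothetical */
	.control_flags	= MAX8973_CONTROL_REMOTE_SENSE_ENABLE |
			  MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE |
			  MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US,
	.enable_ext_control = false,	/* enable via the EN bit, not the EN pin */
	.enable_gpio	= -1,		/* EN pin not driven by a host GPIO */
	.dvs_gpio	= -1,		/* DVS pin tied to fixed logic */
	.dvs_def_state	= 0,
};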
+ */ + +#ifndef __LINUX_REGULATOR_mt6380_H +#define __LINUX_REGULATOR_mt6380_H + +enum { + MT6380_ID_VCPU = 0, + MT6380_ID_VCORE, + MT6380_ID_VRF, + MT6380_ID_VMLDO, + MT6380_ID_VALDO, + MT6380_ID_VPHYLDO, + MT6380_ID_VDDRLDO, + MT6380_ID_VTLDO, + MT6380_ID_RG_MAX, +}; + +#define MT6380_MAX_REGULATOR MT6380_ID_RG_MAX + +#endif /* __LINUX_REGULATOR_mt6380_H */ diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h new file mode 100644 index 000000000..30cc5963e --- /dev/null +++ b/include/linux/regulator/mt6397-regulator.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Flora Fu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_REGULATOR_MT6397_H +#define __LINUX_REGULATOR_MT6397_H + +enum { + MT6397_ID_VPCA15 = 0, + MT6397_ID_VPCA7, + MT6397_ID_VSRAMCA15, + MT6397_ID_VSRAMCA7, + MT6397_ID_VCORE, + MT6397_ID_VGPU, + MT6397_ID_VDRM, + MT6397_ID_VIO18 = 7, + MT6397_ID_VTCXO, + MT6397_ID_VA28, + MT6397_ID_VCAMA, + MT6397_ID_VIO28, + MT6397_ID_VUSB, + MT6397_ID_VMC, + MT6397_ID_VMCH, + MT6397_ID_VEMC3V3, + MT6397_ID_VGP1, + MT6397_ID_VGP2, + MT6397_ID_VGP3, + MT6397_ID_VGP4, + MT6397_ID_VGP5, + MT6397_ID_VGP6, + MT6397_ID_VIBR, + MT6397_ID_RG_MAX, +}; + +#define MT6397_MAX_REGULATOR MT6397_ID_RG_MAX +#define MT6397_REGULATOR_ID97 0x97 +#define MT6397_REGULATOR_ID91 0x91 + +#endif /* __LINUX_REGULATOR_MT6397_H */ diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h new file mode 100644 index 000000000..df7f154a2 --- /dev/null +++ b/include/linux/regulator/of_regulator.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * OpenFirmware regulator support routines + * + */ + +#ifndef __LINUX_OF_REG_H +#define __LINUX_OF_REG_H + +struct regulator_desc; + +struct of_regulator_match { + const char *name; + void *driver_data; + struct regulator_init_data *init_data; + struct device_node *of_node; + const struct regulator_desc *desc; +}; + +#if defined(CONFIG_OF) +extern struct regulator_init_data + *of_get_regulator_init_data(struct device *dev, + struct device_node *node, + const struct regulator_desc *desc); +extern int of_regulator_match(struct device *dev, struct device_node *node, + struct of_regulator_match *matches, + unsigned int num_matches); +#else +static inline struct regulator_init_data + *of_get_regulator_init_data(struct device *dev, + struct device_node *node, + const struct regulator_desc *desc) +{ + return NULL; +} + +static inline int of_regulator_match(struct device *dev, + struct device_node *node, + struct of_regulator_match *matches, + unsigned int num_matches) +{ + return 0; +} +#endif /* CONFIG_OF */ + +#endif /* __LINUX_OF_REG_H */ diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h new file mode 100644 index 000000000..cb5aecd40 --- /dev/null +++ b/include/linux/regulator/pfuze100.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved. 
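As a hedged sketch of the OF helpers above: a PMIC driver would typically list the names of its regulator child nodes in an of_regulator_match table and let of_regulator_match() fill in the init data found in the device tree. The "regulators" subnode layout and the "buck1"/"buck2" names are assumptions for the example.

static struct of_regulator_match board_pmic_matches[] = {
	{ .name = "buck1" },	/* child node names under the "regulators" node */
	{ .name = "buck2" },
};

static int board_pmic_parse_dt(struct device *dev)
{
	struct device_node *np;
	unsigned int i;
	int ret;

	np = of_get_child_by_name(dev->of_node, "regulators");
	if (!np)
		return -ENODEV;

	ret = of_regulator_match(dev, np, board_pmic_matches,
				 ARRAY_SIZE(board_pmic_matches));
	of_node_put(np);
	if (ret < 0)
		return ret;

	/* init_data/of_node are now set for every match found in the DT */
	for (i = 0; i < ARRAY_SIZE(board_pmic_matches); i++)
		if (board_pmic_matches[i].init_data)
			dev_dbg(dev, "%s described in DT\n",
				board_pmic_matches[i].name);

	return 0;
}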
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +#ifndef __LINUX_REG_PFUZE100_H +#define __LINUX_REG_PFUZE100_H + +#define PFUZE100_SW1AB 0 +#define PFUZE100_SW1C 1 +#define PFUZE100_SW2 2 +#define PFUZE100_SW3A 3 +#define PFUZE100_SW3B 4 +#define PFUZE100_SW4 5 +#define PFUZE100_SWBST 6 +#define PFUZE100_VSNVS 7 +#define PFUZE100_VREFDDR 8 +#define PFUZE100_VGEN1 9 +#define PFUZE100_VGEN2 10 +#define PFUZE100_VGEN3 11 +#define PFUZE100_VGEN4 12 +#define PFUZE100_VGEN5 13 +#define PFUZE100_VGEN6 14 +#define PFUZE100_MAX_REGULATOR 15 + +#define PFUZE200_SW1AB 0 +#define PFUZE200_SW2 1 +#define PFUZE200_SW3A 2 +#define PFUZE200_SW3B 3 +#define PFUZE200_SWBST 4 +#define PFUZE200_VSNVS 5 +#define PFUZE200_VREFDDR 6 +#define PFUZE200_VGEN1 7 +#define PFUZE200_VGEN2 8 +#define PFUZE200_VGEN3 9 +#define PFUZE200_VGEN4 10 +#define PFUZE200_VGEN5 11 +#define PFUZE200_VGEN6 12 +#define PFUZE200_COIN 13 + +#define PFUZE3000_SW1A 0 +#define PFUZE3000_SW1B 1 +#define PFUZE3000_SW2 2 +#define PFUZE3000_SW3 3 +#define PFUZE3000_SWBST 4 +#define PFUZE3000_VSNVS 5 +#define PFUZE3000_VREFDDR 6 +#define PFUZE3000_VLDO1 7 +#define PFUZE3000_VLDO2 8 +#define PFUZE3000_VCCSD 9 +#define PFUZE3000_V33 10 +#define PFUZE3000_VLDO3 11 +#define PFUZE3000_VLDO4 12 + +#define PFUZE3001_SW1 0 +#define PFUZE3001_SW2 1 +#define PFUZE3001_SW3 2 +#define PFUZE3001_VSNVS 3 +#define PFUZE3001_VLDO1 4 +#define PFUZE3001_VLDO2 5 +#define PFUZE3001_VCCSD 6 +#define PFUZE3001_V33 7 +#define PFUZE3001_VLDO3 8 +#define PFUZE3001_VLDO4 9 + +struct regulator_init_data; + +struct pfuze_regulator_platform_data { + struct regulator_init_data *init_data[PFUZE100_MAX_REGULATOR]; +}; + +#endif /* __LINUX_REG_PFUZE100_H */ diff --git a/include/linux/regulator/tps51632-regulator.h b/include/linux/regulator/tps51632-regulator.h new file mode 100644 index 000000000..d00841e1a --- /dev/null +++ b/include/linux/regulator/tps51632-regulator.h @@ -0,0 +1,47 @@ +/* + * tps51632-regulator.h -- TPS51632 regulator + * + * Interface for regulator driver for TPS51632 3-2-1 Phase D-Cap Step Down + * Driverless Controller with serial VID control and DVFS. + * + * Copyright (C) 2012 NVIDIA Corporation + + * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
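For the legacy platform-data path of the PFUZE100 header above, the init_data array is simply indexed by the PFUZE100_* numbers; a small hedged sketch (both init-data pointers are hypothetical board definitions):

static struct pfuze_regulator_platform_data board_pfuze_pdata = {
	.init_data = {
		[PFUZE100_SW1AB] = &board_vcore_init_data,	/* hypothetical */
		[PFUZE100_VGEN1] = &board_vgen1_init_data,	/* hypothetical */
		/* outputs without an entry get no board-specific init data */
	},
};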
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef __LINUX_REGULATOR_TPS51632_H +#define __LINUX_REGULATOR_TPS51632_H + +/* + * struct tps51632_regulator_platform_data - tps51632 regulator platform data. + * + * @reg_init_data: The regulator init data. + * @enable_pwm_dvfs: Enable PWM DVFS or not. + * @dvfs_step_20mV: Step for DVFS is 20mV or 10mV. + * @max_voltage_uV: Maximum possible voltage in PWM-DVFS mode. + * @base_voltage_uV: Base voltage when PWM-DVFS enabled. + */ +struct tps51632_regulator_platform_data { + struct regulator_init_data *reg_init_data; + bool enable_pwm_dvfs; + bool dvfs_step_20mV; + int max_voltage_uV; + int base_voltage_uV; +}; + +#endif /* __LINUX_REGULATOR_TPS51632_H */ diff --git a/include/linux/regulator/tps62360.h b/include/linux/regulator/tps62360.h new file mode 100644 index 000000000..a4c49394c --- /dev/null +++ b/include/linux/regulator/tps62360.h @@ -0,0 +1,53 @@ +/* + * tps62360.h -- TI tps62360 + * + * Interface for regulator driver for TI TPS62360 Processor core supply + * + * Copyright (C) 2012 NVIDIA Corporation + + * Author: Laxman Dewangan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef __LINUX_REGULATOR_TPS62360_H +#define __LINUX_REGULATOR_TPS62360_H + +/* + * struct tps62360_regulator_platform_data - tps62360 regulator platform data. + * + * @reg_init_data: The regulator init data. + * @en_discharge: Enable discharge the output capacitor via internal + * register. + * @en_internal_pulldn: internal pull down enable or not. + * @vsel0_gpio: Gpio number for vsel0. It should be -1 if this is tied with + * fixed logic. + * @vsel1_gpio: Gpio number for vsel1. It should be -1 if this is tied with + * fixed logic. + * @vsel0_def_state: Default state of vsel0. 1 if it is high else 0. + * @vsel1_def_state: Default state of vsel1. 1 if it is high else 0. + */ +struct tps62360_regulator_platform_data { + struct regulator_init_data *reg_init_data; + bool en_discharge; + bool en_internal_pulldn; + int vsel0_gpio; + int vsel1_gpio; + int vsel0_def_state; + int vsel1_def_state; +}; + +#endif /* __LINUX_REGULATOR_TPS62360_H */ diff --git a/include/linux/regulator/tps6507x.h b/include/linux/regulator/tps6507x.h new file mode 100644 index 000000000..4892f591b --- /dev/null +++ b/include/linux/regulator/tps6507x.h @@ -0,0 +1,32 @@ +/* + * tps6507x.h -- Voltage regulation for the Texas Instruments TPS6507X + * + * Copyright (C) 2010 Texas Instruments, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef REGULATOR_TPS6507X +#define REGULATOR_TPS6507X + +/** + * tps6507x_reg_platform_data - platform data for tps6507x + * @defdcdc_default: Defines whether DCDC high or the low register controls + * output voltage by default. Valid for DCDC2 and DCDC3 outputs only. + */ +struct tps6507x_reg_platform_data { + bool defdcdc_default; +}; + +#endif diff --git a/include/linux/regulator/userspace-consumer.h b/include/linux/regulator/userspace-consumer.h new file mode 100644 index 000000000..b5dba0628 --- /dev/null +++ b/include/linux/regulator/userspace-consumer.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __REGULATOR_PLATFORM_CONSUMER_H_ +#define __REGULATOR_PLATFORM_CONSUMER_H_ + +struct regulator_consumer_supply; + +/** + * struct regulator_userspace_consumer_data - line consumer + * initialisation data. + * + * @name: Name for the consumer line + * @num_supplies: Number of supplies feeding the line + * @supplies: Supplies configuration. + * @init_on: Set if the regulators supplying the line should be + * enabled during initialisation + */ +struct regulator_userspace_consumer_data { + const char *name; + + int num_supplies; + struct regulator_bulk_data *supplies; + + bool init_on; +}; + +#endif /* __REGULATOR_PLATFORM_CONSUMER_H_ */ diff --git a/include/linux/relay.h b/include/linux/relay.h new file mode 100644 index 000000000..c759f96e3 --- /dev/null +++ b/include/linux/relay.h @@ -0,0 +1,300 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/relay.h + * + * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp + * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com) + * + * CONFIG_RELAY definitions and declarations + */ + +#ifndef _LINUX_RELAY_H +#define _LINUX_RELAY_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Tracks changes to rchan/rchan_buf structs + */ +#define RELAYFS_CHANNEL_VERSION 7 + +/* + * Per-cpu relay channel buffer + */ +struct rchan_buf +{ + void *start; /* start of channel buffer */ + void *data; /* start of current sub-buffer */ + size_t offset; /* current offset into sub-buffer */ + size_t subbufs_produced; /* count of sub-buffers produced */ + size_t subbufs_consumed; /* count of sub-buffers consumed */ + struct rchan *chan; /* associated channel */ + wait_queue_head_t read_wait; /* reader wait queue */ + struct irq_work wakeup_work; /* reader wakeup */ + struct dentry *dentry; /* channel file dentry */ + struct kref kref; /* channel buffer refcount */ + struct page **page_array; /* array of current buffer pages */ + unsigned int page_count; /* number of current buffer pages */ + unsigned int finalized; /* buffer has been finalized */ + size_t *padding; /* padding counts per sub-buffer */ + size_t prev_padding; /* temporary variable */ + size_t 
bytes_consumed; /* bytes consumed in cur read subbuf */ + size_t early_bytes; /* bytes consumed before VFS inited */ + unsigned int cpu; /* this buf's cpu */ +} ____cacheline_aligned; + +/* + * Relay channel data structure + */ +struct rchan +{ + u32 version; /* the version of this struct */ + size_t subbuf_size; /* sub-buffer size */ + size_t n_subbufs; /* number of sub-buffers per buffer */ + size_t alloc_size; /* total buffer size allocated */ + struct rchan_callbacks *cb; /* client callbacks */ + struct kref kref; /* channel refcount */ + void *private_data; /* for user-defined data */ + size_t last_toobig; /* tried to log event > subbuf size */ + struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */ + int is_global; /* One global buffer ? */ + struct list_head list; /* for channel list */ + struct dentry *parent; /* parent dentry passed to open */ + int has_base_filename; /* has a filename associated? */ + char base_filename[NAME_MAX]; /* saved base filename */ +}; + +/* + * Relay channel client callbacks + */ +struct rchan_callbacks +{ + /* + * subbuf_start - called on buffer-switch to a new sub-buffer + * @buf: the channel buffer containing the new sub-buffer + * @subbuf: the start of the new sub-buffer + * @prev_subbuf: the start of the previous sub-buffer + * @prev_padding: unused space at the end of previous sub-buffer + * + * The client should return 1 to continue logging, 0 to stop + * logging. + * + * NOTE: subbuf_start will also be invoked when the buffer is + * created, so that the first sub-buffer can be initialized + * if necessary. In this case, prev_subbuf will be NULL. + * + * NOTE: the client can reserve bytes at the beginning of the new + * sub-buffer by calling subbuf_start_reserve() in this callback. + */ + int (*subbuf_start) (struct rchan_buf *buf, + void *subbuf, + void *prev_subbuf, + size_t prev_padding); + + /* + * buf_mapped - relay buffer mmap notification + * @buf: the channel buffer + * @filp: relay file pointer + * + * Called when a relay file is successfully mmapped + */ + void (*buf_mapped)(struct rchan_buf *buf, + struct file *filp); + + /* + * buf_unmapped - relay buffer unmap notification + * @buf: the channel buffer + * @filp: relay file pointer + * + * Called when a relay file is successfully unmapped + */ + void (*buf_unmapped)(struct rchan_buf *buf, + struct file *filp); + /* + * create_buf_file - create file to represent a relay channel buffer + * @filename: the name of the file to create + * @parent: the parent of the file to create + * @mode: the mode of the file to create + * @buf: the channel buffer + * @is_global: outparam - set non-zero if the buffer should be global + * + * Called during relay_open(), once for each per-cpu buffer, + * to allow the client to create a file to be used to + * represent the corresponding channel buffer. If the file is + * created outside of relay, the parent must also exist in + * that filesystem. + * + * The callback should return the dentry of the file created + * to represent the relay buffer. + * + * Setting the is_global outparam to a non-zero value will + * cause relay_open() to create a single global buffer rather + * than the default set of per-cpu buffers. + * + * See Documentation/filesystems/relay.txt for more info. 
+ */ + struct dentry *(*create_buf_file)(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global); + + /* + * remove_buf_file - remove file representing a relay channel buffer + * @dentry: the dentry of the file to remove + * + * Called during relay_close(), once for each per-cpu buffer, + * to allow the client to remove a file used to represent a + * channel buffer. + * + * The callback should return 0 if successful, negative if not. + */ + int (*remove_buf_file)(struct dentry *dentry); +}; + +/* + * CONFIG_RELAY kernel API, kernel/relay.c + */ + +struct rchan *relay_open(const char *base_filename, + struct dentry *parent, + size_t subbuf_size, + size_t n_subbufs, + struct rchan_callbacks *cb, + void *private_data); +extern int relay_late_setup_files(struct rchan *chan, + const char *base_filename, + struct dentry *parent); +extern void relay_close(struct rchan *chan); +extern void relay_flush(struct rchan *chan); +extern void relay_subbufs_consumed(struct rchan *chan, + unsigned int cpu, + size_t consumed); +extern void relay_reset(struct rchan *chan); +extern int relay_buf_full(struct rchan_buf *buf); + +extern size_t relay_switch_subbuf(struct rchan_buf *buf, + size_t length); + +/** + * relay_write - write data into the channel + * @chan: relay channel + * @data: data to be written + * @length: number of bytes to write + * + * Writes data into the current cpu's channel buffer. + * + * Protects the buffer by disabling interrupts. Use this + * if you might be logging from interrupt context. Try + * __relay_write() if you know you won't be logging from + * interrupt context. + */ +static inline void relay_write(struct rchan *chan, + const void *data, + size_t length) +{ + unsigned long flags; + struct rchan_buf *buf; + + local_irq_save(flags); + buf = *this_cpu_ptr(chan->buf); + if (unlikely(buf->offset + length > chan->subbuf_size)) + length = relay_switch_subbuf(buf, length); + memcpy(buf->data + buf->offset, data, length); + buf->offset += length; + local_irq_restore(flags); +} + +/** + * __relay_write - write data into the channel + * @chan: relay channel + * @data: data to be written + * @length: number of bytes to write + * + * Writes data into the current cpu's channel buffer. + * + * Protects the buffer by disabling preemption. Use + * relay_write() if you might be logging from interrupt + * context. + */ +static inline void __relay_write(struct rchan *chan, + const void *data, + size_t length) +{ + struct rchan_buf *buf; + + buf = *get_cpu_ptr(chan->buf); + if (unlikely(buf->offset + length > buf->chan->subbuf_size)) + length = relay_switch_subbuf(buf, length); + memcpy(buf->data + buf->offset, data, length); + buf->offset += length; + put_cpu_ptr(chan->buf); +} + +/** + * relay_reserve - reserve slot in channel buffer + * @chan: relay channel + * @length: number of bytes to reserve + * + * Returns pointer to reserved slot, NULL if full. + * + * Reserves a slot in the current cpu's channel buffer. + * Does not protect the buffer at all - caller must provide + * appropriate synchronization. 
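Tying the relay pieces above together, here is a hedged client sketch along the lines of the pattern described in Documentation/filesystems/relay.txt: the callbacks place each per-cpu buffer file in debugfs (using relay_file_operations, exported a little further down in this header), subbuf_start() drops data instead of overwriting when the buffer is full, and the channel name and sizes are purely illustrative.

static struct dentry *board_create_buf_file(const char *filename,
					    struct dentry *parent,
					    umode_t mode,
					    struct rchan_buf *buf,
					    int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int board_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static int board_subbuf_start(struct rchan_buf *buf, void *subbuf,
			      void *prev_subbuf, size_t prev_padding)
{
	/* return 1 to keep logging, 0 to drop events while the buffer is full */
	return !relay_buf_full(buf);
}

static struct rchan_callbacks board_relay_callbacks = {
	.subbuf_start	 = board_subbuf_start,
	.create_buf_file = board_create_buf_file,
	.remove_buf_file = board_remove_buf_file,
};

static struct rchan *board_chan;

static int board_trace_init(void)
{
	/* 16 sub-buffers of 64 KiB each, created under the debugfs root */
	board_chan = relay_open("board-trace", NULL, 65536, 16,
				&board_relay_callbacks, NULL);
	return board_chan ? 0 : -ENOMEM;
}

Events would then be logged with relay_write(board_chan, &event, sizeof(event)) and the channel torn down with relay_close(board_chan).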
+ */ +static inline void *relay_reserve(struct rchan *chan, size_t length) +{ + void *reserved = NULL; + struct rchan_buf *buf = *get_cpu_ptr(chan->buf); + + if (unlikely(buf->offset + length > buf->chan->subbuf_size)) { + length = relay_switch_subbuf(buf, length); + if (!length) + goto end; + } + reserved = buf->data + buf->offset; + buf->offset += length; + +end: + put_cpu_ptr(chan->buf); + return reserved; +} + +/** + * subbuf_start_reserve - reserve bytes at the start of a sub-buffer + * @buf: relay channel buffer + * @length: number of bytes to reserve + * + * Helper function used to reserve bytes at the beginning of + * a sub-buffer in the subbuf_start() callback. + */ +static inline void subbuf_start_reserve(struct rchan_buf *buf, + size_t length) +{ + BUG_ON(length >= buf->chan->subbuf_size - 1); + buf->offset = length; +} + +/* + * exported relay file operations, kernel/relay.c + */ +extern const struct file_operations relay_file_operations; + +#ifdef CONFIG_RELAY +int relay_prepare_cpu(unsigned int cpu); +#else +#define relay_prepare_cpu NULL +#endif + +#endif /* _LINUX_RELAY_H */ + diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h new file mode 100644 index 000000000..e3c5d856b --- /dev/null +++ b/include/linux/remoteproc.h @@ -0,0 +1,577 @@ +/* + * Remote Processor Framework + * + * Copyright(c) 2011 Texas Instruments, Inc. + * Copyright(c) 2011 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Texas Instruments nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef REMOTEPROC_H +#define REMOTEPROC_H + +#include +#include +#include +#include +#include +#include + +/** + * struct resource_table - firmware resource table header + * @ver: version number + * @num: number of resource entries + * @reserved: reserved (must be zero) + * @offset: array of offsets pointing at the various resource entries + * + * A resource table is essentially a list of system resources required + * by the remote processor. It may also include configuration entries. 
+ * If needed, the remote processor firmware should contain this table + * as a dedicated ".resource_table" ELF section. + * + * Some resources entries are mere announcements, where the host is informed + * of specific remoteproc configuration. Other entries require the host to + * do something (e.g. allocate a system resource). Sometimes a negotiation + * is expected, where the firmware requests a resource, and once allocated, + * the host should provide back its details (e.g. address of an allocated + * memory region). + * + * The header of the resource table, as expressed by this structure, + * contains a version number (should we need to change this format in the + * future), the number of available resource entries, and their offsets + * in the table. + * + * Immediately following this header are the resource entries themselves, + * each of which begins with a resource entry header (as described below). + */ +struct resource_table { + u32 ver; + u32 num; + u32 reserved[2]; + u32 offset[0]; +} __packed; + +/** + * struct fw_rsc_hdr - firmware resource entry header + * @type: resource type + * @data: resource data + * + * Every resource entry begins with a 'struct fw_rsc_hdr' header providing + * its @type. The content of the entry itself will immediately follow + * this header, and it should be parsed according to the resource type. + */ +struct fw_rsc_hdr { + u32 type; + u8 data[0]; +} __packed; + +/** + * enum fw_resource_type - types of resource entries + * + * @RSC_CARVEOUT: request for allocation of a physically contiguous + * memory region. + * @RSC_DEVMEM: request to iommu_map a memory-based peripheral. + * @RSC_TRACE: announces the availability of a trace buffer into which + * the remote processor will be writing logs. + * @RSC_VDEV: declare support for a virtio device, and serve as its + * virtio header. + * @RSC_LAST: just keep this one at the end + * + * For more details regarding a specific resource type, please see its + * dedicated structure below. + * + * Please note that these values are used as indices to the rproc_handle_rsc + * lookup table, so please keep them sane. Moreover, @RSC_LAST is used to + * check the validity of an index before the lookup table is accessed, so + * please update it as needed. + */ +enum fw_resource_type { + RSC_CARVEOUT = 0, + RSC_DEVMEM = 1, + RSC_TRACE = 2, + RSC_VDEV = 3, + RSC_LAST = 4, +}; + +#define FW_RSC_ADDR_ANY (-1) + +/** + * struct fw_rsc_carveout - physically contiguous memory request + * @da: device address + * @pa: physical address + * @len: length (in bytes) + * @flags: iommu protection flags + * @reserved: reserved (must be zero) + * @name: human-readable name of the requested memory region + * + * This resource entry requests the host to allocate a physically contiguous + * memory region. + * + * These request entries should precede other firmware resource entries, + * as other entries might request placing other data objects inside + * these memory regions (e.g. data/code segments, trace resource entries, ...). + * + * Allocating memory this way helps utilizing the reserved physical memory + * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries + * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB + * pressure is important; it may have a substantial impact on performance. + * + * If the firmware is compiled with static addresses, then @da should specify + * the expected device address of this memory region. 
If @da is set to + * FW_RSC_ADDR_ANY, then the host will dynamically allocate it, and then + * overwrite @da with the dynamically allocated address. + * + * We will always use @da to negotiate the device addresses, even if it + * isn't using an iommu. In that case, though, it will obviously contain + * physical addresses. + * + * Some remote processors needs to know the allocated physical address + * even if they do use an iommu. This is needed, e.g., if they control + * hardware accelerators which access the physical memory directly (this + * is the case with OMAP4 for instance). In that case, the host will + * overwrite @pa with the dynamically allocated physical address. + * Generally we don't want to expose physical addresses if we don't have to + * (remote processors are generally _not_ trusted), so we might want to + * change this to happen _only_ when explicitly required by the hardware. + * + * @flags is used to provide IOMMU protection flags, and @name should + * (optionally) contain a human readable name of this carveout region + * (mainly for debugging purposes). + */ +struct fw_rsc_carveout { + u32 da; + u32 pa; + u32 len; + u32 flags; + u32 reserved; + u8 name[32]; +} __packed; + +/** + * struct fw_rsc_devmem - iommu mapping request + * @da: device address + * @pa: physical address + * @len: length (in bytes) + * @flags: iommu protection flags + * @reserved: reserved (must be zero) + * @name: human-readable name of the requested region to be mapped + * + * This resource entry requests the host to iommu map a physically contiguous + * memory region. This is needed in case the remote processor requires + * access to certain memory-based peripherals; _never_ use it to access + * regular memory. + * + * This is obviously only needed if the remote processor is accessing memory + * via an iommu. + * + * @da should specify the required device address, @pa should specify + * the physical address we want to map, @len should specify the size of + * the mapping and @flags is the IOMMU protection flags. As always, @name may + * (optionally) contain a human readable name of this mapping (mainly for + * debugging purposes). + * + * Note: at this point we just "trust" those devmem entries to contain valid + * physical addresses, but this isn't safe and will be changed: eventually we + * want remoteproc implementations to provide us ranges of physical addresses + * the firmware is allowed to request, and not allow firmwares to request + * access to physical addresses that are outside those ranges. + */ +struct fw_rsc_devmem { + u32 da; + u32 pa; + u32 len; + u32 flags; + u32 reserved; + u8 name[32]; +} __packed; + +/** + * struct fw_rsc_trace - trace buffer declaration + * @da: device address + * @len: length (in bytes) + * @reserved: reserved (must be zero) + * @name: human-readable name of the trace buffer + * + * This resource entry provides the host information about a trace buffer + * into which the remote processor will write log messages. + * + * @da specifies the device address of the buffer, @len specifies + * its size, and @name may contain a human readable name of the trace buffer. + * + * After booting the remote processor, the trace buffers are exposed to the + * user via debugfs entries (called trace0, trace1, etc..). 
+ */ +struct fw_rsc_trace { + u32 da; + u32 len; + u32 reserved; + u8 name[32]; +} __packed; + +/** + * struct fw_rsc_vdev_vring - vring descriptor entry + * @da: device address + * @align: the alignment between the consumer and producer parts of the vring + * @num: num of buffers supported by this vring (must be power of two) + * @notifyid is a unique rproc-wide notify index for this vring. This notify + * index is used when kicking a remote processor, to let it know that this + * vring is triggered. + * @pa: physical address + * + * This descriptor is not a resource entry by itself; it is part of the + * vdev resource type (see below). + * + * Note that @da should either contain the device address where + * the remote processor is expecting the vring, or indicate that + * dynamically allocation of the vring's device address is supported. + */ +struct fw_rsc_vdev_vring { + u32 da; + u32 align; + u32 num; + u32 notifyid; + u32 pa; +} __packed; + +/** + * struct fw_rsc_vdev - virtio device header + * @id: virtio device id (as in virtio_ids.h) + * @notifyid is a unique rproc-wide notify index for this vdev. This notify + * index is used when kicking a remote processor, to let it know that the + * status/features of this vdev have changes. + * @dfeatures specifies the virtio device features supported by the firmware + * @gfeatures is a place holder used by the host to write back the + * negotiated features that are supported by both sides. + * @config_len is the size of the virtio config space of this vdev. The config + * space lies in the resource table immediate after this vdev header. + * @status is a place holder where the host will indicate its virtio progress. + * @num_of_vrings indicates how many vrings are described in this vdev header + * @reserved: reserved (must be zero) + * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'. + * + * This resource is a virtio device header: it provides information about + * the vdev, and is then used by the host and its peer remote processors + * to negotiate and share certain virtio properties. + * + * By providing this resource entry, the firmware essentially asks remoteproc + * to statically allocate a vdev upon registration of the rproc (dynamic vdev + * allocation is not yet supported). + * + * Note: unlike virtualization systems, the term 'host' here means + * the Linux side which is running remoteproc to control the remote + * processors. We use the name 'gfeatures' to comply with virtio's terms, + * though there isn't really any virtualized guest OS here: it's the host + * which is responsible for negotiating the final features. + * Yeah, it's a bit confusing. + * + * Note: immediately following this structure is the virtio config space for + * this vdev (which is specific to the vdev; for more info, read the virtio + * spec). the size of the config space is specified by @config_len. 
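To make the resource-table layout above concrete, here is a hedged sketch of what a firmware image might place in its ".resource_table" ELF section, written with the kernel-side type names for illustration: one carveout and one trace buffer, with invented sizes and device addresses. Real firmware mirrors this layout with its own types and linker script.

struct board_resource_table {
	/* mirrors struct resource_table plus its offset[] array */
	u32 ver;
	u32 num;
	u32 reserved[2];
	u32 offset[2];			/* one offset per entry below */
	/* entry 1: code/data carveout (fw_rsc_hdr::type followed by the body) */
	u32 carveout_type;
	struct fw_rsc_carveout carveout;
	/* entry 2: trace buffer */
	u32 trace_type;
	struct fw_rsc_trace trace;
} __packed;

static struct board_resource_table board_rsc_table
		__attribute__((section(".resource_table"))) = {
	.ver = 1,
	.num = 2,
	.offset = {
		offsetof(struct board_resource_table, carveout_type),
		offsetof(struct board_resource_table, trace_type),
	},
	.carveout_type = RSC_CARVEOUT,
	.carveout = {
		.da	= 0x0,			/* firmware linked to run at da 0 (illustrative) */
		.pa	= FW_RSC_ADDR_ANY,	/* host fills in the allocated address */
		.len	= 0x100000,		/* 1 MiB, illustrative */
		.flags	= 0,
		.name	= "fw-mem",
	},
	.trace_type = RSC_TRACE,
	.trace = {
		.da	= 0xf0000,		/* log buffer near the end of the carveout */
		.len	= 0x8000,
		.name	= "trace0",
	},
};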
+ */ +struct fw_rsc_vdev { + u32 id; + u32 notifyid; + u32 dfeatures; + u32 gfeatures; + u32 config_len; + u8 status; + u8 num_of_vrings; + u8 reserved[2]; + struct fw_rsc_vdev_vring vring[0]; +} __packed; + +/** + * struct rproc_mem_entry - memory entry descriptor + * @va: virtual address + * @dma: dma address + * @len: length, in bytes + * @da: device address + * @priv: associated data + * @node: list node + */ +struct rproc_mem_entry { + void *va; + dma_addr_t dma; + int len; + u32 da; + void *priv; + struct list_head node; +}; + +struct rproc; +struct firmware; + +/** + * struct rproc_ops - platform-specific device handlers + * @start: power on the device and boot it + * @stop: power off the device + * @kick: kick a virtqueue (virtqueue id given as a parameter) + * @da_to_va: optional platform hook to perform address translations + * @load_rsc_table: load resource table from firmware image + * @find_loaded_rsc_table: find the loaded resouce table + * @load: load firmeware to memory, where the remote processor + * expects to find it + * @sanity_check: sanity check the fw image + * @get_boot_addr: get boot address to entry point specified in firmware + */ +struct rproc_ops { + int (*start)(struct rproc *rproc); + int (*stop)(struct rproc *rproc); + void (*kick)(struct rproc *rproc, int vqid); + void * (*da_to_va)(struct rproc *rproc, u64 da, int len); + int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); + struct resource_table *(*find_loaded_rsc_table)( + struct rproc *rproc, const struct firmware *fw); + int (*load)(struct rproc *rproc, const struct firmware *fw); + int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); + u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); +}; + +/** + * enum rproc_state - remote processor states + * @RPROC_OFFLINE: device is powered off + * @RPROC_SUSPENDED: device is suspended; needs to be woken up to receive + * a message. + * @RPROC_RUNNING: device is up and running + * @RPROC_CRASHED: device has crashed; need to start recovery + * @RPROC_DELETED: device is deleted + * @RPROC_LAST: just keep this one at the end + * + * Please note that the values of these states are used as indices + * to rproc_state_string, a state-to-name lookup table, + * so please keep the two synchronized. @RPROC_LAST is used to check + * the validity of an index before the lookup table is accessed, so + * please update it as needed too. + */ +enum rproc_state { + RPROC_OFFLINE = 0, + RPROC_SUSPENDED = 1, + RPROC_RUNNING = 2, + RPROC_CRASHED = 3, + RPROC_DELETED = 4, + RPROC_LAST = 5, +}; + +/** + * enum rproc_crash_type - remote processor crash types + * @RPROC_MMUFAULT: iommu fault + * @RPROC_WATCHDOG: watchdog bite + * @RPROC_FATAL_ERROR fatal error + * + * Each element of the enum is used as an array index. So that, the value of + * the elements should be always something sane. + * + * Feel free to add more types when needed. 
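struct rproc_ops above is what a platform driver fills in with its SoC-specific handlers; a hedged skeleton follows. The board_* helpers and struct board_rproc are placeholders for whatever reset and doorbell handling the SoC needs, and rproc->priv / rproc->bootaddr refer to fields of struct rproc defined just below.

static int board_rproc_start(struct rproc *rproc)
{
	struct board_rproc *priv = rproc->priv;	/* hypothetical driver state */

	/* program the entry point and release the remote core from reset */
	board_set_boot_addr(priv, rproc->bootaddr);	/* placeholder */
	board_release_reset(priv);			/* placeholder */
	return 0;
}

static int board_rproc_stop(struct rproc *rproc)
{
	board_assert_reset(rproc->priv);		/* placeholder */
	return 0;
}

static void board_rproc_kick(struct rproc *rproc, int vqid)
{
	/* tell the remote side which vring (notify id == vqid) was updated */
	board_ring_doorbell(rproc->priv, vqid);		/* placeholder */
}

static void *board_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
{
	/* translate a device address to a kernel va, NULL if out of range */
	return board_lookup_mem(rproc->priv, da, len);	/* placeholder */
}

static const struct rproc_ops board_rproc_ops = {
	.start		= board_rproc_start,
	.stop		= board_rproc_stop,
	.kick		= board_rproc_kick,
	.da_to_va	= board_rproc_da_to_va,
};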
+ */ +enum rproc_crash_type { + RPROC_MMUFAULT, + RPROC_WATCHDOG, + RPROC_FATAL_ERROR, +}; + +/** + * struct rproc_dump_segment - segment info from ELF header + * @node: list node related to the rproc segment list + * @da: device address of the segment + * @size: size of the segment + */ +struct rproc_dump_segment { + struct list_head node; + + dma_addr_t da; + size_t size; + + loff_t offset; +}; + +/** + * struct rproc - represents a physical remote processor device + * @node: list node of this rproc object + * @domain: iommu domain + * @name: human readable name of the rproc + * @firmware: name of firmware file to be loaded + * @priv: private data which belongs to the platform-specific rproc module + * @ops: platform-specific start/stop rproc handlers + * @dev: virtual device for refcounting and common remoteproc behavior + * @power: refcount of users who need this rproc powered up + * @state: state of the device + * @lock: lock which protects concurrent manipulations of the rproc + * @dbg_dir: debugfs directory of this rproc device + * @traces: list of trace buffers + * @num_traces: number of trace buffers + * @carveouts: list of physically contiguous memory allocations + * @mappings: list of iommu mappings we initiated, needed on shutdown + * @bootaddr: address of first instruction to boot rproc with (optional) + * @rvdevs: list of remote virtio devices + * @subdevs: list of subdevices, to following the running state + * @notifyids: idr for dynamically assigning rproc-wide unique notify ids + * @index: index of this rproc device + * @crash_handler: workqueue for handling a crash + * @crash_cnt: crash counter + * @recovery_disabled: flag that state if recovery was disabled + * @max_notifyid: largest allocated notify id. + * @table_ptr: pointer to the resource table in effect + * @cached_table: copy of the resource table + * @table_sz: size of @cached_table + * @has_iommu: flag to indicate if remote processor is behind an MMU + * @dump_segments: list of segments in the firmware + */ +struct rproc { + struct list_head node; + struct iommu_domain *domain; + const char *name; + char *firmware; + void *priv; + struct rproc_ops *ops; + struct device dev; + atomic_t power; + unsigned int state; + struct mutex lock; + struct dentry *dbg_dir; + struct list_head traces; + int num_traces; + struct list_head carveouts; + struct list_head mappings; + u32 bootaddr; + struct list_head rvdevs; + struct list_head subdevs; + struct idr notifyids; + int index; + struct work_struct crash_handler; + unsigned int crash_cnt; + bool recovery_disabled; + int max_notifyid; + struct resource_table *table_ptr; + struct resource_table *cached_table; + size_t table_sz; + bool has_iommu; + bool auto_boot; + struct list_head dump_segments; +}; + +/** + * struct rproc_subdev - subdevice tied to a remoteproc + * @node: list node related to the rproc subdevs list + * @prepare: prepare function, called before the rproc is started + * @start: start function, called after the rproc has been started + * @stop: stop function, called before the rproc is stopped; the @crashed + * parameter indicates if this originates from a recovery + * @unprepare: unprepare function, called after the rproc has been stopped + */ +struct rproc_subdev { + struct list_head node; + + int (*prepare)(struct rproc_subdev *subdev); + int (*start)(struct rproc_subdev *subdev); + void (*stop)(struct rproc_subdev *subdev, bool crashed); + void (*unprepare)(struct rproc_subdev *subdev); +}; + +/* we currently support only two vrings per rvdev */ + 
+#define RVDEV_NUM_VRINGS 2 + +/** + * struct rproc_vring - remoteproc vring state + * @va: virtual address + * @dma: dma address + * @len: length, in bytes + * @da: device address + * @align: vring alignment + * @notifyid: rproc-specific unique vring index + * @rvdev: remote vdev + * @vq: the virtqueue of this vring + */ +struct rproc_vring { + void *va; + dma_addr_t dma; + int len; + u32 da; + u32 align; + int notifyid; + struct rproc_vdev *rvdev; + struct virtqueue *vq; +}; + +/** + * struct rproc_vdev - remoteproc state for a supported virtio device + * @refcount: reference counter for the vdev and vring allocations + * @subdev: handle for registering the vdev as a rproc subdevice + * @id: virtio device id (as in virtio_ids.h) + * @node: list node + * @rproc: the rproc handle + * @vdev: the virio device + * @vring: the vrings for this vdev + * @rsc_offset: offset of the vdev's resource entry + */ +struct rproc_vdev { + struct kref refcount; + + struct rproc_subdev subdev; + + unsigned int id; + struct list_head node; + struct rproc *rproc; + struct virtio_device vdev; + struct rproc_vring vring[RVDEV_NUM_VRINGS]; + u32 rsc_offset; +}; + +struct rproc *rproc_get_by_phandle(phandle phandle); +struct rproc *rproc_get_by_child(struct device *dev); + +struct rproc *rproc_alloc(struct device *dev, const char *name, + const struct rproc_ops *ops, + const char *firmware, int len); +void rproc_put(struct rproc *rproc); +int rproc_add(struct rproc *rproc); +int rproc_del(struct rproc *rproc); +void rproc_free(struct rproc *rproc); + +int rproc_boot(struct rproc *rproc); +void rproc_shutdown(struct rproc *rproc); +void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); +int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size); + +static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) +{ + return container_of(vdev, struct rproc_vdev, vdev); +} + +static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) +{ + struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); + + return rvdev->rproc; +} + +void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev); + +void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); + +#endif /* REMOTEPROC_H */ diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h new file mode 100644 index 000000000..fa8e38681 --- /dev/null +++ b/include/linux/remoteproc/qcom_rproc.h @@ -0,0 +1,22 @@ +#ifndef __QCOM_RPROC_H__ +#define __QCOM_RPROC_H__ + +struct notifier_block; + +#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON) + +int qcom_register_ssr_notifier(struct notifier_block *nb); +void qcom_unregister_ssr_notifier(struct notifier_block *nb); + +#else + +static inline int qcom_register_ssr_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline void qcom_unregister_ssr_notifier(struct notifier_block *nb) {} + +#endif + +#endif diff --git a/include/linux/remoteproc/st_slim_rproc.h b/include/linux/remoteproc/st_slim_rproc.h new file mode 100644 index 000000000..4155556fa --- /dev/null +++ b/include/linux/remoteproc/st_slim_rproc.h @@ -0,0 +1,58 @@ +/* + * SLIM core rproc driver header + * + * Copyright (C) 2016 STMicroelectronics + * + * Author: Peter Griffin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
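A hedged sketch of the registration flow a platform driver would build on the API above; the device naming, the firmware file name and board_rproc_ops (from the earlier skeleton) are all illustrative.

static int board_rproc_probe(struct platform_device *pdev)
{
	struct rproc *rproc;
	int ret;

	rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
			    &board_rproc_ops, "board-rproc-fw.elf",
			    sizeof(struct board_rproc));  /* size of rproc->priv */
	if (!rproc)
		return -ENOMEM;

	platform_set_drvdata(pdev, rproc);

	ret = rproc_add(rproc);		/* register; may auto-boot once fw is found */
	if (ret) {
		rproc_free(rproc);
		return ret;
	}
	return 0;
}

static int board_rproc_remove(struct platform_device *pdev)
{
	struct rproc *rproc = platform_get_drvdata(pdev);

	rproc_del(rproc);
	rproc_free(rproc);
	return 0;
}

A crash interrupt handler would typically just call rproc_report_crash(rproc, RPROC_WATCHDOG) (or another rproc_crash_type) and let the core decide whether to run recovery.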
+ */ +#ifndef _ST_REMOTEPROC_SLIM_H +#define _ST_REMOTEPROC_SLIM_H + +#define ST_SLIM_MEM_MAX 2 +#define ST_SLIM_MAX_CLK 4 + +enum { + ST_SLIM_DMEM, + ST_SLIM_IMEM, +}; + +/** + * struct st_slim_mem - slim internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @size: Size of the memory region + */ +struct st_slim_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + size_t size; +}; + +/** + * struct st_slim_rproc - SLIM slim core + * @rproc: rproc handle + * @mem: slim memory information + * @slimcore: slim slimcore regs + * @peri: slim peripheral regs + * @clks: slim clocks + */ +struct st_slim_rproc { + struct rproc *rproc; + struct st_slim_mem mem[ST_SLIM_MEM_MAX]; + void __iomem *slimcore; + void __iomem *peri; + + /* st_slim_rproc private */ + struct clk *clks[ST_SLIM_MAX_CLK]; +}; + +struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, + char *fw_name); +void st_slim_rproc_put(struct st_slim_rproc *slim_rproc); + +#endif diff --git a/include/linux/reservation.h b/include/linux/reservation.h new file mode 100644 index 000000000..02166e815 --- /dev/null +++ b/include/linux/reservation.h @@ -0,0 +1,290 @@ +/* + * Header file for reservations for dma-buf and ttm + * + * Copyright(C) 2011 Linaro Limited. All rights reserved. + * Copyright (C) 2012-2013 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark + * Maarten Lankhorst + * Thomas Hellstrom + * + * Based on bo.c which bears the following copyright notice, + * but is dual licensed: + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _LINUX_RESERVATION_H +#define _LINUX_RESERVATION_H + +#include +#include +#include +#include +#include + +extern struct ww_class reservation_ww_class; +extern struct lock_class_key reservation_seqcount_class; +extern const char reservation_seqcount_string[]; + +/** + * struct reservation_object_list - a list of shared fences + * @rcu: for internal use + * @shared_count: table of shared fences + * @shared_max: for growing shared fence table + * @shared: shared fence table + */ +struct reservation_object_list { + struct rcu_head rcu; + u32 shared_count, shared_max; + struct dma_fence __rcu *shared[]; +}; + +/** + * struct reservation_object - a reservation object manages fences for a buffer + * @lock: update side lock + * @seq: sequence count for managing RCU read-side synchronization + * @fence_excl: the exclusive fence, if there is one currently + * @fence: list of current shared fences + * @staged: staged copy of shared fences for RCU updates + */ +struct reservation_object { + struct ww_mutex lock; + seqcount_t seq; + + struct dma_fence __rcu *fence_excl; + struct reservation_object_list __rcu *fence; + struct reservation_object_list *staged; +}; + +#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) +#define reservation_object_assert_held(obj) \ + lockdep_assert_held(&(obj)->lock.base) + +/** + * reservation_object_init - initialize a reservation object + * @obj: the reservation object + */ +static inline void +reservation_object_init(struct reservation_object *obj) +{ + ww_mutex_init(&obj->lock, &reservation_ww_class); + + __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); + RCU_INIT_POINTER(obj->fence, NULL); + RCU_INIT_POINTER(obj->fence_excl, NULL); + obj->staged = NULL; +} + +/** + * reservation_object_fini - destroys a reservation object + * @obj: the reservation object + */ +static inline void +reservation_object_fini(struct reservation_object *obj) +{ + int i; + struct reservation_object_list *fobj; + struct dma_fence *excl; + + /* + * This object should be dead and all references must have + * been released to it, so no need to be protected with rcu. + */ + excl = rcu_dereference_protected(obj->fence_excl, 1); + if (excl) + dma_fence_put(excl); + + fobj = rcu_dereference_protected(obj->fence, 1); + if (fobj) { + for (i = 0; i < fobj->shared_count; ++i) + dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1)); + + kfree(fobj); + } + kfree(obj->staged); + + ww_mutex_destroy(&obj->lock); +} + +/** + * reservation_object_get_list - get the reservation object's + * shared fence list, with update-side lock held + * @obj: the reservation object + * + * Returns the shared fence list. Does NOT take references to + * the fence. The obj->lock must be held. + */ +static inline struct reservation_object_list * +reservation_object_get_list(struct reservation_object *obj) +{ + return rcu_dereference_protected(obj->fence, + reservation_object_held(obj)); +} + +/** + * reservation_object_lock - lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Locks the reservation object for exclusive access and modification. Note, + * that the lock is only against other writers, readers will run concurrently + * with a writer under RCU. The seqlock is used to notify readers if they + * overlap with a writer. + * + * As the reservation object may be locked by multiple parties in an + * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle + * is detected. 
See ww_mutex_lock() and ww_acquire_init(). A reservation + * object may be locked by itself by passing NULL as @ctx. + */ +static inline int +reservation_object_lock(struct reservation_object *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock(&obj->lock, ctx); +} + +/** + * reservation_object_lock_interruptible - lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Locks the reservation object interruptible for exclusive access and + * modification. Note, that the lock is only against other writers, readers + * will run concurrently with a writer under RCU. The seqlock is used to + * notify readers if they overlap with a writer. + * + * As the reservation object may be locked by multiple parties in an + * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle + * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation + * object may be locked by itself by passing NULL as @ctx. + */ +static inline int +reservation_object_lock_interruptible(struct reservation_object *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock_interruptible(&obj->lock, ctx); +} + + +/** + * reservation_object_trylock - trylock the reservation object + * @obj: the reservation object + * + * Tries to lock the reservation object for exclusive access and modification. + * Note, that the lock is only against other writers, readers will run + * concurrently with a writer under RCU. The seqlock is used to notify readers + * if they overlap with a writer. + * + * Also note that since no context is provided, no deadlock protection is + * possible. + * + * Returns true if the lock was acquired, false otherwise. + */ +static inline bool __must_check +reservation_object_trylock(struct reservation_object *obj) +{ + return ww_mutex_trylock(&obj->lock); +} + +/** + * reservation_object_unlock - unlock the reservation object + * @obj: the reservation object + * + * Unlocks the reservation object following exclusive access. + */ +static inline void +reservation_object_unlock(struct reservation_object *obj) +{ + ww_mutex_unlock(&obj->lock); +} + +/** + * reservation_object_get_excl - get the reservation object's + * exclusive fence, with update-side lock held + * @obj: the reservation object + * + * Returns the exclusive fence (if any). Does NOT take a + * reference. The obj->lock must be held. + * + * RETURNS + * The exclusive fence or NULL + */ +static inline struct dma_fence * +reservation_object_get_excl(struct reservation_object *obj) +{ + return rcu_dereference_protected(obj->fence_excl, + reservation_object_held(obj)); +} + +/** + * reservation_object_get_excl_rcu - get the reservation object's + * exclusive fence, without lock held. + * @obj: the reservation object + * + * If there is an exclusive fence, this atomically increments it's + * reference count and returns it. 
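+ *
+ * A minimal usage sketch (illustrative only; "obj" and "timeout" are
+ * assumed to come from the caller):
+ *
+ *	struct dma_fence *fence;
+ *
+ *	fence = reservation_object_get_excl_rcu(obj);
+ *	if (fence) {
+ *		dma_fence_wait_timeout(fence, false, timeout);
+ *		dma_fence_put(fence);
+ *	}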
+ * + * RETURNS + * The exclusive fence or NULL if none + */ +static inline struct dma_fence * +reservation_object_get_excl_rcu(struct reservation_object *obj) +{ + struct dma_fence *fence; + + if (!rcu_access_pointer(obj->fence_excl)) + return NULL; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&obj->fence_excl); + rcu_read_unlock(); + + return fence; +} + +int reservation_object_reserve_shared(struct reservation_object *obj); +void reservation_object_add_shared_fence(struct reservation_object *obj, + struct dma_fence *fence); + +void reservation_object_add_excl_fence(struct reservation_object *obj, + struct dma_fence *fence); + +int reservation_object_get_fences_rcu(struct reservation_object *obj, + struct dma_fence **pfence_excl, + unsigned *pshared_count, + struct dma_fence ***pshared); + +int reservation_object_copy_fences(struct reservation_object *dst, + struct reservation_object *src); + +long reservation_object_wait_timeout_rcu(struct reservation_object *obj, + bool wait_all, bool intr, + unsigned long timeout); + +bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + bool test_all); + +#endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h new file mode 100644 index 000000000..8675ec649 --- /dev/null +++ b/include/linux/reset-controller.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RESET_CONTROLLER_H_ +#define _LINUX_RESET_CONTROLLER_H_ + +#include + +struct reset_controller_dev; + +/** + * struct reset_control_ops - reset controller driver callbacks + * + * @reset: for self-deasserting resets, does all necessary + * things to reset the device + * @assert: manually assert the reset line, if supported + * @deassert: manually deassert the reset line, if supported + * @status: return the status of the reset line, if supported + */ +struct reset_control_ops { + int (*reset)(struct reset_controller_dev *rcdev, unsigned long id); + int (*assert)(struct reset_controller_dev *rcdev, unsigned long id); + int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id); + int (*status)(struct reset_controller_dev *rcdev, unsigned long id); +}; + +struct module; +struct device_node; +struct of_phandle_args; + +/** + * struct reset_control_lookup - represents a single lookup entry + * + * @list: internal list of all reset lookup entries + * @provider: name of the reset controller device controlling this reset line + * @index: ID of the reset controller in the reset controller device + * @dev_id: name of the device associated with this reset line + * @con_id name of the reset line (can be NULL) + */ +struct reset_control_lookup { + struct list_head list; + const char *provider; + unsigned int index; + const char *dev_id; + const char *con_id; +}; + +#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \ + { \ + .provider = _provider, \ + .index = _index, \ + .dev_id = _dev_id, \ + .con_id = _con_id, \ + } + +/** + * struct reset_controller_dev - reset controller entity that might + * provide multiple reset controls + * @ops: a pointer to device specific struct reset_control_ops + * @owner: kernel module of the reset controller driver + * @list: internal list of reset controller devices + * @reset_control_head: head of internal list of requested reset controls + * @dev: corresponding driver model device struct + * @of_node: corresponding device tree node as phandle target + * @of_reset_n_cells: number of cells in reset line specifiers + * @of_xlate: translation 
function to translate from specifier as found in the + * device tree to id as given to the reset control ops + * @nr_resets: number of reset controls in this reset controller device + */ +struct reset_controller_dev { + const struct reset_control_ops *ops; + struct module *owner; + struct list_head list; + struct list_head reset_control_head; + struct device *dev; + struct device_node *of_node; + int of_reset_n_cells; + int (*of_xlate)(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec); + unsigned int nr_resets; +}; + +int reset_controller_register(struct reset_controller_dev *rcdev); +void reset_controller_unregister(struct reset_controller_dev *rcdev); + +struct device; +int devm_reset_controller_register(struct device *dev, + struct reset_controller_dev *rcdev); + +void reset_controller_add_lookup(struct reset_control_lookup *lookup, + unsigned int num_entries); + +#endif diff --git a/include/linux/reset.h b/include/linux/reset.h new file mode 100644 index 000000000..09732c36f --- /dev/null +++ b/include/linux/reset.h @@ -0,0 +1,428 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RESET_H_ +#define _LINUX_RESET_H_ + +#include + +struct device; +struct device_node; +struct reset_control; + +#ifdef CONFIG_RESET_CONTROLLER + +int reset_control_reset(struct reset_control *rstc); +int reset_control_assert(struct reset_control *rstc); +int reset_control_deassert(struct reset_control *rstc); +int reset_control_status(struct reset_control *rstc); + +struct reset_control *__of_reset_control_get(struct device_node *node, + const char *id, int index, bool shared, + bool optional); +struct reset_control *__reset_control_get(struct device *dev, const char *id, + int index, bool shared, + bool optional); +void reset_control_put(struct reset_control *rstc); +int __device_reset(struct device *dev, bool optional); +struct reset_control *__devm_reset_control_get(struct device *dev, + const char *id, int index, bool shared, + bool optional); + +struct reset_control *devm_reset_control_array_get(struct device *dev, + bool shared, bool optional); +struct reset_control *of_reset_control_array_get(struct device_node *np, + bool shared, bool optional); + +#else + +static inline int reset_control_reset(struct reset_control *rstc) +{ + return 0; +} + +static inline int reset_control_assert(struct reset_control *rstc) +{ + return 0; +} + +static inline int reset_control_deassert(struct reset_control *rstc) +{ + return 0; +} + +static inline int reset_control_status(struct reset_control *rstc) +{ + return 0; +} + +static inline void reset_control_put(struct reset_control *rstc) +{ +} + +static inline int __device_reset(struct device *dev, bool optional) +{ + return optional ? 0 : -ENOTSUPP; +} + +static inline struct reset_control *__of_reset_control_get( + struct device_node *node, + const char *id, int index, bool shared, + bool optional) +{ + return optional ? NULL : ERR_PTR(-ENOTSUPP); +} + +static inline struct reset_control *__reset_control_get( + struct device *dev, const char *id, + int index, bool shared, bool optional) +{ + return optional ? NULL : ERR_PTR(-ENOTSUPP); +} + +static inline struct reset_control *__devm_reset_control_get( + struct device *dev, const char *id, + int index, bool shared, bool optional) +{ + return optional ? NULL : ERR_PTR(-ENOTSUPP); +} + +static inline struct reset_control * +devm_reset_control_array_get(struct device *dev, bool shared, bool optional) +{ + return optional ? 
NULL : ERR_PTR(-ENOTSUPP); +} + +static inline struct reset_control * +of_reset_control_array_get(struct device_node *np, bool shared, bool optional) +{ + return optional ? NULL : ERR_PTR(-ENOTSUPP); +} + +#endif /* CONFIG_RESET_CONTROLLER */ + +static inline int __must_check device_reset(struct device *dev) +{ + return __device_reset(dev, false); +} + +static inline int device_reset_optional(struct device *dev) +{ + return __device_reset(dev, true); +} + +/** + * reset_control_get_exclusive - Lookup and obtain an exclusive reference + * to a reset controller. + * @dev: device to be reset by the controller + * @id: reset line name + * + * Returns a struct reset_control or IS_ERR() condition containing errno. + * If this function is called more then once for the same reset_control it will + * return -EBUSY. + * + * See reset_control_get_shared for details on shared references to + * reset-controls. + * + * Use of id names is optional. + */ +static inline struct reset_control * +__must_check reset_control_get_exclusive(struct device *dev, const char *id) +{ + return __reset_control_get(dev, id, 0, false, false); +} + +/** + * reset_control_get_shared - Lookup and obtain a shared reference to a + * reset controller. + * @dev: device to be reset by the controller + * @id: reset line name + * + * Returns a struct reset_control or IS_ERR() condition containing errno. + * This function is intended for use with reset-controls which are shared + * between hardware-blocks. + * + * When a reset-control is shared, the behavior of reset_control_assert / + * deassert is changed, the reset-core will keep track of a deassert_count + * and only (re-)assert the reset after reset_control_assert has been called + * as many times as reset_control_deassert was called. Also see the remark + * about shared reset-controls in the reset_control_assert docs. + * + * Calling reset_control_assert without first calling reset_control_deassert + * is not allowed on a shared reset control. Calling reset_control_reset is + * also not allowed on a shared reset control. + * + * Use of id names is optional. + */ +static inline struct reset_control *reset_control_get_shared( + struct device *dev, const char *id) +{ + return __reset_control_get(dev, id, 0, true, false); +} + +static inline struct reset_control *reset_control_get_optional_exclusive( + struct device *dev, const char *id) +{ + return __reset_control_get(dev, id, 0, false, true); +} + +static inline struct reset_control *reset_control_get_optional_shared( + struct device *dev, const char *id) +{ + return __reset_control_get(dev, id, 0, true, true); +} + +/** + * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference + * to a reset controller. + * @node: device to be reset by the controller + * @id: reset line name + * + * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * Use of id names is optional. + */ +static inline struct reset_control *of_reset_control_get_exclusive( + struct device_node *node, const char *id) +{ + return __of_reset_control_get(node, id, 0, false, false); +} + +/** + * of_reset_control_get_shared - Lookup and obtain an shared reference + * to a reset controller. 
+ * @node: device to be reset by the controller + * @id: reset line name + * + * When a reset-control is shared, the behavior of reset_control_assert / + * deassert is changed, the reset-core will keep track of a deassert_count + * and only (re-)assert the reset after reset_control_assert has been called + * as many times as reset_control_deassert was called. Also see the remark + * about shared reset-controls in the reset_control_assert docs. + * + * Calling reset_control_assert without first calling reset_control_deassert + * is not allowed on a shared reset control. Calling reset_control_reset is + * also not allowed on a shared reset control. + * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * Use of id names is optional. + */ +static inline struct reset_control *of_reset_control_get_shared( + struct device_node *node, const char *id) +{ + return __of_reset_control_get(node, id, 0, true, false); +} + +/** + * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive + * reference to a reset controller + * by index. + * @node: device to be reset by the controller + * @index: index of the reset controller + * + * This is to be used to perform a list of resets for a device or power domain + * in whatever order. Returns a struct reset_control or IS_ERR() condition + * containing errno. + */ +static inline struct reset_control *of_reset_control_get_exclusive_by_index( + struct device_node *node, int index) +{ + return __of_reset_control_get(node, NULL, index, false, false); +} + +/** + * of_reset_control_get_shared_by_index - Lookup and obtain an shared + * reference to a reset controller + * by index. + * @node: device to be reset by the controller + * @index: index of the reset controller + * + * When a reset-control is shared, the behavior of reset_control_assert / + * deassert is changed, the reset-core will keep track of a deassert_count + * and only (re-)assert the reset after reset_control_assert has been called + * as many times as reset_control_deassert was called. Also see the remark + * about shared reset-controls in the reset_control_assert docs. + * + * Calling reset_control_assert without first calling reset_control_deassert + * is not allowed on a shared reset control. Calling reset_control_reset is + * also not allowed on a shared reset control. + * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * This is to be used to perform a list of resets for a device or power domain + * in whatever order. Returns a struct reset_control or IS_ERR() condition + * containing errno. + */ +static inline struct reset_control *of_reset_control_get_shared_by_index( + struct device_node *node, int index) +{ + return __of_reset_control_get(node, NULL, index, true, false); +} + +/** + * devm_reset_control_get_exclusive - resource managed + * reset_control_get_exclusive() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_exclusive(). For reset controllers returned + * from this function, reset_control_put() is called automatically on driver + * detach. + * + * See reset_control_get_exclusive() for more information. 
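+ *
+ * A minimal consumer sketch (hypothetical driver code, not part of this
+ * header; the foo_probe() name, the NULL line id and the error handling
+ * are assumptions):
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct reset_control *rstc;
+ *
+ *		rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ *		if (IS_ERR(rstc))
+ *			return PTR_ERR(rstc);
+ *
+ *		return reset_control_deassert(rstc);
+ *	}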
+ */ +static inline struct reset_control * +__must_check devm_reset_control_get_exclusive(struct device *dev, + const char *id) +{ + return __devm_reset_control_get(dev, id, 0, false, false); +} + +/** + * devm_reset_control_get_shared - resource managed reset_control_get_shared() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_shared(). For reset controllers returned from + * this function, reset_control_put() is called automatically on driver detach. + * See reset_control_get_shared() for more information. + */ +static inline struct reset_control *devm_reset_control_get_shared( + struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, true, false); +} + +static inline struct reset_control *devm_reset_control_get_optional_exclusive( + struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, false, true); +} + +static inline struct reset_control *devm_reset_control_get_optional_shared( + struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, true, true); +} + +/** + * devm_reset_control_get_exclusive_by_index - resource managed + * reset_control_get_exclusive() + * @dev: device to be reset by the controller + * @index: index of the reset controller + * + * Managed reset_control_get_exclusive(). For reset controllers returned from + * this function, reset_control_put() is called automatically on driver + * detach. + * + * See reset_control_get_exclusive() for more information. + */ +static inline struct reset_control * +devm_reset_control_get_exclusive_by_index(struct device *dev, int index) +{ + return __devm_reset_control_get(dev, NULL, index, false, false); +} + +/** + * devm_reset_control_get_shared_by_index - resource managed + * reset_control_get_shared + * @dev: device to be reset by the controller + * @index: index of the reset controller + * + * Managed reset_control_get_shared(). For reset controllers returned from + * this function, reset_control_put() is called automatically on driver detach. + * See reset_control_get_shared() for more information. + */ +static inline struct reset_control * +devm_reset_control_get_shared_by_index(struct device *dev, int index) +{ + return __devm_reset_control_get(dev, NULL, index, true, false); +} + +/* + * TEMPORARY calls to use during transition: + * + * of_reset_control_get() => of_reset_control_get_exclusive() + * + * These inline function calls will be removed once all consumers + * have been moved over to the new explicit API. 
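+ *
+ * For example (hedged illustration; "node" and the "ahb" line name are
+ * placeholders), an existing consumer written as
+ *
+ *	rstc = of_reset_control_get(node, "ahb");
+ *
+ * behaves exactly like
+ *
+ *	rstc = of_reset_control_get_exclusive(node, "ahb");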
+ */ +static inline struct reset_control *of_reset_control_get( + struct device_node *node, const char *id) +{ + return of_reset_control_get_exclusive(node, id); +} + +static inline struct reset_control *of_reset_control_get_by_index( + struct device_node *node, int index) +{ + return of_reset_control_get_exclusive_by_index(node, index); +} + +static inline struct reset_control *devm_reset_control_get( + struct device *dev, const char *id) +{ + return devm_reset_control_get_exclusive(dev, id); +} + +static inline struct reset_control *devm_reset_control_get_optional( + struct device *dev, const char *id) +{ + return devm_reset_control_get_optional_exclusive(dev, id); + +} + +static inline struct reset_control *devm_reset_control_get_by_index( + struct device *dev, int index) +{ + return devm_reset_control_get_exclusive_by_index(dev, index); +} + +/* + * APIs to manage a list of reset controllers + */ +static inline struct reset_control * +devm_reset_control_array_get_exclusive(struct device *dev) +{ + return devm_reset_control_array_get(dev, false, false); +} + +static inline struct reset_control * +devm_reset_control_array_get_shared(struct device *dev) +{ + return devm_reset_control_array_get(dev, true, false); +} + +static inline struct reset_control * +devm_reset_control_array_get_optional_exclusive(struct device *dev) +{ + return devm_reset_control_array_get(dev, false, true); +} + +static inline struct reset_control * +devm_reset_control_array_get_optional_shared(struct device *dev) +{ + return devm_reset_control_array_get(dev, true, true); +} + +static inline struct reset_control * +of_reset_control_array_get_exclusive(struct device_node *node) +{ + return of_reset_control_array_get(node, false, false); +} + +static inline struct reset_control * +of_reset_control_array_get_shared(struct device_node *node) +{ + return of_reset_control_array_get(node, true, false); +} + +static inline struct reset_control * +of_reset_control_array_get_optional_exclusive(struct device_node *node) +{ + return of_reset_control_array_get(node, false, true); +} + +static inline struct reset_control * +of_reset_control_array_get_optional_shared(struct device_node *node) +{ + return of_reset_control_array_get(node, true, true); +} +#endif diff --git a/include/linux/reset/bcm63xx_pmb.h b/include/linux/reset/bcm63xx_pmb.h new file mode 100644 index 000000000..bb4af7b5e --- /dev/null +++ b/include/linux/reset/bcm63xx_pmb.h @@ -0,0 +1,88 @@ +/* + * Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset) + * + * Copyright (C) 2015, Broadcom Corporation + * Author: Florian Fainelli + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __BCM63XX_PMB_H +#define __BCM63XX_PMB_H + +#include +#include +#include +#include + +/* PMB Master controller register */ +#define PMB_CTRL 0x00 +#define PMC_PMBM_START (1 << 31) +#define PMC_PMBM_TIMEOUT (1 << 30) +#define PMC_PMBM_SLAVE_ERR (1 << 29) +#define PMC_PMBM_BUSY (1 << 28) +#define PMC_PMBM_READ (0 << 20) +#define PMC_PMBM_WRITE (1 << 20) +#define PMB_WR_DATA 0x04 +#define PMB_TIMEOUT 0x08 +#define PMB_RD_DATA 0x0C + +#define PMB_BUS_ID_SHIFT 8 + +/* Perform the low-level PMB master operation, shared between reads and + * writes. + */ +static inline int __bpcm_do_op(void __iomem *master, unsigned int addr, + u32 off, u32 op) +{ + unsigned int timeout = 1000; + u32 cmd; + + cmd = (PMC_PMBM_START | op | (addr & 0xff) << 12 | off); + writel(cmd, master + PMB_CTRL); + do { + cmd = readl(master + PMB_CTRL); + if (!(cmd & PMC_PMBM_START)) + return 0; + + if (cmd & PMC_PMBM_SLAVE_ERR) + return -EIO; + + if (cmd & PMC_PMBM_TIMEOUT) + return -ETIMEDOUT; + + udelay(1); + } while (timeout-- > 0); + + return -ETIMEDOUT; +} + +static inline int bpcm_rd(void __iomem *master, unsigned int addr, + u32 off, u32 *val) +{ + int ret = 0; + + ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_READ); + *val = readl(master + PMB_RD_DATA); + + return ret; +} + +static inline int bpcm_wr(void __iomem *master, unsigned int addr, + u32 off, u32 val) +{ + int ret = 0; + + writel(val, master + PMB_WR_DATA); + ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_WRITE); + + return ret; +} + +#endif /* __BCM63XX_PMB_H */ diff --git a/include/linux/resource.h b/include/linux/resource.h new file mode 100644 index 000000000..bdf491cbc --- /dev/null +++ b/include/linux/resource.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RESOURCE_H +#define _LINUX_RESOURCE_H + +#include + + +struct task_struct; + +void getrusage(struct task_struct *p, int who, struct rusage *ru); +int do_prlimit(struct task_struct *tsk, unsigned int resource, + struct rlimit *new_rlim, struct rlimit *old_rlim); + +#endif diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h new file mode 100644 index 000000000..e2bf63d88 --- /dev/null +++ b/include/linux/resource_ext.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2015, Intel Corporation + * Author: Jiang Liu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#ifndef _LINUX_RESOURCE_EXT_H +#define _LINUX_RESOURCE_EXT_H +#include +#include +#include +#include + +/* Represent resource window for bridge devices */ +struct resource_win { + struct resource res; /* In master (CPU) address space */ + resource_size_t offset; /* Translation offset for bridge */ +}; + +/* + * Common resource list management data structure and interfaces to support + * ACPI, PNP and PCI host bridge etc. 
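+ *
+ * A short usage sketch (illustrative only; "head" is assumed to be a
+ * struct list_head already populated by the bridge code):
+ *
+ *	struct resource_entry *entry;
+ *
+ *	resource_list_for_each_entry(entry, head)
+ *		pr_info("window %pR, offset %llx\n", entry->res,
+ *			(unsigned long long)entry->offset);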
+ */ +struct resource_entry { + struct list_head node; + struct resource *res; /* In master (CPU) address space */ + resource_size_t offset; /* Translation offset for bridge */ + struct resource __res; /* Default storage for res */ +}; + +extern struct resource_entry * +resource_list_create_entry(struct resource *res, size_t extra_size); +extern void resource_list_free(struct list_head *head); + +static inline void resource_list_add(struct resource_entry *entry, + struct list_head *head) +{ + list_add(&entry->node, head); +} + +static inline void resource_list_add_tail(struct resource_entry *entry, + struct list_head *head) +{ + list_add_tail(&entry->node, head); +} + +static inline void resource_list_del(struct resource_entry *entry) +{ + list_del(&entry->node); +} + +static inline void resource_list_free_entry(struct resource_entry *entry) +{ + kfree(entry); +} + +static inline void +resource_list_destroy_entry(struct resource_entry *entry) +{ + resource_list_del(entry); + resource_list_free_entry(entry); +} + +#define resource_list_for_each_entry(entry, list) \ + list_for_each_entry((entry), (list), node) + +#define resource_list_for_each_entry_safe(entry, tmp, list) \ + list_for_each_entry_safe((entry), (tmp), (list), node) + +#endif /* _LINUX_RESOURCE_EXT_H */ diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h new file mode 100644 index 000000000..5d83d0c1d --- /dev/null +++ b/include/linux/restart_block.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common syscall restarting data + */ +#ifndef __LINUX_RESTART_BLOCK_H +#define __LINUX_RESTART_BLOCK_H + +#include +#include +#include + +struct timespec; +struct compat_timespec; +struct pollfd; + +enum timespec_type { + TT_NONE = 0, + TT_NATIVE = 1, + TT_COMPAT = 2, +}; + +/* + * System call restart block. + */ +struct restart_block { + long (*fn)(struct restart_block *); + union { + /* For futex_wait and futex_wait_requeue_pi */ + struct { + u32 __user *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 __user *uaddr2; + } futex; + /* For nanosleep */ + struct { + clockid_t clockid; + enum timespec_type type; + union { + struct __kernel_timespec __user *rmtp; + struct compat_timespec __user *compat_rmtp; + }; + u64 expires; + } nanosleep; + /* For poll */ + struct { + struct pollfd __user *ufds; + int nfds; + int has_timeout; + unsigned long tv_sec; + unsigned long tv_nsec; + } poll; + }; +}; + +extern long do_no_restart_syscall(struct restart_block *parm); + +#endif /* __LINUX_RESTART_BLOCK_H */ diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h new file mode 100644 index 000000000..8ad2487a8 --- /dev/null +++ b/include/linux/rfkill.h @@ -0,0 +1,320 @@ +/* + * Copyright (C) 2006 - 2007 Ivo van Doorn + * Copyright (C) 2007 Dmitry Torokhov + * Copyright 2009 Johannes Berg + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __RFKILL_H +#define __RFKILL_H + +#include + +/* don't allow anyone to use these in the kernel */ +enum rfkill_user_states { + RFKILL_USER_STATE_SOFT_BLOCKED = RFKILL_STATE_SOFT_BLOCKED, + RFKILL_USER_STATE_UNBLOCKED = RFKILL_STATE_UNBLOCKED, + RFKILL_USER_STATE_HARD_BLOCKED = RFKILL_STATE_HARD_BLOCKED, +}; +#undef RFKILL_STATE_SOFT_BLOCKED +#undef RFKILL_STATE_UNBLOCKED +#undef RFKILL_STATE_HARD_BLOCKED + +#include +#include +#include +#include +#include + +struct device; +/* this is opaque */ +struct rfkill; + +/** + * struct rfkill_ops - rfkill driver methods + * + * @poll: poll the rfkill block state(s) -- only assign this method + * when you need polling. When called, simply call one of the + * rfkill_set{,_hw,_sw}_state family of functions. If the hw + * is getting unblocked you need to take into account the return + * value of those functions to make sure the software block is + * properly used. + * @query: query the rfkill block state(s) and call exactly one of the + * rfkill_set{,_hw,_sw}_state family of functions. Assign this + * method if input events can cause hardware state changes to make + * the rfkill core query your driver before setting a requested + * block. + * @set_block: turn the transmitter on (blocked == false) or off + * (blocked == true) -- ignore and return 0 when hard blocked. + * This callback must be assigned. + */ +struct rfkill_ops { + void (*poll)(struct rfkill *rfkill, void *data); + void (*query)(struct rfkill *rfkill, void *data); + int (*set_block)(void *data, bool blocked); +}; + +#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) +/** + * rfkill_alloc - Allocate rfkill structure + * @name: name of the struct -- the string is not copied internally + * @parent: device that has rf switch on it + * @type: type of the switch (RFKILL_TYPE_*) + * @ops: rfkill methods + * @ops_data: data passed to each method + * + * This function should be called by the transmitter driver to allocate an + * rfkill structure. Returns %NULL on failure. + */ +struct rfkill * __must_check rfkill_alloc(const char *name, + struct device *parent, + const enum rfkill_type type, + const struct rfkill_ops *ops, + void *ops_data); + +/** + * rfkill_register - Register a rfkill structure. + * @rfkill: rfkill structure to be registered + * + * This function should be called by the transmitter driver to register + * the rfkill structure. Before calling this function the driver needs + * to be ready to service method calls from rfkill. + * + * If rfkill_init_sw_state() is not called before registration, + * set_block() will be called to initialize the software blocked state + * to a default value. + * + * If the hardware blocked state is not set before registration, + * it is assumed to be unblocked. + */ +int __must_check rfkill_register(struct rfkill *rfkill); + +/** + * rfkill_pause_polling(struct rfkill *rfkill) + * + * Pause polling -- say transmitter is off for other reasons. + * NOTE: not necessary for suspend/resume -- in that case the + * core stops polling anyway (but will also correctly handle + * the case of polling having been paused before suspend.) 
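+ *
+ * A hedged usage sketch (the "wdev" pointer below is a placeholder for
+ * whatever structure the driver keeps its rfkill handle in): when the
+ * transmitter is switched off for some unrelated reason, call
+ *
+ *	rfkill_pause_polling(wdev->rfkill);
+ *
+ * and call rfkill_resume_polling() once it may transmit again.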
+ */ +void rfkill_pause_polling(struct rfkill *rfkill); + +/** + * rfkill_resume_polling(struct rfkill *rfkill) + * + * Resume polling + * NOTE: not necessary for suspend/resume -- in that case the + * core stops polling anyway + */ +void rfkill_resume_polling(struct rfkill *rfkill); + + +/** + * rfkill_unregister - Unregister a rfkill structure. + * @rfkill: rfkill structure to be unregistered + * + * This function should be called by the network driver during device + * teardown to destroy rfkill structure. Until it returns, the driver + * needs to be able to service method calls. + */ +void rfkill_unregister(struct rfkill *rfkill); + +/** + * rfkill_destroy - Free rfkill structure + * @rfkill: rfkill structure to be destroyed + * + * Destroys the rfkill structure. + */ +void rfkill_destroy(struct rfkill *rfkill); + +/** + * rfkill_set_hw_state - Set the internal rfkill hardware block state + * @rfkill: pointer to the rfkill class to modify. + * @blocked: the current hardware block state to set + * + * rfkill drivers that get events when the hard-blocked state changes + * use this function to notify the rfkill core (and through that also + * userspace) of the current state. They should also use this after + * resume if the state could have changed. + * + * You need not (but may) call this function if poll_state is assigned. + * + * This function can be called in any context, even from within rfkill + * callbacks. + * + * The function returns the combined block state (true if transmitter + * should be blocked) so that drivers need not keep track of the soft + * block state -- which they might not be able to. + */ +bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_set_sw_state - Set the internal rfkill software block state + * @rfkill: pointer to the rfkill class to modify. + * @blocked: the current software block state to set + * + * rfkill drivers that get events when the soft-blocked state changes + * (yes, some platforms directly act on input but allow changing again) + * use this function to notify the rfkill core (and through that also + * userspace) of the current state. + * + * Drivers should also call this function after resume if the state has + * been changed by the user. This only makes sense for "persistent" + * devices (see rfkill_init_sw_state()). + * + * This function can be called in any context, even from within rfkill + * callbacks. + * + * The function returns the combined block state (true if transmitter + * should be blocked). + */ +bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_init_sw_state - Initialize persistent software block state + * @rfkill: pointer to the rfkill class to modify. + * @blocked: the current software block state to set + * + * rfkill drivers that preserve their software block state over power off + * use this function to notify the rfkill core (and through that also + * userspace) of their initial state. It should only be used before + * registration. + * + * In addition, it marks the device as "persistent", an attribute which + * can be read by userspace. Persistent devices are expected to preserve + * their own state when suspended. + */ +void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_set_states - Set the internal rfkill block states + * @rfkill: pointer to the rfkill class to modify. 
+ * @sw: the current software block state to set + * @hw: the current hardware block state to set + * + * This function can be called in any context, even from within rfkill + * callbacks. + */ +void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); + +/** + * rfkill_blocked - Query rfkill block state + * + * @rfkill: rfkill struct to query + */ +bool rfkill_blocked(struct rfkill *rfkill); + +/** + * rfkill_find_type - Helper for finding rfkill type by name + * @name: the name of the type + * + * Returns enum rfkill_type that corresponds to the name. + */ +enum rfkill_type rfkill_find_type(const char *name); + +#else /* !RFKILL */ +static inline struct rfkill * __must_check +rfkill_alloc(const char *name, + struct device *parent, + const enum rfkill_type type, + const struct rfkill_ops *ops, + void *ops_data) +{ + return ERR_PTR(-ENODEV); +} + +static inline int __must_check rfkill_register(struct rfkill *rfkill) +{ + if (rfkill == ERR_PTR(-ENODEV)) + return 0; + return -EINVAL; +} + +static inline void rfkill_pause_polling(struct rfkill *rfkill) +{ +} + +static inline void rfkill_resume_polling(struct rfkill *rfkill) +{ +} + +static inline void rfkill_unregister(struct rfkill *rfkill) +{ +} + +static inline void rfkill_destroy(struct rfkill *rfkill) +{ +} + +static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) +{ + return blocked; +} + +static inline bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) +{ + return blocked; +} + +static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) +{ +} + +static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) +{ +} + +static inline bool rfkill_blocked(struct rfkill *rfkill) +{ + return false; +} + +static inline enum rfkill_type rfkill_find_type(const char *name) +{ + return RFKILL_TYPE_ALL; +} + +#endif /* RFKILL || RFKILL_MODULE */ + + +#ifdef CONFIG_RFKILL_LEDS +/** + * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED. + * This function might return a NULL pointer if registering of the + * LED trigger failed. Use this as "default_trigger" for the LED. + */ +const char *rfkill_get_led_trigger_name(struct rfkill *rfkill); + +/** + * rfkill_set_led_trigger_name - Set the LED trigger name + * @rfkill: rfkill struct + * @name: LED trigger name + * + * This function sets the LED trigger name of the radio LED + * trigger that rfkill creates. It is optional, but if called + * must be called before rfkill_register() to be effective. + */ +void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name); +#else +static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill) +{ + return NULL; +} + +static inline void +rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name) +{ +} +#endif + +#endif /* RFKILL_H */ diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h new file mode 100644 index 000000000..763d613ce --- /dev/null +++ b/include/linux/rhashtable-types.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Resizable, Scalable, Concurrent Hash Table + * + * Simple structures that might be needed in include + * files. 
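+ *
+ * A minimal sketch of how these types are typically used by a consumer
+ * (the "struct foo" object and its "id" key are invented for
+ * illustration):
+ *
+ *	struct foo {
+ *		u32 id;
+ *		struct rhash_head node;
+ *	};
+ *
+ *	static const struct rhashtable_params foo_params = {
+ *		.key_len	= sizeof(u32),
+ *		.key_offset	= offsetof(struct foo, id),
+ *		.head_offset	= offsetof(struct foo, node),
+ *		.automatic_shrinking = true,
+ *	};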
+ */ + +#ifndef _LINUX_RHASHTABLE_TYPES_H +#define _LINUX_RHASHTABLE_TYPES_H + +#include +#include +#include +#include + +struct rhash_head { + struct rhash_head __rcu *next; +}; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head __rcu *next; +}; + +struct bucket_table; + +/** + * struct rhashtable_compare_arg - Key for the function rhashtable_compare + * @ht: Hash table + * @key: Key to compare against + */ +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); +typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed); +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, + const void *obj); + +/** + * struct rhashtable_params - Hash table construction parameters + * @nelem_hint: Hint on number of elements, should be 75% of desired size + * @key_len: Length of key + * @key_offset: Offset of key in struct to be hashed + * @head_offset: Offset of rhash_head in struct to be hashed + * @max_size: Maximum size while expanding + * @min_size: Minimum size while shrinking + * @locks_mul: Number of bucket locks to allocate per cpu (default: 32) + * @automatic_shrinking: Enable automatic shrinking of tables + * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) + * @obj_hashfn: Function to hash object + * @obj_cmpfn: Function to compare key with object + */ +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + u8 locks_mul; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +/** + * struct rhashtable - Hash table handle + * @tbl: Bucket table + * @key_len: Key length for hashfn + * @max_elems: Maximum number of elements in table + * @p: Configuration parameters + * @rhlist: True if this is an rhltable + * @run_work: Deferred worker to expand/shrink asynchronously + * @mutex: Mutex to protect current/future table swapping + * @lock: Spin lock to protect walker list + * @nelems: Number of elements in table + */ +struct rhashtable { + struct bucket_table __rcu *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + +/** + * struct rhltable - Hash table with duplicate objects in a list + * @ht: Underlying rhtable + */ +struct rhltable { + struct rhashtable ht; +}; + +/** + * struct rhashtable_walker - Hash table walker + * @list: List entry on list of walkers + * @tbl: The table that we were walking over + */ +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; + +/** + * struct rhashtable_iter - Hash table iterator + * @ht: Table to iterate through + * @p: Current pointer + * @list: Current hash list pointer + * @walker: Associated rhashtable walker + * @slot: Current slot + * @skip: Number of entries to skip in slot + */ +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +int rhashtable_init(struct rhashtable *ht, + const struct rhashtable_params *params); +int rhltable_init(struct rhltable *hlt, + const struct rhashtable_params *params); + +#endif /* _LINUX_RHASHTABLE_TYPES_H */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h new file mode 100644 
index 000000000..eb7111039 --- /dev/null +++ b/include/linux/rhashtable.h @@ -0,0 +1,1154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Resizable, Scalable, Concurrent Hash Table + * + * Copyright (c) 2015-2016 Herbert Xu + * Copyright (c) 2014-2015 Thomas Graf + * Copyright (c) 2008-2014 Patrick McHardy + * + * Code partially derived from nft_hash + * Rewritten with rehash code from br_multicast plus single list + * pointer as suggested by Josh Triplett + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_RHASHTABLE_H +#define _LINUX_RHASHTABLE_H + +#include +#include +#include +#include +#include +#include + +#include +/* + * The end of the chain is marked with a special nulls marks which has + * the least significant bit set. + */ + +/* Maximum chain length before rehash + * + * The maximum (not average) chain length grows with the size of the hash + * table, at a rate of (log N)/(log log N). + * + * The value of 16 is selected so that even if the hash table grew to + * 2^32 you would not expect the maximum chain length to exceed it + * unless we are under attack (or extremely unlucky). + * + * As this limit is only to detect attacks, we don't need to set it to a + * lower value as you'd need the chain length to vastly exceed 16 to have + * any real effect on the system. + */ +#define RHT_ELASTICITY 16u + +/** + * struct bucket_table - Table of hash buckets + * @size: Number of hash buckets + * @nest: Number of bits of first-level nested table. + * @rehash: Current bucket being rehashed + * @hash_rnd: Random seed to fold into hash + * @locks_mask: Mask to apply before accessing locks[] + * @locks: Array of spinlocks protecting individual buckets + * @walkers: List of active walkers + * @rcu: RCU structure for freeing the table + * @future_tbl: Table under construction during rehashing + * @ntbl: Nested table used when out of memory. + * @buckets: size * hash buckets + */ +struct bucket_table { + unsigned int size; + unsigned int nest; + unsigned int rehash; + u32 hash_rnd; + unsigned int locks_mask; + spinlock_t *locks; + struct list_head walkers; + struct rcu_head rcu; + + struct bucket_table __rcu *future_tbl; + + struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; +}; + +#define INIT_RHT_NULLS_HEAD(ptr) \ + ((ptr) = (typeof(ptr)) NULLS_MARKER(0)) + +static inline bool rht_is_a_nulls(const struct rhash_head *ptr) +{ + return ((unsigned long) ptr & 1); +} + +static inline void *rht_obj(const struct rhashtable *ht, + const struct rhash_head *he) +{ + return (char *)he - ht->p.head_offset; +} + +static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, + unsigned int hash) +{ + return hash & (tbl->size - 1); +} + +static inline unsigned int rht_key_get_hash(struct rhashtable *ht, + const void *key, const struct rhashtable_params params, + unsigned int hash_rnd) +{ + unsigned int hash; + + /* params must be equal to ht->p if it isn't constant. 
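+	 *
+	 * When params is a compile-time constant the branches below
+	 * collapse and the jhash()/jhash2() calls can be inlined;
+	 * otherwise we go through the indirect ht->p.hashfn pointer.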
*/ + if (!__builtin_constant_p(params.key_len)) + hash = ht->p.hashfn(key, ht->key_len, hash_rnd); + else if (params.key_len) { + unsigned int key_len = params.key_len; + + if (params.hashfn) + hash = params.hashfn(key, key_len, hash_rnd); + else if (key_len & (sizeof(u32) - 1)) + hash = jhash(key, key_len, hash_rnd); + else + hash = jhash2(key, key_len / sizeof(u32), hash_rnd); + } else { + unsigned int key_len = ht->p.key_len; + + if (params.hashfn) + hash = params.hashfn(key, key_len, hash_rnd); + else + hash = jhash(key, key_len, hash_rnd); + } + + return hash; +} + +static inline unsigned int rht_key_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const void *key, const struct rhashtable_params params) +{ + unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); + + return rht_bucket_index(tbl, hash); +} + +static inline unsigned int rht_head_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const struct rhash_head *he, const struct rhashtable_params params) +{ + const char *ptr = rht_obj(ht, he); + + return likely(params.obj_hashfn) ? + rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?: + ht->p.key_len, + tbl->hash_rnd)) : + rht_key_hashfn(ht, tbl, ptr + params.key_offset, params); +} + +/** + * rht_grow_above_75 - returns true if nelems > 0.75 * table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_grow_above_75(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + /* Expand table when exceeding 75% load */ + return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) && + (!ht->p.max_size || tbl->size < ht->p.max_size); +} + +/** + * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_shrink_below_30(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + /* Shrink table beneath 30% load */ + return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) && + tbl->size > ht->p.min_size; +} + +/** + * rht_grow_above_100 - returns true if nelems > table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_grow_above_100(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + return atomic_read(&ht->nelems) > tbl->size && + (!ht->p.max_size || tbl->size < ht->p.max_size); +} + +/** + * rht_grow_above_max - returns true if table is above maximum + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_grow_above_max(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + return atomic_read(&ht->nelems) >= ht->max_elems; +} + +/* The bucket lock is selected based on the hash and protects mutations + * on a group of hash buckets. + * + * A maximum of tbl->size/2 bucket locks is allocated. This ensures that + * a single lock always covers both buckets which may both contains + * entries which link to the same bucket of the old table during resizing. + * This allows to simplify the locking as locking the bucket in both + * tables during resize always guarantee protection. + * + * IMPORTANT: When holding the bucket lock of both the old and new table + * during expansions and shrinking, the old bucket lock must always be + * acquired first. 
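+ *
+ * As an illustrative condensation of the insert path below, the usual
+ * pattern is:
+ *
+ *	lock = rht_bucket_lock(tbl, hash);
+ *	spin_lock_bh(lock);
+ *	... mutate the bucket ...
+ *	spin_unlock_bh(lock);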
+ */ +static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl, + unsigned int hash) +{ + return &tbl->locks[hash & tbl->locks_mask]; +} + +#ifdef CONFIG_PROVE_LOCKING +int lockdep_rht_mutex_is_held(struct rhashtable *ht); +int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); +#else +static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht) +{ + return 1; +} + +static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, + u32 hash) +{ + return 1; +} +#endif /* CONFIG_PROVE_LOCKING */ + +void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, + struct rhash_head *obj); + +void rhashtable_walk_enter(struct rhashtable *ht, + struct rhashtable_iter *iter); +void rhashtable_walk_exit(struct rhashtable_iter *iter); +int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU); + +static inline void rhashtable_walk_start(struct rhashtable_iter *iter) +{ + (void)rhashtable_walk_start_check(iter); +} + +void *rhashtable_walk_next(struct rhashtable_iter *iter); +void *rhashtable_walk_peek(struct rhashtable_iter *iter); +void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); + +void rhashtable_free_and_destroy(struct rhashtable *ht, + void (*free_fn)(void *ptr, void *arg), + void *arg); +void rhashtable_destroy(struct rhashtable *ht); + +struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, + unsigned int hash); +struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, + struct bucket_table *tbl, + unsigned int hash); + +#define rht_dereference(p, ht) \ + rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) + +#define rht_dereference_rcu(p, ht) \ + rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht)) + +#define rht_dereference_bucket(p, tbl, hash) \ + rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash)) + +#define rht_dereference_bucket_rcu(p, tbl, hash) \ + rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash)) + +#define rht_entry(tpos, pos, member) \ + ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) + +static inline struct rhash_head __rcu *const *rht_bucket( + const struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : + &tbl->buckets[hash]; +} + +static inline struct rhash_head __rcu **rht_bucket_var( + struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : + &tbl->buckets[hash]; +} + +static inline struct rhash_head __rcu **rht_bucket_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : + &tbl->buckets[hash]; +} + +/** + * rht_for_each_continue - continue iterating over hash chain + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + */ +#define rht_for_each_continue(pos, head, tbl, hash) \ + for (pos = rht_dereference_bucket(head, tbl, hash); \ + !rht_is_a_nulls(pos); \ + pos = rht_dereference_bucket((pos)->next, tbl, hash)) + +/** + * rht_for_each - iterate over hash chain + * @pos: the &struct rhash_head to use as a loop cursor. 
+ * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + */ +#define rht_for_each(pos, tbl, hash) \ + rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash) + +/** + * rht_for_each_entry_continue - continue iterating over hash chain + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. + */ +#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \ + for (pos = rht_dereference_bucket(head, tbl, hash); \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ + pos = rht_dereference_bucket((pos)->next, tbl, hash)) + +/** + * rht_for_each_entry - iterate over hash chain of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. + */ +#define rht_for_each_entry(tpos, pos, tbl, hash, member) \ + rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \ + tbl, hash, member) + +/** + * rht_for_each_entry_safe - safely iterate over hash chain of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @next: the &struct rhash_head to use as next in loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. + * + * This hash chain list-traversal primitive allows for the looped code to + * remove the loop cursor from the list. + */ +#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ + for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \ + next = !rht_is_a_nulls(pos) ? \ + rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ + pos = next, \ + next = !rht_is_a_nulls(pos) ? \ + rht_dereference_bucket(pos->next, tbl, hash) : NULL) + +/** + * rht_for_each_rcu_continue - continue iterating over rcu hash chain + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * + * This hash chain list-traversal primitive may safely run concurrently with + * the _rcu mutation primitives such as rhashtable_insert() as long as the + * traversal is guarded by rcu_read_lock(). + */ +#define rht_for_each_rcu_continue(pos, head, tbl, hash) \ + for (({barrier(); }), \ + pos = rht_dereference_bucket_rcu(head, tbl, hash); \ + !rht_is_a_nulls(pos); \ + pos = rcu_dereference_raw(pos->next)) + +/** + * rht_for_each_rcu - iterate over rcu hash chain + * @pos: the &struct rhash_head to use as a loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * + * This hash chain list-traversal primitive may safely run concurrently with + * the _rcu mutation primitives such as rhashtable_insert() as long as the + * traversal is guarded by rcu_read_lock(). 
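+ *
+ * A usage sketch along the lines of the lookup helpers further down
+ * (the surrounding code is assumed to provide "ht", "tbl", "hash", an
+ * rcu_read_lock() section and a match() predicate):
+ *
+ *	struct rhash_head *he;
+ *
+ *	rht_for_each_rcu(he, tbl, hash) {
+ *		if (match(rht_obj(ht, he)))
+ *			return rht_obj(ht, he);
+ *	}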
+ */ +#define rht_for_each_rcu(pos, tbl, hash) \ + rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash) + +/** + * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. + * + * This hash chain list-traversal primitive may safely run concurrently with + * the _rcu mutation primitives such as rhashtable_insert() as long as the + * traversal is guarded by rcu_read_lock(). + */ +#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \ + for (({barrier(); }), \ + pos = rht_dereference_bucket_rcu(head, tbl, hash); \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ + pos = rht_dereference_bucket_rcu(pos->next, tbl, hash)) + +/** + * rht_for_each_entry_rcu - iterate over rcu hash chain of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. + * + * This hash chain list-traversal primitive may safely run concurrently with + * the _rcu mutation primitives such as rhashtable_insert() as long as the + * traversal is guarded by rcu_read_lock(). + */ +#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ + rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \ + tbl, hash, member) + +/** + * rhl_for_each_rcu - iterate over rcu hash table list + * @pos: the &struct rlist_head to use as a loop cursor. + * @list: the head of the list + * + * This hash chain list-traversal primitive should be used on the + * list returned by rhltable_lookup. + */ +#define rhl_for_each_rcu(pos, list) \ + for (pos = list; pos; pos = rcu_dereference_raw(pos->next)) + +/** + * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rlist_head to use as a loop cursor. + * @list: the head of the list + * @member: name of the &struct rlist_head within the hashable struct. + * + * This hash chain list-traversal primitive should be used on the + * list returned by rhltable_lookup. + */ +#define rhl_for_each_entry_rcu(tpos, pos, list, member) \ + for (pos = list; pos && rht_entry(tpos, pos, member); \ + pos = rcu_dereference_raw(pos->next)) + +static inline int rhashtable_compare(struct rhashtable_compare_arg *arg, + const void *obj) +{ + struct rhashtable *ht = arg->ht; + const char *ptr = obj; + + return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len); +} + +/* Internal function, do not use. */ +static inline struct rhash_head *__rhashtable_lookup( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + struct bucket_table *tbl; + struct rhash_head *he; + unsigned int hash; + + tbl = rht_dereference_rcu(ht->tbl, ht); +restart: + hash = rht_key_hashfn(ht, tbl, key, params); + rht_for_each_rcu(he, tbl, hash) { + if (params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, he)) : + rhashtable_compare(&arg, rht_obj(ht, he))) + continue; + return he; + } + + /* Ensure we see any new tables. 
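+	 * A resize may have moved the object into a new table; after the
+	 * read barrier, re-check tbl->future_tbl and restart the walk
+	 * there if one exists, so a concurrent rehash cannot make us
+	 * miss an entry.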
*/ + smp_rmb(); + + tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (unlikely(tbl)) + goto restart; + + return NULL; +} + +/** + * rhashtable_lookup - search hash table + * @ht: hash table + * @key: the pointer to the key + * @params: hash table parameters + * + * Computes the hash value for the key and traverses the bucket chain looking + * for a entry with an identical key. The first matching entry is returned. + * + * This must only be called under the RCU read lock. + * + * Returns the first entry on which the compare function returned true. + */ +static inline void *rhashtable_lookup( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + struct rhash_head *he = __rhashtable_lookup(ht, key, params); + + return he ? rht_obj(ht, he) : NULL; +} + +/** + * rhashtable_lookup_fast - search hash table, without RCU read lock + * @ht: hash table + * @key: the pointer to the key + * @params: hash table parameters + * + * Computes the hash value for the key and traverses the bucket chain looking + * for a entry with an identical key. The first matching entry is returned. + * + * Only use this function when you have other mechanisms guaranteeing + * that the object won't go away after the RCU read lock is released. + * + * Returns the first entry on which the compare function returned true. + */ +static inline void *rhashtable_lookup_fast( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + void *obj; + + rcu_read_lock(); + obj = rhashtable_lookup(ht, key, params); + rcu_read_unlock(); + + return obj; +} + +/** + * rhltable_lookup - search hash list table + * @hlt: hash table + * @key: the pointer to the key + * @params: hash table parameters + * + * Computes the hash value for the key and traverses the bucket chain looking + * for a entry with an identical key. All matching entries are returned + * in a list. + * + * This must only be called under the RCU read lock. + * + * Returns the list of entries that match the given key. + */ +static inline struct rhlist_head *rhltable_lookup( + struct rhltable *hlt, const void *key, + const struct rhashtable_params params) +{ + struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params); + + return he ? container_of(he, struct rhlist_head, rhead) : NULL; +} + +/* Internal function, please use rhashtable_insert_fast() instead. This + * function returns the existing element already in hashes in there is a clash, + * otherwise it returns an error via ERR_PTR(). 
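[Editor's note] A minimal caller-side sketch of the lookup helpers above, reusing the hypothetical struct test_obj from the earlier sketch and a table keyed by the embedded u32 (kernel context assumed; rhashtable_lookup_fast() could be used instead where the caller does not already hold the RCU read lock):

	static const struct rhashtable_params test_params = {
		.key_len		= sizeof(u32),
		.key_offset		= offsetof(struct test_obj, key),
		.head_offset		= offsetof(struct test_obj, node),
		.automatic_shrinking	= true,
	};

	/* Caller holds rcu_read_lock(). */
	static struct test_obj *test_obj_find(struct rhashtable *ht, u32 key)
	{
		return rhashtable_lookup(ht, &key, test_params);
	}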
+ */ +static inline void *__rhashtable_insert_fast( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params, bool rhlist) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + struct rhash_head __rcu **pprev; + struct bucket_table *tbl; + struct rhash_head *head; + spinlock_t *lock; + unsigned int hash; + int elasticity; + void *data; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + hash = rht_head_hashfn(ht, tbl, obj, params); + lock = rht_bucket_lock(tbl, hash); + spin_lock_bh(lock); + + if (unlikely(rcu_access_pointer(tbl->future_tbl))) { +slow_path: + spin_unlock_bh(lock); + rcu_read_unlock(); + return rhashtable_insert_slow(ht, key, obj); + } + + elasticity = RHT_ELASTICITY; + pprev = rht_bucket_insert(ht, tbl, hash); + data = ERR_PTR(-ENOMEM); + if (!pprev) + goto out; + + rht_for_each_continue(head, *pprev, tbl, hash) { + struct rhlist_head *plist; + struct rhlist_head *list; + + elasticity--; + if (!key || + (params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, head)) : + rhashtable_compare(&arg, rht_obj(ht, head)))) { + pprev = &head->next; + continue; + } + + data = rht_obj(ht, head); + + if (!rhlist) + goto out; + + + list = container_of(obj, struct rhlist_head, rhead); + plist = container_of(head, struct rhlist_head, rhead); + + RCU_INIT_POINTER(list->next, plist); + head = rht_dereference_bucket(head->next, tbl, hash); + RCU_INIT_POINTER(list->rhead.next, head); + rcu_assign_pointer(*pprev, obj); + + goto good; + } + + if (elasticity <= 0) + goto slow_path; + + data = ERR_PTR(-E2BIG); + if (unlikely(rht_grow_above_max(ht, tbl))) + goto out; + + if (unlikely(rht_grow_above_100(ht, tbl))) + goto slow_path; + + head = rht_dereference_bucket(*pprev, tbl, hash); + + RCU_INIT_POINTER(obj->next, head); + if (rhlist) { + struct rhlist_head *list; + + list = container_of(obj, struct rhlist_head, rhead); + RCU_INIT_POINTER(list->next, NULL); + } + + rcu_assign_pointer(*pprev, obj); + + atomic_inc(&ht->nelems); + if (rht_grow_above_75(ht, tbl)) + schedule_work(&ht->run_work); + +good: + data = NULL; + +out: + spin_unlock_bh(lock); + rcu_read_unlock(); + + return data; +} + +/** + * rhashtable_insert_fast - insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Will take a per bucket spinlock to protect against mutual mutations + * on the same bucket. Multiple insertions may occur in parallel unless + * they map to the same bucket lock. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if residency in the + * table grows beyond 70%. + */ +static inline int rhashtable_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + void *ret; + + ret = __rhashtable_insert_fast(ht, NULL, obj, params, false); + if (IS_ERR(ret)) + return PTR_ERR(ret); + + return ret == NULL ? 0 : -EEXIST; +} + +/** + * rhltable_insert_key - insert object into hash list table + * @hlt: hash list table + * @key: the pointer to the key + * @list: pointer to hash list head inside object + * @params: hash table parameters + * + * Will take a per bucket spinlock to protect against mutual mutations + * on the same bucket. Multiple insertions may occur in parallel unless + * they map to the same bucket lock. + * + * It is safe to call this function from atomic context. 
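[Editor's note] Continuing the hypothetical test_obj/test_params sketch, insertion is a single call; -EEXIST reports a duplicate key, other negative values report memory or table-pressure failures:

	static int test_obj_add(struct rhashtable *ht, struct test_obj *obj)
	{
		int err;

		err = rhashtable_insert_fast(ht, &obj->node, test_params);
		if (err == -EEXIST)
			pr_debug("key %u already present\n", obj->key);
		return err;
	}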
+ * + * Will trigger an automatic deferred table resizing if residency in the + * table grows beyond 70%. + */ +static inline int rhltable_insert_key( + struct rhltable *hlt, const void *key, struct rhlist_head *list, + const struct rhashtable_params params) +{ + return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead, + params, true)); +} + +/** + * rhltable_insert - insert object into hash list table + * @hlt: hash list table + * @list: pointer to hash list head inside object + * @params: hash table parameters + * + * Will take a per bucket spinlock to protect against mutual mutations + * on the same bucket. Multiple insertions may occur in parallel unless + * they map to the same bucket lock. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if residency in the + * table grows beyond 70%. + */ +static inline int rhltable_insert( + struct rhltable *hlt, struct rhlist_head *list, + const struct rhashtable_params params) +{ + const char *key = rht_obj(&hlt->ht, &list->rhead); + + key += params.key_offset; + + return rhltable_insert_key(hlt, key, list, params); +} + +/** + * rhashtable_lookup_insert_fast - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * This lookup function may only be used for fixed key hash table (key_len + * parameter set). It will BUG() if used inappropriately. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if residency in the + * table grows beyond 70%. + */ +static inline int rhashtable_lookup_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + const char *key = rht_obj(ht, obj); + void *ret; + + BUG_ON(ht->p.obj_hashfn); + + ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, + false); + if (IS_ERR(ret)) + return PTR_ERR(ret); + + return ret == NULL ? 0 : -EEXIST; +} + +/** + * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Just like rhashtable_lookup_insert_fast(), but this function returns the + * object if it exists, NULL if it did not and the insertion was successful, + * and an ERR_PTR otherwise. + */ +static inline void *rhashtable_lookup_get_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + const char *key = rht_obj(ht, obj); + + BUG_ON(ht->p.obj_hashfn); + + return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, + false); +} + +/** + * rhashtable_lookup_insert_key - search and insert object to hash table + * with explicit key + * @ht: hash table + * @key: key + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. 
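[Editor's note] Where duplicate keys must be kept, the rhltable variants above are the intended interface. A sketch with a hypothetical struct test_dup; note the rhlist_head linkage and that head_offset points at it:

	struct test_dup {
		u32			key;
		struct rhlist_head	list_node;
	};

	static const struct rhashtable_params test_dup_params = {
		.key_len	= sizeof(u32),
		.key_offset	= offsetof(struct test_dup, key),
		.head_offset	= offsetof(struct test_dup, list_node),
	};

	static int test_dup_add(struct rhltable *hlt, struct test_dup *d)
	{
		/* unlike rhashtable_insert_fast(), duplicate keys are accepted */
		return rhltable_insert(hlt, &d->list_node, test_dup_params);
	}

	static void test_dup_walk(struct rhltable *hlt, u32 key)
	{
		struct rhlist_head *list, *pos;
		struct test_dup *d;

		rcu_read_lock();
		list = rhltable_lookup(hlt, &key, test_dup_params);
		rhl_for_each_entry_rcu(d, pos, list, list_node)
			pr_debug("duplicate entry at %p\n", d);
		rcu_read_unlock();
	}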
Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * Lookups may occur in parallel with hashtable mutations and resizing. + * + * Will trigger an automatic deferred table resizing if residency in the + * table grows beyond 70%. + * + * Returns zero on success. + */ +static inline int rhashtable_lookup_insert_key( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ + void *ret; + + BUG_ON(!ht->p.obj_hashfn || !key); + + ret = __rhashtable_insert_fast(ht, key, obj, params, false); + if (IS_ERR(ret)) + return PTR_ERR(ret); + + return ret == NULL ? 0 : -EEXIST; +} + +/** + * rhashtable_lookup_get_insert_key - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * @data: pointer to element data already in hashes + * + * Just like rhashtable_lookup_insert_key(), but this function returns the + * object if it exists, NULL if it does not and the insertion was successful, + * and an ERR_PTR otherwise. + */ +static inline void *rhashtable_lookup_get_insert_key( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ + BUG_ON(!ht->p.obj_hashfn || !key); + + return __rhashtable_insert_fast(ht, key, obj, params, false); +} + +/* Internal function, please use rhashtable_remove_fast() instead */ +static inline int __rhashtable_remove_fast_one( + struct rhashtable *ht, struct bucket_table *tbl, + struct rhash_head *obj, const struct rhashtable_params params, + bool rhlist) +{ + struct rhash_head __rcu **pprev; + struct rhash_head *he; + spinlock_t * lock; + unsigned int hash; + int err = -ENOENT; + + hash = rht_head_hashfn(ht, tbl, obj, params); + lock = rht_bucket_lock(tbl, hash); + + spin_lock_bh(lock); + + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(he, *pprev, tbl, hash) { + struct rhlist_head *list; + + list = container_of(he, struct rhlist_head, rhead); + + if (he != obj) { + struct rhlist_head __rcu **lpprev; + + pprev = &he->next; + + if (!rhlist) + continue; + + do { + lpprev = &list->next; + list = rht_dereference_bucket(list->next, + tbl, hash); + } while (list && obj != &list->rhead); + + if (!list) + continue; + + list = rht_dereference_bucket(list->next, tbl, hash); + RCU_INIT_POINTER(*lpprev, list); + err = 0; + break; + } + + obj = rht_dereference_bucket(obj->next, tbl, hash); + err = 1; + + if (rhlist) { + list = rht_dereference_bucket(list->next, tbl, hash); + if (list) { + RCU_INIT_POINTER(list->rhead.next, obj); + obj = &list->rhead; + err = 0; + } + } + + rcu_assign_pointer(*pprev, obj); + break; + } + + spin_unlock_bh(lock); + + if (err > 0) { + atomic_dec(&ht->nelems); + if (unlikely(ht->p.automatic_shrinking && + rht_shrink_below_30(ht, tbl))) + schedule_work(&ht->run_work); + err = 0; + } + + return err; +} + +/* Internal function, please use rhashtable_remove_fast() instead */ +static inline int __rhashtable_remove_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params, bool rhlist) +{ + struct bucket_table *tbl; + int err; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + + /* Because we have already taken (and released) the bucket + * lock in old_tbl, if we find that future_tbl is not yet + * visible then that guarantees the entry to still be in + * the old tbl if it exists. 
+ */ + while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params, + rhlist)) && + (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) + ; + + rcu_read_unlock(); + + return err; +} + +/** + * rhashtable_remove_fast - remove object from hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Since the hash chain is single linked, the removal operation needs to + * walk the bucket chain upon removal. The removal operation is thus + * considerable slow if the hash table is not correctly sized. + * + * Will automatically shrink the table if permitted when residency drops + * below 30%. + * + * Returns zero on success, -ENOENT if the entry could not be found. + */ +static inline int rhashtable_remove_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + return __rhashtable_remove_fast(ht, obj, params, false); +} + +/** + * rhltable_remove - remove object from hash list table + * @hlt: hash list table + * @list: pointer to hash list head inside object + * @params: hash table parameters + * + * Since the hash chain is single linked, the removal operation needs to + * walk the bucket chain upon removal. The removal operation is thus + * considerable slow if the hash table is not correctly sized. + * + * Will automatically shrink the table if permitted when residency drops + * below 30% + * + * Returns zero on success, -ENOENT if the entry could not be found. + */ +static inline int rhltable_remove( + struct rhltable *hlt, struct rhlist_head *list, + const struct rhashtable_params params) +{ + return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true); +} + +/* Internal function, please use rhashtable_replace_fast() instead */ +static inline int __rhashtable_replace_fast( + struct rhashtable *ht, struct bucket_table *tbl, + struct rhash_head *obj_old, struct rhash_head *obj_new, + const struct rhashtable_params params) +{ + struct rhash_head __rcu **pprev; + struct rhash_head *he; + spinlock_t *lock; + unsigned int hash; + int err = -ENOENT; + + /* Minimally, the old and new objects must have same hash + * (which should mean identifiers are the same). + */ + hash = rht_head_hashfn(ht, tbl, obj_old, params); + if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) + return -EINVAL; + + lock = rht_bucket_lock(tbl, hash); + + spin_lock_bh(lock); + + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(he, *pprev, tbl, hash) { + if (he != obj_old) { + pprev = &he->next; + continue; + } + + rcu_assign_pointer(obj_new->next, obj_old->next); + rcu_assign_pointer(*pprev, obj_new); + err = 0; + break; + } + + spin_unlock_bh(lock); + + return err; +} + +/** + * rhashtable_replace_fast - replace an object in hash table + * @ht: hash table + * @obj_old: pointer to hash head inside object being replaced + * @obj_new: pointer to hash head inside object which is new + * @params: hash table parameters + * + * Replacing an object doesn't affect the number of elements in the hash table + * or bucket, so we don't need to worry about shrinking or expanding the + * table here. + * + * Returns zero on success, -ENOENT if the entry could not be found, + * -EINVAL if hash is not the same for the old and new objects. 
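[Editor's note] Removal completes the hypothetical test_obj example. Since concurrent lookups may still be walking the chain under RCU, the object is assumed here to also embed a struct rcu_head so it can be freed after a grace period:

	/* assumes 'struct rcu_head rcu;' was added to the hypothetical struct test_obj */
	static void test_obj_del(struct rhashtable *ht, struct test_obj *obj)
	{
		if (rhashtable_remove_fast(ht, &obj->node, test_params) == 0)
			kfree_rcu(obj, rcu);
	}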
+ */ +static inline int rhashtable_replace_fast( + struct rhashtable *ht, struct rhash_head *obj_old, + struct rhash_head *obj_new, + const struct rhashtable_params params) +{ + struct bucket_table *tbl; + int err; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + + /* Because we have already taken (and released) the bucket + * lock in old_tbl, if we find that future_tbl is not yet + * visible then that guarantees the entry to still be in + * the old tbl if it exists. + */ + while ((err = __rhashtable_replace_fast(ht, tbl, obj_old, + obj_new, params)) && + (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) + ; + + rcu_read_unlock(); + + return err; +} + +/* Obsolete function, do not use in new code. */ +static inline int rhashtable_walk_init(struct rhashtable *ht, + struct rhashtable_iter *iter, gfp_t gfp) +{ + rhashtable_walk_enter(ht, iter); + return 0; +} + +/** + * rhltable_walk_enter - Initialise an iterator + * @hlt: Table to walk over + * @iter: Hash table Iterator + * + * This function prepares a hash table walk. + * + * Note that if you restart a walk after rhashtable_walk_stop you + * may see the same object twice. Also, you may miss objects if + * there are removals in between rhashtable_walk_stop and the next + * call to rhashtable_walk_start. + * + * For a completely stable walk you should construct your own data + * structure outside the hash table. + * + * This function may be called from any process context, including + * non-preemptable context, but cannot be called from softirq or + * hardirq context. + * + * You must call rhashtable_walk_exit after this function returns. + */ +static inline void rhltable_walk_enter(struct rhltable *hlt, + struct rhashtable_iter *iter) +{ + return rhashtable_walk_enter(&hlt->ht, iter); +} + +/** + * rhltable_free_and_destroy - free elements and destroy hash list table + * @hlt: the hash list table to destroy + * @free_fn: callback to release resources of element + * @arg: pointer passed to free_fn + * + * See documentation for rhashtable_free_and_destroy. + */ +static inline void rhltable_free_and_destroy(struct rhltable *hlt, + void (*free_fn)(void *ptr, + void *arg), + void *arg) +{ + return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg); +} + +static inline void rhltable_destroy(struct rhltable *hlt) +{ + return rhltable_free_and_destroy(hlt, NULL, NULL); +} + +#endif /* _LINUX_RHASHTABLE_H */ diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h new file mode 100644 index 000000000..941bfd9b3 --- /dev/null +++ b/include/linux/ring_buffer.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RING_BUFFER_H +#define _LINUX_RING_BUFFER_H + +#include +#include +#include + +struct ring_buffer; +struct ring_buffer_iter; + +/* + * Don't refer to this struct directly, use functions below. + */ +struct ring_buffer_event { + u32 type_len:5, time_delta:27; + + u32 array[]; +}; + +/** + * enum ring_buffer_type - internal ring buffer types + * + * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event + * If time_delta is 0: + * array is ignored + * size is variable depending on how much + * padding is needed + * If time_delta is non zero: + * array[0] holds the actual length + * size = 4 + length (bytes) + * + * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta + * array[0] = time delta (28 .. 
59) + * size = 8 bytes + * + * @RINGBUF_TYPE_TIME_STAMP: Absolute timestamp + * Same format as TIME_EXTEND except that the + * value is an absolute timestamp, not a delta + * event.time_delta contains bottom 27 bits + * array[0] = top (28 .. 59) bits + * size = 8 bytes + * + * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX: + * Data record + * If type_len is zero: + * array[0] holds the actual length + * array[1..(length+3)/4] holds data + * size = 4 + length (bytes) + * else + * length = type_len << 2 + * array[0..(length+3)/4-1] holds data + * size = 4 + length (bytes) + */ +enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, + RINGBUF_TYPE_PADDING, + RINGBUF_TYPE_TIME_EXTEND, + RINGBUF_TYPE_TIME_STAMP, +}; + +unsigned ring_buffer_event_length(struct ring_buffer_event *event); +void *ring_buffer_event_data(struct ring_buffer_event *event); +u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event); + +/* + * ring_buffer_discard_commit will remove an event that has not + * been committed yet. If this is used, then ring_buffer_unlock_commit + * must not be called on the discarded event. This function + * will try to remove the event from the ring buffer completely + * if another event has not been written after it. + * + * Example use: + * + * if (some_condition) + * ring_buffer_discard_commit(buffer, event); + * else + * ring_buffer_unlock_commit(buffer, event); + */ +void ring_buffer_discard_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event); + +/* + * size is in bytes for each per CPU buffer. + */ +struct ring_buffer * +__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); + +/* + * Because the ring buffer is generic, if other users of the ring buffer get + * traced by ftrace, it can produce lockdep warnings. We need to keep each + * ring buffer's lock class separate. 
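[Editor's note] A minimal writer-side sketch of the reserve/commit cycle, assuming kernel context and using the ring_buffer_alloc() wrapper and RB_FL_OVERWRITE flag declared later in this header:

	static int rb_demo_write_one(void)
	{
		struct ring_buffer *rb;
		struct ring_buffer_event *event;
		u32 *payload;

		rb = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);	/* size is per CPU */
		if (!rb)
			return -ENOMEM;

		event = ring_buffer_lock_reserve(rb, sizeof(*payload));
		if (event) {
			payload = ring_buffer_event_data(event);
			*payload = 0x12345678;
			ring_buffer_unlock_commit(rb, event);
		}

		ring_buffer_free(rb);
		return 0;
	}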
+ */ +#define ring_buffer_alloc(size, flags) \ +({ \ + static struct lock_class_key __key; \ + __ring_buffer_alloc((size), (flags), &__key); \ +}) + +int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full); +__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, + struct file *filp, poll_table *poll_table); + + +#define RING_BUFFER_ALL_CPUS -1 + +void ring_buffer_free(struct ring_buffer *buffer); + +int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu); + +void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val); + +struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, + unsigned long length); +int ring_buffer_unlock_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event); +int ring_buffer_write(struct ring_buffer *buffer, + unsigned long length, void *data); + +void ring_buffer_nest_start(struct ring_buffer *buffer); +void ring_buffer_nest_end(struct ring_buffer *buffer); + +struct ring_buffer_event * +ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, + unsigned long *lost_events); +struct ring_buffer_event * +ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, + unsigned long *lost_events); + +struct ring_buffer_iter * +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags); +void ring_buffer_read_prepare_sync(void); +void ring_buffer_read_start(struct ring_buffer_iter *iter); +void ring_buffer_read_finish(struct ring_buffer_iter *iter); + +struct ring_buffer_event * +ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); +struct ring_buffer_event * +ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); +void ring_buffer_iter_reset(struct ring_buffer_iter *iter); +int ring_buffer_iter_empty(struct ring_buffer_iter *iter); + +unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu); + +void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); +void ring_buffer_reset(struct ring_buffer *buffer); + +#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP +int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, + struct ring_buffer *buffer_b, int cpu); +#else +static inline int +ring_buffer_swap_cpu(struct ring_buffer *buffer_a, + struct ring_buffer *buffer_b, int cpu) +{ + return -ENODEV; +} +#endif + +bool ring_buffer_empty(struct ring_buffer *buffer); +bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); + +void ring_buffer_record_disable(struct ring_buffer *buffer); +void ring_buffer_record_enable(struct ring_buffer *buffer); +void ring_buffer_record_off(struct ring_buffer *buffer); +void ring_buffer_record_on(struct ring_buffer *buffer); +bool ring_buffer_record_is_on(struct ring_buffer *buffer); +bool ring_buffer_record_is_set_on(struct ring_buffer *buffer); +void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); +void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); + +u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_entries(struct ring_buffer *buffer); +unsigned long ring_buffer_overruns(struct ring_buffer *buffer); +unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu); +unsigned long 
ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu); + +u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); +void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, + int cpu, u64 *ts); +void ring_buffer_set_clock(struct ring_buffer *buffer, + u64 (*clock)(void)); +void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs); +bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer); + +size_t ring_buffer_page_len(void *page); + + +void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu); +void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data); +int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, + size_t len, int cpu, int full); + +struct trace_seq; + +int ring_buffer_print_entry_header(struct trace_seq *s); +int ring_buffer_print_page_header(struct trace_seq *s); + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1 << 0, +}; + +#ifdef CONFIG_RING_BUFFER +int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node); +#else +#define trace_rb_cpu_prepare NULL +#endif + +#endif /* _LINUX_RING_BUFFER_H */ diff --git a/include/linux/rio.h b/include/linux/rio.h new file mode 100644 index 000000000..37b95c4af --- /dev/null +++ b/include/linux/rio.h @@ -0,0 +1,562 @@ +/* + * RapidIO interconnect services + * (RapidIO Interconnect Specification, http://www.rapidio.org) + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef LINUX_RIO_H +#define LINUX_RIO_H + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +#include +#endif + +#define RIO_NO_HOPCOUNT -1 +#define RIO_INVALID_DESTID 0xffff + +#define RIO_MAX_MPORTS 8 +#define RIO_MAX_MPORT_RESOURCES 16 +#define RIO_MAX_DEV_RESOURCES 16 +#define RIO_MAX_MPORT_NAME 40 + +#define RIO_GLOBAL_TABLE 0xff /* Indicates access of a switch's + global routing table if it + has multiple (or per port) + tables */ + +#define RIO_INVALID_ROUTE 0xff /* Indicates that a route table + entry is invalid (no route + exists for the device ID) */ + +#define RIO_MAX_ROUTE_ENTRIES(size) (size ? (1 << 16) : (1 << 8)) +#define RIO_ANY_DESTID(size) (size ? 0xffff : 0xff) + +#define RIO_MAX_MBOX 4 +#define RIO_MAX_MSG_SIZE 0x1000 + +/* + * Error values that may be returned by RIO functions. + */ +#define RIO_SUCCESSFUL 0x00 +#define RIO_BAD_SIZE 0x81 + +/* + * For RIO devices, the region numbers are assigned this way: + * + * 0 RapidIO outbound doorbells + * 1-15 RapidIO memory regions + * + * For RIO master ports, the region number are assigned this way: + * + * 0 RapidIO inbound doorbells + * 1 RapidIO inbound mailboxes + * 2 RapidIO outbound mailboxes + */ +#define RIO_DOORBELL_RESOURCE 0 +#define RIO_INB_MBOX_RESOURCE 1 +#define RIO_OUTB_MBOX_RESOURCE 2 + +#define RIO_PW_MSG_SIZE 64 + +/* + * A component tag value (stored in the component tag CSR) is used as device's + * unique identifier assigned during enumeration. Besides being used for + * identifying switches (which do not have device ID register), it also is used + * by error management notification and therefore has to be assigned + * to endpoints as well. 
+ */ +#define RIO_CTAG_RESRVD 0xfffe0000 /* Reserved */ +#define RIO_CTAG_UDEVID 0x0001ffff /* Unique device identifier */ + +extern struct bus_type rio_bus_type; +extern struct class rio_mport_class; + +struct rio_mport; +struct rio_dev; +union rio_pw_msg; + +/** + * struct rio_switch - RIO switch info + * @node: Node in global list of switches + * @route_table: Copy of switch routing table + * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0 + * @ops: pointer to switch-specific operations + * @lock: lock to serialize operations updates + * @nextdev: Array of per-port pointers to the next attached device + */ +struct rio_switch { + struct list_head node; + u8 *route_table; + u32 port_ok; + struct rio_switch_ops *ops; + spinlock_t lock; + struct rio_dev *nextdev[0]; +}; + +/** + * struct rio_switch_ops - Per-switch operations + * @owner: The module owner of this structure + * @add_entry: Callback for switch-specific route add function + * @get_entry: Callback for switch-specific route get function + * @clr_table: Callback for switch-specific clear route table function + * @set_domain: Callback for switch-specific domain setting function + * @get_domain: Callback for switch-specific domain get function + * @em_init: Callback for switch-specific error management init function + * @em_handle: Callback for switch-specific error management handler function + * + * Defines the operations that are necessary to initialize/control + * a particular RIO switch device. + */ +struct rio_switch_ops { + struct module *owner; + int (*add_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port); + int (*get_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port); + int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table); + int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain); + int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain); + int (*em_init) (struct rio_dev *dev); + int (*em_handle) (struct rio_dev *dev, u8 swport); +}; + +enum rio_device_state { + RIO_DEVICE_INITIALIZING, + RIO_DEVICE_RUNNING, + RIO_DEVICE_GONE, + RIO_DEVICE_SHUTDOWN, +}; + +/** + * struct rio_dev - RIO device info + * @global_list: Node in list of all RIO devices + * @net_list: Node in list of RIO devices in a network + * @net: Network this device is a part of + * @do_enum: Enumeration flag + * @did: Device ID + * @vid: Vendor ID + * @device_rev: Device revision + * @asm_did: Assembly device ID + * @asm_vid: Assembly vendor ID + * @asm_rev: Assembly revision + * @efptr: Extended feature pointer + * @pef: Processing element features + * @swpinfo: Switch port info + * @src_ops: Source operation capabilities + * @dst_ops: Destination operation capabilities + * @comp_tag: RIO component tag + * @phys_efptr: RIO device extended features pointer + * @phys_rmap: LP-Serial Register Map Type (1 or 2) + * @em_efptr: RIO Error Management features pointer + * @dma_mask: Mask of bits of RIO address this device implements + * @driver: Driver claiming this device + * @dev: Device model device + * @riores: RIO resources this device owns + * @pwcback: port-write callback function for this device + * @destid: Network destination ID (or associated destid for switch) + * @hopcount: Hopcount to this device + * @prev: Previous RIO device connected to the current one + * @state: device state + * @rswitch: struct rio_switch (if valid for this device) + */ 
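[Editor's note] Before the struct rio_dev definition that follows, a sketch of how a hypothetical switch driver might populate the rio_switch_ops table documented above (stub callbacks only; a real driver would issue maintenance transactions here):

	static int demo_sw_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
				     u16 table, u16 route_destid, u8 route_port)
	{
		/* program one routing-table entry on the switch */
		return 0;
	}

	static int demo_sw_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
				     u16 table, u16 route_destid, u8 *route_port)
	{
		*route_port = RIO_INVALID_ROUTE;	/* no route programmed */
		return 0;
	}

	static struct rio_switch_ops demo_sw_ops = {
		.owner		= THIS_MODULE,
		.add_entry	= demo_sw_add_entry,
		.get_entry	= demo_sw_get_entry,
	};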
+struct rio_dev { + struct list_head global_list; /* node in list of all RIO devices */ + struct list_head net_list; /* node in per net list */ + struct rio_net *net; /* RIO net this device resides in */ + bool do_enum; + u16 did; + u16 vid; + u32 device_rev; + u16 asm_did; + u16 asm_vid; + u16 asm_rev; + u16 efptr; + u32 pef; + u32 swpinfo; + u32 src_ops; + u32 dst_ops; + u32 comp_tag; + u32 phys_efptr; + u32 phys_rmap; + u32 em_efptr; + u64 dma_mask; + struct rio_driver *driver; /* RIO driver claiming this device */ + struct device dev; /* LDM device structure */ + struct resource riores[RIO_MAX_DEV_RESOURCES]; + int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step); + u16 destid; + u8 hopcount; + struct rio_dev *prev; + atomic_t state; + struct rio_switch rswitch[0]; /* RIO switch info */ +}; + +#define rio_dev_g(n) list_entry(n, struct rio_dev, global_list) +#define rio_dev_f(n) list_entry(n, struct rio_dev, net_list) +#define to_rio_dev(n) container_of(n, struct rio_dev, dev) +#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0]) +#define to_rio_mport(n) container_of(n, struct rio_mport, dev) +#define to_rio_net(n) container_of(n, struct rio_net, dev) + +/** + * struct rio_msg - RIO message event + * @res: Mailbox resource + * @mcback: Message event callback + */ +struct rio_msg { + struct resource *res; + void (*mcback) (struct rio_mport * mport, void *dev_id, int mbox, int slot); +}; + +/** + * struct rio_dbell - RIO doorbell event + * @node: Node in list of doorbell events + * @res: Doorbell resource + * @dinb: Doorbell event callback + * @dev_id: Device specific pointer to pass on event + */ +struct rio_dbell { + struct list_head node; + struct resource *res; + void (*dinb) (struct rio_mport *mport, void *dev_id, u16 src, u16 dst, u16 info); + void *dev_id; +}; + +/** + * struct rio_mport - RIO master port info + * @dbells: List of doorbell events + * @pwrites: List of portwrite events + * @node: Node in global list of master ports + * @nnode: Node in network list of master ports + * @net: RIO net this mport is attached to + * @lock: lock to synchronize lists manipulations + * @iores: I/O mem resource that this master port interface owns + * @riores: RIO resources that this master port interfaces owns + * @inb_msg: RIO inbound message event descriptors + * @outb_msg: RIO outbound message event descriptors + * @host_deviceid: Host device ID associated with this master port + * @ops: configuration space functions + * @id: Port ID, unique among all ports + * @index: Port index, unique among all port interfaces of the same type + * @sys_size: RapidIO common transport system size + * @phys_efptr: RIO port extended features pointer + * @phys_rmap: LP-Serial EFB Register Mapping type (1 or 2). 
+ * @name: Port name string + * @dev: device structure associated with an mport + * @priv: Master port private data + * @dma: DMA device associated with mport + * @nscan: RapidIO network enumeration/discovery operations + * @state: mport device state + * @pwe_refcnt: port-write enable ref counter to track enable/disable requests + */ +struct rio_mport { + struct list_head dbells; /* list of doorbell events */ + struct list_head pwrites; /* list of portwrite events */ + struct list_head node; /* node in global list of ports */ + struct list_head nnode; /* node in net list of ports */ + struct rio_net *net; /* RIO net this mport is attached to */ + struct mutex lock; + struct resource iores; + struct resource riores[RIO_MAX_MPORT_RESOURCES]; + struct rio_msg inb_msg[RIO_MAX_MBOX]; + struct rio_msg outb_msg[RIO_MAX_MBOX]; + int host_deviceid; /* Host device ID */ + struct rio_ops *ops; /* low-level architecture-dependent routines */ + unsigned char id; /* port ID, unique among all ports */ + unsigned char index; /* port index, unique among all port + interfaces of the same type */ + unsigned int sys_size; /* RapidIO common transport system size. + * 0 - Small size. 256 devices. + * 1 - Large size, 65536 devices. + */ + u32 phys_efptr; + u32 phys_rmap; + unsigned char name[RIO_MAX_MPORT_NAME]; + struct device dev; + void *priv; /* Master port private data */ +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + struct dma_device dma; +#endif + struct rio_scan *nscan; + atomic_t state; + unsigned int pwe_refcnt; +}; + +static inline int rio_mport_is_running(struct rio_mport *mport) +{ + return atomic_read(&mport->state) == RIO_DEVICE_RUNNING; +} + +/* + * Enumeration/discovery control flags + */ +#define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */ + +/** + * struct rio_net - RIO network info + * @node: Node in global list of RIO networks + * @devices: List of devices in this network + * @switches: List of switches in this network + * @mports: List of master ports accessing this network + * @hport: Default port for accessing this network + * @id: RIO network ID + * @dev: Device object + * @enum_data: private data specific to a network enumerator + * @release: enumerator-specific release callback + */ +struct rio_net { + struct list_head node; /* node in list of networks */ + struct list_head devices; /* list of devices in this net */ + struct list_head switches; /* list of switches in this net */ + struct list_head mports; /* list of ports accessing net */ + struct rio_mport *hport; /* primary port for accessing net */ + unsigned char id; /* RIO network ID */ + struct device dev; + void *enum_data; /* private data for enumerator of the network */ + void (*release)(struct rio_net *net); +}; + +enum rio_link_speed { + RIO_LINK_DOWN = 0, /* SRIO Link not initialized */ + RIO_LINK_125 = 1, /* 1.25 GBaud */ + RIO_LINK_250 = 2, /* 2.5 GBaud */ + RIO_LINK_312 = 3, /* 3.125 GBaud */ + RIO_LINK_500 = 4, /* 5.0 GBaud */ + RIO_LINK_625 = 5 /* 6.25 GBaud */ +}; + +enum rio_link_width { + RIO_LINK_1X = 0, + RIO_LINK_1XR = 1, + RIO_LINK_2X = 3, + RIO_LINK_4X = 2, + RIO_LINK_8X = 4, + RIO_LINK_16X = 5 +}; + +enum rio_mport_flags { + RIO_MPORT_DMA = (1 << 0), /* supports DMA data transfers */ + RIO_MPORT_DMA_SG = (1 << 1), /* DMA supports HW SG mode */ + RIO_MPORT_IBSG = (1 << 2), /* inbound mapping supports SG */ +}; + +/** + * struct rio_mport_attr - RIO mport device attributes + * @flags: mport device capability flags + * @link_speed: SRIO link speed value (as defined by RapidIO specification) + * 
@link_width: SRIO link width value (as defined by RapidIO specification) + * @dma_max_sge: number of SG list entries that can be handled by DMA channel(s) + * @dma_max_size: max number of bytes in single DMA transfer (SG entry) + * @dma_align: alignment shift for DMA operations (as for other DMA operations) + */ +struct rio_mport_attr { + int flags; + int link_speed; + int link_width; + + /* DMA capability info: valid only if RIO_MPORT_DMA flag is set */ + int dma_max_sge; + int dma_max_size; + int dma_align; +}; + +/* Low-level architecture-dependent routines */ + +/** + * struct rio_ops - Low-level RIO configuration space operations + * @lcread: Callback to perform local (master port) read of config space. + * @lcwrite: Callback to perform local (master port) write of config space. + * @cread: Callback to perform network read of config space. + * @cwrite: Callback to perform network write of config space. + * @dsend: Callback to send a doorbell message. + * @pwenable: Callback to enable/disable port-write message handling. + * @open_outb_mbox: Callback to initialize outbound mailbox. + * @close_outb_mbox: Callback to shut down outbound mailbox. + * @open_inb_mbox: Callback to initialize inbound mailbox. + * @close_inb_mbox: Callback to shut down inbound mailbox. + * @add_outb_message: Callback to add a message to an outbound mailbox queue. + * @add_inb_buffer: Callback to add a buffer to an inbound mailbox queue. + * @get_inb_message: Callback to get a message from an inbound mailbox queue. + * @map_inb: Callback to map RapidIO address region into local memory space. + * @unmap_inb: Callback to unmap RapidIO address region mapped with map_inb(). + * @query_mport: Callback to query mport device attributes. + * @map_outb: Callback to map outbound address region into local memory space. + * @unmap_outb: Callback to unmap outbound RapidIO address region. 
+ */ +struct rio_ops { + int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len, + u32 *data); + int (*lcwrite) (struct rio_mport *mport, int index, u32 offset, int len, + u32 data); + int (*cread) (struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 *data); + int (*cwrite) (struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 data); + int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data); + int (*pwenable) (struct rio_mport *mport, int enable); + int (*open_outb_mbox)(struct rio_mport *mport, void *dev_id, + int mbox, int entries); + void (*close_outb_mbox)(struct rio_mport *mport, int mbox); + int (*open_inb_mbox)(struct rio_mport *mport, void *dev_id, + int mbox, int entries); + void (*close_inb_mbox)(struct rio_mport *mport, int mbox); + int (*add_outb_message)(struct rio_mport *mport, struct rio_dev *rdev, + int mbox, void *buffer, size_t len); + int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf); + void *(*get_inb_message)(struct rio_mport *mport, int mbox); + int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart, + u64 rstart, u64 size, u32 flags); + void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart); + int (*query_mport)(struct rio_mport *mport, + struct rio_mport_attr *attr); + int (*map_outb)(struct rio_mport *mport, u16 destid, u64 rstart, + u32 size, u32 flags, dma_addr_t *laddr); + void (*unmap_outb)(struct rio_mport *mport, u16 destid, u64 rstart); +}; + +#define RIO_RESOURCE_MEM 0x00000100 +#define RIO_RESOURCE_DOORBELL 0x00000200 +#define RIO_RESOURCE_MAILBOX 0x00000400 + +#define RIO_RESOURCE_CACHEABLE 0x00010000 +#define RIO_RESOURCE_PCI 0x00020000 + +#define RIO_RESOURCE_BUSY 0x80000000 + +/** + * struct rio_driver - RIO driver info + * @node: Node in list of drivers + * @name: RIO driver name + * @id_table: RIO device ids to be associated with this driver + * @probe: RIO device inserted + * @remove: RIO device removed + * @shutdown: shutdown notification callback + * @suspend: RIO device suspended + * @resume: RIO device awakened + * @enable_wake: RIO device enable wake event + * @driver: LDM driver struct + * + * Provides info on a RIO device driver for insertion/removal and + * power management purposes. + */ +struct rio_driver { + struct list_head node; + char *name; + const struct rio_device_id *id_table; + int (*probe) (struct rio_dev * dev, const struct rio_device_id * id); + void (*remove) (struct rio_dev * dev); + void (*shutdown)(struct rio_dev *dev); + int (*suspend) (struct rio_dev * dev, u32 state); + int (*resume) (struct rio_dev * dev); + int (*enable_wake) (struct rio_dev * dev, u32 state, int enable); + struct device_driver driver; +}; + +#define to_rio_driver(drv) container_of(drv,struct rio_driver, driver) + +union rio_pw_msg { + struct { + u32 comptag; /* Component Tag CSR */ + u32 errdetect; /* Port N Error Detect CSR */ + u32 is_port; /* Implementation specific + PortID */ + u32 ltlerrdet; /* LTL Error Detect CSR */ + u32 padding[12]; + } em; + u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)]; +}; + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + +/* + * enum rio_write_type - RIO write transaction types used in DMA transfers + * + * Note: RapidIO specification defines write (NWRITE) and + * write-with-response (NWRITE_R) data transfer operations. + * Existing DMA controllers that service RapidIO may use one of these operations + * for entire data transfer or their combination with only the last data packet + * requires response. 
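[Editor's note] Pulling struct rio_driver together with the RIO_DEVICE() id helper and the rio_register_driver()/rio_name() declarations found in rio_drv.h further below, a hypothetical minimal driver registration might look like this:

	static const struct rio_device_id demo_id_table[] = {
		{ RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID) },	/* match any endpoint */
		{ 0, }					/* terminating entry */
	};

	static int demo_probe(struct rio_dev *rdev, const struct rio_device_id *id)
	{
		pr_info("demo driver bound to %s\n", rio_name(rdev));
		return 0;
	}

	static void demo_remove(struct rio_dev *rdev)
	{
		/* undo whatever demo_probe() set up */
	}

	static struct rio_driver demo_driver = {
		.name		= "rio_demo",
		.id_table	= demo_id_table,
		.probe		= demo_probe,
		.remove		= demo_remove,
	};

	/* module init/exit would call rio_register_driver()/rio_unregister_driver() */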
+ */ +enum rio_write_type { + RDW_DEFAULT, /* default method used by DMA driver */ + RDW_ALL_NWRITE, /* all packets use NWRITE */ + RDW_ALL_NWRITE_R, /* all packets use NWRITE_R */ + RDW_LAST_NWRITE_R, /* last packet uses NWRITE_R, others - NWRITE */ +}; + +struct rio_dma_ext { + u16 destid; + u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */ + u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */ + enum rio_write_type wr_type; /* preferred RIO write operation type */ +}; + +struct rio_dma_data { + /* Local data (as scatterlist) */ + struct scatterlist *sg; /* I/O scatter list */ + unsigned int sg_len; /* size of scatter list */ + /* Remote device address (flat buffer) */ + u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */ + u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */ + enum rio_write_type wr_type; /* preferred RIO write operation type */ +}; + +static inline struct rio_mport *dma_to_mport(struct dma_device *ddev) +{ + return container_of(ddev, struct rio_mport, dma); +} +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +/** + * struct rio_scan - RIO enumeration and discovery operations + * @owner: The module owner of this structure + * @enumerate: Callback to perform RapidIO fabric enumeration. + * @discover: Callback to perform RapidIO fabric discovery. + */ +struct rio_scan { + struct module *owner; + int (*enumerate)(struct rio_mport *mport, u32 flags); + int (*discover)(struct rio_mport *mport, u32 flags); +}; + +/** + * struct rio_scan_node - list node to register RapidIO enumeration and + * discovery methods with RapidIO core. + * @mport_id: ID of an mport (net) serviced by this enumerator + * @node: node in global list of registered enumerators + * @ops: RIO enumeration and discovery operations + */ +struct rio_scan_node { + int mport_id; + struct list_head node; + struct rio_scan *ops; +}; + +/* Architecture and hardware-specific functions */ +extern int rio_mport_initialize(struct rio_mport *); +extern int rio_register_mport(struct rio_mport *); +extern int rio_unregister_mport(struct rio_mport *); +extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); +extern void rio_close_inb_mbox(struct rio_mport *, int); +extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int); +extern void rio_close_outb_mbox(struct rio_mport *, int); +extern int rio_query_mport(struct rio_mport *port, + struct rio_mport_attr *mport_attr); + +#endif /* LINUX_RIO_H */ diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h new file mode 100644 index 000000000..0834264fb --- /dev/null +++ b/include/linux/rio_drv.h @@ -0,0 +1,456 @@ +/* + * RapidIO driver services + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef LINUX_RIO_DRV_H +#define LINUX_RIO_DRV_H + +#include +#include +#include +#include +#include +#include + +extern int __rio_local_read_config_32(struct rio_mport *port, u32 offset, + u32 * data); +extern int __rio_local_write_config_32(struct rio_mport *port, u32 offset, + u32 data); +extern int __rio_local_read_config_16(struct rio_mport *port, u32 offset, + u16 * data); +extern int __rio_local_write_config_16(struct rio_mport *port, u32 offset, + u16 data); +extern int __rio_local_read_config_8(struct rio_mport *port, u32 offset, + u8 * data); +extern int __rio_local_write_config_8(struct rio_mport *port, u32 offset, + u8 data); + +extern int rio_mport_read_config_32(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u32 * data); +extern int rio_mport_write_config_32(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u32 data); +extern int rio_mport_read_config_16(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u16 * data); +extern int rio_mport_write_config_16(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u16 data); +extern int rio_mport_read_config_8(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u8 * data); +extern int rio_mport_write_config_8(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u8 data); + +/** + * rio_local_read_config_32 - Read 32 bits from local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Pointer to read data into + * + * Reads 32 bits of data from the specified offset within the local + * device's configuration space. + */ +static inline int rio_local_read_config_32(struct rio_mport *port, u32 offset, + u32 * data) +{ + return __rio_local_read_config_32(port, offset, data); +} + +/** + * rio_local_write_config_32 - Write 32 bits to local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Data to be written + * + * Writes 32 bits of data to the specified offset within the local + * device's configuration space. + */ +static inline int rio_local_write_config_32(struct rio_mport *port, u32 offset, + u32 data) +{ + return __rio_local_write_config_32(port, offset, data); +} + +/** + * rio_local_read_config_16 - Read 16 bits from local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Pointer to read data into + * + * Reads 16 bits of data from the specified offset within the local + * device's configuration space. + */ +static inline int rio_local_read_config_16(struct rio_mport *port, u32 offset, + u16 * data) +{ + return __rio_local_read_config_16(port, offset, data); +} + +/** + * rio_local_write_config_16 - Write 16 bits to local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Data to be written + * + * Writes 16 bits of data to the specified offset within the local + * device's configuration space. + */ + +static inline int rio_local_write_config_16(struct rio_mport *port, u32 offset, + u16 data) +{ + return __rio_local_write_config_16(port, offset, data); +} + +/** + * rio_local_read_config_8 - Read 8 bits from local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Pointer to read data into + * + * Reads 8 bits of data from the specified offset within the local + * device's configuration space. 
+ */ +static inline int rio_local_read_config_8(struct rio_mport *port, u32 offset, + u8 * data) +{ + return __rio_local_read_config_8(port, offset, data); +} + +/** + * rio_local_write_config_8 - Write 8 bits to local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Data to be written + * + * Writes 8 bits of data to the specified offset within the local + * device's configuration space. + */ +static inline int rio_local_write_config_8(struct rio_mport *port, u32 offset, + u8 data) +{ + return __rio_local_write_config_8(port, offset, data); +} + +/** + * rio_read_config_32 - Read 32 bits from configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Pointer to read data into + * + * Reads 32 bits of data from the specified offset within the + * RIO device's configuration space. + */ +static inline int rio_read_config_32(struct rio_dev *rdev, u32 offset, + u32 * data) +{ + return rio_mport_read_config_32(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +/** + * rio_write_config_32 - Write 32 bits to configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Data to be written + * + * Writes 32 bits of data to the specified offset within the + * RIO device's configuration space. + */ +static inline int rio_write_config_32(struct rio_dev *rdev, u32 offset, + u32 data) +{ + return rio_mport_write_config_32(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +/** + * rio_read_config_16 - Read 16 bits from configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Pointer to read data into + * + * Reads 16 bits of data from the specified offset within the + * RIO device's configuration space. + */ +static inline int rio_read_config_16(struct rio_dev *rdev, u32 offset, + u16 * data) +{ + return rio_mport_read_config_16(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +/** + * rio_write_config_16 - Write 16 bits to configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Data to be written + * + * Writes 16 bits of data to the specified offset within the + * RIO device's configuration space. + */ +static inline int rio_write_config_16(struct rio_dev *rdev, u32 offset, + u16 data) +{ + return rio_mport_write_config_16(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +/** + * rio_read_config_8 - Read 8 bits from configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Pointer to read data into + * + * Reads 8 bits of data from the specified offset within the + * RIO device's configuration space. + */ +static inline int rio_read_config_8(struct rio_dev *rdev, u32 offset, u8 * data) +{ + return rio_mport_read_config_8(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +/** + * rio_write_config_8 - Write 8 bits to configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Data to be written + * + * Writes 8 bits of data to the specified offset within the + * RIO device's configuration space. 
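[Editor's note] A short sketch contrasting the local and remote accessors above; offset 0x00 is assumed here to be the Device Identity CAR described by rio_regs.h:

	static void demo_dump_ids(struct rio_mport *mport, struct rio_dev *rdev)
	{
		u32 local_id = 0, remote_id = 0;

		rio_local_read_config_32(mport, 0x00, &local_id);	/* this mport */
		rio_read_config_32(rdev, 0x00, &remote_id);		/* remote device */

		pr_info("local 0x%08x, remote 0x%08x\n", local_id, remote_id);
	}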
+ */ +static inline int rio_write_config_8(struct rio_dev *rdev, u32 offset, u8 data) +{ + return rio_mport_write_config_8(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +extern int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, + u16 data); + +/** + * rio_send_doorbell - Send a doorbell message to a device + * @rdev: RIO device + * @data: Doorbell message data + * + * Send a doorbell message to a RIO device. The doorbell message + * has a 16-bit info field provided by the @data argument. + */ +static inline int rio_send_doorbell(struct rio_dev *rdev, u16 data) +{ + return rio_mport_send_doorbell(rdev->net->hport, rdev->destid, data); +}; + +/** + * rio_init_mbox_res - Initialize a RIO mailbox resource + * @res: resource struct + * @start: start of mailbox range + * @end: end of mailbox range + * + * This function is used to initialize the fields of a resource + * for use as a mailbox resource. It initializes a range of + * mailboxes using the start and end arguments. + */ +static inline void rio_init_mbox_res(struct resource *res, int start, int end) +{ + memset(res, 0, sizeof(struct resource)); + res->start = start; + res->end = end; + res->flags = RIO_RESOURCE_MAILBOX; +} + +/** + * rio_init_dbell_res - Initialize a RIO doorbell resource + * @res: resource struct + * @start: start of doorbell range + * @end: end of doorbell range + * + * This function is used to initialize the fields of a resource + * for use as a doorbell resource. It initializes a range of + * doorbell messages using the start and end arguments. + */ +static inline void rio_init_dbell_res(struct resource *res, u16 start, u16 end) +{ + memset(res, 0, sizeof(struct resource)); + res->start = start; + res->end = end; + res->flags = RIO_RESOURCE_DOORBELL; +} + +/** + * RIO_DEVICE - macro used to describe a specific RIO device + * @dev: the 16 bit RIO device ID + * @ven: the 16 bit RIO vendor ID + * + * This macro is used to create a struct rio_device_id that matches a + * specific device. The assembly vendor and assembly device fields + * will be set to %RIO_ANY_ID. + */ +#define RIO_DEVICE(dev,ven) \ + .did = (dev), .vid = (ven), \ + .asm_did = RIO_ANY_ID, .asm_vid = RIO_ANY_ID + +/* Mailbox management */ +extern int rio_request_outb_mbox(struct rio_mport *, void *, int, int, + void (*)(struct rio_mport *, void *,int, int)); +extern int rio_release_outb_mbox(struct rio_mport *, int); + +/** + * rio_add_outb_message - Add RIO message to an outbound mailbox queue + * @mport: RIO master port containing the outbound queue + * @rdev: RIO device the message is be sent to + * @mbox: The outbound mailbox queue + * @buffer: Pointer to the message buffer + * @len: Length of the message buffer + * + * Adds a RIO message buffer to an outbound mailbox queue for + * transmission. Returns 0 on success. + */ +static inline int rio_add_outb_message(struct rio_mport *mport, + struct rio_dev *rdev, int mbox, + void *buffer, size_t len) +{ + return mport->ops->add_outb_message(mport, rdev, mbox, + buffer, len); +} + +extern int rio_request_inb_mbox(struct rio_mport *, void *, int, int, + void (*)(struct rio_mport *, void *, int, int)); +extern int rio_release_inb_mbox(struct rio_mport *, int); + +/** + * rio_add_inb_buffer - Add buffer to an inbound mailbox queue + * @mport: Master port containing the inbound mailbox + * @mbox: The inbound mailbox number + * @buffer: Pointer to the message buffer + * + * Adds a buffer to an inbound mailbox queue for reception. Returns + * 0 on success. 
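[Editor's note] A hypothetical outbound-mailbox sketch using the helpers above (mailbox 0 and 16 queue entries chosen arbitrarily; @len must not exceed RIO_MAX_MSG_SIZE):

	static void demo_outb_done(struct rio_mport *mport, void *dev_id,
				   int mbox, int slot)
	{
		/* transmit-complete notification for @slot */
	}

	static int demo_send(struct rio_mport *mport, struct rio_dev *rdev,
			     void *msg, size_t len)
	{
		int rc;

		rc = rio_request_outb_mbox(mport, NULL, 0, 16, demo_outb_done);
		if (rc)
			return rc;

		rc = rio_add_outb_message(mport, rdev, 0, msg, len);
		rio_release_outb_mbox(mport, 0);
		return rc;
	}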
+ */ +static inline int rio_add_inb_buffer(struct rio_mport *mport, int mbox, + void *buffer) +{ + return mport->ops->add_inb_buffer(mport, mbox, buffer); +} + +/** + * rio_get_inb_message - Get A RIO message from an inbound mailbox queue + * @mport: Master port containing the inbound mailbox + * @mbox: The inbound mailbox number + * + * Get a RIO message from an inbound mailbox queue. Returns 0 on success. + */ +static inline void *rio_get_inb_message(struct rio_mport *mport, int mbox) +{ + return mport->ops->get_inb_message(mport, mbox); +} + +/* Doorbell management */ +extern int rio_request_inb_dbell(struct rio_mport *, void *, u16, u16, + void (*)(struct rio_mport *, void *, u16, u16, u16)); +extern int rio_release_inb_dbell(struct rio_mport *, u16, u16); +extern struct resource *rio_request_outb_dbell(struct rio_dev *, u16, u16); +extern int rio_release_outb_dbell(struct rio_dev *, struct resource *); + +/* Memory region management */ +int rio_claim_resource(struct rio_dev *, int); +int rio_request_regions(struct rio_dev *, char *); +void rio_release_regions(struct rio_dev *); +int rio_request_region(struct rio_dev *, int, char *); +void rio_release_region(struct rio_dev *, int); + +/* Memory mapping functions */ +extern int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local, + u64 rbase, u32 size, u32 rflags); +extern void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart); +extern int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase, + u32 size, u32 rflags, dma_addr_t *local); +extern void rio_unmap_outb_region(struct rio_mport *mport, + u16 destid, u64 rstart); + +/* Port-Write management */ +extern int rio_request_inb_pwrite(struct rio_dev *, + int (*)(struct rio_dev *, union rio_pw_msg*, int)); +extern int rio_release_inb_pwrite(struct rio_dev *); +extern int rio_add_mport_pw_handler(struct rio_mport *mport, void *dev_id, + int (*pwcback)(struct rio_mport *mport, void *dev_id, + union rio_pw_msg *msg, int step)); +extern int rio_del_mport_pw_handler(struct rio_mport *mport, void *dev_id, + int (*pwcback)(struct rio_mport *mport, void *dev_id, + union rio_pw_msg *msg, int step)); +extern int rio_inb_pwrite_handler(struct rio_mport *mport, + union rio_pw_msg *pw_msg); +extern void rio_pw_enable(struct rio_mport *mport, int enable); + +/* LDM support */ +int rio_register_driver(struct rio_driver *); +void rio_unregister_driver(struct rio_driver *); +struct rio_dev *rio_dev_get(struct rio_dev *); +void rio_dev_put(struct rio_dev *); + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +extern struct dma_chan *rio_request_dma(struct rio_dev *rdev); +extern struct dma_chan *rio_request_mport_dma(struct rio_mport *mport); +extern void rio_release_dma(struct dma_chan *dchan); +extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg( + struct rio_dev *rdev, struct dma_chan *dchan, + struct rio_dma_data *data, + enum dma_transfer_direction direction, unsigned long flags); +extern struct dma_async_tx_descriptor *rio_dma_prep_xfer( + struct dma_chan *dchan, u16 destid, + struct rio_dma_data *data, + enum dma_transfer_direction direction, unsigned long flags); +#endif + +/** + * rio_name - Get the unique RIO device identifier + * @rdev: RIO device + * + * Get the unique RIO device identifier. Returns the device + * identifier string. + */ +static inline const char *rio_name(struct rio_dev *rdev) +{ + return dev_name(&rdev->dev); +} + +/** + * rio_get_drvdata - Get RIO driver specific data + * @rdev: RIO device + * + * Get RIO driver specific data. 
Returns a pointer to the + * driver specific data. + */ +static inline void *rio_get_drvdata(struct rio_dev *rdev) +{ + return dev_get_drvdata(&rdev->dev); +} + +/** + * rio_set_drvdata - Set RIO driver specific data + * @rdev: RIO device + * @data: Pointer to driver specific data + * + * Set RIO driver specific data. device struct driver data pointer + * is set to the @data argument. + */ +static inline void rio_set_drvdata(struct rio_dev *rdev, void *data) +{ + dev_set_drvdata(&rdev->dev, data); +} + +/* Misc driver helpers */ +extern u16 rio_local_get_device_id(struct rio_mport *port); +extern void rio_local_set_device_id(struct rio_mport *port, u16 did); +extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from); +extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did, + struct rio_dev *from); +extern int rio_init_mports(void); + +#endif /* LINUX_RIO_DRV_H */ diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h new file mode 100644 index 000000000..334c576c1 --- /dev/null +++ b/include/linux/rio_ids.h @@ -0,0 +1,44 @@ +/* + * RapidIO devices + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef LINUX_RIO_IDS_H +#define LINUX_RIO_IDS_H + +#define RIO_VID_FREESCALE 0x0002 +#define RIO_DID_MPC8560 0x0003 + +#define RIO_VID_TUNDRA 0x000d +#define RIO_DID_TSI500 0x0500 +#define RIO_DID_TSI568 0x0568 +#define RIO_DID_TSI572 0x0572 +#define RIO_DID_TSI574 0x0574 +#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */ +#define RIO_DID_TSI577 0x0577 +#define RIO_DID_TSI578 0x0578 + +#define RIO_VID_IDT 0x0038 +#define RIO_DID_IDT70K200 0x0310 +#define RIO_DID_IDTCPS8 0x035c +#define RIO_DID_IDTCPS12 0x035d +#define RIO_DID_IDTCPS16 0x035b +#define RIO_DID_IDTCPS6Q 0x035f +#define RIO_DID_IDTCPS10Q 0x035e +#define RIO_DID_IDTCPS1848 0x0374 +#define RIO_DID_IDTCPS1432 0x0375 +#define RIO_DID_IDTCPS1616 0x0379 +#define RIO_DID_IDTVPS1616 0x0377 +#define RIO_DID_IDTSPS1616 0x0378 +#define RIO_DID_TSI721 0x80ab +#define RIO_DID_IDTRXS1632 0x80e5 +#define RIO_DID_IDTRXS2448 0x80e6 + +#endif /* LINUX_RIO_IDS_H */ diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h new file mode 100644 index 000000000..40c04efe7 --- /dev/null +++ b/include/linux/rio_regs.h @@ -0,0 +1,395 @@ +/* + * RapidIO register definitions + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef LINUX_RIO_REGS_H +#define LINUX_RIO_REGS_H + +/* + * In RapidIO, each device has a 16MB configuration space that is + * accessed via maintenance transactions. Portions of configuration + * space are standardized and/or reserved. 
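/*
 * Illustrative sketch (not part of the upstream headers): a minimal RapidIO
 * driver skeleton tying together RIO_DEVICE(), the vendor/device IDs from
 * linux/rio_ids.h just above, the drvdata helpers and rio_register_driver().
 * The layout of struct rio_driver / struct rio_device_id and the probe()/
 * remove() signatures come from linux/rio.h, which is not shown in this
 * excerpt, so treat them as assumptions; the Tsi721 match is just an example.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>

struct example_priv {
	struct rio_dev *rdev;
};

static int example_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	priv->rdev = rio_dev_get(rdev);
	rio_set_drvdata(rdev, priv);
	return 0;
}

static void example_remove(struct rio_dev *rdev)
{
	struct example_priv *priv = rio_get_drvdata(rdev);

	rio_dev_put(priv->rdev);
	kfree(priv);
}

static const struct rio_device_id example_id_table[] = {
	{ RIO_DEVICE(RIO_DID_TSI721, RIO_VID_IDT) },
	{ 0, }	/* terminating entry */
};

static struct rio_driver example_driver = {
	.name		= "rio_example",
	.id_table	= example_id_table,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return rio_register_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	rio_unregister_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");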
+ */ +#define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO mainenance space */ + +#define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ +#define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ +#define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ +#define RIO_ASM_ID_MASK 0xffff0000 /* [I] Asm ID Mask */ +#define RIO_ASM_VEN_ID_MASK 0x0000ffff /* [I] Asm Vend Mask */ + +#define RIO_ASM_INFO_CAR 0x0c /* [I] Assembly Information CAR */ +#define RIO_ASM_REV_MASK 0xffff0000 /* [I] Asm Rev Mask */ +#define RIO_EXT_FTR_PTR_MASK 0x0000ffff /* [I] EF_PTR Mask */ + +#define RIO_PEF_CAR 0x10 /* [I] Processing Element Features CAR */ +#define RIO_PEF_BRIDGE 0x80000000 /* [I] Bridge */ +#define RIO_PEF_MEMORY 0x40000000 /* [I] MMIO */ +#define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */ +#define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */ +#define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */ +#define RIO_PEF_INB_MBOX 0x00f00000 /* [II, <= 1.2] Mailboxes */ +#define RIO_PEF_INB_MBOX0 0x00800000 /* [II, <= 1.2] Mailbox 0 */ +#define RIO_PEF_INB_MBOX1 0x00400000 /* [II, <= 1.2] Mailbox 1 */ +#define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */ +#define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */ +#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */ +#define RIO_PEF_DEV32 0x00001000 /* [III] PE supports Common TRansport Dev32 */ +#define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ +#define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ +#define RIO_PEF_CTLS 0x00000010 /* [III] Common Transport Large System (< rev.3) */ +#define RIO_PEF_DEV16 0x00000010 /* [III] PE Supports Common Transport Dev16 (rev.3) */ +#define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */ +#define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */ +#define RIO_PEF_ADDR_50 0x00000002 /* [I] 50 bits */ +#define RIO_PEF_ADDR_34 0x00000001 /* [I] 34 bits */ + +#define RIO_SWP_INFO_CAR 0x14 /* [I] Switch Port Information CAR */ +#define RIO_SWP_INFO_PORT_TOTAL_MASK 0x0000ff00 /* [I] Total number of ports */ +#define RIO_SWP_INFO_PORT_NUM_MASK 0x000000ff /* [I] Maintenance transaction port number */ +#define RIO_GET_TOTAL_PORTS(x) ((x & RIO_SWP_INFO_PORT_TOTAL_MASK) >> 8) +#define RIO_GET_PORT_NUM(x) (x & RIO_SWP_INFO_PORT_NUM_MASK) + +#define RIO_SRC_OPS_CAR 0x18 /* [I] Source Operations CAR */ +#define RIO_SRC_OPS_READ 0x00008000 /* [I] Read op */ +#define RIO_SRC_OPS_WRITE 0x00004000 /* [I] Write op */ +#define RIO_SRC_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */ +#define RIO_SRC_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */ +#define RIO_SRC_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */ +#define RIO_SRC_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */ +#define RIO_SRC_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */ +#define RIO_SRC_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */ +#define RIO_SRC_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */ +#define RIO_SRC_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */ +#define RIO_SRC_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ +#define RIO_SRC_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ + +#define RIO_DST_OPS_CAR 0x1c /* Destination Operations CAR */ +#define RIO_DST_OPS_READ 0x00008000 /* [I] Read op */ +#define RIO_DST_OPS_WRITE 0x00004000 /* [I] Write op */ +#define RIO_DST_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */ +#define RIO_DST_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */ +#define RIO_DST_OPS_DATA_MSG 
0x00000800 /* [II] Data msg op */ +#define RIO_DST_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */ +#define RIO_DST_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */ +#define RIO_DST_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */ +#define RIO_DST_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */ +#define RIO_DST_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */ +#define RIO_DST_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ +#define RIO_DST_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ + +#define RIO_OPS_READ 0x00008000 /* [I] Read op */ +#define RIO_OPS_WRITE 0x00004000 /* [I] Write op */ +#define RIO_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */ +#define RIO_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */ +#define RIO_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */ +#define RIO_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */ +#define RIO_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */ +#define RIO_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */ +#define RIO_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */ +#define RIO_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */ +#define RIO_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ +#define RIO_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ + + /* 0x20-0x30 *//* Reserved */ + +#define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */ +#define RIO_RT_MAX_DESTID 0x0000ffff + +#define RIO_MBOX_CSR 0x40 /* [II, <= 1.2] Mailbox CSR */ +#define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ +#define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */ +#define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */ +#define RIO_MBOX0_BUSY 0x10000000 /* [II] Mbox 0 busy */ +#define RIO_MBOX0_FAIL 0x08000000 /* [II] Mbox 0 fail */ +#define RIO_MBOX0_ERROR 0x04000000 /* [II] Mbox 0 error */ +#define RIO_MBOX1_AVAIL 0x00800000 /* [II] Mbox 1 avail */ +#define RIO_MBOX1_FULL 0x00200000 /* [II] Mbox 1 full */ +#define RIO_MBOX1_EMPTY 0x00200000 /* [II] Mbox 1 empty */ +#define RIO_MBOX1_BUSY 0x00100000 /* [II] Mbox 1 busy */ +#define RIO_MBOX1_FAIL 0x00080000 /* [II] Mbox 1 fail */ +#define RIO_MBOX1_ERROR 0x00040000 /* [II] Mbox 1 error */ +#define RIO_MBOX2_AVAIL 0x00008000 /* [II] Mbox 2 avail */ +#define RIO_MBOX2_FULL 0x00004000 /* [II] Mbox 2 full */ +#define RIO_MBOX2_EMPTY 0x00002000 /* [II] Mbox 2 empty */ +#define RIO_MBOX2_BUSY 0x00001000 /* [II] Mbox 2 busy */ +#define RIO_MBOX2_FAIL 0x00000800 /* [II] Mbox 2 fail */ +#define RIO_MBOX2_ERROR 0x00000400 /* [II] Mbox 2 error */ +#define RIO_MBOX3_AVAIL 0x00000080 /* [II] Mbox 3 avail */ +#define RIO_MBOX3_FULL 0x00000040 /* [II] Mbox 3 full */ +#define RIO_MBOX3_EMPTY 0x00000020 /* [II] Mbox 3 empty */ +#define RIO_MBOX3_BUSY 0x00000010 /* [II] Mbox 3 busy */ +#define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */ +#define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */ + +#define RIO_WRITE_PORT_CSR 0x44 /* [I, <= 1.2] Write Port CSR */ +#define RIO_DOORBELL_CSR 0x44 /* [II, <= 1.2] Doorbell CSR */ +#define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */ +#define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */ +#define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */ +#define RIO_DOORBELL_BUSY 0x10000000 /* [II] Doorbell busy */ +#define RIO_DOORBELL_FAILED 0x08000000 /* [II] Doorbell failed */ +#define RIO_DOORBELL_ERROR 0x04000000 /* [II] Doorbell error */ +#define RIO_WRITE_PORT_AVAILABLE 0x00000080 /* [I] Write Port Available */ +#define RIO_WRITE_PORT_FULL 0x00000040 /* [I] Write Port Full */ 
+#define RIO_WRITE_PORT_EMPTY 0x00000020 /* [I] Write Port Empty */ +#define RIO_WRITE_PORT_BUSY 0x00000010 /* [I] Write Port Busy */ +#define RIO_WRITE_PORT_FAILED 0x00000008 /* [I] Write Port Failed */ +#define RIO_WRITE_PORT_ERROR 0x00000004 /* [I] Write Port Error */ + + /* 0x48 *//* Reserved */ + +#define RIO_PELL_CTRL_CSR 0x4c /* [I] PE Logical Layer Control CSR */ +#define RIO_PELL_ADDR_66 0x00000004 /* [I] 66-bit addr */ +#define RIO_PELL_ADDR_50 0x00000002 /* [I] 50-bit addr */ +#define RIO_PELL_ADDR_34 0x00000001 /* [I] 34-bit addr */ + + /* 0x50-0x54 *//* Reserved */ + +#define RIO_LCSH_BA 0x58 /* [I] LCS High Base Address */ +#define RIO_LCSL_BA 0x5c /* [I] LCS Base Address */ + +#define RIO_DID_CSR 0x60 /* [III] Base Device ID CSR */ + + /* 0x64 *//* Reserved */ + +#define RIO_HOST_DID_LOCK_CSR 0x68 /* [III] Host Base Device ID Lock CSR */ +#define RIO_COMPONENT_TAG_CSR 0x6c /* [III] Component Tag CSR */ + +#define RIO_STD_RTE_CONF_DESTID_SEL_CSR 0x70 +#define RIO_STD_RTE_CONF_EXTCFGEN 0x80000000 +#define RIO_STD_RTE_CONF_PORT_SEL_CSR 0x74 +#define RIO_STD_RTE_DEFAULT_PORT 0x78 + + /* 0x7c-0xf8 *//* Reserved */ + /* 0x100-0xfff8 *//* [I] Extended Features Space */ + /* 0x10000-0xfffff8 *//* [I] Implementation-defined Space */ + +/* + * Extended Features Space is a configuration space area where + * functionality is mapped into extended feature blocks via a + * singly linked list of extended feature pointers (EFT_PTR). + * + * Each extended feature block can be identified/located in + * Extended Features Space by walking the extended feature + * list starting with the Extended Feature Pointer located + * in the Assembly Information CAR. + * + * Extended Feature Blocks (EFBs) are identified with an assigned + * EFB ID. Extended feature block offsets in the definitions are + * relative to the offset of the EFB within the Extended Features + * Space. 
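/*
 * Illustrative sketch (not part of the upstream header): walking the singly
 * linked list of Extended Feature Blocks described above for one device, using
 * the config-space accessors from linux/rio_drv.h and the RIO_GET_BLOCK_*
 * helpers defined just below.  The loop bound is an arbitrary safety limit
 * added for the example.  Returns the EFB offset, or 0 if not found, e.g.
 * example_find_efb(rdev, RIO_EFB_ERR_MGMNT) for the Error Management block.
 */
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_regs.h>

static u32 example_find_efb(struct rio_dev *rdev, u16 wanted_id)
{
	u32 pef, asm_info, header, ptr;
	int limit = 64;	/* arbitrary guard against a malformed list */

	if (rio_read_config_32(rdev, RIO_PEF_CAR, &pef) ||
	    !(pef & RIO_PEF_EXT_FEATURES))
		return 0;	/* no Extended Features Space */

	if (rio_read_config_32(rdev, RIO_ASM_INFO_CAR, &asm_info))
		return 0;

	ptr = asm_info & RIO_EXT_FTR_PTR_MASK;
	while (ptr && limit--) {
		if (rio_read_config_32(rdev, ptr, &header))
			break;
		if (RIO_GET_BLOCK_ID(header) == wanted_id)
			return ptr;	/* offset of the matching EFB */
		ptr = RIO_GET_BLOCK_PTR(header);
	}
	return 0;
}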
+ */ + +/* Helper macros to parse the Extended Feature Block header */ +#define RIO_EFB_PTR_MASK 0xffff0000 +#define RIO_EFB_ID_MASK 0x0000ffff +#define RIO_GET_BLOCK_PTR(x) ((x & RIO_EFB_PTR_MASK) >> 16) +#define RIO_GET_BLOCK_ID(x) (x & RIO_EFB_ID_MASK) + +/* Extended Feature Block IDs */ +#define RIO_EFB_SER_EP_M1_ID 0x0001 /* [VI] LP-Serial EP Devices, Map I */ +#define RIO_EFB_SER_EP_SW_M1_ID 0x0002 /* [VI] LP-Serial EP w SW Recovery Devices, Map I */ +#define RIO_EFB_SER_EPF_M1_ID 0x0003 /* [VI] LP-Serial EP Free Devices, Map I */ +#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP-Serial EP Devices, RIO 1.2 */ +#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP-Serial EP w SW Recovery Devices, RIO 1.2 */ +#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP-Serial EP Free Devices, RIO 1.2 */ +#define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */ +#define RIO_EFB_SER_EPF_SW_M1_ID 0x0009 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map I */ +#define RIO_EFB_SW_ROUTING_TBL 0x000E /* [III] Switch Routing Table Block */ +#define RIO_EFB_SER_EP_M2_ID 0x0011 /* [VI] LP-Serial EP Devices, Map II */ +#define RIO_EFB_SER_EP_SW_M2_ID 0x0012 /* [VI] LP-Serial EP w SW Recovery Devices, Map II */ +#define RIO_EFB_SER_EPF_M2_ID 0x0013 /* [VI] LP-Serial EP Free Devices, Map II */ +#define RIO_EFB_ERR_MGMNT_HS 0x0017 /* [VIII] Error Management Extensions, Hot-Swap only */ +#define RIO_EFB_SER_EPF_SW_M2_ID 0x0019 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map II */ + +/* + * Physical LP-Serial Registers Definitions + * Parameters in register macros: + * n - port number, m - Register Map Type (1 or 2) + */ +#define RIO_PORT_MNT_HEADER 0x0000 +#define RIO_PORT_REQ_CTL_CSR 0x0020 +#define RIO_PORT_RSP_CTL_CSR 0x0024 +#define RIO_PORT_LINKTO_CTL_CSR 0x0020 +#define RIO_PORT_RSPTO_CTL_CSR 0x0024 +#define RIO_PORT_GEN_CTL_CSR 0x003c +#define RIO_PORT_GEN_HOST 0x80000000 +#define RIO_PORT_GEN_MASTER 0x40000000 +#define RIO_PORT_GEN_DISCOVERED 0x20000000 +#define RIO_PORT_N_MNT_REQ_CSR(n, m) (0x40 + (n) * (0x20 * (m))) +#define RIO_MNT_REQ_CMD_RD 0x03 /* Reset-device command */ +#define RIO_MNT_REQ_CMD_IS 0x04 /* Input-status command */ +#define RIO_PORT_N_MNT_RSP_CSR(n, m) (0x44 + (n) * (0x20 * (m))) +#define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */ +#define RIO_PORT_N_MNT_RSP_ASTAT 0x000007e0 /* ackID Status */ +#define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */ +#define RIO_PORT_N_ACK_STS_CSR(n) (0x48 + (n) * 0x20) /* Only in RM-I */ +#define RIO_PORT_N_ACK_CLEAR 0x80000000 +#define RIO_PORT_N_ACK_INBOUND 0x3f000000 +#define RIO_PORT_N_ACK_OUTSTAND 0x00003f00 +#define RIO_PORT_N_ACK_OUTBOUND 0x0000003f +#define RIO_PORT_N_CTL2_CSR(n, m) (0x54 + (n) * (0x20 * (m))) +#define RIO_PORT_N_CTL2_SEL_BAUD 0xf0000000 +#define RIO_PORT_N_ERR_STS_CSR(n, m) (0x58 + (n) * (0x20 * (m))) +#define RIO_PORT_N_ERR_STS_OUT_ES 0x00010000 /* Output Error-stopped */ +#define RIO_PORT_N_ERR_STS_INP_ES 0x00000100 /* Input Error-stopped */ +#define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */ +#define RIO_PORT_N_ERR_STS_PORT_UA 0x00000008 /* Port Unavailable */ +#define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004 +#define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002 +#define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001 +#define RIO_PORT_N_CTL_CSR(n, m) (0x5c + (n) * (0x20 * (m))) +#define RIO_PORT_N_CTL_PWIDTH 0xc0000000 +#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000 +#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000 +#define RIO_PORT_N_CTL_IPW 0x38000000 /* Initialized Port Width */ +#define 
RIO_PORT_N_CTL_P_TYP_SER 0x00000001 +#define RIO_PORT_N_CTL_LOCKOUT 0x00000002 +#define RIO_PORT_N_CTL_EN_RX 0x00200000 +#define RIO_PORT_N_CTL_EN_TX 0x00400000 +#define RIO_PORT_N_OB_ACK_CSR(n) (0x60 + (n) * 0x40) /* Only in RM-II */ +#define RIO_PORT_N_OB_ACK_CLEAR 0x80000000 +#define RIO_PORT_N_OB_ACK_OUTSTD 0x00fff000 +#define RIO_PORT_N_OB_ACK_OUTBND 0x00000fff +#define RIO_PORT_N_IB_ACK_CSR(n) (0x64 + (n) * 0x40) /* Only in RM-II */ +#define RIO_PORT_N_IB_ACK_INBND 0x00000fff + +/* + * Device-based helper macros for serial port register access. + * d - pointer to rapidio device object, n - port number + */ + +#define RIO_DEV_PORT_N_MNT_REQ_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(n, d->phys_rmap)) + +#define RIO_DEV_PORT_N_MNT_RSP_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(n, d->phys_rmap)) + +#define RIO_DEV_PORT_N_ACK_STS_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_ACK_STS_CSR(n)) + +#define RIO_DEV_PORT_N_CTL2_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_CTL2_CSR(n, d->phys_rmap)) + +#define RIO_DEV_PORT_N_ERR_STS_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_ERR_STS_CSR(n, d->phys_rmap)) + +#define RIO_DEV_PORT_N_CTL_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_CTL_CSR(n, d->phys_rmap)) + +#define RIO_DEV_PORT_N_OB_ACK_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_OB_ACK_CSR(n)) + +#define RIO_DEV_PORT_N_IB_ACK_CSR(d, n) \ + (d->phys_efptr + RIO_PORT_N_IB_ACK_CSR(n)) + +/* + * Error Management Extensions (RapidIO 1.3+, Part 8) + * + * Extended Features Block ID=0x0007 + */ + +/* General EM Registers (Common for all Ports) */ + +#define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */ +#define RIO_EM_EMHS_CAR 0x004 /* EM Functionality CAR */ +#define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */ +#define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */ +#define REM_LTL_ERR_ILLTRAN 0x08000000 /* Illegal Transaction decode */ +#define REM_LTL_ERR_UNSOLR 0x00800000 /* Unsolicited Response */ +#define REM_LTL_ERR_UNSUPTR 0x00400000 /* Unsupported Transaction */ +#define REM_LTL_ERR_IMPSPEC 0x000000ff /* Implementation Specific */ +#define RIO_EM_LTL_HIADDR_CAP 0x010 /* Logical/Transport Layer High Address Capture CSR */ +#define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */ +#define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */ +#define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */ +#define RIO_EM_LTL_DID32_CAP 0x020 /* Logical/Transport Layer Dev32 DestID Capture CSR */ +#define RIO_EM_LTL_SID32_CAP 0x024 /* Logical/Transport Layer Dev32 source ID Capture CSR */ +#define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */ +#define RIO_EM_PW_TGT_DEVID_D16M 0xff000000 /* Port-write Target DID16 MSB */ +#define RIO_EM_PW_TGT_DEVID_D8 0x00ff0000 /* Port-write Target DID16 LSB or DID8 */ +#define RIO_EM_PW_TGT_DEVID_DEV16 0x00008000 /* Port-write Target DID16 LSB or DID8 */ +#define RIO_EM_PW_TGT_DEVID_DEV32 0x00004000 /* Port-write Target DID16 LSB or DID8 */ +#define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */ +#define RIO_EM_PKT_TTL_VAL 0xffff0000 /* Packet Time-to-live value */ +#define RIO_EM_PW_TGT32_DEVID 0x030 /* Port-write Dev32 Target deviceID CSR */ +#define RIO_EM_PW_TX_CTRL 0x034 /* Port-write Transmission Control CSR */ +#define RIO_EM_PW_TX_CTRL_PW_DIS 0x00000001 /* Port-write Transmission Disable bit */ + +/* Per-Port EM Registers */ + +#define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) 
/* Port N Error Detect CSR */ +#define REM_PED_IMPL_SPEC 0x80000000 +#define REM_PED_LINK_OK2U 0x40000000 /* Link OK to Uninit transition */ +#define REM_PED_LINK_UPDA 0x20000000 /* Link Uninit Packet Discard Active */ +#define REM_PED_LINK_U2OK 0x10000000 /* Link Uninit to OK transition */ +#define REM_PED_LINK_TO 0x00000001 + +#define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */ +#define RIO_EM_PN_ERRRATE_EN_OK2U 0x40000000 /* Enable notification for OK2U */ +#define RIO_EM_PN_ERRRATE_EN_UPDA 0x20000000 /* Enable notification for UPDA */ +#define RIO_EM_PN_ERRRATE_EN_U2OK 0x10000000 /* Enable notification for U2OK */ + +#define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */ +#define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */ +#define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */ +#define RIO_EM_PN_PKT_CAP_2(x) (0x054 + x*0x40) /* Port N Packet Capture 2 CSR */ +#define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */ +#define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */ +#define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */ +#define RIO_EM_PN_LINK_UDT(x) (0x070 + x*0x40) /* Port N Link Uninit Discard Timer CSR */ +#define RIO_EM_PN_LINK_UDT_TO 0xffffff00 /* Link Uninit Timeout value */ + +/* + * Switch Routing Table Register Block ID=0x000E (RapidIO 3.0+, part 3) + * Register offsets are defined from beginning of the block. + */ + +/* Broadcast Routing Table Control CSR */ +#define RIO_BC_RT_CTL_CSR 0x020 +#define RIO_RT_CTL_THREE_LVL 0x80000000 +#define RIO_RT_CTL_DEV32_RT_CTRL 0x40000000 +#define RIO_RT_CTL_MC_MASK_SZ 0x03000000 /* 3.0+ Part 11: Multicast */ + +/* Broadcast Level 0 Info CSR */ +#define RIO_BC_RT_LVL0_INFO_CSR 0x030 +#define RIO_RT_L0I_NUM_GR 0xff000000 +#define RIO_RT_L0I_GR_PTR 0x00fffc00 + +/* Broadcast Level 1 Info CSR */ +#define RIO_BC_RT_LVL1_INFO_CSR 0x034 +#define RIO_RT_L1I_NUM_GR 0xff000000 +#define RIO_RT_L1I_GR_PTR 0x00fffc00 + +/* Broadcast Level 2 Info CSR */ +#define RIO_BC_RT_LVL2_INFO_CSR 0x038 +#define RIO_RT_L2I_NUM_GR 0xff000000 +#define RIO_RT_L2I_GR_PTR 0x00fffc00 + +/* Per-Port Routing Table registers. + * Register fields defined in the broadcast section above are + * applicable to the corresponding registers below. + */ +#define RIO_SPx_RT_CTL_CSR(x) (0x040 + (0x20 * x)) +#define RIO_SPx_RT_LVL0_INFO_CSR(x) (0x50 + (0x20 * x)) +#define RIO_SPx_RT_LVL1_INFO_CSR(x) (0x54 + (0x20 * x)) +#define RIO_SPx_RT_LVL2_INFO_CSR(x) (0x58 + (0x20 * x)) + +/* Register Formats for Routing Table Group entry. + * Register offsets are calculated using GR_PTR field in the corresponding + * table Level_N and group/entry numbers (see RapidIO 3.0+ Part 3). 
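/*
 * Illustrative sketch (not part of the upstream header): polling a port with
 * the per-port register macros above.  The Physical layer status is reached
 * through RIO_DEV_PORT_N_ERR_STS_CSR(), which relies on the phys_efptr and
 * phys_rmap fields cached in struct rio_dev (declared in linux/rio.h, not
 * shown here); the Error Management block offset is assumed to be cached in
 * rdev->em_efptr the same way.
 */
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_regs.h>

static bool example_port_ok(struct rio_dev *rdev, int port)
{
	u32 err_sts = 0;

	rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, port),
			   &err_sts);
	return !!(err_sts & RIO_PORT_N_ERR_STS_PORT_OK);
}

static u32 example_port_errors(struct rio_dev *rdev, int port)
{
	u32 detect = 0;

	/* the per-port Error Detect CSR lives inside the EM feature block */
	rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PN_ERR_DETECT(port),
			   &detect);
	return detect;
}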
+ */ +#define RIO_RT_Ln_ENTRY_IMPL_DEF 0xf0000000 +#define RIO_RT_Ln_ENTRY_RTE_VAL 0x000003ff +#define RIO_RT_ENTRY_DROP_PKT 0x300 + +#endif /* LINUX_RIO_REGS_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h new file mode 100644 index 000000000..91ccae946 --- /dev/null +++ b/include/linux/rmap.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RMAP_H +#define _LINUX_RMAP_H +/* + * Declarations for Reverse Mapping functions in mm/rmap.c + */ + +#include +#include +#include +#include +#include +#include + +/* + * The anon_vma heads a list of private "related" vmas, to scan if + * an anonymous page pointing to this anon_vma needs to be unmapped: + * the vmas on the list will be related by forking, or by splitting. + * + * Since vmas come and go as they are split and merged (particularly + * in mprotect), the mapping field of an anonymous page cannot point + * directly to a vma: instead it points to an anon_vma, on whose list + * the related vmas can be easily linked or unlinked. + * + * After unlinking the last vma on the list, we must garbage collect + * the anon_vma object itself: we're guaranteed no page can be + * pointing to this anon_vma once its vma list is empty. + */ +struct anon_vma { + struct anon_vma *root; /* Root of this anon_vma tree */ + struct rw_semaphore rwsem; /* W: modification, R: walking the list */ + /* + * The refcount is taken on an anon_vma when there is no + * guarantee that the vma of page tables will exist for + * the duration of the operation. A caller that takes + * the reference is responsible for clearing up the + * anon_vma if they are the last user on release + */ + atomic_t refcount; + + /* + * Count of child anon_vmas and VMAs which points to this anon_vma. + * + * This counter is used for making decision about reusing anon_vma + * instead of forking new one. See comments in function anon_vma_clone. + */ + unsigned degree; + + struct anon_vma *parent; /* Parent of this anon_vma */ + + /* + * NOTE: the LSB of the rb_root.rb_node is set by + * mm_take_all_locks() _after_ taking the above lock. So the + * rb_root must only be read/written after taking the above lock + * to be sure to see a valid next pointer. The LSB bit itself + * is serialized by a system wide lock only visible to + * mm_take_all_locks() (mm_all_locks_mutex). + */ + + /* Interval tree of private "related" vmas */ + struct rb_root_cached rb_root; +}; + +/* + * The copy-on-write semantics of fork mean that an anon_vma + * can become associated with multiple processes. Furthermore, + * each child process will have its own anon_vma, where new + * pages for that process are instantiated. + * + * This structure allows us to find the anon_vmas associated + * with a VMA, or the VMAs associated with an anon_vma. + * The "same_vma" list contains the anon_vma_chains linking + * all the anon_vmas associated with this VMA. + * The "rb" field indexes on an interval tree the anon_vma_chains + * which link all the VMAs associated with this anon_vma. 
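/*
 * Illustrative sketch (not part of the upstream header): visiting every
 * anon_vma a VMA is linked into by walking the same_vma list described above.
 * The anon_vma_chain list head is the one embedded in struct vm_area_struct
 * (linux/mm_types.h, not shown here), and per the locking note on same_vma
 * below, the caller is assumed to hold mmap_sem.
 */
#include <linux/mm_types.h>
#include <linux/rmap.h>

static unsigned int example_count_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;
	unsigned int nr = 0;

	/* same_vma links all anon_vma_chains of this VMA (fork ancestry) */
	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		nr++;
	return nr;
}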
+ */ +struct anon_vma_chain { + struct vm_area_struct *vma; + struct anon_vma *anon_vma; + struct list_head same_vma; /* locked by mmap_sem & page_table_lock */ + struct rb_node rb; /* locked by anon_vma->rwsem */ + unsigned long rb_subtree_last; +#ifdef CONFIG_DEBUG_VM_RB + unsigned long cached_vma_start, cached_vma_last; +#endif +}; + +enum ttu_flags { + TTU_MIGRATION = 0x1, /* migration mode */ + TTU_MUNLOCK = 0x2, /* munlock mode */ + + TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */ + TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */ + TTU_IGNORE_ACCESS = 0x10, /* don't age */ + TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */ + TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible + * and caller guarantees they will + * do a final flush if necessary */ + TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock: + * caller holds it */ + TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */ + TTU_SYNC = 0x200, /* avoid racy checks with PVMW_SYNC */ +}; + +#ifdef CONFIG_MMU +static inline void get_anon_vma(struct anon_vma *anon_vma) +{ + atomic_inc(&anon_vma->refcount); +} + +void __put_anon_vma(struct anon_vma *anon_vma); + +static inline void put_anon_vma(struct anon_vma *anon_vma) +{ + if (atomic_dec_and_test(&anon_vma->refcount)) + __put_anon_vma(anon_vma); +} + +static inline void anon_vma_lock_write(struct anon_vma *anon_vma) +{ + down_write(&anon_vma->root->rwsem); +} + +static inline void anon_vma_unlock_write(struct anon_vma *anon_vma) +{ + up_write(&anon_vma->root->rwsem); +} + +static inline void anon_vma_lock_read(struct anon_vma *anon_vma) +{ + down_read(&anon_vma->root->rwsem); +} + +static inline void anon_vma_unlock_read(struct anon_vma *anon_vma) +{ + up_read(&anon_vma->root->rwsem); +} + + +/* + * anon_vma helper functions. + */ +void anon_vma_init(void); /* create anon_vma_cachep */ +int __anon_vma_prepare(struct vm_area_struct *); +void unlink_anon_vmas(struct vm_area_struct *); +int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); +int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); + +static inline int anon_vma_prepare(struct vm_area_struct *vma) +{ + if (likely(vma->anon_vma)) + return 0; + + return __anon_vma_prepare(vma); +} + +static inline void anon_vma_merge(struct vm_area_struct *vma, + struct vm_area_struct *next) +{ + VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); + unlink_anon_vmas(next); +} + +struct anon_vma *page_get_anon_vma(struct page *page); + +/* bitflags for do_page_add_anon_rmap() */ +#define RMAP_EXCLUSIVE 0x01 +#define RMAP_COMPOUND 0x02 + +/* + * rmap interfaces called when adding or removing pte of page + */ +void page_move_anon_rmap(struct page *, struct vm_area_struct *); +void page_add_anon_rmap(struct page *, struct vm_area_struct *, + unsigned long, bool); +void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, + unsigned long, int); +void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, + unsigned long, bool); +void page_add_file_rmap(struct page *, bool); +void page_remove_rmap(struct page *, bool); + +void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, + unsigned long); +void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *, + unsigned long); + +static inline void page_dup_rmap(struct page *page, bool compound) +{ + atomic_inc(compound ? 
compound_mapcount_ptr(page) : &page->_mapcount); +} + +/* + * Called from mm/vmscan.c to handle paging out + */ +int page_referenced(struct page *, int is_locked, + struct mem_cgroup *memcg, unsigned long *vm_flags); + +bool try_to_unmap(struct page *, enum ttu_flags flags); + +/* Avoid racy checks */ +#define PVMW_SYNC (1 << 0) +/* Look for migarion entries rather than present PTEs */ +#define PVMW_MIGRATION (1 << 1) + +struct page_vma_mapped_walk { + struct page *page; + struct vm_area_struct *vma; + unsigned long address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; + +static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) +{ + /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */ + if (pvmw->pte && !PageHuge(pvmw->page)) + pte_unmap(pvmw->pte); + if (pvmw->ptl) + spin_unlock(pvmw->ptl); +} + +bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw); + +/* + * Used by swapoff to help locate where page is expected in vma. + */ +unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); + +/* + * Cleans the PTEs of shared mappings. + * (and since clean PTEs should also be readonly, write protects them too) + * + * returns the number of cleaned PTEs. + */ +int page_mkclean(struct page *); + +/* + * called in munlock()/munmap() path to check for other vmas holding + * the page mlocked. + */ +void try_to_munlock(struct page *); + +void remove_migration_ptes(struct page *old, struct page *new, bool locked); + +/* + * Called by memory-failure.c to kill processes. + */ +struct anon_vma *page_lock_anon_vma_read(struct page *page); +void page_unlock_anon_vma_read(struct anon_vma *anon_vma); +int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); + +/* + * rmap_walk_control: To control rmap traversing for specific needs + * + * arg: passed to rmap_one() and invalid_vma() + * rmap_one: executed on each vma where page is mapped + * done: for checking traversing termination condition + * anon_lock: for getting anon_lock by optimized way rather than default + * invalid_vma: for skipping uninterested vma + */ +struct rmap_walk_control { + void *arg; + /* + * Return false if page table scanning in rmap_walk should be stopped. + * Otherwise, return true. 
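/*
 * Illustrative sketch (not part of the upstream header): the canonical shape of
 * an rmap_one() callback, pairing rmap_walk() with page_vma_mapped_walk() to
 * visit every place a page is mapped.  It only counts present mappings; real
 * users (page_referenced(), try_to_unmap(), ...) do their work inside the loop.
 * The caller is assumed to hold the page lock, as rmap_walk() expects.
 */
#include <linux/mm.h>
#include <linux/rmap.h>

static bool example_count_one(struct page *page, struct vm_area_struct *vma,
			      unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	unsigned int *mappings = arg;

	/* one page may be mapped more than once within a single VMA */
	while (page_vma_mapped_walk(&pvmw))
		(*mappings)++;

	return true;	/* keep scanning the remaining VMAs */
}

static unsigned int example_count_mappings(struct page *page)
{
	unsigned int mappings = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = example_count_one,
		.arg = &mappings,
	};

	rmap_walk(page, &rwc);
	return mappings;
}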
+ */ + bool (*rmap_one)(struct page *page, struct vm_area_struct *vma, + unsigned long addr, void *arg); + int (*done)(struct page *page); + struct anon_vma *(*anon_lock)(struct page *page); + bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); +}; + +void rmap_walk(struct page *page, struct rmap_walk_control *rwc); +void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); + +#else /* !CONFIG_MMU */ + +#define anon_vma_init() do {} while (0) +#define anon_vma_prepare(vma) (0) +#define anon_vma_link(vma) do {} while (0) + +static inline int page_referenced(struct page *page, int is_locked, + struct mem_cgroup *memcg, + unsigned long *vm_flags) +{ + *vm_flags = 0; + return 0; +} + +#define try_to_unmap(page, refs) false + +static inline int page_mkclean(struct page *page) +{ + return 0; +} + + +#endif /* CONFIG_MMU */ + +#endif /* _LINUX_RMAP_H */ diff --git a/include/linux/rmi.h b/include/linux/rmi.h new file mode 100644 index 000000000..5ef5c7c41 --- /dev/null +++ b/include/linux/rmi.h @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2011-2016 Synaptics Incorporated + * Copyright (c) 2011 Unixphere + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef _RMI_H +#define _RMI_H +#include +#include +#include +#include +#include +#include +#include +#include + +#define NAME_BUFFER_SIZE 256 + +/** + * struct rmi_2d_axis_alignment - target axis alignment + * @swap_axes: set to TRUE if desired to swap x- and y-axis + * @flip_x: set to TRUE if desired to flip direction on x-axis + * @flip_y: set to TRUE if desired to flip direction on y-axis + * @clip_x_low - reported X coordinates below this setting will be clipped to + * the specified value + * @clip_x_high - reported X coordinates above this setting will be clipped to + * the specified value + * @clip_y_low - reported Y coordinates below this setting will be clipped to + * the specified value + * @clip_y_high - reported Y coordinates above this setting will be clipped to + * the specified value + * @offset_x - this value will be added to all reported X coordinates + * @offset_y - this value will be added to all reported Y coordinates + * @rel_report_enabled - if set to true, the relative reporting will be + * automatically enabled for this sensor. + */ +struct rmi_2d_axis_alignment { + bool swap_axes; + bool flip_x; + bool flip_y; + u16 clip_x_low; + u16 clip_y_low; + u16 clip_x_high; + u16 clip_y_high; + u16 offset_x; + u16 offset_y; + u8 delta_x_threshold; + u8 delta_y_threshold; +}; + +/** This is used to override any hints an F11 2D sensor might have provided + * as to what type of sensor it is. + * + * @rmi_f11_sensor_default - do not override, determine from F11_2D_QUERY14 if + * available. + * @rmi_f11_sensor_touchscreen - treat the sensor as a touchscreen (direct + * pointing). + * @rmi_f11_sensor_touchpad - thread the sensor as a touchpad (indirect + * pointing). + */ +enum rmi_sensor_type { + rmi_sensor_default = 0, + rmi_sensor_touchscreen, + rmi_sensor_touchpad +}; + +#define RMI_F11_DISABLE_ABS_REPORT BIT(0) + +/** + * struct rmi_2d_sensor_data - overrides defaults for a 2D sensor. + * @axis_align - provides axis alignment overrides (see above). + * @sensor_type - Forces the driver to treat the sensor as an indirect + * pointing device (touchpad) rather than a direct pointing device + * (touchscreen). This is useful when F11_2D_QUERY14 register is not + * available. 
+ * @disable_report_mask - Force data to not be reported even if it is supported + * by the firware. + * @topbuttonpad - Used with the "5 buttons touchpads" found on the Lenovo 40 + * series + * @kernel_tracking - most moderns RMI f11 firmwares implement Multifinger + * Type B protocol. However, there are some corner cases where the user + * triggers some jumps by tapping with two fingers on the touchpad. + * Use this setting and dmax to filter out these jumps. + * Also, when using an old sensor using MF Type A behavior, set to true to + * report an actual MT protocol B. + * @dmax - the maximum distance (in sensor units) the kernel tracking allows two + * distincts fingers to be considered the same. + */ +struct rmi_2d_sensor_platform_data { + struct rmi_2d_axis_alignment axis_align; + enum rmi_sensor_type sensor_type; + int x_mm; + int y_mm; + int disable_report_mask; + u16 rezero_wait; + bool topbuttonpad; + bool kernel_tracking; + int dmax; + int dribble; + int palm_detect; +}; + +/** + * struct rmi_f30_data - overrides defaults for a single F30 GPIOs/LED chip. + * @buttonpad - the touchpad is a buttonpad, so enable only the first actual + * button that is found. + * @trackstick_buttons - Set when the function 30 is handling the physical + * buttons of the trackstick (as a PS/2 passthrough device). + * @disable - the touchpad incorrectly reports F30 and it should be ignored. + * This is a special case which is due to misconfigured firmware. + */ +struct rmi_f30_data { + bool buttonpad; + bool trackstick_buttons; + bool disable; +}; + + +/* + * Set the state of a register + * DEFAULT - use the default value set by the firmware config + * OFF - explicitly disable the register + * ON - explicitly enable the register + */ +enum rmi_reg_state { + RMI_REG_STATE_DEFAULT = 0, + RMI_REG_STATE_OFF = 1, + RMI_REG_STATE_ON = 2 +}; + +/** + * struct rmi_f01_power_management -When non-zero, these values will be written + * to the touch sensor to override the default firmware settigns. For a + * detailed explanation of what each field does, see the corresponding + * documention in the RMI4 specification. + * + * @nosleep - specifies whether the device is permitted to sleep or doze (that + * is, enter a temporary low power state) when no fingers are touching the + * sensor. + * @wakeup_threshold - controls the capacitance threshold at which the touch + * sensor will decide to wake up from that low power state. + * @doze_holdoff - controls how long the touch sensor waits after the last + * finger lifts before entering the doze state, in units of 100ms. + * @doze_interval - controls the interval between checks for finger presence + * when the touch sensor is in doze mode, in units of 10ms. + */ +struct rmi_f01_power_management { + enum rmi_reg_state nosleep; + u8 wakeup_threshold; + u8 doze_holdoff; + u8 doze_interval; +}; + +/** + * struct rmi_device_platform_data_spi - provides parameters used in SPI + * communications. All Synaptics SPI products support a standard SPI + * interface; some also support what is called SPI V2 mode, depending on + * firmware and/or ASIC limitations. In V2 mode, the touch sensor can + * support shorter delays during certain operations, and these are specified + * separately from the standard mode delays. + * + * @block_delay - for standard SPI transactions consisting of both a read and + * write operation, the delay (in microseconds) between the read and write + * operations. 
+ * @split_read_block_delay_us - for V2 SPI transactions consisting of both a + * read and write operation, the delay (in microseconds) between the read and + * write operations. + * @read_delay_us - the delay between each byte of a read operation in normal + * SPI mode. + * @write_delay_us - the delay between each byte of a write operation in normal + * SPI mode. + * @split_read_byte_delay_us - the delay between each byte of a read operation + * in V2 mode. + * @pre_delay_us - the delay before the start of a SPI transaction. This is + * typically useful in conjunction with custom chip select assertions (see + * below). + * @post_delay_us - the delay after the completion of an SPI transaction. This + * is typically useful in conjunction with custom chip select assertions (see + * below). + * @cs_assert - For systems where the SPI subsystem does not control the CS/SSB + * line, or where such control is broken, you can provide a custom routine to + * handle a GPIO as CS/SSB. This routine will be called at the beginning and + * end of each SPI transaction. The RMI SPI implementation will wait + * pre_delay_us after this routine returns before starting the SPI transfer; + * and post_delay_us after completion of the SPI transfer(s) before calling it + * with assert==FALSE. + */ +struct rmi_device_platform_data_spi { + u32 block_delay_us; + u32 split_read_block_delay_us; + u32 read_delay_us; + u32 write_delay_us; + u32 split_read_byte_delay_us; + u32 pre_delay_us; + u32 post_delay_us; + u8 bits_per_word; + u16 mode; + + void *cs_assert_data; + int (*cs_assert)(const void *cs_assert_data, const bool assert); +}; + +/** + * struct rmi_device_platform_data - system specific configuration info. + * + * @reset_delay_ms - after issuing a reset command to the touch sensor, the + * driver waits a few milliseconds to give the firmware a chance to + * to re-initialize. You can override the default wait period here. + * @irq: irq associated with the attn gpio line, or negative + */ +struct rmi_device_platform_data { + int reset_delay_ms; + int irq; + + struct rmi_device_platform_data_spi spi_data; + + /* function handler pdata */ + struct rmi_2d_sensor_platform_data sensor_pdata; + struct rmi_f01_power_management power_management; + struct rmi_f30_data f30_data; +}; + +/** + * struct rmi_function_descriptor - RMI function base addresses + * + * @query_base_addr: The RMI Query base address + * @command_base_addr: The RMI Command base address + * @control_base_addr: The RMI Control base address + * @data_base_addr: The RMI Data base address + * @interrupt_source_count: The number of irqs this RMI function needs + * @function_number: The RMI function number + * + * This struct is used when iterating the Page Description Table. The addresses + * are 16-bit values to include the current page address. + * + */ +struct rmi_function_descriptor { + u16 query_base_addr; + u16 command_base_addr; + u16 control_base_addr; + u16 data_base_addr; + u8 interrupt_source_count; + u8 function_number; + u8 function_version; +}; + +struct rmi_device; + +/** + * struct rmi_transport_dev - represent an RMI transport device + * + * @dev: Pointer to the communication device, e.g. i2c or spi + * @rmi_dev: Pointer to the RMI device + * @proto_name: name of the transport protocol (SPI, i2c, etc) + * @ops: pointer to transport operations implementation + * + * The RMI transport device implements the glue between different communication + * buses such as I2C and SPI. 
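/*
 * Illustrative sketch (not part of the upstream header): filling in the
 * rmi_device_platform_data defined above for a hypothetical SPI-attached
 * touchscreen.  All values are illustrative only; how the structure is handed
 * to the transport driver (board file, spi_board_info.platform_data, ...) is
 * outside this header and not shown.
 */
#include <linux/rmi.h>

static const struct rmi_device_platform_data example_rmi_pdata = {
	.reset_delay_ms	= 100,		/* give the firmware time after reset */
	.irq		= -1,		/* no dedicated ATTN interrupt line */
	.sensor_pdata = {
		.sensor_type = rmi_sensor_touchscreen,
		.axis_align = {
			.flip_y = true,	/* panel mounted upside down */
		},
		.x_mm = 102,
		.y_mm = 68,
	},
	.power_management = {
		.nosleep = RMI_REG_STATE_ON,	/* never doze */
	},
	.f30_data = {
		.buttonpad = false,
	},
};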
+ * + */ +struct rmi_transport_dev { + struct device *dev; + struct rmi_device *rmi_dev; + + const char *proto_name; + const struct rmi_transport_ops *ops; + + struct rmi_device_platform_data pdata; + + struct input_dev *input; +}; + +/** + * struct rmi_transport_ops - defines transport protocol operations. + * + * @write_block: Writing a block of data to the specified address + * @read_block: Read a block of data from the specified address. + */ +struct rmi_transport_ops { + int (*write_block)(struct rmi_transport_dev *xport, u16 addr, + const void *buf, size_t len); + int (*read_block)(struct rmi_transport_dev *xport, u16 addr, + void *buf, size_t len); + int (*reset)(struct rmi_transport_dev *xport, u16 reset_addr); +}; + +/** + * struct rmi_driver - driver for an RMI4 sensor on the RMI bus. + * + * @driver: Device driver model driver + * @reset_handler: Called when a reset is detected. + * @clear_irq_bits: Clear the specified bits in the current interrupt mask. + * @set_irq_bist: Set the specified bits in the current interrupt mask. + * @store_productid: Callback for cache product id from function 01 + * @data: Private data pointer + * + */ +struct rmi_driver { + struct device_driver driver; + + int (*reset_handler)(struct rmi_device *rmi_dev); + int (*clear_irq_bits)(struct rmi_device *rmi_dev, unsigned long *mask); + int (*set_irq_bits)(struct rmi_device *rmi_dev, unsigned long *mask); + int (*store_productid)(struct rmi_device *rmi_dev); + int (*set_input_params)(struct rmi_device *rmi_dev, + struct input_dev *input); + void *data; +}; + +/** + * struct rmi_device - represents an RMI4 sensor device on the RMI bus. + * + * @dev: The device created for the RMI bus + * @number: Unique number for the device on the bus. + * @driver: Pointer to associated driver + * @xport: Pointer to the transport interface + * + */ +struct rmi_device { + struct device dev; + int number; + + struct rmi_driver *driver; + struct rmi_transport_dev *xport; + +}; + +struct rmi4_attn_data { + unsigned long irq_status; + size_t size; + void *data; +}; + +struct rmi_driver_data { + struct list_head function_list; + + struct rmi_device *rmi_dev; + + struct rmi_function *f01_container; + struct rmi_function *f34_container; + bool bootloader_mode; + + int num_of_irq_regs; + int irq_count; + void *irq_memory; + unsigned long *irq_status; + unsigned long *fn_irq_bits; + unsigned long *current_irq_mask; + unsigned long *new_irq_mask; + struct mutex irq_mutex; + struct input_dev *input; + + struct irq_domain *irqdomain; + + u8 pdt_props; + + u8 num_rx_electrodes; + u8 num_tx_electrodes; + + bool enabled; + struct mutex enabled_mutex; + + struct rmi4_attn_data attn_data; + DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16); +}; + +int rmi_register_transport_device(struct rmi_transport_dev *xport); +void rmi_unregister_transport_device(struct rmi_transport_dev *xport); + +void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status, + void *data, size_t size); + +int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake); +int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake); +#endif diff --git a/include/linux/rndis.h b/include/linux/rndis.h new file mode 100644 index 000000000..882587c2b --- /dev/null +++ b/include/linux/rndis.h @@ -0,0 +1,392 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Remote Network Driver Interface Specification (RNDIS) + * definitions of the magic numbers used by this protocol + */ + +/* Remote NDIS Versions */ +#define RNDIS_MAJOR_VERSION 0x00000001 
+#define RNDIS_MINOR_VERSION 0x00000000 + +/* Device Flags */ +#define RNDIS_DF_CONNECTIONLESS 0x00000001U +#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002U +#define RNDIS_DF_RAW_DATA 0x00000004U + +/* + * Codes for "msg_type" field of rndis messages; + * only the data channel uses packet messages (maybe batched); + * everything else goes on the control channel. + */ +#define RNDIS_MSG_COMPLETION 0x80000000 +#define RNDIS_MSG_PACKET 0x00000001 /* 1-N packets */ +#define RNDIS_MSG_INIT 0x00000002 +#define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_HALT 0x00000003 +#define RNDIS_MSG_QUERY 0x00000004 +#define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_SET 0x00000005 +#define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_RESET 0x00000006 +#define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_INDICATE 0x00000007 +#define RNDIS_MSG_KEEPALIVE 0x00000008 +#define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION) +/* + * Reserved message type for private communication between lower-layer host + * driver and remote device, if necessary. + */ +#define RNDIS_MSG_BUS 0xff000001 + +/* codes for "status" field of completion messages */ +#define RNDIS_STATUS_SUCCESS 0x00000000 +#define RNDIS_STATUS_PENDING 0x00000103 + +/* Status codes */ +#define RNDIS_STATUS_NOT_RECOGNIZED 0x00010001 +#define RNDIS_STATUS_NOT_COPIED 0x00010002 +#define RNDIS_STATUS_NOT_ACCEPTED 0x00010003 +#define RNDIS_STATUS_CALL_ACTIVE 0x00010007 + +#define RNDIS_STATUS_ONLINE 0x40010003 +#define RNDIS_STATUS_RESET_START 0x40010004 +#define RNDIS_STATUS_RESET_END 0x40010005 +#define RNDIS_STATUS_RING_STATUS 0x40010006 +#define RNDIS_STATUS_CLOSED 0x40010007 +#define RNDIS_STATUS_WAN_LINE_UP 0x40010008 +#define RNDIS_STATUS_WAN_LINE_DOWN 0x40010009 +#define RNDIS_STATUS_WAN_FRAGMENT 0x4001000A +#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000B +#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000C +#define RNDIS_STATUS_HARDWARE_LINE_UP 0x4001000D +#define RNDIS_STATUS_HARDWARE_LINE_DOWN 0x4001000E +#define RNDIS_STATUS_INTERFACE_UP 0x4001000F +#define RNDIS_STATUS_INTERFACE_DOWN 0x40010010 +#define RNDIS_STATUS_MEDIA_BUSY 0x40010011 +#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION 0x40010012 +#define RNDIS_STATUS_WW_INDICATION RDIA_SPECIFIC_INDICATION +#define RNDIS_STATUS_LINK_SPEED_CHANGE 0x40010013L +#define RNDIS_STATUS_NETWORK_CHANGE 0x40010018 + +#define RNDIS_STATUS_NOT_RESETTABLE 0x80010001 +#define RNDIS_STATUS_SOFT_ERRORS 0x80010003 +#define RNDIS_STATUS_HARD_ERRORS 0x80010004 +#define RNDIS_STATUS_BUFFER_OVERFLOW 0x80000005 + +#define RNDIS_STATUS_FAILURE 0xC0000001 +#define RNDIS_STATUS_RESOURCES 0xC000009A +#define RNDIS_STATUS_NOT_SUPPORTED 0xc00000BB +#define RNDIS_STATUS_CLOSING 0xC0010002 +#define RNDIS_STATUS_BAD_VERSION 0xC0010004 +#define RNDIS_STATUS_BAD_CHARACTERISTICS 0xC0010005 +#define RNDIS_STATUS_ADAPTER_NOT_FOUND 0xC0010006 +#define RNDIS_STATUS_OPEN_FAILED 0xC0010007 +#define RNDIS_STATUS_DEVICE_FAILED 0xC0010008 +#define RNDIS_STATUS_MULTICAST_FULL 0xC0010009 +#define RNDIS_STATUS_MULTICAST_EXISTS 0xC001000A +#define RNDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B +#define RNDIS_STATUS_REQUEST_ABORTED 0xC001000C +#define RNDIS_STATUS_RESET_IN_PROGRESS 0xC001000D +#define RNDIS_STATUS_CLOSING_INDICATING 0xC001000E +#define RNDIS_STATUS_INVALID_PACKET 0xC001000F +#define RNDIS_STATUS_OPEN_LIST_FULL 0xC0010010 +#define RNDIS_STATUS_ADAPTER_NOT_READY 0xC0010011 +#define 
RNDIS_STATUS_ADAPTER_NOT_OPEN 0xC0010012 +#define RNDIS_STATUS_NOT_INDICATING 0xC0010013 +#define RNDIS_STATUS_INVALID_LENGTH 0xC0010014 +#define RNDIS_STATUS_INVALID_DATA 0xC0010015 +#define RNDIS_STATUS_BUFFER_TOO_SHORT 0xC0010016 +#define RNDIS_STATUS_INVALID_OID 0xC0010017 +#define RNDIS_STATUS_ADAPTER_REMOVED 0xC0010018 +#define RNDIS_STATUS_UNSUPPORTED_MEDIA 0xC0010019 +#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE 0xC001001A +#define RNDIS_STATUS_FILE_NOT_FOUND 0xC001001B +#define RNDIS_STATUS_ERROR_READING_FILE 0xC001001C +#define RNDIS_STATUS_ALREADY_MAPPED 0xC001001D +#define RNDIS_STATUS_RESOURCE_CONFLICT 0xC001001E +#define RNDIS_STATUS_NO_CABLE 0xC001001F + +#define RNDIS_STATUS_INVALID_SAP 0xC0010020 +#define RNDIS_STATUS_SAP_IN_USE 0xC0010021 +#define RNDIS_STATUS_INVALID_ADDRESS 0xC0010022 +#define RNDIS_STATUS_VC_NOT_ACTIVATED 0xC0010023 +#define RNDIS_STATUS_DEST_OUT_OF_ORDER 0xC0010024 +#define RNDIS_STATUS_VC_NOT_AVAILABLE 0xC0010025 +#define RNDIS_STATUS_CELLRATE_NOT_AVAILABLE 0xC0010026 +#define RNDIS_STATUS_INCOMPATABLE_QOS 0xC0010027 +#define RNDIS_STATUS_AAL_PARAMS_UNSUPPORTED 0xC0010028 +#define RNDIS_STATUS_NO_ROUTE_TO_DESTINATION 0xC0010029 + +#define RNDIS_STATUS_TOKEN_RING_OPEN_ERROR 0xC0011000 + +/* codes for RNDIS_OID_GEN_PHYSICAL_MEDIUM */ +#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED 0x00000000 +#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN 0x00000001 +#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM 0x00000002 +#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE 0x00000003 +#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE 0x00000004 +#define RNDIS_PHYSICAL_MEDIUM_DSL 0x00000005 +#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL 0x00000006 +#define RNDIS_PHYSICAL_MEDIUM_1394 0x00000007 +#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN 0x00000008 +#define RNDIS_PHYSICAL_MEDIUM_MAX 0x00000009 + +/* Remote NDIS medium types. */ +#define RNDIS_MEDIUM_UNSPECIFIED 0x00000000 +#define RNDIS_MEDIUM_802_3 0x00000000 +#define RNDIS_MEDIUM_802_5 0x00000001 +#define RNDIS_MEDIUM_FDDI 0x00000002 +#define RNDIS_MEDIUM_WAN 0x00000003 +#define RNDIS_MEDIUM_LOCAL_TALK 0x00000004 +#define RNDIS_MEDIUM_ARCNET_RAW 0x00000006 +#define RNDIS_MEDIUM_ARCNET_878_2 0x00000007 +#define RNDIS_MEDIUM_ATM 0x00000008 +#define RNDIS_MEDIUM_WIRELESS_LAN 0x00000009 +#define RNDIS_MEDIUM_IRDA 0x0000000A +#define RNDIS_MEDIUM_BPC 0x0000000B +#define RNDIS_MEDIUM_CO_WAN 0x0000000C +#define RNDIS_MEDIUM_1394 0x0000000D +/* Not a real medium, defined as an upper-bound */ +#define RNDIS_MEDIUM_MAX 0x0000000E + +/* Remote NDIS medium connection states. 
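/*
 * Illustrative sketch (not part of the upstream header): interpreting the
 * msg_type/status words of a control-channel completion using the constants
 * above.  The message framing itself (struct layout, little-endian conversion)
 * lives with the individual RNDIS drivers, so plain host-order u32 values are
 * assumed here.
 */
#include <linux/rndis.h>
#include <linux/types.h>
#include <linux/errno.h>

/* true if @msg_type is the completion matching request type @request */
static bool example_is_completion(u32 msg_type, u32 request)
{
	return msg_type == (request | RNDIS_MSG_COMPLETION);
}

static int example_check_query_done(u32 msg_type, u32 status)
{
	if (!example_is_completion(msg_type, RNDIS_MSG_QUERY))
		return -EPROTO;		/* not an RNDIS_MSG_QUERY_C message */
	if (status == RNDIS_STATUS_PENDING)
		return -EBUSY;		/* device will indicate completion later */
	return status == RNDIS_STATUS_SUCCESS ? 0 : -EIO;
}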
*/ +#define RNDIS_MEDIA_STATE_CONNECTED 0x00000000 +#define RNDIS_MEDIA_STATE_DISCONNECTED 0x00000001 + +/* packet filter bits used by RNDIS_OID_GEN_CURRENT_PACKET_FILTER */ +#define RNDIS_PACKET_TYPE_DIRECTED 0x00000001 +#define RNDIS_PACKET_TYPE_MULTICAST 0x00000002 +#define RNDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004 +#define RNDIS_PACKET_TYPE_BROADCAST 0x00000008 +#define RNDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010 +#define RNDIS_PACKET_TYPE_PROMISCUOUS 0x00000020 +#define RNDIS_PACKET_TYPE_SMT 0x00000040 +#define RNDIS_PACKET_TYPE_ALL_LOCAL 0x00000080 +#define RNDIS_PACKET_TYPE_GROUP 0x00001000 +#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00002000 +#define RNDIS_PACKET_TYPE_FUNCTIONAL 0x00004000 +#define RNDIS_PACKET_TYPE_MAC_FRAME 0x00008000 + +/* RNDIS_OID_GEN_MINIPORT_INFO constants */ +#define RNDIS_MINIPORT_BUS_MASTER 0x00000001 +#define RNDIS_MINIPORT_WDM_DRIVER 0x00000002 +#define RNDIS_MINIPORT_SG_LIST 0x00000004 +#define RNDIS_MINIPORT_SUPPORTS_MEDIA_QUERY 0x00000008 +#define RNDIS_MINIPORT_INDICATES_PACKETS 0x00000010 +#define RNDIS_MINIPORT_IGNORE_PACKET_QUEUE 0x00000020 +#define RNDIS_MINIPORT_IGNORE_REQUEST_QUEUE 0x00000040 +#define RNDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS 0x00000080 +#define RNDIS_MINIPORT_INTERMEDIATE_DRIVER 0x00000100 +#define RNDIS_MINIPORT_IS_NDIS_5 0x00000200 +#define RNDIS_MINIPORT_IS_CO 0x00000400 +#define RNDIS_MINIPORT_DESERIALIZE 0x00000800 +#define RNDIS_MINIPORT_REQUIRES_MEDIA_POLLING 0x00001000 +#define RNDIS_MINIPORT_SUPPORTS_MEDIA_SENSE 0x00002000 +#define RNDIS_MINIPORT_NETBOOT_CARD 0x00004000 +#define RNDIS_MINIPORT_PM_SUPPORTED 0x00008000 +#define RNDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE 0x00010000 +#define RNDIS_MINIPORT_USES_SAFE_BUFFER_APIS 0x00020000 +#define RNDIS_MINIPORT_HIDDEN 0x00040000 +#define RNDIS_MINIPORT_SWENUM 0x00080000 +#define RNDIS_MINIPORT_SURPRISE_REMOVE_OK 0x00100000 +#define RNDIS_MINIPORT_NO_HALT_ON_SUSPEND 0x00200000 +#define RNDIS_MINIPORT_HARDWARE_DEVICE 0x00400000 +#define RNDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS 0x00800000 +#define RNDIS_MINIPORT_64BITS_DMA 0x01000000 + +#define RNDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA 0x00000001 +#define RNDIS_MAC_OPTION_RECEIVE_SERIALIZED 0x00000002 +#define RNDIS_MAC_OPTION_TRANSFERS_NOT_PEND 0x00000004 +#define RNDIS_MAC_OPTION_NO_LOOPBACK 0x00000008 +#define RNDIS_MAC_OPTION_FULL_DUPLEX 0x00000010 +#define RNDIS_MAC_OPTION_EOTX_INDICATION 0x00000020 +#define RNDIS_MAC_OPTION_8021P_PRIORITY 0x00000040 +#define RNDIS_MAC_OPTION_RESERVED 0x80000000 + +/* Object Identifiers used by NdisRequest Query/Set Information */ +/* General (Required) Objects */ +#define RNDIS_OID_GEN_SUPPORTED_LIST 0x00010101 +#define RNDIS_OID_GEN_HARDWARE_STATUS 0x00010102 +#define RNDIS_OID_GEN_MEDIA_SUPPORTED 0x00010103 +#define RNDIS_OID_GEN_MEDIA_IN_USE 0x00010104 +#define RNDIS_OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105 +#define RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106 +#define RNDIS_OID_GEN_LINK_SPEED 0x00010107 +#define RNDIS_OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108 +#define RNDIS_OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109 +#define RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A +#define RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B +#define RNDIS_OID_GEN_VENDOR_ID 0x0001010C +#define RNDIS_OID_GEN_VENDOR_DESCRIPTION 0x0001010D +#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER 0x0001010E +#define RNDIS_OID_GEN_CURRENT_LOOKAHEAD 0x0001010F +#define RNDIS_OID_GEN_DRIVER_VERSION 0x00010110 +#define RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111 +#define RNDIS_OID_GEN_PROTOCOL_OPTIONS 0x00010112 +#define 
RNDIS_OID_GEN_MAC_OPTIONS 0x00010113 +#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS 0x00010114 +#define RNDIS_OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115 +#define RNDIS_OID_GEN_VENDOR_DRIVER_VERSION 0x00010116 +#define RNDIS_OID_GEN_SUPPORTED_GUIDS 0x00010117 +#define RNDIS_OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118 +#define RNDIS_OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119 +#define RNDIS_OID_GEN_PHYSICAL_MEDIUM 0x00010202 +#define RNDIS_OID_GEN_MACHINE_NAME 0x0001021A +#define RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B +#define RNDIS_OID_GEN_VLAN_ID 0x0001021C + +/* Optional OIDs */ +#define RNDIS_OID_GEN_MEDIA_CAPABILITIES 0x00010201 + +/* Required statistics OIDs */ +#define RNDIS_OID_GEN_XMIT_OK 0x00020101 +#define RNDIS_OID_GEN_RCV_OK 0x00020102 +#define RNDIS_OID_GEN_XMIT_ERROR 0x00020103 +#define RNDIS_OID_GEN_RCV_ERROR 0x00020104 +#define RNDIS_OID_GEN_RCV_NO_BUFFER 0x00020105 + +/* Optional statistics OIDs */ +#define RNDIS_OID_GEN_DIRECTED_BYTES_XMIT 0x00020201 +#define RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202 +#define RNDIS_OID_GEN_MULTICAST_BYTES_XMIT 0x00020203 +#define RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204 +#define RNDIS_OID_GEN_BROADCAST_BYTES_XMIT 0x00020205 +#define RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206 +#define RNDIS_OID_GEN_DIRECTED_BYTES_RCV 0x00020207 +#define RNDIS_OID_GEN_DIRECTED_FRAMES_RCV 0x00020208 +#define RNDIS_OID_GEN_MULTICAST_BYTES_RCV 0x00020209 +#define RNDIS_OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A +#define RNDIS_OID_GEN_BROADCAST_BYTES_RCV 0x0002020B +#define RNDIS_OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C + +#define RNDIS_OID_GEN_RCV_CRC_ERROR 0x0002020D +#define RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E + +#define RNDIS_OID_GEN_GET_TIME_CAPS 0x0002020F +#define RNDIS_OID_GEN_GET_NETCARD_TIME 0x00020210 + +#define RNDIS_OID_GEN_NETCARD_LOAD 0x00020211 +#define RNDIS_OID_GEN_DEVICE_PROFILE 0x00020212 +#define RNDIS_OID_GEN_INIT_TIME_MS 0x00020213 +#define RNDIS_OID_GEN_RESET_COUNTS 0x00020214 +#define RNDIS_OID_GEN_MEDIA_SENSE_COUNTS 0x00020215 +#define RNDIS_OID_GEN_FRIENDLY_NAME 0x00020216 +#define RNDIS_OID_GEN_MINIPORT_INFO 0x00020217 +#define RNDIS_OID_GEN_RESET_VERIFY_PARAMETERS 0x00020218 + +/* These are connection-oriented general OIDs. */ +/* These replace the above OIDs for connection-oriented media. */ +#define RNDIS_OID_GEN_CO_SUPPORTED_LIST 0x00010101 +#define RNDIS_OID_GEN_CO_HARDWARE_STATUS 0x00010102 +#define RNDIS_OID_GEN_CO_MEDIA_SUPPORTED 0x00010103 +#define RNDIS_OID_GEN_CO_MEDIA_IN_USE 0x00010104 +#define RNDIS_OID_GEN_CO_LINK_SPEED 0x00010105 +#define RNDIS_OID_GEN_CO_VENDOR_ID 0x00010106 +#define RNDIS_OID_GEN_CO_VENDOR_DESCRIPTION 0x00010107 +#define RNDIS_OID_GEN_CO_DRIVER_VERSION 0x00010108 +#define RNDIS_OID_GEN_CO_PROTOCOL_OPTIONS 0x00010109 +#define RNDIS_OID_GEN_CO_MAC_OPTIONS 0x0001010A +#define RNDIS_OID_GEN_CO_MEDIA_CONNECT_STATUS 0x0001010B +#define RNDIS_OID_GEN_CO_VENDOR_DRIVER_VERSION 0x0001010C +#define RNDIS_OID_GEN_CO_MINIMUM_LINK_SPEED 0x0001010D + +#define RNDIS_OID_GEN_CO_GET_TIME_CAPS 0x00010201 +#define RNDIS_OID_GEN_CO_GET_NETCARD_TIME 0x00010202 + +/* These are connection-oriented statistics OIDs. 
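The RNDIS_PACKET_TYPE_* bits listed earlier are OR-ed into the 32-bit value a host writes with RNDIS_OID_GEN_CURRENT_PACKET_FILTER. The sketch below is illustrative only and not part of the patch; rndis_set_oid() and struct example_rndis_dev are hypothetical stand-ins for whatever set-message plumbing a particular RNDIS driver provides.

/* Illustrative only, not part of the patch: accept unicast to our MAC,
 * broadcast, and all multicast traffic.
 */
static int example_enable_rx(struct example_rndis_dev *dev)	/* hypothetical type */
{
	u32 filter = RNDIS_PACKET_TYPE_DIRECTED |
		     RNDIS_PACKET_TYPE_BROADCAST |
		     RNDIS_PACKET_TYPE_ALL_MULTICAST;

	/* rndis_set_oid() is assumed: it issues an RNDIS set for the OID */
	return rndis_set_oid(dev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER,
			     &filter, sizeof(filter));
}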
*/ +#define RNDIS_OID_GEN_CO_XMIT_PDUS_OK 0x00020101 +#define RNDIS_OID_GEN_CO_RCV_PDUS_OK 0x00020102 +#define RNDIS_OID_GEN_CO_XMIT_PDUS_ERROR 0x00020103 +#define RNDIS_OID_GEN_CO_RCV_PDUS_ERROR 0x00020104 +#define RNDIS_OID_GEN_CO_RCV_PDUS_NO_BUFFER 0x00020105 + + +#define RNDIS_OID_GEN_CO_RCV_CRC_ERROR 0x00020201 +#define RNDIS_OID_GEN_CO_TRANSMIT_QUEUE_LENGTH 0x00020202 +#define RNDIS_OID_GEN_CO_BYTES_XMIT 0x00020203 +#define RNDIS_OID_GEN_CO_BYTES_RCV 0x00020204 +#define RNDIS_OID_GEN_CO_BYTES_XMIT_OUTSTANDING 0x00020205 +#define RNDIS_OID_GEN_CO_NETCARD_LOAD 0x00020206 + +/* These are objects for Connection-oriented media call-managers. */ +#define RNDIS_OID_CO_ADD_PVC 0xFF000001 +#define RNDIS_OID_CO_DELETE_PVC 0xFF000002 +#define RNDIS_OID_CO_GET_CALL_INFORMATION 0xFF000003 +#define RNDIS_OID_CO_ADD_ADDRESS 0xFF000004 +#define RNDIS_OID_CO_DELETE_ADDRESS 0xFF000005 +#define RNDIS_OID_CO_GET_ADDRESSES 0xFF000006 +#define RNDIS_OID_CO_ADDRESS_CHANGE 0xFF000007 +#define RNDIS_OID_CO_SIGNALING_ENABLED 0xFF000008 +#define RNDIS_OID_CO_SIGNALING_DISABLED 0xFF000009 + +/* 802.3 Objects (Ethernet) */ +#define RNDIS_OID_802_3_PERMANENT_ADDRESS 0x01010101 +#define RNDIS_OID_802_3_CURRENT_ADDRESS 0x01010102 +#define RNDIS_OID_802_3_MULTICAST_LIST 0x01010103 +#define RNDIS_OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 +#define RNDIS_OID_802_3_MAC_OPTIONS 0x01010105 + +#define RNDIS_802_3_MAC_OPTION_PRIORITY 0x00000001 + +#define RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101 +#define RNDIS_OID_802_3_XMIT_ONE_COLLISION 0x01020102 +#define RNDIS_OID_802_3_XMIT_MORE_COLLISIONS 0x01020103 + +#define RNDIS_OID_802_3_XMIT_DEFERRED 0x01020201 +#define RNDIS_OID_802_3_XMIT_MAX_COLLISIONS 0x01020202 +#define RNDIS_OID_802_3_RCV_OVERRUN 0x01020203 +#define RNDIS_OID_802_3_XMIT_UNDERRUN 0x01020204 +#define RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205 +#define RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206 +#define RNDIS_OID_802_3_XMIT_LATE_COLLISIONS 0x01020207 + +#define RNDIS_OID_802_11_BSSID 0x0d010101 +#define RNDIS_OID_802_11_SSID 0x0d010102 +#define RNDIS_OID_802_11_INFRASTRUCTURE_MODE 0x0d010108 +#define RNDIS_OID_802_11_ADD_WEP 0x0d010113 +#define RNDIS_OID_802_11_REMOVE_WEP 0x0d010114 +#define RNDIS_OID_802_11_DISASSOCIATE 0x0d010115 +#define RNDIS_OID_802_11_AUTHENTICATION_MODE 0x0d010118 +#define RNDIS_OID_802_11_PRIVACY_FILTER 0x0d010119 +#define RNDIS_OID_802_11_BSSID_LIST_SCAN 0x0d01011a +#define RNDIS_OID_802_11_ENCRYPTION_STATUS 0x0d01011b +#define RNDIS_OID_802_11_ADD_KEY 0x0d01011d +#define RNDIS_OID_802_11_REMOVE_KEY 0x0d01011e +#define RNDIS_OID_802_11_ASSOCIATION_INFORMATION 0x0d01011f +#define RNDIS_OID_802_11_CAPABILITY 0x0d010122 +#define RNDIS_OID_802_11_PMKID 0x0d010123 +#define RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED 0x0d010203 +#define RNDIS_OID_802_11_NETWORK_TYPE_IN_USE 0x0d010204 +#define RNDIS_OID_802_11_TX_POWER_LEVEL 0x0d010205 +#define RNDIS_OID_802_11_RSSI 0x0d010206 +#define RNDIS_OID_802_11_RSSI_TRIGGER 0x0d010207 +#define RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD 0x0d010209 +#define RNDIS_OID_802_11_RTS_THRESHOLD 0x0d01020a +#define RNDIS_OID_802_11_SUPPORTED_RATES 0x0d01020e +#define RNDIS_OID_802_11_CONFIGURATION 0x0d010211 +#define RNDIS_OID_802_11_POWER_MODE 0x0d010216 +#define RNDIS_OID_802_11_BSSID_LIST 0x0d010217 + +/* Plug and Play capabilities */ +#define RNDIS_OID_PNP_CAPABILITIES 0xFD010100 +#define RNDIS_OID_PNP_SET_POWER 0xFD010101 +#define RNDIS_OID_PNP_QUERY_POWER 0xFD010102 +#define RNDIS_OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103 +#define 
RNDIS_OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104 +#define RNDIS_OID_PNP_ENABLE_WAKE_UP 0xFD010106 + +/* RNDIS_PNP_CAPABILITIES.Flags constants */ +#define RNDIS_DEVICE_WAKE_UP_ENABLE 0x00000001 +#define RNDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE 0x00000002 +#define RNDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE 0x00000004 + +#define REMOTE_CONDIS_MP_CREATE_VC_MSG 0x00008001 +#define REMOTE_CONDIS_MP_DELETE_VC_MSG 0x00008002 +#define REMOTE_CONDIS_MP_ACTIVATE_VC_MSG 0x00008005 +#define REMOTE_CONDIS_MP_DEACTIVATE_VC_MSG 0x00008006 +#define REMOTE_CONDIS_INDICATE_STATUS_MSG 0x00008007 + +#define REMOTE_CONDIS_MP_CREATE_VC_CMPLT 0x80008001 +#define REMOTE_CONDIS_MP_DELETE_VC_CMPLT 0x80008002 +#define REMOTE_CONDIS_MP_ACTIVATE_VC_CMPLT 0x80008005 +#define REMOTE_CONDIS_MP_DEACTIVATE_VC_CMPLT 0x80008006 diff --git a/include/linux/rodata_test.h b/include/linux/rodata_test.h new file mode 100644 index 000000000..84766bcdd --- /dev/null +++ b/include/linux/rodata_test.h @@ -0,0 +1,22 @@ +/* + * rodata_test.h: functional test for mark_rodata_ro function + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#ifndef _RODATA_TEST_H +#define _RODATA_TEST_H + +#ifdef CONFIG_DEBUG_RODATA_TEST +void rodata_test(void); +#else +static inline void rodata_test(void) {} +#endif + +#endif /* _RODATA_TEST_H */ diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h new file mode 100644 index 000000000..bab671b07 --- /dev/null +++ b/include/linux/root_dev.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ROOT_DEV_H_ +#define _ROOT_DEV_H_ + +#include +#include +#include + +enum { + Root_NFS = MKDEV(UNNAMED_MAJOR, 255), + Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0), + Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1), + Root_FD0 = MKDEV(FLOPPY_MAJOR, 0), + Root_HDA1 = MKDEV(IDE0_MAJOR, 1), + Root_HDA2 = MKDEV(IDE0_MAJOR, 2), + Root_SDA1 = MKDEV(SCSI_DISK0_MAJOR, 1), + Root_SDA2 = MKDEV(SCSI_DISK0_MAJOR, 2), + Root_HDC1 = MKDEV(IDE1_MAJOR, 1), + Root_SR0 = MKDEV(SCSI_CDROM_MAJOR, 0), +}; + +extern dev_t ROOT_DEV; + +#endif diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h new file mode 100644 index 000000000..a68972b09 --- /dev/null +++ b/include/linux/rpmsg.h @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * Remote processor messaging + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2011 Google, Inc. + * All rights reserved. 
+ */ + +#ifndef _LINUX_RPMSG_H +#define _LINUX_RPMSG_H + +#include +#include +#include +#include +#include +#include +#include + +#define RPMSG_ADDR_ANY 0xFFFFFFFF + +struct rpmsg_device; +struct rpmsg_endpoint; +struct rpmsg_device_ops; +struct rpmsg_endpoint_ops; + +/** + * struct rpmsg_channel_info - channel info representation + * @name: name of service + * @src: local address + * @dst: destination address + */ +struct rpmsg_channel_info { + char name[RPMSG_NAME_SIZE]; + u32 src; + u32 dst; +}; + +/** + * rpmsg_device - device that belong to the rpmsg bus + * @dev: the device struct + * @id: device id (used to match between rpmsg drivers and devices) + * @driver_override: driver name to force a match + * @src: local address + * @dst: destination address + * @ept: the rpmsg endpoint of this channel + * @announce: if set, rpmsg will announce the creation/removal of this channel + */ +struct rpmsg_device { + struct device dev; + struct rpmsg_device_id id; + char *driver_override; + u32 src; + u32 dst; + struct rpmsg_endpoint *ept; + bool announce; + + const struct rpmsg_device_ops *ops; +}; + +typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32); + +/** + * struct rpmsg_endpoint - binds a local rpmsg address to its user + * @rpdev: rpmsg channel device + * @refcount: when this drops to zero, the ept is deallocated + * @cb: rx callback handler + * @cb_lock: must be taken before accessing/changing @cb + * @addr: local rpmsg address + * @priv: private data for the driver's use + * + * In essence, an rpmsg endpoint represents a listener on the rpmsg bus, as + * it binds an rpmsg address with an rx callback handler. + * + * Simple rpmsg drivers shouldn't use this struct directly, because + * things just work: every rpmsg driver provides an rx callback upon + * registering to the bus, and that callback is then bound to its rpmsg + * address when the driver is probed. When relevant inbound messages arrive + * (i.e. messages which their dst address equals to the src address of + * the rpmsg channel), the driver's handler is invoked to process it. + * + * More complicated drivers though, that do need to allocate additional rpmsg + * addresses, and bind them to different rx callbacks, must explicitly + * create additional endpoints by themselves (see rpmsg_create_ept()). + */ +struct rpmsg_endpoint { + struct rpmsg_device *rpdev; + struct kref refcount; + rpmsg_rx_cb_t cb; + struct mutex cb_lock; + u32 addr; + void *priv; + + const struct rpmsg_endpoint_ops *ops; +}; + +/** + * struct rpmsg_driver - rpmsg driver struct + * @drv: underlying device driver + * @id_table: rpmsg ids serviced by this driver + * @probe: invoked when a matching rpmsg channel (i.e. 
device) is found + * @remove: invoked when the rpmsg channel is removed + * @callback: invoked when an inbound message is received on the channel + */ +struct rpmsg_driver { + struct device_driver drv; + const struct rpmsg_device_id *id_table; + int (*probe)(struct rpmsg_device *dev); + void (*remove)(struct rpmsg_device *dev); + int (*callback)(struct rpmsg_device *, void *, int, void *, u32); +}; + +#if IS_ENABLED(CONFIG_RPMSG) + +int register_rpmsg_device(struct rpmsg_device *dev); +void unregister_rpmsg_device(struct rpmsg_device *dev); +int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner); +void unregister_rpmsg_driver(struct rpmsg_driver *drv); +void rpmsg_destroy_ept(struct rpmsg_endpoint *); +struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, + rpmsg_rx_cb_t cb, void *priv, + struct rpmsg_channel_info chinfo); + +int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); +int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, + void *data, int len); + +int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); +int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, + void *data, int len); + +__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, + poll_table *wait); + +#else + +static inline int register_rpmsg_device(struct rpmsg_device *dev) +{ + return -ENXIO; +} + +static inline void unregister_rpmsg_device(struct rpmsg_device *dev) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline int __register_rpmsg_driver(struct rpmsg_driver *drv, + struct module *owner) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline void unregister_rpmsg_driver(struct rpmsg_driver *drv) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline void rpmsg_destroy_ept(struct rpmsg_endpoint *ept) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev, + rpmsg_rx_cb_t cb, + void *priv, + struct rpmsg_channel_info chinfo) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return NULL; +} + +static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, + u32 dst) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; + +} + +static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, + u32 dst, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, + int len, u32 dst) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, + u32 dst, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, + struct file *filp, poll_table *wait) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return 0; +} + +#endif /* 
IS_ENABLED(CONFIG_RPMSG) */ + +/* use a macro to avoid include chaining to get THIS_MODULE */ +#define register_rpmsg_driver(drv) \ + __register_rpmsg_driver(drv, THIS_MODULE) + +/** + * module_rpmsg_driver() - Helper macro for registering an rpmsg driver + * @__rpmsg_driver: rpmsg_driver struct + * + * Helper macro for rpmsg drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_rpmsg_driver(__rpmsg_driver) \ + module_driver(__rpmsg_driver, register_rpmsg_driver, \ + unregister_rpmsg_driver) + +#endif /* _LINUX_RPMSG_H */ diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h new file mode 100644 index 000000000..96e26d947 --- /dev/null +++ b/include/linux/rpmsg/qcom_glink.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_RPMSG_QCOM_GLINK_H +#define _LINUX_RPMSG_QCOM_GLINK_H + +#include + +struct qcom_glink; + +#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM) + +struct qcom_glink *qcom_glink_smem_register(struct device *parent, + struct device_node *node); +void qcom_glink_smem_unregister(struct qcom_glink *glink); + +#else + +static inline struct qcom_glink * +qcom_glink_smem_register(struct device *parent, + struct device_node *node) +{ + return NULL; +} + +static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {} + +#endif + +#endif diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h new file mode 100644 index 000000000..2e92d7407 --- /dev/null +++ b/include/linux/rpmsg/qcom_smd.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_RPMSG_QCOM_SMD_H +#define _LINUX_RPMSG_QCOM_SMD_H + +#include + +struct qcom_smd_edge; + +#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) + +struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, + struct device_node *node); +int qcom_smd_unregister_edge(struct qcom_smd_edge *edge); + +#else + +static inline struct qcom_smd_edge * +qcom_smd_register_edge(struct device *parent, + struct device_node *node) +{ + return NULL; +} + +static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) +{ + return 0; +} + +#endif + +#endif diff --git a/include/linux/rslib.h b/include/linux/rslib.h new file mode 100644 index 000000000..5974cedd0 --- /dev/null +++ b/include/linux/rslib.h @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic Reed Solomon encoder / decoder library + * + * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) + * + * RS code lifted from reed solomon library written by Phil Karn + * Copyright 2002 Phil Karn, KA9Q + */ +#ifndef _RSLIB_H_ +#define _RSLIB_H_ + +#include +#include /* for gfp_t */ +#include /* for GFP_KERNEL */ + +/** + * struct rs_codec - rs codec data + * + * @mm: Bits per symbol + * @nn: Symbols per block (= (1<mm = number of bits per symbol + * rs->nn = (2^rs->mm) - 1 + * + * Simple arithmetic modulo would return a wrong result for values + * >= 3 * rs->nn +*/ +static inline int rs_modnn(struct rs_codec *rs, int x) +{ + while (x >= rs->nn) { + x -= rs->nn; + x = (x >> rs->mm) + (x & rs->nn); + } + return x; +} + +#endif diff --git a/include/linux/rtc.h b/include/linux/rtc.h new file mode 100644 index 000000000..5a34f5994 --- /dev/null +++ b/include/linux/rtc.h @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Generic RTC interface. 
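To make the rpmsg driver model declared above concrete (struct rpmsg_driver, rpmsg_send() and the module_rpmsg_driver() helper), here is a minimal client sketch. It is illustrative only, not part of the patch; the service name "rpmsg-sample" and all sample_* names are invented for the example.

/* Illustrative sketch only: echo every inbound message back to its sender
 * using the channel's default endpoint.
 */
static int sample_cb(struct rpmsg_device *rpdev, void *data, int len,
		     void *priv, u32 src)
{
	/* rpdev->ept is the default endpoint bound when the driver probed */
	return rpmsg_send(rpdev->ept, data, len);
}

static int sample_probe(struct rpmsg_device *rpdev)
{
	dev_info(&rpdev->dev, "new channel: 0x%x -> 0x%x\n",
		 rpdev->src, rpdev->dst);
	return 0;
}

static void sample_remove(struct rpmsg_device *rpdev)
{
}

static const struct rpmsg_device_id sample_id_table[] = {
	{ .name = "rpmsg-sample" },	/* hypothetical service name */
	{ },
};
MODULE_DEVICE_TABLE(rpmsg, sample_id_table);

static struct rpmsg_driver sample_client = {
	.drv.name	= "rpmsg_sample_client",
	.id_table	= sample_id_table,
	.probe		= sample_probe,
	.remove		= sample_remove,
	.callback	= sample_cb,
};
module_rpmsg_driver(sample_client);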
+ * This version contains the part of the user interface to the Real Time Clock + * service. It is used with both the legacy mc146818 and also EFI + * Struct rtc_time and first 12 ioctl by Paul Gortmaker, 1996 - separated out + * from to this file for 2.4 kernels. + * + * Copyright (C) 1999 Hewlett-Packard Co. + * Copyright (C) 1999 Stephane Eranian + */ +#ifndef _LINUX_RTC_H_ +#define _LINUX_RTC_H_ + + +#include +#include +#include +#include + +extern int rtc_month_days(unsigned int month, unsigned int year); +extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year); +extern int rtc_valid_tm(struct rtc_time *tm); +extern time64_t rtc_tm_to_time64(struct rtc_time *tm); +extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm); +ktime_t rtc_tm_to_ktime(struct rtc_time tm); +struct rtc_time rtc_ktime_to_tm(ktime_t kt); + +/* + * rtc_tm_sub - Return the difference in seconds. + */ +static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs) +{ + return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs); +} + +static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) +{ + rtc_time64_to_tm(time, tm); +} + +static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) +{ + *time = rtc_tm_to_time64(tm); + + return 0; +} + +#include +#include +#include +#include +#include +#include +#include + +extern struct class *rtc_class; + +/* + * For these RTC methods the device parameter is the physical device + * on whatever bus holds the hardware (I2C, Platform, SPI, etc), which + * was passed to rtc_device_register(). Its driver_data normally holds + * device state, including the rtc_device pointer for the RTC. + * + * Most of these methods are called with rtc_device.ops_lock held, + * through the rtc_*(struct rtc_device *, ...) calls. + * + * The (current) exceptions are mostly filesystem hooks: + * - the proc() hook for procfs + * - non-ioctl() chardev hooks: open(), release(), read_callback() + * + * REVISIT those periodic irq calls *do* have ops_lock when they're + * issued through ioctl() ... 
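As an illustration of the ops-based interface described in the comment above, the following sketch fills in the rtc_class_ops structure defined immediately below and registers it with the core. It is not part of the patch; the foo_* names are invented, and the usual platform-driver wrapping (struct platform_device, driver registration) is assumed.

/* Illustrative sketch only: a driver supplies read_time/set_time and
 * registers the device.  foo_* names are hypothetical.
 */
static int foo_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	/* read the hardware registers and fill *tm here */
	return 0;
}

static int foo_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	/* program the hardware from *tm here */
	return 0;
}

static const struct rtc_class_ops foo_rtc_ops = {
	.read_time	= foo_rtc_read_time,
	.set_time	= foo_rtc_set_time,
};

static int foo_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc = devm_rtc_allocate_device(&pdev->dev);

	if (IS_ERR(rtc))
		return PTR_ERR(rtc);
	rtc->ops = &foo_rtc_ops;
	return rtc_register_device(rtc);
}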
+ */ +struct rtc_class_ops { + int (*ioctl)(struct device *, unsigned int, unsigned long); + int (*read_time)(struct device *, struct rtc_time *); + int (*set_time)(struct device *, struct rtc_time *); + int (*read_alarm)(struct device *, struct rtc_wkalrm *); + int (*set_alarm)(struct device *, struct rtc_wkalrm *); + int (*proc)(struct device *, struct seq_file *); + int (*set_mmss64)(struct device *, time64_t secs); + int (*set_mmss)(struct device *, unsigned long secs); + int (*read_callback)(struct device *, int data); + int (*alarm_irq_enable)(struct device *, unsigned int enabled); + int (*read_offset)(struct device *, long *offset); + int (*set_offset)(struct device *, long offset); +}; + +struct rtc_timer { + struct timerqueue_node node; + ktime_t period; + void (*func)(void *private_data); + void *private_data; + int enabled; +}; + + +/* flags */ +#define RTC_DEV_BUSY 0 + +struct rtc_device { + struct device dev; + struct module *owner; + + int id; + + const struct rtc_class_ops *ops; + struct mutex ops_lock; + + struct cdev char_dev; + unsigned long flags; + + unsigned long irq_data; + spinlock_t irq_lock; + wait_queue_head_t irq_queue; + struct fasync_struct *async_queue; + + int irq_freq; + int max_user_freq; + + struct timerqueue_head timerqueue; + struct rtc_timer aie_timer; + struct rtc_timer uie_rtctimer; + struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ + int pie_enabled; + struct work_struct irqwork; + /* Some hardware can't support UIE mode */ + int uie_unsupported; + + /* Number of nsec it takes to set the RTC clock. This influences when + * the set ops are called. An offset: + * - of 0.5 s will call RTC set for wall clock time 10.0 s at 9.5 s + * - of 1.5 s will call RTC set for wall clock time 10.0 s at 8.5 s + * - of -0.5 s will call RTC set for wall clock time 10.0 s at 10.5 s + */ + long set_offset_nsec; + + bool registered; + + struct nvmem_device *nvmem; + /* Old ABI support */ + bool nvram_old_abi; + struct bin_attribute *nvram; + + time64_t range_min; + timeu64_t range_max; + time64_t start_secs; + time64_t offset_secs; + bool set_start_time; + +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + struct work_struct uie_task; + struct timer_list uie_timer; + /* Those fields are protected by rtc->irq_lock */ + unsigned int oldsecs; + unsigned int uie_irq_active:1; + unsigned int stop_uie_polling:1; + unsigned int uie_task_active:1; + unsigned int uie_timer_active:1; +#endif +}; +#define to_rtc_device(d) container_of(d, struct rtc_device, dev) + +/* useful timestamps */ +#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */ +#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ +#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ + +extern struct rtc_device *rtc_device_register(const char *name, + struct device *dev, + const struct rtc_class_ops *ops, + struct module *owner); +extern struct rtc_device *devm_rtc_device_register(struct device *dev, + const char *name, + const struct rtc_class_ops *ops, + struct module *owner); +struct rtc_device *devm_rtc_allocate_device(struct device *dev); +int __rtc_register_device(struct module *owner, struct rtc_device *rtc); +extern void rtc_device_unregister(struct rtc_device *rtc); +extern void devm_rtc_device_unregister(struct device *dev, + struct rtc_device *rtc); + +extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); +extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); +extern int rtc_set_ntp_time(struct timespec64 now, 
unsigned long *target_nsec); +int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); +extern int rtc_read_alarm(struct rtc_device *rtc, + struct rtc_wkalrm *alrm); +extern int rtc_set_alarm(struct rtc_device *rtc, + struct rtc_wkalrm *alrm); +extern int rtc_initialize_alarm(struct rtc_device *rtc, + struct rtc_wkalrm *alrm); +extern void rtc_update_irq(struct rtc_device *rtc, + unsigned long num, unsigned long events); + +extern struct rtc_device *rtc_class_open(const char *name); +extern void rtc_class_close(struct rtc_device *rtc); + +extern int rtc_irq_set_state(struct rtc_device *rtc, int enabled); +extern int rtc_irq_set_freq(struct rtc_device *rtc, int freq); +extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, + unsigned int enabled); + +void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); +void rtc_aie_update_irq(void *private); +void rtc_uie_update_irq(void *private); +enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); + +void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data); +int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, + ktime_t expires, ktime_t period); +void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer); +int rtc_read_offset(struct rtc_device *rtc, long *offset); +int rtc_set_offset(struct rtc_device *rtc, long offset); +void rtc_timer_do_work(struct work_struct *work); + +static inline bool is_leap_year(unsigned int year) +{ + return (!(year % 4) && (year % 100)) || !(year % 400); +} + +/* Determine if we can call to driver to set the time. Drivers can only be + * called to set a second aligned time value, and the field set_offset_nsec + * specifies how far away from the second aligned time to call the driver. + * + * This also computes 'to_set' which is the time we are trying to set, and has + * a zero in tv_nsecs, such that: + * to_set - set_delay_nsec == now +/- FUZZ + * + */ +static inline bool rtc_tv_nsec_ok(s64 set_offset_nsec, + struct timespec64 *to_set, + const struct timespec64 *now) +{ + /* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. 
*/ + const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5; + struct timespec64 delay = {.tv_sec = 0, + .tv_nsec = set_offset_nsec}; + + *to_set = timespec64_add(*now, delay); + + if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) { + to_set->tv_nsec = 0; + return true; + } + + if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) { + to_set->tv_sec++; + to_set->tv_nsec = 0; + return true; + } + return false; +} + +#define rtc_register_device(device) \ + __rtc_register_device(THIS_MODULE, device) + +#ifdef CONFIG_RTC_HCTOSYS_DEVICE +extern int rtc_hctosys_ret; +#else +#define rtc_hctosys_ret -ENODEV +#endif + +#ifdef CONFIG_RTC_NVMEM +int rtc_nvmem_register(struct rtc_device *rtc, + struct nvmem_config *nvmem_config); +void rtc_nvmem_unregister(struct rtc_device *rtc); +#else +static inline int rtc_nvmem_register(struct rtc_device *rtc, + struct nvmem_config *nvmem_config) +{ + return 0; +} +static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {} +#endif + +#endif /* _LINUX_RTC_H_ */ diff --git a/include/linux/rtc/ds1286.h b/include/linux/rtc/ds1286.h new file mode 100644 index 000000000..45ea0aa0a --- /dev/null +++ b/include/linux/rtc/ds1286.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 1998, 1999, 2003 Ralf Baechle + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef __LINUX_DS1286_H +#define __LINUX_DS1286_H + +/********************************************************************** + * register summary + **********************************************************************/ +#define RTC_HUNDREDTH_SECOND 0 +#define RTC_SECONDS 1 +#define RTC_MINUTES 2 +#define RTC_MINUTES_ALARM 3 +#define RTC_HOURS 4 +#define RTC_HOURS_ALARM 5 +#define RTC_DAY 6 +#define RTC_DAY_ALARM 7 +#define RTC_DATE 8 +#define RTC_MONTH 9 +#define RTC_YEAR 10 +#define RTC_CMD 11 +#define RTC_WHSEC 12 +#define RTC_WSEC 13 +#define RTC_UNUSED 14 + +/* RTC_*_alarm is always true if 2 MSBs are set */ +# define RTC_ALARM_DONT_CARE 0xC0 + + +/* + * Bits in the month register + */ +#define RTC_EOSC 0x80 +#define RTC_ESQW 0x40 + +/* + * Bits in the Command register + */ +#define RTC_TDF 0x01 +#define RTC_WAF 0x02 +#define RTC_TDM 0x04 +#define RTC_WAM 0x08 +#define RTC_PU_LVL 0x10 +#define RTC_IBH_LO 0x20 +#define RTC_IPSW 0x40 +#define RTC_TE 0x80 + +#endif /* __LINUX_DS1286_H */ diff --git a/include/linux/rtc/ds1307.h b/include/linux/rtc/ds1307.h new file mode 100644 index 000000000..291b1c490 --- /dev/null +++ b/include/linux/rtc/ds1307.h @@ -0,0 +1,22 @@ +/* + * ds1307.h - platform_data for the ds1307 (and variants) rtc driver + * (C) Copyright 2012 by Wolfram Sang, Pengutronix e.K. + * same license as the driver + */ + +#ifndef _LINUX_DS1307_H +#define _LINUX_DS1307_H + +#include + +#define DS1307_TRICKLE_CHARGER_250_OHM 0x01 +#define DS1307_TRICKLE_CHARGER_2K_OHM 0x02 +#define DS1307_TRICKLE_CHARGER_4K_OHM 0x03 +#define DS1307_TRICKLE_CHARGER_NO_DIODE 0x04 +#define DS1307_TRICKLE_CHARGER_DIODE 0x08 + +struct ds1307_platform_data { + u8 trickle_charger_setup; +}; + +#endif /* _LINUX_DS1307_H */ diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h new file mode 100644 index 000000000..e6337a56d --- /dev/null +++ b/include/linux/rtc/ds1685.h @@ -0,0 +1,375 @@ +/* + * Definitions for the registers, addresses, and platform data of the + * DS1685/DS1687-series RTC chips. + * + * This Driver also works for the DS17X85/DS17X87 RTC chips. 
Functionally + * similar to the DS1685/DS1687, they support a few extra features which + * include larger, battery-backed NV-SRAM, burst-mode access, and an RTC + * write counter. + * + * Copyright (C) 2011-2014 Joshua Kinard . + * Copyright (C) 2009 Matthias Fuchs . + * + * References: + * DS1685/DS1687 3V/5V Real-Time Clocks, 19-5215, Rev 4/10. + * DS17x85/DS17x87 3V/5V Real-Time Clocks, 19-5222, Rev 4/10. + * DS1689/DS1693 3V/5V Serialized Real-Time Clocks, Rev 112105. + * Application Note 90, Using the Multiplex Bus RTC Extended Features. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_RTC_DS1685_H_ +#define _LINUX_RTC_DS1685_H_ + +#include +#include +#include + +/** + * struct ds1685_priv - DS1685 private data structure. + * @dev: pointer to the rtc_device structure. + * @regs: iomapped base address pointer of the RTC registers. + * @regstep: padding/step size between registers (optional). + * @baseaddr: base address of the RTC device. + * @size: resource size. + * @lock: private lock variable for spin locking/unlocking. + * @work: private workqueue. + * @irq: IRQ number assigned to the RTC device. + * @prepare_poweroff: pointer to platform pre-poweroff function. + * @wake_alarm: pointer to platform wake alarm function. + * @post_ram_clear: pointer to platform post ram-clear function. + */ +struct ds1685_priv { + struct rtc_device *dev; + void __iomem *regs; + u32 regstep; + resource_size_t baseaddr; + size_t size; + spinlock_t lock; + struct work_struct work; + int irq_num; + bool bcd_mode; + bool no_irq; + bool uie_unsupported; + bool alloc_io_resources; + u8 (*read)(struct ds1685_priv *, int); + void (*write)(struct ds1685_priv *, int, u8); + void (*prepare_poweroff)(void); + void (*wake_alarm)(void); + void (*post_ram_clear)(void); +}; + + +/** + * struct ds1685_rtc_platform_data - platform data structure. + * @plat_prepare_poweroff: platform-specific pre-poweroff function. + * @plat_wake_alarm: platform-specific wake alarm function. + * @plat_post_ram_clear: platform-specific post ram-clear function. + * + * If your platform needs to use a custom padding/step size between + * registers, or uses one or more of the extended interrupts and needs special + * handling, then include this header file in your platform definition and + * set regstep and the plat_* pointers as appropriate. + */ +struct ds1685_rtc_platform_data { + const u32 regstep; + const bool bcd_mode; + const bool no_irq; + const bool uie_unsupported; + const bool alloc_io_resources; + u8 (*plat_read)(struct ds1685_priv *, int); + void (*plat_write)(struct ds1685_priv *, int, u8); + void (*plat_prepare_poweroff)(void); + void (*plat_wake_alarm)(void); + void (*plat_post_ram_clear)(void); +}; + + +/* + * Time Registers. 
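A minimal sketch of the platform definition described above follows. It is illustrative only and not part of the patch; the stride value and the my_board_* names are invented for a hypothetical board.

/* Illustrative only: hypothetical board with RTC registers on a 32-bit bus
 * (4-byte stride) that needs a custom pre-poweroff hook.
 */
static void my_board_prepare_poweroff(void)
{
	/* quiesce board-specific wake sources here */
}

static struct ds1685_rtc_platform_data my_board_rtc_pdata = {
	.regstep		= 4,	/* one RTC register every 4 bytes */
	.bcd_mode		= true,
	.plat_prepare_poweroff	= my_board_prepare_poweroff,
};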
+ */ +#define RTC_SECS 0x00 /* Seconds 00-59 */ +#define RTC_SECS_ALARM 0x01 /* Alarm Seconds 00-59 */ +#define RTC_MINS 0x02 /* Minutes 00-59 */ +#define RTC_MINS_ALARM 0x03 /* Alarm Minutes 00-59 */ +#define RTC_HRS 0x04 /* Hours 01-12 AM/PM || 00-23 */ +#define RTC_HRS_ALARM 0x05 /* Alarm Hours 01-12 AM/PM || 00-23 */ +#define RTC_WDAY 0x06 /* Day of Week 01-07 */ +#define RTC_MDAY 0x07 /* Day of Month 01-31 */ +#define RTC_MONTH 0x08 /* Month 01-12 */ +#define RTC_YEAR 0x09 /* Year 00-99 */ +#define RTC_CENTURY 0x48 /* Century 00-99 */ +#define RTC_MDAY_ALARM 0x49 /* Alarm Day of Month 01-31 */ + + +/* + * Bit masks for the Time registers in BCD Mode (DM = 0). + */ +#define RTC_SECS_BCD_MASK 0x7f /* - x x x x x x x */ +#define RTC_MINS_BCD_MASK 0x7f /* - x x x x x x x */ +#define RTC_HRS_12_BCD_MASK 0x1f /* - - - x x x x x */ +#define RTC_HRS_24_BCD_MASK 0x3f /* - - x x x x x x */ +#define RTC_MDAY_BCD_MASK 0x3f /* - - x x x x x x */ +#define RTC_MONTH_BCD_MASK 0x1f /* - - - x x x x x */ +#define RTC_YEAR_BCD_MASK 0xff /* x x x x x x x x */ + +/* + * Bit masks for the Time registers in BIN Mode (DM = 1). + */ +#define RTC_SECS_BIN_MASK 0x3f /* - - x x x x x x */ +#define RTC_MINS_BIN_MASK 0x3f /* - - x x x x x x */ +#define RTC_HRS_12_BIN_MASK 0x0f /* - - - - x x x x */ +#define RTC_HRS_24_BIN_MASK 0x1f /* - - - x x x x x */ +#define RTC_MDAY_BIN_MASK 0x1f /* - - - x x x x x */ +#define RTC_MONTH_BIN_MASK 0x0f /* - - - - x x x x */ +#define RTC_YEAR_BIN_MASK 0x7f /* - x x x x x x x */ + +/* + * Bit masks common for the Time registers in BCD or BIN Mode. + */ +#define RTC_WDAY_MASK 0x07 /* - - - - - x x x */ +#define RTC_CENTURY_MASK 0xff /* x x x x x x x x */ +#define RTC_MDAY_ALARM_MASK 0xff /* x x x x x x x x */ +#define RTC_HRS_AMPM_MASK BIT(7) /* Mask for the AM/PM bit */ + + + +/* + * Control Registers. + */ +#define RTC_CTRL_A 0x0a /* Control Register A */ +#define RTC_CTRL_B 0x0b /* Control Register B */ +#define RTC_CTRL_C 0x0c /* Control Register C */ +#define RTC_CTRL_D 0x0d /* Control Register D */ +#define RTC_EXT_CTRL_4A 0x4a /* Extended Control Register 4A */ +#define RTC_EXT_CTRL_4B 0x4b /* Extended Control Register 4B */ + + +/* + * Bit names in Control Register A. + */ +#define RTC_CTRL_A_UIP BIT(7) /* Update In Progress */ +#define RTC_CTRL_A_DV2 BIT(6) /* Countdown Chain */ +#define RTC_CTRL_A_DV1 BIT(5) /* Oscillator Enable */ +#define RTC_CTRL_A_DV0 BIT(4) /* Bank Select */ +#define RTC_CTRL_A_RS2 BIT(2) /* Rate-Selection Bit 2 */ +#define RTC_CTRL_A_RS3 BIT(3) /* Rate-Selection Bit 3 */ +#define RTC_CTRL_A_RS1 BIT(1) /* Rate-Selection Bit 1 */ +#define RTC_CTRL_A_RS0 BIT(0) /* Rate-Selection Bit 0 */ +#define RTC_CTRL_A_RS_MASK 0x0f /* RS3 + RS2 + RS1 + RS0 */ + +/* + * Bit names in Control Register B. + */ +#define RTC_CTRL_B_SET BIT(7) /* SET Bit */ +#define RTC_CTRL_B_PIE BIT(6) /* Periodic-Interrupt Enable */ +#define RTC_CTRL_B_AIE BIT(5) /* Alarm-Interrupt Enable */ +#define RTC_CTRL_B_UIE BIT(4) /* Update-Ended Interrupt-Enable */ +#define RTC_CTRL_B_SQWE BIT(3) /* Square-Wave Enable */ +#define RTC_CTRL_B_DM BIT(2) /* Data Mode */ +#define RTC_CTRL_B_2412 BIT(1) /* 12-Hr/24-Hr Mode */ +#define RTC_CTRL_B_DSE BIT(0) /* Daylight Savings Enable */ +#define RTC_CTRL_B_PAU_MASK 0x70 /* PIE + AIE + UIE */ + + +/* + * Bit names in Control Register C. + * + * BIT(0), BIT(1), BIT(2), & BIT(3) are unused, always return 0, and cannot + * be written to. 
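The data-mode masks above are applied to the raw register value before conversion. A hedged sketch, not part of the patch, of reading the seconds register through the driver's read hook; it assumes bcd2bin() from linux/bcd.h and the ds1685_priv fields defined earlier.

/* Illustrative only: convert the raw seconds register according to the
 * data mode recorded in ds1685_priv (bcd2bin() comes from linux/bcd.h).
 */
static int example_read_seconds(struct ds1685_priv *rtc)
{
	u8 raw = rtc->read(rtc, RTC_SECS);

	return rtc->bcd_mode ? bcd2bin(raw & RTC_SECS_BCD_MASK)
			     : (raw & RTC_SECS_BIN_MASK);
}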
+ */ +#define RTC_CTRL_C_IRQF BIT(7) /* Interrupt-Request Flag */ +#define RTC_CTRL_C_PF BIT(6) /* Periodic-Interrupt Flag */ +#define RTC_CTRL_C_AF BIT(5) /* Alarm-Interrupt Flag */ +#define RTC_CTRL_C_UF BIT(4) /* Update-Ended Interrupt Flag */ +#define RTC_CTRL_C_PAU_MASK 0x70 /* PF + AF + UF */ + + +/* + * Bit names in Control Register D. + * + * BIT(0) through BIT(6) are unused, always return 0, and cannot + * be written to. + */ +#define RTC_CTRL_D_VRT BIT(7) /* Valid RAM and Time */ + + +/* + * Bit names in Extended Control Register 4A. + * + * On the DS1685/DS1687/DS1689/DS1693, BIT(4) and BIT(5) are reserved for + * future use. They can be read from and written to, but have no effect + * on the RTC's operation. + * + * On the DS17x85/DS17x87, BIT(5) is Burst-Mode Enable (BME), and allows + * access to the extended NV-SRAM by automatically incrementing the address + * register when they are read from or written to. + */ +#define RTC_CTRL_4A_VRT2 BIT(7) /* Auxillary Battery Status */ +#define RTC_CTRL_4A_INCR BIT(6) /* Increment-in-Progress Status */ +#define RTC_CTRL_4A_PAB BIT(3) /* Power-Active Bar Control */ +#define RTC_CTRL_4A_RF BIT(2) /* RAM-Clear Flag */ +#define RTC_CTRL_4A_WF BIT(1) /* Wake-Up Alarm Flag */ +#define RTC_CTRL_4A_KF BIT(0) /* Kickstart Flag */ +#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689) +#define RTC_CTRL_4A_BME BIT(5) /* Burst-Mode Enable */ +#endif +#define RTC_CTRL_4A_RWK_MASK 0x07 /* RF + WF + KF */ + + +/* + * Bit names in Extended Control Register 4B. + */ +#define RTC_CTRL_4B_ABE BIT(7) /* Auxillary Battery Enable */ +#define RTC_CTRL_4B_E32K BIT(6) /* Enable 32.768Hz on SQW Pin */ +#define RTC_CTRL_4B_CS BIT(5) /* Crystal Select */ +#define RTC_CTRL_4B_RCE BIT(4) /* RAM Clear-Enable */ +#define RTC_CTRL_4B_PRS BIT(3) /* PAB Reset-Select */ +#define RTC_CTRL_4B_RIE BIT(2) /* RAM Clear-Interrupt Enable */ +#define RTC_CTRL_4B_WIE BIT(1) /* Wake-Up Alarm-Interrupt Enable */ +#define RTC_CTRL_4B_KSE BIT(0) /* Kickstart Interrupt-Enable */ +#define RTC_CTRL_4B_RWK_MASK 0x07 /* RIE + WIE + KSE */ + + +/* + * Misc register names in Bank 1. + * + * The DV0 bit in Control Register A must be set to 1 for these registers + * to become available, including Extended Control Registers 4A & 4B. + */ +#define RTC_BANK1_SSN_MODEL 0x40 /* Model Number */ +#define RTC_BANK1_SSN_BYTE_1 0x41 /* 1st Byte of Serial Number */ +#define RTC_BANK1_SSN_BYTE_2 0x42 /* 2nd Byte of Serial Number */ +#define RTC_BANK1_SSN_BYTE_3 0x43 /* 3rd Byte of Serial Number */ +#define RTC_BANK1_SSN_BYTE_4 0x44 /* 4th Byte of Serial Number */ +#define RTC_BANK1_SSN_BYTE_5 0x45 /* 5th Byte of Serial Number */ +#define RTC_BANK1_SSN_BYTE_6 0x46 /* 6th Byte of Serial Number */ +#define RTC_BANK1_SSN_CRC 0x47 /* Serial CRC Byte */ +#define RTC_BANK1_RAM_DATA_PORT 0x53 /* Extended RAM Data Port */ + + +/* + * Model-specific registers in Bank 1. + * + * The addresses below differ depending on the model of the RTC chip + * selected in the kernel configuration. Not all of these features are + * supported in the main driver at present. + * + * DS1685/DS1687 - Extended NV-SRAM address (LSB only). + * DS1689/DS1693 - Vcc, Vbat, Pwr Cycle Counters & Customer-specific S/N. + * DS17x85/DS17x87 - Extended NV-SRAM addresses (MSB & LSB) & Write counter. 
+ */ +#if defined(CONFIG_RTC_DRV_DS1685) +#define RTC_BANK1_RAM_ADDR 0x50 /* NV-SRAM Addr */ +#elif defined(CONFIG_RTC_DRV_DS1689) +#define RTC_BANK1_VCC_CTR_LSB 0x54 /* Vcc Counter Addr (LSB) */ +#define RTC_BANK1_VCC_CTR_MSB 0x57 /* Vcc Counter Addr (MSB) */ +#define RTC_BANK1_VBAT_CTR_LSB 0x58 /* Vbat Counter Addr (LSB) */ +#define RTC_BANK1_VBAT_CTR_MSB 0x5b /* Vbat Counter Addr (MSB) */ +#define RTC_BANK1_PWR_CTR_LSB 0x5c /* Pwr Cycle Counter Addr (LSB) */ +#define RTC_BANK1_PWR_CTR_MSB 0x5d /* Pwr Cycle Counter Addr (MSB) */ +#define RTC_BANK1_UNIQ_SN 0x60 /* Customer-specific S/N */ +#else /* DS17x85/DS17x87 */ +#define RTC_BANK1_RAM_ADDR_LSB 0x50 /* NV-SRAM Addr (LSB) */ +#define RTC_BANK1_RAM_ADDR_MSB 0x51 /* NV-SRAM Addr (MSB) */ +#define RTC_BANK1_WRITE_CTR 0x5e /* RTC Write Counter */ +#endif + + +/* + * Model numbers. + * + * The DS1688/DS1691 and DS1689/DS1693 chips share the same model number + * and the manual doesn't indicate any major differences. As such, they + * are regarded as the same chip in this driver. + */ +#define RTC_MODEL_DS1685 0x71 /* DS1685/DS1687 */ +#define RTC_MODEL_DS17285 0x72 /* DS17285/DS17287 */ +#define RTC_MODEL_DS1689 0x73 /* DS1688/DS1691/DS1689/DS1693 */ +#define RTC_MODEL_DS17485 0x74 /* DS17485/DS17487 */ +#define RTC_MODEL_DS17885 0x78 /* DS17885/DS17887 */ + + +/* + * Periodic Interrupt Rates / Square-Wave Output Frequency + * + * Periodic rates are selected by setting the RS3-RS0 bits in Control + * Register A and enabled via either the E32K bit in Extended Control + * Register 4B or the SQWE bit in Control Register B. + * + * E32K overrides the settings of RS3-RS0 and outputs a frequency of 32768Hz + * on the SQW pin of the RTC chip. While there are 16 possible selections, + * the 1-of-16 decoder is only able to divide the base 32768Hz signal into 13 + * smaller frequencies. The values 0x01 and 0x02 are not used and are + * synonymous with 0x08 and 0x09, respectively. + * + * When E32K is set to a logic 1, periodic interrupts are disabled and reading + * /dev/rtc will return -EINVAL. This also applies if the periodic interrupt + * frequency is set to 0Hz. + * + * Not currently used by the rtc-ds1685 driver because the RTC core removed + * support for hardware-generated periodic-interrupts in favour of + * hrtimer-generated interrupts. But these defines are kept around for use + * in userland, as documentation to the hardware, and possible future use if + * hardware-generated periodic interrupts are ever added back. + */ + /* E32K RS3 RS2 RS1 RS0 */ +#define RTC_SQW_8192HZ 0x03 /* 0 0 0 1 1 */ +#define RTC_SQW_4096HZ 0x04 /* 0 0 1 0 0 */ +#define RTC_SQW_2048HZ 0x05 /* 0 0 1 0 1 */ +#define RTC_SQW_1024HZ 0x06 /* 0 0 1 1 0 */ +#define RTC_SQW_512HZ 0x07 /* 0 0 1 1 1 */ +#define RTC_SQW_256HZ 0x08 /* 0 1 0 0 0 */ +#define RTC_SQW_128HZ 0x09 /* 0 1 0 0 1 */ +#define RTC_SQW_64HZ 0x0a /* 0 1 0 1 0 */ +#define RTC_SQW_32HZ 0x0b /* 0 1 0 1 1 */ +#define RTC_SQW_16HZ 0x0c /* 0 1 1 0 0 */ +#define RTC_SQW_8HZ 0x0d /* 0 1 1 0 1 */ +#define RTC_SQW_4HZ 0x0e /* 0 1 1 1 0 */ +#define RTC_SQW_2HZ 0x0f /* 0 1 1 1 1 */ +#define RTC_SQW_0HZ 0x00 /* 0 0 0 0 0 */ +#define RTC_SQW_32768HZ 32768 /* 1 - - - - */ +#define RTC_MAX_USER_FREQ 8192 + + +/* + * NVRAM data & addresses: + * - 50 bytes of NVRAM are available just past the clock registers. + * - 64 additional bytes are available in Bank0. + * + * Extended, battery-backed NV-SRAM: + * - DS1685/DS1687 - 128 bytes. + * - DS1689/DS1693 - 0 bytes. + * - DS17285/DS17287 - 2048 bytes. 
+ * - DS17485/DS17487 - 4096 bytes. + * - DS17885/DS17887 - 8192 bytes. + */ +#define NVRAM_TIME_BASE 0x0e /* NVRAM Addr in Time regs */ +#define NVRAM_BANK0_BASE 0x40 /* NVRAM Addr in Bank0 regs */ +#define NVRAM_SZ_TIME 50 +#define NVRAM_SZ_BANK0 64 +#if defined(CONFIG_RTC_DRV_DS1685) +# define NVRAM_SZ_EXTND 128 +#elif defined(CONFIG_RTC_DRV_DS1689) +# define NVRAM_SZ_EXTND 0 +#elif defined(CONFIG_RTC_DRV_DS17285) +# define NVRAM_SZ_EXTND 2048 +#elif defined(CONFIG_RTC_DRV_DS17485) +# define NVRAM_SZ_EXTND 4096 +#elif defined(CONFIG_RTC_DRV_DS17885) +# define NVRAM_SZ_EXTND 8192 +#endif +#define NVRAM_TOTAL_SZ_BANK0 (NVRAM_SZ_TIME + NVRAM_SZ_BANK0) +#define NVRAM_TOTAL_SZ (NVRAM_TOTAL_SZ_BANK0 + NVRAM_SZ_EXTND) + + +/* + * Function Prototypes. + */ +extern void __noreturn +ds1685_rtc_poweroff(struct platform_device *pdev); + +#endif /* _LINUX_RTC_DS1685_H_ */ diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h new file mode 100644 index 000000000..6fc961459 --- /dev/null +++ b/include/linux/rtc/m48t59.h @@ -0,0 +1,64 @@ +/* + * include/linux/rtc/m48t59.h + * + * Definitions for the platform data of m48t59 RTC chip driver. + * + * Copyright (c) 2007 Wind River Systems, Inc. + * + * Mark Zhan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_RTC_M48T59_H_ +#define _LINUX_RTC_M48T59_H_ + +/* + * M48T59 Register Offset + */ +#define M48T59_YEAR 0xf +#define M48T59_MONTH 0xe +#define M48T59_MDAY 0xd /* Day of Month */ +#define M48T59_WDAY 0xc /* Day of Week */ +#define M48T59_WDAY_CB 0x20 /* Century Bit */ +#define M48T59_WDAY_CEB 0x10 /* Century Enable Bit */ +#define M48T59_HOUR 0xb +#define M48T59_MIN 0xa +#define M48T59_SEC 0x9 +#define M48T59_CNTL 0x8 +#define M48T59_CNTL_READ 0x40 +#define M48T59_CNTL_WRITE 0x80 +#define M48T59_WATCHDOG 0x7 +#define M48T59_INTR 0x6 +#define M48T59_INTR_AFE 0x80 /* Alarm Interrupt Enable */ +#define M48T59_INTR_ABE 0x20 +#define M48T59_ALARM_DATE 0x5 +#define M48T59_ALARM_HOUR 0x4 +#define M48T59_ALARM_MIN 0x3 +#define M48T59_ALARM_SEC 0x2 +#define M48T59_UNUSED 0x1 +#define M48T59_FLAGS 0x0 +#define M48T59_FLAGS_WDT 0x80 /* watchdog timer expired */ +#define M48T59_FLAGS_AF 0x40 /* alarm */ +#define M48T59_FLAGS_BF 0x10 /* low battery */ + +#define M48T59RTC_TYPE_M48T59 0 /* to keep compatibility */ +#define M48T59RTC_TYPE_M48T02 1 +#define M48T59RTC_TYPE_M48T08 2 + +struct m48t59_plat_data { + /* The method to access M48T59 registers */ + void (*write_byte)(struct device *dev, u32 ofs, u8 val); + unsigned char (*read_byte)(struct device *dev, u32 ofs); + + int type; /* RTC model */ + + /* ioaddr mapped externally */ + void __iomem *ioaddr; + /* offset to RTC registers, automatically set according to the type */ + unsigned int offset; +}; + +#endif /* _LINUX_RTC_M48T59_H_ */ diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h new file mode 100644 index 000000000..aefd99726 --- /dev/null +++ b/include/linux/rtc/sirfsoc_rtciobrg.h @@ -0,0 +1,22 @@ +/* + * RTC I/O Bridge interfaces for CSR SiRFprimaII + * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module + * + * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. 
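The m48t59 platform data above leaves register access to board code. A short sketch of wiring it up; illustrative only, not part of the patch, with invented my_board_* accessors.

/* Illustrative only: the board supplies byte accessors and the chip type. */
static void my_board_nvram_write(struct device *dev, u32 ofs, u8 val)
{
	/* write one byte at offset 'ofs' through the board's bus glue */
}

static u8 my_board_nvram_read(struct device *dev, u32 ofs)
{
	/* read one byte at offset 'ofs' */
	return 0;
}

static struct m48t59_plat_data my_board_m48t59_pdata = {
	.write_byte	= my_board_nvram_write,
	.read_byte	= my_board_nvram_read,
	.type		= M48T59RTC_TYPE_M48T59,
};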
+ */ +#ifndef _SIRFSOC_RTC_IOBRG_H_ +#define _SIRFSOC_RTC_IOBRG_H_ + +struct regmap_config; + +extern void sirfsoc_rtc_iobrg_besyncing(void); + +extern u32 sirfsoc_rtc_iobrg_readl(u32 addr); + +extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr); +struct regmap *devm_regmap_init_iobg(struct device *dev, + const struct regmap_config *config); + +#endif diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h new file mode 100644 index 000000000..6fd615a0e --- /dev/null +++ b/include/linux/rtmutex.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * RT Mutexes: blocking mutual exclusion locks with PI support + * + * started by Ingo Molnar and Thomas Gleixner: + * + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner + * + * This file contains the public data structure and API definitions. + */ + +#ifndef __LINUX_RT_MUTEX_H +#define __LINUX_RT_MUTEX_H + +#include +#include +#include + +extern int max_lock_depth; /* for sysctl */ + +/** + * The rt_mutex structure + * + * @wait_lock: spinlock to protect the structure + * @waiters: rbtree root to enqueue waiters in priority order; + * caches top-waiter (leftmost node). + * @owner: the mutex owner + */ +struct rt_mutex { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; + const char *name, *file; + int line; + void *magic; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +struct rt_mutex_waiter; +struct hrtimer_sleeper; + +#ifdef CONFIG_DEBUG_RT_MUTEXES + extern int rt_mutex_debug_check_no_locks_freed(const void *from, + unsigned long len); + extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task); +#else + static inline int rt_mutex_debug_check_no_locks_freed(const void *from, + unsigned long len) + { + return 0; + } +# define rt_mutex_debug_check_no_locks_held(task) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ + +# define rt_mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + __rt_mutex_init(mutex, __func__, &__key); \ +} while (0) + + extern void rt_mutex_debug_task_free(struct task_struct *tsk); +#else +# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL) +# define rt_mutex_debug_task_free(t) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ + , .dep_map = { .name = #mutexname } +#else +#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) +#endif + +#define __RT_MUTEX_INITIALIZER(mutexname) \ + { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .waiters = RB_ROOT_CACHED \ + , .owner = NULL \ + __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} + +#define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) + +/** + * rt_mutex_is_locked - is the mutex locked + * @lock: the mutex to be queried + * + * Returns 1 if the mutex is locked, 0 if unlocked. 
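A brief sketch, illustrative only and not part of the patch, of defining an rt_mutex with the initializer above and taking it with the lock/unlock entry points declared just below:

/* Illustrative only: static definition plus a critical section. */
static DEFINE_RT_MUTEX(example_lock);

static void example_critical_section(void)
{
	rt_mutex_lock(&example_lock);
	/* ... code that needs priority-inheritance protection ... */
	rt_mutex_unlock(&example_lock);
}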
+ */ +static inline int rt_mutex_is_locked(struct rt_mutex *lock) +{ + return lock->owner != NULL; +} + +extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); +extern void rt_mutex_destroy(struct rt_mutex *lock); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); +#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0) +#else +extern void rt_mutex_lock(struct rt_mutex *lock); +#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock) +#endif + +extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); +extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout); + +extern int rt_mutex_trylock(struct rt_mutex *lock); + +extern void rt_mutex_unlock(struct rt_mutex *lock); + +#endif diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h new file mode 100644 index 000000000..bb9cb8411 --- /dev/null +++ b/include/linux/rtnetlink.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_RTNETLINK_H +#define __LINUX_RTNETLINK_H + + +#include +#include +#include +#include +#include + +extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); +extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); +extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, + u32 group, struct nlmsghdr *nlh, gfp_t flags); +extern void rtnl_set_sk_err(struct net *net, u32 group, int error); +extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics); +extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, + u32 id, long expires, u32 error); + +void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); +void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, + gfp_t flags, int *new_nsid, int new_ifindex); +struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, + unsigned change, u32 event, + gfp_t flags, int *new_nsid, + int new_ifindex); +void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, + gfp_t flags); + + +/* RTNL is used as a global lock for all changes to network configuration */ +extern void rtnl_lock(void); +extern void rtnl_unlock(void); +extern int rtnl_trylock(void); +extern int rtnl_is_locked(void); +extern int rtnl_lock_killable(void); +extern bool refcount_dec_and_rtnl_lock(refcount_t *r); + +extern wait_queue_head_t netdev_unregistering_wq; +extern struct rw_semaphore pernet_ops_rwsem; +extern struct rw_semaphore net_rwsem; + +#ifdef CONFIG_PROVE_LOCKING +extern bool lockdep_rtnl_is_held(void); +#else +static inline bool lockdep_rtnl_is_held(void) +{ + return true; +} +#endif /* #ifdef CONFIG_PROVE_LOCKING */ + +/** + * rcu_dereference_rtnl - rcu_dereference with debug checking + * @p: The pointer to read, prior to dereferencing + * + * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() + * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference() + */ +#define rcu_dereference_rtnl(p) \ + rcu_dereference_check(p, lockdep_rtnl_is_held()) + +/** + * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking + * @p: The pointer to read, prior to dereference + * + * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh() + * or RTNL. 
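A short sketch, illustrative only and not part of the patch, of the two access patterns these annotations distinguish; struct example_cfg and example_ptr are hypothetical.

/* Illustrative only: reader under RCU vs. updater under RTNL. */
static struct example_cfg __rcu *example_ptr;	/* hypothetical RCU-protected pointer */

static void example_reader(void)
{
	struct example_cfg *cfg;

	rcu_read_lock();
	cfg = rcu_dereference_rtnl(example_ptr);	/* RCU or RTNL is enough */
	/* ... use cfg ... */
	rcu_read_unlock();
}

static void example_updater(struct example_cfg *new_cfg)
{
	rtnl_lock();
	/* under RTNL, rtnl_dereference() (defined just below) is also valid */
	rcu_assign_pointer(example_ptr, new_cfg);
	rtnl_unlock();
}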
Note : Please prefer rtnl_dereference() or rcu_dereference_bh() + */ +#define rcu_dereference_bh_rtnl(p) \ + rcu_dereference_bh_check(p, lockdep_rtnl_is_held()) + +/** + * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL + * @p: The pointer to read, prior to dereferencing + * + * Return the value of the specified RCU-protected pointer, but omit + * the READ_ONCE(), because caller holds RTNL. + */ +#define rtnl_dereference(p) \ + rcu_dereference_protected(p, lockdep_rtnl_is_held()) + +static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) +{ + return rtnl_dereference(dev->ingress_queue); +} + +static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev) +{ + return rcu_dereference(dev->ingress_queue); +} + +struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); + +#ifdef CONFIG_NET_INGRESS +void net_inc_ingress_queue(void); +void net_dec_ingress_queue(void); +#endif + +#ifdef CONFIG_NET_EGRESS +void net_inc_egress_queue(void); +void net_dec_egress_queue(void); +#endif + +void rtnetlink_init(void); +void __rtnl_unlock(void); +void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); + +#define ASSERT_RTNL() \ + WARN_ONCE(!rtnl_is_locked(), \ + "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) + +extern int ndo_dflt_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, + int *idx); +extern int ndo_dflt_fdb_add(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, + u16 flags); +extern int ndo_dflt_fdb_del(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid); + +extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u16 mode, + u32 flags, u32 mask, int nlflags, + u32 filter_mask, + int (*vlan_fill)(struct sk_buff *skb, + struct net_device *dev, + u32 filter_mask)); +#endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/rtsx_common.h b/include/linux/rtsx_common.h new file mode 100644 index 000000000..443176ee1 --- /dev/null +++ b/include/linux/rtsx_common.h @@ -0,0 +1,50 @@ +/* Driver for Realtek driver-based card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ * + * Author: + * Wei WANG + */ + +#ifndef __RTSX_COMMON_H +#define __RTSX_COMMON_H + +#define DRV_NAME_RTSX_PCI "rtsx_pci" +#define DRV_NAME_RTSX_PCI_SDMMC "rtsx_pci_sdmmc" +#define DRV_NAME_RTSX_PCI_MS "rtsx_pci_ms" + +#define RTSX_REG_PAIR(addr, val) (((u32)(addr) << 16) | (u8)(val)) + +#define RTSX_SSC_DEPTH_4M 0x01 +#define RTSX_SSC_DEPTH_2M 0x02 +#define RTSX_SSC_DEPTH_1M 0x03 +#define RTSX_SSC_DEPTH_500K 0x04 +#define RTSX_SSC_DEPTH_250K 0x05 + +#define RTSX_SD_CARD 0 +#define RTSX_MS_CARD 1 + +#define CLK_TO_DIV_N 0 +#define DIV_N_TO_CLK 1 + +struct platform_device; + +struct rtsx_slot { + struct platform_device *p_dev; + void (*card_event)(struct platform_device *p_dev); +}; + +#endif diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h new file mode 100644 index 000000000..e964bbd03 --- /dev/null +++ b/include/linux/rtsx_pci.h @@ -0,0 +1,1367 @@ +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * Author: + * Wei WANG + */ + +#ifndef __RTSX_PCI_H +#define __RTSX_PCI_H + +#include +#include +#include + +#define MAX_RW_REG_CNT 1024 + +#define RTSX_HCBAR 0x00 +#define RTSX_HCBCTLR 0x04 +#define STOP_CMD (0x01 << 28) +#define READ_REG_CMD 0 +#define WRITE_REG_CMD 1 +#define CHECK_REG_CMD 2 + +#define RTSX_HDBAR 0x08 +#define RTSX_SG_INT 0x04 +#define RTSX_SG_END 0x02 +#define RTSX_SG_VALID 0x01 +#define RTSX_SG_NO_OP 0x00 +#define RTSX_SG_TRANS_DATA (0x02 << 4) +#define RTSX_SG_LINK_DESC (0x03 << 4) +#define RTSX_HDBCTLR 0x0C +#define SDMA_MODE 0x00 +#define ADMA_MODE (0x02 << 26) +#define STOP_DMA (0x01 << 28) +#define TRIG_DMA (0x01 << 31) + +#define RTSX_HAIMR 0x10 +#define HAIMR_TRANS_START (0x01 << 31) +#define HAIMR_READ 0x00 +#define HAIMR_WRITE (0x01 << 30) +#define HAIMR_READ_START (HAIMR_TRANS_START | HAIMR_READ) +#define HAIMR_WRITE_START (HAIMR_TRANS_START | HAIMR_WRITE) +#define HAIMR_TRANS_END (HAIMR_TRANS_START) + +#define RTSX_BIPR 0x14 +#define CMD_DONE_INT (1 << 31) +#define DATA_DONE_INT (1 << 30) +#define TRANS_OK_INT (1 << 29) +#define TRANS_FAIL_INT (1 << 28) +#define XD_INT (1 << 27) +#define MS_INT (1 << 26) +#define SD_INT (1 << 25) +#define GPIO0_INT (1 << 24) +#define OC_INT (1 << 23) +#define SD_WRITE_PROTECT (1 << 19) +#define XD_EXIST (1 << 18) +#define MS_EXIST (1 << 17) +#define SD_EXIST (1 << 16) +#define DELINK_INT GPIO0_INT +#define MS_OC_INT (1 << 23) +#define SD_OC_INT (1 << 22) + +#define CARD_INT (XD_INT | MS_INT | SD_INT) +#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT) +#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | \ + CARD_INT | GPIO0_INT | OC_INT) +#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST) + +#define RTSX_BIER 0x18 +#define CMD_DONE_INT_EN (1 << 31) +#define DATA_DONE_INT_EN (1 << 30) +#define TRANS_OK_INT_EN (1 << 29) +#define TRANS_FAIL_INT_EN (1 << 28) +#define XD_INT_EN (1 << 27) +#define MS_INT_EN 
(1 << 26) +#define SD_INT_EN (1 << 25) +#define GPIO0_INT_EN (1 << 24) +#define OC_INT_EN (1 << 23) +#define DELINK_INT_EN GPIO0_INT_EN +#define MS_OC_INT_EN (1 << 23) +#define SD_OC_INT_EN (1 << 22) + + +/* + * macros for easy use + */ +#define rtsx_pci_writel(pcr, reg, value) \ + iowrite32(value, (pcr)->remap_addr + reg) +#define rtsx_pci_readl(pcr, reg) \ + ioread32((pcr)->remap_addr + reg) +#define rtsx_pci_writew(pcr, reg, value) \ + iowrite16(value, (pcr)->remap_addr + reg) +#define rtsx_pci_readw(pcr, reg) \ + ioread16((pcr)->remap_addr + reg) +#define rtsx_pci_writeb(pcr, reg, value) \ + iowrite8(value, (pcr)->remap_addr + reg) +#define rtsx_pci_readb(pcr, reg) \ + ioread8((pcr)->remap_addr + reg) + +#define rtsx_pci_read_config_byte(pcr, where, val) \ + pci_read_config_byte((pcr)->pci, where, val) + +#define rtsx_pci_write_config_byte(pcr, where, val) \ + pci_write_config_byte((pcr)->pci, where, val) + +#define rtsx_pci_read_config_dword(pcr, where, val) \ + pci_read_config_dword((pcr)->pci, where, val) + +#define rtsx_pci_write_config_dword(pcr, where, val) \ + pci_write_config_dword((pcr)->pci, where, val) + +#define STATE_TRANS_NONE 0 +#define STATE_TRANS_CMD 1 +#define STATE_TRANS_BUF 2 +#define STATE_TRANS_SG 3 + +#define TRANS_NOT_READY 0 +#define TRANS_RESULT_OK 1 +#define TRANS_RESULT_FAIL 2 +#define TRANS_NO_DEVICE 3 + +#define RTSX_RESV_BUF_LEN 4096 +#define HOST_CMDS_BUF_LEN 1024 +#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN) +#define HOST_SG_TBL_ITEMS (HOST_SG_TBL_BUF_LEN / 8) +#define MAX_SG_ITEM_LEN 0x80000 +#define HOST_TO_DEVICE 0 +#define DEVICE_TO_HOST 1 + +#define OUTPUT_3V3 0 +#define OUTPUT_1V8 1 + +#define RTSX_PHASE_MAX 32 +#define RX_TUNING_CNT 3 + +#define MS_CFG 0xFD40 +#define SAMPLE_TIME_RISING 0x00 +#define SAMPLE_TIME_FALLING 0x80 +#define PUSH_TIME_DEFAULT 0x00 +#define PUSH_TIME_ODD 0x40 +#define NO_EXTEND_TOGGLE 0x00 +#define EXTEND_TOGGLE_CHK 0x20 +#define MS_BUS_WIDTH_1 0x00 +#define MS_BUS_WIDTH_4 0x10 +#define MS_BUS_WIDTH_8 0x18 +#define MS_2K_SECTOR_MODE 0x04 +#define MS_512_SECTOR_MODE 0x00 +#define MS_TOGGLE_TIMEOUT_EN 0x00 +#define MS_TOGGLE_TIMEOUT_DISEN 0x01 +#define MS_NO_CHECK_INT 0x02 +#define MS_TPC 0xFD41 +#define MS_TRANS_CFG 0xFD42 +#define WAIT_INT 0x80 +#define NO_WAIT_INT 0x00 +#define NO_AUTO_READ_INT_REG 0x00 +#define AUTO_READ_INT_REG 0x40 +#define MS_CRC16_ERR 0x20 +#define MS_RDY_TIMEOUT 0x10 +#define MS_INT_CMDNK 0x08 +#define MS_INT_BREQ 0x04 +#define MS_INT_ERR 0x02 +#define MS_INT_CED 0x01 +#define MS_TRANSFER 0xFD43 +#define MS_TRANSFER_START 0x80 +#define MS_TRANSFER_END 0x40 +#define MS_TRANSFER_ERR 0x20 +#define MS_BS_STATE 0x10 +#define MS_TM_READ_BYTES 0x00 +#define MS_TM_NORMAL_READ 0x01 +#define MS_TM_WRITE_BYTES 0x04 +#define MS_TM_NORMAL_WRITE 0x05 +#define MS_TM_AUTO_READ 0x08 +#define MS_TM_AUTO_WRITE 0x0C +#define MS_INT_REG 0xFD44 +#define MS_BYTE_CNT 0xFD45 +#define MS_SECTOR_CNT_L 0xFD46 +#define MS_SECTOR_CNT_H 0xFD47 +#define MS_DBUS_H 0xFD48 + +#define SD_CFG1 0xFDA0 +#define SD_CLK_DIVIDE_0 0x00 +#define SD_CLK_DIVIDE_256 0xC0 +#define SD_CLK_DIVIDE_128 0x80 +#define SD_BUS_WIDTH_1BIT 0x00 +#define SD_BUS_WIDTH_4BIT 0x01 +#define SD_BUS_WIDTH_8BIT 0x02 +#define SD_ASYNC_FIFO_NOT_RST 0x10 +#define SD_20_MODE 0x00 +#define SD_DDR_MODE 0x04 +#define SD_30_MODE 0x08 +#define SD_CLK_DIVIDE_MASK 0xC0 +#define SD_MODE_SELECT_MASK 0x0C +#define SD_CFG2 0xFDA1 +#define SD_CALCULATE_CRC7 0x00 +#define SD_NO_CALCULATE_CRC7 0x80 +#define SD_CHECK_CRC16 0x00 +#define SD_NO_CHECK_CRC16 0x40 
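
The interrupt bits in RTSX_BIPR/RTSX_BIER above, together with the rtsx_pci_readl()/rtsx_pci_writel() accessors, are all that is needed to decode controller events. The following is a minimal, illustrative sketch only (not part of the patch): the handler name is hypothetical, struct rtsx_pcr is declared further down in this header, and <linux/interrupt.h> is assumed for irqreturn_t.

/*
 * Illustrative only: decode RTSX_BIPR using the accessor macros above.
 * "pcr" is assumed to be a fully initialised struct rtsx_pcr.
 */
static irqreturn_t example_rtsx_isr(int irq, void *dev_id)
{
	struct rtsx_pcr *pcr = dev_id;
	u32 int_reg;

	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
	if (!(int_reg & RTSX_INT))
		return IRQ_NONE;		/* interrupt is not ours */

	/* writing the status word back clears the asserted bits */
	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);

	if (int_reg & CARD_INT) {
		/* SD_EXIST/MS_EXIST/XD_EXIST report current card presence */
		pr_debug("card event, exist bits 0x%08x\n",
			 int_reg & CARD_EXIST);
	}
	if (int_reg & TRANS_FAIL_INT)
		pr_debug("data transfer failed\n");

	return IRQ_HANDLED;
}
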
+#define SD_NO_CHECK_WAIT_CRC_TO 0x20 +#define SD_WAIT_BUSY_END 0x08 +#define SD_NO_WAIT_BUSY_END 0x00 +#define SD_CHECK_CRC7 0x00 +#define SD_NO_CHECK_CRC7 0x04 +#define SD_RSP_LEN_0 0x00 +#define SD_RSP_LEN_6 0x01 +#define SD_RSP_LEN_17 0x02 +#define SD_RSP_TYPE_R0 0x04 +#define SD_RSP_TYPE_R1 0x01 +#define SD_RSP_TYPE_R1b 0x09 +#define SD_RSP_TYPE_R2 0x02 +#define SD_RSP_TYPE_R3 0x05 +#define SD_RSP_TYPE_R4 0x05 +#define SD_RSP_TYPE_R5 0x01 +#define SD_RSP_TYPE_R6 0x01 +#define SD_RSP_TYPE_R7 0x01 +#define SD_CFG3 0xFDA2 +#define SD30_CLK_END_EN 0x10 +#define SD_RSP_80CLK_TIMEOUT_EN 0x01 + +#define SD_STAT1 0xFDA3 +#define SD_CRC7_ERR 0x80 +#define SD_CRC16_ERR 0x40 +#define SD_CRC_WRITE_ERR 0x20 +#define SD_CRC_WRITE_ERR_MASK 0x1C +#define GET_CRC_TIME_OUT 0x02 +#define SD_TUNING_COMPARE_ERR 0x01 +#define SD_STAT2 0xFDA4 +#define SD_RSP_80CLK_TIMEOUT 0x01 + +#define SD_BUS_STAT 0xFDA5 +#define SD_CLK_TOGGLE_EN 0x80 +#define SD_CLK_FORCE_STOP 0x40 +#define SD_DAT3_STATUS 0x10 +#define SD_DAT2_STATUS 0x08 +#define SD_DAT1_STATUS 0x04 +#define SD_DAT0_STATUS 0x02 +#define SD_CMD_STATUS 0x01 +#define SD_PAD_CTL 0xFDA6 +#define SD_IO_USING_1V8 0x80 +#define SD_IO_USING_3V3 0x7F +#define TYPE_A_DRIVING 0x00 +#define TYPE_B_DRIVING 0x01 +#define TYPE_C_DRIVING 0x02 +#define TYPE_D_DRIVING 0x03 +#define SD_SAMPLE_POINT_CTL 0xFDA7 +#define DDR_FIX_RX_DAT 0x00 +#define DDR_VAR_RX_DAT 0x80 +#define DDR_FIX_RX_DAT_EDGE 0x00 +#define DDR_FIX_RX_DAT_14_DELAY 0x40 +#define DDR_FIX_RX_CMD 0x00 +#define DDR_VAR_RX_CMD 0x20 +#define DDR_FIX_RX_CMD_POS_EDGE 0x00 +#define DDR_FIX_RX_CMD_14_DELAY 0x10 +#define SD20_RX_POS_EDGE 0x00 +#define SD20_RX_14_DELAY 0x08 +#define SD20_RX_SEL_MASK 0x08 +#define SD_PUSH_POINT_CTL 0xFDA8 +#define DDR_FIX_TX_CMD_DAT 0x00 +#define DDR_VAR_TX_CMD_DAT 0x80 +#define DDR_FIX_TX_DAT_14_TSU 0x00 +#define DDR_FIX_TX_DAT_12_TSU 0x40 +#define DDR_FIX_TX_CMD_NEG_EDGE 0x00 +#define DDR_FIX_TX_CMD_14_AHEAD 0x20 +#define SD20_TX_NEG_EDGE 0x00 +#define SD20_TX_14_AHEAD 0x10 +#define SD20_TX_SEL_MASK 0x10 +#define DDR_VAR_SDCLK_POL_SWAP 0x01 +#define SD_CMD0 0xFDA9 +#define SD_CMD_START 0x40 +#define SD_CMD1 0xFDAA +#define SD_CMD2 0xFDAB +#define SD_CMD3 0xFDAC +#define SD_CMD4 0xFDAD +#define SD_CMD5 0xFDAE +#define SD_BYTE_CNT_L 0xFDAF +#define SD_BYTE_CNT_H 0xFDB0 +#define SD_BLOCK_CNT_L 0xFDB1 +#define SD_BLOCK_CNT_H 0xFDB2 +#define SD_TRANSFER 0xFDB3 +#define SD_TRANSFER_START 0x80 +#define SD_TRANSFER_END 0x40 +#define SD_STAT_IDLE 0x20 +#define SD_TRANSFER_ERR 0x10 +#define SD_TM_NORMAL_WRITE 0x00 +#define SD_TM_AUTO_WRITE_3 0x01 +#define SD_TM_AUTO_WRITE_4 0x02 +#define SD_TM_AUTO_READ_3 0x05 +#define SD_TM_AUTO_READ_4 0x06 +#define SD_TM_CMD_RSP 0x08 +#define SD_TM_AUTO_WRITE_1 0x09 +#define SD_TM_AUTO_WRITE_2 0x0A +#define SD_TM_NORMAL_READ 0x0C +#define SD_TM_AUTO_READ_1 0x0D +#define SD_TM_AUTO_READ_2 0x0E +#define SD_TM_AUTO_TUNING 0x0F +#define SD_CMD_STATE 0xFDB5 +#define SD_CMD_IDLE 0x80 + +#define SD_DATA_STATE 0xFDB6 +#define SD_DATA_IDLE 0x80 +#define REG_SD_STOP_SDCLK_CFG 0xFDB8 +#define SD30_CLK_STOP_CFG_EN 0x04 +#define SD30_CLK_STOP_CFG1 0x02 +#define SD30_CLK_STOP_CFG0 0x01 +#define REG_PRE_RW_MODE 0xFD70 +#define EN_INFINITE_MODE 0x01 + +#define SRCTL 0xFC13 + +#define DCM_DRP_CTL 0xFC23 +#define DCM_RESET 0x08 +#define DCM_LOCKED 0x04 +#define DCM_208M 0x00 +#define DCM_TX 0x01 +#define DCM_RX 0x02 +#define DCM_DRP_TRIG 0xFC24 +#define DRP_START 0x80 +#define DRP_DONE 0x40 +#define DCM_DRP_CFG 0xFC25 +#define DRP_WRITE 0x80 +#define DRP_READ 0x00 +#define 
DCM_WRITE_ADDRESS_50 0x50 +#define DCM_WRITE_ADDRESS_51 0x51 +#define DCM_READ_ADDRESS_00 0x00 +#define DCM_READ_ADDRESS_51 0x51 +#define DCM_DRP_WR_DATA_L 0xFC26 +#define DCM_DRP_WR_DATA_H 0xFC27 +#define DCM_DRP_RD_DATA_L 0xFC28 +#define DCM_DRP_RD_DATA_H 0xFC29 +#define SD_VPCLK0_CTL 0xFC2A +#define SD_VPCLK1_CTL 0xFC2B +#define PHASE_SELECT_MASK 0x1F +#define SD_DCMPS0_CTL 0xFC2C +#define SD_DCMPS1_CTL 0xFC2D +#define SD_VPTX_CTL SD_VPCLK0_CTL +#define SD_VPRX_CTL SD_VPCLK1_CTL +#define PHASE_CHANGE 0x80 +#define PHASE_NOT_RESET 0x40 +#define SD_DCMPS_TX_CTL SD_DCMPS0_CTL +#define SD_DCMPS_RX_CTL SD_DCMPS1_CTL +#define DCMPS_CHANGE 0x80 +#define DCMPS_CHANGE_DONE 0x40 +#define DCMPS_ERROR 0x20 +#define DCMPS_CURRENT_PHASE 0x1F +#define CARD_CLK_SOURCE 0xFC2E +#define CRC_FIX_CLK (0x00 << 0) +#define CRC_VAR_CLK0 (0x01 << 0) +#define CRC_VAR_CLK1 (0x02 << 0) +#define SD30_FIX_CLK (0x00 << 2) +#define SD30_VAR_CLK0 (0x01 << 2) +#define SD30_VAR_CLK1 (0x02 << 2) +#define SAMPLE_FIX_CLK (0x00 << 4) +#define SAMPLE_VAR_CLK0 (0x01 << 4) +#define SAMPLE_VAR_CLK1 (0x02 << 4) +#define CARD_PWR_CTL 0xFD50 +#define PMOS_STRG_MASK 0x10 +#define PMOS_STRG_800mA 0x10 +#define PMOS_STRG_400mA 0x00 +#define SD_POWER_OFF 0x03 +#define SD_PARTIAL_POWER_ON 0x01 +#define SD_POWER_ON 0x00 +#define SD_POWER_MASK 0x03 +#define MS_POWER_OFF 0x0C +#define MS_PARTIAL_POWER_ON 0x04 +#define MS_POWER_ON 0x00 +#define MS_POWER_MASK 0x0C +#define BPP_POWER_OFF 0x0F +#define BPP_POWER_5_PERCENT_ON 0x0E +#define BPP_POWER_10_PERCENT_ON 0x0C +#define BPP_POWER_15_PERCENT_ON 0x08 +#define BPP_POWER_ON 0x00 +#define BPP_POWER_MASK 0x0F +#define SD_VCC_PARTIAL_POWER_ON 0x02 +#define SD_VCC_POWER_ON 0x00 +#define CARD_CLK_SWITCH 0xFD51 +#define RTL8411B_PACKAGE_MODE 0xFD51 +#define CARD_SHARE_MODE 0xFD52 +#define CARD_SHARE_MASK 0x0F +#define CARD_SHARE_MULTI_LUN 0x00 +#define CARD_SHARE_NORMAL 0x00 +#define CARD_SHARE_48_SD 0x04 +#define CARD_SHARE_48_MS 0x08 +#define CARD_SHARE_BAROSSA_SD 0x01 +#define CARD_SHARE_BAROSSA_MS 0x02 +#define CARD_DRIVE_SEL 0xFD53 +#define MS_DRIVE_8mA (0x01 << 6) +#define MMC_DRIVE_8mA (0x01 << 4) +#define XD_DRIVE_8mA (0x01 << 2) +#define GPIO_DRIVE_8mA 0x01 +#define RTS5209_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\ + XD_DRIVE_8mA | GPIO_DRIVE_8mA) +#define RTL8411_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\ + XD_DRIVE_8mA) +#define RTSX_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | GPIO_DRIVE_8mA) + +#define CARD_STOP 0xFD54 +#define SPI_STOP 0x01 +#define XD_STOP 0x02 +#define SD_STOP 0x04 +#define MS_STOP 0x08 +#define SPI_CLR_ERR 0x10 +#define XD_CLR_ERR 0x20 +#define SD_CLR_ERR 0x40 +#define MS_CLR_ERR 0x80 +#define CARD_OE 0xFD55 +#define SD_OUTPUT_EN 0x04 +#define MS_OUTPUT_EN 0x08 +#define CARD_AUTO_BLINK 0xFD56 +#define CARD_GPIO_DIR 0xFD57 +#define CARD_GPIO 0xFD58 +#define CARD_DATA_SOURCE 0xFD5B +#define PINGPONG_BUFFER 0x01 +#define RING_BUFFER 0x00 +#define SD30_CLK_DRIVE_SEL 0xFD5A +#define DRIVER_TYPE_A 0x05 +#define DRIVER_TYPE_B 0x03 +#define DRIVER_TYPE_C 0x02 +#define DRIVER_TYPE_D 0x01 +#define CARD_SELECT 0xFD5C +#define SD_MOD_SEL 2 +#define MS_MOD_SEL 3 +#define SD30_DRIVE_SEL 0xFD5E +#define CFG_DRIVER_TYPE_A 0x02 +#define CFG_DRIVER_TYPE_B 0x03 +#define CFG_DRIVER_TYPE_C 0x01 +#define CFG_DRIVER_TYPE_D 0x00 +#define SD30_CMD_DRIVE_SEL 0xFD5E +#define SD30_DAT_DRIVE_SEL 0xFD5F +#define CARD_CLK_EN 0xFD69 +#define SD_CLK_EN 0x04 +#define MS_CLK_EN 0x08 +#define SD40_CLK_EN 0x10 +#define SDIO_CTRL 0xFD6B +#define CD_PAD_CTL 0xFD73 +#define CD_DISABLE_MASK 0x07 
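
The card-control registers above (CARD_PWR_CTL, CARD_CLK_EN, CARD_OE) are normally programmed through the batched command interface declared further down in this header (rtsx_pci_init_cmd(), rtsx_pci_add_cmd(), rtsx_pci_send_cmd()). The sketch below is a simplified, illustrative power-up sequence loosely modelled on the chip-specific card_power_on() hooks; real chip drivers also program LDO/voltage registers, and <linux/delay.h> is assumed for msleep().

/*
 * Illustrative only: a simplified SD power-up using the batched
 * command helpers declared later in this header.
 */
static int example_sd_power_on(struct rtsx_pcr *pcr)
{
	int err;

	rtsx_pci_init_cmd(pcr);
	/* step 1: partial power, let the card rail ramp up */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
			 SD_POWER_MASK, SD_PARTIAL_POWER_ON);
	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	msleep(5);	/* illustrative settling delay */

	rtsx_pci_init_cmd(pcr);
	/* step 2: full power, then enable card clock and output drivers */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
			 SD_POWER_MASK, SD_POWER_ON);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN,
			 SD_CLK_EN, SD_CLK_EN);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE,
			 SD_OUTPUT_EN, SD_OUTPUT_EN);
	return rtsx_pci_send_cmd(pcr, 100);
}
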
+#define MS_CD_DISABLE 0x04 +#define SD_CD_DISABLE 0x02 +#define XD_CD_DISABLE 0x01 +#define CD_DISABLE 0x07 +#define CD_ENABLE 0x00 +#define MS_CD_EN_ONLY 0x03 +#define SD_CD_EN_ONLY 0x05 +#define XD_CD_EN_ONLY 0x06 +#define FORCE_CD_LOW_MASK 0x38 +#define FORCE_CD_XD_LOW 0x08 +#define FORCE_CD_SD_LOW 0x10 +#define FORCE_CD_MS_LOW 0x20 +#define CD_AUTO_DISABLE 0x40 +#define FPDCTL 0xFC00 +#define SSC_POWER_DOWN 0x01 +#define SD_OC_POWER_DOWN 0x02 +#define ALL_POWER_DOWN 0x03 +#define OC_POWER_DOWN 0x02 +#define PDINFO 0xFC01 + +#define CLK_CTL 0xFC02 +#define CHANGE_CLK 0x01 +#define CLK_LOW_FREQ 0x01 + +#define CLK_DIV 0xFC03 +#define CLK_DIV_1 0x01 +#define CLK_DIV_2 0x02 +#define CLK_DIV_4 0x03 +#define CLK_DIV_8 0x04 +#define CLK_SEL 0xFC04 + +#define SSC_DIV_N_0 0xFC0F +#define SSC_DIV_N_1 0xFC10 +#define SSC_CTL1 0xFC11 +#define SSC_RSTB 0x80 +#define SSC_8X_EN 0x40 +#define SSC_FIX_FRAC 0x20 +#define SSC_SEL_1M 0x00 +#define SSC_SEL_2M 0x08 +#define SSC_SEL_4M 0x10 +#define SSC_SEL_8M 0x18 +#define SSC_CTL2 0xFC12 +#define SSC_DEPTH_MASK 0x07 +#define SSC_DEPTH_DISALBE 0x00 +#define SSC_DEPTH_4M 0x01 +#define SSC_DEPTH_2M 0x02 +#define SSC_DEPTH_1M 0x03 +#define SSC_DEPTH_500K 0x04 +#define SSC_DEPTH_250K 0x05 +#define RCCTL 0xFC14 + +#define FPGA_PULL_CTL 0xFC1D +#define OLT_LED_CTL 0xFC1E +#define LED_SHINE_MASK 0x08 +#define LED_SHINE_EN 0x08 +#define LED_SHINE_DISABLE 0x00 +#define GPIO_CTL 0xFC1F + +#define LDO_CTL 0xFC1E +#define BPP_ASIC_1V7 0x00 +#define BPP_ASIC_1V8 0x01 +#define BPP_ASIC_1V9 0x02 +#define BPP_ASIC_2V0 0x03 +#define BPP_ASIC_2V7 0x04 +#define BPP_ASIC_2V8 0x05 +#define BPP_ASIC_3V2 0x06 +#define BPP_ASIC_3V3 0x07 +#define BPP_REG_TUNED18 0x07 +#define BPP_TUNED18_SHIFT_8402 5 +#define BPP_TUNED18_SHIFT_8411 4 +#define BPP_PAD_MASK 0x04 +#define BPP_PAD_3V3 0x04 +#define BPP_PAD_1V8 0x00 +#define BPP_LDO_POWB 0x03 +#define BPP_LDO_ON 0x00 +#define BPP_LDO_SUSPEND 0x02 +#define BPP_LDO_OFF 0x03 +#define EFUSE_CTL 0xFC30 +#define EFUSE_ADD 0xFC31 +#define SYS_VER 0xFC32 +#define EFUSE_DATAL 0xFC34 +#define EFUSE_DATAH 0xFC35 + +#define CARD_PULL_CTL1 0xFD60 +#define CARD_PULL_CTL2 0xFD61 +#define CARD_PULL_CTL3 0xFD62 +#define CARD_PULL_CTL4 0xFD63 +#define CARD_PULL_CTL5 0xFD64 +#define CARD_PULL_CTL6 0xFD65 + +/* PCI Express Related Registers */ +#define IRQEN0 0xFE20 +#define IRQSTAT0 0xFE21 +#define DMA_DONE_INT 0x80 +#define SUSPEND_INT 0x40 +#define LINK_RDY_INT 0x20 +#define LINK_DOWN_INT 0x10 +#define IRQEN1 0xFE22 +#define IRQSTAT1 0xFE23 +#define TLPRIEN 0xFE24 +#define TLPRISTAT 0xFE25 +#define TLPTIEN 0xFE26 +#define TLPTISTAT 0xFE27 +#define DMATC0 0xFE28 +#define DMATC1 0xFE29 +#define DMATC2 0xFE2A +#define DMATC3 0xFE2B +#define DMACTL 0xFE2C +#define DMA_RST 0x80 +#define DMA_BUSY 0x04 +#define DMA_DIR_TO_CARD 0x00 +#define DMA_DIR_FROM_CARD 0x02 +#define DMA_EN 0x01 +#define DMA_128 (0 << 4) +#define DMA_256 (1 << 4) +#define DMA_512 (2 << 4) +#define DMA_1024 (3 << 4) +#define DMA_PACK_SIZE_MASK 0x30 +#define BCTL 0xFE2D +#define RBBC0 0xFE2E +#define RBBC1 0xFE2F +#define RBDAT 0xFE30 +#define RBCTL 0xFE34 +#define U_AUTO_DMA_EN_MASK 0x20 +#define U_AUTO_DMA_DISABLE 0x00 +#define RB_FLUSH 0x80 +#define CFGADDR0 0xFE35 +#define CFGADDR1 0xFE36 +#define CFGDATA0 0xFE37 +#define CFGDATA1 0xFE38 +#define CFGDATA2 0xFE39 +#define CFGDATA3 0xFE3A +#define CFGRWCTL 0xFE3B +#define PHYRWCTL 0xFE3C +#define PHYDATA0 0xFE3D +#define PHYDATA1 0xFE3E +#define PHYADDR 0xFE3F +#define MSGRXDATA0 0xFE40 +#define MSGRXDATA1 0xFE41 +#define MSGRXDATA2 
0xFE42 +#define MSGRXDATA3 0xFE43 +#define MSGTXDATA0 0xFE44 +#define MSGTXDATA1 0xFE45 +#define MSGTXDATA2 0xFE46 +#define MSGTXDATA3 0xFE47 +#define MSGTXCTL 0xFE48 +#define LTR_CTL 0xFE4A +#define LTR_TX_EN_MASK BIT(7) +#define LTR_TX_EN_1 BIT(7) +#define LTR_TX_EN_0 0 +#define LTR_LATENCY_MODE_MASK BIT(6) +#define LTR_LATENCY_MODE_HW 0 +#define LTR_LATENCY_MODE_SW BIT(6) +#define OBFF_CFG 0xFE4C +#define OBFF_EN_MASK 0x03 +#define OBFF_DISABLE 0x00 + +#define CDRESUMECTL 0xFE52 +#define WAKE_SEL_CTL 0xFE54 +#define PCLK_CTL 0xFE55 +#define PCLK_MODE_SEL 0x20 +#define PME_FORCE_CTL 0xFE56 + +#define ASPM_FORCE_CTL 0xFE57 +#define FORCE_ASPM_CTL0 0x10 +#define FORCE_ASPM_VAL_MASK 0x03 +#define FORCE_ASPM_L1_EN 0x02 +#define FORCE_ASPM_L0_EN 0x01 +#define FORCE_ASPM_NO_ASPM 0x00 +#define PM_CLK_FORCE_CTL 0xFE58 +#define CLK_PM_EN 0x01 +#define FUNC_FORCE_CTL 0xFE59 +#define FUNC_FORCE_UPME_XMT_DBG 0x02 +#define PERST_GLITCH_WIDTH 0xFE5C +#define CHANGE_LINK_STATE 0xFE5B +#define RESET_LOAD_REG 0xFE5E +#define EFUSE_CONTENT 0xFE5F +#define HOST_SLEEP_STATE 0xFE60 +#define HOST_ENTER_S1 1 +#define HOST_ENTER_S3 2 + +#define SDIO_CFG 0xFE70 +#define PM_EVENT_DEBUG 0xFE71 +#define PME_DEBUG_0 0x08 +#define NFTS_TX_CTRL 0xFE72 + +#define PWR_GATE_CTRL 0xFE75 +#define PWR_GATE_EN 0x01 +#define LDO3318_PWR_MASK 0x06 +#define LDO_ON 0x00 +#define LDO_SUSPEND 0x04 +#define LDO_OFF 0x06 +#define PWD_SUSPEND_EN 0xFE76 +#define LDO_PWR_SEL 0xFE78 + +#define L1SUB_CONFIG1 0xFE8D +#define AUX_CLK_ACTIVE_SEL_MASK 0x01 +#define MAC_CKSW_DONE 0x00 +#define L1SUB_CONFIG2 0xFE8E +#define L1SUB_AUTO_CFG 0x02 +#define L1SUB_CONFIG3 0xFE8F +#define L1OFF_MBIAS2_EN_5250 BIT(7) + +#define DUMMY_REG_RESET_0 0xFE90 +#define IC_VERSION_MASK 0x0F + +#define REG_VREF 0xFE97 +#define PWD_SUSPND_EN 0x10 +#define RTS5260_DMA_RST_CTL_0 0xFEBF +#define RTS5260_DMA_RST 0x80 +#define RTS5260_ADMA3_RST 0x40 +#define AUTOLOAD_CFG_BASE 0xFF00 +#define RELINK_TIME_MASK 0x01 +#define PETXCFG 0xFF03 +#define FORCE_CLKREQ_DELINK_MASK BIT(7) +#define FORCE_CLKREQ_LOW 0x80 +#define FORCE_CLKREQ_HIGH 0x00 + +#define PM_CTRL1 0xFF44 +#define CD_RESUME_EN_MASK 0xF0 + +#define PM_CTRL2 0xFF45 +#define PM_CTRL3 0xFF46 +#define SDIO_SEND_PME_EN 0x80 +#define FORCE_RC_MODE_ON 0x40 +#define FORCE_RX50_LINK_ON 0x20 +#define D3_DELINK_MODE_EN 0x10 +#define USE_PESRTB_CTL_DELINK 0x08 +#define DELAY_PIN_WAKE 0x04 +#define RESET_PIN_WAKE 0x02 +#define PM_WAKE_EN 0x01 +#define PM_CTRL4 0xFF47 + +/* Memory mapping */ +#define SRAM_BASE 0xE600 +#define RBUF_BASE 0xF400 +#define PPBUF_BASE1 0xF800 +#define PPBUF_BASE2 0xFA00 +#define IMAGE_FLAG_ADDR0 0xCE80 +#define IMAGE_FLAG_ADDR1 0xCE81 + +#define RREF_CFG 0xFF6C +#define RREF_VBGSEL_MASK 0x38 +#define RREF_VBGSEL_1V25 0x28 + +#define OOBS_CONFIG 0xFF6E +#define OOBS_AUTOK_DIS 0x80 +#define OOBS_VAL_MASK 0x1F + +#define LDO_DV18_CFG 0xFF70 +#define LDO_DV18_SR_MASK 0xC0 +#define LDO_DV18_SR_DF 0x40 +#define DV331812_MASK 0x70 +#define DV331812_33 0x70 +#define DV331812_17 0x30 + +#define LDO_CONFIG2 0xFF71 +#define LDO_D3318_MASK 0x07 +#define LDO_D3318_33V 0x07 +#define LDO_D3318_18V 0x02 +#define DV331812_VDD1 0x04 +#define DV331812_POWERON 0x08 +#define DV331812_POWEROFF 0x00 + +#define LDO_VCC_CFG0 0xFF72 +#define LDO_VCC_LMTVTH_MASK 0x30 +#define LDO_VCC_LMTVTH_2A 0x10 +/*RTS5260*/ +#define RTS5260_DVCC_TUNE_MASK 0x70 +#define RTS5260_DVCC_33 0x70 + +#define LDO_VCC_CFG1 0xFF73 +#define LDO_VCC_REF_TUNE_MASK 0x30 +#define LDO_VCC_REF_1V2 0x20 +#define LDO_VCC_TUNE_MASK 0x07 +#define 
LDO_VCC_1V8 0x04 +#define LDO_VCC_3V3 0x07 +#define LDO_VCC_LMT_EN 0x08 +/*RTS5260*/ +#define LDO_POW_SDVDD1_MASK 0x08 +#define LDO_POW_SDVDD1_ON 0x08 +#define LDO_POW_SDVDD1_OFF 0x00 + +#define LDO_VIO_CFG 0xFF75 +#define LDO_VIO_SR_MASK 0xC0 +#define LDO_VIO_SR_DF 0x40 +#define LDO_VIO_REF_TUNE_MASK 0x30 +#define LDO_VIO_REF_1V2 0x20 +#define LDO_VIO_TUNE_MASK 0x07 +#define LDO_VIO_1V7 0x03 +#define LDO_VIO_1V8 0x04 +#define LDO_VIO_3V3 0x07 + +#define LDO_DV12S_CFG 0xFF76 +#define LDO_REF12_TUNE_MASK 0x18 +#define LDO_REF12_TUNE_DF 0x10 +#define LDO_D12_TUNE_MASK 0x07 +#define LDO_D12_TUNE_DF 0x04 + +#define LDO_AV12S_CFG 0xFF77 +#define LDO_AV12S_TUNE_MASK 0x07 +#define LDO_AV12S_TUNE_DF 0x04 + +#define SD40_LDO_CTL1 0xFE7D +#define SD40_VIO_TUNE_MASK 0x70 +#define SD40_VIO_TUNE_1V7 0x30 +#define SD_VIO_LDO_1V8 0x40 +#define SD_VIO_LDO_3V3 0x70 + +#define RTS5260_AUTOLOAD_CFG4 0xFF7F +#define RTS5260_MIMO_DISABLE 0x8A + +#define RTS5260_REG_GPIO_CTL0 0xFC1A +#define RTS5260_REG_GPIO_MASK 0x01 +#define RTS5260_REG_GPIO_ON 0x01 +#define RTS5260_REG_GPIO_OFF 0x00 + +#define PWR_GLOBAL_CTRL 0xF200 +#define PCIE_L1_2_EN 0x0C +#define PCIE_L1_1_EN 0x0A +#define PCIE_L1_0_EN 0x09 +#define PWR_FE_CTL 0xF201 +#define PCIE_L1_2_PD_FE_EN 0x0C +#define PCIE_L1_1_PD_FE_EN 0x0A +#define PCIE_L1_0_PD_FE_EN 0x09 +#define CFG_PCIE_APHY_OFF_0 0xF204 +#define CFG_PCIE_APHY_OFF_0_DEFAULT 0xBF +#define CFG_PCIE_APHY_OFF_1 0xF205 +#define CFG_PCIE_APHY_OFF_1_DEFAULT 0xFF +#define CFG_PCIE_APHY_OFF_2 0xF206 +#define CFG_PCIE_APHY_OFF_2_DEFAULT 0x01 +#define CFG_PCIE_APHY_OFF_3 0xF207 +#define CFG_PCIE_APHY_OFF_3_DEFAULT 0x00 +#define CFG_L1_0_PCIE_MAC_RET_VALUE 0xF20C +#define CFG_L1_0_PCIE_DPHY_RET_VALUE 0xF20E +#define CFG_L1_0_SYS_RET_VALUE 0xF210 +#define CFG_L1_0_CRC_MISC_RET_VALUE 0xF212 +#define CFG_L1_0_CRC_SD30_RET_VALUE 0xF214 +#define CFG_L1_0_CRC_SD40_RET_VALUE 0xF216 +#define CFG_LP_FPWM_VALUE 0xF219 +#define CFG_LP_FPWM_VALUE_DEFAULT 0x18 +#define PWC_CDR 0xF253 +#define PWC_CDR_DEFAULT 0x03 +#define CFG_L1_0_RET_VALUE_DEFAULT 0x1B +#define CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT 0x0C + +/* OCPCTL */ +#define SD_DETECT_EN 0x08 +#define SD_OCP_INT_EN 0x04 +#define SD_OCP_INT_CLR 0x02 +#define SD_OC_CLR 0x01 + +#define SDVIO_DETECT_EN (1 << 7) +#define SDVIO_OCP_INT_EN (1 << 6) +#define SDVIO_OCP_INT_CLR (1 << 5) +#define SDVIO_OC_CLR (1 << 4) + +/* OCPSTAT */ +#define SD_OCP_DETECT 0x08 +#define SD_OC_NOW 0x04 +#define SD_OC_EVER 0x02 + +#define SDVIO_OC_NOW (1 << 6) +#define SDVIO_OC_EVER (1 << 5) + +#define REG_OCPCTL 0xFD6A +#define REG_OCPSTAT 0xFD6E +#define REG_OCPGLITCH 0xFD6C +#define REG_OCPPARA1 0xFD6B +#define REG_OCPPARA2 0xFD6D + +/* rts5260 DV3318 OCP-related registers */ +#define REG_DV3318_OCPCTL 0xFD89 +#define DV3318_OCP_TIME_MASK 0xF0 +#define DV3318_DETECT_EN 0x08 +#define DV3318_OCP_INT_EN 0x04 +#define DV3318_OCP_INT_CLR 0x02 +#define DV3318_OCP_CLR 0x01 + +#define REG_DV3318_OCPSTAT 0xFD8A +#define DV3318_OCP_GlITCH_TIME_MASK 0xF0 +#define DV3318_OCP_DETECT 0x08 +#define DV3318_OCP_NOW 0x04 +#define DV3318_OCP_EVER 0x02 + +#define SD_OCP_GLITCH_MASK 0x0F + +/* OCPPARA1 */ +#define SDVIO_OCP_TIME_60 0x00 +#define SDVIO_OCP_TIME_100 0x10 +#define SDVIO_OCP_TIME_200 0x20 +#define SDVIO_OCP_TIME_400 0x30 +#define SDVIO_OCP_TIME_600 0x40 +#define SDVIO_OCP_TIME_800 0x50 +#define SDVIO_OCP_TIME_1100 0x60 +#define SDVIO_OCP_TIME_MASK 0x70 + +#define SD_OCP_TIME_60 0x00 +#define SD_OCP_TIME_100 0x01 +#define SD_OCP_TIME_200 0x02 +#define SD_OCP_TIME_400 0x03 +#define SD_OCP_TIME_600 
0x04 +#define SD_OCP_TIME_800 0x05 +#define SD_OCP_TIME_1100 0x06 +#define SD_OCP_TIME_MASK 0x07 + +/* OCPPARA2 */ +#define SDVIO_OCP_THD_190 0x00 +#define SDVIO_OCP_THD_250 0x10 +#define SDVIO_OCP_THD_320 0x20 +#define SDVIO_OCP_THD_380 0x30 +#define SDVIO_OCP_THD_440 0x40 +#define SDVIO_OCP_THD_500 0x50 +#define SDVIO_OCP_THD_570 0x60 +#define SDVIO_OCP_THD_630 0x70 +#define SDVIO_OCP_THD_MASK 0x70 + +#define SD_OCP_THD_450 0x00 +#define SD_OCP_THD_550 0x01 +#define SD_OCP_THD_650 0x02 +#define SD_OCP_THD_750 0x03 +#define SD_OCP_THD_850 0x04 +#define SD_OCP_THD_950 0x05 +#define SD_OCP_THD_1050 0x06 +#define SD_OCP_THD_1150 0x07 +#define SD_OCP_THD_MASK 0x07 + +#define SDVIO_OCP_GLITCH_MASK 0xF0 +#define SDVIO_OCP_GLITCH_NONE 0x00 +#define SDVIO_OCP_GLITCH_50U 0x10 +#define SDVIO_OCP_GLITCH_100U 0x20 +#define SDVIO_OCP_GLITCH_200U 0x30 +#define SDVIO_OCP_GLITCH_600U 0x40 +#define SDVIO_OCP_GLITCH_800U 0x50 +#define SDVIO_OCP_GLITCH_1M 0x60 +#define SDVIO_OCP_GLITCH_2M 0x70 +#define SDVIO_OCP_GLITCH_3M 0x80 +#define SDVIO_OCP_GLITCH_4M 0x90 +#define SDVIO_OCP_GLIVCH_5M 0xA0 +#define SDVIO_OCP_GLITCH_6M 0xB0 +#define SDVIO_OCP_GLITCH_7M 0xC0 +#define SDVIO_OCP_GLITCH_8M 0xD0 +#define SDVIO_OCP_GLITCH_9M 0xE0 +#define SDVIO_OCP_GLITCH_10M 0xF0 + +#define SD_OCP_GLITCH_MASK 0x0F +#define SD_OCP_GLITCH_NONE 0x00 +#define SD_OCP_GLITCH_50U 0x01 +#define SD_OCP_GLITCH_100U 0x02 +#define SD_OCP_GLITCH_200U 0x03 +#define SD_OCP_GLITCH_600U 0x04 +#define SD_OCP_GLITCH_800U 0x05 +#define SD_OCP_GLITCH_1M 0x06 +#define SD_OCP_GLITCH_2M 0x07 +#define SD_OCP_GLITCH_3M 0x08 +#define SD_OCP_GLITCH_4M 0x09 +#define SD_OCP_GLIVCH_5M 0x0A +#define SD_OCP_GLITCH_6M 0x0B +#define SD_OCP_GLITCH_7M 0x0C +#define SD_OCP_GLITCH_8M 0x0D +#define SD_OCP_GLITCH_9M 0x0E +#define SD_OCP_GLITCH_10M 0x0F + +/* Phy register */ +#define PHY_PCR 0x00 +#define PHY_PCR_FORCE_CODE 0xB000 +#define PHY_PCR_OOBS_CALI_50 0x0800 +#define PHY_PCR_OOBS_VCM_08 0x0200 +#define PHY_PCR_OOBS_SEN_90 0x0040 +#define PHY_PCR_RSSI_EN 0x0002 +#define PHY_PCR_RX10K 0x0001 + +#define PHY_RCR0 0x01 +#define PHY_RCR1 0x02 +#define PHY_RCR1_ADP_TIME_4 0x0400 +#define PHY_RCR1_VCO_COARSE 0x001F +#define PHY_RCR1_INIT_27S 0x0A1F +#define PHY_SSCCR2 0x02 +#define PHY_SSCCR2_PLL_NCODE 0x0A00 +#define PHY_SSCCR2_TIME0 0x001C +#define PHY_SSCCR2_TIME2_WIDTH 0x0003 + +#define PHY_RCR2 0x03 +#define PHY_RCR2_EMPHASE_EN 0x8000 +#define PHY_RCR2_NADJR 0x4000 +#define PHY_RCR2_CDR_SR_2 0x0100 +#define PHY_RCR2_FREQSEL_12 0x0040 +#define PHY_RCR2_CDR_SC_12P 0x0010 +#define PHY_RCR2_CALIB_LATE 0x0002 +#define PHY_RCR2_INIT_27S 0xC152 +#define PHY_SSCCR3 0x03 +#define PHY_SSCCR3_STEP_IN 0x2740 +#define PHY_SSCCR3_CHECK_DELAY 0x0008 +#define _PHY_ANA03 0x03 +#define _PHY_ANA03_TIMER_MAX 0x2700 +#define _PHY_ANA03_OOBS_DEB_EN 0x0040 +#define _PHY_CMU_DEBUG_EN 0x0008 + +#define PHY_RTCR 0x04 +#define PHY_RDR 0x05 +#define PHY_RDR_RXDSEL_1_9 0x4000 +#define PHY_SSC_AUTO_PWD 0x0600 +#define PHY_TCR0 0x06 +#define PHY_TCR1 0x07 +#define PHY_TUNE 0x08 +#define PHY_TUNE_TUNEREF_1_0 0x4000 +#define PHY_TUNE_VBGSEL_1252 0x0C00 +#define PHY_TUNE_SDBUS_33 0x0200 +#define PHY_TUNE_TUNED18 0x01C0 +#define PHY_TUNE_TUNED12 0X0020 +#define PHY_TUNE_TUNEA12 0x0004 +#define PHY_TUNE_VOLTAGE_MASK 0xFC3F +#define PHY_TUNE_VOLTAGE_3V3 0x03C0 +#define PHY_TUNE_D18_1V8 0x0100 +#define PHY_TUNE_D18_1V7 0x0080 +#define PHY_ANA08 0x08 +#define PHY_ANA08_RX_EQ_DCGAIN 0x5000 +#define PHY_ANA08_SEL_RX_EN 0x0400 +#define PHY_ANA08_RX_EQ_VAL 0x03C0 +#define PHY_ANA08_SCP 0x0020 +#define 
PHY_ANA08_SEL_IPI 0x0004 + +#define PHY_IMR 0x09 +#define PHY_BPCR 0x0A +#define PHY_BPCR_IBRXSEL 0x0400 +#define PHY_BPCR_IBTXSEL 0x0100 +#define PHY_BPCR_IB_FILTER 0x0080 +#define PHY_BPCR_CMIRROR_EN 0x0040 + +#define PHY_BIST 0x0B +#define PHY_RAW_L 0x0C +#define PHY_RAW_H 0x0D +#define PHY_RAW_DATA 0x0E +#define PHY_HOST_CLK_CTRL 0x0F +#define PHY_DMR 0x10 +#define PHY_BACR 0x11 +#define PHY_BACR_BASIC_MASK 0xFFF3 +#define PHY_IER 0x12 +#define PHY_BCSR 0x13 +#define PHY_BPR 0x14 +#define PHY_BPNR2 0x15 +#define PHY_BPNR 0x16 +#define PHY_BRNR2 0x17 +#define PHY_BENR 0x18 +#define PHY_REV 0x19 +#define PHY_REV_RESV 0xE000 +#define PHY_REV_RXIDLE_LATCHED 0x1000 +#define PHY_REV_P1_EN 0x0800 +#define PHY_REV_RXIDLE_EN 0x0400 +#define PHY_REV_CLKREQ_TX_EN 0x0200 +#define PHY_REV_CLKREQ_RX_EN 0x0100 +#define PHY_REV_CLKREQ_DT_1_0 0x0040 +#define PHY_REV_STOP_CLKRD 0x0020 +#define PHY_REV_RX_PWST 0x0008 +#define PHY_REV_STOP_CLKWR 0x0004 +#define _PHY_REV0 0x19 +#define _PHY_REV0_FILTER_OUT 0x3800 +#define _PHY_REV0_CDR_BYPASS_PFD 0x0100 +#define _PHY_REV0_CDR_RX_IDLE_BYPASS 0x0002 + +#define PHY_FLD0 0x1A +#define PHY_ANA1A 0x1A +#define PHY_ANA1A_TXR_LOOPBACK 0x2000 +#define PHY_ANA1A_RXT_BIST 0x0500 +#define PHY_ANA1A_TXR_BIST 0x0040 +#define PHY_ANA1A_REV 0x0006 +#define PHY_FLD0_INIT_27S 0x2546 +#define PHY_FLD1 0x1B +#define PHY_FLD2 0x1C +#define PHY_FLD3 0x1D +#define PHY_FLD3_TIMER_4 0x0800 +#define PHY_FLD3_TIMER_6 0x0020 +#define PHY_FLD3_RXDELINK 0x0004 +#define PHY_FLD3_INIT_27S 0x0004 +#define PHY_ANA1D 0x1D +#define PHY_ANA1D_DEBUG_ADDR 0x0004 +#define _PHY_FLD0 0x1D +#define _PHY_FLD0_CLK_REQ_20C 0x8000 +#define _PHY_FLD0_RX_IDLE_EN 0x1000 +#define _PHY_FLD0_BIT_ERR_RSTN 0x0800 +#define _PHY_FLD0_BER_COUNT 0x01E0 +#define _PHY_FLD0_BER_TIMER 0x001E +#define _PHY_FLD0_CHECK_EN 0x0001 + +#define PHY_FLD4 0x1E +#define PHY_FLD4_FLDEN_SEL 0x4000 +#define PHY_FLD4_REQ_REF 0x2000 +#define PHY_FLD4_RXAMP_OFF 0x1000 +#define PHY_FLD4_REQ_ADDA 0x0800 +#define PHY_FLD4_BER_COUNT 0x00E0 +#define PHY_FLD4_BER_TIMER 0x000A +#define PHY_FLD4_BER_CHK_EN 0x0001 +#define PHY_FLD4_INIT_27S 0x5C7F +#define PHY_DIG1E 0x1E +#define PHY_DIG1E_REV 0x4000 +#define PHY_DIG1E_D0_X_D1 0x1000 +#define PHY_DIG1E_RX_ON_HOST 0x0800 +#define PHY_DIG1E_RCLK_REF_HOST 0x0400 +#define PHY_DIG1E_RCLK_TX_EN_KEEP 0x0040 +#define PHY_DIG1E_RCLK_TX_TERM_KEEP 0x0020 +#define PHY_DIG1E_RCLK_RX_EIDLE_ON 0x0010 +#define PHY_DIG1E_TX_TERM_KEEP 0x0008 +#define PHY_DIG1E_RX_TERM_KEEP 0x0004 +#define PHY_DIG1E_TX_EN_KEEP 0x0002 +#define PHY_DIG1E_RX_EN_KEEP 0x0001 +#define PHY_DUM_REG 0x1F + +#define PCR_ASPM_SETTING_REG1 0x160 +#define PCR_ASPM_SETTING_REG2 0x168 +#define PCR_ASPM_SETTING_5260 0x178 + +#define PCR_SETTING_REG1 0x724 +#define PCR_SETTING_REG2 0x814 +#define PCR_SETTING_REG3 0x747 + +#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0) + +#define RTS5227_DEVICE_ID 0x5227 +#define RTS_MAX_TIMES_FREQ_REDUCTION 8 + +struct rtsx_pcr; + +struct pcr_handle { + struct rtsx_pcr *pcr; +}; + +struct pcr_ops { + int (*write_phy)(struct rtsx_pcr *pcr, u8 addr, u16 val); + int (*read_phy)(struct rtsx_pcr *pcr, u8 addr, u16 *val); + int (*extra_init_hw)(struct rtsx_pcr *pcr); + int (*optimize_phy)(struct rtsx_pcr *pcr); + int (*turn_on_led)(struct rtsx_pcr *pcr); + int (*turn_off_led)(struct rtsx_pcr *pcr); + int (*enable_auto_blink)(struct rtsx_pcr *pcr); + int (*disable_auto_blink)(struct rtsx_pcr *pcr); + int (*card_power_on)(struct rtsx_pcr *pcr, int card); + int (*card_power_off)(struct rtsx_pcr *pcr, int card); + int 
(*switch_output_voltage)(struct rtsx_pcr *pcr, + u8 voltage); + unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr); + int (*conv_clk_and_div_n)(int clk, int dir); + void (*fetch_vendor_settings)(struct rtsx_pcr *pcr); + void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state); + void (*stop_cmd)(struct rtsx_pcr *pcr); + + void (*set_aspm)(struct rtsx_pcr *pcr, bool enable); + int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency); + int (*set_l1off_sub)(struct rtsx_pcr *pcr, u8 val); + void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active); + void (*full_on)(struct rtsx_pcr *pcr); + void (*power_saving)(struct rtsx_pcr *pcr); + void (*enable_ocp)(struct rtsx_pcr *pcr); + void (*disable_ocp)(struct rtsx_pcr *pcr); + void (*init_ocp)(struct rtsx_pcr *pcr); + void (*process_ocp)(struct rtsx_pcr *pcr); + int (*get_ocpstat)(struct rtsx_pcr *pcr, u8 *val); + void (*clear_ocpstat)(struct rtsx_pcr *pcr); +}; + +enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; + +#define ASPM_L1_1_EN_MASK BIT(3) +#define ASPM_L1_2_EN_MASK BIT(2) +#define PM_L1_1_EN_MASK BIT(1) +#define PM_L1_2_EN_MASK BIT(0) + +#define ASPM_L1_1_EN BIT(0) +#define ASPM_L1_2_EN BIT(1) +#define PM_L1_1_EN BIT(2) +#define PM_L1_2_EN BIT(3) +#define LTR_L1SS_PWR_GATE_EN BIT(4) +#define L1_SNOOZE_TEST_EN BIT(5) +#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6) + +enum dev_aspm_mode { + DEV_ASPM_DYNAMIC, + DEV_ASPM_BACKDOOR, + DEV_ASPM_STATIC, + DEV_ASPM_DISABLE, +}; + +/* + * struct rtsx_cr_option - card reader option + * @dev_flags: device flags + * @force_clkreq_0: force clock request + * @ltr_en: enable ltr mode flag + * @ltr_enabled: ltr mode in configure space flag + * @ltr_active: ltr mode status + * @ltr_active_latency: ltr mode active latency + * @ltr_idle_latency: ltr mode idle latency + * @ltr_l1off_latency: ltr mode l1off latency + * @dev_aspm_mode: device aspm mode + * @l1_snooze_delay: l1 snooze delay + * @ltr_l1off_sspwrgate: ltr l1off sspwrgate + * @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate + * @ocp_en: enable ocp flag + * @sd_400mA_ocp_thd: 400mA ocp thd + * @sd_800mA_ocp_thd: 800mA ocp thd + */ +struct rtsx_cr_option { + u32 dev_flags; + bool force_clkreq_0; + bool ltr_en; + bool ltr_enabled; + bool ltr_active; + u32 ltr_active_latency; + u32 ltr_idle_latency; + u32 ltr_l1off_latency; + enum dev_aspm_mode dev_aspm_mode; + u32 l1_snooze_delay; + u8 ltr_l1off_sspwrgate; + u8 ltr_l1off_snooze_sspwrgate; + bool ocp_en; + u8 sd_400mA_ocp_thd; + u8 sd_800mA_ocp_thd; +}; + +/* + * struct rtsx_hw_param - card reader hardware param + * @interrupt_en: indicate which interrutp enable + * @ocp_glitch: ocp glitch time + */ +struct rtsx_hw_param { + u32 interrupt_en; + u8 ocp_glitch; +}; + +#define rtsx_set_dev_flag(cr, flag) \ + ((cr)->option.dev_flags |= (flag)) +#define rtsx_clear_dev_flag(cr, flag) \ + ((cr)->option.dev_flags &= ~(flag)) +#define rtsx_check_dev_flag(cr, flag) \ + ((cr)->option.dev_flags & (flag)) + +struct rtsx_pcr { + struct pci_dev *pci; + unsigned int id; + int pcie_cap; + struct rtsx_cr_option option; + struct rtsx_hw_param hw_param; + + /* pci resources */ + unsigned long addr; + void __iomem *remap_addr; + int irq; + + /* host reserved buffer */ + void *rtsx_resv_buf; + dma_addr_t rtsx_resv_buf_addr; + + void *host_cmds_ptr; + dma_addr_t host_cmds_addr; + int ci; + + void *host_sg_tbl_ptr; + dma_addr_t host_sg_tbl_addr; + int sgi; + + u32 bier; + char trans_result; + + unsigned int card_inserted; + unsigned int card_removed; + unsigned int card_exist; + + struct delayed_work 
carddet_work; + struct delayed_work idle_work; + + spinlock_t lock; + struct mutex pcr_mutex; + struct completion *done; + struct completion *finish_me; + + unsigned int cur_clock; + bool remove_pci; + bool msi_en; + +#define EXTRA_CAPS_SD_SDR50 (1 << 0) +#define EXTRA_CAPS_SD_SDR104 (1 << 1) +#define EXTRA_CAPS_SD_DDR50 (1 << 2) +#define EXTRA_CAPS_MMC_HSDDR (1 << 3) +#define EXTRA_CAPS_MMC_HS200 (1 << 4) +#define EXTRA_CAPS_MMC_8BIT (1 << 5) + u32 extra_caps; + +#define IC_VER_A 0 +#define IC_VER_B 1 +#define IC_VER_C 2 +#define IC_VER_D 3 + u8 ic_version; + + u8 sd30_drive_sel_1v8; + u8 sd30_drive_sel_3v3; + u8 card_drive_sel; +#define ASPM_L1_EN 0x02 + u8 aspm_en; + bool aspm_enabled; + +#define PCR_MS_PMOS (1 << 0) +#define PCR_REVERSE_SOCKET (1 << 1) + u32 flags; + + u32 tx_initial_phase; + u32 rx_initial_phase; + + const u32 *sd_pull_ctl_enable_tbl; + const u32 *sd_pull_ctl_disable_tbl; + const u32 *ms_pull_ctl_enable_tbl; + const u32 *ms_pull_ctl_disable_tbl; + + const struct pcr_ops *ops; + enum PDEV_STAT state; + + u16 reg_pm_ctrl3; + + int num_slots; + struct rtsx_slot *slots; + + u8 dma_error_count; + u8 ocp_stat; + u8 ocp_stat2; +}; + +#define PID_524A 0x524A +#define PID_5249 0x5249 +#define PID_5250 0x5250 +#define PID_525A 0x525A +#define PID_5260 0x5260 + +#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid)) +#define PCI_VID(pcr) ((pcr)->pci->vendor) +#define PCI_PID(pcr) ((pcr)->pci->device) +#define is_version(pcr, pid, ver) \ + (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver)) +#define pcr_dbg(pcr, fmt, arg...) \ + dev_dbg(&(pcr)->pci->dev, fmt, ##arg) + +#define SDR104_PHASE(val) ((val) & 0xFF) +#define SDR50_PHASE(val) (((val) >> 8) & 0xFF) +#define DDR50_PHASE(val) (((val) >> 16) & 0xFF) +#define SDR104_TX_PHASE(pcr) SDR104_PHASE((pcr)->tx_initial_phase) +#define SDR50_TX_PHASE(pcr) SDR50_PHASE((pcr)->tx_initial_phase) +#define DDR50_TX_PHASE(pcr) DDR50_PHASE((pcr)->tx_initial_phase) +#define SDR104_RX_PHASE(pcr) SDR104_PHASE((pcr)->rx_initial_phase) +#define SDR50_RX_PHASE(pcr) SDR50_PHASE((pcr)->rx_initial_phase) +#define DDR50_RX_PHASE(pcr) DDR50_PHASE((pcr)->rx_initial_phase) +#define SET_CLOCK_PHASE(sdr104, sdr50, ddr50) \ + (((ddr50) << 16) | ((sdr50) << 8) | (sdr104)) + +void rtsx_pci_start_run(struct rtsx_pcr *pcr); +int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data); +int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data); +int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val); +int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val); +void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr); +void rtsx_pci_add_cmd(struct rtsx_pcr *pcr, + u8 cmd_type, u16 reg_addr, u8 mask, u8 data); +void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr); +int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout); +int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read, int timeout); +int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read); +void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read); +int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int count, bool read, int timeout); +int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len); +int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len); +int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card); +int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, 
int card); +int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); +int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card); +int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card); +int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card); +int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage); +unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr); +void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr); + +static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr) +{ + return (u8 *)(pcr->host_cmds_ptr); +} + +static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr, + u8 mask, u8 append) +{ + int err; + u8 val; + + err = pci_read_config_byte(pcr->pci, addr, &val); + if (err < 0) + return err; + return pci_write_config_byte(pcr->pci, addr, (val & mask) | append); +} + +static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val) +{ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 1, 0xFF, val >> 16); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val); +} + +static inline int rtsx_pci_update_phy(struct rtsx_pcr *pcr, u8 addr, + u16 mask, u16 append) +{ + int err; + u16 val; + + err = rtsx_pci_read_phy_register(pcr, addr, &val); + if (err < 0) + return err; + + return rtsx_pci_write_phy_register(pcr, addr, (val & mask) | append); +} + +#endif diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h new file mode 100644 index 000000000..c446e4fd6 --- /dev/null +++ b/include/linux/rtsx_usb.h @@ -0,0 +1,628 @@ +/* Driver for Realtek RTS5139 USB card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ * + * Author: + * Roger Tseng + */ + +#ifndef __RTSX_USB_H +#define __RTSX_USB_H + +#include + +/* related module names */ +#define RTSX_USB_SD_CARD 0 +#define RTSX_USB_MS_CARD 1 + +/* endpoint numbers */ +#define EP_BULK_OUT 1 +#define EP_BULK_IN 2 +#define EP_INTR_IN 3 + +/* USB vendor requests */ +#define RTSX_USB_REQ_REG_OP 0x00 +#define RTSX_USB_REQ_POLL 0x02 + +/* miscellaneous parameters */ +#define MIN_DIV_N 60 +#define MAX_DIV_N 120 + +#define MAX_PHASE 15 +#define RX_TUNING_CNT 3 + +#define QFN24 0 +#define LQFP48 1 +#define CHECK_PKG(ucr, pkg) ((ucr)->package == (pkg)) + +/* data structures */ +struct rtsx_ucr { + u16 vendor_id; + u16 product_id; + + int package; + u8 ic_version; + bool is_rts5179; + + unsigned int cur_clk; + + u8 *cmd_buf; + unsigned int cmd_idx; + u8 *rsp_buf; + + struct usb_device *pusb_dev; + struct usb_interface *pusb_intf; + struct usb_sg_request current_sg; + unsigned char *iobuf; + dma_addr_t iobuf_dma; + + struct timer_list sg_timer; + struct mutex dev_mutex; +}; + +/* buffer size */ +#define IOBUF_SIZE 1024 + +/* prototypes of exported functions */ +extern int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status); + +extern int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data); +extern int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, + u8 data); + +extern int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, + u8 data); +extern int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, + u8 *data); + +extern void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, + u16 reg_addr, u8 mask, u8 data); +extern int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout); +extern int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout); +extern int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe, + void *buf, unsigned int len, int use_sg, + unsigned int *act_len, int timeout); + +extern int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); +extern int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); +extern int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); +extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card); + +/* card status */ +#define SD_CD 0x01 +#define MS_CD 0x02 +#define XD_CD 0x04 +#define CD_MASK (SD_CD | MS_CD | XD_CD) +#define SD_WP 0x08 + +/* reader command field offset & parameters */ +#define READ_REG_CMD 0 +#define WRITE_REG_CMD 1 +#define CHECK_REG_CMD 2 + +#define PACKET_TYPE 4 +#define CNT_H 5 +#define CNT_L 6 +#define STAGE_FLAG 7 +#define CMD_OFFSET 8 +#define SEQ_WRITE_DATA_OFFSET 12 + +#define BATCH_CMD 0 +#define SEQ_READ 1 +#define SEQ_WRITE 2 + +#define STAGE_R 0x01 +#define STAGE_DI 0x02 +#define STAGE_DO 0x04 +#define STAGE_MS_STATUS 0x08 +#define STAGE_XD_STATUS 0x10 +#define MODE_C 0x00 +#define MODE_CR (STAGE_R) +#define MODE_CDIR (STAGE_R | STAGE_DI) +#define MODE_CDOR (STAGE_R | STAGE_DO) + +#define EP0_OP_SHIFT 14 +#define EP0_READ_REG_CMD 2 +#define EP0_WRITE_REG_CMD 3 + +#define rtsx_usb_cmd_hdr_tag(ucr) \ + do { \ + ucr->cmd_buf[0] = 'R'; \ + ucr->cmd_buf[1] = 'T'; \ + ucr->cmd_buf[2] = 'C'; \ + ucr->cmd_buf[3] = 'R'; \ + } while (0) + +static inline void rtsx_usb_init_cmd(struct rtsx_ucr *ucr) +{ + rtsx_usb_cmd_hdr_tag(ucr); + ucr->cmd_idx = 0; + ucr->cmd_buf[PACKET_TYPE] = BATCH_CMD; +} + +/* internal register address */ +#define FPDCTL 0xFC00 +#define SSC_DIV_N_0 0xFC07 +#define 
SSC_CTL1 0xFC09 +#define SSC_CTL2 0xFC0A +#define CFG_MODE 0xFC0E +#define CFG_MODE_1 0xFC0F +#define RCCTL 0xFC14 +#define SOF_WDOG 0xFC28 +#define SYS_DUMMY0 0xFC30 + +#define MS_BLKEND 0xFD30 +#define MS_READ_START 0xFD31 +#define MS_READ_COUNT 0xFD32 +#define MS_WRITE_START 0xFD33 +#define MS_WRITE_COUNT 0xFD34 +#define MS_COMMAND 0xFD35 +#define MS_OLD_BLOCK_0 0xFD36 +#define MS_OLD_BLOCK_1 0xFD37 +#define MS_NEW_BLOCK_0 0xFD38 +#define MS_NEW_BLOCK_1 0xFD39 +#define MS_LOG_BLOCK_0 0xFD3A +#define MS_LOG_BLOCK_1 0xFD3B +#define MS_BUS_WIDTH 0xFD3C +#define MS_PAGE_START 0xFD3D +#define MS_PAGE_LENGTH 0xFD3E +#define MS_CFG 0xFD40 +#define MS_TPC 0xFD41 +#define MS_TRANS_CFG 0xFD42 +#define MS_TRANSFER 0xFD43 +#define MS_INT_REG 0xFD44 +#define MS_BYTE_CNT 0xFD45 +#define MS_SECTOR_CNT_L 0xFD46 +#define MS_SECTOR_CNT_H 0xFD47 +#define MS_DBUS_H 0xFD48 + +#define CARD_DMA1_CTL 0xFD5C +#define CARD_PULL_CTL1 0xFD60 +#define CARD_PULL_CTL2 0xFD61 +#define CARD_PULL_CTL3 0xFD62 +#define CARD_PULL_CTL4 0xFD63 +#define CARD_PULL_CTL5 0xFD64 +#define CARD_PULL_CTL6 0xFD65 +#define CARD_EXIST 0xFD6F +#define CARD_INT_PEND 0xFD71 + +#define LDO_POWER_CFG 0xFD7B + +#define SD_CFG1 0xFDA0 +#define SD_CFG2 0xFDA1 +#define SD_CFG3 0xFDA2 +#define SD_STAT1 0xFDA3 +#define SD_STAT2 0xFDA4 +#define SD_BUS_STAT 0xFDA5 +#define SD_PAD_CTL 0xFDA6 +#define SD_SAMPLE_POINT_CTL 0xFDA7 +#define SD_PUSH_POINT_CTL 0xFDA8 +#define SD_CMD0 0xFDA9 +#define SD_CMD1 0xFDAA +#define SD_CMD2 0xFDAB +#define SD_CMD3 0xFDAC +#define SD_CMD4 0xFDAD +#define SD_CMD5 0xFDAE +#define SD_BYTE_CNT_L 0xFDAF +#define SD_BYTE_CNT_H 0xFDB0 +#define SD_BLOCK_CNT_L 0xFDB1 +#define SD_BLOCK_CNT_H 0xFDB2 +#define SD_TRANSFER 0xFDB3 +#define SD_CMD_STATE 0xFDB5 +#define SD_DATA_STATE 0xFDB6 +#define SD_VPCLK0_CTL 0xFC2A +#define SD_VPCLK1_CTL 0xFC2B +#define SD_DCMPS0_CTL 0xFC2C +#define SD_DCMPS1_CTL 0xFC2D + +#define CARD_DMA1_CTL 0xFD5C + +#define HW_VERSION 0xFC01 + +#define SSC_CLK_FPGA_SEL 0xFC02 +#define CLK_DIV 0xFC03 +#define SFSM_ED 0xFC04 + +#define CD_DEGLITCH_WIDTH 0xFC20 +#define CD_DEGLITCH_EN 0xFC21 +#define AUTO_DELINK_EN 0xFC23 + +#define FPGA_PULL_CTL 0xFC1D +#define CARD_CLK_SOURCE 0xFC2E + +#define CARD_SHARE_MODE 0xFD51 +#define CARD_DRIVE_SEL 0xFD52 +#define CARD_STOP 0xFD53 +#define CARD_OE 0xFD54 +#define CARD_AUTO_BLINK 0xFD55 +#define CARD_GPIO 0xFD56 +#define SD30_DRIVE_SEL 0xFD57 + +#define CARD_DATA_SOURCE 0xFD5D +#define CARD_SELECT 0xFD5E + +#define CARD_CLK_EN 0xFD79 +#define CARD_PWR_CTL 0xFD7A + +#define OCPCTL 0xFD80 +#define OCPPARA1 0xFD81 +#define OCPPARA2 0xFD82 +#define OCPSTAT 0xFD83 + +#define HS_USB_STAT 0xFE01 +#define HS_VCONTROL 0xFE26 +#define HS_VSTAIN 0xFE27 +#define HS_VLOADM 0xFE28 +#define HS_VSTAOUT 0xFE29 + +#define MC_IRQ 0xFF00 +#define MC_IRQEN 0xFF01 +#define MC_FIFO_CTL 0xFF02 +#define MC_FIFO_BC0 0xFF03 +#define MC_FIFO_BC1 0xFF04 +#define MC_FIFO_STAT 0xFF05 +#define MC_FIFO_MODE 0xFF06 +#define MC_FIFO_RD_PTR0 0xFF07 +#define MC_FIFO_RD_PTR1 0xFF08 +#define MC_DMA_CTL 0xFF10 +#define MC_DMA_TC0 0xFF11 +#define MC_DMA_TC1 0xFF12 +#define MC_DMA_TC2 0xFF13 +#define MC_DMA_TC3 0xFF14 +#define MC_DMA_RST 0xFF15 + +#define RBUF_SIZE_MASK 0xFBFF +#define RBUF_BASE 0xF000 +#define PPBUF_BASE1 0xF800 +#define PPBUF_BASE2 0xFA00 + +/* internal register value macros */ +#define POWER_OFF 0x03 +#define PARTIAL_POWER_ON 0x02 +#define POWER_ON 0x00 +#define POWER_MASK 0x03 +#define LDO3318_PWR_MASK 0x0C +#define LDO_ON 0x00 +#define LDO_SUSPEND 0x08 +#define LDO_OFF 0x0C +#define 
DV3318_AUTO_PWR_OFF 0x10 +#define FORCE_LDO_POWERB 0x60 + +/* LDO_POWER_CFG */ +#define TUNE_SD18_MASK 0x1C +#define TUNE_SD18_1V7 0x00 +#define TUNE_SD18_1V8 (0x01 << 2) +#define TUNE_SD18_1V9 (0x02 << 2) +#define TUNE_SD18_2V0 (0x03 << 2) +#define TUNE_SD18_2V7 (0x04 << 2) +#define TUNE_SD18_2V8 (0x05 << 2) +#define TUNE_SD18_2V9 (0x06 << 2) +#define TUNE_SD18_3V3 (0x07 << 2) + +/* CLK_DIV */ +#define CLK_CHANGE 0x80 +#define CLK_DIV_1 0x00 +#define CLK_DIV_2 0x01 +#define CLK_DIV_4 0x02 +#define CLK_DIV_8 0x03 + +#define SSC_POWER_MASK 0x01 +#define SSC_POWER_DOWN 0x01 +#define SSC_POWER_ON 0x00 + +#define FPGA_VER 0x80 +#define HW_VER_MASK 0x0F + +#define EXTEND_DMA1_ASYNC_SIGNAL 0x02 + +/* CFG_MODE*/ +#define XTAL_FREE 0x80 +#define CLK_MODE_MASK 0x03 +#define CLK_MODE_12M_XTAL 0x00 +#define CLK_MODE_NON_XTAL 0x01 +#define CLK_MODE_24M_OSC 0x02 +#define CLK_MODE_48M_OSC 0x03 + +/* CFG_MODE_1*/ +#define RTS5179 0x02 + +#define NYET_EN 0x01 +#define NYET_MSAK 0x01 + +#define SD30_DRIVE_MASK 0x07 +#define SD20_DRIVE_MASK 0x03 + +#define DISABLE_SD_CD 0x08 +#define DISABLE_MS_CD 0x10 +#define DISABLE_XD_CD 0x20 +#define SD_CD_DEGLITCH_EN 0x01 +#define MS_CD_DEGLITCH_EN 0x02 +#define XD_CD_DEGLITCH_EN 0x04 + +#define CARD_SHARE_LQFP48 0x04 +#define CARD_SHARE_QFN24 0x00 +#define CARD_SHARE_LQFP_SEL 0x04 +#define CARD_SHARE_XD 0x00 +#define CARD_SHARE_SD 0x01 +#define CARD_SHARE_MS 0x02 +#define CARD_SHARE_MASK 0x03 + + +/* SD30_DRIVE_SEL */ +#define DRIVER_TYPE_A 0x05 +#define DRIVER_TYPE_B 0x03 +#define DRIVER_TYPE_C 0x02 +#define DRIVER_TYPE_D 0x01 + +/* SD_BUS_STAT */ +#define SD_CLK_TOGGLE_EN 0x80 +#define SD_CLK_FORCE_STOP 0x40 +#define SD_DAT3_STATUS 0x10 +#define SD_DAT2_STATUS 0x08 +#define SD_DAT1_STATUS 0x04 +#define SD_DAT0_STATUS 0x02 +#define SD_CMD_STATUS 0x01 + +/* SD_PAD_CTL */ +#define SD_IO_USING_1V8 0x80 +#define SD_IO_USING_3V3 0x7F +#define TYPE_A_DRIVING 0x00 +#define TYPE_B_DRIVING 0x01 +#define TYPE_C_DRIVING 0x02 +#define TYPE_D_DRIVING 0x03 + +/* CARD_CLK_EN */ +#define SD_CLK_EN 0x04 +#define MS_CLK_EN 0x08 + +/* CARD_SELECT */ +#define SD_MOD_SEL 2 +#define MS_MOD_SEL 3 + +/* CARD_SHARE_MODE */ +#define CARD_SHARE_LQFP48 0x04 +#define CARD_SHARE_QFN24 0x00 +#define CARD_SHARE_LQFP_SEL 0x04 +#define CARD_SHARE_XD 0x00 +#define CARD_SHARE_SD 0x01 +#define CARD_SHARE_MS 0x02 +#define CARD_SHARE_MASK 0x03 + +/* SSC_CTL1 */ +#define SSC_RSTB 0x80 +#define SSC_8X_EN 0x40 +#define SSC_FIX_FRAC 0x20 +#define SSC_SEL_1M 0x00 +#define SSC_SEL_2M 0x08 +#define SSC_SEL_4M 0x10 +#define SSC_SEL_8M 0x18 + +/* SSC_CTL2 */ +#define SSC_DEPTH_MASK 0x03 +#define SSC_DEPTH_DISALBE 0x00 +#define SSC_DEPTH_2M 0x01 +#define SSC_DEPTH_1M 0x02 +#define SSC_DEPTH_512K 0x03 + +/* SD_VPCLK0_CTL */ +#define PHASE_CHANGE 0x80 +#define PHASE_NOT_RESET 0x40 + +/* SD_TRANSFER */ +#define SD_TRANSFER_START 0x80 +#define SD_TRANSFER_END 0x40 +#define SD_STAT_IDLE 0x20 +#define SD_TRANSFER_ERR 0x10 +#define SD_TM_NORMAL_WRITE 0x00 +#define SD_TM_AUTO_WRITE_3 0x01 +#define SD_TM_AUTO_WRITE_4 0x02 +#define SD_TM_AUTO_READ_3 0x05 +#define SD_TM_AUTO_READ_4 0x06 +#define SD_TM_CMD_RSP 0x08 +#define SD_TM_AUTO_WRITE_1 0x09 +#define SD_TM_AUTO_WRITE_2 0x0A +#define SD_TM_NORMAL_READ 0x0C +#define SD_TM_AUTO_READ_1 0x0D +#define SD_TM_AUTO_READ_2 0x0E +#define SD_TM_AUTO_TUNING 0x0F + +/* SD_CFG1 */ +#define SD_CLK_DIVIDE_0 0x00 +#define SD_CLK_DIVIDE_256 0xC0 +#define SD_CLK_DIVIDE_128 0x80 +#define SD_CLK_DIVIDE_MASK 0xC0 +#define SD_BUS_WIDTH_1BIT 0x00 +#define SD_BUS_WIDTH_4BIT 0x01 +#define 
SD_BUS_WIDTH_8BIT 0x02 +#define SD_ASYNC_FIFO_RST 0x10 +#define SD_20_MODE 0x00 +#define SD_DDR_MODE 0x04 +#define SD_30_MODE 0x08 + +/* SD_CFG2 */ +#define SD_CALCULATE_CRC7 0x00 +#define SD_NO_CALCULATE_CRC7 0x80 +#define SD_CHECK_CRC16 0x00 +#define SD_NO_CHECK_CRC16 0x40 +#define SD_WAIT_CRC_TO_EN 0x20 +#define SD_WAIT_BUSY_END 0x08 +#define SD_NO_WAIT_BUSY_END 0x00 +#define SD_CHECK_CRC7 0x00 +#define SD_NO_CHECK_CRC7 0x04 +#define SD_RSP_LEN_0 0x00 +#define SD_RSP_LEN_6 0x01 +#define SD_RSP_LEN_17 0x02 +#define SD_RSP_TYPE_R0 0x04 +#define SD_RSP_TYPE_R1 0x01 +#define SD_RSP_TYPE_R1b 0x09 +#define SD_RSP_TYPE_R2 0x02 +#define SD_RSP_TYPE_R3 0x05 +#define SD_RSP_TYPE_R4 0x05 +#define SD_RSP_TYPE_R5 0x01 +#define SD_RSP_TYPE_R6 0x01 +#define SD_RSP_TYPE_R7 0x01 + +/* SD_STAT1 */ +#define SD_CRC7_ERR 0x80 +#define SD_CRC16_ERR 0x40 +#define SD_CRC_WRITE_ERR 0x20 +#define SD_CRC_WRITE_ERR_MASK 0x1C +#define GET_CRC_TIME_OUT 0x02 +#define SD_TUNING_COMPARE_ERR 0x01 + +/* SD_DATA_STATE */ +#define SD_DATA_IDLE 0x80 + +/* CARD_DATA_SOURCE */ +#define PINGPONG_BUFFER 0x01 +#define RING_BUFFER 0x00 + +/* CARD_OE */ +#define SD_OUTPUT_EN 0x04 +#define MS_OUTPUT_EN 0x08 + +/* CARD_STOP */ +#define SD_STOP 0x04 +#define MS_STOP 0x08 +#define SD_CLR_ERR 0x40 +#define MS_CLR_ERR 0x80 + +/* CARD_CLK_SOURCE */ +#define CRC_FIX_CLK (0x00 << 0) +#define CRC_VAR_CLK0 (0x01 << 0) +#define CRC_VAR_CLK1 (0x02 << 0) +#define SD30_FIX_CLK (0x00 << 2) +#define SD30_VAR_CLK0 (0x01 << 2) +#define SD30_VAR_CLK1 (0x02 << 2) +#define SAMPLE_FIX_CLK (0x00 << 4) +#define SAMPLE_VAR_CLK0 (0x01 << 4) +#define SAMPLE_VAR_CLK1 (0x02 << 4) + +/* SD_SAMPLE_POINT_CTL */ +#define DDR_FIX_RX_DAT 0x00 +#define DDR_VAR_RX_DAT 0x80 +#define DDR_FIX_RX_DAT_EDGE 0x00 +#define DDR_FIX_RX_DAT_14_DELAY 0x40 +#define DDR_FIX_RX_CMD 0x00 +#define DDR_VAR_RX_CMD 0x20 +#define DDR_FIX_RX_CMD_POS_EDGE 0x00 +#define DDR_FIX_RX_CMD_14_DELAY 0x10 +#define SD20_RX_POS_EDGE 0x00 +#define SD20_RX_14_DELAY 0x08 +#define SD20_RX_SEL_MASK 0x08 + +/* SD_PUSH_POINT_CTL */ +#define DDR_FIX_TX_CMD_DAT 0x00 +#define DDR_VAR_TX_CMD_DAT 0x80 +#define DDR_FIX_TX_DAT_14_TSU 0x00 +#define DDR_FIX_TX_DAT_12_TSU 0x40 +#define DDR_FIX_TX_CMD_NEG_EDGE 0x00 +#define DDR_FIX_TX_CMD_14_AHEAD 0x20 +#define SD20_TX_NEG_EDGE 0x00 +#define SD20_TX_14_AHEAD 0x10 +#define SD20_TX_SEL_MASK 0x10 +#define DDR_VAR_SDCLK_POL_SWAP 0x01 + +/* MS_CFG */ +#define SAMPLE_TIME_RISING 0x00 +#define SAMPLE_TIME_FALLING 0x80 +#define PUSH_TIME_DEFAULT 0x00 +#define PUSH_TIME_ODD 0x40 +#define NO_EXTEND_TOGGLE 0x00 +#define EXTEND_TOGGLE_CHK 0x20 +#define MS_BUS_WIDTH_1 0x00 +#define MS_BUS_WIDTH_4 0x10 +#define MS_BUS_WIDTH_8 0x18 +#define MS_2K_SECTOR_MODE 0x04 +#define MS_512_SECTOR_MODE 0x00 +#define MS_TOGGLE_TIMEOUT_EN 0x00 +#define MS_TOGGLE_TIMEOUT_DISEN 0x01 +#define MS_NO_CHECK_INT 0x02 + +/* MS_TRANS_CFG */ +#define WAIT_INT 0x80 +#define NO_WAIT_INT 0x00 +#define NO_AUTO_READ_INT_REG 0x00 +#define AUTO_READ_INT_REG 0x40 +#define MS_CRC16_ERR 0x20 +#define MS_RDY_TIMEOUT 0x10 +#define MS_INT_CMDNK 0x08 +#define MS_INT_BREQ 0x04 +#define MS_INT_ERR 0x02 +#define MS_INT_CED 0x01 + +/* MS_TRANSFER */ +#define MS_TRANSFER_START 0x80 +#define MS_TRANSFER_END 0x40 +#define MS_TRANSFER_ERR 0x20 +#define MS_BS_STATE 0x10 +#define MS_TM_READ_BYTES 0x00 +#define MS_TM_NORMAL_READ 0x01 +#define MS_TM_WRITE_BYTES 0x04 +#define MS_TM_NORMAL_WRITE 0x05 +#define MS_TM_AUTO_READ 0x08 +#define MS_TM_AUTO_WRITE 0x0C +#define MS_TM_SET_CMD 0x06 +#define MS_TM_COPY_PAGE 0x07 +#define 
MS_TM_MULTI_READ 0x02 +#define MS_TM_MULTI_WRITE 0x03 + +/* MC_FIFO_CTL */ +#define FIFO_FLUSH 0x01 + +/* MC_DMA_RST */ +#define DMA_RESET 0x01 + +/* MC_DMA_CTL */ +#define DMA_TC_EQ_0 0x80 +#define DMA_DIR_TO_CARD 0x00 +#define DMA_DIR_FROM_CARD 0x02 +#define DMA_EN 0x01 +#define DMA_128 (0 << 2) +#define DMA_256 (1 << 2) +#define DMA_512 (2 << 2) +#define DMA_1024 (3 << 2) +#define DMA_PACK_SIZE_MASK 0x0C + +/* CARD_INT_PEND */ +#define XD_INT 0x10 +#define MS_INT 0x08 +#define SD_INT 0x04 + +/* LED operations*/ +static inline int rtsx_usb_turn_on_led(struct rtsx_ucr *ucr) +{ + return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x02); +} + +static inline int rtsx_usb_turn_off_led(struct rtsx_ucr *ucr) +{ + return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x03); +} + +/* HW error clearing */ +static inline void rtsx_usb_clear_fsm_err(struct rtsx_ucr *ucr) +{ + rtsx_usb_ep0_write_register(ucr, SFSM_ED, 0xf8, 0xf8); +} + +static inline void rtsx_usb_clear_dma_err(struct rtsx_ucr *ucr) +{ + rtsx_usb_ep0_write_register(ucr, MC_FIFO_CTL, + FIFO_FLUSH, FIFO_FLUSH); + rtsx_usb_ep0_write_register(ucr, MC_DMA_RST, DMA_RESET, DMA_RESET); +} +#endif /* __RTS51139_H */ diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h new file mode 100644 index 000000000..3dcd617e6 --- /dev/null +++ b/include/linux/rwlock.h @@ -0,0 +1,131 @@ +#ifndef __LINUX_RWLOCK_H +#define __LINUX_RWLOCK_H + +#ifndef __LINUX_SPINLOCK_H +# error "please don't include this file directly" +#endif + +/* + * rwlock related methods + * + * split out from spinlock.h + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key); +# define rwlock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __rwlock_init((lock), #lock, &__key); \ +} while (0) +#else +# define rwlock_init(lock) \ + do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock); +#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock) + extern int do_raw_read_trylock(rwlock_t *lock); + extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock); + extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock); +#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock) + extern int do_raw_write_trylock(rwlock_t *lock); + extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock); +#else + +#ifndef arch_read_lock_flags +# define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#endif + +#ifndef arch_write_lock_flags +# define arch_write_lock_flags(lock, flags) arch_write_lock(lock) +#endif + +# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0) +# define do_raw_read_lock_flags(lock, flags) \ + do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0) +# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) +# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) +# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0) +# define do_raw_write_lock_flags(lock, flags) \ + do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0) +# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) 
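
The do_raw_* macros above are the arch-facing layer; consumers use the public wrappers defined just below (read_lock(), read_unlock(), write_lock_irqsave(), and so on). The sketch below is an illustrative usage example only: DEFINE_RWLOCK() comes from linux/rwlock_types.h, the list helpers from linux/list.h, and the data structure is hypothetical.

/* Illustrative only: typical reader/writer lock usage. */
static DEFINE_RWLOCK(example_lock);
static LIST_HEAD(example_list);

struct example_entry {
	struct list_head node;
	int value;
};

/* many readers may hold the lock concurrently */
static bool example_contains(int value)
{
	struct example_entry *e;
	bool found = false;

	read_lock(&example_lock);
	list_for_each_entry(e, &example_list, node) {
		if (e->value == value) {
			found = true;
			break;
		}
	}
	read_unlock(&example_lock);
	return found;
}

/* writers are exclusive; disabling IRQs avoids recursion from IRQ context */
static void example_add(struct example_entry *e)
{
	unsigned long flags;

	write_lock_irqsave(&example_lock, flags);
	list_add(&e->node, &example_list);
	write_unlock_irqrestore(&example_lock, flags);
}
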
+# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) +#endif + +/* + * Define the various rw_lock methods. Note we define these + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various + * methods are defined as nops in the case they are not required. + */ +#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock)) +#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock)) + +#define write_lock(lock) _raw_write_lock(lock) +#define read_lock(lock) _raw_read_lock(lock) + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_read_lock_irqsave(lock); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_write_lock_irqsave(lock); \ + } while (0) + +#else + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_read_lock_irqsave(lock, flags); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_write_lock_irqsave(lock, flags); \ + } while (0) + +#endif + +#define read_lock_irq(lock) _raw_read_lock_irq(lock) +#define read_lock_bh(lock) _raw_read_lock_bh(lock) +#define write_lock_irq(lock) _raw_write_lock_irq(lock) +#define write_lock_bh(lock) _raw_write_lock_bh(lock) +#define read_unlock(lock) _raw_read_unlock(lock) +#define write_unlock(lock) _raw_write_unlock(lock) +#define read_unlock_irq(lock) _raw_read_unlock_irq(lock) +#define write_unlock_irq(lock) _raw_write_unlock_irq(lock) + +#define read_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_read_unlock_irqrestore(lock, flags); \ + } while (0) +#define read_unlock_bh(lock) _raw_read_unlock_bh(lock) + +#define write_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_write_unlock_irqrestore(lock, flags); \ + } while (0) +#define write_unlock_bh(lock) _raw_write_unlock_bh(lock) + +#define write_trylock_irqsave(lock, flags) \ +({ \ + local_irq_save(flags); \ + write_trylock(lock) ? \ + 1 : ({ local_irq_restore(flags); 0; }); \ +}) + +#endif /* __LINUX_RWLOCK_H */ diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h new file mode 100644 index 000000000..86ebb4bf9 --- /dev/null +++ b/include/linux/rwlock_api_smp.h @@ -0,0 +1,278 @@ +#ifndef __LINUX_RWLOCK_API_SMP_H +#define __LINUX_RWLOCK_API_SMP_H + +#ifndef __LINUX_SPINLOCK_API_SMP_H +# error "please don't include this file directly" +#endif + +/* + * include/linux/rwlock_api_smp.h + * + * spinlock API declarations on SMP (and debug) + * (implemented in kernel/spinlock.c) + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). 
+ */ + +void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock); +unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) + __acquires(lock); +unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) + __acquires(lock); +int __lockfunc _raw_read_trylock(rwlock_t *lock); +int __lockfunc _raw_write_trylock(rwlock_t *lock); +void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock); +void __lockfunc +_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) + __releases(lock); +void __lockfunc +_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) + __releases(lock); + +#ifdef CONFIG_INLINE_READ_LOCK +#define _raw_read_lock(lock) __raw_read_lock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK +#define _raw_write_lock(lock) __raw_write_lock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_BH +#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_BH +#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_IRQ +#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ +#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE +#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE +#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock) +#endif + +#ifdef CONFIG_INLINE_READ_TRYLOCK +#define _raw_read_trylock(lock) __raw_read_trylock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_TRYLOCK +#define _raw_write_trylock(lock) __raw_write_trylock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK +#define _raw_read_unlock(lock) __raw_read_unlock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK +#define _raw_write_unlock(lock) __raw_write_unlock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_BH +#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH +#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ +#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ +#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE +#define _raw_read_unlock_irqrestore(lock, flags) \ + __raw_read_unlock_irqrestore(lock, flags) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE +#define _raw_write_unlock_irqrestore(lock, flags) \ + __raw_write_unlock_irqrestore(lock, flags) +#endif + +static inline int __raw_read_trylock(rwlock_t *lock) +{ + preempt_disable(); + if (do_raw_read_trylock(lock)) { + rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + 
return 0; +} + +static inline int __raw_write_trylock(rwlock_t *lock) +{ + preempt_disable(); + if (do_raw_write_trylock(lock)) { + rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + return 0; +} + +/* + * If lockdep is enabled then we use the non-preemption spin-ops + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are + * not re-enabled during lock-acquire (which the preempt-spin-ops do): + */ +#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) + +static inline void __raw_read_lock(rwlock_t *lock) +{ + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); +} + +static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock, + do_raw_read_lock_flags, &flags); + return flags; +} + +static inline void __raw_read_lock_irq(rwlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); +} + +static inline void __raw_read_lock_bh(rwlock_t *lock) +{ + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); +} + +static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock, + do_raw_write_lock_flags, &flags); + return flags; +} + +static inline void __raw_write_lock_irq(rwlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); +} + +static inline void __raw_write_lock_bh(rwlock_t *lock) +{ + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); +} + +static inline void __raw_write_lock(rwlock_t *lock) +{ + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); +} + +#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ + +static inline void __raw_write_unlock(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + preempt_enable(); +} + +static inline void __raw_read_unlock(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + preempt_enable(); +} + +static inline void +__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __raw_read_unlock_irq(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __raw_read_unlock_bh(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); +} + +static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, + unsigned long 
flags) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __raw_write_unlock_irq(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __raw_write_unlock_bh(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); +} + +#endif /* __LINUX_RWLOCK_API_SMP_H */ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h new file mode 100644 index 000000000..857a72ceb --- /dev/null +++ b/include/linux/rwlock_types.h @@ -0,0 +1,45 @@ +#ifndef __LINUX_RWLOCK_TYPES_H +#define __LINUX_RWLOCK_TYPES_H + +/* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ +typedef struct { + arch_rwlock_t raw_lock; +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +#define RWLOCK_MAGIC 0xdeaf1eed + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +#define __RW_LOCK_UNLOCKED(lockname) \ + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ + .magic = RWLOCK_MAGIC, \ + .owner = SPINLOCK_OWNER_INIT, \ + .owner_cpu = -1, \ + RW_DEP_MAP_INIT(lockname) } +#else +#define __RW_LOCK_UNLOCKED(lockname) \ + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ + RW_DEP_MAP_INIT(lockname) } +#endif + +#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) + +#endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h new file mode 100644 index 000000000..e47568363 --- /dev/null +++ b/include/linux/rwsem-spinlock.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* rwsem-spinlock.h: fallback C implementation + * + * Copyright (c) 2001 David Howells (dhowells@redhat.com). 
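rwlock_types.h above also covers the dynamic case: a lock embedded in an allocated object cannot use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() and is set up with rwlock_init() instead, which passes a static lock_class_key to __rwlock_init() when CONFIG_DEBUG_SPINLOCK is enabled. A short sketch, assuming a made-up container struct:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_table {                       /* hypothetical container object */
        rwlock_t lock;
        unsigned int nr_entries;
};

static struct my_table *my_table_alloc(void)
{
        struct my_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return NULL;
        rwlock_init(&t->lock);          /* run-time initializer, sets up the debug/lockdep state */
        return t;
}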
+ * - Derived partially from ideas by Andrea Arcangeli + * - Derived also from comments by Linus + */ + +#ifndef _LINUX_RWSEM_SPINLOCK_H +#define _LINUX_RWSEM_SPINLOCK_H + +#ifndef _LINUX_RWSEM_H +#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead" +#endif + +#ifdef __KERNEL__ +/* + * the rw-semaphore definition + * - if count is 0 then there are no active readers or writers + * - if count is +ve then that is the number of active readers + * - if count is -1 then there is one active writer + * - if wait_list is not empty, then there are processes waiting for the semaphore + */ +struct rw_semaphore { + __s32 count; + raw_spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define RWSEM_UNLOCKED_VALUE 0x00000000 + +extern void __down_read(struct rw_semaphore *sem); +extern int __must_check __down_read_killable(struct rw_semaphore *sem); +extern int __down_read_trylock(struct rw_semaphore *sem); +extern void __down_write(struct rw_semaphore *sem); +extern int __must_check __down_write_killable(struct rw_semaphore *sem); +extern int __down_write_trylock(struct rw_semaphore *sem); +extern void __up_read(struct rw_semaphore *sem); +extern void __up_write(struct rw_semaphore *sem); +extern void __downgrade_write(struct rw_semaphore *sem); +extern int rwsem_is_locked(struct rw_semaphore *sem); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_RWSEM_SPINLOCK_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h new file mode 100644 index 000000000..ab93b6eae --- /dev/null +++ b/include/linux/rwsem.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* rwsem.h: R/W semaphores, public interface + * + * Written by David Howells (dhowells@redhat.com). + * Derived from asm-i386/semaphore.h + */ + +#ifndef _LINUX_RWSEM_H +#define _LINUX_RWSEM_H + +#include + +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER +#include +#endif + +struct rw_semaphore; + +#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +#include /* use a generic implementation */ +#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE +#else +/* All arch specific implementations share the same struct */ +struct rw_semaphore { + atomic_long_t count; + struct list_head wait_list; + raw_spinlock_t wait_lock; +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER + struct optimistic_spin_queue osq; /* spinner MCS lock */ + /* + * Write owner. Used as a speculative check to see + * if the owner is running on the cpu. + */ + struct task_struct *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +/* + * Setting bit 0 of the owner field with other non-zero bits will indicate + * that the rwsem is writer-owned with an unknown owner. 
+ */ +#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L) + +extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); +extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); + +/* Include the arch specific part */ +#include + +/* In all implementations count != 0 means locked */ +static inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return atomic_long_read(&sem->count) != 0; +} + +#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) +#endif + +/* Common initializer macros and functions */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER +#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL +#else +#define __RWSEM_OPT_INIT(lockname) +#endif + +#define __RWSEM_INITIALIZER(name) \ + { __RWSEM_INIT_COUNT(name), \ + .wait_list = LIST_HEAD_INIT((name).wait_list), \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ + __RWSEM_OPT_INIT(name) \ + __RWSEM_DEP_MAP_INIT(name) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + +/* + * This is the same regardless of which rwsem implementation that is being used. + * It is just a heuristic meant to be called by somebody alreadying holding the + * rwsem to see if somebody from an incompatible type is wanting access to the + * lock. + */ +static inline int rwsem_is_contended(struct rw_semaphore *sem) +{ + return !list_empty(&sem->wait_list); +} + +/* + * lock for reading + */ +extern void down_read(struct rw_semaphore *sem); +extern int __must_check down_read_killable(struct rw_semaphore *sem); + +/* + * trylock for reading -- returns 1 if successful, 0 if contention + */ +extern int down_read_trylock(struct rw_semaphore *sem); + +/* + * lock for writing + */ +extern void down_write(struct rw_semaphore *sem); +extern int __must_check down_write_killable(struct rw_semaphore *sem); + +/* + * trylock for writing -- returns 1 if successful, 0 if contention + */ +extern int down_write_trylock(struct rw_semaphore *sem); + +/* + * release a read lock + */ +extern void up_read(struct rw_semaphore *sem); + +/* + * release a write lock + */ +extern void up_write(struct rw_semaphore *sem); + +/* + * downgrade write lock to read lock + */ +extern void downgrade_write(struct rw_semaphore *sem); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/* + * nested locking. NOTE: rwsems are not allowed to recurse + * (which occurs if the same task tries to acquire the same + * lock instance multiple times), but multiple locks of the + * same lock class might be taken, if the order of the locks + * is always the same. This ordering rule can be expressed + * to lockdep via the _nested() APIs, but enumerating the + * subclasses that are used. 
(If the nesting relationship is + * static then another method for expressing nested locking is + * the explicit definition of lock class keys and the use of + * lockdep_set_class() at lock initialization time. + * See Documentation/locking/lockdep-design.txt for more details.) + */ +extern void down_read_nested(struct rw_semaphore *sem, int subclass); +extern void down_write_nested(struct rw_semaphore *sem, int subclass); +extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass); +extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); + +# define down_write_nest_lock(sem, nest_lock) \ +do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + _down_write_nest_lock(sem, &(nest_lock)->dep_map); \ +} while (0); + +/* + * Take/release a lock when not the owner will release it. + * + * [ This API should be avoided as much as possible - the + * proper abstraction for this case is completions. ] + */ +extern void down_read_non_owner(struct rw_semaphore *sem); +extern void up_read_non_owner(struct rw_semaphore *sem); +#else +# define down_read_nested(sem, subclass) down_read(sem) +# define down_write_nest_lock(sem, nest_lock) down_write(sem) +# define down_write_nested(sem, subclass) down_write(sem) +# define down_write_killable_nested(sem, subclass) down_write_killable(sem) +# define down_read_non_owner(sem) down_read(sem) +# define up_read_non_owner(sem) up_read(sem) +#endif + +#endif /* _LINUX_RWSEM_H */ diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h new file mode 100644 index 000000000..833871dcf --- /dev/null +++ b/include/linux/s3c_adc_battery.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _S3C_ADC_BATTERY_H +#define _S3C_ADC_BATTERY_H + +struct s3c_adc_bat_thresh { + int volt; /* mV */ + int cur; /* mA */ + int level; /* percent */ +}; + +struct s3c_adc_bat_pdata { + int (*init)(void); + void (*exit)(void); + void (*enable_charger)(void); + void (*disable_charger)(void); + + int gpio_charge_finished; + int gpio_inverted; + + const struct s3c_adc_bat_thresh *lut_noac; + unsigned int lut_noac_cnt; + const struct s3c_adc_bat_thresh *lut_acin; + unsigned int lut_acin_cnt; + + const unsigned int volt_channel; + const unsigned int current_channel; + const unsigned int backup_volt_channel; + + const unsigned int volt_samples; + const unsigned int current_samples; + const unsigned int backup_volt_samples; + + const unsigned int volt_mult; + const unsigned int current_mult; + const unsigned int backup_volt_mult; + const unsigned int internal_impedance; + + const unsigned int backup_volt_max; + const unsigned int backup_volt_min; +}; + +#endif diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h new file mode 100644 index 000000000..65839a58b --- /dev/null +++ b/include/linux/sa11x0-dma.h @@ -0,0 +1,24 @@ +/* + * SA11x0 DMA Engine support + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
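The rwsem calls declared above pair like the rwlock ones, but they may sleep and are therefore restricted to process context. A minimal sketch; the protected variable and the function names are illustrative:

#include <linux/rwsem.h>

static DECLARE_RWSEM(cfg_rwsem);        /* statically initialized rwsem */
static int cfg_value;                   /* data protected by cfg_rwsem */

static int cfg_read(void)
{
        int v;

        down_read(&cfg_rwsem);          /* shared: any number of concurrent readers */
        v = cfg_value;
        up_read(&cfg_rwsem);
        return v;
}

static void cfg_update(int v)
{
        down_write(&cfg_rwsem);         /* exclusive writer */
        cfg_value = v;
        up_write(&cfg_rwsem);
}

When the caller can back out on a fatal signal, down_read_killable()/down_write_killable() are the variants to use, and downgrade_write() turns a held write lock into a read lock without ever dropping the semaphore in between.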
+ */ +#ifndef __LINUX_SA11X0_DMA_H +#define __LINUX_SA11X0_DMA_H + +struct dma_chan; + +#if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE) +bool sa11x0_dma_filter_fn(struct dma_chan *, void *); +#else +static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d) +{ + return false; +} +#endif + +#endif diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h new file mode 100644 index 000000000..804a50983 --- /dev/null +++ b/include/linux/sbitmap.h @@ -0,0 +1,534 @@ +/* + * Fast and scalable bitmaps. + * + * Copyright (C) 2016 Facebook + * Copyright (C) 2013-2014 Jens Axboe + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __LINUX_SCALE_BITMAP_H +#define __LINUX_SCALE_BITMAP_H + +#include +#include + +struct seq_file; + +/** + * struct sbitmap_word - Word in a &struct sbitmap. + */ +struct sbitmap_word { + /** + * @word: The bitmap word itself. + */ + unsigned long word; + + /** + * @depth: Number of bits being used in @word. + */ + unsigned long depth; +} ____cacheline_aligned_in_smp; + +/** + * struct sbitmap - Scalable bitmap. + * + * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This + * trades off higher memory usage for better scalability. + */ +struct sbitmap { + /** + * @depth: Number of bits used in the whole bitmap. + */ + unsigned int depth; + + /** + * @shift: log2(number of bits used per word) + */ + unsigned int shift; + + /** + * @map_nr: Number of words (cachelines) being used for the bitmap. + */ + unsigned int map_nr; + + /** + * @map: Allocated bitmap. + */ + struct sbitmap_word *map; +}; + +#define SBQ_WAIT_QUEUES 8 +#define SBQ_WAKE_BATCH 8 + +/** + * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue. + */ +struct sbq_wait_state { + /** + * @wait_cnt: Number of frees remaining before we wake up. + */ + atomic_t wait_cnt; + + /** + * @wait: Wait queue. + */ + wait_queue_head_t wait; +} ____cacheline_aligned_in_smp; + +/** + * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free + * bits. + * + * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to + * avoid contention on the wait queue spinlock. This ensures that we don't hit a + * scalability wall when we run out of free bits and have to start putting tasks + * to sleep. + */ +struct sbitmap_queue { + /** + * @sb: Scalable bitmap. + */ + struct sbitmap sb; + + /* + * @alloc_hint: Cache of last successfully allocated or freed bit. + * + * This is per-cpu, which allows multiple users to stick to different + * cachelines until the map is exhausted. + */ + unsigned int __percpu *alloc_hint; + + /** + * @wake_batch: Number of bits which must be freed before we wake up any + * waiters. + */ + unsigned int wake_batch; + + /** + * @wake_index: Next wait queue in @ws to wake up. + */ + atomic_t wake_index; + + /** + * @ws: Wait queues. + */ + struct sbq_wait_state *ws; + + /** + * @round_robin: Allocate bits in strict round-robin order. 
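The filter function above is meant to be handed to the dmaengine core when requesting one of the SA-11x0 DMA channels by name. A hedged sketch of that pattern; the channel name string is only an example parameter, not something defined in this header:

#include <linux/dmaengine.h>
#include <linux/sa11x0-dma.h>

static struct dma_chan *sa11x0_get_tx_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        /* the last argument is matched against the platform's channel names */
        return dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser0UDCTr");
}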
+ */ + bool round_robin; + + /** + * @min_shallow_depth: The minimum shallow depth which may be passed to + * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow(). + */ + unsigned int min_shallow_depth; +}; + +/** + * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node. + * @sb: Bitmap to initialize. + * @depth: Number of bits to allocate. + * @shift: Use 2^@shift bits per word in the bitmap; if a negative number if + * given, a good default is chosen. + * @flags: Allocation flags. + * @node: Memory node to allocate on. + * + * Return: Zero on success or negative errno on failure. + */ +int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, + gfp_t flags, int node); + +/** + * sbitmap_free() - Free memory used by a &struct sbitmap. + * @sb: Bitmap to free. + */ +static inline void sbitmap_free(struct sbitmap *sb) +{ + kfree(sb->map); + sb->map = NULL; +} + +/** + * sbitmap_resize() - Resize a &struct sbitmap. + * @sb: Bitmap to resize. + * @depth: New number of bits to resize to. + * + * Doesn't reallocate anything. It's up to the caller to ensure that the new + * depth doesn't exceed the depth that the sb was initialized with. + */ +void sbitmap_resize(struct sbitmap *sb, unsigned int depth); + +/** + * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap. + * @sb: Bitmap to allocate from. + * @alloc_hint: Hint for where to start searching for a free bit. + * @round_robin: If true, be stricter about allocation order; always allocate + * starting from the last allocated bit. This is less efficient + * than the default behavior (false). + * + * This operation provides acquire barrier semantics if it succeeds. + * + * Return: Non-negative allocated bit number if successful, -1 otherwise. + */ +int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin); + +/** + * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap, + * limiting the depth used from each word. + * @sb: Bitmap to allocate from. + * @alloc_hint: Hint for where to start searching for a free bit. + * @shallow_depth: The maximum number of bits to allocate from a single word. + * + * This rather specific operation allows for having multiple users with + * different allocation limits. E.g., there can be a high-priority class that + * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow() + * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority + * class can only allocate half of the total bits in the bitmap, preventing it + * from starving out the high-priority class. + * + * Return: Non-negative allocated bit number if successful, -1 otherwise. + */ +int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint, + unsigned long shallow_depth); + +/** + * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap. + * @sb: Bitmap to check. + * + * Return: true if any bit in the bitmap is set, false otherwise. + */ +bool sbitmap_any_bit_set(const struct sbitmap *sb); + +/** + * sbitmap_any_bit_clear() - Check for an unset bit in a &struct + * sbitmap. + * @sb: Bitmap to check. + * + * Return: true if any bit in the bitmap is clear, false otherwise. 
+ */ +bool sbitmap_any_bit_clear(const struct sbitmap *sb); + +#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift) +#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U)) + +typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); + +/** + * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap. + * @start: Where to start the iteration. + * @sb: Bitmap to iterate over. + * @fn: Callback. Should return true to continue or false to break early. + * @data: Pointer to pass to callback. + * + * This is inline even though it's non-trivial so that the function calls to the + * callback will hopefully get optimized away. + */ +static inline void __sbitmap_for_each_set(struct sbitmap *sb, + unsigned int start, + sb_for_each_fn fn, void *data) +{ + unsigned int index; + unsigned int nr; + unsigned int scanned = 0; + + if (start >= sb->depth) + start = 0; + index = SB_NR_TO_INDEX(sb, start); + nr = SB_NR_TO_BIT(sb, start); + + while (scanned < sb->depth) { + struct sbitmap_word *word = &sb->map[index]; + unsigned int depth = min_t(unsigned int, word->depth - nr, + sb->depth - scanned); + + scanned += depth; + if (!word->word) + goto next; + + /* + * On the first iteration of the outer loop, we need to add the + * bit offset back to the size of the word for find_next_bit(). + * On all other iterations, nr is zero, so this is a noop. + */ + depth += nr; + while (1) { + nr = find_next_bit(&word->word, depth, nr); + if (nr >= depth) + break; + if (!fn(sb, (index << sb->shift) + nr, data)) + return; + + nr++; + } +next: + nr = 0; + if (++index >= sb->map_nr) + index = 0; + } +} + +/** + * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap. + * @sb: Bitmap to iterate over. + * @fn: Callback. Should return true to continue or false to break early. + * @data: Pointer to pass to callback. + */ +static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn, + void *data) +{ + __sbitmap_for_each_set(sb, 0, fn, data); +} + +static inline unsigned long *__sbitmap_word(struct sbitmap *sb, + unsigned int bitnr) +{ + return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word; +} + +/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */ + +static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr) +{ + set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); +} + +static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr) +{ + clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); +} + +static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb, + unsigned int bitnr) +{ + clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); +} + +static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) +{ + return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); +} + +unsigned int sbitmap_weight(const struct sbitmap *sb); + +/** + * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file. + * @sb: Bitmap to show. + * @m: struct seq_file to write to. + * + * This is intended for debugging. The format may change at any time. + */ +void sbitmap_show(struct sbitmap *sb, struct seq_file *m); + +/** + * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct + * seq_file. + * @sb: Bitmap to show. + * @m: struct seq_file to write to. + * + * This is intended for debugging. The output isn't guaranteed to be internally + * consistent. 
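Putting the primitives above together, a bare &struct sbitmap works as a simple ID allocator. A minimal sketch, not taken from the header itself; the pool wrappers are made-up names:

#include <linux/sbitmap.h>
#include <linux/numa.h>

static struct sbitmap id_map;

static int id_pool_create(unsigned int nr_ids)
{
        /* shift = -1 lets the implementation pick the bits-per-word value */
        return sbitmap_init_node(&id_map, nr_ids, -1, GFP_KERNEL, NUMA_NO_NODE);
}

static int id_alloc(void)
{
        /* returns a free bit number, or -1 when the map is exhausted */
        return sbitmap_get(&id_map, 0, false);
}

static void id_free(int id)
{
        /* release semantics, pairing with the acquire in sbitmap_get() */
        sbitmap_clear_bit_unlock(&id_map, id);
}

static void id_pool_destroy(void)
{
        sbitmap_free(&id_map);
}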
+ */ +void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m); + +/** + * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific + * memory node. + * @sbq: Bitmap queue to initialize. + * @depth: See sbitmap_init_node(). + * @shift: See sbitmap_init_node(). + * @round_robin: See sbitmap_get(). + * @flags: Allocation flags. + * @node: Memory node to allocate on. + * + * Return: Zero on success or negative errno on failure. + */ +int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, + int shift, bool round_robin, gfp_t flags, int node); + +/** + * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue. + * + * @sbq: Bitmap queue to free. + */ +static inline void sbitmap_queue_free(struct sbitmap_queue *sbq) +{ + kfree(sbq->ws); + free_percpu(sbq->alloc_hint); + sbitmap_free(&sbq->sb); +} + +/** + * sbitmap_queue_resize() - Resize a &struct sbitmap_queue. + * @sbq: Bitmap queue to resize. + * @depth: New number of bits to resize to. + * + * Like sbitmap_resize(), this doesn't reallocate anything. It has to do + * some extra work on the &struct sbitmap_queue, so it's not safe to just + * resize the underlying &struct sbitmap. + */ +void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth); + +/** + * __sbitmap_queue_get() - Try to allocate a free bit from a &struct + * sbitmap_queue with preemption already disabled. + * @sbq: Bitmap queue to allocate from. + * + * Return: Non-negative allocated bit number if successful, -1 otherwise. + */ +int __sbitmap_queue_get(struct sbitmap_queue *sbq); + +/** + * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct + * sbitmap_queue, limiting the depth used from each word, with preemption + * already disabled. + * @sbq: Bitmap queue to allocate from. + * @shallow_depth: The maximum number of bits to allocate from a single word. + * See sbitmap_get_shallow(). + * + * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after + * initializing @sbq. + * + * Return: Non-negative allocated bit number if successful, -1 otherwise. + */ +int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, + unsigned int shallow_depth); + +/** + * sbitmap_queue_get() - Try to allocate a free bit from a &struct + * sbitmap_queue. + * @sbq: Bitmap queue to allocate from. + * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to + * sbitmap_queue_clear()). + * + * Return: Non-negative allocated bit number if successful, -1 otherwise. + */ +static inline int sbitmap_queue_get(struct sbitmap_queue *sbq, + unsigned int *cpu) +{ + int nr; + + *cpu = get_cpu(); + nr = __sbitmap_queue_get(sbq); + put_cpu(); + return nr; +} + +/** + * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct + * sbitmap_queue, limiting the depth used from each word. + * @sbq: Bitmap queue to allocate from. + * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to + * sbitmap_queue_clear()). + * @shallow_depth: The maximum number of bits to allocate from a single word. + * See sbitmap_get_shallow(). + * + * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after + * initializing @sbq. + * + * Return: Non-negative allocated bit number if successful, -1 otherwise. 
+ */ +static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, + unsigned int *cpu, + unsigned int shallow_depth) +{ + int nr; + + *cpu = get_cpu(); + nr = __sbitmap_queue_get_shallow(sbq, shallow_depth); + put_cpu(); + return nr; +} + +/** + * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the + * minimum shallow depth that will be used. + * @sbq: Bitmap queue in question. + * @min_shallow_depth: The minimum shallow depth that will be passed to + * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow(). + * + * sbitmap_queue_clear() batches wakeups as an optimization. The batch size + * depends on the depth of the bitmap. Since the shallow allocation functions + * effectively operate with a different depth, the shallow depth must be taken + * into account when calculating the batch size. This function must be called + * with the minimum shallow depth that will be used. Failure to do so can result + * in missed wakeups. + */ +void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, + unsigned int min_shallow_depth); + +/** + * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a + * &struct sbitmap_queue. + * @sbq: Bitmap to free from. + * @nr: Bit number to free. + * @cpu: CPU the bit was allocated on. + */ +void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, + unsigned int cpu); + +static inline int sbq_index_inc(int index) +{ + return (index + 1) & (SBQ_WAIT_QUEUES - 1); +} + +static inline void sbq_index_atomic_inc(atomic_t *index) +{ + int old = atomic_read(index); + int new = sbq_index_inc(old); + atomic_cmpxchg(index, old, new); +} + +/** + * sbq_wait_ptr() - Get the next wait queue to use for a &struct + * sbitmap_queue. + * @sbq: Bitmap queue to wait on. + * @wait_index: A counter per "user" of @sbq. + */ +static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq, + atomic_t *wait_index) +{ + struct sbq_wait_state *ws; + + ws = &sbq->ws[atomic_read(wait_index)]; + sbq_index_atomic_inc(wait_index); + return ws; +} + +/** + * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct + * sbitmap_queue. + * @sbq: Bitmap queue to wake up. + */ +void sbitmap_queue_wake_all(struct sbitmap_queue *sbq); + +/** + * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue + * on a &struct sbitmap_queue. + * @sbq: Bitmap queue to wake up. + */ +void sbitmap_queue_wake_up(struct sbitmap_queue *sbq); + +/** + * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct + * seq_file. + * @sbq: Bitmap queue to show. + * @m: struct seq_file to write to. + * + * This is intended for debugging. The format may change at any time. + */ +void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m); + +#endif /* __LINUX_SCALE_BITMAP_H */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h new file mode 100644 index 000000000..093aa5712 --- /dev/null +++ b/include/linux/scatterlist.h @@ -0,0 +1,433 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCATTERLIST_H +#define _LINUX_SCATTERLIST_H + +#include +#include +#include +#include +#include + +struct scatterlist { + unsigned long page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + unsigned int dma_length; +#endif +}; + +/* + * Since the above length field is an unsigned int, below we define the maximum + * length in bytes that can be stored in one scatterlist entry. 
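For the queue variant, the CPU hint returned by the allocation side must be carried to the free side, and an exhausted map is normally handled by sleeping on one of the wait queues (as blk-mq does for request tags). A simplified sketch of the non-blocking path only; the names are illustrative:

#include <linux/sbitmap.h>
#include <linux/numa.h>

static struct sbitmap_queue tag_set;

static int tag_set_create(unsigned int depth)
{
        /* shift = -1: default bits per word; round_robin = false: prefer cache-hot tags */
        return sbitmap_queue_init_node(&tag_set, depth, -1, false,
                                       GFP_KERNEL, NUMA_NO_NODE);
}

static int tag_get(unsigned int *cpu)
{
        return sbitmap_queue_get(&tag_set, cpu);        /* -1 when no tag is free */
}

static void tag_put(int tag, unsigned int cpu)
{
        /* frees the bit and wakes a wait queue once a wake_batch worth of frees accumulates */
        sbitmap_queue_clear(&tag_set, tag, cpu);
}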
+ */ +#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK) + +/* + * These macros should be used after a dma_map_sg call has been done + * to get bus addresses of each of the SG entries and their lengths. + * You should only work with the number of sg entries dma_map_sg + * returns, or alternatively stop on the first sg_dma_len(sg) which + * is 0. + */ +#define sg_dma_address(sg) ((sg)->dma_address) + +#ifdef CONFIG_NEED_SG_DMA_LENGTH +#define sg_dma_len(sg) ((sg)->dma_length) +#else +#define sg_dma_len(sg) ((sg)->length) +#endif + +struct sg_table { + struct scatterlist *sgl; /* the list */ + unsigned int nents; /* number of mapped entries */ + unsigned int orig_nents; /* original size of list */ +}; + +/* + * Notes on SG table design. + * + * We use the unsigned long page_link field in the scatterlist struct to place + * the page pointer AND encode information about the sg table as well. The two + * lower bits are reserved for this information. + * + * If bit 0 is set, then the page_link contains a pointer to the next sg + * table list. Otherwise the next entry is at sg + 1. + * + * If bit 1 is set, then this sg entry is the last element in a list. + * + * See sg_next(). + * + */ + +#define SG_CHAIN 0x01UL +#define SG_END 0x02UL + +/* + * We overload the LSB of the page pointer to indicate whether it's + * a valid sg entry, or whether it points to the start of a new scatterlist. + * Those low bits are there for everyone! (thanks mason :-) + */ +#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN) +#define sg_is_last(sg) ((sg)->page_link & SG_END) +#define sg_chain_ptr(sg) \ + ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END))) + +/** + * sg_assign_page - Assign a given page to an SG entry + * @sg: SG entry + * @page: The page + * + * Description: + * Assign page to sg entry. Also see sg_set_page(), the most commonly used + * variant. + * + **/ +static inline void sg_assign_page(struct scatterlist *sg, struct page *page) +{ + unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END); + + /* + * In order for the low bit stealing approach to work, pages + * must be aligned at a 32-bit boundary as a minimum. + */ + BUG_ON((unsigned long) page & (SG_CHAIN | SG_END)); +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg_is_chain(sg)); +#endif + sg->page_link = page_link | (unsigned long) page; +} + +/** + * sg_set_page - Set sg entry to point at given page + * @sg: SG entry + * @page: The page + * @len: Length of data + * @offset: Offset into page + * + * Description: + * Use this function to set an sg entry pointing at a page, never assign + * the page directly. We encode sg table information in the lower bits + * of the page pointer. See sg_page() for looking up the page belonging + * to an sg entry. 
+ * + **/ +static inline void sg_set_page(struct scatterlist *sg, struct page *page, + unsigned int len, unsigned int offset) +{ + sg_assign_page(sg, page); + sg->offset = offset; + sg->length = len; +} + +static inline struct page *sg_page(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg_is_chain(sg)); +#endif + return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END)); +} + +/** + * sg_set_buf - Set sg entry to point at given data + * @sg: SG entry + * @buf: Data + * @buflen: Data length + * + **/ +static inline void sg_set_buf(struct scatterlist *sg, const void *buf, + unsigned int buflen) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(!virt_addr_valid(buf)); +#endif + sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); +} + +/* + * Loop over each sg element, following the pointer to a new list if necessary + */ +#define for_each_sg(sglist, sg, nr, __i) \ + for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg)) + +/** + * sg_chain - Chain two sglists together + * @prv: First scatterlist + * @prv_nents: Number of entries in prv + * @sgl: Second scatterlist + * + * Description: + * Links @prv@ and @sgl@ together, to form a longer scatterlist. + * + **/ +static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, + struct scatterlist *sgl) +{ + /* + * offset and length are unused for chain entry. Clear them. + */ + prv[prv_nents - 1].offset = 0; + prv[prv_nents - 1].length = 0; + + /* + * Set lowest bit to indicate a link pointer, and make sure to clear + * the termination bit if it happens to be set. + */ + prv[prv_nents - 1].page_link = ((unsigned long) sgl | SG_CHAIN) + & ~SG_END; +} + +/** + * sg_mark_end - Mark the end of the scatterlist + * @sg: SG entryScatterlist + * + * Description: + * Marks the passed in sg entry as the termination point for the sg + * table. A call to sg_next() on this entry will return NULL. + * + **/ +static inline void sg_mark_end(struct scatterlist *sg) +{ + /* + * Set termination bit, clear potential chain bit + */ + sg->page_link |= SG_END; + sg->page_link &= ~SG_CHAIN; +} + +/** + * sg_unmark_end - Undo setting the end of the scatterlist + * @sg: SG entryScatterlist + * + * Description: + * Removes the termination marker from the given entry of the scatterlist. + * + **/ +static inline void sg_unmark_end(struct scatterlist *sg) +{ + sg->page_link &= ~SG_END; +} + +/** + * sg_phys - Return physical address of an sg entry + * @sg: SG entry + * + * Description: + * This calls page_to_phys() on the page in this sg entry, and adds the + * sg offset. The caller must know that it is legal to call page_to_phys() + * on the sg page. + * + **/ +static inline dma_addr_t sg_phys(struct scatterlist *sg) +{ + return page_to_phys(sg_page(sg)) + sg->offset; +} + +/** + * sg_virt - Return virtual address of an sg entry + * @sg: SG entry + * + * Description: + * This calls page_address() on the page in this sg entry, and adds the + * sg offset. The caller must know that the sg page has a valid virtual + * mapping. 
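sg_set_page() above is the primitive; sg_set_buf() is merely the convenience wrapper that derives the page and offset from a lowmem kernel virtual address. A short illustrative sketch with hypothetical helpers:

#include <linux/scatterlist.h>

/* Describe a chunk that starts 128 bytes into @page. */
static void fill_entry_page(struct scatterlist *sg, struct page *page)
{
        sg_set_page(sg, page, PAGE_SIZE - 128, 128);    /* (sg, page, len, offset) */
}

/* The same thing for a buffer addressed by a kernel virtual pointer. */
static void fill_entry_virt(struct scatterlist *sg, void *buf, unsigned int len)
{
        sg_set_buf(sg, buf, len);       /* resolves to sg_set_page(sg, virt_to_page(buf), ...) */
}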
+ * + **/ +static inline void *sg_virt(struct scatterlist *sg) +{ + return page_address(sg_page(sg)) + sg->offset; +} + +/** + * sg_init_marker - Initialize markers in sg table + * @sgl: The SG table + * @nents: Number of entries in table + * + **/ +static inline void sg_init_marker(struct scatterlist *sgl, + unsigned int nents) +{ + sg_mark_end(&sgl[nents - 1]); +} + +int sg_nents(struct scatterlist *sg); +int sg_nents_for_len(struct scatterlist *sg, u64 len); +struct scatterlist *sg_next(struct scatterlist *); +struct scatterlist *sg_last(struct scatterlist *s, unsigned int); +void sg_init_table(struct scatterlist *, unsigned int); +void sg_init_one(struct scatterlist *, const void *, unsigned int); +int sg_split(struct scatterlist *in, const int in_mapped_nents, + const off_t skip, const int nb_splits, + const size_t *split_sizes, + struct scatterlist **out, int *out_mapped_nents, + gfp_t gfp_mask); + +typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); +typedef void (sg_free_fn)(struct scatterlist *, unsigned int); + +void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *); +void sg_free_table(struct sg_table *); +int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, + struct scatterlist *, gfp_t, sg_alloc_fn *); +int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); +int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, + unsigned int n_pages, unsigned int offset, + unsigned long size, unsigned int max_segment, + gfp_t gfp_mask); +int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, + unsigned int n_pages, unsigned int offset, + unsigned long size, gfp_t gfp_mask); + +#ifdef CONFIG_SGL_ALLOC +struct scatterlist *sgl_alloc_order(unsigned long long length, + unsigned int order, bool chainable, + gfp_t gfp, unsigned int *nent_p); +struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp, + unsigned int *nent_p); +void sgl_free_n_order(struct scatterlist *sgl, int nents, int order); +void sgl_free_order(struct scatterlist *sgl, int order); +void sgl_free(struct scatterlist *sgl); +#endif /* CONFIG_SGL_ALLOC */ + +size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, + size_t buflen, off_t skip, bool to_buffer); + +size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, + const void *buf, size_t buflen); +size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen); + +size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, + const void *buf, size_t buflen, off_t skip); +size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen, off_t skip); +size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, + size_t buflen, off_t skip); + +/* + * Maximum number of entries that will be allocated in one piece, if + * a list larger than this is required then chaining will be utilized. + */ +#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) + +/* + * The maximum number of SG segments that we will put inside a + * scatterlist (unless chaining is used). Should ideally fit inside a + * single page, to avoid a higher order allocation. We could define this + * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The + * minimum value is 32 + */ +#define SG_CHUNK_SIZE 128 + +/* + * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit + * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. 
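As a concrete example of the table helpers declared above, the sketch below builds a two-entry list over kmalloc'd buffers and walks it with for_each_sg(); the buffer sizes and the function name are made up for illustration:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int demo_build_sgl(void)
{
        struct scatterlist sgl[2];
        struct scatterlist *sg;
        void *a, *b;
        int i;

        a = kmalloc(512, GFP_KERNEL);
        b = kmalloc(1024, GFP_KERNEL);
        if (!a || !b) {
                kfree(a);
                kfree(b);
                return -ENOMEM;
        }

        sg_init_table(sgl, 2);          /* zeroes the entries and marks sgl[1] as the end */
        sg_set_buf(&sgl[0], a, 512);
        sg_set_buf(&sgl[1], b, 1024);

        for_each_sg(sgl, sg, 2, i)      /* follows chain pointers transparently */
                pr_info("entry %d: %u bytes at %p\n", i, sg->length, sg_virt(sg));

        kfree(b);
        kfree(a);
        return 0;
}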
+ */ +#ifdef CONFIG_ARCH_HAS_SG_CHAIN +#define SG_MAX_SEGMENTS 2048 +#else +#define SG_MAX_SEGMENTS SG_CHUNK_SIZE +#endif + +#ifdef CONFIG_SG_POOL +void sg_free_table_chained(struct sg_table *table, bool first_chunk); +int sg_alloc_table_chained(struct sg_table *table, int nents, + struct scatterlist *first_chunk); +#endif + +/* + * sg page iterator + * + * Iterates over sg entries page-by-page. On each successful iteration, + * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter) + * to get the current page and its dma address. @piter->sg will point to the + * sg holding this page and @piter->sg_pgoffset to the page's page offset + * within the sg. The iteration will stop either when a maximum number of sg + * entries was reached or a terminating sg (sg_last(sg) == true) was reached. + */ +struct sg_page_iter { + struct scatterlist *sg; /* sg holding the page */ + unsigned int sg_pgoffset; /* page offset within the sg */ + + /* these are internal states, keep away */ + unsigned int __nents; /* remaining sg entries */ + int __pg_advance; /* nr pages to advance at the + * next step */ +}; + +bool __sg_page_iter_next(struct sg_page_iter *piter); +void __sg_page_iter_start(struct sg_page_iter *piter, + struct scatterlist *sglist, unsigned int nents, + unsigned long pgoffset); +/** + * sg_page_iter_page - get the current page held by the page iterator + * @piter: page iterator holding the page + */ +static inline struct page *sg_page_iter_page(struct sg_page_iter *piter) +{ + return nth_page(sg_page(piter->sg), piter->sg_pgoffset); +} + +/** + * sg_page_iter_dma_address - get the dma address of the current page held by + * the page iterator. + * @piter: page iterator holding the page + */ +static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter) +{ + return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT); +} + +/** + * for_each_sg_page - iterate over the pages of the given sg list + * @sglist: sglist to iterate over + * @piter: page iterator to hold current page, sg, sg_pgoffset + * @nents: maximum number of sg entries to iterate over + * @pgoffset: starting page offset + */ +#define for_each_sg_page(sglist, piter, nents, pgoffset) \ + for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \ + __sg_page_iter_next(piter);) + +/* + * Mapping sg iterator + * + * Iterates over sg entries mapping page-by-page. On each successful + * iteration, @miter->page points to the mapped page and + * @miter->length bytes of data can be accessed at @miter->addr. As + * long as an interation is enclosed between start and stop, the user + * is free to choose control structure and when to stop. + * + * @miter->consumed is set to @miter->length on each iteration. It + * can be adjusted if the user can't consume all the bytes in one go. + * Also, a stopped iteration can be resumed by calling next on it. + * This is useful when iteration needs to release all resources and + * continue later (e.g. at the next interrupt). 
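The mapping iterator described in the comment above (its declarations follow below) is driven by a start/next/stop loop. A typical read-only walk that folds the mapped bytes into a checksum looks roughly like this; the checksum itself is purely illustrative:

#include <linux/scatterlist.h>

static u32 demo_sum_sgl(struct scatterlist *sgl, unsigned int nents)
{
        struct sg_mapping_iter miter;
        u32 sum = 0;
        size_t i;

        /* SG_MITER_FROM_SG: data is only read, nothing needs flushing back */
        sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
        while (sg_miter_next(&miter)) {
                const u8 *p = miter.addr;

                for (i = 0; i < miter.length; i++)
                        sum += p[i];
        }
        sg_miter_stop(&miter);          /* unmaps the last page; must run before leaving */
        return sum;
}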
+ */ + +#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ +#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */ +#define SG_MITER_FROM_SG (1 << 2) /* nop */ + +struct sg_mapping_iter { + /* the following three fields can be accessed directly */ + struct page *page; /* currently mapped page */ + void *addr; /* pointer to the mapped area */ + size_t length; /* length of the mapped area */ + size_t consumed; /* number of consumed bytes */ + struct sg_page_iter piter; /* page iterator */ + + /* these are internal states, keep away */ + unsigned int __offset; /* offset within page */ + unsigned int __remaining; /* remaining bytes on page */ + unsigned int __flags; +}; + +void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, + unsigned int nents, unsigned int flags); +bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset); +bool sg_miter_next(struct sg_mapping_iter *miter); +void sg_miter_stop(struct sg_mapping_iter *miter); + +#endif /* _LINUX_SCATTERLIST_H */ diff --git a/include/linux/scc.h b/include/linux/scc.h new file mode 100644 index 000000000..745eabd17 --- /dev/null +++ b/include/linux/scc.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* $Id: scc.h,v 1.29 1997/04/02 14:56:45 jreuter Exp jreuter $ */ +#ifndef _SCC_H +#define _SCC_H + +#include + + +enum {TX_OFF, TX_ON}; /* command for scc_key_trx() */ + +/* Vector masks in RR2B */ + +#define VECTOR_MASK 0x06 +#define TXINT 0x00 +#define EXINT 0x02 +#define RXINT 0x04 +#define SPINT 0x06 + +#ifdef CONFIG_SCC_DELAY +#define Inb(port) inb_p(port) +#define Outb(port, val) outb_p(val, port) +#else +#define Inb(port) inb(port) +#define Outb(port, val) outb(val, port) +#endif + +/* SCC channel control structure for KISS */ + +struct scc_kiss { + unsigned char txdelay; /* Transmit Delay 10 ms/cnt */ + unsigned char persist; /* Persistence (0-255) as a % */ + unsigned char slottime; /* Delay to wait on persistence hit */ + unsigned char tailtime; /* Delay after last byte written */ + unsigned char fulldup; /* Full Duplex mode 0=CSMA 1=DUP 2=ALWAYS KEYED */ + unsigned char waittime; /* Waittime before any transmit attempt */ + unsigned int maxkeyup; /* Maximum time to transmit (seconds) */ + unsigned int mintime; /* Minimal offtime after MAXKEYUP timeout (seconds) */ + unsigned int idletime; /* Maximum idle time in ALWAYS KEYED mode (seconds) */ + unsigned int maxdefer; /* Timer for CSMA channel busy limit */ + unsigned char tx_inhibit; /* Transmit is not allowed when set */ + unsigned char group; /* Group ID for AX.25 TX interlocking */ + unsigned char mode; /* 'normal' or 'hwctrl' mode (unused) */ + unsigned char softdcd; /* Use DPLL instead of DCD pin for carrier detect */ +}; + + +/* SCC channel structure */ + +struct scc_channel { + int init; /* channel exists? 
*/ + + struct net_device *dev; /* link to device control structure */ + struct net_device_stats dev_stat;/* device statistics */ + + char brand; /* manufacturer of the board */ + long clock; /* used clock */ + + io_port ctrl; /* I/O address of CONTROL register */ + io_port data; /* I/O address of DATA register */ + io_port special; /* I/O address of special function port */ + int irq; /* Number of Interrupt */ + + char option; + char enhanced; /* Enhanced SCC support */ + + unsigned char wreg[16]; /* Copy of last written value in WRx */ + unsigned char status; /* Copy of R0 at last external interrupt */ + unsigned char dcd; /* DCD status */ + + struct scc_kiss kiss; /* control structure for KISS params */ + struct scc_stat stat; /* statistical information */ + struct scc_modem modem; /* modem information */ + + struct sk_buff_head tx_queue; /* next tx buffer */ + struct sk_buff *rx_buff; /* pointer to frame currently received */ + struct sk_buff *tx_buff; /* pointer to frame currently transmitted */ + + /* Timer */ + struct timer_list tx_t; /* tx timer for this channel */ + struct timer_list tx_wdog; /* tx watchdogs */ + + /* Channel lock */ + spinlock_t lock; /* Channel guard lock */ +}; + +#endif /* defined(_SCC_H) */ diff --git a/include/linux/sched.h b/include/linux/sched.h new file mode 100644 index 000000000..f92d5ae6d --- /dev/null +++ b/include/linux/sched.h @@ -0,0 +1,1911 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_H +#define _LINUX_SCHED_H + +/* + * Define 'struct task_struct' and provide the main scheduler + * APIs (schedule(), wakeup variants, etc.) + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* task_struct member predeclarations (sorted alphabetically): */ +struct audit_context; +struct backing_dev_info; +struct bio_list; +struct blk_plug; +struct cfs_rq; +struct fs_struct; +struct futex_pi_state; +struct io_context; +struct mempolicy; +struct nameidata; +struct nsproxy; +struct perf_event_context; +struct pid_namespace; +struct pipe_inode_info; +struct rcu_node; +struct reclaim_state; +struct robust_list_head; +struct sched_attr; +struct sched_param; +struct seq_file; +struct sighand_struct; +struct signal_struct; +struct task_delay_info; +struct task_group; + +/* + * Task state bitmask. NOTE! These bits are also + * encoded in fs/proc/array.c: get_task_state(). + * + * We have two separate sets of flags: task->state + * is about runnability, while task->exit_state are + * about the task exiting. Confusing, but this way + * modifying one set can't modify the other one by + * mistake. 
+ */ + +/* Used in tsk->state: */ +#define TASK_RUNNING 0x0000 +#define TASK_INTERRUPTIBLE 0x0001 +#define TASK_UNINTERRUPTIBLE 0x0002 +#define __TASK_STOPPED 0x0004 +#define __TASK_TRACED 0x0008 +/* Used in tsk->exit_state: */ +#define EXIT_DEAD 0x0010 +#define EXIT_ZOMBIE 0x0020 +#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) +/* Used in tsk->state again: */ +#define TASK_PARKED 0x0040 +#define TASK_DEAD 0x0080 +#define TASK_WAKEKILL 0x0100 +#define TASK_WAKING 0x0200 +#define TASK_NOLOAD 0x0400 +#define TASK_NEW 0x0800 +#define TASK_STATE_MAX 0x1000 + +/* Convenience macros for the sake of set_current_state: */ +#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) +#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) +#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) + +#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) + +/* Convenience macros for the sake of wake_up(): */ +#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) + +/* get_task_state(): */ +#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ + TASK_PARKED) + +#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) + +#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) + +#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) + +#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0 && \ + (task->state & TASK_NOLOAD) == 0) + +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + +/* + * Special states are those that do not use the normal wait-loop pattern. See + * the comment with set_special_state(). + */ +#define is_special_task_state(state) \ + ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) + +#define __set_current_state(state_value) \ + do { \ + WARN_ON_ONCE(is_special_task_state(state_value));\ + current->task_state_change = _THIS_IP_; \ + current->state = (state_value); \ + } while (0) + +#define set_current_state(state_value) \ + do { \ + WARN_ON_ONCE(is_special_task_state(state_value));\ + current->task_state_change = _THIS_IP_; \ + smp_store_mb(current->state, (state_value)); \ + } while (0) + +#define set_special_state(state_value) \ + do { \ + unsigned long flags; /* may shadow */ \ + WARN_ON_ONCE(!is_special_task_state(state_value)); \ + raw_spin_lock_irqsave(¤t->pi_lock, flags); \ + current->task_state_change = _THIS_IP_; \ + current->state = (state_value); \ + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ + } while (0) +#else +/* + * set_current_state() includes a barrier so that the write of current->state + * is correctly serialised wrt the caller's subsequent test of whether to + * actually sleep: + * + * for (;;) { + * set_current_state(TASK_UNINTERRUPTIBLE); + * if (!need_sleep) + * break; + * + * schedule(); + * } + * __set_current_state(TASK_RUNNING); + * + * If the caller does not need such serialisation (because, for instance, the + * condition test and condition change and wakeup are under the same lock) then + * use __set_current_state(). + * + * The above is typically ordered against the wakeup, which does: + * + * need_sleep = false; + * wake_up_state(p, TASK_UNINTERRUPTIBLE); + * + * where wake_up_state() executes a full memory barrier before accessing the + * task state. 
+ * + * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, + * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a + * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). + * + * However, with slightly different timing the wakeup TASK_RUNNING store can + * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not + * a problem either because that will result in one extra go around the loop + * and our @cond test will save the day. + * + * Also see the comments of try_to_wake_up(). + */ +#define __set_current_state(state_value) \ + current->state = (state_value) + +#define set_current_state(state_value) \ + smp_store_mb(current->state, (state_value)) + +/* + * set_special_state() should be used for those states when the blocking task + * can not use the regular condition based wait-loop. In that case we must + * serialize against wakeups such that any possible in-flight TASK_RUNNING stores + * will not collide with our state change. + */ +#define set_special_state(state_value) \ + do { \ + unsigned long flags; /* may shadow */ \ + raw_spin_lock_irqsave(¤t->pi_lock, flags); \ + current->state = (state_value); \ + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ + } while (0) + +#endif + +/* Task command name length: */ +#define TASK_COMM_LEN 16 + +extern void scheduler_tick(void); + +#define MAX_SCHEDULE_TIMEOUT LONG_MAX + +extern long schedule_timeout(long timeout); +extern long schedule_timeout_interruptible(long timeout); +extern long schedule_timeout_killable(long timeout); +extern long schedule_timeout_uninterruptible(long timeout); +extern long schedule_timeout_idle(long timeout); +asmlinkage void schedule(void); +extern void schedule_preempt_disabled(void); + +extern int __must_check io_schedule_prepare(void); +extern void io_schedule_finish(int token); +extern long io_schedule_timeout(long timeout); +extern void io_schedule(void); + +/** + * struct prev_cputime - snapshot of system and user cputime + * @utime: time spent in user mode + * @stime: time spent in system mode + * @lock: protects the above two fields + * + * Stores previous user/system time values such that we can guarantee + * monotonicity. + */ +struct prev_cputime { +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + u64 utime; + u64 stime; + raw_spinlock_t lock; +#endif +}; + +/** + * struct task_cputime - collected CPU time counts + * @utime: time spent in user mode, in nanoseconds + * @stime: time spent in kernel mode, in nanoseconds + * @sum_exec_runtime: total time spent on the CPU, in nanoseconds + * + * This structure groups together three kinds of CPU time that are tracked for + * threads and thread groups. Most things considering CPU time want to group + * these counts together and treat all three of them in parallel. 
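The schedule_timeout*() helpers declared above combine the state handling with a timer. A minimal sketch of sleeping for roughly 100ms in process context (jiffies based, so the actual resolution depends on HZ); the function name is hypothetical:

        #include <linux/jiffies.h>
        #include <linux/sched.h>

        static void wait_about_100ms(void)
        {
                /* Plain schedule_timeout() expects the caller to set the state. */
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(100));

                /*
                 * The _interruptible variant sets TASK_INTERRUPTIBLE itself and
                 * returns the remaining jiffies if a signal cut the sleep short.
                 */
                if (schedule_timeout_interruptible(msecs_to_jiffies(100)))
                        pr_debug("sleep interrupted by a signal\n");
        }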
+ */ +struct task_cputime { + u64 utime; + u64 stime; + unsigned long long sum_exec_runtime; +}; + +/* Alternate field names when used on cache expirations: */ +#define virt_exp utime +#define prof_exp stime +#define sched_exp sum_exec_runtime + +enum vtime_state { + /* Task is sleeping or running in a CPU with VTIME inactive: */ + VTIME_INACTIVE = 0, + /* Task runs in userspace in a CPU with VTIME active: */ + VTIME_USER, + /* Task runs in kernelspace in a CPU with VTIME active: */ + VTIME_SYS, +}; + +struct vtime { + seqcount_t seqcount; + unsigned long long starttime; + enum vtime_state state; + u64 utime; + u64 stime; + u64 gtime; +}; + +struct sched_info { +#ifdef CONFIG_SCHED_INFO + /* Cumulative counters: */ + + /* # of times we have run on this CPU: */ + unsigned long pcount; + + /* Time spent waiting on a runqueue: */ + unsigned long long run_delay; + + /* Timestamps: */ + + /* When did we last run on a CPU? */ + unsigned long long last_arrival; + + /* When were we last queued to run? */ + unsigned long long last_queued; + +#endif /* CONFIG_SCHED_INFO */ +}; + +/* + * Integer metrics need fixed point arithmetic, e.g., sched/fair + * has a few: load, load_avg, util_avg, freq, and capacity. + * + * We define a basic fixed point arithmetic range, and then formalize + * all these metrics based on that basic range. + */ +# define SCHED_FIXEDPOINT_SHIFT 10 +# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) + +struct load_weight { + unsigned long weight; + u32 inv_weight; +}; + +/** + * struct util_est - Estimation utilization of FAIR tasks + * @enqueued: instantaneous estimated utilization of a task/cpu + * @ewma: the Exponential Weighted Moving Average (EWMA) + * utilization of a task + * + * Support data structure to track an Exponential Weighted Moving Average + * (EWMA) of a FAIR task's utilization. New samples are added to the moving + * average each time a task completes an activation. Sample's weight is chosen + * so that the EWMA will be relatively insensitive to transient changes to the + * task's workload. + * + * The enqueued attribute has a slightly different meaning for tasks and cpus: + * - task: the task's util_avg at last task dequeue time + * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU + * Thus, the util_est.enqueued of a task represents the contribution on the + * estimated utilization of the CPU where that task is currently enqueued. + * + * Only for tasks we track a moving average of the past instantaneous + * estimated utilization. This allows to absorb sporadic drops in utilization + * of an otherwise almost periodic task. + */ +struct util_est { + unsigned int enqueued; + unsigned int ewma; +#define UTIL_EST_WEIGHT_SHIFT 2 +} __attribute__((__aligned__(sizeof(u64)))); + +/* + * The load_avg/util_avg accumulates an infinite geometric series + * (see __update_load_avg() in kernel/sched/fair.c). + * + * [load_avg definition] + * + * load_avg = runnable% * scale_load_down(load) + * + * where runnable% is the time ratio that a sched_entity is runnable. + * For cfs_rq, it is the aggregated load_avg of all runnable and + * blocked sched_entities. + * + * load_avg may also take frequency scaling into account: + * + * load_avg = runnable% * scale_load_down(load) * freq% + * + * where freq% is the CPU frequency normalized to the highest frequency. + * + * [util_avg definition] + * + * util_avg = running% * SCHED_CAPACITY_SCALE + * + * where running% is the time ratio that a sched_entity is running on + * a CPU. 
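SCHED_FIXEDPOINT_SHIFT/SCHED_FIXEDPOINT_SCALE above are plain binary fixed point. A small userspace sketch (all values are made up) of how a ratio such as freq%, scaled by 1024, is applied to a load value while staying in the same fixed-point range:

        #include <stdio.h>

        #define SCHED_FIXEDPOINT_SHIFT  10
        #define SCHED_FIXEDPOINT_SCALE  (1L << SCHED_FIXEDPOINT_SHIFT)

        int main(void)
        {
                unsigned long load = 2048;      /* arbitrary load value            */
                unsigned long freq = 768;       /* 75% of max, scaled by 1024      */

                /* Multiply, then shift back down to undo the double scaling. */
                unsigned long scaled = (load * freq) >> SCHED_FIXEDPOINT_SHIFT;

                printf("load=%lu freq=%lu/%ld -> scaled load=%lu\n",
                       load, freq, SCHED_FIXEDPOINT_SCALE, scaled);
                return 0;
        }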
For cfs_rq, it is the aggregated util_avg of all runnable + * and blocked sched_entities. + * + * util_avg may also factor frequency scaling and CPU capacity scaling: + * + * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity% + * + * where freq% is the same as above, and capacity% is the CPU capacity + * normalized to the greatest capacity (due to uarch differences, etc). + * + * N.B., the above ratios (runnable%, running%, freq%, and capacity%) + * themselves are in the range of [0, 1]. To do fixed point arithmetics, + * we therefore scale them to as large a range as necessary. This is for + * example reflected by util_avg's SCHED_CAPACITY_SCALE. + * + * [Overflow issue] + * + * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities + * with the highest load (=88761), always runnable on a single cfs_rq, + * and should not overflow as the number already hits PID_MAX_LIMIT. + * + * For all other cases (including 32-bit kernels), struct load_weight's + * weight will overflow first before we do, because: + * + * Max(load_avg) <= Max(load.weight) + * + * Then it is the load_weight's responsibility to consider overflow + * issues. + */ +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_load_sum; + u32 util_sum; + u32 period_contrib; + unsigned long load_avg; + unsigned long runnable_load_avg; + unsigned long util_avg; + struct util_est util_est; +} ____cacheline_aligned; + +struct sched_statistics { +#ifdef CONFIG_SCHEDSTATS + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + + u64 block_start; + u64 block_max; + u64 exec_max; + u64 slice_max; + + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; +#endif +}; + +struct sched_entity { + /* For load-balancing: */ + struct load_weight load; + unsigned long runnable_weight; + struct rb_node run_node; + struct list_head group_node; + unsigned int on_rq; + + u64 exec_start; + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; + + u64 nr_migrations; + + struct sched_statistics statistics; + +#ifdef CONFIG_FAIR_GROUP_SCHED + int depth; + struct sched_entity *parent; + /* rq on which this entity is (to be) queued: */ + struct cfs_rq *cfs_rq; + /* rq "owned" by this entity/group: */ + struct cfs_rq *my_q; +#endif + +#ifdef CONFIG_SMP + /* + * Per entity load average tracking. + * + * Put into separate cache line so it does not + * collide with read-mostly values above. + */ + struct sched_avg avg; +#endif +}; + +struct sched_rt_entity { + struct list_head run_list; + unsigned long timeout; + unsigned long watchdog_stamp; + unsigned int time_slice; + unsigned short on_rq; + unsigned short on_list; + + struct sched_rt_entity *back; +#ifdef CONFIG_RT_GROUP_SCHED + struct sched_rt_entity *parent; + /* rq on which this entity is (to be) queued: */ + struct rt_rq *rt_rq; + /* rq "owned" by this entity/group: */ + struct rt_rq *my_q; +#endif +} __randomize_layout; + +struct sched_dl_entity { + struct rb_node rb_node; + + /* + * Original scheduling parameters. 
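UTIL_EST_WEIGHT_SHIFT above gives each new utilization sample a weight of 1/4 in the EWMA. The exact update lives in kernel/sched/fair.c and differs in detail; this is only a userspace sketch of the idea, with made-up sample values:

        #include <stdio.h>

        #define UTIL_EST_WEIGHT_SHIFT   2       /* new sample contributes 1/4 */

        static unsigned int ewma_update(unsigned int ewma, unsigned int sample)
        {
                /* ewma += (sample - ewma) / 4, in integer arithmetic */
                int delta = (int)sample - (int)ewma;

                return (unsigned int)((int)ewma + delta / (1 << UTIL_EST_WEIGHT_SHIFT));
        }

        int main(void)
        {
                unsigned int ewma = 0;
                unsigned int samples[] = { 400, 400, 400, 100, 400 };
                unsigned int i;

                for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                        ewma = ewma_update(ewma, samples[i]);
                        printf("sample=%u ewma=%u\n", samples[i], ewma);
                }
                return 0;
        }

A transient drop (the single 100 sample) barely moves the average, which is the "absorb sporadic drops" behaviour described for util_est.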
Copied here from sched_attr + * during sched_setattr(), they will remain the same until + * the next sched_setattr(). + */ + u64 dl_runtime; /* Maximum runtime for each instance */ + u64 dl_deadline; /* Relative deadline of each instance */ + u64 dl_period; /* Separation of two instances (period) */ + u64 dl_bw; /* dl_runtime / dl_period */ + u64 dl_density; /* dl_runtime / dl_deadline */ + + /* + * Actual scheduling parameters. Initialized with the values above, + * they are continously updated during task execution. Note that + * the remaining runtime could be < 0 in case we are in overrun. + */ + s64 runtime; /* Remaining runtime for this instance */ + u64 deadline; /* Absolute deadline for this instance */ + unsigned int flags; /* Specifying the scheduler behaviour */ + + /* + * Some bool flags: + * + * @dl_throttled tells if we exhausted the runtime. If so, the + * task has to wait for a replenishment to be performed at the + * next firing of dl_timer. + * + * @dl_boosted tells if we are boosted due to DI. If so we are + * outside bandwidth enforcement mechanism (but only until we + * exit the critical section); + * + * @dl_yielded tells if task gave up the CPU before consuming + * all its available runtime during the last job. + * + * @dl_non_contending tells if the task is inactive while still + * contributing to the active utilization. In other words, it + * indicates if the inactive timer has been armed and its handler + * has not been executed yet. This flag is useful to avoid race + * conditions between the inactive timer handler and the wakeup + * code. + * + * @dl_overrun tells if the task asked to be informed about runtime + * overruns. + */ + unsigned int dl_throttled : 1; + unsigned int dl_boosted : 1; + unsigned int dl_yielded : 1; + unsigned int dl_non_contending : 1; + unsigned int dl_overrun : 1; + + /* + * Bandwidth enforcement timer. Each -deadline task has its + * own bandwidth to be enforced, thus we need one timer per task. + */ + struct hrtimer dl_timer; + + /* + * Inactive timer, responsible for decreasing the active utilization + * at the "0-lag time". When a -deadline task blocks, it contributes + * to GRUB's active utilization until the "0-lag time", hence a + * timer is needed to decrease the active utilization at the correct + * time. + */ + struct hrtimer inactive_timer; +}; + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_need_qs; + + /* Otherwise the compiler can store garbage here: */ + u8 pad; + } b; /* Bits. */ + u32 s; /* Set of bits. */ +}; + +enum perf_event_task_context { + perf_invalid_context = -1, + perf_hw_context = 0, + perf_sw_context, + perf_nr_task_contexts, +}; + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct task_struct { +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* + * For reasons of header soup (see current_thread_info()), this + * must be the first element of task_struct. + */ + struct thread_info thread_info; +#endif + /* -1 unrunnable, 0 runnable, >0 stopped: */ + volatile long state; + + /* + * This begins the randomizable portion of task_struct. Only + * scheduling-critical items should be added above here. 
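The dl_runtime/dl_deadline/dl_period fields above mirror what userspace hands in through sched_setattr(2). A hedged userspace sketch follows: glibc has no wrapper, so the raw syscall is used, SYS_sched_setattr is assumed to be exposed by the installed headers, and the budget numbers are arbitrary (runtime <= deadline <= period must hold).

        #define _GNU_SOURCE
        #include <linux/types.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        #ifndef SCHED_DEADLINE
        #define SCHED_DEADLINE 6
        #endif

        /* Layout as in include/uapi/linux/sched/types.h */
        struct sched_attr {
                __u32 size;
                __u32 sched_policy;
                __u64 sched_flags;
                __s32 sched_nice;
                __u32 sched_priority;
                __u64 sched_runtime;
                __u64 sched_deadline;
                __u64 sched_period;
        };

        int main(void)
        {
                struct sched_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.size           = sizeof(attr);
                attr.sched_policy   = SCHED_DEADLINE;
                attr.sched_runtime  =  10 * 1000 * 1000;        /* 10ms of budget ...  */
                attr.sched_deadline =  30 * 1000 * 1000;        /* ... within 30ms ... */
                attr.sched_period   = 100 * 1000 * 1000;        /* ... every 100ms     */

                if (syscall(SYS_sched_setattr, 0, &attr, 0) != 0) {
                        perror("sched_setattr");        /* needs root / admission control */
                        return 1;
                }
                puts("running as SCHED_DEADLINE");
                return 0;
        }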
+ */ + randomized_struct_fields_start + + void *stack; + atomic_t usage; + /* Per task flags (PF_*), defined further below: */ + unsigned int flags; + unsigned int ptrace; + +#ifdef CONFIG_SMP + struct llist_node wake_entry; + int on_cpu; +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* Current CPU: */ + unsigned int cpu; +#endif + unsigned int wakee_flips; + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; + + /* + * recent_used_cpu is initially set as the last CPU used by a task + * that wakes affine another task. Waker/wakee relationships can + * push tasks around a CPU where each wakeup moves to the next one. + * Tracking a recently used CPU allows a quick search for a recently + * used CPU that may be idle. + */ + int recent_used_cpu; + int wake_cpu; +#endif + int on_rq; + + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; +#ifdef CONFIG_CGROUP_SCHED + struct task_group *sched_task_group; +#endif + struct sched_dl_entity dl; + +#ifdef CONFIG_PREEMPT_NOTIFIERS + /* List of struct preempt_notifier: */ + struct hlist_head preempt_notifiers; +#endif + +#ifdef CONFIG_BLK_DEV_IO_TRACE + unsigned int btrace_seq; +#endif + + unsigned int policy; + int nr_cpus_allowed; + cpumask_t cpus_allowed; + +#ifdef CONFIG_PREEMPT_RCU + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; +#endif /* #ifdef CONFIG_PREEMPT_RCU */ + +#ifdef CONFIG_TASKS_RCU + unsigned long rcu_tasks_nvcsw; + u8 rcu_tasks_holdout; + u8 rcu_tasks_idx; + int rcu_tasks_idle_cpu; + struct list_head rcu_tasks_holdout_list; +#endif /* #ifdef CONFIG_TASKS_RCU */ + + struct sched_info sched_info; + + struct list_head tasks; +#ifdef CONFIG_SMP + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; +#endif + + struct mm_struct *mm; + struct mm_struct *active_mm; + + /* Per-thread vma caching: */ + struct vmacache vmacache; + +#ifdef SPLIT_RSS_COUNTING + struct task_rss_stat rss_stat; +#endif + int exit_state; + int exit_code; + int exit_signal; + /* The signal sent when the parent dies: */ + int pdeath_signal; + /* JOBCTL_*, siglock protected: */ + unsigned long jobctl; + + /* Used for emulating ABI behavior of previous Linux versions: */ + unsigned int personality; + + /* Scheduler bits, serialized by scheduler locks: */ + unsigned sched_reset_on_fork:1; + unsigned sched_contributes_to_load:1; + unsigned sched_migrated:1; + unsigned sched_remote_wakeup:1; + /* Force alignment to the next boundary: */ + unsigned :0; + + /* Unserialized, strictly 'current' */ + + /* Bit to tell LSMs we're in execve(): */ + unsigned in_execve:1; + unsigned in_iowait:1; +#ifndef TIF_RESTORE_SIGMASK + unsigned restore_sigmask:1; +#endif +#ifdef CONFIG_MEMCG + unsigned in_user_fault:1; +#ifdef CONFIG_MEMCG_KMEM + unsigned memcg_kmem_skip_account:1; +#endif +#endif +#ifdef CONFIG_COMPAT_BRK + unsigned brk_randomized:1; +#endif +#ifdef CONFIG_CGROUPS + /* disallow userland-initiated cgroup migration */ + unsigned no_cgroup_migration:1; +#endif +#ifdef CONFIG_BLK_CGROUP + /* to be used once the psi infrastructure lands upstream. */ + unsigned use_memdelay:1; +#endif + + unsigned long atomic_flags; /* Flags requiring atomic access. 
*/ + + struct restart_block restart_block; + + pid_t pid; + pid_t tgid; + +#ifdef CONFIG_STACKPROTECTOR + /* Canary value for the -fstack-protector GCC feature: */ + unsigned long stack_canary; +#endif + /* + * Pointers to the (original) parent process, youngest child, younger sibling, + * older sibling, respectively. (p->father can be replaced with + * p->real_parent->pid) + */ + + /* Real parent process: */ + struct task_struct __rcu *real_parent; + + /* Recipient of SIGCHLD, wait4() reports: */ + struct task_struct __rcu *parent; + + /* + * Children/sibling form the list of natural children: + */ + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; + + /* + * 'ptraced' is the list of tasks this task is using ptrace() on. + * + * This includes both natural children and PTRACE_ATTACH targets. + * 'ptrace_entry' is this task's link on the p->parent->ptraced list. + */ + struct list_head ptraced; + struct list_head ptrace_entry; + + /* PID/PID hash table linkage. */ + struct pid *thread_pid; + struct hlist_node pid_links[PIDTYPE_MAX]; + struct list_head thread_group; + struct list_head thread_node; + + struct completion *vfork_done; + + /* CLONE_CHILD_SETTID: */ + int __user *set_child_tid; + + /* CLONE_CHILD_CLEARTID: */ + int __user *clear_child_tid; + + u64 utime; + u64 stime; +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME + u64 utimescaled; + u64 stimescaled; +#endif + u64 gtime; + struct prev_cputime prev_cputime; +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + struct vtime vtime; +#endif + +#ifdef CONFIG_NO_HZ_FULL + atomic_t tick_dep_mask; +#endif + /* Context switch counts: */ + unsigned long nvcsw; + unsigned long nivcsw; + + /* Monotonic time in nsecs: */ + u64 start_time; + + /* Boot based time in nsecs: */ + u64 real_start_time; + + /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ + unsigned long min_flt; + unsigned long maj_flt; + +#ifdef CONFIG_POSIX_TIMERS + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; +#endif + + /* Process credentials: */ + + /* Tracer's credentials at attach: */ + const struct cred __rcu *ptracer_cred; + + /* Objective and real subjective task credentials (COW): */ + const struct cred __rcu *real_cred; + + /* Effective (overridable) subjective task credentials (COW): */ + const struct cred __rcu *cred; + + /* + * executable name, excluding path. 
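The children/sibling list heads above form the natural-children list, which is stabilised by tasklist_lock. A kernel-side sketch of a hypothetical debug helper, log_children(), that only peeks at a few fields of each child:

        #include <linux/list.h>
        #include <linux/printk.h>
        #include <linux/sched.h>
        #include <linux/sched/task.h>   /* tasklist_lock */

        /* Hypothetical helper: print comm/pid of each natural child of @parent. */
        static void log_children(struct task_struct *parent)
        {
                struct task_struct *child;

                read_lock(&tasklist_lock);      /* keeps the list from changing */
                list_for_each_entry(child, &parent->children, sibling)
                        pr_info("child of %d: %s[%d]\n",
                                task_pid_nr(parent), child->comm,
                                task_pid_nr(child));
                read_unlock(&tasklist_lock);
        }

Anything more than a quick peek should take a reference with get_task_struct() before dropping the lock.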
+ * + * - normally initialized setup_new_exec() + * - access it with [gs]et_task_comm() + * - lock it with task_lock() + */ + char comm[TASK_COMM_LEN]; + + struct nameidata *nameidata; + +#ifdef CONFIG_SYSVIPC + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; +#endif +#ifdef CONFIG_DETECT_HUNG_TASK + unsigned long last_switch_count; + unsigned long last_switch_time; +#endif + /* Filesystem information: */ + struct fs_struct *fs; + + /* Open file information: */ + struct files_struct *files; + + /* Namespaces: */ + struct nsproxy *nsproxy; + + /* Signal handlers: */ + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + /* Restored if set_restore_sigmask() was used: */ + sigset_t saved_sigmask; + struct sigpending pending; + unsigned long sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + + struct callback_head *task_works; + + struct audit_context *audit_context; +#ifdef CONFIG_AUDITSYSCALL + kuid_t loginuid; + unsigned int sessionid; +#endif + struct seccomp seccomp; + + /* Thread group tracking: */ + u64 parent_exec_id; + u64 self_exec_id; + + /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ + spinlock_t alloc_lock; + + /* Protection of the PI data structures: */ + raw_spinlock_t pi_lock; + + struct wake_q_node wake_q; + +#ifdef CONFIG_RT_MUTEXES + /* PI waiters blocked on a rt_mutex held by this task: */ + struct rb_root_cached pi_waiters; + /* Updated under owner's pi_lock and rq lock */ + struct task_struct *pi_top_task; + /* Deadlock detection and priority inheritance handling: */ + struct rt_mutex_waiter *pi_blocked_on; +#endif + +#ifdef CONFIG_DEBUG_MUTEXES + /* Mutex deadlock detection: */ + struct mutex_waiter *blocked_on; +#endif + +#ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + unsigned long hardirq_enable_ip; + unsigned long hardirq_disable_ip; + unsigned int hardirq_enable_event; + unsigned int hardirq_disable_event; + int hardirqs_enabled; + int hardirq_context; + unsigned long softirq_disable_ip; + unsigned long softirq_enable_ip; + unsigned int softirq_disable_event; + unsigned int softirq_enable_event; + int softirqs_enabled; + int softirq_context; +#endif + +#ifdef CONFIG_LOCKDEP +# define MAX_LOCK_DEPTH 48UL + u64 curr_chain_key; + int lockdep_depth; + unsigned int lockdep_recursion; + struct held_lock held_locks[MAX_LOCK_DEPTH]; +#endif + +#ifdef CONFIG_UBSAN + unsigned int in_ubsan; +#endif + + /* Journalling filesystem info: */ + void *journal_info; + + /* Stacked block device info: */ + struct bio_list *bio_list; + +#ifdef CONFIG_BLOCK + /* Stack plugging: */ + struct blk_plug *plug; +#endif + + /* VM state: */ + struct reclaim_state *reclaim_state; + + struct backing_dev_info *backing_dev_info; + + struct io_context *io_context; + + /* Ptrace state: */ + unsigned long ptrace_message; + siginfo_t *last_siginfo; + + struct task_io_accounting ioac; +#ifdef CONFIG_TASK_XACCT + /* Accumulated RSS usage: */ + u64 acct_rss_mem1; + /* Accumulated virtual memory usage: */ + u64 acct_vm_mem1; + /* stime + utime since last update: */ + u64 acct_timexpd; +#endif +#ifdef CONFIG_CPUSETS + /* Protected by ->alloc_lock: */ + nodemask_t mems_allowed; + /* Seqence number to catch updates: */ + seqcount_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + int cpuset_slab_spread_rotor; +#endif +#ifdef CONFIG_CGROUPS + /* Control Group info protected by css_set_lock: */ + struct css_set __rcu *cgroups; + /* cg_list protected by css_set_lock and 
tsk->alloc_lock: */ + struct list_head cg_list; +#endif +#ifdef CONFIG_INTEL_RDT + u32 closid; + u32 rmid; +#endif +#ifdef CONFIG_FUTEX + struct robust_list_head __user *robust_list; +#ifdef CONFIG_COMPAT + struct compat_robust_list_head __user *compat_robust_list; +#endif + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; +#endif +#ifdef CONFIG_PERF_EVENTS + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; + struct mutex perf_event_mutex; + struct list_head perf_event_list; +#endif +#ifdef CONFIG_DEBUG_PREEMPT + unsigned long preempt_disable_ip; +#endif +#ifdef CONFIG_NUMA + /* Protected by alloc_lock: */ + struct mempolicy *mempolicy; + short il_prev; + short pref_node_fork; +#endif +#ifdef CONFIG_NUMA_BALANCING + int numa_scan_seq; + unsigned int numa_scan_period; + unsigned int numa_scan_period_max; + int numa_preferred_nid; + unsigned long numa_migrate_retry; + /* Migration stamp: */ + u64 node_stamp; + u64 last_task_numa_placement; + u64 last_sum_exec_runtime; + struct callback_head numa_work; + + /* + * This pointer is only modified for current in syscall and + * pagefault context (and for tasks being destroyed), so it can be read + * from any of the following contexts: + * - RCU read-side critical section + * - current->numa_group from everywhere + * - task's runqueue locked, task not running + */ + struct numa_group __rcu *numa_group; + + /* + * numa_faults is an array split into four regions: + * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer + * in this precise order. + * + * faults_memory: Exponential decaying average of faults on a per-node + * basis. Scheduling placement decisions are made based on these + * counts. The values remain static for the duration of a PTE scan. + * faults_cpu: Track the nodes the process was running on when a NUMA + * hinting fault was incurred. + * faults_memory_buffer and faults_cpu_buffer: Record faults per node + * during the current scan window. When the scan completes, the counts + * in faults_memory and faults_cpu decay and these values are copied. + */ + unsigned long *numa_faults; + unsigned long total_numa_faults; + + /* + * numa_faults_locality tracks if faults recorded during the last + * scan window were remote/local or failed to migrate. The task scan + * period is adapted based on the locality of the faults with different + * weights depending on whether they were shared or private faults + */ + unsigned long numa_faults_locality[3]; + + unsigned long numa_pages_migrated; +#endif /* CONFIG_NUMA_BALANCING */ + +#ifdef CONFIG_RSEQ + struct rseq __user *rseq; + u32 rseq_len; + u32 rseq_sig; + /* + * RmW on rseq_event_mask must be performed atomically + * with respect to preemption. 
+ */ + unsigned long rseq_event_mask; +#endif + + struct tlbflush_unmap_batch tlb_ubc; + + struct rcu_head rcu; + + /* Cache last used pipe for splice(): */ + struct pipe_inode_info *splice_pipe; + + struct page_frag task_frag; + +#ifdef CONFIG_TASK_DELAY_ACCT + struct task_delay_info *delays; +#endif + +#ifdef CONFIG_FAULT_INJECTION + int make_it_fail; + unsigned int fail_nth; +#endif + /* + * When (nr_dirtied >= nr_dirtied_pause), it's time to call + * balance_dirty_pages() for a dirty throttling pause: + */ + int nr_dirtied; + int nr_dirtied_pause; + /* Start of a write-and-pause period: */ + unsigned long dirty_paused_when; + +#ifdef CONFIG_LATENCYTOP + int latency_record_count; + struct latency_record latency_record[LT_SAVECOUNT]; +#endif + /* + * Time slack values; these are used to round up poll() and + * select() etc timeout values. These are in nanoseconds. + */ + u64 timer_slack_ns; + u64 default_timer_slack_ns; + +#ifdef CONFIG_KASAN + unsigned int kasan_depth; +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* Index of current stored address in ret_stack: */ + int curr_ret_stack; + int curr_ret_depth; + + /* Stack of return addresses for return function tracing: */ + struct ftrace_ret_stack *ret_stack; + + /* Timestamp for last schedule: */ + unsigned long long ftrace_timestamp; + + /* + * Number of functions that haven't been traced + * because of depth overrun: + */ + atomic_t trace_overrun; + + /* Pause tracing: */ + atomic_t tracing_graph_pause; +#endif + +#ifdef CONFIG_TRACING + /* State flags for use by tracers: */ + unsigned long trace; + + /* Bitmask and counter of trace recursion: */ + unsigned long trace_recursion; +#endif /* CONFIG_TRACING */ + +#ifdef CONFIG_KCOV + /* Coverage collection mode enabled for this task (0 if disabled): */ + unsigned int kcov_mode; + + /* Size of the kcov_area: */ + unsigned int kcov_size; + + /* Buffer for coverage collection: */ + void *kcov_area; + + /* KCOV descriptor wired with this task or NULL: */ + struct kcov *kcov; +#endif + +#ifdef CONFIG_MEMCG + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; + + /* Number of pages to reclaim on returning to userland: */ + unsigned int memcg_nr_pages_over_high; + + /* Used by memcontrol for targeted memcg charge: */ + struct mem_cgroup *active_memcg; +#endif + +#ifdef CONFIG_BLK_CGROUP + struct request_queue *throttle_queue; +#endif + +#ifdef CONFIG_UPROBES + struct uprobe_task *utask; +#endif +#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) + unsigned int sequential_io; + unsigned int sequential_io_avg; +#endif +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + unsigned long task_state_change; +#endif + int pagefault_disabled; +#ifdef CONFIG_MMU + struct task_struct *oom_reaper_list; +#endif +#ifdef CONFIG_VMAP_STACK + struct vm_struct *stack_vm_area; +#endif +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* A live task holds one reference: */ + atomic_t stack_refcount; +#endif +#ifdef CONFIG_LIVEPATCH + int patch_state; +#endif +#ifdef CONFIG_SECURITY + /* Used by LSM modules for access restriction: */ + void *security; +#endif + + /* + * New fields for task_struct should be added above here, so that + * they are included in the randomized portion of task_struct. + */ + randomized_struct_fields_end + + /* CPU-specific state of this task: */ + struct thread_struct thread; + + /* + * WARNING: on x86, 'thread_struct' contains a variable-sized + * structure. It *MUST* be at the end of 'task_struct'. + * + * Do not put anything below here! 
+ */ +}; + +static inline struct pid *task_pid(struct task_struct *task) +{ + return task->thread_pid; +} + +/* + * the helpers to get the task's different pids as they are seen + * from various namespaces + * + * task_xid_nr() : global id, i.e. the id seen from the init namespace; + * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of + * current. + * task_xid_nr_ns() : id seen from the ns specified; + * + * see also pid_nr() etc in include/linux/pid.h + */ +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); + +static inline pid_t task_pid_nr(struct task_struct *tsk) +{ + return tsk->pid; +} + +static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); +} + +static inline pid_t task_pid_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); +} + + +static inline pid_t task_tgid_nr(struct task_struct *tsk) +{ + return tsk->tgid; +} + +/** + * pid_alive - check that a task structure is not stale + * @p: Task structure to be checked. + * + * Test if a process is not yet dead (at most zombie state) + * If pid_alive fails, then pointers within the task structure + * can be stale and must not be dereferenced. + * + * Return: 1 if the process is alive. 0 otherwise. + */ +static inline int pid_alive(const struct task_struct *p) +{ + return p->thread_pid != NULL; +} + +static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); +} + +static inline pid_t task_pgrp_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); +} + + +static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); +} + +static inline pid_t task_session_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); +} + +static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); +} + +static inline pid_t task_tgid_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); +} + +static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) +{ + pid_t pid = 0; + + rcu_read_lock(); + if (pid_alive(tsk)) + pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); + rcu_read_unlock(); + + return pid; +} + +static inline pid_t task_ppid_nr(const struct task_struct *tsk) +{ + return task_ppid_nr_ns(tsk, &init_pid_ns); +} + +/* Obsolete, do not use: */ +static inline pid_t task_pgrp_nr(struct task_struct *tsk) +{ + return task_pgrp_nr_ns(tsk, &init_pid_ns); +} + +#define TASK_REPORT_IDLE (TASK_REPORT + 1) +#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) + +static inline unsigned int task_state_index(struct task_struct *tsk) +{ + unsigned int tsk_state = READ_ONCE(tsk->state); + unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; + + BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); + + if (tsk_state == TASK_IDLE) + state = TASK_REPORT_IDLE; + + return fls(state); +} + +static inline char task_index_to_char(unsigned int state) +{ + static const char state_char[] = "RSDTtXZPI"; + + BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); + + return state_char[state]; +} + +static inline char task_state_to_char(struct task_struct *tsk) +{ + return task_index_to_char(task_state_index(tsk)); +} + +/** + * 
is_global_init - check if a task structure is init. Since init + * is free to have sub-threads we need to check tgid. + * @tsk: Task structure to be checked. + * + * Check if a task structure is the first user space task the kernel created. + * + * Return: 1 if the task structure is init. 0 otherwise. + */ +static inline int is_global_init(struct task_struct *tsk) +{ + return task_tgid_nr(tsk) == 1; +} + +extern struct pid *cad_pid; + +/* + * Per process flags + */ +#define PF_IDLE 0x00000002 /* I am an IDLE thread */ +#define PF_EXITING 0x00000004 /* Getting shut down */ +#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ +#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ +#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ +#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ +#define PF_DUMPCORE 0x00000200 /* Dumped core */ +#define PF_SIGNALED 0x00000400 /* Killed by a signal */ +#define PF_MEMALLOC 0x00000800 /* Allocating memory */ +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ +#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ +#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ +#define PF_FROZEN 0x00010000 /* Frozen for system suspend */ +#define PF_KSWAPD 0x00020000 /* I am kswapd */ +#define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ +#define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ +#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ +#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ +#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ +#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ +#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ +#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ +#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ + +/* + * Only the _current_ task can read/write to tsk->flags, but other + * tasks can access tsk->flags in readonly mode for example + * with tsk_used_math (like during threaded core dumping). + * There is however an exception to this rule during ptrace + * or during fork: the ptracer task is allowed to write to the + * child->flags of its traced child (same goes for fork, the parent + * can write to the child->flags), because we're guaranteed the + * child is not running and in turn not changing child->flags + * at the same time the parent does it. + */ +#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) +#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) +#define clear_used_math() clear_stopped_child_used_math(current) +#define set_used_math() set_stopped_child_used_math(current) + +#define conditional_stopped_child_used_math(condition, child) \ + do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? 
PF_USED_MATH : 0; } while (0) + +#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) + +#define copy_to_stopped_child_used_math(child) \ + do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) + +/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ +#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) +#define used_math() tsk_used_math(current) + +static __always_inline bool is_percpu_thread(void) +{ +#ifdef CONFIG_SMP + return (current->flags & PF_NO_SETAFFINITY) && + (current->nr_cpus_allowed == 1); +#else + return true; +#endif +} + +/* Per-process atomic flags. */ +#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ +#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ +#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ +#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ +#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ +#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ +#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ + +#define TASK_PFA_TEST(name, func) \ + static inline bool task_##func(struct task_struct *p) \ + { return test_bit(PFA_##name, &p->atomic_flags); } + +#define TASK_PFA_SET(name, func) \ + static inline void task_set_##func(struct task_struct *p) \ + { set_bit(PFA_##name, &p->atomic_flags); } + +#define TASK_PFA_CLEAR(name, func) \ + static inline void task_clear_##func(struct task_struct *p) \ + { clear_bit(PFA_##name, &p->atomic_flags); } + +TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) +TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) + +TASK_PFA_TEST(SPREAD_PAGE, spread_page) +TASK_PFA_SET(SPREAD_PAGE, spread_page) +TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) + +TASK_PFA_TEST(SPREAD_SLAB, spread_slab) +TASK_PFA_SET(SPREAD_SLAB, spread_slab) +TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) + +TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) +TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) +TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) + +TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) +TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) + +TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable) +TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable) +TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable) + +TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) +TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) + +static inline void +current_restore_flags(unsigned long orig_flags, unsigned long flags) +{ + current->flags &= ~flags; + current->flags |= orig_flags & flags; +} + +extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); +extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); +#ifdef CONFIG_SMP +extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); +extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); +#else +static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +{ +} +static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) +{ + if (!cpumask_test_cpu(0, new_mask)) + return -EINVAL; + return 0; +} +#endif + +#ifndef cpu_relax_yield +#define cpu_relax_yield() cpu_relax() +#endif + +extern int yield_to(struct task_struct *p, bool preempt); +extern void set_user_nice(struct 
task_struct *p, long nice); +extern int task_prio(const struct task_struct *p); + +/** + * task_nice - return the nice value of a given task. + * @p: the task in question. + * + * Return: The nice value [ -20 ... 0 ... 19 ]. + */ +static inline int task_nice(const struct task_struct *p) +{ + return PRIO_TO_NICE((p)->static_prio); +} + +extern int can_nice(const struct task_struct *p, const int nice); +extern int task_curr(const struct task_struct *p); +extern int idle_cpu(int cpu); +extern int available_idle_cpu(int cpu); +extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); +extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern int sched_setattr(struct task_struct *, const struct sched_attr *); +extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); +extern struct task_struct *idle_task(int cpu); + +/** + * is_idle_task - is the specified task an idle task? + * @p: the task in question. + * + * Return: 1 if @p is an idle task. 0 otherwise. + */ +static inline bool is_idle_task(const struct task_struct *p) +{ + return !!(p->flags & PF_IDLE); +} + +extern struct task_struct *curr_task(int cpu); +extern void ia64_set_curr_task(int cpu, struct task_struct *p); + +void yield(void); + +union thread_union { +#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK + struct task_struct task; +#endif +#ifndef CONFIG_THREAD_INFO_IN_TASK + struct thread_info thread_info; +#endif + unsigned long stack[THREAD_SIZE/sizeof(long)]; +}; + +#ifndef CONFIG_THREAD_INFO_IN_TASK +extern struct thread_info init_thread_info; +#endif + +extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; + +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline struct thread_info *task_thread_info(struct task_struct *task) +{ + return &task->thread_info; +} +#elif !defined(__HAVE_THREAD_FUNCTIONS) +# define task_thread_info(task) ((struct thread_info *)(task)->stack) +#endif + +/* + * find a task by one of its numerical ids + * + * find_task_by_pid_ns(): + * finds a task by its pid in the specified namespace + * find_task_by_vpid(): + * finds a task by its virtual pid + * + * see also find_vpid() etc in include/linux/pid.h + */ + +extern struct task_struct *find_task_by_vpid(pid_t nr); +extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); + +/* + * find a task by its virtual pid and get the task struct + */ +extern struct task_struct *find_get_task_by_vpid(pid_t nr); + +extern int wake_up_state(struct task_struct *tsk, unsigned int state); +extern int wake_up_process(struct task_struct *tsk); +extern void wake_up_new_task(struct task_struct *tsk); + +#ifdef CONFIG_SMP +extern void kick_process(struct task_struct *tsk); +#else +static inline void kick_process(struct task_struct *tsk) { } +#endif + +extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); + +static inline void set_task_comm(struct task_struct *tsk, const char *from) +{ + __set_task_comm(tsk, from, false); +} + +extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); +#define get_task_comm(buf, tsk) ({ \ + BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ + __get_task_comm(buf, sizeof(buf), tsk); \ +}) + +#ifdef CONFIG_SMP +void scheduler_ipi(void); +extern unsigned long wait_task_inactive(struct task_struct *, long match_state); +#else +static inline void scheduler_ipi(void) { } +static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) +{ + return 1; +} 
+#endif + +/* + * Set thread flags in other task's structures. + * See asm/thread_info.h for TIF_xxxx flags available: + */ +static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + set_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + clear_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, + bool value) +{ + update_ti_thread_flag(task_thread_info(tsk), flag, value); +} + +static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + return test_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline void set_tsk_need_resched(struct task_struct *tsk) +{ + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); +} + +static inline void clear_tsk_need_resched(struct task_struct *tsk) +{ + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); +} + +static inline int test_tsk_need_resched(struct task_struct *tsk) +{ + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); +} + +/* + * cond_resched() and cond_resched_lock(): latency reduction via + * explicit rescheduling in places that are safe. The return + * value indicates whether a reschedule was done in fact. + * cond_resched_lock() will drop the spinlock before scheduling, + */ +#ifndef CONFIG_PREEMPT +extern int _cond_resched(void); +#else +static inline int _cond_resched(void) { return 0; } +#endif + +#define cond_resched() ({ \ + ___might_sleep(__FILE__, __LINE__, 0); \ + _cond_resched(); \ +}) + +extern int __cond_resched_lock(spinlock_t *lock); + +#define cond_resched_lock(lock) ({ \ + ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ + __cond_resched_lock(lock); \ +}) + +static inline void cond_resched_rcu(void) +{ +#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) + rcu_read_unlock(); + cond_resched(); + rcu_read_lock(); +#endif +} + +/* + * Does a critical section need to be broken due to another + * task waiting?: (technically does not depend on CONFIG_PREEMPT, + * but a general need for low latency) + */ +static inline int spin_needbreak(spinlock_t *lock) +{ +#ifdef CONFIG_PREEMPT + return spin_is_contended(lock); +#else + return 0; +#endif +} + +static __always_inline bool need_resched(void) +{ + return unlikely(tif_need_resched()); +} + +/* + * Wrappers for p->thread_info->cpu access. No-op on UP. + */ +#ifdef CONFIG_SMP + +static inline unsigned int task_cpu(const struct task_struct *p) +{ +#ifdef CONFIG_THREAD_INFO_IN_TASK + return READ_ONCE(p->cpu); +#else + return READ_ONCE(task_thread_info(p)->cpu); +#endif +} + +extern void set_task_cpu(struct task_struct *p, unsigned int cpu); + +#else + +static inline unsigned int task_cpu(const struct task_struct *p) +{ + return 0; +} + +static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) +{ +} + +#endif /* CONFIG_SMP */ + +/* + * In order to reduce various lock holder preemption latencies provide an + * interface to see if a vCPU is currently running or not. 
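cond_resched() above is the standard way to keep a long-running kernel loop from starving other tasks on !CONFIG_PREEMPT kernels. A sketch of a hypothetical bulk operation that yields voluntarily once per iteration:

        #include <linux/sched.h>
        #include <linux/string.h>
        #include <linux/types.h>

        /* Hypothetical: zero a large number of buffers without hogging the CPU. */
        static void clear_buffers(void **bufs, size_t nr, size_t len)
        {
                size_t i;

                for (i = 0; i < nr; i++) {
                        memset(bufs[i], 0, len);
                        cond_resched();         /* may schedule() if a resched is due */
                }
        }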
+ * + * This allows us to terminate optimistic spin loops and block, analogous to + * the native optimistic spin heuristic of testing if the lock owner task is + * running or not. + */ +#ifndef vcpu_is_preempted +# define vcpu_is_preempted(cpu) false +#endif + +extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); +extern long sched_getaffinity(pid_t pid, struct cpumask *mask); + +#ifndef TASK_SIZE_OF +#define TASK_SIZE_OF(tsk) TASK_SIZE +#endif + +#ifdef CONFIG_RSEQ + +/* + * Map the event mask on the user-space ABI enum rseq_cs_flags + * for direct mask checks. + */ +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, + RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, + RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, +}; + +enum rseq_event_mask { + RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), + RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), + RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), +}; + +static inline void rseq_set_notify_resume(struct task_struct *t) +{ + if (t->rseq) + set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); +} + +void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); + +static inline void rseq_handle_notify_resume(struct ksignal *ksig, + struct pt_regs *regs) +{ + if (current->rseq) + __rseq_handle_notify_resume(ksig, regs); +} + +static inline void rseq_signal_deliver(struct ksignal *ksig, + struct pt_regs *regs) +{ + preempt_disable(); + __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask); + preempt_enable(); + rseq_handle_notify_resume(ksig, regs); +} + +/* rseq_preempt() requires preemption to be disabled. */ +static inline void rseq_preempt(struct task_struct *t) +{ + __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); + rseq_set_notify_resume(t); +} + +/* rseq_migrate() requires preemption to be disabled. */ +static inline void rseq_migrate(struct task_struct *t) +{ + __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); + rseq_set_notify_resume(t); +} + +/* + * If parent process has a registered restartable sequences area, the + * child inherits. Unregister rseq for a clone with CLONE_VM set. 
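The rseq_* hooks above pair with a one-time rseq(2) registration performed by each thread from userspace. A hedged sketch follows; it assumes __NR_rseq and the uapi <linux/rseq.h> header are available on the build host, and RSEQ_SIG is an arbitrary application-chosen signature:

        #define _GNU_SOURCE
        #include <linux/rseq.h>
        #include <stdio.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        #define RSEQ_SIG 0x53053053     /* must match the signature used on unregister */

        static __thread struct rseq rseq_area
                __attribute__((aligned(4 * sizeof(__u64))));

        int main(void)
        {
                /* flags = 0 registers; RSEQ_FLAG_UNREGISTER undoes it later. */
                if (syscall(__NR_rseq, &rseq_area, sizeof(rseq_area), 0, RSEQ_SIG) != 0) {
                        perror("rseq");
                        return 1;
                }
                printf("registered; kernel-maintained cpu_id: %u\n", rseq_area.cpu_id);
                return 0;
        }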
+ */ +static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) +{ + if (clone_flags & CLONE_VM) { + t->rseq = NULL; + t->rseq_len = 0; + t->rseq_sig = 0; + t->rseq_event_mask = 0; + } else { + t->rseq = current->rseq; + t->rseq_len = current->rseq_len; + t->rseq_sig = current->rseq_sig; + t->rseq_event_mask = current->rseq_event_mask; + } +} + +static inline void rseq_execve(struct task_struct *t) +{ + t->rseq = NULL; + t->rseq_len = 0; + t->rseq_sig = 0; + t->rseq_event_mask = 0; +} + +#else + +static inline void rseq_set_notify_resume(struct task_struct *t) +{ +} +static inline void rseq_handle_notify_resume(struct ksignal *ksig, + struct pt_regs *regs) +{ +} +static inline void rseq_signal_deliver(struct ksignal *ksig, + struct pt_regs *regs) +{ +} +static inline void rseq_preempt(struct task_struct *t) +{ +} +static inline void rseq_migrate(struct task_struct *t) +{ +} +static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) +{ +} +static inline void rseq_execve(struct task_struct *t) +{ +} + +#endif + +#ifdef CONFIG_DEBUG_RSEQ + +void rseq_syscall(struct pt_regs *regs); + +#else + +static inline void rseq_syscall(struct pt_regs *regs) +{ +} + +#endif + +#endif diff --git a/include/linux/sched/autogroup.h b/include/linux/sched/autogroup.h new file mode 100644 index 000000000..704391cc1 --- /dev/null +++ b/include/linux/sched/autogroup.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_AUTOGROUP_H +#define _LINUX_SCHED_AUTOGROUP_H + +struct signal_struct; +struct task_struct; +struct task_group; +struct seq_file; + +#ifdef CONFIG_SCHED_AUTOGROUP +extern void sched_autogroup_create_attach(struct task_struct *p); +extern void sched_autogroup_detach(struct task_struct *p); +extern void sched_autogroup_fork(struct signal_struct *sig); +extern void sched_autogroup_exit(struct signal_struct *sig); +extern void sched_autogroup_exit_task(struct task_struct *p); +#ifdef CONFIG_PROC_FS +extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); +extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); +#endif +#else +static inline void sched_autogroup_create_attach(struct task_struct *p) { } +static inline void sched_autogroup_detach(struct task_struct *p) { } +static inline void sched_autogroup_fork(struct signal_struct *sig) { } +static inline void sched_autogroup_exit(struct signal_struct *sig) { } +static inline void sched_autogroup_exit_task(struct task_struct *p) { } +#endif + +#ifdef CONFIG_CGROUP_SCHED +extern struct task_group root_task_group; +#endif /* CONFIG_CGROUP_SCHED */ + +#endif /* _LINUX_SCHED_AUTOGROUP_H */ diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h new file mode 100644 index 000000000..867d58831 --- /dev/null +++ b/include/linux/sched/clock.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_CLOCK_H +#define _LINUX_SCHED_CLOCK_H + +#include + +/* + * Do not use outside of architecture code which knows its limitations. + * + * sched_clock() has no promise of monotonicity or bounded drift between + * CPUs, use (which you should not) requires disabling IRQs. + * + * Please use one of the three interfaces below. 
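When CONFIG_SCHED_AUTOGROUP and CONFIG_PROC_FS are enabled, proc_sched_autogroup_set_nice() above backs the writable /proc/<pid>/autogroup file. A userspace sketch that reads the current autogroup and then sets its nice value to 10 (assumes autogroup support is compiled in and enabled at runtime):

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/proc/self/autogroup", "r+");
                char line[128];

                if (!f) {
                        perror("/proc/self/autogroup");   /* no autogroup support? */
                        return 1;
                }
                if (fgets(line, sizeof(line), f))
                        printf("before: %s", line);       /* e.g. "/autogroup-42 nice 0" */

                rewind(f);                                /* switch from reading to writing */
                fprintf(f, "10\n");                       /* set the group's nice value */
                fclose(f);
                return 0;
        }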
+ */ +extern unsigned long long notrace sched_clock(void); + +/* + * See the comment in kernel/sched/clock.c + */ +extern u64 running_clock(void); +extern u64 sched_clock_cpu(int cpu); + + +extern void sched_clock_init(void); + +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +static inline void sched_clock_tick(void) +{ +} + +static inline void clear_sched_clock_stable(void) +{ +} + +static inline void sched_clock_idle_sleep_event(void) +{ +} + +static inline void sched_clock_idle_wakeup_event(void) +{ +} + +static inline u64 cpu_clock(int cpu) +{ + return sched_clock(); +} + +static inline u64 local_clock(void) +{ + return sched_clock(); +} +#else +extern int sched_clock_stable(void); +extern void clear_sched_clock_stable(void); + +/* + * When sched_clock_stable(), __sched_clock_offset provides the offset + * between local_clock() and sched_clock(). + */ +extern u64 __sched_clock_offset; + +extern void sched_clock_tick(void); +extern void sched_clock_tick_stable(void); +extern void sched_clock_idle_sleep_event(void); +extern void sched_clock_idle_wakeup_event(void); + +/* + * As outlined in clock.c, provides a fast, high resolution, nanosecond + * time source that is monotonic per cpu argument and has bounded drift + * between cpus. + * + * ######################### BIG FAT WARNING ########################## + * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # + * # go backwards !! # + * #################################################################### + */ +static inline u64 cpu_clock(int cpu) +{ + return sched_clock_cpu(cpu); +} + +static inline u64 local_clock(void) +{ + return sched_clock_cpu(raw_smp_processor_id()); +} +#endif + +#ifdef CONFIG_IRQ_TIME_ACCOUNTING +/* + * An i/f to runtime opt-in for irq time accounting based off of sched_clock. + * The reason for this explicit opt-in is not to have perf penalty with + * slow sched_clocks. + */ +extern void enable_sched_clock_irqtime(void); +extern void disable_sched_clock_irqtime(void); +#else +static inline void enable_sched_clock_irqtime(void) {} +static inline void disable_sched_clock_irqtime(void) {} +#endif + +#endif /* _LINUX_SCHED_CLOCK_H */ diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h new file mode 100644 index 000000000..dfd82eab2 --- /dev/null +++ b/include/linux/sched/coredump.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_COREDUMP_H +#define _LINUX_SCHED_COREDUMP_H + +#include + +#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ +#define SUID_DUMP_USER 1 /* Dump as user of process */ +#define SUID_DUMP_ROOT 2 /* Dump as root */ + +/* mm flags */ + +/* for SUID_DUMP_* above */ +#define MMF_DUMPABLE_BITS 2 +#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) + +extern void set_dumpable(struct mm_struct *mm, int value); +/* + * This returns the actual value of the suid_dumpable flag. For things + * that are using this for checking for privilege transitions, it must + * test against SUID_DUMP_USER rather than treating it as a boolean + * value. 
+ */ +static inline int __get_dumpable(unsigned long mm_flags) +{ + return mm_flags & MMF_DUMPABLE_MASK; +} + +static inline int get_dumpable(struct mm_struct *mm) +{ + return __get_dumpable(mm->flags); +} + +/* coredump filter bits */ +#define MMF_DUMP_ANON_PRIVATE 2 +#define MMF_DUMP_ANON_SHARED 3 +#define MMF_DUMP_MAPPED_PRIVATE 4 +#define MMF_DUMP_MAPPED_SHARED 5 +#define MMF_DUMP_ELF_HEADERS 6 +#define MMF_DUMP_HUGETLB_PRIVATE 7 +#define MMF_DUMP_HUGETLB_SHARED 8 +#define MMF_DUMP_DAX_PRIVATE 9 +#define MMF_DUMP_DAX_SHARED 10 + +#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS +#define MMF_DUMP_FILTER_BITS 9 +#define MMF_DUMP_FILTER_MASK \ + (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) +#define MMF_DUMP_FILTER_DEFAULT \ + ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ + (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) + +#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS +# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) +#else +# define MMF_DUMP_MASK_DEFAULT_ELF 0 +#endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ +#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ +/* + * This one-shot flag is dropped due to necessity of changing exe once again + * on NFS restore + */ +//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ + +#define MMF_HAS_UPROBES 19 /* has uprobes */ +#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ +#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ +#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ +#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ +#define MMF_OOM_VICTIM 25 /* mm is the oom victim */ +#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ +#define MMF_MULTIPROCESS 27 /* mm is shared between processes */ +#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ + MMF_DISABLE_THP_MASK) + +#endif /* _LINUX_SCHED_COREDUMP_H */ diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h new file mode 100644 index 000000000..a4530d782 --- /dev/null +++ b/include/linux/sched/cpufreq.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_CPUFREQ_H +#define _LINUX_SCHED_CPUFREQ_H + +#include + +/* + * Interface between cpufreq drivers and the scheduler: + */ + +#define SCHED_CPUFREQ_IOWAIT (1U << 0) +#define SCHED_CPUFREQ_MIGRATION (1U << 1) + +#ifdef CONFIG_CPU_FREQ +struct cpufreq_policy; + +struct update_util_data { + void (*func)(struct update_util_data *data, u64 time, unsigned int flags); +}; + +void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, + void (*func)(struct update_util_data *data, u64 time, + unsigned int flags)); +void cpufreq_remove_update_util_hook(int cpu); +bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy); +#endif /* CONFIG_CPU_FREQ */ + +#endif /* _LINUX_SCHED_CPUFREQ_H */ diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h new file mode 100644 index 000000000..53f883f5a --- /dev/null +++ b/include/linux/sched/cputime.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_CPUTIME_H +#define _LINUX_SCHED_CPUTIME_H + +#include + +/* + * cputime accounting APIs: + */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +#include + +#ifndef cputime_to_nsecs +# define 
cputime_to_nsecs(__ct) \ + (cputime_to_usecs(__ct) * NSEC_PER_USEC) +#endif +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +extern void task_cputime(struct task_struct *t, + u64 *utime, u64 *stime); +extern u64 task_gtime(struct task_struct *t); +#else +static inline void task_cputime(struct task_struct *t, + u64 *utime, u64 *stime) +{ + *utime = t->utime; + *stime = t->stime; +} + +static inline u64 task_gtime(struct task_struct *t) +{ + return t->gtime; +} +#endif + +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME +static inline void task_cputime_scaled(struct task_struct *t, + u64 *utimescaled, + u64 *stimescaled) +{ + *utimescaled = t->utimescaled; + *stimescaled = t->stimescaled; +} +#else +static inline void task_cputime_scaled(struct task_struct *t, + u64 *utimescaled, + u64 *stimescaled) +{ + task_cputime(t, utimescaled, stimescaled); +} +#endif + +extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); +extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); +extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, + u64 *ut, u64 *st); + +/* + * Thread group CPU time accounting. + */ +void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); + + +/* + * The following are functions that support scheduler-internal time accounting. + * These functions are generally called at the timer tick. None of this depends + * on CONFIG_SCHEDSTATS. + */ + +/** + * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running + * + * @tsk: Pointer to target task. + */ +#ifdef CONFIG_POSIX_TIMERS +static inline +struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) +{ + struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; + + /* Check if cputimer isn't running. This is accessed without locking. */ + if (!READ_ONCE(cputimer->running)) + return NULL; + + /* + * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime + * in __exit_signal(), we won't account to the signal struct further + * cputime consumed by that task, even though the task can still be + * ticking after __exit_signal(). + * + * In order to keep a consistent behaviour between thread group cputime + * and thread group cputimer accounting, lets also ignore the cputime + * elapsing after __exit_signal() in any thread group timer running. + * + * This makes sure that POSIX CPU clocks and timers are synchronized, so + * that a POSIX CPU timer won't expire while the corresponding POSIX CPU + * clock delta is behind the expiring timer value. + */ + if (unlikely(!tsk->sighand)) + return NULL; + + return cputimer; +} +#else +static inline +struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) +{ + return NULL; +} +#endif + +/** + * account_group_user_time - Maintain utime for a thread group. + * + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the utime field of the + * thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the utime field there. 
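 *
 * Minimal usage sketch (illustrative, not part of this header): the tick
 * accounting path charges a nanosecond delta, e.g.
 *
 *	account_group_user_time(p, TICK_NSEC);
 *
 * assuming one full tick of user time is being attributed to @p.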
+ */ +static inline void account_group_user_time(struct task_struct *tsk, + u64 cputime) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(cputime, &cputimer->cputime_atomic.utime); +} + +/** + * account_group_system_time - Maintain stime for a thread group. + * + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the stime field of the + * thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the stime field there. + */ +static inline void account_group_system_time(struct task_struct *tsk, + u64 cputime) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(cputime, &cputimer->cputime_atomic.stime); +} + +/** + * account_group_exec_runtime - Maintain exec runtime for a thread group. + * + * @tsk: Pointer to task structure. + * @ns: Time value by which to increment the sum_exec_runtime field + * of the thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the sum_exec_runtime field there. + */ +static inline void account_group_exec_runtime(struct task_struct *tsk, + unsigned long long ns) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime); +} + +static inline void prev_cputime_init(struct prev_cputime *prev) +{ +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + prev->utime = prev->stime = 0; + raw_spin_lock_init(&prev->lock); +#endif +} + +extern unsigned long long +task_sched_runtime(struct task_struct *task); + +#endif /* _LINUX_SCHED_CPUTIME_H */ diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h new file mode 100644 index 000000000..0cb034331 --- /dev/null +++ b/include/linux/sched/deadline.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * SCHED_DEADLINE tasks has negative priorities, reflecting + * the fact that any of them has higher prio than RT and + * NORMAL/BATCH tasks. + */ + +#define MAX_DL_PRIO 0 + +static inline int dl_prio(int prio) +{ + if (unlikely(prio < MAX_DL_PRIO)) + return 1; + return 0; +} + +static inline int dl_task(struct task_struct *p) +{ + return dl_prio(p->prio); +} + +static inline bool dl_time_before(u64 a, u64 b) +{ + return (s64)(a - b) < 0; +} diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h new file mode 100644 index 000000000..95fb9e025 --- /dev/null +++ b/include/linux/sched/debug.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_DEBUG_H +#define _LINUX_SCHED_DEBUG_H + +/* + * Various scheduler/task debugging interfaces: + */ + +struct task_struct; +struct pid_namespace; + +extern void dump_cpu_task(int cpu); + +/* + * Only dump TASK_* tasks. (0 for all tasks) + */ +extern void show_state_filter(unsigned long state_filter); + +static inline void show_state(void) +{ + show_state_filter(0); +} + +struct pt_regs; + +extern void show_regs(struct pt_regs *); + +/* + * TASK is a pointer to the task whose backtrace we want to see (or NULL for current + * task), SP is the stack pointer of the first frame that should be shown in the back + * trace (or NULL if the entire call-chain of the task should be shown). 
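 *
 * For example (illustrative only): show_stack(NULL, NULL) dumps the full
 * backtrace of the current task.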
+ */ +extern void show_stack(struct task_struct *task, unsigned long *sp); + +extern void sched_show_task(struct task_struct *p); + +#ifdef CONFIG_SCHED_DEBUG +struct seq_file; +extern void proc_sched_show_task(struct task_struct *p, + struct pid_namespace *ns, struct seq_file *m); +extern void proc_sched_set_task(struct task_struct *p); +#endif + +/* Attach to any functions which should be ignored in wchan output. */ +#define __sched __attribute__((__section__(".sched.text"))) + +/* Linker adds these: start and end of __sched functions */ +extern char __sched_text_start[], __sched_text_end[]; + +/* Is this address in the __sched functions? */ +extern int in_sched_functions(unsigned long addr); + +#endif /* _LINUX_SCHED_DEBUG_H */ diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h new file mode 100644 index 000000000..9a62ffdd2 --- /dev/null +++ b/include/linux/sched/hotplug.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_HOTPLUG_H +#define _LINUX_SCHED_HOTPLUG_H + +/* + * Scheduler interfaces for hotplug CPU support: + */ + +extern int sched_cpu_starting(unsigned int cpu); +extern int sched_cpu_activate(unsigned int cpu); +extern int sched_cpu_deactivate(unsigned int cpu); + +#ifdef CONFIG_HOTPLUG_CPU +extern int sched_cpu_dying(unsigned int cpu); +#else +# define sched_cpu_dying NULL +#endif + +#ifdef CONFIG_HOTPLUG_CPU +extern void idle_task_exit(void); +#else +static inline void idle_task_exit(void) {} +#endif + +#endif /* _LINUX_SCHED_HOTPLUG_H */ diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h new file mode 100644 index 000000000..22873d276 --- /dev/null +++ b/include/linux/sched/idle.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_IDLE_H +#define _LINUX_SCHED_IDLE_H + +#include + +enum cpu_idle_type { + CPU_IDLE, + CPU_NOT_IDLE, + CPU_NEWLY_IDLE, + CPU_MAX_IDLE_TYPES +}; + +extern void wake_up_if_idle(int cpu); + +/* + * Idle thread specific functions to determine the need_resched + * polling state. + */ +#ifdef TIF_POLLING_NRFLAG + +static inline void __current_set_polling(void) +{ + set_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_set_polling_and_test(void) +{ + __current_set_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +static inline void __current_clr_polling(void) +{ + clear_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_clr_polling_and_test(void) +{ + __current_clr_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +#else +static inline void __current_set_polling(void) { } +static inline void __current_clr_polling(void) { } + +static inline bool __must_check current_set_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} +static inline bool __must_check current_clr_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} +#endif + +static inline void current_clr_polling(void) +{ + __current_clr_polling(); + + /* + * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. + * Once the bit is cleared, we'll get IPIs with every new + * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also + * fold. 
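 *
 * (Illustrative context, not part of the original comment: the idle loop
 * brackets its wait roughly as
 *
 *	__current_set_polling();
 *	...poll for work while watching TIF_NEED_RESCHED...
 *	current_clr_polling();
 *
 * so that a remote resched_curr() can skip the IPI while the polling bit
 * is visible.)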
+ */ + smp_mb(); /* paired with resched_curr() */ + + preempt_fold_need_resched(); +} + +#endif /* _LINUX_SCHED_IDLE_H */ diff --git a/include/linux/sched/init.h b/include/linux/sched/init.h new file mode 100644 index 000000000..03542575f --- /dev/null +++ b/include/linux/sched/init.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_INIT_H +#define _LINUX_SCHED_INIT_H + +/* + * Scheduler init related prototypes: + */ + +extern void sched_init(void); +extern void sched_init_smp(void); + +#endif /* _LINUX_SCHED_INIT_H */ diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h new file mode 100644 index 000000000..4a6582c27 --- /dev/null +++ b/include/linux/sched/isolation.h @@ -0,0 +1,52 @@ +#ifndef _LINUX_SCHED_ISOLATION_H +#define _LINUX_SCHED_ISOLATION_H + +#include +#include +#include + +enum hk_flags { + HK_FLAG_TIMER = 1, + HK_FLAG_RCU = (1 << 1), + HK_FLAG_MISC = (1 << 2), + HK_FLAG_SCHED = (1 << 3), + HK_FLAG_TICK = (1 << 4), + HK_FLAG_DOMAIN = (1 << 5), + HK_FLAG_WQ = (1 << 6), +}; + +#ifdef CONFIG_CPU_ISOLATION +DECLARE_STATIC_KEY_FALSE(housekeeping_overriden); +extern int housekeeping_any_cpu(enum hk_flags flags); +extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags); +extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags); +extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags); +extern void __init housekeeping_init(void); + +#else + +static inline int housekeeping_any_cpu(enum hk_flags flags) +{ + return smp_processor_id(); +} + +static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags) +{ + return cpu_possible_mask; +} + +static inline void housekeeping_affine(struct task_struct *t, + enum hk_flags flags) { } +static inline void housekeeping_init(void) { } +#endif /* CONFIG_CPU_ISOLATION */ + +static inline bool housekeeping_cpu(int cpu, enum hk_flags flags) +{ +#ifdef CONFIG_CPU_ISOLATION + if (static_branch_unlikely(&housekeeping_overriden)) + return housekeeping_test_cpu(cpu, flags); +#endif + return true; +} + +#endif /* _LINUX_SCHED_ISOLATION_H */ diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h new file mode 100644 index 000000000..98228bd48 --- /dev/null +++ b/include/linux/sched/jobctl.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_JOBCTL_H +#define _LINUX_SCHED_JOBCTL_H + +#include + +struct task_struct; + +/* + * task->jobctl flags + */ +#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ + +#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ +#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ +#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ +#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ +#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ +#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ +#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ + +#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) +#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) +#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) +#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) +#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) +#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) +#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) + +#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) + 
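/*
 * Illustrative sketch (not part of this header): the *_BIT numbers and the
 * 1UL << masks derived from them compose naturally, e.g. a caller holding
 * siglock might check
 *
 *	if (task->jobctl & JOBCTL_STOP_PENDING)
 *		...take part in the group stop...
 */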
+extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); +extern void task_clear_jobctl_trapping(struct task_struct *task); +extern void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask); + +#endif /* _LINUX_SCHED_JOBCTL_H */ diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h new file mode 100644 index 000000000..80bc84ba5 --- /dev/null +++ b/include/linux/sched/loadavg.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_LOADAVG_H +#define _LINUX_SCHED_LOADAVG_H + +/* + * These are the constant used to fake the fixed-point load-average + * counting. Some notes: + * - 11 bit fractions expand to 22 bits by the multiplies: this gives + * a load-average precision of 10 bits integer + 11 bits fractional + * - if you want to count load-averages more often, you need more + * precision, or rounding will get you. With 2-second counting freq, + * the EXP_n values would be 1981, 2034 and 2043 if still using only + * 11 bit fractions. + */ +extern unsigned long avenrun[]; /* Load averages */ +extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); + +#define FSHIFT 11 /* nr of bits of precision */ +#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ +#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ +#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ +#define EXP_5 2014 /* 1/exp(5sec/5min) */ +#define EXP_15 2037 /* 1/exp(5sec/15min) */ + +#define CALC_LOAD(load,exp,n) \ + load *= exp; \ + load += n*(FIXED_1-exp); \ + load >>= FSHIFT; + +extern void calc_global_load(unsigned long ticks); + +#endif /* _LINUX_SCHED_LOADAVG_H */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h new file mode 100644 index 000000000..ef54f4b3f --- /dev/null +++ b/include/linux/sched/mm.h @@ -0,0 +1,366 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_MM_H +#define _LINUX_SCHED_MM_H + +#include +#include +#include +#include +#include +#include + +/* + * Routines for handling mm_structs + */ +extern struct mm_struct *mm_alloc(void); + +/** + * mmgrab() - Pin a &struct mm_struct. + * @mm: The &struct mm_struct to pin. + * + * Make sure that @mm will not get freed even after the owning task + * exits. This doesn't guarantee that the associated address space + * will still exist later on and mmget_not_zero() has to be used before + * accessing it. + * + * This is a preferred way to to pin @mm for a longer/unbounded amount + * of time. + * + * Use mmdrop() to release the reference acquired by mmgrab(). + * + * See also for an in-depth explanation + * of &mm_struct.mm_count vs &mm_struct.mm_users. + */ +static inline void mmgrab(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_count); +} + +extern void __mmdrop(struct mm_struct *mm); + +static inline void mmdrop(struct mm_struct *mm) +{ + /* + * The implicit full barrier implied by atomic_dec_and_test() is + * required by the membarrier system call before returning to + * user-space, after storing to rq->curr. + */ + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); +} + +void mmdrop(struct mm_struct *mm); + +/* + * This has to be called after a get_task_mm()/mmget_not_zero() + * followed by taking the mmap_sem for writing before modifying the + * vmas or anything the coredump pretends not to change from under it. + * + * It also has to be called when mmgrab() is used in the context of + * the process, but then the mm_count refcount is transferred outside + * the context of the process to run down_write() on that pinned mm.
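 *
 * (Illustrative pairing, not part of the original comment:
 *
 *	down_write(&mm->mmap_sem);
 *	if (mmget_still_valid(mm))
 *		...modify vmas...
 *	up_write(&mm->mmap_sem);
 * )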
+ * + * NOTE: find_extend_vma() called from GUP context is the only place + * that can modify the "mm" (notably the vm_start/end) under mmap_sem + * for reading and outside the context of the process, so it is also + * the only case that holds the mmap_sem for reading that must call + * this function. Generally if the mmap_sem is hold for reading + * there's no need of this check after get_task_mm()/mmget_not_zero(). + * + * This function can be obsoleted and the check can be removed, after + * the coredump code will hold the mmap_sem for writing before + * invoking the ->core_dump methods. + */ +static inline bool mmget_still_valid(struct mm_struct *mm) +{ + return likely(!mm->core_state); +} + +/** + * mmget() - Pin the address space associated with a &struct mm_struct. + * @mm: The address space to pin. + * + * Make sure that the address space of the given &struct mm_struct doesn't + * go away. This does not protect against parts of the address space being + * modified or freed, however. + * + * Never use this function to pin this address space for an + * unbounded/indefinite amount of time. + * + * Use mmput() to release the reference acquired by mmget(). + * + * See also for an in-depth explanation + * of &mm_struct.mm_count vs &mm_struct.mm_users. + */ +static inline void mmget(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_users); +} + +static inline bool mmget_not_zero(struct mm_struct *mm) +{ + return atomic_inc_not_zero(&mm->mm_users); +} + +/* mmput gets rid of the mappings and all user-space */ +extern void mmput(struct mm_struct *); +#ifdef CONFIG_MMU +/* same as above but performs the slow path from the async context. Can + * be called from the atomic context as well + */ +void mmput_async(struct mm_struct *); +#endif + +/* Grab a reference to a task's mm, if it is not already going away */ +extern struct mm_struct *get_task_mm(struct task_struct *task); +/* + * Grab a reference to a task's mm, if it is not already going away + * and ptrace_may_access with the mode parameter passed to it + * succeeds. + */ +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); +/* Remove the current tasks stale references to the old mm_struct on exit() */ +extern void exit_mm_release(struct task_struct *, struct mm_struct *); +/* Remove the current tasks stale references to the old mm_struct on exec() */ +extern void exec_mm_release(struct task_struct *, struct mm_struct *); + +#ifdef CONFIG_MEMCG +extern void mm_update_next_owner(struct mm_struct *mm); +#else +static inline void mm_update_next_owner(struct mm_struct *mm) +{ +} +#endif /* CONFIG_MEMCG */ + +#ifdef CONFIG_MMU +extern void arch_pick_mmap_layout(struct mm_struct *mm, + struct rlimit *rlim_stack); +extern unsigned long +arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); +extern unsigned long +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#else +static inline void arch_pick_mmap_layout(struct mm_struct *mm, + struct rlimit *rlim_stack) {} +#endif + +static inline bool in_vfork(struct task_struct *tsk) +{ + bool ret; + + /* + * need RCU to access ->real_parent if CLONE_VM was used along with + * CLONE_PARENT. 
+ * + * We check real_parent->mm == tsk->mm because CLONE_VFORK does not + * imply CLONE_VM + * + * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus + * ->real_parent is not necessarily the task doing vfork(), so in + * theory we can't rely on task_lock() if we want to dereference it. + * + * And in this case we can't trust the real_parent->mm == tsk->mm + * check, it can be false negative. But we do not care, if init or + * another oom-unkillable task does this it should blame itself. + */ + rcu_read_lock(); + ret = tsk->vfork_done && + rcu_dereference(tsk->real_parent)->mm == tsk->mm; + rcu_read_unlock(); + + return ret; +} + +/* + * Applies per-task gfp context to the given allocation flags. + * PF_MEMALLOC_NOIO implies GFP_NOIO + * PF_MEMALLOC_NOFS implies GFP_NOFS + */ +static inline gfp_t current_gfp_context(gfp_t flags) +{ + /* + * NOIO implies both NOIO and NOFS and it is a weaker context + * so always make sure it makes precendence + */ + if (unlikely(current->flags & PF_MEMALLOC_NOIO)) + flags &= ~(__GFP_IO | __GFP_FS); + else if (unlikely(current->flags & PF_MEMALLOC_NOFS)) + flags &= ~__GFP_FS; + return flags; +} + +#ifdef CONFIG_LOCKDEP +extern void __fs_reclaim_acquire(void); +extern void __fs_reclaim_release(void); +extern void fs_reclaim_acquire(gfp_t gfp_mask); +extern void fs_reclaim_release(gfp_t gfp_mask); +#else +static inline void __fs_reclaim_acquire(void) { } +static inline void __fs_reclaim_release(void) { } +static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } +static inline void fs_reclaim_release(gfp_t gfp_mask) { } +#endif + +/** + * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope. + * + * This functions marks the beginning of the GFP_NOIO allocation scope. + * All further allocations will implicitly drop __GFP_IO flag and so + * they are safe for the IO critical section from the allocation recursion + * point of view. Use memalloc_noio_restore to end the scope with flags + * returned by this function. + * + * This function is safe to be used from any context. + */ +static inline unsigned int memalloc_noio_save(void) +{ + unsigned int flags = current->flags & PF_MEMALLOC_NOIO; + current->flags |= PF_MEMALLOC_NOIO; + return flags; +} + +/** + * memalloc_noio_restore - Ends the implicit GFP_NOIO scope. + * @flags: Flags to restore. + * + * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function. + * Always make sure that that the given flags is the return value from the + * pairing memalloc_noio_save call. + */ +static inline void memalloc_noio_restore(unsigned int flags) +{ + current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; +} + +/** + * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope. + * + * This functions marks the beginning of the GFP_NOFS allocation scope. + * All further allocations will implicitly drop __GFP_FS flag and so + * they are safe for the FS critical section from the allocation recursion + * point of view. Use memalloc_nofs_restore to end the scope with flags + * returned by this function. + * + * This function is safe to be used from any context. + */ +static inline unsigned int memalloc_nofs_save(void) +{ + unsigned int flags = current->flags & PF_MEMALLOC_NOFS; + current->flags |= PF_MEMALLOC_NOFS; + return flags; +} + +/** + * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope. + * @flags: Flags to restore. + * + * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function. 
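 * A typical pairing looks like (illustrative, not part of this comment):
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	...allocations here implicitly behave as GFP_NOFS...
 *	memalloc_nofs_restore(nofs_flags);
 *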
+ * Always make sure that that the given flags is the return value from the + * pairing memalloc_nofs_save call. + */ +static inline void memalloc_nofs_restore(unsigned int flags) +{ + current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags; +} + +static inline unsigned int memalloc_noreclaim_save(void) +{ + unsigned int flags = current->flags & PF_MEMALLOC; + current->flags |= PF_MEMALLOC; + return flags; +} + +static inline void memalloc_noreclaim_restore(unsigned int flags) +{ + current->flags = (current->flags & ~PF_MEMALLOC) | flags; +} + +#ifdef CONFIG_MEMCG +/** + * memalloc_use_memcg - Starts the remote memcg charging scope. + * @memcg: memcg to charge. + * + * This function marks the beginning of the remote memcg charging scope. All the + * __GFP_ACCOUNT allocations till the end of the scope will be charged to the + * given memcg. + * + * NOTE: This function is not nesting safe. + */ +static inline void memalloc_use_memcg(struct mem_cgroup *memcg) +{ + WARN_ON_ONCE(current->active_memcg); + current->active_memcg = memcg; +} + +/** + * memalloc_unuse_memcg - Ends the remote memcg charging scope. + * + * This function marks the end of the remote memcg charging scope started by + * memalloc_use_memcg(). + */ +static inline void memalloc_unuse_memcg(void) +{ + current->active_memcg = NULL; +} +#else +static inline void memalloc_use_memcg(struct mem_cgroup *memcg) +{ +} + +static inline void memalloc_unuse_memcg(void) +{ +} +#endif + +#ifdef CONFIG_MEMBARRIER +enum { + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), + MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1), + MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2), + MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3), + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4), + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5), +}; + +enum { + MEMBARRIER_FLAG_SYNC_CORE = (1U << 0), +}; + +#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS +#include +#endif + +static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) +{ + if (current->mm != mm) + return; + if (likely(!(atomic_read(&mm->membarrier_state) & + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE))) + return; + sync_core_before_usermode(); +} + +static inline void membarrier_execve(struct task_struct *t) +{ + atomic_set(&t->mm->membarrier_state, 0); +} +#else +#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS +static inline void membarrier_arch_switch_mm(struct mm_struct *prev, + struct mm_struct *next, + struct task_struct *tsk) +{ +} +#endif +static inline void membarrier_execve(struct task_struct *t) +{ +} +static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) +{ +} +#endif + +#endif /* _LINUX_SCHED_MM_H */ diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h new file mode 100644 index 000000000..b36f4cf38 --- /dev/null +++ b/include/linux/sched/nohz.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_NOHZ_H +#define _LINUX_SCHED_NOHZ_H + +/* + * This is the interface between the scheduler and nohz/dynticks: + */ + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void cpu_load_update_nohz_start(void); +extern void cpu_load_update_nohz_stop(void); +#else +static inline void cpu_load_update_nohz_start(void) { } +static inline void cpu_load_update_nohz_stop(void) { } +#endif + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void nohz_balance_enter_idle(int cpu); +extern int get_nohz_timer_target(void); +#else +static inline void 
nohz_balance_enter_idle(int cpu) { } +#endif + +#ifdef CONFIG_NO_HZ_COMMON +void calc_load_nohz_start(void); +void calc_load_nohz_stop(void); +#else +static inline void calc_load_nohz_start(void) { } +static inline void calc_load_nohz_stop(void) { } +#endif /* CONFIG_NO_HZ_COMMON */ + +#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) +extern void wake_up_nohz_cpu(int cpu); +#else +static inline void wake_up_nohz_cpu(int cpu) { } +#endif + +#endif /* _LINUX_SCHED_NOHZ_H */ diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h new file mode 100644 index 000000000..3988762ef --- /dev/null +++ b/include/linux/sched/numa_balancing.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_NUMA_BALANCING_H +#define _LINUX_SCHED_NUMA_BALANCING_H + +/* + * This is the interface between the scheduler and the MM that + * implements memory access pattern based NUMA-balancing: + */ + +#include + +#define TNF_MIGRATED 0x01 +#define TNF_NO_GROUP 0x02 +#define TNF_SHARED 0x04 +#define TNF_FAULT_LOCAL 0x08 +#define TNF_MIGRATE_FAIL 0x10 + +#ifdef CONFIG_NUMA_BALANCING +extern void task_numa_fault(int last_node, int node, int pages, int flags); +extern pid_t task_numa_group_id(struct task_struct *p); +extern void set_numabalancing_state(bool enabled); +extern void task_numa_free(struct task_struct *p, bool final); +extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, + int src_nid, int dst_cpu); +#else +static inline void task_numa_fault(int last_node, int node, int pages, + int flags) +{ +} +static inline pid_t task_numa_group_id(struct task_struct *p) +{ + return 0; +} +static inline void set_numabalancing_state(bool enabled) +{ +} +static inline void task_numa_free(struct task_struct *p, bool final) +{ +} +static inline bool should_numa_migrate_memory(struct task_struct *p, + struct page *page, int src_nid, int dst_cpu) +{ + return true; +} +#endif + +#endif /* _LINUX_SCHED_NUMA_BALANCING_H */ diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h new file mode 100644 index 000000000..7d64feafc --- /dev/null +++ b/include/linux/sched/prio.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_PRIO_H +#define _LINUX_SCHED_PRIO_H + +#define MAX_NICE 19 +#define MIN_NICE -20 +#define NICE_WIDTH (MAX_NICE - MIN_NICE + 1) + +/* + * Priority of a process goes from 0..MAX_PRIO-1, valid RT + * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH + * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority + * values are inverted: lower p->prio value means higher priority. + * + * The MAX_USER_RT_PRIO value allows the actual maximum + * RT priority to be separate from the value exported to + * user-space. This allows kernel threads to set their + * priority to a value higher than any user task. Note: + * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. + */ + +#define MAX_USER_RT_PRIO 100 +#define MAX_RT_PRIO MAX_USER_RT_PRIO + +#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) +#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) + +/* + * Convert user-nice values [ -20 ... 0 ... 19 ] + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], + * and back. + */ +#define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO) +#define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO) + +/* + * 'User priority' is the nice value converted to something we + * can work with better when scaling various scheduler parameters, + * it's a [ 0 ... 39 ] range. 
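 *
 * For example: nice 0 maps to static priority 120 and USER_PRIO() 20,
 * nice -20 to priority 100 (USER_PRIO() 0), and nice 19 to priority 139
 * (USER_PRIO() 39); MAX_USER_PRIO evaluates to 40.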
+ */ +#define USER_PRIO(p) ((p)-MAX_RT_PRIO) +#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) +#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) + +/* + * Convert nice value [19,-20] to rlimit style value [1,40]. + */ +static inline long nice_to_rlimit(long nice) +{ + return (MAX_NICE - nice + 1); +} + +/* + * Convert rlimit style value [1,40] to nice value [-20, 19]. + */ +static inline long rlimit_to_nice(long prio) +{ + return (MAX_NICE - prio + 1); +} + +#endif /* _LINUX_SCHED_PRIO_H */ diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h new file mode 100644 index 000000000..e5af028c0 --- /dev/null +++ b/include/linux/sched/rt.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_RT_H +#define _LINUX_SCHED_RT_H + +#include + +struct task_struct; + +static inline int rt_prio(int prio) +{ + if (unlikely(prio < MAX_RT_PRIO)) + return 1; + return 0; +} + +static inline int rt_task(struct task_struct *p) +{ + return rt_prio(p->prio); +} + +static inline bool task_is_realtime(struct task_struct *tsk) +{ + int policy = tsk->policy; + + if (policy == SCHED_FIFO || policy == SCHED_RR) + return true; + if (policy == SCHED_DEADLINE) + return true; + return false; +} + +#ifdef CONFIG_RT_MUTEXES +/* + * Must hold either p->pi_lock or task_rq(p)->lock. + */ +static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p) +{ + return p->pi_top_task; +} +extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task); +extern void rt_mutex_adjust_pi(struct task_struct *p); +static inline bool tsk_is_pi_blocked(struct task_struct *tsk) +{ + return tsk->pi_blocked_on != NULL; +} +#else +static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) +{ + return NULL; +} +# define rt_mutex_adjust_pi(p) do { } while (0) +static inline bool tsk_is_pi_blocked(struct task_struct *tsk) +{ + return false; +} +#endif + +extern void normalize_rt_tasks(void); + + +/* + * default timeslice is 100 msecs (used only for SCHED_RR tasks). + * Timeslices get refilled after they expire. + */ +#define RR_TIMESLICE (100 * HZ / 1000) + +#endif /* _LINUX_SCHED_RT_H */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h new file mode 100644 index 000000000..660d78c9a --- /dev/null +++ b/include/linux/sched/signal.h @@ -0,0 +1,702 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_SIGNAL_H +#define _LINUX_SCHED_SIGNAL_H + +#include +#include +#include +#include +#include +#include + +/* + * Types defining task->signal and task->sighand and APIs using them: + */ + +struct sighand_struct { + atomic_t count; + struct k_sigaction action[_NSIG]; + spinlock_t siglock; + wait_queue_head_t signalfd_wqh; +}; + +/* + * Per-process accounting stats: + */ +struct pacct_struct { + int ac_flag; + long ac_exitcode; + unsigned long ac_mem; + u64 ac_utime, ac_stime; + unsigned long ac_minflt, ac_majflt; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +/* + * This is the atomic variant of task_cputime, which can be used for + * storing and updating task_cputime statistics without locking. + */ +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +#define INIT_CPUTIME_ATOMIC \ + (struct task_cputime_atomic) { \ + .utime = ATOMIC64_INIT(0), \ + .stime = ATOMIC64_INIT(0), \ + .sum_exec_runtime = ATOMIC64_INIT(0), \ + } +/** + * struct thread_group_cputimer - thread group interval timer counts + * @cputime_atomic: atomic thread group interval timers. 
+ * @running: true when there are timers running and + * @cputime_atomic receives updates. + * @checking_timer: true when a thread in the group is in the + * process of checking for thread group timers. + * + * This structure contains the version of task_cputime, above, that is + * used for thread group CPU timer calculations. + */ +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; + bool running; + bool checking_timer; +}; + +struct multiprocess_signals { + sigset_t signal; + struct hlist_node node; +}; + +/* + * NOTE! "signal_struct" does not have its own + * locking, because a shared signal_struct always + * implies a shared sighand_struct, so locking + * sighand_struct is always a proper superset of + * the locking of signal_struct. + */ +struct signal_struct { + atomic_t sigcnt; + atomic_t live; + int nr_threads; + struct list_head thread_head; + + wait_queue_head_t wait_chldexit; /* for wait4() */ + + /* current thread group signal load-balancing target: */ + struct task_struct *curr_target; + + /* shared signal handling: */ + struct sigpending shared_pending; + + /* For collecting multiprocess signals during fork */ + struct hlist_head multiprocess; + + /* thread group exit support */ + int group_exit_code; + /* overloaded: + * - notify group_exit_task when ->count is equal to notify_count + * - everyone except group_exit_task is stopped during signal delivery + * of fatal signals, group_exit_task processes the signal. + */ + int notify_count; + struct task_struct *group_exit_task; + + /* thread group stop support, overloads group_exit_code too */ + int group_stop_count; + unsigned int flags; /* see SIGNAL_* flags below */ + + /* + * PR_SET_CHILD_SUBREAPER marks a process, like a service + * manager, to re-parent orphan (double-forking) child processes + * to this process instead of 'init'. The service manager is + * able to receive SIGCHLD signals and is able to investigate + * the process until it calls wait(). All children of this + * process will inherit a flag if they should look for a + * child_subreaper process at exit. + */ + unsigned int is_child_subreaper:1; + unsigned int has_child_subreaper:1; + +#ifdef CONFIG_POSIX_TIMERS + + /* POSIX.1b Interval Timers */ + int posix_timer_id; + struct list_head posix_timers; + + /* ITIMER_REAL timer for the process */ + struct hrtimer real_timer; + ktime_t it_real_incr; + + /* + * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use + * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these + * values are defined to 0 and 1 respectively + */ + struct cpu_itimer it[2]; + + /* + * Thread group totals for process CPU timers. + * See thread_group_cputimer(), et al, for details. + */ + struct thread_group_cputimer cputimer; + + /* Earliest-expiration cache. */ + struct task_cputime cputime_expires; + + struct list_head cpu_timers[3]; + +#endif + + /* PID/PID hash table linkage. */ + struct pid *pids[PIDTYPE_MAX]; + +#ifdef CONFIG_NO_HZ_FULL + atomic_t tick_dep_mask; +#endif + + struct pid *tty_old_pgrp; + + /* boolean value for session group leader */ + int leader; + + struct tty_struct *tty; /* NULL if no tty */ + +#ifdef CONFIG_SCHED_AUTOGROUP + struct autogroup *autogroup; +#endif + /* + * Cumulative resource counters for dead threads in the group, + * and for reaped dead child processes forked by this group. + * Live threads maintain their own counters and add to these + * in __exit_signal, except for the group leader. 
+ */ + seqlock_t stats_lock; + u64 utime, stime, cutime, cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; + unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; + unsigned long inblock, oublock, cinblock, coublock; + unsigned long maxrss, cmaxrss; + struct task_io_accounting ioac; + + /* + * Cumulative ns of schedule CPU time fo dead threads in the + * group, not including a zombie group leader, (This only differs + * from jiffies_to_ns(utime + stime) if sched_clock uses something + * other than jiffies.) + */ + unsigned long long sum_sched_runtime; + + /* + * We don't bother to synchronize most readers of this at all, + * because there is no reader checking a limit that actually needs + * to get both rlim_cur and rlim_max atomically, and either one + * alone is a single word that can safely be read normally. + * getrlimit/setrlimit use task_lock(current->group_leader) to + * protect this instead of the siglock, because they really + * have no need to disable irqs. + */ + struct rlimit rlim[RLIM_NLIMITS]; + +#ifdef CONFIG_BSD_PROCESS_ACCT + struct pacct_struct pacct; /* per-process accounting information */ +#endif +#ifdef CONFIG_TASKSTATS + struct taskstats *stats; +#endif +#ifdef CONFIG_AUDIT + unsigned audit_tty; + struct tty_audit_buf *tty_audit_buf; +#endif + + /* + * Thread is the potential origin of an oom condition; kill first on + * oom + */ + bool oom_flag_origin; + short oom_score_adj; /* OOM kill score adjustment */ + short oom_score_adj_min; /* OOM kill score adjustment min value. + * Only settable by CAP_SYS_RESOURCE. */ + struct mm_struct *oom_mm; /* recorded mm when the thread group got + * killed by the oom killer */ + + struct mutex cred_guard_mutex; /* guard against foreign influences on + * credential calculations + * (notably. ptrace) */ +} __randomize_layout; + +/* + * Bits in flags field of signal_struct. + */ +#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ +#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ +#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ +#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ +/* + * Pending notifications to parent. 
+ */ +#define SIGNAL_CLD_STOPPED 0x00000010 +#define SIGNAL_CLD_CONTINUED 0x00000020 +#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) + +#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ + +#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ + SIGNAL_STOP_CONTINUED) + +static inline void signal_set_stop_flags(struct signal_struct *sig, + unsigned int flags) +{ + WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); + sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; +} + +/* If true, all threads except ->group_exit_task have pending SIGKILL */ +static inline int signal_group_exit(const struct signal_struct *sig) +{ + return (sig->flags & SIGNAL_GROUP_EXIT) || + (sig->group_exit_task != NULL); +} + +extern void flush_signals(struct task_struct *); +extern void ignore_signals(struct task_struct *); +extern void flush_signal_handlers(struct task_struct *, int force_default); +extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); + +static inline int kernel_dequeue_signal(siginfo_t *info) +{ + struct task_struct *tsk = current; + siginfo_t __info; + int ret; + + spin_lock_irq(&tsk->sighand->siglock); + ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); + spin_unlock_irq(&tsk->sighand->siglock); + + return ret; +} + +static inline void kernel_signal_stop(void) +{ + spin_lock_irq(¤t->sighand->siglock); + if (current->jobctl & JOBCTL_STOP_DEQUEUED) + set_special_state(TASK_STOPPED); + spin_unlock_irq(¤t->sighand->siglock); + + schedule(); +} +#ifdef __ARCH_SI_TRAPNO +# define ___ARCH_SI_TRAPNO(_a1) , _a1 +#else +# define ___ARCH_SI_TRAPNO(_a1) +#endif +#ifdef __ia64__ +# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3 +#else +# define ___ARCH_SI_IA64(_a1, _a2, _a3) +#endif + +int force_sig_fault(int sig, int code, void __user *addr + ___ARCH_SI_TRAPNO(int trapno) + ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) + , struct task_struct *t); +int send_sig_fault(int sig, int code, void __user *addr + ___ARCH_SI_TRAPNO(int trapno) + ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) + , struct task_struct *t); + +int force_sig_mceerr(int code, void __user *, short, struct task_struct *); +int send_sig_mceerr(int code, void __user *, short, struct task_struct *); + +int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); +int force_sig_pkuerr(void __user *addr, u32 pkey); + +int force_sig_ptrace_errno_trap(int errno, void __user *addr); + +extern int send_sig_info(int, struct siginfo *, struct task_struct *); +extern void force_sigsegv(int sig, struct task_struct *p); +extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); +extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); +extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, + const struct cred *); +extern int kill_pgrp(struct pid *pid, int sig, int priv); +extern int kill_pid(struct pid *pid, int sig, int priv); +extern __must_check bool do_notify_parent(struct task_struct *, int); +extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); +extern void force_sig(int, struct task_struct *); +extern int send_sig(int, struct task_struct *, int); +extern int zap_other_threads(struct task_struct *p); +extern struct sigqueue *sigqueue_alloc(void); +extern void sigqueue_free(struct sigqueue *); +extern int send_sigqueue(struct 
sigqueue *, struct pid *, enum pid_type); +extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); + +static inline int restart_syscall(void) +{ + set_tsk_thread_flag(current, TIF_SIGPENDING); + return -ERESTARTNOINTR; +} + +static inline int signal_pending(struct task_struct *p) +{ + return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); +} + +static inline int __fatal_signal_pending(struct task_struct *p) +{ + return unlikely(sigismember(&p->pending.signal, SIGKILL)); +} + +static inline int fatal_signal_pending(struct task_struct *p) +{ + return signal_pending(p) && __fatal_signal_pending(p); +} + +static inline int signal_pending_state(long state, struct task_struct *p) +{ + if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) + return 0; + if (!signal_pending(p)) + return 0; + + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); +} + +/* + * Reevaluate whether the task has signals pending delivery. + * Wake the task if so. + * This is required every time the blocked sigset_t changes. + * callers must hold sighand->siglock. + */ +extern void recalc_sigpending_and_wake(struct task_struct *t); +extern void recalc_sigpending(void); +extern void calculate_sigpending(void); + +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +} + +void task_join_group_stop(struct task_struct *task); + +#ifdef TIF_RESTORE_SIGMASK +/* + * Legacy restore_sigmask accessors. These are inefficient on + * SMP architectures because they require atomic operations. + */ + +/** + * set_restore_sigmask() - make sure saved_sigmask processing gets done + * + * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code + * will run before returning to user mode, to process the flag. For + * all callers, TIF_SIGPENDING is already set or it's no harm to set + * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the + * arch code will notice on return to user mode, in case those bits + * are scarce. We set TIF_SIGPENDING here to ensure that the arch + * signal code always gets run when TIF_RESTORE_SIGMASK is set. + */ +static inline void set_restore_sigmask(void) +{ + set_thread_flag(TIF_RESTORE_SIGMASK); + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} + +static inline void clear_tsk_restore_sigmask(struct task_struct *tsk) +{ + clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK); +} + +static inline void clear_restore_sigmask(void) +{ + clear_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_tsk_restore_sigmask(struct task_struct *tsk) +{ + return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK); +} +static inline bool test_restore_sigmask(void) +{ + return test_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_and_clear_restore_sigmask(void) +{ + return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); +} + +#else /* TIF_RESTORE_SIGMASK */ + +/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. 
*/ +static inline void set_restore_sigmask(void) +{ + current->restore_sigmask = true; + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_tsk_restore_sigmask(struct task_struct *tsk) +{ + tsk->restore_sigmask = false; +} +static inline void clear_restore_sigmask(void) +{ + current->restore_sigmask = false; +} +static inline bool test_restore_sigmask(void) +{ + return current->restore_sigmask; +} +static inline bool test_tsk_restore_sigmask(struct task_struct *tsk) +{ + return tsk->restore_sigmask; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + if (!current->restore_sigmask) + return false; + current->restore_sigmask = false; + return true; +} +#endif + +static inline void restore_saved_sigmask(void) +{ + if (test_and_clear_restore_sigmask()) + __set_current_blocked(¤t->saved_sigmask); +} + +static inline sigset_t *sigmask_to_save(void) +{ + sigset_t *res = ¤t->blocked; + if (unlikely(test_restore_sigmask())) + res = ¤t->saved_sigmask; + return res; +} + +static inline int kill_cad_pid(int sig, int priv) +{ + return kill_pid(cad_pid, sig, priv); +} + +/* These can be the second arg to send_sig_info/send_group_sig_info. */ +#define SEND_SIG_NOINFO ((struct siginfo *) 0) +#define SEND_SIG_PRIV ((struct siginfo *) 1) +#define SEND_SIG_FORCED ((struct siginfo *) 2) + +/* + * True if we are on the alternate signal stack. + */ +static inline int on_sig_stack(unsigned long sp) +{ + /* + * If the signal stack is SS_AUTODISARM then, by construction, we + * can't be on the signal stack unless user code deliberately set + * SS_AUTODISARM when we were already on it. + * + * This improves reliability: if user state gets corrupted such that + * the stack pointer points very close to the end of the signal stack, + * then this check will enable the signal to be handled anyway. + */ + if (current->sas_ss_flags & SS_AUTODISARM) + return 0; + +#ifdef CONFIG_STACK_GROWSUP + return sp >= current->sas_ss_sp && + sp - current->sas_ss_sp < current->sas_ss_size; +#else + return sp > current->sas_ss_sp && + sp - current->sas_ss_sp <= current->sas_ss_size; +#endif +} + +static inline int sas_ss_flags(unsigned long sp) +{ + if (!current->sas_ss_size) + return SS_DISABLE; + + return on_sig_stack(sp) ? SS_ONSTACK : 0; +} + +static inline void sas_ss_reset(struct task_struct *p) +{ + p->sas_ss_sp = 0; + p->sas_ss_size = 0; + p->sas_ss_flags = SS_DISABLE; +} + +static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) +{ + if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) +#ifdef CONFIG_STACK_GROWSUP + return current->sas_ss_sp; +#else + return current->sas_ss_sp + current->sas_ss_size; +#endif + return sp; +} + +extern void __cleanup_sighand(struct sighand_struct *); +extern void flush_itimer_signals(void); + +#define tasklist_empty() \ + list_empty(&init_task.tasks) + +#define next_task(p) \ + list_entry_rcu((p)->tasks.next, struct task_struct, tasks) + +#define for_each_process(p) \ + for (p = &init_task ; (p = next_task(p)) != &init_task ; ) + +extern bool current_is_single_threaded(void); + +/* + * Careful: do_each_thread/while_each_thread is a double loop so + * 'break' will not work as expected - use goto instead. 
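 *
 * Illustrative usage of the goto pattern (not part of this header), with
 * wants_stop() standing in for any caller-defined predicate:
 *
 *	do_each_thread(g, t) {
 *		if (wants_stop(t))
 *			goto found;
 *	} while_each_thread(g, t);
 * found:
 *	...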
+ */ +#define do_each_thread(g, t) \ + for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do + +#define while_each_thread(g, t) \ + while ((t = next_thread(t)) != g) + +#define __for_each_thread(signal, t) \ + list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) + +#define for_each_thread(p, t) \ + __for_each_thread((p)->signal, t) + +/* Careful: this is a double loop, 'break' won't work as expected. */ +#define for_each_process_thread(p, t) \ + for_each_process(p) for_each_thread(p, t) + +typedef int (*proc_visitor)(struct task_struct *p, void *data); +void walk_process_tree(struct task_struct *top, proc_visitor, void *); + +static inline +struct pid *task_pid_type(struct task_struct *task, enum pid_type type) +{ + struct pid *pid; + if (type == PIDTYPE_PID) + pid = task_pid(task); + else + pid = task->signal->pids[type]; + return pid; +} + +static inline struct pid *task_tgid(struct task_struct *task) +{ + return task->signal->pids[PIDTYPE_TGID]; +} + +/* + * Without tasklist or RCU lock it is not safe to dereference + * the result of task_pgrp/task_session even if task == current, + * we can race with another thread doing sys_setsid/sys_setpgid. + */ +static inline struct pid *task_pgrp(struct task_struct *task) +{ + return task->signal->pids[PIDTYPE_PGID]; +} + +static inline struct pid *task_session(struct task_struct *task) +{ + return task->signal->pids[PIDTYPE_SID]; +} + +static inline int get_nr_threads(struct task_struct *tsk) +{ + return tsk->signal->nr_threads; +} + +static inline bool thread_group_leader(struct task_struct *p) +{ + return p->exit_signal >= 0; +} + +/* Do to the insanities of de_thread it is possible for a process + * to have the pid of the thread group leader without actually being + * the thread group leader. For iteration through the pids in proc + * all we care about is that we have a task with the appropriate + * pid, we don't actually care if we have the right task. 
+ */ +static inline bool has_group_leader_pid(struct task_struct *p) +{ + return task_pid(p) == task_tgid(p); +} + +static inline +bool same_thread_group(struct task_struct *p1, struct task_struct *p2) +{ + return p1->signal == p2->signal; +} + +static inline struct task_struct *next_thread(const struct task_struct *p) +{ + return list_entry_rcu(p->thread_group.next, + struct task_struct, thread_group); +} + +static inline int thread_group_empty(struct task_struct *p) +{ + return list_empty(&p->thread_group); +} + +#define delay_group_leader(p) \ + (thread_group_leader(p) && !thread_group_empty(p)) + +extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, + unsigned long *flags); + +static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + struct sighand_struct *ret; + + ret = __lock_task_sighand(tsk, flags); + (void)__cond_lock(&tsk->sighand->siglock, ret); + return ret; +} + +static inline void unlock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); +} + +static inline unsigned long task_rlimit(const struct task_struct *tsk, + unsigned int limit) +{ + return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); +} + +static inline unsigned long task_rlimit_max(const struct task_struct *tsk, + unsigned int limit) +{ + return READ_ONCE(tsk->signal->rlim[limit].rlim_max); +} + +static inline unsigned long rlimit(unsigned int limit) +{ + return task_rlimit(current, limit); +} + +static inline unsigned long rlimit_max(unsigned int limit) +{ + return task_rlimit_max(current, limit); +} + +#endif /* _LINUX_SCHED_SIGNAL_H */ diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h new file mode 100644 index 000000000..59d3736c4 --- /dev/null +++ b/include/linux/sched/smt.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_SMT_H +#define _LINUX_SCHED_SMT_H + +#include + +#ifdef CONFIG_SCHED_SMT +extern struct static_key_false sched_smt_present; + +static __always_inline bool sched_smt_active(void) +{ + return static_branch_likely(&sched_smt_present); +} +#else +static inline bool sched_smt_active(void) { return false; } +#endif + +void arch_smt_update(void); + +#endif diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h new file mode 100644 index 000000000..04f1321d1 --- /dev/null +++ b/include/linux/sched/stat.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_STAT_H +#define _LINUX_SCHED_STAT_H + +#include + +/* + * Various counters maintained by the scheduler and fork(), + * exposed via /proc, sys.c or used by drivers via these APIs. + * + * ( Note that all these values are aquired without locking, + * so they can only be relied on in narrow circumstances. 
) + */ + +extern unsigned long total_forks; +extern int nr_threads; +DECLARE_PER_CPU(unsigned long, process_counts); +extern int nr_processes(void); +extern unsigned long nr_running(void); +extern bool single_task_running(void); +extern unsigned long nr_iowait(void); +extern unsigned long nr_iowait_cpu(int cpu); +extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); + +static inline int sched_info_on(void) +{ +#ifdef CONFIG_SCHEDSTATS + return 1; +#elif defined(CONFIG_TASK_DELAY_ACCT) + extern int delayacct_on; + return delayacct_on; +#else + return 0; +#endif +} + +#ifdef CONFIG_SCHEDSTATS +void force_schedstat_enabled(void); +#endif + +#endif /* _LINUX_SCHED_STAT_H */ diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h new file mode 100644 index 000000000..a9c32daeb --- /dev/null +++ b/include/linux/sched/sysctl.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_SYSCTL_H +#define _LINUX_SCHED_SYSCTL_H + +#include + +struct ctl_table; + +#ifdef CONFIG_DETECT_HUNG_TASK +extern int sysctl_hung_task_check_count; +extern unsigned int sysctl_hung_task_panic; +extern unsigned long sysctl_hung_task_timeout_secs; +extern unsigned long sysctl_hung_task_check_interval_secs; +extern int sysctl_hung_task_warnings; +extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); +#else +/* Avoid need for ifdefs elsewhere in the code */ +enum { sysctl_hung_task_timeout_secs = 0 }; +#endif + +extern unsigned int sysctl_sched_latency; +extern unsigned int sysctl_sched_min_granularity; +extern unsigned int sysctl_sched_wakeup_granularity; +extern unsigned int sysctl_sched_child_runs_first; + +enum sched_tunable_scaling { + SCHED_TUNABLESCALING_NONE, + SCHED_TUNABLESCALING_LOG, + SCHED_TUNABLESCALING_LINEAR, + SCHED_TUNABLESCALING_END, +}; +extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; + +extern unsigned int sysctl_numa_balancing_scan_delay; +extern unsigned int sysctl_numa_balancing_scan_period_min; +extern unsigned int sysctl_numa_balancing_scan_period_max; +extern unsigned int sysctl_numa_balancing_scan_size; + +#ifdef CONFIG_SCHED_DEBUG +extern __read_mostly unsigned int sysctl_sched_migration_cost; +extern __read_mostly unsigned int sysctl_sched_nr_migrate; + +int sched_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, + loff_t *ppos); +#endif + +/* + * control realtime throttling: + * + * /proc/sys/kernel/sched_rt_period_us + * /proc/sys/kernel/sched_rt_runtime_us + */ +extern unsigned int sysctl_sched_rt_period; +extern int sysctl_sched_rt_runtime; + +#ifdef CONFIG_CFS_BANDWIDTH +extern unsigned int sysctl_sched_cfs_bandwidth_slice; +#endif + +#ifdef CONFIG_SCHED_AUTOGROUP +extern unsigned int sysctl_sched_autogroup_enabled; +#endif + +extern int sysctl_sched_rr_timeslice; +extern int sched_rr_timeslice; + +extern int sched_rr_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +extern int sched_rt_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +extern int sysctl_numa_balancing(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +extern int sysctl_schedstats(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +#endif /* _LINUX_SCHED_SYSCTL_H */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h new file 
mode 100644 index 000000000..91401309b --- /dev/null +++ b/include/linux/sched/task.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_TASK_H +#define _LINUX_SCHED_TASK_H + +/* + * Interface between the scheduler and various task lifetime (fork()/exit()) + * functionality: + */ + +#include + +struct task_struct; +struct rusage; +union thread_union; + +/* + * This serializes "schedule()" and also protects + * the run-queue from deletions/modifications (but + * _adding_ to the beginning of the run-queue has + * a separate lock). + */ +extern rwlock_t tasklist_lock; +extern spinlock_t mmlist_lock; + +extern union thread_union init_thread_union; +extern struct task_struct init_task; + +#ifdef CONFIG_PROVE_RCU +extern int lockdep_tasklist_lock_is_held(void); +#endif /* #ifdef CONFIG_PROVE_RCU */ + +extern asmlinkage void schedule_tail(struct task_struct *prev); +extern void init_idle(struct task_struct *idle, int cpu); + +extern int sched_fork(unsigned long clone_flags, struct task_struct *p); +extern void sched_dead(struct task_struct *p); + +void __noreturn do_task_dead(void); + +extern void proc_caches_init(void); + +extern void fork_init(void); + +extern void release_task(struct task_struct * p); + +#ifdef CONFIG_HAVE_COPY_THREAD_TLS +extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, + struct task_struct *, unsigned long); +#else +extern int copy_thread(unsigned long, unsigned long, unsigned long, + struct task_struct *); + +/* Architectures that haven't opted into copy_thread_tls get the tls argument + * via pt_regs, so ignore the tls argument passed via C. */ +static inline int copy_thread_tls( + unsigned long clone_flags, unsigned long sp, unsigned long arg, + struct task_struct *p, unsigned long tls) +{ + return copy_thread(clone_flags, sp, arg, p); +} +#endif +extern void flush_thread(void); + +#ifdef CONFIG_HAVE_EXIT_THREAD +extern void exit_thread(struct task_struct *tsk); +#else +static inline void exit_thread(struct task_struct *tsk) +{ +} +#endif +extern void do_group_exit(int); + +extern void exit_files(struct task_struct *); +extern void exit_itimers(struct signal_struct *); + +extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); +extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); +struct task_struct *fork_idle(int); +extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); +extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); + +extern void free_task(struct task_struct *tsk); + +/* sched_exec is called by processes performing an exec */ +#ifdef CONFIG_SMP +extern void sched_exec(void); +#else +#define sched_exec() {} +#endif + +#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + +extern void __put_task_struct(struct task_struct *t); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); +} + +struct task_struct *task_rcu_dereference(struct task_struct **ptask); + +#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT +extern int arch_task_struct_size __read_mostly; +#else +# define arch_task_struct_size (sizeof(struct task_struct)) +#endif + +#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST +/* + * If an architecture has not declared a thread_struct whitelist we + * must assume something there may need to be copied to userspace. 
+ */ +static inline void arch_thread_struct_whitelist(unsigned long *offset, + unsigned long *size) +{ + *offset = 0; + /* Handle dynamically sized thread_struct. */ + *size = arch_task_struct_size - offsetof(struct task_struct, thread); +} +#endif + +#ifdef CONFIG_VMAP_STACK +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return t->stack_vm_area; +} +#else +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return NULL; +} +#endif + +/* + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring + * subscriptions and synchronises with wait4(). Also used in procfs. Also + * pins the final release of task.io_context. Also protects ->cpuset and + * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist. + * + * Nests both inside and outside of read_lock(&tasklist_lock). + * It must not be nested with write_lock_irq(&tasklist_lock), + * neither inside nor outside. + */ +static inline void task_lock(struct task_struct *p) +{ + spin_lock(&p->alloc_lock); +} + +static inline void task_unlock(struct task_struct *p) +{ + spin_unlock(&p->alloc_lock); +} + +#endif /* _LINUX_SCHED_TASK_H */ diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h new file mode 100644 index 000000000..4f099d3fe --- /dev/null +++ b/include/linux/sched/task_stack.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_TASK_STACK_H +#define _LINUX_SCHED_TASK_STACK_H + +/* + * task->stack (kernel stack) handling interfaces: + */ + +#include +#include + +#ifdef CONFIG_THREAD_INFO_IN_TASK + +/* + * When accessing the stack of a non-current task that might exit, use + * try_get_task_stack() instead. task_stack_page will return a pointer + * that could get freed out from under you. + */ +static inline void *task_stack_page(const struct task_struct *task) +{ + return task->stack; +} + +#define setup_thread_stack(new,old) do { } while(0) + +static inline unsigned long *end_of_stack(const struct task_struct *task) +{ +#ifdef CONFIG_STACK_GROWSUP + return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1; +#else + return task->stack; +#endif +} + +#elif !defined(__HAVE_THREAD_FUNCTIONS) + +#define task_stack_page(task) ((void *)(task)->stack) + +static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) +{ + *task_thread_info(p) = *task_thread_info(org); + task_thread_info(p)->task = p; +} + +/* + * Return the address of the last usable long on the stack. + * + * When the stack grows down, this is just above the thread + * info struct. Going any lower will corrupt the threadinfo. + * + * When the stack grows up, this is the highest address. + * Beyond that position, we corrupt data on the next page. + */ +static inline unsigned long *end_of_stack(struct task_struct *p) +{ +#ifdef CONFIG_STACK_GROWSUP + return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; +#else + return (unsigned long *)(task_thread_info(p) + 1); +#endif +} + +#endif + +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return atomic_inc_not_zero(&tsk->stack_refcount) ? 
+ task_stack_page(tsk) : NULL; +} + +extern void put_task_stack(struct task_struct *tsk); +#else +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return task_stack_page(tsk); +} + +static inline void put_task_stack(struct task_struct *tsk) {} +#endif + +#define task_stack_end_corrupted(task) \ + (*(end_of_stack(task)) != STACK_END_MAGIC) + +static inline int object_is_on_stack(const void *obj) +{ + void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); +} + +extern void thread_stack_cache_init(void); + +#ifdef CONFIG_DEBUG_STACK_USAGE +static inline unsigned long stack_not_used(struct task_struct *p) +{ + unsigned long *n = end_of_stack(p); + + do { /* Skip over canary */ +# ifdef CONFIG_STACK_GROWSUP + n--; +# else + n++; +# endif + } while (!*n); + +# ifdef CONFIG_STACK_GROWSUP + return (unsigned long)end_of_stack(p) - (unsigned long)n; +# else + return (unsigned long)n - (unsigned long)end_of_stack(p); +# endif +} +#endif +extern void set_task_stack_end_magic(struct task_struct *tsk); + +#ifndef __HAVE_ARCH_KSTACK_END +static inline int kstack_end(void *addr) +{ + /* Reliable end of stack detection: + * Some APM bios versions misalign the stack + */ + return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); +} +#endif + +#endif /* _LINUX_SCHED_TASK_STACK_H */ diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h new file mode 100644 index 000000000..15f3f61f7 --- /dev/null +++ b/include/linux/sched/topology.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_TOPOLOGY_H +#define _LINUX_SCHED_TOPOLOGY_H + +#include + +#include + +/* + * Increase resolution of cpu_capacity calculations + */ +#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT +#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) + +/* + * sched-domains (multiprocessor balancing) declarations: + */ +#ifdef CONFIG_SMP + +#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
*/ +#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ +#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ +#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ +#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ +#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ +#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ +#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ +#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ +#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ +#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ +#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ +#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ +#define SD_NUMA 0x4000 /* cross-node balancing */ + +#ifdef CONFIG_SCHED_SMT +static inline int cpu_smt_flags(void) +{ + return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_SCHED_MC +static inline int cpu_core_flags(void) +{ + return SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_NUMA +static inline int cpu_numa_flags(void) +{ + return SD_NUMA; +} +#endif + +extern int arch_asym_cpu_priority(int cpu); + +struct sched_domain_attr { + int relax_domain_level; +}; + +#define SD_ATTR_INIT (struct sched_domain_attr) { \ + .relax_domain_level = -1, \ +} + +extern int sched_domain_level_max; + +struct sched_group; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; +}; + +struct sched_domain { + /* These fields must be setup */ + struct sched_domain *parent; /* top domain must be null terminated */ + struct sched_domain *child; /* bottom domain must be null terminated */ + struct sched_group *groups; /* the balancing groups of the domain */ + unsigned long min_interval; /* Minimum balance interval ms */ + unsigned long max_interval; /* Maximum balance interval ms */ + unsigned int busy_factor; /* less balancing by factor if busy */ + unsigned int imbalance_pct; /* No balance until over watermark */ + unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int busy_idx; + unsigned int idle_idx; + unsigned int newidle_idx; + unsigned int wake_idx; + unsigned int forkexec_idx; + unsigned int smt_gain; + + int nohz_idle; /* NOHZ IDLE status */ + int flags; /* See SD_* */ + int level; + + /* Runtime fields. */ + unsigned long last_balance; /* init to jiffies. units in jiffies */ + unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ + unsigned int nr_balance_failed; /* initialise to 0 */ + + /* idle_balance() stats */ + u64 max_newidle_lb_cost; + unsigned long next_decay_max_lb_cost; + + u64 avg_scan_cost; /* select_idle_sibling */ + +#ifdef CONFIG_SCHEDSTATS + /* load_balance() stats */ + unsigned int lb_count[CPU_MAX_IDLE_TYPES]; + unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; + unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; + unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; + + /* Active load balancing */ + unsigned int alb_count; + unsigned int alb_failed; + unsigned int alb_pushed; + + /* SD_BALANCE_EXEC stats */ + unsigned int sbe_count; + unsigned int sbe_balanced; + unsigned int sbe_pushed; + + /* SD_BALANCE_FORK stats */ + unsigned int sbf_count; + unsigned int sbf_balanced; + unsigned int sbf_pushed; + + /* try_to_wake_up() stats */ + unsigned int ttwu_wake_remote; + unsigned int ttwu_move_affine; + unsigned int ttwu_move_balance; +#endif +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif + union { + void *private; /* used during construction */ + struct rcu_head rcu; /* used during destruction */ + }; + struct sched_domain_shared *shared; + + unsigned int span_weight; + /* + * Span of all CPUs in this domain. + * + * NOTE: this field is variable length. (Allocated dynamically + * by attaching extra space to the end of the structure, + * depending on how many CPUs the kernel has booted up with) + */ + unsigned long span[0]; +}; + +static inline struct cpumask *sched_domain_span(struct sched_domain *sd) +{ + return to_cpumask(sd->span); +} + +extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new); + +/* Allocate an array of sched domains, for partition_sched_domains(). 
*/ +cpumask_var_t *alloc_sched_domains(unsigned int ndoms); +void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); + +bool cpus_share_cache(int this_cpu, int that_cpu); + +typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); +typedef int (*sched_domain_flags_f)(void); + +#define SDTL_OVERLAP 0x01 + +struct sd_data { + struct sched_domain *__percpu *sd; + struct sched_domain_shared *__percpu *sds; + struct sched_group *__percpu *sg; + struct sched_group_capacity *__percpu *sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif +}; + +extern void set_sched_topology(struct sched_domain_topology_level *tl); + +#ifdef CONFIG_SCHED_DEBUG +# define SD_INIT_NAME(type) .name = #type +#else +# define SD_INIT_NAME(type) +#endif + +#else /* CONFIG_SMP */ + +struct sched_domain_attr; + +static inline void +partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ +} + +static inline bool cpus_share_cache(int this_cpu, int that_cpu) +{ + return true; +} + +#endif /* !CONFIG_SMP */ + +static inline int task_node(const struct task_struct *p) +{ + return cpu_to_node(task_cpu(p)); +} + +#endif /* _LINUX_SCHED_TOPOLOGY_H */ diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h new file mode 100644 index 000000000..39ad98c09 --- /dev/null +++ b/include/linux/sched/user.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_USER_H +#define _LINUX_SCHED_USER_H + +#include +#include +#include +#include + +struct key; + +/* + * Some day this will be a full-fledged user tracking system.. + */ +struct user_struct { + refcount_t __count; /* reference count */ + atomic_t processes; /* How many processes does this user have? */ + atomic_t sigpending; /* How many pending signals does this user have? */ +#ifdef CONFIG_FANOTIFY + atomic_t fanotify_listeners; +#endif +#ifdef CONFIG_EPOLL + atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ +#endif +#ifdef CONFIG_POSIX_MQUEUE + /* protected by mq_lock */ + unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ +#endif + unsigned long locked_shm; /* How many pages of mlocked shm ? */ + unsigned long unix_inflight; /* How many files in flight in unix sockets */ + atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ + +#ifdef CONFIG_KEYS + struct key *uid_keyring; /* UID specific keyring */ + struct key *session_keyring; /* UID's default session keyring */ +#endif + + /* Hash table maintenance information */ + struct hlist_node uidhash_node; + kuid_t uid; + +#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \ + defined(CONFIG_NET) + atomic_long_t locked_vm; +#endif + + /* Miscellaneous per-user rate limit */ + struct ratelimit_state ratelimit; +}; + +extern int uids_sysfs_init(void); + +extern struct user_struct *find_user(kuid_t); + +extern struct user_struct root_user; +#define INIT_USER (&root_user) + + +/* per-UID process charging. 
*/ +extern struct user_struct * alloc_uid(kuid_t); +static inline struct user_struct *get_uid(struct user_struct *u) +{ + refcount_inc(&u->__count); + return u; +} +extern void free_uid(struct user_struct *); + +#endif /* _LINUX_SCHED_USER_H */ diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h new file mode 100644 index 000000000..10b19a192 --- /dev/null +++ b/include/linux/sched/wake_q.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_WAKE_Q_H +#define _LINUX_SCHED_WAKE_Q_H + +/* + * Wake-queues are lists of tasks with a pending wakeup, whose + * callers have already marked the task as woken internally, + * and can thus carry on. A common use case is being able to + * do the wakeups once the corresponding user lock as been + * released. + * + * We hold reference to each task in the list across the wakeup, + * thus guaranteeing that the memory is still valid by the time + * the actual wakeups are performed in wake_up_q(). + * + * One per task suffices, because there's never a need for a task to be + * in two wake queues simultaneously; it is forbidden to abandon a task + * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is + * already in a wake queue, the wakeup will happen soon and the second + * waker can just skip it. + * + * The DEFINE_WAKE_Q macro declares and initializes the list head. + * wake_up_q() does NOT reinitialize the list; it's expected to be + * called near the end of a function. Otherwise, the list can be + * re-initialized for later re-use by wake_q_init(). + * + * Note that this can cause spurious wakeups. schedule() callers + * must ensure the call is done inside a loop, confirming that the + * wakeup condition has in fact occurred. + */ + +#include + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) + +#define DEFINE_WAKE_Q(name) \ + struct wake_q_head name = { WAKE_Q_TAIL, &name.first } + +static inline void wake_q_init(struct wake_q_head *head) +{ + head->first = WAKE_Q_TAIL; + head->lastp = &head->first; +} + +extern void wake_q_add(struct wake_q_head *head, + struct task_struct *task); +extern void wake_up_q(struct wake_q_head *head); + +#endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/sched/xacct.h b/include/linux/sched/xacct.h new file mode 100644 index 000000000..c078f0a94 --- /dev/null +++ b/include/linux/sched/xacct.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_XACCT_H +#define _LINUX_SCHED_XACCT_H + +/* + * Extended task accounting methods: + */ + +#include + +#ifdef CONFIG_TASK_XACCT +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.rchar += amt; +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.wchar += amt; +} + +static inline void inc_syscr(struct task_struct *tsk) +{ + tsk->ioac.syscr++; +} + +static inline void inc_syscw(struct task_struct *tsk) +{ + tsk->ioac.syscw++; +} +#else +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void inc_syscr(struct task_struct *tsk) +{ +} + +static inline void inc_syscw(struct task_struct *tsk) +{ +} +#endif + +#endif /* _LINUX_SCHED_XACCT_H */ diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h new file mode 100644 index 000000000..abe28d5cb --- /dev/null +++ b/include/linux/sched_clock.h @@ -0,0 +1,25 @@ 
+/* + * sched_clock.h: support for extending counters to full 64-bit ns counter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef LINUX_SCHED_CLOCK +#define LINUX_SCHED_CLOCK + +#ifdef CONFIG_GENERIC_SCHED_CLOCK +extern void generic_sched_clock_init(void); + +extern void sched_clock_register(u64 (*read)(void), int bits, + unsigned long rate); +#else +static inline void generic_sched_clock_init(void) { } + +static inline void sched_clock_register(u64 (*read)(void), int bits, + unsigned long rate) +{ +} +#endif + +#endif diff --git a/include/linux/scif.h b/include/linux/scif.h new file mode 100644 index 000000000..eeb250b73 --- /dev/null +++ b/include/linux/scif.h @@ -0,0 +1,1339 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Intel SCIF driver. 
+ * + */ +#ifndef __SCIF_H__ +#define __SCIF_H__ + +#include +#include +#include +#include + +#define SCIF_ACCEPT_SYNC 1 +#define SCIF_SEND_BLOCK 1 +#define SCIF_RECV_BLOCK 1 + +enum { + SCIF_PROT_READ = (1 << 0), + SCIF_PROT_WRITE = (1 << 1) +}; + +enum { + SCIF_MAP_FIXED = 0x10, + SCIF_MAP_KERNEL = 0x20, +}; + +enum { + SCIF_FENCE_INIT_SELF = (1 << 0), + SCIF_FENCE_INIT_PEER = (1 << 1), + SCIF_SIGNAL_LOCAL = (1 << 4), + SCIF_SIGNAL_REMOTE = (1 << 5) +}; + +enum { + SCIF_RMA_USECPU = (1 << 0), + SCIF_RMA_USECACHE = (1 << 1), + SCIF_RMA_SYNC = (1 << 2), + SCIF_RMA_ORDERED = (1 << 3) +}; + +/* End of SCIF Admin Reserved Ports */ +#define SCIF_ADMIN_PORT_END 1024 + +/* End of SCIF Reserved Ports */ +#define SCIF_PORT_RSVD 1088 + +typedef struct scif_endpt *scif_epd_t; +typedef struct scif_pinned_pages *scif_pinned_pages_t; + +/** + * struct scif_range - SCIF registered range used in kernel mode + * @cookie: cookie used internally by SCIF + * @nr_pages: number of pages of PAGE_SIZE + * @prot_flags: R/W protection + * @phys_addr: Array of bus addresses + * @va: Array of kernel virtual addresses backed by the pages in the phys_addr + * array. The va is populated only when called on the host for a remote + * SCIF connection on MIC. This is required to support the use case of DMA + * between MIC and another device which is not a SCIF node e.g., an IB or + * ethernet NIC. + */ +struct scif_range { + void *cookie; + int nr_pages; + int prot_flags; + dma_addr_t *phys_addr; + void __iomem **va; +}; + +/** + * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll + * @epd: SCIF endpoint + * @events: requested events + * @revents: returned events + */ +struct scif_pollepd { + scif_epd_t epd; + __poll_t events; + __poll_t revents; +}; + +/** + * scif_peer_dev - representation of a peer SCIF device + * + * Peer devices show up as PCIe devices for the mgmt node but not the cards. + * The mgmt node discovers all the cards on the PCIe bus and informs the other + * cards about their peers. Upon notification of a peer a node adds a peer + * device to the peer bus to maintain symmetry in the way devices are + * discovered across all nodes in the SCIF network. + * + * @dev: underlying device + * @dnode - The destination node which this device will communicate with. + */ +struct scif_peer_dev { + struct device dev; + u8 dnode; +}; + +/** + * scif_client - representation of a SCIF client + * @name: client name + * @probe - client method called when a peer device is registered + * @remove - client method called when a peer device is unregistered + * @si - subsys_interface used internally for implementing SCIF clients + */ +struct scif_client { + const char *name; + void (*probe)(struct scif_peer_dev *spdev); + void (*remove)(struct scif_peer_dev *spdev); + struct subsys_interface si; +}; + +#define SCIF_OPEN_FAILED ((scif_epd_t)-1) +#define SCIF_REGISTER_FAILED ((off_t)-1) +#define SCIF_MMAP_FAILED ((void *)-1) + +/** + * scif_open() - Create an endpoint + * + * Return: + * Upon successful completion, scif_open() returns an endpoint descriptor to + * be used in subsequent SCIF functions calls to refer to that endpoint; + * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is + * returned and errno is set to indicate the error; in kernel mode a NULL + * scif_epd_t is returned. 
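[Editorial note, not part of the header being added: a minimal kernel-mode sketch of the endpoint lifetime described above. It only uses scif_open()/scif_close() as documented; the function name is an illustrative placeholder.]

#include <linux/kernel.h>
#include <linux/scif.h>

static scif_epd_t example_open_endpoint(void)
{
	scif_epd_t epd;

	epd = scif_open();		/* kernel mode: NULL on failure */
	if (!epd) {
		pr_err("scif_open() failed\n");
		return NULL;
	}

	/* ... use the endpoint, then release it with scif_close(epd) ... */
	return epd;
}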
+ * + * Errors: + * ENOMEM - Insufficient kernel memory was available + */ +scif_epd_t scif_open(void); + +/** + * scif_bind() - Bind an endpoint to a port + * @epd: endpoint descriptor + * @pn: port number + * + * scif_bind() binds endpoint epd to port pn, where pn is a port number on the + * local node. If pn is zero, a port number greater than or equal to + * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to + * exactly one local port. Ports less than 1024 when requested can only be bound + * by system (or root) processes or by processes executed by privileged users. + * + * Return: + * Upon successful completion, scif_bind() returns the port number to which epd + * is bound; otherwise in user mode -1 is returned and errno is set to + * indicate the error; in kernel mode the negative of one of the following + * errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * EINVAL - the endpoint or the port is already bound + * EISCONN - The endpoint is already connected + * ENOSPC - No port number available for assignment + * EACCES - The port requested is protected and the user is not the superuser + */ +int scif_bind(scif_epd_t epd, u16 pn); + +/** + * scif_listen() - Listen for connections on an endpoint + * @epd: endpoint descriptor + * @backlog: maximum pending connection requests + * + * scif_listen() marks the endpoint epd as a listening endpoint - that is, as + * an endpoint that will be used to accept incoming connection requests. Once + * so marked, the endpoint is said to be in the listening state and may not be + * used as the endpoint of a connection. + * + * The endpoint, epd, must have been bound to a port. + * + * The backlog argument defines the maximum length to which the queue of + * pending connections for epd may grow. If a connection request arrives when + * the queue is full, the client may receive an error with an indication that + * the connection was refused. + * + * Return: + * Upon successful completion, scif_listen() returns 0; otherwise in user mode + * -1 is returned and errno is set to indicate the error; in kernel mode the + * negative of one of the following errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * EINVAL - the endpoint is not bound to a port + * EISCONN - The endpoint is already connected or listening + */ +int scif_listen(scif_epd_t epd, int backlog); + +/** + * scif_connect() - Initiate a connection on a port + * @epd: endpoint descriptor + * @dst: global id of port to which to connect + * + * The scif_connect() function requests the connection of endpoint epd to remote + * port dst. If the connection is successful, a peer endpoint, bound to dst, is + * created on node dst.node. On successful return, the connection is complete. + * + * If the endpoint epd has not already been bound to a port, scif_connect() + * will bind it to an unused local port. + * + * A connection is terminated when an endpoint of the connection is closed, + * either explicitly by scif_close(), or when a process that owns one of the + * endpoints of the connection is terminated. + * + * In user space, scif_connect() supports an asynchronous connection mode + * if the application has set the O_NONBLOCK flag on the endpoint via the + * fcntl() system call. Setting this flag will result in the calling process + * not to wait during scif_connect(). 
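[Editorial note, not part of the header being added: a hedged sketch of the client side of the connection setup described above. The node and port values are placeholders; the scif_port_id node/port members are the ones referenced as dst.node and dst.port in the text.]

#include <linux/scif.h>

static int example_connect(scif_epd_t epd)
{
	struct scif_port_id dst = {
		.node = 1,		/* illustrative remote node id */
		.port = 2000,		/* illustrative remote port */
	};

	/*
	 * On success this returns the local port to which epd is bound;
	 * in kernel mode a negative errno is returned on failure.
	 */
	return scif_connect(epd, &dst);
}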
+ * + * Return: + * Upon successful completion, scif_connect() returns the port ID to which the + * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is + * set to indicate the error; in kernel mode the negative of one of the + * following errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNREFUSED - The destination was not listening for connections or refused + * the connection request + * EINVAL - dst.port is not a valid port ID + * EISCONN - The endpoint is already connected + * ENOMEM - No buffer space is available + * ENODEV - The destination node does not exist, or the node is lost or existed, + * but is not currently in the network since it may have crashed + * ENOSPC - No port number available for assignment + * EOPNOTSUPP - The endpoint is listening and cannot be connected + */ +int scif_connect(scif_epd_t epd, struct scif_port_id *dst); + +/** + * scif_accept() - Accept a connection on an endpoint + * @epd: endpoint descriptor + * @peer: global id of port to which connected + * @newepd: new connected endpoint descriptor + * @flags: flags + * + * The scif_accept() call extracts the first connection request from the queue + * of pending connections for the port on which epd is listening. scif_accept() + * creates a new endpoint, bound to the same port as epd, and allocates a new + * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new + * endpoint is connected to the endpoint through which the connection was + * requested. epd is unaffected by this call, and remains in the listening + * state. + * + * On successful return, peer holds the global port identifier (node id and + * local port number) of the port which requested the connection. + * + * A connection is terminated when an endpoint of the connection is closed, + * either explicitly by scif_close(), or when a process that owns one of the + * endpoints of the connection is terminated. + * + * The number of connections that can (subsequently) be accepted on epd is only + * limited by system resources (memory). + * + * The flags argument is formed by OR'ing together zero or more of the + * following values. + * SCIF_ACCEPT_SYNC - block until a connection request is presented. If + * SCIF_ACCEPT_SYNC is not in flags, and no pending + * connections are present on the queue, scif_accept() + * fails with an EAGAIN error + * + * In user mode, the select() and poll() functions can be used to determine + * when there is a connection request. In kernel mode, the scif_poll() + * function may be used for this purpose. A readable event will be delivered + * when a connection is requested. + * + * Return: + * Upon successful completion, scif_accept() returns 0; otherwise in user mode + * -1 is returned and errno is set to indicate the error; in kernel mode the + * negative of one of the following errors is returned. 
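[Editorial note, not part of the header being added: a minimal sketch of accepting one connection on a listening endpoint, using the SCIF_ACCEPT_SYNC behaviour documented above. The listening endpoint is assumed to have been bound and marked listening already.]

#include <linux/scif.h>

static int example_accept_one(scif_epd_t listen_epd, scif_epd_t *newepd)
{
	struct scif_port_id peer;

	/* Block until a connection request arrives on the listening port. */
	return scif_accept(listen_epd, &peer, newepd, SCIF_ACCEPT_SYNC);
}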
+ * + * Errors: + * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be + * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete + * its connection request + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * EINTR - Interrupted function + * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is + * NULL, or newepd is NULL + * ENODEV - The requesting node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOENT - Secondary part of epd registration failed + */ +int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t + *newepd, int flags); + +/** + * scif_close() - Close an endpoint + * @epd: endpoint descriptor + * + * scif_close() closes an endpoint and performs necessary teardown of + * facilities associated with that endpoint. + * + * If epd is a listening endpoint then it will no longer accept connection + * requests on the port to which it is bound. Any pending connection requests + * are rejected. + * + * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs + * which are in-process through epd or its peer endpoint will complete before + * scif_close() returns. Registered windows of the local and peer endpoints are + * released as if scif_unregister() was called against each window. + * + * Closing a SCIF endpoint does not affect local registered memory mapped by + * a SCIF endpoint on a remote node. The local memory remains mapped by the peer + * SCIF endpoint explicitly removed by calling munmap(..) by the peer. + * + * If the peer endpoint's receive queue is not empty at the time that epd is + * closed, then the peer endpoint can be passed as the endpoint parameter to + * scif_recv() until the receive queue is empty. + * + * epd is freed and may no longer be accessed. + * + * Return: + * Upon successful completion, scif_close() returns 0; otherwise in user mode + * -1 is returned and errno is set to indicate the error; in kernel mode the + * negative of one of the following errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + */ +int scif_close(scif_epd_t epd); + +/** + * scif_send() - Send a message + * @epd: endpoint descriptor + * @msg: message buffer address + * @len: message length + * @flags: blocking mode flags + * + * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data + * are copied from memory starting at address msg. On successful execution the + * return value of scif_send() is the number of bytes that were sent, and is + * zero if no bytes were sent because len was zero. scif_send() may be called + * only when the endpoint is in a connected state. + * + * If a scif_send() call is non-blocking, then it sends only those bytes which + * can be sent without waiting, up to a maximum of len bytes. + * + * If a scif_send() call is blocking, then it normally returns after sending + * all len bytes. If a blocking call is interrupted or the connection is + * reset, the call is considered successful if some bytes were sent or len is + * zero, otherwise the call is considered unsuccessful. + * + * In user mode, the select() and poll() functions can be used to determine + * when the send queue is not full. In kernel mode, the scif_poll() function + * may be used for this purpose. + * + * It is recommended that scif_send()/scif_recv() only be used for short + * control-type message communication between SCIF endpoints. 
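[Editorial note, not part of the header being added: a short sketch of sending one control-type message with the blocking mode documented above; the message contents and function name are illustrative.]

#include <linux/scif.h>

static int example_send_ready(scif_epd_t epd)
{
	char msg[] = "ready";		/* illustrative control message */
	int ret;

	/* SCIF_SEND_BLOCK: do not return until all bytes have been sent. */
	ret = scif_send(epd, msg, sizeof(msg), SCIF_SEND_BLOCK);
	return ret < 0 ? ret : 0;
}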
The SCIF RMA + * APIs are expected to provide better performance for transfer sizes of + * 1024 bytes or longer for the current MIC hardware and software + * implementation. + * + * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK + * is passed as the flags argument. + * + * Return: + * Upon successful completion, scif_send() returns the number of bytes sent; + * otherwise in user mode -1 is returned and errno is set to indicate the + * error; in kernel mode the negative of one of the following errors is + * returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - flags is invalid, or len is negative + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOTCONN - The endpoint is not connected + */ +int scif_send(scif_epd_t epd, void *msg, int len, int flags); + +/** + * scif_recv() - Receive a message + * @epd: endpoint descriptor + * @msg: message buffer address + * @len: message buffer length + * @flags: blocking mode flags + * + * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of + * data are copied to memory starting at address msg. On successful execution + * the return value of scif_recv() is the number of bytes that were received, + * and is zero if no bytes were received because len was zero. scif_recv() may + * be called only when the endpoint is in a connected state. + * + * If a scif_recv() call is non-blocking, then it receives only those bytes + * which can be received without waiting, up to a maximum of len bytes. + * + * If a scif_recv() call is blocking, then it normally returns after receiving + * all len bytes. If the blocking call was interrupted due to a disconnection, + * subsequent calls to scif_recv() will copy all bytes received upto the point + * of disconnection. + * + * In user mode, the select() and poll() functions can be used to determine + * when data is available to be received. In kernel mode, the scif_poll() + * function may be used for this purpose. + * + * It is recommended that scif_send()/scif_recv() only be used for short + * control-type message communication between SCIF endpoints. The SCIF RMA + * APIs are expected to provide better performance for transfer sizes of + * 1024 bytes or longer for the current MIC hardware and software + * implementation. + * + * scif_recv() will block until the entire message is received if + * SCIF_RECV_BLOCK is passed as the flags argument. + * + * Return: + * Upon successful completion, scif_recv() returns the number of bytes + * received; otherwise in user mode -1 is returned and errno is set to + * indicate the error; in kernel mode the negative of one of the following + * errors is returned. + * + * Errors: + * EAGAIN - The destination node is returning from a low power state + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - flags is invalid, or len is negative + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOTCONN - The endpoint is not connected + */ +int scif_recv(scif_epd_t epd, void *msg, int len, int flags); + +/** + * scif_register() - Mark a memory region for remote access. 
+ * @epd: endpoint descriptor + * @addr: starting virtual address + * @len: length of range + * @offset: offset of window + * @prot_flags: read/write protection flags + * @map_flags: mapping flags + * + * The scif_register() function opens a window, a range of whole pages of the + * registered address space of the endpoint epd, starting at offset po and + * continuing for len bytes. The value of po, further described below, is a + * function of the parameters offset and len, and the value of map_flags. Each + * page of the window represents the physical memory page which backs the + * corresponding page of the range of virtual address pages starting at addr + * and continuing for len bytes. addr and len are constrained to be multiples + * of the page size. A successful scif_register() call returns po. + * + * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset + * exactly, and offset is constrained to be a multiple of the page size. The + * mapping established by scif_register() will not replace any existing + * registration; an error is returned if any page within the range [offset, + * offset + len - 1] intersects an existing window. + * + * When SCIF_MAP_FIXED is not set, the implementation uses offset in an + * implementation-defined manner to arrive at po. The po value so chosen will + * be an area of the registered address space that the implementation deems + * suitable for a mapping of len bytes. An offset value of 0 is interpreted as + * granting the implementation complete freedom in selecting po, subject to + * constraints described below. A non-zero value of offset is taken to be a + * suggestion of an offset near which the mapping should be placed. When the + * implementation selects a value for po, it does not replace any extant + * window. In all cases, po will be a multiple of the page size. + * + * The physical pages which are so represented by a window are available for + * access in calls to mmap(), scif_readfrom(), scif_writeto(), + * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the + * physical pages represented by the window will not be reused by the memory + * subsystem for any other purpose. Note that the same physical page may be + * represented by multiple windows. + * + * Subsequent operations which change the memory pages to which virtual + * addresses are mapped (such as mmap(), munmap()) have no effect on + * existing window. + * + * If the process will fork(), it is recommended that the registered + * virtual address range be marked with MADV_DONTFORK. Doing so will prevent + * problems due to copy-on-write semantics. + * + * The prot_flags argument is formed by OR'ing together one or more of the + * following values. + * SCIF_PROT_READ - allow read operations from the window + * SCIF_PROT_WRITE - allow write operations to the window + * + * Return: + * Upon successful completion, scif_register() returns the offset at which the + * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that + * is (off_t *)-1) is returned and errno is set to indicate the error; in + * kernel mode the negative of one of the following errors is returned. 
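[Editorial note, not part of the header being added: a hedged sketch of registering a window as described above. The buffer is assumed to be page aligned; passing offset 0 without SCIF_MAP_FIXED lets the implementation choose the window offset po.]

#include <linux/kernel.h>
#include <linux/scif.h>

static off_t example_register(scif_epd_t epd, void *buf, size_t len)
{
	off_t po;

	po = scif_register(epd, buf, len, 0,
			   SCIF_PROT_READ | SCIF_PROT_WRITE, 0);
	if (po < 0)			/* kernel mode: negative errno */
		pr_err("scif_register() failed: %ld\n", (long)po);

	return po;			/* offset of the new window (po) */
}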
+ * + * Errors: + * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range + * [offset, offset + len -1] are already registered + * EAGAIN - The mapping could not be performed due to lack of resources + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is + * set in flags, and offset is not a multiple of the page size, or addr is not a + * multiple of the page size, or len is not a multiple of the page size, or is + * 0, or offset is negative + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOTCONN -The endpoint is not connected + */ +off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset, + int prot_flags, int map_flags); + +/** + * scif_unregister() - Mark a memory region for remote access. + * @epd: endpoint descriptor + * @offset: start of range to unregister + * @len: length of range to unregister + * + * The scif_unregister() function closes those previously registered windows + * which are entirely within the range [offset, offset + len - 1]. It is an + * error to specify a range which intersects only a subrange of a window. + * + * On a successful return, pages within the window may no longer be specified + * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(), + * scif_vwriteto(), scif_get_pages, and scif_fence_signal(). The window, + * however, continues to exist until all previous references against it are + * removed. A window is referenced if there is a mapping to it created by + * mmap(), or if scif_get_pages() was called against the window + * (and the pages have not been returned via scif_put_pages()). A window is + * also referenced while an RMA, in which some range of the window is a source + * or destination, is in progress. Finally a window is referenced while some + * offset in that window was specified to scif_fence_signal(), and the RMAs + * marked by that call to scif_fence_signal() have not completed. While a + * window is in this state, its registered address space pages are not + * available for use in a new registered window. + * + * When all such references to the window have been removed, its references to + * all the physical pages which it represents are removed. Similarly, the + * registered address space pages of the window become available for + * registration in a new window. + * + * Return: + * Upon successful completion, scif_unregister() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. In the event of an + * error, no windows are unregistered. 
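[Editorial note, not part of the header being added: a minimal teardown sketch matching the registration sketch above; po and len are assumed to describe exactly one previously registered window, since partial ranges are an error.]

#include <linux/scif.h>

static int example_unregister(scif_epd_t epd, off_t po, size_t len)
{
	/* Close every window entirely contained in [po, po + len - 1]. */
	return scif_unregister(epd, po, len);
}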
+ * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a + * window, or offset is negative + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the + * registered address space of epd + */ +int scif_unregister(scif_epd_t epd, off_t offset, size_t len); + +/** + * scif_readfrom() - Copy from a remote address space + * @epd: endpoint descriptor + * @loffset: offset in local registered address space to + * which to copy + * @len: length of range to copy + * @roffset: offset in remote registered address space + * from which to copy + * @rma_flags: transfer mode flags + * + * scif_readfrom() copies len bytes from the remote registered address space of + * the peer of endpoint epd, starting at the offset roffset to the local + * registered address space of epd, starting at the offset loffset. + * + * Each of the specified ranges [loffset, loffset + len - 1] and [roffset, + * roffset + len - 1] must be within some registered window or windows of the + * local and remote nodes. A range may intersect multiple registered windows, + * but only if those windows are contiguous in the registered address space. + * + * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using + * programmed read/writes. Otherwise the data is copied using DMA. If rma_- + * flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after the + * transfer is complete. Otherwise, the transfer may be performed asynchron- + * ously. The order in which any two asynchronous RMA operations complete + * is non-deterministic. The synchronization functions, scif_fence_mark()/ + * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to + * the completion of asynchronous RMA operations on the same endpoint. + * + * The DMA transfer of individual bytes is not guaranteed to complete in + * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last + * cacheline or partial cacheline of the source range will become visible on + * the destination node after all other transferred data in the source + * range has become visible on the destination node. + * + * The optimal DMA performance will likely be realized if both + * loffset and roffset are cacheline aligned (are a multiple of 64). Lower + * performance will likely be realized if loffset and roffset are not + * cacheline aligned but are separated by some multiple of 64. The lowest level + * of performance is likely if loffset and roffset are not separated by a + * multiple of 64. + * + * The rma_flags argument is formed by ORing together zero or more of the + * following values. + * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA + * engine. + * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the + * transfer has completed. Passing this flag results in the + * current implementation busy waiting and consuming CPU cycles + * while the DMA transfer is in progress for best performance by + * avoiding the interrupt latency. 
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of + * the source range becomes visible on the destination node + * after all other transferred data in the source range has + * become visible on the destination + * + * Return: + * Upon successful completion, scif_readfrom() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. + * + * Errors: + * EACCESS - Attempt to write to a read-only range + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - rma_flags is invalid + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered + * address space of epd, or, The range [roffset, roffset + len - 1] is invalid + * for the registered address space of the peer of epd, or loffset or roffset + * is negative + */ +int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t + roffset, int rma_flags); + +/** + * scif_writeto() - Copy to a remote address space + * @epd: endpoint descriptor + * @loffset: offset in local registered address space + * from which to copy + * @len: length of range to copy + * @roffset: offset in remote registered address space to + * which to copy + * @rma_flags: transfer mode flags + * + * scif_writeto() copies len bytes from the local registered address space of + * epd, starting at the offset loffset to the remote registered address space + * of the peer of endpoint epd, starting at the offset roffset. + * + * Each of the specified ranges [loffset, loffset + len - 1] and [roffset, + * roffset + len - 1] must be within some registered window or windows of the + * local and remote nodes. A range may intersect multiple registered windows, + * but only if those windows are contiguous in the registered address space. + * + * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using + * programmed read/writes. Otherwise the data is copied using DMA. If rma_- + * flags includes SCIF_RMA_SYNC, then scif_writeto() will return after the + * transfer is complete. Otherwise, the transfer may be performed asynchron- + * ously. The order in which any two asynchronous RMA operations complete + * is non-deterministic. The synchronization functions, scif_fence_mark()/ + * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to + * the completion of asynchronous RMA operations on the same endpoint. + * + * The DMA transfer of individual bytes is not guaranteed to complete in + * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last + * cacheline or partial cacheline of the source range will become visible on + * the destination node after all other transferred data in the source + * range has become visible on the destination node. + * + * The optimal DMA performance will likely be realized if both + * loffset and roffset are cacheline aligned (are a multiple of 64). Lower + * performance will likely be realized if loffset and roffset are not cacheline + * aligned but are separated by some multiple of 64. The lowest level of + * performance is likely if loffset and roffset are not separated by a multiple + * of 64. + * + * The rma_flags argument is formed by ORing together zero or more of the + * following values. 
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA + * engine. + * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the + * transfer has completed. Passing this flag results in the + * current implementation busy waiting and consuming CPU cycles + * while the DMA transfer is in progress for best performance by + * avoiding the interrupt latency. + * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of + * the source range becomes visible on the destination node + * after all other transferred data in the source range has + * become visible on the destination + * + * Return: + * Upon successful completion, scif_readfrom() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. + * + * Errors: + * EACCESS - Attempt to write to a read-only range + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - rma_flags is invalid + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered + * address space of epd, or, The range [roffset , roffset + len -1] is invalid + * for the registered address space of the peer of epd, or loffset or roffset + * is negative + */ +int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t + roffset, int rma_flags); + +/** + * scif_vreadfrom() - Copy from a remote address space + * @epd: endpoint descriptor + * @addr: address to which to copy + * @len: length of range to copy + * @roffset: offset in remote registered address space + * from which to copy + * @rma_flags: transfer mode flags + * + * scif_vreadfrom() copies len bytes from the remote registered address + * space of the peer of endpoint epd, starting at the offset roffset, to local + * memory, starting at addr. + * + * The specified range [roffset, roffset + len - 1] must be within some + * registered window or windows of the remote nodes. The range may + * intersect multiple registered windows, but only if those windows are + * contiguous in the registered address space. + * + * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using + * programmed read/writes. Otherwise the data is copied using DMA. If rma_- + * flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after the + * transfer is complete. Otherwise, the transfer may be performed asynchron- + * ously. The order in which any two asynchronous RMA operations complete + * is non-deterministic. The synchronization functions, scif_fence_mark()/ + * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to + * the completion of asynchronous RMA operations on the same endpoint. + * + * The DMA transfer of individual bytes is not guaranteed to complete in + * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last + * cacheline or partial cacheline of the source range will become visible on + * the destination node after all other transferred data in the source + * range has become visible on the destination node. + * + * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back + * the specified local memory range may be remain in a pinned state even after + * the specified transfer completes. 
This may reduce overhead if some or all of + * the same virtual address range is referenced in a subsequent call of + * scif_vreadfrom() or scif_vwriteto(). + * + * The optimal DMA performance will likely be realized if both + * addr and roffset are cacheline aligned (are a multiple of 64). Lower + * performance will likely be realized if addr and roffset are not + * cacheline aligned but are separated by some multiple of 64. The lowest level + * of performance is likely if addr and roffset are not separated by a + * multiple of 64. + * + * The rma_flags argument is formed by ORing together zero or more of the + * following values. + * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA + * engine. + * SCIF_RMA_USECACHE - enable registration caching + * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the + * transfer has completed. Passing this flag results in the + * current implementation busy waiting and consuming CPU cycles + * while the DMA transfer is in progress for best performance by + * avoiding the interrupt latency. + * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of + * the source range becomes visible on the destination node + * after all other transferred data in the source range has + * become visible on the destination + * + * Return: + * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. + * + * Errors: + * EACCESS - Attempt to write to a read-only range + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - rma_flags is invalid + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the + * registered address space of epd + */ +int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset, + int rma_flags); + +/** + * scif_vwriteto() - Copy to a remote address space + * @epd: endpoint descriptor + * @addr: address from which to copy + * @len: length of range to copy + * @roffset: offset in remote registered address space to + * which to copy + * @rma_flags: transfer mode flags + * + * scif_vwriteto() copies len bytes from the local memory, starting at addr, to + * the remote registered address space of the peer of endpoint epd, starting at + * the offset roffset. + * + * The specified range [roffset, roffset + len - 1] must be within some + * registered window or windows of the remote nodes. The range may intersect + * multiple registered windows, but only if those windows are contiguous in the + * registered address space. + * + * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using + * programmed read/writes. Otherwise the data is copied using DMA. If rma_- + * flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after the + * transfer is complete. Otherwise, the transfer may be performed asynchron- + * ously. The order in which any two asynchronous RMA operations complete + * is non-deterministic. The synchronization functions, scif_fence_mark()/ + * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to + * the completion of asynchronous RMA operations on the same endpoint. 
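[Editorial note, not part of the header being added: a hedged sketch of a synchronous RMA write between two registered windows, per the scif_writeto() documentation above. Both offsets are assumed to lie within registered windows and, for best DMA throughput, to be 64-byte aligned.]

#include <linux/scif.h>

static int example_rma_write(scif_epd_t epd, off_t loffset, off_t roffset,
			     size_t len)
{
	/* SCIF_RMA_SYNC: return only after the transfer has completed. */
	return scif_writeto(epd, loffset, len, roffset, SCIF_RMA_SYNC);
}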
+ * + * The DMA transfer of individual bytes is not guaranteed to complete in + * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last + * cacheline or partial cacheline of the source range will become visible on + * the destination node after all other transferred data in the source + * range has become visible on the destination node. + * + * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back + * the specified local memory range may be remain in a pinned state even after + * the specified transfer completes. This may reduce overhead if some or all of + * the same virtual address range is referenced in a subsequent call of + * scif_vreadfrom() or scif_vwriteto(). + * + * The optimal DMA performance will likely be realized if both + * addr and offset are cacheline aligned (are a multiple of 64). Lower + * performance will likely be realized if addr and offset are not cacheline + * aligned but are separated by some multiple of 64. The lowest level of + * performance is likely if addr and offset are not separated by a multiple of + * 64. + * + * The rma_flags argument is formed by ORing together zero or more of the + * following values. + * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA + * engine. + * SCIF_RMA_USECACHE - allow registration caching + * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the + * transfer has completed. Passing this flag results in the + * current implementation busy waiting and consuming CPU cycles + * while the DMA transfer is in progress for best performance by + * avoiding the interrupt latency. + * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of + * the source range becomes visible on the destination node + * after all other transferred data in the source range has + * become visible on the destination + * + * Return: + * Upon successful completion, scif_vwriteto() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. + * + * Errors: + * EACCESS - Attempt to write to a read-only range + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - rma_flags is invalid + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the + * registered address space of epd + */ +int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset, + int rma_flags); + +/** + * scif_fence_mark() - Mark previously issued RMAs + * @epd: endpoint descriptor + * @flags: control flags + * @mark: marked value returned as output. + * + * scif_fence_mark() returns after marking the current set of all uncompleted + * RMAs initiated through the endpoint epd or the current set of all + * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are + * marked with a value returned at mark. The application may subsequently call + * scif_fence_wait(), passing the value returned at mark, to await completion + * of all RMAs so marked. + * + * The flags argument has exactly one of the following values. 
+ * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint + * epd are marked + * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer + * of endpoint epd are marked + * + * Return: + * Upon successful completion, scif_fence_mark() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - flags is invalid + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENOMEM - Insufficient kernel memory was available + */ +int scif_fence_mark(scif_epd_t epd, int flags, int *mark); + +/** + * scif_fence_wait() - Wait for completion of marked RMAs + * @epd: endpoint descriptor + * @mark: mark request + * + * scif_fence_wait() returns after all RMAs marked with mark have completed. + * The value passed in mark must have been obtained in a previous call to + * scif_fence_mark(). + * + * Return: + * Upon successful completion, scif_fence_wait() returns 0; otherwise in user + * mode -1 is returned and errno is set to indicate the error; in kernel mode + * the negative of one of the following errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENOMEM - Insufficient kernel memory was available + */ +int scif_fence_wait(scif_epd_t epd, int mark); + +/** + * scif_fence_signal() - Request a memory update on completion of RMAs + * @epd: endpoint descriptor + * @loff: local offset + * @lval: local value to write to loffset + * @roff: remote offset + * @rval: remote value to write to roffset + * @flags: flags + * + * scif_fence_signal() returns after marking the current set of all uncompleted + * RMAs initiated through the endpoint epd or marking the current set of all + * uncompleted RMAs initiated through the peer of endpoint epd. + * + * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the + * marked set, lval is written to memory at the address corresponding to offset + * loff in the local registered address space of epd. loff must be within a + * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion + * of the RMAs in the marked set, rval is written to memory at the address + * corresponding to offset roff in the remote registered address space of epd. + * roff must be within a remote registered window of the peer of epd. Note + * that any specified offset must be DWORD (4 byte / 32 bit) aligned. + * + * The flags argument is formed by OR'ing together the following. + * Exactly one of the following values. + * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint + * epd are marked + * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer + * of endpoint epd are marked + * One or more of the following values. + * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to + * memory at the address corresponding to offset loff in the local + * registered address space of epd. 
+ * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to + * memory at the address corresponding to offset roff in the remote + * registered address space of epd. + * + * Return: + * Upon successful completion, scif_fence_signal() returns 0; otherwise in + * user mode -1 is returned and errno is set to indicate the error; in kernel + * mode the negative of one of the following errors is returned. + * + * Errors: + * EBADF, ENOTTY - epd is not a valid endpoint descriptor + * ECONNRESET - Connection reset by peer + * EINVAL - flags is invalid, or loff or roff are not DWORD aligned + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - loff is invalid for the registered address of epd, or roff is invalid + * for the registered address space, of the peer of epd + */ +int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff, + u64 rval, int flags); + +/** + * scif_get_node_ids() - Return information about online nodes + * @nodes: array in which to return online node IDs + * @len: number of entries in the nodes array + * @self: address to place the node ID of the local node + * + * scif_get_node_ids() fills in the nodes array with up to len node IDs of the + * nodes in the SCIF network. If there is not enough space in nodes, as + * indicated by the len parameter, only len node IDs are returned in nodes. The + * return value of scif_get_node_ids() is the total number of nodes currently in + * the SCIF network. By checking the return value against the len parameter, + * the user may determine if enough space for nodes was allocated. + * + * The node ID of the local node is returned at self. + * + * Return: + * Upon successful completion, scif_get_node_ids() returns the actual number of + * online nodes in the SCIF network including 'self'; otherwise in user mode + * -1 is returned and errno is set to indicate the error; in kernel mode no + * errors are returned. + */ +int scif_get_node_ids(u16 *nodes, int len, u16 *self); + +/** + * scif_pin_pages() - Pin a set of pages + * @addr: Virtual address of range to pin + * @len: Length of range to pin + * @prot_flags: Page protection flags + * @map_flags: Page classification flags + * @pinned_pages: Handle to pinned pages + * + * scif_pin_pages() pins (locks in physical memory) the physical pages which + * back the range of virtual address pages starting at addr and continuing for + * len bytes. addr and len are constrained to be multiples of the page size. A + * successful scif_pin_pages() call returns a handle to pinned_pages which may + * be used in subsequent calls to scif_register_pinned_pages(). + * + * The pages will remain pinned as long as there is a reference against the + * scif_pinned_pages_t value returned by scif_pin_pages() and until + * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A + * reference is added to a scif_pinned_pages_t value each time a window is + * created by calling scif_register_pinned_pages() and passing the + * scif_pinned_pages_t value. A reference is removed from a + * scif_pinned_pages_t value each time such a window is deleted. + * + * Subsequent operations which change the memory pages to which virtual + * addresses are mapped (such as mmap(), munmap()) have no effect on the + * scif_pinned_pages_t value or windows created against it. 
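+ *
+ * A minimal sketch of the typical lifetime (illustrative only; addr, len,
+ * epd, pp, po and err are assumed, with addr and len suitably page aligned):
+ *
+ *	err = scif_pin_pages(addr, len, SCIF_PROT_READ | SCIF_PROT_WRITE,
+ *			     0, &pp);
+ *	if (!err)
+ *		po = scif_register_pinned_pages(epd, pp, 0, 0);
+ *
+ * with scif_unpin_pages(pp) called once every window registered against pp
+ * has been scif_unregister()'d and is no longer needed.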
+ * + * If the process will fork(), it is recommended that the registered + * virtual address range be marked with MADV_DONTFORK. Doing so will prevent + * problems due to copy-on-write semantics. + * + * The prot_flags argument is formed by OR'ing together one or more of the + * following values. + * SCIF_PROT_READ - allow read operations against the pages + * SCIF_PROT_WRITE - allow write operations against the pages + * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a + * kernel space address. By default, addr is interpreted as a user space + * address. + * + * Return: + * Upon successful completion, scif_pin_pages() returns 0; otherwise the + * negative of one of the following errors is returned. + * + * Errors: + * EINVAL - prot_flags is invalid, map_flags is invalid, or offset is negative + * ENOMEM - Not enough space + */ +int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags, + scif_pinned_pages_t *pinned_pages); + +/** + * scif_unpin_pages() - Unpin a set of pages + * @pinned_pages: Handle to pinned pages to be unpinned + * + * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new + * windows against pinned_pages. The physical pages represented by pinned_pages + * will remain pinned until all windows previously registered against + * pinned_pages are deleted (the window is scif_unregister()'d and all + * references to the window are removed (see scif_unregister()). + * + * pinned_pages must have been obtain from a previous call to scif_pin_pages(). + * After calling scif_unpin_pages(), it is an error to pass pinned_pages to + * scif_register_pinned_pages(). + * + * Return: + * Upon successful completion, scif_unpin_pages() returns 0; otherwise the + * negative of one of the following errors is returned. + * + * Errors: + * EINVAL - pinned_pages is not valid + */ +int scif_unpin_pages(scif_pinned_pages_t pinned_pages); + +/** + * scif_register_pinned_pages() - Mark a memory region for remote access. + * @epd: endpoint descriptor + * @pinned_pages: Handle to pinned pages + * @offset: Registered address space offset + * @map_flags: Flags which control where pages are mapped + * + * The scif_register_pinned_pages() function opens a window, a range of whole + * pages of the registered address space of the endpoint epd, starting at + * offset po. The value of po, further described below, is a function of the + * parameters offset and pinned_pages, and the value of map_flags. Each page of + * the window represents a corresponding physical memory page of the range + * represented by pinned_pages; the length of the window is the same as the + * length of range represented by pinned_pages. A successful + * scif_register_pinned_pages() call returns po as the return value. + * + * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset + * exactly, and offset is constrained to be a multiple of the page size. The + * mapping established by scif_register_pinned_pages() will not replace any + * existing registration; an error is returned if any page of the new window + * would intersect an existing window. + * + * When SCIF_MAP_FIXED is not set, the implementation uses offset in an + * implementation-defined manner to arrive at po. The po so chosen will be an + * area of the registered address space that the implementation deems suitable + * for a mapping of the required size. 
An offset value of 0 is interpreted as + * granting the implementation complete freedom in selecting po, subject to + * constraints described below. A non-zero value of offset is taken to be a + * suggestion of an offset near which the mapping should be placed. When the + * implementation selects a value for po, it does not replace any extant + * window. In all cases, po will be a multiple of the page size. + * + * The physical pages which are so represented by a window are available for + * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(), + * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the + * physical pages represented by the window will not be reused by the memory + * subsystem for any other purpose. Note that the same physical page may be + * represented by multiple windows. + * + * Windows created by scif_register_pinned_pages() are unregistered by + * scif_unregister(). + * + * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a + * fixed offset. + * + * Return: + * Upon successful completion, scif_register_pinned_pages() returns the offset + * at which the mapping was placed (po); otherwise the negative of one of the + * following errors is returned. + * + * Errors: + * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window + * would intersect an existing window + * EAGAIN - The mapping could not be performed due to lack of resources + * ECONNRESET - Connection reset by peer + * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and + * offset is not a multiple of the page size, or offset is negative + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOTCONN - The endpoint is not connected + */ +off_t scif_register_pinned_pages(scif_epd_t epd, + scif_pinned_pages_t pinned_pages, + off_t offset, int map_flags); + +/** + * scif_get_pages() - Add references to remote registered pages + * @epd: endpoint descriptor + * @offset: remote registered offset + * @len: length of range of pages + * @pages: returned scif_range structure + * + * scif_get_pages() returns the addresses of the physical pages represented by + * those pages of the registered address space of the peer of epd, starting at + * offset and continuing for len bytes. offset and len are constrained to be + * multiples of the page size. + * + * All of the pages in the specified range [offset, offset + len - 1] must be + * within a single window of the registered address space of the peer of epd. + * + * The addresses are returned as a virtually contiguous array pointed to by the + * phys_addr component of the scif_range structure whose address is returned in + * pages. The nr_pages component of scif_range is the length of the array. The + * prot_flags component of scif_range holds the protection flag value passed + * when the pages were registered. + * + * Each physical page whose address is returned by scif_get_pages() remains + * available and will not be released for reuse until the scif_range structure + * is returned in a call to scif_put_pages(). The scif_range structure returned + * by scif_get_pages() must be unmodified. + * + * It is an error to call scif_close() on an endpoint on which a scif_range + * structure of that endpoint has not been returned to scif_put_pages(). + * + * Return: + * Upon successful completion, scif_get_pages() returns 0; otherwise the + * negative of one of the following errors is returned. 
+ * Errors: + * ECONNRESET - Connection reset by peer. + * EINVAL - offset is not a multiple of the page size, or offset is negative, or + * len is not a multiple of the page size + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid + * for the registered address space of the peer epd + */ +int scif_get_pages(scif_epd_t epd, off_t offset, size_t len, + struct scif_range **pages); + +/** + * scif_put_pages() - Remove references from remote registered pages + * @pages: pages to be returned + * + * scif_put_pages() releases a scif_range structure previously obtained by + * calling scif_get_pages(). The physical pages represented by pages may + * be reused when the window which represented those pages is unregistered. + * Therefore, those pages must not be accessed after calling scif_put_pages(). + * + * Return: + * Upon successful completion, scif_put_pages() returns 0; otherwise the + * negative of one of the following errors is returned. + * Errors: + * EINVAL - pages does not point to a valid scif_range structure, or + * the scif_range structure pointed to by pages was already returned + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + */ +int scif_put_pages(struct scif_range *pages); + +/** + * scif_poll() - Wait for some event on an endpoint + * @epds: Array of endpoint descriptors + * @nepds: Length of epds + * @timeout: Upper limit on time for which scif_poll() will block + * + * scif_poll() waits for one of a set of endpoints to become ready to perform + * an I/O operation. + * + * The epds argument specifies the endpoint descriptors to be examined and the + * events of interest for each endpoint descriptor. epds is a pointer to an + * array with one member for each open endpoint descriptor of interest. + * + * The number of items in the epds array is specified in nepds. The epd field + * of scif_pollepd is an endpoint descriptor of an open endpoint. The field + * events is a bitmask specifying the events which the application is + * interested in. The field revents is an output parameter, filled by the + * kernel with the events that actually occurred. The bits returned in revents + * can include any of those specified in events, or one of the values EPOLLERR, + * EPOLLHUP, or EPOLLNVAL. (These three bits are meaningless in the events + * field, and will be set in the revents field whenever the corresponding + * condition is true.) + * + * If none of the events requested (and no error) has occurred for any of the + * endpoint descriptors, then scif_poll() blocks until one of the events occurs. + * + * The timeout argument specifies an upper limit on the time for which + * scif_poll() will block, in milliseconds. Specifying a negative value in + * timeout means an infinite timeout. + * + * The following bits may be set in events and returned in revents. + * EPOLLIN - Data may be received without blocking. For a connected + * endpoint, this means that scif_recv() may be called without blocking. For a + * listening endpoint, this means that scif_accept() may be called without + * blocking. + * EPOLLOUT - Data may be sent without blocking. For a connected endpoint, this + * means that scif_send() may be called without blocking. 
EPOLLOUT may also be + * used to block waiting for a non-blocking connect to complete. This bit value + * has no meaning for a listening endpoint and is ignored if specified. + * + * The following bits are only returned in revents, and are ignored if set in + * events. + * EPOLLERR - An error occurred on the endpoint + * EPOLLHUP - The connection to the peer endpoint was disconnected + * EPOLLNVAL - The specified endpoint descriptor is invalid. + * + * Return: + * Upon successful completion, scif_poll() returns a non-negative value. A + * positive value indicates the total number of endpoint descriptors that have + * been selected (that is, endpoint descriptors for which the revents member is + * non-zero). A value of 0 indicates that the call timed out and no endpoint + * descriptors have been selected. Otherwise in user mode -1 is returned and + * errno is set to indicate the error; in kernel mode the negative of one of + * the following errors is returned. + * + * Errors: + * EINTR - A signal occurred before any requested event + * EINVAL - The nepds argument is greater than {OPEN_MAX} + * ENOMEM - There was no space to allocate file descriptor tables + */ +int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout); + +/** + * scif_client_register() - Register a SCIF client + * @client: client to be registered + * + * scif_client_register() registers a SCIF client. The probe() method + * of the client is called when SCIF peer devices come online and the + * remove() method is called when the peer devices disappear. + * + * Return: + * Upon successful completion, scif_client_register() returns a non-negative + * value. Otherwise the return value is the same as subsys_interface_register() + * in the kernel. + */ +int scif_client_register(struct scif_client *client); + +/** + * scif_client_unregister() - Unregister a SCIF client + * @client: client to be unregistered + * + * scif_client_unregister() unregisters a SCIF client. + * + * Return: + * None + */ +void scif_client_unregister(struct scif_client *client); + +#endif /* __SCIF_H__ */ diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h new file mode 100644 index 000000000..f4c9fc0fc --- /dev/null +++ b/include/linux/scmi_protocol.h @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCMI Message Protocol driver header + * + * Copyright (C) 2018 ARM Ltd. + */ +#include +#include + +#define SCMI_MAX_STR_SIZE 16 +#define SCMI_MAX_NUM_RATES 16 + +/** + * struct scmi_revision_info - version information structure + * + * @major_ver: Major ABI version. Change here implies risk of backward + * compatibility break. + * @minor_ver: Minor ABI version. Change here implies new feature addition, + * or compatible change in ABI. + * @num_protocols: Number of protocols that are implemented, excluding the + * base protocol. + * @num_agents: Number of agents in the system. + * @impl_ver: A vendor-specific implementation version. 
+ * @vendor_id: A vendor identifier(Null terminated ASCII string) + * @sub_vendor_id: A sub-vendor identifier(Null terminated ASCII string) + */ +struct scmi_revision_info { + u16 major_ver; + u16 minor_ver; + u8 num_protocols; + u8 num_agents; + u32 impl_ver; + char vendor_id[SCMI_MAX_STR_SIZE]; + char sub_vendor_id[SCMI_MAX_STR_SIZE]; +}; + +struct scmi_clock_info { + char name[SCMI_MAX_STR_SIZE]; + bool rate_discrete; + union { + struct { + int num_rates; + u64 rates[SCMI_MAX_NUM_RATES]; + } list; + struct { + u64 min_rate; + u64 max_rate; + u64 step_size; + } range; + }; +}; + +struct scmi_handle; + +/** + * struct scmi_clk_ops - represents the various operations provided + * by SCMI Clock Protocol + * + * @count_get: get the count of clocks provided by SCMI + * @info_get: get the information of the specified clock + * @rate_get: request the current clock rate of a clock + * @rate_set: set the clock rate of a clock + * @enable: enables the specified clock + * @disable: disables the specified clock + */ +struct scmi_clk_ops { + int (*count_get)(const struct scmi_handle *handle); + + const struct scmi_clock_info *(*info_get) + (const struct scmi_handle *handle, u32 clk_id); + int (*rate_get)(const struct scmi_handle *handle, u32 clk_id, + u64 *rate); + int (*rate_set)(const struct scmi_handle *handle, u32 clk_id, + u32 config, u64 rate); + int (*enable)(const struct scmi_handle *handle, u32 clk_id); + int (*disable)(const struct scmi_handle *handle, u32 clk_id); +}; + +/** + * struct scmi_perf_ops - represents the various operations provided + * by SCMI Performance Protocol + * + * @limits_set: sets limits on the performance level of a domain + * @limits_get: gets limits on the performance level of a domain + * @level_set: sets the performance level of a domain + * @level_get: gets the performance level of a domain + * @device_domain_id: gets the scmi domain id for a given device + * @transition_latency_get: gets the DVFS transition latency for a given device + * @device_opps_add: adds all the OPPs for a given device + * @freq_set: sets the frequency for a given device using sustained frequency + * to sustained performance level mapping + * @freq_get: gets the frequency for a given device using sustained frequency + * to sustained performance level mapping + */ +struct scmi_perf_ops { + int (*limits_set)(const struct scmi_handle *handle, u32 domain, + u32 max_perf, u32 min_perf); + int (*limits_get)(const struct scmi_handle *handle, u32 domain, + u32 *max_perf, u32 *min_perf); + int (*level_set)(const struct scmi_handle *handle, u32 domain, + u32 level, bool poll); + int (*level_get)(const struct scmi_handle *handle, u32 domain, + u32 *level, bool poll); + int (*device_domain_id)(struct device *dev); + int (*transition_latency_get)(const struct scmi_handle *handle, + struct device *dev); + int (*device_opps_add)(const struct scmi_handle *handle, + struct device *dev); + int (*freq_set)(const struct scmi_handle *handle, u32 domain, + unsigned long rate, bool poll); + int (*freq_get)(const struct scmi_handle *handle, u32 domain, + unsigned long *rate, bool poll); +}; + +/** + * struct scmi_power_ops - represents the various operations provided + * by SCMI Power Protocol + * + * @num_domains_get: get the count of power domains provided by SCMI + * @name_get: gets the name of a power domain + * @state_set: sets the power state of a power domain + * @state_get: gets the power state of a power domain + */ +struct scmi_power_ops { + int (*num_domains_get)(const struct scmi_handle *handle); + 
char *(*name_get)(const struct scmi_handle *handle, u32 domain); +#define SCMI_POWER_STATE_TYPE_SHIFT 30 +#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1) +#define SCMI_POWER_STATE_PARAM(type, id) \ + ((((type) & BIT(0)) << SCMI_POWER_STATE_TYPE_SHIFT) | \ + ((id) & SCMI_POWER_STATE_ID_MASK)) +#define SCMI_POWER_STATE_GENERIC_ON SCMI_POWER_STATE_PARAM(0, 0) +#define SCMI_POWER_STATE_GENERIC_OFF SCMI_POWER_STATE_PARAM(1, 0) + int (*state_set)(const struct scmi_handle *handle, u32 domain, + u32 state); + int (*state_get)(const struct scmi_handle *handle, u32 domain, + u32 *state); +}; + +struct scmi_sensor_info { + u32 id; + u8 type; + char name[SCMI_MAX_STR_SIZE]; +}; + +/* + * Partial list from Distributed Management Task Force (DMTF) specification: + * DSP0249 (Platform Level Data Model specification) + */ +enum scmi_sensor_class { + NONE = 0x0, + TEMPERATURE_C = 0x2, + VOLTAGE = 0x5, + CURRENT = 0x6, + POWER = 0x7, + ENERGY = 0x8, +}; + +/** + * struct scmi_sensor_ops - represents the various operations provided + * by SCMI Sensor Protocol + * + * @count_get: get the count of sensors provided by SCMI + * @info_get: get the information of the specified sensor + * @configuration_set: control notifications on cross-over events for + * the trip-points + * @trip_point_set: selects and configures a trip-point of interest + * @reading_get: gets the current value of the sensor + */ +struct scmi_sensor_ops { + int (*count_get)(const struct scmi_handle *handle); + + const struct scmi_sensor_info *(*info_get) + (const struct scmi_handle *handle, u32 sensor_id); + int (*configuration_set)(const struct scmi_handle *handle, + u32 sensor_id); + int (*trip_point_set)(const struct scmi_handle *handle, u32 sensor_id, + u8 trip_id, u64 trip_value); + int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, + bool async, u64 *value); +}; + +/** + * struct scmi_handle - Handle returned to ARM SCMI clients for usage. 
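+ *
+ * As an informal example (num is an assumed local, not part of this API),
+ * a protocol user that has been handed this structure invokes the protocol
+ * operations through it, e.g.:
+ *
+ *	num = handle->clk_ops->count_get(handle);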
+ * + * @dev: pointer to the SCMI device + * @version: pointer to the structure containing SCMI version information + * @power_ops: pointer to set of power protocol operations + * @perf_ops: pointer to set of performance protocol operations + * @clk_ops: pointer to set of clock protocol operations + * @sensor_ops: pointer to set of sensor protocol operations + * @perf_priv: pointer to private data structure specific to performance + * protocol(for internal use only) + * @clk_priv: pointer to private data structure specific to clock + * protocol(for internal use only) + * @power_priv: pointer to private data structure specific to power + * protocol(for internal use only) + * @sensor_priv: pointer to private data structure specific to sensors + * protocol(for internal use only) + */ +struct scmi_handle { + struct device *dev; + struct scmi_revision_info *version; + struct scmi_perf_ops *perf_ops; + struct scmi_clk_ops *clk_ops; + struct scmi_power_ops *power_ops; + struct scmi_sensor_ops *sensor_ops; + /* for protocol internal use */ + void *perf_priv; + void *clk_priv; + void *power_priv; + void *sensor_priv; +}; + +enum scmi_std_protocol { + SCMI_PROTOCOL_BASE = 0x10, + SCMI_PROTOCOL_POWER = 0x11, + SCMI_PROTOCOL_SYSTEM = 0x12, + SCMI_PROTOCOL_PERF = 0x13, + SCMI_PROTOCOL_CLOCK = 0x14, + SCMI_PROTOCOL_SENSOR = 0x15, +}; + +struct scmi_device { + u32 id; + u8 protocol_id; + struct device dev; + struct scmi_handle *handle; +}; + +#define to_scmi_dev(d) container_of(d, struct scmi_device, dev) + +struct scmi_device * +scmi_device_create(struct device_node *np, struct device *parent, int protocol); +void scmi_device_destroy(struct scmi_device *scmi_dev); + +struct scmi_device_id { + u8 protocol_id; +}; + +struct scmi_driver { + const char *name; + int (*probe)(struct scmi_device *sdev); + void (*remove)(struct scmi_device *sdev); + const struct scmi_device_id *id_table; + + struct device_driver driver; +}; + +#define to_scmi_driver(d) container_of(d, struct scmi_driver, driver) + +#ifdef CONFIG_ARM_SCMI_PROTOCOL +int scmi_driver_register(struct scmi_driver *driver, + struct module *owner, const char *mod_name); +void scmi_driver_unregister(struct scmi_driver *driver); +#else +static inline int +scmi_driver_register(struct scmi_driver *driver, struct module *owner, + const char *mod_name) +{ + return -EINVAL; +} + +static inline void scmi_driver_unregister(struct scmi_driver *driver) {} +#endif /* CONFIG_ARM_SCMI_PROTOCOL */ + +#define scmi_register(driver) \ + scmi_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +#define scmi_unregister(driver) \ + scmi_driver_unregister(driver) + +/** + * module_scmi_driver() - Helper macro for registering a scmi driver + * @__scmi_driver: scmi_driver structure + * + * Helper macro for scmi drivers to set up proper module init / exit + * functions. Replaces module_init() and module_exit() and keeps people from + * printing pointless things to the kernel log when their driver is loaded. + */ +#define module_scmi_driver(__scmi_driver) \ + module_driver(__scmi_driver, scmi_register, scmi_unregister) + +typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *); +int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn); +void scmi_protocol_unregister(int protocol_id); diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h new file mode 100644 index 000000000..327d65663 --- /dev/null +++ b/include/linux/scpi_protocol.h @@ -0,0 +1,84 @@ +/* + * SCPI Message Protocol driver header + * + * Copyright (C) 2014 ARM Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ +#include + +struct scpi_opp { + u32 freq; + u32 m_volt; +} __packed; + +struct scpi_dvfs_info { + unsigned int count; + unsigned int latency; /* in nanoseconds */ + struct scpi_opp *opps; +}; + +enum scpi_sensor_class { + TEMPERATURE, + VOLTAGE, + CURRENT, + POWER, + ENERGY, +}; + +struct scpi_sensor_info { + u16 sensor_id; + u8 class; + u8 trigger_type; + char name[20]; +} __packed; + +/** + * struct scpi_ops - represents the various operations provided + * by SCP through SCPI message protocol + * @get_version: returns the major and minor revision on the SCPI + * message protocol + * @clk_get_range: gets clock range limit(min - max in Hz) + * @clk_get_val: gets clock value(in Hz) + * @clk_set_val: sets the clock value, setting to 0 will disable the + * clock (if supported) + * @dvfs_get_idx: gets the Operating Point of the given power domain. + * OPP is an index to the list return by @dvfs_get_info + * @dvfs_set_idx: sets the Operating Point of the given power domain. + * OPP is an index to the list return by @dvfs_get_info + * @dvfs_get_info: returns the DVFS capabilities of the given power + * domain. It includes the OPP list and the latency information + */ +struct scpi_ops { + u32 (*get_version)(void); + int (*clk_get_range)(u16, unsigned long *, unsigned long *); + unsigned long (*clk_get_val)(u16); + int (*clk_set_val)(u16, unsigned long); + int (*dvfs_get_idx)(u8); + int (*dvfs_set_idx)(u8, u8); + struct scpi_dvfs_info *(*dvfs_get_info)(u8); + int (*device_domain_id)(struct device *); + int (*get_transition_latency)(struct device *); + int (*add_opps_to_device)(struct device *); + int (*sensor_get_capability)(u16 *sensors); + int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *); + int (*sensor_get_value)(u16, u64 *); + int (*device_get_power_state)(u16); + int (*device_set_power_state)(u16, u8); +}; + +#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL) +struct scpi_ops *get_scpi_ops(void); +#else +static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } +#endif diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h new file mode 100644 index 000000000..eab708139 --- /dev/null +++ b/include/linux/screen_info.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SCREEN_INFO_H +#define _SCREEN_INFO_H + +#include + +extern struct screen_info screen_info; + +#endif /* _SCREEN_INFO_H */ diff --git a/include/linux/sctp.h b/include/linux/sctp.h new file mode 100644 index 000000000..83d94341e --- /dev/null +++ b/include/linux/sctp.h @@ -0,0 +1,811 @@ +/* SCTP kernel reference Implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001 Intel Corp. + * Copyright (c) 2001 Nokia, Inc. + * Copyright (c) 2001 La Monte H.P. Yarroll + * + * This file is part of the SCTP kernel reference Implementation + * + * Various protocol defined structures. 
+ * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * . + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers + * + * Or submit a bug report through the following website: + * http://www.sf.net/projects/lksctp + * + * Written or modified by: + * La Monte H.P. Yarroll + * Karl Knutson + * Jon Grimm + * Xingang Guo + * randall@sctp.chicago.il.us + * kmorneau@cisco.com + * qxie1@email.mot.com + * Sridhar Samudrala + * Kevin Gao + * + * Any bugs reported given to us we will try to fix... any fixes shared will + * be incorporated into the next SCTP release. + */ +#ifndef __LINUX_SCTP_H__ +#define __LINUX_SCTP_H__ + +#include /* We need in_addr. */ +#include /* We need in6_addr. */ +#include + +#include + +/* Section 3.1. SCTP Common Header Format */ +struct sctphdr { + __be16 source; + __be16 dest; + __be32 vtag; + __le32 checksum; +}; + +static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) +{ + return (struct sctphdr *)skb_transport_header(skb); +} + +/* Section 3.2. Chunk Field Descriptions. */ +struct sctp_chunkhdr { + __u8 type; + __u8 flags; + __be16 length; +}; + + +/* Section 3.2. Chunk Type Values. + * [Chunk Type] identifies the type of information contained in the Chunk + * Value field. It takes a value from 0 to 254. The value of 255 is + * reserved for future use as an extension field. + */ +enum sctp_cid { + SCTP_CID_DATA = 0, + SCTP_CID_INIT = 1, + SCTP_CID_INIT_ACK = 2, + SCTP_CID_SACK = 3, + SCTP_CID_HEARTBEAT = 4, + SCTP_CID_HEARTBEAT_ACK = 5, + SCTP_CID_ABORT = 6, + SCTP_CID_SHUTDOWN = 7, + SCTP_CID_SHUTDOWN_ACK = 8, + SCTP_CID_ERROR = 9, + SCTP_CID_COOKIE_ECHO = 10, + SCTP_CID_COOKIE_ACK = 11, + SCTP_CID_ECN_ECNE = 12, + SCTP_CID_ECN_CWR = 13, + SCTP_CID_SHUTDOWN_COMPLETE = 14, + + /* AUTH Extension Section 4.1 */ + SCTP_CID_AUTH = 0x0F, + + /* sctp ndata 5.1. I-DATA */ + SCTP_CID_I_DATA = 0x40, + + /* PR-SCTP Sec 3.2 */ + SCTP_CID_FWD_TSN = 0xC0, + + /* Use hex, as defined in ADDIP sec. 3.1 */ + SCTP_CID_ASCONF = 0xC1, + SCTP_CID_I_FWD_TSN = 0xC2, + SCTP_CID_ASCONF_ACK = 0x80, + SCTP_CID_RECONF = 0x82, +}; /* enum */ + + +/* Section 3.2 + * Chunk Types are encoded such that the highest-order two bits specify + * the action that must be taken if the processing endpoint does not + * recognize the Chunk Type. + */ +enum { + SCTP_CID_ACTION_DISCARD = 0x00, + SCTP_CID_ACTION_DISCARD_ERR = 0x40, + SCTP_CID_ACTION_SKIP = 0x80, + SCTP_CID_ACTION_SKIP_ERR = 0xc0, +}; + +enum { SCTP_CID_ACTION_MASK = 0xc0, }; + +/* This flag is used in Chunk Flags for ABORT and SHUTDOWN COMPLETE. + * + * 3.3.7 Abort Association (ABORT) (6): + * The T bit is set to 0 if the sender had a TCB that it destroyed. + * If the sender did not have a TCB it should set this bit to 1. 
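+ *
+ * Informally (a sketch only; ch is an assumed pointer to the
+ * struct sctp_chunkhdr of a received ABORT or SHUTDOWN COMPLETE chunk):
+ *
+ *	bool sender_had_no_tcb = ch->flags & SCTP_CHUNK_FLAG_T;
+ *
+ * The sctp_test_T_bit() macro defined below performs the same test on a
+ * chunk structure.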
+ */ +enum { SCTP_CHUNK_FLAG_T = 0x01 }; + +/* + * Set the T bit + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 14 |Reserved |T| Length = 4 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Chunk Flags: 8 bits + * + * Reserved: 7 bits + * Set to 0 on transmit and ignored on receipt. + * + * T bit: 1 bit + * The T bit is set to 0 if the sender had a TCB that it destroyed. If + * the sender did NOT have a TCB it should set this bit to 1. + * + * Note: Special rules apply to this chunk for verification, please + * see Section 8.5.1 for details. + */ + +#define sctp_test_T_bit(c) ((c)->chunk_hdr->flags & SCTP_CHUNK_FLAG_T) + +/* RFC 2960 + * Section 3.2.1 Optional/Variable-length Parmaeter Format. + */ + +struct sctp_paramhdr { + __be16 type; + __be16 length; +}; + +enum sctp_param { + + /* RFC 2960 Section 3.3.5 */ + SCTP_PARAM_HEARTBEAT_INFO = cpu_to_be16(1), + /* RFC 2960 Section 3.3.2.1 */ + SCTP_PARAM_IPV4_ADDRESS = cpu_to_be16(5), + SCTP_PARAM_IPV6_ADDRESS = cpu_to_be16(6), + SCTP_PARAM_STATE_COOKIE = cpu_to_be16(7), + SCTP_PARAM_UNRECOGNIZED_PARAMETERS = cpu_to_be16(8), + SCTP_PARAM_COOKIE_PRESERVATIVE = cpu_to_be16(9), + SCTP_PARAM_HOST_NAME_ADDRESS = cpu_to_be16(11), + SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = cpu_to_be16(12), + SCTP_PARAM_ECN_CAPABLE = cpu_to_be16(0x8000), + + /* AUTH Extension Section 3 */ + SCTP_PARAM_RANDOM = cpu_to_be16(0x8002), + SCTP_PARAM_CHUNKS = cpu_to_be16(0x8003), + SCTP_PARAM_HMAC_ALGO = cpu_to_be16(0x8004), + + /* Add-IP: Supported Extensions, Section 4.2 */ + SCTP_PARAM_SUPPORTED_EXT = cpu_to_be16(0x8008), + + /* PR-SCTP Sec 3.1 */ + SCTP_PARAM_FWD_TSN_SUPPORT = cpu_to_be16(0xc000), + + /* Add-IP Extension. Section 3.2 */ + SCTP_PARAM_ADD_IP = cpu_to_be16(0xc001), + SCTP_PARAM_DEL_IP = cpu_to_be16(0xc002), + SCTP_PARAM_ERR_CAUSE = cpu_to_be16(0xc003), + SCTP_PARAM_SET_PRIMARY = cpu_to_be16(0xc004), + SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005), + SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006), + + /* RE-CONFIG. Section 4 */ + SCTP_PARAM_RESET_OUT_REQUEST = cpu_to_be16(0x000d), + SCTP_PARAM_RESET_IN_REQUEST = cpu_to_be16(0x000e), + SCTP_PARAM_RESET_TSN_REQUEST = cpu_to_be16(0x000f), + SCTP_PARAM_RESET_RESPONSE = cpu_to_be16(0x0010), + SCTP_PARAM_RESET_ADD_OUT_STREAMS = cpu_to_be16(0x0011), + SCTP_PARAM_RESET_ADD_IN_STREAMS = cpu_to_be16(0x0012), +}; /* enum */ + + +/* RFC 2960 Section 3.2.1 + * The Parameter Types are encoded such that the highest-order two bits + * specify the action that must be taken if the processing endpoint does + * not recognize the Parameter Type. 
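+ *
+ * Informally (a sketch only; phdr is an assumed struct sctp_paramhdr
+ * pointer, and its type field is already in network byte order, as are the
+ * action values defined next):
+ *
+ *	__be16 action = phdr->type & SCTP_PARAM_ACTION_MASK;
+ *
+ * action can then be compared against the four SCTP_PARAM_ACTION_* values.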
+ * + */ +enum { + SCTP_PARAM_ACTION_DISCARD = cpu_to_be16(0x0000), + SCTP_PARAM_ACTION_DISCARD_ERR = cpu_to_be16(0x4000), + SCTP_PARAM_ACTION_SKIP = cpu_to_be16(0x8000), + SCTP_PARAM_ACTION_SKIP_ERR = cpu_to_be16(0xc000), +}; + +enum { SCTP_PARAM_ACTION_MASK = cpu_to_be16(0xc000), }; + +/* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */ + +struct sctp_datahdr { + __be32 tsn; + __be16 stream; + __be16 ssn; + __u32 ppid; + __u8 payload[0]; +}; + +struct sctp_data_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_datahdr data_hdr; +}; + +struct sctp_idatahdr { + __be32 tsn; + __be16 stream; + __be16 reserved; + __be32 mid; + union { + __u32 ppid; + __be32 fsn; + }; + __u8 payload[0]; +}; + +struct sctp_idata_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_idatahdr data_hdr; +}; + +/* DATA Chuck Specific Flags */ +enum { + SCTP_DATA_MIDDLE_FRAG = 0x00, + SCTP_DATA_LAST_FRAG = 0x01, + SCTP_DATA_FIRST_FRAG = 0x02, + SCTP_DATA_NOT_FRAG = 0x03, + SCTP_DATA_UNORDERED = 0x04, + SCTP_DATA_SACK_IMM = 0x08, +}; +enum { SCTP_DATA_FRAG_MASK = 0x03, }; + + +/* RFC 2960 Section 3.3.2 Initiation (INIT) (1) + * + * This chunk is used to initiate a SCTP association between two + * endpoints. + */ +struct sctp_inithdr { + __be32 init_tag; + __be32 a_rwnd; + __be16 num_outbound_streams; + __be16 num_inbound_streams; + __be32 initial_tsn; + __u8 params[0]; +}; + +struct sctp_init_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_inithdr init_hdr; +}; + + +/* Section 3.3.2.1. IPv4 Address Parameter (5) */ +struct sctp_ipv4addr_param { + struct sctp_paramhdr param_hdr; + struct in_addr addr; +}; + +/* Section 3.3.2.1. IPv6 Address Parameter (6) */ +struct sctp_ipv6addr_param { + struct sctp_paramhdr param_hdr; + struct in6_addr addr; +}; + +/* Section 3.3.2.1 Cookie Preservative (9) */ +struct sctp_cookie_preserve_param { + struct sctp_paramhdr param_hdr; + __be32 lifespan_increment; +}; + +/* Section 3.3.2.1 Host Name Address (11) */ +struct sctp_hostname_param { + struct sctp_paramhdr param_hdr; + uint8_t hostname[0]; +}; + +/* Section 3.3.2.1 Supported Address Types (12) */ +struct sctp_supported_addrs_param { + struct sctp_paramhdr param_hdr; + __be16 types[0]; +}; + +/* ADDIP Section 3.2.6 Adaptation Layer Indication */ +struct sctp_adaptation_ind_param { + struct sctp_paramhdr param_hdr; + __be32 adaptation_ind; +}; + +/* ADDIP Section 4.2.7 Supported Extensions Parameter */ +struct sctp_supported_ext_param { + struct sctp_paramhdr param_hdr; + __u8 chunks[0]; +}; + +/* AUTH Section 3.1 Random */ +struct sctp_random_param { + struct sctp_paramhdr param_hdr; + __u8 random_val[0]; +}; + +/* AUTH Section 3.2 Chunk List */ +struct sctp_chunks_param { + struct sctp_paramhdr param_hdr; + __u8 chunks[0]; +}; + +/* AUTH Section 3.3 HMAC Algorithm */ +struct sctp_hmac_algo_param { + struct sctp_paramhdr param_hdr; + __be16 hmac_ids[0]; +}; + +/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): + * The INIT ACK chunk is used to acknowledge the initiation of an SCTP + * association. 
+ */ +struct sctp_initack_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_inithdr init_hdr; +}; + +/* Section 3.3.3.1 State Cookie (7) */ +struct sctp_cookie_param { + struct sctp_paramhdr p; + __u8 body[0]; +}; + +/* Section 3.3.3.1 Unrecognized Parameters (8) */ +struct sctp_unrecognized_param { + struct sctp_paramhdr param_hdr; + struct sctp_paramhdr unrecognized; +}; + + + +/* + * 3.3.4 Selective Acknowledgement (SACK) (3): + * + * This chunk is sent to the peer endpoint to acknowledge received DATA + * chunks and to inform the peer endpoint of gaps in the received + * subsequences of DATA chunks as represented by their TSNs. + */ + +struct sctp_gap_ack_block { + __be16 start; + __be16 end; +}; + +union sctp_sack_variable { + struct sctp_gap_ack_block gab; + __be32 dup; +}; + +struct sctp_sackhdr { + __be32 cum_tsn_ack; + __be32 a_rwnd; + __be16 num_gap_ack_blocks; + __be16 num_dup_tsns; + union sctp_sack_variable variable[0]; +}; + +struct sctp_sack_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_sackhdr sack_hdr; +}; + + +/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): + * + * An endpoint should send this chunk to its peer endpoint to probe the + * reachability of a particular destination transport address defined in + * the present association. + */ + +struct sctp_heartbeathdr { + struct sctp_paramhdr info; +}; + +struct sctp_heartbeat_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_heartbeathdr hb_hdr; +}; + + +/* For the abort and shutdown ACK we must carry the init tag in the + * common header. Just the common header is all that is needed with a + * chunk descriptor. + */ +struct sctp_abort_chunk { + struct sctp_chunkhdr uh; +}; + + +/* For the graceful shutdown we must carry the tag (in common header) + * and the highest consecutive acking value. + */ +struct sctp_shutdownhdr { + __be32 cum_tsn_ack; +}; + +struct sctp_shutdown_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_shutdownhdr shutdown_hdr; +}; + +/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */ + +struct sctp_errhdr { + __be16 cause; + __be16 length; + __u8 variable[0]; +}; + +struct sctp_operr_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_errhdr err_hdr; +}; + +/* RFC 2960 3.3.10 - Operation Error + * + * Cause Code: 16 bits (unsigned integer) + * + * Defines the type of error conditions being reported. 
+ * Cause Code + * Value Cause Code + * --------- ---------------- + * 1 Invalid Stream Identifier + * 2 Missing Mandatory Parameter + * 3 Stale Cookie Error + * 4 Out of Resource + * 5 Unresolvable Address + * 6 Unrecognized Chunk Type + * 7 Invalid Mandatory Parameter + * 8 Unrecognized Parameters + * 9 No User Data + * 10 Cookie Received While Shutting Down + */ +enum sctp_error { + + SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00), + SCTP_ERROR_INV_STRM = cpu_to_be16(0x01), + SCTP_ERROR_MISS_PARAM = cpu_to_be16(0x02), + SCTP_ERROR_STALE_COOKIE = cpu_to_be16(0x03), + SCTP_ERROR_NO_RESOURCE = cpu_to_be16(0x04), + SCTP_ERROR_DNS_FAILED = cpu_to_be16(0x05), + SCTP_ERROR_UNKNOWN_CHUNK = cpu_to_be16(0x06), + SCTP_ERROR_INV_PARAM = cpu_to_be16(0x07), + SCTP_ERROR_UNKNOWN_PARAM = cpu_to_be16(0x08), + SCTP_ERROR_NO_DATA = cpu_to_be16(0x09), + SCTP_ERROR_COOKIE_IN_SHUTDOWN = cpu_to_be16(0x0a), + + + /* SCTP Implementation Guide: + * 11 Restart of an association with new addresses + * 12 User Initiated Abort + * 13 Protocol Violation + */ + + SCTP_ERROR_RESTART = cpu_to_be16(0x0b), + SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c), + SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d), + + /* ADDIP Section 3.3 New Error Causes + * + * Four new Error Causes are added to the SCTP Operational Errors, + * primarily for use in the ASCONF-ACK chunk. + * + * Value Cause Code + * --------- ---------------- + * 0x00A0 Request to Delete Last Remaining IP Address. + * 0x00A1 Operation Refused Due to Resource Shortage. + * 0x00A2 Request to Delete Source IP Address. + * 0x00A3 Association Aborted due to illegal ASCONF-ACK + * 0x00A4 Request refused - no authorization. + */ + SCTP_ERROR_DEL_LAST_IP = cpu_to_be16(0x00A0), + SCTP_ERROR_RSRC_LOW = cpu_to_be16(0x00A1), + SCTP_ERROR_DEL_SRC_IP = cpu_to_be16(0x00A2), + SCTP_ERROR_ASCONF_ACK = cpu_to_be16(0x00A3), + SCTP_ERROR_REQ_REFUSED = cpu_to_be16(0x00A4), + + /* AUTH Section 4. New Error Cause + * + * This section defines a new error cause that will be sent if an AUTH + * chunk is received with an unsupported HMAC identifier. + * illustrates the new error cause. + * + * Cause Code Error Cause Name + * -------------------------------------------------------------- + * 0x0105 Unsupported HMAC Identifier + */ + SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105) +}; + + + +/* RFC 2960. Appendix A. Explicit Congestion Notification. + * Explicit Congestion Notification Echo (ECNE) (12) + */ +struct sctp_ecnehdr { + __be32 lowest_tsn; +}; + +struct sctp_ecne_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_ecnehdr ence_hdr; +}; + +/* RFC 2960. Appendix A. Explicit Congestion Notification. 
+ * Congestion Window Reduced (CWR) (13) + */ +struct sctp_cwrhdr { + __be32 lowest_tsn; +}; + +/* PR-SCTP + * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) + * + * Forward Cumulative TSN chunk has the following format: + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 192 | Flags = 0x00 | Length = Variable | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | New Cumulative TSN | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Stream-1 | Stream Sequence-1 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * \ / + * / \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Stream-N | Stream Sequence-N | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Chunk Flags: + * + * Set to all zeros on transmit and ignored on receipt. + * + * New Cumulative TSN: 32 bit u_int + * + * This indicates the new cumulative TSN to the data receiver. Upon + * the reception of this value, the data receiver MUST consider + * any missing TSNs earlier than or equal to this value as received + * and stop reporting them as gaps in any subsequent SACKs. + * + * Stream-N: 16 bit u_int + * + * This field holds a stream number that was skipped by this + * FWD-TSN. + * + * Stream Sequence-N: 16 bit u_int + * This field holds the sequence number associated with the stream + * that was skipped. The stream sequence field holds the largest stream + * sequence number in this stream being skipped. The receiver of + * the FWD-TSN's can use the Stream-N and Stream Sequence-N fields + * to enable delivery of any stranded TSN's that remain on the stream + * re-ordering queues. This field MUST NOT report TSN's corresponding + * to DATA chunk that are marked as unordered. For ordered DATA + * chunks this field MUST be filled in. + */ +struct sctp_fwdtsn_skip { + __be16 stream; + __be16 ssn; +}; + +struct sctp_fwdtsn_hdr { + __be32 new_cum_tsn; + struct sctp_fwdtsn_skip skip[0]; +}; + +struct sctp_fwdtsn_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_fwdtsn_hdr fwdtsn_hdr; +}; + +struct sctp_ifwdtsn_skip { + __be16 stream; + __u8 reserved; + __u8 flags; + __be32 mid; +}; + +struct sctp_ifwdtsn_hdr { + __be32 new_cum_tsn; + struct sctp_ifwdtsn_skip skip[0]; +}; + +struct sctp_ifwdtsn_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_ifwdtsn_hdr fwdtsn_hdr; +}; + +/* ADDIP + * Section 3.1.1 Address Configuration Change Chunk (ASCONF) + * + * Serial Number: 32 bits (unsigned integer) + * This value represents a Serial Number for the ASCONF Chunk. The + * valid range of Serial Number is from 0 to 2^32-1. + * Serial Numbers wrap back to 0 after reaching 2^32 -1. + * + * Address Parameter: 8 or 20 bytes (depending on type) + * The address is an address of the sender of the ASCONF chunk, + * the address MUST be considered part of the association by the + * peer endpoint. This field may be used by the receiver of the + * ASCONF to help in finding the association. This parameter MUST + * be present in every ASCONF message i.e. it is a mandatory TLV + * parameter. + * + * ASCONF Parameter: TLV format + * Each Address configuration change is represented by a TLV + * parameter as defined in Section 3.2. One or more requests may + * be present in an ASCONF Chunk. 
+ * + * Section 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK) + * + * Serial Number: 32 bits (unsigned integer) + * This value represents the Serial Number for the received ASCONF + * Chunk that is acknowledged by this chunk. This value is copied + * from the received ASCONF Chunk. + * + * ASCONF Parameter Response: TLV format + * The ASCONF Parameter Response is used in the ASCONF-ACK to + * report status of ASCONF processing. + */ +struct sctp_addip_param { + struct sctp_paramhdr param_hdr; + __be32 crr_id; +}; + +struct sctp_addiphdr { + __be32 serial; + __u8 params[0]; +}; + +struct sctp_addip_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_addiphdr addip_hdr; +}; + +/* AUTH + * Section 4.1 Authentication Chunk (AUTH) + * + * This chunk is used to hold the result of the HMAC calculation. + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 0x0F | Flags=0 | Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Shared Key Identifier | HMAC Identifier | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * \ HMAC / + * / \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Type: 1 byte (unsigned integer) + * This value MUST be set to 0x0F for all AUTH-chunks. + * + * Flags: 1 byte (unsigned integer) + * Set to zero on transmit and ignored on receipt. + * + * Length: 2 bytes (unsigned integer) + * This value holds the length of the HMAC in bytes plus 8. + * + * Shared Key Identifier: 2 bytes (unsigned integer) + * This value describes which endpoint pair shared key is used. + * + * HMAC Identifier: 2 bytes (unsigned integer) + * This value describes which message digest is being used. Table 2 + * shows the currently defined values. + * + * The following Table 2 shows the currently defined values for HMAC + * identifiers. + * + * +-----------------+--------------------------+ + * | HMAC Identifier | Message Digest Algorithm | + * +-----------------+--------------------------+ + * | 0 | Reserved | + * | 1 | SHA-1 defined in [8] | + * | 2 | Reserved | + * | 3 | SHA-256 defined in [8] | + * +-----------------+--------------------------+ + * + * + * HMAC: n bytes (unsigned integer) This hold the result of the HMAC + * calculation. 
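+ *
+ * As an informal consequence of the length rule above (a sketch only; auth
+ * is an assumed pointer to a received, validated struct sctp_auth_chunk and
+ * hmac_len an assumed local), the size of the HMAC field follows as:
+ *
+ *	hmac_len = ntohs(auth->chunk_hdr.length) -
+ *		   sizeof(struct sctp_auth_chunk);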
+ */ +struct sctp_authhdr { + __be16 shkey_id; + __be16 hmac_id; + __u8 hmac[0]; +}; + +struct sctp_auth_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_authhdr auth_hdr; +}; + +struct sctp_infox { + struct sctp_info *sctpinfo; + struct sctp_association *asoc; +}; + +struct sctp_reconf_chunk { + struct sctp_chunkhdr chunk_hdr; + __u8 params[0]; +}; + +struct sctp_strreset_outreq { + struct sctp_paramhdr param_hdr; + __be32 request_seq; + __be32 response_seq; + __be32 send_reset_at_tsn; + __be16 list_of_streams[0]; +}; + +struct sctp_strreset_inreq { + struct sctp_paramhdr param_hdr; + __be32 request_seq; + __be16 list_of_streams[0]; +}; + +struct sctp_strreset_tsnreq { + struct sctp_paramhdr param_hdr; + __be32 request_seq; +}; + +struct sctp_strreset_addstrm { + struct sctp_paramhdr param_hdr; + __be32 request_seq; + __be16 number_of_streams; + __be16 reserved; +}; + +enum { + SCTP_STRRESET_NOTHING_TO_DO = 0x00, + SCTP_STRRESET_PERFORMED = 0x01, + SCTP_STRRESET_DENIED = 0x02, + SCTP_STRRESET_ERR_WRONG_SSN = 0x03, + SCTP_STRRESET_ERR_IN_PROGRESS = 0x04, + SCTP_STRRESET_ERR_BAD_SEQNO = 0x05, + SCTP_STRRESET_IN_PROGRESS = 0x06, +}; + +struct sctp_strreset_resp { + struct sctp_paramhdr param_hdr; + __be32 response_seq; + __be32 result; +}; + +struct sctp_strreset_resptsn { + struct sctp_paramhdr param_hdr; + __be32 response_seq; + __be32 result; + __be32 senders_next_tsn; + __be32 receivers_next_tsn; +}; + +enum { + SCTP_DSCP_SET_MASK = 0x1, + SCTP_DSCP_VAL_MASK = 0xfc, + SCTP_FLOWLABEL_SET_MASK = 0x100000, + SCTP_FLOWLABEL_VAL_MASK = 0xfffff +}; + +#endif /* __LINUX_SCTP_H__ */ diff --git a/include/linux/scx200.h b/include/linux/scx200.h new file mode 100644 index 000000000..652ec1a45 --- /dev/null +++ b/include/linux/scx200.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* linux/include/linux/scx200.h + + Copyright (c) 2001,2002 Christer Weinigel + + Defines for the National Semiconductor SCx200 Processors +*/ + +/* Interesting stuff for the National Semiconductor SCx200 CPU */ + +extern unsigned scx200_cb_base; + +#define scx200_cb_present() (scx200_cb_base!=0) + +/* F0 PCI Header/Bridge Configuration Registers */ +#define SCx200_DOCCS_BASE 0x78 /* DOCCS Base Address Register */ +#define SCx200_DOCCS_CTRL 0x7c /* DOCCS Control Register */ + +/* GPIO Register Block */ +#define SCx200_GPIO_SIZE 0x2c /* Size of GPIO register block */ + +/* General Configuration Block */ +#define SCx200_CB_BASE_FIXED 0x9000 /* Base fixed at 0x9000 according to errata? 
*/ + +/* Watchdog Timer */ +#define SCx200_WDT_OFFSET 0x00 /* offset within configuration block */ +#define SCx200_WDT_SIZE 0x05 /* size */ + +#define SCx200_WDT_WDTO 0x00 /* Time-Out Register */ +#define SCx200_WDT_WDCNFG 0x02 /* Configuration Register */ +#define SCx200_WDT_WDSTS 0x04 /* Status Register */ +#define SCx200_WDT_WDSTS_WDOVF (1<<0) /* Overflow bit */ + +/* High Resolution Timer */ +#define SCx200_TIMER_OFFSET 0x08 +#define SCx200_TIMER_SIZE 0x06 + +/* Clock Generators */ +#define SCx200_CLOCKGEN_OFFSET 0x10 +#define SCx200_CLOCKGEN_SIZE 0x10 + +/* Pin Multiplexing and Miscellaneous Configuration Registers */ +#define SCx200_MISC_OFFSET 0x30 +#define SCx200_MISC_SIZE 0x10 + +#define SCx200_PMR 0x30 /* Pin Multiplexing Register */ +#define SCx200_MCR 0x34 /* Miscellaneous Configuration Register */ +#define SCx200_INTSEL 0x38 /* Interrupt Selection Register */ +#define SCx200_IID 0x3c /* IA On a Chip Identification Number Reg */ +#define SCx200_REV 0x3d /* Revision Register */ +#define SCx200_CBA 0x3e /* Configuration Base Address Register */ +#define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */ diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h new file mode 100644 index 000000000..6386ddbb6 --- /dev/null +++ b/include/linux/scx200_gpio.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear); + +extern unsigned scx200_gpio_base; +extern unsigned long scx200_gpio_shadow[2]; +extern struct nsc_gpio_ops scx200_gpio_ops; + +#define scx200_gpio_present() (scx200_gpio_base!=0) + +/* Definitions to make sure I do the same thing in all functions */ +#define __SCx200_GPIO_BANK unsigned bank = index>>5 +#define __SCx200_GPIO_IOADDR unsigned short ioaddr = scx200_gpio_base+0x10*bank +#define __SCx200_GPIO_SHADOW unsigned long *shadow = scx200_gpio_shadow+bank +#define __SCx200_GPIO_INDEX index &= 31 + +#define __SCx200_GPIO_OUT __asm__ __volatile__("outsl":"=mS" (shadow):"d" (ioaddr), "0" (shadow)) + +/* returns the value of the GPIO pin */ + +static inline int scx200_gpio_get(unsigned index) { + __SCx200_GPIO_BANK; + __SCx200_GPIO_IOADDR + 0x04; + __SCx200_GPIO_INDEX; + + return (inl(ioaddr) & (1< +#else +#include +#endif + +/* + * All structures are 64 bytes long and are expected + * to live in an array, one for each interconnect. + * Most fields of the structures are shared among the + * various types, and most-specific fields are at the + * beginning (for alignment reasons, and to keep the + * magic number at the head of the interconnect record + */ + +/* Product, 40 bytes at offset 24, 8-byte aligned + * + * device_id is vendor-assigned; version is device-specific, + * date is hex (e.g 0x20120501), name is UTF-8, blank-filled + * and not terminated with a 0 byte. 
+ */ +struct sdb_product { + uint64_t vendor_id; /* 0x18..0x1f */ + uint32_t device_id; /* 0x20..0x23 */ + uint32_t version; /* 0x24..0x27 */ + uint32_t date; /* 0x28..0x2b */ + uint8_t name[19]; /* 0x2c..0x3e */ + uint8_t record_type; /* 0x3f */ +}; + +/* + * Component, 56 bytes at offset 8, 8-byte aligned + * + * The address range is first to last, inclusive + * (for example 0x100000 - 0x10ffff) + */ +struct sdb_component { + uint64_t addr_first; /* 0x08..0x0f */ + uint64_t addr_last; /* 0x10..0x17 */ + struct sdb_product product; /* 0x18..0x3f */ +}; + +/* Type of the SDB record */ +enum sdb_record_type { + sdb_type_interconnect = 0x00, + sdb_type_device = 0x01, + sdb_type_bridge = 0x02, + sdb_type_integration = 0x80, + sdb_type_repo_url = 0x81, + sdb_type_synthesis = 0x82, + sdb_type_empty = 0xFF, +}; + +/* Type 0: interconnect (first of the array) + * + * sdb_records is the length of the table including this first + * record, version is 1. The bus type is enumerated later. + */ +#define SDB_MAGIC 0x5344422d /* "SDB-" */ +struct sdb_interconnect { + uint32_t sdb_magic; /* 0x00-0x03 */ + uint16_t sdb_records; /* 0x04-0x05 */ + uint8_t sdb_version; /* 0x06 */ + uint8_t sdb_bus_type; /* 0x07 */ + struct sdb_component sdb_component; /* 0x08-0x3f */ +}; + +/* Type 1: device + * + * class is 0 for "custom device", other values are + * to be standardized; ABI version is for the driver, + * bus-specific bits are defined by each bus (see below) + */ +struct sdb_device { + uint16_t abi_class; /* 0x00-0x01 */ + uint8_t abi_ver_major; /* 0x02 */ + uint8_t abi_ver_minor; /* 0x03 */ + uint32_t bus_specific; /* 0x04-0x07 */ + struct sdb_component sdb_component; /* 0x08-0x3f */ +}; + +/* Type 2: bridge + * + * child is the address of the nested SDB table + */ +struct sdb_bridge { + uint64_t sdb_child; /* 0x00-0x07 */ + struct sdb_component sdb_component; /* 0x08-0x3f */ +}; + +/* Type 0x80: integration + * + * all types with bit 7 set are meta-information, so + * software can ignore the types it doesn't know. Here we + * just provide product information for an aggregate device + */ +struct sdb_integration { + uint8_t reserved[24]; /* 0x00-0x17 */ + struct sdb_product product; /* 0x08-0x3f */ +}; + +/* Type 0x81: Top module repository url + * + * again, an informative field that software can ignore + */ +struct sdb_repo_url { + uint8_t repo_url[63]; /* 0x00-0x3e */ + uint8_t record_type; /* 0x3f */ +}; + +/* Type 0x82: Synthesis tool information + * + * this informative record + */ +struct sdb_synthesis { + uint8_t syn_name[16]; /* 0x00-0x0f */ + uint8_t commit_id[16]; /* 0x10-0x1f */ + uint8_t tool_name[8]; /* 0x20-0x27 */ + uint32_t tool_version; /* 0x28-0x2b */ + uint32_t date; /* 0x2c-0x2f */ + uint8_t user_name[15]; /* 0x30-0x3e */ + uint8_t record_type; /* 0x3f */ +}; + +/* Type 0xff: empty + * + * this allows keeping empty slots during development, + * so they can be filled later with minimal efforts and + * no misleading description is ever shipped -- hopefully. + * It can also be used to pad a table to a desired length. 
+ */ +struct sdb_empty { + uint8_t reserved[63]; /* 0x00-0x3e */ + uint8_t record_type; /* 0x3f */ +}; + +/* The type of bus, for bus-specific flags */ +enum sdb_bus_type { + sdb_wishbone = 0x00, + sdb_data = 0x01, +}; + +#define SDB_WB_WIDTH_MASK 0x0f +#define SDB_WB_ACCESS8 0x01 +#define SDB_WB_ACCESS16 0x02 +#define SDB_WB_ACCESS32 0x04 +#define SDB_WB_ACCESS64 0x08 +#define SDB_WB_LITTLE_ENDIAN 0x80 + +#define SDB_DATA_READ 0x04 +#define SDB_DATA_WRITE 0x02 +#define SDB_DATA_EXEC 0x01 + +#endif /* __SDB_H__ */ diff --git a/include/linux/sdla.h b/include/linux/sdla.h new file mode 100644 index 000000000..fe7a967d7 --- /dev/null +++ b/include/linux/sdla.h @@ -0,0 +1,244 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the Frame relay interface. + * + * Version: @(#)if_ifrad.h 0.20 13 Apr 96 + * + * Author: Mike McLagan + * + * Changes: + * 0.15 Mike McLagan Structure packing + * + * 0.20 Mike McLagan New flags for S508 buffer handling + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef SDLA_H +#define SDLA_H + +#include + + +/* important Z80 window addresses */ +#define SDLA_CONTROL_WND 0xE000 + +#define SDLA_502_CMD_BUF 0xEF60 +#define SDLA_502_RCV_BUF 0xA900 +#define SDLA_502_TXN_AVAIL 0xFFF1 +#define SDLA_502_RCV_AVAIL 0xFFF2 +#define SDLA_502_EVENT_FLAGS 0xFFF3 +#define SDLA_502_MDM_STATUS 0xFFF4 +#define SDLA_502_IRQ_INTERFACE 0xFFFD +#define SDLA_502_IRQ_PERMISSION 0xFFFE +#define SDLA_502_DATA_OFS 0x0010 + +#define SDLA_508_CMD_BUF 0xE000 +#define SDLA_508_TXBUF_INFO 0xF100 +#define SDLA_508_RXBUF_INFO 0xF120 +#define SDLA_508_EVENT_FLAGS 0xF003 +#define SDLA_508_MDM_STATUS 0xF004 +#define SDLA_508_IRQ_INTERFACE 0xF010 +#define SDLA_508_IRQ_PERMISSION 0xF011 +#define SDLA_508_TSE_OFFSET 0xF012 + +/* Event flags */ +#define SDLA_EVENT_STATUS 0x01 +#define SDLA_EVENT_DLCI_STATUS 0x02 +#define SDLA_EVENT_BAD_DLCI 0x04 +#define SDLA_EVENT_LINK_DOWN 0x40 + +/* IRQ Trigger flags */ +#define SDLA_INTR_RX 0x01 +#define SDLA_INTR_TX 0x02 +#define SDLA_INTR_MODEM 0x04 +#define SDLA_INTR_COMPLETE 0x08 +#define SDLA_INTR_STATUS 0x10 +#define SDLA_INTR_TIMER 0x20 + +/* DLCI status bits */ +#define SDLA_DLCI_DELETED 0x01 +#define SDLA_DLCI_ACTIVE 0x02 +#define SDLA_DLCI_WAITING 0x04 +#define SDLA_DLCI_NEW 0x08 +#define SDLA_DLCI_INCLUDED 0x40 + +/* valid command codes */ +#define SDLA_INFORMATION_WRITE 0x01 +#define SDLA_INFORMATION_READ 0x02 +#define SDLA_ISSUE_IN_CHANNEL_SIGNAL 0x03 +#define SDLA_SET_DLCI_CONFIGURATION 0x10 +#define SDLA_READ_DLCI_CONFIGURATION 0x11 +#define SDLA_DISABLE_COMMUNICATIONS 0x12 +#define SDLA_ENABLE_COMMUNICATIONS 0x13 +#define SDLA_READ_DLC_STATUS 0x14 +#define SDLA_READ_DLC_STATISTICS 0x15 +#define SDLA_FLUSH_DLC_STATISTICS 0x16 +#define SDLA_LIST_ACTIVE_DLCI 0x17 +#define SDLA_FLUSH_INFORMATION_BUFFERS 0x18 +#define SDLA_ADD_DLCI 0x20 +#define SDLA_DELETE_DLCI 0x21 +#define SDLA_ACTIVATE_DLCI 0x22 +#define SDLA_DEACTIVATE_DLCI 0x23 +#define SDLA_READ_MODEM_STATUS 0x30 +#define SDLA_SET_MODEM_STATUS 0x31 +#define SDLA_READ_COMMS_ERR_STATS 0x32 +#define SDLA_FLUSH_COMMS_ERR_STATS 0x33 +#define SDLA_READ_CODE_VERSION 0x40 +#define SDLA_SET_IRQ_TRIGGER 0x50 
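[Editor's note on the SDB layout above] The SDB format described in include/linux/sdb.h (fixed 64-byte records, a leading sdb_interconnect whose sdb_magic and sdb_records fields frame the table, and record_type in the last byte of every entry) lends itself to a simple table walk. The following is a minimal, hypothetical user-space sketch of such a walk, assuming the table has already been read into memory and converted to host byte order; it is illustrative only and not part of this patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SDB_MAGIC       0x5344422d      /* "SDB-" */
#define SDB_RECORD_SIZE 64              /* every SDB record is 64 bytes */

/*
 * Walk an SDB table already in memory, host byte order assumed.
 * Offsets follow the layout above: sdb_magic at 0x00, sdb_records at
 * 0x04, record_type in the last byte (0x3f) of each record.
 */
static int sdb_walk(const uint8_t *table, size_t len)
{
        uint32_t magic;
        uint16_t nrec;
        size_t i;

        if (len < SDB_RECORD_SIZE)
                return -1;

        memcpy(&magic, table + 0x00, sizeof(magic));
        memcpy(&nrec, table + 0x04, sizeof(nrec));

        if (magic != SDB_MAGIC)
                return -1;              /* not an SDB table */
        if ((size_t)nrec * SDB_RECORD_SIZE > len)
                return -1;              /* table claims more records than we have */

        for (i = 0; i < nrec; i++) {
                uint8_t type = table[i * SDB_RECORD_SIZE + 0x3f];

                printf("record %zu: type 0x%02x%s\n", i, type,
                       (type & 0x80) ? " (meta-information)" : "");
        }
        return 0;
}

A bridge record (type 0x02) would additionally require recursing into the nested table pointed to by sdb_child; that step is omitted here for brevity.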
+#define SDLA_GET_IRQ_TRIGGER 0x51 + +/* In channel signal types */ +#define SDLA_ICS_LINK_VERIFY 0x02 +#define SDLA_ICS_STATUS_ENQ 0x03 + +/* modem status flags */ +#define SDLA_MODEM_DTR_HIGH 0x01 +#define SDLA_MODEM_RTS_HIGH 0x02 +#define SDLA_MODEM_DCD_HIGH 0x08 +#define SDLA_MODEM_CTS_HIGH 0x20 + +/* used for RET_MODEM interpretation */ +#define SDLA_MODEM_DCD_LOW 0x01 +#define SDLA_MODEM_CTS_LOW 0x02 + +/* return codes */ +#define SDLA_RET_OK 0x00 +#define SDLA_RET_COMMUNICATIONS 0x01 +#define SDLA_RET_CHANNEL_INACTIVE 0x02 +#define SDLA_RET_DLCI_INACTIVE 0x03 +#define SDLA_RET_DLCI_CONFIG 0x04 +#define SDLA_RET_BUF_TOO_BIG 0x05 +#define SDLA_RET_NO_DATA 0x05 +#define SDLA_RET_BUF_OVERSIZE 0x06 +#define SDLA_RET_CIR_OVERFLOW 0x07 +#define SDLA_RET_NO_BUFS 0x08 +#define SDLA_RET_TIMEOUT 0x0A +#define SDLA_RET_MODEM 0x10 +#define SDLA_RET_CHANNEL_OFF 0x11 +#define SDLA_RET_CHANNEL_ON 0x12 +#define SDLA_RET_DLCI_STATUS 0x13 +#define SDLA_RET_DLCI_UNKNOWN 0x14 +#define SDLA_RET_COMMAND_INVALID 0x1F + +/* Configuration flags */ +#define SDLA_DIRECT_RECV 0x0080 +#define SDLA_TX_NO_EXCEPT 0x0020 +#define SDLA_NO_ICF_MSGS 0x1000 +#define SDLA_TX50_RX50 0x0000 +#define SDLA_TX70_RX30 0x2000 +#define SDLA_TX30_RX70 0x4000 + +/* IRQ selection flags */ +#define SDLA_IRQ_RECEIVE 0x01 +#define SDLA_IRQ_TRANSMIT 0x02 +#define SDLA_IRQ_MODEM_STAT 0x04 +#define SDLA_IRQ_COMMAND 0x08 +#define SDLA_IRQ_CHANNEL 0x10 +#define SDLA_IRQ_TIMER 0x20 + +/* definitions for PC memory mapping */ +#define SDLA_8K_WINDOW 0x01 +#define SDLA_S502_SEG_A 0x10 +#define SDLA_S502_SEG_C 0x20 +#define SDLA_S502_SEG_D 0x00 +#define SDLA_S502_SEG_E 0x30 +#define SDLA_S507_SEG_A 0x00 +#define SDLA_S507_SEG_B 0x40 +#define SDLA_S507_SEG_C 0x80 +#define SDLA_S507_SEG_E 0xC0 +#define SDLA_S508_SEG_A 0x00 +#define SDLA_S508_SEG_C 0x10 +#define SDLA_S508_SEG_D 0x08 +#define SDLA_S508_SEG_E 0x18 + +/* SDLA adapter port constants */ +#define SDLA_IO_EXTENTS 0x04 + +#define SDLA_REG_CONTROL 0x00 +#define SDLA_REG_PC_WINDOW 0x01 /* offset for PC window select latch */ +#define SDLA_REG_Z80_WINDOW 0x02 /* offset for Z80 window select latch */ +#define SDLA_REG_Z80_CONTROL 0x03 /* offset for Z80 control latch */ + +#define SDLA_S502_STS 0x00 /* status reg for 502, 502E, 507 */ +#define SDLA_S508_GNRL 0x00 /* general purp. 
reg for 508 */ +#define SDLA_S508_STS 0x01 /* status reg for 508 */ +#define SDLA_S508_IDR 0x02 /* ID reg for 508 */ + +/* control register flags */ +#define SDLA_S502A_START 0x00 /* start the CPU */ +#define SDLA_S502A_INTREQ 0x02 +#define SDLA_S502A_INTEN 0x04 +#define SDLA_S502A_HALT 0x08 /* halt the CPU */ +#define SDLA_S502A_NMI 0x10 /* issue an NMI to the CPU */ + +#define SDLA_S502E_CPUEN 0x01 +#define SDLA_S502E_ENABLE 0x02 +#define SDLA_S502E_INTACK 0x04 + +#define SDLA_S507_ENABLE 0x01 +#define SDLA_S507_IRQ3 0x00 +#define SDLA_S507_IRQ4 0x20 +#define SDLA_S507_IRQ5 0x40 +#define SDLA_S507_IRQ7 0x60 +#define SDLA_S507_IRQ10 0x80 +#define SDLA_S507_IRQ11 0xA0 +#define SDLA_S507_IRQ12 0xC0 +#define SDLA_S507_IRQ15 0xE0 + +#define SDLA_HALT 0x00 +#define SDLA_CPUEN 0x02 +#define SDLA_MEMEN 0x04 +#define SDLA_S507_EPROMWR 0x08 +#define SDLA_S507_EPROMCLK 0x10 +#define SDLA_S508_INTRQ 0x08 +#define SDLA_S508_INTEN 0x10 + +struct sdla_cmd { + char opp_flag; + char cmd; + short length; + char retval; + short dlci; + char flags; + short rxlost_int; + long rxlost_app; + char reserve[2]; + char data[SDLA_MAX_DATA]; /* transfer data buffer */ +} __attribute__((packed)); + +struct intr_info { + char flags; + short txlen; + char irq; + char flags2; + short timeout; +} __attribute__((packed)); + +/* found in the 508's control window at RXBUF_INFO */ +struct buf_info { + unsigned short rse_num; + unsigned long rse_base; + unsigned long rse_next; + unsigned long buf_base; + unsigned short reserved; + unsigned long buf_top; +} __attribute__((packed)); + +/* structure pointed to by rse_base in RXBUF_INFO struct */ +struct buf_entry { + char opp_flag; + short length; + short dlci; + char flags; + short timestamp; + short reserved[2]; + long buf_addr; +} __attribute__((packed)); + +#endif diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h new file mode 100644 index 000000000..e5320f6c8 --- /dev/null +++ b/include/linux/seccomp.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SECCOMP_H +#define _LINUX_SECCOMP_H + +#include + +#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ + SECCOMP_FILTER_FLAG_LOG | \ + SECCOMP_FILTER_FLAG_SPEC_ALLOW) + +#ifdef CONFIG_SECCOMP + +#include +#include + +struct seccomp_filter; +/** + * struct seccomp - the state of a seccomp'ed process + * + * @mode: indicates one of the valid values above for controlled + * system calls available to a process. + * @filter: must always point to a valid seccomp-filter or NULL as it is + * accessed without locking during system call entry. + * + * @filter must only be accessed from the context of current as there + * is no read locking. 
+ */ +struct seccomp { + int mode; + struct seccomp_filter *filter; +}; + +#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER +extern int __secure_computing(const struct seccomp_data *sd); +static inline int secure_computing(const struct seccomp_data *sd) +{ + if (unlikely(test_thread_flag(TIF_SECCOMP))) + return __secure_computing(sd); + return 0; +} +#else +extern void secure_computing_strict(int this_syscall); +#endif + +extern long prctl_get_seccomp(void); +extern long prctl_set_seccomp(unsigned long, char __user *); + +static inline int seccomp_mode(struct seccomp *s) +{ + return s->mode; +} + +#else /* CONFIG_SECCOMP */ + +#include + +struct seccomp { }; +struct seccomp_filter { }; + +#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER +static inline int secure_computing(struct seccomp_data *sd) { return 0; } +#else +static inline void secure_computing_strict(int this_syscall) { return; } +#endif + +static inline long prctl_get_seccomp(void) +{ + return -EINVAL; +} + +static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3) +{ + return -EINVAL; +} + +static inline int seccomp_mode(struct seccomp *s) +{ + return SECCOMP_MODE_DISABLED; +} +#endif /* CONFIG_SECCOMP */ + +#ifdef CONFIG_SECCOMP_FILTER +extern void put_seccomp_filter(struct task_struct *tsk); +extern void get_seccomp_filter(struct task_struct *tsk); +#else /* CONFIG_SECCOMP_FILTER */ +static inline void put_seccomp_filter(struct task_struct *tsk) +{ + return; +} +static inline void get_seccomp_filter(struct task_struct *tsk) +{ + return; +} +#endif /* CONFIG_SECCOMP_FILTER */ + +#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE) +extern long seccomp_get_filter(struct task_struct *task, + unsigned long filter_off, void __user *data); +extern long seccomp_get_metadata(struct task_struct *task, + unsigned long filter_off, void __user *data); +#else +static inline long seccomp_get_filter(struct task_struct *task, + unsigned long n, void __user *data) +{ + return -EINVAL; +} +static inline long seccomp_get_metadata(struct task_struct *task, + unsigned long filter_off, + void __user *data) +{ + return -EINVAL; +} +#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */ +#endif /* _LINUX_SECCOMP_H */ diff --git a/include/linux/securebits.h b/include/linux/securebits.h new file mode 100644 index 000000000..656528673 --- /dev/null +++ b/include/linux/securebits.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SECUREBITS_H +#define _LINUX_SECUREBITS_H 1 + +#include + +#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits)) +#endif /* !_LINUX_SECUREBITS_H */ diff --git a/include/linux/security.h b/include/linux/security.h new file mode 100644 index 000000000..273877cf4 --- /dev/null +++ b/include/linux/security.h @@ -0,0 +1,1847 @@ +/* + * Linux Security plug + * + * Copyright (C) 2001 WireX Communications, Inc + * Copyright (C) 2001 Greg Kroah-Hartman + * Copyright (C) 2001 Networks Associates Technology, Inc + * Copyright (C) 2001 James Morris + * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) + * Copyright (C) 2016 Mellanox Techonologies + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * Due to this file being licensed under the GPL there is controversy over + * whether this permits you to write a module that #includes this file + * without placing your module under the GPL. Please consult a lawyer for + * advice before doing this. + * + */ + +#ifndef __LINUX_SECURITY_H +#define __LINUX_SECURITY_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct linux_binprm; +struct cred; +struct rlimit; +struct siginfo; +struct sembuf; +struct kern_ipc_perm; +struct audit_context; +struct super_block; +struct inode; +struct dentry; +struct file; +struct vfsmount; +struct path; +struct qstr; +struct iattr; +struct fown_struct; +struct file_operations; +struct msg_msg; +struct xattr; +struct xfrm_sec_ctx; +struct mm_struct; + +/* Default (no) options for the capable function */ +#define CAP_OPT_NONE 0x0 +/* If capable should audit the security request */ +#define CAP_OPT_NOAUDIT BIT(1) +/* If capable is being called by a setid function */ +#define CAP_OPT_INSETID BIT(2) + +/* LSM Agnostic defines for sb_set_mnt_opts */ +#define SECURITY_LSM_NATIVE_LABELS 1 + +struct ctl_table; +struct audit_krule; +struct user_namespace; +struct timezone; + +enum lsm_event { + LSM_POLICY_CHANGE, +}; + +/* These functions are in security/commoncap.c */ +extern int cap_capable(const struct cred *cred, struct user_namespace *ns, + int cap, unsigned int opts); +extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz); +extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); +extern int cap_ptrace_traceme(struct task_struct *parent); +extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); +extern int cap_capset(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); +extern int cap_bprm_set_creds(struct linux_binprm *bprm); +extern int cap_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +extern int cap_inode_removexattr(struct dentry *dentry, const char *name); +extern int cap_inode_need_killpriv(struct dentry *dentry); +extern int cap_inode_killpriv(struct dentry *dentry); +extern int cap_inode_getsecurity(struct inode *inode, const char *name, + void **buffer, bool alloc); +extern int cap_mmap_addr(unsigned long addr); +extern int cap_mmap_file(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags); +extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); +extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); +extern int cap_task_setscheduler(struct task_struct *p); +extern int cap_task_setioprio(struct task_struct *p, int ioprio); +extern int cap_task_setnice(struct task_struct *p, int nice); +extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); + +struct msghdr; +struct sk_buff; +struct sock; +struct sockaddr; +struct socket; +struct flowi; +struct dst_entry; +struct xfrm_selector; +struct xfrm_policy; +struct xfrm_state; +struct xfrm_user_sec_ctx; +struct seq_file; +struct sctp_endpoint; + +#ifdef CONFIG_MMU +extern unsigned long mmap_min_addr; +extern unsigned long dac_mmap_min_addr; +#else +#define mmap_min_addr 0UL +#define dac_mmap_min_addr 0UL +#endif + +/* + * Values used in the task_security_ops calls + */ +/* setuid or setgid, id0 == uid or gid */ 
+#define LSM_SETID_ID 1 + +/* setreuid or setregid, id0 == real, id1 == eff */ +#define LSM_SETID_RE 2 + +/* setresuid or setresgid, id0 == real, id1 == eff, uid2 == saved */ +#define LSM_SETID_RES 4 + +/* setfsuid or setfsgid, id0 == fsuid or fsgid */ +#define LSM_SETID_FS 8 + +/* Flags for security_task_prlimit(). */ +#define LSM_PRLIMIT_READ 1 +#define LSM_PRLIMIT_WRITE 2 + +/* forward declares to avoid warnings */ +struct sched_param; +struct request_sock; + +/* bprm->unsafe reasons */ +#define LSM_UNSAFE_SHARE 1 +#define LSM_UNSAFE_PTRACE 2 +#define LSM_UNSAFE_NO_NEW_PRIVS 4 + +#ifdef CONFIG_MMU +extern int mmap_min_addr_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +#endif + +/* security_inode_init_security callback function to write xattrs */ +typedef int (*initxattrs) (struct inode *inode, + const struct xattr *xattr_array, void *fs_data); + + +/* Keep the kernel_load_data_id enum in sync with kernel_read_file_id */ +#define __data_id_enumify(ENUM, dummy) LOADING_ ## ENUM, +#define __data_id_stringify(dummy, str) #str, + +enum kernel_load_data_id { + __kernel_read_file_id(__data_id_enumify) +}; + +static const char * const kernel_load_data_str[] = { + __kernel_read_file_id(__data_id_stringify) +}; + +static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id) +{ + if ((unsigned)id >= LOADING_MAX_ID) + return kernel_load_data_str[LOADING_UNKNOWN]; + + return kernel_load_data_str[id]; +} + +#ifdef CONFIG_SECURITY + +struct security_mnt_opts { + char **mnt_opts; + int *mnt_opts_flags; + int num_mnt_opts; +}; + +int call_lsm_notifier(enum lsm_event event, void *data); +int register_lsm_notifier(struct notifier_block *nb); +int unregister_lsm_notifier(struct notifier_block *nb); + +static inline void security_init_mnt_opts(struct security_mnt_opts *opts) +{ + opts->mnt_opts = NULL; + opts->mnt_opts_flags = NULL; + opts->num_mnt_opts = 0; +} + +static inline void security_free_mnt_opts(struct security_mnt_opts *opts) +{ + int i; + if (opts->mnt_opts) + for (i = 0; i < opts->num_mnt_opts; i++) + kfree(opts->mnt_opts[i]); + kfree(opts->mnt_opts); + opts->mnt_opts = NULL; + kfree(opts->mnt_opts_flags); + opts->mnt_opts_flags = NULL; + opts->num_mnt_opts = 0; +} + +/* prototypes */ +extern int security_init(void); + +/* Security operations */ +int security_binder_set_context_mgr(const struct cred *mgr); +int security_binder_transaction(const struct cred *from, + const struct cred *to); +int security_binder_transfer_binder(const struct cred *from, + const struct cred *to); +int security_binder_transfer_file(const struct cred *from, + const struct cred *to, struct file *file); +int security_ptrace_access_check(struct task_struct *child, unsigned int mode); +int security_ptrace_traceme(struct task_struct *parent); +int security_capget(struct task_struct *target, + kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted); +int security_capset(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); +int security_capable(const struct cred *cred, + struct user_namespace *ns, + int cap, + unsigned int opts); +int security_quotactl(int cmds, int type, int id, struct super_block *sb); +int security_quota_on(struct dentry *dentry); +int security_syslog(int type); +int security_settime64(const struct timespec64 *ts, const struct timezone *tz); +int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); +int 
security_bprm_set_creds(struct linux_binprm *bprm); +int security_bprm_check(struct linux_binprm *bprm); +void security_bprm_committing_creds(struct linux_binprm *bprm); +void security_bprm_committed_creds(struct linux_binprm *bprm); +int security_sb_alloc(struct super_block *sb); +void security_sb_free(struct super_block *sb); +int security_sb_copy_data(char *orig, char *copy); +int security_sb_remount(struct super_block *sb, void *data); +int security_sb_kern_mount(struct super_block *sb, int flags, void *data); +int security_sb_show_options(struct seq_file *m, struct super_block *sb); +int security_sb_statfs(struct dentry *dentry); +int security_sb_mount(const char *dev_name, const struct path *path, + const char *type, unsigned long flags, void *data); +int security_sb_umount(struct vfsmount *mnt, int flags); +int security_sb_pivotroot(const struct path *old_path, const struct path *new_path); +int security_sb_set_mnt_opts(struct super_block *sb, + struct security_mnt_opts *opts, + unsigned long kern_flags, + unsigned long *set_kern_flags); +int security_sb_clone_mnt_opts(const struct super_block *oldsb, + struct super_block *newsb, + unsigned long kern_flags, + unsigned long *set_kern_flags); +int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts); +int security_dentry_init_security(struct dentry *dentry, int mode, + const struct qstr *name, void **ctx, + u32 *ctxlen); +int security_dentry_create_files_as(struct dentry *dentry, int mode, + struct qstr *name, + const struct cred *old, + struct cred *new); + +int security_inode_alloc(struct inode *inode); +void security_inode_free(struct inode *inode); +int security_inode_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr, + initxattrs initxattrs, void *fs_data); +int security_old_inode_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr, const char **name, + void **value, size_t *len); +int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode); +int security_inode_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *new_dentry); +int security_inode_unlink(struct inode *dir, struct dentry *dentry); +int security_inode_symlink(struct inode *dir, struct dentry *dentry, + const char *old_name); +int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); +int security_inode_rmdir(struct inode *dir, struct dentry *dentry); +int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev); +int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags); +int security_inode_readlink(struct dentry *dentry); +int security_inode_follow_link(struct dentry *dentry, struct inode *inode, + bool rcu); +int security_inode_permission(struct inode *inode, int mask); +int security_inode_setattr(struct dentry *dentry, struct iattr *attr); +int security_inode_getattr(const struct path *path); +int security_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +void security_inode_post_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +int security_inode_getxattr(struct dentry *dentry, const char *name); +int security_inode_listxattr(struct dentry *dentry); +int security_inode_removexattr(struct dentry *dentry, const char *name); +int security_inode_need_killpriv(struct dentry *dentry); +int 
security_inode_killpriv(struct dentry *dentry); +int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc); +int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags); +int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); +void security_inode_getsecid(struct inode *inode, u32 *secid); +int security_inode_copy_up(struct dentry *src, struct cred **new); +int security_inode_copy_up_xattr(const char *name); +int security_file_permission(struct file *file, int mask); +int security_file_alloc(struct file *file); +void security_file_free(struct file *file); +int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +int security_mmap_file(struct file *file, unsigned long prot, + unsigned long flags); +int security_mmap_addr(unsigned long addr); +int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, + unsigned long prot); +int security_file_lock(struct file *file, unsigned int cmd); +int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg); +void security_file_set_fowner(struct file *file); +int security_file_send_sigiotask(struct task_struct *tsk, + struct fown_struct *fown, int sig); +int security_file_receive(struct file *file); +int security_file_open(struct file *file); +int security_task_alloc(struct task_struct *task, unsigned long clone_flags); +void security_task_free(struct task_struct *task); +int security_cred_alloc_blank(struct cred *cred, gfp_t gfp); +void security_cred_free(struct cred *cred); +int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); +void security_transfer_creds(struct cred *new, const struct cred *old); +void security_cred_getsecid(const struct cred *c, u32 *secid); +int security_kernel_act_as(struct cred *new, u32 secid); +int security_kernel_create_files_as(struct cred *new, struct inode *inode); +int security_kernel_module_request(char *kmod_name); +int security_kernel_load_data(enum kernel_load_data_id id); +int security_kernel_read_file(struct file *file, enum kernel_read_file_id id); +int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, + enum kernel_read_file_id id); +int security_task_fix_setuid(struct cred *new, const struct cred *old, + int flags); +int security_task_setpgid(struct task_struct *p, pid_t pgid); +int security_task_getpgid(struct task_struct *p); +int security_task_getsid(struct task_struct *p); +void security_task_getsecid(struct task_struct *p, u32 *secid); +int security_task_setnice(struct task_struct *p, int nice); +int security_task_setioprio(struct task_struct *p, int ioprio); +int security_task_getioprio(struct task_struct *p); +int security_task_prlimit(const struct cred *cred, const struct cred *tcred, + unsigned int flags); +int security_task_setrlimit(struct task_struct *p, unsigned int resource, + struct rlimit *new_rlim); +int security_task_setscheduler(struct task_struct *p); +int security_task_getscheduler(struct task_struct *p); +int security_task_movememory(struct task_struct *p); +int security_task_kill(struct task_struct *p, struct siginfo *info, + int sig, const struct cred *cred); +int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); +void security_task_to_inode(struct task_struct *p, struct inode *inode); +int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); +void security_ipc_getsecid(struct 
kern_ipc_perm *ipcp, u32 *secid); +int security_msg_msg_alloc(struct msg_msg *msg); +void security_msg_msg_free(struct msg_msg *msg); +int security_msg_queue_alloc(struct kern_ipc_perm *msq); +void security_msg_queue_free(struct kern_ipc_perm *msq); +int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg); +int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd); +int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, + struct msg_msg *msg, int msqflg); +int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg, + struct task_struct *target, long type, int mode); +int security_shm_alloc(struct kern_ipc_perm *shp); +void security_shm_free(struct kern_ipc_perm *shp); +int security_shm_associate(struct kern_ipc_perm *shp, int shmflg); +int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd); +int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg); +int security_sem_alloc(struct kern_ipc_perm *sma); +void security_sem_free(struct kern_ipc_perm *sma); +int security_sem_associate(struct kern_ipc_perm *sma, int semflg); +int security_sem_semctl(struct kern_ipc_perm *sma, int cmd); +int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, + unsigned nsops, int alter); +void security_d_instantiate(struct dentry *dentry, struct inode *inode); +int security_getprocattr(struct task_struct *p, char *name, char **value); +int security_setprocattr(const char *name, void *value, size_t size); +int security_netlink_send(struct sock *sk, struct sk_buff *skb); +int security_ismaclabel(const char *name); +int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); +int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); +void security_release_secctx(char *secdata, u32 seclen); + +void security_inode_invalidate_secctx(struct inode *inode); +int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); +int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); +int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); +#else /* CONFIG_SECURITY */ +struct security_mnt_opts { +}; + +static inline int call_lsm_notifier(enum lsm_event event, void *data) +{ + return 0; +} + +static inline int register_lsm_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int unregister_lsm_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline void security_init_mnt_opts(struct security_mnt_opts *opts) +{ +} + +static inline void security_free_mnt_opts(struct security_mnt_opts *opts) +{ +} + +/* + * This is the default capabilities functionality. Most of these functions + * are just stubbed out, but a few must call the proper capable code. 
+ */ + +static inline int security_init(void) +{ + return 0; +} + +static inline int security_binder_set_context_mgr(const struct cred *mgr) +{ + return 0; +} + +static inline int security_binder_transaction(const struct cred *from, + const struct cred *to) +{ + return 0; +} + +static inline int security_binder_transfer_binder(const struct cred *from, + const struct cred *to) +{ + return 0; +} + +static inline int security_binder_transfer_file(const struct cred *from, + const struct cred *to, + struct file *file) +{ + return 0; +} + +static inline int security_ptrace_access_check(struct task_struct *child, + unsigned int mode) +{ + return cap_ptrace_access_check(child, mode); +} + +static inline int security_ptrace_traceme(struct task_struct *parent) +{ + return cap_ptrace_traceme(parent); +} + +static inline int security_capget(struct task_struct *target, + kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted) +{ + return cap_capget(target, effective, inheritable, permitted); +} + +static inline int security_capset(struct cred *new, + const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted) +{ + return cap_capset(new, old, effective, inheritable, permitted); +} + +static inline int security_capable(const struct cred *cred, + struct user_namespace *ns, + int cap, + unsigned int opts) +{ + return cap_capable(cred, ns, cap, opts); +} + +static inline int security_quotactl(int cmds, int type, int id, + struct super_block *sb) +{ + return 0; +} + +static inline int security_quota_on(struct dentry *dentry) +{ + return 0; +} + +static inline int security_syslog(int type) +{ + return 0; +} + +static inline int security_settime64(const struct timespec64 *ts, + const struct timezone *tz) +{ + return cap_settime(ts, tz); +} + +static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) +{ + return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages)); +} + +static inline int security_bprm_set_creds(struct linux_binprm *bprm) +{ + return cap_bprm_set_creds(bprm); +} + +static inline int security_bprm_check(struct linux_binprm *bprm) +{ + return 0; +} + +static inline void security_bprm_committing_creds(struct linux_binprm *bprm) +{ +} + +static inline void security_bprm_committed_creds(struct linux_binprm *bprm) +{ +} + +static inline int security_sb_alloc(struct super_block *sb) +{ + return 0; +} + +static inline void security_sb_free(struct super_block *sb) +{ } + +static inline int security_sb_copy_data(char *orig, char *copy) +{ + return 0; +} + +static inline int security_sb_remount(struct super_block *sb, void *data) +{ + return 0; +} + +static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data) +{ + return 0; +} + +static inline int security_sb_show_options(struct seq_file *m, + struct super_block *sb) +{ + return 0; +} + +static inline int security_sb_statfs(struct dentry *dentry) +{ + return 0; +} + +static inline int security_sb_mount(const char *dev_name, const struct path *path, + const char *type, unsigned long flags, + void *data) +{ + return 0; +} + +static inline int security_sb_umount(struct vfsmount *mnt, int flags) +{ + return 0; +} + +static inline int security_sb_pivotroot(const struct path *old_path, + const struct path *new_path) +{ + return 0; +} + +static inline int security_sb_set_mnt_opts(struct super_block *sb, + struct security_mnt_opts *opts, + unsigned long kern_flags, + unsigned long *set_kern_flags) +{ + return 0; +} 
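[Editor's note, referring back to include/linux/seccomp.h above] The kernel-side secure_computing()/__secure_computing() path only runs once a task has entered filter mode, which user space normally requests through prctl(2). Below is a minimal, hypothetical user-space sketch of installing such a filter (failing getpid() with EPERM); it is illustrative only, omits the architecture check a production filter should perform, and is not part of this patch.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
        struct sock_filter filter[] = {
                /* Load the syscall number from struct seccomp_data. */
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                         offsetof(struct seccomp_data, nr)),
                /* Fail getpid() with EPERM, allow everything else. */
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len = sizeof(filter) / sizeof(filter[0]),
                .filter = filter,
        };

        /* Unprivileged tasks must set no_new_privs before SECCOMP_MODE_FILTER. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return 1;
        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
                return 1;

        /* Typically returns -1 with errno set to EPERM once the filter is active. */
        printf("getpid() -> %ld (errno %d)\n", (long)getpid(), errno);
        return 0;
}

The SECCOMP_FILTER_FLAG_* values masked by SECCOMP_FILTER_FLAG_MASK above apply to the same operation when it is issued through the seccomp(2) syscall rather than prctl(2).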
+ +static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb, + struct super_block *newsb, + unsigned long kern_flags, + unsigned long *set_kern_flags) +{ + return 0; +} + +static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) +{ + return 0; +} + +static inline int security_inode_alloc(struct inode *inode) +{ + return 0; +} + +static inline void security_inode_free(struct inode *inode) +{ } + +static inline int security_dentry_init_security(struct dentry *dentry, + int mode, + const struct qstr *name, + void **ctx, + u32 *ctxlen) +{ + return -EOPNOTSUPP; +} + +static inline int security_dentry_create_files_as(struct dentry *dentry, + int mode, struct qstr *name, + const struct cred *old, + struct cred *new) +{ + return 0; +} + + +static inline int security_inode_init_security(struct inode *inode, + struct inode *dir, + const struct qstr *qstr, + const initxattrs xattrs, + void *fs_data) +{ + return 0; +} + +static inline int security_old_inode_init_security(struct inode *inode, + struct inode *dir, + const struct qstr *qstr, + const char **name, + void **value, size_t *len) +{ + return -EOPNOTSUPP; +} + +static inline int security_inode_create(struct inode *dir, + struct dentry *dentry, + umode_t mode) +{ + return 0; +} + +static inline int security_inode_link(struct dentry *old_dentry, + struct inode *dir, + struct dentry *new_dentry) +{ + return 0; +} + +static inline int security_inode_unlink(struct inode *dir, + struct dentry *dentry) +{ + return 0; +} + +static inline int security_inode_symlink(struct inode *dir, + struct dentry *dentry, + const char *old_name) +{ + return 0; +} + +static inline int security_inode_mkdir(struct inode *dir, + struct dentry *dentry, + int mode) +{ + return 0; +} + +static inline int security_inode_rmdir(struct inode *dir, + struct dentry *dentry) +{ + return 0; +} + +static inline int security_inode_mknod(struct inode *dir, + struct dentry *dentry, + int mode, dev_t dev) +{ + return 0; +} + +static inline int security_inode_rename(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry, + unsigned int flags) +{ + return 0; +} + +static inline int security_inode_readlink(struct dentry *dentry) +{ + return 0; +} + +static inline int security_inode_follow_link(struct dentry *dentry, + struct inode *inode, + bool rcu) +{ + return 0; +} + +static inline int security_inode_permission(struct inode *inode, int mask) +{ + return 0; +} + +static inline int security_inode_setattr(struct dentry *dentry, + struct iattr *attr) +{ + return 0; +} + +static inline int security_inode_getattr(const struct path *path) +{ + return 0; +} + +static inline int security_inode_setxattr(struct dentry *dentry, + const char *name, const void *value, size_t size, int flags) +{ + return cap_inode_setxattr(dentry, name, value, size, flags); +} + +static inline void security_inode_post_setxattr(struct dentry *dentry, + const char *name, const void *value, size_t size, int flags) +{ } + +static inline int security_inode_getxattr(struct dentry *dentry, + const char *name) +{ + return 0; +} + +static inline int security_inode_listxattr(struct dentry *dentry) +{ + return 0; +} + +static inline int security_inode_removexattr(struct dentry *dentry, + const char *name) +{ + return cap_inode_removexattr(dentry, name); +} + +static inline int security_inode_need_killpriv(struct dentry *dentry) +{ + return cap_inode_need_killpriv(dentry); +} + +static inline int security_inode_killpriv(struct 
dentry *dentry) +{ + return cap_inode_killpriv(dentry); +} + +static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) +{ + return cap_inode_getsecurity(inode, name, buffer, alloc); +} + +static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) +{ + return -EOPNOTSUPP; +} + +static inline int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) +{ + return 0; +} + +static inline void security_inode_getsecid(struct inode *inode, u32 *secid) +{ + *secid = 0; +} + +static inline int security_inode_copy_up(struct dentry *src, struct cred **new) +{ + return 0; +} + +static inline int security_inode_copy_up_xattr(const char *name) +{ + return -EOPNOTSUPP; +} + +static inline int security_file_permission(struct file *file, int mask) +{ + return 0; +} + +static inline int security_file_alloc(struct file *file) +{ + return 0; +} + +static inline void security_file_free(struct file *file) +{ } + +static inline int security_file_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return 0; +} + +static inline int security_mmap_file(struct file *file, unsigned long prot, + unsigned long flags) +{ + return 0; +} + +static inline int security_mmap_addr(unsigned long addr) +{ + return cap_mmap_addr(addr); +} + +static inline int security_file_mprotect(struct vm_area_struct *vma, + unsigned long reqprot, + unsigned long prot) +{ + return 0; +} + +static inline int security_file_lock(struct file *file, unsigned int cmd) +{ + return 0; +} + +static inline int security_file_fcntl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return 0; +} + +static inline void security_file_set_fowner(struct file *file) +{ + return; +} + +static inline int security_file_send_sigiotask(struct task_struct *tsk, + struct fown_struct *fown, + int sig) +{ + return 0; +} + +static inline int security_file_receive(struct file *file) +{ + return 0; +} + +static inline int security_file_open(struct file *file) +{ + return 0; +} + +static inline int security_task_alloc(struct task_struct *task, + unsigned long clone_flags) +{ + return 0; +} + +static inline void security_task_free(struct task_struct *task) +{ } + +static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) +{ + return 0; +} + +static inline void security_cred_free(struct cred *cred) +{ } + +static inline int security_prepare_creds(struct cred *new, + const struct cred *old, + gfp_t gfp) +{ + return 0; +} + +static inline void security_transfer_creds(struct cred *new, + const struct cred *old) +{ +} + +static inline int security_kernel_act_as(struct cred *cred, u32 secid) +{ + return 0; +} + +static inline int security_kernel_create_files_as(struct cred *cred, + struct inode *inode) +{ + return 0; +} + +static inline int security_kernel_module_request(char *kmod_name) +{ + return 0; +} + +static inline int security_kernel_load_data(enum kernel_load_data_id id) +{ + return 0; +} + +static inline int security_kernel_read_file(struct file *file, + enum kernel_read_file_id id) +{ + return 0; +} + +static inline int security_kernel_post_read_file(struct file *file, + char *buf, loff_t size, + enum kernel_read_file_id id) +{ + return 0; +} + +static inline int security_task_fix_setuid(struct cred *new, + const struct cred *old, + int flags) +{ + return cap_task_fix_setuid(new, old, flags); +} + +static inline int security_task_setpgid(struct task_struct *p, pid_t pgid) +{ + 
return 0; +} + +static inline int security_task_getpgid(struct task_struct *p) +{ + return 0; +} + +static inline int security_task_getsid(struct task_struct *p) +{ + return 0; +} + +static inline void security_task_getsecid(struct task_struct *p, u32 *secid) +{ + *secid = 0; +} + +static inline int security_task_setnice(struct task_struct *p, int nice) +{ + return cap_task_setnice(p, nice); +} + +static inline int security_task_setioprio(struct task_struct *p, int ioprio) +{ + return cap_task_setioprio(p, ioprio); +} + +static inline int security_task_getioprio(struct task_struct *p) +{ + return 0; +} + +static inline int security_task_prlimit(const struct cred *cred, + const struct cred *tcred, + unsigned int flags) +{ + return 0; +} + +static inline int security_task_setrlimit(struct task_struct *p, + unsigned int resource, + struct rlimit *new_rlim) +{ + return 0; +} + +static inline int security_task_setscheduler(struct task_struct *p) +{ + return cap_task_setscheduler(p); +} + +static inline int security_task_getscheduler(struct task_struct *p) +{ + return 0; +} + +static inline int security_task_movememory(struct task_struct *p) +{ + return 0; +} + +static inline int security_task_kill(struct task_struct *p, + struct siginfo *info, int sig, + const struct cred *cred) +{ + return 0; +} + +static inline int security_task_prctl(int option, unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5) +{ + return cap_task_prctl(option, arg2, arg3, arg4, arg5); +} + +static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) +{ } + +static inline int security_ipc_permission(struct kern_ipc_perm *ipcp, + short flag) +{ + return 0; +} + +static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid) +{ + *secid = 0; +} + +static inline int security_msg_msg_alloc(struct msg_msg *msg) +{ + return 0; +} + +static inline void security_msg_msg_free(struct msg_msg *msg) +{ } + +static inline int security_msg_queue_alloc(struct kern_ipc_perm *msq) +{ + return 0; +} + +static inline void security_msg_queue_free(struct kern_ipc_perm *msq) +{ } + +static inline int security_msg_queue_associate(struct kern_ipc_perm *msq, + int msqflg) +{ + return 0; +} + +static inline int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd) +{ + return 0; +} + +static inline int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, + struct msg_msg *msg, int msqflg) +{ + return 0; +} + +static inline int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, + struct msg_msg *msg, + struct task_struct *target, + long type, int mode) +{ + return 0; +} + +static inline int security_shm_alloc(struct kern_ipc_perm *shp) +{ + return 0; +} + +static inline void security_shm_free(struct kern_ipc_perm *shp) +{ } + +static inline int security_shm_associate(struct kern_ipc_perm *shp, + int shmflg) +{ + return 0; +} + +static inline int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd) +{ + return 0; +} + +static inline int security_shm_shmat(struct kern_ipc_perm *shp, + char __user *shmaddr, int shmflg) +{ + return 0; +} + +static inline int security_sem_alloc(struct kern_ipc_perm *sma) +{ + return 0; +} + +static inline void security_sem_free(struct kern_ipc_perm *sma) +{ } + +static inline int security_sem_associate(struct kern_ipc_perm *sma, int semflg) +{ + return 0; +} + +static inline int security_sem_semctl(struct kern_ipc_perm *sma, int cmd) +{ + return 0; +} + +static inline int security_sem_semop(struct kern_ipc_perm *sma, + struct 
sembuf *sops, unsigned nsops, + int alter) +{ + return 0; +} + +static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode) +{ } + +static inline int security_getprocattr(struct task_struct *p, char *name, char **value) +{ + return -EINVAL; +} + +static inline int security_setprocattr(char *name, void *value, size_t size) +{ + return -EINVAL; +} + +static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb) +{ + return 0; +} + +static inline int security_ismaclabel(const char *name) +{ + return 0; +} + +static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +{ + return -EOPNOTSUPP; +} + +static inline int security_secctx_to_secid(const char *secdata, + u32 seclen, + u32 *secid) +{ + return -EOPNOTSUPP; +} + +static inline void security_release_secctx(char *secdata, u32 seclen) +{ +} + +static inline void security_inode_invalidate_secctx(struct inode *inode) +{ +} + +static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) +{ + return -EOPNOTSUPP; +} +static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) +{ + return -EOPNOTSUPP; +} +static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_SECURITY */ + +#ifdef CONFIG_SECURITY_NETWORK + +int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk); +int security_unix_may_send(struct socket *sock, struct socket *other); +int security_socket_create(int family, int type, int protocol, int kern); +int security_socket_post_create(struct socket *sock, int family, + int type, int protocol, int kern); +int security_socket_socketpair(struct socket *socka, struct socket *sockb); +int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen); +int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen); +int security_socket_listen(struct socket *sock, int backlog); +int security_socket_accept(struct socket *sock, struct socket *newsock); +int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size); +int security_socket_recvmsg(struct socket *sock, struct msghdr *msg, + int size, int flags); +int security_socket_getsockname(struct socket *sock); +int security_socket_getpeername(struct socket *sock); +int security_socket_getsockopt(struct socket *sock, int level, int optname); +int security_socket_setsockopt(struct socket *sock, int level, int optname); +int security_socket_shutdown(struct socket *sock, int how); +int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb); +int security_socket_getpeersec_stream(struct socket *sock, char __user *optval, + int __user *optlen, unsigned len); +int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid); +int security_sk_alloc(struct sock *sk, int family, gfp_t priority); +void security_sk_free(struct sock *sk); +void security_sk_clone(const struct sock *sk, struct sock *newsk); +void security_sk_classify_flow(struct sock *sk, struct flowi *fl); +void security_req_classify_flow(const struct request_sock *req, struct flowi *fl); +void security_sock_graft(struct sock*sk, struct socket *parent); +int security_inet_conn_request(struct sock *sk, + struct sk_buff *skb, struct request_sock *req); +void security_inet_csk_clone(struct sock *newsk, + const struct request_sock *req); +void security_inet_conn_established(struct sock *sk, + struct sk_buff *skb); +int 
security_secmark_relabel_packet(u32 secid); +void security_secmark_refcount_inc(void); +void security_secmark_refcount_dec(void); +int security_tun_dev_alloc_security(void **security); +void security_tun_dev_free_security(void *security); +int security_tun_dev_create(void); +int security_tun_dev_attach_queue(void *security); +int security_tun_dev_attach(struct sock *sk, void *security); +int security_tun_dev_open(void *security); +int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb); +int security_sctp_bind_connect(struct sock *sk, int optname, + struct sockaddr *address, int addrlen); +void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk, + struct sock *newsk); + +#else /* CONFIG_SECURITY_NETWORK */ +static inline int security_unix_stream_connect(struct sock *sock, + struct sock *other, + struct sock *newsk) +{ + return 0; +} + +static inline int security_unix_may_send(struct socket *sock, + struct socket *other) +{ + return 0; +} + +static inline int security_socket_create(int family, int type, + int protocol, int kern) +{ + return 0; +} + +static inline int security_socket_post_create(struct socket *sock, + int family, + int type, + int protocol, int kern) +{ + return 0; +} + +static inline int security_socket_socketpair(struct socket *socka, + struct socket *sockb) +{ + return 0; +} + +static inline int security_socket_bind(struct socket *sock, + struct sockaddr *address, + int addrlen) +{ + return 0; +} + +static inline int security_socket_connect(struct socket *sock, + struct sockaddr *address, + int addrlen) +{ + return 0; +} + +static inline int security_socket_listen(struct socket *sock, int backlog) +{ + return 0; +} + +static inline int security_socket_accept(struct socket *sock, + struct socket *newsock) +{ + return 0; +} + +static inline int security_socket_sendmsg(struct socket *sock, + struct msghdr *msg, int size) +{ + return 0; +} + +static inline int security_socket_recvmsg(struct socket *sock, + struct msghdr *msg, int size, + int flags) +{ + return 0; +} + +static inline int security_socket_getsockname(struct socket *sock) +{ + return 0; +} + +static inline int security_socket_getpeername(struct socket *sock) +{ + return 0; +} + +static inline int security_socket_getsockopt(struct socket *sock, + int level, int optname) +{ + return 0; +} + +static inline int security_socket_setsockopt(struct socket *sock, + int level, int optname) +{ + return 0; +} + +static inline int security_socket_shutdown(struct socket *sock, int how) +{ + return 0; +} +static inline int security_sock_rcv_skb(struct sock *sk, + struct sk_buff *skb) +{ + return 0; +} + +static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval, + int __user *optlen, unsigned len) +{ + return -ENOPROTOOPT; +} + +static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) +{ + return -ENOPROTOOPT; +} + +static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) +{ + return 0; +} + +static inline void security_sk_free(struct sock *sk) +{ +} + +static inline void security_sk_clone(const struct sock *sk, struct sock *newsk) +{ +} + +static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl) +{ +} + +static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) +{ +} + +static inline void security_sock_graft(struct sock *sk, struct socket *parent) +{ +} + +static inline int security_inet_conn_request(struct sock *sk, + struct 
sk_buff *skb, struct request_sock *req) +{ + return 0; +} + +static inline void security_inet_csk_clone(struct sock *newsk, + const struct request_sock *req) +{ +} + +static inline void security_inet_conn_established(struct sock *sk, + struct sk_buff *skb) +{ +} + +static inline int security_secmark_relabel_packet(u32 secid) +{ + return 0; +} + +static inline void security_secmark_refcount_inc(void) +{ +} + +static inline void security_secmark_refcount_dec(void) +{ +} + +static inline int security_tun_dev_alloc_security(void **security) +{ + return 0; +} + +static inline void security_tun_dev_free_security(void *security) +{ +} + +static inline int security_tun_dev_create(void) +{ + return 0; +} + +static inline int security_tun_dev_attach_queue(void *security) +{ + return 0; +} + +static inline int security_tun_dev_attach(struct sock *sk, void *security) +{ + return 0; +} + +static inline int security_tun_dev_open(void *security) +{ + return 0; +} + +static inline int security_sctp_assoc_request(struct sctp_endpoint *ep, + struct sk_buff *skb) +{ + return 0; +} + +static inline int security_sctp_bind_connect(struct sock *sk, int optname, + struct sockaddr *address, + int addrlen) +{ + return 0; +} + +static inline void security_sctp_sk_clone(struct sctp_endpoint *ep, + struct sock *sk, + struct sock *newsk) +{ +} +#endif /* CONFIG_SECURITY_NETWORK */ + +#ifdef CONFIG_SECURITY_INFINIBAND +int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey); +int security_ib_endport_manage_subnet(void *sec, const char *name, u8 port_num); +int security_ib_alloc_security(void **sec); +void security_ib_free_security(void *sec); +#else /* CONFIG_SECURITY_INFINIBAND */ +static inline int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey) +{ + return 0; +} + +static inline int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num) +{ + return 0; +} + +static inline int security_ib_alloc_security(void **sec) +{ + return 0; +} + +static inline void security_ib_free_security(void *sec) +{ +} +#endif /* CONFIG_SECURITY_INFINIBAND */ + +#ifdef CONFIG_SECURITY_NETWORK_XFRM + +int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); +int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); +void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); +int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); +int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx); +int security_xfrm_state_alloc_acquire(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, u32 secid); +int security_xfrm_state_delete(struct xfrm_state *x); +void security_xfrm_state_free(struct xfrm_state *x); +int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); +int security_xfrm_state_pol_flow_match(struct xfrm_state *x, + struct xfrm_policy *xp, + const struct flowi *fl); +int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid); +void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); + +#else /* CONFIG_SECURITY_NETWORK_XFRM */ + +static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx, + gfp_t gfp) +{ + return 0; +} + +static inline int security_xfrm_policy_clone(struct xfrm_sec_ctx *old, struct xfrm_sec_ctx **new_ctxp) +{ + return 0; +} + +static inline void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx) +{ +} + +static inline int security_xfrm_policy_delete(struct 
xfrm_sec_ctx *ctx) +{ + return 0; +} + +static inline int security_xfrm_state_alloc(struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx) +{ + return 0; +} + +static inline int security_xfrm_state_alloc_acquire(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, u32 secid) +{ + return 0; +} + +static inline void security_xfrm_state_free(struct xfrm_state *x) +{ +} + +static inline int security_xfrm_state_delete(struct xfrm_state *x) +{ + return 0; +} + +static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) +{ + return 0; +} + +static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x, + struct xfrm_policy *xp, const struct flowi *fl) +{ + return 1; +} + +static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) +{ + return 0; +} + +static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) +{ +} + +#endif /* CONFIG_SECURITY_NETWORK_XFRM */ + +#ifdef CONFIG_SECURITY_PATH +int security_path_unlink(const struct path *dir, struct dentry *dentry); +int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode); +int security_path_rmdir(const struct path *dir, struct dentry *dentry); +int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, + unsigned int dev); +int security_path_truncate(const struct path *path); +int security_path_symlink(const struct path *dir, struct dentry *dentry, + const char *old_name); +int security_path_link(struct dentry *old_dentry, const struct path *new_dir, + struct dentry *new_dentry); +int security_path_rename(const struct path *old_dir, struct dentry *old_dentry, + const struct path *new_dir, struct dentry *new_dentry, + unsigned int flags); +int security_path_chmod(const struct path *path, umode_t mode); +int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid); +int security_path_chroot(const struct path *path); +#else /* CONFIG_SECURITY_PATH */ +static inline int security_path_unlink(const struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static inline int security_path_mkdir(const struct path *dir, struct dentry *dentry, + umode_t mode) +{ + return 0; +} + +static inline int security_path_rmdir(const struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static inline int security_path_mknod(const struct path *dir, struct dentry *dentry, + umode_t mode, unsigned int dev) +{ + return 0; +} + +static inline int security_path_truncate(const struct path *path) +{ + return 0; +} + +static inline int security_path_symlink(const struct path *dir, struct dentry *dentry, + const char *old_name) +{ + return 0; +} + +static inline int security_path_link(struct dentry *old_dentry, + const struct path *new_dir, + struct dentry *new_dentry) +{ + return 0; +} + +static inline int security_path_rename(const struct path *old_dir, + struct dentry *old_dentry, + const struct path *new_dir, + struct dentry *new_dentry, + unsigned int flags) +{ + return 0; +} + +static inline int security_path_chmod(const struct path *path, umode_t mode) +{ + return 0; +} + +static inline int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid) +{ + return 0; +} + +static inline int security_path_chroot(const struct path *path) +{ + return 0; +} +#endif /* CONFIG_SECURITY_PATH */ + +#ifdef CONFIG_KEYS +#ifdef CONFIG_SECURITY + +int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); +void security_key_free(struct key *key); +int 
security_key_permission(key_ref_t key_ref, + const struct cred *cred, unsigned perm); +int security_key_getsecurity(struct key *key, char **_buffer); + +#else + +static inline int security_key_alloc(struct key *key, + const struct cred *cred, + unsigned long flags) +{ + return 0; +} + +static inline void security_key_free(struct key *key) +{ +} + +static inline int security_key_permission(key_ref_t key_ref, + const struct cred *cred, + unsigned perm) +{ + return 0; +} + +static inline int security_key_getsecurity(struct key *key, char **_buffer) +{ + *_buffer = NULL; + return 0; +} + +#endif +#endif /* CONFIG_KEYS */ + +#ifdef CONFIG_AUDIT +#ifdef CONFIG_SECURITY +int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule); +int security_audit_rule_known(struct audit_krule *krule); +int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule, + struct audit_context *actx); +void security_audit_rule_free(void *lsmrule); + +#else + +static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr, + void **lsmrule) +{ + return 0; +} + +static inline int security_audit_rule_known(struct audit_krule *krule) +{ + return 0; +} + +static inline int security_audit_rule_match(u32 secid, u32 field, u32 op, + void *lsmrule, struct audit_context *actx) +{ + return 0; +} + +static inline void security_audit_rule_free(void *lsmrule) +{ } + +#endif /* CONFIG_SECURITY */ +#endif /* CONFIG_AUDIT */ + +#ifdef CONFIG_SECURITYFS + +extern struct dentry *securityfs_create_file(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); +extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); +struct dentry *securityfs_create_symlink(const char *name, + struct dentry *parent, + const char *target, + const struct inode_operations *iops); +extern void securityfs_remove(struct dentry *dentry); + +#else /* CONFIG_SECURITYFS */ + +static inline struct dentry *securityfs_create_dir(const char *name, + struct dentry *parent) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *securityfs_create_file(const char *name, + umode_t mode, + struct dentry *parent, + void *data, + const struct file_operations *fops) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *securityfs_create_symlink(const char *name, + struct dentry *parent, + const char *target, + const struct inode_operations *iops) +{ + return ERR_PTR(-ENODEV); +} + +static inline void securityfs_remove(struct dentry *dentry) +{} + +#endif + +#ifdef CONFIG_BPF_SYSCALL +union bpf_attr; +struct bpf_map; +struct bpf_prog; +struct bpf_prog_aux; +#ifdef CONFIG_SECURITY +extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size); +extern int security_bpf_map(struct bpf_map *map, fmode_t fmode); +extern int security_bpf_prog(struct bpf_prog *prog); +extern int security_bpf_map_alloc(struct bpf_map *map); +extern void security_bpf_map_free(struct bpf_map *map); +extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux); +extern void security_bpf_prog_free(struct bpf_prog_aux *aux); +#else +static inline int security_bpf(int cmd, union bpf_attr *attr, + unsigned int size) +{ + return 0; +} + +static inline int security_bpf_map(struct bpf_map *map, fmode_t fmode) +{ + return 0; +} + +static inline int security_bpf_prog(struct bpf_prog *prog) +{ + return 0; +} + +static inline int security_bpf_map_alloc(struct bpf_map *map) +{ + return 0; +} + +static inline void security_bpf_map_free(struct bpf_map *map) +{ } + 
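/*
 * Editorial aside, not part of this patch: a hedged sketch of how these BPF
 * hooks are meant to be consumed. Callers invoke them unconditionally; with
 * CONFIG_SECURITY=n the stubs above simply return 0 ("allowed"). The wrapper
 * name "my_bpf_syscall_entry" is hypothetical.
 */
static int my_bpf_syscall_entry(int cmd, union bpf_attr *attr, unsigned int size)
{
	int err;

	err = security_bpf(cmd, attr, size);	/* 0 means access is allowed */
	if (err)
		return err;

	/* ... proceed with the actual bpf(2) handling ... */
	return 0;
}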
+static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux) +{ + return 0; +} + +static inline void security_bpf_prog_free(struct bpf_prog_aux *aux) +{ } +#endif /* CONFIG_SECURITY */ +#endif /* CONFIG_BPF_SYSCALL */ + +#ifdef CONFIG_SECURITY + +static inline char *alloc_secdata(void) +{ + return (char *)get_zeroed_page(GFP_KERNEL); +} + +static inline void free_secdata(void *secdata) +{ + free_page((unsigned long)secdata); +} + +#else + +static inline char *alloc_secdata(void) +{ + return (char *)1; +} + +static inline void free_secdata(void *secdata) +{ } +#endif /* CONFIG_SECURITY */ + +#endif /* ! __LINUX_SECURITY_H */ + diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h new file mode 100644 index 000000000..04b124fca --- /dev/null +++ b/include/linux/sed-opal.h @@ -0,0 +1,75 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Authors: + * Rafael Antognolli + * Scott Bauer + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef LINUX_OPAL_H +#define LINUX_OPAL_H + +#include +#include + +struct opal_dev; + +typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp, void *buffer, + size_t len, bool send); + +#ifdef CONFIG_BLK_SED_OPAL +void free_opal_dev(struct opal_dev *dev); +bool opal_unlock_from_suspend(struct opal_dev *dev); +struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv); +int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr); + +static inline bool is_sed_ioctl(unsigned int cmd) +{ + switch (cmd) { + case IOC_OPAL_SAVE: + case IOC_OPAL_LOCK_UNLOCK: + case IOC_OPAL_TAKE_OWNERSHIP: + case IOC_OPAL_ACTIVATE_LSP: + case IOC_OPAL_SET_PW: + case IOC_OPAL_ACTIVATE_USR: + case IOC_OPAL_REVERT_TPR: + case IOC_OPAL_LR_SETUP: + case IOC_OPAL_ADD_USR_TO_LR: + case IOC_OPAL_ENABLE_DISABLE_MBR: + case IOC_OPAL_ERASE_LR: + case IOC_OPAL_SECURE_ERASE_LR: + return true; + } + return false; +} +#else +static inline void free_opal_dev(struct opal_dev *dev) +{ +} + +static inline bool is_sed_ioctl(unsigned int cmd) +{ + return false; +} + +static inline int sed_ioctl(struct opal_dev *dev, unsigned int cmd, + void __user *ioctl_ptr) +{ + return 0; +} +static inline bool opal_unlock_from_suspend(struct opal_dev *dev) +{ + return false; +} +#define init_opal_dev(data, send_recv) NULL +#endif /* CONFIG_BLK_SED_OPAL */ +#endif /* LINUX_OPAL_H */ diff --git a/include/linux/seg6.h b/include/linux/seg6.h new file mode 100644 index 000000000..369066a33 --- /dev/null +++ b/include/linux/seg6.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SEG6_H +#define _LINUX_SEG6_H + +#include + +#endif diff --git a/include/linux/seg6_genl.h b/include/linux/seg6_genl.h new file mode 100644 index 000000000..2f25a3cd7 --- /dev/null +++ b/include/linux/seg6_genl.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SEG6_GENL_H +#define _LINUX_SEG6_GENL_H + +#include + +#endif diff --git a/include/linux/seg6_hmac.h b/include/linux/seg6_hmac.h new file mode 100644 index 000000000..16e59595e --- /dev/null +++ b/include/linux/seg6_hmac.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef 
_LINUX_SEG6_HMAC_H +#define _LINUX_SEG6_HMAC_H + +#include + +#endif diff --git a/include/linux/seg6_iptunnel.h b/include/linux/seg6_iptunnel.h new file mode 100644 index 000000000..d07df7fc9 --- /dev/null +++ b/include/linux/seg6_iptunnel.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SEG6_IPTUNNEL_H +#define _LINUX_SEG6_IPTUNNEL_H + +#include + +#endif diff --git a/include/linux/seg6_local.h b/include/linux/seg6_local.h new file mode 100644 index 000000000..ee63e76fe --- /dev/null +++ b/include/linux/seg6_local.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SEG6_LOCAL_H +#define _LINUX_SEG6_LOCAL_H + +#include + +#endif diff --git a/include/linux/selection.h b/include/linux/selection.h new file mode 100644 index 000000000..77a1fb7c3 --- /dev/null +++ b/include/linux/selection.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * selection.h + * + * Interface between console.c, tty_io.c, vt.c, vc_screen.c and selection.c + */ + +#ifndef _LINUX_SELECTION_H_ +#define _LINUX_SELECTION_H_ + +#include +#include + +struct tty_struct; + +struct tty_struct; +struct vc_data; + +extern void clear_selection(void); +extern int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty); +extern int paste_selection(struct tty_struct *tty); +extern int sel_loadlut(char __user *p); +extern int mouse_reporting(void); +extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry); + +bool vc_is_sel(struct vc_data *vc); + +extern int console_blanked; + +extern const unsigned char color_table[]; +extern unsigned char default_red[]; +extern unsigned char default_grn[]; +extern unsigned char default_blu[]; + +extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed); +extern u16 screen_glyph(struct vc_data *vc, int offset); +extern u32 screen_glyph_unicode(struct vc_data *vc, int offset); +extern void complement_pos(struct vc_data *vc, int offset); +extern void invert_screen(struct vc_data *vc, int offset, int count, int shift); + +extern void getconsxy(struct vc_data *vc, unsigned char *p); +extern void putconsxy(struct vc_data *vc, unsigned char *p); + +extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org); +extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org); +extern void vcs_scr_updated(struct vc_data *vc); + +extern int vc_uniscr_check(struct vc_data *vc); +extern void vc_uniscr_copy_line(struct vc_data *vc, void *dest, int viewed, + unsigned int row, unsigned int col, + unsigned int nr); + +#endif diff --git a/include/linux/selinux.h b/include/linux/selinux.h new file mode 100644 index 000000000..44f459612 --- /dev/null +++ b/include/linux/selinux.h @@ -0,0 +1,35 @@ +/* + * SELinux services exported to the rest of the kernel. + * + * Author: James Morris + * + * Copyright (C) 2005 Red Hat, Inc., James Morris + * Copyright (C) 2006 Trusted Computer Solutions, Inc. + * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, + * as published by the Free Software Foundation. + */ +#ifndef _LINUX_SELINUX_H +#define _LINUX_SELINUX_H + +struct selinux_audit_rule; +struct audit_context; +struct kern_ipc_perm; + +#ifdef CONFIG_SECURITY_SELINUX + +/** + * selinux_is_enabled - is SELinux enabled? 
+ */ +bool selinux_is_enabled(void); +#else + +static inline bool selinux_is_enabled(void) +{ + return false; +} +#endif /* CONFIG_SECURITY_SELINUX */ + +#endif /* _LINUX_SELINUX_H */ diff --git a/include/linux/sem.h b/include/linux/sem.h new file mode 100644 index 000000000..5608a500c --- /dev/null +++ b/include/linux/sem.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SEM_H +#define _LINUX_SEM_H + +#include + +struct task_struct; +struct sem_undo_list; + +#ifdef CONFIG_SYSVIPC + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk); +extern void exit_sem(struct task_struct *tsk); + +#else + +struct sysv_sem { + /* empty */ +}; + +static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) +{ + return 0; +} + +static inline void exit_sem(struct task_struct *tsk) +{ + return; +} +#endif + +#endif /* _LINUX_SEM_H */ diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h new file mode 100644 index 000000000..11c86fbfe --- /dev/null +++ b/include/linux/semaphore.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2008 Intel Corporation + * Author: Matthew Wilcox + * + * Distributed under the terms of the GNU GPL, version 2 + * + * Please see kernel/locking/semaphore.c for documentation of these functions + */ +#ifndef __LINUX_SEMAPHORE_H +#define __LINUX_SEMAPHORE_H + +#include +#include + +/* Please don't access any members of this structure directly */ +struct semaphore { + raw_spinlock_t lock; + unsigned int count; + struct list_head wait_list; +}; + +#define __SEMAPHORE_INITIALIZER(name, n) \ +{ \ + .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \ + .count = n, \ + .wait_list = LIST_HEAD_INIT((name).wait_list), \ +} + +#define DEFINE_SEMAPHORE(name) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) + +static inline void sema_init(struct semaphore *sem, int val) +{ + static struct lock_class_key __key; + *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); + lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); +} + +extern void down(struct semaphore *sem); +extern int __must_check down_interruptible(struct semaphore *sem); +extern int __must_check down_killable(struct semaphore *sem); +extern int __must_check down_trylock(struct semaphore *sem); +extern int __must_check down_timeout(struct semaphore *sem, long jiffies); +extern void up(struct semaphore *sem); + +#endif /* __LINUX_SEMAPHORE_H */ diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h new file mode 100644 index 000000000..7cc952282 --- /dev/null +++ b/include/linux/seq_buf.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SEQ_BUF_H +#define _LINUX_SEQ_BUF_H + +#include + +/* + * Trace sequences are used to allow a function to call several other functions + * to create a string of data to use. + */ + +/** + * seq_buf - seq buffer structure + * @buffer: pointer to the buffer + * @size: size of the buffer + * @len: the amount of data inside the buffer + * @readpos: The next position to read in the buffer. + */ +struct seq_buf { + char *buffer; + size_t size; + size_t len; + loff_t readpos; +}; + +static inline void seq_buf_clear(struct seq_buf *s) +{ + s->len = 0; + s->readpos = 0; +} + +static inline void +seq_buf_init(struct seq_buf *s, char *buf, unsigned int size) +{ + s->buffer = buf; + s->size = size; + seq_buf_clear(s); +} + +/* + * seq_buf have a buffer that might overflow. 
When this happens + * the len and size are set to be equal. + */ +static inline bool +seq_buf_has_overflowed(struct seq_buf *s) +{ + return s->len > s->size; +} + +static inline void +seq_buf_set_overflow(struct seq_buf *s) +{ + s->len = s->size + 1; +} + +/* + * How much buffer is left on the seq_buf? + */ +static inline unsigned int +seq_buf_buffer_left(struct seq_buf *s) +{ + if (seq_buf_has_overflowed(s)) + return 0; + + return s->size - s->len; +} + +/* How much buffer was written? */ +static inline unsigned int seq_buf_used(struct seq_buf *s) +{ + return min(s->len, s->size); +} + +/** + * seq_buf_get_buf - get buffer to write arbitrary data to + * @s: the seq_buf handle + * @bufp: the beginning of the buffer is stored here + * + * Return the number of bytes available in the buffer, or zero if + * there's no space. + */ +static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp) +{ + WARN_ON(s->len > s->size + 1); + + if (s->len < s->size) { + *bufp = s->buffer + s->len; + return s->size - s->len; + } + + *bufp = NULL; + return 0; +} + +/** + * seq_buf_commit - commit data to the buffer + * @s: the seq_buf handle + * @num: the number of bytes to commit + * + * Commit @num bytes of data written to a buffer previously acquired + * by seq_buf_get. To signal an error condition, or that the data + * didn't fit in the available space, pass a negative @num value. + */ +static inline void seq_buf_commit(struct seq_buf *s, int num) +{ + if (num < 0) { + seq_buf_set_overflow(s); + } else { + /* num must be negative on overflow */ + BUG_ON(s->len + num > s->size); + s->len += num; + } +} + +extern __printf(2, 3) +int seq_buf_printf(struct seq_buf *s, const char *fmt, ...); +extern __printf(2, 0) +int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args); +extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s); +extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, + int cnt); +extern int seq_buf_puts(struct seq_buf *s, const char *str); +extern int seq_buf_putc(struct seq_buf *s, unsigned char c); +extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); +extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, + unsigned int len); +extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); + +#ifdef CONFIG_BINARY_PRINTF +extern int +seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary); +#endif + +#endif /* _LINUX_SEQ_BUF_H */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h new file mode 100644 index 000000000..a121982af --- /dev/null +++ b/include/linux/seq_file.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SEQ_FILE_H +#define _LINUX_SEQ_FILE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct seq_operations; + +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + size_t pad_until; + loff_t index; + loff_t read_pos; + u64 version; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + const struct file *file; + void *private; +}; + +struct seq_operations { + void * (*start) (struct seq_file *m, loff_t *pos); + void (*stop) (struct seq_file *m, void *v); + void * (*next) (struct seq_file *m, void *v, loff_t *pos); + int (*show) (struct seq_file *m, void *v); +}; + +#define SEQ_SKIP 1 + +/** + * seq_has_overflowed - check if the buffer has overflowed + * @m: the seq_file handle + * + * seq_files have a buffer which may overflow. 
When this happens a larger + * buffer is reallocated and all the data will be printed again. + * The overflow state is true when m->count == m->size. + * + * Returns true if the buffer received more than it can hold. + */ +static inline bool seq_has_overflowed(struct seq_file *m) +{ + return m->count == m->size; +} + +/** + * seq_get_buf - get buffer to write arbitrary data to + * @m: the seq_file handle + * @bufp: the beginning of the buffer is stored here + * + * Return the number of bytes available in the buffer, or zero if + * there's no space. + */ +static inline size_t seq_get_buf(struct seq_file *m, char **bufp) +{ + BUG_ON(m->count > m->size); + if (m->count < m->size) + *bufp = m->buf + m->count; + else + *bufp = NULL; + + return m->size - m->count; +} + +/** + * seq_commit - commit data to the buffer + * @m: the seq_file handle + * @num: the number of bytes to commit + * + * Commit @num bytes of data written to a buffer previously acquired + * by seq_buf_get. To signal an error condition, or that the data + * didn't fit in the available space, pass a negative @num value. + */ +static inline void seq_commit(struct seq_file *m, int num) +{ + if (num < 0) { + m->count = m->size; + } else { + BUG_ON(m->count + num > m->size); + m->count += num; + } +} + +/** + * seq_setwidth - set padding width + * @m: the seq_file handle + * @size: the max number of bytes to pad. + * + * Call seq_setwidth() for setting max width, then call seq_printf() etc. and + * finally call seq_pad() to pad the remaining bytes. + */ +static inline void seq_setwidth(struct seq_file *m, size_t size) +{ + m->pad_until = m->count + size; +} +void seq_pad(struct seq_file *m, char c); + +char *mangle_path(char *s, const char *p, const char *esc); +int seq_open(struct file *, const struct seq_operations *); +ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); +loff_t seq_lseek(struct file *, loff_t, int); +int seq_release(struct inode *, struct file *); +int seq_write(struct seq_file *seq, const void *data, size_t len); + +__printf(2, 0) +void seq_vprintf(struct seq_file *m, const char *fmt, va_list args); +__printf(2, 3) +void seq_printf(struct seq_file *m, const char *fmt, ...); +void seq_putc(struct seq_file *m, char c); +void seq_puts(struct seq_file *m, const char *s); +void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter, + unsigned long long num, unsigned int width); +void seq_put_decimal_ull(struct seq_file *m, const char *delimiter, + unsigned long long num); +void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num); +void seq_put_hex_ll(struct seq_file *m, const char *delimiter, + unsigned long long v, unsigned int width); + +void seq_escape(struct seq_file *m, const char *s, const char *esc); + +void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, const void *buf, size_t len, + bool ascii); + +int seq_path(struct seq_file *, const struct path *, const char *); +int seq_file_path(struct seq_file *, struct file *, const char *); +int seq_dentry(struct seq_file *, struct dentry *, const char *); +int seq_path_root(struct seq_file *m, const struct path *path, + const struct path *root, const char *esc); + +int single_open(struct file *, int (*)(struct seq_file *, void *), void *); +int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t); +int single_release(struct inode *, struct file *); +void *__seq_open_private(struct file *, const struct seq_operations *, int); 
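/*
 * Editorial sketch, not part of this patch: the usual single_open() pattern
 * that the declarations above support (roughly what DEFINE_SHOW_ATTRIBUTE
 * below expands to, minus .owner). The "my_*" names are hypothetical.
 */
static int my_show(struct seq_file *m, void *v)
{
	seq_printf(m, "value: %d\n", 42);
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_show, inode->i_private);
}

static const struct file_operations my_fops = {
	.open		= my_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};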
+int seq_open_private(struct file *, const struct seq_operations *, int); +int seq_release_private(struct inode *, struct file *); + +#define DEFINE_SHOW_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +static inline struct user_namespace *seq_user_ns(struct seq_file *seq) +{ +#ifdef CONFIG_USER_NS + return seq->file->f_cred->user_ns; +#else + extern struct user_namespace init_user_ns; + return &init_user_ns; +#endif +} + +/** + * seq_show_options - display mount options with appropriate escapes. + * @m: the seq_file handle + * @name: the mount option name + * @value: the mount option name's value, can be NULL + */ +static inline void seq_show_option(struct seq_file *m, const char *name, + const char *value) +{ + seq_putc(m, ','); + seq_escape(m, name, ",= \t\n\\"); + if (value) { + seq_putc(m, '='); + seq_escape(m, value, ", \t\n\\"); + } +} + +/** + * seq_show_option_n - display mount options with appropriate escapes + * where @value must be a specific length. + * @m: the seq_file handle + * @name: the mount option name + * @value: the mount option name's value, cannot be NULL + * @length: the length of @value to display + * + * This is a macro since this uses "length" to define the size of the + * stack buffer. + */ +#define seq_show_option_n(m, name, value, length) { \ + char val_buf[length + 1]; \ + strncpy(val_buf, value, length); \ + val_buf[length] = '\0'; \ + seq_show_option(m, name, val_buf); \ +} + +#define SEQ_START_TOKEN ((void *)1) +/* + * Helpers for iteration over list_head-s in seq_files + */ + +extern struct list_head *seq_list_start(struct list_head *head, + loff_t pos); +extern struct list_head *seq_list_start_head(struct list_head *head, + loff_t pos); +extern struct list_head *seq_list_next(void *v, struct list_head *head, + loff_t *ppos); + +/* + * Helpers for iteration over hlist_head-s in seq_files + */ + +extern struct hlist_node *seq_hlist_start(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head, + loff_t *ppos); + +extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_next_rcu(void *v, + struct hlist_head *head, + loff_t *ppos); + +/* Helpers for iterating over per-cpu hlist_head-s in seq_files */ +extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos); + +extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos); + +void seq_file_init(void); +#endif diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h new file mode 100644 index 000000000..0fdbe1ddd --- /dev/null +++ b/include/linux/seq_file_net.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SEQ_FILE_NET_H__ +#define __SEQ_FILE_NET_H__ + +#include + +struct net; +extern struct net init_net; + +struct seq_net_private { +#ifdef CONFIG_NET_NS + struct net *net; +#endif +}; + +static inline struct net *seq_file_net(struct seq_file *seq) +{ +#ifdef 
CONFIG_NET_NS + return ((struct seq_net_private *)seq->private)->net; +#else + return &init_net; +#endif +} + +/* + * This one is needed for proc_create_net_single since net is stored directly + * in private not as a struct i.e. seq_file_net can't be used. + */ +static inline struct net *seq_file_single_net(struct seq_file *seq) +{ +#ifdef CONFIG_NET_NS + return (struct net *)seq->private; +#else + return &init_net; +#endif +} + +#endif diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h new file mode 100644 index 000000000..a42a29952 --- /dev/null +++ b/include/linux/seqlock.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SEQLOCK_H +#define __LINUX_SEQLOCK_H +/* + * Reader/writer consistent mechanism without starving writers. This type of + * lock is for data where the reader wants a consistent set of information + * and is willing to retry if the information changes. There are two types + * of readers: + * 1. Sequence readers which never block a writer but they may have to retry + * if a writer is in progress by detecting change in sequence number. + * Writers do not wait for a sequence reader. + * 2. Locking readers which will wait if a writer or another locking reader + * is in progress. A locking reader in progress will also block a writer + * from going forward. Unlike the regular rwlock, the read lock here is + * exclusive so that only one locking reader can get it. + * + * This is not as cache friendly as brlock. Also, this may not work well + * for data that contains pointers, because any writer could + * invalidate a pointer that a reader was following. + * + * Expected non-blocking reader usage: + * do { + * seq = read_seqbegin(&foo); + * ... + * } while (read_seqretry(&foo, seq)); + * + * + * On non-SMP the spin locks disappear but the writer still needs + * to increment the sequence variables because an interrupt routine could + * change the state of the data. + * + * Based on x86_64 vsyscall gettimeofday + * by Keith Owens and Andrea Arcangeli + */ + +#include +#include +#include +#include +#include + +/* + * Version using sequence counter only. + * This can be used when code has its own mutex protecting the + * update, starting before the write_seqcount_begin() and ending + * after the write_seqcount_end().
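 * For illustration only (editorial sketch, not part of the original comment;
 * "my_lock", "my_seq" and "struct payload" are hypothetical):
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static seqcount_t my_seq = SEQCNT_ZERO(my_seq);
 *	static struct payload shared;
 *
 *	void writer(const struct payload *src)
 *	{
 *		mutex_lock(&my_lock);
 *		write_seqcount_begin(&my_seq);
 *		shared = *src;
 *		write_seqcount_end(&my_seq);
 *		mutex_unlock(&my_lock);
 *	}
 *
 *	void reader(struct payload *out)
 *	{
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&my_seq);
 *			*out = shared;
 *		} while (read_seqcount_retry(&my_seq, seq));
 *	}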
+ */ +typedef struct seqcount { + unsigned sequence; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} seqcount_t; + +static inline void __seqcount_init(seqcount_t *s, const char *name, + struct lock_class_key *key) +{ + /* + * Make sure we are not reinitializing a held lock: + */ + lockdep_init_map(&s->dep_map, name, key, 0); + s->sequence = 0; +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SEQCOUNT_DEP_MAP_INIT(lockname) \ + .dep_map = { .name = #lockname } \ + +# define seqcount_init(s) \ + do { \ + static struct lock_class_key __key; \ + __seqcount_init((s), #s, &__key); \ + } while (0) + +static inline void seqcount_lockdep_reader_access(const seqcount_t *s) +{ + seqcount_t *l = (seqcount_t *)s; + unsigned long flags; + + local_irq_save(flags); + seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_); + seqcount_release(&l->dep_map, 1, _RET_IP_); + local_irq_restore(flags); +} + +#else +# define SEQCOUNT_DEP_MAP_INIT(lockname) +# define seqcount_init(s) __seqcount_init(s, NULL, NULL) +# define seqcount_lockdep_reader_access(x) +#endif + +#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)} + + +/** + * __read_seqcount_begin - begin a seq-read critical section (without barrier) + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() + * barrier. Callers should ensure that smp_rmb() or equivalent ordering is + * provided before actually loading any of the variables that are to be + * protected in this critical section. + * + * Use carefully, only in critical code, and comment how the barrier is + * provided. + */ +static inline unsigned __read_seqcount_begin(const seqcount_t *s) +{ + unsigned ret; + +repeat: + ret = READ_ONCE(s->sequence); + if (unlikely(ret & 1)) { + cpu_relax(); + goto repeat; + } + return ret; +} + +/** + * raw_read_seqcount - Read the raw seqcount + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * raw_read_seqcount opens a read critical section of the given + * seqcount without any lockdep checking and without checking or + * masking the LSB. Calling code is responsible for handling that. + */ +static inline unsigned raw_read_seqcount(const seqcount_t *s) +{ + unsigned ret = READ_ONCE(s->sequence); + smp_rmb(); + return ret; +} + +/** + * raw_read_seqcount_begin - start seq-read critical section w/o lockdep + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * raw_read_seqcount_begin opens a read critical section of the given + * seqcount, but without any lockdep checking. Validity of the critical + * section is tested by checking read_seqcount_retry function. + */ +static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) +{ + unsigned ret = __read_seqcount_begin(s); + smp_rmb(); + return ret; +} + +/** + * read_seqcount_begin - begin a seq-read critical section + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * read_seqcount_begin opens a read critical section of the given seqcount. + * Validity of the critical section is tested by checking read_seqcount_retry + * function. 
+ */ +static inline unsigned read_seqcount_begin(const seqcount_t *s) +{ + seqcount_lockdep_reader_access(s); + return raw_read_seqcount_begin(s); +} + +/** + * raw_seqcount_begin - begin a seq-read critical section + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * raw_seqcount_begin opens a read critical section of the given seqcount. + * Validity of the critical section is tested by checking read_seqcount_retry + * function. + * + * Unlike read_seqcount_begin(), this function will not wait for the count + * to stabilize. If a writer is active when we begin, we will fail the + * read_seqcount_retry() instead of stabilizing at the beginning of the + * critical section. + */ +static inline unsigned raw_seqcount_begin(const seqcount_t *s) +{ + unsigned ret = READ_ONCE(s->sequence); + smp_rmb(); + return ret & ~1; +} + +/** + * __read_seqcount_retry - end a seq-read critical section (without barrier) + * @s: pointer to seqcount_t + * @start: count, from read_seqcount_begin + * Returns: 1 if retry is required, else 0 + * + * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() + * barrier. Callers should ensure that smp_rmb() or equivalent ordering is + * provided before actually loading any of the variables that are to be + * protected in this critical section. + * + * Use carefully, only in critical code, and comment how the barrier is + * provided. + */ +static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) +{ + return unlikely(s->sequence != start); +} + +/** + * read_seqcount_retry - end a seq-read critical section + * @s: pointer to seqcount_t + * @start: count, from read_seqcount_begin + * Returns: 1 if retry is required, else 0 + * + * read_seqcount_retry closes a read critical section of the given seqcount. + * If the critical section was invalid, it must be ignored (and typically + * retried). + */ +static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) +{ + smp_rmb(); + return __read_seqcount_retry(s, start); +} + + + +static inline void raw_write_seqcount_begin(seqcount_t *s) +{ + s->sequence++; + smp_wmb(); +} + +static inline void raw_write_seqcount_end(seqcount_t *s) +{ + smp_wmb(); + s->sequence++; +} + +/** + * raw_write_seqcount_barrier - do a seq write barrier + * @s: pointer to seqcount_t + * + * This can be used to provide an ordering guarantee instead of the + * usual consistency guarantee. It is one wmb cheaper, because we can + * collapse the two back-to-back wmb()s. + * + * Note that, writes surrounding the barrier should be declared atomic (e.g. + * via WRITE_ONCE): a) to ensure the writes become visible to other threads + * atomically, avoiding compiler optimizations; b) to document which writes are + * meant to propagate to the reader critical section. This is necessary because + * neither writes before and after the barrier are enclosed in a seq-writer + * critical section that would ensure readers are aware of ongoing writes. 
+ * + * seqcount_t seq; + * bool X = true, Y = false; + * + * void read(void) + * { + * bool x, y; + * + * do { + * int s = read_seqcount_begin(&seq); + * + * x = X; y = Y; + * + * } while (read_seqcount_retry(&seq, s)); + * + * BUG_ON(!x && !y); + * } + * + * void write(void) + * { + * WRITE_ONCE(Y, true); + * + * raw_write_seqcount_barrier(seq); + * + * WRITE_ONCE(X, false); + * } + */ +static inline void raw_write_seqcount_barrier(seqcount_t *s) +{ + s->sequence++; + smp_wmb(); + s->sequence++; +} + +static inline int raw_read_seqcount_latch(seqcount_t *s) +{ + /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ + int seq = READ_ONCE(s->sequence); /* ^^^ */ + return seq; +} + +/** + * raw_write_seqcount_latch - redirect readers to even/odd copy + * @s: pointer to seqcount_t + * + * The latch technique is a multiversion concurrency control method that allows + * queries during non-atomic modifications. If you can guarantee queries never + * interrupt the modification -- e.g. the concurrency is strictly between CPUs + * -- you most likely do not need this. + * + * Where the traditional RCU/lockless data structures rely on atomic + * modifications to ensure queries observe either the old or the new state the + * latch allows the same for non-atomic updates. The trade-off is doubling the + * cost of storage; we have to maintain two copies of the entire data + * structure. + * + * Very simply put: we first modify one copy and then the other. This ensures + * there is always one copy in a stable state, ready to give us an answer. + * + * The basic form is a data structure like: + * + * struct latch_struct { + * seqcount_t seq; + * struct data_struct data[2]; + * }; + * + * Where a modification, which is assumed to be externally serialized, does the + * following: + * + * void latch_modify(struct latch_struct *latch, ...) + * { + * smp_wmb(); <- Ensure that the last data[1] update is visible + * latch->seq++; + * smp_wmb(); <- Ensure that the seqcount update is visible + * + * modify(latch->data[0], ...); + * + * smp_wmb(); <- Ensure that the data[0] update is visible + * latch->seq++; + * smp_wmb(); <- Ensure that the seqcount update is visible + * + * modify(latch->data[1], ...); + * } + * + * The query will have a form like: + * + * struct entry *latch_query(struct latch_struct *latch, ...) + * { + * struct entry *entry; + * unsigned seq, idx; + * + * do { + * seq = raw_read_seqcount_latch(&latch->seq); + * + * idx = seq & 0x01; + * entry = data_query(latch->data[idx], ...); + * + * smp_rmb(); + * } while (seq != latch->seq); + * + * return entry; + * } + * + * So during the modification, queries are first redirected to data[1]. Then we + * modify data[0]. When that is complete, we redirect queries back to data[0] + * and we can modify data[1]. + * + * NOTE: The non-requirement for atomic modifications does _NOT_ include + * the publishing of new entries in the case where data is a dynamic + * data structure. + * + * An iteration might start in data[0] and get suspended long enough + * to miss an entire modification sequence, once it resumes it might + * observe the new entry. + * + * NOTE: When data is a dynamic data structure; one should use regular RCU + * patterns to manage the lifetimes of the objects within. 
+ */ +static inline void raw_write_seqcount_latch(seqcount_t *s) +{ + smp_wmb(); /* prior stores before incrementing "sequence" */ + s->sequence++; + smp_wmb(); /* increment "sequence" before following stores */ +} + +/* + * Sequence counter only version assumes that callers are using their + * own mutexing. + */ +static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) +{ + raw_write_seqcount_begin(s); + seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); +} + +static inline void write_seqcount_begin(seqcount_t *s) +{ + write_seqcount_begin_nested(s, 0); +} + +static inline void write_seqcount_end(seqcount_t *s) +{ + seqcount_release(&s->dep_map, 1, _RET_IP_); + raw_write_seqcount_end(s); +} + +/** + * write_seqcount_invalidate - invalidate in-progress read-side seq operations + * @s: pointer to seqcount_t + * + * After write_seqcount_invalidate, no read-side seq operations will complete + * successfully and see data older than this. + */ +static inline void write_seqcount_invalidate(seqcount_t *s) +{ + smp_wmb(); + s->sequence+=2; +} + +typedef struct { + struct seqcount seqcount; + spinlock_t lock; +} seqlock_t; + +/* + * These macros triggered gcc-3.x compile-time problems. We think these are + * OK now. Be cautious. + */ +#define __SEQLOCK_UNLOCKED(lockname) \ + { \ + .seqcount = SEQCNT_ZERO(lockname), \ + .lock = __SPIN_LOCK_UNLOCKED(lockname) \ + } + +#define seqlock_init(x) \ + do { \ + seqcount_init(&(x)->seqcount); \ + spin_lock_init(&(x)->lock); \ + } while (0) + +#define DEFINE_SEQLOCK(x) \ + seqlock_t x = __SEQLOCK_UNLOCKED(x) + +/* + * Read side functions for starting and finalizing a read side section. + */ +static inline unsigned read_seqbegin(const seqlock_t *sl) +{ + return read_seqcount_begin(&sl->seqcount); +} + +static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) +{ + return read_seqcount_retry(&sl->seqcount, start); +} + +/* + * Lock out other writers and update the count. + * Acts like a normal spin_lock/unlock. + * Don't need preempt_disable() because that is in the spin_lock already. + */ +static inline void write_seqlock(seqlock_t *sl) +{ + spin_lock(&sl->lock); + write_seqcount_begin(&sl->seqcount); +} + +static inline void write_sequnlock(seqlock_t *sl) +{ + write_seqcount_end(&sl->seqcount); + spin_unlock(&sl->lock); +} + +static inline void write_seqlock_bh(seqlock_t *sl) +{ + spin_lock_bh(&sl->lock); + write_seqcount_begin(&sl->seqcount); +} + +static inline void write_sequnlock_bh(seqlock_t *sl) +{ + write_seqcount_end(&sl->seqcount); + spin_unlock_bh(&sl->lock); +} + +static inline void write_seqlock_irq(seqlock_t *sl) +{ + spin_lock_irq(&sl->lock); + write_seqcount_begin(&sl->seqcount); +} + +static inline void write_sequnlock_irq(seqlock_t *sl) +{ + write_seqcount_end(&sl->seqcount); + spin_unlock_irq(&sl->lock); +} + +static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) +{ + unsigned long flags; + + spin_lock_irqsave(&sl->lock, flags); + write_seqcount_begin(&sl->seqcount); + return flags; +} + +#define write_seqlock_irqsave(lock, flags) \ + do { flags = __write_seqlock_irqsave(lock); } while (0) + +static inline void +write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) +{ + write_seqcount_end(&sl->seqcount); + spin_unlock_irqrestore(&sl->lock, flags); +} + +/* + * A locking reader exclusively locks out other writers and locking readers, + * but doesn't update the sequence number. Acts like a normal spin_lock/unlock. 
+ * Don't need preempt_disable() because that is in the spin_lock already. + */ +static inline void read_seqlock_excl(seqlock_t *sl) +{ + spin_lock(&sl->lock); +} + +static inline void read_sequnlock_excl(seqlock_t *sl) +{ + spin_unlock(&sl->lock); +} + +/** + * read_seqbegin_or_lock - begin a sequence number check or locking block + * @lock: sequence lock + * @seq : sequence number to be checked + * + * First try it once optimistically without taking the lock. If that fails, + * take the lock. The sequence number is also used as a marker for deciding + * whether to be a reader (even) or writer (odd). + * N.B. seq must be initialized to an even number to begin with. + */ +static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) +{ + if (!(*seq & 1)) /* Even */ + *seq = read_seqbegin(lock); + else /* Odd */ + read_seqlock_excl(lock); +} + +static inline int need_seqretry(seqlock_t *lock, int seq) +{ + return !(seq & 1) && read_seqretry(lock, seq); +} + +static inline void done_seqretry(seqlock_t *lock, int seq) +{ + if (seq & 1) + read_sequnlock_excl(lock); +} + +static inline void read_seqlock_excl_bh(seqlock_t *sl) +{ + spin_lock_bh(&sl->lock); +} + +static inline void read_sequnlock_excl_bh(seqlock_t *sl) +{ + spin_unlock_bh(&sl->lock); +} + +static inline void read_seqlock_excl_irq(seqlock_t *sl) +{ + spin_lock_irq(&sl->lock); +} + +static inline void read_sequnlock_excl_irq(seqlock_t *sl) +{ + spin_unlock_irq(&sl->lock); +} + +static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl) +{ + unsigned long flags; + + spin_lock_irqsave(&sl->lock, flags); + return flags; +} + +#define read_seqlock_excl_irqsave(lock, flags) \ + do { flags = __read_seqlock_excl_irqsave(lock); } while (0) + +static inline void +read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) +{ + spin_unlock_irqrestore(&sl->lock, flags); +} + +static inline unsigned long +read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) +{ + unsigned long flags = 0; + + if (!(*seq & 1)) /* Even */ + *seq = read_seqbegin(lock); + else /* Odd */ + read_seqlock_excl_irqsave(lock, flags); + + return flags; +} + +static inline void +done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) +{ + if (seq & 1) + read_sequnlock_excl_irqrestore(lock, flags); +} +#endif /* __LINUX_SEQLOCK_H */ diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h new file mode 100644 index 000000000..c58c535d1 --- /dev/null +++ b/include/linux/seqno-fence.h @@ -0,0 +1,117 @@ +/* + * seqno-fence, using a dma-buf to synchronize fencing + * + * Copyright (C) 2012 Texas Instruments + * Copyright (C) 2012 Canonical Ltd + * Authors: + * Rob Clark + * Maarten Lankhorst + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __LINUX_SEQNO_FENCE_H +#define __LINUX_SEQNO_FENCE_H + +#include +#include + +enum seqno_fence_condition { + SEQNO_FENCE_WAIT_GEQUAL, + SEQNO_FENCE_WAIT_NONZERO +}; + +struct seqno_fence { + struct dma_fence base; + + const struct dma_fence_ops *ops; + struct dma_buf *sync_buf; + uint32_t seqno_ofs; + enum seqno_fence_condition condition; +}; + +extern const struct dma_fence_ops seqno_fence_ops; + +/** + * to_seqno_fence - cast a fence to a seqno_fence + * @fence: fence to cast to a seqno_fence + * + * Returns NULL if the fence is not a seqno_fence, + * or the seqno_fence otherwise. + */ +static inline struct seqno_fence * +to_seqno_fence(struct dma_fence *fence) +{ + if (fence->ops != &seqno_fence_ops) + return NULL; + return container_of(fence, struct seqno_fence, base); +} + +/** + * seqno_fence_init - initialize a seqno fence + * @fence: seqno_fence to initialize + * @lock: pointer to spinlock to use for fence + * @sync_buf: buffer containing the memory location to signal on + * @context: the execution context this fence is a part of + * @seqno_ofs: the offset within @sync_buf + * @seqno: the sequence # to signal on + * @cond: fence wait condition + * @ops: the fence_ops for operations on this seqno fence + * + * This function initializes a struct seqno_fence with passed parameters, + * and takes a reference on sync_buf which is released on fence destruction. + * + * A seqno_fence is a dma_fence which can complete in software when + * enable_signaling is called, but it also completes when + * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true + * + * The seqno_fence will take a refcount on the sync_buf until it's + * destroyed, but actual lifetime of sync_buf may be longer if one of the + * callers take a reference to it. + * + * Certain hardware have instructions to insert this type of wait condition + * in the command stream, so no intervention from software would be needed. + * This type of fence can be destroyed before completed, however a reference + * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same + * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the + * device's vm can be expensive. + * + * It is recommended for creators of seqno_fence to call dma_fence_signal() + * before destruction. This will prevent possible issues from wraparound at + * time of issue vs time of check, since users can check dma_fence_is_signaled() + * before submitting instructions for the hardware to wait on the fence. + * However, when ops.enable_signaling is not called, it doesn't have to be + * done as soon as possible, just before there's any real danger of seqno + * wraparound. 
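 * Editorial illustration of an init call (not part of the original comment);
 * "my_lock", "my_sync_buf", "my_ctx", "my_seqno", "MY_SEQNO_OFS" and
 * "my_fence_ops" are hypothetical:
 *
 *	struct seqno_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (f)
 *		seqno_fence_init(f, &my_lock, my_sync_buf, my_ctx,
 *				 MY_SEQNO_OFS, my_seqno,
 *				 SEQNO_FENCE_WAIT_GEQUAL, &my_fence_ops);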
+ */ +static inline void +seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, + struct dma_buf *sync_buf, uint32_t context, + uint32_t seqno_ofs, uint32_t seqno, + enum seqno_fence_condition cond, + const struct dma_fence_ops *ops) +{ + BUG_ON(!fence || !sync_buf || !ops); + BUG_ON(!ops->wait || !ops->enable_signaling || + !ops->get_driver_name || !ops->get_timeline_name); + + /* + * ops is used in dma_fence_init for get_driver_name, so needs to be + * initialized first + */ + fence->ops = ops; + dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); + get_dma_buf(sync_buf); + fence->sync_buf = sync_buf; + fence->seqno_ofs = seqno_ofs; + fence->condition = cond; +} + +#endif /* __LINUX_SEQNO_FENCE_H */ diff --git a/include/linux/serdev.h b/include/linux/serdev.h new file mode 100644 index 000000000..f153b2c7f --- /dev/null +++ b/include/linux/serdev.h @@ -0,0 +1,338 @@ +/* + * Copyright (C) 2016-2017 Linaro Ltd., Rob Herring + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _LINUX_SERDEV_H +#define _LINUX_SERDEV_H + +#include +#include +#include +#include + +struct serdev_controller; +struct serdev_device; + +/* + * serdev device structures + */ + +/** + * struct serdev_device_ops - Callback operations for a serdev device + * @receive_buf: Function called with data received from device; + * returns number of bytes accepted; may sleep. + * @write_wakeup: Function called when ready to transmit more data; must + * not sleep. + */ +struct serdev_device_ops { + int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); + void (*write_wakeup)(struct serdev_device *); +}; + +/** + * struct serdev_device - Basic representation of an serdev device + * @dev: Driver model representation of the device. + * @nr: Device number on serdev bus. + * @ctrl: serdev controller managing this device. + * @ops: Device operations. + * @write_comp Completion used by serdev_device_write() internally + * @write_lock Lock to serialize access when writing data + */ +struct serdev_device { + struct device dev; + int nr; + struct serdev_controller *ctrl; + const struct serdev_device_ops *ops; + struct completion write_comp; + struct mutex write_lock; +}; + +static inline struct serdev_device *to_serdev_device(struct device *d) +{ + return container_of(d, struct serdev_device, dev); +} + +/** + * struct serdev_device_driver - serdev slave device driver + * @driver: serdev device drivers should initialize name field of this + * structure. + * @probe: binds this driver to a serdev device. + * @remove: unbinds this driver from the serdev device. 
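 * Editorial sketch of a minimal client driver built on this structure (not
 * part of the original comment); the "my_serdev_*" names are hypothetical:
 *
 *	static int my_serdev_probe(struct serdev_device *serdev)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_serdev_remove(struct serdev_device *serdev)
 *	{
 *	}
 *
 *	static struct serdev_device_driver my_serdev_driver = {
 *		.probe	= my_serdev_probe,
 *		.remove	= my_serdev_remove,
 *		.driver	= {
 *			.name = "my-serdev",
 *		},
 *	};
 *	module_serdev_device_driver(my_serdev_driver);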
+ */ +struct serdev_device_driver { + struct device_driver driver; + int (*probe)(struct serdev_device *); + void (*remove)(struct serdev_device *); +}; + +static inline struct serdev_device_driver *to_serdev_device_driver(struct device_driver *d) +{ + return container_of(d, struct serdev_device_driver, driver); +} + +enum serdev_parity { + SERDEV_PARITY_NONE, + SERDEV_PARITY_EVEN, + SERDEV_PARITY_ODD, +}; + +/* + * serdev controller structures + */ +struct serdev_controller_ops { + int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t); + void (*write_flush)(struct serdev_controller *); + int (*write_room)(struct serdev_controller *); + int (*open)(struct serdev_controller *); + void (*close)(struct serdev_controller *); + void (*set_flow_control)(struct serdev_controller *, bool); + int (*set_parity)(struct serdev_controller *, enum serdev_parity); + unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); + void (*wait_until_sent)(struct serdev_controller *, long); + int (*get_tiocm)(struct serdev_controller *); + int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int); +}; + +/** + * struct serdev_controller - interface to the serdev controller + * @dev: Driver model representation of the device. + * @nr: number identifier for this controller/bus. + * @serdev: Pointer to slave device for this controller. + * @ops: Controller operations. + */ +struct serdev_controller { + struct device dev; + unsigned int nr; + struct serdev_device *serdev; + const struct serdev_controller_ops *ops; +}; + +static inline struct serdev_controller *to_serdev_controller(struct device *d) +{ + return container_of(d, struct serdev_controller, dev); +} + +static inline void *serdev_device_get_drvdata(const struct serdev_device *serdev) +{ + return dev_get_drvdata(&serdev->dev); +} + +static inline void serdev_device_set_drvdata(struct serdev_device *serdev, void *data) +{ + dev_set_drvdata(&serdev->dev, data); +} + +/** + * serdev_device_put() - decrement serdev device refcount + * @serdev serdev device. + */ +static inline void serdev_device_put(struct serdev_device *serdev) +{ + if (serdev) + put_device(&serdev->dev); +} + +static inline void serdev_device_set_client_ops(struct serdev_device *serdev, + const struct serdev_device_ops *ops) +{ + serdev->ops = ops; +} + +static inline +void *serdev_controller_get_drvdata(const struct serdev_controller *ctrl) +{ + return ctrl ? dev_get_drvdata(&ctrl->dev) : NULL; +} + +static inline void serdev_controller_set_drvdata(struct serdev_controller *ctrl, + void *data) +{ + dev_set_drvdata(&ctrl->dev, data); +} + +/** + * serdev_controller_put() - decrement controller refcount + * @ctrl serdev controller. 
+ */ +static inline void serdev_controller_put(struct serdev_controller *ctrl) +{ + if (ctrl) + put_device(&ctrl->dev); +} + +struct serdev_device *serdev_device_alloc(struct serdev_controller *); +int serdev_device_add(struct serdev_device *); +void serdev_device_remove(struct serdev_device *); + +struct serdev_controller *serdev_controller_alloc(struct device *, size_t); +int serdev_controller_add(struct serdev_controller *); +void serdev_controller_remove(struct serdev_controller *); + +static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl) +{ + struct serdev_device *serdev = ctrl->serdev; + + if (!serdev || !serdev->ops->write_wakeup) + return; + + serdev->ops->write_wakeup(serdev); +} + +static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl, + const unsigned char *data, + size_t count) +{ + struct serdev_device *serdev = ctrl->serdev; + + if (!serdev || !serdev->ops->receive_buf) + return 0; + + return serdev->ops->receive_buf(serdev, data, count); +} + +#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS) + +int serdev_device_open(struct serdev_device *); +void serdev_device_close(struct serdev_device *); +int devm_serdev_device_open(struct device *, struct serdev_device *); +unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); +void serdev_device_set_flow_control(struct serdev_device *, bool); +int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); +void serdev_device_wait_until_sent(struct serdev_device *, long); +int serdev_device_get_tiocm(struct serdev_device *); +int serdev_device_set_tiocm(struct serdev_device *, int, int); +void serdev_device_write_wakeup(struct serdev_device *); +int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, unsigned long); +void serdev_device_write_flush(struct serdev_device *); +int serdev_device_write_room(struct serdev_device *); + +/* + * serdev device driver functions + */ +int __serdev_device_driver_register(struct serdev_device_driver *, struct module *); +#define serdev_device_driver_register(sdrv) \ + __serdev_device_driver_register(sdrv, THIS_MODULE) + +/** + * serdev_device_driver_unregister() - unregister an serdev client driver + * @sdrv: the driver to unregister + */ +static inline void serdev_device_driver_unregister(struct serdev_device_driver *sdrv) +{ + if (sdrv) + driver_unregister(&sdrv->driver); +} + +#define module_serdev_device_driver(__serdev_device_driver) \ + module_driver(__serdev_device_driver, serdev_device_driver_register, \ + serdev_device_driver_unregister) + +#else + +static inline int serdev_device_open(struct serdev_device *sdev) +{ + return -ENODEV; +} +static inline void serdev_device_close(struct serdev_device *sdev) {} +static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev, unsigned int baudrate) +{ + return 0; +} +static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} +static inline int serdev_device_write_buf(struct serdev_device *serdev, + const unsigned char *buf, + size_t count) +{ + return -ENODEV; +} +static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} +static inline int serdev_device_get_tiocm(struct serdev_device *serdev) +{ + return -ENOTSUPP; +} +static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear) +{ + return -ENOTSUPP; +} +static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf, + size_t count, unsigned 
long timeout) +{ + return -ENODEV; +} +static inline void serdev_device_write_flush(struct serdev_device *sdev) {} +static inline int serdev_device_write_room(struct serdev_device *sdev) +{ + return 0; +} + +#define serdev_device_driver_register(x) +#define serdev_device_driver_unregister(x) + +#endif /* CONFIG_SERIAL_DEV_BUS */ + +static inline bool serdev_device_get_cts(struct serdev_device *serdev) +{ + int status = serdev_device_get_tiocm(serdev); + return !!(status & TIOCM_CTS); +} + +static inline int serdev_device_wait_for_cts(struct serdev_device *serdev, bool state, int timeout_ms) +{ + unsigned long timeout; + bool signal; + + timeout = jiffies + msecs_to_jiffies(timeout_ms); + while (time_is_after_jiffies(timeout)) { + signal = serdev_device_get_cts(serdev); + if (signal == state) + return 0; + usleep_range(1000, 2000); + } + + return -ETIMEDOUT; +} + +static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enable) +{ + if (enable) + return serdev_device_set_tiocm(serdev, TIOCM_RTS, 0); + else + return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS); +} + +int serdev_device_set_parity(struct serdev_device *serdev, + enum serdev_parity parity); + +/* + * serdev hooks into TTY core + */ +struct tty_port; +struct tty_driver; + +#ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT +struct device *serdev_tty_port_register(struct tty_port *port, + struct device *parent, + struct tty_driver *drv, int idx); +int serdev_tty_port_unregister(struct tty_port *port); +#else +static inline struct device *serdev_tty_port_register(struct tty_port *port, + struct device *parent, + struct tty_driver *drv, int idx) +{ + return ERR_PTR(-ENODEV); +} +static inline int serdev_tty_port_unregister(struct tty_port *port) +{ + return -ENODEV; +} +#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ + +#endif /*_LINUX_SERDEV_H */ diff --git a/include/linux/serial.h b/include/linux/serial.h new file mode 100644 index 000000000..0916107c7 --- /dev/null +++ b/include/linux/serial.h @@ -0,0 +1,33 @@ +/* + * include/linux/serial.h + * + * Copyright (C) 1992 by Theodore Ts'o. + * + * Redistribution of this file is permitted under the terms of the GNU + * Public License (GPL) + */ +#ifndef _LINUX_SERIAL_H +#define _LINUX_SERIAL_H + +#include +#include + + +/* + * Counters of the input lines (CTS, DSR, RI, CD) interrupts + */ + +struct async_icount { + __u32 cts, dsr, rng, dcd, tx, rx; + __u32 frame, parity, overrun, brk; + __u32 buf_overrun; +}; + +/* + * The size of the serial xmit buffer is 1 page, or 4096 bytes + */ +#define SERIAL_XMIT_SIZE PAGE_SIZE + +#include + +#endif /* _LINUX_SERIAL_H */ diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h new file mode 100644 index 000000000..5a655ba8d --- /dev/null +++ b/include/linux/serial_8250.h @@ -0,0 +1,186 @@ +/* + * linux/include/linux/serial_8250.h + * + * Copyright (C) 2004 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#ifndef _LINUX_SERIAL_8250_H +#define _LINUX_SERIAL_8250_H + +#include +#include +#include + +/* + * This is the platform device platform_data structure + */ +struct plat_serial8250_port { + unsigned long iobase; /* io base address */ + void __iomem *membase; /* ioremap cookie or NULL */ + resource_size_t mapbase; /* resource base */ + unsigned int irq; /* interrupt number */ + unsigned long irqflags; /* request_irq flags */ + unsigned int uartclk; /* UART clock rate */ + void *private_data; + unsigned char regshift; /* register shift */ + unsigned char iotype; /* UPIO_* */ + unsigned char hub6; + upf_t flags; /* UPF_* flags */ + unsigned int type; /* If UPF_FIXED_TYPE */ + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, + struct ktermios *new, + struct ktermios *old); + void (*set_ldisc)(struct uart_port *, + struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int state, + unsigned old); + void (*handle_break)(struct uart_port *); +}; + +/* + * Allocate 8250 platform device IDs. Nothing is implied by + * the numbering here, except for the legacy entry being -1. + */ +enum { + PLAT8250_DEV_LEGACY = -1, + PLAT8250_DEV_PLATFORM, + PLAT8250_DEV_PLATFORM1, + PLAT8250_DEV_PLATFORM2, + PLAT8250_DEV_FOURPORT, + PLAT8250_DEV_ACCENT, + PLAT8250_DEV_BOCA, + PLAT8250_DEV_EXAR_ST16C554, + PLAT8250_DEV_HUB6, + PLAT8250_DEV_AU1X00, + PLAT8250_DEV_SM501, +}; + +struct uart_8250_dma; +struct uart_8250_port; + +/** + * 8250 core driver operations + * + * @setup_irq() Setup irq handling. The universal 8250 driver links this + * port to the irq chain. Other drivers may @request_irq(). + * @release_irq() Undo irq handling. The universal 8250 driver unlinks + * the port from the irq chain. + */ +struct uart_8250_ops { + int (*setup_irq)(struct uart_8250_port *); + void (*release_irq)(struct uart_8250_port *); +}; + +struct uart_8250_em485 { + struct hrtimer start_tx_timer; /* "rs485 start tx" timer */ + struct hrtimer stop_tx_timer; /* "rs485 stop tx" timer */ + struct hrtimer *active_timer; /* pointer to active timer */ + struct uart_8250_port *port; /* for hrtimer callbacks */ +}; + +/* + * This should be used by drivers which want to register + * their own 8250 ports without registering their own + * platform device. Using these will make your driver + * dependent on the 8250 driver. + */ + +struct uart_8250_port { + struct uart_port port; + struct timer_list timer; /* "no irq" timer */ + struct list_head list; /* ports on this IRQ */ + u32 capabilities; /* port capabilities */ + unsigned short bugs; /* port bugs */ + bool fifo_bug; /* min RX trigger if enabled */ + unsigned int tx_loadsz; /* transmit fifo load size */ + unsigned char acr; + unsigned char fcr; + unsigned char ier; + unsigned char lcr; + unsigned char mcr; + unsigned char mcr_mask; /* mask of user bits */ + unsigned char mcr_force; /* mask of forced bits */ + unsigned char cur_iotype; /* Running I/O type */ + unsigned int rpm_tx_active; + unsigned char canary; /* non-zero during system sleep + * if no_console_suspend + */ + unsigned char probe; +#define UART_PROBE_RSA (1 << 0) + + /* + * Some bits in registers are cleared on a read, so they must + * be saved whenever the register is read but the bits will not + * be immediately processed. 
+ */ +#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS + unsigned char lsr_saved_flags; +#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA + unsigned char msr_saved_flags; + + struct uart_8250_dma *dma; + const struct uart_8250_ops *ops; + + /* 8250 specific callbacks */ + int (*dl_read)(struct uart_8250_port *); + void (*dl_write)(struct uart_8250_port *, int); + + struct uart_8250_em485 *em485; + + /* Serial port overrun backoff */ + struct delayed_work overrun_backoff; + u32 overrun_backoff_time_ms; +}; + +static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) +{ + return container_of(up, struct uart_8250_port, port); +} + +int serial8250_register_8250_port(struct uart_8250_port *); +void serial8250_unregister_port(int line); +void serial8250_suspend_port(int line); +void serial8250_resume_port(int line); + +extern int early_serial_setup(struct uart_port *port); + +extern int early_serial8250_setup(struct earlycon_device *device, + const char *options); +extern void serial8250_do_set_termios(struct uart_port *port, + struct ktermios *termios, struct ktermios *old); +extern void serial8250_do_set_ldisc(struct uart_port *port, + struct ktermios *termios); +extern unsigned int serial8250_do_get_mctrl(struct uart_port *port); +extern int serial8250_do_startup(struct uart_port *port); +extern void serial8250_do_shutdown(struct uart_port *port); +extern void serial8250_do_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate); +extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl); +extern void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud, + unsigned int quot, + unsigned int quot_frac); +extern int fsl8250_handle_irq(struct uart_port *port); +int serial8250_handle_irq(struct uart_port *port, unsigned int iir); +unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr); +void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr); +void serial8250_tx_chars(struct uart_8250_port *up); +unsigned int serial8250_modem_status(struct uart_8250_port *up); +void serial8250_init_port(struct uart_8250_port *up); +void serial8250_set_defaults(struct uart_8250_port *up); +void serial8250_console_write(struct uart_8250_port *up, const char *s, + unsigned int count); +int serial8250_console_setup(struct uart_port *port, char *options, bool probe); + +extern void serial8250_set_isa_configurator(void (*v) + (int port, struct uart_port *up, + u32 *capabilities)); + +#endif diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h new file mode 100644 index 000000000..b5e48ef89 --- /dev/null +++ b/include/linux/serial_bcm63xx.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SERIAL_BCM63XX_H +#define _LINUX_SERIAL_BCM63XX_H + +/* UART Control Register */ +#define UART_CTL_REG 0x0 +#define UART_CTL_RXTMOUTCNT_SHIFT 0 +#define UART_CTL_RXTMOUTCNT_MASK (0x1f << UART_CTL_RXTMOUTCNT_SHIFT) +#define UART_CTL_RSTTXDN_SHIFT 5 +#define UART_CTL_RSTTXDN_MASK (1 << UART_CTL_RSTTXDN_SHIFT) +#define UART_CTL_RSTRXFIFO_SHIFT 6 +#define UART_CTL_RSTRXFIFO_MASK (1 << UART_CTL_RSTRXFIFO_SHIFT) +#define UART_CTL_RSTTXFIFO_SHIFT 7 +#define UART_CTL_RSTTXFIFO_MASK (1 << UART_CTL_RSTTXFIFO_SHIFT) +#define UART_CTL_STOPBITS_SHIFT 8 +#define UART_CTL_STOPBITS_MASK (0xf << UART_CTL_STOPBITS_SHIFT) +#define UART_CTL_STOPBITS_1 (0x7 << UART_CTL_STOPBITS_SHIFT) +#define UART_CTL_STOPBITS_2 (0xf << UART_CTL_STOPBITS_SHIFT) +#define UART_CTL_BITSPERSYM_SHIFT 12 +#define 
UART_CTL_BITSPERSYM_MASK (0x3 << UART_CTL_BITSPERSYM_SHIFT) +#define UART_CTL_XMITBRK_SHIFT 14 +#define UART_CTL_XMITBRK_MASK (1 << UART_CTL_XMITBRK_SHIFT) +#define UART_CTL_RSVD_SHIFT 15 +#define UART_CTL_RSVD_MASK (1 << UART_CTL_RSVD_SHIFT) +#define UART_CTL_RXPAREVEN_SHIFT 16 +#define UART_CTL_RXPAREVEN_MASK (1 << UART_CTL_RXPAREVEN_SHIFT) +#define UART_CTL_RXPAREN_SHIFT 17 +#define UART_CTL_RXPAREN_MASK (1 << UART_CTL_RXPAREN_SHIFT) +#define UART_CTL_TXPAREVEN_SHIFT 18 +#define UART_CTL_TXPAREVEN_MASK (1 << UART_CTL_TXPAREVEN_SHIFT) +#define UART_CTL_TXPAREN_SHIFT 18 +#define UART_CTL_TXPAREN_MASK (1 << UART_CTL_TXPAREN_SHIFT) +#define UART_CTL_LOOPBACK_SHIFT 20 +#define UART_CTL_LOOPBACK_MASK (1 << UART_CTL_LOOPBACK_SHIFT) +#define UART_CTL_RXEN_SHIFT 21 +#define UART_CTL_RXEN_MASK (1 << UART_CTL_RXEN_SHIFT) +#define UART_CTL_TXEN_SHIFT 22 +#define UART_CTL_TXEN_MASK (1 << UART_CTL_TXEN_SHIFT) +#define UART_CTL_BRGEN_SHIFT 23 +#define UART_CTL_BRGEN_MASK (1 << UART_CTL_BRGEN_SHIFT) + +/* UART Baudword register */ +#define UART_BAUD_REG 0x4 + +/* UART Misc Control register */ +#define UART_MCTL_REG 0x8 +#define UART_MCTL_DTR_SHIFT 0 +#define UART_MCTL_DTR_MASK (1 << UART_MCTL_DTR_SHIFT) +#define UART_MCTL_RTS_SHIFT 1 +#define UART_MCTL_RTS_MASK (1 << UART_MCTL_RTS_SHIFT) +#define UART_MCTL_RXFIFOTHRESH_SHIFT 8 +#define UART_MCTL_RXFIFOTHRESH_MASK (0xf << UART_MCTL_RXFIFOTHRESH_SHIFT) +#define UART_MCTL_TXFIFOTHRESH_SHIFT 12 +#define UART_MCTL_TXFIFOTHRESH_MASK (0xf << UART_MCTL_TXFIFOTHRESH_SHIFT) +#define UART_MCTL_RXFIFOFILL_SHIFT 16 +#define UART_MCTL_RXFIFOFILL_MASK (0x1f << UART_MCTL_RXFIFOFILL_SHIFT) +#define UART_MCTL_TXFIFOFILL_SHIFT 24 +#define UART_MCTL_TXFIFOFILL_MASK (0x1f << UART_MCTL_TXFIFOFILL_SHIFT) + +/* UART External Input Configuration register */ +#define UART_EXTINP_REG 0xc +#define UART_EXTINP_RI_SHIFT 0 +#define UART_EXTINP_RI_MASK (1 << UART_EXTINP_RI_SHIFT) +#define UART_EXTINP_CTS_SHIFT 1 +#define UART_EXTINP_CTS_MASK (1 << UART_EXTINP_CTS_SHIFT) +#define UART_EXTINP_DCD_SHIFT 2 +#define UART_EXTINP_DCD_MASK (1 << UART_EXTINP_DCD_SHIFT) +#define UART_EXTINP_DSR_SHIFT 3 +#define UART_EXTINP_DSR_MASK (1 << UART_EXTINP_DSR_SHIFT) +#define UART_EXTINP_IRSTAT(x) (1 << (x + 4)) +#define UART_EXTINP_IRMASK(x) (1 << (x + 8)) +#define UART_EXTINP_IR_RI 0 +#define UART_EXTINP_IR_CTS 1 +#define UART_EXTINP_IR_DCD 2 +#define UART_EXTINP_IR_DSR 3 +#define UART_EXTINP_RI_NOSENSE_SHIFT 16 +#define UART_EXTINP_RI_NOSENSE_MASK (1 << UART_EXTINP_RI_NOSENSE_SHIFT) +#define UART_EXTINP_CTS_NOSENSE_SHIFT 17 +#define UART_EXTINP_CTS_NOSENSE_MASK (1 << UART_EXTINP_CTS_NOSENSE_SHIFT) +#define UART_EXTINP_DCD_NOSENSE_SHIFT 18 +#define UART_EXTINP_DCD_NOSENSE_MASK (1 << UART_EXTINP_DCD_NOSENSE_SHIFT) +#define UART_EXTINP_DSR_NOSENSE_SHIFT 19 +#define UART_EXTINP_DSR_NOSENSE_MASK (1 << UART_EXTINP_DSR_NOSENSE_SHIFT) + +/* UART Interrupt register */ +#define UART_IR_REG 0x10 +#define UART_IR_MASK(x) (1 << (x + 16)) +#define UART_IR_STAT(x) (1 << (x)) +#define UART_IR_EXTIP 0 +#define UART_IR_TXUNDER 1 +#define UART_IR_TXOVER 2 +#define UART_IR_TXTRESH 3 +#define UART_IR_TXRDLATCH 4 +#define UART_IR_TXEMPTY 5 +#define UART_IR_RXUNDER 6 +#define UART_IR_RXOVER 7 +#define UART_IR_RXTIMEOUT 8 +#define UART_IR_RXFULL 9 +#define UART_IR_RXTHRESH 10 +#define UART_IR_RXNOTEMPTY 11 +#define UART_IR_RXFRAMEERR 12 +#define UART_IR_RXPARERR 13 +#define UART_IR_RXBRK 14 +#define UART_IR_TXDONE 15 + +/* UART Fifo register */ +#define UART_FIFO_REG 0x14 +#define UART_FIFO_VALID_SHIFT 0 +#define 
UART_FIFO_VALID_MASK 0xff +#define UART_FIFO_FRAMEERR_SHIFT 8 +#define UART_FIFO_FRAMEERR_MASK (1 << UART_FIFO_FRAMEERR_SHIFT) +#define UART_FIFO_PARERR_SHIFT 9 +#define UART_FIFO_PARERR_MASK (1 << UART_FIFO_PARERR_SHIFT) +#define UART_FIFO_BRKDET_SHIFT 10 +#define UART_FIFO_BRKDET_MASK (1 << UART_FIFO_BRKDET_SHIFT) +#define UART_FIFO_ANYERR_MASK (UART_FIFO_FRAMEERR_MASK | \ + UART_FIFO_PARERR_MASK | \ + UART_FIFO_BRKDET_MASK) + +#endif /* _LINUX_SERIAL_BCM63XX_H */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h new file mode 100644 index 000000000..3460b15a2 --- /dev/null +++ b/include/linux/serial_core.h @@ -0,0 +1,556 @@ +/* + * linux/drivers/char/serial_core.h + * + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef LINUX_SERIAL_CORE_H +#define LINUX_SERIAL_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SERIAL_CORE_CONSOLE +#define uart_console(port) \ + ((port)->cons && (port)->cons->index == (port)->line) +#else +#define uart_console(port) ({ (void)port; 0; }) +#endif + +struct uart_port; +struct serial_struct; +struct device; + +/* + * This structure describes all the operations that can be done on the + * physical hardware. See Documentation/serial/driver for details. + */ +struct uart_ops { + unsigned int (*tx_empty)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int mctrl); + unsigned int (*get_mctrl)(struct uart_port *); + void (*stop_tx)(struct uart_port *); + void (*start_tx)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + void (*send_xchar)(struct uart_port *, char ch); + void (*stop_rx)(struct uart_port *); + void (*enable_ms)(struct uart_port *); + void (*break_ctl)(struct uart_port *, int ctl); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*flush_buffer)(struct uart_port *); + void (*set_termios)(struct uart_port *, struct ktermios *new, + struct ktermios *old); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + void (*pm)(struct uart_port *, unsigned int state, + unsigned int oldstate); + + /* + * Return a string describing the type of the port + */ + const char *(*type)(struct uart_port *); + + /* + * Release IO and memory resources used by the port. + * This includes iounmap if necessary. + */ + void (*release_port)(struct uart_port *); + + /* + * Request IO and memory resources used by the port. + * This includes iomapping the port if necessary. 
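+ *
+ * A typical memory-mapped implementation is roughly as follows (the
+ * region size and name are illustrative only):
+ *
+ *	if (!request_mem_region(port->mapbase, MYUART_REGS_SIZE, "myuart"))
+ *		return -EBUSY;
+ *	if (port->flags & UPF_IOREMAP) {
+ *		port->membase = ioremap(port->mapbase, MYUART_REGS_SIZE);
+ *		if (!port->membase) {
+ *			release_mem_region(port->mapbase, MYUART_REGS_SIZE);
+ *			return -ENOMEM;
+ *		}
+ *	}
+ *	return 0;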
+ */ + int (*request_port)(struct uart_port *); + void (*config_port)(struct uart_port *, int); + int (*verify_port)(struct uart_port *, struct serial_struct *); + int (*ioctl)(struct uart_port *, unsigned int, unsigned long); +#ifdef CONFIG_CONSOLE_POLL + int (*poll_init)(struct uart_port *); + void (*poll_put_char)(struct uart_port *, unsigned char); + int (*poll_get_char)(struct uart_port *); +#endif +}; + +#define NO_POLL_CHAR 0x00ff0000 +#define UART_CONFIG_TYPE (1 << 0) +#define UART_CONFIG_IRQ (1 << 1) + +struct uart_icount { + __u32 cts; + __u32 dsr; + __u32 rng; + __u32 dcd; + __u32 rx; + __u32 tx; + __u32 frame; + __u32 overrun; + __u32 parity; + __u32 brk; + __u32 buf_overrun; +}; + +typedef unsigned int __bitwise upf_t; +typedef unsigned int __bitwise upstat_t; + +struct uart_port { + spinlock_t lock; /* port lock */ + unsigned long iobase; /* in/out[bwl] */ + unsigned char __iomem *membase; /* read/write[bwl] */ + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, + struct ktermios *new, + struct ktermios *old); + void (*set_ldisc)(struct uart_port *, + struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int); + unsigned int (*get_divisor)(struct uart_port *, + unsigned int baud, + unsigned int *frac); + void (*set_divisor)(struct uart_port *, + unsigned int baud, + unsigned int quot, + unsigned int quot_frac); + int (*startup)(struct uart_port *port); + void (*shutdown)(struct uart_port *port); + void (*throttle)(struct uart_port *port); + void (*unthrottle)(struct uart_port *port); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int state, + unsigned int old); + void (*handle_break)(struct uart_port *); + int (*rs485_config)(struct uart_port *, + struct serial_rs485 *rs485); + unsigned int irq; /* irq number */ + unsigned long irqflags; /* irq flags */ + unsigned int uartclk; /* base uart clock */ + unsigned int fifosize; /* tx fifo size */ + unsigned char x_char; /* xon/xoff char */ + unsigned char regshift; /* reg offset shift */ + unsigned char iotype; /* io access style */ + unsigned char quirks; /* internal quirks */ + +#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */ +#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */ +#define UPIO_MEM (SERIAL_IO_MEM) /* driver-specific */ +#define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */ +#define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */ +#define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */ +#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */ +#define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */ + + /* quirks must be updated while holding port mutex */ +#define UPQ_NO_TXEN_TEST BIT(0) + + unsigned int read_status_mask; /* driver specific */ + unsigned int ignore_status_mask; /* driver specific */ + struct uart_state *state; /* pointer to parent state */ + struct uart_icount icount; /* statistics */ + + struct console *cons; /* struct console, if any */ +#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ) + unsigned long sysrq; /* sysrq timeout */ + unsigned int sysrq_ch; /* char for sysrq */ +#endif + + /* flags must be updated while holding port mutex */ + upf_t flags; + + /* + * These flags must be equivalent to the flags defined in + * include/uapi/linux/tty_flags.h which are the userspace definitions + * assigned from the serial_struct flags in uart_set_info() + 
* [for bit definitions in the UPF_CHANGE_MASK] + * + * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable + * The remaining bits are serial-core specific and not modifiable by + * userspace. + */ +#define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ ) +#define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ ) +#define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ ) +#define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ ) +#define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ ) +#define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ ) +#define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ ) +#define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ ) +#define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ ) +#define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ ) +#define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ ) +#define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ ) +#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ ) +#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ ) + +#define UPF_NO_THRE_TEST ((__force upf_t) (1 << 19)) +/* Port has hardware-assisted h/w flow control */ +#define UPF_AUTO_CTS ((__force upf_t) (1 << 20)) +#define UPF_AUTO_RTS ((__force upf_t) (1 << 21)) +#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS)) +/* Port has hardware-assisted s/w flow control */ +#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22)) +#define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) +#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24)) +#define UPF_EXAR_EFR ((__force upf_t) (1 << 25)) +#define UPF_BUG_THRE ((__force upf_t) (1 << 26)) +/* The exact UART type is known and should not be probed. */ +#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27)) +#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28)) +#define UPF_FIXED_PORT ((__force upf_t) (1 << 29)) +#define UPF_DEAD ((__force upf_t) (1 << 30)) +#define UPF_IOREMAP ((__force upf_t) (1 << 31)) + +#define __UPF_CHANGE_MASK 0x17fff +#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK) +#define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) + +#if __UPF_CHANGE_MASK > ASYNC_FLAGS +#error Change mask not equivalent to userspace-visible bit defines +#endif + + /* + * Must hold termios_rwsem, port mutex and port lock to change; + * can hold any one lock to read. 
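+ *
+ * Readers therefore need only one of those locks held; for instance
+ * uart_cts_enabled() further down boils down to a single bit test
+ * (a sketch of the existing helper, not new behaviour):
+ *
+ *	return !!(uport->status & UPSTAT_CTS_ENABLE);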
+ */ + upstat_t status; + +#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0)) +#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1)) +#define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2)) +#define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3)) +#define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4)) +#define UPSTAT_SYNC_FIFO ((__force upstat_t) (1 << 5)) + + int hw_stopped; /* sw-assisted CTS flow state */ + unsigned int mctrl; /* current modem ctrl settings */ + unsigned int timeout; /* character-based timeout */ + unsigned int type; /* port type */ + const struct uart_ops *ops; + unsigned int custom_divisor; + unsigned int line; /* port index */ + unsigned int minor; + resource_size_t mapbase; /* for ioremap */ + resource_size_t mapsize; + struct device *dev; /* parent device */ + unsigned char hub6; /* this should be in the 8250 driver */ + unsigned char suspended; + unsigned char unused[2]; + const char *name; /* port name */ + struct attribute_group *attr_group; /* port specific attributes */ + const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ + struct serial_rs485 rs485; + void *private_data; /* generic platform data pointer */ +}; + +static inline int serial_port_in(struct uart_port *up, int offset) +{ + return up->serial_in(up, offset); +} + +static inline void serial_port_out(struct uart_port *up, int offset, int value) +{ + up->serial_out(up, offset, value); +} + +/** + * enum uart_pm_state - power states for UARTs + * @UART_PM_STATE_ON: UART is powered, up and operational + * @UART_PM_STATE_OFF: UART is powered off + * @UART_PM_STATE_UNDEFINED: sentinel + */ +enum uart_pm_state { + UART_PM_STATE_ON = 0, + UART_PM_STATE_OFF = 3, /* number taken from ACPI */ + UART_PM_STATE_UNDEFINED, +}; + +/* + * This is the state information which is persistent across opens. + */ +struct uart_state { + struct tty_port port; + + enum uart_pm_state pm_state; + struct circ_buf xmit; + + atomic_t refcount; + wait_queue_head_t remove_wait; + struct uart_port *uart_port; +}; + +#define UART_XMIT_SIZE PAGE_SIZE + + +/* number of characters left in xmit buffer before we ask for more */ +#define WAKEUP_CHARS 256 + +struct module; +struct tty_driver; + +struct uart_driver { + struct module *owner; + const char *driver_name; + const char *dev_name; + int major; + int minor; + int nr; + struct console *cons; + + /* + * these are private; the low level driver should not + * touch these; they should be initialised to NULL + */ + struct uart_state *state; + struct tty_driver *tty_driver; +}; + +void uart_write_wakeup(struct uart_port *port); + +/* + * Baud rate helpers. + */ +void uart_update_timeout(struct uart_port *port, unsigned int cflag, + unsigned int baud); +unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, + struct ktermios *old, unsigned int min, + unsigned int max); +unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud); + +/* Base timer interval for polling */ +static inline int uart_poll_timeout(struct uart_port *port) +{ + int timeout = port->timeout; + + return timeout > 6 ? (timeout / 2 - 2) : 1; +} + +/* + * Console helpers. 
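+ *
+ * Drivers normally hook in through OF_EARLYCON_DECLARE() below; a minimal
+ * sketch (driver name, compatible string and write callback are made up
+ * for illustration):
+ *
+ *	static int __init myuart_early_setup(struct earlycon_device *dev,
+ *					     const char *opt)
+ *	{
+ *		dev->con->write = myuart_early_write;
+ *		return 0;
+ *	}
+ *	OF_EARLYCON_DECLARE(myuart, "acme,myuart", myuart_early_setup);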
+ */ +struct earlycon_device { + struct console *con; + struct uart_port port; + char options[16]; /* e.g., 115200n8 */ + unsigned int baud; +}; + +struct earlycon_id { + char name[15]; + char name_term; /* In case compiler didn't '\0' term name */ + char compatible[128]; + int (*setup)(struct earlycon_device *, const char *options); +}; + +extern const struct earlycon_id *__earlycon_table[]; +extern const struct earlycon_id *__earlycon_table_end[]; + +#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE) +#define EARLYCON_USED_OR_UNUSED __used +#else +#define EARLYCON_USED_OR_UNUSED __maybe_unused +#endif + +#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \ + static const struct earlycon_id unique_id \ + EARLYCON_USED_OR_UNUSED __initconst \ + = { .name = __stringify(_name), \ + .compatible = compat, \ + .setup = fn }; \ + static const struct earlycon_id EARLYCON_USED_OR_UNUSED \ + __section(__earlycon_table) \ + * const __PASTE(__p, unique_id) = &unique_id + +#define OF_EARLYCON_DECLARE(_name, compat, fn) \ + _OF_EARLYCON_DECLARE(_name, compat, fn, \ + __UNIQUE_ID(__earlycon_##_name)) + +#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn) + +extern int of_setup_earlycon(const struct earlycon_id *match, + unsigned long node, + const char *options); + +#ifdef CONFIG_SERIAL_EARLYCON +extern bool earlycon_acpi_spcr_enable __initdata; +int setup_earlycon(char *buf); +#else +static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED; +static inline int setup_earlycon(char *buf) { return 0; } +#endif + +struct uart_port *uart_get_console(struct uart_port *ports, int nr, + struct console *c); +int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, + char **options); +void uart_parse_options(const char *options, int *baud, int *parity, int *bits, + int *flow); +int uart_set_options(struct uart_port *port, struct console *co, int baud, + int parity, int bits, int flow); +struct tty_driver *uart_console_device(struct console *co, int *index); +void uart_console_write(struct uart_port *port, const char *s, + unsigned int count, + void (*putchar)(struct uart_port *, int)); + +/* + * Port/driver registration/removal + */ +int uart_register_driver(struct uart_driver *uart); +void uart_unregister_driver(struct uart_driver *uart); +int uart_add_one_port(struct uart_driver *reg, struct uart_port *port); +int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port); +int uart_match_port(struct uart_port *port1, struct uart_port *port2); + +/* + * Power Management + */ +int uart_suspend_port(struct uart_driver *reg, struct uart_port *port); +int uart_resume_port(struct uart_driver *reg, struct uart_port *port); + +#define uart_circ_empty(circ) ((circ)->head == (circ)->tail) +#define uart_circ_clear(circ) ((circ)->head = (circ)->tail = 0) + +#define uart_circ_chars_pending(circ) \ + (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE)) + +#define uart_circ_chars_free(circ) \ + (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) + +static inline int uart_tx_stopped(struct uart_port *port) +{ + struct tty_struct *tty = port->state->port.tty; + if ((tty && tty->stopped) || port->hw_stopped) + return 1; + return 0; +} + +static inline bool uart_cts_enabled(struct uart_port *uport) +{ + return !!(uport->status & UPSTAT_CTS_ENABLE); +} + +static inline bool uart_softcts_mode(struct uart_port *uport) +{ + upstat_t mask = UPSTAT_CTS_ENABLE | UPSTAT_AUTOCTS; + + return ((uport->status & mask) == UPSTAT_CTS_ENABLE); +} + +/* + * The 
following are helper functions for the low level drivers. + */ + +extern void uart_handle_dcd_change(struct uart_port *uport, + unsigned int status); +extern void uart_handle_cts_change(struct uart_port *uport, + unsigned int status); + +extern void uart_insert_char(struct uart_port *port, unsigned int status, + unsigned int overrun, unsigned int ch, unsigned int flag); + +#if defined(SUPPORT_SYSRQ) && defined(CONFIG_MAGIC_SYSRQ_SERIAL) +static inline int +uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) +{ + if (port->sysrq) { + if (ch && time_before(jiffies, port->sysrq)) { + handle_sysrq(ch); + port->sysrq = 0; + return 1; + } + port->sysrq = 0; + } + return 0; +} +static inline int +uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) +{ + if (port->sysrq) { + if (ch && time_before(jiffies, port->sysrq)) { + port->sysrq_ch = ch; + port->sysrq = 0; + return 1; + } + port->sysrq = 0; + } + return 0; +} +static inline void +uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) +{ + int sysrq_ch; + + sysrq_ch = port->sysrq_ch; + port->sysrq_ch = 0; + + spin_unlock_irqrestore(&port->lock, irqflags); + + if (sysrq_ch) + handle_sysrq(sysrq_ch); +} +#else +static inline int +uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; } +static inline int +uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; } +static inline void +uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) +{ + spin_unlock_irqrestore(&port->lock, irqflags); +} +#endif + +/* + * We do the SysRQ and SAK checking like this... + */ +static inline int uart_handle_break(struct uart_port *port) +{ + struct uart_state *state = port->state; + + if (port->handle_break) + port->handle_break(port); + +#ifdef SUPPORT_SYSRQ + if (port->cons && port->cons->index == port->line) { + if (!port->sysrq) { + port->sysrq = jiffies + HZ*5; + return 1; + } + port->sysrq = 0; + } +#endif + if (port->flags & UPF_SAK) + do_SAK(state->port.tty); + return 0; +} + +/* + * UART_ENABLE_MS - determine if port should enable modem status irqs + */ +#define UART_ENABLE_MS(port,cflag) ((port)->flags & UPF_HARDPPS_CD || \ + (cflag) & CRTSCTS || \ + !((cflag) & CLOCAL)) + +void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf); +#endif /* LINUX_SERIAL_CORE_H */ diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h new file mode 100644 index 000000000..4976befb6 --- /dev/null +++ b/include/linux/serial_max3100.h @@ -0,0 +1,52 @@ +/* + * + * Copyright (C) 2007 Christian Pellegrin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + + +#ifndef _LINUX_SERIAL_MAX3100_H +#define _LINUX_SERIAL_MAX3100_H 1 + + +/** + * struct plat_max3100 - MAX3100 SPI UART platform data + * @loopback: force MAX3100 in loopback + * @crystal: 1 for 3.6864 Mhz, 0 for 1.8432 + * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook + * called on suspend and resume to activate it. + * @poll_time: poll time for CTS signal in ms, 0 disables (so no hw + * flow ctrl is possible but you have less CPU usage) + * + * You should use this structure in your machine description to specify + * how the MAX3100 is connected. 
Example: + * + * static struct plat_max3100 max3100_plat_data = { + * .loopback = 0, + * .crystal = 0, + * .poll_time = 100, + * }; + * + * static struct spi_board_info spi_board_info[] = { + * { + * .modalias = "max3100", + * .platform_data = &max3100_plat_data, + * .irq = IRQ_EINT12, + * .max_speed_hz = 5*1000*1000, + * .chip_select = 0, + * }, + * }; + * + **/ +struct plat_max3100 { + int loopback; + int crystal; + void (*max3100_hw_suspend) (int suspend); + int poll_time; +}; + +#endif diff --git a/include/linux/serial_pnx8xxx.h b/include/linux/serial_pnx8xxx.h new file mode 100644 index 000000000..79ad87b0b --- /dev/null +++ b/include/linux/serial_pnx8xxx.h @@ -0,0 +1,80 @@ +/* + * Embedded Alley Solutions, source@embeddedalley.com. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_SERIAL_PNX8XXX_H +#define _LINUX_SERIAL_PNX8XXX_H + +#include + +#define PNX8XXX_NR_PORTS 2 + +struct pnx8xxx_port { + struct uart_port port; + struct timer_list timer; + unsigned int old_status; +}; + +/* register offsets */ +#define PNX8XXX_LCR 0 +#define PNX8XXX_MCR 0x004 +#define PNX8XXX_BAUD 0x008 +#define PNX8XXX_CFG 0x00c +#define PNX8XXX_FIFO 0x028 +#define PNX8XXX_ISTAT 0xfe0 +#define PNX8XXX_IEN 0xfe4 +#define PNX8XXX_ICLR 0xfe8 +#define PNX8XXX_ISET 0xfec +#define PNX8XXX_PD 0xff4 +#define PNX8XXX_MID 0xffc + +#define PNX8XXX_UART_LCR_TXBREAK (1<<30) +#define PNX8XXX_UART_LCR_PAREVN 0x10000000 +#define PNX8XXX_UART_LCR_PAREN 0x08000000 +#define PNX8XXX_UART_LCR_2STOPB 0x04000000 +#define PNX8XXX_UART_LCR_8BIT 0x01000000 +#define PNX8XXX_UART_LCR_TX_RST 0x00040000 +#define PNX8XXX_UART_LCR_RX_RST 0x00020000 +#define PNX8XXX_UART_LCR_RX_NEXT 0x00010000 + +#define PNX8XXX_UART_MCR_SCR 0xFF000000 +#define PNX8XXX_UART_MCR_DCD 0x00800000 +#define PNX8XXX_UART_MCR_CTS 0x00100000 +#define PNX8XXX_UART_MCR_LOOP 0x00000010 +#define PNX8XXX_UART_MCR_RTS 0x00000002 +#define PNX8XXX_UART_MCR_DTR 0x00000001 + +#define PNX8XXX_UART_INT_TX 0x00000080 +#define PNX8XXX_UART_INT_EMPTY 0x00000040 +#define PNX8XXX_UART_INT_RCVTO 0x00000020 +#define PNX8XXX_UART_INT_RX 0x00000010 +#define PNX8XXX_UART_INT_RXOVRN 0x00000008 +#define PNX8XXX_UART_INT_FRERR 0x00000004 +#define PNX8XXX_UART_INT_BREAK 0x00000002 +#define PNX8XXX_UART_INT_PARITY 0x00000001 +#define PNX8XXX_UART_INT_ALLRX 0x0000003F +#define PNX8XXX_UART_INT_ALLTX 0x000000C0 + +#define PNX8XXX_UART_FIFO_TXFIFO 0x001F0000 +#define PNX8XXX_UART_FIFO_TXFIFO_STA (0x1f<<16) +#define PNX8XXX_UART_FIFO_RXBRK 0x00008000 +#define PNX8XXX_UART_FIFO_RXFE 0x00004000 +#define PNX8XXX_UART_FIFO_RXPAR 0x00002000 +#define PNX8XXX_UART_FIFO_RXFIFO 0x00001F00 +#define PNX8XXX_UART_FIFO_RBRTHR 0x000000FF + +#endif diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h new file mode 100644 index 000000000..463ed28d2 --- /dev/null +++ b/include/linux/serial_s3c.h @@ -0,0 +1,277 
@@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Internal header file for Samsung S3C2410 serial ports (UART0-2) + * + * Copyright (C) 2002 Shane Nay (shane@minirl.com) + * + * Additional defines, Copyright 2003 Simtec Electronics (linux@simtec.co.uk) + * + * Adapted from: + * + * Internal header file for MX1ADS serial ports (UART1 & 2) + * + * Copyright (C) 2002 Shane Nay (shane@minirl.com) + */ + +#ifndef __ASM_ARM_REGS_SERIAL_H +#define __ASM_ARM_REGS_SERIAL_H + +#define S3C2410_URXH (0x24) +#define S3C2410_UTXH (0x20) +#define S3C2410_ULCON (0x00) +#define S3C2410_UCON (0x04) +#define S3C2410_UFCON (0x08) +#define S3C2410_UMCON (0x0C) +#define S3C2410_UBRDIV (0x28) +#define S3C2410_UTRSTAT (0x10) +#define S3C2410_UERSTAT (0x14) +#define S3C2410_UFSTAT (0x18) +#define S3C2410_UMSTAT (0x1C) + +#define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3)) + +#define S3C2410_LCON_CS5 (0x0) +#define S3C2410_LCON_CS6 (0x1) +#define S3C2410_LCON_CS7 (0x2) +#define S3C2410_LCON_CS8 (0x3) +#define S3C2410_LCON_CSMASK (0x3) + +#define S3C2410_LCON_PNONE (0x0) +#define S3C2410_LCON_PEVEN (0x5 << 3) +#define S3C2410_LCON_PODD (0x4 << 3) +#define S3C2410_LCON_PMASK (0x7 << 3) + +#define S3C2410_LCON_STOPB (1<<2) +#define S3C2410_LCON_IRM (1<<6) + +#define S3C2440_UCON_CLKMASK (3<<10) +#define S3C2440_UCON_CLKSHIFT (10) +#define S3C2440_UCON_PCLK (0<<10) +#define S3C2440_UCON_UCLK (1<<10) +#define S3C2440_UCON_PCLK2 (2<<10) +#define S3C2440_UCON_FCLK (3<<10) +#define S3C2443_UCON_EPLL (3<<10) + +#define S3C6400_UCON_CLKMASK (3<<10) +#define S3C6400_UCON_CLKSHIFT (10) +#define S3C6400_UCON_PCLK (0<<10) +#define S3C6400_UCON_PCLK2 (2<<10) +#define S3C6400_UCON_UCLK0 (1<<10) +#define S3C6400_UCON_UCLK1 (3<<10) + +#define S3C2440_UCON2_FCLK_EN (1<<15) +#define S3C2440_UCON0_DIVMASK (15 << 12) +#define S3C2440_UCON1_DIVMASK (15 << 12) +#define S3C2440_UCON2_DIVMASK (7 << 12) +#define S3C2440_UCON_DIVSHIFT (12) + +#define S3C2412_UCON_CLKMASK (3<<10) +#define S3C2412_UCON_CLKSHIFT (10) +#define S3C2412_UCON_UCLK (1<<10) +#define S3C2412_UCON_USYSCLK (3<<10) +#define S3C2412_UCON_PCLK (0<<10) +#define S3C2412_UCON_PCLK2 (2<<10) + +#define S3C2410_UCON_CLKMASK (1 << 10) +#define S3C2410_UCON_CLKSHIFT (10) +#define S3C2410_UCON_UCLK (1<<10) +#define S3C2410_UCON_SBREAK (1<<4) + +#define S3C2410_UCON_TXILEVEL (1<<9) +#define S3C2410_UCON_RXILEVEL (1<<8) +#define S3C2410_UCON_TXIRQMODE (1<<2) +#define S3C2410_UCON_RXIRQMODE (1<<0) +#define S3C2410_UCON_RXFIFO_TOI (1<<7) +#define S3C2443_UCON_RXERR_IRQEN (1<<6) +#define S3C2443_UCON_LOOPBACK (1<<5) + +#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ + S3C2410_UCON_RXILEVEL | \ + S3C2410_UCON_TXIRQMODE | \ + S3C2410_UCON_RXIRQMODE | \ + S3C2410_UCON_RXFIFO_TOI) + +#define S3C64XX_UCON_TXBURST_1 (0<<20) +#define S3C64XX_UCON_TXBURST_4 (1<<20) +#define S3C64XX_UCON_TXBURST_8 (2<<20) +#define S3C64XX_UCON_TXBURST_16 (3<<20) +#define S3C64XX_UCON_TXBURST_MASK (0xf<<20) +#define S3C64XX_UCON_RXBURST_1 (0<<16) +#define S3C64XX_UCON_RXBURST_4 (1<<16) +#define S3C64XX_UCON_RXBURST_8 (2<<16) +#define S3C64XX_UCON_RXBURST_16 (3<<16) +#define S3C64XX_UCON_RXBURST_MASK (0xf<<16) +#define S3C64XX_UCON_TIMEOUT_SHIFT (12) +#define S3C64XX_UCON_TIMEOUT_MASK (0xf<<12) +#define S3C64XX_UCON_EMPTYINT_EN (1<<11) +#define S3C64XX_UCON_DMASUS_EN (1<<10) +#define S3C64XX_UCON_TXINT_LEVEL (1<<9) +#define S3C64XX_UCON_RXINT_LEVEL (1<<8) +#define S3C64XX_UCON_TIMEOUT_EN (1<<7) +#define S3C64XX_UCON_ERRINT_EN (1<<6) +#define S3C64XX_UCON_TXMODE_DMA (2<<2) +#define S3C64XX_UCON_TXMODE_CPU (1<<2) 
+#define S3C64XX_UCON_TXMODE_MASK (3<<2) +#define S3C64XX_UCON_RXMODE_DMA (2<<0) +#define S3C64XX_UCON_RXMODE_CPU (1<<0) +#define S3C64XX_UCON_RXMODE_MASK (3<<0) + +#define S3C2410_UFCON_FIFOMODE (1<<0) +#define S3C2410_UFCON_TXTRIG0 (0<<6) +#define S3C2410_UFCON_RXTRIG8 (1<<4) +#define S3C2410_UFCON_RXTRIG12 (2<<4) + +/* S3C2440 FIFO trigger levels */ +#define S3C2440_UFCON_RXTRIG1 (0<<4) +#define S3C2440_UFCON_RXTRIG8 (1<<4) +#define S3C2440_UFCON_RXTRIG16 (2<<4) +#define S3C2440_UFCON_RXTRIG32 (3<<4) + +#define S3C2440_UFCON_TXTRIG0 (0<<6) +#define S3C2440_UFCON_TXTRIG16 (1<<6) +#define S3C2440_UFCON_TXTRIG32 (2<<6) +#define S3C2440_UFCON_TXTRIG48 (3<<6) + +#define S3C2410_UFCON_RESETBOTH (3<<1) +#define S3C2410_UFCON_RESETTX (1<<2) +#define S3C2410_UFCON_RESETRX (1<<1) + +#define S3C2410_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \ + S3C2410_UFCON_TXTRIG0 | \ + S3C2410_UFCON_RXTRIG8 ) + +#define S3C2410_UMCOM_AFC (1<<4) +#define S3C2410_UMCOM_RTS_LOW (1<<0) + +#define S3C2412_UMCON_AFC_63 (0<<5) /* same as s3c2443 */ +#define S3C2412_UMCON_AFC_56 (1<<5) +#define S3C2412_UMCON_AFC_48 (2<<5) +#define S3C2412_UMCON_AFC_40 (3<<5) +#define S3C2412_UMCON_AFC_32 (4<<5) +#define S3C2412_UMCON_AFC_24 (5<<5) +#define S3C2412_UMCON_AFC_16 (6<<5) +#define S3C2412_UMCON_AFC_8 (7<<5) + +#define S3C2410_UFSTAT_TXFULL (1<<9) +#define S3C2410_UFSTAT_RXFULL (1<<8) +#define S3C2410_UFSTAT_TXMASK (15<<4) +#define S3C2410_UFSTAT_TXSHIFT (4) +#define S3C2410_UFSTAT_RXMASK (15<<0) +#define S3C2410_UFSTAT_RXSHIFT (0) + +/* UFSTAT S3C2443 same as S3C2440 */ +#define S3C2440_UFSTAT_TXFULL (1<<14) +#define S3C2440_UFSTAT_RXFULL (1<<6) +#define S3C2440_UFSTAT_TXSHIFT (8) +#define S3C2440_UFSTAT_RXSHIFT (0) +#define S3C2440_UFSTAT_TXMASK (63<<8) +#define S3C2440_UFSTAT_RXMASK (63) + +#define S3C2410_UTRSTAT_TIMEOUT (1<<3) +#define S3C2410_UTRSTAT_TXE (1<<2) +#define S3C2410_UTRSTAT_TXFE (1<<1) +#define S3C2410_UTRSTAT_RXDR (1<<0) + +#define S3C2410_UERSTAT_OVERRUN (1<<0) +#define S3C2410_UERSTAT_FRAME (1<<2) +#define S3C2410_UERSTAT_BREAK (1<<3) +#define S3C2443_UERSTAT_PARITY (1<<1) + +#define S3C2410_UERSTAT_ANY (S3C2410_UERSTAT_OVERRUN | \ + S3C2410_UERSTAT_FRAME | \ + S3C2410_UERSTAT_BREAK) + +#define S3C2410_UMSTAT_CTS (1<<0) +#define S3C2410_UMSTAT_DeltaCTS (1<<2) + +#define S3C2443_DIVSLOT (0x2C) + +/* S3C64XX interrupt registers. 
*/ +#define S3C64XX_UINTP 0x30 +#define S3C64XX_UINTSP 0x34 +#define S3C64XX_UINTM 0x38 + +#define S3C64XX_UINTM_RXD (0) +#define S3C64XX_UINTM_ERROR (1) +#define S3C64XX_UINTM_TXD (2) +#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD) +#define S3C64XX_UINTM_ERR_MSK (1 << S3C64XX_UINTM_ERROR) +#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD) + +/* Following are specific to S5PV210 */ +#define S5PV210_UCON_CLKMASK (1<<10) +#define S5PV210_UCON_CLKSHIFT (10) +#define S5PV210_UCON_PCLK (0<<10) +#define S5PV210_UCON_UCLK (1<<10) + +#define S5PV210_UFCON_TXTRIG0 (0<<8) +#define S5PV210_UFCON_TXTRIG4 (1<<8) +#define S5PV210_UFCON_TXTRIG8 (2<<8) +#define S5PV210_UFCON_TXTRIG16 (3<<8) +#define S5PV210_UFCON_TXTRIG32 (4<<8) +#define S5PV210_UFCON_TXTRIG64 (5<<8) +#define S5PV210_UFCON_TXTRIG128 (6<<8) +#define S5PV210_UFCON_TXTRIG256 (7<<8) + +#define S5PV210_UFCON_RXTRIG1 (0<<4) +#define S5PV210_UFCON_RXTRIG4 (1<<4) +#define S5PV210_UFCON_RXTRIG8 (2<<4) +#define S5PV210_UFCON_RXTRIG16 (3<<4) +#define S5PV210_UFCON_RXTRIG32 (4<<4) +#define S5PV210_UFCON_RXTRIG64 (5<<4) +#define S5PV210_UFCON_RXTRIG128 (6<<4) +#define S5PV210_UFCON_RXTRIG256 (7<<4) + +#define S5PV210_UFSTAT_TXFULL (1<<24) +#define S5PV210_UFSTAT_RXFULL (1<<8) +#define S5PV210_UFSTAT_TXMASK (255<<16) +#define S5PV210_UFSTAT_TXSHIFT (16) +#define S5PV210_UFSTAT_RXMASK (255<<0) +#define S5PV210_UFSTAT_RXSHIFT (0) + +#define S3C2410_UCON_CLKSEL0 (1 << 0) +#define S3C2410_UCON_CLKSEL1 (1 << 1) +#define S3C2410_UCON_CLKSEL2 (1 << 2) +#define S3C2410_UCON_CLKSEL3 (1 << 3) + +/* Default values for s5pv210 UCON and UFCON uart registers */ +#define S5PV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ + S3C2410_UCON_RXILEVEL | \ + S3C2410_UCON_TXIRQMODE | \ + S3C2410_UCON_RXIRQMODE | \ + S3C2410_UCON_RXFIFO_TOI | \ + S3C2443_UCON_RXERR_IRQEN) + +#define S5PV210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \ + S5PV210_UFCON_TXTRIG4 | \ + S5PV210_UFCON_RXTRIG4) + +#ifndef __ASSEMBLY__ + +#include + +/* configuration structure for per-machine configurations for the + * serial port + * + * the pointer is setup by the machine specific initialisation from the + * arch/arm/mach-s3c2410/ directory. 
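+ *
+ * a board file would typically provide something like the following
+ * (values are only an illustration, built from the defaults above):
+ *
+ *	static struct s3c2410_uartcfg smdk_uartcfg = {
+ *		.hwport	= 0,
+ *		.flags	= 0,
+ *		.ucon	= S3C2410_UCON_DEFAULT,
+ *		.ulcon	= S3C2410_LCON_CS8 | S3C2410_LCON_PNONE,
+ *		.ufcon	= S3C2410_UFCON_DEFAULT,
+ *	};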
+*/ + +struct s3c2410_uartcfg { + unsigned char hwport; /* hardware port number */ + unsigned char unused; + unsigned short flags; + upf_t uart_flags; /* default uart flags */ + unsigned int clk_sel; + + unsigned int has_fracval; + + unsigned long ucon; /* value of ucon for port */ + unsigned long ulcon; /* value of ulcon for port */ + unsigned long ufcon; /* value of ufcon for port */ +}; + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ARM_REGS_SERIAL_H */ + diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h new file mode 100644 index 000000000..1c89611e0 --- /dev/null +++ b/include/linux/serial_sci.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SERIAL_SCI_H +#define __LINUX_SERIAL_SCI_H + +#include +#include +#include + +/* + * Generic header for SuperH (H)SCI(F) (used by sh/sh64 and related parts) + */ + +/* Serial Control Register (@ = not supported by all parts) */ +#define SCSCR_TIE BIT(7) /* Transmit Interrupt Enable */ +#define SCSCR_RIE BIT(6) /* Receive Interrupt Enable */ +#define SCSCR_TE BIT(5) /* Transmit Enable */ +#define SCSCR_RE BIT(4) /* Receive Enable */ +#define SCSCR_REIE BIT(3) /* Receive Error Interrupt Enable @ */ +#define SCSCR_TOIE BIT(2) /* Timeout Interrupt Enable @ */ +#define SCSCR_CKE1 BIT(1) /* Clock Enable 1 */ +#define SCSCR_CKE0 BIT(0) /* Clock Enable 0 */ + + +enum { + SCIx_PROBE_REGTYPE, + + SCIx_SCI_REGTYPE, + SCIx_IRDA_REGTYPE, + SCIx_SCIFA_REGTYPE, + SCIx_SCIFB_REGTYPE, + SCIx_SH2_SCIF_FIFODATA_REGTYPE, + SCIx_SH3_SCIF_REGTYPE, + SCIx_SH4_SCIF_REGTYPE, + SCIx_SH4_SCIF_BRG_REGTYPE, + SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, + SCIx_SH4_SCIF_FIFODATA_REGTYPE, + SCIx_SH7705_SCIF_REGTYPE, + SCIx_HSCIF_REGTYPE, + SCIx_RZ_SCIFA_REGTYPE, + + SCIx_NR_REGTYPES, +}; + +struct plat_sci_port_ops { + void (*init_pins)(struct uart_port *, unsigned int cflag); +}; + +/* + * Platform device specific platform_data struct + */ +struct plat_sci_port { + unsigned int type; /* SCI / SCIF / IRDA / HSCIF */ + upf_t flags; /* UPF_* flags */ + + unsigned int sampling_rate; + unsigned int scscr; /* SCSCR initialization */ + + /* + * Platform overrides if necessary, defaults otherwise. + */ + unsigned char regtype; + + struct plat_sci_port_ops *ops; +}; + +#endif /* __LINUX_SERIAL_SCI_H */ diff --git a/include/linux/serio.h b/include/linux/serio.h new file mode 100644 index 000000000..138a5efe8 --- /dev/null +++ b/include/linux/serio.h @@ -0,0 +1,167 @@ +/* + * Copyright (C) 1999-2002 Vojtech Pavlik +* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ +#ifndef _SERIO_H +#define _SERIO_H + + +#include +#include +#include +#include +#include +#include +#include +#include + +extern struct bus_type serio_bus; + +struct serio { + void *port_data; + + char name[32]; + char phys[32]; + char firmware_id[128]; + + bool manual_bind; + + struct serio_device_id id; + + /* Protects critical sections from port's interrupt handler */ + spinlock_t lock; + + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); + void (*close)(struct serio *); + int (*start)(struct serio *); + void (*stop)(struct serio *); + + struct serio *parent; + /* Entry in parent->children list */ + struct list_head child_node; + struct list_head children; + /* Level of nesting in serio hierarchy */ + unsigned int depth; + + /* + * serio->drv is accessed from interrupt handlers; when modifying + * caller should acquire serio->drv_mutex and serio->lock. + */ + struct serio_driver *drv; + /* Protects serio->drv so attributes can pin current driver */ + struct mutex drv_mutex; + + struct device dev; + + struct list_head node; + + /* + * For use by PS/2 layer when several ports share hardware and + * may get indigestion when exposed to concurrent access (i8042). + */ + struct mutex *ps2_cmd_mutex; +}; +#define to_serio_port(d) container_of(d, struct serio, dev) + +struct serio_driver { + const char *description; + + const struct serio_device_id *id_table; + bool manual_bind; + + void (*write_wakeup)(struct serio *); + irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); + int (*connect)(struct serio *, struct serio_driver *drv); + int (*reconnect)(struct serio *); + int (*fast_reconnect)(struct serio *); + void (*disconnect)(struct serio *); + void (*cleanup)(struct serio *); + + struct device_driver driver; +}; +#define to_serio_driver(d) container_of(d, struct serio_driver, driver) + +int serio_open(struct serio *serio, struct serio_driver *drv); +void serio_close(struct serio *serio); +void serio_rescan(struct serio *serio); +void serio_reconnect(struct serio *serio); +irqreturn_t serio_interrupt(struct serio *serio, unsigned char data, unsigned int flags); + +void __serio_register_port(struct serio *serio, struct module *owner); + +/* use a define to avoid include chaining to get THIS_MODULE */ +#define serio_register_port(serio) \ + __serio_register_port(serio, THIS_MODULE) + +void serio_unregister_port(struct serio *serio); +void serio_unregister_child_port(struct serio *serio); + +int __must_check __serio_register_driver(struct serio_driver *drv, + struct module *owner, const char *mod_name); + +/* use a define to avoid include chaining to get THIS_MODULE & friends */ +#define serio_register_driver(drv) \ + __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME) + +void serio_unregister_driver(struct serio_driver *drv); + +/** + * module_serio_driver() - Helper macro for registering a serio driver + * @__serio_driver: serio_driver struct + * + * Helper macro for serio drivers which do not do anything special in + * module init/exit. This eliminates a lot of boilerplate. Each module + * may only use this macro once, and calling it replaces module_init() + * and module_exit(). 
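+ *
+ * A minimal user looks roughly like this (the "foo" driver name and its
+ * callbacks are illustrative only):
+ *
+ *	static struct serio_driver foo_drv = {
+ *		.driver		= { .name = "foo" },
+ *		.description	= "Foo serio driver",
+ *		.id_table	= foo_serio_ids,
+ *		.interrupt	= foo_interrupt,
+ *		.connect	= foo_connect,
+ *		.disconnect	= foo_disconnect,
+ *	};
+ *	module_serio_driver(foo_drv);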
+ */ +#define module_serio_driver(__serio_driver) \ + module_driver(__serio_driver, serio_register_driver, \ + serio_unregister_driver) + +static inline int serio_write(struct serio *serio, unsigned char data) +{ + if (serio->write) + return serio->write(serio, data); + else + return -1; +} + +static inline void serio_drv_write_wakeup(struct serio *serio) +{ + if (serio->drv && serio->drv->write_wakeup) + serio->drv->write_wakeup(serio); +} + +/* + * Use the following functions to manipulate serio's per-port + * driver-specific data. + */ +static inline void *serio_get_drvdata(struct serio *serio) +{ + return dev_get_drvdata(&serio->dev); +} + +static inline void serio_set_drvdata(struct serio *serio, void *data) +{ + dev_set_drvdata(&serio->dev, data); +} + +/* + * Use the following functions to protect critical sections in + * driver code from port's interrupt handler + */ +static inline void serio_pause_rx(struct serio *serio) +{ + spin_lock_irq(&serio->lock); +} + +static inline void serio_continue_rx(struct serio *serio) +{ + spin_unlock_irq(&serio->lock); +} + +#endif diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h new file mode 100644 index 000000000..a0e15e7b0 --- /dev/null +++ b/include/linux/set_memory.h @@ -0,0 +1,46 @@ +/* + * Copyright 2017, Michael Ellerman, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + */ +#ifndef _LINUX_SET_MEMORY_H_ +#define _LINUX_SET_MEMORY_H_ + +#ifdef CONFIG_ARCH_HAS_SET_MEMORY +#include +#else +static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } +#endif + +#ifndef set_mce_nospec +static inline int set_mce_nospec(unsigned long pfn, bool unmap) +{ + return 0; +} +#endif + +#ifndef clear_mce_nospec +static inline int clear_mce_nospec(unsigned long pfn) +{ + return 0; +} +#endif + +#ifndef CONFIG_ARCH_HAS_MEM_ENCRYPT +static inline int set_memory_encrypted(unsigned long addr, int numpages) +{ + return 0; +} + +static inline int set_memory_decrypted(unsigned long addr, int numpages) +{ + return 0; +} +#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ + +#endif /* _LINUX_SET_MEMORY_H_ */ diff --git a/include/linux/sfi.h b/include/linux/sfi.h new file mode 100644 index 000000000..e0e1597ef --- /dev/null +++ b/include/linux/sfi.h @@ -0,0 +1,210 @@ +/* sfi.h Simple Firmware Interface */ + +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _LINUX_SFI_H +#define _LINUX_SFI_H + +#include +#include + +/* Table signatures reserved by the SFI specification */ +#define SFI_SIG_SYST "SYST" +#define SFI_SIG_FREQ "FREQ" +#define SFI_SIG_IDLE "IDLE" +#define SFI_SIG_CPUS "CPUS" +#define SFI_SIG_MTMR "MTMR" +#define SFI_SIG_MRTC "MRTC" +#define SFI_SIG_MMAP "MMAP" +#define SFI_SIG_APIC "APIC" +#define SFI_SIG_XSDT "XSDT" +#define SFI_SIG_WAKE "WAKE" +#define SFI_SIG_DEVS "DEVS" +#define SFI_SIG_GPIO "GPIO" + +#define SFI_SIGNATURE_SIZE 4 +#define SFI_OEM_ID_SIZE 6 +#define SFI_OEM_TABLE_ID_SIZE 8 + +#define SFI_NAME_LEN 16 + +#define SFI_SYST_SEARCH_BEGIN 0x000E0000 +#define SFI_SYST_SEARCH_END 0x000FFFFF + +#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \ + ((ptable->header.len - sizeof(struct sfi_table_header)) / \ + (sizeof(entry_type))) +/* + * Table structures must be byte-packed to match the SFI specification, + * as they are provided by the BIOS. 
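+ *
+ * A handler passed to sfi_table_parse() typically casts the header to the
+ * matching table type and walks its entries; a rough sketch for the CPU
+ * table (the handler name is illustrative):
+ *
+ *	static int __init parse_cpus(struct sfi_table_header *table)
+ *	{
+ *		struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
+ *		int n = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
+ *		...
+ *		return 0;
+ *	}
+ *	sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, parse_cpus);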
+ */ +struct sfi_table_header { + char sig[SFI_SIGNATURE_SIZE]; + u32 len; + u8 rev; + u8 csum; + char oem_id[SFI_OEM_ID_SIZE]; + char oem_table_id[SFI_OEM_TABLE_ID_SIZE]; +} __packed; + +struct sfi_table_simple { + struct sfi_table_header header; + u64 pentry[1]; +} __packed; + +/* Comply with UEFI spec 2.1 */ +struct sfi_mem_entry { + u32 type; + u64 phys_start; + u64 virt_start; + u64 pages; + u64 attrib; +} __packed; + +struct sfi_cpu_table_entry { + u32 apic_id; +} __packed; + +struct sfi_cstate_table_entry { + u32 hint; /* MWAIT hint */ + u32 latency; /* latency in ms */ +} __packed; + +struct sfi_apic_table_entry { + u64 phys_addr; /* phy base addr for APIC reg */ +} __packed; + +struct sfi_freq_table_entry { + u32 freq_mhz; /* in MHZ */ + u32 latency; /* transition latency in ms */ + u32 ctrl_val; /* value to write to PERF_CTL */ +} __packed; + +struct sfi_wake_table_entry { + u64 phys_addr; /* pointer to where the wake vector locates */ +} __packed; + +struct sfi_timer_table_entry { + u64 phys_addr; /* phy base addr for the timer */ + u32 freq_hz; /* in HZ */ + u32 irq; +} __packed; + +struct sfi_rtc_table_entry { + u64 phys_addr; /* phy base addr for the RTC */ + u32 irq; +} __packed; + +struct sfi_device_table_entry { + u8 type; /* bus type, I2C, SPI or ...*/ +#define SFI_DEV_TYPE_SPI 0 +#define SFI_DEV_TYPE_I2C 1 +#define SFI_DEV_TYPE_UART 2 +#define SFI_DEV_TYPE_HSI 3 +#define SFI_DEV_TYPE_IPC 4 +#define SFI_DEV_TYPE_SD 5 + + u8 host_num; /* attached to host 0, 1...*/ + u16 addr; + u8 irq; + u32 max_freq; + char name[SFI_NAME_LEN]; +} __packed; + +struct sfi_gpio_table_entry { + char controller_name[SFI_NAME_LEN]; + u16 pin_no; + char pin_name[SFI_NAME_LEN]; +} __packed; + +typedef int (*sfi_table_handler) (struct sfi_table_header *table); + +#ifdef CONFIG_SFI +extern void __init sfi_init(void); +extern int __init sfi_platform_init(void); +extern void __init sfi_init_late(void); +extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id, + sfi_table_handler handler); + +extern int sfi_disabled; +static inline void disable_sfi(void) +{ + sfi_disabled = 1; +} + +#else /* !CONFIG_SFI */ + +static inline void sfi_init(void) +{ +} + +static inline void sfi_init_late(void) +{ +} + +#define sfi_disabled 0 + +static inline int sfi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + sfi_table_handler handler) +{ + return -1; +} + +#endif /* !CONFIG_SFI */ + +#endif /*_LINUX_SFI_H*/ diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h new file mode 100644 index 000000000..a6e555cbe --- /dev/null +++ b/include/linux/sfi_acpi.h @@ -0,0 +1,93 @@ +/* sfi.h Simple Firmware Interface */ + +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _LINUX_SFI_ACPI_H +#define _LINUX_SFI_ACPI_H + +#include +#include + +#ifdef CONFIG_SFI +extern int sfi_acpi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + int (*handler)(struct acpi_table_header *)); + +static inline int __init acpi_sfi_table_parse(char *signature, + int (*handler)(struct acpi_table_header *)) +{ + if (!acpi_table_parse(signature, handler)) + return 0; + + return sfi_acpi_table_parse(signature, NULL, NULL, handler); +} +#else /* !CONFIG_SFI */ +static inline int sfi_acpi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + int (*handler)(struct acpi_table_header *)) +{ + return -1; +} + +static inline int __init acpi_sfi_table_parse(char *signature, + int (*handler)(struct acpi_table_header *)) +{ + return acpi_table_parse(signature, handler); +} +#endif /* !CONFIG_SFI */ + +#endif /*_LINUX_SFI_ACPI_H*/ diff --git a/include/linux/sfp.h b/include/linux/sfp.h new file mode 100644 index 000000000..d37518e89 --- /dev/null +++ b/include/linux/sfp.h @@ -0,0 +1,564 @@ +#ifndef LINUX_SFP_H +#define LINUX_SFP_H + +#include + +struct sfp_eeprom_base { + u8 phys_id; + u8 phys_ext_id; + u8 connector; +#if defined __BIG_ENDIAN_BITFIELD + u8 e10g_base_er:1; + u8 e10g_base_lrm:1; + u8 e10g_base_lr:1; + u8 e10g_base_sr:1; + u8 if_1x_sx:1; + u8 if_1x_lx:1; + u8 if_1x_copper_active:1; + u8 if_1x_copper_passive:1; + + u8 escon_mmf_1310_led:1; + u8 escon_smf_1310_laser:1; + u8 sonet_oc192_short_reach:1; + u8 sonet_reach_bit1:1; + u8 sonet_reach_bit2:1; + u8 sonet_oc48_long_reach:1; + u8 sonet_oc48_intermediate_reach:1; + u8 sonet_oc48_short_reach:1; + + u8 unallocated_5_7:1; + u8 sonet_oc12_smf_long_reach:1; + u8 sonet_oc12_smf_intermediate_reach:1; + u8 sonet_oc12_short_reach:1; + u8 unallocated_5_3:1; + u8 sonet_oc3_smf_long_reach:1; + u8 sonet_oc3_smf_intermediate_reach:1; + u8 sonet_oc3_short_reach:1; + + u8 
e_base_px:1; + u8 e_base_bx10:1; + u8 e100_base_fx:1; + u8 e100_base_lx:1; + u8 e1000_base_t:1; + u8 e1000_base_cx:1; + u8 e1000_base_lx:1; + u8 e1000_base_sx:1; + + u8 fc_ll_v:1; + u8 fc_ll_s:1; + u8 fc_ll_i:1; + u8 fc_ll_l:1; + u8 fc_ll_m:1; + u8 fc_tech_sa:1; + u8 fc_tech_lc:1; + u8 fc_tech_electrical_inter_enclosure:1; + + u8 fc_tech_electrical_intra_enclosure:1; + u8 fc_tech_sn:1; + u8 fc_tech_sl:1; + u8 fc_tech_ll:1; + u8 sfp_ct_active:1; + u8 sfp_ct_passive:1; + u8 unallocated_8_1:1; + u8 unallocated_8_0:1; + + u8 fc_media_tw:1; + u8 fc_media_tp:1; + u8 fc_media_mi:1; + u8 fc_media_tv:1; + u8 fc_media_m6:1; + u8 fc_media_m5:1; + u8 unallocated_9_1:1; + u8 fc_media_sm:1; + + u8 fc_speed_1200:1; + u8 fc_speed_800:1; + u8 fc_speed_1600:1; + u8 fc_speed_400:1; + u8 fc_speed_3200:1; + u8 fc_speed_200:1; + u8 unallocated_10_1:1; + u8 fc_speed_100:1; +#elif defined __LITTLE_ENDIAN_BITFIELD + u8 if_1x_copper_passive:1; + u8 if_1x_copper_active:1; + u8 if_1x_lx:1; + u8 if_1x_sx:1; + u8 e10g_base_sr:1; + u8 e10g_base_lr:1; + u8 e10g_base_lrm:1; + u8 e10g_base_er:1; + + u8 sonet_oc3_short_reach:1; + u8 sonet_oc3_smf_intermediate_reach:1; + u8 sonet_oc3_smf_long_reach:1; + u8 unallocated_5_3:1; + u8 sonet_oc12_short_reach:1; + u8 sonet_oc12_smf_intermediate_reach:1; + u8 sonet_oc12_smf_long_reach:1; + u8 unallocated_5_7:1; + + u8 sonet_oc48_short_reach:1; + u8 sonet_oc48_intermediate_reach:1; + u8 sonet_oc48_long_reach:1; + u8 sonet_reach_bit2:1; + u8 sonet_reach_bit1:1; + u8 sonet_oc192_short_reach:1; + u8 escon_smf_1310_laser:1; + u8 escon_mmf_1310_led:1; + + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e_base_bx10:1; + u8 e_base_px:1; + + u8 fc_tech_electrical_inter_enclosure:1; + u8 fc_tech_lc:1; + u8 fc_tech_sa:1; + u8 fc_ll_m:1; + u8 fc_ll_l:1; + u8 fc_ll_i:1; + u8 fc_ll_s:1; + u8 fc_ll_v:1; + + u8 unallocated_8_0:1; + u8 unallocated_8_1:1; + u8 sfp_ct_passive:1; + u8 sfp_ct_active:1; + u8 fc_tech_ll:1; + u8 fc_tech_sl:1; + u8 fc_tech_sn:1; + u8 fc_tech_electrical_intra_enclosure:1; + + u8 fc_media_sm:1; + u8 unallocated_9_1:1; + u8 fc_media_m5:1; + u8 fc_media_m6:1; + u8 fc_media_tv:1; + u8 fc_media_mi:1; + u8 fc_media_tp:1; + u8 fc_media_tw:1; + + u8 fc_speed_100:1; + u8 unallocated_10_1:1; + u8 fc_speed_200:1; + u8 fc_speed_3200:1; + u8 fc_speed_400:1; + u8 fc_speed_1600:1; + u8 fc_speed_800:1; + u8 fc_speed_1200:1; +#else +#error Unknown Endian +#endif + u8 encoding; + u8 br_nominal; + u8 rate_id; + u8 link_len[6]; + char vendor_name[16]; + u8 extended_cc; + char vendor_oui[3]; + char vendor_pn[16]; + char vendor_rev[4]; + union { + __be16 optical_wavelength; + __be16 cable_compliance; + struct { +#if defined __BIG_ENDIAN_BITFIELD + u8 reserved60_2:6; + u8 fc_pi_4_app_h:1; + u8 sff8431_app_e:1; + u8 reserved61:8; +#elif defined __LITTLE_ENDIAN_BITFIELD + u8 sff8431_app_e:1; + u8 fc_pi_4_app_h:1; + u8 reserved60_2:6; + u8 reserved61:8; +#else +#error Unknown Endian +#endif + } __packed passive; + struct { +#if defined __BIG_ENDIAN_BITFIELD + u8 reserved60_4:4; + u8 fc_pi_4_lim:1; + u8 sff8431_lim:1; + u8 fc_pi_4_app_h:1; + u8 sff8431_app_e:1; + u8 reserved61:8; +#elif defined __LITTLE_ENDIAN_BITFIELD + u8 sff8431_app_e:1; + u8 fc_pi_4_app_h:1; + u8 sff8431_lim:1; + u8 fc_pi_4_lim:1; + u8 reserved60_4:4; + u8 reserved61:8; +#else +#error Unknown Endian +#endif + } __packed active; + } __packed; + u8 reserved62; + u8 cc_base; +} __packed; + +struct sfp_eeprom_ext { + __be16 options; + u8 br_max; + u8 
br_min; + char vendor_sn[16]; + char datecode[8]; + u8 diagmon; + u8 enhopts; + u8 sff8472_compliance; + u8 cc_ext; +} __packed; + +/** + * struct sfp_eeprom_id - raw SFP module identification information + * @base: base SFP module identification structure + * @ext: extended SFP module identification structure + * + * See the SFF-8472 specification and related documents for the definition + * of these structure members. This can be obtained from + * ftp://ftp.seagate.com/sff + */ +struct sfp_eeprom_id { + struct sfp_eeprom_base base; + struct sfp_eeprom_ext ext; +} __packed; + +struct sfp_diag { + __be16 temp_high_alarm; + __be16 temp_low_alarm; + __be16 temp_high_warn; + __be16 temp_low_warn; + __be16 volt_high_alarm; + __be16 volt_low_alarm; + __be16 volt_high_warn; + __be16 volt_low_warn; + __be16 bias_high_alarm; + __be16 bias_low_alarm; + __be16 bias_high_warn; + __be16 bias_low_warn; + __be16 txpwr_high_alarm; + __be16 txpwr_low_alarm; + __be16 txpwr_high_warn; + __be16 txpwr_low_warn; + __be16 rxpwr_high_alarm; + __be16 rxpwr_low_alarm; + __be16 rxpwr_high_warn; + __be16 rxpwr_low_warn; + __be16 laser_temp_high_alarm; + __be16 laser_temp_low_alarm; + __be16 laser_temp_high_warn; + __be16 laser_temp_low_warn; + __be16 tec_cur_high_alarm; + __be16 tec_cur_low_alarm; + __be16 tec_cur_high_warn; + __be16 tec_cur_low_warn; + __be32 cal_rxpwr4; + __be32 cal_rxpwr3; + __be32 cal_rxpwr2; + __be32 cal_rxpwr1; + __be32 cal_rxpwr0; + __be16 cal_txi_slope; + __be16 cal_txi_offset; + __be16 cal_txpwr_slope; + __be16 cal_txpwr_offset; + __be16 cal_t_slope; + __be16 cal_t_offset; + __be16 cal_v_slope; + __be16 cal_v_offset; +} __packed; + +/* SFP EEPROM registers */ +enum { + SFP_PHYS_ID = 0x00, + SFP_PHYS_EXT_ID = 0x01, + SFP_CONNECTOR = 0x02, + SFP_COMPLIANCE = 0x03, + SFP_ENCODING = 0x0b, + SFP_BR_NOMINAL = 0x0c, + SFP_RATE_ID = 0x0d, + SFP_LINK_LEN_SM_KM = 0x0e, + SFP_LINK_LEN_SM_100M = 0x0f, + SFP_LINK_LEN_50UM_OM2_10M = 0x10, + SFP_LINK_LEN_62_5UM_OM1_10M = 0x11, + SFP_LINK_LEN_COPPER_1M = 0x12, + SFP_LINK_LEN_50UM_OM4_10M = 0x12, + SFP_LINK_LEN_50UM_OM3_10M = 0x13, + SFP_VENDOR_NAME = 0x14, + SFP_VENDOR_OUI = 0x25, + SFP_VENDOR_PN = 0x28, + SFP_VENDOR_REV = 0x38, + SFP_OPTICAL_WAVELENGTH_MSB = 0x3c, + SFP_OPTICAL_WAVELENGTH_LSB = 0x3d, + SFP_CABLE_SPEC = 0x3c, + SFP_CC_BASE = 0x3f, + SFP_OPTIONS = 0x40, /* 2 bytes, MSB, LSB */ + SFP_BR_MAX = 0x42, + SFP_BR_MIN = 0x43, + SFP_VENDOR_SN = 0x44, + SFP_DATECODE = 0x54, + SFP_DIAGMON = 0x5c, + SFP_ENHOPTS = 0x5d, + SFP_SFF8472_COMPLIANCE = 0x5e, + SFP_CC_EXT = 0x5f, + + SFP_PHYS_ID_SFF = 0x02, + SFP_PHYS_ID_SFP = 0x03, + SFP_PHYS_EXT_ID_SFP = 0x04, + SFP_CONNECTOR_UNSPEC = 0x00, + /* codes 01-05 not supportable on SFP, but some modules have single SC */ + SFP_CONNECTOR_SC = 0x01, + SFP_CONNECTOR_FIBERJACK = 0x06, + SFP_CONNECTOR_LC = 0x07, + SFP_CONNECTOR_MT_RJ = 0x08, + SFP_CONNECTOR_MU = 0x09, + SFP_CONNECTOR_SG = 0x0a, + SFP_CONNECTOR_OPTICAL_PIGTAIL = 0x0b, + SFP_CONNECTOR_MPO_1X12 = 0x0c, + SFP_CONNECTOR_MPO_2X16 = 0x0d, + SFP_CONNECTOR_HSSDC_II = 0x20, + SFP_CONNECTOR_COPPER_PIGTAIL = 0x21, + SFP_CONNECTOR_RJ45 = 0x22, + SFP_CONNECTOR_NOSEPARATE = 0x23, + SFP_CONNECTOR_MXC_2X16 = 0x24, + SFP_ENCODING_UNSPEC = 0x00, + SFP_ENCODING_8B10B = 0x01, + SFP_ENCODING_4B5B = 0x02, + SFP_ENCODING_NRZ = 0x03, + SFP_ENCODING_8472_MANCHESTER = 0x04, + SFP_ENCODING_8472_SONET = 0x05, + SFP_ENCODING_8472_64B66B = 0x06, + SFP_ENCODING_256B257B = 0x07, + SFP_ENCODING_PAM4 = 0x08, + SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13), + SFP_OPTIONS_PAGING_A2 = BIT(12), + 
SFP_OPTIONS_RETIMER = BIT(11), + SFP_OPTIONS_COOLED_XCVR = BIT(10), + SFP_OPTIONS_POWER_DECL = BIT(9), + SFP_OPTIONS_RX_LINEAR_OUT = BIT(8), + SFP_OPTIONS_RX_DECISION_THRESH = BIT(7), + SFP_OPTIONS_TUNABLE_TX = BIT(6), + SFP_OPTIONS_RATE_SELECT = BIT(5), + SFP_OPTIONS_TX_DISABLE = BIT(4), + SFP_OPTIONS_TX_FAULT = BIT(3), + SFP_OPTIONS_LOS_INVERTED = BIT(2), + SFP_OPTIONS_LOS_NORMAL = BIT(1), + SFP_DIAGMON_DDM = BIT(6), + SFP_DIAGMON_INT_CAL = BIT(5), + SFP_DIAGMON_EXT_CAL = BIT(4), + SFP_DIAGMON_RXPWR_AVG = BIT(3), + SFP_DIAGMON_ADDRMODE = BIT(2), + SFP_ENHOPTS_ALARMWARN = BIT(7), + SFP_ENHOPTS_SOFT_TX_DISABLE = BIT(6), + SFP_ENHOPTS_SOFT_TX_FAULT = BIT(5), + SFP_ENHOPTS_SOFT_RX_LOS = BIT(4), + SFP_ENHOPTS_SOFT_RATE_SELECT = BIT(3), + SFP_ENHOPTS_APP_SELECT_SFF8079 = BIT(2), + SFP_ENHOPTS_SOFT_RATE_SFF8431 = BIT(1), + SFP_SFF8472_COMPLIANCE_NONE = 0x00, + SFP_SFF8472_COMPLIANCE_REV9_3 = 0x01, + SFP_SFF8472_COMPLIANCE_REV9_5 = 0x02, + SFP_SFF8472_COMPLIANCE_REV10_2 = 0x03, + SFP_SFF8472_COMPLIANCE_REV10_4 = 0x04, + SFP_SFF8472_COMPLIANCE_REV11_0 = 0x05, + SFP_SFF8472_COMPLIANCE_REV11_3 = 0x06, + SFP_SFF8472_COMPLIANCE_REV11_4 = 0x07, + SFP_SFF8472_COMPLIANCE_REV12_0 = 0x08, +}; + +/* SFP Diagnostics */ +enum { + /* Alarm and warnings stored MSB at lower address then LSB */ + SFP_TEMP_HIGH_ALARM = 0x00, + SFP_TEMP_LOW_ALARM = 0x02, + SFP_TEMP_HIGH_WARN = 0x04, + SFP_TEMP_LOW_WARN = 0x06, + SFP_VOLT_HIGH_ALARM = 0x08, + SFP_VOLT_LOW_ALARM = 0x0a, + SFP_VOLT_HIGH_WARN = 0x0c, + SFP_VOLT_LOW_WARN = 0x0e, + SFP_BIAS_HIGH_ALARM = 0x10, + SFP_BIAS_LOW_ALARM = 0x12, + SFP_BIAS_HIGH_WARN = 0x14, + SFP_BIAS_LOW_WARN = 0x16, + SFP_TXPWR_HIGH_ALARM = 0x18, + SFP_TXPWR_LOW_ALARM = 0x1a, + SFP_TXPWR_HIGH_WARN = 0x1c, + SFP_TXPWR_LOW_WARN = 0x1e, + SFP_RXPWR_HIGH_ALARM = 0x20, + SFP_RXPWR_LOW_ALARM = 0x22, + SFP_RXPWR_HIGH_WARN = 0x24, + SFP_RXPWR_LOW_WARN = 0x26, + SFP_LASER_TEMP_HIGH_ALARM = 0x28, + SFP_LASER_TEMP_LOW_ALARM = 0x2a, + SFP_LASER_TEMP_HIGH_WARN = 0x2c, + SFP_LASER_TEMP_LOW_WARN = 0x2e, + SFP_TEC_CUR_HIGH_ALARM = 0x30, + SFP_TEC_CUR_LOW_ALARM = 0x32, + SFP_TEC_CUR_HIGH_WARN = 0x34, + SFP_TEC_CUR_LOW_WARN = 0x36, + SFP_CAL_RXPWR4 = 0x38, + SFP_CAL_RXPWR3 = 0x3c, + SFP_CAL_RXPWR2 = 0x40, + SFP_CAL_RXPWR1 = 0x44, + SFP_CAL_RXPWR0 = 0x48, + SFP_CAL_TXI_SLOPE = 0x4c, + SFP_CAL_TXI_OFFSET = 0x4e, + SFP_CAL_TXPWR_SLOPE = 0x50, + SFP_CAL_TXPWR_OFFSET = 0x52, + SFP_CAL_T_SLOPE = 0x54, + SFP_CAL_T_OFFSET = 0x56, + SFP_CAL_V_SLOPE = 0x58, + SFP_CAL_V_OFFSET = 0x5a, + SFP_CHKSUM = 0x5f, + + SFP_TEMP = 0x60, + SFP_VCC = 0x62, + SFP_TX_BIAS = 0x64, + SFP_TX_POWER = 0x66, + SFP_RX_POWER = 0x68, + SFP_LASER_TEMP = 0x6a, + SFP_TEC_CUR = 0x6c, + + SFP_STATUS = 0x6e, + SFP_ALARM0 = 0x70, + SFP_ALARM0_TEMP_HIGH = BIT(7), + SFP_ALARM0_TEMP_LOW = BIT(6), + SFP_ALARM0_VCC_HIGH = BIT(5), + SFP_ALARM0_VCC_LOW = BIT(4), + SFP_ALARM0_TX_BIAS_HIGH = BIT(3), + SFP_ALARM0_TX_BIAS_LOW = BIT(2), + SFP_ALARM0_TXPWR_HIGH = BIT(1), + SFP_ALARM0_TXPWR_LOW = BIT(0), + + SFP_ALARM1 = 0x71, + SFP_ALARM1_RXPWR_HIGH = BIT(7), + SFP_ALARM1_RXPWR_LOW = BIT(6), + + SFP_WARN0 = 0x74, + SFP_WARN0_TEMP_HIGH = BIT(7), + SFP_WARN0_TEMP_LOW = BIT(6), + SFP_WARN0_VCC_HIGH = BIT(5), + SFP_WARN0_VCC_LOW = BIT(4), + SFP_WARN0_TX_BIAS_HIGH = BIT(3), + SFP_WARN0_TX_BIAS_LOW = BIT(2), + SFP_WARN0_TXPWR_HIGH = BIT(1), + SFP_WARN0_TXPWR_LOW = BIT(0), + + SFP_WARN1 = 0x75, + SFP_WARN1_RXPWR_HIGH = BIT(7), + SFP_WARN1_RXPWR_LOW = BIT(6), + + SFP_EXT_STATUS = 0x76, + SFP_VSL = 0x78, + SFP_PAGE = 0x7f, +}; + +struct fwnode_handle; +struct 
ethtool_eeprom; +struct ethtool_modinfo; +struct net_device; +struct sfp_bus; + +/** + * struct sfp_upstream_ops - upstream operations structure + * @module_insert: called after a module has been detected to determine + * whether the module is supported for the upstream device. + * @module_remove: called after the module has been removed. + * @link_down: called when the link is non-operational for whatever + * reason. + * @link_up: called when the link is operational. + * @connect_phy: called when an I2C accessible PHY has been detected + * on the module. + * @disconnect_phy: called when a module with an I2C accessible PHY has + * been removed. + */ +struct sfp_upstream_ops { + int (*module_insert)(void *priv, const struct sfp_eeprom_id *id); + void (*module_remove)(void *priv); + void (*link_down)(void *priv); + void (*link_up)(void *priv); + int (*connect_phy)(void *priv, struct phy_device *); + void (*disconnect_phy)(void *priv); +}; + +#if IS_ENABLED(CONFIG_SFP) +int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, + unsigned long *support); +void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, + unsigned long *support); +phy_interface_t sfp_select_interface(struct sfp_bus *bus, + const struct sfp_eeprom_id *id, + unsigned long *link_modes); + +int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo); +int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, + u8 *data); +void sfp_upstream_start(struct sfp_bus *bus); +void sfp_upstream_stop(struct sfp_bus *bus); +struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, + struct net_device *ndev, void *upstream, + const struct sfp_upstream_ops *ops); +void sfp_unregister_upstream(struct sfp_bus *bus); +#else +static inline int sfp_parse_port(struct sfp_bus *bus, + const struct sfp_eeprom_id *id, + unsigned long *support) +{ + return PORT_OTHER; +} + +static inline void sfp_parse_support(struct sfp_bus *bus, + const struct sfp_eeprom_id *id, + unsigned long *support) +{ +} + +static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus, + const struct sfp_eeprom_id *id, + unsigned long *link_modes) +{ + return PHY_INTERFACE_MODE_NA; +} + +static inline int sfp_get_module_info(struct sfp_bus *bus, + struct ethtool_modinfo *modinfo) +{ + return -EOPNOTSUPP; +} + +static inline int sfp_get_module_eeprom(struct sfp_bus *bus, + struct ethtool_eeprom *ee, u8 *data) +{ + return -EOPNOTSUPP; +} + +static inline void sfp_upstream_start(struct sfp_bus *bus) +{ +} + +static inline void sfp_upstream_stop(struct sfp_bus *bus) +{ +} + +static inline struct sfp_bus *sfp_register_upstream( + struct fwnode_handle *fwnode, + struct net_device *ndev, void *upstream, + const struct sfp_upstream_ops *ops) +{ + return (struct sfp_bus *)-1; +} + +static inline void sfp_unregister_upstream(struct sfp_bus *bus) +{ +} +#endif + +#endif diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h new file mode 100644 index 000000000..7bed5be88 --- /dev/null +++ b/include/linux/sh_clk.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SH_CLOCK_H +#define __SH_CLOCK_H + +#include +#include +#include +#include +#include +#include +#include + +struct clk; + +struct clk_mapping { + phys_addr_t phys; + void __iomem *base; + unsigned long len; + struct kref ref; +}; + +struct sh_clk_ops { +#ifdef CONFIG_SH_CLK_CPG_LEGACY + void (*init)(struct clk *clk); +#endif + int (*enable)(struct clk *clk); + void (*disable)(struct clk *clk); + unsigned long 
(*recalc)(struct clk *clk); + int (*set_rate)(struct clk *clk, unsigned long rate); + int (*set_parent)(struct clk *clk, struct clk *parent); + long (*round_rate)(struct clk *clk, unsigned long rate); +}; + +#define SH_CLK_DIV_MSK(div) ((1 << (div)) - 1) +#define SH_CLK_DIV4_MSK SH_CLK_DIV_MSK(4) +#define SH_CLK_DIV6_MSK SH_CLK_DIV_MSK(6) + +struct clk { + struct list_head node; + struct clk *parent; + struct clk **parent_table; /* list of parents to */ + unsigned short parent_num; /* choose between */ + unsigned char src_shift; /* source clock field in the */ + unsigned char src_width; /* configuration register */ + struct sh_clk_ops *ops; + + struct list_head children; + struct list_head sibling; /* node for children */ + + int usecount; + + unsigned long rate; + unsigned long flags; + + void __iomem *enable_reg; + void __iomem *status_reg; + unsigned int enable_bit; + void __iomem *mapped_reg; + + unsigned int div_mask; + unsigned long arch_flags; + void *priv; + struct clk_mapping *mapping; + struct cpufreq_frequency_table *freq_table; + unsigned int nr_freqs; +}; + +#define CLK_ENABLE_ON_INIT BIT(0) + +#define CLK_ENABLE_REG_32BIT BIT(1) /* default access size */ +#define CLK_ENABLE_REG_16BIT BIT(2) +#define CLK_ENABLE_REG_8BIT BIT(3) + +#define CLK_MASK_DIV_ON_DISABLE BIT(4) + +#define CLK_ENABLE_REG_MASK (CLK_ENABLE_REG_32BIT | \ + CLK_ENABLE_REG_16BIT | \ + CLK_ENABLE_REG_8BIT) + +/* drivers/sh/clk.c */ +unsigned long followparent_recalc(struct clk *); +void recalculate_root_clocks(void); +void propagate_rate(struct clk *); +int clk_reparent(struct clk *child, struct clk *parent); +int clk_register(struct clk *); +void clk_unregister(struct clk *); +void clk_enable_init_clocks(void); + +struct clk_div_mult_table { + unsigned int *divisors; + unsigned int nr_divisors; + unsigned int *multipliers; + unsigned int nr_multipliers; +}; + +struct cpufreq_frequency_table; +void clk_rate_table_build(struct clk *clk, + struct cpufreq_frequency_table *freq_table, + int nr_freqs, + struct clk_div_mult_table *src_table, + unsigned long *bitmap); + +long clk_rate_table_round(struct clk *clk, + struct cpufreq_frequency_table *freq_table, + unsigned long rate); + +int clk_rate_table_find(struct clk *clk, + struct cpufreq_frequency_table *freq_table, + unsigned long rate); + +long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, + unsigned int div_max, unsigned long rate); + +long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min, + unsigned int mult_max, unsigned long rate); + +#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _status_reg, _flags) \ +{ \ + .parent = _parent, \ + .enable_reg = (void __iomem *)_enable_reg, \ + .enable_bit = _enable_bit, \ + .status_reg = _status_reg, \ + .flags = _flags, \ +} + +#define SH_CLK_MSTP32(_p, _r, _b, _f) \ + SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_32BIT) + +#define SH_CLK_MSTP32_STS(_p, _r, _b, _s, _f) \ + SH_CLK_MSTP(_p, _r, _b, _s, _f | CLK_ENABLE_REG_32BIT) + +#define SH_CLK_MSTP16(_p, _r, _b, _f) \ + SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_16BIT) + +#define SH_CLK_MSTP8(_p, _r, _b, _f) \ + SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_8BIT) + +int sh_clk_mstp_register(struct clk *clks, int nr); + +/* + * MSTP registration never really cared about access size, despite the + * original enable/disable pairs assuming a 32-bit access. Clocks are + * responsible for defining their access sizes either directly or via the + * clock definition wrappers. 
+ */ +static inline int __deprecated sh_clk_mstp32_register(struct clk *clks, int nr) +{ + return sh_clk_mstp_register(clks, nr); +} + +#define SH_CLK_DIV4(_parent, _reg, _shift, _div_bitmap, _flags) \ +{ \ + .parent = _parent, \ + .enable_reg = (void __iomem *)_reg, \ + .enable_bit = _shift, \ + .arch_flags = _div_bitmap, \ + .div_mask = SH_CLK_DIV4_MSK, \ + .flags = _flags, \ +} + +struct clk_div_table { + struct clk_div_mult_table *div_mult_table; + void (*kick)(struct clk *clk); +}; + +#define clk_div4_table clk_div_table + +int sh_clk_div4_register(struct clk *clks, int nr, + struct clk_div4_table *table); +int sh_clk_div4_enable_register(struct clk *clks, int nr, + struct clk_div4_table *table); +int sh_clk_div4_reparent_register(struct clk *clks, int nr, + struct clk_div4_table *table); + +#define SH_CLK_DIV6_EXT(_reg, _flags, _parents, \ + _num_parents, _src_shift, _src_width) \ +{ \ + .enable_reg = (void __iomem *)_reg, \ + .enable_bit = 0, /* unused */ \ + .flags = _flags | CLK_MASK_DIV_ON_DISABLE, \ + .div_mask = SH_CLK_DIV6_MSK, \ + .parent_table = _parents, \ + .parent_num = _num_parents, \ + .src_shift = _src_shift, \ + .src_width = _src_width, \ +} + +#define SH_CLK_DIV6(_parent, _reg, _flags) \ +{ \ + .parent = _parent, \ + .enable_reg = (void __iomem *)_reg, \ + .enable_bit = 0, /* unused */ \ + .div_mask = SH_CLK_DIV6_MSK, \ + .flags = _flags | CLK_MASK_DIV_ON_DISABLE, \ +} + +int sh_clk_div6_register(struct clk *clks, int nr); +int sh_clk_div6_reparent_register(struct clk *clks, int nr); + +#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk } +#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk } +#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk } + +/* .enable_reg will be updated to .mapping on sh_clk_fsidiv_register() */ +#define SH_CLK_FSIDIV(_reg, _parent) \ +{ \ + .enable_reg = (void __iomem *)_reg, \ + .parent = _parent, \ +} + +int sh_clk_fsidiv_register(struct clk *clks, int nr); + +#endif /* __SH_CLOCK_H */ diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h new file mode 100644 index 000000000..56b97eed2 --- /dev/null +++ b/include/linux/sh_dma.h @@ -0,0 +1,115 @@ +/* + * Header for the new SH dmaengine driver + * + * Copyright (C) 2010 Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef SH_DMA_H +#define SH_DMA_H + +#include +#include +#include +#include + +struct device; + +/* Used by slave DMA clients to request DMA to/from a specific peripheral */ +struct sh_dmae_slave { + struct shdma_slave shdma_slave; /* Set by the platform */ +}; + +/* + * Supplied by platforms to specify, how a DMA channel has to be configured for + * a certain peripheral + */ +struct sh_dmae_slave_config { + int slave_id; + dma_addr_t addr; + u32 chcr; + char mid_rid; +}; + +/** + * struct sh_dmae_channel - DMAC channel platform data + * @offset: register offset within the main IOMEM resource + * @dmars: channel DMARS register offset + * @chclr_offset: channel CHCLR register offset + * @dmars_bit: channel DMARS field offset within the register + * @chclr_bit: bit position, to be set to reset the channel + */ +struct sh_dmae_channel { + unsigned int offset; + unsigned int dmars; + unsigned int chclr_offset; + unsigned char dmars_bit; + unsigned char chclr_bit; +}; + +/** + * struct sh_dmae_pdata - DMAC platform data + * @slave: array of slaves + * @slave_num: number of slaves in the above array + * @channel: array of DMA channels + * @channel_num: number of channels in the above array + * @ts_low_shift: shift of the low part of the TS field + * @ts_low_mask: low TS field mask + * @ts_high_shift: additional shift of the high part of the TS field + * @ts_high_mask: high TS field mask + * @ts_shift: array of Transfer Size shifts, indexed by TS value + * @ts_shift_num: number of shifts in the above array + * @dmaor_init: DMAOR initialisation value + * @chcr_offset: CHCR address offset + * @chcr_ie_bit: CHCR Interrupt Enable bit + * @dmaor_is_32bit: DMAOR is a 32-bit register + * @needs_tend_set: the TEND register has to be set + * @no_dmars: DMAC has no DMARS registers + * @chclr_present: DMAC has one or several CHCLR registers + * @chclr_bitwise: channel CHCLR registers are bitwise + * @slave_only: DMAC cannot be used for MEMCPY + */ +struct sh_dmae_pdata { + const struct sh_dmae_slave_config *slave; + int slave_num; + const struct sh_dmae_channel *channel; + int channel_num; + unsigned int ts_low_shift; + unsigned int ts_low_mask; + unsigned int ts_high_shift; + unsigned int ts_high_mask; + const unsigned int *ts_shift; + int ts_shift_num; + u16 dmaor_init; + unsigned int chcr_offset; + u32 chcr_ie_bit; + + unsigned int dmaor_is_32bit:1; + unsigned int needs_tend_set:1; + unsigned int no_dmars:1; + unsigned int chclr_present:1; + unsigned int chclr_bitwise:1; + unsigned int slave_only:1; +}; + +/* DMAOR definitions */ +#define DMAOR_AE 0x00000004 /* Address Error Flag */ +#define DMAOR_NMIF 0x00000002 +#define DMAOR_DME 0x00000001 /* DMA Master Enable */ + +/* Definitions for the SuperH DMAC */ +#define DM_INC 0x00004000 /* Destination addresses are incremented */ +#define DM_DEC 0x00008000 /* Destination addresses are decremented */ +#define DM_FIX 0x0000c000 /* Destination address is fixed */ +#define SM_INC 0x00001000 /* Source addresses are incremented */ +#define SM_DEC 0x00002000 /* Source addresses are decremented */ +#define SM_FIX 0x00003000 /* Source address is fixed */ +#define RS_AUTO 0x00000400 /* Auto Request */ +#define RS_ERS 0x00000800 /* DMA extended resource selector */ +#define CHCR_DE 0x00000001 /* DMA Enable */ +#define CHCR_TE 0x00000002 /* Transfer End Flag */ +#define CHCR_IE 0x00000004 /* Interrupt Enable */ + +#endif diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h new file mode 100644 index 000000000..6dfda97a6 --- /dev/null +++ 
b/include/linux/sh_eth.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_SH_ETH_H__ +#define __ASM_SH_ETH_H__ + +#include +#include + +struct sh_eth_plat_data { + int phy; + int phy_irq; + phy_interface_t phy_interface; + void (*set_mdio_gate)(void *addr); + + unsigned char mac_addr[ETH_ALEN]; + unsigned no_ether_link:1; + unsigned ether_link_active_low:1; +}; + +#endif diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h new file mode 100644 index 000000000..c255273b0 --- /dev/null +++ b/include/linux/sh_intc.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SH_INTC_H +#define __SH_INTC_H + +#include + +#ifdef CONFIG_SUPERH +#define INTC_NR_IRQS 512 +#else +#define INTC_NR_IRQS 1024 +#endif + +/* + * Convert back and forth between INTEVT and IRQ values. + */ +#ifdef CONFIG_CPU_HAS_INTEVT +#define evt2irq(evt) (((evt) >> 5) - 16) +#define irq2evt(irq) (((irq) + 16) << 5) +#else +#define evt2irq(evt) (evt) +#define irq2evt(irq) (irq) +#endif + +typedef unsigned char intc_enum; + +struct intc_vect { + intc_enum enum_id; + unsigned short vect; +}; + +#define INTC_VECT(enum_id, vect) { enum_id, vect } +#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq)) + +struct intc_group { + intc_enum enum_id; + intc_enum enum_ids[32]; +}; + +#define INTC_GROUP(enum_id, ids...) { enum_id, { ids } } + +struct intc_subgroup { + unsigned long reg, reg_width; + intc_enum parent_id; + intc_enum enum_ids[32]; +}; + +struct intc_mask_reg { + unsigned long set_reg, clr_reg, reg_width; + intc_enum enum_ids[32]; +#ifdef CONFIG_INTC_BALANCING + unsigned long dist_reg; +#endif +#ifdef CONFIG_SMP + unsigned long smp; +#endif +}; + +struct intc_prio_reg { + unsigned long set_reg, clr_reg, reg_width, field_width; + intc_enum enum_ids[16]; +#ifdef CONFIG_SMP + unsigned long smp; +#endif +}; + +struct intc_sense_reg { + unsigned long reg, reg_width, field_width; + intc_enum enum_ids[16]; +}; + +#ifdef CONFIG_INTC_BALANCING +#define INTC_SMP_BALANCING(reg) .dist_reg = (reg) +#else +#define INTC_SMP_BALANCING(reg) +#endif + +#ifdef CONFIG_SMP +#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8) +#else +#define INTC_SMP(stride, nr) +#endif + +struct intc_hw_desc { + struct intc_vect *vectors; + unsigned int nr_vectors; + struct intc_group *groups; + unsigned int nr_groups; + struct intc_mask_reg *mask_regs; + unsigned int nr_mask_regs; + struct intc_prio_reg *prio_regs; + unsigned int nr_prio_regs; + struct intc_sense_reg *sense_regs; + unsigned int nr_sense_regs; + struct intc_mask_reg *ack_regs; + unsigned int nr_ack_regs; + struct intc_subgroup *subgroups; + unsigned int nr_subgroups; +}; + +#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 
0 : sizeof(a)/sizeof(*a) + +#define INTC_HW_DESC(vectors, groups, mask_regs, \ + prio_regs, sense_regs, ack_regs) \ +{ \ + _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \ + _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \ + _INTC_ARRAY(sense_regs), _INTC_ARRAY(ack_regs), \ +} + +struct intc_desc { + char *name; + struct resource *resource; + unsigned int num_resources; + intc_enum force_enable; + intc_enum force_disable; + bool skip_syscore_suspend; + struct intc_hw_desc hw; +}; + +#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups, \ + mask_regs, prio_regs, sense_regs) \ +struct intc_desc symbol __initdata = { \ + .name = chipname, \ + .hw = INTC_HW_DESC(vectors, groups, mask_regs, \ + prio_regs, sense_regs, NULL), \ +} + +#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \ + mask_regs, prio_regs, sense_regs, ack_regs) \ +struct intc_desc symbol __initdata = { \ + .name = chipname, \ + .hw = INTC_HW_DESC(vectors, groups, mask_regs, \ + prio_regs, sense_regs, ack_regs), \ +} + +int register_intc_controller(struct intc_desc *desc); +int intc_set_priority(unsigned int irq, unsigned int prio); +int intc_irq_lookup(const char *chipname, intc_enum enum_id); +void intc_finalize(void); + +#ifdef CONFIG_INTC_USERIMASK +int register_intc_userimask(unsigned long addr); +#else +static inline int register_intc_userimask(unsigned long addr) +{ + return 0; +} +#endif + +#endif /* __SH_INTC_H */ diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h new file mode 100644 index 000000000..74fd5140b --- /dev/null +++ b/include/linux/sh_timer.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SH_TIMER_H__ +#define __SH_TIMER_H__ + +struct sh_timer_config { + unsigned int channels_mask; +}; + +#endif /* __SH_TIMER_H__ */ diff --git a/include/linux/sha256.h b/include/linux/sha256.h new file mode 100644 index 000000000..244fe01a6 --- /dev/null +++ b/include/linux/sha256.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2014 Red Hat Inc. + * + * Author: Vivek Goyal + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#ifndef SHA256_H +#define SHA256_H + +#include +#include + +/* + * Stand-alone implementation of the SHA256 algorithm. It is designed to + * have as little dependencies as possible so it can be used in the + * kexec_file purgatory. In other cases you should use the implementation in + * crypto/. + * + * For details see lib/sha256.c + */ + +extern int sha256_init(struct sha256_state *sctx); +extern int sha256_update(struct sha256_state *sctx, const u8 *input, + unsigned int length); +extern int sha256_final(struct sha256_state *sctx, u8 *hash); + +#endif /* SHA256_H */ diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h new file mode 100644 index 000000000..d927647e6 --- /dev/null +++ b/include/linux/shdma-base.h @@ -0,0 +1,137 @@ +/* + * Dmaengine driver base library for DMA controllers, found on SH-based SoCs + * + * extracted from shdma.c and headers + * + * Copyright (C) 2011-2012 Guennadi Liakhovetski + * Copyright (C) 2009 Nobuhiro Iwamatsu + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ */ + +#ifndef SHDMA_BASE_H +#define SHDMA_BASE_H + +#include +#include +#include +#include + +/** + * shdma_pm_state - DMA channel PM state + * SHDMA_PM_ESTABLISHED: either idle or during data transfer + * SHDMA_PM_BUSY: during the transfer preparation, when we have to + * drop the lock temporarily + * SHDMA_PM_PENDING: transfers pending + */ +enum shdma_pm_state { + SHDMA_PM_ESTABLISHED, + SHDMA_PM_BUSY, + SHDMA_PM_PENDING, +}; + +struct device; + +/* + * Drivers, using this library are expected to embed struct shdma_dev, + * struct shdma_chan, struct shdma_desc, and struct shdma_slave + * in their respective device, channel, descriptor and slave objects. + */ + +struct shdma_slave { + int slave_id; +}; + +struct shdma_desc { + struct list_head node; + struct dma_async_tx_descriptor async_tx; + enum dma_transfer_direction direction; + size_t partial; + dma_cookie_t cookie; + int chunks; + int mark; + bool cyclic; /* used as cyclic transfer */ +}; + +struct shdma_chan { + spinlock_t chan_lock; /* Channel operation lock */ + struct list_head ld_queue; /* Link descriptors queue */ + struct list_head ld_free; /* Free link descriptors */ + struct dma_chan dma_chan; /* DMA channel */ + struct device *dev; /* Channel device */ + void *desc; /* buffer for descriptor array */ + int desc_num; /* desc count */ + size_t max_xfer_len; /* max transfer length */ + int id; /* Raw id of this channel */ + int irq; /* Channel IRQ */ + int slave_id; /* Client ID for slave DMA */ + int real_slave_id; /* argument passed to filter function */ + int hw_req; /* DMA request line for slave DMA - same + * as MID/RID, used with DT */ + enum shdma_pm_state pm_state; +}; + +/** + * struct shdma_ops - simple DMA driver operations + * desc_completed: return true, if this is the descriptor, that just has + * completed (atomic) + * halt_channel: stop DMA channel operation (atomic) + * channel_busy: return true, if the channel is busy (atomic) + * slave_addr: return slave DMA address + * desc_setup: set up the hardware specific descriptor portion (atomic) + * set_slave: bind channel to a slave + * setup_xfer: configure channel hardware for operation (atomic) + * start_xfer: start the DMA transfer (atomic) + * embedded_desc: return Nth struct shdma_desc pointer from the + * descriptor array + * chan_irq: process channel IRQ, return true if a transfer has + * completed (atomic) + */ +struct shdma_ops { + bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *); + void (*halt_channel)(struct shdma_chan *); + bool (*channel_busy)(struct shdma_chan *); + dma_addr_t (*slave_addr)(struct shdma_chan *); + int (*desc_setup)(struct shdma_chan *, struct shdma_desc *, + dma_addr_t, dma_addr_t, size_t *); + int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool); + void (*setup_xfer)(struct shdma_chan *, int); + void (*start_xfer)(struct shdma_chan *, struct shdma_desc *); + struct shdma_desc *(*embedded_desc)(void *, int); + bool (*chan_irq)(struct shdma_chan *, int); + size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *); +}; + +struct shdma_dev { + struct dma_device dma_dev; + struct shdma_chan **schan; + const struct shdma_ops *ops; + size_t desc_size; +}; + +#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \ + i < (d)->dma_dev.chancnt; c = (d)->schan[++i]) + +int shdma_request_irq(struct shdma_chan *, int, + unsigned long, const char *); +bool shdma_reset(struct shdma_dev *sdev); +void shdma_chan_probe(struct shdma_dev *sdev, + struct shdma_chan *schan, int id); +void 
shdma_chan_remove(struct shdma_chan *schan); +int shdma_init(struct device *dev, struct shdma_dev *sdev, + int chan_num); +void shdma_cleanup(struct shdma_dev *sdev); +#if IS_ENABLED(CONFIG_SH_DMAE_BASE) +bool shdma_chan_filter(struct dma_chan *chan, void *arg); +#else +static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg) +{ + return false; +} +#endif + +#endif diff --git a/include/linux/shm.h b/include/linux/shm.h new file mode 100644 index 000000000..d8e69aed3 --- /dev/null +++ b/include/linux/shm.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SHM_H_ +#define _LINUX_SHM_H_ + +#include +#include +#include +#include + +struct file; + +#ifdef CONFIG_SYSVIPC +struct sysv_shm { + struct list_head shm_clist; +}; + +long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr, + unsigned long shmlba); +bool is_file_shm_hugepages(struct file *file); +void exit_shm(struct task_struct *task); +#define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist) +#else +struct sysv_shm { + /* empty */ +}; + +static inline long do_shmat(int shmid, char __user *shmaddr, + int shmflg, unsigned long *addr, + unsigned long shmlba) +{ + return -ENOSYS; +} +static inline bool is_file_shm_hugepages(struct file *file) +{ + return false; +} +static inline void exit_shm(struct task_struct *task) +{ +} +static inline void shm_init_task(struct task_struct *task) +{ +} +#endif + +#endif /* _LINUX_SHM_H_ */ diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h new file mode 100644 index 000000000..f155dc607 --- /dev/null +++ b/include/linux/shmem_fs.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SHMEM_FS_H +#define __SHMEM_FS_H + +#include +#include +#include +#include +#include +#include + +/* inode in-kernel data */ + +struct shmem_inode_info { + spinlock_t lock; + unsigned int seals; /* shmem seals */ + unsigned long flags; + unsigned long alloced; /* data pages alloced to file */ + unsigned long swapped; /* subtotal assigned to swap */ + struct list_head shrinklist; /* shrinkable hpage inodes */ + struct list_head swaplist; /* chain of maybes on swap */ + struct shared_policy policy; /* NUMA memory alloc policy */ + struct simple_xattrs xattrs; /* list of xattrs */ + struct inode vfs_inode; +}; + +struct shmem_sb_info { + unsigned long max_blocks; /* How many blocks are allowed */ + struct percpu_counter used_blocks; /* How many are allocated */ + unsigned long max_inodes; /* How many inodes are allowed */ + unsigned long free_inodes; /* How many are left for allocation */ + spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ + umode_t mode; /* Mount mode for root directory */ + unsigned char huge; /* Whether to try for hugepages */ + kuid_t uid; /* Mount uid for root directory */ + kgid_t gid; /* Mount gid for root directory */ + struct mempolicy *mpol; /* default memory policy for mappings */ + spinlock_t shrinklist_lock; /* Protects shrinklist */ + struct list_head shrinklist; /* List of shinkable inodes */ + unsigned long shrinklist_len; /* Length of shrinklist */ +}; + +static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) +{ + return container_of(inode, struct shmem_inode_info, vfs_inode); +} + +/* + * Functions in mm/shmem.c called directly from elsewhere: + */ +extern int shmem_init(void); +extern int shmem_fill_super(struct super_block *sb, void *data, int silent); +extern struct file *shmem_file_setup(const char *name, + loff_t size, unsigned long flags); +extern struct file 
*shmem_kernel_file_setup(const char *name, loff_t size, + unsigned long flags); +extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, + const char *name, loff_t size, unsigned long flags); +extern int shmem_zero_setup(struct vm_area_struct *); +extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags); +extern int shmem_lock(struct file *file, int lock, struct user_struct *user); +#ifdef CONFIG_SHMEM +extern bool shmem_mapping(struct address_space *mapping); +#else +static inline bool shmem_mapping(struct address_space *mapping) +{ + return false; +} +#endif /* CONFIG_SHMEM */ +extern void shmem_unlock_mapping(struct address_space *mapping); +extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); +extern int shmem_unuse(swp_entry_t entry, struct page *page); + +extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); +extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, + pgoff_t start, pgoff_t end); + +/* Flag allocation requirements to shmem_getpage */ +enum sgp_type { + SGP_READ, /* don't exceed i_size, don't allocate page */ + SGP_CACHE, /* don't exceed i_size, may allocate page */ + SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ + SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ + SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ + SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ +}; + +extern int shmem_getpage(struct inode *inode, pgoff_t index, + struct page **pagep, enum sgp_type sgp); + +static inline struct page *shmem_read_mapping_page( + struct address_space *mapping, pgoff_t index) +{ + return shmem_read_mapping_page_gfp(mapping, index, + mapping_gfp_mask(mapping)); +} + +static inline bool shmem_file(struct file *file) +{ + if (!IS_ENABLED(CONFIG_SHMEM)) + return false; + if (!file || !file->f_mapping) + return false; + return shmem_mapping(file->f_mapping); +} + +extern bool shmem_charge(struct inode *inode, long pages); +extern void shmem_uncharge(struct inode *inode, long pages); + +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +extern bool shmem_huge_enabled(struct vm_area_struct *vma); +#else +static inline bool shmem_huge_enabled(struct vm_area_struct *vma) +{ + return false; +} +#endif + +#ifdef CONFIG_SHMEM +extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, + struct vm_area_struct *dst_vma, + unsigned long dst_addr, + unsigned long src_addr, + struct page **pagep); +extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, + pmd_t *dst_pmd, + struct vm_area_struct *dst_vma, + unsigned long dst_addr); +#else +#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ + src_addr, pagep) ({ BUG(); 0; }) +#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \ + dst_addr) ({ BUG(); 0; }) +#endif + +#endif diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h new file mode 100644 index 000000000..9443cafd1 --- /dev/null +++ b/include/linux/shrinker.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SHRINKER_H +#define _LINUX_SHRINKER_H + +/* + * This struct is used to pass information from page reclaim to the shrinkers. + * We consolidate the values for easier extention later. + * + * The 'gfpmask' refers to the allocation we are currently trying to + * fulfil. 
+ */ +struct shrink_control { + gfp_t gfp_mask; + + /* current node being shrunk (for NUMA aware shrinkers) */ + int nid; + + /* + * How many objects scan_objects should scan and try to reclaim. + * This is reset before every call, so it is safe for callees + * to modify. + */ + unsigned long nr_to_scan; + + /* + * How many objects did scan_objects process? + * This defaults to nr_to_scan before every call, but the callee + * should track its actual progress. + */ + unsigned long nr_scanned; + + /* current memcg being shrunk (for memcg aware shrinkers) */ + struct mem_cgroup *memcg; +}; + +#define SHRINK_STOP (~0UL) +#define SHRINK_EMPTY (~0UL - 1) +/* + * A callback you can register to apply pressure to ageable caches. + * + * @count_objects should return the number of freeable items in the cache. If + * there are no objects to free, it should return SHRINK_EMPTY, while 0 is + * returned in cases of the number of freeable items cannot be determined + * or shrinker should skip this cache for this time (e.g., their number + * is below shrinkable limit). No deadlock checks should be done during the + * count callback - the shrinker relies on aggregating scan counts that couldn't + * be executed due to potential deadlocks to be run at a later call when the + * deadlock condition is no longer pending. + * + * @scan_objects will only be called if @count_objects returned a non-zero + * value for the number of freeable objects. The callout should scan the cache + * and attempt to free items from the cache. It should then return the number + * of objects freed during the scan, or SHRINK_STOP if progress cannot be made + * due to potential deadlocks. If SHRINK_STOP is returned, then no further + * attempts to call the @scan_objects will be made from the current reclaim + * context. + * + * @flags determine the shrinker abilities, like numa awareness + */ +struct shrinker { + unsigned long (*count_objects)(struct shrinker *, + struct shrink_control *sc); + unsigned long (*scan_objects)(struct shrinker *, + struct shrink_control *sc); + + long batch; /* reclaim batch size, 0 = default */ + int seeks; /* seeks to recreate an obj */ + unsigned flags; + + /* These are for internal use */ + struct list_head list; +#ifdef CONFIG_MEMCG_KMEM + /* ID in shrinker_idr */ + int id; +#endif + /* objs pending delete, per node */ + atomic_long_t *nr_deferred; +}; +#define DEFAULT_SEEKS 2 /* A good number if you don't know better. 
*/ + +/* Flags */ +#define SHRINKER_NUMA_AWARE (1 << 0) +#define SHRINKER_MEMCG_AWARE (1 << 1) + +extern int prealloc_shrinker(struct shrinker *shrinker); +extern void register_shrinker_prepared(struct shrinker *shrinker); +extern int register_shrinker(struct shrinker *shrinker); +extern void unregister_shrinker(struct shrinker *shrinker); +extern void free_prealloced_shrinker(struct shrinker *shrinker); +#endif diff --git a/include/linux/signal.h b/include/linux/signal.h new file mode 100644 index 000000000..0be5ce237 --- /dev/null +++ b/include/linux/signal.h @@ -0,0 +1,452 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SIGNAL_H +#define _LINUX_SIGNAL_H + +#include +#include +#include + +struct task_struct; + +/* for sysctl */ +extern int print_fatal_signals; + +static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from) +{ + memcpy(to, from, sizeof(*to)); +} + +static inline void clear_siginfo(struct siginfo *info) +{ + memset(info, 0, sizeof(*info)); +} + +int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); + +enum siginfo_layout { + SIL_KILL, + SIL_TIMER, + SIL_POLL, + SIL_FAULT, + SIL_FAULT_MCEERR, + SIL_FAULT_BNDERR, + SIL_FAULT_PKUERR, + SIL_CHLD, + SIL_RT, + SIL_SYS, +}; + +enum siginfo_layout siginfo_layout(unsigned sig, int si_code); + +/* + * Define some primitives to manipulate sigset_t. + */ + +#ifndef __HAVE_ARCH_SIG_BITOPS +#include + +/* We don't use for these because there is no need to + be atomic. */ +static inline void sigaddset(sigset_t *set, int _sig) +{ + unsigned long sig = _sig - 1; + if (_NSIG_WORDS == 1) + set->sig[0] |= 1UL << sig; + else + set->sig[sig / _NSIG_BPW] |= 1UL << (sig % _NSIG_BPW); +} + +static inline void sigdelset(sigset_t *set, int _sig) +{ + unsigned long sig = _sig - 1; + if (_NSIG_WORDS == 1) + set->sig[0] &= ~(1UL << sig); + else + set->sig[sig / _NSIG_BPW] &= ~(1UL << (sig % _NSIG_BPW)); +} + +static inline int sigismember(sigset_t *set, int _sig) +{ + unsigned long sig = _sig - 1; + if (_NSIG_WORDS == 1) + return 1 & (set->sig[0] >> sig); + else + return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); +} + +#endif /* __HAVE_ARCH_SIG_BITOPS */ + +static inline int sigisemptyset(sigset_t *set) +{ + switch (_NSIG_WORDS) { + case 4: + return (set->sig[3] | set->sig[2] | + set->sig[1] | set->sig[0]) == 0; + case 2: + return (set->sig[1] | set->sig[0]) == 0; + case 1: + return set->sig[0] == 0; + default: + BUILD_BUG(); + return 0; + } +} + +static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2) +{ + switch (_NSIG_WORDS) { + case 4: + return (set1->sig[3] == set2->sig[3]) && + (set1->sig[2] == set2->sig[2]) && + (set1->sig[1] == set2->sig[1]) && + (set1->sig[0] == set2->sig[0]); + case 2: + return (set1->sig[1] == set2->sig[1]) && + (set1->sig[0] == set2->sig[0]); + case 1: + return set1->sig[0] == set2->sig[0]; + } + return 0; +} + +#define sigmask(sig) (1UL << ((sig) - 1)) + +#ifndef __HAVE_ARCH_SIG_SETOPS +#include + +#define _SIG_SET_BINOP(name, op) \ +static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ +{ \ + unsigned long a0, a1, a2, a3, b0, b1, b2, b3; \ + \ + switch (_NSIG_WORDS) { \ + case 4: \ + a3 = a->sig[3]; a2 = a->sig[2]; \ + b3 = b->sig[3]; b2 = b->sig[2]; \ + r->sig[3] = op(a3, b3); \ + r->sig[2] = op(a2, b2); \ + case 2: \ + a1 = a->sig[1]; b1 = b->sig[1]; \ + r->sig[1] = op(a1, b1); \ + case 1: \ + a0 = a->sig[0]; b0 = b->sig[0]; \ + r->sig[0] = op(a0, b0); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +} + 
+#define _sig_or(x,y) ((x) | (y)) +_SIG_SET_BINOP(sigorsets, _sig_or) + +#define _sig_and(x,y) ((x) & (y)) +_SIG_SET_BINOP(sigandsets, _sig_and) + +#define _sig_andn(x,y) ((x) & ~(y)) +_SIG_SET_BINOP(sigandnsets, _sig_andn) + +#undef _SIG_SET_BINOP +#undef _sig_or +#undef _sig_and +#undef _sig_andn + +#define _SIG_SET_OP(name, op) \ +static inline void name(sigset_t *set) \ +{ \ + switch (_NSIG_WORDS) { \ + case 4: set->sig[3] = op(set->sig[3]); \ + set->sig[2] = op(set->sig[2]); \ + case 2: set->sig[1] = op(set->sig[1]); \ + case 1: set->sig[0] = op(set->sig[0]); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +} + +#define _sig_not(x) (~(x)) +_SIG_SET_OP(signotset, _sig_not) + +#undef _SIG_SET_OP +#undef _sig_not + +static inline void sigemptyset(sigset_t *set) +{ + switch (_NSIG_WORDS) { + default: + memset(set, 0, sizeof(sigset_t)); + break; + case 2: set->sig[1] = 0; + case 1: set->sig[0] = 0; + break; + } +} + +static inline void sigfillset(sigset_t *set) +{ + switch (_NSIG_WORDS) { + default: + memset(set, -1, sizeof(sigset_t)); + break; + case 2: set->sig[1] = -1; + case 1: set->sig[0] = -1; + break; + } +} + +/* Some extensions for manipulating the low 32 signals in particular. */ + +static inline void sigaddsetmask(sigset_t *set, unsigned long mask) +{ + set->sig[0] |= mask; +} + +static inline void sigdelsetmask(sigset_t *set, unsigned long mask) +{ + set->sig[0] &= ~mask; +} + +static inline int sigtestsetmask(sigset_t *set, unsigned long mask) +{ + return (set->sig[0] & mask) != 0; +} + +static inline void siginitset(sigset_t *set, unsigned long mask) +{ + set->sig[0] = mask; + switch (_NSIG_WORDS) { + default: + memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1)); + break; + case 2: set->sig[1] = 0; + case 1: ; + } +} + +static inline void siginitsetinv(sigset_t *set, unsigned long mask) +{ + set->sig[0] = ~mask; + switch (_NSIG_WORDS) { + default: + memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1)); + break; + case 2: set->sig[1] = -1; + case 1: ; + } +} + +#endif /* __HAVE_ARCH_SIG_SETOPS */ + +static inline void init_sigpending(struct sigpending *sig) +{ + sigemptyset(&sig->signal); + INIT_LIST_HEAD(&sig->list); +} + +extern void flush_sigqueue(struct sigpending *queue); + +/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ +static inline int valid_signal(unsigned long sig) +{ + return sig <= _NSIG ? 1 : 0; +} + +struct timespec; +struct pt_regs; +enum pid_type; + +extern int next_signal(struct sigpending *pending, sigset_t *mask); +extern int do_send_sig_info(int sig, struct siginfo *info, + struct task_struct *p, enum pid_type type); +extern int group_send_sig_info(int sig, struct siginfo *info, + struct task_struct *p, enum pid_type type); +extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); +extern int sigprocmask(int, sigset_t *, sigset_t *); +extern void set_current_blocked(sigset_t *); +extern void __set_current_blocked(const sigset_t *); +extern int show_unhandled_signals; + +extern bool get_signal(struct ksignal *ksig); +extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); +extern void exit_signals(struct task_struct *tsk); +extern void kernel_sigaction(int, __sighandler_t); + +#define SIG_KTHREAD ((__force __sighandler_t)2) +#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3) + +static inline void allow_signal(int sig) +{ + /* + * Kernel threads handle their own signals. 
Let the signal code + * know it'll be handled, so that they don't get converted to + * SIGKILL or just silently dropped. + */ + kernel_sigaction(sig, SIG_KTHREAD); +} + +static inline void allow_kernel_signal(int sig) +{ + /* + * Kernel threads handle their own signals. Let the signal code + * know signals sent by the kernel will be handled, so that they + * don't get silently dropped. + */ + kernel_sigaction(sig, SIG_KTHREAD_KERNEL); +} + +static inline void disallow_signal(int sig) +{ + kernel_sigaction(sig, SIG_IGN); +} + +extern struct kmem_cache *sighand_cachep; + +extern bool unhandled_signal(struct task_struct *tsk, int sig); + +/* + * In POSIX a signal is sent either to a specific thread (Linux task) + * or to the process as a whole (Linux thread group). How the signal + * is sent determines whether it's to one thread or the whole group, + * which determines which signal mask(s) are involved in blocking it + * from being delivered until later. When the signal is delivered, + * either it's caught or ignored by a user handler or it has a default + * effect that applies to the whole thread group (POSIX process). + * + * The possible effects an unblocked signal set to SIG_DFL can have are: + * ignore - Nothing Happens + * terminate - kill the process, i.e. all threads in the group, + * similar to exit_group. The group leader (only) reports + * WIFSIGNALED status to its parent. + * coredump - write a core dump file describing all threads using + * the same mm and then kill all those threads + * stop - stop all the threads in the group, i.e. TASK_STOPPED state + * + * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. + * Other signals when not blocked and set to SIG_DFL behaves as follows. + * The job control signals also have other special effects. + * + * +--------------------+------------------+ + * | POSIX signal | default action | + * +--------------------+------------------+ + * | SIGHUP | terminate | + * | SIGINT | terminate | + * | SIGQUIT | coredump | + * | SIGILL | coredump | + * | SIGTRAP | coredump | + * | SIGABRT/SIGIOT | coredump | + * | SIGBUS | coredump | + * | SIGFPE | coredump | + * | SIGKILL | terminate(+) | + * | SIGUSR1 | terminate | + * | SIGSEGV | coredump | + * | SIGUSR2 | terminate | + * | SIGPIPE | terminate | + * | SIGALRM | terminate | + * | SIGTERM | terminate | + * | SIGCHLD | ignore | + * | SIGCONT | ignore(*) | + * | SIGSTOP | stop(*)(+) | + * | SIGTSTP | stop(*) | + * | SIGTTIN | stop(*) | + * | SIGTTOU | stop(*) | + * | SIGURG | ignore | + * | SIGXCPU | coredump | + * | SIGXFSZ | coredump | + * | SIGVTALRM | terminate | + * | SIGPROF | terminate | + * | SIGPOLL/SIGIO | terminate | + * | SIGSYS/SIGUNUSED | coredump | + * | SIGSTKFLT | terminate | + * | SIGWINCH | ignore | + * | SIGPWR | terminate | + * | SIGRTMIN-SIGRTMAX | terminate | + * +--------------------+------------------+ + * | non-POSIX signal | default action | + * +--------------------+------------------+ + * | SIGEMT | coredump | + * +--------------------+------------------+ + * + * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". + * (*) Special job control effects: + * When SIGCONT is sent, it resumes the process (all threads in the group) + * from TASK_STOPPED state and also clears any pending/queued stop signals + * (any of those marked with "stop(*)"). This happens regardless of blocking, + * catching, or ignoring SIGCONT. 
When any stop signal is sent, it clears + * any pending/queued SIGCONT signals; this happens regardless of blocking, + * catching, or ignored the stop signal, though (except for SIGSTOP) the + * default action of stopping the process may happen later or never. + */ + +#ifdef SIGEMT +#define SIGEMT_MASK rt_sigmask(SIGEMT) +#else +#define SIGEMT_MASK 0 +#endif + +#if SIGRTMIN > BITS_PER_LONG +#define rt_sigmask(sig) (1ULL << ((sig)-1)) +#else +#define rt_sigmask(sig) sigmask(sig) +#endif + +#define siginmask(sig, mask) \ + ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) + +#define SIG_KERNEL_ONLY_MASK (\ + rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) + +#define SIG_KERNEL_STOP_MASK (\ + rt_sigmask(SIGSTOP) | rt_sigmask(SIGTSTP) | \ + rt_sigmask(SIGTTIN) | rt_sigmask(SIGTTOU) ) + +#define SIG_KERNEL_COREDUMP_MASK (\ + rt_sigmask(SIGQUIT) | rt_sigmask(SIGILL) | \ + rt_sigmask(SIGTRAP) | rt_sigmask(SIGABRT) | \ + rt_sigmask(SIGFPE) | rt_sigmask(SIGSEGV) | \ + rt_sigmask(SIGBUS) | rt_sigmask(SIGSYS) | \ + rt_sigmask(SIGXCPU) | rt_sigmask(SIGXFSZ) | \ + SIGEMT_MASK ) + +#define SIG_KERNEL_IGNORE_MASK (\ + rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \ + rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) ) + +#define SIG_SPECIFIC_SICODES_MASK (\ + rt_sigmask(SIGILL) | rt_sigmask(SIGFPE) | \ + rt_sigmask(SIGSEGV) | rt_sigmask(SIGBUS) | \ + rt_sigmask(SIGTRAP) | rt_sigmask(SIGCHLD) | \ + rt_sigmask(SIGPOLL) | rt_sigmask(SIGSYS) | \ + SIGEMT_MASK ) + +#define sig_kernel_only(sig) siginmask(sig, SIG_KERNEL_ONLY_MASK) +#define sig_kernel_coredump(sig) siginmask(sig, SIG_KERNEL_COREDUMP_MASK) +#define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK) +#define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK) +#define sig_specific_sicodes(sig) siginmask(sig, SIG_SPECIFIC_SICODES_MASK) + +#define sig_fatal(t, signr) \ + (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ + (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) + +void signals_init(void); + +int restore_altstack(const stack_t __user *); +int __save_altstack(stack_t __user *, unsigned long); + +#define save_altstack_ex(uss, sp) do { \ + stack_t __user *__uss = uss; \ + struct task_struct *t = current; \ + put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ + put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & SS_AUTODISARM) \ + sas_ss_reset(t); \ +} while (0); + +#ifdef CONFIG_PROC_FS +struct seq_file; +extern void render_sigset_t(struct seq_file *, const char *, sigset_t *); +#endif + +#endif /* _LINUX_SIGNAL_H */ diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h new file mode 100644 index 000000000..222ae6960 --- /dev/null +++ b/include/linux/signal_types.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SIGNAL_TYPES_H +#define _LINUX_SIGNAL_TYPES_H + +/* + * Basic signal handling related data type definitions: + */ + +#include +#include + +/* + * Real Time signals may be queued. + */ + +struct sigqueue { + struct list_head list; + int flags; + siginfo_t info; + struct user_struct *user; +}; + +/* flags values. 
*/ +#define SIGQUEUE_PREALLOC 1 + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +struct sigaction { +#ifndef __ARCH_HAS_IRIX_SIGACTION + __sighandler_t sa_handler; + unsigned long sa_flags; +#else + unsigned int sa_flags; + __sighandler_t sa_handler; +#endif +#ifdef __ARCH_HAS_SA_RESTORER + __sigrestore_t sa_restorer; +#endif + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +#ifdef __ARCH_HAS_KA_RESTORER + __sigrestore_t ka_restorer; +#endif +}; + +#ifdef CONFIG_OLD_SIGACTION +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + __sigrestore_t sa_restorer; +}; +#endif + +struct ksignal { + struct k_sigaction ka; + siginfo_t info; + int sig; +}; + +#endif /* _LINUX_SIGNAL_TYPES_H */ diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h new file mode 100644 index 000000000..9a47c380b --- /dev/null +++ b/include/linux/signalfd.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/signalfd.h + * + * Copyright (C) 2007 Davide Libenzi + * + */ +#ifndef _LINUX_SIGNALFD_H +#define _LINUX_SIGNALFD_H + +#include +#include + +#ifdef CONFIG_SIGNALFD + +/* + * Deliver the signal to listening signalfd. + */ +static inline void signalfd_notify(struct task_struct *tsk, int sig) +{ + if (unlikely(waitqueue_active(&tsk->sighand->signalfd_wqh))) + wake_up(&tsk->sighand->signalfd_wqh); +} + +extern void signalfd_cleanup(struct sighand_struct *sighand); + +#else /* CONFIG_SIGNALFD */ + +static inline void signalfd_notify(struct task_struct *tsk, int sig) { } + +static inline void signalfd_cleanup(struct sighand_struct *sighand) { } + +#endif /* CONFIG_SIGNALFD */ + +#endif /* _LINUX_SIGNALFD_H */ diff --git a/include/linux/siox.h b/include/linux/siox.h new file mode 100644 index 000000000..d79624e83 --- /dev/null +++ b/include/linux/siox.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2015 Pengutronix, Uwe Kleine-König + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. + */ + +#include + +#define to_siox_device(_dev) container_of((_dev), struct siox_device, dev) +struct siox_device { + struct list_head node; /* node in smaster->devices */ + struct siox_master *smaster; + struct device dev; + + const char *type; + size_t inbytes; + size_t outbytes; + u8 statustype; + + u8 status_read_clean; + u8 status_written; + u8 status_written_lastcycle; + bool connected; + + /* statistics */ + unsigned int watchdog_errors; + unsigned int status_errors; + + struct kernfs_node *status_errors_kn; + struct kernfs_node *watchdog_kn; + struct kernfs_node *watchdog_errors_kn; + struct kernfs_node *connected_kn; +}; + +bool siox_device_synced(struct siox_device *sdevice); +bool siox_device_connected(struct siox_device *sdevice); + +struct siox_driver { + int (*probe)(struct siox_device *sdevice); + int (*remove)(struct siox_device *sdevice); + void (*shutdown)(struct siox_device *sdevice); + + /* + * buf is big enough to hold sdev->inbytes - 1 bytes, the status byte + * is in the scope of the framework. 
+ */ + int (*set_data)(struct siox_device *sdevice, u8 status, u8 buf[]); + /* + * buf is big enough to hold sdev->outbytes - 1 bytes, the status byte + * is in the scope of the framework + */ + int (*get_data)(struct siox_device *sdevice, const u8 buf[]); + + struct device_driver driver; +}; + +static inline struct siox_driver *to_siox_driver(struct device_driver *driver) +{ + if (driver) + return container_of(driver, struct siox_driver, driver); + else + return NULL; +} + +int __siox_driver_register(struct siox_driver *sdriver, struct module *owner); + +static inline int siox_driver_register(struct siox_driver *sdriver) +{ + return __siox_driver_register(sdriver, THIS_MODULE); +} + +static inline void siox_driver_unregister(struct siox_driver *sdriver) +{ + return driver_unregister(&sdriver->driver); +} diff --git a/include/linux/siphash.h b/include/linux/siphash.h new file mode 100644 index 000000000..0bb5ecd50 --- /dev/null +++ b/include/linux/siphash.h @@ -0,0 +1,167 @@ +/* Copyright (C) 2016 Jason A. Donenfeld . All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. + * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. + */ + +#ifndef _LINUX_SIPHASH_H +#define _LINUX_SIPHASH_H + +#include +#include + +#define SIPHASH_ALIGNMENT __alignof__(u64) +typedef struct { + u64 key[2]; +} siphash_key_t; + +static inline bool siphash_key_is_zero(const siphash_key_t *key) +{ + return !(key->key[0] | key->key[1]); +} + +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); + +u64 siphash_1u64(const u64 a, const siphash_key_t *key); +u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); +u64 siphash_3u64(const u64 a, const u64 b, const u64 c, + const siphash_key_t *key); +u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d, + const siphash_key_t *key); +u64 siphash_1u32(const u32 a, const siphash_key_t *key); +u64 siphash_3u32(const u32 a, const u32 b, const u32 c, + const siphash_key_t *key); + +static inline u64 siphash_2u32(const u32 a, const u32 b, + const siphash_key_t *key) +{ + return siphash_1u64((u64)b << 32 | a, key); +} +static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c, + const u32 d, const siphash_key_t *key) +{ + return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key); +} + + +static inline u64 ___siphash_aligned(const __le64 *data, size_t len, + const siphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return siphash_1u32(le32_to_cpup((const __le32 *)data), key); + if (__builtin_constant_p(len) && len == 8) + return siphash_1u64(le64_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 16) + return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 24) + return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 32) + return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), le64_to_cpu(data[3]), + key); + return __siphash_aligned(data, len, key); +} + +/** + * siphash - compute 64-bit siphash PRF value + * @data: buffer to hash + * @size: size of @data + * @key: the siphash key + */ +static inline u64 siphash(const void *data, 
size_t len, + const siphash_key_t *key) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) + return __siphash_unaligned(data, len, key); + return ___siphash_aligned(data, len, key); +} + +#define HSIPHASH_ALIGNMENT __alignof__(unsigned long) +typedef struct { + unsigned long key[2]; +} hsiphash_key_t; + +u32 __hsiphash_aligned(const void *data, size_t len, + const hsiphash_key_t *key); +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key); + +u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); +u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); +u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c, + const hsiphash_key_t *key); +u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d, + const hsiphash_key_t *key); + +static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, + const hsiphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return hsiphash_1u32(le32_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 8) + return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 12) + return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + le32_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 16) + return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + le32_to_cpu(data[2]), le32_to_cpu(data[3]), + key); + return __hsiphash_aligned(data, len, key); +} + +/** + * hsiphash - compute 32-bit hsiphash PRF value + * @data: buffer to hash + * @size: size of @data + * @key: the hsiphash key + */ +static inline u32 hsiphash(const void *data, size_t len, + const hsiphash_key_t *key) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) + return __hsiphash_unaligned(data, len, key); + return ___hsiphash_aligned(data, len, key); +} + +/* + * These macros expose the raw SipHash and HalfSipHash permutations. + * Do not use them directly! If you think you have a use for them, + * be sure to CC the maintainer of this file explaining why. 
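A hypothetical usage sketch of the siphash() entry point above: hash a small fixed-size structure under a random key. The struct, variable and function names here are illustrative and not part of this patch; get_random_bytes() comes from <linux/random.h>.

struct example_flow_id {
	__be32 saddr, daddr;
	__be16 sport, dport;
};

static siphash_key_t example_hash_key __read_mostly;

static void example_hash_key_init(void)
{
	get_random_bytes(&example_hash_key, sizeof(example_hash_key));
}

static u64 example_flow_hash(const struct example_flow_id *fl)
{
	/* siphash() picks the aligned or unaligned implementation based on
	 * the pointer and CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS. */
	return siphash(fl, sizeof(*fl), &example_hash_key);
}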
+ */ + +#define SIPHASH_PERMUTATION(a, b, c, d) ( \ + (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \ + (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \ + (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \ + (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32)) + +#define SIPHASH_CONST_0 0x736f6d6570736575ULL +#define SIPHASH_CONST_1 0x646f72616e646f6dULL +#define SIPHASH_CONST_2 0x6c7967656e657261ULL +#define SIPHASH_CONST_3 0x7465646279746573ULL + +#define HSIPHASH_PERMUTATION(a, b, c, d) ( \ + (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \ + (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \ + (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \ + (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16)) + +#define HSIPHASH_CONST_0 0U +#define HSIPHASH_CONST_1 0U +#define HSIPHASH_CONST_2 0x6c796765U +#define HSIPHASH_CONST_3 0x74656462U + +#endif /* _LINUX_SIPHASH_H */ diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h new file mode 100644 index 000000000..50161b6af --- /dev/null +++ b/include/linux/sirfsoc_dma.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SIRFSOC_DMA_H_ +#define _SIRFSOC_DMA_H_ + +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id); + +#endif diff --git a/include/linux/sizes.h b/include/linux/sizes.h new file mode 100644 index 000000000..fbde0bc7e --- /dev/null +++ b/include/linux/sizes.h @@ -0,0 +1,51 @@ +/* + * include/linux/sizes.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_SIZES_H__ +#define __LINUX_SIZES_H__ + +#include + +#define SZ_1 0x00000001 +#define SZ_2 0x00000002 +#define SZ_4 0x00000004 +#define SZ_8 0x00000008 +#define SZ_16 0x00000010 +#define SZ_32 0x00000020 +#define SZ_64 0x00000040 +#define SZ_128 0x00000080 +#define SZ_256 0x00000100 +#define SZ_512 0x00000200 + +#define SZ_1K 0x00000400 +#define SZ_2K 0x00000800 +#define SZ_4K 0x00001000 +#define SZ_8K 0x00002000 +#define SZ_16K 0x00004000 +#define SZ_32K 0x00008000 +#define SZ_64K 0x00010000 +#define SZ_128K 0x00020000 +#define SZ_256K 0x00040000 +#define SZ_512K 0x00080000 + +#define SZ_1M 0x00100000 +#define SZ_2M 0x00200000 +#define SZ_4M 0x00400000 +#define SZ_8M 0x00800000 +#define SZ_16M 0x01000000 +#define SZ_32M 0x02000000 +#define SZ_64M 0x04000000 +#define SZ_128M 0x08000000 +#define SZ_256M 0x10000000 +#define SZ_512M 0x20000000 + +#define SZ_1G 0x40000000 +#define SZ_2G 0x80000000 + +#define SZ_4G _AC(0x100000000, ULL) + +#endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h new file mode 100644 index 000000000..62d9b0a63 --- /dev/null +++ b/include/linux/skb_array.h @@ -0,0 +1,220 @@ +/* + * Definitions for the 'struct skb_array' datastructure. + * + * Author: + * Michael S. Tsirkin + * + * Copyright (C) 2016 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Limited-size FIFO of skbs. Can be used more or less whenever + * sk_buff_head can be used, except you need to know the queue size in + * advance. + * Implemented as a type-safe wrapper around ptr_ring. 
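Since skb_array is, as the comment above says, a type-safe wrapper around ptr_ring, a short hypothetical producer/consumer sketch may help; it uses skb_array_init(), skb_array_produce(), skb_array_consume() and skb_array_cleanup(), all declared just below, and example_queue_use() is illustrative only.

static int example_queue_use(struct sk_buff *skb)
{
	struct skb_array q;
	struct sk_buff *out;
	int err;

	err = skb_array_init(&q, 256, GFP_KERNEL);
	if (err)
		return err;

	/* A non-zero return means the ring is full and the caller still
	 * owns the skb. */
	if (skb_array_produce(&q, skb))
		kfree_skb(skb);

	/* Returns NULL once the ring is empty. */
	while ((out = skb_array_consume(&q)) != NULL)
		kfree_skb(out);

	skb_array_cleanup(&q);
	return 0;
}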
+ */ + +#ifndef _LINUX_SKB_ARRAY_H +#define _LINUX_SKB_ARRAY_H 1 + +#ifdef __KERNEL__ +#include +#include +#include +#endif + +struct skb_array { + struct ptr_ring ring; +}; + +/* Might be slightly faster than skb_array_full below, but callers invoking + * this in a loop must use a compiler barrier, for example cpu_relax(). + */ +static inline bool __skb_array_full(struct skb_array *a) +{ + return __ptr_ring_full(&a->ring); +} + +static inline bool skb_array_full(struct skb_array *a) +{ + return ptr_ring_full(&a->ring); +} + +static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce(&a->ring, skb); +} + +static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce_irq(&a->ring, skb); +} + +static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce_bh(&a->ring, skb); +} + +static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb) +{ + return ptr_ring_produce_any(&a->ring, skb); +} + +/* Might be slightly faster than skb_array_empty below, but only safe if the + * array is never resized. Also, callers invoking this in a loop must take care + * to use a compiler barrier, for example cpu_relax(). + */ +static inline bool __skb_array_empty(struct skb_array *a) +{ + return __ptr_ring_empty(&a->ring); +} + +static inline struct sk_buff *__skb_array_peek(struct skb_array *a) +{ + return __ptr_ring_peek(&a->ring); +} + +static inline bool skb_array_empty(struct skb_array *a) +{ + return ptr_ring_empty(&a->ring); +} + +static inline bool skb_array_empty_bh(struct skb_array *a) +{ + return ptr_ring_empty_bh(&a->ring); +} + +static inline bool skb_array_empty_irq(struct skb_array *a) +{ + return ptr_ring_empty_irq(&a->ring); +} + +static inline bool skb_array_empty_any(struct skb_array *a) +{ + return ptr_ring_empty_any(&a->ring); +} + +static inline struct sk_buff *__skb_array_consume(struct skb_array *a) +{ + return __ptr_ring_consume(&a->ring); +} + +static inline struct sk_buff *skb_array_consume(struct skb_array *a) +{ + return ptr_ring_consume(&a->ring); +} + +static inline int skb_array_consume_batched(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched(&a->ring, (void **)array, n); +} + +static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a) +{ + return ptr_ring_consume_irq(&a->ring); +} + +static inline int skb_array_consume_batched_irq(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n); +} + +static inline struct sk_buff *skb_array_consume_any(struct skb_array *a) +{ + return ptr_ring_consume_any(&a->ring); +} + +static inline int skb_array_consume_batched_any(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_any(&a->ring, (void **)array, n); +} + + +static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a) +{ + return ptr_ring_consume_bh(&a->ring); +} + +static inline int skb_array_consume_batched_bh(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n); +} + +static inline int __skb_array_len_with_tag(struct sk_buff *skb) +{ + if (likely(skb)) { + int len = skb->len; + + if (skb_vlan_tag_present(skb)) + len += VLAN_HLEN; + + return len; + } else { + return 0; + } +} + +static inline int skb_array_peek_len(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL(&a->ring, 
__skb_array_len_with_tag); +} + +static inline int skb_array_peek_len_irq(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_peek_len_bh(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_peek_len_any(struct skb_array *a) +{ + return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag); +} + +static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp) +{ + return ptr_ring_init(&a->ring, size, gfp); +} + +static void __skb_array_destroy_skb(void *ptr) +{ + kfree_skb(ptr); +} + +static inline void skb_array_unconsume(struct skb_array *a, + struct sk_buff **skbs, int n) +{ + ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb); +} + +static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) +{ + return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); +} + +static inline int skb_array_resize_multiple(struct skb_array **rings, + int nrings, unsigned int size, + gfp_t gfp) +{ + BUILD_BUG_ON(offsetof(struct skb_array, ring)); + return ptr_ring_resize_multiple((struct ptr_ring **)rings, + nrings, size, gfp, + __skb_array_destroy_skb); +} + +static inline void skb_array_cleanup(struct skb_array *a) +{ + ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb); +} + +#endif /* _LINUX_SKB_ARRAY_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h new file mode 100644 index 000000000..f97734f34 --- /dev/null +++ b/include/linux/skbuff.h @@ -0,0 +1,4251 @@ +/* + * Definitions for the 'struct sk_buff' memory handlers. + * + * Authors: + * Alan Cox, + * Florian La Roche, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_SKBUFF_H +#define _LINUX_SKBUFF_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* The interface for checksum offload between the stack and networking drivers + * is as follows... + * + * A. IP checksum related features + * + * Drivers advertise checksum offload capabilities in the features of a device. + * From the stack's point of view these are capabilities offered by the driver, + * a driver typically only advertises features that it is capable of offloading + * to its device. + * + * The checksum related features are: + * + * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one + * IP (one's complement) checksum for any combination + * of protocols or protocol layering. The checksum is + * computed and set in a packet per the CHECKSUM_PARTIAL + * interface (see below). + * + * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain + * TCP or UDP packets over IPv4. These are specifically + * unencapsulated packets of the form IPv4|TCP or + * IPv4|UDP where the Protocol field in the IPv4 header + * is TCP or UDP. The IPv4 header may contain IP options + * This feature cannot be set in features for a device + * with NETIF_F_HW_CSUM also set. This feature is being + * DEPRECATED (see below). + * + * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain + * TCP or UDP packets over IPv6. 
These are specifically + * unencapsulated packets of the form IPv6|TCP or + * IPv6|UDP where the Next Header field in the IPv6 + * header is either TCP or UDP. IPv6 extension headers + * are not supported with this feature. This feature + * cannot be set in features for a device with + * NETIF_F_HW_CSUM also set. This feature is being + * DEPRECATED (see below). + * + * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload. + * This flag is only used to disable the RX checksum + * feature for a device. The stack will accept receive + * checksum indication in packets received on a device + * regardless of whether NETIF_F_RXCSUM is set. + * + * B. Checksumming of received packets by device. Indication of checksum + * verification is set in skb->ip_summed. Possible values are: + * + * CHECKSUM_NONE: + * + * Device did not checksum this packet e.g. due to lack of capabilities. + * The packet contains full (though not verified) checksum in packet but + * not in skb->csum. Thus, skb->csum is undefined in this case. + * + * CHECKSUM_UNNECESSARY: + * + * The hardware you're dealing with doesn't calculate the full checksum + * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums + * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY + * if their checksums are okay. skb->csum is still undefined in this case + * though. A driver or device must never modify the checksum field in the + * packet even if checksum is verified. + * + * CHECKSUM_UNNECESSARY is applicable to following protocols: + * TCP: IPv6 and IPv4. + * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a + * zero UDP checksum for either IPv4 or IPv6, the networking stack + * may perform further validation in this case. + * GRE: only if the checksum is present in the header. + * SCTP: indicates the CRC in SCTP header has been validated. + * FCOE: indicates the CRC in FC frame has been validated. + * + * skb->csum_level indicates the number of consecutive checksums found in + * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. + * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet + * and a device is able to verify the checksums for UDP (possibly zero), + * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to + * two. If the device were only able to verify the UDP checksum and not + * GRE, either because it doesn't support GRE checksum or because GRE + * checksum is bad, skb->csum_level would be set to zero (TCP checksum is + * not considered in this case). + * + * CHECKSUM_COMPLETE: + * + * This is the most generic way. The device supplied checksum of the _whole_ + * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the + * hardware doesn't need to parse L3/L4 headers to implement this. + * + * Notes: + * - Even if device supports only some protocols, but is able to produce + * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. + * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols. + * + * CHECKSUM_PARTIAL: + * + * A checksum is set up to be offloaded to a device as described in the + * output description for CHECKSUM_PARTIAL. This may occur on a packet + * received directly from another Linux OS, e.g., a virtualized Linux kernel + * on the same host, or it may be set in the input path in GRO or remote + * checksum offload.
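A hypothetical receive-path fragment showing how a driver might translate its hardware's checksum indications into the receive-side values described above; example_rx_csum() and its parameters are illustrative, not part of this patch, and the CHECKSUM_* constants are defined a little further down.

static void example_rx_csum(struct sk_buff *skb, bool hw_verified_l4,
			    bool have_full_csum, __wsum full_csum)
{
	if (hw_verified_l4) {
		/* Device parsed the headers and validated one L4 checksum;
		 * csum_level 0 means exactly one verified checksum. */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	} else if (have_full_csum) {
		/* Device supplied a checksum over the whole packet. */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = full_csum;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
}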
For the purposes of checksum verification, the checksum + * referred to by skb->csum_start + skb->csum_offset and any preceding + * checksums in the packet are considered verified. Any checksums in the + * packet that are after the checksum being offloaded are not considered to + * be verified. + * + * C. Checksumming on transmit for non-GSO. The stack requests checksum offload + * in the skb->ip_summed for a packet. Values are: + * + * CHECKSUM_PARTIAL: + * + * The driver is required to checksum the packet as seen by hard_start_xmit() + * from skb->csum_start up to the end, and to record/write the checksum at + * offset skb->csum_start + skb->csum_offset. A driver may verify that the + * csum_start and csum_offset values are valid values given the length and + * offset of the packet, however they should not attempt to validate that the + * checksum refers to a legitimate transport layer checksum-- it is the + * purview of the stack to validate that csum_start and csum_offset are set + * correctly. + * + * When the stack requests checksum offload for a packet, the driver MUST + * ensure that the checksum is set correctly. A driver can either offload the + * checksum calculation to the device, or call skb_checksum_help (in the case + * that the device does not support offload for a particular checksum). + * + * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of + * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate + * checksum offload capability. + * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based + * on network device checksumming capabilities: if a packet does not match + * them, skb_checksum_help or skb_crc32c_help (depending on the value of + * csum_not_inet, see item D.) is called to resolve the checksum. + * + * CHECKSUM_NONE: + * + * The skb was already checksummed by the protocol, or a checksum is not + * required. + * + * CHECKSUM_UNNECESSARY: + * + * This has the same meaning as CHECKSUM_NONE for checksum offload on + * output. + * + * CHECKSUM_COMPLETE: + * Not used in checksum output. If a driver observes a packet with this value + * set in skbuff, it should be treated as if CHECKSUM_NONE were set. + * + * D. Non-IP checksum (CRC) offloads + * + * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of + * offloading the SCTP CRC in a packet. To perform this offload the stack + * will set csum_start and csum_offset accordingly, set ip_summed to + * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in + * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c. + * A driver that supports both IP checksum offload and SCTP CRC32c offload + * must verify which offload is configured for a packet by testing the + * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve + * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1. + * + * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of + * offloading the FCOE CRC in a packet. To perform this offload the stack + * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset + * accordingly. Note that there is no indication in the skbuff that the + * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports + * both IP checksum offload and FCOE CRC offload must verify which offload + * is configured for a packet presumably by inspecting packet headers. + * + * E. Checksumming on output with GSO.
+ * + * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload + * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the + * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as + * part of the GSO operation is implied. If a checksum is being offloaded + * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset + * are set to refer to the outermost checksum being offload (two offloaded + * checksums are possible with UDP encapsulation). + */ + +/* Don't change this without changing skb_csum_unnecessary! */ +#define CHECKSUM_NONE 0 +#define CHECKSUM_UNNECESSARY 1 +#define CHECKSUM_COMPLETE 2 +#define CHECKSUM_PARTIAL 3 + +/* Maximum value in skb->csum_level */ +#define SKB_MAX_CSUM_LEVEL 3 + +#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define SKB_MAX_ORDER(X, ORDER) \ + SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X)) +#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) +#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2)) + +/* return minimum truesize of one skb containing X bytes of data */ +#define SKB_TRUESIZE(X) ((X) + \ + SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +struct net_device; +struct scatterlist; +struct pipe_inode_info; +struct iov_iter; +struct napi_struct; + +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +struct nf_conntrack { + atomic_t use; +}; +#endif + +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) +struct nf_bridge_info { + refcount_t use; + enum { + BRNF_PROTO_UNCHANGED, + BRNF_PROTO_8021Q, + BRNF_PROTO_PPPOE + } orig_proto:8; + u8 pkt_otherhost:1; + u8 in_prerouting:1; + u8 bridged_dnat:1; + __u16 frag_max_size; + struct net_device *physindev; + + /* always valid & non-NULL from FORWARD on, for physdev match */ + struct net_device *physoutdev; + union { + /* prerouting: detect dnat in orig/reply direction */ + __be32 ipv4_daddr; + struct in6_addr ipv6_daddr; + + /* after prerouting + nat detected: store original source + * mac since neigh resolution overwrites it, only used while + * skb is out in neigh layer. + */ + char neigh_header[8]; + }; +}; +#endif + +struct sk_buff_head { + /* These two members must be first. */ + struct sk_buff *next; + struct sk_buff *prev; + + __u32 qlen; + spinlock_t lock; +}; + +struct sk_buff; + +/* To allow 64K frame to be packed as single skb without frag_list we + * require 64K/PAGE_SIZE pages plus 1 additional page to allow for + * buffers which do not start on a page boundary. + * + * Since GRO uses frags we allocate at least 16 regardless of page + * size. + */ +#if (65536/PAGE_SIZE + 1) < 16 +#define MAX_SKB_FRAGS 16UL +#else +#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) +#endif +extern int sysctl_max_skb_frags; + +/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to + * segment using its current segmentation instead. 
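A hypothetical transmit-path fragment for the non-GSO case from section C of the comment above: either hand the csum_start/csum_offset pair to hardware or fall back to skb_checksum_help(); example_tx_csum() and the hw_can_offload flag are illustrative only.

static int example_tx_csum(struct sk_buff *skb, bool hw_can_offload)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;		/* nothing was requested */

	if (hw_can_offload) {
		/* A real driver would program its descriptor with
		 * skb->csum_start and skb->csum_offset here; the device
		 * must write the result at csum_start + csum_offset. */
		return 0;
	}

	/* Resolve the checksum in software instead. */
	return skb_checksum_help(skb);
}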
+ */ +#define GSO_BY_FRAGS 0xFFFF + +typedef struct skb_frag_struct skb_frag_t; + +struct skb_frag_struct { + struct { + struct page *p; + } page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; + __u32 size; +#else + __u16 page_offset; + __u16 size; +#endif +}; + +static inline unsigned int skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} + +static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) +{ + frag->size = size; +} + +static inline void skb_frag_size_add(skb_frag_t *frag, int delta) +{ + frag->size += delta; +} + +static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} + +static inline bool skb_frag_must_loop(struct page *p) +{ +#if defined(CONFIG_HIGHMEM) + if (PageHighMem(p)) + return true; +#endif + return false; +} + +/** + * skb_frag_foreach_page - loop over pages in a fragment + * + * @f: skb frag to operate on + * @f_off: offset from start of f->page.p + * @f_len: length from f_off to loop over + * @p: (temp var) current page + * @p_off: (temp var) offset from start of current page, + * non-zero only on first page. + * @p_len: (temp var) length in current page, + * < PAGE_SIZE only on first and last page. + * @copied: (temp var) length so far, excluding current p_len. + * + * A fragment can hold a compound page, in which case per-page + * operations, notably kmap_atomic, must be called for each + * regular page. + */ +#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \ + for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \ + p_off = (f_off) & (PAGE_SIZE - 1), \ + p_len = skb_frag_must_loop(p) ? \ + min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \ + copied = 0; \ + copied < f_len; \ + copied += p_len, p++, p_off = 0, \ + p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \ + +#define HAVE_HW_TIME_STAMP + +/** + * struct skb_shared_hwtstamps - hardware time stamps + * @hwtstamp: hardware time stamp transformed into duration + * since arbitrary point in time + * + * Software time stamps generated by ktime_get_real() are stored in + * skb->tstamp. + * + * hwtstamps can only be compared against other hwtstamps from + * the same device. + * + * This structure is attached to packets as part of the + * &skb_shared_info. Use skb_hwtstamps() to get a pointer. + */ +struct skb_shared_hwtstamps { + ktime_t hwtstamp; +}; + +/* Definitions for tx_flags in struct skb_shared_info */ +enum { + /* generate hardware time stamp */ + SKBTX_HW_TSTAMP = 1 << 0, + + /* generate software time stamp when queueing packet to NIC */ + SKBTX_SW_TSTAMP = 1 << 1, + + /* device driver is going to provide hardware time stamp */ + SKBTX_IN_PROGRESS = 1 << 2, + + /* device driver supports TX zero-copy buffers */ + SKBTX_DEV_ZEROCOPY = 1 << 3, + + /* generate wifi status information (where possible) */ + SKBTX_WIFI_STATUS = 1 << 4, + + /* This indicates at least one fragment might be overwritten + * (as in vmsplice(), sendfile() ...) 
+ * If we need to compute a TX checksum, we'll need to copy + * all frags to avoid possible bad checksum + */ + SKBTX_SHARED_FRAG = 1 << 5, + + /* generate software time stamp when entering packet scheduling */ + SKBTX_SCHED_TSTAMP = 1 << 6, +}; + +#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG) +#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ + SKBTX_SCHED_TSTAMP) +#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP) + +/* + * The callback notifies userspace to release buffers when skb DMA is done in + * lower device, the skb last reference should be 0 when calling this. + * The zerocopy_success argument is true if zero copy transmit occurred, + * false on data copy or out of memory error caused by data copy attempt. + * The ctx field is used to track device context. + * The desc field is used to track userspace buffer index. + */ +struct ubuf_info { + void (*callback)(struct ubuf_info *, bool zerocopy_success); + union { + struct { + unsigned long desc; + void *ctx; + }; + struct { + u32 id; + u16 len; + u16 zerocopy:1; + u32 bytelen; + }; + }; + refcount_t refcnt; + + struct mmpin { + struct user_struct *user; + unsigned int num_pg; + } mmp; +}; + +#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg)) + +int mm_account_pinned_pages(struct mmpin *mmp, size_t size); +void mm_unaccount_pinned_pages(struct mmpin *mmp); + +struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size); +struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size, + struct ubuf_info *uarg); + +static inline void sock_zerocopy_get(struct ubuf_info *uarg) +{ + refcount_inc(&uarg->refcnt); +} + +void sock_zerocopy_put(struct ubuf_info *uarg); +void sock_zerocopy_put_abort(struct ubuf_info *uarg); + +void sock_zerocopy_callback(struct ubuf_info *uarg, bool success); + +int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, + struct msghdr *msg, int len, + struct ubuf_info *uarg); + +/* This data is invariant across clones and lives at + * the end of the header data, ie. at skb->end. + */ +struct skb_shared_info { + __u8 __unused; + __u8 meta_len; + __u8 nr_frags; + __u8 tx_flags; + unsigned short gso_size; + /* Warning: this field is not always filled in (UFO)! */ + unsigned short gso_segs; + struct sk_buff *frag_list; + struct skb_shared_hwtstamps hwtstamps; + unsigned int gso_type; + u32 tskey; + + /* + * Warning : all fields before dataref are cleared in __alloc_skb() + */ + atomic_t dataref; + + /* Intermediate layers must ensure that destructor_arg + * remains valid until skb destructor */ + void * destructor_arg; + + /* must be last field, see pskb_expand_head() */ + skb_frag_t frags[MAX_SKB_FRAGS]; +}; + +/* We divide dataref into two halves. The higher 16 bits hold references + * to the payload part of skb->data. The lower 16 bits hold references to + * the entire skb->data. A clone of a headerless skb holds the length of + * the header in skb->hdr_len. + * + * All users must obey the rule that the skb->data reference count must be + * greater than or equal to the payload reference count. + * + * Holding a reference to the payload part means that the user does not + * care about modifications to the header part of skb->data. 
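The shared info block above carries the hardware timestamp described by struct skb_shared_hwtstamps. A hypothetical completion-path sketch of how a driver reports one; example_report_tx_tstamp() is illustrative only, and skb_tstamp_tx() is declared further down in this header.

static void example_report_tx_tstamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps hwts = {};

	/* Only report if the driver marked the skb for a hardware
	 * timestamp at transmit time. */
	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	hwts.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &hwts);
}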
+ */ +#define SKB_DATAREF_SHIFT 16 +#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) + + +enum { + SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ + SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ + SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ +}; + +enum { + SKB_GSO_TCPV4 = 1 << 0, + + /* This indicates the skb is from an untrusted source. */ + SKB_GSO_DODGY = 1 << 1, + + /* This indicates the tcp segment has CWR set. */ + SKB_GSO_TCP_ECN = 1 << 2, + + SKB_GSO_TCP_FIXEDID = 1 << 3, + + SKB_GSO_TCPV6 = 1 << 4, + + SKB_GSO_FCOE = 1 << 5, + + SKB_GSO_GRE = 1 << 6, + + SKB_GSO_GRE_CSUM = 1 << 7, + + SKB_GSO_IPXIP4 = 1 << 8, + + SKB_GSO_IPXIP6 = 1 << 9, + + SKB_GSO_UDP_TUNNEL = 1 << 10, + + SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, + + SKB_GSO_PARTIAL = 1 << 12, + + SKB_GSO_TUNNEL_REMCSUM = 1 << 13, + + SKB_GSO_SCTP = 1 << 14, + + SKB_GSO_ESP = 1 << 15, + + SKB_GSO_UDP = 1 << 16, + + SKB_GSO_UDP_L4 = 1 << 17, +}; + +#if BITS_PER_LONG > 32 +#define NET_SKBUFF_DATA_USES_OFFSET 1 +#endif + +#ifdef NET_SKBUFF_DATA_USES_OFFSET +typedef unsigned int sk_buff_data_t; +#else +typedef unsigned char *sk_buff_data_t; +#endif + +/** + * struct sk_buff - socket buffer + * @next: Next buffer in list + * @prev: Previous buffer in list + * @tstamp: Time we arrived/left + * @rbnode: RB tree node, alternative to next/prev for netem/tcp + * @sk: Socket we are owned by + * @dev: Device we arrived on/are leaving by + * @cb: Control buffer. Free for use by every layer. Put private vars here + * @_skb_refdst: destination entry (with norefcount bit) + * @sp: the security path, used for xfrm + * @len: Length of actual data + * @data_len: Data length + * @mac_len: Length of link layer header + * @hdr_len: writable header length of cloned skb + * @csum: Checksum (must include start/offset pair) + * @csum_start: Offset from skb->head where checksumming should start + * @csum_offset: Offset from csum_start where checksum should be stored + * @priority: Packet queueing priority + * @ignore_df: allow local fragmentation + * @cloned: Head may be cloned (check refcnt to be sure) + * @ip_summed: Driver fed us an IP checksum + * @nohdr: Payload reference only, must not modify header + * @pkt_type: Packet class + * @fclone: skbuff clone status + * @ipvs_property: skbuff is owned by ipvs + * @tc_skip_classify: do not classify packet. set by IFB device + * @tc_at_ingress: used within tc_classify to distinguish in/egress + * @tc_redirected: packet was redirected by a tc action + * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect + * @peeked: this packet has been seen already, so stats have been + * done for it, don't do them again + * @nf_trace: netfilter packet trace flag + * @protocol: Packet protocol from driver + * @destructor: Destruct function + * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue) + * @_nfct: Associated connection, if any (with nfctinfo bits) + * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c + * @skb_iif: ifindex of device we arrived on + * @tc_index: Traffic control index + * @hash: the packet hash + * @queue_mapping: Queue mapping for multiqueue devices + * @xmit_more: More SKBs are pending for this queue + * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves + * @ndisc_nodetype: router type (from link layer) + * @ooo_okay: allow the mapping of a socket to a queue to be changed + * @l4_hash: indicate hash is a canonical 4-tuple hash over transport + * ports. 
+ * @sw_hash: indicates hash was computed in software stack + * @wifi_acked_valid: wifi_acked was set + * @wifi_acked: whether frame was acked on wifi or not + * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS + * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL + * @dst_pending_confirm: need to confirm neighbour + * @decrypted: Decrypted SKB + * @napi_id: id of the NAPI struct this skb came from + * @secmark: security marking + * @mark: Generic packet mark + * @vlan_proto: vlan encapsulation protocol + * @vlan_tci: vlan tag control information + * @inner_protocol: Protocol (encapsulation) + * @inner_transport_header: Inner transport layer header (encapsulation) + * @inner_network_header: Network layer header (encapsulation) + * @inner_mac_header: Link layer header (encapsulation) + * @transport_header: Transport layer header + * @network_header: Network layer header + * @mac_header: Link layer header + * @tail: Tail pointer + * @end: End pointer + * @head: Head of buffer + * @data: Data head pointer + * @truesize: Buffer size + * @users: User count - see {datagram,tcp}.c + */ + +struct sk_buff { + union { + struct { + /* These two members must be first. */ + struct sk_buff *next; + struct sk_buff *prev; + + union { + struct net_device *dev; + /* Some protocols might use this space to store information, + * while device pointer would be NULL. + * UDP receive path is one user. + */ + unsigned long dev_scratch; + }; + }; + struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */ + struct list_head list; + }; + + union { + struct sock *sk; + int ip_defrag_offset; + }; + + union { + ktime_t tstamp; + u64 skb_mstamp; + }; + /* + * This is the control buffer. It is free to use for every + * layer. Please put your private variables there. If you + * want to keep them across layers you have to do a skb_clone() + * first. This is owned by whoever has the skb queued ATM. + */ + char cb[48] __aligned(8); + + union { + struct { + unsigned long _skb_refdst; + void (*destructor)(struct sk_buff *skb); + }; + struct list_head tcp_tsorted_anchor; + }; + +#ifdef CONFIG_XFRM + struct sec_path *sp; +#endif +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + unsigned long _nfct; +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + struct nf_bridge_info *nf_bridge; +#endif + unsigned int len, + data_len; + __u16 mac_len, + hdr_len; + + /* Following fields are _not_ copied in __copy_skb_header() + * Note that queue_mapping is here mostly to fill a hole. 
+ */ + __u16 queue_mapping; + +/* if you move cloned around you also must adapt those constants */ +#ifdef __BIG_ENDIAN_BITFIELD +#define CLONED_MASK (1 << 7) +#else +#define CLONED_MASK 1 +#endif +#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset) + + __u8 __cloned_offset[0]; + __u8 cloned:1, + nohdr:1, + fclone:2, + peeked:1, + head_frag:1, + xmit_more:1, + pfmemalloc:1; + + /* fields enclosed in headers_start/headers_end are copied + * using a single memcpy() in __copy_skb_header() + */ + /* private: */ + __u32 headers_start[0]; + /* public: */ + +/* if you move pkt_type around you also must adapt those constants */ +#ifdef __BIG_ENDIAN_BITFIELD +#define PKT_TYPE_MAX (7 << 5) +#else +#define PKT_TYPE_MAX 7 +#endif +#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset) + + __u8 __pkt_type_offset[0]; + __u8 pkt_type:3; + __u8 ignore_df:1; + __u8 nf_trace:1; + __u8 ip_summed:2; + __u8 ooo_okay:1; + + __u8 l4_hash:1; + __u8 sw_hash:1; + __u8 wifi_acked_valid:1; + __u8 wifi_acked:1; + __u8 no_fcs:1; + /* Indicates the inner headers are valid in the skbuff. */ + __u8 encapsulation:1; + __u8 encap_hdr_csum:1; + __u8 csum_valid:1; + + __u8 csum_complete_sw:1; + __u8 csum_level:2; + __u8 csum_not_inet:1; + __u8 dst_pending_confirm:1; +#ifdef CONFIG_IPV6_NDISC_NODETYPE + __u8 ndisc_nodetype:2; +#endif + __u8 ipvs_property:1; + + __u8 inner_protocol_type:1; + __u8 remcsum_offload:1; +#ifdef CONFIG_NET_SWITCHDEV + __u8 offload_fwd_mark:1; + __u8 offload_mr_fwd_mark:1; +#endif +#ifdef CONFIG_NET_CLS_ACT + __u8 tc_skip_classify:1; + __u8 tc_at_ingress:1; + __u8 tc_redirected:1; + __u8 tc_from_ingress:1; +#endif +#ifdef CONFIG_TLS_DEVICE + __u8 decrypted:1; +#endif + +#ifdef CONFIG_NET_SCHED + __u16 tc_index; /* traffic control index */ +#endif + + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + __be16 vlan_proto; + __u16 vlan_tci; +#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS) + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; +#endif +#ifdef CONFIG_NETWORK_SECMARK + __u32 secmark; +#endif + + union { + __u32 mark; + __u32 reserved_tailroom; + }; + + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + + /* private: */ + __u32 headers_end[0]; + /* public: */ + + /* These elements must be at the end, see alloc_skb() for details. */ + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head, + *data; + unsigned int truesize; + refcount_t users; +}; + +#ifdef __KERNEL__ +/* + * Handling routines are only of interest to the kernel + */ + +#define SKB_ALLOC_FCLONE 0x01 +#define SKB_ALLOC_RX 0x02 +#define SKB_ALLOC_NAPI 0x04 + +/* Returns true if the skb was allocated from PFMEMALLOC reserves */ +static inline bool skb_pfmemalloc(const struct sk_buff *skb) +{ + return unlikely(skb->pfmemalloc); +} + +/* + * skb might have a dst pointer attached, refcounted or not. + * _skb_refdst low order bit is set if refcount was _not_ taken + */ +#define SKB_DST_NOREF 1UL +#define SKB_DST_PTRMASK ~(SKB_DST_NOREF) + +#define SKB_NFCT_PTRMASK ~(7UL) +/** + * skb_dst - returns skb dst_entry + * @skb: buffer + * + * Returns skb dst_entry, regardless of reference taken or not. 
+ */ +static inline struct dst_entry *skb_dst(const struct sk_buff *skb) +{ + /* If refdst was not refcounted, check we still are in a + * rcu_read_lock section + */ + WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && + !rcu_read_lock_held() && + !rcu_read_lock_bh_held()); + return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); +} + +/** + * skb_dst_set - sets skb dst + * @skb: buffer + * @dst: dst entry + * + * Sets skb dst, assuming a reference was taken on dst and should + * be released by skb_dst_drop() + */ +static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) +{ + skb->_skb_refdst = (unsigned long)dst; +} + +/** + * skb_dst_set_noref - sets skb dst, hopefully, without taking reference + * @skb: buffer + * @dst: dst entry + * + * Sets skb dst, assuming a reference was not taken on dst. + * If dst entry is cached, we do not take reference and dst_release + * will be avoided by refdst_drop. If dst entry is not cached, we take + * reference, so that last dst_release can destroy the dst immediately. + */ +static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) +{ + WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; +} + +/** + * skb_dst_is_noref - Test if skb dst isn't refcounted + * @skb: buffer + */ +static inline bool skb_dst_is_noref(const struct sk_buff *skb) +{ + return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); +} + +static inline struct rtable *skb_rtable(const struct sk_buff *skb) +{ + return (struct rtable *)skb_dst(skb); +} + +/* For mangling skb->pkt_type from user space side from applications + * such as nft, tc, etc, we only allow a conservative subset of + * possible pkt_types to be set. +*/ +static inline bool skb_pkt_type_ok(u32 ptype) +{ + return ptype <= PACKET_OTHERHOST; +} + +static inline unsigned int skb_napi_id(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + return skb->napi_id; +#else + return 0; +#endif +} + +/* decrement the reference count and return true if we can free the skb */ +static inline bool skb_unref(struct sk_buff *skb) +{ + if (unlikely(!skb)) + return false; + if (likely(refcount_read(&skb->users) == 1)) + smp_rmb(); + else if (likely(!refcount_dec_and_test(&skb->users))) + return false; + + return true; +} + +void skb_release_head_state(struct sk_buff *skb); +void kfree_skb(struct sk_buff *skb); +void kfree_skb_list(struct sk_buff *segs); +void skb_tx_error(struct sk_buff *skb); +void consume_skb(struct sk_buff *skb); +void __consume_stateless_skb(struct sk_buff *skb); +void __kfree_skb(struct sk_buff *skb); +extern struct kmem_cache *skbuff_head_cache; + +void kfree_skb_partial(struct sk_buff *skb, bool head_stolen); +bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, + bool *fragstolen, int *delta_truesize); + +struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, + int node); +struct sk_buff *__build_skb(void *data, unsigned int frag_size); +struct sk_buff *build_skb(void *data, unsigned int frag_size); +static inline struct sk_buff *alloc_skb(unsigned int size, + gfp_t priority) +{ + return __alloc_skb(size, priority, 0, NUMA_NO_NODE); +} + +struct sk_buff *alloc_skb_with_frags(unsigned long header_len, + unsigned long data_len, + int max_page_order, + int *errcode, + gfp_t gfp_mask); + +/* Layout of fast clones : [skb1][skb2][fclone_ref] */ +struct sk_buff_fclones { + struct sk_buff skb1; + + struct sk_buff skb2; + + refcount_t fclone_ref; +}; + +/** + * 
skb_fclone_busy - check if fclone is busy + * @sk: socket + * @skb: buffer + * + * Returns true if skb is a fast clone, and its clone is not freed. + * Some drivers call skb_orphan() in their ndo_start_xmit(), + * so we also check that this didnt happen. + */ +static inline bool skb_fclone_busy(const struct sock *sk, + const struct sk_buff *skb) +{ + const struct sk_buff_fclones *fclones; + + fclones = container_of(skb, struct sk_buff_fclones, skb1); + + return skb->fclone == SKB_FCLONE_ORIG && + refcount_read(&fclones->fclone_ref) > 1 && + fclones->skb2.sk == sk; +} + +static inline struct sk_buff *alloc_skb_fclone(unsigned int size, + gfp_t priority) +{ + return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); +} + +struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); +void skb_headers_offset_update(struct sk_buff *skb, int off); +int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); +struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); +void skb_copy_header(struct sk_buff *new, const struct sk_buff *old); +struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); +struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, + gfp_t gfp_mask, bool fclone); +static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, + gfp_t gfp_mask) +{ + return __pskb_copy_fclone(skb, headroom, gfp_mask, false); +} + +int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); +struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, + unsigned int headroom); +struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, + int newtailroom, gfp_t priority); +int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); +int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); +int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); +int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error); + +/** + * skb_pad - zero pad the tail of an skb + * @skb: buffer to pad + * @pad: space to pad + * + * Ensure that a buffer is followed by a padding area that is zero + * filled. Used by network drivers which may DMA or transfer data + * beyond the buffer end onto the wire. + * + * May return error in out of memory cases. The skb is freed on error. + */ +static inline int skb_pad(struct sk_buff *skb, int pad) +{ + return __skb_pad(skb, pad, true); +} +#define dev_kfree_skb(a) consume_skb(a) + +int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, + int getfrag(void *from, char *to, int offset, + int len, int odd, struct sk_buff *skb), + void *from, int length); + +int skb_append_pagefrags(struct sk_buff *skb, struct page *page, + int offset, size_t size); + +struct skb_seq_state { + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; +}; + +void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, + unsigned int to, struct skb_seq_state *st); +unsigned int skb_seq_read(unsigned int consumed, const u8 **data, + struct skb_seq_state *st); +void skb_abort_seq_read(struct skb_seq_state *st); + +unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, + unsigned int to, struct ts_config *config); + +/* + * Packet hash types specify the type of hash in skb_set_hash. 
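A hypothetical receive-path fragment applying the hash-type rules that this comment goes on to spell out: report the hash with the most specific type the hardware actually guarantees; example_rx_hash() is illustrative only, and skb_set_hash()/PKT_HASH_TYPE_* appear just below.

static void example_rx_hash(struct sk_buff *skb, u32 rss_hash, bool l4_valid)
{
	/* Only claim an L4 hash when the device really hashed the ports;
	 * otherwise downgrade the report to L3. */
	skb_set_hash(skb, rss_hash,
		     l4_valid ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}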
+ * + * Hash types refer to the protocol layer addresses which are used to + * construct a packet's hash. The hashes are used to differentiate or identify + * flows of the protocol layer for the hash type. Hash types are either + * layer-2 (L2), layer-3 (L3), or layer-4 (L4). + * + * Properties of hashes: + * + * 1) Two packets in different flows have different hash values + * 2) Two packets in the same flow should have the same hash value + * + * A hash at a higher layer is considered to be more specific. A driver should + * set the most specific hash possible. + * + * A driver cannot indicate a more specific hash than the layer at which a hash + * was computed. For instance an L3 hash cannot be set as an L4 hash. + * + * A driver may indicate a hash level which is less specific than the + * actual layer the hash was computed on. For instance, a hash computed + * at L4 may be considered an L3 hash. This should only be done if the + * driver can't unambiguously determine that the HW computed the hash at + * the higher layer. Note that the "should" in the second property above + * permits this. + */ +enum pkt_hash_types { + PKT_HASH_TYPE_NONE, /* Undefined type */ + PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ + PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */ + PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ +}; + +static inline void skb_clear_hash(struct sk_buff *skb) +{ + skb->hash = 0; + skb->sw_hash = 0; + skb->l4_hash = 0; +} + +static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) +{ + if (!skb->l4_hash) + skb_clear_hash(skb); +} + +static inline void +__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4) +{ + skb->l4_hash = is_l4; + skb->sw_hash = is_sw; + skb->hash = hash; +} + +static inline void +skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) +{ + /* Used by drivers to set hash from HW */ + __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4); +} + +static inline void +__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) +{ + __skb_set_hash(skb, hash, true, is_l4); +} + +void __skb_get_hash(struct sk_buff *skb); +u32 __skb_get_hash_symmetric(const struct sk_buff *skb); +u32 skb_get_poff(const struct sk_buff *skb); +u32 __skb_get_poff(const struct sk_buff *skb, void *data, + const struct flow_keys_basic *keys, int hlen); +__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, + void *data, int hlen_proto); + +static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, + int thoff, u8 ip_proto) +{ + return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0); +} + +void skb_flow_dissector_init(struct flow_dissector *flow_dissector, + const struct flow_dissector_key *key, + unsigned int key_count); + +bool __skb_flow_dissect(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, + void *data, __be16 proto, int nhoff, int hlen, + unsigned int flags); + +static inline bool skb_flow_dissect(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, unsigned int flags) +{ + return __skb_flow_dissect(skb, flow_dissector, target_container, + NULL, 0, 0, 0, flags); +} + +static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct flow_keys *flow, + unsigned int flags) +{ + memset(flow, 0, sizeof(*flow)); + return __skb_flow_dissect(skb, &flow_keys_dissector, flow, + NULL, 0, 0, 0, flags); +} + +static inline bool +skb_flow_dissect_flow_keys_basic(const struct sk_buff *skb, + 
struct flow_keys_basic *flow, void *data, + __be16 proto, int nhoff, int hlen, + unsigned int flags) +{ + memset(flow, 0, sizeof(*flow)); + return __skb_flow_dissect(skb, &flow_keys_basic_dissector, flow, + data, proto, nhoff, hlen, flags); +} + +void +skb_flow_dissect_tunnel_info(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + +static inline __u32 skb_get_hash(struct sk_buff *skb) +{ + if (!skb->l4_hash && !skb->sw_hash) + __skb_get_hash(skb); + + return skb->hash; +} + +static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6) +{ + if (!skb->l4_hash && !skb->sw_hash) { + struct flow_keys keys; + __u32 hash = __get_hash_from_flowi6(fl6, &keys); + + __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); + } + + return skb->hash; +} + +__u32 skb_get_hash_perturb(const struct sk_buff *skb, + const siphash_key_t *perturb); + +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ + return skb->hash; +} + +static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) +{ + to->hash = from->hash; + to->sw_hash = from->sw_hash; + to->l4_hash = from->l4_hash; +}; + +#ifdef NET_SKBUFF_DATA_USES_OFFSET +static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) +{ + return skb->head + skb->end; +} + +static inline unsigned int skb_end_offset(const struct sk_buff *skb) +{ + return skb->end; +} +#else +static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) +{ + return skb->end; +} + +static inline unsigned int skb_end_offset(const struct sk_buff *skb) +{ + return skb->end - skb->head; +} +#endif + +/* Internal */ +#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) + +static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) +{ + return &skb_shinfo(skb)->hwtstamps; +} + +static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) +{ + bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY; + + return is_zcopy ? 
skb_uarg(skb) : NULL; +} + +static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg) +{ + if (skb && uarg && !skb_zcopy(skb)) { + sock_zerocopy_get(uarg); + skb_shinfo(skb)->destructor_arg = uarg; + skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; + } +} + +static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) +{ + skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); + skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; +} + +static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) +{ + return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; +} + +static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) +{ + return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); +} + +/* Release a reference on a zerocopy structure */ +static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) +{ + struct ubuf_info *uarg = skb_zcopy(skb); + + if (uarg) { + if (skb_zcopy_is_nouarg(skb)) { + /* no notification callback */ + } else if (uarg->callback == sock_zerocopy_callback) { + uarg->zerocopy = uarg->zerocopy && zerocopy; + sock_zerocopy_put(uarg); + } else { + uarg->callback(uarg, zerocopy); + } + + skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; + } +} + +/* Abort a zerocopy operation and revert zckey on error in send syscall */ +static inline void skb_zcopy_abort(struct sk_buff *skb) +{ + struct ubuf_info *uarg = skb_zcopy(skb); + + if (uarg) { + sock_zerocopy_put_abort(uarg); + skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; + } +} + +static inline void skb_mark_not_on_list(struct sk_buff *skb) +{ + skb->next = NULL; +} + +/* Iterate through singly-linked GSO fragments of an skb. */ +#define skb_list_walk_safe(first, skb, next_skb) \ + for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \ + (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL) + +static inline void skb_list_del_init(struct sk_buff *skb) +{ + __list_del_entry(&skb->list); + skb_mark_not_on_list(skb); +} + +/** + * skb_queue_empty - check if a queue is empty + * @list: queue head + * + * Returns true if the queue is empty, false otherwise. + */ +static inline int skb_queue_empty(const struct sk_buff_head *list) +{ + return list->next == (const struct sk_buff *) list; +} + +/** + * skb_queue_empty_lockless - check if a queue is empty + * @list: queue head + * + * Returns true if the queue is empty, false otherwise. + * This variant can be used in lockless contexts. + */ +static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list) +{ + return READ_ONCE(list->next) == (const struct sk_buff *) list; +} + + +/** + * skb_queue_is_last - check if skb is the last entry in the queue + * @list: queue head + * @skb: buffer + * + * Returns true if @skb is the last buffer on the list. + */ +static inline bool skb_queue_is_last(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return skb->next == (const struct sk_buff *) list; +} + +/** + * skb_queue_is_first - check if skb is the first entry in the queue + * @list: queue head + * @skb: buffer + * + * Returns true if @skb is the first buffer on the list. + */ +static inline bool skb_queue_is_first(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return skb->prev == (const struct sk_buff *) list; +} + +/** + * skb_queue_next - return the next packet in the queue + * @list: queue head + * @skb: current buffer + * + * Return the next packet in @list after @skb. 
It is only valid to + * call this if skb_queue_is_last() evaluates to false. + */ +static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + /* This BUG_ON may seem severe, but if we just return then we + * are going to dereference garbage. + */ + BUG_ON(skb_queue_is_last(list, skb)); + return skb->next; +} + +/** + * skb_queue_prev - return the prev packet in the queue + * @list: queue head + * @skb: current buffer + * + * Return the prev packet in @list before @skb. It is only valid to + * call this if skb_queue_is_first() evaluates to false. + */ +static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + /* This BUG_ON may seem severe, but if we just return then we + * are going to dereference garbage. + */ + BUG_ON(skb_queue_is_first(list, skb)); + return skb->prev; +} + +/** + * skb_get - reference buffer + * @skb: buffer to reference + * + * Makes another reference to a socket buffer and returns a pointer + * to the buffer. + */ +static inline struct sk_buff *skb_get(struct sk_buff *skb) +{ + refcount_inc(&skb->users); + return skb; +} + +/* + * If users == 1, we are the only owner and can avoid redundant atomic changes. + */ + +/** + * skb_cloned - is the buffer a clone + * @skb: buffer to check + * + * Returns true if the buffer was generated with skb_clone() and is + * one of multiple shared copies of the buffer. Cloned buffers are + * shared data so must not be written to under normal circumstances. + */ +static inline int skb_cloned(const struct sk_buff *skb) +{ + return skb->cloned && + (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; +} + +static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) +{ + might_sleep_if(gfpflags_allow_blocking(pri)); + + if (skb_cloned(skb)) + return pskb_expand_head(skb, 0, 0, pri); + + return 0; +} + +/** + * skb_header_cloned - is the header a clone + * @skb: buffer to check + * + * Returns true if modifying the header part of the buffer requires + * the data to be copied. + */ +static inline int skb_header_cloned(const struct sk_buff *skb) +{ + int dataref; + + if (!skb->cloned) + return 0; + + dataref = atomic_read(&skb_shinfo(skb)->dataref); + dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); + return dataref != 1; +} + +static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri) +{ + might_sleep_if(gfpflags_allow_blocking(pri)); + + if (skb_header_cloned(skb)) + return pskb_expand_head(skb, 0, 0, pri); + + return 0; +} + +/** + * __skb_header_release - release reference to header + * @skb: buffer to operate on + */ +static inline void __skb_header_release(struct sk_buff *skb) +{ + skb->nohdr = 1; + atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); +} + + +/** + * skb_shared - is the buffer shared + * @skb: buffer to check + * + * Returns true if more than one person has a reference to this + * buffer. + */ +static inline int skb_shared(const struct sk_buff *skb) +{ + return refcount_read(&skb->users) != 1; +} + +/** + * skb_share_check - check if buffer is shared and if so clone it + * @skb: buffer to check + * @pri: priority for memory allocation + * + * If the buffer is shared the buffer is cloned and the old copy + * drops a reference. A new clone with a single reference is returned. + * If the buffer is not shared the original buffer is returned. When + * being called from interrupt status or with spinlocks held pri must + * be GFP_ATOMIC. 
+ * + * NULL is returned on a memory allocation failure. + */ +static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) +{ + might_sleep_if(gfpflags_allow_blocking(pri)); + if (skb_shared(skb)) { + struct sk_buff *nskb = skb_clone(skb, pri); + + if (likely(nskb)) + consume_skb(skb); + else + kfree_skb(skb); + skb = nskb; + } + return skb; +} + +/* + * Copy shared buffers into a new sk_buff. We effectively do COW on + * packets to handle cases where we have a local reader and forward + * and a couple of other messy ones. The normal one is tcpdumping + * a packet thats being forwarded. + */ + +/** + * skb_unshare - make a copy of a shared buffer + * @skb: buffer to check + * @pri: priority for memory allocation + * + * If the socket buffer is a clone then this function creates a new + * copy of the data, drops a reference count on the old copy and returns + * the new copy with the reference count at 1. If the buffer is not a clone + * the original buffer is returned. When called with a spinlock held or + * from interrupt state @pri must be %GFP_ATOMIC + * + * %NULL is returned on a memory allocation failure. + */ +static inline struct sk_buff *skb_unshare(struct sk_buff *skb, + gfp_t pri) +{ + might_sleep_if(gfpflags_allow_blocking(pri)); + if (skb_cloned(skb)) { + struct sk_buff *nskb = skb_copy(skb, pri); + + /* Free our shared copy */ + if (likely(nskb)) + consume_skb(skb); + else + kfree_skb(skb); + skb = nskb; + } + return skb; +} + +/** + * skb_peek - peek at the head of an &sk_buff_head + * @list_: list to peek at + * + * Peek an &sk_buff. Unlike most other operations you _MUST_ + * be careful with this one. A peek leaves the buffer on the + * list and someone else may run off with it. You must hold + * the appropriate locks or have a private queue to do this. + * + * Returns %NULL for an empty list or a pointer to the head element. + * The reference count is not incremented and the reference is therefore + * volatile. Use with caution. + */ +static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) +{ + struct sk_buff *skb = list_->next; + + if (skb == (struct sk_buff *)list_) + skb = NULL; + return skb; +} + +/** + * skb_peek_next - peek skb following the given one from a queue + * @skb: skb to start from + * @list_: list to peek at + * + * Returns %NULL when the end of the list is met or a pointer to the + * next element. The reference count is not incremented and the + * reference is therefore volatile. Use with caution. + */ +static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, + const struct sk_buff_head *list_) +{ + struct sk_buff *next = skb->next; + + if (next == (struct sk_buff *)list_) + next = NULL; + return next; +} + +/** + * skb_peek_tail - peek at the tail of an &sk_buff_head + * @list_: list to peek at + * + * Peek an &sk_buff. Unlike most other operations you _MUST_ + * be careful with this one. A peek leaves the buffer on the + * list and someone else may run off with it. You must hold + * the appropriate locks or have a private queue to do this. + * + * Returns %NULL for an empty list or a pointer to the tail element. + * The reference count is not incremented and the reference is therefore + * volatile. Use with caution. 
+ */ +static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) +{ + struct sk_buff *skb = READ_ONCE(list_->prev); + + if (skb == (struct sk_buff *)list_) + skb = NULL; + return skb; + +} + +/** + * skb_queue_len - get queue length + * @list_: list to measure + * + * Return the length of an &sk_buff queue. + */ +static inline __u32 skb_queue_len(const struct sk_buff_head *list_) +{ + return list_->qlen; +} + +/** + * skb_queue_len_lockless - get queue length + * @list_: list to measure + * + * Return the length of an &sk_buff queue. + * This variant can be used in lockless contexts. + */ +static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) +{ + return READ_ONCE(list_->qlen); +} + +/** + * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head + * @list: queue to initialize + * + * This initializes only the list and queue length aspects of + * an sk_buff_head object. This allows to initialize the list + * aspects of an sk_buff_head without reinitializing things like + * the spinlock. It can also be used for on-stack sk_buff_head + * objects where the spinlock is known to not be used. + */ +static inline void __skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} + +/* + * This function creates a split out lock class for each invocation; + * this is needed for now since a whole lot of users of the skb-queue + * infrastructure in drivers have different locking usage (in hardirq) + * than the networking core (in softirq only). In the long run either the + * network layer or drivers should need annotation to consolidate the + * main types of usage into 3 classes. + */ +static inline void skb_queue_head_init(struct sk_buff_head *list) +{ + spin_lock_init(&list->lock); + __skb_queue_head_init(list); +} + +static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) +{ + skb_queue_head_init(list); + lockdep_set_class(&list->lock, class); +} + +/* + * Insert an sk_buff on a list. + * + * The "__skb_xxxx()" functions are the non-atomic ones that + * can only be called with interrupts disabled. 
+ */ +void skb_insert(struct sk_buff *old, struct sk_buff *newsk, + struct sk_buff_head *list); +static inline void __skb_insert(struct sk_buff *newsk, + struct sk_buff *prev, struct sk_buff *next, + struct sk_buff_head *list) +{ + /* See skb_queue_empty_lockless() and skb_peek_tail() + * for the opposite READ_ONCE() + */ + WRITE_ONCE(newsk->next, next); + WRITE_ONCE(newsk->prev, prev); + WRITE_ONCE(next->prev, newsk); + WRITE_ONCE(prev->next, newsk); + WRITE_ONCE(list->qlen, list->qlen + 1); +} + +static inline void __skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff *prev, + struct sk_buff *next) +{ + struct sk_buff *first = list->next; + struct sk_buff *last = list->prev; + + WRITE_ONCE(first->prev, prev); + WRITE_ONCE(prev->next, first); + + WRITE_ONCE(last->next, next); + WRITE_ONCE(next->prev, last); +} + +/** + * skb_queue_splice - join two skb lists, this is designed for stacks + * @list: the new list to add + * @head: the place to add it in the first list + */ +static inline void skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + } +} + +/** + * skb_queue_splice_init - join two skb lists and reinitialise the emptied list + * @list: the new list to add + * @head: the place to add it in the first list + * + * The list at @list is reinitialised + */ +static inline void skb_queue_splice_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } +} + +/** + * skb_queue_splice_tail - join two skb lists, each list being a queue + * @list: the new list to add + * @head: the place to add it in the first list + */ +static inline void skb_queue_splice_tail(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + } +} + +/** + * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list + * @list: the new list to add + * @head: the place to add it in the first list + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void skb_queue_splice_tail_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } +} + +/** + * __skb_queue_after - queue a buffer at the list head + * @list: list to use + * @prev: place after this buffer + * @newsk: buffer to queue + * + * Queue a buffer int the middle of a list. This function takes no locks + * and you must therefore hold required locks before calling it. + * + * A buffer cannot be placed on two lists at the same time. 
+ */ +static inline void __skb_queue_after(struct sk_buff_head *list, + struct sk_buff *prev, + struct sk_buff *newsk) +{ + __skb_insert(newsk, prev, prev->next, list); +} + +void skb_append(struct sk_buff *old, struct sk_buff *newsk, + struct sk_buff_head *list); + +static inline void __skb_queue_before(struct sk_buff_head *list, + struct sk_buff *next, + struct sk_buff *newsk) +{ + __skb_insert(newsk, next->prev, next, list); +} + +/** + * __skb_queue_head - queue a buffer at the list head + * @list: list to use + * @newsk: buffer to queue + * + * Queue a buffer at the start of a list. This function takes no locks + * and you must therefore hold required locks before calling it. + * + * A buffer cannot be placed on two lists at the same time. + */ +void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); +static inline void __skb_queue_head(struct sk_buff_head *list, + struct sk_buff *newsk) +{ + __skb_queue_after(list, (struct sk_buff *)list, newsk); +} + +/** + * __skb_queue_tail - queue a buffer at the list tail + * @list: list to use + * @newsk: buffer to queue + * + * Queue a buffer at the end of a list. This function takes no locks + * and you must therefore hold required locks before calling it. + * + * A buffer cannot be placed on two lists at the same time. + */ +void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); +static inline void __skb_queue_tail(struct sk_buff_head *list, + struct sk_buff *newsk) +{ + __skb_queue_before(list, (struct sk_buff *)list, newsk); +} + +/* + * remove sk_buff from list. _Must_ be called atomically, and with + * the list known.. + */ +void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); +static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) +{ + struct sk_buff *next, *prev; + + WRITE_ONCE(list->qlen, list->qlen - 1); + next = skb->next; + prev = skb->prev; + skb->next = skb->prev = NULL; + WRITE_ONCE(next->prev, prev); + WRITE_ONCE(prev->next, next); +} + +/** + * __skb_dequeue - remove from the head of the queue + * @list: list to dequeue from + * + * Remove the head of the list. This function does not take any locks + * so must be used with appropriate locks held only. The head item is + * returned or %NULL if the list is empty. + */ +struct sk_buff *skb_dequeue(struct sk_buff_head *list); +static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) +{ + struct sk_buff *skb = skb_peek(list); + if (skb) + __skb_unlink(skb, list); + return skb; +} + +/** + * __skb_dequeue_tail - remove from the tail of the queue + * @list: list to dequeue from + * + * Remove the tail of the list. This function does not take any locks + * so must be used with appropriate locks held only. The tail item is + * returned or %NULL if the list is empty. 
+ */ +struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); +static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) +{ + struct sk_buff *skb = skb_peek_tail(list); + if (skb) + __skb_unlink(skb, list); + return skb; +} + + +static inline bool skb_is_nonlinear(const struct sk_buff *skb) +{ + return skb->data_len; +} + +static inline unsigned int skb_headlen(const struct sk_buff *skb) +{ + return skb->len - skb->data_len; +} + +static inline unsigned int __skb_pagelen(const struct sk_buff *skb) +{ + unsigned int i, len = 0; + + for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) + len += skb_frag_size(&skb_shinfo(skb)->frags[i]); + return len; +} + +static inline unsigned int skb_pagelen(const struct sk_buff *skb) +{ + return skb_headlen(skb) + __skb_pagelen(skb); +} + +/** + * __skb_fill_page_desc - initialise a paged fragment in an skb + * @skb: buffer containing fragment to be initialised + * @i: paged fragment index to initialise + * @page: the page to use for this fragment + * @off: the offset to the data with @page + * @size: the length of the data + * + * Initialises the @i'th fragment of @skb to point to &size bytes at + * offset @off within @page. + * + * Does not take any additional reference on the fragment. + */ +static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, + struct page *page, int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + /* + * Propagate page pfmemalloc to the skb if we can. The problem is + * that not all callers have unique ownership of the page but rely + * on page_is_pfmemalloc doing the right thing(tm). + */ + frag->page.p = page; + frag->page_offset = off; + skb_frag_size_set(frag, size); + + page = compound_head(page); + if (page_is_pfmemalloc(page)) + skb->pfmemalloc = true; +} + +/** + * skb_fill_page_desc - initialise a paged fragment in an skb + * @skb: buffer containing fragment to be initialised + * @i: paged fragment index to initialise + * @page: the page to use for this fragment + * @off: the offset to the data with @page + * @size: the length of the data + * + * As per __skb_fill_page_desc() -- initialises the @i'th fragment of + * @skb to point to @size bytes at offset @off within @page. In + * addition updates @skb such that @i is the last fragment. + * + * Does not take any additional reference on the fragment. 
+ */ +static inline void skb_fill_page_desc(struct sk_buff *skb, int i, + struct page *page, int off, int size) +{ + __skb_fill_page_desc(skb, i, page, off, size); + skb_shinfo(skb)->nr_frags = i + 1; +} + +void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, + int size, unsigned int truesize); + +void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, + unsigned int truesize); + +#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) +#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) +#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) + +#ifdef NET_SKBUFF_DATA_USES_OFFSET +static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) +{ + return skb->head + skb->tail; +} + +static inline void skb_reset_tail_pointer(struct sk_buff *skb) +{ + skb->tail = skb->data - skb->head; +} + +static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) +{ + skb_reset_tail_pointer(skb); + skb->tail += offset; +} + +#else /* NET_SKBUFF_DATA_USES_OFFSET */ +static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) +{ + return skb->tail; +} + +static inline void skb_reset_tail_pointer(struct sk_buff *skb) +{ + skb->tail = skb->data; +} + +static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) +{ + skb->tail = skb->data + offset; +} + +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + +/* + * Add data to an sk_buff + */ +void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); +void *skb_put(struct sk_buff *skb, unsigned int len); +static inline void *__skb_put(struct sk_buff *skb, unsigned int len) +{ + void *tmp = skb_tail_pointer(skb); + SKB_LINEAR_ASSERT(skb); + skb->tail += len; + skb->len += len; + return tmp; +} + +static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len) +{ + void *tmp = __skb_put(skb, len); + + memset(tmp, 0, len); + return tmp; +} + +static inline void *__skb_put_data(struct sk_buff *skb, const void *data, + unsigned int len) +{ + void *tmp = __skb_put(skb, len); + + memcpy(tmp, data, len); + return tmp; +} + +static inline void __skb_put_u8(struct sk_buff *skb, u8 val) +{ + *(u8 *)__skb_put(skb, 1) = val; +} + +static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len) +{ + void *tmp = skb_put(skb, len); + + memset(tmp, 0, len); + + return tmp; +} + +static inline void *skb_put_data(struct sk_buff *skb, const void *data, + unsigned int len) +{ + void *tmp = skb_put(skb, len); + + memcpy(tmp, data, len); + + return tmp; +} + +static inline void skb_put_u8(struct sk_buff *skb, u8 val) +{ + *(u8 *)skb_put(skb, 1) = val; +} + +void *skb_push(struct sk_buff *skb, unsigned int len); +static inline void *__skb_push(struct sk_buff *skb, unsigned int len) +{ + skb->data -= len; + skb->len += len; + return skb->data; +} + +void *skb_pull(struct sk_buff *skb, unsigned int len); +static inline void *__skb_pull(struct sk_buff *skb, unsigned int len) +{ + skb->len -= len; + BUG_ON(skb->len < skb->data_len); + return skb->data += len; +} + +static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len) +{ + return unlikely(len > skb->len) ? 
NULL : __skb_pull(skb, len); +} + +void *__pskb_pull_tail(struct sk_buff *skb, int delta); + +static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len) +{ + if (len > skb_headlen(skb) && + !__pskb_pull_tail(skb, len - skb_headlen(skb))) + return NULL; + skb->len -= len; + return skb->data += len; +} + +static inline void *pskb_pull(struct sk_buff *skb, unsigned int len) +{ + return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); +} + +static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) +{ + if (likely(len <= skb_headlen(skb))) + return 1; + if (unlikely(len > skb->len)) + return 0; + return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; +} + +void skb_condense(struct sk_buff *skb); + +/** + * skb_headroom - bytes at buffer head + * @skb: buffer to check + * + * Return the number of bytes of free space at the head of an &sk_buff. + */ +static inline unsigned int skb_headroom(const struct sk_buff *skb) +{ + return skb->data - skb->head; +} + +/** + * skb_tailroom - bytes at buffer end + * @skb: buffer to check + * + * Return the number of bytes of free space at the tail of an sk_buff + */ +static inline int skb_tailroom(const struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; +} + +/** + * skb_availroom - bytes at buffer end + * @skb: buffer to check + * + * Return the number of bytes of free space at the tail of an sk_buff + * allocated by sk_stream_alloc() + */ +static inline int skb_availroom(const struct sk_buff *skb) +{ + if (skb_is_nonlinear(skb)) + return 0; + + return skb->end - skb->tail - skb->reserved_tailroom; +} + +/** + * skb_reserve - adjust headroom + * @skb: buffer to alter + * @len: bytes to move + * + * Increase the headroom of an empty &sk_buff by reducing the tail + * room. This is only allowed for an empty buffer. + */ +static inline void skb_reserve(struct sk_buff *skb, int len) +{ + skb->data += len; + skb->tail += len; +} + +/** + * skb_tailroom_reserve - adjust reserved_tailroom + * @skb: buffer to alter + * @mtu: maximum amount of headlen permitted + * @needed_tailroom: minimum amount of reserved_tailroom + * + * Set reserved_tailroom so that headlen can be as large as possible but + * not larger than mtu and tailroom cannot be smaller than + * needed_tailroom. + * The required headroom should already have been reserved before using + * this function. 
+ */ +static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu, + unsigned int needed_tailroom) +{ + SKB_LINEAR_ASSERT(skb); + if (mtu < skb_tailroom(skb) - needed_tailroom) + /* use at most mtu */ + skb->reserved_tailroom = skb_tailroom(skb) - mtu; + else + /* use up to all available space */ + skb->reserved_tailroom = needed_tailroom; +} + +#define ENCAP_TYPE_ETHER 0 +#define ENCAP_TYPE_IPPROTO 1 + +static inline void skb_set_inner_protocol(struct sk_buff *skb, + __be16 protocol) +{ + skb->inner_protocol = protocol; + skb->inner_protocol_type = ENCAP_TYPE_ETHER; +} + +static inline void skb_set_inner_ipproto(struct sk_buff *skb, + __u8 ipproto) +{ + skb->inner_ipproto = ipproto; + skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; +} + +static inline void skb_reset_inner_headers(struct sk_buff *skb) +{ + skb->inner_mac_header = skb->mac_header; + skb->inner_network_header = skb->network_header; + skb->inner_transport_header = skb->transport_header; +} + +static inline void skb_reset_mac_len(struct sk_buff *skb) +{ + skb->mac_len = skb->network_header - skb->mac_header; +} + +static inline unsigned char *skb_inner_transport_header(const struct sk_buff + *skb) +{ + return skb->head + skb->inner_transport_header; +} + +static inline int skb_inner_transport_offset(const struct sk_buff *skb) +{ + return skb_inner_transport_header(skb) - skb->data; +} + +static inline void skb_reset_inner_transport_header(struct sk_buff *skb) +{ + skb->inner_transport_header = skb->data - skb->head; +} + +static inline void skb_set_inner_transport_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_inner_transport_header(skb); + skb->inner_transport_header += offset; +} + +static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) +{ + return skb->head + skb->inner_network_header; +} + +static inline void skb_reset_inner_network_header(struct sk_buff *skb) +{ + skb->inner_network_header = skb->data - skb->head; +} + +static inline void skb_set_inner_network_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_inner_network_header(skb); + skb->inner_network_header += offset; +} + +static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) +{ + return skb->head + skb->inner_mac_header; +} + +static inline void skb_reset_inner_mac_header(struct sk_buff *skb) +{ + skb->inner_mac_header = skb->data - skb->head; +} + +static inline void skb_set_inner_mac_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_inner_mac_header(skb); + skb->inner_mac_header += offset; +} +static inline bool skb_transport_header_was_set(const struct sk_buff *skb) +{ + return skb->transport_header != (typeof(skb->transport_header))~0U; +} + +static inline unsigned char *skb_transport_header(const struct sk_buff *skb) +{ + return skb->head + skb->transport_header; +} + +static inline void skb_reset_transport_header(struct sk_buff *skb) +{ + skb->transport_header = skb->data - skb->head; +} + +static inline void skb_set_transport_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_transport_header(skb); + skb->transport_header += offset; +} + +static inline unsigned char *skb_network_header(const struct sk_buff *skb) +{ + return skb->head + skb->network_header; +} + +static inline void skb_reset_network_header(struct sk_buff *skb) +{ + skb->network_header = skb->data - skb->head; +} + +static inline void skb_set_network_header(struct sk_buff *skb, const int offset) +{ + skb_reset_network_header(skb); + skb->network_header += offset; +} + 
+static inline unsigned char *skb_mac_header(const struct sk_buff *skb) +{ + return skb->head + skb->mac_header; +} + +static inline int skb_mac_offset(const struct sk_buff *skb) +{ + return skb_mac_header(skb) - skb->data; +} + +static inline u32 skb_mac_header_len(const struct sk_buff *skb) +{ + return skb->network_header - skb->mac_header; +} + +static inline int skb_mac_header_was_set(const struct sk_buff *skb) +{ + return skb->mac_header != (typeof(skb->mac_header))~0U; +} + +static inline void skb_reset_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->data - skb->head; +} + +static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) +{ + skb_reset_mac_header(skb); + skb->mac_header += offset; +} + +static inline void skb_pop_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->network_header; +} + +static inline void skb_probe_transport_header(struct sk_buff *skb, + const int offset_hint) +{ + struct flow_keys_basic keys; + + if (skb_transport_header_was_set(skb)) + return; + + if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) + skb_set_transport_header(skb, keys.control.thoff); + else if (offset_hint >= 0) + skb_set_transport_header(skb, offset_hint); +} + +static inline void skb_mac_header_rebuild(struct sk_buff *skb) +{ + if (skb_mac_header_was_set(skb)) { + const unsigned char *old_mac = skb_mac_header(skb); + + skb_set_mac_header(skb, -skb->mac_len); + memmove(skb_mac_header(skb), old_mac, skb->mac_len); + } +} + +static inline int skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} + +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ + return skb->head + skb->csum_start; +} + +static inline int skb_transport_offset(const struct sk_buff *skb) +{ + return skb_transport_header(skb) - skb->data; +} + +static inline u32 skb_network_header_len(const struct sk_buff *skb) +{ + return skb->transport_header - skb->network_header; +} + +static inline u32 skb_inner_network_header_len(const struct sk_buff *skb) +{ + return skb->inner_transport_header - skb->inner_network_header; +} + +static inline int skb_network_offset(const struct sk_buff *skb) +{ + return skb_network_header(skb) - skb->data; +} + +static inline int skb_inner_network_offset(const struct sk_buff *skb) +{ + return skb_inner_network_header(skb) - skb->data; +} + +static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) +{ + return pskb_may_pull(skb, skb_network_offset(skb) + len); +} + +/* + * CPUs often take a performance hit when accessing unaligned memory + * locations. The actual performance hit varies, it can be small if the + * hardware handles it or large if we have to take an exception and fix it + * in software. + * + * Since an ethernet header is 14 bytes network drivers often end up with + * the IP header at an unaligned offset. The IP header can be aligned by + * shifting the start of the packet by 2 bytes. Drivers should do this + * with: + * + * skb_reserve(skb, NET_IP_ALIGN); + * + * The downside to this alignment of the IP header is that the DMA is now + * unaligned. On some architectures the cost of an unaligned DMA is high + * and this cost outweighs the gains made by aligning the IP header. + * + * Since this trade off varies between architectures, we allow NET_IP_ALIGN + * to be overridden. + */ +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +/* + * The networking layer reserves some headroom in skb data (via + * dev_alloc_skb). 
This is used to avoid having to reallocate skb data when + * the header has to grow. In the default case, if the header has to grow + * 32 bytes or less we avoid the reallocation. + * + * Unfortunately this headroom changes the DMA alignment of the resulting + * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive + * on some architectures. An architecture can override this value, + * perhaps setting it to a cacheline in size (since that will maintain + * cacheline alignment of the DMA). It must be a power of 2. + * + * Various parts of the networking layer expect at least 32 bytes of + * headroom, you should not reduce this. + * + * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) + * to reduce average number of cache lines per packet. + * get_rps_cpus() for example only access one 64 bytes aligned block : + * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) + */ +#ifndef NET_SKB_PAD +#define NET_SKB_PAD max(32, L1_CACHE_BYTES) +#endif + +int ___pskb_trim(struct sk_buff *skb, unsigned int len); + +static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) +{ + if (unlikely(skb_is_nonlinear(skb))) { + WARN_ON(1); + return; + } + skb->len = len; + skb_set_tail_pointer(skb, len); +} + +static inline void __skb_trim(struct sk_buff *skb, unsigned int len) +{ + __skb_set_length(skb, len); +} + +void skb_trim(struct sk_buff *skb, unsigned int len); + +static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) +{ + if (skb->data_len) + return ___pskb_trim(skb, len); + __skb_trim(skb, len); + return 0; +} + +static inline int pskb_trim(struct sk_buff *skb, unsigned int len) +{ + return (len < skb->len) ? __pskb_trim(skb, len) : 0; +} + +/** + * pskb_trim_unique - remove end from a paged unique (not cloned) buffer + * @skb: buffer to alter + * @len: new length + * + * This is identical to pskb_trim except that the caller knows that + * the skb is not cloned so we should never get an error due to out- + * of-memory. + */ +static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) +{ + int err = pskb_trim(skb, len); + BUG_ON(err); +} + +static inline int __skb_grow(struct sk_buff *skb, unsigned int len) +{ + unsigned int diff = len - skb->len; + + if (skb_tailroom(skb) < diff) { + int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), + GFP_ATOMIC); + if (ret) + return ret; + } + __skb_set_length(skb, len); + return 0; +} + +/** + * skb_orphan - orphan a buffer + * @skb: buffer to orphan + * + * If a buffer currently has an owner then we call the owner's + * destructor function and make the @skb unowned. The buffer continues + * to exist but is no longer charged to its former owner. + */ +static inline void skb_orphan(struct sk_buff *skb) +{ + if (skb->destructor) { + skb->destructor(skb); + skb->destructor = NULL; + skb->sk = NULL; + } else { + BUG_ON(skb->sk); + } +} + +/** + * skb_orphan_frags - orphan the frags contained in a buffer + * @skb: buffer to orphan frags from + * @gfp_mask: allocation mask for replacement pages + * + * For each frag in the SKB which needs a destructor (i.e. has an + * owner) create a copy of that frag and release the original + * page by calling the destructor. 
+ */ +static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) +{ + if (likely(!skb_zcopy(skb))) + return 0; + if (!skb_zcopy_is_nouarg(skb) && + skb_uarg(skb)->callback == sock_zerocopy_callback) + return 0; + return skb_copy_ubufs(skb, gfp_mask); +} + +/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */ +static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) +{ + if (likely(!skb_zcopy(skb))) + return 0; + return skb_copy_ubufs(skb, gfp_mask); +} + +/** + * __skb_queue_purge - empty a list + * @list: list to empty + * + * Delete all buffers on an &sk_buff list. Each buffer is removed from + * the list and one reference dropped. This function does not take the + * list lock and the caller must hold the relevant locks to use it. + */ +void skb_queue_purge(struct sk_buff_head *list); +static inline void __skb_queue_purge(struct sk_buff_head *list) +{ + struct sk_buff *skb; + while ((skb = __skb_dequeue(list)) != NULL) + kfree_skb(skb); +} + +unsigned int skb_rbtree_purge(struct rb_root *root); + +void *netdev_alloc_frag(unsigned int fragsz); + +struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, + gfp_t gfp_mask); + +/** + * netdev_alloc_skb - allocate an skbuff for rx on a specific device + * @dev: network device to receive on + * @length: length to allocate + * + * Allocate a new &sk_buff and assign it a usage count of one. The + * buffer has unspecified headroom built in. Users should allocate + * the headroom they think they need without accounting for the + * built in space. The built in space is used for optimisations. + * + * %NULL is returned if there is no free memory. Although this function + * allocates memory it can be called from an interrupt. + */ +static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, + unsigned int length) +{ + return __netdev_alloc_skb(dev, length, GFP_ATOMIC); +} + +/* legacy helper around __netdev_alloc_skb() */ +static inline struct sk_buff *__dev_alloc_skb(unsigned int length, + gfp_t gfp_mask) +{ + return __netdev_alloc_skb(NULL, length, gfp_mask); +} + +/* legacy helper around netdev_alloc_skb() */ +static inline struct sk_buff *dev_alloc_skb(unsigned int length) +{ + return netdev_alloc_skb(NULL, length); +} + + +static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length, gfp_t gfp) +{ + struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); + + if (NET_IP_ALIGN && skb) + skb_reserve(skb, NET_IP_ALIGN); + return skb; +} + +static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); +} + +static inline void skb_free_frag(void *addr) +{ + page_frag_free(addr); +} + +void *napi_alloc_frag(unsigned int fragsz); +struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, + unsigned int length, gfp_t gfp_mask); +static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, + unsigned int length) +{ + return __napi_alloc_skb(napi, length, GFP_ATOMIC); +} +void napi_consume_skb(struct sk_buff *skb, int budget); + +void __kfree_skb_flush(void); +void __kfree_skb_defer(struct sk_buff *skb); + +/** + * __dev_alloc_pages - allocate page for network Rx + * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx + * @order: size of the allocation + * + * Allocate a new page. + * + * %NULL is returned if there is no free memory. 
+*/ +static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, + unsigned int order) +{ + /* This piece of code contains several assumptions. + * 1. This is for device Rx, therefor a cold page is preferred. + * 2. The expectation is the user wants a compound page. + * 3. If requesting a order 0 page it will not be compound + * due to the check to see if order has a value in prep_new_page + * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to + * code in gfp_to_alloc_flags that should be enforcing this. + */ + gfp_mask |= __GFP_COMP | __GFP_MEMALLOC; + + return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); +} + +static inline struct page *dev_alloc_pages(unsigned int order) +{ + return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order); +} + +/** + * __dev_alloc_page - allocate a page for network Rx + * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx + * + * Allocate a new page. + * + * %NULL is returned if there is no free memory. + */ +static inline struct page *__dev_alloc_page(gfp_t gfp_mask) +{ + return __dev_alloc_pages(gfp_mask, 0); +} + +static inline struct page *dev_alloc_page(void) +{ + return dev_alloc_pages(0); +} + +/** + * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page + * @page: The page that was allocated from skb_alloc_page + * @skb: The skb that may need pfmemalloc set + */ +static inline void skb_propagate_pfmemalloc(struct page *page, + struct sk_buff *skb) +{ + if (page_is_pfmemalloc(page)) + skb->pfmemalloc = true; +} + +/** + * skb_frag_off() - Returns the offset of a skb fragment + * @frag: the paged fragment + */ +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->page_offset; +} + +/** + * skb_frag_page - retrieve the page referred to by a paged fragment + * @frag: the paged fragment + * + * Returns the &struct page associated with @frag. + */ +static inline struct page *skb_frag_page(const skb_frag_t *frag) +{ + return frag->page.p; +} + +/** + * __skb_frag_ref - take an addition reference on a paged fragment. + * @frag: the paged fragment + * + * Takes an additional reference on the paged fragment @frag. + */ +static inline void __skb_frag_ref(skb_frag_t *frag) +{ + get_page(skb_frag_page(frag)); +} + +/** + * skb_frag_ref - take an addition reference on a paged fragment of an skb. + * @skb: the buffer + * @f: the fragment offset. + * + * Takes an additional reference on the @f'th paged fragment of @skb. + */ +static inline void skb_frag_ref(struct sk_buff *skb, int f) +{ + __skb_frag_ref(&skb_shinfo(skb)->frags[f]); +} + +/** + * __skb_frag_unref - release a reference on a paged fragment. + * @frag: the paged fragment + * + * Releases a reference on the paged fragment @frag. + */ +static inline void __skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} + +/** + * skb_frag_unref - release a reference on a paged fragment of an skb. + * @skb: the buffer + * @f: the fragment offset + * + * Releases a reference on the @f'th paged fragment of @skb. + */ +static inline void skb_frag_unref(struct sk_buff *skb, int f) +{ + __skb_frag_unref(&skb_shinfo(skb)->frags[f]); +} + +/** + * skb_frag_address - gets the address of the data contained in a paged fragment + * @frag: the paged fragment buffer + * + * Returns the address of the data within @frag. The page must already + * be mapped. 
+ */ +static inline void *skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} + +/** + * skb_frag_address_safe - gets the address of the data contained in a paged fragment + * @frag: the paged fragment buffer + * + * Returns the address of the data within @frag. Checks that the page + * is mapped and returns %NULL otherwise. + */ +static inline void *skb_frag_address_safe(const skb_frag_t *frag) +{ + void *ptr = page_address(skb_frag_page(frag)); + if (unlikely(!ptr)) + return NULL; + + return ptr + frag->page_offset; +} + +/** + * __skb_frag_set_page - sets the page contained in a paged fragment + * @frag: the paged fragment + * @page: the page to set + * + * Sets the fragment @frag to contain @page. + */ +static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) +{ + frag->page.p = page; +} + +/** + * skb_frag_set_page - sets the page contained in a paged fragment of an skb + * @skb: the buffer + * @f: the fragment offset + * @page: the page to set + * + * Sets the @f'th fragment of @skb to contain @page. + */ +static inline void skb_frag_set_page(struct sk_buff *skb, int f, + struct page *page) +{ + __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); +} + +bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio); + +/** + * skb_frag_dma_map - maps a paged fragment via the DMA API + * @dev: the device to map the fragment to + * @frag: the paged fragment to map + * @offset: the offset within the fragment (starting at the + * fragment's own offset) + * @size: the number of bytes to map + * @dir: the direction of the mapping (``PCI_DMA_*``) + * + * Maps the page associated with @frag to @device. + */ +static inline dma_addr_t skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} + +static inline struct sk_buff *pskb_copy(struct sk_buff *skb, + gfp_t gfp_mask) +{ + return __pskb_copy(skb, skb_headroom(skb), gfp_mask); +} + + +static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, + gfp_t gfp_mask) +{ + return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); +} + + +/** + * skb_clone_writable - is the header of a clone writable + * @skb: buffer to check + * @len: length up to which to write + * + * Returns true if modifying the header part of the cloned buffer + * does not requires the data to be copied. + */ +static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) +{ + return !skb_header_cloned(skb) && + skb_headroom(skb) + len <= skb->hdr_len; +} + +static inline int skb_try_make_writable(struct sk_buff *skb, + unsigned int write_len) +{ + return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} + +static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, + int cloned) +{ + int delta = 0; + + if (headroom > skb_headroom(skb)) + delta = headroom - skb_headroom(skb); + + if (delta || cloned) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} + +/** + * skb_cow - copy header of skb when it is required + * @skb: buffer to cow + * @headroom: needed headroom + * + * If the skb passed lacks sufficient headroom or its data part + * is shared, data is reallocated. If reallocation fails, an error + * is returned and original skb is not changed. 
+ * + * The result is skb with writable area skb->head...skb->tail + * and at least @headroom of space at head. + */ +static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) +{ + return __skb_cow(skb, headroom, skb_cloned(skb)); +} + +/** + * skb_cow_head - skb_cow but only making the head writable + * @skb: buffer to cow + * @headroom: needed headroom + * + * This function is identical to skb_cow except that we replace the + * skb_cloned check by skb_header_cloned. It should be used when + * you only need to push on some header and do not need to modify + * the data. + */ +static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + return __skb_cow(skb, headroom, skb_header_cloned(skb)); +} + +/** + * skb_padto - pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +static inline int skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if (likely(size >= len)) + return 0; + return skb_pad(skb, len - size); +} + +/** + * skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * @free_on_error: free buffer on error + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error if @free_on_error is true. + */ +static inline int __must_check __skb_put_padto(struct sk_buff *skb, + unsigned int len, + bool free_on_error) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (__skb_pad(skb, len, free_on_error)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} + +/** + * skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + return __skb_put_padto(skb, len, true); +} + +static inline int skb_add_data(struct sk_buff *skb, + struct iov_iter *from, int copy) +{ + const int off = skb->len; + + if (skb->ip_summed == CHECKSUM_NONE) { + __wsum csum = 0; + if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy, + &csum, from)) { + skb->csum = csum_block_add(skb->csum, csum, off); + return 0; + } + } else if (copy_from_iter_full(skb_put(skb, copy), copy, from)) + return 0; + + __skb_trim(skb, off); + return -EFAULT; +} + +static inline bool skb_can_coalesce(struct sk_buff *skb, int i, + const struct page *page, int off) +{ + if (skb_zcopy(skb)) + return false; + if (i) { + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + + return page == skb_frag_page(frag) && + off == frag->page_offset + skb_frag_size(frag); + } + return false; +} + +static inline int __skb_linearize(struct sk_buff *skb) +{ + return __pskb_pull_tail(skb, skb->data_len) ? 
0 : -ENOMEM; +} + +/** + * skb_linearize - convert paged skb to linear one + * @skb: buffer to linarize + * + * If there is no free memory -ENOMEM is returned, otherwise zero + * is returned and the old skb data released. + */ +static inline int skb_linearize(struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; +} + +/** + * skb_has_shared_frag - can any frag be overwritten + * @skb: buffer to test + * + * Return true if the skb has at least one frag that might be modified + * by an external entity (as in vmsplice()/sendfile()) + */ +static inline bool skb_has_shared_frag(const struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) && + skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; +} + +/** + * skb_linearize_cow - make sure skb is linear and writable + * @skb: buffer to process + * + * If there is no free memory -ENOMEM is returned, otherwise zero + * is returned and the old skb data released. + */ +static inline int skb_linearize_cow(struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) || skb_cloned(skb) ? + __skb_linearize(skb) : 0; +} + +static __always_inline void +__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len, + unsigned int off) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_block_sub(skb->csum, + csum_partial(start, len, 0), off); + else if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_start_offset(skb) < 0) + skb->ip_summed = CHECKSUM_NONE; +} + +/** + * skb_postpull_rcsum - update checksum for received skb after pull + * @skb: buffer to update + * @start: start of data before pull + * @len: length of data pulled + * + * After doing a pull on a received packet, you need to call this to + * update the CHECKSUM_COMPLETE checksum, or set ip_summed to + * CHECKSUM_NONE so that it can be recomputed from scratch. + */ +static inline void skb_postpull_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) +{ + __skb_postpull_rcsum(skb, start, len, 0); +} + +static __always_inline void +__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len, + unsigned int off) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_block_add(skb->csum, + csum_partial(start, len, 0), off); +} + +/** + * skb_postpush_rcsum - update checksum for received skb after push + * @skb: buffer to update + * @start: start of data after push + * @len: length of data pushed + * + * After doing a push on a received packet, you need to call this to + * update the CHECKSUM_COMPLETE checksum. + */ +static inline void skb_postpush_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) +{ + __skb_postpush_rcsum(skb, start, len, 0); +} + +void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); + +/** + * skb_push_rcsum - push skb and update receive checksum + * @skb: buffer to update + * @len: length of data pulled + * + * This function performs an skb_push on the packet and updates + * the CHECKSUM_COMPLETE checksum. It should be used on + * receive path processing instead of skb_push unless you know + * that the checksum difference is zero (e.g., a valid IP header) + * or you are setting ip_summed to CHECKSUM_NONE. 
+ */ +static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len) +{ + skb_push(skb, len); + skb_postpush_rcsum(skb, skb->data, len); + return skb->data; +} + +int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); +/** + * pskb_trim_rcsum - trim received skb and update checksum + * @skb: buffer to trim + * @len: new length + * + * This is exactly the same as pskb_trim except that it ensures the + * checksum of received packets are still valid after the operation. + * It can change skb pointers. + */ + +static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (likely(len >= skb->len)) + return 0; + return pskb_trim_rcsum_slow(skb, len); +} + +static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + __skb_trim(skb, len); + return 0; +} + +static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + return __skb_grow(skb, len); +} + +#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode) +#define skb_rb_first(root) rb_to_skb(rb_first(root)) +#define skb_rb_last(root) rb_to_skb(rb_last(root)) +#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode)) +#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode)) + +#define skb_queue_walk(queue, skb) \ + for (skb = (queue)->next; \ + skb != (struct sk_buff *)(queue); \ + skb = skb->next) + +#define skb_queue_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->next, tmp = skb->next; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->next) + +#define skb_queue_walk_from(queue, skb) \ + for (; skb != (struct sk_buff *)(queue); \ + skb = skb->next) + +#define skb_rbtree_walk(skb, root) \ + for (skb = skb_rb_first(root); skb != NULL; \ + skb = skb_rb_next(skb)) + +#define skb_rbtree_walk_from(skb) \ + for (; skb != NULL; \ + skb = skb_rb_next(skb)) + +#define skb_rbtree_walk_from_safe(skb, tmp) \ + for (; tmp = skb ? 
skb_rb_next(skb) : NULL, (skb != NULL); \ + skb = tmp) + +#define skb_queue_walk_from_safe(queue, skb, tmp) \ + for (tmp = skb->next; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->next) + +#define skb_queue_reverse_walk(queue, skb) \ + for (skb = (queue)->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = skb->prev) + +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) + +#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \ + for (tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) + +static inline bool skb_has_frag_list(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->frag_list != NULL; +} + +static inline void skb_frag_list_init(struct sk_buff *skb) +{ + skb_shinfo(skb)->frag_list = NULL; +} + +#define skb_walk_frags(skb, iter) \ + for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) + + +int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, + const struct sk_buff *skb); +struct sk_buff *__skb_try_recv_from_queue(struct sock *sk, + struct sk_buff_head *queue, + unsigned int flags, + void (*destructor)(struct sock *sk, + struct sk_buff *skb), + int *peeked, int *off, int *err, + struct sk_buff **last); +struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, + void (*destructor)(struct sock *sk, + struct sk_buff *skb), + int *peeked, int *off, int *err, + struct sk_buff **last); +struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, + void (*destructor)(struct sock *sk, + struct sk_buff *skb), + int *peeked, int *off, int *err); +struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, + int *err); +__poll_t datagram_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); +int skb_copy_datagram_iter(const struct sk_buff *from, int offset, + struct iov_iter *to, int size); +static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, + struct msghdr *msg, int size) +{ + return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); +} +int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, + struct msghdr *msg); +int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, + struct iov_iter *from, int len); +int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); +void skb_free_datagram(struct sock *sk, struct sk_buff *skb); +void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len); +static inline void skb_free_datagram_locked(struct sock *sk, + struct sk_buff *skb) +{ + __skb_free_datagram_locked(sk, skb, 0); +} +int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); +int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); +int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); +__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, + int len, __wsum csum); +int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, + struct pipe_inode_info *pipe, unsigned int len, + unsigned int flags); +int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, + int len); +int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); +void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); +unsigned int skb_zerocopy_headlen(const struct sk_buff *from); +int skb_zerocopy(struct sk_buff 
*to, struct sk_buff *from, + int len, int hlen); +void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); +int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); +void skb_scrub_packet(struct sk_buff *skb, bool xnet); +bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); +bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); +struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); +struct sk_buff *skb_vlan_untag(struct sk_buff *skb); +int skb_ensure_writable(struct sk_buff *skb, int write_len); +int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); +int skb_vlan_pop(struct sk_buff *skb); +int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); +struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, + gfp_t gfp); + +static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) +{ + return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT; +} + +static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) +{ + return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; +} + +struct skb_checksum_ops { + __wsum (*update)(const void *mem, int len, __wsum wsum); + __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); +}; + +extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly; + +__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, + __wsum csum, const struct skb_checksum_ops *ops); +__wsum skb_checksum(const struct sk_buff *skb, int offset, int len, + __wsum csum); + +static inline void * __must_check +__skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *data, int hlen, void *buffer) +{ + if (hlen - offset >= len) + return data + offset; + + if (!skb || + skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +} + +static inline void * __must_check +skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) +{ + return __skb_header_pointer(skb, offset, len, skb->data, + skb_headlen(skb), buffer); +} + +/** + * skb_needs_linearize - check if we need to linearize a given skb + * depending on the given device features. + * @skb: socket buffer to check + * @features: net device features + * + * Returns true if either: + * 1. skb has frag_list and the device doesn't support FRAGLIST, or + * 2. skb is fragmented and the device does not support SG. 
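A hedged sketch of typical skb_header_pointer() use (not part of the upstream header; assumes linux/udp.h for struct udphdr, and the offset handling is the caller's business):

/*
 * Illustrative sketch -- not part of this file.  Reads a UDP header that
 * may sit in paged data, falling back to a copy on the stack.
 */
static int example_peek_udp_dport(const struct sk_buff *skb, int offset)
{
	struct udphdr _uh;
	const struct udphdr *uh;

	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
	if (!uh)
		return -EINVAL;		/* packet shorter than offset + header */
	return ntohs(uh->dest);
}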
+ */ +static inline bool skb_needs_linearize(struct sk_buff *skb, + netdev_features_t features) +{ + return skb_is_nonlinear(skb) && + ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) || + (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); +} + +static inline void skb_copy_from_linear_data(const struct sk_buff *skb, + void *to, + const unsigned int len) +{ + memcpy(to, skb->data, len); +} + +static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, + const int offset, void *to, + const unsigned int len) +{ + memcpy(to, skb->data + offset, len); +} + +static inline void skb_copy_to_linear_data(struct sk_buff *skb, + const void *from, + const unsigned int len) +{ + memcpy(skb->data, from, len); +} + +static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, + const int offset, + const void *from, + const unsigned int len) +{ + memcpy(skb->data + offset, from, len); +} + +void skb_init(void); + +static inline ktime_t skb_get_ktime(const struct sk_buff *skb) +{ + return skb->tstamp; +} + +/** + * skb_get_timestamp - get timestamp from a skb + * @skb: skb to get stamp from + * @stamp: pointer to struct timeval to store stamp in + * + * Timestamps are stored in the skb as offsets to a base timestamp. + * This function converts the offset back to a struct timeval and stores + * it in stamp. + */ +static inline void skb_get_timestamp(const struct sk_buff *skb, + struct timeval *stamp) +{ + *stamp = ktime_to_timeval(skb->tstamp); +} + +static inline void skb_get_timestampns(const struct sk_buff *skb, + struct timespec *stamp) +{ + *stamp = ktime_to_timespec(skb->tstamp); +} + +static inline void __net_timestamp(struct sk_buff *skb) +{ + skb->tstamp = ktime_get_real(); +} + +static inline ktime_t net_timedelta(ktime_t t) +{ + return ktime_sub(ktime_get_real(), t); +} + +static inline ktime_t net_invalid_timestamp(void) +{ + return 0; +} + +static inline u8 skb_metadata_len(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->meta_len; +} + +static inline void *skb_metadata_end(const struct sk_buff *skb) +{ + return skb_mac_header(skb); +} + +static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, + const struct sk_buff *skb_b, + u8 meta_len) +{ + const void *a = skb_metadata_end(skb_a); + const void *b = skb_metadata_end(skb_b); + /* Using more efficient varaiant than plain call to memcmp(). */ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + u64 diffs = 0; + + switch (meta_len) { +#define __it(x, op) (x -= sizeof(u##op)) +#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) + case 32: diffs |= __it_diff(a, b, 64); + case 24: diffs |= __it_diff(a, b, 64); + case 16: diffs |= __it_diff(a, b, 64); + case 8: diffs |= __it_diff(a, b, 64); + break; + case 28: diffs |= __it_diff(a, b, 64); + case 20: diffs |= __it_diff(a, b, 64); + case 12: diffs |= __it_diff(a, b, 64); + case 4: diffs |= __it_diff(a, b, 32); + break; + } + return diffs; +#else + return memcmp(a - meta_len, b - meta_len, meta_len); +#endif +} + +static inline bool skb_metadata_differs(const struct sk_buff *skb_a, + const struct sk_buff *skb_b) +{ + u8 len_a = skb_metadata_len(skb_a); + u8 len_b = skb_metadata_len(skb_b); + + if (!(len_a | len_b)) + return false; + + return len_a != len_b ? 
+ true : __skb_metadata_differs(skb_a, skb_b, len_a); +} + +static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len) +{ + skb_shinfo(skb)->meta_len = meta_len; +} + +static inline void skb_metadata_clear(struct sk_buff *skb) +{ + skb_metadata_set(skb, 0); +} + +struct sk_buff *skb_clone_sk(struct sk_buff *skb); + +#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING + +void skb_clone_tx_timestamp(struct sk_buff *skb); +bool skb_defer_rx_timestamp(struct sk_buff *skb); + +#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ + +static inline void skb_clone_tx_timestamp(struct sk_buff *skb) +{ +} + +static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) +{ + return false; +} + +#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */ + +/** + * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps + * + * PHY drivers may accept clones of transmitted packets for + * timestamping via their phy_driver.txtstamp method. These drivers + * must call this function to return the skb back to the stack with a + * timestamp. + * + * @skb: clone of the the original outgoing packet + * @hwtstamps: hardware time stamps + * + */ +void skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); + +void __skb_tstamp_tx(struct sk_buff *orig_skb, + struct skb_shared_hwtstamps *hwtstamps, + struct sock *sk, int tstype); + +/** + * skb_tstamp_tx - queue clone of skb with send time stamps + * @orig_skb: the original outgoing packet + * @hwtstamps: hardware time stamps, may be NULL if not available + * + * If the skb has a socket associated, then this function clones the + * skb (thus sharing the actual data and optional structures), stores + * the optional hardware time stamping information (if non NULL) or + * generates a software time stamp (otherwise), then queues the clone + * to the error queue of the socket. Errors are silently ignored. + */ +void skb_tstamp_tx(struct sk_buff *orig_skb, + struct skb_shared_hwtstamps *hwtstamps); + +/** + * skb_tx_timestamp() - Driver hook for transmit timestamping + * + * Ethernet MAC Drivers should call this function in their hard_xmit() + * function immediately before giving the sk_buff to the MAC hardware. + * + * Specifically, one should make absolutely sure that this function is + * called before TX completion of this packet can trigger. Otherwise + * the packet could potentially already be freed. + * + * @skb: A socket buffer. + */ +static inline void skb_tx_timestamp(struct sk_buff *skb) +{ + skb_clone_tx_timestamp(skb); + if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) + skb_tstamp_tx(skb, NULL); +} + +/** + * skb_complete_wifi_ack - deliver skb with wifi status + * + * @skb: the original outgoing packet + * @acked: ack status + * + */ +void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); + +__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); +__sum16 __skb_checksum_complete(struct sk_buff *skb); + +static inline int skb_csum_unnecessary(const struct sk_buff *skb) +{ + return ((skb->ip_summed == CHECKSUM_UNNECESSARY) || + skb->csum_valid || + (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_start_offset(skb) >= 0)); +} + +/** + * skb_checksum_complete - Calculate checksum of an entire packet + * @skb: packet to process + * + * This function calculates the checksum over the entire packet plus + * the value of skb->csum. The latter can be used to supply the + * checksum of a pseudo header as used by TCP/UDP. It returns the + * checksum. 
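For the transmit-timestamp hook documented above, a hedged sketch of where it sits in a driver's xmit path (not part of the upstream header; example_hw_queue_tx() is a hypothetical hardware hand-off, and netdev_tx_t comes from linux/netdevice.h):

/*
 * Illustrative sketch -- not part of this file.  Shows the point in an
 * ndo_start_xmit implementation where skb_tx_timestamp() must be called.
 */
static void example_hw_queue_tx(struct net_device *dev, struct sk_buff *skb)
{
	/* hand the buffer to hypothetical hardware here */
}

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	skb_tx_timestamp(skb);		/* before TX completion can possibly fire */
	example_hw_queue_tx(dev, skb);
	return NETDEV_TX_OK;
}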
+ * + * For protocols that contain complete checksums such as ICMP/TCP/UDP, + * this function can be used to verify that checksum on received + * packets. In that case the function should return zero if the + * checksum is correct. In particular, this function will return zero + * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the + * hardware has already verified the correctness of the checksum. + */ +static inline __sum16 skb_checksum_complete(struct sk_buff *skb) +{ + return skb_csum_unnecessary(skb) ? + 0 : __skb_checksum_complete(skb); +} + +static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (skb->csum_level == 0) + skb->ip_summed = CHECKSUM_NONE; + else + skb->csum_level--; + } +} + +static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (skb->csum_level < SKB_MAX_CSUM_LEVEL) + skb->csum_level++; + } else if (skb->ip_summed == CHECKSUM_NONE) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 0; + } +} + +/* Check if we need to perform checksum complete validation. + * + * Returns true if checksum complete is needed, false otherwise + * (either checksum is unnecessary or zero checksum is allowed). + */ +static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, + bool zero_okay, + __sum16 check) +{ + if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { + skb->csum_valid = 1; + __skb_decr_checksum_unnecessary(skb); + return false; + } + + return true; +} + +/* For small packets <= CHECKSUM_BREAK perform checksum complete directly + * in checksum_init. + */ +#define CHECKSUM_BREAK 76 + +/* Unset checksum-complete + * + * Unset checksum complete can be done when packet is being modified + * (uncompressed for instance) and checksum-complete value is + * invalidated. + */ +static inline void skb_checksum_complete_unset(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; +} + +/* Validate (init) checksum based on checksum complete. + * + * Return values: + * 0: checksum is validated or try to in skb_checksum_complete. In the latter + * case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo + * checksum is stored in skb->csum for use in __skb_checksum_complete + * non-zero: value of invalid checksum + * + */ +static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, + bool complete, + __wsum psum) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) { + if (!csum_fold(csum_add(psum, skb->csum))) { + skb->csum_valid = 1; + return 0; + } + } + + skb->csum = psum; + + if (complete || skb->len <= CHECKSUM_BREAK) { + __sum16 csum; + + csum = __skb_checksum_complete(skb); + skb->csum_valid = !csum; + return csum; + } + + return 0; +} + +static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) +{ + return 0; +} + +/* Perform checksum validate (init). Note that this is a macro since we only + * want to calculate the pseudo header which is an input function if necessary. + * First we try to validate without any computation (checksum unnecessary) and + * then calculate based on checksum complete calling the function to compute + * pseudo header. 
+ * + * Return values: + * 0: checksum is validated or try to in skb_checksum_complete + * non-zero: value of invalid checksum + */ +#define __skb_checksum_validate(skb, proto, complete, \ + zero_okay, check, compute_pseudo) \ +({ \ + __sum16 __ret = 0; \ + skb->csum_valid = 0; \ + if (__skb_checksum_validate_needed(skb, zero_okay, check)) \ + __ret = __skb_checksum_validate_complete(skb, \ + complete, compute_pseudo(skb, proto)); \ + __ret; \ +}) + +#define skb_checksum_init(skb, proto, compute_pseudo) \ + __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo) + +#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \ + __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo) + +#define skb_checksum_validate(skb, proto, compute_pseudo) \ + __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo) + +#define skb_checksum_validate_zero_check(skb, proto, check, \ + compute_pseudo) \ + __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo) + +#define skb_checksum_simple_validate(skb) \ + __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) + +static inline bool __skb_checksum_convert_check(struct sk_buff *skb) +{ + return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); +} + +static inline void __skb_checksum_convert(struct sk_buff *skb, + __sum16 check, __wsum pseudo) +{ + skb->csum = ~pseudo; + skb->ip_summed = CHECKSUM_COMPLETE; +} + +#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ +do { \ + if (__skb_checksum_convert_check(skb)) \ + __skb_checksum_convert(skb, check, \ + compute_pseudo(skb, proto)); \ +} while (0) + +static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, + u16 start, u16 offset) +{ + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = ((unsigned char *)ptr + start) - skb->head; + skb->csum_offset = offset - start; +} + +/* Update skbuf and packet to reflect the remote checksum offload operation. + * When called, ptr indicates the starting point for skb->csum when + * ip_summed is CHECKSUM_COMPLETE. If we need create checksum complete + * here, skb_postpull_rcsum is done so skb->csum start is ptr. 
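As a hedged usage sketch of the validation macros above (illustrative only, not part of the upstream header), a receive path for a protocol whose checksum covers the whole packet might do:

/*
 * Illustrative sketch -- not part of this file.  Validates a packet that
 * carries a plain Internet checksum over its full length.
 */
static int example_rx_checksum(struct sk_buff *skb)
{
	if (skb_checksum_simple_validate(skb))
		return -EINVAL;		/* bad checksum: caller should drop */
	/* skb->csum_valid is now set; continue protocol processing */
	return 0;
}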
+ */ +static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, + int start, int offset, bool nopartial) +{ + __wsum delta; + + if (!nopartial) { + skb_remcsum_adjust_partial(skb, ptr, start, offset); + return; + } + + if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { + __skb_checksum_complete(skb); + skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); + } + + delta = remcsum_adjust(ptr, skb->csum, start, offset); + + /* Adjust skb->csum since we changed the packet */ + skb->csum = csum_add(skb->csum, delta); +} + +static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + return (void *)(skb->_nfct & SKB_NFCT_PTRMASK); +#else + return NULL; +#endif +} + +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +void nf_conntrack_destroy(struct nf_conntrack *nfct); +static inline void nf_conntrack_put(struct nf_conntrack *nfct) +{ + if (nfct && atomic_dec_and_test(&nfct->use)) + nf_conntrack_destroy(nfct); +} +static inline void nf_conntrack_get(struct nf_conntrack *nfct) +{ + if (nfct) + atomic_inc(&nfct->use); +} +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) +static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) +{ + if (nf_bridge && refcount_dec_and_test(&nf_bridge->use)) + kfree(nf_bridge); +} +static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) +{ + if (nf_bridge) + refcount_inc(&nf_bridge->use); +} +#endif /* CONFIG_BRIDGE_NETFILTER */ +static inline void nf_reset(struct sk_buff *skb) +{ +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(skb_nfct(skb)); + skb->_nfct = 0; +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + nf_bridge_put(skb->nf_bridge); + skb->nf_bridge = NULL; +#endif +} + +static inline void nf_reset_trace(struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) + skb->nf_trace = 0; +#endif +} + +static inline void ipvs_reset(struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_IP_VS) + skb->ipvs_property = 0; +#endif +} + +/* Note: This doesn't put any conntrack and bridge info in dst. 
*/ +static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, + bool copy) +{ +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + dst->_nfct = src->_nfct; + nf_conntrack_get(skb_nfct(src)); +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + dst->nf_bridge = src->nf_bridge; + nf_bridge_get(src->nf_bridge); +#endif +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) + if (copy) + dst->nf_trace = src->nf_trace; +#endif +} + +static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) +{ +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(skb_nfct(dst)); +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + nf_bridge_put(dst->nf_bridge); +#endif + __nf_copy(dst, src, true); +} + +#ifdef CONFIG_NETWORK_SECMARK +static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) +{ + to->secmark = from->secmark; +} + +static inline void skb_init_secmark(struct sk_buff *skb) +{ + skb->secmark = 0; +} +#else +static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) +{ } + +static inline void skb_init_secmark(struct sk_buff *skb) +{ } +#endif + +static inline bool skb_irq_freeable(const struct sk_buff *skb) +{ + return !skb->destructor && +#if IS_ENABLED(CONFIG_XFRM) + !skb->sp && +#endif + !skb_nfct(skb) && + !skb->_skb_refdst && + !skb_has_frag_list(skb); +} + +static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) +{ + skb->queue_mapping = queue_mapping; +} + +static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) +{ + return skb->queue_mapping; +} + +static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) +{ + to->queue_mapping = from->queue_mapping; +} + +static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) +{ + skb->queue_mapping = rx_queue + 1; +} + +static inline u16 skb_get_rx_queue(const struct sk_buff *skb) +{ + return skb->queue_mapping - 1; +} + +static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) +{ + return skb->queue_mapping != 0; +} + +static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val) +{ + skb->dst_pending_confirm = val; +} + +static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb) +{ + return skb->dst_pending_confirm != 0; +} + +static inline struct sec_path *skb_sec_path(struct sk_buff *skb) +{ +#ifdef CONFIG_XFRM + return skb->sp; +#else + return NULL; +#endif +} + +/* Keeps track of mac header offset relative to skb->head. + * It is useful for TSO of Tunneling protocol. e.g. GRE. + * For non-tunnel skb it points to skb_mac_header() and for + * tunnel skb it points to outer mac header. + * Keeps track of level of encapsulation of network headers. 
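A hedged sketch of the queue-mapping helpers above in a multiqueue receive path (not part of the upstream header; the ring index parameter is illustrative):

/*
 * Illustrative sketch -- not part of this file.  A multiqueue driver
 * records which RX ring a packet arrived on before passing it up.
 */
static void example_rx_record_ring(struct sk_buff *skb, u16 ring_idx)
{
	skb_record_rx_queue(skb, ring_idx);
	if (skb_rx_queue_recorded(skb))
		pr_debug("packet from rx queue %u\n", skb_get_rx_queue(skb));
}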
+ */ +struct skb_gso_cb { + union { + int mac_offset; + int data_offset; + }; + int encap_level; + __wsum csum; + __u16 csum_start; +}; +#define SKB_SGO_CB_OFFSET 32 +#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET)) + +static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) +{ + return (skb_mac_header(inner_skb) - inner_skb->head) - + SKB_GSO_CB(inner_skb)->mac_offset; +} + +static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) +{ + int new_headroom, headroom; + int ret; + + headroom = skb_headroom(skb); + ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC); + if (ret) + return ret; + + new_headroom = skb_headroom(skb); + SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); + return 0; +} + +static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res) +{ + /* Do not update partial checksums if remote checksum is enabled. */ + if (skb->remcsum_offload) + return; + + SKB_GSO_CB(skb)->csum = res; + SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head; +} + +/* Compute the checksum for a gso segment. First compute the checksum value + * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and + * then add in skb->csum (checksum from csum_start to end of packet). + * skb->csum and csum_start are then updated to reflect the checksum of the + * resultant packet starting from the transport header-- the resultant checksum + * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo + * header. + */ +static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) +{ + unsigned char *csum_start = skb_transport_header(skb); + int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start; + __wsum partial = SKB_GSO_CB(skb)->csum; + + SKB_GSO_CB(skb)->csum = res; + SKB_GSO_CB(skb)->csum_start = csum_start - skb->head; + + return csum_fold(csum_partial(csum_start, plen, partial)); +} + +static inline bool skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} + +/* Note: Should be called only if skb_is_gso(skb) is true */ +static inline bool skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} + +/* Note: Should be called only if skb_is_gso(skb) is true */ +static inline bool skb_is_gso_sctp(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; +} + +/* Note: Should be called only if skb_is_gso(skb) is true */ +static inline bool skb_is_gso_tcp(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); +} + +static inline void skb_gso_reset(struct sk_buff *skb) +{ + skb_shinfo(skb)->gso_size = 0; + skb_shinfo(skb)->gso_segs = 0; + skb_shinfo(skb)->gso_type = 0; +} + +static inline void skb_increase_gso_size(struct skb_shared_info *shinfo, + u16 increment) +{ + if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) + return; + shinfo->gso_size += increment; +} + +static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo, + u16 decrement) +{ + if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) + return; + shinfo->gso_size -= decrement; +} + +void __skb_warn_lro_forwarding(const struct sk_buff *skb); + +static inline bool skb_warn_if_lro(const struct sk_buff *skb) +{ + /* LRO sets gso_size but not gso_type, whereas if GSO is really + * wanted then gso_type will be set. 
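A hedged sketch using the GSO predicates above to decide whether software segmentation is needed (not part of the upstream header; the feature test is deliberately simplified and illustrative):

/*
 * Illustrative sketch -- not part of this file.  Decides whether a GSO
 * packet must be segmented in software; the offload check is simplified.
 */
static bool example_needs_sw_gso(const struct sk_buff *skb,
				 netdev_features_t features)
{
	if (!skb_is_gso(skb))
		return false;				/* nothing to segment */
	if (skb_is_gso_tcp(skb) && (features & NETIF_F_ALL_TSO))
		return false;				/* device can offload TCP segmentation */
	return true;
}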
*/ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + + if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && + unlikely(shinfo->gso_type == 0)) { + __skb_warn_lro_forwarding(skb); + return true; + } + return false; +} + +static inline void skb_forward_csum(struct sk_buff *skb) +{ + /* Unfortunately we don't support this one. Any brave souls? */ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; +} + +/** + * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE + * @skb: skb to check + * + * fresh skbs have their ip_summed set to CHECKSUM_NONE. + * Instead of forcing ip_summed to CHECKSUM_NONE, we can + * use this helper, to document places where we make this assertion. + */ +static inline void skb_checksum_none_assert(const struct sk_buff *skb) +{ +#ifdef DEBUG + BUG_ON(skb->ip_summed != CHECKSUM_NONE); +#endif +} + +bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); + +int skb_checksum_setup(struct sk_buff *skb, bool recalculate); +struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, + unsigned int transport_len, + __sum16(*skb_chkf)(struct sk_buff *skb)); + +/** + * skb_head_is_locked - Determine if the skb->head is locked down + * @skb: skb to check + * + * The head on skbs build around a head frag can be removed if they are + * not cloned. This function returns true if the skb head is locked down + * due to either being allocated via kmalloc, or by being a clone with + * multiple references to the head. + */ +static inline bool skb_head_is_locked(const struct sk_buff *skb) +{ + return !skb->head_frag || skb_cloned(skb); +} + +/* Local Checksum Offload. + * Compute outer checksum based on the assumption that the + * inner checksum will be offloaded later. + * See Documentation/networking/checksum-offloads.txt for + * explanation of how this works. + * Fill in outer checksum adjustment (e.g. with sum of outer + * pseudo-header) before calling. + * Also ensure that inner checksum is in linear data area. + */ +static inline __wsum lco_csum(struct sk_buff *skb) +{ + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *l4_hdr = skb_transport_header(skb); + __wsum partial; + + /* Start with complement of inner checksum adjustment */ + partial = ~csum_unfold(*(__force __sum16 *)(csum_start + + skb->csum_offset)); + + /* Add in checksum of our headers (incl. outer checksum + * adjustment filled in by caller) and return result. + */ + return csum_partial(l4_hdr, csum_start - l4_hdr, partial); +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h new file mode 100644 index 000000000..d6393413e --- /dev/null +++ b/include/linux/slab.h @@ -0,0 +1,734 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). + * + * (C) SGI 2006, Christoph Lameter + * Cleaned up and restructured to ease the addition of alternative + * implementations of SLAB allocators. + * (C) Linux Foundation 2008-2013 + * Unified interface for all slab allocators + */ + +#ifndef _LINUX_SLAB_H +#define _LINUX_SLAB_H + +#include +#include +#include +#include + + +/* + * Flags to pass to kmem_cache_create(). + * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. 
+ */ +/* DEBUG: Perform (expensive) checks on alloc/free */ +#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U) +/* DEBUG: Red zone objs in a cache */ +#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U) +/* DEBUG: Poison objects */ +#define SLAB_POISON ((slab_flags_t __force)0x00000800U) +/* Align objs on cache lines */ +#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U) +/* Use GFP_DMA memory */ +#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U) +/* Use GFP_DMA32 memory */ +#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U) +/* DEBUG: Store the last owner for bug hunting */ +#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U) +/* Panic if kmem_cache_create() fails */ +#define SLAB_PANIC ((slab_flags_t __force)0x00040000U) +/* + * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS! + * + * This delays freeing the SLAB page by a grace period, it does _NOT_ + * delay object freeing. This means that if you do kmem_cache_free() + * that memory location is free to be reused at any time. Thus it may + * be possible to see another object there in the same RCU grace period. + * + * This feature only ensures the memory location backing the object + * stays valid, the trick to using this is relying on an independent + * object validation pass. Something like: + * + * rcu_read_lock() + * again: + * obj = lockless_lookup(key); + * if (obj) { + * if (!try_get_ref(obj)) // might fail for free objects + * goto again; + * + * if (obj->key != key) { // not the object we expected + * put_ref(obj); + * goto again; + * } + * } + * rcu_read_unlock(); + * + * This is useful if we need to approach a kernel structure obliquely, + * from its address obtained without the usual locking. We can lock + * the structure to stabilize it and check it's still at the given address, + * only if we can be sure that the memory has not been meanwhile reused + * for some other kind of object (which our subsystem's lock might corrupt). + * + * rcu_read_lock before reading the address, then rcu_read_unlock after + * taking the spinlock within the structure expected at that address. + * + * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU. + */ +/* Defer freeing slabs to RCU */ +#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U) +/* Spread some memory over cpuset */ +#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U) +/* Trace allocations and frees */ +#define SLAB_TRACE ((slab_flags_t __force)0x00200000U) + +/* Flag to prevent checks on free */ +#ifdef CONFIG_DEBUG_OBJECTS +# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U) +#else +# define SLAB_DEBUG_OBJECTS 0 +#endif + +/* Avoid kmemleak tracing */ +#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U) + +/* Fault injection mark */ +#ifdef CONFIG_FAILSLAB +# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U) +#else +# define SLAB_FAILSLAB 0 +#endif +/* Account to memcg */ +#ifdef CONFIG_MEMCG_KMEM +# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U) +#else +# define SLAB_ACCOUNT 0 +#endif + +#ifdef CONFIG_KASAN +#define SLAB_KASAN ((slab_flags_t __force)0x08000000U) +#else +#define SLAB_KASAN 0 +#endif + +/* The following flags affect the page allocator grouping pages by mobility */ +/* Objects are reclaimable */ +#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U) +#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ +/* + * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. 
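The SLAB_TYPESAFE_BY_RCU validation pass described above, rendered as a hedged concrete sketch (not part of the upstream header): the object layout, the lockless hash lookup and the put helper (example_*) are hypothetical stand-ins for the comment's lockless_lookup()/try_get_ref()/put_ref().

/*
 * Illustrative sketch -- not part of this file.  Lookup against a cache
 * created with SLAB_TYPESAFE_BY_RCU; retries if the refcount grab fails
 * or the slot was reused for a different key.
 */
struct example_obj {
	refcount_t ref;
	u32 key;
};

static struct example_obj *example_lookup(u32 key)
{
	struct example_obj *obj;

	rcu_read_lock();
again:
	obj = example_hash_lookup(key);			/* hypothetical lockless lookup */
	if (obj) {
		if (!refcount_inc_not_zero(&obj->ref))	/* may fail for freed objects */
			goto again;
		if (obj->key != key) {			/* memory was reused: retry */
			example_obj_put(obj);		/* hypothetical put helper */
			goto again;
		}
	}
	rcu_read_unlock();
	return obj;
}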
+ * + * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. + * + * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. + * Both make kfree a no-op. + */ +#define ZERO_SIZE_PTR ((void *)16) + +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ + (unsigned long)ZERO_SIZE_PTR) + +#include + +struct mem_cgroup; +/* + * struct kmem_cache related prototypes + */ +void __init kmem_cache_init(void); +bool slab_is_available(void); + +extern bool usercopy_fallback; + +struct kmem_cache *kmem_cache_create(const char *name, unsigned int size, + unsigned int align, slab_flags_t flags, + void (*ctor)(void *)); +struct kmem_cache *kmem_cache_create_usercopy(const char *name, + unsigned int size, unsigned int align, + slab_flags_t flags, + unsigned int useroffset, unsigned int usersize, + void (*ctor)(void *)); +void kmem_cache_destroy(struct kmem_cache *); +int kmem_cache_shrink(struct kmem_cache *); + +void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); +void memcg_deactivate_kmem_caches(struct mem_cgroup *); +void memcg_destroy_kmem_caches(struct mem_cgroup *); + +/* + * Please use this macro to create slab caches. Simply specify the + * name of the structure and maybe some flags that are listed above. + * + * The alignment of the struct determines object alignment. If you + * f.e. add ____cacheline_aligned_in_smp to the struct declaration + * then the objects will be properly aligned in SMP configurations. + */ +#define KMEM_CACHE(__struct, __flags) \ + kmem_cache_create(#__struct, sizeof(struct __struct), \ + __alignof__(struct __struct), (__flags), NULL) + +/* + * To whitelist a single field for copying to/from usercopy, use this + * macro instead for KMEM_CACHE() above. + */ +#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \ + kmem_cache_create_usercopy(#__struct, \ + sizeof(struct __struct), \ + __alignof__(struct __struct), (__flags), \ + offsetof(struct __struct, __field), \ + sizeof_field(struct __struct, __field), NULL) + +/* + * Common kmalloc functions provided by all allocators + */ +void * __must_check __krealloc(const void *, size_t, gfp_t); +void * __must_check krealloc(const void *, size_t, gfp_t); +void kfree(const void *); +void kzfree(const void *); +size_t ksize(const void *); + +#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR +void __check_heap_object(const void *ptr, unsigned long n, struct page *page, + bool to_user); +#else +static inline void __check_heap_object(const void *ptr, unsigned long n, + struct page *page, bool to_user) { } +#endif + +/* + * Some archs want to perform DMA into kmalloc caches and need a guaranteed + * alignment larger than the alignment of a 64-bit integer. + * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that. + */ +#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8 +#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN +#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN +#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN) +#else +#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) +#endif + +/* + * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. + * Intended for arches that get misalignment faults even for 64 bit integer + * aligned buffers. + */ +#ifndef ARCH_SLAB_MINALIGN +#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) +#endif + +/* + * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned + * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN + * aligned pointers. 
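A hedged sketch of the KMEM_CACHE() helper described above (not part of the upstream header; the structure, cache and function names are made up):

/*
 * Illustrative sketch -- not part of this file.  A subsystem creating its
 * own cache with KMEM_CACHE() and allocating zeroed objects from it.
 */
struct example_node {
	int id;
	struct list_head list;
};

static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
	example_cache = KMEM_CACHE(example_node, SLAB_HWCACHE_ALIGN);
	return example_cache ? 0 : -ENOMEM;
}

static struct example_node *example_node_alloc(gfp_t gfp)
{
	return kmem_cache_zalloc(example_cache, gfp);
}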
+ */ +#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN) +#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN) +#define __assume_page_alignment __assume_aligned(PAGE_SIZE) + +/* + * Kmalloc array related definitions + */ + +#ifdef CONFIG_SLAB +/* + * The largest kmalloc size supported by the SLAB allocators is + * 32 megabyte (2^25) or the maximum allocatable page order if that is + * less than 32 MB. + * + * WARNING: Its not easy to increase this value since the allocators have + * to do various tricks to work around compiler limitations in order to + * ensure proper constant folding. + */ +#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \ + (MAX_ORDER + PAGE_SHIFT - 1) : 25) +#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH +#ifndef KMALLOC_SHIFT_LOW +#define KMALLOC_SHIFT_LOW 5 +#endif +#endif + +#ifdef CONFIG_SLUB +/* + * SLUB directly allocates requests fitting in to an order-1 page + * (PAGE_SIZE*2). Larger requests are passed to the page allocator. + */ +#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) +#ifndef KMALLOC_SHIFT_LOW +#define KMALLOC_SHIFT_LOW 3 +#endif +#endif + +#ifdef CONFIG_SLOB +/* + * SLOB passes all requests larger than one page to the page allocator. + * No kmalloc array is necessary since objects of different sizes can + * be allocated from the same page. + */ +#define KMALLOC_SHIFT_HIGH PAGE_SHIFT +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) +#ifndef KMALLOC_SHIFT_LOW +#define KMALLOC_SHIFT_LOW 3 +#endif +#endif + +/* Maximum allocatable size */ +#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX) +/* Maximum size for which we actually use a slab cache */ +#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH) +/* Maximum order allocatable via the slab allocagtor */ +#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT) + +/* + * Kmalloc subsystem. + */ +#ifndef KMALLOC_MIN_SIZE +#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW) +#endif + +/* + * This restriction comes from byte sized index implementation. + * Page size is normally 2^12 bytes and, in this case, if we want to use + * byte sized index which can represent 2^8 entries, the size of the object + * should be equal or greater to 2^12 / 2^8 = 2^4 = 16. + * If minimum size of kmalloc is less than 16, we use it as minimum object + * size and give up to use byte sized index. + */ +#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \ + (KMALLOC_MIN_SIZE) : 16) + +#ifndef CONFIG_SLOB +extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; +#ifdef CONFIG_ZONE_DMA +extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; +#endif + +/* + * Figure out which kmalloc slab an allocation of a certain size + * belongs to. + * 0 = zero alloc + * 1 = 65 .. 96 bytes + * 2 = 129 .. 192 bytes + * n = 2^(n-1)+1 .. 
2^n + */ +static __always_inline unsigned int kmalloc_index(size_t size) +{ + if (!size) + return 0; + + if (size <= KMALLOC_MIN_SIZE) + return KMALLOC_SHIFT_LOW; + + if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96) + return 1; + if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192) + return 2; + if (size <= 8) return 3; + if (size <= 16) return 4; + if (size <= 32) return 5; + if (size <= 64) return 6; + if (size <= 128) return 7; + if (size <= 256) return 8; + if (size <= 512) return 9; + if (size <= 1024) return 10; + if (size <= 2 * 1024) return 11; + if (size <= 4 * 1024) return 12; + if (size <= 8 * 1024) return 13; + if (size <= 16 * 1024) return 14; + if (size <= 32 * 1024) return 15; + if (size <= 64 * 1024) return 16; + if (size <= 128 * 1024) return 17; + if (size <= 256 * 1024) return 18; + if (size <= 512 * 1024) return 19; + if (size <= 1024 * 1024) return 20; + if (size <= 2 * 1024 * 1024) return 21; + if (size <= 4 * 1024 * 1024) return 22; + if (size <= 8 * 1024 * 1024) return 23; + if (size <= 16 * 1024 * 1024) return 24; + if (size <= 32 * 1024 * 1024) return 25; + if (size <= 64 * 1024 * 1024) return 26; + BUG(); + + /* Will never be reached. Needed because the compiler may complain */ + return -1; +} +#endif /* !CONFIG_SLOB */ + +void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc; +void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc; +void kmem_cache_free(struct kmem_cache *, void *); + +/* + * Bulk allocation and freeing operations. These are accelerated in an + * allocator specific way to avoid taking locks repeatedly or building + * metadata structures unnecessarily. + * + * Note that interrupts must be enabled when calling these functions. + */ +void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); +int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); + +/* + * Caller must not use kfree_bulk() on memory not originally allocated + * by kmalloc(), because the SLOB allocator cannot handle this. 
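A hedged sketch of the bulk allocation API declared above (not part of the upstream header; the batch size is illustrative, and interrupts must be enabled as the comment states):

/*
 * Illustrative sketch -- not part of this file.  Allocates and then
 * immediately releases a small batch of objects from a cache.
 */
static int example_bulk_roundtrip(struct kmem_cache *cache)
{
	void *objs[8];
	int n;

	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, 8, objs);
	if (!n)
		return -ENOMEM;		/* the bulk allocation failed as a whole */
	kmem_cache_free_bulk(cache, n, objs);
	return 0;
}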
+ */ +static __always_inline void kfree_bulk(size_t size, void **p) +{ + kmem_cache_free_bulk(NULL, size, p); +} + +#ifdef CONFIG_NUMA +void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc; +void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; +#else +static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) +{ + return __kmalloc(size, flags); +} + +static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) +{ + return kmem_cache_alloc(s, flags); +} +#endif + +#ifdef CONFIG_TRACING +extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc; + +#ifdef CONFIG_NUMA +extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, + gfp_t gfpflags, + int node, size_t size) __assume_slab_alignment __malloc; +#else +static __always_inline void * +kmem_cache_alloc_node_trace(struct kmem_cache *s, + gfp_t gfpflags, + int node, size_t size) +{ + return kmem_cache_alloc_trace(s, gfpflags, size); +} +#endif /* CONFIG_NUMA */ + +#else /* CONFIG_TRACING */ +static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, + gfp_t flags, size_t size) +{ + void *ret = kmem_cache_alloc(s, flags); + + kasan_kmalloc(s, ret, size, flags); + return ret; +} + +static __always_inline void * +kmem_cache_alloc_node_trace(struct kmem_cache *s, + gfp_t gfpflags, + int node, size_t size) +{ + void *ret = kmem_cache_alloc_node(s, gfpflags, node); + + kasan_kmalloc(s, ret, size, gfpflags); + return ret; +} +#endif /* CONFIG_TRACING */ + +extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; + +#ifdef CONFIG_TRACING +extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; +#else +static __always_inline void * +kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) +{ + return kmalloc_order(size, flags, order); +} +#endif + +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) +{ + unsigned int order = get_order(size); + return kmalloc_order_trace(size, flags, order); +} + +/** + * kmalloc - allocate memory + * @size: how many bytes of memory are required. + * @flags: the type of memory to allocate. + * + * kmalloc is the normal method of allocating memory + * for objects smaller than page size in the kernel. + * + * The @flags argument may be one of: + * + * %GFP_USER - Allocate memory on behalf of user. May sleep. + * + * %GFP_KERNEL - Allocate normal kernel ram. May sleep. + * + * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools. + * For example, use this inside interrupt handlers. + * + * %GFP_HIGHUSER - Allocate pages from high memory. + * + * %GFP_NOIO - Do not do any I/O at all while trying to get memory. + * + * %GFP_NOFS - Do not make any fs calls while trying to get memory. + * + * %GFP_NOWAIT - Allocation will not sleep. + * + * %__GFP_THISNODE - Allocate node-local memory only. + * + * %GFP_DMA - Allocation suitable for DMA. + * Should only be used for kmalloc() caches. Otherwise, use a + * slab created with SLAB_DMA. + * + * Also it is possible to set different flags by OR'ing + * in one or more of the following additional @flags: + * + * %__GFP_HIGH - This allocation has high priority and may use emergency pools. + * + * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail + * (think twice before using). 
+ * + * %__GFP_NORETRY - If memory is not immediately available, + * then give up at once. + * + * %__GFP_NOWARN - If allocation fails, don't issue any warnings. + * + * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail + * eventually. + * + * There are other flags available as well, but these are not intended + * for general use, and so are not documented here. For a full list of + * potential flags, always refer to linux/gfp.h. + */ +static __always_inline void *kmalloc(size_t size, gfp_t flags) +{ + if (__builtin_constant_p(size)) { + if (size > KMALLOC_MAX_CACHE_SIZE) + return kmalloc_large(size, flags); +#ifndef CONFIG_SLOB + if (!(flags & GFP_DMA)) { + unsigned int index = kmalloc_index(size); + + if (!index) + return ZERO_SIZE_PTR; + + return kmem_cache_alloc_trace(kmalloc_caches[index], + flags, size); + } +#endif + } + return __kmalloc(size, flags); +} + +/* + * Determine size used for the nth kmalloc cache. + * return size or 0 if a kmalloc cache for that + * size does not exist + */ +static __always_inline unsigned int kmalloc_size(unsigned int n) +{ +#ifndef CONFIG_SLOB + if (n > 2) + return 1U << n; + + if (n == 1 && KMALLOC_MIN_SIZE <= 32) + return 96; + + if (n == 2 && KMALLOC_MIN_SIZE <= 64) + return 192; +#endif + return 0; +} + +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) +{ +#ifndef CONFIG_SLOB + if (__builtin_constant_p(size) && + size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) { + unsigned int i = kmalloc_index(size); + + if (!i) + return ZERO_SIZE_PTR; + + return kmem_cache_alloc_node_trace(kmalloc_caches[i], + flags, node, size); + } +#endif + return __kmalloc_node(size, flags, node); +} + +struct memcg_cache_array { + struct rcu_head rcu; + struct kmem_cache *entries[0]; +}; + +/* + * This is the main placeholder for memcg-related information in kmem caches. + * Both the root cache and the child caches will have it. For the root cache, + * this will hold a dynamically allocated array large enough to hold + * information about the currently limited memcgs in the system. To allow the + * array to be accessed without taking any locks, on relocation we free the old + * version only after a grace period. + * + * Root and child caches hold different metadata. + * + * @root_cache: Common to root and child caches. NULL for root, pointer to + * the root cache for children. + * + * The following fields are specific to root caches. + * + * @memcg_caches: kmemcg ID indexed table of child caches. This table is + * used to index child cachces during allocation and cleared + * early during shutdown. + * + * @root_caches_node: List node for slab_root_caches list. + * + * @children: List of all child caches. While the child caches are also + * reachable through @memcg_caches, a child cache remains on + * this list until it is actually destroyed. + * + * The following fields are specific to child caches. + * + * @memcg: Pointer to the memcg this cache belongs to. + * + * @children_node: List node for @root_cache->children list. + * + * @kmem_caches_node: List node for @memcg->kmem_caches list. 
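A hedged sketch of ordinary kmalloc() use under the GFP rules listed above (not part of the upstream header; the context-flag choice is illustrative):

/*
 * Illustrative sketch -- not part of this file.  Picks GFP_KERNEL when
 * sleeping is allowed and GFP_ATOMIC otherwise; callers must check for
 * a NULL return in both cases.
 */
static void *example_alloc_buf(size_t len, bool may_sleep)
{
	return kmalloc(len, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
}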
+ */ +struct memcg_cache_params { + struct kmem_cache *root_cache; + union { + struct { + struct memcg_cache_array __rcu *memcg_caches; + struct list_head __root_caches_node; + struct list_head children; + bool dying; + }; + struct { + struct mem_cgroup *memcg; + struct list_head children_node; + struct list_head kmem_caches_node; + + void (*deact_fn)(struct kmem_cache *); + union { + struct rcu_head deact_rcu_head; + struct work_struct deact_work; + }; + }; + }; +}; + +int memcg_update_all_caches(int num_memcgs); + +/** + * kmalloc_array - allocate memory for an array. + * @n: number of elements. + * @size: element size. + * @flags: the type of memory to allocate (see kmalloc). + */ +static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + if (__builtin_constant_p(n) && __builtin_constant_p(size)) + return kmalloc(bytes, flags); + return __kmalloc(bytes, flags); +} + +/** + * kcalloc - allocate memory for an array. The memory is set to zero. + * @n: number of elements. + * @size: element size. + * @flags: the type of memory to allocate (see kmalloc). + */ +static inline void *kcalloc(size_t n, size_t size, gfp_t flags) +{ + return kmalloc_array(n, size, flags | __GFP_ZERO); +} + +/* + * kmalloc_track_caller is a special version of kmalloc that records the + * calling function of the routine calling it for slab leak tracking instead + * of just the calling function (confusing, eh?). + * It's useful when the call to kmalloc comes from a widely-used standard + * allocator where we care about the real place the memory allocation + * request comes from. + */ +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); +#define kmalloc_track_caller(size, flags) \ + __kmalloc_track_caller(size, flags, _RET_IP_) + +static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, + int node) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + if (__builtin_constant_p(n) && __builtin_constant_p(size)) + return kmalloc_node(bytes, flags, node); + return __kmalloc_node(bytes, flags, node); +} + +static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) +{ + return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); +} + + +#ifdef CONFIG_NUMA +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); +#define kmalloc_node_track_caller(size, flags, node) \ + __kmalloc_node_track_caller(size, flags, node, \ + _RET_IP_) + +#else /* CONFIG_NUMA */ + +#define kmalloc_node_track_caller(size, flags, node) \ + kmalloc_track_caller(size, flags) + +#endif /* CONFIG_NUMA */ + +/* + * Shortcuts + */ +static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) +{ + return kmem_cache_alloc(k, flags | __GFP_ZERO); +} + +/** + * kzalloc - allocate memory. The memory is set to zero. + * @size: how many bytes of memory are required. + * @flags: the type of memory to allocate (see kmalloc). + */ +static inline void *kzalloc(size_t size, gfp_t flags) +{ + return kmalloc(size, flags | __GFP_ZERO); +} + +/** + * kzalloc_node - allocate zeroed memory from a particular memory node. + * @size: how many bytes of memory are required. + * @flags: the type of memory to allocate (see kmalloc). 
+ * @node: memory node from which to allocate + */ +static inline void *kzalloc_node(size_t size, gfp_t flags, int node) +{ + return kmalloc_node(size, flags | __GFP_ZERO, node); +} + +unsigned int kmem_cache_size(struct kmem_cache *s); +void __init kmem_cache_init_late(void); + +#if defined(CONFIG_SMP) && defined(CONFIG_SLAB) +int slab_prepare_cpu(unsigned int cpu); +int slab_dead_cpu(unsigned int cpu); +#else +#define slab_prepare_cpu NULL +#define slab_dead_cpu NULL +#endif + +#endif /* _LINUX_SLAB_H */ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h new file mode 100644 index 000000000..3485c58cf --- /dev/null +++ b/include/linux/slab_def.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SLAB_DEF_H +#define _LINUX_SLAB_DEF_H + +#include + +/* + * Definitions unique to the original Linux SLAB allocator. + */ + +struct kmem_cache { + struct array_cache __percpu *cpu_cache; + +/* 1) Cache tunables. Protected by slab_mutex */ + unsigned int batchcount; + unsigned int limit; + unsigned int shared; + + unsigned int size; + struct reciprocal_value reciprocal_buffer_size; +/* 2) touched by every alloc & free from the backend */ + + slab_flags_t flags; /* constant flags */ + unsigned int num; /* # of objs per slab */ + +/* 3) cache_grow/shrink */ + /* order of pgs per slab (2^n) */ + unsigned int gfporder; + + /* force GFP flags, e.g. GFP_DMA */ + gfp_t allocflags; + + size_t colour; /* cache colouring range */ + unsigned int colour_off; /* colour offset */ + struct kmem_cache *freelist_cache; + unsigned int freelist_size; + + /* constructor func */ + void (*ctor)(void *obj); + +/* 4) cache creation/removal */ + const char *name; + struct list_head list; + int refcount; + int object_size; + int align; + +/* 5) statistics */ +#ifdef CONFIG_DEBUG_SLAB + unsigned long num_active; + unsigned long num_allocations; + unsigned long high_mark; + unsigned long grown; + unsigned long reaped; + unsigned long errors; + unsigned long max_freeable; + unsigned long node_allocs; + unsigned long node_frees; + unsigned long node_overflow; + atomic_t allochit; + atomic_t allocmiss; + atomic_t freehit; + atomic_t freemiss; +#ifdef CONFIG_DEBUG_SLAB_LEAK + atomic_t store_user_clean; +#endif + + /* + * If debugging is enabled, then the allocator can add additional + * fields and/or padding to every object. 'size' contains the total + * object size including these internal fields, while 'obj_offset' + * and 'object_size' contain the offset to the user object and its + * size. 
+ */ + int obj_offset; +#endif /* CONFIG_DEBUG_SLAB */ + +#ifdef CONFIG_MEMCG + struct memcg_cache_params memcg_params; +#endif +#ifdef CONFIG_KASAN + struct kasan_cache kasan_info; +#endif + +#ifdef CONFIG_SLAB_FREELIST_RANDOM + unsigned int *random_seq; +#endif + + unsigned int useroffset; /* Usercopy region offset */ + unsigned int usersize; /* Usercopy region size */ + + struct kmem_cache_node *node[MAX_NUMNODES]; +}; + +static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, + void *x) +{ + void *object = x - (x - page->s_mem) % cache->size; + void *last_object = page->s_mem + (cache->num - 1) * cache->size; + + if (unlikely(object > last_object)) + return last_object; + else + return object; +} + +#endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h new file mode 100644 index 000000000..12c9719b2 --- /dev/null +++ b/include/linux/slimbus.h @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011-2017, The Linux Foundation + */ + +#ifndef _LINUX_SLIMBUS_H +#define _LINUX_SLIMBUS_H +#include +#include +#include +#include + +extern struct bus_type slimbus_bus; + +/** + * struct slim_eaddr - Enumeration address for a SLIMbus device + * @instance: Instance value + * @dev_index: Device index + * @prod_code: Product code + * @manf_id: Manufacturer Id for the device + */ +struct slim_eaddr { + u8 instance; + u8 dev_index; + u16 prod_code; + u16 manf_id; +} __packed; + +/** + * enum slim_device_status - slim device status + * @SLIM_DEVICE_STATUS_DOWN: Slim device is absent or not reported yet. + * @SLIM_DEVICE_STATUS_UP: Slim device is announced on the bus. + * @SLIM_DEVICE_STATUS_RESERVED: Reserved for future use. + */ +enum slim_device_status { + SLIM_DEVICE_STATUS_DOWN = 0, + SLIM_DEVICE_STATUS_UP, + SLIM_DEVICE_STATUS_RESERVED, +}; + +struct slim_controller; + +/** + * struct slim_device - Slim device handle. + * @dev: Driver model representation of the device. + * @e_addr: Enumeration address of this device. + * @status: slim device status + * @ctrl: slim controller instance. + * @laddr: 1-byte Logical address of this device. + * @is_laddr_valid: indicates if the laddr is valid or not + * @stream_list: List of streams on this device + * @stream_list_lock: lock to protect the stream list + * + * This is the client/device handle returned when a SLIMbus + * device is registered with a controller. + * Pointer to this structure is used by client-driver as a handle. + */ +struct slim_device { + struct device dev; + struct slim_eaddr e_addr; + struct slim_controller *ctrl; + enum slim_device_status status; + u8 laddr; + bool is_laddr_valid; + struct list_head stream_list; + spinlock_t stream_list_lock; +}; + +#define to_slim_device(d) container_of(d, struct slim_device, dev) + +/** + * struct slim_driver - SLIMbus 'generic device' (slave) device driver + * (similar to 'spi_device' on SPI) + * @probe: Binds this driver to a SLIMbus device. + * @remove: Unbinds this driver from the SLIMbus device. + * @shutdown: Standard shutdown callback used during powerdown/halt. + * @device_status: This callback is called when + * - The device reports present and gets a laddr assigned + * - The device reports absent, or the bus goes down. 
+ * @driver: SLIMbus device drivers should initialize name and owner field of + * this structure + * @id_table: List of SLIMbus devices supported by this driver + */ + +struct slim_driver { + int (*probe)(struct slim_device *sl); + void (*remove)(struct slim_device *sl); + void (*shutdown)(struct slim_device *sl); + int (*device_status)(struct slim_device *sl, + enum slim_device_status s); + struct device_driver driver; + const struct slim_device_id *id_table; +}; +#define to_slim_driver(d) container_of(d, struct slim_driver, driver) + +/** + * struct slim_val_inf - Slimbus value or information element + * @start_offset: Specifies starting offset in information/value element map + * @rbuf: buffer to read the values + * @wbuf: buffer to write + * @num_bytes: upto 16. This ensures that the message will fit the slicesize + * per SLIMbus spec + * @comp: completion for asynchronous operations, valid only if TID is + * required for transaction, like REQUEST operations. + * Rest of the transactions are synchronous anyway. + */ +struct slim_val_inf { + u16 start_offset; + u8 num_bytes; + u8 *rbuf; + const u8 *wbuf; + struct completion *comp; +}; + +#define SLIM_DEVICE_MAX_CHANNELS 256 +/* A SLIMBus Device may have frmo 0 to 31 Ports (inclusive) */ +#define SLIM_DEVICE_MAX_PORTS 32 + +/** + * struct slim_stream_config - SLIMbus stream configuration + * Configuring a stream is done at hw_params or prepare call + * from audio drivers where they have all the required information + * regarding rate, number of channels and so on. + * There is a 1:1 mapping of channel and ports. + * + * @rate: data rate + * @bps: bits per data sample + * @ch_count: number of channels + * @chs: pointer to list of channel numbers + * @port_mask: port mask of ports to use for this stream + * @direction: direction of the stream, SNDRV_PCM_STREAM_PLAYBACK + * or SNDRV_PCM_STREAM_CAPTURE. + */ +struct slim_stream_config { + unsigned int rate; + unsigned int bps; + /* MAX 256 channels */ + unsigned int ch_count; + unsigned int *chs; + /* Max 32 ports per device */ + unsigned long port_mask; + int direction; +}; + +/* + * use a macro to avoid include chaining to get THIS_MODULE + */ +#define slim_driver_register(drv) \ + __slim_driver_register(drv, THIS_MODULE) +int __slim_driver_register(struct slim_driver *drv, struct module *owner); +void slim_driver_unregister(struct slim_driver *drv); + +/** + * module_slim_driver() - Helper macro for registering a SLIMbus driver + * @__slim_driver: slimbus_driver struct + * + * Helper macro for SLIMbus drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. 
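As an illustration of the boilerplate this helper removes, a skeleton client might look like the following sketch; all foo_* names are hypothetical, and slim_readb() is declared later in this header:

    static int foo_slim_probe(struct slim_device *sdev)
    {
        /* read one byte from a hypothetical device register */
        int ret = slim_readb(sdev, 0x00);

        if (ret < 0)
            return ret;
        dev_info(&sdev->dev, "revision %#x\n", ret);
        return 0;
    }

    static void foo_slim_remove(struct slim_device *sdev)
    {
    }

    static struct slim_driver foo_slim_driver = {
        .probe  = foo_slim_probe,
        .remove = foo_slim_remove,
        .driver = {
            .name = "foo-slim",
        },
    };
    module_slim_driver(foo_slim_driver);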
Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_slim_driver(__slim_driver) \ + module_driver(__slim_driver, slim_driver_register, \ + slim_driver_unregister) + +static inline void *slim_get_devicedata(const struct slim_device *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void slim_set_devicedata(struct slim_device *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +struct slim_device *of_slim_get_device(struct slim_controller *ctrl, + struct device_node *np); +struct slim_device *slim_get_device(struct slim_controller *ctrl, + struct slim_eaddr *e_addr); +int slim_get_logical_addr(struct slim_device *sbdev); + +/* Information Element management messages */ +#define SLIM_MSG_MC_REQUEST_INFORMATION 0x20 +#define SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION 0x21 +#define SLIM_MSG_MC_REPLY_INFORMATION 0x24 +#define SLIM_MSG_MC_CLEAR_INFORMATION 0x28 +#define SLIM_MSG_MC_REPORT_INFORMATION 0x29 + +/* Value Element management messages */ +#define SLIM_MSG_MC_REQUEST_VALUE 0x60 +#define SLIM_MSG_MC_REQUEST_CHANGE_VALUE 0x61 +#define SLIM_MSG_MC_REPLY_VALUE 0x64 +#define SLIM_MSG_MC_CHANGE_VALUE 0x68 + +int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg, + u8 mc); +int slim_readb(struct slim_device *sdev, u32 addr); +int slim_writeb(struct slim_device *sdev, u32 addr, u8 value); +int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val); +int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val); + +/* SLIMbus Stream apis */ +struct slim_stream_runtime; +struct slim_stream_runtime *slim_stream_allocate(struct slim_device *dev, + const char *sname); +int slim_stream_prepare(struct slim_stream_runtime *stream, + struct slim_stream_config *c); +int slim_stream_enable(struct slim_stream_runtime *stream); +int slim_stream_disable(struct slim_stream_runtime *stream); +int slim_stream_unprepare(struct slim_stream_runtime *stream); +int slim_stream_free(struct slim_stream_runtime *stream); + +#endif /* _LINUX_SLIMBUS_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h new file mode 100644 index 000000000..3a1a1dbc6 --- /dev/null +++ b/include/linux/slub_def.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SLUB_DEF_H +#define _LINUX_SLUB_DEF_H + +/* + * SLUB : A Slab allocator without object queues. 
+ * + * (C) 2007 SGI, Christoph Lameter + */ +#include + +enum stat_item { + ALLOC_FASTPATH, /* Allocation from cpu slab */ + ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ + FREE_FASTPATH, /* Free to cpu slab */ + FREE_SLOWPATH, /* Freeing not to cpu slab */ + FREE_FROZEN, /* Freeing to frozen slab */ + FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ + FREE_REMOVE_PARTIAL, /* Freeing removes last object */ + ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */ + ALLOC_SLAB, /* Cpu slab acquired from page allocator */ + ALLOC_REFILL, /* Refill cpu slab from slab freelist */ + ALLOC_NODE_MISMATCH, /* Switching cpu slab */ + FREE_SLAB, /* Slab freed to the page allocator */ + CPUSLAB_FLUSH, /* Abandoning of the cpu slab */ + DEACTIVATE_FULL, /* Cpu slab was full when deactivated */ + DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */ + DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */ + DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */ + DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */ + DEACTIVATE_BYPASS, /* Implicit deactivation */ + ORDER_FALLBACK, /* Number of times fallback was necessary */ + CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */ + CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */ + CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */ + CPU_PARTIAL_FREE, /* Refill cpu partial on free */ + CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */ + CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ + NR_SLUB_STAT_ITEMS }; + +struct kmem_cache_cpu { + void **freelist; /* Pointer to next available object */ + unsigned long tid; /* Globally unique transaction id */ + struct page *page; /* The slab from which we are allocating */ +#ifdef CONFIG_SLUB_CPU_PARTIAL + struct page *partial; /* Partially allocated frozen slabs */ +#endif +#ifdef CONFIG_SLUB_STATS + unsigned stat[NR_SLUB_STAT_ITEMS]; +#endif +}; + +#ifdef CONFIG_SLUB_CPU_PARTIAL +#define slub_percpu_partial(c) ((c)->partial) + +#define slub_set_percpu_partial(c, p) \ +({ \ + slub_percpu_partial(c) = (p)->next; \ +}) + +#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c)) +#else +#define slub_percpu_partial(c) NULL + +#define slub_set_percpu_partial(c, p) + +#define slub_percpu_partial_read_once(c) NULL +#endif // CONFIG_SLUB_CPU_PARTIAL + +/* + * Word size structure that can be atomically updated or read and that + * contains both the order and the number of objects that a slab of the + * given order would contain. + */ +struct kmem_cache_order_objects { + unsigned int x; +}; + +/* + * Slab cache management. + */ +struct kmem_cache { + struct kmem_cache_cpu __percpu *cpu_slab; + /* Used for retriving partial slabs etc */ + slab_flags_t flags; + unsigned long min_partial; + unsigned int size; /* The size of an object including meta data */ + unsigned int object_size;/* The size of an object without meta data */ + unsigned int offset; /* Free pointer offset. 
*/ +#ifdef CONFIG_SLUB_CPU_PARTIAL + /* Number of per cpu partial objects to keep around */ + unsigned int cpu_partial; +#endif + struct kmem_cache_order_objects oo; + + /* Allocation and freeing of slabs */ + struct kmem_cache_order_objects max; + struct kmem_cache_order_objects min; + gfp_t allocflags; /* gfp flags to use on each alloc */ + int refcount; /* Refcount for slab cache destroy */ + void (*ctor)(void *); + unsigned int inuse; /* Offset to metadata */ + unsigned int align; /* Alignment */ + unsigned int red_left_pad; /* Left redzone padding size */ + const char *name; /* Name (only for display!) */ + struct list_head list; /* List of slab caches */ +#ifdef CONFIG_SYSFS + struct kobject kobj; /* For sysfs */ + struct work_struct kobj_remove_work; +#endif +#ifdef CONFIG_MEMCG + struct memcg_cache_params memcg_params; + /* for propagation, maximum size of a stored attr */ + unsigned int max_attr_size; +#ifdef CONFIG_SYSFS + struct kset *memcg_kset; +#endif +#endif + +#ifdef CONFIG_SLAB_FREELIST_HARDENED + unsigned long random; +#endif + +#ifdef CONFIG_NUMA + /* + * Defragmentation by allocating from a remote node. + */ + unsigned int remote_node_defrag_ratio; +#endif + +#ifdef CONFIG_SLAB_FREELIST_RANDOM + unsigned int *random_seq; +#endif + +#ifdef CONFIG_KASAN + struct kasan_cache kasan_info; +#endif + + unsigned int useroffset; /* Usercopy region offset */ + unsigned int usersize; /* Usercopy region size */ + + struct kmem_cache_node *node[MAX_NUMNODES]; +}; + +#ifdef CONFIG_SLUB_CPU_PARTIAL +#define slub_cpu_partial(s) ((s)->cpu_partial) +#define slub_set_cpu_partial(s, n) \ +({ \ + slub_cpu_partial(s) = (n); \ +}) +#else +#define slub_cpu_partial(s) (0) +#define slub_set_cpu_partial(s, n) +#endif // CONFIG_SLUB_CPU_PARTIAL + +#ifdef CONFIG_SYSFS +#define SLAB_SUPPORTS_SYSFS +void sysfs_slab_unlink(struct kmem_cache *); +void sysfs_slab_release(struct kmem_cache *); +#else +static inline void sysfs_slab_unlink(struct kmem_cache *s) +{ +} +static inline void sysfs_slab_release(struct kmem_cache *s) +{ +} +#endif + +void object_err(struct kmem_cache *s, struct page *page, + u8 *object, char *reason); + +void *fixup_red_left(struct kmem_cache *s, void *p); + +static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, + void *x) { + void *object = x - (x - page_address(page)) % cache->size; + void *last_object = page_address(page) + + (page->objects - 1) * cache->size; + void *result = (unlikely(object > last_object)) ? last_object : object; + + result = fixup_red_left(cache, result); + return result; +} + +#endif /* _LINUX_SLUB_DEF_H */ diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h new file mode 100644 index 000000000..67ed2c542 --- /dev/null +++ b/include/linux/sm501-regs.h @@ -0,0 +1,388 @@ +/* sm501-regs.h + * + * Copyright 2006 Simtec Electronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Silicon Motion SM501 register definitions +*/ + +/* System Configuration area */ +/* System config base */ +#define SM501_SYS_CONFIG (0x000000) + +/* config 1 */ +#define SM501_SYSTEM_CONTROL (0x000000) + +#define SM501_SYSCTRL_PANEL_TRISTATE (1<<0) +#define SM501_SYSCTRL_MEM_TRISTATE (1<<1) +#define SM501_SYSCTRL_CRT_TRISTATE (1<<2) + +#define SM501_SYSCTRL_PCI_SLAVE_BURST_MASK (3<<4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_1 (0<<4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_2 (1<<4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_4 (2<<4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_8 (3<<4) + +#define SM501_SYSCTRL_PCI_CLOCK_RUN_EN (1<<6) +#define SM501_SYSCTRL_PCI_RETRY_DISABLE (1<<7) +#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11) +#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15) + +#define SM501_SYSCTRL_2D_ENGINE_STATUS (1<<19) + +/* miscellaneous control */ + +#define SM501_MISC_CONTROL (0x000004) + +#define SM501_MISC_BUS_SH (0x0) +#define SM501_MISC_BUS_PCI (0x1) +#define SM501_MISC_BUS_XSCALE (0x2) +#define SM501_MISC_BUS_NEC (0x6) +#define SM501_MISC_BUS_MASK (0x7) + +#define SM501_MISC_VR_62MB (1<<3) +#define SM501_MISC_CDR_RESET (1<<7) +#define SM501_MISC_USB_LB (1<<8) +#define SM501_MISC_USB_SLAVE (1<<9) +#define SM501_MISC_BL_1 (1<<10) +#define SM501_MISC_MC (1<<11) +#define SM501_MISC_DAC_POWER (1<<12) +#define SM501_MISC_IRQ_INVERT (1<<16) +#define SM501_MISC_SH (1<<17) + +#define SM501_MISC_HOLD_EMPTY (0<<18) +#define SM501_MISC_HOLD_8 (1<<18) +#define SM501_MISC_HOLD_16 (2<<18) +#define SM501_MISC_HOLD_24 (3<<18) +#define SM501_MISC_HOLD_32 (4<<18) +#define SM501_MISC_HOLD_MASK (7<<18) + +#define SM501_MISC_FREQ_12 (1<<24) +#define SM501_MISC_PNL_24BIT (1<<25) +#define SM501_MISC_8051_LE (1<<26) + + + +#define SM501_GPIO31_0_CONTROL (0x000008) +#define SM501_GPIO63_32_CONTROL (0x00000C) +#define SM501_DRAM_CONTROL (0x000010) + +/* command list */ +#define SM501_ARBTRTN_CONTROL (0x000014) + +/* command list */ +#define SM501_COMMAND_LIST_STATUS (0x000024) + +/* interrupt debug */ +#define SM501_RAW_IRQ_STATUS (0x000028) +#define SM501_RAW_IRQ_CLEAR (0x000028) +#define SM501_IRQ_STATUS (0x00002C) +#define SM501_IRQ_MASK (0x000030) +#define SM501_DEBUG_CONTROL (0x000034) + +/* power management */ +#define SM501_POWERMODE_P2X_SRC (1<<29) +#define SM501_POWERMODE_V2X_SRC (1<<20) +#define SM501_POWERMODE_M_SRC (1<<12) +#define SM501_POWERMODE_M1_SRC (1<<4) + +#define SM501_CURRENT_GATE (0x000038) +#define SM501_CURRENT_CLOCK (0x00003C) +#define SM501_POWER_MODE_0_GATE (0x000040) +#define SM501_POWER_MODE_0_CLOCK (0x000044) +#define SM501_POWER_MODE_1_GATE (0x000048) +#define SM501_POWER_MODE_1_CLOCK (0x00004C) +#define SM501_SLEEP_MODE_GATE (0x000050) +#define SM501_POWER_MODE_CONTROL (0x000054) + +/* power gates for units within the 501 */ +#define SM501_GATE_HOST (0) +#define SM501_GATE_MEMORY (1) +#define SM501_GATE_DISPLAY (2) +#define SM501_GATE_2D_ENGINE (3) +#define SM501_GATE_CSC (4) +#define SM501_GATE_ZVPORT (5) +#define SM501_GATE_GPIO (6) +#define SM501_GATE_UART0 (7) +#define SM501_GATE_UART1 (8) +#define SM501_GATE_SSP (10) +#define SM501_GATE_USB_HOST (11) +#define SM501_GATE_USB_GADGET (12) +#define SM501_GATE_UCONTROLLER (17) +#define SM501_GATE_AC97 (18) + +/* panel clock */ +#define SM501_CLOCK_P2XCLK (24) +/* crt clock */ +#define SM501_CLOCK_V2XCLK (16) +/* main clock */ +#define SM501_CLOCK_MCLK (8) +/* SDRAM controller clock */ +#define SM501_CLOCK_M1XCLK (0) + +/* config 2 */ +#define SM501_PCI_MASTER_BASE (0x000058) +#define SM501_ENDIAN_CONTROL (0x00005C) +#define 
SM501_DEVICEID (0x000060) +/* 0x050100A0 */ + +#define SM501_DEVICEID_SM501 (0x05010000) +#define SM501_DEVICEID_IDMASK (0xffff0000) +#define SM501_DEVICEID_REVMASK (0x000000ff) + +#define SM501_PLLCLOCK_COUNT (0x000064) +#define SM501_MISC_TIMING (0x000068) +#define SM501_CURRENT_SDRAM_CLOCK (0x00006C) + +#define SM501_PROGRAMMABLE_PLL_CONTROL (0x000074) + +/* GPIO base */ +#define SM501_GPIO (0x010000) +#define SM501_GPIO_DATA_LOW (0x00) +#define SM501_GPIO_DATA_HIGH (0x04) +#define SM501_GPIO_DDR_LOW (0x08) +#define SM501_GPIO_DDR_HIGH (0x0C) +#define SM501_GPIO_IRQ_SETUP (0x10) +#define SM501_GPIO_IRQ_STATUS (0x14) +#define SM501_GPIO_IRQ_RESET (0x14) + +/* I2C controller base */ +#define SM501_I2C (0x010040) +#define SM501_I2C_BYTE_COUNT (0x00) +#define SM501_I2C_CONTROL (0x01) +#define SM501_I2C_STATUS (0x02) +#define SM501_I2C_RESET (0x02) +#define SM501_I2C_SLAVE_ADDRESS (0x03) +#define SM501_I2C_DATA (0x04) + +/* SSP base */ +#define SM501_SSP (0x020000) + +/* Uart 0 base */ +#define SM501_UART0 (0x030000) + +/* Uart 1 base */ +#define SM501_UART1 (0x030020) + +/* USB host port base */ +#define SM501_USB_HOST (0x040000) + +/* USB slave/gadget base */ +#define SM501_USB_GADGET (0x060000) + +/* USB slave/gadget data port base */ +#define SM501_USB_GADGET_DATA (0x070000) + +/* Display controller/video engine base */ +#define SM501_DC (0x080000) + +/* common defines for the SM501 address registers */ +#define SM501_ADDR_FLIP (1<<31) +#define SM501_ADDR_EXT (1<<27) +#define SM501_ADDR_CS1 (1<<26) +#define SM501_ADDR_MASK (0x3f << 26) + +#define SM501_FIFO_MASK (0x3 << 16) +#define SM501_FIFO_1 (0x0 << 16) +#define SM501_FIFO_3 (0x1 << 16) +#define SM501_FIFO_7 (0x2 << 16) +#define SM501_FIFO_11 (0x3 << 16) + +/* common registers for panel and the crt */ +#define SM501_OFF_DC_H_TOT (0x000) +#define SM501_OFF_DC_V_TOT (0x008) +#define SM501_OFF_DC_H_SYNC (0x004) +#define SM501_OFF_DC_V_SYNC (0x00C) + +#define SM501_DC_PANEL_CONTROL (0x000) + +#define SM501_DC_PANEL_CONTROL_FPEN (1<<27) +#define SM501_DC_PANEL_CONTROL_BIAS (1<<26) +#define SM501_DC_PANEL_CONTROL_DATA (1<<25) +#define SM501_DC_PANEL_CONTROL_VDD (1<<24) +#define SM501_DC_PANEL_CONTROL_DP (1<<23) + +#define SM501_DC_PANEL_CONTROL_TFT_888 (0<<21) +#define SM501_DC_PANEL_CONTROL_TFT_333 (1<<21) +#define SM501_DC_PANEL_CONTROL_TFT_444 (2<<21) + +#define SM501_DC_PANEL_CONTROL_DE (1<<20) + +#define SM501_DC_PANEL_CONTROL_LCD_TFT (0<<18) +#define SM501_DC_PANEL_CONTROL_LCD_STN8 (1<<18) +#define SM501_DC_PANEL_CONTROL_LCD_STN12 (2<<18) + +#define SM501_DC_PANEL_CONTROL_CP (1<<14) +#define SM501_DC_PANEL_CONTROL_VSP (1<<13) +#define SM501_DC_PANEL_CONTROL_HSP (1<<12) +#define SM501_DC_PANEL_CONTROL_CK (1<<9) +#define SM501_DC_PANEL_CONTROL_TE (1<<8) +#define SM501_DC_PANEL_CONTROL_VPD (1<<7) +#define SM501_DC_PANEL_CONTROL_VP (1<<6) +#define SM501_DC_PANEL_CONTROL_HPD (1<<5) +#define SM501_DC_PANEL_CONTROL_HP (1<<4) +#define SM501_DC_PANEL_CONTROL_GAMMA (1<<3) +#define SM501_DC_PANEL_CONTROL_EN (1<<2) + +#define SM501_DC_PANEL_CONTROL_8BPP (0<<0) +#define SM501_DC_PANEL_CONTROL_16BPP (1<<0) +#define SM501_DC_PANEL_CONTROL_32BPP (2<<0) + + +#define SM501_DC_PANEL_PANNING_CONTROL (0x004) +#define SM501_DC_PANEL_COLOR_KEY (0x008) +#define SM501_DC_PANEL_FB_ADDR (0x00C) +#define SM501_DC_PANEL_FB_OFFSET (0x010) +#define SM501_DC_PANEL_FB_WIDTH (0x014) +#define SM501_DC_PANEL_FB_HEIGHT (0x018) +#define SM501_DC_PANEL_TL_LOC (0x01C) +#define SM501_DC_PANEL_BR_LOC (0x020) +#define SM501_DC_PANEL_H_TOT (0x024) +#define 
SM501_DC_PANEL_H_SYNC (0x028) +#define SM501_DC_PANEL_V_TOT (0x02C) +#define SM501_DC_PANEL_V_SYNC (0x030) +#define SM501_DC_PANEL_CUR_LINE (0x034) + +#define SM501_DC_VIDEO_CONTROL (0x040) +#define SM501_DC_VIDEO_FB0_ADDR (0x044) +#define SM501_DC_VIDEO_FB_WIDTH (0x048) +#define SM501_DC_VIDEO_FB0_LAST_ADDR (0x04C) +#define SM501_DC_VIDEO_TL_LOC (0x050) +#define SM501_DC_VIDEO_BR_LOC (0x054) +#define SM501_DC_VIDEO_SCALE (0x058) +#define SM501_DC_VIDEO_INIT_SCALE (0x05C) +#define SM501_DC_VIDEO_YUV_CONSTANTS (0x060) +#define SM501_DC_VIDEO_FB1_ADDR (0x064) +#define SM501_DC_VIDEO_FB1_LAST_ADDR (0x068) + +#define SM501_DC_VIDEO_ALPHA_CONTROL (0x080) +#define SM501_DC_VIDEO_ALPHA_FB_ADDR (0x084) +#define SM501_DC_VIDEO_ALPHA_FB_OFFSET (0x088) +#define SM501_DC_VIDEO_ALPHA_FB_LAST_ADDR (0x08C) +#define SM501_DC_VIDEO_ALPHA_TL_LOC (0x090) +#define SM501_DC_VIDEO_ALPHA_BR_LOC (0x094) +#define SM501_DC_VIDEO_ALPHA_SCALE (0x098) +#define SM501_DC_VIDEO_ALPHA_INIT_SCALE (0x09C) +#define SM501_DC_VIDEO_ALPHA_CHROMA_KEY (0x0A0) +#define SM501_DC_VIDEO_ALPHA_COLOR_LOOKUP (0x0A4) + +#define SM501_DC_PANEL_HWC_BASE (0x0F0) +#define SM501_DC_PANEL_HWC_ADDR (0x0F0) +#define SM501_DC_PANEL_HWC_LOC (0x0F4) +#define SM501_DC_PANEL_HWC_COLOR_1_2 (0x0F8) +#define SM501_DC_PANEL_HWC_COLOR_3 (0x0FC) + +#define SM501_HWC_EN (1<<31) + +#define SM501_OFF_HWC_ADDR (0x00) +#define SM501_OFF_HWC_LOC (0x04) +#define SM501_OFF_HWC_COLOR_1_2 (0x08) +#define SM501_OFF_HWC_COLOR_3 (0x0C) + +#define SM501_DC_ALPHA_CONTROL (0x100) +#define SM501_DC_ALPHA_FB_ADDR (0x104) +#define SM501_DC_ALPHA_FB_OFFSET (0x108) +#define SM501_DC_ALPHA_TL_LOC (0x10C) +#define SM501_DC_ALPHA_BR_LOC (0x110) +#define SM501_DC_ALPHA_CHROMA_KEY (0x114) +#define SM501_DC_ALPHA_COLOR_LOOKUP (0x118) + +#define SM501_DC_CRT_CONTROL (0x200) + +#define SM501_DC_CRT_CONTROL_TVP (1<<15) +#define SM501_DC_CRT_CONTROL_CP (1<<14) +#define SM501_DC_CRT_CONTROL_VSP (1<<13) +#define SM501_DC_CRT_CONTROL_HSP (1<<12) +#define SM501_DC_CRT_CONTROL_VS (1<<11) +#define SM501_DC_CRT_CONTROL_BLANK (1<<10) +#define SM501_DC_CRT_CONTROL_SEL (1<<9) +#define SM501_DC_CRT_CONTROL_TE (1<<8) +#define SM501_DC_CRT_CONTROL_PIXEL_MASK (0xF << 4) +#define SM501_DC_CRT_CONTROL_GAMMA (1<<3) +#define SM501_DC_CRT_CONTROL_ENABLE (1<<2) + +#define SM501_DC_CRT_CONTROL_8BPP (0<<0) +#define SM501_DC_CRT_CONTROL_16BPP (1<<0) +#define SM501_DC_CRT_CONTROL_32BPP (2<<0) + +#define SM501_DC_CRT_FB_ADDR (0x204) +#define SM501_DC_CRT_FB_OFFSET (0x208) +#define SM501_DC_CRT_H_TOT (0x20C) +#define SM501_DC_CRT_H_SYNC (0x210) +#define SM501_DC_CRT_V_TOT (0x214) +#define SM501_DC_CRT_V_SYNC (0x218) +#define SM501_DC_CRT_SIGNATURE_ANALYZER (0x21C) +#define SM501_DC_CRT_CUR_LINE (0x220) +#define SM501_DC_CRT_MONITOR_DETECT (0x224) + +#define SM501_DC_CRT_HWC_BASE (0x230) +#define SM501_DC_CRT_HWC_ADDR (0x230) +#define SM501_DC_CRT_HWC_LOC (0x234) +#define SM501_DC_CRT_HWC_COLOR_1_2 (0x238) +#define SM501_DC_CRT_HWC_COLOR_3 (0x23C) + +#define SM501_DC_PANEL_PALETTE (0x400) + +#define SM501_DC_VIDEO_PALETTE (0x800) + +#define SM501_DC_CRT_PALETTE (0xC00) + +/* Zoom Video port base */ +#define SM501_ZVPORT (0x090000) + +/* AC97/I2S base */ +#define SM501_AC97 (0x0A0000) + +/* 8051 micro controller base */ +#define SM501_UCONTROLLER (0x0B0000) + +/* 8051 micro controller SRAM base */ +#define SM501_UCONTROLLER_SRAM (0x0C0000) + +/* DMA base */ +#define SM501_DMA (0x0D0000) + +/* 2d engine base */ +#define SM501_2D_ENGINE (0x100000) +#define SM501_2D_SOURCE (0x00) +#define SM501_2D_DESTINATION (0x04) 
+#define SM501_2D_DIMENSION (0x08) +#define SM501_2D_CONTROL (0x0C) +#define SM501_2D_PITCH (0x10) +#define SM501_2D_FOREGROUND (0x14) +#define SM501_2D_BACKGROUND (0x18) +#define SM501_2D_STRETCH (0x1C) +#define SM501_2D_COLOR_COMPARE (0x20) +#define SM501_2D_COLOR_COMPARE_MASK (0x24) +#define SM501_2D_MASK (0x28) +#define SM501_2D_CLIP_TL (0x2C) +#define SM501_2D_CLIP_BR (0x30) +#define SM501_2D_MONO_PATTERN_LOW (0x34) +#define SM501_2D_MONO_PATTERN_HIGH (0x38) +#define SM501_2D_WINDOW_WIDTH (0x3C) +#define SM501_2D_SOURCE_BASE (0x40) +#define SM501_2D_DESTINATION_BASE (0x44) +#define SM501_2D_ALPHA (0x48) +#define SM501_2D_WRAP (0x4C) +#define SM501_2D_STATUS (0x50) + +#define SM501_CSC_Y_SOURCE_BASE (0xC8) +#define SM501_CSC_CONSTANTS (0xCC) +#define SM501_CSC_Y_SOURCE_X (0xD0) +#define SM501_CSC_Y_SOURCE_Y (0xD4) +#define SM501_CSC_U_SOURCE_BASE (0xD8) +#define SM501_CSC_V_SOURCE_BASE (0xDC) +#define SM501_CSC_SOURCE_DIMENSION (0xE0) +#define SM501_CSC_SOURCE_PITCH (0xE4) +#define SM501_CSC_DESTINATION (0xE8) +#define SM501_CSC_DESTINATION_DIMENSION (0xEC) +#define SM501_CSC_DESTINATION_PITCH (0xF0) +#define SM501_CSC_SCALE_FACTOR (0xF4) +#define SM501_CSC_DESTINATION_BASE (0xF8) +#define SM501_CSC_CONTROL (0xFC) + +/* 2d engine data port base */ +#define SM501_2D_ENGINE_DATA (0x110000) diff --git a/include/linux/sm501.h b/include/linux/sm501.h new file mode 100644 index 000000000..02fde50a7 --- /dev/null +++ b/include/linux/sm501.h @@ -0,0 +1,182 @@ +/* include/linux/sm501.h + * + * Copyright (c) 2006 Simtec Electronics + * Ben Dooks + * Vincent Sanders + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +extern int sm501_unit_power(struct device *dev, + unsigned int unit, unsigned int to); + +extern unsigned long sm501_set_clock(struct device *dev, + int clksrc, unsigned long freq); + +extern unsigned long sm501_find_clock(struct device *dev, + int clksrc, unsigned long req_freq); + +/* sm501_misc_control + * + * Modify the SM501's MISC_CONTROL register +*/ + +extern int sm501_misc_control(struct device *dev, + unsigned long set, unsigned long clear); + +/* sm501_modify_reg + * + * Modify a register in the SM501 which may be shared with other + * drivers. 
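Before the register-modification helper is declared below, a brief sketch of how a subdevice driver might use the power and clock helpers above. It assumes (as the SM501 MFD layout suggests) that the SM501 core is the subdevice's parent device; foo_sm501_enable() is hypothetical:

    static int foo_sm501_enable(struct device *dev)
    {
        struct device *sm501 = dev->parent;    /* SM501 core device */
        unsigned long clk;
        int ret;

        /* ungate the USB host unit */
        ret = sm501_unit_power(sm501, SM501_GATE_USB_HOST, 1);
        if (ret)
            return ret;

        /* request roughly 24MHz on the panel clock; the rate actually set is returned */
        clk = sm501_set_clock(sm501, SM501_CLOCK_P2XCLK, 24 * 1000 * 1000);
        dev_dbg(dev, "P2XCLK running at %lu Hz\n", clk);
        return 0;
    }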
+*/ + +extern unsigned long sm501_modify_reg(struct device *dev, + unsigned long reg, + unsigned long set, + unsigned long clear); + + +/* Platform data definitions */ + +#define SM501FB_FLAG_USE_INIT_MODE (1<<0) +#define SM501FB_FLAG_DISABLE_AT_EXIT (1<<1) +#define SM501FB_FLAG_USE_HWCURSOR (1<<2) +#define SM501FB_FLAG_USE_HWACCEL (1<<3) +#define SM501FB_FLAG_PANEL_NO_FPEN (1<<4) +#define SM501FB_FLAG_PANEL_NO_VBIASEN (1<<5) +#define SM501FB_FLAG_PANEL_INV_FPEN (1<<6) +#define SM501FB_FLAG_PANEL_INV_VBIASEN (1<<7) + +struct sm501_platdata_fbsub { + struct fb_videomode *def_mode; + unsigned int def_bpp; + unsigned long max_mem; + unsigned int flags; +}; + +enum sm501_fb_routing { + SM501_FB_OWN = 0, /* CRT=>CRT, Panel=>Panel */ + SM501_FB_CRT_PANEL = 1, /* Panel=>CRT, Panel=>Panel */ +}; + +/* sm501_platdata_fb flag field bit definitions */ + +#define SM501_FBPD_SWAP_FB_ENDIAN (1<<0) /* need to endian swap */ + +/* sm501_platdata_fb + * + * configuration data for the framebuffer driver +*/ + +struct sm501_platdata_fb { + enum sm501_fb_routing fb_route; + unsigned int flags; + struct sm501_platdata_fbsub *fb_crt; + struct sm501_platdata_fbsub *fb_pnl; +}; + +/* gpio i2c + * + * Note, we have to pass in the bus number, as the number used will be + * passed to the i2c-gpio driver's platform_device.id, subsequently used + * to register the i2c bus. +*/ + +struct sm501_platdata_gpio_i2c { + unsigned int bus_num; + unsigned int pin_sda; + unsigned int pin_scl; + int udelay; + int timeout; +}; + +/* sm501_initdata + * + * use for initialising values that may not have been setup + * before the driver is loaded. +*/ + +struct sm501_reg_init { + unsigned long set; + unsigned long mask; +}; + +#define SM501_USE_USB_HOST (1<<0) +#define SM501_USE_USB_SLAVE (1<<1) +#define SM501_USE_SSP0 (1<<2) +#define SM501_USE_SSP1 (1<<3) +#define SM501_USE_UART0 (1<<4) +#define SM501_USE_UART1 (1<<5) +#define SM501_USE_FBACCEL (1<<6) +#define SM501_USE_AC97 (1<<7) +#define SM501_USE_I2S (1<<8) +#define SM501_USE_GPIO (1<<9) + +#define SM501_USE_ALL (0xffffffff) + +struct sm501_initdata { + struct sm501_reg_init gpio_low; + struct sm501_reg_init gpio_high; + struct sm501_reg_init misc_timing; + struct sm501_reg_init misc_control; + + unsigned long devices; + unsigned long mclk; /* non-zero to modify */ + unsigned long m1xclk; /* non-zero to modify */ +}; + +/* sm501_init_gpio + * + * default gpio settings +*/ + +struct sm501_init_gpio { + struct sm501_reg_init gpio_data_low; + struct sm501_reg_init gpio_data_high; + struct sm501_reg_init gpio_ddr_low; + struct sm501_reg_init gpio_ddr_high; +}; + +#define SM501_FLAG_SUSPEND_OFF (1<<4) + +/* sm501_platdata + * + * This is passed with the platform device to allow the board + * to control the behaviour of the SM501 driver(s) which attach + * to the device. 
+ * +*/ + +struct sm501_platdata { + struct sm501_initdata *init; + struct sm501_init_gpio *init_gpiop; + struct sm501_platdata_fb *fb; + + int flags; + int gpio_base; + + int (*get_power)(struct device *dev); + int (*set_power)(struct device *dev, unsigned int on); + + struct sm501_platdata_gpio_i2c *gpio_i2c; + unsigned int gpio_i2c_nr; +}; + +#if defined(CONFIG_PPC32) +#define smc501_readl(addr) ioread32be((addr)) +#define smc501_writel(val, addr) iowrite32be((val), (addr)) +#else +#define smc501_readl(addr) readl(addr) +#define smc501_writel(val, addr) writel(val, addr) +#endif diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h new file mode 100644 index 000000000..8cace8189 --- /dev/null +++ b/include/linux/smc911x.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SMC911X_H__ +#define __SMC911X_H__ + +#define SMC911X_USE_16BIT (1 << 0) +#define SMC911X_USE_32BIT (1 << 1) + +struct smc911x_platdata { + unsigned long flags; + unsigned long irq_flags; /* IRQF_... */ + int irq_polarity; +}; + +#endif /* __SMC911X_H__ */ diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h new file mode 100644 index 000000000..f3b195fa7 --- /dev/null +++ b/include/linux/smc91x.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SMC91X_H__ +#define __SMC91X_H__ + +/* + * These bits define which access sizes a platform can support, rather + * than the maximal access size. So, if your platform can do 16-bit + * and 32-bit accesses to the SMC91x device, but not 8-bit, set both + * SMC91X_USE_16BIT and SMC91X_USE_32BIT. + * + * The SMC91x driver requires at least one of SMC91X_USE_8BIT or + * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is + * an invalid configuration. + */ +#define SMC91X_USE_8BIT (1 << 0) +#define SMC91X_USE_16BIT (1 << 1) +#define SMC91X_USE_32BIT (1 << 2) + +#define SMC91X_NOWAIT (1 << 3) + +/* two bits for IO_SHIFT, let's hope later designs will keep this sane */ +#define SMC91X_IO_SHIFT_0 (0 << 4) +#define SMC91X_IO_SHIFT_1 (1 << 4) +#define SMC91X_IO_SHIFT_2 (2 << 4) +#define SMC91X_IO_SHIFT_3 (3 << 4) +#define SMC91X_IO_SHIFT(x) (((x) >> 4) & 0x3) + +#define SMC91X_USE_DMA (1 << 6) + +#define RPC_LED_100_10 (0x00) /* LED = 100Mbps OR's with 10Mbps link detect */ +#define RPC_LED_RES (0x01) /* LED = Reserved */ +#define RPC_LED_10 (0x02) /* LED = 10Mbps link detect */ +#define RPC_LED_FD (0x03) /* LED = Full Duplex Mode */ +#define RPC_LED_TX_RX (0x04) /* LED = TX or RX packet occurred */ +#define RPC_LED_100 (0x05) /* LED = 100Mbps link detect */ +#define RPC_LED_TX (0x06) /* LED = TX packet occurred */ +#define RPC_LED_RX (0x07) /* LED = RX packet occurred */ + +struct smc91x_platdata { + unsigned long flags; + unsigned char leda; + unsigned char ledb; + bool pxa_u16_align4; /* PXA buggy u16 writes on 4*n+2 addresses */ +}; + +#endif /* __SMC91X_H__ */ diff --git a/include/linux/smp.h b/include/linux/smp.h new file mode 100644 index 000000000..6bb7f07bc --- /dev/null +++ b/include/linux/smp.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SMP_H +#define __LINUX_SMP_H + +/* + * Generic SMP support + * Alan Cox. 
+ */ + +#include +#include +#include +#include +#include +#include + +typedef void (*smp_call_func_t)(void *info); +struct __call_single_data { + struct llist_node llist; + smp_call_func_t func; + void *info; + unsigned int flags; +}; + +/* Use __aligned() to avoid to use 2 cache lines for 1 csd */ +typedef struct __call_single_data call_single_data_t + __aligned(sizeof(struct __call_single_data)); + +/* total number of cpus in this system (may exceed NR_CPUS) */ +extern unsigned int total_cpus; + +int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, + int wait); + +/* + * Call a function on all processors + */ +int on_each_cpu(smp_call_func_t func, void *info, int wait); + +/* + * Call a function on processors specified by mask, which might include + * the local one. + */ +void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, + void *info, bool wait); + +/* + * Call a function on each processor for which the supplied function + * cond_func returns a positive value. This may include the local + * processor. + */ +void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), + smp_call_func_t func, void *info, bool wait, + gfp_t gfp_flags); + +int smp_call_function_single_async(int cpu, struct __call_single_data *csd); + +#ifdef CONFIG_SMP + +#include +#include +#include +#include +#include + +/* + * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. + * (defined in asm header): + */ + +/* + * stops all CPUs but the current one: + */ +extern void smp_send_stop(void); + +/* + * sends a 'reschedule' event to another CPU: + */ +extern void smp_send_reschedule(int cpu); + + +/* + * Prepare machine for booting other CPUs. + */ +extern void smp_prepare_cpus(unsigned int max_cpus); + +/* + * Bring a CPU up + */ +extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle); + +/* + * Final polishing of CPUs + */ +extern void smp_cpus_done(unsigned int max_cpus); + +/* + * Call a function on all other processors + */ +int smp_call_function(smp_call_func_t func, void *info, int wait); +void smp_call_function_many(const struct cpumask *mask, + smp_call_func_t func, void *info, bool wait); + +int smp_call_function_any(const struct cpumask *mask, + smp_call_func_t func, void *info, int wait); + +void kick_all_cpus_sync(void); +void wake_up_all_idle_cpus(void); + +/* + * Generic and arch helpers + */ +void __init call_function_init(void); +void generic_smp_call_function_single_interrupt(void); +#define generic_smp_call_function_interrupt \ + generic_smp_call_function_single_interrupt + +/* + * Mark the boot cpu "online" so that it can call console drivers in + * printk() and can access its per-cpu storage. 
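To make the cross-CPU call interface above concrete, a minimal sketch of running a function on every online CPU and waiting for completion; foo_* is hypothetical, and the callback runs in IPI context, so it must not sleep:

    static void foo_flush_local(void *info)
    {
        atomic_t *count = info;

        /* per-CPU work would go here */
        atomic_inc(count);
    }

    static void foo_flush_all(void)
    {
        atomic_t count = ATOMIC_INIT(0);

        on_each_cpu(foo_flush_local, &count, 1);    /* wait == 1 */
        pr_debug("flushed on %d cpus\n", atomic_read(&count));
    }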
+ */ +void smp_prepare_boot_cpu(void); + +extern unsigned int setup_max_cpus; +extern void __init setup_nr_cpu_ids(void); +extern void __init smp_init(void); + +extern int __boot_cpu_id; + +static inline int get_boot_cpu_id(void) +{ + return __boot_cpu_id; +} + +#else /* !SMP */ + +static inline void smp_send_stop(void) { } + +/* + * These macros fold the SMP functionality into a single CPU system + */ +#define raw_smp_processor_id() 0 +static inline int up_smp_call_function(smp_call_func_t func, void *info) +{ + return 0; +} +#define smp_call_function(func, info, wait) \ + (up_smp_call_function(func, info)) + +static inline void smp_send_reschedule(int cpu) { } +#define smp_prepare_boot_cpu() do {} while (0) +#define smp_call_function_many(mask, func, info, wait) \ + (up_smp_call_function(func, info)) +static inline void call_function_init(void) { } + +static inline int +smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, + void *info, int wait) +{ + return smp_call_function_single(0, func, info, wait); +} + +static inline void kick_all_cpus_sync(void) { } +static inline void wake_up_all_idle_cpus(void) { } + +#ifdef CONFIG_UP_LATE_INIT +extern void __init up_late_init(void); +static inline void smp_init(void) { up_late_init(); } +#else +static inline void smp_init(void) { } +#endif + +static inline int get_boot_cpu_id(void) +{ + return 0; +} + +#endif /* !SMP */ + +/* + * smp_processor_id(): get the current CPU ID. + * + * if DEBUG_PREEMPT is enabled then we check whether it is + * used in a preemption-safe way. (smp_processor_id() is safe + * if it's used in a preemption-off critical section, or in + * a thread that is bound to the current CPU.) + * + * NOTE: raw_smp_processor_id() is for internal use only + * (smp_processor_id() is the preferred variant), but in rare + * instances it might also be used to turn off false positives + * (i.e. smp_processor_id() use that the debugging code reports but + * which use for some reason is legal). Don't use this to hack around + * the warning message, as your code might not work under PREEMPT. 
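The get_cpu()/put_cpu() pair defined just below makes the preemption-safe pattern explicit; a minimal sketch with a hypothetical caller:

    static void foo_touch_this_cpu(void)
    {
        int cpu = get_cpu();        /* preempt_disable() + smp_processor_id() */

        pr_debug("running on cpu %d\n", cpu);
        put_cpu();                  /* preempt_enable() */
    }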
+ */ +#ifdef CONFIG_DEBUG_PREEMPT + extern unsigned int debug_smp_processor_id(void); +# define smp_processor_id() debug_smp_processor_id() +#else +# define smp_processor_id() raw_smp_processor_id() +#endif + +#define get_cpu() ({ preempt_disable(); smp_processor_id(); }) +#define put_cpu() preempt_enable() + +/* + * Callback to arch code if there's nosmp or maxcpus=0 on the + * boot command line: + */ +extern void arch_disable_smp_support(void); + +extern void arch_enable_nonboot_cpus_begin(void); +extern void arch_enable_nonboot_cpus_end(void); + +void smp_setup_processor_id(void); + +int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, + bool phys); + +/* SMP core functions */ +int smpcfd_prepare_cpu(unsigned int cpu); +int smpcfd_dead_cpu(unsigned int cpu); +int smpcfd_dying_cpu(unsigned int cpu); + +#endif /* __LINUX_SMP_H */ diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h new file mode 100644 index 000000000..9d1bc65d2 --- /dev/null +++ b/include/linux/smpboot.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SMPBOOT_H +#define _LINUX_SMPBOOT_H + +#include + +struct task_struct; +/* Cookie handed to the thread_fn*/ +struct smpboot_thread_data; + +/** + * struct smp_hotplug_thread - CPU hotplug related thread descriptor + * @store: Pointer to per cpu storage for the task pointers + * @list: List head for core management + * @thread_should_run: Check whether the thread should run or not. Called with + * preemption disabled. + * @thread_fn: The associated thread function + * @create: Optional setup function, called when the thread gets + * created (Not called from the thread context) + * @setup: Optional setup function, called when the thread gets + * operational the first time + * @cleanup: Optional cleanup function, called when the thread + * should stop (module exit) + * @park: Optional park function, called when the thread is + * parked (cpu offline) + * @unpark: Optional unpark function, called when the thread is + * unparked (cpu online) + * @selfparking: Thread is not parked by the park function. + * @thread_comm: The base name of the thread + */ +struct smp_hotplug_thread { + struct task_struct * __percpu *store; + struct list_head list; + int (*thread_should_run)(unsigned int cpu); + void (*thread_fn)(unsigned int cpu); + void (*create)(unsigned int cpu); + void (*setup)(unsigned int cpu); + void (*cleanup)(unsigned int cpu, bool online); + void (*park)(unsigned int cpu); + void (*unpark)(unsigned int cpu); + bool selfparking; + const char *thread_comm; +}; + +int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); + +void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); + +#endif diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h new file mode 100644 index 000000000..eec3efd19 --- /dev/null +++ b/include/linux/smsc911x.h @@ -0,0 +1,63 @@ +/*************************************************************************** + * + * Copyright (C) 2004-2008 SMSC + * Copyright (C) 2005-2008 ARM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ***************************************************************************/ +#ifndef __LINUX_SMSC911X_H__ +#define __LINUX_SMSC911X_H__ + +#include +#include + +/* platform_device configuration data, should be assigned to + * the platform_device's dev.platform_data */ +struct smsc911x_platform_config { + unsigned int irq_polarity; + unsigned int irq_type; + unsigned int flags; + unsigned int shift; + phy_interface_t phy_interface; + unsigned char mac[ETH_ALEN]; +}; + +/* Constants for platform_device irq polarity configuration */ +#define SMSC911X_IRQ_POLARITY_ACTIVE_LOW 0 +#define SMSC911X_IRQ_POLARITY_ACTIVE_HIGH 1 + +/* Constants for platform_device irq type configuration */ +#define SMSC911X_IRQ_TYPE_OPEN_DRAIN 0 +#define SMSC911X_IRQ_TYPE_PUSH_PULL 1 + +/* Constants for flags */ +#define SMSC911X_USE_16BIT (BIT(0)) +#define SMSC911X_USE_32BIT (BIT(1)) +#define SMSC911X_FORCE_INTERNAL_PHY (BIT(2)) +#define SMSC911X_FORCE_EXTERNAL_PHY (BIT(3)) +#define SMSC911X_SAVE_MAC_ADDRESS (BIT(4)) + +/* + * SMSC911X_SWAP_FIFO: + * Enables software byte swap for fifo data. Should only be used as a + * "last resort" in the case of big endian mode on boards with incorrectly + * routed data bus to older devices such as LAN9118. Newer devices such as + * LAN9221 can handle this in hardware, there are registers to control + * this swapping but the driver doesn't currently use them. + */ +#define SMSC911X_SWAP_FIFO (BIT(5)) + +#endif /* __LINUX_SMSC911X_H__ */ diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h new file mode 100644 index 000000000..1a136271b --- /dev/null +++ b/include/linux/smscphy.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SMSCPHY_H__ +#define __LINUX_SMSCPHY_H__ + +#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ +#define MII_LAN83C185_IM 30 /* Interrupt Mask */ +#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */ +#define MII_LAN83C185_SPECIAL_MODES 18 /* Special Modes Register */ + +#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ +#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ +#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */ +#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */ +#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */ +#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */ +#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */ + +#define MII_LAN83C185_ISF_INT_ALL (0x0e) + +#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ + (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \ + MII_LAN83C185_ISF_INT7) + +#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */ +#define MII_LAN83C185_ENERGYON (1 << 1) /* ENERGYON */ + +#define MII_LAN83C185_MODE_MASK 0xE0 +#define MII_LAN83C185_MODE_POWERDOWN 0xC0 /* Power Down mode */ +#define MII_LAN83C185_MODE_ALL 0xE0 /* All capable mode */ + +#endif /* __LINUX_SMSCPHY_H__ */ diff --git a/include/linux/soc/actions/owl-sps.h b/include/linux/soc/actions/owl-sps.h new file mode 100644 index 000000000..33d0dbece --- /dev/null +++ b/include/linux/soc/actions/owl-sps.h @@ -0,0 +1,11 @@ +/* + * Copyright (c) 2017 Andreas Färber + * + * SPDX-License-Identifier: GPL-2.0+ + */ +#ifndef SOC_ACTIONS_OWL_SPS_H 
+#define SOC_ACTIONS_OWL_SPS_H + +int owl_sps_set_pg(void __iomem *base, u32 pwr_mask, u32 ack_mask, bool enable); + +#endif diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h new file mode 100644 index 000000000..8e884e0dd --- /dev/null +++ b/include/linux/soc/brcmstb/brcmstb.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BRCMSTB_SOC_H +#define __BRCMSTB_SOC_H + +static inline u32 BRCM_ID(u32 reg) +{ + return reg >> 28 ? reg >> 16 : reg >> 8; +} + +static inline u32 BRCM_REV(u32 reg) +{ + return reg & 0xff; +} + +/* + * Helper functions for getting family or product id from the + * SoC driver. + */ +u32 brcmstb_get_family_id(void); +u32 brcmstb_get_product_id(void); + +#endif /* __BRCMSTB_SOC_H */ diff --git a/include/linux/soc/dove/pmu.h b/include/linux/soc/dove/pmu.h new file mode 100644 index 000000000..1955c01de --- /dev/null +++ b/include/linux/soc/dove/pmu.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_SOC_DOVE_PMU_H +#define LINUX_SOC_DOVE_PMU_H + +#include + +struct dove_pmu_domain_initdata { + u32 pwr_mask; + u32 rst_mask; + u32 iso_mask; + const char *name; +}; + +struct dove_pmu_initdata { + void __iomem *pmc_base; + void __iomem *pmu_base; + int irq; + int irq_domain_start; + const struct dove_pmu_domain_initdata *domains; +}; + +int dove_init_pmu_legacy(const struct dove_pmu_initdata *); + +int dove_init_pmu(void); + +#endif diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h new file mode 100644 index 000000000..fd25f0148 --- /dev/null +++ b/include/linux/soc/mediatek/infracfg.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SOC_MEDIATEK_INFRACFG_H +#define __SOC_MEDIATEK_INFRACFG_H + +#define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0) +#define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1) +#define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2) +#define MT8173_TOP_AXI_PROT_EN_MMAPB_S BIT(6) +#define MT8173_TOP_AXI_PROT_EN_L2C_M2 BIT(9) +#define MT8173_TOP_AXI_PROT_EN_L2SS_SMI BIT(11) +#define MT8173_TOP_AXI_PROT_EN_L2SS_ADD BIT(12) +#define MT8173_TOP_AXI_PROT_EN_CCI_M2 BIT(13) +#define MT8173_TOP_AXI_PROT_EN_MFG_S BIT(14) +#define MT8173_TOP_AXI_PROT_EN_PERI_M0 BIT(15) +#define MT8173_TOP_AXI_PROT_EN_PERI_M1 BIT(16) +#define MT8173_TOP_AXI_PROT_EN_DEBUGSYS BIT(17) +#define MT8173_TOP_AXI_PROT_EN_CQ_DMA BIT(18) +#define MT8173_TOP_AXI_PROT_EN_GCPU BIT(19) +#define MT8173_TOP_AXI_PROT_EN_IOMMU BIT(20) +#define MT8173_TOP_AXI_PROT_EN_MFG_M0 BIT(21) +#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22) +#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23) + +#define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1) +#define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2) +#define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8) + +#define MT7622_TOP_AXI_PROT_EN_ETHSYS (BIT(3) | BIT(17)) +#define MT7622_TOP_AXI_PROT_EN_HIF0 (BIT(24) | BIT(25)) +#define MT7622_TOP_AXI_PROT_EN_HIF1 (BIT(26) | BIT(27) | \ + BIT(28)) +#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \ + BIT(7) | BIT(8)) + +int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask, + bool reg_update); +int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask, + bool reg_update); +#endif /* __SOC_MEDIATEK_INFRACFG_H */ diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h new file mode 100644 index 000000000..c5d52e2cb --- /dev/null +++ b/include/linux/soc/qcom/apr.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __QCOM_APR_H_ +#define __QCOM_APR_H_ + +#include +#include 
+#include +#include + +extern struct bus_type aprbus; + +#define APR_HDR_LEN(hdr_len) ((hdr_len)/4) + +/* + * HEADER field + * version:0:3 + * header_size : 4:7 + * message_type : 8:9 + * reserved: 10:15 + */ +#define APR_HDR_FIELD(msg_type, hdr_len, ver)\ + (((msg_type & 0x3) << 8) | ((hdr_len & 0xF) << 4) | (ver & 0xF)) + +#define APR_HDR_SIZE sizeof(struct apr_hdr) +#define APR_SEQ_CMD_HDR_FIELD APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ + APR_HDR_LEN(APR_HDR_SIZE), \ + APR_PKT_VER) +/* Version */ +#define APR_PKT_VER 0x0 + +/* Command and Response Types */ +#define APR_MSG_TYPE_EVENT 0x0 +#define APR_MSG_TYPE_CMD_RSP 0x1 +#define APR_MSG_TYPE_SEQ_CMD 0x2 +#define APR_MSG_TYPE_NSEQ_CMD 0x3 +#define APR_MSG_TYPE_MAX 0x04 + +/* APR Basic Response Message */ +#define APR_BASIC_RSP_RESULT 0x000110E8 +#define APR_RSP_ACCEPTED 0x000100BE + +struct aprv2_ibasic_rsp_result_t { + uint32_t opcode; + uint32_t status; +}; + +/* hdr field Ver [0:3], Size [4:7], Message type [8:10] */ +#define APR_HDR_FIELD_VER(h) (h & 0x000F) +#define APR_HDR_FIELD_SIZE(h) ((h & 0x00F0) >> 4) +#define APR_HDR_FIELD_SIZE_BYTES(h) (((h & 0x00F0) >> 4) * 4) +#define APR_HDR_FIELD_MT(h) ((h & 0x0300) >> 8) + +struct apr_hdr { + uint16_t hdr_field; + uint16_t pkt_size; + uint8_t src_svc; + uint8_t src_domain; + uint16_t src_port; + uint8_t dest_svc; + uint8_t dest_domain; + uint16_t dest_port; + uint32_t token; + uint32_t opcode; +} __packed; + +struct apr_pkt { + struct apr_hdr hdr; + uint8_t payload[]; +}; + +struct apr_resp_pkt { + struct apr_hdr hdr; + void *payload; + int payload_size; +}; + +/* Bits 0 to 15 -- Minor version, Bits 16 to 31 -- Major version */ +#define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF) +#define APR_SVC_MINOR_VERSION(v) (v & 0xFF) + +struct apr_device { + struct device dev; + uint16_t svc_id; + uint16_t domain_id; + uint32_t version; + char name[APR_NAME_SIZE]; + spinlock_t lock; + struct list_head node; +}; + +#define to_apr_device(d) container_of(d, struct apr_device, dev) + +struct apr_driver { + int (*probe)(struct apr_device *sl); + int (*remove)(struct apr_device *sl); + int (*callback)(struct apr_device *a, + struct apr_resp_pkt *d); + struct device_driver driver; + const struct apr_device_id *id_table; +}; + +#define to_apr_driver(d) container_of(d, struct apr_driver, driver) + +/* + * use a macro to avoid include chaining to get THIS_MODULE + */ +#define apr_driver_register(drv) __apr_driver_register(drv, THIS_MODULE) + +int __apr_driver_register(struct apr_driver *drv, struct module *owner); +void apr_driver_unregister(struct apr_driver *drv); + +/** + * module_apr_driver() - Helper macro for registering a aprbus driver + * @__aprbus_driver: aprbus_driver struct + * + * Helper macro for aprbus drivers which do not do anything special in + * module init/exit. This eliminates a lot of boilerplate. Each module + * may only use this macro once, and calling it replaces module_init() + * and module_exit() + */ +#define module_apr_driver(__apr_driver) \ + module_driver(__apr_driver, apr_driver_register, \ + apr_driver_unregister) + +int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt); + +#endif /* __QCOM_APR_H_ */ diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h new file mode 100644 index 000000000..7e3b9c605 --- /dev/null +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
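Returning to the APR bus interface just above (include/linux/soc/qcom/apr.h), a skeleton aprbus client might look like the following sketch; all foo_* names are hypothetical:

    static int foo_apr_probe(struct apr_device *adev)
    {
        dev_info(&adev->dev, "APR service %u up\n", adev->svc_id);
        return 0;
    }

    static int foo_apr_remove(struct apr_device *adev)
    {
        return 0;
    }

    static int foo_apr_callback(struct apr_device *adev,
                                struct apr_resp_pkt *rsp)
    {
        /* inspect rsp->hdr.opcode and rsp->payload here */
        return 0;
    }

    static struct apr_driver foo_apr_driver = {
        .probe    = foo_apr_probe,
        .remove   = foo_apr_remove,
        .callback = foo_apr_callback,
        .driver   = {
            .name = "foo-apr",
        },
    };
    module_apr_driver(foo_apr_driver);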
+ * + */ + +#include +#ifndef __LLCC_QCOM__ +#define __LLCC_QCOM__ + +#define LLCC_CPUSS 1 +#define LLCC_VIDSC0 2 +#define LLCC_VIDSC1 3 +#define LLCC_ROTATOR 4 +#define LLCC_VOICE 5 +#define LLCC_AUDIO 6 +#define LLCC_MDMHPGRW 7 +#define LLCC_MDM 8 +#define LLCC_CMPT 10 +#define LLCC_GPUHTW 11 +#define LLCC_GPU 12 +#define LLCC_MMUHWT 13 +#define LLCC_CMPTDMA 15 +#define LLCC_DISP 16 +#define LLCC_VIDFW 17 +#define LLCC_MDMHPFX 20 +#define LLCC_MDMPNG 21 +#define LLCC_AUDHW 22 + +/** + * llcc_slice_desc - Cache slice descriptor + * @slice_id: llcc slice id + * @slice_size: Size allocated for the llcc slice + */ +struct llcc_slice_desc { + u32 slice_id; + size_t slice_size; +}; + +/** + * llcc_slice_config - Data associated with the llcc slice + * @usecase_id: usecase id for which the llcc slice is used + * @slice_id: llcc slice id assigned to each slice + * @max_cap: maximum capacity of the llcc slice + * @priority: priority of the llcc slice + * @fixed_size: whether the llcc slice can grow beyond its size + * @bonus_ways: bonus ways associated with llcc slice + * @res_ways: reserved ways associated with llcc slice + * @cache_mode: mode of the llcc slice + * @probe_target_ways: Probe only reserved and bonus ways on a cache miss + * @dis_cap_alloc: Disable capacity based allocation + * @retain_on_pc: Retain through power collapse + * @activate_on_init: activate the slice on init + */ +struct llcc_slice_config { + u32 usecase_id; + u32 slice_id; + u32 max_cap; + u32 priority; + bool fixed_size; + u32 bonus_ways; + u32 res_ways; + u32 cache_mode; + u32 probe_target_ways; + bool dis_cap_alloc; + bool retain_on_pc; + bool activate_on_init; +}; + +/** + * llcc_drv_data - Data associated with the llcc driver + * @regmap: regmap associated with the llcc device + * @cfg: pointer to the data structure for slice configuration + * @lock: mutex associated with each slice + * @cfg_size: size of the config data table + * @max_slices: max slices as read from device tree + * @bcast_off: Offset of the broadcast bank + * @num_banks: Number of llcc banks + * @bitmap: Bit map to track the active slice ids + * @offsets: Pointer to the bank offsets array + */ +struct llcc_drv_data { + struct regmap *regmap; + const struct llcc_slice_config *cfg; + struct mutex lock; + u32 cfg_size; + u32 max_slices; + u32 bcast_off; + u32 num_banks; + unsigned long *bitmap; + u32 *offsets; +}; + +#if IS_ENABLED(CONFIG_QCOM_LLCC) +/** + * llcc_slice_getd - get llcc slice descriptor + * @uid: usecase_id of the client + */ +struct llcc_slice_desc *llcc_slice_getd(u32 uid); + +/** + * llcc_slice_putd - llcc slice descritpor + * @desc: Pointer to llcc slice descriptor + */ +void llcc_slice_putd(struct llcc_slice_desc *desc); + +/** + * llcc_get_slice_id - get slice id + * @desc: Pointer to llcc slice descriptor + */ +int llcc_get_slice_id(struct llcc_slice_desc *desc); + +/** + * llcc_get_slice_size - llcc slice size + * @desc: Pointer to llcc slice descriptor + */ +size_t llcc_get_slice_size(struct llcc_slice_desc *desc); + +/** + * llcc_slice_activate - Activate the llcc slice + * @desc: Pointer to llcc slice descriptor + */ +int llcc_slice_activate(struct llcc_slice_desc *desc); + +/** + * llcc_slice_deactivate - Deactivate the llcc slice + * @desc: Pointer to llcc slice descriptor + */ +int llcc_slice_deactivate(struct llcc_slice_desc *desc); + +/** + * qcom_llcc_probe - program the sct table + * @pdev: platform device pointer + * @table: soc sct table + * @sz: Size of the config table + */ +int qcom_llcc_probe(struct 
platform_device *pdev, + const struct llcc_slice_config *table, u32 sz); +#else +static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid) +{ + return NULL; +} + +static inline void llcc_slice_putd(struct llcc_slice_desc *desc) +{ + +}; + +static inline int llcc_get_slice_id(struct llcc_slice_desc *desc) +{ + return -EINVAL; +} + +static inline size_t llcc_get_slice_size(struct llcc_slice_desc *desc) +{ + return 0; +} +static inline int llcc_slice_activate(struct llcc_slice_desc *desc) +{ + return -EINVAL; +} + +static inline int llcc_slice_deactivate(struct llcc_slice_desc *desc) +{ + return -EINVAL; +} +static inline int qcom_llcc_probe(struct platform_device *pdev, + const struct llcc_slice_config *table, u32 sz) +{ + return -ENODEV; +} + +static inline int qcom_llcc_remove(struct platform_device *pdev) +{ + return -ENODEV; +} +#endif + +#endif diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h new file mode 100644 index 000000000..944b06aef --- /dev/null +++ b/include/linux/soc/qcom/mdt_loader.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_MDT_LOADER_H__ +#define __QCOM_MDT_LOADER_H__ + +#include + +#define QCOM_MDT_TYPE_MASK (7 << 24) +#define QCOM_MDT_TYPE_HASH (2 << 24) +#define QCOM_MDT_RELOCATABLE BIT(27) + +struct device; +struct firmware; + +ssize_t qcom_mdt_get_size(const struct firmware *fw); +int qcom_mdt_load(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, void *mem_region, + phys_addr_t mem_phys, size_t mem_size, + phys_addr_t *reloc_base); + +int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, void *mem_region, + phys_addr_t mem_phys, size_t mem_size, + phys_addr_t *reloc_base); +#endif diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h new file mode 100644 index 000000000..f4de33654 --- /dev/null +++ b/include/linux/soc/qcom/qmi.h @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2017, Linaro Ltd. + */ +#ifndef __QMI_HELPERS_H__ +#define __QMI_HELPERS_H__ + +#include +#include +#include +#include +#include +#include + +struct socket; + +/** + * qmi_header - wireformat header of QMI messages + * @type: type of message + * @txn_id: transaction id + * @msg_id: message id + * @msg_len: length of message payload following header + */ +struct qmi_header { + u8 type; + u16 txn_id; + u16 msg_id; + u16 msg_len; +} __packed; + +#define QMI_REQUEST 0 +#define QMI_RESPONSE 2 +#define QMI_INDICATION 4 + +#define QMI_COMMON_TLV_TYPE 0 + +enum qmi_elem_type { + QMI_EOTI, + QMI_OPT_FLAG, + QMI_DATA_LEN, + QMI_UNSIGNED_1_BYTE, + QMI_UNSIGNED_2_BYTE, + QMI_UNSIGNED_4_BYTE, + QMI_UNSIGNED_8_BYTE, + QMI_SIGNED_2_BYTE_ENUM, + QMI_SIGNED_4_BYTE_ENUM, + QMI_STRUCT, + QMI_STRING, +}; + +enum qmi_array_type { + NO_ARRAY, + STATIC_ARRAY, + VAR_LEN_ARRAY, +}; + +/** + * struct qmi_elem_info - describes how to encode a single QMI element + * @data_type: Data type of this element. + * @elem_len: Array length of this element, if an array. + * @elem_size: Size of a single instance of this data type. + * @array_type: Array type of this element. + * @tlv_type: QMI message specific type to identify which element + * is present in an incoming message. + * @offset: Specifies the offset of the first instance of this + * element in the data structure. 
+ * @ei_array: Null-terminated array of @qmi_elem_info to describe nested + * structures. + */ +struct qmi_elem_info { + enum qmi_elem_type data_type; + u32 elem_len; + u32 elem_size; + enum qmi_array_type array_type; + u8 tlv_type; + u32 offset; + struct qmi_elem_info *ei_array; +}; + +#define QMI_RESULT_SUCCESS_V01 0 +#define QMI_RESULT_FAILURE_V01 1 + +#define QMI_ERR_NONE_V01 0 +#define QMI_ERR_MALFORMED_MSG_V01 1 +#define QMI_ERR_NO_MEMORY_V01 2 +#define QMI_ERR_INTERNAL_V01 3 +#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5 +#define QMI_ERR_INVALID_ID_V01 41 +#define QMI_ERR_ENCODING_V01 58 +#define QMI_ERR_INCOMPATIBLE_STATE_V01 90 +#define QMI_ERR_NOT_SUPPORTED_V01 94 + +/** + * qmi_response_type_v01 - common response header (decoded) + * @result: result of the transaction + * @error: error value, when @result is QMI_RESULT_FAILURE_V01 + */ +struct qmi_response_type_v01 { + u16 result; + u16 error; +}; + +extern struct qmi_elem_info qmi_response_type_v01_ei[]; + +/** + * struct qmi_service - context to track lookup-results + * @service: service type + * @version: version of the @service + * @instance: instance id of the @service + * @node: node of the service + * @port: port of the service + * @priv: handle for client's use + * @list_node: list_head for house keeping + */ +struct qmi_service { + unsigned int service; + unsigned int version; + unsigned int instance; + + unsigned int node; + unsigned int port; + + void *priv; + struct list_head list_node; +}; + +struct qmi_handle; + +/** + * struct qmi_ops - callbacks for qmi_handle + * @new_server: inform client of a new_server lookup-result, returning + * successfully from this call causes the library to call + * @del_server as the service is removed from the + * lookup-result. @priv of the qmi_service can be used by + * the client + * @del_server: inform client of a del_server lookup-result + * @net_reset: inform client that the name service was restarted and + * that and any state needs to be released + * @msg_handler: invoked for incoming messages, allows a client to + * override the usual QMI message handler + * @bye: inform a client that all clients from a node are gone + * @del_client: inform a client that a particular client is gone + */ +struct qmi_ops { + int (*new_server)(struct qmi_handle *qmi, struct qmi_service *svc); + void (*del_server)(struct qmi_handle *qmi, struct qmi_service *svc); + void (*net_reset)(struct qmi_handle *qmi); + void (*msg_handler)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + const void *data, size_t count); + void (*bye)(struct qmi_handle *qmi, unsigned int node); + void (*del_client)(struct qmi_handle *qmi, + unsigned int node, unsigned int port); +}; + +/** + * struct qmi_txn - transaction context + * @qmi: QMI handle this transaction is associated with + * @id: transaction id + * @lock: for synchronization between handler and waiter of messages + * @completion: completion object as the transaction receives a response + * @result: result code for the completed transaction + * @ei: description of the QMI encoded response (optional) + * @dest: destination buffer to decode message into (optional) + */ +struct qmi_txn { + struct qmi_handle *qmi; + + int id; + + struct mutex lock; + struct completion completion; + int result; + + struct qmi_elem_info *ei; + void *dest; +}; + +/** + * struct qmi_msg_handler - description of QMI message handler + * @type: type of message + * @msg_id: message id + * @ei: description of the QMI encoded message + * @decoded_size: size of the decoded object + * @fn: 
function to invoke as the message is decoded + */ +struct qmi_msg_handler { + unsigned int type; + unsigned int msg_id; + + struct qmi_elem_info *ei; + + size_t decoded_size; + void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *decoded); +}; + +/** + * struct qmi_handle - QMI context + * @sock: socket handle + * @sock_lock: synchronization of @sock modifications + * @sq: sockaddr of @sock + * @work: work for handling incoming messages + * @wq: workqueue to post @work on + * @recv_buf: scratch buffer for handling incoming messages + * @recv_buf_size: size of @recv_buf + * @lookups: list of registered lookup requests + * @lookup_results: list of lookup-results advertised to the client + * @services: list of registered services (by this client) + * @ops: reference to callbacks + * @txns: outstanding transactions + * @txn_lock: lock for modifications of @txns + * @handlers: list of handlers for incoming messages + */ +struct qmi_handle { + struct socket *sock; + struct mutex sock_lock; + + struct sockaddr_qrtr sq; + + struct work_struct work; + struct workqueue_struct *wq; + + void *recv_buf; + size_t recv_buf_size; + + struct list_head lookups; + struct list_head lookup_results; + struct list_head services; + + struct qmi_ops ops; + + struct idr txns; + struct mutex txn_lock; + + const struct qmi_msg_handler *handlers; +}; + +int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance); +int qmi_add_server(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance); + +int qmi_handle_init(struct qmi_handle *qmi, size_t max_msg_len, + const struct qmi_ops *ops, + const struct qmi_msg_handler *handlers); +void qmi_handle_release(struct qmi_handle *qmi); + +ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct); +ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct); +ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + int msg_id, size_t len, struct qmi_elem_info *ei, + const void *c_struct); + +void *qmi_encode_message(int type, unsigned int msg_id, size_t *len, + unsigned int txn_id, struct qmi_elem_info *ei, + const void *c_struct); + +int qmi_decode_message(const void *buf, size_t len, + struct qmi_elem_info *ei, void *c_struct); + +int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn, + struct qmi_elem_info *ei, void *c_struct); +int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout); +void qmi_txn_cancel(struct qmi_txn *txn); + +#endif diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h new file mode 100644 index 000000000..9e4fdd861 --- /dev/null +++ b/include/linux/soc/qcom/smd-rpm.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_SMD_RPM_H__ +#define __QCOM_SMD_RPM_H__ + +struct qcom_smd_rpm; + +#define QCOM_SMD_RPM_ACTIVE_STATE 0 +#define QCOM_SMD_RPM_SLEEP_STATE 1 + +/* + * Constants used for addressing resources in the RPM. 
+ */ +#define QCOM_SMD_RPM_BOBB 0x62626f62 +#define QCOM_SMD_RPM_BOOST 0x61747362 +#define QCOM_SMD_RPM_BUS_CLK 0x316b6c63 +#define QCOM_SMD_RPM_BUS_MASTER 0x73616d62 +#define QCOM_SMD_RPM_BUS_SLAVE 0x766c7362 +#define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63 +#define QCOM_SMD_RPM_LDOA 0x616f646c +#define QCOM_SMD_RPM_LDOB 0x626F646C +#define QCOM_SMD_RPM_MEM_CLK 0x326b6c63 +#define QCOM_SMD_RPM_MISC_CLK 0x306b6c63 +#define QCOM_SMD_RPM_NCPA 0x6170636E +#define QCOM_SMD_RPM_NCPB 0x6270636E +#define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f +#define QCOM_SMD_RPM_QPIC_CLK 0x63697071 +#define QCOM_SMD_RPM_SMPA 0x61706d73 +#define QCOM_SMD_RPM_SMPB 0x62706d73 +#define QCOM_SMD_RPM_SPDM 0x63707362 +#define QCOM_SMD_RPM_VSA 0x00617376 +#define QCOM_SMD_RPM_MMAXI_CLK 0x69786d6d +#define QCOM_SMD_RPM_IPA_CLK 0x617069 +#define QCOM_SMD_RPM_CE_CLK 0x6563 +#define QCOM_SMD_RPM_AGGR_CLK 0x72676761 + +int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, + int state, + u32 resource_type, u32 resource_id, + void *buf, size_t count); + +#endif diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h new file mode 100644 index 000000000..86e1b3586 --- /dev/null +++ b/include/linux/soc/qcom/smem.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_SMEM_H__ +#define __QCOM_SMEM_H__ + +#define QCOM_SMEM_HOST_ANY -1 + +int qcom_smem_alloc(unsigned host, unsigned item, size_t size); +void *qcom_smem_get(unsigned host, unsigned item, size_t *size); + +int qcom_smem_get_free_space(unsigned host); + +phys_addr_t qcom_smem_virt_to_phys(void *p); + +#endif diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h new file mode 100644 index 000000000..63ad8cdda --- /dev/null +++ b/include/linux/soc/qcom/smem_state.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_SMEM_STATE__ +#define __QCOM_SMEM_STATE__ + +#include + +struct device_node; +struct qcom_smem_state; + +struct qcom_smem_state_ops { + int (*update_bits)(void *, u32, u32); +}; + +#ifdef CONFIG_QCOM_SMEM_STATE + +struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit); +void qcom_smem_state_put(struct qcom_smem_state *); + +int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value); + +struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data); +void qcom_smem_state_unregister(struct qcom_smem_state *state); + +#else + +static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev, + const char *con_id, unsigned *bit) +{ + return ERR_PTR(-EINVAL); +} + +static inline void qcom_smem_state_put(struct qcom_smem_state *state) +{ +} + +static inline int qcom_smem_state_update_bits(struct qcom_smem_state *state, + u32 mask, u32 value) +{ + return -EINVAL; +} + +static inline struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, + const struct qcom_smem_state_ops *ops, void *data) +{ + return ERR_PTR(-EINVAL); +} + +static inline void qcom_smem_state_unregister(struct qcom_smem_state *state) +{ +} + +#endif + +#endif diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h new file mode 100644 index 000000000..bbeb6b9c0 --- /dev/null +++ b/include/linux/soc/qcom/wcnss_ctrl.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __WCNSS_CTRL_H__ +#define __WCNSS_CTRL_H__ + +#include + +#if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL) + +struct rpmsg_endpoint 
*qcom_wcnss_open_channel(void *wcnss, const char *name, + rpmsg_rx_cb_t cb, void *priv); + +#else + +static struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, + const char *name, + rpmsg_rx_cb_t cb, + void *priv) +{ + WARN_ON(1); + return ERR_PTR(-ENXIO); +} + +#endif + +#endif diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h new file mode 100644 index 000000000..7899a5b8c --- /dev/null +++ b/include/linux/soc/renesas/rcar-rst.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__ +#define __LINUX_SOC_RENESAS_RCAR_RST_H__ + +#ifdef CONFIG_RST_RCAR +int rcar_rst_read_mode_pins(u32 *mode); +#else +static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; } +#endif + +#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */ diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h new file mode 100644 index 000000000..00fae6fd2 --- /dev/null +++ b/include/linux/soc/renesas/rcar-sysc.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SOC_RENESAS_RCAR_SYSC_H__ +#define __LINUX_SOC_RENESAS_RCAR_SYSC_H__ + +int rcar_sysc_power_down_cpu(unsigned int cpu); +int rcar_sysc_power_up_cpu(unsigned int cpu); + +#endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */ diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h new file mode 100644 index 000000000..fc0b445bb --- /dev/null +++ b/include/linux/soc/samsung/exynos-pmu.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Header for EXYNOS PMU Driver support + */ + +#ifndef __LINUX_SOC_EXYNOS_PMU_H +#define __LINUX_SOC_EXYNOS_PMU_H + +struct regmap; + +enum sys_powerdown { + SYS_AFTR, + SYS_LPA, + SYS_SLEEP, + NUM_SYS_POWERDOWN, +}; + +extern void exynos_sys_powerdown_conf(enum sys_powerdown mode); +#ifdef CONFIG_EXYNOS_PMU +extern struct regmap *exynos_get_pmu_regmap(void); +#else +static inline struct regmap *exynos_get_pmu_regmap(void) +{ + return ERR_PTR(-ENODEV); +} +#endif + +#endif /* __LINUX_SOC_EXYNOS_PMU_H */ diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h new file mode 100644 index 000000000..5addaf5cc --- /dev/null +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -0,0 +1,666 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2010-2015 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * EXYNOS - Power management unit definition + * + * Notice: + * This is not a list of all Exynos Power Management Unit SFRs. + * There are too many of them, not mentioning subtle differences + * between SoCs. For now, put here only the used registers. 
+ */ + +#ifndef __LINUX_SOC_EXYNOS_REGS_PMU_H +#define __LINUX_SOC_EXYNOS_REGS_PMU_H __FILE__ + +#define S5P_CENTRAL_SEQ_CONFIGURATION 0x0200 + +#define S5P_CENTRAL_LOWPWR_CFG (1 << 16) + +#define S5P_CENTRAL_SEQ_OPTION 0x0208 + +#define S5P_USE_STANDBY_WFI0 (1 << 16) +#define S5P_USE_STANDBY_WFI1 (1 << 17) +#define S5P_USE_STANDBY_WFI2 (1 << 19) +#define S5P_USE_STANDBY_WFI3 (1 << 20) +#define S5P_USE_STANDBY_WFE0 (1 << 24) +#define S5P_USE_STANDBY_WFE1 (1 << 25) +#define S5P_USE_STANDBY_WFE2 (1 << 27) +#define S5P_USE_STANDBY_WFE3 (1 << 28) + +#define S5P_USE_STANDBY_WFI_ALL \ + (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFI1 | \ + S5P_USE_STANDBY_WFI2 | S5P_USE_STANDBY_WFI3 | \ + S5P_USE_STANDBY_WFE0 | S5P_USE_STANDBY_WFE1 | \ + S5P_USE_STANDBY_WFE2 | S5P_USE_STANDBY_WFE3) + +#define S5P_USE_DELAYED_RESET_ASSERTION BIT(12) + +#define EXYNOS_CORE_PO_RESET(n) ((1 << 4) << n) +#define EXYNOS_WAKEUP_FROM_LOWPWR (1 << 28) +#define EXYNOS_SWRESET 0x0400 + +#define S5P_WAKEUP_STAT 0x0600 +/* Value for EXYNOS_EINT_WAKEUP_MASK disabling all external wakeup interrupts */ +#define EXYNOS_EINT_WAKEUP_MASK_DISABLED 0xffffffff +#define EXYNOS_EINT_WAKEUP_MASK 0x0604 +#define S5P_WAKEUP_MASK 0x0608 +#define S5P_WAKEUP_MASK2 0x0614 + +/* MIPI_PHYn_CONTROL, valid for Exynos3250, Exynos4, Exynos5250 and Exynos5433 */ +#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x0710 + (n) * 4) +/* Phy enable bit, common for all phy registers, not only MIPI */ +#define EXYNOS4_PHY_ENABLE (1 << 0) +#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1) +#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2) +#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1) + +#define S5P_INFORM0 0x0800 +#define S5P_INFORM1 0x0804 +#define S5P_INFORM5 0x0814 +#define S5P_INFORM6 0x0818 +#define S5P_INFORM7 0x081C +#define S5P_PMU_SPARE2 0x0908 +#define S5P_PMU_SPARE3 0x090C + +#define EXYNOS_IROM_DATA2 0x0988 +#define S5P_ARM_CORE0_LOWPWR 0x1000 +#define S5P_DIS_IRQ_CORE0 0x1004 +#define S5P_DIS_IRQ_CENTRAL0 0x1008 +#define S5P_ARM_CORE1_LOWPWR 0x1010 +#define S5P_DIS_IRQ_CORE1 0x1014 +#define S5P_DIS_IRQ_CENTRAL1 0x1018 +#define S5P_ARM_COMMON_LOWPWR 0x1080 +#define S5P_L2_0_LOWPWR 0x10C0 +#define S5P_L2_1_LOWPWR 0x10C4 +#define S5P_CMU_ACLKSTOP_LOWPWR 0x1100 +#define S5P_CMU_SCLKSTOP_LOWPWR 0x1104 +#define S5P_CMU_RESET_LOWPWR 0x110C +#define S5P_APLL_SYSCLK_LOWPWR 0x1120 +#define S5P_MPLL_SYSCLK_LOWPWR 0x1124 +#define S5P_VPLL_SYSCLK_LOWPWR 0x1128 +#define S5P_EPLL_SYSCLK_LOWPWR 0x112C +#define S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR 0x1138 +#define S5P_CMU_RESET_GPSALIVE_LOWPWR 0x113C +#define S5P_CMU_CLKSTOP_CAM_LOWPWR 0x1140 +#define S5P_CMU_CLKSTOP_TV_LOWPWR 0x1144 +#define S5P_CMU_CLKSTOP_MFC_LOWPWR 0x1148 +#define S5P_CMU_CLKSTOP_G3D_LOWPWR 0x114C +#define S5P_CMU_CLKSTOP_LCD0_LOWPWR 0x1150 +#define S5P_CMU_CLKSTOP_MAUDIO_LOWPWR 0x1158 +#define S5P_CMU_CLKSTOP_GPS_LOWPWR 0x115C +#define S5P_CMU_RESET_CAM_LOWPWR 0x1160 +#define S5P_CMU_RESET_TV_LOWPWR 0x1164 +#define S5P_CMU_RESET_MFC_LOWPWR 0x1168 +#define S5P_CMU_RESET_G3D_LOWPWR 0x116C +#define S5P_CMU_RESET_LCD0_LOWPWR 0x1170 +#define S5P_CMU_RESET_MAUDIO_LOWPWR 0x1178 +#define S5P_CMU_RESET_GPS_LOWPWR 0x117C +#define S5P_TOP_BUS_LOWPWR 0x1180 +#define S5P_TOP_RETENTION_LOWPWR 0x1184 +#define S5P_TOP_PWR_LOWPWR 0x1188 +#define S5P_LOGIC_RESET_LOWPWR 0x11A0 +#define S5P_ONENAND_MEM_LOWPWR 0x11C0 +#define S5P_G2D_ACP_MEM_LOWPWR 0x11C8 +#define S5P_USBOTG_MEM_LOWPWR 0x11CC +#define S5P_HSMMC_MEM_LOWPWR 0x11D0 +#define S5P_CSSYS_MEM_LOWPWR 0x11D4 +#define S5P_SECSS_MEM_LOWPWR 0x11D8 +#define S5P_PAD_RETENTION_DRAM_LOWPWR 
0x1200 +#define S5P_PAD_RETENTION_MAUDIO_LOWPWR 0x1204 +#define S5P_PAD_RETENTION_GPIO_LOWPWR 0x1220 +#define S5P_PAD_RETENTION_UART_LOWPWR 0x1224 +#define S5P_PAD_RETENTION_MMCA_LOWPWR 0x1228 +#define S5P_PAD_RETENTION_MMCB_LOWPWR 0x122C +#define S5P_PAD_RETENTION_EBIA_LOWPWR 0x1230 +#define S5P_PAD_RETENTION_EBIB_LOWPWR 0x1234 +#define S5P_PAD_RETENTION_ISOLATION_LOWPWR 0x1240 +#define S5P_PAD_RETENTION_ALV_SEL_LOWPWR 0x1260 +#define S5P_XUSBXTI_LOWPWR 0x1280 +#define S5P_XXTI_LOWPWR 0x1284 +#define S5P_EXT_REGULATOR_LOWPWR 0x12C0 +#define S5P_GPIO_MODE_LOWPWR 0x1300 +#define S5P_GPIO_MODE_MAUDIO_LOWPWR 0x1340 +#define S5P_CAM_LOWPWR 0x1380 +#define S5P_TV_LOWPWR 0x1384 +#define S5P_MFC_LOWPWR 0x1388 +#define S5P_G3D_LOWPWR 0x138C +#define S5P_LCD0_LOWPWR 0x1390 +#define S5P_MAUDIO_LOWPWR 0x1398 +#define S5P_GPS_LOWPWR 0x139C +#define S5P_GPS_ALIVE_LOWPWR 0x13A0 + +#define EXYNOS_ARM_CORE0_CONFIGURATION 0x2000 +#define EXYNOS_ARM_CORE_CONFIGURATION(_nr) \ + (EXYNOS_ARM_CORE0_CONFIGURATION + (0x80 * (_nr))) +#define EXYNOS_ARM_CORE_STATUS(_nr) \ + (EXYNOS_ARM_CORE_CONFIGURATION(_nr) + 0x4) +#define EXYNOS_ARM_CORE_OPTION(_nr) \ + (EXYNOS_ARM_CORE_CONFIGURATION(_nr) + 0x8) + +#define EXYNOS_ARM_COMMON_CONFIGURATION 0x2500 +#define EXYNOS_COMMON_CONFIGURATION(_nr) \ + (EXYNOS_ARM_COMMON_CONFIGURATION + (0x80 * (_nr))) +#define EXYNOS_COMMON_STATUS(_nr) \ + (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x4) +#define EXYNOS_COMMON_OPTION(_nr) \ + (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8) + +#define EXYNOS_ARM_L2_CONFIGURATION 0x2600 +#define EXYNOS_L2_CONFIGURATION(_nr) \ + (EXYNOS_ARM_L2_CONFIGURATION + ((_nr) * 0x80)) +#define EXYNOS_L2_STATUS(_nr) \ + (EXYNOS_L2_CONFIGURATION(_nr) + 0x4) +#define EXYNOS_L2_OPTION(_nr) \ + (EXYNOS_L2_CONFIGURATION(_nr) + 0x8) + +#define EXYNOS_L2_USE_RETENTION BIT(4) + +#define S5P_PAD_RET_MAUDIO_OPTION 0x3028 +#define S5P_PAD_RET_MMC2_OPTION 0x30c8 +#define S5P_PAD_RET_GPIO_OPTION 0x3108 +#define S5P_PAD_RET_UART_OPTION 0x3128 +#define S5P_PAD_RET_MMCA_OPTION 0x3148 +#define S5P_PAD_RET_MMCB_OPTION 0x3168 +#define S5P_PAD_RET_EBIA_OPTION 0x3188 +#define S5P_PAD_RET_EBIB_OPTION 0x31A8 +#define S5P_PAD_RET_SPI_OPTION 0x31c8 + +#define S5P_PS_HOLD_CONTROL 0x330C +#define S5P_PS_HOLD_EN (1 << 31) +#define S5P_PS_HOLD_OUTPUT_HIGH (3 << 8) + +#define S5P_CAM_OPTION 0x3C08 +#define S5P_MFC_OPTION 0x3C48 +#define S5P_G3D_OPTION 0x3C68 +#define S5P_LCD0_OPTION 0x3C88 +#define S5P_LCD1_OPTION 0x3CA8 +#define S5P_ISP_OPTION S5P_LCD1_OPTION + +#define S5P_CORE_LOCAL_PWR_EN 0x3 +#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8) +#define S5P_CORE_AUTOWAKEUP_EN (1 << 31) + +/* Only for S5Pv210 */ +#define S5PV210_EINT_WAKEUP_MASK 0xC004 + +/* Only for EXYNOS4210 */ +#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154 +#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174 +#define S5P_MODIMIF_MEM_LOWPWR 0x11C4 +#define S5P_PCIE_MEM_LOWPWR 0x11E0 +#define S5P_SATA_MEM_LOWPWR 0x11E4 +#define S5P_LCD1_LOWPWR 0x1394 + +/* Only for EXYNOS4x12 */ +#define S5P_ISP_ARM_LOWPWR 0x1050 +#define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR 0x1054 +#define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR 0x1058 +#define S5P_CMU_ACLKSTOP_COREBLK_LOWPWR 0x1110 +#define S5P_CMU_SCLKSTOP_COREBLK_LOWPWR 0x1114 +#define S5P_CMU_RESET_COREBLK_LOWPWR 0x111C +#define S5P_MPLLUSER_SYSCLK_LOWPWR 0x1130 +#define S5P_CMU_CLKSTOP_ISP_LOWPWR 0x1154 +#define S5P_CMU_RESET_ISP_LOWPWR 0x1174 +#define S5P_TOP_BUS_COREBLK_LOWPWR 0x1190 +#define S5P_TOP_RETENTION_COREBLK_LOWPWR 0x1194 +#define S5P_TOP_PWR_COREBLK_LOWPWR 0x1198 +#define S5P_OSCCLK_GATE_LOWPWR 
0x11A4 +#define S5P_LOGIC_RESET_COREBLK_LOWPWR 0x11B0 +#define S5P_OSCCLK_GATE_COREBLK_LOWPWR 0x11B4 +#define S5P_HSI_MEM_LOWPWR 0x11C4 +#define S5P_ROTATOR_MEM_LOWPWR 0x11DC +#define S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR 0x123C +#define S5P_PAD_ISOLATION_COREBLK_LOWPWR 0x1250 +#define S5P_GPIO_MODE_COREBLK_LOWPWR 0x1320 +#define S5P_TOP_ASB_RESET_LOWPWR 0x1344 +#define S5P_TOP_ASB_ISOLATION_LOWPWR 0x1348 +#define S5P_ISP_LOWPWR 0x1394 +#define S5P_DRAM_FREQ_DOWN_LOWPWR 0x13B0 +#define S5P_DDRPHY_DLLOFF_LOWPWR 0x13B4 +#define S5P_CMU_SYSCLK_ISP_LOWPWR 0x13B8 +#define S5P_CMU_SYSCLK_GPS_LOWPWR 0x13BC +#define S5P_LPDDR_PHY_DLL_LOCK_LOWPWR 0x13C0 + +#define S5P_ARM_L2_0_OPTION 0x2608 +#define S5P_ARM_L2_1_OPTION 0x2628 +#define S5P_ONENAND_MEM_OPTION 0x2E08 +#define S5P_HSI_MEM_OPTION 0x2E28 +#define S5P_G2D_ACP_MEM_OPTION 0x2E48 +#define S5P_USBOTG_MEM_OPTION 0x2E68 +#define S5P_HSMMC_MEM_OPTION 0x2E88 +#define S5P_CSSYS_MEM_OPTION 0x2EA8 +#define S5P_SECSS_MEM_OPTION 0x2EC8 +#define S5P_ROTATOR_MEM_OPTION 0x2F48 + +/* Only for EXYNOS4412 */ +#define S5P_ARM_CORE2_LOWPWR 0x1020 +#define S5P_DIS_IRQ_CORE2 0x1024 +#define S5P_DIS_IRQ_CENTRAL2 0x1028 +#define S5P_ARM_CORE3_LOWPWR 0x1030 +#define S5P_DIS_IRQ_CORE3 0x1034 +#define S5P_DIS_IRQ_CENTRAL3 0x1038 + +/* Only for EXYNOS3XXX */ +#define EXYNOS3_ARM_CORE0_SYS_PWR_REG 0x1000 +#define EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004 +#define EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008 +#define EXYNOS3_ARM_CORE1_SYS_PWR_REG 0x1010 +#define EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG 0x1014 +#define EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG 0x1018 +#define EXYNOS3_ISP_ARM_SYS_PWR_REG 0x1050 +#define EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1054 +#define EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1058 +#define EXYNOS3_ARM_COMMON_SYS_PWR_REG 0x1080 +#define EXYNOS3_ARM_L2_SYS_PWR_REG 0x10C0 +#define EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG 0x1100 +#define EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG 0x1104 +#define EXYNOS3_CMU_RESET_SYS_PWR_REG 0x110C +#define EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG 0x1110 +#define EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG 0x1114 +#define EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG 0x111C +#define EXYNOS3_APLL_SYSCLK_SYS_PWR_REG 0x1120 +#define EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG 0x1124 +#define EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG 0x1128 +#define EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG 0x112C +#define EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG 0x1130 +#define EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG 0x1134 +#define EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG 0x1138 +#define EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG 0x1140 +#define EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1148 +#define EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG 0x114C +#define EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG 0x1150 +#define EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG 0x1154 +#define EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG 0x1158 +#define EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG 0x1160 +#define EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG 0x1168 +#define EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG 0x116C +#define EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG 0x1170 +#define EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG 0x1174 +#define EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG 0x1178 +#define EXYNOS3_TOP_BUS_SYS_PWR_REG 0x1180 +#define EXYNOS3_TOP_RETENTION_SYS_PWR_REG 0x1184 +#define EXYNOS3_TOP_PWR_SYS_PWR_REG 0x1188 +#define EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG 0x1190 +#define EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG 0x1194 +#define EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG 0x1198 +#define EXYNOS3_LOGIC_RESET_SYS_PWR_REG 0x11A0 +#define 
EXYNOS3_OSCCLK_GATE_SYS_PWR_REG 0x11A4 +#define EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG 0x11B0 +#define EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG 0x11B4 +#define EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200 +#define EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG 0x1204 +#define EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG 0x1208 +#define EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG 0x1218 +#define EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220 +#define EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG 0x1224 +#define EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG 0x1228 +#define EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG 0x122C +#define EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230 +#define EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234 +#define EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1238 +#define EXYNOS3_PAD_ISOLATION_SYS_PWR_REG 0x1240 +#define EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG 0x1260 +#define EXYNOS3_XUSBXTI_SYS_PWR_REG 0x1280 +#define EXYNOS3_XXTI_SYS_PWR_REG 0x1284 +#define EXYNOS3_EXT_REGULATOR_SYS_PWR_REG 0x12C0 +#define EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG 0x12C4 +#define EXYNOS3_GPIO_MODE_SYS_PWR_REG 0x1300 +#define EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG 0x1340 +#define EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG 0x1344 +#define EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG 0x1348 +#define EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG 0x1350 +#define EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG 0x1354 +#define EXYNOS3_CAM_SYS_PWR_REG 0x1380 +#define EXYNOS3_MFC_SYS_PWR_REG 0x1388 +#define EXYNOS3_G3D_SYS_PWR_REG 0x138C +#define EXYNOS3_LCD0_SYS_PWR_REG 0x1390 +#define EXYNOS3_ISP_SYS_PWR_REG 0x1394 +#define EXYNOS3_MAUDIO_SYS_PWR_REG 0x1398 +#define EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG 0x13B0 +#define EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG 0x13B4 +#define EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG 0x13B8 +#define EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG 0x13C0 +#define EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG 0x13C4 +#define EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG 0x13C8 + +#define EXYNOS3_ARM_CORE0_OPTION 0x2008 +#define EXYNOS3_ARM_CORE_OPTION(_nr) \ + (EXYNOS3_ARM_CORE0_OPTION + ((_nr) * 0x80)) + +#define EXYNOS3_ARM_COMMON_OPTION 0x2408 +#define EXYNOS3_ARM_L2_OPTION 0x2608 +#define EXYNOS3_TOP_PWR_OPTION 0x2C48 +#define EXYNOS3_CORE_TOP_PWR_OPTION 0x2CA8 +#define EXYNOS3_XUSBXTI_DURATION 0x341C +#define EXYNOS3_XXTI_DURATION 0x343C +#define EXYNOS3_EXT_REGULATOR_DURATION 0x361C +#define EXYNOS3_EXT_REGULATOR_COREBLK_DURATION 0x363C +#define XUSBXTI_DURATION 0x00000BB8 +#define XXTI_DURATION XUSBXTI_DURATION +#define EXT_REGULATOR_DURATION 0x00001D4C +#define EXT_REGULATOR_COREBLK_DURATION EXT_REGULATOR_DURATION + +/* for XXX_OPTION */ +#define EXYNOS3_OPTION_USE_SC_COUNTER (1 << 0) +#define EXYNOS3_OPTION_USE_SC_FEEDBACK (1 << 1) +#define EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7) + +/* For EXYNOS5 */ + +#define EXYNOS5_AUTO_WDTRESET_DISABLE 0x0408 +#define EXYNOS5_MASK_WDTRESET_REQUEST 0x040C +#define EXYNOS5_USBDRD_PHY_CONTROL 0x0704 +#define EXYNOS5_DPTX_PHY_CONTROL 0x0720 + +#define EXYNOS5_USE_RETENTION BIT(4) +#define EXYNOS5_SYS_WDTRESET (1 << 20) + +#define EXYNOS5_ARM_CORE0_SYS_PWR_REG 0x1000 +#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004 +#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008 +#define EXYNOS5_ARM_CORE1_SYS_PWR_REG 0x1010 +#define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG 0x1014 +#define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG 0x1018 +#define EXYNOS5_FSYS_ARM_SYS_PWR_REG 0x1040 +#define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG 0x1048 +#define EXYNOS5_ISP_ARM_SYS_PWR_REG 0x1050 +#define 
EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1054 +#define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1058 +#define EXYNOS5_ARM_COMMON_SYS_PWR_REG 0x1080 +#define EXYNOS5_ARM_L2_SYS_PWR_REG 0x10C0 +#define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG 0x1100 +#define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG 0x1104 +#define EXYNOS5_CMU_RESET_SYS_PWR_REG 0x110C +#define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG 0x1120 +#define EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG 0x1124 +#define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG 0x112C +#define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG 0x1130 +#define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG 0x1134 +#define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG 0x1138 +#define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG 0x1140 +#define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG 0x1144 +#define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG 0x1148 +#define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG 0x114C +#define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG 0x1150 +#define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG 0x1154 +#define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG 0x1164 +#define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG 0x1170 +#define EXYNOS5_TOP_BUS_SYS_PWR_REG 0x1180 +#define EXYNOS5_TOP_RETENTION_SYS_PWR_REG 0x1184 +#define EXYNOS5_TOP_PWR_SYS_PWR_REG 0x1188 +#define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG 0x1190 +#define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG 0x1194 +#define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG 0x1198 +#define EXYNOS5_LOGIC_RESET_SYS_PWR_REG 0x11A0 +#define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG 0x11A4 +#define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG 0x11B0 +#define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG 0x11B4 +#define EXYNOS5_USBOTG_MEM_SYS_PWR_REG 0x11C0 +#define EXYNOS5_G2D_MEM_SYS_PWR_REG 0x11C8 +#define EXYNOS5_USBDRD_MEM_SYS_PWR_REG 0x11CC +#define EXYNOS5_SDMMC_MEM_SYS_PWR_REG 0x11D0 +#define EXYNOS5_CSSYS_MEM_SYS_PWR_REG 0x11D4 +#define EXYNOS5_SECSS_MEM_SYS_PWR_REG 0x11D8 +#define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG 0x11DC +#define EXYNOS5_INTRAM_MEM_SYS_PWR_REG 0x11E0 +#define EXYNOS5_INTROM_MEM_SYS_PWR_REG 0x11E4 +#define EXYNOS5_JPEG_MEM_SYS_PWR_REG 0x11E8 +#define EXYNOS5_HSI_MEM_SYS_PWR_REG 0x11EC +#define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG 0x11F4 +#define EXYNOS5_SATA_MEM_SYS_PWR_REG 0x11FC +#define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200 +#define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG 0x1204 +#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220 +#define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG 0x1224 +#define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG 0x1228 +#define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG 0x122C +#define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230 +#define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234 +#define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG 0x1238 +#define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG 0x123C +#define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG 0x1240 +#define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG 0x1250 +#define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG 0x1260 +#define EXYNOS5_XUSBXTI_SYS_PWR_REG 0x1280 +#define EXYNOS5_XXTI_SYS_PWR_REG 0x1284 +#define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG 0x12C0 +#define EXYNOS5_GPIO_MODE_SYS_PWR_REG 0x1300 +#define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG 0x1320 +#define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG 0x1340 +#define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG 0x1344 +#define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG 0x1348 +#define EXYNOS5_GSCL_SYS_PWR_REG 0x1400 +#define EXYNOS5_ISP_SYS_PWR_REG 0x1404 +#define EXYNOS5_MFC_SYS_PWR_REG 0x1408 +#define EXYNOS5_G3D_SYS_PWR_REG 0x140C +#define EXYNOS5_DISP1_SYS_PWR_REG 0x1414 +#define EXYNOS5_MAU_SYS_PWR_REG 0x1418 +#define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG 0x1480 
+#define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG 0x1484 +#define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1488 +#define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG 0x148C +#define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG 0x1494 +#define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG 0x1498 +#define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG 0x14C0 +#define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG 0x14C4 +#define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG 0x14C8 +#define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG 0x14CC +#define EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG 0x14D4 +#define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG 0x14D8 +#define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG 0x1580 +#define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG 0x1584 +#define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG 0x1588 +#define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG 0x158C +#define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG 0x1594 +#define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG 0x1598 + +#define EXYNOS5_ARM_CORE0_OPTION 0x2008 +#define EXYNOS5_ARM_CORE1_OPTION 0x2088 +#define EXYNOS5_FSYS_ARM_OPTION 0x2208 +#define EXYNOS5_ISP_ARM_OPTION 0x2288 +#define EXYNOS5_ARM_COMMON_OPTION 0x2408 +#define EXYNOS5_ARM_L2_OPTION 0x2608 +#define EXYNOS5_TOP_PWR_OPTION 0x2C48 +#define EXYNOS5_TOP_PWR_SYSMEM_OPTION 0x2CC8 +#define EXYNOS5_JPEG_MEM_OPTION 0x2F48 +#define EXYNOS5_GSCL_OPTION 0x4008 +#define EXYNOS5_ISP_OPTION 0x4028 +#define EXYNOS5_MFC_OPTION 0x4048 +#define EXYNOS5_G3D_OPTION 0x4068 +#define EXYNOS5_DISP1_OPTION 0x40A8 +#define EXYNOS5_MAU_OPTION 0x40C8 + +#define EXYNOS5_USE_SC_FEEDBACK (1 << 1) +#define EXYNOS5_USE_SC_COUNTER (1 << 0) + +#define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7) + +#define EXYNOS5_OPTION_USE_STANDBYWFE (1 << 24) +#define EXYNOS5_OPTION_USE_STANDBYWFI (1 << 16) + +#define EXYNOS5_OPTION_USE_RETENTION (1 << 4) + +#define EXYNOS5420_SWRESET_KFC_SEL 0x3 + +/* Only for EXYNOS5420 */ +#define EXYNOS5420_L2RSTDISABLE_VALUE BIT(3) + +#define EXYNOS5420_LPI_MASK 0x0004 +#define EXYNOS5420_LPI_MASK1 0x0008 +#define EXYNOS5420_UFS BIT(8) +#define EXYNOS5420_ATB_KFC BIT(13) +#define EXYNOS5420_ATB_ISP_ARM BIT(19) +#define EXYNOS5420_EMULATION BIT(31) + +#define EXYNOS5420_ARM_INTR_SPREAD_ENABLE 0x0100 +#define EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI 0x0104 +#define EXYNOS5420_UP_SCHEDULER 0x0120 +#define SPREAD_ENABLE 0xF +#define SPREAD_USE_STANDWFI 0xF + +#define EXYNOS5420_KFC_CORE_RESET0 BIT(8) +#define EXYNOS5420_KFC_ETM_RESET0 BIT(20) + +#define EXYNOS5420_KFC_CORE_RESET(_nr) \ + ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr)) + +#define EXYNOS5420_USBDRD1_PHY_CONTROL 0x0708 +#define EXYNOS5420_MIPI_PHY_CONTROL(n) (0x0714 + (n) * 4) +#define EXYNOS5420_DPTX_PHY_CONTROL 0x0728 +#define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020 +#define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024 +#define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028 +#define EXYNOS5420_ARM_CORE3_SYS_PWR_REG 0x1030 +#define EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG 0x1034 +#define EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG 0x1038 +#define EXYNOS5420_KFC_CORE0_SYS_PWR_REG 0x1040 +#define EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG 0x1044 +#define EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG 0x1048 +#define EXYNOS5420_KFC_CORE1_SYS_PWR_REG 0x1050 +#define EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG 0x1054 +#define EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG 0x1058 +#define EXYNOS5420_KFC_CORE2_SYS_PWR_REG 0x1060 +#define EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG 0x1064 +#define EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG 0x1068 +#define 
EXYNOS5420_KFC_CORE3_SYS_PWR_REG 0x1070 +#define EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG 0x1074 +#define EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG 0x1078 +#define EXYNOS5420_ISP_ARM_SYS_PWR_REG 0x1090 +#define EXYNOS5420_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1094 +#define EXYNOS5420_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1098 +#define EXYNOS5420_ARM_COMMON_SYS_PWR_REG 0x10A0 +#define EXYNOS5420_KFC_COMMON_SYS_PWR_REG 0x10B0 +#define EXYNOS5420_KFC_L2_SYS_PWR_REG 0x10D0 +#define EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG 0x1158 +#define EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG 0x115C +#define EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG 0x1160 +#define EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG 0x1174 +#define EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG 0x1178 +#define EXYNOS5420_INTRAM_MEM_SYS_PWR_REG 0x11B8 +#define EXYNOS5420_INTROM_MEM_SYS_PWR_REG 0x11BC +#define EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1208 +#define EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1210 +#define EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG 0x1214 +#define EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG 0x1218 +#define EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG 0x121C +#define EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG 0x1220 +#define EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG 0x1224 +#define EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1228 +#define EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG 0x122C +#define EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG 0x1230 +#define EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG 0x1234 +#define EXYNOS5420_DISP1_SYS_PWR_REG 0x1410 +#define EXYNOS5420_MAU_SYS_PWR_REG 0x1414 +#define EXYNOS5420_G2D_SYS_PWR_REG 0x1418 +#define EXYNOS5420_MSC_SYS_PWR_REG 0x141C +#define EXYNOS5420_FSYS_SYS_PWR_REG 0x1420 +#define EXYNOS5420_FSYS2_SYS_PWR_REG 0x1424 +#define EXYNOS5420_PSGEN_SYS_PWR_REG 0x1428 +#define EXYNOS5420_PERIC_SYS_PWR_REG 0x142C +#define EXYNOS5420_WCORE_SYS_PWR_REG 0x1430 +#define EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG 0x1490 +#define EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG 0x1494 +#define EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG 0x1498 +#define EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG 0x149C +#define EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG 0x14A0 +#define EXYNOS5420_CMU_CLKSTOP_FSYS2_SYS_PWR_REG 0x14A4 +#define EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG 0x14A8 +#define EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG 0x14AC +#define EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG 0x14B0 +#define EXYNOS5420_CMU_SYSCLK_TOPPWR_SYS_PWR_REG 0x14BC +#define EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG 0x14D0 +#define EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG 0x14D4 +#define EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG 0x14D8 +#define EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG 0x14DC +#define EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG 0x14E0 +#define EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG 0x14E4 +#define EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG 0x14E8 +#define EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG 0x14EC +#define EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG 0x14F0 +#define EXYNOS5420_CMU_SYSCLK_SYSMEM_TOPPWR_SYS_PWR_REG 0x14F4 +#define EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG 0x1570 +#define EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG 0x1574 +#define EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG 0x1578 +#define EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG 0x157C +#define EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG 0x1590 +#define EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG 0x1594 +#define EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG 0x1598 +#define EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG 0x159C +#define EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG 0x15A0 +#define EXYNOS5420_SFR_AXI_CGDIS1 0x15E4 +#define 
EXYNOS5420_ARM_COMMON_OPTION 0x2508 +#define EXYNOS5420_KFC_COMMON_OPTION 0x2588 +#define EXYNOS5420_LOGIC_RESET_DURATION3 0x2D1C + +#define EXYNOS5420_PAD_RET_GPIO_OPTION 0x30C8 +#define EXYNOS5420_PAD_RET_UART_OPTION 0x30E8 +#define EXYNOS5420_PAD_RET_MMCA_OPTION 0x3108 +#define EXYNOS5420_PAD_RET_MMCB_OPTION 0x3128 +#define EXYNOS5420_PAD_RET_MMCC_OPTION 0x3148 +#define EXYNOS5420_PAD_RET_HSI_OPTION 0x3168 +#define EXYNOS5420_PAD_RET_SPI_OPTION 0x31C8 +#define EXYNOS5420_PAD_RET_DRAM_COREBLK_OPTION 0x31E8 +#define EXYNOS_PAD_RET_DRAM_OPTION 0x3008 +#define EXYNOS_PAD_RET_MAUDIO_OPTION 0x3028 +#define EXYNOS_PAD_RET_JTAG_OPTION 0x3048 +#define EXYNOS_PAD_RET_EBIA_OPTION 0x3188 +#define EXYNOS_PAD_RET_EBIB_OPTION 0x31A8 + +#define EXYNOS5420_FSYS2_OPTION 0x4168 +#define EXYNOS5420_PSGEN_OPTION 0x4188 + +/* For EXYNOS_CENTRAL_SEQ_OPTION */ +#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16) +#define EXYNOS5_USE_STANDBYWFI_ARM_CORE1 BIT(17) +#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24) +#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25) + +#define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4) +#define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5) +#define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6) +#define EXYNOS5420_ARM_USE_STANDBY_WFI3 BIT(7) +#define EXYNOS5420_KFC_USE_STANDBY_WFI0 BIT(8) +#define EXYNOS5420_KFC_USE_STANDBY_WFI1 BIT(9) +#define EXYNOS5420_KFC_USE_STANDBY_WFI2 BIT(10) +#define EXYNOS5420_KFC_USE_STANDBY_WFI3 BIT(11) +#define EXYNOS5420_ARM_USE_STANDBY_WFE0 BIT(16) +#define EXYNOS5420_ARM_USE_STANDBY_WFE1 BIT(17) +#define EXYNOS5420_ARM_USE_STANDBY_WFE2 BIT(18) +#define EXYNOS5420_ARM_USE_STANDBY_WFE3 BIT(19) +#define EXYNOS5420_KFC_USE_STANDBY_WFE0 BIT(20) +#define EXYNOS5420_KFC_USE_STANDBY_WFE1 BIT(21) +#define EXYNOS5420_KFC_USE_STANDBY_WFE2 BIT(22) +#define EXYNOS5420_KFC_USE_STANDBY_WFE3 BIT(23) + +#define DUR_WAIT_RESET 0xF + +#define EXYNOS5420_USE_STANDBY_WFI_ALL (EXYNOS5420_ARM_USE_STANDBY_WFI0 \ + | EXYNOS5420_ARM_USE_STANDBY_WFI1 \ + | EXYNOS5420_ARM_USE_STANDBY_WFI2 \ + | EXYNOS5420_ARM_USE_STANDBY_WFI3 \ + | EXYNOS5420_KFC_USE_STANDBY_WFI0 \ + | EXYNOS5420_KFC_USE_STANDBY_WFI1 \ + | EXYNOS5420_KFC_USE_STANDBY_WFI2 \ + | EXYNOS5420_KFC_USE_STANDBY_WFI3) + +/* For EXYNOS5433 */ +#define EXYNOS5433_EINT_WAKEUP_MASK (0x060C) +#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728) +#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028) +#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8) +#define EXYNOS5433_PAD_RETENTION_TOP_OPTION (0x3108) +#define EXYNOS5433_PAD_RETENTION_UART_OPTION (0x3128) +#define EXYNOS5433_PAD_RETENTION_MMC0_OPTION (0x3148) +#define EXYNOS5433_PAD_RETENTION_MMC1_OPTION (0x3168) +#define EXYNOS5433_PAD_RETENTION_EBIA_OPTION (0x3188) +#define EXYNOS5433_PAD_RETENTION_EBIB_OPTION (0x31A8) +#define EXYNOS5433_PAD_RETENTION_SPI_OPTION (0x31C8) +#define EXYNOS5433_PAD_RETENTION_MIF_OPTION (0x31E8) +#define EXYNOS5433_PAD_RETENTION_USBXTI_OPTION (0x3228) +#define EXYNOS5433_PAD_RETENTION_BOOTLDO_OPTION (0x3248) +#define EXYNOS5433_PAD_RETENTION_UFS_OPTION (0x3268) +#define EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION (0x32A8) + +#endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */ diff --git a/include/linux/soc/sunxi/sunxi_sram.h b/include/linux/soc/sunxi/sunxi_sram.h new file mode 100644 index 000000000..c5f663bba --- /dev/null +++ b/include/linux/soc/sunxi/sunxi_sram.h @@ -0,0 +1,19 @@ +/* + * Allwinner SoCs SRAM Controller Driver + * + * Copyright (C) 2015 Maxime Ripard + * + * Author: Maxime Ripard + * + * This file is licensed under the terms of the GNU General Public + *
License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _SUNXI_SRAM_H_ +#define _SUNXI_SRAM_H_ + +int sunxi_sram_claim(struct device *dev); +int sunxi_sram_release(struct device *dev); + +#endif /* _SUNXI_SRAM_H_ */ diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h new file mode 100644 index 000000000..7127ec301 --- /dev/null +++ b/include/linux/soc/ti/knav_dma.h @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2014 Texas Instruments Incorporated + * Authors: Sandeep Nair + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ +#define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ + +#include + +/* + * PKTDMA descriptor manipulation macros for host packet descriptor + */ +#define MASK(x) (BIT(x) - 1) +#define KNAV_DMA_DESC_PKT_LEN_MASK MASK(22) +#define KNAV_DMA_DESC_PKT_LEN_SHIFT 0 +#define KNAV_DMA_DESC_PS_INFO_IN_SOP BIT(22) +#define KNAV_DMA_DESC_PS_INFO_IN_DESC 0 +#define KNAV_DMA_DESC_TAG_MASK MASK(8) +#define KNAV_DMA_DESC_SAG_HI_SHIFT 24 +#define KNAV_DMA_DESC_STAG_LO_SHIFT 16 +#define KNAV_DMA_DESC_DTAG_HI_SHIFT 8 +#define KNAV_DMA_DESC_DTAG_LO_SHIFT 0 +#define KNAV_DMA_DESC_HAS_EPIB BIT(31) +#define KNAV_DMA_DESC_NO_EPIB 0 +#define KNAV_DMA_DESC_PSLEN_SHIFT 24 +#define KNAV_DMA_DESC_PSLEN_MASK MASK(6) +#define KNAV_DMA_DESC_ERR_FLAG_SHIFT 20 +#define KNAV_DMA_DESC_ERR_FLAG_MASK MASK(4) +#define KNAV_DMA_DESC_PSFLAG_SHIFT 16 +#define KNAV_DMA_DESC_PSFLAG_MASK MASK(4) +#define KNAV_DMA_DESC_RETQ_SHIFT 0 +#define KNAV_DMA_DESC_RETQ_MASK MASK(14) +#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22) +#define KNAV_DMA_DESC_EFLAGS_MASK MASK(4) +#define KNAV_DMA_DESC_EFLAGS_SHIFT 20 + +#define KNAV_DMA_NUM_EPIB_WORDS 4 +#define KNAV_DMA_NUM_PS_WORDS 16 +#define KNAV_DMA_NUM_SW_DATA_WORDS 4 +#define KNAV_DMA_FDQ_PER_CHAN 4 + +/* Tx channel scheduling priority */ +enum knav_dma_tx_priority { + DMA_PRIO_HIGH = 0, + DMA_PRIO_MED_H, + DMA_PRIO_MED_L, + DMA_PRIO_LOW +}; + +/* Rx channel error handling mode during buffer starvation */ +enum knav_dma_rx_err_mode { + DMA_DROP = 0, + DMA_RETRY +}; + +/* Rx flow size threshold configuration */ +enum knav_dma_rx_thresholds { + DMA_THRESH_NONE = 0, + DMA_THRESH_0 = 1, + DMA_THRESH_0_1 = 3, + DMA_THRESH_0_1_2 = 7 +}; + +/* Descriptor type */ +enum knav_dma_desc_type { + DMA_DESC_HOST = 0, + DMA_DESC_MONOLITHIC = 2 +}; + +/** + * struct knav_dma_tx_cfg: Tx channel configuration + * @filt_einfo: Filter extended packet info + * @filt_pswords: Filter PS words present + * @knav_dma_tx_priority: Tx channel scheduling priority + */ +struct knav_dma_tx_cfg { + bool filt_einfo; + bool filt_pswords; + enum knav_dma_tx_priority priority; +}; + +/** + * struct knav_dma_rx_cfg: Rx flow configuration + * @einfo_present: Extended packet info present + * @psinfo_present: PS words present + * @knav_dma_rx_err_mode: Error during buffer starvation + * @knav_dma_desc_type: Host or Monolithic desc + * @psinfo_at_sop: PS word located at start of packet + * @sop_offset: Start of packet offset + * @dst_q: Destination queue for a given flow + * @thresh: 
Rx flow size threshold + * @fdq[]: Free desc Queue array + * @sz_thresh0: RX packet size threshold 0 + * @sz_thresh1: RX packet size threshold 1 + * @sz_thresh2: RX packet size threshold 2 + */ +struct knav_dma_rx_cfg { + bool einfo_present; + bool psinfo_present; + enum knav_dma_rx_err_mode err_mode; + enum knav_dma_desc_type desc_type; + bool psinfo_at_sop; + unsigned int sop_offset; + unsigned int dst_q; + enum knav_dma_rx_thresholds thresh; + unsigned int fdq[KNAV_DMA_FDQ_PER_CHAN]; + unsigned int sz_thresh0; + unsigned int sz_thresh1; + unsigned int sz_thresh2; +}; + +/** + * struct knav_dma_cfg: Pktdma channel configuration + * @sl_cfg: Slave configuration + * @tx: Tx channel configuration + * @rx: Rx flow configuration + */ +struct knav_dma_cfg { + enum dma_transfer_direction direction; + union { + struct knav_dma_tx_cfg tx; + struct knav_dma_rx_cfg rx; + } u; +}; + +/** + * struct knav_dma_desc: Host packet descriptor layout + * @desc_info: Descriptor information like id, type, length + * @tag_info: Flow tag info written in during RX + * @packet_info: Queue Manager, policy, flags etc + * @buff_len: Buffer length in bytes + * @buff: Buffer pointer + * @next_desc: For chaining the descriptors + * @orig_len: length since 'buff_len' can be overwritten + * @orig_buff: buff pointer since 'buff' can be overwritten + * @epib: Extended packet info block + * @psdata: Protocol specific + * @sw_data: Software private data not touched by h/w + */ +struct knav_dma_desc { + __le32 desc_info; + __le32 tag_info; + __le32 packet_info; + __le32 buff_len; + __le32 buff; + __le32 next_desc; + __le32 orig_len; + __le32 orig_buff; + __le32 epib[KNAV_DMA_NUM_EPIB_WORDS]; + __le32 psdata[KNAV_DMA_NUM_PS_WORDS]; + u32 sw_data[KNAV_DMA_NUM_SW_DATA_WORDS]; +} ____cacheline_aligned; + +#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA) +void *knav_dma_open_channel(struct device *dev, const char *name, + struct knav_dma_cfg *config); +void knav_dma_close_channel(void *channel); +int knav_dma_get_flow(void *channel); +bool knav_dma_device_ready(void); +#else +static inline void *knav_dma_open_channel(struct device *dev, const char *name, + struct knav_dma_cfg *config) +{ + return (void *) NULL; +} +static inline void knav_dma_close_channel(void *channel) +{} + +static inline int knav_dma_get_flow(void *channel) +{ + return -EINVAL; +} + +static inline bool knav_dma_device_ready(void) +{ + return false; +} + +#endif + +#endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */ diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h new file mode 100644 index 000000000..9745df6ed --- /dev/null +++ b/include/linux/soc/ti/knav_qmss.h @@ -0,0 +1,91 @@ +/* + * Keystone Navigator Queue Management Sub-System header + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Author: Sandeep Nair + * Cyril Chemparathy + * Santosh Shilimkar + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __SOC_TI_KNAV_QMSS_H__ +#define __SOC_TI_KNAV_QMSS_H__ + +#include +#include +#include +#include +#include +#include + +/* queue types */ +#define KNAV_QUEUE_QPEND ((unsigned)-2) /* interruptible qpend queue */ +#define KNAV_QUEUE_ACC ((unsigned)-3) /* Accumulated queue */ +#define KNAV_QUEUE_GP ((unsigned)-4) /* General purpose queue */ + +/* queue flags */ +#define KNAV_QUEUE_SHARED 0x0001 /* Queue can be shared */ + +/** + * enum knav_queue_ctrl_cmd - queue operations. + * @KNAV_QUEUE_GET_ID: Get the ID number for an open queue + * @KNAV_QUEUE_FLUSH: forcibly empty a queue if possible + * @KNAV_QUEUE_SET_NOTIFIER: Set a notifier callback to a queue handle. + * @KNAV_QUEUE_ENABLE_NOTIFY: Enable notifier callback for a queue handle. + * @KNAV_QUEUE_DISABLE_NOTIFY: Disable notifier callback for a queue handle. + * @KNAV_QUEUE_GET_COUNT: Get number of queues. + */ +enum knav_queue_ctrl_cmd { + KNAV_QUEUE_GET_ID, + KNAV_QUEUE_FLUSH, + KNAV_QUEUE_SET_NOTIFIER, + KNAV_QUEUE_ENABLE_NOTIFY, + KNAV_QUEUE_DISABLE_NOTIFY, + KNAV_QUEUE_GET_COUNT +}; + +/* Queue notifier callback prototype */ +typedef void (*knav_queue_notify_fn)(void *arg); + +/** + * struct knav_queue_notify_config: Notifier configuration + * @fn: Notifier function + * @fn_arg: Notifier function arguments + */ +struct knav_queue_notify_config { + knav_queue_notify_fn fn; + void *fn_arg; +}; + +void *knav_queue_open(const char *name, unsigned id, + unsigned flags); +void knav_queue_close(void *qhandle); +int knav_queue_device_control(void *qhandle, + enum knav_queue_ctrl_cmd cmd, + unsigned long arg); +dma_addr_t knav_queue_pop(void *qhandle, unsigned *size); +int knav_queue_push(void *qhandle, dma_addr_t dma, + unsigned size, unsigned flags); + +void *knav_pool_create(const char *name, + int num_desc, int region_id); +void knav_pool_destroy(void *ph); +int knav_pool_count(void *ph); +void *knav_pool_desc_get(void *ph); +void knav_pool_desc_put(void *ph, void *desc); +int knav_pool_desc_map(void *ph, void *desc, unsigned size, + dma_addr_t *dma, unsigned *dma_sz); +void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz); +dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt); +void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma); +bool knav_qmss_device_ready(void); + +#endif /* __SOC_TI_KNAV_QMSS_H__ */ diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h new file mode 100644 index 000000000..eac8e0c6f --- /dev/null +++ b/include/linux/soc/ti/ti-msgmgr.h @@ -0,0 +1,35 @@ +/* + * Texas Instruments' Message Manager + * + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Nishanth Menon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef TI_MSGMGR_H +#define TI_MSGMGR_H + +/** + * struct ti_msgmgr_message - Message Manager structure + * @len: Length of data in the Buffer + * @buf: Buffer pointer + * + * This is the structure for data used in mbox_send_message + * the length of data buffer used depends on the SoC integration + * parameters - each message may be 64, 128 bytes long depending + * on SoC. 
Client is supposed to be aware of this. + */ +struct ti_msgmgr_message { + size_t len; + u8 *buf; +}; + +#endif /* TI_MSGMGR_H */ diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h new file mode 100644 index 000000000..18435e5c6 --- /dev/null +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Texas Instruments System Control Interface Protocol + * + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Nishanth Menon + */ + +#ifndef __TISCI_PROTOCOL_H +#define __TISCI_PROTOCOL_H + +/** + * struct ti_sci_version_info - version information structure + * @abi_major: Major ABI version. Change here implies risk of backward + * compatibility break. + * @abi_minor: Minor ABI version. Change here implies new feature addition, + * or compatible change in ABI. + * @firmware_revision: Firmware revision (not usually used). + * @firmware_description: Firmware description (not usually used). + */ +struct ti_sci_version_info { + u8 abi_major; + u8 abi_minor; + u16 firmware_revision; + char firmware_description[32]; +}; + +struct ti_sci_handle; + +/** + * struct ti_sci_core_ops - SoC Core Operations + * @reboot_device: Reboot the SoC + * Returns 0 for successful request(ideally should never return), + * else returns corresponding error value. + */ +struct ti_sci_core_ops { + int (*reboot_device)(const struct ti_sci_handle *handle); +}; + +/** + * struct ti_sci_dev_ops - Device control operations + * @get_device: Command to request for device managed by TISCI + * Returns 0 for successful exclusive request, else returns + * corresponding error message. + * @idle_device: Command to idle a device managed by TISCI + * Returns 0 for successful exclusive request, else returns + * corresponding error message. + * @put_device: Command to release a device managed by TISCI + * Returns 0 for successful release, else returns corresponding + * error message. + * @is_valid: Check if the device ID is a valid ID. + * Returns 0 if the ID is valid, else returns corresponding error. + * @get_context_loss_count: Command to retrieve context loss counter - this + * increments every time the device looses context. Overflow + * is possible. + * - count: pointer to u32 which will retrieve counter + * Returns 0 for successful information request and count has + * proper data, else returns corresponding error message. + * @is_idle: Reports back about device idle state + * - req_state: Returns requested idle state + * Returns 0 for successful information request and req_state and + * current_state has proper data, else returns corresponding error + * message. + * @is_stop: Reports back about device stop state + * - req_state: Returns requested stop state + * - current_state: Returns current stop state + * Returns 0 for successful information request and req_state and + * current_state has proper data, else returns corresponding error + * message. + * @is_on: Reports back about device ON(or active) state + * - req_state: Returns requested ON state + * - current_state: Returns current ON state + * Returns 0 for successful information request and req_state and + * current_state has proper data, else returns corresponding error + * message. + * @is_transitioning: Reports back if the device is in the middle of transition + * of state. + * -current_state: Returns 'true' if currently transitioning. + * @set_device_resets: Command to configure resets for device managed by TISCI. 
+ * -reset_state: Device specific reset bit field + * Returns 0 for successful request, else returns + * corresponding error message. + * @get_device_resets: Command to read state of resets for device managed + * by TISCI. + * -reset_state: pointer to u32 which will retrieve resets + * Returns 0 for successful request, else returns + * corresponding error message. + * + * NOTE: for all these functions, the following parameters are generic in + * nature: + * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * -id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + */ +struct ti_sci_dev_ops { + int (*get_device)(const struct ti_sci_handle *handle, u32 id); + int (*idle_device)(const struct ti_sci_handle *handle, u32 id); + int (*put_device)(const struct ti_sci_handle *handle, u32 id); + int (*is_valid)(const struct ti_sci_handle *handle, u32 id); + int (*get_context_loss_count)(const struct ti_sci_handle *handle, + u32 id, u32 *count); + int (*is_idle)(const struct ti_sci_handle *handle, u32 id, + bool *requested_state); + int (*is_stop)(const struct ti_sci_handle *handle, u32 id, + bool *req_state, bool *current_state); + int (*is_on)(const struct ti_sci_handle *handle, u32 id, + bool *req_state, bool *current_state); + int (*is_transitioning)(const struct ti_sci_handle *handle, u32 id, + bool *current_state); + int (*set_device_resets)(const struct ti_sci_handle *handle, u32 id, + u32 reset_state); + int (*get_device_resets)(const struct ti_sci_handle *handle, u32 id, + u32 *reset_state); +}; + +/** + * struct ti_sci_clk_ops - Clock control operations + * @get_clock: Request for activation of clock and manage by processor + * - needs_ssc: 'true' if Spread Spectrum clock is desired. + * - can_change_freq: 'true' if frequency change is desired. + * - enable_input_term: 'true' if input termination is desired. + * @idle_clock: Request for Idling a clock managed by processor + * @put_clock: Release the clock to be auto managed by TISCI + * @is_auto: Is the clock being auto managed + * - req_state: state indicating if the clock is auto managed + * @is_on: Is the clock ON + * - req_state: if the clock is requested to be forced ON + * - current_state: if the clock is currently ON + * @is_off: Is the clock OFF + * - req_state: if the clock is requested to be forced OFF + * - current_state: if the clock is currently Gated + * @set_parent: Set the clock source of a specific device clock + * - parent_id: Parent clock identifier to set. + * @get_parent: Get the current clock source of a specific device clock + * - parent_id: Parent clock identifier which is the parent. + * @get_num_parents: Get the number of parents of the current clock source + * - num_parents: returns the number of parent clocks. + * @get_best_match_freq: Find a best matching frequency for a frequency + * range. + * - match_freq: Best matching frequency in Hz. + * @set_freq: Set the Clock frequency + * @get_freq: Get the Clock frequency + * - current_freq: Frequency in Hz that the clock is at. + * + * NOTE: for all these functions, the following parameters are generic in + * nature: + * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * -did: Device identifier this request is for + * -cid: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. 
+ * -min_freq: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * -target_freq: The target clock frequency in Hz. A frequency will be + * processed as close to this target frequency as possible. + * -max_freq: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * + * Request for the clock - NOTE: the client MUST maintain integrity of + * usage count by balancing get_clock with put_clock. No refcounting is + * managed by driver for that purpose. + */ +struct ti_sci_clk_ops { + int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid, + bool needs_ssc, bool can_change_freq, + bool enable_input_term); + int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid); + int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid); + int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u8 cid, + bool *req_state); + int (*is_on)(const struct ti_sci_handle *handle, u32 did, u8 cid, + bool *req_state, bool *current_state); + int (*is_off)(const struct ti_sci_handle *handle, u32 did, u8 cid, + bool *req_state, bool *current_state); + int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid, + u8 parent_id); + int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid, + u8 *parent_id); + int (*get_num_parents)(const struct ti_sci_handle *handle, u32 did, + u8 cid, u8 *num_parents); + int (*get_best_match_freq)(const struct ti_sci_handle *handle, u32 did, + u8 cid, u64 min_freq, u64 target_freq, + u64 max_freq, u64 *match_freq); + int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid, + u64 min_freq, u64 target_freq, u64 max_freq); + int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid, + u64 *current_freq); +}; + +/** + * struct ti_sci_ops - Function support for TI SCI + * @dev_ops: Device specific operations + * @clk_ops: Clock specific operations + */ +struct ti_sci_ops { + struct ti_sci_core_ops core_ops; + struct ti_sci_dev_ops dev_ops; + struct ti_sci_clk_ops clk_ops; +}; + +/** + * struct ti_sci_handle - Handle returned to TI SCI clients for usage. 
+ * @version: structure containing version information + * @ops: operations that are made available to TI SCI clients + */ +struct ti_sci_handle { + struct ti_sci_version_info version; + struct ti_sci_ops ops; +}; + +#if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL) +const struct ti_sci_handle *ti_sci_get_handle(struct device *dev); +int ti_sci_put_handle(const struct ti_sci_handle *handle); +const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev); + +#else /* CONFIG_TI_SCI_PROTOCOL */ + +static inline const struct ti_sci_handle *ti_sci_get_handle(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} + +static inline int ti_sci_put_handle(const struct ti_sci_handle *handle) +{ + return -EINVAL; +} + +static inline +const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} + +#endif /* CONFIG_TI_SCI_PROTOCOL */ + +#endif /* __TISCI_PROTOCOL_H */ diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h new file mode 100644 index 000000000..15fe980a2 --- /dev/null +++ b/include/linux/sock_diag.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SOCK_DIAG_H__ +#define __SOCK_DIAG_H__ + +#include +#include +#include +#include +#include + +struct sk_buff; +struct nlmsghdr; +struct sock; + +struct sock_diag_handler { + __u8 family; + int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); + int (*get_info)(struct sk_buff *skb, struct sock *sk); + int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh); +}; + +int sock_diag_register(const struct sock_diag_handler *h); +void sock_diag_unregister(const struct sock_diag_handler *h); + +void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); +void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); + +u64 sock_gen_cookie(struct sock *sk); +int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie); +void sock_diag_save_cookie(struct sock *sk, __u32 *cookie); + +int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); +int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, + struct sk_buff *skb, int attrtype); + +static inline +enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk) +{ + switch (sk->sk_family) { + case AF_INET: + if (sk->sk_type == SOCK_RAW) + return SKNLGRP_NONE; + + switch (sk->sk_protocol) { + case IPPROTO_TCP: + return SKNLGRP_INET_TCP_DESTROY; + case IPPROTO_UDP: + return SKNLGRP_INET_UDP_DESTROY; + default: + return SKNLGRP_NONE; + } + case AF_INET6: + if (sk->sk_type == SOCK_RAW) + return SKNLGRP_NONE; + + switch (sk->sk_protocol) { + case IPPROTO_TCP: + return SKNLGRP_INET6_TCP_DESTROY; + case IPPROTO_UDP: + return SKNLGRP_INET6_UDP_DESTROY; + default: + return SKNLGRP_NONE; + } + default: + return SKNLGRP_NONE; + } +} + +static inline +bool sock_diag_has_destroy_listeners(const struct sock *sk) +{ + const struct net *n = sock_net(sk); + const enum sknetlink_groups group = sock_diag_destroy_group(sk); + + return group != SKNLGRP_NONE && n->diag_nlsk && + netlink_has_listeners(n->diag_nlsk, group); +} +void sock_diag_broadcast_destroy(struct sock *sk); + +int sock_diag_destroy(struct sock *sk, int err); +#endif diff --git a/include/linux/socket.h b/include/linux/socket.h new file mode 100644 index 000000000..15a7eb24f --- /dev/null +++ b/include/linux/socket.h @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SOCKET_H +#define _LINUX_SOCKET_H + + +#include /* arch-dependent defines */ +#include /* the 
SIOCxxx I/O controls */ +#include /* iovec support */ +#include /* pid_t */ +#include /* __user */ +#include + +struct pid; +struct cred; + +#define __sockaddr_check_size(size) \ + BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) + +#ifdef CONFIG_PROC_FS +struct seq_file; +extern void socket_seq_show(struct seq_file *seq); +#endif + +typedef __kernel_sa_family_t sa_family_t; + +/* + * 1003.1g requires sa_family_t and that sa_data is char. + */ + +struct sockaddr { + sa_family_t sa_family; /* address family, AF_xxx */ + char sa_data[14]; /* 14 bytes of protocol address */ +}; + +struct linger { + int l_onoff; /* Linger active */ + int l_linger; /* How long to linger for */ +}; + +#define sockaddr_storage __kernel_sockaddr_storage + +/* + * As we do 4.4BSD message passing we use a 4.4BSD message passing + * system, not 4.3. Thus msg_accrights(len) are now missing. They + * belong in an obscure libc emulation or the bin. + */ + +struct msghdr { + void *msg_name; /* ptr to socket address structure */ + int msg_namelen; /* size of socket address structure */ + struct iov_iter msg_iter; /* data */ + void *msg_control; /* ancillary data */ + __kernel_size_t msg_controllen; /* ancillary data buffer length */ + unsigned int msg_flags; /* flags on received message */ + struct kiocb *msg_iocb; /* ptr to iocb for async requests */ +}; + +struct user_msghdr { + void __user *msg_name; /* ptr to socket address structure */ + int msg_namelen; /* size of socket address structure */ + struct iovec __user *msg_iov; /* scatter/gather array */ + __kernel_size_t msg_iovlen; /* # elements in msg_iov */ + void __user *msg_control; /* ancillary data */ + __kernel_size_t msg_controllen; /* ancillary data buffer length */ + unsigned int msg_flags; /* flags on received message */ +}; + +/* For recvmmsg/sendmmsg */ +struct mmsghdr { + struct user_msghdr msg_hdr; + unsigned int msg_len; +}; + +/* + * POSIX 1003.1g - ancillary data object information + * Ancillary data consits of a sequence of pairs of + * (cmsghdr, cmsg_data[]) + */ + +struct cmsghdr { + __kernel_size_t cmsg_len; /* data byte count, including hdr */ + int cmsg_level; /* originating protocol */ + int cmsg_type; /* protocol-specific type */ +}; + +/* + * Ancillary data object information MACROS + * Table 5-14 of POSIX 1003.1g + */ + +#define __CMSG_NXTHDR(ctl, len, cmsg) __cmsg_nxthdr((ctl),(len),(cmsg)) +#define CMSG_NXTHDR(mhdr, cmsg) cmsg_nxthdr((mhdr), (cmsg)) + +#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) ) + +#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + sizeof(struct cmsghdr))) +#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len)) +#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len)) + +#define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \ + (struct cmsghdr *)(ctl) : \ + (struct cmsghdr *)NULL) +#define CMSG_FIRSTHDR(msg) __CMSG_FIRSTHDR((msg)->msg_control, (msg)->msg_controllen) +#define CMSG_OK(mhdr, cmsg) ((cmsg)->cmsg_len >= sizeof(struct cmsghdr) && \ + (cmsg)->cmsg_len <= (unsigned long) \ + ((mhdr)->msg_controllen - \ + ((char *)(cmsg) - (char *)(mhdr)->msg_control))) +#define for_each_cmsghdr(cmsg, msg) \ + for (cmsg = CMSG_FIRSTHDR(msg); \ + cmsg; \ + cmsg = CMSG_NXTHDR(msg, cmsg)) + +/* + * Get the next cmsg header + * + * PLEASE, do not touch this function. If you think, that it is + * incorrect, grep kernel sources and think about consequences + * before trying to improve it. + * + * Now it always returns valid, not truncated ancillary object + * HEADER. 
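The CMSG_* accessors and for_each_cmsghdr() defined above are easier to follow with a short sketch. This hypothetical helper (not part of the header) walks the ancillary data of an already-received kernel msghdr and, as the comment here insists, re-checks cmsg_len with CMSG_OK() before trusting an entry; SOL_SOCKET comes from the arch-dependent header pulled in at the top of this file:

#include <linux/errno.h>
#include <linux/socket.h>

/* Hypothetical helper: count well-formed SOL_SOCKET control messages. */
static int demo_count_scm(struct msghdr *msg)
{
	struct cmsghdr *cmsg;
	int n = 0;

	for_each_cmsghdr(cmsg, msg) {
		/* Validate the advertised length before touching the data. */
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level == SOL_SOCKET)
			n++;
	}

	return n;
}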
But caller still MUST check, that cmsg->cmsg_len is + * inside range, given by msg->msg_controllen before using + * ancillary object DATA. --ANK (980731) + */ + +static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, + struct cmsghdr *__cmsg) +{ + struct cmsghdr * __ptr; + + __ptr = (struct cmsghdr*)(((unsigned char *) __cmsg) + CMSG_ALIGN(__cmsg->cmsg_len)); + if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size) + return (struct cmsghdr *)0; + + return __ptr; +} + +static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) +{ + return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); +} + +static inline size_t msg_data_left(struct msghdr *msg) +{ + return iov_iter_count(&msg->msg_iter); +} + +/* "Socket"-level control message types: */ + +#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */ +#define SCM_CREDENTIALS 0x02 /* rw: struct ucred */ +#define SCM_SECURITY 0x03 /* rw: security label */ + +struct ucred { + __u32 pid; + __u32 uid; + __u32 gid; +}; + +/* Supported address families. */ +#define AF_UNSPEC 0 +#define AF_UNIX 1 /* Unix domain sockets */ +#define AF_LOCAL 1 /* POSIX name for AF_UNIX */ +#define AF_INET 2 /* Internet IP Protocol */ +#define AF_AX25 3 /* Amateur Radio AX.25 */ +#define AF_IPX 4 /* Novell IPX */ +#define AF_APPLETALK 5 /* AppleTalk DDP */ +#define AF_NETROM 6 /* Amateur Radio NET/ROM */ +#define AF_BRIDGE 7 /* Multiprotocol bridge */ +#define AF_ATMPVC 8 /* ATM PVCs */ +#define AF_X25 9 /* Reserved for X.25 project */ +#define AF_INET6 10 /* IP version 6 */ +#define AF_ROSE 11 /* Amateur Radio X.25 PLP */ +#define AF_DECnet 12 /* Reserved for DECnet project */ +#define AF_NETBEUI 13 /* Reserved for 802.2LLC project*/ +#define AF_SECURITY 14 /* Security callback pseudo AF */ +#define AF_KEY 15 /* PF_KEY key management API */ +#define AF_NETLINK 16 +#define AF_ROUTE AF_NETLINK /* Alias to emulate 4.4BSD */ +#define AF_PACKET 17 /* Packet family */ +#define AF_ASH 18 /* Ash */ +#define AF_ECONET 19 /* Acorn Econet */ +#define AF_ATMSVC 20 /* ATM SVCs */ +#define AF_RDS 21 /* RDS sockets */ +#define AF_SNA 22 /* Linux SNA Project (nutters!) */ +#define AF_IRDA 23 /* IRDA sockets */ +#define AF_PPPOX 24 /* PPPoX sockets */ +#define AF_WANPIPE 25 /* Wanpipe API Sockets */ +#define AF_LLC 26 /* Linux LLC */ +#define AF_IB 27 /* Native InfiniBand address */ +#define AF_MPLS 28 /* MPLS */ +#define AF_CAN 29 /* Controller Area Network */ +#define AF_TIPC 30 /* TIPC sockets */ +#define AF_BLUETOOTH 31 /* Bluetooth sockets */ +#define AF_IUCV 32 /* IUCV sockets */ +#define AF_RXRPC 33 /* RxRPC sockets */ +#define AF_ISDN 34 /* mISDN sockets */ +#define AF_PHONET 35 /* Phonet sockets */ +#define AF_IEEE802154 36 /* IEEE802154 sockets */ +#define AF_CAIF 37 /* CAIF sockets */ +#define AF_ALG 38 /* Algorithm sockets */ +#define AF_NFC 39 /* NFC sockets */ +#define AF_VSOCK 40 /* vSockets */ +#define AF_KCM 41 /* Kernel Connection Multiplexor*/ +#define AF_QIPCRTR 42 /* Qualcomm IPC Router */ +#define AF_SMC 43 /* smc sockets: reserve number for + * PF_SMC protocol family that + * reuses AF_INET address family + */ +#define AF_XDP 44 /* XDP sockets */ + +#define AF_MAX 45 /* For now.. */ + +/* Protocol families, same as address families. 
*/ +#define PF_UNSPEC AF_UNSPEC +#define PF_UNIX AF_UNIX +#define PF_LOCAL AF_LOCAL +#define PF_INET AF_INET +#define PF_AX25 AF_AX25 +#define PF_IPX AF_IPX +#define PF_APPLETALK AF_APPLETALK +#define PF_NETROM AF_NETROM +#define PF_BRIDGE AF_BRIDGE +#define PF_ATMPVC AF_ATMPVC +#define PF_X25 AF_X25 +#define PF_INET6 AF_INET6 +#define PF_ROSE AF_ROSE +#define PF_DECnet AF_DECnet +#define PF_NETBEUI AF_NETBEUI +#define PF_SECURITY AF_SECURITY +#define PF_KEY AF_KEY +#define PF_NETLINK AF_NETLINK +#define PF_ROUTE AF_ROUTE +#define PF_PACKET AF_PACKET +#define PF_ASH AF_ASH +#define PF_ECONET AF_ECONET +#define PF_ATMSVC AF_ATMSVC +#define PF_RDS AF_RDS +#define PF_SNA AF_SNA +#define PF_IRDA AF_IRDA +#define PF_PPPOX AF_PPPOX +#define PF_WANPIPE AF_WANPIPE +#define PF_LLC AF_LLC +#define PF_IB AF_IB +#define PF_MPLS AF_MPLS +#define PF_CAN AF_CAN +#define PF_TIPC AF_TIPC +#define PF_BLUETOOTH AF_BLUETOOTH +#define PF_IUCV AF_IUCV +#define PF_RXRPC AF_RXRPC +#define PF_ISDN AF_ISDN +#define PF_PHONET AF_PHONET +#define PF_IEEE802154 AF_IEEE802154 +#define PF_CAIF AF_CAIF +#define PF_ALG AF_ALG +#define PF_NFC AF_NFC +#define PF_VSOCK AF_VSOCK +#define PF_KCM AF_KCM +#define PF_QIPCRTR AF_QIPCRTR +#define PF_SMC AF_SMC +#define PF_XDP AF_XDP +#define PF_MAX AF_MAX + +/* Maximum queue length specifiable by listen. */ +#define SOMAXCONN 128 + +/* Flags we can use with send/ and recv. + Added those for 1003.1g not all are supported yet + */ + +#define MSG_OOB 1 +#define MSG_PEEK 2 +#define MSG_DONTROUTE 4 +#define MSG_TRYHARD 4 /* Synonym for MSG_DONTROUTE for DECnet */ +#define MSG_CTRUNC 8 +#define MSG_PROBE 0x10 /* Do not send. Only probe path f.e. for MTU */ +#define MSG_TRUNC 0x20 +#define MSG_DONTWAIT 0x40 /* Nonblocking io */ +#define MSG_EOR 0x80 /* End of record */ +#define MSG_WAITALL 0x100 /* Wait for a full request */ +#define MSG_FIN 0x200 +#define MSG_SYN 0x400 +#define MSG_CONFIRM 0x800 /* Confirm path validity */ +#define MSG_RST 0x1000 +#define MSG_ERRQUEUE 0x2000 /* Fetch message from error queue */ +#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ +#define MSG_MORE 0x8000 /* Sender will send more */ +#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ +#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ +#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ +#define MSG_EOF MSG_FIN +#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ + +#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ +#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ +#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file + descriptor received through + SCM_RIGHTS */ +#if defined(CONFIG_COMPAT) +#define MSG_CMSG_COMPAT 0x80000000 /* This message needs 32 bit fixups */ +#else +#define MSG_CMSG_COMPAT 0 /* We never have 32 bit fixups */ +#endif + + +/* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */ +#define SOL_IP 0 +/* #define SOL_ICMP 1 No-no-no! 
Due to Linux :-) we cannot use SOL_ICMP=1 */ +#define SOL_TCP 6 +#define SOL_UDP 17 +#define SOL_IPV6 41 +#define SOL_ICMPV6 58 +#define SOL_SCTP 132 +#define SOL_UDPLITE 136 /* UDP-Lite (RFC 3828) */ +#define SOL_RAW 255 +#define SOL_IPX 256 +#define SOL_AX25 257 +#define SOL_ATALK 258 +#define SOL_NETROM 259 +#define SOL_ROSE 260 +#define SOL_DECNET 261 +#define SOL_X25 262 +#define SOL_PACKET 263 +#define SOL_ATM 264 /* ATM layer (cell level) */ +#define SOL_AAL 265 /* ATM Adaption Layer (packet level) */ +#define SOL_IRDA 266 +#define SOL_NETBEUI 267 +#define SOL_LLC 268 +#define SOL_DCCP 269 +#define SOL_NETLINK 270 +#define SOL_TIPC 271 +#define SOL_RXRPC 272 +#define SOL_PPPOL2TP 273 +#define SOL_BLUETOOTH 274 +#define SOL_PNPIPE 275 +#define SOL_RDS 276 +#define SOL_IUCV 277 +#define SOL_CAIF 278 +#define SOL_ALG 279 +#define SOL_NFC 280 +#define SOL_KCM 281 +#define SOL_TLS 282 +#define SOL_XDP 283 + +/* IPX options */ +#define IPX_TYPE 1 + +extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); +extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); + +struct timespec; + +/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff + * forbid_cmsg_compat==false + */ +extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, + unsigned int flags, bool forbid_cmsg_compat); +extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, + unsigned int flags, bool forbid_cmsg_compat); +extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, + unsigned int flags, struct timespec *timeout); +extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, + unsigned int vlen, unsigned int flags, + bool forbid_cmsg_compat); + +/* helpers which do the actual work for syscalls */ +extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, + unsigned int flags, struct sockaddr __user *addr, + int __user *addr_len); +extern int __sys_sendto(int fd, void __user *buff, size_t len, + unsigned int flags, struct sockaddr __user *addr, + int addr_len); +extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags); +extern int __sys_socket(int family, int type, int protocol); +extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen); +extern int __sys_connect(int fd, struct sockaddr __user *uservaddr, + int addrlen); +extern int __sys_listen(int fd, int backlog); +extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr, + int __user *usockaddr_len); +extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr, + int __user *usockaddr_len); +extern int __sys_socketpair(int family, int type, int protocol, + int __user *usockvec); +extern int __sys_shutdown(int fd, int how); +#endif /* _LINUX_SOCKET_H */ diff --git a/include/linux/sonet.h b/include/linux/sonet.h new file mode 100644 index 000000000..2b802b6d1 --- /dev/null +++ b/include/linux/sonet.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* sonet.h - SONET/SHD physical layer control */ +#ifndef LINUX_SONET_H +#define LINUX_SONET_H + + +#include +#include + +struct k_sonet_stats { +#define __HANDLE_ITEM(i) atomic_t i + __SONET_ITEMS +#undef __HANDLE_ITEM +}; + +extern void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to); +extern void sonet_subtract_stats(struct k_sonet_stats *from, + struct sonet_stats *to); + +#endif diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h new file mode 100644 index 
000000000..374d0fdb0 --- /dev/null +++ b/include/linux/sony-laptop.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SONYLAPTOP_H_ +#define _SONYLAPTOP_H_ + +#include + +#ifdef __KERNEL__ + +/* used only for communication between v4l and sony-laptop */ + +#define SONY_PIC_COMMAND_GETCAMERA 1 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERA 2 +#define SONY_PIC_COMMAND_GETCAMERABRIGHTNESS 3 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERABRIGHTNESS 4 +#define SONY_PIC_COMMAND_GETCAMERACONTRAST 5 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERACONTRAST 6 +#define SONY_PIC_COMMAND_GETCAMERAHUE 7 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERAHUE 8 +#define SONY_PIC_COMMAND_GETCAMERACOLOR 9 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERACOLOR 10 +#define SONY_PIC_COMMAND_GETCAMERASHARPNESS 11 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERASHARPNESS 12 +#define SONY_PIC_COMMAND_GETCAMERAPICTURE 13 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERAPICTURE 14 +#define SONY_PIC_COMMAND_GETCAMERAAGC 15 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERAAGC 16 +#define SONY_PIC_COMMAND_GETCAMERADIRECTION 17 /* obsolete */ +#define SONY_PIC_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */ +#define SONY_PIC_COMMAND_GETCAMERAREVISION 19 /* obsolete */ + +#if IS_ENABLED(CONFIG_SONY_LAPTOP) +int sony_pic_camera_command(int command, u8 value); +#else +static inline int sony_pic_camera_command(int command, u8 value) { return 0; }; +#endif + +#endif /* __KERNEL__ */ + +#endif /* _SONYLAPTOP_H_ */ diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h new file mode 100644 index 000000000..0b7cc265c --- /dev/null +++ b/include/linux/sonypi.h @@ -0,0 +1,63 @@ +/* + * Sony Programmable I/O Control Device driver for VAIO + * + * Copyright (C) 2001-2005 Stelian Pop + * + * Copyright (C) 2005 Narayanan R S + + * Copyright (C) 2001-2002 Alcôve + * + * Copyright (C) 2001 Michael Ashley + * + * Copyright (C) 2001 Junichi Morita + * + * Copyright (C) 2000 Takaya Kinjo + * + * Copyright (C) 2000 Andrew Tridgell + * + * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
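sony_pic_camera_command() above is the only interface sony-laptop.h exports to the rest of the kernel; a tiny hypothetical caller (the brightness value is arbitrary) looks like the sketch below, and when CONFIG_SONY_LAPTOP is disabled the inline stub simply returns 0:

#include <linux/types.h>
#include <linux/sony-laptop.h>

/* Hypothetical V4L2-side helper: push a new brightness to the camera PIC. */
static int demo_set_camera_brightness(u8 value)
{
	return sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERABRIGHTNESS,
				       value);
}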
+ * + */ +#ifndef _SONYPI_H_ +#define _SONYPI_H_ + +#include + + +/* used only for communication between v4l and sonypi */ + +#define SONYPI_COMMAND_GETCAMERA 1 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERA 2 +#define SONYPI_COMMAND_GETCAMERABRIGHTNESS 3 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERABRIGHTNESS 4 +#define SONYPI_COMMAND_GETCAMERACONTRAST 5 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERACONTRAST 6 +#define SONYPI_COMMAND_GETCAMERAHUE 7 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERAHUE 8 +#define SONYPI_COMMAND_GETCAMERACOLOR 9 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERACOLOR 10 +#define SONYPI_COMMAND_GETCAMERASHARPNESS 11 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERASHARPNESS 12 +#define SONYPI_COMMAND_GETCAMERAPICTURE 13 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERAPICTURE 14 +#define SONYPI_COMMAND_GETCAMERAAGC 15 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERAAGC 16 +#define SONYPI_COMMAND_GETCAMERADIRECTION 17 /* obsolete */ +#define SONYPI_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */ +#define SONYPI_COMMAND_GETCAMERAREVISION 19 /* obsolete */ + +#endif /* _SONYPI_H_ */ diff --git a/include/linux/sort.h b/include/linux/sort.h new file mode 100644 index 000000000..2b99a5dd0 --- /dev/null +++ b/include/linux/sort.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SORT_H +#define _LINUX_SORT_H + +#include + +void sort(void *base, size_t num, size_t size, + int (*cmp)(const void *, const void *), + void (*swap)(void *, void *, int)); + +#endif diff --git a/include/linux/sound.h b/include/linux/sound.h new file mode 100644 index 000000000..ec85b7a1f --- /dev/null +++ b/include/linux/sound.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SOUND_H +#define _LINUX_SOUND_H + +#include + +/* + * Sound core interface functions + */ + +struct device; +extern int register_sound_special(const struct file_operations *fops, int unit); +extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev); +extern int register_sound_mixer(const struct file_operations *fops, int dev); +extern int register_sound_dsp(const struct file_operations *fops, int dev); + +extern void unregister_sound_special(int unit); +extern void unregister_sound_mixer(int unit); +extern void unregister_sound_dsp(int unit); +#endif /* _LINUX_SOUND_H */ diff --git a/include/linux/soundcard.h b/include/linux/soundcard.h new file mode 100644 index 000000000..96c79cbd7 --- /dev/null +++ b/include/linux/soundcard.h @@ -0,0 +1,37 @@ +/* + * Copyright by Hannu Savolainen 1993-1997 + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 2. + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
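The sort() prototype in sort.h above takes a comparison callback plus an optional swap callback; passing NULL for the swap lets the library fall back to its generic element swap. A minimal sketch (hypothetical helper) sorting a small integer array:

#include <linux/kernel.h>
#include <linux/sort.h>

/* Comparison callback: ascending order of plain ints. */
static int demo_cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static void demo_sort_ints(void)
{
	int v[] = { 42, 7, 19, 3 };

	/* NULL swap callback: the library uses its built-in generic swap. */
	sort(v, ARRAY_SIZE(v), sizeof(v[0]), demo_cmp_int, NULL);
}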
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#ifndef SOUNDCARD_H +#define SOUNDCARD_H + +# include +#include + +# if defined(__BIG_ENDIAN) +# define AFMT_S16_NE AFMT_S16_BE +# elif defined(__LITTLE_ENDIAN) +# define AFMT_S16_NE AFMT_S16_LE +# else +# error "could not determine byte order" +# endif +#endif diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h new file mode 100644 index 000000000..962971e6a --- /dev/null +++ b/include/linux/soundwire/sdw.h @@ -0,0 +1,809 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +// Copyright(c) 2015-17 Intel Corporation. + +#ifndef __SOUNDWIRE_H +#define __SOUNDWIRE_H + +struct sdw_bus; +struct sdw_slave; + +/* SDW spec defines and enums, as defined by MIPI 1.1. Spec */ + +/* SDW Broadcast Device Number */ +#define SDW_BROADCAST_DEV_NUM 15 + +/* SDW Enumeration Device Number */ +#define SDW_ENUM_DEV_NUM 0 + +/* SDW Group Device Numbers */ +#define SDW_GROUP12_DEV_NUM 12 +#define SDW_GROUP13_DEV_NUM 13 + +/* SDW Master Device Number, not supported yet */ +#define SDW_MASTER_DEV_NUM 14 + +#define SDW_NUM_DEV_ID_REGISTERS 6 +/* frame shape defines */ + +/* + * Note: The maximum row define in SoundWire spec 1.1 is 23. In order to + * fill hole with 0, one more dummy entry is added + */ +#define SDW_FRAME_ROWS 24 +#define SDW_FRAME_COLS 8 +#define SDW_FRAME_ROW_COLS (SDW_FRAME_ROWS * SDW_FRAME_COLS) + +#define SDW_FRAME_CTRL_BITS 48 +#define SDW_MAX_DEVICES 11 + +#define SDW_VALID_PORT_RANGE(n) (n <= 14 && n >= 1) + +#define SDW_DAI_ID_RANGE_START 100 +#define SDW_DAI_ID_RANGE_END 200 + +/** + * enum sdw_slave_status - Slave status + * @SDW_SLAVE_UNATTACHED: Slave is not attached with the bus. + * @SDW_SLAVE_ATTACHED: Slave is attached with bus. 
+ * @SDW_SLAVE_ALERT: Some alert condition on the Slave + * @SDW_SLAVE_RESERVED: Reserved for future use + */ +enum sdw_slave_status { + SDW_SLAVE_UNATTACHED = 0, + SDW_SLAVE_ATTACHED = 1, + SDW_SLAVE_ALERT = 2, + SDW_SLAVE_RESERVED = 3, +}; + +/** + * enum sdw_command_response - Command response as defined by SDW spec + * @SDW_CMD_OK: cmd was successful + * @SDW_CMD_IGNORED: cmd was ignored + * @SDW_CMD_FAIL: cmd was NACKed + * @SDW_CMD_TIMEOUT: cmd timedout + * @SDW_CMD_FAIL_OTHER: cmd failed due to other reason than above + * + * NOTE: The enum is different than actual Spec as response in the Spec is + * combination of ACK/NAK bits + * + * SDW_CMD_TIMEOUT/FAIL_OTHER is defined for SW use, not in spec + */ +enum sdw_command_response { + SDW_CMD_OK = 0, + SDW_CMD_IGNORED = 1, + SDW_CMD_FAIL = 2, + SDW_CMD_TIMEOUT = 3, + SDW_CMD_FAIL_OTHER = 4, +}; + +/** + * enum sdw_stream_type: data stream type + * + * @SDW_STREAM_PCM: PCM data stream + * @SDW_STREAM_PDM: PDM data stream + * + * spec doesn't define this, but is used in implementation + */ +enum sdw_stream_type { + SDW_STREAM_PCM = 0, + SDW_STREAM_PDM = 1, +}; + +/** + * enum sdw_data_direction: Data direction + * + * @SDW_DATA_DIR_RX: Data into Port + * @SDW_DATA_DIR_TX: Data out of Port + */ +enum sdw_data_direction { + SDW_DATA_DIR_RX = 0, + SDW_DATA_DIR_TX = 1, +}; + +/* + * SDW properties, defined in MIPI DisCo spec v1.0 + */ +enum sdw_clk_stop_reset_behave { + SDW_CLK_STOP_KEEP_STATUS = 1, +}; + +/** + * enum sdw_p15_behave - Slave Port 15 behaviour when the Master attempts a + * read + * @SDW_P15_READ_IGNORED: Read is ignored + * @SDW_P15_CMD_OK: Command is ok + */ +enum sdw_p15_behave { + SDW_P15_READ_IGNORED = 0, + SDW_P15_CMD_OK = 1, +}; + +/** + * enum sdw_dpn_type - Data port types + * @SDW_DPN_FULL: Full Data Port is supported + * @SDW_DPN_SIMPLE: Simplified Data Port as defined in spec. + * DPN_SampleCtrl2, DPN_OffsetCtrl2, DPN_HCtrl and DPN_BlockCtrl3 + * are not implemented. + * @SDW_DPN_REDUCED: Reduced Data Port as defined in spec. + * DPN_SampleCtrl2, DPN_HCtrl are not implemented. 
+ */ +enum sdw_dpn_type { + SDW_DPN_FULL = 0, + SDW_DPN_SIMPLE = 1, + SDW_DPN_REDUCED = 2, +}; + +/** + * enum sdw_clk_stop_mode - Clock Stop modes + * @SDW_CLK_STOP_MODE0: Slave can continue operation seamlessly on clock + * restart + * @SDW_CLK_STOP_MODE1: Slave may have entered a deeper power-saving mode, + * not capable of continuing operation seamlessly when the clock restarts + */ +enum sdw_clk_stop_mode { + SDW_CLK_STOP_MODE0 = 0, + SDW_CLK_STOP_MODE1 = 1, +}; + +/** + * struct sdw_dp0_prop - DP0 properties + * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64 + * (inclusive) + * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64 + * (inclusive) + * @num_words: number of wordlengths supported + * @words: wordlengths supported + * @flow_controlled: Slave implementation results in an OK_NotReady + * response + * @simple_ch_prep_sm: If channel prepare sequence is required + * @device_interrupts: If implementation-defined interrupts are supported + * + * The wordlengths are specified by Spec as max, min AND number of + * discrete values, implementation can define based on the wordlengths they + * support + */ +struct sdw_dp0_prop { + u32 max_word; + u32 min_word; + u32 num_words; + u32 *words; + bool flow_controlled; + bool simple_ch_prep_sm; + bool device_interrupts; +}; + +/** + * struct sdw_dpn_audio_mode - Audio mode properties for DPn + * @bus_min_freq: Minimum bus frequency, in Hz + * @bus_max_freq: Maximum bus frequency, in Hz + * @bus_num_freq: Number of discrete frequencies supported + * @bus_freq: Discrete bus frequencies, in Hz + * @min_freq: Minimum sampling frequency, in Hz + * @max_freq: Maximum sampling bus frequency, in Hz + * @num_freq: Number of discrete sampling frequency supported + * @freq: Discrete sampling frequencies, in Hz + * @prep_ch_behave: Specifies the dependencies between Channel Prepare + * sequence and bus clock configuration + * If 0, Channel Prepare can happen at any Bus clock rate + * If 1, Channel Prepare sequence shall happen only after Bus clock is + * changed to a frequency supported by this mode or compatible modes + * described by the next field + * @glitchless: Bitmap describing possible glitchless transitions from this + * Audio Mode to other Audio Modes + */ +struct sdw_dpn_audio_mode { + u32 bus_min_freq; + u32 bus_max_freq; + u32 bus_num_freq; + u32 *bus_freq; + u32 max_freq; + u32 min_freq; + u32 num_freq; + u32 *freq; + u32 prep_ch_behave; + u32 glitchless; +}; + +/** + * struct sdw_dpn_prop - Data Port DPn properties + * @num: port number + * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64 + * (inclusive) + * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64 + * (inclusive) + * @num_words: Number of discrete supported wordlengths + * @words: Discrete supported wordlength + * @type: Data port type. 
Full, Simplified or Reduced + * @max_grouping: Maximum number of samples that can be grouped together for + * a full data port + * @simple_ch_prep_sm: If the port supports simplified channel prepare state + * machine + * @ch_prep_timeout: Port-specific timeout value, in milliseconds + * @device_interrupts: If set, each bit corresponds to support for + * implementation-defined interrupts + * @max_ch: Maximum channels supported + * @min_ch: Minimum channels supported + * @num_ch: Number of discrete channels supported + * @ch: Discrete channels supported + * @num_ch_combinations: Number of channel combinations supported + * @ch_combinations: Channel combinations supported + * @modes: SDW mode supported + * @max_async_buffer: Number of samples that this port can buffer in + * asynchronous modes + * @block_pack_mode: Type of block port mode supported + * @port_encoding: Payload Channel Sample encoding schemes supported + * @audio_modes: Audio modes supported + */ +struct sdw_dpn_prop { + u32 num; + u32 max_word; + u32 min_word; + u32 num_words; + u32 *words; + enum sdw_dpn_type type; + u32 max_grouping; + bool simple_ch_prep_sm; + u32 ch_prep_timeout; + u32 device_interrupts; + u32 max_ch; + u32 min_ch; + u32 num_ch; + u32 *ch; + u32 num_ch_combinations; + u32 *ch_combinations; + u32 modes; + u32 max_async_buffer; + bool block_pack_mode; + u32 port_encoding; + struct sdw_dpn_audio_mode *audio_modes; +}; + +/** + * struct sdw_slave_prop - SoundWire Slave properties + * @mipi_revision: Spec version of the implementation + * @wake_capable: Wake-up events are supported + * @test_mode_capable: If test mode is supported + * @clk_stop_mode1: Clock-Stop Mode 1 is supported + * @simple_clk_stop_capable: Simple clock mode is supported + * @clk_stop_timeout: Worst-case latency of the Clock Stop Prepare State + * Machine transitions, in milliseconds + * @ch_prep_timeout: Worst-case latency of the Channel Prepare State Machine + * transitions, in milliseconds + * @reset_behave: Slave keeps the status of the SlaveStopClockPrepare + * state machine (P=1 SCSP_SM) after exit from clock-stop mode1 + * @high_PHY_capable: Slave is HighPHY capable + * @paging_support: Slave implements paging registers SCP_AddrPage1 and + * SCP_AddrPage2 + * @bank_delay_support: Slave implements bank delay/bridge support registers + * SCP_BankDelay and SCP_NextFrame + * @p15_behave: Slave behavior when the Master attempts a read to the Port15 + * alias + * @lane_control_support: Slave supports lane control + * @master_count: Number of Masters present on this Slave + * @source_ports: Bitmap identifying source ports + * @sink_ports: Bitmap identifying sink ports + * @dp0_prop: Data Port 0 properties + * @src_dpn_prop: Source Data Port N properties + * @sink_dpn_prop: Sink Data Port N properties + */ +struct sdw_slave_prop { + u32 mipi_revision; + bool wake_capable; + bool test_mode_capable; + bool clk_stop_mode1; + bool simple_clk_stop_capable; + u32 clk_stop_timeout; + u32 ch_prep_timeout; + enum sdw_clk_stop_reset_behave reset_behave; + bool high_PHY_capable; + bool paging_support; + bool bank_delay_support; + enum sdw_p15_behave p15_behave; + bool lane_control_support; + u32 master_count; + u32 source_ports; + u32 sink_ports; + struct sdw_dp0_prop *dp0_prop; + struct sdw_dpn_prop *src_dpn_prop; + struct sdw_dpn_prop *sink_dpn_prop; +}; + +/** + * struct sdw_master_prop - Master properties + * @revision: MIPI spec version of the implementation + * @master_count: Number of masters + * @clk_stop_mode: Bitmap for Clock Stop modes 
supported + * @max_freq: Maximum Bus clock frequency, in Hz + * @num_clk_gears: Number of clock gears supported + * @clk_gears: Clock gears supported + * @num_freq: Number of clock frequencies supported, in Hz + * @freq: Clock frequencies supported, in Hz + * @default_frame_rate: Controller default Frame rate, in Hz + * @default_row: Number of rows + * @default_col: Number of columns + * @dynamic_frame: Dynamic frame supported + * @err_threshold: Number of times that software may retry sending a single + * command + * @dpn_prop: Data Port N properties + */ +struct sdw_master_prop { + u32 revision; + u32 master_count; + enum sdw_clk_stop_mode clk_stop_mode; + u32 max_freq; + u32 num_clk_gears; + u32 *clk_gears; + u32 num_freq; + u32 *freq; + u32 default_frame_rate; + u32 default_row; + u32 default_col; + bool dynamic_frame; + u32 err_threshold; + struct sdw_dpn_prop *dpn_prop; +}; + +int sdw_master_read_prop(struct sdw_bus *bus); +int sdw_slave_read_prop(struct sdw_slave *slave); + +/* + * SDW Slave Structures and APIs + */ + +/** + * struct sdw_slave_id - Slave ID + * @mfg_id: MIPI Manufacturer ID + * @part_id: Device Part ID + * @class_id: MIPI Class ID, unused now. + * Currently a placeholder in MIPI SoundWire Spec + * @unique_id: Device unique ID + * @sdw_version: SDW version implemented + * + * The order of the IDs here does not follow the DisCo spec definitions + */ +struct sdw_slave_id { + __u16 mfg_id; + __u16 part_id; + __u8 class_id; + __u8 unique_id:4; + __u8 sdw_version:4; +}; + +/** + * struct sdw_slave_intr_status - Slave interrupt status + * @control_port: control port status + * @port: data port status + */ +struct sdw_slave_intr_status { + u8 control_port; + u8 port[15]; +}; + +/** + * sdw_reg_bank - SoundWire register banks + * @SDW_BANK0: Soundwire register bank 0 + * @SDW_BANK1: Soundwire register bank 1 + */ +enum sdw_reg_bank { + SDW_BANK0, + SDW_BANK1, +}; + +/** + * struct sdw_bus_conf: Bus configuration + * + * @clk_freq: Clock frequency, in Hz + * @num_rows: Number of rows in frame + * @num_cols: Number of columns in frame + * @bank: Next register bank + */ +struct sdw_bus_conf { + unsigned int clk_freq; + unsigned int num_rows; + unsigned int num_cols; + unsigned int bank; +}; + +/** + * struct sdw_prepare_ch: Prepare/De-prepare Data Port channel + * + * @num: Port number + * @ch_mask: Active channel mask + * @prepare: Prepare (true) /de-prepare (false) channel + * @bank: Register bank, which bank Slave/Master driver should program for + * implementation defined registers. This is always updated to next_bank + * value read from bus params. + * + */ +struct sdw_prepare_ch { + unsigned int num; + unsigned int ch_mask; + bool prepare; + unsigned int bank; +}; + +/** + * enum sdw_port_prep_ops: Prepare operations for Data Port + * + * @SDW_OPS_PORT_PRE_PREP: Pre prepare operation for the Port + * @SDW_OPS_PORT_PREP: Prepare operation for the Port + * @SDW_OPS_PORT_POST_PREP: Post prepare operation for the Port + */ +enum sdw_port_prep_ops { + SDW_OPS_PORT_PRE_PREP = 0, + SDW_OPS_PORT_PREP = 1, + SDW_OPS_PORT_POST_PREP = 2, +}; + +/** + * struct sdw_bus_params: Structure holding bus configuration + * + * @curr_bank: Current bank in use (BANK0/BANK1) + * @next_bank: Next bank to use (BANK0/BANK1). 
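The property structures above are typically filled in from a Slave driver's read_prop() callback. A hedged sketch (hypothetical codec, arbitrary port choices) first lets the bus parse the firmware-described DisCo properties via sdw_slave_read_prop() and then overrides the fields the driver knows better:

#include <linux/bits.h>
#include <linux/soundwire/sdw.h>

/* Hypothetical Slave driver callback: describe this codec's capabilities. */
static int demo_sdw_read_prop(struct sdw_slave *slave)
{
	struct sdw_slave_prop *prop = &slave->prop;

	/* Start from the DisCo/firmware description, if any. */
	sdw_slave_read_prop(slave);

	/* Driver-known overrides; the values are purely illustrative. */
	prop->simple_clk_stop_capable = true;
	prop->wake_capable = false;
	prop->source_ports = BIT(2);	/* DP2 streams data onto the bus */
	prop->sink_ports = BIT(1);	/* DP1 receives data from the bus */

	return 0;
}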
next_bank will always be + * set to !curr_bank + * @max_dr_freq: Maximum double rate clock frequency supported, in Hz + * @curr_dr_freq: Current double rate clock frequency, in Hz + * @bandwidth: Current bandwidth + * @col: Active columns + * @row: Active rows + */ +struct sdw_bus_params { + enum sdw_reg_bank curr_bank; + enum sdw_reg_bank next_bank; + unsigned int max_dr_freq; + unsigned int curr_dr_freq; + unsigned int bandwidth; + unsigned int col; + unsigned int row; +}; + +/** + * struct sdw_slave_ops: Slave driver callback ops + * + * @read_prop: Read Slave properties + * @interrupt_callback: Device interrupt notification (invoked in thread + * context) + * @update_status: Update Slave status + * @bus_config: Update the bus config for Slave + * @port_prep: Prepare the port with parameters + */ +struct sdw_slave_ops { + int (*read_prop)(struct sdw_slave *sdw); + int (*interrupt_callback)(struct sdw_slave *slave, + struct sdw_slave_intr_status *status); + int (*update_status)(struct sdw_slave *slave, + enum sdw_slave_status status); + int (*bus_config)(struct sdw_slave *slave, + struct sdw_bus_params *params); + int (*port_prep)(struct sdw_slave *slave, + struct sdw_prepare_ch *prepare_ch, + enum sdw_port_prep_ops pre_ops); +}; + +/** + * struct sdw_slave - SoundWire Slave + * @id: MIPI device ID + * @dev: Linux device + * @status: Status reported by the Slave + * @bus: Bus handle + * @ops: Slave callback ops + * @prop: Slave properties + * @node: node for bus list + * @port_ready: Port ready completion flag for each Slave port + * @dev_num: Device Number assigned by Bus + */ +struct sdw_slave { + struct sdw_slave_id id; + struct device dev; + enum sdw_slave_status status; + struct sdw_bus *bus; + const struct sdw_slave_ops *ops; + struct sdw_slave_prop prop; + struct list_head node; + struct completion *port_ready; + u16 dev_num; +}; + +#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) + +struct sdw_driver { + const char *name; + + int (*probe)(struct sdw_slave *sdw, + const struct sdw_device_id *id); + int (*remove)(struct sdw_slave *sdw); + void (*shutdown)(struct sdw_slave *sdw); + + const struct sdw_device_id *id_table; + const struct sdw_slave_ops *ops; + + struct device_driver driver; +}; + +#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \ + { .mfg_id = (_mfg_id), .part_id = (_part_id), \ + .driver_data = (unsigned long)(_drv_data) } + +int sdw_handle_slave_status(struct sdw_bus *bus, + enum sdw_slave_status status[]); + +/* + * SDW master structures and APIs + */ + +/** + * struct sdw_port_params: Data Port parameters + * + * @num: Port number + * @bps: Word length of the Port + * @flow_mode: Port Data flow mode + * @data_mode: Test modes or normal mode + * + * This is used to program the Data Port based on Data Port stream + * parameters. + */ +struct sdw_port_params { + unsigned int num; + unsigned int bps; + unsigned int flow_mode; + unsigned int data_mode; +}; + +/** + * struct sdw_transport_params: Data Port Transport Parameters + * + * @blk_grp_ctrl_valid: Port implements block group control + * @num: Port number + * @blk_grp_ctrl: Block group control value + * @sample_interval: Sample interval + * @offset1: Blockoffset of the payload data + * @offset2: Blockoffset of the payload data + * @hstart: Horizontal start of the payload data + * @hstop: Horizontal stop of the payload data + * @blk_pkg_mode: Block per channel or block per port + * @lane_ctrl: Data lane Port uses for Data transfer. 
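Putting the Slave-side pieces together, a driver ties an sdw_slave_ops instance and an ID table (built with SDW_SLAVE_ENTRY) into struct sdw_driver and registers it with sdw_register_driver(), which sdw_type.h declares later in this patch. A minimal hypothetical skeleton, with invented manufacturer and part IDs:

#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>

static int demo_update_status(struct sdw_slave *slave,
			      enum sdw_slave_status status)
{
	dev_dbg(&slave->dev, "new status %d\n", status);
	return 0;
}

static const struct sdw_slave_ops demo_slave_ops = {
	.update_status = demo_update_status,
};

static int demo_sdw_probe(struct sdw_slave *slave,
			  const struct sdw_device_id *id)
{
	/* Resource allocation and regmap setup would normally go here. */
	return 0;
}

/* Invented manufacturer and part IDs, for illustration only. */
static const struct sdw_device_id demo_sdw_id[] = {
	SDW_SLAVE_ENTRY(0x1234, 0x5678, 0),
	{},
};

static struct sdw_driver demo_sdw_driver = {
	.name = "demo-sdw-codec",
	.probe = demo_sdw_probe,
	.id_table = demo_sdw_id,
	.ops = &demo_slave_ops,
	.driver = {
		.name = "demo-sdw-codec",
	},
};

static int __init demo_sdw_init(void)
{
	return sdw_register_driver(&demo_sdw_driver);
}
module_init(demo_sdw_init);

static void __exit demo_sdw_exit(void)
{
	sdw_unregister_driver(&demo_sdw_driver);
}
module_exit(demo_sdw_exit);

MODULE_LICENSE("GPL");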
Currently only single + * data lane is supported in bus + * + * This is used to program the Data Port based on Data Port transport + * parameters. All these parameters are banked and can be modified + * during a bank switch without any artifacts in audio stream. + */ +struct sdw_transport_params { + bool blk_grp_ctrl_valid; + unsigned int port_num; + unsigned int blk_grp_ctrl; + unsigned int sample_interval; + unsigned int offset1; + unsigned int offset2; + unsigned int hstart; + unsigned int hstop; + unsigned int blk_pkg_mode; + unsigned int lane_ctrl; +}; + +/** + * struct sdw_enable_ch: Enable/disable Data Port channel + * + * @num: Port number + * @ch_mask: Active channel mask + * @enable: Enable (true) /disable (false) channel + */ +struct sdw_enable_ch { + unsigned int port_num; + unsigned int ch_mask; + bool enable; +}; + +/** + * struct sdw_master_port_ops: Callback functions from bus to Master + * driver to set Master Data ports. + * + * @dpn_set_port_params: Set the Port parameters for the Master Port. + * Mandatory callback + * @dpn_set_port_transport_params: Set transport parameters for the Master + * Port. Mandatory callback + * @dpn_port_prep: Port prepare operations for the Master Data Port. + * @dpn_port_enable_ch: Enable the channels of Master Port. + */ +struct sdw_master_port_ops { + int (*dpn_set_port_params)(struct sdw_bus *bus, + struct sdw_port_params *port_params, + unsigned int bank); + int (*dpn_set_port_transport_params)(struct sdw_bus *bus, + struct sdw_transport_params *transport_params, + enum sdw_reg_bank bank); + int (*dpn_port_prep)(struct sdw_bus *bus, + struct sdw_prepare_ch *prepare_ch); + int (*dpn_port_enable_ch)(struct sdw_bus *bus, + struct sdw_enable_ch *enable_ch, unsigned int bank); +}; + +struct sdw_msg; + +/** + * struct sdw_defer - SDW deffered message + * @length: message length + * @complete: message completion + * @msg: SDW message + */ +struct sdw_defer { + int length; + struct completion complete; + struct sdw_msg *msg; +}; + +/** + * struct sdw_master_ops - Master driver ops + * @read_prop: Read Master properties + * @xfer_msg: Transfer message callback + * @xfer_msg_defer: Defer version of transfer message callback + * @reset_page_addr: Reset the SCP page address registers + * @set_bus_conf: Set the bus configuration + * @pre_bank_switch: Callback for pre bank switch + * @post_bank_switch: Callback for post bank switch + */ +struct sdw_master_ops { + int (*read_prop)(struct sdw_bus *bus); + + enum sdw_command_response (*xfer_msg) + (struct sdw_bus *bus, struct sdw_msg *msg); + enum sdw_command_response (*xfer_msg_defer) + (struct sdw_bus *bus, struct sdw_msg *msg, + struct sdw_defer *defer); + enum sdw_command_response (*reset_page_addr) + (struct sdw_bus *bus, unsigned int dev_num); + int (*set_bus_conf)(struct sdw_bus *bus, + struct sdw_bus_params *params); + int (*pre_bank_switch)(struct sdw_bus *bus); + int (*post_bank_switch)(struct sdw_bus *bus); + +}; + +/** + * struct sdw_bus - SoundWire bus + * @dev: Master linux device + * @link_id: Link id number, can be 0 to N, unique for each Master + * @slaves: list of Slaves on this bus + * @assigned: Bitmap for Slave device numbers. + * Bit set implies used number, bit clear implies unused number. + * @bus_lock: bus lock + * @msg_lock: message lock + * @ops: Master callback ops + * @port_ops: Master port callback ops + * @params: Current bus parameters + * @prop: Master properties + * @m_rt_list: List of Master instance of all stream(s) running on Bus. 
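On the Master/machine-driver side, the stream API declared a little further below (sdw_alloc_stream(), sdw_stream_add_master()/_slave(), sdw_prepare_stream(), sdw_enable_stream()) is the intended way to bring audio up on the bus just described. A hedged sketch, with an invented port number and a fixed 48 kHz, 2 channel, 16-bit PCM configuration:

#include <linux/errno.h>
#include <linux/soundwire/sdw.h>

/* Hypothetical hw_params-time setup for one Master and one Slave. */
static int demo_setup_playback(struct sdw_bus *bus, struct sdw_slave *slave)
{
	struct sdw_stream_config scfg = {
		.frame_rate = 48000,
		.ch_count = 2,
		.bps = 16,
		.direction = SDW_DATA_DIR_TX,	/* illustrative only */
		.type = SDW_STREAM_PCM,
	};
	struct sdw_port_config pcfg = {
		.num = 1,	/* invented data port number */
		.ch_mask = 0x3,	/* both channels */
	};
	struct sdw_stream_runtime *stream;
	int ret;

	stream = sdw_alloc_stream("demo-playback");
	if (!stream)
		return -ENOMEM;

	ret = sdw_stream_add_master(bus, &scfg, &pcfg, 1, stream);
	if (ret)
		goto err_free;

	ret = sdw_stream_add_slave(slave, &scfg, &pcfg, 1, stream);
	if (ret)
		goto err_master;

	/* Compute/program transport parameters, then start the stream. */
	ret = sdw_prepare_stream(stream);
	if (!ret)
		ret = sdw_enable_stream(stream);
	if (ret)
		goto err_slave;

	return 0;

err_slave:
	sdw_stream_remove_slave(slave, stream);
err_master:
	sdw_stream_remove_master(bus, stream);
err_free:
	sdw_release_stream(stream);
	return ret;
}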
This + * is used to compute and program bus bandwidth, clock, frame shape, + * transport and port parameters + * @defer_msg: Defer message + * @clk_stop_timeout: Clock stop timeout computed + * @bank_switch_timeout: Bank switch timeout computed + */ +struct sdw_bus { + struct device *dev; + unsigned int link_id; + struct list_head slaves; + DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); + struct mutex bus_lock; + struct mutex msg_lock; + const struct sdw_master_ops *ops; + const struct sdw_master_port_ops *port_ops; + struct sdw_bus_params params; + struct sdw_master_prop prop; + struct list_head m_rt_list; + struct sdw_defer defer_msg; + unsigned int clk_stop_timeout; + u32 bank_switch_timeout; +}; + +int sdw_add_bus_master(struct sdw_bus *bus); +void sdw_delete_bus_master(struct sdw_bus *bus); + +/** + * sdw_port_config: Master or Slave Port configuration + * + * @num: Port number + * @ch_mask: channels mask for port + */ +struct sdw_port_config { + unsigned int num; + unsigned int ch_mask; +}; + +/** + * sdw_stream_config: Master or Slave stream configuration + * + * @frame_rate: Audio frame rate of the stream, in Hz + * @ch_count: Channel count of the stream + * @bps: Number of bits per audio sample + * @direction: Data direction + * @type: Stream type PCM or PDM + */ +struct sdw_stream_config { + unsigned int frame_rate; + unsigned int ch_count; + unsigned int bps; + enum sdw_data_direction direction; + enum sdw_stream_type type; +}; + +/** + * sdw_stream_state: Stream states + * + * @SDW_STREAM_ALLOCATED: New stream allocated. + * @SDW_STREAM_CONFIGURED: Stream configured + * @SDW_STREAM_PREPARED: Stream prepared + * @SDW_STREAM_ENABLED: Stream enabled + * @SDW_STREAM_DISABLED: Stream disabled + * @SDW_STREAM_DEPREPARED: Stream de-prepared + * @SDW_STREAM_RELEASED: Stream released + */ +enum sdw_stream_state { + SDW_STREAM_ALLOCATED = 0, + SDW_STREAM_CONFIGURED = 1, + SDW_STREAM_PREPARED = 2, + SDW_STREAM_ENABLED = 3, + SDW_STREAM_DISABLED = 4, + SDW_STREAM_DEPREPARED = 5, + SDW_STREAM_RELEASED = 6, +}; + +/** + * sdw_stream_params: Stream parameters + * + * @rate: Sampling frequency, in Hz + * @ch_count: Number of channels + * @bps: bits per channel sample + */ +struct sdw_stream_params { + unsigned int rate; + unsigned int ch_count; + unsigned int bps; +}; + +/** + * sdw_stream_runtime: Runtime stream parameters + * + * @name: SoundWire stream name + * @params: Stream parameters + * @state: Current state of the stream + * @type: Stream type PCM or PDM + * @m_rt: Master runtime + */ +struct sdw_stream_runtime { + char *name; + struct sdw_stream_params params; + enum sdw_stream_state state; + enum sdw_stream_type type; + struct sdw_master_runtime *m_rt; +}; + +struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name); +void sdw_release_stream(struct sdw_stream_runtime *stream); +int sdw_stream_add_master(struct sdw_bus *bus, + struct sdw_stream_config *stream_config, + struct sdw_port_config *port_config, + unsigned int num_ports, + struct sdw_stream_runtime *stream); +int sdw_stream_add_slave(struct sdw_slave *slave, + struct sdw_stream_config *stream_config, + struct sdw_port_config *port_config, + unsigned int num_ports, + struct sdw_stream_runtime *stream); +int sdw_stream_remove_master(struct sdw_bus *bus, + struct sdw_stream_runtime *stream); +int sdw_stream_remove_slave(struct sdw_slave *slave, + struct sdw_stream_runtime *stream); +int sdw_prepare_stream(struct sdw_stream_runtime *stream); +int sdw_enable_stream(struct sdw_stream_runtime *stream); +int 
sdw_disable_stream(struct sdw_stream_runtime *stream); +int sdw_deprepare_stream(struct sdw_stream_runtime *stream); + +/* messaging and data APIs */ + +int sdw_read(struct sdw_slave *slave, u32 addr); +int sdw_write(struct sdw_slave *slave, u32 addr, u8 value); +int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); +int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); + +#endif /* __SOUNDWIRE_H */ diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h new file mode 100644 index 000000000..2b9573b8a --- /dev/null +++ b/include/linux/soundwire/sdw_intel.h @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +// Copyright(c) 2015-17 Intel Corporation. + +#ifndef __SDW_INTEL_H +#define __SDW_INTEL_H + +/** + * struct sdw_intel_ops: Intel audio driver callback ops + * + * @config_stream: configure the stream with the hw_params + */ +struct sdw_intel_ops { + int (*config_stream)(void *arg, void *substream, + void *dai, void *hw_params, int stream_num); +}; + +/** + * struct sdw_intel_res - Soundwire Intel resource structure + * @mmio_base: mmio base of SoundWire registers + * @irq: interrupt number + * @handle: ACPI parent handle + * @parent: parent device + * @ops: callback ops + * @arg: callback arg + */ +struct sdw_intel_res { + void __iomem *mmio_base; + int irq; + acpi_handle handle; + struct device *parent; + const struct sdw_intel_ops *ops; + void *arg; +}; + +void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res); +void sdw_intel_exit(void *arg); + +#endif diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h new file mode 100644 index 000000000..df472b1ab --- /dev/null +++ b/include/linux/soundwire/sdw_registers.h @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +// Copyright(c) 2015-17 Intel Corporation. + +#ifndef __SDW_REGISTERS_H +#define __SDW_REGISTERS_H + +/* + * typically we define register and shifts but if one observes carefully, + * the shift can be generated from MASKS using few bit primitaives like ffs + * etc, so we use that and avoid defining shifts + */ +#define SDW_REG_SHIFT(n) (ffs(n) - 1) + +/* + * SDW registers as defined by MIPI 1.1 Spec + */ +#define SDW_REGADDR GENMASK(14, 0) +#define SDW_SCP_ADDRPAGE2_MASK GENMASK(22, 15) +#define SDW_SCP_ADDRPAGE1_MASK GENMASK(30, 23) + +#define SDW_REG_NO_PAGE 0x00008000 +#define SDW_REG_OPTIONAL_PAGE 0x00010000 +#define SDW_REG_MAX 0x80000000 + +#define SDW_DPN_SIZE 0x100 +#define SDW_BANK1_OFFSET 0x10 + +/* + * DP0 Interrupt register & bits + * + * Spec treats Status (RO) and Clear (WC) as separate but they are same + * address, so treat as same register with WC. 
+ */ + +/* both INT and STATUS register are same */ +#define SDW_DP0_INT 0x0 +#define SDW_DP0_INTMASK 0x1 +#define SDW_DP0_PORTCTRL 0x2 +#define SDW_DP0_BLOCKCTRL1 0x3 +#define SDW_DP0_PREPARESTATUS 0x4 +#define SDW_DP0_PREPARECTRL 0x5 + +#define SDW_DP0_INT_TEST_FAIL BIT(0) +#define SDW_DP0_INT_PORT_READY BIT(1) +#define SDW_DP0_INT_BRA_FAILURE BIT(2) +#define SDW_DP0_INT_IMPDEF1 BIT(5) +#define SDW_DP0_INT_IMPDEF2 BIT(6) +#define SDW_DP0_INT_IMPDEF3 BIT(7) + +#define SDW_DP0_PORTCTRL_DATAMODE GENMASK(3, 2) +#define SDW_DP0_PORTCTRL_NXTINVBANK BIT(4) +#define SDW_DP0_PORTCTRL_BPT_PAYLD GENMASK(7, 6) + +#define SDW_DP0_CHANNELEN 0x20 +#define SDW_DP0_SAMPLECTRL1 0x22 +#define SDW_DP0_SAMPLECTRL2 0x23 +#define SDW_DP0_OFFSETCTRL1 0x24 +#define SDW_DP0_OFFSETCTRL2 0x25 +#define SDW_DP0_HCTRL 0x26 +#define SDW_DP0_LANECTRL 0x28 + +/* Both INT and STATUS register are same */ +#define SDW_SCP_INT1 0x40 +#define SDW_SCP_INTMASK1 0x41 + +#define SDW_SCP_INT1_PARITY BIT(0) +#define SDW_SCP_INT1_BUS_CLASH BIT(1) +#define SDW_SCP_INT1_IMPL_DEF BIT(2) +#define SDW_SCP_INT1_SCP2_CASCADE BIT(7) +#define SDW_SCP_INT1_PORT0_3 GENMASK(6, 3) + +#define SDW_SCP_INTSTAT2 0x42 +#define SDW_SCP_INTSTAT2_SCP3_CASCADE BIT(7) +#define SDW_SCP_INTSTAT2_PORT4_10 GENMASK(6, 0) + + +#define SDW_SCP_INTSTAT3 0x43 +#define SDW_SCP_INTSTAT3_PORT11_14 GENMASK(3, 0) + +/* Number of interrupt status registers */ +#define SDW_NUM_INT_STAT_REGISTERS 3 + +/* Number of interrupt clear registers */ +#define SDW_NUM_INT_CLEAR_REGISTERS 1 + +#define SDW_SCP_CTRL 0x44 +#define SDW_SCP_CTRL_CLK_STP_NOW BIT(1) +#define SDW_SCP_CTRL_FORCE_RESET BIT(7) + +#define SDW_SCP_STAT 0x44 +#define SDW_SCP_STAT_CLK_STP_NF BIT(0) +#define SDW_SCP_STAT_HPHY_NOK BIT(5) +#define SDW_SCP_STAT_CURR_BANK BIT(6) + +#define SDW_SCP_SYSTEMCTRL 0x45 +#define SDW_SCP_SYSTEMCTRL_CLK_STP_PREP BIT(0) +#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE BIT(2) +#define SDW_SCP_SYSTEMCTRL_WAKE_UP_EN BIT(3) +#define SDW_SCP_SYSTEMCTRL_HIGH_PHY BIT(4) + +#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE0 0 +#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1 BIT(2) + +#define SDW_SCP_DEVNUMBER 0x46 +#define SDW_SCP_HIGH_PHY_CHECK 0x47 +#define SDW_SCP_ADDRPAGE1 0x48 +#define SDW_SCP_ADDRPAGE2 0x49 +#define SDW_SCP_KEEPEREN 0x4A +#define SDW_SCP_BANKDELAY 0x4B +#define SDW_SCP_TESTMODE 0x4F +#define SDW_SCP_DEVID_0 0x50 +#define SDW_SCP_DEVID_1 0x51 +#define SDW_SCP_DEVID_2 0x52 +#define SDW_SCP_DEVID_3 0x53 +#define SDW_SCP_DEVID_4 0x54 +#define SDW_SCP_DEVID_5 0x55 + +/* Banked Registers */ +#define SDW_SCP_FRAMECTRL_B0 0x60 +#define SDW_SCP_FRAMECTRL_B1 (0x60 + SDW_BANK1_OFFSET) +#define SDW_SCP_NEXTFRAME_B0 0x61 +#define SDW_SCP_NEXTFRAME_B1 (0x61 + SDW_BANK1_OFFSET) + +/* Both INT and STATUS register is same */ +#define SDW_DPN_INT(n) (0x0 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_INTMASK(n) (0x1 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_PORTCTRL(n) (0x2 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_BLOCKCTRL1(n) (0x3 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_PREPARESTATUS(n) (0x4 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_PREPARECTRL(n) (0x5 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_INT_TEST_FAIL BIT(0) +#define SDW_DPN_INT_PORT_READY BIT(1) +#define SDW_DPN_INT_IMPDEF1 BIT(5) +#define SDW_DPN_INT_IMPDEF2 BIT(6) +#define SDW_DPN_INT_IMPDEF3 BIT(7) + +#define SDW_DPN_PORTCTRL_FLOWMODE GENMASK(1, 0) +#define SDW_DPN_PORTCTRL_DATAMODE GENMASK(3, 2) +#define SDW_DPN_PORTCTRL_NXTINVBANK BIT(4) + +#define SDW_DPN_BLOCKCTRL1_WDLEN GENMASK(5, 0) + +#define SDW_DPN_PREPARECTRL_CH_PREP GENMASK(7, 0) + +#define 
SDW_DPN_CHANNELEN_B0(n) (0x20 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_CHANNELEN_B1(n) (0x30 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_BLOCKCTRL2_B0(n) (0x21 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_BLOCKCTRL2_B1(n) (0x31 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_SAMPLECTRL1_B0(n) (0x22 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_SAMPLECTRL1_B1(n) (0x32 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_SAMPLECTRL2_B0(n) (0x23 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_SAMPLECTRL2_B1(n) (0x33 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_OFFSETCTRL1_B0(n) (0x24 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_OFFSETCTRL1_B1(n) (0x34 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_OFFSETCTRL2_B0(n) (0x25 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_OFFSETCTRL2_B1(n) (0x35 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_HCTRL_B0(n) (0x26 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_HCTRL_B1(n) (0x36 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_BLOCKCTRL3_B0(n) (0x27 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_BLOCKCTRL3_B1(n) (0x37 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_LANECTRL_B0(n) (0x28 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_LANECTRL_B1(n) (0x38 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_SAMPLECTRL_LOW GENMASK(7, 0) +#define SDW_DPN_SAMPLECTRL_HIGH GENMASK(15, 8) + +#define SDW_DPN_HCTRL_HSTART GENMASK(7, 4) +#define SDW_DPN_HCTRL_HSTOP GENMASK(3, 0) + +#define SDW_NUM_CASC_PORT_INTSTAT1 4 +#define SDW_CASC_PORT_START_INTSTAT1 0 +#define SDW_CASC_PORT_MASK_INTSTAT1 0x8 +#define SDW_CASC_PORT_REG_OFFSET_INTSTAT1 0x0 + +#define SDW_NUM_CASC_PORT_INTSTAT2 7 +#define SDW_CASC_PORT_START_INTSTAT2 4 +#define SDW_CASC_PORT_MASK_INTSTAT2 1 +#define SDW_CASC_PORT_REG_OFFSET_INTSTAT2 1 + +#define SDW_NUM_CASC_PORT_INTSTAT3 4 +#define SDW_CASC_PORT_START_INTSTAT3 11 +#define SDW_CASC_PORT_MASK_INTSTAT3 1 +#define SDW_CASC_PORT_REG_OFFSET_INTSTAT3 2 + +#endif /* __SDW_REGISTERS_H */ diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h new file mode 100644 index 000000000..9fd553e55 --- /dev/null +++ b/include/linux/soundwire/sdw_type.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright(c) 2015-17 Intel Corporation. + +#ifndef __SOUNDWIRE_TYPES_H +#define __SOUNDWIRE_TYPES_H + +extern struct bus_type sdw_bus_type; + +#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver) + +#define sdw_register_driver(drv) \ + __sdw_register_driver(drv, THIS_MODULE) + +int __sdw_register_driver(struct sdw_driver *drv, struct module *); +void sdw_unregister_driver(struct sdw_driver *drv); + +int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size); + +#endif /* __SOUNDWIRE_TYPES_H */ diff --git a/include/linux/spi/ad7877.h b/include/linux/spi/ad7877.h new file mode 100644 index 000000000..b7be843c8 --- /dev/null +++ b/include/linux/spi/ad7877.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* linux/spi/ad7877.h */ + +/* Touchscreen characteristics vary between boards and models. The + * platform_data for the device's "struct device" holds this information. + * + * It's OK if the min/max values are zero. 
+ */ +struct ad7877_platform_data { + u16 model; /* 7877 */ + u16 vref_delay_usecs; /* 0 for external vref; etc */ + u16 x_plate_ohms; + u16 y_plate_ohms; + + u16 x_min, x_max; + u16 y_min, y_max; + u16 pressure_min, pressure_max; + + u8 stopacq_polarity; /* 1 = Active HIGH, 0 = Active LOW */ + u8 first_conversion_delay; /* 0 = 0.5us, 1 = 128us, 2 = 1ms, 3 = 8ms */ + u8 acquisition_time; /* 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */ + u8 averaging; /* 0 = 1, 1 = 4, 2 = 8, 3 = 16 */ + u8 pen_down_acc_interval; /* 0 = covert once, 1 = every 0.5 ms, + 2 = ever 1 ms, 3 = every 8 ms,*/ +}; diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h new file mode 100644 index 000000000..1a5eaef3b --- /dev/null +++ b/include/linux/spi/ads7846.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* linux/spi/ads7846.h */ + +/* Touchscreen characteristics vary between boards and models. The + * platform_data for the device's "struct device" holds this information. + * + * It's OK if the min/max values are zero. + */ +enum ads7846_filter { + ADS7846_FILTER_OK, + ADS7846_FILTER_REPEAT, + ADS7846_FILTER_IGNORE, +}; + +struct ads7846_platform_data { + u16 model; /* 7843, 7845, 7846, 7873. */ + u16 vref_delay_usecs; /* 0 for external vref; etc */ + u16 vref_mv; /* external vref value, milliVolts + * ads7846: if 0, use internal vref */ + bool keep_vref_on; /* set to keep vref on for differential + * measurements as well */ + bool swap_xy; /* swap x and y axes */ + + /* Settling time of the analog signals; a function of Vcc and the + * capacitance on the X/Y drivers. If set to non-zero, two samples + * are taken with settle_delay us apart, and the second one is used. + * ~150 uSec with 0.01uF caps. + */ + u16 settle_delay_usecs; + + /* If set to non-zero, after samples are taken this delay is applied + * and penirq is rechecked, to help avoid false events. This value + * is affected by the material used to build the touch layer. + */ + u16 penirq_recheck_delay_usecs; + + u16 x_plate_ohms; + u16 y_plate_ohms; + + u16 x_min, x_max; + u16 y_min, y_max; + u16 pressure_min, pressure_max; + + u16 debounce_max; /* max number of additional readings + * per sample */ + u16 debounce_tol; /* tolerance used for filtering */ + u16 debounce_rep; /* additional consecutive good readings + * required after the first two */ + int gpio_pendown; /* the GPIO used to decide the pendown + * state if get_pendown_state == NULL */ + int gpio_pendown_debounce; /* platform specific debounce time for + * the gpio_pendown */ + int (*get_pendown_state)(void); + int (*filter_init) (const struct ads7846_platform_data *pdata, + void **filter_data); + int (*filter) (void *filter_data, int data_idx, int *val); + void (*filter_cleanup)(void *filter_data); + void (*wait_for_sync)(void); + bool wakeup; + unsigned long irq_flags; +}; + diff --git a/include/linux/spi/at73c213.h b/include/linux/spi/at73c213.h new file mode 100644 index 000000000..cbca6654a --- /dev/null +++ b/include/linux/spi/at73c213.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Board-specific data used to set up AT73c213 audio DAC driver. + */ + +#ifndef __LINUX_SPI_AT73C213_H +#define __LINUX_SPI_AT73C213_H + +/** + * at73c213_board_info - how the external DAC is wired to the device. + * + * @ssc_id: SSC platform_driver id the DAC shall use to stream the audio. + * @dac_clk: the external clock used to provide master clock to the DAC. + * @shortname: a short discription for the DAC, seen by userspace tools. 
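+ *
+ * A minimal, purely illustrative setup (the names and values are not from
+ * any real board) could look like:
+ *
+ *	static struct at73c213_board_info at73c213_data = {
+ *		.ssc_id		= 0,
+ *		.shortname	= "AT73C213 external DAC",
+ *	};
+ *
+ * with @dac_clk normally filled in at board-init time, e.g. from clk_get().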
+ * + * This struct contains the configuration of the hardware connection to the + * external DAC. The DAC needs a master clock and a I2S audio stream. It also + * provides a name which is used to identify it in userspace tools. + */ +struct at73c213_board_info { + int ssc_id; + struct clk *dac_clk; + char shortname[32]; +}; + +#endif /* __LINUX_SPI_AT73C213_H */ diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h new file mode 100644 index 000000000..b63fe6f5f --- /dev/null +++ b/include/linux/spi/at86rf230.h @@ -0,0 +1,28 @@ +/* + * AT86RF230/RF231 driver + * + * Copyright (C) 2009-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Written by: + * Dmitry Eremin-Solenikov + */ +#ifndef AT86RF230_H +#define AT86RF230_H + +struct at86rf230_platform_data { + int rstn; + int slp_tr; + int dig2; + u8 xtal_trim; +}; + +#endif diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h new file mode 100644 index 000000000..85b8ee67e --- /dev/null +++ b/include/linux/spi/cc2520.h @@ -0,0 +1,26 @@ +/* Header file for cc2520 radio driver + * + * Copyright (C) 2014 Varka Bhadram + * Md.Jamal Mohiuddin + * P Sowjanya + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#ifndef __CC2520_H +#define __CC2520_H + +struct cc2520_platform_data { + int fifo; + int fifop; + int cca; + int sfd; + int reset; + int vreg; +}; + +#endif diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h new file mode 100644 index 000000000..edf4beccd --- /dev/null +++ b/include/linux/spi/corgi_lcd.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SPI_CORGI_LCD_H +#define __LINUX_SPI_CORGI_LCD_H + +#define CORGI_LCD_MODE_QVGA 1 +#define CORGI_LCD_MODE_VGA 2 + +struct corgi_lcd_platform_data { + int init_mode; + int max_intensity; + int default_intensity; + int limit_mask; + + int gpio_backlight_on; /* -1 if n/a */ + int gpio_backlight_cont; /* -1 if n/a */ + + void (*notify)(int intensity); + void (*kick_battery)(void); +}; + +#endif /* __LINUX_SPI_CORGI_LCD_H */ diff --git a/include/linux/spi/ds1305.h b/include/linux/spi/ds1305.h new file mode 100644 index 000000000..82db6cd15 --- /dev/null +++ b/include/linux/spi/ds1305.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SPI_DS1305_H +#define __LINUX_SPI_DS1305_H + +/* + * One-time configuration for ds1305 and ds1306 RTC chips. + * + * Put a pointer to this in spi_board_info.platform_data if you want to + * be sure that Linux (re)initializes this as needed ... after losing + * backup power, and potentially on the first boot. + */ +struct ds1305_platform_data { + + /* Trickle charge configuration: it's OK to leave out the MAGIC + * bitmask; mask in either DS1 or DS2, and then one of 2K/4k/8K. 
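+ *
+ * For example, enabling the charger through one diode and a 2 kOhm
+ * resistor amounts to setting:
+ *
+ *	.trickle = DS1305_TRICKLE_MAGIC | DS1305_TRICKLE_DS1 | DS1305_TRICKLE_2K,
+ *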
+ */ +#define DS1305_TRICKLE_MAGIC 0xa0 +#define DS1305_TRICKLE_DS2 0x08 /* two diodes */ +#define DS1305_TRICKLE_DS1 0x04 /* one diode */ +#define DS1305_TRICKLE_2K 0x01 /* 2 KOhm resistance */ +#define DS1305_TRICKLE_4K 0x02 /* 4 KOhm resistance */ +#define DS1305_TRICKLE_8K 0x03 /* 8 KOhm resistance */ + u8 trickle; + + /* set only on ds1306 parts */ + bool is_ds1306; + + /* ds1306 only: enable 1 Hz output */ + bool en_1hz; + + /* REVISIT: the driver currently expects nINT0 to be wired + * as the alarm IRQ. ALM1 may also need to be set up ... + */ +}; + +#endif /* __LINUX_SPI_DS1305_H */ diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h new file mode 100644 index 000000000..aceccf9c7 --- /dev/null +++ b/include/linux/spi/eeprom.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SPI_EEPROM_H +#define __LINUX_SPI_EEPROM_H + +#include + +/* + * Put one of these structures in platform_data for SPI EEPROMS handled + * by the "at25" driver. On SPI, most EEPROMS understand the same core + * command set. If you need to support EEPROMs that don't yet fit, add + * flags to support those protocol options. These values all come from + * the chip datasheets. + */ +struct spi_eeprom { + u32 byte_len; + char name[10]; + u16 page_size; /* for writes */ + u16 flags; +#define EE_ADDR1 0x0001 /* 8 bit addrs */ +#define EE_ADDR2 0x0002 /* 16 bit addrs */ +#define EE_ADDR3 0x0004 /* 24 bit addrs */ +#define EE_READONLY 0x0008 /* disallow writes */ + + /* + * Certain EEPROMS have a size that is larger than the number of address + * bytes would allow (e.g. like M95040 from ST that has 512 Byte size + * but uses only one address byte (A0 to A7) for addressing.) For + * the extra address bit (A8, A16 or A24) bit 3 of the instruction byte + * is used. This instruction bit is normally defined as don't care for + * other AT25 like chips. + */ +#define EE_INSTR_BIT3_IS_ADDR 0x0010 + + void *context; +}; + +#endif /* __LINUX_SPI_EEPROM_H */ diff --git a/include/linux/spi/flash.h b/include/linux/spi/flash.h new file mode 100644 index 000000000..2401a0887 --- /dev/null +++ b/include/linux/spi/flash.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_SPI_FLASH_H +#define LINUX_SPI_FLASH_H + +struct mtd_partition; + +/** + * struct flash_platform_data: board-specific flash data + * @name: optional flash device name (eg, as used with mtdparts=) + * @parts: optional array of mtd_partitions for static partitioning + * @nr_parts: number of mtd_partitions for static partitioning + * @type: optional flash device type (e.g. m25p80 vs m25p64), for use + * with chips that can't be queried for JEDEC or other IDs + * + * Board init code (in arch/.../mach-xxx/board-yyy.c files) can + * provide information about SPI flash parts (such as DataFlash) to + * help set up the device and its appropriate default partitioning. + * + * Note that for DataFlash, sizes for pages, blocks, and sectors are + * rarely powers of two; and partitions should be sector-aligned. + */ +struct flash_platform_data { + char *name; + struct mtd_partition *parts; + unsigned int nr_parts; + + char *type; + + /* we'll likely add more ... 
use JEDEC IDs, etc */ +}; + +#endif diff --git a/include/linux/spi/ifx_modem.h b/include/linux/spi/ifx_modem.h new file mode 100644 index 000000000..694268c78 --- /dev/null +++ b/include/linux/spi/ifx_modem.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_IFX_MODEM_H +#define LINUX_IFX_MODEM_H + +struct ifx_modem_platform_data { + unsigned short rst_out; /* modem reset out */ + unsigned short pwr_on; /* power on */ + unsigned short rst_pmu; /* reset modem */ + unsigned short tx_pwr; /* modem power threshold */ + unsigned short srdy; /* SRDY */ + unsigned short mrdy; /* MRDY */ + unsigned char modem_type; /* Modem type */ + unsigned long max_hz; /* max SPI frequency */ + unsigned short use_dma:1; /* spi protocol driver supplies + dma-able addrs */ +}; +#define IFX_MODEM_6160 1 +#define IFX_MODEM_6260 2 + +#endif diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h new file mode 100644 index 000000000..e69e9b51b --- /dev/null +++ b/include/linux/spi/l4f00242t03.h @@ -0,0 +1,25 @@ +/* + * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD + * + * Copyright (c) 2009 Alberto Panizzo + * Based on Marek Vasut work in lms283gf05.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + +#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ +#define _INCLUDE_LINUX_SPI_L4F00242T03_H_ + +struct l4f00242t03_pdata { + unsigned int reset_gpio; + unsigned int data_enable_gpio; +}; + +#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */ diff --git a/include/linux/spi/libertas_spi.h b/include/linux/spi/libertas_spi.h new file mode 100644 index 000000000..1b5d5384f --- /dev/null +++ b/include/linux/spi/libertas_spi.h @@ -0,0 +1,29 @@ +/* + * board-specific data for the libertas_spi driver. + * + * Copyright 2008 Analog Devices Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ +#ifndef _LIBERTAS_SPI_H_ +#define _LIBERTAS_SPI_H_ + +struct spi_device; + +struct libertas_spi_platform_data { + /* There are two ways to read data from the WLAN module's SPI + * interface. Setting 0 or 1 here controls which one is used. + * + * Usually you want to set use_dummy_writes = 1. + * However, if that doesn't work or if you are using a slow SPI clock + * speed, you may want to use 0 here. */ + u16 use_dummy_writes; + + /* Board specific setup/teardown */ + int (*setup)(struct spi_device *spi); + int (*teardown)(struct spi_device *spi); +}; +#endif diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h new file mode 100644 index 000000000..fdd1d1d51 --- /dev/null +++ b/include/linux/spi/lms283gf05.h @@ -0,0 +1,24 @@ +/* + * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD + * + * Copyright (C) 2009 Marek Vasut + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + +#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ +#define _INCLUDE_LINUX_SPI_LMS283GF05_H_ + +struct lms283gf05_pdata { + unsigned long reset_gpio; + bool reset_inverted; +}; + +#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */ diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h new file mode 100644 index 000000000..433c20e2f --- /dev/null +++ b/include/linux/spi/max7301.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_SPI_MAX7301_H +#define LINUX_SPI_MAX7301_H + +#include + +/* + * Some registers must be read back to modify. + * To save time we cache them here in memory + */ +struct max7301 { + struct mutex lock; + u8 port_config[8]; /* field 0 is unused */ + u32 out_level; /* cached output levels */ + u32 input_pullup_active; + struct gpio_chip chip; + struct device *dev; + int (*write)(struct device *dev, unsigned int reg, unsigned int val); + int (*read)(struct device *dev, unsigned int reg); +}; + +struct max7301_platform_data { + /* number assigned to the first GPIO */ + unsigned base; + /* + * bitmask controlling the pullup configuration, + * + * _note_ the 4 lowest bits are unused, because the first 4 + * ports of the controller are not used, too. + */ + u32 input_pullup_active; +}; + +extern int __max730x_remove(struct device *dev); +extern int __max730x_probe(struct max7301 *ts); +#endif diff --git a/include/linux/spi/mc33880.h b/include/linux/spi/mc33880.h new file mode 100644 index 000000000..205a49cb9 --- /dev/null +++ b/include/linux/spi/mc33880.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_SPI_MC33880_H +#define LINUX_SPI_MC33880_H + +struct mc33880_platform_data { + /* number assigned to the first GPIO */ + unsigned base; +}; + +#endif + diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h new file mode 100644 index 000000000..738a45b43 --- /dev/null +++ b/include/linux/spi/mcp23s08.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +struct mcp23s08_platform_data { + /* For mcp23s08, up to 4 slaves (numbered 0..3) can share one SPI + * chipselect, each providing 1 gpio_chip instance with 8 gpios. + * For mpc23s17, up to 8 slaves (numbered 0..7) can share one SPI + * chipselect, each providing 1 gpio_chip (port A + port B) with + * 16 gpios. + */ + u32 spi_present_mask; + + /* "base" is the number of the first GPIO or -1 for dynamic + * assignment. If there are gaps in chip addressing the GPIO + * numbers are sequential .. so for example if only slaves 0 + * and 3 are present, their GPIOs range from base to base+15 + * (or base+31 for s17 variant). + */ + unsigned base; +}; diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h new file mode 100644 index 000000000..bfde741a5 --- /dev/null +++ b/include/linux/spi/mmc_spi.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SPI_MMC_SPI_H +#define __LINUX_SPI_MMC_SPI_H + +#include +#include + +struct device; +struct mmc_host; + +#define MMC_SPI_USE_CD_GPIO (1 << 0) +#define MMC_SPI_USE_RO_GPIO (1 << 1) +#define MMC_SPI_CD_GPIO_ACTIVE_LOW (1 << 2) +#define MMC_SPI_RO_GPIO_ACTIVE_LOW (1 << 3) + +/* Put this in platform_data of a device being used to manage an MMC/SD + * card slot. (Modeled after PXA mmc glue; see that for usage examples.) 
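+ *
+ * A bare-bones, purely illustrative configuration for a polled 3.3V-only
+ * slot (MMC_CAP_NEEDS_POLL and the MMC_VDD_* masks come from the MMC core
+ * headers; the numbers are made up):
+ *
+ *	static struct mmc_spi_platform_data mmc_spi_pdata = {
+ *		.caps		= MMC_CAP_NEEDS_POLL,
+ *		.detect_delay	= 100,
+ *		.powerup_msecs	= 100,
+ *		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
+ *	};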
+ * + * REVISIT This is not a spi-specific notion. Any card slot should be + * able to handle it. If the MMC core doesn't adopt this kind of notion, + * switch the "struct device *" parameters over to "struct spi_device *". + */ +struct mmc_spi_platform_data { + /* driver activation and (optional) card detect irq hookup */ + int (*init)(struct device *, + irqreturn_t (*)(int, void *), + void *); + void (*exit)(struct device *, void *); + + /* + * Card Detect and Read Only GPIOs. To enable debouncing on the card + * detect GPIO, set the cd_debounce to the debounce time in + * microseconds. + */ + unsigned int flags; + unsigned int cd_gpio; + unsigned int cd_debounce; + unsigned int ro_gpio; + + /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */ + unsigned long caps; + unsigned long caps2; + + /* how long to debounce card detect, in msecs */ + u16 detect_delay; + + /* power management */ + u16 powerup_msecs; /* delay of up to 250 msec */ + u32 ocr_mask; /* available voltages */ + void (*setpower)(struct device *, unsigned int maskval); +}; + +#ifdef CONFIG_OF +extern struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi); +extern void mmc_spi_put_pdata(struct spi_device *spi); +#else +static inline struct mmc_spi_platform_data * +mmc_spi_get_pdata(struct spi_device *spi) +{ + return spi->dev.platform_data; +} +static inline void mmc_spi_put_pdata(struct spi_device *spi) {} +#endif /* CONFIG_OF */ + +#endif /* __LINUX_SPI_MMC_SPI_H */ diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h new file mode 100644 index 000000000..381d368b9 --- /dev/null +++ b/include/linux/spi/mxs-spi.h @@ -0,0 +1,144 @@ +/* + * include/linux/spi/mxs-spi.h + * + * Freescale i.MX233/i.MX28 SPI controller register definition + * + * Copyright 2008 Embedded Alley Solutions, Inc. + * Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __LINUX_SPI_MXS_SPI_H__ +#define __LINUX_SPI_MXS_SPI_H__ + +#include + +#define ssp_is_old(host) ((host)->devid == IMX23_SSP) + +/* SSP registers */ +#define HW_SSP_CTRL0 0x000 +#define BM_SSP_CTRL0_RUN (1 << 29) +#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28) +#define BM_SSP_CTRL0_LOCK_CS (1 << 27) +#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26) +#define BM_SSP_CTRL0_READ (1 << 25) +#define BM_SSP_CTRL0_DATA_XFER (1 << 24) +#define BP_SSP_CTRL0_BUS_WIDTH 22 +#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22) +#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21) +#define BM_SSP_CTRL0_WAIT_FOR_CMD (1 << 20) +#define BM_SSP_CTRL0_LONG_RESP (1 << 19) +#define BM_SSP_CTRL0_GET_RESP (1 << 17) +#define BM_SSP_CTRL0_ENABLE (1 << 16) +#define BP_SSP_CTRL0_XFER_COUNT 0 +#define BM_SSP_CTRL0_XFER_COUNT 0xffff +#define HW_SSP_CMD0 0x010 +#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25) +#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22) +#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21) +#define BM_SSP_CMD0_APPEND_8CYC (1 << 20) +#define BP_SSP_CMD0_BLOCK_SIZE 16 +#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16) +#define BP_SSP_CMD0_BLOCK_COUNT 8 +#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8) +#define BP_SSP_CMD0_CMD 0 +#define BM_SSP_CMD0_CMD 0xff +#define HW_SSP_CMD1 0x020 +#define HW_SSP_XFER_SIZE 0x030 +#define HW_SSP_BLOCK_SIZE 0x040 +#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT 4 +#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4) +#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE 0 +#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE 0xf +#define HW_SSP_TIMING(h) (ssp_is_old(h) ? 0x050 : 0x070) +#define BP_SSP_TIMING_TIMEOUT 16 +#define BM_SSP_TIMING_TIMEOUT (0xffff << 16) +#define BP_SSP_TIMING_CLOCK_DIVIDE 8 +#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8) +#define BF_SSP_TIMING_CLOCK_DIVIDE(v) \ + (((v) << 8) & BM_SSP_TIMING_CLOCK_DIVIDE) +#define BP_SSP_TIMING_CLOCK_RATE 0 +#define BM_SSP_TIMING_CLOCK_RATE 0xff +#define BF_SSP_TIMING_CLOCK_RATE(v) \ + (((v) << 0) & BM_SSP_TIMING_CLOCK_RATE) +#define HW_SSP_CTRL1(h) (ssp_is_old(h) ? 
0x060 : 0x080) +#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31) +#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30) +#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29) +#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28) +#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27) +#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26) +#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25) +#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24) +#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23) +#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22) +#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21) +#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20) +#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17) +#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16) +#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15) +#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14) +#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13) +#define BM_SSP_CTRL1_PHASE (1 << 10) +#define BM_SSP_CTRL1_POLARITY (1 << 9) +#define BP_SSP_CTRL1_WORD_LENGTH 4 +#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4) +#define BF_SSP_CTRL1_WORD_LENGTH(v) \ + (((v) << 4) & BM_SSP_CTRL1_WORD_LENGTH) +#define BV_SSP_CTRL1_WORD_LENGTH__FOUR_BITS 0x3 +#define BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS 0x7 +#define BV_SSP_CTRL1_WORD_LENGTH__SIXTEEN_BITS 0xF +#define BP_SSP_CTRL1_SSP_MODE 0 +#define BM_SSP_CTRL1_SSP_MODE 0xf +#define BF_SSP_CTRL1_SSP_MODE(v) \ + (((v) << 0) & BM_SSP_CTRL1_SSP_MODE) +#define BV_SSP_CTRL1_SSP_MODE__SPI 0x0 +#define BV_SSP_CTRL1_SSP_MODE__SSI 0x1 +#define BV_SSP_CTRL1_SSP_MODE__SD_MMC 0x3 +#define BV_SSP_CTRL1_SSP_MODE__MS 0x4 + +#define HW_SSP_DATA(h) (ssp_is_old(h) ? 0x070 : 0x090) + +#define HW_SSP_SDRESP0(h) (ssp_is_old(h) ? 0x080 : 0x0a0) +#define HW_SSP_SDRESP1(h) (ssp_is_old(h) ? 0x090 : 0x0b0) +#define HW_SSP_SDRESP2(h) (ssp_is_old(h) ? 0x0a0 : 0x0c0) +#define HW_SSP_SDRESP3(h) (ssp_is_old(h) ? 0x0b0 : 0x0d0) +#define HW_SSP_STATUS(h) (ssp_is_old(h) ? 0x0c0 : 0x100) +#define BM_SSP_STATUS_CARD_DETECT (1 << 28) +#define BM_SSP_STATUS_SDIO_IRQ (1 << 17) +#define BM_SSP_STATUS_FIFO_EMPTY (1 << 5) + +#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field) + +#define SSP_PIO_NUM 3 + +enum mxs_ssp_id { + IMX23_SSP, + IMX28_SSP, +}; + +struct mxs_ssp { + struct device *dev; + void __iomem *base; + struct clk *clk; + unsigned int clk_rate; + enum mxs_ssp_id devid; + + struct dma_chan *dmach; + unsigned int dma_dir; + enum dma_transfer_direction slave_dirn; + u32 ssp_pio_words[SSP_PIO_NUM]; +}; + +void mxs_ssp_set_clk_rate(struct mxs_ssp *ssp, unsigned int rate); + +#endif /* __LINUX_SPI_MXS_SPI_H__ */ diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h new file mode 100644 index 000000000..9ec4c147a --- /dev/null +++ b/include/linux/spi/pxa2xx_spi.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __linux_pxa2xx_spi_h +#define __linux_pxa2xx_spi_h + +#include + +#define PXA2XX_CS_ASSERT (0x01) +#define PXA2XX_CS_DEASSERT (0x02) + +struct dma_chan; + +/* device.platform_data for SSP controller devices */ +struct pxa2xx_spi_master { + u16 num_chipselect; + u8 enable_dma; + + /* DMA engine specific config */ + bool (*dma_filter)(struct dma_chan *chan, void *param); + void *tx_param; + void *rx_param; + + /* For non-PXA arches */ + struct ssp_device ssp; +}; + +/* spi_board_info.controller_data for SPI slave devices, + * copied to spi_device.platform_data ... mostly for dma tuning + */ +struct pxa2xx_spi_chip { + u8 tx_threshold; + u8 tx_hi_threshold; + u8 rx_threshold; + u8 dma_burst_size; + u32 timeout; + u8 enable_loopback; + int gpio_cs; + void (*cs_control)(u32 command); +}; + +#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) + +#include + +extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); + +#endif +#endif diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h new file mode 100644 index 000000000..a693188cc --- /dev/null +++ b/include/linux/spi/rspi.h @@ -0,0 +1,26 @@ +/* + * Renesas SPI driver + * + * Copyright (C) 2012 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_SPI_RENESAS_SPI_H__ +#define __LINUX_SPI_RENESAS_SPI_H__ + +struct rspi_plat_data { + unsigned int dma_tx_id; + unsigned int dma_rx_id; + + u16 num_chipselect; +}; + +#endif diff --git a/include/linux/spi/s3c24xx.h b/include/linux/spi/s3c24xx.h new file mode 100644 index 000000000..ca271c06c --- /dev/null +++ b/include/linux/spi/s3c24xx.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2006 Simtec Electronics + * Ben Dooks + * + * S3C2410 - SPI Controller platform_device info + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __LINUX_SPI_S3C24XX_H +#define __LINUX_SPI_S3C24XX_H __FILE__ + +struct s3c2410_spi_info { + int pin_cs; /* simple gpio cs */ + unsigned int num_cs; /* total chipselects */ + int bus_num; /* bus number to use. */ + + unsigned int use_fiq:1; /* use fiq */ + + void (*gpio_setup)(struct s3c2410_spi_info *spi, int enable); + void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); +}; + +extern int s3c24xx_set_fiq(unsigned int irq, bool on); + +#endif /* __LINUX_SPI_S3C24XX_H */ diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h new file mode 100644 index 000000000..aa0d440ab --- /dev/null +++ b/include/linux/spi/sh_hspi.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2011 Kuninori Morimoto + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ +#ifndef SH_HSPI_H +#define SH_HSPI_H + +struct sh_hspi_info { +}; + +#endif diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h new file mode 100644 index 000000000..dc2a0cbd2 --- /dev/null +++ b/include/linux/spi/sh_msiof.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SPI_SH_MSIOF_H__ +#define __SPI_SH_MSIOF_H__ + +enum { + MSIOF_SPI_MASTER, + MSIOF_SPI_SLAVE, +}; + +struct sh_msiof_spi_info { + int tx_fifo_override; + int rx_fifo_override; + u16 num_chipselect; + int mode; + unsigned int dma_tx_id; + unsigned int dma_rx_id; + u32 dtdl; + u32 syncdl; +}; + +#endif /* __SPI_SH_MSIOF_H__ */ diff --git a/include/linux/spi/spi-fsl-dspi.h b/include/linux/spi/spi-fsl-dspi.h new file mode 100644 index 000000000..74c9bae20 --- /dev/null +++ b/include/linux/spi/spi-fsl-dspi.h @@ -0,0 +1,31 @@ +/* + * Freescale DSPI controller driver + * + * Copyright (c) 2017 Angelo Dureghello + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef SPI_FSL_DSPI_HEADER_H +#define SPI_FSL_DSPI_HEADER_H + +/** + * struct fsl_dspi_platform_data - platform data for the Freescale DSPI driver + * @bus_num: board specific identifier for this DSPI driver. + * @cs_num: number of chip selects supported by this DSPI driver. + */ +struct fsl_dspi_platform_data { + u32 cs_num; + u32 bus_num; + u32 sck_cs_delay; + u32 cs_sck_delay; +}; + +#endif /* SPI_FSL_DSPI_HEADER_H */ diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h new file mode 100644 index 000000000..69ee30456 --- /dev/null +++ b/include/linux/spi/spi-mem.h @@ -0,0 +1,264 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 Exceet Electronics GmbH + * Copyright (C) 2018 Bootlin + * + * Author: + * Peter Pan + * Boris Brezillon + */ + +#ifndef __LINUX_SPI_MEM_H +#define __LINUX_SPI_MEM_H + +#include + +#define SPI_MEM_OP_CMD(__opcode, __buswidth) \ + { \ + .buswidth = __buswidth, \ + .opcode = __opcode, \ + } + +#define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \ + { \ + .nbytes = __nbytes, \ + .val = __val, \ + .buswidth = __buswidth, \ + } + +#define SPI_MEM_OP_NO_ADDR { } + +#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth) \ + { \ + .nbytes = __nbytes, \ + .buswidth = __buswidth, \ + } + +#define SPI_MEM_OP_NO_DUMMY { } + +#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth) \ + { \ + .dir = SPI_MEM_DATA_IN, \ + .nbytes = __nbytes, \ + .buf.in = __buf, \ + .buswidth = __buswidth, \ + } + +#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth) \ + { \ + .dir = SPI_MEM_DATA_OUT, \ + .nbytes = __nbytes, \ + .buf.out = __buf, \ + .buswidth = __buswidth, \ + } + +#define SPI_MEM_OP_NO_DATA { } + +/** + * enum spi_mem_data_dir - describes the direction of a SPI memory data + * transfer from the controller perspective + * @SPI_MEM_DATA_IN: data coming from the SPI memory + * @SPI_MEM_DATA_OUT: data sent the SPI memory + */ +enum spi_mem_data_dir { + SPI_MEM_DATA_IN, + SPI_MEM_DATA_OUT, +}; + +/** + * struct spi_mem_op - describes a SPI memory operation + * @cmd.buswidth: number of IO lines used to transmit the command + * 
@cmd.opcode: operation opcode + * @addr.nbytes: number of address bytes to send. Can be zero if the operation + * does not need to send an address + * @addr.buswidth: number of IO lines used to transmit the address cycles + * @addr.val: address value. This value is always sent MSB first on the bus. + * Note that only @addr.nbytes are taken into account in this + * address value, so users should make sure the value fits in the + * assigned number of bytes. + * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can + * be zero if the operation does not require dummy bytes + * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes + * @data.buswidth: number of IO lanes used to send/receive the data + * @data.dir: direction of the transfer + * @data.nbytes: number of data bytes to send/receive. Can be zero if the + * operation does not involve transferring data + * @data.buf.in: input buffer (must be DMA-able) + * @data.buf.out: output buffer (must be DMA-able) + */ +struct spi_mem_op { + struct { + u8 buswidth; + u8 opcode; + } cmd; + + struct { + u8 nbytes; + u8 buswidth; + u64 val; + } addr; + + struct { + u8 nbytes; + u8 buswidth; + } dummy; + + struct { + u8 buswidth; + enum spi_mem_data_dir dir; + unsigned int nbytes; + union { + void *in; + const void *out; + } buf; + } data; +}; + +#define SPI_MEM_OP(__cmd, __addr, __dummy, __data) \ + { \ + .cmd = __cmd, \ + .addr = __addr, \ + .dummy = __dummy, \ + .data = __data, \ + } + +/** + * struct spi_mem - describes a SPI memory device + * @spi: the underlying SPI device + * @drvpriv: spi_mem_driver private data + * @name: name of the SPI memory device + * + * Extra information that describe the SPI memory device and may be needed by + * the controller to properly handle this device should be placed here. + * + * One example would be the device size since some controller expose their SPI + * mem devices through a io-mapped region. + */ +struct spi_mem { + struct spi_device *spi; + void *drvpriv; + const char *name; +}; + +/** + * struct spi_mem_set_drvdata() - attach driver private data to a SPI mem + * device + * @mem: memory device + * @data: data to attach to the memory device + */ +static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data) +{ + mem->drvpriv = data; +} + +/** + * struct spi_mem_get_drvdata() - get driver private data attached to a SPI mem + * device + * @mem: memory device + * + * Return: the data attached to the mem device. + */ +static inline void *spi_mem_get_drvdata(struct spi_mem *mem) +{ + return mem->drvpriv; +} + +/** + * struct spi_controller_mem_ops - SPI memory operations + * @adjust_op_size: shrink the data xfer of an operation to match controller's + * limitations (can be alignment of max RX/TX size + * limitations) + * @supports_op: check if an operation is supported by the controller + * @exec_op: execute a SPI memory operation + * @get_name: get a custom name for the SPI mem device from the controller. + * This might be needed if the controller driver has been ported + * to use the SPI mem layer and a custom name is used to keep + * mtdparts compatible. + * Note that if the implementation of this function allocates memory + * dynamically, then it should do so with devm_xxx(), as we don't + * have a ->free_name() function. + * + * This interface should be implemented by SPI controllers providing an + * high-level interface to execute SPI memory operation, which is usually the + * case for QSPI controllers. 
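+ *
+ * From the memory driver side, an operation is normally built with the
+ * SPI_MEM_OP() helpers above and passed to spi_mem_exec_op(). A sketch of a
+ * single-wire read (the opcode, length and buffer names are illustrative
+ * only):
+ *
+ *	struct spi_mem_op op =
+ *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
+ *			   SPI_MEM_OP_NO_ADDR,
+ *			   SPI_MEM_OP_NO_DUMMY,
+ *			   SPI_MEM_OP_DATA_IN(len, buf, 1));
+ *
+ * where buf must be a DMA-able buffer, after which the driver may call
+ * spi_mem_adjust_op_size() and spi_mem_exec_op() declared below.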
+ */ +struct spi_controller_mem_ops { + int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op); + bool (*supports_op)(struct spi_mem *mem, + const struct spi_mem_op *op); + int (*exec_op)(struct spi_mem *mem, + const struct spi_mem_op *op); + const char *(*get_name)(struct spi_mem *mem); +}; + +/** + * struct spi_mem_driver - SPI memory driver + * @spidrv: inherit from a SPI driver + * @probe: probe a SPI memory. Usually where detection/initialization takes + * place + * @remove: remove a SPI memory + * @shutdown: take appropriate action when the system is shutdown + * + * This is just a thin wrapper around a spi_driver. The core takes care of + * allocating the spi_mem object and forwarding the probe/remove/shutdown + * request to the spi_mem_driver. The reason we use this wrapper is because + * we might have to stuff more information into the spi_mem struct to let + * SPI controllers know more about the SPI memory they interact with, and + * having this intermediate layer allows us to do that without adding more + * useless fields to the spi_device object. + */ +struct spi_mem_driver { + struct spi_driver spidrv; + int (*probe)(struct spi_mem *mem); + int (*remove)(struct spi_mem *mem); + void (*shutdown)(struct spi_mem *mem); +}; + +#if IS_ENABLED(CONFIG_SPI_MEM) +int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, + const struct spi_mem_op *op, + struct sg_table *sg); + +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, + const struct spi_mem_op *op, + struct sg_table *sg); +#else +static inline int +spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, + const struct spi_mem_op *op, + struct sg_table *sg) +{ + return -ENOTSUPP; +} + +static inline void +spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, + const struct spi_mem_op *op, + struct sg_table *sg) +{ +} +#endif /* CONFIG_SPI_MEM */ + +int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op); + +bool spi_mem_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op); + +int spi_mem_exec_op(struct spi_mem *mem, + const struct spi_mem_op *op); + +const char *spi_mem_get_name(struct spi_mem *mem); + +int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv, + struct module *owner); + +void spi_mem_driver_unregister(struct spi_mem_driver *drv); + +#define spi_mem_driver_register(__drv) \ + spi_mem_driver_register_with_owner(__drv, THIS_MODULE) + +#define module_spi_mem_driver(__drv) \ + module_driver(__drv, spi_mem_driver_register, \ + spi_mem_driver_unregister) + +#endif /* __LINUX_SPI_MEM_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h new file mode 100644 index 000000000..16158fe09 --- /dev/null +++ b/include/linux/spi/spi.h @@ -0,0 +1,1357 @@ +/* + * Copyright (C) 2005 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __LINUX_SPI_H +#define __LINUX_SPI_H + +#include +#include +#include +#include +#include +#include + +struct dma_chan; +struct property_entry; +struct spi_controller; +struct spi_transfer; +struct spi_controller_mem_ops; + +/* + * INTERFACES between SPI master-side drivers and SPI slave protocol handlers, + * and SPI infrastructure. + */ +extern struct bus_type spi_bus_type; + +/** + * struct spi_statistics - statistics for spi transfers + * @lock: lock protecting this structure + * + * @messages: number of spi-messages handled + * @transfers: number of spi_transfers handled + * @errors: number of errors during spi_transfer + * @timedout: number of timeouts during spi_transfer + * + * @spi_sync: number of times spi_sync is used + * @spi_sync_immediate: + * number of times spi_sync is executed immediately + * in calling context without queuing and scheduling + * @spi_async: number of times spi_async is used + * + * @bytes: number of bytes transferred to/from device + * @bytes_tx: number of bytes sent to device + * @bytes_rx: number of bytes received from device + * + * @transfer_bytes_histo: + * transfer bytes histogramm + * + * @transfers_split_maxsize: + * number of transfers that have been split because of + * maxsize limit + */ +struct spi_statistics { + spinlock_t lock; /* lock for the whole structure */ + + unsigned long messages; + unsigned long transfers; + unsigned long errors; + unsigned long timedout; + + unsigned long spi_sync; + unsigned long spi_sync_immediate; + unsigned long spi_async; + + unsigned long long bytes; + unsigned long long bytes_rx; + unsigned long long bytes_tx; + +#define SPI_STATISTICS_HISTO_SIZE 17 + unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; + + unsigned long transfers_split_maxsize; +}; + +void spi_statistics_add_transfer_stats(struct spi_statistics *stats, + struct spi_transfer *xfer, + struct spi_controller *ctlr); + +#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \ + do { \ + unsigned long flags; \ + spin_lock_irqsave(&(stats)->lock, flags); \ + (stats)->field += count; \ + spin_unlock_irqrestore(&(stats)->lock, flags); \ + } while (0) + +#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \ + SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1) + +/** + * struct spi_device - Controller side proxy for an SPI slave device + * @dev: Driver model representation of the device. + * @controller: SPI controller used with the device. + * @master: Copy of controller, for backwards compatibility. + * @max_speed_hz: Maximum clock rate to be used with this chip + * (on this board); may be changed by the device's driver. + * The spi_transfer.speed_hz can override this for each transfer. + * @chip_select: Chipselect, distinguishing chips handled by @controller. + * @mode: The spi mode defines how data is clocked out and in. + * This may be changed by the device's driver. + * The "active low" default for chipselect mode can be overridden + * (by specifying SPI_CS_HIGH) as can the "MSB first" default for + * each word in a transfer (by specifying SPI_LSB_FIRST). + * @bits_per_word: Data transfers involve one or more words; word sizes + * like eight or 12 bits are common. In-memory wordsizes are + * powers of two bytes (e.g. 20 bit samples use 32 bits). + * This may be changed by the device's driver, or left at the + * default (0) indicating protocol words are eight bit bytes. + * The spi_transfer.bits_per_word can override this for each transfer. 
+ * @irq: Negative, or the number passed to request_irq() to receive + * interrupts from this device. + * @controller_state: Controller's runtime state + * @controller_data: Board-specific definitions for controller, such as + * FIFO initialization parameters; from board_info.controller_data + * @modalias: Name of the driver to use with this device, or an alias + * for that name. This appears in the sysfs "modalias" attribute + * for driver coldplugging, and in uevents used for hotplugging + * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when + * not using a GPIO line) + * + * @statistics: statistics for the spi_device + * + * A @spi_device is used to interchange data between an SPI slave + * (usually a discrete chip) and CPU memory. + * + * In @dev, the platform_data is used to hold information about this + * device that's meaningful to the device's protocol driver, but not + * to its controller. One example might be an identifier for a chip + * variant with slightly different functionality; another might be + * information about how this particular board wires the chip's pins. + */ +struct spi_device { + struct device dev; + struct spi_controller *controller; + struct spi_controller *master; /* compatibility layer */ + u32 max_speed_hz; + u8 chip_select; + u8 bits_per_word; + u16 mode; +#define SPI_CPHA 0x01 /* clock phase */ +#define SPI_CPOL 0x02 /* clock polarity */ +#define SPI_MODE_0 (0|0) /* (original MicroWire) */ +#define SPI_MODE_1 (0|SPI_CPHA) +#define SPI_MODE_2 (SPI_CPOL|0) +#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) +#define SPI_CS_HIGH 0x04 /* chipselect active high? */ +#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ +#define SPI_3WIRE 0x10 /* SI/SO signals shared */ +#define SPI_LOOP 0x20 /* loopback mode */ +#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */ +#define SPI_READY 0x80 /* slave pulls low to pause */ +#define SPI_TX_DUAL 0x100 /* transmit with 2 wires */ +#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */ +#define SPI_RX_DUAL 0x400 /* receive with 2 wires */ +#define SPI_RX_QUAD 0x800 /* receive with 4 wires */ + int irq; + void *controller_state; + void *controller_data; + char modalias[SPI_NAME_SIZE]; + int cs_gpio; /* chip select gpio */ + + /* the statistics */ + struct spi_statistics statistics; + + /* + * likely need more hooks for more protocol options affecting how + * the controller talks to each chip, like: + * - memory packing (12 bit samples into low bits, others zeroed) + * - priority + * - drop chipselect after each word + * - chipselect delays + * - ... + */ +}; + +static inline struct spi_device *to_spi_device(struct device *dev) +{ + return dev ? container_of(dev, struct spi_device, dev) : NULL; +} + +/* most drivers won't need to care about device refcounting */ +static inline struct spi_device *spi_dev_get(struct spi_device *spi) +{ + return (spi && get_device(&spi->dev)) ? 
spi : NULL; +} + +static inline void spi_dev_put(struct spi_device *spi) +{ + if (spi) + put_device(&spi->dev); +} + +/* ctldata is for the bus_controller driver's runtime state */ +static inline void *spi_get_ctldata(struct spi_device *spi) +{ + return spi->controller_state; +} + +static inline void spi_set_ctldata(struct spi_device *spi, void *state) +{ + spi->controller_state = state; +} + +/* device driver data */ + +static inline void spi_set_drvdata(struct spi_device *spi, void *data) +{ + dev_set_drvdata(&spi->dev, data); +} + +static inline void *spi_get_drvdata(struct spi_device *spi) +{ + return dev_get_drvdata(&spi->dev); +} + +struct spi_message; +struct spi_transfer; + +/** + * struct spi_driver - Host side "protocol" driver + * @id_table: List of SPI devices supported by this driver + * @probe: Binds this driver to the spi device. Drivers can verify + * that the device is actually present, and may need to configure + * characteristics (such as bits_per_word) which weren't needed for + * the initial configuration done during system setup. + * @remove: Unbinds this driver from the spi device + * @shutdown: Standard shutdown callback used during system state + * transitions such as powerdown/halt and kexec + * @driver: SPI device drivers should initialize the name and owner + * field of this structure. + * + * This represents the kind of device driver that uses SPI messages to + * interact with the hardware at the other end of a SPI link. It's called + * a "protocol" driver because it works through messages rather than talking + * directly to SPI hardware (which is what the underlying SPI controller + * driver does to pass those messages). These protocols are defined in the + * specification for the device(s) supported by the driver. + * + * As a rule, those device protocols represent the lowest level interface + * supported by a driver, and it will support upper level interfaces too. + * Examples of such upper levels include frameworks like MTD, networking, + * MMC, RTC, filesystem character device nodes, and hardware monitoring. + */ +struct spi_driver { + const struct spi_device_id *id_table; + int (*probe)(struct spi_device *spi); + int (*remove)(struct spi_device *spi); + void (*shutdown)(struct spi_device *spi); + struct device_driver driver; +}; + +static inline struct spi_driver *to_spi_driver(struct device_driver *drv) +{ + return drv ? container_of(drv, struct spi_driver, driver) : NULL; +} + +extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv); + +/** + * spi_unregister_driver - reverse effect of spi_register_driver + * @sdrv: the driver to unregister + * Context: can sleep + */ +static inline void spi_unregister_driver(struct spi_driver *sdrv) +{ + if (sdrv) + driver_unregister(&sdrv->driver); +} + +/* use a define to avoid include chaining to get THIS_MODULE */ +#define spi_register_driver(driver) \ + __spi_register_driver(THIS_MODULE, driver) + +/** + * module_spi_driver() - Helper macro for registering a SPI driver + * @__spi_driver: spi_driver struct + * + * Helper macro for SPI drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. 
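+ *
+ * A typical protocol driver therefore boils down to something like the
+ * following ("foo" is a placeholder, not a real driver):
+ *
+ *	static struct spi_driver foo_spi_driver = {
+ *		.driver = {
+ *			.name	= "foo",
+ *		},
+ *		.probe	= foo_probe,
+ *		.remove	= foo_remove,
+ *	};
+ *	module_spi_driver(foo_spi_driver);
+ *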
Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_spi_driver(__spi_driver) \ + module_driver(__spi_driver, spi_register_driver, \ + spi_unregister_driver) + +/** + * struct spi_controller - interface to SPI master or slave controller + * @dev: device interface to this driver + * @list: link with the global spi_controller list + * @bus_num: board-specific (and often SOC-specific) identifier for a + * given SPI controller. + * @num_chipselect: chipselects are used to distinguish individual + * SPI slaves, and are numbered from zero to num_chipselects. + * each slave has a chipselect signal, but it's common that not + * every chipselect is connected to a slave. + * @dma_alignment: SPI controller constraint on DMA buffers alignment. + * @mode_bits: flags understood by this controller driver + * @bits_per_word_mask: A mask indicating which values of bits_per_word are + * supported by the driver. Bit n indicates that a bits_per_word n+1 is + * supported. If set, the SPI core will reject any transfer with an + * unsupported bits_per_word. If not set, this value is simply ignored, + * and it's up to the individual driver to perform any validation. + * @min_speed_hz: Lowest supported transfer speed + * @max_speed_hz: Highest supported transfer speed + * @flags: other constraints relevant to this driver + * @slave: indicates that this is an SPI slave controller + * @max_transfer_size: function that returns the max transfer size for + * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. + * @max_message_size: function that returns the max message size for + * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. + * @io_mutex: mutex for physical bus access + * @bus_lock_spinlock: spinlock for SPI bus locking + * @bus_lock_mutex: mutex for exclusion of multiple callers + * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use + * @setup: updates the device mode and clocking records used by a + * device's SPI controller; protocol code may call this. This + * must fail if an unrecognized or unsupported mode is requested. + * It's always safe to call this unless transfers are pending on + * the device whose settings are being modified. + * @transfer: adds a message to the controller's transfer queue. + * @cleanup: frees controller-specific state + * @can_dma: determine whether this controller supports DMA + * @queued: whether this controller is providing an internal message queue + * @kworker: thread struct for message pump + * @kworker_task: pointer to task for message pump kworker thread + * @pump_messages: work struct for scheduling work to the message pump + * @queue_lock: spinlock to syncronise access to message queue + * @queue: message queue + * @idling: the device is entering idle state + * @cur_msg: the currently in-flight message + * @cur_msg_prepared: spi_prepare_message was called for the currently + * in-flight message + * @cur_msg_mapped: message has been mapped for DMA + * @xfer_completion: used by core transfer_one_message() + * @busy: message pump is busy + * @running: message pump is running + * @rt: whether this queue is set to run as a realtime task + * @auto_runtime_pm: the core should ensure a runtime PM reference is held + * while the hardware is prepared, using the parent + * device for the spidev + * @max_dma_len: Maximum length of a DMA transfer for the device. 
+ * @prepare_transfer_hardware: a message will soon arrive from the queue + * so the subsystem requests the driver to prepare the transfer hardware + * by issuing this call + * @transfer_one_message: the subsystem calls the driver to transfer a single + * message while queuing transfers that arrive in the meantime. When the + * driver is finished with this message, it must call + * spi_finalize_current_message() so the subsystem can issue the next + * message + * @unprepare_transfer_hardware: there are currently no more messages on the + * queue so the subsystem notifies the driver that it may relax the + * hardware by issuing this call + * @set_cs: set the logic level of the chip select line. May be called + * from interrupt context. + * @prepare_message: set up the controller to transfer a single message, + * for example doing DMA mapping. Called from threaded + * context. + * @transfer_one: transfer a single spi_transfer. + * - return 0 if the transfer is finished, + * - return 1 if the transfer is still in progress. When + * the driver is finished with this transfer it must + * call spi_finalize_current_transfer() so the subsystem + * can issue the next transfer. Note: transfer_one and + * transfer_one_message are mutually exclusive; when both + * are set, the generic subsystem does not call your + * transfer_one callback. + * @handle_err: the subsystem calls the driver to handle an error that occurs + * in the generic implementation of transfer_one_message(). + * @mem_ops: optimized/dedicated operations for interactions with SPI memory. + * This field is optional and should only be implemented if the + * controller has native support for memory like operations. + * @unprepare_message: undo any work done by prepare_message(). + * @slave_abort: abort the ongoing transfer request on an SPI slave controller + * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS + * number. Any individual value may be -ENOENT for CS lines that + * are not GPIOs (driven by the SPI controller itself). + * @statistics: statistics for the spi_controller + * @dma_tx: DMA transmit channel + * @dma_rx: DMA receive channel + * @dummy_rx: dummy receive buffer for full-duplex devices + * @dummy_tx: dummy transmit buffer for full-duplex devices + * @fw_translate_cs: If the boot firmware uses different numbering scheme + * what Linux expects, this optional hook can be used to translate + * between the two. + * + * Each SPI controller can communicate with one or more @spi_device + * children. These make a small bus, sharing MOSI, MISO and SCK signals + * but not chip select signals. Each device may be configured to use a + * different clock rate, since those shared signals are ignored unless + * the chip is selected. + * + * The driver for an SPI controller manages access to those devices through + * a queue of spi_message transactions, copying data between CPU memory and + * an SPI slave device. For each such message it queues, it calls the + * message's completion function when the transaction completes. + */ +struct spi_controller { + struct device dev; + + struct list_head list; + + /* other than negative (== assign one dynamically), bus_num is fully + * board-specific. usually that simplifies to being SOC-specific. + * example: one SOC has three SPI controllers, numbered 0..2, + * and one board's schematics might show it using SPI-2. software + * would normally use bus_num=2 for that controller. 
+ */ + s16 bus_num; + + /* chipselects will be integral to many controllers; some others + * might use board-specific GPIOs. + */ + u16 num_chipselect; + + /* some SPI controllers pose alignment requirements on DMAable + * buffers; let protocol drivers know about these requirements. + */ + u16 dma_alignment; + + /* spi_device.mode flags understood by this controller driver */ + u16 mode_bits; + + /* bitmask of supported bits_per_word for transfers */ + u32 bits_per_word_mask; +#define SPI_BPW_MASK(bits) BIT((bits) - 1) +#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1)) +#define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1)) + + /* limits on transfer speed */ + u32 min_speed_hz; + u32 max_speed_hz; + + /* other constraints relevant to this driver */ + u16 flags; +#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */ +#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */ +#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */ +#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */ +#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */ + +#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ + + /* flag indicating this is a non-devres managed controller */ + bool devm_allocated; + + /* flag indicating this is an SPI slave controller */ + bool slave; + + /* + * on some hardware transfer / message size may be constrained + * the limit may depend on device transfer settings + */ + size_t (*max_transfer_size)(struct spi_device *spi); + size_t (*max_message_size)(struct spi_device *spi); + + /* I/O mutex */ + struct mutex io_mutex; + + /* lock and mutex for SPI bus locking */ + spinlock_t bus_lock_spinlock; + struct mutex bus_lock_mutex; + + /* flag indicating that the SPI bus is locked for exclusive use */ + bool bus_lock_flag; + + /* Setup mode and clock, etc (spi driver may call many times). + * + * IMPORTANT: this may be called when transfers to another + * device are active. DO NOT UPDATE SHARED REGISTERS in ways + * which could break those transfers. + */ + int (*setup)(struct spi_device *spi); + + /* bidirectional bulk transfers + * + * + The transfer() method may not sleep; its main role is + * just to add the message to the queue. + * + For now there's no remove-from-queue operation, or + * any other request management + * + To a given spi_device, message queueing is pure fifo + * + * + The controller's main job is to process its message queue, + * selecting a chip (for masters), then transferring data + * + If there are multiple spi_device children, the i/o queue + * arbitration algorithm is unspecified (round robin, fifo, + * priority, reservations, preemption, etc) + * + * + Chipselect stays active during the entire message + * (unless modified by spi_transfer.cs_change != 0). + * + The message transfers use clock and SPI mode parameters + * previously established by setup() for this device + */ + int (*transfer)(struct spi_device *spi, + struct spi_message *mesg); + + /* called on release() to free memory provided by spi_controller */ + void (*cleanup)(struct spi_device *spi); + + /* + * Used to enable core support for DMA handling, if can_dma() + * exists and returns true then the transfer will be mapped + * prior to transfer_one() being called. The driver should + * not modify or store xfer and dma_tx and dma_rx must be set + * while the device is prepared. 
+ */ + bool (*can_dma)(struct spi_controller *ctlr, + struct spi_device *spi, + struct spi_transfer *xfer); + + /* + * These hooks are for drivers that want to use the generic + * controller transfer queueing mechanism. If these are used, the + * transfer() function above must NOT be specified by the driver. + * Over time we expect SPI drivers to be phased over to this API. + */ + bool queued; + struct kthread_worker kworker; + struct task_struct *kworker_task; + struct kthread_work pump_messages; + spinlock_t queue_lock; + struct list_head queue; + struct spi_message *cur_msg; + bool idling; + bool busy; + bool running; + bool rt; + bool auto_runtime_pm; + bool cur_msg_prepared; + bool cur_msg_mapped; + struct completion xfer_completion; + size_t max_dma_len; + + int (*prepare_transfer_hardware)(struct spi_controller *ctlr); + int (*transfer_one_message)(struct spi_controller *ctlr, + struct spi_message *mesg); + int (*unprepare_transfer_hardware)(struct spi_controller *ctlr); + int (*prepare_message)(struct spi_controller *ctlr, + struct spi_message *message); + int (*unprepare_message)(struct spi_controller *ctlr, + struct spi_message *message); + int (*slave_abort)(struct spi_controller *ctlr); + + /* + * These hooks are for drivers that use a generic implementation + * of transfer_one_message() provied by the core. + */ + void (*set_cs)(struct spi_device *spi, bool enable); + int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi, + struct spi_transfer *transfer); + void (*handle_err)(struct spi_controller *ctlr, + struct spi_message *message); + + /* Optimized handlers for SPI memory-like operations. */ + const struct spi_controller_mem_ops *mem_ops; + + /* gpio chip select */ + int *cs_gpios; + + /* statistics */ + struct spi_statistics statistics; + + /* DMA channels for use with core dmaengine helpers */ + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; + + /* dummy data for full duplex devices */ + void *dummy_rx; + void *dummy_tx; + + int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs); +}; + +static inline void *spi_controller_get_devdata(struct spi_controller *ctlr) +{ + return dev_get_drvdata(&ctlr->dev); +} + +static inline void spi_controller_set_devdata(struct spi_controller *ctlr, + void *data) +{ + dev_set_drvdata(&ctlr->dev, data); +} + +static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr) +{ + if (!ctlr || !get_device(&ctlr->dev)) + return NULL; + return ctlr; +} + +static inline void spi_controller_put(struct spi_controller *ctlr) +{ + if (ctlr) + put_device(&ctlr->dev); +} + +static inline bool spi_controller_is_slave(struct spi_controller *ctlr) +{ + return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave; +} + +/* PM calls that need to be issued by the driver */ +extern int spi_controller_suspend(struct spi_controller *ctlr); +extern int spi_controller_resume(struct spi_controller *ctlr); + +/* Calls the driver make to interact with the message queue */ +extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr); +extern void spi_finalize_current_message(struct spi_controller *ctlr); +extern void spi_finalize_current_transfer(struct spi_controller *ctlr); + +/* the spi driver core manages memory for the spi_controller classdev */ +extern struct spi_controller *__spi_alloc_controller(struct device *host, + unsigned int size, bool slave); + +static inline struct spi_controller *spi_alloc_master(struct device *host, + unsigned int size) +{ + return __spi_alloc_controller(host, size, 
false); +} + +static inline struct spi_controller *spi_alloc_slave(struct device *host, + unsigned int size) +{ + if (!IS_ENABLED(CONFIG_SPI_SLAVE)) + return NULL; + + return __spi_alloc_controller(host, size, true); +} + +struct spi_controller *__devm_spi_alloc_controller(struct device *dev, + unsigned int size, + bool slave); + +static inline struct spi_controller *devm_spi_alloc_master(struct device *dev, + unsigned int size) +{ + return __devm_spi_alloc_controller(dev, size, false); +} + +static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev, + unsigned int size) +{ + if (!IS_ENABLED(CONFIG_SPI_SLAVE)) + return NULL; + + return __devm_spi_alloc_controller(dev, size, true); +} + +extern int spi_register_controller(struct spi_controller *ctlr); +extern int devm_spi_register_controller(struct device *dev, + struct spi_controller *ctlr); +extern void spi_unregister_controller(struct spi_controller *ctlr); + +extern struct spi_controller *spi_busnum_to_master(u16 busnum); + +/* + * SPI resource management while processing a SPI message + */ + +typedef void (*spi_res_release_t)(struct spi_controller *ctlr, + struct spi_message *msg, + void *res); + +/** + * struct spi_res - spi resource management structure + * @entry: list entry + * @release: release code called prior to freeing this resource + * @data: extra data allocated for the specific use-case + * + * this is based on ideas from devres, but focused on life-cycle + * management during spi_message processing + */ +struct spi_res { + struct list_head entry; + spi_res_release_t release; + unsigned long long data[]; /* guarantee ull alignment */ +}; + +extern void *spi_res_alloc(struct spi_device *spi, + spi_res_release_t release, + size_t size, gfp_t gfp); +extern void spi_res_add(struct spi_message *message, void *res); +extern void spi_res_free(void *res); + +extern void spi_res_release(struct spi_controller *ctlr, + struct spi_message *message); + +/*---------------------------------------------------------------------------*/ + +/* + * I/O INTERFACE between SPI controller and protocol drivers + * + * Protocol drivers use a queue of spi_messages, each transferring data + * between the controller and memory buffers. + * + * The spi_messages themselves consist of a series of read+write transfer + * segments. Those segments always read the same number of bits as they + * write; but one or the other is easily ignored by passing a null buffer + * pointer. (This is unlike most types of I/O API, because SPI hardware + * is full duplex.) + * + * NOTE: Allocation of spi_transfer and spi_message memory is entirely + * up to the protocol driver, which guarantees the integrity of both (as + * well as the data buffers) for as long as the message is queued. + */ + +/** + * struct spi_transfer - a read/write buffer pair + * @tx_buf: data to be written (dma-safe memory), or NULL + * @rx_buf: data to be read (dma-safe memory), or NULL + * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped + * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped + * @tx_nbits: number of bits used for writing. If 0 the default + * (SPI_NBITS_SINGLE) is used. + * @rx_nbits: number of bits used for reading. If 0 the default + * (SPI_NBITS_SINGLE) is used. + * @len: size of rx and tx buffers (in bytes) + * @speed_hz: Select a speed other than the device default for this + * transfer. If 0 the default (from @spi_device) is used. + * @bits_per_word: select a bits_per_word other than the device default + * for this transfer. 
+ *	If 0 the default (from @spi_device) is used.
+ * @cs_change: affects chipselect after this transfer completes
+ * @delay_usecs: microseconds to delay after this transfer before
+ *	(optionally) changing the chipselect status, then starting
+ *	the next transfer or completing this @spi_message.
+ * @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg: Scatterlist for transmit, currently not for client use
+ * @rx_sg: Scatterlist for receive, currently not for client use
+ *
+ * SPI transfers always write the same number of bytes as they read.
+ * Protocol drivers should always provide @rx_buf and/or @tx_buf.
+ * In some cases, they may also want to provide DMA addresses for
+ * the data being transferred; that may reduce overhead when the
+ * underlying driver uses DMA.
+ *
+ * If the transmit buffer is null, zeroes will be shifted out
+ * while filling @rx_buf. If the receive buffer is null, the data
+ * shifted in will be discarded. Only "len" bytes shift out (or in).
+ * It's an error to try to shift out a partial word. (For example, by
+ * shifting out three bytes with word size of sixteen or twenty bits;
+ * the former uses two bytes per word, the latter uses four bytes.)
+ *
+ * In-memory data values are always in native CPU byte order, translated
+ * from the wire byte order (big-endian except with SPI_LSB_FIRST). So
+ * for example when bits_per_word is sixteen, buffers are 2N bytes long
+ * (@len = 2N) and hold N sixteen bit words in CPU byte order.
+ *
+ * When the word size of the SPI transfer is not a power-of-two multiple
+ * of eight bits, those in-memory words include extra bits. In-memory
+ * words are always seen by protocol drivers as right-justified, so the
+ * undefined (rx) or unused (tx) bits are always the most significant bits.
+ *
+ * All SPI transfers start with the relevant chipselect active. Normally
+ * it stays selected until after the last transfer in a message. Drivers
+ * can affect the chipselect signal using cs_change.
+ *
+ * (i) If the transfer isn't the last one in the message, this flag is
+ * used to make the chipselect briefly go inactive in the middle of the
+ * message. Toggling chipselect in this way may be needed to terminate
+ * a chip command, letting a single spi_message perform all of a group of
+ * chip transactions together.
+ *
+ * (ii) When the transfer is the last one in the message, the chip may
+ * stay selected until the next transfer. On multi-device SPI busses
+ * with nothing blocking messages going to other devices, this is just
+ * a performance hint; starting a message to another device deselects
+ * this one. But in other cases, this can be used to ensure correctness.
+ * Some devices need protocol transactions to be built from a series of
+ * spi_message submissions, where the content of one message is determined
+ * by the results of previous messages and where the whole transaction
+ * ends when the chipselect goes inactive.
+ *
+ * When the hardware can transfer in 1x, 2x or 4x mode, the transfer width
+ * is taken from the device through @tx_nbits and @rx_nbits. For
+ * bidirectional transfers both should be set. The width is selected with
+ * SPI_NBITS_SINGLE (1x), SPI_NBITS_DUAL (2x) or SPI_NBITS_QUAD (4x).
+ *
+ * The code that submits an spi_message (and its spi_transfers)
+ * to the lower layers is responsible for managing its memory.
+ * Zero-initialize every field you don't set up explicitly, to
+ * insulate against future API updates.
After you submit a message + * and its transfers, ignore them until its completion callback. + */ +struct spi_transfer { + /* it's ok if tx_buf == rx_buf (right?) + * for MicroWire, one buffer must be null + * buffers must work with dma_*map_single() calls, unless + * spi_message.is_dma_mapped reports a pre-existing mapping + */ + const void *tx_buf; + void *rx_buf; + unsigned len; + + dma_addr_t tx_dma; + dma_addr_t rx_dma; + struct sg_table tx_sg; + struct sg_table rx_sg; + + unsigned cs_change:1; + unsigned tx_nbits:3; + unsigned rx_nbits:3; +#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */ +#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */ +#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */ + u8 bits_per_word; + u16 delay_usecs; + u32 speed_hz; + + struct list_head transfer_list; +}; + +/** + * struct spi_message - one multi-segment SPI transaction + * @transfers: list of transfer segments in this transaction + * @spi: SPI device to which the transaction is queued + * @is_dma_mapped: if true, the caller provided both dma and cpu virtual + * addresses for each transfer buffer + * @complete: called to report transaction completions + * @context: the argument to complete() when it's called + * @frame_length: the total number of bytes in the message + * @actual_length: the total number of bytes that were transferred in all + * successful segments + * @status: zero for success, else negative errno + * @queue: for use by whichever driver currently owns the message + * @state: for use by whichever driver currently owns the message + * @resources: for resource management when the spi message is processed + * + * A @spi_message is used to execute an atomic sequence of data transfers, + * each represented by a struct spi_transfer. The sequence is "atomic" + * in the sense that no other spi_message may use that SPI bus until that + * sequence completes. On some systems, many such sequences can execute as + * as single programmed DMA transfer. On all systems, these messages are + * queued, and might complete after transactions to other devices. Messages + * sent to a given spi_device are always executed in FIFO order. + * + * The code that submits an spi_message (and its spi_transfers) + * to the lower layers is responsible for managing its memory. + * Zero-initialize every field you don't set up explicitly, to + * insulate against future API updates. After you submit a message + * and its transfers, ignore them until its completion callback. + */ +struct spi_message { + struct list_head transfers; + + struct spi_device *spi; + + unsigned is_dma_mapped:1; + + /* REVISIT: we might want a flag affecting the behavior of the + * last transfer ... allowing things like "read 16 bit length L" + * immediately followed by "read L bytes". Basically imposing + * a specific message scheduling algorithm. + * + * Some controller drivers (message-at-a-time queue processing) + * could provide that as their default scheduling algorithm. But + * others (with multi-message pipelines) could need a flag to + * tell them about such special cases. + */ + + /* completion is reported through a callback */ + void (*complete)(void *context); + void *context; + unsigned frame_length; + unsigned actual_length; + int status; + + /* for optional use by whatever driver currently owns the + * spi_message ... between calls to spi_async and then later + * complete(), that's the spi_controller controller driver. 
+ */
+	struct list_head	queue;
+	void			*state;
+
+	/* list of spi_res resources when the spi message is processed */
+	struct list_head	resources;
+};
+
+static inline void spi_message_init_no_memset(struct spi_message *m)
+{
+	INIT_LIST_HEAD(&m->transfers);
+	INIT_LIST_HEAD(&m->resources);
+}
+
+static inline void spi_message_init(struct spi_message *m)
+{
+	memset(m, 0, sizeof *m);
+	spi_message_init_no_memset(m);
+}
+
+static inline void
+spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
+{
+	list_add_tail(&t->transfer_list, &m->transfers);
+}
+
+static inline void
+spi_transfer_del(struct spi_transfer *t)
+{
+	list_del(&t->transfer_list);
+}
+
+/**
+ * spi_message_init_with_transfers - Initialize spi_message and append transfers
+ * @m: spi_message to be initialized
+ * @xfers: An array of spi transfers
+ * @num_xfers: Number of items in the xfer array
+ *
+ * This function initializes the given spi_message and adds each spi_transfer in
+ * the given array to the message.
+ */
+static inline void
+spi_message_init_with_transfers(struct spi_message *m,
+				struct spi_transfer *xfers,
+				unsigned int num_xfers)
+{
+	unsigned int i;
+
+	spi_message_init(m);
+	for (i = 0; i < num_xfers; ++i)
+		spi_message_add_tail(&xfers[i], m);
+}
+
+/* It's fine to embed message and transaction structures in other data
+ * structures so long as you don't free them while they're in use.
+ */
+
+static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
+{
+	struct spi_message *m;
+
+	m = kzalloc(sizeof(struct spi_message)
+			+ ntrans * sizeof(struct spi_transfer),
+			flags);
+	if (m) {
+		unsigned i;
+		struct spi_transfer *t = (struct spi_transfer *)(m + 1);
+
+		spi_message_init_no_memset(m);
+		for (i = 0; i < ntrans; i++, t++)
+			spi_message_add_tail(t, m);
+	}
+	return m;
+}
+
+static inline void spi_message_free(struct spi_message *m)
+{
+	kfree(m);
+}
+
+extern int spi_setup(struct spi_device *spi);
+extern int spi_async(struct spi_device *spi, struct spi_message *message);
+extern int spi_async_locked(struct spi_device *spi,
+			    struct spi_message *message);
+extern int spi_slave_abort(struct spi_device *spi);
+
+static inline size_t
+spi_max_message_size(struct spi_device *spi)
+{
+	struct spi_controller *ctlr = spi->controller;
+
+	if (!ctlr->max_message_size)
+		return SIZE_MAX;
+	return ctlr->max_message_size(spi);
+}
+
+static inline size_t
+spi_max_transfer_size(struct spi_device *spi)
+{
+	struct spi_controller *ctlr = spi->controller;
+	size_t tr_max = SIZE_MAX;
+	size_t msg_max = spi_max_message_size(spi);
+
+	if (ctlr->max_transfer_size)
+		tr_max = ctlr->max_transfer_size(spi);
+
+	/* transfer size limit must not be greater than message size limit */
+	return min(tr_max, msg_max);
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* SPI transfer replacement methods which make use of spi_res */
+
+struct spi_replaced_transfers;
+typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
+				       struct spi_message *msg,
+				       struct spi_replaced_transfers *res);
+/**
+ * struct spi_replaced_transfers - structure describing the spi_transfer
+ *                                 replacements that have occurred
+ *                                 so that they can get reverted
+ * @release: some extra release code to get executed prior to
+ *	releasing this structure
+ * @extradata: pointer to some extra data if requested or NULL
+ * @replaced_transfers: transfers that have been replaced and which need
+ *	to get restored
+ * @replaced_after: the transfer after which the @replaced_transfers
+ *	are to get re-inserted
+ * @inserted: number of transfers inserted
+ * @inserted_transfers: array of spi_transfers of array-size @inserted,
+ *	that have been replacing replaced_transfers
+ *
+ * Note that @extradata will point to @inserted_transfers[@inserted]
+ * if some extra allocation is requested, so alignment will be the same
+ * as for spi_transfers.
+ */
+struct spi_replaced_transfers {
+	spi_replaced_release_t release;
+	void *extradata;
+	struct list_head replaced_transfers;
+	struct list_head *replaced_after;
+	size_t inserted;
+	struct spi_transfer inserted_transfers[];
+};
+
+extern struct spi_replaced_transfers *spi_replace_transfers(
+	struct spi_message *msg,
+	struct spi_transfer *xfer_first,
+	size_t remove,
+	size_t insert,
+	spi_replaced_release_t release,
+	size_t extradatasize,
+	gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
+/* SPI transfer transformation methods */
+
+extern int spi_split_transfers_maxsize(struct spi_controller *ctlr,
+				       struct spi_message *msg,
+				       size_t maxsize,
+				       gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
+/* All these synchronous SPI transfer routines are utilities layered
+ * over the core async transfer primitive.  Here, "synchronous" means
+ * they will sleep uninterruptibly until the async transfer completes.
+ */
+
+extern int spi_sync(struct spi_device *spi, struct spi_message *message);
+extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
+extern int spi_bus_lock(struct spi_controller *ctlr);
+extern int spi_bus_unlock(struct spi_controller *ctlr);
+
+/**
+ * spi_sync_transfer - synchronous SPI data transfer
+ * @spi: device with which data will be exchanged
+ * @xfers: An array of spi_transfers
+ * @num_xfers: Number of items in the xfer array
+ * Context: can sleep
+ *
+ * Does a synchronous SPI data transfer of the given spi_transfer array.
+ *
+ * For more specific semantics see spi_sync().
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static inline int
+spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
+	unsigned int num_xfers)
+{
+	struct spi_message msg;
+
+	spi_message_init_with_transfers(&msg, xfers, num_xfers);
+
+	return spi_sync(spi, &msg);
+}
+
+/**
+ * spi_write - SPI synchronous write
+ * @spi: device to which data will be written
+ * @buf: data buffer
+ * @len: data buffer size
+ * Context: can sleep
+ *
+ * This function writes the buffer @buf.
+ * Callable only from contexts that can sleep.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static inline int
+spi_write(struct spi_device *spi, const void *buf, size_t len)
+{
+	struct spi_transfer t = {
+		.tx_buf	= buf,
+		.len	= len,
+	};
+
+	return spi_sync_transfer(spi, &t, 1);
+}
+
+/**
+ * spi_read - SPI synchronous read
+ * @spi: device from which data will be read
+ * @buf: data buffer
+ * @len: data buffer size
+ * Context: can sleep
+ *
+ * This function reads the buffer @buf.
+ * Callable only from contexts that can sleep.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static inline int
+spi_read(struct spi_device *spi, void *buf, size_t len)
+{
+	struct spi_transfer t = {
+		.rx_buf	= buf,
+		.len	= len,
+	};
+
+	return spi_sync_transfer(spi, &t, 1);
+}
+
+/* this copies txbuf and rxbuf data; for small transfers only!
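+ *
+ * A typical use is a short command/response exchange, for instance
+ * reading a three byte ID after a one byte command (illustrative
+ * sketch only; the command value and buffer sizes are made up):
+ *
+ *	u8 cmd = 0x9f;
+ *	u8 id[3];
+ *	int ret = spi_write_then_read(spi, &cmd, 1, id, 3);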
*/ +extern int spi_write_then_read(struct spi_device *spi, + const void *txbuf, unsigned n_tx, + void *rxbuf, unsigned n_rx); + +/** + * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read + * @spi: device with which data will be exchanged + * @cmd: command to be written before data is read back + * Context: can sleep + * + * Callable only from contexts that can sleep. + * + * Return: the (unsigned) eight bit number returned by the + * device, or else a negative error code. + */ +static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) +{ + ssize_t status; + u8 result; + + status = spi_write_then_read(spi, &cmd, 1, &result, 1); + + /* return negative errno or unsigned value */ + return (status < 0) ? status : result; +} + +/** + * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read + * @spi: device with which data will be exchanged + * @cmd: command to be written before data is read back + * Context: can sleep + * + * The number is returned in wire-order, which is at least sometimes + * big-endian. + * + * Callable only from contexts that can sleep. + * + * Return: the (unsigned) sixteen bit number returned by the + * device, or else a negative error code. + */ +static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) +{ + ssize_t status; + u16 result; + + status = spi_write_then_read(spi, &cmd, 1, &result, 2); + + /* return negative errno or unsigned value */ + return (status < 0) ? status : result; +} + +/** + * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read + * @spi: device with which data will be exchanged + * @cmd: command to be written before data is read back + * Context: can sleep + * + * This function is similar to spi_w8r16, with the exception that it will + * convert the read 16 bit data word from big-endian to native endianness. + * + * Callable only from contexts that can sleep. + * + * Return: the (unsigned) sixteen bit number returned by the device in cpu + * endianness, or else a negative error code. + */ +static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) + +{ + ssize_t status; + __be16 result; + + status = spi_write_then_read(spi, &cmd, 1, &result, 2); + if (status < 0) + return status; + + return be16_to_cpu(result); +} + +/*---------------------------------------------------------------------------*/ + +/* + * INTERFACE between board init code and SPI infrastructure. + * + * No SPI driver ever sees these SPI device table segments, but + * it's how the SPI core (or adapters that get hotplugged) grows + * the driver model tree. + * + * As a rule, SPI devices can't be probed. Instead, board init code + * provides a table listing the devices which are present, with enough + * information to bind and set up the device's driver. There's basic + * support for nonstatic configurations too; enough to handle adding + * parport adapters, or microcontrollers acting as USB-to-SPI bridges. + */ + +/** + * struct spi_board_info - board-specific template for a SPI device + * @modalias: Initializes spi_device.modalias; identifies the driver. + * @platform_data: Initializes spi_device.platform_data; the particular + * data stored there is driver-specific. + * @properties: Additional device properties for the device. + * @controller_data: Initializes spi_device.controller_data; some + * controllers need hints about hardware setup, e.g. for DMA. + * @irq: Initializes spi_device.irq; depends on how the board is wired. 
+ * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits
+ *	from the chip datasheet and board-specific signal quality issues.
+ * @bus_num: Identifies which spi_controller parents the spi_device; unused
+ *	by spi_new_device(), and otherwise depends on board wiring.
+ * @chip_select: Initializes spi_device.chip_select; depends on how
+ *	the board is wired.
+ * @mode: Initializes spi_device.mode; based on the chip datasheet, board
+ *	wiring (some devices support both 3WIRE and standard modes), and
+ *	possibly presence of an inverter in the chipselect path.
+ *
+ * When adding new SPI devices to the device tree, these structures serve
+ * as a partial device template. They hold information which can't always
+ * be determined by drivers. Information that probe() can establish (such
+ * as the default transfer wordsize) is not included here.
+ *
+ * These structures are used in two places. Their primary role is to
+ * be stored in tables of board-specific device descriptors, which are
+ * declared early in board initialization and then used (much later) to
+ * populate a controller's device tree after that controller's driver
+ * initializes. A secondary (and atypical) role is as a parameter to a
+ * spi_new_device() call, which happens after those controller drivers
+ * are active in some dynamic board configuration models.
+ */
+struct spi_board_info {
+	/* the device name and module name are coupled, like platform_bus;
+	 * "modalias" is normally the driver name.
+	 *
+	 * platform_data goes to spi_device.dev.platform_data,
+	 * controller_data goes to spi_device.controller_data,
+	 * device properties are copied and attached to spi_device,
+	 * irq is copied too
+	 */
+	char		modalias[SPI_NAME_SIZE];
+	const void	*platform_data;
+	const struct property_entry *properties;
+	void		*controller_data;
+	int		irq;
+
+	/* slower signaling on noisy or low voltage boards */
+	u32		max_speed_hz;
+
+	/* bus_num is board specific and matches the bus_num of some
+	 * spi_controller that will probably be registered later.
+	 *
+	 * chip_select reflects how this chip is wired to that master;
+	 * it's less than num_chipselect.
+	 */
+	u16		bus_num;
+	u16		chip_select;
+
+	/* mode becomes spi_device.mode, and is essential for chips
+	 * where the default of SPI_CS_HIGH = 0 is wrong.
+	 */
+	u16		mode;
+
+	/* ... may need additional spi_device chip config data here.
+	 * avoid stuff protocol drivers can set; but include stuff
+	 * needed to behave without being bound to a driver:
+	 *  - quirks like clock rate mattering when not selected
+	 */
+};
+
+#ifdef	CONFIG_SPI
+extern int
+spi_register_board_info(struct spi_board_info const *info, unsigned n);
+#else
+/* board init code may ignore whether SPI is configured or not */
+static inline int
+spi_register_board_info(struct spi_board_info const *info, unsigned n)
+	{ return 0; }
+#endif
+
+/* If you're hotplugging an adapter with devices (parport, usb, etc)
+ * use spi_new_device() to describe each device. You can also call
+ * spi_unregister_device() to start making that device vanish, but
+ * normally that would be handled by spi_unregister_controller().
+ *
+ * You can also use spi_alloc_device() and spi_add_device() to use a
+ * two-stage registration sequence for each spi_device. This gives the
+ * caller some more control over the spi_device structure before it is
+ * registered, but requires the caller to initialize fields that would
+ * otherwise be defined using the board info.
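+ *
+ * As a minimal sketch (the device name and the numbers below are made
+ * up for illustration), hotplug code might describe and register one
+ * device like this:
+ *
+ *	static struct spi_board_info chip = {
+ *		.modalias	= "example-chip",
+ *		.max_speed_hz	= 1000000,
+ *		.bus_num	= 1,
+ *		.chip_select	= 0,
+ *	};
+ *
+ *	struct spi_device *dev = spi_new_device(ctlr, &chip);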
+ */ +extern struct spi_device * +spi_alloc_device(struct spi_controller *ctlr); + +extern int +spi_add_device(struct spi_device *spi); + +extern struct spi_device * +spi_new_device(struct spi_controller *, struct spi_board_info *); + +extern void spi_unregister_device(struct spi_device *spi); + +extern const struct spi_device_id * +spi_get_device_id(const struct spi_device *sdev); + +static inline bool +spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer) +{ + return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers); +} + + +/* Compatibility layer */ +#define spi_master spi_controller + +#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX +#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX +#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX +#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX +#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX + +#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr) +#define spi_master_set_devdata(_ctlr, _data) \ + spi_controller_set_devdata(_ctlr, _data) +#define spi_master_get(_ctlr) spi_controller_get(_ctlr) +#define spi_master_put(_ctlr) spi_controller_put(_ctlr) +#define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr) +#define spi_master_resume(_ctlr) spi_controller_resume(_ctlr) + +#define spi_register_master(_ctlr) spi_register_controller(_ctlr) +#define devm_spi_register_master(_dev, _ctlr) \ + devm_spi_register_controller(_dev, _ctlr) +#define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr) + +#endif /* __LINUX_SPI_H */ diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h new file mode 100644 index 000000000..b7e021b27 --- /dev/null +++ b/include/linux/spi/spi_bitbang.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SPI_BITBANG_H +#define __SPI_BITBANG_H + +#include + +struct spi_bitbang { + struct mutex lock; + u8 busy; + u8 use_dma; + u16 flags; /* extra spi->mode support */ + + struct spi_master *master; + + /* setup_transfer() changes clock and/or wordsize to match settings + * for this transfer; zeroes restore defaults from spi_device. + */ + int (*setup_transfer)(struct spi_device *spi, + struct spi_transfer *t); + + void (*chipselect)(struct spi_device *spi, int is_on); +#define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */ +#define BITBANG_CS_INACTIVE 0 + + /* txrx_bufs() may handle dma mapping for transfers that don't + * already have one (transfer.{tx,rx}_dma is zero), or use PIO + */ + int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t); + + /* txrx_word[SPI_MODE_*]() just looks like a shift register */ + u32 (*txrx_word[4])(struct spi_device *spi, + unsigned nsecs, + u32 word, u8 bits, unsigned flags); + int (*set_line_direction)(struct spi_device *spi, bool output); +}; + +/* you can call these default bitbang->master methods from your custom + * methods, if you like. 
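+ *
+ * For example (sketch only, the foo_ name is a placeholder), a driver
+ * whose setup() merely adds a check before the default behaviour could
+ * chain to it like this:
+ *
+ *	static int foo_spi_setup(struct spi_device *spi)
+ *	{
+ *		if (spi->mode & SPI_3WIRE)
+ *			return -EINVAL;
+ *		return spi_bitbang_setup(spi);
+ *	}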
+ */ +extern int spi_bitbang_setup(struct spi_device *spi); +extern void spi_bitbang_cleanup(struct spi_device *spi); +extern int spi_bitbang_setup_transfer(struct spi_device *spi, + struct spi_transfer *t); + +/* start or stop queue processing */ +extern int spi_bitbang_start(struct spi_bitbang *spi); +extern void spi_bitbang_stop(struct spi_bitbang *spi); + +#endif /* __SPI_BITBANG_H */ diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h new file mode 100644 index 000000000..9e7e83d86 --- /dev/null +++ b/include/linux/spi/spi_gpio.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SPI_GPIO_H +#define __LINUX_SPI_GPIO_H + +/* + * For each bitbanged SPI bus, set up a platform_device node with: + * - name "spi_gpio" + * - id the same as the SPI bus number it implements + * - dev.platform data pointing to a struct spi_gpio_platform_data + * + * Use spi_board_info with these busses in the usual way. + * + * If the bitbanged bus is later switched to a "native" controller, + * that platform_device and controller_data should be removed. + */ + +/** + * struct spi_gpio_platform_data - parameter for bitbanged SPI master + * @num_chipselect: how many slaves to allow + */ +struct spi_gpio_platform_data { + u16 num_chipselect; +}; + +#endif /* __LINUX_SPI_GPIO_H */ diff --git a/include/linux/spi/spi_oc_tiny.h b/include/linux/spi/spi_oc_tiny.h new file mode 100644 index 000000000..a3ecf2fea --- /dev/null +++ b/include/linux/spi/spi_oc_tiny.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SPI_SPI_OC_TINY_H +#define _LINUX_SPI_SPI_OC_TINY_H + +/** + * struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI + * @freq: input clock freq to the core. + * @baudwidth: baud rate divider width of the core. + * @gpio_cs_count: number of gpio pins used for chipselect. + * @gpio_cs: array of gpio pins used for chipselect. + * + * freq and baudwidth are used only if the divider is programmable. + */ +struct tiny_spi_platform_data { + unsigned int freq; + unsigned int baudwidth; + unsigned int gpio_cs_count; + int *gpio_cs; +}; + +#endif /* _LINUX_SPI_SPI_OC_TINY_H */ diff --git a/include/linux/spi/tdo24m.h b/include/linux/spi/tdo24m.h new file mode 100644 index 000000000..48dd58ac5 --- /dev/null +++ b/include/linux/spi/tdo24m.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __TDO24M_H__ +#define __TDO24M_H__ + +enum tdo24m_model { + TDO24M, + TDO35S, +}; + +struct tdo24m_platform_data { + enum tdo24m_model model; +}; + +#endif /* __TDO24M_H__ */ diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h new file mode 100644 index 000000000..414c6fddf --- /dev/null +++ b/include/linux/spi/tle62x0.h @@ -0,0 +1,20 @@ +/* + * tle62x0.h - platform glue to Infineon TLE62x0 driver chips + * + * Copyright 2007 Simtec Electronics + * Ben Dooks + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+*/ + +struct tle62x0_pdata { + unsigned int init_state; + unsigned int gpio_count; +}; diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h new file mode 100644 index 000000000..c15d69d28 --- /dev/null +++ b/include/linux/spi/xilinx_spi.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SPI_XILINX_SPI_H +#define __LINUX_SPI_XILINX_SPI_H + +/** + * struct xspi_platform_data - Platform data of the Xilinx SPI driver + * @num_chipselect: Number of chip select by the IP. + * @little_endian: If registers should be accessed little endian or not. + * @bits_per_word: Number of bits per word. + * @devices: Devices to add when the driver is probed. + * @num_devices: Number of devices in the devices array. + */ +struct xspi_platform_data { + u16 num_chipselect; + u8 bits_per_word; + struct spi_board_info *devices; + u8 num_devices; +}; + +#endif /* __LINUX_SPI_XILINX_SPI_H */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h new file mode 100644 index 000000000..e089157dc --- /dev/null +++ b/include/linux/spinlock.h @@ -0,0 +1,471 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SPINLOCK_H +#define __LINUX_SPINLOCK_H + +/* + * include/linux/spinlock.h - generic spinlock/rwlock declarations + * + * here's the role of the various spinlock/rwlock related include files: + * + * on SMP builds: + * + * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the + * initializers + * + * linux/spinlock_types.h: + * defines the generic type and initializers + * + * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel + * implementations, mostly inline assembly code + * + * (also included on UP-debug builds:) + * + * linux/spinlock_api_smp.h: + * contains the prototypes for the _spin_*() APIs. + * + * linux/spinlock.h: builds the final spin_*() APIs. + * + * on UP builds: + * + * linux/spinlock_type_up.h: + * contains the generic, simplified UP spinlock type. + * (which is an empty structure on non-debug builds) + * + * linux/spinlock_types.h: + * defines the generic type and initializers + * + * linux/spinlock_up.h: + * contains the arch_spin_*()/etc. version of UP + * builds. (which are NOPs on non-debug, non-preempt + * builds) + * + * (included on UP-non-debug builds:) + * + * linux/spinlock_api_up.h: + * builds the _spin_*() APIs. + * + * linux/spinlock.h: builds the final spin_*() APIs. 
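+ *
+ * Whichever variant gets pulled in, callers see the same API; a typical
+ * (purely illustrative, foo_lock is a placeholder) critical section in
+ * driver code looks like:
+ *
+ *	static DEFINE_SPINLOCK(foo_lock);
+ *	unsigned long flags;
+ *
+ *	spin_lock_irqsave(&foo_lock, flags);
+ *	... touch data shared with an interrupt handler ...
+ *	spin_unlock_irqrestore(&foo_lock, flags);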
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * Must define these before including other files, inline functions need them + */ +#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME + +#define LOCK_SECTION_START(extra) \ + ".subsection 1\n\t" \ + extra \ + ".ifndef " LOCK_SECTION_NAME "\n\t" \ + LOCK_SECTION_NAME ":\n\t" \ + ".endif\n" + +#define LOCK_SECTION_END \ + ".previous\n\t" + +#define __lockfunc __attribute__((section(".spinlock.text"))) + +/* + * Pull the arch_spinlock_t and arch_rwlock_t definitions: + */ +#include + +/* + * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them): + */ +#ifdef CONFIG_SMP +# include +#else +# include +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + struct lock_class_key *key); +# define raw_spin_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __raw_spin_lock_init((lock), #lock, &__key); \ +} while (0) + +#else +# define raw_spin_lock_init(lock) \ + do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) +#endif + +#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) + +#ifdef arch_spin_is_contended +#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) +#else +#define raw_spin_is_contended(lock) (((void)(lock), 0)) +#endif /*arch_spin_is_contended*/ + +/* + * smp_mb__after_spinlock() provides the equivalent of a full memory barrier + * between program-order earlier lock acquisitions and program-order later + * memory accesses. + * + * This guarantees that the following two properties hold: + * + * 1) Given the snippet: + * + * { X = 0; Y = 0; } + * + * CPU0 CPU1 + * + * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1); + * spin_lock(S); smp_mb(); + * smp_mb__after_spinlock(); r1 = READ_ONCE(X); + * r0 = READ_ONCE(Y); + * spin_unlock(S); + * + * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0) + * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments + * preceding the call to smp_mb__after_spinlock() in __schedule() and in + * try_to_wake_up(). + * + * 2) Given the snippet: + * + * { X = 0; Y = 0; } + * + * CPU0 CPU1 CPU2 + * + * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y); + * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb(); + * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X); + * WRITE_ONCE(Y, 1); + * spin_unlock(S); + * + * it is forbidden that CPU0's critical section executes before CPU1's + * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1) + * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments + * preceding the calls to smp_rmb() in try_to_wake_up() for similar + * snippets but "projected" onto two CPUs. + * + * Property (2) upgrades the lock to an RCsc lock. + * + * Since most load-store architectures implement ACQUIRE with an smp_mb() after + * the LL/SC loop, they need no further barriers. Similarly all our TSO + * architectures imply an smp_mb() for each atomic instruction and equally don't + * need more. + * + * Architectures that can implement ACQUIRE better need to take care. 
+ */ +#ifndef smp_mb__after_spinlock +#define smp_mb__after_spinlock() do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); +#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) + extern int do_raw_spin_trylock(raw_spinlock_t *lock); + extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); +#else +static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) +{ + __acquire(lock); + arch_spin_lock(&lock->raw_lock); +} + +#ifndef arch_spin_lock_flags +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#endif + +static inline void +do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock) +{ + __acquire(lock); + arch_spin_lock_flags(&lock->raw_lock, *flags); +} + +static inline int do_raw_spin_trylock(raw_spinlock_t *lock) +{ + return arch_spin_trylock(&(lock)->raw_lock); +} + +static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) +{ + arch_spin_unlock(&lock->raw_lock); + __release(lock); +} +#endif + +/* + * Define the various spin_lock methods. Note we define these + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The + * various methods are defined as nops in the case they are not + * required. + */ +#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) + +#define raw_spin_lock(lock) _raw_spin_lock(lock) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define raw_spin_lock_nested(lock, subclass) \ + _raw_spin_lock_nested(lock, subclass) + +# define raw_spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ + _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) +#else +/* + * Always evaluate the 'subclass' argument to avoid that the compiler + * warns about set-but-not-used variables when building with + * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. 
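+ *
+ * For reference, the nested variant is meant for the case where two locks
+ * of the same lock class are legitimately held at once, e.g. (illustrative
+ * only, parent/child are placeholders):
+ *
+ *	raw_spin_lock(&parent->lock);
+ *	raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);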
+ */ +# define raw_spin_lock_nested(lock, subclass) \ + _raw_spin_lock(((void)(subclass), (lock))) +# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) +#endif + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + +#define raw_spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_spin_lock_irqsave(lock); \ + } while (0) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ + } while (0) +#else +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_spin_lock_irqsave(lock); \ + } while (0) +#endif + +#else + +#define raw_spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_spin_lock_irqsave(lock, flags); \ + } while (0) + +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ + raw_spin_lock_irqsave(lock, flags) + +#endif + +#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) +#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) +#define raw_spin_unlock(lock) _raw_spin_unlock(lock) +#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) + +#define raw_spin_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_spin_unlock_irqrestore(lock, flags); \ + } while (0) +#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) + +#define raw_spin_trylock_bh(lock) \ + __cond_lock(lock, _raw_spin_trylock_bh(lock)) + +#define raw_spin_trylock_irq(lock) \ +({ \ + local_irq_disable(); \ + raw_spin_trylock(lock) ? \ + 1 : ({ local_irq_enable(); 0; }); \ +}) + +#define raw_spin_trylock_irqsave(lock, flags) \ +({ \ + local_irq_save(flags); \ + raw_spin_trylock(lock) ? 
\ + 1 : ({ local_irq_restore(flags); 0; }); \ +}) + +/* Include rwlock functions */ +#include + +/* + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: + */ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +# include +#else +# include +#endif + +/* + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n + */ + +static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock) +{ + return &lock->rlock; +} + +#define spin_lock_init(_lock) \ +do { \ + spinlock_check(_lock); \ + raw_spin_lock_init(&(_lock)->rlock); \ +} while (0) + +static __always_inline void spin_lock(spinlock_t *lock) +{ + raw_spin_lock(&lock->rlock); +} + +static __always_inline void spin_lock_bh(spinlock_t *lock) +{ + raw_spin_lock_bh(&lock->rlock); +} + +static __always_inline int spin_trylock(spinlock_t *lock) +{ + return raw_spin_trylock(&lock->rlock); +} + +#define spin_lock_nested(lock, subclass) \ +do { \ + raw_spin_lock_nested(spinlock_check(lock), subclass); \ +} while (0) + +#define spin_lock_nest_lock(lock, nest_lock) \ +do { \ + raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ +} while (0) + +static __always_inline void spin_lock_irq(spinlock_t *lock) +{ + raw_spin_lock_irq(&lock->rlock); +} + +#define spin_lock_irqsave(lock, flags) \ +do { \ + raw_spin_lock_irqsave(spinlock_check(lock), flags); \ +} while (0) + +#define spin_lock_irqsave_nested(lock, flags, subclass) \ +do { \ + raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ +} while (0) + +static __always_inline void spin_unlock(spinlock_t *lock) +{ + raw_spin_unlock(&lock->rlock); +} + +static __always_inline void spin_unlock_bh(spinlock_t *lock) +{ + raw_spin_unlock_bh(&lock->rlock); +} + +static __always_inline void spin_unlock_irq(spinlock_t *lock) +{ + raw_spin_unlock_irq(&lock->rlock); +} + +static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&lock->rlock, flags); +} + +static __always_inline int spin_trylock_bh(spinlock_t *lock) +{ + return raw_spin_trylock_bh(&lock->rlock); +} + +static __always_inline int spin_trylock_irq(spinlock_t *lock) +{ + return raw_spin_trylock_irq(&lock->rlock); +} + +#define spin_trylock_irqsave(lock, flags) \ +({ \ + raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ +}) + +/** + * spin_is_locked() - Check whether a spinlock is locked. + * @lock: Pointer to the spinlock. + * + * This function is NOT required to provide any memory ordering + * guarantees; it could be used for debugging purposes or, when + * additional synchronization is needed, accompanied with other + * constructs (memory barriers) enforcing the synchronization. + * + * Returns: 1 if @lock is locked, 0 otherwise. + * + * Note that the function only tells you that the spinlock is + * seen to be locked, not that it is locked on your CPU. + * + * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n, + * the return value is always 0 (see include/linux/spinlock_up.h). + * Therefore you should not rely heavily on the return value. 
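+ *
+ * When the goal is to assert that the current context holds the lock,
+ * prefer lockdep annotations or assert_spin_locked() (defined below)
+ * over testing this return value, e.g. (illustrative):
+ *
+ *	assert_spin_locked(&foo_lock);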
+ */ +static __always_inline int spin_is_locked(spinlock_t *lock) +{ + return raw_spin_is_locked(&lock->rlock); +} + +static __always_inline int spin_is_contended(spinlock_t *lock) +{ + return raw_spin_is_contended(&lock->rlock); +} + +#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) + +/* + * Pull the atomic_t declaration: + * (asm-mips/atomic.h needs above definitions) + */ +#include +/** + * atomic_dec_and_lock - lock on reaching reference count zero + * @atomic: the atomic counter + * @lock: the spinlock in question + * + * Decrements @atomic by 1. If the result is 0, returns true and locks + * @lock. Returns false for all other cases. + */ +extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); +#define atomic_dec_and_lock(atomic, lock) \ + __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) + +extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock, + unsigned long *flags); +#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \ + __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))) + +int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, + size_t max_size, unsigned int cpu_mult, + gfp_t gfp, const char *name, + struct lock_class_key *key); + +#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) \ + ({ \ + static struct lock_class_key key; \ + int ret; \ + \ + ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size, \ + cpu_mult, gfp, #locks, &key); \ + ret; \ + }) + +void free_bucket_spinlocks(spinlock_t *locks); + +#endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h new file mode 100644 index 000000000..42dfab89e --- /dev/null +++ b/include/linux/spinlock_api_smp.h @@ -0,0 +1,192 @@ +#ifndef __LINUX_SPINLOCK_API_SMP_H +#define __LINUX_SPINLOCK_API_SMP_H + +#ifndef __LINUX_SPINLOCK_H +# error "please don't include this file directly" +#endif + +/* + * include/linux/spinlock_api_smp.h + * + * spinlock API declarations on SMP (and debug) + * (implemented in kernel/spinlock.c) + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). 
+ */ + +int in_lock_functions(unsigned long addr); + +#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x)) + +void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); +void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) + __acquires(lock); +void __lockfunc +_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) + __acquires(lock); +void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); +void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) + __acquires(lock); + +unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) + __acquires(lock); +unsigned long __lockfunc +_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) + __acquires(lock); +int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock); +int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock); +void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); +void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); +void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); +void __lockfunc +_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) + __releases(lock); + +#ifdef CONFIG_INLINE_SPIN_LOCK +#define _raw_spin_lock(lock) __raw_spin_lock(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_LOCK_BH +#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ +#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE +#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_TRYLOCK +#define _raw_spin_trylock(lock) __raw_spin_trylock(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH +#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock) +#endif + +#ifndef CONFIG_UNINLINE_SPIN_UNLOCK +#define _raw_spin_unlock(lock) __raw_spin_unlock(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH +#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ +#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE +#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags) +#endif + +static inline int __raw_spin_trylock(raw_spinlock_t *lock) +{ + preempt_disable(); + if (do_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + return 0; +} + +/* + * If lockdep is enabled then we use the non-preemption spin-ops + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are + * not re-enabled during lock-acquire (which the preempt-spin-ops do): + */ +#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) + +static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + /* + * On lockdep we dont want the hand-coded irq-enable of + * do_raw_spin_lock_flags() code, because lockdep assumes + * that interrupts are not re-enabled during lock-acquire: + */ +#ifdef CONFIG_LOCKDEP + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +#else + do_raw_spin_lock_flags(lock, &flags); +#endif + return flags; +} + +static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + 
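+	/*
+	 * Interrupts and preemption are now disabled and lockdep has been
+	 * told about the acquire; LOCK_CONTENDED() below does the actual
+	 * spin (and records contention when lock statistics are enabled).
+	 */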
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +} + +static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) +{ + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +} + +static inline void __raw_spin_lock(raw_spinlock_t *lock) +{ + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +} + +#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ + +static inline void __raw_spin_unlock(raw_spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + do_raw_spin_unlock(lock); + preempt_enable(); +} + +static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, + unsigned long flags) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + do_raw_spin_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + do_raw_spin_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + do_raw_spin_unlock(lock); + __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); +} + +static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) +{ + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); + if (do_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); + return 0; +} + +#include + +#endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h new file mode 100644 index 000000000..d0d188861 --- /dev/null +++ b/include/linux/spinlock_api_up.h @@ -0,0 +1,91 @@ +#ifndef __LINUX_SPINLOCK_API_UP_H +#define __LINUX_SPINLOCK_API_UP_H + +#ifndef __LINUX_SPINLOCK_H +# error "please don't include this file directly" +#endif + +/* + * include/linux/spinlock_api_up.h + * + * spinlock API implementation on UP-nondebug (inlined implementation) + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). 
+ */ + +#define in_lock_functions(ADDR) 0 + +#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0) + +/* + * In the UP-nondebug case there's no real locking going on, so the + * only thing we have to do is to keep the preempt counts and irq + * flags straight, to suppress compiler warnings of unused lock + * variables, and to add the proper checker annotations: + */ +#define ___LOCK(lock) \ + do { __acquire(lock); (void)(lock); } while (0) + +#define __LOCK(lock) \ + do { preempt_disable(); ___LOCK(lock); } while (0) + +#define __LOCK_BH(lock) \ + do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0) + +#define __LOCK_IRQ(lock) \ + do { local_irq_disable(); __LOCK(lock); } while (0) + +#define __LOCK_IRQSAVE(lock, flags) \ + do { local_irq_save(flags); __LOCK(lock); } while (0) + +#define ___UNLOCK(lock) \ + do { __release(lock); (void)(lock); } while (0) + +#define __UNLOCK(lock) \ + do { preempt_enable(); ___UNLOCK(lock); } while (0) + +#define __UNLOCK_BH(lock) \ + do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \ + ___UNLOCK(lock); } while (0) + +#define __UNLOCK_IRQ(lock) \ + do { local_irq_enable(); __UNLOCK(lock); } while (0) + +#define __UNLOCK_IRQRESTORE(lock, flags) \ + do { local_irq_restore(flags); __UNLOCK(lock); } while (0) + +#define _raw_spin_lock(lock) __LOCK(lock) +#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) +#define _raw_read_lock(lock) __LOCK(lock) +#define _raw_write_lock(lock) __LOCK(lock) +#define _raw_spin_lock_bh(lock) __LOCK_BH(lock) +#define _raw_read_lock_bh(lock) __LOCK_BH(lock) +#define _raw_write_lock_bh(lock) __LOCK_BH(lock) +#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) +#define _raw_spin_unlock(lock) __UNLOCK(lock) +#define _raw_read_unlock(lock) __UNLOCK(lock) +#define _raw_write_unlock(lock) __UNLOCK(lock) +#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_spin_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) +#define _raw_read_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) +#define _raw_write_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) + +#endif /* __LINUX_SPINLOCK_API_UP_H */ diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h new file mode 100644 index 000000000..24b4e6f2c --- /dev/null +++ b/include/linux/spinlock_types.h @@ -0,0 +1,85 @@ +#ifndef __LINUX_SPINLOCK_TYPES_H +#define __LINUX_SPINLOCK_TYPES_H + +/* + * include/linux/spinlock_types.h - generic spinlock type definitions + * and initializers + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). 
+ */ + +#if defined(CONFIG_SMP) +# include +#else +# include +#endif + +#include + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, +#else +# define SPIN_DEBUG_INIT(lockname) +#endif + +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ + { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +typedef struct spinlock { + union { + struct raw_spinlock rlock; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) + struct { + u8 __padding[LOCK_PADSIZE]; + struct lockdep_map dep_map; + }; +#endif + }; +} spinlock_t; + +#define __SPIN_LOCK_INITIALIZER(lockname) \ + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } + +#define __SPIN_LOCK_UNLOCKED(lockname) \ + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) + +#include + +#endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h new file mode 100644 index 000000000..c09b6407a --- /dev/null +++ b/include/linux/spinlock_types_up.h @@ -0,0 +1,37 @@ +#ifndef __LINUX_SPINLOCK_TYPES_UP_H +#define __LINUX_SPINLOCK_TYPES_UP_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +/* + * include/linux/spinlock_types_up.h - spinlock type definitions for UP + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ + +#ifdef CONFIG_DEBUG_SPINLOCK + +typedef struct { + volatile unsigned int slock; +} arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } + +#else + +typedef struct { } arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { } + +#endif + +typedef struct { + /* no debug version on UP */ +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { } + +#endif /* __LINUX_SPINLOCK_TYPES_UP_H */ diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h new file mode 100644 index 000000000..0ac9112c1 --- /dev/null +++ b/include/linux/spinlock_up.h @@ -0,0 +1,72 @@ +#ifndef __LINUX_SPINLOCK_UP_H +#define __LINUX_SPINLOCK_UP_H + +#ifndef __LINUX_SPINLOCK_H +# error "please don't include this file directly" +#endif + +#include /* for cpu_relax() */ +#include + +/* + * include/linux/spinlock_up.h - UP-debug version of spinlocks. + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + * + * In the debug case, 1 means unlocked, 0 means locked. (the values + * are inverted, to catch initialization bugs) + * + * No atomicity anywhere, we are on UP. 
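+ * (In this debug flavour, arch_spin_lock() below just stores 0 to
+ * ->slock and arch_spin_unlock() stores 1 back.)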
However, we still need + * the compiler barriers, because we do not want the compiler to + * move potentially faulting instructions (notably user accesses) + * into the locked sequence, resulting in non-atomic execution. + */ + +#ifdef CONFIG_DEBUG_SPINLOCK +#define arch_spin_is_locked(x) ((x)->slock == 0) + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + lock->slock = 0; + barrier(); +} + +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + char oldval = lock->slock; + + lock->slock = 0; + barrier(); + + return oldval > 0; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + barrier(); + lock->slock = 1; +} + +/* + * Read-write spinlocks. No debug version. + */ +#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) +#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; }) +#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0) + +#else /* DEBUG_SPINLOCK */ +#define arch_spin_is_locked(lock) ((void)(lock), 0) +/* for sched/core.c and kernel_lock.c: */ +# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) +# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) +# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0) +# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) +#endif /* DEBUG_SPINLOCK */ + +#define arch_spin_is_contended(lock) (((void)(lock), 0)) + +#endif /* __LINUX_SPINLOCK_UP_H */ diff --git a/include/linux/splice.h b/include/linux/splice.h new file mode 100644 index 000000000..74b4911ac --- /dev/null +++ b/include/linux/splice.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Function declerations and data structures related to the splice + * implementation. 
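+ *
+ * Rough call pattern (for orientation only): generic helpers such as
+ * __splice_from_pipe() walk the pipe's buffers and call a splice_actor
+ * for each pipe_buffer to be consumed, passing a splice_desc that
+ * carries the remaining length, the splice flags and the output cookie.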
+ * + * Copyright (C) 2007 Jens Axboe + * + */ +#ifndef SPLICE_H +#define SPLICE_H + +#include + +/* + * Flags passed in from splice/tee/vmsplice + */ +#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ +#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ + /* we may still block on the fd we splice */ + /* from/to, of course */ +#define SPLICE_F_MORE (0x04) /* expect more data */ +#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */ + +#define SPLICE_F_ALL (SPLICE_F_MOVE|SPLICE_F_NONBLOCK|SPLICE_F_MORE|SPLICE_F_GIFT) + +/* + * Passed to the actors + */ +struct splice_desc { + size_t total_len; /* remaining length */ + unsigned int len; /* current length */ + unsigned int flags; /* splice flags */ + /* + * actor() private data + */ + union { + void __user *userptr; /* memory to write to */ + struct file *file; /* file to read/write */ + void *data; /* cookie */ + } u; + loff_t pos; /* file position */ + loff_t *opos; /* sendfile: output position */ + size_t num_spliced; /* number of bytes already spliced */ + bool need_wakeup; /* need to wake up writer */ +}; + +struct partial_page { + unsigned int offset; + unsigned int len; + unsigned long private; +}; + +/* + * Passed to splice_to_pipe + */ +struct splice_pipe_desc { + struct page **pages; /* page map */ + struct partial_page *partial; /* pages[] may not be contig */ + int nr_pages; /* number of populated pages in map */ + unsigned int nr_pages_max; /* pages[] & partial[] arrays size */ + const struct pipe_buf_operations *ops;/* ops associated with output pipe */ + void (*spd_release)(struct splice_pipe_desc *, unsigned int); +}; + +typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *, + struct splice_desc *); +typedef int (splice_direct_actor)(struct pipe_inode_info *, + struct splice_desc *); + +extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *, + loff_t *, size_t, unsigned int, + splice_actor *); +extern ssize_t __splice_from_pipe(struct pipe_inode_info *, + struct splice_desc *, splice_actor *); +extern ssize_t splice_to_pipe(struct pipe_inode_info *, + struct splice_pipe_desc *); +extern ssize_t add_to_pipe(struct pipe_inode_info *, + struct pipe_buffer *); +extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, + splice_direct_actor *); + +/* + * for dynamic pipe sizing + */ +extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *); +extern void splice_shrink_spd(struct splice_pipe_desc *); + +extern const struct pipe_buf_operations page_cache_pipe_buf_ops; +extern const struct pipe_buf_operations default_pipe_buf_ops; +#endif diff --git a/include/linux/spmi.h b/include/linux/spmi.h new file mode 100644 index 000000000..1396a255d --- /dev/null +++ b/include/linux/spmi.h @@ -0,0 +1,190 @@ +/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
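+ *
+ * Illustrative sketch of a minimal client driver (all names and the
+ * register address are examples only, not part of this header):
+ *
+ *	static int my_probe(struct spmi_device *sdev)
+ *	{
+ *		u8 val;
+ *
+ *		return spmi_register_read(sdev, 0x00, &val);
+ *	}
+ *
+ *	static struct spmi_driver my_driver = {
+ *		.driver	= { .name = "my-spmi-client" },
+ *		.probe	= my_probe,
+ *	};
+ *	module_spmi_driver(my_driver);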
+ */ +#ifndef _LINUX_SPMI_H +#define _LINUX_SPMI_H + +#include +#include +#include + +/* Maximum slave identifier */ +#define SPMI_MAX_SLAVE_ID 16 + +/* SPMI Commands */ +#define SPMI_CMD_EXT_WRITE 0x00 +#define SPMI_CMD_RESET 0x10 +#define SPMI_CMD_SLEEP 0x11 +#define SPMI_CMD_SHUTDOWN 0x12 +#define SPMI_CMD_WAKEUP 0x13 +#define SPMI_CMD_AUTHENTICATE 0x14 +#define SPMI_CMD_MSTR_READ 0x15 +#define SPMI_CMD_MSTR_WRITE 0x16 +#define SPMI_CMD_TRANSFER_BUS_OWNERSHIP 0x1A +#define SPMI_CMD_DDB_MASTER_READ 0x1B +#define SPMI_CMD_DDB_SLAVE_READ 0x1C +#define SPMI_CMD_EXT_READ 0x20 +#define SPMI_CMD_EXT_WRITEL 0x30 +#define SPMI_CMD_EXT_READL 0x38 +#define SPMI_CMD_WRITE 0x40 +#define SPMI_CMD_READ 0x60 +#define SPMI_CMD_ZERO_WRITE 0x80 + +/** + * struct spmi_device - Basic representation of an SPMI device + * @dev: Driver model representation of the device. + * @ctrl: SPMI controller managing the bus hosting this device. + * @usid: This devices' Unique Slave IDentifier. + */ +struct spmi_device { + struct device dev; + struct spmi_controller *ctrl; + u8 usid; +}; + +static inline struct spmi_device *to_spmi_device(struct device *d) +{ + return container_of(d, struct spmi_device, dev); +} + +static inline void *spmi_device_get_drvdata(const struct spmi_device *sdev) +{ + return dev_get_drvdata(&sdev->dev); +} + +static inline void spmi_device_set_drvdata(struct spmi_device *sdev, void *data) +{ + dev_set_drvdata(&sdev->dev, data); +} + +struct spmi_device *spmi_device_alloc(struct spmi_controller *ctrl); + +static inline void spmi_device_put(struct spmi_device *sdev) +{ + if (sdev) + put_device(&sdev->dev); +} + +int spmi_device_add(struct spmi_device *sdev); + +void spmi_device_remove(struct spmi_device *sdev); + +/** + * struct spmi_controller - interface to the SPMI master controller + * @dev: Driver model representation of the device. + * @nr: board-specific number identifier for this controller/bus + * @cmd: sends a non-data command sequence on the SPMI bus. + * @read_cmd: sends a register read command sequence on the SPMI bus. + * @write_cmd: sends a register write command sequence on the SPMI bus. + */ +struct spmi_controller { + struct device dev; + unsigned int nr; + int (*cmd)(struct spmi_controller *ctrl, u8 opcode, u8 sid); + int (*read_cmd)(struct spmi_controller *ctrl, u8 opcode, + u8 sid, u16 addr, u8 *buf, size_t len); + int (*write_cmd)(struct spmi_controller *ctrl, u8 opcode, + u8 sid, u16 addr, const u8 *buf, size_t len); +}; + +static inline struct spmi_controller *to_spmi_controller(struct device *d) +{ + return container_of(d, struct spmi_controller, dev); +} + +static inline +void *spmi_controller_get_drvdata(const struct spmi_controller *ctrl) +{ + return dev_get_drvdata(&ctrl->dev); +} + +static inline void spmi_controller_set_drvdata(struct spmi_controller *ctrl, + void *data) +{ + dev_set_drvdata(&ctrl->dev, data); +} + +struct spmi_controller *spmi_controller_alloc(struct device *parent, + size_t size); + +/** + * spmi_controller_put() - decrement controller refcount + * @ctrl SPMI controller. + */ +static inline void spmi_controller_put(struct spmi_controller *ctrl) +{ + if (ctrl) + put_device(&ctrl->dev); +} + +int spmi_controller_add(struct spmi_controller *ctrl); +void spmi_controller_remove(struct spmi_controller *ctrl); + +/** + * struct spmi_driver - SPMI slave device driver + * @driver: SPMI device drivers should initialize name and owner field of + * this structure. + * @probe: binds this driver to a SPMI device. 
+ * @remove: unbinds this driver from the SPMI device. + * + * If PM runtime support is desired for a slave, a device driver can call + * pm_runtime_put() from their probe() routine (and a balancing + * pm_runtime_get() in remove()). PM runtime support for a slave is + * implemented by issuing a SLEEP command to the slave on runtime_suspend(), + * transitioning the slave into the SLEEP state. On runtime_resume(), a WAKEUP + * command is sent to the slave to bring it back to ACTIVE. + */ +struct spmi_driver { + struct device_driver driver; + int (*probe)(struct spmi_device *sdev); + void (*remove)(struct spmi_device *sdev); +}; + +static inline struct spmi_driver *to_spmi_driver(struct device_driver *d) +{ + return container_of(d, struct spmi_driver, driver); +} + +#define spmi_driver_register(sdrv) \ + __spmi_driver_register(sdrv, THIS_MODULE) +int __spmi_driver_register(struct spmi_driver *sdrv, struct module *owner); + +/** + * spmi_driver_unregister() - unregister an SPMI client driver + * @sdrv: the driver to unregister + */ +static inline void spmi_driver_unregister(struct spmi_driver *sdrv) +{ + if (sdrv) + driver_unregister(&sdrv->driver); +} + +#define module_spmi_driver(__spmi_driver) \ + module_driver(__spmi_driver, spmi_driver_register, \ + spmi_driver_unregister) + +int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf); +int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf, + size_t len); +int spmi_ext_register_readl(struct spmi_device *sdev, u16 addr, u8 *buf, + size_t len); +int spmi_register_write(struct spmi_device *sdev, u8 addr, u8 data); +int spmi_register_zero_write(struct spmi_device *sdev, u8 data); +int spmi_ext_register_write(struct spmi_device *sdev, u8 addr, + const u8 *buf, size_t len); +int spmi_ext_register_writel(struct spmi_device *sdev, u16 addr, + const u8 *buf, size_t len); +int spmi_command_reset(struct spmi_device *sdev); +int spmi_command_sleep(struct spmi_device *sdev); +int spmi_command_wakeup(struct spmi_device *sdev); +int spmi_command_shutdown(struct spmi_device *sdev); + +#endif diff --git a/include/linux/sram.h b/include/linux/sram.h new file mode 100644 index 000000000..4fb405fb0 --- /dev/null +++ b/include/linux/sram.h @@ -0,0 +1,27 @@ +/* + * Generic SRAM Driver Interface + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
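+ *
+ * Illustrative use of the interface below (sketch; pool, dst, src and
+ * size are caller provided). When CONFIG_SRAM_EXEC is not enabled the
+ * stub below returns NULL, so a caller typically needs a fallback:
+ *
+ *	void *exec = sram_exec_copy(pool, dst, my_func, size);
+ *	if (!exec)
+ *		... fall back to running my_func from its normal location ...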
+ */ +#ifndef __LINUX_SRAM_H__ +#define __LINUX_SRAM_H__ + +struct gen_pool; + +#ifdef CONFIG_SRAM_EXEC +void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size); +#else +static inline void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src, + size_t size) +{ + return NULL; +} +#endif /* CONFIG_SRAM_EXEC */ +#endif /* __LINUX_SRAM_H__ */ diff --git a/include/linux/srcu.h b/include/linux/srcu.h new file mode 100644 index 000000000..67135d4a8 --- /dev/null +++ b/include/linux/srcu.h @@ -0,0 +1,248 @@ +/* + * Sleepable Read-Copy Update mechanism for mutual exclusion + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright (C) IBM Corporation, 2006 + * Copyright (C) Fujitsu, 2012 + * + * Author: Paul McKenney + * Lai Jiangshan + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU/ *.txt + * + */ + +#ifndef _LINUX_SRCU_H +#define _LINUX_SRCU_H + +#include +#include +#include +#include + +struct srcu_struct; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +int __init_srcu_struct(struct srcu_struct *sp, const char *name, + struct lock_class_key *key); + +#define init_srcu_struct(sp) \ +({ \ + static struct lock_class_key __srcu_key; \ + \ + __init_srcu_struct((sp), #sp, &__srcu_key); \ +}) + +#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name }, +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +int init_srcu_struct(struct srcu_struct *sp); + +#define __SRCU_DEP_MAP_INIT(srcu_name) +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +#ifdef CONFIG_TINY_SRCU +#include +#elif defined(CONFIG_TREE_SRCU) +#include +#elif defined(CONFIG_SRCU) +#error "Unknown SRCU implementation specified to kernel configuration" +#else +/* Dummy definition for things like notifiers. Actual use gets link error. */ +struct srcu_struct { }; +#endif + +void call_srcu(struct srcu_struct *sp, struct rcu_head *head, + void (*func)(struct rcu_head *head)); +void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced); +int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp); +void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); +void synchronize_srcu(struct srcu_struct *sp); + +/** + * cleanup_srcu_struct - deconstruct a sleep-RCU structure + * @sp: structure to clean up. + * + * Must invoke this after you are finished using a given srcu_struct that + * was initialized via init_srcu_struct(), else you leak memory. + */ +static inline void cleanup_srcu_struct(struct srcu_struct *sp) +{ + _cleanup_srcu_struct(sp, false); +} + +/** + * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure + * @sp: structure to clean up. + * + * Must invoke this after you are finished using a given srcu_struct that + * was initialized via init_srcu_struct(), else you leak memory. Also, + * all grace-period processing must have completed. 
+ * + * "Completed" means that the last synchronize_srcu() and + * synchronize_srcu_expedited() calls must have returned before the call + * to cleanup_srcu_struct_quiesced(). It also means that the callback + * from the last call_srcu() must have been invoked before the call to + * cleanup_srcu_struct_quiesced(), but you can use srcu_barrier() to help + * with this last. Violating these rules will get you a WARN_ON() splat + * (with high probability, anyway), and will also cause the srcu_struct + * to be leaked. + */ +static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp) +{ + _cleanup_srcu_struct(sp, true); +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +/** + * srcu_read_lock_held - might we be in SRCU read-side critical section? + * @sp: The srcu_struct structure to check + * + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU + * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, + * this assumes we are in an SRCU read-side critical section unless it can + * prove otherwise. + * + * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot + * and while lockdep is disabled. + * + * Note that SRCU is based on its own statemachine and it doesn't + * relies on normal RCU, it can be called from the CPU which + * is in the idle loop from an RCU point of view or offline. + */ +static inline int srcu_read_lock_held(const struct srcu_struct *sp) +{ + if (!debug_lockdep_rcu_enabled()) + return 1; + return lock_is_held(&sp->dep_map); +} + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +static inline int srcu_read_lock_held(const struct srcu_struct *sp) +{ + return 1; +} + +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +/** + * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing + * @p: the pointer to fetch and protect for later dereferencing + * @sp: pointer to the srcu_struct, which is used to check that we + * really are in an SRCU read-side critical section. + * @c: condition to check for update-side use + * + * If PROVE_RCU is enabled, invoking this outside of an RCU read-side + * critical section will result in an RCU-lockdep splat, unless @c evaluates + * to 1. The @c argument will normally be a logical expression containing + * lockdep_is_held() calls. + */ +#define srcu_dereference_check(p, sp, c) \ + __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu) + +/** + * srcu_dereference - fetch SRCU-protected pointer for later dereferencing + * @p: the pointer to fetch and protect for later dereferencing + * @sp: pointer to the srcu_struct, which is used to check that we + * really are in an SRCU read-side critical section. + * + * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU + * is enabled, invoking this outside of an RCU read-side critical + * section will result in an RCU-lockdep splat. + */ +#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) + +/** + * srcu_dereference_notrace - no tracing and no lockdep calls from here + */ +#define srcu_dereference_notrace(p, sp) srcu_dereference_check((p), (sp), 1) + +/** + * srcu_read_lock - register a new reader for an SRCU-protected structure. + * @sp: srcu_struct in which to register the new reader. + * + * Enter an SRCU read-side critical section. Note that SRCU read-side + * critical sections may be nested. However, it is illegal to + * call anything that waits on an SRCU grace period for the same + * srcu_struct, whether directly or indirectly. 
Please note that + * one way to indirectly wait on an SRCU grace period is to acquire + * a mutex that is held elsewhere while calling synchronize_srcu() or + * synchronize_srcu_expedited(). + * + * Note that srcu_read_lock() and the matching srcu_read_unlock() must + * occur in the same context, for example, it is illegal to invoke + * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() + * was invoked in process context. + */ +static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) +{ + int retval; + + retval = __srcu_read_lock(sp); + rcu_lock_acquire(&(sp)->dep_map); + return retval; +} + +/* Used by tracing, cannot be traced and cannot invoke lockdep. */ +static inline notrace int +srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp) +{ + int retval; + + retval = __srcu_read_lock(sp); + return retval; +} + +/** + * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. + * @sp: srcu_struct in which to unregister the old reader. + * @idx: return value from corresponding srcu_read_lock(). + * + * Exit an SRCU read-side critical section. + */ +static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) + __releases(sp) +{ + rcu_lock_release(&(sp)->dep_map); + __srcu_read_unlock(sp, idx); +} + +/* Used by tracing, cannot be traced and cannot call lockdep. */ +static inline notrace void +srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp) +{ + __srcu_read_unlock(sp, idx); +} + +/** + * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock + * + * Converts the preceding srcu_read_unlock into a two-way memory barrier. + * + * Call this after srcu_read_unlock, to guarantee that all memory operations + * that occur after smp_mb__after_srcu_read_unlock will appear to happen after + * the preceding srcu_read_unlock. + */ +static inline void smp_mb__after_srcu_read_unlock(void) +{ + /* __srcu_read_unlock has smp_mb() internally so nothing to do here. */ +} + +#endif diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h new file mode 100644 index 000000000..f41d2fb09 --- /dev/null +++ b/include/linux/srcutiny.h @@ -0,0 +1,103 @@ +/* + * Sleepable Read-Copy Update mechanism for mutual exclusion, + * tiny variant. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright (C) IBM Corporation, 2017 + * + * Author: Paul McKenney + */ + +#ifndef _LINUX_SRCU_TINY_H +#define _LINUX_SRCU_TINY_H + +#include + +struct srcu_struct { + short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */ + short srcu_idx; /* Current reader array element. */ + u8 srcu_gp_running; /* GP workqueue running? */ + u8 srcu_gp_waiting; /* GP waiting for readers? */ + struct swait_queue_head srcu_wq; + /* Last srcu_read_unlock() wakes GP. */ + struct rcu_head *srcu_cb_head; /* Pending callbacks: Head. 
*/ + struct rcu_head **srcu_cb_tail; /* Pending callbacks: Tail. */ + struct work_struct srcu_work; /* For driving grace periods. */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +}; + +void srcu_drive_gp(struct work_struct *wp); + +#define __SRCU_STRUCT_INIT(name, __ignored) \ +{ \ + .srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \ + .srcu_cb_tail = &name.srcu_cb_head, \ + .srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp), \ + __SRCU_DEP_MAP_INIT(name) \ +} + +/* + * This odd _STATIC_ arrangement is needed for API compatibility with + * Tree SRCU, which needs some per-CPU data. + */ +#define DEFINE_SRCU(name) \ + struct srcu_struct name = __SRCU_STRUCT_INIT(name, name) +#define DEFINE_STATIC_SRCU(name) \ + static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name) + +void synchronize_srcu(struct srcu_struct *sp); + +/* + * Counts the new reader in the appropriate per-CPU element of the + * srcu_struct. Can be invoked from irq/bh handlers, but the matching + * __srcu_read_unlock() must be in the same handler instance. Returns an + * index that must be passed to the matching srcu_read_unlock(). + */ +static inline int __srcu_read_lock(struct srcu_struct *sp) +{ + int idx; + + idx = READ_ONCE(sp->srcu_idx); + WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1); + return idx; +} + +static inline void synchronize_srcu_expedited(struct srcu_struct *sp) +{ + synchronize_srcu(sp); +} + +static inline void srcu_barrier(struct srcu_struct *sp) +{ + synchronize_srcu(sp); +} + +/* Defined here to avoid size increase for non-torture kernels. */ +static inline void srcu_torture_stats_print(struct srcu_struct *sp, + char *tt, char *tf) +{ + int idx; + + idx = READ_ONCE(sp->srcu_idx) & 0x1; + pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", + tt, tf, idx, + READ_ONCE(sp->srcu_lock_nesting[!idx]), + READ_ONCE(sp->srcu_lock_nesting[idx])); +} + +#endif diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h new file mode 100644 index 000000000..745d4ca4d --- /dev/null +++ b/include/linux/srcutree.h @@ -0,0 +1,144 @@ +/* + * Sleepable Read-Copy Update mechanism for mutual exclusion, + * tree variant. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright (C) IBM Corporation, 2017 + * + * Author: Paul McKenney + */ + +#ifndef _LINUX_SRCU_TREE_H +#define _LINUX_SRCU_TREE_H + +#include +#include + +struct srcu_node; +struct srcu_struct; + +/* + * Per-CPU structure feeding into leaf srcu_node, similar in function + * to rcu_node. + */ +struct srcu_data { + /* Read-side state. */ + unsigned long srcu_lock_count[2]; /* Locks per CPU. */ + unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */ + + /* Update-side state. 
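+	 * (per-CPU callback list, grace-period sequence bookkeeping and
+	 * the delayed work used to invoke callbacks; see the field
+	 * comments below.)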
*/ + spinlock_t __private lock ____cacheline_internodealigned_in_smp; + struct rcu_segcblist srcu_cblist; /* List of callbacks.*/ + unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ + unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ + bool srcu_cblist_invoking; /* Invoking these CBs? */ + struct delayed_work work; /* Context for CB invoking. */ + struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ + struct srcu_node *mynode; /* Leaf srcu_node. */ + unsigned long grpmask; /* Mask for leaf srcu_node */ + /* ->srcu_data_have_cbs[]. */ + int cpu; + struct srcu_struct *sp; +}; + +/* + * Node in SRCU combining tree, similar in function to rcu_data. + */ +struct srcu_node { + spinlock_t __private lock; + unsigned long srcu_have_cbs[4]; /* GP seq for children */ + /* having CBs, but only */ + /* is > ->srcu_gq_seq. */ + unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */ + /* have CBs for given GP? */ + unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ + struct srcu_node *srcu_parent; /* Next up in tree. */ + int grplo; /* Least CPU for node. */ + int grphi; /* Biggest CPU for node. */ +}; + +/* + * Per-SRCU-domain structure, similar in function to rcu_state. + */ +struct srcu_struct { + struct srcu_node node[NUM_RCU_NODES]; /* Combining tree. */ + struct srcu_node *level[RCU_NUM_LVLS + 1]; + /* First node at each level. */ + struct mutex srcu_cb_mutex; /* Serialize CB preparation. */ + spinlock_t __private lock; /* Protect counters */ + struct mutex srcu_gp_mutex; /* Serialize GP work. */ + unsigned int srcu_idx; /* Current rdr array element. */ + unsigned long srcu_gp_seq; /* Grace-period seq #. */ + unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */ + unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ + unsigned long srcu_last_gp_end; /* Last GP end timestamp (ns) */ + struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */ + unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */ + struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */ + struct completion srcu_barrier_completion; + /* Awaken barrier rq at end. */ + atomic_t srcu_barrier_cpu_cnt; /* # CPUs not yet posting a */ + /* callback for the barrier */ + /* operation. */ + struct delayed_work work; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +}; + +/* Values for state variable (bottom bits of ->srcu_gp_seq). */ +#define SRCU_STATE_IDLE 0 +#define SRCU_STATE_SCAN1 1 +#define SRCU_STATE_SCAN2 2 + +#define __SRCU_STRUCT_INIT(name, pcpu_name) \ + { \ + .sda = &pcpu_name, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .srcu_gp_seq_needed = 0 - 1, \ + __SRCU_DEP_MAP_INIT(name) \ + } + +/* + * Define and initialize a srcu struct at build time. + * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it. + * + * Note that although DEFINE_STATIC_SRCU() hides the name from other + * files, the per-CPU variable rules nevertheless require that the + * chosen name be globally unique. These rules also prohibit use of + * DEFINE_STATIC_SRCU() within a function. If these rules are too + * restrictive, declare the srcu_struct manually. For example, in + * each file: + * + * static struct srcu_struct my_srcu; + * + * Then, before the first use of each my_srcu, manually initialize it: + * + * init_srcu_struct(&my_srcu); + * + * See include/linux/percpu-defs.h for the rules on per-CPU variables. 
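+ *
+ * And when a manually initialized my_srcu is no longer needed, clean
+ * it up to avoid leaking memory (see cleanup_srcu_struct() in
+ * include/linux/srcu.h):
+ *
+ *	cleanup_srcu_struct(&my_srcu);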
+ */ +#define __DEFINE_SRCU(name, is_static) \ + static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);\ + is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_data) +#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) +#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) + +void synchronize_srcu_expedited(struct srcu_struct *sp); +void srcu_barrier(struct srcu_struct *sp); +void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf); + +#endif diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h new file mode 100644 index 000000000..0d5a2691e --- /dev/null +++ b/include/linux/ssb/ssb.h @@ -0,0 +1,682 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_SSB_H_ +#define LINUX_SSB_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +struct pcmcia_device; +struct ssb_bus; +struct ssb_driver; + +struct ssb_sprom_core_pwr_info { + u8 itssi_2g, itssi_5g; + u8 maxpwr_2g, maxpwr_5gl, maxpwr_5g, maxpwr_5gh; + u16 pa_2g[4], pa_5gl[4], pa_5g[4], pa_5gh[4]; +}; + +struct ssb_sprom { + u8 revision; + u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */ + u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */ + u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */ + u8 et2mac[6] __aligned(sizeof(u16)); /* MAC address for extra Ethernet */ + u8 et0phyaddr; /* MII address for enet0 */ + u8 et1phyaddr; /* MII address for enet1 */ + u8 et2phyaddr; /* MII address for enet2 */ + u8 et0mdcport; /* MDIO for enet0 */ + u8 et1mdcport; /* MDIO for enet1 */ + u8 et2mdcport; /* MDIO for enet2 */ + u16 dev_id; /* Device ID overriding e.g. PCI ID */ + u16 board_rev; /* Board revision number from SPROM. */ + u16 board_num; /* Board number from SPROM. */ + u16 board_type; /* Board type from SPROM. 
*/ + u8 country_code; /* Country Code */ + char alpha2[2]; /* Country Code as two chars like EU or US */ + u8 leddc_on_time; /* LED Powersave Duty Cycle On Count */ + u8 leddc_off_time; /* LED Powersave Duty Cycle Off Count */ + u8 ant_available_a; /* 2GHz antenna available bits (up to 4) */ + u8 ant_available_bg; /* 5GHz antenna available bits (up to 4) */ + u16 pa0b0; + u16 pa0b1; + u16 pa0b2; + u16 pa1b0; + u16 pa1b1; + u16 pa1b2; + u16 pa1lob0; + u16 pa1lob1; + u16 pa1lob2; + u16 pa1hib0; + u16 pa1hib1; + u16 pa1hib2; + u8 gpio0; /* GPIO pin 0 */ + u8 gpio1; /* GPIO pin 1 */ + u8 gpio2; /* GPIO pin 2 */ + u8 gpio3; /* GPIO pin 3 */ + u8 maxpwr_bg; /* 2.4GHz Amplifier Max Power (in dBm Q5.2) */ + u8 maxpwr_al; /* 5.2GHz Amplifier Max Power (in dBm Q5.2) */ + u8 maxpwr_a; /* 5.3GHz Amplifier Max Power (in dBm Q5.2) */ + u8 maxpwr_ah; /* 5.8GHz Amplifier Max Power (in dBm Q5.2) */ + u8 itssi_a; /* Idle TSSI Target for A-PHY */ + u8 itssi_bg; /* Idle TSSI Target for B/G-PHY */ + u8 tri2g; /* 2.4GHz TX isolation */ + u8 tri5gl; /* 5.2GHz TX isolation */ + u8 tri5g; /* 5.3GHz TX isolation */ + u8 tri5gh; /* 5.8GHz TX isolation */ + u8 txpid2g[4]; /* 2GHz TX power index */ + u8 txpid5gl[4]; /* 4.9 - 5.1GHz TX power index */ + u8 txpid5g[4]; /* 5.1 - 5.5GHz TX power index */ + u8 txpid5gh[4]; /* 5.5 - ...GHz TX power index */ + s8 rxpo2g; /* 2GHz RX power offset */ + s8 rxpo5g; /* 5GHz RX power offset */ + u8 rssisav2g; /* 2GHz RSSI params */ + u8 rssismc2g; + u8 rssismf2g; + u8 bxa2g; /* 2GHz BX arch */ + u8 rssisav5g; /* 5GHz RSSI params */ + u8 rssismc5g; + u8 rssismf5g; + u8 bxa5g; /* 5GHz BX arch */ + u16 cck2gpo; /* CCK power offset */ + u32 ofdm2gpo; /* 2.4GHz OFDM power offset */ + u32 ofdm5glpo; /* 5.2GHz OFDM power offset */ + u32 ofdm5gpo; /* 5.3GHz OFDM power offset */ + u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */ + u32 boardflags; + u32 boardflags2; + u32 boardflags3; + /* TODO: Switch all drivers to new u32 fields and drop below ones */ + u16 boardflags_lo; /* Board flags (bits 0-15) */ + u16 boardflags_hi; /* Board flags (bits 16-31) */ + u16 boardflags2_lo; /* Board flags (bits 32-47) */ + u16 boardflags2_hi; /* Board flags (bits 48-63) */ + + struct ssb_sprom_core_pwr_info core_pwr_info[4]; + + /* Antenna gain values for up to 4 antennas + * on each band. Values in dBm/4 (Q5.2). Negative gain means the + * loss in the connectors is bigger than the gain. 
*/ + struct { + s8 a0, a1, a2, a3; + } antenna_gain; + + struct { + struct { + u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; + } ghz2; + struct { + u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; + } ghz5; + } fem; + + u16 mcs2gpo[8]; + u16 mcs5gpo[8]; + u16 mcs5glpo[8]; + u16 mcs5ghpo[8]; + u8 opo; + + u8 rxgainerr2ga[3]; + u8 rxgainerr5gla[3]; + u8 rxgainerr5gma[3]; + u8 rxgainerr5gha[3]; + u8 rxgainerr5gua[3]; + + u8 noiselvl2ga[3]; + u8 noiselvl5gla[3]; + u8 noiselvl5gma[3]; + u8 noiselvl5gha[3]; + u8 noiselvl5gua[3]; + + u8 regrev; + u8 txchain; + u8 rxchain; + u8 antswitch; + u16 cddpo; + u16 stbcpo; + u16 bw40po; + u16 bwduppo; + + u8 tempthresh; + u8 tempoffset; + u16 rawtempsense; + u8 measpower; + u8 tempsense_slope; + u8 tempcorrx; + u8 tempsense_option; + u8 freqoffset_corr; + u8 iqcal_swp_dis; + u8 hw_iqcal_en; + u8 elna2g; + u8 elna5g; + u8 phycal_tempdelta; + u8 temps_period; + u8 temps_hysteresis; + u8 measpower1; + u8 measpower2; + u8 pcieingress_war; + + /* power per rate from sromrev 9 */ + u16 cckbw202gpo; + u16 cckbw20ul2gpo; + u32 legofdmbw202gpo; + u32 legofdmbw20ul2gpo; + u32 legofdmbw205glpo; + u32 legofdmbw20ul5glpo; + u32 legofdmbw205gmpo; + u32 legofdmbw20ul5gmpo; + u32 legofdmbw205ghpo; + u32 legofdmbw20ul5ghpo; + u32 mcsbw202gpo; + u32 mcsbw20ul2gpo; + u32 mcsbw402gpo; + u32 mcsbw205glpo; + u32 mcsbw20ul5glpo; + u32 mcsbw405glpo; + u32 mcsbw205gmpo; + u32 mcsbw20ul5gmpo; + u32 mcsbw405gmpo; + u32 mcsbw205ghpo; + u32 mcsbw20ul5ghpo; + u32 mcsbw405ghpo; + u16 mcs32po; + u16 legofdm40duppo; + u8 sar2g; + u8 sar5g; +}; + +/* Information about the PCB the circuitry is soldered on. */ +struct ssb_boardinfo { + u16 vendor; + u16 type; +}; + + +struct ssb_device; +/* Lowlevel read/write operations on the device MMIO. + * Internal, don't use that outside of ssb. */ +struct ssb_bus_ops { + u8 (*read8)(struct ssb_device *dev, u16 offset); + u16 (*read16)(struct ssb_device *dev, u16 offset); + u32 (*read32)(struct ssb_device *dev, u16 offset); + void (*write8)(struct ssb_device *dev, u16 offset, u8 value); + void (*write16)(struct ssb_device *dev, u16 offset, u16 value); + void (*write32)(struct ssb_device *dev, u16 offset, u32 value); +#ifdef CONFIG_SSB_BLOCKIO + void (*block_read)(struct ssb_device *dev, void *buffer, + size_t count, u16 offset, u8 reg_width); + void (*block_write)(struct ssb_device *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width); +#endif +}; + + +/* Core-ID values. 
*/ +#define SSB_DEV_CHIPCOMMON 0x800 +#define SSB_DEV_ILINE20 0x801 +#define SSB_DEV_SDRAM 0x803 +#define SSB_DEV_PCI 0x804 +#define SSB_DEV_MIPS 0x805 +#define SSB_DEV_ETHERNET 0x806 +#define SSB_DEV_V90 0x807 +#define SSB_DEV_USB11_HOSTDEV 0x808 +#define SSB_DEV_ADSL 0x809 +#define SSB_DEV_ILINE100 0x80A +#define SSB_DEV_IPSEC 0x80B +#define SSB_DEV_PCMCIA 0x80D +#define SSB_DEV_INTERNAL_MEM 0x80E +#define SSB_DEV_MEMC_SDRAM 0x80F +#define SSB_DEV_EXTIF 0x811 +#define SSB_DEV_80211 0x812 +#define SSB_DEV_MIPS_3302 0x816 +#define SSB_DEV_USB11_HOST 0x817 +#define SSB_DEV_USB11_DEV 0x818 +#define SSB_DEV_USB20_HOST 0x819 +#define SSB_DEV_USB20_DEV 0x81A +#define SSB_DEV_SDIO_HOST 0x81B +#define SSB_DEV_ROBOSWITCH 0x81C +#define SSB_DEV_PARA_ATA 0x81D +#define SSB_DEV_SATA_XORDMA 0x81E +#define SSB_DEV_ETHERNET_GBIT 0x81F +#define SSB_DEV_PCIE 0x820 +#define SSB_DEV_MIMO_PHY 0x821 +#define SSB_DEV_SRAM_CTRLR 0x822 +#define SSB_DEV_MINI_MACPHY 0x823 +#define SSB_DEV_ARM_1176 0x824 +#define SSB_DEV_ARM_7TDMI 0x825 +#define SSB_DEV_ARM_CM3 0x82A + +/* Vendor-ID values */ +#define SSB_VENDOR_BROADCOM 0x4243 + +/* Some kernel subsystems poke with dev->drvdata, so we must use the + * following ugly workaround to get from struct device to struct ssb_device */ +struct __ssb_dev_wrapper { + struct device dev; + struct ssb_device *sdev; +}; + +struct ssb_device { + /* Having a copy of the ops pointer in each dev struct + * is an optimization. */ + const struct ssb_bus_ops *ops; + + struct device *dev, *dma_dev; + + struct ssb_bus *bus; + struct ssb_device_id id; + + u8 core_index; + unsigned int irq; + + /* Internal-only stuff follows. */ + void *drvdata; /* Per-device data */ + void *devtypedata; /* Per-devicetype (eg 802.11) data */ +}; + +/* Go from struct device to struct ssb_device. */ +static inline +struct ssb_device * dev_to_ssb_dev(struct device *dev) +{ + struct __ssb_dev_wrapper *wrap; + wrap = container_of(dev, struct __ssb_dev_wrapper, dev); + return wrap->sdev; +} + +/* Device specific user data */ +static inline +void ssb_set_drvdata(struct ssb_device *dev, void *data) +{ + dev->drvdata = data; +} +static inline +void * ssb_get_drvdata(struct ssb_device *dev) +{ + return dev->drvdata; +} + +/* Devicetype specific user data. 
This is per device-type (not per device) */ +void ssb_set_devtypedata(struct ssb_device *dev, void *data); +static inline +void * ssb_get_devtypedata(struct ssb_device *dev) +{ + return dev->devtypedata; +} + + +struct ssb_driver { + const char *name; + const struct ssb_device_id *id_table; + + int (*probe)(struct ssb_device *dev, const struct ssb_device_id *id); + void (*remove)(struct ssb_device *dev); + int (*suspend)(struct ssb_device *dev, pm_message_t state); + int (*resume)(struct ssb_device *dev); + void (*shutdown)(struct ssb_device *dev); + + struct device_driver drv; +}; +#define drv_to_ssb_drv(_drv) container_of(_drv, struct ssb_driver, drv) + +extern int __ssb_driver_register(struct ssb_driver *drv, struct module *owner); +#define ssb_driver_register(drv) \ + __ssb_driver_register(drv, THIS_MODULE) + +extern void ssb_driver_unregister(struct ssb_driver *drv); + + + + +enum ssb_bustype { + SSB_BUSTYPE_SSB, /* This SSB bus is the system bus */ + SSB_BUSTYPE_PCI, /* SSB is connected to PCI bus */ + SSB_BUSTYPE_PCMCIA, /* SSB is connected to PCMCIA bus */ + SSB_BUSTYPE_SDIO, /* SSB is connected to SDIO bus */ +}; + +/* board_vendor */ +#define SSB_BOARDVENDOR_BCM 0x14E4 /* Broadcom */ +#define SSB_BOARDVENDOR_DELL 0x1028 /* Dell */ +#define SSB_BOARDVENDOR_HP 0x0E11 /* HP */ +/* board_type */ +#define SSB_BOARD_BCM94301CB 0x0406 +#define SSB_BOARD_BCM94301MP 0x0407 +#define SSB_BOARD_BU4309 0x040A +#define SSB_BOARD_BCM94309CB 0x040B +#define SSB_BOARD_BCM4309MP 0x040C +#define SSB_BOARD_BU4306 0x0416 +#define SSB_BOARD_BCM94306MP 0x0418 +#define SSB_BOARD_BCM4309G 0x0421 +#define SSB_BOARD_BCM4306CB 0x0417 +#define SSB_BOARD_BCM94306PC 0x0425 /* pcmcia 3.3v 4306 card */ +#define SSB_BOARD_BCM94306CBSG 0x042B /* with SiGe PA */ +#define SSB_BOARD_PCSG94306 0x042D /* with SiGe PA */ +#define SSB_BOARD_BU4704SD 0x042E /* with sdram */ +#define SSB_BOARD_BCM94704AGR 0x042F /* dual 11a/11g Router */ +#define SSB_BOARD_BCM94308MP 0x0430 /* 11a-only minipci */ +#define SSB_BOARD_BU4318 0x0447 +#define SSB_BOARD_CB4318 0x0448 +#define SSB_BOARD_MPG4318 0x0449 +#define SSB_BOARD_MP4318 0x044A +#define SSB_BOARD_SD4318 0x044B +#define SSB_BOARD_BCM94306P 0x044C /* with SiGe */ +#define SSB_BOARD_BCM94303MP 0x044E +#define SSB_BOARD_BCM94306MPM 0x0450 +#define SSB_BOARD_BCM94306MPL 0x0453 +#define SSB_BOARD_PC4303 0x0454 /* pcmcia */ +#define SSB_BOARD_BCM94306MPLNA 0x0457 +#define SSB_BOARD_BCM94306MPH 0x045B +#define SSB_BOARD_BCM94306PCIV 0x045C +#define SSB_BOARD_BCM94318MPGH 0x0463 +#define SSB_BOARD_BU4311 0x0464 +#define SSB_BOARD_BCM94311MC 0x0465 +#define SSB_BOARD_BCM94311MCAG 0x0466 +/* 4321 boards */ +#define SSB_BOARD_BU4321 0x046B +#define SSB_BOARD_BU4321E 0x047C +#define SSB_BOARD_MP4321 0x046C +#define SSB_BOARD_CB2_4321 0x046D +#define SSB_BOARD_CB2_4321_AG 0x0066 +#define SSB_BOARD_MC4321 0x046E +/* 4325 boards */ +#define SSB_BOARD_BCM94325DEVBU 0x0490 +#define SSB_BOARD_BCM94325BGABU 0x0491 +#define SSB_BOARD_BCM94325SDGWB 0x0492 +#define SSB_BOARD_BCM94325SDGMDL 0x04AA +#define SSB_BOARD_BCM94325SDGMDL2 0x04C6 +#define SSB_BOARD_BCM94325SDGMDL3 0x04C9 +#define SSB_BOARD_BCM94325SDABGWBA 0x04E1 +/* 4322 boards */ +#define SSB_BOARD_BCM94322MC 0x04A4 +#define SSB_BOARD_BCM94322USB 0x04A8 /* dualband */ +#define SSB_BOARD_BCM94322HM 0x04B0 +#define SSB_BOARD_BCM94322USB2D 0x04Bf /* single band discrete front end */ +/* 4312 boards */ +#define SSB_BOARD_BU4312 0x048A +#define SSB_BOARD_BCM4312MCGSG 0x04B5 +/* chip_package */ +#define SSB_CHIPPACK_BCM4712S 1 /* Small 200pin 
4712 */ +#define SSB_CHIPPACK_BCM4712M 2 /* Medium 225pin 4712 */ +#define SSB_CHIPPACK_BCM4712L 0 /* Large 340pin 4712 */ + +#include +#include +#include +#include + +struct ssb_bus { + /* The MMIO area. */ + void __iomem *mmio; + + const struct ssb_bus_ops *ops; + + /* The core currently mapped into the MMIO window. + * Not valid on all host-buses. So don't use outside of SSB. */ + struct ssb_device *mapped_device; + union { + /* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */ + u8 mapped_pcmcia_seg; + /* Current SSB base address window for SDIO. */ + u32 sdio_sbaddr; + }; + /* Lock for core and segment switching. + * On PCMCIA-host busses this is used to protect the whole MMIO access. */ + spinlock_t bar_lock; + + /* The host-bus this backplane is running on. */ + enum ssb_bustype bustype; + /* Pointers to the host-bus. Check bustype before using any of these pointers. */ + union { + /* Pointer to the PCI bus (only valid if bustype == SSB_BUSTYPE_PCI). */ + struct pci_dev *host_pci; + /* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */ + struct pcmcia_device *host_pcmcia; + /* Pointer to the SDIO device (only if bustype == SSB_BUSTYPE_SDIO). */ + struct sdio_func *host_sdio; + }; + + /* See enum ssb_quirks */ + unsigned int quirks; + +#ifdef CONFIG_SSB_SPROM + /* Mutex to protect the SPROM writing. */ + struct mutex sprom_mutex; +#endif + + /* ID information about the Chip. */ + u16 chip_id; + u8 chip_rev; + u16 sprom_offset; + u16 sprom_size; /* number of words in sprom */ + u8 chip_package; + + /* List of devices (cores) on the backplane. */ + struct ssb_device devices[SSB_MAX_NR_CORES]; + u8 nr_devices; + + /* Software ID number for this bus. */ + unsigned int busnumber; + + /* The ChipCommon device (if available). */ + struct ssb_chipcommon chipco; + /* The PCI-core device (if available). */ + struct ssb_pcicore pcicore; + /* The MIPS-core device (if available). */ + struct ssb_mipscore mipscore; + /* The EXTif-core device (if available). */ + struct ssb_extif extif; + + /* The following structure elements are not available in early + * SSB initialization. Though, they are available for regular + * registered drivers at any stage. So be careful when + * using them in the ssb core code. */ + + /* ID information about the PCB. */ + struct ssb_boardinfo boardinfo; + /* Contents of the SPROM. */ + struct ssb_sprom sprom; + /* If the board has a cardbus slot, this is set to true. */ + bool has_cardbus_slot; + +#ifdef CONFIG_SSB_EMBEDDED + /* Lock for GPIO register access. */ + spinlock_t gpio_lock; + struct platform_device *watchdog; +#endif /* EMBEDDED */ +#ifdef CONFIG_SSB_DRIVER_GPIO + struct gpio_chip gpio; + struct irq_domain *irq_domain; +#endif /* DRIVER_GPIO */ + + /* Internal-only stuff follows. Do not touch. */ + struct list_head list; + /* Is the bus already powered up? */ + bool powered_up; + int power_warn_count; +}; + +enum ssb_quirks { + /* SDIO connected card requires performing a read after writing a 32-bit value */ + SSB_QUIRK_SDIO_READ_AFTER_WRITE32 = (1 << 0), +}; + +/* The initialization-invariants. */ +struct ssb_init_invariants { + /* Versioning information about the PCB. */ + struct ssb_boardinfo boardinfo; + /* The SPROM information. That's either stored in an + * EEPROM or NVRAM on the board. */ + struct ssb_sprom sprom; + /* If the board has a cardbus slot, this is set to true. */ + bool has_cardbus_slot; +}; +/* Type of function to fetch the invariants. 
*/ +typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus, + struct ssb_init_invariants *iv); + +/* Register SoC bus. */ +extern int ssb_bus_host_soc_register(struct ssb_bus *bus, + unsigned long baseaddr); +#ifdef CONFIG_SSB_PCIHOST +extern int ssb_bus_pcibus_register(struct ssb_bus *bus, + struct pci_dev *host_pci); +#endif /* CONFIG_SSB_PCIHOST */ +#ifdef CONFIG_SSB_PCMCIAHOST +extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus, + struct pcmcia_device *pcmcia_dev, + unsigned long baseaddr); +#endif /* CONFIG_SSB_PCMCIAHOST */ +#ifdef CONFIG_SSB_SDIOHOST +extern int ssb_bus_sdiobus_register(struct ssb_bus *bus, + struct sdio_func *sdio_func, + unsigned int quirks); +#endif /* CONFIG_SSB_SDIOHOST */ + + +extern void ssb_bus_unregister(struct ssb_bus *bus); + +/* Does the device have an SPROM? */ +extern bool ssb_is_sprom_available(struct ssb_bus *bus); + +/* Set a fallback SPROM. + * See kdoc at the function definition for complete documentation. */ +extern int ssb_arch_register_fallback_sprom( + int (*sprom_callback)(struct ssb_bus *bus, + struct ssb_sprom *out)); + +/* Suspend a SSB bus. + * Call this from the parent bus suspend routine. */ +extern int ssb_bus_suspend(struct ssb_bus *bus); +/* Resume a SSB bus. + * Call this from the parent bus resume routine. */ +extern int ssb_bus_resume(struct ssb_bus *bus); + +extern u32 ssb_clockspeed(struct ssb_bus *bus); + +/* Is the device enabled in hardware? */ +int ssb_device_is_enabled(struct ssb_device *dev); +/* Enable a device and pass device-specific SSB_TMSLOW flags. + * If no device-specific flags are available, use 0. */ +void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags); +/* Disable a device in hardware and pass SSB_TMSLOW flags (if any). */ +void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags); + + +/* Device MMIO register read/write functions. */ +static inline u8 ssb_read8(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read8(dev, offset); +} +static inline u16 ssb_read16(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read16(dev, offset); +} +static inline u32 ssb_read32(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read32(dev, offset); +} +static inline void ssb_write8(struct ssb_device *dev, u16 offset, u8 value) +{ + dev->ops->write8(dev, offset, value); +} +static inline void ssb_write16(struct ssb_device *dev, u16 offset, u16 value) +{ + dev->ops->write16(dev, offset, value); +} +static inline void ssb_write32(struct ssb_device *dev, u16 offset, u32 value) +{ + dev->ops->write32(dev, offset, value); +} +#ifdef CONFIG_SSB_BLOCKIO +static inline void ssb_block_read(struct ssb_device *dev, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + dev->ops->block_read(dev, buffer, count, offset, reg_width); +} + +static inline void ssb_block_write(struct ssb_device *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + dev->ops->block_write(dev, buffer, count, offset, reg_width); +} +#endif /* CONFIG_SSB_BLOCKIO */ + + +/* The SSB DMA API. Use this API for any DMA operation on the device. + * This API basically is a wrapper that calls the correct DMA API for + * the host device type the SSB device is attached to. */ + +/* Translation (routing) bits that need to be ORed to DMA + * addresses before they are given to a device. 
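+ *
+ * Illustrative use (sketch; the mapping helper is hypothetical and
+ * driver-specific):
+ *
+ *	dmaaddr = my_map_descriptor(dev, buf, len);
+ *	dmaaddr |= ssb_dma_translation(dev);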
*/ +extern u32 ssb_dma_translation(struct ssb_device *dev); +#define SSB_DMA_TRANSLATION_MASK 0xC0000000 +#define SSB_DMA_TRANSLATION_SHIFT 30 + +static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) +{ +#ifdef CONFIG_SSB_DEBUG + printk(KERN_ERR "SSB: BUG! Calling DMA API for " + "unsupported bustype %d\n", dev->bus->bustype); +#endif /* DEBUG */ +} + +#ifdef CONFIG_SSB_PCIHOST +/* PCI-host wrapper driver */ +extern int ssb_pcihost_register(struct pci_driver *driver); +static inline void ssb_pcihost_unregister(struct pci_driver *driver) +{ + pci_unregister_driver(driver); +} + +static inline +void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state) +{ + if (sdev->bus->bustype == SSB_BUSTYPE_PCI) + pci_set_power_state(sdev->bus->host_pci, state); +} +#else +static inline void ssb_pcihost_unregister(struct pci_driver *driver) +{ +} + +static inline +void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state) +{ +} +#endif /* CONFIG_SSB_PCIHOST */ + + +/* If a driver is shutdown or suspended, call this to signal + * that the bus may be completely powered down. SSB will decide, + * if it's really time to power down the bus, based on if there + * are other devices that want to run. */ +extern int ssb_bus_may_powerdown(struct ssb_bus *bus); +/* Before initializing and enabling a device, call this to power-up the bus. + * If you want to allow use of dynamic-power-control, pass the flag. + * Otherwise static always-on powercontrol will be used. */ +extern int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl); + +extern void ssb_commit_settings(struct ssb_bus *bus); + +/* Various helper functions */ +extern u32 ssb_admatch_base(u32 adm); +extern u32 ssb_admatch_size(u32 adm); + +/* PCI device mapping and fixup routines. + * Called from the architecture pcibios init code. + * These are only available on SSB_EMBEDDED configurations. */ +#ifdef CONFIG_SSB_EMBEDDED +int ssb_pcibios_plat_dev_init(struct pci_dev *dev); +int ssb_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); +#endif /* CONFIG_SSB_EMBEDDED */ + +#endif /* LINUX_SSB_H_ */ diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h new file mode 100644 index 000000000..6fcfe99bd --- /dev/null +++ b/include/linux/ssb/ssb_driver_chipcommon.h @@ -0,0 +1,673 @@ +#ifndef LINUX_SSB_CHIPCO_H_ +#define LINUX_SSB_CHIPCO_H_ + +/* SonicsSiliconBackplane CHIPCOMMON core hardware definitions + * + * The chipcommon core provides chip identification, SB control, + * jtag, 0/1/2 uarts, clock frequency control, a watchdog interrupt timer, + * gpio interface, extbus, and support for serial and parallel flashes. + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, Michael Buesch + * + * Licensed under the GPL version 2. See COPYING for details. + */ + +/** ChipCommon core registers. 
**/ + +#define SSB_CHIPCO_CHIPID 0x0000 +#define SSB_CHIPCO_IDMASK 0x0000FFFF +#define SSB_CHIPCO_REVMASK 0x000F0000 +#define SSB_CHIPCO_REVSHIFT 16 +#define SSB_CHIPCO_PACKMASK 0x00F00000 +#define SSB_CHIPCO_PACKSHIFT 20 +#define SSB_CHIPCO_NRCORESMASK 0x0F000000 +#define SSB_CHIPCO_NRCORESSHIFT 24 +#define SSB_CHIPCO_CAP 0x0004 /* Capabilities */ +#define SSB_CHIPCO_CAP_NRUART 0x00000003 /* # of UARTs */ +#define SSB_CHIPCO_CAP_MIPSEB 0x00000004 /* MIPS in BigEndian Mode */ +#define SSB_CHIPCO_CAP_UARTCLK 0x00000018 /* UART clock select */ +#define SSB_CHIPCO_CAP_UARTCLK_INT 0x00000008 /* UARTs are driven by internal divided clock */ +#define SSB_CHIPCO_CAP_UARTGPIO 0x00000020 /* UARTs on GPIO 15-12 */ +#define SSB_CHIPCO_CAP_EXTBUS 0x000000C0 /* External buses present */ +#define SSB_CHIPCO_CAP_FLASHT 0x00000700 /* Flash Type */ +#define SSB_CHIPCO_FLASHT_NONE 0x00000000 /* No flash */ +#define SSB_CHIPCO_FLASHT_STSER 0x00000100 /* ST serial flash */ +#define SSB_CHIPCO_FLASHT_ATSER 0x00000200 /* Atmel serial flash */ +#define SSB_CHIPCO_FLASHT_PARA 0x00000700 /* Parallel flash */ +#define SSB_CHIPCO_CAP_PLLT 0x00038000 /* PLL Type */ +#define SSB_PLLTYPE_NONE 0x00000000 +#define SSB_PLLTYPE_1 0x00010000 /* 48Mhz base, 3 dividers */ +#define SSB_PLLTYPE_2 0x00020000 /* 48Mhz, 4 dividers */ +#define SSB_PLLTYPE_3 0x00030000 /* 25Mhz, 2 dividers */ +#define SSB_PLLTYPE_4 0x00008000 /* 48Mhz, 4 dividers */ +#define SSB_PLLTYPE_5 0x00018000 /* 25Mhz, 4 dividers */ +#define SSB_PLLTYPE_6 0x00028000 /* 100/200 or 120/240 only */ +#define SSB_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */ +#define SSB_CHIPCO_CAP_PCTL 0x00040000 /* Power Control */ +#define SSB_CHIPCO_CAP_OTPS 0x00380000 /* OTP size */ +#define SSB_CHIPCO_CAP_OTPS_SHIFT 19 +#define SSB_CHIPCO_CAP_OTPS_BASE 5 +#define SSB_CHIPCO_CAP_JTAGM 0x00400000 /* JTAG master present */ +#define SSB_CHIPCO_CAP_BROM 0x00800000 /* Internal boot ROM active */ +#define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */ +#define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */ +#define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */ +#define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */ +#define SSB_CHIPCO_CORECTL 0x0008 +#define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */ +#define SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */ +#define SSB_CHIPCO_CORECTL_UARTCLKEN 0x00000008 /* UART clock enable (rev >= 21) */ +#define SSB_CHIPCO_BIST 0x000C +#define SSB_CHIPCO_OTPS 0x0010 /* OTP status */ +#define SSB_CHIPCO_OTPS_PROGFAIL 0x80000000 +#define SSB_CHIPCO_OTPS_PROTECT 0x00000007 +#define SSB_CHIPCO_OTPS_HW_PROTECT 0x00000001 +#define SSB_CHIPCO_OTPS_SW_PROTECT 0x00000002 +#define SSB_CHIPCO_OTPS_CID_PROTECT 0x00000004 +#define SSB_CHIPCO_OTPC 0x0014 /* OTP control */ +#define SSB_CHIPCO_OTPC_RECWAIT 0xFF000000 +#define SSB_CHIPCO_OTPC_PROGWAIT 0x00FFFF00 +#define SSB_CHIPCO_OTPC_PRW_SHIFT 8 +#define SSB_CHIPCO_OTPC_MAXFAIL 0x00000038 +#define SSB_CHIPCO_OTPC_VSEL 0x00000006 +#define SSB_CHIPCO_OTPC_SELVL 0x00000001 +#define SSB_CHIPCO_OTPP 0x0018 /* OTP prog */ +#define SSB_CHIPCO_OTPP_COL 0x000000FF +#define SSB_CHIPCO_OTPP_ROW 0x0000FF00 +#define SSB_CHIPCO_OTPP_ROW_SHIFT 8 +#define SSB_CHIPCO_OTPP_READERR 0x10000000 +#define SSB_CHIPCO_OTPP_VALUE 0x20000000 +#define SSB_CHIPCO_OTPP_READ 0x40000000 +#define SSB_CHIPCO_OTPP_START 0x80000000 +#define SSB_CHIPCO_OTPP_BUSY 0x80000000 +#define SSB_CHIPCO_IRQSTAT 0x0020 +#define SSB_CHIPCO_IRQMASK 0x0024 
+#define SSB_CHIPCO_IRQ_GPIO 0x00000001 /* gpio intr */ +#define SSB_CHIPCO_IRQ_EXT 0x00000002 /* ro: ext intr pin (corerev >= 3) */ +#define SSB_CHIPCO_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */ +#define SSB_CHIPCO_CHIPCTL 0x0028 /* Rev >= 11 only */ +#define SSB_CHIPCO_CHIPSTAT 0x002C /* Rev >= 11 only */ +#define SSB_CHIPCO_JCMD 0x0030 /* Rev >= 10 only */ +#define SSB_CHIPCO_JCMD_START 0x80000000 +#define SSB_CHIPCO_JCMD_BUSY 0x80000000 +#define SSB_CHIPCO_JCMD_PAUSE 0x40000000 +#define SSB_CHIPCO_JCMD0_ACC_MASK 0x0000F000 +#define SSB_CHIPCO_JCMD0_ACC_IRDR 0x00000000 +#define SSB_CHIPCO_JCMD0_ACC_DR 0x00001000 +#define SSB_CHIPCO_JCMD0_ACC_IR 0x00002000 +#define SSB_CHIPCO_JCMD0_ACC_RESET 0x00003000 +#define SSB_CHIPCO_JCMD0_ACC_IRPDR 0x00004000 +#define SSB_CHIPCO_JCMD0_ACC_PDR 0x00005000 +#define SSB_CHIPCO_JCMD0_IRW_MASK 0x00000F00 +#define SSB_CHIPCO_JCMD_ACC_MASK 0x000F0000 /* Changes for corerev 11 */ +#define SSB_CHIPCO_JCMD_ACC_IRDR 0x00000000 +#define SSB_CHIPCO_JCMD_ACC_DR 0x00010000 +#define SSB_CHIPCO_JCMD_ACC_IR 0x00020000 +#define SSB_CHIPCO_JCMD_ACC_RESET 0x00030000 +#define SSB_CHIPCO_JCMD_ACC_IRPDR 0x00040000 +#define SSB_CHIPCO_JCMD_ACC_PDR 0x00050000 +#define SSB_CHIPCO_JCMD_IRW_MASK 0x00001F00 +#define SSB_CHIPCO_JCMD_IRW_SHIFT 8 +#define SSB_CHIPCO_JCMD_DRW_MASK 0x0000003F +#define SSB_CHIPCO_JIR 0x0034 /* Rev >= 10 only */ +#define SSB_CHIPCO_JDR 0x0038 /* Rev >= 10 only */ +#define SSB_CHIPCO_JCTL 0x003C /* Rev >= 10 only */ +#define SSB_CHIPCO_JCTL_FORCE_CLK 4 /* Force clock */ +#define SSB_CHIPCO_JCTL_EXT_EN 2 /* Enable external targets */ +#define SSB_CHIPCO_JCTL_EN 1 /* Enable Jtag master */ +#define SSB_CHIPCO_FLASHCTL 0x0040 +#define SSB_CHIPCO_FLASHCTL_START 0x80000000 +#define SSB_CHIPCO_FLASHCTL_BUSY SSB_CHIPCO_FLASHCTL_START +#define SSB_CHIPCO_FLASHADDR 0x0044 +#define SSB_CHIPCO_FLASHDATA 0x0048 +#define SSB_CHIPCO_BCAST_ADDR 0x0050 +#define SSB_CHIPCO_BCAST_DATA 0x0054 +#define SSB_CHIPCO_GPIOPULLUP 0x0058 /* Rev >= 20 only */ +#define SSB_CHIPCO_GPIOPULLDOWN 0x005C /* Rev >= 20 only */ +#define SSB_CHIPCO_GPIOIN 0x0060 +#define SSB_CHIPCO_GPIOOUT 0x0064 +#define SSB_CHIPCO_GPIOOUTEN 0x0068 +#define SSB_CHIPCO_GPIOCTL 0x006C +#define SSB_CHIPCO_GPIOPOL 0x0070 +#define SSB_CHIPCO_GPIOIRQ 0x0074 +#define SSB_CHIPCO_WATCHDOG 0x0080 +#define SSB_CHIPCO_GPIOTIMER 0x0088 /* LED powersave (corerev >= 16) */ +#define SSB_CHIPCO_GPIOTIMER_OFFTIME 0x0000FFFF +#define SSB_CHIPCO_GPIOTIMER_OFFTIME_SHIFT 0 +#define SSB_CHIPCO_GPIOTIMER_ONTIME 0xFFFF0000 +#define SSB_CHIPCO_GPIOTIMER_ONTIME_SHIFT 16 +#define SSB_CHIPCO_GPIOTOUTM 0x008C /* LED powersave (corerev >= 16) */ +#define SSB_CHIPCO_CLOCK_N 0x0090 +#define SSB_CHIPCO_CLOCK_SB 0x0094 +#define SSB_CHIPCO_CLOCK_PCI 0x0098 +#define SSB_CHIPCO_CLOCK_M2 0x009C +#define SSB_CHIPCO_CLOCK_MIPS 0x00A0 +#define SSB_CHIPCO_CLKDIV 0x00A4 /* Rev >= 3 only */ +#define SSB_CHIPCO_CLKDIV_SFLASH 0x0F000000 +#define SSB_CHIPCO_CLKDIV_SFLASH_SHIFT 24 +#define SSB_CHIPCO_CLKDIV_OTP 0x000F0000 +#define SSB_CHIPCO_CLKDIV_OTP_SHIFT 16 +#define SSB_CHIPCO_CLKDIV_JTAG 0x00000F00 +#define SSB_CHIPCO_CLKDIV_JTAG_SHIFT 8 +#define SSB_CHIPCO_CLKDIV_UART 0x000000FF +#define SSB_CHIPCO_PLLONDELAY 0x00B0 /* Rev >= 4 only */ +#define SSB_CHIPCO_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ +#define SSB_CHIPCO_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC 0x00000007 /* slow clock source mask */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC_LPO 0x00000000 /* source of slow clock is LPO */ +#define 
SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL 0x00000001 /* source of slow clock is crystal */ +#define SSB_CHIPCO_SLOECLKCTL_SRC_PCI 0x00000002 /* source of slow clock is PCI */ +#define SSB_CHIPCO_SLOWCLKCTL_LPOFREQ 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define SSB_CHIPCO_SLOWCLKCTL_LPOPD 0x00000400 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */ +#define SSB_CHIPCO_SLOWCLKCTL_FSLOW 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */ +#define SSB_CHIPCO_SLOWCLKCTL_IPLL 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors PLL clock disable requests from core */ +#define SSB_CHIPCO_SLOWCLKCTL_ENXTAL 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't disable crystal when appropriate */ +#define SSB_CHIPCO_SLOWCLKCTL_XTALPU 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */ +#define SSB_CHIPCO_SLOWCLKCTL_CLKDIV 0xFFFF0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */ +#define SSB_CHIPCO_SLOWCLKCTL_CLKDIV_SHIFT 16 +#define SSB_CHIPCO_SYSCLKCTL 0x00C0 /* Rev >= 3 only */ +#define SSB_CHIPCO_SYSCLKCTL_IDLPEN 0x00000001 /* ILPen: Enable Idle Low Power */ +#define SSB_CHIPCO_SYSCLKCTL_ALPEN 0x00000002 /* ALPen: Enable Active Low Power */ +#define SSB_CHIPCO_SYSCLKCTL_PLLEN 0x00000004 /* ForcePLLOn */ +#define SSB_CHIPCO_SYSCLKCTL_FORCEALP 0x00000008 /* Force ALP (or HT if ALPen is not set */ +#define SSB_CHIPCO_SYSCLKCTL_FORCEHT 0x00000010 /* Force HT */ +#define SSB_CHIPCO_SYSCLKCTL_CLKDIV 0xFFFF0000 /* ClkDiv (ILP = 1/(4+divisor)) */ +#define SSB_CHIPCO_SYSCLKCTL_CLKDIV_SHIFT 16 +#define SSB_CHIPCO_CLKSTSTR 0x00C4 /* Rev >= 3 only */ +#define SSB_CHIPCO_PCMCIA_CFG 0x0100 +#define SSB_CHIPCO_PCMCIA_MEMWAIT 0x0104 +#define SSB_CHIPCO_PCMCIA_ATTRWAIT 0x0108 +#define SSB_CHIPCO_PCMCIA_IOWAIT 0x010C +#define SSB_CHIPCO_IDE_CFG 0x0110 +#define SSB_CHIPCO_IDE_MEMWAIT 0x0114 +#define SSB_CHIPCO_IDE_ATTRWAIT 0x0118 +#define SSB_CHIPCO_IDE_IOWAIT 0x011C +#define SSB_CHIPCO_PROG_CFG 0x0120 +#define SSB_CHIPCO_PROG_WAITCNT 0x0124 +#define SSB_CHIPCO_FLASH_CFG 0x0128 +#define SSB_CHIPCO_FLASH_WAITCNT 0x012C +#define SSB_CHIPCO_CLKCTLST 0x01E0 /* Clock control and status (rev >= 20) */ +#define SSB_CHIPCO_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */ +#define SSB_CHIPCO_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */ +#define SSB_CHIPCO_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */ +#define SSB_CHIPCO_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */ +#define SSB_CHIPCO_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */ +#define SSB_CHIPCO_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ +#define SSB_CHIPCO_CLKCTLST_HAVEALP 0x00010000 /* ALP available */ +#define SSB_CHIPCO_CLKCTLST_HAVEHT 0x00020000 /* HT available */ +#define SSB_CHIPCO_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */ +#define SSB_CHIPCO_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ +#define SSB_CHIPCO_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */ +#define SSB_CHIPCO_UART0_DATA 0x0300 +#define SSB_CHIPCO_UART0_IMR 0x0304 +#define SSB_CHIPCO_UART0_FCR 0x0308 +#define SSB_CHIPCO_UART0_LCR 0x030C +#define SSB_CHIPCO_UART0_MCR 0x0310 +#define SSB_CHIPCO_UART0_LSR 0x0314 +#define SSB_CHIPCO_UART0_MSR 0x0318 +#define SSB_CHIPCO_UART0_SCRATCH 0x031C +#define SSB_CHIPCO_UART1_DATA 0x0400 +#define SSB_CHIPCO_UART1_IMR 0x0404 +#define SSB_CHIPCO_UART1_FCR 0x0408 +#define SSB_CHIPCO_UART1_LCR 0x040C +#define SSB_CHIPCO_UART1_MCR 0x0410 +#define SSB_CHIPCO_UART1_LSR 0x0414 +#define 
SSB_CHIPCO_UART1_MSR 0x0418 +#define SSB_CHIPCO_UART1_SCRATCH 0x041C +/* PMU registers (rev >= 20) */ +#define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */ +#define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ +#define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16 +#define SSB_CHIPCO_PMU_CTL_PLL_UPD 0x00000400 +#define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ +#define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ +#define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ +#define SSB_CHIPCO_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */ +#define SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT 2 +#define SSB_CHIPCO_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */ +#define SSB_CHIPCO_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */ +#define SSB_CHIPCO_PMU_CAP 0x0604 /* PMU capabilities */ +#define SSB_CHIPCO_PMU_CAP_REVISION 0x000000FF /* Revision mask */ +#define SSB_CHIPCO_PMU_STAT 0x0608 /* PMU status */ +#define SSB_CHIPCO_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */ +#define SSB_CHIPCO_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? */ +#define SSB_CHIPCO_PMU_STAT_HAVEALP 0x00000008 /* ALP available */ +#define SSB_CHIPCO_PMU_STAT_HAVEHT 0x00000004 /* HT available */ +#define SSB_CHIPCO_PMU_STAT_RESINIT 0x00000003 /* Res init */ +#define SSB_CHIPCO_PMU_RES_STAT 0x060C /* PMU res status */ +#define SSB_CHIPCO_PMU_RES_PEND 0x0610 /* PMU res pending */ +#define SSB_CHIPCO_PMU_TIMER 0x0614 /* PMU timer */ +#define SSB_CHIPCO_PMU_MINRES_MSK 0x0618 /* PMU min res mask */ +#define SSB_CHIPCO_PMU_MAXRES_MSK 0x061C /* PMU max res mask */ +#define SSB_CHIPCO_PMU_RES_TABSEL 0x0620 /* PMU res table sel */ +#define SSB_CHIPCO_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */ +#define SSB_CHIPCO_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */ +#define SSB_CHIPCO_PMU_RES_TIMER 0x062C /* PMU res timer */ +#define SSB_CHIPCO_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */ +#define SSB_CHIPCO_PMU_WATCHDOG 0x0634 /* PMU watchdog */ +#define SSB_CHIPCO_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ +#define SSB_CHIPCO_PMU_RES_REQT 0x0644 /* PMU res req timer */ +#define SSB_CHIPCO_PMU_RES_REQM 0x0648 /* PMU res req mask */ +#define SSB_CHIPCO_CHIPCTL_ADDR 0x0650 +#define SSB_CHIPCO_CHIPCTL_DATA 0x0654 +#define SSB_CHIPCO_REGCTL_ADDR 0x0658 +#define SSB_CHIPCO_REGCTL_DATA 0x065C +#define SSB_CHIPCO_PLLCTL_ADDR 0x0660 +#define SSB_CHIPCO_PLLCTL_DATA 0x0664 + + + +/** PMU PLL registers */ + +/* PMU rev 0 PLL registers */ +#define SSB_PMU0_PLLCTL0 0 +#define SSB_PMU0_PLLCTL0_PDIV_MSK 0x00000001 +#define SSB_PMU0_PLLCTL0_PDIV_FREQ 25000 /* kHz */ +#define SSB_PMU0_PLLCTL1 1 +#define SSB_PMU0_PLLCTL1_WILD_IMSK 0xF0000000 /* Wild int mask (low nibble) */ +#define SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT 28 +#define SSB_PMU0_PLLCTL1_WILD_FMSK 0x0FFFFF00 /* Wild frac mask */ +#define SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT 8 +#define SSB_PMU0_PLLCTL1_STOPMOD 0x00000040 /* Stop mod */ +#define SSB_PMU0_PLLCTL2 2 +#define SSB_PMU0_PLLCTL2_WILD_IMSKHI 0x0000000F /* Wild int mask (high nibble) */ +#define SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT 0 + +/* PMU rev 1 PLL registers */ +#define SSB_PMU1_PLLCTL0 0 +#define SSB_PMU1_PLLCTL0_P1DIV 0x00F00000 /* P1 div */ +#define SSB_PMU1_PLLCTL0_P1DIV_SHIFT 20 +#define SSB_PMU1_PLLCTL0_P2DIV 0x0F000000 /* P2 div */ +#define SSB_PMU1_PLLCTL0_P2DIV_SHIFT 24 +#define SSB_PMU1_PLLCTL1 1 +#define SSB_PMU1_PLLCTL1_M1DIV 0x000000FF /* M1 div */ +#define SSB_PMU1_PLLCTL1_M1DIV_SHIFT 0 +#define SSB_PMU1_PLLCTL1_M2DIV 0x0000FF00 /* M2 div */ +#define SSB_PMU1_PLLCTL1_M2DIV_SHIFT 
8 +#define SSB_PMU1_PLLCTL1_M3DIV 0x00FF0000 /* M3 div */ +#define SSB_PMU1_PLLCTL1_M3DIV_SHIFT 16 +#define SSB_PMU1_PLLCTL1_M4DIV 0xFF000000 /* M4 div */ +#define SSB_PMU1_PLLCTL1_M4DIV_SHIFT 24 +#define SSB_PMU1_PLLCTL2 2 +#define SSB_PMU1_PLLCTL2_M5DIV 0x000000FF /* M5 div */ +#define SSB_PMU1_PLLCTL2_M5DIV_SHIFT 0 +#define SSB_PMU1_PLLCTL2_M6DIV 0x0000FF00 /* M6 div */ +#define SSB_PMU1_PLLCTL2_M6DIV_SHIFT 8 +#define SSB_PMU1_PLLCTL2_NDIVMODE 0x000E0000 /* NDIV mode */ +#define SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT 17 +#define SSB_PMU1_PLLCTL2_NDIVINT 0x1FF00000 /* NDIV int */ +#define SSB_PMU1_PLLCTL2_NDIVINT_SHIFT 20 +#define SSB_PMU1_PLLCTL3 3 +#define SSB_PMU1_PLLCTL3_NDIVFRAC 0x00FFFFFF /* NDIV frac */ +#define SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT 0 +#define SSB_PMU1_PLLCTL4 4 +#define SSB_PMU1_PLLCTL5 5 +#define SSB_PMU1_PLLCTL5_CLKDRV 0xFFFFFF00 /* clk drv */ +#define SSB_PMU1_PLLCTL5_CLKDRV_SHIFT 8 + +/* BCM4312 PLL resource numbers. */ +#define SSB_PMURES_4312_SWITCHER_BURST 0 +#define SSB_PMURES_4312_SWITCHER_PWM 1 +#define SSB_PMURES_4312_PA_REF_LDO 2 +#define SSB_PMURES_4312_CORE_LDO_BURST 3 +#define SSB_PMURES_4312_CORE_LDO_PWM 4 +#define SSB_PMURES_4312_RADIO_LDO 5 +#define SSB_PMURES_4312_ILP_REQUEST 6 +#define SSB_PMURES_4312_BG_FILTBYP 7 +#define SSB_PMURES_4312_TX_FILTBYP 8 +#define SSB_PMURES_4312_RX_FILTBYP 9 +#define SSB_PMURES_4312_XTAL_PU 10 +#define SSB_PMURES_4312_ALP_AVAIL 11 +#define SSB_PMURES_4312_BB_PLL_FILTBYP 12 +#define SSB_PMURES_4312_RF_PLL_FILTBYP 13 +#define SSB_PMURES_4312_HT_AVAIL 14 + +/* BCM4325 PLL resource numbers. */ +#define SSB_PMURES_4325_BUCK_BOOST_BURST 0 +#define SSB_PMURES_4325_CBUCK_BURST 1 +#define SSB_PMURES_4325_CBUCK_PWM 2 +#define SSB_PMURES_4325_CLDO_CBUCK_BURST 3 +#define SSB_PMURES_4325_CLDO_CBUCK_PWM 4 +#define SSB_PMURES_4325_BUCK_BOOST_PWM 5 +#define SSB_PMURES_4325_ILP_REQUEST 6 +#define SSB_PMURES_4325_ABUCK_BURST 7 +#define SSB_PMURES_4325_ABUCK_PWM 8 +#define SSB_PMURES_4325_LNLDO1_PU 9 +#define SSB_PMURES_4325_LNLDO2_PU 10 +#define SSB_PMURES_4325_LNLDO3_PU 11 +#define SSB_PMURES_4325_LNLDO4_PU 12 +#define SSB_PMURES_4325_XTAL_PU 13 +#define SSB_PMURES_4325_ALP_AVAIL 14 +#define SSB_PMURES_4325_RX_PWRSW_PU 15 +#define SSB_PMURES_4325_TX_PWRSW_PU 16 +#define SSB_PMURES_4325_RFPLL_PWRSW_PU 17 +#define SSB_PMURES_4325_LOGEN_PWRSW_PU 18 +#define SSB_PMURES_4325_AFE_PWRSW_PU 19 +#define SSB_PMURES_4325_BBPLL_PWRSW_PU 20 +#define SSB_PMURES_4325_HT_AVAIL 21 + +/* BCM4328 PLL resource numbers. */ +#define SSB_PMURES_4328_EXT_SWITCHER_PWM 0 +#define SSB_PMURES_4328_BB_SWITCHER_PWM 1 +#define SSB_PMURES_4328_BB_SWITCHER_BURST 2 +#define SSB_PMURES_4328_BB_EXT_SWITCHER_BURST 3 +#define SSB_PMURES_4328_ILP_REQUEST 4 +#define SSB_PMURES_4328_RADIO_SWITCHER_PWM 5 +#define SSB_PMURES_4328_RADIO_SWITCHER_BURST 6 +#define SSB_PMURES_4328_ROM_SWITCH 7 +#define SSB_PMURES_4328_PA_REF_LDO 8 +#define SSB_PMURES_4328_RADIO_LDO 9 +#define SSB_PMURES_4328_AFE_LDO 10 +#define SSB_PMURES_4328_PLL_LDO 11 +#define SSB_PMURES_4328_BG_FILTBYP 12 +#define SSB_PMURES_4328_TX_FILTBYP 13 +#define SSB_PMURES_4328_RX_FILTBYP 14 +#define SSB_PMURES_4328_XTAL_PU 15 +#define SSB_PMURES_4328_XTAL_EN 16 +#define SSB_PMURES_4328_BB_PLL_FILTBYP 17 +#define SSB_PMURES_4328_RF_PLL_FILTBYP 18 +#define SSB_PMURES_4328_BB_PLL_PU 19 + +/* BCM5354 PLL resource numbers. 
*/ +#define SSB_PMURES_5354_EXT_SWITCHER_PWM 0 +#define SSB_PMURES_5354_BB_SWITCHER_PWM 1 +#define SSB_PMURES_5354_BB_SWITCHER_BURST 2 +#define SSB_PMURES_5354_BB_EXT_SWITCHER_BURST 3 +#define SSB_PMURES_5354_ILP_REQUEST 4 +#define SSB_PMURES_5354_RADIO_SWITCHER_PWM 5 +#define SSB_PMURES_5354_RADIO_SWITCHER_BURST 6 +#define SSB_PMURES_5354_ROM_SWITCH 7 +#define SSB_PMURES_5354_PA_REF_LDO 8 +#define SSB_PMURES_5354_RADIO_LDO 9 +#define SSB_PMURES_5354_AFE_LDO 10 +#define SSB_PMURES_5354_PLL_LDO 11 +#define SSB_PMURES_5354_BG_FILTBYP 12 +#define SSB_PMURES_5354_TX_FILTBYP 13 +#define SSB_PMURES_5354_RX_FILTBYP 14 +#define SSB_PMURES_5354_XTAL_PU 15 +#define SSB_PMURES_5354_XTAL_EN 16 +#define SSB_PMURES_5354_BB_PLL_FILTBYP 17 +#define SSB_PMURES_5354_RF_PLL_FILTBYP 18 +#define SSB_PMURES_5354_BB_PLL_PU 19 + + + +/** Chip specific Chip-Status register contents. */ +#define SSB_CHIPCO_CHST_4322_SPROM_EXISTS 0x00000040 /* SPROM present */ +#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003 +#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */ +#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ +#define SSB_CHIPCO_CHST_4325_OTP_SEL 2 /* OTP is powered up, no SPROM */ +#define SSB_CHIPCO_CHST_4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */ +#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE 0x00000004 +#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE_SHIFT 2 +#define SSB_CHIPCO_CHST_4325_RCAL_VALID 0x00000008 +#define SSB_CHIPCO_CHST_4325_RCAL_VALID_SHIFT 3 +#define SSB_CHIPCO_CHST_4325_RCAL_VALUE 0x000001F0 +#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4 +#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */ + +/** Macros to determine SPROM presence based on Chip-Status register. 
*/ +#define SSB_CHIPCO_CHST_4312_SPROM_PRESENT(status) \ + ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ + SSB_CHIPCO_CHST_4325_OTP_SEL) +#define SSB_CHIPCO_CHST_4322_SPROM_PRESENT(status) \ + (status & SSB_CHIPCO_CHST_4322_SPROM_EXISTS) +#define SSB_CHIPCO_CHST_4325_SPROM_PRESENT(status) \ + (((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ + SSB_CHIPCO_CHST_4325_DEFCIS_SEL) && \ + ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ + SSB_CHIPCO_CHST_4325_OTP_SEL)) + + + +/** Clockcontrol masks and values **/ + +/* SSB_CHIPCO_CLOCK_N */ +#define SSB_CHIPCO_CLK_N1 0x0000003F /* n1 control */ +#define SSB_CHIPCO_CLK_N2 0x00003F00 /* n2 control */ +#define SSB_CHIPCO_CLK_N2_SHIFT 8 +#define SSB_CHIPCO_CLK_PLLC 0x000F0000 /* pll control */ +#define SSB_CHIPCO_CLK_PLLC_SHIFT 16 + +/* SSB_CHIPCO_CLOCK_SB/PCI/UART */ +#define SSB_CHIPCO_CLK_M1 0x0000003F /* m1 control */ +#define SSB_CHIPCO_CLK_M2 0x00003F00 /* m2 control */ +#define SSB_CHIPCO_CLK_M2_SHIFT 8 +#define SSB_CHIPCO_CLK_M3 0x003F0000 /* m3 control */ +#define SSB_CHIPCO_CLK_M3_SHIFT 16 +#define SSB_CHIPCO_CLK_MC 0x1F000000 /* mux control */ +#define SSB_CHIPCO_CLK_MC_SHIFT 24 + +/* N3M Clock control magic field values */ +#define SSB_CHIPCO_CLK_F6_2 0x02 /* A factor of 2 in */ +#define SSB_CHIPCO_CLK_F6_3 0x03 /* 6-bit fields like */ +#define SSB_CHIPCO_CLK_F6_4 0x05 /* N1, M1 or M3 */ +#define SSB_CHIPCO_CLK_F6_5 0x09 +#define SSB_CHIPCO_CLK_F6_6 0x11 +#define SSB_CHIPCO_CLK_F6_7 0x21 + +#define SSB_CHIPCO_CLK_F5_BIAS 5 /* 5-bit fields get this added */ + +#define SSB_CHIPCO_CLK_MC_BYPASS 0x08 +#define SSB_CHIPCO_CLK_MC_M1 0x04 +#define SSB_CHIPCO_CLK_MC_M1M2 0x02 +#define SSB_CHIPCO_CLK_MC_M1M2M3 0x01 +#define SSB_CHIPCO_CLK_MC_M1M3 0x11 + +/* Type 2 Clock control magic field values */ +#define SSB_CHIPCO_CLK_T2_BIAS 2 /* n1, n2, m1 & m3 bias */ +#define SSB_CHIPCO_CLK_T2M2_BIAS 3 /* m2 bias */ + +#define SSB_CHIPCO_CLK_T2MC_M1BYP 1 +#define SSB_CHIPCO_CLK_T2MC_M2BYP 2 +#define SSB_CHIPCO_CLK_T2MC_M3BYP 4 + +/* Type 6 Clock control magic field values */ +#define SSB_CHIPCO_CLK_T6_MMASK 1 /* bits of interest in m */ +#define SSB_CHIPCO_CLK_T6_M0 120000000 /* sb clock for m = 0 */ +#define SSB_CHIPCO_CLK_T6_M1 100000000 /* sb clock for m = 1 */ +#define SSB_CHIPCO_CLK_SB2MIPS_T6(sb) (2 * (sb)) + +/* Common clock base */ +#define SSB_CHIPCO_CLK_BASE1 24000000 /* Half the clock freq */ +#define SSB_CHIPCO_CLK_BASE2 12500000 /* Alternate crystal on some PLL's */ + +/* Clock control values for 200Mhz in 5350 */ +#define SSB_CHIPCO_CLK_5350_N 0x0311 +#define SSB_CHIPCO_CLK_5350_M 0x04020009 + + +/** Bits in the config registers **/ + +#define SSB_CHIPCO_CFG_EN 0x0001 /* Enable */ +#define SSB_CHIPCO_CFG_EXTM 0x000E /* Extif Mode */ +#define SSB_CHIPCO_CFG_EXTM_ASYNC 0x0002 /* Async/Parallel flash */ +#define SSB_CHIPCO_CFG_EXTM_SYNC 0x0004 /* Synchronous */ +#define SSB_CHIPCO_CFG_EXTM_PCMCIA 0x0008 /* PCMCIA */ +#define SSB_CHIPCO_CFG_EXTM_IDE 0x000A /* IDE */ +#define SSB_CHIPCO_CFG_DS16 0x0010 /* Data size, 0=8bit, 1=16bit */ +#define SSB_CHIPCO_CFG_CLKDIV 0x0060 /* Sync: Clock divisor */ +#define SSB_CHIPCO_CFG_CLKEN 0x0080 /* Sync: Clock enable */ +#define SSB_CHIPCO_CFG_BSTRO 0x0100 /* Sync: Size/Bytestrobe */ + + +/** Flash-specific control/status values */ + +/* flashcontrol opcodes for ST flashes */ +#define SSB_CHIPCO_FLASHCTL_ST_WREN 0x0006 /* Write Enable */ +#define SSB_CHIPCO_FLASHCTL_ST_WRDIS 0x0004 /* Write Disable */ +#define SSB_CHIPCO_FLASHCTL_ST_RDSR 0x0105 /* Read Status Register */ +#define 
SSB_CHIPCO_FLASHCTL_ST_WRSR 0x0101 /* Write Status Register */ +#define SSB_CHIPCO_FLASHCTL_ST_READ 0x0303 /* Read Data Bytes */ +#define SSB_CHIPCO_FLASHCTL_ST_PP 0x0302 /* Page Program */ +#define SSB_CHIPCO_FLASHCTL_ST_SE 0x02D8 /* Sector Erase */ +#define SSB_CHIPCO_FLASHCTL_ST_BE 0x00C7 /* Bulk Erase */ +#define SSB_CHIPCO_FLASHCTL_ST_DP 0x00B9 /* Deep Power-down */ +#define SSB_CHIPCO_FLASHCTL_ST_RES 0x03AB /* Read Electronic Signature */ +#define SSB_CHIPCO_FLASHCTL_ST_CSA 0x1000 /* Keep chip select asserted */ +#define SSB_CHIPCO_FLASHCTL_ST_SSE 0x0220 /* Sub-sector Erase */ + +/* Status register bits for ST flashes */ +#define SSB_CHIPCO_FLASHSTA_ST_WIP 0x01 /* Write In Progress */ +#define SSB_CHIPCO_FLASHSTA_ST_WEL 0x02 /* Write Enable Latch */ +#define SSB_CHIPCO_FLASHSTA_ST_BP 0x1C /* Block Protect */ +#define SSB_CHIPCO_FLASHSTA_ST_BP_SHIFT 2 +#define SSB_CHIPCO_FLASHSTA_ST_SRWD 0x80 /* Status Register Write Disable */ + +/* flashcontrol opcodes for Atmel flashes */ +#define SSB_CHIPCO_FLASHCTL_AT_READ 0x07E8 +#define SSB_CHIPCO_FLASHCTL_AT_PAGE_READ 0x07D2 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_READ /* FIXME */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_READ /* FIXME */ +#define SSB_CHIPCO_FLASHCTL_AT_STATUS 0x01D7 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRITE 0x0384 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRITE 0x0387 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_ERASE_PRGM 0x0283 /* Erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_ERASE_PRGM 0x0286 /* Erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_PROGRAM 0x0288 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_PROGRAM 0x0289 +#define SSB_CHIPCO_FLASHCTL_AT_PAGE_ERASE 0x0281 +#define SSB_CHIPCO_FLASHCTL_AT_BLOCK_ERASE 0x0250 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRER_PRGM 0x0382 /* Write erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRER_PRGM 0x0385 /* Write erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_LOAD 0x0253 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_LOAD 0x0255 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_COMPARE 0x0260 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_COMPARE 0x0261 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_REPROGRAM 0x0258 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_REPROGRAM 0x0259 + +/* Status register bits for Atmel flashes */ +#define SSB_CHIPCO_FLASHSTA_AT_READY 0x80 +#define SSB_CHIPCO_FLASHSTA_AT_MISMATCH 0x40 +#define SSB_CHIPCO_FLASHSTA_AT_ID 0x38 +#define SSB_CHIPCO_FLASHSTA_AT_ID_SHIFT 3 + + +/** OTP **/ + +/* OTP regions */ +#define SSB_CHIPCO_OTP_HW_REGION SSB_CHIPCO_OTPS_HW_PROTECT +#define SSB_CHIPCO_OTP_SW_REGION SSB_CHIPCO_OTPS_SW_PROTECT +#define SSB_CHIPCO_OTP_CID_REGION SSB_CHIPCO_OTPS_CID_PROTECT + +/* OTP regions (Byte offsets from otp size) */ +#define SSB_CHIPCO_OTP_SWLIM_OFF (-8) +#define SSB_CHIPCO_OTP_CIDBASE_OFF 0 +#define SSB_CHIPCO_OTP_CIDLIM_OFF 8 + +/* Predefined OTP words (Word offset from otp size) */ +#define SSB_CHIPCO_OTP_BOUNDARY_OFF (-4) +#define SSB_CHIPCO_OTP_HWSIGN_OFF (-3) +#define SSB_CHIPCO_OTP_SWSIGN_OFF (-2) +#define SSB_CHIPCO_OTP_CIDSIGN_OFF (-1) + +#define SSB_CHIPCO_OTP_CID_OFF 0 +#define SSB_CHIPCO_OTP_PKG_OFF 1 +#define SSB_CHIPCO_OTP_FID_OFF 2 +#define SSB_CHIPCO_OTP_RSV_OFF 3 +#define SSB_CHIPCO_OTP_LIM_OFF 4 + +#define SSB_CHIPCO_OTP_SIGNATURE 0x578A +#define SSB_CHIPCO_OTP_MAGIC 0x4E56 + + +struct ssb_device; +struct ssb_serial_port; + +/* Data for the PMU, if available. 
+ * Check availability with ((struct ssb_chipcommon)->capabilities & SSB_CHIPCO_CAP_PMU) + */ +struct ssb_chipcommon_pmu { + u8 rev; /* PMU revision */ + u32 crystalfreq; /* The active crystal frequency (in kHz) */ +}; + +struct ssb_chipcommon { + struct ssb_device *dev; + u32 capabilities; + u32 status; + /* Fast Powerup Delay constant */ + u16 fast_pwrup_delay; + spinlock_t gpio_lock; + struct ssb_chipcommon_pmu pmu; + u32 ticks_per_ms; + u32 max_timer_ms; +}; + +static inline bool ssb_chipco_available(struct ssb_chipcommon *cc) +{ + return (cc->dev != NULL); +} + +/* Register access */ +#define chipco_read32(cc, offset) ssb_read32((cc)->dev, offset) +#define chipco_write32(cc, offset, val) ssb_write32((cc)->dev, offset, val) + +#define chipco_mask32(cc, offset, mask) \ + chipco_write32(cc, offset, chipco_read32(cc, offset) & (mask)) +#define chipco_set32(cc, offset, set) \ + chipco_write32(cc, offset, chipco_read32(cc, offset) | (set)) +#define chipco_maskset32(cc, offset, mask, set) \ + chipco_write32(cc, offset, (chipco_read32(cc, offset) & (mask)) | (set)) + +extern void ssb_chipcommon_init(struct ssb_chipcommon *cc); + +extern void ssb_chipco_suspend(struct ssb_chipcommon *cc); +extern void ssb_chipco_resume(struct ssb_chipcommon *cc); + +extern void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m); +extern void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m); +extern void ssb_chipco_timing_init(struct ssb_chipcommon *cc, + unsigned long ns_per_cycle); + +enum ssb_clkmode { + SSB_CLKMODE_SLOW, + SSB_CLKMODE_FAST, + SSB_CLKMODE_DYNAMIC, +}; + +extern void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, + enum ssb_clkmode mode); + +extern u32 ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, u32 ticks); + +void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value); + +u32 ssb_chipco_irq_status(struct ssb_chipcommon *cc, u32 mask); + +/* Chipcommon GPIO pin access. */ +u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask); +u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_pullup(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_pulldown(struct ssb_chipcommon *cc, u32 mask, u32 value); + +#ifdef CONFIG_SSB_SERIAL +extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc, + struct ssb_serial_port *ports); +#endif /* CONFIG_SSB_SERIAL */ + +/* PMU support */ +extern void ssb_pmu_init(struct ssb_chipcommon *cc); + +enum ssb_pmu_ldo_volt_id { + LDO_PAREF = 0, + LDO_VOLT1, + LDO_VOLT2, + LDO_VOLT3, +}; + +void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, + enum ssb_pmu_ldo_volt_id id, u32 voltage); +void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on); +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid); + +#endif /* LINUX_SSB_CHIPCO_H_ */ diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h new file mode 100644 index 000000000..a410e841e --- /dev/null +++ b/include/linux/ssb/ssb_driver_extif.h @@ -0,0 +1,259 @@ +/* + * Hardware-specific External Interface I/O core definitions + * for the BCM47xx family of SiliconBackplane-based chips. 
+ * + * The External Interface core supports a total of three external chip selects + * supporting external interfaces. One of the external chip selects is + * used for Flash, one is used for PCMCIA, and the other may be + * programmed to support either a synchronous interface or an + * asynchronous interface. The asynchronous interface can be used to + * support external devices such as UARTs and the BCM2019 Bluetooth + * baseband processor. + * The external interface core also contains 2 on-chip 16550 UARTs, clock + * frequency control, a watchdog interrupt timer, and a GPIO interface. + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, Michael Buesch + * + * Licensed under the GPL version 2. See COPYING for details. + */ +#ifndef LINUX_SSB_EXTIFCORE_H_ +#define LINUX_SSB_EXTIFCORE_H_ + +/* external interface address space */ +#define SSB_EXTIF_PCMCIA_MEMBASE(x) (x) +#define SSB_EXTIF_PCMCIA_IOBASE(x) ((x) + 0x100000) +#define SSB_EXTIF_PCMCIA_CFGBASE(x) ((x) + 0x200000) +#define SSB_EXTIF_CFGIF_BASE(x) ((x) + 0x800000) +#define SSB_EXTIF_FLASH_BASE(x) ((x) + 0xc00000) + +#define SSB_EXTIF_NR_GPIOOUT 5 +/* GPIO NOTE: + * The multiple instances of output and output enable registers + * are present to allow driver software for multiple cores to control + * gpio outputs without needing to share a single register pair. + * Use the following helper macro to get a register offset value. + */ +#define SSB_EXTIF_GPIO_OUT(index) ({ \ + BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \ + SSB_EXTIF_GPIO_OUT_BASE + ((index) * 8); \ + }) +#define SSB_EXTIF_GPIO_OUTEN(index) ({ \ + BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \ + SSB_EXTIF_GPIO_OUTEN_BASE + ((index) * 8); \ + }) + +/** EXTIF core registers **/ + +#define SSB_EXTIF_CTL 0x0000 +#define SSB_EXTIF_CTL_UARTEN (1 << 0) /* UART enable */ +#define SSB_EXTIF_EXTSTAT 0x0004 +#define SSB_EXTIF_EXTSTAT_EMODE (1 << 0) /* Endian mode (ro) */ +#define SSB_EXTIF_EXTSTAT_EIRQPIN (1 << 1) /* External interrupt pin (ro) */ +#define SSB_EXTIF_EXTSTAT_GPIOIRQPIN (1 << 2) /* GPIO interrupt pin (ro) */ +#define SSB_EXTIF_PCMCIA_CFG 0x0010 +#define SSB_EXTIF_PCMCIA_MEMWAIT 0x0014 +#define SSB_EXTIF_PCMCIA_ATTRWAIT 0x0018 +#define SSB_EXTIF_PCMCIA_IOWAIT 0x001C +#define SSB_EXTIF_PROG_CFG 0x0020 +#define SSB_EXTIF_PROG_WAITCNT 0x0024 +#define SSB_EXTIF_FLASH_CFG 0x0028 +#define SSB_EXTIF_FLASH_WAITCNT 0x002C +#define SSB_EXTIF_WATCHDOG 0x0040 +#define SSB_EXTIF_CLOCK_N 0x0044 +#define SSB_EXTIF_CLOCK_SB 0x0048 +#define SSB_EXTIF_CLOCK_PCI 0x004C +#define SSB_EXTIF_CLOCK_MII 0x0050 +#define SSB_EXTIF_GPIO_IN 0x0060 +#define SSB_EXTIF_GPIO_OUT_BASE 0x0064 +#define SSB_EXTIF_GPIO_OUTEN_BASE 0x0068 +#define SSB_EXTIF_EJTAG_OUTEN 0x0090 +#define SSB_EXTIF_GPIO_INTPOL 0x0094 +#define SSB_EXTIF_GPIO_INTMASK 0x0098 +#define SSB_EXTIF_UART_DATA 0x0300 +#define SSB_EXTIF_UART_TIMER 0x0310 +#define SSB_EXTIF_UART_FCR 0x0320 +#define SSB_EXTIF_UART_LCR 0x0330 +#define SSB_EXTIF_UART_MCR 0x0340 +#define SSB_EXTIF_UART_LSR 0x0350 +#define SSB_EXTIF_UART_MSR 0x0360 +#define SSB_EXTIF_UART_SCRATCH 0x0370 + + + + +/* pcmcia/prog/flash_config */ +#define SSB_EXTCFG_EN (1 << 0) /* enable */ +#define SSB_EXTCFG_MODE 0xE /* mode */ +#define SSB_EXTCFG_MODE_SHIFT 1 +#define SSB_EXTCFG_MODE_FLASH 0x0 /* flash/asynchronous mode */ +#define SSB_EXTCFG_MODE_SYNC 0x2 /* synchronous mode */ +#define SSB_EXTCFG_MODE_PCMCIA 0x4 /* pcmcia mode */ +#define SSB_EXTCFG_DS16 (1 << 4) /* destsize: 0=8bit, 1=16bit */ +#define SSB_EXTCFG_BSWAP (1 << 5) /* byteswap */ +#define 
SSB_EXTCFG_CLKDIV 0xC0 /* clock divider */ +#define SSB_EXTCFG_CLKDIV_SHIFT 6 +#define SSB_EXTCFG_CLKDIV_2 0x0 /* backplane/2 */ +#define SSB_EXTCFG_CLKDIV_3 0x40 /* backplane/3 */ +#define SSB_EXTCFG_CLKDIV_4 0x80 /* backplane/4 */ +#define SSB_EXTCFG_CLKEN (1 << 8) /* clock enable */ +#define SSB_EXTCFG_STROBE (1 << 9) /* size/bytestrobe (synch only) */ + +/* pcmcia_memwait */ +#define SSB_PCMCIA_MEMW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_MEMW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_MEMW_1_SHIFT 8 +#define SSB_PCMCIA_MEMW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_MEMW_2_SHIFT 16 +#define SSB_PCMCIA_MEMW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_MEMW_3_SHIFT 24 + +/* pcmcia_attrwait */ +#define SSB_PCMCIA_ATTW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_ATTW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_ATTW_1_SHIFT 8 +#define SSB_PCMCIA_ATTW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_ATTW_2_SHIFT 16 +#define SSB_PCMCIA_ATTW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_ATTW_3_SHIFT 24 + +/* pcmcia_iowait */ +#define SSB_PCMCIA_IOW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_IOW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_IOW_1_SHIFT 8 +#define SSB_PCMCIA_IOW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_IOW_2_SHIFT 16 +#define SSB_PCMCIA_IOW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_IOW_3_SHIFT 24 + +/* prog_waitcount */ +#define SSB_PROG_WCNT_0 0x0000001F /* waitcount0 */ +#define SSB_PROG_WCNT_1 0x00001F00 /* waitcount1 */ +#define SSB_PROG_WCNT_1_SHIFT 8 +#define SSB_PROG_WCNT_2 0x001F0000 /* waitcount2 */ +#define SSB_PROG_WCNT_2_SHIFT 16 +#define SSB_PROG_WCNT_3 0x1F000000 /* waitcount3 */ +#define SSB_PROG_WCNT_3_SHIFT 24 + +#define SSB_PROG_W0 0x0000000C +#define SSB_PROG_W1 0x00000A00 +#define SSB_PROG_W2 0x00020000 +#define SSB_PROG_W3 0x01000000 + +/* flash_waitcount */ +#define SSB_FLASH_WCNT_0 0x0000001F /* waitcount0 */ +#define SSB_FLASH_WCNT_1 0x00001F00 /* waitcount1 */ +#define SSB_FLASH_WCNT_1_SHIFT 8 +#define SSB_FLASH_WCNT_2 0x001F0000 /* waitcount2 */ +#define SSB_FLASH_WCNT_2_SHIFT 16 +#define SSB_FLASH_WCNT_3 0x1F000000 /* waitcount3 */ +#define SSB_FLASH_WCNT_3_SHIFT 24 + +/* watchdog */ +#define SSB_EXTIF_WATCHDOG_CLK 48000000 /* Hz */ + +#define SSB_EXTIF_WATCHDOG_MAX_TIMER ((1 << 28) - 1) +#define SSB_EXTIF_WATCHDOG_MAX_TIMER_MS (SSB_EXTIF_WATCHDOG_MAX_TIMER \ + / (SSB_EXTIF_WATCHDOG_CLK / 1000)) + + +#ifdef CONFIG_SSB_DRIVER_EXTIF + +struct ssb_extif { + struct ssb_device *dev; + spinlock_t gpio_lock; +}; + +static inline bool ssb_extif_available(struct ssb_extif *extif) +{ + return (extif->dev != NULL); +} + +extern void ssb_extif_get_clockcontrol(struct ssb_extif *extif, + u32 *plltype, u32 *n, u32 *m); + +extern void ssb_extif_timing_init(struct ssb_extif *extif, + unsigned long ns); + +extern u32 ssb_extif_watchdog_timer_set(struct ssb_extif *extif, u32 ticks); + +/* Extif GPIO pin access */ +u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask); +u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value); +u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value); +u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value); +u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value); + +#ifdef CONFIG_SSB_SERIAL +extern int ssb_extif_serial_init(struct ssb_extif *extif, + struct ssb_serial_port *ports); +#endif /* CONFIG_SSB_SERIAL */ + + +#else /* CONFIG_SSB_DRIVER_EXTIF */ +/* extif disabled */ + +struct ssb_extif { 
+}; + +static inline bool ssb_extif_available(struct ssb_extif *extif) +{ + return 0; +} + +static inline +void ssb_extif_get_clockcontrol(struct ssb_extif *extif, + u32 *plltype, u32 *n, u32 *m) +{ +} + +static inline +void ssb_extif_timing_init(struct ssb_extif *extif, unsigned long ns) +{ +} + +static inline +u32 ssb_extif_watchdog_timer_set(struct ssb_extif *extif, u32 ticks) +{ + return 0; +} + +static inline u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask) +{ + return 0; +} + +static inline u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, + u32 value) +{ + return 0; +} + +static inline u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, + u32 value) +{ + return 0; +} + +static inline u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, + u32 value) +{ + return 0; +} + +static inline u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, + u32 value) +{ + return 0; +} + +#ifdef CONFIG_SSB_SERIAL +static inline int ssb_extif_serial_init(struct ssb_extif *extif, + struct ssb_serial_port *ports) +{ + return 0; +} +#endif /* CONFIG_SSB_SERIAL */ + +#endif /* CONFIG_SSB_DRIVER_EXTIF */ +#endif /* LINUX_SSB_EXTIFCORE_H_ */ diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h new file mode 100644 index 000000000..31593b346 --- /dev/null +++ b/include/linux/ssb/ssb_driver_gige.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_SSB_DRIVER_GIGE_H_ +#define LINUX_SSB_DRIVER_GIGE_H_ + +#include +#include +#include +#include + + +#ifdef CONFIG_SSB_DRIVER_GIGE + + +#define SSB_GIGE_PCIIO 0x0000 /* PCI I/O Registers (1024 bytes) */ +#define SSB_GIGE_RESERVED 0x0400 /* Reserved (1024 bytes) */ +#define SSB_GIGE_PCICFG 0x0800 /* PCI config space (256 bytes) */ +#define SSB_GIGE_SHIM_FLUSHSTAT 0x0C00 /* PCI to OCP: Flush status control (32bit) */ +#define SSB_GIGE_SHIM_FLUSHRDA 0x0C04 /* PCI to OCP: Flush read address (32bit) */ +#define SSB_GIGE_SHIM_FLUSHTO 0x0C08 /* PCI to OCP: Flush timeout counter (32bit) */ +#define SSB_GIGE_SHIM_BARRIER 0x0C0C /* PCI to OCP: Barrier register (32bit) */ +#define SSB_GIGE_SHIM_MAOCPSI 0x0C10 /* PCI to OCP: MaocpSI Control (32bit) */ +#define SSB_GIGE_SHIM_SIOCPMA 0x0C14 /* PCI to OCP: SiocpMa Control (32bit) */ + +/* TM Status High flags */ +#define SSB_GIGE_TMSHIGH_RGMII 0x00010000 /* Have an RGMII PHY-bus */ +/* TM Status Low flags */ +#define SSB_GIGE_TMSLOW_TXBYPASS 0x00080000 /* TX bypass (no delay) */ +#define SSB_GIGE_TMSLOW_RXBYPASS 0x00100000 /* RX bypass (no delay) */ +#define SSB_GIGE_TMSLOW_DLLEN 0x01000000 /* Enable DLL controls */ + +/* Boardflags (low) */ +#define SSB_GIGE_BFL_ROBOSWITCH 0x0010 + + +#define SSB_GIGE_MEM_RES_NAME "SSB Broadcom 47xx GigE memory" +#define SSB_GIGE_IO_RES_NAME "SSB Broadcom 47xx GigE I/O" + +struct ssb_gige { + struct ssb_device *dev; + + spinlock_t lock; + + /* True, if the device has an RGMII bus. + * False, if the device has a GMII bus. */ + bool has_rgmii; + + /* The PCI controller device. */ + struct pci_controller pci_controller; + struct pci_ops pci_ops; + struct resource mem_resource; + struct resource io_resource; +}; + +/* Check whether a PCI device is a SSB Gigabit Ethernet core. */ +extern bool pdev_is_ssb_gige_core(struct pci_dev *pdev); + +/* Convert a pci_dev pointer to a ssb_gige pointer. 
*/ +static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) +{ + if (!pdev_is_ssb_gige_core(pdev)) + return NULL; + return container_of(pdev->bus->ops, struct ssb_gige, pci_ops); +} + +/* Returns whether the PHY is connected by an RGMII bus. */ +static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + return (dev ? dev->has_rgmii : 0); +} + +/* Returns whether we have a Roboswitch. */ +static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (dev) + return !!(dev->dev->bus->sprom.boardflags_lo & + SSB_GIGE_BFL_ROBOSWITCH); + return 0; +} + +/* Returns whether we can only do one DMA at once. */ +static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (dev) + return ((dev->dev->bus->chip_id == 0x4785) && + (dev->dev->bus->chip_rev < 2)); + return 0; +} + +/* Returns whether we must flush posted writes. */ +static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (dev) + return (dev->dev->bus->chip_id == 0x4785); + return 0; +} + +/* Get the device MAC address */ +static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (!dev) + return -ENODEV; + + memcpy(macaddr, dev->dev->bus->sprom.et0mac, 6); + return 0; +} + +/* Get the device phy address */ +static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (!dev) + return -ENODEV; + + return dev->dev->bus->sprom.et0phyaddr; +} + +extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, + struct pci_dev *pdev); +extern int ssb_gige_map_irq(struct ssb_device *sdev, + const struct pci_dev *pdev); + +/* The GigE driver is not a standalone module, because we don't have support + * for unregistering the driver. So we could not unload the module anyway. */ +extern int ssb_gige_init(void); +static inline void ssb_gige_exit(void) +{ + /* Currently we can not unregister the GigE driver, + * because we can not unregister the PCI bridge. 
*/ + BUG(); +} + + +#else /* CONFIG_SSB_DRIVER_GIGE */ +/* Gigabit Ethernet driver disabled */ + + +static inline int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, + struct pci_dev *pdev) +{ + return -ENOSYS; +} +static inline int ssb_gige_map_irq(struct ssb_device *sdev, + const struct pci_dev *pdev) +{ + return -ENOSYS; +} +static inline int ssb_gige_init(void) +{ + return 0; +} +static inline void ssb_gige_exit(void) +{ +} + +static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev) +{ + return 0; +} +static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) +{ + return NULL; +} +static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) +{ + return 0; +} +static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) +{ + return 0; +} +static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) +{ + return 0; +} +static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) +{ + return 0; +} +static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) +{ + return -ENODEV; +} +static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev) +{ + return -ENODEV; +} + +#endif /* CONFIG_SSB_DRIVER_GIGE */ +#endif /* LINUX_SSB_DRIVER_GIGE_H_ */ diff --git a/include/linux/ssb/ssb_driver_mips.h b/include/linux/ssb/ssb_driver_mips.h new file mode 100644 index 000000000..bef6bba32 --- /dev/null +++ b/include/linux/ssb/ssb_driver_mips.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_SSB_MIPSCORE_H_ +#define LINUX_SSB_MIPSCORE_H_ + +#ifdef CONFIG_SSB_DRIVER_MIPS + +struct ssb_device; + +struct ssb_serial_port { + void *regs; + unsigned long clockspeed; + unsigned int irq; + unsigned int baud_base; + unsigned int reg_shift; +}; + +struct ssb_pflash { + bool present; + u8 buswidth; + u32 window; + u32 window_size; +}; + +#ifdef CONFIG_SSB_SFLASH +struct ssb_sflash { + bool present; + u32 window; + u32 blocksize; + u16 numblocks; + u32 size; + + void *priv; +}; +#endif + +struct ssb_mipscore { + struct ssb_device *dev; + + int nr_serial_ports; + struct ssb_serial_port serial_ports[4]; + + struct ssb_pflash pflash; +#ifdef CONFIG_SSB_SFLASH + struct ssb_sflash sflash; +#endif +}; + +extern void ssb_mipscore_init(struct ssb_mipscore *mcore); +extern u32 ssb_cpu_clock(struct ssb_mipscore *mcore); + +extern unsigned int ssb_mips_irq(struct ssb_device *dev); + + +#else /* CONFIG_SSB_DRIVER_MIPS */ + +struct ssb_mipscore { +}; + +static inline +void ssb_mipscore_init(struct ssb_mipscore *mcore) +{ +} + +static inline unsigned int ssb_mips_irq(struct ssb_device *dev) +{ + return 0; +} + +#endif /* CONFIG_SSB_DRIVER_MIPS */ + +#endif /* LINUX_SSB_MIPSCORE_H_ */ diff --git a/include/linux/ssb/ssb_driver_pci.h b/include/linux/ssb/ssb_driver_pci.h new file mode 100644 index 000000000..42824bdfe --- /dev/null +++ b/include/linux/ssb/ssb_driver_pci.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_SSB_PCICORE_H_ +#define LINUX_SSB_PCICORE_H_ + +#include + +struct pci_dev; + + +#ifdef CONFIG_SSB_DRIVER_PCICORE + +/* PCI core registers. 
*/ +#define SSB_PCICORE_CTL 0x0000 /* PCI Control */ +#define SSB_PCICORE_CTL_RST_OE 0x00000001 /* PCI_RESET Output Enable */ +#define SSB_PCICORE_CTL_RST 0x00000002 /* PCI_RESET driven out to pin */ +#define SSB_PCICORE_CTL_CLK_OE 0x00000004 /* Clock gate Output Enable */ +#define SSB_PCICORE_CTL_CLK 0x00000008 /* Gate for clock driven out to pin */ +#define SSB_PCICORE_ARBCTL 0x0010 /* PCI Arbiter Control */ +#define SSB_PCICORE_ARBCTL_INTERN 0x00000001 /* Use internal arbiter */ +#define SSB_PCICORE_ARBCTL_EXTERN 0x00000002 /* Use external arbiter */ +#define SSB_PCICORE_ARBCTL_PARKID 0x00000006 /* Mask, selects which agent is parked on an idle bus */ +#define SSB_PCICORE_ARBCTL_PARKID_LAST 0x00000000 /* Last requestor */ +#define SSB_PCICORE_ARBCTL_PARKID_4710 0x00000002 /* 4710 */ +#define SSB_PCICORE_ARBCTL_PARKID_EXT0 0x00000004 /* External requestor 0 */ +#define SSB_PCICORE_ARBCTL_PARKID_EXT1 0x00000006 /* External requestor 1 */ +#define SSB_PCICORE_ISTAT 0x0020 /* Interrupt status */ +#define SSB_PCICORE_ISTAT_INTA 0x00000001 /* PCI INTA# */ +#define SSB_PCICORE_ISTAT_INTB 0x00000002 /* PCI INTB# */ +#define SSB_PCICORE_ISTAT_SERR 0x00000004 /* PCI SERR# (write to clear) */ +#define SSB_PCICORE_ISTAT_PERR 0x00000008 /* PCI PERR# (write to clear) */ +#define SSB_PCICORE_ISTAT_PME 0x00000010 /* PCI PME# */ +#define SSB_PCICORE_IMASK 0x0024 /* Interrupt mask */ +#define SSB_PCICORE_IMASK_INTA 0x00000001 /* PCI INTA# */ +#define SSB_PCICORE_IMASK_INTB 0x00000002 /* PCI INTB# */ +#define SSB_PCICORE_IMASK_SERR 0x00000004 /* PCI SERR# */ +#define SSB_PCICORE_IMASK_PERR 0x00000008 /* PCI PERR# */ +#define SSB_PCICORE_IMASK_PME 0x00000010 /* PCI PME# */ +#define SSB_PCICORE_MBOX 0x0028 /* Backplane to PCI Mailbox */ +#define SSB_PCICORE_MBOX_F0_0 0x00000100 /* PCI function 0, INT 0 */ +#define SSB_PCICORE_MBOX_F0_1 0x00000200 /* PCI function 0, INT 1 */ +#define SSB_PCICORE_MBOX_F1_0 0x00000400 /* PCI function 1, INT 0 */ +#define SSB_PCICORE_MBOX_F1_1 0x00000800 /* PCI function 1, INT 1 */ +#define SSB_PCICORE_MBOX_F2_0 0x00001000 /* PCI function 2, INT 0 */ +#define SSB_PCICORE_MBOX_F2_1 0x00002000 /* PCI function 2, INT 1 */ +#define SSB_PCICORE_MBOX_F3_0 0x00004000 /* PCI function 3, INT 0 */ +#define SSB_PCICORE_MBOX_F3_1 0x00008000 /* PCI function 3, INT 1 */ +#define SSB_PCICORE_BCAST_ADDR 0x0050 /* Backplane Broadcast Address */ +#define SSB_PCICORE_BCAST_ADDR_MASK 0x000000FF +#define SSB_PCICORE_BCAST_DATA 0x0054 /* Backplane Broadcast Data */ +#define SSB_PCICORE_GPIO_IN 0x0060 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_OUT 0x0064 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_ENABLE 0x0068 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_CTL 0x006C /* rev >= 2 only */ +#define SSB_PCICORE_SBTOPCI0 0x0100 /* Backplane to PCI translation 0 (sbtopci0) */ +#define SSB_PCICORE_SBTOPCI0_MASK 0xFC000000 +#define SSB_PCICORE_SBTOPCI1 0x0104 /* Backplane to PCI translation 1 (sbtopci1) */ +#define SSB_PCICORE_SBTOPCI1_MASK 0xFC000000 +#define SSB_PCICORE_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */ +#define SSB_PCICORE_SBTOPCI2_MASK 0xC0000000 +#define SSB_PCICORE_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */ +#define SSB_PCICORE_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */ +#define SSB_PCICORE_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */ +#define SSB_PCICORE_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */ +#define SSB_PCICORE_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */ + +/* SBtoPCIx */ +#define 
SSB_PCICORE_SBTOPCI_MEM 0x00000000 +#define SSB_PCICORE_SBTOPCI_IO 0x00000001 +#define SSB_PCICORE_SBTOPCI_CFG0 0x00000002 +#define SSB_PCICORE_SBTOPCI_CFG1 0x00000003 +#define SSB_PCICORE_SBTOPCI_PREF 0x00000004 /* Prefetch enable */ +#define SSB_PCICORE_SBTOPCI_BURST 0x00000008 /* Burst enable */ +#define SSB_PCICORE_SBTOPCI_MRM 0x00000020 /* Memory Read Multiple */ +#define SSB_PCICORE_SBTOPCI_RC 0x00000030 /* Read Command mask (rev >= 11) */ +#define SSB_PCICORE_SBTOPCI_RC_READ 0x00000000 /* Memory read */ +#define SSB_PCICORE_SBTOPCI_RC_READL 0x00000010 /* Memory read line */ +#define SSB_PCICORE_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */ + + +/* PCIcore specific boardflags */ +#define SSB_PCICORE_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ + + +struct ssb_pcicore { + struct ssb_device *dev; + u8 setup_done:1; + u8 hostmode:1; + u8 cardbusmode:1; +}; + +extern void ssb_pcicore_init(struct ssb_pcicore *pc); + +/* Enable IRQ routing for a specific device */ +extern int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, + struct ssb_device *dev); + +int ssb_pcicore_plat_dev_init(struct pci_dev *d); +int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); + + +#else /* CONFIG_SSB_DRIVER_PCICORE */ + + +struct ssb_pcicore { +}; + +static inline +void ssb_pcicore_init(struct ssb_pcicore *pc) +{ +} + +static inline +int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, + struct ssb_device *dev) +{ + return 0; +} + +static inline +int ssb_pcicore_plat_dev_init(struct pci_dev *d) +{ + return -ENODEV; +} +static inline +int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return -ENODEV; +} + +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +#endif /* LINUX_SSB_PCICORE_H_ */ diff --git a/include/linux/ssb/ssb_embedded.h b/include/linux/ssb/ssb_embedded.h new file mode 100644 index 000000000..49604ac3d --- /dev/null +++ b/include/linux/ssb/ssb_embedded.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_SSB_EMBEDDED_H_ +#define LINUX_SSB_EMBEDDED_H_ + +#include +#include + + +extern int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks); + +/* Generic GPIO API */ +u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask); +u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_control(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value); + +#endif /* LINUX_SSB_EMBEDDED_H_ */ diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h new file mode 100644 index 000000000..210f46494 --- /dev/null +++ b/include/linux/ssb/ssb_regs.h @@ -0,0 +1,687 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_SSB_REGS_H_ +#define LINUX_SSB_REGS_H_ + + +/* SiliconBackplane Address Map. + * All regions may not exist on all chips. 
+ */ +#define SSB_SDRAM_BASE 0x00000000U /* Physical SDRAM */ +#define SSB_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SSB_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SSB_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */ +#define SSB_ENUM_BASE 0x18000000U /* Enumeration space base */ +#define SSB_ENUM_LIMIT 0x18010000U /* Enumeration space limit */ + +#define SSB_FLASH2 0x1c000000U /* Flash Region 2 (region 1 shadowed here) */ +#define SSB_FLASH2_SZ 0x02000000U /* Size of Flash Region 2 */ + +#define SSB_EXTIF_BASE 0x1f000000U /* External Interface region base address */ +#define SSB_FLASH1 0x1fc00000U /* Flash Region 1 */ +#define SSB_FLASH1_SZ 0x00400000U /* Size of Flash Region 1 */ + +#define SSB_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SSB_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */ +#define SSB_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), low 32 bits */ +#define SSB_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */ +#define SSB_EUART (SSB_EXTIF_BASE + 0x00800000) +#define SSB_LED (SSB_EXTIF_BASE + 0x00900000) + + +/* Enumeration space constants */ +#define SSB_CORE_SIZE 0x1000 /* Size of a core MMIO area */ +#define SSB_MAX_NR_CORES ((SSB_ENUM_LIMIT - SSB_ENUM_BASE) / SSB_CORE_SIZE) + + +/* mips address */ +#define SSB_EJTAG 0xff200000 /* MIPS EJTAG space (2M) */ + + +/* SSB PCI config space registers. */ +#define SSB_PMCSR 0x44 +#define SSB_PE 0x100 +#define SSB_BAR0_WIN 0x80 /* Backplane address space 0 */ +#define SSB_BAR1_WIN 0x84 /* Backplane address space 1 */ +#define SSB_SPROMCTL 0x88 /* SPROM control */ +#define SSB_SPROMCTL_WE 0x10 /* SPROM write enable */ +#define SSB_BAR1_CONTROL 0x8c /* Address space 1 burst control */ +#define SSB_PCI_IRQS 0x90 /* PCI interrupts */ +#define SSB_PCI_IRQMASK 0x94 /* PCI IRQ control and mask (pcirev >= 6 only) */ +#define SSB_BACKPLANE_IRQS 0x98 /* Backplane Interrupts */ +#define SSB_GPIO_IN 0xB0 /* GPIO Input (pcirev >= 3 only) */ +#define SSB_GPIO_OUT 0xB4 /* GPIO Output (pcirev >= 3 only) */ +#define SSB_GPIO_OUT_ENABLE 0xB8 /* GPIO Output Enable/Disable (pcirev >= 3 only) */ +#define SSB_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define SSB_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define SSB_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ +#define SSB_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ + + +#define SSB_BAR0_MAX_RETRIES 50 + +/* Silicon backplane configuration register definitions */ +#define SSB_IPSFLAG 0x0F08 +#define SSB_IPSFLAG_IRQ1 0x0000003F /* which sbflags get routed to mips interrupt 1 */ +#define SSB_IPSFLAG_IRQ1_SHIFT 0 +#define SSB_IPSFLAG_IRQ2 0x00003F00 /* which sbflags get routed to mips interrupt 2 */ +#define SSB_IPSFLAG_IRQ2_SHIFT 8 +#define SSB_IPSFLAG_IRQ3 0x003F0000 /* which sbflags get routed to mips interrupt 3 */ +#define SSB_IPSFLAG_IRQ3_SHIFT 16 +#define SSB_IPSFLAG_IRQ4 0x3F000000 /* which sbflags get routed to mips interrupt 4 */ +#define SSB_IPSFLAG_IRQ4_SHIFT 24 +#define SSB_TPSFLAG 0x0F18 +#define SSB_TPSFLAG_BPFLAG 0x0000003F /* Backplane flag # */ +#define SSB_TPSFLAG_ALWAYSIRQ 0x00000040 /* IRQ is always sent on the Backplane */ +#define SSB_TMERRLOGA 0x0F48 +#define SSB_TMERRLOG 0x0F50 +#define SSB_ADMATCH3 0x0F60 +#define SSB_ADMATCH2 0x0F68 +#define SSB_ADMATCH1 0x0F70 +#define SSB_IMSTATE 
0x0F90 /* SB Initiator Agent State */ +#define SSB_IMSTATE_PC 0x0000000f /* Pipe Count */ +#define SSB_IMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */ +#define SSB_IMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */ +#define SSB_IMSTATE_AP_TS 0x00000010 /* Use timeslices only */ +#define SSB_IMSTATE_AP_TK 0x00000020 /* Use token only */ +#define SSB_IMSTATE_AP_RSV 0x00000030 /* Reserved */ +#define SSB_IMSTATE_IBE 0x00020000 /* In Band Error */ +#define SSB_IMSTATE_TO 0x00040000 /* Timeout */ +#define SSB_IMSTATE_BUSY 0x01800000 /* Busy (Backplane rev >= 2.3 only) */ +#define SSB_IMSTATE_REJECT 0x02000000 /* Reject (Backplane rev >= 2.3 only) */ +#define SSB_INTVEC 0x0F94 /* SB Interrupt Mask */ +#define SSB_INTVEC_PCI 0x00000001 /* Enable interrupts for PCI */ +#define SSB_INTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */ +#define SSB_INTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */ +#define SSB_INTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */ +#define SSB_INTVEC_USB 0x00000010 /* Enable interrupts for usb */ +#define SSB_INTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */ +#define SSB_INTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */ +#define SSB_TMSLOW 0x0F98 /* SB Target State Low */ +#define SSB_TMSLOW_RESET 0x00000001 /* Reset */ +#define SSB_TMSLOW_REJECT 0x00000002 /* Reject (Standard Backplane) */ +#define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ +#define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ +#define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ +#define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ +#define SSB_TMSLOW_BE 0x80000000 /* BIST Enable */ +#define SSB_TMSHIGH 0x0F9C /* SB Target State High */ +#define SSB_TMSHIGH_SERR 0x00000001 /* S-error */ +#define SSB_TMSHIGH_INT 0x00000002 /* Interrupt */ +#define SSB_TMSHIGH_BUSY 0x00000004 /* Busy */ +#define SSB_TMSHIGH_TO 0x00000020 /* Timeout. 
Backplane rev >= 2.3 only */ +#define SSB_TMSHIGH_COREFL 0x1FFF0000 /* Core specific flags */ +#define SSB_TMSHIGH_COREFL_SHIFT 16 +#define SSB_TMSHIGH_DMA64 0x10000000 /* 64bit DMA supported */ +#define SSB_TMSHIGH_GCR 0x20000000 /* Gated Clock Request */ +#define SSB_TMSHIGH_BISTF 0x40000000 /* BIST Failed */ +#define SSB_TMSHIGH_BISTD 0x80000000 /* BIST Done */ +#define SSB_BWA0 0x0FA0 +#define SSB_IMCFGLO 0x0FA8 +#define SSB_IMCFGLO_SERTO 0x00000007 /* Service timeout */ +#define SSB_IMCFGLO_REQTO 0x00000070 /* Request timeout */ +#define SSB_IMCFGLO_REQTO_SHIFT 4 +#define SSB_IMCFGLO_CONNID 0x00FF0000 /* Connection ID */ +#define SSB_IMCFGLO_CONNID_SHIFT 16 +#define SSB_IMCFGHI 0x0FAC +#define SSB_ADMATCH0 0x0FB0 +#define SSB_TMCFGLO 0x0FB8 +#define SSB_TMCFGHI 0x0FBC +#define SSB_BCONFIG 0x0FC0 +#define SSB_BSTATE 0x0FC8 +#define SSB_ACTCFG 0x0FD8 +#define SSB_FLAGST 0x0FE8 +#define SSB_IDLOW 0x0FF8 +#define SSB_IDLOW_CFGSP 0x00000003 /* Config Space */ +#define SSB_IDLOW_ADDRNGE 0x00000038 /* Address Ranges supported */ +#define SSB_IDLOW_ADDRNGE_SHIFT 3 +#define SSB_IDLOW_SYNC 0x00000040 +#define SSB_IDLOW_INITIATOR 0x00000080 +#define SSB_IDLOW_MIBL 0x00000F00 /* Minimum Backplane latency */ +#define SSB_IDLOW_MIBL_SHIFT 8 +#define SSB_IDLOW_MABL 0x0000F000 /* Maximum Backplane latency */ +#define SSB_IDLOW_MABL_SHIFT 12 +#define SSB_IDLOW_TIF 0x00010000 /* This Initiator is first */ +#define SSB_IDLOW_CCW 0x000C0000 /* Cycle counter width */ +#define SSB_IDLOW_CCW_SHIFT 18 +#define SSB_IDLOW_TPT 0x00F00000 /* Target ports */ +#define SSB_IDLOW_TPT_SHIFT 20 +#define SSB_IDLOW_INITP 0x0F000000 /* Initiator ports */ +#define SSB_IDLOW_INITP_SHIFT 24 +#define SSB_IDLOW_SSBREV 0xF0000000 /* Sonics Backplane Revision code */ +#define SSB_IDLOW_SSBREV_22 0x00000000 /* <= 2.2 */ +#define SSB_IDLOW_SSBREV_23 0x10000000 /* 2.3 */ +#define SSB_IDLOW_SSBREV_24 0x40000000 /* ?? Found in BCM4328 */ +#define SSB_IDLOW_SSBREV_25 0x50000000 /* ?? Not Found yet */ +#define SSB_IDLOW_SSBREV_26 0x60000000 /* ?? Found in some BCM4311/2 */ +#define SSB_IDLOW_SSBREV_27 0x70000000 /* ?? Found in some BCM4311/2 */ +#define SSB_IDHIGH 0x0FFC /* SB Identification High */ +#define SSB_IDHIGH_RCLO 0x0000000F /* Revision Code (low part) */ +#define SSB_IDHIGH_CC 0x00008FF0 /* Core Code */ +#define SSB_IDHIGH_CC_SHIFT 4 +#define SSB_IDHIGH_RCHI 0x00007000 /* Revision Code (high part) */ +#define SSB_IDHIGH_RCHI_SHIFT 8 /* yes, shift 8 is right */ +#define SSB_IDHIGH_VC 0xFFFF0000 /* Vendor Code */ +#define SSB_IDHIGH_VC_SHIFT 16 + +/* SPROM shadow area. If not otherwise noted, fields are + * two bytes wide. Note that the SPROM can _only_ be read + * in two-byte quantities. 
+ */ +#define SSB_SPROMSIZE_WORDS 64 +#define SSB_SPROMSIZE_BYTES (SSB_SPROMSIZE_WORDS * sizeof(u16)) +#define SSB_SPROMSIZE_WORDS_R123 64 +#define SSB_SPROMSIZE_WORDS_R4 220 +#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) +#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) +#define SSB_SPROMSIZE_WORDS_R10 230 +#define SSB_SPROMSIZE_WORDS_R11 234 +#define SSB_SPROM_BASE1 0x1000 +#define SSB_SPROM_BASE31 0x0800 +#define SSB_SPROM_REVISION 0x007E +#define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */ +#define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */ +#define SSB_SPROM_REVISION_CRC_SHIFT 8 + +/* SPROM Revision 1 */ +#define SSB_SPROM1_SPID 0x0004 /* Subsystem Product ID for PCI */ +#define SSB_SPROM1_SVID 0x0006 /* Subsystem Vendor ID for PCI */ +#define SSB_SPROM1_PID 0x0008 /* Product ID for PCI */ +#define SSB_SPROM1_IL0MAC 0x0048 /* 6 bytes MAC address for 802.11b/g */ +#define SSB_SPROM1_ET0MAC 0x004E /* 6 bytes MAC address for Ethernet */ +#define SSB_SPROM1_ET1MAC 0x0054 /* 6 bytes MAC address for 802.11a */ +#define SSB_SPROM1_ETHPHY 0x005A /* Ethernet PHY settings */ +#define SSB_SPROM1_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ +#define SSB_SPROM1_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ +#define SSB_SPROM1_ETHPHY_ET1A_SHIFT 5 +#define SSB_SPROM1_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ +#define SSB_SPROM1_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ +#define SSB_SPROM1_BINF 0x005C /* Board info */ +#define SSB_SPROM1_BINF_BREV 0x00FF /* Board Revision */ +#define SSB_SPROM1_BINF_CCODE 0x0F00 /* Country Code */ +#define SSB_SPROM1_BINF_CCODE_SHIFT 8 +#define SSB_SPROM1_BINF_ANTBG 0x3000 /* Available B-PHY and G-PHY antennas */ +#define SSB_SPROM1_BINF_ANTBG_SHIFT 12 +#define SSB_SPROM1_BINF_ANTA 0xC000 /* Available A-PHY antennas */ +#define SSB_SPROM1_BINF_ANTA_SHIFT 14 +#define SSB_SPROM1_PA0B0 0x005E +#define SSB_SPROM1_PA0B1 0x0060 +#define SSB_SPROM1_PA0B2 0x0062 +#define SSB_SPROM1_GPIOA 0x0064 /* General Purpose IO pins 0 and 1 */ +#define SSB_SPROM1_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM1_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM1_GPIOA_P1_SHIFT 8 +#define SSB_SPROM1_GPIOB 0x0066 /* General Purpuse IO pins 2 and 3 */ +#define SSB_SPROM1_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM1_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM1_GPIOB_P3_SHIFT 8 +#define SSB_SPROM1_MAXPWR 0x0068 /* Power Amplifier Max Power */ +#define SSB_SPROM1_MAXPWR_BG 0x00FF /* B-PHY and G-PHY (in dBm Q5.2) */ +#define SSB_SPROM1_MAXPWR_A 0xFF00 /* A-PHY (in dBm Q5.2) */ +#define SSB_SPROM1_MAXPWR_A_SHIFT 8 +#define SSB_SPROM1_PA1B0 0x006A +#define SSB_SPROM1_PA1B1 0x006C +#define SSB_SPROM1_PA1B2 0x006E +#define SSB_SPROM1_ITSSI 0x0070 /* Idle TSSI Target */ +#define SSB_SPROM1_ITSSI_BG 0x00FF /* B-PHY and G-PHY*/ +#define SSB_SPROM1_ITSSI_A 0xFF00 /* A-PHY */ +#define SSB_SPROM1_ITSSI_A_SHIFT 8 +#define SSB_SPROM1_BFLLO 0x0072 /* Boardflags (low 16 bits) */ +#define SSB_SPROM1_AGAIN 0x0074 /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM1_AGAIN_BG 0x00FF /* B-PHY and G-PHY */ +#define SSB_SPROM1_AGAIN_BG_SHIFT 0 +#define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */ +#define SSB_SPROM1_AGAIN_A_SHIFT 8 +#define SSB_SPROM1_CCODE 0x0076 + +/* SPROM Revision 2 (inherits from rev 1) */ +#define SSB_SPROM2_BFLHI 0x0038 /* Boardflags (high 16 bits) */ +#define SSB_SPROM2_MAXP_A 0x003A /* A-PHY Max Power */ +#define SSB_SPROM2_MAXP_A_HI 0x00FF /* Max Power High */ +#define SSB_SPROM2_MAXP_A_LO 0xFF00 /* Max Power Low */ 
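
Editor's illustrative aside, not part of the upstream header: the SPROM comment above notes that the shadow area can only be read in two-byte quantities, and SSB_SPROM_REVISION packs the revision number and the CRC8 into one such word. The sketch below only splits that word using the masks defined above; fetching the 16-bit value from the shadow area (e.g. relative to SSB_SPROM_BASE1) is deliberately omitted, and the function name is hypothetical.

/* Hedged sketch: split the SSB_SPROM_REVISION word into its fields.
 * The masks and shift come from the definitions above; the helper is
 * illustrative only and not part of the patch. */
static void example_decode_sprom_revision(u16 rev_word)
{
	u8 rev = rev_word & SSB_SPROM_REVISION_REV;
	u8 crc = (rev_word & SSB_SPROM_REVISION_CRC) >> SSB_SPROM_REVISION_CRC_SHIFT;

	pr_info("SPROM revision %u, CRC8 0x%02X\n", rev, crc);
}
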
+#define SSB_SPROM2_MAXP_A_LO_SHIFT 8 +#define SSB_SPROM2_PA1LOB0 0x003C /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1LOB1 0x003E /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1LOB2 0x0040 /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1HIB0 0x0042 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_PA1HIB1 0x0044 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_PA1HIB2 0x0046 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_OPO 0x0078 /* OFDM Power Offset from CCK Level */ +#define SSB_SPROM2_OPO_VALUE 0x00FF +#define SSB_SPROM2_OPO_UNUSED 0xFF00 +#define SSB_SPROM2_CCODE 0x007C /* Two char Country Code */ + +/* SPROM Revision 3 (inherits most data from rev 2) */ +#define SSB_SPROM3_OFDMAPO 0x002C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_OFDMALPO 0x0030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_OFDMAHPO 0x0034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_GPIOLDC 0x0042 /* GPIO LED Powersave Duty Cycle (4 bytes, BigEndian) */ +#define SSB_SPROM3_GPIOLDC_OFF 0x0000FF00 /* Off Count */ +#define SSB_SPROM3_GPIOLDC_OFF_SHIFT 8 +#define SSB_SPROM3_GPIOLDC_ON 0x00FF0000 /* On Count */ +#define SSB_SPROM3_GPIOLDC_ON_SHIFT 16 +#define SSB_SPROM3_IL0MAC 0x004A /* 6 bytes MAC address for 802.11b/g */ +#define SSB_SPROM3_CCKPO 0x0078 /* CCK Power Offset */ +#define SSB_SPROM3_CCKPO_1M 0x000F /* 1M Rate PO */ +#define SSB_SPROM3_CCKPO_2M 0x00F0 /* 2M Rate PO */ +#define SSB_SPROM3_CCKPO_2M_SHIFT 4 +#define SSB_SPROM3_CCKPO_55M 0x0F00 /* 5.5M Rate PO */ +#define SSB_SPROM3_CCKPO_55M_SHIFT 8 +#define SSB_SPROM3_CCKPO_11M 0xF000 /* 11M Rate PO */ +#define SSB_SPROM3_CCKPO_11M_SHIFT 12 +#define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */ + +/* SPROM Revision 4 */ +#define SSB_SPROM4_BOARDREV 0x0042 /* Board revision */ +#define SSB_SPROM4_BFLLO 0x0044 /* Boardflags (low 16 bits) */ +#define SSB_SPROM4_BFLHI 0x0046 /* Board Flags Hi */ +#define SSB_SPROM4_BFL2LO 0x0048 /* Board flags 2 (low 16 bits) */ +#define SSB_SPROM4_BFL2HI 0x004A /* Board flags 2 Hi */ +#define SSB_SPROM4_IL0MAC 0x004C /* 6 byte MAC address for a/b/g/n */ +#define SSB_SPROM4_CCODE 0x0052 /* Country Code (2 bytes) */ +#define SSB_SPROM4_GPIOA 0x0056 /* Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM4_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM4_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM4_GPIOA_P1_SHIFT 8 +#define SSB_SPROM4_GPIOB 0x0058 /* Gen. Purpose IO # 2 and 3 */ +#define SSB_SPROM4_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM4_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM4_GPIOB_P3_SHIFT 8 +#define SSB_SPROM4_ETHPHY 0x005A /* Ethernet PHY settings ?? 
*/ +#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ +#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ +#define SSB_SPROM4_ETHPHY_ET1A_SHIFT 5 +#define SSB_SPROM4_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ +#define SSB_SPROM4_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ +#define SSB_SPROM4_ANTAVAIL 0x005C /* Antenna available bitfields */ +#define SSB_SPROM4_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */ +#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 0 +#define SSB_SPROM4_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */ +#define SSB_SPROM4_ANTAVAIL_A_SHIFT 8 +#define SSB_SPROM4_AGAIN01 0x005E /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM4_AGAIN0 0x00FF /* Antenna 0 */ +#define SSB_SPROM4_AGAIN0_SHIFT 0 +#define SSB_SPROM4_AGAIN1 0xFF00 /* Antenna 1 */ +#define SSB_SPROM4_AGAIN1_SHIFT 8 +#define SSB_SPROM4_AGAIN23 0x0060 +#define SSB_SPROM4_AGAIN2 0x00FF /* Antenna 2 */ +#define SSB_SPROM4_AGAIN2_SHIFT 0 +#define SSB_SPROM4_AGAIN3 0xFF00 /* Antenna 3 */ +#define SSB_SPROM4_AGAIN3_SHIFT 8 +#define SSB_SPROM4_TXPID2G01 0x0062 /* TX Power Index 2GHz */ +#define SSB_SPROM4_TXPID2G0 0x00FF +#define SSB_SPROM4_TXPID2G0_SHIFT 0 +#define SSB_SPROM4_TXPID2G1 0xFF00 +#define SSB_SPROM4_TXPID2G1_SHIFT 8 +#define SSB_SPROM4_TXPID2G23 0x0064 /* TX Power Index 2GHz */ +#define SSB_SPROM4_TXPID2G2 0x00FF +#define SSB_SPROM4_TXPID2G2_SHIFT 0 +#define SSB_SPROM4_TXPID2G3 0xFF00 +#define SSB_SPROM4_TXPID2G3_SHIFT 8 +#define SSB_SPROM4_TXPID5G01 0x0066 /* TX Power Index 5GHz middle subband */ +#define SSB_SPROM4_TXPID5G0 0x00FF +#define SSB_SPROM4_TXPID5G0_SHIFT 0 +#define SSB_SPROM4_TXPID5G1 0xFF00 +#define SSB_SPROM4_TXPID5G1_SHIFT 8 +#define SSB_SPROM4_TXPID5G23 0x0068 /* TX Power Index 5GHz middle subband */ +#define SSB_SPROM4_TXPID5G2 0x00FF +#define SSB_SPROM4_TXPID5G2_SHIFT 0 +#define SSB_SPROM4_TXPID5G3 0xFF00 +#define SSB_SPROM4_TXPID5G3_SHIFT 8 +#define SSB_SPROM4_TXPID5GL01 0x006A /* TX Power Index 5GHz low subband */ +#define SSB_SPROM4_TXPID5GL0 0x00FF +#define SSB_SPROM4_TXPID5GL0_SHIFT 0 +#define SSB_SPROM4_TXPID5GL1 0xFF00 +#define SSB_SPROM4_TXPID5GL1_SHIFT 8 +#define SSB_SPROM4_TXPID5GL23 0x006C /* TX Power Index 5GHz low subband */ +#define SSB_SPROM4_TXPID5GL2 0x00FF +#define SSB_SPROM4_TXPID5GL2_SHIFT 0 +#define SSB_SPROM4_TXPID5GL3 0xFF00 +#define SSB_SPROM4_TXPID5GL3_SHIFT 8 +#define SSB_SPROM4_TXPID5GH01 0x006E /* TX Power Index 5GHz high subband */ +#define SSB_SPROM4_TXPID5GH0 0x00FF +#define SSB_SPROM4_TXPID5GH0_SHIFT 0 +#define SSB_SPROM4_TXPID5GH1 0xFF00 +#define SSB_SPROM4_TXPID5GH1_SHIFT 8 +#define SSB_SPROM4_TXPID5GH23 0x0070 /* TX Power Index 5GHz high subband */ +#define SSB_SPROM4_TXPID5GH2 0x00FF +#define SSB_SPROM4_TXPID5GH2_SHIFT 0 +#define SSB_SPROM4_TXPID5GH3 0xFF00 +#define SSB_SPROM4_TXPID5GH3_SHIFT 8 + +/* There are 4 blocks with power info sharing the same layout */ +#define SSB_SPROM4_PWR_INFO_CORE0 0x0080 +#define SSB_SPROM4_PWR_INFO_CORE1 0x00AE +#define SSB_SPROM4_PWR_INFO_CORE2 0x00DC +#define SSB_SPROM4_PWR_INFO_CORE3 0x010A + +#define SSB_SPROM4_2G_MAXP_ITSSI 0x00 /* 2 GHz ITSSI and 2 GHz Max Power */ +#define SSB_SPROM4_2G_MAXP 0x00FF +#define SSB_SPROM4_2G_ITSSI 0xFF00 +#define SSB_SPROM4_2G_ITSSI_SHIFT 8 +#define SSB_SPROM4_2G_PA_0 0x02 /* 2 GHz power amp */ +#define SSB_SPROM4_2G_PA_1 0x04 +#define SSB_SPROM4_2G_PA_2 0x06 +#define SSB_SPROM4_2G_PA_3 0x08 +#define SSB_SPROM4_5G_MAXP_ITSSI 0x0A /* 5 GHz ITSSI and 5.3 GHz Max Power */ +#define SSB_SPROM4_5G_MAXP 0x00FF +#define SSB_SPROM4_5G_ITSSI 0xFF00 +#define SSB_SPROM4_5G_ITSSI_SHIFT 
8 +#define SSB_SPROM4_5GHL_MAXP 0x0C /* 5.2 GHz and 5.8 GHz Max Power */ +#define SSB_SPROM4_5GH_MAXP 0x00FF +#define SSB_SPROM4_5GL_MAXP 0xFF00 +#define SSB_SPROM4_5GL_MAXP_SHIFT 8 +#define SSB_SPROM4_5G_PA_0 0x0E /* 5.3 GHz power amp */ +#define SSB_SPROM4_5G_PA_1 0x10 +#define SSB_SPROM4_5G_PA_2 0x12 +#define SSB_SPROM4_5G_PA_3 0x14 +#define SSB_SPROM4_5GL_PA_0 0x16 /* 5.2 GHz power amp */ +#define SSB_SPROM4_5GL_PA_1 0x18 +#define SSB_SPROM4_5GL_PA_2 0x1A +#define SSB_SPROM4_5GL_PA_3 0x1C +#define SSB_SPROM4_5GH_PA_0 0x1E /* 5.8 GHz power amp */ +#define SSB_SPROM4_5GH_PA_1 0x20 +#define SSB_SPROM4_5GH_PA_2 0x22 +#define SSB_SPROM4_5GH_PA_3 0x24 + +/* TODO: Make it deprecated */ +#define SSB_SPROM4_MAXP_BG 0x0080 /* Max Power BG in path 1 */ +#define SSB_SPROM4_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */ +#define SSB_SPROM4_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ +#define SSB_SPROM4_ITSSI_BG_SHIFT 8 +#define SSB_SPROM4_MAXP_A 0x008A /* Max Power A in path 1 */ +#define SSB_SPROM4_MAXP_A_MASK 0x00FF /* Mask for Max Power A */ +#define SSB_SPROM4_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ +#define SSB_SPROM4_ITSSI_A_SHIFT 8 +#define SSB_SPROM4_PA0B0 0x0082 /* The paXbY locations are */ +#define SSB_SPROM4_PA0B1 0x0084 /* only guesses */ +#define SSB_SPROM4_PA0B2 0x0086 +#define SSB_SPROM4_PA1B0 0x008E +#define SSB_SPROM4_PA1B1 0x0090 +#define SSB_SPROM4_PA1B2 0x0092 + +/* SPROM Revision 5 (inherits most data from rev 4) */ +#define SSB_SPROM5_CCODE 0x0044 /* Country Code (2 bytes) */ +#define SSB_SPROM5_BFLLO 0x004A /* Boardflags (low 16 bits) */ +#define SSB_SPROM5_BFLHI 0x004C /* Board Flags Hi */ +#define SSB_SPROM5_BFL2LO 0x004E /* Board flags 2 (low 16 bits) */ +#define SSB_SPROM5_BFL2HI 0x0050 /* Board flags 2 Hi */ +#define SSB_SPROM5_IL0MAC 0x0052 /* 6 byte MAC address for a/b/g/n */ +#define SSB_SPROM5_GPIOA 0x0076 /* Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM5_GPIOA_P1_SHIFT 8 +#define SSB_SPROM5_GPIOB 0x0078 /* Gen. Purpose IO # 2 and 3 */ +#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM5_GPIOB_P3_SHIFT 8 + +/* SPROM Revision 8 */ +#define SSB_SPROM8_BOARDREV 0x0082 /* Board revision */ +#define SSB_SPROM8_BFLLO 0x0084 /* Board flags (bits 0-15) */ +#define SSB_SPROM8_BFLHI 0x0086 /* Board flags (bits 16-31) */ +#define SSB_SPROM8_BFL2LO 0x0088 /* Board flags (bits 32-47) */ +#define SSB_SPROM8_BFL2HI 0x008A /* Board flags (bits 48-63) */ +#define SSB_SPROM8_IL0MAC 0x008C /* 6 byte MAC address */ +#define SSB_SPROM8_CCODE 0x0092 /* 2 byte country code */ +#define SSB_SPROM8_GPIOA 0x0096 /*Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM8_GPIOA_P1_SHIFT 8 +#define SSB_SPROM8_GPIOB 0x0098 /* Gen. 
Purpose IO # 2 and 3 */ +#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM8_GPIOB_P3_SHIFT 8 +#define SSB_SPROM8_LEDDC 0x009A +#define SSB_SPROM8_LEDDC_ON 0xFF00 /* oncount */ +#define SSB_SPROM8_LEDDC_ON_SHIFT 8 +#define SSB_SPROM8_LEDDC_OFF 0x00FF /* offcount */ +#define SSB_SPROM8_LEDDC_OFF_SHIFT 0 +#define SSB_SPROM8_ANTAVAIL 0x009C /* Antenna available bitfields*/ +#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */ +#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8 +#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */ +#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0 +#define SSB_SPROM8_AGAIN01 0x009E /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */ +#define SSB_SPROM8_AGAIN0_SHIFT 0 +#define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */ +#define SSB_SPROM8_AGAIN1_SHIFT 8 +#define SSB_SPROM8_AGAIN23 0x00A0 +#define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */ +#define SSB_SPROM8_AGAIN2_SHIFT 0 +#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */ +#define SSB_SPROM8_AGAIN3_SHIFT 8 +#define SSB_SPROM8_TXRXC 0x00A2 +#define SSB_SPROM8_TXRXC_TXCHAIN 0x000f +#define SSB_SPROM8_TXRXC_TXCHAIN_SHIFT 0 +#define SSB_SPROM8_TXRXC_RXCHAIN 0x00f0 +#define SSB_SPROM8_TXRXC_RXCHAIN_SHIFT 4 +#define SSB_SPROM8_TXRXC_SWITCH 0xff00 +#define SSB_SPROM8_TXRXC_SWITCH_SHIFT 8 +#define SSB_SPROM8_RSSIPARM2G 0x00A4 /* RSSI params for 2GHz */ +#define SSB_SPROM8_RSSISMF2G 0x000F +#define SSB_SPROM8_RSSISMC2G 0x00F0 +#define SSB_SPROM8_RSSISMC2G_SHIFT 4 +#define SSB_SPROM8_RSSISAV2G 0x0700 +#define SSB_SPROM8_RSSISAV2G_SHIFT 8 +#define SSB_SPROM8_BXA2G 0x1800 +#define SSB_SPROM8_BXA2G_SHIFT 11 +#define SSB_SPROM8_RSSIPARM5G 0x00A6 /* RSSI params for 5GHz */ +#define SSB_SPROM8_RSSISMF5G 0x000F +#define SSB_SPROM8_RSSISMC5G 0x00F0 +#define SSB_SPROM8_RSSISMC5G_SHIFT 4 +#define SSB_SPROM8_RSSISAV5G 0x0700 +#define SSB_SPROM8_RSSISAV5G_SHIFT 8 +#define SSB_SPROM8_BXA5G 0x1800 +#define SSB_SPROM8_BXA5G_SHIFT 11 +#define SSB_SPROM8_TRI25G 0x00A8 /* TX isolation 2.4&5.3GHz */ +#define SSB_SPROM8_TRI2G 0x00FF /* TX isolation 2.4GHz */ +#define SSB_SPROM8_TRI5G 0xFF00 /* TX isolation 5.3GHz */ +#define SSB_SPROM8_TRI5G_SHIFT 8 +#define SSB_SPROM8_TRI5GHL 0x00AA /* TX isolation 5.2/5.8GHz */ +#define SSB_SPROM8_TRI5GL 0x00FF /* TX isolation 5.2GHz */ +#define SSB_SPROM8_TRI5GH 0xFF00 /* TX isolation 5.8GHz */ +#define SSB_SPROM8_TRI5GH_SHIFT 8 +#define SSB_SPROM8_RXPO 0x00AC /* RX power offsets */ +#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ +#define SSB_SPROM8_RXPO2G_SHIFT 0 +#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ +#define SSB_SPROM8_RXPO5G_SHIFT 8 +#define SSB_SPROM8_FEM2G 0x00AE +#define SSB_SPROM8_FEM5G 0x00B0 +#define SSB_SROM8_FEM_TSSIPOS 0x0001 +#define SSB_SROM8_FEM_TSSIPOS_SHIFT 0 +#define SSB_SROM8_FEM_EXTPA_GAIN 0x0006 +#define SSB_SROM8_FEM_EXTPA_GAIN_SHIFT 1 +#define SSB_SROM8_FEM_PDET_RANGE 0x00F8 +#define SSB_SROM8_FEM_PDET_RANGE_SHIFT 3 +#define SSB_SROM8_FEM_TR_ISO 0x0700 +#define SSB_SROM8_FEM_TR_ISO_SHIFT 8 +#define SSB_SROM8_FEM_ANTSWLUT 0xF800 +#define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11 +#define SSB_SPROM8_THERMAL 0x00B2 +#define SSB_SPROM8_THERMAL_OFFSET 0x00ff +#define SSB_SPROM8_THERMAL_OFFSET_SHIFT 0 +#define SSB_SPROM8_THERMAL_TRESH 0xff00 +#define SSB_SPROM8_THERMAL_TRESH_SHIFT 8 +/* Temp sense related entries */ +#define SSB_SPROM8_RAWTS 0x00B4 +#define SSB_SPROM8_RAWTS_RAWTEMP 0x01ff +#define SSB_SPROM8_RAWTS_RAWTEMP_SHIFT 0 +#define SSB_SPROM8_RAWTS_MEASPOWER 0xfe00 
+#define SSB_SPROM8_RAWTS_MEASPOWER_SHIFT 9 +#define SSB_SPROM8_OPT_CORRX 0x00B6 +#define SSB_SPROM8_OPT_CORRX_TEMP_SLOPE 0x00ff +#define SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT 0 +#define SSB_SPROM8_OPT_CORRX_TEMPCORRX 0xfc00 +#define SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT 10 +#define SSB_SPROM8_OPT_CORRX_TEMP_OPTION 0x0300 +#define SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT 8 +/* FOC: frequency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */ +#define SSB_SPROM8_HWIQ_IQSWP 0x00B8 +#define SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR 0x000f +#define SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT 0 +#define SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP 0x0010 +#define SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT 4 +#define SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL 0x0020 +#define SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT 5 +#define SSB_SPROM8_TEMPDELTA 0x00BC +#define SSB_SPROM8_TEMPDELTA_PHYCAL 0x00ff +#define SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT 0 +#define SSB_SPROM8_TEMPDELTA_PERIOD 0x0f00 +#define SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT 8 +#define SSB_SPROM8_TEMPDELTA_HYSTERESIS 0xf000 +#define SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT 12 + +/* There are 4 blocks with power info sharing the same layout */ +#define SSB_SROM8_PWR_INFO_CORE0 0x00C0 +#define SSB_SROM8_PWR_INFO_CORE1 0x00E0 +#define SSB_SROM8_PWR_INFO_CORE2 0x0100 +#define SSB_SROM8_PWR_INFO_CORE3 0x0120 + +#define SSB_SROM8_2G_MAXP_ITSSI 0x00 +#define SSB_SPROM8_2G_MAXP 0x00FF +#define SSB_SPROM8_2G_ITSSI 0xFF00 +#define SSB_SPROM8_2G_ITSSI_SHIFT 8 +#define SSB_SROM8_2G_PA_0 0x02 /* 2GHz power amp settings */ +#define SSB_SROM8_2G_PA_1 0x04 +#define SSB_SROM8_2G_PA_2 0x06 +#define SSB_SROM8_5G_MAXP_ITSSI 0x08 /* 5GHz ITSSI and 5.3GHz Max Power */ +#define SSB_SPROM8_5G_MAXP 0x00FF +#define SSB_SPROM8_5G_ITSSI 0xFF00 +#define SSB_SPROM8_5G_ITSSI_SHIFT 8 +#define SSB_SPROM8_5GHL_MAXP 0x0A /* 5.2GHz and 5.8GHz Max Power */ +#define SSB_SPROM8_5GH_MAXP 0x00FF +#define SSB_SPROM8_5GL_MAXP 0xFF00 +#define SSB_SPROM8_5GL_MAXP_SHIFT 8 +#define SSB_SROM8_5G_PA_0 0x0C /* 5.3GHz power amp settings */ +#define SSB_SROM8_5G_PA_1 0x0E +#define SSB_SROM8_5G_PA_2 0x10 +#define SSB_SROM8_5GL_PA_0 0x12 /* 5.2GHz power amp settings */ +#define SSB_SROM8_5GL_PA_1 0x14 +#define SSB_SROM8_5GL_PA_2 0x16 +#define SSB_SROM8_5GH_PA_0 0x18 /* 5.8GHz power amp settings */ +#define SSB_SROM8_5GH_PA_1 0x1A +#define SSB_SROM8_5GH_PA_2 0x1C + +/* TODO: Make it deprecated */ +#define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */ +#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ +#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ +#define SSB_SPROM8_ITSSI_BG_SHIFT 8 +#define SSB_SPROM8_PA0B0 0x00C2 /* 2GHz power amp settings */ +#define SSB_SPROM8_PA0B1 0x00C4 +#define SSB_SPROM8_PA0B2 0x00C6 +#define SSB_SPROM8_MAXP_A 0x00C8 /* Max Power 5.3GHz */ +#define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power 5.3GHz */ +#define SSB_SPROM8_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ +#define SSB_SPROM8_ITSSI_A_SHIFT 8 +#define SSB_SPROM8_MAXP_AHL 0x00CA /* Max Power 5.2/5.8GHz */ +#define SSB_SPROM8_MAXP_AH_MASK 0x00FF /* Mask for Max Power 5.8GHz */ +#define SSB_SPROM8_MAXP_AL_MASK 0xFF00 /* Mask for Max Power 5.2GHz */ +#define SSB_SPROM8_MAXP_AL_SHIFT 8 +#define SSB_SPROM8_PA1B0 0x00CC /* 5.3GHz power amp settings */ +#define SSB_SPROM8_PA1B1 0x00CE +#define SSB_SPROM8_PA1B2 0x00D0 +#define SSB_SPROM8_PA1LOB0 0x00D2 /* 5.2GHz power amp settings */ +#define SSB_SPROM8_PA1LOB1 0x00D4 +#define SSB_SPROM8_PA1LOB2 0x00D6 +#define SSB_SPROM8_PA1HIB0 0x00D8 /* 5.8GHz
power amp settings */ +#define SSB_SPROM8_PA1HIB1 0x00DA +#define SSB_SPROM8_PA1HIB2 0x00DC + +#define SSB_SPROM8_CCK2GPO 0x0140 /* CCK power offset */ +#define SSB_SPROM8_OFDM2GPO 0x0142 /* 2.4GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GPO 0x0146 /* 5.3GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GLPO 0x014A /* 5.2GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GHPO 0x014E /* 5.8GHz OFDM power offset */ + +#define SSB_SPROM8_2G_MCSPO 0x0152 +#define SSB_SPROM8_5G_MCSPO 0x0162 +#define SSB_SPROM8_5GL_MCSPO 0x0172 +#define SSB_SPROM8_5GH_MCSPO 0x0182 + +#define SSB_SPROM8_CDDPO 0x0192 +#define SSB_SPROM8_STBCPO 0x0194 +#define SSB_SPROM8_BW40PO 0x0196 +#define SSB_SPROM8_BWDUPPO 0x0198 + +/* Values for boardflags_lo read from SPROM */ +#define SSB_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistence */ +#define SSB_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */ +#define SSB_BFL_AIRLINEMODE 0x0004 /* implements GPIO 13 radio disable indication */ +#define SSB_BFL_RSSI 0x0008 /* software calculates nrssi slope. */ +#define SSB_BFL_ENETSPI 0x0010 /* has ephy roboswitch spi */ +#define SSB_BFL_XTAL_NOSLOW 0x0020 /* no slow clock available */ +#define SSB_BFL_CCKHIPWR 0x0040 /* can do high power CCK transmission */ +#define SSB_BFL_ENETADM 0x0080 /* has ADMtek switch */ +#define SSB_BFL_ENETVLAN 0x0100 /* can do vlan */ +#define SSB_BFL_AFTERBURNER 0x0200 /* supports Afterburner mode */ +#define SSB_BFL_NOPCI 0x0400 /* board leaves PCI floating */ +#define SSB_BFL_FEM 0x0800 /* supports the Front End Module */ +#define SSB_BFL_EXTLNA 0x1000 /* has an external LNA */ +#define SSB_BFL_HGPA 0x2000 /* has high gain PA */ +#define SSB_BFL_BTCMOD 0x4000 /* BFL_BTCOEXIST is given in alternate GPIOs */ +#define SSB_BFL_ALTIQ 0x8000 /* alternate I/Q settings */ + +/* Values for boardflags_hi read from SPROM */ +#define SSB_BFH_NOPA 0x0001 /* has no PA */ +#define SSB_BFH_RSSIINV 0x0002 /* RSSI uses positive slope (not TSSI) */ +#define SSB_BFH_PAREF 0x0004 /* uses the PARef LDO */ +#define SSB_BFH_3TSWITCH 0x0008 /* uses a triple throw switch shared with bluetooth */ +#define SSB_BFH_PHASESHIFT 0x0010 /* can support phase shifter */ +#define SSB_BFH_BUCKBOOST 0x0020 /* has buck/booster */ +#define SSB_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna with bluetooth */ + +/* Values for boardflags2_lo read from SPROM */ +#define SSB_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */ +#define SSB_BFL2_APLL_WAR 0x0002 /* alternative A-band PLL settings implemented */ +#define SSB_BFL2_TXPWRCTRL_EN 0x0004 /* permits enabling TX Power Control */ +#define SSB_BFL2_2X4_DIV 0x0008 /* 2x4 diversity switch */ +#define SSB_BFL2_5G_PWRGAIN 0x0010 /* supports 5G band power gain */ +#define SSB_BFL2_PCIEWAR_OVR 0x0020 /* overrides ASPM and Clkreq settings */ +#define SSB_BFL2_CAESERS_BRD 0x0040 /* is Caesers board (unused) */ +#define SSB_BFL2_BTC3WIRE 0x0080 /* uses 3-wire bluetooth coexist */ +#define SSB_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */ +#define SSB_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */ +#define SSB_BFL2_GPLL_WAR 0x0400 /* alternative G-band PLL settings implemented */ + +/* Values for SSB_SPROM1_BINF_CCODE */ +enum { + SSB_SPROM1CCODE_WORLD = 0, + SSB_SPROM1CCODE_THAILAND, + SSB_SPROM1CCODE_ISRAEL, + SSB_SPROM1CCODE_JORDAN, + SSB_SPROM1CCODE_CHINA, + SSB_SPROM1CCODE_JAPAN, + SSB_SPROM1CCODE_USA_CANADA_ANZ, + SSB_SPROM1CCODE_EUROPE, + SSB_SPROM1CCODE_USA_LOW, + SSB_SPROM1CCODE_JAPAN_HIGH, + SSB_SPROM1CCODE_ALL, +
SSB_SPROM1CCODE_NONE, +}; + +/* Address-Match values and masks (SSB_ADMATCHxxx) */ +#define SSB_ADM_TYPE 0x00000003 /* Address type */ +#define SSB_ADM_TYPE0 0 +#define SSB_ADM_TYPE1 1 +#define SSB_ADM_TYPE2 2 +#define SSB_ADM_AD64 0x00000004 +#define SSB_ADM_SZ0 0x000000F8 /* Type0 size */ +#define SSB_ADM_SZ0_SHIFT 3 +#define SSB_ADM_SZ1 0x000001F8 /* Type1 size */ +#define SSB_ADM_SZ1_SHIFT 3 +#define SSB_ADM_SZ2 0x000001F8 /* Type2 size */ +#define SSB_ADM_SZ2_SHIFT 3 +#define SSB_ADM_EN 0x00000400 /* Enable */ +#define SSB_ADM_NEG 0x00000800 /* Negative decode */ +#define SSB_ADM_BASE0 0xFFFFFF00 /* Type0 base address */ +#define SSB_ADM_BASE0_SHIFT 8 +#define SSB_ADM_BASE1 0xFFFFF000 /* Type1 base address for the core */ +#define SSB_ADM_BASE1_SHIFT 12 +#define SSB_ADM_BASE2 0xFFFF0000 /* Type2 base address for the core */ +#define SSB_ADM_BASE2_SHIFT 16 + + +#endif /* LINUX_SSB_REGS_H_ */ diff --git a/include/linux/ssbi.h b/include/linux/ssbi.h new file mode 100644 index 000000000..087b08a4d --- /dev/null +++ b/include/linux/ssbi.h @@ -0,0 +1,43 @@ +/* Copyright (C) 2010 Google, Inc. + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * Author: Dima Zavin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_SSBI_H +#define _LINUX_SSBI_H + +#include + +int ssbi_write(struct device *dev, u16 addr, const u8 *buf, int len); +int ssbi_read(struct device *dev, u16 addr, u8 *buf, int len); + +static inline int +ssbi_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + int ret; + u8 v; + + ret = ssbi_read(context, reg, &v, 1); + if (!ret) + *val = v; + + return ret; +} + +static inline int +ssbi_reg_write(void *context, unsigned int reg, unsigned int val) +{ + u8 v = val; + return ssbi_write(context, reg, &v, 1); +} + +#endif diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h new file mode 100644 index 000000000..7978b3e2c --- /dev/null +++ b/include/linux/stackdepot.h @@ -0,0 +1,32 @@ +/* + * A generic stack depot implementation + * + * Author: Alexander Potapenko + * Copyright (C) 2016 Google, Inc. + * + * Based on code by Dmitry Chernenkov. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
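The ssbi_reg_read()/ssbi_reg_write() helpers above wrap the byte-wide SSBI transfers behind a regmap-style reg_read/reg_write signature. A minimal read-modify-write sketch; the helper name is made up and the context pointer is assumed to be the SSBI controller's struct device, as expected by ssbi_read()/ssbi_write():

#include <linux/device.h>
#include <linux/types.h>
#include <linux/ssbi.h>

/* Illustrative read-modify-write of one 8-bit SSBI register. */
static int example_ssbi_update_bits(struct device *ssbi_dev, u16 reg,
                                    u8 mask, u8 bits)
{
        unsigned int val;
        int ret;

        ret = ssbi_reg_read(ssbi_dev, reg, &val);
        if (ret)
                return ret;

        val = (val & ~mask) | (bits & mask);
        return ssbi_reg_write(ssbi_dev, reg, val);
}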
+ * + */ + +#ifndef _LINUX_STACKDEPOT_H +#define _LINUX_STACKDEPOT_H + +typedef u32 depot_stack_handle_t; + +struct stack_trace; + +depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags); + +void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace); + +#endif diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h new file mode 100644 index 000000000..6b792d080 --- /dev/null +++ b/include/linux/stackprotector.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_STACKPROTECTOR_H +#define _LINUX_STACKPROTECTOR_H 1 + +#include +#include +#include + +#ifdef CONFIG_STACKPROTECTOR +# include +#else +static inline void boot_init_stack_canary(void) +{ +} +#endif + +#endif diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h new file mode 100644 index 000000000..ba29a0613 --- /dev/null +++ b/include/linux/stacktrace.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_STACKTRACE_H +#define __LINUX_STACKTRACE_H + +#include + +struct task_struct; +struct pt_regs; + +#ifdef CONFIG_STACKTRACE +struct stack_trace { + unsigned int nr_entries, max_entries; + unsigned long *entries; + int skip; /* input argument: How many entries to skip */ +}; + +extern void save_stack_trace(struct stack_trace *trace); +extern void save_stack_trace_regs(struct pt_regs *regs, + struct stack_trace *trace); +extern void save_stack_trace_tsk(struct task_struct *tsk, + struct stack_trace *trace); +extern int save_stack_trace_tsk_reliable(struct task_struct *tsk, + struct stack_trace *trace); + +extern void print_stack_trace(struct stack_trace *trace, int spaces); +extern int snprint_stack_trace(char *buf, size_t size, + struct stack_trace *trace, int spaces); + +#ifdef CONFIG_USER_STACKTRACE_SUPPORT +extern void save_stack_trace_user(struct stack_trace *trace); +#else +# define save_stack_trace_user(trace) do { } while (0) +#endif + +#else /* !CONFIG_STACKTRACE */ +# define save_stack_trace(trace) do { } while (0) +# define save_stack_trace_tsk(tsk, trace) do { } while (0) +# define save_stack_trace_user(trace) do { } while (0) +# define print_stack_trace(trace, spaces) do { } while (0) +# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) +# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; }) +#endif /* CONFIG_STACKTRACE */ + +#endif /* __LINUX_STACKTRACE_H */ diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h new file mode 100644 index 000000000..4b268d86a --- /dev/null +++ b/include/linux/start_kernel.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_START_KERNEL_H +#define _LINUX_START_KERNEL_H + +#include +#include + +/* Define the prototype for start_kernel here, rather than cluttering + up something else. 
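depot_save_stack() deduplicates traces captured through the stack_trace API above and hands back a compact handle that can live in per-object metadata. A sketch of the usual capture/store/fetch sequence; the depth, skip count and function names are illustrative only:

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

#define EXAMPLE_STACK_DEPTH 16

/* Capture the current call chain and park it in the stack depot. */
static depot_stack_handle_t example_save_stack(gfp_t flags)
{
        unsigned long entries[EXAMPLE_STACK_DEPTH];
        struct stack_trace trace = {
                .entries     = entries,
                .max_entries = EXAMPLE_STACK_DEPTH,
                .skip        = 2,   /* drop this helper and its caller */
        };

        save_stack_trace(&trace);
        return depot_save_stack(&trace, flags);
}

/* Later, turn the handle back into a printable trace. */
static void example_print_stack(depot_stack_handle_t handle)
{
        struct stack_trace trace;

        depot_fetch_stack(handle, &trace);
        print_stack_trace(&trace, 0);
}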
*/ + +extern asmlinkage void __init start_kernel(void); + +#endif /* _LINUX_START_KERNEL_H */ diff --git a/include/linux/stat.h b/include/linux/stat.h new file mode 100644 index 000000000..765573dc1 --- /dev/null +++ b/include/linux/stat.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_STAT_H +#define _LINUX_STAT_H + + +#include +#include + +#define S_IRWXUGO (S_IRWXU|S_IRWXG|S_IRWXO) +#define S_IALLUGO (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO) +#define S_IRUGO (S_IRUSR|S_IRGRP|S_IROTH) +#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH) +#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH) + +#define UTIME_NOW ((1l << 30) - 1l) +#define UTIME_OMIT ((1l << 30) - 2l) + +#include +#include +#include + +#define KSTAT_QUERY_FLAGS (AT_STATX_SYNC_TYPE) + +struct kstat { + u32 result_mask; /* What fields the user got */ + umode_t mode; + unsigned int nlink; + uint32_t blksize; /* Preferred I/O size */ + u64 attributes; + u64 attributes_mask; +#define KSTAT_ATTR_FS_IOC_FLAGS \ + (STATX_ATTR_COMPRESSED | \ + STATX_ATTR_IMMUTABLE | \ + STATX_ATTR_APPEND | \ + STATX_ATTR_NODUMP | \ + STATX_ATTR_ENCRYPTED \ + )/* Attrs corresponding to FS_*_FL flags */ + u64 ino; + dev_t dev; + dev_t rdev; + kuid_t uid; + kgid_t gid; + loff_t size; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + struct timespec64 btime; /* File creation time */ + u64 blocks; +}; + +#endif diff --git a/include/linux/statfs.h b/include/linux/statfs.h new file mode 100644 index 000000000..3142e9854 --- /dev/null +++ b/include/linux/statfs.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_STATFS_H +#define _LINUX_STATFS_H + +#include +#include + +struct kstatfs { + long f_type; + long f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; + __kernel_fsid_t f_fsid; + long f_namelen; + long f_frsize; + long f_flags; + long f_spare[4]; +}; + +/* + * Definitions for the flag in f_flag. + * + * Generally these flags are equivalent to the MS_ flags used in the mount + * ABI. The exception is ST_VALID which has the same value as MS_REMOUNT + * which doesn't make any sense for statfs. 
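struct kstatfs above is what a filesystem's ->statfs() callback fills in. A minimal sketch for a hypothetical filesystem; the magic number, block size and name limit are made up, and fields left untouched simply stay zero:

#include <linux/fs.h>
#include <linux/statfs.h>

#define EXAMPLEFS_MAGIC     0x4578616d  /* made-up superblock magic */
#define EXAMPLEFS_BSIZE     4096        /* real filesystems often report PAGE_SIZE */
#define EXAMPLEFS_NAME_MAX  255

/* Minimal ->statfs() sketch: only static limits are reported. */
static int examplefs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        buf->f_type = EXAMPLEFS_MAGIC;
        buf->f_bsize = EXAMPLEFS_BSIZE;
        buf->f_namelen = EXAMPLEFS_NAME_MAX;
        return 0;
}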
+ */ +#define ST_RDONLY 0x0001 /* mount read-only */ +#define ST_NOSUID 0x0002 /* ignore suid and sgid bits */ +#define ST_NODEV 0x0004 /* disallow access to device special files */ +#define ST_NOEXEC 0x0008 /* disallow program execution */ +#define ST_SYNCHRONOUS 0x0010 /* writes are synced at once */ +#define ST_VALID 0x0020 /* f_flags support is implemented */ +#define ST_MANDLOCK 0x0040 /* allow mandatory locks on an FS */ +/* 0x0080 used for ST_WRITE in glibc */ +/* 0x0100 used for ST_APPEND in glibc */ +/* 0x0200 used for ST_IMMUTABLE in glibc */ +#define ST_NOATIME 0x0400 /* do not update access times */ +#define ST_NODIRATIME 0x0800 /* do not update directory access times */ +#define ST_RELATIME 0x1000 /* update atime relative to mtime/ctime */ + +#endif diff --git a/include/linux/static_key.h b/include/linux/static_key.h new file mode 100644 index 000000000..27bd3f8a0 --- /dev/null +++ b/include/linux/static_key.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/stddef.h b/include/linux/stddef.h new file mode 100644 index 000000000..998a4ba28 --- /dev/null +++ b/include/linux/stddef.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_STDDEF_H +#define _LINUX_STDDEF_H + +#include + +#undef NULL +#define NULL ((void *)0) + +enum { + false = 0, + true = 1 +}; + +#undef offsetof +#ifdef __compiler_offsetof +#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER) +#else +#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) +#endif + +/** + * sizeof_field(TYPE, MEMBER) + * + * @TYPE: The structure containing the field of interest + * @MEMBER: The field to return the size of + */ +#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) + +/** + * offsetofend(TYPE, MEMBER) + * + * @TYPE: The type of the structure + * @MEMBER: The member within the structure to get the end offset of + */ +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) + +#endif diff --git a/include/linux/stm.h b/include/linux/stm.h new file mode 100644 index 000000000..c6f577ab6 --- /dev/null +++ b/include/linux/stm.h @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Trace Module (STM) infrastructure apis + * Copyright (C) 2014 Intel Corporation. 
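sizeof_field() and offsetofend() above are typically used for partial-structure copies and compile-time layout checks. A small illustration with a made-up wire-format header:

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/build_bug.h>

/* Hypothetical on-the-wire header, used only to illustrate the helpers. */
struct example_hdr {
        __le16 len;
        __le16 flags;
        __le32 seq;
};

/* Copy everything up to and including 'flags', leaving 'seq' alone. */
static void example_copy_fixed_part(struct example_hdr *dst,
                                    const struct example_hdr *src)
{
        BUILD_BUG_ON(sizeof_field(struct example_hdr, seq) != 4);
        memcpy(dst, src, offsetofend(struct example_hdr, flags));
}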
+ */ + +#ifndef _STM_H_ +#define _STM_H_ + +#include + +/** + * enum stp_packet_type - STP packets that an STM driver sends + */ +enum stp_packet_type { + STP_PACKET_DATA = 0, + STP_PACKET_FLAG, + STP_PACKET_USER, + STP_PACKET_MERR, + STP_PACKET_GERR, + STP_PACKET_TRIG, + STP_PACKET_XSYNC, +}; + +/** + * enum stp_packet_flags - STP packet modifiers + */ +enum stp_packet_flags { + STP_PACKET_MARKED = 0x1, + STP_PACKET_TIMESTAMPED = 0x2, +}; + +struct stp_policy; + +struct stm_device; + +/** + * struct stm_data - STM device description and callbacks + * @name: device name + * @stm: internal structure, only used by stm class code + * @sw_start: first STP master available to software + * @sw_end: last STP master available to software + * @sw_nchannels: number of STP channels per master + * @sw_mmiosz: size of one channel's IO space, for mmap, optional + * @hw_override: masters in the STP stream will not match the ones + * assigned by software, but are up to the STM hardware + * @packet: callback that sends an STP packet + * @mmio_addr: mmap callback, optional + * @link: called when a new stm_source gets linked to us, optional + * @unlink: likewise for unlinking, again optional + * @set_options: set device-specific options on a channel + * + * Fill out this structure before calling stm_register_device() to create + * an STM device and stm_unregister_device() to destroy it. It will also be + * passed back to @packet(), @mmio_addr(), @link(), @unlink() and @set_options() + * callbacks. + * + * Normally, an STM device will have a range of masters available to software + * and the rest being statically assigned to various hardware trace sources. + * The former is defined by the range [@sw_start..@sw_end] of the device + * description. That is, the lowest master that can be allocated to software + * writers is @sw_start and data from this writer will appear as @sw_start + * master in the STP stream. + * + * The @packet callback should adhere to the following rules: + * 1) it must return the number of bytes it consumed from the payload; + * 2) therefore, if it sent a packet that does not have payload (like FLAG), + * it must return zero; + * 3) if it does not support the requested packet type/flag combination, + * it must return -ENOTSUPP. + * + * The @unlink callback is called when there are no more active writers so + * that the master/channel can be quiesced.
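A sketch of a ->packet() callback that follows the three rules above; the driver details are omitted, the trace data is simply discarded, and the function name is made up:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/stm.h>

/* Illustrative ->packet() implementation for a hypothetical STM. */
static ssize_t example_stm_packet(struct stm_data *stm_data,
                                  unsigned int master, unsigned int channel,
                                  unsigned int packet, unsigned int flags,
                                  unsigned int size,
                                  const unsigned char *payload)
{
        /* Rule 3: reject packet types this hardware cannot emit. */
        if (packet == STP_PACKET_TRIG || packet == STP_PACKET_XSYNC)
                return -ENOTSUPP;

        /* Rule 2: packets without payload (e.g. FLAG) consume nothing. */
        if (!payload || !size)
                return 0;

        /* A real driver would push the payload to its output port here. */

        /* Rule 1: report how many payload bytes were consumed. */
        return size;
}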
+ */ +struct stm_data { + const char *name; + struct stm_device *stm; + unsigned int sw_start; + unsigned int sw_end; + unsigned int sw_nchannels; + unsigned int sw_mmiosz; + unsigned int hw_override; + ssize_t (*packet)(struct stm_data *, unsigned int, + unsigned int, unsigned int, + unsigned int, unsigned int, + const unsigned char *); + phys_addr_t (*mmio_addr)(struct stm_data *, unsigned int, + unsigned int, unsigned int); + int (*link)(struct stm_data *, unsigned int, + unsigned int); + void (*unlink)(struct stm_data *, unsigned int, + unsigned int); + long (*set_options)(struct stm_data *, unsigned int, + unsigned int, unsigned int, + unsigned long); +}; + +int stm_register_device(struct device *parent, struct stm_data *stm_data, + struct module *owner); +void stm_unregister_device(struct stm_data *stm_data); + +struct stm_source_device; + +/** + * struct stm_source_data - STM source device description and callbacks + * @name: device name, will be used for policy lookup + * @src: internal structure, only used by stm class code + * @nr_chans: number of channels to allocate + * @link: called when this source gets linked to an STM device + * @unlink: called when this source is about to get unlinked from its STM + * + * Fill in this structure before calling stm_source_register_device() to + * register a source device. Also pass it to unregister and write calls. + */ +struct stm_source_data { + const char *name; + struct stm_source_device *src; + unsigned int percpu; + unsigned int nr_chans; + int (*link)(struct stm_source_data *data); + void (*unlink)(struct stm_source_data *data); +}; + +int stm_source_register_device(struct device *parent, + struct stm_source_data *data); +void stm_source_unregister_device(struct stm_source_data *data); + +int notrace stm_source_write(struct stm_source_data *data, unsigned int chan, + const char *buf, size_t count); + +#endif /* _STM_H_ */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h new file mode 100644 index 000000000..4335bd771 --- /dev/null +++ b/include/linux/stmmac.h @@ -0,0 +1,197 @@ +/******************************************************************************* + + Header file for stmmac platform data + + Copyright (C) 2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __STMMAC_PLATFORM_DATA +#define __STMMAC_PLATFORM_DATA + +#include + +#define MTL_MAX_RX_QUEUES 8 +#define MTL_MAX_TX_QUEUES 8 +#define STMMAC_CH_MAX 8 + +#define STMMAC_RX_COE_NONE 0 +#define STMMAC_RX_COE_TYPE1 1 +#define STMMAC_RX_COE_TYPE2 2 + +/* Define the macros for CSR clock range parameters to be passed by + * platform code. + * This could also be configured at run time using CPU freq framework. 
*/ + +/* MDC Clock Selection define*/ +#define STMMAC_CSR_60_100M 0x0 /* MDC = clk_scr_i/42 */ +#define STMMAC_CSR_100_150M 0x1 /* MDC = clk_scr_i/62 */ +#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_scr_i/16 */ +#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_scr_i/26 */ +#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */ +#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */ + +/* MTL algorithms identifiers */ +#define MTL_TX_ALGORITHM_WRR 0x0 +#define MTL_TX_ALGORITHM_WFQ 0x1 +#define MTL_TX_ALGORITHM_DWRR 0x2 +#define MTL_TX_ALGORITHM_SP 0x3 +#define MTL_RX_ALGORITHM_SP 0x4 +#define MTL_RX_ALGORITHM_WSP 0x5 + +/* RX/TX Queue Mode */ +#define MTL_QUEUE_AVB 0x0 +#define MTL_QUEUE_DCB 0x1 + +/* The MDC clock could be set higher than the IEEE 802.3 + * specified frequency limit of 2.5 MHz, by programming a clock divider + * of value different than the above defined values. The resultant MDIO + * clock frequency of 12.5 MHz is applicable for the interfacing chips + * supporting higher MDC clocks. + * The MDC clock selection macros need to be defined for MDC clock rate + * of 12.5 MHz, corresponding to the following selection. + */ +#define STMMAC_CSR_I_4 0x8 /* clk_csr_i/4 */ +#define STMMAC_CSR_I_6 0x9 /* clk_csr_i/6 */ +#define STMMAC_CSR_I_8 0xA /* clk_csr_i/8 */ +#define STMMAC_CSR_I_10 0xB /* clk_csr_i/10 */ +#define STMMAC_CSR_I_12 0xC /* clk_csr_i/12 */ +#define STMMAC_CSR_I_14 0xD /* clk_csr_i/14 */ +#define STMMAC_CSR_I_16 0xE /* clk_csr_i/16 */ +#define STMMAC_CSR_I_18 0xF /* clk_csr_i/18 */ + +/* AXI DMA Burst length supported */ +#define DMA_AXI_BLEN_4 (1 << 1) +#define DMA_AXI_BLEN_8 (1 << 2) +#define DMA_AXI_BLEN_16 (1 << 3) +#define DMA_AXI_BLEN_32 (1 << 4) +#define DMA_AXI_BLEN_64 (1 << 5) +#define DMA_AXI_BLEN_128 (1 << 6) +#define DMA_AXI_BLEN_256 (1 << 7) +#define DMA_AXI_BLEN_ALL (DMA_AXI_BLEN_4 | DMA_AXI_BLEN_8 | DMA_AXI_BLEN_16 \ + | DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \ + | DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256) + +/* Platform data for platform device structure's platform_data field */ + +struct stmmac_mdio_bus_data { + int (*phy_reset)(void *priv); + unsigned int phy_mask; + int *irqs; + int probed_phy_irq; +#ifdef CONFIG_OF + int reset_gpio, active_low; + u32 delays[3]; +#endif +}; + +struct stmmac_dma_cfg { + int pbl; + int txpbl; + int rxpbl; + bool pblx8; + int fixed_burst; + int mixed_burst; + bool aal; +}; + +#define AXI_BLEN 7 +struct stmmac_axi { + bool axi_lpi_en; + bool axi_xit_frm; + u32 axi_wr_osr_lmt; + u32 axi_rd_osr_lmt; + bool axi_kbbe; + u32 axi_blen[AXI_BLEN]; + bool axi_fb; + bool axi_mb; + bool axi_rb; +}; + +struct stmmac_rxq_cfg { + u8 mode_to_use; + u32 chan; + u8 pkt_route; + bool use_prio; + u32 prio; +}; + +struct stmmac_txq_cfg { + u32 weight; + u8 mode_to_use; + /* Credit Base Shaper parameters */ + u32 send_slope; + u32 idle_slope; + u32 high_credit; + u32 low_credit; + bool use_prio; + u32 prio; +}; + +struct plat_stmmacenet_data { + int bus_id; + int phy_addr; + int interface; + struct stmmac_mdio_bus_data *mdio_bus_data; + struct device_node *phy_node; + struct device_node *mdio_node; + struct stmmac_dma_cfg *dma_cfg; + int clk_csr; + int has_gmac; + int enh_desc; + int tx_coe; + int rx_coe; + int bugged_jumbo; + int pmt; + int force_sf_dma_mode; + int force_thresh_dma_mode; + int riwt_off; + int max_speed; + int maxmtu; + int multicast_filter_bins; + int unicast_filter_entries; + int tx_fifo_size; + int rx_fifo_size; + u32 rx_queues_to_use; + u32 tx_queues_to_use; + u8 rx_sched_algorithm; + u8 tx_sched_algorithm; + struct
stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES]; + struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES]; + void (*fix_mac_speed)(void *priv, unsigned int speed); + int (*init)(struct platform_device *pdev, void *priv); + void (*exit)(struct platform_device *pdev, void *priv); + struct mac_device_info *(*setup)(void *priv); + void *bsp_priv; + struct clk *stmmac_clk; + struct clk *pclk; + struct clk *clk_ptp_ref; + unsigned int clk_ptp_rate; + unsigned int clk_ref_rate; + struct reset_control *stmmac_rst; + struct stmmac_axi *axi; + int has_gmac4; + bool has_sun8i; + bool tso_en; + int mac_port_sel_speed; + bool en_tx_lpi_clockgating; + int has_xgmac; +}; +#endif diff --git a/include/linux/stmp3xxx_rtc_wdt.h b/include/linux/stmp3xxx_rtc_wdt.h new file mode 100644 index 000000000..1dd12c962 --- /dev/null +++ b/include/linux/stmp3xxx_rtc_wdt.h @@ -0,0 +1,15 @@ +/* + * stmp3xxx_rtc_wdt.h + * + * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. + * + * This file is released under the GPLv2. + */ +#ifndef __LINUX_STMP3XXX_RTC_WDT_H +#define __LINUX_STMP3XXX_RTC_WDT_H + +struct stmp3xxx_wdt_pdata { + void (*wdt_set_timeout)(struct device *dev, u32 timeout); +}; + +#endif /* __LINUX_STMP3XXX_RTC_WDT_H */ diff --git a/include/linux/stmp_device.h b/include/linux/stmp_device.h new file mode 100644 index 000000000..6cf7ec954 --- /dev/null +++ b/include/linux/stmp_device.h @@ -0,0 +1,20 @@ +/* + * basic functions for devices following the "stmp" style register layout + * + * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __STMP_DEVICE_H__ +#define __STMP_DEVICE_H__ + +#define STMP_OFFSET_REG_SET 0x4 +#define STMP_OFFSET_REG_CLR 0x8 +#define STMP_OFFSET_REG_TOG 0xc + +extern int stmp_reset_block(void __iomem *); +#endif /* __STMP_DEVICE_H__ */ diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h new file mode 100644 index 000000000..ccdaa8fd5 --- /dev/null +++ b/include/linux/stop_machine.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_STOP_MACHINE +#define _LINUX_STOP_MACHINE + +#include +#include +#include +#include + +/* + * stop_cpu[s]() is simplistic per-cpu maximum priority cpu + * monopolization mechanism. The caller can specify a non-sleeping + * function to be executed on a single or multiple cpus preempting all + * other processes and monopolizing those cpus until it finishes. + * + * Resources for this mechanism are preallocated when a cpu is brought + * up and requests are guaranteed to be served as long as the target + * cpus are online. 
+ */ +typedef int (*cpu_stop_fn_t)(void *arg); + +#ifdef CONFIG_SMP + +struct cpu_stop_work { + struct list_head list; /* cpu_stopper->works */ + cpu_stop_fn_t fn; + void *arg; + struct cpu_stop_done *done; +}; + +int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); +int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg); +bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, + struct cpu_stop_work *work_buf); +int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); +int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); +void stop_machine_park(int cpu); +void stop_machine_unpark(int cpu); + +#else /* CONFIG_SMP */ + +#include + +struct cpu_stop_work { + struct work_struct work; + cpu_stop_fn_t fn; + void *arg; +}; + +static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) +{ + int ret = -ENOENT; + preempt_disable(); + if (cpu == smp_processor_id()) + ret = fn(arg); + preempt_enable(); + return ret; +} + +static void stop_one_cpu_nowait_workfn(struct work_struct *work) +{ + struct cpu_stop_work *stwork = + container_of(work, struct cpu_stop_work, work); + preempt_disable(); + stwork->fn(stwork->arg); + preempt_enable(); +} + +static inline bool stop_one_cpu_nowait(unsigned int cpu, + cpu_stop_fn_t fn, void *arg, + struct cpu_stop_work *work_buf) +{ + if (cpu == smp_processor_id()) { + INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn); + work_buf->fn = fn; + work_buf->arg = arg; + schedule_work(&work_buf->work); + return true; + } + + return false; +} + +static inline int stop_cpus(const struct cpumask *cpumask, + cpu_stop_fn_t fn, void *arg) +{ + if (cpumask_test_cpu(raw_smp_processor_id(), cpumask)) + return stop_one_cpu(raw_smp_processor_id(), fn, arg); + return -ENOENT; +} + +static inline int try_stop_cpus(const struct cpumask *cpumask, + cpu_stop_fn_t fn, void *arg) +{ + return stop_cpus(cpumask, fn, arg); +} + +#endif /* CONFIG_SMP */ + +/* + * stop_machine "Bogolock": stop the entire machine, disable + * interrupts. This is a very heavy lock, which is equivalent to + * grabbing every spinlock (and more). So the "read" side to such a + * lock is anything which disables preemption. + */ +#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) + +/** + * stop_machine: freeze the machine on all CPUs and run this function + * @fn: the function to run + * @data: the data ptr for the @fn() + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) + * + * Description: This causes a thread to be scheduled on every cpu, + * each of which disables interrupts. The result is that no one is + * holding a spinlock or inside any other preempt-disabled region when + * @fn() runs. + * + * This can be thought of as a very heavy write lock, equivalent to + * grabbing every spinlock in the kernel. + * + * Protects against CPU hotplug. + */ +int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); + +/** + * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function + * @fn: the function to run + * @data: the data ptr for the @fn() + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) + * + * Same as above. Must be called from within a cpus_read_lock() protected + * region. Avoids nested calls to cpus_read_lock().
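A sketch of how a cpu_stop_fn_t is used with stop_machine() to publish a change while every online CPU is quiesced; the variable and function names are made up:

#include <linux/types.h>
#include <linux/stop_machine.h>

static bool example_feature_enabled;

/* Runs with every online CPU spinning with interrupts disabled, so no
 * other code can observe the update part-way through. */
static int example_toggle_fn(void *data)
{
        example_feature_enabled = *(bool *)data;
        return 0;
}

static int example_set_feature(bool enable)
{
        /* NULL cpumask: run the callback on one (any) online CPU while
         * all the others are held in the stopper loop. */
        return stop_machine(example_toggle_fn, &enable, NULL);
}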
+ */ +int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); + +int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, + const struct cpumask *cpus); +#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ + +static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, + const struct cpumask *cpus) +{ + unsigned long flags; + int ret; + local_irq_save(flags); + ret = fn(data); + local_irq_restore(flags); + return ret; +} + +static __always_inline int +stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) +{ + return stop_machine_cpuslocked(fn, data, cpus); +} + +static __always_inline int +stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, + const struct cpumask *cpus) +{ + return stop_machine(fn, data, cpus); +} + +#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ +#endif /* _LINUX_STOP_MACHINE */ diff --git a/include/linux/string.h b/include/linux/string.h new file mode 100644 index 000000000..1e0c442b9 --- /dev/null +++ b/include/linux/string.h @@ -0,0 +1,495 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_STRING_H_ +#define _LINUX_STRING_H_ + + +#include /* for inline */ +#include /* for size_t */ +#include /* for NULL */ +#include +#include + +extern char *strndup_user(const char __user *, long); +extern void *memdup_user(const void __user *, size_t); +extern void *vmemdup_user(const void __user *, size_t); +extern void *memdup_user_nul(const void __user *, size_t); + +/* + * Include machine specific inline routines + */ +#include + +#ifndef __HAVE_ARCH_STRCPY +extern char * strcpy(char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRNCPY +extern char * strncpy(char *,const char *, __kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRLCPY +size_t strlcpy(char *, const char *, size_t); +#endif +#ifndef __HAVE_ARCH_STRSCPY +ssize_t strscpy(char *, const char *, size_t); +#endif + +/* Wraps calls to strscpy()/memset(), no arch specific code required */ +ssize_t strscpy_pad(char *dest, const char *src, size_t count); + +#ifndef __HAVE_ARCH_STRCAT +extern char * strcat(char *, const char *); +#endif +#ifndef __HAVE_ARCH_STRNCAT +extern char * strncat(char *, const char *, __kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRLCAT +extern size_t strlcat(char *, const char *, __kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRCMP +extern int strcmp(const char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRNCMP +extern int strncmp(const char *,const char *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRCASECMP +extern int strcasecmp(const char *s1, const char *s2); +#endif +#ifndef __HAVE_ARCH_STRNCASECMP +extern int strncasecmp(const char *s1, const char *s2, size_t n); +#endif +#ifndef __HAVE_ARCH_STRCHR +extern char * strchr(const char *,int); +#endif +#ifndef __HAVE_ARCH_STRCHRNUL +extern char * strchrnul(const char *,int); +#endif +#ifndef __HAVE_ARCH_STRNCHR +extern char * strnchr(const char *, size_t, int); +#endif +#ifndef __HAVE_ARCH_STRRCHR +extern char * strrchr(const char *,int); +#endif +extern char * __must_check skip_spaces(const char *); + +extern char *strim(char *); + +static inline __must_check char *strstrip(char *str) +{ + return strim(str); +} + +#ifndef __HAVE_ARCH_STRSTR +extern char * strstr(const char *, const char *); +#endif +#ifndef __HAVE_ARCH_STRNSTR +extern char * strnstr(const char *, const char *, size_t); +#endif +#ifndef __HAVE_ARCH_STRLEN +extern __kernel_size_t strlen(const char *); +#endif +#ifndef __HAVE_ARCH_STRNLEN +extern __kernel_size_t strnlen(const char 
*,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRPBRK +extern char * strpbrk(const char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRSEP +extern char * strsep(char **,const char *); +#endif +#ifndef __HAVE_ARCH_STRSPN +extern __kernel_size_t strspn(const char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRCSPN +extern __kernel_size_t strcspn(const char *,const char *); +#endif + +#ifndef __HAVE_ARCH_MEMSET +extern void * memset(void *,int,__kernel_size_t); +#endif + +#ifndef __HAVE_ARCH_MEMSET16 +extern void *memset16(uint16_t *, uint16_t, __kernel_size_t); +#endif + +#ifndef __HAVE_ARCH_MEMSET32 +extern void *memset32(uint32_t *, uint32_t, __kernel_size_t); +#endif + +#ifndef __HAVE_ARCH_MEMSET64 +extern void *memset64(uint64_t *, uint64_t, __kernel_size_t); +#endif + +static inline void *memset_l(unsigned long *p, unsigned long v, + __kernel_size_t n) +{ + if (BITS_PER_LONG == 32) + return memset32((uint32_t *)p, v, n); + else + return memset64((uint64_t *)p, v, n); +} + +static inline void *memset_p(void **p, void *v, __kernel_size_t n) +{ + if (BITS_PER_LONG == 32) + return memset32((uint32_t *)p, (uintptr_t)v, n); + else + return memset64((uint64_t *)p, (uintptr_t)v, n); +} + +#ifndef __HAVE_ARCH_MEMCPY +extern void * memcpy(void *,const void *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMMOVE +extern void * memmove(void *,const void *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMSCAN +extern void * memscan(void *,int,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMCMP +extern int memcmp(const void *,const void *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_BCMP +extern int bcmp(const void *,const void *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMCHR +extern void * memchr(const void *,int,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMCPY_MCSAFE +static inline __must_check unsigned long memcpy_mcsafe(void *dst, + const void *src, size_t cnt) +{ + memcpy(dst, src, cnt); + return 0; +} +#endif +#ifndef __HAVE_ARCH_MEMCPY_FLUSHCACHE +static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt) +{ + memcpy(dst, src, cnt); +} +#endif +void *memchr_inv(const void *s, int c, size_t n); +char *strreplace(char *s, char old, char new); + +extern void kfree_const(const void *x); + +extern char *kstrdup(const char *s, gfp_t gfp) __malloc; +extern const char *kstrdup_const(const char *s, gfp_t gfp); +extern char *kstrndup(const char *s, size_t len, gfp_t gfp); +extern void *kmemdup(const void *src, size_t len, gfp_t gfp); +extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp); + +extern char **argv_split(gfp_t gfp, const char *str, int *argcp); +extern void argv_free(char **argv); + +extern bool sysfs_streq(const char *s1, const char *s2); +extern int kstrtobool(const char *s, bool *res); +static inline int strtobool(const char *s, bool *res) +{ + return kstrtobool(s, res); +} + +int match_string(const char * const *array, size_t n, const char *string); +int __sysfs_match_string(const char * const *array, size_t n, const char *s); + +/** + * sysfs_match_string - matches given string in an array + * @_a: array of strings + * @_s: string to match with + * + * Helper for __sysfs_match_string(). Calculates the size of @a automatically. 
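A sketch of sysfs_match_string() in a device attribute store handler; the mode table, the attribute and the backing variable are made up:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static const char * const example_modes[] = { "off", "auto", "forced" };
static int example_active_mode;

/* sysfs_match_string() copes with the trailing newline sysfs writes
 * carry and returns the matching index, or -EINVAL if nothing matches. */
static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        int idx = sysfs_match_string(example_modes, buf);

        if (idx < 0)
                return idx;

        example_active_mode = idx;
        return count;
}

/* Pairs with the handler above to create the write-only attribute. */
static DEVICE_ATTR_WO(mode);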
+ */ +#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s) + +#ifdef CONFIG_BINARY_PRINTF +int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); +int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf); +int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); +#endif + +extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, + const void *from, size_t available); + +/** + * strstarts - does @str start with @prefix? + * @str: string to examine + * @prefix: prefix to look for. + */ +static inline bool strstarts(const char *str, const char *prefix) +{ + return strncmp(str, prefix, strlen(prefix)) == 0; +} + +size_t memweight(const void *ptr, size_t bytes); +void memzero_explicit(void *s, size_t count); + +/** + * kbasename - return the last part of a pathname. + * + * @path: path to extract the filename from. + */ +static inline const char *kbasename(const char *path) +{ + const char *tail = strrchr(path, '/'); + return tail ? tail + 1 : path; +} + +#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline)) +#define __RENAME(x) __asm__(#x) + +void fortify_panic(const char *name) __noreturn __cold; +void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter"); +void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter"); +void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter"); +void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); + +#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) + +#ifdef CONFIG_KASAN +extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); +extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); +extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); +extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); +extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); +extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); +extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); +extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); +extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); +extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); +#else +#define __underlying_memchr __builtin_memchr +#define __underlying_memcmp __builtin_memcmp +#define __underlying_memcpy __builtin_memcpy +#define __underlying_memmove __builtin_memmove +#define __underlying_memset __builtin_memset +#define __underlying_strcat __builtin_strcat +#define __underlying_strcpy __builtin_strcpy +#define __underlying_strlen __builtin_strlen +#define __underlying_strncat __builtin_strncat +#define __underlying_strncpy __builtin_strncpy +#endif + +__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __underlying_strncpy(p, q, size); +} + +__FORTIFY_INLINE char *strcat(char *p, const char *q) 
+{ + size_t p_size = __builtin_object_size(p, 0); + if (p_size == (size_t)-1) + return __underlying_strcat(p, q); + if (strlcat(p, q, p_size) >= p_size) + fortify_panic(__func__); + return p; +} + +__FORTIFY_INLINE __kernel_size_t strlen(const char *p) +{ + __kernel_size_t ret; + size_t p_size = __builtin_object_size(p, 0); + + /* Work around gcc excess stack consumption issue */ + if (p_size == (size_t)-1 || + (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) + return __underlying_strlen(p); + ret = strnlen(p, p_size); + if (p_size <= ret) + fortify_panic(__func__); + return ret; +} + +extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); +__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen) +{ + size_t p_size = __builtin_object_size(p, 0); + __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size); + if (p_size <= ret && maxlen != ret) + fortify_panic(__func__); + return ret; +} + +/* defined after fortified strlen to reuse it */ +extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); +__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) +{ + size_t ret; + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __real_strlcpy(p, q, size); + ret = strlen(q); + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + if (__builtin_constant_p(len) && len >= p_size) + __write_overflow(); + if (len >= p_size) + fortify_panic(__func__); + __underlying_memcpy(p, q, len); + p[len] = '\0'; + } + return ret; +} + +/* defined after fortified strlen and strnlen to reuse them */ +__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) +{ + size_t p_len, copy_len; + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __underlying_strncat(p, q, count); + p_len = strlen(p); + copy_len = strnlen(q, count); + if (p_size < p_len + copy_len + 1) + fortify_panic(__func__); + __underlying_memcpy(p + p_len, q, copy_len); + p[p_len + copy_len] = '\0'; + return p; +} + +__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __underlying_memset(p, c, size); +} + +__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (__builtin_constant_p(size)) { + if (p_size < size) + __write_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __underlying_memcpy(p, q, size); +} + +__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (__builtin_constant_p(size)) { + if (p_size < size) + __write_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __underlying_memmove(p, q, size); +} + +extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); +__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + if 
(__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_memscan(p, c, size); +} + +__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (__builtin_constant_p(size)) { + if (p_size < size) + __read_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __underlying_memcmp(p, q, size); +} + +__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __underlying_memchr(p, c, size); +} + +void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); +__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_memchr_inv(p, c, size); +} + +extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup); +__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_kmemdup(p, size, gfp); +} + +/* defined after fortified strlen and memcpy to reuse them */ +__FORTIFY_INLINE char *strcpy(char *p, const char *q) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __underlying_strcpy(p, q); + memcpy(p, q, strlen(q) + 1); + return p; +} + +/* Don't use these outside the FORITFY_SOURCE implementation */ +#undef __underlying_memchr +#undef __underlying_memcmp +#undef __underlying_memcpy +#undef __underlying_memmove +#undef __underlying_memset +#undef __underlying_strcat +#undef __underlying_strcpy +#undef __underlying_strlen +#undef __underlying_strncat +#undef __underlying_strncpy +#endif + +/** + * memcpy_and_pad - Copy one buffer to another with padding + * @dest: Where to copy to + * @dest_len: The destination buffer size + * @src: Where to copy from + * @count: The number of bytes to copy + * @pad: Character to use for padding if space is left in destination. 
+ */ +static inline void memcpy_and_pad(void *dest, size_t dest_len, + const void *src, size_t count, int pad) +{ + if (dest_len > count) { + memcpy(dest, src, count); + memset(dest + count, pad, dest_len - count); + } else + memcpy(dest, src, dest_len); +} + +#endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h new file mode 100644 index 000000000..d23c50309 --- /dev/null +++ b/include/linux/string_helpers.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_STRING_HELPERS_H_ +#define _LINUX_STRING_HELPERS_H_ + +#include + +struct file; +struct task_struct; + +/* Descriptions of the types of units to + * print in */ +enum string_size_units { + STRING_UNITS_10, /* use powers of 10^3 (standard SI) */ + STRING_UNITS_2, /* use binary powers of 2^10 */ +}; + +void string_get_size(u64 size, u64 blk_size, enum string_size_units units, + char *buf, int len); + +#define UNESCAPE_SPACE 0x01 +#define UNESCAPE_OCTAL 0x02 +#define UNESCAPE_HEX 0x04 +#define UNESCAPE_SPECIAL 0x08 +#define UNESCAPE_ANY \ + (UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL) + +int string_unescape(char *src, char *dst, size_t size, unsigned int flags); + +static inline int string_unescape_inplace(char *buf, unsigned int flags) +{ + return string_unescape(buf, buf, 0, flags); +} + +static inline int string_unescape_any(char *src, char *dst, size_t size) +{ + return string_unescape(src, dst, size, UNESCAPE_ANY); +} + +static inline int string_unescape_any_inplace(char *buf) +{ + return string_unescape_any(buf, buf, 0); +} + +#define ESCAPE_SPACE 0x01 +#define ESCAPE_SPECIAL 0x02 +#define ESCAPE_NULL 0x04 +#define ESCAPE_OCTAL 0x08 +#define ESCAPE_ANY \ + (ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL) +#define ESCAPE_NP 0x10 +#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP) +#define ESCAPE_HEX 0x20 + +int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, + unsigned int flags, const char *only); + +static inline int string_escape_mem_any_np(const char *src, size_t isz, + char *dst, size_t osz, const char *only) +{ + return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, only); +} + +static inline int string_escape_str(const char *src, char *dst, size_t sz, + unsigned int flags, const char *only) +{ + return string_escape_mem(src, strlen(src), dst, sz, flags, only); +} + +static inline int string_escape_str_any_np(const char *src, char *dst, + size_t sz, const char *only) +{ + return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only); +} + +char *kstrdup_quotable(const char *src, gfp_t gfp); +char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp); +char *kstrdup_quotable_file(struct file *file, gfp_t gfp); + +#endif diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h new file mode 100644 index 000000000..c0c5c5b73 --- /dev/null +++ b/include/linux/stringhash.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_STRINGHASH_H +#define __LINUX_STRINGHASH_H + +#include /* For __pure */ +#include /* For u32, u64 */ +#include + +/* + * Routines for hashing strings of bytes to a 32-bit hash value. + * + * These hash functions are NOT GUARANTEED STABLE between kernel + * versions, architectures, or even repeated boots of the same kernel. + * (E.g. they may depend on boot-time hardware detection or be + * deliberately randomized.) 
+ * + * They are also not intended to be secure against collisions caused by + * malicious inputs; much slower hash functions are required for that. + * + * They are optimized for pathname components, meaning short strings. + * Even if a majority of files have longer names, the dynamic profile of + * pathname components skews short due to short directory names. + * (E.g. /usr/lib/libsesquipedalianism.so.3.141.) + */ + +/* + * Version 1: one byte at a time. Example of use: + * + * unsigned long hash = init_name_hash; + * while (*p) + * hash = partial_name_hash(tolower(*p++), hash); + * hash = end_name_hash(hash); + * + * Although this is designed for bytes, fs/hfsplus/unicode.c + * abuses it to hash 16-bit values. + */ + +/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ +#define init_name_hash(salt) (unsigned long)(salt) + +/* partial hash update function. Assume roughly 4 bits per character */ +static inline unsigned long +partial_name_hash(unsigned long c, unsigned long prevhash) +{ + return (prevhash + (c << 4) + (c >> 4)) * 11; +} + +/* + * Finally: cut down the number of bits to a int value (and try to avoid + * losing bits). This also has the property (wanted by the dcache) + * that the msbits make a good hash table index. + */ +static inline unsigned int end_name_hash(unsigned long hash) +{ + return hash_long(hash, 32); +} + +/* + * Version 2: One word (32 or 64 bits) at a time. + * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning + * exists, which describes major Linux platforms like x86 and ARM), then + * this computes a different hash function much faster. + * + * If not set, this falls back to a wrapper around the preceding. + */ +extern unsigned int __pure full_name_hash(const void *salt, const char *, unsigned int); + +/* + * A hash_len is a u64 with the hash of a string in the low + * half and the length in the high half. + */ +#define hashlen_hash(hashlen) ((u32)(hashlen)) +#define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) +#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash)) + +/* Return the "hash_len" (hash and length) of a null-terminated string */ +extern u64 __pure hashlen_string(const void *salt, const char *name); + +#endif /* __LINUX_STRINGHASH_H */ diff --git a/include/linux/stringify.h b/include/linux/stringify.h new file mode 100644 index 000000000..841cec8ed --- /dev/null +++ b/include/linux/stringify.h @@ -0,0 +1,12 @@ +#ifndef __LINUX_STRINGIFY_H +#define __LINUX_STRINGIFY_H + +/* Indirect stringification. Doing two levels allows the parameter to be a + * macro itself. For example, compile with -DFOO=bar, __stringify(FOO) + * converts to "bar". + */ + +#define __stringify_1(x...) #x +#define __stringify(x...) __stringify_1(x) + +#endif /* !__LINUX_STRINGIFY_H */ diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h new file mode 100644 index 000000000..377b8a578 --- /dev/null +++ b/include/linux/sudmac.h @@ -0,0 +1,52 @@ +/* + * Header for the SUDMAC driver + * + * Copyright (C) 2013 Renesas Solutions Corp. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ */ +#ifndef SUDMAC_H +#define SUDMAC_H + +#include +#include +#include + +/* Used by slave DMA clients to request DMA to/from a specific peripheral */ +struct sudmac_slave { + struct shdma_slave shdma_slave; /* Set by the platform */ +}; + +/* + * Supplied by platforms to specify, how a DMA channel has to be configured for + * a certain peripheral + */ +struct sudmac_slave_config { + int slave_id; +}; + +struct sudmac_channel { + unsigned long offset; + unsigned long config; + unsigned long wait; /* The configuable range is 0 to 3 */ + unsigned long dint_end_bit; +}; + +struct sudmac_pdata { + const struct sudmac_slave_config *slave; + int slave_num; + const struct sudmac_channel *channel; + int channel_num; +}; + +/* Definitions for the sudmac_channel.config */ +#define SUDMAC_TX_BUFFER_MODE BIT(0) +#define SUDMAC_RX_END_MODE BIT(1) + +/* Definitions for the sudmac_channel.dint_end_bit */ +#define SUDMAC_DMA_BIT_CH0 BIT(0) +#define SUDMAC_DMA_BIT_CH1 BIT(1) + +#endif diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h new file mode 100644 index 000000000..3a11fa41a --- /dev/null +++ b/include/linux/sungem_phy.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SUNGEM_PHY_H__ +#define __SUNGEM_PHY_H__ + +struct mii_phy; + +/* Operations supported by any kind of PHY */ +struct mii_phy_ops +{ + int (*init)(struct mii_phy *phy); + int (*suspend)(struct mii_phy *phy); + int (*setup_aneg)(struct mii_phy *phy, u32 advertise); + int (*setup_forced)(struct mii_phy *phy, int speed, int fd); + int (*poll_link)(struct mii_phy *phy); + int (*read_link)(struct mii_phy *phy); + int (*enable_fiber)(struct mii_phy *phy, int autoneg); +}; + +/* Structure used to statically define an mii/gii based PHY */ +struct mii_phy_def +{ + u32 phy_id; /* Concatenated ID1 << 16 | ID2 */ + u32 phy_id_mask; /* Significant bits */ + u32 features; /* Ethtool SUPPORTED_* defines */ + int magic_aneg; /* Autoneg does all speed test for us */ + const char* name; + const struct mii_phy_ops* ops; +}; + +enum { + BCM54XX_COPPER, + BCM54XX_FIBER, + BCM54XX_GBIC, + BCM54XX_SGMII, + BCM54XX_UNKNOWN, +}; + +/* An instance of a PHY, partially borrowed from mii_if_info */ +struct mii_phy +{ + struct mii_phy_def* def; + u32 advertising; + int mii_id; + + /* 1: autoneg enabled, 0: disabled */ + int autoneg; + + /* forced speed & duplex (no autoneg) + * partner speed & duplex & pause (autoneg) + */ + int speed; + int duplex; + int pause; + + /* Provided by host chip */ + struct net_device *dev; + int (*mdio_read) (struct net_device *dev, int mii_id, int reg); + void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val); + void *platform_data; +}; + +/* Pass in a struct mii_phy with dev, mdio_read and mdio_write + * filled, the remaining fields will be filled on return + */ +extern int sungem_phy_probe(struct mii_phy *phy, int mii_id); + + +/* MII definitions missing from mii.h */ + +#define BMCR_SPD2 0x0040 /* Gigabit enable (bcm54xx) */ +#define LPA_PAUSE 0x0400 + +/* More PHY registers (model specific) */ + +/* MII BCM5201 MULTIPHY interrupt register */ +#define MII_BCM5201_INTERRUPT 0x1A +#define MII_BCM5201_INTERRUPT_INTENABLE 0x4000 + +#define MII_BCM5201_AUXMODE2 0x1B +#define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008 + +#define MII_BCM5201_MULTIPHY 0x1E + +/* MII BCM5201 MULTIPHY register bits */ +#define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002 +#define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008 + +/* MII BCM5221 Additional registers */ +#define MII_BCM5221_TEST 0x1f +#define 
MII_BCM5221_TEST_ENABLE_SHADOWS 0x0080 +#define MII_BCM5221_SHDOW_AUX_STAT2 0x1b +#define MII_BCM5221_SHDOW_AUX_STAT2_APD 0x0020 +#define MII_BCM5221_SHDOW_AUX_MODE4 0x1a +#define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001 +#define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004 + +/* MII BCM5241 Additional registers */ +#define MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR 0x0008 + +/* MII BCM5400 1000-BASET Control register */ +#define MII_BCM5400_GB_CONTROL 0x09 +#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200 + +/* MII BCM5400 AUXCONTROL register */ +#define MII_BCM5400_AUXCONTROL 0x18 +#define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004 + +/* MII BCM5400 AUXSTATUS register */ +#define MII_BCM5400_AUXSTATUS 0x19 +#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700 +#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT 8 + +/* 1000BT control (Marvell & BCM54xx at least) */ +#define MII_1000BASETCONTROL 0x09 +#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200 +#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100 + +/* Marvell 88E1011 PHY control */ +#define MII_M1011_PHY_SPEC_CONTROL 0x10 +#define MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX 0x20 +#define MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX 0x40 + +/* Marvell 88E1011 PHY status */ +#define MII_M1011_PHY_SPEC_STATUS 0x11 +#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000 +#define MII_M1011_PHY_SPEC_STATUS_100 0x4000 +#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 +#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 +#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 +#define MII_M1011_PHY_SPEC_STATUS_TX_PAUSE 0x0008 +#define MII_M1011_PHY_SPEC_STATUS_RX_PAUSE 0x0004 + +#endif /* __SUNGEM_PHY_H__ */ diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h new file mode 100644 index 000000000..07d454873 --- /dev/null +++ b/include/linux/sunrpc/addr.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/addr.h + * + * Various routines for copying and comparing sockaddrs and for + * converting them to and from presentation format. 
+ */ +#ifndef _LINUX_SUNRPC_ADDR_H +#define _LINUX_SUNRPC_ADDR_H + +#include +#include +#include +#include + +size_t rpc_ntop(const struct sockaddr *, char *, const size_t); +size_t rpc_pton(struct net *, const char *, const size_t, + struct sockaddr *, const size_t); +char * rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t); +size_t rpc_uaddr2sockaddr(struct net *, const char *, const size_t, + struct sockaddr *, const size_t); + +static inline unsigned short rpc_get_port(const struct sockaddr *sap) +{ + switch (sap->sa_family) { + case AF_INET: + return ntohs(((struct sockaddr_in *)sap)->sin_port); + case AF_INET6: + return ntohs(((struct sockaddr_in6 *)sap)->sin6_port); + } + return 0; +} + +static inline void rpc_set_port(struct sockaddr *sap, + const unsigned short port) +{ + switch (sap->sa_family) { + case AF_INET: + ((struct sockaddr_in *)sap)->sin_port = htons(port); + break; + case AF_INET6: + ((struct sockaddr_in6 *)sap)->sin6_port = htons(port); + break; + } +} + +#define IPV6_SCOPE_DELIMITER '%' +#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn") + +static inline bool rpc_cmp_addr4(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1; + const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2; + + return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; +} + +static inline bool __rpc_copy_addr4(struct sockaddr *dst, + const struct sockaddr *src) +{ + const struct sockaddr_in *ssin = (struct sockaddr_in *) src; + struct sockaddr_in *dsin = (struct sockaddr_in *) dst; + + dsin->sin_family = ssin->sin_family; + dsin->sin_addr.s_addr = ssin->sin_addr.s_addr; + return true; +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline bool rpc_cmp_addr6(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1; + const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; + + if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr)) + return false; + else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL) + return sin1->sin6_scope_id == sin2->sin6_scope_id; + + return true; +} + +static inline bool __rpc_copy_addr6(struct sockaddr *dst, + const struct sockaddr *src) +{ + const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src; + struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst; + + dsin6->sin6_family = ssin6->sin6_family; + dsin6->sin6_addr = ssin6->sin6_addr; + dsin6->sin6_scope_id = ssin6->sin6_scope_id; + return true; +} +#else /* !(IS_ENABLED(CONFIG_IPV6) */ +static inline bool rpc_cmp_addr6(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + return false; +} + +static inline bool __rpc_copy_addr6(struct sockaddr *dst, + const struct sockaddr *src) +{ + return false; +} +#endif /* !(IS_ENABLED(CONFIG_IPV6) */ + +/** + * rpc_cmp_addr - compare the address portion of two sockaddrs. + * @sap1: first sockaddr + * @sap2: second sockaddr + * + * Just compares the family and address portion. Ignores port, but + * compares the scope if it's a link-local address. + * + * Returns true if the addrs are equal, false if they aren't. 
+ */ +static inline bool rpc_cmp_addr(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + if (sap1->sa_family == sap2->sa_family) { + switch (sap1->sa_family) { + case AF_INET: + return rpc_cmp_addr4(sap1, sap2); + case AF_INET6: + return rpc_cmp_addr6(sap1, sap2); + } + } + return false; +} + +/** + * rpc_cmp_addr_port - compare the address and port number of two sockaddrs. + * @sap1: first sockaddr + * @sap2: second sockaddr + */ +static inline bool rpc_cmp_addr_port(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + if (!rpc_cmp_addr(sap1, sap2)) + return false; + return rpc_get_port(sap1) == rpc_get_port(sap2); +} + +/** + * rpc_copy_addr - copy the address portion of one sockaddr to another + * @dst: destination sockaddr + * @src: source sockaddr + * + * Just copies the address portion and family. Ignores port, scope, etc. + * Caller is responsible for making certain that dst is large enough to hold + * the address in src. Returns true if address family is supported. Returns + * false otherwise. + */ +static inline bool rpc_copy_addr(struct sockaddr *dst, + const struct sockaddr *src) +{ + switch (src->sa_family) { + case AF_INET: + return __rpc_copy_addr4(dst, src); + case AF_INET6: + return __rpc_copy_addr6(dst, src); + } + return false; +} + +/** + * rpc_get_scope_id - return scopeid for a given sockaddr + * @sa: sockaddr to get scopeid from + * + * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if + * not an AF_INET6 address. + */ +static inline u32 rpc_get_scope_id(const struct sockaddr *sa) +{ + if (sa->sa_family != AF_INET6) + return 0; + + return ((struct sockaddr_in6 *) sa)->sin6_scope_id; +} + +#endif /* _LINUX_SUNRPC_ADDR_H */ diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h new file mode 100644 index 000000000..58a6765c1 --- /dev/null +++ b/include/linux/sunrpc/auth.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/auth.h + * + * Declarations for the RPC client authentication machinery. + * + * Copyright (C) 1996, Olaf Kirch + */ + +#ifndef _LINUX_SUNRPC_AUTH_H +#define _LINUX_SUNRPC_AUTH_H + +#ifdef __KERNEL__ + +#include +#include +#include + +#include +#include +#include +#include + +/* + * Maximum size of AUTH_NONE authentication information, in XDR words. + */ +#define NUL_CALLSLACK (4) +#define NUL_REPLYSLACK (2) + +/* + * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes, + * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes. 
+ */ +#define UNX_MAXNODENAME __NEW_UTS_LEN +#define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME)) +#define UNX_NGROUPS 16 + +struct rpcsec_gss_info; + +/* auth_cred ac_flags bits */ +enum { + RPC_CRED_KEY_EXPIRE_SOON = 1, /* underlying cred key will expire soon */ + RPC_CRED_NOTIFY_TIMEOUT = 2, /* nofity generic cred when underlying + key will expire soon */ +}; + +/* Work around the lack of a VFS credential */ +struct auth_cred { + kuid_t uid; + kgid_t gid; + struct group_info *group_info; + const char *principal; + unsigned long ac_flags; + unsigned char machine_cred : 1; +}; + +/* + * Client user credentials + */ +struct rpc_auth; +struct rpc_credops; +struct rpc_cred { + struct hlist_node cr_hash; /* hash chain */ + struct list_head cr_lru; /* lru garbage collection */ + struct rcu_head cr_rcu; + struct rpc_auth * cr_auth; + const struct rpc_credops *cr_ops; + unsigned long cr_expire; /* when to gc */ + unsigned long cr_flags; /* various flags */ + atomic_t cr_count; /* ref count */ + + kuid_t cr_uid; + + /* per-flavor data */ +}; +#define RPCAUTH_CRED_NEW 0 +#define RPCAUTH_CRED_UPTODATE 1 +#define RPCAUTH_CRED_HASHED 2 +#define RPCAUTH_CRED_NEGATIVE 3 + +/* rpc_auth au_flags */ +#define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */ + +/* + * Client authentication handle + */ +struct rpc_cred_cache; +struct rpc_authops; +struct rpc_auth { + unsigned int au_cslack; /* call cred size estimate */ + /* guess at number of u32's auth adds before + * reply data; normally the verifier size: */ + unsigned int au_rslack; + /* for gss, used to calculate au_rslack: */ + unsigned int au_verfsize; + + unsigned int au_flags; /* various flags */ + const struct rpc_authops *au_ops; /* operations */ + rpc_authflavor_t au_flavor; /* pseudoflavor (note may + * differ from the flavor in + * au_ops->au_flavor in gss + * case) */ + atomic_t au_count; /* Reference counter */ + + struct rpc_cred_cache * au_credcache; + /* per-flavor data */ +}; + +/* rpc_auth au_flags */ +#define RPCAUTH_AUTH_DATATOUCH 0x00000002 + +struct rpc_auth_create_args { + rpc_authflavor_t pseudoflavor; + const char *target_name; +}; + +/* Flags for rpcauth_lookupcred() */ +#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */ +#define RPCAUTH_LOOKUP_RCU 0x02 /* lock-less lookup */ + +/* + * Client authentication ops + */ +struct rpc_authops { + struct module *owner; + rpc_authflavor_t au_flavor; /* flavor (RPC_AUTH_*) */ + char * au_name; + struct rpc_auth * (*create)(const struct rpc_auth_create_args *, + struct rpc_clnt *); + void (*destroy)(struct rpc_auth *); + + int (*hash_cred)(struct auth_cred *, unsigned int); + struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); + struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t); + int (*list_pseudoflavors)(rpc_authflavor_t *, int); + rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *); + int (*flavor2info)(rpc_authflavor_t, + struct rpcsec_gss_info *); + int (*key_timeout)(struct rpc_auth *, + struct rpc_cred *); +}; + +struct rpc_credops { + const char * cr_name; /* Name of the auth flavour */ + int (*cr_init)(struct rpc_auth *, struct rpc_cred *); + void (*crdestroy)(struct rpc_cred *); + + int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); + struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int); + __be32 * (*crmarshal)(struct rpc_task *, __be32 *); + int (*crrefresh)(struct rpc_task *); + __be32 * (*crvalidate)(struct rpc_task *, __be32 *); + int 
(*crwrap_req)(struct rpc_task *, kxdreproc_t, + void *, __be32 *, void *); + int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t, + void *, __be32 *, void *); + int (*crkey_timeout)(struct rpc_cred *); + bool (*crkey_to_expire)(struct rpc_cred *); + char * (*crstringify_acceptor)(struct rpc_cred *); +}; + +extern const struct rpc_authops authunix_ops; +extern const struct rpc_authops authnull_ops; + +int __init rpc_init_authunix(void); +int __init rpc_init_generic_auth(void); +int __init rpcauth_init_module(void); +void rpcauth_remove_module(void); +void rpc_destroy_generic_auth(void); +void rpc_destroy_authunix(void); + +struct rpc_cred * rpc_lookup_cred(void); +struct rpc_cred * rpc_lookup_cred_nonblock(void); +struct rpc_cred * rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t); +struct rpc_cred * rpc_lookup_machine_cred(const char *service_name); +int rpcauth_register(const struct rpc_authops *); +int rpcauth_unregister(const struct rpc_authops *); +struct rpc_auth * rpcauth_create(const struct rpc_auth_create_args *, + struct rpc_clnt *); +void rpcauth_release(struct rpc_auth *); +rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t, + struct rpcsec_gss_info *); +int rpcauth_get_gssinfo(rpc_authflavor_t, + struct rpcsec_gss_info *); +int rpcauth_list_flavors(rpc_authflavor_t *, int); +struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t); +void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); +struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); +struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); +void put_rpccred(struct rpc_cred *); +__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); +__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); +int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj); +int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj); +int rpcauth_refreshcred(struct rpc_task *); +void rpcauth_invalcred(struct rpc_task *); +int rpcauth_uptodatecred(struct rpc_task *); +int rpcauth_init_credcache(struct rpc_auth *); +void rpcauth_destroy_credcache(struct rpc_auth *); +void rpcauth_clear_credcache(struct rpc_cred_cache *); +int rpcauth_key_timeout_notify(struct rpc_auth *, + struct rpc_cred *); +bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *); +char * rpcauth_stringify_acceptor(struct rpc_cred *); + +static inline +struct rpc_cred * get_rpccred(struct rpc_cred *cred) +{ + if (cred != NULL) + atomic_inc(&cred->cr_count); + return cred; +} + +/** + * get_rpccred_rcu - get a reference to a cred using rcu-protected pointer + * @cred: cred of which to take a reference + * + * In some cases, we may have a pointer to a credential to which we + * want to take a reference, but don't already have one. Because these + * objects are freed using RCU, we can access the cr_count while its + * on its way to destruction and only take a reference if it's not already + * zero. 
+ */ +static inline struct rpc_cred * +get_rpccred_rcu(struct rpc_cred *cred) +{ + if (atomic_inc_not_zero(&cred->cr_count)) + return cred; + return NULL; +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_AUTH_H */ diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h new file mode 100644 index 000000000..0c9eac351 --- /dev/null +++ b/include/linux/sunrpc/auth_gss.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/auth_gss.h + * + * Declarations for RPCSEC_GSS + * + * Dug Song + * Andy Adamson + * Bruce Fields + * Copyright (c) 2000 The Regents of the University of Michigan + */ + +#ifndef _LINUX_SUNRPC_AUTH_GSS_H +#define _LINUX_SUNRPC_AUTH_GSS_H + +#ifdef __KERNEL__ +#include +#include +#include +#include + +#define RPC_GSS_VERSION 1 + +#define MAXSEQ 0x80000000 /* maximum legal sequence number, from rfc 2203 */ + +enum rpc_gss_proc { + RPC_GSS_PROC_DATA = 0, + RPC_GSS_PROC_INIT = 1, + RPC_GSS_PROC_CONTINUE_INIT = 2, + RPC_GSS_PROC_DESTROY = 3 +}; + +enum rpc_gss_svc { + RPC_GSS_SVC_NONE = 1, + RPC_GSS_SVC_INTEGRITY = 2, + RPC_GSS_SVC_PRIVACY = 3 +}; + +/* on-the-wire gss cred: */ +struct rpc_gss_wire_cred { + u32 gc_v; /* version */ + u32 gc_proc; /* control procedure */ + u32 gc_seq; /* sequence number */ + u32 gc_svc; /* service */ + struct xdr_netobj gc_ctx; /* context handle */ +}; + +/* on-the-wire gss verifier: */ +struct rpc_gss_wire_verf { + u32 gv_flavor; + struct xdr_netobj gv_verf; +}; + +/* return from gss NULL PROC init sec context */ +struct rpc_gss_init_res { + struct xdr_netobj gr_ctx; /* context handle */ + u32 gr_major; /* major status */ + u32 gr_minor; /* minor status */ + u32 gr_win; /* sequence window */ + struct xdr_netobj gr_token; /* token */ +}; + +/* The gss_cl_ctx struct holds all the information the rpcsec_gss client + * code needs to know about a single security context. In particular, + * gc_gss_ctx is the context handle that is used to do gss-api calls, while + * gc_wire_ctx is the context handle that is used to identify the context on + * the wire when communicating with a server. */ + +struct gss_cl_ctx { + refcount_t count; + enum rpc_gss_proc gc_proc; + u32 gc_seq; + spinlock_t gc_seq_lock; + struct gss_ctx *gc_gss_ctx; + struct xdr_netobj gc_wire_ctx; + struct xdr_netobj gc_acceptor; + u32 gc_win; + unsigned long gc_expiry; + struct rcu_head gc_rcu; +}; + +struct gss_upcall_msg; +struct gss_cred { + struct rpc_cred gc_base; + enum rpc_gss_svc gc_service; + struct gss_cl_ctx __rcu *gc_ctx; + struct gss_upcall_msg *gc_upcall; + const char *gc_principal; + unsigned long gc_upcall_timestamp; +}; + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_AUTH_GSS_H */ + diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h new file mode 100644 index 000000000..4397a4824 --- /dev/null +++ b/include/linux/sunrpc/bc_xprt.h @@ -0,0 +1,72 @@ +/****************************************************************************** + +(c) 2008 NetApp. All Rights Reserved. + +NetApp provides this source code under the GPL v2 License. +The GPL v2 license is available at +http://opensource.org/licenses/gpl-license.php. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ + +/* + * Functions to create and manage the backchannel + */ + +#ifndef _LINUX_SUNRPC_BC_XPRT_H +#define _LINUX_SUNRPC_BC_XPRT_H + +#include +#include +#include + +#ifdef CONFIG_SUNRPC_BACKCHANNEL +struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid); +void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied); +void xprt_free_bc_request(struct rpc_rqst *req); +int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); +void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); + +/* Socket backchannel transport methods */ +int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs); +void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs); +void xprt_free_bc_rqst(struct rpc_rqst *req); + +/* + * Determine if a shared backchannel is in use + */ +static inline int svc_is_backchannel(const struct svc_rqst *rqstp) +{ + if (rqstp->rq_server->sv_bc_xprt) + return 1; + return 0; +} +#else /* CONFIG_SUNRPC_BACKCHANNEL */ +static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, + unsigned int min_reqs) +{ + return 0; +} + +static inline int svc_is_backchannel(const struct svc_rqst *rqstp) +{ + return 0; +} + +static inline void xprt_free_bc_request(struct rpc_rqst *req) +{ +} +#endif /* CONFIG_SUNRPC_BACKCHANNEL */ +#endif /* _LINUX_SUNRPC_BC_XPRT_H */ + diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h new file mode 100644 index 000000000..40d2822f0 --- /dev/null +++ b/include/linux/sunrpc/cache.h @@ -0,0 +1,302 @@ +/* + * include/linux/sunrpc/cache.h + * + * Generic code for various authentication-related caches + * used by sunrpc clients and servers. + * + * Copyright (C) 2002 Neil Brown + * + * Released under terms in GPL version 2. See COPYING. + * + */ + +#ifndef _LINUX_SUNRPC_CACHE_H_ +#define _LINUX_SUNRPC_CACHE_H_ + +#include +#include +#include +#include + +/* + * Each cache requires: + * - A 'struct cache_detail' which contains information specific to the cache + * for common code to use. + * - An item structure that must contain a "struct cache_head" + * - A lookup function defined using DefineCacheLookup + * - A 'put' function that can release a cache item. It will only + * be called after cache_put has succeed, so there are guarantee + * to be no references. + * - A function to calculate a hash of an item's key. + * + * as well as assorted code fragments (e.g. compare keys) and numbers + * (e.g. hash size, goal_age, etc). + * + * Each cache must be registered so that it can be cleaned regularly. + * When the cache is unregistered, it is flushed completely. + * + * Entries have a ref count and a 'hashed' flag which counts the existence + * in the hash table. + * We only expire entries when refcount is zero. + * Existence in the cache is counted the refcount. + */ + +/* Every cache item has a common header that is used + * for expiring and refreshing entries. 
+ * + */ +struct cache_head { + struct hlist_node cache_list; + time_t expiry_time; /* After time time, don't use the data */ + time_t last_refresh; /* If CACHE_PENDING, this is when upcall was + * sent, else this is when update was + * received, though it is alway set to + * be *after* ->flush_time. + */ + struct kref ref; + unsigned long flags; +}; +#define CACHE_VALID 0 /* Entry contains valid data */ +#define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */ +#define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/ +#define CACHE_CLEANED 3 /* Entry has been cleaned from cache */ + +#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ + +struct cache_detail { + struct module * owner; + int hash_size; + struct hlist_head * hash_table; + rwlock_t hash_lock; + + char *name; + void (*cache_put)(struct kref *); + + int (*cache_upcall)(struct cache_detail *, + struct cache_head *); + + void (*cache_request)(struct cache_detail *cd, + struct cache_head *ch, + char **bpp, int *blen); + + int (*cache_parse)(struct cache_detail *, + char *buf, int len); + + int (*cache_show)(struct seq_file *m, + struct cache_detail *cd, + struct cache_head *h); + void (*warn_no_listener)(struct cache_detail *cd, + int has_died); + + struct cache_head * (*alloc)(void); + int (*match)(struct cache_head *orig, struct cache_head *new); + void (*init)(struct cache_head *orig, struct cache_head *new); + void (*update)(struct cache_head *orig, struct cache_head *new); + + /* fields below this comment are for internal use + * and should not be touched by cache owners + */ + time_t flush_time; /* flush all cache items with + * last_refresh at or earlier + * than this. last_refresh + * is never set at or earlier + * than this. + */ + struct list_head others; + time_t nextcheck; + int entries; + + /* fields for communication over channel */ + struct list_head queue; + + atomic_t readers; /* how many time is /chennel open */ + time_t last_close; /* if no readers, when did last close */ + time_t last_warn; /* when we last warned about no readers */ + + union { + struct proc_dir_entry *procfs; + struct dentry *pipefs; + }; + struct net *net; +}; + + +/* this must be embedded in any request structure that + * identifies an object that will want a callback on + * a cache fill + */ +struct cache_req { + struct cache_deferred_req *(*defer)(struct cache_req *req); + int thread_wait; /* How long (jiffies) we can block the + * current thread to wait for updates. + */ +}; +/* this must be embedded in a deferred_request that is being + * delayed awaiting cache-fill + */ +struct cache_deferred_req { + struct hlist_node hash; /* on hash chain */ + struct list_head recent; /* on fifo */ + struct cache_head *item; /* cache item we wait on */ + void *owner; /* we might need to discard all defered requests + * owned by someone */ + void (*revisit)(struct cache_deferred_req *req, + int too_many); +}; + +/* + * timestamps kept in the cache are expressed in seconds + * since boot. This is the best for measuring differences in + * real time. 
+ */ +static inline time_t seconds_since_boot(void) +{ + struct timespec boot; + getboottime(&boot); + return get_seconds() - boot.tv_sec; +} + +static inline time_t convert_to_wallclock(time_t sinceboot) +{ + struct timespec boot; + getboottime(&boot); + return boot.tv_sec + sinceboot; +} + +extern const struct file_operations cache_file_operations_pipefs; +extern const struct file_operations content_file_operations_pipefs; +extern const struct file_operations cache_flush_operations_pipefs; + +extern struct cache_head * +sunrpc_cache_lookup(struct cache_detail *detail, + struct cache_head *key, int hash); +extern struct cache_head * +sunrpc_cache_update(struct cache_detail *detail, + struct cache_head *new, struct cache_head *old, int hash); + +extern int +sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h); + + +extern void cache_clean_deferred(void *owner); + +static inline struct cache_head *cache_get(struct cache_head *h) +{ + kref_get(&h->ref); + return h; +} + + +static inline void cache_put(struct cache_head *h, struct cache_detail *cd) +{ + if (kref_read(&h->ref) <= 2 && + h->expiry_time < cd->nextcheck) + cd->nextcheck = h->expiry_time; + kref_put(&h->ref, cd->cache_put); +} + +static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) +{ + if (!test_bit(CACHE_VALID, &h->flags)) + return false; + + return (h->expiry_time < seconds_since_boot()) || + (detail->flush_time >= h->last_refresh); +} + +extern int cache_check(struct cache_detail *detail, + struct cache_head *h, struct cache_req *rqstp); +extern void cache_flush(void); +extern void cache_purge(struct cache_detail *detail); +#define NEVER (0x7FFFFFFF) +extern void __init cache_initialize(void); +extern int cache_register_net(struct cache_detail *cd, struct net *net); +extern void cache_unregister_net(struct cache_detail *cd, struct net *net); + +extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net); +extern void cache_destroy_net(struct cache_detail *cd, struct net *net); + +extern void sunrpc_init_cache_detail(struct cache_detail *cd); +extern void sunrpc_destroy_cache_detail(struct cache_detail *cd); +extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, + umode_t, struct cache_detail *); +extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); +extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *); + +/* Must store cache_detail in seq_file->private if using next three functions */ +extern void *cache_seq_start(struct seq_file *file, loff_t *pos); +extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos); +extern void cache_seq_stop(struct seq_file *file, void *p); + +extern void qword_add(char **bpp, int *lp, char *str); +extern void qword_addhex(char **bpp, int *lp, char *buf, int blen); +extern int qword_get(char **bpp, char *dest, int bufsize); + +static inline int get_int(char **bpp, int *anint) +{ + char buf[50]; + char *ep; + int rv; + int len = qword_get(bpp, buf, sizeof(buf)); + + if (len < 0) + return -EINVAL; + if (len == 0) + return -ENOENT; + + rv = simple_strtol(buf, &ep, 0); + if (*ep) + return -EINVAL; + + *anint = rv; + return 0; +} + +static inline int get_uint(char **bpp, unsigned int *anint) +{ + char buf[50]; + int len = qword_get(bpp, buf, sizeof(buf)); + + if (len < 0) + return -EINVAL; + if (len == 0) + return -ENOENT; + + if (kstrtouint(buf, 0, anint)) + return -EINVAL; + + return 0; +} + +static inline int get_time(char **bpp, time_t 
*time) +{ + char buf[50]; + long long ll; + int len = qword_get(bpp, buf, sizeof(buf)); + + if (len < 0) + return -EINVAL; + if (len == 0) + return -ENOENT; + + if (kstrtoll(buf, 0, &ll)) + return -EINVAL; + + *time = (time_t)ll; + return 0; +} + +static inline time_t get_expiry(char **bpp) +{ + time_t rv; + struct timespec boot; + + if (get_time(bpp, &rv)) + return 0; + if (rv < 0) + return 0; + getboottime(&boot); + return rv - boot.tv_sec; +} + +#endif /* _LINUX_SUNRPC_CACHE_H_ */ diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h new file mode 100644 index 000000000..73d5c4a87 --- /dev/null +++ b/include/linux/sunrpc/clnt.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/clnt.h + * + * Declarations for the high-level RPC client interface + * + * Copyright (C) 1995, 1996, Olaf Kirch + */ + +#ifndef _LINUX_SUNRPC_CLNT_H +#define _LINUX_SUNRPC_CLNT_H + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct rpc_inode; + +/* + * The high-level client handle + */ +struct rpc_clnt { + atomic_t cl_count; /* Number of references */ + unsigned int cl_clid; /* client id */ + struct list_head cl_clients; /* Global list of clients */ + struct list_head cl_tasks; /* List of tasks */ + spinlock_t cl_lock; /* spinlock */ + struct rpc_xprt __rcu * cl_xprt; /* transport */ + const struct rpc_procinfo *cl_procinfo; /* procedure info */ + u32 cl_prog, /* RPC program number */ + cl_vers, /* RPC version number */ + cl_maxproc; /* max procedure number */ + + struct rpc_auth * cl_auth; /* authenticator */ + struct rpc_stat * cl_stats; /* per-program statistics */ + struct rpc_iostats * cl_metrics; /* per-client statistics */ + + unsigned int cl_softrtry : 1,/* soft timeouts */ + cl_discrtry : 1,/* disconnect before retry */ + cl_noretranstimeo: 1,/* No retransmit timeouts */ + cl_autobind : 1,/* use getport() */ + cl_chatty : 1;/* be verbose */ + + struct rpc_rtt * cl_rtt; /* RTO estimator data */ + const struct rpc_timeout *cl_timeout; /* Timeout strategy */ + + atomic_t cl_swapper; /* swapfile count */ + int cl_nodelen; /* nodename length */ + char cl_nodename[UNX_MAXNODENAME+1]; + struct rpc_pipe_dir_head cl_pipedir_objects; + struct rpc_clnt * cl_parent; /* Points to parent of clones */ + struct rpc_rtt cl_rtt_default; + struct rpc_timeout cl_timeout_default; + const struct rpc_program *cl_program; +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + struct dentry *cl_debugfs; /* debugfs directory */ +#endif + struct rpc_xprt_iter cl_xpi; +}; + +/* + * General RPC program info + */ +#define RPC_MAXVERSION 4 +struct rpc_program { + const char * name; /* protocol name */ + u32 number; /* program number */ + unsigned int nrvers; /* number of versions */ + const struct rpc_version ** version; /* version array */ + struct rpc_stat * stats; /* statistics */ + const char * pipe_dir_name; /* path to rpc_pipefs dir */ +}; + +struct rpc_version { + u32 number; /* version number */ + unsigned int nrprocs; /* number of procs */ + const struct rpc_procinfo *procs; /* procedure array */ + unsigned int *counts; /* call counts */ +}; + +/* + * Procedure information + */ +struct rpc_procinfo { + u32 p_proc; /* RPC procedure number */ + kxdreproc_t p_encode; /* XDR encode function */ + kxdrdproc_t p_decode; /* XDR decode function */ + unsigned int p_arglen; /* argument hdr length (u32) */ + unsigned int p_replen; /* reply hdr length (u32) */ + unsigned int 
p_timer; /* Which RTT timer to use */ + u32 p_statidx; /* Which procedure to account */ + const char * p_name; /* name of procedure */ +}; + +#ifdef __KERNEL__ + +struct rpc_create_args { + struct net *net; + int protocol; + struct sockaddr *address; + size_t addrsize; + struct sockaddr *saddress; + const struct rpc_timeout *timeout; + const char *servername; + const char *nodename; + const struct rpc_program *program; + u32 prognumber; /* overrides program->number */ + u32 version; + rpc_authflavor_t authflavor; + unsigned long flags; + char *client_name; + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ +}; + +struct rpc_add_xprt_test { + int (*add_xprt_test)(struct rpc_clnt *, + struct rpc_xprt *, + void *calldata); + void *data; +}; + +/* Values for "flags" field */ +#define RPC_CLNT_CREATE_HARDRTRY (1UL << 0) +#define RPC_CLNT_CREATE_AUTOBIND (1UL << 2) +#define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3) +#define RPC_CLNT_CREATE_NOPING (1UL << 4) +#define RPC_CLNT_CREATE_DISCRTRY (1UL << 5) +#define RPC_CLNT_CREATE_QUIET (1UL << 6) +#define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7) +#define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8) +#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) + +struct rpc_clnt *rpc_create(struct rpc_create_args *args); +struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, + const struct rpc_program *, u32); +struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); +struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *, + rpc_authflavor_t); +int rpc_switch_client_transport(struct rpc_clnt *, + struct xprt_create *, + const struct rpc_timeout *); + +void rpc_shutdown_client(struct rpc_clnt *); +void rpc_release_client(struct rpc_clnt *); +void rpc_task_release_transport(struct rpc_task *); +void rpc_task_release_client(struct rpc_task *); + +int rpcb_create_local(struct net *); +void rpcb_put_local(struct net *); +int rpcb_register(struct net *, u32, u32, int, unsigned short); +int rpcb_v4_register(struct net *net, const u32 program, + const u32 version, + const struct sockaddr *address, + const char *netid); +void rpcb_getport_async(struct rpc_task *); + +void rpc_call_start(struct rpc_task *); +int rpc_call_async(struct rpc_clnt *clnt, + const struct rpc_message *msg, int flags, + const struct rpc_call_ops *tk_ops, + void *calldata); +int rpc_call_sync(struct rpc_clnt *clnt, + const struct rpc_message *msg, int flags); +struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, + int flags); +int rpc_restart_call_prepare(struct rpc_task *); +int rpc_restart_call(struct rpc_task *); +void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); +struct net * rpc_net_ns(struct rpc_clnt *); +size_t rpc_max_payload(struct rpc_clnt *); +size_t rpc_max_bc_payload(struct rpc_clnt *); +void rpc_force_rebind(struct rpc_clnt *); +size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); +const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); +int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); + +int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt, + int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *), + void *data); + +int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, + struct rpc_xprt_switch *xps, + struct rpc_xprt *xprt, + void *dummy); +int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *, + int (*setup)(struct rpc_clnt *, + struct rpc_xprt_switch *, + struct rpc_xprt *, + void *), + void *data); +void rpc_set_connect_timeout(struct rpc_clnt *clnt, + unsigned long 
connect_timeout, + unsigned long reconnect_timeout); + +int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *, + struct rpc_xprt_switch *, + struct rpc_xprt *, + void *); + +const char *rpc_proc_name(const struct rpc_task *task); + +void rpc_clnt_xprt_switch_put(struct rpc_clnt *); +void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); +bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, + const struct sockaddr *sap); +void rpc_cleanup_clids(void); + +static inline int rpc_reply_expected(struct rpc_task *task) +{ + return (task->tk_msg.rpc_proc != NULL) && + (task->tk_msg.rpc_proc->p_decode != NULL); +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h new file mode 100644 index 000000000..f6aeed07f --- /dev/null +++ b/include/linux/sunrpc/debug.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/debug.h + * + * Debugging support for sunrpc module + * + * Copyright (C) 1996, Olaf Kirch + */ +#ifndef _LINUX_SUNRPC_DEBUG_H_ +#define _LINUX_SUNRPC_DEBUG_H_ + +#include + +/* + * Debugging macros etc + */ +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +extern unsigned int rpc_debug; +extern unsigned int nfs_debug; +extern unsigned int nfsd_debug; +extern unsigned int nlm_debug; +#endif + +#define dprintk(fmt, ...) \ + dfprintk(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_cont(fmt, ...) \ + dfprintk_cont(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_rcu(fmt, ...) \ + dfprintk_rcu(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_rcu_cont(fmt, ...) \ + dfprintk_rcu_cont(FACILITY, fmt, ##__VA_ARGS__) + +#undef ifdebug +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) + +# define dfprintk(fac, fmt, ...) \ +do { \ + ifdebug(fac) \ + printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ +} while (0) + +# define dfprintk_cont(fac, fmt, ...) \ +do { \ + ifdebug(fac) \ + printk(KERN_CONT fmt, ##__VA_ARGS__); \ +} while (0) + +# define dfprintk_rcu(fac, fmt, ...) \ +do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ + rcu_read_unlock(); \ + } \ +} while (0) + +# define dfprintk_rcu_cont(fac, fmt, ...) \ +do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_CONT fmt, ##__VA_ARGS__); \ + rcu_read_unlock(); \ + } \ +} while (0) + +# define RPC_IFDEBUG(x) x +#else +# define ifdebug(fac) if (0) +# define dfprintk(fac, fmt, ...) do {} while (0) +# define dfprintk_cont(fac, fmt, ...) do {} while (0) +# define dfprintk_rcu(fac, fmt, ...) 
do {} while (0) +# define RPC_IFDEBUG(x) +#endif + +/* + * Sysctl interface for RPC debugging + */ + +struct rpc_clnt; +struct rpc_xprt; + +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +void rpc_register_sysctl(void); +void rpc_unregister_sysctl(void); +void sunrpc_debugfs_init(void); +void sunrpc_debugfs_exit(void); +void rpc_clnt_debugfs_register(struct rpc_clnt *); +void rpc_clnt_debugfs_unregister(struct rpc_clnt *); +void rpc_xprt_debugfs_register(struct rpc_xprt *); +void rpc_xprt_debugfs_unregister(struct rpc_xprt *); +#else +static inline void +sunrpc_debugfs_init(void) +{ + return; +} + +static inline void +sunrpc_debugfs_exit(void) +{ + return; +} + +static inline void +rpc_clnt_debugfs_register(struct rpc_clnt *clnt) +{ + return; +} + +static inline void +rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) +{ + return; +} + +static inline void +rpc_xprt_debugfs_register(struct rpc_xprt *xprt) +{ + return; +} + +static inline void +rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt) +{ + return; +} +#endif + +#endif /* _LINUX_SUNRPC_DEBUG_H_ */ diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h new file mode 100644 index 000000000..566d5f547 --- /dev/null +++ b/include/linux/sunrpc/gss_api.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/gss_api.h + * + * Somewhat simplified version of the gss api. + * + * Dug Song + * Andy Adamson + * Bruce Fields + * Copyright (c) 2000 The Regents of the University of Michigan + */ + +#ifndef _LINUX_SUNRPC_GSS_API_H +#define _LINUX_SUNRPC_GSS_API_H + +#ifdef __KERNEL__ +#include +#include +#include + +/* The mechanism-independent gss-api context: */ +struct gss_ctx { + struct gss_api_mech *mech_type; + void *internal_ctx_id; +}; + +#define GSS_C_NO_BUFFER ((struct xdr_netobj) 0) +#define GSS_C_NO_CONTEXT ((struct gss_ctx *) 0) +#define GSS_C_QOP_DEFAULT (0) + +/*XXX arbitrary length - is this set somewhere? */ +#define GSS_OID_MAX_LEN 32 +struct rpcsec_gss_oid { + unsigned int len; + u8 data[GSS_OID_MAX_LEN]; +}; + +/* From RFC 3530 */ +struct rpcsec_gss_info { + struct rpcsec_gss_oid oid; + u32 qop; + u32 service; +}; + +/* gss-api prototypes; note that these are somewhat simplified versions of + * the prototypes specified in RFC 2744. */ +int gss_import_sec_context( + const void* input_token, + size_t bufsize, + struct gss_api_mech *mech, + struct gss_ctx **ctx_id, + time_t *endtime, + gfp_t gfp_mask); +u32 gss_get_mic( + struct gss_ctx *ctx_id, + struct xdr_buf *message, + struct xdr_netobj *mic_token); +u32 gss_verify_mic( + struct gss_ctx *ctx_id, + struct xdr_buf *message, + struct xdr_netobj *mic_token); +u32 gss_wrap( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *outbuf, + struct page **inpages); +u32 gss_unwrap( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *inbuf); +u32 gss_delete_sec_context( + struct gss_ctx **ctx_id); + +rpc_authflavor_t gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 qop, + u32 service); +u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor); +bool gss_pseudoflavor_to_datatouch(struct gss_api_mech *, u32 pseudoflavor); +char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service); + +struct pf_desc { + u32 pseudoflavor; + u32 qop; + u32 service; + char *name; + char *auth_domain_name; + struct auth_domain *domain; + bool datatouch; +}; + +/* Different mechanisms (e.g., krb5 or spkm3) may implement gss-api, and + * mechanisms may be dynamically registered or unregistered by modules. 
*/ + +/* Each mechanism is described by the following struct: */ +struct gss_api_mech { + struct list_head gm_list; + struct module *gm_owner; + struct rpcsec_gss_oid gm_oid; + char *gm_name; + const struct gss_api_ops *gm_ops; + /* pseudoflavors supported by this mechanism: */ + int gm_pf_num; + struct pf_desc * gm_pfs; + /* Should the following be a callback operation instead? */ + const char *gm_upcall_enctypes; +}; + +/* and must provide the following operations: */ +struct gss_api_ops { + int (*gss_import_sec_context)( + const void *input_token, + size_t bufsize, + struct gss_ctx *ctx_id, + time_t *endtime, + gfp_t gfp_mask); + u32 (*gss_get_mic)( + struct gss_ctx *ctx_id, + struct xdr_buf *message, + struct xdr_netobj *mic_token); + u32 (*gss_verify_mic)( + struct gss_ctx *ctx_id, + struct xdr_buf *message, + struct xdr_netobj *mic_token); + u32 (*gss_wrap)( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *outbuf, + struct page **inpages); + u32 (*gss_unwrap)( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *buf); + void (*gss_delete_sec_context)( + void *internal_ctx_id); +}; + +int gss_mech_register(struct gss_api_mech *); +void gss_mech_unregister(struct gss_api_mech *); + +/* returns a mechanism descriptor given an OID, and increments the mechanism's + * reference count. */ +struct gss_api_mech * gss_mech_get_by_OID(struct rpcsec_gss_oid *); + +/* Given a GSS security tuple, look up a pseudoflavor */ +rpc_authflavor_t gss_mech_info2flavor(struct rpcsec_gss_info *); + +/* Given a pseudoflavor, look up a GSS security tuple */ +int gss_mech_flavor2info(rpc_authflavor_t, struct rpcsec_gss_info *); + +/* Returns a reference to a mechanism, given a name like "krb5" etc. */ +struct gss_api_mech *gss_mech_get_by_name(const char *); + +/* Similar, but get by pseudoflavor. */ +struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32); + +/* Fill in an array with a list of supported pseudoflavors */ +int gss_mech_list_pseudoflavors(rpc_authflavor_t *, int); + +struct gss_api_mech * gss_mech_get(struct gss_api_mech *); + +/* For every successful gss_mech_get or gss_mech_get_by_* call there must be a + * corresponding call to gss_mech_put. */ +void gss_mech_put(struct gss_api_mech *); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_GSS_API_H */ + diff --git a/include/linux/sunrpc/gss_asn1.h b/include/linux/sunrpc/gss_asn1.h new file mode 100644 index 000000000..3ccecd0ad --- /dev/null +++ b/include/linux/sunrpc/gss_asn1.h @@ -0,0 +1,81 @@ +/* + * linux/include/linux/sunrpc/gss_asn1.h + * + * minimal asn1 for generic encoding/decoding of gss tokens + * + * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h, + * lib/gssapi/krb5/gssapiP_krb5.h, and others + * + * Copyright (c) 2000 The Regents of the University of Michigan. + * All rights reserved. + * + * Andy Adamson + */ + +/* + * Copyright 1995 by the Massachusetts Institute of Technology. + * All Rights Reserved. + * + * Export of this software from the United States of America may + * require a specific license from the United States Government. + * It is the responsibility of any person or organization contemplating + * export to obtain such a license before exporting. 
+ * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of M.I.T. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. Furthermore if you modify this software you must label + * your software as modified software and not distribute it in such a + * fashion that it might be confused with the original M.I.T. software. + * M.I.T. makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. + * + */ + + +#include + +#define SIZEOF_INT 4 + +/* from gssapi_err_generic.h */ +#define G_BAD_SERVICE_NAME (-2045022976L) +#define G_BAD_STRING_UID (-2045022975L) +#define G_NOUSER (-2045022974L) +#define G_VALIDATE_FAILED (-2045022973L) +#define G_BUFFER_ALLOC (-2045022972L) +#define G_BAD_MSG_CTX (-2045022971L) +#define G_WRONG_SIZE (-2045022970L) +#define G_BAD_USAGE (-2045022969L) +#define G_UNKNOWN_QOP (-2045022968L) +#define G_NO_HOSTNAME (-2045022967L) +#define G_BAD_HOSTNAME (-2045022966L) +#define G_WRONG_MECH (-2045022965L) +#define G_BAD_TOK_HEADER (-2045022964L) +#define G_BAD_DIRECTION (-2045022963L) +#define G_TOK_TRUNC (-2045022962L) +#define G_REFLECT (-2045022961L) +#define G_WRONG_TOKID (-2045022960L) + +#define g_OID_equal(o1,o2) \ + (((o1)->len == (o2)->len) && \ + (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0)) + +u32 g_verify_token_header( + struct xdr_netobj *mech, + int *body_size, + unsigned char **buf_in, + int toksize); + +int g_token_size( + struct xdr_netobj *mech, + unsigned int body_size); + +void g_make_token_header( + struct xdr_netobj *mech, + int body_size, + unsigned char **buf); diff --git a/include/linux/sunrpc/gss_err.h b/include/linux/sunrpc/gss_err.h new file mode 100644 index 000000000..a6807867b --- /dev/null +++ b/include/linux/sunrpc/gss_err.h @@ -0,0 +1,167 @@ +/* + * linux/include/sunrpc/gss_err.h + * + * Adapted from MIT Kerberos 5-1.2.1 include/gssapi/gssapi.h + * + * Copyright (c) 2002 The Regents of the University of Michigan. + * All rights reserved. + * + * Andy Adamson + */ + +/* + * Copyright 1993 by OpenVision Technologies, Inc. + * + * Permission to use, copy, modify, distribute, and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appears in all copies and + * that both that copyright notice and this permission notice appear in + * supporting documentation, and that the name of OpenVision not be used + * in advertising or publicity pertaining to distribution of the software + * without specific, written prior permission. OpenVision makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. 
+ * + * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF + * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR + * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LINUX_SUNRPC_GSS_ERR_H +#define _LINUX_SUNRPC_GSS_ERR_H + +#ifdef __KERNEL__ + +typedef unsigned int OM_uint32; + +/* + * Flag bits for context-level services. + */ +#define GSS_C_DELEG_FLAG 1 +#define GSS_C_MUTUAL_FLAG 2 +#define GSS_C_REPLAY_FLAG 4 +#define GSS_C_SEQUENCE_FLAG 8 +#define GSS_C_CONF_FLAG 16 +#define GSS_C_INTEG_FLAG 32 +#define GSS_C_ANON_FLAG 64 +#define GSS_C_PROT_READY_FLAG 128 +#define GSS_C_TRANS_FLAG 256 + +/* + * Credential usage options + */ +#define GSS_C_BOTH 0 +#define GSS_C_INITIATE 1 +#define GSS_C_ACCEPT 2 + +/* + * Status code types for gss_display_status + */ +#define GSS_C_GSS_CODE 1 +#define GSS_C_MECH_CODE 2 + + +/* + * Expiration time of 2^32-1 seconds means infinite lifetime for a + * credential or security context + */ +#define GSS_C_INDEFINITE ((OM_uint32) 0xfffffffful) + + +/* Major status codes */ + +#define GSS_S_COMPLETE 0 + +/* + * Some "helper" definitions to make the status code macros obvious. + */ +#define GSS_C_CALLING_ERROR_OFFSET 24 +#define GSS_C_ROUTINE_ERROR_OFFSET 16 +#define GSS_C_SUPPLEMENTARY_OFFSET 0 +#define GSS_C_CALLING_ERROR_MASK ((OM_uint32) 0377ul) +#define GSS_C_ROUTINE_ERROR_MASK ((OM_uint32) 0377ul) +#define GSS_C_SUPPLEMENTARY_MASK ((OM_uint32) 0177777ul) + +/* + * The macros that test status codes for error conditions. Note that the + * GSS_ERROR() macro has changed slightly from the V1 GSSAPI so that it now + * evaluates its argument only once. 
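+ *
+ * As an illustrative sketch only (not part of the upstream header), a
+ * caller holding a major status word "maj" returned by a GSS-API routine
+ * might test it with these macros roughly as follows:
+ *
+ *	if (GSS_ERROR(maj)) {
+ *		if (GSS_ROUTINE_ERROR(maj) == GSS_S_DEFECTIVE_TOKEN)
+ *			reject_token();		// hypothetical helper
+ *	} else if (GSS_SUPPLEMENTARY_INFO(maj) & GSS_S_CONTINUE_NEEDED) {
+ *		// context establishment needs another round trip
+ *	}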
+ */ +#define GSS_CALLING_ERROR(x) \ + ((x) & (GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET)) +#define GSS_ROUTINE_ERROR(x) \ + ((x) & (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET)) +#define GSS_SUPPLEMENTARY_INFO(x) \ + ((x) & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET)) +#define GSS_ERROR(x) \ + ((x) & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \ + (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))) + +/* + * Now the actual status code definitions + */ + +/* + * Calling errors: + */ +#define GSS_S_CALL_INACCESSIBLE_READ \ + (((OM_uint32) 1ul) << GSS_C_CALLING_ERROR_OFFSET) +#define GSS_S_CALL_INACCESSIBLE_WRITE \ + (((OM_uint32) 2ul) << GSS_C_CALLING_ERROR_OFFSET) +#define GSS_S_CALL_BAD_STRUCTURE \ + (((OM_uint32) 3ul) << GSS_C_CALLING_ERROR_OFFSET) + +/* + * Routine errors: + */ +#define GSS_S_BAD_MECH (((OM_uint32) 1ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_NAME (((OM_uint32) 2ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_NAMETYPE (((OM_uint32) 3ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_BINDINGS (((OM_uint32) 4ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_STATUS (((OM_uint32) 5ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_SIG (((OM_uint32) 6ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_NO_CRED (((OM_uint32) 7ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_NO_CONTEXT (((OM_uint32) 8ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_DEFECTIVE_TOKEN (((OM_uint32) 9ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_DEFECTIVE_CREDENTIAL \ + (((OM_uint32) 10ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_CREDENTIALS_EXPIRED \ + (((OM_uint32) 11ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_CONTEXT_EXPIRED \ + (((OM_uint32) 12ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_FAILURE (((OM_uint32) 13ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_QOP (((OM_uint32) 14ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_UNAUTHORIZED (((OM_uint32) 15ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_UNAVAILABLE (((OM_uint32) 16ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_DUPLICATE_ELEMENT \ + (((OM_uint32) 17ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_NAME_NOT_MN \ + (((OM_uint32) 18ul) << GSS_C_ROUTINE_ERROR_OFFSET) + +/* + * Supplementary info bits: + */ +#define GSS_S_CONTINUE_NEEDED (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 0)) +#define GSS_S_DUPLICATE_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 1)) +#define GSS_S_OLD_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 2)) +#define GSS_S_UNSEQ_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 3)) +#define GSS_S_GAP_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 4)) + +/* XXXX these are not part of the GSSAPI C bindings! 
(but should be) */ + +#define GSS_CALLING_ERROR_FIELD(x) \ + (((x) >> GSS_C_CALLING_ERROR_OFFSET) & GSS_C_CALLING_ERROR_MASK) +#define GSS_ROUTINE_ERROR_FIELD(x) \ + (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK) +#define GSS_SUPPLEMENTARY_INFO_FIELD(x) \ + (((x) >> GSS_C_SUPPLEMENTARY_OFFSET) & GSS_C_SUPPLEMENTARY_MASK) + +/* XXXX This is a necessary evil until the spec is fixed */ +#define GSS_S_CRED_UNAVAIL GSS_S_FAILURE + +#endif /* __KERNEL__ */ +#endif /* __LINUX_SUNRPC_GSS_ERR_H */ diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h new file mode 100644 index 000000000..7df625d41 --- /dev/null +++ b/include/linux/sunrpc/gss_krb5.h @@ -0,0 +1,331 @@ +/* + * linux/include/linux/sunrpc/gss_krb5_types.h + * + * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h, + * lib/gssapi/krb5/gssapiP_krb5.h, and others + * + * Copyright (c) 2000-2008 The Regents of the University of Michigan. + * All rights reserved. + * + * Andy Adamson + * Bruce Fields + */ + +/* + * Copyright 1995 by the Massachusetts Institute of Technology. + * All Rights Reserved. + * + * Export of this software from the United States of America may + * require a specific license from the United States Government. + * It is the responsibility of any person or organization contemplating + * export to obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of M.I.T. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. Furthermore if you modify this software you must label + * your software as modified software and not distribute it in such a + * fashion that it might be confused with the original M.I.T. software. + * M.I.T. makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. + * + */ + +#include +#include +#include +#include + +/* Length of constant used in key derivation */ +#define GSS_KRB5_K5CLENGTH (5) + +/* Maximum key length (in bytes) for the supported crypto algorithms*/ +#define GSS_KRB5_MAX_KEYLEN (32) + +/* Maximum checksum function output for the supported crypto algorithms */ +#define GSS_KRB5_MAX_CKSUM_LEN (20) + +/* Maximum blocksize for the supported crypto algorithms */ +#define GSS_KRB5_MAX_BLOCKSIZE (16) + +struct krb5_ctx; + +struct gss_krb5_enctype { + const u32 etype; /* encryption (key) type */ + const u32 ctype; /* checksum type */ + const char *name; /* "friendly" name */ + const char *encrypt_name; /* crypto encrypt name */ + const char *cksum_name; /* crypto checksum name */ + const u16 signalg; /* signing algorithm */ + const u16 sealalg; /* sealing algorithm */ + const u32 blocksize; /* encryption blocksize */ + const u32 conflen; /* confounder length + (normally the same as + the blocksize) */ + const u32 cksumlength; /* checksum length */ + const u32 keyed_cksum; /* is it a keyed cksum? 
*/ + const u32 keybytes; /* raw key len, in bytes */ + const u32 keylength; /* final key len, in bytes */ + u32 (*encrypt) (struct crypto_skcipher *tfm, + void *iv, void *in, void *out, + int length); /* encryption function */ + u32 (*decrypt) (struct crypto_skcipher *tfm, + void *iv, void *in, void *out, + int length); /* decryption function */ + u32 (*mk_key) (const struct gss_krb5_enctype *gk5e, + struct xdr_netobj *in, + struct xdr_netobj *out); /* complete key generation */ + u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, + struct page **pages); /* v2 encryption function */ + u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, u32 *headskip, + u32 *tailskip); /* v2 decryption function */ +}; + +/* krb5_ctx flags definitions */ +#define KRB5_CTX_FLAG_INITIATOR 0x00000001 +#define KRB5_CTX_FLAG_CFX 0x00000002 +#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004 + +struct krb5_ctx { + int initiate; /* 1 = initiating, 0 = accepting */ + u32 enctype; + u32 flags; + const struct gss_krb5_enctype *gk5e; /* enctype-specific info */ + struct crypto_skcipher *enc; + struct crypto_skcipher *seq; + struct crypto_skcipher *acceptor_enc; + struct crypto_skcipher *initiator_enc; + struct crypto_skcipher *acceptor_enc_aux; + struct crypto_skcipher *initiator_enc_aux; + u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ + u8 cksum[GSS_KRB5_MAX_KEYLEN]; + s32 endtime; + u32 seq_send; + u64 seq_send64; + struct xdr_netobj mech_used; + u8 initiator_sign[GSS_KRB5_MAX_KEYLEN]; + u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN]; + u8 initiator_seal[GSS_KRB5_MAX_KEYLEN]; + u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN]; + u8 initiator_integ[GSS_KRB5_MAX_KEYLEN]; + u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN]; +}; + +extern spinlock_t krb5_seq_lock; + +/* The length of the Kerberos GSS token header */ +#define GSS_KRB5_TOK_HDR_LEN (16) + +#define KG_TOK_MIC_MSG 0x0101 +#define KG_TOK_WRAP_MSG 0x0201 + +#define KG2_TOK_INITIAL 0x0101 +#define KG2_TOK_RESPONSE 0x0202 +#define KG2_TOK_MIC 0x0404 +#define KG2_TOK_WRAP 0x0504 + +#define KG2_TOKEN_FLAG_SENTBYACCEPTOR 0x01 +#define KG2_TOKEN_FLAG_SEALED 0x02 +#define KG2_TOKEN_FLAG_ACCEPTORSUBKEY 0x04 + +#define KG2_RESP_FLAG_ERROR 0x0001 +#define KG2_RESP_FLAG_DELEG_OK 0x0002 + +enum sgn_alg { + SGN_ALG_DES_MAC_MD5 = 0x0000, + SGN_ALG_MD2_5 = 0x0001, + SGN_ALG_DES_MAC = 0x0002, + SGN_ALG_3 = 0x0003, /* not published */ + SGN_ALG_HMAC_MD5 = 0x0011, /* microsoft w2k; no support */ + SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004 +}; +enum seal_alg { + SEAL_ALG_NONE = 0xffff, + SEAL_ALG_DES = 0x0000, + SEAL_ALG_1 = 0x0001, /* not published */ + SEAL_ALG_MICROSOFT_RC4 = 0x0010,/* microsoft w2k; no support */ + SEAL_ALG_DES3KD = 0x0002 +}; + +#define CKSUMTYPE_CRC32 0x0001 +#define CKSUMTYPE_RSA_MD4 0x0002 +#define CKSUMTYPE_RSA_MD4_DES 0x0003 +#define CKSUMTYPE_DESCBC 0x0004 +#define CKSUMTYPE_RSA_MD5 0x0007 +#define CKSUMTYPE_RSA_MD5_DES 0x0008 +#define CKSUMTYPE_NIST_SHA 0x0009 +#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c +#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f +#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010 +#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */ + +/* from gssapi_err_krb5.h */ +#define KG_CCACHE_NOMATCH (39756032L) +#define KG_KEYTAB_NOMATCH (39756033L) +#define KG_TGT_MISSING (39756034L) +#define KG_NO_SUBKEY (39756035L) +#define KG_CONTEXT_ESTABLISHED (39756036L) +#define KG_BAD_SIGN_TYPE (39756037L) +#define KG_BAD_LENGTH (39756038L) +#define KG_CTX_INCOMPLETE (39756039L) +#define KG_CONTEXT (39756040L) +#define 
KG_CRED (39756041L) +#define KG_ENC_DESC (39756042L) +#define KG_BAD_SEQ (39756043L) +#define KG_EMPTY_CCACHE (39756044L) +#define KG_NO_CTYPES (39756045L) + +/* per Kerberos v5 protocol spec crypto types from the wire. + * these get mapped to linux kernel crypto routines. + */ +#define ENCTYPE_NULL 0x0000 +#define ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */ +#define ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */ +#define ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */ +#define ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */ +/* XXX deprecated? */ +#define ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */ +#define ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */ +#define ENCTYPE_DES_HMAC_SHA1 0x0008 +#define ENCTYPE_DES3_CBC_SHA1 0x0010 +#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011 +#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012 +#define ENCTYPE_ARCFOUR_HMAC 0x0017 +#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018 +#define ENCTYPE_UNKNOWN 0x01ff + +/* + * Constants used for key derivation + */ +/* for 3DES */ +#define KG_USAGE_SEAL (22) +#define KG_USAGE_SIGN (23) +#define KG_USAGE_SEQ (24) + +/* from rfc3961 */ +#define KEY_USAGE_SEED_CHECKSUM (0x99) +#define KEY_USAGE_SEED_ENCRYPTION (0xAA) +#define KEY_USAGE_SEED_INTEGRITY (0x55) + +/* from rfc4121 */ +#define KG_USAGE_ACCEPTOR_SEAL (22) +#define KG_USAGE_ACCEPTOR_SIGN (23) +#define KG_USAGE_INITIATOR_SEAL (24) +#define KG_USAGE_INITIATOR_SIGN (25) + +/* + * This compile-time check verifies that we will not exceed the + * slack space allotted by the client and server auth_gss code + * before they call gss_wrap(). + */ +#define GSS_KRB5_MAX_SLACK_NEEDED \ + (GSS_KRB5_TOK_HDR_LEN /* gss token header */ \ + + GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \ + + GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \ + + GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \ + + GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\ + + GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \ + + 4 + 4 /* RPC verifier */ \ + + GSS_KRB5_TOK_HDR_LEN \ + + GSS_KRB5_MAX_CKSUM_LEN) + +u32 +make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, + struct xdr_buf *body, int body_offset, u8 *cksumkey, + unsigned int usage, struct xdr_netobj *cksumout); + +u32 +make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen, + struct xdr_buf *body, int body_offset, u8 *key, + unsigned int usage, struct xdr_netobj *cksum); + +u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *, + struct xdr_netobj *); + +u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *, + struct xdr_netobj *); + +u32 +gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset, + struct xdr_buf *outbuf, struct page **pages); + +u32 +gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, + struct xdr_buf *buf); + + +u32 +krb5_encrypt(struct crypto_skcipher *key, + void *iv, void *in, void *out, int length); + +u32 +krb5_decrypt(struct crypto_skcipher *key, + void *iv, void *in, void *out, int length); + +int +gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf, + int offset, struct page **pages); + +int +gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf, + int offset); + +s32 +krb5_make_seq_num(struct krb5_ctx *kctx, + struct crypto_skcipher *key, + int direction, + u32 seqnum, unsigned char *cksum, unsigned char *buf); + +s32 +krb5_get_seq_num(struct krb5_ctx *kctx, + unsigned char *cksum, + unsigned char *buf, int *direction, u32 *seqnum); + +int +xdr_extend_head(struct xdr_buf *buf, unsigned int base, 
unsigned int shiftlen); + +u32 +krb5_derive_key(const struct gss_krb5_enctype *gk5e, + const struct xdr_netobj *inkey, + struct xdr_netobj *outkey, + const struct xdr_netobj *in_constant, + gfp_t gfp_mask); + +u32 +gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e, + struct xdr_netobj *randombits, + struct xdr_netobj *key); + +u32 +gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e, + struct xdr_netobj *randombits, + struct xdr_netobj *key); + +u32 +gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, + struct page **pages); + +u32 +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, u32 *plainoffset, + u32 *plainlen); + +int +krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, + struct crypto_skcipher *cipher, + unsigned char *cksum); + +int +krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, + struct crypto_skcipher *cipher, + s32 seqnum); +void +gss_krb5_make_confounder(char *p, u32 conflen); diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h new file mode 100644 index 000000000..ec6234eee --- /dev/null +++ b/include/linux/sunrpc/gss_krb5_enctypes.h @@ -0,0 +1,4 @@ +/* + * Dumb way to share this static piece of information with nfsd + */ +#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2" diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h new file mode 100644 index 000000000..1b3751327 --- /dev/null +++ b/include/linux/sunrpc/metrics.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/metrics.h + * + * Declarations for RPC client per-operation metrics + * + * Copyright (C) 2005 Chuck Lever + * + * RPC client per-operation statistics provide latency and retry + * information about each type of RPC procedure in a given RPC program. + * These statistics are not for detailed problem diagnosis, but simply + * to indicate whether the problem is local or remote. + * + * These counters are not meant to be human-readable, but are meant to be + * integrated into system monitoring tools such as "sar" and "iostat". As + * such, the counters are sampled by the tools over time, and are never + * zeroed after a file system is mounted. Moving averages can be computed + * by the tools by taking the difference between two instantaneous samples + * and dividing that by the time between the samples. + * + * The counters are maintained in a single array per RPC client, indexed + * by procedure number. There is no need to maintain separate counter + * arrays per-CPU because these counters are always modified behind locks. + */ + +#ifndef _LINUX_SUNRPC_METRICS_H +#define _LINUX_SUNRPC_METRICS_H + +#include +#include +#include + +#define RPC_IOSTATS_VERS "1.0" + +struct rpc_iostats { + spinlock_t om_lock; + + /* + * These counters give an idea about how many request + * transmissions are required, on average, to complete that + * particular procedure. Some procedures may require more + * than one transmission because the server is unresponsive, + * the client is retransmitting too aggressively, or the + * requests are large and the network is congested. + */ + unsigned long om_ops, /* count of operations */ + om_ntrans, /* count of RPC transmissions */ + om_timeouts; /* count of major timeouts */ + + /* + * These count how many bytes are sent and received for a + * given RPC procedure type. This indicates how much load a + * particular procedure is putting on the network. 
These + * counts include the RPC and ULP headers, and the request + * payload. + */ + unsigned long long om_bytes_sent, /* count of bytes out */ + om_bytes_recv; /* count of bytes in */ + + /* + * The length of time an RPC request waits in queue before + * transmission, the network + server latency of the request, + * and the total time the request spent from init to release + * are measured. + */ + ktime_t om_queue, /* queued for xmit */ + om_rtt, /* RPC RTT */ + om_execute; /* RPC execution */ +} ____cacheline_aligned; + +struct rpc_task; +struct rpc_clnt; + +/* + * EXPORTed functions for managing rpc_iostats structures + */ + +#ifdef CONFIG_PROC_FS + +struct rpc_iostats * rpc_alloc_iostats(struct rpc_clnt *); +void rpc_count_iostats(const struct rpc_task *, + struct rpc_iostats *); +void rpc_count_iostats_metrics(const struct rpc_task *, + struct rpc_iostats *); +void rpc_clnt_show_stats(struct seq_file *, struct rpc_clnt *); +void rpc_free_iostats(struct rpc_iostats *); + +#else /* CONFIG_PROC_FS */ + +static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; } +static inline void rpc_count_iostats(const struct rpc_task *task, + struct rpc_iostats *stats) {} +static inline void rpc_count_iostats_metrics(const struct rpc_task *task, + struct rpc_iostats *stats) +{ +} + +static inline void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt) {} +static inline void rpc_free_iostats(struct rpc_iostats *stats) {} + +#endif /* CONFIG_PROC_FS */ + +#endif /* _LINUX_SUNRPC_METRICS_H */ diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h new file mode 100644 index 000000000..4722b28ec --- /dev/null +++ b/include/linux/sunrpc/msg_prot.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/msg_prot.h + * + * Copyright (C) 1996, Olaf Kirch + */ + +#ifndef _LINUX_SUNRPC_MSGPROT_H_ +#define _LINUX_SUNRPC_MSGPROT_H_ + +#ifdef __KERNEL__ /* user programs should get these from the rpc header files */ + +#define RPC_VERSION 2 + +/* size of an XDR encoding unit in bytes, i.e. 
32bit */ +#define XDR_UNIT (4) + +/* spec defines authentication flavor as an unsigned 32 bit integer */ +typedef u32 rpc_authflavor_t; + +enum rpc_auth_flavors { + RPC_AUTH_NULL = 0, + RPC_AUTH_UNIX = 1, + RPC_AUTH_SHORT = 2, + RPC_AUTH_DES = 3, + RPC_AUTH_KRB = 4, + RPC_AUTH_GSS = 6, + RPC_AUTH_MAXFLAVOR = 8, + /* pseudoflavors: */ + RPC_AUTH_GSS_KRB5 = 390003, + RPC_AUTH_GSS_KRB5I = 390004, + RPC_AUTH_GSS_KRB5P = 390005, + RPC_AUTH_GSS_LKEY = 390006, + RPC_AUTH_GSS_LKEYI = 390007, + RPC_AUTH_GSS_LKEYP = 390008, + RPC_AUTH_GSS_SPKM = 390009, + RPC_AUTH_GSS_SPKMI = 390010, + RPC_AUTH_GSS_SPKMP = 390011, +}; + +/* Maximum size (in bytes) of an rpc credential or verifier */ +#define RPC_MAX_AUTH_SIZE (400) + +enum rpc_msg_type { + RPC_CALL = 0, + RPC_REPLY = 1 +}; + +enum rpc_reply_stat { + RPC_MSG_ACCEPTED = 0, + RPC_MSG_DENIED = 1 +}; + +enum rpc_accept_stat { + RPC_SUCCESS = 0, + RPC_PROG_UNAVAIL = 1, + RPC_PROG_MISMATCH = 2, + RPC_PROC_UNAVAIL = 3, + RPC_GARBAGE_ARGS = 4, + RPC_SYSTEM_ERR = 5, + /* internal use only */ + RPC_DROP_REPLY = 60000, +}; + +enum rpc_reject_stat { + RPC_MISMATCH = 0, + RPC_AUTH_ERROR = 1 +}; + +enum rpc_auth_stat { + RPC_AUTH_OK = 0, + RPC_AUTH_BADCRED = 1, + RPC_AUTH_REJECTEDCRED = 2, + RPC_AUTH_BADVERF = 3, + RPC_AUTH_REJECTEDVERF = 4, + RPC_AUTH_TOOWEAK = 5, + /* RPCSEC_GSS errors */ + RPCSEC_GSS_CREDPROBLEM = 13, + RPCSEC_GSS_CTXPROBLEM = 14 +}; + +#define RPC_MAXNETNAMELEN 256 + +/* + * From RFC 1831: + * + * "A record is composed of one or more record fragments. A record + * fragment is a four-byte header followed by 0 to (2**31) - 1 bytes of + * fragment data. The bytes encode an unsigned binary number; as with + * XDR integers, the byte order is from highest to lowest. The number + * encodes two values -- a boolean which indicates whether the fragment + * is the last fragment of the record (bit value 1 implies the fragment + * is the last fragment) and a 31-bit unsigned binary value which is the + * length in bytes of the fragment's data. The boolean value is the + * highest-order bit of the header; the length is the 31 low-order bits. + * (Note that this record specification is NOT in XDR standard form!)" + * + * The Linux RPC client always sends its requests in a single record + * fragment, limiting the maximum payload size for stream transports to + * 2GB. + */ + +typedef __be32 rpc_fraghdr; + +#define RPC_LAST_STREAM_FRAGMENT (1U << 31) +#define RPC_FRAGMENT_SIZE_MASK (~RPC_LAST_STREAM_FRAGMENT) +#define RPC_MAX_FRAGMENT_SIZE ((1U << 31) - 1) + +/* + * RPC call and reply header size as number of 32bit words (verifier + * size computed separately, see below) + */ +#define RPC_CALLHDRSIZE (6) +#define RPC_REPHDRSIZE (4) + + +/* + * Maximum RPC header size, including authentication, + * as number of 32bit words (see RFCs 1831, 1832). + * + * xid 1 xdr unit = 4 bytes + * mtype 1 + * rpc_version 1 + * program 1 + * prog_version 1 + * procedure 1 + * cred { + * flavor 1 + * length 1 + * body 100 xdr units = 400 bytes + * } + * verf { + * flavor 1 + * length 1 + * body 100 xdr units = 400 bytes + * } + * TOTAL 210 xdr units = 840 bytes + */ +#define RPC_MAX_HEADER_WITH_AUTH \ + (RPC_CALLHDRSIZE + 2*(2+RPC_MAX_AUTH_SIZE/4)) + +#define RPC_MAX_REPHEADER_WITH_AUTH \ + (RPC_REPHDRSIZE + (2 + RPC_MAX_AUTH_SIZE/4)) + +/* + * Well-known netids. 
See: + * + * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml + */ +#define RPCBIND_NETID_UDP "udp" +#define RPCBIND_NETID_TCP "tcp" +#define RPCBIND_NETID_RDMA "rdma" +#define RPCBIND_NETID_SCTP "sctp" +#define RPCBIND_NETID_UDP6 "udp6" +#define RPCBIND_NETID_TCP6 "tcp6" +#define RPCBIND_NETID_RDMA6 "rdma6" +#define RPCBIND_NETID_SCTP6 "sctp6" +#define RPCBIND_NETID_LOCAL "local" + +/* + * Note that RFC 1833 does not put any size restrictions on the + * netid string, but all currently defined netid's fit in 5 bytes. + */ +#define RPCBIND_MAXNETIDLEN (5u) + +/* + * Universal addresses are introduced in RFC 1833 and further spelled + * out in RFC 3530. RPCBIND_MAXUADDRLEN defines a maximum byte length + * of a universal address for use in allocating buffers and character + * arrays. + * + * Quoting RFC 3530, section 2.2: + * + * For TCP over IPv4 and for UDP over IPv4, the format of r_addr is the + * US-ASCII string: + * + * h1.h2.h3.h4.p1.p2 + * + * The prefix, "h1.h2.h3.h4", is the standard textual form for + * representing an IPv4 address, which is always four octets long. + * Assuming big-endian ordering, h1, h2, h3, and h4, are respectively, + * the first through fourth octets each converted to ASCII-decimal. + * Assuming big-endian ordering, p1 and p2 are, respectively, the first + * and second octets each converted to ASCII-decimal. For example, if a + * host, in big-endian order, has an address of 0x0A010307 and there is + * a service listening on, in big endian order, port 0x020F (decimal + * 527), then the complete universal address is "10.1.3.7.2.15". + * + * ... + * + * For TCP over IPv6 and for UDP over IPv6, the format of r_addr is the + * US-ASCII string: + * + * x1:x2:x3:x4:x5:x6:x7:x8.p1.p2 + * + * The suffix "p1.p2" is the service port, and is computed the same way + * as with universal addresses for TCP and UDP over IPv4. The prefix, + * "x1:x2:x3:x4:x5:x6:x7:x8", is the standard textual form for + * representing an IPv6 address as defined in Section 2.2 of [RFC2373]. + * Additionally, the two alternative forms specified in Section 2.2 of + * [RFC2373] are also acceptable. + */ + +#include + +/* Maximum size of the port number part of a universal address */ +#define RPCBIND_MAXUADDRPLEN sizeof(".255.255") + +/* Maximum size of an IPv4 universal address */ +#define RPCBIND_MAXUADDR4LEN \ + (INET_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN) + +/* Maximum size of an IPv6 universal address */ +#define RPCBIND_MAXUADDR6LEN \ + (INET6_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN) + +/* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... 
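+ *
+ * As an illustration of the universal address format quoted above (a
+ * sketch only, not upstream code), an IPv4 address and port could be
+ * rendered into such a buffer with:
+ *
+ *	char uaddr[RPCBIND_MAXUADDR4LEN];
+ *	u16 port = 527;				// 0x020F
+ *
+ *	snprintf(uaddr, sizeof(uaddr), "%u.%u.%u.%u.%u.%u",
+ *		 10, 1, 3, 7, port >> 8, port & 0xff);
+ *
+ * which yields "10.1.3.7.2.15", matching the RFC 3530 example.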
*/ +#define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_MSGPROT_H_ */ diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h new file mode 100644 index 000000000..e90b9bd99 --- /dev/null +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SUNRPC_RPC_PIPE_FS_H +#define _LINUX_SUNRPC_RPC_PIPE_FS_H + +#ifdef __KERNEL__ + +#include + +struct rpc_pipe_dir_head { + struct list_head pdh_entries; + struct dentry *pdh_dentry; +}; + +struct rpc_pipe_dir_object_ops; +struct rpc_pipe_dir_object { + struct list_head pdo_head; + const struct rpc_pipe_dir_object_ops *pdo_ops; + + void *pdo_data; +}; + +struct rpc_pipe_dir_object_ops { + int (*create)(struct dentry *dir, + struct rpc_pipe_dir_object *pdo); + void (*destroy)(struct dentry *dir, + struct rpc_pipe_dir_object *pdo); +}; + +struct rpc_pipe_msg { + struct list_head list; + void *data; + size_t len; + size_t copied; + int errno; +}; + +struct rpc_pipe_ops { + ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t); + ssize_t (*downcall)(struct file *, const char __user *, size_t); + void (*release_pipe)(struct inode *); + int (*open_pipe)(struct inode *); + void (*destroy_msg)(struct rpc_pipe_msg *); +}; + +struct rpc_pipe { + struct list_head pipe; + struct list_head in_upcall; + struct list_head in_downcall; + int pipelen; + int nreaders; + int nwriters; +#define RPC_PIPE_WAIT_FOR_OPEN 1 + int flags; + struct delayed_work queue_timeout; + const struct rpc_pipe_ops *ops; + spinlock_t lock; + struct dentry *dentry; +}; + +struct rpc_inode { + struct inode vfs_inode; + void *private; + struct rpc_pipe *pipe; + wait_queue_head_t waitq; +}; + +static inline struct rpc_inode * +RPC_I(struct inode *inode) +{ + return container_of(inode, struct rpc_inode, vfs_inode); +} + +enum { + SUNRPC_PIPEFS_NFS_PRIO, + SUNRPC_PIPEFS_RPC_PRIO, +}; + +extern int rpc_pipefs_notifier_register(struct notifier_block *); +extern void rpc_pipefs_notifier_unregister(struct notifier_block *); + +enum { + RPC_PIPEFS_MOUNT, + RPC_PIPEFS_UMOUNT, +}; + +extern struct dentry *rpc_d_lookup_sb(const struct super_block *sb, + const unsigned char *dir_name); +extern int rpc_pipefs_init_net(struct net *net); +extern void rpc_pipefs_exit_net(struct net *net); +extern struct super_block *rpc_get_sb_net(const struct net *net); +extern void rpc_put_sb_net(const struct net *net); + +extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *, + char __user *, size_t); +extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *); + +struct rpc_clnt; +extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *); +extern int rpc_remove_client_dir(struct rpc_clnt *); + +extern void rpc_init_pipe_dir_head(struct rpc_pipe_dir_head *pdh); +extern void rpc_init_pipe_dir_object(struct rpc_pipe_dir_object *pdo, + const struct rpc_pipe_dir_object_ops *pdo_ops, + void *pdo_data); +extern int rpc_add_pipe_dir_object(struct net *net, + struct rpc_pipe_dir_head *pdh, + struct rpc_pipe_dir_object *pdo); +extern void rpc_remove_pipe_dir_object(struct net *net, + struct rpc_pipe_dir_head *pdh, + struct rpc_pipe_dir_object *pdo); +extern struct rpc_pipe_dir_object *rpc_find_or_alloc_pipe_dir_object( + struct net *net, + struct rpc_pipe_dir_head *pdh, + int (*match)(struct rpc_pipe_dir_object *, void *), + struct rpc_pipe_dir_object *(*alloc)(void *), + void *data); + +struct 
cache_detail; +extern struct dentry *rpc_create_cache_dir(struct dentry *, + const char *, + umode_t umode, + struct cache_detail *); +extern void rpc_remove_cache_dir(struct dentry *); + +struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags); +void rpc_destroy_pipe_data(struct rpc_pipe *pipe); +extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *, + struct rpc_pipe *); +extern int rpc_unlink(struct dentry *); +extern int register_rpc_pipefs(void); +extern void unregister_rpc_pipefs(void); + +extern bool gssd_running(struct net *net); + +#endif +#endif diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h new file mode 100644 index 000000000..92d182fd8 --- /dev/null +++ b/include/linux/sunrpc/rpc_rdma.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2015-2017 Oracle. All rights reserved. + * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the BSD-type + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * Neither the name of the Network Appliance, Inc. nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _LINUX_SUNRPC_RPC_RDMA_H +#define _LINUX_SUNRPC_RPC_RDMA_H + +#include +#include + +#define RPCRDMA_VERSION 1 +#define rpcrdma_version cpu_to_be32(RPCRDMA_VERSION) + +enum { + RPCRDMA_V1_DEF_INLINE_SIZE = 1024, +}; + +/* + * XDR sizes, in quads + */ +enum { + rpcrdma_fixed_maxsz = 4, + rpcrdma_segment_maxsz = 4, + rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz, +}; + +/* + * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks + */ +#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7) +#define RPCRDMA_HDRLEN_ERR (sizeof(__be32) * 5) + +enum rpcrdma_errcode { + ERR_VERS = 1, + ERR_CHUNK = 2 +}; + +enum rpcrdma_proc { + RDMA_MSG = 0, /* An RPC call or reply msg */ + RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */ + RDMA_MSGP = 2, /* An RPC call or reply msg with padding */ + RDMA_DONE = 3, /* Client signals reply completion */ + RDMA_ERROR = 4 /* An RPC RDMA encoding error */ +}; + +#define rdma_msg cpu_to_be32(RDMA_MSG) +#define rdma_nomsg cpu_to_be32(RDMA_NOMSG) +#define rdma_msgp cpu_to_be32(RDMA_MSGP) +#define rdma_done cpu_to_be32(RDMA_DONE) +#define rdma_error cpu_to_be32(RDMA_ERROR) + +#define err_vers cpu_to_be32(ERR_VERS) +#define err_chunk cpu_to_be32(ERR_CHUNK) + +/* + * Private extension to RPC-over-RDMA Version One. + * Message passed during RDMA-CM connection set-up. + * + * Add new fields at the end, and don't permute existing + * fields. + */ +struct rpcrdma_connect_private { + __be32 cp_magic; + u8 cp_version; + u8 cp_flags; + u8 cp_send_size; + u8 cp_recv_size; +} __packed; + +#define rpcrdma_cmp_magic __cpu_to_be32(0xf6ab0e18) + +enum { + RPCRDMA_CMP_VERSION = 1, + RPCRDMA_CMP_F_SND_W_INV_OK = BIT(0), +}; + +static inline u8 +rpcrdma_encode_buffer_size(unsigned int size) +{ + return (size >> 10) - 1; +} + +static inline unsigned int +rpcrdma_decode_buffer_size(u8 val) +{ + return ((unsigned int)val + 1) << 10; +} + +#endif /* _LINUX_SUNRPC_RPC_RDMA_H */ diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h new file mode 100644 index 000000000..ad2e243f3 --- /dev/null +++ b/include/linux/sunrpc/sched.h @@ -0,0 +1,300 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/sched.h + * + * Scheduling primitives for kernel Sun RPC. + * + * Copyright (C) 1996, Olaf Kirch + */ + +#ifndef _LINUX_SUNRPC_SCHED_H_ +#define _LINUX_SUNRPC_SCHED_H_ + +#include +#include +#include +#include +#include +#include +#include + +/* + * This is the actual RPC procedure call info. 
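+ *
+ * A minimal sketch (illustrative only; the surrounding variables are
+ * hypothetical) of how a caller fills one in before handing it to the
+ * RPC client:
+ *
+ *	struct rpc_message msg = {
+ *		.rpc_proc = &my_procinfo,	// procedure table entry
+ *		.rpc_argp = &args,		// call arguments
+ *		.rpc_resp = &res,		// decoded reply lands here
+ *		.rpc_cred = NULL,		// use the client's default cred
+ *	};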
+ */ +struct rpc_procinfo; +struct rpc_message { + const struct rpc_procinfo *rpc_proc; /* Procedure information */ + void * rpc_argp; /* Arguments */ + void * rpc_resp; /* Result */ + struct rpc_cred * rpc_cred; /* Credentials */ +}; + +struct rpc_call_ops; +struct rpc_wait_queue; +struct rpc_wait { + struct list_head list; /* wait queue links */ + struct list_head links; /* Links to related tasks */ + struct list_head timer_list; /* Timer list */ + unsigned long expires; +}; + +/* + * This is the RPC task struct + */ +struct rpc_task { + atomic_t tk_count; /* Reference count */ + int tk_status; /* result of last operation */ + struct list_head tk_task; /* global list of tasks */ + + /* + * callback to be executed after waking up + * action next procedure for async tasks + */ + void (*tk_callback)(struct rpc_task *); + void (*tk_action)(struct rpc_task *); + + unsigned long tk_timeout; /* timeout for rpc_sleep() */ + unsigned long tk_runstate; /* Task run status */ + + struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */ + union { + struct work_struct tk_work; /* Async task work queue */ + struct rpc_wait tk_wait; /* RPC wait */ + } u; + + /* + * RPC call state + */ + struct rpc_message tk_msg; /* RPC call info */ + void * tk_calldata; /* Caller private data */ + const struct rpc_call_ops *tk_ops; /* Caller callbacks */ + + struct rpc_clnt * tk_client; /* RPC client */ + struct rpc_xprt * tk_xprt; /* Transport */ + + struct rpc_rqst * tk_rqstp; /* RPC request */ + + struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could + * be any workqueue + */ + ktime_t tk_start; /* RPC task init timestamp */ + + pid_t tk_owner; /* Process id for batching tasks */ + unsigned short tk_flags; /* misc flags */ + unsigned short tk_timeouts; /* maj timeouts */ + +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) + unsigned short tk_pid; /* debugging aid */ +#endif + unsigned char tk_priority : 2,/* Task priority */ + tk_garb_retry : 2, + tk_cred_retry : 2, + tk_rebind_retry : 2; +}; + +typedef void (*rpc_action)(struct rpc_task *); + +struct rpc_call_ops { + void (*rpc_call_prepare)(struct rpc_task *, void *); + void (*rpc_call_done)(struct rpc_task *, void *); + void (*rpc_count_stats)(struct rpc_task *, void *); + void (*rpc_release)(void *); +}; + +struct rpc_task_setup { + struct rpc_task *task; + struct rpc_clnt *rpc_client; + struct rpc_xprt *rpc_xprt; + const struct rpc_message *rpc_message; + const struct rpc_call_ops *callback_ops; + void *callback_data; + struct workqueue_struct *workqueue; + unsigned short flags; + signed char priority; +}; + +/* + * RPC task flags + */ +#define RPC_TASK_ASYNC 0x0001 /* is an async task */ +#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */ +#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ +#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ +#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ +#define RPC_TASK_KILLED 0x0100 /* task was killed */ +#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ +#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ +#define RPC_TASK_SENT 0x0800 /* message was sent */ +#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ +#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */ +#define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */ + +#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) +#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) +#define RPC_DO_ROOTOVERRIDE(t) 
((t)->tk_flags & RPC_TASK_ROOTCREDS) +#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) +#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) +#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) +#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) + +#define RPC_TASK_RUNNING 0 +#define RPC_TASK_QUEUED 1 +#define RPC_TASK_ACTIVE 2 +#define RPC_TASK_MSG_RECV 3 +#define RPC_TASK_MSG_RECV_WAIT 4 + +#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) +#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) +#define rpc_test_and_set_running(t) \ + test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) +#define rpc_clear_running(t) \ + do { \ + smp_mb__before_atomic(); \ + clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \ + smp_mb__after_atomic(); \ + } while (0) + +#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) +#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) +#define rpc_clear_queued(t) \ + do { \ + smp_mb__before_atomic(); \ + clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \ + smp_mb__after_atomic(); \ + } while (0) + +#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) + +/* + * Task priorities. + * Note: if you change these, you must also change + * the task initialization definitions below. + */ +#define RPC_PRIORITY_LOW (-1) +#define RPC_PRIORITY_NORMAL (0) +#define RPC_PRIORITY_HIGH (1) +#define RPC_PRIORITY_PRIVILEGED (2) +#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW) + +struct rpc_timer { + struct timer_list timer; + struct list_head list; + unsigned long expires; +}; + +/* + * RPC synchronization objects + */ +struct rpc_wait_queue { + spinlock_t lock; + struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ + unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ + unsigned char priority; /* current priority */ + unsigned char nr; /* # tasks remaining for cookie */ + unsigned short qlen; /* total # tasks waiting in queue */ + struct rpc_timer timer_list; +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) + const char * name; +#endif +}; + +/* + * This is the # requests to send consecutively + * from a single cookie. The aim is to improve + * performance of NFS operations such as read/write. 
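+ *
+ * (For reference: with the priority constants above, RPC_NR_PRIORITY
+ * works out to 1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW
+ * = 1 + 2 - (-1) = 4, so a priority wait queue carries four task lists,
+ * one per priority level.)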
+ */ +#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) + +/* + * Function prototypes + */ +struct rpc_task *rpc_new_task(const struct rpc_task_setup *); +struct rpc_task *rpc_run_task(const struct rpc_task_setup *); +struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req); +void rpc_put_task(struct rpc_task *); +void rpc_put_task_async(struct rpc_task *); +void rpc_exit_task(struct rpc_task *); +void rpc_exit(struct rpc_task *, int); +void rpc_release_calldata(const struct rpc_call_ops *, void *); +void rpc_killall_tasks(struct rpc_clnt *); +void rpc_execute(struct rpc_task *); +void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *); +void rpc_init_wait_queue(struct rpc_wait_queue *, const char *); +void rpc_destroy_wait_queue(struct rpc_wait_queue *); +void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, + rpc_action action); +void rpc_sleep_on_priority(struct rpc_wait_queue *, + struct rpc_task *, + rpc_action action, + int priority); +void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq, + struct rpc_wait_queue *queue, + struct rpc_task *task); +void rpc_wake_up_queued_task(struct rpc_wait_queue *, + struct rpc_task *); +void rpc_wake_up(struct rpc_wait_queue *); +struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); +struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq, + struct rpc_wait_queue *, + bool (*)(struct rpc_task *, void *), + void *); +struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *, + bool (*)(struct rpc_task *, void *), + void *); +void rpc_wake_up_status(struct rpc_wait_queue *, int); +void rpc_delay(struct rpc_task *, unsigned long); +int rpc_malloc(struct rpc_task *); +void rpc_free(struct rpc_task *); +int rpciod_up(void); +void rpciod_down(void); +int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +struct net; +void rpc_show_tasks(struct net *); +#endif +int rpc_init_mempool(void); +void rpc_destroy_mempool(void); +extern struct workqueue_struct *rpciod_workqueue; +extern struct workqueue_struct *xprtiod_workqueue; +void rpc_prepare_task(struct rpc_task *task); + +static inline int rpc_wait_for_completion_task(struct rpc_task *task) +{ + return __rpc_wait_for_completion_task(task, NULL); +} + +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) +static inline const char * rpc_qname(const struct rpc_wait_queue *q) +{ + return ((q && q->name) ? 
q->name : "unknown"); +} + +static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, + const char *name) +{ + q->name = name; +} +#else +static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, + const char *name) +{ +} +#endif + +#if IS_ENABLED(CONFIG_SUNRPC_SWAP) +int rpc_clnt_swap_activate(struct rpc_clnt *clnt); +void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt); +#else +static inline int +rpc_clnt_swap_activate(struct rpc_clnt *clnt) +{ + return -EINVAL; +} + +static inline void +rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) +{ +} +#endif /* CONFIG_SUNRPC_SWAP */ + +#endif /* _LINUX_SUNRPC_SCHED_H_ */ diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h new file mode 100644 index 000000000..84b92b4ad --- /dev/null +++ b/include/linux/sunrpc/stats.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/stats.h + * + * Client statistics collection for SUN RPC + * + * Copyright (C) 1996 Olaf Kirch + */ + +#ifndef _LINUX_SUNRPC_STATS_H +#define _LINUX_SUNRPC_STATS_H + +#include + +struct rpc_stat { + const struct rpc_program *program; + + unsigned int netcnt, + netudpcnt, + nettcpcnt, + nettcpconn, + netreconn; + unsigned int rpccnt, + rpcretrans, + rpcauthrefresh, + rpcgarbage; +}; + +struct svc_stat { + struct svc_program * program; + + unsigned int netcnt, + netudpcnt, + nettcpcnt, + nettcpconn; + unsigned int rpccnt, + rpcbadfmt, + rpcbadauth, + rpcbadclnt; +}; + +struct net; +#ifdef CONFIG_PROC_FS +int rpc_proc_init(struct net *); +void rpc_proc_exit(struct net *); +#else +static inline int rpc_proc_init(struct net *net) +{ + return 0; +} + +static inline void rpc_proc_exit(struct net *net) +{ +} +#endif + +#ifdef MODULE +void rpc_modcount(struct inode *, int); +#endif + +#ifdef CONFIG_PROC_FS +struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *); +void rpc_proc_unregister(struct net *,const char *); +void rpc_proc_zero(const struct rpc_program *); +struct proc_dir_entry * svc_proc_register(struct net *, struct svc_stat *, + const struct file_operations *); +void svc_proc_unregister(struct net *, const char *); + +void svc_seq_show(struct seq_file *, + const struct svc_stat *); +#else + +static inline struct proc_dir_entry *rpc_proc_register(struct net *net, struct rpc_stat *s) { return NULL; } +static inline void rpc_proc_unregister(struct net *net, const char *p) {} +static inline void rpc_proc_zero(const struct rpc_program *p) {} + +static inline struct proc_dir_entry *svc_proc_register(struct net *net, struct svc_stat *s, + const struct file_operations *f) { return NULL; } +static inline void svc_proc_unregister(struct net *net, const char *p) {} + +static inline void svc_seq_show(struct seq_file *seq, + const struct svc_stat *st) {} +#endif + +#endif /* _LINUX_SUNRPC_STATS_H */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h new file mode 100644 index 000000000..c46abf35c --- /dev/null +++ b/include/linux/sunrpc/svc.h @@ -0,0 +1,524 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/svc.h + * + * RPC server declarations. 
+ * + * Copyright (C) 1995, 1996 Olaf Kirch + */ + + +#ifndef SUNRPC_SVC_H +#define SUNRPC_SVC_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* statistics for svc_pool structures */ +struct svc_pool_stats { + atomic_long_t packets; + unsigned long sockets_queued; + atomic_long_t threads_woken; + atomic_long_t threads_timedout; +}; + +/* + * + * RPC service thread pool. + * + * Pool of threads and temporary sockets. Generally there is only + * a single one of these per RPC service, but on NUMA machines those + * services that can benefit from it (i.e. nfs but not lockd) will + * have one pool per NUMA node. This optimisation reduces cross- + * node traffic on multi-node NUMA NFS servers. + */ +struct svc_pool { + unsigned int sp_id; /* pool id; also node id on NUMA */ + spinlock_t sp_lock; /* protects all fields */ + struct list_head sp_sockets; /* pending sockets */ + unsigned int sp_nrthreads; /* # of threads in pool */ + struct list_head sp_all_threads; /* all server threads */ + struct svc_pool_stats sp_stats; /* statistics on pool operation */ +#define SP_TASK_PENDING (0) /* still work to do even if no + * xprt is queued. */ +#define SP_CONGESTED (1) + unsigned long sp_flags; +} ____cacheline_aligned_in_smp; + +struct svc_serv; + +struct svc_serv_ops { + /* Callback to use when last thread exits. */ + void (*svo_shutdown)(struct svc_serv *, struct net *); + + /* function for service threads to run */ + int (*svo_function)(void *); + + /* queue up a transport for servicing */ + void (*svo_enqueue_xprt)(struct svc_xprt *); + + /* set up thread (or whatever) execution context */ + int (*svo_setup)(struct svc_serv *, struct svc_pool *, int); + + /* optional module to count when adding threads (pooled svcs only) */ + struct module *svo_module; +}; + +/* + * RPC service. + * + * An RPC service is a ``daemon,'' possibly multithreaded, which + * receives and processes incoming RPC messages. + * It has one or more transport sockets associated with it, and maintains + * a list of idle threads waiting for input. + * + * We currently do not support more than one RPC program per daemon. + */ +struct svc_serv { + struct svc_program * sv_program; /* RPC program */ + struct svc_stat * sv_stats; /* RPC statistics */ + spinlock_t sv_lock; + unsigned int sv_nrthreads; /* # of server threads */ + unsigned int sv_maxconn; /* max connections allowed or + * '0' causing max to be based + * on number of threads. 
*/ + + unsigned int sv_max_payload; /* datagram payload size */ + unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */ + unsigned int sv_xdrsize; /* XDR buffer size */ + struct list_head sv_permsocks; /* all permanent sockets */ + struct list_head sv_tempsocks; /* all temporary sockets */ + int sv_tmpcnt; /* count of temporary sockets */ + struct timer_list sv_temptimer; /* timer for aging temporary sockets */ + + char * sv_name; /* service name */ + + unsigned int sv_nrpools; /* number of thread pools */ + struct svc_pool * sv_pools; /* array of thread pools */ + const struct svc_serv_ops *sv_ops; /* server operations */ +#if defined(CONFIG_SUNRPC_BACKCHANNEL) + struct list_head sv_cb_list; /* queue for callback requests + * that arrive over the same + * connection */ + spinlock_t sv_cb_lock; /* protects the svc_cb_list */ + wait_queue_head_t sv_cb_waitq; /* sleep here if there are no + * entries in the svc_cb_list */ + struct svc_xprt *sv_bc_xprt; /* callback on fore channel */ +#endif /* CONFIG_SUNRPC_BACKCHANNEL */ +}; + +/* + * We use sv_nrthreads as a reference count. svc_destroy() drops + * this refcount, so we need to bump it up around operations that + * change the number of threads. Horrible, but there it is. + * Should be called with the "service mutex" held. + */ +static inline void svc_get(struct svc_serv *serv) +{ + serv->sv_nrthreads++; +} + +/* + * Maximum payload size supported by a kernel RPC server. + * This is use to determine the max number of pages nfsd is + * willing to return in a single READ operation. + * + * These happen to all be powers of 2, which is not strictly + * necessary but helps enforce the real limitation, which is + * that they should be multiples of PAGE_SIZE. + * + * For UDP transports, a block plus NFS,RPC, and UDP headers + * has to fit into the IP datagram limit of 64K. The largest + * feasible number for all known page sizes is probably 48K, + * but we choose 32K here. This is the same as the historical + * Linux limit; someone who cares more about NFS/UDP performance + * can test a larger number. + * + * For TCP transports we have more freedom. A size of 1MB is + * chosen to match the client limit. Other OSes are known to + * have larger limits, but those numbers are probably beyond + * the point of diminishing returns. + */ +#define RPCSVC_MAXPAYLOAD (1*1024*1024u) +#define RPCSVC_MAXPAYLOAD_TCP RPCSVC_MAXPAYLOAD +#define RPCSVC_MAXPAYLOAD_UDP (32*1024u) + +extern u32 svc_max_payload(const struct svc_rqst *rqstp); + +/* + * RPC Requsts and replies are stored in one or more pages. + * We maintain an array of pages for each server thread. + * Requests are copied into these pages as they arrive. Remaining + * pages are available to write the reply into. + * + * Pages are sent using ->sendpage so each server thread needs to + * allocate more to replace those used in sending. To help keep track + * of these pages we have a receive list where all pages initialy live, + * and a send list where pages are moved to when there are to be part + * of a reply. + * + * We use xdr_buf for holding responses as it fits well with NFS + * read responses (that have a header, and some data pages, and possibly + * a tail) and means we can share some client side routines. + * + * The xdr_buf.head kvec always points to the first page in the rq_*pages + * list. The xdr_buf.pages pointer points to the second page on that + * list. xdr_buf.tail points to the end of the first page. 
+ * This assumes that the non-page part of an rpc reply will fit + * in a page - NFSd ensures this. lockd also has no trouble. + * + * Each request/reply pair can have at most one "payload", plus two pages, + * one for the request, and one for the reply. + * We using ->sendfile to return read data, we might need one extra page + * if the request is not page-aligned. So add another '1'. + */ +#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \ + + 2 + 1) + +static inline u32 svc_getnl(struct kvec *iov) +{ + __be32 val, *vp; + vp = iov->iov_base; + val = *vp++; + iov->iov_base = (void*)vp; + iov->iov_len -= sizeof(__be32); + return ntohl(val); +} + +static inline void svc_putnl(struct kvec *iov, u32 val) +{ + __be32 *vp = iov->iov_base + iov->iov_len; + *vp = htonl(val); + iov->iov_len += sizeof(__be32); +} + +static inline __be32 svc_getu32(struct kvec *iov) +{ + __be32 val, *vp; + vp = iov->iov_base; + val = *vp++; + iov->iov_base = (void*)vp; + iov->iov_len -= sizeof(__be32); + return val; +} + +static inline void svc_ungetu32(struct kvec *iov) +{ + __be32 *vp = (__be32 *)iov->iov_base; + iov->iov_base = (void *)(vp - 1); + iov->iov_len += sizeof(*vp); +} + +static inline void svc_putu32(struct kvec *iov, __be32 val) +{ + __be32 *vp = iov->iov_base + iov->iov_len; + *vp = val; + iov->iov_len += sizeof(__be32); +} + +/* + * The context of a single thread, including the request currently being + * processed. + */ +struct svc_rqst { + struct list_head rq_all; /* all threads list */ + struct rcu_head rq_rcu_head; /* for RCU deferred kfree */ + struct svc_xprt * rq_xprt; /* transport ptr */ + + struct sockaddr_storage rq_addr; /* peer address */ + size_t rq_addrlen; + struct sockaddr_storage rq_daddr; /* dest addr of request + * - reply from here */ + size_t rq_daddrlen; + + struct svc_serv * rq_server; /* RPC service definition */ + struct svc_pool * rq_pool; /* thread pool */ + const struct svc_procedure *rq_procinfo;/* procedure info */ + struct auth_ops * rq_authop; /* authentication flavour */ + struct svc_cred rq_cred; /* auth info */ + void * rq_xprt_ctxt; /* transport specific context ptr */ + struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ + + size_t rq_xprt_hlen; /* xprt header len */ + struct xdr_buf rq_arg; + struct xdr_buf rq_res; + struct page *rq_pages[RPCSVC_MAXPAGES + 1]; + struct page * *rq_respages; /* points into rq_pages */ + struct page * *rq_next_page; /* next reply page to use */ + struct page * *rq_page_end; /* one past the last page */ + + struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. 
*/ + + __be32 rq_xid; /* transmission id */ + u32 rq_prog; /* program number */ + u32 rq_vers; /* program version */ + u32 rq_proc; /* procedure number */ + u32 rq_prot; /* IP protocol */ + int rq_cachetype; /* catering to nfsd */ +#define RQ_SECURE (0) /* secure port */ +#define RQ_LOCAL (1) /* local request */ +#define RQ_USEDEFERRAL (2) /* use deferral */ +#define RQ_DROPME (3) /* drop current reply */ +#define RQ_SPLICE_OK (4) /* turned off in gss privacy + * to prevent encrypting page + * cache pages */ +#define RQ_VICTIM (5) /* about to be shut down */ +#define RQ_BUSY (6) /* request is busy */ +#define RQ_DATA (7) /* request has data */ +#define RQ_AUTHERR (8) /* Request status is auth error */ + unsigned long rq_flags; /* flags field */ + ktime_t rq_qtime; /* enqueue time */ + + void * rq_argp; /* decoded arguments */ + void * rq_resp; /* xdr'd results */ + void * rq_auth_data; /* flavor-specific data */ + int rq_auth_slack; /* extra space xdr code + * should leave in head + * for krb5i, krb5p. + */ + int rq_reserved; /* space on socket outq + * reserved for this request + */ + ktime_t rq_stime; /* start time */ + + struct cache_req rq_chandle; /* handle passed to caches for + * request delaying + */ + /* Catering to nfsd */ + struct auth_domain * rq_client; /* RPC peer info */ + struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ + struct svc_cacherep * rq_cacherep; /* cache info */ + struct task_struct *rq_task; /* service thread */ + spinlock_t rq_lock; /* per-request lock */ + struct net *rq_bc_net; /* pointer to backchannel's + * net namespace + */ +}; + +#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net) + +/* + * Rigorous type checking on sockaddr type conversions + */ +static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst) +{ + return (struct sockaddr_in *) &rqst->rq_addr; +} + +static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst) +{ + return (struct sockaddr_in6 *) &rqst->rq_addr; +} + +static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst) +{ + return (struct sockaddr *) &rqst->rq_addr; +} + +static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst) +{ + return (struct sockaddr_in *) &rqst->rq_daddr; +} + +static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst) +{ + return (struct sockaddr_in6 *) &rqst->rq_daddr; +} + +static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst) +{ + return (struct sockaddr *) &rqst->rq_daddr; +} + +/* + * Check buffer bounds after decoding arguments + */ +static inline int +xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p) +{ + char *cp = (char *)p; + struct kvec *vec = &rqstp->rq_arg.head[0]; + return cp >= (char*)vec->iov_base + && cp <= (char*)vec->iov_base + vec->iov_len; +} + +static inline int +xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p) +{ + struct kvec *vec = &rqstp->rq_res.head[0]; + char *cp = (char*)p; + + vec->iov_len = cp - (char*)vec->iov_base; + + return vec->iov_len <= PAGE_SIZE; +} + +static inline void svc_free_res_pages(struct svc_rqst *rqstp) +{ + while (rqstp->rq_next_page != rqstp->rq_respages) { + struct page **pp = --rqstp->rq_next_page; + if (*pp) { + put_page(*pp); + *pp = NULL; + } + } +} + +struct svc_deferred_req { + u32 prot; /* protocol (UDP or TCP) */ + struct svc_xprt *xprt; + struct sockaddr_storage addr; /* where reply must go */ + size_t addrlen; + struct sockaddr_storage daddr; /* where reply must come from */ + size_t daddrlen; + struct 
cache_deferred_req handle; + size_t xprt_hlen; + int argslen; + __be32 args[0]; +}; + +/* + * List of RPC programs on the same transport endpoint + */ +struct svc_program { + struct svc_program * pg_next; /* other programs (same xprt) */ + u32 pg_prog; /* program number */ + unsigned int pg_lovers; /* lowest version */ + unsigned int pg_hivers; /* highest version */ + unsigned int pg_nvers; /* number of versions */ + const struct svc_version **pg_vers; /* version array */ + char * pg_name; /* service name */ + char * pg_class; /* class name: services sharing authentication */ + struct svc_stat * pg_stats; /* rpc statistics */ + int (*pg_authenticate)(struct svc_rqst *); +}; + +/* + * RPC program version + */ +struct svc_version { + u32 vs_vers; /* version number */ + u32 vs_nproc; /* number of procedures */ + const struct svc_procedure *vs_proc; /* per-procedure info */ + unsigned int *vs_count; /* call counts */ + u32 vs_xdrsize; /* xdrsize needed for this version */ + + /* Don't register with rpcbind */ + bool vs_hidden; + + /* Don't care if the rpcbind registration fails */ + bool vs_rpcb_optnl; + + /* Need xprt with congestion control */ + bool vs_need_cong_ctrl; + + /* Override dispatch function (e.g. when caching replies). + * A return value of 0 means drop the request. + * vs_dispatch == NULL means use default dispatcher. + */ + int (*vs_dispatch)(struct svc_rqst *, __be32 *); +}; + +/* + * RPC procedure info + */ +struct svc_procedure { + /* process the request: */ + __be32 (*pc_func)(struct svc_rqst *); + /* XDR decode args: */ + int (*pc_decode)(struct svc_rqst *, __be32 *data); + /* XDR encode result: */ + int (*pc_encode)(struct svc_rqst *, __be32 *data); + /* XDR free result: */ + void (*pc_release)(struct svc_rqst *); + unsigned int pc_argsize; /* argument struct size */ + unsigned int pc_ressize; /* result struct size */ + unsigned int pc_cachetype; /* cache info (NFS) */ + unsigned int pc_xdrressize; /* maximum size of XDR reply */ +}; + +/* + * Mode for mapping cpus to pools. + */ +enum { + SVC_POOL_AUTO = -1, /* choose one of the others */ + SVC_POOL_GLOBAL, /* no mapping, just a single global pool + * (legacy & UP mode) */ + SVC_POOL_PERCPU, /* one pool per cpu */ + SVC_POOL_PERNODE /* one pool per numa node */ +}; + +struct svc_pool_map { + int count; /* How many svc_servs use us */ + int mode; /* Note: int not enum to avoid + * warnings about "enumeration value + * not handled in switch" */ + unsigned int npools; + unsigned int *pool_to; /* maps pool id to cpu or node */ + unsigned int *to_pool; /* maps cpu or node to pool id */ +}; + +extern struct svc_pool_map svc_pool_map; + +/* + * Function prototypes. 
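For illustration, a minimal sketch of how a service could describe itself with the svc_procedure/svc_version/svc_program structures above; the program number, procedure body and XDR sizes are invented, and the decode/encode hooks a real service supplies are omitted. A server would then typically be created around such a program with svc_create_pooled() and sized with svc_set_num_threads(), whose prototypes appear just below.

    #include <linux/kernel.h>
    #include <linux/sunrpc/svc.h>
    #include <linux/sunrpc/xdr.h>

    /* Procedure 0 ("NULL"): takes no arguments, returns no results. */
    static __be32 demo_proc_null(struct svc_rqst *rqstp)
    {
            return rpc_success;
    }

    static const struct svc_procedure demo_procedures[] = {
            [0] = {
                    .pc_func        = demo_proc_null,
                    .pc_argsize     = 0,
                    .pc_ressize     = 0,
                    .pc_xdrressize  = 1,    /* reply size in XDR words (invented) */
            },
    };

    static unsigned int demo_call_counts[ARRAY_SIZE(demo_procedures)];

    static const struct svc_version demo_version1 = {
            .vs_vers        = 1,
            .vs_nproc       = ARRAY_SIZE(demo_procedures),
            .vs_proc        = demo_procedures,
            .vs_count       = demo_call_counts,
            .vs_xdrsize     = 2 * 1024,     /* invented */
    };

    static const struct svc_version *demo_versions[] = {
            [1] = &demo_version1,
    };

    static struct svc_program demo_program = {
            .pg_prog        = 400999,       /* invented program number */
            .pg_nvers       = ARRAY_SIZE(demo_versions),
            .pg_vers        = demo_versions,
            .pg_name        = "demo",
            .pg_class       = "demo",
    };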
+ */ +int svc_rpcb_setup(struct svc_serv *serv, struct net *net); +void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); +int svc_bind(struct svc_serv *serv, struct net *net); +struct svc_serv *svc_create(struct svc_program *, unsigned int, + const struct svc_serv_ops *); +struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv, + struct svc_pool *pool, int node); +struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, + struct svc_pool *pool, int node); +void svc_rqst_free(struct svc_rqst *); +void svc_exit_thread(struct svc_rqst *); +unsigned int svc_pool_map_get(void); +void svc_pool_map_put(void); +struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, + const struct svc_serv_ops *); +int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); +int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int); +int svc_pool_stats_open(struct svc_serv *serv, struct file *file); +void svc_destroy(struct svc_serv *); +void svc_shutdown_net(struct svc_serv *, struct net *); +int svc_process(struct svc_rqst *); +int bc_svc_process(struct svc_serv *, struct rpc_rqst *, + struct svc_rqst *); +int svc_register(const struct svc_serv *, struct net *, const int, + const unsigned short, const unsigned short); + +void svc_wake_up(struct svc_serv *); +void svc_reserve(struct svc_rqst *rqstp, int space); +struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); +char * svc_print_addr(struct svc_rqst *, char *, size_t); +unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, + struct page **pages, + struct kvec *first, size_t total); +char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, + struct kvec *first, void *p, + size_t total); +__be32 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err); + +#define RPC_MAX_ADDRBUFLEN (63U) + +/* + * When we want to reduce the size of the reserved space in the response + * buffer, we need to take into account the size of any checksum data that + * may be at the end of the packet. This is difficult to determine exactly + * for all cases without actually generating the checksum, so we just use a + * static value. + */ +static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space) +{ + svc_reserve(rqstp, space + rqstp->rq_auth_slack); +} + +#endif /* SUNRPC_SVC_H */ diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h new file mode 100644 index 000000000..3e3214cca --- /dev/null +++ b/include/linux/sunrpc/svc_rdma.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the BSD-type + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * Neither the name of the Network Appliance, Inc. 
nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Tom Tucker + */ + +#ifndef SVC_RDMA_H +#define SVC_RDMA_H +#include +#include +#include +#include +#include +#define SVCRDMA_DEBUG + +/* Default and maximum inline threshold sizes */ +enum { + RPCRDMA_DEF_INLINE_THRESH = 4096, + RPCRDMA_MAX_INLINE_THRESH = 65536 +}; + +/* RPC/RDMA parameters and stats */ +extern unsigned int svcrdma_ord; +extern unsigned int svcrdma_max_requests; +extern unsigned int svcrdma_max_bc_requests; +extern unsigned int svcrdma_max_req_size; + +extern atomic_t rdma_stat_recv; +extern atomic_t rdma_stat_read; +extern atomic_t rdma_stat_write; +extern atomic_t rdma_stat_sq_starve; +extern atomic_t rdma_stat_rq_starve; +extern atomic_t rdma_stat_rq_poll; +extern atomic_t rdma_stat_rq_prod; +extern atomic_t rdma_stat_sq_poll; +extern atomic_t rdma_stat_sq_prod; + +struct svcxprt_rdma { + struct svc_xprt sc_xprt; /* SVC transport structure */ + struct rdma_cm_id *sc_cm_id; /* RDMA connection id */ + struct list_head sc_accept_q; /* Conn. waiting accept */ + int sc_ord; /* RDMA read limit */ + int sc_max_send_sges; + bool sc_snd_w_inv; /* OK to use Send With Invalidate */ + + atomic_t sc_sq_avail; /* SQEs ready to be consumed */ + unsigned int sc_sq_depth; /* Depth of SQ */ + __be32 sc_fc_credits; /* Forward credits */ + u32 sc_max_requests; /* Max requests */ + u32 sc_max_bc_requests;/* Backward credits */ + int sc_max_req_size; /* Size of each RQ WR buf */ + u8 sc_port_num; + + struct ib_pd *sc_pd; + + spinlock_t sc_send_lock; + struct list_head sc_send_ctxts; + spinlock_t sc_rw_ctxt_lock; + struct list_head sc_rw_ctxts; + + struct list_head sc_rq_dto_q; + spinlock_t sc_rq_dto_lock; + struct ib_qp *sc_qp; + struct ib_cq *sc_rq_cq; + struct ib_cq *sc_sq_cq; + + spinlock_t sc_lock; /* transport lock */ + + wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ + unsigned long sc_flags; + struct list_head sc_read_complete_q; + struct work_struct sc_work; + + spinlock_t sc_recv_lock; + struct list_head sc_recv_ctxts; +}; +/* sc_flags */ +#define RDMAXPRT_CONN_PENDING 3 + +#define RPCRDMA_LISTEN_BACKLOG 10 +#define RPCRDMA_MAX_REQUESTS 32 + +/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our + * current NFSv4.1 implementation supports one backchannel slot. 
+ */ +#define RPCRDMA_MAX_BC_REQUESTS 2 + +#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD + +struct svc_rdma_recv_ctxt { + struct list_head rc_list; + struct ib_recv_wr rc_recv_wr; + struct ib_cqe rc_cqe; + struct ib_sge rc_recv_sge; + void *rc_recv_buf; + struct xdr_buf rc_arg; + bool rc_temp; + u32 rc_byte_len; + unsigned int rc_page_count; + unsigned int rc_hdr_count; + struct page *rc_pages[RPCSVC_MAXPAGES]; +}; + +struct svc_rdma_send_ctxt { + struct list_head sc_list; + struct ib_send_wr sc_send_wr; + struct ib_cqe sc_cqe; + void *sc_xprt_buf; + int sc_page_count; + int sc_cur_sge_no; + struct page *sc_pages[RPCSVC_MAXPAGES]; + struct ib_sge sc_sges[]; +}; + +/* svc_rdma_backchannel.c */ +extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, + __be32 *rdma_resp, + struct xdr_buf *rcvbuf); + +/* svc_rdma_recvfrom.c */ +extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); +extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma); +extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, + struct svc_rdma_recv_ctxt *ctxt); +extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma); +extern void svc_rdma_release_rqst(struct svc_rqst *rqstp); +extern int svc_rdma_recvfrom(struct svc_rqst *); + +/* svc_rdma_rw.c */ +extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma); +extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, + struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *head, __be32 *p); +extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, + __be32 *wr_ch, struct xdr_buf *xdr); +extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, + __be32 *rp_ch, bool writelist, + struct xdr_buf *xdr); + +/* svc_rdma_sendto.c */ +extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma); +extern struct svc_rdma_send_ctxt * + svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma); +extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *ctxt); +extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr); +extern void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *ctxt, + unsigned int len); +extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *ctxt, + struct xdr_buf *xdr, __be32 *wr_lst); +extern int svc_rdma_sendto(struct svc_rqst *); + +/* svc_rdma_transport.c */ +extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); +extern void svc_sq_reap(struct svcxprt_rdma *); +extern void svc_rq_reap(struct svcxprt_rdma *); +extern void svc_rdma_prep_reply_hdr(struct svc_rqst *); + +extern struct svc_xprt_class svc_rdma_class; +#ifdef CONFIG_SUNRPC_BACKCHANNEL +extern struct svc_xprt_class svc_rdma_bc_class; +#endif + +/* svc_rdma.c */ +extern struct workqueue_struct *svc_rdma_wq; +extern int svc_rdma_init(void); +extern void svc_rdma_cleanup(void); + +#endif diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h new file mode 100644 index 000000000..c3d72066d --- /dev/null +++ b/include/linux/sunrpc/svc_xprt.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/svc_xprt.h + * + * RPC server transport I/O + */ + +#ifndef SUNRPC_SVC_XPRT_H +#define SUNRPC_SVC_XPRT_H + +#include + +struct module; + +struct svc_xprt_ops { + struct svc_xprt *(*xpo_create)(struct svc_serv *, + struct net *net, + struct sockaddr *, int, + int); + struct svc_xprt *(*xpo_accept)(struct svc_xprt *); + int (*xpo_has_wspace)(struct svc_xprt *); + 
int (*xpo_recvfrom)(struct svc_rqst *); + void (*xpo_prep_reply_hdr)(struct svc_rqst *); + int (*xpo_sendto)(struct svc_rqst *); + void (*xpo_release_rqst)(struct svc_rqst *); + void (*xpo_detach)(struct svc_xprt *); + void (*xpo_free)(struct svc_xprt *); + void (*xpo_secure_port)(struct svc_rqst *rqstp); + void (*xpo_kill_temp_xprt)(struct svc_xprt *); +}; + +struct svc_xprt_class { + const char *xcl_name; + struct module *xcl_owner; + const struct svc_xprt_ops *xcl_ops; + struct list_head xcl_list; + u32 xcl_max_payload; + int xcl_ident; +}; + +/* + * This is embedded in an object that wants a callback before deleting + * an xprt; intended for use by NFSv4.1, which needs to know when a + * client's tcp connection (and hence possibly a backchannel) goes away. + */ +struct svc_xpt_user { + struct list_head list; + void (*callback)(struct svc_xpt_user *); +}; + +struct svc_xprt { + struct svc_xprt_class *xpt_class; + const struct svc_xprt_ops *xpt_ops; + struct kref xpt_ref; + struct list_head xpt_list; + struct list_head xpt_ready; + unsigned long xpt_flags; +#define XPT_BUSY 0 /* enqueued/receiving */ +#define XPT_CONN 1 /* conn pending */ +#define XPT_CLOSE 2 /* dead or dying */ +#define XPT_DATA 3 /* data pending */ +#define XPT_TEMP 4 /* connected transport */ +#define XPT_DEAD 6 /* transport closed */ +#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */ +#define XPT_DEFERRED 8 /* deferred request pending */ +#define XPT_OLD 9 /* used for xprt aging mark+sweep */ +#define XPT_LISTENER 10 /* listening endpoint */ +#define XPT_CACHE_AUTH 11 /* cache auth info */ +#define XPT_LOCAL 12 /* connection from loopback interface */ +#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */ +#define XPT_CONG_CTRL 14 /* has congestion control */ + + struct svc_serv *xpt_server; /* service for transport */ + atomic_t xpt_reserved; /* space on outq that is rsvd */ + atomic_t xpt_nr_rqsts; /* Number of requests */ + struct mutex xpt_mutex; /* to serialize sending data */ + spinlock_t xpt_lock; /* protects sk_deferred + * and xpt_auth_cache */ + void *xpt_auth_cache;/* auth cache */ + struct list_head xpt_deferred; /* deferred requests that need + * to be revisted */ + struct sockaddr_storage xpt_local; /* local address */ + size_t xpt_locallen; /* length of address */ + struct sockaddr_storage xpt_remote; /* remote peer's address */ + size_t xpt_remotelen; /* length of address */ + char xpt_remotebuf[INET6_ADDRSTRLEN + 10]; + struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ + struct list_head xpt_users; /* callbacks on free */ + + struct net *xpt_net; + struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ + struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */ +}; + +static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) +{ + spin_lock(&xpt->xpt_lock); + list_del_init(&u->list); + spin_unlock(&xpt->xpt_lock); +} + +static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) +{ + spin_lock(&xpt->xpt_lock); + if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) { + /* + * The connection is about to be deleted soon (or, + * worse, may already be deleted--in which case we've + * already notified the xpt_users). 
+ */ + spin_unlock(&xpt->xpt_lock); + return -ENOTCONN; + } + list_add(&u->list, &xpt->xpt_users); + spin_unlock(&xpt->xpt_lock); + return 0; +} + +int svc_reg_xprt_class(struct svc_xprt_class *); +void svc_unreg_xprt_class(struct svc_xprt_class *); +void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, + struct svc_serv *); +int svc_create_xprt(struct svc_serv *, const char *, struct net *, + const int, const unsigned short, int); +void svc_xprt_do_enqueue(struct svc_xprt *xprt); +void svc_xprt_enqueue(struct svc_xprt *xprt); +void svc_xprt_put(struct svc_xprt *xprt); +void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt); +void svc_close_xprt(struct svc_xprt *xprt); +int svc_port_is_privileged(struct sockaddr *sin); +int svc_print_xprts(char *buf, int maxlen); +struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, + struct net *net, const sa_family_t af, + const unsigned short port); +int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen); +void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *xprt); +void svc_age_temp_xprts_now(struct svc_serv *, struct sockaddr *); + +static inline void svc_xprt_get(struct svc_xprt *xprt) +{ + kref_get(&xprt->xpt_ref); +} +static inline void svc_xprt_set_local(struct svc_xprt *xprt, + const struct sockaddr *sa, + const size_t salen) +{ + memcpy(&xprt->xpt_local, sa, salen); + xprt->xpt_locallen = salen; +} +static inline void svc_xprt_set_remote(struct svc_xprt *xprt, + const struct sockaddr *sa, + const size_t salen) +{ + memcpy(&xprt->xpt_remote, sa, salen); + xprt->xpt_remotelen = salen; + snprintf(xprt->xpt_remotebuf, sizeof(xprt->xpt_remotebuf) - 1, + "%pISpc", sa); +} + +static inline unsigned short svc_addr_port(const struct sockaddr *sa) +{ + const struct sockaddr_in *sin = (const struct sockaddr_in *)sa; + const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa; + + switch (sa->sa_family) { + case AF_INET: + return ntohs(sin->sin_port); + case AF_INET6: + return ntohs(sin6->sin6_port); + } + + return 0; +} + +static inline size_t svc_addr_len(const struct sockaddr *sa) +{ + switch (sa->sa_family) { + case AF_INET: + return sizeof(struct sockaddr_in); + case AF_INET6: + return sizeof(struct sockaddr_in6); + } + BUG(); +} + +static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt) +{ + return svc_addr_port((const struct sockaddr *)&xprt->xpt_local); +} + +static inline unsigned short svc_xprt_remote_port(const struct svc_xprt *xprt) +{ + return svc_addr_port((const struct sockaddr *)&xprt->xpt_remote); +} + +static inline char *__svc_print_addr(const struct sockaddr *addr, + char *buf, const size_t len) +{ + const struct sockaddr_in *sin = (const struct sockaddr_in *)addr; + const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr; + + switch (addr->sa_family) { + case AF_INET: + snprintf(buf, len, "%pI4, port=%u", &sin->sin_addr, + ntohs(sin->sin_port)); + break; + + case AF_INET6: + snprintf(buf, len, "%pI6, port=%u", + &sin6->sin6_addr, + ntohs(sin6->sin6_port)); + break; + + default: + snprintf(buf, len, "unknown address type: %d", addr->sa_family); + break; + } + + return buf; +} +#endif /* SUNRPC_SVC_XPRT_H */ diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h new file mode 100644 index 000000000..04e404a07 --- /dev/null +++ b/include/linux/sunrpc/svcauth.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/svcauth.h + * + * RPC server-side 
authentication stuff. + * + * Copyright (C) 1995, 1996 Olaf Kirch + */ + +#ifndef _LINUX_SUNRPC_SVCAUTH_H_ +#define _LINUX_SUNRPC_SVCAUTH_H_ + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include +#include + +struct svc_cred { + kuid_t cr_uid; + kgid_t cr_gid; + struct group_info *cr_group_info; + u32 cr_flavor; /* pseudoflavor */ + /* name of form servicetype/hostname@REALM, passed down by + * gss-proxy: */ + char *cr_raw_principal; + /* name of form servicetype@hostname, passed down by + * rpc.svcgssd, or computed from the above: */ + char *cr_principal; + char *cr_targ_princ; + struct gss_api_mech *cr_gss_mech; +}; + +static inline void init_svc_cred(struct svc_cred *cred) +{ + cred->cr_group_info = NULL; + cred->cr_raw_principal = NULL; + cred->cr_principal = NULL; + cred->cr_targ_princ = NULL; + cred->cr_gss_mech = NULL; +} + +static inline void free_svc_cred(struct svc_cred *cred) +{ + if (cred->cr_group_info) + put_group_info(cred->cr_group_info); + kfree(cred->cr_raw_principal); + kfree(cred->cr_principal); + kfree(cred->cr_targ_princ); + gss_mech_put(cred->cr_gss_mech); + init_svc_cred(cred); +} + +struct svc_rqst; /* forward decl */ +struct in6_addr; + +/* Authentication is done in the context of a domain. + * + * Currently, the nfs server uses the auth_domain to stand + * for the "client" listed in /etc/exports. + * + * More generally, a domain might represent a group of clients using + * a common mechanism for authentication and having a common mapping + * between local identity (uid) and network identity. All clients + * in a domain have similar general access rights. Each domain can + * contain multiple principals which will have different specific right + * based on normal Discretionary Access Control. + * + * A domain is created by an authentication flavour module based on name + * only. Userspace then fills in detail on demand. + * + * In the case of auth_unix and auth_null, the auth_domain is also + * associated with entries in another cache representing the mapping + * of ip addresses to the given client. + */ +struct auth_domain { + struct kref ref; + struct hlist_node hash; + char *name; + struct auth_ops *flavour; +}; + +/* + * Each authentication flavour registers an auth_ops + * structure. + * name is simply the name. + * flavour gives the auth flavour. It determines where the flavour is registered + * accept() is given a request and should verify it. + * It should inspect the authenticator and verifier, and possibly the data. + * If there is a problem with the authentication *authp should be set. + * The return value of accept() can indicate: + * OK - authorised. client and credential are set in rqstp. + * reqbuf points to arguments + * resbuf points to good place for results. verfier + * is (probably) already in place. Certainly space is + * reserved for it. + * DROP - simply drop the request. It may have been deferred + * GARBAGE - rpc garbage_args error + * SYSERR - rpc system_err error + * DENIED - authp holds reason for denial. + * COMPLETE - the reply is encoded already and ready to be sent; no + * further processing is necessary. (This is used for processing + * null procedure calls which are used to set up encryption + * contexts.) + * + * accept is passed the proc number so that it can accept NULL rpc requests + * even if it cannot authenticate the client (as is sometimes appropriate). + * + * release() is given a request after the procedure has been run. 
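A small, hypothetical illustration of the svc_cred lifetime implied by the helpers above: a path that fills in a svc_cred starts from init_svc_cred() and ends in free_svc_cred(), which releases whatever principal strings, group_info and GSS mech were attached.

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/sunrpc/svcauth.h>

    static int demo_cred_roundtrip(const char *principal)
    {
            struct svc_cred cred;
            int err = 0;

            init_svc_cred(&cred);
            cred.cr_principal = kstrdup(principal, GFP_KERNEL);
            if (!cred.cr_principal)
                    err = -ENOMEM;
            free_svc_cred(&cred);   /* kfrees the principal, re-initialises cred */
            return err;
    }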
+ * It should sign/encrypt the results if needed + * It should return: + * OK - the resbuf is ready to be sent + * DROP - the reply should be quitely dropped + * DENIED - authp holds a reason for MSG_DENIED + * SYSERR - rpc system_err + * + * domain_release() + * This call releases a domain. + * set_client() + * Givens a pending request (struct svc_rqst), finds and assigns + * an appropriate 'auth_domain' as the client. + */ +struct auth_ops { + char * name; + struct module *owner; + int flavour; + int (*accept)(struct svc_rqst *rq, __be32 *authp); + int (*release)(struct svc_rqst *rq); + void (*domain_release)(struct auth_domain *); + int (*set_client)(struct svc_rqst *rq); +}; + +#define SVC_GARBAGE 1 +#define SVC_SYSERR 2 +#define SVC_VALID 3 +#define SVC_NEGATIVE 4 +#define SVC_OK 5 +#define SVC_DROP 6 +#define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely + * lost so if there is a tcp connection, it + * should be closed + */ +#define SVC_DENIED 8 +#define SVC_PENDING 9 +#define SVC_COMPLETE 10 + +struct svc_xprt; + +extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp); +extern int svc_authorise(struct svc_rqst *rqstp); +extern int svc_set_client(struct svc_rqst *rqstp); +extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops); +extern void svc_auth_unregister(rpc_authflavor_t flavor); + +extern struct auth_domain *unix_domain_find(char *name); +extern void auth_domain_put(struct auth_domain *item); +extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom); +extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new); +extern struct auth_domain *auth_domain_find(char *name); +extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr); +extern int auth_unix_forget_old(struct auth_domain *dom); +extern void svcauth_unix_purge(struct net *net); +extern void svcauth_unix_info_release(struct svc_xprt *xpt); +extern int svcauth_unix_set_client(struct svc_rqst *rqstp); + +extern int unix_gid_cache_create(struct net *net); +extern void unix_gid_cache_destroy(struct net *net); + +/* + * The functions are good enough that we don't need to + * use hash_32() on them; just extracting the high bits is enough. 
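To make the auth_ops contract above concrete, a hedged sketch of a trivial flavour that accepts every request and signs nothing; a real flavour would parse the credential and verifier out of rq_arg, fill in rqstp->rq_cred, and emit a verifier into rq_res. All names below, and the reuse of RPC_AUTH_NULL, are purely illustrative.

    #include <linux/module.h>
    #include <linux/sunrpc/msg_prot.h>
    #include <linux/sunrpc/svcauth.h>
    #include <linux/sunrpc/xdr.h>

    static int demo_accept(struct svc_rqst *rqstp, __be32 *authp)
    {
            *authp = rpc_auth_ok;
            return SVC_OK;          /* rqstp->rq_cred left empty: sketch only */
    }

    static int demo_release(struct svc_rqst *rqstp)
    {
            return SVC_OK;          /* nothing to sign or encrypt in the reply */
    }

    static struct auth_ops demo_auth_ops = {
            .name           = "demo",
            .owner          = THIS_MODULE,
            .flavour        = RPC_AUTH_NULL, /* illustrative; auth_null owns this slot */
            .accept         = demo_accept,
            .release        = demo_release,
    };

    /* A real flavour would register its own rpc_authflavor_t from module
     * init and drop it on exit, pairing svc_auth_register() with
     * svc_auth_unregister(). */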
+ */ +static inline unsigned long hash_str(char const *name, int bits) +{ + return hashlen_hash(hashlen_string(NULL, name)) >> (32 - bits); +} + +static inline unsigned long hash_mem(char const *buf, int length, int bits) +{ + return full_name_hash(NULL, buf, length) >> (32 - bits); +} + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SUNRPC_SVCAUTH_H_ */ diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h new file mode 100644 index 000000000..d229d27ab --- /dev/null +++ b/include/linux/sunrpc/svcauth_gss.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/svcauth_gss.h + * + * Bruce Fields + * Copyright (c) 2002 The Regents of the University of Michigan + */ + +#ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H +#define _LINUX_SUNRPC_SVCAUTH_GSS_H + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include + +int gss_svc_init(void); +void gss_svc_shutdown(void); +int gss_svc_init_net(struct net *net); +void gss_svc_shutdown_net(struct net *net); +struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, + char *name); +u32 svcauth_gss_flavor(struct auth_domain *dom); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h new file mode 100644 index 000000000..119718a92 --- /dev/null +++ b/include/linux/sunrpc/svcsock.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/svcsock.h + * + * RPC server socket I/O. + * + * Copyright (C) 1995, 1996 Olaf Kirch + */ + +#ifndef SUNRPC_SVCSOCK_H +#define SUNRPC_SVCSOCK_H + +#include +#include + +/* + * RPC server socket. + */ +struct svc_sock { + struct svc_xprt sk_xprt; + struct socket * sk_sock; /* berkeley socket layer */ + struct sock * sk_sk; /* INET layer */ + + /* We keep the old state_change and data_ready CB's here */ + void (*sk_ostate)(struct sock *); + void (*sk_odata)(struct sock *); + void (*sk_owspace)(struct sock *); + + /* private TCP part */ + /* On-the-wire fragment header: */ + __be32 sk_reclen; + /* As we receive a record, this includes the length received so + * far (including the fragment header): */ + u32 sk_tcplen; + /* Total length of the data (not including fragment headers) + * received so far in the fragments making up this rpc: */ + u32 sk_datalen; + + struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */ +}; + +static inline u32 svc_sock_reclen(struct svc_sock *svsk) +{ + return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK; +} + +static inline u32 svc_sock_final_rec(struct svc_sock *svsk) +{ + return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT; +} + +/* + * Function prototypes. 
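As a small illustration (names hypothetical), the two svc_sock helpers above decompose the cached record marker: the high bit of sk_reclen flags the final fragment of an RPC record and the remaining bits give the fragment length.

    #include <linux/types.h>
    #include <linux/sunrpc/svcsock.h>

    static void demo_record_marker(struct svc_sock *svsk, u32 *len, bool *final)
    {
            *len   = svc_sock_reclen(svsk);           /* fragment payload length   */
            *final = svc_sock_final_rec(svsk) != 0;   /* last fragment of the RPC? */
    }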
+ */ +void svc_close_net(struct svc_serv *, struct net *); +int svc_recv(struct svc_rqst *, long); +int svc_send(struct svc_rqst *); +void svc_drop(struct svc_rqst *); +void svc_sock_update_bufs(struct svc_serv *serv); +bool svc_alien_sock(struct net *net, int fd); +int svc_addsock(struct svc_serv *serv, const int fd, + char *name_return, const size_t len); +void svc_init_xprt_sock(void); +void svc_cleanup_xprt_sock(void); +struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot); +void svc_sock_destroy(struct svc_xprt *); + +/* + * svc_makesock socket characteristics + */ +#define SVC_SOCK_DEFAULTS (0U) +#define SVC_SOCK_ANONYMOUS (1U << 0) /* don't register with pmap */ +#define SVC_SOCK_TEMPORARY (1U << 1) /* flag socket as temporary */ + +#endif /* SUNRPC_SVCSOCK_H */ diff --git a/include/linux/sunrpc/timer.h b/include/linux/sunrpc/timer.h new file mode 100644 index 000000000..242dbe00b --- /dev/null +++ b/include/linux/sunrpc/timer.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/timer.h + * + * Declarations for the RPC transport timer. + * + * Copyright (C) 2002 Trond Myklebust + */ + +#ifndef _LINUX_SUNRPC_TIMER_H +#define _LINUX_SUNRPC_TIMER_H + +#include + +struct rpc_rtt { + unsigned long timeo; /* default timeout value */ + unsigned long srtt[5]; /* smoothed round trip time << 3 */ + unsigned long sdrtt[5]; /* smoothed medium deviation of RTT */ + int ntimeouts[5]; /* Number of timeouts for the last request */ +}; + + +extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo); +extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m); +extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer); + +static inline void rpc_set_timeo(struct rpc_rtt *rt, int timer, int ntimeo) +{ + int *t; + if (!timer) + return; + t = &rt->ntimeouts[timer-1]; + if (ntimeo < *t) { + if (*t > 0) + (*t)--; + } else { + if (ntimeo > 8) + ntimeo = 8; + *t = ntimeo; + } +} + +static inline int rpc_ntimeo(struct rpc_rtt *rt, int timer) +{ + if (!timer) + return 0; + return rt->ntimeouts[timer-1]; +} + +#endif /* _LINUX_SUNRPC_TIMER_H */ diff --git a/include/linux/sunrpc/types.h b/include/linux/sunrpc/types.h new file mode 100644 index 000000000..bd3c8e056 --- /dev/null +++ b/include/linux/sunrpc/types.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/types.h + * + * Generic types and misc stuff for RPC. + * + * Copyright (C) 1996, Olaf Kirch + */ + +#ifndef _LINUX_SUNRPC_TYPES_H_ +#define _LINUX_SUNRPC_TYPES_H_ + +#include +#include +#include +#include +#include + +/* + * Shorthands + */ +#define signalled() (signal_pending(current)) + +#endif /* _LINUX_SUNRPC_TYPES_H_ */ diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h new file mode 100644 index 000000000..8e3d35189 --- /dev/null +++ b/include/linux/sunrpc/xdr.h @@ -0,0 +1,532 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * XDR standard data types and function declarations + * + * Copyright (C) 1995-1997 Olaf Kirch + * + * Based on: + * RFC 4506 "XDR: External Data Representation Standard", May 2006 + */ + +#ifndef _SUNRPC_XDR_H_ +#define _SUNRPC_XDR_H_ + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +struct rpc_rqst; + +/* + * Buffer adjustment + */ +#define XDR_QUADLEN(l) (((l) + 3) >> 2) + +/* + * Generic opaque `network object.' 
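A worked example of the buffer-adjustment macro above (helper name invented): XDR_QUADLEN() rounds a byte count up to 32-bit XDR units, so XDR_QUADLEN(5) == 2, and a variable-length opaque of len bytes occupies one length word plus XDR_QUADLEN(len) padded words on the wire.

    #include <linux/sunrpc/xdr.h>

    static inline unsigned int demo_opaque_wire_size(unsigned int len)
    {
            return sizeof(__be32) + (XDR_QUADLEN(len) << 2);
    }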
+ */ +#define XDR_MAX_NETOBJ 1024 +struct xdr_netobj { + unsigned int len; + u8 * data; +}; + +/* + * Basic structure for transmission/reception of a client XDR message. + * Features a header (for a linear buffer containing RPC headers + * and the data payload for short messages), and then an array of + * pages. + * The tail iovec allows you to append data after the page array. Its + * main interest is for appending padding to the pages in order to + * satisfy the int_32-alignment requirements in RFC1832. + * + * For the future, we might want to string several of these together + * in a list if anybody wants to make use of NFSv4 COMPOUND + * operations and/or has a need for scatter/gather involving pages. + */ +struct xdr_buf { + struct kvec head[1], /* RPC header + non-page data */ + tail[1]; /* Appended after page data */ + + struct page ** pages; /* Array of pages */ + unsigned int page_base, /* Start of page data */ + page_len, /* Length of page data */ + flags; /* Flags for data disposition */ +#define XDRBUF_READ 0x01 /* target of file read */ +#define XDRBUF_WRITE 0x02 /* source of file write */ + + unsigned int buflen, /* Total length of storage buffer */ + len; /* Length of XDR encoded message */ +}; + +static inline void +xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) +{ + buf->head[0].iov_base = start; + buf->head[0].iov_len = len; + buf->tail[0].iov_len = 0; + buf->page_len = 0; + buf->flags = 0; + buf->len = 0; + buf->buflen = len; +} + +/* + * pre-xdr'ed macros. + */ + +#define xdr_zero cpu_to_be32(0) +#define xdr_one cpu_to_be32(1) +#define xdr_two cpu_to_be32(2) + +#define rpc_success cpu_to_be32(RPC_SUCCESS) +#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL) +#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH) +#define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL) +#define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS) +#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR) +#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY) + +#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK) +#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED) +#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED) +#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF) +#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF) +#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) +#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) +#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) +#define rpc_autherr_oldseqnum cpu_to_be32(101) + +/* + * Miscellaneous XDR helper functions + */ +__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int len); +__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int len); +__be32 *xdr_encode_string(__be32 *p, const char *s); +__be32 *xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp, + unsigned int maxlen); +__be32 *xdr_encode_netobj(__be32 *p, const struct xdr_netobj *); +__be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *); + +void xdr_inline_pages(struct xdr_buf *, unsigned int, + struct page **, unsigned int, unsigned int); +void xdr_terminate_string(struct xdr_buf *, const u32); + +static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len) +{ + return xdr_encode_opaque(p, s, len); +} + +/* + * Decode 64bit quantities (NFSv3 support) + */ +static inline __be32 * +xdr_encode_hyper(__be32 *p, __u64 val) +{ + put_unaligned_be64(val, p); + return p + 2; +} + +static inline __be32 * +xdr_decode_hyper(__be32 *p, __u64 
*valp) +{ + *valp = get_unaligned_be64(p); + return p + 2; +} + +static inline __be32 * +xdr_decode_opaque_fixed(__be32 *p, void *ptr, unsigned int len) +{ + memcpy(ptr, p, len); + return p + XDR_QUADLEN(len); +} + +/* + * Adjust kvec to reflect end of xdr'ed data (RPC client XDR) + */ +static inline int +xdr_adjust_iovec(struct kvec *iov, __be32 *p) +{ + return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base); +} + +/* + * XDR buffer helper functions + */ +extern void xdr_shift_buf(struct xdr_buf *, size_t); +extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); +extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); +extern void xdr_buf_trim(struct xdr_buf *, unsigned int); +extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int); +extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); +extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); + +/* + * Helper structure for copying from an sk_buff. + */ +struct xdr_skb_reader { + struct sk_buff *skb; + unsigned int offset; + size_t count; + __wsum csum; +}; + +typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len); + +size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len); +extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *); +extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int, + struct xdr_skb_reader *, xdr_skb_read_actor); + +extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32); +extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *); + +struct xdr_array2_desc; +typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem); +struct xdr_array2_desc { + unsigned int elem_size; + unsigned int array_len; + unsigned int array_maxlen; + xdr_xcode_elem_t xcode; +}; + +extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base, + struct xdr_array2_desc *desc); +extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base, + struct xdr_array2_desc *desc); +extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase, + size_t len); + +/* + * Provide some simple tools for XDR buffer overflow-checking etc. + */ +struct xdr_stream { + __be32 *p; /* start of available buffer */ + struct xdr_buf *buf; /* XDR buffer to read/write */ + + __be32 *end; /* end of available buffer space */ + struct kvec *iov; /* pointer to the current kvec */ + struct kvec scratch; /* Scratch buffer */ + struct page **page_ptr; /* pointer to the current page */ + unsigned int nwords; /* Remaining decode buffer length */ +}; + +/* + * These are the xdr_stream style generic XDR encode and decode functions. 
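A minimal sketch (function name invented) of the 64-bit helpers above: xdr_encode_hyper() stores a value as two big-endian XDR words and xdr_decode_hyper() reads it back, each returning the advanced position.

    #include <linux/sunrpc/xdr.h>

    static bool demo_hyper_roundtrip(void)
    {
            __be32 buf[2];
            __u64 in = 0x1122334455667788ULL, out;

            xdr_encode_hyper(buf, in);      /* consumes buf[0] and buf[1] */
            xdr_decode_hyper(buf, &out);
            return in == out;               /* always true */
    }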
+ */ +typedef void (*kxdreproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + const void *obj); +typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + void *obj); + +extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); +extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); +extern void xdr_commit_encode(struct xdr_stream *xdr); +extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); +extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen); +extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, + unsigned int base, unsigned int len); +extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr); +extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); +extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, + struct page **pages, unsigned int len); +extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen); +extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); +extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); +extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); +extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); + +/** + * xdr_stream_remaining - Return the number of bytes remaining in the stream + * @xdr: pointer to struct xdr_stream + * + * Return value: + * Number of bytes remaining in @xdr before xdr->end + */ +static inline size_t +xdr_stream_remaining(const struct xdr_stream *xdr) +{ + return xdr->nwords << 2; +} + +ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, + size_t size); +ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr, + size_t maxlen, gfp_t gfp_flags); +ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, + size_t size); +ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, + size_t maxlen, gfp_t gfp_flags); +/** + * xdr_align_size - Calculate padded size of an object + * @n: Size of an object being XDR encoded (in bytes) + * + * Return value: + * Size (in bytes) of the object including xdr padding + */ +static inline size_t +xdr_align_size(size_t n) +{ + const size_t mask = sizeof(__u32) - 1; + + return (n + mask) & ~mask; +} + +/** + * xdr_stream_encode_u32 - Encode a 32-bit integer + * @xdr: pointer to xdr_stream + * @n: integer to encode + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n) +{ + const size_t len = sizeof(n); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + *p = cpu_to_be32(n); + return len; +} + +/** + * xdr_stream_encode_u64 - Encode a 64-bit integer + * @xdr: pointer to xdr_stream + * @n: 64-bit integer to encode + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) +{ + const size_t len = sizeof(n); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_hyper(p, n); + return len; +} + +/** + * xdr_stream_encode_opaque_inline - Encode opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to void pointer + * @len: size 
of object + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t len) +{ + size_t count = sizeof(__u32) + xdr_align_size(len); + __be32 *p = xdr_reserve_space(xdr, count); + + if (unlikely(!p)) { + *ptr = NULL; + return -EMSGSIZE; + } + xdr_encode_opaque(p, NULL, len); + *ptr = ++p; + return count; +} + +/** + * xdr_stream_encode_opaque_fixed - Encode fixed length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to opaque data object + * @len: size of object pointed to by @ptr + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_opaque_fixed(struct xdr_stream *xdr, const void *ptr, size_t len) +{ + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_opaque_fixed(p, ptr, len); + return xdr_align_size(len); +} + +/** + * xdr_stream_encode_opaque - Encode variable length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to opaque data object + * @len: size of object pointed to by @ptr + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len) +{ + size_t count = sizeof(__u32) + xdr_align_size(len); + __be32 *p = xdr_reserve_space(xdr, count); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_opaque(p, ptr, len); + return count; +} + +/** + * xdr_stream_encode_uint32_array - Encode variable length array of integers + * @xdr: pointer to xdr_stream + * @array: array of integers + * @array_size: number of elements in @array + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_uint32_array(struct xdr_stream *xdr, + const __u32 *array, size_t array_size) +{ + ssize_t ret = (array_size+1) * sizeof(__u32); + __be32 *p = xdr_reserve_space(xdr, ret); + + if (unlikely(!p)) + return -EMSGSIZE; + *p++ = cpu_to_be32(array_size); + for (; array_size > 0; p++, array++, array_size--) + *p = cpu_to_be32p(array); + return ret; +} + +/** + * xdr_stream_decode_u32 - Decode a 32-bit integer + * @xdr: pointer to xdr_stream + * @ptr: location to store integer + * + * Return values: + * %0 on success + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) +{ + const size_t count = sizeof(*ptr); + __be32 *p = xdr_inline_decode(xdr, count); + + if (unlikely(!p)) + return -EBADMSG; + *ptr = be32_to_cpup(p); + return 0; +} + +/** + * xdr_stream_decode_opaque_fixed - Decode fixed length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: location to store data + * @len: size of buffer pointed to by @ptr + * + * Return values: + * On success, returns size of object stored in @ptr + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len) +{ + __be32 *p = xdr_inline_decode(xdr, len); + + if (unlikely(!p)) + return -EBADMSG; + xdr_decode_opaque_fixed(p, ptr, len); + return len; +} + +/** + * xdr_stream_decode_opaque_inline - Decode variable length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: location to 
store pointer to opaque data + * @maxlen: maximum acceptable object size + * + * Note: the pointer stored in @ptr cannot be assumed valid after the XDR + * buffer has been destroyed, or even after calling xdr_inline_decode() + * on @xdr. It is therefore expected that the object it points to should + * be processed immediately. + * + * Return values: + * On success, returns size of object stored in *@ptr + * %-EBADMSG on XDR buffer overflow + * %-EMSGSIZE if the size of the object would exceed @maxlen + */ +static inline ssize_t +xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxlen) +{ + __be32 *p; + __u32 len; + + *ptr = NULL; + if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) + return -EBADMSG; + if (len != 0) { + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return -EBADMSG; + if (unlikely(len > maxlen)) + return -EMSGSIZE; + *ptr = p; + } + return len; +} + +/** + * xdr_stream_decode_uint32_array - Decode variable length array of integers + * @xdr: pointer to xdr_stream + * @array: location to store the integer array or NULL + * @array_size: number of elements to store + * + * Return values: + * On success, returns number of elements stored in @array + * %-EBADMSG on XDR buffer overflow + * %-EMSGSIZE if the size of the array exceeds @array_size + */ +static inline ssize_t +xdr_stream_decode_uint32_array(struct xdr_stream *xdr, + __u32 *array, size_t array_size) +{ + __be32 *p; + __u32 len; + ssize_t retval; + + if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) + return -EBADMSG; + if (len > SIZE_MAX / sizeof(*p)) + return -EBADMSG; + p = xdr_inline_decode(xdr, len * sizeof(*p)); + if (unlikely(!p)) + return -EBADMSG; + if (array == NULL) + return len; + if (len <= array_size) { + if (len < array_size) + memset(array+len, 0, (array_size-len)*sizeof(*array)); + array_size = len; + retval = len; + } else + retval = -EMSGSIZE; + for (; array_size > 0; p++, array++, array_size--) + *array = be32_to_cpup(p); + return retval; +} +#endif /* __KERNEL__ */ + +#endif /* _SUNRPC_XDR_H_ */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h new file mode 100644 index 000000000..69fed13e6 --- /dev/null +++ b/include/linux/sunrpc/xprt.h @@ -0,0 +1,497 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/xprt.h + * + * Declarations for the RPC transport interface. 
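Pulling the xdr_stream helpers above together, a hedged sketch (names and sizes invented) of encoding a variable-length opaque plus a u32 into a page-less xdr_buf and decoding them again:

    #include <linux/errno.h>
    #include <linux/sunrpc/xdr.h>

    static int demo_stream_roundtrip(void *scratch, size_t scratch_len)
    {
            struct xdr_stream xdr;
            struct xdr_buf buf;
            char tag[8];
            __u32 flags;

            /* Encode: "demo" as an opaque, then a 32-bit flag word. */
            xdr_buf_init(&buf, scratch, scratch_len);
            xdr_init_encode(&xdr, &buf, scratch);
            if (xdr_stream_encode_opaque(&xdr, "demo", 4) < 0 ||
                xdr_stream_encode_u32(&xdr, 7) < 0)
                    return -EMSGSIZE;

            /* Decode the same buffer back. */
            xdr_init_decode(&xdr, &buf, scratch);
            if (xdr_stream_decode_opaque(&xdr, tag, sizeof(tag)) < 0 ||
                xdr_stream_decode_u32(&xdr, &flags) < 0)
                    return -EBADMSG;

            return 0;
    }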
+ * + * Copyright (C) 1995, 1996 Olaf Kirch + */ + +#ifndef _LINUX_SUNRPC_XPRT_H +#define _LINUX_SUNRPC_XPRT_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __KERNEL__ + +#define RPC_MIN_SLOT_TABLE (2U) +#define RPC_DEF_SLOT_TABLE (16U) +#define RPC_MAX_SLOT_TABLE_LIMIT (65536U) +#define RPC_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE_LIMIT + +#define RPC_CWNDSHIFT (8U) +#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) +#define RPC_INITCWND RPC_CWNDSCALE +#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) +#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) + +/* + * This describes a timeout strategy + */ +struct rpc_timeout { + unsigned long to_initval, /* initial timeout */ + to_maxval, /* max timeout */ + to_increment; /* if !exponential */ + unsigned int to_retries; /* max # of retries */ + unsigned char to_exponential; +}; + +enum rpc_display_format_t { + RPC_DISPLAY_ADDR = 0, + RPC_DISPLAY_PORT, + RPC_DISPLAY_PROTO, + RPC_DISPLAY_HEX_ADDR, + RPC_DISPLAY_HEX_PORT, + RPC_DISPLAY_NETID, + RPC_DISPLAY_MAX, +}; + +struct rpc_task; +struct rpc_xprt; +struct seq_file; +struct svc_serv; +struct net; + +/* + * This describes a complete RPC request + */ +struct rpc_rqst { + /* + * This is the user-visible part + */ + struct rpc_xprt * rq_xprt; /* RPC client */ + struct xdr_buf rq_snd_buf; /* send buffer */ + struct xdr_buf rq_rcv_buf; /* recv buffer */ + + /* + * This is the private part + */ + struct rpc_task * rq_task; /* RPC task data */ + struct rpc_cred * rq_cred; /* Bound cred */ + __be32 rq_xid; /* request XID */ + int rq_cong; /* has incremented xprt->cong */ + u32 rq_seqno; /* gss seq no. used on req. */ + int rq_enc_pages_num; + struct page **rq_enc_pages; /* scratch pages for use by + gss privacy code */ + void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ + struct list_head rq_list; + + void *rq_buffer; /* Call XDR encode buffer */ + size_t rq_callsize; + void *rq_rbuffer; /* Reply XDR decode buffer */ + size_t rq_rcvsize; + size_t rq_xmit_bytes_sent; /* total bytes sent */ + size_t rq_reply_bytes_recvd; /* total reply bytes */ + /* received */ + + struct xdr_buf rq_private_buf; /* The receive buffer + * used in the softirq. 
+ */ + unsigned long rq_majortimeo; /* major timeout alarm */ + unsigned long rq_timeout; /* Current timeout value */ + ktime_t rq_rtt; /* round-trip time */ + unsigned int rq_retries; /* # of retries */ + unsigned int rq_connect_cookie; + /* A cookie used to track the + state of the transport + connection */ + + /* + * Partial send handling + */ + u32 rq_bytes_sent; /* Bytes we have sent */ + + ktime_t rq_xtime; /* transmit time stamp */ + int rq_ntrans; + +#if defined(CONFIG_SUNRPC_BACKCHANNEL) + struct list_head rq_bc_list; /* Callback service list */ + unsigned long rq_bc_pa_state; /* Backchannel prealloc state */ + struct list_head rq_bc_pa_list; /* Backchannel prealloc list */ +#endif /* CONFIG_SUNRPC_BACKCHANEL */ +}; +#define rq_svec rq_snd_buf.head +#define rq_slen rq_snd_buf.len + +struct rpc_xprt_ops { + void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); + int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); + void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); + void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task); + void (*free_slot)(struct rpc_xprt *xprt, + struct rpc_rqst *req); + void (*rpcbind)(struct rpc_task *task); + void (*set_port)(struct rpc_xprt *xprt, unsigned short port); + void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task); + int (*buf_alloc)(struct rpc_task *task); + void (*buf_free)(struct rpc_task *task); + int (*send_request)(struct rpc_task *task); + void (*set_retrans_timeout)(struct rpc_task *task); + void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task); + void (*release_request)(struct rpc_task *task); + void (*close)(struct rpc_xprt *xprt); + void (*destroy)(struct rpc_xprt *xprt); + void (*set_connect_timeout)(struct rpc_xprt *xprt, + unsigned long connect_timeout, + unsigned long reconnect_timeout); + void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); + int (*enable_swap)(struct rpc_xprt *xprt); + void (*disable_swap)(struct rpc_xprt *xprt); + void (*inject_disconnect)(struct rpc_xprt *xprt); + int (*bc_setup)(struct rpc_xprt *xprt, + unsigned int min_reqs); + int (*bc_up)(struct svc_serv *serv, struct net *net); + size_t (*bc_maxpayload)(struct rpc_xprt *xprt); + void (*bc_free_rqst)(struct rpc_rqst *rqst); + void (*bc_destroy)(struct rpc_xprt *xprt, + unsigned int max_reqs); +}; + +/* + * RPC transport identifiers + * + * To preserve compatibility with the historical use of raw IP protocol + * id's for transport selection, UDP and TCP identifiers are specified + * with the previous values. No such restriction exists for new transports, + * except that they may not collide with these values (17 and 6, + * respectively). 
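For reference, a hedged sketch of the kind of struct rpc_timeout (defined above) a transport hands to the RPC core; the values are illustrative, not defaults taken from any real transport.

    #include <linux/jiffies.h>
    #include <linux/sunrpc/xprt.h>

    static const struct rpc_timeout demo_timeout = {
            .to_initval     = 5 * HZ,       /* first major timeout after 5s   */
            .to_increment   = 5 * HZ,       /* linear back-off per retry      */
            .to_maxval      = 30 * HZ,      /* cap any single wait at 30s     */
            .to_retries     = 3,
            .to_exponential = 0,            /* use to_increment, not doubling */
    };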
+ */ +#define XPRT_TRANSPORT_BC (1 << 31) +enum xprt_transports { + XPRT_TRANSPORT_UDP = IPPROTO_UDP, + XPRT_TRANSPORT_TCP = IPPROTO_TCP, + XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, + XPRT_TRANSPORT_RDMA = 256, + XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC, + XPRT_TRANSPORT_LOCAL = 257, +}; + +struct rpc_xprt { + struct kref kref; /* Reference count */ + const struct rpc_xprt_ops *ops; /* transport methods */ + + const struct rpc_timeout *timeout; /* timeout parms */ + struct sockaddr_storage addr; /* server address */ + size_t addrlen; /* size of server address */ + int prot; /* IP protocol */ + + unsigned long cong; /* current congestion */ + unsigned long cwnd; /* congestion window */ + + size_t max_payload; /* largest RPC payload size, + in bytes */ + unsigned int tsh_size; /* size of transport specific + header */ + + struct rpc_wait_queue binding; /* requests waiting on rpcbind */ + struct rpc_wait_queue sending; /* requests waiting to send */ + struct rpc_wait_queue pending; /* requests in flight */ + struct rpc_wait_queue backlog; /* waiting for slot */ + struct list_head free; /* free slots */ + unsigned int max_reqs; /* max number of slots */ + unsigned int min_reqs; /* min number of slots */ + unsigned int num_reqs; /* total slots */ + unsigned long state; /* transport state */ + unsigned char resvport : 1; /* use a reserved port */ + atomic_t swapper; /* we're swapping over this + transport */ + unsigned int bind_index; /* bind function index */ + + /* + * Multipath + */ + struct list_head xprt_switch; + + /* + * Connection of transports + */ + unsigned long bind_timeout, + reestablish_timeout; + unsigned int connect_cookie; /* A cookie that gets bumped + every time the transport + is reconnected */ + + /* + * Disconnection of idle transports + */ + struct work_struct task_cleanup; + struct timer_list timer; + unsigned long last_used, + idle_timeout, + connect_timeout, + max_reconnect_timeout; + + /* + * Send stuff + */ + spinlock_t transport_lock; /* lock transport info */ + spinlock_t reserve_lock; /* lock slot table */ + spinlock_t recv_lock; /* lock receive list */ + u32 xid; /* Next XID value to use */ + struct rpc_task * snd_task; /* Task blocked in send */ + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ +#if defined(CONFIG_SUNRPC_BACKCHANNEL) + struct svc_serv *bc_serv; /* The RPC service which will */ + /* process the callback */ + int bc_alloc_count; /* Total number of preallocs */ + atomic_t bc_free_slots; + spinlock_t bc_pa_lock; /* Protects the preallocated + * items */ + struct list_head bc_pa_list; /* List of preallocated + * backchannel rpc_rqst's */ +#endif /* CONFIG_SUNRPC_BACKCHANNEL */ + struct list_head recv; + + struct { + unsigned long bind_count, /* total number of binds */ + connect_count, /* total number of connects */ + connect_start, /* connect start timestamp */ + connect_time, /* jiffies waiting for connect */ + sends, /* how many complete requests */ + recvs, /* how many complete requests */ + bad_xids, /* lookup_rqst didn't find XID */ + max_slots; /* max rpc_slots used */ + + unsigned long long req_u, /* average requests on the wire */ + bklog_u, /* backlog queue utilization */ + sending_u, /* send q utilization */ + pending_u; /* pend q utilization */ + } stat; + + struct net *xprt_net; + const char *servername; + const char *address_strings[RPC_DISPLAY_MAX]; +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + struct dentry *debugfs; /* debugfs directory */ + atomic_t inject_disconnect; +#endif + struct rcu_head rcu; 
+}; + +#if defined(CONFIG_SUNRPC_BACKCHANNEL) +/* + * Backchannel flags + */ +#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */ + /* buffer in use */ +#endif /* CONFIG_SUNRPC_BACKCHANNEL */ + +#if defined(CONFIG_SUNRPC_BACKCHANNEL) +static inline int bc_prealloc(struct rpc_rqst *req) +{ + return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); +} +#else +static inline int bc_prealloc(struct rpc_rqst *req) +{ + return 0; +} +#endif /* CONFIG_SUNRPC_BACKCHANNEL */ + +#define XPRT_CREATE_INFINITE_SLOTS (1U) +#define XPRT_CREATE_NO_IDLE_TIMEOUT (1U << 1) + +struct xprt_create { + int ident; /* XPRT_TRANSPORT identifier */ + struct net * net; + struct sockaddr * srcaddr; /* optional local address */ + struct sockaddr * dstaddr; /* remote peer address */ + size_t addrlen; + const char *servername; + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ + struct rpc_xprt_switch *bc_xps; + unsigned int flags; +}; + +struct xprt_class { + struct list_head list; + int ident; /* XPRT_TRANSPORT identifier */ + struct rpc_xprt * (*setup)(struct xprt_create *); + struct module *owner; + char name[32]; + const char * netid[]; +}; + +/* + * Generic internal transport functions + */ +struct rpc_xprt *xprt_create_transport(struct xprt_create *args); +void xprt_connect(struct rpc_task *task); +void xprt_reserve(struct rpc_task *task); +void xprt_retry_reserve(struct rpc_task *task); +int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); +int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_free_slot(struct rpc_xprt *xprt, + struct rpc_rqst *req); +void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); +bool xprt_prepare_transmit(struct rpc_task *task); +void xprt_transmit(struct rpc_task *task); +void xprt_end_transmit(struct rpc_task *task); +int xprt_adjust_timeout(struct rpc_rqst *req); +void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_release(struct rpc_task *task); +struct rpc_xprt * xprt_get(struct rpc_xprt *xprt); +void xprt_put(struct rpc_xprt *xprt); +struct rpc_xprt * xprt_alloc(struct net *net, size_t size, + unsigned int num_prealloc, + unsigned int max_req); +void xprt_free(struct rpc_xprt *); + +static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p) +{ + return p + xprt->tsh_size; +} + +static inline int +xprt_enable_swap(struct rpc_xprt *xprt) +{ + return xprt->ops->enable_swap(xprt); +} + +static inline void +xprt_disable_swap(struct rpc_xprt *xprt) +{ + xprt->ops->disable_swap(xprt); +} + +/* + * Transport switch helper functions + */ +int xprt_register_transport(struct xprt_class *type); +int xprt_unregister_transport(struct xprt_class *type); +int xprt_load_transport(const char *); +void xprt_set_retrans_timeout_def(struct rpc_task *task); +void xprt_set_retrans_timeout_rtt(struct rpc_task *task); +void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); +void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action); +void xprt_write_space(struct rpc_xprt *xprt); +void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result); +struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); +void xprt_update_rtt(struct rpc_task *task); +void xprt_complete_rqst(struct rpc_task *task, int copied); +void xprt_pin_rqst(struct rpc_rqst *req); +void xprt_unpin_rqst(struct 
rpc_rqst *req); +void xprt_release_rqst_cong(struct rpc_task *task); +void xprt_disconnect_done(struct rpc_xprt *xprt); +void xprt_force_disconnect(struct rpc_xprt *xprt); +void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); + +bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *); +void xprt_unlock_connect(struct rpc_xprt *, void *); + +/* + * Reserved bit positions in xprt->state + */ +#define XPRT_LOCKED (0) +#define XPRT_CONNECTED (1) +#define XPRT_CONNECTING (2) +#define XPRT_CLOSE_WAIT (3) +#define XPRT_BOUND (4) +#define XPRT_BINDING (5) +#define XPRT_CLOSING (6) +#define XPRT_CONGESTED (9) + +static inline void xprt_set_connected(struct rpc_xprt *xprt) +{ + set_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline void xprt_clear_connected(struct rpc_xprt *xprt) +{ + clear_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline int xprt_connected(struct rpc_xprt *xprt) +{ + return test_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt) +{ + return test_and_set_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt) +{ + return test_and_clear_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline void xprt_clear_connecting(struct rpc_xprt *xprt) +{ + smp_mb__before_atomic(); + clear_bit(XPRT_CONNECTING, &xprt->state); + smp_mb__after_atomic(); +} + +static inline int xprt_connecting(struct rpc_xprt *xprt) +{ + return test_bit(XPRT_CONNECTING, &xprt->state); +} + +static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt) +{ + return test_and_set_bit(XPRT_CONNECTING, &xprt->state); +} + +static inline int xprt_close_wait(struct rpc_xprt *xprt) +{ + return test_bit(XPRT_CLOSE_WAIT, &xprt->state); +} + +static inline void xprt_set_bound(struct rpc_xprt *xprt) +{ + test_and_set_bit(XPRT_BOUND, &xprt->state); +} + +static inline int xprt_bound(struct rpc_xprt *xprt) +{ + return test_bit(XPRT_BOUND, &xprt->state); +} + +static inline void xprt_clear_bound(struct rpc_xprt *xprt) +{ + clear_bit(XPRT_BOUND, &xprt->state); +} + +static inline void xprt_clear_binding(struct rpc_xprt *xprt) +{ + smp_mb__before_atomic(); + clear_bit(XPRT_BINDING, &xprt->state); + smp_mb__after_atomic(); +} + +static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) +{ + return test_and_set_bit(XPRT_BINDING, &xprt->state); +} + +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +extern unsigned int rpc_inject_disconnect; +static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) +{ + if (!rpc_inject_disconnect) + return; + if (atomic_dec_return(&xprt->inject_disconnect)) + return; + atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect); + xprt->ops->inject_disconnect(xprt); +} +#else +static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) +{ +} +#endif + +#endif /* __KERNEL__*/ + +#endif /* _LINUX_SUNRPC_XPRT_H */ diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h new file mode 100644 index 000000000..af1257c03 --- /dev/null +++ b/include/linux/sunrpc/xprtmultipath.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * RPC client multipathing definitions + * + * Copyright (c) 2015, 2016, Primary Data, Inc. All rights reserved. 
+ * + * Trond Myklebust + */ +#ifndef _NET_SUNRPC_XPRTMULTIPATH_H +#define _NET_SUNRPC_XPRTMULTIPATH_H + +struct rpc_xprt_iter_ops; +struct rpc_xprt_switch { + spinlock_t xps_lock; + struct kref xps_kref; + + unsigned int xps_nxprts; + struct list_head xps_xprt_list; + + struct net * xps_net; + + const struct rpc_xprt_iter_ops *xps_iter_ops; + + struct rcu_head xps_rcu; +}; + +struct rpc_xprt_iter { + struct rpc_xprt_switch __rcu *xpi_xpswitch; + struct rpc_xprt * xpi_cursor; + + const struct rpc_xprt_iter_ops *xpi_ops; +}; + + +struct rpc_xprt_iter_ops { + void (*xpi_rewind)(struct rpc_xprt_iter *); + struct rpc_xprt *(*xpi_xprt)(struct rpc_xprt_iter *); + struct rpc_xprt *(*xpi_next)(struct rpc_xprt_iter *); +}; + +extern struct rpc_xprt_switch *xprt_switch_alloc(struct rpc_xprt *xprt, + gfp_t gfp_flags); + +extern struct rpc_xprt_switch *xprt_switch_get(struct rpc_xprt_switch *xps); +extern void xprt_switch_put(struct rpc_xprt_switch *xps); + +extern void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps); + +extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps, + struct rpc_xprt *xprt); +extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps, + struct rpc_xprt *xprt); + +extern void xprt_iter_init(struct rpc_xprt_iter *xpi, + struct rpc_xprt_switch *xps); + +extern void xprt_iter_init_listall(struct rpc_xprt_iter *xpi, + struct rpc_xprt_switch *xps); + +extern void xprt_iter_destroy(struct rpc_xprt_iter *xpi); + +extern struct rpc_xprt_switch *xprt_iter_xchg_switch( + struct rpc_xprt_iter *xpi, + struct rpc_xprt_switch *newswitch); + +extern struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi); +extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi); +extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi); + +extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps, + const struct sockaddr *sap); +#endif diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h new file mode 100644 index 000000000..86fc38ff0 --- /dev/null +++ b/include/linux/sunrpc/xprtrdma.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the BSD-type + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * Neither the name of the Network Appliance, Inc. nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LINUX_SUNRPC_XPRTRDMA_H +#define _LINUX_SUNRPC_XPRTRDMA_H + +/* + * Constants. Max RPC/NFS header is big enough to account for + * additional marshaling buffers passed down by Linux client. + * + * RDMA header is currently fixed max size, and is big enough for a + * fully-chunked NFS message (read chunks are the largest). Note only + * a single chunk type per message is supported currently. + */ +#define RPCRDMA_MIN_SLOT_TABLE (2U) +#define RPCRDMA_DEF_SLOT_TABLE (128U) +#define RPCRDMA_MAX_SLOT_TABLE (256U) + +#define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */ +#define RPCRDMA_DEF_INLINE (4096) /* default inline thresh */ +#define RPCRDMA_MAX_INLINE (65536) /* max inline thresh */ + +/* Memory registration strategies, by number. + * This is part of a kernel / user space API. Do not remove. */ +enum rpcrdma_memreg { + RPCRDMA_BOUNCEBUFFERS = 0, + RPCRDMA_REGISTER, + RPCRDMA_MEMWINDOWS, + RPCRDMA_MEMWINDOWS_ASYNC, + RPCRDMA_MTHCAFMR, + RPCRDMA_FRWR, + RPCRDMA_ALLPHYSICAL, + RPCRDMA_LAST +}; + +#endif /* _LINUX_SUNRPC_XPRTRDMA_H */ diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h new file mode 100644 index 000000000..ae0f99b9b --- /dev/null +++ b/include/linux/sunrpc/xprtsock.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/sunrpc/xprtsock.h + * + * Declarations for the RPC transport socket provider. 
+ */ + +#ifndef _LINUX_SUNRPC_XPRTSOCK_H +#define _LINUX_SUNRPC_XPRTSOCK_H + +#ifdef __KERNEL__ + +int init_socket_xprt(void); +void cleanup_socket_xprt(void); + +#define RPC_MIN_RESVPORT (1U) +#define RPC_MAX_RESVPORT (65535U) +#define RPC_DEF_MIN_RESVPORT (665U) +#define RPC_DEF_MAX_RESVPORT (1023U) + +struct sock_xprt { + struct rpc_xprt xprt; + + /* + * Network layer + */ + struct socket * sock; + struct sock * inet; + + /* + * State of TCP reply receive + */ + __be32 tcp_fraghdr, + tcp_xid, + tcp_calldir; + + u32 tcp_offset, + tcp_reclen; + + unsigned long tcp_copied, + tcp_flags; + + /* + * Connection of transports + */ + unsigned long sock_state; + struct delayed_work connect_worker; + struct work_struct recv_worker; + struct mutex recv_mutex; + struct sockaddr_storage srcaddr; + unsigned short srcport; + + /* + * UDP socket buffer size parameters + */ + size_t rcvsize, + sndsize; + + struct rpc_timeout tcp_timeout; + + /* + * Saved socket callback addresses + */ + void (*old_data_ready)(struct sock *); + void (*old_state_change)(struct sock *); + void (*old_write_space)(struct sock *); + void (*old_error_report)(struct sock *); +}; + +/* + * TCP receive state flags + */ +#define TCP_RCV_LAST_FRAG (1UL << 0) +#define TCP_RCV_COPY_FRAGHDR (1UL << 1) +#define TCP_RCV_COPY_XID (1UL << 2) +#define TCP_RCV_COPY_DATA (1UL << 3) +#define TCP_RCV_READ_CALLDIR (1UL << 4) +#define TCP_RCV_COPY_CALLDIR (1UL << 5) + +/* + * TCP RPC flags + */ +#define TCP_RPC_REPLY (1UL << 6) + +#define XPRT_SOCK_CONNECTING 1U +#define XPRT_SOCK_DATA_READY (2) +#define XPRT_SOCK_UPD_TIMEOUT (3) + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/include/linux/sunserialcore.h b/include/linux/sunserialcore.h new file mode 100644 index 000000000..c12d1c7fa --- /dev/null +++ b/include/linux/sunserialcore.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* sunserialcore.h + * + * Generic SUN serial/kbd/ms layer. Based entirely + * upon drivers/sbus/char/sunserial.h which is: + * + * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) + * + * Port to new UART layer is: + * + * Copyright (C) 2002 David S. Miller (davem@redhat.com) + */ + +#ifndef _SERIAL_SUN_H +#define _SERIAL_SUN_H + +#include +#include +#include + +/* Serial keyboard defines for L1-A processing... */ +#define SUNKBD_RESET 0xff +#define SUNKBD_L1 0x01 +#define SUNKBD_UP 0x80 +#define SUNKBD_A 0x4d + +extern unsigned int suncore_mouse_baud_cflag_next(unsigned int, int *); +extern int suncore_mouse_baud_detection(unsigned char, int); + +extern int sunserial_register_minors(struct uart_driver *, int); +extern void sunserial_unregister_minors(struct uart_driver *, int); + +extern int sunserial_console_match(struct console *, struct device_node *, + struct uart_driver *, int, bool); +extern void sunserial_console_termios(struct console *, + struct device_node *); + +#endif /* !(_SERIAL_SUN_H) */ diff --git a/include/linux/sunxi-rsb.h b/include/linux/sunxi-rsb.h new file mode 100644 index 000000000..7e75bb034 --- /dev/null +++ b/include/linux/sunxi-rsb.h @@ -0,0 +1,105 @@ +/* + * Allwinner Reduced Serial Bus Driver + * + * Copyright (c) 2015 Chen-Yu Tsai + * + * Author: Chen-Yu Tsai + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ +#ifndef _SUNXI_RSB_H +#define _SUNXI_RSB_H + +#include +#include +#include + +struct sunxi_rsb; + +/** + * struct sunxi_rsb_device - Basic representation of an RSB device + * @dev: Driver model representation of the device. + * @ctrl: RSB controller managing the bus hosting this device. + * @rtaddr: This device's runtime address + * @hwaddr: This device's hardware address + */ +struct sunxi_rsb_device { + struct device dev; + struct sunxi_rsb *rsb; + int irq; + u8 rtaddr; + u16 hwaddr; +}; + +static inline struct sunxi_rsb_device *to_sunxi_rsb_device(struct device *d) +{ + return container_of(d, struct sunxi_rsb_device, dev); +} + +static inline void *sunxi_rsb_device_get_drvdata(const struct sunxi_rsb_device *rdev) +{ + return dev_get_drvdata(&rdev->dev); +} + +static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev, + void *data) +{ + dev_set_drvdata(&rdev->dev, data); +} + +/** + * struct sunxi_rsb_driver - RSB slave device driver + * @driver: RSB device drivers should initialize name and owner field of + * this structure. + * @probe: binds this driver to a RSB device. + * @remove: unbinds this driver from the RSB device. + */ +struct sunxi_rsb_driver { + struct device_driver driver; + int (*probe)(struct sunxi_rsb_device *rdev); + int (*remove)(struct sunxi_rsb_device *rdev); +}; + +static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d) +{ + return container_of(d, struct sunxi_rsb_driver, driver); +} + +int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv); + +/** + * sunxi_rsb_driver_unregister() - unregister an RSB client driver + * @rdrv: the driver to unregister + */ +static inline void sunxi_rsb_driver_unregister(struct sunxi_rsb_driver *rdrv) +{ + if (rdrv) + driver_unregister(&rdrv->driver); +} + +#define module_sunxi_rsb_driver(__sunxi_rsb_driver) \ + module_driver(__sunxi_rsb_driver, sunxi_rsb_driver_register, \ + sunxi_rsb_driver_unregister) + +struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); + +/** + * devm_regmap_init_sunxi_rsb(): Initialise managed register map + * + * @rdev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_sunxi_rsb(rdev, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_sunxi_rsb, #config, \ + rdev, config) + +#endif /* _SUNXI_RSB_H */ diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h new file mode 100644 index 000000000..8d3376775 --- /dev/null +++ b/include/linux/superhyway.h @@ -0,0 +1,107 @@ +/* + * include/linux/superhyway.h + * + * SuperHyway Bus definitions + * + * Copyright (C) 2004, 2005 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#ifndef __LINUX_SUPERHYWAY_H +#define __LINUX_SUPERHYWAY_H + +#include + +/* + * SuperHyway IDs + */ +#define SUPERHYWAY_DEVICE_ID_SH5_DMAC 0x0183 + +struct superhyway_vcr_info { + u8 perr_flags; /* P-port Error flags */ + u8 merr_flags; /* Module Error flags */ + u16 mod_vers; /* Module Version */ + u16 mod_id; /* Module ID */ + u8 bot_mb; /* Bottom Memory block */ + u8 top_mb; /* Top Memory block */ +}; + +struct superhyway_ops { + int (*read_vcr)(unsigned long base, struct superhyway_vcr_info *vcr); + int (*write_vcr)(unsigned long base, struct superhyway_vcr_info vcr); +}; + +struct superhyway_bus { + struct superhyway_ops *ops; +}; + +extern struct superhyway_bus superhyway_channels[]; + +struct superhyway_device_id { + unsigned int id; + unsigned long driver_data; +}; + +struct superhyway_device; +extern struct bus_type superhyway_bus_type; + +struct superhyway_driver { + char *name; + + const struct superhyway_device_id *id_table; + struct device_driver drv; + + int (*probe)(struct superhyway_device *dev, const struct superhyway_device_id *id); + void (*remove)(struct superhyway_device *dev); +}; + +#define to_superhyway_driver(d) container_of((d), struct superhyway_driver, drv) + +struct superhyway_device { + char name[32]; + + struct device dev; + + struct superhyway_device_id id; + struct superhyway_driver *drv; + struct superhyway_bus *bus; + + int num_resources; + struct resource *resource; + struct superhyway_vcr_info vcr; +}; + +#define to_superhyway_device(d) container_of((d), struct superhyway_device, dev) + +#define superhyway_get_drvdata(d) dev_get_drvdata(&(d)->dev) +#define superhyway_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) + +static inline int +superhyway_read_vcr(struct superhyway_device *dev, unsigned long base, + struct superhyway_vcr_info *vcr) +{ + return dev->bus->ops->read_vcr(base, vcr); +} + +static inline int +superhyway_write_vcr(struct superhyway_device *dev, unsigned long base, + struct superhyway_vcr_info vcr) +{ + return dev->bus->ops->write_vcr(base, vcr); +} + +extern int superhyway_scan_bus(struct superhyway_bus *); + +/* drivers/sh/superhyway/superhyway.c */ +int superhyway_register_driver(struct superhyway_driver *); +void superhyway_unregister_driver(struct superhyway_driver *); +int superhyway_add_device(unsigned long base, struct superhyway_device *, struct superhyway_bus *); +int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices); + +/* drivers/sh/superhyway/superhyway-sysfs.c */ +extern const struct attribute_group *superhyway_dev_groups[]; + +#endif /* __LINUX_SUPERHYWAY_H */ + diff --git a/include/linux/suspend.h b/include/linux/suspend.h new file mode 100644 index 000000000..3f529ad9a --- /dev/null +++ b/include/linux/suspend.h @@ -0,0 +1,542 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SUSPEND_H +#define _LINUX_SUSPEND_H + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_VT +extern void pm_set_vt_switch(int); +#else +static inline void pm_set_vt_switch(int do_switch) +{ +} +#endif + +#ifdef CONFIG_VT_CONSOLE_SLEEP +extern void pm_prepare_console(void); +extern void pm_restore_console(void); +#else +static inline void pm_prepare_console(void) +{ +} + +static inline void pm_restore_console(void) +{ +} +#endif + +typedef int __bitwise suspend_state_t; + +#define PM_SUSPEND_ON ((__force suspend_state_t) 0) +#define PM_SUSPEND_TO_IDLE ((__force suspend_state_t) 1) +#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2) 
+#define PM_SUSPEND_MEM ((__force suspend_state_t) 3) +#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE +#define PM_SUSPEND_MAX ((__force suspend_state_t) 4) + +enum suspend_stat_step { + SUSPEND_FREEZE = 1, + SUSPEND_PREPARE, + SUSPEND_SUSPEND, + SUSPEND_SUSPEND_LATE, + SUSPEND_SUSPEND_NOIRQ, + SUSPEND_RESUME_NOIRQ, + SUSPEND_RESUME_EARLY, + SUSPEND_RESUME +}; + +struct suspend_stats { + int success; + int fail; + int failed_freeze; + int failed_prepare; + int failed_suspend; + int failed_suspend_late; + int failed_suspend_noirq; + int failed_resume; + int failed_resume_early; + int failed_resume_noirq; +#define REC_FAILED_NUM 2 + int last_failed_dev; + char failed_devs[REC_FAILED_NUM][40]; + int last_failed_errno; + int errno[REC_FAILED_NUM]; + int last_failed_step; + enum suspend_stat_step failed_steps[REC_FAILED_NUM]; +}; + +extern struct suspend_stats suspend_stats; + +static inline void dpm_save_failed_dev(const char *name) +{ + strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev], + name, + sizeof(suspend_stats.failed_devs[0])); + suspend_stats.last_failed_dev++; + suspend_stats.last_failed_dev %= REC_FAILED_NUM; +} + +static inline void dpm_save_failed_errno(int err) +{ + suspend_stats.errno[suspend_stats.last_failed_errno] = err; + suspend_stats.last_failed_errno++; + suspend_stats.last_failed_errno %= REC_FAILED_NUM; +} + +static inline void dpm_save_failed_step(enum suspend_stat_step step) +{ + suspend_stats.failed_steps[suspend_stats.last_failed_step] = step; + suspend_stats.last_failed_step++; + suspend_stats.last_failed_step %= REC_FAILED_NUM; +} + +/** + * struct platform_suspend_ops - Callbacks for managing platform dependent + * system sleep states. + * + * @valid: Callback to determine if given system sleep state is supported by + * the platform. + * Valid (ie. supported) states are advertised in /sys/power/state. Note + * that it still may be impossible to enter given system sleep state if the + * conditions aren't right. + * There is the %suspend_valid_only_mem function available that can be + * assigned to this if the platform only supports mem sleep. + * + * @begin: Initialise a transition to given system sleep state. + * @begin() is executed right prior to suspending devices. The information + * conveyed to the platform code by @begin() should be disregarded by it as + * soon as @end() is executed. If @begin() fails (ie. returns nonzero), + * @prepare(), @enter() and @finish() will not be called by the PM core. + * This callback is optional. However, if it is implemented, the argument + * passed to @enter() is redundant and should be ignored. + * + * @prepare: Prepare the platform for entering the system sleep state indicated + * by @begin(). + * @prepare() is called right after devices have been suspended (ie. the + * appropriate .suspend() method has been executed for each device) and + * before device drivers' late suspend callbacks are executed. It returns + * 0 on success or a negative error code otherwise, in which case the + * system cannot enter the desired sleep state (@prepare_late(), @enter(), + * and @wake() will not be called in that case). + * + * @prepare_late: Finish preparing the platform for entering the system sleep + * state indicated by @begin(). + * @prepare_late is called before disabling nonboot CPUs and after + * device drivers' late suspend callbacks have been executed. It returns + * 0 on success or a negative error code otherwise, in which case the + * system cannot enter the desired sleep state (@enter() will not be + * executed). 
+ *
+ * @enter: Enter the system sleep state indicated by @begin() or represented by
+ *	the argument if @begin() is not implemented.
+ *	This callback is mandatory. It returns 0 on success or a negative
+ *	error code otherwise, in which case the system cannot enter the desired
+ *	sleep state.
+ *
+ * @wake: Called when the system has just left a sleep state, right after
+ *	the nonboot CPUs have been enabled and before device drivers' early
+ *	resume callbacks are executed.
+ *	This callback is optional, but should be implemented by the platforms
+ *	that implement @prepare_late(). If implemented, it is always called
+ *	after @prepare_late and @enter(), even if one of them fails.
+ *
+ * @finish: Finish wake-up of the platform.
+ *	@finish is called right prior to calling device drivers' regular suspend
+ *	callbacks.
+ *	This callback is optional, but should be implemented by the platforms
+ *	that implement @prepare(). If implemented, it is always called after
+ *	@enter() and @wake(), even if any of them fails. It is executed after
+ *	a failing @prepare.
+ *
+ * @suspend_again: Returns whether the system should suspend again (true) or
+ *	not (false). If the platform wants to poll sensors or execute some
+ *	code while suspended, without waking up userspace or most devices, the
+ *	@suspend_again callback is the place to do it, assuming that a periodic
+ *	wakeup or alarm wakeup has already been set up. This allows code to run
+ *	while the system stays suspended as far as userland and devices are
+ *	concerned.
+ *
+ * @end: Called by the PM core right after resuming devices, to indicate to
+ *	the platform that the system has returned to the working state or
+ *	the transition to the sleep state has been aborted.
+ *	This callback is optional, but should be implemented by the platforms
+ *	that implement @begin(). Accordingly, platforms implementing @begin()
+ *	should also provide an @end() which cleans up transitions aborted before
+ *	@enter().
+ *
+ * @recover: Recover the platform from a suspend failure.
+ *	Called by the PM core if the suspending of devices fails.
+ *	This callback is optional and should only be implemented by platforms
+ *	which require special recovery actions in that situation.
+ */
+struct platform_suspend_ops {
+	int (*valid)(suspend_state_t state);
+	int (*begin)(suspend_state_t state);
+	int (*prepare)(void);
+	int (*prepare_late)(void);
+	int (*enter)(suspend_state_t state);
+	void (*wake)(void);
+	void (*finish)(void);
+	bool (*suspend_again)(void);
+	void (*end)(void);
+	void (*recover)(void);
+};
+
+struct platform_s2idle_ops {
+	int (*begin)(void);
+	int (*prepare)(void);
+	void (*wake)(void);
+	void (*sync)(void);
+	void (*restore)(void);
+	void (*end)(void);
+};
+
+#ifdef CONFIG_SUSPEND
+extern suspend_state_t mem_sleep_current;
+extern suspend_state_t mem_sleep_default;
+
+/**
+ * suspend_set_ops - set platform dependent suspend operations
+ * @ops: The new suspend operations to set.
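+ *
+ * Typically called once from the platform's init code. A minimal sketch,
+ * assuming hypothetical acme_* callbacks (only @enter is mandatory; @valid
+ * can simply be suspend_valid_only_mem on mem-sleep-only platforms):
+ *
+ *	static const struct platform_suspend_ops acme_suspend_ops = {
+ *		.valid	 = suspend_valid_only_mem,
+ *		.prepare = acme_suspend_prepare,
+ *		.enter	 = acme_suspend_enter,
+ *		.finish	 = acme_suspend_finish,
+ *	};
+ *
+ *	static int __init acme_pm_init(void)
+ *	{
+ *		suspend_set_ops(&acme_suspend_ops);
+ *		return 0;
+ *	}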
+ */
+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
+extern int suspend_valid_only_mem(suspend_state_t state);
+
+extern unsigned int pm_suspend_global_flags;
+
+#define PM_SUSPEND_FLAG_FW_SUSPEND	(1 << 0)
+#define PM_SUSPEND_FLAG_FW_RESUME	(1 << 1)
+
+static inline void pm_suspend_clear_flags(void)
+{
+	pm_suspend_global_flags = 0;
+}
+
+static inline void pm_set_suspend_via_firmware(void)
+{
+	pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_SUSPEND;
+}
+
+static inline void pm_set_resume_via_firmware(void)
+{
+	pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME;
+}
+
+static inline bool pm_suspend_via_firmware(void)
+{
+	return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND);
+}
+
+static inline bool pm_resume_via_firmware(void)
+{
+	return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME);
+}
+
+/* Suspend-to-idle state machine. */
+enum s2idle_states {
+	S2IDLE_STATE_NONE,	/* Not suspended/suspending. */
+	S2IDLE_STATE_ENTER,	/* Enter suspend-to-idle. */
+	S2IDLE_STATE_WAKE,	/* Wake up from suspend-to-idle. */
+};
+
+extern enum s2idle_states __read_mostly s2idle_state;
+
+static inline bool idle_should_enter_s2idle(void)
+{
+	return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
+}
+
+extern bool pm_suspend_via_s2idle(void);
+extern void __init pm_states_init(void);
+extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
+extern void s2idle_wake(void);
+
+/**
+ * arch_suspend_disable_irqs - disable IRQs for suspend
+ *
+ * Disables IRQs (in the default case). This is a weak symbol in the common
+ * code and thus allows architectures to override it if more needs to be
+ * done. Not called for suspend to disk.
+ */
+extern void arch_suspend_disable_irqs(void);
+
+/**
+ * arch_suspend_enable_irqs - enable IRQs after suspend
+ *
+ * Enables IRQs (in the default case). This is a weak symbol in the common
+ * code and thus allows architectures to override it if more needs to be
+ * done. Not called for suspend to disk.
+ */
+extern void arch_suspend_enable_irqs(void);
+
+extern int pm_suspend(suspend_state_t state);
+#else /* !CONFIG_SUSPEND */
+#define suspend_valid_only_mem	NULL
+
+static inline void pm_suspend_clear_flags(void) {}
+static inline void pm_set_suspend_via_firmware(void) {}
+static inline void pm_set_resume_via_firmware(void) {}
+static inline bool pm_suspend_via_firmware(void) { return false; }
+static inline bool pm_resume_via_firmware(void) { return false; }
+static inline bool pm_suspend_via_s2idle(void) { return false; }
+
+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
+static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+static inline bool idle_should_enter_s2idle(void) { return false; }
+static inline void __init pm_states_init(void) {}
+static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
+static inline void s2idle_wake(void) {}
+#endif /* !CONFIG_SUSPEND */
+
+/* struct pbe is used for creating lists of pages that should be restored
+ * atomically during the resume from disk, because the page frames they have
+ * occupied before the suspend are in use.
+ */ +struct pbe { + void *address; /* address of the copy */ + void *orig_address; /* original address of a page */ + struct pbe *next; +}; + +/* mm/page_alloc.c */ +extern void mark_free_pages(struct zone *zone); + +/** + * struct platform_hibernation_ops - hibernation platform support + * + * The methods in this structure allow a platform to carry out special + * operations required by it during a hibernation transition. + * + * All the methods below, except for @recover(), must be implemented. + * + * @begin: Tell the platform driver that we're starting hibernation. + * Called right after shrinking memory and before freezing devices. + * + * @end: Called by the PM core right after resuming devices, to indicate to + * the platform that the system has returned to the working state. + * + * @pre_snapshot: Prepare the platform for creating the hibernation image. + * Called right after devices have been frozen and before the nonboot + * CPUs are disabled (runs with IRQs on). + * + * @finish: Restore the previous state of the platform after the hibernation + * image has been created *or* put the platform into the normal operation + * mode after the hibernation (the same method is executed in both cases). + * Called right after the nonboot CPUs have been enabled and before + * thawing devices (runs with IRQs on). + * + * @prepare: Prepare the platform for entering the low power state. + * Called right after the hibernation image has been saved and before + * devices are prepared for entering the low power state. + * + * @enter: Put the system into the low power state after the hibernation image + * has been saved to disk. + * Called after the nonboot CPUs have been disabled and all of the low + * level devices have been shut down (runs with IRQs off). + * + * @leave: Perform the first stage of the cleanup after the system sleep state + * indicated by @set_target() has been left. + * Called right after the control has been passed from the boot kernel to + * the image kernel, before the nonboot CPUs are enabled and before devices + * are resumed. Executed with interrupts disabled. + * + * @pre_restore: Prepare system for the restoration from a hibernation image. + * Called right after devices have been frozen and before the nonboot + * CPUs are disabled (runs with IRQs on). + * + * @restore_cleanup: Clean up after a failing image restoration. + * Called right after the nonboot CPUs have been enabled and before + * thawing devices (runs with IRQs on). + * + * @recover: Recover the platform from a failure to suspend devices. + * Called by the PM core if the suspending of devices during hibernation + * fails. This callback is optional and should only be implemented by + * platforms which require special recovery actions in that situation. 
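+ *
+ * A platform installs these once, typically from its init code, via
+ * hibernation_set_ops() declared further down. A minimal sketch, assuming
+ * hypothetical acme_* callbacks (every method except @recover must be
+ * provided):
+ *
+ *	static const struct platform_hibernation_ops acme_hibernation_ops = {
+ *		.begin		 = acme_hib_begin,
+ *		.end		 = acme_hib_end,
+ *		.pre_snapshot	 = acme_hib_pre_snapshot,
+ *		.finish		 = acme_hib_finish,
+ *		.prepare	 = acme_hib_prepare,
+ *		.enter		 = acme_hib_enter,
+ *		.leave		 = acme_hib_leave,
+ *		.pre_restore	 = acme_hib_pre_restore,
+ *		.restore_cleanup = acme_hib_restore_cleanup,
+ *	};
+ *
+ *	hibernation_set_ops(&acme_hibernation_ops);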
+ */ +struct platform_hibernation_ops { + int (*begin)(void); + void (*end)(void); + int (*pre_snapshot)(void); + void (*finish)(void); + int (*prepare)(void); + int (*enter)(void); + void (*leave)(void); + int (*pre_restore)(void); + void (*restore_cleanup)(void); + void (*recover)(void); +}; + +#ifdef CONFIG_HIBERNATION +/* kernel/power/snapshot.c */ +extern void __register_nosave_region(unsigned long b, unsigned long e, int km); +static inline void __init register_nosave_region(unsigned long b, unsigned long e) +{ + __register_nosave_region(b, e, 0); +} +static inline void __init register_nosave_region_late(unsigned long b, unsigned long e) +{ + __register_nosave_region(b, e, 1); +} +extern int swsusp_page_is_forbidden(struct page *); +extern void swsusp_set_page_free(struct page *); +extern void swsusp_unset_page_free(struct page *); +extern unsigned long get_safe_page(gfp_t gfp_mask); +extern asmlinkage int swsusp_arch_suspend(void); +extern asmlinkage int swsusp_arch_resume(void); + +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); +extern int hibernate(void); +extern bool system_entering_hibernation(void); +extern bool hibernation_available(void); +asmlinkage int swsusp_save(void); +extern struct pbe *restore_pblist; +#else /* CONFIG_HIBERNATION */ +static inline void register_nosave_region(unsigned long b, unsigned long e) {} +static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} +static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } +static inline void swsusp_set_page_free(struct page *p) {} +static inline void swsusp_unset_page_free(struct page *p) {} + +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {} +static inline int hibernate(void) { return -ENOSYS; } +static inline bool system_entering_hibernation(void) { return false; } +static inline bool hibernation_available(void) { return false; } +#endif /* CONFIG_HIBERNATION */ + +/* Hibernation and suspend events */ +#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ +#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ +#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */ +#define PM_POST_SUSPEND 0x0004 /* Suspend finished */ +#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */ +#define PM_POST_RESTORE 0x0006 /* Restore failed */ + +extern struct mutex system_transition_mutex; + +#ifdef CONFIG_PM_SLEEP +void save_processor_state(void); +void restore_processor_state(void); + +/* kernel/power/main.c */ +extern int register_pm_notifier(struct notifier_block *nb); +extern int unregister_pm_notifier(struct notifier_block *nb); + +#define pm_notifier(fn, pri) { \ + static struct notifier_block fn##_nb = \ + { .notifier_call = fn, .priority = pri }; \ + register_pm_notifier(&fn##_nb); \ +} + +/* drivers/base/power/wakeup.c */ +extern bool events_check_enabled; +extern unsigned int pm_wakeup_irq; +extern suspend_state_t pm_suspend_target_state; + +extern bool pm_wakeup_pending(void); +extern void pm_system_wakeup(void); +extern void pm_system_cancel_wakeup(void); +extern void pm_wakeup_clear(bool reset); +extern void pm_system_irq_wakeup(unsigned int irq_number); +extern bool pm_get_wakeup_count(unsigned int *count, bool block); +extern bool pm_save_wakeup_count(unsigned int count); +extern void pm_wakep_autosleep_enabled(bool set); +extern void pm_print_active_wakeup_sources(void); + +extern void lock_system_sleep(void); +extern void unlock_system_sleep(void); + +#else /* 
!CONFIG_PM_SLEEP */ + +static inline int register_pm_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int unregister_pm_notifier(struct notifier_block *nb) +{ + return 0; +} + +#define pm_notifier(fn, pri) do { (void)(fn); } while (0) + +static inline bool pm_wakeup_pending(void) { return false; } +static inline void pm_system_wakeup(void) {} +static inline void pm_wakeup_clear(bool reset) {} +static inline void pm_system_irq_wakeup(unsigned int irq_number) {} + +static inline void lock_system_sleep(void) {} +static inline void unlock_system_sleep(void) {} + +#endif /* !CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_SLEEP_DEBUG +extern bool pm_print_times_enabled; +extern bool pm_debug_messages_on; +extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...); +#else +#define pm_print_times_enabled (false) +#define pm_debug_messages_on (false) + +#include + +#define __pm_pr_dbg(defer, fmt, ...) \ + no_printk(KERN_DEBUG fmt, ##__VA_ARGS__) +#endif + +#define pm_pr_dbg(fmt, ...) \ + __pm_pr_dbg(false, fmt, ##__VA_ARGS__) + +#define pm_deferred_pr_dbg(fmt, ...) \ + __pm_pr_dbg(true, fmt, ##__VA_ARGS__) + +#ifdef CONFIG_PM_AUTOSLEEP + +/* kernel/power/autosleep.c */ +void queue_up_suspend_work(void); + +#else /* !CONFIG_PM_AUTOSLEEP */ + +static inline void queue_up_suspend_work(void) {} + +#endif /* !CONFIG_PM_AUTOSLEEP */ + +#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS +/* + * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture + * to save/restore additional information to/from the array of page + * frame numbers in the hibernation image. For s390 this is used to + * save and restore the storage key for each page that is included + * in the hibernation image. + */ +unsigned long page_key_additional_pages(unsigned long pages); +int page_key_alloc(unsigned long pages); +void page_key_free(void); +void page_key_read(unsigned long *pfn); +void page_key_memorize(unsigned long *pfn); +void page_key_write(void *address); + +#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ + +static inline unsigned long page_key_additional_pages(unsigned long pages) +{ + return 0; +} + +static inline int page_key_alloc(unsigned long pages) +{ + return 0; +} + +static inline void page_key_free(void) {} +static inline void page_key_read(unsigned long *pfn) {} +static inline void page_key_memorize(unsigned long *pfn) {} +static inline void page_key_write(void *address) {} + +#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ + +#endif /* _LINUX_SUSPEND_H */ diff --git a/include/linux/svga.h b/include/linux/svga.h new file mode 100644 index 000000000..3bfe46269 --- /dev/null +++ b/include/linux/svga.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SVGA_H +#define _LINUX_SVGA_H + +#include +#include